-rw-r--r--.gitattributes2
-rw-r--r--.mailmap3
-rw-r--r--CREDITS25
-rw-r--r--Documentation/ABI/testing/sysfs-class-led3
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-oneshot36
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-usbport12
-rw-r--r--Documentation/ABI/testing/sysfs-class-mic.txt2
-rw-r--r--Documentation/ABI/testing/sysfs-class-power8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff15
-rw-r--r--Documentation/ABI/testing/sysfs-driver-wacom5
-rw-r--r--Documentation/ABI/testing/sysfs-i2c-bmp08531
-rw-r--r--Documentation/Changes264
-rw-r--r--Documentation/CodeOfConflict2
-rw-r--r--Documentation/CodingStyle404
-rw-r--r--Documentation/DMA-API-HOWTO.txt8
-rw-r--r--Documentation/DMA-API.txt22
-rw-r--r--Documentation/DocBook/Makefile10
-rw-r--r--Documentation/DocBook/device-drivers.tmpl521
-rw-r--r--Documentation/HOWTO148
-rw-r--r--Documentation/Makefile.sphinx64
-rw-r--r--Documentation/ManagementStyle154
-rw-r--r--Documentation/PCI/pcieaer-howto.txt26
-rw-r--r--Documentation/SecurityBugs8
-rw-r--r--Documentation/SubmitChecklist127
-rw-r--r--Documentation/SubmittingDrivers54
-rw-r--r--Documentation/SubmittingPatches312
-rw-r--r--Documentation/applying-patches.txt431
-rw-r--r--Documentation/arm/sunxi/README13
-rw-r--r--Documentation/block/biodoc.txt4
-rw-r--r--Documentation/clk.txt42
-rw-r--r--Documentation/conf.py104
-rw-r--r--Documentation/dev-tools/coccinelle.rst (renamed from Documentation/coccinelle.txt)359
-rw-r--r--Documentation/dev-tools/gcov.rst256
-rw-r--r--Documentation/dev-tools/gdb-kernel-debugging.rst (renamed from Documentation/gdb-kernel-debugging.txt)77
-rw-r--r--Documentation/dev-tools/kasan.rst173
-rw-r--r--Documentation/dev-tools/kcov.rst (renamed from Documentation/kcov.txt)84
-rw-r--r--Documentation/dev-tools/kmemcheck.rst733
-rw-r--r--Documentation/dev-tools/kmemleak.rst (renamed from Documentation/kmemleak.txt)93
-rw-r--r--Documentation/dev-tools/sparse.rst (renamed from Documentation/sparse.txt)39
-rw-r--r--Documentation/dev-tools/tools.rst25
-rw-r--r--Documentation/dev-tools/ubsan.rst (renamed from Documentation/ubsan.txt)42
-rw-r--r--Documentation/development-process/1.Intro.rst (renamed from Documentation/development-process/1.Intro)68
-rw-r--r--Documentation/development-process/2.Process.rst (renamed from Documentation/development-process/2.Process)41
-rw-r--r--Documentation/development-process/3.Early-stage.rst (renamed from Documentation/development-process/3.Early-stage)22
-rw-r--r--Documentation/development-process/4.Coding.rst (renamed from Documentation/development-process/4.Coding)46
-rw-r--r--Documentation/development-process/5.Posting.rst (renamed from Documentation/development-process/5.Posting)26
-rw-r--r--Documentation/development-process/6.Followthrough.rst (renamed from Documentation/development-process/6.Followthrough)14
-rw-r--r--Documentation/development-process/7.AdvancedTopics.rst (renamed from Documentation/development-process/7.AdvancedTopics)13
-rw-r--r--Documentation/development-process/8.Conclusion.rst (renamed from Documentation/development-process/8.Conclusion)8
-rw-r--r--Documentation/development-process/conf.py10
-rw-r--r--Documentation/development-process/development-process.rst29
-rw-r--r--Documentation/development-process/index.rst9
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt98
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/davinci.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt39
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/armada-39x.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/marvell,orion5x.txt25
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/pmu.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/technologic.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/zte.txt24
-rw-r--r--Documentation/devicetree/bindings/bus/qcom,ebi2.txt138
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt45
-rw-r--r--Documentation/devicetree/bindings/clock/arm-syscon-icst.txt34
-rw-r--r--Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt70
-rw-r--r--Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt27
-rw-r--r--Documentation/devicetree/bindings/clock/armada3700-xtal-clock.txt28
-rw-r--r--Documentation/devicetree/bindings/clock/at91-clock.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt36
-rw-r--r--Documentation/devicetree/bindings/clock/clk-exynos-audss.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5410-clock.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/maxim,max77686.txt118
-rw-r--r--Documentation/devicetree/bindings/clock/maxim,max77802.txt44
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-core-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt17
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,lcc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32-rcc.txt42
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt49
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt20
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt35
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt36
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt61
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen.txt54
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,flexgen.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,quadfs.txt31
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/uniphier-clock.txt134
-rw-r--r--Documentation/devicetree/bindings/clock/xgene.txt18
-rw-r--r--Documentation/devicetree/bindings/clock/zx296718-clk.txt35
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt47
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/sun6i-dma.txt1
-rw-r--r--Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt41
-rw-r--r--Documentation/devicetree/bindings/firmware/meson/meson_sm.txt15
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt46
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-aspeed.txt36
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-axp209.txt30
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt16
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tps65086.txt16
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-ts4900.txt30
-rw-r--r--Documentation/devicetree/bindings/gpio/mrvl-gpio.txt23
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/hwmon/ltc4151.txt18
-rw-r--r--Documentation/devicetree/bindings/hwmon/max6650.txt28
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb.txt35
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-gate.txt41
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-meson.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux.txt23
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/nxp,pca9541.txt29
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/dmard06.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kxsd9.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt29
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-adc12138.txt37
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt16
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt22
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt29
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/zpa2326.txt31
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/sx9500.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt21
-rw-r--r--Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt107
-rw-r--r--Documentation/devicetree/bindings/input/adc-keys.txt49
-rw-r--r--Documentation/devicetree/bindings/input/gpio-decoder.txt23
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys-polled.txt5
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt8
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt27
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/focaltech-ft6236.txt35
-rw-r--r--Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt17
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt4
-rw-r--r--Documentation/devicetree/bindings/leds/common.txt7
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6328.txt2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6358.txt2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-gpio.txt9
-rw-r--r--Documentation/devicetree/bindings/leds/leds-is31fl319x.txt59
-rw-r--r--Documentation/devicetree/bindings/leds/leds-pm8058.txt67
-rw-r--r--Documentation/devicetree/bindings/leds/register-bit-led.txt9
-rw-r--r--Documentation/devicetree/bindings/mailbox/meson-mhu.txt34
-rw-r--r--Documentation/devicetree/bindings/media/meson-ir.txt5
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/fsl/ddr.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/mem-ctrlr.txt)2
-rw-r--r--Documentation/devicetree/bindings/mfd/ac100.txt54
-rw-r--r--Documentation/devicetree/bindings/mfd/act8945a.txt22
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt18
-rw-r--r--Documentation/devicetree/bindings/mfd/aspeed-scu.txt18
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt28
-rw-r--r--Documentation/devicetree/bindings/mfd/lp873x.txt59
-rw-r--r--Documentation/devicetree/bindings/mfd/max77693.txt12
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-rpm.txt15
-rw-r--r--Documentation/devicetree/bindings/mfd/rk808.txt37
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt70
-rw-r--r--Documentation/devicetree/bindings/mfd/stmpe.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt (renamed from Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt)4
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt15
-rw-r--r--Documentation/devicetree/bindings/mmc/sunxi-mmc.txt7
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt1
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt3
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.txt24
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt45
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt24
-rw-r--r--Documentation/devicetree/bindings/net/dsa/qca8k.txt89
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt4
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt1
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt8
-rw-r--r--Documentation/devicetree/bindings/net/meson-dwmac.txt45
-rw-r--r--Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt58
-rw-r--r--Documentation/devicetree/bindings/net/qcom-emac.txt111
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.txt8
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt2
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt15
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt99
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.txt32
-rw-r--r--Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt31
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt35
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt39
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt13
-rw-r--r--Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie.txt106
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt5
-rw-r--r--Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt112
-rw-r--r--Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt23
-rw-r--r--Documentation/devicetree/bindings/phy/meson-usb2-phy.txt27
-rw-r--r--Documentation/devicetree/bindings/phy/mxs-usb-phy.txt10
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt64
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt101
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt6
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt31
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt3
-rw-r--r--Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,orion-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt65
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt39
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt44
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt5
-rw-r--r--Documentation/devicetree/bindings/power/act8945a-charger.txt35
-rw-r--r--Documentation/devicetree/bindings/power/reset/axxia-reset.txt (renamed from Documentation/devicetree/bindings/power_supply/axxia-reset.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt (renamed from Documentation/devicetree/bindings/power_supply/imx-snvs-poweroff.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/reset/msm-poweroff.txt (renamed from Documentation/devicetree/bindings/power_supply/msm-poweroff.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/reset/qnap-poweroff.txt (renamed from Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/reset/restart-poweroff.txt (renamed from Documentation/devicetree/bindings/power_supply/restart-poweroff.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/reset/st-reset.txt7
-rw-r--r--Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt (renamed from Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt (renamed from Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/ab8500/charger.txt (renamed from Documentation/devicetree/bindings/power_supply/ab8500/charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/ab8500/fg.txt (renamed from Documentation/devicetree/bindings/power_supply/ab8500/fg.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/act8945a-charger.txt48
-rw-r--r--Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt (renamed from Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq2415x.txt (renamed from Documentation/devicetree/bindings/power/bq2415x.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24257.txt (renamed from Documentation/devicetree/bindings/power/bq24257.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25890.txt (renamed from Documentation/devicetree/bindings/power/bq25890.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/charger-manager.txt (renamed from Documentation/devicetree/bindings/power_supply/charger-manager.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/da9150-charger.txt (renamed from Documentation/devicetree/bindings/power/da9150-charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/da9150-fg.txt (renamed from Documentation/devicetree/bindings/power/da9150-fg.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/gpio-charger.txt (renamed from Documentation/devicetree/bindings/power_supply/gpio-charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/isp1704.txt (renamed from Documentation/devicetree/bindings/power/isp1704.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/lp8727_charger.txt (renamed from Documentation/devicetree/bindings/power_supply/lp8727_charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/ltc2941.txt (renamed from Documentation/devicetree/bindings/power/ltc2941.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/max17042_battery.txt (renamed from Documentation/devicetree/bindings/power_supply/max17042_battery.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/max8925_batter.txt (renamed from Documentation/devicetree/bindings/power_supply/max8925_batter.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/olpc_battery.txt (renamed from Documentation/devicetree/bindings/power_supply/olpc_battery.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/power_supply.txt (renamed from Documentation/devicetree/bindings/power_supply/power_supply.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom,coincell-charger.txt (renamed from Documentation/devicetree/bindings/power/qcom,coincell-charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom_smbb.txt (renamed from Documentation/devicetree/bindings/power_supply/qcom_smbb.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/rt9455_charger.txt (renamed from Documentation/devicetree/bindings/power/rt9455_charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/rx51-battery.txt (renamed from Documentation/devicetree/bindings/power/rx51-battery.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt (renamed from Documentation/devicetree/bindings/power_supply/sbs_sbs-battery.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/ti,bq24735.txt (renamed from Documentation/devicetree/bindings/power_supply/ti,bq24735.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/tps65090.txt (renamed from Documentation/devicetree/bindings/power_supply/tps65090.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/tps65217_charger.txt (renamed from Documentation/devicetree/bindings/power_supply/tps65217_charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/power/supply/twl-charger.txt (renamed from Documentation/devicetree/bindings/power/twl-charger.txt)0
-rw-r--r--Documentation/devicetree/bindings/regulator/ltc3676.txt94
-rw-r--r--Documentation/devicetree/bindings/regulator/pv88080.txt23
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt132
-rw-r--r--Documentation/devicetree/bindings/reset/st,stm32-rcc.txt6
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt93
-rw-r--r--Documentation/devicetree/bindings/serial/8250.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-usart.txt46
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/auxadc.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/nau8810.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt42
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt75
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/rt5659.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/rt5660.txt47
-rw-r--r--Documentation/devicetree/bindings/sound/rt5663.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.txt37
-rw-r--r--Documentation/devicetree/bindings/sound/simple-scu-card.txt110
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt52
-rw-r--r--Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic31xx.txt9
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt233
-rw-r--r--Documentation/devicetree/bindings/spi/jcore,spi.txt34
-rw-r--r--Documentation/devicetree/bindings/spi/spi-bus.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-meson.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt7
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-cavium.txt28
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt8
-rw-r--r--Documentation/devicetree/bindings/usb/generic.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.txt59
-rw-r--r--Documentation/devicetree/bindings/usb/usb4604.txt19
-rw-r--r--Documentation/devicetree/bindings/usb/usbmisc-imx.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt39
-rw-r--r--Documentation/devicetree/changesets.txt19
-rw-r--r--Documentation/dmaengine/provider.txt11
-rw-r--r--Documentation/docutils.conf7
-rw-r--r--Documentation/driver-api/basics.rst120
-rw-r--r--Documentation/driver-api/frame-buffer.rst62
-rw-r--r--Documentation/driver-api/hsi.rst88
-rw-r--r--Documentation/driver-api/i2c.rst46
-rw-r--r--Documentation/driver-api/index.rst26
-rw-r--r--Documentation/driver-api/infrastructure.rst169
-rw-r--r--Documentation/driver-api/input.rst51
-rw-r--r--Documentation/driver-api/message-based.rst12
-rw-r--r--Documentation/driver-api/miscellaneous.rst50
-rw-r--r--Documentation/driver-api/sound.rst54
-rw-r--r--Documentation/driver-api/spi.rst53
-rw-r--r--Documentation/driver-model/device.txt2
-rw-r--r--Documentation/driver-model/devres.txt8
-rw-r--r--Documentation/email-clients.txt214
-rw-r--r--Documentation/filesystems/dax.txt15
-rw-r--r--Documentation/filesystems/f2fs.txt1
-rw-r--r--Documentation/filesystems/proc.txt14
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/filesystems/xfs.txt123
-rw-r--r--Documentation/gcov.txt257
-rw-r--r--Documentation/gpio/board.txt6
-rw-r--r--Documentation/gpio/driver.txt6
-rw-r--r--Documentation/gpio/gpio-legacy.txt16
-rw-r--r--Documentation/gpu/conf.py5
-rw-r--r--Documentation/gpu/index.rst7
-rw-r--r--Documentation/hid/intel-ish-hid.txt454
-rw-r--r--Documentation/hsi.txt75
-rw-r--r--Documentation/hwmon/adt747017
-rw-r--r--Documentation/hwmon/hwmon-kernel-api.txt231
-rw-r--r--Documentation/hwmon/max66501
-rw-r--r--Documentation/hwmon/ucd900012
-rw-r--r--Documentation/hwmon/xgene-hwmon30
-rw-r--r--Documentation/iio/iio_configfs.txt4
-rw-r--r--Documentation/index.rst11
-rw-r--r--Documentation/infiniband/sysfs.txt30
-rw-r--r--Documentation/ioctl/botching-up-ioctls.txt13
-rw-r--r--Documentation/kasan.txt171
-rw-r--r--Documentation/kbuild/kconfig-language.txt39
-rw-r--r--Documentation/kdump/kdump.txt9
-rw-r--r--Documentation/kernel-docs.txt1375
-rw-r--r--Documentation/kernel-documentation.rst31
-rw-r--r--Documentation/kernel-parameters.txt50
-rw-r--r--Documentation/kmemcheck.txt754
-rw-r--r--Documentation/kprobes.txt10
-rw-r--r--Documentation/leds/leds-mlxcpld.txt110
-rw-r--r--Documentation/leds/ledtrig-oneshot.txt20
-rw-r--r--Documentation/leds/ledtrig-usbport.txt41
-rw-r--r--Documentation/livepatch/module-elf-format.txt20
-rw-r--r--Documentation/media/Makefile3
-rw-r--r--Documentation/media/conf.py10
-rw-r--r--Documentation/media/conf_nitpick.py93
-rw-r--r--Documentation/media/index.rst19
-rw-r--r--Documentation/media/uapi/cec/cec-func-open.rst2
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-dqevent.rst5
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/batman-adv.txt17
-rw-r--r--Documentation/networking/dsa/dsa.txt33
-rw-r--r--Documentation/networking/ena.txt305
-rw-r--r--Documentation/networking/ip-sysctl.txt45
-rw-r--r--Documentation/networking/ipvlan.txt7
-rw-r--r--Documentation/networking/rxrpc.txt90
-rw-r--r--Documentation/networking/strparser.txt136
-rw-r--r--Documentation/networking/switchdev.txt40
-rw-r--r--Documentation/perf/xgene-pmu.txt48
-rw-r--r--Documentation/power/power_supply_class.txt4
-rw-r--r--Documentation/powerpc/cxlflash.txt44
-rw-r--r--Documentation/powerpc/syscall64-abi.txt105
-rw-r--r--Documentation/remoteproc.txt6
-rw-r--r--Documentation/scsi/00-INDEX2
-rw-r--r--Documentation/scsi/dtc3x80.txt43
-rw-r--r--Documentation/scsi/in2000.txt202
-rw-r--r--Documentation/scsi/scsi-parameters.txt20
-rw-r--r--Documentation/scsi/smartpqi.txt80
-rw-r--r--Documentation/serial/serial-rs485.txt5
-rw-r--r--Documentation/sphinx-static/theme_overrides.css15
-rw-r--r--Documentation/sphinx/cdomain.py165
-rw-r--r--Documentation/sphinx/kernel-doc.py8
-rwxr-xr-xDocumentation/sphinx/kernel_include.py7
-rw-r--r--Documentation/sphinx/load_config.py32
-rwxr-xr-xDocumentation/sphinx/parse-headers.pl2
-rwxr-xr-x[-rw-r--r--]Documentation/sphinx/rstFlatTable.py6
-rw-r--r--Documentation/stable_api_nonsense.txt37
-rw-r--r--Documentation/stable_kernel_rules.txt110
-rw-r--r--Documentation/sysctl/README1
-rw-r--r--Documentation/sysctl/fs.txt7
-rw-r--r--Documentation/sysctl/user.txt66
-rw-r--r--Documentation/trace/ftrace.txt10
-rw-r--r--Documentation/trace/hwlat_detector.txt79
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic-its.txt38
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic-v3.txt206
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt52
-rw-r--r--Documentation/virtual/kvm/devices/vcpu.txt4
-rw-r--r--Documentation/vme_api.txt13
-rw-r--r--Documentation/x86/x86_64/mm.txt6
-rw-r--r--Documentation/xtensa/mmu.txt173
-rw-r--r--MAINTAINERS361
-rw-r--r--Makefile2
-rw-r--r--README8
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig.debug22
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/dts/Makefile59
-rw-r--r--arch/arm/boot/dts/am335x-baltos.dtsi6
-rw-r--r--arch/arm/boot/dts/am335x-base0033.dts4
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi12
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts11
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi2
-rw-r--r--arch/arm/boot/dts/am335x-cm-t335.dts8
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts18
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts70
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts47
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi8
-rw-r--r--arch/arm/boot/dts/am335x-lxm.dts6
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts4
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts18
-rw-r--r--arch/arm/boot/dts/am335x-phycore-som.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts16
-rw-r--r--arch/arm/boot/dts/am335x-sl50.dts15
-rw-r--r--arch/arm/boot/dts/am335x-wega.dtsi64
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi92
-rw-r--r--arch/arm/boot/dts/am3517-craneboard.dts2
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts2
-rw-r--r--arch/arm/boot/dts/am3517.dtsi2
-rw-r--r--arch/arm/boot/dts/am3517_mt_ventoux.dts2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi10
-rw-r--r--arch/arm/boot/dts/am437x-cm-t43.dts6
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts21
-rw-r--r--arch/arm/boot/dts/am437x-idk-evm.dts2
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts45
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts11
-rw-r--r--arch/arm/boot/dts/am572x-idk.dts2
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi596
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts24
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts812
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi49
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd-ctrevb.dts32
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd.dts28
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-11mp-ctrevb.dts (renamed from arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts)0
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-11mp.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-a9mp-bbrevd.dts28
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-bbrevd.dts29
-rw-r--r--arch/arm/boot/dts/arm-realview-eb-bbrevd.dtsi45
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi23
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts29
-rw-r--r--arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi40
-rw-r--r--arch/arm/boot/dts/armada-370-synology-ds213j.dts112
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi56
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi34
-rw-r--r--arch/arm/boot/dts/armada-385-db-ap.dts58
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi9
-rw-r--r--arch/arm/boot/dts/armada-388-clearfog.dts48
-rw-r--r--arch/arm/boot/dts/armada-388-db.dts25
-rw-r--r--arch/arm/boot/dts/armada-388-gp.dts30
-rw-r--r--arch/arm/boot/dts/armada-388-rd.dts25
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi57
-rw-r--r--arch/arm/boot/dts/armada-390-db.dts175
-rw-r--r--arch/arm/boot/dts/armada-390.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-395-gp.dts163
-rw-r--r--arch/arm/boot/dts/armada-395.dtsi76
-rw-r--r--arch/arm/boot/dts/armada-398-db.dts56
-rw-r--r--arch/arm/boot/dts/armada-398.dtsi10
-rw-r--r--arch/arm/boot/dts/armada-39x.dtsi126
-rw-r--r--arch/arm/boot/dts/armada-xp-axpwifiap.dts24
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts24
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts24
-rw-r--r--arch/arm/boot/dts/armada-xp-linksys-mamba.dts24
-rw-r--r--arch/arm/boot/dts/armada-xp-synology-ds414.dts112
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi31
-rw-r--r--arch/arm/boot/dts/armv7-m.dtsi2
-rw-r--r--arch/arm/boot/dts/artpec6.dtsi31
-rw-r--r--arch/arm/boot/dts/axp209.dtsi6
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi37
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-a-plus.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-a.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-plus.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero.dts40
-rw-r--r--arch/arm/boot/dts/bcm2836-rpi-2-b.dts1
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-usb-host.dtsi3
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi9
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi11
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts130
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts142
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts44
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts170
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts178
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts71
-rw-r--r--arch/arm/boot/dts/bcm958625k.dts13
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts182
-rw-r--r--arch/arm/boot/dts/berlin2-sony-nsz-gs7.dts2
-rw-r--r--arch/arm/boot/dts/berlin2.dtsi7
-rw-r--r--arch/arm/boot/dts/berlin2cd-google-chromecast.dts2
-rw-r--r--arch/arm/boot/dts/berlin2cd.dtsi5
-rw-r--r--arch/arm/boot/dts/berlin2q-marvell-dmp.dts2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi8
-rw-r--r--arch/arm/boot/dts/da850-evm.dts51
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts221
-rw-r--r--arch/arm/boot/dts/da850.dtsi80
-rw-r--r--arch/arm/boot/dts/dm8148-evm.dts4
-rw-r--r--arch/arm/boot/dts/dm8148-t410.dts4
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi4
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts4
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi4
-rw-r--r--arch/arm/boot/dts/dra62x-j5eco-evm.dts4
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts14
-rw-r--r--arch/arm/boot/dts/dra7.dtsi36
-rw-r--r--arch/arm/boot/dts/dra72-evm-common.dtsi4
-rw-r--r--arch/arm/boot/dts/dra72-evm-revc.dts2
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi1
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi15
-rw-r--r--arch/arm/boot/dts/efm32gg-dk3750.dts5
-rw-r--r--arch/arm/boot/dts/efm32gg.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos3250-artik5.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos3250-monk.dts3
-rw-r--r--arch/arm/boot/dts/exynos3250-pinctrl.dtsi346
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts3
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts3
-rw-r--r--arch/arm/boot/dts/exynos4210-pinctrl.dtsi458
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts15
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts3
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts13
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi16
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidu3.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx.dts5
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx2.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts15
-rw-r--r--arch/arm/boot/dts/exynos4412-smdk4412.dts15
-rw-r--r--arch/arm/boot/dts/exynos4412-tiny4412.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts3
-rw-r--r--arch/arm/boot/dts/exynos4415-pinctrl.dtsi296
-rw-r--r--arch/arm/boot/dts/exynos4415.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4x12-pinctrl.dtsi525
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos5.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts5
-rw-r--r--arch/arm/boot/dts/exynos5250-pinctrl.dtsi404
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts11
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-common.dtsi81
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-rev5.dts6
-rw-r--r--arch/arm/boot/dts/exynos5250-snow.dts6
-rw-r--r--arch/arm/boot/dts/exynos5250-spring.dts77
-rw-r--r--arch/arm/boot/dts/exynos5260-pinctrl.dtsi280
-rw-r--r--arch/arm/boot/dts/exynos5260-xyref5260.dts9
-rw-r--r--arch/arm/boot/dts/exynos5260.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5410-odroidxu.dts35
-rw-r--r--arch/arm/boot/dts/exynos5410-pinctrl.dtsi182
-rw-r--r--arch/arm/boot/dts/exynos5410-smdk5410.dts13
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts22
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts131
-rw-r--r--arch/arm/boot/dts/exynos5420-pinctrl.dtsi356
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts23
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi25
-rw-r--r--arch/arm/boot/dts/exynos5440-sd5v1.dts6
-rw-r--r--arch/arm/boot/dts/exynos5440-ssdk5440.dts6
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos54xx.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts131
-rw-r--r--arch/arm/boot/dts/imx35.dtsi7
-rw-r--r--arch/arm/boot/dts/imx50.dtsi10
-rw-r--r--arch/arm/boot/dts/imx53-usbarmory.dts224
-rw-r--r--arch/arm/boot/dts/imx53.dtsi18
-rw-r--r--arch/arm/boot/dts/imx6dl-gw553x.dts55
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts22
-rw-r--r--arch/arm/boot/dts/imx6dl-ts4900.dts49
-rw-r--r--arch/arm/boot/dts/imx6dl.dtsi53
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-b450v3.dts16
-rw-r--r--arch/arm/boot/dts/imx6q-b650v3.dts9
-rw-r--r--arch/arm/boot/dts/imx6q-ba16.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6q-bx50v3.dtsi70
-rw-r--r--arch/arm/boot/dts/imx6q-cm-fx6.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-evi.dts28
-rw-r--r--arch/arm/boot/dts/imx6q-gw5400-a.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-gw553x.dts55
-rw-r--r--arch/arm/boot/dts/imx6q-marsboard.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-novena.dts12
-rw-r--r--arch/arm/boot/dts/imx6q-sbc6x.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts24
-rw-r--r--arch/arm/boot/dts/imx6q-ts4900.dts53
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi37
-rw-r--r--arch/arm/boot/dts/imx6qdl-apalis.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi36
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi39
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi39
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi44
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw551x.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw552x.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw553x.dtsi433
-rw-r--r--arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi36
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-ts4900.dtsi481
-rw-r--r--arch/arm/boot/dts/imx6qdl-udoo.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi24
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi47
-rw-r--r--arch/arm/boot/dts/imx6sx-pinfunc.h14
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi22
-rw-r--r--arch/arm/boot/dts/imx6ul-geam-kit.dts101
-rw-r--r--arch/arm/boot/dts/imx6ul-geam.dtsi361
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-hobbit.dts33
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi9
-rw-r--r--arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7-colibri.dtsi61
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi42
-rw-r--r--arch/arm/boot/dts/imx7s-warp.dts446
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi371
-rw-r--r--arch/arm/boot/dts/integratorap.dts51
-rw-r--r--arch/arm/boot/dts/integratorcp.dts74
-rw-r--r--arch/arm/boot/dts/keystone-k2e-evm.dts12
-rw-r--r--arch/arm/boot/dts/keystone-k2e.dtsi4
-rw-r--r--arch/arm/boot/dts/keystone-k2g.dtsi32
-rw-r--r--arch/arm/boot/dts/keystone-k2hk-evm.dts6
-rw-r--r--arch/arm/boot/dts/keystone-k2l-evm.dts6
-rw-r--r--arch/arm/boot/dts/keystone.dtsi4
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts9
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi5
-rw-r--r--arch/arm/boot/dts/lpc18xx.dtsi7
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi12
-rw-r--r--arch/arm/boot/dts/lpc4337-ciaa.dts2
-rw-r--r--arch/arm/boot/dts/lpc4350-hitex-eval.dts4
-rw-r--r--arch/arm/boot/dts/lpc4357-ea4357-devkit.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts13
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi21
-rw-r--r--arch/arm/boot/dts/mps2.dtsi1
-rw-r--r--arch/arm/boot/dts/ntc-gr8-evb.dts342
-rw-r--r--arch/arm/boot/dts/ntc-gr8.dtsi1087
-rw-r--r--arch/arm/boot/dts/omap2.dtsi4
-rw-r--r--arch/arm/boot/dts/omap2420-h4.dts2
-rw-r--r--arch/arm/boot/dts/omap2420-n8x0-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap2430-sdp.dts2
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3x.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-evm-37xx.dts2
-rw-r--r--arch/arm/boot/dts/omap3-evm.dts2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-ldp.dts2
-rw-r--r--arch/arm/boot/dts/omap3-lilly-a83x.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-overo-alto35-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-base.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-gallop43-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-overo-palo35-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-overo-palo43-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-pandora-common.dtsi10
-rw-r--r--arch/arm/boot/dts/omap3-sniper.dts2
-rw-r--r--arch/arm/boot/dts/omap3-tao3530.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-zoom3.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3430-sdp.dts2
-rw-r--r--arch/arm/boot/dts/omap34xx.dtsi2
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4-duovero-parlor.dts2
-rw-r--r--arch/arm/boot/dts/omap4-duovero.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4-kc1.dts2
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts2
-rw-r--r--arch/arm/boot/dts/omap4-var-som-om44.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi29
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts6
-rw-r--r--arch/arm/boot/dts/omap5-igep0050.dts42
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts18
-rw-r--r--arch/arm/boot/dts/omap5.dtsi26
-rw-r--r--arch/arm/boot/dts/orion5x-mv88f5181.dtsi49
-rw-r--r--arch/arm/boot/dts/orion5x-netgear-wnr854t.dts251
-rw-r--r--arch/arm/boot/dts/orion5x.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom-apq8060-dragonboard.dts39
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts15
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi180
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi103
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064-ap148.dts1
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi75
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts262
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts81
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi139
-rw-r--r--arch/arm/boot/dts/qcom-pm8941.dtsi5
-rw-r--r--arch/arm/boot/dts/r7s72100-rskrza1.dts61
-rw-r--r--arch/arm/boot/dts/r7s72100.dtsi22
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi23
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi18
-rw-r--r--arch/arm/boot/dts/r8a7792-blanche.dts264
-rw-r--r--arch/arm/boot/dts/r8a7792-wheat.dts199
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi554
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts106
-rw-r--r--arch/arm/boot/dts/r8a7794-silk.dts70
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi309
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi12
-rw-r--r--arch/arm/boot/dts/rk3288-evb-act8846.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb-rk808.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi45
-rw-r--r--arch/arm/boot/dts/rk3288-fennec.dts382
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi310
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload.dts403
-rw-r--r--arch/arm/boot/dts/rk3288-popmetal.dts12
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi73
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi12
-rw-r--r--arch/arm/boot/dts/s3c2416-pinctrl.dtsi38
-rw-r--r--arch/arm/boot/dts/s3c6410-mini6410.dts4
-rw-r--r--arch/arm/boot/dts/s3c64xx-pinctrl.dtsi356
-rw-r--r--arch/arm/boot/dts/s5pv210-aquila.dts6
-rw-r--r--arch/arm/boot/dts/s5pv210-goni.dts2
-rw-r--r--arch/arm/boot/dts/s5pv210-pinctrl.dtsi476
-rw-r--r--arch/arm/boot/dts/s5pv210-smdkc110.dts2
-rw-r--r--arch/arm/boot/dts/s5pv210-smdkv210.dts2
-rw-r--r--arch/arm/boot/dts/s5pv210-torbreck.dts2
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi4
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi29
-rw-r--r--arch/arm/boot/dts/skeleton.dtsi4
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi16
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts12
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts68
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi28
-rw-r--r--arch/arm/boot/dts/stih407-clock.dtsi22
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi173
-rw-r--r--arch/arm/boot/dts/stih407-pinctrl.dtsi140
-rw-r--r--arch/arm/boot/dts/stih407.dtsi16
-rw-r--r--arch/arm/boot/dts/stih410-b2260.dts194
-rw-r--r--arch/arm/boot/dts/stih410-clock.dtsi20
-rw-r--r--arch/arm/boot/dts/stih410.dtsi34
-rw-r--r--arch/arm/boot/dts/stih415-pinctrl.dtsi54
-rw-r--r--arch/arm/boot/dts/stih416-b2020e.dts6
-rw-r--r--arch/arm/boot/dts/stih416-pinctrl.dtsi65
-rw-r--r--arch/arm/boot/dts/stih416.dtsi8
-rw-r--r--arch/arm/boot/dts/stih418-b2199.dts8
-rw-r--r--arch/arm/boot/dts/stih418-clock.dtsi20
-rw-r--r--arch/arm/boot/dts/stih41x-b2000.dtsi5
-rw-r--r--arch/arm/boot/dts/stih41x-b2020.dtsi6
-rw-r--r--arch/arm/boot/dts/stihxxx-b2120.dtsi52
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi2
-rw-r--r--arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13-empire-electronix-m712.dts51
-rw-r--r--arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts164
-rw-r--r--arch/arm/boot/dts/sun5i-r8-chip.dts10
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi432
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts138
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-inet-q972.dts100
-rw-r--r--arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi193
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi8
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi312
-rw-r--r--arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts195
-rw-r--r--arch/arm/boot/dts/sun8i-a23-inet86dz.dts9
-rw-r--r--arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts68
-rw-r--r--arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts51
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi80
-rw-r--r--arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts28
-rw-r--r--arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts108
-rw-r--r--arch/arm/boot/dts/sun8i-a33-olinuxino.dts226
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi236
-rw-r--r--arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts125
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-2.dts28
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts178
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-one.dts18
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts88
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts18
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts53
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi99
-rw-r--r--arch/arm/boot/dts/sun8i-q8-common.dtsi49
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi26
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts145
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts152
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi14
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi8
-rw-r--r--arch/arm/boot/dts/uniphier-common32.dtsi63
-rw-r--r--arch/arm/boot/dts/uniphier-ld4-ref.dts (renamed from arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-ld4.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-ld4.dtsi)59
-rw-r--r--arch/arm/boot/dts/uniphier-ld6b-ref.dts (renamed from arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-ld6b.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi)17
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ace.dts (renamed from arch/arm/boot/dts/uniphier-ph1-pro4-ace.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ref.dts (renamed from arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-sanji.dts (renamed from arch/arm/boot/dts/uniphier-ph1-pro4-sanji.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-pro4.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-pro4.dtsi)63
-rw-r--r--arch/arm/boot/dts/uniphier-pro5.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-pro5.dtsi)58
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-gentil.dts (renamed from arch/arm/boot/dts/uniphier-proxstream2-gentil.dts)12
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-vodka.dts (renamed from arch/arm/boot/dts/uniphier-proxstream2-vodka.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi (renamed from arch/arm/boot/dts/uniphier-proxstream2.dtsi)62
-rw-r--r--arch/arm/boot/dts/uniphier-sld3-ref.dts (renamed from arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-sld3.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-sld3.dtsi)75
-rw-r--r--arch/arm/boot/dts/uniphier-sld8-ref.dts (renamed from arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts)11
-rw-r--r--arch/arm/boot/dts/uniphier-sld8.dtsi (renamed from arch/arm/boot/dts/uniphier-ph1-sld8.dtsi)59
-rw-r--r--arch/arm/boot/dts/vf-colibri-eval-v3.dtsi13
-rw-r--r--arch/arm/boot/dts/vf610m4.dtsi1
-rw-r--r--arch/arm/common/bL_switcher_dummy_if.c14
-rw-r--r--arch/arm/common/sa1111.c434
-rw-r--r--arch/arm/configs/colibri_pxa270_defconfig1
-rw-r--r--arch/arm/configs/davinci_all_defconfig66
-rw-r--r--arch/arm/configs/exynos_defconfig9
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig17
-rw-r--r--arch/arm/configs/integrator_defconfig13
-rw-r--r--arch/arm/configs/lpd270_defconfig1
-rw-r--r--arch/arm/configs/multi_v4t_defconfig4
-rw-r--r--arch/arm/configs/multi_v7_defconfig11
-rw-r--r--arch/arm/configs/mvebu_v5_defconfig5
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig3
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/pxa255-idp_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/configs/s3c2410_defconfig1
-rw-r--r--arch/arm/configs/trizeps4_defconfig2
-rw-r--r--arch/arm/include/asm/arch_gicv3.h93
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/cacheflush.h17
-rw-r--r--arch/arm/include/asm/cachetype.h39
-rw-r--r--arch/arm/include/asm/cp15.h15
-rw-r--r--arch/arm/include/asm/cputype.h37
-rw-r--r--arch/arm/include/asm/delay.h2
-rw-r--r--arch/arm/include/asm/flat.h5
-rw-r--r--arch/arm/include/asm/glue-cache.h4
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h19
-rw-r--r--arch/arm/include/asm/hardware/cache-uniphier.h20
-rw-r--r--arch/arm/include/asm/hardware/sa1111.h4
-rw-r--r--arch/arm/include/asm/hw_breakpoint.h1
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/kvm_asm.h7
-rw-r--r--arch/arm/include/asm/kvm_emulate.h35
-rw-r--r--arch/arm/include/asm/kvm_host.h17
-rw-r--r--arch/arm/include/asm/kvm_hyp.h18
-rw-r--r--arch/arm/include/asm/kvm_mmu.h28
-rw-r--r--arch/arm/include/asm/memory.h15
-rw-r--r--arch/arm/include/asm/module.h6
-rw-r--r--arch/arm/include/asm/v7m.h22
-rw-r--r--arch/arm/include/debug/brcmstb.S145
-rw-r--r--arch/arm/include/uapi/asm/kvm.h7
-rw-r--r--arch/arm/kernel/cpuidle.c2
-rw-r--r--arch/arm/kernel/head-nommu.S16
-rw-r--r--arch/arm/kernel/module-plts.c243
-rw-r--r--arch/arm/kernel/module.lds3
-rw-r--r--arch/arm/kernel/setup.c26
-rw-r--r--arch/arm/kernel/smp.c15
-rw-r--r--arch/arm/kernel/vdso.c11
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S1
-rw-r--r--arch/arm/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/kvm/Makefile3
-rw-r--r--arch/arm/kvm/arm.c22
-rw-r--r--arch/arm/kvm/coproc.c35
-rw-r--r--arch/arm/kvm/emulate.c111
-rw-r--r--arch/arm/kvm/handle_exit.c49
-rw-r--r--arch/arm/kvm/hyp/Makefile1
-rw-r--r--arch/arm/kvm/hyp/entry.S31
-rw-r--r--arch/arm/kvm/hyp/hyp-entry.S16
-rw-r--r--arch/arm/kvm/hyp/switch.c25
-rw-r--r--arch/arm/kvm/hyp/tlb.c15
-rw-r--r--arch/arm/kvm/mmio.c6
-rw-r--r--arch/arm/kvm/mmu.c7
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/mach-at91/Kconfig1
-rw-r--r--arch/arm/mach-axxia/Kconfig2
-rw-r--r--arch/arm/mach-bcm/Kconfig15
-rw-r--r--arch/arm/mach-bcm/brcmstb.c16
-rw-r--r--arch/arm/mach-clps711x/Makefile.boot0
-rw-r--r--arch/arm/mach-clps711x/board-autcpu12.c275
-rw-r--r--arch/arm/mach-clps711x/board-cdb89712.c147
-rw-r--r--arch/arm/mach-clps711x/board-clep7312.c45
-rw-r--r--arch/arm/mach-clps711x/board-edb7211.c188
-rw-r--r--arch/arm/mach-clps711x/board-p720t.c373
-rw-r--r--arch/arm/mach-clps711x/common.c65
-rw-r--r--arch/arm/mach-clps711x/common.h23
-rw-r--r--arch/arm/mach-clps711x/devices.c149
-rw-r--r--arch/arm/mach-clps711x/devices.h12
-rw-r--r--arch/arm/mach-cns3xxx/Kconfig1
-rw-r--r--arch/arm/mach-davinci/da850.c1
-rw-r--r--arch/arm/mach-davinci/da8xx-dt.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig3
-rw-r--r--arch/arm/mach-exynos/exynos.c15
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h5
-rw-r--r--arch/arm/mach-footbridge/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-imx/Kconfig52
-rw-r--r--arch/arm/mach-imx/Makefile8
-rw-r--r--arch/arm/mach-imx/common.h7
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c2
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c11
-rw-r--r--arch/arm/mach-imx/devices-imx1.h30
-rw-r--r--arch/arm/mach-imx/devices/Makefile1
-rw-r--r--arch/arm/mach-imx/devices/devices-common.h12
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-fb.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-i2c.c5
-rw-r--r--arch/arm/mach-imx/devices/platform-imx-uart.c37
-rw-r--r--arch/arm/mach-imx/devices/platform-spi_imx.c9
-rw-r--r--arch/arm/mach-imx/hardware.h3
-rw-r--r--arch/arm/mach-imx/iomux-mx1.h155
-rw-r--r--arch/arm/mach-imx/iomux-mx3.h34
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c148
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c39
-rw-r--r--arch/arm/mach-imx/mach-imx1.c (renamed from arch/arm/mach-imx/imx1-dt.c)23
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c24
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c3
-rw-r--r--arch/arm/mach-imx/mach-kzm_arm11_01.c15
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c13
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c30
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c13
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c39
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c18
-rw-r--r--arch/arm/mach-imx/mach-mx31lilly.c50
-rw-r--r--arch/arm/mach-imx/mach-mx31lite.c68
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c23
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c15
-rw-r--r--arch/arm/mach-imx/mach-pca100.c26
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c69
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c10
-rw-r--r--arch/arm/mach-imx/mach-qong.c2
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c143
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c29
-rw-r--r--arch/arm/mach-imx/mm-imx1.c67
-rw-r--r--arch/arm/mach-imx/mx1.h172
-rw-r--r--arch/arm/mach-imx/mx31lilly-db.c20
-rw-r--r--arch/arm/mach-imx/mx31lite-db.c33
-rw-r--r--arch/arm/mach-imx/pm-imx6.c8
-rw-r--r--arch/arm/mach-integrator/Kconfig1
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c36
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c123
-rw-r--r--arch/arm/mach-keystone/Kconfig2
-rw-r--r--arch/arm/mach-mv78xx0/common.c9
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c77
-rw-r--r--arch/arm/mach-nomadik/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-h2-mmc.c2
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3-mmc.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c4
-rw-r--r--arch/arm/mach-omap1/board-innovator.c4
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c4
-rw-r--r--arch/arm/mach-omap1/board-sx1-mmc.c2
-rw-r--r--arch/arm/mach-omap1/devices.c10
-rw-r--r--arch/arm/mach-omap1/fb.c2
-rw-r--r--arch/arm/mach-omap1/include/mach/usb.h2
-rw-r--r--arch/arm/mach-omap1/mmc.h2
-rw-r--r--arch/arm/mach-omap1/usb.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig12
-rw-r--r--arch/arm/mach-omap2/Makefile4
-rw-r--r--arch/arm/mach-omap2/board-flash.c10
-rw-r--r--arch/arm/mach-omap2/board-flash.h11
-rw-r--r--arch/arm/mach-omap2/board-ldp.c430
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c5
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c1312
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c67
-rw-r--r--arch/arm/mach-omap2/board-rx51.c141
-rw-r--r--arch/arm/mach-omap2/board-rx51.h11
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c3
-rw-r--r--arch/arm/mach-omap2/devices.c7
-rw-r--r--arch/arm/mach-omap2/drm.c2
-rw-r--r--arch/arm/mach-omap2/fb.c2
-rw-r--r--arch/arm/mach-omap2/gpmc-smsc911x.h2
-rw-r--r--arch/arm/mach-omap2/hsmmc.c2
-rw-r--r--arch/arm/mach-omap2/hsmmc.h2
-rw-r--r--arch/arm/mach-omap2/twl-common.c3
-rw-r--r--arch/arm/mach-orion5x/common.c4
-rw-r--r--arch/arm/mach-orion5x/common.h2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c4
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c5
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c6
-rw-r--r--arch/arm/mach-orion5x/wnr854t-setup.c4
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c4
-rw-r--r--arch/arm/mach-pxa/Kconfig11
-rw-r--r--arch/arm/mach-pxa/Makefile3
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c13
-rw-r--r--arch/arm/mach-pxa/devices.h1
-rw-r--r--arch/arm/mach-pxa/generic.h2
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/dma.h1
-rw-r--r--arch/arm/mach-pxa/magician.c6
-rw-r--r--arch/arm/mach-pxa/pm.c5
-rw-r--r--arch/arm/mach-pxa/pxa-dt.c35
-rw-r--r--arch/arm/mach-pxa/pxa25x.c26
-rw-r--r--arch/arm/mach-pxa/pxa27x.c14
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c11
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c24
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c2
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.h2
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c9
-rw-r--r--arch/arm/mach-qcom/Makefile1
-rw-r--r--arch/arm/mach-realview/Kconfig72
-rw-r--r--arch/arm/mach-realview/Makefile12
-rw-r--r--arch/arm/mach-realview/board-eb.h94
-rw-r--r--arch/arm/mach-realview/board-pb1176.h81
-rw-r--r--arch/arm/mach-realview/board-pb11mp.h96
-rw-r--r--arch/arm/mach-realview/board-pba8.h71
-rw-r--r--arch/arm/mach-realview/board-pbx.h106
-rw-r--r--arch/arm/mach-realview/core.c405
-rw-r--r--arch/arm/mach-realview/core.h58
-rw-r--r--arch/arm/mach-realview/hardware.h40
-rw-r--r--arch/arm/mach-realview/hotplug.h1
-rw-r--r--arch/arm/mach-realview/irqs-eb.h114
-rw-r--r--arch/arm/mach-realview/irqs-pb1176.h77
-rw-r--r--arch/arm/mach-realview/irqs-pb11mp.h97
-rw-r--r--arch/arm/mach-realview/irqs-pba8.h71
-rw-r--r--arch/arm/mach-realview/irqs-pbx.h87
-rw-r--r--arch/arm/mach-realview/platform.h247
-rw-r--r--arch/arm/mach-realview/platsmp-dt.c3
-rw-r--r--arch/arm/mach-realview/platsmp.c86
-rw-r--r--arch/arm/mach-realview/realview_eb.c492
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c395
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c385
-rw-r--r--arch/arm/mach-realview/realview_pba8.c307
-rw-r--r--arch/arm/mach-realview/realview_pbx.c402
-rw-r--r--arch/arm/mach-rpc/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-s3c24xx/common.c55
-rw-r--r--arch/arm/mach-s3c24xx/mach-mini2440.c20
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c3
-rw-r--r--arch/arm/mach-sa1100/h3xxx.c2
-rw-r--r--arch/arm/mach-sa1100/include/mach/hardware.h20
-rw-r--r--arch/arm/mach-sa1100/jornada720.c16
-rw-r--r--arch/arm/mach-shmobile/Kconfig1
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7791.c2
-rw-r--r--arch/arm/mach-spear/Kconfig1
-rw-r--r--arch/arm/mach-sunxi/Kconfig1
-rw-r--r--arch/arm/mach-sunxi/sunxi.c1
-rw-r--r--arch/arm/mach-uniphier/Makefile2
-rw-r--r--arch/arm/mach-uniphier/headsmp.S43
-rw-r--r--arch/arm/mach-uniphier/platsmp.c209
-rw-r--r--arch/arm/mach-versatile/Kconfig1
-rw-r--r--arch/arm/mm/Kconfig17
-rw-r--r--arch/arm/mm/Makefile3
-rw-r--r--arch/arm/mm/cache-l2x0-pmu.c584
-rw-r--r--arch/arm/mm/cache-l2x0.c11
-rw-r--r--arch/arm/mm/cache-uniphier.c63
-rw-r--r--arch/arm/mm/cache-v7m.S453
-rw-r--r--arch/arm/mm/dma-mapping.c67
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/mm/proc-macros.S16
-rw-r--r--arch/arm/mm/proc-v7m.S108
-rw-r--r--arch/arm/plat-orion/common.c85
-rw-r--r--arch/arm/plat-orion/include/plat/common.h10
-rw-r--r--arch/arm/plat-pxa/Makefile2
-rw-r--r--arch/arm/plat-pxa/dma.c386
-rw-r--r--arch/arm/plat-pxa/include/plat/dma.h100
-rw-r--r--arch/arm/plat-samsung/include/plat/map-s5p.h4
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/Kconfig.platforms30
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts11
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi35
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi42
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi329
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts1
-rw-r--r--arch/arm64/boot/dts/apm/apm-shadowcat.dtsi135
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi105
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi8
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts14
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi18
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts86
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi61
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05.dtsi5
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi180
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06-d03.dts28
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06.dtsi365
-rw-r--r--arch/arm64/boot/dts/marvell/Makefile1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8020.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-db.dts150
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi14
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi249
-rw-r--r--arch/arm64/boot/dts/marvell/berlin4ct.dtsi12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-evb.dts38
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi77
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts120
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi79
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi48
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi82
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi39
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dts (renamed from arch/arm/mach-qcom/board.c)24
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi88
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi286
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi94
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts328
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts87
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi276
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts16
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi117
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts382
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb.dts87
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi607
-rw-r--r--arch/arm64/boot/dts/socionext/Makefile4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts100
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi338
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts (renamed from arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts)11
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi (renamed from arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi)94
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi100
-rw-r--r--arch/arm64/boot/dts/zte/Makefile5
-rw-r--r--arch/arm64/boot/dts/zte/zx296718-evb.dts64
-rw-r--r--arch/arm64/boot/dts/zte/zx296718.dtsi292
-rw-r--r--arch/arm64/configs/defconfig35
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h13
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/kvm_asm.h9
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h11
-rw-r--r--arch/arm64/include/asm/kvm_host.h12
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm64/kernel/acpi.c11
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm64/kvm/Kconfig4
-rw-r--r--arch/arm64/kvm/Makefile3
-rw-r--r--arch/arm64/kvm/handle_exit.c23
-rw-r--r--arch/arm64/kvm/hyp/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c4
-rw-r--r--arch/arm64/kvm/hyp/entry.S128
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S73
-rw-r--r--arch/arm64/kvm/hyp/switch.c84
-rw-r--r--arch/arm64/kvm/hyp/tlb.c13
-rw-r--r--arch/arm64/kvm/inject_fault.c12
-rw-r--r--arch/avr32/kernel/traps.c3
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S1
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S1
-rw-r--r--arch/blackfin/mach-bf561/coreb.c13
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S1
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/frv/kernel/vmlinux.lds.S1
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S1
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/include/asm/atomic.h16
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/Kconfig.cpu6
-rw-r--r--arch/m68k/coldfire/clk.c4
-rw-r--r--arch/m68k/coldfire/head.S8
-rw-r--r--arch/m68k/coldfire/m528x.c8
-rw-r--r--arch/m68k/coldfire/m53xx.c8
-rw-r--r--arch/m68k/coldfire/m54xx.c40
-rw-r--r--arch/m68k/include/asm/bootinfo.h6
-rw-r--r--arch/m68k/include/asm/m5441xsim.h4
-rw-r--r--arch/m68k/include/asm/m54xxacr.h2
-rw-r--r--arch/m68k/include/asm/m54xxsim.h4
-rw-r--r--arch/m68k/include/asm/mcfmmu.h1
-rw-r--r--arch/m68k/include/asm/nettel.h2
-rw-r--r--arch/m68k/include/uapi/asm/bootinfo.h1
-rw-r--r--arch/m68k/kernel/Makefile1
-rw-r--r--arch/m68k/kernel/process.c56
-rw-r--r--arch/m68k/kernel/setup_mm.c6
-rw-r--r--arch/m68k/kernel/setup_no.c103
-rw-r--r--arch/m68k/kernel/uboot.c107
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds1
-rw-r--r--arch/m68k/mm/mcfmmu.c31
-rw-r--r--arch/metag/kernel/vmlinux.lds.S1
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S1
-rw-r--r--arch/microblaze/pci/pci-common.c4
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/include/asm/irq.h5
-rw-r--r--arch/mips/include/asm/kvm_host.h63
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/include/asm/uprobes.h12
-rw-r--r--arch/mips/kernel/process.c11
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/kvm/emulate.c78
-rw-r--r--arch/mips/kvm/mips.c40
-rw-r--r--arch/mips/kvm/mmu.c16
-rw-r--r--arch/mips/kvm/trap_emul.c18
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S1
-rwxr-xr-xarch/nios2/boot/dts/10m50_devboard.dts1
-rw-r--r--arch/nios2/kernel/vmlinux.lds.S1
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/parisc/Kconfig5
-rw-r--r--arch/parisc/include/asm/dwarf.h23
-rw-r--r--arch/parisc/include/asm/linkage.h12
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h48
-rw-r--r--arch/parisc/kernel/entry.S46
-rw-r--r--arch/parisc/kernel/hpmc.S16
-rw-r--r--arch/parisc/kernel/inventory.c2
-rw-r--r--arch/parisc/kernel/pacache.S84
-rw-r--r--arch/parisc/kernel/real2.S24
-rw-r--r--arch/parisc/kernel/setup.c8
-rw-r--r--arch/parisc/kernel/smp.c4
-rw-r--r--arch/parisc/kernel/time.c6
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S3
-rw-r--r--arch/parisc/lib/fixup.S16
-rw-r--r--arch/parisc/lib/lusercopy.S8
-rw-r--r--arch/parisc/lib/memcpy.c11
-rw-r--r--arch/parisc/mm/fault.c48
-rw-r--r--arch/parisc/mm/init.c126
-rw-r--r--arch/powerpc/Kconfig11
-rw-r--r--arch/powerpc/Makefile43
-rw-r--r--arch/powerpc/boot/Makefile86
-rw-r--r--arch/powerpc/boot/cuboot-c2k.c1
-rw-r--r--arch/powerpc/boot/decompress.c148
-rw-r--r--arch/powerpc/boot/fixup-headers.sed12
-rw-r--r--arch/powerpc/boot/gunzip_util.c204
-rw-r--r--arch/powerpc/boot/gunzip_util.h45
-rw-r--r--arch/powerpc/boot/main.c35
-rw-r--r--arch/powerpc/boot/ops.h3
-rw-r--r--arch/powerpc/boot/stdbool.h14
-rw-r--r--arch/powerpc/boot/stdint.h13
-rw-r--r--arch/powerpc/boot/types.h14
-rwxr-xr-xarch/powerpc/boot/wrapper61
-rw-r--r--arch/powerpc/boot/xz_config.h39
-rw-r--r--arch/powerpc/configs/powernv_defconfig19
-rw-r--r--arch/powerpc/configs/ppc64_defconfig19
-rw-r--r--arch/powerpc/configs/pseries_defconfig19
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h43
-rw-r--r--arch/powerpc/include/asm/atomic.h4
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h37
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h7
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h88
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h1
-rw-r--r--arch/powerpc/include/asm/cputable.h4
-rw-r--r--arch/powerpc/include/asm/exception-64s.h148
-rw-r--r--arch/powerpc/include/asm/fadump.h4
-rw-r--r--arch/powerpc/include/asm/head-64.h393
-rw-r--r--arch/powerpc/include/asm/io.h29
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h10
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h39
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h90
-rw-r--r--arch/powerpc/include/asm/kvm_host.h124
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h28
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h5
-rw-r--r--arch/powerpc/include/asm/mmu_context.h4
-rw-r--r--arch/powerpc/include/asm/mmzone.h3
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h6
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/opal.h1
-rw-r--r--arch/powerpc/include/asm/parport.h2
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h7
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h2
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h49
-rw-r--r--arch/powerpc/include/asm/processor.h16
-rw-r--r--arch/powerpc/include/asm/reg.h6
-rw-r--r--arch/powerpc/include/asm/signal.h2
-rw-r--r--arch/powerpc/include/asm/tm.h5
-rw-r--r--arch/powerpc/kernel/Makefile15
-rw-r--r--arch/powerpc/kernel/asm-offsets.c14
-rw-r--r--arch/powerpc/kernel/cputable.c19
-rw-r--r--arch/powerpc/kernel/eeh.c4
-rw-r--r--arch/powerpc/kernel/eeh_driver.c10
-rw-r--r--arch/powerpc/kernel/eeh_pe.c1
-rw-r--r--arch/powerpc/kernel/entry_32.S1
-rw-r--r--arch/powerpc/kernel/entry_64.S21
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2024
-rw-r--r--arch/powerpc/kernel/fadump.c13
-rw-r--r--arch/powerpc/kernel/fpu.S26
-rw-r--r--arch/powerpc/kernel/head_32.S3
-rw-r--r--arch/powerpc/kernel/head_64.S53
-rw-r--r--arch/powerpc/kernel/head_8xx.S1
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c9
-rw-r--r--arch/powerpc/kernel/ibmebus.c2
-rw-r--r--arch/powerpc/kernel/irq.c17
-rw-r--r--arch/powerpc/kernel/legacy_serial.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c75
-rw-r--r--arch/powerpc/kernel/misc_32.S4
-rw-r--r--arch/powerpc/kernel/misc_64.S30
-rw-r--r--arch/powerpc/kernel/module.c2
-rw-r--r--arch/powerpc/kernel/nvram_64.c10
-rw-r--r--arch/powerpc/kernel/pci-common.c5
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c2
-rw-r--r--arch/powerpc/kernel/process.c181
-rw-r--r--arch/powerpc/kernel/prom_init.c82
-rw-r--r--arch/powerpc/kernel/ptrace.c353
-rw-r--r--arch/powerpc/kernel/signal.c41
-rw-r--r--arch/powerpc/kernel/signal.h18
-rw-r--r--arch/powerpc/kernel/signal_32.c122
-rw-r--r--arch/powerpc/kernel/signal_64.c207
-rw-r--r--arch/powerpc/kernel/syscalls.c1
-rw-r--r--arch/powerpc/kernel/time.c1
-rw-r--r--arch/powerpc/kernel/tm.S94
-rw-r--r--arch/powerpc/kernel/traps.c83
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile6
-rw-r--r--arch/powerpc/kernel/vdso64/datapage.S2
-rw-r--r--arch/powerpc/kernel/vdso64/gettimeofday.S2
-rw-r--r--arch/powerpc/kernel/vector.S25
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S56
-rw-r--r--arch/powerpc/kvm/Kconfig3
-rw-r--r--arch/powerpc/kvm/Makefile19
-rw-r--r--arch/powerpc/kvm/book3s.c13
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c533
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c156
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c120
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S197
-rw-r--r--arch/powerpc/kvm/book3s_pr.c10
-rw-r--r--arch/powerpc/kvm/book3s_xics.c57
-rw-r--r--arch/powerpc/kvm/book3s_xics.h2
-rw-r--r--arch/powerpc/kvm/booke.c2
-rw-r--r--arch/powerpc/kvm/e500_mmu.c73
-rw-r--r--arch/powerpc/kvm/powerpc.c61
-rw-r--r--arch/powerpc/kvm/trace_hv.h22
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/mem_64.S2
-rw-r--r--arch/powerpc/mm/Makefile7
-rw-r--r--arch/powerpc/mm/fault.c4
-rw-r--r--arch/powerpc/mm/hash_native_64.c42
-rw-r--r--arch/powerpc/mm/hash_utils_64.c136
-rw-r--r--arch/powerpc/mm/hugetlbpage.c7
-rw-r--r--arch/powerpc/mm/init_32.c2
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c81
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c11
-rw-r--r--arch/powerpc/mm/pgtable-radix.c40
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/slb_low.S8
-rw-r--r--arch/powerpc/mm/tlb-radix.c24
-rw-r--r--arch/powerpc/net/bpf_jit.h2
-rw-r--r--arch/powerpc/net/bpf_jit64.h26
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c264
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c2
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c4
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/hv-gpci.c2
-rw-r--r--arch/powerpc/perf/power7-pmu.c2
-rw-r--r--arch/powerpc/perf/power8-pmu.c2
-rw-r--r--arch/powerpc/perf/power9-pmu.c2
-rw-r--r--arch/powerpc/platforms/44x/warp.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_lpbfifo.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c4
-rw-r--r--arch/powerpc/platforms/85xx/common.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c6
-rw-r--r--arch/powerpc/platforms/86xx/pic.c4
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c6
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c18
-rw-r--r--arch/powerpc/platforms/cell/iommu.c4
-rw-r--r--arch/powerpc/platforms/cell/pmu.c4
-rw-r--r--arch/powerpc/platforms/cell/ras.c2
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c18
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c16
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c9
-rw-r--r--arch/powerpc/platforms/chrp/setup.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c4
-rw-r--r--arch/powerpc/platforms/maple/pci.c6
-rw-r--r--arch/powerpc/platforms/maple/setup.c4
-rw-r--r--arch/powerpc/platforms/pasemi/Kconfig10
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c4
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c2
-rw-r--r--arch/powerpc/platforms/pasemi/misc.c2
-rw-r--r--arch/powerpc/platforms/pasemi/msi.c4
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c8
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c6
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c4
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c6
-rw-r--r--arch/powerpc/platforms/powermac/smp.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c18
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S1
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c83
-rw-r--r--arch/powerpc/platforms/powernv/pci.c8
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c2
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c10
-rw-r--r--arch/powerpc/platforms/ps3/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/spu.c4
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c5
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c2
-rw-r--r--arch/powerpc/platforms/pseries/msi.c6
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c4
-rw-r--r--arch/powerpc/sysdev/axonram.c6
-rw-r--r--arch/powerpc/sysdev/cpm1.c8
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c4
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c2
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c6
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c12
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c8
-rw-r--r--arch/powerpc/sysdev/i8259.c4
-rw-r--r--arch/powerpc/sysdev/ipic.c4
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c14
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c4
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c4
-rw-r--r--arch/powerpc/sysdev/mv64x60_pic.c2
-rw-r--r--arch/powerpc/sysdev/pmi.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c6
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c6
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c2
-rw-r--r--arch/powerpc/sysdev/uic.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c6
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c6
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c8
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c2
-rw-r--r--arch/powerpc/xmon/spr_access.S4
-rw-r--r--arch/s390/Kconfig5
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/boot/compressed/Makefile1
-rw-r--r--arch/s390/configs/default_defconfig11
-rw-r--r--arch/s390/configs/gcov_defconfig13
-rw-r--r--arch/s390/configs/performance_defconfig13
-rw-r--r--arch/s390/crypto/aes_s390.c590
-rw-r--r--arch/s390/crypto/crc32-vx.c2
-rw-r--r--arch/s390/crypto/des_s390.c330
-rw-r--r--arch/s390/crypto/ghash_s390.c20
-rw-r--r--arch/s390/crypto/prng.c101
-rw-r--r--arch/s390/crypto/sha1_s390.c2
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/crypto/sha512_s390.c2
-rw-r--r--arch/s390/crypto/sha_common.c23
-rw-r--r--arch/s390/include/asm/cpacf.h178
-rw-r--r--arch/s390/include/asm/facilities_src.h24
-rw-r--r--arch/s390/include/asm/fpu/api.h32
-rw-r--r--arch/s390/include/asm/kvm_host.h136
-rw-r--r--arch/s390/include/asm/lowcore.h3
-rw-r--r--arch/s390/include/asm/mmu.h1
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pgtable.h74
-rw-r--r--arch/s390/include/asm/tlbflush.h72
-rw-r--r--arch/s390/include/asm/uprobes.h10
-rw-r--r--arch/s390/include/asm/vx-insn.h148
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/compat_linux.c4
-rw-r--r--arch/s390/kernel/crash_dump.c4
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/fpu.c317
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/nmi.c67
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/time.c86
-rw-r--r--arch/s390/kernel/traps.c3
-rw-r--r--arch/s390/kernel/vdso32/Makefile3
-rw-r--r--arch/s390/kernel/vdso64/Makefile3
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kvm/gaccess.c37
-rw-r--r--arch/s390/kvm/guestdbg.c59
-rw-r--r--arch/s390/kvm/intercept.c1
-rw-r--r--arch/s390/kvm/interrupt.c98
-rw-r--r--arch/s390/kvm/kvm-s390.c113
-rw-r--r--arch/s390/kvm/kvm-s390.h14
-rw-r--r--arch/s390/kvm/priv.c21
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gmap.c15
-rw-r--r--arch/s390/mm/pageattr.c4
-rw-r--r--arch/s390/mm/pgtable.c18
-rw-r--r--arch/s390/pci/pci.c9
-rw-r--r--arch/s390/pci/pci_dma.c246
-rw-r--r--arch/score/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/kernel/ftrace.c4
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/hypervisor.h1
-rw-r--r--arch/sparc/include/asm/irq_64.h5
-rw-r--r--arch/sparc/kernel/kprobes.c2
-rw-r--r--arch/sparc/kernel/pci_sun4v.c52
-rw-r--r--arch/sparc/kernel/process_64.c10
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/kernel/unaligned_64.c2
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/prom/ranges.c52
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/tile/include/asm/irq.h5
-rw-r--r--arch/tile/kernel/entry.S2
-rw-r--r--arch/tile/kernel/pmc.c3
-rw-r--r--arch/tile/kernel/process.c73
-rw-r--r--arch/tile/kernel/traps.c9
-rw-r--r--arch/tile/kernel/vmlinux.lds.S1
-rw-r--r--arch/um/drivers/harddog_kern.c25
-rw-r--r--arch/um/kernel/dyn.lds.S1
-rw-r--r--arch/um/kernel/uml.lds.S1
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S1
-rw-r--r--arch/x86/Kconfig15
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/entry/entry_64.S2
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c2
-rw-r--r--arch/x86/include/asm/irq.h5
-rw-r--r--arch/x86/include/asm/irqflags.h12
-rw-r--r--arch/x86/include/asm/kvm_host.h78
-rw-r--r--arch/x86/include/asm/pci.h14
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/pvclock.h5
-rw-r--r--arch/x86/include/asm/xen/events.h11
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c18
-rw-r--r--arch/x86/kernel/livepatch.c65
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/pvclock.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S1
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/debugfs.c69
-rw-r--r--arch/x86/kvm/hyperv.c157
-rw-r--r--arch/x86/kvm/hyperv.h3
-rw-r--r--arch/x86/kvm/lapic.c5
-rw-r--r--arch/x86/kvm/mmu.c12
-rw-r--r--arch/x86/kvm/svm.c417
-rw-r--r--arch/x86/kvm/vmx.c207
-rw-r--r--arch/x86/kvm/x86.c171
-rw-r--r--arch/x86/kvm/x86.h6
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/common.c7
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--arch/x86/xen/enlighten.c94
-rw-r--r--arch/x86/xen/grant-table.c2
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/pmu.c7
-rw-r--r--arch/x86/xen/smp.c53
-rw-r--r--arch/x86/xen/smp.h13
-rw-r--r--arch/x86/xen/time.c5
-rw-r--r--arch/xtensa/Kconfig95
-rw-r--r--arch/xtensa/boot/boot-elf/boot.lds.S2
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S7
-rw-r--r--arch/xtensa/boot/boot-uboot/Makefile10
-rw-r--r--arch/xtensa/boot/dts/csp.dts54
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi15
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/cadence_csp_defconfig122
-rw-r--r--arch/xtensa/configs/common_defconfig609
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/iss_defconfig728
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig2
-rw-r--r--arch/xtensa/include/asm/bitops.h2
-rw-r--r--arch/xtensa/include/asm/cacheasm.h11
-rw-r--r--arch/xtensa/include/asm/fixmap.h5
-rw-r--r--arch/xtensa/include/asm/highmem.h5
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h44
-rw-r--r--arch/xtensa/include/asm/kmem_layout.h74
-rw-r--r--arch/xtensa/include/asm/page.h27
-rw-r--r--arch/xtensa/include/asm/pgtable.h7
-rw-r--r--arch/xtensa/include/asm/platform.h6
-rw-r--r--arch/xtensa/include/asm/processor.h2
-rw-r--r--arch/xtensa/include/asm/sysmem.h21
-rw-r--r--arch/xtensa/include/asm/vectors.h67
-rw-r--r--arch/xtensa/include/uapi/asm/types.h3
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h15
-rw-r--r--arch/xtensa/kernel/entry.S5
-rw-r--r--arch/xtensa/kernel/head.S2
-rw-r--r--arch/xtensa/kernel/setup.c188
-rw-r--r--arch/xtensa/kernel/time.c40
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S7
-rw-r--r--arch/xtensa/mm/init.c279
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall.h5
-rw-r--r--arch/xtensa/platforms/iss/setup.c28
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c1
-rw-r--r--arch/xtensa/platforms/xt2000/setup.c21
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c116
-rw-r--r--arch/xtensa/variants/csp/include/variant/core.h575
-rw-r--r--arch/xtensa/variants/csp/include/variant/tie-asm.h194
-rw-r--r--arch/xtensa/variants/csp/include/variant/tie.h161
-rw-r--r--block/Kconfig6
-rw-r--r--block/Makefile4
-rw-r--r--block/bio.c5
-rw-r--r--block/blk-core.c16
-rw-r--r--block/blk-flush.c6
-rw-r--r--block/blk-mq-cpu.c67
-rw-r--r--block/blk-mq-cpumap.c25
-rw-r--r--block/blk-mq-pci.c47
-rw-r--r--block/blk-mq-sysfs.c40
-rw-r--r--block/blk-mq-tag.c514
-rw-r--r--block/blk-mq-tag.h44
-rw-r--r--block/blk-mq.c389
-rw-r--r--block/blk-mq.h28
-rw-r--r--block/blk-sysfs.c4
-rw-r--r--block/blk.h11
-rw-r--r--block/cfq-iosched.c13
-rw-r--r--crypto/async_tx/async_pq.c8
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/processor_idle.c5
-rw-r--r--drivers/acpi/spcr.c111
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/android/binder.c7
-rw-r--r--drivers/atm/eni.c5
-rw-r--r--drivers/atm/fore200e.c6
-rw-r--r--drivers/atm/he.c10
-rw-r--r--drivers/atm/iphase.c19
-rw-r--r--drivers/atm/nicstar.c15
-rw-r--r--drivers/atm/zatm.c16
-rw-r--r--drivers/base/Kconfig10
-rw-r--r--drivers/base/attribute_container.c2
-rw-r--r--drivers/base/core.c39
-rw-r--r--drivers/base/cpu.c11
-rw-r--r--drivers/base/dd.c33
-rw-r--r--drivers/base/dma-coherent.c10
-rw-r--r--drivers/base/dma-mapping.c11
-rw-r--r--drivers/base/memory.c5
-rw-r--r--drivers/base/pinctrl.c12
-rw-r--r--drivers/base/platform.c18
-rw-r--r--drivers/base/regmap/internal.h5
-rw-r--r--drivers/base/regmap/regmap-debugfs.c16
-rw-r--r--drivers/base/regmap/regmap.c64
-rw-r--r--drivers/base/soc.c9
-rw-r--r--drivers/bcma/driver_chipcommon.c32
-rw-r--r--drivers/bcma/main.c6
-rw-r--r--drivers/block/loop.c1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3
-rw-r--r--drivers/block/nbd.c410
-rw-r--r--drivers/block/null_blk.c129
-rw-r--r--drivers/block/rbd.c1
-rw-r--r--drivers/block/virtio_blk.c1
-rw-r--r--drivers/block/xen-blkfront.c1
-rw-r--r--drivers/bluetooth/Kconfig23
-rw-r--r--drivers/bluetooth/Makefile2
-rw-r--r--drivers/bluetooth/bcm203x.c4
-rw-r--r--drivers/bluetooth/btqca.c8
-rw-r--r--drivers/bluetooth/btqcomsmd.c182
-rw-r--r--drivers/bluetooth/btrtl.c107
-rw-r--r--drivers/bluetooth/btusb.c14
-rw-r--r--drivers/bluetooth/btwilink.c4
-rw-r--r--drivers/bluetooth/hci_bcm.c2
-rw-r--r--drivers/bluetooth/hci_bcsp.c128
-rw-r--r--drivers/bluetooth/hci_intel.c6
-rw-r--r--drivers/bluetooth/hci_ldisc.c34
-rw-r--r--drivers/bluetooth/hci_mrvl.c387
-rw-r--r--drivers/bluetooth/hci_qca.c2
-rw-r--r--drivers/bluetooth/hci_uart.h9
-rw-r--r--drivers/bluetooth/hci_vhci.c16
-rw-r--r--drivers/bus/Kconfig14
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/qcom-ebi2.c408
-rw-r--r--drivers/bus/tegra-aconnect.c22
-rw-r--r--drivers/char/bfin-otp.c40
-rw-r--r--drivers/char/hw_random/pasemi-rng.c2
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/mwave/3780i.c64
-rw-r--r--drivers/char/mwave/3780i.h8
-rw-r--r--drivers/char/mwave/mwavedd.c42
-rw-r--r--drivers/char/mwave/mwavedd.h14
-rw-r--r--drivers/char/mwave/smapi.c4
-rw-r--r--drivers/char/mwave/smapi.h6
-rw-r--r--drivers/char/mwave/tp3780i.c52
-rw-r--r--drivers/char/ppdev.c25
-rw-r--r--drivers/char/snsc.c7
-rw-r--r--drivers/char/tile-srom.c28
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-dev.c2
-rw-r--r--drivers/char/tpm/tpm-interface.c84
-rw-r--r--drivers/char/tpm/tpm-sysfs.c4
-rw-r--r--drivers/char/tpm/tpm.h46
-rw-r--r--drivers/char/tpm/tpm2-cmd.c121
-rw-r--r--drivers/char/tpm/tpm_crb.c50
-rw-r--r--drivers/char/tpm/tpm_tis_core.c24
-rw-r--r--drivers/char/ttyprintk.c69
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/char/xillybus/xillybus_core.c4
-rw-r--r--drivers/clk/Kconfig20
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/at91/clk-generated.c30
-rw-r--r--drivers/clk/at91/clk-h32mx.c8
-rw-r--r--drivers/clk/at91/clk-main.c88
-rw-r--r--drivers/clk/at91/clk-master.c21
-rw-r--r--drivers/clk/at91/clk-peripheral.c39
-rw-r--r--drivers/clk/at91/clk-pll.c21
-rw-r--r--drivers/clk/at91/clk-plldiv.c24
-rw-r--r--drivers/clk/at91/clk-programmable.c22
-rw-r--r--drivers/clk/at91/clk-slow.c375
-rw-r--r--drivers/clk/at91/clk-smd.c22
-rw-r--r--drivers/clk/at91/clk-system.c22
-rw-r--r--drivers/clk/at91/clk-usb.c69
-rw-r--r--drivers/clk/at91/clk-utmi.c22
-rw-r--r--drivers/clk/at91/sckc.c464
-rw-r--r--drivers/clk/at91/sckc.h22
-rw-r--r--drivers/clk/axis/clk-artpec6.c4
-rw-r--r--drivers/clk/bcm/Kconfig30
-rw-r--r--drivers/clk/bcm/Makefile8
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c28
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c146
-rw-r--r--drivers/clk/bcm/clk-bcm53573-ilp.c148
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c80
-rw-r--r--drivers/clk/bcm/clk-kona.c9
-rw-r--r--drivers/clk/bcm/clk-kona.h7
-rw-r--r--drivers/clk/berlin/berlin2-avpll.c12
-rw-r--r--drivers/clk/berlin/berlin2-avpll.h8
-rw-r--r--drivers/clk/berlin/berlin2-div.c4
-rw-r--r--drivers/clk/berlin/berlin2-div.h4
-rw-r--r--drivers/clk/berlin/berlin2-pll.c6
-rw-r--r--drivers/clk/berlin/berlin2-pll.h9
-rw-r--r--drivers/clk/berlin/bg2.c98
-rw-r--r--drivers/clk/berlin/bg2q.c39
-rw-r--r--drivers/clk/clk-asm9260.c31
-rw-r--r--drivers/clk/clk-axi-clkgen.c12
-rw-r--r--drivers/clk/clk-axm5516.c39
-rw-r--r--drivers/clk/clk-cdce706.c40
-rw-r--r--drivers/clk/clk-cdce925.c42
-rw-r--r--drivers/clk/clk-clps711x.c78
-rw-r--r--drivers/clk/clk-cs2000-cp.c16
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-efm32gg.c63
-rw-r--r--drivers/clk/clk-fixed-factor.c74
-rw-r--r--drivers/clk/clk-fixed-rate.c72
-rw-r--r--drivers/clk/clk-ls1x.c161
-rw-r--r--drivers/clk/clk-max-gen.c194
-rw-r--r--drivers/clk/clk-max-gen.h32
-rw-r--r--drivers/clk/clk-max77686.c262
-rw-r--r--drivers/clk/clk-max77802.c96
-rw-r--r--drivers/clk/clk-mb86s7x.c16
-rw-r--r--drivers/clk/clk-moxart.c22
-rw-r--r--drivers/clk/clk-nspire.c19
-rw-r--r--drivers/clk/clk-palmas.c12
-rw-r--r--drivers/clk/clk-pwm.c9
-rw-r--r--drivers/clk/clk-qoriq.c6
-rw-r--r--drivers/clk/clk-rk808.c44
-rw-r--r--drivers/clk/clk-scpi.c33
-rw-r--r--drivers/clk/clk-si514.c11
-rw-r--r--drivers/clk/clk-si5351.c71
-rw-r--r--drivers/clk/clk-si570.c13
-rw-r--r--drivers/clk/clk-twl6040.c87
-rw-r--r--drivers/clk/clk-vt8500.c22
-rw-r--r--drivers/clk/clk-wm831x.c21
-rw-r--r--drivers/clk/clk-xgene.c221
-rw-r--r--drivers/clk/clk.c49
-rw-r--r--drivers/clk/h8300/clk-div.c10
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c12
-rw-r--r--drivers/clk/imx/clk-imx1.c46
-rw-r--r--drivers/clk/imx/clk-imx35.c32
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c20
-rw-r--r--drivers/clk/imx/clk-imx6q.c46
-rw-r--r--drivers/clk/imx/clk-imx7d.c117
-rw-r--r--drivers/clk/imx/clk.h9
-rw-r--r--drivers/clk/loongson1/Makefile3
-rw-r--r--drivers/clk/loongson1/clk-loongson1b.c122
-rw-r--r--drivers/clk/loongson1/clk-loongson1c.c97
-rw-r--r--drivers/clk/loongson1/clk.c43
-rw-r--r--drivers/clk/loongson1/clk.h19
-rw-r--r--drivers/clk/mediatek/Kconfig21
-rw-r--r--drivers/clk/mediatek/Makefile6
-rw-r--r--drivers/clk/mediatek/clk-gate.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c4
-rw-r--r--drivers/clk/mediatek/clk-mtk.c12
-rw-r--r--drivers/clk/mediatek/clk-pll.c2
-rw-r--r--drivers/clk/meson/Makefile4
-rw-r--r--drivers/clk/meson/clkc.h2
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c191
-rw-r--r--drivers/clk/meson/gxbb.c177
-rw-r--r--drivers/clk/meson/gxbb.h31
-rw-r--r--drivers/clk/meson/meson8b.c (renamed from drivers/clk/meson/meson8b-clkc.c)299
-rw-r--r--drivers/clk/meson/meson8b.h151
-rw-r--r--drivers/clk/microchip/clk-core.c6
-rw-r--r--drivers/clk/microchip/clk-pic32mzda.c1
-rw-r--r--drivers/clk/mmp/clk-mmp2.c1
-rw-r--r--drivers/clk/mvebu/Kconfig3
-rw-r--r--drivers/clk/mvebu/Makefile3
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c447
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c158
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c91
-rw-r--r--drivers/clk/mvebu/armada-39x.c2
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c31
-rw-r--r--drivers/clk/mvebu/orion.c70
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-creg.c3
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c1
-rw-r--r--drivers/clk/qcom/Kconfig19
-rw-r--r--drivers/clk/qcom/Makefile5
-rw-r--r--drivers/clk/qcom/clk-regmap.c5
-rw-r--r--drivers/clk/qcom/clk-regmap.h3
-rw-r--r--drivers/clk/qcom/common.c58
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c1
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c1727
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c17
-rw-r--r--drivers/clk/qcom/lcc-mdm9615.c580
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c18
-rw-r--r--drivers/clk/renesas/clk-mstp.c2
-rw-r--r--drivers/clk/renesas/clk-rz.c24
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c31
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-ddr.c154
-rw-r--r--drivers/clk/rockchip/clk-pll.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c56
-rw-r--r--drivers/clk/rockchip/clk-rockchip.c7
-rw-r--r--drivers/clk/rockchip/clk.c11
-rw-r--r--drivers/clk/rockchip/clk.h35
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c84
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c350
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c61
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c37
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c9
-rw-r--r--drivers/clk/samsung/clk-pll.c154
-rw-r--r--drivers/clk/samsung/clk-pll.h2
-rw-r--r--drivers/clk/st/clk-flexgen.c63
-rw-r--r--drivers/clk/st/clkgen-fsyn.c481
-rw-r--r--drivers/clk/st/clkgen-mux.c748
-rw-r--r--drivers/clk/st/clkgen-pll.c472
-rw-r--r--drivers/clk/sunxi-ng/Kconfig39
-rw-r--r--drivers/clk/sunxi-ng/Makefile4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c1239
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.h72
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h63
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c737
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c780
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h66
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c23
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c133
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.h35
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c56
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h68
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c44
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.h23
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c21
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c16
-rw-r--r--drivers/clk/sunxi/clk-mod0.c3
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c4
-rw-r--r--drivers/clk/uniphier/Kconfig9
-rw-r--r--drivers/clk/uniphier/Makefile8
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c214
-rw-r--r--drivers/clk/uniphier/clk-uniphier-fixed-factor.c48
-rw-r--r--drivers/clk/uniphier/clk-uniphier-fixed-rate.c47
-rw-r--r--drivers/clk/uniphier/clk-uniphier-gate.c97
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mio.c101
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mux.c95
-rw-r--r--drivers/clk/uniphier/clk-uniphier-peri.c57
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c151
-rw-r--r--drivers/clk/uniphier/clk-uniphier.h122
-rw-r--r--drivers/clk/versatile/clk-icst.c310
-rw-r--r--drivers/clk/zte/Makefile1
-rw-r--r--drivers/clk/zte/clk-zx296718.c924
-rw-r--r--drivers/clk/zte/clk.c21
-rw-r--r--drivers/clk/zte/clk.h129
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/cpuidle/driver.c5
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/chelsio/Kconfig19
-rw-r--r--drivers/crypto/chelsio/Makefile4
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c1525
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h471
-rw-r--r--drivers/crypto/chelsio/chcr_core.c238
-rw-r--r--drivers/crypto/chelsio/chcr_core.h80
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h203
-rw-r--r--drivers/dma-buf/Kconfig13
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/sw_sync.c (renamed from drivers/staging/android/sw_sync.c)37
-rw-r--r--drivers/dma-buf/sync_debug.c (renamed from drivers/staging/android/sync_debug.c)2
-rw-r--r--drivers/dma-buf/sync_debug.h (renamed from drivers/staging/android/sync_debug.h)2
-rw-r--r--drivers/dma-buf/sync_trace.h (renamed from drivers/staging/android/trace/sync.h)6
-rw-r--r--drivers/dma/Kconfig40
-rw-r--r--drivers/dma/at_hdmac.c11
-rw-r--r--drivers/dma/at_xdmac.c8
-rw-r--r--drivers/dma/bestcomm/bestcomm.c4
-rw-r--r--drivers/dma/coh901318.c56
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/cppi41.c142
-rw-r--r--drivers/dma/dma-jz4740.c2
-rw-r--r--drivers/dma/dma-jz4780.c10
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmaengine.h84
-rw-r--r--drivers/dma/dmatest.c23
-rw-r--r--drivers/dma/dw/core.c53
-rw-r--r--drivers/dma/dw/regs.h5
-rw-r--r--drivers/dma/edma.c38
-rw-r--r--drivers/dma/ep93xx_dma.c28
-rw-r--r--drivers/dma/fsl_raid.c12
-rw-r--r--drivers/dma/fsldma.c24
-rw-r--r--drivers/dma/hsu/hsu.c9
-rw-r--r--drivers/dma/hsu/pci.c6
-rw-r--r--drivers/dma/imx-dma.c4
-rw-r--r--drivers/dma/imx-sdma.c85
-rw-r--r--drivers/dma/ioat/dma.c213
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/ioat/registers.h2
-rw-r--r--drivers/dma/iop-adma.c3
-rw-r--r--drivers/dma/ipu/ipu_idmac.c18
-rw-r--r--drivers/dma/ipu/ipu_irq.c9
-rw-r--r--drivers/dma/k3dma.c215
-rw-r--r--drivers/dma/mic_x100_dma.c6
-rw-r--r--drivers/dma/mmp_pdma.c14
-rw-r--r--drivers/dma/mmp_tdma.c6
-rw-r--r--drivers/dma/moxart-dma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c7
-rw-r--r--drivers/dma/mv_xor.c102
-rw-r--r--drivers/dma/mv_xor.h7
-rw-r--r--drivers/dma/mxs-dma.c13
-rw-r--r--drivers/dma/nbpfaxi.c9
-rw-r--r--drivers/dma/omap-dma.c243
-rw-r--r--drivers/dma/pch_dma.c7
-rw-r--r--drivers/dma/pl330.c25
-rw-r--r--drivers/dma/ppc4xx/adma.c11
-rw-r--r--drivers/dma/qcom/hidma.c57
-rw-r--r--drivers/dma/qcom/hidma.h2
-rw-r--r--drivers/dma/qcom/hidma_ll.c32
-rw-r--r--drivers/dma/s3c24xx-dma.c9
-rw-r--r--drivers/dma/sa11x0-dma.c14
-rw-r--r--drivers/dma/sh/rcar-dmac.c132
-rw-r--r--drivers/dma/sh/shdma-base.c12
-rw-r--r--drivers/dma/sirf-dma.c9
-rw-r--r--drivers/dma/ste_dma40.c299
-rw-r--r--drivers/dma/stm32-dma.c2
-rw-r--r--drivers/dma/sun6i-dma.c7
-rw-r--r--drivers/dma/tegra20-apb-dma.c10
-rw-r--r--drivers/dma/tegra210-adma.c14
-rw-r--r--drivers/dma/ti-dma-crossbar.c30
-rw-r--r--drivers/dma/timb_dma.c9
-rw-r--r--drivers/dma/txx9dmac.c9
-rw-r--r--drivers/dma/virt-dma.c17
-rw-r--r--drivers/dma/virt-dma.h10
-rw-r--r--drivers/dma/xgene-dma.c6
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c10
-rw-r--r--drivers/edac/Kconfig42
-rw-r--r--drivers/edac/Makefile8
-rw-r--r--drivers/edac/altera_edac.c346
-rw-r--r--drivers/edac/altera_edac.h6
-rw-r--r--drivers/edac/amd64_edac.c24
-rw-r--r--drivers/edac/fsl_ddr_edac.c633
-rw-r--r--drivers/edac/fsl_ddr_edac.h79
-rw-r--r--drivers/edac/layerscape_edac.c73
-rw-r--r--drivers/edac/mpc85xx_edac.c685
-rw-r--r--drivers/edac/mpc85xx_edac.h66
-rw-r--r--drivers/edac/mv64x60_edac.c4
-rw-r--r--drivers/edac/ppc4xx_edac.c4
-rw-r--r--drivers/edac/sb_edac.c5
-rw-r--r--drivers/edac/wq.c2
-rw-r--r--drivers/extcon/Kconfig6
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-adc-jack.c27
-rw-r--r--drivers/extcon/extcon-arizona.c27
-rw-r--r--drivers/extcon/extcon-axp288.c8
-rw-r--r--drivers/extcon/extcon-gpio.c3
-rw-r--r--drivers/extcon/extcon-max14577.c18
-rw-r--r--drivers/extcon/extcon-max3355.c8
-rw-r--r--drivers/extcon/extcon-max77693.c46
-rw-r--r--drivers/extcon/extcon-max77843.c22
-rw-r--r--drivers/extcon/extcon-max8997.c20
-rw-r--r--drivers/extcon/extcon-palmas.c16
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c170
-rw-r--r--drivers/extcon/extcon-rt8973a.c4
-rw-r--r--drivers/extcon/extcon-sm5502.c4
-rw-r--r--drivers/extcon/extcon-usb-gpio.c8
-rw-r--r--drivers/extcon/extcon.c774
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/meson/Kconfig9
-rw-r--r--drivers/firmware/meson/Makefile1
-rw-r--r--drivers/firmware/meson/meson_sm.c248
-rw-r--r--drivers/firmware/qcom_scm.c19
-rw-r--r--drivers/fpga/Kconfig1
-rw-r--r--drivers/gpio/Kconfig102
-rw-r--r--drivers/gpio/Makefile8
-rw-r--r--drivers/gpio/gpio-altera.c1
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c455
-rw-r--r--drivers/gpio/gpio-ath79.c1
-rw-r--r--drivers/gpio/gpio-axp209.c192
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c3
-rw-r--r--drivers/gpio/gpio-gpio-mm.c267
-rw-r--r--drivers/gpio/gpio-htc-egpio.c (renamed from drivers/mfd/htc-egpio.c)2
-rw-r--r--drivers/gpio/gpio-iop.c115
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-loongson1.c6
-rw-r--r--drivers/gpio/gpio-lp873x.c183
-rw-r--r--drivers/gpio/gpio-lpc18xx.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c15
-rw-r--r--drivers/gpio/gpio-mmio.c4
-rw-r--r--drivers/gpio/gpio-mockup.c214
-rw-r--r--drivers/gpio/gpio-msic.c6
-rw-r--r--drivers/gpio/gpio-mxc.c17
-rw-r--r--drivers/gpio/gpio-palmas.c1
-rw-r--r--drivers/gpio/gpio-pca953x.c342
-rw-r--r--drivers/gpio/gpio-pisosr.c2
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-spear-spics.c7
-rw-r--r--drivers/gpio/gpio-stmpe.c204
-rw-r--r--drivers/gpio/gpio-sx150x.c2
-rw-r--r--drivers/gpio/gpio-tc3589x.c47
-rw-r--r--drivers/gpio/gpio-tpic2810.c2
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c15
-rw-r--r--drivers/gpio/gpio-tps65912.c2
-rw-r--r--drivers/gpio/gpio-ts4800.c1
-rw-r--r--drivers/gpio/gpio-ts4900.c190
-rw-r--r--drivers/gpio/gpio-twl4030.c2
-rw-r--r--drivers/gpio/gpio-vf610.c7
-rw-r--r--drivers/gpio/gpio-wcove.c470
-rw-r--r--drivers/gpio/gpio-wm831x.c2
-rw-r--r--drivers/gpio/gpio-wm8350.c2
-rw-r--r--drivers/gpio/gpio-wm8994.c2
-rw-r--r--drivers/gpio/gpio-zynq.c15
-rw-r--r--drivers/gpio/gpiolib-acpi.c57
-rw-r--r--drivers/gpio/gpiolib-of.c39
-rw-r--r--drivers/gpio/gpiolib-sysfs.c4
-rw-r--r--drivers/gpio/gpiolib.c180
-rw-r--r--drivers/gpio/gpiolib.h45
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c10
-rw-r--r--drivers/hid/Kconfig26
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-alps.c26
-rw-r--r--drivers/hid/hid-core.c12
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input.c10
-rw-r--r--drivers/hid/hid-kye.c123
-rw-r--r--drivers/hid/hid-lg.c86
-rw-r--r--drivers/hid/hid-lg4ff.c109
-rw-r--r--drivers/hid/hid-lg4ff.h4
-rw-r--r--drivers/hid/hid-microsoft.c12
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c4
-rw-r--r--drivers/hid/hid-sony.c88
-rw-r--r--drivers/hid/hid-uclogic.c187
-rw-r--r--drivers/hid/hid-waltop.c36
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig17
-rw-r--r--drivers/hid/intel-ish-hid/Makefile22
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h220
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h71
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c881
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c322
-rw-r--r--drivers/hid/intel-ish-hid/ipc/utils.h64
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c978
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c246
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h182
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c788
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h114
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c257
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c1054
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h182
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/dma-if.c175
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c1032
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h321
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c115
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h277
-rw-r--r--drivers/hid/uhid.c13
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/wacom.h96
-rw-r--r--drivers/hid/wacom_sys.c1187
-rw-r--r--drivers/hid/wacom_wac.c435
-rw-r--r--drivers/hid/wacom_wac.h22
-rw-r--r--drivers/hv/channel.c148
-rw-r--r--drivers/hv/channel_mgmt.c130
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hv/hv.c8
-rw-r--r--drivers/hv/hv_balloon.c254
-rw-r--r--drivers/hv/hv_fcopy.c14
-rw-r--r--drivers/hv/hv_kvp.c27
-rw-r--r--drivers/hv/hv_snapshot.c109
-rw-r--r--drivers/hv/hv_util.c155
-rw-r--r--drivers/hv/hv_utils_transport.c15
-rw-r--r--drivers/hv/hv_utils_transport.h4
-rw-r--r--drivers/hv/hyperv_vmbus.h9
-rw-r--r--drivers/hv/ring_buffer.c76
-rw-r--r--drivers/hv/vmbus_drv.c16
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adt7411.c51
-rw-r--r--drivers/hwmon/adt7470.c108
-rw-r--r--drivers/hwmon/ftsteutates.c45
-rw-r--r--drivers/hwmon/hwmon.c616
-rw-r--r--drivers/hwmon/ibmpowernv.c8
-rw-r--r--drivers/hwmon/iio_hwmon.c5
-rw-r--r--drivers/hwmon/it87.c8
-rw-r--r--drivers/hwmon/jc42.c293
-rw-r--r--drivers/hwmon/lm75.c185
-rw-r--r--drivers/hwmon/lm90.c811
-rw-r--r--drivers/hwmon/lm95241.c469
-rw-r--r--drivers/hwmon/lm95245.c638
-rw-r--r--drivers/hwmon/ltc4151.c22
-rw-r--r--drivers/hwmon/ltc4245.c362
-rw-r--r--drivers/hwmon/max31790.c521
-rw-r--r--drivers/hwmon/max6650.c155
-rw-r--r--drivers/hwmon/nct6775.c23
-rw-r--r--drivers/hwmon/nct7904.c555
-rw-r--r--drivers/hwmon/ntc_thermistor.c107
-rw-r--r--drivers/hwmon/pmbus/Kconfig6
-rw-r--r--drivers/hwmon/pmbus/pmbus.c20
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c3
-rw-r--r--drivers/hwmon/scpi-hwmon.c1
-rw-r--r--drivers/hwmon/tmp102.c154
-rw-r--r--drivers/hwmon/tmp421.c133
-rw-r--r--drivers/hwmon/xgene-hwmon.c787
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c157
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h34
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c39
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c63
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c419
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h11
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h23
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator-qcom.c18
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c53
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c80
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c18
-rw-r--r--drivers/hwtracing/coresight/coresight.c31
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c3
-rw-r--r--drivers/i2c/busses/Kconfig13
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-amd756.c5
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c8
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c8
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c4
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c4
-rw-r--r--drivers/i2c/busses/i2c-cadence.c4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c196
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h12
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c43
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c4
-rw-r--r--drivers/i2c/busses/i2c-dln2.c4
-rw-r--r--drivers/i2c/busses/i2c-efm32.c1
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c4
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c4
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c35
-rw-r--r--drivers/i2c/busses/i2c-isch.c4
-rw-r--r--drivers/i2c/busses/i2c-ismt.c4
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c4
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c4
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c4
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c4
-rw-r--r--drivers/i2c/busses/i2c-mxs.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c1
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-ocores.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c (renamed from drivers/i2c/busses/i2c-octeon.c)1080
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h216
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c286
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c1
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-pnx.c4
-rw-r--r--drivers/i2c/busses/i2c-puv3.c5
-rw-r--r--drivers/i2c/busses/i2c-pxa.c4
-rw-r--r--drivers/i2c/busses/i2c-rcar.c5
-rw-r--r--drivers/i2c/busses/i2c-riic.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c9
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c1
-rw-r--r--drivers/i2c/busses/i2c-sirf.c4
-rw-r--r--drivers/i2c/busses/i2c-st.c4
-rw-r--r--drivers/i2c/busses/i2c-stu300.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c279
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c259
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c106
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c78
-rw-r--r--drivers/i2c/busses/i2c-wmt.c4
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/busses/i2c-xiic.c1
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c4
-rw-r--r--drivers/i2c/busses/i2c-xlr.c4
-rw-r--r--drivers/i2c/i2c-core.c272
-rw-r--r--drivers/i2c/i2c-mux.c73
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c11
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c46
-rw-r--r--drivers/idle/intel_idle.c4
-rw-r--r--drivers/iio/accel/Kconfig56
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/bma180.c9
-rw-r--r--drivers/iio/accel/dmard06.c241
-rw-r--r--drivers/iio/accel/dmard09.c157
-rw-r--r--drivers/iio/accel/kxcjk-1013.c1
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c64
-rw-r--r--drivers/iio/accel/kxsd9-spi.c56
-rw-r--r--drivers/iio/accel/kxsd9.c435
-rw-r--r--drivers/iio/accel/kxsd9.h12
-rw-r--r--drivers/iio/accel/mc3230.c211
-rw-r--r--drivers/iio/accel/mma7660.c1
-rw-r--r--drivers/iio/accel/mxc6255.c4
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c2
-rw-r--r--drivers/iio/adc/Kconfig61
-rw-r--r--drivers/iio/adc/Makefile5
-rw-r--r--drivers/iio/adc/ad7266.c4
-rw-r--r--drivers/iio/adc/ad7298.c20
-rw-r--r--drivers/iio/adc/ad7793.c10
-rw-r--r--drivers/iio/adc/at91_adc.c13
-rw-r--r--drivers/iio/adc/ina2xx-adc.c5
-rw-r--r--drivers/iio/adc/ltc2485.c148
-rw-r--r--drivers/iio/adc/men_z188_adc.c2
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c291
-rw-r--r--drivers/iio/adc/nau7802.c2
-rw-r--r--drivers/iio/adc/stx104.c (renamed from drivers/iio/dac/stx104.c)153
-rw-r--r--drivers/iio/adc/ti-adc12138.c552
-rw-r--r--drivers/iio/adc/ti-adc161s626.c248
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c4
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c24
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c42
-rw-r--r--drivers/iio/chemical/Kconfig1
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c81
-rw-r--r--drivers/iio/chemical/vz89x.c201
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c29
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c53
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c4
-rw-r--r--drivers/iio/dac/Kconfig29
-rw-r--r--drivers/iio/dac/Makefile3
-rw-r--r--drivers/iio/dac/ad5755.c2
-rw-r--r--drivers/iio/dac/ad8801.c239
-rw-r--r--drivers/iio/dac/cio-dac.c144
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c2
-rw-r--r--drivers/iio/humidity/Kconfig8
-rw-r--r--drivers/iio/industrialio-core.c3
-rw-r--r--drivers/iio/industrialio-event.c8
-rw-r--r--drivers/iio/industrialio-trigger.c95
-rw-r--r--drivers/iio/light/Kconfig19
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/si1145.c1404
-rw-r--r--drivers/iio/light/us5182d.c2
-rw-r--r--drivers/iio/light/vcnl4000.c72
-rw-r--r--drivers/iio/magnetometer/Kconfig16
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/ak8974.c860
-rw-r--r--drivers/iio/magnetometer/mag3110.c23
-rw-r--r--drivers/iio/pressure/Kconfig24
-rw-r--r--drivers/iio/pressure/Makefile3
-rw-r--r--drivers/iio/pressure/ms5611_core.c6
-rw-r--r--drivers/iio/pressure/zpa2326.c1735
-rw-r--r--drivers/iio/pressure/zpa2326.h89
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c99
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c103
-rw-r--r--drivers/iio/proximity/sx9500.c9
-rw-r--r--drivers/iio/temperature/Kconfig16
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c281
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c2
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/multicast.c2
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c121
-rw-r--r--drivers/infiniband/core/verbs.c77
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig1
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c290
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h11
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c67
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h12
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c203
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c57
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h8
-rw-r--r--drivers/infiniband/hw/hfi1/common.h8
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c38
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c35
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c185
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.h4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c59
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h22
-rw-r--r--drivers/infiniband/hw/hfi1/init.c45
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c7
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c20
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h2
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c246
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c32
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c32
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c2
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c146
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c10
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c377
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h13
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c103
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c31
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h13
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h14
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h4
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c15
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c61
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c40
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c63
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h93
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c2
-rw-r--r--drivers/infiniband/hw/hns/Kconfig10
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c128
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c257
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c368
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h80
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h325
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c446
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h734
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c762
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.h130
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c476
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h131
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2800
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h981
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c1142
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c614
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c144
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c855
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_user.h53
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c7
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c73
-rw-r--r--drivers/infiniband/hw/mlx4/main.c147
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h7
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c25
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c111
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c404
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h52
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c187
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c4
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c347
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c7
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c73
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c19
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h94
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c17
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c119
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_dma.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c51
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c55
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c27
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c22
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c56
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c2
-rw-r--r--drivers/input/keyboard/Kconfig15
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adc-keys.c210
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c135
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c59
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c1
-rw-r--r--drivers/input/misc/Kconfig16
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/gpio_decoder.c137
-rw-r--r--drivers/input/misc/max77693-haptic.c4
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c92
-rw-r--r--drivers/input/misc/uinput.c15
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c20
-rw-r--r--drivers/input/mouse/focaltech.c3
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/serio/serport.c17
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c1
-rw-r--r--drivers/input/touchscreen/Kconfig25
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c8
-rw-r--r--drivers/input/touchscreen/ektf2127.c336
-rw-r--r--drivers/input/touchscreen/elants_i2c.c31
-rw-r--r--drivers/input/touchscreen/ft6236.c326
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c21
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c24
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c13
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c5
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/iommu/amd_iommu.c484
-rw-r--r--drivers/iommu/amd_iommu_init.c181
-rw-r--r--drivers/iommu/amd_iommu_proto.h1
-rw-r--r--drivers/iommu/amd_iommu_types.h149
-rw-r--r--drivers/iommu/s390-iommu.c3
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-metag-ext.c1
-rw-r--r--drivers/irqchip/irq-vic.c1
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c24
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/leds/Kconfig28
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-triggers.c25
-rw-r--r--drivers/leds/leds-gpio.c78
-rw-r--r--drivers/leds/leds-is31fl319x.c450
-rw-r--r--drivers/leds/leds-mlxcpld.c430
-rw-r--r--drivers/leds/leds-pm8058.c191
-rw-r--r--drivers/lightnvm/Kconfig2
-rw-r--r--drivers/lightnvm/Makefile2
-rw-r--r--drivers/lightnvm/core.c74
-rw-r--r--drivers/lightnvm/lightnvm.h35
-rw-r--r--drivers/lightnvm/sysfs.c198
-rw-r--r--drivers/macintosh/macio_asic.c4
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/macintosh/smu.c18
-rw-r--r--drivers/macintosh/via-cuda.c2
-rw-r--r--drivers/macintosh/via-pmu.c6
-rw-r--r--drivers/mailbox/Kconfig10
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/platform_mhu.c205
-rw-r--r--drivers/mcb/Kconfig9
-rw-r--r--drivers/mcb/Makefile1
-rw-r--r--drivers/mcb/mcb-core.c18
-rw-r--r--drivers/mcb/mcb-internal.h9
-rw-r--r--drivers/mcb/mcb-lpc.c158
-rw-r--r--drivers/mcb/mcb-parse.c126
-rw-r--r--drivers/mcb/mcb-pci.c1
-rw-r--r--drivers/md/bcache/btree.c6
-rw-r--r--drivers/md/bcache/debug.c6
-rw-r--r--drivers/md/bcache/movinggc.c5
-rw-r--r--drivers/md/bcache/request.c9
-rw-r--r--drivers/md/bcache/writeback.c5
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/dm-bufio.c31
-rw-r--r--drivers/md/dm-cache-metadata.c183
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c2
-rw-r--r--drivers/md/dm-cache-policy-internal.h6
-rw-r--r--drivers/md/dm-cache-policy-smq.c45
-rw-r--r--drivers/md/dm-cache-policy.h10
-rw-r--r--drivers/md/dm-crypt.c26
-rw-r--r--drivers/md/dm-log-writes.c6
-rw-r--r--drivers/md/dm-mpath.c59
-rw-r--r--drivers/md/dm-rq.c107
-rw-r--r--drivers/md/dm-rq.h2
-rw-r--r--drivers/md/dm.c37
-rw-r--r--drivers/md/md-cluster.c99
-rw-r--r--drivers/md/md.c44
-rw-r--r--drivers/md/md.h5
-rw-r--r--drivers/md/persistent-data/dm-array.c228
-rw-r--r--drivers/md/persistent-data/dm-array.h52
-rw-r--r--drivers/md/persistent-data/dm-btree.c162
-rw-r--r--drivers/md/persistent-data/dm-btree.h35
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c1
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c1
-rw-r--r--drivers/media/rc/imon.c13
-rw-r--r--drivers/media/rc/meson-ir.c29
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c1
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c1
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c1
-rw-r--r--drivers/media/usb/gspca/benq.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c4
-rw-r--r--drivers/media/usb/gspca/konica.c4
-rw-r--r--drivers/media/usb/hackrf/hackrf.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c4
-rw-r--r--drivers/media/usb/msi2500/msi2500.c1
-rw-r--r--drivers/media/usb/pwc/pwc-if.c1
-rw-r--r--drivers/media/usb/s2255/s2255drv.c9
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c1
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c5
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c4
-rw-r--r--drivers/memory/atmel-ebi.c10
-rw-r--r--drivers/memory/atmel-sdramc.c4
-rw-r--r--drivers/memory/of_memory.c1
-rw-r--r--drivers/memory/omap-gpmc.c20
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/mfd/Kconfig48
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/ab8500-debugfs.c114
-rw-r--r--drivers/mfd/ac100.c137
-rw-r--r--drivers/mfd/act8945a.c1
-rw-r--r--drivers/mfd/altera-a10sr.c16
-rw-r--r--drivers/mfd/arizona-core.c113
-rw-r--r--drivers/mfd/atmel-hlcdc.c5
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c75
-rw-r--r--drivers/mfd/cros_ec.c58
-rw-r--r--drivers/mfd/cros_ec_spi.c2
-rw-r--r--drivers/mfd/da9052-core.c51
-rw-r--r--drivers/mfd/da9063-core.c7
-rw-r--r--drivers/mfd/da9063-i2c.c2
-rw-r--r--drivers/mfd/da9063-irq.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c19
-rw-r--r--drivers/mfd/dm355evm_msp.c17
-rw-r--r--drivers/mfd/exynos-lpass.c185
-rw-r--r--drivers/mfd/intel-lpss-acpi.c14
-rw-r--r--drivers/mfd/intel-lpss-pci.c51
-rw-r--r--drivers/mfd/intel_msic.c9
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c23
-rw-r--r--drivers/mfd/lp873x.c97
-rw-r--r--drivers/mfd/max14577.c4
-rw-r--r--drivers/mfd/max8997-irq.c2
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/pm8921-core.c1
-rw-r--r--drivers/mfd/qcom_rpm.c72
-rw-r--r--drivers/mfd/rk808.c226
-rw-r--r--drivers/mfd/rtsx_usb.c10
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/smsc-ece1099.c11
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe.c161
-rw-r--r--drivers/mfd/stmpe.h85
-rw-r--r--drivers/mfd/sun6i-prcm.c8
-rw-r--r--drivers/mfd/tps65217.c205
-rw-r--r--drivers/mfd/tps65218.c9
-rw-r--r--drivers/mfd/twl-core.c9
-rw-r--r--drivers/mfd/twl6040.c6
-rw-r--r--drivers/mfd/ucb1x00-core.c6
-rw-r--r--drivers/misc/Kconfig28
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/bmp085-i2c.c83
-rw-r--r--drivers/misc/bmp085-spi.c79
-rw-r--r--drivers/misc/bmp085.c506
-rw-r--r--drivers/misc/bmp085.h33
-rw-r--r--drivers/misc/cxl/cxl.h6
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/misc/cxl/of.c8
-rw-r--r--drivers/misc/cxl/pci.c7
-rw-r--r--drivers/misc/eeprom/at24.c15
-rw-r--r--drivers/misc/eeprom/at25.c20
-rw-r--r--drivers/misc/genwqe/card_base.c15
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/hpilo.c17
-rw-r--r--drivers/misc/mei/amthif.c345
-rw-r--r--drivers/misc/mei/bus.c11
-rw-r--r--drivers/misc/mei/client.c190
-rw-r--r--drivers/misc/mei/client.h7
-rw-r--r--drivers/misc/mei/hbm.c49
-rw-r--r--drivers/misc/mei/hw-me-regs.h3
-rw-r--r--drivers/misc/mei/hw-me.c3
-rw-r--r--drivers/misc/mei/hw-txe.c3
-rw-r--r--drivers/misc/mei/init.c2
-rw-r--r--drivers/misc/mei/interrupt.c77
-rw-r--r--drivers/misc/mei/main.c58
-rw-r--r--drivers/misc/mei/mei_dev.h31
-rw-r--r--drivers/misc/mei/pci-me.c11
-rw-r--r--drivers/misc/mei/pci-txe.c7
-rw-r--r--drivers/misc/mic/scif/scif_dma.c6
-rw-r--r--drivers/misc/mic/scif/scif_mmap.c4
-rw-r--r--drivers/misc/pch_phub.c20
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c14
-rw-r--r--drivers/mmc/card/block.c30
-rw-r--r--drivers/mmc/card/block.h1
-rw-r--r--drivers/mmc/card/mmc_test.c308
-rw-r--r--drivers/mmc/card/queue.c4
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/core.c181
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/pwrseq_simple.c9
-rw-r--r--drivers/mmc/core/sd.c37
-rw-r--r--drivers/mmc/core/sdio_io.c47
-rw-r--r--drivers/mmc/core/sdio_ops.c9
-rw-r--r--drivers/mmc/host/davinci_mmc.c6
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c6
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c6
-rw-r--r--drivers/mmc/host/dw_mmc.c427
-rw-r--r--drivers/mmc/host/moxart-mmc.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c6
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c7
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c136
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c62
-rw-r--r--drivers/mmc/host/sdhci-pci.h1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c7
-rw-r--r--drivers/mmc/host/sdhci-tegra.c27
-rw-r--r--drivers/mmc/host/sdhci.c23
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c17
-rw-r--r--drivers/mmc/host/sunxi-mmc.c265
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c47
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/ubi/block.c1
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c9
-rw-r--r--drivers/net/can/usb/esd_usb2.c3
-rw-r--r--drivers/net/can/usb/gs_usb.c9
-rw-r--r--drivers/net/can/usb/kvaser_usb.c7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/usb/usb_8dev.c5
-rw-r--r--drivers/net/dsa/Kconfig10
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c199
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c2
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c2
-rw-r--r--drivers/net/dsa/b53/b53_priv.h11
-rw-r--r--drivers/net/dsa/b53/b53_regs.h3
-rw-r--r--drivers/net/dsa/b53/b53_spi.c4
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c1036
-rw-r--r--drivers/net/dsa/bcm_sf2.h82
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h122
-rw-r--r--drivers/net/dsa/mv88e6060.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig12
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile5
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2333
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c34
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c491
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h88
-rw-r--r--drivers/net/dsa/mv88e6xxx/mv88e6xxx.h238
-rw-r--r--drivers/net/dsa/qca8k.c1040
-rw-r--r--drivers/net/dsa/qca8k.h185
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c8
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c42
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/amazon/Kconfig27
-rw-r--r--drivers/net/ethernet/amazon/Makefile5
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h973
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c2666
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h1038
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h48
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c501
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h160
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h416
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c895
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c3272
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h324
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h67
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h133
-rw-r--r--drivers/net/ethernet/amd/7990.c6
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h5
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c17
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h10
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c65
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c38
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h13
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c171
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h11
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c77
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h4
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h10
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c14
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c314
-rw-r--r--drivers/net/ethernet/broadcom/b44.c116
-rw-r--r--drivers/net/ethernet/broadcom/b44.h1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c79
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c19
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c28
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c131
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c276
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c135
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c187
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1251
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c90
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c140
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c112
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c27
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h1
-rw-r--r--drivers/net/ethernet/cadence/macb.c97
-rw-r--r--drivers/net/ethernet/cadence/macb.h14
-rw-r--r--drivers/net/ethernet/cavium/Kconfig12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c1237
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h59
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h604
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c45
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c266
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c513
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c1128
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h34
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h59
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c117
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c352
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h114
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c46
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h32
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c170
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c9
-rw-r--r--drivers/net/ethernet/cavium/thunder/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h87
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c433
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h15
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c77
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c89
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c460
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h33
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_xcv.c235
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h194
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c135
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c721
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1396
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c483
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h294
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c697
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c556
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h110
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c74
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h437
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h166
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c63
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/Makefile4
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c149
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h160
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.h4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h92
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c277
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h45
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c40
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c767
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c99
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h8
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c18
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c344
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c59
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c59
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c59
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c121
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c81
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c134
-rw-r--r--drivers/net/ethernet/i825xx/82596.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c5
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c57
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c10
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c30
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c193
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c30
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c46
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h142
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c345
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c299
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c193
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h59
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c64
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h65
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c232
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c41
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c65
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h51
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c345
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c56
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c244
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c82
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c21
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c66
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c931
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h155
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c345
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h177
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c882
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c638
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c397
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c232
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c588
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c479
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c167
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c299
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c724
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c478
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h233
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf.h202
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c1813
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c171
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h47
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c134
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h51
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_offload.c294
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig14
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h71
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c6898
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h54
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c489
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2500
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c149
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c99
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c153
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c259
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c1792
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h316
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c239
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c234
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h96
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h934
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2954
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h216
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c131
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c237
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c249
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h7
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h48
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c314
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c518
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_roce.c314
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c5
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig12
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c1528
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h248
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c227
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.h33
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c784
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h24
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c755
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.h335
-rw-r--r--drivers/net/ethernet/rdc/r6040.c6
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c123
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c45
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h1
-rw-r--r--drivers/net/ethernet/rocker/rocker.h15
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c122
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c119
-rw-r--r--drivers/net/ethernet/sfc/ef10.c247
-rw-r--r--drivers/net/ethernet/sfc/efx.c108
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon.c9
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c3
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h530
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h17
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.h9
-rw-r--r--drivers/net/ethernet/sfc/ptp.c16
-rw-r--r--drivers/net/ethernet/sfc/selftest.c10
-rw-r--r--drivers/net/ethernet/sfc/selftest.h2
-rw-r--r--drivers/net/ethernet/sfc/siena.c14
-rw-r--r--drivers/net/ethernet/sfc/sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/sriov.h2
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h4
-rw-r--r--drivers/net/ethernet/sis/sis900.c4
-rw-r--r--drivers/net/ethernet/sis/sis900.h2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c324
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c254
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c194
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/ethernet/ti/cpmac.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1306
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c91
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c22
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c21
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fjes/fjes_main.c1
-rw-r--r--drivers/net/hamradio/6pack.c12
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h45
-rw-r--r--drivers/net/hyperv/netvsc.c331
-rw-r--r--drivers/net/hyperv/netvsc_drv.c411
-rw-r--r--drivers/net/hyperv/rndis_filter.c41
-rw-r--r--drivers/net/ieee802154/fakelb.c14
-rw-r--r--drivers/net/ipvlan/ipvlan.h6
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c94
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c87
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/phy/Kconfig435
-rw-r--r--drivers/net/phy/Makefile76
-rw-r--r--drivers/net/phy/micrel.c21
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/mscc.c337
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c112
-rw-r--r--drivers/net/ppp/ppp_generic.c63
-rw-r--r--drivers/net/ppp/pptp.c64
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/asix.h40
-rw-r--r--drivers/net/usb/asix_common.c212
-rw-r--r--drivers/net/usb/asix_devices.c450
-rw-r--r--drivers/net/usb/ax88172a.c29
-rw-r--r--drivers/net/usb/hso.c138
-rw-r--r--drivers/net/usb/kaweth.c15
-rw-r--r--drivers/net/usb/lan78xx.c28
-rw-r--r--drivers/net/usb/pegasus.c3
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/smsc95xx.c109
-rw-r--r--drivers/net/usb/smsc95xx.h8
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vrf.c294
-rw-r--r--drivers/net/vxlan.c22
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wan/sbni.c4
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c119
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c199
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h81
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c72
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h33
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c59
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c192
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h56
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c104
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c6
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c151
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c53
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c92
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c63
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c46
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h932
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c50
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c55
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-a000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c252
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c370
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c410
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c20
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c27
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c78
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c167
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c270
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c182
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c66
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c137
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c144
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c62
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c12
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c38
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c44
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c19
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/util.h77
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h29
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c146
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c7
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c371
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h208
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c29
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h13
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c76
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c48
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h13
-rw-r--r--drivers/net/wireless/wl3501_cs.c7
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/common.h4
-rw-r--r--drivers/net/xen-netback/hash.c13
-rw-r--r--drivers/net/xen-netback/interface.c38
-rw-r--r--drivers/net/xen-netback/netback.c18
-rw-r--r--drivers/net/xen-netfront.c15
-rw-r--r--drivers/ntb/ntb_transport.c193
-rw-r--r--drivers/nvme/host/core.c158
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fabrics.h11
-rw-r--r--drivers/nvme/host/lightnvm.c33
-rw-r--r--drivers/nvme/host/nvme.h31
-rw-r--r--drivers/nvme/host/pci.c109
-rw-r--r--drivers/nvme/host/rdma.c27
-rw-r--r--drivers/nvme/host/scsi.c80
-rw-r--r--drivers/nvme/target/admin-cmd.c88
-rw-r--r--drivers/nvme/target/io-cmd.c3
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/rdma.c2
-rw-r--r--drivers/nvmem/Kconfig10
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/meson-efuse.c93
-rw-r--r--drivers/nvmem/rockchip-efuse.c133
-rw-r--r--drivers/of/base.c173
-rw-r--r--drivers/of/fdt.c11
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/host/Kconfig27
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c12
-rw-r--r--drivers/pci/host/pci-dra7xx.c31
-rw-r--r--drivers/pci/host/pci-exynos.c16
-rw-r--r--drivers/pci/host/pci-host-common.c15
-rw-r--r--drivers/pci/host/pci-hyperv.c65
-rw-r--r--drivers/pci/host/pci-imx6.c9
-rw-r--r--drivers/pci/host/pci-keystone.c5
-rw-r--r--drivers/pci/host/pci-tegra.c54
-rw-r--r--drivers/pci/host/pci-versatile.c8
-rw-r--r--drivers/pci/host/pcie-altera-msi.c20
-rw-r--r--drivers/pci/host/pcie-altera.c209
-rw-r--r--drivers/pci/host/pcie-artpec6.c4
-rw-r--r--drivers/pci/host/pcie-designware-plat.c3
-rw-r--r--drivers/pci/host/pcie-designware.c209
-rw-r--r--drivers/pci/host/pcie-designware.h10
-rw-r--r--drivers/pci/host/pcie-qcom.c28
-rw-r--r--drivers/pci/host/pcie-rcar.c190
-rw-r--r--drivers/pci/host/pcie-rockchip.c1229
-rw-r--r--drivers/pci/host/pcie-spear13xx.c11
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c105
-rw-r--r--drivers/pci/host/pcie-xilinx.c90
-rw-r--r--drivers/pci/host/vmd.c (renamed from arch/x86/pci/vmd.c)49
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c10
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c23
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c84
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c121
-rw-r--r--drivers/pci/hotplug/pnv_php.c283
-rw-r--r--drivers/pci/iov.c5
-rw-r--r--drivers/pci/pci-acpi.c22
-rw-r--r--drivers/pci/pci-driver.c17
-rw-r--r--drivers/pci/pci.c75
-rw-r--r--drivers/pci/pci.h9
-rw-r--r--drivers/pci/pcie/Kconfig11
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c36
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c61
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c6
-rw-r--r--drivers/pci/pcie/pcie-dpc.c18
-rw-r--r--drivers/pci/pcie/pme.c16
-rw-r--r--drivers/pci/pcie/portdrv_pci.c17
-rw-r--r--drivers/pci/pcie/ptm.c142
-rw-r--r--drivers/pci/probe.c6
-rw-r--r--drivers/pci/quirks.c18
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/xgene_pmu.c1398
-rw-r--r--drivers/phy/Kconfig36
-rw-r--r--drivers/phy/Makefile4
-rw-r--r--drivers/phy/phy-bcm-ns-usb3.c274
-rw-r--r--drivers/phy/phy-bcm-ns2-pcie.c28
-rw-r--r--drivers/phy/phy-core.c15
-rw-r--r--drivers/phy/phy-da8xx-usb.c2
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c4
-rw-r--r--drivers/phy/phy-omap-usb2.c100
-rw-r--r--drivers/phy/phy-qcom-ufs.c6
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c1
-rw-r--r--drivers/phy/phy-rockchip-inno-usb2.c707
-rw-r--r--drivers/phy/phy-rockchip-pcie.c357
-rw-r--r--drivers/phy/phy-rockchip-typec.c1023
-rw-r--r--drivers/phy/phy-rockchip-usb.c20
-rw-r--r--drivers/phy/phy-sun4i-usb.c154
-rw-r--r--drivers/phy/phy-twl4030-usb.c25
-rw-r--r--drivers/phy/tegra/xusb.c4
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/Kconfig24
-rw-r--r--drivers/pinctrl/aspeed/Makefile6
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c1231
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c808
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c498
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h569
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c55
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c33
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c199
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c222
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h52
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c23
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c8
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c13
-rw-r--r--drivers/pinctrl/pinctrl-amd.c14
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c7
-rw-r--r--drivers/pinctrl/pinctrl-st.c93
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig10
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c12
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c29
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c2739
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c28
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c356
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c2671
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c8
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h113
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c5
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c12
-rw-r--r--drivers/pinctrl/stm32/Kconfig5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c165
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-gr8.c541
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c12
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c1
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c92
-rw-r--r--drivers/power/Kconfig518
-rw-r--r--drivers/power/Makefile75
-rw-r--r--drivers/power/act8945a_charger.c359
-rw-r--r--drivers/power/reset/keystone-reset.c2
-rw-r--r--drivers/power/reset/reboot-mode.c59
-rw-r--r--drivers/power/reset/reboot-mode.h4
-rw-r--r--drivers/power/reset/st-poweroff.c41
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c12
-rw-r--r--drivers/power/reset/xgene-reboot.c4
-rw-r--r--drivers/power/reset/zx-reboot.c5
-rw-r--r--drivers/power/supply/88pm860x_battery.c (renamed from drivers/power/88pm860x_battery.c)0
-rw-r--r--drivers/power/supply/88pm860x_charger.c (renamed from drivers/power/88pm860x_charger.c)0
-rw-r--r--drivers/power/supply/Kconfig514
-rw-r--r--drivers/power/supply/Makefile74
-rw-r--r--drivers/power/supply/ab8500_bmdata.c (renamed from drivers/power/ab8500_bmdata.c)0
-rw-r--r--drivers/power/supply/ab8500_btemp.c (renamed from drivers/power/ab8500_btemp.c)2
-rw-r--r--drivers/power/supply/ab8500_charger.c (renamed from drivers/power/ab8500_charger.c)4
-rw-r--r--drivers/power/supply/ab8500_fg.c (renamed from drivers/power/ab8500_fg.c)11
-rw-r--r--drivers/power/supply/abx500_chargalg.c (renamed from drivers/power/abx500_chargalg.c)4
-rw-r--r--drivers/power/supply/act8945a_charger.c666
-rw-r--r--drivers/power/supply/apm_power.c (renamed from drivers/power/apm_power.c)0
-rw-r--r--drivers/power/supply/axp20x_usb_power.c (renamed from drivers/power/axp20x_usb_power.c)0
-rw-r--r--drivers/power/supply/axp288_charger.c (renamed from drivers/power/axp288_charger.c)1
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c (renamed from drivers/power/axp288_fuel_gauge.c)1
-rw-r--r--drivers/power/supply/bq2415x_charger.c (renamed from drivers/power/bq2415x_charger.c)0
-rw-r--r--drivers/power/supply/bq24190_charger.c (renamed from drivers/power/bq24190_charger.c)0
-rw-r--r--drivers/power/supply/bq24257_charger.c (renamed from drivers/power/bq24257_charger.c)12
-rw-r--r--drivers/power/supply/bq24735-charger.c (renamed from drivers/power/bq24735-charger.c)45
-rw-r--r--drivers/power/supply/bq25890_charger.c (renamed from drivers/power/bq25890_charger.c)0
-rw-r--r--drivers/power/supply/bq27xxx_battery.c (renamed from drivers/power/bq27xxx_battery.c)43
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c (renamed from drivers/power/bq27xxx_battery_i2c.c)0
-rw-r--r--drivers/power/supply/charger-manager.c (renamed from drivers/power/charger-manager.c)0
-rw-r--r--drivers/power/supply/collie_battery.c (renamed from drivers/power/collie_battery.c)0
-rw-r--r--drivers/power/supply/da9030_battery.c (renamed from drivers/power/da9030_battery.c)0
-rw-r--r--drivers/power/supply/da9052-battery.c (renamed from drivers/power/da9052-battery.c)0
-rw-r--r--drivers/power/supply/da9150-charger.c (renamed from drivers/power/da9150-charger.c)0
-rw-r--r--drivers/power/supply/da9150-fg.c (renamed from drivers/power/da9150-fg.c)0
-rw-r--r--drivers/power/supply/ds2760_battery.c (renamed from drivers/power/ds2760_battery.c)7
-rw-r--r--drivers/power/supply/ds2780_battery.c (renamed from drivers/power/ds2780_battery.c)4
-rw-r--r--drivers/power/supply/ds2781_battery.c (renamed from drivers/power/ds2781_battery.c)4
-rw-r--r--drivers/power/supply/ds2782_battery.c (renamed from drivers/power/ds2782_battery.c)0
-rw-r--r--drivers/power/supply/generic-adc-battery.c (renamed from drivers/power/generic-adc-battery.c)0
-rw-r--r--drivers/power/supply/goldfish_battery.c (renamed from drivers/power/goldfish_battery.c)0
-rw-r--r--drivers/power/supply/gpio-charger.c (renamed from drivers/power/gpio-charger.c)0
-rw-r--r--drivers/power/supply/intel_mid_battery.c (renamed from drivers/power/intel_mid_battery.c)3
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c (renamed from drivers/power/ipaq_micro_battery.c)2
-rw-r--r--drivers/power/supply/isp1704_charger.c (renamed from drivers/power/isp1704_charger.c)0
-rw-r--r--drivers/power/supply/jz4740-battery.c (renamed from drivers/power/jz4740-battery.c)0
-rw-r--r--drivers/power/supply/lp8727_charger.c (renamed from drivers/power/lp8727_charger.c)0
-rw-r--r--drivers/power/supply/lp8788-charger.c (renamed from drivers/power/lp8788-charger.c)0
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c (renamed from drivers/power/ltc2941-battery-gauge.c)0
-rw-r--r--drivers/power/supply/max14577_charger.c (renamed from drivers/power/max14577_charger.c)4
-rw-r--r--drivers/power/supply/max17040_battery.c (renamed from drivers/power/max17040_battery.c)0
-rw-r--r--drivers/power/supply/max17042_battery.c (renamed from drivers/power/max17042_battery.c)0
-rw-r--r--drivers/power/supply/max77693_charger.c (renamed from drivers/power/max77693_charger.c)4
-rw-r--r--drivers/power/supply/max8903_charger.c (renamed from drivers/power/max8903_charger.c)0
-rw-r--r--drivers/power/supply/max8925_power.c (renamed from drivers/power/max8925_power.c)0
-rw-r--r--drivers/power/supply/max8997_charger.c (renamed from drivers/power/max8997_charger.c)0
-rw-r--r--drivers/power/supply/max8998_charger.c (renamed from drivers/power/max8998_charger.c)0
-rw-r--r--drivers/power/supply/olpc_battery.c (renamed from drivers/power/olpc_battery.c)0
-rw-r--r--drivers/power/supply/pcf50633-charger.c (renamed from drivers/power/pcf50633-charger.c)0
-rw-r--r--drivers/power/supply/pda_power.c (renamed from drivers/power/pda_power.c)0
-rw-r--r--drivers/power/supply/pm2301_charger.c (renamed from drivers/power/pm2301_charger.c)3
-rw-r--r--drivers/power/supply/pm2301_charger.h (renamed from drivers/power/pm2301_charger.h)0
-rw-r--r--drivers/power/supply/pmu_battery.c (renamed from drivers/power/pmu_battery.c)0
-rw-r--r--drivers/power/supply/power_supply.h (renamed from drivers/power/power_supply.h)0
-rw-r--r--drivers/power/supply/power_supply_core.c (renamed from drivers/power/power_supply_core.c)0
-rw-r--r--drivers/power/supply/power_supply_leds.c (renamed from drivers/power/power_supply_leds.c)0
-rw-r--r--drivers/power/supply/power_supply_sysfs.c (renamed from drivers/power/power_supply_sysfs.c)0
-rw-r--r--drivers/power/supply/qcom_smbb.c (renamed from drivers/power/qcom_smbb.c)0
-rw-r--r--drivers/power/supply/rt5033_battery.c (renamed from drivers/power/rt5033_battery.c)0
-rw-r--r--drivers/power/supply/rt9455_charger.c (renamed from drivers/power/rt9455_charger.c)0
-rw-r--r--drivers/power/supply/rx51_battery.c (renamed from drivers/power/rx51_battery.c)0
-rw-r--r--drivers/power/supply/s3c_adc_battery.c (renamed from drivers/power/s3c_adc_battery.c)0
-rw-r--r--drivers/power/supply/sbs-battery.c (renamed from drivers/power/sbs-battery.c)254
-rw-r--r--drivers/power/supply/smb347-charger.c (renamed from drivers/power/smb347-charger.c)0
-rw-r--r--drivers/power/supply/test_power.c (renamed from drivers/power/test_power.c)0
-rw-r--r--drivers/power/supply/tosa_battery.c (renamed from drivers/power/tosa_battery.c)0
-rw-r--r--drivers/power/supply/tps65090-charger.c (renamed from drivers/power/tps65090-charger.c)0
-rw-r--r--drivers/power/supply/tps65217_charger.c (renamed from drivers/power/tps65217_charger.c)40
-rw-r--r--drivers/power/supply/twl4030_charger.c (renamed from drivers/power/twl4030_charger.c)0
-rw-r--r--drivers/power/supply/twl4030_madc_battery.c (renamed from drivers/power/twl4030_madc_battery.c)0
-rw-r--r--drivers/power/supply/wm831x_backup.c (renamed from drivers/power/wm831x_backup.c)0
-rw-r--r--drivers/power/supply/wm831x_power.c (renamed from drivers/power/wm831x_power.c)0
-rw-r--r--drivers/power/supply/wm8350_power.c (renamed from drivers/power/wm8350_power.c)0
-rw-r--r--drivers/power/supply/wm97xx_battery.c (renamed from drivers/power/wm97xx_battery.c)2
-rw-r--r--drivers/power/supply/z2_battery.c (renamed from drivers/power/z2_battery.c)1
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ptp/ptp_clock.c1
-rw-r--r--drivers/ptp/ptp_ixp46x.c15
-rw-r--r--drivers/regulator/Kconfig16
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/axp20x-regulator.c118
-rw-r--r--drivers/regulator/core.c154
-rw-r--r--drivers/regulator/dbx500-prcmu.c18
-rw-r--r--drivers/regulator/devres.c7
-rw-r--r--drivers/regulator/hi6421-regulator.c3
-rw-r--r--drivers/regulator/ltc3676.c420
-rw-r--r--drivers/regulator/pv88080-regulator.c263
-rw-r--r--drivers/regulator/pv88080-regulator.h114
-rw-r--r--drivers/regulator/pwm-regulator.c10
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c66
-rw-r--r--drivers/regulator/rk808-regulator.c146
-rw-r--r--drivers/regulator/tps65218-regulator.c8
-rw-r--r--drivers/regulator/tps65910-regulator.c6
-rw-r--r--drivers/remoteproc/Kconfig20
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c9
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c8
-rw-r--r--drivers/remoteproc/qcom_wcnss.c624
-rw-r--r--drivers/remoteproc/qcom_wcnss.h22
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c188
-rw-r--r--drivers/remoteproc/remoteproc_core.c248
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c20
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c6
-rw-r--r--drivers/remoteproc/remoteproc_internal.h15
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c35
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c4
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c6
-rw-r--r--drivers/reset/Kconfig65
-rw-r--r--drivers/reset/Makefile20
-rw-r--r--drivers/reset/core.c12
-rw-r--r--drivers/reset/hisilicon/Kconfig3
-rw-r--r--drivers/reset/reset-ath79.c1
-rw-r--r--drivers/reset/reset-socfpga.c19
-rw-r--r--drivers/reset/reset-stm32.c108
-rw-r--r--drivers/reset/reset-uniphier.c440
-rw-r--r--drivers/rpmsg/Kconfig14
-rw-r--r--drivers/rpmsg/Makefile4
-rw-r--r--drivers/rpmsg/qcom_smd.c1434
-rw-r--r--drivers/rpmsg/rpmsg_core.c498
-rw-r--r--drivers/rpmsg/rpmsg_internal.h82
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c541
-rw-r--r--drivers/rtc/Kconfig14
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ac100.c627
-rw-r--r--drivers/rtc/rtc-pm8xxx.c1
-rw-r--r--drivers/s390/block/dasd.c39
-rw-r--r--drivers/s390/block/dasd_devmap.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/char/con3270.c11
-rw-r--r--drivers/s390/char/sclp_ctl.c19
-rw-r--r--drivers/s390/char/tape_3590.c11
-rw-r--r--drivers/s390/char/vmur.c9
-rw-r--r--drivers/s390/cio/chsc.c20
-rw-r--r--drivers/s390/cio/cio.h1
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c162
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h14
-rw-r--r--drivers/s390/scsi/zfcp_erp.c12
-rw-r--r--drivers/s390/scsi/zfcp_ext.h8
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c22
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c8
-rw-r--r--drivers/scsi/Kconfig136
-rw-r--r--drivers/scsi/Makefile8
-rw-r--r--drivers/scsi/NCR5380.c15
-rw-r--r--drivers/scsi/NCR5380.h10
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c12
-rw-r--r--drivers/scsi/be2iscsi/be.h15
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c1096
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h142
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c408
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h25
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2478
-rw-r--r--drivers/scsi/be2iscsi/be_main.h220
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c1497
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h51
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c12
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c5
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxlflash/main.c81
-rw-r--r--drivers/scsi/cxlflash/superpipe.c180
-rw-r--r--drivers/scsi/cxlflash/superpipe.h3
-rw-r--r--drivers/scsi/cxlflash/vlun.c13
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c1
-rw-r--r--drivers/scsi/dtc.c447
-rw-r--r--drivers/scsi/dtc.h42
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c53
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c238
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c36
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c130
-rw-r--r--drivers/scsi/hosts.c12
-rw-r--r--drivers/scsi/hpsa.c139
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c12
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c37
-rw-r--r--drivers/scsi/in2000.c2302
-rw-r--r--drivers/scsi/in2000.h412
-rw-r--r--drivers/scsi/ipr.c134
-rw-r--r--drivers/scsi/ipr.h8
-rw-r--r--drivers/scsi/libfc/fc_exch.c1
-rw-r--r--drivers/scsi/libfc/fc_rport.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c259
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h24
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c169
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c28
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c19
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c41
-rw-r--r--drivers/scsi/mvsas/mv_sas.c16
-rw-r--r--drivers/scsi/pas16.c565
-rw-r--r--drivers/scsi/pas16.h121
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c2
-rw-r--r--drivers/scsi/pmcraid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c2
-rw-r--r--drivers/scsi/scsi_debug.c54
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sd.c11
-rw-r--r--drivers/scsi/sd.h30
-rw-r--r--drivers/scsi/sd_dif.c10
-rw-r--r--drivers/scsi/sg.c20
-rw-r--r--drivers/scsi/smartpqi/Kconfig54
-rw-r--r--drivers/scsi/smartpqi/Makefile3
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h1136
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6303
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c350
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c404
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h34
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/scsi/t128.c407
-rw-r--r--drivers/scsi/t128.h97
-rw-r--r--drivers/scsi/u14-34f.c1971
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c1
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufshcd.c6
-rw-r--r--drivers/scsi/ultrastor.c1210
-rw-r--r--drivers/scsi/ultrastor.h80
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/scsi/wd7000.c1657
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c2
-rw-r--r--drivers/soc/qcom/smd.c265
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/rockchip/pm_domains.c100
-rw-r--r--drivers/soc/tegra/pmc.c28
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-bcm-qspi.c1397
-rw-r--r--drivers/spi/spi-bcm-qspi.h115
-rw-r--r--drivers/spi/spi-brcmstb-qspi.c53
-rw-r--r--drivers/spi/spi-cavium-thunderx.c120
-rw-r--r--drivers/spi/spi-cavium.h3
-rw-r--r--drivers/spi/spi-dw.c15
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-fsl-dspi.c16
-rw-r--r--drivers/spi/spi-fsl-espi.c607
-rw-r--r--drivers/spi/spi-fsl-lib.h3
-rw-r--r--drivers/spi/spi-imx.c35
-rw-r--r--drivers/spi/spi-iproc-qspi.c163
-rw-r--r--drivers/spi/spi-jcore.c231
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-meson-spifc.c1
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c7
-rw-r--r--drivers/spi/spi-pxa2xx.c172
-rw-r--r--drivers/spi/spi-pxa2xx.h5
-rw-r--r--drivers/spi/spi-qup.c6
-rw-r--r--drivers/spi/spi-rspi.c14
-rw-r--r--drivers/spi/spi-sc18is602.c9
-rw-r--r--drivers/spi/spi-st-ssc4.c19
-rw-r--r--drivers/spi/spi-ti-qspi.c139
-rw-r--r--drivers/spi/spi-txx9.c6
-rw-r--r--drivers/spi/spi-xlp.c13
-rw-r--r--drivers/spi/spi.c17
-rw-r--r--drivers/spmi/spmi-pmic-arb.c1
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/Kconfig13
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/ion/Kconfig12
-rw-r--r--drivers/staging/android/ion/Makefile4
-rw-r--r--drivers/staging/android/ion/devicetree.txt51
-rw-r--r--drivers/staging/android/ion/hisilicon/hi6220_ion.c195
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c177
-rw-r--r--drivers/staging/android/ion/ion.c388
-rw-r--r--drivers/staging/android/ion/ion.h41
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c43
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c29
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c36
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c10
-rw-r--r--drivers/staging/android/ion/ion_heap.c10
-rw-r--r--drivers/staging/android/ion/ion_of.c185
-rw-r--r--drivers/staging/android/ion/ion_of.h37
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c12
-rw-r--r--drivers/staging/android/ion/ion_priv.h131
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c239
-rw-r--r--drivers/staging/android/ion/ion_test.c23
-rw-r--r--drivers/staging/android/lowmemorykiller.c13
-rw-r--r--drivers/staging/android/uapi/ion.h69
-rw-r--r--drivers/staging/comedi/comedi_fops.c8
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c141
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c85
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c3
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c577
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c73
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c4
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c4
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c15
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h290
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c62
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c273
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c176
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c106
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c58
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c67
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c171
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c216
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c4
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h95
-rw-r--r--drivers/staging/comedi/drivers/s626.c287
-rw-r--r--drivers/staging/comedi/drivers/s626.h4
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c4
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c12
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c647
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c226
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h19
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c130
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c185
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c180
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h3
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c20
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h2
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c10
-rw-r--r--drivers/staging/fbtft/fb_pcd8544.c4
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c14
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c8
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c10
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c23
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c40
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c14
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c43
-rw-r--r--drivers/staging/fbtft/fb_tls8204.c59
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c12
-rw-r--r--drivers/staging/fbtft/fb_watterott.c8
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c3
-rw-r--r--drivers/staging/fbtft/fbtft-core.c30
-rw-r--r--drivers/staging/fbtft/fbtft.h26
-rw-r--r--drivers/staging/fbtft/fbtft_device.c36
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile7
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c60
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c2
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h15
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c61
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h15
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c71
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c89
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c (renamed from drivers/staging/fsl-mc/bus/mc-allocator.c)213
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c (renamed from drivers/staging/fsl-mc/bus/mc-bus.c)83
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c (renamed from drivers/staging/fsl-mc/bus/mc-msi.c)8
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-private.h52
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c6
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c320
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c155
-rw-r--r--drivers/staging/fsl-mc/include/dpbp-cmd.h60
-rw-r--r--drivers/staging/fsl-mc/include/mc-bus.h (renamed from drivers/staging/fsl-mc/include/mc-private.h)91
-rw-r--r--drivers/staging/fsl-mc/include/mc-sys.h15
-rw-r--r--drivers/staging/fsl-mc/include/mc.h13
-rw-r--r--drivers/staging/fwserial/fwserial.c6
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h4
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c1
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c8
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h6
-rw-r--r--drivers/staging/gdm724x/hci_packet.h1
-rw-r--r--drivers/staging/gdm724x/netlink_k.c11
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c139
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware-management333
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c262
-rw-r--r--drivers/staging/greybus/Documentation/sysfs-bus-greybus275
-rw-r--r--drivers/staging/greybus/Kconfig219
-rw-r--r--drivers/staging/greybus/Makefile96
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c522
-rw-r--r--drivers/staging/greybus/arche-platform.c827
-rw-r--r--drivers/staging/greybus/arche_platform.h39
-rw-r--r--drivers/staging/greybus/arpc.h109
-rw-r--r--drivers/staging/greybus/audio_apbridgea.c207
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h156
-rw-r--r--drivers/staging/greybus/audio_codec.c1132
-rw-r--r--drivers/staging/greybus/audio_codec.h283
-rw-r--r--drivers/staging/greybus/audio_gb.c228
-rw-r--r--drivers/staging/greybus/audio_manager.c184
-rw-r--r--drivers/staging/greybus/audio_manager.h83
-rw-r--r--drivers/staging/greybus/audio_manager_module.c258
-rw-r--r--drivers/staging/greybus/audio_manager_private.h28
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c102
-rw-r--r--drivers/staging/greybus/audio_module.c482
-rw-r--r--drivers/staging/greybus/audio_topology.c1443
-rw-r--r--drivers/staging/greybus/authentication.c429
-rw-r--r--drivers/staging/greybus/bootrom.c524
-rw-r--r--drivers/staging/greybus/bundle.c253
-rw-r--r--drivers/staging/greybus/bundle.h90
-rw-r--r--drivers/staging/greybus/camera.c1400
-rw-r--r--drivers/staging/greybus/connection.c938
-rw-r--r--drivers/staging/greybus/connection.h129
-rw-r--r--drivers/staging/greybus/control.c635
-rw-r--r--drivers/staging/greybus/control.h65
-rw-r--r--drivers/staging/greybus/core.c361
-rw-r--r--drivers/staging/greybus/debugfs.c31
-rw-r--r--drivers/staging/greybus/es2.c1597
-rw-r--r--drivers/staging/greybus/firmware.h42
-rw-r--r--drivers/staging/greybus/fw-core.c312
-rw-r--r--drivers/staging/greybus/fw-download.c465
-rw-r--r--drivers/staging/greybus/fw-management.c721
-rw-r--r--drivers/staging/greybus/gb-camera.h127
-rw-r--r--drivers/staging/greybus/gbphy.c360
-rw-r--r--drivers/staging/greybus/gbphy.h110
-rw-r--r--drivers/staging/greybus/gpio.c766
-rw-r--r--drivers/staging/greybus/greybus.h154
-rw-r--r--drivers/staging/greybus/greybus_authentication.h120
-rw-r--r--drivers/staging/greybus/greybus_firmware.h120
-rw-r--r--drivers/staging/greybus/greybus_id.h26
-rw-r--r--drivers/staging/greybus/greybus_manifest.h177
-rw-r--r--drivers/staging/greybus/greybus_protocols.h2268
-rw-r--r--drivers/staging/greybus/greybus_trace.h531
-rw-r--r--drivers/staging/greybus/hd.c257
-rw-r--r--drivers/staging/greybus/hd.h90
-rw-r--r--drivers/staging/greybus/hid.c536
-rw-r--r--drivers/staging/greybus/i2c.c343
-rw-r--r--drivers/staging/greybus/interface.c1316
-rw-r--r--drivers/staging/greybus/interface.h88
-rw-r--r--drivers/staging/greybus/light.c1361
-rw-r--r--drivers/staging/greybus/log.c132
-rw-r--r--drivers/staging/greybus/loopback.c1364
-rw-r--r--drivers/staging/greybus/manifest.c535
-rw-r--r--drivers/staging/greybus/manifest.h16
-rw-r--r--drivers/staging/greybus/module.c238
-rw-r--r--drivers/staging/greybus/module.h34
-rw-r--r--drivers/staging/greybus/operation.c1239
-rw-r--r--drivers/staging/greybus/operation.h210
-rw-r--r--drivers/staging/greybus/power_supply.c1141
-rw-r--r--drivers/staging/greybus/pwm.c338
-rw-r--r--drivers/staging/greybus/raw.c381
-rw-r--r--drivers/staging/greybus/sdio.c884
-rw-r--r--drivers/staging/greybus/spi.c79
-rw-r--r--drivers/staging/greybus/spilib.c565
-rw-r--r--drivers/staging/greybus/spilib.h24
-rw-r--r--drivers/staging/greybus/svc.c1486
-rw-r--r--drivers/staging/greybus/svc.h109
-rw-r--r--drivers/staging/greybus/svc_watchdog.c198
-rw-r--r--drivers/staging/greybus/timesync.c1357
-rw-r--r--drivers/staging/greybus/timesync.h45
-rw-r--r--drivers/staging/greybus/timesync_platform.c82
-rw-r--r--drivers/staging/greybus/tools/.gitignore1
-rw-r--r--drivers/staging/greybus/tools/Android.mk10
-rw-r--r--drivers/staging/greybus/tools/Makefile31
-rw-r--r--drivers/staging/greybus/tools/README.loopback198
-rwxr-xr-xdrivers/staging/greybus/tools/lbtest168
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c1000
-rw-r--r--drivers/staging/greybus/uart.c1075
-rw-r--r--drivers/staging/greybus/usb.c247
-rw-r--r--drivers/staging/greybus/vibrator.c249
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c4
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h5
-rw-r--r--drivers/staging/gs_fpgaboot/io.h2
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c12
-rw-r--r--drivers/staging/i4l/act2000/capi.c12
-rw-r--r--drivers/staging/i4l/act2000/capi.h10
-rw-r--r--drivers/staging/i4l/act2000/module.c5
-rw-r--r--drivers/staging/i4l/icn/icn.c186
-rw-r--r--drivers/staging/i4l/pcbit/capi.c4
-rw-r--r--drivers/staging/i4l/pcbit/drv.c16
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c7
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c8
-rw-r--r--drivers/staging/iio/accel/sca3000.h1
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c244
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c38
-rw-r--r--drivers/staging/iio/light/isl29018.c138
-rw-r--r--drivers/staging/iio/light/isl29028.c103
-rw-r--r--drivers/staging/iio/light/tsl2583.c2
-rw-r--r--drivers/staging/iio/meter/ade7754.c59
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c10
-rw-r--r--drivers/staging/iio/meter/ade7854.c40
-rw-r--r--drivers/staging/ks7010/eap_packet.h34
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c160
-rw-r--r--drivers/staging/ks7010/ks_hostif.c158
-rw-r--r--drivers/staging/ks7010/ks_hostif.h121
-rw-r--r--drivers/staging/ks7010/ks_wlan.h8
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h6
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c121
-rw-r--r--drivers/staging/ks7010/michael_mic.c32
-rw-r--r--drivers/staging/ks7010/michael_mic.h7
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h10
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h3
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h21
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h65
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h7
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h16
-rw-r--r--drivers/staging/lustre/lnet/Kconfig1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c18
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h18
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c201
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h8
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c48
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c207
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c17
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c46
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c14
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c30
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c363
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c21
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c39
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c15
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c8
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c8
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c57
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h108
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h26
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h91
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_user.h66
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h143
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h38
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h486
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h412
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h329
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h26
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h (renamed from drivers/staging/lustre/lustre/include/linux/lustre_compat25.h)6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h16
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h36
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h32
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h24
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h318
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h79
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h97
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h184
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h52
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h102
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_patchless_compat.h (renamed from drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h)0
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h23
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ver.h19
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h390
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h195
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h34
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c100
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c109
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h20
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c32
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c84
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c49
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c119
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c61
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c899
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c739
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h371
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c715
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c70
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c221
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c378
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c233
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h82
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c37
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c1439
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c13
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h29
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c76
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c26
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c338
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c25
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c16
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c365
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h126
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c1484
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h14
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c17
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c25
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c39
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c349
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c50
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c60
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c18
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c78
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c17
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h63
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c236
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c176
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c36
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c725
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c27
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c19
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c50
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c57
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c148
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c201
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c26
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c150
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c240
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c13
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c48
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c13
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c170
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c41
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c279
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c46
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c7
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c278
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c400
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c127
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c332
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c100
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c36
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c245
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c28
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c24
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c745
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c14
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h36
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c8
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c14
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c7
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c37
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c6
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c26
-rw-r--r--drivers/staging/most/Documentation/ABI/sysfs-class-most.txt134
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c62
-rw-r--r--drivers/staging/most/aim-network/networking.c26
-rw-r--r--drivers/staging/most/aim-sound/sound.c11
-rw-r--r--drivers/staging/most/aim-v4l2/video.c123
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.c185
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h9
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c108
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_reg.h11
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.c2
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c502
-rw-r--r--drivers/staging/most/mostcore/core.c128
-rw-r--r--drivers/staging/most/mostcore/mostcore.h6
-rw-r--r--drivers/staging/netlogic/xlr_net.c14
-rw-r--r--drivers/staging/octeon-usb/Kconfig2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c64
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c7
-rw-r--r--drivers/staging/octeon/ethernet-rx.c181
-rw-r--r--drivers/staging/octeon/ethernet-util.h7
-rw-r--r--drivers/staging/octeon/ethernet.c125
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h3
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c132
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c42
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c731
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c245
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c70
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c337
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c11
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c48
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c65
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c18
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c230
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c237
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c76
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c96
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c115
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c67
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c73
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c202
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h18
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h29
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h4
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h65
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h84
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h273
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h18
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h9
-rw-r--r--drivers/staging/rtl8188eu/include/phy.h6
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h32
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h94
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h121
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h12
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h23
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h15
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h36
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h93
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h17
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h9
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h21
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h17
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h30
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h83
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c53
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c424
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c11
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c40
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c49
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/dot11d.c12
-rw-r--r--drivers/staging/rtl8192e/dot11d.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c42
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c120
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib.h12
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c99
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c34
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c75
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c18
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192u/r8192U.h4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c84
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c6
-rw-r--r--drivers/staging/rtl8712/ieee80211.c25
-rw-r--r--drivers/staging/rtl8712/os_intfs.c6
-rw-r--r--drivers/staging/rtl8712/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8712/osdep_service.h7
-rw-r--r--drivers/staging/rtl8712/recv_linux.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c14
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c410
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c66
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h18
-rw-r--r--drivers/staging/rtl8712/rtl8712_spec.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h27
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c76
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h277
-rw-r--r--drivers/staging/rtl8712/rtl871x_ht.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_led.h15
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c30
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h12
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h39
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h33
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c27
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h9
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h24
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c18
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h12
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c3
-rw-r--r--drivers/staging/rtl8712/usb_intf.c14
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c92
-rw-r--r--drivers/staging/rtl8712/wifi.h15
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h6
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c17
-rw-r--r--drivers/staging/rtl8723au/Kconfig33
-rw-r--r--drivers/staging/rtl8723au/Makefile53
-rw-r--r--drivers/staging/rtl8723au/TODO16
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c1738
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c1470
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c538
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ieee80211.c855
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c2314
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c6187
-rw-r--r--drivers/staging/rtl8723au/core/rtw_pwrctrl.c606
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c2204
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c1630
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sreset.c214
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sta_mgt.c439
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c1537
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c2341
-rw-r--r--drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c80
-rw-r--r--drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c136
-rw-r--r--drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c1097
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c565
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c187
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c259
-rw-r--r--drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c156
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c853
-rw-r--r--drivers/staging/rtl8723au/hal/hal_intf.c42
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c1732
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c396
-rw-r--r--drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c88
-rw-r--r--drivers/staging/rtl8723au/hal/odm_debug.c39
-rw-r--r--drivers/staging/rtl8723au/hal/odm_interface.c49
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c11265
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c755
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_dm.c194
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2076
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c961
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c503
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c69
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_sreset.c55
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_recv.c267
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_xmit.c520
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c1269
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c690
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723APhyCfg.h162
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723APhyReg.h1078
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723PwrSeq.h126
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h29
-rw-r--r--drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h64
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h38
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h28
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h26
-rw-r--r--drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h25
-rw-r--r--drivers/staging/rtl8723au/include/HalPwrSeqCmd.h130
-rw-r--r--drivers/staging/rtl8723au/include/HalVerDef.h114
-rw-r--r--drivers/staging/rtl8723au/include/drv_types.h274
-rw-r--r--drivers/staging/rtl8723au/include/hal_com.h182
-rw-r--r--drivers/staging/rtl8723au/include/hal_intf.h115
-rw-r--r--drivers/staging/rtl8723au/include/ieee80211.h341
-rw-r--r--drivers/staging/rtl8723au/include/ioctl_cfg80211.h66
-rw-r--r--drivers/staging/rtl8723au/include/mlme_osdep.h24
-rw-r--r--drivers/staging/rtl8723au/include/odm.h860
-rw-r--r--drivers/staging/rtl8723au/include/odm_HWConfig.h153
-rw-r--r--drivers/staging/rtl8723au/include/odm_RegConfig8723A.h27
-rw-r--r--drivers/staging/rtl8723au/include/odm_RegDefine11N.h165
-rw-r--r--drivers/staging/rtl8723au/include/odm_debug.h117
-rw-r--r--drivers/staging/rtl8723au/include/odm_interface.h62
-rw-r--r--drivers/staging/rtl8723au/include/odm_precomp.h49
-rw-r--r--drivers/staging/rtl8723au/include/odm_reg.h111
-rw-r--r--drivers/staging/rtl8723au/include/osdep_intf.h45
-rw-r--r--drivers/staging/rtl8723au/include/osdep_service.h88
-rw-r--r--drivers/staging/rtl8723au/include/recv_osdep.h36
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h1627
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_bt_intf.h69
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_cmd.h158
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_dm.h137
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_hal.h538
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_pg.h98
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_recv.h65
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_rf.h58
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_spec.h2148
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_sreset.h24
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_xmit.h225
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ap.h51
-rw-r--r--drivers/staging/rtl8723au/include/rtw_cmd.h815
-rw-r--r--drivers/staging/rtl8723au/include/rtw_debug.h191
-rw-r--r--drivers/staging/rtl8723au/include/rtw_eeprom.h135
-rw-r--r--drivers/staging/rtl8723au/include/rtw_efuse.h109
-rw-r--r--drivers/staging/rtl8723au/include/rtw_event.h74
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ht.h42
-rw-r--r--drivers/staging/rtl8723au/include/rtw_io.h237
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme.h340
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h683
-rw-r--r--drivers/staging/rtl8723au/include/rtw_pwrctrl.h241
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h305
-rw-r--r--drivers/staging/rtl8723au/include/rtw_rf.h102
-rw-r--r--drivers/staging/rtl8723au/include/rtw_security.h331
-rw-r--r--drivers/staging/rtl8723au/include/rtw_sreset.h36
-rw-r--r--drivers/staging/rtl8723au/include/rtw_version.h1
-rw-r--r--drivers/staging/rtl8723au/include/rtw_xmit.h385
-rw-r--r--drivers/staging/rtl8723au/include/sta_info.h373
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops.h68
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops_linux.h41
-rw-r--r--drivers/staging/rtl8723au/include/wifi.h84
-rw-r--r--drivers/staging/rtl8723au/include/wlan_bssdef.h123
-rw-r--r--drivers/staging/rtl8723au/include/xmit_osdep.h38
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c3348
-rw-r--r--drivers/staging/rtl8723au/os_dep/mlme_linux.c81
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c852
-rw-r--r--drivers/staging/rtl8723au/os_dep/recv_linux.c165
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c627
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c233
-rw-r--r--drivers/staging/rtl8723au/os_dep/xmit_linux.c154
-rw-r--r--drivers/staging/rts5208/ms.c85
-rw-r--r--drivers/staging/rts5208/ms.h1
-rw-r--r--drivers/staging/rts5208/rtsx.c63
-rw-r--r--drivers/staging/rts5208/rtsx.h24
-rw-r--r--drivers/staging/rts5208/rtsx_card.c45
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c18
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h90
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c28
-rw-r--r--drivers/staging/rts5208/rtsx_sys.h2
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h1
-rw-r--r--drivers/staging/rts5208/sd.c183
-rw-r--r--drivers/staging/rts5208/spi.c3
-rw-r--r--drivers/staging/rts5208/spi.h1
-rw-r--r--drivers/staging/rts5208/xd.c44
-rw-r--r--drivers/staging/slicoss/slic.h75
-rw-r--r--drivers/staging/slicoss/slichw.h409
-rw-r--r--drivers/staging/slicoss/slicoss.c646
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c106
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c15
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h94
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c33
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c7
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c14
-rw-r--r--drivers/staging/sm750fb/sm750.c2
-rw-r--r--drivers/staging/sm750fb/sm750.h16
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c100
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c114
-rw-r--r--drivers/staging/speakup/devsynth.c2
-rw-r--r--drivers/staging/speakup/kobjects.c17
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/speakup/varhandlers.c6
-rw-r--r--drivers/staging/unisys/include/channel.h221
-rw-r--r--drivers/staging/unisys/include/channel_guid.h55
-rw-r--r--drivers/staging/unisys/include/diagchannel.h38
-rw-r--r--drivers/staging/unisys/include/guestlinuxdebug.h180
-rw-r--r--drivers/staging/unisys/include/iochannel.h1
-rw-r--r--drivers/staging/unisys/include/periodic_work.h40
-rw-r--r--drivers/staging/unisys/include/vbushelper.h46
-rw-r--r--drivers/staging/unisys/include/version.h45
-rw-r--r--drivers/staging/unisys/include/visorbus.h111
-rw-r--r--drivers/staging/unisys/visorbus/Makefile1
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h76
-rw-r--r--drivers/staging/unisys/visorbus/controlvmcompletionstatus.h101
-rw-r--r--drivers/staging/unisys/visorbus/iovmcall_gnuc.h48
-rw-r--r--drivers/staging/unisys/visorbus/periodic_work.c204
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h211
-rw-r--r--drivers/staging/unisys/visorbus/vbusdeviceinfo.h213
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c841
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h99
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c573
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c1593
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h190
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c70
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h2
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c149
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c100
-rw-r--r--drivers/staging/vc04_services/Kconfig9
-rw-r--r--drivers/staging/vc04_services/Makefile14
-rw-r--r--drivers/staging/vc04_services/interface/vchi/connections/connection.h328
-rw-r--r--drivers/staging/vc04_services/interface/vchi/message_drivers/message.h204
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h378
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h224
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg_internal.h71
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h175
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_mh.h42
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h40
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835.h42
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c586
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c2903
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h220
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_build_info.h37
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h69
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c120
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h50
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c3934
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h712
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c383
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h52
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion87
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h189
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h131
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c458
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h69
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h71
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h58
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c860
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c156
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h82
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c59
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c12
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/baseband.c2
-rw-r--r--drivers/staging/vt6655/card.c4
-rw-r--r--drivers/staging/vt6655/channel.c3
-rw-r--r--drivers/staging/vt6655/device_main.c12
-rw-r--r--drivers/staging/vt6655/key.c10
-rw-r--r--drivers/staging/vt6655/key.h2
-rw-r--r--drivers/staging/vt6655/power.c12
-rw-r--r--drivers/staging/vt6655/rf.c19
-rw-r--r--drivers/staging/vt6655/rxtx.c49
-rw-r--r--drivers/staging/vt6656/baseband.h4
-rw-r--r--drivers/staging/vt6656/card.c75
-rw-r--r--drivers/staging/vt6656/dpc.c15
-rw-r--r--drivers/staging/vt6656/dpc.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.c5
-rw-r--r--drivers/staging/wilc1000/TODO1
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h6
-rw-r--r--drivers/staging/wilc1000/host_interface.c9
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c1
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c31
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c33
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c2
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c13
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c151
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h904
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c307
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c8
-rw-r--r--drivers/staging/wlan-ng/p80211metadef.h19
-rw-r--r--drivers/staging/wlan-ng/p80211metastruct.h248
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c88
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h43
-rw-r--r--drivers/staging/wlan-ng/p80211req.c95
-rw-r--r--drivers/staging/wlan-ng/p80211req.h2
-rw-r--r--drivers/staging/wlan-ng/p80211types.h261
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c6
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c50
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c62
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h51
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c622
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c261
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c38
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c13
-rw-r--r--drivers/staging/xgifb/vb_setmode.c49
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c234
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c3
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.h5
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dma.c14
-rw-r--r--drivers/tty/serial/8250/8250_dw.c48
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c378
-rw-r--r--drivers/tty/serial/8250/8250_mid.c8
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_of.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c312
-rw-r--r--drivers/tty/serial/8250/8250_port.c81
-rw-r--r--drivers/tty/serial/8250/Kconfig16
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c2
-rw-r--r--drivers/tty/serial/altera_uart.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c57
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c58
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/earlycon-arm-semihost.c3
-rw-r--r--drivers/tty/serial/earlycon.c26
-rw-r--r--drivers/tty/serial/fsl_lpuart.c814
-rw-r--r--drivers/tty/serial/imx.c213
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c2
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c15
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/samsung.c2
-rw-r--r--drivers/tty/serial/samsung.h2
-rw-r--r--drivers/tty/serial/sc16is7xx.c20
-rw-r--r--drivers/tty/serial/serial_core.c136
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c613
-rw-r--r--drivers/tty/serial/stm32-usart.h229
-rw-r--r--drivers/tty/serial/timbuart.c2
-rw-r--r--drivers/tty/serial/uartlite.c2
-rw-r--r--drivers/tty/serial/vt8500_serial.c8
-rw-r--r--drivers/tty/serial/xilinx_uartps.c304
-rw-r--r--drivers/tty/vt/vt.c19
-rw-r--r--drivers/uio/uio_dmem_genirq.c2
-rw-r--r--drivers/usb/Kconfig24
-rw-r--r--drivers/usb/atm/cxacru.c6
-rw-r--r--drivers/usb/atm/speedtch.c1
-rw-r--r--drivers/usb/atm/ueagle-atm.c13
-rw-r--r--drivers/usb/atm/usbatm.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c42
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c22
-rw-r--r--drivers/usb/class/cdc-acm.c49
-rw-r--r--drivers/usb/class/cdc-wdm.c165
-rw-r--r--drivers/usb/class/usbtmc.c11
-rw-r--r--drivers/usb/common/ulpi.c16
-rw-r--r--drivers/usb/core/Kconfig25
-rw-r--r--drivers/usb/core/Makefile5
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/ledtrig-usbport.c314
-rw-r--r--drivers/usb/core/message.c10
-rw-r--r--drivers/usb/core/of.c1
-rw-r--r--drivers/usb/core/otg_whitelist.h2
-rw-r--r--drivers/usb/core/urb.c4
-rw-r--r--drivers/usb/core/usb.c11
-rw-r--r--drivers/usb/dwc2/core.c126
-rw-r--r--drivers/usb/dwc2/core.h8
-rw-r--r--drivers/usb/dwc2/gadget.c94
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc2/hw.h1
-rw-r--r--drivers/usb/dwc2/platform.c34
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c122
-rw-r--r--drivers/usb/dwc3/core.h35
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c48
-rw-r--r--drivers/usb/dwc3/gadget.c278
-rw-r--r--drivers/usb/dwc3/ulpi.c10
-rw-r--r--drivers/usb/gadget/Kconfig39
-rw-r--r--drivers/usb/gadget/composite.c16
-rw-r--r--drivers/usb/gadget/configfs.c5
-rw-r--r--drivers/usb/gadget/function/f_fs.c45
-rw-r--r--drivers/usb/gadget/function/f_hid.c28
-rw-r--r--drivers/usb/gadget/function/f_loopback.c14
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c28
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h1
-rw-r--r--drivers/usb/gadget/function/f_midi.c234
-rw-r--r--drivers/usb/gadget/function/f_ncm.c84
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c6
-rw-r--r--drivers/usb/gadget/function/f_uvc.c7
-rw-r--r--drivers/usb/gadget/function/storage_common.c24
-rw-r--r--drivers/usb/gadget/function/storage_common.h10
-rw-r--r--drivers/usb/gadget/function/u_ether.c8
-rw-r--r--drivers/usb/gadget/function/u_ether.h1
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c2
-rw-r--r--drivers/usb/gadget/u_f.c6
-rw-r--r--drivers/usb/gadget/u_f.h17
-rw-r--r--drivers/usb/gadget/udc/core.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c16
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c5
-rw-r--r--drivers/usb/gadget/udc/net2280.c80
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c51
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/bcma-hcd.c169
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c18
-rw-r--r--drivers/usb/host/max3421-hcd.c8
-rw-r--r--drivers/usb/host/ohci-at91.c69
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-sa1111.c4
-rw-r--r--drivers/usb/host/uhci-hcd.c5
-rw-r--r--drivers/usb/host/whci/init.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c2
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/misc/Kconfig6
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/adutux.c21
-rw-r--r--drivers/usb/misc/appledisplay.c17
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c5
-rw-r--r--drivers/usb/misc/cytherm.c32
-rw-r--r--drivers/usb/misc/ezusb.c2
-rw-r--r--drivers/usb/misc/ftdi-elan.c67
-rw-r--r--drivers/usb/misc/idmouse.c1
-rw-r--r--drivers/usb/misc/iowarrior.c26
-rw-r--r--drivers/usb/misc/ldusb.c28
-rw-r--r--drivers/usb/misc/legousbtower.c59
-rw-r--r--drivers/usb/misc/lvstest.c25
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c3
-rw-r--r--drivers/usb/misc/trancevibrator.c3
-rw-r--r--drivers/usb/misc/usb4604.c175
-rw-r--r--drivers/usb/misc/usblcd.c9
-rw-r--r--drivers/usb/misc/usbsevseg.c8
-rw-r--r--drivers/usb/misc/uss720.c5
-rw-r--r--drivers/usb/misc/yurex.c16
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/am35x.c8
-rw-r--r--drivers/usb/musb/da8xx.c164
-rw-r--r--drivers/usb/musb/musb_core.c81
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_dsps.c80
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/musb_virthub.c1
-rw-r--r--drivers/usb/musb/omap2430.c76
-rw-r--r--drivers/usb/musb/sunxi.c61
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c8
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c61
-rw-r--r--drivers/usb/renesas_usbhs/common.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c8
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c10
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c4
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c171
-rw-r--r--drivers/usb/storage/alauda.c11
-rw-r--r--drivers/usb/storage/scsiglue.c8
-rw-r--r--drivers/usb/storage/sddr09.c14
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/usb.c16
-rw-r--r--drivers/usb/usb-skeleton.c14
-rw-r--r--drivers/usb/usbip/Kconfig24
-rw-r--r--drivers/usb/usbip/stub_rx.c1
-rw-r--r--drivers/usb/usbip/vhci.h54
-rw-r--r--drivers/usb/usbip/vhci_hcd.c285
-rw-r--r--drivers/usb/usbip/vhci_rx.c21
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c296
-rw-r--r--drivers/usb/usbip/vudc_dev.c2
-rw-r--r--drivers/usb/usbip/vudc_rx.c2
-rw-r--r--drivers/usb/wusbcore/cbaf.c3
-rw-r--r--drivers/usb/wusbcore/crypto.c4
-rw-r--r--drivers/usb/wusbcore/security.c4
-rw-r--r--drivers/usb/wusbcore/wa-nep.c9
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c4
-rw-r--r--drivers/uwb/hwa-rc.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c90
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c52
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h1
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c6
-rw-r--r--drivers/vhost/test.c13
-rw-r--r--drivers/video/backlight/pwm_bl.c4
-rw-r--r--drivers/video/backlight/tosa_bl.c1
-rw-r--r--drivers/vme/bridges/Kconfig8
-rw-r--r--drivers/vme/bridges/Makefile1
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c8
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.h3
-rw-r--r--drivers/vme/bridges/vme_fake.c1306
-rw-r--r--drivers/vme/bridges/vme_tsi148.c8
-rw-r--r--drivers/vme/bridges/vme_tsi148.h3
-rw-r--r--drivers/vme/vme.c31
-rw-r--r--drivers/vme/vme_bridge.h3
-rw-r--r--drivers/w1/slaves/w1_therm.c22
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c4
-rw-r--r--drivers/xen/events/events_base.c26
-rw-r--r--drivers/xen/events/events_fifo.c34
-rw-r--r--drivers/xen/platform-pci.c64
-rw-r--r--drivers/xen/sys-hypervisor.c12
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c117
-rw-r--r--fs/Kconfig3
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/afs/callback.c4
-rw-r--r--fs/afs/cmservice.c168
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/fsclient.c148
-rw-r--r--fs/afs/internal.h38
-rw-r--r--fs/afs/main.c1
-rw-r--r--fs/afs/rxrpc.c522
-rw-r--r--fs/afs/server.c11
-rw-r--r--fs/afs/vlclient.c7
-rw-r--r--fs/afs/vlocation.c4
-rw-r--r--fs/autofs4/waitq.c4
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/block_dev.c18
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/coda/file.c23
-rw-r--r--fs/crypto/crypto.c11
-rw-r--r--fs/crypto/fname.c85
-rw-r--r--fs/crypto/keyinfo.c71
-rw-r--r--fs/dax.c254
-rw-r--r--fs/debugfs/file.c15
-rw-r--r--fs/debugfs/internal.h4
-rw-r--r--fs/devpts/inode.c71
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/ext2/Kconfig1
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/file.c77
-rw-r--r--fs/ext2/ialloc.c5
-rw-r--r--fs/ext2/inode.c110
-rw-r--r--fs/ext4/dir.c8
-rw-r--r--fs/ext4/ext4.h35
-rw-r--r--fs/ext4/extents.c27
-rw-r--r--fs/ext4/file.c11
-rw-r--r--fs/ext4/fsync.c9
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inode.c71
-rw-r--r--fs/ext4/ioctl.c11
-rw-r--r--fs/ext4/move_extent.c7
-rw-r--r--fs/ext4/namei.c22
-rw-r--r--fs/ext4/page-io.c4
-rw-r--r--fs/ext4/super.c126
-rw-r--r--fs/ext4/symlink.c10
-rw-r--r--fs/ext4/xattr.c340
-rw-r--r--fs/f2fs/acl.c12
-rw-r--r--fs/f2fs/acl.h1
-rw-r--r--fs/f2fs/checkpoint.c205
-rw-r--r--fs/f2fs/data.c131
-rw-r--r--fs/f2fs/debug.c17
-rw-r--r--fs/f2fs/dir.c116
-rw-r--r--fs/f2fs/f2fs.h175
-rw-r--r--fs/f2fs/file.c20
-rw-r--r--fs/f2fs/gc.c90
-rw-r--r--fs/f2fs/inline.c25
-rw-r--r--fs/f2fs/inode.c17
-rw-r--r--fs/f2fs/namei.c21
-rw-r--r--fs/f2fs/node.c33
-rw-r--r--fs/f2fs/node.h77
-rw-r--r--fs/f2fs/recovery.c124
-rw-r--r--fs/f2fs/segment.c219
-rw-r--r--fs/f2fs/segment.h11
-rw-r--r--fs/f2fs/super.c93
-rw-r--r--fs/f2fs/xattr.c39
-rw-r--r--fs/fuse/Kconfig1
-rw-r--r--fs/fuse/Makefile2
-rw-r--r--fs/fuse/acl.c99
-rw-r--r--fs/fuse/dev.c64
-rw-r--r--fs/fuse/dir.c288
-rw-r--r--fs/fuse/file.c68
-rw-r--r--fs/fuse/fuse_i.h40
-rw-r--r--fs/fuse/inode.c28
-rw-r--r--fs/fuse/xattr.c211
-rw-r--r--fs/gfs2/aops.c19
-rw-r--r--fs/gfs2/bmap.c6
-rw-r--r--fs/gfs2/dir.c20
-rw-r--r--fs/gfs2/file.c34
-rw-r--r--fs/gfs2/glock.c10
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/inode.h2
-rw-r--r--fs/gfs2/main.c4
-rw-r--r--fs/gfs2/meta_io.c35
-rw-r--r--fs/gfs2/ops_fstype.c8
-rw-r--r--fs/gfs2/quota.c4
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hugetlbfs/inode.c12
-rw-r--r--fs/internal.h11
-rw-r--r--fs/iomap.c89
-rw-r--r--fs/jbd2/journal.c131
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/jfs/jfs_txnmgr.c3
-rw-r--r--fs/jfs/resize.c10
-rw-r--r--fs/locks.c21
-rw-r--r--fs/mbcache.c6
-rw-r--r--fs/mount.h3
-rw-r--r--fs/namespace.c77
-rw-r--r--fs/nfs/file.c25
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nfsd/auth.c6
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/notify/fanotify/fanotify_user.c39
-rw-r--r--fs/notify/group.c6
-rw-r--r--fs/notify/inotify/inotify_user.c16
-rw-r--r--fs/notify/notification.c35
-rw-r--r--fs/nsfs.c105
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/file.c34
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/orangefs/dcache.c2
-rw-r--r--fs/orangefs/devorangefs-req.c164
-rw-r--r--fs/orangefs/downcall.h11
-rw-r--r--fs/orangefs/file.c45
-rw-r--r--fs/orangefs/namei.c8
-rw-r--r--fs/orangefs/orangefs-cache.c6
-rw-r--r--fs/orangefs/orangefs-debugfs.c761
-rw-r--r--fs/orangefs/orangefs-debugfs.h8
-rw-r--r--fs/orangefs/orangefs-dev-proto.h6
-rw-r--r--fs/orangefs/orangefs-kernel.h51
-rw-r--r--fs/orangefs/orangefs-mod.c86
-rw-r--r--fs/orangefs/orangefs-sysfs.c1287
-rw-r--r--fs/orangefs/orangefs-utils.c403
-rw-r--r--fs/orangefs/protocol.h25
-rw-r--r--fs/orangefs/super.c31
-rw-r--r--fs/orangefs/upcall.h18
-rw-r--r--fs/orangefs/waitqueue.c10
-rw-r--r--fs/overlayfs/copy_up.c22
-rw-r--r--fs/overlayfs/dir.c10
-rw-r--r--fs/pipe.c13
-rw-r--r--fs/pnode.c2
-rw-r--r--fs/pnode.h1
-rw-r--r--fs/proc/array.c189
-rw-r--r--fs/proc/base.c52
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/meminfo.c211
-rw-r--r--fs/proc/proc_net.c13
-rw-r--r--fs/proc/proc_sysctl.c19
-rw-r--r--fs/proc/stat.c49
-rw-r--r--fs/proc/task_mmu.c10
-rw-r--r--fs/pstore/platform.c53
-rw-r--r--fs/pstore/pmsg.c35
-rw-r--r--fs/pstore/ram.c46
-rw-r--r--fs/pstore/ram_core.c96
-rw-r--r--fs/quota/quota.c18
-rw-r--r--fs/reiserfs/super.c12
-rw-r--r--fs/seq_file.c57
-rw-r--r--fs/splice.c683
-rw-r--r--fs/sysfs/group.c4
-rw-r--r--fs/udf/file.c18
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c325
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h35
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c135
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h25
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c136
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h12
-rw-r--r--fs/xfs/libxfs/xfs_btree.c59
-rw-r--r--fs/xfs/libxfs/xfs_btree.h28
-rw-r--r--fs/xfs/libxfs/xfs_defer.c79
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h10
-rw-r--r--fs/xfs/xfs_aops.c31
-rw-r--r--fs/xfs/xfs_aops.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_buf_item.c9
-rw-r--r--fs/xfs/xfs_extent_busy.c2
-rw-r--r--fs/xfs/xfs_file.c124
-rw-r--r--fs/xfs/xfs_filestream.c13
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_icache.c14
-rw-r--r--fs/xfs/xfs_inode.h1
-rw-r--r--fs/xfs/xfs_iomap.c494
-rw-r--r--fs/xfs/xfs_iomap.h2
-rw-r--r--fs/xfs/xfs_log_priv.h3
-rw-r--r--fs/xfs/xfs_log_recover.c191
-rw-r--r--fs/xfs/xfs_mount.c14
-rw-r--r--fs/xfs/xfs_mount.h44
-rw-r--r--fs/xfs/xfs_rmap_item.c36
-rw-r--r--fs/xfs/xfs_rmap_item.h8
-rw-r--r--fs/xfs/xfs_super.c7
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_sysfs.c47
-rw-r--r--fs/xfs/xfs_trace.h115
-rw-r--r--fs/xfs/xfs_trans.c3
-rw-r--r--fs/xfs/xfs_trans_extfree.c3
-rw-r--r--fs/xfs/xfs_xattr.c1
-rw-r--r--include/asm-generic/pgtable.h3
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/dt-bindings/clock/exynos5410.h3
-rw-r--r--include/dt-bindings/clock/exynos5420.h11
-rw-r--r--include/dt-bindings/clock/exynos5440.h2
-rw-r--r--include/dt-bindings/clock/gxbb-aoclkc.h66
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h16
-rw-r--r--include/dt-bindings/clock/imx5-clock.h15
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/maxim,max77620.h21
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h2
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h486
-rw-r--r--include/dt-bindings/clock/qcom,gcc-mdm9615.h327
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8996.h5
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h52
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8996.h1
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h25
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h3
-rw-r--r--include/dt-bindings/clock/sun6i-a31-ccu.h187
-rw-r--r--include/dt-bindings/clock/sun8i-a23-a33-ccu.h127
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h163
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h22
-rw-r--r--include/dt-bindings/mfd/stm32f4-rcc.h98
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h21
-rw-r--r--include/dt-bindings/pinctrl/samsung.h57
-rw-r--r--include/dt-bindings/reset/gxbb-aoclkc.h66
-rw-r--r--include/dt-bindings/reset/mt2701-resets.h83
-rw-r--r--include/dt-bindings/reset/qcom,gcc-mdm9615.h136
-rw-r--r--include/dt-bindings/reset/sun6i-a31-ccu.h106
-rw-r--r--include/dt-bindings/reset/sun8i-a23-a33-ccu.h87
-rw-r--r--include/dt-bindings/soc/rockchip,boot-mode.h15
-rw-r--r--include/kvm/arm_vgic.h18
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/amba/bus.h6
-rw-r--r--include/linux/amba/serial.h9
-rw-r--r--include/linux/amd-iommu.h43
-rw-r--r--include/linux/atmel_serial.h2
-rw-r--r--include/linux/bcma/bcma.h3
-rw-r--r--include/linux/bcma/bcma_regs.h2
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/bitfield.h93
-rw-r--r--include/linux/bitops.h36
-rw-r--r--include/linux/blk-cgroup.h2
-rw-r--r--include/linux/blk-mq-pci.h9
-rw-r--r--include/linux/blk-mq.h48
-rw-r--r--include/linux/blk_types.h21
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/blockgroup_lock.h28
-rw-r--r--include/linux/bootmem.h9
-rw-r--r--include/linux/bpf.h15
-rw-r--r--include/linux/bpf_verifier.h102
-rw-r--r--include/linux/bug.h3
-rw-r--r--include/linux/cgroup.h24
-rw-r--r--include/linux/clk-provider.h16
-rw-r--r--include/linux/compaction.h40
-rw-r--r--include/linux/console.h6
-rw-r--r--include/linux/coresight.h5
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cred.h11
-rw-r--r--include/linux/dax.h6
-rw-r--r--include/linux/debugfs.h17
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/dma-debug.h19
-rw-r--r--include/linux/dma-mapping.h41
-rw-r--r--include/linux/dma/dw.h5
-rw-r--r--include/linux/dma/hsu.h9
-rw-r--r--include/linux/dmaengine.h16
-rw-r--r--include/linux/extcon.h191
-rw-r--r--include/linux/extcon/extcon-adc-jack.h4
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/filter.h64
-rw-r--r--include/linux/firmware/meson/meson_sm.h31
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/fscrypto.h24
-rw-r--r--include/linux/fsnotify_backend.h3
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/ftrace_irq.h31
-rw-r--r--include/linux/gpio/driver.h8
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/huge_mm.h15
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/hwmon.h342
-rw-r--r--include/linux/hyperv.h101
-rw-r--r--include/linux/i2c-mux.h8
-rw-r--r--include/linux/i2c.h47
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_vlan.h34
-rw-r--r--include/linux/iio/consumer.h12
-rw-r--r--include/linux/iio/iio.h3
-rw-r--r--include/linux/iio/trigger.h26
-rw-r--r--include/linux/iio/triggered_buffer.h8
-rw-r--r--include/linux/inet_diag.h4
-rw-r--r--include/linux/init.h19
-rw-r--r--include/linux/iomap.h4
-rw-r--r--include/linux/ioprio.h1
-rw-r--r--include/linux/ipc_namespace.h1
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/jiffies.h4
-rw-r--r--include/linux/kern_levels.h2
-rw-r--r--include/linux/kernel.h48
-rw-r--r--include/linux/ktime.h5
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/leds.h9
-rw-r--r--include/linux/lightnvm.h18
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/lockdep.h4
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/lsm_hooks.h37
-rw-r--r--include/linux/mbus.h18
-rw-r--r--include/linux/mcb.h23
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h21
-rw-r--r--include/linux/mfd/88pm80x.h4
-rw-r--r--include/linux/mfd/abx500/ab8500.h2
-rw-r--r--include/linux/mfd/ac100.h178
-rw-r--r--include/linux/mfd/arizona/core.h12
-rw-r--r--include/linux/mfd/axp20x.h60
-rw-r--r--include/linux/mfd/cros_ec.h18
-rw-r--r--include/linux/mfd/cros_ec_commands.h34
-rw-r--r--include/linux/mfd/da9063/core.h4
-rw-r--r--include/linux/mfd/da9063/pdata.h4
-rw-r--r--include/linux/mfd/da9063/registers.h4
-rw-r--r--include/linux/mfd/db8500-prcmu.h6
-rw-r--r--include/linux/mfd/dbx500-prcmu.h9
-rw-r--r--include/linux/mfd/lp873x.h268
-rw-r--r--include/linux/mfd/max14577-private.h2
-rw-r--r--include/linux/mfd/max14577.h2
-rw-r--r--include/linux/mfd/rk808.h154
-rw-r--r--include/linux/mfd/stmpe.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h4
-rw-r--r--include/linux/mfd/tps65217.h12
-rw-r--r--include/linux/mfd/tps65218.h6
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/miscdevice.h8
-rw-r--r--include/linux/mlx4/cmd.h3
-rw-r--r--include/linux/mlx4/device.h16
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/cq.h6
-rw-r--r--include/linux/mlx5/device.h441
-rw-r--r--include/linux/mlx5/driver.h35
-rw-r--r--include/linux/mlx5/fs.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc.h297
-rw-r--r--include/linux/mlx5/port.h40
-rw-r--r--include/linux/mlx5/qp.h128
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h44
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/core.h10
-rw-r--r--include/linux/mmc/dw_mmc.h2
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter.h63
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h64
-rw-r--r--include/linux/netfilter_ingress.h18
-rw-r--r--include/linux/nmi.h31
-rw-r--r--include/linux/of.h144
-rw-r--r--include/linux/of_fdt.h3
-rw-r--r--include/linux/of_gpio.h5
-rw-r--r--include/linux/omap-dma.h19
-rw-r--r--include/linux/omap-gpmc.h4
-rw-r--r--include/linux/oom.h46
-rw-r--r--include/linux/page_ext.h8
-rw-r--r--include/linux/page_owner.h2
-rw-r--r--include/linux/pagemap.h14
-rw-r--r--include/linux/pci.h26
-rw-r--r--include/linux/perf_event.h9
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/phy/phy.h3
-rw-r--r--include/linux/pid_namespace.h1
-rw-r--r--include/linux/pipe_fs_i.h59
-rw-r--r--include/linux/platform_data/dma-dw.h4
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h2
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h6
-rw-r--r--include/linux/platform_data/gpio-htc-egpio.h (renamed from include/linux/mfd/htc-egpio.h)0
-rw-r--r--include/linux/platform_data/gpio-lpc32xx.h50
-rw-r--r--include/linux/platform_data/remoteproc-omap.h6
-rw-r--r--include/linux/power/bq24735-charger.h4
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power/sbs-battery.h8
-rw-r--r--include/linux/printk.h1
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/pstore.h17
-rw-r--r--include/linux/pstore_ram.h7
-rw-r--r--include/linux/ptp_clock_kernel.h5
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/qed/common_hsi.h359
-rw-r--r--include/linux/qed/eth_common.h155
-rw-r--r--include/linux/qed/iscsi_common.h28
-rw-r--r--include/linux/qed/qed_chain.h13
-rw-r--r--include/linux/qed/qed_eth_if.h3
-rw-r--r--include/linux/qed/qed_if.h36
-rw-r--r--include/linux/qed/qed_ll2_if.h139
-rw-r--r--include/linux/qed/qed_roce_if.h604
-rw-r--r--include/linux/qed/qede_roce.h88
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/qed/tcp_common.h16
-rw-r--r--include/linux/radix-tree.h6
-rw-r--r--include/linux/raid/pq.h6
-rw-r--r--include/linux/regmap.h8
-rw-r--r--include/linux/regulator/consumer.h3
-rw-r--r--include/linux/regulator/driver.h10
-rw-r--r--include/linux/remoteproc.h16
-rw-r--r--include/linux/rhashtable.h543
-rw-r--r--include/linux/rpmsg.h248
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sbitmap.h373
-rw-r--r--include/linux/sched.h22
-rw-r--r--include/linux/security.h25
-rw-r--r--include/linux/seq_file.h4
-rw-r--r--include/linux/serial_core.h13
-rw-r--r--include/linux/skbuff.h81
-rw-r--r--include/linux/soc/qcom/smd.h29
-rw-r--r--include/linux/spi/spi.h83
-rw-r--r--include/linux/splice.h3
-rw-r--r--include/linux/swap.h15
-rw-r--r--include/linux/sysctl.h7
-rw-r--r--include/linux/t10-pi.h20
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/linux/uio.h14
-rw-r--r--include/linux/ulpi/driver.h8
-rw-r--r--include/linux/ulpi/interface.h9
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/gadget.h30
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/user_namespace.h44
-rw-r--r--include/linux/utsname.h1
-rw-r--r--include/linux/vme.h2
-rw-r--r--include/linux/win_minmax.h37
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_rxrpc.h53
-rw-r--r--include/net/bluetooth/bluetooth.h4
-rw-r--r--include/net/bluetooth/hci.h7
-rw-r--r--include/net/bluetooth/hci_core.h11
-rw-r--r--include/net/bluetooth/hci_mon.h4
-rw-r--r--include/net/bluetooth/mgmt.h24
-rw-r--r--include/net/cfg80211.h259
-rw-r--r--include/net/devlink.h1
-rw-r--r--include/net/dsa.h53
-rw-r--r--include/net/dst_metadata.h52
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/flow_dissector.h14
-rw-r--r--include/net/fq.h3
-rw-r--r--include/net/fq_impl.h7
-rw-r--r--include/net/gre.h10
-rw-r--r--include/net/ieee80211_radiotap.h21
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/ip.h23
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h49
-rw-r--r--include/net/ip_tunnels.h21
-rw-r--r--include/net/kcm.h37
-rw-r--r--include/net/l3mdev.h153
-rw-r--r--include/net/lwtunnel.h44
-rw-r--r--include/net/mac80211.h108
-rw-r--r--include/net/mpls.h15
-rw-r--r--include/net/ncsi.h5
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/br_netfilter.h6
-rw-r--r--include/net/netfilter/nf_conntrack.h56
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h17
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h8
-rw-r--r--include/net/netfilter/nf_log.h14
-rw-r--r--include/net/netfilter/nf_queue.h69
-rw-r--r--include/net/netfilter/nf_tables.h22
-rw-r--r--include/net/netfilter/nf_tables_bridge.h7
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h42
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h53
-rw-r--r--include/net/netns/conntrack.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/netns/xfrm.h12
-rw-r--r--include/net/pkt_cls.h24
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/pptp.h23
-rw-r--r--include/net/route.h10
-rw-r--r--include/net/sch_generic.h76
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/sm.h94
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h13
-rw-r--r--include/net/strparser.h142
-rw-r--r--include/net/switchdev.h52
-rw-r--r--include/net/tc_act/tc_ife.h2
-rw-r--r--include/net/tc_act/tc_skbmod.h30
-rw-r--r--include/net/tc_act/tc_tunnel_key.h30
-rw-r--r--include/net/tc_act/tc_vlan.h26
-rw-r--r--include/net/tcp.h63
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h18
-rw-r--r--include/net/xfrm.h2
-rw-r--r--include/rdma/ib_hdrs.h178
-rw-r--r--include/rdma/ib_verbs.h104
-rw-r--r--include/rdma/rdmavt_qp.h19
-rw-r--r--include/rxrpc/packet.h17
-rw-r--r--include/soc/at91/atmel-sfr.h14
-rw-r--r--include/soc/rockchip/rockchip_sip.h27
-rw-r--r--include/sound/da7219.h2
-rw-r--r--include/sound/hda_register.h36
-rw-r--r--include/sound/hdaudio.h13
-rw-r--r--include/sound/hdaudio_ext.h12
-rw-r--r--include/sound/l3.h15
-rw-r--r--include/sound/rt5660.h31
-rw-r--r--include/sound/s3c24xx_uda134x.h1
-rw-r--r--include/sound/simple_card_utils.h35
-rw-r--r--include/sound/soc.h19
-rw-r--r--include/sound/tlv.h78
-rw-r--r--include/trace/events/compaction.h2
-rw-r--r--include/trace/events/f2fs.h18
-rw-r--r--include/trace/events/intel_ish.h30
-rw-r--r--include/trace/events/irq.h2
-rw-r--r--include/trace/events/rxrpc.h625
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/batman_adv.h94
-rw-r--r--include/uapi/linux/bpf.h51
-rw-r--r--include/uapi/linux/bpf_perf_event.h18
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/fuse.h10
-rw-r--r--include/uapi/linux/hsi/hsi_char.h17
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h30
-rw-r--r--include/uapi/linux/if_tunnel.h17
-rw-r--r--include/uapi/linux/inet_diag.h20
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/netfilter/nf_log.h12
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h106
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h8
-rw-r--r--include/uapi/linux/netfilter/xt_hashlimit.h23
-rw-r--r--include/uapi/linux/nl80211.h270
-rw-r--r--include/uapi/linux/nsfs.h13
-rw-r--r--include/uapi/linux/openvswitch.h17
-rw-r--r--include/uapi/linux/pci_regs.h12
-rw-r--r--include/uapi/linux/pkt_cls.h19
-rw-r--r--include/uapi/linux/pkt_sched.h4
-rw-r--r--include/uapi/linux/serial_reg.h8
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h3
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h39
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h41
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h2
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tipc_netlink.h4
-rw-r--r--include/uapi/linux/usb/functionfs.h2
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/rdma/Kbuild7
-rw-r--r--include/uapi/rdma/cxgb3-abi.h (renamed from drivers/infiniband/hw/cxgb3/iwch_user.h)8
-rw-r--r--include/uapi/rdma/cxgb4-abi.h (renamed from drivers/infiniband/hw/cxgb4/user.h)11
-rw-r--r--include/uapi/rdma/ib_user_verbs.h27
-rw-r--r--include/uapi/rdma/mlx4-abi.h (renamed from drivers/infiniband/hw/mlx4/user.h)6
-rw-r--r--include/uapi/rdma/mlx5-abi.h (renamed from drivers/infiniband/hw/mlx5/user.h)52
-rw-r--r--include/uapi/rdma/mthca-abi.h111
-rw-r--r--include/uapi/rdma/nes-abi.h (renamed from drivers/infiniband/hw/nes/nes_user.h)6
-rw-r--r--include/uapi/rdma/ocrdma-abi.h (renamed from drivers/infiniband/hw/ocrdma/ocrdma_abi.h)138
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h19
-rw-r--r--include/uapi/sound/Kbuild1
-rw-r--r--include/uapi/sound/asoc.h35
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/uapi/sound/snd_sst_tokens.h214
-rw-r--r--include/uapi/sound/tlv.h69
-rw-r--r--include/xen/xen.h3
-rw-r--r--init/Kconfig6
-rw-r--r--ipc/namespace.c51
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c8
-rw-r--r--kernel/auditsc.c12
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/core.c4
-rw-r--r--kernel/bpf/helpers.c55
-rw-r--r--kernel/bpf/stackmap.c5
-rw-r--r--kernel/bpf/verifier.c919
-rw-r--r--kernel/cgroup.c24
-rw-r--r--kernel/configs/kvm_guest.config (renamed from arch/x86/configs/kvm_guest.config)1
-rw-r--r--kernel/events/core.c89
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c13
-rw-r--r--kernel/groups.c67
-rw-r--r--kernel/livepatch/core.c19
-rw-r--r--kernel/module.c13
-rw-r--r--kernel/pid_namespace.c50
-rw-r--r--kernel/power/process.c17
-rw-r--r--kernel/printk/printk.c137
-rw-r--r--kernel/relay.c78
-rw-r--r--kernel/sched/idle.c13
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/time/timekeeping.c7
-rw-r--r--kernel/trace/Kconfig35
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/bpf_trace.c160
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace.c8
-rw-r--r--kernel/trace/trace.h5
-rw-r--r--kernel/trace/trace_entries.h27
-rw-r--r--kernel/trace/trace_events_trigger.c1
-rw-r--r--kernel/trace/trace_functions_graph.c6
-rw-r--r--kernel/trace/trace_hwlat.c633
-rw-r--r--kernel/trace/trace_output.c66
-rw-r--r--kernel/trace/trace_syscalls.c6
-rw-r--r--kernel/trace/trace_uprobe.c4
-rw-r--r--kernel/ucount.c235
-rw-r--r--kernel/uid16.c4
-rw-r--r--kernel/user_namespace.c99
-rw-r--r--kernel/utsname.c40
-rw-r--r--kernel/workqueue.c40
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Kconfig.ubsan11
-rw-r--r--lib/Makefile4
-rw-r--r--lib/atomic64_test.c4
-rw-r--r--lib/dma-debug.c52
-rw-r--r--lib/iov_iter.c395
-rw-r--r--lib/nmi_backtrace.c42
-rw-r--r--lib/radix-tree.c14
-rw-r--r--lib/raid6/.gitignore1
-rw-r--r--lib/raid6/Makefile8
-rw-r--r--lib/raid6/algos.c18
-rw-r--r--lib/raid6/avx512.c569
-rw-r--r--lib/raid6/recov_avx512.c388
-rw-r--r--lib/raid6/recov_s390xc.c116
-rw-r--r--lib/raid6/s390vx.uc168
-rw-r--r--lib/raid6/test/Makefile5
-rw-r--r--lib/raid6/test/test.c7
-rw-r--r--lib/raid6/x86.h10
-rw-r--r--lib/random32.c4
-rw-r--r--lib/rhashtable.c300
-rw-r--r--lib/sbitmap.c347
-rw-r--r--lib/test_bpf.c1
-rw-r--r--lib/win_minmax.c98
-rw-r--r--mm/bootmem.c14
-rw-r--r--mm/compaction.c205
-rw-r--r--mm/debug.c5
-rw-r--r--mm/filemap.c69
-rw-r--r--mm/huge_memory.c81
-rw-r--r--mm/hugetlb.c53
-rw-r--r--mm/internal.h3
-rw-r--r--mm/ksm.c7
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/memcontrol.c154
-rw-r--r--mm/memory.c21
-rw-r--r--mm/memory_hotplug.c4
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mincore.c5
-rw-r--r--mm/mlock.c52
-rw-r--r--mm/mmap.c238
-rw-r--r--mm/mprotect.c3
-rw-r--r--mm/nobootmem.c20
-rw-r--r--mm/oom_kill.c381
-rw-r--r--mm/page-writeback.c34
-rw-r--r--mm/page_alloc.c281
-rw-r--r--mm/page_ext.c45
-rw-r--r--mm/page_io.c7
-rw-r--r--mm/page_isolation.c2
-rw-r--r--mm/page_owner.c156
-rw-r--r--mm/shmem.c117
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_state.c14
-rw-r--r--mm/swapfile.c137
-rw-r--r--mm/vmacache.c8
-rw-r--r--mm/vmalloc.c22
-rw-r--r--mm/vmscan.c53
-rw-r--r--mm/vmstat.c95
-rw-r--r--net/6lowpan/ndisc.c2
-rw-r--r--net/9p/trans_rdma.c2
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/lec.c12
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/batman-adv/Kconfig15
-rw-r--r--net/batman-adv/Makefile4
-rw-r--r--net/batman-adv/bat_algo.c70
-rw-r--r--net/batman-adv/bat_algo.h3
-rw-r--r--net/batman-adv/bat_iv_ogm.c837
-rw-r--r--net/batman-adv/bat_v.c734
-rw-r--r--net/batman-adv/bat_v_ogm.c5
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c348
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h17
-rw-r--r--net/batman-adv/debugfs.c18
-rw-r--r--net/batman-adv/debugfs.h2
-rw-r--r--net/batman-adv/distributed-arp-table.c4
-rw-r--r--net/batman-adv/gateway_client.c285
-rw-r--r--net/batman-adv/gateway_client.h7
-rw-r--r--net/batman-adv/gateway_common.c5
-rw-r--r--net/batman-adv/hard-interface.c84
-rw-r--r--net/batman-adv/icmp_socket.h18
-rw-r--r--net/batman-adv/main.c19
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/batman-adv/netlink.c221
-rw-r--r--net/batman-adv/netlink.h6
-rw-r--r--net/batman-adv/network-coding.c11
-rw-r--r--net/batman-adv/originator.c172
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/packet.h36
-rw-r--r--net/batman-adv/routing.c43
-rw-r--r--net/batman-adv/send.c136
-rw-r--r--net/batman-adv/send.h6
-rw-r--r--net/batman-adv/soft-interface.c51
-rw-r--r--net/batman-adv/sysfs.c183
-rw-r--r--net/batman-adv/translation-table.c556
-rw-r--r--net/batman-adv/translation-table.h7
-rw-r--r--net/batman-adv/tvlv.c9
-rw-r--r--net/batman-adv/types.h69
-rw-r--r--net/bluetooth/af_bluetooth.c15
-rw-r--r--net/bluetooth/hci_core.c1
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h5
-rw-r--r--net/bluetooth/hci_sock.c396
-rw-r--r--net/bluetooth/leds.c27
-rw-r--r--net/bluetooth/leds.h10
-rw-r--r--net/bluetooth/mgmt.c353
-rw-r--r--net/bluetooth/mgmt_util.c66
-rw-r--r--net/bluetooth/smp.c5
-rw-r--r--net/bridge/Makefile2
-rw-r--r--net/bridge/br.c6
-rw-r--r--net/bridge/br_device.c8
-rw-r--r--net/bridge/br_fdb.c23
-rw-r--r--net/bridge/br_forward.c10
-rw-r--r--net/bridge/br_if.c12
-rw-r--r--net/bridge/br_input.c42
-rw-r--r--net/bridge/br_netfilter_hooks.c53
-rw-r--r--net/bridge/br_netfilter_ipv6.c12
-rw-r--r--net/bridge/br_netlink.c132
-rw-r--r--net/bridge/br_private.h46
-rw-r--r--net/bridge/br_stp_if.c43
-rw-r--r--net/bridge/br_switchdev.c57
-rw-r--r--net/bridge/br_sysfs_if.c1
-rw-r--r--net/bridge/netfilter/ebt_log.c2
-rw-r--r--net/bridge/netfilter/ebt_redirect.c2
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/bridge/netfilter/nf_log_bridge.c3
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c92
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c44
-rw-r--r--net/core/dev.c159
-rw-r--r--net/core/drop_monitor.c2
-rw-r--r--net/core/filter.c472
-rw-r--r--net/core/flow_dissector.c164
-rw-r--r--net/core/lwtunnel.c35
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net_namespace.c88
-rw-r--r--net/core/pktgen.c21
-rw-r--r--net/core/rtnetlink.c305
-rw-r--r--net/core/skbuff.c150
-rw-r--r--net/core/sock.c32
-rw-r--r--net/core/stream.c1
-rw-r--r--net/dsa/Kconfig3
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/dsa.c89
-rw-r--r--net/dsa/dsa2.c26
-rw-r--r--net/dsa/dsa_priv.h2
-rw-r--r--net/dsa/slave.c222
-rw-r--r--net/dsa/tag_qca.c138
-rw-r--r--net/ipv4/Kconfig18
-rw-r--r--net/ipv4/Makefile3
-rw-r--r--net/ipv4/af_inet.c37
-rw-r--r--net/ipv4/fib_frontend.c36
-rw-r--r--net/ipv4/fib_rules.c15
-rw-r--r--net/ipv4/fib_semantics.c3
-rw-r--r--net/ipv4/fib_trie.c176
-rw-r--r--net/ipv4/fou.c2
-rw-r--r--net/ipv4/gre_offload.c6
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/inet_diag.c107
-rw-r--r--net/ipv4/ip_gre.c23
-rw-r--r--net/ipv4/ip_output.c21
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/ip_tunnel.c76
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/ipconfig.c71
-rw-r--r--net/ipv4/ipip.c35
-rw-r--r--net/ipv4/netfilter/Kconfig11
-rw-r--r--net/ipv4/netfilter/Makefile5
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c72
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c492
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c41
-rw-r--r--net/ipv4/netfilter/nf_dup_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_log_arp.c7
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c13
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c13
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c7
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c5
-rw-r--r--net/ipv4/ping.c15
-rw-r--r--net/ipv4/proc.c103
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c28
-rw-r--r--net/ipv4/tcp.c89
-rw-r--r--net/ipv4/tcp_bbr.c896
-rw-r--r--net/ipv4/tcp_cdg.c12
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_input.c531
-rw-r--r--net/ipv4/tcp_ipv4.c41
-rw-r--r--net/ipv4/tcp_metrics.c2
-rw-r--r--net/ipv4/tcp_minisocks.c6
-rw-r--r--net/ipv4/tcp_offload.c13
-rw-r--r--net/ipv4/tcp_output.c102
-rw-r--r--net/ipv4/tcp_rate.c186
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp.c23
-rw-r--r--net/ipv4/udp_diag.c89
-rw-r--r--net/ipv4/udp_offload.c6
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c94
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/ila/ila_common.c1
-rw-r--r--net/ipv6/ila/ila_lwt.c2
-rw-r--r--net/ipv6/ila/ila_xlat.c2
-rw-r--r--net/ipv6/ip6_fib.c6
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/ip6_offload.c5
-rw-r--r--net/ipv6/ip6_output.c27
-rw-r--r--net/ipv6/ip6_tunnel.c188
-rw-r--r--net/ipv6/ip6_vti.c10
-rw-r--r--net/ipv6/mcast.c10
-rw-r--r--net/ipv6/ndisc.c11
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c21
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c9
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c4
-rw-r--r--net/ipv6/output_core.c7
-rw-r--r--net/ipv6/proc.c30
-rw-r--r--net/ipv6/raw.c7
-rw-r--r--net/ipv6/route.c41
-rw-r--r--net/ipv6/sit.c12
-rw-r--r--net/ipv6/tcp_ipv6.c27
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/kcm/Kconfig1
-rw-r--r--net/kcm/kcmproc.c58
-rw-r--r--net/kcm/kcmsock.c499
-rw-r--r--net/l2tp/l2tp_core.h2
-rw-r--r--net/l2tp/l2tp_eth.c6
-rw-r--r--net/l2tp/l2tp_netlink.c2
-rw-r--r--net/l2tp/l2tp_ppp.c24
-rw-r--r--net/l3mdev/l3mdev.c105
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/agg-rx.c11
-rw-r--r--net/mac80211/cfg.c243
-rw-r--r--net/mac80211/chan.c6
-rw-r--r--net/mac80211/debugfs.c160
-rw-r--r--net/mac80211/debugfs_netdev.c49
-rw-r--r--net/mac80211/debugfs_sta.c56
-rw-r--r--net/mac80211/driver-ops.c17
-rw-r--r--net/mac80211/driver-ops.h109
-rw-r--r--net/mac80211/ieee80211_i.h39
-rw-r--r--net/mac80211/iface.c49
-rw-r--r--net/mac80211/main.c11
-rw-r--r--net/mac80211/mesh_hwmp.c27
-rw-r--r--net/mac80211/mesh_sync.c12
-rw-r--r--net/mac80211/mlme.c12
-rw-r--r--net/mac80211/offchannel.c4
-rw-r--r--net/mac80211/pm.c3
-rw-r--r--net/mac80211/rx.c83
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/mac80211/sta_info.c92
-rw-r--r--net/mac80211/sta_info.h24
-rw-r--r--net/mac80211/status.c15
-rw-r--r--net/mac80211/trace.h159
-rw-r--r--net/mac80211/tx.c469
-rw-r--r--net/mac80211/util.c64
-rw-r--r--net/mac802154/iface.c1
-rw-r--r--net/mac802154/rx.c9
-rw-r--r--net/mpls/af_mpls.c5
-rw-r--r--net/mpls/internal.h10
-rw-r--r--net/mpls/mpls_gso.c40
-rw-r--r--net/mpls/mpls_iptunnel.c13
-rw-r--r--net/ncsi/internal.h22
-rw-r--r--net/ncsi/ncsi-aen.c37
-rw-r--r--net/ncsi/ncsi-cmd.c2
-rw-r--r--net/ncsi/ncsi-manage.c198
-rw-r--r--net/ncsi/ncsi-rsp.c4
-rw-r--r--net/netfilter/Kconfig22
-rw-r--r--net/netfilter/Makefile10
-rw-r--r--net/netfilter/core.c210
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c7
-rw-r--r--net/netfilter/nf_conntrack_core.c245
-rw-r--r--net/netfilter/nf_conntrack_ecache.c22
-rw-r--r--net/netfilter/nf_conntrack_ftp.c17
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c2
-rw-r--r--net/netfilter/nf_conntrack_helper.c17
-rw-r--r--net/netfilter/nf_conntrack_netlink.c50
-rw-r--r--net/netfilter/nf_conntrack_pptp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto.c81
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c39
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c14
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c89
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c131
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c53
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c3
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c20
-rw-r--r--net/netfilter/nf_conntrack_sip.c10
-rw-r--r--net/netfilter/nf_conntrack_standalone.c16
-rw-r--r--net/netfilter/nf_internals.h10
-rw-r--r--net/netfilter/nf_log.c14
-rw-r--r--net/netfilter/nf_log_common.c4
-rw-r--r--net/netfilter/nf_nat_core.c6
-rw-r--r--net/netfilter/nf_queue.c18
-rw-r--r--net/netfilter/nf_tables_api.c228
-rw-r--r--net/netfilter/nf_tables_core.c16
-rw-r--r--net/netfilter/nf_tables_inet.c5
-rw-r--r--net/netfilter/nf_tables_netdev.c101
-rw-r--r--net/netfilter/nf_tables_trace.c20
-rw-r--r--net/netfilter/nfnetlink_cthelper.c2
-rw-r--r--net/netfilter/nfnetlink_log.c8
-rw-r--r--net/netfilter/nfnetlink_queue.c19
-rw-r--r--net/netfilter/nft_bitwise.c8
-rw-r--r--net/netfilter/nft_byteorder.c15
-rw-r--r--net/netfilter/nft_cmp.c3
-rw-r--r--net/netfilter/nft_ct.c21
-rw-r--r--net/netfilter/nft_dynset.c20
-rw-r--r--net/netfilter/nft_exthdr.c12
-rw-r--r--net/netfilter/nft_hash.c424
-rw-r--r--net/netfilter/nft_immediate.c4
-rw-r--r--net/netfilter/nft_limit.c4
-rw-r--r--net/netfilter/nft_log.c9
-rw-r--r--net/netfilter/nft_lookup.c2
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/nft_numgen.c212
-rw-r--r--net/netfilter/nft_payload.c4
-rw-r--r--net/netfilter/nft_queue.c113
-rw-r--r--net/netfilter/nft_quota.c121
-rw-r--r--net/netfilter/nft_range.c138
-rw-r--r--net/netfilter/nft_set_hash.c404
-rw-r--r--net/netfilter/nft_set_rbtree.c (renamed from net/netfilter/nft_rbtree.c)12
-rw-r--r--net/netfilter/xt_RATEEST.c6
-rw-r--r--net/netfilter/xt_TCPMSS.c12
-rw-r--r--net/netfilter/xt_TEE.c8
-rw-r--r--net/netfilter/xt_connlimit.c8
-rw-r--r--net/netfilter/xt_conntrack.c4
-rw-r--r--net/netfilter/xt_hashlimit.c340
-rw-r--r--net/netfilter/xt_helper.c4
-rw-r--r--net/netfilter/xt_physdev.c4
-rw-r--r--net/netfilter/xt_recent.c7
-rw-r--r--net/netfilter/xt_sctp.c2
-rw-r--r--net/netlink/diag.c102
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/actions.c79
-rw-r--r--net/openvswitch/conntrack.c2
-rw-r--r--net/openvswitch/datapath.c25
-rw-r--r--net/openvswitch/flow.c118
-rw-r--r--net/openvswitch/flow.h12
-rw-r--r--net/openvswitch/flow_netlink.c316
-rw-r--r--net/openvswitch/flow_netlink.h3
-rw-r--r--net/openvswitch/flow_table.c25
-rw-r--r--net/openvswitch/vport.c7
-rw-r--r--net/rds/ib.c2
-rw-r--r--net/rds/ib.h1
-rw-r--r--net/rds/rds.h1
-rw-r--r--net/rxrpc/Kconfig14
-rw-r--r--net/rxrpc/Makefile1
-rw-r--r--net/rxrpc/af_rxrpc.c175
-rw-r--r--net/rxrpc/ar-internal.h832
-rw-r--r--net/rxrpc/call_accept.c717
-rw-r--r--net/rxrpc/call_event.c1426
-rw-r--r--net/rxrpc/call_object.c796
-rw-r--r--net/rxrpc/conn_client.c993
-rw-r--r--net/rxrpc/conn_event.c271
-rw-r--r--net/rxrpc/conn_object.c204
-rw-r--r--net/rxrpc/conn_service.c117
-rw-r--r--net/rxrpc/input.c1399
-rw-r--r--net/rxrpc/insecure.c26
-rw-r--r--net/rxrpc/local_event.c19
-rw-r--r--net/rxrpc/local_object.c51
-rw-r--r--net/rxrpc/misc.c192
-rw-r--r--net/rxrpc/output.c933
-rw-r--r--net/rxrpc/peer_event.c103
-rw-r--r--net/rxrpc/peer_object.c199
-rw-r--r--net/rxrpc/proc.c72
-rw-r--r--net/rxrpc/recvmsg.c866
-rw-r--r--net/rxrpc/rxkad.c209
-rw-r--r--net/rxrpc/security.c18
-rw-r--r--net/rxrpc/sendmsg.c606
-rw-r--r--net/rxrpc/skbuff.c174
-rw-r--r--net/rxrpc/sysctl.c45
-rw-r--r--net/rxrpc/utils.c2
-rw-r--r--net/sched/Kconfig27
-rw-r--r--net/sched/Makefile3
-rw-r--r--net/sched/act_api.c36
-rw-r--r--net/sched/act_bpf.c5
-rw-r--r--net/sched/act_csum.c36
-rw-r--r--net/sched/act_gact.c3
-rw-r--r--net/sched/act_ife.c26
-rw-r--r--net/sched/act_meta_skbtcindex.c79
-rw-r--r--net/sched/act_mirred.c11
-rw-r--r--net/sched/act_police.c12
-rw-r--r--net/sched/act_skbmod.c301
-rw-r--r--net/sched/act_tunnel_key.c342
-rw-r--r--net/sched/act_vlan.c51
-rw-r--r--net/sched/cls_api.c18
-rw-r--r--net/sched/cls_basic.c12
-rw-r--r--net/sched/cls_bpf.c153
-rw-r--r--net/sched/cls_cgroup.c13
-rw-r--r--net/sched/cls_flow.c53
-rw-r--r--net/sched/cls_flower.c232
-rw-r--r--net/sched/cls_fw.c28
-rw-r--r--net/sched/cls_route.c24
-rw-r--r--net/sched/cls_rsvp.h17
-rw-r--r--net/sched/cls_tcindex.c102
-rw-r--r--net/sched/cls_u32.c51
-rw-r--r--net/sched/sch_api.c84
-rw-r--r--net/sched/sch_codel.c4
-rw-r--r--net/sched/sch_fifo.c4
-rw-r--r--net/sched/sch_fq.c71
-rw-r--r--net/sched/sch_generic.c36
-rw-r--r--net/sched/sch_hfsc.c51
-rw-r--r--net/sched/sch_htb.c24
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c2
-rw-r--r--net/sched/sch_netem.c20
-rw-r--r--net/sched/sch_pie.c4
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sctp/chunk.c26
-rw-r--r--net/sctp/input.c8
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/output.c62
-rw-r--r--net/sctp/outqueue.c99
-rw-r--r--net/sctp/proc.c10
-rw-r--r--net/sctp/sctp_diag.c20
-rw-r--r--net/sctp/sm_make_chunk.c28
-rw-r--r--net/sctp/sm_sideeffect.c25
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/sctp/transport.c4
-rw-r--r--net/sctp/ulpevent.c4
-rw-r--r--net/sctp/ulpqueue.c3
-rw-r--r--net/strparser/Kconfig4
-rw-r--r--net/strparser/Makefile1
-rw-r--r--net/strparser/strparser.c510
-rw-r--r--net/sunrpc/auth_generic.c4
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/auth_unix.c4
-rw-r--r--net/sunrpc/svcauth_unix.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/switchdev/switchdev.c278
-rw-r--r--net/sysctl_net.c33
-rw-r--r--net/tipc/bcast.c8
-rw-r--r--net/tipc/bcast.h4
-rw-r--r--net/tipc/bearer.c130
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/link.c149
-rw-r--r--net/tipc/link.h6
-rw-r--r--net/tipc/msg.h10
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/netlink.c18
-rw-r--r--net/tipc/node.c95
-rw-r--r--net/tipc/node.h12
-rw-r--r--net/tipc/udp_media.c522
-rw-r--r--net/tipc/udp_media.h46
-rw-r--r--net/unix/af_unix.c17
-rw-r--r--net/wireless/chan.c2
-rw-r--r--net/wireless/core.c43
-rw-r--r--net/wireless/core.h9
-rw-r--r--net/wireless/ibss.c14
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/nl80211.c1355
-rw-r--r--net/wireless/nl80211.h3
-rw-r--r--net/wireless/rdev-ops.h58
-rw-r--r--net/wireless/scan.c58
-rw-r--r--net/wireless/sme.c9
-rw-r--r--net/wireless/sysfs.c2
-rw-r--r--net/wireless/trace.h90
-rw-r--r--net/wireless/util.c43
-rw-r--r--net/wireless/wext-compat.c21
-rw-r--r--net/wireless/wext-sme.c5
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/xfrm/xfrm_algo.c2
-rw-r--r--net/xfrm/xfrm_policy.c145
-rw-r--r--net/xfrm/xfrm_proc.c10
-rw-r--r--net/xfrm/xfrm_replay.c6
-rw-r--r--net/xfrm/xfrm_state.c125
-rw-r--r--net/xfrm/xfrm_sysctl.c4
-rw-r--r--samples/bpf/Makefile14
-rw-r--r--samples/bpf/bpf_helpers.h16
-rw-r--r--samples/bpf/bpf_load.c7
-rw-r--r--samples/bpf/libbpf.h8
-rw-r--r--samples/bpf/sampleip_kern.c38
-rw-r--r--samples/bpf/sampleip_user.c196
-rw-r--r--samples/bpf/sockex2_kern.c10
-rw-r--r--samples/bpf/sockex3_kern.c8
-rw-r--r--samples/bpf/sockex3_user.c4
-rw-r--r--samples/bpf/tcbpf2_kern.c381
-rw-r--r--samples/bpf/test_current_task_under_cgroup_kern.c43
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c145
-rwxr-xr-xsamples/bpf/test_ipip.sh178
-rwxr-xr-xsamples/bpf/test_tunnel_bpf.sh167
-rw-r--r--samples/bpf/test_verifier.c888
-rw-r--r--samples/bpf/trace_event_kern.c65
-rw-r--r--samples/bpf/trace_event_user.c213
-rw-r--r--samples/bpf/tracex5_kern.c16
-rw-r--r--samples/bpf/tracex5_user.c3
-rw-r--r--samples/rpmsg/rpmsg_client_sample.c32
-rw-r--r--scripts/Makefile.ubsan5
-rwxr-xr-xscripts/checkkconfigsymbols.py338
-rwxr-xr-xscripts/kernel-doc48
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--scripts/recordmcount.c1
-rwxr-xr-xscripts/recordmcount.pl1
-rw-r--r--scripts/spelling.txt1
-rwxr-xr-xscripts/tracing/ftrace-bisect.sh115
-rwxr-xr-xscripts/ver_linux260
-rw-r--r--security/inode.c7
-rw-r--r--security/lsm_audit.c21
-rw-r--r--security/security.c27
-rw-r--r--security/selinux/Kconfig38
-rw-r--r--security/selinux/hooks.c106
-rw-r--r--security/selinux/include/security.h4
-rw-r--r--security/selinux/ss/conditional.c2
-rw-r--r--security/selinux/ss/ebitmap.c3
-rw-r--r--security/selinux/ss/policydb.c16
-rw-r--r--security/smack/Kconfig12
-rw-r--r--security/smack/smack.h10
-rw-r--r--security/smack/smack_lsm.c14
-rw-r--r--security/smack/smack_netfilter.c4
-rw-r--r--security/smack/smackfs.c11
-rw-r--r--sound/aoa/core/gpio-feature.c4
-rw-r--r--sound/aoa/fabrics/layout.c15
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c2
-rw-r--r--sound/core/compress_offload.c14
-rw-r--r--sound/core/pcm_native.c4
-rw-r--r--sound/core/seq/seq_clientmgr.c722
-rw-r--r--sound/core/seq/seq_compat.c7
-rw-r--r--sound/firewire/bebob/Makefile5
-rw-r--r--sound/firewire/bebob/bebob.c8
-rw-r--r--sound/firewire/bebob/bebob.h3
-rw-r--r--sound/firewire/bebob/bebob_terratec.c31
-rw-r--r--sound/firewire/bebob/bebob_yamaha_terratec.c (renamed from sound/firewire/bebob/bebob_yamaha.c)9
-rw-r--r--sound/firewire/dice/dice-pcm.c4
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c4
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c4
-rw-r--r--sound/firewire/tascam/tascam-pcm.c4
-rw-r--r--sound/hda/ext/hdac_ext_controller.c91
-rw-r--r--sound/hda/ext/hdac_ext_stream.c56
-rw-r--r--sound/hda/hdac_controller.c75
-rw-r--r--sound/pci/ad1889.c4
-rw-r--r--sound/pci/ali5451/ali5451.c2
-rw-r--r--sound/pci/als300.c4
-rw-r--r--sound/pci/als4000.c4
-rw-r--r--sound/pci/asihpi/asihpi.c4
-rw-r--r--sound/pci/asihpi/hpifunc.c7
-rw-r--r--sound/pci/atiixp.c6
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0_core.c2
-rw-r--r--sound/pci/aw2/aw2-alsa.c4
-rw-r--r--sound/pci/azt3328.c6
-rw-r--r--sound/pci/bt87x.c2
-rw-r--r--sound/pci/ca0106/ca0106_main.c16
-rw-r--r--sound/pci/cmipci.c10
-rw-r--r--sound/pci/cs4281.c4
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c4
-rw-r--r--sound/pci/cs46xx/dsp_spos.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c4
-rw-r--r--sound/pci/ctxfi/ctatc.c2
-rw-r--r--sound/pci/ctxfi/ctpcm.c4
-rw-r--r--sound/pci/ctxfi/ctvmem.c6
-rw-r--r--sound/pci/emu10k1/emu10k1x.c4
-rw-r--r--sound/pci/emu10k1/emupcm.c12
-rw-r--r--sound/pci/emu10k1/p16v.c20
-rw-r--r--sound/pci/ens1370.c6
-rw-r--r--sound/pci/es1938.c4
-rw-r--r--sound/pci/es1968.c4
-rw-r--r--sound/pci/fm801.c4
-rw-r--r--sound/pci/hda/hda_controller.c205
-rw-r--r--sound/pci/hda/hda_controller.h3
-rw-r--r--sound/pci/hda/hda_intel.c17
-rw-r--r--sound/pci/hda/patch_ca0132.c10
-rw-r--r--sound/pci/hda/patch_conexant.c10
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--sound/pci/ice1712/ice1712.c10
-rw-r--r--sound/pci/ice1712/ice1724.c14
-rw-r--r--sound/pci/korg1212/korg1212.c4
-rw-r--r--sound/pci/lola/lola_pcm.c2
-rw-r--r--sound/pci/lx6464es/lx6464es.c4
-rw-r--r--sound/pci/maestro3.c4
-rw-r--r--sound/pci/mixart/mixart.c4
-rw-r--r--sound/pci/nm256/nm256.c4
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c12
-rw-r--r--sound/pci/pcxhr/pcxhr.c2
-rw-r--r--sound/pci/riptide/riptide.c6
-rw-r--r--sound/pci/rme32.c16
-rw-r--r--sound/pci/rme96.c8
-rw-r--r--sound/pci/rme9652/hdsp.c4
-rw-r--r--sound/pci/rme9652/hdspm.c4
-rw-r--r--sound/pci/rme9652/rme9652.c4
-rw-r--r--sound/pci/sonicvibes.c4
-rw-r--r--sound/pci/trident/trident_main.c12
-rw-r--r--sound/pci/via82xx.c10
-rw-r--r--sound/pci/via82xx_modem.c4
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c10
-rw-r--r--sound/ppc/snd_ps3.c4
-rw-r--r--sound/ppc/tumbler.c8
-rw-r--r--sound/soc/amd/acp-pcm-dma.c2
-rw-r--r--sound/soc/atmel/atmel-classd.c6
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c2
-rw-r--r--sound/soc/atmel/atmel-pdmic.c20
-rw-r--r--sound/soc/codecs/88pm860x-codec.c14
-rw-r--r--sound/soc/codecs/Kconfig18
-rw-r--r--sound/soc/codecs/Makefile6
-rw-r--r--sound/soc/codecs/ab8500-codec.c24
-rw-r--r--sound/soc/codecs/ac97.c10
-rw-r--r--sound/soc/codecs/ad1836.c14
-rw-r--r--sound/soc/codecs/ad193x.c14
-rw-r--r--sound/soc/codecs/ad1980.c14
-rw-r--r--sound/soc/codecs/ad73311.c10
-rw-r--r--sound/soc/codecs/adau1373.c14
-rw-r--r--sound/soc/codecs/adau1701.c15
-rw-r--r--sound/soc/codecs/adau1761.c14
-rw-r--r--sound/soc/codecs/adau1781.c14
-rw-r--r--sound/soc/codecs/adau1977.c14
-rw-r--r--sound/soc/codecs/adau7002.c10
-rw-r--r--sound/soc/codecs/adav80x.c14
-rw-r--r--sound/soc/codecs/ads117x.c10
-rw-r--r--sound/soc/codecs/ak4104.c15
-rw-r--r--sound/soc/codecs/ak4535.c14
-rw-r--r--sound/soc/codecs/ak4554.c10
-rw-r--r--sound/soc/codecs/ak4613.c14
-rw-r--r--sound/soc/codecs/ak4641.c14
-rw-r--r--sound/soc/codecs/ak4642.c14
-rw-r--r--sound/soc/codecs/ak4671.c14
-rw-r--r--sound/soc/codecs/ak5386.c10
-rw-r--r--sound/soc/codecs/alc5632.c14
-rw-r--r--sound/soc/codecs/arizona.c78
-rw-r--r--sound/soc/codecs/arizona.h17
-rw-r--r--sound/soc/codecs/bt-sco.c10
-rw-r--r--sound/soc/codecs/cq93vc.c6
-rw-r--r--sound/soc/codecs/cs35l32.c15
-rw-r--r--sound/soc/codecs/cs35l33.c15
-rw-r--r--sound/soc/codecs/cs4265.c15
-rw-r--r--sound/soc/codecs/cs4270.c14
-rw-r--r--sound/soc/codecs/cs4271.c14
-rw-r--r--sound/soc/codecs/cs42l51.c14
-rw-r--r--sound/soc/codecs/cs42l52.c15
-rw-r--r--sound/soc/codecs/cs42l56.c15
-rw-r--r--sound/soc/codecs/cs42l73.c19
-rw-r--r--sound/soc/codecs/cs42xx8.c14
-rw-r--r--sound/soc/codecs/cs4349.c15
-rw-r--r--sound/soc/codecs/cs47l24.c26
-rw-r--r--sound/soc/codecs/cs53l30.c23
-rw-r--r--sound/soc/codecs/cx20442.c10
-rw-r--r--sound/soc/codecs/da7210.c15
-rw-r--r--sound/soc/codecs/da7213.c145
-rw-r--r--sound/soc/codecs/da7213.h12
-rw-r--r--sound/soc/codecs/da7218.c49
-rw-r--r--sound/soc/codecs/da7218.h2
-rw-r--r--sound/soc/codecs/da7219-aad.c108
-rw-r--r--sound/soc/codecs/da7219-aad.h11
-rw-r--r--sound/soc/codecs/da7219.c95
-rw-r--r--sound/soc/codecs/da7219.h8
-rw-r--r--sound/soc/codecs/da732x.c14
-rw-r--r--sound/soc/codecs/da9055.c15
-rw-r--r--sound/soc/codecs/dmic.c10
-rw-r--r--sound/soc/codecs/es8328.c14
-rw-r--r--sound/soc/codecs/gtm601.c10
-rw-r--r--sound/soc/codecs/hdac_hdmi.c2
-rw-r--r--sound/soc/codecs/hdmi-codec.c79
-rw-r--r--sound/soc/codecs/inno_rk3036.c14
-rw-r--r--sound/soc/codecs/isabelle.c14
-rw-r--r--sound/soc/codecs/jz4740.c14
-rw-r--r--sound/soc/codecs/l3.c71
-rw-r--r--sound/soc/codecs/lm49453.c14
-rw-r--r--sound/soc/codecs/max98088.c14
-rw-r--r--sound/soc/codecs/max98095.c14
-rw-r--r--sound/soc/codecs/max98357a.c10
-rw-r--r--sound/soc/codecs/max98371.c1
-rw-r--r--sound/soc/codecs/max9850.c14
-rw-r--r--sound/soc/codecs/max9860.c14
-rw-r--r--sound/soc/codecs/max9867.c19
-rw-r--r--sound/soc/codecs/max98925.c14
-rw-r--r--sound/soc/codecs/max98926.c20
-rw-r--r--sound/soc/codecs/mc13783.c14
-rw-r--r--sound/soc/codecs/ml26124.c14
-rw-r--r--sound/soc/codecs/nau8810.c884
-rw-r--r--sound/soc/codecs/nau8810.h281
-rw-r--r--sound/soc/codecs/nau8825.c16
-rw-r--r--sound/soc/codecs/pcm1681.c14
-rw-r--r--sound/soc/codecs/pcm179x.c14
-rw-r--r--sound/soc/codecs/pcm3008.c10
-rw-r--r--sound/soc/codecs/pcm3168a.c14
-rw-r--r--sound/soc/codecs/pcm512x.c14
-rw-r--r--sound/soc/codecs/rt286.c14
-rw-r--r--sound/soc/codecs/rt298.c14
-rw-r--r--sound/soc/codecs/rt5514-spi.c2
-rw-r--r--sound/soc/codecs/rt5514.c20
-rw-r--r--sound/soc/codecs/rt5514.h4
-rw-r--r--sound/soc/codecs/rt5616.c23
-rw-r--r--sound/soc/codecs/rt5631.c14
-rw-r--r--sound/soc/codecs/rt5640.c17
-rw-r--r--sound/soc/codecs/rt5640.h1
-rw-r--r--sound/soc/codecs/rt5645.c14
-rw-r--r--sound/soc/codecs/rt5651.c14
-rw-r--r--sound/soc/codecs/rt5659.c41
-rw-r--r--sound/soc/codecs/rt5659.h7
-rw-r--r--sound/soc/codecs/rt5660.c1353
-rw-r--r--sound/soc/codecs/rt5660.h847
-rw-r--r--sound/soc/codecs/rt5663.c3218
-rw-r--r--sound/soc/codecs/rt5663.h1121
-rw-r--r--sound/soc/codecs/rt5670.c14
-rw-r--r--sound/soc/codecs/rt5677.c69
-rw-r--r--sound/soc/codecs/sgtl5000.c17
-rw-r--r--sound/soc/codecs/si476x.c10
-rw-r--r--sound/soc/codecs/sn95031.c14
-rw-r--r--sound/soc/codecs/spdif_receiver.c10
-rw-r--r--sound/soc/codecs/spdif_transmitter.c10
-rw-r--r--sound/soc/codecs/ssm2518.c14
-rw-r--r--sound/soc/codecs/ssm2602.c14
-rw-r--r--sound/soc/codecs/ssm4567.c14
-rw-r--r--sound/soc/codecs/sta32x.c14
-rw-r--r--sound/soc/codecs/sta350.c14
-rw-r--r--sound/soc/codecs/sta529.c6
-rw-r--r--sound/soc/codecs/stac9766.c14
-rw-r--r--sound/soc/codecs/sti-sas.c8
-rw-r--r--sound/soc/codecs/tas2552.c14
-rw-r--r--sound/soc/codecs/tas5086.c16
-rw-r--r--sound/soc/codecs/tas571x.c14
-rw-r--r--sound/soc/codecs/tas5720.c14
-rw-r--r--sound/soc/codecs/tfa9879.c15
-rw-r--r--sound/soc/codecs/tlv320aic23.c14
-rw-r--r--sound/soc/codecs/tlv320aic26.c14
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c218
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h2
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c14
-rw-r--r--sound/soc/codecs/tlv320aic3x.c14
-rw-r--r--sound/soc/codecs/tlv320dac33.c31
-rw-r--r--sound/soc/codecs/tpa6130a2.c49
-rw-r--r--sound/soc/codecs/twl4030.c14
-rw-r--r--sound/soc/codecs/twl6040.c14
-rw-r--r--sound/soc/codecs/uda134x.c17
-rw-r--r--sound/soc/codecs/uda1380.c14
-rw-r--r--sound/soc/codecs/wl1273.c14
-rw-r--r--sound/soc/codecs/wm0010.c12
-rw-r--r--sound/soc/codecs/wm1250-ev1.c13
-rw-r--r--sound/soc/codecs/wm2000.c16
-rw-r--r--sound/soc/codecs/wm2200.c16
-rw-r--r--sound/soc/codecs/wm5100.c18
-rw-r--r--sound/soc/codecs/wm5102.c27
-rw-r--r--sound/soc/codecs/wm5110.c28
-rw-r--r--sound/soc/codecs/wm8350.c16
-rw-r--r--sound/soc/codecs/wm8400.c16
-rw-r--r--sound/soc/codecs/wm8510.c16
-rw-r--r--sound/soc/codecs/wm8523.c16
-rw-r--r--sound/soc/codecs/wm8580.c16
-rw-r--r--sound/soc/codecs/wm8711.c16
-rw-r--r--sound/soc/codecs/wm8727.c12
-rw-r--r--sound/soc/codecs/wm8728.c16
-rw-r--r--sound/soc/codecs/wm8731.c16
-rw-r--r--sound/soc/codecs/wm8737.c16
-rw-r--r--sound/soc/codecs/wm8741.c12
-rw-r--r--sound/soc/codecs/wm8750.c16
-rw-r--r--sound/soc/codecs/wm8753.c16
-rw-r--r--sound/soc/codecs/wm8770.c16
-rw-r--r--sound/soc/codecs/wm8776.c16
-rw-r--r--sound/soc/codecs/wm8782.c12
-rw-r--r--sound/soc/codecs/wm8804.c10
-rw-r--r--sound/soc/codecs/wm8900.c16
-rw-r--r--sound/soc/codecs/wm8903.c18
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8940.c16
-rw-r--r--sound/soc/codecs/wm8955.c16
-rw-r--r--sound/soc/codecs/wm8960.c7
-rw-r--r--sound/soc/codecs/wm8961.c16
-rw-r--r--sound/soc/codecs/wm8962.c6
-rw-r--r--sound/soc/codecs/wm8971.c16
-rw-r--r--sound/soc/codecs/wm8974.c16
-rw-r--r--sound/soc/codecs/wm8978.c16
-rw-r--r--sound/soc/codecs/wm8983.c16
-rw-r--r--sound/soc/codecs/wm8985.c16
-rw-r--r--sound/soc/codecs/wm8988.c14
-rw-r--r--sound/soc/codecs/wm8990.c16
-rw-r--r--sound/soc/codecs/wm8991.c44
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/codecs/wm8995.c14
-rw-r--r--sound/soc/codecs/wm8996.c18
-rw-r--r--sound/soc/codecs/wm8997.c16
-rw-r--r--sound/soc/codecs/wm8998.c26
-rw-r--r--sound/soc/codecs/wm9081.c16
-rw-r--r--sound/soc/codecs/wm9090.c2
-rw-r--r--sound/soc/codecs/wm9705.c16
-rw-r--r--sound/soc/codecs/wm9712.c16
-rw-r--r--sound/soc/codecs/wm9713.c16
-rw-r--r--sound/soc/codecs/wm_adsp.c88
-rw-r--r--sound/soc/codecs/wm_adsp.h7
-rw-r--r--sound/soc/davinci/davinci-mcasp.c2
-rw-r--r--sound/soc/dwc/designware_i2s.c2
-rw-r--r--sound/soc/fsl/fsl_asrc.c12
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c8
-rw-r--r--sound/soc/fsl/fsl_sai.c6
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/generic/Kconfig8
-rw-r--r--sound/soc/generic/Makefile2
-rw-r--r--sound/soc/generic/simple-card-utils.c141
-rw-r--r--sound/soc/generic/simple-card.c305
-rw-r--r--sound/soc/generic/simple-scu-card.c345
-rw-r--r--sound/soc/img/pistachio-internal-dac.c14
-rw-r--r--sound/soc/intel/Kconfig12
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c32
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.h6
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c2
-rw-r--r--sound/soc/intel/atom/sst/sst.c5
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c97
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c3
-rw-r--r--sound/soc/intel/atom/sst/sst_pvt.c14
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c12
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c347
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c75
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c14
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c507
-rw-r--r--sound/soc/intel/common/sst-acpi.c1
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c2
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c118
-rw-r--r--sound/soc/intel/skylake/skl-messages.c67
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c60
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c4
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h18
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c41
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h12
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c176
-rw-r--r--sound/soc/intel/skylake/skl-sst.c49
-rw-r--r--sound/soc/intel/skylake/skl-topology.c918
-rw-r--r--sound/soc/intel/skylake/skl-topology.h15
-rw-r--r--sound/soc/intel/skylake/skl-tplg-interface.h95
-rw-r--r--sound/soc/intel/skylake/skl.c6
-rw-r--r--sound/soc/intel/skylake/skl.h2
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c5
-rw-r--r--sound/soc/omap/mcbsp.c3
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c14
-rw-r--r--sound/soc/omap/omap-pcm.c2
-rw-r--r--sound/soc/qcom/apq8016_sbc.c20
-rw-r--r--sound/soc/qcom/lpass-platform.c16
-rw-r--r--sound/soc/qcom/lpass.h2
-rw-r--r--sound/soc/rockchip/Kconfig12
-rw-r--r--sound/soc/rockchip/Makefile2
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c397
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c10
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c12
-rw-r--r--sound/soc/samsung/Kconfig57
-rw-r--r--sound/soc/samsung/ac97.c34
-rw-r--r--sound/soc/samsung/dma.h20
-rw-r--r--sound/soc/samsung/dmaengine.c31
-rw-r--r--sound/soc/samsung/i2s.c56
-rw-r--r--sound/soc/samsung/idma.c1
-rw-r--r--sound/soc/samsung/pcm.c26
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c3
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.h4
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c24
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c24
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c134
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c1
-rw-r--r--sound/soc/samsung/spdif.c12
-rw-r--r--sound/soc/sh/Kconfig6
-rw-r--r--sound/soc/sh/rcar/Makefile3
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/sh/rcar/rsrc-card.c486
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/soc-ac97.c2
-rw-r--r--sound/soc/soc-core.c15
-rw-r--r--sound/soc/soc-dapm.c23
-rw-r--r--sound/soc/soc-ops.c2
-rw-r--r--sound/soc/soc-pcm.c13
-rw-r--r--sound/soc/soc-topology.c115
-rw-r--r--sound/soc/soc-utils.c2
-rw-r--r--sound/soc/sti/sti_uniperif.c130
-rw-r--r--sound/soc/sti/uniperif.h28
-rw-r--r--sound/soc/sti/uniperif_player.c83
-rw-r--r--sound/soc/sti/uniperif_reader.c46
-rw-r--r--sound/soc/sunxi/Kconfig1
-rw-r--r--sound/soc/sunxi/sun4i-codec.c60
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c17
-rw-r--r--sound/soc/tegra/Kconfig11
-rw-r--r--sound/soc/tegra/Makefile2
-rw-r--r--sound/soc/tegra/tegra_rt5640.c2
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c212
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c6
-rw-r--r--sound/usb/caiaq/audio.c1
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/endpoint.c13
-rw-r--r--sound/usb/format.c4
-rw-r--r--sound/usb/line6/Kconfig5
-rw-r--r--sound/usb/line6/capture.c50
-rw-r--r--sound/usb/line6/driver.c269
-rw-r--r--sound/usb/line6/driver.h59
-rw-r--r--sound/usb/line6/midi.c2
-rw-r--r--sound/usb/line6/pcm.c83
-rw-r--r--sound/usb/line6/pcm.h20
-rw-r--r--sound/usb/line6/playback.c37
-rw-r--r--sound/usb/line6/pod.c12
-rw-r--r--sound/usb/line6/podhd.c300
-rw-r--r--sound/usb/line6/toneport.c6
-rw-r--r--sound/usb/line6/variax.c6
-rw-r--r--sound/usb/mixer_quirks.c22
-rw-r--r--sound/usb/quirks.c6
-rw-r--r--tools/hv/hv_kvp_daemon.c2
-rw-r--r--tools/hv/hv_vss_daemon.c3
-rw-r--r--tools/iio/iio_utils.c11
-rw-r--r--tools/iio/lsiio.c3
-rw-r--r--tools/spi/Makefile2
-rw-r--r--tools/spi/spidev_test.c3
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/nsfs/Makefile12
-rw-r--r--tools/testing/selftests/nsfs/owner.c91
-rw-r--r--tools/testing/selftests/nsfs/pidns.c78
-rw-r--r--tools/testing/selftests/powerpc/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/fpu_asm.h80
-rw-r--r--tools/testing/selftests/powerpc/gpr_asm.h96
-rw-r--r--tools/testing/selftests/powerpc/harness.c9
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_asm.S73
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_asm.S85
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_asm.S61
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_preempt.c147
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile13
-rw-r--r--tools/testing/selftests/powerpc/signal/signal.S50
-rw-r--r--tools/testing/selftests/powerpc/signal/signal.c111
-rw-r--r--tools/testing/selftests/powerpc/signal/signal_tm.c110
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c92
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c90
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c110
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c125
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal.S114
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h27
-rw-r--r--tools/testing/selftests/powerpc/utils.h9
-rw-r--r--tools/testing/selftests/powerpc/vmx_asm.h96
-rw-r--r--tools/testing/selftests/powerpc/vsx_asm.h71
-rw-r--r--tools/testing/selftests/vm/.gitignore1
-rw-r--r--tools/testing/selftests/vm/Makefile4
-rw-r--r--tools/testing/selftests/vm/mlock-random-test.c293
-rw-r--r--tools/testing/selftests/vm/mlock2-tests.c63
-rw-r--r--tools/testing/selftests/vm/mlock2.h62
-rw-r--r--virt/kvm/arm/aarch32.c (renamed from arch/arm64/kvm/emulate.c)25
-rw-r--r--virt/kvm/arm/arch_timer.c17
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c57
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c (renamed from arch/arm64/kvm/hyp/vgic-v3-sr.c)17
-rw-r--r--virt/kvm/arm/pmu.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c133
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h4
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c71
-rw-r--r--virt/kvm/arm/vgic/vgic.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.h54
-rw-r--r--virt/kvm/eventfd.c22
-rw-r--r--virt/kvm/kvm_main.c50
6421 files changed, 384253 insertions, 204086 deletions
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000000..89c411b5ce6b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+*.c diff=cpp
+*.h diff=cpp
diff --git a/.mailmap b/.mailmap
index 1dab0a156489..2408e56e241b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -75,6 +75,8 @@ Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
+Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<josh@joshtriplett.org> <josh@freedesktop.org>
@@ -160,6 +162,7 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
diff --git a/CREDITS b/CREDITS
index 7cf4e5606d0a..513aaa3546bf 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1090,6 +1090,10 @@ S: 6350 Stoneridge Mall Road
S: Pleasanton, CA 94588
S: USA
+N: Dmitry Eremin-Solenikov
+E: dbaryshkov@gmail.com
+D: Power Supply Maintainer from v3.14 - v3.15
+
N: Doug Evans
E: dje@cygnus.com
D: Wrote Xenix FS (part of standard kernel since 0.99.15)
@@ -1944,6 +1948,11 @@ E: kraxel@bytesex.org
E: kraxel@suse.de
D: video4linux, bttv, vesafb, some scsi, misc fixes
+N: Hans J. Koch
+D: USERSPACE I/O, MAX6650
+D: Hans passed away in June 2016, and will be greatly missed.
+W: https://lwn.net/Articles/691000/
+
N: Harald Koenig
E: koenig@tat.physik.uni-tuebingen.de
D: XFree86 (S3), DCF77, some kernel hacks and fixes
@@ -2287,11 +2296,11 @@ D: Initial implementation of VC's, pty's and select()
N: Pavel Machek
E: pavel@ucw.cz
-D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd
+P: 4096R/92DFCE96 4FA7 9EEF FCD4 C44F C585 B8C7 C060 2241 92DF CE96
+D: Softcursor for vga, hypertech cdrom support, vcsa bugfix, nbd,
D: sun4/330 port, capabilities for elf, speedup for rm on ext2, USB,
-D: work on suspend-to-ram/disk, killing duplicates from ioctl32
-S: Volkova 1131
-S: 198 00 Praha 9
+D: work on suspend-to-ram/disk, killing duplicates from ioctl32,
+D: Altera SoCFPGA and Nokia N900 support.
S: Czech Republic
N: Paul Mackerras
@@ -3518,6 +3527,10 @@ S: 145 Howard St.
S: Northborough, MA 01532
S: USA
+N: Doug Thompson
+E: dougthompson@xmission.com
+D: EDAC
+
N: Tommy Thorn
E: Tommy.Thorn@irisa.fr
W: http://www.irisa.fr/prive/thorn/index.html
@@ -3654,6 +3667,10 @@ S: Obere Heerbergstrasse 17
S: 97078 Wuerzburg
S: Germany
+N: Jason Uhlenkott
+E: juhlenko@akamai.com
+D: I3000 EDAC driver
+
N: Greg Ungerer
E: gerg@snapgear.com
D: uClinux kernel hacker
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 3646ec85d513..86ace287d48b 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -24,7 +24,8 @@ Description:
of led events.
You can change triggers in a similar manner to the way an IO
scheduler is chosen. Trigger specific parameters can appear in
- /sys/class/leds/<led> once a given trigger is selected.
+ /sys/class/leds/<led> once a given trigger is selected. For
+ their documentation see sysfs-class-led-trigger-*.
What: /sys/class/leds/<led>/inverted
Date: January 2011
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-oneshot b/Documentation/ABI/testing/sysfs-class-led-trigger-oneshot
new file mode 100644
index 000000000000..378a3a4df3c8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-oneshot
@@ -0,0 +1,36 @@
+What: /sys/class/leds/<led>/delay_on
+Date: Jun 2012
+KernelVersion: 3.6
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies for how many milliseconds the LED has to stay at
+ LED_FULL brightness after it has been armed.
+ Defaults to 100 ms.
+
+What: /sys/class/leds/<led>/delay_off
+Date: Jun 2012
+KernelVersion: 3.6
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies for how many milliseconds the LED has to stay at
+ LED_OFF brightness after it has been armed.
+ Defaults to 100 ms.
+
+What: /sys/class/leds/<led>/invert
+Date: Jun 2012
+KernelVersion: 3.6
+Contact: linux-leds@vger.kernel.org
+Description:
+ Reverse the blink logic. If set to 0 (default) blink on for
+ delay_on ms, then blink off for delay_off ms, leaving the LED
+ normally off. If set to 1, blink off for delay_off ms, then
+ blink on for delay_on ms, leaving the LED normally on.
+ Setting this value also immediately changes the LED state.
+
+What: /sys/class/leds/<led>/shot
+Date: Jun 2012
+KernelVersion: 3.6
+Contact: linux-leds@vger.kernel.org
+Description:
+ Write any non-empty string to signal an event; this starts a
+ blink sequence if not already running.
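As a quick illustration of the oneshot attributes documented above, here is a minimal user-space sketch in C. It assumes a hypothetical LED named "example" whose trigger has already been set to "oneshot"; the LED name and the chosen delay values are illustrative, not part of this patch.

	/* Minimal sketch: arm and fire the oneshot trigger on a hypothetical LED. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* Stay at LED_FULL for 500 ms, then off for 200 ms, per shot. */
		write_attr("/sys/class/leds/example/delay_on", "500");
		write_attr("/sys/class/leds/example/delay_off", "200");
		/* Any non-empty string fires one blink sequence. */
		return write_attr("/sys/class/leds/example/shot", "1");
	}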
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-usbport b/Documentation/ABI/testing/sysfs-class-led-trigger-usbport
new file mode 100644
index 000000000000..f440e690daef
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-usbport
@@ -0,0 +1,12 @@
+What: /sys/class/leds/<led>/ports/<port>
+Date: September 2016
+KernelVersion: 4.9
+Contact: linux-leds@vger.kernel.org
+ linux-usb@vger.kernel.org
+Description:
+ Every dir entry represents a single USB port that can be
+ selected for the USB port trigger. Selecting a port makes the
+ trigger observe it for connected devices and turn the LED on if
+ any device is connected.
+ Echoing "1" selects the USB port, echoing "0" unselects it.
+ The current state can also be read.
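To show how the per-port attributes above are meant to be used, the following sketch selects one port for the trigger and reads the selection state back. The LED name "example" and the port name "1-1" are assumptions for illustration only.

	/* Minimal sketch: select a port for the usbport trigger, then read it back. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char state[4] = "";
		int fd;

		/* "1" selects the port for the trigger; "0" would unselect it. */
		fd = open("/sys/class/leds/example/ports/1-1", O_WRONLY);
		if (fd < 0 || write(fd, "1", 1) < 0)
			perror("select port");
		if (fd >= 0)
			close(fd);

		/* The current selection state can be read from the same file. */
		fd = open("/sys/class/leds/example/ports/1-1", O_RDONLY);
		if (fd >= 0 && read(fd, state, sizeof(state) - 1) > 0)
			printf("port selected: %s", state);
		if (fd >= 0)
			close(fd);
		return 0;
	}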
diff --git a/Documentation/ABI/testing/sysfs-class-mic.txt b/Documentation/ABI/testing/sysfs-class-mic.txt
index d45eed2bf128..6ef682603179 100644
--- a/Documentation/ABI/testing/sysfs-class-mic.txt
+++ b/Documentation/ABI/testing/sysfs-class-mic.txt
@@ -153,7 +153,7 @@ Description:
What: /sys/class/mic/mic(x)/heartbeat_enable
Date: March 2015
-KernelVersion: 3.20
+KernelVersion: 4.4
Contact: Ashutosh Dixit <ashutosh.dixit@intel.com>
Description:
The MIC drivers detect and inform user space about card crashes
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index fa05719f9981..f85ce9e327b9 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -22,7 +22,7 @@ Description:
What: /sys/class/power_supply/max14577-charger/device/fast_charge_timer
Date: October 2014
KernelVersion: 3.18.0
-Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Contact: Krzysztof Kozlowski <krzk@kernel.org>
Description:
This entry shows and sets the maximum time the max14577
charger operates in fast-charge mode. When the timer expires
@@ -36,7 +36,7 @@ Description:
What: /sys/class/power_supply/max77693-charger/device/fast_charge_timer
Date: January 2015
KernelVersion: 3.19.0
-Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Contact: Krzysztof Kozlowski <krzk@kernel.org>
Description:
This entry shows and sets the maximum time the max77693
charger operates in fast-charge mode. When the timer expires
@@ -50,7 +50,7 @@ Description:
What: /sys/class/power_supply/max77693-charger/device/top_off_threshold_current
Date: January 2015
KernelVersion: 3.19.0
-Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Contact: Krzysztof Kozlowski <krzk@kernel.org>
Description:
This entry shows and sets the charging current threshold for
entering top-off charging mode. When charging current in fast
@@ -65,7 +65,7 @@ Description:
What: /sys/class/power_supply/max77693-charger/device/top_off_timer
Date: January 2015
KernelVersion: 3.19.0
-Contact: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Contact: Krzysztof Kozlowski <krzk@kernel.org>
Description:
This entry shows and sets the maximum time the max77693
charger operates in top-off charge mode. When the timer expires
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
index db197a879580..305dffd229a8 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -35,6 +35,12 @@ Description: Displays a set of alternate modes supported by a wheel. Each
DF-EX <*--------> G25 <-> G27
DF-EX <*----------------> G27
+ G29:
+ DF-EX <*> DFP <-> G25 <-> G27 <-> G29
+ DF-EX <*--------> G25 <-> G27 <-> G29
+ DF-EX <*----------------> G27 <-> G29
+ DF-EX <*------------------------> G29
+
DFGT:
DF-EX <*> DFP <-> DFGT
DF-EX <*--------> DFGT
@@ -50,3 +56,12 @@ Description: Displays the real model of the wheel regardless of any
alternate mode the wheel might be switched to.
It is a read-only value.
This entry is not created for devices that have only one mode.
+
+What: /sys/bus/hid/drivers/logitech/<dev>/combine_pedals
+Date: Sep 2016
+KernelVersion: 4.9
+Contact: Simon Wood <simon@mungewell.org>
+Description: Controls whether a combined value of accelerator and brake is
+ reported on the Y axis of the controller. Useful for older games
+ which do not work with separate accelerator/brake axes.
+ Off ('0') by default, enabled by setting '1'.
diff --git a/Documentation/ABI/testing/sysfs-driver-wacom b/Documentation/ABI/testing/sysfs-driver-wacom
index dca429340772..2aa5503ee200 100644
--- a/Documentation/ABI/testing/sysfs-driver-wacom
+++ b/Documentation/ABI/testing/sysfs-driver-wacom
@@ -24,6 +24,7 @@ What: /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_led/status0_luminance
Date: August 2014
Contact: linux-input@vger.kernel.org
Description:
+ <obsoleted by the LED class API now exported by the driver>
Writing to this file sets the status LED luminance (1..127)
when the stylus does not touch the tablet surface, and no
button is pressed on the stylus. This luminance level is
@@ -33,6 +34,7 @@ What: /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_led/status1_luminance
Date: August 2014
Contact: linux-input@vger.kernel.org
Description:
+ <obsoleted by the LED class API now exported by the driver>
Writing to this file sets the status LED luminance (1..127)
when the stylus touches the tablet surface, or any button is
pressed on the stylus.
@@ -41,6 +43,7 @@ What: /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_led/status_led0_select
Date: August 2014
Contact: linux-input@vger.kernel.org
Description:
+ <obsoleted by the LED class API now exported by the driver>
Writing to this file sets which one of the four (for Intuos 4
and Intuos 5) or of the right four (for Cintiq 21UX2 and Cintiq
24HD) status LEDs is active (0..3). The other three LEDs on the
@@ -50,6 +53,7 @@ What: /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_led/status_led1_select
Date: August 2014
Contact: linux-input@vger.kernel.org
Description:
+ <obsoleted by the LED class API now exported by the driver>
Writing to this file sets which one of the left four (for Cintiq 21UX2
and Cintiq 24HD) status LEDs is active (0..3). The other three LEDs on
the left are always inactive.
@@ -91,6 +95,7 @@ What: /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_remote/<serial_number>/r
Date: July 2015
Contact: linux-input@vger.kernel.org
Description:
+ <obsoleted by the LED class API now exported by the driver>
Reading from this file reports the mode status of the
remote as indicated by the LED lights on the device. If no
reports have been received from the paired device, reading
diff --git a/Documentation/ABI/testing/sysfs-i2c-bmp085 b/Documentation/ABI/testing/sysfs-i2c-bmp085
deleted file mode 100644
index 585962ad0465..000000000000
--- a/Documentation/ABI/testing/sysfs-i2c-bmp085
+++ /dev/null
@@ -1,31 +0,0 @@
-What: /sys/bus/i2c/devices/<busnum>-<devaddr>/pressure0_input
-Date: June 2010
-Contact: Christoph Mair <christoph.mair@gmail.com>
-Description: Start a pressure measurement and read the result. Values
- represent the ambient air pressure in pascal (0.01 millibar).
-
- Reading: returns the current air pressure.
-
-
-What: /sys/bus/i2c/devices/<busnum>-<devaddr>/temp0_input
-Date: June 2010
-Contact: Christoph Mair <christoph.mair@gmail.com>
-Description: Measure the ambient temperature. The returned value represents
- the ambient temperature in units of 0.1 degree celsius.
-
- Reading: returns the current temperature.
-
-
-What: /sys/bus/i2c/devices/<busnum>-<devaddr>/oversampling
-Date: June 2010
-Contact: Christoph Mair <christoph.mair@gmail.com>
-Description: Tell the bmp085 to use more samples to calculate a pressure
- value. When writing to this file the chip will use 2^x samples
- to calculate the next pressure value with x being the value
- written. Using this feature will decrease RMS noise and
- increase the measurement time.
-
- Reading: returns the current oversampling setting.
-
- Writing: sets a new oversampling setting.
- Accepted values: 0..3.
diff --git a/Documentation/Changes b/Documentation/Changes
index ec97b77c8b00..22797a15dc24 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -1,8 +1,13 @@
+.. _changes:
+
+Minimal requirements to compile the Kernel
+++++++++++++++++++++++++++++++++++++++++++
+
Intro
=====
This document is designed to provide a list of the minimum levels of
-software necessary to run the 3.0 kernels.
+software necessary to run the 4.x kernels.
This document is originally based on my "Changes" file for 2.0.x kernels
and therefore owes credit to the same people as that file (Jared Mauch,
@@ -10,9 +15,9 @@ Axel Boldt, Alessandro Sigala, and countless other users all over the
'net).
Current Minimal Requirements
-============================
+****************************
-Upgrade to at *least* these software revisions before thinking you've
+Upgrade to at **least** these software revisions before thinking you've
encountered a bug! If you're unsure what version you're currently
running, the suggested command should tell you.
@@ -21,34 +26,40 @@ running a Linux kernel. Also, not all tools are necessary on all
systems; obviously, if you don't have any ISDN hardware, for example,
you probably needn't concern yourself with isdn4k-utils.
-o GNU C 3.2 # gcc --version
-o GNU make 3.80 # make --version
-o binutils 2.12 # ld -v
-o util-linux 2.10o # fdformat --version
-o module-init-tools 0.9.10 # depmod -V
-o e2fsprogs 1.41.4 # e2fsck -V
-o jfsutils 1.1.3 # fsck.jfs -V
-o reiserfsprogs 3.6.3 # reiserfsck -V
-o xfsprogs 2.6.0 # xfs_db -V
-o squashfs-tools 4.0 # mksquashfs -version
-o btrfs-progs 0.18 # btrfsck
-o pcmciautils 004 # pccardctl -V
-o quota-tools 3.09 # quota -V
-o PPP 2.4.0 # pppd --version
-o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version
-o nfs-utils 1.0.5 # showmount --version
-o procps 3.2.0 # ps --version
-o oprofile 0.9 # oprofiled --version
-o udev 081 # udevd --version
-o grub 0.93 # grub --version || grub-install --version
-o mcelog 0.6 # mcelog --version
-o iptables 1.4.2 # iptables -V
-o openssl & libcrypto 1.0.0 # openssl version
-o bc 1.06.95 # bc --version
-
+====================== =============== ========================================
+ Program Minimal version Command to check the version
+====================== =============== ========================================
+GNU C 3.2 gcc --version
+GNU make 3.80 make --version
+binutils 2.12 ld -v
+util-linux 2.10o fdformat --version
+module-init-tools 0.9.10 depmod -V
+e2fsprogs 1.41.4 e2fsck -V
+jfsutils 1.1.3 fsck.jfs -V
+reiserfsprogs 3.6.3 reiserfsck -V
+xfsprogs 2.6.0 xfs_db -V
+squashfs-tools 4.0 mksquashfs -version
+btrfs-progs 0.18 btrfsck
+pcmciautils 004 pccardctl -V
+quota-tools 3.09 quota -V
+PPP 2.4.0 pppd --version
+isdn4k-utils 3.1pre1 isdnctrl 2>&1|grep version
+nfs-utils 1.0.5 showmount --version
+procps 3.2.0 ps --version
+oprofile 0.9 oprofiled --version
+udev 081 udevd --version
+grub 0.93 grub --version || grub-install --version
+mcelog 0.6 mcelog --version
+iptables 1.4.2 iptables -V
+openssl & libcrypto 1.0.0 openssl version
+bc 1.06.95 bc --version
+Sphinx\ [#f1]_ 1.2 sphinx-build --version
+====================== =============== ========================================
+
+.. [#f1] Sphinx is needed only to build the Kernel documentation
Kernel compilation
-==================
+******************
GCC
---
@@ -64,16 +75,16 @@ You will need GNU make 3.80 or later to build the kernel.
Binutils
--------
-Linux on IA-32 has recently switched from using as86 to using gas for
-assembling the 16-bit boot code, removing the need for as86 to compile
+Linux on IA-32 has recently switched from using ``as86`` to using ``gas`` for
+assembling the 16-bit boot code, removing the need for ``as86`` to compile
your kernel. This change does, however, mean that you need a recent
release of binutils.
Perl
----
-You will need perl 5 and the following modules: Getopt::Long, Getopt::Std,
-File::Basename, and File::Find to build the kernel.
+You will need perl 5 and the following modules: ``Getopt::Long``,
+``Getopt::Std``, ``File::Basename``, and ``File::Find`` to build the kernel.
BC
--
@@ -93,7 +104,7 @@ and higher.
System utilities
-================
+****************
Architectural changes
---------------------
@@ -115,7 +126,7 @@ well as the desired DocBook stylesheets.
Util-linux
----------
-New versions of util-linux provide *fdisk support for larger disks,
+New versions of util-linux provide ``fdisk`` support for larger disks,
support new options to mount, recognize more supported partition
types, have a fdformat which works with 2.4 kernels, and similar goodies.
You'll probably want to upgrade.
@@ -125,54 +136,57 @@ Ksymoops
If the unthinkable happens and your kernel oopses, you may need the
ksymoops tool to decode it, but in most cases you don't.
-It is generally preferred to build the kernel with CONFIG_KALLSYMS so
+It is generally preferred to build the kernel with ``CONFIG_KALLSYMS`` so
that it produces readable dumps that can be used as-is (this also
produces better output than ksymoops). If for some reason your kernel
-is not build with CONFIG_KALLSYMS and you have no way to rebuild and
+is not built with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
reproduce the Oops with that option, then you can still decode that Oops
with ksymoops.
Module-Init-Tools
-----------------
-A new module loader is now in the kernel that requires module-init-tools
+A new module loader is now in the kernel that requires ``module-init-tools``
to use. It is backward compatible with the 2.4.x series kernels.
Mkinitrd
--------
-These changes to the /lib/modules file tree layout also require that
+These changes to the ``/lib/modules`` file tree layout also require that
mkinitrd be upgraded.
E2fsprogs
---------
-The latest version of e2fsprogs fixes several bugs in fsck and
+The latest version of ``e2fsprogs`` fixes several bugs in fsck and
debugfs. Obviously, it's a good idea to upgrade.
JFSutils
--------
-The jfsutils package contains the utilities for the file system.
+The ``jfsutils`` package contains the utilities for the file system.
The following utilities are available:
-o fsck.jfs - initiate replay of the transaction log, and check
+
+- ``fsck.jfs`` - initiate replay of the transaction log, and check
and repair a JFS formatted partition.
-o mkfs.jfs - create a JFS formatted partition.
-o other file system utilities are also available in this package.
+
+- ``mkfs.jfs`` - create a JFS formatted partition.
+
+- other file system utilities are also available in this package.
Reiserfsprogs
-------------
The reiserfsprogs package should be used for reiserfs-3.6.x
(Linux kernels 2.4.x). It is a combined package and contains working
-versions of mkreiserfs, resize_reiserfs, debugreiserfs and
-reiserfsck. These utils work on both i386 and alpha platforms.
+versions of ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` and
+``reiserfsck``. These utils work on both i386 and alpha platforms.
Xfsprogs
--------
-The latest version of xfsprogs contains mkfs.xfs, xfs_db, and the
-xfs_repair utilities, among others, for the XFS filesystem. It is
+The latest version of ``xfsprogs`` contains ``mkfs.xfs``, ``xfs_db``, and the
+``xfs_repair`` utilities, among others, for the XFS filesystem. It is
architecture independent and any version from 2.0.0 onward should
work correctly with this version of the XFS kernel code (2.6.0 or
later is recommended, due to some significant improvements).
@@ -180,7 +194,7 @@ later is recommended, due to some significant improvements).
PCMCIAutils
-----------
-PCMCIAutils replaces pcmcia-cs. It properly sets up
+PCMCIAutils replaces ``pcmcia-cs``. It properly sets up
PCMCIA sockets at system startup and loads the appropriate modules
for 16-bit PCMCIA devices if the kernel is modularized and the hotplug
subsystem is used.
@@ -198,19 +212,20 @@ Intel IA32 microcode
A driver has been added to allow updating of Intel IA32 microcode,
accessible as a normal (misc) character device. If you are not using
-udev you may need to:
+udev you may need to::
-mkdir /dev/cpu
-mknod /dev/cpu/microcode c 10 184
-chmod 0644 /dev/cpu/microcode
+ mkdir /dev/cpu
+ mknod /dev/cpu/microcode c 10 184
+ chmod 0644 /dev/cpu/microcode
as root before you can use this. You'll probably also want to
get the user-space microcode_ctl utility to use with this.
udev
----
-udev is a userspace application for populating /dev dynamically with
-only entries for devices actually present. udev replaces the basic
+
+``udev`` is a userspace application for populating ``/dev`` dynamically with
+only entries for devices actually present. ``udev`` replaces the basic
functionality of devfs, while allowing persistent device naming for
devices.
@@ -218,10 +233,10 @@ FUSE
----
Needs libfuse 2.4.0 or later. Absolute minimum is 2.3.0 but mount
-options 'direct_io' and 'kernel_cache' won't work.
+options ``direct_io`` and ``kernel_cache`` won't work.
Networking
-==========
+**********
General changes
---------------
@@ -243,9 +258,9 @@ enable it to operate over diverse media layers. If you use PPP,
upgrade pppd to at least 2.4.0.
If you are not using udev, you must have the device file /dev/ppp
-which can be made by:
+which can be made by::
-mknod /dev/ppp c 108 0
+ mknod /dev/ppp c 108 0
as root.
@@ -260,22 +275,22 @@ NFS-utils
In ancient (2.4 and earlier) kernels, the nfs server needed to know
about any client that expected to be able to access files via NFS. This
-information would be given to the kernel by "mountd" when the client
-mounted the filesystem, or by "exportfs" at system startup. exportfs
-would take information about active clients from /var/lib/nfs/rmtab.
+information would be given to the kernel by ``mountd`` when the client
+mounted the filesystem, or by ``exportfs`` at system startup. exportfs
+would take information about active clients from ``/var/lib/nfs/rmtab``.
This approach is quite fragile as it depends on rmtab being correct
which is not always easy, particularly when trying to implement
-fail-over. Even when the system is working well, rmtab suffers from
+fail-over. Even when the system is working well, ``rmtab`` suffers from
getting lots of old entries that never get removed.
With modern kernels we have the option of having the kernel tell mountd
when it gets a request from an unknown host, and mountd can give
appropriate export information to the kernel. This removes the
-dependency on rmtab and means that the kernel only needs to know about
+dependency on ``rmtab`` and means that the kernel only needs to know about
currently active clients.
-To enable this new functionality, you need to:
+To enable this new functionality, you need to::
mount -t nfsd nfsd /proc/fs/nfsd
@@ -287,8 +302,32 @@ mcelog
------
On x86 kernels the mcelog utility is needed to process and log machine check
-events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
-by the CPU. Processing them is strongly encouraged.
+events when ``CONFIG_X86_MCE`` is enabled. Machine check events are errors
+reported by the CPU. Processing them is strongly encouraged.
+
+Kernel documentation
+********************
+
+Sphinx
+------
+
+The ReST markups currently used by the Documentation/ files are meant to be
+built with ``Sphinx`` version 1.2 or later. If you want to build
+PDF outputs, it is recommended to use version 1.4.6.
+
+.. note::
+
+ Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX``
+ version 3.14159265. Depending on the distribution, you may also need
+ to install a series of ``texlive`` packages that provide the minimal
+ set of functionalities required for ``XeLaTeX`` to work.
+
+Other tools
+-----------
+
+In order to produce documentation from DocBook, you'll also need ``xmlto``.
+Please notice, however, that we're currently migrating all documents to use
+``Sphinx``.
Getting updated software
========================
@@ -298,114 +337,149 @@ Kernel compilation
gcc
---
-o <ftp://ftp.gnu.org/gnu/gcc/>
+
+- <ftp://ftp.gnu.org/gnu/gcc/>
Make
----
-o <ftp://ftp.gnu.org/gnu/make/>
+
+- <ftp://ftp.gnu.org/gnu/make/>
Binutils
--------
-o <ftp://ftp.kernel.org/pub/linux/devel/binutils/>
+
+- <ftp://ftp.kernel.org/pub/linux/devel/binutils/>
OpenSSL
-------
-o <https://www.openssl.org/>
+
+- <https://www.openssl.org/>
System utilities
****************
Util-linux
----------
-o <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
+
+- <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
Ksymoops
--------
-o <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+
+- <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
Module-Init-Tools
-----------------
-o <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
+
+- <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
Mkinitrd
--------
-o <https://code.launchpad.net/initrd-tools/main>
+
+- <https://code.launchpad.net/initrd-tools/main>
E2fsprogs
---------
-o <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
+
+- <http://prdownloads.sourceforge.net/e2fsprogs/e2fsprogs-1.29.tar.gz>
JFSutils
--------
-o <http://jfs.sourceforge.net/>
+
+- <http://jfs.sourceforge.net/>
Reiserfsprogs
-------------
-o <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
+
+- <http://www.kernel.org/pub/linux/utils/fs/reiserfs/>
Xfsprogs
--------
-o <ftp://oss.sgi.com/projects/xfs/>
+
+- <ftp://oss.sgi.com/projects/xfs/>
Pcmciautils
-----------
-o <ftp://ftp.kernel.org/pub/linux/utils/kernel/pcmcia/>
+
+- <ftp://ftp.kernel.org/pub/linux/utils/kernel/pcmcia/>
Quota-tools
-----------
-o <http://sourceforge.net/projects/linuxquota/>
+-----------
+
+- <http://sourceforge.net/projects/linuxquota/>
DocBook Stylesheets
-------------------
-o <http://sourceforge.net/projects/docbook/files/docbook-dsssl/>
+
+- <http://sourceforge.net/projects/docbook/files/docbook-dsssl/>
XMLTO XSLT Frontend
-------------------
-o <http://cyberelk.net/tim/xmlto/>
+
+- <http://cyberelk.net/tim/xmlto/>
Intel P6 microcode
------------------
-o <https://downloadcenter.intel.com/>
+
+- <https://downloadcenter.intel.com/>
udev
----
-o <http://www.freedesktop.org/software/systemd/man/udev.html>
+
+- <http://www.freedesktop.org/software/systemd/man/udev.html>
FUSE
----
-o <http://sourceforge.net/projects/fuse>
+
+- <http://sourceforge.net/projects/fuse>
mcelog
------
-o <http://www.mcelog.org/>
+
+- <http://www.mcelog.org/>
Networking
**********
PPP
---
-o <ftp://ftp.samba.org/pub/ppp/>
+
+- <ftp://ftp.samba.org/pub/ppp/>
Isdn4k-utils
------------
-o <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/>
+
+- <ftp://ftp.isdn4linux.de/pub/isdn4linux/utils/>
NFS-utils
---------
-o <http://sourceforge.net/project/showfiles.php?group_id=14>
+
+- <http://sourceforge.net/project/showfiles.php?group_id=14>
Iptables
--------
-o <http://www.iptables.org/downloads.html>
+
+- <http://www.iptables.org/downloads.html>
Ip-route2
---------
-o <https://www.kernel.org/pub/linux/utils/net/iproute2/>
+
+- <https://www.kernel.org/pub/linux/utils/net/iproute2/>
OProfile
--------
-o <http://oprofile.sf.net/download/>
+
+- <http://oprofile.sf.net/download/>
NFS-Utils
---------
-o <http://nfs.sourceforge.net/>
+
+- <http://nfs.sourceforge.net/>
+
+Kernel documentation
+********************
+
+Sphinx
+------
+
+- <http://www.sphinx-doc.org/>
diff --git a/Documentation/CodeOfConflict b/Documentation/CodeOfConflict
index 1684d0b4efa6..49a8ecc157a2 100644
--- a/Documentation/CodeOfConflict
+++ b/Documentation/CodeOfConflict
@@ -19,7 +19,7 @@ please contact the Linux Foundation's Technical Advisory Board at
will work to resolve the issue to the best of their ability. For more
information on who is on the Technical Advisory Board and what their
role is, please see:
- http://www.linuxfoundation.org/programs/advisory-councils/tab
+ http://www.linuxfoundation.org/projects/linux/tab
As a reviewer of code, please strive to keep things civil and focused on
the technical issues involved. We are all humans, and frustrations can
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index a096836723ca..9c61c039ccd9 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -1,8 +1,10 @@
+.. _codingstyle:
- Linux kernel coding style
+Linux kernel coding style
+=========================
This is a short document describing the preferred coding style for the
-linux kernel. Coding style is very personal, and I won't _force_ my
+linux kernel. Coding style is very personal, and I won't **force** my
views on anybody, but this is what goes for anything that I have to be
able to maintain, and I'd prefer it for most other things too. Please
at least consider the points made here.
@@ -13,7 +15,8 @@ and NOT read it. Burn them, it's a great symbolic gesture.
Anyway, here goes:
- Chapter 1: Indentation
+1) Indentation
+--------------
Tabs are 8 characters, and thus indentations are also 8 characters.
There are heretic movements that try to make indentations 4 (or even 2!)
@@ -36,8 +39,10 @@ benefit of warning you when you're nesting your functions too deep.
Heed that warning.
The preferred way to ease multiple indentation levels in a switch statement is
-to align the "switch" and its subordinate "case" labels in the same column
-instead of "double-indenting" the "case" labels. E.g.:
+to align the ``switch`` and its subordinate ``case`` labels in the same column
+instead of ``double-indenting`` the ``case`` labels. E.g.:
+
+.. code-block:: c
switch (suffix) {
case 'G':
@@ -59,6 +64,8 @@ instead of "double-indenting" the "case" labels. E.g.:
Don't put multiple statements on a single line unless you have
something to hide:
+.. code-block:: c
+
if (condition) do_this;
do_something_everytime;
@@ -71,7 +78,8 @@ used for indentation, and the above example is deliberately broken.
Get a decent editor and don't leave whitespace at the end of lines.
- Chapter 2: Breaking long lines and strings
+2) Breaking long lines and strings
+----------------------------------
Coding style is all about readability and maintainability using commonly
available tools.
@@ -87,7 +95,8 @@ with a long argument list. However, never break user-visible strings such as
printk messages, because that breaks the ability to grep for them.
- Chapter 3: Placing Braces and Spaces
+3) Placing Braces and Spaces
+----------------------------
The other issue that always comes up in C styling is the placement of
braces. Unlike the indent size, there are few technical reasons to
@@ -95,6 +104,8 @@ choose one placement strategy over the other, but the preferred way, as
shown to us by the prophets Kernighan and Ritchie, is to put the opening
brace last on the line, and put the closing brace first, thusly:
+.. code-block:: c
+
if (x is true) {
we do y
}
@@ -102,6 +113,8 @@ brace last on the line, and put the closing brace first, thusly:
This applies to all non-function statement blocks (if, switch, for,
while, do). E.g.:
+.. code-block:: c
+
switch (action) {
case KOBJ_ADD:
return "add";
@@ -116,6 +129,8 @@ while, do). E.g.:
However, there is one special case, namely functions: they have the
opening brace at the beginning of the next line, thus:
+.. code-block:: c
+
int function(int x)
{
body of function
@@ -123,20 +138,24 @@ opening brace at the beginning of the next line, thus:
Heretic people all over the world have claimed that this inconsistency
is ... well ... inconsistent, but all right-thinking people know that
-(a) K&R are _right_ and (b) K&R are right. Besides, functions are
+(a) K&R are **right** and (b) K&R are right. Besides, functions are
special anyway (you can't nest them in C).
-Note that the closing brace is empty on a line of its own, _except_ in
+Note that the closing brace is empty on a line of its own, **except** in
the cases where it is followed by a continuation of the same statement,
-ie a "while" in a do-statement or an "else" in an if-statement, like
+ie a ``while`` in a do-statement or an ``else`` in an if-statement, like
this:
+.. code-block:: c
+
do {
body of do-loop
} while (condition);
and
+.. code-block:: c
+
if (x == y) {
..
} else if (x > y) {
@@ -155,11 +174,15 @@ comments on.
Do not unnecessarily use braces where a single statement will do.
+.. code-block:: c
+
if (condition)
action();
and
+.. code-block:: none
+
if (condition)
do_this();
else
@@ -168,6 +191,8 @@ and
This does not apply if only one branch of a conditional statement is a single
statement; in the latter case use braces in both branches:
+.. code-block:: c
+
if (condition) {
do_this();
do_that();
@@ -175,57 +200,67 @@ statement; in the latter case use braces in both branches:
otherwise();
}
- 3.1: Spaces
+3.1) Spaces
+***********
Linux kernel style for use of spaces depends (mostly) on
function-versus-keyword usage. Use a space after (most) keywords. The
notable exceptions are sizeof, typeof, alignof, and __attribute__, which look
somewhat like functions (and are usually used with parentheses in Linux,
-although they are not required in the language, as in: "sizeof info" after
-"struct fileinfo info;" is declared).
+although they are not required in the language, as in: ``sizeof info`` after
+``struct fileinfo info;`` is declared).
-So use a space after these keywords:
+So use a space after these keywords::
if, switch, case, for, do, while
but not with sizeof, typeof, alignof, or __attribute__. E.g.,
+.. code-block:: c
+
+
s = sizeof(struct file);
Do not add spaces around (inside) parenthesized expressions. This example is
-*bad*:
+**bad**:
+
+.. code-block:: c
+
s = sizeof( struct file );
When declaring pointer data or a function that returns a pointer type, the
-preferred use of '*' is adjacent to the data name or function name and not
+preferred use of ``*`` is adjacent to the data name or function name and not
adjacent to the type name. Examples:
+.. code-block:: c
+
+
char *linux_banner;
unsigned long long memparse(char *ptr, char **retptr);
char *match_strdup(substring_t *s);
Use one space around (on each side of) most binary and ternary operators,
-such as any of these:
+such as any of these::
= + - < > * / % | & ^ <= >= == != ? :
-but no space after unary operators:
+but no space after unary operators::
& * + - ~ ! sizeof typeof alignof __attribute__ defined
-no space before the postfix increment & decrement unary operators:
+no space before the postfix increment & decrement unary operators::
++ --
-no space after the prefix increment & decrement unary operators:
+no space after the prefix increment & decrement unary operators::
++ --
-and no space around the '.' and "->" structure member operators.
+and no space around the ``.`` and ``->`` structure member operators.
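+
+For illustration only (the variable and macro names below are invented for
+this example, not taken from kernel code), those rules combine like this:
+
+.. code-block:: c
+
+	len = strlen(name) + 1;
+	biggest = (a > b) ? a : b;
+	count++;
+	flags &= ~SOME_FLAG;
+	if (!done)
+		total += entry.size;
+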
Do not leave trailing whitespace at the ends of lines. Some editors with
-"smart" indentation will insert whitespace at the beginning of new lines as
+``smart`` indentation will insert whitespace at the beginning of new lines as
appropriate, so you can start typing the next line of code right away.
However, some such editors do not remove the whitespace if you end up not
putting a line of code there, such as if you leave a blank line. As a result,
@@ -237,22 +272,23 @@ of patches, this may make later patches in the series fail by changing their
context lines.
- Chapter 4: Naming
+4) Naming
+---------
C is a Spartan language, and so should your naming be. Unlike Modula-2
and Pascal programmers, C programmers do not use cute names like
ThisVariableIsATemporaryCounter. A C programmer would call that
-variable "tmp", which is much easier to write, and not the least more
+variable ``tmp``, which is much easier to write, and not the least more
difficult to understand.
HOWEVER, while mixed-case names are frowned upon, descriptive names for
-global variables are a must. To call a global function "foo" is a
+global variables are a must. To call a global function ``foo`` is a
shooting offense.
-GLOBAL variables (to be used only if you _really_ need them) need to
+GLOBAL variables (to be used only if you **really** need them) need to
have descriptive names, as do global functions. If you have a function
that counts the number of active users, you should call that
-"count_active_users()" or similar, you should _not_ call it "cntusr()".
+``count_active_users()`` or similar, you should **not** call it ``cntusr()``.
Encoding the type of a function into the name (so-called Hungarian
notation) is brain damaged - the compiler knows the types anyway and can
@@ -260,9 +296,9 @@ check those, and it only confuses the programmer. No wonder MicroSoft
makes buggy programs.
LOCAL variable names should be short, and to the point. If you have
-some random integer loop counter, it should probably be called "i".
-Calling it "loop_counter" is non-productive, if there is no chance of it
-being mis-understood. Similarly, "tmp" can be just about any type of
+some random integer loop counter, it should probably be called ``i``.
+Calling it ``loop_counter`` is non-productive, if there is no chance of it
+being mis-understood. Similarly, ``tmp`` can be just about any type of
variable that is used to hold a temporary value.
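+
+Purely as an illustration (the function and data below are made up for this
+example), those rules lead to code along these lines:
+
+.. code-block:: c
+
+	/* global function: a descriptive name */
+	int count_active_users(void)
+	{
+		int i, tmp = 0;		/* local names: short and to the point */
+
+		for (i = 0; i < nr_users; i++)	/* nr_users/users[] assumed to exist */
+			if (user_is_active(&users[i]))
+				tmp++;
+		return tmp;
+	}
+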
If you are afraid to mix up your local variable names, you have another
@@ -270,59 +306,69 @@ problem, which is called the function-growth-hormone-imbalance syndrome.
See chapter 6 (Functions).
- Chapter 5: Typedefs
+5) Typedefs
+-----------
+
+Please don't use things like ``vps_t``.
+It's a **mistake** to use typedef for structures and pointers. When you see a
+
+.. code-block:: c
-Please don't use things like "vps_t".
-It's a _mistake_ to use typedef for structures and pointers. When you see a
vps_t a;
in the source, what does it mean?
In contrast, if it says
+.. code-block:: c
+
struct virtual_container *a;
-you can actually tell what "a" is.
+you can actually tell what ``a`` is.
-Lots of people think that typedefs "help readability". Not so. They are
+Lots of people think that typedefs ``help readability``. Not so. They are
useful only for:
- (a) totally opaque objects (where the typedef is actively used to _hide_
+ (a) totally opaque objects (where the typedef is actively used to **hide**
what the object is).
- Example: "pte_t" etc. opaque objects that you can only access using
+ Example: ``pte_t`` etc. opaque objects that you can only access using
the proper accessor functions.
- NOTE! Opaqueness and "accessor functions" are not good in themselves.
- The reason we have them for things like pte_t etc. is that there
- really is absolutely _zero_ portably accessible information there.
+ .. note::
+
+ Opaqueness and ``accessor functions`` are not good in themselves.
+ The reason we have them for things like pte_t etc. is that there
+ really is absolutely **zero** portably accessible information there.
- (b) Clear integer types, where the abstraction _helps_ avoid confusion
- whether it is "int" or "long".
+ (b) Clear integer types, where the abstraction **helps** avoid confusion
+ whether it is ``int`` or ``long``.
u8/u16/u32 are perfectly fine typedefs, although they fit into
category (d) better than here.
- NOTE! Again - there needs to be a _reason_ for this. If something is
- "unsigned long", then there's no reason to do
+ .. note::
+
+ Again - there needs to be a **reason** for this. If something is
+ ``unsigned long``, then there's no reason to do
typedef unsigned long myflags_t;
but if there is a clear reason for why it under certain circumstances
- might be an "unsigned int" and under other configurations might be
- "unsigned long", then by all means go ahead and use a typedef.
+ might be an ``unsigned int`` and under other configurations might be
+ ``unsigned long``, then by all means go ahead and use a typedef.
- (c) when you use sparse to literally create a _new_ type for
+ (c) when you use sparse to literally create a **new** type for
type-checking.
(d) New types which are identical to standard C99 types, in certain
exceptional circumstances.
Although it would only take a short amount of time for the eyes and
- brain to become accustomed to the standard types like 'uint32_t',
+ brain to become accustomed to the standard types like ``uint32_t``,
some people object to their use anyway.
- Therefore, the Linux-specific 'u8/u16/u32/u64' types and their
+ Therefore, the Linux-specific ``u8/u16/u32/u64`` types and their
signed equivalents which are identical to standard types are
permitted -- although they are not mandatory in new code of your
own.
@@ -333,7 +379,7 @@ useful only for:
(e) Types safe for use in userspace.
In certain structures which are visible to userspace, we cannot
- require C99 types and cannot use the 'u32' form above. Thus, we
+ require C99 types and cannot use the ``u32`` form above. Thus, we
use __u32 and similar types in all structures which are shared
with userspace.
@@ -341,10 +387,11 @@ Maybe there are other cases too, but the rule should basically be to NEVER
EVER use a typedef unless you can clearly match one of those rules.
In general, a pointer, or a struct that has elements that can reasonably
-be directly accessed should _never_ be a typedef.
+be directly accessed should **never** be a typedef.
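+
+As a sketch of rule (a) above (the type and accessor below are hypothetical,
+not real kernel code), an opaque object hides its representation behind
+accessor functions:
+
+.. code-block:: c
+
+	typedef struct { unsigned long val; } cookie_t;	/* opaque */
+
+	static inline unsigned long cookie_val(cookie_t c)
+	{
+		return c.val;		/* only accessors may look inside */
+	}
+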
- Chapter 6: Functions
+6) Functions
+------------
Functions should be short and sweet, and do just one thing. They should
fit on one or two screenfuls of text (the ISO/ANSI screen size is 80x24,
@@ -372,8 +419,10 @@ and it gets confused. You know you're brilliant, but maybe you'd like
to understand what you did 2 weeks from now.
In source files, separate functions with one blank line. If the function is
-exported, the EXPORT* macro for it should follow immediately after the closing
-function brace line. E.g.:
+exported, the **EXPORT** macro for it should follow immediately after the
+closing function brace line. E.g.:
+
+.. code-block:: c
int system_is_up(void)
{
@@ -386,7 +435,8 @@ Although this is not required by the C language, it is preferred in Linux
because it is a simple way to add valuable information for the reader.
- Chapter 7: Centralized exiting of functions
+7) Centralized exiting of functions
+-----------------------------------
Albeit deprecated by some people, the equivalent of the goto statement is
used frequently by compilers in form of the unconditional jump instruction.
@@ -396,18 +446,21 @@ locations and some common work such as cleanup has to be done. If there is no
cleanup needed then just return directly.
Choose label names which say what the goto does or why the goto exists. An
-example of a good name could be "out_buffer:" if the goto frees "buffer". Avoid
-using GW-BASIC names like "err1:" and "err2:". Also don't name them after the
-goto location like "err_kmalloc_failed:"
+example of a good name could be ``out_free_buffer:`` if the goto frees ``buffer``.
+Avoid using GW-BASIC names like ``err1:`` and ``err2:``, as you would have to
+renumber them if you ever add or remove exit paths, and they make correctness
+difficult to verify anyway.
The rationale for using gotos is:
- unconditional statements are easier to understand and follow
- nesting is reduced
- errors by not updating individual exit points when making
- modifications are prevented
+ modifications are prevented
- saves the compiler work to optimize redundant code away ;)
+.. code-block:: c
+
int fun(int a)
{
int result = 0;
@@ -425,27 +478,41 @@ The rationale for using gotos is:
goto out_buffer;
}
...
- out_buffer:
+ out_free_buffer:
kfree(buffer);
return result;
}
-A common type of bug to be aware of is "one err bugs" which look like this:
+A common type of bug to be aware of is ``one err bugs`` which look like this:
+
+.. code-block:: c
err:
kfree(foo->bar);
kfree(foo);
return ret;
-The bug in this code is that on some exit paths "foo" is NULL. Normally the
-fix for this is to split it up into two error labels "err_bar:" and "err_foo:".
+The bug in this code is that on some exit paths ``foo`` is NULL. Normally the
+fix for this is to split it up into two error labels ``err_free_bar:`` and
+``err_free_foo:``:
+.. code-block:: c
- Chapter 8: Commenting
+ err_free_bar:
+ kfree(foo->bar);
+ err_free_foo:
+ kfree(foo);
+ return ret;
+
+Ideally you should simulate errors to test all exit paths.
+
+
+8) Commenting
+-------------
Comments are good, but there is also a danger of over-commenting. NEVER
try to explain HOW your code works in a comment: it's much better to
-write the code so that the _working_ is obvious, and it's a waste of
+write the code so that the **working** is obvious, and it's a waste of
time to explain badly written code.
Generally, you want your comments to tell WHAT your code does, not HOW.
@@ -461,11 +528,10 @@ When commenting the kernel API functions, please use the kernel-doc format.
See the files Documentation/kernel-documentation.rst and scripts/kernel-doc
for details.
-Linux style for comments is the C89 "/* ... */" style.
-Don't use C99-style "// ..." comments.
-
The preferred style for long (multi-line) comments is:
+.. code-block:: c
+
/*
* This is the preferred style for multi-line
* comments in the Linux kernel source code.
@@ -478,6 +544,8 @@ The preferred style for long (multi-line) comments is:
For files in net/ and drivers/net/ the preferred style for long (multi-line)
comments is a little different.
+.. code-block:: c
+
/* The preferred comment style for files in net/ and drivers/net
* looks like this.
*
@@ -491,10 +559,11 @@ multiple data declarations). This leaves you room for a small comment on each
item, explaining its use.
- Chapter 9: You've made a mess of it
+9) You've made a mess of it
+---------------------------
That's OK, we all do. You've probably been told by your long-time Unix
-user helper that "GNU emacs" automatically formats the C sources for
+user helper that ``GNU emacs`` automatically formats the C sources for
you, and you've noticed that yes, it does do that, but the defaults it
uses are less than desirable (in fact, they are worse than random
typing - an infinite number of monkeys typing into GNU emacs would never
@@ -503,63 +572,66 @@ make a good program).
So, you can either get rid of GNU emacs, or change it to use saner
values. To do the latter, you can stick the following in your .emacs file:
-(defun c-lineup-arglist-tabs-only (ignored)
- "Line up argument lists by tabs, not spaces"
- (let* ((anchor (c-langelem-pos c-syntactic-element))
- (column (c-langelem-2nd-pos c-syntactic-element))
- (offset (- (1+ column) anchor))
- (steps (floor offset c-basic-offset)))
- (* (max steps 1)
- c-basic-offset)))
-
-(add-hook 'c-mode-common-hook
- (lambda ()
- ;; Add kernel style
- (c-add-style
- "linux-tabs-only"
- '("linux" (c-offsets-alist
- (arglist-cont-nonempty
- c-lineup-gcc-asm-reg
- c-lineup-arglist-tabs-only))))))
-
-(add-hook 'c-mode-hook
- (lambda ()
- (let ((filename (buffer-file-name)))
- ;; Enable kernel mode for the appropriate files
- (when (and filename
- (string-match (expand-file-name "~/src/linux-trees")
- filename))
- (setq indent-tabs-mode t)
- (setq show-trailing-whitespace t)
- (c-set-style "linux-tabs-only")))))
+.. code-block:: none
+
+ (defun c-lineup-arglist-tabs-only (ignored)
+ "Line up argument lists by tabs, not spaces"
+ (let* ((anchor (c-langelem-pos c-syntactic-element))
+ (column (c-langelem-2nd-pos c-syntactic-element))
+ (offset (- (1+ column) anchor))
+ (steps (floor offset c-basic-offset)))
+ (* (max steps 1)
+ c-basic-offset)))
+
+ (add-hook 'c-mode-common-hook
+ (lambda ()
+ ;; Add kernel style
+ (c-add-style
+ "linux-tabs-only"
+ '("linux" (c-offsets-alist
+ (arglist-cont-nonempty
+ c-lineup-gcc-asm-reg
+ c-lineup-arglist-tabs-only))))))
+
+ (add-hook 'c-mode-hook
+ (lambda ()
+ (let ((filename (buffer-file-name)))
+ ;; Enable kernel mode for the appropriate files
+ (when (and filename
+ (string-match (expand-file-name "~/src/linux-trees")
+ filename))
+ (setq indent-tabs-mode t)
+ (setq show-trailing-whitespace t)
+ (c-set-style "linux-tabs-only")))))
This will make emacs go better with the kernel coding style for C
-files below ~/src/linux-trees.
+files below ``~/src/linux-trees``.
But even if you fail in getting emacs to do sane formatting, not
-everything is lost: use "indent".
+everything is lost: use ``indent``.
Now, again, GNU indent has the same brain-dead settings that GNU emacs
has, which is why you need to give it a few command line options.
However, that's not too bad, because even the makers of GNU indent
recognize the authority of K&R (the GNU people aren't evil, they are
just severely misguided in this matter), so you just give indent the
-options "-kr -i8" (stands for "K&R, 8 character indents"), or use
-"scripts/Lindent", which indents in the latest style.
+options ``-kr -i8`` (stands for ``K&R, 8 character indents``), or use
+``scripts/Lindent``, which indents in the latest style.
-"indent" has a lot of options, and especially when it comes to comment
+``indent`` has a lot of options, and especially when it comes to comment
re-formatting you may want to take a look at the man page. But
-remember: "indent" is not a fix for bad programming.
+remember: ``indent`` is not a fix for bad programming.
- Chapter 10: Kconfig configuration files
+10) Kconfig configuration files
+-------------------------------
For all of the Kconfig* configuration files throughout the source tree,
-the indentation is somewhat different. Lines under a "config" definition
+the indentation is somewhat different. Lines under a ``config`` definition
are indented with one tab, while help text is indented an additional two
-spaces. Example:
+spaces. Example::
-config AUDIT
+ config AUDIT
bool "Auditing support"
depends on NET
help
@@ -569,9 +641,9 @@ config AUDIT
auditing without CONFIG_AUDITSYSCALL.
Seriously dangerous features (such as write support for certain
-filesystems) should advertise this prominently in their prompt string:
+filesystems) should advertise this prominently in their prompt string::
-config ADFS_FS_RW
+ config ADFS_FS_RW
bool "ADFS write support (DANGEROUS)"
depends on ADFS_FS
...
@@ -580,41 +652,45 @@ For full documentation on the configuration files, see the file
Documentation/kbuild/kconfig-language.txt.
- Chapter 11: Data structures
+11) Data structures
+-------------------
Data structures that have visibility outside the single-threaded
environment they are created and destroyed in should always have
reference counts. In the kernel, garbage collection doesn't exist (and
outside the kernel garbage collection is slow and inefficient), which
-means that you absolutely _have_ to reference count all your uses.
+means that you absolutely **have** to reference count all your uses.
Reference counting means that you can avoid locking, and allows multiple
users to have access to the data structure in parallel - and not having
to worry about the structure suddenly going away from under them just
because they slept or did something else for a while.
-Note that locking is _not_ a replacement for reference counting.
+Note that locking is **not** a replacement for reference counting.
Locking is used to keep data structures coherent, while reference
counting is a memory management technique. Usually both are needed, and
they are not to be confused with each other.
Many data structures can indeed have two levels of reference counting,
-when there are users of different "classes". The subclass count counts
+when there are users of different ``classes``. The subclass count counts
the number of subclass users, and decrements the global count just once
when the subclass count goes to zero.
-Examples of this kind of "multi-level-reference-counting" can be found in
-memory management ("struct mm_struct": mm_users and mm_count), and in
-filesystem code ("struct super_block": s_count and s_active).
+Examples of this kind of ``multi-level-reference-counting`` can be found in
+memory management (``struct mm_struct``: mm_users and mm_count), and in
+filesystem code (``struct super_block``: s_count and s_active).
Remember: if another thread can find your data structure, and you don't
have a reference count on it, you almost certainly have a bug.
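+
+One minimal way to do this (sketched here with a hypothetical structure; the
+helpers themselves come from include/linux/kref.h) is to embed a struct kref
+and free the object from its release function:
+
+.. code-block:: c
+
+	struct foo {
+		struct kref refcount;
+		/* ... payload ... */
+	};
+
+	static void foo_release(struct kref *ref)
+	{
+		kfree(container_of(ref, struct foo, refcount));
+	}
+
+	/* kref_init(&f->refcount) at allocation time, then: */
+	kref_get(&f->refcount);			/* each new user takes a reference */
+	kref_put(&f->refcount, foo_release);	/* dropped when the user is done */
+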
- Chapter 12: Macros, Enums and RTL
+12) Macros, Enums and RTL
+-------------------------
Names of macros defining constants and labels in enums are capitalized.
+.. code-block:: c
+
#define CONSTANT 0x12345
Enums are preferred when defining several related constants.
@@ -626,7 +702,9 @@ Generally, inline functions are preferable to macros resembling functions.
Macros with multiple statements should be enclosed in a do - while block:
- #define macrofun(a, b, c) \
+.. code-block:: c
+
+ #define macrofun(a, b, c) \
do { \
if (a == 5) \
do_this(b, c); \
@@ -636,17 +714,21 @@ Things to avoid when using macros:
1) macros that affect control flow:
+.. code-block:: c
+
#define FOO(x) \
do { \
if (blah(x) < 0) \
return -EBUGGERED; \
} while (0)
-is a _very_ bad idea. It looks like a function call but exits the "calling"
+is a **very** bad idea. It looks like a function call but exits the ``calling``
function; don't break the internal parsers of those who will read the code.
2) macros that depend on having a local variable with a magic name:
+.. code-block:: c
+
#define FOO(val) bar(index, val)
might look like a good thing, but it's confusing as hell when one reads the
@@ -659,18 +741,22 @@ bite you if somebody e.g. turns FOO into an inline function.
must enclose the expression in parentheses. Beware of similar issues with
macros using parameters.
+.. code-block:: c
+
#define CONSTANT 0x4000
#define CONSTEXP (CONSTANT | 3)
5) namespace collisions when defining local variables in macros resembling
functions:
-#define FOO(x) \
-({ \
- typeof(x) ret; \
- ret = calc_ret(x); \
- (ret); \
-})
+.. code-block:: c
+
+ #define FOO(x) \
+ ({ \
+ typeof(x) ret; \
+ ret = calc_ret(x); \
+ (ret); \
+ })
ret is a common name for a local variable - __foo_ret is less likely
to collide with an existing variable.
@@ -679,11 +765,12 @@ The cpp manual deals with macros exhaustively. The gcc internals manual also
covers RTL which is used frequently with assembly language in the kernel.
- Chapter 13: Printing kernel messages
+13) Printing kernel messages
+----------------------------
Kernel developers like to be seen as literate. Do mind the spelling
of kernel messages to make a good impression. Do not use crippled
-words like "dont"; use "do not" or "don't" instead. Make the messages
+words like ``dont``; use ``do not`` or ``don't`` instead. Make the messages
concise, clear, and unambiguous.
Kernel messages do not have to be terminated with a period.
@@ -713,7 +800,8 @@ already inside a debug-related #ifdef section, printk(KERN_DEBUG ...) can be
used.
- Chapter 14: Allocating memory
+14) Allocating memory
+---------------------
The kernel provides the following general purpose memory allocators:
kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
@@ -722,6 +810,8 @@ about them.
The preferred form for passing a size of a struct is the following:
+.. code-block:: c
+
p = kmalloc(sizeof(*p), ...);
The alternative form where struct name is spelled out hurts readability and
@@ -734,20 +824,25 @@ language.
The preferred form for allocating an array is the following:
+.. code-block:: c
+
p = kmalloc_array(n, sizeof(...), ...);
The preferred form for allocating a zeroed array is the following:
+.. code-block:: c
+
p = kcalloc(n, sizeof(...), ...);
Both forms check for overflow on the allocation size n * sizeof(...),
and return NULL if that occurred.
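+
+For example (sketch only; the error handling shown is the usual pattern, not a
+requirement imposed by this document):
+
+.. code-block:: c
+
+	p = kmalloc_array(n, sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+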
- Chapter 15: The inline disease
+15) The inline disease
+----------------------
There appears to be a common misperception that gcc has a magic "make me
-faster" speedup option called "inline". While the use of inlines can be
+faster" speedup option called ``inline``. While the use of inlines can be
appropriate (for example as a means of replacing macros, see Chapter 12), it
very often is not. Abundant use of the inline keyword leads to a much bigger
kernel, which in turn slows the system as a whole down, due to a bigger
@@ -771,26 +866,27 @@ appears outweighs the potential value of the hint that tells gcc to do
something it would have done anyway.
- Chapter 16: Function return values and names
+16) Function return values and names
+------------------------------------
Functions can return values of many different kinds, and one of the
most common is a value indicating whether the function succeeded or
failed. Such a value can be represented as an error-code integer
-(-Exxx = failure, 0 = success) or a "succeeded" boolean (0 = failure,
+(-Exxx = failure, 0 = success) or a ``succeeded`` boolean (0 = failure,
non-zero = success).
Mixing up these two sorts of representations is a fertile source of
difficult-to-find bugs. If the C language included a strong distinction
between integers and booleans then the compiler would find these mistakes
for us... but it doesn't. To help prevent such bugs, always follow this
-convention:
+convention::
If the name of a function is an action or an imperative command,
the function should return an error-code integer. If the name
is a predicate, the function should return a "succeeded" boolean.
-For example, "add work" is a command, and the add_work() function returns 0
-for success or -EBUSY for failure. In the same way, "PCI device present" is
+For example, ``add work`` is a command, and the add_work() function returns 0
+for success or -EBUSY for failure. In the same way, ``PCI device present`` is
a predicate, and the pci_dev_present() function returns 1 if it succeeds in
finding a matching device or 0 if it doesn't.
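+
+To illustrate, the convention gives prototypes like the following (the exact
+parameter types here are just for the example):
+
+.. code-block:: c
+
+	/* command: returns 0 on success or a negative errno such as -EBUSY */
+	int add_work(struct work_struct *work);
+
+	/* predicate: returns 1 if a matching device is found, 0 otherwise */
+	int pci_dev_present(const struct pci_device_id *ids);
+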
@@ -805,17 +901,22 @@ result. Typical examples would be functions that return pointers; they use
NULL or the ERR_PTR mechanism to report failure.
- Chapter 17: Don't re-invent the kernel macros
+17) Don't re-invent the kernel macros
+-------------------------------------
The header file include/linux/kernel.h contains a number of macros that
you should use, rather than explicitly coding some variant of them yourself.
For example, if you need to calculate the length of an array, take advantage
of the macro
+.. code-block:: c
+
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
Similarly, if you need to calculate the size of some structure member, use
+.. code-block:: c
+
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
There are also min() and max() macros that do strict type checking if you
@@ -823,16 +924,21 @@ need them. Feel free to peruse that header file to see what else is already
defined that you shouldn't reproduce in your code.
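+
+For instance (illustrative only; ``table``, ``process()`` and the length
+variables are invented names), these helpers replace hand-rolled equivalents:
+
+.. code-block:: c
+
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(table); i++)
+		process(&table[i]);
+
+	len = min(len, max_len);	/* min()/max() are type-checked */
+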
- Chapter 18: Editor modelines and other cruft
+18) Editor modelines and other cruft
+------------------------------------
Some editors can interpret configuration information embedded in source files,
indicated with special markers. For example, emacs interprets lines marked
like this:
+.. code-block:: c
+
-*- mode: c -*-
Or like this:
+.. code-block:: c
+
/*
Local Variables:
compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
@@ -841,6 +947,8 @@ Or like this:
Vim interprets markers that look like this:
+.. code-block:: c
+
/* vim:set sw=8 noet */
Do not include any of these in source files. People have their own personal
@@ -850,7 +958,8 @@ own custom mode, or may have some other magic method for making indentation
work correctly.
- Chapter 19: Inline assembly
+19) Inline assembly
+-------------------
In architecture-specific code, you may need to use inline assembly to interface
with CPU or platform functionality. Don't hesitate to do so when necessary.
@@ -863,7 +972,7 @@ that inline assembly can use C parameters.
Large, non-trivial assembly functions should go in .S files, with corresponding
C prototypes defined in C header files. The C prototypes for assembly
-functions should use "asmlinkage".
+functions should use ``asmlinkage``.
You may need to mark your asm statement as volatile, to prevent GCC from
removing it if GCC doesn't notice any side effects. You don't always need to
@@ -874,12 +983,15 @@ instructions, put each instruction on a separate line in a separate quoted
string, and end each string except the last with \n\t to properly indent the
next instruction in the assembly output:
+.. code-block:: c
+
asm ("magic %reg1, #42\n\t"
"more_magic %reg2, %reg3"
: /* outputs */ : /* inputs */ : /* clobbers */);
- Chapter 20: Conditional Compilation
+20) Conditional Compilation
+---------------------------
Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
files; doing so makes code harder to read and logic harder to follow. Instead,
@@ -903,6 +1015,8 @@ unused, delete it.)
Within code, where possible, use the IS_ENABLED macro to convert a Kconfig
symbol into a C boolean expression, and use it in a normal C conditional:
+.. code-block:: c
+
if (IS_ENABLED(CONFIG_SOMETHING)) {
...
}
@@ -918,12 +1032,15 @@ At the end of any non-trivial #if or #ifdef block (more than a few lines),
place a comment after the #endif on the same line, noting the conditional
expression used. For instance:
+.. code-block:: c
+
#ifdef CONFIG_SOMETHING
...
#endif /* CONFIG_SOMETHING */
- Appendix I: References
+Appendix I) References
+----------------------
The C Programming Language, Second Edition
by Brian W. Kernighan and Dennis M. Ritchie.
@@ -943,4 +1060,3 @@ language C, URL: http://www.open-std.org/JTC1/SC22/WG14/
Kernel CodingStyle, by greg@kroah.com at OLS 2002:
http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
-
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 781024ef9050..979228bc9035 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -699,7 +699,7 @@ to use the dma_sync_*() interfaces.
dma_addr_t mapping;
mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(cp->dev, dma_handle)) {
+ if (dma_mapping_error(cp->dev, mapping)) {
/*
* reduce current DMA mapping usage,
* delay and try again later or
@@ -931,10 +931,8 @@ to "Closing".
1) Struct scatterlist requirements.
- Don't invent the architecture specific struct scatterlist; just use
- <asm-generic/scatterlist.h>. You need to enable
- CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
- (including software IOMMU).
+ You need to enable CONFIG_NEED_SG_DMA_LENGTH if the architecture
+ supports IOMMUs (including software IOMMU).
2) ARCH_DMA_MINALIGN
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 1d26eeb6b5f6..6b20128fab8a 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -277,14 +277,26 @@ and <size> parameters are provided to do partial page mapping, it is
recommended that you never use these unless you really know what the
cache width is.
+dma_addr_t
+dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+
+void
+dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+
+API for mapping and unmapping for MMIO resources. All the notes and
+warnings for the other mapping APIs apply here. The API should only be
+used to map device MMIO resources, mapping of RAM is not permitted.
+
int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-In some circumstances dma_map_single() and dma_map_page() will fail to create
-a mapping. A driver can check for these errors by testing the returned
-DMA address with dma_mapping_error(). A non-zero return value means the mapping
-could not be created and the driver should take appropriate action (e.g.
-reduce current DMA mapping usage or delay and try again later).
+In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
+will fail to create a mapping. A driver can check for these errors by testing
+the returned DMA address with dma_mapping_error(). A non-zero return value
+means the mapping could not be created and the driver should take appropriate
+action (e.g. reduce current DMA mapping usage or delay and try again later).
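+
+As an illustration only (the device, physical address and size below are
+hypothetical), a driver using these interfaces might do:
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_resource(dev, phys_addr, size,
+				      DMA_TO_DEVICE, 0);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/* reduce usage, delay and retry, or give up */
+		goto map_error_handling;
+	}
+	...
+	dma_unmap_resource(dev, dma_handle, size, DMA_TO_DEVICE, 0);
+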
int
dma_map_sg(struct device *dev, struct scatterlist *sg,
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 64460a897f56..736f5916daea 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -6,7 +6,7 @@
# To add a new book the only step required is to add the book to the
# list of DOCBOOKS.
-DOCBOOKS := z8530book.xml device-drivers.xml \
+DOCBOOKS := z8530book.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
@@ -22,9 +22,15 @@ ifeq ($(DOCBOOKS),)
# Skip DocBook build if the user explicitly requested no DOCBOOKS.
.DEFAULT:
@echo " SKIP DocBook $@ target (DOCBOOKS=\"\" specified)."
+else
+ifneq ($(SPHINXDIRS),)
+# Skip DocBook build if the user explicitly requested a sphinx dir
+.DEFAULT:
+ @echo " SKIP DocBook $@ target (SPHINXDIRS specified)."
else
+
###
# The build process is as follows (targets):
# (xmldocs) [by docproc]
@@ -66,6 +72,7 @@ installmandocs: mandocs
# no-op for the DocBook toolchain
epubdocs:
+latexdocs:
###
#External programs used
@@ -221,6 +228,7 @@ silent_gen_xml = :
echo "</programlisting>") > $@
endif # DOCBOOKS=""
+endif  # SPHINXDIRS=...
###
# Help targets as used by the top-level makefile
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
deleted file mode 100644
index 9c10030eb2be..000000000000
--- a/Documentation/DocBook/device-drivers.tmpl
+++ /dev/null
@@ -1,521 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="LinuxDriversAPI">
- <bookinfo>
- <title>Linux Device Drivers</title>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
- <chapter id="Basics">
- <title>Driver Basics</title>
- <sect1><title>Driver Entry and Exit points</title>
-!Iinclude/linux/init.h
- </sect1>
-
- <sect1><title>Atomic and pointer manipulation</title>
-!Iarch/x86/include/asm/atomic.h
- </sect1>
-
- <sect1><title>Delaying, scheduling, and timer routines</title>
-!Iinclude/linux/sched.h
-!Ekernel/sched/core.c
-!Ikernel/sched/cpupri.c
-!Ikernel/sched/fair.c
-!Iinclude/linux/completion.h
-!Ekernel/time/timer.c
- </sect1>
- <sect1><title>Wait queues and Wake events</title>
-!Iinclude/linux/wait.h
-!Ekernel/sched/wait.c
- </sect1>
- <sect1><title>High-resolution timers</title>
-!Iinclude/linux/ktime.h
-!Iinclude/linux/hrtimer.h
-!Ekernel/time/hrtimer.c
- </sect1>
- <sect1><title>Workqueues and Kevents</title>
-!Iinclude/linux/workqueue.h
-!Ekernel/workqueue.c
- </sect1>
- <sect1><title>Internal Functions</title>
-!Ikernel/exit.c
-!Ikernel/signal.c
-!Iinclude/linux/kthread.h
-!Ekernel/kthread.c
- </sect1>
-
- <sect1><title>Kernel objects manipulation</title>
-<!--
-X!Iinclude/linux/kobject.h
--->
-!Elib/kobject.c
- </sect1>
-
- <sect1><title>Kernel utility functions</title>
-!Iinclude/linux/kernel.h
-!Ekernel/printk/printk.c
-!Ekernel/panic.c
-!Ekernel/sys.c
-!Ekernel/rcu/srcu.c
-!Ekernel/rcu/tree.c
-!Ekernel/rcu/tree_plugin.h
-!Ekernel/rcu/update.c
- </sect1>
-
- <sect1><title>Device Resource Management</title>
-!Edrivers/base/devres.c
- </sect1>
-
- </chapter>
-
- <chapter id="devdrivers">
- <title>Device drivers infrastructure</title>
- <sect1><title>The Basic Device Driver-Model Structures </title>
-!Iinclude/linux/device.h
- </sect1>
- <sect1><title>Device Drivers Base</title>
-!Idrivers/base/init.c
-!Edrivers/base/driver.c
-!Edrivers/base/core.c
-!Edrivers/base/syscore.c
-!Edrivers/base/class.c
-!Idrivers/base/node.c
-!Edrivers/base/firmware_class.c
-!Edrivers/base/transport_class.c
-<!-- Cannot be included, because
- attribute_container_add_class_device_adapter
- and attribute_container_classdev_to_container
- exceed allowed 44 characters maximum
-X!Edrivers/base/attribute_container.c
--->
-!Edrivers/base/dd.c
-<!--
-X!Edrivers/base/interface.c
--->
-!Iinclude/linux/platform_device.h
-!Edrivers/base/platform.c
-!Edrivers/base/bus.c
- </sect1>
- <sect1>
- <title>Buffer Sharing and Synchronization</title>
- <para>
- The dma-buf subsystem provides the framework for sharing buffers
- for hardware (DMA) access across multiple device drivers and
- subsystems, and for synchronizing asynchronous hardware access.
- </para>
- <para>
- This is used, for example, by drm "prime" multi-GPU support, but
- is of course not limited to GPU use cases.
- </para>
- <para>
- The three main components of this are: (1) dma-buf, representing
- a sg_table and exposed to userspace as a file descriptor to allow
- passing between devices, (2) fence, which provides a mechanism
- to signal when one device as finished access, and (3) reservation,
- which manages the shared or exclusive fence(s) associated with
- the buffer.
- </para>
- <sect2><title>dma-buf</title>
-!Edrivers/dma-buf/dma-buf.c
-!Iinclude/linux/dma-buf.h
- </sect2>
- <sect2><title>reservation</title>
-!Pdrivers/dma-buf/reservation.c Reservation Object Overview
-!Edrivers/dma-buf/reservation.c
-!Iinclude/linux/reservation.h
- </sect2>
- <sect2><title>fence</title>
-!Edrivers/dma-buf/fence.c
-!Iinclude/linux/fence.h
-!Edrivers/dma-buf/seqno-fence.c
-!Iinclude/linux/seqno-fence.h
-!Edrivers/dma-buf/fence-array.c
-!Iinclude/linux/fence-array.h
-!Edrivers/dma-buf/reservation.c
-!Iinclude/linux/reservation.h
-!Edrivers/dma-buf/sync_file.c
-!Iinclude/linux/sync_file.h
- </sect2>
- </sect1>
- <sect1><title>Device Drivers DMA Management</title>
-!Edrivers/base/dma-coherent.c
-!Edrivers/base/dma-mapping.c
- </sect1>
- <sect1><title>Device Drivers Power Management</title>
-!Edrivers/base/power/main.c
- </sect1>
- <sect1><title>Device Drivers ACPI Support</title>
-<!-- Internal functions only
-X!Edrivers/acpi/sleep/main.c
-X!Edrivers/acpi/sleep/wakeup.c
-X!Edrivers/acpi/motherboard.c
-X!Edrivers/acpi/bus.c
--->
-!Edrivers/acpi/scan.c
-!Idrivers/acpi/scan.c
-<!-- No correct structured comments
-X!Edrivers/acpi/pci_bind.c
--->
- </sect1>
- <sect1><title>Device drivers PnP support</title>
-!Idrivers/pnp/core.c
-<!-- No correct structured comments
-X!Edrivers/pnp/system.c
- -->
-!Edrivers/pnp/card.c
-!Idrivers/pnp/driver.c
-!Edrivers/pnp/manager.c
-!Edrivers/pnp/support.c
- </sect1>
- <sect1><title>Userspace IO devices</title>
-!Edrivers/uio/uio.c
-!Iinclude/linux/uio_driver.h
- </sect1>
- </chapter>
-
- <chapter id="parportdev">
- <title>Parallel Port Devices</title>
-!Iinclude/linux/parport.h
-!Edrivers/parport/ieee1284.c
-!Edrivers/parport/share.c
-!Idrivers/parport/daisy.c
- </chapter>
-
- <chapter id="message_devices">
- <title>Message-based devices</title>
- <sect1><title>Fusion message devices</title>
-!Edrivers/message/fusion/mptbase.c
-!Idrivers/message/fusion/mptbase.c
-!Edrivers/message/fusion/mptscsih.c
-!Idrivers/message/fusion/mptscsih.c
-!Idrivers/message/fusion/mptctl.c
-!Idrivers/message/fusion/mptspi.c
-!Idrivers/message/fusion/mptfc.c
-!Idrivers/message/fusion/mptlan.c
- </sect1>
- </chapter>
-
- <chapter id="snddev">
- <title>Sound Devices</title>
-!Iinclude/sound/core.h
-!Esound/sound_core.c
-!Iinclude/sound/pcm.h
-!Esound/core/pcm.c
-!Esound/core/device.c
-!Esound/core/info.c
-!Esound/core/rawmidi.c
-!Esound/core/sound.c
-!Esound/core/memory.c
-!Esound/core/pcm_memory.c
-!Esound/core/init.c
-!Esound/core/isadma.c
-!Esound/core/control.c
-!Esound/core/pcm_lib.c
-!Esound/core/hwdep.c
-!Esound/core/pcm_native.c
-!Esound/core/memalloc.c
-<!-- FIXME: Removed for now since no structured comments in source
-X!Isound/sound_firmware.c
--->
- </chapter>
-
-
- <chapter id="uart16x50">
- <title>16x50 UART Driver</title>
-!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250_core.c
- </chapter>
-
- <chapter id="fbdev">
- <title>Frame Buffer Library</title>
-
- <para>
- The frame buffer drivers depend heavily on four data structures.
- These structures are declared in include/linux/fb.h. They are
- fb_info, fb_var_screeninfo, fb_fix_screeninfo and fb_monospecs.
- The last three can be made available to and from userland.
- </para>
-
- <para>
- fb_info defines the current state of a particular video card.
- Inside fb_info, there exists a fb_ops structure which is a
- collection of needed functions to make fbdev and fbcon work.
- fb_info is only visible to the kernel.
- </para>
-
- <para>
- fb_var_screeninfo is used to describe the features of a video card
- that are user defined. With fb_var_screeninfo, things such as
- depth and the resolution may be defined.
- </para>
-
- <para>
- The next structure is fb_fix_screeninfo. This defines the
- properties of a card that are created when a mode is set and can't
- be changed otherwise. A good example of this is the start of the
- frame buffer memory. This "locks" the address of the frame buffer
- memory, so that it cannot be changed or moved.
- </para>
-
- <para>
- The last structure is fb_monospecs. In the old API, there was
- little importance for fb_monospecs. This allowed for forbidden things
- such as setting a mode of 800x600 on a fix frequency monitor. With
- the new API, fb_monospecs prevents such things, and if used
- correctly, can prevent a monitor from being cooked. fb_monospecs
- will not be useful until kernels 2.5.x.
- </para>
-
- <sect1><title>Frame Buffer Memory</title>
-!Edrivers/video/fbdev/core/fbmem.c
- </sect1>
-<!--
- <sect1><title>Frame Buffer Console</title>
-X!Edrivers/video/console/fbcon.c
- </sect1>
--->
- <sect1><title>Frame Buffer Colormap</title>
-!Edrivers/video/fbdev/core/fbcmap.c
- </sect1>
-<!-- FIXME:
- drivers/video/fbgen.c has no docs, which stuffs up the sgml. Comment
- out until somebody adds docs. KAO
- <sect1><title>Frame Buffer Generic Functions</title>
-X!Idrivers/video/fbgen.c
- </sect1>
-KAO -->
- <sect1><title>Frame Buffer Video Mode Database</title>
-!Idrivers/video/fbdev/core/modedb.c
-!Edrivers/video/fbdev/core/modedb.c
- </sect1>
- <sect1><title>Frame Buffer Macintosh Video Mode Database</title>
-!Edrivers/video/fbdev/macmodes.c
- </sect1>
- <sect1><title>Frame Buffer Fonts</title>
- <para>
- Refer to the file lib/fonts/fonts.c for more information.
- </para>
-<!-- FIXME: Removed for now since no structured comments in source
-X!Ilib/fonts/fonts.c
--->
- </sect1>
- </chapter>
-
- <chapter id="input_subsystem">
- <title>Input Subsystem</title>
- <sect1><title>Input core</title>
-!Iinclude/linux/input.h
-!Edrivers/input/input.c
-!Edrivers/input/ff-core.c
-!Edrivers/input/ff-memless.c
- </sect1>
- <sect1><title>Multitouch Library</title>
-!Iinclude/linux/input/mt.h
-!Edrivers/input/input-mt.c
- </sect1>
- <sect1><title>Polled input devices</title>
-!Iinclude/linux/input-polldev.h
-!Edrivers/input/input-polldev.c
- </sect1>
- <sect1><title>Matrix keyboards/keypads</title>
-!Iinclude/linux/input/matrix_keypad.h
- </sect1>
- <sect1><title>Sparse keymap support</title>
-!Iinclude/linux/input/sparse-keymap.h
-!Edrivers/input/sparse-keymap.c
- </sect1>
- </chapter>
-
- <chapter id="spi">
- <title>Serial Peripheral Interface (SPI)</title>
- <para>
- SPI is the "Serial Peripheral Interface", widely used with
- embedded systems because it is a simple and efficient
- interface: basically a multiplexed shift register.
- Its three signal wires hold a clock (SCK, often in the range
- of 1-20 MHz), a "Master Out, Slave In" (MOSI) data line, and
- a "Master In, Slave Out" (MISO) data line.
- SPI is a full duplex protocol; for each bit shifted out the
- MOSI line (one per clock) another is shifted in on the MISO line.
- Those bits are assembled into words of various sizes on the
- way to and from system memory.
- An additional chipselect line is usually active-low (nCS);
- four signals are normally used for each peripheral, plus
- sometimes an interrupt.
- </para>
- <para>
- The SPI bus facilities listed here provide a generalized
- interface to declare SPI busses and devices, manage them
- according to the standard Linux driver model, and perform
- input/output operations.
- At this time, only "master" side interfaces are supported,
- where Linux talks to SPI peripherals and does not implement
- such a peripheral itself.
- (Interfaces to support implementing SPI slaves would
- necessarily look different.)
- </para>
- <para>
- The programming interface is structured around two kinds of driver,
- and two kinds of device.
- A "Controller Driver" abstracts the controller hardware, which may
- be as simple as a set of GPIO pins or as complex as a pair of FIFOs
- connected to dual DMA engines on the other side of the SPI shift
- register (maximizing throughput). Such drivers bridge between
- whatever bus they sit on (often the platform bus) and SPI, and
- expose the SPI side of their device as a
- <structname>struct spi_master</structname>.
- SPI devices are children of that master, represented as a
- <structname>struct spi_device</structname> and manufactured from
- <structname>struct spi_board_info</structname> descriptors which
- are usually provided by board-specific initialization code.
- A <structname>struct spi_driver</structname> is called a
- "Protocol Driver", and is bound to a spi_device using normal
- driver model calls.
- </para>
- <para>
- The I/O model is a set of queued messages. Protocol drivers
- submit one or more <structname>struct spi_message</structname>
- objects, which are processed and completed asynchronously.
- (There are synchronous wrappers, however.) Messages are
- built from one or more <structname>struct spi_transfer</structname>
- objects, each of which wraps a full duplex SPI transfer.
- A variety of protocol tweaking options are needed, because
- different chips adopt very different policies for how they
- use the bits transferred with SPI.
- </para>
-!Iinclude/linux/spi/spi.h
-!Fdrivers/spi/spi.c spi_register_board_info
-!Edrivers/spi/spi.c
- </chapter>
-
- <chapter id="i2c">
- <title>I<superscript>2</superscript>C and SMBus Subsystem</title>
-
- <para>
- I<superscript>2</superscript>C (or without fancy typography, "I2C")
- is an acronym for the "Inter-IC" bus, a simple bus protocol which is
- widely used where low data rate communications suffice.
- Since it's also a licensed trademark, some vendors use another
- name (such as "Two-Wire Interface", TWI) for the same bus.
- I2C only needs two signals (SCL for clock, SDA for data), conserving
- board real estate and minimizing signal quality issues.
- Most I2C devices use seven bit addresses, and bus speeds of up
- to 400 kHz; there's a high speed extension (3.4 MHz) that's not yet
- found wide use.
- I2C is a multi-master bus; open drain signaling is used to
- arbitrate between masters, as well as to handshake and to
- synchronize clocks from slower clients.
- </para>
-
- <para>
- The Linux I2C programming interfaces support only the master
- side of bus interactions, not the slave side.
- The programming interface is structured around two kinds of driver,
- and two kinds of device.
- An I2C "Adapter Driver" abstracts the controller hardware; it binds
- to a physical device (perhaps a PCI device or platform_device) and
- exposes a <structname>struct i2c_adapter</structname> representing
- each I2C bus segment it manages.
- On each I2C bus segment will be I2C devices represented by a
- <structname>struct i2c_client</structname>. Those devices will
- be bound to a <structname>struct i2c_driver</structname>,
- which should follow the standard Linux driver model.
- (At this writing, a legacy model is more widely used.)
- There are functions to perform various I2C protocol operations; at
- this writing all such functions are usable only from task context.
- </para>
-
- <para>
- The System Management Bus (SMBus) is a sibling protocol. Most SMBus
- systems are also I2C conformant. The electrical constraints are
- tighter for SMBus, and it standardizes particular protocol messages
- and idioms. Controllers that support I2C can also support most
- SMBus operations, but SMBus controllers don't support all the protocol
- options that an I2C controller will.
- There are functions to perform various SMBus protocol operations,
- either using I2C primitives or by issuing SMBus commands to
- i2c_adapter devices which don't support those I2C operations.
- </para>
-
-!Iinclude/linux/i2c.h
-!Fdrivers/i2c/i2c-boardinfo.c i2c_register_board_info
-!Edrivers/i2c/i2c-core.c
- </chapter>
-
- <chapter id="hsi">
- <title>High Speed Synchronous Serial Interface (HSI)</title>
-
- <para>
- High Speed Synchronous Serial Interface (HSI) is a
- serial interface mainly used for connecting application
- engines (APE) with cellular modem engines (CMT) in cellular
- handsets.
-
- HSI provides multiplexing for up to 16 logical channels,
- low-latency and full duplex communication.
- </para>
-
-!Iinclude/linux/hsi/hsi.h
-!Edrivers/hsi/hsi_core.c
- </chapter>
-
- <chapter id="pwm">
- <title>Pulse-Width Modulation (PWM)</title>
- <para>
- Pulse-width modulation is a modulation technique primarily used to
- control power supplied to electrical devices.
- </para>
- <para>
- The PWM framework provides an abstraction for providers and consumers
- of PWM signals. A controller that provides one or more PWM signals is
- registered as <structname>struct pwm_chip</structname>. Providers are
- expected to embed this structure in a driver-specific structure. This
- structure contains fields that describe a particular chip.
- </para>
- <para>
- A chip exposes one or more PWM signal sources, each of which exposed
- as a <structname>struct pwm_device</structname>. Operations can be
- performed on PWM devices to control the period, duty cycle, polarity
- and active state of the signal.
- </para>
- <para>
- Note that PWM devices are exclusive resources: they can always only be
- used by one consumer at a time.
- </para>
-!Iinclude/linux/pwm.h
-!Edrivers/pwm/core.c
- </chapter>
-
-</book>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 1f345da28ec5..5f042349f987 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -1,5 +1,5 @@
HOWTO do Linux kernel development
----------------------------------
+=================================
This is the be-all, end-all document on this topic. It contains
instructions on how to become a Linux kernel developer and how to learn
@@ -28,6 +28,7 @@ kernel development. Assembly (any architecture) is not required unless
you plan to do low-level development for that architecture. Though they
are not a good substitute for a solid C education and/or years of
experience, the following books are good for, if anything, reference:
+
- "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
- "Practical C Programming" by Steve Oualline [O'Reilly]
- "C: A Reference Manual" by Harbison and Steele [Prentice Hall]
@@ -64,7 +65,8 @@ people on the mailing lists are not lawyers, and you should not rely on
their statements on legal matters.
For common questions and answers about the GPL, please see:
- http://www.gnu.org/licenses/gpl-faq.html
+
+ https://www.gnu.org/licenses/gpl-faq.html
Documentation
@@ -82,96 +84,118 @@ linux-api@vger.kernel.org.
Here is a list of files that are in the kernel source tree that are
required reading:
+
README
This file gives a short background on the Linux kernel and describes
what is necessary to do to configure and build the kernel. People
who are new to the kernel should start here.
- Documentation/Changes
+ :ref:`Documentation/Changes <changes>`
This file gives a list of the minimum levels of various software
packages that are necessary to build and run the kernel
successfully.
- Documentation/CodingStyle
+ :ref:`Documentation/CodingStyle <codingstyle>`
This describes the Linux kernel coding style, and some of the
rationale behind it. All new code is expected to follow the
guidelines in this document. Most maintainers will only accept
patches if these rules are followed, and many people will only
review code if it is in the proper style.
- Documentation/SubmittingPatches
- Documentation/SubmittingDrivers
+ :ref:`Documentation/SubmittingPatches <submittingpatches>` and :ref:`Documentation/SubmittingDrivers <submittingdrivers>`
These files describe in explicit detail how to successfully create
and send a patch, including (but not limited to):
+
- Email contents
- Email format
- Who to send it to
+
Following these rules will not guarantee success (as all patches are
subject to scrutiny for content and style), but not following them
will almost always prevent it.
Other excellent descriptions of how to create patches properly are:
+
"The Perfect Patch"
- http://www.ozlabs.org/~akpm/stuff/tpp.txt
+ https://www.ozlabs.org/~akpm/stuff/tpp.txt
+
"Linux kernel patch submission format"
http://linux.yyz.us/patch-format.html
- Documentation/stable_api_nonsense.txt
+ :ref:`Documentation/stable_api_nonsense.txt <stable_api_nonsense>`
This file describes the rationale behind the conscious decision to
not have a stable API within the kernel, including things like:
+
- Subsystem shim-layers (for compatibility?)
- Driver portability between Operating Systems.
- Mitigating rapid change within the kernel source tree (or
preventing rapid change)
+
This document is crucial for understanding the Linux development
philosophy and is very important for people moving to Linux from
development on other Operating Systems.
- Documentation/SecurityBugs
+ :ref:`Documentation/SecurityBugs <securitybugs>`
If you feel you have found a security problem in the Linux kernel,
please follow the steps in this document to help notify the kernel
developers, and help solve the issue.
- Documentation/ManagementStyle
+ :ref:`Documentation/ManagementStyle <managementstyle>`
This document describes how Linux kernel maintainers operate and the
shared ethos behind their methodologies. This is important reading
for anyone new to kernel development (or anyone simply curious about
it), as it resolves a lot of common misconceptions and confusion
about the unique behavior of kernel maintainers.
- Documentation/stable_kernel_rules.txt
+ :ref:`Documentation/stable_kernel_rules.txt <stable_kernel_rules>`
This file describes the rules on how the stable kernel releases
happen, and what to do if you want to get a change into one of these
releases.
- Documentation/kernel-docs.txt
+ :ref:`Documentation/kernel-docs.txt <kernel_docs>`
A list of external documentation that pertains to kernel
development. Please consult this list if you do not find what you
are looking for within the in-kernel documentation.
- Documentation/applying-patches.txt
+ :ref:`Documentation/applying-patches.txt <applying_patches>`
A good introduction describing exactly what a patch is and how to
apply it to the different development branches of the kernel.
The kernel also has a large number of documents that can be
-automatically generated from the source code itself. This includes a
+automatically generated from the source code itself or from
+ReStructuredText (ReST) markup, like this one. This includes a
full description of the in-kernel API, and rules on how to handle
-locking properly. The documents will be created in the
-Documentation/DocBook/ directory and can be generated as PDF,
-Postscript, HTML, and man pages by running:
+locking properly.
+
+All such documents can be generated as PDF or HTML by running::
+
make pdfdocs
- make psdocs
make htmldocs
- make mandocs
+
respectively from the main kernel source directory.
+The documents that use ReST markup will be generated at Documentation/output.
+They can also be generated in LaTeX and ePub formats with::
+
+ make latexdocs
+ make epubdocs
+
+Currently, there are some documents written in DocBook that are in
+the process of conversion to ReST. Such documents will be created in the
+Documentation/DocBook/ directory and can also be generated as
+Postscript or man pages by running::
+
+ make psdocs
+ make mandocs
Becoming A Kernel Developer
---------------------------
If you do not know anything about Linux kernel development, you should
look at the Linux KernelNewbies project:
- http://kernelnewbies.org
+
+ https://kernelnewbies.org
+
It consists of a helpful mailing list where you can ask almost any type
of basic kernel development question (make sure to search the archives
first, before asking something that has already been answered in the
@@ -187,7 +211,9 @@ apply a patch.
If you do not know where you want to start, but you want to look for
some task to start doing to join into the kernel development community,
go to the Linux Kernel Janitor's project:
- http://kernelnewbies.org/KernelJanitors
+
+ https://kernelnewbies.org/KernelJanitors
+
It is a great place to start. It describes a list of relatively simple
problems that need to be cleaned up and fixed within the Linux kernel
source tree. Working with the developers in charge of this project, you
@@ -199,7 +225,8 @@ If you already have a chunk of code that you want to put into the kernel
tree, but need some help getting it in the proper form, the
kernel-mentors project was created to help you out with this. It is a
mailing list, and can be found at:
- http://selenic.com/mailman/listinfo/kernel-mentors
+
+ https://selenic.com/mailman/listinfo/kernel-mentors
Before making any actual modifications to the Linux kernel code, it is
imperative to understand how the code in question works. For this
@@ -209,6 +236,7 @@ tools. One such tool that is particularly recommended is the Linux
Cross-Reference project, which is able to present source code in a
self-referential, indexed webpage format. An excellent up-to-date
repository of the kernel code may be found at:
+
http://lxr.free-electrons.com/
@@ -218,6 +246,7 @@ The development process
Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
+
- main 4.x kernel tree
- 4.x.y -stable kernel tree
- 4.x -git kernel patches
@@ -227,14 +256,15 @@ branches. These different branches are:
4.x kernel tree
-----------------
4.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
+https://kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
process is as follows:
+
- As soon as a new kernel is released a two weeks window is open,
during this period of time maintainers can submit big diffs to
Linus, usually the patches that have already been included in the
-next kernel for a few weeks. The preferred way to submit big changes
is using git (the kernel's source management tool, more information
- can be found at http://git-scm.com/) but plain patches are also just
+ can be found at https://git-scm.com/) but plain patches are also just
fine.
- After two weeks a -rc1 kernel is released and it is now possible to push
only patches that do not include new features that could affect the
@@ -253,9 +283,10 @@ process is as follows:
It is worth mentioning what Andrew Morton wrote on the linux-kernel
mailing list about kernel releases:
- "Nobody knows when a kernel will be released, because it's
+
+ *"Nobody knows when a kernel will be released, because it's
released according to perceived bug status, not according to a
- preconceived timeline."
+ preconceived timeline."*
4.x.y -stable kernel tree
-------------------------
@@ -301,7 +332,7 @@ submission and other already ongoing work are avoided.
Most of these repositories are git trees, but there are also other SCMs
in use, or patch queues being published as quilt series. Addresses of
these subsystem repositories are listed in the MAINTAINERS file. Many
-of them can be browsed at http://git.kernel.org/.
+of them can be browsed at https://git.kernel.org/.
Before a proposed patch is committed to such a subsystem tree, it is
subject to review which primarily happens on mailing lists (see the
@@ -310,7 +341,7 @@ process is tracked with the tool patchwork. Patchwork offers a web
interface which shows patch postings, any comments on a patch or
revisions to it, and maintainers can mark patches as under review,
accepted, or rejected. Most of these patchwork sites are listed at
-http://patchwork.kernel.org/.
+https://patchwork.kernel.org/.
4.x -next kernel tree for integration tests
-------------------------------------------
@@ -318,7 +349,8 @@ Before updates from subsystem trees are merged into the mainline 4.x
tree, they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
pulled on an almost daily basis:
- http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
+
+ https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
This way, the -next kernel gives a summary outlook onto what will be
expected to go into the mainline kernel at the next merge period.
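
As a rough sketch of how a tester might follow -next locally (the remote
name is arbitrary and the tag is a placeholder; substitute a real
next-YYYYMMDD tag)::

	git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
	git fetch --tags linux-next
	git checkout -b next-test next-YYYYMMDD
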
@@ -328,10 +360,11 @@ Adventurous testers are very welcome to runtime-test the -next kernel.
Bug Reporting
-------------
-bugzilla.kernel.org is where the Linux kernel developers track kernel
+https://bugzilla.kernel.org is where the Linux kernel developers track kernel
bugs. Users are encouraged to report all bugs that they find in this
tool. For details on how to use the kernel bugzilla, please see:
- http://bugzilla.kernel.org/page.cgi?id=faq.html
+
+ https://bugzilla.kernel.org/page.cgi?id=faq.html
The file REPORTING-BUGS in the main kernel source directory has a good
template for how to report a possible kernel bug, and details what kind
@@ -349,13 +382,14 @@ your skills, and other developers will be aware of your presence. Fixing
bugs is one of the best ways to get merits among other developers, because
not many people like wasting time fixing other people's bugs.
-To work in the already reported bug reports, go to http://bugzilla.kernel.org.
+To work on the already reported bug reports, go to https://bugzilla.kernel.org.
If you want to be advised of the future bug reports, you can subscribe to the
bugme-new mailing list (only new bug reports are mailed here) or to the
bugme-janitor mailing list (every change in the bugzilla is mailed here)
- http://lists.linux-foundation.org/mailman/listinfo/bugme-new
- http://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
+ https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+
+ https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
@@ -365,10 +399,14 @@ Mailing lists
As some of the above documents describe, the majority of the core kernel
developers participate on the Linux Kernel Mailing list. Details on how
to subscribe and unsubscribe from the list can be found at:
+
http://vger.kernel.org/vger-lists.html#linux-kernel
+
There are archives of the mailing list on the web in many different
places. Use a search engine to find these archives. For example:
+
http://dir.gmane.org/gmane.linux.kernel
+
It is highly recommended that you search the archives about the topic
you want to bring up, before you post it to the list. A lot of things
already discussed in detail are only recorded at the mailing list
@@ -381,11 +419,13 @@ groups.
Many of the lists are hosted on kernel.org. Information on them can be
found at:
+
http://vger.kernel.org/vger-lists.html
Please remember to follow good behavioral habits when using the lists.
Though a bit cheesy, the following URL has some simple guidelines for
interacting with the list (or any list):
+
http://www.albion.com/netiquette/
If multiple people respond to your mail, the CC: list of recipients may
@@ -400,13 +440,14 @@ add your statements between the individual quoted sections instead of
writing at the top of the mail.
If you add patches to your mail, make sure they are plain readable text
-as stated in Documentation/SubmittingPatches. Kernel developers don't
-want to deal with attachments or compressed patches; they may want
-to comment on individual lines of your patch, which works only that way.
-Make sure you use a mail program that does not mangle spaces and tab
-characters. A good first test is to send the mail to yourself and try
-to apply your own patch by yourself. If that doesn't work, get your
-mail program fixed or change it until it works.
+as stated in Documentation/SubmittingPatches.
+Kernel developers don't want to deal with
+attachments or compressed patches; they may want to comment on
+individual lines of your patch, which works only that way. Make sure you
+use a mail program that does not mangle spaces and tab characters. A
+good first test is to send the mail to yourself and try to apply your
+own patch by yourself. If that doesn't work, get your mail program fixed
+or change it until it works.
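
One possible way to do that self-test, assuming git is available and the
message has been saved from the mail client as plain text (the file name
here is only an example)::

	# save the mail you sent to yourself as, e.g., mypatch.mbox, then:
	git apply --stat --check mypatch.mbox
	# or, without git:
	patch -p1 --dry-run < mypatch.mbox

If either command complains about corrupt patch lines or whitespace damage,
the mail setup still needs fixing.
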
Above all, please remember to show respect to other subscribers.
@@ -418,6 +459,7 @@ The goal of the kernel community is to provide the best possible kernel
there is. When you submit a patch for acceptance, it will be reviewed
on its technical merits and those alone. So, what should you be
expecting?
+
- criticism
- comments
- requests for change
@@ -432,6 +474,7 @@ If there are no responses to your posting, wait a few days and try
again, sometimes things get lost in the huge volume.
What should you not do?
+
- expect your patch to be accepted without question
- become defensive
- ignore comments
@@ -445,8 +488,8 @@ Remember, being wrong is acceptable as long as you are willing to work
toward a solution that is right.
It is normal that the answers to your first patch might simply be a list
-of a dozen things you should correct. This does _not_ imply that your
-patch will not be accepted, and it is _not_ meant against you
+of a dozen things you should correct. This does **not** imply that your
+patch will not be accepted, and it is **not** meant against you
personally. Simply correct all issues raised against your patch and
resend it.
@@ -457,7 +500,9 @@ Differences between the kernel community and corporate structures
The kernel community works differently than most traditional corporate
development environments. Here are a list of things that you can try to
do to avoid problems:
+
Good things to say regarding your proposed changes:
+
- "This solves multiple problems."
- "This deletes 2000 lines of code."
- "Here is a patch that explains what I am trying to describe."
@@ -466,6 +511,7 @@ do to avoid problems:
- "This increases performance on typical machines..."
Bad things you should avoid saying:
+
- "We did it this way in AIX/ptx/Solaris, so therefore it must be
good..."
- "I've being doing this for 20 years, so..."
@@ -527,17 +573,18 @@ The reasons for breaking things up are the following:
and simplify (or simply re-order) patches before submitting them.
Here is an analogy from kernel developer Al Viro:
- "Think of a teacher grading homework from a math student. The
+
+ *"Think of a teacher grading homework from a math student. The
teacher does not want to see the student's trials and errors
before they came up with the solution. They want to see the
cleanest, most elegant answer. A good student knows this, and
would never submit her intermediate work before the final
- solution."
+ solution.*
- The same is true of kernel development. The maintainers and
+ *The same is true of kernel development. The maintainers and
reviewers do not want to see the thought process behind the
solution to the problem one is solving. They want to see a
- simple and elegant solution."
+ simple and elegant solution."*
It may be challenging to keep the balance between presenting an elegant
solution and working together with the community and discussing your
@@ -565,6 +612,7 @@ When sending in your patches, pay special attention to what you say in
the text in your email. This information will become the ChangeLog
information for the patch, and will be preserved for everyone to see for
all time. It should describe the patch completely, containing:
+
- why the change is necessary
- the overall design approach in the patch
- implementation details
@@ -572,12 +620,11 @@ all time. It should describe the patch completely, containing:
For more details on what this should all look like, please see the
ChangeLog section of the document:
+
"The Perfect Patch"
http://www.ozlabs.org/~akpm/stuff/tpp.txt
-
-
All of these things are sometimes very hard to do. It can take years to
perfect these practices (if at all). It's a continuous process of
improvement that requires a lot of patience and determination. But
@@ -588,8 +635,9 @@ start exactly where you are now.
----------
+
Thanks to Paolo Ciarrocchi who allowed the "Development Process"
-(http://lwn.net/Articles/94386/) section
+(https://lwn.net/Articles/94386/) section
to be based on text he had written, and to Randy Dunlap and Gerrit
Huizenga for some of the list of things you should and should not say.
Also thanks to Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers,
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
index 857f1e273418..92deea30b183 100644
--- a/Documentation/Makefile.sphinx
+++ b/Documentation/Makefile.sphinx
@@ -5,6 +5,9 @@
# You can set these variables from the command line.
SPHINXBUILD = sphinx-build
SPHINXOPTS =
+SPHINXDIRS = .
+_SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(srctree)/Documentation/*/conf.py))
+SPHINX_CONF = conf.py
PAPER =
BUILDDIR = $(obj)/output
@@ -25,38 +28,62 @@ else ifneq ($(DOCBOOKS),)
else # HAVE_SPHINX
-# User-friendly check for rst2pdf
-HAVE_RST2PDF := $(shell if python -c "import rst2pdf" >/dev/null 2>&1; then echo 1; else echo 0; fi)
+# User-friendly check for pdflatex
+HAVE_PDFLATEX := $(shell if which xelatex >/dev/null 2>&1; then echo 1; else echo 0; fi)
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
KERNELDOC = $(srctree)/scripts/kernel-doc
KERNELDOC_CONF = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
-ALLSPHINXOPTS = -D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) -d $(BUILDDIR)/.doctrees $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) -c $(srctree)/$(src) $(SPHINXOPTS) $(srctree)/$(src)
+ALLSPHINXOPTS = $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) $(SPHINXOPTS)
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-quiet_cmd_sphinx = SPHINX $@
- cmd_sphinx = BUILDDIR=$(BUILDDIR) $(SPHINXBUILD) -b $2 $(ALLSPHINXOPTS) $(BUILDDIR)/$2
+# commands; the 'cmd' from scripts/Kbuild.include is not *loopable*
+loop_cmd = $(echo-cmd) $(cmd_$(1))
+
+# $2 sphinx builder e.g. "html"
+# $3 name of the build subfolder / e.g. "media", used as:
+# * dest folder relative to $(BUILDDIR) and
+# * cache folder relative to $(BUILDDIR)/.doctrees
+# $4 dest subfolder e.g. "man" for man pages at media/man
+# $5 reST source folder relative to $(srctree)/$(src),
+# e.g. "media" for the linux-tv book-set at ./Documentation/media
+
+quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4);
+ cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media all;\
+ BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+ $(SPHINXBUILD) \
+ -b $2 \
+ -c $(abspath $(srctree)/$(src)) \
+ -d $(abspath $(BUILDDIR)/.doctrees/$3) \
+ -D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) \
+ $(ALLSPHINXOPTS) \
+ $(abspath $(srctree)/$(src)/$5) \
+ $(abspath $(BUILDDIR)/$3/$4);
htmldocs:
- $(MAKE) BUILDDIR=$(BUILDDIR) -f $(srctree)/Documentation/media/Makefile $@
- $(call cmd,sphinx,html)
+ @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,html,$(var),,$(var)))
-pdfdocs:
-ifeq ($(HAVE_RST2PDF),0)
- $(warning The Python 'rst2pdf' module was not found. Make sure you have the module installed to produce PDF output.)
+latexdocs:
+ifeq ($(HAVE_PDFLATEX),0)
+ $(warning The 'xelatex' command was not found. Make sure you have it installed and in PATH to produce PDF output.)
@echo " SKIP Sphinx $@ target."
-else # HAVE_RST2PDF
- $(call cmd,sphinx,pdf)
-endif # HAVE_RST2PDF
+else # HAVE_PDFLATEX
+ @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,latex,$(var),latex,$(var)))
+endif # HAVE_PDFLATEX
+
+pdfdocs: latexdocs
+ifneq ($(HAVE_PDFLATEX),0)
+ $(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=xelatex LATEXOPTS="-interaction=nonstopmode" -C $(BUILDDIR)/$(var)/latex)
+endif # HAVE_PDFLATEX
epubdocs:
- $(call cmd,sphinx,epub)
+ @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,epub,$(var),epub,$(var)))
xmldocs:
- $(call cmd,sphinx,xml)
+ @$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,xml,$(var),xml,$(var)))
# no-ops for the Sphinx toolchain
sgmldocs:
@@ -72,7 +99,14 @@ endif # HAVE_SPHINX
dochelp:
@echo ' Linux kernel internal documentation in different formats (Sphinx):'
@echo ' htmldocs - HTML'
+ @echo ' latexdocs - LaTeX'
@echo ' pdfdocs - PDF'
@echo ' epubdocs - EPUB'
@echo ' xmldocs - XML'
@echo ' cleandocs - clean all generated files'
+ @echo
+ @echo ' make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
+ @echo ' valid values for SPHINXDIRS are: $(_SPHINXDIRS)'
+ @echo
+ @echo ' make SPHINX_CONF={conf-file} [target] use *additional* sphinx-build'
+ @echo ' configuration. This is e.g. useful to build with nit-picking config.'
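
As a usage illustration for the two new variables (the directory name and
the configuration file below are examples, not something required by the
Makefile)::

	make SPHINXDIRS="media" htmldocs              # build only the Documentation/media book
	make SPHINXDIRS=". media" pdfdocs             # top-level docs plus the media book
	make SPHINX_CONF=my-nitpick-conf.py htmldocs  # overlay an additional sphinx configuration
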
diff --git a/Documentation/ManagementStyle b/Documentation/ManagementStyle
index a211ee8d8b44..dea2e66c9a10 100644
--- a/Documentation/ManagementStyle
+++ b/Documentation/ManagementStyle
@@ -1,10 +1,12 @@
+.. _managementstyle:
- Linux kernel management style
+Linux kernel management style
+=============================
This is a short document describing the preferred (or made up, depending
on who you ask) management style for the linux kernel. It's meant to
mirror the CodingStyle document to some degree, and mainly written to
-avoid answering (*) the same (or similar) questions over and over again.
+avoid answering [#f1]_ the same (or similar) questions over and over again.
Management style is very personal and much harder to quantify than
simple coding style rules, so this document may or may not have anything
@@ -14,50 +16,52 @@ might not actually be true. You'll have to decide for yourself.
Btw, when talking about "kernel manager", it's all about the technical
lead persons, not the people who do traditional management inside
companies. If you sign purchase orders or you have any clue about the
-budget of your group, you're almost certainly not a kernel manager.
-These suggestions may or may not apply to you.
+budget of your group, you're almost certainly not a kernel manager.
+These suggestions may or may not apply to you.
First off, I'd suggest buying "Seven Habits of Highly Effective
-People", and NOT read it. Burn it, it's a great symbolic gesture.
+People", and NOT read it. Burn it, it's a great symbolic gesture.
-(*) This document does so not so much by answering the question, but by
-making it painfully obvious to the questioner that we don't have a clue
-to what the answer is.
+.. [#f1] This document does so not so much by answering the question, but by
+ making it painfully obvious to the questioner that we don't have a clue
+ to what the answer is.
Anyway, here goes:
+.. _decisions:
- Chapter 1: Decisions
+1) Decisions
+------------
Everybody thinks managers make decisions, and that decision-making is
important. The bigger and more painful the decision, the bigger the
manager must be to make it. That's very deep and obvious, but it's not
-actually true.
+actually true.
-The name of the game is to _avoid_ having to make a decision. In
+The name of the game is to **avoid** having to make a decision. In
particular, if somebody tells you "choose (a) or (b), we really need you
to decide on this", you're in trouble as a manager. The people you
manage had better know the details better than you, so if they come to
you for a technical decision, you're screwed. You're clearly not
-competent to make that decision for them.
+competent to make that decision for them.
(Corollary:if the people you manage don't know the details better than
-you, you're also screwed, although for a totally different reason.
-Namely that you are in the wrong job, and that _they_ should be managing
-your brilliance instead).
+you, you're also screwed, although for a totally different reason.
+Namely that you are in the wrong job, and that **they** should be managing
+your brilliance instead).
-So the name of the game is to _avoid_ decisions, at least the big and
+So the name of the game is to **avoid** decisions, at least the big and
painful ones. Making small and non-consequential decisions is fine, and
makes you look like you know what you're doing, so what a kernel manager
needs to do is to turn the big and painful ones into small things where
-nobody really cares.
+nobody really cares.
It helps to realize that the key difference between a big decision and a
small one is whether you can fix your decision afterwards. Any decision
can be made small by just always making sure that if you were wrong (and
-you _will_ be wrong), you can always undo the damage later by
+you **will** be wrong), you can always undo the damage later by
backtracking. Suddenly, you get to be doubly managerial for making
-_two_ inconsequential decisions - the wrong one _and_ the right one.
+**two** inconsequential decisions - the wrong one **and** the right one.
And people will even see that as true leadership (*cough* bullshit
*cough*).
@@ -65,10 +69,10 @@ And people will even see that as true leadership (*cough* bullshit
Thus the key to avoiding big decisions becomes to just avoiding to do
things that can't be undone. Don't get ushered into a corner from which
you cannot escape. A cornered rat may be dangerous - a cornered manager
-is just pitiful.
+is just pitiful.
It turns out that since nobody would be stupid enough to ever really let
-a kernel manager have huge fiscal responsibility _anyway_, it's usually
+a kernel manager have huge fiscal responsibility **anyway**, it's usually
fairly easy to backtrack. Since you're not going to be able to waste
huge amounts of money that you might not be able to repay, the only
thing you can backtrack on is a technical decision, and there
@@ -76,113 +80,118 @@ back-tracking is very easy: just tell everybody that you were an
incompetent nincompoop, say you're sorry, and undo all the worthless
work you had people work on for the last year. Suddenly the decision
you made a year ago wasn't a big decision after all, since it could be
-easily undone.
+easily undone.
It turns out that some people have trouble with this approach, for two
reasons:
+
- admitting you were an idiot is harder than it looks. We all like to
maintain appearances, and coming out in public to say that you were
- wrong is sometimes very hard indeed.
+ wrong is sometimes very hard indeed.
- having somebody tell you that what you worked on for the last year
wasn't worthwhile after all can be hard on the poor lowly engineers
- too, and while the actual _work_ was easy enough to undo by just
+ too, and while the actual **work** was easy enough to undo by just
deleting it, you may have irrevocably lost the trust of that
engineer. And remember: "irrevocable" was what we tried to avoid in
the first place, and your decision ended up being a big one after
- all.
+ all.
Happily, both of these reasons can be mitigated effectively by just
admitting up-front that you don't have a friggin' clue, and telling
people ahead of the fact that your decision is purely preliminary, and
might be the wrong thing. You should always reserve the right to change
-your mind, and make people very _aware_ of that. And it's much easier
-to admit that you are stupid when you haven't _yet_ done the really
+your mind, and make people very **aware** of that. And it's much easier
+to admit that you are stupid when you haven't **yet** done the really
stupid thing.
Then, when it really does turn out to be stupid, people just roll their
-eyes and say "Oops, he did it again".
+eyes and say "Oops, he did it again".
This preemptive admission of incompetence might also make the people who
actually do the work also think twice about whether it's worth doing or
-not. After all, if _they_ aren't certain whether it's a good idea, you
+not. After all, if **they** aren't certain whether it's a good idea, you
sure as hell shouldn't encourage them by promising them that what they
work on will be included. Make them at least think twice before they
-embark on a big endeavor.
+embark on a big endeavor.
Remember: they'd better know more about the details than you do, and
they usually already think they have the answer to everything. The best
thing you can do as a manager is not to instill confidence, but rather a
-healthy dose of critical thinking on what they do.
+healthy dose of critical thinking on what they do.
Btw, another way to avoid a decision is to plaintively just whine "can't
we just do both?" and look pitiful. Trust me, it works. If it's not
clear which approach is better, they'll eventually figure it out. The
answer may end up being that both teams get so frustrated by the
-situation that they just give up.
+situation that they just give up.
That may sound like a failure, but it's usually a sign that there was
something wrong with both projects, and the reason the people involved
couldn't decide was that they were both wrong. You end up coming up
smelling like roses, and you avoided yet another decision that you could
-have screwed up on.
+have screwed up on.
- Chapter 2: People
+2) People
+---------
Most people are idiots, and being a manager means you'll have to deal
-with it, and perhaps more importantly, that _they_ have to deal with
-_you_.
+with it, and perhaps more importantly, that **they** have to deal with
+**you**.
It turns out that while it's easy to undo technical mistakes, it's not
as easy to undo personality disorders. You just have to live with
-theirs - and yours.
+theirs - and yours.
However, in order to prepare yourself as a kernel manager, it's best to
remember not to burn any bridges, bomb any innocent villagers, or
alienate too many kernel developers. It turns out that alienating people
is fairly easy, and un-alienating them is hard. Thus "alienating"
immediately falls under the heading of "not reversible", and becomes a
-no-no according to Chapter 1.
+no-no according to :ref:`decisions`.
There's just a few simple rules here:
+
(1) don't call people d*ckheads (at least not in public)
(2) learn how to apologize when you forgot rule (1)
The problem with #1 is that it's very easy to do, since you can say
-"you're a d*ckhead" in millions of different ways (*), sometimes without
+"you're a d*ckhead" in millions of different ways [#f2]_, sometimes without
even realizing it, and almost always with a white-hot conviction that
-you are right.
+you are right.
And the more convinced you are that you are right (and let's face it,
-you can call just about _anybody_ a d*ckhead, and you often _will_ be
-right), the harder it ends up being to apologize afterwards.
+you can call just about **anybody** a d*ckhead, and you often **will** be
+right), the harder it ends up being to apologize afterwards.
To solve this problem, you really only have two options:
+
- get really good at apologies
- spread the "love" out so evenly that nobody really ends up feeling
like they get unfairly targeted. Make it inventive enough, and they
- might even be amused.
+ might even be amused.
The option of being unfailingly polite really doesn't exist. Nobody will
trust somebody who is so clearly hiding his true character.
-(*) Paul Simon sang "Fifty Ways to Leave Your Lover", because quite
-frankly, "A Million Ways to Tell a Developer He Is a D*ckhead" doesn't
-scan nearly as well. But I'm sure he thought about it.
+.. [#f2] Paul Simon sang "Fifty Ways to Leave Your Lover", because quite
+ frankly, "A Million Ways to Tell a Developer He Is a D*ckhead" doesn't
+ scan nearly as well. But I'm sure he thought about it.
- Chapter 3: People II - the Good Kind
+3) People II - the Good Kind
+----------------------------
While it turns out that most people are idiots, the corollary to that is
sadly that you are one too, and that while we can all bask in the secure
knowledge that we're better than the average person (let's face it,
nobody ever believes that they're average or below-average), we should
also admit that we're not the sharpest knife around, and there will be
-other people that are less of an idiot than you are.
+other people that are less of an idiot than you are.
-Some people react badly to smart people. Others take advantage of them.
+Some people react badly to smart people. Others take advantage of them.
-Make sure that you, as a kernel maintainer, are in the second group.
+Make sure that you, as a kernel maintainer, are in the second group.
Suck up to them, because they are the people who will make your job
easier. In particular, they'll be able to make your decisions for you,
which is what the game is all about.
@@ -191,7 +200,7 @@ So when you find somebody smarter than you are, just coast along. Your
management responsibilities largely become ones of saying "Sounds like a
good idea - go wild", or "That sounds good, but what about xxx?". The
second version in particular is a great way to either learn something
-new about "xxx" or seem _extra_ managerial by pointing out something the
+new about "xxx" or seem **extra** managerial by pointing out something the
smarter person hadn't thought about. In either case, you win.
One thing to look out for is to realize that greatness in one area does
@@ -199,47 +208,49 @@ not necessarily translate to other areas. So you might prod people in
specific directions, but let's face it, they might be good at what they
do, and suck at everything else. The good news is that people tend to
naturally gravitate back to what they are good at, so it's not like you
-are doing something irreversible when you _do_ prod them in some
+are doing something irreversible when you **do** prod them in some
direction, just don't push too hard.
- Chapter 4: Placing blame
+4) Placing blame
+----------------
Things will go wrong, and people want somebody to blame. Tag, you're it.
It's not actually that hard to accept the blame, especially if people
-kind of realize that it wasn't _all_ your fault. Which brings us to the
+kind of realize that it wasn't **all** your fault. Which brings us to the
best way of taking the blame: do it for another guy. You'll feel good
for taking the fall, he'll feel good about not getting blamed, and the
guy who lost his whole 36GB porn-collection because of your incompetence
will grudgingly admit that you at least didn't try to weasel out of it.
Then make the developer who really screwed up (if you can find him) know
-_in_private_ that he screwed up. Not just so he can avoid it in the
+**in_private** that he screwed up. Not just so he can avoid it in the
future, but so that he knows he owes you one. And, perhaps even more
importantly, he's also likely the person who can fix it. Because, let's
-face it, it sure ain't you.
+face it, it sure ain't you.
-Taking the blame is also why you get to be manager in the first place.
+Taking the blame is also why you get to be manager in the first place.
It's part of what makes people trust you, and allow you the potential
glory, because you're the one who gets to say "I screwed up". And if
you've followed the previous rules, you'll be pretty good at saying that
-by now.
+by now.
- Chapter 5: Things to avoid
+5) Things to avoid
+------------------
There's one thing people hate even more than being called "d*ckhead",
and that is being called a "d*ckhead" in a sanctimonious voice. The
first you can apologize for, the second one you won't really get the
chance. They likely will no longer be listening even if you otherwise
-do a good job.
+do a good job.
We all think we're better than anybody else, which means that when
-somebody else puts on airs, it _really_ rubs us the wrong way. You may
+somebody else puts on airs, it **really** rubs us the wrong way. You may
be morally and intellectually superior to everybody around you, but
-don't try to make it too obvious unless you really _intend_ to irritate
-somebody (*).
+don't try to make it too obvious unless you really **intend** to irritate
+somebody [#f3]_.
Similarly, don't be too polite or subtle about things. Politeness easily
ends up going overboard and hiding the problem, and as they say, "On the
@@ -251,15 +262,16 @@ Some humor can help pad both the bluntness and the moralizing. Going
overboard to the point of being ridiculous can drive a point home
without making it painful to the recipient, who just thinks you're being
silly. It can thus help get through the personal mental block we all
-have about criticism.
+have about criticism.
-(*) Hint: internet newsgroups that are not directly related to your work
-are great ways to take out your frustrations at other people. Write
-insulting posts with a sneer just to get into a good flame every once in
-a while, and you'll feel cleansed. Just don't crap too close to home.
+.. [#f3] Hint: internet newsgroups that are not directly related to your work
+ are great ways to take out your frustrations at other people. Write
+ insulting posts with a sneer just to get into a good flame every once in
+ a while, and you'll feel cleansed. Just don't crap too close to home.
- Chapter 6: Why me?
+6) Why me?
+----------
Since your main responsibility seems to be to take the blame for other
people's mistakes, and make it painfully obvious to everybody else that
@@ -268,9 +280,9 @@ first place?
First off, while you may or may not get screaming teenage girls (or
boys, let's not be judgmental or sexist here) knocking on your dressing
-room door, you _will_ get an immense feeling of personal accomplishment
+room door, you **will** get an immense feeling of personal accomplishment
for being "in charge". Never mind the fact that you're really leading
by trying to keep up with everybody else and running after them as fast
-as you can. Everybody will still think you're the person in charge.
+as you can. Everybody will still think you're the person in charge.
It's a great job if you can hack it.
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index b4987c0bcb20..ea8cafba255c 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -49,25 +49,17 @@ depends on CONFIG_PCIEPORTBUS, so pls. set CONFIG_PCIEPORTBUS=y and
CONFIG_PCIEAER = y.
2.2 Load PCI Express AER Root Driver
-There is a case where a system has AER support in BIOS. Enabling the AER
-Root driver and having AER support in BIOS may result unpredictable
-behavior. To avoid this conflict, a successful load of the AER Root driver
-requires ACPI _OSC support in the BIOS to allow the AER Root driver to
-request for native control of AER. See the PCI FW 3.0 Specification for
-details regarding OSC usage. Currently, lots of firmwares don't provide
-_OSC support while they use PCI Express. To support such firmwares,
-forceload, a parameter of type bool, could enable AER to continue to
-be initiated although firmwares have no _OSC support. To enable the
-walkaround, pls. add aerdriver.forceload=y to kernel boot parameter line
-when booting kernel. Note that forceload=n by default.
-
-nosourceid, another parameter of type bool, can be used when broken
-hardware (mostly chipsets) has root ports that cannot obtain the reporting
-source ID. nosourceid=n by default.
+
+Some systems have AER support in firmware. Enabling Linux AER support at
+the same time the firmware handles AER may result in unpredictable
+behavior. Therefore, Linux does not handle AER events unless the firmware
+grants AER control to the OS via the ACPI _OSC method. See the PCI FW 3.0
+Specification for details regarding _OSC usage.
2.3 AER error output
-When a PCI-E AER error is captured, an error message will be outputted to
-console. If it's a correctable error, it is outputted as a warning.
+
+When a PCIe AER error is captured, an error message will be output to the
+console. If it's a correctable error, it is output as a warning.
Otherwise, it is printed as an error. So users can choose a different
log level to filter out correctable error messages.
diff --git a/Documentation/SecurityBugs b/Documentation/SecurityBugs
index a660d494c8ed..342d769834f6 100644
--- a/Documentation/SecurityBugs
+++ b/Documentation/SecurityBugs
@@ -1,9 +1,15 @@
+.. _securitybugs:
+
+Security bugs
+=============
+
Linux kernel developers take security very seriously. As such, we'd
like to know when a security bug is found so that it can be fixed and
disclosed as quickly as possible. Please report security bugs to the
Linux kernel security team.
1) Contact
+----------
The Linux kernel security team can be contacted by email at
<security@kernel.org>. This is a private list of security officers
@@ -18,6 +24,7 @@ Any exploit code is very helpful and will not be released without
consent from the reporter unless it has already been made public.
2) Disclosure
+-------------
The goal of the Linux kernel security team is to work with the
bug submitter to bug resolution as well as disclosure. We prefer
@@ -33,6 +40,7 @@ to a few weeks. As a basic default policy, we expect report date to
disclosure date to be on the order of 7 days.
3) Non-disclosure agreements
+----------------------------
The Linux kernel security team is not a formal body and therefore unable
to enter any non-disclosure agreements.
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 2b7e32dfe00d..894289b22b15 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -1,109 +1,120 @@
+.. _submitchecklist:
+
Linux Kernel patch submission checklist
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here are some basic things that developers should do if they want to see their
kernel patch submissions accepted more quickly.
These are all above and beyond the documentation that is provided in
-Documentation/SubmittingPatches and elsewhere regarding submitting Linux
-kernel patches.
+:ref:`Documentation/SubmittingPatches <submittingpatches>`
+and elsewhere regarding submitting Linux kernel patches.
-1: If you use a facility then #include the file that defines/declares
+1) If you use a facility then #include the file that defines/declares
that facility. Don't depend on other header files pulling in ones
that you use.
-2: Builds cleanly with applicable or modified CONFIG options =y, =m, and
- =n. No gcc warnings/errors, no linker warnings/errors.
+2) Builds cleanly:
+
+ a) with applicable or modified ``CONFIG`` options ``=y``, ``=m``, and
+ ``=n``. No ``gcc`` warnings/errors, no linker warnings/errors.
-2b: Passes allnoconfig, allmodconfig
+ b) Passes ``allnoconfig``, ``allmodconfig``
-2c: Builds successfully when using O=builddir
+ c) Builds successfully when using ``O=builddir``
-3: Builds on multiple CPU architectures by using local cross-compile tools
+3) Builds on multiple CPU architectures by using local cross-compile tools
or some other build farm.
-4: ppc64 is a good architecture for cross-compilation checking because it
- tends to use `unsigned long' for 64-bit quantities.
+4) ppc64 is a good architecture for cross-compilation checking because it
+ tends to use ``unsigned long`` for 64-bit quantities.
-5: Check your patch for general style as detailed in
- Documentation/CodingStyle. Check for trivial violations with the
- patch style checker prior to submission (scripts/checkpatch.pl).
+5) Check your patch for general style as detailed in
+ :ref:`Documentation/CodingStyle <codingstyle>`.
+ Check for trivial violations with the patch style checker prior to
+ submission (``scripts/checkpatch.pl``).
You should be able to justify all violations that remain in
your patch.
-6: Any new or modified CONFIG options don't muck up the config menu.
+6) Any new or modified ``CONFIG`` options don't muck up the config menu.
-7: All new Kconfig options have help text.
+7) All new ``Kconfig`` options have help text.
-8: Has been carefully reviewed with respect to relevant Kconfig
+8) Has been carefully reviewed with respect to relevant ``Kconfig``
combinations. This is very hard to get right with testing -- brainpower
pays off here.
-9: Check cleanly with sparse.
+9) Check cleanly with sparse.
+
+10) Use ``make checkstack`` and ``make namespacecheck`` and fix any problems
+ that they find.
+
+ .. note::
-10: Use 'make checkstack' and 'make namespacecheck' and fix any problems
- that they find. Note: checkstack does not point out problems explicitly,
- but any one function that uses more than 512 bytes on the stack is a
- candidate for change.
+ ``checkstack`` does not point out problems explicitly,
+ but any one function that uses more than 512 bytes on the stack is a
+ candidate for change.
-11: Include kernel-doc to document global kernel APIs. (Not required for
- static functions, but OK there also.) Use 'make htmldocs' or 'make
- mandocs' to check the kernel-doc and fix any issues.
+11) Include :ref:`kernel-doc <kernel_doc>` to document global kernel APIs.
+ (Not required for static functions, but OK there also.) Use
+ ``make htmldocs`` or ``make pdfdocs`` to check the
+ :ref:`kernel-doc <kernel_doc>` and fix any issues.
-12: Has been tested with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
- CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
- CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_ATOMIC_SLEEP, CONFIG_PROVE_RCU
- and CONFIG_DEBUG_OBJECTS_RCU_HEAD all simultaneously enabled.
+12) Has been tested with ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+ ``CONFIG_DEBUG_SLAB``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
+ ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
+ ``CONFIG_PROVE_RCU`` and ``CONFIG_DEBUG_OBJECTS_RCU_HEAD`` all
+ simultaneously enabled.
-13: Has been build- and runtime tested with and without CONFIG_SMP and
- CONFIG_PREEMPT.
+13) Has been build- and runtime tested with and without ``CONFIG_SMP`` and
+ ``CONFIG_PREEMPT``.
-14: If the patch affects IO/Disk, etc: has been tested with and without
- CONFIG_LBDAF.
+14) If the patch affects IO/Disk, etc: has been tested with and without
+ ``CONFIG_LBDAF``.
-15: All codepaths have been exercised with all lockdep features enabled.
+15) All codepaths have been exercised with all lockdep features enabled.
-16: All new /proc entries are documented under Documentation/
+16) All new ``/proc`` entries are documented under ``Documentation/``
-17: All new kernel boot parameters are documented in
- Documentation/kernel-parameters.txt.
+17) All new kernel boot parameters are documented in
+ ``Documentation/kernel-parameters.txt``.
-18: All new module parameters are documented with MODULE_PARM_DESC()
+18) All new module parameters are documented with ``MODULE_PARM_DESC()``
-19: All new userspace interfaces are documented in Documentation/ABI/.
- See Documentation/ABI/README for more information.
+19) All new userspace interfaces are documented in ``Documentation/ABI/``.
+ See ``Documentation/ABI/README`` for more information.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
-20: Check that it all passes `make headers_check'.
+20) Check that it all passes ``make headers_check``.
-21: Has been checked with injection of at least slab and page-allocation
- failures. See Documentation/fault-injection/.
+21) Has been checked with injection of at least slab and page-allocation
+ failures. See ``Documentation/fault-injection/``.
If the new code is substantial, addition of subsystem-specific fault
injection might be appropriate.
-22: Newly-added code has been compiled with `gcc -W' (use "make
- EXTRA_CFLAGS=-W"). This will generate lots of noise, but is good for
- finding bugs like "warning: comparison between signed and unsigned".
+22) Newly-added code has been compiled with ``gcc -W`` (use
+ ``make EXTRA_CFLAGS=-W``). This will generate lots of noise, but is good
+ for finding bugs like "warning: comparison between signed and unsigned".
-23: Tested after it has been merged into the -mm patchset to make sure
+23) Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
-24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
- source code that explains the logic of what they are doing and why.
+24) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
+ comment in the source code that explains the logic of what they are doing
+ and why.
-25: If any ioctl's are added by the patch, then also update
- Documentation/ioctl/ioctl-number.txt.
+25) If any ioctl's are added by the patch, then also update
+ ``Documentation/ioctl/ioctl-number.txt``.
-26: If your modified source code depends on or uses any of the kernel
- APIs or features that are related to the following kconfig symbols,
- then test multiple builds with the related kconfig symbols disabled
- and/or =m (if that option is available) [not all of these at the
+26) If your modified source code depends on or uses any of the kernel
+ APIs or features that are related to the following ``Kconfig`` symbols,
+ then test multiple builds with the related ``Kconfig`` symbols disabled
+ and/or ``=m`` (if that option is available) [not all of these at the
same time, just various/random combinations of them]:
- CONFIG_SMP, CONFIG_SYSFS, CONFIG_PROC_FS, CONFIG_INPUT, CONFIG_PCI,
- CONFIG_BLOCK, CONFIG_PM, CONFIG_MAGIC_SYSRQ,
- CONFIG_NET, CONFIG_INET=n (but latter with CONFIG_NET=y)
+ ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``, ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+ ``CONFIG_NET``, ``CONFIG_INET=n`` (but latter with ``CONFIG_NET=y``).
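
For orientation, a few of the checks above (items 2b, 9, 20 and 22) correspond
roughly to the following commands run from the top of the source tree; this is
only a sketch, the authoritative details are in the documentation referenced by
each item::

	make allmodconfig && make       # item 2b: build coverage
	make C=1                        # item 9: sparse on re-compiled files
	make C=2                        # item 9: sparse on all source files
	make headers_check              # item 20
	make EXTRA_CFLAGS=-W            # item 22: extra gcc warnings
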
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 31d372609ac0..252b77a23fad 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -1,5 +1,7 @@
+.. _submittingdrivers:
+
Submitting Drivers For The Linux Kernel
----------------------------------------
+=======================================
This document is intended to explain how to submit device drivers to the
various kernel trees. Note that if you are interested in video card drivers
@@ -38,42 +40,48 @@ Linux 2.4:
maintainer does not respond or you cannot find the appropriate
maintainer then please contact Willy Tarreau <w@1wt.eu>.
-Linux 2.6:
+Linux 2.6 and upper:
The same rules apply as 2.4 except that you should follow linux-kernel
- to track changes in API's. The final contact point for Linux 2.6
+ to track changes in APIs. The final contact point for Linux 2.6+
submissions is Andrew Morton.
What Criteria Determine Acceptance
----------------------------------
-Licensing: The code must be released to us under the
+Licensing:
+ The code must be released to us under the
GNU General Public License. We don't insist on any kind
of exclusive GPL licensing, and if you wish the driver
to be useful to other communities such as BSD you may well
wish to release under multiple licenses.
See accepted licenses at include/linux/module.h
-Copyright: The copyright owner must agree to use of GPL.
+Copyright:
+ The copyright owner must agree to use of GPL.
It's best if the submitter and copyright owner
are the same person/entity. If not, the name of
the person/entity authorizing use of GPL should be
listed in case it's necessary to verify the will of
the copyright owner.
-Interfaces: If your driver uses existing interfaces and behaves like
+Interfaces:
+ If your driver uses existing interfaces and behaves like
other drivers in the same class it will be much more likely
to be accepted than if it invents gratuitous new ones.
If you need to implement a common API over Linux and NT
drivers do it in userspace.
-Code: Please use the Linux style of code formatting as documented
- in Documentation/CodingStyle. If you have sections of code
+Code:
+ Please use the Linux style of code formatting as documented
+ in :ref:`Documentation/CodingStyle <codingstyle>`.
+ If you have sections of code
that need to be in other formats, for example because they
are shared with a windows driver kit and you want to
maintain them just once separate them out nicely and note
this fact.
-Portability: Pointers are not always 32bits, not all computers are little
+Portability:
+ Pointers are not always 32bits, not all computers are little
endian, people do not all have floating point and you
shouldn't use inline x86 assembler in your driver without
careful thought. Pure x86 drivers generally are not popular.
@@ -81,12 +89,14 @@ Portability: Pointers are not always 32bits, not all computers are little
but it is easy to make sure the code can easily be made
portable.
-Clarity: It helps if anyone can see how to fix the driver. It helps
+Clarity:
+ It helps if anyone can see how to fix the driver. It helps
you because you get patches not bug reports. If you submit a
driver that intentionally obfuscates how the hardware works
it will go in the bitbucket.
-PM support: Since Linux is used on many portable and desktop systems, your
+PM support:
+ Since Linux is used on many portable and desktop systems, your
driver is likely to be used on such a system and therefore it
should support basic power management by implementing, if
necessary, the .suspend and .resume methods used during the
@@ -101,7 +111,8 @@ PM support: Since Linux is used on many portable and desktop systems, your
complete overview of the power management issues related to
drivers see Documentation/power/devices.txt .
-Control: In general if there is active maintenance of a driver by
+Control:
+ In general if there is active maintenance of a driver by
the author then patches will be redirected to them unless
they are totally obvious and without need of checking.
If you want to be the contact and update point for the
@@ -111,13 +122,15 @@ Control: In general if there is active maintenance of a driver by
What Criteria Do Not Determine Acceptance
-----------------------------------------
-Vendor: Being the hardware vendor and maintaining the driver is
+Vendor:
+ Being the hardware vendor and maintaining the driver is
often a good thing. If there is a stable working driver from
other people already in the tree don't expect 'we are the
vendor' to get your driver chosen. Ideally work with the
existing driver author to build a single perfect driver.
-Author: It doesn't matter if a large Linux company wrote the driver,
+Author:
+ It doesn't matter if a large Linux company wrote the driver,
or you did. Nobody has any special access to the kernel
tree. Anyone who tells you otherwise isn't telling the
whole story.
@@ -127,8 +140,10 @@ Resources
---------
Linux kernel master tree:
- ftp.??.kernel.org:/pub/linux/kernel/...
- ?? == your country code, such as "us", "uk", "fr", etc.
+ ftp.\ *country_code*\ .kernel.org:/pub/linux/kernel/...
+
+ where *country_code* == your country code, such as
+ **us**, **uk**, **fr**, etc.
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
@@ -141,14 +156,19 @@ Linux Device Drivers, Third Edition (covers 2.6.10):
LWN.net:
Weekly summary of kernel development activity - http://lwn.net/
+
2.6 API changes:
+
http://lwn.net/Articles/2.6-kernel-api/
+
Porting drivers from prior kernels to 2.6:
+
http://lwn.net/Articles/driver-porting/
KernelNewbies:
Documentation and assistance for new kernel programmers
- http://kernelnewbies.org/
+
+ http://kernelnewbies.org/
Linux USB project:
http://www.linux-usb.org/
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 8c79f1d53731..36f1dedc944c 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -1,9 +1,7 @@
+.. _submittingpatches:
- How to Get Your Change Into the Linux Kernel
- or
- Care And Operation Of Your Linus Torvalds
-
-
+How to Get Your Change Into the Linux Kernel or Care And Operation Of Your Linus Torvalds
+=========================================================================================
For a person or company who wishes to submit a change to the Linux
kernel, the process can sometimes be daunting if you're not familiar
@@ -12,57 +10,59 @@ can greatly increase the chances of your change being accepted.
This document contains a large number of suggestions in a relatively terse
format. For detailed information on how the kernel development process
-works, see Documentation/development-process. Also, read
-Documentation/SubmitChecklist for a list of items to check before
+works, see :ref:`Documentation/development-process <development_process_main>`.
+Also, read :ref:`Documentation/SubmitChecklist <submitchecklist>`
+for a list of items to check before
submitting code. If you are submitting a driver, also read
-Documentation/SubmittingDrivers; for device tree binding patches, read
+:ref:`Documentation/SubmittingDrivers <submittingdrivers>`;
+for device tree binding patches, read
Documentation/devicetree/bindings/submitting-patches.txt.
-Many of these steps describe the default behavior of the git version
-control system; if you use git to prepare your patches, you'll find much
+Many of these steps describe the default behavior of the ``git`` version
+control system; if you use ``git`` to prepare your patches, you'll find much
of the mechanical work done for you, though you'll still need to prepare
-and document a sensible set of patches. In general, use of git will make
+and document a sensible set of patches. In general, use of ``git`` will make
your life as a kernel developer easier.
---------------------------------------------
-SECTION 1 - CREATING AND SENDING YOUR CHANGE
---------------------------------------------
+Creating and Sending your Change
+********************************
0) Obtain a current source tree
-------------------------------
If you do not have a repository with the current kernel source handy, use
-git to obtain one. You'll want to start with the mainline repository,
-which can be grabbed with:
+``git`` to obtain one. You'll want to start with the mainline repository,
+which can be grabbed with::
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Note, however, that you may not want to develop against the mainline tree
directly. Most subsystem maintainers run their own trees and want to see
-patches prepared against those trees. See the "T:" entry for the subsystem
+patches prepared against those trees. See the **T:** entry for the subsystem
in the MAINTAINERS file to find that tree, or simply ask the maintainer if
the tree is not listed there.
It is still possible to download kernel releases via tarballs (as described
in the next section), but that is the hard way to do kernel development.
-1) "diff -up"
-------------
+1) ``diff -up``
+---------------
-If you must generate your patches by hand, use "diff -up" or "diff -uprN"
+If you must generate your patches by hand, use ``diff -up`` or ``diff -uprN``
to create patches. Git generates patches in this form by default; if
-you're using git, you can skip this section entirely.
+you're using ``git``, you can skip this section entirely.
All changes to the Linux kernel occur in the form of patches, as
-generated by diff(1). When creating your patch, make sure to create it
-in "unified diff" format, as supplied by the '-u' argument to diff(1).
-Also, please use the '-p' argument which shows which C function each
-change is in - that makes the resultant diff a lot easier to read.
+generated by :manpage:`diff(1)`. When creating your patch, make sure to
+create it in "unified diff" format, as supplied by the ``-u`` argument
+to :manpage:`diff(1)`.
+Also, please use the ``-p`` argument which shows which C function each
+change is in - that makes the resultant ``diff`` a lot easier to read.
Patches should be based in the root kernel source directory,
not in any lower subdirectory.
-To create a patch for a single file, it is often sufficient to do:
+To create a patch for a single file, it is often sufficient to do::
SRCTREE= linux
MYFILE= drivers/net/mydriver.c
@@ -74,8 +74,8 @@ To create a patch for a single file, it is often sufficient to do:
diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
To create a patch for multiple files, you should unpack a "vanilla",
-or unmodified kernel source tree, and generate a diff against your
-own source tree. For example:
+or unmodified kernel source tree, and generate a ``diff`` against your
+own source tree. For example::
MYSRC= /devel/linux
@@ -84,27 +84,27 @@ own source tree. For example:
diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
linux-3.19-vanilla $MYSRC > /tmp/patch
-"dontdiff" is a list of files which are generated by the kernel during
-the build process, and should be ignored in any diff(1)-generated
+``dontdiff`` is a list of files which are generated by the kernel during
+the build process, and should be ignored in any :manpage:`diff(1)`-generated
patch.
Make sure your patch does not include any extra files which do not
belong in a patch submission. Make sure to review your patch -after-
-generating it with diff(1), to ensure accuracy.
+generating it with :manpage:`diff(1)`, to ensure accuracy.
If your changes produce a lot of deltas, you need to split them into
-individual patches which modify things in logical stages; see section
-#3. This will facilitate review by other kernel developers,
+individual patches which modify things in logical stages; see
+:ref:`split_changes`. This will facilitate review by other kernel developers,
very important if you want your patch accepted.
-If you're using git, "git rebase -i" can help you with this process. If
-you're not using git, quilt <http://savannah.nongnu.org/projects/quilt>
+If you're using ``git``, ``git rebase -i`` can help you with this process. If
+you're not using ``git``, ``quilt`` <http://savannah.nongnu.org/projects/quilt>
is another popular alternative.
+.. _describe_changes:
-
-2) Describe your changes.
--------------------------
+2) Describe your changes
+------------------------
Describe your problem. Whether your patch is a one-line bug fix or
5000 lines of a new feature, there must be an underlying problem that
@@ -137,11 +137,11 @@ as you intend it to.
The maintainer will thank you if you write your patch description in a
form which can be easily pulled into Linux's source code management
-system, git, as a "commit log". See #15, below.
+system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
Solve only one problem per patch. If your description starts to get
long, that's a sign that you probably need to split up your patch.
-See #3, next.
+See :ref:`split_changes`.
When you submit or resubmit a patch or patch series, include the
complete patch description and justification for it. Don't just
@@ -160,7 +160,7 @@ its behaviour.
If the patch fixes a logged bug entry, refer to that bug entry by
number and URL. If the patch follows from a mailing list discussion,
give a URL to the mailing list archive; use the https://lkml.kernel.org/
-redirector with a Message-Id, to ensure that the links cannot become
+redirector with a ``Message-Id``, to ensure that the links cannot become
stale.
However, try to make your explanation understandable without external
@@ -171,7 +171,7 @@ patch as submitted.
If you want to refer to a specific commit, don't just refer to the
SHA-1 ID of the commit. Please also include the oneline summary of
the commit, to make it easier for reviewers to know what it is about.
-Example:
+Example::
Commit e21d2170f36602ae2708 ("video: remove unnecessary
platform_set_drvdata()") removed the unnecessary
@@ -185,23 +185,25 @@ there is no collision with your six-character ID now, that condition may
change five years from now.
If your patch fixes a bug in a specific commit, e.g. you found an issue using
-git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
-SHA-1 ID, and the one line summary. For example:
+``git bisect``, please use the 'Fixes:' tag with the first 12 characters of
+the SHA-1 ID, and the one line summary. For example::
Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
-The following git-config settings can be used to add a pretty format for
-outputting the above style in the git log or git show commands
+The following ``git config`` settings can be used to add a pretty format for
+outputting the above style in the ``git log`` or ``git show`` commands::
[core]
abbrev = 12
[pretty]
fixes = Fixes: %h (\"%s\")
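+
+With those settings in place, the tag can be generated directly from the
+offending commit; for the commit used in the example above, something like::
+
+ $ git log -1 --pretty=fixes e21d2170f366
+ Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
+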
-3) Separate your changes.
--------------------------
+.. _split_changes:
+
+3) Separate your changes
+------------------------
-Separate each _logical change_ into a separate patch.
+Separate each **logical change** into a separate patch.
For example, if your changes include both bug fixes and performance
enhancements for a single driver, separate those changes into two
@@ -217,12 +219,12 @@ change that can be verified by reviewers. Each patch should be justifiable
on its own merits.
If one patch depends on another patch in order for a change to be
-complete, that is OK. Simply note "this patch depends on patch X"
+complete, that is OK. Simply note **"this patch depends on patch X"**
in your patch description.
When dividing your change into a series of patches, take special care to
ensure that the kernel builds and runs properly after each patch in the
-series. Developers using "git bisect" to track down a problem can end up
+series. Developers using ``git bisect`` to track down a problem can end up
splitting your patch series at any point; they will not thank you if you
introduce bugs in the middle.
@@ -231,11 +233,13 @@ then only post say 15 or so at a time and wait for review and integration.
-4) Style-check your changes.
-----------------------------
+4) Style-check your changes
+---------------------------
Check your patch for basic style violations, details of which can be
-found in Documentation/CodingStyle. Failure to do so simply wastes
+found in
+:ref:`Documentation/CodingStyle <codingstyle>`.
+Failure to do so simply wastes
the reviewers time and will get your patch rejected, probably
without even being read.
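+
+At a minimum, run the kernel's style checker over the patch before posting;
+a typical invocation might be (the patch file name is just an example)::
+
+ $ ./scripts/checkpatch.pl --strict 0001-foo-fix-the-bar.patch
+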
@@ -260,8 +264,8 @@ You should be able to justify all violations that remain in your
patch.
-5) Select the recipients for your patch.
-----------------------------------------
+5) Select the recipients for your patch
+---------------------------------------
You should always copy the appropriate subsystem maintainer(s) on any patch
to code that they maintain; look through the MAINTAINERS file and the
@@ -295,13 +299,14 @@ to allow distributors to get the patch out to users; in such cases,
obviously, the patch should not be sent to any public lists.
Patches that fix a severe bug in a released kernel should be directed
-toward the stable maintainers by putting a line like this:
+toward the stable maintainers by putting a line like this::
Cc: stable@vger.kernel.org
into the sign-off area of your patch (note, NOT an email recipient). You
-should also read Documentation/stable_kernel_rules.txt in addition to this
-file.
+should also read
+:ref:`Documentation/stable_kernel_rules.txt <stable_kernel_rules>`
+in addition to this file.
Note, however, that some subsystem maintainers want to come to their own
conclusions on which patches should go to the stable trees. The networking
@@ -312,28 +317,30 @@ If changes affect userland-kernel interfaces, please send the MAN-PAGES
maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
least a notification of the change, so that some information makes its way
into the manual pages. User-space API changes should also be copied to
-linux-api@vger.kernel.org.
+linux-api@vger.kernel.org.
For small patches you may want to CC the Trivial Patch Monkey
trivial@kernel.org which collects "trivial" patches. Have a look
into the MAINTAINERS file for its current manager.
+
Trivial patches must qualify for one of the following rules:
- Spelling fixes in documentation
- Spelling fixes for errors which could break grep(1)
- Warning fixes (cluttering with useless warnings is bad)
- Compilation fixes (only if they are actually correct)
- Runtime fixes (only if they actually fix things)
- Removing use of deprecated functions/macros
- Contact detail and documentation fixes
- Non-portable code replaced by portable code (even in arch-specific,
- since people copy, as long as it's trivial)
- Any fix by the author/maintainer of the file (ie. patch monkey
- in re-transmission mode)
+
+- Spelling fixes in documentation
+- Spelling fixes for errors which could break :manpage:`grep(1)`
+- Warning fixes (cluttering with useless warnings is bad)
+- Compilation fixes (only if they are actually correct)
+- Runtime fixes (only if they actually fix things)
+- Removing use of deprecated functions/macros
+- Contact detail and documentation fixes
+- Non-portable code replaced by portable code (even in arch-specific,
+ since people copy, as long as it's trivial)
+- Any fix by the author/maintainer of the file (ie. patch monkey
+ in re-transmission mode)
-6) No MIME, no links, no compression, no attachments. Just plain text.
------------------------------------------------------------------------
+6) No MIME, no links, no compression, no attachments. Just plain text
+----------------------------------------------------------------------
Linus and other kernel developers need to be able to read and comment
on the changes you are submitting. It is important for a kernel
@@ -341,8 +348,11 @@ developer to be able to "quote" your changes, using standard e-mail
tools, so that they may comment on specific portions of your code.
For this reason, all patches should be submitted by e-mail "inline".
-WARNING: Be wary of your editor's word-wrap corrupting your patch,
-if you choose to cut-n-paste your patch.
+
+.. warning::
+
+ Be wary of your editor's word-wrap corrupting your patch,
+ if you choose to cut-n-paste your patch.
Do not attach the patch as a MIME attachment, compressed or not.
Many popular e-mail applications will not always transmit a MIME
@@ -353,11 +363,12 @@ decreasing the likelihood of your MIME-attached change being accepted.
Exception: If your mailer is mangling patches then someone may ask
you to re-send them using MIME.
-See Documentation/email-clients.txt for hints about configuring
-your e-mail client so that it sends your patches untouched.
+See :ref:`Documentation/email-clients.txt <email_clients>`
+for hints about configuring your e-mail client so that it sends your patches
+untouched.
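+
+If a regular mail client keeps mangling patches, sending the series with
+``git send-email`` avoids the problem entirely; a sketch (the addresses and
+file glob are placeholders)::
+
+ $ git send-email --to=maintainer@example.org \
+       --cc=linux-kernel@vger.kernel.org 00*.patch
+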
-7) E-mail size.
----------------
+7) E-mail size
+--------------
Large changes are not appropriate for mailing lists, and some
maintainers. If your patch, uncompressed, exceeds 300 kB in size,
@@ -366,8 +377,8 @@ server, and provide instead a URL (link) pointing to your patch. But note
that if your patch exceeds 300 kB, it almost certainly needs to be broken up
anyway.
-8) Respond to review comments.
-------------------------------
+8) Respond to review comments
+-----------------------------
Your patch will almost certainly get comments from reviewers on ways in
which the patch can be improved. You must respond to those comments;
@@ -382,8 +393,8 @@ reviewers sometimes get grumpy. Even in that case, though, respond
politely and address the problems they have pointed out.
-9) Don't get discouraged - or impatient.
-----------------------------------------
+9) Don't get discouraged - or impatient
+---------------------------------------
After you have submitted your change, be patient and wait. Reviewers are
busy people and may not get to your patch right away.
@@ -419,9 +430,10 @@ patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:
- Developer's Certificate of Origin 1.1
+Developer's Certificate of Origin 1.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- By making a contribution to this project, I certify that:
+By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
@@ -445,7 +457,7 @@ can certify the below:
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
-then you just add a line saying
+then you just add a line saying::
Signed-off-by: Random J Developer <random@developer.example.org>
@@ -466,7 +478,7 @@ you add a line between the last Signed-off-by header and yours, indicating
the nature of your changes. While there is nothing mandatory about this, it
seems like prepending the description with your mail and/or name, all
enclosed in square brackets, is noticeable enough to make it obvious that
-you are responsible for last-minute changes. Example :
+you are responsible for last-minute changes. Example::
Signed-off-by: Random J Developer <random@developer.example.org>
[lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
@@ -481,15 +493,15 @@ which appears in the changelog.
Special note to back-porters: It seems to be a common and useful practice
to insert an indication of the origin of a patch at the top of the commit
message (just after the subject line) to facilitate tracking. For instance,
-here's what we see in a 3.x-stable release:
+here's what we see in a 3.x-stable release::
-Date: Tue Oct 7 07:26:38 2014 -0400
+ Date: Tue Oct 7 07:26:38 2014 -0400
libata: Un-break ATA blacklist
commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
-And here's what might appear in an older kernel once a patch is backported:
+And here's what might appear in an older kernel once a patch is backported::
Date: Tue May 13 22:12:27 2008 +0200
@@ -529,7 +541,7 @@ When in doubt people should refer to the original discussion in the mailing
list archives.
If a person has had the opportunity to comment on a patch, but has not
-provided such comments, you may optionally add a "Cc:" tag to the patch.
+provided such comments, you may optionally add a ``Cc:`` tag to the patch.
This is the only tag which might be added without an explicit action by the
person it names - but it should indicate that this person was copied on the
patch. This tag documents that potentially interested parties
@@ -552,11 +564,12 @@ future patches, and ensures credit for the testers.
Reviewed-by:, instead, indicates that the patch has been reviewed and found
acceptable according to the Reviewer's Statement:
- Reviewer's statement of oversight
+Reviewer's statement of oversight
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- By offering my Reviewed-by: tag, I state that:
+By offering my Reviewed-by: tag, I state that:
- (a) I have carried out a technical review of this patch to
+ (a) I have carried out a technical review of this patch to
evaluate its appropriateness and readiness for inclusion into
the mainline kernel.
@@ -594,24 +607,25 @@ A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
is used to make it easy to determine where a bug originated, which can help
review a bug fix. This tag also assists the stable kernel team in determining
which stable kernel versions should receive your fix. This is the preferred
-method for indicating a bug fixed by the patch. See #2 above for more details.
+method for indicating a bug fixed by the patch. See :ref:`describe_changes`
+for more details.
14) The canonical patch format
------------------------------
This section describes how the patch itself should be formatted. Note
-that, if you have your patches stored in a git repository, proper patch
-formatting can be had with "git format-patch". The tools cannot create
+that, if you have your patches stored in a ``git`` repository, proper patch
+formatting can be had with ``git format-patch``. The tools cannot create
the necessary text, though, so read the instructions below anyway.
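+
+For instance, generating a properly numbered second revision of a series,
+with a cover letter, might look like this (the branch name is hypothetical)::
+
+ $ git format-patch -v2 --cover-letter -o outgoing/ master..my-feature
+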
-The canonical patch subject line is:
+The canonical patch subject line is::
Subject: [PATCH 001/123] subsystem: summary phrase
The canonical patch message body contains the following:
- - A "from" line specifying the patch author (only needed if the person
+ - A ``from`` line specifying the patch author (only needed if the person
sending the patch is not the author).
- An empty line.
@@ -619,46 +633,46 @@ The canonical patch message body contains the following:
- The body of the explanation, line wrapped at 75 columns, which will
be copied to the permanent changelog to describe this patch.
- - The "Signed-off-by:" lines, described above, which will
+ - The ``Signed-off-by:`` lines, described above, which will
also go in the changelog.
- - A marker line containing simply "---".
+ - A marker line containing simply ``---``.
- Any additional comments not suitable for the changelog.
- - The actual patch (diff output).
+ - The actual patch (``diff`` output).
The Subject line format makes it very easy to sort the emails
alphabetically by subject line - pretty much any email reader will
support that - since the sequence number is zero-padded,
the numerical and alphabetic sort is the same.
-The "subsystem" in the email's Subject should identify which
+The ``subsystem`` in the email's Subject should identify which
area or subsystem of the kernel is being patched.
-The "summary phrase" in the email's Subject should concisely
-describe the patch which that email contains. The "summary
-phrase" should not be a filename. Do not use the same "summary
-phrase" for every patch in a whole patch series (where a "patch
-series" is an ordered sequence of multiple, related patches).
+The ``summary phrase`` in the email's Subject should concisely
+describe the patch which that email contains. The ``summary
+phrase`` should not be a filename. Do not use the same ``summary
+phrase`` for every patch in a whole patch series (where a ``patch
+series`` is an ordered sequence of multiple, related patches).
-Bear in mind that the "summary phrase" of your email becomes a
+Bear in mind that the ``summary phrase`` of your email becomes a
globally-unique identifier for that patch. It propagates all the way
-into the git changelog. The "summary phrase" may later be used in
+into the ``git`` changelog. The ``summary phrase`` may later be used in
developer discussions which refer to the patch. People will want to
-google for the "summary phrase" to read discussion regarding that
+google for the ``summary phrase`` to read discussion regarding that
patch. It will also be the only thing that people may quickly see
when, two or three months later, they are going through perhaps
-thousands of patches using tools such as "gitk" or "git log
---oneline".
+thousands of patches using tools such as ``gitk`` or ``git log
+--oneline``.
-For these reasons, the "summary" must be no more than 70-75
+For these reasons, the ``summary`` must be no more than 70-75
characters, and it must describe both what the patch changes, as well
as why the patch might be necessary. It is challenging to be both
succinct and descriptive, but that is what a well-written summary
should do.
-The "summary phrase" may be prefixed by tags enclosed in square
+The ``summary phrase`` may be prefixed by tags enclosed in square
brackets: "Subject: [PATCH <tag>...] <summary phrase>". The tags are
not considered part of the summary phrase, but describe how the patch
should be treated. Common tags might include a version descriptor if
@@ -670,19 +684,19 @@ that developers understand the order in which the patches should be
applied and that they have reviewed or applied all of the patches in
the patch series.
-A couple of example Subjects:
+A couple of example Subjects::
Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching
Subject: [PATCH v2 01/27] x86: fix eflags tracking
-The "from" line must be the very first line in the message body,
+The ``from`` line must be the very first line in the message body,
and has the form:
From: Original Author <author@example.com>
-The "from" line specifies who will be credited as the author of the
-patch in the permanent changelog. If the "from" line is missing,
-then the "From:" line from the email header will be used to determine
+The ``from`` line specifies who will be credited as the author of the
+patch in the permanent changelog. If the ``from`` line is missing,
+then the ``From:`` line from the email header will be used to determine
the patch author in the changelog.
The explanation body will be committed to the permanent source
@@ -694,35 +708,37 @@ especially useful for people who might be searching the commit logs
looking for the applicable patch. If a patch fixes a compile failure,
it may not be necessary to include _all_ of the compile failures; just
enough that it is likely that someone searching for the patch can find
-it. As in the "summary phrase", it is important to be both succinct as
+it. As in the ``summary phrase``, it is important to be both succinct as
well as descriptive.
-The "---" marker line serves the essential purpose of marking for patch
+The ``---`` marker line serves the essential purpose of marking for patch
handling tools where the changelog message ends.
-One good use for the additional comments after the "---" marker is for
-a diffstat, to show what files have changed, and the number of
-inserted and deleted lines per file. A diffstat is especially useful
+One good use for the additional comments after the ``---`` marker is for
+a ``diffstat``, to show what files have changed, and the number of
+inserted and deleted lines per file. A ``diffstat`` is especially useful
on bigger patches. Other comments relevant only to the moment or the
maintainer, not suitable for the permanent changelog, should also go
-here. A good example of such comments might be "patch changelogs"
+here. A good example of such comments might be ``patch changelogs``
which describe what has changed between the v1 and v2 version of the
patch.
-If you are going to include a diffstat after the "---" marker, please
-use diffstat options "-p 1 -w 70" so that filenames are listed from
+If you are going to include a ``diffstat`` after the ``---`` marker, please
+use ``diffstat`` options ``-p 1 -w 70`` so that filenames are listed from
the top of the kernel source tree and don't use too much horizontal
-space (easily fit in 80 columns, maybe with some indentation). (git
+space (easily fit in 80 columns, maybe with some indentation). (``git``
generates appropriate diffstats by default.)
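+
+For a hand-made patch that would be something like::
+
+ $ diffstat -p 1 -w 70 /tmp/patch
+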
See more details on the proper patch format in the following
references.
+.. _explicit_in_reply_to:
+
15) Explicit In-Reply-To headers
--------------------------------
It can be helpful to manually add In-Reply-To: headers to a patch
-(e.g., when using "git send-email") to associate the patch with
+(e.g., when using ``git send-email``) to associate the patch with
previous relevant discussion, e.g. to link a bug fix to the email with
the bug report. However, for a multi-patch series, it is generally
best to avoid using In-Reply-To: to link to older versions of the
@@ -732,12 +748,12 @@ helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in
the cover email text) to link to an earlier version of the patch series.
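+
+For example, linking a fix to the original bug report could look like this
+(the ``Message-Id`` and address are made up)::
+
+ $ git send-email --in-reply-to='<bug-report-id@example.org>' \
+       --to=maintainer@example.org 0001-fix-the-bug.patch
+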
-16) Sending "git pull" requests
--------------------------------
+16) Sending ``git pull`` requests
+---------------------------------
If you have a series of patches, it may be most convenient to have the
maintainer pull them directly into the subsystem repository with a
-"git pull" operation. Note, however, that pulling patches from a developer
+``git pull`` operation. Note, however, that pulling patches from a developer
requires a higher degree of trust than taking patches from a mailing list.
As a result, many subsystem maintainers are reluctant to take pull
requests, especially from new, unknown developers. If in doubt you can use
@@ -746,7 +762,7 @@ series, giving the maintainer the option of using either.
A pull request should have [GIT] or [PULL] in the subject line. The
request itself should include the repository name and the branch of
-interest on a single line; it should look something like:
+interest on a single line; it should look something like::
Please pull from
@@ -755,10 +771,10 @@ interest on a single line; it should look something like:
to get these changes:
A pull request should also include an overall message saying what will be
-included in the request, a "git shortlog" listing of the patches
-themselves, and a diffstat showing the overall effect of the patch series.
+included in the request, a ``git shortlog`` listing of the patches
+themselves, and a ``diffstat`` showing the overall effect of the patch series.
The easiest way to get all this information together is, of course, to let
-git do it for you with the "git request-pull" command.
+``git`` do it for you with the ``git request-pull`` command.
Some maintainers (including Linus) want to see pull requests from signed
commits; that increases their confidence that the request actually came
@@ -770,8 +786,8 @@ signed by one or more core kernel developers. This step can be hard for
new developers, but there is no way around it. Attending conferences can
be a good way to find developers who can sign your key.
-Once you have prepared a patch series in git that you wish to have somebody
-pull, create a signed tag with "git tag -s". This will create a new tag
+Once you have prepared a patch series in ``git`` that you wish to have somebody
+pull, create a signed tag with ``git tag -s``. This will create a new tag
identifying the last commit in the series and containing a signature
created with your private key. You will also have the opportunity to add a
changelog-style message to the tag; this is an ideal place to describe the
@@ -782,14 +798,13 @@ are working from, don't forget to push the signed tag explicitly to the
public tree.
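+
+Concretely, tagging and publishing the series might look like this (the
+remote name is illustrative)::
+
+ $ git tag -s my-signed-tag -m "Summary of what this series does"
+ $ git push origin my-signed-tag
+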
When generating your pull request, use the signed tag as the target. A
-command like this will do the trick:
+command like this will do the trick::
git request-pull master git://my.public.tree/linux.git my-signed-tag
-----------------------
-SECTION 2 - REFERENCES
-----------------------
+REFERENCES
+**********
Andrew Morton, "The perfect patch" (tpp).
<http://www.ozlabs.org/~akpm/stuff/tpp.txt>
@@ -799,23 +814,28 @@ Jeff Garzik, "Linux kernel patch submission format".
Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/linux/maintainer.html>
+
<http://www.kroah.com/log/linux/maintainer-02.html>
+
<http://www.kroah.com/log/linux/maintainer-03.html>
+
<http://www.kroah.com/log/linux/maintainer-04.html>
+
<http://www.kroah.com/log/linux/maintainer-05.html>
+
<http://www.kroah.com/log/linux/maintainer-06.html>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<https://lkml.org/lkml/2005/7/11/336>
Kernel Documentation/CodingStyle:
- <Documentation/CodingStyle>
+ :ref:`Documentation/CodingStyle <codingstyle>`
Linus Torvalds's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
Andi Kleen, "On submitting kernel patches"
Some strategies to get difficult or controversial changes in.
+
http://halobates.de/on-submitting-patches.pdf
---
diff --git a/Documentation/applying-patches.txt b/Documentation/applying-patches.txt
index 77df55b0225a..02ce4924468e 100644
--- a/Documentation/applying-patches.txt
+++ b/Documentation/applying-patches.txt
@@ -1,9 +1,13 @@
+.. _applying_patches:
- Applying Patches To The Linux Kernel
- ------------------------------------
+Applying Patches To The Linux Kernel
+++++++++++++++++++++++++++++++++++++
- Original by: Jesper Juhl, August 2005
- Last update: 2006-01-05
+Original by:
+ Jesper Juhl, August 2005
+
+Last update:
+ 2016-09-14
A frequently asked question on the Linux Kernel Mailing List is how to apply
@@ -17,10 +21,12 @@ their specific patches) is also provided.
What is a patch?
----
- A patch is a small text document containing a delta of changes between two
-different versions of a source tree. Patches are created with the `diff'
+================
+
+A patch is a small text document containing a delta of changes between two
+different versions of a source tree. Patches are created with the ``diff``
program.
+
To correctly apply a patch you need to know what base it was generated from
and what new version the patch will change the source tree into. These
should both be present in the patch file metadata or be possible to deduce
@@ -28,8 +34,9 @@ from the filename.
How do I apply or revert a patch?
----
- You apply a patch with the `patch' program. The patch program reads a diff
+=================================
+
+You apply a patch with the ``patch`` program. The patch program reads a diff
(or patch) file and makes the changes to the source tree described in it.
Patches for the Linux kernel are generated relative to the parent directory
@@ -38,26 +45,33 @@ holding the kernel source dir.
This means that paths to files inside the patch file contain the name of the
kernel source directories it was generated against (or some other directory
names like "a/" and "b/").
+
Since this is unlikely to match the name of the kernel source dir on your
local machine (but is often useful info to see what version an otherwise
unlabeled patch was generated against) you should change into your kernel
source directory and then strip the first element of the path from filenames
-in the patch file when applying it (the -p1 argument to `patch' does this).
+in the patch file when applying it (the ``-p1`` argument to ``patch`` does
+this).
To revert a previously applied patch, use the -R argument to patch.
-So, if you applied a patch like this:
+So, if you applied a patch like this::
+
patch -p1 < ../patch-x.y.z
-You can revert (undo) it like this:
+You can revert (undo) it like this::
+
patch -R -p1 < ../patch-x.y.z
-How do I feed a patch/diff file to `patch'?
----
- This (as usual with Linux and other UNIX like operating systems) can be
+How do I feed a patch/diff file to ``patch``?
+=============================================
+
+This (as usual with Linux and other UNIX like operating systems) can be
done in several different ways.
+
In all the examples below I feed the file (in uncompressed form) to patch
-via stdin using the following syntax:
+via stdin using the following syntax::
+
patch -p1 < path/to/patch-x.y.z
If you just want to be able to follow the examples below and don't want to
@@ -65,35 +79,40 @@ know of more than one way to use patch, then you can stop reading this
section here.
Patch can also get the name of the file to use via the -i argument, like
-this:
+this::
+
patch -p1 -i path/to/patch-x.y.z
-If your patch file is compressed with gzip or bzip2 and you don't want to
+If your patch file is compressed with gzip or xz and you don't want to
uncompress it before applying it, then you can feed it to patch like this
-instead:
- zcat path/to/patch-x.y.z.gz | patch -p1
- bzcat path/to/patch-x.y.z.bz2 | patch -p1
+instead::
+
+ xzcat path/to/patch-x.y.z.xz | patch -p1
+ zcat path/to/patch-x.y.z.gz | patch -p1
If you wish to uncompress the patch file by hand first before applying it
(what I assume you've done in the examples below), then you simply run
-gunzip or bunzip2 on the file -- like this:
+``gunzip`` or ``xz -d`` on the file -- like this::
+
gunzip patch-x.y.z.gz
- bunzip2 patch-x.y.z.bz2
+ xz -d patch-x.y.z.xz
Which will leave you with a plain text patch-x.y.z file that you can feed to
-patch via stdin or the -i argument, as you prefer.
+patch via stdin or the ``-i`` argument, as you prefer.
-A few other nice arguments for patch are -s which causes patch to be silent
+A few other nice arguments for patch are ``-s`` which causes patch to be silent
except for errors which is nice to prevent errors from scrolling out of the
-screen too fast, and --dry-run which causes patch to just print a listing of
-what would happen, but doesn't actually make any changes. Finally --verbose
+screen too fast, and ``--dry-run`` which causes patch to just print a listing of
+what would happen, but doesn't actually make any changes. Finally ``--verbose``
tells patch to print more information about the work being done.
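+
+A cautious dry run followed by the real thing could therefore look like::
+
+ $ patch -p1 -s --dry-run < ../patch-x.y.z   # check first, show only errors
+ $ patch -p1 -s < ../patch-x.y.z             # then apply for real
+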
Common errors when patching
----
- When patch applies a patch file it attempts to verify the sanity of the
+===========================
+
+When patch applies a patch file it attempts to verify the sanity of the
file in different ways.
+
Checking that the file looks like a valid patch file and checking the code
around the bits being modified matches the context provided in the patch are
just two of the basic sanity checks patch does.
@@ -111,13 +130,13 @@ everything looks good it has just moved up or down a bit, and patch will
usually adjust the line numbers and apply the patch.
Whenever patch applies a patch that it had to modify a bit to make it fit
-it'll tell you about it by saying the patch applied with 'fuzz'.
+it'll tell you about it by saying the patch applied with **fuzz**.
You should be wary of such changes since even though patch probably got it
right it doesn't /always/ get it right, and the result will sometimes be
wrong.
When patch encounters a change that it can't fix up with fuzz it rejects it
-outright and leaves a file with a .rej extension (a reject file). You can
+outright and leaves a file with a ``.rej`` extension (a reject file). You can
read this file to see exactly what change couldn't be applied, so you can
go fix it up by hand if you wish.
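+
+Any leftover reject files are easy to locate afterwards, e.g.::
+
+ $ find . -name '*.rej'
+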
@@ -132,43 +151,47 @@ to start with a fresh tree downloaded in full from kernel.org.
Let's look a bit more at some of the messages patch can produce.
-If patch stops and presents a "File to patch:" prompt, then patch could not
+If patch stops and presents a ``File to patch:`` prompt, then patch could not
find a file to be patched. Most likely you forgot to specify -p1 or you are
in the wrong directory. Less often, you'll find patches that need to be
-applied with -p0 instead of -p1 (reading the patch file should reveal if
+applied with ``-p0`` instead of ``-p1`` (reading the patch file should reveal if
this is the case -- if so, then this is an error by the person who created
the patch but is not fatal).
-If you get "Hunk #2 succeeded at 1887 with fuzz 2 (offset 7 lines)." or a
+If you get ``Hunk #2 succeeded at 1887 with fuzz 2 (offset 7 lines).`` or a
message similar to that, then it means that patch had to adjust the location
of the change (in this example it needed to move 7 lines from where it
expected to make the change to make it fit).
+
The resulting file may or may not be OK, depending on the reason the file
was different than expected.
+
This often happens if you try to apply a patch that was generated against a
different kernel version than the one you are trying to patch.
-If you get a message like "Hunk #3 FAILED at 2387.", then it means that the
+If you get a message like ``Hunk #3 FAILED at 2387.``, then it means that the
patch could not be applied correctly and the patch program was unable to
-fuzz its way through. This will generate a .rej file with the change that
-caused the patch to fail and also a .orig file showing you the original
+fuzz its way through. This will generate a ``.rej`` file with the change that
+caused the patch to fail and also a ``.orig`` file showing you the original
content that couldn't be changed.
-If you get "Reversed (or previously applied) patch detected! Assume -R? [n]"
+If you get ``Reversed (or previously applied) patch detected! Assume -R? [n]``
then patch detected that the change contained in the patch seems to have
already been made.
+
If you actually did apply this patch previously and you just re-applied it
in error, then just say [n]o and abort this patch. If you applied this patch
previously and actually intended to revert it, but forgot to specify -R,
-then you can say [y]es here to make patch revert it for you.
+then you can say [**y**]es here to make patch revert it for you.
+
This can also happen if the creator of the patch reversed the source and
destination directories when creating the patch, and in that case reverting
the patch will in fact apply it.
-A message similar to "patch: **** unexpected end of file in patch" or "patch
-unexpectedly ends in middle of line" means that patch could make no sense of
-the file you fed to it. Either your download is broken, you tried to feed
-patch a compressed patch file without uncompressing it first, or the patch
+A message similar to ``patch: **** unexpected end of file in patch`` or
+``patch unexpectedly ends in middle of line`` means that patch could make no
+sense of the file you fed to it. Either your download is broken, you tried to
+feed patch a compressed patch file without uncompressing it first, or the patch
file that you are using has been mangled by a mail client or mail transfer
agent along the way somewhere, e.g., by splitting a long line into two lines.
Often these warnings can easily be fixed by joining (concatenating) the
@@ -182,28 +205,32 @@ to start over with a fresh download of a full kernel tree and the patch you
wish to apply.
-Are there any alternatives to `patch'?
----
- Yes there are alternatives.
+Are there any alternatives to ``patch``?
+========================================
- You can use the `interdiff' program (http://cyberelk.net/tim/patchutils/) to
+
+Yes there are alternatives.
+
+You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
generate a patch representing the differences between two patches and then
apply the result.
-This will let you move from something like 2.6.12.2 to 2.6.12.3 in a single
+
+This will let you move from something like 4.7.2 to 4.7.3 in a single
step. The -z flag to interdiff will even let you feed it patches in gzip or
bzip2 compressed form directly without the use of zcat or bzcat or manual
decompression.
-Here's how you'd go from 2.6.12.2 to 2.6.12.3 in a single step:
- interdiff -z ../patch-2.6.12.2.bz2 ../patch-2.6.12.3.gz | patch -p1
+Here's how you'd go from 4.7.2 to 4.7.3 in a single step::
+
+ interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1
Although interdiff may save you a step or two you are generally advised to
do the additional steps since interdiff can get things wrong in some cases.
- Another alternative is `ketchup', which is a python script for automatic
+Another alternative is ``ketchup``, which is a python script for automatic
downloading and applying of patches (http://www.selenic.com/ketchup/).
- Other nice tools are diffstat, which shows a summary of changes made by a
+Other nice tools are diffstat, which shows a summary of changes made by a
patch; lsdiff, which displays a short listing of affected files in a patch
file, along with (optionally) the line numbers of the start of each patch;
and grepdiff, which displays a list of the files modified by a patch where
@@ -211,99 +238,103 @@ the patch contains a given regular expression.
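+
+Typical invocations of those three helpers might look like this (the patch
+file name and the search pattern are only examples)::
+
+ $ diffstat -p1 ../patch-4.7.3               # per-file summary of changes
+ $ lsdiff -n ../patch-4.7.3                  # affected files and line numbers
+ $ grepdiff 'EXPORT_SYMBOL' ../patch-4.7.3   # files whose changes match a regex
+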
Where can I download the patches?
----
- The patches are available at http://kernel.org/
+=================================
+
+The patches are available at http://kernel.org/
Most recent patches are linked from the front page, but they also have
specific homes.
-The 2.6.x.y (-stable) and 2.6.x patches live at
- ftp://ftp.kernel.org/pub/linux/kernel/v2.6/
+The 4.x.y (-stable) and 4.x patches live at
-The -rc patches live at
- ftp://ftp.kernel.org/pub/linux/kernel/v2.6/testing/
+ ftp://ftp.kernel.org/pub/linux/kernel/v4.x/
-The -git patches live at
- ftp://ftp.kernel.org/pub/linux/kernel/v2.6/snapshots/
+The -rc patches live at
-The -mm kernels live at
- ftp://ftp.kernel.org/pub/linux/kernel/people/akpm/patches/2.6/
+ ftp://ftp.kernel.org/pub/linux/kernel/v4.x/testing/
-In place of ftp.kernel.org you can use ftp.cc.kernel.org, where cc is a
+In place of ``ftp.kernel.org`` you can use ``ftp.cc.kernel.org``, where cc is a
country code. This way you'll be downloading from a mirror site that's most
likely geographically closer to you, resulting in faster downloads for you,
less bandwidth used globally and less load on the main kernel.org servers --
these are good things, so do use mirrors when possible.
-The 2.6.x kernels
----
- These are the base stable releases released by Linus. The highest numbered
+The 4.x kernels
+===============
+
+These are the base stable releases released by Linus. The highest numbered
release is the most recent.
If regressions or other serious flaws are found, then a -stable fix patch
-will be released (see below) on top of this base. Once a new 2.6.x base
+will be released (see below) on top of this base. Once a new 4.x base
kernel is released, a patch is made available that is a delta between the
-previous 2.6.x kernel and the new one.
+previous 4.x kernel and the new one.
+
+To apply a patch moving from 4.6 to 4.7, you'd do the following (note
+that such patches do **NOT** apply on top of 4.x.y kernels but on top of the
+base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to
+first revert the 4.x.y patch).
+
+Here are some examples::
-To apply a patch moving from 2.6.11 to 2.6.12, you'd do the following (note
-that such patches do *NOT* apply on top of 2.6.x.y kernels but on top of the
-base 2.6.x kernel -- if you need to move from 2.6.x.y to 2.6.x+1 you need to
-first revert the 2.6.x.y patch).
+ # moving from 4.6 to 4.7
-Here are some examples:
+ $ cd ~/linux-4.6 # change to kernel source dir
+ $ patch -p1 < ../patch-4.7 # apply the 4.7 patch
+ $ cd ..
+ $ mv linux-4.6 linux-4.7 # rename source dir
-# moving from 2.6.11 to 2.6.12
-$ cd ~/linux-2.6.11 # change to kernel source dir
-$ patch -p1 < ../patch-2.6.12 # apply the 2.6.12 patch
-$ cd ..
-$ mv linux-2.6.11 linux-2.6.12 # rename source dir
+ # moving from 4.6.1 to 4.7
-# moving from 2.6.11.1 to 2.6.12
-$ cd ~/linux-2.6.11.1 # change to kernel source dir
-$ patch -p1 -R < ../patch-2.6.11.1 # revert the 2.6.11.1 patch
- # source dir is now 2.6.11
-$ patch -p1 < ../patch-2.6.12 # apply new 2.6.12 patch
-$ cd ..
-$ mv linux-2.6.11.1 linux-2.6.12 # rename source dir
+ $ cd ~/linux-4.6.1 # change to kernel source dir
+ $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch
+ # source dir is now 4.6
+ $ patch -p1 < ../patch-4.7 # apply new 4.7 patch
+ $ cd ..
+ $ mv linux-4.6.1 linux-4.7 # rename source dir
-The 2.6.x.y kernels
----
- Kernels with 4-digit versions are -stable kernels. They contain small(ish)
+The 4.x.y kernels
+=================
+
+Kernels with 3-digit versions are -stable kernels. They contain small(ish)
critical fixes for security problems or significant regressions discovered
-in a given 2.6.x kernel.
+in a given 4.x kernel.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.
-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x kernel is
+If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
the current stable kernel.
- note: the -stable team usually do make incremental patches available as well
+.. note::
+
+ The -stable team usually do make incremental patches available as well
as patches against the latest mainline release, but I only cover the
non-incremental ones below. The incremental ones can be found at
- ftp://ftp.kernel.org/pub/linux/kernel/v2.6/incr/
+ ftp://ftp.kernel.org/pub/linux/kernel/v4.x/incr/
-These patches are not incremental, meaning that for example the 2.6.12.3
-patch does not apply on top of the 2.6.12.2 kernel source, but rather on top
-of the base 2.6.12 kernel source .
-So, in order to apply the 2.6.12.3 patch to your existing 2.6.12.2 kernel
-source you have to first back out the 2.6.12.2 patch (so you are left with a
-base 2.6.12 kernel source) and then apply the new 2.6.12.3 patch.
+These patches are not incremental, meaning that for example the 4.7.3
+patch does not apply on top of the 4.7.2 kernel source, but rather on top
+of the base 4.7 kernel source.
-Here's a small example:
+So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel
+source you have to first back out the 4.7.2 patch (so you are left with a
+base 4.7 kernel source) and then apply the new 4.7.3 patch.
-$ cd ~/linux-2.6.12.2 # change into the kernel source dir
-$ patch -p1 -R < ../patch-2.6.12.2 # revert the 2.6.12.2 patch
-$ patch -p1 < ../patch-2.6.12.3 # apply the new 2.6.12.3 patch
-$ cd ..
-$ mv linux-2.6.12.2 linux-2.6.12.3 # rename the kernel source dir
+Here's a small example::
+ $ cd ~/linux-4.7.2 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch
+ $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch
+ $ cd ..
+ $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir
The -rc kernels
----
- These are release-candidate kernels. These are development kernels released
+===============
+
+These are release-candidate kernels. These are development kernels released
by Linus whenever he deems the current git (the kernel's source management
tool) tree to be in a reasonably sane state adequate for testing.
@@ -317,39 +348,44 @@ This is a good branch to run for people who want to help out testing
development kernels but do not want to run some of the really experimental
stuff (such people should see the sections about -git and -mm kernels below).
-The -rc patches are not incremental, they apply to a base 2.6.x kernel, just
-like the 2.6.x.y patches described above. The kernel version before the -rcN
+The -rc patches are not incremental, they apply to a base 4.x kernel, just
+like the 4.x.y patches described above. The kernel version before the -rcN
suffix denotes the version of the kernel that this -rc kernel will eventually
turn into.
-So, 2.6.13-rc5 means that this is the fifth release candidate for the 2.6.13
-kernel and the patch should be applied on top of the 2.6.12 kernel source.
-Here are 3 examples of how to apply these patches:
+So, 4.8-rc5 means that this is the fifth release candidate for the 4.8
+kernel and the patch should be applied on top of the 4.7 kernel source.
-# first an example of moving from 2.6.12 to 2.6.13-rc3
-$ cd ~/linux-2.6.12 # change into the 2.6.12 source dir
-$ patch -p1 < ../patch-2.6.13-rc3 # apply the 2.6.13-rc3 patch
-$ cd ..
-$ mv linux-2.6.12 linux-2.6.13-rc3 # rename the source dir
+Here are 3 examples of how to apply these patches::
-# now let's move from 2.6.13-rc3 to 2.6.13-rc5
-$ cd ~/linux-2.6.13-rc3 # change into the 2.6.13-rc3 dir
-$ patch -p1 -R < ../patch-2.6.13-rc3 # revert the 2.6.13-rc3 patch
-$ patch -p1 < ../patch-2.6.13-rc5 # apply the new 2.6.13-rc5 patch
-$ cd ..
-$ mv linux-2.6.13-rc3 linux-2.6.13-rc5 # rename the source dir
+ # first an example of moving from 4.7 to 4.8-rc3
-# finally let's try and move from 2.6.12.3 to 2.6.13-rc5
-$ cd ~/linux-2.6.12.3 # change to the kernel source dir
-$ patch -p1 -R < ../patch-2.6.12.3 # revert the 2.6.12.3 patch
-$ patch -p1 < ../patch-2.6.13-rc5 # apply new 2.6.13-rc5 patch
-$ cd ..
-$ mv linux-2.6.12.3 linux-2.6.13-rc5 # rename the kernel source dir
+ $ cd ~/linux-4.7 # change to the 4.7 source dir
+ $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch
+ $ cd ..
+ $ mv linux-4.7 linux-4.8-rc3 # rename the source dir
+
+ # now let's move from 4.8-rc3 to 4.8-rc5
+
+ $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir
+ $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch
+ $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch
+ $ cd ..
+ $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir
+
+ # finally let's try and move from 4.7.3 to 4.8-rc5
+
+ $ cd ~/linux-4.7.3 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch
+ $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch
+ $ cd ..
+ $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir
The -git kernels
----
- These are daily snapshots of Linus' kernel tree (managed in a git
+================
+
+These are daily snapshots of Linus' kernel tree (managed in a git
repository, hence the name).
These patches are usually released daily and represent the current state of
@@ -357,91 +393,66 @@ Linus's tree. They are more experimental than -rc kernels since they are
generated automatically without even a cursory glance to see if they are
sane.
--git patches are not incremental and apply either to a base 2.6.x kernel or
-a base 2.6.x-rc kernel -- you can see which from their name.
-A patch named 2.6.12-git1 applies to the 2.6.12 kernel source and a patch
-named 2.6.13-rc3-git2 applies to the source of the 2.6.13-rc3 kernel.
-
-Here are some examples of how to apply these patches:
-
-# moving from 2.6.12 to 2.6.12-git1
-$ cd ~/linux-2.6.12 # change to the kernel source dir
-$ patch -p1 < ../patch-2.6.12-git1 # apply the 2.6.12-git1 patch
-$ cd ..
-$ mv linux-2.6.12 linux-2.6.12-git1 # rename the kernel source dir
-
-# moving from 2.6.12-git1 to 2.6.13-rc2-git3
-$ cd ~/linux-2.6.12-git1 # change to the kernel source dir
-$ patch -p1 -R < ../patch-2.6.12-git1 # revert the 2.6.12-git1 patch
- # we now have a 2.6.12 kernel
-$ patch -p1 < ../patch-2.6.13-rc2 # apply the 2.6.13-rc2 patch
- # the kernel is now 2.6.13-rc2
-$ patch -p1 < ../patch-2.6.13-rc2-git3 # apply the 2.6.13-rc2-git3 patch
- # the kernel is now 2.6.13-rc2-git3
-$ cd ..
-$ mv linux-2.6.12-git1 linux-2.6.13-rc2-git3 # rename source dir
-
-
-The -mm kernels
----
- These are experimental kernels released by Andrew Morton.
-
-The -mm tree serves as a sort of proving ground for new features and other
-experimental patches.
-Once a patch has proved its worth in -mm for a while Andrew pushes it on to
-Linus for inclusion in mainline.
-
-Although it's encouraged that patches flow to Linus via the -mm tree, this
-is not always enforced.
-Subsystem maintainers (or individuals) sometimes push their patches directly
-to Linus, even though (or after) they have been merged and tested in -mm (or
-sometimes even without prior testing in -mm).
-
-You should generally strive to get your patches into mainline via -mm to
-ensure maximum testing.
-
-This branch is in constant flux and contains many experimental features, a
+-git patches are not incremental and apply either to a base 4.x kernel or
+a base 4.x-rc kernel -- you can see which from their name.
+A patch named 4.7-git1 applies to the 4.7 kernel source and a patch
+named 4.8-rc3-git2 applies to the source of the 4.8-rc3 kernel.
+
+Here are some examples of how to apply these patches::
+
+ # moving from 4.7 to 4.7-git1
+
+ $ cd ~/linux-4.7 # change to the kernel source dir
+ $ patch -p1 < ../patch-4.7-git1 # apply the 4.7-git1 patch
+ $ cd ..
+ $ mv linux-4.7 linux-4.7-git1 # rename the kernel source dir
+
+ # moving from 4.7-git1 to 4.8-rc2-git3
+
+ $ cd ~/linux-4.7-git1 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-4.7-git1 # revert the 4.7-git1 patch
+ # we now have a 4.7 kernel
+ $ patch -p1 < ../patch-4.8-rc2 # apply the 4.8-rc2 patch
+ # the kernel is now 4.8-rc2
+ $ patch -p1 < ../patch-4.8-rc2-git3 # apply the 4.8-rc2-git3 patch
+ # the kernel is now 4.8-rc2-git3
+ $ cd ..
+ $ mv linux-4.7-git1 linux-4.8-rc2-git3 # rename source dir
+
+
+The -mm patches and the linux-next tree
+=======================================
+
+The -mm patches are experimental patches released by Andrew Morton.
+
+In the past, the -mm tree was also used to test subsystem patches, but this
+function is now done via the
+`linux-next <https://www.kernel.org/doc/man-pages/linux-next.html>`_
+tree. Subsystem maintainers push their patches first to linux-next,
+and, during the merge window, send them directly to Linus.
+
+The -mm patches serve as a sort of proving ground for new features and other
+experimental patches that aren't merged via a subsystem tree.
+Once such a patch has proved its worth in -mm for a while, Andrew pushes
+it on to Linus for inclusion in mainline.
+
+The linux-next tree is updated daily, and includes the -mm patches.
+Both are in constant flux and contain many experimental features, a
lot of debugging patches not appropriate for mainline etc., and is the most
experimental of the branches described in this document.
-These kernels are not appropriate for use on systems that are supposed to be
+These patches are not appropriate for use on systems that are supposed to be
stable and they are more risky to run than any of the other branches (make
sure you have up-to-date backups -- that goes for any experimental kernel but
-even more so for -mm kernels).
-
-These kernels in addition to all the other experimental patches they contain
-usually also contain any changes in the mainline -git kernels available at
-the time of release.
-
-Testing of -mm kernels is greatly appreciated since the whole point of the
-tree is to weed out regressions, crashes, data corruption bugs, build
-breakage (and any other bug in general) before changes are merged into the
-more stable mainline Linus tree.
-But testers of -mm should be aware that breakage in this tree is more common
-than in any other tree.
-
-The -mm kernels are not released on a fixed schedule, but usually a few -mm
-kernels are released in between each -rc kernel (1 to 3 is common).
-The -mm kernels apply to either a base 2.6.x kernel (when no -rc kernels
-have been released yet) or to a Linus -rc kernel.
-
-Here are some examples of applying the -mm patches:
-
-# moving from 2.6.12 to 2.6.12-mm1
-$ cd ~/linux-2.6.12 # change to the 2.6.12 source dir
-$ patch -p1 < ../2.6.12-mm1 # apply the 2.6.12-mm1 patch
-$ cd ..
-$ mv linux-2.6.12 linux-2.6.12-mm1 # rename the source appropriately
-
-# moving from 2.6.12-mm1 to 2.6.13-rc3-mm3
-$ cd ~/linux-2.6.12-mm1
-$ patch -p1 -R < ../2.6.12-mm1 # revert the 2.6.12-mm1 patch
- # we now have a 2.6.12 source
-$ patch -p1 < ../patch-2.6.13-rc3 # apply the 2.6.13-rc3 patch
- # we now have a 2.6.13-rc3 source
-$ patch -p1 < ../2.6.13-rc3-mm3 # apply the 2.6.13-rc3-mm3 patch
-$ cd ..
-$ mv linux-2.6.12-mm1 linux-2.6.13-rc3-mm3 # rename the source dir
+even more so for -mm patches or a kernel from the linux-next tree).
+
+Testing of -mm patches and linux-next is greatly appreciated, since the whole
+point of these trees is to weed out regressions, crashes, data corruption bugs,
+build breakage (and any other bug in general) before changes are merged into
+the more stable mainline Linus tree.
+
+But testers of -mm and linux-next should be aware that breakages are
+more common than in any other tree.
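+
+A minimal sketch for trying out linux-next (the URL and the daily tag name
+below follow the usual conventions, adjust as needed)::
+
+ $ git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+ $ git fetch --tags linux-next
+ $ git checkout -b test-next next-20160914   # pick a recent next-YYYYMMDD tag
+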
This concludes this list of explanations of the various kernel trees.
diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README
index e5a115f24471..cd0243302bc1 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -31,6 +31,8 @@ SunXi family
+ User Manual
http://dl.linux-sunxi.org/A13/A13%20User%20Manual%20-%20v1.2%20%282013-01-08%29.pdf
+ - Next Thing Co GR8 (sun5i)
+
* Dual ARM Cortex-A7 based SoCs
- Allwinner A20 (sun7i)
+ User Manual
@@ -73,4 +75,13 @@ SunXi family
* Octa ARM Cortex-A7 based SoCs
- Allwinner A83T
+ Datasheet
- http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
+ https://github.com/allwinner-zh/documents/raw/master/A83T/A83T_Datasheet_v1.3_20150510.pdf
+ + User Manual
+ https://github.com/allwinner-zh/documents/raw/master/A83T/A83T_User_Manual_v1.5.1_20150513.pdf
+
+ * Quad ARM Cortex-A53 based SoCs
+ - Allwinner A64
+ + Datasheet
+ http://dl.linux-sunxi.org/A64/A64_Datasheet_V1.1.pdf
+ + User Manual
+ http://dl.linux-sunxi.org/A64/Allwinner%20A64%20User%20Manual%20v1.0.pdf
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index bcdb2b4c1f12..918e1e0d0e78 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -115,7 +115,7 @@ i. Per-queue limits/values exported to the generic layer by the driver
Various parameters that the generic i/o scheduler logic uses are set at
a per-queue level (e.g maximum request size, maximum number of segments in
-a scatter-gather list, hardsect size)
+a scatter-gather list, logical block size)
Some parameters that were earlier available as global arrays indexed by
major/minor are now directly associated with the queue. Some of these may
@@ -156,7 +156,7 @@ Some new queue property settings:
blk_queue_max_segment_size(q, max_seg_size)
Maximum size of a clustered segment, 64kB default.
- blk_queue_hardsect_size(q, hardsect_size)
+ blk_queue_logical_block_size(q, logical_block_size)
Lowest possible sector size that the hardware can operate
on, 512 bytes default.
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 5c4bc4d01d0c..22f026aa2f34 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -31,24 +31,25 @@ serve as a convenient shorthand for the implementation of the
hardware-specific bits for the hypothetical "foo" hardware.
Tying the two halves of this interface together is struct clk_hw, which
-is defined in struct clk_foo and pointed to within struct clk. This
+is defined in struct clk_foo and pointed to within struct clk_core. This
allows for easy navigation between the two discrete halves of the common
clock interface.
Part 2 - common data structures and api
-Below is the common struct clk definition from
-include/linux/clk-private.h, modified for brevity:
+Below is the common struct clk_core definition from
+drivers/clk/clk.c, modified for brevity:
- struct clk {
+ struct clk_core {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
- char **parent_names;
- struct clk **parents;
- struct clk *parent;
- struct hlist_head children;
- struct hlist_node child_node;
+ struct module *owner;
+ struct clk_core *parent;
+ const char **parent_names;
+ struct clk_core **parents;
+ u8 num_parents;
+ u8 new_parent_index;
...
};
@@ -56,16 +57,19 @@ The members above make up the core of the clk tree topology. The clk
api itself defines several driver-facing functions which operate on
struct clk. That api is documented in include/linux/clk.h.
-Platforms and devices utilizing the common struct clk use the struct
-clk_ops pointer in struct clk to perform the hardware-specific parts of
-the operations defined in clk.h:
+Platforms and devices utilizing the common struct clk_core use the struct
+clk_ops pointer in struct clk_core to perform the hardware-specific parts of
+the operations defined in clk-provider.h:
struct clk_ops {
int (*prepare)(struct clk_hw *hw);
void (*unprepare)(struct clk_hw *hw);
+ int (*is_prepared)(struct clk_hw *hw);
+ void (*unprepare_unused)(struct clk_hw *hw);
int (*enable)(struct clk_hw *hw);
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
+ void (*disable_unused)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw,
@@ -84,6 +88,8 @@ the operations defined in clk.h:
u8 index);
unsigned long (*recalc_accuracy)(struct clk_hw *hw,
unsigned long parent_accuracy);
+ int (*get_phase)(struct clk_hw *hw);
+ int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
int (*debug_init)(struct clk_hw *hw,
struct dentry *dentry);
@@ -91,7 +97,7 @@ the operations defined in clk.h:
Part 3 - hardware clk implementations
-The strength of the common struct clk comes from its .ops and .hw pointers
+The strength of the common struct clk_core comes from its .ops and .hw pointers
which abstract the details of struct clk from the hardware-specific bits, and
vice versa. To illustrate consider the simple gateable clk implementation in
drivers/clk/clk-gate.c:
@@ -107,7 +113,7 @@ struct clk_gate contains struct clk_hw hw as well as hardware-specific
knowledge about which register and bit controls this clk's gating.
Nothing about clock topology or accounting, such as enable_count or
notifier_count, is needed here. That is all handled by the common
-framework code and struct clk.
+framework code and struct clk_core.
Let's walk through enabling this clk from driver code:
@@ -139,22 +145,18 @@ static void clk_gate_set_bit(struct clk_gate *gate)
Note that to_clk_gate is defined as:
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, clk)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
This pattern of abstraction is used for every clock hardware
representation.
Part 4 - supporting your own clk hardware
-When implementing support for a new type of clock it only necessary to
+When implementing support for a new type of clock it is only necessary to
include the following header:
#include <linux/clk-provider.h>
-include/linux/clk.h is included within that header and clk-private.h
-must never be included from the code which implements the operations for
-a clock. More on that below in Part 5.
-
To construct a clk hardware structure for your platform you must define
the following:
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 106ae9c740b9..0cc8765d3f98 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -14,11 +14,17 @@
import sys
import os
+import sphinx
+
+# Get Sphinx version
+major, minor, patch = map(int, sphinx.__version__.split("."))
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
+from load_config import loadConfig
# -- General configuration ------------------------------------------------
@@ -28,14 +34,13 @@ sys.path.insert(0, os.path.abspath('sphinx'))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include']
+extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include', 'cdomain']
-# Gracefully handle missing rst2pdf.
-try:
- import rst2pdf
- extensions += ['rst2pdf.pdfbuilder']
-except ImportError:
- pass
+# The name of the math extension changed on Sphinx 1.4
+if minor > 3:
+ extensions.append("sphinx.ext.imgmath")
+else:
+ extensions.append("sphinx.ext.pngmath")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -252,23 +257,90 @@ htmlhelp_basename = 'TheLinuxKerneldoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
+'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+'pointsize': '8pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
+
+# Don't mangle UTF-8 chars
+'inputenc': '',
+'utf8extra': '',
+
+# Additional stuff for the LaTeX preamble.
+ 'preamble': '''
+ % Adjust margins
+ \\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}
+
+ % Allow generating some pages in landscape
+ \\usepackage{lscape}
+
+ % Put notes in color and let them be inside a table
+ \\definecolor{NoteColor}{RGB}{204,255,255}
+ \\definecolor{WarningColor}{RGB}{255,204,204}
+ \\definecolor{AttentionColor}{RGB}{255,255,204}
+ \\definecolor{OtherColor}{RGB}{204,204,204}
+ \\newlength{\\mynoticelength}
+ \\makeatletter\\newenvironment{coloredbox}[1]{%
+ \\setlength{\\fboxrule}{1pt}
+ \\setlength{\\fboxsep}{7pt}
+ \\setlength{\\mynoticelength}{\\linewidth}
+ \\addtolength{\\mynoticelength}{-2\\fboxsep}
+ \\addtolength{\\mynoticelength}{-2\\fboxrule}
+ \\begin{lrbox}{\\@tempboxa}\\begin{minipage}{\\mynoticelength}}{\\end{minipage}\\end{lrbox}%
+ \\ifthenelse%
+ {\\equal{\\py@noticetype}{note}}%
+ {\\colorbox{NoteColor}{\\usebox{\\@tempboxa}}}%
+ {%
+ \\ifthenelse%
+ {\\equal{\\py@noticetype}{warning}}%
+ {\\colorbox{WarningColor}{\\usebox{\\@tempboxa}}}%
+ {%
+ \\ifthenelse%
+ {\\equal{\\py@noticetype}{attention}}%
+ {\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
+ {\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
+ }%
+ }%
+ }\\makeatother
+
+ \\makeatletter
+ \\renewenvironment{notice}[2]{%
+ \\def\\py@noticetype{#1}
+ \\begin{coloredbox}{#1}
+ \\bf\\it
+ \\par\\strong{#2}
+ \\csname py@noticestart@#1\\endcsname
+ }
+ {
+ \\csname py@noticeend@\\py@noticetype\\endcsname
+ \\end{coloredbox}
+ }
+ \\makeatother
+
+ % Use some font with UTF-8 support with XeLaTeX
+ \\usepackage{fontspec}
+ \\setsansfont{DejaVu Sans}
+ \\setromanfont{DejaVu Serif}
+ \\setmonofont{DejaVu Sans Mono}
+
+ % To allow adjusting table sizes
+ \\usepackage{adjustbox}
+
+ '''
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'TheLinuxKernel.tex', 'The Linux Kernel Documentation',
+ ('kernel-documentation', 'kernel-documentation.tex', 'The Linux Kernel Documentation',
+ 'The kernel development community', 'manual'),
+ ('development-process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
+ 'The kernel development community', 'manual'),
+ ('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
'The kernel development community', 'manual'),
]
@@ -419,3 +491,9 @@ pdf_documents = [
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
+
+# ------------------------------------------------------------------------------
+# Since loadConfig overwrites settings from the global namespace, it has to be
+# the last statement in the conf.py file
+# ------------------------------------------------------------------------------
+loadConfig(globals())
diff --git a/Documentation/coccinelle.txt b/Documentation/dev-tools/coccinelle.rst
index 01fb1dae3163..4a64b4c69d3f 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/dev-tools/coccinelle.rst
@@ -1,10 +1,18 @@
-Copyright 2010 Nicolas Palix <npalix@diku.dk>
-Copyright 2010 Julia Lawall <julia@diku.dk>
-Copyright 2010 Gilles Muller <Gilles.Muller@lip6.fr>
+.. Copyright 2010 Nicolas Palix <npalix@diku.dk>
+.. Copyright 2010 Julia Lawall <julia@diku.dk>
+.. Copyright 2010 Gilles Muller <Gilles.Muller@lip6.fr>
+.. highlight:: none
- Getting Coccinelle
-~~~~~~~~~~~~~~~~~~~~
+Coccinelle
+==========
+
+Coccinelle is a tool for pattern matching and text transformation that has
+many uses in kernel development, including the application of complex,
+tree-wide patches and detection of problematic programming patterns.
+
+Getting Coccinelle
+-------------------
The semantic patches included in the kernel use features and options
which are provided by Coccinelle version 1.0.0-rc11 and above.
@@ -22,24 +30,23 @@ of many distributions, e.g. :
- NetBSD
- FreeBSD
-
You can get the latest version released from the Coccinelle homepage at
http://coccinelle.lip6.fr/
Information and tips about Coccinelle are also provided on the wiki
pages at http://cocci.ekstranet.diku.dk/wiki/doku.php
-Once you have it, run the following command:
+Once you have it, run the following command::
./configure
make
-as a regular user, and install it with
+as a regular user, and install it with::
sudo make install
- Supplemental documentation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Supplemental documentation
+---------------------------
For supplemental documentation refer to the wiki:
@@ -47,49 +54,52 @@ https://bottest.wiki.kernel.org/coccicheck
The wiki documentation always refers to the linux-next version of the script.
- Using Coccinelle on the Linux kernel
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Using Coccinelle on the Linux kernel
+------------------------------------
A Coccinelle-specific target is defined in the top level
-Makefile. This target is named 'coccicheck' and calls the 'coccicheck'
-front-end in the 'scripts' directory.
+Makefile. This target is named ``coccicheck`` and calls the ``coccicheck``
+front-end in the ``scripts`` directory.
-Four basic modes are defined: patch, report, context, and org. The mode to
-use is specified by setting the MODE variable with 'MODE=<mode>'.
+Four basic modes are defined: ``patch``, ``report``, ``context``, and
+``org``. The mode to use is specified by setting the MODE variable with
+``MODE=<mode>``.
-'patch' proposes a fix, when possible.
+- ``patch`` proposes a fix, when possible.
-'report' generates a list in the following format:
+- ``report`` generates a list in the following format:
file:line:column-column: message
-'context' highlights lines of interest and their context in a
-diff-like style.Lines of interest are indicated with '-'.
+- ``context`` highlights lines of interest and their context in a
+ diff-like style. Lines of interest are indicated with ``-``.
-'org' generates a report in the Org mode format of Emacs.
+- ``org`` generates a report in the Org mode format of Emacs.
Note that not all semantic patches implement all modes. For easy use
of Coccinelle, the default mode is "report".
Two other modes provide some common combinations of these modes.
-'chain' tries the previous modes in the order above until one succeeds.
+- ``chain`` tries the previous modes in the order above until one succeeds.
+
+- ``rep+ctxt`` runs the report mode and the context mode successively.
+ It should be used with the C option (described later)
+ which checks the code on a file basis.
-'rep+ctxt' runs successively the report mode and the context mode.
- It should be used with the C option (described later)
- which checks the code on a file basis.
+Examples
+~~~~~~~~
-Examples:
- To make a report for every semantic patch, run the following command:
+To make a report for every semantic patch, run the following command::
make coccicheck MODE=report
- To produce patches, run:
+To produce patches, run::
make coccicheck MODE=patch
The coccicheck target applies every semantic patch available in the
-sub-directories of 'scripts/coccinelle' to the entire Linux kernel.
+sub-directories of ``scripts/coccinelle`` to the entire Linux kernel.
For each semantic patch, a commit message is proposed. It gives a
description of the problem being checked by the semantic patch, and
@@ -99,15 +109,15 @@ As any static code analyzer, Coccinelle produces false
positives. Thus, reports must be carefully checked, and patches
reviewed.
-To enable verbose messages set the V= variable, for example:
+To enable verbose messages set the V= variable, for example::
make coccicheck MODE=report V=1
- Coccinelle parallelization
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Coccinelle parallelization
+---------------------------
By default, coccicheck tries to run as parallel as possible. To change
-the parallelism, set the J= variable. For example, to run across 4 CPUs:
+the parallelism, set the J= variable. For example, to run across 4 CPUs::
make coccicheck MODE=report J=4
@@ -115,44 +125,47 @@ As of Coccinelle 1.0.2 Coccinelle uses Ocaml parmap for parallelization,
if support for this is detected you will benefit from parmap parallelization.
When parmap is enabled coccicheck will enable dynamic load balancing by using
-'--chunksize 1' argument, this ensures we keep feeding threads with work
+``--chunksize 1`` argument; this ensures we keep feeding threads with work
one by one, so that we avoid the situation where most work gets done by only
a few threads. With dynamic load balancing, if a thread finishes early we keep
feeding it more work.
When parmap is enabled, if an error occurs in Coccinelle, this error
-value is propagated back, the return value of the 'make coccicheck'
+value is propagated back; the return value of ``make coccicheck``
captures this return value.
- Using Coccinelle with a single semantic patch
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Using Coccinelle with a single semantic patch
+---------------------------------------------
The optional make variable COCCI can be used to check a single
semantic patch. In that case, the variable must be initialized with
the name of the semantic patch to apply.
-For instance:
+For instance::
make coccicheck COCCI=<my_SP.cocci> MODE=patch
-or
+
+or::
+
make coccicheck COCCI=<my_SP.cocci> MODE=report
- Controlling Which Files are Processed by Coccinelle
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Controlling Which Files are Processed by Coccinelle
+---------------------------------------------------
+
By default the entire kernel source tree is checked.
-To apply Coccinelle to a specific directory, M= can be used.
-For example, to check drivers/net/wireless/ one may write:
+To apply Coccinelle to a specific directory, ``M=`` can be used.
+For example, to check drivers/net/wireless/ one may write::
make coccicheck M=drivers/net/wireless/
To apply Coccinelle on a file basis, instead of a directory basis, the
-following command may be used:
+following command may be used::
make C=1 CHECK="scripts/coccicheck"
-To check only newly edited code, use the value 2 for the C flag, i.e.
+To check only newly edited code, use the value 2 for the C flag, i.e.::
make C=2 CHECK="scripts/coccicheck"
@@ -166,8 +179,8 @@ semantic patch as shown in the previous section.
The "report" mode is the default. You can select another one with the
MODE variable explained above.
- Debugging Coccinelle SmPL patches
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Debugging Coccinelle SmPL patches
+---------------------------------
Using coccicheck is best as it provides, on the spatch command line, the
include options matching the options used when we compile the kernel.
@@ -177,8 +190,8 @@ manually run Coccinelle with debug options added.
Alternatively you can debug running Coccinelle against SmPL patches
by asking for stderr to be redirected to a file. By default stderr
is redirected to /dev/null; if you'd like to capture stderr you
-can specify the DEBUG_FILE="file.txt" option to coccicheck. For
-instance:
+can specify the ``DEBUG_FILE="file.txt"`` option to coccicheck. For
+instance::
rm -f cocci.err
make coccicheck COCCI=scripts/coccinelle/free/kfree.cocci MODE=report DEBUG_FILE=cocci.err
@@ -186,7 +199,7 @@ instance:
You can use SPFLAGS to add debugging flags; for instance you may want to
add both --profile --show-trying to SPFLAGS when debugging. For example
-you may want to use:
+you may want to use::
rm -f err.log
export COCCI=scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -198,24 +211,24 @@ work.
DEBUG_FILE support is only supported when using coccinelle >= 1.2.
- .cocciconfig support
-~~~~~~~~~~~~~~~~~~~~~~
+.cocciconfig support
+--------------------
Coccinelle supports reading .cocciconfig for default Coccinelle options that
should be used every time spatch is spawned. The order of precedence for
.cocciconfig variables is as follows:
- o Your current user's home directory is processed first
- o Your directory from which spatch is called is processed next
- o The directory provided with the --dir option is processed last, if used
+- Your current user's home directory is processed first
+- Your directory from which spatch is called is processed next
+- The directory provided with the --dir option is processed last, if used
Since coccicheck runs through make, it naturally runs from the kernel
proper dir; as such, the second rule above would be implied for picking up a
-.cocciconfig when using 'make coccicheck'.
+.cocciconfig when using ``make coccicheck``.
-'make coccicheck' also supports using M= targets.If you do not supply
+``make coccicheck`` also supports using M= targets. If you do not supply
any M= target, it is assumed you want to target the entire kernel.
-The kernel coccicheck script has:
+The kernel coccicheck script has::
if [ "$KBUILD_EXTMOD" = "" ] ; then
OPTIONS="--dir $srctree $COCCIINCLUDE"
@@ -235,12 +248,12 @@ override any of the kernel's .coccicheck's settings using SPFLAGS.
We help Coccinelle when used against Linux with a set of sensible default
options for Linux with our own Linux .cocciconfig. This hints to coccinelle
-git can be used for 'git grep' queries over coccigrep. A timeout of 200
+that git can be used for ``git grep`` queries over coccigrep. A timeout of 200
seconds should suffice for now.
The options picked up by coccinelle when reading a .cocciconfig do not appear
as arguments to spatch processes running on your system, to confirm what
-options will be used by Coccinelle run:
+options will be used by Coccinelle run::
spatch --print-options-only
@@ -252,219 +265,227 @@ carries its own .cocciconfig, you will need to use SPFLAGS to use idutils if
desired. See below section "Additional flags" for more details on how to use
idutils.
- Additional flags
-~~~~~~~~~~~~~~~~~~
+Additional flags
+----------------
Additional flags can be passed to spatch through the SPFLAGS
variable. This works as Coccinelle respects the last flags
-given to it when options are in conflict.
+given to it when options are in conflict. ::
make SPFLAGS=--use-glimpse coccicheck
Coccinelle supports idutils as well but requires coccinelle >= 1.0.6.
When no ID file is specified coccinelle assumes your ID database file
is in the file .id-utils.index on the top level of the kernel; coccinelle
-carries a script scripts/idutils_index.sh which creates the database with
+carries a script scripts/idutils_index.sh which creates the database with::
mkid -i C --output .id-utils.index
If you have another database filename you can also just symlink with this
-name.
+name. ::
make SPFLAGS=--use-idutils coccicheck
Alternatively you can specify the database filename explicitly, for
-instance:
+instance::
make SPFLAGS="--use-idutils /full-path/to/ID" coccicheck
-See spatch --help to learn more about spatch options.
+See ``spatch --help`` to learn more about spatch options.
-Note that the '--use-glimpse' and '--use-idutils' options
+Note that the ``--use-glimpse`` and ``--use-idutils`` options
require external tools for indexing the code. None of them is
thus active by default. However, by indexing the code with
one of these tools, and according to the cocci file used,
spatch could process the entire code base more quickly.
- SmPL patch specific options
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+SmPL patch specific options
+---------------------------
SmPL patches can have their own requirements for options passed
to Coccinelle. SmPL patch specific options can be provided by
-providing them at the top of the SmPL patch, for instance:
+providing them at the top of the SmPL patch, for instance::
-// Options: --no-includes --include-headers
+ // Options: --no-includes --include-headers
- SmPL patch Coccinelle requirements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+SmPL patch Coccinelle requirements
+----------------------------------
As Coccinelle features get added some more advanced SmPL patches
may require newer versions of Coccinelle. If an SmPL patch requires
a minimum version of Coccinelle, this can be specified as follows,
-as an example if requiring at least Coccinelle >= 1.0.5:
+as an example if requiring at least Coccinelle >= 1.0.5::
-// Requires: 1.0.5
+ // Requires: 1.0.5
- Proposing new semantic patches
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Proposing new semantic patches
+-------------------------------
New semantic patches can be proposed and submitted by kernel
developers. For the sake of clarity, they should be organized in the
-sub-directories of 'scripts/coccinelle/'.
+sub-directories of ``scripts/coccinelle/``.
- Detailed description of the 'report' mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Detailed description of the ``report`` mode
+-------------------------------------------
+
+``report`` generates a list in the following format::
-'report' generates a list in the following format:
file:line:column-column: message
-Example:
+Example
+~~~~~~~
-Running
+Running::
make coccicheck MODE=report COCCI=scripts/coccinelle/api/err_cast.cocci
-will execute the following part of the SmPL script.
+will execute the following part of the SmPL script::
-<smpl>
-@r depends on !context && !patch && (org || report)@
-expression x;
-position p;
-@@
+ <smpl>
+ @r depends on !context && !patch && (org || report)@
+ expression x;
+ position p;
+ @@
- ERR_PTR@p(PTR_ERR(x))
+ ERR_PTR@p(PTR_ERR(x))
-@script:python depends on report@
-p << r.p;
-x << r.x;
-@@
+ @script:python depends on report@
+ p << r.p;
+ x << r.x;
+ @@
-msg="ERR_CAST can be used with %s" % (x)
-coccilib.report.print_report(p[0], msg)
-</smpl>
+ msg="ERR_CAST can be used with %s" % (x)
+ coccilib.report.print_report(p[0], msg)
+ </smpl>
This SmPL excerpt generates entries on the standard output, as
-illustrated below:
+illustrated below::
-/home/user/linux/crypto/ctr.c:188:9-16: ERR_CAST can be used with alg
-/home/user/linux/crypto/authenc.c:619:9-16: ERR_CAST can be used with auth
-/home/user/linux/crypto/xts.c:227:9-16: ERR_CAST can be used with alg
+ /home/user/linux/crypto/ctr.c:188:9-16: ERR_CAST can be used with alg
+ /home/user/linux/crypto/authenc.c:619:9-16: ERR_CAST can be used with auth
+ /home/user/linux/crypto/xts.c:227:9-16: ERR_CAST can be used with alg
- Detailed description of the 'patch' mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Detailed description of the ``patch`` mode
+------------------------------------------
-When the 'patch' mode is available, it proposes a fix for each problem
+When the ``patch`` mode is available, it proposes a fix for each problem
identified.
-Example:
+Example
+~~~~~~~
+
+Running::
-Running
make coccicheck MODE=patch COCCI=scripts/coccinelle/api/err_cast.cocci
-will execute the following part of the SmPL script.
+will execute the following part of the SmPL script::
-<smpl>
-@ depends on !context && patch && !org && !report @
-expression x;
-@@
+ <smpl>
+ @ depends on !context && patch && !org && !report @
+ expression x;
+ @@
-- ERR_PTR(PTR_ERR(x))
-+ ERR_CAST(x)
-</smpl>
+ - ERR_PTR(PTR_ERR(x))
+ + ERR_CAST(x)
+ </smpl>
This SmPL excerpt generates patch hunks on the standard output, as
-illustrated below:
+illustrated below::
-diff -u -p a/crypto/ctr.c b/crypto/ctr.c
---- a/crypto/ctr.c 2010-05-26 10:49:38.000000000 +0200
-+++ b/crypto/ctr.c 2010-06-03 23:44:49.000000000 +0200
-@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ct
+ diff -u -p a/crypto/ctr.c b/crypto/ctr.c
+ --- a/crypto/ctr.c 2010-05-26 10:49:38.000000000 +0200
+ +++ b/crypto/ctr.c 2010-06-03 23:44:49.000000000 +0200
+ @@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ct
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
-- return ERR_PTR(PTR_ERR(alg));
-+ return ERR_CAST(alg);
-
+ - return ERR_PTR(PTR_ERR(alg));
+ + return ERR_CAST(alg);
+
/* Block size must be >= 4 bytes. */
err = -EINVAL;
- Detailed description of the 'context' mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Detailed description of the ``context`` mode
+--------------------------------------------
-'context' highlights lines of interest and their context
+``context`` highlights lines of interest and their context
in a diff-like style.
-NOTE: The diff-like output generated is NOT an applicable patch. The
- intent of the 'context' mode is to highlight the important lines
- (annotated with minus, '-') and gives some surrounding context
+ **NOTE**: The diff-like output generated is NOT an applicable patch. The
+ intent of the ``context`` mode is to highlight the important lines
+ (annotated with minus, ``-``) and give some surrounding context
lines around. This output can be used with the diff mode of
Emacs to review the code.
-Example:
+Example
+~~~~~~~
+
+Running::
-Running
make coccicheck MODE=context COCCI=scripts/coccinelle/api/err_cast.cocci
-will execute the following part of the SmPL script.
+will execute the following part of the SmPL script::
-<smpl>
-@ depends on context && !patch && !org && !report@
-expression x;
-@@
+ <smpl>
+ @ depends on context && !patch && !org && !report@
+ expression x;
+ @@
-* ERR_PTR(PTR_ERR(x))
-</smpl>
+ * ERR_PTR(PTR_ERR(x))
+ </smpl>
This SmPL excerpt generates diff hunks on the standard output, as
-illustrated below:
+illustrated below::
-diff -u -p /home/user/linux/crypto/ctr.c /tmp/nothing
---- /home/user/linux/crypto/ctr.c 2010-05-26 10:49:38.000000000 +0200
-+++ /tmp/nothing
-@@ -185,7 +185,6 @@ static struct crypto_instance *crypto_ct
+ diff -u -p /home/user/linux/crypto/ctr.c /tmp/nothing
+ --- /home/user/linux/crypto/ctr.c 2010-05-26 10:49:38.000000000 +0200
+ +++ /tmp/nothing
+ @@ -185,7 +185,6 @@ static struct crypto_instance *crypto_ct
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
-- return ERR_PTR(PTR_ERR(alg));
-
+ - return ERR_PTR(PTR_ERR(alg));
+
/* Block size must be >= 4 bytes. */
err = -EINVAL;
- Detailed description of the 'org' mode
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Detailed description of the ``org`` mode
+----------------------------------------
+
+``org`` generates a report in the Org mode format of Emacs.
-'org' generates a report in the Org mode format of Emacs.
+Example
+~~~~~~~
-Example:
+Running::
-Running
make coccicheck MODE=org COCCI=scripts/coccinelle/api/err_cast.cocci
-will execute the following part of the SmPL script.
+will execute the following part of the SmPL script::
-<smpl>
-@r depends on !context && !patch && (org || report)@
-expression x;
-position p;
-@@
+ <smpl>
+ @r depends on !context && !patch && (org || report)@
+ expression x;
+ position p;
+ @@
- ERR_PTR@p(PTR_ERR(x))
+ ERR_PTR@p(PTR_ERR(x))
-@script:python depends on org@
-p << r.p;
-x << r.x;
-@@
+ @script:python depends on org@
+ p << r.p;
+ x << r.x;
+ @@
-msg="ERR_CAST can be used with %s" % (x)
-msg_safe=msg.replace("[","@(").replace("]",")")
-coccilib.org.print_todo(p[0], msg_safe)
-</smpl>
+ msg="ERR_CAST can be used with %s" % (x)
+ msg_safe=msg.replace("[","@(").replace("]",")")
+ coccilib.org.print_todo(p[0], msg_safe)
+ </smpl>
This SmPL excerpt generates Org entries on the standard output, as
-illustrated below:
+illustrated below::
-* TODO [[view:/home/user/linux/crypto/ctr.c::face=ovl-face1::linb=188::colb=9::cole=16][ERR_CAST can be used with alg]]
-* TODO [[view:/home/user/linux/crypto/authenc.c::face=ovl-face1::linb=619::colb=9::cole=16][ERR_CAST can be used with auth]]
-* TODO [[view:/home/user/linux/crypto/xts.c::face=ovl-face1::linb=227::colb=9::cole=16][ERR_CAST can be used with alg]]
+ * TODO [[view:/home/user/linux/crypto/ctr.c::face=ovl-face1::linb=188::colb=9::cole=16][ERR_CAST can be used with alg]]
+ * TODO [[view:/home/user/linux/crypto/authenc.c::face=ovl-face1::linb=619::colb=9::cole=16][ERR_CAST can be used with auth]]
+ * TODO [[view:/home/user/linux/crypto/xts.c::face=ovl-face1::linb=227::colb=9::cole=16][ERR_CAST can be used with alg]]
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
new file mode 100644
index 000000000000..19eedfea8800
--- /dev/null
+++ b/Documentation/dev-tools/gcov.rst
@@ -0,0 +1,256 @@
+Using gcov with the Linux kernel
+================================
+
+gcov profiling kernel support enables the use of GCC's coverage testing
+tool gcov_ with the Linux kernel. Coverage data of a running kernel
+is exported in gcov-compatible format via the "gcov" debugfs directory.
+To get coverage data for a specific file, change to the kernel build
+directory and use gcov with the ``-o`` option as follows (requires root)::
+
+ # cd /tmp/linux-out
+ # gcov -o /sys/kernel/debug/gcov/tmp/linux-out/kernel spinlock.c
+
+This will create source code files annotated with execution counts
+in the current directory. In addition, graphical gcov front-ends such
+as lcov_ can be used to automate the process of collecting data
+for the entire kernel and provide coverage overviews in HTML format.
+
+Possible uses:
+
+* debugging (has this line been reached at all?)
+* test improvement (how do I change my test to cover these lines?)
+* minimizing kernel configurations (do I need this option if the
+ associated code is never run?)
+
+.. _gcov: http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
+.. _lcov: http://ltp.sourceforge.net/coverage/lcov.php
+
+
+Preparation
+-----------
+
+Configure the kernel with::
+
+ CONFIG_DEBUG_FS=y
+ CONFIG_GCOV_KERNEL=y
+
+select gcc's gcov format; the default is to autodetect based on the gcc version::
+
+ CONFIG_GCOV_FORMAT_AUTODETECT=y
+
+and to get coverage data for the entire kernel::
+
+ CONFIG_GCOV_PROFILE_ALL=y
+
+Note that kernels compiled with profiling flags will be significantly
+larger and run slower. Also CONFIG_GCOV_PROFILE_ALL may not be supported
+on all architectures.
+
+Profiling data will only become accessible once debugfs has been
+mounted::
+
+ mount -t debugfs none /sys/kernel/debug
+
+
+Customization
+-------------
+
+To enable profiling for specific files or directories, add a line
+similar to the following to the respective kernel Makefile:
+
+- For a single file (e.g. main.o)::
+
+ GCOV_PROFILE_main.o := y
+
+- For all files in one directory::
+
+ GCOV_PROFILE := y
+
+To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL
+is specified, use::
+
+ GCOV_PROFILE_main.o := n
+
+and::
+
+ GCOV_PROFILE := n
+
+Only files which are linked to the main kernel image or are compiled as
+kernel modules are supported by this mechanism.
+
+
+Files
+-----
+
+The gcov kernel support creates the following files in debugfs:
+
+``/sys/kernel/debug/gcov``
+ Parent directory for all gcov-related files.
+
+``/sys/kernel/debug/gcov/reset``
+ Global reset file: resets all coverage data to zero when
+ written to.
+
+``/sys/kernel/debug/gcov/path/to/compile/dir/file.gcda``
+ The actual gcov data file as understood by the gcov
+ tool. Resets file coverage data to zero when written to.
+
+``/sys/kernel/debug/gcov/path/to/compile/dir/file.gcno``
+ Symbolic link to a static data file required by the gcov
+ tool. This file is generated by gcc when compiling with
+ option ``-ftest-coverage``.
+
+
+Modules
+-------
+
+Kernel modules may contain cleanup code which is only run during
+module unload time. The gcov mechanism provides a means to collect
+coverage data for such code by keeping a copy of the data associated
+with the unloaded module. This data remains available through debugfs.
+Once the module is loaded again, the associated coverage counters are
+initialized with the data from its previous instantiation.
+
+This behavior can be deactivated by specifying the gcov_persist kernel
+parameter::
+
+ gcov_persist=0
+
+At run-time, a user can also choose to discard data for an unloaded
+module by writing to its data file or the global reset file.
+
+
+Separated build and test machines
+---------------------------------
+
+The gcov kernel profiling infrastructure is designed to work out-of-the
+box for setups where kernels are built and run on the same machine. In
+cases where the kernel runs on a separate machine, special preparations
+must be made, depending on where the gcov tool is used:
+
+a) gcov is run on the TEST machine
+
+ The gcov tool version on the test machine must be compatible with the
+ gcc version used for kernel build. Also the following files need to be
+ copied from build to test machine:
+
+ from the source tree:
+ - all C source files + headers
+
+ from the build tree:
+ - all C source files + headers
+ - all .gcda and .gcno files
+ - all links to directories
+
+ It is important to note that these files need to be placed into the
+ exact same file system location on the test machine as on the build
+ machine. If any of the path components is a symbolic link, the actual
+ directory needs to be used instead (due to make's CURDIR handling).
+
+b) gcov is run on the BUILD machine
+
+ The following files need to be copied after each test case from test
+ to build machine:
+
+ from the gcov directory in sysfs:
+ - all .gcda files
+ - all links to .gcno files
+
+ These files can be copied to any location on the build machine. gcov
+ must then be called with the -o option pointing to that directory.
+
+ Example directory setup on the build machine::
+
+ /tmp/linux: kernel source tree
+ /tmp/out: kernel build directory as specified by make O=
+ /tmp/coverage: location of the files copied from the test machine
+
+ [user@build] cd /tmp/out
+ [user@build] gcov -o /tmp/coverage/tmp/out/init main.c
+
+
+Troubleshooting
+---------------
+
+Problem
+ Compilation aborts during linker step.
+
+Cause
+ Profiling flags are specified for source files which are not
+ linked to the main kernel or which are linked by a custom
+ linker procedure.
+
+Solution
+ Exclude affected source files from profiling by specifying
+ ``GCOV_PROFILE := n`` or ``GCOV_PROFILE_basename.o := n`` in the
+ corresponding Makefile.
+
+Problem
+ Files copied from sysfs appear empty or incomplete.
+
+Cause
+ Due to the way seq_file works, some tools such as cp or tar
+ may not correctly copy files from sysfs.
+
+Solution
+ Use ``cat`` to read ``.gcda`` files and ``cp -d`` to copy links.
+ Alternatively use the mechanism shown in Appendix B.
+
+
+Appendix A: gather_on_build.sh
+------------------------------
+
+Sample script to gather coverage meta files on the build machine
+(see scenario a above)::
+
+ #!/bin/bash
+
+ KSRC=$1
+ KOBJ=$2
+ DEST=$3
+
+ if [ -z "$KSRC" ] || [ -z "$KOBJ" ] || [ -z "$DEST" ]; then
+ echo "Usage: $0 <ksrc directory> <kobj directory> <output.tar.gz>" >&2
+ exit 1
+ fi
+
+ KSRC=$(cd $KSRC; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
+ KOBJ=$(cd $KOBJ; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
+
+ find $KSRC $KOBJ \( -name '*.gcno' -o -name '*.[ch]' -o -type l \) -a \
+ -perm /u+r,g+r | tar cfz $DEST -P -T -
+
+ if [ $? -eq 0 ] ; then
+ echo "$DEST successfully created, copy to test system and unpack with:"
+ echo " tar xfz $DEST -P"
+ else
+ echo "Could not create file $DEST"
+ fi
+
+
+Appendix B: gather_on_test.sh
+-----------------------------
+
+Sample script to gather coverage data files on the test machine
+(see scenario b above)::
+
+ #!/bin/bash -e
+
+ DEST=$1
+ GCDA=/sys/kernel/debug/gcov
+
+ if [ -z "$DEST" ] ; then
+ echo "Usage: $0 <output.tar.gz>" >&2
+ exit 1
+ fi
+
+ TEMPDIR=$(mktemp -d)
+ echo Collecting data..
+ find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \;
+ find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \;
+ find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \;
+ tar czf $DEST -C $TEMPDIR sys
+ rm -rf $TEMPDIR
+
+ echo "$DEST successfully created, copy to build system and unpack with:"
+ echo " tar xfz $DEST"
diff --git a/Documentation/gdb-kernel-debugging.txt b/Documentation/dev-tools/gdb-kernel-debugging.rst
index 7050ce8794b9..5e93c9bc6619 100644
--- a/Documentation/gdb-kernel-debugging.txt
+++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
@@ -1,3 +1,5 @@
+.. highlight:: none
+
Debugging kernel and modules via gdb
====================================
@@ -13,54 +15,58 @@ be transferred to the other gdb stubs as well.
Requirements
------------
- o gdb 7.2+ (recommended: 7.4+) with python support enabled (typically true
- for distributions)
+- gdb 7.2+ (recommended: 7.4+) with python support enabled (typically true
+ for distributions)
Setup
-----
- o Create a virtual Linux machine for QEMU/KVM (see www.linux-kvm.org and
- www.qemu.org for more details). For cross-development,
- http://landley.net/aboriginal/bin keeps a pool of machine images and
- toolchains that can be helpful to start from.
+- Create a virtual Linux machine for QEMU/KVM (see www.linux-kvm.org and
+ www.qemu.org for more details). For cross-development,
+ http://landley.net/aboriginal/bin keeps a pool of machine images and
+ toolchains that can be helpful to start from.
- o Build the kernel with CONFIG_GDB_SCRIPTS enabled, but leave
- CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports
- CONFIG_FRAME_POINTER, keep it enabled.
+- Build the kernel with CONFIG_GDB_SCRIPTS enabled, but leave
+ CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports
+ CONFIG_FRAME_POINTER, keep it enabled.
- o Install that kernel on the guest.
+- Install that kernel on the guest.
+ Alternatively, QEMU allows booting the kernel directly using -kernel,
+ -append, -initrd command line switches. This is generally only useful if
+ you do not depend on modules. See QEMU documentation for more details on
+ this mode.
- Alternatively, QEMU allows to boot the kernel directly using -kernel,
- -append, -initrd command line switches. This is generally only useful if
- you do not depend on modules. See QEMU documentation for more details on
- this mode.
+- Enable the gdb stub of QEMU/KVM, either
- o Enable the gdb stub of QEMU/KVM, either
- at VM startup time by appending "-s" to the QEMU command line
- or
+
+ or
+
- during runtime by issuing "gdbserver" from the QEMU monitor
console
- o cd /path/to/linux-build
+- cd /path/to/linux-build
- o Start gdb: gdb vmlinux
+- Start gdb: gdb vmlinux
- Note: Some distros may restrict auto-loading of gdb scripts to known safe
- directories. In case gdb reports to refuse loading vmlinux-gdb.py, add
+ Note: Some distros may restrict auto-loading of gdb scripts to known safe
+ directories. If gdb refuses to load vmlinux-gdb.py, add::
add-auto-load-safe-path /path/to/linux-build
- to ~/.gdbinit. See gdb help for more details.
+ to ~/.gdbinit. See gdb help for more details.
+
+- Attach to the booted guest::
- o Attach to the booted guest:
(gdb) target remote :1234
Examples of using the Linux-provided gdb helpers
------------------------------------------------
- o Load module (and main kernel) symbols:
+- Load module (and main kernel) symbols::
+
(gdb) lx-symbols
loading vmlinux
scanning for modules in /home/user/linux/build
@@ -72,17 +78,20 @@ Examples of using the Linux-provided gdb helpers
...
loading @0xffffffffa0000000: /home/user/linux/build/drivers/ata/ata_generic.ko
- o Set a breakpoint on some not yet loaded module function, e.g.:
+- Set a breakpoint on some not yet loaded module function, e.g.::
+
(gdb) b btrfs_init_sysfs
Function "btrfs_init_sysfs" not defined.
Make breakpoint pending on future shared library load? (y or [n]) y
Breakpoint 1 (btrfs_init_sysfs) pending.
- o Continue the target
+- Continue the target::
+
(gdb) c
- o Load the module on the target and watch the symbols being loaded as well as
- the breakpoint hit:
+- Load the module on the target and watch the symbols being loaded as well as
+ the breakpoint hit::
+
loading @0xffffffffa0034000: /home/user/linux/build/lib/libcrc32c.ko
loading @0xffffffffa0050000: /home/user/linux/build/lib/lzo/lzo_compress.ko
loading @0xffffffffa006e000: /home/user/linux/build/lib/zlib_deflate/zlib_deflate.ko
@@ -91,7 +100,8 @@ Examples of using the Linux-provided gdb helpers
Breakpoint 1, btrfs_init_sysfs () at /home/user/linux/fs/btrfs/sysfs.c:36
36 btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
- o Dump the log buffer of the target kernel:
+- Dump the log buffer of the target kernel::
+
(gdb) lx-dmesg
[ 0.000000] Initializing cgroup subsys cpuset
[ 0.000000] Initializing cgroup subsys cpu
@@ -102,19 +112,22 @@ Examples of using the Linux-provided gdb helpers
[ 0.000000] BIOS-e820: [mem 0x000000000009fc00-0x000000000009ffff] reserved
....
- o Examine fields of the current task struct:
+- Examine fields of the current task struct::
+
(gdb) p $lx_current().pid
$1 = 4998
(gdb) p $lx_current().comm
$2 = "modprobe\000\000\000\000\000\000\000"
- o Make use of the per-cpu function for the current or a specified CPU:
+- Make use of the per-cpu function for the current or a specified CPU::
+
(gdb) p $lx_per_cpu("runqueues").nr_running
$3 = 1
(gdb) p $lx_per_cpu("runqueues", 2).nr_running
$4 = 0
- o Dig into hrtimers using the container_of helper:
+- Dig into hrtimers using the container_of helper::
+
(gdb) set $next = $lx_per_cpu("hrtimer_bases").clock_base[0].active.next
(gdb) p *$container_of($next, "struct hrtimer", "node")
$5 = {
@@ -144,7 +157,7 @@ List of commands and functions
------------------------------
The number of commands and convenience functions may evolve over time;
-this is just a snapshot of the initial version:
+this is just a snapshot of the initial version::
(gdb) apropos lx
function lx_current -- Return current task
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
new file mode 100644
index 000000000000..f7a18f274357
--- /dev/null
+++ b/Documentation/dev-tools/kasan.rst
@@ -0,0 +1,173 @@
+The Kernel Address Sanitizer (KASAN)
+====================================
+
+Overview
+--------
+
+KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides
+a fast and comprehensive solution for finding use-after-free and out-of-bounds
+bugs.
+
+KASAN uses compile-time instrumentation for checking every memory access,
+therefore you will need GCC version 4.9.2 or later. GCC 5.0 or later is
+required for detection of out-of-bounds accesses to stack or global variables.
+
+Currently KASAN is supported only for the x86_64 and arm64 architectures.
+
+Usage
+-----
+
+To enable KASAN configure the kernel with::
+
+	CONFIG_KASAN=y
+
+and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and
+inline are compiler instrumentation types. The former produces a smaller
+binary, while the latter is 1.1 - 2 times faster. Inline instrumentation
+requires GCC version 5.0 or later.
+
+KASAN works with both SLUB and SLAB memory allocators.
+For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+
+To disable instrumentation for specific files or directories, add a line
+similar to the following to the respective kernel Makefile:
+
+- For a single file (e.g. main.o)::
+
+ KASAN_SANITIZE_main.o := n
+
+- For all files in one directory::
+
+ KASAN_SANITIZE := n
+
+Error reports
+~~~~~~~~~~~~~
+
+A typical out of bounds access report looks like this::
+
+ ==================================================================
+ BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3
+ Write of size 1 by task modprobe/1689
+ =============================================================================
+ BUG kmalloc-128 (Not tainted): kasan error
+ -----------------------------------------------------------------------------
+
+ Disabling lock debugging due to kernel taint
+ INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689
+ __slab_alloc+0x4b4/0x4f0
+ kmem_cache_alloc_trace+0x10b/0x190
+ kmalloc_oob_right+0x3d/0x75 [test_kasan]
+ init_module+0x9/0x47 [test_kasan]
+ do_one_initcall+0x99/0x200
+ load_module+0x2cb3/0x3b20
+ SyS_finit_module+0x76/0x80
+ system_call_fastpath+0x12/0x17
+ INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080
+ INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720
+
+ Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
+ Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc ........
+ Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
+ ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78
+ ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8
+ ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558
+ Call Trace:
+ [<ffffffff81cc68ae>] dump_stack+0x46/0x58
+ [<ffffffff811fd848>] print_trailer+0xf8/0x160
+ [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
+ [<ffffffff811ff0f5>] object_err+0x35/0x40
+ [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0
+ [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
+ [<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40
+ [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
+ [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
+ [<ffffffff8120a995>] __asan_store1+0x75/0xb0
+ [<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan]
+ [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan]
+ [<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan]
+ [<ffffffff810002d9>] do_one_initcall+0x99/0x200
+ [<ffffffff811e4e5c>] ? __vunmap+0xec/0x160
+ [<ffffffff81114f63>] load_module+0x2cb3/0x3b20
+ [<ffffffff8110fd70>] ? m_show+0x240/0x240
+ [<ffffffff81115f06>] SyS_finit_module+0x76/0x80
+ [<ffffffff81cd3129>] system_call_fastpath+0x12/0x17
+ Memory state around the buggy address:
+ ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
+ ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00
+ >ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc
+ ^
+ ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ==================================================================
+
+The header of the report describes what kind of bug happened and what kind of
+access caused it. It's followed by the description of the accessed slub object
+(see 'SLUB Debug output' section in Documentation/vm/slub.txt for details) and
+the description of the accessed memory page.
+
+In the last section the report shows memory state around the accessed address.
+Reading this part requires some understanding of how KASAN works.
+
+The state of each aligned 8-byte region of memory is encoded in one shadow byte.
+Those 8 bytes can be accessible, partially accessible, freed or be a redzone.
+We use the following encoding for each shadow byte: 0 means that all 8 bytes
+of the corresponding memory region are accessible; number N (1 <= N <= 7) means
+that the first N bytes are accessible, and the other (8 - N) bytes are not;
+any negative value indicates that the entire 8-byte word is inaccessible.
+We use different negative values to distinguish between different kinds of
+inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
+
+In the report above, the arrow points to the shadow byte 03, which means that
+the accessed address is partially accessible.
+
+
+Implementation details
+----------------------
+
+From a high level, our approach to memory error detection is similar to that
+of kmemcheck: use shadow memory to record whether each byte of memory is safe
+to access, and use compile-time instrumentation to check shadow memory on each
+memory access.
+
+AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory
+(e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and
+offset to translate a memory address to its corresponding shadow address.
+
+Here is the function which translates an address to its corresponding shadow
+address::
+
+ static inline void *kasan_mem_to_shadow(const void *addr)
+ {
+ return ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+ }
+
+where ``KASAN_SHADOW_SCALE_SHIFT = 3``.
+
+Compile-time instrumentation is used for checking memory accesses. The compiler
+inserts function calls (__asan_load*(addr), __asan_store*(addr)) before each
+memory access of size 1, 2, 4, 8 or 16. These functions check whether the
+memory access is valid or not by checking the corresponding shadow memory.
+
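+For illustration, a minimal sketch of the check that such a call conceptually
+performs for a 1-byte access (reusing the kasan_mem_to_shadow() helper above;
+the real checks in mm/kasan/ are more involved) could look like::
+
+	static bool address_is_poisoned(const void *addr)
+	{
+		s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);
+
+		if (shadow == 0)
+			return false;	/* whole 8-byte granule accessible */
+		if (shadow < 0)
+			return true;	/* redzone, freed memory, etc. */
+		/* 1 <= shadow <= 7: only the first 'shadow' bytes are valid */
+		return ((unsigned long)addr & 7) >= shadow;
+	}
+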
+GCC 5.0 and later can perform inline instrumentation. Instead of making
+function calls, GCC directly inserts the code to check the shadow memory.
+This option significantly enlarges the kernel, but it gives a 1.1x-2x
+performance boost over an outline-instrumented kernel.
diff --git a/Documentation/kcov.txt b/Documentation/dev-tools/kcov.rst
index 779ff4ab1c1d..aca0e27ca197 100644
--- a/Documentation/kcov.txt
+++ b/Documentation/dev-tools/kcov.rst
@@ -12,38 +12,38 @@ To achieve this goal it does not collect coverage in soft/hard interrupts
and instrumentation of some inherently non-deterministic parts of the kernel is
disabled (e.g. scheduler, locking).
-Usage:
-======
+Usage
+-----
-Configure kernel with:
+Configure the kernel with::
CONFIG_KCOV=y
CONFIG_KCOV requires gcc built on revision 231296 or later.
-Profiling data will only become accessible once debugfs has been mounted:
+Profiling data will only become accessible once debugfs has been mounted::
mount -t debugfs none /sys/kernel/debug
-The following program demonstrates kcov usage from within a test program:
-
-#include <stdio.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
-#define KCOV_ENABLE _IO('c', 100)
-#define KCOV_DISABLE _IO('c', 101)
-#define COVER_SIZE (64<<10)
-
-int main(int argc, char **argv)
-{
+The following program demonstrates kcov usage from within a test program::
+
+ #include <stdio.h>
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <sys/ioctl.h>
+ #include <sys/mman.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_ENABLE _IO('c', 100)
+ #define KCOV_DISABLE _IO('c', 101)
+ #define COVER_SIZE (64<<10)
+
+ int main(int argc, char **argv)
+ {
int fd;
unsigned long *cover, n, i;
@@ -83,24 +83,24 @@ int main(int argc, char **argv)
if (close(fd))
perror("close"), exit(1);
return 0;
-}
-
-After piping through addr2line output of the program looks as follows:
-
-SyS_read
-fs/read_write.c:562
-__fdget_pos
-fs/file.c:774
-__fget_light
-fs/file.c:746
-__fget_light
-fs/file.c:750
-__fget_light
-fs/file.c:760
-__fdget_pos
-fs/file.c:784
-SyS_read
-fs/read_write.c:562
+ }
+
+After piping through addr2line output of the program looks as follows::
+
+ SyS_read
+ fs/read_write.c:562
+ __fdget_pos
+ fs/file.c:774
+ __fget_light
+ fs/file.c:746
+ __fget_light
+ fs/file.c:750
+ __fget_light
+ fs/file.c:760
+ __fdget_pos
+ fs/file.c:784
+ SyS_read
+ fs/read_write.c:562
If a program needs to collect coverage from several threads (independently),
it needs to open /sys/kernel/debug/kcov in each thread separately.
diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst
new file mode 100644
index 000000000000..7f3d1985de74
--- /dev/null
+++ b/Documentation/dev-tools/kmemcheck.rst
@@ -0,0 +1,733 @@
+Getting started with kmemcheck
+==============================
+
+Vegard Nossum <vegardno@ifi.uio.no>
+
+
+Introduction
+------------
+
+kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
+is a dynamic checker that detects and warns about some uses of uninitialized
+memory.
+
+Userspace programmers might be familiar with Valgrind's memcheck. The main
+difference between memcheck and kmemcheck is that memcheck works for userspace
+programs only, and kmemcheck works for the kernel only. The implementations
+are of course vastly different. Because of this, kmemcheck is not as accurate
+as memcheck, but it turns out to be good enough in practice to discover real
+programmer errors that the compiler is not able to find through static
+analysis.
+
+Enabling kmemcheck on a kernel will probably slow it down to the extent that
+the machine will not be usable for normal workloads such as e.g. an
+interactive desktop. kmemcheck will also cause the kernel to use about twice
+as much memory as normal. For this reason, kmemcheck is strictly a debugging
+feature.
+
+
+Downloading
+-----------
+
+As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.
+
+
+Configuring and compiling
+-------------------------
+
+kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
+configuration variables must have specific settings in order for the kmemcheck
+menu to even appear in "menuconfig". These are:
+
+- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n``
+ This option is located under "General setup" / "Optimize for size".
+
+ Without this, gcc will use certain optimizations that usually lead to
+ false positive warnings from kmemcheck. An example of this is a 16-bit
+ field in a struct, where gcc may load 32 bits, then discard the upper
+ 16 bits. kmemcheck sees only the 32-bit load, and may trigger a
+ warning for the upper 16 bits (if they're uninitialized). A short
+ hypothetical sketch of this pattern is shown right after this list.
+
+- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y``
+ This option is located under "General setup" / "Choose SLAB
+ allocator".
+
+- ``CONFIG_FUNCTION_TRACER=n``
+ This option is located under "Kernel hacking" / "Tracers" / "Kernel
+ Function Tracer"
+
+ When function tracing is compiled in, gcc emits a call to another
+ function at the beginning of every function. This means that when the
+ page fault handler is called, the ftrace framework will be called
+ before kmemcheck has had a chance to handle the fault. If ftrace then
+ modifies memory that was tracked by kmemcheck, the result is an
+ endless recursive page fault.
+
+- ``CONFIG_DEBUG_PAGEALLOC=n``
+ This option is located under "Kernel hacking" / "Memory Debugging"
+ / "Debug page memory allocations".
+
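+As a purely hypothetical illustration of the ``CONFIG_CC_OPTIMIZE_FOR_SIZE``
+point above (this is not code taken from the kernel), consider::
+
+	struct foo {
+		u16 a;	/* initialized by the code under test */
+		u16 b;	/* never initialized */
+	};
+
+	u16 read_a(struct foo *f)
+	{
+		/*
+		 * When optimizing for size, gcc may emit a single 32-bit load
+		 * covering both 'a' and 'b' and then discard the upper half;
+		 * kmemcheck only sees the 32-bit load and may warn about the
+		 * uninitialized 'b'.
+		 */
+		return f->a;
+	}
+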
+In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also
+located under "Kernel hacking". With this, you will be able to get line number
+information from the kmemcheck warnings, which is extremely valuable in
+debugging a problem. This option is not mandatory, however, because it slows
+down the compilation process and produces a much bigger kernel image.
+
+Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory
+Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows
+a description of the kmemcheck configuration variables:
+
+- ``CONFIG_KMEMCHECK``
+ This must be enabled in order to use kmemcheck at all...
+
+- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT``
+ This option controls the status of kmemcheck at boot-time. "Enabled"
+ will enable kmemcheck right from the start, "disabled" will boot the
+ kernel as normal (but with the kmemcheck code compiled in, so it can
+ be enabled at run-time after the kernel has booted), and "one-shot" is
+ a special mode which will turn kmemcheck off automatically after
+ detecting the first use of uninitialized memory.
+
+ If you are using kmemcheck to actively debug a problem, then you
+ probably want to choose "enabled" here.
+
+ The one-shot mode is mostly useful in automated test setups because it
+ can prevent floods of warnings and increase the chances of the machine
+ surviving in case something is really wrong. In other cases, the one-
+ shot mode could actually be counter-productive because it would turn
+ itself off at the very first error -- in the case of a false positive
+ too -- and this would get in the way of debugging the specific
+ problem you were interested in.
+
+ If you would like to use your kernel as normal, but with a chance to
+ enable kmemcheck in case of some problem, it might be a good idea to
+ choose "disabled" here. When kmemcheck is disabled, most of the run-
+ time overhead is not incurred, and the kernel will be almost as fast
+ as normal.
+
+- ``CONFIG_KMEMCHECK_QUEUE_SIZE``
+ Select the maximum number of error reports to store in an internal
+ (fixed-size) buffer. Since errors can occur virtually anywhere and in
+ any context, we need a temporary storage area which is guaranteed not
+ to generate any other page faults when accessed. The queue will be
+ emptied as soon as a tasklet may be scheduled. If the queue is full,
+ new error reports will be lost.
+
+ The default value of 64 is probably fine. If some code produces more
+ than 64 errors within an irqs-off section, then the code is likely to
+ produce many, many more, too, and these additional reports seldom give
+ any more information (the first report is usually the most valuable
+ anyway).
+
+ This number might have to be adjusted if you are not using serial
+ console or similar to capture the kernel log. If you are using the
+ "dmesg" command to save the log, then getting a lot of kmemcheck
+ warnings might overflow the kernel log itself, and the earlier reports
+ will get lost in that way instead. Try setting this to 10 or so on
+ such a setup.
+
+- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT``
+ Select the number of shadow bytes to save along with each entry of the
+ error-report queue. These bytes indicate what parts of an allocation
+ are initialized, uninitialized, etc. and will be displayed when an
+ error is detected to help the debugging of a particular problem.
+
+ The number entered here is actually the base-2 logarithm of the number
+ of bytes that will be saved. So if you pick, for example, 5 here,
+ kmemcheck will save 2^5 = 32 bytes.
+
+ The default value should be fine for debugging most problems. It also
+ fits nicely within 80 columns.
+
+- ``CONFIG_KMEMCHECK_PARTIAL_OK``
+ This option (when enabled) works around certain GCC optimizations that
+ produce 32-bit reads from 16-bit variables where the upper 16 bits are
+ thrown away afterwards.
+
+ The default value (enabled) is recommended. This may of course hide
+ some real errors, but disabling it would probably produce a lot of
+ false positives.
+
+- ``CONFIG_KMEMCHECK_BITOPS_OK``
+ This option silences warnings that would be generated for bit-field
+ accesses where not all the bits are initialized at the same time. This
+ may also hide some real bugs.
+
+ This option is probably obsolete, or it should be replaced with
+ the kmemcheck-/bitfield-annotations for the code in question. The
+ default value is therefore fine.
+
+Now compile the kernel as usual.
+
+
+How to use
+----------
+
+Booting
+~~~~~~~
+
+First some information about the command-line options. There is only one
+option specific to kmemcheck, and this is called "kmemcheck". It can be used
+to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT``
+option. Its possible settings are:
+
+- ``kmemcheck=0`` (disabled)
+- ``kmemcheck=1`` (enabled)
+- ``kmemcheck=2`` (one-shot mode)
+
+If SLUB debugging has been enabled in the kernel, it may take precedence over
+kmemcheck in such a way that the slab caches which are under SLUB debugging
+will not be tracked by kmemcheck. In order to ensure that this doesn't happen
+(even though it shouldn't by default), use SLUB's boot option ``slub_debug``,
+like this: ``slub_debug=-``
+
+In fact, this option may also be used for fine-grained control over SLUB vs.
+kmemcheck. For example, if the command line includes
+``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only
+for the "dentry" slab cache, and with kmemcheck tracking all the other
+caches. This is advanced usage, however, and is not generally recommended.
+
+
+Run-time enable/disable
+~~~~~~~~~~~~~~~~~~~~~~~
+
+When the kernel has booted, it is possible to enable or disable kmemcheck at
+run-time. WARNING: This feature is still experimental and may cause false
+positive warnings to appear. Therefore, try not to use this. If you find that
+it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
+will be happy to take bug reports.
+
+Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.::
+
+ $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
+
+The numbers are the same as for the ``kmemcheck=`` command-line option.
+
+
+Debugging
+~~~~~~~~~
+
+A typical report will look something like this::
+
+ WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+ 80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+ ^
+
+ Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
+ RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+ RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002
+ RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+ RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
+ RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
+ R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
+ R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
+ FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
+ CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
+ CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The single most valuable piece of information in this report is the RIP (or
+EIP on 32-bit) value. This will help us pinpoint exactly which instruction
+caused the warning.
+
+If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do
+is give this address to the addr2line program, like this::
+
+ $ addr2line -e vmlinux -i ffffffff8104ede8
+ arch/x86/include/asm/string_64.h:12
+ include/asm-generic/siginfo.h:287
+ kernel/signal.c:380
+ kernel/signal.c:410
+
+The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:**
+This must be the vmlinux of the kernel that produced the warning in the
+first place! If not, the line number information will almost certainly be
+wrong.
+
+The "``-i``" tells addr2line to also print the line numbers of inlined
+functions. In this case, the flag was very important, because otherwise,
+it would only have printed the first line, which is just a call to
+``memcpy()``, which could be called from a thousand places in the kernel, and
+is therefore not very useful. These inlined functions would not show up in
+the stack trace above, simply because the kernel doesn't load the extra
+debugging information. This technique can of course be used with ordinary
+kernel oopses as well.
+
+In this case, it's the caller of ``memcpy()`` that is interesting, and it can be
+found in ``include/asm-generic/siginfo.h``, line 287::
+
+ 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+ 282 {
+ 283 if (from->si_code < 0)
+ 284 memcpy(to, from, sizeof(*to));
+ 285 else
+ 286 /* _sigchld is currently the largest know union member */
+ 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ 288 }
+
+Since this was a read (kmemcheck usually warns about reads only, though it can
+warn about writes to unallocated or freed memory as well), it was probably the
+"from" argument which contained some uninitialized bytes. Following the chain
+of calls, we move upwards to see where "from" was allocated or initialized,
+``kernel/signal.c``, line 380::
+
+ 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+ 360 {
+ ...
+ 367 list_for_each_entry(q, &list->list, list) {
+ 368 if (q->info.si_signo == sig) {
+ 369 if (first)
+ 370 goto still_pending;
+ 371 first = q;
+ ...
+ 377 if (first) {
+ 378 still_pending:
+ 379 list_del_init(&first->list);
+ 380 copy_siginfo(info, &first->info);
+ 381 __sigqueue_free(first);
+ ...
+ 392 }
+ 393 }
+
+Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The
+variable ``first`` was found on a list -- passed in as the second argument to
+``collect_signal()``. We continue our journey through the stack, to figure out
+where the item on "list" was allocated or initialized. We move to line 410::
+
+ 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
+ 396 siginfo_t *info)
+ 397 {
+ ...
+ 410 collect_signal(sig, pending, info);
+ ...
+ 414 }
+
+Now we need to follow the ``pending`` pointer, since that is being passed on to
+``collect_signal()`` as ``list``. At this point, we've run out of lines from the
+"addr2line" output. Not to worry, we just paste the next addresses from the
+kmemcheck stack dump, i.e.::
+
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+
+ $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
+ ffffffff8100b87d ffffffff8100c7b5
+ kernel/signal.c:446
+ kernel/signal.c:1806
+ arch/x86/kernel/signal.c:805
+ arch/x86/kernel/signal.c:871
+ arch/x86/kernel/entry_64.S:694
+
+Remember that since these addresses were found on the stack and not as the
+RIP value, they actually point to the _next_ instruction (they are return
+addresses). This becomes obvious when we look at the code for line 446::
+
+ 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ 423 {
+ ...
+ 431 signr = __dequeue_signal(&tsk->signal->shared_pending,
+ 432 mask, info);
+ 433 /*
+ 434 * itimer signal ?
+ 435 *
+ 436 * itimers are process shared and we restart periodic
+ 437 * itimers in the signal delivery path to prevent DoS
+ 438 * attacks in the high resolution timer case. This is
+ 439 * compliant with the old way of self restarting
+ 440 * itimers, as the SIGALRM is a legacy signal and only
+ 441 * queued once. Changing the restart behaviour to
+ 442 * restart the timer in the signal dequeue path is
+ 443 * reducing the timer noise on heavy loaded !highres
+ 444 * systems too.
+ 445 */
+ 446 if (unlikely(signr == SIGALRM)) {
+ ...
+ 489 }
+
+So instead of looking at 446, we should be looking at 431, which is the line
+that executes just before 446. Here we see that what we are looking for is
+``&tsk->signal->shared_pending``.
+
+Our next task is to figure out which function puts items on this
+``shared_pending`` list. A crude but efficient tool is ``git grep``::
+
+ $ git grep -n 'shared_pending' kernel/
+ ...
+ kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending;
+ kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending;
+ ...
+
+There were more results, but none of them were related to list operations,
+and these were the only assignments. We inspect the line numbers more closely
+and find that this is indeed where items are being added to the list::
+
+ 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ 817 int group)
+ 818 {
+ ...
+ 828 pending = group ? &t->signal->shared_pending : &t->pending;
+ ...
+ 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+ 852 (is_si_special(info) ||
+ 853 info->si_code >= 0)));
+ 854 if (q) {
+ 855 list_add_tail(&q->list, &pending->list);
+ ...
+ 890 }
+
+and::
+
+ 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+ 1310 {
+ ....
+ 1339 pending = group ? &t->signal->shared_pending : &t->pending;
+ 1340 list_add_tail(&q->list, &pending->list);
+ ....
+ 1347 }
+
+In the first case, the list element we are looking for, ``q``, is being
+returned from the function ``__sigqueue_alloc()``, which looks like an
+allocation function. Let's take a look at it::
+
+ 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
+ 188 int override_rlimit)
+ 189 {
+ 190 struct sigqueue *q = NULL;
+ 191 struct user_struct *user;
+ 192
+ 193 /*
+ 194 * We won't get problems with the target's UID changing under us
+ 195 * because changing it requires RCU be used, and if t != current, the
+ 196 * caller must be holding the RCU readlock (by way of a spinlock) and
+ 197 * we use RCU protection here
+ 198 */
+ 199 user = get_uid(__task_cred(t)->user);
+ 200 atomic_inc(&user->sigpending);
+ 201 if (override_rlimit ||
+ 202 atomic_read(&user->sigpending) <=
+ 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+ 204 q = kmem_cache_alloc(sigqueue_cachep, flags);
+ 205 if (unlikely(q == NULL)) {
+ 206 atomic_dec(&user->sigpending);
+ 207 free_uid(user);
+ 208 } else {
+ 209 INIT_LIST_HEAD(&q->list);
+ 210 q->flags = 0;
+ 211 q->user = user;
+ 212 }
+ 213
+ 214 return q;
+ 215 }
+
+We see that this function initializes ``q->list``, ``q->flags``, and
+``q->user``. It seems that now is the time to look at the definition of
+``struct sigqueue``, e.g.::
+
+ 14 struct sigqueue {
+ 15 struct list_head list;
+ 16 int flags;
+ 17 siginfo_t info;
+ 18 struct user_struct *user;
+ 19 };
+
+And, you might remember, it was a ``memcpy()`` on ``&first->info`` that
+caused the warning, so this makes perfect sense. It also seems reasonable
+to assume that it is the caller of ``__sigqueue_alloc()`` that has the
+responsibility of filling out (initializing) this member.
+
+But just which fields of the struct were uninitialized? Let's look at
+kmemcheck's report again::
+
+ WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+ 80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+ ^
+
+These first two lines are the memory dump of the memory object itself, and
+the shadow bytemap, respectively. The memory object itself is in this case
+``&first->info``. Just beware that the start of this dump is NOT the start
+of the object itself! The position of the caret (^) corresponds with the
+address of the read (ffff88003e4a2024).
+
+The shadow bytemap dump legend is as follows:
+
+- i: initialized
+- u: uninitialized
+- a: unallocated (memory has been allocated by the slab layer, but has not
+ yet been handed off to anybody)
+- f: freed (memory has been allocated by the slab layer, but has been freed
+ by the previous owner)
+
+In order to figure out where (relative to the start of the object) the
+uninitialized memory was located, we have to look at the disassembly. For
+that, we'll need the RIP address again::
+
+ RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+
+ $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
+ ffffffff8104edc8: mov %r8,0x8(%r8)
+ ffffffff8104edcc: test %r10d,%r10d
+ ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168>
+ ffffffff8104edd5: mov %rax,%rdx
+ ffffffff8104edd8: mov $0xc,%ecx
+ ffffffff8104eddd: mov %r13,%rdi
+ ffffffff8104ede0: mov $0x30,%eax
+ ffffffff8104ede5: mov %rdx,%rsi
+ ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edea: test $0x2,%al
+ ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0>
+ ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edf0: test $0x1,%al
+ ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5>
+ ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi)
+ ffffffff8104edf5: mov %r8,%rdi
+ ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free>
+
+As expected, it's the "``rep movsl``" instruction from the ``memcpy()``
+that causes the warning. We know that ``REP MOVSL`` uses the register ``RCX``
+to count the number of remaining iterations. By taking a look at the
+register dump again (from the kmemcheck report), we can figure out how many
+bytes were left to copy::
+
+ RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+
+By looking at the disassembly, we also see that ``%ecx`` is being loaded
+with the value ``$0xc`` just before (ffffffff8104edd8), so we are very
+lucky. Keep in mind that this is the number of iterations, not bytes. And
+since this is a "long" operation, we need to multiply by 4 to get the
+number of bytes. So this means that the uninitialized value was encountered
+at 4 * (0xc - 0x9) = 12 bytes from the start of the object.
+
+We can now try to figure out which field of ``struct siginfo`` was not
+initialized. This is the beginning of the struct::
+
+ 40 typedef struct siginfo {
+ 41 int si_signo;
+ 42 int si_errno;
+ 43 int si_code;
+ 44
+ 45 union {
+ ..
+ 92 } _sifields;
+ 93 } siginfo_t;
+
+On 64-bit, an int is 4 bytes long, so it must be the union member that has
+not been initialized. We can verify this using gdb::
+
+ $ gdb vmlinux
+ ...
+ (gdb) p &((struct siginfo *) 0)->_sifields
+ $1 = (union {...} *) 0x10
+
+Actually, it seems that the union member is located at offset 0x10 -- which
+means that gcc has inserted 4 bytes of padding between the members ``si_code``
+and ``_sifields``. We can now get a fuller picture of the memory dump::
+
+ _----------------------------=> si_code
+ / _--------------------=> (padding)
+ | / _------------=> _sifields(._kill._pid)
+ | | / _----=> _sifields(._kill._uid)
+ | | | /
+ -------|-------|-------|-------|
+ 80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+
+This allows us to realize another important fact: ``si_code`` contains the
+value 0x80. Remember that x86 is little endian, so the first 4 bytes
+"80000000" are really the number 0x00000080. With a bit of research, we
+find that this is actually the constant ``SI_KERNEL`` defined in
+``include/asm-generic/siginfo.h``::
+
+ 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+
+This macro is used in exactly one place in the x86 kernel: In ``send_signal()``
+in ``kernel/signal.c``::
+
+ 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ 817 int group)
+ 818 {
+ ...
+ 828 pending = group ? &t->signal->shared_pending : &t->pending;
+ ...
+ 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+ 852 (is_si_special(info) ||
+ 853 info->si_code >= 0)));
+ 854 if (q) {
+ 855 list_add_tail(&q->list, &pending->list);
+ 856 switch ((unsigned long) info) {
+ ...
+ 865 case (unsigned long) SEND_SIG_PRIV:
+ 866 q->info.si_signo = sig;
+ 867 q->info.si_errno = 0;
+ 868 q->info.si_code = SI_KERNEL;
+ 869 q->info.si_pid = 0;
+ 870 q->info.si_uid = 0;
+ 871 break;
+ ...
+ 890 }
+
+Not only does this match the ``.si_code`` member, it also matches the place
+we found earlier when looking for where siginfo_t objects are enqueued on the
+``shared_pending`` list.
+
+So to sum up: It seems that it is the padding introduced by the compiler
+between two struct fields that is uninitialized, and this gets reported when
+we do a ``memcpy()`` on the struct. This means that we have identified a false
+positive warning.
+
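+To make this class of false positive easier to recognize, here is a minimal,
+hypothetical illustration of the same pattern; the struct, field names and
+function below are invented for this document and do not appear in the
+kernel::
+
+	#include <linux/string.h>
+
+	struct padded {
+		int code;	/* bytes 0-3 */
+		/* 4 bytes of implicit padding on 64-bit */
+		void *payload;	/* bytes 8-15 */
+	};
+
+	static void copy_padded(struct padded *dst, const struct padded *src)
+	{
+		/*
+		 * This copies the padding bytes too. If *src is a tracked
+		 * heap object whose padding was never written, and *dst is
+		 * not tracked, kmemcheck reports the copy as a read of
+		 * uninitialized memory even though no real bug exists.
+		 */
+		memcpy(dst, src, sizeof(*dst));
+	}
+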
+Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls
+when both the source and destination addresses are tracked. (Instead, we copy
+the shadow bytemap as well). In this case, the destination address clearly
+was not tracked. We can dig a little deeper into the stack trace from above::
+
+ arch/x86/kernel/signal.c:805
+ arch/x86/kernel/signal.c:871
+ arch/x86/kernel/entry_64.S:694
+
+And we clearly see that the destination siginfo object is located on the
+stack::
+
+ 782 static void do_signal(struct pt_regs *regs)
+ 783 {
+ 784 struct k_sigaction ka;
+ 785 siginfo_t info;
+ ...
+ 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ ...
+ 854 }
+
+And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the
+destination argument.
+
+Now, even though we didn't find an actual error here, the example is still a
+good one, because it shows how one would go about finding out what the report
+was all about.
+
+
+Annotating false positives
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a few different ways to make annotations in the source code that
+will keep kmemcheck from checking and reporting certain allocations. Here
+they are:
+
+- ``__GFP_NOTRACK_FALSE_POSITIVE``
+ This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()``
+ (therefore also to other functions that end up calling one of
+ these) to indicate that the allocation should not be tracked
+ because it would lead to a false positive report. This is a "big
+ hammer" way of silencing kmemcheck; after all, even if the false
+ positive pertains to a particular field in a struct, for example, we
+ will now lose the ability to find (real) errors in other parts of
+ the same struct.
+
+ Example::
+
+ /* No warnings will ever trigger on accessing any part of x */
+ x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
+
+- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and
+ ``kmemcheck_annotate_bitfield(ptr, name)``
+ The first two of these three macros can be used inside struct
+ definitions to signal, respectively, the beginning and end of a
+ bitfield. Additionally, this will assign the bitfield a name, which
+ is given as an argument to the macros.
+
+ Having used these markers, one can later use
+ ``kmemcheck_annotate_bitfield()`` at the point of allocation, to indicate
+ which parts of the allocation are part of a bitfield.
+
+ Example::
+
+ struct foo {
+ int x;
+
+ kmemcheck_bitfield_begin(flags);
+ int flag_a:1;
+ int flag_b:1;
+ kmemcheck_bitfield_end(flags);
+
+ int y;
+ };
+
+ struct foo *x = kmalloc(sizeof *x, GFP_KERNEL);
+
+ /* No warnings will trigger on accessing the bitfield of x */
+ kmemcheck_annotate_bitfield(x, flags);
+
+ Note that ``kmemcheck_annotate_bitfield()`` can be used even before the
+ return value of ``kmalloc()`` is checked -- in other words, passing NULL
+ as the first argument is legal (and will do nothing).
+
+
+Reporting errors
+----------------
+
+As we have seen, kmemcheck will produce false positive reports. Therefore, it
+is not very wise to blindly post kmemcheck warnings to mailing lists and
+maintainers. Instead, I encourage maintainers and developers to find errors
+in their own code. If you get a warning, you can try to work around it, try
+to figure out if it's a real error or not, or simply ignore it. Most
+developers know their own code and will quickly and efficiently determine the
+root cause of a kmemcheck report. This is therefore also the most efficient
+way to work with kmemcheck.
+
+That said, we (the kmemcheck maintainers) will always be on the lookout for
+false positives that we can annotate and silence. So whatever you find,
+please drop us a note privately! Kernel configs and steps to reproduce (if
+available) are of course a great help too.
+
+Happy hacking!
+
+
+Technical description
+---------------------
+
+kmemcheck works by marking memory pages non-present. This means that whenever
+somebody attempts to access the page, a page fault is generated. The page
+fault handler notices that the page was in fact only hidden, and so it calls
+on the kmemcheck code to make further investigations.
+
+When the investigations are completed, kmemcheck "shows" the page by marking
+it present (as it would be under normal circumstances). This way, the
+interrupted code can continue as usual.
+
+But after the instruction has been executed, we should hide the page again, so
+that we can catch the next access too! Now kmemcheck makes use of a debugging
+feature of the processor, namely single-stepping. When the processor has
+finished the one instruction that generated the memory access, a debug
+exception is raised. From here, we simply hide the page again and continue
+execution, this time with the single-stepping feature turned off.
+
+kmemcheck requires some assistance from the memory allocator in order to work.
+The memory allocator needs to
+
+ 1. Tell kmemcheck about newly allocated pages and pages that are about to
+ be freed. This allows kmemcheck to set up and tear down the shadow memory
+ for the pages in question. The shadow memory stores the status of each
+ byte in the allocation proper, e.g. whether it is initialized or
+ uninitialized.
+
+ 2. Tell kmemcheck which parts of memory should be marked uninitialized.
+ There are actually a few more states, such as "not yet allocated" and
+ "recently freed".
+
+If a slab cache is set up using the ``SLAB_NOTRACK`` flag, it will never
+return memory that can take page faults because of kmemcheck.
+
+If a slab cache is NOT set up using the ``SLAB_NOTRACK`` flag, callers can
+still request memory with the ``__GFP_NOTRACK`` or
+``__GFP_NOTRACK_FALSE_POSITIVE`` flags. This does not prevent the page
+faults from occurring, but it marks the object in question as initialized,
+so that no warnings will ever be produced for this object.
+
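+To make this concrete, here is a minimal, hypothetical sketch of how a caller
+might use these flags; the module, cache name and structure below are
+invented for illustration and are not part of the kmemcheck API::
+
+	#include <linux/init.h>
+	#include <linux/module.h>
+	#include <linux/slab.h>
+
+	struct example { int a, b; };
+
+	static struct kmem_cache *example_cachep;
+
+	static int __init example_init(void)
+	{
+		void *obj;
+
+		/* Objects from this cache never take kmemcheck page faults. */
+		example_cachep = kmem_cache_create("example_cache",
+						   sizeof(struct example),
+						   0, SLAB_NOTRACK, NULL);
+		if (!example_cachep)
+			return -ENOMEM;
+
+		/*
+		 * A single allocation can also opt out on its own: the
+		 * object is marked initialized up front, so kmemcheck never
+		 * warns about it.
+		 */
+		obj = kmalloc(128, GFP_KERNEL | __GFP_NOTRACK);
+		kfree(obj);
+		return 0;
+	}
+
+	static void __exit example_exit(void)
+	{
+		kmem_cache_destroy(example_cachep);
+	}
+
+	module_init(example_init);
+	module_exit(example_exit);
+	MODULE_LICENSE("GPL");
+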
+Currently, the SLAB and SLUB allocators are supported by kmemcheck.
diff --git a/Documentation/kmemleak.txt b/Documentation/dev-tools/kmemleak.rst
index 18e24abb3ecf..1788722d5495 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -1,15 +1,12 @@
Kernel Memory Leak Detector
===========================
-Introduction
-------------
-
Kmemleak provides a way of detecting possible kernel memory leaks in a
way similar to a tracing garbage collector
(https://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors),
with the difference that the orphan objects are not freed but only
reported via /sys/kernel/debug/kmemleak. A similar method is used by the
-Valgrind tool (memcheck --leak-check) to detect the memory leaks in
+Valgrind tool (``memcheck --leak-check``) to detect the memory leaks in
user-space applications.
Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze, ppc, mips, s390, metag and tile.
@@ -19,20 +16,20 @@ Usage
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
thread scans the memory every 10 minutes (by default) and prints the
number of new unreferenced objects found. To display the details of all
-the possible memory leaks:
+the possible memory leaks::
# mount -t debugfs nodev /sys/kernel/debug/
# cat /sys/kernel/debug/kmemleak
-To trigger an intermediate memory scan:
+To trigger an intermediate memory scan::
# echo scan > /sys/kernel/debug/kmemleak
-To clear the list of all current possible memory leaks:
+To clear the list of all current possible memory leaks::
# echo clear > /sys/kernel/debug/kmemleak
-New leaks will then come up upon reading /sys/kernel/debug/kmemleak
+New leaks will then come up upon reading ``/sys/kernel/debug/kmemleak``
again.
Note that the orphan objects are listed in the order they were allocated
@@ -40,22 +37,31 @@ and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
Memory scanning parameters can be modified at run-time by writing to the
-/sys/kernel/debug/kmemleak file. The following parameters are supported:
-
- off - disable kmemleak (irreversible)
- stack=on - enable the task stacks scanning (default)
- stack=off - disable the tasks stacks scanning
- scan=on - start the automatic memory scanning thread (default)
- scan=off - stop the automatic memory scanning thread
- scan=<secs> - set the automatic memory scanning period in seconds
- (default 600, 0 to stop the automatic scanning)
- scan - trigger a memory scan
- clear - clear list of current memory leak suspects, done by
- marking all current reported unreferenced objects grey,
- or free all kmemleak objects if kmemleak has been disabled.
- dump=<addr> - dump information about the object found at <addr>
-
-Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
+``/sys/kernel/debug/kmemleak`` file. The following parameters are supported:
+
+- off
+ disable kmemleak (irreversible)
+- stack=on
+ enable the task stacks scanning (default)
+- stack=off
+ disable the tasks stacks scanning
+- scan=on
+ start the automatic memory scanning thread (default)
+- scan=off
+ stop the automatic memory scanning thread
+- scan=<secs>
+ set the automatic memory scanning period in seconds
+ (default 600, 0 to stop the automatic scanning)
+- scan
+ trigger a memory scan
+- clear
+ clear list of current memory leak suspects, done by
+ marking all current reported unreferenced objects grey,
+ or free all kmemleak objects if kmemleak has been disabled.
+- dump=<addr>
+ dump information about the object found at <addr>
+
+Kmemleak can also be disabled at boot-time by passing ``kmemleak=off`` on
the kernel command line.
Memory may be allocated or freed before kmemleak is initialised and
@@ -63,13 +69,14 @@ these actions are stored in an early log buffer. The size of this buffer
is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF are enabled, the kmemleak is
-disabled by default. Passing "kmemleak=on" on the kernel command
+disabled by default. Passing ``kmemleak=on`` on the kernel command
line enables the function.
Basic Algorithm
---------------
-The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and
+The memory allocations via :c:func:`kmalloc`, :c:func:`vmalloc`,
+:c:func:`kmem_cache_alloc` and
friends are traced and the pointers, together with additional
information like size and stack trace, are stored in a rbtree.
The corresponding freeing function calls are tracked and the pointers
@@ -113,13 +120,13 @@ when doing development. To work around these situations you can use the
you can find new unreferenced objects; this should help with testing
specific sections of code.
-To test a critical section on demand with a clean kmemleak do:
+To test a critical section on demand with a clean kmemleak do::
# echo clear > /sys/kernel/debug/kmemleak
... test your kernel or modules ...
# echo scan > /sys/kernel/debug/kmemleak
-Then as usual to get your report with:
+Then as usual to get your report with::
# cat /sys/kernel/debug/kmemleak
@@ -131,7 +138,7 @@ disabled by the user or due to an fatal error, internal kmemleak objects
won't be freed when kmemleak is disabled, and those objects may occupy
a large part of physical memory.
-In this situation, you may reclaim memory with:
+In this situation, you may reclaim memory with::
# echo clear > /sys/kernel/debug/kmemleak
@@ -140,20 +147,20 @@ Kmemleak API
See the include/linux/kmemleak.h header for the functions prototype.
-kmemleak_init - initialize kmemleak
-kmemleak_alloc - notify of a memory block allocation
-kmemleak_alloc_percpu - notify of a percpu memory block allocation
-kmemleak_free - notify of a memory block freeing
-kmemleak_free_part - notify of a partial memory block freeing
-kmemleak_free_percpu - notify of a percpu memory block freeing
-kmemleak_update_trace - update object allocation stack trace
-kmemleak_not_leak - mark an object as not a leak
-kmemleak_ignore - do not scan or report an object as leak
-kmemleak_scan_area - add scan areas inside a memory block
-kmemleak_no_scan - do not scan a memory block
-kmemleak_erase - erase an old value in a pointer variable
-kmemleak_alloc_recursive - as kmemleak_alloc but checks the recursiveness
-kmemleak_free_recursive - as kmemleak_free but checks the recursiveness
+- ``kmemleak_init`` - initialize kmemleak
+- ``kmemleak_alloc`` - notify of a memory block allocation
+- ``kmemleak_alloc_percpu`` - notify of a percpu memory block allocation
+- ``kmemleak_free`` - notify of a memory block freeing
+- ``kmemleak_free_part`` - notify of a partial memory block freeing
+- ``kmemleak_free_percpu`` - notify of a percpu memory block freeing
+- ``kmemleak_update_trace`` - update object allocation stack trace
+- ``kmemleak_not_leak`` - mark an object as not a leak
+- ``kmemleak_ignore`` - do not scan or report an object as leak
+- ``kmemleak_scan_area`` - add scan areas inside a memory block
+- ``kmemleak_no_scan`` - do not scan a memory block
+- ``kmemleak_erase`` - erase an old value in a pointer variable
+- ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
+- ``kmemleak_free_recursive`` - as kmemleak_free but checks the recursiveness
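+
+As a minimal, hypothetical sketch of how a driver might use one of these
+annotations (the module, buffer and function names below are invented for
+illustration)::
+
+	#include <linux/init.h>
+	#include <linux/module.h>
+	#include <linux/slab.h>
+	#include <linux/kmemleak.h>
+
+	static void *example_buf;
+
+	static int __init example_init(void)
+	{
+		example_buf = kmalloc(4096, GFP_KERNEL);
+		if (!example_buf)
+			return -ENOMEM;
+		/*
+		 * Tell kmemleak never to report this object as a leak. In a
+		 * real driver this is useful when the only reference to the
+		 * buffer lives somewhere kmemleak cannot scan, e.g. in a
+		 * device register.
+		 */
+		kmemleak_not_leak(example_buf);
+		return 0;
+	}
+
+	static void __exit example_exit(void)
+	{
+		kfree(example_buf);
+	}
+
+	module_init(example_init);
+	module_exit(example_exit);
+	MODULE_LICENSE("GPL");
+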
Dealing with false positives/negatives
--------------------------------------
diff --git a/Documentation/sparse.txt b/Documentation/dev-tools/sparse.rst
index eceab1308a8c..8c250e8a2105 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/dev-tools/sparse.rst
@@ -1,11 +1,20 @@
-Copyright 2004 Linus Torvalds
-Copyright 2004 Pavel Machek <pavel@ucw.cz>
-Copyright 2006 Bob Copeland <me@bobcopeland.com>
+.. Copyright 2004 Linus Torvalds
+.. Copyright 2004 Pavel Machek <pavel@ucw.cz>
+.. Copyright 2006 Bob Copeland <me@bobcopeland.com>
+
+Sparse
+======
+
+Sparse is a semantic checker for C programs; it can be used to find a
+number of potential problems with kernel code. See
+https://lwn.net/Articles/689907/ for an overview of sparse; this document
+contains some kernel-specific sparse information.
+
Using sparse for typechecking
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
-"__bitwise" is a type attribute, so you have to do something like this:
+"__bitwise" is a type attribute, so you have to do something like this::
typedef int __bitwise pm_request_t;
@@ -20,13 +29,13 @@ but in this case we really _do_ want to force the conversion). And because
the enum values are all the same type, now "enum pm_request" will be that
type too.
-And with gcc, all the __bitwise/__force stuff goes away, and it all ends
-up looking just like integers to gcc.
+And with gcc, all the "__bitwise"/"__force" stuff goes away, and it all
+ends up looking just like integers to gcc.
Quite frankly, you don't need the enum there. The above all really just
boils down to one special "int __bitwise" type.
-So the simpler way is to just do
+So the simpler way is to just do::
typedef int __bitwise pm_request_t;
@@ -50,7 +59,7 @@ __bitwise - noisy stuff; in particular, __le*/__be* are that. We really
don't want to drown in noise unless we'd explicitly asked for it.
Using sparse for lock checking
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
The following macros are undefined for gcc and defined during a sparse
run to use the "context" tracking feature of sparse, applied to
@@ -69,22 +78,22 @@ annotation is needed. The tree annotations above are for cases where
sparse would otherwise report a context imbalance.
Getting sparse
-~~~~~~~~~~~~~~
+--------------
You can get latest released versions from the Sparse homepage at
https://sparse.wiki.kernel.org/index.php/Main_Page
Alternatively, you can get snapshots of the latest development version
-of sparse using git to clone..
+of sparse using git to clone::
git://git.kernel.org/pub/scm/devel/sparse/sparse.git
-DaveJ has hourly generated tarballs of the git tree available at..
+DaveJ has hourly generated tarballs of the git tree available at::
http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
-Once you have it, just do
+Once you have it, just do::
make
make install
@@ -92,7 +101,7 @@ Once you have it, just do
as a regular user, and it will install sparse in your ~/bin directory.
Using sparse
-~~~~~~~~~~~~
+------------
Do a kernel make with "make C=1" to run sparse on all the C files that get
recompiled, or use "make C=2" to run sparse on the files whether they need to
@@ -101,7 +110,7 @@ have already built it.
The optional make variable CF can be used to pass arguments to sparse. The
build system passes -Wbitwise to sparse automatically. To perform endianness
-checks, you may define __CHECK_ENDIAN__:
+checks, you may define __CHECK_ENDIAN__::
make C=2 CF="-D__CHECK_ENDIAN__"
diff --git a/Documentation/dev-tools/tools.rst b/Documentation/dev-tools/tools.rst
new file mode 100644
index 000000000000..824ae8e54dd5
--- /dev/null
+++ b/Documentation/dev-tools/tools.rst
@@ -0,0 +1,25 @@
+================================
+Development tools for the kernel
+================================
+
+This document is a collection of documents about development tools that can
+be used to work on the kernel. For now, the documents have been pulled
+together without any significant effort to integrate them into a coherent
+whole; patches welcome!
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ coccinelle
+ sparse
+ kcov
+ gcov
+ kasan
+ ubsan
+ kmemleak
+ kmemcheck
+ gdb-kernel-debugging
diff --git a/Documentation/ubsan.txt b/Documentation/dev-tools/ubsan.rst
index f58215ef5797..655e6b63c227 100644
--- a/Documentation/ubsan.txt
+++ b/Documentation/dev-tools/ubsan.rst
@@ -1,7 +1,5 @@
-Undefined Behavior Sanitizer - UBSAN
-
-Overview
---------
+The Undefined Behavior Sanitizer - UBSAN
+========================================
UBSAN is a runtime undefined behaviour checker.
@@ -10,11 +8,13 @@ Compiler inserts code that perform certain kinds of checks before operations
that may cause UB. If check fails (i.e. UB detected) __ubsan_handle_*
function called to print error message.
-GCC has that feature since 4.9.x [1] (see -fsanitize=undefined option and
-its suboptions). GCC 5.x has more checkers implemented [2].
+GCC has supported this feature since 4.9.x [1_] (see the
+``-fsanitize=undefined`` option and its suboptions). GCC 5.x has more
+checkers implemented [2_].
Report example
----------------
+--------------
+
+::
================================================================================
UBSAN: Undefined behaviour in ../include/linux/bitops.h:110:33
@@ -47,29 +47,33 @@ Report example
Usage
-----
-To enable UBSAN configure kernel with:
+To enable UBSAN configure kernel with::
CONFIG_UBSAN=y
-and to check the entire kernel:
+and to check the entire kernel::
CONFIG_UBSAN_SANITIZE_ALL=y
To enable instrumentation for specific files or directories, add a line
similar to the following to the respective kernel Makefile:
- For a single file (e.g. main.o):
- UBSAN_SANITIZE_main.o := y
+- For a single file (e.g. main.o)::
+
+ UBSAN_SANITIZE_main.o := y
- For all files in one directory:
- UBSAN_SANITIZE := y
+- For all files in one directory::
+
+ UBSAN_SANITIZE := y
To exclude files from being instrumented even if
-CONFIG_UBSAN_SANITIZE_ALL=y, use:
+``CONFIG_UBSAN_SANITIZE_ALL=y``, use::
+
+ UBSAN_SANITIZE_main.o := n
+
+and::
- UBSAN_SANITIZE_main.o := n
- and:
- UBSAN_SANITIZE := n
+ UBSAN_SANITIZE := n
Detection of unaligned accesses controlled through the separate option -
CONFIG_UBSAN_ALIGNMENT. It's off by default on architectures that support
@@ -80,5 +84,5 @@ reports.
References
----------
-[1] - https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html
-[2] - https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html
+.. _1: https://gcc.gnu.org/onlinedocs/gcc-4.9.0/gcc/Debugging-Options.html
+.. _2: https://gcc.gnu.org/onlinedocs/gcc/Debugging-Options.html
diff --git a/Documentation/development-process/1.Intro b/Documentation/development-process/1.Intro.rst
index 9b614480aa84..22642b3fe903 100644
--- a/Documentation/development-process/1.Intro
+++ b/Documentation/development-process/1.Intro.rst
@@ -1,16 +1,8 @@
-1: A GUIDE TO THE KERNEL DEVELOPMENT PROCESS
+Introduction
+============
-The purpose of this document is to help developers (and their managers)
-work with the development community with a minimum of frustration. It is
-an attempt to document how this community works in a way which is
-accessible to those who are not intimately familiar with Linux kernel
-development (or, indeed, free software development in general). While
-there is some technical material here, this is very much a process-oriented
-discussion which does not require a deep knowledge of kernel programming to
-understand.
-
-
-1.1: EXECUTIVE SUMMARY
+Executive summary
+-----------------
The rest of this section covers the scope of the kernel development process
and the kinds of frustrations that developers and their employers can
@@ -20,41 +12,41 @@ availability to users, community support in many forms, and the ability to
influence the direction of kernel development. Code contributed to the
Linux kernel must be made available under a GPL-compatible license.
-Section 2 introduces the development process, the kernel release cycle, and
-the mechanics of the merge window. The various phases in the patch
-development, review, and merging cycle are covered. There is some
+:ref:`development_process` introduces the development process, the kernel
+release cycle, and the mechanics of the merge window. The various phases in
+the patch development, review, and merging cycle are covered. There is some
discussion of tools and mailing lists. Developers wanting to get started
with kernel development are encouraged to track down and fix bugs as an
initial exercise.
-Section 3 covers early-stage project planning, with an emphasis on
-involving the development community as soon as possible.
+:ref:`development_early_stage` covers early-stage project planning, with an
+emphasis on involving the development community as soon as possible.
-Section 4 is about the coding process; several pitfalls which have been
-encountered by other developers are discussed. Some requirements for
+:ref:`development_coding` is about the coding process; several pitfalls which
+have been encountered by other developers are discussed. Some requirements for
patches are covered, and there is an introduction to some of the tools
which can help to ensure that kernel patches are correct.
-Section 5 talks about the process of posting patches for review. To be
-taken seriously by the development community, patches must be properly
-formatted and described, and they must be sent to the right place.
+:ref:`development_posting` talks about the process of posting patches for
+review. To be taken seriously by the development community, patches must be
+properly formatted and described, and they must be sent to the right place.
Following the advice in this section should help to ensure the best
possible reception for your work.
-Section 6 covers what happens after posting patches; the job is far from
-done at that point. Working with reviewers is a crucial part of the
-development process; this section offers a number of tips on how to avoid
-problems at this important stage. Developers are cautioned against
+:ref:`development_followthrough` covers what happens after posting patches; the
+job is far from done at that point. Working with reviewers is a crucial part
+of the development process; this section offers a number of tips on how to
+avoid problems at this important stage. Developers are cautioned against
assuming that the job is done when a patch is merged into the mainline.
-Section 7 introduces a couple of "advanced" topics: managing patches with
-git and reviewing patches posted by others.
-
-Section 8 concludes the document with pointers to sources for more
-information on kernel development.
+:ref:`development_advancedtopics` introduces a couple of "advanced" topics:
+managing patches with git and reviewing patches posted by others.
+
+:ref:`development_conclusion` concludes the document with pointers to sources
+for more information on kernel development.
-1.2: WHAT THIS DOCUMENT IS ABOUT
+What this document is about
+---------------------------
The Linux kernel, at over 8 million lines of code and well over 1000
contributors to each release, is one of the largest and most active free
@@ -108,8 +100,8 @@ community is always in need of developers who will help to make the kernel
better; the following text should help you - or those who work for you -
join our community.
-
-1.3: CREDITS
+Credits
+-------
This document was written by Jonathan Corbet, corbet@lwn.net. It has been
improved by comments from Johannes Berg, James Berry, Alex Chiang, Roland
@@ -120,8 +112,8 @@ Jochen Voß.
This work was supported by the Linux Foundation; thanks especially to
Amanda McPherson, who saw the value of this effort and made it all happen.
-
-1.4: THE IMPORTANCE OF GETTING CODE INTO THE MAINLINE
+The importance of getting code into the mainline
+------------------------------------------------
Some companies and developers occasionally wonder why they should bother
learning how to work with the kernel community and get their code into the
@@ -233,8 +225,8 @@ commercial life, after which a new version must be released. At that
point, vendors whose code is in the mainline and well maintained will be
much better positioned to get the new product ready for market quickly.
-
-1.5: LICENSING
+Licensing
+---------
Code is contributed to the Linux kernel under a number of licenses, but all
code must be compatible with version 2 of the GNU General Public License
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process.rst
index c24e156a6118..ce5561bb3f8e 100644
--- a/Documentation/development-process/2.Process
+++ b/Documentation/development-process/2.Process.rst
@@ -1,4 +1,7 @@
-2: HOW THE DEVELOPMENT PROCESS WORKS
+.. _development_process:
+
+How the development process works
+=================================
Linux kernel development in the early 1990's was a pretty loose affair,
with relatively small numbers of users and developers involved. With a
@@ -7,19 +10,21 @@ course of one year, the kernel has since had to evolve a number of
processes to keep development happening smoothly. A solid understanding of
how the process works is required in order to be an effective part of it.
-
-2.1: THE BIG PICTURE
+The big picture
+---------------
The kernel developers use a loosely time-based release process, with a new
major kernel release happening every two or three months. The recent
release history looks like this:
+ ====== =================
2.6.38 March 14, 2011
2.6.37 January 4, 2011
2.6.36 October 20, 2010
2.6.35 August 1, 2010
2.6.34 May 15, 2010
2.6.33 February 24, 2010
+ ====== =================
Every 2.6.x release is a major kernel release with new features, internal
API changes, and more. A typical 2.6 release can contain nearly 10,000
@@ -68,6 +73,7 @@ At that point the whole process starts over again.
As an example, here is how the 2.6.38 development cycle went (all dates in
2011):
+ ============== ===============================
January 4 2.6.37 stable release
January 18 2.6.38-rc1, merge window closes
January 21 2.6.38-rc2
@@ -78,6 +84,7 @@ As an example, here is how the 2.6.38 development cycle went (all dates in
March 1 2.6.38-rc7
March 7 2.6.38-rc8
March 14 2.6.38 stable release
+ ============== ===============================
How do the developers decide when to close the development cycle and create
the stable release? The most significant metric used is the list of
@@ -105,11 +112,13 @@ next development kernel. Kernels will typically receive stable updates for
a little more than one development cycle past their initial release. So,
for example, the 2.6.36 kernel's history looked like:
+ ============== ===============================
October 10 2.6.36 stable release
November 22 2.6.36.1
December 9 2.6.36.2
January 7 2.6.36.3
February 17 2.6.36.4
+ ============== ===============================
2.6.36.4 was the final stable update for the 2.6.36 release.
@@ -117,9 +126,11 @@ Some kernels are designated "long term" kernels; they will receive support
for a longer period. As of this writing, the current long term kernels
and their maintainers are:
+ ====== ====================== ===========================
2.6.27 Willy Tarreau (Deep-frozen stable kernel)
2.6.32 Greg Kroah-Hartman
2.6.35 Andi Kleen (Embedded flag kernel)
+ ====== ====================== ===========================
The selection of a kernel for long-term support is purely a matter of a
maintainer having the need and the time to maintain that release. There
@@ -127,7 +138,8 @@ are no known plans for long-term support for any specific upcoming
release.
-2.2: THE LIFECYCLE OF A PATCH
+The lifecycle of a patch
+------------------------
Patches do not go directly from the developer's keyboard into the mainline
kernel. There is, instead, a somewhat involved (if somewhat informal)
@@ -195,8 +207,8 @@ is to try to cut the process down to a single "merging into the mainline"
step. This approach invariably leads to frustration for everybody
involved.
-
-2.3: HOW PATCHES GET INTO THE KERNEL
+How patches get into the kernel
+-------------------------------
There is exactly one person who can merge patches into the mainline kernel
repository: Linus Torvalds. But, of the over 9,500 patches which went
@@ -242,7 +254,8 @@ finding the right maintainer. Sending patches directly to Linus is not
normally the right way to go.
-2.4: NEXT TREES
+Next trees
+----------
The chain of subsystem trees guides the flow of patches into the kernel,
but it also raises an interesting question: what if somebody wants to look
@@ -294,7 +307,8 @@ all patches merged during a given merge window should really have found
their way into linux-next some time before the merge window opens.
-2.4.1: STAGING TREES
+Staging trees
+-------------
The kernel source tree contains the drivers/staging/ directory, where
many sub-directories for drivers or filesystems that are on their way to
@@ -322,7 +336,8 @@ staging drivers. So staging is, at best, a stop on the way toward becoming
a proper mainline driver.
-2.5: TOOLS
+Tools
+-----
As can be seen from the above text, the kernel development process depends
heavily on the ability to herd collections of patches in various
@@ -368,7 +383,8 @@ upstream. For the management of certain kinds of trees (-mm, for example),
quilt is the best tool for the job.
-2.6: MAILING LISTS
+Mailing lists
+-------------
A great deal of Linux kernel development work is done by way of mailing
lists. It is hard to be a fully-functioning member of the community
@@ -436,7 +452,8 @@ filesystem, etc. subsystems. The best place to look for mailing lists is
in the MAINTAINERS file packaged with the kernel source.
-2.7: GETTING STARTED WITH KERNEL DEVELOPMENT
+Getting started with kernel development
+---------------------------------------
Questions about how to get started with the kernel development process are
common - from both individuals and companies. Equally common are missteps
@@ -463,6 +480,8 @@ they wish for by these means.
Andrew Morton gives this advice for aspiring kernel developers
+::
+
The #1 project for all kernel beginners should surely be "make sure
that the kernel runs perfectly at all times on all machines which
you can lay your hands on". Usually the way to do this is to work
diff --git a/Documentation/development-process/3.Early-stage b/Documentation/development-process/3.Early-stage.rst
index f87ba7b3fbac..af2c0af931d6 100644
--- a/Documentation/development-process/3.Early-stage
+++ b/Documentation/development-process/3.Early-stage.rst
@@ -1,4 +1,7 @@
-3: EARLY-STAGE PLANNING
+.. _development_early_stage:
+
+Early-stage planning
+====================
When contemplating a Linux kernel development project, it can be tempting
to jump right in and start coding. As with any significant project,
@@ -7,7 +10,8 @@ line of code is written. Some time spent in early planning and
communication can save far more time later on.
-3.1: SPECIFYING THE PROBLEM
+Specifying the problem
+----------------------
Like any engineering project, a successful kernel enhancement starts with a
clear description of the problem to be solved. In some cases, this step is
@@ -64,7 +68,8 @@ answers to a short set of questions:
Only then does it make sense to start considering possible solutions.
-3.2: EARLY DISCUSSION
+Early discussion
+----------------
When planning a kernel development project, it makes great sense to hold
discussions with the community before launching into implementation. Early
@@ -117,7 +122,8 @@ In each of these cases, a great deal of pain and extra work could have been
avoided with some early discussion with the kernel developers.
-3.3: WHO DO YOU TALK TO?
+Who do you talk to?
+-------------------
When developers decide to take their plans public, the next question will
be: where do we start? The answer is to find the right mailing list(s) and
@@ -141,6 +147,8 @@ development project.
The task of finding the right maintainer is sometimes challenging enough
that the kernel developers have added a script to ease the process:
+::
+
.../scripts/get_maintainer.pl
This script will return the current maintainer(s) for a given file or
@@ -155,7 +163,8 @@ If all else fails, talking to Andrew Morton can be an effective way to
track down a maintainer for a specific piece of code.
-3.4: WHEN TO POST?
+When to post?
+-------------
If possible, posting your plans during the early stages can only be
helpful. Describe the problem being solved and any plans that have been
@@ -179,7 +188,8 @@ idea. The best thing to do in this situation is to proceed, keeping the
community informed as you go.
-3.5: GETTING OFFICIAL BUY-IN
+Getting official buy-in
+-----------------------
If your work is being done in a corporate environment - as most Linux
kernel work is - you must, obviously, have permission from suitably
diff --git a/Documentation/development-process/4.Coding b/Documentation/development-process/4.Coding.rst
index 9a3ee77cefb1..9d5cef996f7f 100644
--- a/Documentation/development-process/4.Coding
+++ b/Documentation/development-process/4.Coding.rst
@@ -1,4 +1,7 @@
-4: GETTING THE CODE RIGHT
+.. _development_coding:
+
+Getting the code right
+======================
While there is much to be said for a solid and community-oriented design
process, the proof of any kernel development project is in the resulting
@@ -12,9 +15,11 @@ will shift toward doing things right and the tools which can help in that
quest.
-4.1: PITFALLS
+Pitfalls
+---------
-* Coding style
+Coding style
+************
The kernel has long had a standard coding style, described in
Documentation/CodingStyle. For much of that time, the policies described
@@ -54,7 +59,8 @@ style (a line which becomes far less readable if split to fit within the
80-column limit, for example), just do it.
-* Abstraction layers
+Abstraction layers
+******************
Computer Science professors teach students to make extensive use of
abstraction layers in the name of flexibility and information hiding.
@@ -87,7 +93,8 @@ implement that functionality at a higher level. There is no value in
replicating the same code throughout the kernel.
-* #ifdef and preprocessor use in general
+#ifdef and preprocessor use in general
+**************************************
The C preprocessor seems to present a powerful temptation to some C
programmers, who see it as a way to efficiently encode a great deal of
@@ -113,7 +120,8 @@ easier to read, do not evaluate their arguments multiple times, and allow
the compiler to perform type checking on the arguments and return value.
-* Inline functions
+Inline functions
+****************
Inline functions present a hazard of their own, though. Programmers can
become enamored of the perceived efficiency inherent in avoiding a function
@@ -137,7 +145,8 @@ placement of "inline" keywords may not just be excessive; it could also be
irrelevant.
-* Locking
+Locking
+*******
In May, 2006, the "Devicescape" networking stack was, with great
fanfare, released under the GPL and made available for inclusion in the
@@ -151,7 +160,7 @@ This code showed a number of signs of having been developed behind
corporate doors. But one large problem in particular was that it was not
designed to work on multiprocessor systems. Before this networking stack
(now called mac80211) could be merged, a locking scheme needed to be
-retrofitted onto it.
+retrofitted onto it.
Once upon a time, Linux kernel code could be developed without thinking
about the concurrency issues presented by multiprocessor systems. Now,
@@ -169,7 +178,8 @@ enough to pick the right tool for the job. Code which shows a lack of
attention to concurrency will have a difficult path into the mainline.
-* Regressions
+Regressions
+***********
One final hazard worth mentioning is this: it can be tempting to make a
change (which may bring big improvements) which causes something to break
@@ -185,6 +195,8 @@ change if it brings new functionality to ten systems for each one it
breaks? The best answer to this question was expressed by Linus in July,
2007:
+::
+
So we don't fix bugs by introducing new problems. That way lies
madness, and nobody ever knows if you actually make any real
progress at all. Is it two steps forwards, one step back, or one
@@ -201,8 +213,8 @@ reason, a great deal of thought, clear documentation, and wide review for
user-space interfaces is always required.
-
-4.2: CODE CHECKING TOOLS
+Code checking tools
+-------------------
For now, at least, the writing of error-free code remains an ideal that few
of us can reach. What we can hope to do, though, is to catch and fix as
@@ -250,7 +262,7 @@ testing purposes. In particular, you should turn on:
There are quite a few other debugging options, some of which will be
discussed below. Some of them have a significant performance impact and
should not be used all of the time. But some time spent learning the
-available options will likely be paid back many times over in short order.
+available options will likely be paid back many times over in short order.
One of the heavier debugging tools is the locking checker, or "lockdep."
This tool will track the acquisition and release of every lock (spinlock or
@@ -263,7 +275,7 @@ occasion, deadlock. This kind of problem can be painful (for both
developers and users) in a deployed system; lockdep allows them to be found
in an automated manner ahead of time. Code with any sort of non-trivial
locking should be run with lockdep enabled before being submitted for
-inclusion.
+inclusion.
As a diligent kernel programmer, you will, beyond doubt, check the return
status of any operation (such as a memory allocation) which can fail. The
@@ -300,7 +312,7 @@ Documentation/coccinelle.txt for more information.
Other kinds of portability errors are best found by compiling your code for
other architectures. If you do not happen to have an S/390 system or a
Blackfin development board handy, you can still perform the compilation
-step. A large set of cross compilers for x86 systems can be found at
+step. A large set of cross compilers for x86 systems can be found at
http://www.kernel.org/pub/tools/crosstool/
@@ -308,7 +320,8 @@ Some time spent installing and using these compilers will help avoid
embarrassment later.
-4.3: DOCUMENTATION
+Documentation
+-------------
Documentation has often been more the exception than the rule with kernel
development. Even so, adequate documentation will help to ease the merging
@@ -364,7 +377,8 @@ out. Anything which might tempt a code janitor to make an incorrect
"cleanup" needs a comment saying why it is done the way it is. And so on.
-4.4: INTERNAL API CHANGES
+Internal API changes
+--------------------
The binary interface provided by the kernel to user space cannot be broken
except under the most severe circumstances. The kernel's internal
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting.rst
index 8a48c9b62864..b511ddf7e82a 100644
--- a/Documentation/development-process/5.Posting
+++ b/Documentation/development-process/5.Posting.rst
@@ -1,4 +1,7 @@
-5: POSTING PATCHES
+.. _development_posting:
+
+Posting patches
+===============
Sooner or later, the time comes when your work is ready to be presented to
the community for review and, eventually, inclusion into the mainline
@@ -11,7 +14,8 @@ SubmittingDrivers, and SubmitChecklist in the kernel documentation
directory.
-5.1: WHEN TO POST
+When to post
+------------
There is a constant temptation to avoid posting patches before they are
completely "ready." For simple patches, that is not a problem. If the
@@ -27,7 +31,8 @@ patches which are known to be half-baked, but those who do will come in
with the idea that they can help you drive the work in the right direction.
-5.2: BEFORE CREATING PATCHES
+Before creating patches
+-----------------------
There are a number of things which should be done before you consider
sending patches to the development community. These include:
@@ -52,7 +57,8 @@ As a general rule, putting in some extra thought before posting code almost
always pays back the effort in short order.
-5.3: PATCH PREPARATION
+Patch preparation
+-----------------
The preparation of patches for posting can be a surprising amount of work,
but, once again, attempting to save time here is not generally advisable
@@ -122,7 +128,8 @@ which takes quite a bit of time and thought after the "real work" has been
done. When done properly, though, it is time well spent.
-5.4: PATCH FORMATTING AND CHANGELOGS
+Patch formatting and changelogs
+-------------------------------
So now you have a perfect series of patches for posting, but the work is
not done quite yet. Each patch needs to be formatted into a message which
@@ -140,6 +147,8 @@ that end, each patch will be composed of the following:
subsystem name first, followed by the purpose of the patch. For
example:
+ ::
+
gpio: fix build on CONFIG_GPIO_SYSFS=n
- A blank line followed by a detailed description of the contents of the
@@ -192,6 +201,8 @@ been associated with the development of this patch. They are described in
detail in the SubmittingPatches document; what follows here is a brief
summary. Each of these lines has the format:
+::
+
tag: Full Name <email address> optional-other-stuff
The tags in common use are:
@@ -225,7 +236,8 @@ Be careful in the addition of tags to your patches: only Cc: is appropriate
for addition without the explicit permission of the person named.
-5.5: SENDING THE PATCH
+Sending the patch
+-----------------
Before you mail your patches, there are a couple of other things you should
take care of:
@@ -287,6 +299,8 @@ obvious maintainer, Andrew Morton is often the patch target of last resort.
Patches need good subject lines. The canonical format for a patch line is
something like:
+::
+
[PATCH nn/mm] subsys: one-line description of the patch
where "nn" is the ordinal number of the patch, "mm" is the total number of
diff --git a/Documentation/development-process/6.Followthrough b/Documentation/development-process/6.Followthrough.rst
index 41d324a9420d..a173cd5f93d2 100644
--- a/Documentation/development-process/6.Followthrough
+++ b/Documentation/development-process/6.Followthrough.rst
@@ -1,4 +1,7 @@
-6: FOLLOWTHROUGH
+.. _development_followthrough:
+
+Followthrough
+=============
At this point, you have followed the guidelines given so far and, with the
addition of your own engineering skills, have posted a perfect series of
@@ -16,7 +19,8 @@ standards. A failure to participate in this process is quite likely to
prevent the inclusion of your patches into the mainline.
-6.1: WORKING WITH REVIEWERS
+Working with reviewers
+----------------------
A patch of any significance will result in a number of comments from other
developers as they review the code. Working with reviewers can be, for
@@ -97,7 +101,8 @@ though, and not before all other alternatives have been explored. And bear
in mind, of course, that he may not agree with you either.
-6.2: WHAT HAPPENS NEXT
+What happens next
+-----------------
If a patch is considered to be a good thing to add to the kernel, and once
most of the review issues have been resolved, the next step is usually
@@ -177,7 +182,8 @@ it with the assumption that you will not be around to maintain it
afterward.
-6.3: OTHER THINGS THAT CAN HAPPEN
+Other things that can happen
+-----------------------------
One day, you may open your mail client and see that somebody has mailed you
a patch to your code. That is one of the advantages of having your code
diff --git a/Documentation/development-process/7.AdvancedTopics b/Documentation/development-process/7.AdvancedTopics.rst
index 26dc3fa196e4..81d61c5d62dd 100644
--- a/Documentation/development-process/7.AdvancedTopics
+++ b/Documentation/development-process/7.AdvancedTopics.rst
@@ -1,11 +1,15 @@
-7: ADVANCED TOPICS
+.. _development_advancedtopics:
+
+Advanced topics
+===============
At this point, hopefully, you have a handle on how the development process
works. There is still more to learn, however! This section will cover a
number of topics which can be helpful for developers wanting to become a
regular part of the Linux kernel development process.
-7.1: MANAGING PATCHES WITH GIT
+Managing patches with git
+-------------------------
The use of distributed version control for the kernel began in early 2002,
when Linus first started playing with the proprietary BitKeeper
@@ -114,6 +118,8 @@ radar. Kernel developers tend to get unhappy when they see that kind of
thing happening; putting up a git tree with unreviewed or off-topic patches
can affect your ability to get trees pulled in the future. Quoting Linus:
+::
+
You can send me patches, but for me to pull a git patch from you, I
need to know that you know what you're doing, and I need to be able
to trust things *without* then having to go and check every
@@ -141,7 +147,8 @@ format the request as other developers expect, and will also check to be
sure that you have remembered to push those changes to the public server.
-7.2: REVIEWING PATCHES
+Reviewing patches
+-----------------
Some readers will certainly object to putting this section with "advanced
topics" on the grounds that even beginning kernel developers should be
diff --git a/Documentation/development-process/8.Conclusion b/Documentation/development-process/8.Conclusion.rst
index caef69022e9c..23ec7cbc2d2b 100644
--- a/Documentation/development-process/8.Conclusion
+++ b/Documentation/development-process/8.Conclusion.rst
@@ -1,4 +1,7 @@
-8: FOR MORE INFORMATION
+.. _development_conclusion:
+
+For more information
+====================
There are numerous sources of information on Linux kernel development and
related topics. First among those will always be the Documentation
@@ -47,7 +50,8 @@ Documentation for git can be found at:
http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
-9: CONCLUSION
+Conclusion
+==========
Congratulations to anybody who has made it through this long-winded
document. Hopefully it has provided a helpful understanding of how the
diff --git a/Documentation/development-process/conf.py b/Documentation/development-process/conf.py
new file mode 100644
index 000000000000..4b4a12dace02
--- /dev/null
+++ b/Documentation/development-process/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Kernel Development Documentation'
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'development-process.tex', 'Linux Kernel Development Documentation',
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/development-process/development-process.rst b/Documentation/development-process/development-process.rst
new file mode 100644
index 000000000000..bd1399f7202a
--- /dev/null
+++ b/Documentation/development-process/development-process.rst
@@ -0,0 +1,29 @@
+.. _development_process_main:
+
+A guide to the Kernel Development Process
+=========================================
+
+Contents:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ 1.Intro
+ 2.Process
+ 3.Early-stage
+ 4.Coding
+ 5.Posting
+ 6.Followthrough
+ 7.AdvancedTopics
+ 8.Conclusion
+
+The purpose of this document is to help developers (and their managers)
+work with the development community with a minimum of frustration. It is
+an attempt to document how this community works in a way which is
+accessible to those who are not intimately familiar with Linux kernel
+development (or, indeed, free software development in general). While
+there is some technical material here, this is very much a process-oriented
+discussion which does not require a deep knowledge of kernel programming to
+understand.
+
diff --git a/Documentation/development-process/index.rst b/Documentation/development-process/index.rst
new file mode 100644
index 000000000000..c37475d91090
--- /dev/null
+++ b/Documentation/development-process/index.rst
@@ -0,0 +1,9 @@
+Linux Kernel Development Documentation
+======================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ development-process
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
index b545856a444f..4a1714f96bab 100644
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
@@ -90,6 +90,47 @@ Required Properties:
- interrupts : Should be single bit error interrupt, then double bit error
interrupt, in this order.
+NAND FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-nand-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent NAND node.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt, in this order.
+
+DMA FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-dma-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent DMA node.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt, in this order.
+
+USB FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-usb-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent USB node.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt, in this order.
+
+QSPI FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-qspi-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent QSPI node.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt, in this order.
+
+SDMMC FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-sdmmc-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent SD/MMC node.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt, in this order for port A, and then single bit error interrupt,
+ then double bit error interrupt in this order for port B.
+
Example:
eccmgr: eccmgr@ffd06000 {
@@ -132,4 +173,61 @@ Example:
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>,
<37 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ nand-buf-ecc@ff8c2000 {
+ compatible = "altr,socfpga-nand-ecc";
+ reg = <0xff8c2000 0x400>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>,
+ <43 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-rd-ecc@ff8c2400 {
+ compatible = "altr,socfpga-nand-ecc";
+ reg = <0xff8c2400 0x400>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>,
+ <45 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-wr-ecc@ff8c2800 {
+ compatible = "altr,socfpga-nand-ecc";
+ reg = <0xff8c2800 0x400>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <44 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dma-ecc@ff8c8000 {
+ compatible = "altr,socfpga-dma-ecc";
+ reg = <0xff8c8000 0x400>;
+ altr,ecc-parent = <&pdma>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>,
+ <42 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+ usb0-ecc@ff8c8800 {
+ compatible = "altr,socfpga-usb-ecc";
+ reg = <0xff8c8800 0x400>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+ <34 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ qspi-ecc@ff8c8400 {
+ compatible = "altr,socfpga-qspi-ecc";
+ reg = <0xff8c8400 0x400>;
+ altr,ecc-parent = <&qspi>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <46 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdmmc-ecc@ff8c2c00 {
+ compatible = "altr,socfpga-sdmmc-ecc";
+ reg = <0xff8c2c00 0x400>;
+ altr,ecc-parent = <&mmc>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH>,
+ <47 IRQ_TYPE_LEVEL_HIGH>,
+ <16 IRQ_TYPE_LEVEL_HIGH>,
+ <48 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
index 6ffe08778465..9c97de23919a 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
@@ -38,6 +38,10 @@ Raspberry Pi Compute Module
Required root node properties:
compatible = "raspberrypi,compute-module", "brcm,bcm2835";
+Raspberry Pi Zero
+Required root node properties:
+compatible = "raspberrypi,model-zero", "brcm,bcm2835";
+
Generic BCM2835 board
Required root node properties:
compatible = "brcm,bcm2835";
diff --git a/Documentation/devicetree/bindings/arm/davinci.txt b/Documentation/devicetree/bindings/arm/davinci.txt
index cfaeda4274e6..f0841ce725b5 100644
--- a/Documentation/devicetree/bindings/arm/davinci.txt
+++ b/Documentation/devicetree/bindings/arm/davinci.txt
@@ -5,6 +5,10 @@ DA850/OMAP-L138/AM18x Evaluation Module (EVM) board
Required root node properties:
- compatible = "ti,da850-evm", "ti,da850";
+DA850/OMAP-L138/AM18x L138/C6748 Development Kit (LCDK) board
+Required root node properties:
+ - compatible = "ti,da850-lcdk", "ti,da850";
+
EnBW AM1808 based CMC board
Required root node properties:
- compatible = "enbw,cmc", "ti,da850;
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index 83fe816ae050..3f81575aa6be 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -175,38 +175,55 @@ Example:
};
-----------------------------------------------------------------------
-Hisilicon HiP05 PCIe-SAS system controller
+Hisilicon HiP05/HiP06 PCIe-SAS sub system controller
Required properties:
- compatible : "hisilicon,pcie-sas-subctrl", "syscon";
- reg : Register address and size
-The HiP05 PCIe-SAS system controller is shared by PCIe and SAS controllers in
-HiP05 Soc to implement some basic configurations.
+The PCIe-SAS sub system controller is shared by PCIe and SAS controllers in
+HiP05 or HiP06 SoC to implement some basic configurations.
Example:
- /* for HiP05 PCIe-SAS system */
- pcie_sas: system_controller@0xb0000000 {
+ /* for HiP05 PCIe-SAS sub system */
+ pcie_sas: system_controller@b0000000 {
compatible = "hisilicon,pcie-sas-subctrl", "syscon";
reg = <0xb0000000 0x10000>;
};
-Hisilicon HiP05 PERISUB system controller
+Hisilicon HiP05/HiP06 PERI sub system controller
Required properties:
-- compatible : "hisilicon,hip05-perisubc", "syscon";
+- compatible : "hisilicon,peri-subctrl", "syscon";
- reg : Register address and size
-The HiP05 PERISUB system controller is shared by peripheral controllers in
-HiP05 Soc to implement some basic configurations. The peripheral
+The PERI sub system controller is shared by peripheral controllers in
+HiP05 or HiP06 SoC to implement some basic configurations. The peripheral
controllers include mdio, ddr, iic, uart, timer and so on.
Example:
- /* for HiP05 perisub-ctrl-c system */
+	/* for HiP05 peri sub system */
peri_c_subctrl: syscon@80000000 {
- compatible = "hisilicon,hip05-perisubc", "syscon";
+ compatible = "hisilicon,peri-subctrl", "syscon";
reg = <0x0 0x80000000 0x0 0x10000>;
};
+
+Hisilicon HiP05/HiP06 DSA sub system controller
+
+Required properties:
+- compatible : "hisilicon,dsa-subctrl", "syscon";
+- reg : Register address and size
+
+The DSA sub system controller is shared by peripheral controllers in
+HiP05 or HiP06 SoC to implement some basic configurations.
+
+Example:
+ /* for HiP05 dsa sub system */
+	dsa_ctrl: system_controller@a0000000 {
+ compatible = "hisilicon,dsa-subctrl", "syscon";
+ reg = <0xa0000000 0x10000>;
+ };
+
-----------------------------------------------------------------------
Hisilicon CPU controller
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-39x.txt b/Documentation/devicetree/bindings/arm/marvell/armada-39x.txt
index 53d4ff9ea8ad..89468664f6ea 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-39x.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-39x.txt
@@ -8,8 +8,19 @@ Required root node property:
- compatible: must contain "marvell,armada390"
-In addition, boards using the Marvell Armada 398 SoC shall have the
-following property before the previous one:
+In addition, boards using the Marvell Armada 395 SoC shall have the
+following property before the common "marvell,armada390" one:
+
+Required root node property:
+
+compatible: must contain "marvell,armada395"
+
+Example:
+
+compatible = "marvell,a395-gp", "marvell,armada395", "marvell,armada390";
+
+Boards using the Marvell Armada 398 SoC shall have the following
+property before the common "marvell,armada390" one:
Required root node property:
diff --git a/Documentation/devicetree/bindings/arm/marvell/marvell,orion5x.txt b/Documentation/devicetree/bindings/arm/marvell/marvell,orion5x.txt
new file mode 100644
index 000000000000..748a8f287462
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/marvell/marvell,orion5x.txt
@@ -0,0 +1,25 @@
+Marvell Orion SoC Family Device Tree Bindings
+---------------------------------------------
+
+Boards with a SoC of the Marvell Orion family, e.g. the 88f5181
+
+* Required root node properties:
+compatible: must contain "marvell,orion5x"
+
+In addition, the above compatible shall be extended with the specific
+SoC. Currently known SoC compatibles are:
+
+"marvell,orion5x-88f5181"
+"marvell,orion5x-88f5182"
+
+Finally, the compatible shall be extended with the specific
+board. Currently known boards are:
+
+"buffalo,lsgl"
+"buffalo,lswsgl"
+"buffalo,lswtgl"
+"lacie,ethernet-disk-mini-v2"
+"lacie,d2-network"
+"marvell,rd-88f5182-nas"
+"maxtor,shared-storage-2"
+"netgear,wnr854t"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index 936166fbee09..cb0054ac7121 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -5,7 +5,8 @@ The Mediatek apmixedsys controller provides the PLLs to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-apmixedsys"
- "mediatek,mt8135-apmixedsys"
- "mediatek,mt8173-apmixedsys"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
new file mode 100644
index 000000000000..4137196dd686
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
@@ -0,0 +1,22 @@
+Mediatek bdpsys controller
+============================
+
+The Mediatek bdpsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt2701-bdpsys", "syscon"
+- #clock-cells: Must be 1
+
+The bdpsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+bdpsys: clock-controller@1c000000 {
+ compatible = "mediatek,mt2701-bdpsys", "syscon";
+ reg = <0 0x1c000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
new file mode 100644
index 000000000000..768f3a5bc055
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
@@ -0,0 +1,22 @@
+Mediatek ethsys controller
+============================
+
+The Mediatek ethsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt2701-ethsys", "syscon"
+- #clock-cells: Must be 1
+
+The ethsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+ethsys: clock-controller@1b000000 {
+ compatible = "mediatek,mt2701-ethsys", "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
new file mode 100644
index 000000000000..beed7b594cea
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
@@ -0,0 +1,24 @@
+Mediatek hifsys controller
+============================
+
+The Mediatek hifsys controller provides various clocks and reset
+outputs to the system.
+
+Required Properties:
+
+- compatible: Should be:
+ - "mediatek,mt2701-hifsys", "syscon"
+- #clock-cells: Must be 1
+
+The hifsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+hifsys: clock-controller@1a000000 {
+ compatible = "mediatek,mt2701-hifsys", "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
index b1f2ce17dff8..f6a916686f4c 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
@@ -5,7 +5,8 @@ The Mediatek imgsys controller provides various clocks to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-imgsys", "syscon"
- "mediatek,mt8173-imgsys", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index aaf8d1460c4d..1620ec2a5a3f 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -6,7 +6,8 @@ outputs to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-infracfg", "syscon"
- "mediatek,mt8135-infracfg", "syscon"
- "mediatek,mt8173-infracfg", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
index 4385946eadef..67dd2e473d25 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
@@ -5,7 +5,8 @@ The Mediatek mmsys controller provides various clocks to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-mmsys", "syscon"
- "mediatek,mt8173-mmsys", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
index 2f6ff86df49f..e494366782aa 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
@@ -6,7 +6,8 @@ outputs to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-pericfg", "syscon"
- "mediatek,mt8135-pericfg", "syscon"
- "mediatek,mt8173-pericfg", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index f9e917994ced..9f2fe7860114 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -5,7 +5,8 @@ The Mediatek topckgen controller provides various clocks to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-topckgen"
- "mediatek,mt8135-topckgen"
- "mediatek,mt8173-topckgen"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
index 1faacf1c1b25..2440f73450c3 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
@@ -5,7 +5,8 @@ The Mediatek vdecsys controller provides various clocks to the system.
Required Properties:
-- compatible: Should be:
+- compatible: Should be one of:
+ - "mediatek,mt2701-vdecsys", "syscon"
- "mediatek,mt8173-vdecsys", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 94b57f247615..f53e2ee65e35 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -180,3 +180,9 @@ Boards:
- DRA722 EVM: Software Development Board for DRA722
compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7"
+
+- DM3730 Logic PD Torpedo + Wireless: Commercial System on Module with WiFi and Bluetooth
+ compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3"
+
+- DM3730 Logic PD SOM-LV: Commercial System on Module with WiFi and Bluetooth
+ compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3"
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 666864517069..55f388f954de 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -31,6 +31,10 @@ Rockchip platforms device tree bindings
or
- compatible = "firefly,firefly-rk3288-beta", "rockchip,rk3288";
+- Firefly Firefly-RK3288 Reload board:
+ Required root node properties:
+ - compatible = "firefly,firefly-rk3288-reload", "rockchip,rk3288";
+
- ChipSPARK PopMetal-RK3288 board:
Required root node properties:
- compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
@@ -110,6 +114,14 @@ Rockchip platforms device tree bindings
- Rockchip RK3229 Evaluation board:
- compatible = "rockchip,rk3229-evb", "rockchip,rk3229";
+- Rockchip RK3288 Fennec board:
+ Required root node properties:
+ - compatible = "rockchip,rk3288-fennec", "rockchip,rk3288";
+
- Rockchip RK3399 evb:
Required root node properties:
- compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
+
+- Tronsmart Orion R68 Meta:
+ Required root node properties:
+ - compatible = "tronsmart,orion-r68-meta", "rockchip,rk3368";
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
index 2d6356d8daf4..bf5fc59a6938 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
@@ -10,6 +10,7 @@ Properties:
- "samsung,exynos5260-pmu" - for Exynos5260 SoC.
- "samsung,exynos5410-pmu" - for Exynos5410 SoC,
- "samsung,exynos5420-pmu" - for Exynos5420 SoC.
+ - "samsung,exynos5433-pmu" - for Exynos5433 SoC.
- "samsung,exynos7-pmu" - for Exynos7 SoC.
second value must be always "syscon".
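
For orientation, a minimal node using the newly listed compatible could look
like the sketch below; the unit address and register size are illustrative
assumptions, not values taken from this binding:

pmu_system_controller: system-controller@105c0000 {
	compatible = "samsung,exynos5433-pmu", "syscon";
	reg = <0x105c0000 0x1000>;
};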
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index 1df32d339da5..2f0b7169f132 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -49,6 +49,8 @@ Boards:
compatible = "renesas,genmai", "renesas,r7s72100"
- Gose
compatible = "renesas,gose", "renesas,r8a7793"
+ - H3ULCB (RTP0RC7795SKB00010S)
+ compatible = "renesas,h3ulcb", "renesas,r8a7795";
- Henninger
compatible = "renesas,henninger", "renesas,r8a7791"
- Koelsch (RTP0RC7791SEB00010S)
@@ -63,9 +65,13 @@ Boards:
compatible = "renesas,marzen", "renesas,r8a7779"
- Porter (M2-LCDP)
compatible = "renesas,porter", "renesas,r8a7791"
+ - RSKRZA1 (YR0K77210C000BE)
+ compatible = "renesas,rskrza1", "renesas,r7s72100"
- Salvator-X (RTP0RC7795SIPB0010S)
compatible = "renesas,salvator-x", "renesas,r8a7795";
- Salvator-X
compatible = "renesas,salvator-x", "renesas,r8a7796";
- SILK (RTP0RC7794LCB00011S)
compatible = "renesas,silk", "renesas,r8a7794"
+ - Wheat
+ compatible = "renesas,wheat", "renesas,r8a7792"
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
index 7e79fcc36b0d..3975d0a0e4c2 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -14,3 +14,4 @@ using one of the following compatible strings:
allwinner,sun8i-a83t
allwinner,sun8i-h3
allwinner,sun9i-a80
+ nextthing,gr8
diff --git a/Documentation/devicetree/bindings/arm/technologic.txt b/Documentation/devicetree/bindings/arm/technologic.txt
index 842298894cf0..33797acad846 100644
--- a/Documentation/devicetree/bindings/arm/technologic.txt
+++ b/Documentation/devicetree/bindings/arm/technologic.txt
@@ -4,3 +4,9 @@ Technologic Systems Platforms Device Tree Bindings
TS-4800 board
Required root node properties:
- compatible = "technologic,imx51-ts4800", "fsl,imx51";
+
+TS-4900 is a System-on-Module based on the Freescale i.MX6 System-on-Chip.
+It can be mounted on a carrier board providing additional peripheral connectors.
+Required root node properties:
+ - compatible = "technologic,imx6dl-ts4900", "fsl,imx6dl"
+ - compatible = "technologic,imx6q-ts4900", "fsl,imx6q"
diff --git a/Documentation/devicetree/bindings/arm/zte.txt b/Documentation/devicetree/bindings/arm/zte.txt
index 3ff5c9e85c1c..83369785d29c 100644
--- a/Documentation/devicetree/bindings/arm/zte.txt
+++ b/Documentation/devicetree/bindings/arm/zte.txt
@@ -13,3 +13,27 @@ Low power management required properties:
Bus matrix required properties:
- compatible = "zte,zx-bus-matrix"
+
+
+---------------------------------------
+- ZX296718 SoC:
+ Required root node properties:
+ - compatible = "zte,zx296718"
+
+ZX296718 EVB board:
+ - "zte,zx296718-evb"
+
+System management required properties:
+ - compatible = "zte,zx296718-aon-sysctrl"
+ - compatible = "zte,zx296718-sysctrl"
+
+Example:
+aon_sysctrl: aon-sysctrl@116000 {
+ compatible = "zte,zx296718-aon-sysctrl", "syscon";
+ reg = <0x116000 0x1000>;
+};
+
+sysctrl: sysctrl@1463000 {
+ compatible = "zte,zx296718-sysctrl", "syscon";
+ reg = <0x1463000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/bus/qcom,ebi2.txt b/Documentation/devicetree/bindings/bus/qcom,ebi2.txt
new file mode 100644
index 000000000000..920681f552db
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/qcom,ebi2.txt
@@ -0,0 +1,138 @@
+Qualcomm External Bus Interface 2 (EBI2)
+
+The EBI2 contains two peripheral blocks: XMEM and LCDC. The XMEM handles any
+external memory (such as NAND or other memory-mapped peripherals) whereas
+LCDC handles LCD displays.
+
+As the name says, it connects devices to an external bus interface, meaning
+address lines (up to 9 address lines, so it can only address 1KiB of external
+memory space), data lines (16 bits), OE (output enable), ADV (address valid,
+used on some NOR flash memories) and WE (write enable). This is on top of 6
+different chip selects (CS0 thru CS5), so that in theory 6 different devices
+can be connected.
+
+Apparently this bus is clocked at 64MHz. It has dedicated pins on the package
+and the bus can only come out on these pins. However, if some of the pins are
+unused they can be left unconnected or remuxed to be used as GPIO or, in some
+cases, other orthogonal functions as well.
+
+Also, CS1 and CS2 have -A and -B signals; why they have that is unclear.
+
+The chip selects have the following memory range assignments. This region of
+memory is referred to as "Chip Peripheral SS FPB0" and is 168MB big.
+
+Chip Select Physical address base
+CS0 GPIO134 0x1a800000-0x1b000000 (8MB)
+CS1 GPIO39 (A) / GPIO123 (B) 0x1b000000-0x1b800000 (8MB)
+CS2 GPIO40 (A) / GPIO124 (B) 0x1b800000-0x1c000000 (8MB)
+CS3 GPIO133 0x1d000000-0x25000000 (128 MB)
+CS4 GPIO132 0x1c800000-0x1d000000 (8MB)
+CS5 GPIO131 0x1c000000-0x1c800000 (8MB)
+
+The APQ8060 Qualcomm Application Processor User Guide, 80-N7150-14 Rev. A,
+August 6, 2012 contains some incomplete documentation of the EBI2.
+
+FIXME: the manual mentions "write precharge cycles" and "precharge cycles".
+We have not been able to figure out which bit fields these correspond to
+in the hardware, or what valid values exist. The current hypothesis is that
+this is something just used on the FAST chip selects and that the SLOW
+chip selects are understood fully. There is also a "byte device enable"
+flag somewhere for 8bit memories.
+
+FIXME: The chipselects have SLOW and FAST configuration registers. It's a bit
+unclear what this means, if they are mutually exclusive or can be used
+together, or if some chip selects are hardwired to be FAST and others are SLOW
+by design.
+
+The XMEM registers are totally undocumented but could be partially decoded
+because the Cypress AN49576 Antioch Westbridge apparently has suspiciously
+similar register layout, see: http://www.cypress.com/file/105771/download
+
+Required properties:
+- compatible: should be one of:
+ "qcom,msm8660-ebi2"
+ "qcom,apq8060-ebi2"
+- #address-cells: should be <2>: the first cell is the chipselect,
+ the second cell is the offset inside the memory range
+- #size-cells: should be <1>
+- ranges: should be set to:
+ ranges = <0 0x0 0x1a800000 0x00800000>,
+ <1 0x0 0x1b000000 0x00800000>,
+ <2 0x0 0x1b800000 0x00800000>,
+ <3 0x0 0x1d000000 0x08000000>,
+ <4 0x0 0x1c800000 0x00800000>,
+ <5 0x0 0x1c000000 0x00800000>;
+- reg: two ranges of registers: EBI2 config and XMEM config areas
+- reg-names: should be "ebi2", "xmem"
+- clocks: two clocks, EBI_2X and EBI
+- clock-names: should be "ebi2x", "ebi2"
+
+Optional subnodes:
+- Nodes inside the EBI2 will be considered device nodes.
+
+The following optional properties are properties that can be tagged onto
+any device subnode. We are assuming that there can be only ONE device per
+ chipselect subnode, else the properties will become ambiguous.
+
+Optional properties arrays for SLOW chip selects:
+- qcom,xmem-recovery-cycles: recovery cycles is the time the memory continues to
+ drive the data bus after OE is de-asserted, in order to avoid contention on
+ the data bus. They are inserted when reading one CS and switching to another
+ CS or read followed by write on the same CS. Valid values 0 thru 15. Minimum
+ value is actually 1, so a value of 0 will still yield 1 recovery cycle.
+- qcom,xmem-write-hold-cycles: write hold cycles, these are extra cycles
+ inserted after every write minimum 1. The data out is driven from the time
+ WE is asserted until CS is asserted. With a hold of 1 (value = 0), the CS
+ stays active for 1 extra cycle etc. Valid values 0 thru 15.
+- qcom,xmem-write-delta-cycles: initial latency for write cycles inserted for
+ the first write to a page or burst memory. Valid values 0 thru 255.
+- qcom,xmem-read-delta-cycles: initial latency for read cycles inserted for the
+ first read to a page or burst memory. Valid values 0 thru 255.
+- qcom,xmem-write-wait-cycles: number of wait cycles for every write access, 0=1
+ cycle. Valid values 0 thru 15.
+- qcom,xmem-read-wait-cycles: number of wait cycles for every read access, 0=1
+ cycle. Valid values 0 thru 15.
+
+Optional properties arrays for FAST chip selects:
+- qcom,xmem-address-hold-enable: this is a boolean property stating that we
+ shall hold the address for an extra cycle to meet hold time requirements
+ with ADV assertion.
+- qcom,xmem-adv-to-oe-recovery-cycles: the number of cycles elapsed before an OE
+ assertion, with respect to the cycle where ADV (address valid) is asserted.
+ 2 means 2 cycles between ADV and OE. Valid values 0, 1, 2 or 3.
+- qcom,xmem-read-hold-cycles: the length in cycles of the first segment of a
+  read transfer. For a single read transfer this will be the time from CS
+ assertion to OE assertion. Valid values 0 thru 15.
+
+
+Example:
+
+ebi2@1a100000 {
+ compatible = "qcom,apq8060-ebi2";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x1a800000 0x00800000>,
+ <1 0x0 0x1b000000 0x00800000>,
+ <2 0x0 0x1b800000 0x00800000>,
+ <3 0x0 0x1d000000 0x08000000>,
+ <4 0x0 0x1c800000 0x00800000>,
+ <5 0x0 0x1c000000 0x00800000>;
+ reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
+ reg-names = "ebi2", "xmem";
+ clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>;
+ clock-names = "ebi2x", "ebi2";
+ /* Make sure to set up the pin control for the EBI2 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&foo_ebi2_pins>;
+
+ foo-ebi2@2,0 {
+ compatible = "foo";
+ reg = <2 0x0 0x100>;
+ (...)
+ qcom,xmem-recovery-cycles = <0>;
+ qcom,xmem-write-hold-cycles = <3>;
+ qcom,xmem-write-delta-cycles = <31>;
+ qcom,xmem-read-delta-cycles = <28>;
+ qcom,xmem-write-wait-cycles = <9>;
+ qcom,xmem-read-wait-cycles = <9>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
new file mode 100644
index 000000000000..a55d31b48d6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-aoclkc.txt
@@ -0,0 +1,45 @@
+* Amlogic GXBB AO Clock and Reset Unit
+
+The Amlogic GXBB AO clock controller generates and supplies clock to various
+controllers within the Always-On part of the SoC.
+
+Required Properties:
+
+- compatible: should be "amlogic,gxbb-aoclkc"
+- reg: physical base address of the clock controller and length of memory
+ mapped region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/gxbb-aoclkc.h header and can be
+used in device tree sources.
+
+- #reset-cells: should be 1.
+
+Each reset is assigned an identifier and client nodes can use this identifier
+to specify the reset which they consume. All available resets are defined as
+preprocessor macros in the dt-bindings/reset/gxbb-aoclkc.h header and can be
+used in device tree sources.
+
+Example: AO Clock controller node:
+
+ clkc_AO: clock-controller@040 {
+ compatible = "amlogic,gxbb-aoclkc";
+ reg = <0x0 0x040 0x0 0x4>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+Example: UART controller node that consumes the clock and reset generated
+ by the clock controller:
+
+ uart_AO: serial@4c0 {
+ compatible = "amlogic,meson-uart";
+ reg = <0x4c0 0x14>;
+ interrupts = <0 90 1>;
+ clocks = <&clkc_AO CLKID_AO_UART1>;
+ resets = <&clkc_AO RESET_AO_UART1>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt b/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
index 8b7177cecb36..27468119fd94 100644
--- a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
+++ b/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
@@ -5,20 +5,50 @@ Technology (IDT). ARM integrated these oscillators deeply into their
reference designs by adding special control registers that manage such
oscillators to their system controllers.
-The ARM system controller contains logic to serialize and initialize
+The various ARM system controllers contain logic to serialize and initialize
an ICST clock request after a write to the 32 bit register at an offset
into the system controller. Furthermore, to even be able to alter one of
these frequencies, the system controller must first be unlocked by
writing a special token to another offset in the system controller.
+Some ARM hardware contains special versions of the serial interface that only
+connect the low 8 bits of the VDW (missing one bit), hardwire RDW to
+different values and sometimes also hardwire the output divider. They
+therefore have special compatible strings as per this table (the OD value is
+the value on the pins, not the resulting output divider):
+
+Hardware variant: RDW OD VDW
+
+Integrator/AP 22 1 Bit 8 0, rest variable
+integratorap-cm
+
+Integrator/AP 46 3 Bit 8 0, rest variable
+integratorap-sys
+
+Integrator/AP 22 or 1 17 or (33 or 25 MHz)
+integratorap-pci 14 1 14
+
+Integrator/CP 22 variable Bit 8 0, rest variable
+integratorcp-cm-core
+
+Integrator/CP 22 variable Bit 8 0, rest variable
+integratorcp-cm-mem
+
The ICST oscillator must be provided inside a system controller node.
Required properties:
+- compatible: must be one of
+ "arm,syscon-icst525"
+ "arm,syscon-icst307"
+ "arm,syscon-icst525-integratorap-cm"
+ "arm,syscon-icst525-integratorap-sys"
+ "arm,syscon-icst525-integratorap-pci"
+ "arm,syscon-icst525-integratorcp-cm-core"
+ "arm,syscon-icst525-integratorcp-cm-mem"
- lock-offset: the offset address into the system controller where the
unlocking register is located
- vco-offset: the offset address into the system controller where the
ICST control register is located (even 32 bit address)
-- compatible: must be one of "arm,syscon-icst525" or "arm,syscon-icst307"
- #clock-cells: must be <0>
- clocks: parent clock, since the ICST needs a parent clock to derive its
frequency from, this attribute is compulsory.
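
To make the property list above concrete, here is a hedged sketch of an ICST
oscillator node inside its system controller; the syscon compatible, register
offsets and parent clock phandle are illustrative assumptions, not values
mandated by this binding:

syscon@10000000 {
	compatible = "syscon", "simple-mfd";
	reg = <0x10000000 0x200>;

	auxosc: clock@1c {
		compatible = "arm,syscon-icst525";
		#clock-cells = <0>;
		lock-offset = <0x14>;
		vco-offset = <0x1c>;
		clocks = <&xtal24mhz>;
	};
};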
diff --git a/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
new file mode 100644
index 000000000000..1e3370ba189f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
@@ -0,0 +1,70 @@
+* Peripheral Clock bindings for Marvell Armada 37xx SoCs
+
+Marvell Armada 37xx SoCs provide peripheral clocks which are
+used as clock sources for the peripherals of the SoC.
+
+There are two different blocks, associated with the north bridge and
+the south bridge respectively.
+
+The peripheral clock consumer should specify the desired clock by
+having the clock ID in its "clocks" phandle cell.
+
+The following is a list of provided IDs for Armada 37xx North bridge clocks:
+ID Clock name Description
+-----------------------------------
+0 mmc MMC controller
+1 sata_host Sata Host
+2 sec_at Security AT
+3 sac_dap Security DAP
+4 tsecm Security Engine
+5 setm_tmx Serial Embedded Trace Module
+6 avs Adaptive Voltage Scaling
+7 sqf SPI
+8 pwm PWM
+9 i2c_2 I2C 2
+10 i2c_1 I2C 1
+11 ddr_phy DDR PHY
+12 ddr_fclk DDR F clock
+13 trace Trace
+14 counter Counter
+15 eip97 EIP 97
+16 cpu CPU
+
+The following is a list of provided IDs for Armada 37xx South bridge clocks:
+ID Clock name Description
+-----------------------------------
+0 gbe-50 50 MHz parent clock for Gigabit Ethernet
+1 gbe-core parent clock for Gigabit Ethernet core
+2 gbe-125 125 MHz parent clock for Gigabit Ethernet
+3 gbe1-50 50 MHz clock for Gigabit Ethernet port 1
+4 gbe0-50 50 MHz clock for Gigabit Ethernet port 0
+5 gbe1-125 125 MHz clock for Gigabit Ethernet port 1
+6 gbe0-125 125 MHz clock for Gigabit Ethernet port 0
+7 gbe1-core Gigabit Ethernet core port 1
+8 gbe0-core Gigabit Ethernet core port 0
+9 gbe-bm Gigabit Ethernet Buffer Manager
+10 sdio SDIO
+11 usb32-sub2-sys USB 2 clock
+12 usb32-ss-sys USB 3 clock
+
+Required properties:
+
+- compatible : shall be "marvell,armada-3700-periph-clock-nb" for the
+ north bridge block, or
+ "marvell,armada-3700-periph-clock-sb" for the south bridge block
+- reg : must be the register address of North/South Bridge Clock register
+- #clock-cells : from common clock binding; shall be set to 1
+
+- clocks : list of the parent clock phandle in the following order:
+ TBG-A P, TBG-B P, TBG-A S, TBG-B S and finally the xtal clock.
+
+
+Example:
+
+nb_periph_clk: nb-periph-clk@13000 {
+ compatible = "marvell,armada-3700-periph-clock-nb";
+ reg = <0x13000 0x1000>;
+ clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+ <&tbg 3>, <&xtalclk>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt
new file mode 100644
index 000000000000..0ba1d83ff363
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/armada3700-tbg-clock.txt
@@ -0,0 +1,27 @@
+* Time Base Generator Clock bindings for Marvell Armada 37xx SoCs
+
+Marvell Armada 37xx SoCs provide Time Base Generator clocks which are
+used as parent clocks for the peripheral clocks.
+
+The TBG clock consumer should specify the desired clock by having the
+clock ID in its "clocks" phandle cell.
+
+The following is a list of provided IDs and clock names on Armada 3700:
+ 0 = TBG A P
+ 1 = TBG B P
+ 2 = TBG A S
+ 3 = TBG B S
+
+Required properties:
+- compatible : shall be "marvell,armada-3700-tbg-clock"
+- reg : must be the register address of North Bridge PLL register
+- #clock-cells : from common clock binding; shall be set to 1
+
+Example:
+
+tbg: tbg@13200 {
+ compatible = "marvell,armada-3700-tbg-clock";
+ reg = <0x13200 0x1000>;
+ clocks = <&xtalclk>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/clock/armada3700-xtal-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-xtal-clock.txt
new file mode 100644
index 000000000000..a88f1f05fbd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/armada3700-xtal-clock.txt
@@ -0,0 +1,28 @@
+* Xtal Clock bindings for Marvell Armada 37xx SoCs
+
+Marvell Armada 37xx SoCs allow the xtal clock frequency to be determined by
+reading the GPIO latch register.
+
+This node must be a subnode of the node exposing the register address
+of the GPIO block where the gpio latch is located.
+
+Required properties:
+- compatible : shall be one of the following:
+ "marvell,armada-3700-xtal-clock"
+- #clock-cells : from common clock binding; shall be set to 0
+
+Optional properties:
+- clock-output-names : from common clock binding; allows overriding the default
+  clock output name ("xtal")
+
+Example:
+gpio1: gpio@13800 {
+ compatible = "marvell,armada-3700-gpio", "syscon", "simple-mfd";
+ reg = <0x13800 0x1000>;
+
+ xtalclk: xtal-clk {
+ compatible = "marvell,armada-3700-xtal-clock";
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index 181bc8ac4e3a..5f3ad65daf69 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -6,7 +6,8 @@ This binding uses the common clock binding[1].
Required properties:
- compatible : shall be one of the following:
- "atmel,at91sam9x5-sckc":
+ "atmel,at91sam9x5-sckc" or
+ "atmel,sama5d4-sckc":
at91 SCKC (Slow Clock Controller)
This node contains the slow clock definitions.
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt b/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt
new file mode 100644
index 000000000000..2ebb107331dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm53573-ilp.txt
@@ -0,0 +1,36 @@
+Broadcom BCM53573 ILP clock
+===========================
+
+This binding uses the common clock binding:
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+This binding is used for the ILP clock (sometimes referred to as the "slow
+clock") on Broadcom BCM53573 devices using the Cortex-A7 CPU.
+
+The ILP rate has to be calculated at runtime and depends on the ALP clock,
+which has to be referenced.
+
+This clock is part of the PMU (Power Management Unit), a Broadcom block
+handling power-related aspects. Its node must be a sub-node of the PMU
+device.
+
+Required properties:
+- compatible: "brcm,bcm53573-ilp"
+- clocks: has to reference an ALP clock
+- #clock-cells: should be <0>
+- clock-output-names: from common clock bindings, should contain clock
+ name
+
+Example:
+
+pmu@18012000 {
+ compatible = "simple-mfd", "syscon";
+ reg = <0x18012000 0x00001000>;
+
+ ilp {
+ compatible = "brcm,bcm53573-ilp";
+ clocks = <&alp>;
+ #clock-cells = <0>;
+ clock-output-names = "ilp";
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt b/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
index 180e8835569e..0c3d6015868d 100644
--- a/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
+++ b/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
@@ -10,6 +10,8 @@ Required Properties:
- "samsung,exynos4210-audss-clock" - controller compatible with all Exynos4 SoCs.
- "samsung,exynos5250-audss-clock" - controller compatible with Exynos5250
SoCs.
+ - "samsung,exynos5410-audss-clock" - controller compatible with Exynos5410
+ SoCs.
- "samsung,exynos5420-audss-clock" - controller compatible with Exynos5420
SoCs.
- reg: physical base address and length of the controller's register set.
@@ -91,5 +93,5 @@ i2s0: i2s@03830000 {
<&clock_audss EXYNOS_MOUT_AUDSS>,
<&clock_audss EXYNOS_MOUT_I2S>;
clock-names = "iis", "i2s_opclk0", "i2s_opclk1",
- "mout_audss", "mout_i2s";
+ "mout_audss", "mout_i2s";
};
diff --git a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt b/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
index aeab635b07b5..4527de3ea205 100644
--- a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
@@ -12,24 +12,29 @@ Required Properties:
- #clock-cells: should be 1.
+- clocks: should contain an entry specifying the root clock from external
+ oscillator supplied through XXTI or XusbXTI pin. This clock should be
+ defined using standard clock bindings with "fin_pll" clock-output-name.
+ That clock is being passed internally to the 9 PLLs.
+
All available clocks are defined as preprocessor macros in
dt-bindings/clock/exynos5410.h header and can be used in device
tree sources.
-External clock:
-
-There is clock that is generated outside the SoC. It
-is expected that it is defined using standard clock bindings
-with following clock-output-name:
-
- - "fin_pll" - PLL input clock from XXTI
-
Example 1: An example of a clock controller node is listed below.
+ fin_pll: xxti {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
clock: clock-controller@0x10010000 {
compatible = "samsung,exynos5410-clock";
reg = <0x10010000 0x30000>;
#clock-cells = <1>;
+ clocks = <&fin_pll>;
};
Example 2: UART controller node that consumes the clock generated by the clock
diff --git a/Documentation/devicetree/bindings/clock/maxim,max77686.txt b/Documentation/devicetree/bindings/clock/maxim,max77686.txt
index 9c40739a661a..8398a3a5e106 100644
--- a/Documentation/devicetree/bindings/clock/maxim,max77686.txt
+++ b/Documentation/devicetree/bindings/clock/maxim,max77686.txt
@@ -1,10 +1,24 @@
-Binding for Maxim MAX77686 32k clock generator block
+Binding for Maxim MAX77686/MAX77802/MAX77620 32k clock generator block
-This is a part of device tree bindings of MAX77686 multi-function device.
-More information can be found in bindings/mfd/max77686.txt file.
+This is a part of device tree bindings of MAX77686/MAX77802/MAX77620
+multi-function device. More information can be found in MFD DT binding
+doc as follows:
+ bindings/mfd/max77686.txt for MAX77686 and
+ bindings/mfd/max77802.txt for MAX77802 and
+ bindings/mfd/max77620.txt for MAX77620.
The MAX77686 contains three 32.768khz clock outputs that can be controlled
-(gated/ungated) over I2C.
+(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
+dt-bindings/clock/maxim,max77686.h.
+
+
+The MAX77802 contains two 32.768khz clock outputs that can be controlled
+(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
+dt-bindings/clock/maxim,max77802.h.
+
+The MAX77620 contains one 32.768khz clock output that can be controlled
+(gated/ungated) over I2C. Clocks are defined as preprocessor macros in
+dt-bindings/clock/maxim,max77620.h.
Following properties should be present in main device node of the MFD chip.
@@ -17,30 +31,84 @@ Optional properties:
Each clock is assigned an identifier and client nodes can use this identifier
to specify the clock which they consume. Following indices are allowed:
- - 0: 32khz_ap clock,
- - 1: 32khz_cp clock,
- - 2: 32khz_pmic clock.
+ - 0: 32khz_ap clock (max77686, max77802), 32khz_out0 (max77620)
+ - 1: 32khz_cp clock (max77686, max77802),
+ - 2: 32khz_pmic clock (max77686).
+
+Clocks are defined as preprocessor macros in the above dt-binding headers for
+the respective chips.
+
+Example:
+
+1. With MAX77686:
+
+#include <dt-bindings/clock/maxim,max77686.h>
+/* ... */
+
+ Node of the MFD chip
+ max77686: max77686@09 {
+ compatible = "maxim,max77686";
+ interrupt-parent = <&wakeup_eint>;
+ interrupts = <26 0>;
+ reg = <0x09>;
+ #clock-cells = <1>;
+
+ /* ... */
+ };
+
+ Clock consumer node
+
+ foo@0 {
+ compatible = "bar,foo";
+ /* ... */
+ clock-names = "my-clock";
+ clocks = <&max77686 MAX77686_CLK_PMIC>;
+ };
+
+2. With MAX77802:
+
+#include <dt-bindings/clock/maxim,max77802.h>
+/* ... */
+
+ Node of the MFD chip
+ max77802: max77802@09 {
+ compatible = "maxim,max77802";
+ interrupt-parent = <&wakeup_eint>;
+ interrupts = <26 0>;
+ reg = <0x09>;
+ #clock-cells = <1>;
+
+ /* ... */
+ };
+
+ Clock consumer node
+
+ foo@0 {
+ compatible = "bar,foo";
+ /* ... */
+ clock-names = "my-clock";
+ clocks = <&max77802 MAX77802_CLK_32K_AP>;
+ };
-Clocks are defined as preprocessor macros in dt-bindings/clock/maxim,max77686.h
-header and can be used in device tree sources.
-Example: Node of the MFD chip
+3. With MAX77620:
- max77686: max77686@09 {
- compatible = "maxim,max77686";
- interrupt-parent = <&wakeup_eint>;
- interrupts = <26 0>;
- reg = <0x09>;
- #clock-cells = <1>;
+#include <dt-bindings/clock/maxim,max77620.h>
+/* ... */
- /* ... */
- };
+ Node of the MFD chip
+ max77620: max77620@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+ #clock-cells = <1>;
+ /* ... */
+ };
-Example: Clock consumer node
+ Clock consumer node
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "my-clock";
- clocks = <&max77686 MAX77686_CLK_PMIC>;
- };
+ foo@0 {
+ compatible = "bar,foo";
+ /* ... */
+ clock-names = "my-clock";
+ clocks = <&max77620 MAX77620_CLK_32K_OUT0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/maxim,max77802.txt b/Documentation/devicetree/bindings/clock/maxim,max77802.txt
deleted file mode 100644
index c6dc7835f06c..000000000000
--- a/Documentation/devicetree/bindings/clock/maxim,max77802.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Binding for Maxim MAX77802 32k clock generator block
-
-This is a part of device tree bindings of MAX77802 multi-function device.
-More information can be found in bindings/mfd/max77802.txt file.
-
-The MAX77802 contains two 32.768khz clock outputs that can be controlled
-(gated/ungated) over I2C.
-
-Following properties should be present in main device node of the MFD chip.
-
-Required properties:
-- #clock-cells: From common clock binding; shall be set to 1.
-
-Optional properties:
-- clock-output-names: From common clock binding.
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. Following indices are allowed:
- - 0: 32khz_ap clock,
- - 1: 32khz_cp clock.
-
-Clocks are defined as preprocessor macros in dt-bindings/clock/maxim,max77802.h
-header and can be used in device tree sources.
-
-Example: Node of the MFD chip
-
- max77802: max77802@09 {
- compatible = "maxim,max77802";
- interrupt-parent = <&wakeup_eint>;
- interrupts = <26 0>;
- reg = <0x09>;
- #clock-cells = <1>;
-
- /* ... */
- };
-
-Example: Clock consumer node
-
- foo@0 {
- compatible = "bar,foo";
- /* ... */
- clock-names = "my-clock";
- clocks = <&max77802 MAX77802_CLK_32K_AP>;
- };
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
index 670c2af3e931..eb985a633d59 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -52,6 +52,7 @@ Required properties:
"marvell,dove-core-clock" - for Dove SoC core clocks
"marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
"marvell,mv88f6180-core-clock" - for Kirkwood MV88f6180 SoC
+ "marvell,mv88f5181-core-clock" - for Orion MV88F5181 SoC
"marvell,mv88f5182-core-clock" - for Orion MV88F5182 SoC
"marvell,mv88f5281-core-clock" - for Orion MV88F5281 SoC
"marvell,mv88f6183-core-clock" - for Orion MV88F6183 SoC
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index 660e64912cce..cb8542d910b3 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -86,6 +86,8 @@ ID Clock Peripheral
7 pex3 PCIe 3
8 pex0 PCIe 0
9 usb3h0 USB3 Host 0
+10 usb3h1 USB3 Host 1
+15 sata0 SATA 0
17 sdio SDIO
22 xor0 XOR 0
28 xor1 XOR 1
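A hedged consumer sketch for the gate-clock IDs added above; the provider label
"gateclk", the node name and the compatible are illustrative assumptions, not
part of this binding:

	sata@a8000 {
		compatible = "vendor,example-sata";	/* placeholder */
		reg = <0xa8000 0x2000>;
		clocks = <&gateclk 15>;			/* 15: sata0 */
	};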
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 9a60fde32b02..3cf44217068e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -15,6 +15,7 @@ Required properties :
"qcom,gcc-msm8974pro"
"qcom,gcc-msm8974pro-ac"
"qcom,gcc-msm8996"
+ "qcom,gcc-mdm9615"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
@@ -22,6 +23,11 @@ Required properties :
Optional properties :
- #power-domain-cells : shall contain 1
+- The Qualcomm TSENS (thermal sensor device) can, on some devices, be part of
+the GCC and hence the TSENS properties can also be part of the
+GCC/clock-controller node.
+For more details on the TSENS properties, please refer to
+Documentation/devicetree/bindings/thermal/qcom-tsens.txt
Example:
clock-controller@900000 {
@@ -31,3 +37,14 @@ Example:
#reset-cells = <1>;
#power-domain-cells = <1>;
};
+
+Example of GCC with TSENS properties:
+ clock-controller@900000 {
+ compatible = "qcom,gcc-apq8064";
+ reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
index dd755be63a01..a3c78aa88038 100644
--- a/Documentation/devicetree/bindings/clock/qcom,lcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
@@ -7,6 +7,7 @@ Required properties :
"qcom,lcc-msm8960"
"qcom,lcc-apq8064"
"qcom,lcc-ipq8064"
+ "qcom,lcc-mdm9615"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
index fee3205cdff9..c209de6cfadb 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
+++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
@@ -1,16 +1,16 @@
STMicroelectronics STM32 Reset and Clock Controller
===================================================
-The RCC IP is both a reset and a clock controller. This documentation only
-describes the clock part.
+The RCC IP is both a reset and a clock controller.
-Please also refer to clock-bindings.txt in this directory for common clock
-controller binding usage.
+Please refer to clock-bindings.txt for common clock controller binding usage.
+Please also refer to reset.txt for common reset controller binding usage.
Required properties:
- compatible: Should be "st,stm32f42xx-rcc"
- reg: should be register base and length as documented in the
datasheet
+- #reset-cells: 1, see below
- #clock-cells: 2, device nodes should specify the clock in their "clocks"
property, containing a phandle to the clock device node, an index selecting
between gated clocks and other clocks and an index specifying the clock to
@@ -19,6 +19,7 @@ Required properties:
Example:
rcc: rcc@40023800 {
+ #reset-cells = <1>;
#clock-cells = <2>
compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
reg = <0x40023800 0x400>;
@@ -35,16 +36,23 @@ from the first RCC clock enable register (RCC_AHB1ENR, address offset 0x30).
It is calculated as: index = register_offset / 4 * 32 + bit_offset.
Where bit_offset is the bit offset within the register (LSB is 0, MSB is 31).
+To simplify usage and to share the bit definitions between the reset and clock
+drivers of the RCC IP, macros are available to generate the index in a
+human-readable format.
+
+For the STM32F4 series, the macros are available here:
+ - include/dt-bindings/mfd/stm32f4-rcc.h
+
Example:
/* Gated clock, AHB1 bit 0 (GPIOA) */
... {
- clocks = <&rcc 0 0>
+ clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>
};
/* Gated clock, AHB2 bit 4 (CRYP) */
... {
- clocks = <&rcc 0 36>
+ clocks = <&rcc 0 STM32F4_AHB2_CLOCK(CRYP)>
};
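As a worked instance of the index formula above, assuming the STM32F4 register
layout (RCC_AHB2ENR is 0x04 past RCC_AHB1ENR) and CRYP being bit 4 of that
register:

	index = 0x04 / 4 * 32 + 4 = 36

which is the raw value that the STM32F4_AHB2_CLOCK(CRYP) macro replaces in the
example above.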
Specifying other clocks
@@ -61,5 +69,25 @@ Example:
/* Misc clock, FCLK */
... {
- clocks = <&rcc 1 1>
+ clocks = <&rcc 1 STM32F4_APB1_CLOCK(TIM2)>
+ };
+
+
+Specifying softreset control of devices
+=======================================
+
+Device nodes should specify the reset channel required in their "resets"
+property, containing a phandle to the reset device node and an index specifying
+which channel to use.
+The index is the bit number within the RCC registers bank, starting from the
+RCC base address.
+It is calculated as: index = register_offset / 4 * 32 + bit_offset.
+Where bit_offset is the bit offset within the register.
+For example, for CRC reset:
+ crc = AHB1RSTR_offset / 4 * 32 + CRCRST_bit_offset = 0x10 / 4 * 32 + 12 = 140
+
+example:
+
+ timer2 {
+ resets = <&rcc STM32F4_APB1_RESET(TIM2)>;
};
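For comparison, a minimal raw-index form of the CRC reset worked out above; the
node name is hypothetical and 140 is the value computed in this document:

	crc {
		resets = <&rcc 140>;
	};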
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
deleted file mode 100644
index 6247652044a0..000000000000
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-Binding for a ST divider and multiplexer clock driver.
-
-This binding uses the common clock binding[1].
-Base address is located to the parent node. See clock binding[2]
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/st/st,clkgen.txt
-
-Required properties:
-
-- compatible : shall be:
- "st,clkgena-divmux-c65-hs", "st,clkgena-divmux"
- "st,clkgena-divmux-c65-ls", "st,clkgena-divmux"
- "st,clkgena-divmux-c32-odf0", "st,clkgena-divmux"
- "st,clkgena-divmux-c32-odf1", "st,clkgena-divmux"
- "st,clkgena-divmux-c32-odf2", "st,clkgena-divmux"
- "st,clkgena-divmux-c32-odf3", "st,clkgena-divmux"
-
-- #clock-cells : From common clock binding; shall be set to 1.
-
-- clocks : From common clock binding
-
-- clock-output-names : From common clock binding.
-
-Example:
-
- clockgen-a@fd345000 {
- reg = <0xfd345000 0xb50>;
-
- clk_m_a1_div1: clk-m-a1-div1 {
- #clock-cells = <1>;
- compatible = "st,clkgena-divmux-c32-odf1",
- "st,clkgena-divmux";
-
- clocks = <&clk_m_a1_osc_prediv>,
- <&clk_m_a1_pll0 1>, /* PLL0 PHI1 */
- <&clk_m_a1_pll1 1>; /* PLL1 PHI1 */
-
- clock-output-names = "clk-m-rx-icn-ts",
- "clk-m-rx-icn-vdp-0",
- "", /* unused */
- "clk-m-prv-t1-bus",
- "clk-m-icn-reg-12",
- "clk-m-icn-reg-10",
- "", /* unused */
- "clk-m-icn-st231";
- };
- };
-
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
index f1fa91c68768..9a46cb1d7a04 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
@@ -10,14 +10,7 @@ This binding uses the common clock binding[1].
Required properties:
- compatible : shall be:
- "st,stih416-clkgenc-vcc-hd", "st,clkgen-mux"
- "st,stih416-clkgenf-vcc-fvdp", "st,clkgen-mux"
- "st,stih416-clkgenf-vcc-hva", "st,clkgen-mux"
- "st,stih416-clkgenf-vcc-hd", "st,clkgen-mux"
- "st,stih416-clkgenf-vcc-sd", "st,clkgen-mux"
- "st,stih415-clkgen-a9-mux", "st,clkgen-mux"
- "st,stih416-clkgen-a9-mux", "st,clkgen-mux"
- "st,stih407-clkgen-a9-mux", "st,clkgen-mux"
+ "st,stih407-clkgen-a9-mux"
- #clock-cells : from common clock binding; shall be set to 0.
@@ -27,10 +20,13 @@ Required properties:
Example:
- clk_m_hva: clk-m-hva@fd690868 {
+ clk_m_a9: clk-m-a9@92b0000 {
#clock-cells = <0>;
- compatible = "st,stih416-clkgenf-vcc-hva", "st,clkgen-mux";
- reg = <0xfd690868 4>;
+ compatible = "st,stih407-clkgen-a9-mux";
+ reg = <0x92b0000 0x10000>;
- clocks = <&clockgen_f 1>, <&clk_m_a1_div0 3>;
+ clocks = <&clockgen_a9_pll 0>,
+ <&clockgen_a9_pll 0>,
+ <&clk_s_c0_flexgen 13>,
+ <&clk_m_a9_ext2f_div2>;
};
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
index 844b3a0976bf..f207053e0550 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
@@ -9,24 +9,10 @@ Base address is located to the parent node. See clock binding[2]
Required properties:
- compatible : shall be:
- "st,clkgena-prediv-c65", "st,clkgena-prediv"
- "st,clkgena-prediv-c32", "st,clkgena-prediv"
-
- "st,clkgena-plls-c65"
- "st,plls-c32-a1x-0", "st,clkgen-plls-c32"
- "st,plls-c32-a1x-1", "st,clkgen-plls-c32"
- "st,stih415-plls-c32-a9", "st,clkgen-plls-c32"
- "st,stih415-plls-c32-ddr", "st,clkgen-plls-c32"
- "st,stih416-plls-c32-a9", "st,clkgen-plls-c32"
- "st,stih416-plls-c32-ddr", "st,clkgen-plls-c32"
- "st,stih407-plls-c32-a0", "st,clkgen-plls-c32"
- "st,stih407-plls-c32-a9", "st,clkgen-plls-c32"
- "sst,plls-c32-cx_0", "st,clkgen-plls-c32"
- "sst,plls-c32-cx_1", "st,clkgen-plls-c32"
- "st,stih418-plls-c28-a9", "st,clkgen-plls-c32"
-
- "st,stih415-gpu-pll-c32", "st,clkgengpu-pll-c32"
- "st,stih416-gpu-pll-c32", "st,clkgengpu-pll-c32"
+ "st,clkgen-pll0"
+ "st,clkgen-pll1"
+ "st,stih407-clkgen-plla9"
+ "st,stih418-clkgen-plla9"
- #clock-cells : From common clock binding; shall be set to 1.
@@ -36,17 +22,16 @@ Required properties:
Example:
- clockgen-a@fee62000 {
- reg = <0xfee62000 0xb48>;
+ clockgen-a9@92b0000 {
+ compatible = "st,clkgen-c32";
+ reg = <0x92b0000 0xffff>;
- clk_s_a0_pll: clk-s-a0-pll {
+ clockgen_a9_pll: clockgen-a9-pll {
#clock-cells = <1>;
- compatible = "st,clkgena-plls-c65";
+ compatible = "st,stih407-clkgen-plla9";
clocks = <&clk_sysin>;
- clock-output-names = "clk-s-a0-pll0-hs",
- "clk-s-a0-pll0-ls",
- "clk-s-a0-pll1";
+ clock-output-names = "clockgen-a9-pll-odf";
};
};
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
deleted file mode 100644
index 604766c2619e..000000000000
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Binding for a ST pre-divider clock driver.
-
-This binding uses the common clock binding[1].
-Base address is located to the parent node. See clock binding[2]
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/st/st,clkgen.txt
-
-Required properties:
-
-- compatible : shall be:
- "st,clkgena-prediv-c65", "st,clkgena-prediv"
- "st,clkgena-prediv-c32", "st,clkgena-prediv"
-
-- #clock-cells : From common clock binding; shall be set to 0.
-
-- clocks : From common clock binding
-
-- clock-output-names : From common clock binding.
-
-Example:
-
- clockgen-a@fd345000 {
- reg = <0xfd345000 0xb50>;
-
- clk_m_a2_osc_prediv: clk-m-a2-osc-prediv {
- #clock-cells = <0>;
- compatible = "st,clkgena-prediv-c32",
- "st,clkgena-prediv";
-
- clocks = <&clk_sysin>;
-
- clock-output-names = "clk-m-a2-osc-prediv";
- };
- };
-
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
deleted file mode 100644
index 109b3eddcb17..000000000000
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Binding for a type of STMicroelectronics clock crossbar (VCC).
-
-The crossbar can take up to 4 input clocks and control up to 16
-output clocks. Not all inputs or outputs have to be in use in a
-particular instantiation. Each output can be individually enabled,
-select any of the input clocks and apply a divide (by 1,2,4 or 8) to
-that selected clock.
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-
-- compatible : shall be:
- "st,stih416-clkgenc", "st,vcc"
- "st,stih416-clkgenf", "st,vcc"
-
-- #clock-cells : from common clock binding; shall be set to 1.
-
-- reg : A Base address and length of the register set.
-
-- clocks : from common clock binding
-
-- clock-output-names : From common clock binding. The block has 16
- clock outputs but not all of them in a specific instance
- have to be used in the SoC. If a clock name is left as
- an empty string then no clock will be created for the
- output associated with that string index. If fewer than
- 16 strings are provided then no clocks will be created
- for the remaining outputs.
-
-Example:
-
- clockgen_c_vcc: clockgen-c-vcc@0xfe8308ac {
- #clock-cells = <1>;
- compatible = "st,stih416-clkgenc", "st,clkgen-vcc";
- reg = <0xfe8308ac 12>;
-
- clocks = <&clk_s_vcc_hd>,
- <&clockgen_c 1>,
- <&clk_s_tmds_fromphy>,
- <&clockgen_c 2>;
-
- clock-output-names = "clk-s-pix-hdmi",
- "clk-s-pix-dvo",
- "clk-s-out-dvo",
- "clk-s-pix-hd",
- "clk-s-hddac",
- "clk-s-denc",
- "clk-s-sddac",
- "clk-s-pix-main",
- "clk-s-pix-aux",
- "clk-s-stfe-frc-0",
- "clk-s-ref-mcru",
- "clk-s-slave-mcru",
- "clk-s-tmds-hdmi",
- "clk-s-hdmi-reject-pll",
- "clk-s-thsens";
- };
-
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
index b18bf86f926f..c35390f60545 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
@@ -13,14 +13,6 @@ address is common of all subnode.
...
};
- prediv_node {
- ...
- };
-
- divmux_node {
- ...
- };
-
quadfs_node {
...
};
@@ -29,10 +21,6 @@ address is common of all subnode.
...
};
- vcc_node {
- ...
- };
-
flexgen_node {
...
};
@@ -43,11 +31,8 @@ This binding uses the common clock binding[1].
Each subnode should use the binding described in [2]..[7]
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/st,clkgen-divmux.txt
[3] Documentation/devicetree/bindings/clock/st,clkgen-mux.txt
[4] Documentation/devicetree/bindings/clock/st,clkgen-pll.txt
-[5] Documentation/devicetree/bindings/clock/st,clkgen-prediv.txt
-[6] Documentation/devicetree/bindings/clock/st,vcc.txt
[7] Documentation/devicetree/bindings/clock/st,quadfs.txt
[8] Documentation/devicetree/bindings/clock/st,flexgen.txt
@@ -57,44 +42,27 @@ Required properties:
Example:
- clockgen-a@fee62000 {
-
- reg = <0xfee62000 0xb48>;
+ clockgen-a@090ff000 {
+ compatible = "st,clkgen-c32";
+ reg = <0x90ff000 0x1000>;
clk_s_a0_pll: clk-s-a0-pll {
#clock-cells = <1>;
- compatible = "st,clkgena-plls-c65";
-
- clocks = <&clk-sysin>;
-
- clock-output-names = "clk-s-a0-pll0-hs",
- "clk-s-a0-pll0-ls",
- "clk-s-a0-pll1";
- };
-
- clk_s_a0_osc_prediv: clk-s-a0-osc-prediv {
- #clock-cells = <0>;
- compatible = "st,clkgena-prediv-c65",
- "st,clkgena-prediv";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
- clock-output-names = "clk-s-a0-osc-prediv";
+ clock-output-names = "clk-s-a0-pll-ofd-0";
};
- clk_s_a0_hs: clk-s-a0-hs {
+ clk_s_a0_flexgen: clk-s-a0-flexgen {
+ compatible = "st,flexgen";
+
#clock-cells = <1>;
- compatible = "st,clkgena-divmux-c65-hs",
- "st,clkgena-divmux";
- clocks = <&clk-s_a0_osc_prediv>,
- <&clk-s_a0_pll 0>, /* pll0 hs */
- <&clk-s_a0_pll 2>; /* pll1 */
+ clocks = <&clk_s_a0_pll 0>,
+ <&clk_sysin>;
- clock-output-names = "clk-s-fdma-0",
- "clk-s-fdma-1",
- ""; /* clk-s-jit-sense */
- /* fourth output unused */
+ clock-output-names = "clk-ic-lmi0";
};
};
-
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
index b7ee5c7e0f75..7ff77fc57dff 100644
--- a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
@@ -60,6 +60,10 @@ This binding uses the common clock binding[2].
Required properties:
- compatible : shall be:
"st,flexgen"
+ "st,flexgen-audio", "st,flexgen" (enable clock propagation on parent for
+ audio use case)
+ "st,flexgen-video", "st,flexgen" (enable clock propagation on parent
+ and activate synchronous mode)
- #clock-cells : from common clock binding; shall be set to 1 (multiple clock
outputs).
diff --git a/Documentation/devicetree/bindings/clock/st/st,quadfs.txt b/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
index cedeb9cc8208..d93d49342e60 100644
--- a/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,quadfs.txt
@@ -11,12 +11,8 @@ This binding uses the common clock binding[1].
Required properties:
- compatible : shall be:
- "st,stih416-quadfs216", "st,quadfs"
- "st,stih416-quadfs432", "st,quadfs"
- "st,stih416-quadfs660-E", "st,quadfs"
- "st,stih416-quadfs660-F", "st,quadfs"
- "st,stih407-quadfs660-C", "st,quadfs"
- "st,stih407-quadfs660-D", "st,quadfs"
+ "st,quadfs"
+ "st,quadfs-pll"
- #clock-cells : from common clock binding; shall be set to 1.
@@ -35,14 +31,15 @@ Required properties:
Example:
- clockgen_e: clockgen-e@fd3208bc {
- #clock-cells = <1>;
- compatible = "st,stih416-quadfs660-E", "st,quadfs";
- reg = <0xfd3208bc 0xB0>;
-
- clocks = <&clk_sysin>;
- clock-output-names = "clk-m-pix-mdtp-0",
- "clk-m-pix-mdtp-1",
- "clk-m-pix-mdtp-2",
- "clk-m-mpelpc";
- };
+ clk_s_c0_quadfs: clk-s-c0-quadfs@9103000 {
+ #clock-cells = <1>;
+ compatible = "st,quadfs-pll";
+ reg = <0x9103000 0x1000>;
+
+ clocks = <&clk_sysin>;
+
+ clock-output-names = "clk-s-c0-fs0-ch0",
+ "clk-s-c0-fs0-ch1",
+ "clk-s-c0-fs0-ch2",
+ "clk-s-c0-fs0-ch3";
+ };
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index cb91507ffb1e..3868458a5feb 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -2,7 +2,10 @@ Allwinner Clock Control Unit Binding
------------------------------------
Required properties :
-- compatible: must contain one of the following compatible:
+- compatible: must contain one of the following compatibles:
+ - "allwinner,sun6i-a31-ccu"
+ - "allwinner,sun8i-a23-ccu"
+ - "allwinner,sun8i-a33-ccu"
- "allwinner,sun8i-h3-ccu"
- reg: Must contain the registers base address and length
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
new file mode 100644
index 000000000000..c7179d3b5c33
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
@@ -0,0 +1,134 @@
+UniPhier clock controller
+
+
+System clock
+------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sld3-clock" - for sLD3 SoC.
+ "socionext,uniphier-ld4-clock" - for LD4 SoC.
+ "socionext,uniphier-pro4-clock" - for Pro4 SoC.
+ "socionext,uniphier-sld8-clock" - for sLD8 SoC.
+ "socionext,uniphier-pro5-clock" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-clock" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-clock" - for LD11 SoC.
+ "socionext,uniphier-ld20-clock" - for LD20 SoC.
+- #clock-cells: should be 1.
+
+Example:
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ clock {
+ compatible = "socionext,uniphier-ld20-clock";
+ #clock-cells = <1>;
+ };
+
+ other nodes ...
+ };
+
+Provided clocks:
+
+ 8: ST DMAC
+12: GIO (Giga bit stream I/O)
+14: USB3 ch0 host
+15: USB3 ch1 host
+16: USB3 ch0 PHY0
+17: USB3 ch0 PHY1
+20: USB3 ch1 PHY0
+21: USB3 ch1 PHY1
+
+
+Media I/O (MIO) clock
+---------------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sld3-mio-clock" - for sLD3 SoC.
+ "socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
+ "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
+ "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
+ "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
+ "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
+- #clock-cells: should be 1.
+
+Example:
+
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ clock {
+ compatible = "socionext,uniphier-ld20-mio-clock";
+ #clock-cells = <1>;
+ };
+
+ other nodes ...
+ };
+
+Provided clocks:
+
+ 0: SD ch0 host
+ 1: eMMC host
+ 2: SD ch1 host
+ 7: MIO DMAC
+ 8: USB2 ch0 host
+ 9: USB2 ch1 host
+10: USB2 ch2 host
+11: USB2 ch3 host
+12: USB2 ch0 PHY
+13: USB2 ch1 PHY
+14: USB2 ch2 PHY
+15: USB2 ch3 PHY
+
+
+Peripheral clock
+----------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sld3-peri-clock" - for sLD3 SoC.
+ "socionext,uniphier-ld4-peri-clock" - for LD4 SoC.
+ "socionext,uniphier-pro4-peri-clock" - for Pro4 SoC.
+ "socionext,uniphier-sld8-peri-clock" - for sLD8 SoC.
+ "socionext,uniphier-pro5-peri-clock" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-peri-clock" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-peri-clock" - for LD11 SoC.
+ "socionext,uniphier-ld20-peri-clock" - for LD20 SoC.
+- #clock-cells: should be 1.
+
+Example:
+
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-perictrl",
+ "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ clock {
+ compatible = "socionext,uniphier-ld20-peri-clock";
+ #clock-cells = <1>;
+ };
+
+ other nodes ...
+ };
+
+Provided clocks:
+
+ 0: UART ch0
+ 1: UART ch1
+ 2: UART ch2
+ 3: UART ch3
+ 4: I2C ch0
+ 5: I2C ch1
+ 6: I2C ch2
+ 7: I2C ch3
+ 8: I2C ch4
+ 9: I2C ch5
+10: I2C ch6
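A hedged consumer sketch for the peripheral clocks listed above; it assumes the
"clock" subnode in the example is given a label such as peri_clk, and the UART
compatible, address and node name are illustrative only:

	serial@54006800 {
		compatible = "socionext,uniphier-uart";
		reg = <0x54006800 0x40>;
		clocks = <&peri_clk 0>;		/* 0: UART ch0 */
	};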
diff --git a/Documentation/devicetree/bindings/clock/xgene.txt b/Documentation/devicetree/bindings/clock/xgene.txt
index 82f9638121db..8233e771711b 100644
--- a/Documentation/devicetree/bindings/clock/xgene.txt
+++ b/Documentation/devicetree/bindings/clock/xgene.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible : shall be one of the following:
"apm,xgene-socpll-clock" - for a X-Gene SoC PLL clock
"apm,xgene-pcppll-clock" - for a X-Gene PCP PLL clock
+ "apm,xgene-pmd-clock" - for a X-Gene PMD clock
"apm,xgene-device-clock" - for a X-Gene device clock
"apm,xgene-socpll-v2-clock" - for a X-Gene SoC PLL v2 clock
"apm,xgene-pcppll-v2-clock" - for a X-Gene PCP PLL v2 clock
@@ -22,6 +23,15 @@ Required properties for SoC or PCP PLL clocks:
Optional properties for PLL clocks:
- clock-names : shall be the name of the PLL. If missing, use the device name.
+Required properties for PMD clocks:
+- reg : shall be the physical register address for the pmd clock.
+- clocks : shall be the input parent clock phandle for the clock.
+- #clock-cells : shall be set to 1.
+- clock-output-names : shall be the name of the clock referenced by the
+  derived clock.
+Optional properties for PMD clocks:
+- clock-names : shall be the name of the clock. If missing, use the device name.
+
Required properties for device clocks:
- reg : shall be a list of address and length pairs describing the CSR
reset and/or the divider. Either may be omitted, but at least
@@ -59,6 +69,14 @@ For example:
type = <0>;
};
+ pmd0clk: pmd0clk@7e200200 {
+ compatible = "apm,xgene-pmd-clock";
+ #clock-cells = <1>;
+ clocks = <&pmdpll 0>;
+ reg = <0x0 0x7e200200 0x0 0x10>;
+ clock-output-names = "pmd0clk";
+ };
+
socpll: socpll@17000120 {
compatible = "apm,xgene-socpll-clock";
#clock-cells = <1>;
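A hedged consumer sketch for the PMD clock example above; the consumer node,
its compatible, its address and the clock-cell value are illustrative
assumptions only:

	device@10000000 {
		compatible = "vendor,example-device";	/* placeholder */
		reg = <0x0 0x10000000 0x0 0x1000>;
		clocks = <&pmd0clk 0>;
	};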
diff --git a/Documentation/devicetree/bindings/clock/zx296718-clk.txt b/Documentation/devicetree/bindings/clock/zx296718-clk.txt
new file mode 100644
index 000000000000..8c18b7b237bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/zx296718-clk.txt
@@ -0,0 +1,35 @@
+Device Tree Clock bindings for ZTE zx296718
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be one of the following:
+ "zte,zx296718-topcrm":
+ zx296718 top clock selection, divider and gating
+
+ "zte,zx296718-lsp0crm" and
+ "zte,zx296718-lsp1crm":
+ zx296718 device level clock selection and gating
+
+- reg: Address and length of the register set
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296718-clock.h
+for the full list of zx296718 clock IDs.
+
+
+topclk: topcrm@1461000 {
+ compatible = "zte,zx296718-topcrm-clk";
+ reg = <0x01461000 0x1000>;
+ #clock-cells = <1>;
+};
+
+usbphy0:usb-phy0 {
+ compatible = "zte,zx296718-usb-phy";
+ #phy-cells = <0>;
+ clocks = <&topclk USB20_PHY_CLK>;
+ clock-names = "phyclk";
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
index a2ec4c1c9382..6394ea9e3b9e 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
@@ -9,7 +9,7 @@ Required properties:
- reg: physical base address of the hdmi and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
-- hpd-gpio: following information about the hotplug gpio pin.
+- hpd-gpios: following information about the hotplug gpio pin.
a) phandle of the gpio controller node.
b) pin number within the gpio controller.
c) optional flags and pull up/down.
@@ -56,7 +56,7 @@ Example:
compatible = "samsung,exynos4212-hdmi";
reg = <0x14530000 0x100000>;
interrupts = <0 95 0>;
- hpd-gpio = <&gpx3 7 1>;
+ hpd-gpios = <&gpx3 7 1>;
ddc = <&hdmi_ddc_node>;
phy = <&hdmi_phy_node>;
samsung,syscon-phandle = <&pmu_system_controller>;
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
new file mode 100644
index 000000000000..f5e3c6f2095a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
@@ -0,0 +1,47 @@
+TPO TPG110 Panel
+================
+
+This binding builds on the DPI bindings, adding a few properties
+as a superset of a DPI. See panel-dpi.txt for the required DPI
+bindings.
+
+Required properties:
+- compatible : "tpo,tpg110"
+- grestb-gpios : panel reset GPIO
+- scen-gpios : serial control enable GPIO
+- scl-gpios : serial control clock line GPIO
+- sda-gpios : serial control data line GPIO
+
+Required nodes:
+- Video port for DPI input, see panel-dpi.txt
+- Panel timing for DPI setup, see panel-dpi.txt
+
+Example
+-------
+
+panel {
+ compatible = "tpo,tpg110", "panel-dpi";
+ grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
+ scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ backlight = <&bl>;
+
+ port {
+ nomadik_clcd_panel: endpoint {
+ remote-endpoint = <&nomadik_clcd_pads>;
+ };
+ };
+
+ panel-timing {
+ clock-frequency = <33200000>;
+ hactive = <800>;
+ hback-porch = <216>;
+ hfront-porch = <40>;
+ hsync-len = <1>;
+ vactive = <480>;
+ vback-porch = <35>;
+ vfront-porch = <10>;
+ vsync-len = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 175f0e44ed85..3c9a57a8443b 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -8,6 +8,7 @@ Required properties:
"fsl,imx51-sdma"
"fsl,imx53-sdma"
"fsl,imx6q-sdma"
+ "fsl,imx7d-sdma"
The -to variants should be preferred since they allow determining the
correct ROM script addresses needed for the driver to work without additional
firmware.
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5b902ac8d97e..5f2ce669789a 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -1,4 +1,4 @@
-* Renesas R-Car DMA Controller Device Tree bindings
+* Renesas R-Car (RZ/G) DMA Controller Device Tree bindings
Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
controller instances named DMAC capable of serving multiple clients. Channels
@@ -16,6 +16,8 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are:
+ - "renesas,dmac-r8a7743" (RZ/G1M)
+ - "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a7790" (R-Car H2)
- "renesas,dmac-r8a7791" (R-Car M2-W)
- "renesas,dmac-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index d13c136cef8c..6b267045f522 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -7,6 +7,7 @@ Required properties:
- compatible: Must be one of
"allwinner,sun6i-a31-dma"
"allwinner,sun8i-a23-dma"
+ "allwinner,sun8i-a83t-dma"
"allwinner,sun8i-h3-dma"
- reg: Should contain the registers base address and length
- interrupts: Should contain a reference to the interrupt used by this device
diff --git a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt
new file mode 100644
index 000000000000..35383adb10f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt
@@ -0,0 +1,41 @@
+Qualcomm's PM8941 USB ID Extcon device
+
+Some Qualcomm PMICs have a "misc" module that can be used to detect when
+the USB ID pin has been pulled low or high.
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain "qcom,pm8941-misc";
+
+- reg:
+ Usage: required
+ Value type: <u32>
+ Definition: Should contain the offset to the misc address space
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Should contain the usb id interrupt
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain the string "usb_id" for the usb id interrupt
+
+Example:
+
+ pmic {
+ usb_id: misc@900 {
+ compatible = "qcom,pm8941-misc";
+ reg = <0x900>;
+ interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "usb_id";
+ };
+ }
+
+ usb-controller {
+ extcon = <&usb_id>;
+ };
diff --git a/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt b/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt
new file mode 100644
index 000000000000..c248cd44f727
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/meson/meson_sm.txt
@@ -0,0 +1,15 @@
+* Amlogic Secure Monitor
+
+In the Amlogic SoCs the Secure Monitor code is used to provide access to the
+NVMEM, enable JTAG, set USB boot, etc...
+
+Required properties for the secure monitor node:
+- compatible: Should be "amlogic,meson-gxbb-sm"
+
+Example:
+
+ firmware {
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt
new file mode 100644
index 000000000000..e7853143fa42
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt
@@ -0,0 +1,46 @@
+Bindings for Broadcom's brcm,bcm6345-gpio memory-mapped GPIO controllers.
+
+These bindings can be used on any BCM63xx SoC. However, BCM6338 and BCM6345
+are the only ones which don't need a pinctrl driver.
+BCM6338 has 8-bit data and dirout registers, where the GPIO state can be read
+and/or written, and the direction changed from input to output.
+BCM6345 has 16-bit data and dirout registers, where the GPIO state can be read
+and/or written, and the direction changed from input to output.
+
+Required properties:
+ - compatible: should be "brcm,bcm6345-gpio"
+ - reg-names: must contain
+ "dat" - data register
+ "dirout" - direction (output) register
+ - reg: address + size pairs describing the GPIO register sets;
+ order must correspond with the order of entries in reg-names
+ - #gpio-cells: must be set to 2. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
+ - gpio-controller: Marks the device node as a gpio controller.
+
+Optional properties:
+ - native-endian: use native endian memory.
+
+Examples:
+ - BCM6338:
+ gpio: gpio-controller@fffe0407 {
+ compatible = "brcm,bcm6345-gpio";
+ reg-names = "dirout", "dat";
+ reg = <0xfffe0407 1>, <0xfffe040f 1>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ - BCM6345:
+ gpio: gpio-controller@fffe0406 {
+ compatible = "brcm,bcm6345-gpio";
+ reg-names = "dirout", "dat";
+ reg = <0xfffe0406 2>, <0xfffe040a 2>;
+ native-endian;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
new file mode 100644
index 000000000000..393bb2ed8a77
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
@@ -0,0 +1,36 @@
+Aspeed GPIO controller Device Tree Bindings
+-------------------------------------------
+
+Required properties:
+- compatible : Either "aspeed,ast2400-gpio" or "aspeed,ast2500-gpio"
+
+- #gpio-cells : Should be two
+ - First cell is the GPIO line number
+ - Second cell is used to specify optional
+ parameters (unused)
+
+- reg : Address and length of the register set for the device
+- gpio-controller : Marks the device node as a GPIO controller.
+- interrupts : Interrupt specifier (see interrupt bindings for
+ details)
+- interrupt-controller : Mark the GPIO controller as an interrupt-controller
+
+Optional properties:
+
+- interrupt-parent : The parent interrupt controller, optional if inherited
+
+The gpio and interrupt properties are further described in their respective
+bindings documentation:
+
+- Documentation/devicetree/bindings/gpio/gpio.txt
+- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+ Example:
+ gpio@1e780000 {
+ #gpio-cells = <2>;
+ compatible = "aspeed,ast2400-gpio";
+ gpio-controller;
+ interrupts = <20>;
+ reg = <0x1e780000 0x1000>;
+ interrupt-controller;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
new file mode 100644
index 000000000000..a6611304dd3c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
@@ -0,0 +1,30 @@
+AXP209 GPIO controller
+
+This driver follows the usual GPIO bindings found in
+Documentation/devicetree/bindings/gpio/gpio.txt
+
+Required properties:
+- compatible: Should be "x-powers,axp209-gpio"
+- #gpio-cells: Should be two. The first cell is the pin number and the
+ second is the GPIO flags.
+- gpio-controller: Marks the device node as a GPIO controller.
+
+This node must be a subnode of the axp20x PMIC, documented in
+Documentation/devicetree/bindings/mfd/axp20x.txt
+
+Example:
+
+axp209: pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ axp_gpio: gpio {
+ compatible = "x-powers,axp209-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt
new file mode 100644
index 000000000000..1afc2de7a537
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt
@@ -0,0 +1,16 @@
+TPIC2810 GPIO controller bindings
+
+Required properties:
+ - compatible : Should be "ti,tpic2810".
+ - reg : The I2C address of the device
+ - gpio-controller : Marks the device node as a GPIO controller.
+ - #gpio-cells : Should be two. For consumer use see gpio.txt.
+
+Example:
+
+ gpio@60 {
+ compatible = "ti,tpic2810";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tps65086.txt b/Documentation/devicetree/bindings/gpio/gpio-tps65086.txt
deleted file mode 100644
index ba051074bedc..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-tps65086.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* TPS65086 GPO Controller bindings
-
-Required properties:
- - compatible : Should be "ti,tps65086-gpio".
- - gpio-controller : Marks the device node as a GPIO Controller.
- - #gpio-cells : Should be two. The first cell is the pin number
- and the second cell is used to specify flags.
- See ../gpio/gpio.txt for possible values.
-
-Example:
-
- gpio4: gpio {
- compatible = "ti,tps65086-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ts4900.txt b/Documentation/devicetree/bindings/gpio/gpio-ts4900.txt
new file mode 100644
index 000000000000..3f8e71b1ab2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-ts4900.txt
@@ -0,0 +1,30 @@
+* Technologic Systems I2C-FPGA's GPIO controller bindings
+
+This binding describes the GPIO controller for Technologic's FPGA core.
+The TS-4900's FPGA encodes the GPIO state on 3 bits, whereas the TS-7970's FPGA
+uses 2 bits: it doesn't use a dedicated input bit.
+
+Required properties:
+- compatible: Should be one of the following
+ "technologic,ts4900-gpio"
+ "technologic,ts7970-gpio"
+- reg: Physical base address of the controller and length
+ of memory mapped region.
+- #gpio-cells: Should be two. The first cell is the pin number.
+- gpio-controller: Marks the device node as a gpio controller.
+
+Optional property:
+- ngpios: Number of GPIOs this controller is instantiated with;
+          the default is 32. See gpio.txt for more details.
+
+Example:
+
+&i2c2 {
+ gpio8: gpio@28 {
+ compatible = "technologic,ts4900-gpio";
+ reg = <0x28>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ ngpios = <32>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index 98d198396956..c3d016532d8e 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
@@ -44,26 +44,3 @@ Example for a PXA3xx platform:
interrupt-controller;
#interrupt-cells = <0x2>;
};
-
-* Marvell Orion GPIO Controller
-
-Required properties:
-- compatible : Should be "marvell,orion-gpio"
-- reg : Address and length of the register set for controller.
-- gpio-controller : So we know this is a gpio controller.
-- ngpio : How many gpios this controller has.
-- interrupts : Up to 4 Interrupts for the controller.
-
-Optional properties:
-- mask-offset : For SMP Orions, offset for Nth CPU
-
-Example:
-
- gpio0: gpio@10100 {
- compatible = "marvell,orion-gpio";
- #gpio-cells = <2>;
- gpio-controller;
- reg = <0x10100 0x40>;
- ngpio = <32>;
- interrupts = <35>, <36>, <37>, <38>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 8da26b35b5c3..7c1ab3b3254f 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -11,6 +11,7 @@ Required Properties:
- "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
- "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
+ - "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
- "renesas,gpio-rcar": for generic R-Car GPIO controller.
- reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/hwmon/ltc4151.txt b/Documentation/devicetree/bindings/hwmon/ltc4151.txt
new file mode 100644
index 000000000000..d008a5ef525a
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ltc4151.txt
@@ -0,0 +1,18 @@
+LTC4151 High Voltage I2C Current and Voltage Monitor
+
+Required properties:
+- compatible: Must be "lltc,ltc4151"
+- reg: I2C address
+
+Optional properties:
+- shunt-resistor-micro-ohms
+ Shunt resistor value in micro-Ohms
+ Defaults to <1000> if unset.
+
+Example:
+
+ltc4151@6e {
+ compatible = "lltc,ltc4151";
+ reg = <0x6e>;
+ shunt-resistor-micro-ohms = <1500>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/max6650.txt b/Documentation/devicetree/bindings/hwmon/max6650.txt
new file mode 100644
index 000000000000..f6bd87d8e284
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/max6650.txt
@@ -0,0 +1,28 @@
+Bindings for MAX6651 and MAX6650 I2C fan controllers
+
+Reference:
+[1] https://datasheets.maximintegrated.com/en/ds/MAX6650-MAX6651.pdf
+
+Required properties:
+- compatible : One of "maxim,max6650" or "maxim,max6651"
+- reg : I2C address, one of 0x1b, 0x1f, 0x4b, 0x48.
+
+Optional properties, default is to retain the chip's current setting:
+- maxim,fan-microvolt : The supply voltage of the fan, either 5000000 uV or
+ 12000000 uV.
+- maxim,fan-prescale : Pre-scaling value, as per datasheet [1]. Lower values
+ allow more fine-grained control of slower fans.
+ Valid: 1, 2, 4, 8, 16.
+- maxim,fan-target-rpm: Initial requested fan rotation speed. If specified, the
+ driver selects closed-loop mode and the requested speed.
+ This ensures the fan is already running before userspace
+ takes over.
+
+Example:
+ fan-max6650: max6650@1b {
+ reg = <0x1b>;
+ compatible = "maxim,max6650";
+ maxim,fan-microvolt = <12000000>;
+ maxim,fan-prescale = <4>;
+ maxim,fan-target-rpm = <1200>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
index 71191ff0e781..248a155414c2 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
@@ -44,8 +44,7 @@ Required properties:
- our-claim-gpio: The GPIO that we use to claim the bus.
- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See i2c-mux.txt in this directory.
-- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
+- I2C arbitration bus node. See i2c-arb.txt in this directory.
Optional properties:
- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
@@ -63,8 +62,6 @@ Example:
i2c-arbitrator {
compatible = "i2c-arb-gpio-challenge";
- #address-cells = <1>;
- #size-cells = <0>;
i2c-parent = <&{/i2c@12CA0000}>;
@@ -74,8 +71,7 @@ Example:
wait-retry-us = <3000>;
wait-free-us = <50000>;
- i2c@0 {
- reg = <0>;
+ i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb.txt b/Documentation/devicetree/bindings/i2c/i2c-arb.txt
new file mode 100644
index 000000000000..59abf9277bdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-arb.txt
@@ -0,0 +1,35 @@
+Common i2c arbitration bus properties.
+
+- i2c-arb child node
+
+Required properties for the i2c-arb child node:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties for i2c-arb child node:
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+ /*
+ An NXP pca9541 I2C bus master selector at address 0x74
+	   with an NXP pca8574 GPIO expander attached.
+ */
+
+ arb@74 {
+ compatible = "nxp,pca9541";
+ reg = <0x74>;
+
+ i2c-arb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@38 {
+ compatible = "nxp,pca8574";
+ reg = <0x38>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-gate.txt b/Documentation/devicetree/bindings/i2c/i2c-gate.txt
new file mode 100644
index 000000000000..1846d236e656
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-gate.txt
@@ -0,0 +1,41 @@
+An i2c gate is useful e.g. to reduce the digital noise for RF tuners connected
+to the i2c bus. Gates are similar to arbitrators in that some kind of operation
+is needed to access the i2c bus past the arbitrator/gate, but there are no
+competing masters to consider for gates and therefore no arbitration takes
+place for gates.
+
+Common i2c gate properties.
+
+- i2c-gate child node
+
+Required properties for the i2c-gate child node:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties for i2c-gate child node:
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+ /*
+ An Invensense mpu9150 at address 0x68 featuring an on-chip Asahi
+ Kasei ak8975 compass behind a gate.
+ */
+
+ mpu9150@68 {
+ compatible = "invensense,mpu9150";
+ reg = <0x68>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <18 1>;
+
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ax8975@c {
+ compatible = "ak,ak8975";
+ reg = <0x0c>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-meson.txt b/Documentation/devicetree/bindings/i2c/i2c-meson.txt
index 682f9a6f766e..386357d1aab0 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-meson.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-meson.txt
@@ -1,7 +1,7 @@
Amlogic Meson I2C controller
Required properties:
- - compatible: must be "amlogic,meson6-i2c"
+ - compatible: must be "amlogic,meson6-i2c" or "amlogic,meson-gxbb-i2c"
- reg: physical address and length of the device registers
- interrupts: a single interrupt specifier
- clocks: clock for the device
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux.txt
index af84cce5cd7b..212e6779dc5c 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux.txt
@@ -2,19 +2,32 @@ Common i2c bus multiplexer/switch properties.
An i2c bus multiplexer/switch will have several child busses that are
numbered uniquely in a device dependent manner. The nodes for an i2c bus
-multiplexer/switch will have one child node for each child
-bus.
+multiplexer/switch will have one child node for each child bus.
-Required properties:
+Optional properties:
+- #address-cells = <1>;
+  This property is required if the i2c-mux child node does not exist.
+
+- #size-cells = <0>;
+  This property is required if the i2c-mux child node does not exist.
+
+- i2c-mux
+ For i2c multiplexers/switches that have child nodes that are a mixture
+ of both i2c child busses and other child nodes, the 'i2c-mux' subnode
+ can be used for populating the i2c child busses. If an 'i2c-mux'
+ subnode is present, only subnodes of this will be considered as i2c
+ child busses.
+
+Required properties for the i2c-mux child node:
- #address-cells = <1>;
- #size-cells = <0>;
-Required properties for child nodes:
+Required properties for i2c child bus nodes:
- #address-cells = <1>;
- #size-cells = <0>;
- reg : The sub-bus number.
-Optional properties for child nodes:
+Optional properties for i2c child bus nodes:
- Other properties specific to the multiplexer/switch hardware.
- Child nodes conforming to i2c bus binding
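A hedged sketch of the optional 'i2c-mux' subnode described above; the mux
compatible, addresses and node names are hypothetical:

	i2cmux@70 {
		compatible = "vendor,example-i2c-mux";	/* placeholder */
		reg = <0x70>;

		i2c-mux {
			#address-cells = <1>;
			#size-cells = <0>;

			i2c@0 {
				reg = <0>;
				#address-cells = <1>;
				#size-cells = <0>;
			};

			i2c@1 {
				reg = <1>;
				#address-cells = <1>;
				#size-cells = <0>;
			};
		};
	};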
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
index 5f0cb502b1db..239632a0d709 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -11,6 +11,7 @@ Required properties:
"renesas,i2c-r8a7793"
"renesas,i2c-r8a7794"
"renesas,i2c-r8a7795"
+ "renesas,i2c-r8a7796"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: interrupt specifier.
diff --git a/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt b/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt
new file mode 100644
index 000000000000..0fbbc6970ec5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt
@@ -0,0 +1,29 @@
+* NXP PCA9541 I2C bus master selector
+
+Required Properties:
+
+ - compatible: Must be "nxp,pca9541"
+
+ - reg: The I2C address of the device.
+
+ The following required properties are defined externally:
+
+ - I2C arbitration bus node. See i2c-arb.txt in this directory.
+
+
+Example:
+
+ i2c-arbitrator@74 {
+ compatible = "nxp,pca9541";
+ reg = <0x74>;
+
+ i2c-arb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@54 {
+ compatible = "at,24c08";
+ reg = <0x54>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 5c70ce9c1954..1416c6a0d2cd 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -38,6 +38,7 @@ dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
dlg,da9063 DA9063: system PMIC for quad-core application processors
+domintech,dmard09 DMARD09: 3-axis Accelerometer
epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
@@ -56,6 +57,7 @@ maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
mc,rv3029c2 Real Time Clock Module with I2C-Bus
+mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
microchip,mcp4531-502 Microchip 7-bit Single I2C Digital Potentiometer (5k)
microchip,mcp4531-103 Microchip 7-bit Single I2C Digital Potentiometer (10k)
microchip,mcp4531-503 Microchip 7-bit Single I2C Digital Potentiometer (50k)
diff --git a/Documentation/devicetree/bindings/iio/accel/dmard06.txt b/Documentation/devicetree/bindings/iio/accel/dmard06.txt
new file mode 100644
index 000000000000..ce105a12c645
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/dmard06.txt
@@ -0,0 +1,19 @@
+Device tree bindings for Domintech DMARD05, DMARD06, DMARD07 accelerometers
+
+Required properties:
+ - compatible : Should be "domintech,dmard05"
+ or "domintech,dmard06"
+ or "domintech,dmard07"
+ - reg : I2C address of the chip. Should be 0x1c
+
+Example:
+ &i2c1 {
+ /* ... */
+
+ accelerometer@1c {
+ compatible = "domintech,dmard06";
+ reg = <0x1c>;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxsd9.txt b/Documentation/devicetree/bindings/iio/accel/kionix,kxsd9.txt
new file mode 100644
index 000000000000..b25bf3a77e0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxsd9.txt
@@ -0,0 +1,22 @@
+Kionix KXSD9 Accelerometer device tree bindings
+
+Required properties:
+ - compatible: should be set to "kionix,kxsd9"
+ - reg: i2c slave address
+
+Optional properties:
+ - vdd-supply: The input supply for VDD
+ - iovdd-supply: The input supply for IOVDD
+ - interrupts: The movement detection interrupt
+ - mount-matrix: See mount-matrix.txt
+
+Example:
+
+kxsd9@18 {
+ compatible = "kionix,kxsd9";
+ reg = <0x18>;
+ interrupt-parent = <&foo>;
+ interrupts = <57 IRQ_TYPE_EDGE_FALLING>;
+ iovdd-supply = <&bar>;
+ vdd-supply = <&baz>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
new file mode 100644
index 000000000000..68c45cbbe3d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
@@ -0,0 +1,29 @@
+* Mediatek AUXADC - Analog to Digital Converter on Mediatek mobile SoCs (mt65xx/mt81xx/mt27xx)
+===============
+
+The Auxiliary Analog/Digital Converter (AUXADC) is an ADC found
+in some Mediatek SoCs which among other things measures the temperatures
+in the SoC. It can be used directly with register accesses, but it is also
+used by the thermal controller, which reads the temperatures from the AUXADC
+directly via its own bus interface. See
+Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
+for the Thermal Controller which holds a phandle to the AUXADC.
+
+Required properties:
+ - compatible: Should be one of:
+ - "mediatek,mt2701-auxadc": For MT2701 family of SoCs
+ - "mediatek,mt8173-auxadc": For MT8173 family of SoCs
+ - reg: Address range of the AUXADC unit.
+ - clocks: Should contain a clock specifier for each entry in clock-names
+ - clock-names: Should contain "main".
+ - #io-channel-cells: Should be 1, see ../iio-bindings.txt
+
+Example:
+
+auxadc: adc@11001000 {
+ compatible = "mediatek,mt2701-auxadc";
+ reg = <0 0x11001000 0 0x1000>;
+ clocks = <&pericfg CLK_PERI_AUXADC>;
+ clock-names = "main";
+ #io-channel-cells = <1>;
+};
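A hedged sketch of an IIO consumer of the AUXADC above, using the generic
io-channels binding; the consumer node and the channel index are illustrative
assumptions (see ../iio-bindings.txt):

	consumer {
		io-channels = <&auxadc 0>;	/* channel index is an example only */
		io-channel-names = "ch0";
	};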
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc12138.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc12138.txt
new file mode 100644
index 000000000000..049a1d36f013
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc12138.txt
@@ -0,0 +1,37 @@
+* Texas Instruments' ADC12130/ADC12132/ADC12138
+
+Required properties:
+ - compatible: Should be one of
+ * "ti,adc12130"
+ * "ti,adc12132"
+ * "ti,adc12138"
+ - reg: SPI chip select number for the device
+ - interrupts: Should contain interrupt for EOC (end of conversion)
+ - clocks: phandle to conversion clock input
+ - spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+ - vref-p-supply: The regulator supply for positive analog voltage reference
+
+Optional properties:
+ - vref-n-supply: The regulator supply for negative analog voltage reference
+ (Note that this must not go below GND or exceed vref-p)
+ If not specified, this is assumed to be analog ground.
+ - ti,acquisition-time: The number of conversion clock periods for the S/H's
+ acquisition time. Should be one of 6, 10, 18, 34. If not specified,
+ default value of 10 is used.
+ For high source impedances, this value can be increased to 18 or 34.
+ For less ADC accuracy and/or slower CCLK frequencies this value may be
+ decreased to 6. See section 6.0 INPUT SOURCE RESISTANCE in the
+ datasheet for details.
+
+Example:
+adc@0 {
+ compatible = "ti,adc12138";
+ reg = <0>;
+ interrupts = <28 IRQ_TYPE_EDGE_RISING>;
+ interrupt-parent = <&gpio1>;
+ clocks = <&cclk>;
+ vref-p-supply = <&ldo4_reg>;
+ spi-max-frequency = <5000000>;
+ ti,acquisition-time = <6>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
new file mode 100644
index 000000000000..9ed2315781e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
@@ -0,0 +1,16 @@
+* Texas Instruments ADC141S626 and ADC161S626 chips
+
+Required properties:
+ - compatible: Should be "ti,adc141s626" or "ti,adc161s626"
+ - reg: spi chip select number for the device
+
+Recommended properties:
+ - spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+adc@0 {
+ compatible = "ti,adc161s626";
+ reg = <0>;
+ spi-max-frequency = <4300000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
new file mode 100644
index 000000000000..5d8b687d5edc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
@@ -0,0 +1,22 @@
+* Atlas Scientific ORP-SM OEM sensor
+
+https://www.atlas-scientific.com/_files/_datasheets/_oem/ORP_oem_datasheet.pdf
+
+Required properties:
+
+ - compatible: must be "atlas,orp-sm"
+ - reg: the I2C address of the sensor
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client
+ node bindings.
+
+Example:
+
+atlas@66 {
+ compatible = "atlas,orp-sm";
+ reg = <0x66>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 2>;
+};
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
new file mode 100644
index 000000000000..77d5aba1bd8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
@@ -0,0 +1,29 @@
+* Asahi Kasei AK8974 magnetometer sensor
+
+Required properties:
+
+- compatible : should be "asahi-kasei,ak8974"
+- reg : the I2C address of the magnetometer
+
+Optional properties:
+
+- avdd-supply: regulator supply for the analog voltage
+ (see regulator/regulator.txt)
+- dvdd-supply: regulator supply for the digital voltage
+ (see regulator/regulator.txt)
+- interrupts: data ready (DRDY) and interrupt (INT1) lines
+ from the chip, the DRDY interrupt must be placed first.
+ The interrupts can be triggered on rising or falling
+ edges alike.
+- mount-matrix: an optional 3x3 mounting rotation matrix
+
+Example:
+
+ak8974@0f {
+ compatible = "asahi-kasei,ak8974";
+ reg = <0x0f>;
+ avdd-supply = <&foo_reg>;
+ dvdd-supply = <&bar_reg>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>,
+ <1 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
new file mode 100644
index 000000000000..fb85de676e03
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
@@ -0,0 +1,31 @@
+Murata ZPA2326 pressure sensor
+
+Pressure sensor from Murata with SPI and I2C bus interfaces.
+
+Required properties:
+- compatible: "murata,zpa2326"
+- reg: the I2C address or SPI chip select the device will respond to
+
+Recommended properties for SPI bus usage:
+- spi-max-frequency: maximum SPI bus frequency as documented in
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional properties:
+- vref-supply: an optional regulator that needs to be on to provide VREF
+ power to the sensor
+- vdd-supply: an optional regulator that needs to be on to provide VDD
+ power to the sensor
+- interrupt-parent: phandle to the parent interrupt controller as documented in
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+- interrupts: interrupt mapping for IRQ as documented in
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+Example:
+
+zpa2326@5c {
+ compatible = "murata,zpa2326";
+ reg = <0x5c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12>;
+ vdd-supply = <&ldo_1v8_gnss>;
+};
diff --git a/Documentation/devicetree/bindings/iio/proximity/sx9500.txt b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
new file mode 100644
index 000000000000..b301dd2b35da
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
@@ -0,0 +1,24 @@
+Semtech's SX9500 capacitive proximity button device driver
+
+Required properties:
+ - compatible: must be "semtech,sx9500"
+ - reg: i2c address where to find the device
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic
+ interrupt client node bindings.
+
+Optional properties:
+ - reset-gpios: Reference to the GPIO connected to the device's active
+ low reset pin.
+
+Example:
+
+sx9500@28 {
+ compatible = "semtech,sx9500";
+ reg = <0x28>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt b/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt
new file mode 100644
index 000000000000..28bc5c4d965b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/maxim_thermocouple.txt
@@ -0,0 +1,21 @@
+Maxim thermocouple support
+
+* https://datasheets.maximintegrated.com/en/ds/MAX6675.pdf
+* https://datasheets.maximintegrated.com/en/ds/MAX31855.pdf
+
+Required properties:
+
+ - compatible: must be "maxim,max31855" or "maxim,max6675"
+ - reg: SPI chip select number for the device
+ - spi-max-frequency: must be 4300000
+ - spi-cpha: must be defined for max6675 to enable SPI mode 1
+
+ Refer to spi/spi-bus.txt for generic SPI slave bindings.
+
+Example:
+
+ max31855@0 {
+ compatible = "maxim,max31855";
+ reg = <0>;
+ spi-max-frequency = <4300000>;
+ };
diff --git a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
new file mode 100644
index 000000000000..f97993be2dcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -0,0 +1,107 @@
+Hisilicon RoCE DT description
+
+The Hisilicon RoCE engine is a part of the network subsystem.
+It depends on other parts of the network subsystem, such as the gmac and
+dsa fabric.
+
+Additional properties are described here:
+
+Required properties:
+- compatible: Should contain "hisilicon,hns-roce-v1".
+- reg: Physical base address of the RoCE engine and
+length of memory mapped region.
+- eth-handle: phandle, specifies a reference to a node
+representing an ethernet device.
+- dsaf-handle: phandle, specifies a reference to a node
+representing a dsaf device.
+- #address-cells: must be 2
+- #size-cells: must be 2
+Optional properties:
+- dma-coherent: Present if DMA operations are coherent.
+- interrupt-parent: the interrupt parent of this device.
+- interrupts: should contain 32 completion event irqs, 1 async event irq
+and 1 event overflow irq.
+- interrupt-names: should contain the names of the 34 roce device irqs
+ - hns-roce-comp-0 ~ hns-roce-comp-31: 32 completion event irqs
+ - hns-roce-async: 1 async event irq
+ - hns-roce-common: named common exception warning irq
+Example:
+ infiniband@c4000000 {
+ compatible = "hisilicon,hns-roce-v1";
+ reg = <0x0 0xc4000000 0x0 0x100000>;
+ dma-coherent;
+ eth-handle = <&eth2 &eth3 &eth4 &eth5 &eth6 &eth7>;
+ dsaf-handle = <&soc0_dsa>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mbigen_dsa>;
+ interrupts = <722 1>,
+ <723 1>,
+ <724 1>,
+ <725 1>,
+ <726 1>,
+ <727 1>,
+ <728 1>,
+ <729 1>,
+ <730 1>,
+ <731 1>,
+ <732 1>,
+ <733 1>,
+ <734 1>,
+ <735 1>,
+ <736 1>,
+ <737 1>,
+ <738 1>,
+ <739 1>,
+ <740 1>,
+ <741 1>,
+ <742 1>,
+ <743 1>,
+ <744 1>,
+ <745 1>,
+ <746 1>,
+ <747 1>,
+ <748 1>,
+ <749 1>,
+ <750 1>,
+ <751 1>,
+ <752 1>,
+ <753 1>,
+ <785 1>,
+ <754 4>;
+
+ interrupt-names = "hns-roce-comp-0",
+ "hns-roce-comp-1",
+ "hns-roce-comp-2",
+ "hns-roce-comp-3",
+ "hns-roce-comp-4",
+ "hns-roce-comp-5",
+ "hns-roce-comp-6",
+ "hns-roce-comp-7",
+ "hns-roce-comp-8",
+ "hns-roce-comp-9",
+ "hns-roce-comp-10",
+ "hns-roce-comp-11",
+ "hns-roce-comp-12",
+ "hns-roce-comp-13",
+ "hns-roce-comp-14",
+ "hns-roce-comp-15",
+ "hns-roce-comp-16",
+ "hns-roce-comp-17",
+ "hns-roce-comp-18",
+ "hns-roce-comp-19",
+ "hns-roce-comp-20",
+ "hns-roce-comp-21",
+ "hns-roce-comp-22",
+ "hns-roce-comp-23",
+ "hns-roce-comp-24",
+ "hns-roce-comp-25",
+ "hns-roce-comp-26",
+ "hns-roce-comp-27",
+ "hns-roce-comp-28",
+ "hns-roce-comp-29",
+ "hns-roce-comp-30",
+ "hns-roce-comp-31",
+ "hns-roce-async",
+ "hns-roce-common";
+ };
diff --git a/Documentation/devicetree/bindings/input/adc-keys.txt b/Documentation/devicetree/bindings/input/adc-keys.txt
new file mode 100644
index 000000000000..e551814629b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/adc-keys.txt
@@ -0,0 +1,49 @@
+ADC attached resistor ladder buttons
+------------------------------------
+
+Required properties:
+ - compatible: "adc-keys"
+ - io-channels: Phandle to an ADC channel
+ - io-channel-names: Should be "buttons"
+ - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+
+Optional properties:
+ - poll-interval: Poll interval time in milliseconds
+ - autorepeat: Boolean, Enable auto repeat feature of Linux input
+ subsystem.
+
+Each button (key) is represented as a sub-node of "adc-keys":
+
+Required subnode-properties:
+ - label: Descriptive name of the key.
+ - linux,code: Keycode to emit.
+ - press-threshold-microvolt: Voltage ADC input when this key is pressed.
+
+Example:
+
+#include <dt-bindings/input/input.h>
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&lradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <2000000>;
+
+ button-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <1500000>;
+ };
+
+ button-down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <1000000>;
+ };
+
+ button-enter {
+ label = "Enter";
+ linux,code = <KEY_ENTER>;
+ press-threshold-microvolt = <500000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/gpio-decoder.txt b/Documentation/devicetree/bindings/input/gpio-decoder.txt
new file mode 100644
index 000000000000..14a77fb96cf0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/gpio-decoder.txt
@@ -0,0 +1,23 @@
+* GPIO Decoder DT bindings
+
+Required Properties:
+- compatible: should be "gpio-decoder"
+- gpios: a spec of gpios (at least two) to be decoded to a number with
+ first entry representing the MSB.
+
+Optional Properties:
+- decoder-max-value: Maximum possible value that can be reported by
+ the gpios.
+- linux,axis: the input subsystem axis to map to (ABS_X/ABS_Y).
+ Defaults to 0 (ABS_X).
+
+Example:
+ gpio-decoder0 {
+ compatible = "gpio-decoder";
+ gpios = <&pca9536 3 GPIO_ACTIVE_HIGH>,
+ <&pca9536 2 GPIO_ACTIVE_HIGH>,
+ <&pca9536 1 GPIO_ACTIVE_HIGH>,
+ <&pca9536 0 GPIO_ACTIVE_HIGH>;
+ linux,axis = <0>; /* ABS_X */
+ decoder-max-value = <9>;
+ };
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
index 95d0fb11a787..4d9a3717eaaf 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
@@ -34,11 +34,10 @@ Example nodes:
gpio_keys_polled {
compatible = "gpio-keys-polled";
- #address-cells = <1>;
- #size-cells = <0>;
poll-interval = <100>;
autorepeat;
- button@21 {
+
+ button21 {
label = "GPIO Key UP";
linux,code = <103>;
gpios = <&gpio1 0 1>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index f99528da1b1d..6db22103e2dd 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -19,6 +19,7 @@ Required properties:
or: "edt,edt-ft5306"
or: "edt,edt-ft5406"
or: "edt,edt-ft5506"
+ or: "focaltech,ft6236"
- reg: I2C slave address of the chip (0x38)
- interrupt-parent: a phandle pointing to the interrupt controller
@@ -43,6 +44,13 @@ Optional properties:
- offset: allows setting the edge compensation in the range from
0 to 31.
+ - touchscreen-size-x : See touchscreen.txt
+ - touchscreen-size-y : See touchscreen.txt
+ - touchscreen-fuzz-x : See touchscreen.txt
+ - touchscreen-fuzz-y : See touchscreen.txt
+ - touchscreen-inverted-x : See touchscreen.txt
+ - touchscreen-inverted-y : See touchscreen.txt
+ - touchscreen-swapped-x-y : See touchscreen.txt
Example:
polytouch: edt-ft5x06@38 {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
new file mode 100644
index 000000000000..5a19f4c3e9d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
@@ -0,0 +1,27 @@
+* Elan eKTF2127 I2C touchscreen controller
+
+Required properties:
+ - compatible : "elan,ektf2127"
+ - reg : I2C slave address of the chip (0x40)
+ - interrupt-parent : a phandle pointing to the interrupt controller
+ serving the interrupt for this chip
+ - interrupts : interrupt specification for the ektf2127 interrupt
+ - power-gpios : GPIO specification for the pin connected to the
+ ektf2127's wake input. This needs to be driven high
+ to take the ektf2127 out of its low power state
+
+For additional optional properties see: touchscreen.txt
+
+Example:
+
+i2c@00000000 {
+ ektf2127: touchscreen@15 {
+ compatible = "elan,ektf2127";
+ reg = <0x15>;
+ interrupt-parent = <&pio>;
+ interrupts = <6 11 IRQ_TYPE_EDGE_FALLING>;
+ power-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ };
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/focaltech-ft6236.txt b/Documentation/devicetree/bindings/input/touchscreen/focaltech-ft6236.txt
deleted file mode 100644
index 777521da3da5..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/focaltech-ft6236.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* FocalTech FT6236 I2C touchscreen controller
-
-Required properties:
- - compatible : "focaltech,ft6236"
- - reg : I2C slave address of the chip (0x38)
- - interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- - interrupts : interrupt specification for the touch controller
- interrupt
- - reset-gpios : GPIO specification for the RSTN input
- - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
- - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
-
-Optional properties:
- - touchscreen-fuzz-x : horizontal noise value of the absolute input
- device (in pixels)
- - touchscreen-fuzz-y : vertical noise value of the absolute input
- device (in pixels)
- - touchscreen-inverted-x : X axis is inverted (boolean)
- - touchscreen-inverted-y : Y axis is inverted (boolean)
- - touchscreen-swapped-x-y: X and Y axis are swapped (boolean)
- Swapping is done after inverting the axis
-
-Example:
-
- ft6x06@38 {
- compatible = "focaltech,ft6236";
- reg = <0x38>;
- interrupt-parent = <&gpio>;
- interrupts = <23 2>;
- touchscreen-size-x = <320>;
- touchscreen-size-y = <480>;
- touchscreen-inverted-x;
- touchscreen-swapped-x-y;
- };
diff --git a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
index e30e0b93f2b3..3e5b9793341f 100644
--- a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
@@ -1,13 +1,24 @@
-Texas Instruments TPS65218 power button
+Texas Instruments TPS65217 and TPS65218 power button
+
+This module is part of the TPS65217/TPS65218. For more details about the whole
+TPS65217 chip see Documentation/devicetree/bindings/regulator/tps65217.txt.
This driver provides a simple power button event via an Interrupt.
Required properties:
-- compatible: should be "ti,tps65218-pwrbutton"
+- compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
+
+Required properties for TPS65218:
- interrupts: should be one of the following
- <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
-Example:
+Examples:
+
+&tps {
+ tps65217-pwrbutton {
+ compatible = "ti,tps65217-pwrbutton";
+ };
+};
&tps {
power-button {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index ae5054c27c99..e3f052d8c11a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -1,10 +1,12 @@
-DT bindings for the R-Mobile/R-Car interrupt controller
+DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller
Required properties:
- compatible: has to be "renesas,irqc-<soctype>", "renesas,irqc" as fallback.
Examples with soctypes are:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
+ - "renesas,irqc-r8a7743" (RZ/G1M)
+ - "renesas,irqc-r8a7745" (RZ/G1E)
- "renesas,irqc-r8a7790" (R-Car H2)
- "renesas,irqc-r8a7791" (R-Car M2-W)
- "renesas,irqc-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 93ef6e6e43b5..696be5792625 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -19,6 +19,13 @@ Optional properties for child nodes:
a device, i.e. no other LED class device can be assigned the same
label.
+- default-state : The initial state of the LED. Valid values are "on", "off",
+ and "keep". If the LED is already on or off and the default-state property is
+ set to the same value, then no glitch should be produced where the LED
+ momentarily turns off (or on). The "keep" setting will keep the LED at
+ whatever its current state is, without producing a glitch. The default is
+ off if this property is not present.
+
- linux,default-trigger : This parameter, if present, is a
string defining the trigger assigned to the LED. Current triggers are:
"backlight" - LED will act as a back-light, controlled by the framebuffer
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
index 3f48c1eaf085..ccebce597f37 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
@@ -49,7 +49,7 @@ LED sub-node optional properties:
- active-low : Boolean, makes LED active low.
Default : false
- default-state : see
- Documentation/devicetree/bindings/leds/leds-gpio.txt
+ Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : see
Documentation/devicetree/bindings/leds/common.txt
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
index b22a55bcc65d..da5708e7b43b 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
@@ -28,7 +28,7 @@ LED sub-node optional properties:
- active-low : Boolean, makes LED active low.
Default : false
- default-state : see
- Documentation/devicetree/bindings/leds/leds-gpio.txt
+ Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : see
Documentation/devicetree/bindings/leds/common.txt
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt
index 5b1b43a64265..76535ca37120 100644
--- a/Documentation/devicetree/bindings/leds/leds-gpio.txt
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.txt
@@ -14,13 +14,8 @@ LED sub-node properties:
see Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : (optional)
see Documentation/devicetree/bindings/leds/common.txt
-- default-state: (optional) The initial state of the LED. Valid
- values are "on", "off", and "keep". If the LED is already on or off
- and the default-state property is set the to same value, then no
- glitch should be produced where the LED momentarily turns off (or
- on). The "keep" setting will keep the LED at whatever its current
- state is, without producing a glitch. The default is off if this
- property is not present.
+- default-state: (optional) The initial state of the LED.
+ see Documentation/devicetree/bindings/leds/common.txt
- retain-state-suspended: (optional) The suspend state can be retained.Such
as charge-led gpio.
- panic-indicator : (optional)
diff --git a/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt b/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt
new file mode 100644
index 000000000000..fc2603484544
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt
@@ -0,0 +1,59 @@
+LEDs connected to is31fl319x LED controller chip
+
+Required properties:
+- compatible : Should be any of
+ "issi,is31fl3190"
+ "issi,is31fl3191"
+ "issi,is31fl3193"
+ "issi,is31fl3196"
+ "issi,is31fl3199"
+ "si-en,sn3199".
+- #address-cells: Must be 1.
+- #size-cells: Must be 0.
+- reg: 0x64, 0x65, 0x66, or 0x67.
+
+Optional properties:
+- audio-gain-db : audio gain selection for external analog modulation input.
+ Valid values: 0 - 21, step by 3 (rounded down)
+ Default: 0
+
+Each led is represented as a sub-node of the issi,is31fl319x device.
+There can be fewer LED subnodes than the chip supports, but not more.
+
+Required led sub-node properties:
+- reg : number of LED line
+ Valid values: 1 - number of leds supported by the chip variant.
+
+Optional led sub-node properties:
+- label : see Documentation/devicetree/bindings/leds/common.txt.
+- linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt.
+- led-max-microamp : (optional)
+ Valid values: 5000 - 40000, step by 5000 (rounded down)
+ Default: 20000 (20 mA)
+ Note: the driver will use the lowest of all LED limits, since the
+ chip has a single global setting. The lowest value is chosen because
+ of how the PWM works: lower brightness is achieved by reducing the
+ duty-cycle of the pulses rather than the current, so the peak current
+ always equals led-max-microamp.
+
+Examples:
+
+fancy_leds: leds@65 {
+ compatible = "issi,is31fl3196";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x65>;
+
+ red_aux: led@1 {
+ label = "red:aux";
+ reg = <1>;
+ led-max-microamp = <10000>;
+ };
+
+ green_power: led@5 {
+ label = "green:power";
+ reg = <5>;
+ linux,default-trigger = "default-on";
+ };
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-pm8058.txt b/Documentation/devicetree/bindings/leds/leds-pm8058.txt
new file mode 100644
index 000000000000..89584c49aab2
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-pm8058.txt
@@ -0,0 +1,67 @@
+Qualcomm PM8058 LED driver
+
+The Qualcomm PM8058 is a multi-functional device which contains
+an LED driver block for up to six LEDs: three normal LEDs, two
+"flash" LEDs and one "keypad backlight" LED. The names are
+quoted because sometimes these LED drivers are used for wildly
+different things than flash or keypad backlight: their names
+are more of a suggestion than a hard-wired usecase.
+
+Hardware-wise the different LEDs support slightly different
+output currents. The "flash" LEDs do not need to charge nor
+do they support external triggers. They are just powerful LED
+drivers.
+
+The LEDs appear as children to the PM8058 device, with the
+proper compatible string. For the PM8058 bindings see:
+mfd/qcom-pm8xxx.txt.
+
+Each LED is represented as a sub-node of the syscon device. Each
+node's name represents the name of the corresponding LED.
+
+LED sub-node properties:
+
+Required properties:
+- compatible: one of
+ "qcom,pm8058-led" (for the normal LEDs at 0x131, 0x132 and 0x133)
+ "qcom,pm8058-keypad-led" (for the "keypad" LED at 0x48)
+ "qcom,pm8058-flash-led" (for the "flash" LEDs at 0x49 and 0xFB)
+
+Optional properties:
+- label: see Documentation/devicetree/bindings/leds/common.txt
+- default-state: see Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger: see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+qcom,ssbi@500000 {
+ pmicintc: pmic@0 {
+ compatible = "qcom,pm8058";
+ led@48 {
+ compatible = "qcom,pm8058-keypad-led";
+ reg = <0x48>;
+ label = "pm8050:white:keypad";
+ default-state = "off";
+ };
+ led@131 {
+ compatible = "qcom,pm8058-led";
+ reg = <0x131>;
+ label = "pm8058:red";
+ default-state = "off";
+ };
+ led@132 {
+ compatible = "qcom,pm8058-led";
+ reg = <0x132>;
+ label = "pm8058:yellow";
+ default-state = "off";
+ linux,default-trigger = "mmc0";
+ };
+ led@133 {
+ compatible = "qcom,pm8058-led";
+ reg = <0x133>;
+ label = "pm8058:green";
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.txt b/Documentation/devicetree/bindings/leds/register-bit-led.txt
index 379cefdc0bda..59b56365f648 100644
--- a/Documentation/devicetree/bindings/leds/register-bit-led.txt
+++ b/Documentation/devicetree/bindings/leds/register-bit-led.txt
@@ -23,13 +23,8 @@ Optional properties:
see Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : (optional)
see Documentation/devicetree/bindings/leds/common.txt
-- default-state: (optional) The initial state of the LED. Valid
- values are "on", "off", and "keep". If the LED is already on or off
- and the default-state property is set the to same value, then no
- glitch should be produced where the LED momentarily turns off (or
- on). The "keep" setting will keep the LED at whatever its current
- state is, without producing a glitch. The default is off if this
- property is not present.
+- default-state: (optional) The initial state of the LED
+ see Documentation/devicetree/bindings/leds/common.txt
Example:
diff --git a/Documentation/devicetree/bindings/mailbox/meson-mhu.txt b/Documentation/devicetree/bindings/mailbox/meson-mhu.txt
new file mode 100644
index 000000000000..a530310772b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/meson-mhu.txt
@@ -0,0 +1,34 @@
+Amlogic Meson MHU Mailbox Driver
+================================
+
+The Amlogic Meson SoCs' Message-Handling-Unit (MHU) is a mailbox controller
+that has 3 independent channels/links to communicate with remote processor(s).
+MHU links are hardwired on a platform. A link raises an interrupt for any
+received data. However, there is no specified way of knowing if the sent
+data has been read by the remote. This driver assumes the sender polls
+STAT register and the remote clears it after having read the data.
+
+Mailbox Device Node:
+====================
+
+Required properties:
+--------------------
+- compatible: Shall be "amlogic,meson-gxbb-mhu"
+- reg: Contains the mailbox register address range (base
+ address and length)
+- #mbox-cells: Shall be 1 - the index of the channel needed.
+- interrupts: Contains the interrupt information corresponding to
+ each of the 3 links of the MHU.
+
+Example:
+--------
+
+ mailbox: mailbox@c883c404 {
+ #mbox-cells = <1>;
+ compatible = "amlogic,meson-gxbb-mhu";
+ reg = <0 0xc883c404 0 0x4c>;
+ interrupts = <0 208 IRQ_TYPE_EDGE_RISING>,
+ <0 209 IRQ_TYPE_EDGE_RISING>,
+ <0 210 IRQ_TYPE_EDGE_RISING>;
+ };
diff --git a/Documentation/devicetree/bindings/media/meson-ir.txt b/Documentation/devicetree/bindings/media/meson-ir.txt
index 407848e85f31..e7e3f3c4fc8f 100644
--- a/Documentation/devicetree/bindings/media/meson-ir.txt
+++ b/Documentation/devicetree/bindings/media/meson-ir.txt
@@ -1,7 +1,10 @@
* Amlogic Meson IR remote control receiver
Required properties:
- - compatible : should be "amlogic,meson6-ir"
+ - compatible : depending on the platform this should be one of:
+ - "amlogic,meson6-ir"
+ - "amlogic,meson8b-ir"
+ - "amlogic,meson-gxbb-ir"
- reg : physical base address and length of the device registers
- interrupts : a single specifier for the interrupt from the device
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mem-ctrlr.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ddr.txt
index f87856faf1ab..dde6d837083a 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mem-ctrlr.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ddr.txt
@@ -7,6 +7,8 @@ Properties:
"fsl,qoriq-memory-controller".
- reg : Address and size of DDR controller registers
- interrupts : Error interrupt of DDR controller
+- little-endian : Specifies little-endian access to registers
+ If omitted, big-endian will be used.
Example 1:
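As a sketch only (not taken from the patch; the address, interrupt specifier and labels are illustrative), a controller node using the new little-endian flag could look like:

	ddr: memory-controller@8000 {
		compatible = "fsl,qoriq-memory-controller";
		reg = <0x8000 0x1000>;
		interrupts = <16 2 1 23>;
		/* registers are accessed little-endian on this SoC */
		little-endian;
	};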
diff --git a/Documentation/devicetree/bindings/mfd/ac100.txt b/Documentation/devicetree/bindings/mfd/ac100.txt
new file mode 100644
index 000000000000..b8ef00667599
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ac100.txt
@@ -0,0 +1,54 @@
+X-Powers AC100 Codec/RTC IC Device Tree bindings
+
+AC100 is an audio codec and RTC subsystem combo IC. The 2 parts are
+separated, including power supplies and interrupt lines, but share
+a common register address space and host interface.
+
+Required properties:
+- compatible: "x-powers,ac100"
+- reg: The I2C slave address or RSB hardware address for the chip
+- sub-nodes:
+ - codec
+ - compatible: "x-powers,ac100-codec"
+ - interrupt-parent: The parent interrupt controller
+ - interrupts: SoC NMI / GPIO interrupt connected to the
+ IRQ_AUDIO pin
+ - #clock-cells: Shall be 0
+ - clock-output-names: "4M_adda"
+
+ - see clock/clock-bindings.txt for common clock bindings
+
+ - rtc
+ - compatible: "x-powers,ac100-rtc"
+ - interrupt-parent: The parent interrupt controller
+ - interrupts: SoC NMI / GPIO interrupt connected to the
+ IRQ_RTC pin
+ - clocks: A phandle to the codec's "4M_adda" clock
+ - #clock-cells: Shall be 1
+ - clock-output-names: "cko1_rtc", "cko2_rtc", "cko3_rtc"
+
+ - see clock/clock-bindings.txt for common clock bindings
+
+Example:
+
+ac100: codec@e89 {
+ compatible = "x-powers,ac100";
+ reg = <0xe89>;
+
+ ac100_codec: codec {
+ compatible = "x-powers,ac100-codec";
+ interrupt-parent = <&r_pio>;
+ interrupts = <0 9 IRQ_TYPE_LEVEL_LOW>; /* PL9 */
+ #clock-cells = <0>;
+ clock-output-names = "4M_adda";
+ };
+
+ ac100_rtc: rtc {
+ compatible = "x-powers,ac100-rtc";
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&ac100_codec>;
+ #clock-cells = <1>;
+ clock-output-names = "cko1_rtc", "cko2_rtc", "cko3_rtc";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/act8945a.txt b/Documentation/devicetree/bindings/mfd/act8945a.txt
index f71283055685..462819ac3da8 100644
--- a/Documentation/devicetree/bindings/mfd/act8945a.txt
+++ b/Documentation/devicetree/bindings/mfd/act8945a.txt
@@ -14,13 +14,6 @@ Example:
reg = <0x5b>;
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_charger_chglev>;
- active-semi,chglev-gpio = <&pioA 12 GPIO_ACTIVE_HIGH>;
- active-semi,input-voltage-threshold-microvolt = <6600>;
- active-semi,precondition-timeout = <40>;
- active-semi,total-timeout = <3>;
-
active-semi,vsel-high;
regulators {
@@ -73,4 +66,19 @@ Example:
regulator-always-on;
};
};
+
+ charger {
+ compatible = "active-semi,act8945a-charger";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_charger_chglev &pinctrl_charger_lbo &pinctrl_charger_irq>;
+ interrupt-parent = <&pioA>;
+ interrupts = <45 GPIO_ACTIVE_LOW>;
+
+ active-semi,chglev-gpios = <&pioA 12 GPIO_ACTIVE_HIGH>;
+ active-semi,lbo-gpios = <&pioA 72 GPIO_ACTIVE_LOW>;
+ active-semi,input-voltage-threshold-microvolt = <6600>;
+ active-semi,precondition-timeout = <40>;
+ active-semi,total-timeout = <3>;
+ status = "okay";
+ };
};
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index a6e2ea41160c..8f2e2822238d 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -85,6 +85,24 @@ Optional properties:
present, the number of values should be less than or equal to the
number of inputs, unspecified inputs will use the chip default.
+ - wlf,max-channels-clocked : The maximum number of channels to be clocked on
+ each AIF, useful for I2S systems with multiple data lines being mastered.
+ Specify one cell for each AIF to be configured, specify zero for AIFs that
+ should be handled normally.
+ If present, number of cells must be less than or equal to the number of
+ AIFs. If less than the number of AIFs, for cells that have not been
+ specified the corresponding AIFs will be treated as default setting.
+
+ - wlf,spk-fmt : PDM speaker data format, must contain 2 cells (OUT5 and OUT6).
+ See the datasheet for values.
+ The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
+ wm8998, wm1814)
+
+ - wlf,spk-mute : PDM speaker mute setting, must contain 2 cells (OUT5 and OUT6).
+ See the datasheet for values.
+ The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
+ wm8998, wm1814)
+
- DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
they are being externally supplied. As covered in
Documentation/devicetree/bindings/regulator/regulator.txt
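For illustration only (not part of this patch; the wm5102 node and the cell values are assumptions chosen to show the shape of the properties), the new AIF and PDM speaker properties might be filled in like this:

	codec: wm5102@1 {
		compatible = "wlf,wm5102";
		reg = <1>;

		/* clock at most 2 channels on AIF1, leave AIF2/AIF3 at defaults */
		wlf,max-channels-clocked = <2 0 0>;

		/* PDM speaker format and mute cells for OUT5 and OUT6;
		 * the OUT6 cell is ignored on wm5102 */
		wlf,spk-fmt = <0 0>;
		wlf,spk-mute = <0 0>;
	};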
diff --git a/Documentation/devicetree/bindings/mfd/aspeed-scu.txt b/Documentation/devicetree/bindings/mfd/aspeed-scu.txt
new file mode 100644
index 000000000000..4fc5b83726d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/aspeed-scu.txt
@@ -0,0 +1,18 @@
+The Aspeed System Control Unit manages the global behaviour of the SoC,
+configuring elements such as clocks, pinmux, and reset.
+
+Required properties:
+- compatible: One of:
+ "aspeed,ast2400-scu", "syscon", "simple-mfd"
+ "aspeed,g4-scu", "syscon", "simple-mfd"
+ "aspeed,ast2500-scu", "syscon", "simple-mfd"
+ "aspeed,g5-scu", "syscon", "simple-mfd"
+
+- reg: contains the offset and length of the SCU memory region
+
+Example:
+
+syscon: syscon@1e6e2000 {
+ compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
+ reg = <0x1e6e2000 0x1a8>;
+};
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 585a95546288..8f3ad9ab4637 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -10,7 +10,8 @@ axp809 (X-Powers)
Required properties:
- compatible: "x-powers,axp152", "x-powers,axp202", "x-powers,axp209",
- "x-powers,axp221", "x-powers,axp223", "x-powers,axp809"
+ "x-powers,axp221", "x-powers,axp223", "x-powers,axp806",
+ "x-powers,axp809"
- reg: The I2C slave address or RSB hardware address for the AXP chip
- interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
@@ -47,7 +48,6 @@ Optional properties for DCDC regulators:
probably makes sense for HiFi audio related
applications that aren't battery constrained.
-
AXP202/AXP209 regulators, type, and corresponding input supply names:
Regulator Type Supply Name Notes
@@ -86,6 +86,30 @@ LDO_IO1 : LDO : ips-supply : GPIO 1
RTC_LDO : LDO : ips-supply : always on
DRIVEVBUS : Enable output : drivevbus-supply : external regulator
+AXP806 regulators, type, and corresponding input supply names:
+
+Regulator Type Supply Name Notes
+--------- ---- ----------- -----
+DCDCA : DC-DC buck : vina-supply : poly-phase capable
+DCDCB : DC-DC buck : vinb-supply : poly-phase capable
+DCDCC : DC-DC buck : vinc-supply : poly-phase capable
+DCDCD : DC-DC buck : vind-supply : poly-phase capable
+DCDCE : DC-DC buck : vine-supply : poly-phase capable
+ALDO1 : LDO : aldoin-supply : shared supply
+ALDO2 : LDO : aldoin-supply : shared supply
+ALDO3 : LDO : aldoin-supply : shared supply
+BLDO1 : LDO : bldoin-supply : shared supply
+BLDO2 : LDO : bldoin-supply : shared supply
+BLDO3 : LDO : bldoin-supply : shared supply
+BLDO4 : LDO : bldoin-supply : shared supply
+CLDO1 : LDO : cldoin-supply : shared supply
+CLDO2 : LDO : cldoin-supply : shared supply
+CLDO3 : LDO : cldoin-supply : shared supply
+SW : On/Off Switch : swin-supply
+
+Additionally, the AXP806 DC-DC regulators support poly-phase arrangements
+for higher output current. The possible groupings are: A+B, A+B+C, D+E.
+
AXP809 regulators, type, and corresponding input supply names:
Regulator Type Supply Name Notes
diff --git a/Documentation/devicetree/bindings/mfd/lp873x.txt b/Documentation/devicetree/bindings/mfd/lp873x.txt
new file mode 100644
index 000000000000..52766c2035f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/lp873x.txt
@@ -0,0 +1,59 @@
+TI LP873X PMIC MFD driver
+
+Required properties:
+ - compatible: "ti,lp8732", "ti,lp8733"
+ - reg: I2C slave address.
+ - gpio-controller: Marks the device node as a GPIO Controller.
+ - #gpio-cells: Should be two. The first cell is the pin number and
+ the second cell is used to specify flags.
+ See ../gpio/gpio.txt for more information.
+ - regulators: List of child nodes that specify the regulator
+ initialization data.
+Example:
+
+pmic: lp8733@60 {
+ compatible = "ti,lp8733";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ regulators {
+ lp8733_buck0: buck0 {
+ regulator-name = "lp8733-buck0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <1500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-ramp-delay = <10000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ lp8733_buck1: buck1 {
+ regulator-name = "lp8733-buck1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <1500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-ramp-delay = <10000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ lp8733_ldo0: ldo0 {
+ regulator-name = "lp8733-ldo0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ lp8733_ldo1: ldo1 {
+ regulator-name = "lp8733-ldo1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index d3425846aa5b..6a1ae3a2b77f 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -17,28 +17,28 @@ Required properties:
- interrupt-parent : The parent interrupt controller.
Optional properties:
-- regulators : The regulators of max77693 have to be instantiated under subnod
+- regulators : The regulators of max77693 have to be instantiated under subnode
named "regulators" using the following format.
regulators {
- regualtor-compatible = ESAFEOUT1/ESAFEOUT2/CHARGER
- standard regulator constratints[*].
+ regulator-compatible = ESAFEOUT1/ESAFEOUT2/CHARGER
+ standard regulator constraints[*].
};
[*] refer Documentation/devicetree/bindings/regulator/regulator.txt
- haptic : The MAX77693 haptic device utilises a PWM controlled motor to provide
users with tactile feedback. PWM period and duty-cycle are varied in
- order to provide the approprite level of feedback.
+ order to provide the appropriate level of feedback.
Required properties:
- - compatible : Must be "maxim,max77693-hpatic"
+ - compatible : Must be "maxim,max77693-haptic"
- haptic-supply : power supply for the haptic motor
[*] refer Documentation/devicetree/bindings/regulator/regulator.txt
- pwms : phandle to the physical PWM(Pulse Width Modulation) device.
PWM properties should be named "pwms". And number of cell is different
for each pwm device.
- To get more informations, please refer to documentaion.
+ To get more information, please refer to documentation.
[*] refer Documentation/devicetree/bindings/pwm/pwm.txt
- charger : Node configuring the charger driver.
diff --git a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
index f24f33409164..37a088f9a648 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
@@ -62,6 +62,7 @@ The below bindings specify the set of valid subnodes.
"qcom,pm8058-rtc"
"qcom,pm8921-rtc"
"qcom,pm8941-rtc"
+ "qcom,pm8018-rtc"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
index b98b291a31ba..485bc59fcc48 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt
@@ -13,6 +13,7 @@ frequencies.
"qcom,rpm-msm8660"
"qcom,rpm-msm8960"
"qcom,rpm-ipq8064"
+ "qcom,rpm-mdm9615"
- reg:
Usage: required
@@ -59,6 +60,7 @@ Regulator nodes are identified by their compatible:
"qcom,rpm-pm8058-regulators"
"qcom,rpm-pm8901-regulators"
"qcom,rpm-pm8921-regulators"
+ "qcom,rpm-pm8018-regulators"
- vdd_l0_l1_lvs-supply:
- vdd_l2_l11_l12-supply:
@@ -137,6 +139,15 @@ Regulator nodes are identified by their compatible:
Definition: reference to regulator supplying the input pin, as
described in the data sheet
+- vin_lvs1-supply:
+- vdd_l7-supply:
+- vdd_l8-supply:
+- vdd_l9_l10_l11_l12-supply:
+ Usage: optional (pm8018 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
of the pmics below.
@@ -156,6 +167,10 @@ pm8921:
l29, lvs1, lvs2, lvs3, lvs4, lvs5, lvs6, lvs7, usb-switch, hdmi-switch,
ncp
+pm8018:
+ s1, s2, s3, s4, s5, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
+ l12, l14, lvs1
+
The content of each sub-node is defined by the standard binding for regulators -
see regulator.txt - with additional custom properties described below:
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 4ca6aab4273a..9636ae8d8d41 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -1,7 +1,11 @@
-RK808 Power Management Integrated Circuit
+RK8XX Power Management Integrated Circuit
+
+Current members of the rk8xx family:
+rk808
+rk818
Required properties:
-- compatible: "rockchip,rk808"
+- compatible: "rockchip,rk808", "rockchip,rk818"
- reg: I2C slave address
- interrupt-parent: The parent interrupt controller.
- interrupts: the interrupt outputs of the controller.
@@ -13,6 +17,8 @@ Optional properties:
default output clock name
- rockchip,system-power-controller: Telling whether or not this pmic is controlling
the system power.
+
+Optional RK808 properties:
- vcc1-supply: The input supply for DCDC_REG1
- vcc2-supply: The input supply for DCDC_REG2
- vcc3-supply: The input supply for DCDC_REG3
@@ -29,7 +35,20 @@ Optional properties:
the gpio controller. If DVS GPIOs aren't present, voltage changes will happen
very quickly with no slow ramp time.
-Regulators: All the regulators of RK808 to be instantiated shall be
+Optional RK818 properties:
+- vcc1-supply: The input supply for DCDC_REG1
+- vcc2-supply: The input supply for DCDC_REG2
+- vcc3-supply: The input supply for DCDC_REG3
+- vcc4-supply: The input supply for DCDC_REG4
+- boost-supply: The input supply for DCDC_BOOST
+- vcc6-supply: The input supply for LDO_REG1 and LDO_REG2
+- vcc7-supply: The input supply for LDO_REG3, LDO_REG5 and LDO_REG7
+- vcc8-supply: The input supply for LDO_REG4, LDO_REG6 and LDO_REG8
+- vcc9-supply: The input supply for LDO_REG9 and SWITCH_REG
+- h_5v-supply: The input supply for HDMI_SWITCH
+- usb-supply: The input supply for OTG_SWITCH
+
+Regulators: All the regulators of RK8XX to be instantiated shall be
listed in a child node named 'regulators'. Each regulator is represented
by a child node of the 'regulators' node.
@@ -48,6 +67,18 @@ number as described in RK808 datasheet.
- SWITCH_REGn
- valid values for n are 1 to 2
+Following regulators of the RK818 PMIC block are supported. Note that
+the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
+number as described in RK818 datasheet.
+
+ - DCDC_REGn
+ - valid values for n are 1 to 4.
+ - LDO_REGn
+ - valid values for n are 1 to 9.
+ - SWITCH_REG
+ - HDMI_SWITCH
+ - OTG_SWITCH
+
Standard regulator bindings are used inside regulator subnodes. Check
Documentation/devicetree/bindings/regulator/regulator.txt
for more details
diff --git a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt
new file mode 100644
index 000000000000..c110e118b79f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt
@@ -0,0 +1,70 @@
+Samsung Exynos SoC Low Power Audio Subsystem (LPASS)
+
+Required properties:
+
+ - compatible : "samsung,exynos5433-lpass"
+ - reg : should contain the LPASS top SFR region location
+ and size
+ - samsung,pmu-syscon : the phandle to the Power Management Unit node
+ - #address-cells : should be 1
+ - #size-cells : should be 1
+ - ranges : must be present
+
+Each IP block of the Low Power Audio Subsystem should be specified as
+an optional sub-node. For "samsung,exynos5433-lpass" compatible this includes:
+UART, SLIMBUS, PCM, I2S, DMAC, Timers 0...4, VIC, WDT 0...1 devices.
+
+Bindings of the sub-nodes are described in:
+ ../serial/samsung_uart.txt
+ ../sound/samsung-i2s.txt
+ ../dma/arm-pl330.txt
+
+
+Example:
+
+audio-subsystem {
+ compatible = "samsung,exynos5433-lpass";
+ reg = <0x11400000 0x100>, <0x11500000 0x08>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ adma: adma@11420000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x11420000 0x1000>;
+ interrupts = <0 73 0>;
+ clocks = <&cmu_aud CLK_ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ i2s0: i2s0@11440000 {
+ compatible = "samsung,exynos7-i2s";
+ reg = <0x11440000 0x100>;
+ dmas = <&adma 0 &adma 2>;
+ dma-names = "tx", "rx";
+ interrupts = <0 70 0>;
+ clocks = <&cmu_aud CLK_PCLK_AUD_I2S>,
+ <&cmu_aud CLK_SCLK_AUD_I2S>,
+ <&cmu_aud CLK_SCLK_I2S_BCLK>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ status = "disabled";
+ };
+
+ serial_3: serial@11460000 {
+ compatible = "samsung,exynos5433-uart";
+ reg = <0x11460000 0x100>;
+ interrupts = <0 67 0>;
+ clocks = <&cmu_aud CLK_PCLK_AUD_UART>,
+ <&cmu_aud CLK_SCLK_AUD_UART>;
+ clock-names = "uart", "clk_uart_baud0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_aud_bus>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
index 3fb68bfefc8b..f9065a5781a2 100644
--- a/Documentation/devicetree/bindings/mfd/stmpe.txt
+++ b/Documentation/devicetree/bindings/mfd/stmpe.txt
@@ -4,7 +4,7 @@ STMPE is an MFD device which may expose the following inbuilt devices: gpio,
keypad, touchscreen, adc, pwm, rotator.
Required properties:
- - compatible : "st,stmpe[610|801|811|1601|2401|2403]"
+ - compatible : "st,stmpe[610|801|811|1600|1601|2401|2403]"
- reg : I2C/SPI address of the device
Optional properties:
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index e6afdfa3543d..9a98ee7c323d 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -12,6 +12,7 @@ Required properties:
- interrupt-parent: The parent interrupt controller
- gpio-controller:
- #gpio-cells = <1>: twl6040 provides GPO lines.
+- #clock-cells = <0>; twl6040 is a provider of pdmclk which is used by McPDM
- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
- vio-supply: Regulator for the twl6040 VIO supply
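A minimal sketch of the clock-provider side (not from the patch; the interrupt and GPIO specifiers are illustrative):

	twl6040: twl@4b {
		compatible = "ti,twl6040";
		reg = <0x4b>;
		interrupts = <0 119 4>;
		interrupt-parent = <&gic>;
		twl6040,audpwron-gpio = <&gpio4 31 0>;
		vio-supply = <&v1v8>;
		/* clock provider for the pdmclk used by McPDM */
		#clock-cells = <0>;
	};

A McPDM consumer would then typically reference it with clocks = <&twl6040> and clock-names = "pdmclk".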
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 3404afa9b938..49df630bd44f 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -36,6 +36,9 @@ Optional Properties:
- #clock-cells: If specified this should be the value <0>. With this property
in place we will export a clock representing the Card Clock. This clock
is expected to be consumed by our PHY. You must also specify
+ - xlnx,fails-without-test-cd: when present, the controller does not work
+ when the CD line is not connected properly. Test mode can be used to
+ force the controller to function.
Example:
sdhci@e0100000 {
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
index 82847174c37d..733b64a4d8eb 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
@@ -8,7 +8,9 @@ on Device Tree properties to enable them for SoC/Board combinations
that support them.
Required properties:
-- compatible: "brcm,bcm7425-sdhci"
+- compatible: should be one of the following
+ - "brcm,bcm7425-sdhci"
+ - "brcm,bcm7445-sdhci"
Refer to clocks/clock-bindings.txt for generic clock consumer properties.
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
index ce0e76749671..e25436861867 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
@@ -16,6 +16,8 @@ Optional properties:
See ../clocks/clock-bindings.txt for details.
- clock-names : Must include the following entry:
"ext_clock" (External clock provided to the card).
+- post-power-on-delay-ms : Delay in ms after powering the card and
+ de-asserting the reset-gpios (if any)
Example:
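As a sketch (the GPIO, clock phandle and delay value are illustrative, not taken from the patch), a power sequence node using the new property might look like:

	sdio_pwrseq: sdio-pwrseq {
		compatible = "mmc-pwrseq-simple";
		reset-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
		clocks = <&clk_32k>;
		clock-names = "ext_clock";
		/* wait 10 ms after power-on and reset de-assertion */
		post-power-on-delay-ms = <10>;
	};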
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 22d1e1f3f38b..8a377827695b 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -75,6 +75,17 @@ Optional SDIO properties:
- wakeup-source: Enables wake up of host system on SDIO IRQ assertion
(Legacy property supported: "enable-sdio-wakeup")
+MMC power
+---------
+
+Controllers may implement power control from both the connected cards and
+the IO signaling (for example to change to high-speed 1.8V signalling). If
+the system supports this, then the following two properties should point
+to valid regulator nodes:
+
+- vqmmc-supply: supply node for IO line power
+- vmmc-supply: supply node for card's power
+
MMC power sequences:
--------------------
@@ -102,11 +113,13 @@ Required host node properties when using function subnodes:
- #size-cells: should be zero.
Required function subnode properties:
-- compatible: name of SDIO function following generic names recommended practice
- reg: Must contain the SDIO function number of the function this subnode
describes. A value of 0 denotes the memory SD function, values from
1 to 7 denote the SDIO functions.
+Optional function subnode properties:
+- compatible: name of SDIO function following generic names recommended practice
+
Examples
--------
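For illustration only (not part of the patch; the controller, address and regulator labels are assumptions), a host node wiring up the two supplies described under "MMC power" above:

	sdhci@e0100000 {
		compatible = "arasan,sdhci-8.9a";
		reg = <0xe0100000 0x1000>;
		bus-width = <4>;
		vmmc-supply = <&vcc_sd>;    /* card power */
		vqmmc-supply = <&vccio_sd>; /* IO line power, e.g. for 1.8V signalling */
	};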
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 4bf41d833804..55cdd804cdba 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -8,7 +8,12 @@ as the speed of SD standard 3.0.
Absolute maximum transfer rate is 200MB/s
Required properties:
- - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
+ - compatible : should be one of:
+ * "allwinner,sun4i-a10-mmc"
+ * "allwinner,sun5i-a13-mmc"
+ * "allwinner,sun7i-a20-mmc"
+ * "allwinner,sun9i-a80-mmc"
+ * "allwinner,sun50i-a64-mmc"
- reg : mmc controller base registers
- clocks : a list with 4 phandle + clock specifier pairs
- clock-names : must contain "ahb", "mmc", "output" and "sample"
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 8636f5ae97e5..4e00e859e885 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -39,6 +39,10 @@ Required Properties:
Optional properties:
+* resets: phandle + reset specifier pair, intended to represent hardware
+ reset signal present internally in some host controller IC designs.
+ See Documentation/devicetree/bindings/reset/reset.txt for details.
+
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
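As an illustrative sketch only (the reset controller phandle and index are hypothetical), the new resets property sits alongside the existing clock properties:

	dwmmc0: dwmmc@12200000 {
		compatible = "snps,dw-mshc";
		reg = <0x12200000 0x1000>;
		interrupts = <0 75 0>;
		clocks = <&clock 28>, <&clock 29>;
		clock-names = "biu", "ciu";
		/* hardware reset signal internal to the host controller IC */
		resets = <&rst 20>;
	};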
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 0f610d4b5b00..13df9c2399c3 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -23,6 +23,7 @@ Required properties:
"renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
+ "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
Optional properties:
- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index e41b2d59ca7f..f591ab782dbc 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -47,6 +47,9 @@ Optional properties:
Valid values are between 0 to 7, that maps to
273, 589, 899, 1222, 1480, 1806, 2147, 2464 ps
Default value is 2, which corresponds to 899 ps
+- rxlos-gpios: Input gpio from SFP+ module to indicate availability of
+ incoming signal.
+
Example:
menetclk: menetclk {
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
new file mode 100644
index 000000000000..ba5ecc1041a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,amac.txt
@@ -0,0 +1,24 @@
+Broadcom AMAC Ethernet Controller Device Tree Bindings
+-------------------------------------------------------------
+
+Required properties:
+ - compatible: "brcm,amac" or "brcm,nsp-amac"
+ - reg: Address and length of the GMAC registers,
+ Address and length of the GMAC IDM registers
+ - reg-names: Names of the registers. Must have both "amac_base" and
+ "idm_base"
+ - interrupts: Interrupt number
+
+Optional properties:
+- mac-address: See ethernet.txt file in the same directory
+
+Examples:
+
+amac0: ethernet@18022000 {
+ compatible = "brcm,nsp-amac";
+ reg = <0x18022000 0x1000>,
+ <0x18110000 0x1000>;
+ reg-names = "amac_base", "idm_base";
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 30d487597ecb..fb40891ee606 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -6,9 +6,13 @@ Required properties:
- reg: addresses and length of the register sets for the device, must be 6
pairs of register addresses and lengths
- interrupts: interrupts for the devices, must be two interrupts
+- #address-cells: must be 1, see dsa/dsa.txt
+- #size-cells: must be 0, see dsa/dsa.txt
+
+Deprecated binding required properties:
+
- dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt
- dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt
-- #size-cells: must be 0
- #address-cells: must be 2, see dsa/dsa.txt
Subnodes:
@@ -48,6 +52,45 @@ switch_top@f0b00000 {
ethernet_switch@0 {
compatible = "brcm,bcm7445-switch-v4.0";
#size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x0 0x40000
+ 0x40000 0x110
+ 0x40340 0x30
+ 0x40380 0x30
+ 0x40400 0x34
+ 0x40600 0x208>;
+ reg-names = "core", "reg", intrl2_0", "intrl2_1",
+ "fcb, "acb";
+ interrupts = <0 0x18 0
+ 0 0x19 0>;
+ brcm,num-gphy = <1>;
+ brcm,num-rgmii-ports = <2>;
+ brcm,fcb-pause-override;
+ brcm,acb-packets-inflight;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "gphy";
+ reg = <0>;
+ };
+ };
+ };
+};
+
+Example using the old DSA DeviceTree binding:
+
+switch_top@f0b00000 {
+ compatible = "simple-bus";
+ #size-cells = <1>;
+ #address-cells = <1>;
+ ranges = <0 0xf0b00000 0x40804>;
+
+ ethernet_switch@0 {
+ compatible = "brcm,bcm7445-switch-v4.0";
+ #size-cells = <0>;
#address-cells = <2>;
reg = <0x0 0x40000
0x40000 0x110
diff --git a/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt b/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
new file mode 100644
index 000000000000..022946caa7e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
@@ -0,0 +1,24 @@
+Broadcom GMAC Ethernet Controller Device Tree Bindings
+-------------------------------------------------------------
+
+Required properties:
+ - compatible: "brcm,bgmac-nsp"
+ - reg: Address and length of the GMAC registers,
+ Address and length of the GMAC IDM registers
+ - reg-names: Names of the registers. Must have both "gmac_base" and
+ "idm_base"
+ - interrupts: Interrupt number
+
+Optional properties:
+- mac-address: See ethernet.txt file in the same directory
+
+Examples:
+
+gmac0: ethernet@18022000 {
+ compatible = "brcm,bgmac-nsp";
+ reg = <0x18022000 0x1000>,
+ <0x18110000 0x1000>;
+ reg-names = "gmac_base", "idm_base";
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
new file mode 100644
index 000000000000..9c67ee4890d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -0,0 +1,89 @@
+* Qualcomm Atheros QCA8xxx switch family
+
+Required properties:
+
+- compatible: should be "qca,qca8337"
+- #size-cells: must be 0
+- #address-cells: must be 1
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt. As the QCA8K switches do not have an N:N mapping of
+port and PHY id, each subnode describing a port needs to have a valid phandle
+referencing the internal PHY connected to it. The CPU port of this switch is
+always port 0.
+
+Example:
+
+ &mdio0 {
+ phy_port1: phy@0 {
+ reg = <0>;
+ };
+
+ phy_port2: phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: phy@2 {
+ reg = <2>;
+ };
+
+ phy_port4: phy@3 {
+ reg = <3>;
+ };
+
+ phy_port5: phy@4 {
+ reg = <4>;
+ };
+
+ switch0@0 {
+ compatible = "qca,qca8337";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy_port1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy_port2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy_port3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ phy-handle = <&phy_port4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-handle = <&phy_port5>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 5d88f37480b6..e1d76812419c 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -11,8 +11,8 @@ The following properties are common to the Ethernet controllers:
the maximum frame size (there's contradiction in ePAPR).
- phy-mode: string, operation mode of the PHY interface; supported values are
"mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
- "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
- standard property;
+ "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a
+ de-facto standard property;
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
- phy-handle: phandle, specifies a reference to a node representing a PHY
device; this property is described in ePAPR and so preferred;
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index b5a42df4c928..1506e948610c 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -21,6 +21,7 @@ Required properties:
- clock-names: Tuple listing input clock names.
Required elements: 'pclk', 'hclk'
Optional elements: 'tx_clk'
+ Optional elements: 'rx_clk', applies to "cdns,zynqmp-gem" only (see the sketch below)
- clocks: Phandles to input clocks.
Optional properties for PHY child node:
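
An illustrative sketch (not part of the patch) of a "cdns,zynqmp-gem" node using the
optional 'rx_clk' entry; the unit address, interrupt and clock phandles are placeholders:

	gem0: ethernet@ff0b0000 {
		compatible = "cdns,zynqmp-gem";
		reg = <0x0 0xff0b0000 0x0 0x1000>;
		interrupts = <0 57 4>;
		clock-names = "pclk", "hclk", "tx_clk", "rx_clk";
		clocks = <&clk_lpd>, <&clk_gem0>, <&clk_gem0_tx>, <&clk_gem0_rx>;
		phy-mode = "rgmii-id";
		phy-handle = <&phy0>;
	};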
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 32eaaca04d9b..f09525772369 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -24,14 +24,17 @@ Required properties:
Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
-
+- mediatek,hwlro: boolean property, present if the hardware supports LRO (Large Receive Offload)
* Ethernet MAC node
Required properties:
- compatible: Should be "mediatek,eth-mac"
- reg: The number of the MAC
-- phy-handle: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory. In addition, the
+ phy-mode "trgmii" must be provided when reg is equal to 0
+ and the MAC uses a fixed link to connect to an internal
+ switch such as the MT7530 (see the sketch after the example).
Example:
@@ -51,6 +54,7 @@ eth: ethernet@1b100000 {
reset-names = "eth";
mediatek,ethsys = <&ethsys>;
mediatek,pctl = <&syscfg_pctl_a>;
+ mediatek,hwlro;
#address-cells = <1>;
#size-cells = <0>;
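
To illustrate the phy-mode note above, a hedged sketch of the MAC subnode for
reg 0 using a fixed link to an internal MT7530 switch; the node name and link
parameters are assumptions, not taken from the patch:

	gmac0: mac@0 {
		compatible = "mediatek,eth-mac";
		reg = <0>;
		phy-mode = "trgmii";

		fixed-link {
			speed = <1000>;
			full-duplex;
			pause;
		};
	};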
diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
index ec633d74a8a8..89e62ddc69ca 100644
--- a/Documentation/devicetree/bindings/net/meson-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
@@ -1,18 +1,32 @@
* Amlogic Meson DWMAC Ethernet controller
The device inherits all the properties of the dwmac/stmmac devices
-described in the file net/stmmac.txt with the following changes.
+described in the file stmmac.txt in the current directory with the
+following changes.
-Required properties:
+Required properties on all platforms:
-- compatible: should be "amlogic,meson6-dwmac" along with "snps,dwmac"
- and any applicable more detailed version number
- described in net/stmmac.txt
+- compatible: Depending on the platform this should be one of:
+ - "amlogic,meson6-dwmac"
+ - "amlogic,meson8b-dwmac"
+ - "amlogic,meson-gxbb-dwmac"
+ Additionally "snps,dwmac" and any applicable more
+ detailed version number described in net/stmmac.txt
+ should be used.
-- reg: should contain a register range for the dwmac controller and
- another one for the Amlogic specific configuration
+- reg: The first register range should be the one of the DWMAC
+ controller. The second range is for the Amlogic specific
+ configuration (for example the PRG_ETHERNET register range
+ on Meson8b and newer)
-Example:
+Required properties on Meson8b and newer:
+- clock-names: Should contain the following:
+ - "stmmaceth" - see stmmac.txt
+ - "clkin0" - first parent clock of the internal mux
+ - "clkin1" - second parent clock of the internal mux
+
+
+Example for Meson6:
ethmac: ethernet@c9410000 {
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
@@ -23,3 +37,18 @@ Example:
clocks = <&clk81>;
clock-names = "stmmaceth";
}
+
+Example for GXBB:
+ ethmac: ethernet@c9410000 {
+ compatible = "amlogic,meson-gxbb-dwmac", "snps,dwmac";
+ reg = <0x0 0xc9410000 0x0 0x10000>,
+ <0x0 0xc8834540 0x0 0x8>;
+ interrupts = <0 8 1>;
+ interrupt-names = "macirq";
+ clocks = <&clkc CLKID_ETH>,
+ <&clkc CLKID_FCLK_DIV2>,
+ <&clkc CLKID_MPLL2>;
+ clock-names = "stmmaceth", "clkin0", "clkin1";
+ phy-mode = "rgmii";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
new file mode 100644
index 000000000000..99c7eb0a00c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
@@ -0,0 +1,58 @@
+* Microsemi - VSC8531 Gigabit Ethernet PHY
+
+Required properties:
+- compatible : Should contain phy id as "ethernet-phy-idAAAA.BBBB"
+ The PHY device uses the binding described in
+ Documentation/devicetree/bindings/net/phy.txt
+
+Optional properties:
+- vsc8531,vddmac : The VDDMAC supply voltage, in mV.
+- vsc8531,edge-slowdown : Percentage by which the edge should be slowed down
+ relative to the fastest possible edge time. The
+ negative sign need not be entered.
+ Edge rate sets the drive strength of the MAC
+ interface output signals. Changing the drive
+ strength will affect the edge rate of the output
+ signal. The goal of this setting is to help
+ reduce electrical emission (EMI) by being able
+ to reprogram drive strength and in effect slow
+ down the edge rate if desired. Table 1 shows the
+ impact to the edge rate per VDDMAC supply for each
+ drive strength setting.
+ Ref: Table:1 - Edge rate change below.
+
+Note: see dt-bindings/net/mscc-phy-vsc8531.h for applicable values
+
+Table: 1 - Edge rate change
+----------------------------------------------------------------|
+| Edge Rate Change (VDDMAC) |
+| |
+| 3300 mV 2500 mV 1800 mV 1500 mV |
+|---------------------------------------------------------------|
+| Default Default Default Default |
+| (Fastest) (recommended) (recommended) |
+|---------------------------------------------------------------|
+| -2% -3% -5% -6% |
+|---------------------------------------------------------------|
+| -4% -6% -9% -14% |
+|---------------------------------------------------------------|
+| -7% -10% -16% -21% |
+|(recommended) (recommended) |
+|---------------------------------------------------------------|
+| -10% -14% -23% -29% |
+|---------------------------------------------------------------|
+| -17% -23% -35% -42% |
+|---------------------------------------------------------------|
+| -29% -37% -52% -58% |
+|---------------------------------------------------------------|
+| -53% -63% -76% -77% |
+| (slowest) |
+|---------------------------------------------------------------|
+
+Example:
+
+ vsc8531_0: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0007.0570";
+ vsc8531,vddmac = <3300>;
+ vsc8531,edge-slowdown = <21>;
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom-emac.txt b/Documentation/devicetree/bindings/net/qcom-emac.txt
new file mode 100644
index 000000000000..346e6c7f47b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom-emac.txt
@@ -0,0 +1,111 @@
+Qualcomm Technologies EMAC Gigabit Ethernet Controller
+
+This network controller consists of two devices: a MAC and an SGMII
+internal PHY. Each device is represented by a device tree node. A phandle
+connects the MAC node to its corresponding internal phy node. Another
+phandle points to the external PHY node.
+
+Required properties:
+
+MAC node:
+- compatible : Should be "qcom,fsm9900-emac".
+- reg : Offset and length of the register regions for the device
+- interrupts : Interrupt number used by this controller
+- mac-address : The 6-byte MAC address. If present, it is the default
+ MAC address.
+- internal-phy : phandle to the internal PHY node
+- phy-handle : phandle to the external PHY node
+
+Internal PHY node:
+- compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii".
+- reg : Offset and length of the register region(s) for the device
+- interrupts : Interrupt number used by this controller
+
+The external phy child node:
+- reg : The phy address
+
+Example:
+
+FSM9900:
+
+soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ emac0: ethernet@feb20000 {
+ compatible = "qcom,fsm9900-emac";
+ reg = <0xfeb20000 0x10000>,
+ <0xfeb36000 0x1000>;
+ interrupts = <76>;
+
+ clocks = <&gcc 0>, <&gcc 1>, <&gcc 3>, <&gcc 4>, <&gcc 5>,
+ <&gcc 6>, <&gcc 7>;
+ clock-names = "axi_clk", "cfg_ahb_clk", "high_speed_clk",
+ "mdio_clk", "tx_clk", "rx_clk", "sys_clk";
+
+ internal-phy = <&emac_sgmii>;
+
+ phy-handle = <&phy0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio_pins_a>;
+ };
+
+ emac_sgmii: ethernet@feb38000 {
+ compatible = "qcom,fsm9900-emac-sgmii";
+ reg = <0xfeb38000 0x1000>;
+ interrupts = <80>;
+ };
+
+ tlmm: pinctrl@fd510000 {
+ compatible = "qcom,fsm9900-pinctrl";
+
+ mdio_pins_a: mdio {
+ state {
+ pins = "gpio123", "gpio124";
+ function = "mdio";
+ };
+ };
+ };
+
+
+QDF2432:
+
+soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ emac0: ethernet@38800000 {
+ compatible = "qcom,fsm9900-emac";
+ reg = <0x0 0x38800000 0x0 0x10000>,
+ <0x0 0x38816000 0x0 0x1000>;
+ interrupts = <0 256 4>;
+
+ clocks = <&gcc 0>, <&gcc 1>, <&gcc 3>, <&gcc 4>, <&gcc 5>,
+ <&gcc 6>, <&gcc 7>;
+ clock-names = "axi_clk", "cfg_ahb_clk", "high_speed_clk",
+ "mdio_clk", "tx_clk", "rx_clk", "sys_clk";
+
+ internal-phy = <&emac_sgmii>;
+
+ phy-handle = <&phy0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+
+ emac_sgmii: ethernet@410400 {
+ compatible = "qcom,qdf2432-emac-sgmii";
+ reg = <0x0 0x00410400 0x0 0xc00>, /* Base address */
+ <0x0 0x00410000 0x0 0x400>; /* Per-lane digital */
+ interrupts = <0 254 1>;
+ };
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index cccd945fc45b..95383c5131fc 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -3,8 +3,12 @@ Rockchip SoC RK3288 10/100/1000 Ethernet driver(GMAC)
The device node has following properties.
Required properties:
- - compatible: Can be one of "rockchip,rk3228-gmac", "rockchip,rk3288-gmac",
- "rockchip,rk3368-gmac"
+ - compatible: should be "rockchip,<name>-gmac" (see the sketch below)
+ "rockchip,rk3228-gmac": found on RK322x SoCs
+ "rockchip,rk3288-gmac": found on RK3288 SoCs
+ "rockchip,rk3366-gmac": found on RK3366 SoCs
+ "rockchip,rk3368-gmac": found on RK3368 SoCs
+ "rockchip,rk3399-gmac": found on RK3399 SoCs
- reg: addresses and length of the register sets for the device.
- interrupts: Should contain the GMAC interrupts.
- interrupt-names: Should contain the interrupt names "macirq".
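
As a rough illustration of the per-SoC compatible strings above, an RK3399 node
might begin like this; the unit address and interrupt are placeholders, and the
remaining clock, reset and grf properties follow the unchanged part of this binding:

	gmac: ethernet@fe300000 {
		compatible = "rockchip,rk3399-gmac";
		reg = <0x0 0xfe300000 0x0 0x10000>;
		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "macirq";
		phy-mode = "rgmii";
	};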
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 2f6ec85fda8e..0115c85a2425 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -5,6 +5,8 @@ interface contains.
Required properties:
- compatible: "renesas,gether-r8a7740" if the device is a part of R8A7740 SoC.
+ "renesas,ether-r8a7743" if the device is a part of R8A7743 SoC.
+ "renesas,ether-r8a7745" if the device is a part of R8A7745 SoC.
"renesas,ether-r8a7778" if the device is a part of R8A7778 SoC.
"renesas,ether-r8a7779" if the device is a part of R8A7779 SoC.
"renesas,ether-r8a7790" if the device is a part of R8A7790 SoC.
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 3fed3c124411..16c3a9501f5d 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -3,9 +3,11 @@
Required properties:
- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
- reg : Address and length of the io space for SMSC LAN
-- interrupts : Should contain SMSC LAN interrupt line
-- interrupt-parent : Should be the phandle for the interrupt controller
- that services interrupts for this device
+- interrupts : one or two interrupt specifiers
+ - The first interrupt is the SMSC LAN interrupt line
+ - The second interrupt (if present) is the PME (power
+ management event) interrupt that is able to wake up the host
+ system with a 50ms pulse on network activity
- phy-mode : See ethernet.txt file in the same directory
Optional properties:
@@ -21,6 +23,10 @@ Optional properties:
external PHY
- smsc,save-mac-address : Indicates that mac address needs to be saved
before resetting the controller
+- reset-gpios : a GPIO line connected to the RESET (active low) signal
+ of the device. On many systems this is wired high so the device goes
+ out of reset at power-on, but if it is under program control, this
+ optional GPIO can be used to bring the device out of reset.
Examples:
@@ -29,7 +35,8 @@ lan9220@f4000000 {
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio1>;
- interrupts = <31>;
+ interrupts = <31>, <32>;
+ reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
reg-io-width = <4>;
smsc,irq-push-pull;
};
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
index 51f8d2eba8d8..d93f71ce8346 100644
--- a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -1,21 +1,111 @@
* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+This binding supports the Synopsys Designware Ethernet QoS (Quality Of Service)
+IP block. The IP supports multiple options for bus type, clocking and reset
+structure, and feature list. Consequently, a number of properties and list
+entries in properties are marked as optional, or only required in specific HW
+configurations.
Required properties:
-- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- compatible: One of:
+ - "axis,artpec6-eqos", "snps,dwc-qos-ethernet-4.10"
+ Represents the IP core when integrated into the Axis ARTPEC-6 SoC.
+ - "nvidia,tegra186-eqos", "snps,dwc-qos-ethernet-4.10"
+ Represents the IP core when integrated into the NVIDIA Tegra186 SoC.
+ - "snps,dwc-qos-ethernet-4.10"
+ This combination is deprecated. It should be treated as equivalent to
+ "axis,artpec6-eqos", "snps,dwc-qos-ethernet-4.10". It is supported to be
+ compatible with earlier revisions of this binding.
- reg: Address and length of the register set for the device
-- clocks: Phandles to the reference clock and the bus clock
-- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
- for the bus clock.
+- clocks: Phandle and clock specifiers for each entry in clock-names, in the
+ same order. See ../clock/clock-bindings.txt.
+- clock-names: May contain any/all of the following depending on the IP
+ configuration, in any order:
+ - "tx"
+ The EQOS transmit path clock. The HW signal name is clk_tx_i.
+ In some configurations (e.g. GMII/RGMII), this clock also drives the PHY TX
+ path. In other configurations, other clocks (such as tx_125, rmii) may
+ drive the PHY TX path.
+ - "rx"
+ The EQOS receive path clock. The HW signal name is clk_rx_i.
+ In some configurations (e.g. GMII/RGMII), this clock is derived from the
+ PHY's RX clock output. In other configurations, other clocks (such as
+ rx_125, rmii) may drive the EQOS RX path.
+ In cases where the PHY clock is directly fed into the EQOS receive path
+ without intervening logic, the DT need not represent this clock, since it
+ is assumed to be fully under the control of the PHY device/driver. In
+ cases where SoC integration adds additional logic to this path, such as a
+ SW-controlled clock gate, this clock should be represented in DT.
+ - "slave_bus"
+ The CPU/slave-bus (CSR) interface clock. This applies to any bus type;
+ APB, AHB, AXI, etc. The HW signal name is hclk_i (AHB) or clk_csr_i (other
+ buses).
+ - "master_bus"
+ The master bus interface clock. Only required in configurations that use a
+ separate clock for the master and slave bus interfaces. The HW signal name
+ is hclk_i (AHB) or aclk_i (AXI).
+ - "ptp_ref"
+ The PTP reference clock. The HW signal name is clk_ptp_ref_i.
+ - "phy_ref_clk"
+ This clock is deprecated and should not be used by new compatible values.
+ It is equivalent to "tx".
+ - "apb_pclk"
+ This clock is deprecated and should not be used by new compatible values.
+ It is equivalent to "slave_bus".
+
+ Note: Support for additional IP configurations may require adding the
+ following clocks to this list in the future: clk_rx_125_i, clk_tx_125_i,
+ clk_pmarx_0_i, clk_pmarx1_i, clk_rmii_i, clk_revmii_rx_i, clk_revmii_tx_i.
+ Configurations exist where multiple similar clocks are used at once, e.g. all
+ of clk_rx_125_i, clk_pmarx_0_i, clk_pmarx1_i. For this reason it is best to
+ extend the binding with a separate clock-names entry for each of those RX
+ clocks, rather than repurposing the existing "rx" clock-names entry as a
+ generic/logical clock in a similar fashion to "master_bus" and "slave_bus".
+ This will allow easy support for configurations that support multiple PHY
+ interfaces using a mux, and hence need to have explicit control over
+ specific RX clocks.
+
+ The following compatible values require the following set of clocks:
+ - "nvidia,tegra186-eqos", "snps,dwc-qos-ethernet-4.10":
+ - "slave_bus"
+ - "master_bus"
+ - "rx"
+ - "tx"
+ - "ptp_ref"
+ - "axis,artpec6-eqos", "snps,dwc-qos-ethernet-4.10":
+ - "slave_bus"
+ - "master_bus"
+ - "tx"
+ - "ptp_ref"
+ - "snps,dwc-qos-ethernet-4.10" (deprecated):
+ - "phy_ref_clk"
+ - "apb_clk"
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupts: Should contain the core's combined interrupt signal
- phy-mode: See ethernet.txt file in the same directory
+- resets: Phandle and reset specifiers for each entry in reset-names, in the
+ same order. See ../reset/reset.txt.
+- reset-names: May contain any/all of the following depending on the IP
+ configuration, in any order:
+ - "eqos". The reset to the entire module. The HW signal name is hreset_n
+ (AHB) or aresetn_i (AXI).
+
+ The following compatible values require the following set of resets:
+ (the reset properties may be omitted if empty)
+ - "nvidia,tegra186-eqos", "snps,dwc-qos-ethernet-4.10":
+ - "eqos".
+ - "axis,artpec6-eqos", "snps,dwc-qos-ethernet-4.10":
+ - None.
+ - "snps,dwc-qos-ethernet-4.10" (deprecated):
+ - None.
Optional properties:
- dma-coherent: Present if dma operations are coherent
- mac-address: See ethernet.txt in the same directory
- local-mac-address: See ethernet.txt in the same directory
+- phy-reset-gpios: Phandle and specifier for any GPIO used to reset the PHY.
+ See ../gpio/gpio.txt.
- snps,en-lpi: If present it enables use of the AXI low-power interface
- snps,write-requests: Number of write requests that the AXI port can issue.
It depends on the SoC configuration.
@@ -52,6 +142,7 @@ ethernet2@40010000 {
reg = <0x40010000 0x4000>;
phy-handle = <&phy2>;
phy-mode = "gmii";
+ phy-reset-gpios = <&gpioctlr 43 GPIO_ACTIVE_LOW>;
snps,en-tx-lpi-clockgating;
snps,en-lpi;
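
To make the per-compatible clock and reset lists above concrete, a hedged sketch
of a Tegra186 instance; the address, interrupt number and clock/reset specifiers
are placeholders, only the clock-names/reset-names sets come from the lists above:

	ethernet@2490000 {
		compatible = "nvidia,tegra186-eqos", "snps,dwc-qos-ethernet-4.10";
		reg = <0x02490000 0x10000>;
		interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
		/* placeholder clock/reset specifiers */
		clocks = <&clkc 10>, <&clkc 11>, <&clkc 12>, <&clkc 13>, <&clkc 14>;
		clock-names = "slave_bus", "master_bus", "rx", "tx", "ptp_ref";
		resets = <&rstc 17>;
		reset-names = "eqos";
		phy-mode = "rgmii";
		phy-handle = <&phy0>;
	};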
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.txt b/Documentation/devicetree/bindings/net/stm32-dwmac.txt
new file mode 100644
index 000000000000..c35afb7e956a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.txt
@@ -0,0 +1,32 @@
+STMicroelectronics STM32 / MCU DWMAC glue layer controller
+
+This file documents platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+The device node has following properties.
+
+Required properties:
+- compatible: Should be "st,stm32-dwmac" to select glue, and
+ "snps,dwmac-3.50a" to select IP version.
+- clocks: Must contain a phandle for each entry in clock-names.
+- clock-names: Should be "stmmaceth" for the host clock.
+ Should be "mac-clk-tx" for the MAC TX clock.
+ Should be "mac-clk-rx" for the MAC RX clock.
+- st,syscon : Should be a phandle/offset pair: the phandle to the syscon node which
+ encompasses the glue register, and the offset of the control register.
+Example:
+
+ ethernet@40028000 {
+ compatible = "st,stm32-dwmac", "snps,dwmac-3.50a";
+ status = "disabled";
+ reg = <0x40028000 0x8000>;
+ reg-names = "stmmaceth";
+ interrupts = <0 61 0>, <0 62 0>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
+ clocks = <&rcc 0 25>, <&rcc 0 26>, <&rcc 0 27>;
+ st,syscon = <&syscfg 0x4>;
+ snps,pbl = <8>;
+ snps,mixed-burst;
+ dma-ranges;
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt
new file mode 100644
index 000000000000..19331bb4ff6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/esp,esp8089.txt
@@ -0,0 +1,31 @@
+Espressif ESP8089 wireless SDIO devices
+
+This node provides properties for controlling the ESP8089 wireless device.
+The node is expected to be specified as a child node to the SDIO controller
+that connects the device to the system.
+
+Required properties:
+
+ - compatible : Should be "esp,esp8089".
+
+Optional properties:
+ - esp,crystal-26M-en: Integer value for the crystal_26M_en firmware parameter
+
+Example:
+
+&mmc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vmmc-supply = <&reg_dldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ esp8089: sdio_wifi@1 {
+ compatible = "esp,esp8089";
+ reg = <1>;
+ esp,crystal-26M-en = <2>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt b/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt
new file mode 100644
index 000000000000..038dda48b8e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt
@@ -0,0 +1,35 @@
+XILINX GMIITORGMII Converter Driver Device Tree Bindings
+--------------------------------------------------------
+
+The Gigabit Media Independent Interface (GMII) to Reduced Gigabit Media
+Independent Interface (RGMII) core provides the RGMII between RGMII-compliant
+Ethernet physical media devices (PHY) and the Gigabit Ethernet controller.
+This core can be used in all three modes of operation (10/100/1000 Mb/s).
+The Management Data Input/Output (MDIO) interface is used to configure the
+speed of operation. This core can switch dynamically between the three
+different speed modes by configuring the converter register through an MDIO write.
+
+This converter sits between the ethernet MAC and the external phy.
+MAC <==> GMII2RGMII <==> RGMII_PHY
+
+For more details about MDIO, please refer to the phy.txt file in the same directory.
+
+Required properties:
+- compatible : Should be "xlnx,gmii-to-rgmii-1.0"
+- reg : The ID number for the phy, usually a small integer
+- phy-handle : Should point to the external phy device.
+ See ethernet.txt file in the same directory.
+
+Example:
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy: ethernet-phy@0 {
+ ......
+ };
+ gmiitorgmii: gmiitorgmii@8 {
+ compatible = "xlnx,gmii-to-rgmii-1.0";
+ reg = <8>;
+ phy-handle = <&phy>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
new file mode 100644
index 000000000000..fafd85bd67a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
@@ -0,0 +1,39 @@
+= Amlogic eFuse device tree bindings =
+
+Required properties:
+- compatible: should be "amlogic,meson-gxbb-efuse"
+
+= Data cells =
+Are child nodes of eFuse, bindings of which as described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+ efuse: efuse {
+ compatible = "amlogic,meson-gxbb-efuse";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sn: sn@14 {
+ reg = <0x14 0x10>;
+ };
+
+ eth_mac: eth_mac@34 {
+ reg = <0x34 0x10>;
+ };
+
+ bid: bid@46 {
+ reg = <0x46 0x30>;
+ };
+ };
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+For example:
+
+ eth_mac {
+ ...
+ nvmem-cells = <&eth_mac>;
+ nvmem-cell-names = "eth_mac";
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 8f86ab3b1046..94aeeeabadd5 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -1,11 +1,20 @@
= Rockchip eFuse device tree bindings =
Required properties:
-- compatible: Should be "rockchip,rockchip-efuse"
+- compatible: Should be one of the following.
+ - "rockchip,rk3066a-efuse" - for RK3066a SoCs.
+ - "rockchip,rk3188-efuse" - for RK3188 SoCs.
+ - "rockchip,rk3288-efuse" - for RK3288 SoCs.
+ - "rockchip,rk3399-efuse" - for RK3399 SoCs.
- reg: Should contain the registers location and exact eFuse size
- clocks: Should be the clock id of eFuse
- clock-names: Should be "pclk_efuse"
+Deprecated properties:
+- compatible: "rockchip,rockchip-efuse"
+ Old eFuse compatible value, compatible with the rk3066a, rk3188 and
+ rk3288 eFuses
+
= Data cells =
Are child nodes of eFuse, bindings of which as described in
bindings/nvmem/nvmem.txt
@@ -13,7 +22,7 @@ bindings/nvmem/nvmem.txt
Example:
efuse: efuse@ffb40000 {
- compatible = "rockchip,rockchip-efuse";
+ compatible = "rockchip,rk3288-efuse";
reg = <0xffb40000 0x20>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
index 330a45b5f0b5..5ecaea1e6eee 100644
--- a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
@@ -24,16 +24,17 @@ Example:
compatible = "axis,artpec6-pcie", "snps,dw-pcie";
reg = <0xf8050000 0x2000
0xf8040000 0x1000
- 0xc0000000 0x1000>;
+ 0xc0000000 0x2000>;
reg-names = "dbi", "phy", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
/* downstream I/O */
- ranges = <0x81000000 0 0x00010000 0xc0010000 0 0x00010000
+ ranges = <0x81000000 0 0 0xc0002000 0 0x00010000
/* non-prefetchable memory */
- 0x82000000 0 0xc0020000 0xc0020000 0 0x1ffe0000>;
+ 0x82000000 0 0xc0012000 0xc0012000 0 0x1ffee000>;
num-lanes = <2>;
+ bus-range = <0x00 0xff>;
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 6c5322c55411..1392c705ceca 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -17,6 +17,8 @@ Required properties:
- num-lanes: number of lanes to use
Optional properties:
+- num-viewport: number of view ports configured in hardware. If a platform
+ does not specify it, the driver assumes 2.
- num-lanes: number of lanes to use (this property should be specified unless
the link is brought already up in BIOS)
- reset-gpio: gpio pin number of power good signal
@@ -44,4 +46,5 @@ Example configuration:
interrupts = <25>, <24>;
#interrupt-cells = <1>;
num-lanes = <1>;
+ num-viewport = <3>;
};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
new file mode 100644
index 000000000000..ba67b39939c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
@@ -0,0 +1,106 @@
+* Rockchip AXI PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "rockchip,rk3399-pcie"
+- reg: Two register ranges as listed in the reg-names property
+- reg-names: Must include the following names
+ - "axi-base"
+ - "apb-base"
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - "aclk"
+ - "aclk-perf"
+ - "hclk"
+ - "pm"
+- msi-map: Maps a Requester ID to an MSI controller and associated
+ msi-specifier data. See ./pci-msi.txt
+- phys: From PHY bindings: Phandle for the Generic PHY for PCIe.
+- phy-names: MUST be "pcie-phy".
+- interrupts: Three interrupt entries must be specified.
+- interrupt-names: Must include the following names
+ - "sys"
+ - "legacy"
+ - "client"
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following names
+ - "core"
+ - "mgmt"
+ - "mgmt-sticky"
+ - "pipe"
+- pinctrl-names : The pin control state names
+- pinctrl-0: The "default" pinctrl state
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- interrupt-map-mask and interrupt-map: standard PCI properties
+
+Optional Property:
+- ep-gpios: contain the entry for pre-reset gpio
+- num-lanes: number of lanes to use
+- vpcie3v3-supply: The phandle to the 3.3v regulator to use for PCIe.
+- vpcie1v8-supply: The phandle to the 1.8v regulator to use for PCIe.
+- vpcie0v9-supply: The phandle to the 0.9v regulator to use for PCIe.
+
+*Interrupt controller child node*
+The core controller provides a single interrupt for legacy INTx. The PCIe node
+should contain an interrupt controller node as a target for the PCI
+'interrupt-map' property. This node represents the domain at which the four
+INTx interrupts are decoded and routed.
+
+
+Required properties for Interrupt controller child node:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+ address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+
+Example:
+
+pcie0: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ bus-range = <0x0 0x1>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "legacy", "client";
+ assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
+ assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
+ assigned-clock-rates = <100000000>;
+ ep-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+ ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
+ 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
+ num-lanes = <4>;
+ msi-map = <0x0 &its 0x0 0x1000>;
+ reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
+ reg-names = "axi-base", "apb-base";
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ phys = <&pcie_phy>;
+ phy-names = "pcie-phy";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreq>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie0_intc 0>,
+ <0 0 0 2 &pcie0_intc 1>,
+ <0 0 0 3 &pcie0_intc 2>,
+ <0 0 0 4 &pcie0_intc 3>;
+ pcie0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
index 337fc97d18c9..3259798a1192 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
@@ -55,9 +55,10 @@ nwl_pcie: pcie@fd0e0000 {
msi-parent = <&nwl_pcie>;
reg = <0x0 0xfd0e0000 0x0 0x1000>,
<0x0 0xfd480000 0x0 0x1000>,
- <0x0 0xe0000000 0x0 0x1000000>;
+ <0x80 0x00000000 0x0 0x1000000>;
reg-names = "breg", "pcireg", "cfg";
- ranges = <0x02000000 0x00000000 0xe1000000 0x00000000 0xe1000000 0 0x0f000000>;
+ ranges = <0x02000000 0x00000000 0xe0000000 0x00000000 0xe0000000 0x00000000 0x10000000 /* non-prefetchable memory */
+ 0x43000000 0x00000006 0x00000000 0x00000006 0x00000000 0x00000002 0x00000000>;/* prefetchable memory */
pcie_intc: legacy-interrupt-controller {
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt b/Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
new file mode 100644
index 000000000000..afb11cf693c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
@@ -0,0 +1,112 @@
+* APM X-Gene SoC PMU bindings
+
+This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
+The following PMU devices are supported:
+
+ L3C - L3 cache controller
+ IOB - IO bridge
+ MCB - Memory controller bridge
+ MC - Memory controller
+
+The following section describes the SoC PMU DT node binding.
+
+Required properties:
+- compatible : Shall be "apm,xgene-pmu" for revision 1 or
+ "apm,xgene-pmu-v2" for revision 2.
+- regmap-csw : Regmap of the CPU switch fabric (CSW) resource.
+- regmap-mcba : Regmap of the MCB-A (memory bridge) resource.
+- regmap-mcbb : Regmap of the MCB-B (memory bridge) resource.
+- reg : First resource shall be the CPU bus PMU resource.
+- interrupts : Interrupt-specifier for PMU IRQ.
+
+Required properties for L3C subnode:
+- compatible : Shall be "apm,xgene-pmu-l3c".
+- reg : First resource shall be the L3C PMU resource.
+
+Required properties for IOB subnode:
+- compatible : Shall be "apm,xgene-pmu-iob".
+- reg : First resource shall be the IOB PMU resource.
+
+Required properties for MCB subnode:
+- compatible : Shall be "apm,xgene-pmu-mcb".
+- reg : First resource shall be the MCB PMU resource.
+- enable-bit-index : Index of the bit that indicates whether the corresponding MCB is enabled.
+
+Required properties for MC subnode:
+- compatible : Shall be "apm,xgene-pmu-mc".
+- reg : First resource shall be the MC PMU resource.
+- enable-bit-index : Index of the bit that indicates whether the corresponding MC is enabled.
+
+Example:
+ csw: csw@7e200000 {
+ compatible = "apm,xgene-csw", "syscon";
+ reg = <0x0 0x7e200000 0x0 0x1000>;
+ };
+
+ mcba: mcba@7e700000 {
+ compatible = "apm,xgene-mcb", "syscon";
+ reg = <0x0 0x7e700000 0x0 0x1000>;
+ };
+
+ mcbb: mcbb@7e720000 {
+ compatible = "apm,xgene-mcb", "syscon";
+ reg = <0x0 0x7e720000 0x0 0x1000>;
+ };
+
+ pmu: pmu@78810000 {
+ compatible = "apm,xgene-pmu-v2";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ regmap-csw = <&csw>;
+ regmap-mcba = <&mcba>;
+ regmap-mcbb = <&mcbb>;
+ reg = <0x0 0x78810000 0x0 0x1000>;
+ interrupts = <0x0 0x22 0x4>;
+
+ pmul3c@7e610000 {
+ compatible = "apm,xgene-pmu-l3c";
+ reg = <0x0 0x7e610000 0x0 0x1000>;
+ };
+
+ pmuiob@7e940000 {
+ compatible = "apm,xgene-pmu-iob";
+ reg = <0x0 0x7e940000 0x0 0x1000>;
+ };
+
+ pmucmcb@7e710000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e710000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmcb@7e730000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e730000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e810000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e810000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmc@7e850000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e850000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e890000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e890000 0x0 0x1000>;
+ enable-bit-index = <2>;
+ };
+
+ pmucmc@7e8d0000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e8d0000 0x0 0x1000>;
+ enable-bit-index = <3>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
new file mode 100644
index 000000000000..09aeba94538d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
@@ -0,0 +1,23 @@
+Driver for Broadcom Northstar USB 3.0 PHY
+
+Required properties:
+
+- compatible: one of: "brcm,ns-ax-usb3-phy", "brcm,ns-bx-usb3-phy".
+- reg: register mappings for DMP (Device Management Plugin) and ChipCommon B
+ MMI.
+- reg-names: "dmp" and "ccb-mii"
+
+Initialization of USB 3.0 PHY depends on Northstar version. There are currently
+three known series: Ax, Bx and Cx.
+Known A0: BCM4707 rev 0
+Known B0: BCM4707 rev 4, BCM53573 rev 2
+Known B1: BCM4707 rev 6
+Known C0: BCM47094 rev 0
+
+Example:
+ usb3-phy {
+ compatible = "brcm,ns-ax-usb3-phy";
+ reg = <0x18105000 0x1000>, <0x18003000 0x1000>;
+ reg-names = "dmp", "ccb-mii";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
new file mode 100644
index 000000000000..9da5ea234154
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
@@ -0,0 +1,27 @@
+* Amlogic USB2 PHY
+
+Required properties:
+- compatible: Depending on the platform this should be one of:
+ "amlogic,meson8b-usb2-phy"
+ "amlogic,meson-gxbb-usb2-phy"
+- reg: The base address and length of the registers
+- #phy-cells: should be 0 (see phy-bindings.txt in this directory)
+- clocks: phandle and clock identifier for the phy clocks
+- clock-names: "usb_general" and "usb"
+
+Optional properties:
+- resets: reference to the reset controller
+- phy-supply: see phy-bindings.txt in this directory
+
+
+Example:
+
+usb0_phy: usb_phy@0 {
+ compatible = "amlogic,meson-gxbb-usb2-phy";
+ #phy-cells = <0>;
+ reg = <0x0 0x0 0x0 0x20>;
+ resets = <&reset RESET_USB_OTG>;
+ clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
+ clock-names = "usb_general", "usb";
+ phy-supply = <&usb_vbus>;
+};
diff --git a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt b/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
index 379b84a567cc..1d25b04cd05e 100644
--- a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
@@ -12,6 +12,16 @@ Required properties:
- interrupts: Should contain phy interrupt
- fsl,anatop: phandle for anatop register, it is only for imx6 SoC series
+Optional properties:
+- fsl,tx-cal-45-dn-ohms: Integer [30-55]. Resistance (in ohms) of switchable
+ high-speed trimming resistor connected in parallel with the 45 ohm resistor
+ that terminates the DN output signal. Default: 45
+- fsl,tx-cal-45-dp-ohms: Integer [30-55]. Resistance (in ohms) of switchable
+ high-speed trimming resistor connected in parallel with the 45 ohm resistor
+ that terminates the DP output signal. Default: 45
+- fsl,tx-d-cal: Integer [79-119]. Current trimming value (as a percentage) of
+ the 17.78mA TX reference current. Default: 100
+
Example:
usbphy1: usbphy@020c9000 {
compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
new file mode 100644
index 000000000000..3c29c77a7018
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -0,0 +1,64 @@
+ROCKCHIP USB2.0 PHY WITH INNO IP BLOCK
+
+Required properties (phy (parent) node):
+ - compatible : should be one of the listed compatibles:
+ * "rockchip,rk3366-usb2phy"
+ * "rockchip,rk3399-usb2phy"
+ - reg : the address offset of grf for usb-phy configuration.
+ - #clock-cells : should be 0.
+ - clock-output-names : specify the 480m output clock name.
+
+Optional properties:
+ - clocks : phandle + phy specifier pair, for the input clock of phy.
+ - clock-names : input clock name of phy, must be "phyclk".
+
+Required nodes : a sub-node is required for each port the phy provides.
+ The sub-node name is used to identify host or otg port,
+ and shall be the following entries:
+ * "otg-port" : the name of otg port.
+ * "host-port" : the name of host port.
+
+Required properties (port (child) node):
+ - #phy-cells : must be 0. See ./phy-bindings.txt for details.
+ - interrupts : specify an interrupt for each entry in interrupt-names.
+ - interrupt-names : a list which shall be the following entries:
+ * "otg-id" : for the otg id interrupt.
+ * "otg-bvalid" : for the otg vbus interrupt.
+ * "linestate" : for the host/otg linestate interrupt.
+
+Optional properties:
+ - phy-supply : phandle to a regulator that provides power to VBUS.
+ See ./phy-bindings.txt for details.
+
+Example:
+
+grf: syscon@ff770000 {
+ compatible = "rockchip,rk3366-grf", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+...
+
+ u2phy: usb2-phy@700 {
+ compatible = "rockchip,rk3366-usb2phy";
+ reg = <0x700 0x2c>;
+ #clock-cells = <0>;
+ clock-output-names = "sclk_otgphy0_480m";
+
+ u2phy_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "otg-id", "otg-bvalid", "linestate";
+ status = "okay";
+ };
+
+ u2phy_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "linestate";
+ status = "okay";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
new file mode 100644
index 000000000000..6ea867e3176f
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
@@ -0,0 +1,101 @@
+* ROCKCHIP type-c PHY
+---------------------
+
+Required properties:
+ - compatible : must be "rockchip,rk3399-typec-phy"
+ - reg: Address and length of the usb phy control register set
+ - rockchip,grf : phandle to the syscon managing the "general
+ register files"
+ - clocks : phandle + clock specifier for the phy clocks
+ - clock-names : string, clock name, must be "tcpdcore", "tcpdphy-ref";
+ - assigned-clocks: main clock, should be <&cru SCLK_UPHY0_TCPDCORE> or
+ <&cru SCLK_UPHY1_TCPDCORE>;
+ - assigned-clock-rates : the phy core clk frequency, shall be: 50000000
+ - resets : a list of phandle + reset specifier pairs
+ - reset-names : string reset name, must be:
+ "uphy", "uphy-pipe", "uphy-tcphy"
+ - extcon : extcon specifier for the Power Delivery
+
+Note: there are 2 type-c phys on the RK3399 and they are almost identical, except
+for the registers described below. Each of these register properties contains 3
+cells: offset, enable bit, write mask bit.
+ - rockchip,typec-conn-dir : the register of type-c connector direction,
+ for type-c phy0, it must be <0xe580 0 16>;
+ for type-c phy1, it must be <0xe58c 0 16>;
+ - rockchip,usb3tousb2-en : the register of type-c force usb3 to usb2 enable
+ control.
+ for type-c phy0, it must be <0xe580 3 19>;
+ for type-c phy1, it must be <0xe58c 3 19>;
+ - rockchip,external-psm : the register of type-c phy external psm clock
+ selection.
+ for type-c phy0, it must be <0xe588 14 30>;
+ for type-c phy1, it must be <0xe594 14 30>;
+ - rockchip,pipe-status : the register of type-c phy pipe status.
+ for type-c phy0, it must be <0xe5c0 0 0>;
+ for type-c phy1, it must be <0xe5c0 16 16>;
+
+Required nodes : a sub-node is required for each port the phy provides.
+ The sub-node name is used to identify dp or usb3 port,
+ and shall be the following entries:
+ * "dp-port" : the name of DP port.
+ * "usb3-port" : the name of USB3 port.
+
+Required properties (port (child) node):
+- #phy-cells : must be 0, See ./phy-bindings.txt for details.
+
+Example:
+ tcphy0: phy@ff7c0000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff7c0000 0x0 0x40000>;
+ rockchip,grf = <&grf>;
+ extcon = <&fusb0>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ resets = <&cru SRST_UPHY0>,
+ <&cru SRST_UPHY0_PIPE_L00>,
+ <&cru SRST_P_UPHY0_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,typec-conn-dir = <0xe580 0 16>;
+ rockchip,usb3tousb2-en = <0xe580 3 19>;
+ rockchip,external-psm = <0xe588 14 30>;
+ rockchip,pipe-status = <0xe5c0 0 0>;
+
+ tcphy0_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy0_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
+
+ tcphy1: phy@ff800000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff800000 0x0 0x40000>;
+ rockchip,grf = <&grf>;
+ extcon = <&fusb1>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ resets = <&cru SRST_UPHY1>,
+ <&cru SRST_UPHY1_PIPE_L00>,
+ <&cru SRST_P_UPHY1_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,typec-conn-dir = <0xe58c 0 16>;
+ rockchip,usb3tousb2-en = <0xe58c 3 19>;
+ rockchip,external-psm = <0xe594 14 30>;
+ rockchip,pipe-status = <0xe5c0 16 16>;
+
+ tcphy1_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy1_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index 2281d6cdecb1..ace9cce2704a 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -6,6 +6,8 @@ This file provides information on what the device node for the R-Car generation
Required properties:
- compatible: "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
SoC.
+ "renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
+ SoC.
"renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 compatible device.
When compatible with the generic version, nodes must list the
@@ -30,11 +32,11 @@ Example (R-Car H3):
compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
reg = <0 0xee080200 0 0x700>;
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp7_clks R8A7795_CLK_EHCI0>;
+ clocks = <&cpg CPG_MOD 703>;
};
usb-phy@ee0a0200 {
compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
reg = <0 0xee0a0200 0 0x700>;
- clocks = <&mstp7_clks R8A7795_CLK_EHCI0>;
+ clocks = <&cpg CPG_MOD 702>;
};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
new file mode 100644
index 000000000000..0f6222a672ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
@@ -0,0 +1,31 @@
+Rockchip PCIE PHY
+-----------------------
+
+Required properties:
+ - compatible: rockchip,rk3399-pcie-phy
+ - #phy-cells: must be 0
+ - clocks: Must contain an entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: Must be "refclk"
+ - resets: Must contain an entry in reset-names.
+ See ../reset/reset.txt for details.
+ - reset-names: Must be "phy"
+
+Example:
+
+grf: syscon@ff770000 {
+ compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ...
+
+ pcie_phy: pcie-phy {
+ compatible = "rockchip,rk3399-pcie-phy";
+ #phy-cells = <0>;
+ clocks = <&cru SCLK_PCIEPHY_REF>;
+ clock-names = "refclk";
+ resets = <&cru SRST_PCIEPHY>;
+ reset-names = "phy";
+ };
+};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
index cc6be9680a6d..57dc388e2fa2 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
@@ -27,6 +27,9 @@ Optional Properties:
- clocks : phandle + clock specifier for the phy clocks
- clock-names: string, clock name, must be "phyclk"
- #clock-cells: for users of the phy-pll, should be 0
+- reset-names: Only allow the following entries:
+ - phy-reset
+- resets: Must contain an entry for each entry in reset-names.
Example:
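
(The file's own example is unchanged and not shown in this hunk. As an editorial
sketch only, the new reset entries might be added to a phy port node like this;
the node layout and the clock/reset specifiers are assumptions:)

	usbphy0: usb-phy@320 {
		#phy-cells = <0>;
		reg = <0x320>;
		clocks = <&cru SCLK_OTGPHY0>;
		clock-names = "phyclk";
		resets = <&cru SRST_USBOTG_PHY>;
		reset-names = "phy-reset";
	};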
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
index 95736d77fbb7..287150db6db4 100644
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
@@ -10,6 +10,7 @@ Required properties:
* allwinner,sun8i-a23-usb-phy
* allwinner,sun8i-a33-usb-phy
* allwinner,sun8i-h3-usb-phy
+ * allwinner,sun50i-a64-usb-phy
- reg : a list of offset + length pairs
- reg-names :
* "phy_ctrl"
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index a3b394587874..cd13e6157088 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -31,6 +31,8 @@ OMAP USB2 PHY
Required properties:
- compatible: Should be "ti,omap-usb2"
+ Should be "ti,dra7x-usb2" for the 1st instance of USB2 PHY on
+ DRA7x
Should be "ti,dra7x-usb2-phy2" for the 2nd instance of USB2 PHY
in DRA7x
- reg : Address and length of the register set for the device.
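
A hedged sketch of the first DRA7x USB2 PHY instance using the new compatible;
the unit address, control-module and clock phandles are placeholders, not taken
from the patch:

	usb2_phy1: phy@4a084000 {
		compatible = "ti,dra7x-usb2", "ti,omap-usb2";
		reg = <0x4a084000 0x400>;
		ctrl-module = <&omap_control_usb2phy1>;
		clocks = <&usb_phy1_always_on_clk32k>, <&usb_otg_ss1_refclk960m>;
		clock-names = "wkupclk", "refclk";
		#phy-cells = <0>;
	};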
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 69617220c5d6..1685821eea41 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -23,6 +23,7 @@ Required properties:
"allwinner,sun8i-h3-pinctrl"
"allwinner,sun8i-h3-r-pinctrl"
"allwinner,sun50i-a64-pinctrl"
+ "nextthing,gr8-pinctrl"
- reg: Should contain the register physical address and length for the
pin controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,orion-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,orion-pinctrl.txt
index 27570a3a1741..ec8aa3c6936b 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,orion-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,orion-pinctrl.txt
@@ -4,7 +4,9 @@ Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
part and usage.
Required properties:
-- compatible: "marvell,88f5181l-pinctrl", "marvell,88f5182-pinctrl",
+- compatible: "marvell,88f5181-pinctrl",
+ "marvell,88f5181l-pinctrl",
+ "marvell,88f5182-pinctrl",
"marvell,88f5281-pinctrl"
- reg: two register areas, the first one describing the first two
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
new file mode 100644
index 000000000000..5e60ad18f147
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -0,0 +1,65 @@
+Aspeed Pin Controllers
+----------------------
+
+The Aspeed SoCs vary in functionality inside a generation but have a common mux
+device register layout.
+
+Required properties:
+- compatible : Should be any one of the following:
+ "aspeed,ast2400-pinctrl"
+ "aspeed,g4-pinctrl"
+ "aspeed,ast2500-pinctrl"
+ "aspeed,g5-pinctrl"
+
+The pin controller node should be a child of a syscon node with the required
+property:
+- compatible: "syscon", "simple-mfd"
+
+Refer to the bindings described in
+Documentation/devicetree/bindings/mfd/syscon.txt
+
+Subnode Format
+--------------
+
+The required properties of child nodes are (as defined in pinctrl-bindings):
+- function
+- groups
+
+Each function has only one associated pin group. Each group is named by its
+function. The following values for the function and groups properties are
+supported:
+
+aspeed,ast2400-pinctrl, aspeed,g4-pinctrl:
+
+ACPI BMCINT DDCCLK DDCDAT FLACK FLBUSY FLWP GPID0 GPIE0 GPIE2 GPIE4 GPIE6 I2C10
+I2C11 I2C12 I2C13 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8 I2C9 LPCPD LPCPME LPCSMI MDIO1
+MDIO2 NCTS1 NCTS3 NCTS4 NDCD1 NDCD3 NDCD4 NDSR1 NDSR3 NDTR1 NDTR3 NRI1 NRI3
+NRI4 NRTS1 NRTS3 PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7 RGMII1 RMII1 ROM16
+ROM8 ROMCS1 ROMCS2 ROMCS3 ROMCS4 RXD1 RXD3 RXD4 SD1 SGPMI SIOPBI SIOPBO TIMER3
+TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD3 TXD4 UART6 VGAHS VGAVS VPI18 VPI24 VPI30
+VPO12 VPO24
+
+aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
+
+GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
+I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+
+Examples:
+
+syscon: scu@1e6e2000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1e6e2000 0x1a8>;
+
+ pinctrl: pinctrl {
+ compatible = "aspeed,g4-pinctrl";
+
+ pinctrl_i2c3_default: i2c3_default {
+ function = "I2C3";
+ groups = "I2C3";
+ };
+ };
+};
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
index 26bcb18f4e60..013c675b5b64 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
@@ -30,8 +30,7 @@ Second type has a dedicated interrupt per gpio bank.
Pin controller node:
Required properties:
-- compatible : should be "st,<SOC>-<pio-block>-pinctrl"
- like st,stih415-sbc-pinctrl, st,stih415-front-pinctrl and so on.
+- compatible : should be "st,stih407-<pio-block>-pinctrl"
- st,syscfg : Should be a phandle of the syscfg node.
- st,retime-pin-mask : Should be mask to specify which pins can be retimed.
If the property is not present, it is assumed that all the pins in the
@@ -50,7 +49,11 @@ Optional properties:
GPIO controller/bank node.
Required properties:
- gpio-controller : Indicates this device is a GPIO controller
-- #gpio-cells : Should be one. The first cell is the pin number.
+- #gpio-cells : Must be two.
+ - First cell: specifies the pin number inside the controller
+ - Second cell: specifies whether the pin is logically inverted.
+ - 0 = active high
+ - 1 = active low
- st,bank-name : Should be a name string for this bank as specified in
datasheet.
@@ -76,23 +79,23 @@ include/dt-bindings/interrupt-controller/irq.h
Example:
pin-controller-sbc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,stih415-sbc-pinctrl";
- st,syscfg = <&syscfg_sbc>;
- reg = <0xfe61f080 0x4>;
- reg-names = "irqmux";
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "irqmux";
- ranges = <0 0xfe610000 0x5000>;
-
- PIO0: gpio@fe610000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,stih407-sbc-pinctrl";
+ st,syscfg = <&syscfg_sbc>;
+ reg = <0x0961f080 0x4>;
+ reg-names = "irqmux";
+ interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>;
+ interrupt-names = "irqmux";
+ ranges = <0 0x09610000 0x6000>;
+
+ pio0: gpio@09610000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- reg = <0 0x100>;
- st,bank-name = "PIO0";
+ reg = <0x0 0x100>;
+ st,bank-name = "PIO0";
};
...
pin-functions nodes follow...
@@ -162,7 +165,7 @@ pin-controller {
sdhci0:sdhci@fe810000{
...
- interrupt-parent = <&PIO3>;
+ interrupt-parent = <&pio3>;
#interrupt-cells = <2>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
interrupt-names = "card-detect";
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index a54c39ebbf8b..8d893a874634 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -17,6 +17,9 @@ PMIC's from Qualcomm.
"qcom,pm8994-gpio"
"qcom,pma8084-gpio"
+ And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio"
+ if the device is on an spmi bus or an ssbi bus respectively
+
- reg:
Usage: required
Value type: <prop-encoded-array>
@@ -183,7 +186,7 @@ to specify in a pin configuration subnode:
Example:
pm8921_gpio: gpio@150 {
- compatible = "qcom,pm8921-gpio";
+ compatible = "qcom,pm8921-gpio", "qcom,ssbi-gpio";
reg = <0x150 0x160>;
interrupts = <192 1>, <193 1>, <194 1>,
<195 1>, <196 1>, <197 1>,
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index b484ba1af78c..2ab95bc26066 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -19,6 +19,9 @@ of PMIC's from Qualcomm.
"qcom,pm8994-mpp",
"qcom,pma8084-mpp",
+ And must contain either "qcom,spmi-mpp" or "qcom,ssbi-mpp"
+ if the device is on an spmi bus or an ssbi bus respectively.
+
- reg:
Usage: required
Value type: <prop-encoded-array>
@@ -158,7 +161,7 @@ to specify in a pin configuration subnode:
Example:
mpps@a000 {
- compatible = "qcom,pm8841-mpp";
+ compatible = "qcom,pm8841-mpp", "qcom,spmi-mpp";
reg = <0xa000>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index e4cf022c992e..13df9498311a 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -17,9 +17,11 @@ Required Properties:
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
- "renesas,pfc-r8a7791": for R8A7791 (R-Car M2-W) compatible pin-controller.
+ - "renesas,pfc-r8a7792": for R8A7792 (R-Car V2H) compatible pin-controller.
- "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller.
- "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
- "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
+ - "renesas,pfc-r8a7796": for R8A7796 (R-Car M3-W) compatible pin-controller.
- "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
- reg: Base address and length of each memory resource used by the pin
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 6db16b90873a..d49e22d2a8b5 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -229,6 +229,8 @@ Example: A pin-controller node with pin banks:
Example 1: A pin-controller node with pin groups.
+ #include <dt-bindings/pinctrl/samsung.h>
+
pinctrl_0: pinctrl@11400000 {
compatible = "samsung,exynos4210-pinctrl";
reg = <0x11400000 0x1000>;
@@ -238,53 +240,53 @@ Example 1: A pin-controller node with pin groups.
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
sd4_bus8: sd4-bus-width8 {
part-1 {
samsung,pins = "gpk0-3", "gpk0-4",
"gpk0-5", "gpk0-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
part-2 {
samsung,pins = "gpk1-3", "gpk1-4",
"gpk1-5", "gpk1-6";
- samsung,pin-function = <4>;
- samsung,pin-pud = <4>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index 587bffb9cbc6..f9753c416974 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -14,6 +14,11 @@ Required properies:
- #size-cells : The value of this property must be 1
- ranges : defines mapping between pin controller node (parent) to
gpio-bank node (children).
+ - interrupt-parent: phandle of the interrupt parent to which the external
+   GPIO interrupts are forwarded.
+ - st,syscfg: Should be a phandle/offset pair: the phandle of the syscon node
+   that contains the IRQ mux selection register, and the offset of that
+   register.
- pins-are-numbered: Specify the subnodes are using numbered pinmux to
specify pins.
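+
+A minimal sketch of a pin controller using the two interrupt-related
+properties above (compatible shown for the STM32F429 variant; the &exti and
+&syscfg labels and the 0x8 register offset are illustrative only):
+
+	pin-controller {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "st,stm32f429-pinctrl";
+		ranges = <0 0x40020000 0x3000>;
+		interrupt-parent = <&exti>;
+		st,syscfg = <&syscfg 0x8>;
+		pins-are-numbered;
+		...
+	};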
diff --git a/Documentation/devicetree/bindings/power/act8945a-charger.txt b/Documentation/devicetree/bindings/power/act8945a-charger.txt
deleted file mode 100644
index bea254c9d136..000000000000
--- a/Documentation/devicetree/bindings/power/act8945a-charger.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Device-Tree bindings for charger of Active-semi ACT8945A Multi-Function Device
-
-Required properties:
- - compatible: "active-semi,act8945a", please refer to ../mfd/act8945a.txt.
- - active-semi,chglev-gpios: charge current level phandle with args
- as described in ../gpio/gpio.txt.
-
-Optional properties:
- - active-semi,check-battery-temperature: boolean to check the battery
- temperature or not.
- - active-semi,input-voltage-threshold-microvolt: unit: mV;
- Specifies the charger's input over-voltage threshold value;
- The value can be: 6600, 7000, 7500, 8000; default: 6600
- - active-semi,precondition-timeout: unit: minutes;
- Specifies the charger's PRECONDITION safety timer setting value;
- The value can be: 40, 60, 80, 0; If 0, it means to disable this timer;
- default: 40.
- - active-semi,total-timeout: unit: hours;
- Specifies the charger's total safety timer setting value;
- The value can be: 3, 4, 5, 0; If 0, it means to disable this timer;
- default: 3.
-
-Example:
- pmic@5b {
- compatible = "active-semi,act8945a";
- reg = <0x5b>;
- status = "okay";
-
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_charger_chglev>;
- active-semi,chglev-gpios = <&pioA 12 GPIO_ACTIVE_HIGH>;
- active-semi,input-voltage-threshold-microvolt = <6600>;
- active-semi,precondition-timeout = <40>;
- active-semi,total-timeout = <3>;
- };
diff --git a/Documentation/devicetree/bindings/power_supply/axxia-reset.txt b/Documentation/devicetree/bindings/power/reset/axxia-reset.txt
index 47e720d249d2..47e720d249d2 100644
--- a/Documentation/devicetree/bindings/power_supply/axxia-reset.txt
+++ b/Documentation/devicetree/bindings/power/reset/axxia-reset.txt
diff --git a/Documentation/devicetree/bindings/power_supply/imx-snvs-poweroff.txt b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
index dc7c9bad63ea..dc7c9bad63ea 100644
--- a/Documentation/devicetree/bindings/power_supply/imx-snvs-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
diff --git a/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt b/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
index ce44ad357565..ce44ad357565 100644
--- a/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
diff --git a/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt b/Documentation/devicetree/bindings/power/reset/qnap-poweroff.txt
index af25e77c0e0c..af25e77c0e0c 100644
--- a/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/qnap-poweroff.txt
diff --git a/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt b/Documentation/devicetree/bindings/power/reset/restart-poweroff.txt
index 5776e684afda..5776e684afda 100644
--- a/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/restart-poweroff.txt
diff --git a/Documentation/devicetree/bindings/power/reset/st-reset.txt b/Documentation/devicetree/bindings/power/reset/st-reset.txt
index 809af54f02f3..83734dc3a389 100644
--- a/Documentation/devicetree/bindings/power/reset/st-reset.txt
+++ b/Documentation/devicetree/bindings/power/reset/st-reset.txt
@@ -1,11 +1,12 @@
*Device-Tree bindings for ST SW reset functionality
Required properties:
-- compatible: should be "st,<chip>-restart".
+- compatible: should be "st,stih407-restart".
- st,syscfg: should be a phandle of the syscfg node.
Example node:
restart {
- compatible = "st,stih416-restart";
- st,syscfg = <&syscfg_sbc>;
+ compatible = "st,stih407-restart";
+ st,syscfg = <&syscfg_sbc_reg>;
+ status = "okay";
};
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt b/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt
index 0ba1bcc7f33a..0ba1bcc7f33a 100644
--- a/Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt
+++ b/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt b/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt
index ef5328371122..ef5328371122 100644
--- a/Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt
+++ b/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/charger.txt b/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt
index 6bdbb08ea9e0..6bdbb08ea9e0 100644
--- a/Documentation/devicetree/bindings/power_supply/ab8500/charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/fg.txt b/Documentation/devicetree/bindings/power/supply/ab8500/fg.txt
index ccafcb9112fb..ccafcb9112fb 100644
--- a/Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/ab8500/fg.txt
diff --git a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
new file mode 100644
index 000000000000..de78d761ce44
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
@@ -0,0 +1,48 @@
+Device-Tree bindings for charger of Active-semi ACT8945A Multi-Function Device
+
+Required properties:
+ - compatible: "active-semi,act8945a-charger".
+ - active-semi,chglev-gpios: charge current level phandle with args
+ as described in ../gpio/gpio.txt.
+ - active-semi,lbo-gpios: specify the low battery voltage detect phandle
+   with args as described in ../gpio/gpio.txt.
+ - interrupts: <a b> where a is the interrupt number and b is a
+ field that represents an encoding of the sense and level
+ information for the interrupt.
+ - interrupt-parent: the phandle for the interrupt controller that
+ services interrupts for this device.
+
+Optional properties:
+ - active-semi,input-voltage-threshold-microvolt: unit: mV;
+ Specifies the charger's input over-voltage threshold value;
+ The value can be: 6600, 7000, 7500, 8000; default: 6600
+ - active-semi,precondition-timeout: unit: minutes;
+   Specifies the charger's PRECONDITION safety timer setting value;
+   The value can be: 40, 60, 80 or 0, where 0 disables this timer;
+   default: 40.
+ - active-semi,total-timeout: unit: hours;
+   Specifies the charger's total safety timer setting value;
+   The value can be: 3, 4, 5 or 0, where 0 disables this timer;
+   default: 3.
+
+Example:
+ pmic@5b {
+ compatible = "active-semi,act8945a";
+ reg = <0x5b>;
+ status = "okay";
+
+ charger {
+ compatible = "active-semi,act8945a-charger";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_charger_chglev &pinctrl_charger_lbo &pinctrl_charger_irq>;
+ interrupt-parent = <&pioA>;
+ interrupts = <45 GPIO_ACTIVE_LOW>;
+
+ active-semi,chglev-gpios = <&pioA 12 GPIO_ACTIVE_HIGH>;
+ active-semi,lbo-gpios = <&pioA 72 GPIO_ACTIVE_LOW>;
+ active-semi,input-voltage-threshold-microvolt = <6600>;
+ active-semi,precondition-timeout = <40>;
+ active-semi,total-timeout = <3>;
+ status = "okay";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt b/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
index f1d7beec45bf..f1d7beec45bf 100644
--- a/Documentation/devicetree/bindings/power_supply/axp20x_usb_power.txt
+++ b/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/supply/bq2415x.txt
index d0327f0b59ad..d0327f0b59ad 100644
--- a/Documentation/devicetree/bindings/power/bq2415x.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq2415x.txt
diff --git a/Documentation/devicetree/bindings/power/bq24257.txt b/Documentation/devicetree/bindings/power/supply/bq24257.txt
index d693702c9c1e..d693702c9c1e 100644
--- a/Documentation/devicetree/bindings/power/bq24257.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq24257.txt
diff --git a/Documentation/devicetree/bindings/power/bq25890.txt b/Documentation/devicetree/bindings/power/supply/bq25890.txt
index c9dd17d142ad..c9dd17d142ad 100644
--- a/Documentation/devicetree/bindings/power/bq25890.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq25890.txt
diff --git a/Documentation/devicetree/bindings/power_supply/charger-manager.txt b/Documentation/devicetree/bindings/power/supply/charger-manager.txt
index ec4fe9de3137..ec4fe9de3137 100644
--- a/Documentation/devicetree/bindings/power_supply/charger-manager.txt
+++ b/Documentation/devicetree/bindings/power/supply/charger-manager.txt
diff --git a/Documentation/devicetree/bindings/power/da9150-charger.txt b/Documentation/devicetree/bindings/power/supply/da9150-charger.txt
index f3906663c454..f3906663c454 100644
--- a/Documentation/devicetree/bindings/power/da9150-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/da9150-charger.txt
diff --git a/Documentation/devicetree/bindings/power/da9150-fg.txt b/Documentation/devicetree/bindings/power/supply/da9150-fg.txt
index 00236fe3ea31..00236fe3ea31 100644
--- a/Documentation/devicetree/bindings/power/da9150-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/da9150-fg.txt
diff --git a/Documentation/devicetree/bindings/power_supply/gpio-charger.txt b/Documentation/devicetree/bindings/power/supply/gpio-charger.txt
index adbb5dc5b6e9..adbb5dc5b6e9 100644
--- a/Documentation/devicetree/bindings/power_supply/gpio-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/gpio-charger.txt
diff --git a/Documentation/devicetree/bindings/power/isp1704.txt b/Documentation/devicetree/bindings/power/supply/isp1704.txt
index fa3596907967..fa3596907967 100644
--- a/Documentation/devicetree/bindings/power/isp1704.txt
+++ b/Documentation/devicetree/bindings/power/supply/isp1704.txt
diff --git a/Documentation/devicetree/bindings/power_supply/lp8727_charger.txt b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
index 2246bc5c874b..2246bc5c874b 100644
--- a/Documentation/devicetree/bindings/power_supply/lp8727_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
diff --git a/Documentation/devicetree/bindings/power/ltc2941.txt b/Documentation/devicetree/bindings/power/supply/ltc2941.txt
index ea42ae12d924..ea42ae12d924 100644
--- a/Documentation/devicetree/bindings/power/ltc2941.txt
+++ b/Documentation/devicetree/bindings/power/supply/ltc2941.txt
diff --git a/Documentation/devicetree/bindings/power_supply/max17042_battery.txt b/Documentation/devicetree/bindings/power/supply/max17042_battery.txt
index 3f3894aaeebc..3f3894aaeebc 100644
--- a/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/max17042_battery.txt
diff --git a/Documentation/devicetree/bindings/power_supply/max8925_batter.txt b/Documentation/devicetree/bindings/power/supply/max8925_batter.txt
index d7e3e0c0f71d..d7e3e0c0f71d 100644
--- a/Documentation/devicetree/bindings/power_supply/max8925_batter.txt
+++ b/Documentation/devicetree/bindings/power/supply/max8925_batter.txt
diff --git a/Documentation/devicetree/bindings/power_supply/olpc_battery.txt b/Documentation/devicetree/bindings/power/supply/olpc_battery.txt
index c8901b3992d9..c8901b3992d9 100644
--- a/Documentation/devicetree/bindings/power_supply/olpc_battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/olpc_battery.txt
diff --git a/Documentation/devicetree/bindings/power_supply/power_supply.txt b/Documentation/devicetree/bindings/power/supply/power_supply.txt
index 8391bfa0edac..8391bfa0edac 100644
--- a/Documentation/devicetree/bindings/power_supply/power_supply.txt
+++ b/Documentation/devicetree/bindings/power/supply/power_supply.txt
diff --git a/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom,coincell-charger.txt
index 747899223262..747899223262 100644
--- a/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom,coincell-charger.txt
diff --git a/Documentation/devicetree/bindings/power_supply/qcom_smbb.txt b/Documentation/devicetree/bindings/power/supply/qcom_smbb.txt
index 65b88fac854b..65b88fac854b 100644
--- a/Documentation/devicetree/bindings/power_supply/qcom_smbb.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom_smbb.txt
diff --git a/Documentation/devicetree/bindings/power/rt9455_charger.txt b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
index 5d9ad5cf2c5a..5d9ad5cf2c5a 100644
--- a/Documentation/devicetree/bindings/power/rt9455_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
diff --git a/Documentation/devicetree/bindings/power/rx51-battery.txt b/Documentation/devicetree/bindings/power/supply/rx51-battery.txt
index 90438453db58..90438453db58 100644
--- a/Documentation/devicetree/bindings/power/rx51-battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/rx51-battery.txt
diff --git a/Documentation/devicetree/bindings/power_supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
index c40e8926facf..c40e8926facf 100644
--- a/Documentation/devicetree/bindings/power_supply/sbs_sbs-battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt b/Documentation/devicetree/bindings/power/supply/ti,bq24735.txt
index 3bf55757ceec..3bf55757ceec 100644
--- a/Documentation/devicetree/bindings/power_supply/ti,bq24735.txt
+++ b/Documentation/devicetree/bindings/power/supply/ti,bq24735.txt
diff --git a/Documentation/devicetree/bindings/power_supply/tps65090.txt b/Documentation/devicetree/bindings/power/supply/tps65090.txt
index 8e5e0d3910df..8e5e0d3910df 100644
--- a/Documentation/devicetree/bindings/power_supply/tps65090.txt
+++ b/Documentation/devicetree/bindings/power/supply/tps65090.txt
diff --git a/Documentation/devicetree/bindings/power_supply/tps65217_charger.txt b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
index 98d131acee95..98d131acee95 100644
--- a/Documentation/devicetree/bindings/power_supply/tps65217_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
diff --git a/Documentation/devicetree/bindings/power/twl-charger.txt b/Documentation/devicetree/bindings/power/supply/twl-charger.txt
index 3b4ea1b73b38..3b4ea1b73b38 100644
--- a/Documentation/devicetree/bindings/power/twl-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/twl-charger.txt
diff --git a/Documentation/devicetree/bindings/regulator/ltc3676.txt b/Documentation/devicetree/bindings/regulator/ltc3676.txt
new file mode 100644
index 000000000000..d4eb366ce18c
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/ltc3676.txt
@@ -0,0 +1,94 @@
+Linear Technology LTC3676 8-output regulators
+
+Required properties:
+- compatible: "lltc,ltc3676"
+- reg: I2C slave address
+
+Required child node:
+- regulators: Contains eight regulator child nodes sw1, sw2, sw3, sw4,
+ ldo1, ldo2, ldo3, and ldo4, specifying the initialization data as
+ documented in Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Each regulator is defined using the standard binding for regulators. The
+nodes for sw1, sw2, sw3, sw4, ldo1, ldo2 and ldo4 additionally need to specify
+the resistor values of their external feedback voltage dividers:
+
+Required properties (not on ldo3):
+- lltc,fb-voltage-divider: An array of two integers containing the resistor
+ values R1 and R2 of the feedback voltage divider in ohms.
+
+Regulators sw1, sw2, sw3 and sw4 can regulate the feedback reference from
+412.5 mV to 800 mV in 12.5 mV steps. The output voltage thus ranges between
+0.4125 * (1 + R1/R2) V and 0.8 * (1 + R1/R2) V.
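+
+For example, with lltc,fb-voltage-divider = <127000 200000> (R1 = 127 kOhm,
+R2 = 200 kOhm), the output can be programmed between about 0.4125 * 1.635 =
+0.674 V and 0.8 * 1.635 = 1.308 V, which matches the 674400/1308000
+microvolt limits used for sw1 in the example below.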
+
+Regulators ldo1, ldo2, and ldo4 have a fixed 0.725 V reference and thus output
+0.725 * (1 + R1/R2) V. The ldo3 regulator is fixed to 1.8 V. The ldo1 standby
+regulator can not be disabled and thus should have the regulator-always-on
+property set.
+
+Example:
+
+ ltc3676: pmic@3c {
+ compatible = "lltc,ltc3676";
+ reg = <0x3c>;
+
+ regulators {
+ sw1_reg: sw1 {
+ regulator-min-microvolt = <674400>;
+ regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1033310>;
+				regulator-max-microvolt = <2004000>;
+ lltc,fb-voltage-divider = <301000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: sw3 {
+ regulator-min-microvolt = <674400>;
+				regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <868310>;
+				regulator-max-microvolt = <1684000>;
+ lltc,fb-voltage-divider = <221000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <2490375>;
+ regulator-max-microvolt = <2490375>;
+ lltc,fb-voltage-divider = <487000 200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-min-microvolt = <3023250>;
+ regulator-max-microvolt = <3023250>;
+ lltc,fb-voltage-divider = <634000 200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pv88080.txt b/Documentation/devicetree/bindings/regulator/pv88080.txt
index 38a614210dcb..e6e4b9c82d89 100644
--- a/Documentation/devicetree/bindings/regulator/pv88080.txt
+++ b/Documentation/devicetree/bindings/regulator/pv88080.txt
@@ -1,22 +1,28 @@
* Powerventure Semiconductor PV88080 Voltage Regulator
Required properties:
-- compatible: "pvs,pv88080".
-- reg: I2C slave address, usually 0x49.
+- compatible: Must be one of the following, depending on the
+ silicon version:
+ - "pvs,pv88080" (DEPRECATED)
+
+ - "pvs,pv88080-aa" for PV88080 AA or AB silicon
+ - "pvs,pv88080-ba" for PV88080 BA or BB silicon
+  NOTE: Use of the compatible string without a silicon version is deprecated.
+- reg: I2C slave address, usually 0x49
- interrupts: the interrupt outputs of the controller
- regulators: A node that houses a sub-node for each regulator within the
device. Each sub-node is identified using the node's name, with valid
values listed below. The content of each sub-node is defined by the
standard binding for regulators; see regulator.txt.
- BUCK1, BUCK2, and BUCK3.
+ BUCK1, BUCK2, BUCK3 and HVBUCK.
Optional properties:
- Any optional property defined in regulator.txt
-Example
+Example:
pmic: pv88080@49 {
- compatible = "pvs,pv88080";
+ compatible = "pvs,pv88080-ba";
reg = <0x49>;
interrupt-parent = <&gpio>;
interrupts = <24 24>;
@@ -45,5 +51,12 @@ Example
regulator-min-microamp = <1496000>;
regulator-max-microamp = <4189000>;
};
+
+ HVBUCK {
+ regulator-name = "hvbuck";
+ regulator-min-microvolt = < 5000>;
+ regulator-max-microvolt = <1275000>;
+ };
};
};
+
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index ecfc593cac15..6ab5aef619d9 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -13,7 +13,7 @@ Optional properties:
- regulator-allow-bypass: allow the regulator to go into bypass mode
- regulator-allow-set-load: allow the regulator performance level to be configured
- <name>-supply: phandle to the parent supply/regulator node
-- regulator-ramp-delay: ramp delay for regulator(in uV/uS)
+- regulator-ramp-delay: ramp delay for regulator (in uV/us)
For hardware which supports disabling ramp rate, it should be explicitly
initialised to zero (regulator-ramp-delay = <0>) for disabling ramp delay.
- regulator-enable-ramp-delay: The time taken, in microseconds, for the supply
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
new file mode 100644
index 000000000000..0d2361ebe3d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
@@ -0,0 +1,132 @@
+Qualcomm WCNSS Peripheral Image Loader
+
+This document defines the binding for a component that loads and boots firmware
+on the Qualcomm WCNSS core.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,riva-pil",
+ "qcom,pronto-v1-pil",
+ "qcom,pronto-v2-pil"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the CCU, DXE and
+ PMU register blocks
+
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "ccu", "dxe", "pmu"
+
+- interrupts-extended:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must list the watchdog and fatal IRQs and may specify the
+ ready, handover and stop-ack IRQs
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: should be "wdog", "fatal", optionally followed by "ready",
+ "handover", "stop-ack"
+
+- vddmx-supply:
+- vddcx-supply:
+- vddpx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the WCNSS core
+
+- qcom,smem-states:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: reference to the SMEM state used to indicate to WCNSS that
+ it should shut down
+
+- qcom,smem-state-names:
+ Usage: optional
+ Value type: <stringlist>
+ Definition: should be "stop"
+
+- memory-region:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to reserved-memory node for the remote processor
+ see ../reserved-memory/reserved-memory.txt
+
+= SUBNODES
+A single subnode of the WCNSS PIL describes the attached rf module and its
+resource dependencies.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,wcn3620",
+ "qcom,wcn3660",
+ "qcom,wcn3680"
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the xo clock and optionally the rf clock
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: should be "xo", optionally followed by "rf"
+
+- vddxo-supply:
+- vddrfa-supply:
+- vddpa-supply:
+- vdddig-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the WCNSS core
+
+= EXAMPLE
+The following example describes the resources needed to boot and control the WCNSS,
+with attached WCN3680, as it is commonly found on MSM8974 boards.
+
+pronto@fb204000 {
+ compatible = "qcom,pronto-v2-pil";
+ reg = <0xfb204000 0x2000>, <0xfb202000 0x1000>, <0xfb21b000 0x3000>;
+ reg-names = "ccu", "dxe", "pmu";
+
+ interrupts-extended = <&intc 0 149 1>,
+ <&wcnss_smp2p_slave 0 0>,
+ <&wcnss_smp2p_slave 1 0>,
+ <&wcnss_smp2p_slave 2 0>,
+ <&wcnss_smp2p_slave 3 0>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ vddmx-supply = <&pm8841_s1>;
+ vddcx-supply = <&pm8841_s2>;
+ vddpx-supply = <&pm8941_s3>;
+
+ qcom,smem-states = <&wcnss_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ memory-region = <&wcnss_region>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcnss_pin_a>;
+
+ iris {
+ compatible = "qcom,wcn3680";
+
+ clocks = <&rpmcc RPM_CXO_CLK_SRC>, <&rpmcc RPM_CXO_A2>;
+ clock-names = "xo", "rf";
+
+ vddxo-supply = <&pm8941_l6>;
+ vddrfa-supply = <&pm8941_l11>;
+ vddpa-supply = <&pm8941_l19>;
+ vdddig-supply = <&pm8941_s3>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/reset/st,stm32-rcc.txt b/Documentation/devicetree/bindings/reset/st,stm32-rcc.txt
new file mode 100644
index 000000000000..01db34375192
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/st,stm32-rcc.txt
@@ -0,0 +1,6 @@
+STMicroelectronics STM32 Peripheral Reset Controller
+====================================================
+
+The RCC IP is both a reset and a clock controller.
+
+Please see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
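+
+A peripheral node consumes an RCC-provided reset line with a phandle/index
+pair; a minimal sketch (the &rcc label and the reset index value are
+illustrative only):
+
+	usart1: serial@40011000 {
+		compatible = "st,stm32-usart", "st,stm32-uart";
+		reg = <0x40011000 0x400>;
+		interrupts = <37>;
+		resets = <&rcc 164>;
+	};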
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
new file mode 100644
index 000000000000..e6bbfccd56c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -0,0 +1,93 @@
+UniPhier reset controller
+
+
+System reset
+------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC.
+ "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC.
+ "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC.
+ "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC.
+ "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC.
+ "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC.
+ "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC.
+ "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC.
+- #reset-cells: should be 1.
+
+Example:
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-ld20-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ reset {
+ compatible = "socionext,uniphier-ld20-reset";
+ #reset-cells = <1>;
+ };
+
+ other nodes ...
+ };
+
+
+Media I/O (MIO) reset
+---------------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC.
+ "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC.
+ "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC.
+ "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC.
+ "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC.
+ "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC.
+ "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC.
+ "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC.
+- #reset-cells: should be 1.
+
+Example:
+
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-ld20-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ reset {
+ compatible = "socionext,uniphier-ld20-mio-reset";
+ #reset-cells = <1>;
+ };
+
+ other nodes ...
+ };
+
+
+Peripheral reset
+----------------
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC.
+ "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC.
+ "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC.
+ "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC.
+ "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC.
+ "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC.
+ "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC.
+- #reset-cells: should be 1.
+
+Example:
+
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-ld20-perictrl",
+ "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ reset {
+ compatible = "socionext,uniphier-ld20-peri-reset";
+ #reset-cells = <1>;
+ };
+
+ other nodes ...
+ };
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 936ab5b87324..f86bb06c39e9 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -42,6 +42,8 @@ Optional properties:
- auto-flow-control: one way to enable automatic flow control support. The
driver is allowed to detect support for the capability even without this
property.
+- tx-threshold: Specify the TX FIFO low water indication for parts with
+ programmable TX FIFO thresholds.
Note:
* fsl,ns16550:
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
new file mode 100644
index 000000000000..85ec5f2b1996
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
@@ -0,0 +1,46 @@
+* STMicroelectronics STM32 USART
+
+Required properties:
+- compatible: Can be either "st,stm32-usart", "st,stm32-uart",
+"st,stm32f7-usart" or "st,stm32f7-uart" depending on whether
+the device supports synchronous mode and is compatible with
+stm32(f4) or stm32f7.
+- reg: The address and length of the peripheral registers space
+- interrupts: The interrupt line of the USART instance
+- clocks: The input clock of the USART instance
+
+Optional properties:
+- pinctrl: The reference to the pin configuration
+- st,hw-flow-ctrl: bool flag to enable hardware flow control.
+- dmas: phandle(s) to DMA controller node(s). Refer to stm32-dma.txt
+- dma-names: "rx" and/or "tx"
+
+Examples:
+usart4: serial@40004c00 {
+ compatible = "st,stm32-uart";
+ reg = <0x40004c00 0x400>;
+ interrupts = <52>;
+ clocks = <&clk_pclk1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usart4>;
+};
+
+usart2: serial@40004400 {
+ compatible = "st,stm32-usart", "st,stm32-uart";
+ reg = <0x40004400 0x400>;
+ interrupts = <38>;
+ clocks = <&clk_pclk1>;
+ st,hw-flow-ctrl;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usart2 &pinctrl_usart2_rtscts>;
+};
+
+usart1: serial@40011000 {
+ compatible = "st,stm32-usart", "st,stm32-uart";
+ reg = <0x40011000 0x400>;
+ interrupts = <37>;
+ clocks = <&rcc 0 164>;
+ dmas = <&dma2 2 4 0x414 0x0>,
+ <&dma2 7 4 0x414 0x0>;
+ dma-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt b/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt
deleted file mode 100644
index bdb782918a72..000000000000
--- a/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MediaTek AUXADC
-===============
-
-The Auxiliary Analog/Digital Converter (AUXADC) is an ADC found
-in some Mediatek SoCs which among other things measures the temperatures
-in the SoC. It can be used directly with register accesses, but it is also
-used by thermal controller which reads the temperatures from the AUXADC
-directly via its own bus interface. See
-Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
-for the Thermal Controller which holds a phandle to the AUXADC.
-
-Required properties:
-- compatible: Must be "mediatek,mt8173-auxadc"
-- reg: Address range of the AUXADC unit
-
-Example:
-
-auxadc: auxadc@11001000 {
- compatible = "mediatek,mt8173-auxadc";
- reg = <0 0x11001000 0 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/sound/nau8810.txt b/Documentation/devicetree/bindings/sound/nau8810.txt
new file mode 100644
index 000000000000..05830e477acd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nau8810.txt
@@ -0,0 +1,16 @@
+NAU8810 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+ - compatible : "nuvoton,nau8810"
+
+ - reg : the I2C address of the device.
+
+Example:
+
+codec: nau8810@1a {
+ compatible = "nuvoton,nau8810";
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt
new file mode 100644
index 000000000000..5da7da4ea07a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt
@@ -0,0 +1,42 @@
+NVIDIA Tegra audio complex, with SGTL5000 CODEC
+
+Required properties:
+- compatible : "nvidia,tegra-audio-sgtl5000"
+- clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+ - pll_a
+ - pll_a_out0
+ - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
+- nvidia,model : The user-visible name of this sound complex.
+- nvidia,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the SGTL5000's pins (as documented in its binding), and the jacks
+ on the board:
+
+ * Headphone Jack
+ * Line In Jack
+ * Mic Jack
+
+- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
+ connected to the CODEC.
+- nvidia,audio-codec : The phandle of the SGTL5000 audio codec.
+
+Example:
+
+sound {
+ compatible = "toradex,tegra-audio-sgtl5000-apalis_t30",
+ "nvidia,tegra-audio-sgtl5000";
+ nvidia,model = "Toradex Apalis T30";
+ nvidia,audio-routing =
+ "Headphone Jack", "HP_OUT",
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack";
+ nvidia,i2s-controller = <&tegra_i2s2>;
+ nvidia,audio-codec = <&sgtl5000>;
+ clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
+ <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index 48129368d4d9..d9d8635ff94c 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -16,6 +16,24 @@ Required properties:
* "spkr-iomux"
- qcom,model : Name of the sound card.
+- qcom,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of msm8x16_wcd codec and the jacks on the board:
+
+ Power supplies:
+ * MIC BIAS External1
+ * MIC BIAS External2
+ * MIC BIAS Internal1
+ * MIC BIAS Internal2
+
+ Board connectors:
+ * Headset Mic
+	* Secondary Mic
+ * DMIC
+ * Ext Spk
+
Dai-link subnode properties and subnodes:
Required dai-link subnodes:
@@ -37,6 +55,18 @@ sound: sound {
reg-names = "mic-iomux", "spkr-iomux";
qcom,model = "DB410c";
+ qcom,audio-routing =
+ "MIC BIAS External1", "Handset Mic",
+ "MIC BIAS Internal2", "Headset Mic",
+ "MIC BIAS External1", "Secondary Mic",
+ "AMIC1", "MIC BIAS External1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1",
+ "DMIC1", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic1",
+ "DMIC2", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic2";
+
/* I2S - Internal codec */
internal-dai-link@0 {
cpu { /* PRIMARY */
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
deleted file mode 100644
index 255ece3043ad..000000000000
--- a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-Renesas Sampling Rate Convert Sound Card:
-
-Renesas Sampling Rate Convert Sound Card specifies audio DAI connections of SoC <-> codec.
-
-Required properties:
-
-- compatible : "renesas,rsrc-card{,<board>}"
- Examples with boards are:
- - "renesas,rsrc-card"
- - "renesas,rsrc-card,lager"
- - "renesas,rsrc-card,koelsch"
-Optional properties:
-
-- card_name : User specified audio sound card name, one string
- property.
-- cpu : CPU sub-node
-- codec : CODEC sub-node
-
-Optional subnode properties:
-
-- format : CPU/CODEC common audio format.
- "i2s", "right_j", "left_j" , "dsp_a"
- "dsp_b", "ac97", "pdm", "msb", "lsb"
-- frame-master : Indicates dai-link frame master.
- phandle to a cpu or codec subnode.
-- bitclock-master : Indicates dai-link bit clock master.
- phandle to a cpu or codec subnode.
-- bitclock-inversion : bool property. Add this if the
- dai-link uses bit clock inversion.
-- frame-inversion : bool property. Add this if the
- dai-link uses frame clock inversion.
-- convert-rate : platform specified sampling rate convert
-- convert-channels : platform specified converted channel size (2 - 8 ch)
-- audio-prefix : see audio-routing
-- audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources.
- use audio-prefix if some components is using same sink/sources naming.
- it can be used if compatible was "renesas,rsrc-card";
-
-Required CPU/CODEC subnodes properties:
-
-- sound-dai : phandle and port of CPU/CODEC
-
-Optional CPU/CODEC subnodes properties:
-
-- clocks / system-clock-frequency : specify subnode's clock if needed.
- it can be specified via "clocks" if system has
- clock node (= common clock), or "system-clock-frequency"
- (if system doens't support common clock)
- If a clock is specified, it is
- enabled with clk_prepare_enable()
- in dai startup() and disabled with
- clk_disable_unprepare() in dai
- shutdown().
-
-Example
-
-sound {
- compatible = "renesas,rsrc-card,lager";
-
- card-name = "rsnd-ak4643";
- format = "left_j";
- bitclock-master = <&sndcodec>;
- frame-master = <&sndcodec>;
-
- sndcpu: cpu {
- sound-dai = <&rcar_sound>;
- };
-
- sndcodec: codec {
- sound-dai = <&ak4643>;
- system-clock-frequency = <11289600>;
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt
new file mode 100644
index 000000000000..eac91db07178
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt
@@ -0,0 +1,22 @@
+ROCKCHIP with MAX98357A/RT5514/DA7219 codecs on GRU boards
+
+Required properties:
+- compatible: "rockchip,rk3399-gru-sound"
+- rockchip,cpu: The phandle of the Rockchip I2S controller that's
+ connected to the codecs
+- rockchip,codec: The phandle of the MAX98357A/RT5514/DA7219 codecs
+
+Optional properties:
+- dmic-wakeup-delay-ms : specify the delay time (in ms) needed for the DMIC
+  to become ready. If this property is present, recording is delayed by this
+  amount so that the rt5514 does not capture before the DMIC sends valid data.
+
+Example:
+
+sound {
+ compatible = "rockchip,rk3399-gru-sound";
+ rockchip,cpu = <&i2s0>;
+ rockchip,codec = <&max98357a &rt5514 &da7219>;
+ dmic-wakeup-delay-ms = <20>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5659.txt b/Documentation/devicetree/bindings/sound/rt5659.txt
index 5f79e7fde032..1766e0543fc5 100644
--- a/Documentation/devicetree/bindings/sound/rt5659.txt
+++ b/Documentation/devicetree/bindings/sound/rt5659.txt
@@ -12,6 +12,9 @@ Required properties:
Optional properties:
+- clocks: The phandle of the master clock to the CODEC
+- clock-names: Should be "mclk"
+
- realtek,in1-differential
- realtek,in3-differential
- realtek,in4-differential
diff --git a/Documentation/devicetree/bindings/sound/rt5660.txt b/Documentation/devicetree/bindings/sound/rt5660.txt
new file mode 100644
index 000000000000..30be5f921930
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5660.txt
@@ -0,0 +1,47 @@
+RT5660 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt5660".
+
+- reg : The I2C address of the device.
+
+Optional properties:
+
+- clocks: The phandle of the master clock to the CODEC
+- clock-names: Should be "mclk"
+
+- realtek,in1-differential
+- realtek,in3-differential
+ Boolean. Indicate MIC1/3 input are differential, rather than single-ended.
+
+- realtek,poweroff-in-suspend
+  Boolean. If the codec is powered off in suspend, a delay is added on
+  resume to wait for the codec power supply to become ready.
+
+- realtek,dmic1-data-pin
+ 0: dmic1 is not used
+ 1: using GPIO2 pin as dmic1 data pin
+ 2: using IN1P pin as dmic1 data pin
+
+Pins on the device (for linking into audio routes) for RT5660:
+
+ * DMIC L1
+ * DMIC R1
+ * IN1P
+ * IN1N
+ * IN2P
+ * IN3P
+ * IN3N
+ * SPO
+ * LOUTL
+ * LOUTR
+
+Example:
+
+rt5660 {
+ compatible = "realtek,rt5660";
+ reg = <0x1c>;
+};
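+
+A sketch with the optional properties populated; &mclk is assumed to be a
+fixed-rate clock provider, and the input/dmic choices are illustrative only:
+
+rt5660 {
+	compatible = "realtek,rt5660";
+	reg = <0x1c>;
+	clocks = <&mclk>;
+	clock-names = "mclk";
+	realtek,in1-differential;
+	realtek,dmic1-data-pin = <1>;
+};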
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
new file mode 100644
index 000000000000..7d3c974c6e2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -0,0 +1,30 @@
+RT5663/RT5668 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : One of "realtek,rt5663" or "realtek,rt5668".
+
+- reg : The I2C address of the device.
+
+- interrupts : The CODEC's interrupt output.
+
+Optional properties:
+
+Pins on the device (for linking into audio routes) for RT5663/RT5668:
+
+ * IN1P
+ * IN1N
+ * IN2P
+ * IN2N
+ * HPOL
+ * HPOR
+
+Example:
+
+codec: rt5663@12 {
+ compatible = "realtek,rt5663";
+ reg = <0x12>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index 59d862801e59..c7a93931fad2 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -22,6 +22,8 @@ Optional properties:
headphones are attached.
- simple-audio-card,mic-det-gpio : Reference to GPIO that signals when
a microphone is attached.
+- simple-audio-card,aux-devs : List of phandles pointing to auxiliary devices, such
+ as amplifiers, to be added to the sound card.
Optional subnodes:
@@ -162,3 +164,38 @@ sound {
};
};
};
+
+Example 3 - route audio from IMX6 SSI2 through TLV320DAC3100 codec
+through TPA6130A2 amplifier to headphones:
+
+&i2c0 {
+ codec: tlv320dac3100@18 {
+ compatible = "ti,tlv320dac3100";
+ ...
+	};
+
+ amp: tpa6130a2@60 {
+ compatible = "ti,tpa6130a2";
+ ...
+	};
+};
+
+sound {
+ compatible = "simple-audio-card";
+ ...
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&amp>;
+ simple-audio-card,cpu {
+ sound-dai = <&ssi2>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ clocks = ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
new file mode 100644
index 000000000000..d6fe47ed09af
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
@@ -0,0 +1,110 @@
+ASoC simple SCU Sound Card
+
+Simple-Card specifies audio DAI connections of SoC <-> codec.
+
+Required properties:
+
+- compatible : "simple-scu-audio-card"
+ "renesas,rsrc-card"
+
+Optional properties:
+
+- simple-audio-card,name : User specified audio sound card name, one string
+ property.
+- simple-audio-card,cpu : CPU sub-node
+- simple-audio-card,codec : CODEC sub-node
+
+Optional subnode properties:
+
+- simple-audio-card,format : CPU/CODEC common audio format.
+ "i2s", "right_j", "left_j" , "dsp_a"
+ "dsp_b", "ac97", "pdm", "msb", "lsb"
+- simple-audio-card,frame-master : Indicates dai-link frame master.
+ phandle to a cpu or codec subnode.
+- simple-audio-card,bitclock-master : Indicates dai-link bit clock master.
+ phandle to a cpu or codec subnode.
+- simple-audio-card,bitclock-inversion : bool property. Add this if the
+ dai-link uses bit clock inversion.
+- simple-audio-card,frame-inversion : bool property. Add this if the
+ dai-link uses frame clock inversion.
+- simple-audio-card,convert-rate : platform specified sampling rate convert
+- simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch)
+- simple-audio-card,prefix : see audio-routing
+- simple-audio-card,routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+	the second being the connection's source. Valid names for sources and
+	sinks are the CODEC's pins and the board connectors. Use
+	simple-audio-card,prefix if several components use the same sink/source
+	names; this also applies when the compatible is "renesas,rsrc-card".
+
+Required CPU/CODEC subnodes properties:
+
+- sound-dai : phandle and port of CPU/CODEC
+
+Optional CPU/CODEC subnodes properties:
+
+- clocks / system-clock-frequency : specify subnode's clock if needed.
+ it can be specified via "clocks" if system has
+ clock node (= common clock), or "system-clock-frequency"
+	(if the system doesn't support common clock)
+ If a clock is specified, it is
+ enabled with clk_prepare_enable()
+ in dai startup() and disabled with
+ clk_disable_unprepare() in dai
+ shutdown().
+
+Example 1. Sampling Rate Covert
+
+sound {
+ compatible = "simple-scu-audio-card";
+
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcodec>;
+ simple-audio-card,frame-master = <&sndcodec>;
+
+ simple-audio-card,convert-rate = <48000>; /* see audio_clk_a */
+
+ simple-audio-card,prefix = "ak4642";
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
+ "DAI0 Capture", "ak4642 Capture";
+
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&ak4643>;
+ system-clock-frequency = <11289600>;
+ };
+};
+
+Example 2. 2 CPU 1 Codec
+
+sound {
+ compatible = "renesas,rsrc-card";
+
+ card-name = "rsnd-ak4643";
+ format = "left_j";
+ bitclock-master = <&dpcmcpu>;
+ frame-master = <&dpcmcpu>;
+
+ convert-rate = <48000>; /* see audio_clk_a */
+
+ audio-prefix = "ak4642";
+ audio-routing = "ak4642 Playback", "DAI0 Playback",
+ "ak4642 Playback", "DAI1 Playback";
+
+ dpcmcpu: cpu@0 {
+ sound-dai = <&rcar_sound 0>;
+ };
+
+ cpu@1 {
+ sound-dai = <&rcar_sound 1>;
+ };
+
+ codec {
+ sound-dai = <&ak4643>;
+ clocks = <&audio_clock>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 16bcdfb6760e..745dc62f76ea 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -11,7 +11,9 @@ Documentation/devicetree/bindings/sound/simple-card.txt.
---------------------------------------
Required properties:
- - compatible: "st,sti-uni-player" or "st,sti-uni-reader"
+ - compatible: "st,stih407-uni-player-hdmi", "st,stih407-uni-player-pcm-out",
+ "st,stih407-uni-player-dac", "st,stih407-uni-player-spdif",
+ "st,stih407-uni-reader-pcm_in", "st,stih407-uni-reader-hdmi",
- st,syscfg: phandle to boot-device system configuration registers
@@ -33,32 +35,24 @@ Required properties:
"tx" for "st,sti-uni-player" compatibility
"rx" for "st,sti-uni-reader" compatibility
- - st,version: IP version integrated in SOC.
-
- - dai-name: DAI name that describes the IP.
-
- - st,mode: IP working mode depending on associated codec.
- "HDMI" connected to HDMI codec and support IEC HDMI formats (player only).
- "SPDIF" connected to SPDIF codec and support SPDIF formats (player only).
- "PCM" PCM standard mode for I2S or TDM bus.
- "TDM" TDM mode for TDM bus.
-
Required properties ("st,sti-uni-player" compatibility only):
- clocks: CPU_DAI IP clock source, listed in the same order than the
CPU_DAI properties.
- - st,uniperiph-id: internal SOC IP instance ID.
-
Optional properties:
- pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
external codecs connection.
- pinctrl-names: should contain only one value - "default".
+	- st,tdm-mode: declares TDM mode for the unireader and uniplayer IPs.
+	  Only compatible with IPs in charge of the external I2S/TDM bus.
+	  Should be declared depending on the associated codec.
+
Example:
- sti_uni_player1: sti-uni-player@1 {
- compatible = "st,sti-uni-player";
+ sti_uni_player1: sti-uni-player@0x8D81000 {
+ compatible = "st,stih407-uni-player-hdmi";
status = "okay";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
@@ -66,15 +60,12 @@ Example:
reg = <0x8D81000 0x158>;
interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
dmas = <&fdma0 3 0 1>;
- st,dai-name = "Uni Player #1 (I2S)";
dma-names = "tx";
- st,uniperiph-id = <1>;
- st,version = <5>;
- st,mode = "TDM";
+ st,tdm-mode = <1>;
};
- sti_uni_player2: sti-uni-player@2 {
- compatible = "st,sti-uni-player";
+ sti_uni_player2: sti-uni-player@0x8D82000 {
+ compatible = "st,stih407-uni-player-pcm-out";
status = "okay";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
@@ -82,15 +73,11 @@ Example:
reg = <0x8D82000 0x158>;
interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
dmas = <&fdma0 4 0 1>;
- dai-name = "Uni Player #2 (DAC)";
dma-names = "tx";
- st,uniperiph-id = <2>;
- st,version = <5>;
- st,mode = "PCM";
};
- sti_uni_player3: sti-uni-player@3 {
- compatible = "st,sti-uni-player";
+ sti_uni_player3: sti-uni-player@0x8D85000 {
+ compatible = "st,stih407-uni-player-spdif";
status = "okay";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
@@ -99,14 +86,10 @@ Example:
interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
dmas = <&fdma0 7 0 1>;
dma-names = "tx";
- dai-name = "Uni Player #3 (SPDIF)";
- st,uniperiph-id = <3>;
- st,version = <5>;
- st,mode = "SPDIF";
};
- sti_uni_reader1: sti-uni-reader@1 {
- compatible = "st,sti-uni-reader";
+ sti_uni_reader1: sti-uni-reader@0x8D84000 {
+ compatible = "st,stih407-uni-reader-hdmi";
status = "disabled";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
@@ -114,9 +97,6 @@ Example:
interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
dmas = <&fdma0 6 0 1>;
dma-names = "rx";
- dai-name = "Uni Reader #1 (HDMI RX)";
- st,version = <3>;
- st,mode = "PCM";
};
2) sti-sas-codec: internal audio codec IPs driver
diff --git a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
index 13503aa505a9..0230c4d20506 100644
--- a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
@@ -9,6 +9,7 @@ Required properties:
- compatible : should be one of the following:
- "allwinner,sun4i-a10-spdif": for the Allwinner A10 SoC
+ - "allwinner,sun6i-a31-spdif": for the Allwinner A31 SoC
- reg : Offset and length of the register set for the device.
@@ -25,6 +26,8 @@ Required properties:
"apb" clock for the spdif bus.
"spdif" clock for spdif controller.
+ - resets : reset specifier for the ahb reset (A31 and newer only)
+
Example:
spdif: spdif@01c21000 {
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index eff12be5e789..9340d2ddcc54 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -11,6 +11,7 @@ Required properties:
"ti,tlv320aic3110" - TLV320AIC3110 (stereo speaker amp, no MiniDSP)
"ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP)
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
+ "ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
- reg - <int> - I2C slave address
- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
@@ -37,9 +38,11 @@ CODEC output pins:
* MICBIAS
CODEC input pins:
- * MIC1LP
- * MIC1RP
- * MIC1LM
+ * MIC1LP, devices with ADC
+ * MIC1RP, devices with ADC
+ * MIC1LM, devices with ADC
+ * AIN1, devices without ADC
+ * AIN2, devices without ADC
The pins can be used in referring sound node's audio-routing property.
diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
new file mode 100644
index 000000000000..ad7ac80a3841
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
@@ -0,0 +1,233 @@
+Broadcom SPI controller
+
+The Broadcom SPI controller is a SPI master found on various SOCs, including
+BRCMSTB (BCM7XXX), Cygnus, NSP and NS2. The Broadcom Master SPI hw IP consists
+of:
+ MSPI : SPI master controller that can read from and write to a SPI slave device
+ BSPI : Broadcom SPI, which in combination with the MSPI hw IP provides
+        acceleration for flash reads and can be configured to do single, double
+        or quad lane I/O with 3-byte and 4-byte addressing support.
+
+ Supported Broadcom SoCs have one instance of the MSPI+BSPI controller IP.
+ The MSPI master can be used without the BSPI. BRCMSTB SoCs have an additional
+ instance of an MSPI master without the BSPI, to use with non-flash slave
+ devices that use the SPI protocol.
+
+Required properties:
+
+- #address-cells:
+ Must be <1>, as required by generic SPI binding.
+
+- #size-cells:
+ Must be <0>, also as required by generic SPI binding.
+
+- compatible:
+ Must be one of :
+ "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs
+ "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
+ BRCMSTB SoCs
+ "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi" : MSPI+BSPI on Cygnus, NSP
+ "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi" : NS2 SoCs
+
+- reg:
+ Define the bases and ranges of the associated I/O address spaces.
+ The required range is MSPI controller registers.
+
+- reg-names:
+    Names for the ranges given in 'reg'. The name of the first entry does not
+    matter, but the entry must correspond to the MSPI controller register range
+    mentioned in 'reg' above. The list will typically contain
+    - "bspi_regs": BSPI register range, not required with compatible
+      "spi-brcmstb-mspi"
+    - "mspi_regs": MSPI register range, required for all compatible strings
+    - "intr_regs", "intr_status_reg": interrupt and status registers for
+      NSP, NS2 and Cygnus SoCs
+
+- interrupts:
+ The interrupts used by the MSPI and/or BSPI controller.
+
+- interrupt-names:
+ Names of interrupts associated with MSPI
+ - "mspi_halted" :
+ - "mspi_done": Indicates that the requested SPI operation is complete.
+ - "spi_lr_fullness_reached" : Linear read BSPI pipe full
+ - "spi_lr_session_aborted" : Linear read BSPI pipe aborted
+ - "spi_lr_impatient" : Linear read BSPI requested when pipe empty
+ - "spi_lr_session_done" : Linear read BSPI session done
+
+- clocks:
+ A phandle to the reference clock for this block.
+
+Optional properties:
+
+
+- native-endian
+    Present when the SoC is big endian and the device uses big-endian register
+    reads and writes.
+
+Recommended optional m25p80 properties:
+- spi-rx-bus-width: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Examples:
+
+BRCMSTB SoC Example:
+
+ SPI Master (MSPI+BSPI) for SPI-NOR access:
+
+ spi@f03e3400 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi";
+ reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>;
+ reg-names = "cs_reg", "mspi", "bspi";
+ interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>;
+ interrupt-parent = <0x1c>;
+ interrupt-names = "mspi_halted",
+ "mspi_done",
+ "spi_lr_overread",
+ "spi_lr_session_done",
+ "spi_lr_impatient",
+ "spi_lr_session_aborted",
+ "spi_lr_fullness_reached";
+
+ clocks = <&hif_spi>;
+ clock-names = "sw_spi";
+
+ m25p80@0 {
+ #size-cells = <0x2>;
+ #address-cells = <0x2>;
+ compatible = "m25p80";
+ reg = <0x0>;
+ spi-max-frequency = <0x2625a00>;
+ spi-cpol;
+ spi-cpha;
+ m25p,fast-read;
+
+ flash0.bolt@0 {
+ reg = <0x0 0x0 0x0 0x100000>;
+ };
+
+ flash0.macadr@100000 {
+ reg = <0x0 0x100000 0x0 0x10000>;
+ };
+
+ flash0.nvram@110000 {
+ reg = <0x0 0x110000 0x0 0x10000>;
+ };
+
+ flash0.kernel@120000 {
+ reg = <0x0 0x120000 0x0 0x400000>;
+ };
+
+ flash0.devtree@520000 {
+ reg = <0x0 0x520000 0x0 0x10000>;
+ };
+
+ flash0.splash@530000 {
+ reg = <0x0 0x530000 0x0 0x80000>;
+ };
+
+ flash0@0 {
+ reg = <0x0 0x0 0x0 0x4000000>;
+ };
+ };
+ };
+
+
+ MSPI master for any SPI device :
+
+ spi@f0416000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&upg_fixed>;
+ compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi";
+ reg = <0xf0416000 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&irq0_aon_intc>;
+ interrupt-names = "mspi_done";
+ };
+
+iProc SoC Example:
+
+ qspi: spi@18027200 {
+ compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ reg = <0x18027200 0x184>,
+ <0x18027000 0x124>,
+ <0x1811c408 0x004>,
+ <0x180273a0 0x01c>;
+ reg-names = "mspi_regs", "bspi_regs", "intr_regs", "intr_status_reg";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names =
+ "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "mspi_done",
+ "mspi_halted";
+ clocks = <&iprocmed>;
+ clock-names = "iprocmed";
+ num-cs = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+
+ NS2 SoC Example:
+
+ qspi: spi@66470200 {
+ compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ reg = <0x66470200 0x184>,
+ <0x66470000 0x124>,
+ <0x67017408 0x004>,
+ <0x664703a0 0x01c>;
+ reg-names = "mspi", "bspi", "intr_regs",
+ "intr_status_reg";
+ interrupts = <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "spi_l1_intr";
+ clocks = <&iprocmed>;
+ clock-names = "iprocmed";
+ num-cs = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+
+ m25p80 node for NSP, NS2
+
+ &qspi {
+ flash: m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p80";
+ reg = <0x0>;
+ spi-max-frequency = <12500000>;
+ m25p,fast-read;
+ spi-cpol;
+ spi-cpha;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x000a0000>;
+ };
+
+ partition@a0000 {
+ label = "env";
+ reg = <0x000a0000 0x00060000>;
+ };
+
+ partition@100000 {
+ label = "system";
+ reg = <0x00100000 0x00600000>;
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x00700000 0x01900000>;
+ };
+  };
+ };
diff --git a/Documentation/devicetree/bindings/spi/jcore,spi.txt b/Documentation/devicetree/bindings/spi/jcore,spi.txt
new file mode 100644
index 000000000000..93936d16e139
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/jcore,spi.txt
@@ -0,0 +1,34 @@
+J-Core SPI master
+
+Required properties:
+
+- compatible: Must be "jcore,spi2".
+
+- reg: Memory region for registers.
+
+- #address-cells: Must be 1.
+
+- #size-cells: Must be 0.
+
+Optional properties:
+
+- clocks: If a phandle named "ref_clk" is present, SPI clock speed
+ programming is relative to the frequency of the indicated clock.
+ Necessary only if the input clock rate is something other than a
+ fixed 50 MHz.
+
+- clock-names: Clock names, one for each phandle in clocks.
+
+See spi-bus.txt for additional properties not specific to this device.
+
+Example:
+
+spi@40 {
+ compatible = "jcore,spi2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x40 0x8>;
+ spi-max-frequency = <25000000>;
+ clocks = <&bus_clk>;
+ clock-names = "ref_clk";
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 17822860cb98..4b1d6e74c744 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -31,7 +31,7 @@ with max(cs-gpios > hw cs).
So if for example the controller has 2 CS lines, and the cs-gpios
property looks like this:
-cs-gpios = <&gpio1 0 0> <0> <&gpio1 1 0> <&gpio1 2 0>;
+cs-gpios = <&gpio1 0 0>, <0>, <&gpio1 1 0>, <&gpio1 2 0>;
Then it should be configured so that num_chipselect = 4 with the
following mapping:
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
index bb52a86f3365..dc6d0313324a 100644
--- a/Documentation/devicetree/bindings/spi/spi-meson.txt
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -7,7 +7,7 @@ NOR memories, without DMA support and a 64-byte unified transmit /
receive buffer.
Required properties:
- - compatible: should be "amlogic,meson6-spifc"
+ - compatible: should be "amlogic,meson6-spifc" or "amlogic,meson-gxbb-spifc"
- reg: physical base address and length of the controller registers
- clocks: phandle of the input clock for the baud rate generator
- #address-cells: should be 1
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index 341dc67f3472..0e03344e2e8b 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -81,6 +81,8 @@ i.mx specific properties
- fsl,usbmisc: phandler of non-core register device, with one
argument that indicate usb controller index
- disable-over-current: disable over current detect
+- over-current-active-high: over-current signal polarity is active high;
+  typically, the over-current signal polarity is active low.
- external-vbus-divider: enables off-chip resistor divider for Vbus
Example:
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 20a68bf2b4e7..455f2c310a1b 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -10,6 +10,8 @@ Required properties:
- "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
- "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
- "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+ - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
+ - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
- snps,dwc2: A generic DWC2 USB controller with default parameters.
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
@@ -26,7 +28,10 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+
+Deprecated properties:
+- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
+ in gadget mode.
Example:
diff --git a/Documentation/devicetree/bindings/usb/dwc3-cavium.txt b/Documentation/devicetree/bindings/usb/dwc3-cavium.txt
new file mode 100644
index 000000000000..710b782ccf65
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/dwc3-cavium.txt
@@ -0,0 +1,28 @@
+Cavium SuperSpeed DWC3 USB SoC controller
+
+Required properties:
+- compatible: Should contain "cavium,octeon-7130-usb-uctl"
+
+Required child node:
+A child node must exist to represent the core DWC3 IP block. The name of
+the node is not important. The content of the node is defined in dwc3.txt.
+
+Example device node:
+
+ uctl@1180069000000 {
+ compatible = "cavium,octeon-7130-usb-uctl";
+ reg = <0x00011800 0x69000000 0x00000000 0x00000100>;
+ ranges;
+ #address-cells = <0x00000002>;
+ #size-cells = <0x00000002>;
+ refclk-frequency = <0x05f5e100>;
+ refclk-type-ss = "dlmc_ref_clk0";
+ refclk-type-hs = "dlmc_ref_clk0";
+ power = <0x00000002 0x00000002 0x00000001>;
+ xhci@1690000000000 {
+ compatible = "cavium,octeon-7130-xhci", "synopsys,dwc3";
+ reg = <0x00016900 0x00000000 0x00000010 0x00000000>;
+ interrupt-parent = <0x00000010>;
+ interrupts = <0x00000009 0x00000004>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 7d7ce089b003..e3e6983288e3 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -13,7 +13,8 @@ Optional properties:
in the array is expected to be a handle to the USB2/HS PHY and
the second element is expected to be a handle to the USB3/SS PHY
- phys: from the *Generic PHY* bindings
- - phy-names: from the *Generic PHY* bindings
+ - phy-names: from the *Generic PHY* bindings; supported names are "usb2-phy"
+ or "usb3-phy".
- snps,usb3_lpm_capable: determines if platform is USB3 LPM capable
- snps,disable_scramble_quirk: true when SW should disable data scrambling.
Only really useful for FPGA builds.
@@ -39,6 +40,11 @@ Optional properties:
disabling the suspend signal to the PHY.
- snps,dis_rxdet_inp3_quirk: when set core will disable receiver detection
in PHY P3 power state.
+ - snps,dis-u2-freeclk-exists-quirk: when set, clear the u2_freeclk_exists bit
+   in GUSB2PHYCFG, specifying that the USB2 PHY doesn't provide a
+   free-running PHY clock.
+ - snps,dis-del-phy-power-chg-quirk: when set core will change PHY power
+ from P0 to P1/P2/P3 without delay.
- snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
utmi_l1_suspend_n, false when asserts utmi_sleep_n
- snps,hird-threshold: HIRD threshold
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index bba825711873..bfadeb1c3bab 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -11,6 +11,11 @@ Optional properties:
"peripheral" and "otg". In case this attribute isn't
passed via DT, USB DRD controllers should default to
OTG.
+ - phy_type: tells USB controllers that we want to configure the core to support
+ a UTMI+ PHY with an 8- or 16-bit interface if UTMI+ is
+ selected. Valid arguments are "utmi" and "utmi_wide".
+ In case this isn't passed via DT, USB controllers should
+ default to HW capability.
- otg-rev: tells usb driver the release number of the OTG and EH supplement
with which the device and its descriptors are compliant,
in binary-coded decimal (i.e. 2.0 is 0200H). This
@@ -34,6 +39,7 @@ dwc3@4a030000 {
usb-phy = <&usb2_phy>, <&usb3,phy>;
maximum-speed = "super-speed";
dr_mode = "otg";
+ phy_type = "utmi_wide";
otg-rev = <0x0200>;
adp-disable;
};
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index b6040563e51a..9e18e000339e 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -9,6 +9,7 @@ Required properties:
- "renesas,usbhs-r8a7793" for r8a7793 (R-Car M2-N) compatible device
- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
+ - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 compatible device
- "renesas,rcar-gen3-usbhs" for R-Car Gen3 compatible device
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
new file mode 100644
index 000000000000..0536a938e3ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -0,0 +1,59 @@
+Rockchip SuperSpeed DWC3 USB SoC controller
+
+Required properties:
+- compatible: should contain "rockchip,rk3399-dwc3" for rk3399 SoC
+- clocks: A list of phandle + clock-specifier pairs for the
+ clocks listed in clock-names
+- clock-names: Should contain the following:
+ "ref_clk" Controller reference clk, have to be 24 MHz
+ "suspend_clk" Controller suspend clk, have to be 24 MHz or 32 KHz
+ "bus_clk" Master/Core clock, have to be >= 62.5 MHz for SS
+ operation and >= 30MHz for HS operation
+ "grf_clk" Controller grf clk
+
+Required child node:
+A child node must exist to represent the core DWC3 IP block. The name of
+the node is not important. The content of the node is defined in dwc3.txt.
+
+Phy documentation is provided in the following places:
+Documentation/devicetree/bindings/phy/rockchip,dwc3-usb-phy.txt
+
+Example device nodes:
+
+ usbdrd3_0: usb@fe800000 {
+ compatible = "rockchip,rk3399-dwc3";
+ clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
+ <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "grf_clk";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+ usbdrd_dwc3_0: dwc3@fe800000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe800000 0x0 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "otg";
+ status = "disabled";
+ };
+ };
+
+ usbdrd3_1: usb@fe900000 {
+ compatible = "rockchip,rk3399-dwc3";
+ clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>,
+ <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "grf_clk";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+ usbdrd_dwc3_1: dwc3@fe900000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe900000 0x0 0x100000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "otg";
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb4604.txt b/Documentation/devicetree/bindings/usb/usb4604.txt
new file mode 100644
index 000000000000..82506d17712c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb4604.txt
@@ -0,0 +1,19 @@
+SMSC USB4604 High-Speed Hub Controller
+
+Required properties:
+- compatible: Should be "smsc,usb4604"
+
+Optional properties:
+- reg: Specifies the I2C slave address; it is required and should be 0x2d
+  if I2C is used.
+- reset-gpios: Should specify GPIO for reset.
+- initial-mode: Should specify initial mode.
+ (1 for HUB mode, 2 for STANDBY mode)
+
+Examples:
+ usb-hub@2d {
+ compatible = "smsc,usb4604";
+ reg = <0x2d>;
+ reset-gpios = <&gpx3 5 1>;
+ initial-mode = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
index 3539d4e7d23e..f1e27faf528e 100644
--- a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
+++ b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
@@ -6,6 +6,7 @@ Required properties:
"fsl,imx6q-usbmisc" for imx6q
"fsl,vf610-usbmisc" for Vybrid vf610
"fsl,imx6sx-usbmisc" for imx6sx
+ "fsl,imx7d-usbmisc" for imx7d
- reg: Should contain registers location and length
Examples:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 1992aa97d45a..24c6f658bce1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -3,8 +3,8 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order.
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.
-abilis Abilis Systems
abcn Abracon Corporation
+abilis Abilis Systems
active-semi Active-Semi International Inc
ad Avionic Design GmbH
adapteva Adapteva, Inc.
@@ -36,6 +36,7 @@ aspeed ASPEED Technology Inc.
atlas Atlas Scientific LLC
atmel Atmel Corporation
auo AU Optronics Corporation
+auvidea Auvidea GmbH
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
axis Axis Communications AB
@@ -75,6 +76,7 @@ digilent Diglent, Inc.
dlg Dialog Semiconductor
dlink D-Link Corporation
dmo Data Modul AG
+domintech Domintech Co., Ltd.
dptechnics DPTechnics
dragino Dragino Technology Co., Limited
ea Embedded Artists AB
@@ -85,6 +87,7 @@ elan Elan Microelectronic Corp.
embest Shenzhen Embest Technology Co., Ltd.
emmicro EM Microelectronic
energymicro Silicon Laboratories (formerly Energy Micro AS)
+engicam Engicam S.r.l.
epcos EPCOS AG
epfl Ecole Polytechnique Fédérale de Lausanne
epson Seiko Epson Corp.
@@ -98,11 +101,12 @@ ezchip EZchip Semiconductor
fcs Fairchild Semiconductor
firefly Firefly
focaltech FocalTech Systems Co.,Ltd
+friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd
fsl Freescale Semiconductor
ge General Electric Company
geekbuying GeekBuying
-GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
geniatech Geniatech, Inc.
giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
@@ -126,7 +130,6 @@ i2se I2SE GmbH
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
ifi Ingenieurburo Fur Ic-Technologie (I/F/I)
-iom Iomega Corporation
img Imagination Technologies Ltd.
infineon Infineon Technologies
inforce Inforce Computing
@@ -135,11 +138,15 @@ innolux Innolux Corporation
intel Intel Corporation
intercontrol Inter Control Group
invensense InvenSense Inc.
+inversepath Inverse Path
+iom Iomega Corporation
isee ISEE 2007 S.L.
isil Intersil
issi Integrated Silicon Solutions Inc.
+jdi Japan Display Inc.
jedec JEDEC Solid State Technology Association
karo Ka-Ro electronics GmbH
+keithkoep Keith & Koep GmbH
keymile Keymile GmbH
kinetic Kinetic Technologies
kosagi Sutajio Ko-Usagi PTE Ltd.
@@ -149,8 +156,8 @@ lantiq Lantiq Semiconductor
lenovo Lenovo Group Ltd.
lg LG Corporation
linux Linux-specific binding
-lsi LSI Corp. (LSI Logic)
lltc Linear Technology Corporation
+lsi LSI Corp. (LSI Logic)
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
meas Measurement Specialties
@@ -190,20 +197,20 @@ onnn ON Semiconductor Corp.
ontat On Tat Industrial Company
opencores OpenCores.org
option Option NV
+ORCL Oracle Corporation
ortustech Ortus Technology Co., Ltd.
ovti OmniVision Technologies
-ORCL Oracle Corporation
oxsemi Oxford Semiconductor, Ltd.
panasonic Panasonic Corporation
parade Parade Technologies Inc.
pericom Pericom Technology Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
+pixcir PIXCIR MICROELECTRONICS Co., Ltd
plathome Plat'Home Co., Ltd.
plda PLDA
-pixcir PIXCIR MICROELECTRONICS Co., Ltd
-pulsedlight PulsedLight, Inc
powervr PowerVR (deprecated, use img)
+pulsedlight PulsedLight, Inc
qca Qualcomm Atheros, Inc.
qcom Qualcomm Technologies, Inc
qemu QEMU, a generic and open source machine emulator and virtualizer
@@ -231,12 +238,13 @@ sgx SGX Sensortech
sharp Sharp Corporation
si-en Si-En Technology Ltd.
sigma Sigma Designs, Inc.
+sii Seiko Instruments, Inc.
sil Silicon Image
silabs Silicon Laboratories
+silead Silead Inc.
+silergy Silergy Corp.
siliconmitus Silicon Mitus, Inc.
simtek
-sii Seiko Instruments, Inc.
-silergy Silergy Corp.
sirf SiRF Technology, Inc.
sis Silicon Integrated Systems Corp.
sitronix Sitronix Technology Corporation
@@ -254,9 +262,12 @@ starry Starry Electronic Technology (ShenZhen) Co., LTD
startek Startek
ste ST-Ericsson
stericsson ST-Ericsson
+summit Summit microelectronics
+sunchip Shenzhen Sunchip Technology Co., Ltd
+SUNW Sun Microsystems, Inc
+swir Sierra Wireless
syna Synaptics Inc.
synology Synology, Inc.
-SUNW Sun Microsystems, Inc
tbs TBS Technologies
tcg Trusted Computing Group
tcl Toby Churchill Ltd.
@@ -265,17 +276,19 @@ technologic Technologic Systems
thine THine Electronics, Inc.
ti Texas Instruments
tlm Trusted Logic Mobility
+topeet Topeet
toradex Toradex AG
toshiba Toshiba Corporation
toumaz Toumaz
-tplink TP-LINK Technologies Co., Ltd.
tpk TPK U.S.A. LLC
+tplink TP-LINK Technologies Co., Ltd.
+tpo TPO
tronfy Tronfy
tronsmart Tronsmart
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
-upisemi uPI Semiconductor Corp.
uniwest United Western Technologies Corp (UniWest)
+upisemi uPI Semiconductor Corp.
urt United Radiant Technology Corporation
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor
@@ -293,7 +306,7 @@ x-powers X-Powers
xes Extreme Engineering Solutions (X-ES)
xillybus Xillybus Ltd.
xlnx Xilinx
-zyxel ZyXEL Communications Corp.
zarlink Zarlink Semiconductor
zii Zodiac Inflight Innovations
zte ZTE Corp.
+zyxel ZyXEL Communications Corp.
diff --git a/Documentation/devicetree/changesets.txt b/Documentation/devicetree/changesets.txt
index 935ba5acc34e..cb488eeb6353 100644
--- a/Documentation/devicetree/changesets.txt
+++ b/Documentation/devicetree/changesets.txt
@@ -21,20 +21,11 @@ a set of changes. No changes to the active tree are made at this point.
All the change operations are recorded in the of_changeset 'entries'
list.
-3. mutex_lock(of_mutex) - starts a changeset; The global of_mutex
-ensures there can only be one editor at a time.
-
-4. of_changeset_apply() - Apply the changes to the tree. Either the
+3. of_changeset_apply() - Apply the changes to the tree. Either the
entire changeset will get applied, or if there is an error the tree will
-be restored to the previous state
-
-5. mutex_unlock(of_mutex) - All operations complete, release the mutex
+be restored to the previous state. The core ensures proper serialization
+through locking. An unlocked version __of_changeset_apply is available,
+if needed.
If a successfully applied changeset needs to be removed, it can be done
-with the following sequence.
-
-1. mutex_lock(of_mutex)
-
-2. of_changeset_revert()
-
-3. mutex_unlock(of_mutex)
+with of_changeset_revert().
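+
+As an illustration, here is a minimal sketch (not part of the kernel
+documentation proper) of the sequence above for a driver that attaches a
+single, already-populated property to a node; the example_add_prop() name
+is purely hypothetical:
+
+	#include <linux/of.h>
+
+	static int example_add_prop(struct device_node *np, struct property *prop)
+	{
+		struct of_changeset ocs;
+		int ret;
+
+		of_changeset_init(&ocs);		/* 1. empty changeset */
+
+		ret = of_changeset_add_property(&ocs, np, prop); /* 2. record change */
+		if (ret)
+			goto out;
+
+		ret = of_changeset_apply(&ocs);		/* 3. apply; locked internally */
+		if (ret)
+			goto out;
+
+		/* ... later, if the applied changeset must be undone ... */
+		ret = of_changeset_revert(&ocs);
+	out:
+		of_changeset_destroy(&ocs);
+		return ret;
+	}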
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 91ce82d5f0c4..c4fd47540b31 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -282,6 +282,17 @@ supported.
that is supposed to push the current
transaction descriptor to a pending queue, waiting
for issue_pending to be called.
+  - In this structure the function pointer callback_result can be
+    initialized in order for the submitter to be notified that a
+    transaction has completed. Earlier code used the function pointer
+    callback; however, it does not report any status for the transaction
+    and will be deprecated. The result structure, dmaengine_result, that
+    is passed in to callback_result has two fields:
+    + result: This provides the transfer result defined by
+      dmaengine_tx_result: either success or some error condition.
+    + residue: Provides the residue bytes of the transfer for those that
+      support residue.
* device_issue_pending
- Takes the first transaction descriptor in the pending queue,
diff --git a/Documentation/docutils.conf b/Documentation/docutils.conf
new file mode 100644
index 000000000000..2830772264c8
--- /dev/null
+++ b/Documentation/docutils.conf
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 mode: conf-colon -*-
+#
+# docutils configuration file
+# http://docutils.sourceforge.net/docs/user/config.html
+
+[general]
+halt_level: severe \ No newline at end of file
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
new file mode 100644
index 000000000000..935b9b8d456c
--- /dev/null
+++ b/Documentation/driver-api/basics.rst
@@ -0,0 +1,120 @@
+Driver Basics
+=============
+
+Driver Entry and Exit points
+----------------------------
+
+.. kernel-doc:: include/linux/init.h
+ :internal:
+
+Atomic and pointer manipulation
+-------------------------------
+
+.. kernel-doc:: arch/x86/include/asm/atomic.h
+ :internal:
+
+Delaying, scheduling, and timer routines
+----------------------------------------
+
+.. kernel-doc:: include/linux/sched.h
+ :internal:
+
+.. kernel-doc:: kernel/sched/core.c
+ :export:
+
+.. kernel-doc:: kernel/sched/cpupri.c
+ :internal:
+
+.. kernel-doc:: kernel/sched/fair.c
+ :internal:
+
+.. kernel-doc:: include/linux/completion.h
+ :internal:
+
+.. kernel-doc:: kernel/time/timer.c
+ :export:
+
+Wait queues and Wake events
+---------------------------
+
+.. kernel-doc:: include/linux/wait.h
+ :internal:
+
+.. kernel-doc:: kernel/sched/wait.c
+ :export:
+
+High-resolution timers
+----------------------
+
+.. kernel-doc:: include/linux/ktime.h
+ :internal:
+
+.. kernel-doc:: include/linux/hrtimer.h
+ :internal:
+
+.. kernel-doc:: kernel/time/hrtimer.c
+ :export:
+
+Workqueues and Kevents
+----------------------
+
+.. kernel-doc:: include/linux/workqueue.h
+ :internal:
+
+.. kernel-doc:: kernel/workqueue.c
+ :export:
+
+Internal Functions
+------------------
+
+.. kernel-doc:: kernel/exit.c
+ :internal:
+
+.. kernel-doc:: kernel/signal.c
+ :internal:
+
+.. kernel-doc:: include/linux/kthread.h
+ :internal:
+
+.. kernel-doc:: kernel/kthread.c
+ :export:
+
+Kernel objects manipulation
+---------------------------
+
+.. kernel-doc:: lib/kobject.c
+ :export:
+
+Kernel utility functions
+------------------------
+
+.. kernel-doc:: include/linux/kernel.h
+ :internal:
+
+.. kernel-doc:: kernel/printk/printk.c
+ :export:
+
+.. kernel-doc:: kernel/panic.c
+ :export:
+
+.. kernel-doc:: kernel/sys.c
+ :export:
+
+.. kernel-doc:: kernel/rcu/srcu.c
+ :export:
+
+.. kernel-doc:: kernel/rcu/tree.c
+ :export:
+
+.. kernel-doc:: kernel/rcu/tree_plugin.h
+ :export:
+
+.. kernel-doc:: kernel/rcu/update.c
+ :export:
+
+Device Resource Management
+--------------------------
+
+.. kernel-doc:: drivers/base/devres.c
+ :export:
+
diff --git a/Documentation/driver-api/frame-buffer.rst b/Documentation/driver-api/frame-buffer.rst
new file mode 100644
index 000000000000..9dd3060f027d
--- /dev/null
+++ b/Documentation/driver-api/frame-buffer.rst
@@ -0,0 +1,62 @@
+Frame Buffer Library
+====================
+
+The frame buffer drivers depend heavily on four data structures. These
+structures are declared in include/linux/fb.h. They are fb_info,
+fb_var_screeninfo, fb_fix_screeninfo and fb_monospecs. The last
+three can be made available to and from userland.
+
+fb_info defines the current state of a particular video card. Inside
+fb_info, there exists a fb_ops structure which is a collection of
+needed functions to make fbdev and fbcon work. fb_info is only visible
+to the kernel.
+
+fb_var_screeninfo is used to describe the features of a video card
+that are user defined. With fb_var_screeninfo, things such as depth
+and the resolution may be defined.
+
+The next structure is fb_fix_screeninfo. This defines the properties
+of a card that are created when a mode is set and can't be changed
+otherwise. A good example of this is the start of the frame buffer
+memory. This "locks" the address of the frame buffer memory, so that it
+cannot be changed or moved.
+
+The last structure is fb_monospecs. In the old API, there was little
+importance for fb_monospecs. This allowed for forbidden things such as
+setting a mode of 800x600 on a fixed frequency monitor. With the new API,
+fb_monospecs prevents such things, and if used correctly, can prevent a
+monitor from being cooked. fb_monospecs will not be useful until
+kernels 2.5.x.
+
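+As a quick orientation, here is an illustrative sketch (not taken from any
+in-tree driver; the ``foo_*`` names are hypothetical) of how a driver ties
+these structures together through an ``fb_ops`` table::
+
+    #include <linux/fb.h>
+    #include <linux/module.h>
+
+    static int foo_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+    {
+            return 0;       /* validate/adjust the user-requested mode here */
+    }
+
+    static int foo_set_par(struct fb_info *info)
+    {
+            return 0;       /* program the hardware from info->var here */
+    }
+
+    static struct fb_ops foo_fb_ops = {
+            .owner          = THIS_MODULE,
+            .fb_check_var   = foo_check_var,
+            .fb_set_par     = foo_set_par,
+            .fb_fillrect    = cfb_fillrect,   /* generic software drawing helpers */
+            .fb_copyarea    = cfb_copyarea,
+            .fb_imageblit   = cfb_imageblit,
+    };
+
+    /* in the probe routine, after framebuffer_alloc(): info->fbops = &foo_fb_ops; */
+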
+Frame Buffer Memory
+-------------------
+
+.. kernel-doc:: drivers/video/fbdev/core/fbmem.c
+ :export:
+
+Frame Buffer Colormap
+---------------------
+
+.. kernel-doc:: drivers/video/fbdev/core/fbcmap.c
+ :export:
+
+Frame Buffer Video Mode Database
+--------------------------------
+
+.. kernel-doc:: drivers/video/fbdev/core/modedb.c
+ :internal:
+
+.. kernel-doc:: drivers/video/fbdev/core/modedb.c
+ :export:
+
+Frame Buffer Macintosh Video Mode Database
+------------------------------------------
+
+.. kernel-doc:: drivers/video/fbdev/macmodes.c
+ :export:
+
+Frame Buffer Fonts
+------------------
+
+Refer to the file lib/fonts/fonts.c for more information.
+
diff --git a/Documentation/driver-api/hsi.rst b/Documentation/driver-api/hsi.rst
new file mode 100644
index 000000000000..f9cec02b72a1
--- /dev/null
+++ b/Documentation/driver-api/hsi.rst
@@ -0,0 +1,88 @@
+High Speed Synchronous Serial Interface (HSI)
+=============================================
+
+Introduction
+---------------
+
+High Speed Synchronous Interface (HSI) is a full-duplex, low-latency protocol
+that is optimized for die-level interconnect between an Application Processor
+and a Baseband chipset. It was specified by the MIPI alliance in 2003 and has
+been implemented by multiple vendors since then.
+
+The HSI interface supports full duplex communication over multiple channels
+(typically 8) and is capable of reaching speeds up to 200 Mbit/s.
+
+The serial protocol uses two signals, DATA and FLAG as combined data and clock
+signals and an additional READY signal for flow control. An additional WAKE
+signal can be used to wakeup the chips from standby modes. The signals are
+commonly prefixed by AC for signals going from the application die to the
+cellular die and CA for signals going the other way around.
+
+::
+
+ +------------+ +---------------+
+ | Cellular | | Application |
+ | Die | | Die |
+ | | - - - - - - CAWAKE - - - - - - >| |
+ | T|------------ CADATA ------------>|R |
+ | X|------------ CAFLAG ------------>|X |
+ | |<----------- ACREADY ------------| |
+ | | | |
+ | | | |
+ | |< - - - - - ACWAKE - - - - - - -| |
+ | R|<----------- ACDATA -------------|T |
+ | X|<----------- ACFLAG -------------|X |
+ | |------------ CAREADY ----------->| |
+ | | | |
+ | | | |
+ +------------+ +---------------+
+
+HSI Subsystem in Linux
+-------------------------
+
+In the Linux kernel the hsi subsystem is supposed to be used for HSI devices.
+The hsi subsystem contains drivers for hsi controllers including support for
+multi-port controllers and provides a generic API for using the HSI ports.
+
+It also contains HSI client drivers, which make use of the generic API to
+implement a protocol used on the HSI interface. These client drivers can
+use an arbitrary number of channels.
+
+hsi-char Device
+------------------
+
+Each port automatically registers a generic client driver called hsi_char,
+which provides a character device for userspace representing the HSI port.
+It can be used to communicate via HSI from userspace. Userspace may
+configure the hsi_char device using the following ioctl commands:
+
+HSC_RESET
+ flush the HSI port
+
+HSC_SET_PM
+ enable or disable the client.
+
+HSC_SEND_BREAK
+ send break
+
+HSC_SET_RX
+ set RX configuration
+
+HSC_GET_RX
+ get RX configuration
+
+HSC_SET_TX
+ set TX configuration
+
+HSC_GET_TX
+ get TX configuration
+
+The kernel HSI API
+------------------
+
+.. kernel-doc:: include/linux/hsi/hsi.h
+ :internal:
+
+.. kernel-doc:: drivers/hsi/hsi_core.c
+ :export:
+
diff --git a/Documentation/driver-api/i2c.rst b/Documentation/driver-api/i2c.rst
new file mode 100644
index 000000000000..f3939f7852bd
--- /dev/null
+++ b/Documentation/driver-api/i2c.rst
@@ -0,0 +1,46 @@
+I\ :sup:`2`\ C and SMBus Subsystem
+==================================
+
+I\ :sup:`2`\ C (or without fancy typography, "I2C") is an acronym for
+the "Inter-IC" bus, a simple bus protocol which is widely used where low
+data rate communications suffice. Since it's also a licensed trademark,
+some vendors use another name (such as "Two-Wire Interface", TWI) for
+the same bus. I2C only needs two signals (SCL for clock, SDA for data),
+conserving board real estate and minimizing signal quality issues. Most
+I2C devices use seven bit addresses, and bus speeds of up to 400 kHz;
+there's a high speed extension (3.4 MHz) that's not yet found wide use.
+I2C is a multi-master bus; open drain signaling is used to arbitrate
+between masters, as well as to handshake and to synchronize clocks from
+slower clients.
+
+The Linux I2C programming interfaces support only the master side of bus
+interactions, not the slave side. The programming interface is
+structured around two kinds of driver, and two kinds of device. An I2C
+"Adapter Driver" abstracts the controller hardware; it binds to a
+physical device (perhaps a PCI device or platform_device) and exposes a
+:c:type:`struct i2c_adapter <i2c_adapter>` representing each
+I2C bus segment it manages. On each I2C bus segment will be I2C devices
+represented by a :c:type:`struct i2c_client <i2c_client>`.
+Those devices will be bound to a :c:type:`struct i2c_driver
+<i2c_driver>`, which should follow the standard Linux driver
+model. (At this writing, a legacy model is more widely used.) There are
+functions to perform various I2C protocol operations; at this writing
+all such functions are usable only from task context.
+
+The System Management Bus (SMBus) is a sibling protocol. Most SMBus
+systems are also I2C conformant. The electrical constraints are tighter
+for SMBus, and it standardizes particular protocol messages and idioms.
+Controllers that support I2C can also support most SMBus operations, but
+SMBus controllers don't support all the protocol options that an I2C
+controller will. There are functions to perform various SMBus protocol
+operations, either using I2C primitives or by issuing SMBus commands to
+i2c_adapter devices which don't support those I2C operations.
+
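+As an orientation only, a minimal (hypothetical) client driver looks roughly
+like this; the ``foo`` names are illustrative and not a real device::
+
+    #include <linux/i2c.h>
+    #include <linux/module.h>
+
+    static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
+    {
+            /* e.g. read one register over SMBus to verify the device is there */
+            int val = i2c_smbus_read_byte_data(client, 0x00);
+
+            return val < 0 ? val : 0;
+    }
+
+    static const struct i2c_device_id foo_ids[] = {
+            { "foo", 0 },
+            { }
+    };
+    MODULE_DEVICE_TABLE(i2c, foo_ids);
+
+    static struct i2c_driver foo_driver = {
+            .driver   = { .name = "foo" },
+            .probe    = foo_probe,
+            .id_table = foo_ids,
+    };
+    module_i2c_driver(foo_driver);
+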
+.. kernel-doc:: include/linux/i2c.h
+ :internal:
+
+.. kernel-doc:: drivers/i2c/i2c-boardinfo.c
+ :functions: i2c_register_board_info
+
+.. kernel-doc:: drivers/i2c/i2c-core.c
+ :export:
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
new file mode 100644
index 000000000000..8e259c5d0322
--- /dev/null
+++ b/Documentation/driver-api/index.rst
@@ -0,0 +1,26 @@
+========================================
+The Linux driver implementer's API guide
+========================================
+
+The kernel offers a wide variety of interfaces to support the development
+of device drivers. This document is an only somewhat organized collection
+of some of those interfaces — it will hopefully get better over time! The
+available subsections can be seen below.
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ basics
+ infrastructure
+ message-based
+ sound
+ frame-buffer
+ input
+ spi
+ i2c
+ hsi
+ miscellaneous
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
new file mode 100644
index 000000000000..5d50d6733db3
--- /dev/null
+++ b/Documentation/driver-api/infrastructure.rst
@@ -0,0 +1,169 @@
+Device drivers infrastructure
+=============================
+
+The Basic Device Driver-Model Structures
+----------------------------------------
+
+.. kernel-doc:: include/linux/device.h
+ :internal:
+
+Device Drivers Base
+-------------------
+
+.. kernel-doc:: drivers/base/init.c
+ :internal:
+
+.. kernel-doc:: drivers/base/driver.c
+ :export:
+
+.. kernel-doc:: drivers/base/core.c
+ :export:
+
+.. kernel-doc:: drivers/base/syscore.c
+ :export:
+
+.. kernel-doc:: drivers/base/class.c
+ :export:
+
+.. kernel-doc:: drivers/base/node.c
+ :internal:
+
+.. kernel-doc:: drivers/base/firmware_class.c
+ :export:
+
+.. kernel-doc:: drivers/base/transport_class.c
+ :export:
+
+.. kernel-doc:: drivers/base/dd.c
+ :export:
+
+.. kernel-doc:: include/linux/platform_device.h
+ :internal:
+
+.. kernel-doc:: drivers/base/platform.c
+ :export:
+
+.. kernel-doc:: drivers/base/bus.c
+ :export:
+
+Buffer Sharing and Synchronization
+----------------------------------
+
+The dma-buf subsystem provides the framework for sharing buffers for
+hardware (DMA) access across multiple device drivers and subsystems, and
+for synchronizing asynchronous hardware access.
+
+This is used, for example, by drm "prime" multi-GPU support, but is of
+course not limited to GPU use cases.
+
+The three main components of this are: (1) dma-buf, representing a
+sg_table and exposed to userspace as a file descriptor to allow passing
+between devices, (2) fence, which provides a mechanism to signal when
+one device has finished access, and (3) reservation, which manages the
+shared or exclusive fence(s) associated with the buffer.
+
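+As a rough sketch of the exporter side (illustrative only; ``foo_dmabuf_ops``
+is a hypothetical, driver-provided ops table that must supply the usual
+map/unmap/mmap/release callbacks), a driver wraps its backing storage in a
+dma-buf like this::
+
+    #include <linux/dma-buf.h>
+    #include <linux/fcntl.h>
+
+    extern const struct dma_buf_ops foo_dmabuf_ops;  /* defined by the driver */
+
+    static struct dma_buf *foo_export(void *priv, size_t size)
+    {
+            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+            exp_info.ops   = &foo_dmabuf_ops;
+            exp_info.size  = size;
+            exp_info.flags = O_RDWR;
+            exp_info.priv  = priv;
+
+            /* the returned dma_buf can be handed to userspace with dma_buf_fd() */
+            return dma_buf_export(&exp_info);
+    }
+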
+dma-buf
+~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-buf.c
+ :export:
+
+.. kernel-doc:: include/linux/dma-buf.h
+ :internal:
+
+reservation
+~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/reservation.c
+ :doc: Reservation Object Overview
+
+.. kernel-doc:: drivers/dma-buf/reservation.c
+ :export:
+
+.. kernel-doc:: include/linux/reservation.h
+ :internal:
+
+fence
+~~~~~
+
+.. kernel-doc:: drivers/dma-buf/fence.c
+ :export:
+
+.. kernel-doc:: include/linux/fence.h
+ :internal:
+
+.. kernel-doc:: drivers/dma-buf/seqno-fence.c
+ :export:
+
+.. kernel-doc:: include/linux/seqno-fence.h
+ :internal:
+
+.. kernel-doc:: drivers/dma-buf/fence-array.c
+ :export:
+
+.. kernel-doc:: include/linux/fence-array.h
+ :internal:
+
+.. kernel-doc:: drivers/dma-buf/reservation.c
+ :export:
+
+.. kernel-doc:: include/linux/reservation.h
+ :internal:
+
+.. kernel-doc:: drivers/dma-buf/sync_file.c
+ :export:
+
+.. kernel-doc:: include/linux/sync_file.h
+ :internal:
+
+Device Drivers DMA Management
+-----------------------------
+
+.. kernel-doc:: drivers/base/dma-coherent.c
+ :export:
+
+.. kernel-doc:: drivers/base/dma-mapping.c
+ :export:
+
+Device Drivers Power Management
+-------------------------------
+
+.. kernel-doc:: drivers/base/power/main.c
+ :export:
+
+Device Drivers ACPI Support
+---------------------------
+
+.. kernel-doc:: drivers/acpi/scan.c
+ :export:
+
+.. kernel-doc:: drivers/acpi/scan.c
+ :internal:
+
+Device drivers PnP support
+--------------------------
+
+.. kernel-doc:: drivers/pnp/core.c
+ :internal:
+
+.. kernel-doc:: drivers/pnp/card.c
+ :export:
+
+.. kernel-doc:: drivers/pnp/driver.c
+ :internal:
+
+.. kernel-doc:: drivers/pnp/manager.c
+ :export:
+
+.. kernel-doc:: drivers/pnp/support.c
+ :export:
+
+Userspace IO devices
+--------------------
+
+.. kernel-doc:: drivers/uio/uio.c
+ :export:
+
+.. kernel-doc:: include/linux/uio_driver.h
+ :internal:
+
diff --git a/Documentation/driver-api/input.rst b/Documentation/driver-api/input.rst
new file mode 100644
index 000000000000..d05bf58fa83e
--- /dev/null
+++ b/Documentation/driver-api/input.rst
@@ -0,0 +1,51 @@
+Input Subsystem
+===============
+
+Input core
+----------
+
+.. kernel-doc:: include/linux/input.h
+ :internal:
+
+.. kernel-doc:: drivers/input/input.c
+ :export:
+
+.. kernel-doc:: drivers/input/ff-core.c
+ :export:
+
+.. kernel-doc:: drivers/input/ff-memless.c
+ :export:
+
+Multitouch Library
+------------------
+
+.. kernel-doc:: include/linux/input/mt.h
+ :internal:
+
+.. kernel-doc:: drivers/input/input-mt.c
+ :export:
+
+Polled input devices
+--------------------
+
+.. kernel-doc:: include/linux/input-polldev.h
+ :internal:
+
+.. kernel-doc:: drivers/input/input-polldev.c
+ :export:
+
+Matrix keyboards/keypads
+------------------------
+
+.. kernel-doc:: include/linux/input/matrix_keypad.h
+ :internal:
+
+Sparse keymap support
+---------------------
+
+.. kernel-doc:: include/linux/input/sparse-keymap.h
+ :internal:
+
+.. kernel-doc:: drivers/input/sparse-keymap.c
+ :export:
+
diff --git a/Documentation/driver-api/message-based.rst b/Documentation/driver-api/message-based.rst
new file mode 100644
index 000000000000..18ff94ef6d8e
--- /dev/null
+++ b/Documentation/driver-api/message-based.rst
@@ -0,0 +1,12 @@
+Message-based devices
+=====================
+
+Fusion message devices
+----------------------
+
+.. kernel-doc:: drivers/message/fusion/mptbase.c
+ :export:
+
+.. kernel-doc:: drivers/message/fusion/mptscsih.c
+ :export:
+
diff --git a/Documentation/driver-api/miscellaneous.rst b/Documentation/driver-api/miscellaneous.rst
new file mode 100644
index 000000000000..8da7d115bafc
--- /dev/null
+++ b/Documentation/driver-api/miscellaneous.rst
@@ -0,0 +1,50 @@
+Parallel Port Devices
+=====================
+
+.. kernel-doc:: include/linux/parport.h
+ :internal:
+
+.. kernel-doc:: drivers/parport/ieee1284.c
+ :export:
+
+.. kernel-doc:: drivers/parport/share.c
+ :export:
+
+.. kernel-doc:: drivers/parport/daisy.c
+ :internal:
+
+16x50 UART Driver
+=================
+
+.. kernel-doc:: drivers/tty/serial/serial_core.c
+ :export:
+
+.. kernel-doc:: drivers/tty/serial/8250/8250_core.c
+ :export:
+
+Pulse-Width Modulation (PWM)
+============================
+
+Pulse-width modulation is a modulation technique primarily used to
+control power supplied to electrical devices.
+
+The PWM framework provides an abstraction for providers and consumers of
+PWM signals. A controller that provides one or more PWM signals is
+registered as :c:type:`struct pwm_chip <pwm_chip>`. Providers
+are expected to embed this structure in a driver-specific structure.
+This structure contains fields that describe a particular chip.
+
+A chip exposes one or more PWM signal sources, each of which is exposed as
+a :c:type:`struct pwm_device <pwm_device>`. Operations can be
+performed on PWM devices to control the period, duty cycle, polarity and
+active state of the signal.
+
+Note that PWM devices are exclusive resources: they can only be used by one
+consumer at a time.
+
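+A minimal consumer sketch (illustrative only; ``foo_backlight_on`` and the
+chosen period are hypothetical) looks like this::
+
+    #include <linux/device.h>
+    #include <linux/err.h>
+    #include <linux/pwm.h>
+
+    static int foo_backlight_on(struct device *dev)
+    {
+            struct pwm_device *pwm;
+            int ret;
+
+            pwm = devm_pwm_get(dev, NULL);  /* look up the PWM via DT/lookup table */
+            if (IS_ERR(pwm))
+                    return PTR_ERR(pwm);
+
+            /* 50% duty cycle at 1 kHz; duty and period are in nanoseconds */
+            ret = pwm_config(pwm, 500000, 1000000);
+            if (ret)
+                    return ret;
+
+            return pwm_enable(pwm);
+    }
+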
+.. kernel-doc:: include/linux/pwm.h
+ :internal:
+
+.. kernel-doc:: drivers/pwm/core.c
+ :export:
+
diff --git a/Documentation/driver-api/sound.rst b/Documentation/driver-api/sound.rst
new file mode 100644
index 000000000000..afef6eabc073
--- /dev/null
+++ b/Documentation/driver-api/sound.rst
@@ -0,0 +1,54 @@
+Sound Devices
+=============
+
+.. kernel-doc:: include/sound/core.h
+ :internal:
+
+.. kernel-doc:: sound/sound_core.c
+ :export:
+
+.. kernel-doc:: include/sound/pcm.h
+ :internal:
+
+.. kernel-doc:: sound/core/pcm.c
+ :export:
+
+.. kernel-doc:: sound/core/device.c
+ :export:
+
+.. kernel-doc:: sound/core/info.c
+ :export:
+
+.. kernel-doc:: sound/core/rawmidi.c
+ :export:
+
+.. kernel-doc:: sound/core/sound.c
+ :export:
+
+.. kernel-doc:: sound/core/memory.c
+ :export:
+
+.. kernel-doc:: sound/core/pcm_memory.c
+ :export:
+
+.. kernel-doc:: sound/core/init.c
+ :export:
+
+.. kernel-doc:: sound/core/isadma.c
+ :export:
+
+.. kernel-doc:: sound/core/control.c
+ :export:
+
+.. kernel-doc:: sound/core/pcm_lib.c
+ :export:
+
+.. kernel-doc:: sound/core/hwdep.c
+ :export:
+
+.. kernel-doc:: sound/core/pcm_native.c
+ :export:
+
+.. kernel-doc:: sound/core/memalloc.c
+ :export:
+
diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst
new file mode 100644
index 000000000000..f64cb666498a
--- /dev/null
+++ b/Documentation/driver-api/spi.rst
@@ -0,0 +1,53 @@
+Serial Peripheral Interface (SPI)
+=================================
+
+SPI is the "Serial Peripheral Interface", widely used with embedded
+systems because it is a simple and efficient interface: basically a
+multiplexed shift register. Its three signal wires hold a clock (SCK,
+often in the range of 1-20 MHz), a "Master Out, Slave In" (MOSI) data
+line, and a "Master In, Slave Out" (MISO) data line. SPI is a full
+duplex protocol; for each bit shifted out the MOSI line (one per clock)
+another is shifted in on the MISO line. Those bits are assembled into
+words of various sizes on the way to and from system memory. An
+additional chipselect line is usually active-low (nCS); four signals are
+normally used for each peripheral, plus sometimes an interrupt.
+
+The SPI bus facilities listed here provide a generalized interface to
+declare SPI busses and devices, manage them according to the standard
+Linux driver model, and perform input/output operations. At this time,
+only "master" side interfaces are supported, where Linux talks to SPI
+peripherals and does not implement such a peripheral itself. (Interfaces
+to support implementing SPI slaves would necessarily look different.)
+
+The programming interface is structured around two kinds of driver, and
+two kinds of device. A "Controller Driver" abstracts the controller
+hardware, which may be as simple as a set of GPIO pins or as complex as
+a pair of FIFOs connected to dual DMA engines on the other side of the
+SPI shift register (maximizing throughput). Such drivers bridge between
+whatever bus they sit on (often the platform bus) and SPI, and expose
+the SPI side of their device as a :c:type:`struct spi_master
+<spi_master>`. SPI devices are children of that master,
+represented as a :c:type:`struct spi_device <spi_device>` and
+manufactured from :c:type:`struct spi_board_info
+<spi_board_info>` descriptors which are usually provided by
+board-specific initialization code. A :c:type:`struct spi_driver
+<spi_driver>` is called a "Protocol Driver", and is bound to a
+spi_device using normal driver model calls.
+
+The I/O model is a set of queued messages. Protocol drivers submit one
+or more :c:type:`struct spi_message <spi_message>` objects,
+which are processed and completed asynchronously. (There are synchronous
+wrappers, however.) Messages are built from one or more
+:c:type:`struct spi_transfer <spi_transfer>` objects, each of
+which wraps a full duplex SPI transfer. A variety of protocol tweaking
+options are needed, because different chips adopt very different
+policies for how they use the bits transferred with SPI.
+
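+As a sketch of the protocol-driver side (illustrative only; ``foo_read_reg``
+is hypothetical, and a real driver should use DMA-safe buffers rather than
+stack variables), a register read built from two transfers looks like this::
+
+    #include <linux/spi/spi.h>
+
+    static int foo_read_reg(struct spi_device *spi, u8 *reg, u8 *val)
+    {
+            struct spi_transfer xfers[2] = {
+                    { .tx_buf = reg, .len = 1 },   /* write the register address */
+                    { .rx_buf = val, .len = 1 },   /* then clock one byte back in */
+            };
+            struct spi_message msg;
+
+            spi_message_init(&msg);
+            spi_message_add_tail(&xfers[0], &msg);
+            spi_message_add_tail(&xfers[1], &msg);
+
+            /* spi_sync() is the synchronous wrapper around the queued message API */
+            return spi_sync(spi, &msg);
+    }
+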
+.. kernel-doc:: include/linux/spi/spi.h
+ :internal:
+
+.. kernel-doc:: drivers/spi/spi.c
+ :functions: spi_register_board_info
+
+.. kernel-doc:: drivers/spi/spi.c
+ :export:
diff --git a/Documentation/driver-model/device.txt b/Documentation/driver-model/device.txt
index 1e70220d20f4..2403eb856187 100644
--- a/Documentation/driver-model/device.txt
+++ b/Documentation/driver-model/device.txt
@@ -50,7 +50,7 @@ Attributes of devices can be exported by a device driver through sysfs.
Please see Documentation/filesystems/sysfs.txt for more information
on how sysfs works.
-As explained in Documentation/kobject.txt, device attributes must be be
+As explained in Documentation/kobject.txt, device attributes must be
created before the KOBJ_ADD uevent is generated. The only way to realize
that is by defining an attribute group.
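+For illustration, a minimal sketch (not taken from this document; the foo
+names are hypothetical) of a read-only attribute published through a group:
+
+	#include <linux/device.h>
+	#include <linux/kernel.h>
+
+	static ssize_t foo_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+	{
+		return sprintf(buf, "%d\n", 42);
+	}
+	static DEVICE_ATTR_RO(foo);
+
+	static struct attribute *foo_attrs[] = {
+		&dev_attr_foo.attr,
+		NULL
+	};
+	ATTRIBUTE_GROUPS(foo);
+
+	/* before device_register(), so the attribute exists at KOBJ_ADD time: */
+	/*	dev->groups = foo_groups; */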
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index b0d775d28e97..167070895498 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -266,8 +266,12 @@ IIO
devm_iio_device_unregister()
devm_iio_kfifo_allocate()
devm_iio_kfifo_free()
+ devm_iio_triggered_buffer_setup()
+ devm_iio_triggered_buffer_cleanup()
devm_iio_trigger_alloc()
devm_iio_trigger_free()
+ devm_iio_trigger_register()
+ devm_iio_trigger_unregister()
devm_iio_channel_get()
devm_iio_channel_release()
devm_iio_channel_get_all()
@@ -342,6 +346,10 @@ PINCTRL
devm_pinctrl_register()
devm_pinctrl_unregister()
+POWER
+ devm_reboot_mode_register()
+ devm_reboot_mode_unregister()
+
PWM
devm_pwm_get()
devm_pwm_put()
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 2d485dea8cec..ac892b30815e 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -1,23 +1,27 @@
+.. _email_clients:
+
Email clients info for Linux
-======================================================================
+============================
Git
-----------------------------------------------------------------------
-These days most developers use `git send-email` instead of regular
+---
+
+These days most developers use ``git send-email`` instead of regular
email clients. The man page for this is quite good. On the receiving
-end, maintainers use `git am` to apply the patches.
+end, maintainers use ``git am`` to apply the patches.
-If you are new to git then send your first patch to yourself. Save it
-as raw text including all the headers. Run `git am raw_email.txt` and
-then review the changelog with `git log`. When that works then send
+If you are new to ``git`` then send your first patch to yourself. Save it
+as raw text including all the headers. Run ``git am raw_email.txt`` and
+then review the changelog with ``git log``. When that works then send
the patch to the appropriate mailing list(s).
General Preferences
-----------------------------------------------------------------------
+-------------------
+
Patches for the Linux kernel are submitted via email, preferably as
inline text in the body of the email. Some maintainers accept
attachments, but then the attachments should have content-type
-"text/plain". However, attachments are generally frowned upon because
+``text/plain``. However, attachments are generally frowned upon because
it makes quoting portions of the patch more difficult in the patch
review process.
@@ -25,7 +29,7 @@ Email clients that are used for Linux kernel patches should send the
patch text untouched. For example, they should not modify or delete tabs
or spaces, even at the beginning or end of lines.
-Don't send patches with "format=flowed". This can cause unexpected
+Don't send patches with ``format=flowed``. This can cause unexpected
and unwanted line breaks.
Don't let your email client do automatic word wrapping for you.
@@ -54,57 +58,63 @@ mailing lists.
Some email client (MUA) hints
-----------------------------------------------------------------------
+-----------------------------
+
Here are some specific MUA configuration hints for editing and sending
patches for the Linux kernel. These are not meant to be complete
software package configuration summaries.
+
Legend:
-TUI = text-based user interface
-GUI = graphical user interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- TUI = text-based user interface
+- GUI = graphical user interface
+
Alpine (TUI)
+************
Config options:
-In the "Sending Preferences" section:
-- "Do Not Send Flowed Text" must be enabled
-- "Strip Whitespace Before Sending" must be disabled
+In the :menuselection:`Sending Preferences` section:
+
+- :menuselection:`Do Not Send Flowed Text` must be ``enabled``
+- :menuselection:`Strip Whitespace Before Sending` must be ``disabled``
When composing the message, the cursor should be placed where the patch
-should appear, and then pressing CTRL-R let you specify the patch file
+should appear, and then pressing :kbd:`CTRL-R` let you specify the patch file
to insert into the message.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Claws Mail (GUI)
+****************
Works. Some people use this successfully for patches.
-To insert a patch use Message->Insert File (CTRL+i) or an external editor.
+To insert a patch use :menuselection:`Message-->Insert` File (:kbd:`CTRL-I`)
+or an external editor.
If the inserted patch has to be edited in the Claws composition window
-"Auto wrapping" in Configuration->Preferences->Compose->Wrapping should be
+"Auto wrapping" in
+:menuselection:`Configuration-->Preferences-->Compose-->Wrapping` should be
disabled.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Evolution (GUI)
+***************
Some people use this successfully for patches.
When composing mail select: Preformat
- from Format->Paragraph Style->Preformatted (Ctrl-7)
+ from :menuselection:`Format-->Paragraph Style-->Preformatted` (:kbd:`CTRL-7`)
or the toolbar
Then use:
- Insert->Text File... (Alt-n x)
+:menuselection:`Insert-->Text File...` (:kbd:`ALT-N x`)
to insert the patch.
-You can also "diff -Nru old.c new.c | xclip", select Preformat, then
-paste with the middle button.
+You can also ``diff -Nru old.c new.c | xclip``, select
+:menuselection:`Preformat`, then paste with the middle button.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Kmail (GUI)
+***********
Some people use Kmail successfully for patches.
@@ -120,11 +130,12 @@ word-wrapped and you can uncheck "word wrap" without losing the existing
wrapping.
At the bottom of your email, put the commonly-used patch delimiter before
-inserting your patch: three hyphens (---).
+inserting your patch: three hyphens (``---``).
-Then from the "Message" menu item, select insert file and choose your patch.
+Then from the :menuselection:`Message` menu item, select insert file and
+choose your patch.
As an added bonus you can customise the message creation toolbar menu
-and put the "insert file" icon there.
+and put the :menuselection:`insert file` icon there.
Make the composer window wide enough so that no lines wrap. As of
KMail 1.13.5 (KDE 4.5.4), KMail will apply word wrapping when sending
@@ -139,86 +150,96 @@ as inlined text will make them tricky to extract from their 7-bit encoding.
If you absolutely must send patches as attachments instead of inlining
them as text, right click on the attachment and select properties, and
-highlight "Suggest automatic display" to make the attachment inlined to
-make it more viewable.
+highlight :menuselection:`Suggest automatic display` to make the attachment
+inlined to make it more viewable.
When saving patches that are sent as inlined text, select the email that
contains the patch from the message list pane, right click and select
-"save as". You can use the whole email unmodified as a patch if it was
-properly composed. There is no option currently to save the email when you
-are actually viewing it in its own window -- there has been a request filed
-at kmail's bugzilla and hopefully this will be addressed. Emails are saved
-as read-write for user only so you will have to chmod them to make them
+:menuselection:`save as`. You can use the whole email unmodified as a patch
+if it was properly composed. There is no option currently to save the email
+when you are actually viewing it in its own window -- there has been a request
+filed at kmail's bugzilla and hopefully this will be addressed. Emails are
+saved as read-write for user only so you will have to chmod them to make them
group and world readable if you copy them elsewhere.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lotus Notes (GUI)
+*****************
Run away from it.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Mutt (TUI)
+**********
-Plenty of Linux developers use mutt, so it must work pretty well.
+Plenty of Linux developers use ``mutt``, so it must work pretty well.
Mutt doesn't come with an editor, so whatever editor you use should be
used in a way that there are no automatic linebreaks. Most editors have
-an "insert file" option that inserts the contents of a file unaltered.
+an :menuselection:`insert file` option that inserts the contents of a file
+unaltered.
+
+To use ``vim`` with mutt::
-To use 'vim' with mutt:
set editor="vi"
- If using xclip, type the command
+If using xclip, type the command::
+
:set paste
- before middle button or shift-insert or use
+
+before middle button or shift-insert or use::
+
:r filename
if you want to include the patch inline.
-(a)ttach works fine without "set paste".
+(a)ttach works fine without ``set paste``.
+
+You can also generate patches with ``git format-patch`` and then use Mutt
+to send them::
-You can also generate patches with 'git format-patch' and then use Mutt
-to send them:
$ mutt -H 0001-some-bug-fix.patch
Config options:
+
It should work with default settings.
-However, it's a good idea to set the "send_charset" to:
+However, it's a good idea to set the ``send_charset`` to::
+
set send_charset="us-ascii:utf-8"
Mutt is highly customizable. Here is a minimum configuration to start
-using Mutt to send patches through Gmail:
-
-# .muttrc
-# ================ IMAP ====================
-set imap_user = 'yourusername@gmail.com'
-set imap_pass = 'yourpassword'
-set spoolfile = imaps://imap.gmail.com/INBOX
-set folder = imaps://imap.gmail.com/
-set record="imaps://imap.gmail.com/[Gmail]/Sent Mail"
-set postponed="imaps://imap.gmail.com/[Gmail]/Drafts"
-set mbox="imaps://imap.gmail.com/[Gmail]/All Mail"
-
-# ================ SMTP ====================
-set smtp_url = "smtp://username@smtp.gmail.com:587/"
-set smtp_pass = $imap_pass
-set ssl_force_tls = yes # Require encrypted connection
-
-# ================ Composition ====================
-set editor = `echo \$EDITOR`
-set edit_headers = yes # See the headers when editing
-set charset = UTF-8 # value of $LANG; also fallback for send_charset
-# Sender, email address, and sign-off line must match
-unset use_domain # because joe@localhost is just embarrassing
-set realname = "YOUR NAME"
-set from = "username@gmail.com"
-set use_from = yes
+using Mutt to send patches through Gmail::
+
+ # .muttrc
+ # ================ IMAP ====================
+ set imap_user = 'yourusername@gmail.com'
+ set imap_pass = 'yourpassword'
+ set spoolfile = imaps://imap.gmail.com/INBOX
+ set folder = imaps://imap.gmail.com/
+ set record="imaps://imap.gmail.com/[Gmail]/Sent Mail"
+ set postponed="imaps://imap.gmail.com/[Gmail]/Drafts"
+ set mbox="imaps://imap.gmail.com/[Gmail]/All Mail"
+
+ # ================ SMTP ====================
+ set smtp_url = "smtp://username@smtp.gmail.com:587/"
+ set smtp_pass = $imap_pass
+ set ssl_force_tls = yes # Require encrypted connection
+
+ # ================ Composition ====================
+ set editor = `echo \$EDITOR`
+ set edit_headers = yes # See the headers when editing
+ set charset = UTF-8 # value of $LANG; also fallback for send_charset
+ # Sender, email address, and sign-off line must match
+ unset use_domain # because joe@localhost is just embarrassing
+ set realname = "YOUR NAME"
+ set from = "username@gmail.com"
+ set use_from = yes
The Mutt docs have lots more information:
+
http://dev.mutt.org/trac/wiki/UseCases/Gmail
+
http://dev.mutt.org/doc/manual.html
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pine (TUI)
+**********
Pine has had some whitespace truncation issues in the past, but these
should all be fixed now.
@@ -226,12 +247,13 @@ should all be fixed now.
Use alpine (pine's successor) if you can.
Config options:
-- quell-flowed-text is needed for recent versions
-- the "no-strip-whitespace-before-send" option is needed
+
+- ``quell-flowed-text`` is needed for recent versions
+- the ``no-strip-whitespace-before-send`` option is needed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sylpheed (GUI)
+**************
- Works well for inlining text (or using attachments).
- Allows use of an external editor.
@@ -241,50 +263,50 @@ Sylpheed (GUI)
- Adding addresses to address book doesn't understand the display name
properly.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Thunderbird (GUI)
+*****************
Thunderbird is an Outlook clone that likes to mangle text, but there are ways
to coerce it into behaving.
- Allow use of an external editor:
The easiest thing to do with Thunderbird and patches is to use an
- "external editor" extension and then just use your favorite $EDITOR
+ "external editor" extension and then just use your favorite ``$EDITOR``
for reading/merging patches into the body text. To do this, download
and install the extension, then add a button for it using
- View->Toolbars->Customize... and finally just click on it when in the
- Compose dialog.
+ :menuselection:`View-->Toolbars-->Customize...` and finally just click on it
+ when in the :menuselection:`Compose` dialog.
Please note that "external editor" requires that your editor must not
fork, or in other words, the editor must not return before closing.
You may have to pass additional flags or change the settings of your
editor. Most notably if you are using gvim then you must pass the -f
- option to gvim by putting "/usr/bin/gvim -f" (if the binary is in
- /usr/bin) to the text editor field in "external editor" settings. If you
- are using some other editor then please read its manual to find out how
- to do this.
+ option to gvim by putting ``/usr/bin/gvim -f`` (if the binary is in
+ ``/usr/bin``) to the text editor field in :menuselection:`external editor`
+ settings. If you are using some other editor then please read its manual
+ to find out how to do this.
To beat some sense out of the internal editor, do this:
-- Edit your Thunderbird config settings so that it won't use format=flowed.
- Go to "edit->preferences->advanced->config editor" to bring up the
- thunderbird's registry editor.
+- Edit your Thunderbird config settings so that it won't use ``format=flowed``.
+ Go to :menuselection:`edit-->preferences-->advanced-->config editor` to bring up
+ Thunderbird's registry editor.
-- Set "mailnews.send_plaintext_flowed" to "false"
+- Set ``mailnews.send_plaintext_flowed`` to ``false``
-- Set "mailnews.wraplength" from "72" to "0"
+- Set ``mailnews.wraplength`` from ``72`` to ``0``
-- "View" > "Message Body As" > "Plain Text"
+- :menuselection:`View-->Message Body As-->Plain Text`
-- "View" > "Character Encoding" > "Unicode (UTF-8)"
+- :menuselection:`View-->Character Encoding-->Unicode (UTF-8)`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TkRat (GUI)
+***********
Works. Use "Insert file..." or external editor.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Gmail (Web GUI)
+***************
Does not work for sending patches.
@@ -295,5 +317,3 @@ although tab2space problem can be solved with external editor.
Another problem is that Gmail will base64-encode any message that has a
non-ASCII character. That includes things like European names.
-
- ###
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 0c16a22521a8..23d18b8a49d5 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -123,9 +123,12 @@ The DAX code does not work correctly on architectures which have virtually
mapped caches such as ARM, MIPS and SPARC.
Calling get_user_pages() on a range of user memory that has been mmaped
-from a DAX file will fail as there are no 'struct page' to describe
-those pages. This problem is being worked on. That means that O_DIRECT
-reads/writes to those memory ranges from a non-DAX file will fail (note
-that O_DIRECT reads/writes _of a DAX file_ do work, it is the memory
-that is being accessed that is key here). Other things that will not
-work include RDMA, sendfile() and splice().
+from a DAX file will fail when there are no 'struct page' to describe
+those pages. This problem has been addressed in some device drivers
+by adding optional struct page support for pages under the control of
+the driver (see CONFIG_NVDIMM_PFN in drivers/nvdimm for an example of
+how to do this). In the non struct page cases O_DIRECT reads/writes to
+those memory ranges from a non-DAX file will fail (note that O_DIRECT
+reads/writes _of a DAX file_ do work, it is the memory that is being
+accessed that is key here). Other things that will not work in the
+non struct page case include RDMA, sendfile() and splice().
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index ecd808088362..753dd4f96afe 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -131,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
+noinline_dentry Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 68080ad6a75e..219ffd41a911 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -145,7 +145,7 @@ Table 1-1: Process specific entries in /proc
symbol the task is blocked in - or "0" if not blocked.
pagemap Page table
stack Report full stack trace, enable via CONFIG_STACKTRACE
- smaps a extension based on maps, showing the memory consumption of
+ smaps an extension based on maps, showing the memory consumption of
each mapping and flags associated with it
numa_maps an extension based on maps, showing the memory locality and
binding policy as well as mem usage (in pages) of each mapping.
@@ -515,6 +515,18 @@ be vanished or the reverse -- new added.
This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
+Note: reading /proc/PID/maps or /proc/PID/smaps is inherently racy (consistent
+output can be achieved only in the single read call).
+This typically manifests when doing partial reads of these files while the
+memory map is being modified. Despite the races, we do provide the following
+guarantees:
+
+1) The mapped addresses never go backwards, which implies no two
+ regions will ever overlap.
+2) If there is something at a given vaddr during the entirety of the
+ life of the smaps/maps walk, there will be some output for it.
+
+
The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG
bits on both physical and virtual pages associated with a process, and the
soft-dirty bit on pte (see Documentation/vm/soft-dirty.txt for details).
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 9ace359d6cc5..cbec006e10e4 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -722,7 +722,7 @@ struct address_space_operations {
The second case is when a request has been made to invalidate
some or all pages in an address_space. This can happen
- through the fadvice(POSIX_FADV_DONTNEED) system call or by the
+ through the fadvise(POSIX_FADV_DONTNEED) system call or by the
filesystem explicitly requesting it as nfs and 9fs do (when
they believe the cache may be out of date with storage) by
calling invalidate_inode_pages2().
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 8146e9fd5ffc..c2d44e6e117b 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -348,3 +348,126 @@ Removed Sysctls
---- -------
fs.xfs.xfsbufd_centisec v4.0
fs.xfs.age_buffer_centisecs v4.0
+
+
+Error handling
+==============
+
+XFS can act differently according to the type of error found during its
+operation. The implementation introduces the following concepts to the error
+handler:
+
+ -failure speed:
+ Defines how fast XFS should propagate an error upwards when a specific
+ error is found during the filesystem operation. It can propagate
+ immediately, after a defined number of retries, after a set time period,
+ or simply retry forever.
+
+ -error classes:
+ Specifies the subsystem the error configuration will apply to, such as
+ metadata IO or memory allocation. Different subsystems will have
+ different error handlers for which behaviour can be configured.
+
+ -error handlers:
+ Defines the behavior for a specific error.
+
+The filesystem behavior during an error can be set via sysfs files. Each
+error handler works independently - the first condition met by an error handler
+for a specific class will cause the error to be propagated rather than reset and
+retried.
+
+The action taken by the filesystem when the error is propagated is context
+dependent - it may cause a shut down in the case of an unrecoverable error,
+it may be reported back to userspace, or it may even be ignored because
+there's nothing useful we can do with the error or anyone we can report it to (e.g.
+during unmount).
+
+The configuration files are organized into the following hierarchy for each
+mounted filesystem:
+
+ /sys/fs/xfs/<dev>/error/<class>/<error>/
+
+Where:
+ <dev>
+ The short device name of the mounted filesystem. This is the same device
+ name that shows up in XFS kernel error messages as "XFS(<dev>): ..."
+
+ <class>
+ The subsystem the error configuration belongs to. As of 4.9, the defined
+ classes are:
+
+ - "metadata": applies metadata buffer write IO
+
+ <error>
+ The individual error handler configurations.
+
+
+Each filesystem has "global" error configuration options defined in its top
+level directory:
+
+ /sys/fs/xfs/<dev>/error/
+
+ fail_at_unmount (Min: 0 Default: 1 Max: 1)
+ Defines the filesystem error behavior at unmount time.
+
+ If set to a value of 1, XFS will override all other error configurations
+ during unmount and replace them with "immediate fail" characteristics.
+ i.e. no retries, no retry timeout. This will always allow unmount to
+ succeed when there are persistent errors present.
+
+ If set to 0, the configured retry behaviour will continue until all
+ retries and/or timeouts have been exhausted. This will delay unmount
+ completion when there are persistent errors, and it may prevent the
+ filesystem from ever unmounting fully in the case of "retry forever"
+ handler configurations.
+
+ Note: there is no guarantee that fail_at_unmount can be set whilst an
+ unmount is in progress. It is possible that the sysfs entries are
+ removed by the unmounting filesystem before a "retry forever" error
+ handler configuration causes unmount to hang, and hence the filesystem
+ must be configured appropriately before unmount begins to prevent
+ unmount hangs.
+
+Each filesystem has specific error class handlers that define the error
+propagation behaviour for specific errors. There is also a "default" error
+handler defined, which defines the behaviour for all errors that don't have
+specific handlers defined. Where multiple retry constraints are configured for
+a single error, the first retry configuration that expires will cause the error
+to be propagated. The handler configurations are found in the directory:
+
+ /sys/fs/xfs/<dev>/error/<class>/<error>/
+
+ max_retries (Min: -1 Default: Varies Max: INTMAX)
+ Defines the allowed number of retries of a specific error before
+ the filesystem will propagate the error. The retry count for a given
+ error context (e.g. a specific metadata buffer) is reset every time
+ there is a successful completion of the operation.
+
+ Setting the value to "-1" will cause XFS to retry forever for this
+ specific error.
+
+ Setting the value to "0" will cause XFS to fail immediately when the
+ specific error is reported.
+
+ Setting the value to "N" (where 0 < N < Max) will make XFS retry the
+ operation "N" times before propagating the error.
+
+ retry_timeout_seconds (Min: -1 Default: Varies Max: 1 day)
+ Define the amount of time (in seconds) that the filesystem is
+ allowed to retry its operations when the specific error is
+ found.
+
+ Setting the value to "-1" will allow XFS to retry forever for this
+ specific error.
+
+ Setting the value to "0" will cause XFS to fail immediately when the
+ specific error is reported.
+
+ Setting the value to "N" (where 0 < N < Max) will allow XFS to retry the
+ operation for up to "N" seconds before propagating the error.
+
+Note: The default behaviour for a specific error handler is dependent on both
+the class and error context. For example, the default values for
+"metadata/ENODEV" are "0" rather than "-1" so that this error handler defaults
+to "fail immediately" behaviour. This is done because ENODEV is a fatal,
+unrecoverable error no matter how many times the metadata IO is retried.
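+
+For example, the following illustrative userspace sketch bumps the retry limit
+for one handler. The device name "sda1" and the presence of an "EIO" handler
+directory are assumptions here; check the sysfs tree on the running system
+first:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		const char *path =
+			"/sys/fs/xfs/sda1/error/metadata/EIO/max_retries";
+		FILE *f = fopen(path, "w");
+
+		if (!f) {
+			perror(path);
+			return 1;
+		}
+		fprintf(f, "5\n");	/* allow up to 5 retries, then propagate */
+		return fclose(f) ? 1 : 0;
+	}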
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
deleted file mode 100644
index 7b727783db7e..000000000000
--- a/Documentation/gcov.txt
+++ /dev/null
@@ -1,257 +0,0 @@
-Using gcov with the Linux kernel
-================================
-
-1. Introduction
-2. Preparation
-3. Customization
-4. Files
-5. Modules
-6. Separated build and test machines
-7. Troubleshooting
-Appendix A: sample script: gather_on_build.sh
-Appendix B: sample script: gather_on_test.sh
-
-
-1. Introduction
-===============
-
-gcov profiling kernel support enables the use of GCC's coverage testing
-tool gcov [1] with the Linux kernel. Coverage data of a running kernel
-is exported in gcov-compatible format via the "gcov" debugfs directory.
-To get coverage data for a specific file, change to the kernel build
-directory and use gcov with the -o option as follows (requires root):
-
-# cd /tmp/linux-out
-# gcov -o /sys/kernel/debug/gcov/tmp/linux-out/kernel spinlock.c
-
-This will create source code files annotated with execution counts
-in the current directory. In addition, graphical gcov front-ends such
-as lcov [2] can be used to automate the process of collecting data
-for the entire kernel and provide coverage overviews in HTML format.
-
-Possible uses:
-
-* debugging (has this line been reached at all?)
-* test improvement (how do I change my test to cover these lines?)
-* minimizing kernel configurations (do I need this option if the
- associated code is never run?)
-
---
-
-[1] http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
-[2] http://ltp.sourceforge.net/coverage/lcov.php
-
-
-2. Preparation
-==============
-
-Configure the kernel with:
-
- CONFIG_DEBUG_FS=y
- CONFIG_GCOV_KERNEL=y
-
-select the gcc's gcov format, default is autodetect based on gcc version:
-
- CONFIG_GCOV_FORMAT_AUTODETECT=y
-
-and to get coverage data for the entire kernel:
-
- CONFIG_GCOV_PROFILE_ALL=y
-
-Note that kernels compiled with profiling flags will be significantly
-larger and run slower. Also CONFIG_GCOV_PROFILE_ALL may not be supported
-on all architectures.
-
-Profiling data will only become accessible once debugfs has been
-mounted:
-
- mount -t debugfs none /sys/kernel/debug
-
-
-3. Customization
-================
-
-To enable profiling for specific files or directories, add a line
-similar to the following to the respective kernel Makefile:
-
- For a single file (e.g. main.o):
- GCOV_PROFILE_main.o := y
-
- For all files in one directory:
- GCOV_PROFILE := y
-
-To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL
-is specified, use:
-
- GCOV_PROFILE_main.o := n
- and:
- GCOV_PROFILE := n
-
-Only files which are linked to the main kernel image or are compiled as
-kernel modules are supported by this mechanism.
-
-
-4. Files
-========
-
-The gcov kernel support creates the following files in debugfs:
-
- /sys/kernel/debug/gcov
- Parent directory for all gcov-related files.
-
- /sys/kernel/debug/gcov/reset
- Global reset file: resets all coverage data to zero when
- written to.
-
- /sys/kernel/debug/gcov/path/to/compile/dir/file.gcda
- The actual gcov data file as understood by the gcov
- tool. Resets file coverage data to zero when written to.
-
- /sys/kernel/debug/gcov/path/to/compile/dir/file.gcno
- Symbolic link to a static data file required by the gcov
- tool. This file is generated by gcc when compiling with
- option -ftest-coverage.
-
-
-5. Modules
-==========
-
-Kernel modules may contain cleanup code which is only run during
-module unload time. The gcov mechanism provides a means to collect
-coverage data for such code by keeping a copy of the data associated
-with the unloaded module. This data remains available through debugfs.
-Once the module is loaded again, the associated coverage counters are
-initialized with the data from its previous instantiation.
-
-This behavior can be deactivated by specifying the gcov_persist kernel
-parameter:
-
- gcov_persist=0
-
-At run-time, a user can also choose to discard data for an unloaded
-module by writing to its data file or the global reset file.
-
-
-6. Separated build and test machines
-====================================
-
-The gcov kernel profiling infrastructure is designed to work out-of-the
-box for setups where kernels are built and run on the same machine. In
-cases where the kernel runs on a separate machine, special preparations
-must be made, depending on where the gcov tool is used:
-
-a) gcov is run on the TEST machine
-
-The gcov tool version on the test machine must be compatible with the
-gcc version used for kernel build. Also the following files need to be
-copied from build to test machine:
-
-from the source tree:
- - all C source files + headers
-
-from the build tree:
- - all C source files + headers
- - all .gcda and .gcno files
- - all links to directories
-
-It is important to note that these files need to be placed into the
-exact same file system location on the test machine as on the build
-machine. If any of the path components is symbolic link, the actual
-directory needs to be used instead (due to make's CURDIR handling).
-
-b) gcov is run on the BUILD machine
-
-The following files need to be copied after each test case from test
-to build machine:
-
-from the gcov directory in sysfs:
- - all .gcda files
- - all links to .gcno files
-
-These files can be copied to any location on the build machine. gcov
-must then be called with the -o option pointing to that directory.
-
-Example directory setup on the build machine:
-
- /tmp/linux: kernel source tree
- /tmp/out: kernel build directory as specified by make O=
- /tmp/coverage: location of the files copied from the test machine
-
- [user@build] cd /tmp/out
- [user@build] gcov -o /tmp/coverage/tmp/out/init main.c
-
-
-7. Troubleshooting
-==================
-
-Problem: Compilation aborts during linker step.
-Cause: Profiling flags are specified for source files which are not
- linked to the main kernel or which are linked by a custom
- linker procedure.
-Solution: Exclude affected source files from profiling by specifying
- GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the
- corresponding Makefile.
-
-Problem: Files copied from sysfs appear empty or incomplete.
-Cause: Due to the way seq_file works, some tools such as cp or tar
- may not correctly copy files from sysfs.
-Solution: Use 'cat' to read .gcda files and 'cp -d' to copy links.
- Alternatively use the mechanism shown in Appendix B.
-
-
-Appendix A: gather_on_build.sh
-==============================
-
-Sample script to gather coverage meta files on the build machine
-(see 6a):
-#!/bin/bash
-
-KSRC=$1
-KOBJ=$2
-DEST=$3
-
-if [ -z "$KSRC" ] || [ -z "$KOBJ" ] || [ -z "$DEST" ]; then
- echo "Usage: $0 <ksrc directory> <kobj directory> <output.tar.gz>" >&2
- exit 1
-fi
-
-KSRC=$(cd $KSRC; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
-KOBJ=$(cd $KOBJ; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
-
-find $KSRC $KOBJ \( -name '*.gcno' -o -name '*.[ch]' -o -type l \) -a \
- -perm /u+r,g+r | tar cfz $DEST -P -T -
-
-if [ $? -eq 0 ] ; then
- echo "$DEST successfully created, copy to test system and unpack with:"
- echo " tar xfz $DEST -P"
-else
- echo "Could not create file $DEST"
-fi
-
-
-Appendix B: gather_on_test.sh
-=============================
-
-Sample script to gather coverage data files on the test machine
-(see 6b):
-
-#!/bin/bash -e
-
-DEST=$1
-GCDA=/sys/kernel/debug/gcov
-
-if [ -z "$DEST" ] ; then
- echo "Usage: $0 <output.tar.gz>" >&2
- exit 1
-fi
-
-TEMPDIR=$(mktemp -d)
-echo Collecting data..
-find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \;
-find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \;
-find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \;
-tar czf $DEST -C $TEMPDIR sys
-rm -rf $TEMPDIR
-
-echo "$DEST successfully created, copy to build system and unpack with:"
-echo " tar xfz $DEST"
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 86d3fa95fd12..40884c4fe40c 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -8,9 +8,9 @@ gpio-legacy.txt (actually, there is no real mapping possible with the old
interface; you just fetch an integer from somewhere and request the
corresponding GPIO.
-Platforms that make use of GPIOs must select ARCH_REQUIRE_GPIOLIB (if GPIO usage
-is mandatory) or ARCH_WANT_OPTIONAL_GPIOLIB (if GPIO support can be omitted) in
-their Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
+All platforms can enable the GPIO library, but if the platform strictly
+requires GPIO functionality to be present, it needs to select GPIOLIB from its
+Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
describe its hardware layout. Currently, mappings can be defined through device
tree, ACPI, and platform data.
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index 6cb35a78eff4..368d5a294d89 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -262,6 +262,12 @@ symbol:
to the container using container_of().
(See Documentation/driver-model/design-patterns.txt)
+ If there is a need to exclude certain GPIOs from the IRQ domain, one can
+ set .irq_need_valid_mask of the gpiochip before gpiochip_add_data() is
+ called. This allocates .irq_valid_mask with as many bits set as there are
+ GPIOs in the chip. Drivers can exclude GPIOs by clearing bits from this
+ mask. The mask must be filled in before gpiochip_irqchip_add() is called.
+
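+ A minimal sketch of this sequence, assuming "gc" points to this driver's
+ struct gpio_chip and that "priv" and "foo_irq_chip" are this driver's
+ (made-up) private data and irqchip:
+
+	gc->irq_need_valid_mask = true;	/* ask the core to allocate .irq_valid_mask */
+	ret = gpiochip_add_data(gc, priv);
+	if (ret)
+		return ret;
+
+	/* GPIO offset 3 cannot generate interrupts on this (hypothetical) chip */
+	clear_bit(3, gc->irq_valid_mask);
+
+	ret = gpiochip_irqchip_add(gc, &foo_irq_chip, 0,
+				   handle_simple_irq, IRQ_TYPE_NONE);
+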
* gpiochip_set_chained_irqchip(): sets up a chained irq handler for a
gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler
data. (Notice handler data, since the irqchip data is likely used by the
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index 79ab5648d69b..b34fd94f7089 100644
--- a/Documentation/gpio/gpio-legacy.txt
+++ b/Documentation/gpio/gpio-legacy.txt
@@ -72,8 +72,8 @@ in this document, but drivers acting as clients to the GPIO interface must
not care how it's implemented.)
That said, if the convention is supported on their platform, drivers should
-use it when possible. Platforms must select ARCH_REQUIRE_GPIOLIB or
-ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig. Drivers that can't work without
+use it when possible. Platforms must select GPIOLIB if GPIO functionality
+is strictly required. Drivers that can't work without
standard GPIO calls should have Kconfig entries which depend on GPIOLIB. The
GPIO calls are available, either as "real code" or as optimized-away stubs,
when drivers use the include file:
@@ -553,22 +553,14 @@ either NULL or the label associated with that GPIO when it was requested.
Platform Support
----------------
-To support this framework, a platform's Kconfig will "select" either
-ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
-and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
-three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
+To force-enable this framework, a platform's Kconfig will "select" GPIOLIB,
+else it is up to the user to configure support for GPIO.
It may also provide a custom value for ARCH_NR_GPIOS, so that it better
reflects the number of GPIOs in actual use on that platform, without
wasting static table space. (It should count both built-in/SoC GPIOs and
also ones on GPIO expanders.)
-ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
-into the kernel on that architecture.
-
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
-can enable it and build it into the kernel optionally.
-
If neither of these options are selected, the platform does not support
GPIOs through GPIO-lib and the code cannot be enabled by the user.
diff --git a/Documentation/gpu/conf.py b/Documentation/gpu/conf.py
new file mode 100644
index 000000000000..6314d1708230
--- /dev/null
+++ b/Documentation/gpu/conf.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux GPU Driver Developer's Guide"
+
+tags.add("subproject")
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index fcac0fa72056..5ff3d2b236af 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -12,3 +12,10 @@ Linux GPU Driver Developer's Guide
drm-uapi
i915
vga-switcheroo
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/hid/intel-ish-hid.txt b/Documentation/hid/intel-ish-hid.txt
new file mode 100644
index 000000000000..d48b21c71ddd
--- /dev/null
+++ b/Documentation/hid/intel-ish-hid.txt
@@ -0,0 +1,454 @@
+Intel Integrated Sensor Hub (ISH)
+=================================
+
+A sensor hub enables offloading sensor polling and algorithm processing to a
+dedicated low power co-processor. This allows the core processor to go into
+low power modes more often, resulting in increased battery life.
+
+Many vendors provide external sensor hubs conforming to the HID Sensor usage
+tables; they are used in several tablets, 2 in 1 convertible laptops and
+embedded products. Linux has supported them since version 3.9.
+
+Intel® introduced integrated sensor hubs as a part of the SoC starting with
+Cherry Trail, and they are now supported on multiple generations of CPU
+packages. Many commercial devices have already shipped with an Integrated
+Sensor Hub (ISH). The ISH also complies with the HID sensor specification,
+but the difference is the transport protocol used for communication. Current
+external sensor hubs mainly use HID over I2C or USB, but the ISH uses neither
+I2C nor USB.
+
+1. Overview
+
+Using an analogy with the usbhid implementation, the ISH follows a similar
+model for very high speed communication:
+
+ ----------------- ----------------------
+ | USB HID | --> | ISH HID |
+ ----------------- ----------------------
+ ----------------- ----------------------
+ | USB protocol | --> | ISH Transport |
+ ----------------- ----------------------
+ ----------------- ----------------------
+ | EHCI/XHCI | --> | ISH IPC |
+ ----------------- ----------------------
+ PCI PCI
+ ----------------- ----------------------
+ |Host controller| --> | ISH processor |
+ ----------------- ----------------------
+ USB Link
+ ----------------- ----------------------
+ | USB End points| --> | ISH Clients |
+ ----------------- ----------------------
+
+Just as the USB protocol provides a method for device enumeration, link
+management and user data encapsulation, the ISH provides similar services.
+But it is very lightweight, tailored to manage and communicate with the ISH
+client applications implemented in the firmware.
+
+The ISH allows multiple sensor management applications to execute in the
+firmware. Like USB endpoints, the messaging can be to/from a client. As part
+of the enumeration process, these clients are identified. These clients can
+be simple HID sensor applications, a sensor calibration application or a
+sensor firmware update application.
+
+The implementation model is similar: like the USB bus, the ISH transport is
+also implemented as a bus. Each client application executing in the ISH
+processor is registered as a device on this bus. The driver that binds to
+each device (the ISH HID driver) identifies the device type and registers
+with the HID core.
+
+2. ISH Implementation: Block Diagram
+
+ ---------------------------
+ | User Space Applications |
+ ---------------------------
+
+----------------IIO ABI----------------
+ --------------------------
+ | IIO Sensor Drivers |
+ --------------------------
+ --------------------------
+ | IIO core |
+ --------------------------
+ --------------------------
+ | HID Sensor Hub MFD |
+ --------------------------
+ --------------------------
+ | HID Core |
+ --------------------------
+ --------------------------
+ | HID over ISH Client |
+ --------------------------
+ --------------------------
+ | ISH Transport (ISHTP) |
+ --------------------------
+ --------------------------
+ | IPC Drivers |
+ --------------------------
+OS
+---------------- PCI -----------------
+Hardware + Firmware
+ ----------------------------
+ | ISH Hardware/Firmware(FW) |
+ ----------------------------
+
+3. High level processing in above blocks
+
+3.1 Hardware Interface
+
+The ISH is exposed as a "Non-VGA unclassified PCI device" to the host. The PCI
+product and vendor IDs change between processor generations, so the source
+code which enumerates the device needs to be updated from generation to
+generation.
+
+3.2 Inter Processor Communication (IPC) driver
+Location: drivers/hid/intel-ish-hid/ipc
+
+IPC messaging uses memory mapped I/O. The registers are defined in
+hw-ish-regs.h.
+
+3.2.1 IPC/FW message types
+
+There are two types of messages: one for management of the link, and the
+other for messages to and from the transport layer.
+
+TX and RX of Transport messages
+
+A set of memory mapped registers supports TX and RX of multi byte messages
+(e.g. IPC_REG_ISH2HOST_MSG, IPC_REG_HOST2ISH_MSG). The IPC layer maintains
+internal queues to sequence messages and send them in order to the FW.
+Optionally the caller can register a handler to get notification of
+completion. A doorbell mechanism is used in messaging to trigger processing
+on the host and client firmware side. When the ISH interrupt handler is
+called, the ISH2HOST doorbell register is used by the host drivers to
+determine that the interrupt is for ISH.
+
+Each side has 32 32-bit message registers and a 32-bit doorbell. The doorbell
+register has the following format:
+Bits 0..6: fragment length (7 bits are used)
+Bits 10..13: encapsulated protocol
+Bits 16..19: management command (for IPC management protocol)
+Bit 31: doorbell trigger (signal H/W interrupt to the other side)
+Other bits are reserved, should be 0.
+
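+For illustration only, the layout above could be packed as follows; the macro
+and function names here are invented and are not taken from the driver:
+
+	#include <stdint.h>
+
+	#define DB_FRAG_LEN(x)	((uint32_t)(x) & 0x7f)		/* bits 0..6   */
+	#define DB_PROTOCOL(x)	(((uint32_t)(x) & 0xf) << 10)	/* bits 10..13 */
+	#define DB_MGMT_CMD(x)	(((uint32_t)(x) & 0xf) << 16)	/* bits 16..19 */
+	#define DB_TRIGGER	(1U << 31)			/* bit 31      */
+
+	static inline uint32_t build_doorbell(unsigned int len,
+					      unsigned int proto,
+					      unsigned int cmd)
+	{
+		return DB_FRAG_LEN(len) | DB_PROTOCOL(proto) |
+		       DB_MGMT_CMD(cmd) | DB_TRIGGER;
+	}
+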
+3.2.2 Transport layer interface
+
+To abstract HW level IPC communication, a set of callbacks is registered.
+The transport layer uses them to send and receive messages.
+Refer to struct ishtp_hw_ops for the callbacks.
+
+3.3 ISH Transport layer
+Location: drivers/hid/intel-ish-hid/ishtp/
+
+3.3.1 A Generic Transport Layer
+
+The transport layer is a bi-directional protocol, which defines:
+- A set of commands to start, stop, connect, disconnect and flow control
+  (see ishtp/hbm.h for details)
+- A flow control mechanism to avoid buffer overflows
+
+This protocol resembles bus messages described in the following document:
+http://www.intel.com/content/dam/www/public/us/en/documents/technical-\
+specifications/dcmi-hi-1-0-spec.pdf "Chapter 7: Bus Message Layer"
+
+3.3.2 Connection and Flow Control Mechanism
+
+Each FW client and protocol is identified by a UUID. In order to communicate
+with a FW client, a connection must be established using connect request and
+response bus messages. If successful, a pair (host_client_id and fw_client_id)
+will identify the connection.
+
+Once a connection is established, the peers send each other flow control bus
+messages independently. Every peer may send a message only if it has received
+a flow-control credit before. Once it has sent a message, it may not send
+another one before receiving the next flow control credit.
+Either side can send a disconnect request bus message to end communication.
+The link will also be dropped if a major FW reset occurs.
+
+3.3.3 Peer to Peer data transfer
+
+Peer to peer data transfer can happen with or without using DMA. Depending on
+the sensor bandwidth requirement, DMA can be enabled by using the module
+parameter ishtp_use_dma under intel_ishtp.
+
+Each side (host and FW) manages its DMA transfer memory independently. When an
+ISHTP client from either host or FW side wants to send something, it decides
+whether to send over IPC or over DMA; for each transfer the decision is
+independent. The sending side sends a DMA_XFER message when the message is in
+the respective host buffer (TX when the host client sends, RX when the FW
+client sends). The recipient of a DMA message responds with DMA_XFER_ACK,
+indicating to the sender that the memory region for that message may be
+reused.
+
+DMA initialization is started with host sending DMA_ALLOC_NOTIFY bus message
+(that includes RX buffer) and FW responds with DMA_ALLOC_NOTIFY_ACK.
+In addition to DMA address communication, this sequence checks capabilities:
+if the host doesn't support DMA, then it won't send a DMA allocation, so the
+FW can't send DMA; if the FW doesn't support DMA then it won't respond with
+DMA_ALLOC_NOTIFY_ACK, in which case the host will not use DMA transfers.
+Here the ISH acts as a busmaster DMA controller. Hence when the host sends
+DMA_XFER, it is a request to do a host->ISH DMA transfer; when the FW sends
+DMA_XFER, it means that it already did the DMA and the message resides at the
+host. Thus, DMA_XFER and DMA_XFER_ACK act as ownership indicators.
+
+In the initial state all outgoing memory belongs to the sender (TX to host,
+RX to FW); DMA_XFER transfers ownership of the region that contains the ISHTP
+message to the receiving side, and DMA_XFER_ACK returns ownership to the
+sender. A sender need not wait for the previous DMA_XFER to be ack'ed, and
+may send another message as long as the remaining contiguous memory in its
+ownership is enough.
+In principle, multiple DMA_XFER and DMA_XFER_ACK messages may be sent at once
+(up to IPC MTU), thus allowing for interrupt throttling.
+Currently, the ISH FW decides to send over DMA if the ISHTP message is more
+than 3 IPC fragments, and via IPC otherwise.
+
+3.3.4 Ring Buffers
+
+When a client initiates a connection, a ring of RX and TX buffers is
+allocated. The size of the ring can be specified by the client. The HID client
+sets 16 and 32 for the TX and RX buffers respectively. On a send request from
+the client, the data to be sent is copied to one of the send ring buffers and
+scheduled to be sent using the bus message protocol. These buffers are
+required because the FW may not have processed the last message and may not
+have enough flow control credits to send. The same holds true on the receive
+side, and flow control is required there as well.
+
+3.3.5 Host Enumeration
+
+The host enumeration bus command allows discovery of the clients present in
+the FW. There can be multiple sensor clients and clients for the calibration
+function.
+
+To ease implementation and allow an independent driver to handle each client,
+this transport layer takes advantage of the Linux bus driver model. Each
+client is registered as a device on the transport bus (the ishtp bus).
+
+Enumeration sequence of messages:
+- Host sends HOST_START_REQ_CMD, indicating that host ISHTP layer is up.
+- FW responds with HOST_START_RES_CMD
+- Host sends HOST_ENUM_REQ_CMD (enumerate FW clients)
+- FW responds with HOST_ENUM_RES_CMD that includes bitmap of available FW
+client IDs
+- For each FW ID found in that bitmap the host sends
+HOST_CLIENT_PROPERTIES_REQ_CMD
+- FW responds with HOST_CLIENT_PROPERTIES_RES_CMD. Properties include UUID,
+max ISHTP message size, etc.
+- Once the host has received the properties for the last discovered client, it
+considers the ISHTP device fully functional (and allocates DMA buffers)
+
+3.4 HID over ISH Client
+Location: drivers/hid/intel-ish-hid
+
+The ISHTP client driver is responsible for:
+- Enumerate HID devices under the FW ISH client
+- Get the report descriptor
+- Register with the HID core as an LL driver
+- Process Get/Set feature requests
+- Get input reports
+
+3.5 HID Sensor Hub MFD and IIO sensor drivers
+
+The functionality in these drivers is the same as for an external sensor hub.
+Refer to
+Documentation/hid/hid-sensor.txt for HID sensors and
+Documentation/ABI/testing/sysfs-bus-iio for the IIO ABIs to user space.
+
+3.6 End to End HID transport Sequence Diagram
+
+HID-ISH-CLN ISHTP IPC HW
+ | | | |
+ | | |-----WAKE UP------------------>|
+ | | | |
+ | | |-----HOST READY--------------->|
+ | | | |
+ | | |<----MNG_RESET_NOTIFY_ACK----- |
+ | | | |
+ | |<----ISHTP_START------ | |
+ | | | |
+ | |<-----------------HOST_START_RES_CMD-------------------|
+ | | | |
+ | |------------------QUERY_SUBSCRIBER-------------------->|
+ | | | |
+ | |------------------HOST_ENUM_REQ_CMD------------------->|
+ | | | |
+ | |<-----------------HOST_ENUM_RES_CMD--------------------|
+ | | | |
+ | |------------------HOST_CLIENT_PROPERTIES_REQ_CMD------>|
+ | | | |
+ | |<-----------------HOST_CLIENT_PROPERTIES_RES_CMD-------|
+ | Create new device on in ishtp bus | |
+ | | | |
+ | |------------------HOST_CLIENT_PROPERTIES_REQ_CMD------>|
+ | | | |
+ | |<-----------------HOST_CLIENT_PROPERTIES_RES_CMD-------|
+ | Create new device on in ishtp bus | |
+ | | | |
+ | |--Repeat HOST_CLIENT_PROPERTIES_REQ_CMD-till last one--|
+ | | | |
+ probed()
+ |----ishtp_cl_connect-->|----------------- CLIENT_CONNECT_REQ_CMD-------------->|
+ | | | |
+ | |<----------------CLIENT_CONNECT_RES_CMD----------------|
+ | | | |
+ |register event callback| | |
+ | | | |
+ |ishtp_cl_send(
+ HOSTIF_DM_ENUM_DEVICES) |----------fill ishtp_msg_hdr struct write to HW----- >|
+ | | | |
+ | | |<-----IRQ(IPC_PROTOCOL_ISHTP---|
+ | | | |
+ |<--ENUM_DEVICE RSP-----| | |
+ | | | |
+for each enumerated device
+ |ishtp_cl_send(
+ HOSTIF_GET_HID_DESCRIPTOR |----------fill ishtp_msg_hdr struct write to HW--- >|
+ | | | |
+ ...Response
+ | | | |
+for each enumerated device
+ |ishtp_cl_send(
+ HOSTIF_GET_REPORT_DESCRIPTOR |----------fill ishtp_msg_hdr struct write to HW- >|
+ | | | |
+ | | | |
+ hid_allocate_device
+ | | | |
+ hid_add_device | | |
+ | | | |
+
+
+3.7 ISH Debugging
+
+To debug the ISH, the event tracing mechanism is used. To enable debug logs:
+echo 1 > /sys/kernel/debug/tracing/events/intel_ish/enable
+cat /sys/kernel/debug/tracing/trace
+
+3.8 ISH IIO sysfs Example on a Lenovo ThinkPad Yoga 260
+
+root@otcpl-ThinkPad-Yoga-260:~# tree -l /sys/bus/iio/devices/
+/sys/bus/iio/devices/
+├── iio:device0 -> ../../../devices/0044:8086:22D8.0001/HID-SENSOR-200073.9.auto/iio:device0
+│   ├── buffer
+│   │   ├── enable
+│   │   ├── length
+│   │   └── watermark
+...
+│   ├── in_accel_hysteresis
+│   ├── in_accel_offset
+│   ├── in_accel_sampling_frequency
+│   ├── in_accel_scale
+│   ├── in_accel_x_raw
+│   ├── in_accel_y_raw
+│   ├── in_accel_z_raw
+│   ├── name
+│   ├── scan_elements
+│   │   ├── in_accel_x_en
+│   │   ├── in_accel_x_index
+│   │   ├── in_accel_x_type
+│   │   ├── in_accel_y_en
+│   │   ├── in_accel_y_index
+│   │   ├── in_accel_y_type
+│   │   ├── in_accel_z_en
+│   │   ├── in_accel_z_index
+│   │   └── in_accel_z_type
+...
+│   │   ├── devices
+│   │   │   │   ├── buffer
+│   │   │   │   │   ├── enable
+│   │   │   │   │   ├── length
+│   │   │   │   │   └── watermark
+│   │   │   │   ├── dev
+│   │   │   │   ├── in_intensity_both_raw
+│   │   │   │   ├── in_intensity_hysteresis
+│   │   │   │   ├── in_intensity_offset
+│   │   │   │   ├── in_intensity_sampling_frequency
+│   │   │   │   ├── in_intensity_scale
+│   │   │   │   ├── name
+│   │   │   │   ├── scan_elements
+│   │   │   │   │   ├── in_intensity_both_en
+│   │   │   │   │   ├── in_intensity_both_index
+│   │   │   │   │   └── in_intensity_both_type
+│   │   │   │   ├── trigger
+│   │   │   │   │   └── current_trigger
+...
+│   │   │   │   ├── buffer
+│   │   │   │   │   ├── enable
+│   │   │   │   │   ├── length
+│   │   │   │   │   └── watermark
+│   │   │   │   ├── dev
+│   │   │   │   ├── in_magn_hysteresis
+│   │   │   │   ├── in_magn_offset
+│   │   │   │   ├── in_magn_sampling_frequency
+│   │   │   │   ├── in_magn_scale
+│   │   │   │   ├── in_magn_x_raw
+│   │   │   │   ├── in_magn_y_raw
+│   │   │   │   ├── in_magn_z_raw
+│   │   │   │   ├── in_rot_from_north_magnetic_tilt_comp_raw
+│   │   │   │   ├── in_rot_hysteresis
+│   │   │   │   ├── in_rot_offset
+│   │   │   │   ├── in_rot_sampling_frequency
+│   │   │   │   ├── in_rot_scale
+│   │   │   │   ├── name
+...
+│   │   │   │   ├── scan_elements
+│   │   │   │   │   ├── in_magn_x_en
+│   │   │   │   │   ├── in_magn_x_index
+│   │   │   │   │   ├── in_magn_x_type
+│   │   │   │   │   ├── in_magn_y_en
+│   │   │   │   │   ├── in_magn_y_index
+│   │   │   │   │   ├── in_magn_y_type
+│   │   │   │   │   ├── in_magn_z_en
+│   │   │   │   │   ├── in_magn_z_index
+│   │   │   │   │   ├── in_magn_z_type
+│   │   │   │   │   ├── in_rot_from_north_magnetic_tilt_comp_en
+│   │   │   │   │   ├── in_rot_from_north_magnetic_tilt_comp_index
+│   │   │   │   │   └── in_rot_from_north_magnetic_tilt_comp_type
+│   │   │   │   ├── trigger
+│   │   │   │   │   └── current_trigger
+...
+│   │   │   │   ├── buffer
+│   │   │   │   │   ├── enable
+│   │   │   │   │   ├── length
+│   │   │   │   │   └── watermark
+│   │   │   │   ├── dev
+│   │   │   │   ├── in_anglvel_hysteresis
+│   │   │   │   ├── in_anglvel_offset
+│   │   │   │   ├── in_anglvel_sampling_frequency
+│   │   │   │   ├── in_anglvel_scale
+│   │   │   │   ├── in_anglvel_x_raw
+│   │   │   │   ├── in_anglvel_y_raw
+│   │   │   │   ├── in_anglvel_z_raw
+│   │   │   │   ├── name
+│   │   │   │   ├── scan_elements
+│   │   │   │   │   ├── in_anglvel_x_en
+│   │   │   │   │   ├── in_anglvel_x_index
+│   │   │   │   │   ├── in_anglvel_x_type
+│   │   │   │   │   ├── in_anglvel_y_en
+│   │   │   │   │   ├── in_anglvel_y_index
+│   │   │   │   │   ├── in_anglvel_y_type
+│   │   │   │   │   ├── in_anglvel_z_en
+│   │   │   │   │   ├── in_anglvel_z_index
+│   │   │   │   │   └── in_anglvel_z_type
+│   │   │   │   ├── trigger
+│   │   │   │   │   └── current_trigger
+...
+│   │   │   │   ├── buffer
+│   │   │   │   │   ├── enable
+│   │   │   │   │   ├── length
+│   │   │   │   │   └── watermark
+│   │   │   │   ├── dev
+│   │   │   │   ├── in_anglvel_hysteresis
+│   │   │   │   ├── in_anglvel_offset
+│   │   │   │   ├── in_anglvel_sampling_frequency
+│   │   │   │   ├── in_anglvel_scale
+│   │   │   │   ├── in_anglvel_x_raw
+│   │   │   │   ├── in_anglvel_y_raw
+│   │   │   │   ├── in_anglvel_z_raw
+│   │   │   │   ├── name
+│   │   │   │   ├── scan_elements
+│   │   │   │   │   ├── in_anglvel_x_en
+│   │   │   │   │   ├── in_anglvel_x_index
+│   │   │   │   │   ├── in_anglvel_x_type
+│   │   │   │   │   ├── in_anglvel_y_en
+│   │   │   │   │   ├── in_anglvel_y_index
+│   │   │   │   │   ├── in_anglvel_y_type
+│   │   │   │   │   ├── in_anglvel_z_en
+│   │   │   │   │   ├── in_anglvel_z_index
+│   │   │   │   │   └── in_anglvel_z_type
+│   │   │   │   ├── trigger
+│   │   │   │   │   └── current_trigger
+...
diff --git a/Documentation/hsi.txt b/Documentation/hsi.txt
deleted file mode 100644
index 6ac6cd51852a..000000000000
--- a/Documentation/hsi.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-HSI - High-speed Synchronous Serial Interface
-
-1. Introduction
-~~~~~~~~~~~~~~~
-
-High Speed Syncronous Interface (HSI) is a fullduplex, low latency protocol,
-that is optimized for die-level interconnect between an Application Processor
-and a Baseband chipset. It has been specified by the MIPI alliance in 2003 and
-implemented by multiple vendors since then.
-
-The HSI interface supports full duplex communication over multiple channels
-(typically 8) and is capable of reaching speeds up to 200 Mbit/s.
-
-The serial protocol uses two signals, DATA and FLAG as combined data and clock
-signals and an additional READY signal for flow control. An additional WAKE
-signal can be used to wakeup the chips from standby modes. The signals are
-commonly prefixed by AC for signals going from the application die to the
-cellular die and CA for signals going the other way around.
-
-+------------+ +---------------+
-| Cellular | | Application |
-| Die | | Die |
-| | - - - - - - CAWAKE - - - - - - >| |
-| T|------------ CADATA ------------>|R |
-| X|------------ CAFLAG ------------>|X |
-| |<----------- ACREADY ------------| |
-| | | |
-| | | |
-| |< - - - - - ACWAKE - - - - - - -| |
-| R|<----------- ACDATA -------------|T |
-| X|<----------- ACFLAG -------------|X |
-| |------------ CAREADY ----------->| |
-| | | |
-| | | |
-+------------+ +---------------+
-
-2. HSI Subsystem in Linux
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the Linux kernel the hsi subsystem is supposed to be used for HSI devices.
-The hsi subsystem contains drivers for hsi controllers including support for
-multi-port controllers and provides a generic API for using the HSI ports.
-
-It also contains HSI client drivers, which make use of the generic API to
-implement a protocol used on the HSI interface. These client drivers can
-use an arbitrary number of channels.
-
-3. hsi-char Device
-~~~~~~~~~~~~~~~~~~
-
-Each port automatically registers a generic client driver called hsi_char,
-which provides a charecter device for userspace representing the HSI port.
-It can be used to communicate via HSI from userspace. Userspace may
-configure the hsi_char device using the following ioctl commands:
-
-* HSC_RESET:
- - flush the HSI port
-
-* HSC_SET_PM
- - enable or disable the client.
-
-* HSC_SEND_BREAK
- - send break
-
-* HSC_SET_RX
- - set RX configuration
-
-* HSC_GET_RX
- - get RX configuration
-
-* HSC_SET_TX
- - set TX configuration
-
-* HSC_GET_TX
- - get TX configuration
diff --git a/Documentation/hwmon/adt7470 b/Documentation/hwmon/adt7470
index 8ce4aa0a0f55..fe68e18a0c8d 100644
--- a/Documentation/hwmon/adt7470
+++ b/Documentation/hwmon/adt7470
@@ -65,6 +65,23 @@ from 0 (off) to 255 (full speed). Fan speed will be set to maximum when the
temperature sensor associated with the PWM control exceeds
pwm#_auto_point2_temp.
+The driver also allows control of the PWM frequency:
+
+* pwm1_freq
+
+The PWM frequency is rounded to the nearest one of:
+
+* 11.0 Hz
+* 14.7 Hz
+* 22.1 Hz
+* 29.4 Hz
+* 35.3 Hz
+* 44.1 Hz
+* 58.8 Hz
+* 88.2 Hz
+* 1.4 kHz
+* 22.5 kHz
+
Notes
-----
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.txt
index 2ecdbfc85ecf..ef9d74947f5c 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.txt
@@ -34,6 +34,19 @@ devm_hwmon_device_register_with_groups(struct device *dev,
const char *name, void *drvdata,
const struct attribute_group **groups);
+struct device *
+hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
+
+struct device *
+devm_hwmon_device_register_with_info(struct device *dev,
+ const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
+
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
@@ -60,15 +73,229 @@ devm_hwmon_device_register_with_groups is similar to
hwmon_device_register_with_groups. However, it is device managed, meaning the
hwmon device does not have to be removed explicitly by the removal function.
+hwmon_device_register_with_info is the most comprehensive and preferred means
+to register a hardware monitoring device. It creates the standard sysfs
+attributes in the hardware monitoring core, letting the driver focus on reading
+from and writing to the chip instead of having to bother with sysfs attributes.
+Its parameters are described in more detail below.
+
+devm_hwmon_device_register_with_info is similar to
+hwmon_device_register_with_info. However, it is device managed, meaning the
+hwmon device does not have to be removed explicitly by the removal function.
+
hwmon_device_unregister deregisters a registered hardware monitoring device.
The parameter of this function is the pointer to the registered hardware
monitoring device structure. This function must be called from the driver
remove function if the hardware monitoring device was registered with
-hwmon_device_register or with hwmon_device_register_with_groups.
+hwmon_device_register, hwmon_device_register_with_groups, or
+hwmon_device_register_with_info.
devm_hwmon_device_unregister does not normally have to be called. It is only
needed for error handling, and only needed if the driver probe fails after
-the call to devm_hwmon_device_register_with_groups.
+the call to devm_hwmon_device_register_with_groups and if the automatic
+(device managed) removal would be too late.
+
+Using hwmon_device_register_with_info()
+---------------------------------------
+
+hwmon_device_register_with_info() registers a hardware monitoring device.
+The parameters to this function are
+
+struct device *dev Pointer to parent device
+const char *name Device name
+void *drvdata Driver private data
+const struct hwmon_chip_info *info
+ Pointer to chip description.
+const struct attribute_group **groups
+ Null-terminated list of additional sysfs attribute
+ groups.
+
+This function returns a pointer to the created hardware monitoring device
+on success and a negative error code for failure.
+
+The hwmon_chip_info structure looks as follows.
+
+struct hwmon_chip_info {
+ const struct hwmon_ops *ops;
+ const struct hwmon_channel_info **info;
+};
+
+It contains the following fields:
+
+* ops: Pointer to device operations.
+* info: NULL-terminated list of device channel descriptors.
+
+The list of hwmon operations is defined as:
+
+struct hwmon_ops {
+ umode_t (*is_visible)(const void *, enum hwmon_sensor_types type,
+ u32 attr, int);
+ int (*read)(struct device *, enum hwmon_sensor_types type,
+ u32 attr, int, long *);
+ int (*write)(struct device *, enum hwmon_sensor_types type,
+ u32 attr, int, long);
+};
+
+It defines the following operations.
+
+* is_visible: Pointer to a function to return the file mode for each supported
+ attribute. This function is mandatory.
+
+* read: Pointer to a function for reading a value from the chip. This function
+ is optional, but must be provided if any readable attributes exist.
+
+* write: Pointer to a function for writing a value to the chip. This function is
+ optional, but must be provided if any writeable attributes exist.
+
+Each sensor channel is described with struct hwmon_channel_info, which is
+defined as follows.
+
+struct hwmon_channel_info {
+ enum hwmon_sensor_types type;
+ u32 *config;
+};
+
+It contains the following fields:
+
+* type: The hardware monitoring sensor type.
+ Supported sensor types are
+ * hwmon_chip A virtual sensor type, used to describe attributes
+ which apply to the entire chip.
+ * hwmon_temp Temperature sensor
+ * hwmon_in Voltage sensor
+ * hwmon_curr Current sensor
+ * hwmon_power Power sensor
+ * hwmon_energy Energy sensor
+ * hwmon_humidity Humidity sensor
+ * hwmon_fan Fan speed sensor
+ * hwmon_pwm PWM control
+
+* config: Pointer to a 0-terminated list of configuration values for each
+ sensor of the given type. Each value is a combination of bit values
+ describing the attributes supported by a single sensor.
+
+As an example, here is the complete description file for a LM75 compatible
+sensor chip. The chip has a single temperature sensor. The driver wants to
+register with the thermal subsystem (HWMON_C_REGISTER_TZ), and it supports
+the update_interval attribute (HWMON_C_UPDATE_INTERVAL). The chip supports
+reading the temperature (HWMON_T_INPUT), it has a maximum temperature
+register (HWMON_T_MAX) as well as a maximum temperature hysteresis register
+(HWMON_T_MAX_HYST).
+
+static const u32 lm75_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info lm75_chip = {
+ .type = hwmon_chip,
+ .config = lm75_chip_config,
+};
+
+static const u32 lm75_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
+ 0
+};
+
+static const struct hwmon_channel_info lm75_temp = {
+ .type = hwmon_temp,
+ .config = lm75_temp_config,
+};
+
+static const struct hwmon_channel_info *lm75_info[] = {
+ &lm75_chip,
+ &lm75_temp,
+ NULL
+};
+
+static const struct hwmon_ops lm75_hwmon_ops = {
+ .is_visible = lm75_is_visible,
+ .read = lm75_read,
+ .write = lm75_write,
+};
+
+static const struct hwmon_chip_info lm75_chip_info = {
+ .ops = &lm75_hwmon_ops,
+ .info = lm75_info,
+};
+
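+For illustration, a probe function could then register the hardware monitoring
+device roughly as follows. This is only a sketch: lm75_probe() and struct
+lm75_data are hypothetical names used here for illustration, and a real driver
+would typically do this from its bus (e.g. I2C) probe callback.
+
+static int lm75_probe(struct device *dev)
+{
+	struct lm75_data *data;
+	struct device *hwmon_dev;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* "lm75" is the hwmon device name, data becomes driver private data */
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, "lm75", data,
+							 &lm75_chip_info, NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+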
+A complete list of bit values indicating individual attribute support
+is defined in include/linux/hwmon.h. Definition prefixes are as follows.
+
+HWMON_C_xxxx Chip attributes, for use with hwmon_chip.
+HWMON_T_xxxx Temperature attributes, for use with hwmon_temp.
+HWMON_I_xxxx Voltage attributes, for use with hwmon_in.
+HWMON_C_xxxx Current attributes, for use with hwmon_curr.
+ Notice the prefix overlap with chip attributes.
+HWMON_P_xxxx Power attributes, for use with hwmon_power.
+HWMON_E_xxxx Energy attributes, for use with hwmon_energy.
+HWMON_H_xxxx Humidity attributes, for use with hwmon_humidity.
+HWMON_F_xxxx Fan speed attributes, for use with hwmon_fan.
+HWMON_PWM_xxxx PWM control attributes, for use with hwmon_pwm.
+
+Driver callback functions
+-------------------------
+
+Each driver provides is_visible, read, and write functions. Parameters
+and return values for those functions are as follows.
+
+umode_t is_visible_func(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+
+Parameters:
+ data: Pointer to device private data structure.
+ type: The sensor type.
+ attr: Attribute identifier associated with a specific attribute.
+ For example, the attribute value for HWMON_T_INPUT would be
+ hwmon_temp_input. For complete mappings of bit fields to
+ attribute values please see include/linux/hwmon.h.
+	channel: The sensor channel number.
+
+Return value:
+ The file mode for this attribute. Typically, this will be 0 (the
+ attribute will not be created), S_IRUGO, or 'S_IRUGO | S_IWUSR'.
+
+int read_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+
+Parameters:
+ dev: Pointer to the hardware monitoring device.
+ type: The sensor type.
+ attr: Attribute identifier associated with a specific attribute.
+ For example, the attribute value for HWMON_T_INPUT would be
+ hwmon_temp_input. For complete mappings please see
+ include/linux/hwmon.h.
+	channel: The sensor channel number.
+ val: Pointer to attribute value.
+
+Return value:
+ 0 on success, a negative error number otherwise.
+
+int write_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+
+Parameters:
+ dev: Pointer to the hardware monitoring device.
+ type: The sensor type.
+ attr: Attribute identifier associated with a specific attribute.
+ For example, the attribute value for HWMON_T_INPUT would be
+ hwmon_temp_input. For complete mappings please see
+ include/linux/hwmon.h.
+	channel: The sensor channel number.
+ val: The value to write to the chip.
+
+Return value:
+ 0 on success, a negative error number otherwise.
+
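+As a minimal sketch, the callbacks for the LM75 example above might look as
+follows. Only the temperature attributes are handled here, and
+lm75_update_temp() is a hypothetical helper that reads the value from the
+chip; a complete driver would also handle the chip attributes and writes.
+
+static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
+			       u32 attr, int channel)
+{
+	if (type != hwmon_temp)		/* chip attributes omitted here */
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return S_IRUGO;
+	case hwmon_temp_max:
+	case hwmon_temp_max_hyst:
+		return S_IRUGO | S_IWUSR;
+	default:
+		return 0;
+	}
+}
+
+static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
+		     u32 attr, int channel, long *val)
+{
+	if (type != hwmon_temp || attr != hwmon_temp_input)
+		return -EOPNOTSUPP;	/* remaining attributes not shown */
+
+	*val = lm75_update_temp(dev);	/* hypothetical chip access helper */
+	return 0;
+}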
+
+Driver-provided sysfs attributes
+--------------------------------
+
+If the hardware monitoring device is registered with
+hwmon_device_register_with_info or devm_hwmon_device_register_with_info,
+it is most likely not necessary to provide sysfs attributes. Only non-standard
+sysfs attributes need to be provided when one of those registration functions
+is used.
The header file linux/hwmon-sysfs.h provides a number of useful macros to
declare and use hardware monitoring sysfs attributes.
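
For example, an attribute with a per-sensor index can be declared roughly as
follows (a sketch only; show_alarm() and its read_alarm() helper are
hypothetical and not part of any existing driver):

static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;

	/* read_alarm() would query the chip for the given alarm index */
	return sprintf(buf, "%d\n", read_alarm(dev, index));
}

static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);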
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650
index 58d9644a2bde..dff1d296a48b 100644
--- a/Documentation/hwmon/max6650
+++ b/Documentation/hwmon/max6650
@@ -34,6 +34,7 @@ fan3_input ro "
fan4_input ro "
fan1_target rw desired fan speed in RPM (closed loop mode only)
pwm1_enable rw regulator mode, 0=full on, 1=open loop, 2=closed loop
+ 3=off
pwm1 rw relative speed (0-255), 255=max. speed.
Used in open loop mode only.
fan1_div rw sets the speed range the inputs can handle. Legal
diff --git a/Documentation/hwmon/ucd9000 b/Documentation/hwmon/ucd9000
index 805e33edb978..262e713e60ff 100644
--- a/Documentation/hwmon/ucd9000
+++ b/Documentation/hwmon/ucd9000
@@ -2,12 +2,13 @@ Kernel driver ucd9000
=====================
Supported chips:
- * TI UCD90120, UCD90124, UCD9090, and UCD90910
- Prefixes: 'ucd90120', 'ucd90124', 'ucd9090', 'ucd90910'
+ * TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+ Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
Addresses scanned: -
Datasheets:
http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
+ http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
@@ -32,6 +33,13 @@ interrupts, cascading, or other system functions. Twelve of these pins offer PWM
functionality. Using these pins, the UCD90124 offers support for fan control,
margining, and general-purpose PWM functions.
+The UCD90160 is a 16-rail PMBus/I2C addressable power-supply sequencer and
+monitor. The device integrates a 12-bit ADC for monitoring up to 16 power-supply
+voltage inputs. Twenty-six GPIO pins can be used for power supply enables,
+power-on reset signals, external interrupts, cascading, or other system
+functions. Twelve of these pins offer PWM functionality. Using these pins, the
+UCD90160 offers support for margining and general-purpose PWM functions.
+
The UCD9090 is a 10-rail PMBus/I2C addressable power-supply sequencer and
monitor. The device integrates a 12-bit ADC for monitoring up to 10 power-supply
voltage inputs. Twenty-three GPIO pins can be used for power supply enables,
diff --git a/Documentation/hwmon/xgene-hwmon b/Documentation/hwmon/xgene-hwmon
new file mode 100644
index 000000000000..6ec50ed7cc8f
--- /dev/null
+++ b/Documentation/hwmon/xgene-hwmon
@@ -0,0 +1,30 @@
+Kernel driver xgene-hwmon
+=========================
+
+Supported chips:
+ * APM X-Gene SoC
+
+Description
+-----------
+
+This driver adds hardware temperature and power reading support for
+APM X-Gene SoC using the mailbox communication interface.
+For device tree, it is the standard DT mailbox.
+For ACPI, it is the PCC mailbox.
+
+The following sensors are supported:
+
+ * Temperature
+ - SoC on-die temperature in milli-degree C
+ - Alarm when high/over temperature occurs
+ * Power
+ - CPU power in uW
+ - IO power in uW
+
+sysfs-Interface
+---------------
+
+temp0_input - SoC on-die temperature (milli-degree C)
+temp0_critical_alarm - A value of 1 indicates that the on-die temperature has exceeded the threshold
+power0_input - CPU power in uW
+power1_input - IO power in uW
diff --git a/Documentation/iio/iio_configfs.txt b/Documentation/iio/iio_configfs.txt
index f0add35cd52e..4e5f101837a8 100644
--- a/Documentation/iio/iio_configfs.txt
+++ b/Documentation/iio/iio_configfs.txt
@@ -82,8 +82,8 @@ users to create hrtimer triggers under /config/iio/triggers/hrtimer.
e.g:
-$ mkdir /config/triggers/hrtimer/instance1
-$ rmdir /config/triggers/hrtimer/instance1
+$ mkdir /config/iio/triggers/hrtimer/instance1
+$ rmdir /config/iio/triggers/hrtimer/instance1
Each trigger can have one or more attributes specific to the trigger type.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index e0fc72963e87..d9ccb94fca95 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -6,22 +6,19 @@
Welcome to The Linux Kernel's documentation!
============================================
-Nothing for you to see here *yet*. Please move along.
-
Contents:
.. toctree::
:maxdepth: 2
kernel-documentation
- media/media_uapi
- media/media_kapi
- media/dvb-drivers/index
- media/v4l-drivers/index
+ development-process/index
+ dev-tools/tools
+ driver-api/index
+ media/index
gpu/index
Indices and tables
==================
* :ref:`genindex`
-* :ref:`search`
diff --git a/Documentation/infiniband/sysfs.txt b/Documentation/infiniband/sysfs.txt
index 45bcafe6ff8a..77570d16b170 100644
--- a/Documentation/infiniband/sysfs.txt
+++ b/Documentation/infiniband/sysfs.txt
@@ -89,6 +89,36 @@ HFI1
nctxts - number of allowed contexts (PSM2)
chip_reset - diagnostic (root only)
boardversion - board version
+
+ sdma<N>/ - one directory per sdma engine (0 - 15)
+ sdma<N>/cpu_list - read-write, list of cpus for user-process to sdma
+ engine assignment.
+ sdma<N>/vl - read-only, vl the sdma engine maps to.
+
+	This new interface gives the user control over the affinity settings
+	for the hfi1 device.
+	As an example, to set the irq affinity of an sdma engine and the
+	thread affinity of the user processes that use it, so that both are
+	"near" in terms of NUMA configuration or physical cpu location, the
+	user would do:
+
+ echo "3" > /proc/irq/<N>/smp_affinity_list
+ echo "4-7" > /sys/devices/.../sdma3/cpu_list
+ cat /sys/devices/.../sdma3/vl
+ 0
+ echo "8" > /proc/irq/<M>/smp_affinity_list
+ echo "9-12" > /sys/devices/.../sdma4/cpu_list
+ cat /sys/devices/.../sdma4/vl
+ 1
+
+	This ensures that when a process runs on cpu 4, 5, 6, or 7 and uses
+	vl=0, sdma engine 3 is selected by the driver and the interrupt of
+	sdma engine 3 is steered to cpu 3.
+	Similarly, when a process runs on cpu 9, 10, 11, or 12 and sets vl=1,
+	engine 4 is selected and the irq of sdma engine 4 is steered to cpu 8.
+	This assumes that, in the above, N is the irq number of "sdma3" and
+	M is the irq number of "sdma4" in the /proc/interrupts file.
+
ports/1/
CCMgtA/
cc_settings_bin - CCA tables used by PSM2
diff --git a/Documentation/ioctl/botching-up-ioctls.txt b/Documentation/ioctl/botching-up-ioctls.txt
index cc30b14791cb..36138c632f7a 100644
--- a/Documentation/ioctl/botching-up-ioctls.txt
+++ b/Documentation/ioctl/botching-up-ioctls.txt
@@ -34,15 +34,18 @@ will need to add a a 32-bit compat layer:
64-bit platforms do. So we always need padding to the natural size to get
this right.
- * Pad the entire struct to a multiple of 64-bits - the structure size will
- otherwise differ on 32-bit versus 64-bit. Having a different structure size
- hurts when passing arrays of structures to the kernel, or if the kernel
- checks the structure size, which e.g. the drm core does.
+ * Pad the entire struct to a multiple of 64-bits if the structure contains
+ 64-bit types - the structure size will otherwise differ on 32-bit versus
+ 64-bit. Having a different structure size hurts when passing arrays of
+ structures to the kernel, or if the kernel checks the structure size, which
+ e.g. the drm core does.
* Pointers are __u64, cast from/to a uintprt_t on the userspace side and
from/to a void __user * in the kernel. Try really hard not to delay this
conversion or worse, fiddle the raw __u64 through your code since that
- diminishes the checking tools like sparse can provide.
+   diminishes the checking tools like sparse can provide. The macro
+   u64_to_user_ptr can be used in the kernel to avoid warnings about integers
+   and pointers of different sizes (see the sketch below).
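+
+   As a rough sketch (the struct and field names here are made up purely for
+   illustration), a well-formed argument structure and the kernel-side
+   pointer conversion could look like this:
+
+   struct example_ioctl_args {
+	__u64 data_ptr;		/* userspace pointer, stored as __u64 */
+	__u32 data_len;
+	__u32 pad;		/* explicit padding: same size and layout
+				 * on 32-bit and 64-bit userspace
+				 */
+   };
+
+   void __user *udata = u64_to_user_ptr(args.data_ptr);	/* in the kernel */
+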
Basics
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
deleted file mode 100644
index 7dd95b35cd7c..000000000000
--- a/Documentation/kasan.txt
+++ /dev/null
@@ -1,171 +0,0 @@
-KernelAddressSanitizer (KASAN)
-==============================
-
-0. Overview
-===========
-
-KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides
-a fast and comprehensive solution for finding use-after-free and out-of-bounds
-bugs.
-
-KASAN uses compile-time instrumentation for checking every memory access,
-therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
-required for detection of out-of-bounds accesses to stack or global variables.
-
-Currently KASAN is supported only for x86_64 architecture.
-
-1. Usage
-========
-
-To enable KASAN configure kernel with:
-
- CONFIG_KASAN = y
-
-and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and
-inline are compiler instrumentation types. The former produces smaller binary
-the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
-version 5.0 or later.
-
-KASAN works with both SLUB and SLAB memory allocators.
-For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
-
-To disable instrumentation for specific files or directories, add a line
-similar to the following to the respective kernel Makefile:
-
- For a single file (e.g. main.o):
- KASAN_SANITIZE_main.o := n
-
- For all files in one directory:
- KASAN_SANITIZE := n
-
-1.1 Error reports
-=================
-
-A typical out of bounds access report looks like this:
-
-==================================================================
-BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3
-Write of size 1 by task modprobe/1689
-=============================================================================
-BUG kmalloc-128 (Not tainted): kasan error
------------------------------------------------------------------------------
-
-Disabling lock debugging due to kernel taint
-INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689
- __slab_alloc+0x4b4/0x4f0
- kmem_cache_alloc_trace+0x10b/0x190
- kmalloc_oob_right+0x3d/0x75 [test_kasan]
- init_module+0x9/0x47 [test_kasan]
- do_one_initcall+0x99/0x200
- load_module+0x2cb3/0x3b20
- SyS_finit_module+0x76/0x80
- system_call_fastpath+0x12/0x17
-INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080
-INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720
-
-Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
-Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
-Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
-Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc ........
-Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
-CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98
-Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
- ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78
- ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8
- ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558
-Call Trace:
- [<ffffffff81cc68ae>] dump_stack+0x46/0x58
- [<ffffffff811fd848>] print_trailer+0xf8/0x160
- [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
- [<ffffffff811ff0f5>] object_err+0x35/0x40
- [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
- [<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0
- [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
- [<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40
- [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
- [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
- [<ffffffff8120a995>] __asan_store1+0x75/0xb0
- [<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan]
- [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
- [<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan]
- [<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan]
- [<ffffffff810002d9>] do_one_initcall+0x99/0x200
- [<ffffffff811e4e5c>] ? __vunmap+0xec/0x160
- [<ffffffff81114f63>] load_module+0x2cb3/0x3b20
- [<ffffffff8110fd70>] ? m_show+0x240/0x240
- [<ffffffff81115f06>] SyS_finit_module+0x76/0x80
- [<ffffffff81cd3129>] system_call_fastpath+0x12/0x17
-Memory state around the buggy address:
- ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
- ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
- ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
- ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
- ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00
->ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc
- ^
- ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
- ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
- ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb
- ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
- ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
-==================================================================
-
-The header of the report discribe what kind of bug happened and what kind of
-access caused it. It's followed by the description of the accessed slub object
-(see 'SLUB Debug output' section in Documentation/vm/slub.txt for details) and
-the description of the accessed memory page.
-
-In the last section the report shows memory state around the accessed address.
-Reading this part requires some understanding of how KASAN works.
-
-The state of each 8 aligned bytes of memory is encoded in one shadow byte.
-Those 8 bytes can be accessible, partially accessible, freed or be a redzone.
-We use the following encoding for each shadow byte: 0 means that all 8 bytes
-of the corresponding memory region are accessible; number N (1 <= N <= 7) means
-that the first N bytes are accessible, and other (8 - N) bytes are not;
-any negative value indicates that the entire 8-byte word is inaccessible.
-We use different negative values to distinguish between different kinds of
-inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
-
-In the report above the arrows point to the shadow byte 03, which means that
-the accessed address is partially accessible.
-
-
-2. Implementation details
-=========================
-
-From a high level, our approach to memory error detection is similar to that
-of kmemcheck: use shadow memory to record whether each byte of memory is safe
-to access, and use compile-time instrumentation to check shadow memory on each
-memory access.
-
-AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory
-(e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and
-offset to translate a memory address to its corresponding shadow address.
-
-Here is the function which translates an address to its corresponding shadow
-address:
-
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
- return ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
- + KASAN_SHADOW_OFFSET;
-}
-
-where KASAN_SHADOW_SCALE_SHIFT = 3.
-
-Compile-time instrumentation used for checking memory accesses. Compiler inserts
-function calls (__asan_load*(addr), __asan_store*(addr)) before each memory
-access of size 1, 2, 4, 8 or 16. These functions check whether memory access is
-valid or not by checking corresponding shadow memory.
-
-GCC 5.0 has possibility to perform inline instrumentation. Instead of making
-function calls GCC directly inserts the code to check the shadow memory.
-This option significantly enlarges kernel but it gives x1.1-x2 performance
-boost over outline instrumented kernel.
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index db101857b2c9..069fcb3eef6e 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -274,7 +274,44 @@ menuconfig:
This is similar to the simple config entry above, but it also gives a
hint to front ends, that all suboptions should be displayed as a
-separate list of options.
+separate list of options. To make sure all the suboptions will really
+show up under the menuconfig entry and not outside of it, every item
+from the <config options> list must depend on the menuconfig symbol.
+In practice, this is achieved by using one of the following two constructs:
+
+(1):
+menuconfig M
+if M
+ config C1
+ config C2
+endif
+
+(2):
+menuconfig M
+config C1
+ depends on M
+config C2
+ depends on M
+
+In the following examples (3) and (4), C1 and C2 still have the M
+dependency, but will not appear under menuconfig M anymore, because
+of C0, which doesn't depend on M:
+
+(3):
+menuconfig M
+ config C0
+if M
+ config C1
+ config C2
+endif
+
+(4):
+menuconfig M
+config C0
+config C1
+ depends on M
+config C2
+ depends on M
choices:
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 88ff63d5fde3..b0eb27b956d9 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -393,6 +393,15 @@ Notes on loading the dump-capture kernel:
* We generally don' have to bring up a SMP kernel just to capture the
dump. Hence generally it is useful either to build a UP dump-capture
kernel or specify maxcpus=1 option while loading dump-capture kernel.
+  Note that while maxcpus always works, it is preferable to use nr_cpus
+  instead to save memory, if nr_cpus is supported by the architecture in
+  use (e.g. x86).
+
+* You should enable multi-cpu support in the dump-capture kernel if you
+  intend to use multi-threaded programs with it, such as the parallel dump
+  feature of makedumpfile. Otherwise, multi-threaded programs may suffer
+  significant performance degradation. To enable multi-cpu support, bring up
+  an SMP dump-capture kernel and specify the maxcpus/nr_cpus and
+  disable_cpu_apicid=[X] options while loading it.
* For s390x there are two kdump modes: If a ELF header is specified with
the elfcorehdr= kernel parameter, it is used by the kdump kernel as it
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 1dafc52167b0..05a7857a4a83 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -1,731 +1,652 @@
+.. _kernel_docs:
- Index of Documentation for People Interested in Writing and/or
-
- Understanding the Linux Kernel.
+Index of Documentation for People Interested in Writing and/or Understanding the Linux Kernel
+=============================================================================================
Juan-Mariano de Goyeneche <jmseyas@dit.upm.es>
-/*
- * The latest version of this document may be found at:
- * http://www.dit.upm.es/~jmseyas/linux/kernel/hackers-docs.html
- */
-
- The need for a document like this one became apparent in the
- linux-kernel mailing list as the same questions, asking for pointers
- to information, appeared again and again.
-
- Fortunately, as more and more people get to GNU/Linux, more and more
- get interested in the Kernel. But reading the sources is not always
- enough. It is easy to understand the code, but miss the concepts, the
- philosophy and design decisions behind this code.
-
- Unfortunately, not many documents are available for beginners to
- start. And, even if they exist, there was no "well-known" place which
- kept track of them. These lines try to cover this lack. All documents
- available on line known by the author are listed, while some reference
- books are also mentioned.
-
- PLEASE, if you know any paper not listed here or write a new document,
- send me an e-mail, and I'll include a reference to it here. Any
- corrections, ideas or comments are also welcomed.
-
- The papers that follow are listed in no particular order. All are
- cataloged with the following fields: the document's "Title", the
- "Author"/s, the "URL" where they can be found, some "Keywords" helpful
- when searching for specific topics, and a brief "Description" of the
- Document.
-
- Enjoy!
-
- ON-LINE DOCS:
-
- * Title: "Linux Device Drivers, Third Edition"
- Author: Jonathan Corbet, Alessandro Rubini, Greg Kroah-Hartman
- URL: http://lwn.net/Kernel/LDD3/
- Description: A 600-page book covering the (2.6.10) driver
- programming API and kernel hacking in general. Available under the
- Creative Commons Attribution-ShareAlike 2.0 license.
-
- * Title: "The Linux Kernel"
- Author: David A. Rusling.
- URL: http://www.tldp.org/LDP/tlk/tlk.html
- Keywords: everything!, book.
- Description: On line, 200 pages book describing most aspects of
- the Linux Kernel. Probably, the first reference for beginners.
- Lots of illustrations explaining data structures use and
- relationships in the purest Richard W. Stevens' style. Contents:
- "1.-Hardware Basics, 2.-Software Basics, 3.-Memory Management,
- 4.-Processes, 5.-Interprocess Communication Mechanisms, 6.-PCI,
- 7.-Interrupts and Interrupt Handling, 8.-Device Drivers, 9.-The
- File system, 10.-Networks, 11.-Kernel Mechanisms, 12.-Modules,
- 13.-The Linux Kernel Sources, A.-Linux Data Structures, B.-The
- Alpha AXP Processor, C.-Useful Web and FTP Sites, D.-The GNU
- General Public License, Glossary". In short: a must have.
-
- * Title: "Linux Device Drivers, 2nd Edition"
- Author: Alessandro Rubini and Jonathan Corbet.
- URL: http://www.xml.com/ldd/chapter/book/index.html
- Keywords: device drivers, modules, debugging, memory, hardware,
- interrupt handling, char drivers, block drivers, kmod, mmap, DMA,
- buses.
- Description: O'Reilly's popular book, now also on-line under the
- GNU Free Documentation License.
- Notes: You can also buy it in paper-form from O'Reilly. See below
- under BOOKS (Not on-line).
-
- * Title: "Conceptual Architecture of the Linux Kernel"
- Author: Ivan T. Bowman.
- URL: http://plg.uwaterloo.ca/
- Keywords: conceptual software architecture, extracted design,
- reverse engineering, system structure.
- Description: Conceptual software architecture of the Linux kernel,
- automatically extracted from the source code. Very detailed. Good
- figures. Gives good overall kernel understanding.
-
- * Title: "Concrete Architecture of the Linux Kernel"
- Author: Ivan T. Bowman, Saheem Siddiqi, and Meyer C. Tanuan.
- URL: http://plg.uwaterloo.ca/
- Keywords: concrete architecture, extracted design, reverse
- engineering, system structure, dependencies.
- Description: Concrete architecture of the Linux kernel,
- automatically extracted from the source code. Very detailed. Good
- figures. Gives good overall kernel understanding. This papers
- focus on lower details than its predecessor (files, variables...).
-
- * Title: "Linux as a Case Study: Its Extracted Software
- Architecture"
- Author: Ivan T. Bowman, Richard C. Holt and Neil V. Brewster.
- URL: http://plg.uwaterloo.ca/
- Keywords: software architecture, architecture recovery,
- redocumentation.
- Description: Paper appeared at ICSE'99, Los Angeles, May 16-22,
- 1999. A mixture of the previous two documents from the same
- author.
-
- * Title: "Overview of the Virtual File System"
- Author: Richard Gooch.
- URL: http://www.mjmwired.net/kernel/Documentation/filesystems/vfs.txt
- Keywords: VFS, File System, mounting filesystems, opening files,
- dentries, dcache.
- Description: Brief introduction to the Linux Virtual File System.
- What is it, how it works, operations taken when opening a file or
- mounting a file system and description of important data
- structures explaining the purpose of each of their entries.
-
- * Title: "The Linux RAID-1, 4, 5 Code"
- Author: Ingo Molnar, Gadi Oxman and Miguel de Icaza.
- URL: http://www.linuxjournal.com/article.php?sid=2391
- Keywords: RAID, MD driver.
- Description: Linux Journal Kernel Korner article. Here is its
- abstract: "A description of the implementation of the RAID-1,
- RAID-4 and RAID-5 personalities of the MD device driver in the
- Linux kernel, providing users with high performance and reliable,
- secondary-storage capability using software".
-
- * Title: "Dynamic Kernels: Modularized Device Drivers"
- Author: Alessandro Rubini.
- URL: http://www.linuxjournal.com/article.php?sid=1219
- Keywords: device driver, module, loading/unloading modules,
- allocating resources.
- Description: Linux Journal Kernel Korner article. Here is its
- abstract: "This is the first of a series of four articles
- co-authored by Alessandro Rubini and Georg Zezchwitz which present
- a practical approach to writing Linux device drivers as kernel
- loadable modules. This installment presents an introduction to the
- topic, preparing the reader to understand next month's
- installment".
-
- * Title: "Dynamic Kernels: Discovery"
- Author: Alessandro Rubini.
- URL: http://www.linuxjournal.com/article.php?sid=1220
- Keywords: character driver, init_module, clean_up module,
- autodetection, mayor number, minor number, file operations,
- open(), close().
- Description: Linux Journal Kernel Korner article. Here is its
- abstract: "This article, the second of four, introduces part of
- the actual code to create custom module implementing a character
- device driver. It describes the code for module initialization and
- cleanup, as well as the open() and close() system calls".
-
- * Title: "The Devil's in the Details"
- Author: Georg v. Zezschwitz and Alessandro Rubini.
- URL: http://www.linuxjournal.com/article.php?sid=1221
- Keywords: read(), write(), select(), ioctl(), blocking/non
- blocking mode, interrupt handler.
- Description: Linux Journal Kernel Korner article. Here is its
- abstract: "This article, the third of four on writing character
- device drivers, introduces concepts of reading, writing, and using
- ioctl-calls".
-
- * Title: "Dissecting Interrupts and Browsing DMA"
- Author: Alessandro Rubini and Georg v. Zezschwitz.
- URL: http://www.linuxjournal.com/article.php?sid=1222
- Keywords: interrupts, irqs, DMA, bottom halves, task queues.
- Description: Linux Journal Kernel Korner article. Here is its
- abstract: "This is the fourth in a series of articles about
- writing character device drivers as loadable kernel modules. This
- month, we further investigate the field of interrupt handling.
- Though it is conceptually simple, practical limitations and
- constraints make this an ``interesting'' part of device driver
- writing, and several different facilities have been provided for
- different situations. We also investigate the complex topic of
- DMA".
-
- * Title: "Device Drivers Concluded"
- Author: Georg v. Zezschwitz.
- URL: http://www.linuxjournal.com/article.php?sid=1287
- Keywords: address spaces, pages, pagination, page management,
- demand loading, swapping, memory protection, memory mapping, mmap,
- virtual memory areas (VMAs), vremap, PCI.
- Description: Finally, the above turned out into a five articles
- series. This latest one's introduction reads: "This is the last of
- five articles about character device drivers. In this final
- section, Georg deals with memory mapping devices, beginning with
- an overall description of the Linux memory management concepts".
-
- * Title: "Network Buffers And Memory Management"
- Author: Alan Cox.
- URL: http://www.linuxjournal.com/article.php?sid=1312
- Keywords: sk_buffs, network devices, protocol/link layer
- variables, network devices flags, transmit, receive,
- configuration, multicast.
- Description: Linux Journal Kernel Korner. Here is the abstract:
- "Writing a network device driver for Linux is fundamentally
- simple---most of the complexity (other than talking to the
- hardware) involves managing network packets in memory".
-
- * Title: "Linux Kernel Hackers' Guide"
- Author: Michael K. Johnson.
- URL: http://www.tldp.org/LDP/khg/HyperNews/get/khg.html
- Keywords: device drivers, files, VFS, kernel interface, character vs
- block devices, hardware interrupts, scsi, DMA, access to user memory,
- memory allocation, timers.
- Description: A guide designed to help you get up to speed on the
- concepts that are not intuitevly obvious, and to document the internal
- structures of Linux.
-
- * Title: "The Venus kernel interface"
- Author: Peter J. Braam.
- URL:
- http://www.coda.cs.cmu.edu/doc/html/kernel-venus-protocol.html
- Keywords: coda, filesystem, venus, cache manager.
- Description: "This document describes the communication between
- Venus and kernel level file system code needed for the operation
- of the Coda filesystem. This version document is meant to describe
- the current interface (version 1.0) as well as improvements we
- envisage".
-
- * Title: "Programming PCI-Devices under Linux"
- Author: Claus Schroeter.
- URL:
- ftp://ftp.llp.fu-berlin.de/pub/linux/LINUX-LAB/whitepapers/pcip.ps.gz
- Keywords: PCI, device, busmastering.
- Description: 6 pages tutorial on PCI programming under Linux.
- Gives the basic concepts on the architecture of the PCI subsystem,
- as long as basic functions and macros to read/write the devices
- and perform busmastering.
-
- * Title: "Writing Character Device Driver for Linux"
- Author: R. Baruch and C. Schroeter.
- URL:
- ftp://ftp.llp.fu-berlin.de/pub/linux/LINUX-LAB/whitepapers/drivers.ps.gz
- Keywords: character device drivers, I/O, signals, DMA, accessing
- ports in user space, kernel environment.
- Description: 68 pages paper on writing character drivers. A little
- bit old (1.993, 1.994) although still useful.
-
- * Title: "Design and Implementation of the Second Extended
- Filesystem"
- Author: Rémy Card, Theodore Ts'o, Stephen Tweedie.
- URL: http://web.mit.edu/tytso/www/linux/ext2intro.html
- Keywords: ext2, linux fs history, inode, directory, link, devices,
- VFS, physical structure, performance, benchmarks, ext2fs library,
- ext2fs tools, e2fsck.
- Description: Paper written by three of the top ext2 hackers.
- Covers Linux filesystems history, ext2 motivation, ext2 features,
- design, physical structure on disk, performance, benchmarks,
- e2fsck's passes description... A must read!
- Notes: This paper was first published in the Proceedings of the
- First Dutch International Symposium on Linux, ISBN 90-367-0385-9.
-
- * Title: "Analysis of the Ext2fs structure"
- Author: Louis-Dominique Dubeau.
- URL: http://teaching.csse.uwa.edu.au/units/CITS2002/fs-ext2/
- Keywords: ext2, filesystem, ext2fs.
- Description: Description of ext2's blocks, directories, inodes,
- bitmaps, invariants...
-
- * Title: "Journaling the Linux ext2fs Filesystem"
- Author: Stephen C. Tweedie.
- URL:
- ftp://ftp.uk.linux.org/pub/linux/sct/fs/jfs/journal-design.ps.gz
- Keywords: ext3, journaling.
- Description: Excellent 8-pages paper explaining the journaling
- capabilities added to ext2 by the author, showing different
- problems faced and the alternatives chosen.
-
- * Title: "Kernel API changes from 2.0 to 2.2"
- Author: Richard Gooch.
- URL: http://www.safe-mbox.com/~rgooch/linux/docs/porting-to-2.2.html
- Keywords: 2.2, changes.
- Description: Kernel functions/structures/variables which changed
- from 2.0.x to 2.2.x.
-
- * Title: "Kernel API changes from 2.2 to 2.4"
- Author: Richard Gooch.
- URL: http://www.safe-mbox.com/~rgooch/linux/docs/porting-to-2.4.html
- Keywords: 2.4, changes.
- Description: Kernel functions/structures/variables which changed
- from 2.2.x to 2.4.x.
-
- * Title: "Linux Kernel Module Programming Guide"
- Author: Ori Pomerantz.
- URL: http://tldp.org/LDP/lkmpg/2.6/html/index.html
- Keywords: modules, GPL book, /proc, ioctls, system calls,
- interrupt handlers .
- Description: Very nice 92 pages GPL book on the topic of modules
- programming. Lots of examples.
-
- * Title: "I/O Event Handling Under Linux"
- Author: Richard Gooch.
- Keywords: IO, I/O, select(2), poll(2), FDs, aio_read(2), readiness
- event queues.
- Description: From the Introduction: "I/O Event handling is about
- how your Operating System allows you to manage a large number of
- open files (file descriptors in UNIX/POSIX, or FDs) in your
- application. You want the OS to notify you when FDs become active
- (have data ready to be read or are ready for writing). Ideally you
- want a mechanism that is scalable. This means a large number of
- inactive FDs cost very little in memory and CPU time to manage".
-
- * Title: "The Kernel Hacking HOWTO"
- Author: Various Talented People, and Rusty.
- Location: in kernel tree, Documentation/DocBook/kernel-hacking.tmpl
- (must be built as "make {htmldocs | psdocs | pdfdocs})
- Keywords: HOWTO, kernel contexts, deadlock, locking, modules,
- symbols, return conventions.
- Description: From the Introduction: "Please understand that I
- never wanted to write this document, being grossly underqualified,
- but I always wanted to read it, and this was the only way. I
- simply explain some best practices, and give reading entry-points
- into the kernel sources. I avoid implementation details: that's
- what the code is for, and I ignore whole tracts of useful
- routines. This document assumes familiarity with C, and an
- understanding of what the kernel is, and how it is used. It was
- originally written for the 2.3 kernels, but nearly all of it
- applies to 2.2 too; 2.0 is slightly different".
-
- * Title: "Writing an ALSA Driver"
- Author: Takashi Iwai <tiwai@suse.de>
- URL: http://www.alsa-project.org/~iwai/writing-an-alsa-driver/index.html
- Keywords: ALSA, sound, soundcard, driver, lowlevel, hardware.
- Description: Advanced Linux Sound Architecture for developers,
- both at kernel and user-level sides. ALSA is the Linux kernel
- sound architecture in the 2.6 kernel version.
-
- * Title: "Programming Guide for Linux USB Device Drivers"
- Author: Detlef Fliegl.
- URL: http://usb.in.tum.de/usbdoc/
- Keywords: USB, universal serial bus.
- Description: A must-read. From the Preface: "This document should
- give detailed information about the current state of the USB
- subsystem and its API for USB device drivers. The first section
- will deal with the basics of USB devices. You will learn about
- different types of devices and their properties. Going into detail
- you will see how USB devices communicate on the bus. The second
- section gives an overview of the Linux USB subsystem [2] and the
- device driver framework. Then the API and its data structures will
- be explained step by step. The last section of this document
- contains a reference of all API calls and their return codes".
- Notes: Beware: the main page states: "This document may not be
- published, printed or used in excerpts without explicit permission
- of the author". Fortunately, it may still be read...
-
- * Title: "Linux Kernel Mailing List Glossary"
- Author: various
- URL: http://kernelnewbies.org/glossary/
- Keywords: glossary, terms, linux-kernel.
- Description: From the introduction: "This glossary is intended as
- a brief description of some of the acronyms and terms you may hear
- during discussion of the Linux kernel".
-
- * Title: "Linux Kernel Locking HOWTO"
- Author: Various Talented People, and Rusty.
- Location: in kernel tree, Documentation/DocBook/kernel-locking.tmpl
- (must be built as "make {htmldocs | psdocs | pdfdocs})
- Keywords: locks, locking, spinlock, semaphore, atomic, race
- condition, bottom halves, tasklets, softirqs.
- Description: The title says it all: document describing the
- locking system in the Linux Kernel either in uniprocessor or SMP
- systems.
- Notes: "It was originally written for the later (>2.3.47) 2.3
- kernels, but most of it applies to 2.2 too; 2.0 is slightly
- different". Freely redistributable under the conditions of the GNU
- General Public License.
-
- * Title: "Global spinlock list and usage"
- Author: Rick Lindsley.
- URL: http://lse.sourceforge.net/lockhier/global-spin-lock
- Keywords: spinlock.
- Description: This is an attempt to document both the existence and
- usage of the spinlocks in the Linux 2.4.5 kernel. Comprehensive
- list of spinlocks showing when they are used, which functions
- access them, how each lock is acquired, under what conditions it
- is held, whether interrupts can occur or not while it is held...
-
- * Title: "Porting Linux 2.0 Drivers To Linux 2.2: Changes and New
- Features "
- Author: Alan Cox.
- URL: http://www.linux-mag.com/1999-05/gear_01.html
- Keywords: ports, porting.
- Description: Article from Linux Magazine on porting from 2.0 to
- 2.2 kernels.
-
- * Title: "Porting Device Drivers To Linux 2.2: part II"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/238
- Keywords: ports, porting.
- Description: Second part on porting from 2.0 to 2.2 kernels.
-
- * Title: "How To Make Sure Your Driver Will Work On The Power
- Macintosh"
- Author: Paul Mackerras.
- URL: http://www.linux-mag.com/id/261
- Keywords: Mac, Power Macintosh, porting, drivers, compatibility.
- Description: The title says it all.
-
- * Title: "An Introduction to SCSI Drivers"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/284
- Keywords: SCSI, device, driver.
- Description: The title says it all.
-
- * Title: "Advanced SCSI Drivers And Other Tales"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/307
- Keywords: SCSI, device, driver, advanced.
- Description: The title says it all.
-
- * Title: "Writing Linux Mouse Drivers"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/330
- Keywords: mouse, driver, gpm.
- Description: The title says it all.
-
- * Title: "More on Mouse Drivers"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/356
- Keywords: mouse, driver, gpm, races, asynchronous I/O.
- Description: The title still says it all.
-
- * Title: "Writing Video4linux Radio Driver"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/381
- Keywords: video4linux, driver, radio, radio devices.
- Description: The title says it all.
-
- * Title: "Video4linux Drivers, Part 1: Video-Capture Device"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/406
- Keywords: video4linux, driver, video capture, capture devices,
- camera driver.
- Description: The title says it all.
-
- * Title: "Video4linux Drivers, Part 2: Video-capture Devices"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/429
- Keywords: video4linux, driver, video capture, capture devices,
- camera driver, control, query capabilities, capability, facility.
- Description: The title says it all.
-
- * Title: "PCI Management in Linux 2.2"
- Author: Alan Cox.
- URL: http://www.linux-mag.com/id/452
- Keywords: PCI, bus, bus-mastering.
- Description: The title says it all.
-
- * Title: "Linux 2.4 Kernel Internals"
- Author: Tigran Aivazian and Christoph Hellwig.
- URL: http://www.moses.uklinux.net/patches/lki.html
- Keywords: Linux, kernel, booting, SMB boot, VFS, page cache.
- Description: A little book used for a short training course.
- Covers building the kernel image, booting (including SMP bootup),
- process management, VFS and more.
-
- * Title: "Linux IP Networking. A Guide to the Implementation and
- Modification of the Linux Protocol Stack."
- Author: Glenn Herrin.
- URL: http://www.cs.unh.edu/cnrg/gherrin
- Keywords: network, networking, protocol, IP, UDP, TCP, connection,
- socket, receiving, transmitting, forwarding, routing, packets,
- modules, /proc, sk_buff, FIB, tags.
- Description: Excellent paper devoted to the Linux IP Networking,
- explaining anything from the kernel's to the user space
- configuration tools' code. Very good to get a general overview of
- the kernel networking implementation and understand all steps
- packets follow from the time they are received at the network
- device till they are delivered to applications. The studied kernel
- code is from 2.2.14 version. Provides code for a working packet
- dropper example.
-
- * Title: "Get those boards talking under Linux."
- Author: Alex Ivchenko.
- URL: http://www.edn.com/article/CA46968.html
- Keywords: data-acquisition boards, drivers, modules, interrupts,
- memory allocation.
- Description: Article written for people wishing to make their data
- acquisition boards work on their GNU/Linux machines. Gives a basic
- overview on writing drivers, from the naming of functions to
- interrupt handling.
- Notes: Two-parts article. Part II is at
- URL: http://www.edn.com/article/CA46998.html
-
- * Title: "Linux PCMCIA Programmer's Guide"
- Author: David Hinds.
- URL: http://pcmcia-cs.sourceforge.net/ftp/doc/PCMCIA-PROG.html
- Keywords: PCMCIA.
- Description: "This document describes how to write kernel device
- drivers for the Linux PCMCIA Card Services interface. It also
- describes how to write user-mode utilities for communicating with
- Card Services.
-
- * Title: "The Linux Kernel NFSD Implementation"
- Author: Neil Brown.
- URL:
- http://www.cse.unsw.edu.au/~neilb/oss/linux-commentary/nfsd.html
- Keywords: knfsd, nfsd, NFS, RPC, lockd, mountd, statd.
- Description: The title says it all.
- Notes: Covers knfsd's version 1.4.7 (patch against 2.2.7 kernel).
-
- * Title: "A Linux vm README"
- Author: Kanoj Sarcar.
- URL: http://kos.enix.org/pub/linux-vmm.html
- Keywords: virtual memory, mm, pgd, vma, page, page flags, page
- cache, swap cache, kswapd.
- Description: Telegraphic, short descriptions and definitions
- relating the Linux virtual memory implementation.
-
- * Title: "(nearly) Complete Linux Loadable Kernel Modules. The
- definitive guide for hackers, virus coders and system
- administrators."
- Author: pragmatic/THC.
- URL: http://packetstormsecurity.org/docs/hack/LKM_HACKING.html
- Keywords: syscalls, intercept, hide, abuse, symbol table.
- Description: Interesting paper on how to abuse the Linux kernel in
- order to intercept and modify syscalls, make
- files/directories/processes invisible, become root, hijack ttys,
- write kernel modules based virus... and solutions for admins to
- avoid all those abuses.
- Notes: For 2.0.x kernels. Gives guidances to port it to 2.2.x
- kernels.
-
- BOOKS: (Not on-line)
-
- * Title: "Linux Device Drivers"
- Author: Alessandro Rubini.
- Publisher: O'Reilly & Associates.
- Date: 1998.
- Pages: 439.
- ISBN: 1-56592-292-1
-
- * Title: "Linux Device Drivers, 2nd Edition"
- Author: Alessandro Rubini and Jonathan Corbet.
- Publisher: O'Reilly & Associates.
- Date: 2001.
- Pages: 586.
- ISBN: 0-59600-008-1
- Notes: Further information in
- http://www.oreilly.com/catalog/linuxdrive2/
-
- * Title: "Linux Device Drivers, 3rd Edition"
- Authors: Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman
- Publisher: O'Reilly & Associates.
- Date: 2005.
- Pages: 636.
- ISBN: 0-596-00590-3
- Notes: Further information in
- http://www.oreilly.com/catalog/linuxdrive3/
- PDF format, URL: http://lwn.net/Kernel/LDD3/
-
- * Title: "Linux Kernel Internals"
- Author: Michael Beck.
- Publisher: Addison-Wesley.
- Date: 1997.
- ISBN: 0-201-33143-8 (second edition)
-
- * Title: "The Design of the UNIX Operating System"
- Author: Maurice J. Bach.
- Publisher: Prentice Hall.
- Date: 1986.
- Pages: 471.
- ISBN: 0-13-201757-1
-
- * Title: "The Design and Implementation of the 4.3 BSD UNIX
- Operating System"
- Author: Samuel J. Leffler, Marshall Kirk McKusick, Michael J.
- Karels, John S. Quarterman.
- Publisher: Addison-Wesley.
- Date: 1989 (reprinted with corrections on October, 1990).
- ISBN: 0-201-06196-1
-
- * Title: "The Design and Implementation of the 4.4 BSD UNIX
- Operating System"
- Author: Marshall Kirk McKusick, Keith Bostic, Michael J. Karels,
- John S. Quarterman.
- Publisher: Addison-Wesley.
- Date: 1996.
- ISBN: 0-201-54979-4
-
- * Title: "Programmation Linux 2.0 API systeme et fonctionnement du
- noyau"
- Author: Remy Card, Eric Dumas, Franck Mevel.
- Publisher: Eyrolles.
- Date: 1997.
- Pages: 520.
- ISBN: 2-212-08932-5
- Notes: French.
-
- * Title: "Unix internals -- the new frontiers"
- Author: Uresh Vahalia.
- Publisher: Prentice Hall.
- Date: 1996.
- Pages: 600.
- ISBN: 0-13-101908-2
-
- * Title: "Programming for the real world - POSIX.4"
- Author: Bill O. Gallmeister.
- Publisher: O'Reilly & Associates, Inc..
- Date: 1995.
- Pages: ???.
- ISBN: I-56592-074-0
- Notes: Though not being directly about Linux, Linux aims to be
- POSIX. Good reference.
-
- * Title: "UNIX Systems for Modern Architectures: Symmetric
- Multiprocessing and Caching for Kernel Programmers"
- Author: Curt Schimmel.
- Publisher: Addison Wesley.
- Date: June, 1994.
- Pages: 432.
- ISBN: 0-201-63338-8
-
- * Title: "Linux Kernel Development, 3rd Edition"
- Author: Robert Love
- Publisher: Addison-Wesley.
- Date: July, 2010
- Pages: 440
- ISBN: 978-0672329463
-
- MISCELLANEOUS:
-
- * Name: linux/Documentation
- Author: Many.
- URL: Just look inside your kernel sources.
- Keywords: anything, DocBook.
- Description: Documentation that comes with the kernel sources,
- inside the Documentation directory. Some pages from this document
- (including this document itself) have been moved there, and might
- be more up to date than the web version.
-
- * Name: "Linux Kernel Source Reference"
- Author: Thomas Graichen.
- URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4
- Keywords: CVS, web, cvsweb, browsing source code.
- Description: Web interface to a CVS server with the kernel
- sources. "Here you can have a look at any file of the Linux kernel
- sources of any version starting from 1.0 up to the (daily updated)
- current version available. Also you can check the differences
- between two versions of a file".
-
- * Name: "Cross-Referencing Linux"
- URL: http://lxr.free-electrons.com/
- Keywords: Browsing source code.
- Description: Another web-based Linux kernel source code browser.
- Lots of cross references to variables and functions. You can see
- where they are defined and where they are used.
-
- * Name: "Linux Weekly News"
- URL: http://lwn.net
- Keywords: latest kernel news.
- Description: The title says it all. There's a fixed kernel section
- summarizing developers' work, bug fixes, new features and versions
- produced during the week. Published every Thursday.
-
- * Name: "Kernel Traffic"
- URL: http://kt.earth.li/kernel-traffic/index.html
- Keywords: linux-kernel mailing list, weekly kernel news.
- Description: Weekly newsletter covering the most relevant
- discussions of the linux-kernel mailing list.
-
- * Name: "CuTTiNG.eDGe.LiNuX"
- URL: http://edge.kernelnotes.org
- Keywords: changelist.
- Description: Site which provides the changelist for every kernel
- release. What's new, what's better, what's changed. Myrdraal reads
- the patches and describes them. Pointers to the patches are there,
- too.
-
- * Name: "New linux-kernel Mailing List FAQ"
- URL: http://www.tux.org/lkml/
- Keywords: linux-kernel mailing list FAQ.
- Description: linux-kernel is a mailing list for developers to
- communicate. This FAQ builds on the previous linux-kernel mailing
- list FAQ maintained by Frohwalt Egerer, who no longer maintains
- it. Read it to see how to join the mailing list. Dozens of
- interesting questions regarding the list, Linux, developers (who
- is ...?), terms (what is...?) are answered here too. Just read it.
-
- * Name: "Linux Virtual File System"
- Author: Peter J. Braam.
- URL: http://www.coda.cs.cmu.edu/doc/talks/linuxvfs/
- Keywords: slides, VFS, inode, superblock, dentry, dcache.
- Description: Set of slides, presumably from a presentation on the
- Linux VFS layer. Covers version 2.1.x, with dentries and the
- dcache.
-
- * Name: "Gary's Encyclopedia - The Linux Kernel"
- Author: Gary (I suppose...).
- URL: http://slencyclopedia.berlios.de/index.html
- Keywords: linux, community, everything!
- Description: Gary's Encyclopedia exists to allow the rapid finding
- of documentation and other information of interest to GNU/Linux
- users. It has about 4000 links to external pages in 150 major
- categories. This link is for kernel-specific links, documents,
- sites... This list is now hosted by developer.Berlios.de,
- but seems not to have been updated since sometime in 1999.
-
- * Name: "The home page of Linux-MM"
- Author: The Linux-MM team.
- URL: http://linux-mm.org/
- Keywords: memory management, Linux-MM, mm patches, TODO, docs,
- mailing list.
- Description: Site devoted to Linux Memory Management development.
- Memory related patches, HOWTOs, links, mm developers... Don't miss
- it if you are interested in memory management development!
-
- * Name: "Kernel Newbies IRC Channel and Website"
- URL: http://www.kernelnewbies.org
- Keywords: IRC, newbies, channel, asking doubts.
- Description: #kernelnewbies on irc.oftc.net.
- #kernelnewbies is an IRC network dedicated to the 'newbie'
- kernel hacker. The audience mostly consists of people who are
- learning about the kernel, working on kernel projects or
- professional kernel hackers that want to help less seasoned kernel
- people.
- #kernelnewbies is on the OFTC IRC Network.
- Try irc.oftc.net as your server and then /join #kernelnewbies.
- The kernelnewbies website also hosts articles, documents, FAQs...
-
- * Name: "linux-kernel mailing list archives and search engines"
- URL: http://vger.kernel.org/vger-lists.html
- URL: http://www.uwsg.indiana.edu/hypermail/linux/kernel/index.html
- URL: http://marc.theaimsgroup.com/?l=linux-kernel
- URL: http://groups.google.com/group/mlist.linux.kernel
- URL: http://www.cs.helsinki.fi/linux/linux-kernel/
- URL: http://www.lib.uaa.alaska.edu/linux-kernel/
- Keywords: linux-kernel, archives, search.
- Description: Some of the linux-kernel mailing list archivers. If
- you have a better/another one, please let me know.
- _________________________________________________________________
-
- Document last updated on Sat 2005-NOV-19
+The need for a document like this one became apparent in the
+linux-kernel mailing list as the same questions, asking for pointers
+to information, appeared again and again.
+
+Fortunately, as more and more people get to GNU/Linux, more and more
+get interested in the Kernel. But reading the sources is not always
+enough. It is easy to understand the code, but miss the concepts, the
+philosophy and design decisions behind this code.
+
+Unfortunately, not many documents are available for beginners to
+start. And, even if they exist, there was no "well-known" place which
+kept track of them. These lines try to cover this lack. All documents
+available on line known by the author are listed, while some reference
+books are also mentioned.
+
+PLEASE, if you know any paper not listed here or write a new document,
+send me an e-mail, and I'll include a reference to it here. Any
+corrections, ideas or comments are also welcomed.
+
+The papers that follow are listed in no particular order. All are
+cataloged with the following fields: the document's "Title", the
+"Author"/s, the "URL" where they can be found, some "Keywords" helpful
+when searching for specific topics, and a brief "Description" of the
+Document.
+
+Enjoy!
+
+.. note::
+
+   The documents in each section are ordered by their publication date,
+   from the newest to the oldest.
+
+Docs at the Linux Kernel tree
+-----------------------------
+
+The DocBook books should be built with ``make {htmldocs | psdocs | pdfdocs}``.
+The Sphinx books should be built with ``make {htmldocs | pdfdocs | epubdocs}``.
+
+ * Name: **linux/Documentation**
+
+ :Author: Many.
+ :Location: Documentation/
+ :Keywords: text files, Sphinx, DocBook.
+ :Description: Documentation that comes with the kernel sources,
+ inside the Documentation directory. Some pages from this document
+ (including this document itself) have been moved there, and might
+ be more up to date than the web version.
+
+ * Title: **The Kernel Hacking HOWTO**
+
+ :Author: Various Talented People, and Rusty.
+ :Location: Documentation/DocBook/kernel-hacking.tmpl
+ :Keywords: HOWTO, kernel contexts, deadlock, locking, modules,
+ symbols, return conventions.
+ :Description: From the Introduction: "Please understand that I
+ never wanted to write this document, being grossly underqualified,
+ but I always wanted to read it, and this was the only way. I
+ simply explain some best practices, and give reading entry-points
+ into the kernel sources. I avoid implementation details: that's
+ what the code is for, and I ignore whole tracts of useful
+ routines. This document assumes familiarity with C, and an
+ understanding of what the kernel is, and how it is used. It was
+ originally written for the 2.3 kernels, but nearly all of it
+ applies to 2.2 too; 2.0 is slightly different".
+
+ * Title: **Linux Kernel Locking HOWTO**
+
+ :Author: Various Talented People, and Rusty.
+ :Location: Documentation/DocBook/kernel-locking.tmpl
+ :Keywords: locks, locking, spinlock, semaphore, atomic, race
+ condition, bottom halves, tasklets, softirqs.
+ :Description: The title says it all: document describing the
+ locking system in the Linux Kernel either in uniprocessor or SMP
+ systems.
+ :Notes: "It was originally written for the later (>2.3.47) 2.3
+ kernels, but most of it applies to 2.2 too; 2.0 is slightly
+ different". Freely redistributable under the conditions of the GNU
+ General Public License.
+
+On-line docs
+------------
+
+ * Title: **Linux Kernel Mailing List Glossary**
+
+ :Author: various
+ :URL: http://kernelnewbies.org/glossary/
+ :Date: rolling version
+ :Keywords: glossary, terms, linux-kernel.
+ :Description: From the introduction: "This glossary is intended as
+ a brief description of some of the acronyms and terms you may hear
+ during discussion of the Linux kernel".
+
+ * Title: **Tracing the Way of Data in a TCP Connection through the Linux Kernel**
+
+ :Author: Richard Sailer
+ :URL: https://archive.org/details/linux_kernel_data_flow_short_paper
+ :Date: 2016
+ :Keywords: Linux Kernel Networking, TCP, tracing, ftrace
+      :Description: A seminar paper explaining ftrace and how to use it for
+        understanding Linux kernel internals,
+        illustrated by tracing the path of a TCP packet through the kernel.
+ :Abstract: *This short paper outlines the usage of ftrace a tracing framework
+ as a tool to understand a running Linux system.
+ Having obtained a trace-log a kernel hacker can read and understand
+ source code more determined and with context.
+ In a detailed example this approach is demonstrated in tracing
+ and the way of data in a TCP Connection through the kernel.
+ Finally this trace-log is used as base for more a exact conceptual
+ exploration and description of the Linux TCP/IP implementation.*
+
+ * Title: **On submitting kernel Patches**
+
+ :Author: Andi Kleen
+ :URL: http://halobates.de/on-submitting-kernel-patches.pdf
+ :Date: 2008
+ :Keywords: patches, review process, types of submissions, basic rules, case studies
+      :Description: This paper gives practical, experience-based advice on what
+        types of patches there are and how likely they are to get merged.
+ :Abstract:
+ [...]. This paper examines some common problems for
+ submitting larger changes and some strategies to avoid problems.
+
+ * Title: **Overview of the Virtual File System**
+
+ :Author: Richard Gooch.
+ :URL: http://www.mjmwired.net/kernel/Documentation/filesystems/vfs.txt
+ :Date: 2007
+ :Keywords: VFS, File System, mounting filesystems, opening files,
+ dentries, dcache.
+      :Description: Brief introduction to the Linux Virtual File System:
+        what it is, how it works, the operations performed when opening a
+        file or mounting a file system, and a description of the important
+        data structures, explaining the purpose of each of their entries.
+
+ * Title: **Linux Device Drivers, Third Edition**
+
+ :Author: Jonathan Corbet, Alessandro Rubini, Greg Kroah-Hartman
+ :URL: http://lwn.net/Kernel/LDD3/
+ :Date: 2005
+ :Description: A 600-page book covering the (2.6.10) driver
+ programming API and kernel hacking in general. Available under the
+ Creative Commons Attribution-ShareAlike 2.0 license.
+ :note: You can also :ref:`purchase a copy from O'Reilly or elsewhere <ldd3_published>`.
+
+ * Title: **Writing an ALSA Driver**
+
+ :Author: Takashi Iwai <tiwai@suse.de>
+ :URL: http://www.alsa-project.org/~iwai/writing-an-alsa-driver/index.html
+ :Date: 2005
+ :Keywords: ALSA, sound, soundcard, driver, lowlevel, hardware.
+      :Description: The Advanced Linux Sound Architecture for developers,
+        at both the kernel and the user-space level. ALSA is the sound
+        architecture of the 2.6 kernel series.
+
+ * Title: **Linux PCMCIA Programmer's Guide**
+
+ :Author: David Hinds.
+ :URL: http://pcmcia-cs.sourceforge.net/ftp/doc/PCMCIA-PROG.html
+ :Date: 2003
+ :Keywords: PCMCIA.
+      :Description: "This document describes how to write kernel device
+        drivers for the Linux PCMCIA Card Services interface. It also
+        describes how to write user-mode utilities for communicating with
+        Card Services".
+
+ * Title: **Linux Kernel Module Programming Guide**
+
+ :Author: Ori Pomerantz.
+ :URL: http://tldp.org/LDP/lkmpg/2.6/html/index.html
+ :Date: 2001
+      :Keywords: modules, GPL book, /proc, ioctls, system calls,
+        interrupt handlers.
+      :Description: Very nice 92-page GPL book on the topic of module
+        programming. Lots of examples.
+
+ * Title: **Global spinlock list and usage**
+
+ :Author: Rick Lindsley.
+ :URL: http://lse.sourceforge.net/lockhier/global-spin-lock
+ :Date: 2001
+ :Keywords: spinlock.
+ :Description: This is an attempt to document both the existence and
+ usage of the spinlocks in the Linux 2.4.5 kernel. Comprehensive
+ list of spinlocks showing when they are used, which functions
+ access them, how each lock is acquired, under what conditions it
+ is held, whether interrupts can occur or not while it is held...
+
+ * Title: **A Linux vm README**
+
+ :Author: Kanoj Sarcar.
+ :URL: http://kos.enix.org/pub/linux-vmm.html
+ :Date: 2001
+ :Keywords: virtual memory, mm, pgd, vma, page, page flags, page
+ cache, swap cache, kswapd.
+      :Description: Telegraphic, short descriptions and definitions
+        relating to the Linux virtual memory implementation.
+
+ * Title: **Video4linux Drivers, Part 1: Video-Capture Device**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/406
+ :Date: 2000
+ :Keywords: video4linux, driver, video capture, capture devices,
+ camera driver.
+ :Description: The title says it all.
+
+ * Title: **Video4linux Drivers, Part 2: Video-capture Devices**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/429
+ :Date: 2000
+ :Keywords: video4linux, driver, video capture, capture devices,
+ camera driver, control, query capabilities, capability, facility.
+ :Description: The title says it all.
+
+ * Title: **Linux IP Networking. A Guide to the Implementation and Modification of the Linux Protocol Stack.**
+
+ :Author: Glenn Herrin.
+ :URL: http://www.cs.unh.edu/cnrg/gherrin
+ :Date: 2000
+ :Keywords: network, networking, protocol, IP, UDP, TCP, connection,
+ socket, receiving, transmitting, forwarding, routing, packets,
+ modules, /proc, sk_buff, FIB, tags.
+      :Description: Excellent paper devoted to Linux IP networking,
+        explaining everything from the kernel code to the user-space
+        configuration tools' code. Very good for getting a general overview
+        of the kernel networking implementation and understanding all the
+        steps packets follow from the time they are received at the network
+        device until they are delivered to applications. The studied kernel
+        code is from version 2.2.14. Provides code for a working packet
+        dropper example.
+
+ * Title: **How To Make Sure Your Driver Will Work On The Power Macintosh**
+
+ :Author: Paul Mackerras.
+ :URL: http://www.linux-mag.com/id/261
+ :Date: 1999
+ :Keywords: Mac, Power Macintosh, porting, drivers, compatibility.
+ :Description: The title says it all.
+
+ * Title: **An Introduction to SCSI Drivers**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/284
+ :Date: 1999
+ :Keywords: SCSI, device, driver.
+ :Description: The title says it all.
+
+ * Title: **Advanced SCSI Drivers And Other Tales**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/307
+ :Date: 1999
+ :Keywords: SCSI, device, driver, advanced.
+ :Description: The title says it all.
+
+ * Title: **Writing Linux Mouse Drivers**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/330
+ :Date: 1999
+ :Keywords: mouse, driver, gpm.
+ :Description: The title says it all.
+
+ * Title: **More on Mouse Drivers**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/356
+ :Date: 1999
+ :Keywords: mouse, driver, gpm, races, asynchronous I/O.
+ :Description: The title still says it all.
+
+ * Title: **Writing Video4linux Radio Driver**
+
+ :Author: Alan Cox.
+ :URL: http://www.linux-mag.com/id/381
+ :Date: 1999
+ :Keywords: video4linux, driver, radio, radio devices.
+ :Description: The title says it all.
+
+ * Title: **I/O Event Handling Under Linux**
+
+ :Author: Richard Gooch.
+ :URL: http://web.mit.edu/~yandros/doc/io-events.html
+ :Date: 1999
+ :Keywords: IO, I/O, select(2), poll(2), FDs, aio_read(2), readiness
+ event queues.
+ :Description: From the Introduction: "I/O Event handling is about
+ how your Operating System allows you to manage a large number of
+ open files (file descriptors in UNIX/POSIX, or FDs) in your
+ application. You want the OS to notify you when FDs become active
+ (have data ready to be read or are ready for writing). Ideally you
+ want a mechanism that is scalable. This means a large number of
+ inactive FDs cost very little in memory and CPU time to manage".
+
+ * Title: **(nearly) Complete Linux Loadable Kernel Modules. The definitive guide for hackers, virus coders and system administrators.**
+
+ :Author: pragmatic/THC.
+ :URL: http://packetstormsecurity.org/docs/hack/LKM_HACKING.html
+ :Date: 1999
+ :Keywords: syscalls, intercept, hide, abuse, symbol table.
+      :Description: Interesting paper on how to abuse the Linux kernel in
+        order to intercept and modify syscalls, make
+        files/directories/processes invisible, become root, hijack ttys,
+        write kernel-module-based viruses... and solutions for admins to
+        avoid all those abuses.
+      :Notes: For 2.0.x kernels. Gives guidance on porting it to 2.2.x
+        kernels.
+
+ * Name: **Linux Virtual File System**
+
+ :Author: Peter J. Braam.
+ :URL: http://www.coda.cs.cmu.edu/doc/talks/linuxvfs/
+ :Date: 1998
+ :Keywords: slides, VFS, inode, superblock, dentry, dcache.
+ :Description: Set of slides, presumably from a presentation on the
+ Linux VFS layer. Covers version 2.1.x, with dentries and the
+ dcache.
+
+ * Title: **The Venus kernel interface**
+
+ :Author: Peter J. Braam.
+ :URL: http://www.coda.cs.cmu.edu/doc/html/kernel-venus-protocol.html
+ :Date: 1998
+ :Keywords: coda, filesystem, venus, cache manager.
+ :Description: "This document describes the communication between
+ Venus and kernel level file system code needed for the operation
+ of the Coda filesystem. This version document is meant to describe
+ the current interface (version 1.0) as well as improvements we
+ envisage".
+
+ * Title: **Design and Implementation of the Second Extended Filesystem**
+
+ :Author: Rémy Card, Theodore Ts'o, Stephen Tweedie.
+ :URL: http://web.mit.edu/tytso/www/linux/ext2intro.html
+ :Date: 1998
+ :Keywords: ext2, linux fs history, inode, directory, link, devices,
+ VFS, physical structure, performance, benchmarks, ext2fs library,
+ ext2fs tools, e2fsck.
+ :Description: Paper written by three of the top ext2 hackers.
+ Covers Linux filesystems history, ext2 motivation, ext2 features,
+ design, physical structure on disk, performance, benchmarks,
+ e2fsck's passes description... A must read!
+ :Notes: This paper was first published in the Proceedings of the
+ First Dutch International Symposium on Linux, ISBN 90-367-0385-9.
+
+ * Title: **The Linux RAID-1, 4, 5 Code**
+
+ :Author: Ingo Molnar, Gadi Oxman and Miguel de Icaza.
+ :URL: http://www.linuxjournal.com/article.php?sid=2391
+ :Date: 1997
+ :Keywords: RAID, MD driver.
+ :Description: Linux Journal Kernel Korner article. Here is its
+ :Abstract: *A description of the implementation of the RAID-1,
+ RAID-4 and RAID-5 personalities of the MD device driver in the
+ Linux kernel, providing users with high performance and reliable,
+ secondary-storage capability using software*.
+
+ * Title: **Linux Kernel Hackers' Guide**
+
+ :Author: Michael K. Johnson.
+ :URL: http://www.tldp.org/LDP/khg/HyperNews/get/khg.html
+ :Date: 1997
+ :Keywords: device drivers, files, VFS, kernel interface, character vs
+ block devices, hardware interrupts, scsi, DMA, access to user memory,
+ memory allocation, timers.
+      :Description: A guide designed to help you get up to speed on the
+        concepts that are not intuitively obvious, and to document the
+        internal structures of Linux.
+
+ * Title: **Dynamic Kernels: Modularized Device Drivers**
+
+ :Author: Alessandro Rubini.
+ :URL: http://www.linuxjournal.com/article.php?sid=1219
+ :Date: 1996
+ :Keywords: device driver, module, loading/unloading modules,
+ allocating resources.
+ :Description: Linux Journal Kernel Korner article. Here is its
+ :Abstract: *This is the first of a series of four articles
+ co-authored by Alessandro Rubini and Georg Zezchwitz which present
+ a practical approach to writing Linux device drivers as kernel
+ loadable modules. This installment presents an introduction to the
+ topic, preparing the reader to understand next month's
+ installment*.
+
+ * Title: **Dynamic Kernels: Discovery**
+
+ :Author: Alessandro Rubini.
+ :URL: http://www.linuxjournal.com/article.php?sid=1220
+ :Date: 1996
+      :Keywords: character driver, init_module, cleanup_module,
+        autodetection, major number, minor number, file operations,
+        open(), close().
+ :Description: Linux Journal Kernel Korner article. Here is its
+ :Abstract: *This article, the second of four, introduces part of
+ the actual code to create custom module implementing a character
+ device driver. It describes the code for module initialization and
+ cleanup, as well as the open() and close() system calls*.
+
+ * Title: **The Devil's in the Details**
+
+ :Author: Georg v. Zezschwitz and Alessandro Rubini.
+ :URL: http://www.linuxjournal.com/article.php?sid=1221
+ :Date: 1996
+ :Keywords: read(), write(), select(), ioctl(), blocking/non
+ blocking mode, interrupt handler.
+ :Description: Linux Journal Kernel Korner article. Here is its
+ :Abstract: *This article, the third of four on writing character
+ device drivers, introduces concepts of reading, writing, and using
+ ioctl-calls*.
+
+ * Title: **Dissecting Interrupts and Browsing DMA**
+
+ :Author: Alessandro Rubini and Georg v. Zezschwitz.
+ :URL: http://www.linuxjournal.com/article.php?sid=1222
+ :Date: 1996
+ :Keywords: interrupts, irqs, DMA, bottom halves, task queues.
+ :Description: Linux Journal Kernel Korner article. Here is its
+ :Abstract: *This is the fourth in a series of articles about
+ writing character device drivers as loadable kernel modules. This
+ month, we further investigate the field of interrupt handling.
+ Though it is conceptually simple, practical limitations and
+ constraints make this an ''interesting'' part of device driver
+ writing, and several different facilities have been provided for
+ different situations. We also investigate the complex topic of
+ DMA*.
+
+ * Title: **Device Drivers Concluded**
+
+ :Author: Georg v. Zezschwitz.
+ :URL: http://www.linuxjournal.com/article.php?sid=1287
+ :Date: 1996
+ :Keywords: address spaces, pages, pagination, page management,
+ demand loading, swapping, memory protection, memory mapping, mmap,
+ virtual memory areas (VMAs), vremap, PCI.
+      :Description: Finally, the above turned into a five-article
+        series. This last one's introduction reads: "This is the last of
+        five articles about character device drivers. In this final
+        section, Georg deals with memory mapping devices, beginning with
+        an overall description of the Linux memory management concepts".
+
+ * Title: **Network Buffers And Memory Management**
+
+ :Author: Alan Cox.
+ :URL: http://www.linuxjournal.com/article.php?sid=1312
+ :Date: 1996
+ :Keywords: sk_buffs, network devices, protocol/link layer
+ variables, network devices flags, transmit, receive,
+ configuration, multicast.
+ :Description: Linux Journal Kernel Korner.
+ :Abstract: *Writing a network device driver for Linux is fundamentally
+ simple---most of the complexity (other than talking to the
+ hardware) involves managing network packets in memory*.
+
+ * Title: **Analysis of the Ext2fs structure**
+
+ :Author: Louis-Dominique Dubeau.
+ :URL: http://teaching.csse.uwa.edu.au/units/CITS2002/fs-ext2/
+ :Date: 1994
+ :Keywords: ext2, filesystem, ext2fs.
+ :Description: Description of ext2's blocks, directories, inodes,
+ bitmaps, invariants...
+
+Published books
+---------------
+
+ * Title: **Linux Treiber entwickeln**
+
+ :Author: Jürgen Quade, Eva-Katharina Kunst
+ :Publisher: dpunkt.verlag
+ :Date: Oct 2015 (4th edition)
+ :Pages: 688
+ :ISBN: 978-3-86490-288-8
+ :Note: German. The third edition from 2011 is
+ much cheaper and still quite up-to-date.
+
+ * Title: **Linux Kernel Networking: Implementation and Theory**
+
+ :Author: Rami Rosen
+ :Publisher: Apress
+ :Date: December 22, 2013
+ :Pages: 648
+ :ISBN: 978-1430261964
+
+ * Title: **Embedded Linux Primer: A practical Real-World Approach, 2nd Edition**
+
+ :Author: Christopher Hallinan
+ :Publisher: Pearson
+ :Date: November, 2010
+ :Pages: 656
+ :ISBN: 978-0137017836
+
+ * Title: **Linux Kernel Development, 3rd Edition**
+
+ :Author: Robert Love
+ :Publisher: Addison-Wesley
+ :Date: July, 2010
+ :Pages: 440
+ :ISBN: 978-0672329463
+
+ * Title: **Essential Linux Device Drivers**
+
+ :Author: Sreekrishnan Venkateswaran
+      :Publisher: Prentice Hall
+ :Date: April, 2008
+ :Pages: 744
+ :ISBN: 978-0132396554
+
+.. _ldd3_published:
+
+ * Title: **Linux Device Drivers, 3rd Edition**
+
+ :Authors: Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman
+ :Publisher: O'Reilly & Associates
+ :Date: 2005
+ :Pages: 636
+ :ISBN: 0-596-00590-3
+      :Notes: Further information at
+        http://www.oreilly.com/catalog/linuxdrive3/ and, in PDF format,
+        at http://lwn.net/Kernel/LDD3/
+
+ * Title: **Linux Kernel Internals**
+
+ :Author: Michael Beck
+ :Publisher: Addison-Wesley
+ :Date: 1997
+ :ISBN: 0-201-33143-8 (second edition)
+
+ * Title: **Programmation Linux 2.0 API systeme et fonctionnement du noyau**
+
+ :Author: Remy Card, Eric Dumas, Franck Mevel
+ :Publisher: Eyrolles
+ :Date: 1997
+ :Pages: 520
+ :ISBN: 2-212-08932-5
+ :Notes: French
+
+ * Title: **The Design and Implementation of the 4.4 BSD UNIX Operating System**
+
+ :Author: Marshall Kirk McKusick, Keith Bostic, Michael J. Karels,
+ John S. Quarterman
+ :Publisher: Addison-Wesley
+ :Date: 1996
+ :ISBN: 0-201-54979-4
+
+ * Title: **Unix internals -- the new frontiers**
+
+ :Author: Uresh Vahalia
+ :Publisher: Prentice Hall
+ :Date: 1996
+ :Pages: 600
+ :ISBN: 0-13-101908-2
+
+ * Title: **Programming for the real world - POSIX.4**
+
+ :Author: Bill O. Gallmeister
+ :Publisher: O'Reilly & Associates, Inc
+ :Date: 1995
+ :Pages: 552
+      :ISBN: 1-56592-074-0
+      :Notes: Though not directly about Linux, Linux aims to be
+        POSIX-compliant, so this is a good reference.
+
+ * Title: **UNIX Systems for Modern Architectures: Symmetric Multiprocessing and Caching for Kernel Programmers**
+
+ :Author: Curt Schimmel
+ :Publisher: Addison Wesley
+ :Date: June, 1994
+ :Pages: 432
+ :ISBN: 0-201-63338-8
+
+ * Title: **The Design and Implementation of the 4.3 BSD UNIX Operating System**
+
+ :Author: Samuel J. Leffler, Marshall Kirk McKusick, Michael J
+ Karels, John S. Quarterman
+ :Publisher: Addison-Wesley
+ :Date: 1989 (reprinted with corrections on October, 1990)
+ :ISBN: 0-201-06196-1
+
+ * Title: **The Design of the UNIX Operating System**
+
+ :Author: Maurice J. Bach
+ :Publisher: Prentice Hall
+ :Date: 1986
+ :Pages: 471
+ :ISBN: 0-13-201757-1
+
+Miscellaneous
+-------------
+
+ * Name: **Cross-Referencing Linux**
+
+ :URL: http://lxr.free-electrons.com/
+ :Keywords: Browsing source code.
+      :Description: A web-based Linux kernel source code browser.
+ Lots of cross references to variables and functions. You can see
+ where they are defined and where they are used.
+
+ * Name: **Linux Weekly News**
+
+ :URL: http://lwn.net
+ :Keywords: latest kernel news.
+ :Description: The title says it all. There's a fixed kernel section
+ summarizing developers' work, bug fixes, new features and versions
+ produced during the week. Published every Thursday.
+
+ * Name: **The home page of Linux-MM**
+
+ :Author: The Linux-MM team.
+ :URL: http://linux-mm.org/
+ :Keywords: memory management, Linux-MM, mm patches, TODO, docs,
+ mailing list.
+ :Description: Site devoted to Linux Memory Management development.
+ Memory related patches, HOWTOs, links, mm developers... Don't miss
+ it if you are interested in memory management development!
+
+ * Name: **Kernel Newbies IRC Channel and Website**
+
+ :URL: http://www.kernelnewbies.org
+      :Keywords: IRC, newbies, channel, asking questions.
+      :Description: #kernelnewbies on irc.oftc.net.
+        #kernelnewbies is an IRC channel dedicated to the 'newbie'
+        kernel hacker. The audience mostly consists of people who are
+        learning about the kernel or working on kernel projects, as well
+        as professional kernel hackers who want to help less seasoned
+        kernel people.
+ #kernelnewbies is on the OFTC IRC Network.
+ Try irc.oftc.net as your server and then /join #kernelnewbies.
+ The kernelnewbies website also hosts articles, documents, FAQs...
+
+ * Name: **linux-kernel mailing list archives and search engines**
+
+ :URL: http://vger.kernel.org/vger-lists.html
+ :URL: http://www.uwsg.indiana.edu/hypermail/linux/kernel/index.html
+ :URL: http://groups.google.com/group/mlist.linux.kernel
+ :Keywords: linux-kernel, archives, search.
+      :Description: Some of the linux-kernel mailing list archives. If
+        you know of a better or additional one, please let me know.
+
+-------
+
+Document last updated on Tue 2016-Sep-20
+
+This document is based on:
+ http://www.dit.upm.es/~jmseyas/linux/kernel/hackers-docs.html
diff --git a/Documentation/kernel-documentation.rst b/Documentation/kernel-documentation.rst
index 391decc66a18..10cc7ddb6235 100644
--- a/Documentation/kernel-documentation.rst
+++ b/Documentation/kernel-documentation.rst
@@ -107,6 +107,35 @@ Here are some specific guidelines for the kernel documentation:
the order as encountered."), having the higher levels the same overall makes
it easier to follow the documents.
+
+the C domain
+------------
+
+The `Sphinx C Domain`_ (name c) is suited for the documentation of C APIs,
+e.g. a function prototype:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+
+The C domain of kernel-doc has some additional features. E.g. you can
+*rename* the reference name of a function that has a common name like
+``open`` or ``ioctl``:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+The func-name (e.g. ioctl) remains in the output, but the ref-name is changed
+from ``ioctl`` to ``VIDIOC_LOG_STATUS``. The index entry for this function is
+also changed to ``VIDIOC_LOG_STATUS``, and the function can now be referenced
+by:
+
+.. code-block:: rst
+
+ :c:func:`VIDIOC_LOG_STATUS`
+
+
list tables
-----------
@@ -265,6 +294,8 @@ The kernel-doc extension is included in the kernel source tree, at
``scripts/kernel-doc`` script to extract the documentation comments from the
source.
+.. _kernel_doc:
+
Writing kernel-doc comments
===========================
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 25037ded3683..ec8d81417dc8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -460,6 +460,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
driver will print ACPI tables for AMD IOMMU during
IOMMU initialization.
+ amd_iommu_intr= [HW,X86-64]
+ Specifies one of the following AMD IOMMU interrupt
+ remapping modes:
+ legacy - Use legacy interrupt remapping mode.
+ vapic - Use virtual APIC mode, which allows IOMMU
+ to inject interrupts directly into guest.
+ This mode requires kvm-amd.avic=1.
+ (Default when IOMMU HW support is present.)
+
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
@@ -1054,11 +1063,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
determined by the stdout-path property in device
tree's chosen node.
- cdns,<addr>
- Start an early, polled-mode console on a cadence serial
- port at the specified address. The cadence serial port
- must already be setup and configured. Options are not
- yet supported.
+ cdns,<addr>[,options]
+ Start an early, polled-mode console on a Cadence
+			(xuartps) serial port at the specified address. The
+			only supported option is the baud rate. If the baud
+			rate is not specified, the serial port must already
+			be set up and configured.
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
@@ -1373,6 +1383,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
Default: 1024
+ gpio-mockup.gpio_mockup_ranges
+			[HW] Sets the ranges of the gpiochips for this device.
+ Format: <start1>,<end1>,<start2>,<end2>...
+
hardlockup_all_cpu_backtrace=
[KNL] Should the hard-lockup detector generate
backtraces on all cpus.
@@ -1697,7 +1711,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and fall back on acpi_idle.
- 1 to 6 specify maximum depth of C-state.
+ 1 to 9 specify maximum depth of C-state.
intel_pstate= [X86]
disable
@@ -2170,10 +2184,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
than or equal to this physical address is ignored.
maxcpus= [SMP] Maximum number of processors that an SMP kernel
- should make use of. maxcpus=n : n >= 0 limits the
- kernel to using 'n' processors. n=0 is a special case,
- it is equivalent to "nosmp", which also disables
- the IO APIC.
+			will bring up during bootup. maxcpus=n : n >= 0 limits
+			the kernel to bringing up 'n' processors. After bootup
+			you can bring up the remaining plugged CPUs by executing
+			"echo 1 > /sys/devices/system/cpu/cpuX/online", so maxcpus
+			only takes effect during system bootup.
+			n=0 is a special case: it is equivalent to "nosmp", which
+			also disables the IO APIC.
max_loop= [LOOP] The number of loop block devices that get
(loop.max_loop) unconditionally pre-created at init time. The default
@@ -2580,8 +2597,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nodelayacct [KNL] Disable per-task delay accounting
- nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
-
nodsp [SH] Disable hardware DSP at boot time.
noefi Disable EFI runtime services support.
@@ -2782,9 +2797,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nr_cpus= [SMP] Maximum number of processors that an SMP kernel
could support. nr_cpus=n : n >= 1 limits the kernel to
- supporting 'n' processors. Later in runtime you can not
- use hotplug cpu feature to put more cpu back to online.
- just like you compile the kernel NR_CPUS=n
+			support 'n' processors. It may be larger than the number
+			of CPUs already plugged in during bootup; later at runtime
+			you can physically add extra CPUs until the count reaches
+			n. So during bootup some boot-time memory for per-cpu
+			variables needs to be pre-allocated for later physical
+			CPU hotplugging.
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
@@ -4247,6 +4265,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
u = IGNORE_UAS (don't bind to the uas driver);
w = NO_WP_DETECT (don't test whether the
medium is write-protected).
+ y = ALWAYS_SYNC (issue a SYNCHRONIZE_CACHE
+ even if the device claims no cache)
Example: quirks=0419:aaf5:rl,0421:0433:rc
user_debug= [KNL,ARM]
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
deleted file mode 100644
index 80aae85d8da6..000000000000
--- a/Documentation/kmemcheck.txt
+++ /dev/null
@@ -1,754 +0,0 @@
-GETTING STARTED WITH KMEMCHECK
-==============================
-
-Vegard Nossum <vegardno@ifi.uio.no>
-
-
-Contents
-========
-0. Introduction
-1. Downloading
-2. Configuring and compiling
-3. How to use
-3.1. Booting
-3.2. Run-time enable/disable
-3.3. Debugging
-3.4. Annotating false positives
-4. Reporting errors
-5. Technical description
-
-
-0. Introduction
-===============
-
-kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
-is a dynamic checker that detects and warns about some uses of uninitialized
-memory.
-
-Userspace programmers might be familiar with Valgrind's memcheck. The main
-difference between memcheck and kmemcheck is that memcheck works for userspace
-programs only, and kmemcheck works for the kernel only. The implementations
-are of course vastly different. Because of this, kmemcheck is not as accurate
-as memcheck, but it turns out to be good enough in practice to discover real
-programmer errors that the compiler is not able to find through static
-analysis.
-
-Enabling kmemcheck on a kernel will probably slow it down to the extent that
-the machine will not be usable for normal workloads such as e.g. an
-interactive desktop. kmemcheck will also cause the kernel to use about twice
-as much memory as normal. For this reason, kmemcheck is strictly a debugging
-feature.
-
-
-1. Downloading
-==============
-
-As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.
-
-
-2. Configuring and compiling
-============================
-
-kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
-configuration variables must have specific settings in order for the kmemcheck
-menu to even appear in "menuconfig". These are:
-
- o CONFIG_CC_OPTIMIZE_FOR_SIZE=n
-
- This option is located under "General setup" / "Optimize for size".
-
- Without this, gcc will use certain optimizations that usually lead to
- false positive warnings from kmemcheck. An example of this is a 16-bit
- field in a struct, where gcc may load 32 bits, then discard the upper
- 16 bits. kmemcheck sees only the 32-bit load, and may trigger a
- warning for the upper 16 bits (if they're uninitialized).
-
- o CONFIG_SLAB=y or CONFIG_SLUB=y
-
- This option is located under "General setup" / "Choose SLAB
- allocator".
-
- o CONFIG_FUNCTION_TRACER=n
-
- This option is located under "Kernel hacking" / "Tracers" / "Kernel
- Function Tracer"
-
- When function tracing is compiled in, gcc emits a call to another
- function at the beginning of every function. This means that when the
- page fault handler is called, the ftrace framework will be called
- before kmemcheck has had a chance to handle the fault. If ftrace then
- modifies memory that was tracked by kmemcheck, the result is an
- endless recursive page fault.
-
- o CONFIG_DEBUG_PAGEALLOC=n
-
- This option is located under "Kernel hacking" / "Memory Debugging"
- / "Debug page memory allocations".
-
-In addition, I highly recommend turning on CONFIG_DEBUG_INFO=y. This is also
-located under "Kernel hacking". With this, you will be able to get line number
-information from the kmemcheck warnings, which is extremely valuable in
-debugging a problem. This option is not mandatory, however, because it slows
-down the compilation process and produces a much bigger kernel image.
-
-Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory
-Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows
-a description of the kmemcheck configuration variables:
-
- o CONFIG_KMEMCHECK
-
- This must be enabled in order to use kmemcheck at all...
-
- o CONFIG_KMEMCHECK_[DISABLED | ENABLED | ONESHOT]_BY_DEFAULT
-
- This option controls the status of kmemcheck at boot-time. "Enabled"
- will enable kmemcheck right from the start, "disabled" will boot the
- kernel as normal (but with the kmemcheck code compiled in, so it can
- be enabled at run-time after the kernel has booted), and "one-shot" is
- a special mode which will turn kmemcheck off automatically after
- detecting the first use of uninitialized memory.
-
- If you are using kmemcheck to actively debug a problem, then you
- probably want to choose "enabled" here.
-
- The one-shot mode is mostly useful in automated test setups because it
- can prevent floods of warnings and increase the chances of the machine
- surviving in case something is really wrong. In other cases, the one-
- shot mode could actually be counter-productive because it would turn
- itself off at the very first error -- in the case of a false positive
- too -- and this would come in the way of debugging the specific
- problem you were interested in.
-
- If you would like to use your kernel as normal, but with a chance to
- enable kmemcheck in case of some problem, it might be a good idea to
- choose "disabled" here. When kmemcheck is disabled, most of the run-
- time overhead is not incurred, and the kernel will be almost as fast
- as normal.
-
- o CONFIG_KMEMCHECK_QUEUE_SIZE
-
- Select the maximum number of error reports to store in an internal
- (fixed-size) buffer. Since errors can occur virtually anywhere and in
- any context, we need a temporary storage area which is guaranteed not
- to generate any other page faults when accessed. The queue will be
- emptied as soon as a tasklet may be scheduled. If the queue is full,
- new error reports will be lost.
-
- The default value of 64 is probably fine. If some code produces more
- than 64 errors within an irqs-off section, then the code is likely to
- produce many, many more, too, and these additional reports seldom give
- any more information (the first report is usually the most valuable
- anyway).
-
- This number might have to be adjusted if you are not using serial
- console or similar to capture the kernel log. If you are using the
- "dmesg" command to save the log, then getting a lot of kmemcheck
- warnings might overflow the kernel log itself, and the earlier reports
- will get lost in that way instead. Try setting this to 10 or so on
- such a setup.
-
- o CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT
-
- Select the number of shadow bytes to save along with each entry of the
- error-report queue. These bytes indicate what parts of an allocation
- are initialized, uninitialized, etc. and will be displayed when an
- error is detected to help the debugging of a particular problem.
-
- The number entered here is actually the logarithm of the number of
- bytes that will be saved. So if you pick for example 5 here, kmemcheck
- will save 2^5 = 32 bytes.
-
- The default value should be fine for debugging most problems. It also
- fits nicely within 80 columns.
-
- o CONFIG_KMEMCHECK_PARTIAL_OK
-
- This option (when enabled) works around certain GCC optimizations that
- produce 32-bit reads from 16-bit variables where the upper 16 bits are
- thrown away afterwards.
-
- The default value (enabled) is recommended. This may of course hide
- some real errors, but disabling it would probably produce a lot of
- false positives.
-
- o CONFIG_KMEMCHECK_BITOPS_OK
-
- This option silences warnings that would be generated for bit-field
- accesses where not all the bits are initialized at the same time. This
- may also hide some real bugs.
-
- This option is probably obsolete, or it should be replaced with
- the kmemcheck-/bitfield-annotations for the code in question. The
- default value is therefore fine.
-
-Now compile the kernel as usual.
-
-
-3. How to use
-=============
-
-3.1. Booting
-============
-
-First some information about the command-line options. There is only one
-option specific to kmemcheck, and this is called "kmemcheck". It can be used
-to override the default mode as chosen by the CONFIG_KMEMCHECK_*_BY_DEFAULT
-option. Its possible settings are:
-
- o kmemcheck=0 (disabled)
- o kmemcheck=1 (enabled)
- o kmemcheck=2 (one-shot mode)
-
-If SLUB debugging has been enabled in the kernel, it may take precedence over
-kmemcheck in such a way that the slab caches which are under SLUB debugging
-will not be tracked by kmemcheck. In order to ensure that this doesn't happen
-(even though it shouldn't by default), use SLUB's boot option "slub_debug",
-like this: slub_debug=-
-
-In fact, this option may also be used for fine-grained control over SLUB vs.
-kmemcheck. For example, if the command line includes "kmemcheck=1
-slub_debug=,dentry", then SLUB debugging will be used only for the "dentry"
-slab cache, and with kmemcheck tracking all the other caches. This is advanced
-usage, however, and is not generally recommended.
-
-
-3.2. Run-time enable/disable
-============================
-
-When the kernel has booted, it is possible to enable or disable kmemcheck at
-run-time. WARNING: This feature is still experimental and may cause false
-positive warnings to appear. Therefore, try not to use this. If you find that
-it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
-will be happy to take bug reports.
-
-Use the file /proc/sys/kernel/kmemcheck for this purpose, e.g.:
-
- $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
-
-The numbers are the same as for the kmemcheck= command-line option.
-
-
-3.3. Debugging
-==============
-
-A typical report will look something like this:
-
-WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
-80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
- ^
-
-Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
-RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
-RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002
-RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
-RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
-RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
-R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
-R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
-FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
-CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
-CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
- [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
- [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
- [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
- [<ffffffff8100c7b5>] int_signal+0x12/0x17
- [<ffffffffffffffff>] 0xffffffffffffffff
-
-The single most valuable information in this report is the RIP (or EIP on 32-
-bit) value. This will help us pinpoint exactly which instruction that caused
-the warning.
-
-If your kernel was compiled with CONFIG_DEBUG_INFO=y, then all we have to do
-is give this address to the addr2line program, like this:
-
- $ addr2line -e vmlinux -i ffffffff8104ede8
- arch/x86/include/asm/string_64.h:12
- include/asm-generic/siginfo.h:287
- kernel/signal.c:380
- kernel/signal.c:410
-
-The "-e vmlinux" tells addr2line which file to look in. IMPORTANT: This must
-be the vmlinux of the kernel that produced the warning in the first place! If
-not, the line number information will almost certainly be wrong.
-
-The "-i" tells addr2line to also print the line numbers of inlined functions.
-In this case, the flag was very important, because otherwise, it would only
-have printed the first line, which is just a call to memcpy(), which could be
-called from a thousand places in the kernel, and is therefore not very useful.
-These inlined functions would not show up in the stack trace above, simply
-because the kernel doesn't load the extra debugging information. This
-technique can of course be used with ordinary kernel oopses as well.
-
-In this case, it's the caller of memcpy() that is interesting, and it can be
-found in include/asm-generic/siginfo.h, line 287:
-
-281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-282 {
-283 if (from->si_code < 0)
-284 memcpy(to, from, sizeof(*to));
-285 else
-286 /* _sigchld is currently the largest know union member */
-287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-288 }
-
-Since this was a read (kmemcheck usually warns about reads only, though it can
-warn about writes to unallocated or freed memory as well), it was probably the
-"from" argument which contained some uninitialized bytes. Following the chain
-of calls, we move upwards to see where "from" was allocated or initialized,
-kernel/signal.c, line 380:
-
-359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
-360 {
-...
-367 list_for_each_entry(q, &list->list, list) {
-368 if (q->info.si_signo == sig) {
-369 if (first)
-370 goto still_pending;
-371 first = q;
-...
-377 if (first) {
-378 still_pending:
-379 list_del_init(&first->list);
-380 copy_siginfo(info, &first->info);
-381 __sigqueue_free(first);
-...
-392 }
-393 }
-
-Here, it is &first->info that is being passed on to copy_siginfo(). The
-variable "first" was found on a list -- passed in as the second argument to
-collect_signal(). We continue our journey through the stack, to figure out
-where the item on "list" was allocated or initialized. We move to line 410:
-
-395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
-396 siginfo_t *info)
-397 {
-...
-410 collect_signal(sig, pending, info);
-...
-414 }
-
-Now we need to follow the "pending" pointer, since that is being passed on to
-collect_signal() as "list". At this point, we've run out of lines from the
-"addr2line" output. Not to worry, we just paste the next addresses from the
-kmemcheck stack dump, i.e.:
-
- [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
- [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
- [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
- [<ffffffff8100c7b5>] int_signal+0x12/0x17
-
- $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
- ffffffff8100b87d ffffffff8100c7b5
- kernel/signal.c:446
- kernel/signal.c:1806
- arch/x86/kernel/signal.c:805
- arch/x86/kernel/signal.c:871
- arch/x86/kernel/entry_64.S:694
-
-Remember that since these addresses were found on the stack and not as the
-RIP value, they actually point to the _next_ instruction (they are return
-addresses). This becomes obvious when we look at the code for line 446:
-
-422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
-423 {
-...
-431 signr = __dequeue_signal(&tsk->signal->shared_pending,
-432 mask, info);
-433 /*
-434 * itimer signal ?
-435 *
-436 * itimers are process shared and we restart periodic
-437 * itimers in the signal delivery path to prevent DoS
-438 * attacks in the high resolution timer case. This is
-439 * compliant with the old way of self restarting
-440 * itimers, as the SIGALRM is a legacy signal and only
-441 * queued once. Changing the restart behaviour to
-442 * restart the timer in the signal dequeue path is
-443 * reducing the timer noise on heavy loaded !highres
-444 * systems too.
-445 */
-446 if (unlikely(signr == SIGALRM)) {
-...
-489 }
-
-So instead of looking at 446, we should be looking at 431, which is the line
-that executes just before 446. Here we see that what we are looking for is
-&tsk->signal->shared_pending.
-
-Our next task is now to figure out which function that puts items on this
-"shared_pending" list. A crude, but efficient tool, is git grep:
-
- $ git grep -n 'shared_pending' kernel/
- ...
- kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending;
- kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending;
- ...
-
-There were more results, but none of them were related to list operations,
-and these were the only assignments. We inspect the line numbers more closely
-and find that this is indeed where items are being added to the list:
-
-816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-817 int group)
-818 {
-...
-828 pending = group ? &t->signal->shared_pending : &t->pending;
-...
-851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-852 (is_si_special(info) ||
-853 info->si_code >= 0)));
-854 if (q) {
-855 list_add_tail(&q->list, &pending->list);
-...
-890 }
-
-and:
-
-1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
-1310 {
-....
-1339 pending = group ? &t->signal->shared_pending : &t->pending;
-1340 list_add_tail(&q->list, &pending->list);
-....
-1347 }
-
-In the first case, the list element we are looking for, "q", is being returned
-from the function __sigqueue_alloc(), which looks like an allocation function.
-Let's take a look at it:
-
-187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
-188 int override_rlimit)
-189 {
-190 struct sigqueue *q = NULL;
-191 struct user_struct *user;
-192
-193 /*
-194 * We won't get problems with the target's UID changing under us
-195 * because changing it requires RCU be used, and if t != current, the
-196 * caller must be holding the RCU readlock (by way of a spinlock) and
-197 * we use RCU protection here
-198 */
-199 user = get_uid(__task_cred(t)->user);
-200 atomic_inc(&user->sigpending);
-201 if (override_rlimit ||
-202 atomic_read(&user->sigpending) <=
-203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
-204 q = kmem_cache_alloc(sigqueue_cachep, flags);
-205 if (unlikely(q == NULL)) {
-206 atomic_dec(&user->sigpending);
-207 free_uid(user);
-208 } else {
-209 INIT_LIST_HEAD(&q->list);
-210 q->flags = 0;
-211 q->user = user;
-212 }
-213
-214 return q;
-215 }
-
-We see that this function initializes q->list, q->flags, and q->user. It seems
-that now is the time to look at the definition of "struct sigqueue", e.g.:
-
-14 struct sigqueue {
-15 struct list_head list;
-16 int flags;
-17 siginfo_t info;
-18 struct user_struct *user;
-19 };
-
-And, you might remember, it was a memcpy() on &first->info that caused the
-warning, so this makes perfect sense. It also seems reasonable to assume that
-it is the caller of __sigqueue_alloc() that has the responsibility of filling
-out (initializing) this member.
-
-But just which fields of the struct were uninitialized? Let's look at
-kmemcheck's report again:
-
-WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
-80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
- ^
-
-These first two lines are the memory dump of the memory object itself, and the
-shadow bytemap, respectively. The memory object itself is in this case
-&first->info. Just beware that the start of this dump is NOT the start of the
-object itself! The position of the caret (^) corresponds with the address of
-the read (ffff88003e4a2024).
-
-The shadow bytemap dump legend is as follows:
-
- i - initialized
- u - uninitialized
- a - unallocated (memory has been allocated by the slab layer, but has not
- yet been handed off to anybody)
- f - freed (memory has been allocated by the slab layer, but has been freed
- by the previous owner)
-
-In order to figure out where (relative to the start of the object) the
-uninitialized memory was located, we have to look at the disassembly. For
-that, we'll need the RIP address again:
-
-RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
-
- $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
- ffffffff8104edc8: mov %r8,0x8(%r8)
- ffffffff8104edcc: test %r10d,%r10d
- ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168>
- ffffffff8104edd5: mov %rax,%rdx
- ffffffff8104edd8: mov $0xc,%ecx
- ffffffff8104eddd: mov %r13,%rdi
- ffffffff8104ede0: mov $0x30,%eax
- ffffffff8104ede5: mov %rdx,%rsi
- ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi)
- ffffffff8104edea: test $0x2,%al
- ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0>
- ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi)
- ffffffff8104edf0: test $0x1,%al
- ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5>
- ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi)
- ffffffff8104edf5: mov %r8,%rdi
- ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free>
-
-As expected, it's the "rep movsl" instruction from the memcpy() that causes
-the warning. We know about REP MOVSL that it uses the register RCX to count
-the number of remaining iterations. By taking a look at the register dump
-again (from the kmemcheck report), we can figure out how many bytes were left
-to copy:
-
-RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
-
-By looking at the disassembly, we also see that %ecx is being loaded with the
-value $0xc just before (ffffffff8104edd8), so we are very lucky. Keep in mind
-that this is the number of iterations, not bytes. And since this is a "long"
-operation, we need to multiply by 4 to get the number of bytes. So this means
-that the uninitialized value was encountered at 4 * (0xc - 0x9) = 12 bytes
-from the start of the object.
-
-We can now try to figure out which field of the "struct siginfo" that was not
-initialized. This is the beginning of the struct:
-
-40 typedef struct siginfo {
-41 int si_signo;
-42 int si_errno;
-43 int si_code;
-44
-45 union {
-..
-92 } _sifields;
-93 } siginfo_t;
-
-On 64-bit, the int is 4 bytes long, so it must the union member that has
-not been initialized. We can verify this using gdb:
-
- $ gdb vmlinux
- ...
- (gdb) p &((struct siginfo *) 0)->_sifields
- $1 = (union {...} *) 0x10
-
-Actually, it seems that the union member is located at offset 0x10 -- which
-means that gcc has inserted 4 bytes of padding between the members si_code
-and _sifields. We can now get a fuller picture of the memory dump:
-
- _----------------------------=> si_code
- / _--------------------=> (padding)
- | / _------------=> _sifields(._kill._pid)
- | | / _----=> _sifields(._kill._uid)
- | | | /
--------|-------|-------|-------|
-80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
-
-This allows us to realize another important fact: si_code contains the value
-0x80. Remember that x86 is little endian, so the first 4 bytes "80000000" are
-really the number 0x00000080. With a bit of research, we find that this is
-actually the constant SI_KERNEL defined in include/asm-generic/siginfo.h:
-
-144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
-
-This macro is used in exactly one place in the x86 kernel: In send_signal()
-in kernel/signal.c:
-
-816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-817 int group)
-818 {
-...
-828 pending = group ? &t->signal->shared_pending : &t->pending;
-...
-851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-852 (is_si_special(info) ||
-853 info->si_code >= 0)));
-854 if (q) {
-855 list_add_tail(&q->list, &pending->list);
-856 switch ((unsigned long) info) {
-...
-865 case (unsigned long) SEND_SIG_PRIV:
-866 q->info.si_signo = sig;
-867 q->info.si_errno = 0;
-868 q->info.si_code = SI_KERNEL;
-869 q->info.si_pid = 0;
-870 q->info.si_uid = 0;
-871 break;
-...
-890 }
-
-Not only does this match with the .si_code member, it also matches the place
-we found earlier when looking for where siginfo_t objects are enqueued on the
-"shared_pending" list.
-
-So to sum up: It seems that it is the padding introduced by the compiler
-between two struct fields that is uninitialized, and this gets reported when
-we do a memcpy() on the struct. This means that we have identified a false
-positive warning.
-
-Normally, kmemcheck will not report uninitialized accesses in memcpy() calls
-when both the source and destination addresses are tracked. (Instead, we copy
-the shadow bytemap as well). In this case, the destination address clearly
-was not tracked. We can dig a little deeper into the stack trace from above:
-
- arch/x86/kernel/signal.c:805
- arch/x86/kernel/signal.c:871
- arch/x86/kernel/entry_64.S:694
-
-And we clearly see that the destination siginfo object is located on the
-stack:
-
-782 static void do_signal(struct pt_regs *regs)
-783 {
-784 struct k_sigaction ka;
-785 siginfo_t info;
-...
-804 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-...
-854 }
-
-And this &info is what eventually gets passed to copy_siginfo() as the
-destination argument.
-
-Now, even though we didn't find an actual error here, the example is still a
-good one, because it shows how one would go about to find out what the report
-was all about.
-
-
-3.4. Annotating false positives
-===============================
-
-There are a few different ways to make annotations in the source code that
-will keep kmemcheck from checking and reporting certain allocations. Here
-they are:
-
- o __GFP_NOTRACK_FALSE_POSITIVE
-
- This flag can be passed to kmalloc() or kmem_cache_alloc() (therefore
- also to other functions that end up calling one of these) to indicate
- that the allocation should not be tracked because it would lead to
- a false positive report. This is a "big hammer" way of silencing
- kmemcheck; after all, even if the false positive pertains to
- particular field in a struct, for example, we will now lose the
- ability to find (real) errors in other parts of the same struct.
-
- Example:
-
- /* No warnings will ever trigger on accessing any part of x */
- x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
-
- o kmemcheck_bitfield_begin(name)/kmemcheck_bitfield_end(name) and
- kmemcheck_annotate_bitfield(ptr, name)
-
- The first two of these three macros can be used inside struct
- definitions to signal, respectively, the beginning and end of a
- bitfield. Additionally, this will assign the bitfield a name, which
- is given as an argument to the macros.
-
- Having used these markers, one can later use
- kmemcheck_annotate_bitfield() at the point of allocation, to indicate
- which parts of the allocation is part of a bitfield.
-
- Example:
-
- struct foo {
- int x;
-
- kmemcheck_bitfield_begin(flags);
- int flag_a:1;
- int flag_b:1;
- kmemcheck_bitfield_end(flags);
-
- int y;
- };
-
- struct foo *x = kmalloc(sizeof *x);
-
- /* No warnings will trigger on accessing the bitfield of x */
- kmemcheck_annotate_bitfield(x, flags);
-
- Note that kmemcheck_annotate_bitfield() can be used even before the
- return value of kmalloc() is checked -- in other words, passing NULL
- as the first argument is legal (and will do nothing).
-
-
-4. Reporting errors
-===================
-
-As we have seen, kmemcheck will produce false positive reports. Therefore, it
-is not very wise to blindly post kmemcheck warnings to mailing lists and
-maintainers. Instead, I encourage maintainers and developers to find errors
-in their own code. If you get a warning, you can try to work around it, try
-to figure out if it's a real error or not, or simply ignore it. Most
-developers know their own code and will quickly and efficiently determine the
-root cause of a kmemcheck report. This is therefore also the most efficient
-way to work with kmemcheck.
-
-That said, we (the kmemcheck maintainers) will always be on the lookout for
-false positives that we can annotate and silence. So whatever you find,
-please drop us a note privately! Kernel configs and steps to reproduce (if
-available) are of course a great help too.
-
-Happy hacking!
-
-
-5. Technical description
-========================
-
-kmemcheck works by marking memory pages non-present. This means that whenever
-somebody attempts to access the page, a page fault is generated. The page
-fault handler notices that the page was in fact only hidden, and so it calls
-on the kmemcheck code to make further investigations.
-
-When the investigations are completed, kmemcheck "shows" the page by marking
-it present (as it would be under normal circumstances). This way, the
-interrupted code can continue as usual.
-
-But after the instruction has been executed, we should hide the page again, so
-that we can catch the next access too! Now kmemcheck makes use of a debugging
-feature of the processor, namely single-stepping. When the processor has
-finished the one instruction that generated the memory access, a debug
-exception is raised. From here, we simply hide the page again and continue
-execution, this time with the single-stepping feature turned off.
-
-kmemcheck requires some assistance from the memory allocator in order to work.
-The memory allocator needs to
-
- 1. Tell kmemcheck about newly allocated pages and pages that are about to
- be freed. This allows kmemcheck to set up and tear down the shadow memory
- for the pages in question. The shadow memory stores the status of each
- byte in the allocation proper, e.g. whether it is initialized or
- uninitialized.
-
- 2. Tell kmemcheck which parts of memory should be marked uninitialized.
- There are actually a few more states, such as "not yet allocated" and
- "recently freed".
-
-If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
-memory that can take page faults because of kmemcheck.
-
-If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
-request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
-This does not prevent the page faults from occurring, however, but marks the
-object in question as being initialized so that no warnings will ever be
-produced for this object.
-
-Currently, the SLAB and SLUB allocators are supported by kmemcheck.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1f9b3e2b98ae..1f6d45abfe42 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -103,6 +103,16 @@ Note that the probed function's args may be passed on the stack
or in registers. The jprobe will work in either case, so long as the
handler's prototype matches that of the probed function.
+Note that on some architectures (e.g. arm64 and sparc64) the stack
+copy is not done, as the actual location of stacked parameters may be
+outside of a reasonable MAX_STACK_SIZE value and because that location
+cannot be determined by the jprobes code. In this case the jprobes
+user must be careful to make certain that the calling signature of the
+function does not cause parameters to be passed on the stack (e.g.
+more than eight function arguments, an argument of more than sixteen
+bytes, or more than 64 bytes of argument data, depending on the
+architecture).
+
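+A minimal sketch of a jprobe that follows these rules is shown below. The
+_do_fork() target and its prototype are assumptions for illustration; the
+handler mirrors the probed function's signature (all arguments passed in
+registers, so the restriction above does not apply) and must end with
+jprobe_return().
+
+	#include <linux/kernel.h>
+	#include <linux/module.h>
+	#include <linux/kprobes.h>
+
+	/* Handler prototype mirrors the assumed _do_fork() prototype. */
+	static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
+			      unsigned long stack_size, int __user *parent_tidptr,
+			      int __user *child_tidptr, unsigned long tls)
+	{
+		pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
+
+		/* A jprobe handler must always end with jprobe_return(). */
+		jprobe_return();
+		return 0;
+	}
+
+	static struct jprobe my_jprobe = {
+		.entry = j_do_fork,
+		.kp.symbol_name = "_do_fork",
+	};
+
+	static int __init jprobe_example_init(void)
+	{
+		return register_jprobe(&my_jprobe);
+	}
+
+	static void __exit jprobe_example_exit(void)
+	{
+		unregister_jprobe(&my_jprobe);
+	}
+
+	module_init(jprobe_example_init);
+	module_exit(jprobe_example_exit);
+	MODULE_LICENSE("GPL");
+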
1.3 Return Probes
1.3.1 How Does a Return Probe Work?
diff --git a/Documentation/leds/leds-mlxcpld.txt b/Documentation/leds/leds-mlxcpld.txt
new file mode 100644
index 000000000000..a0e8fd457117
--- /dev/null
+++ b/Documentation/leds/leds-mlxcpld.txt
@@ -0,0 +1,110 @@
+Kernel driver for Mellanox systems LEDs
+=======================================
+
+Provide system LED support for the next Mellanox systems:
+"msx6710", "msx6720", "msb7700", "msn2700", "msx1410",
+"msn2410", "msb7800", "msn2740", "msn2100".
+
+Description
+-----------
+Driver provides the following LEDs for the systems "msx6710", "msx6720",
+"msb7700", "msn2700", "msx1410", "msn2410", "msb7800", "msn2740":
+ mlxcpld:fan1:green
+ mlxcpld:fan1:red
+ mlxcpld:fan2:green
+ mlxcpld:fan2:red
+ mlxcpld:fan3:green
+ mlxcpld:fan3:red
+ mlxcpld:fan4:green
+ mlxcpld:fan4:red
+ mlxcpld:psu:green
+ mlxcpld:psu:red
+ mlxcpld:status:green
+ mlxcpld:status:red
+
+ "status"
+ CPLD reg offset: 0x20
+ Bits [3:0]
+
+ "psu"
+ CPLD reg offset: 0x20
+ Bits [7:4]
+
+ "fan1"
+ CPLD reg offset: 0x21
+ Bits [3:0]
+
+ "fan2"
+ CPLD reg offset: 0x21
+ Bits [7:4]
+
+ "fan3"
+ CPLD reg offset: 0x22
+ Bits [3:0]
+
+ "fan4"
+ CPLD reg offset: 0x22
+ Bits [7:4]
+
+ Color mask for all the above LEDs:
+ [bit3,bit2,bit1,bit0] or
+ [bit7,bit6,bit5,bit4]:
+ [0,0,0,0] = LED OFF
+ [0,1,0,1] = Red static ON
+ [1,1,0,1] = Green static ON
+ [0,1,1,0] = Red blink 3Hz
+ [1,1,1,0] = Green blink 3Hz
+ [0,1,1,1] = Red blink 6Hz
+ [1,1,1,1] = Green blink 6Hz
+
+Driver provides the following LEDs for the system "msn2100":
+ mlxcpld:fan:green
+ mlxcpld:fan:red
+ mlxcpld:psu1:green
+ mlxcpld:psu1:red
+ mlxcpld:psu2:green
+ mlxcpld:psu2:red
+ mlxcpld:status:green
+ mlxcpld:status:red
+ mlxcpld:uid:blue
+
+ "status"
+ CPLD reg offset: 0x20
+ Bits [3:0]
+
+ "fan"
+ CPLD reg offset: 0x21
+ Bits [3:0]
+
+ "psu1"
+ CPLD reg offset: 0x23
+ Bits [3:0]
+
+ "psu2"
+ CPLD reg offset: 0x23
+ Bits [7:4]
+
+ "uid"
+ CPLD reg offset: 0x24
+ Bits [3:0]
+
+ Color mask for all the above LEDs, except uid:
+ [bit3,bit2,bit1,bit0] or
+ [bit7,bit6,bit5,bit4]:
+ [0,0,0,0] = LED OFF
+ [0,1,0,1] = Red static ON
+ [1,1,0,1] = Green static ON
+ [0,1,1,0] = Red blink 3Hz
+ [1,1,1,0] = Green blink 3Hz
+ [0,1,1,1] = Red blink 6Hz
+ [1,1,1,1] = Green blink 6Hz
+
+ Color mask for uid LED:
+ [bit3,bit2,bit1,bit0]:
+ [0,0,0,0] = LED OFF
+ [1,1,0,1] = Blue static ON
+ [1,1,1,0] = Blue blink 3Hz
+ [1,1,1,1] = Blue blink 6Hz
+
+The driver supports HW blinking at 3Hz and 6Hz (50% duty cycle).
+At 3Hz the on/off time is about 167 msec each, at 6Hz about 83 msec.
diff --git a/Documentation/leds/ledtrig-oneshot.txt b/Documentation/leds/ledtrig-oneshot.txt
index 07cd1fa41a3a..fe57474a12e2 100644
--- a/Documentation/leds/ledtrig-oneshot.txt
+++ b/Documentation/leds/ledtrig-oneshot.txt
@@ -21,24 +21,8 @@ below:
echo oneshot > trigger
-This adds the following sysfs attributes to the LED:
-
- delay_on - specifies for how many milliseconds the LED has to stay at
- LED_FULL brightness after it has been armed.
- Default to 100 ms.
-
- delay_off - specifies for how many milliseconds the LED has to stay at
- LED_OFF brightness after it has been armed.
- Default to 100 ms.
-
- invert - reverse the blink logic. If set to 0 (default) blink on for delay_on
- ms, then blink off for delay_off ms, leaving the LED normally off. If
- set to 1, blink off for delay_off ms, then blink on for delay_on ms,
- leaving the LED normally on.
- Setting this value also immediately change the LED state.
-
- shot - write any non-empty string to signal an events, this starts a blink
- sequence if not already running.
+This adds sysfs attributes to the LED that are documented in:
+Documentation/ABI/testing/sysfs-class-led-trigger-oneshot
Example use-case: network devices, initialization:
diff --git a/Documentation/leds/ledtrig-usbport.txt b/Documentation/leds/ledtrig-usbport.txt
new file mode 100644
index 000000000000..69f54bfb4789
--- /dev/null
+++ b/Documentation/leds/ledtrig-usbport.txt
@@ -0,0 +1,41 @@
+USB port LED trigger
+====================
+
+This LED trigger can be used to signal to the user the presence of a USB device
+in a given port. It simply turns the LED on when a device appears and turns it
+off when the device disappears.
+
+It requires selecting the USB ports that should be observed. All available ports
+are listed as separate entries in a "ports" subdirectory. A port is selected by
+echoing "1" to its entry.
+
+Please note that this trigger allows selecting multiple USB ports for a single
+LED. This can be useful in two cases:
+
+1) Device with a single USB LED and a few physical ports
+
+In such a case the LED will be turned on as long as there is at least one
+connected USB device.
+
+2) Device with a physical port handled by a few controllers
+
+Some devices may have one controller per PHY standard. E.g. a USB 3.0 physical
+port may be handled by ohci-platform, ehci-platform and xhci-hcd. If there is
+only one LED, the user will most likely want to assign ports from all 3 hubs.
+
+
+This trigger can be activated from user space on led class devices as shown
+below:
+
+ echo usbport > trigger
+
+This adds sysfs attributes to the LED that are documented in:
+Documentation/ABI/testing/sysfs-class-led-trigger-usbport
+
+Example use-case:
+
+ echo usbport > trigger
+ echo 1 > ports/usb1-port1
+ echo 1 > ports/usb2-port1
+ cat ports/usb1-port1
+ echo 0 > ports/usb1-port1
diff --git a/Documentation/livepatch/module-elf-format.txt b/Documentation/livepatch/module-elf-format.txt
index eedbdcf8ba50..f21a5289a09c 100644
--- a/Documentation/livepatch/module-elf-format.txt
+++ b/Documentation/livepatch/module-elf-format.txt
@@ -25,7 +25,8 @@ Table of Contents
3.3.2 Required name format
3.3.3 Example livepatch symbol names
3.3.4 Example `readelf --symbols` output
-4. Symbol table and Elf section access
+4. Architecture-specific sections
+5. Symbol table and Elf section access
----------------------------
0. Background and motivation
@@ -46,7 +47,7 @@ architecture.
Since apply_relocate_add() requires access to a module's section header
table, symbol table, and relocation section indices, Elf information is
-preserved for livepatch modules (see section 4). Livepatch manages its own
+preserved for livepatch modules (see section 5). Livepatch manages its own
relocation sections and symbols, which are described in this document. The
Elf constants used to mark livepatch symbols and relocation sections were
selected from OS-specific ranges according to the definitions from glibc.
@@ -117,7 +118,7 @@ also possible for a livepatch module to have no livepatch relocation
sections, as in the case of the sample livepatch module (see
samples/livepatch).
-Since Elf information is preserved for livepatch modules (see Section 4), a
+Since Elf information is preserved for livepatch modules (see Section 5), a
livepatch relocation section can be applied simply by passing in the
appropriate section index to apply_relocate_add(), which then uses it to
access the relocation section and apply the relocations.
@@ -292,8 +293,19 @@ Symbol table '.symtab' contains 127 entries:
[*] Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
"OS" means OS-specific.
+---------------------------------
+4. Architecture-specific sections
+---------------------------------
+Architectures may override arch_klp_init_object_loaded() to perform
+additional arch-specific tasks when a target module loads, such as applying
+arch-specific sections. On x86 for example, we must apply per-object
+.altinstructions and .parainstructions sections when a target module loads.
+These sections must be prefixed with ".klp.arch.$objname." so that they can
+be easily identified when iterating through a patch module's Elf sections
+(See arch/x86/kernel/livepatch.c for a complete example).
+
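+A minimal sketch of such an override is shown below. It assumes the signature
+used by the weak default in kernel/livepatch/core.c; the actual section
+walking is only indicated by the comment.
+
+	#include <linux/livepatch.h>
+
+	int arch_klp_init_object_loaded(struct klp_patch *patch,
+					struct klp_object *obj)
+	{
+		/*
+		 * Walk the patch module's preserved ELF section headers and
+		 * apply every ".klp.arch.<objname>." prefixed section that
+		 * this architecture cares about (e.g. .altinstructions and
+		 * .parainstructions on x86, as done in
+		 * arch/x86/kernel/livepatch.c).
+		 */
+		return 0;
+	}
+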
--------------------------------------
-4. Symbol table and Elf section access
+5. Symbol table and Elf section access
--------------------------------------
A livepatch module's symbol table is accessible through module->symtab.
diff --git a/Documentation/media/Makefile b/Documentation/media/Makefile
index 39e2d766dbe3..a7fb35291f6c 100644
--- a/Documentation/media/Makefile
+++ b/Documentation/media/Makefile
@@ -10,7 +10,8 @@ FILES = audio.h.rst ca.h.rst dmx.h.rst frontend.h.rst net.h.rst video.h.rst \
TARGETS := $(addprefix $(BUILDDIR)/, $(FILES))
-htmldocs: $(BUILDDIR) ${TARGETS}
+.PHONY: all
+all: $(BUILDDIR) ${TARGETS}
$(BUILDDIR):
$(Q)mkdir -p $@
diff --git a/Documentation/media/conf.py b/Documentation/media/conf.py
new file mode 100644
index 000000000000..bef927bc4659
--- /dev/null
+++ b/Documentation/media/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Media Subsystem Documentation'
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'media.tex', 'Linux Media Subsystem Documentation',
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/media/conf_nitpick.py b/Documentation/media/conf_nitpick.py
new file mode 100644
index 000000000000..11beac2e68fb
--- /dev/null
+++ b/Documentation/media/conf_nitpick.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Media Subsystem Documentation'
+
+# It is possible to run Sphinx in nitpick mode with:
+nitpicky = True
+
+# within nit-picking build, do not refer to any intersphinx object
+intersphinx_mapping = {}
+
+# In nitpick mode, Sphinx will complain about lots of missing references that
+#
+# 1) are just typedefs like: bool, __u32, etc;
+# 2) are things like: enum, NULL;
+# 3) are symbols that should be in different books (but currently
+#    aren't ported to ReST)
+#
+# The list below contains such symbols to be ignored in nitpick mode
+#
+nitpick_ignore = [
+ ("c:func", "clock_gettime"),
+ ("c:func", "close"),
+ ("c:func", "container_of"),
+ ("c:func", "determine_valid_ioctls"),
+ ("c:func", "ERR_PTR"),
+ ("c:func", "ioctl"),
+ ("c:func", "IS_ERR"),
+ ("c:func", "mmap"),
+ ("c:func", "open"),
+ ("c:func", "pci_name"),
+ ("c:func", "poll"),
+ ("c:func", "PTR_ERR"),
+ ("c:func", "read"),
+ ("c:func", "release"),
+ ("c:func", "set"),
+ ("c:func", "struct fd_set"),
+ ("c:func", "struct pollfd"),
+ ("c:func", "usb_make_path"),
+ ("c:func", "write"),
+ ("c:type", "atomic_t"),
+ ("c:type", "bool"),
+ ("c:type", "buf_queue"),
+ ("c:type", "device"),
+ ("c:type", "device_driver"),
+ ("c:type", "device_node"),
+ ("c:type", "enum"),
+ ("c:type", "file"),
+ ("c:type", "i2c_adapter"),
+ ("c:type", "i2c_board_info"),
+ ("c:type", "i2c_client"),
+ ("c:type", "ktime_t"),
+ ("c:type", "led_classdev_flash"),
+ ("c:type", "list_head"),
+ ("c:type", "lock_class_key"),
+ ("c:type", "module"),
+ ("c:type", "mutex"),
+ ("c:type", "pci_dev"),
+ ("c:type", "pdvbdev"),
+ ("c:type", "poll_table_struct"),
+ ("c:type", "s32"),
+ ("c:type", "s64"),
+ ("c:type", "sd"),
+ ("c:type", "spi_board_info"),
+ ("c:type", "spi_device"),
+ ("c:type", "spi_master"),
+ ("c:type", "struct fb_fix_screeninfo"),
+ ("c:type", "struct pollfd"),
+ ("c:type", "struct timeval"),
+ ("c:type", "struct video_capability"),
+ ("c:type", "u16"),
+ ("c:type", "u32"),
+ ("c:type", "u64"),
+ ("c:type", "u8"),
+ ("c:type", "union"),
+ ("c:type", "usb_device"),
+
+ ("cpp:type", "boolean"),
+ ("cpp:type", "fd"),
+ ("cpp:type", "fd_set"),
+ ("cpp:type", "int16_t"),
+ ("cpp:type", "NULL"),
+ ("cpp:type", "off_t"),
+ ("cpp:type", "pollfd"),
+ ("cpp:type", "size_t"),
+ ("cpp:type", "ssize_t"),
+ ("cpp:type", "timeval"),
+ ("cpp:type", "__u16"),
+ ("cpp:type", "__u32"),
+ ("cpp:type", "__u64"),
+ ("cpp:type", "uint16_t"),
+ ("cpp:type", "uint32_t"),
+ ("cpp:type", "video_system_t"),
+]
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
new file mode 100644
index 000000000000..7f8f0af620ce
--- /dev/null
+++ b/Documentation/media/index.rst
@@ -0,0 +1,19 @@
+Linux Media Subsystem Documentation
+===================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ media_uapi
+ media_kapi
+ dvb-drivers/index
+ v4l-drivers/index
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index 38fd7e0cfccd..7c0f981a6e07 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -32,7 +32,7 @@ Arguments
Open flags. Access mode must be ``O_RDWR``.
When the ``O_NONBLOCK`` flag is given, the
- :ref:`CEC_RECEIVE <CEC_RECEIVE>` and :ref:`CEC_DQEVENT <CEC_DQEVENT>` ioctls
+ :ref:`CEC_RECEIVE <CEC_RECEIVE>` and :c:func:`CEC_DQEVENT` ioctls
will return the ``EAGAIN`` error code when no message or event is available, and
ioctls :ref:`CEC_TRANSMIT <CEC_TRANSMIT>`,
:ref:`CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>` and
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 2e1e73928396..f8caa28a96d2 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -15,7 +15,8 @@ CEC_DQEVENT - Dequeue a CEC event
Synopsis
========
-.. cpp:function:: int ioctl( int fd, int request, struct cec_event *argp )
+.. c:function:: int ioctl( int fd, int request, struct cec_event *argp )
+ :name: CEC_DQEVENT
Arguments
=========
@@ -36,7 +37,7 @@ Description
and is currently only available as a staging kernel module.
CEC devices can send asynchronous events. These can be retrieved by
-calling :ref:`ioctl CEC_DQEVENT <CEC_DQEVENT>`. If the file descriptor is in
+calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
non-blocking mode and no event is pending, then it will return -1 and
set errno to the ``EAGAIN`` error code.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 415154a487d0..a7697783ac4c 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -74,6 +74,8 @@ dns_resolver.txt
- The DNS resolver module allows kernel servies to make DNS queries.
driver.txt
- Softnet driver issues.
+ena.txt
+ - info on Amazon's Elastic Network Adapter (ENA)
e100.txt
- info on Intel's EtherExpress PRO/100 line of 10/100 boards
e1000.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 1b5e7a7f2185..8a8d3d96f6c6 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -43,10 +43,15 @@ new interfaces to verify the compatibility. There is no need to
reload the module if you plug your USB wifi adapter into your ma-
chine after batman advanced was initially loaded.
-To activate a given interface simply write "bat0" into its
-"mesh_iface" file inside the batman_adv subfolder:
+The batman-adv soft-interface can be created using the iproute2
+tool "ip"
-# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
+# ip link add name bat0 type batadv
+
+To activate a given interface simply attach it to the "bat0"
+interface:
+
+# ip link set dev eth0 master bat0
Repeat this step for all interfaces you wish to add. Now batman
starts using/broadcasting on this/these interface(s).
@@ -56,10 +61,10 @@ By reading the "iface_status" file you can check its status:
# cat /sys/class/net/eth0/batman_adv/iface_status
# active
-To deactivate an interface you have to write "none" into its
-"mesh_iface" file:
+To deactivate an interface you have to detach it from the
+"bat0" interface:
-# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
+# ip link set dev eth0 nomaster
All mesh wide settings can be found in batman's own interface
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index f20c884c048a..6d6c07cf1a9a 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -227,9 +227,9 @@ to address individual switches in the tree.
dsa_switch: structure describing a switch device in the tree, referencing a
dsa_switch_tree as a backpointer, slave network devices, master network device,
-and a reference to the backing dsa_switch_driver
+and a reference to the backing dsa_switch_ops
-dsa_switch_driver: structure referencing function pointers, see below for a full
+dsa_switch_ops: structure referencing function pointers, see below for a full
description.
Design limitations
@@ -357,10 +357,10 @@ regular HWMON devices in /sys/class/hwmon/.
Driver development
==================
-DSA switch drivers need to implement a dsa_switch_driver structure which will
+DSA switch drivers need to implement a dsa_switch_ops structure which will
contain the various members described below.
-register_switch_driver() registers this dsa_switch_driver in its internal list
+register_switch_driver() registers this dsa_switch_ops in its internal list
of drivers to probe for. unregister_switch_driver() does the exact opposite.
Unless requested differently by setting the priv_size member accordingly, DSA
@@ -379,7 +379,7 @@ Switch configuration
buses, return a non-NULL string
- setup: setup function for the switch, this function is responsible for setting
- up the dsa_switch_driver private structure with all it needs: register maps,
+ up the dsa_switch_ops private structure with all it needs: register maps,
interrupts, mutexes, locks etc.. This function is also expected to properly
configure the switch to separate all network interfaces from each other, that
is, they should be isolated by the switch hardware itself, typically by creating
@@ -584,6 +584,29 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
function that the driver has to call for each MAC address known to be behind
the given port. A switchdev object is used to carry the VID and FDB info.
+- port_mdb_prepare: bridge layer function invoked when the bridge prepares the
+ installation of a multicast database entry. If the operation is not supported,
+ this function should return -EOPNOTSUPP to inform the bridge code to fall back
+ to a software implementation. No hardware setup must be done in this function.
+ See port_fdb_add for further details; a minimal sketch of these ops follows
+ this list.
+
+- port_mdb_add: bridge layer function invoked when the bridge wants to install
+ a multicast database entry; the switch hardware should be programmed with the
+ specified address in the specified VLAN ID in the forwarding database
+ associated with this VLAN ID.
+
+Note: VLAN ID 0 corresponds to the port private database, which, in the context
+of DSA, would be its port-based VLAN, used by the associated bridge device.
+
+- port_mdb_del: bridge layer function invoked when the bridge wants to remove a
+ multicast database entry; the switch hardware should be programmed to delete
+ the specified MAC address from the specified VLAN ID if it was mapped into
+ this port forwarding database.
+
+- port_mdb_dump: bridge layer function invoked with a switchdev callback
+ function that the driver has to call for each MAC address known to be behind
+ the given port. A switchdev object is used to carry the VID and MDB info.
+
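+A minimal sketch of these three ops is shown below. The prototypes are assumed
+to match struct dsa_switch_ops in include/net/dsa.h for this series, and the
+hardware programming itself is only indicated by comments.
+
+	#include <net/dsa.h>
+	#include <net/switchdev.h>
+
+	static int my_port_mdb_prepare(struct dsa_switch *ds, int port,
+				       const struct switchdev_obj_port_mdb *mdb,
+				       struct switchdev_trans *trans)
+	{
+		/* Only validate here; no hardware setup. Returning
+		 * -EOPNOTSUPP makes the bridge fall back to software. */
+		return 0;
+	}
+
+	static void my_port_mdb_add(struct dsa_switch *ds, int port,
+				    const struct switchdev_obj_port_mdb *mdb,
+				    struct switchdev_trans *trans)
+	{
+		/* Program mdb->addr in VLAN mdb->vid into the forwarding
+		 * database associated with this port. */
+	}
+
+	static int my_port_mdb_del(struct dsa_switch *ds, int port,
+				   const struct switchdev_obj_port_mdb *mdb)
+	{
+		/* Remove mdb->addr / mdb->vid from this port's database. */
+		return 0;
+	}
+
+	static struct dsa_switch_ops my_switch_ops = {
+		/* ... other mandatory ops ... */
+		.port_mdb_prepare = my_port_mdb_prepare,
+		.port_mdb_add     = my_port_mdb_add,
+		.port_mdb_del     = my_port_mdb_del,
+	};
+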
TODO
====
diff --git a/Documentation/networking/ena.txt b/Documentation/networking/ena.txt
new file mode 100644
index 000000000000..2b4b6f57e549
--- /dev/null
+++ b/Documentation/networking/ena.txt
@@ -0,0 +1,305 @@
+Linux kernel driver for Elastic Network Adapter (ENA) family:
+=============================================================
+
+Overview:
+=========
+ENA is a networking interface designed to make good use of modern CPU
+features and system architectures.
+
+The ENA device exposes a lightweight management interface with a
+minimal set of memory mapped registers and extendable command set
+through an Admin Queue.
+
+The driver supports a range of ENA devices, is link-speed independent
+(i.e., the same driver is used for 10GbE, 25GbE, 40GbE, etc.), and has
+a negotiated and extendable feature set.
+
+Some ENA devices support SR-IOV. This driver is used for both the
+SR-IOV Physical Function (PF) and Virtual Function (VF) devices.
+
+ENA devices enable high speed and low overhead network traffic
+processing by providing multiple Tx/Rx queue pairs (the maximum number
+is advertised by the device via the Admin Queue), a dedicated MSI-X
+interrupt vector per Tx/Rx queue pair, adaptive interrupt moderation,
+and CPU cacheline optimized data placement.
+
+The ENA driver supports industry standard TCP/IP offload features such
+as checksum offload and TCP transmit segmentation offload (TSO).
+Receive-side scaling (RSS) is supported for multi-core scaling.
+
+The ENA driver and its corresponding devices implement health
+monitoring mechanisms such as watchdog, enabling the device and driver
+to recover in a manner transparent to the application, as well as
+debug logs.
+
+Some of the ENA devices support a working mode called Low-latency
+Queue (LLQ), which saves several more microseconds.
+
+Supported PCI vendor ID/device IDs:
+===================================
+1d0f:0ec2 - ENA PF
+1d0f:1ec2 - ENA PF with LLQ support
+1d0f:ec20 - ENA VF
+1d0f:ec21 - ENA VF with LLQ support
+
+ENA Source Code Directory Structure:
+====================================
+ena_com.[ch]      - Management communication layer. This layer is
+                    responsible for handling all the management
+                    (admin) communication between the device and the
+                    driver.
+ena_eth_com.[ch]  - Tx/Rx data path.
+ena_admin_defs.h  - Definition of ENA management interface.
+ena_eth_io_defs.h - Definition of ENA data path interface.
+ena_common_defs.h - Common definitions for ena_com layer.
+ena_regs_defs.h   - Definition of ENA PCI memory-mapped (MMIO) registers.
+ena_netdev.[ch]   - Main Linux kernel driver.
+ena_sysfs.[ch]    - Sysfs files.
+ena_ethtool.c     - ethtool callbacks.
+ena_pci_id_tbl.h  - Supported device IDs.
+
+Management Interface:
+=====================
+ENA management interface is exposed by means of:
+- PCIe Configuration Space
+- Device Registers
+- Admin Queue (AQ) and Admin Completion Queue (ACQ)
+- Asynchronous Event Notification Queue (AENQ)
+
+ENA device MMIO Registers are accessed only during driver
+initialization and are not involved in further normal device
+operation.
+
+AQ is used for submitting management commands, and the
+results/responses are reported asynchronously through ACQ.
+
+ENA introduces a very small set of management commands with room for
+vendor-specific extensions. Most of the management operations are
+framed in a generic Get/Set feature command.
+
+The following admin queue commands are supported:
+- Create I/O submission queue
+- Create I/O completion queue
+- Destroy I/O submission queue
+- Destroy I/O completion queue
+- Get feature
+- Set feature
+- Configure AENQ
+- Get statistics
+
+Refer to ena_admin_defs.h for the list of supported Get/Set Feature
+properties.
+
+The Asynchronous Event Notification Queue (AENQ) is a uni-directional
+queue used by the ENA device to send to the driver events that cannot
+be reported using ACQ. AENQ events are subdivided into groups. Each
+group may have multiple syndromes, as shown below
+
+The events are:
+    Group               Syndrome
+    Link state change   - X -
+    Fatal error         - X -
+    Notification        Suspend traffic
+    Notification        Resume traffic
+    Keep-Alive          - X -
+
+ACQ and AENQ share the same MSI-X vector.
+
+Keep-Alive is a special mechanism that allows monitoring of the
+device's health. The driver maintains a watchdog (WD) handler which,
+if fired, logs the current state and statistics then resets and
+restarts the ENA device and driver. A Keep-Alive event is delivered by
+the device every second. The driver re-arms the WD upon reception of a
+Keep-Alive event. A missed Keep-Alive event causes the WD handler to
+fire.
+
+Data Path Interface:
+====================
+I/O operations are based on Tx and Rx Submission Queues (Tx SQ and Rx
+SQ correspondingly). Each SQ has a completion queue (CQ) associated
+with it.
+
+The SQs and CQs are implemented as descriptor rings in contiguous
+physical memory.
+
+The ENA driver supports two Queue Operation modes for Tx SQs:
+- Regular mode
+ * In this mode the Tx SQs reside in the host's memory. The ENA
+ device fetches the ENA Tx descriptors and packet data from host
+ memory.
+- Low Latency Queue (LLQ) mode or "push-mode".
+ * In this mode the driver pushes the transmit descriptors and the
+ first 128 bytes of the packet directly to the ENA device memory
+ space. The rest of the packet payload is fetched by the
+ device. For this operation mode, the driver uses a dedicated PCI
+ device memory BAR, which is mapped with write-combine capability.
+
+The Rx SQs support only the regular mode.
+
+Note: Not all ENA devices support LLQ, and this feature is negotiated
+ with the device upon initialization. If the ENA device does not
+ support LLQ mode, the driver falls back to the regular mode.
+
+The driver supports multi-queue for both Tx and Rx. This has various
+benefits:
+- Reduced CPU/thread/process contention on a given Ethernet interface.
+- Cache miss rate on completion is reduced, particularly for data
+ cache lines that hold the sk_buff structures.
+- Increased process-level parallelism when handling received packets.
+- Increased data cache hit rate, by steering kernel processing of
+ packets to the CPU, where the application thread consuming the
+ packet is running.
+- In-hardware interrupt redirection.
+
+Interrupt Modes:
+================
+The driver assigns a single MSI-X vector per queue pair (for both Tx
+and Rx directions). The driver assigns an additional dedicated MSI-X vector
+for management (for ACQ and AENQ).
+
+Management interrupt registration is performed when the Linux kernel
+probes the adapter, and it is de-registered when the adapter is
+removed. I/O queue interrupt registration is performed when the Linux
+interface of the adapter is opened, and it is de-registered when the
+interface is closed.
+
+The management interrupt is named:
+ ena-mgmnt@pci:<PCI domain:bus:slot.function>
+and for each queue pair, an interrupt is named:
+ <interface name>-Tx-Rx-<queue index>
+
+The ENA device operates in auto-mask and auto-clear interrupt
+modes. That is, once MSI-X is delivered to the host, its Cause bit is
+automatically cleared and the interrupt is masked. The interrupt is
+unmasked by the driver after NAPI processing is complete.
+
+Interrupt Moderation:
+=====================
+ENA driver and device can operate in conventional or adaptive interrupt
+moderation mode.
+
+In conventional mode the driver instructs the device to postpone interrupt
+posting according to a static interrupt delay value. The interrupt delay
+value can be configured through ethtool(8). The following ethtool
+parameters are supported by the driver: tx-usecs, rx-usecs.
+
+In adaptive interrupt moderation mode the interrupt delay value is
+updated by the driver dynamically and adjusted every NAPI cycle
+according to the traffic nature.
+
+By default ENA driver applies adaptive coalescing on Rx traffic and
+conventional coalescing on Tx traffic.
+
+Adaptive coalescing can be switched on/off through ethtool(8)
+adaptive_rx on|off parameter.
+
+The driver chooses the interrupt delay value according to the number of
+bytes and packets received between interrupt unmasking and interrupt
+posting. The driver uses an interrupt delay table that subdivides the
+range of received bytes/packets into 5 levels and assigns an interrupt
+delay value to each level.
+
+The user can enable/disable adaptive moderation, modify the interrupt
+delay table and restore its default values through sysfs.
+
+The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
+and can be configured by the ETHTOOL_STUNABLE command of the
+SIOCETHTOOL ioctl.
+
+SKB:
+The driver allocates an SKB for each frame received in the Rx handler, which
+runs in NAPI context. The allocation method depends on the size of the packet.
+If the frame length is larger than rx_copybreak, napi_get_frags()
+is used; otherwise netdev_alloc_skb_ip_align() is used, the buffer
+content is copied (by the CPU) to the SKB, and the buffer is recycled.
+
+Statistics:
+===========
+The user can obtain ENA device and driver statistics using ethtool.
+The driver can collect regular or extended statistics (including
+per-queue stats) from the device.
+
+In addition the driver logs the stats to syslog upon device reset.
+
+MTU:
+====
+The driver supports an arbitrarily large MTU with a maximum that is
+negotiated with the device. The driver configures MTU using the
+SetFeature command (ENA_ADMIN_MTU property). The user can change MTU
+via ip(8) and similar legacy tools.
+
+Stateless Offloads:
+===================
+The ENA driver supports:
+- TSO over IPv4/IPv6
+- TSO with ECN
+- IPv4 header checksum offload
+- TCP/UDP over IPv4/IPv6 checksum offloads
+
+RSS:
+====
+- The ENA device supports RSS that allows flexible Rx traffic
+ steering.
+- Toeplitz and CRC32 hash functions are supported.
+- Different combinations of L2/L3/L4 fields can be configured as
+ inputs for hash functions.
+- The driver configures RSS settings using the AQ SetFeature command
+ (ENA_ADMIN_RSS_HASH_FUNCTION, ENA_ADMIN_RSS_HASH_INPUT and
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG properties).
+- If the NETIF_F_RXHASH flag is set, the 32-bit result of the hash
+ function delivered in the Rx CQ descriptor is set in the received
+ SKB.
+- The user can provide a hash key, hash function, and configure the
+ indirection table through ethtool(8).
+
+DATA PATH:
+==========
+Tx:
+---
+ena_start_xmit() is called by the stack. This function does the following:
+- Maps data buffers (skb->data and frags).
+- Populates ena_buf for the push buffer (if the driver and device are
+ in push mode.)
+- Prepares ENA bufs for the remaining frags.
+- Allocates a new request ID from the empty req_id ring. The request
+ ID is the index of the packet in the Tx info. This is used for
+ out-of-order TX completions.
+- Adds the packet to the proper place in the Tx ring.
+- Calls ena_com_prepare_tx(), an ENA communication layer that converts
+ the ena_bufs to ENA descriptors (and adds meta ENA descriptors as
+ needed.)
+ * This function also copies the ENA descriptors and the push buffer
+ to the Device memory space (if in push mode.)
+- Writes doorbell to the ENA device.
+- When the ENA device finishes sending the packet, a completion
+ interrupt is raised.
+- The interrupt handler schedules NAPI.
+- The ena_clean_tx_irq() function is called. This function handles the
+ completion descriptors generated by the ENA, with a single
+ completion descriptor per completed packet.
+ * req_id is retrieved from the completion descriptor. The tx_info of
+ the packet is retrieved via the req_id. The data buffers are
+ unmapped and req_id is returned to the empty req_id ring.
+ * The function stops when the completion descriptors are completed or
+ the budget is reached.
+
+Rx:
+---
+- When a packet is received from the ENA device, an interrupt is raised.
+- The interrupt handler schedules NAPI.
+- The ena_clean_rx_irq() function is called. This function calls
+ ena_rx_pkt(), an ENA communication layer function, which returns the
+ number of descriptors used for a new unhandled packet, and zero if
+ no new packet is found.
+- Then it calls the ena_eth_rx_skb() function.
+- ena_eth_rx_skb() checks packet length:
+ * If the packet is small (len < rx_copybreak), the driver allocates
+ a SKB for the new packet, and copies the packet payload into the
+ SKB data buffer.
+ - In this way the original data buffer is not passed to the stack
+ and is reused for future Rx packets.
+ * Otherwise the function unmaps the Rx buffer, then allocates the
+ new SKB structure and hooks the Rx buffer to the SKB frags.
+- The new SKB is updated with the necessary information (protocol,
+ checksum hw verify result, etc.), and then passed to the network
+ stack, using the NAPI interface function napi_gro_receive().
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9ae929395b24..3db8c67d2c8d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -575,32 +575,33 @@ tcp_syncookies - BOOLEAN
unconditionally generation of syncookies.
tcp_fastopen - INTEGER
- Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
- in the opening SYN packet. To use this feature, the client application
- must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
- connect() to perform a TCP handshake automatically.
+ Enable TCP Fast Open (RFC7413) to send and accept data in the opening
+ SYN packet.
- The values (bitmap) are
- 1: Enables sending data in the opening SYN on the client w/ MSG_FASTOPEN.
- 2: Enables TCP Fast Open on the server side, i.e., allowing data in
- a SYN packet to be accepted and passed to the application before
- 3-way hand shake finishes.
- 4: Send data in the opening SYN regardless of cookie availability and
- without a cookie option.
- 0x100: Accept SYN data w/o validating the cookie.
- 0x200: Accept data-in-SYN w/o any cookie option present.
- 0x400/0x800: Enable Fast Open on all listeners regardless of the
- TCP_FASTOPEN socket option. The two different flags designate two
- different ways of setting max_qlen without the TCP_FASTOPEN socket
- option.
+ The client support is enabled by flag 0x1 (on by default). The client
+ then must use sendmsg() or sendto() with the MSG_FASTOPEN flag,
+ rather than connect(), to send data in the SYN (see the sketch below).
- Default: 1
+ The server support is enabled by flag 0x2 (off by default). Then
+ either enable for all listeners with another flag (0x400) or
+ enable individual listeners via TCP_FASTOPEN socket option with
+ the option value being the length of the syn-data backlog.
- Note that the client & server side Fast Open flags (1 and 2
- respectively) must be also enabled before the rest of flags can take
- effect.
+ The values (bitmap) are
+ 0x1: (client) enables sending data in the opening SYN on the client.
+ 0x2: (server) enables the server support, i.e., allowing data in
+ a SYN packet to be accepted and passed to the
+ application before 3-way handshake finishes.
+ 0x4: (client) send data in the opening SYN regardless of cookie
+ availability and without a cookie option.
+ 0x200: (server) accept data-in-SYN w/o any cookie option present.
+ 0x400: (server) enable all listeners to support Fast Open by
+ default without explicit TCP_FASTOPEN socket option.
+
+ Default: 0x1
- See include/net/tcp.h and the code for more details.
+ Note that additional client or server features are only
+ effective if the basic support (0x1 and 0x2) is enabled respectively.
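+
+	A minimal client-side sketch (illustrative only; the address, port
+	and payload are assumptions) that sends data in the SYN:
+
+		#include <sys/socket.h>
+		#include <netinet/in.h>
+		#include <arpa/inet.h>
+
+		static ssize_t tfo_send(const char *msg, size_t len)
+		{
+			struct sockaddr_in addr = {
+				.sin_family = AF_INET,
+				.sin_port   = htons(80),
+			};
+			int fd = socket(AF_INET, SOCK_STREAM, 0);
+
+			if (fd < 0)
+				return -1;
+			inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr);
+
+			/* sendto() with MSG_FASTOPEN replaces connect(): the
+			 * kernel sends data in the SYN when a Fast Open
+			 * cookie is cached and falls back to a normal
+			 * handshake otherwise. MSG_FASTOPEN requires
+			 * reasonably recent kernel/libc headers. */
+			return sendto(fd, msg, len, MSG_FASTOPEN,
+				      (struct sockaddr *)&addr, sizeof(addr));
+		}
+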
tcp_syn_retries - INTEGER
Number of times initial SYNs for an active TCP connection attempt
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index 14422f8fcdc4..24196cef7c91 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -22,7 +22,7 @@ The driver can be built into the kernel (CONFIG_IPVLAN=y) or as a module
There are no module parameters for this driver and it can be configured
using IProute2/ip utility.
- ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | L3 }
+ ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | l3 | l3s }
e.g. ip link add link ipvl0 eth0 type ipvlan mode l2
@@ -48,6 +48,11 @@ master device for the L2 processing and routing from that instance will be
used before packets are queued on the outbound device. In this mode the slaves
will not receive nor can send multicast / broadcast traffic.
+4.3 L3S mode:
+ This is very similar to the L3 mode except that iptables (conn-tracking)
+works in this mode and hence it is L3-symmetric (L3s). This will have slightly lower
+performance, but that shouldn't matter since you are choosing this mode over plain-L3
+mode to make conn-tracking work.
5. What to choose (macvlan vs. ipvlan)?
These two devices are very similar in many regards and the specific use
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 70c926ae212d..1b63bbc6b94f 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -725,7 +725,8 @@ The kernel interface functions are as follows:
(*) End a client call.
- void rxrpc_kernel_end_call(struct rxrpc_call *call);
+ void rxrpc_kernel_end_call(struct socket *sock,
+ struct rxrpc_call *call);
This is used to end a previously begun call. The user_call_ID is expunged
from AF_RXRPC's knowledge and will not be seen again in association with
@@ -733,7 +734,9 @@ The kernel interface functions are as follows:
(*) Send data through a call.
- int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
+ int rxrpc_kernel_send_data(struct socket *sock,
+ struct rxrpc_call *call,
+ struct msghdr *msg,
size_t len);
This is used to supply either the request part of a client call or the
@@ -745,9 +748,42 @@ The kernel interface functions are as follows:
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
+ (*) Receive data from a call.
+
+ int rxrpc_kernel_recv_data(struct socket *sock,
+ struct rxrpc_call *call,
+ void *buf,
+ size_t size,
+ size_t *_offset,
+ bool want_more,
+ u32 *_abort)
+
+ This is used to receive data from either the reply part of a client call
+ or the request part of a service call. buf and size specify how much
+ data is desired and where to store it. *_offset is added on to buf and
+ subtracted from size internally; the amount copied into the buffer is
+ added to *_offset before returning.
+
+ want_more should be true if further data will be required after this is
+ satisfied and false if this is the last item of the receive phase.
+
+ There are three normal returns: 0 if the buffer was filled and want_more
+ was true; 1 if the buffer was filled, the last DATA packet has been
+ emptied and want_more was false; and -EAGAIN if the function needs to be
+ called again.
+
+ If the last DATA packet is processed but the buffer contains less than
+ the amount requested, -EBADMSG is returned. If want_more wasn't set, but
+ more data was available, -EMSGSIZE is returned.
+
+ If a remote ABORT is detected, the abort code received will be stored in
+ *_abort and -ECONNABORTED will be returned.
+
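+     A minimal sketch of draining a client call's reply with this function
+     (the surrounding call setup is assumed to exist elsewhere; error
+     handling is abbreviated):
+
+	static int example_recv_reply(struct socket *sock,
+				      struct rxrpc_call *call,
+				      void *buf, size_t size)
+	{
+		size_t offset = 0;
+		u32 abort_code = 0;
+		int ret;
+
+		do {
+			/* want_more == false: this buffer is expected to hold
+			 * the entire reply. Real code should wait for call
+			 * progress instead of spinning on -EAGAIN. */
+			ret = rxrpc_kernel_recv_data(sock, call, buf, size,
+						     &offset, false,
+						     &abort_code);
+		} while (ret == -EAGAIN);
+
+		if (ret == -ECONNABORTED)
+			return ret;	/* abort_code holds the remote abort code */
+
+		return ret;	/* 1: buffer filled and last DATA packet consumed */
+	}
+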
(*) Abort a call.
- void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code);
+ void rxrpc_kernel_abort_call(struct socket *sock,
+ struct rxrpc_call *call,
+ u32 abort_code);
This is used to abort a call if it's still in an abortable state. The
abort code specified will be placed in the ABORT message sent.
@@ -820,47 +856,6 @@ The kernel interface functions are as follows:
Other errors may be returned if the call had been aborted (-ECONNABORTED)
or had timed out (-ETIME).
- (*) Record the delivery of a data message.
-
- void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
- struct sk_buff *skb);
-
- This is used to record a data message as having been consumed and to
- update the ACK state for the call. The message must still be passed to
- rxrpc_kernel_free_skb() for disposal by the caller.
-
- (*) Free a message.
-
- void rxrpc_kernel_free_skb(struct sk_buff *skb);
-
- This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC
- socket.
-
- (*) Determine if a data message is the last one on a call.
-
- bool rxrpc_kernel_is_data_last(struct sk_buff *skb);
-
- This is used to determine if a socket buffer holds the last data message
- to be received for a call (true will be returned if it does, false
- if not).
-
- The data message will be part of the reply on a client call and the
- request on an incoming call. In the latter case there will be more
- messages, but in the former case there will not.
-
- (*) Get the abort code from an abort message.
-
- u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb);
-
- This is used to extract the abort code from a remote abort message.
-
- (*) Get the error number from a local or network error message.
-
- int rxrpc_kernel_get_error_number(struct sk_buff *skb);
-
- This is used to extract the error number from a message indicating either
- a local error occurred or a network error occurred.
-
(*) Allocate a null key for doing anonymous security.
struct key *rxrpc_get_null_key(const char *keyname);
@@ -868,6 +863,13 @@ The kernel interface functions are as follows:
This is used to allocate a null RxRPC key that can be used to indicate
anonymous security for a particular domain.
+ (*) Get the peer address of a call.
+
+ void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
+ struct sockaddr_rxrpc *_srx);
+
+ This is used to find the remote peer address of a call.
+
=======================
CONFIGURABLE PARAMETERS
diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt
new file mode 100644
index 000000000000..a0bf573dfa61
--- /dev/null
+++ b/Documentation/networking/strparser.txt
@@ -0,0 +1,136 @@
+Stream Parser
+-------------
+
+The stream parser (strparser) is a utility that parses messages of an
+application layer protocol running over a TCP connection. The stream
+parser works in conjunction with an upper layer in the kernel to provide
+kernel support for application layer messages. For instance, Kernel
+Connection Multiplexor (KCM) uses the Stream Parser to parse messages
+using a BPF program.
+
+Interface
+---------
+
+The API includes a context structure, a set of callbacks, utility
+functions, and a data_ready function. The callbacks include
+a parse_msg function that is called to perform parsing (e.g.
+BPF parsing in case of KCM), and a rcv_msg function that is called
+when a full message has been completed.
+
+A stream parser can be instantiated for a TCP connection. This is done
+by:
+
+strp_init(struct strparser *strp, struct sock *csk,
+ struct strp_callbacks *cb)
+
+strp is a struct of type strparser that is allocated by the upper layer.
+csk is the TCP socket associated with the stream parser. Callbacks are
+called by the stream parser.
+
+Callbacks
+---------
+
+There are four callbacks:
+
+int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+
+ parse_msg is called to determine the length of the next message
+ in the stream. The upper layer must implement this function. It
+ should parse the sk_buff as containing the headers for the
+ next application layer messages in the stream.
+
+ The skb->cb in the input skb is a struct strp_rx_msg. Only
+ the offset field is relevant in parse_msg and gives the offset
+ where the message starts in the skb.
+
+ The return values of this function are:
+
+ >0 : indicates length of successfully parsed message
+ 0 : indicates more data must be received to parse the message
+ -ESTRPIPE : current message should not be processed by the
+ kernel, return control of the socket to userspace which
+ can proceed to read the messages itself
+ other < 0 : Error in parsing, give control back to userspace
+ assuming that synchronization is lost and the stream
+ is unrecoverable (application expected to close TCP socket)
+
+ In the case that an error is returned (return value is less than
+ zero) the stream parser will set the error on TCP socket and wake
+ it up. If parse_msg returned -ESTRPIPE and the stream parser had
+ previously read some bytes for the current message, then the error
+ set on the attached socket is ENODATA since the stream is
+ unrecoverable in that case.
+
+void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+
+ rcv_msg is called when a full message has been received and
+ is queued. The callee must consume the sk_buff; it can
+ call strp_pause to prevent any further messages from being
+ received in rcv_msg (see strp_pause below). This callback
+ must be set.
+
+ The skb->cb in the input skb is a struct strp_rx_msg. This
+ struct contains two fields: offset and full_len. Offset is
+ where the message starts in the skb, and full_len is the
+ length of the message. skb->len - offset may be greater
+ than full_len since strparser does not trim the skb.
+
+int (*read_sock_done)(struct strparser *strp, int err);
+
+ read_sock_done is called when the stream parser is done reading
+ the TCP socket. The stream parser may read multiple messages
+ in a loop and this function allows cleanup to occur when exiting
+ the loop. If the callback is not set (NULL in strp_init) a
+ default function is used.
+
+void (*abort_parser)(struct strparser *strp, int err);
+
+ This function is called when the stream parser encounters an error
+ in parsing. The default function stops the stream parser for the
+ TCP socket and sets the error in the socket. The default function
+ can be changed by setting the callback to non-NULL in strp_init.
+
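+A minimal sketch tying these callbacks together for a protocol that is
+assumed (for illustration) to prefix every message with a 4-byte big-endian
+length field:
+
+	#include <net/strparser.h>
+	#include <linux/skbuff.h>
+
+	static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
+	{
+		struct strp_rx_msg *rxm = (struct strp_rx_msg *)skb->cb;
+		__be32 hdr;
+
+		/* Not even the length header has arrived yet. */
+		if (skb->len - rxm->offset < sizeof(hdr))
+			return 0;
+
+		if (skb_copy_bits(skb, rxm->offset, &hdr, sizeof(hdr)))
+			return -EINVAL;
+
+		/* Full message = header plus payload. */
+		return sizeof(hdr) + ntohl(hdr);
+	}
+
+	static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
+	{
+		/* A complete message has been assembled; the callee owns
+		 * the skb and must eventually consume it. */
+		kfree_skb(skb);
+	}
+
+	static int my_attach(struct strparser *strp, struct sock *csk)
+	{
+		struct strp_callbacks cb = {
+			.parse_msg = my_parse_msg,
+			.rcv_msg   = my_rcv_msg,
+			/* read_sock_done and abort_parser left NULL to get
+			 * the default implementations. */
+		};
+
+		return strp_init(strp, csk, &cb);
+	}
+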
+Functions
+---------
+
+The upper layer calls strp_tcp_data_ready when data is ready on the lower
+socket for strparser to process. This should be called from a data_ready
+callback that is set on the socket.
+
+strp_stop is called to completely stop stream parser operations. This
+is called internally when the stream parser encounters an error, and
+it is called from the upper layer when unattaching a TCP socket.
+
+strp_done is called to unattach the stream parser from the TCP socket.
+This must be called after the stream parser has been stopped.
+
+strp_check_rcv is called to check for new messages on the socket. This
+is normally called at initialization of a stream parser instance
+or after strp_unpause.
+
+Statistics
+----------
+
+Various counters are kept for each stream parser for a TCP socket.
+These are in the strp_stats structure. strp_aggr_stats is a convenience
+structure for accumulating statistics for multiple stream parser
+instances. save_strp_stats and aggregate_strp_stats are helper functions
+to save and aggregate statistics.
+
+Message assembly limits
+-----------------------
+
+The stream parser provides mechanisms to limit the resources consumed by
+message assembly.
+
+A timer is set when assembly starts for a new message. The message
+timeout is taken from rcvtime for the associated TCP socket. If the
+timer fires before assembly completes, the stream parser is aborted
+and the ETIMEDOUT error is set on the TCP socket.
+
+Message length is limited to the receive buffer size of the associated
+TCP socket. If the length returned by parse_msg is greater than
+the socket buffer size then the stream parser is aborted with the
+EMSGSIZE error set on the TCP socket. Note that this limits the
+maximum size of receive skbuffs for a socket with a stream parser
+to 2*sk_rcvbuf of the TCP socket.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 31c39115834d..2bbac05ab9e2 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -283,15 +283,10 @@ be sent to the port netdev for processing by the bridge driver. The
bridge should not reflood the packet to the same ports the device flooded,
otherwise there will be duplicate packets on the wire.
-To avoid duplicate packets, the device/driver should mark a packet as already
-forwarded using skb->offload_fwd_mark. The same mark is set on the device
-ports in the domain using dev->offload_fwd_mark. If the skb->offload_fwd_mark
-is non-zero and matches the forwarding egress port's dev->skb_mark, the kernel
-will drop the skb right before transmit on the egress port, with the
-understanding that the device already forwarded the packet on same egress port.
-The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
-for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
-a group ifindex.
+To avoid duplicate packets, the switch driver should mark a packet as already
+forwarded by setting the skb->offload_fwd_mark bit. The bridge driver will mark
+the skb using the ingress bridge port's mark and prevent it from being forwarded
+through any bridge port with the same mark.
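+
+A minimal sketch of a switch driver rx path setting the mark (the function
+and its surroundings are illustrative only):
+
+	static void my_switch_rx(struct net_device *dev, struct sk_buff *skb)
+	{
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+
+		/* The hardware already forwarded/flooded this frame, so tell
+		 * the bridge not to forward it again out of ports carrying
+		 * the same mark. */
+		skb->offload_fwd_mark = 1;
+
+		netif_receive_skb(skb);
+	}
+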
It is possible for the switch device to not handle flooding and push the
packets up to the bridge driver for flooding. This is not ideal as the number
@@ -319,30 +314,29 @@ the kernel, with the device doing the FIB lookup and forwarding. The device
does a longest prefix match (LPM) on FIB entries matching route prefix and
forwards the packet to the matching FIB entry's nexthop(s) egress ports.
-To program the device, the driver implements support for
-SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops.
-switchdev_port_obj_add is used for both adding a new FIB entry to the device,
-or modifying an existing entry on the device.
+To program the device, the driver has to register a FIB notifier handler
+using register_fib_notifier. The following events are available:
+FIB_EVENT_ENTRY_ADD: used both for adding a new FIB entry to the device
+ and for modifying an existing entry on the device.
+FIB_EVENT_ENTRY_DEL: used for removing a FIB entry
+FIB_EVENT_RULE_ADD, FIB_EVENT_RULE_DEL: used to propagate FIB rule changes
-XXX: Currently, only SWITCHDEV_OBJ_ID_IPV4_FIB objects are supported.
+FIB_EVENT_ENTRY_ADD and FIB_EVENT_ENTRY_DEL events pass:
-SWITCHDEV_OBJ_ID_IPV4_FIB object passes:
-
- struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
+ struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
u32 dst;
int dst_len;
struct fib_info *fi;
u8 tos;
u8 type;
- u32 nlflags;
u32 tb_id;
- } ipv4_fib;
+ u32 nlflags;
+ };
to add/modify/delete IPv4 dst/dest_len prefix on table tb_id. The *fi
structure holds details on the route and route's nexthops. *dev is one of the
-port netdevs mentioned in the routes next hop list. If the output port netdevs
-referenced in the route's nexthop list don't all have the same switch ID, the
-driver is not called to add/modify/delete the FIB entry.
+port netdevs mentioned in the route's next hop list.
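+
+A minimal sketch of such a handler (the registration call and the exact
+header locations should be checked against this kernel series; the hardware
+programming is only indicated by comments):
+
+	#include <linux/notifier.h>
+	#include <net/ip_fib.h>
+
+	static int my_fib_event(struct notifier_block *nb, unsigned long event,
+				void *ptr)
+	{
+		struct fib_entry_notifier_info *fen_info;
+
+		switch (event) {
+		case FIB_EVENT_ENTRY_ADD:
+		case FIB_EVENT_ENTRY_DEL:
+			fen_info = container_of(ptr,
+						struct fib_entry_notifier_info,
+						info);
+			/* Program or remove the fen_info->dst/dst_len prefix
+			 * in table fen_info->tb_id, using fen_info->fi for
+			 * the nexthop details. */
+			break;
+		case FIB_EVENT_RULE_ADD:
+		case FIB_EVENT_RULE_DEL:
+			/* FIB rule changes, e.g. abort offload if policy
+			 * routing cannot be offloaded. */
+			break;
+		}
+		return NOTIFY_DONE;
+	}
+
+	static struct notifier_block my_fib_nb = {
+		.notifier_call = my_fib_event,
+	};
+
+	/* register_fib_notifier() is then called with &my_fib_nb at driver
+	 * init time, and unregister_fib_notifier() at teardown. */
+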
Routes offloaded to the device are labeled with "offload" in the ip route
listing:
@@ -360,6 +354,8 @@ listing:
12.0.0.4 via 11.0.0.9 dev sw1p2 proto zebra metric 20 offload
192.168.0.0/24 dev eth0 proto kernel scope link src 192.168.0.15
+The "offload" flag is set in case at least one device offloads the FIB entry.
+
XXX: add/mod/del IPv6 FIB API
Nexthop Resolution
diff --git a/Documentation/perf/xgene-pmu.txt b/Documentation/perf/xgene-pmu.txt
new file mode 100644
index 000000000000..d7cff4454e5b
--- /dev/null
+++ b/Documentation/perf/xgene-pmu.txt
@@ -0,0 +1,48 @@
+APM X-Gene SoC Performance Monitoring Unit (PMU)
+================================================
+
+X-Gene SoC PMU consists of various independent system device PMUs such as
+L3 cache(s), I/O bridge(s), memory controller bridge(s) and memory
+controller(s). These PMU devices are loosely architected to follow the
+same model as the PMU for ARM cores. The PMUs share the same top level
+interrupt and status CSR region.
+
+PMU (perf) driver
+-----------------
+
+The xgene-pmu driver registers several perf PMU drivers. Each of the perf
+drivers provides a description of its available events and configuration
+options in sysfs; see /sys/devices/<l3cX/iobX/mcbX/mcX>/.
+
+The "format" directory describes format of the config (event ID),
+config1 (agent ID) fields of the perf_event_attr structure. The "events"
+directory provides configuration templates for all supported event types that
+can be used with perf tool. For example, "l3c0/bank-fifo-full/" is an
+equivalent of "l3c0/config=0x0b/".
+
+Most of the SoC PMUs have a specific list of agent IDs used for monitoring
+performance of a specific datapath. For example, agents of an L3 cache can be
+a specific CPU or an I/O bridge. Each PMU has a set of 2 registers capable of
+masking the agents from which requests come. If the bit with
+the bit number corresponding to an agent is set, the event is counted only if
+it is caused by a request from that agent. Each agent ID bit is inversely mapped
+to a corresponding bit in the "config1" field. By default, the event will be
+counted for all agent requests (config1 = 0x0). For all the supported agents of
+each PMU, please refer to APM X-Gene User Manual.
+
+Each perf driver also provides a "cpumask" sysfs attribute, which contains a
+single CPU ID of the processor which will be used to handle all the PMU events.
+
+Example for perf tool use:
+
+ / # perf list | grep -e l3c -e iob -e mcb -e mc
+ l3c0/ackq-full/ [Kernel PMU event]
+ <...>
+ mcb1/mcb-csw-stall/ [Kernel PMU event]
+
+ / # perf stat -a -e l3c0/read-miss/,mcb1/csw-write-request/ sleep 1
+
+ / # perf stat -a -e l3c0/read-miss,config1=0xfffffffffffffffe/ sleep 1
+
+The driver does not support sampling, therefore "perf record" will
+not work. Per-task (without "-a") perf sessions are not supported.
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 82dacc06e355..0c72588bd967 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -39,8 +39,8 @@ kind of power supply, and can process/present them to a user in consistent
manner. Results for different power supplies and machines are also directly
comparable.
-See drivers/power/ds2760_battery.c and drivers/power/pda_power.c for the
-example how to declare and handle attributes.
+See drivers/power/supply/ds2760_battery.c and drivers/power/supply/pda_power.c
+for examples of how to declare and handle attributes.
Units
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
index 4202d1bc583c..6d9a2ed32cad 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.txt
@@ -121,7 +121,7 @@ Block library API
below.
The block library can be found on GitHub:
- http://www.github.com/mikehollinger/ibmcapikv
+ http://github.com/open-power/capiflash
CXL Flash Driver IOCTLs
@@ -171,11 +171,30 @@ DK_CXLFLASH_ATTACH
destroyed, the tokens are to be considered stale and subsequent
usage will result in errors.
+ - A valid adapter file descriptor (fd2 >= 0) is only returned on
+ the initial attach for a context. Subsequent attaches to an
+ existing context (DK_CXLFLASH_ATTACH_REUSE_CONTEXT flag present)
+ do not provide the adapter file descriptor as it was previously
+ made known to the application.
+
- When a context is no longer needed, the user shall detach from
- the context via the DK_CXLFLASH_DETACH ioctl.
+ the context via the DK_CXLFLASH_DETACH ioctl. When this ioctl
+ returns with a valid adapter file descriptor and the return flag
+ DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+ close the adapter file descriptor following a successful detach.
+
+ - When this ioctl returns with a valid fd2 and the return flag
+ DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+ close fd2 in the following circumstances:
+
+ + Following a successful detach of the last user of the context
+ + Following a successful recovery on the context's original fd2
+ + In the child process of a fork(), following a clone ioctl,
+ on the fd2 associated with the source context
- - A close on fd2 will invalidate the tokens. This operation is not
- required by the user.
+ - At any time, a close on fd2 will invalidate the tokens. Applications
+ should exercise caution to only close fd2 when appropriate (outlined
+ in the previous bullet) to avoid premature loss of I/O.
DK_CXLFLASH_USER_DIRECT
-----------------------
@@ -254,6 +273,10 @@ DK_CXLFLASH_DETACH
success, all "tokens" which had been provided to the user from the
DK_CXLFLASH_ATTACH onward are no longer valid.
+ When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+ attach, the application _must_ close the fd2 associated with the context
+ following the detach of the final user of the context.
+
DK_CXLFLASH_VLUN_CLONE
----------------------
This ioctl is responsible for cloning a previously created
@@ -261,7 +284,7 @@ DK_CXLFLASH_VLUN_CLONE
support maintaining user space access to storage after a process
forks. Upon success, the child process (which invoked the ioctl)
will have access to the same LUNs via the same resource handle(s)
- and fd2 as the parent, but under a different context.
+ as the parent, but under a different context.
Context sharing across processes is not supported with CXL and
therefore each fork must be met with establishing a new context
@@ -275,6 +298,12 @@ DK_CXLFLASH_VLUN_CLONE
translation tables are copied from the parent context to the child's
and then synced with the AFU.
+ When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+ attach, the application _must_ close the fd2 associated with the source
+ context (still resident/accessible in the parent process) following the
+ clone. This is to avoid a stale entry in the file descriptor table of the
+ child process.
+
DK_CXLFLASH_VERIFY
------------------
This ioctl is used to detect various changes such as the capacity of
@@ -309,6 +338,11 @@ DK_CXLFLASH_RECOVER_AFU
at which time the context/resources they held will be freed as part of
the release fop.
+ When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+ attach, the application _must_ unmap and close the fd2 associated with the
+ original context following this ioctl returning success and indicating that
+ the context was recovered (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET).
+
DK_CXLFLASH_MANAGE_LUN
----------------------
This ioctl is used to switch a LUN from a mode where it is available
diff --git a/Documentation/powerpc/syscall64-abi.txt b/Documentation/powerpc/syscall64-abi.txt
new file mode 100644
index 000000000000..fa716a0d88bd
--- /dev/null
+++ b/Documentation/powerpc/syscall64-abi.txt
@@ -0,0 +1,105 @@
+===============================================
+Power Architecture 64-bit Linux system call ABI
+===============================================
+
+syscall
+=======
+
+syscall calling sequence[*] matches the Power Architecture 64-bit ELF ABI
+specification C function calling sequence, including register preservation
+rules, with the following differences.
+
+[*] Some syscalls (typically low-level management functions) may have
+ different calling sequences (e.g., rt_sigreturn).
+
+Parameters and return value
+---------------------------
+The system call number is specified in r0.
+
+There is a maximum of 6 integer parameters to a syscall, passed in r3-r8.
+
+Both a return value and a return error code are returned. cr0.SO is the return
+error code, and r3 is the return value or error code. When cr0.SO is clear,
+the syscall succeeded and r3 is the return value. When cr0.SO is set, the
+syscall failed and r3 is the error code that generally corresponds to errno.
+
+Stack
+-----
+System calls do not modify the caller's stack frame. For example, the caller's
+stack frame LR and CR save fields are not used.
+
+Register preservation rules
+---------------------------
+Register preservation rules match the ELF ABI calling sequence with the
+following differences:
+
+r0: Volatile. (System call number.)
+r3: Volatile. (Parameter 1, and return value.)
+r4-r8: Volatile. (Parameters 2-6.)
+cr0: Volatile (cr0.SO is the return error condition)
+cr1, cr5-7: Nonvolatile.
+lr: Nonvolatile.
+
+All floating point and vector data registers as well as control and status
+registers are nonvolatile.
+
+Invocation
+----------
+The syscall is performed with the sc instruction, and returns with execution
+continuing at the instruction following the sc instruction.
+
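The calling sequence above can be exercised directly from C with a small
inline-assembly wrapper.  The sketch below is illustrative only (the
negative-errno return convention on error is a C-library convention, not
part of the kernel ABI); it passes the syscall number in r0, one argument
in r3, and folds the cr0.SO error indication into the sign of the result:

    /* Minimal one-argument system call wrapper for powerpc64. */
    static inline long my_syscall1(long nr, long arg1)
    {
            register long r0 __asm__("r0") = nr;    /* system call number */
            register long r3 __asm__("r3") = arg1;  /* parameter 1 / return value */

            __asm__ __volatile__(
                    "sc\n\t"
                    "bns+ 1f\n\t"   /* cr0.SO clear: success, r3 holds the value */
                    "neg %1, %1\n"  /* cr0.SO set: make the error code negative */
                    "1:"
                    : "+r"(r0), "+r"(r3)
                    :
                    : "memory", "cr0", "r4", "r5", "r6", "r7", "r8",
                      "r9", "r10", "r11", "r12");
            return r3;
    }

A call such as my_syscall1(__NR_close, fd) then behaves like close(fd),
returning 0 on success and a negative error code on failure.
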
+Transactional Memory
+--------------------
+Syscall behavior can change if the processor is in transactional or suspended
+transaction state, and the syscall can affect the behavior of the transaction.
+
+If the processor is in suspended state when a syscall is made, the syscall
+will be performed as normal, and will return as normal. The syscall will be
+performed in suspended state, so its side effects will be persistent according
+to the usual transactional memory semantics. A syscall may or may not result
+in the transaction being doomed by hardware.
+
+If the processor is in transactional state when a syscall is made, then the
+behavior depends on the presence of PPC_FEATURE2_HTM_NOSC in the AT_HWCAP2 ELF
+auxiliary vector.
+
+- If present, which is the case for newer kernels, then the syscall will not
+ be performed and the transaction will be doomed by the kernel with the
+ failure code TM_CAUSE_SYSCALL | TM_CAUSE_PERSISTENT in the TEXASR SPR.
+
+- If not present (older kernels), then the kernel will suspend the
+ transactional state and the syscall will proceed as in the case of a
+ suspended state syscall, and will resume the transactional state before
+ returning to the caller. This case is not well defined or supported, so this
+ behavior should not be relied upon.
+
+
+vsyscall
+========
+
+vsyscall calling sequence matches the syscall calling sequence, with the
+following differences. Some vsyscalls may have different calling sequences.
+
+Parameters and return value
+---------------------------
+r0 is not used as an input. The vsyscall is selected by its address.
+
+Stack
+-----
+The vsyscall may or may not use the caller's stack frame save areas.
+
+Register preservation rules
+---------------------------
+r0: Volatile.
+cr1, cr5-7: Volatile.
+lr: Volatile.
+
+Invocation
+----------
+The vsyscall is performed with a branch-with-link instruction to the vsyscall
+function address.
+
+Transactional Memory
+--------------------
+vsyscalls will run in the same transactional state as the caller. A vsyscall
+may or may not result in the transaction being doomed by hardware.
diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt
index ef0219fa4bb4..f07597482351 100644
--- a/Documentation/remoteproc.txt
+++ b/Documentation/remoteproc.txt
@@ -101,9 +101,9 @@ int dummy_rproc_example(struct rproc *my_rproc)
On success, the new rproc is returned, and on failure, NULL.
Note: _never_ directly deallocate @rproc, even if it was not registered
- yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
+ yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
- void rproc_put(struct rproc *rproc)
+ void rproc_free(struct rproc *rproc)
- Free an rproc handle that was allocated by rproc_alloc.
This function essentially unrolls rproc_alloc(), by decrementing the
rproc's refcount. It doesn't directly free rproc; that would happen
@@ -131,7 +131,7 @@ int dummy_rproc_example(struct rproc *my_rproc)
has completed successfully.
After rproc_del() returns, @rproc is still valid, and its
- last refcount should be decremented by calling rproc_put().
+ last refcount should be decremented by calling rproc_free().
Returns 0 on success and -EINVAL if @rproc isn't valid.
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index c4b978a72f78..bb4a76f823e1 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -64,8 +64,6 @@ hpsa.txt
- HP Smart Array Controller SCSI driver.
hptiop.txt
- HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
-in2000.txt
- - info on in2000 driver
libsas.txt
- Serial Attached SCSI management layer.
link_power_management_policy.txt
diff --git a/Documentation/scsi/dtc3x80.txt b/Documentation/scsi/dtc3x80.txt
deleted file mode 100644
index 1d7af9f9a8ed..000000000000
--- a/Documentation/scsi/dtc3x80.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-README file for the Linux DTC3180/3280 scsi driver.
-by Ray Van Tassle (rayvt@comm.mot.com) March 1996
-Based on the generic & core NCR5380 code by Drew Eckhard
-
-SCSI device driver for the DTC 3180/3280.
-Data Technology Corp---a division of Qume.
-
-The 3280 has a standard floppy interface.
-
-The 3180 does not. Otherwise, they are identical.
-
-The DTC3x80 does not support DMA but it does have Pseudo-DMA which is
-supported by the driver.
-
-Its DTC406 scsi chip is supposedly compatible with the NCR 53C400.
-It is memory mapped, uses an IRQ, but no dma or io-port. There is
-internal DMA, between SCSI bus and an on-chip 128-byte buffer. Double
-buffering is done automagically by the chip. Data is transferred
-between the on-chip buffer and CPU/RAM via memory moves.
-
-The driver detects the possible memory addresses (jumper selectable):
- CC00, DC00, C800, and D800
-The possible IRQ's (jumper selectable) are:
- IRQ 10, 11, 12, 15
-Parity is supported by the chip, but not by this driver.
-Information can be obtained from /proc/scsi/dtc3c80/N.
-
-Note on interrupts:
-
-The documentation says that it can be set to interrupt whenever the
-on-chip buffer needs CPU attention. I couldn't get this to work. So
-the driver polls for data-ready in the pseudo-DMA transfer routine.
-The interrupt support routines in the NCR3280.c core modules handle
-scsi disconnect/reconnect, and this (mostly) works. However..... I
-have tested it with 4 totally different hard drives (both SCSI-1 and
-SCSI-2), and one CDROM drive. Interrupts works great for all but one
-specific hard drive. For this one, the driver will eventually hang in
-the transfer state. I have tested with: "dd bs=4k count=2k
-of=/dev/null if=/dev/sdb". It reads ok for a while, then hangs.
-After beating my head against this for a couple of weeks, getting
-nowhere, I give up. So.....This driver does NOT use interrupts, even
-if you have the card jumpered to an IRQ. Probably nobody will ever
-care.
diff --git a/Documentation/scsi/in2000.txt b/Documentation/scsi/in2000.txt
deleted file mode 100644
index c3e2a90475d2..000000000000
--- a/Documentation/scsi/in2000.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-UPDATE NEWS: version 1.33 - 26 Aug 98
-
- Interrupt management in this driver has become, over
- time, increasingly odd and difficult to explain - this
- has been mostly due to my own mental inadequacies. In
- recent kernels, it has failed to function at all when
- compiled for SMP. I've fixed that problem, and after
- taking a fresh look at interrupts in general, greatly
- reduced the number of places where they're fiddled
- with. Done some heavy testing and it looks very good.
- The driver now makes use of the __initfunc() and
- __initdata macros to save about 4k of kernel memory.
- Once again, the same code works for both 2.0.xx and
- 2.1.xx kernels.
-
-UPDATE NEWS: version 1.32 - 28 Mar 98
-
- Removed the check for legal IN2000 hardware versions:
- It appears that the driver works fine with serial
- EPROMs (the 8-pin chip that defines hardware rev) as
- old as 2.1, so we'll assume that all cards are OK.
-
-UPDATE NEWS: version 1.31 - 6 Jul 97
-
- Fixed a bug that caused incorrect SCSI status bytes to be
- returned from commands sent to LUNs greater than 0. This
- means that CDROM changers work now! Fixed a bug in the
- handling of command-line arguments when loaded as a module.
- Also put all the header data in in2000.h where it belongs.
- There are no longer any differences between this driver in
- the 2.1.xx source tree and the 2.0.xx tree, as of 2.0.31
- and 2.1.45 (or is it .46?) - this makes things much easier
- for me...
-
-UPDATE NEWS: version 1.30 - 14 Oct 96
-
- Fixed a bug in the code that sets the transfer direction
- bit (DESTID_DPD in the WD_DESTINATION_ID register). There
- are quite a few SCSI commands that do a write-to-device;
- now we deal with all of them correctly. Thanks to Joerg
- Dorchain for catching this one.
-
-UPDATE NEWS: version 1.29 - 24 Sep 96
-
- The memory-mapped hardware on the card is now accessed via
- the 'readb()' and 'readl()' macros - required by the new
- memory management scheme in the 2.1.x kernel series.
- As suggested by Andries Brouwer, 'bios_param()' no longer
- forces an artificial 1023 track limit on drives. Also
- removed some kludge-code left over from struggles with
- older (buggy) compilers.
-
-UPDATE NEWS: version 1.28 - 07 May 96
-
- Tightened up the "interrupts enabled/disabled" discipline
- in 'in2000_queuecommand()' and maybe 1 or 2 other places.
- I _think_ it may have been a little too lax, causing an
- occasional crash during full moon. A fully functional
- /proc interface is now in place - if you want to play
- with it, start by doing 'cat /proc/scsi/in2000/0'. You
- can also use it to change a few run-time parameters on
- the fly, but it's mostly for debugging. The curious
- should take a good look at 'in2000_proc_info()' in the
- in2000.c file to get an understanding of what it's all
- about; I figure that people who are really into it will
- want to add features suited to their own needs...
- Also, sync is now DISABLED by default.
-
-UPDATE NEWS: version 1.27 - 10 Apr 96
-
- Fixed a well-hidden bug in the adaptive-disconnect code
- that would show up every now and then during extreme
- heavy loads involving 2 or more simultaneously active
- devices. Thanks to Joe Mack for keeping my nose to the
- grindstone on this one.
-
-UPDATE NEWS: version 1.26 - 07 Mar 96
-
- 1.25 had a nasty bug that bit people with swap partitions
- and tape drives. Also, in my attempt to guess my way
- through Intel assembly language, I made an error in the
- inline code for IO writes. Made a few other changes and
- repairs - this version (fingers crossed) should work well.
-
-UPDATE NEWS: version 1.25 - 05 Mar 96
-
- Kernel 1.3.70 interrupt mods added; old kernels still OK.
- Big help from Bill Earnest and David Willmore on speed
- testing and optimizing: I think there's a real improvement
- in this area.
- New! User-friendly command-line interface for LILO and
- module loading - the old method is gone, so you'll need
- to read the comments for 'setup_strings' near the top
- of in2000.c. For people with CDROM's or other devices
- that have a tough time with sync negotiation, you can
- now selectively disable sync on individual devices -
- search for the 'nosync' keyword in the command-line
- comments. Some of you disable the BIOS on the card, which
- caused the auto-detect function to fail; there is now a
- command-line option to force detection of a ROM-less card.
-
-UPDATE NEWS: version 1.24a - 24 Feb 96
-
- There was a bug in the synchronous transfer code. Only
- a few people downloaded before I caught it - could have
- been worse.
-
-UPDATE NEWS: version 1.24 - 23 Feb 96
-
- Lots of good changes. Advice from Bill Earnest resulted
- in much better detection of cards, more efficient usage
- of the fifo, and (hopefully) faster data transfers. The
- jury is still out on speed - I hope it's improved some.
- One nifty new feature is a cool way of doing disconnect/
- reselect. The driver defaults to what I'm calling
- 'adaptive disconnect' - meaning that each command is
- evaluated individually as to whether or not it should be
- run with the option to disconnect/reselect (if the device
- chooses), or as a "SCSI-bus-hog". When several devices
- are operating simultaneously, disconnects are usually an
- advantage. In a single device system, or if only 1 device
- is being accessed, transfers usually go faster if disconnects
- are not allowed.
-
-
-
-The default arguments (you get these when you don't give an 'in2000'
-command-line argument, or you give a blank argument) will cause
-the driver to do adaptive disconnect, synchronous transfers, and a
-minimum of debug messages. If you want to fool with the options,
-search for 'setup_strings' near the top of the in2000.c file and
-check the 'hostdata->args' section in in2000.h - but be warned! Not
-everything is working yet (some things will never work, probably).
-I believe that disabling disconnects (DIS_NEVER) will allow you
-to choose a LEVEL2 value higher than 'L2_BASIC', but I haven't
-spent a lot of time testing this. You might try 'ENABLE_CLUSTERING'
-to see what happens: my tests showed little difference either way.
-There's also a define called 'DEFAULT_SX_PER'; this sets the data
-transfer speed for the asynchronous mode. I've put it at 500 ns
-despite the fact that the card could handle settings of 376 or
-252, because higher speeds may be a problem with poor quality
-cables or improper termination; 500 ns is a compromise. You can
-choose your own default through the command-line with the
-'period' keyword.
-
-
-------------------------------------------------
-*********** DIP switch settings **************
-------------------------------------------------
-
- sw1-1 sw1-2 BIOS address (hex)
- -----------------------------------------
- off off C8000 - CBFF0
- on off D8000 - DBFF0
- off on D0000 - D3FF0
- on on BIOS disabled
-
- sw1-3 sw1-4 IO port address (hex)
- ------------------------------------
- off off 220 - 22F
- on off 200 - 20F
- off on 110 - 11F
- on on 100 - 10F
-
- sw1-5 sw1-6 sw1-7 Interrupt
- ------------------------------
- off off off 15
- off on off 14
- off off on 11
- off on on 10
- on - - disabled
-
- sw1-8 function depends on BIOS version. In earlier versions this
- controlled synchronous data transfer support for MSDOS:
- off = disabled
- on = enabled
- In later ROMs (starting with 01.3 in April 1994) sw1-8 controls
- the "greater than 2 disk drive" feature that first appeared in
- MSDOS 5.0 (ignored by Linux):
- off = 2 drives maximum
- on = 7 drives maximum
-
- sw1-9 Floppy controller
- --------------------------
- off disabled
- on enabled
-
-------------------------------------------------
-
- I should mention that Drew Eckhardt's 'Generic NCR5380' sources
- were my main inspiration, with lots of reference to the IN2000
- driver currently distributed in the kernel source. I also owe
- much to a driver written by Hamish Macdonald for Linux-m68k(!).
- And to Eric Wright for being an ALPHA guinea pig. And to Bill
- Earnest for 2 tons of great input and information. And to David
- Willmore for extensive 'bonnie' testing. And to Joe Mack for
- continual testing and feedback.
-
-
- John Shifflett jshiffle@netcom.com
-
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 1241ac11edb1..8e66dafa41e1 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -34,9 +34,6 @@ parameters may be changed at runtime by the command
See drivers/scsi/BusLogic.c, comment before function
BusLogic_ParseDriverOptions().
- dtc3181e= [HW,SCSI]
- See Documentation/scsi/g_NCR5380.txt.
-
eata= [HW,SCSI]
fdomain= [HW,SCSI]
@@ -47,9 +44,6 @@ parameters may be changed at runtime by the command
gvp11= [HW,SCSI]
- in2000= [HW,SCSI]
- See header of drivers/scsi/in2000.c.
-
ips= [HW,SCSI] Adaptec / IBM ServeRAID controller
See header of drivers/scsi/ips.c.
@@ -79,15 +73,10 @@ parameters may be changed at runtime by the command
ncr53c8xx= [HW,SCSI]
- nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
-
osst= [HW,SCSI] SCSI Tape Driver
Format: <buffer_size>,<write_threshold>
See also Documentation/scsi/st.txt.
- pas16= [HW,SCSI]
- See header of drivers/scsi/pas16.c.
-
scsi_debug_*= [SCSI]
See drivers/scsi/scsi_debug.c.
@@ -121,18 +110,9 @@ parameters may be changed at runtime by the command
sym53c416= [HW,SCSI]
See header of drivers/scsi/sym53c416.c.
- t128= [HW,SCSI]
- See header of drivers/scsi/t128.c.
-
tmscsim= [HW,SCSI]
See comment before function dc390_setup() in
drivers/scsi/tmscsim.c.
- u14-34f= [HW,SCSI] UltraStor 14F/34F SCSI host adapter
- See header of drivers/scsi/u14-34f.c.
-
wd33c93= [HW,SCSI]
See header of drivers/scsi/wd33c93.c.
-
- wd7000= [HW,SCSI]
- See header of drivers/scsi/wd7000.c.
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
new file mode 100644
index 000000000000..ab377d9e5d1b
--- /dev/null
+++ b/Documentation/scsi/smartpqi.txt
@@ -0,0 +1,80 @@
+
+SMARTPQI - Microsemi Smart PQI Driver
+-----------------------------------------
+
+This file describes the smartpqi SCSI driver for Microsemi
+(http://www.microsemi.com) PQI controllers. The smartpqi driver
+is the next generation SCSI driver for Microsemi Corp. The smartpqi
+driver is the first SCSI driver to implement the PQI queuing model.
+
+The smartpqi driver will replace the aacraid driver for Adaptec Series 9
+controllers. Customers running an older kernel (Pre-4.9) using an Adaptec
+Series 9 controller will have to configure the smartpqi driver or their
+volumes will not be added to the OS.
+
+For Microsemi smartpqi controller support, enable the smartpqi driver
+when configuring the kernel.
+
+For more information on the PQI Queuing Interface, please see:
+http://www.t10.org/drafts.htm
+http://www.t10.org/members/w_pqi2.htm
+
+Supported devices:
+------------------
+<Controller names to be added as they become publicly available.>
+
+smartpqi specific entries in /sys
+---------------------------------
+
+ smartpqi host attributes:
+ -------------------------
+ /sys/class/scsi_host/host*/rescan
+ /sys/class/scsi_host/host*/version
+
+ The host rescan attribute is a write-only attribute. Writing to this
+ attribute will trigger the driver to scan for new, changed, or removed
+ devices and notify the SCSI mid-layer of any changes detected.
+
+ The version attribute is read-only and will return the driver version
+ and the controller firmware version.
+ For example:
+ driver: 0.9.13-370
+ firmware: 0.01-522
+
+ smartpqi sas device attributes
+ ------------------------------
+ HBA devices are added to the SAS transport layer. These attributes are
+ automatically added by the SAS transport layer.
+
+ /sys/class/sas_device/end_device-X:X/sas_address
+ /sys/class/sas_device/end_device-X:X/enclosure_identifier
+ /sys/class/sas_device/end_device-X:X/scsi_target_id
+
+smartpqi specific ioctls:
+-------------------------
+
+ For compatibility with applications written for the cciss protocol.
+
+ CCISS_DEREGDISK
+ CCISS_REGNEWDISK
+ CCISS_REGNEWD
+
+ The above three ioctls all do the same thing: they cause the driver to
+ rescan for new devices, exactly as writing to the smartpqi-specific host
+ "rescan" attribute does.
+
+ CCISS_GETPCIINFO
+
+ Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
+
+ CCISS_GETDRIVVER
+
+ Returns driver version in three bytes encoded as:
+ (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+ CCISS_PASSTHRU
+
+ Allows "BMIC" and "CISS" commands to be passed through to the Smart Storage Array.
+ These are used extensively by the SSA Array Configuration Utility, SNMP storage
+ agents, etc.
+
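As an illustration of the version encoding above, an application could
unpack the value returned by CCISS_GETDRIVVER as follows (a sketch; the
field widths are inferred from the shifts shown and the helper function is
hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the packed driver version word described above. */
    static void print_driver_version(uint32_t ver)
    {
            unsigned major    = (ver >> 28) & 0xf;
            unsigned minor    = (ver >> 24) & 0xf;
            unsigned release  = (ver >> 16) & 0xff;
            unsigned revision = ver & 0xffff;

            printf("driver: %u.%u.%u-%u\n", major, minor, release, revision);
    }
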
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index 2253b8b45a74..389fcd4759e9 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -45,9 +45,8 @@
#include <linux/serial.h>
- /* RS485 ioctls: */
- #define TIOCGRS485 0x542E
- #define TIOCSRS485 0x542F
+ /* Include definition for RS485 ioctls: TIOCGRS485 and TIOCSRS485 */
+ #include <sys/ioctl.h>
/* Open your specific device (e.g., /dev/mydevice): */
int fd = open ("/dev/mydevice", O_RDWR);
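
Continuing that snippet, a typical read-modify-write of the RS485
configuration with the two ioctls named above might look like the sketch
below (error handling is minimal and the flag choices are only an example):

    struct serial_rs485 rs485conf;

    /* Read the current RS485 settings; failure usually means the
     * serial driver behind this port has no RS485 support. */
    if (ioctl(fd, TIOCGRS485, &rs485conf) < 0)
            return -1;

    rs485conf.flags |= SER_RS485_ENABLED;       /* switch the port to RS485 mode */
    rs485conf.flags |= SER_RS485_RTS_ON_SEND;   /* assert RTS while sending */
    rs485conf.delay_rts_after_send = 0;         /* delay in milliseconds */

    if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
            return -1;                          /* driver rejected the settings */
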
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css
index e88461c4c1e6..d5764a4de5a2 100644
--- a/Documentation/sphinx-static/theme_overrides.css
+++ b/Documentation/sphinx-static/theme_overrides.css
@@ -42,6 +42,20 @@
caption a.headerlink { opacity: 0; }
caption a.headerlink:hover { opacity: 1; }
+ /* Menu selection and keystrokes */
+
+ span.menuselection {
+ color: blue;
+ font-family: "Courier New", Courier, monospace
+ }
+
+ code.kbd, code.kbd span {
+ color: white;
+ background-color: darkblue;
+ font-weight: bold;
+ font-family: "Courier New", Courier, monospace
+ }
+
/* inline literal: drop the borderbox, padding and red color */
code, .rst-content tt, .rst-content code {
@@ -55,5 +69,4 @@
.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal {
color: inherit;
}
-
}
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
new file mode 100644
index 000000000000..df0419c62096
--- /dev/null
+++ b/Documentation/sphinx/cdomain.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=W0141,C0113,C0103,C0325
+u"""
+ cdomain
+ ~~~~~~~
+
+ Replacement for the sphinx c-domain.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+ List of customizations:
+
+ * Moved the *duplicate C object description* warnings for function
+ declarations in the nitpicky mode. See Sphinx documentation for
+ the config values for ``nitpick`` and ``nitpick_ignore``.
+
+ * Add option 'name' to the "c:function:" directive. With option 'name' the
+ ref-name of a function can be modified. E.g.::
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+ The func-name (e.g. ioctl) remains in the output but the ref-name changed
+ from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
+
+ * :c:func:`VIDIOC_LOG_STATUS` or
+ * :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
+
+ * Handle signatures of function-like macros well. Don't try to deduce
+ arguments types of function-like macros.
+
+"""
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+
+import sphinx
+from sphinx import addnodes
+from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
+from sphinx.domains.c import CObject as Base_CObject
+from sphinx.domains.c import CDomain as Base_CDomain
+
+__version__ = '1.0'
+
+# Get Sphinx version
+major, minor, patch = map(int, sphinx.__version__.split("."))
+
+def setup(app):
+
+ app.override_domain(CDomain)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+class CObject(Base_CObject):
+
+ """
+ Description of a C language object.
+ """
+ option_spec = {
+ "name" : directives.unchanged
+ }
+
+ def handle_func_like_macro(self, sig, signode):
+ u"""Handles signatures of function-like macros.
+
+ If the objtype is 'function' and the signature ``sig`` is a
+ function-like macro, the name of the macro is returned. Otherwise
+ ``False`` is returned. """
+
+ if not self.objtype == 'function':
+ return False
+
+ m = c_funcptr_sig_re.match(sig)
+ if m is None:
+ m = c_sig_re.match(sig)
+ if m is None:
+ raise ValueError('no match')
+
+ rettype, fullname, arglist, _const = m.groups()
+ arglist = arglist.strip()
+ if rettype or not arglist:
+ return False
+
+ arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
+ arglist = [a.strip() for a in arglist.split(",")]
+
+ # does the first argument have a type?
+ if len(arglist[0].split(" ")) > 1:
+ return False
+
+ # This is a function-like macro; its arguments are typeless!
+ signode += addnodes.desc_name(fullname, fullname)
+ paramlist = addnodes.desc_parameterlist()
+ signode += paramlist
+
+ for argname in arglist:
+ param = addnodes.desc_parameter('', '', noemph=True)
+ # separate by non-breaking space in the output
+ param += nodes.emphasis(argname, argname)
+ paramlist += param
+
+ return fullname
+
+ def handle_signature(self, sig, signode):
+ """Transform a C signature into RST nodes."""
+
+ fullname = self.handle_func_like_macro(sig, signode)
+ if not fullname:
+ fullname = super(CObject, self).handle_signature(sig, signode)
+
+ if "name" in self.options:
+ if self.objtype == 'function':
+ fullname = self.options["name"]
+ else:
+ # FIXME: handle :name: value of other declaration types?
+ pass
+ return fullname
+
+ def add_target_and_index(self, name, sig, signode):
+ # for C API items we add a prefix since names are usually not qualified
+ # by a module name and so easily clash with e.g. section titles
+ targetname = 'c.' + name
+ if targetname not in self.state.document.ids:
+ signode['names'].append(targetname)
+ signode['ids'].append(targetname)
+ signode['first'] = (not self.names)
+ self.state.document.note_explicit_target(signode)
+ inv = self.env.domaindata['c']['objects']
+ if (name in inv and self.env.config.nitpicky):
+ if self.objtype == 'function':
+ if ('c:func', name) not in self.env.config.nitpick_ignore:
+ self.state_machine.reporter.warning(
+ 'duplicate C object description of %s, ' % name +
+ 'other instance in ' + self.env.doc2path(inv[name][0]),
+ line=self.lineno)
+ inv[name] = (self.env.docname, self.objtype)
+
+ indextext = self.get_index_text(name)
+ if indextext:
+ if major == 1 and minor < 4:
+ # indexnode's tuple changed in 1.4
+ # https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c
+ self.indexnode['entries'].append(
+ ('single', indextext, targetname, ''))
+ else:
+ self.indexnode['entries'].append(
+ ('single', indextext, targetname, '', None))
+
+class CDomain(Base_CDomain):
+
+ """C language domain."""
+ name = 'c'
+ label = 'C'
+ directives = {
+ 'function': CObject,
+ 'member': CObject,
+ 'macro': CObject,
+ 'type': CObject,
+ 'var': CObject,
+ }
diff --git a/Documentation/sphinx/kernel-doc.py b/Documentation/sphinx/kernel-doc.py
index f6920c0af6ee..d15e07f36881 100644
--- a/Documentation/sphinx/kernel-doc.py
+++ b/Documentation/sphinx/kernel-doc.py
@@ -39,6 +39,8 @@ from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
from sphinx.ext.autodoc import AutodocReporter
+__version__ = '1.0'
+
class KernelDocDirective(Directive):
"""Extract kernel-doc comments from the specified file"""
required_argument = 1
@@ -139,3 +141,9 @@ def setup(app):
app.add_config_value('kerneldoc_verbosity', 1, 'env')
app.add_directive('kernel-doc', KernelDocDirective)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index db5738238733..f523aa68a36b 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -39,11 +39,18 @@ from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.directives.misc import Include
+__version__ = '1.0'
+
# ==============================================================================
def setup(app):
# ==============================================================================
app.add_directive("kernel-include", KernelInclude)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
# ==============================================================================
class KernelInclude(Include):
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
new file mode 100644
index 000000000000..301a21aa4f63
--- /dev/null
+++ b/Documentation/sphinx/load_config.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+import os
+import sys
+from sphinx.util.pycompat import execfile_
+
+# ------------------------------------------------------------------------------
+def loadConfig(namespace):
+# ------------------------------------------------------------------------------
+
+ u"""Load an additional configuration file into *namespace*.
+
+ The name of the configuration file is taken from the environment
+ ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
+ configuration values from the origin ``conf.py``. With this you are able to
+ maintain *build themes*. """
+
+ config_file = os.environ.get("SPHINX_CONF", None)
+ if (config_file is not None
+ and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
+ config_file = os.path.abspath(config_file)
+
+ if os.path.isfile(config_file):
+ sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
+ config = namespace.copy()
+ config['__file__'] = config_file
+ execfile_(config_file, config)
+ del config['__file__']
+ namespace.update(config)
+ else:
+ sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file)
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index 34bd9e2630b0..74089b0da798 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -220,7 +220,7 @@ $data =~ s/\n\s+\n/\n\n/g;
#
# Add escape codes for special characters
#
-$data =~ s,([\_\`\*\<\>\&\\\\:\/\|]),\\$1,g;
+$data =~ s,([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^]),\\$1,g;
$data =~ s,DEPRECATED,**DEPRECATED**,g;
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 26db852e3c74..55f275793028 100644..100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -73,6 +73,12 @@ def setup(app):
roles.register_local_role('cspan', c_span)
roles.register_local_role('rspan', r_span)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
# ==============================================================================
def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
# ==============================================================================
diff --git a/Documentation/stable_api_nonsense.txt b/Documentation/stable_api_nonsense.txt
index db3be892afb2..24f5aeecee91 100644
--- a/Documentation/stable_api_nonsense.txt
+++ b/Documentation/stable_api_nonsense.txt
@@ -1,17 +1,26 @@
+.. _stable_api_nonsense:
+
The Linux Kernel Driver Interface
+==================================
+
(all of your questions answered and then some)
Greg Kroah-Hartman <greg@kroah.com>
-This is being written to try to explain why Linux does not have a binary
-kernel interface, nor does it have a stable kernel interface. Please
-realize that this article describes the _in kernel_ interfaces, not the
-kernel to userspace interfaces. The kernel to userspace interface is
-the one that application programs use, the syscall interface. That
-interface is _very_ stable over time, and will not break. I have old
-programs that were built on a pre 0.9something kernel that still work
-just fine on the latest 2.6 kernel release. That interface is the one
-that users and application programmers can count on being stable.
+This is being written to try to explain why Linux **does not have a binary
+kernel interface, nor does it have a stable kernel interface**.
+
+.. note::
+
+ Please realize that this article describes the **in kernel** interfaces, not
+ the kernel to userspace interfaces.
+
+ The kernel to userspace interface is the one that application programs use,
+ the syscall interface. That interface is **very** stable over time, and
+ will not break. I have old programs that were built on a pre 0.9something
+ kernel that still work just fine on the latest 2.6 kernel release.
+ That interface is the one that users and application programmers can count
+ on being stable.
Executive Summary
@@ -33,7 +42,7 @@ to worry about the in-kernel interfaces changing. For the majority of
the world, they neither see this interface, nor do they care about it at
all.
-First off, I'm not going to address _any_ legal issues about closed
+First off, I'm not going to address **any** legal issues about closed
source, hidden source, binary blobs, source wrappers, or any other term
that describes kernel drivers that do not have their source code
released under the GPL. Please consult a lawyer if you have any legal
@@ -51,19 +60,23 @@ Binary Kernel Interface
Assuming that we had a stable kernel source interface for the kernel, a
binary interface would naturally happen too, right? Wrong. Please
consider the following facts about the Linux kernel:
+
- Depending on the version of the C compiler you use, different kernel
data structures will contain different alignment of structures, and
possibly include different functions in different ways (putting
functions inline or not.) The individual function organization
isn't that important, but the different data structure padding is
very important.
+
- Depending on what kernel build options you select, a wide range of
different things can be assumed by the kernel:
+
- different structures can contain different fields
- Some functions may not be implemented at all, (i.e. some locks
compile away to nothing for non-SMP builds.)
- Memory within the kernel can be aligned in different ways,
depending on the build options.
+
- Linux runs on a wide range of different processor architectures.
There is no way that binary drivers from one architecture will run
on another architecture properly.
@@ -105,6 +118,7 @@ As a specific examples of this, the in-kernel USB interfaces have
undergone at least three different reworks over the lifetime of this
subsystem. These reworks were done to address a number of different
issues:
+
- A change from a synchronous model of data streams to an asynchronous
one. This reduced the complexity of a number of drivers and
increased the throughput of all USB drivers such that we are now
@@ -166,6 +180,7 @@ very little effort on your part.
The very good side effects of having your driver in the main kernel tree
are:
+
- The quality of the driver will rise as the maintenance costs (to the
original developer) will decrease.
- Other developers will add features to your driver.
@@ -175,7 +190,7 @@ are:
changes require it.
- The driver automatically gets shipped in all Linux distributions
without having to ask the distros to add it.
-
+
As Linux supports a larger number of different devices "out of the box"
than any other operating system, and it supports these devices on more
different processor architectures than any other operating system, this
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index ffd4575ec9f2..4d82e31b7958 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -1,4 +1,7 @@
-Everything you ever wanted to know about Linux -stable releases.
+.. _stable_kernel_rules:
+
+Everything you ever wanted to know about Linux -stable releases
+===============================================================
Rules on what kind of patches are accepted, and which ones are not, into the
"-stable" tree:
@@ -23,68 +26,94 @@ Rules on what kind of patches are accepted, and which ones are not, into the
race can be exploited is also provided.
- It cannot contain any "trivial" fixes in it (spelling changes,
whitespace cleanups, etc).
- - It must follow the Documentation/SubmittingPatches rules.
+ - It must follow the
+ :ref:`Documentation/SubmittingPatches <submittingpatches>`
+ rules.
- It or an equivalent fix must already exist in Linus' tree (upstream).
-Procedure for submitting patches to the -stable tree:
+Procedure for submitting patches to the -stable tree
+----------------------------------------------------
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
Documentation/networking/netdev-FAQ.txt
- Security patches should not be handled (solely) by the -stable review
- process but should follow the procedures in Documentation/SecurityBugs.
+ process but should follow the procedures in
+ :ref:`Documentation/SecurityBugs <securitybugs>`.
+
+For all other submissions, choose one of the following procedures
+-----------------------------------------------------------------
+
+.. _option_1:
-For all other submissions, choose one of the following procedures:
+Option 1
+********
- --- Option 1 ---
+To have the patch automatically included in the stable tree, add the tag
+
+.. code-block:: none
- To have the patch automatically included in the stable tree, add the tag
Cc: stable@vger.kernel.org
- in the sign-off area. Once the patch is merged it will be applied to
- the stable tree without anything else needing to be done by the author
- or subsystem maintainer.
- --- Option 2 ---
+in the sign-off area. Once the patch is merged it will be applied to
+the stable tree without anything else needing to be done by the author
+or subsystem maintainer.
+
+.. _option_2:
- After the patch has been merged to Linus' tree, send an email to
- stable@vger.kernel.org containing the subject of the patch, the commit ID,
- why you think it should be applied, and what kernel version you wish it to
- be applied to.
+Option 2
+********
- --- Option 3 ---
+After the patch has been merged to Linus' tree, send an email to
+stable@vger.kernel.org containing the subject of the patch, the commit ID,
+why you think it should be applied, and what kernel version you wish it to
+be applied to.
- Send the patch, after verifying that it follows the above rules, to
- stable@vger.kernel.org. You must note the upstream commit ID in the
- changelog of your submission, as well as the kernel version you wish
- it to be applied to.
+.. _option_3:
-Option 1 is *strongly* preferred, is the easiest and most common. Options 2 and
-3 are more useful if the patch isn't deemed worthy at the time it is applied to
-a public git tree (for instance, because it deserves more regression testing
-first). Option 3 is especially useful if the patch needs some special handling
-to apply to an older kernel (e.g., if API's have changed in the meantime).
+Option 3
+********
-Note that for Option 3, if the patch deviates from the original upstream patch
-(for example because it had to be backported) this must be very clearly
-documented and justified in the patch description.
+Send the patch, after verifying that it follows the above rules, to
+stable@vger.kernel.org. You must note the upstream commit ID in the
+changelog of your submission, as well as the kernel version you wish
+it to be applied to.
+
+:ref:`option_1` is **strongly** preferred, and is the easiest and most common.
+:ref:`option_2` and :ref:`option_3` are more useful if the patch isn't deemed
+worthy at the time it is applied to a public git tree (for instance, because
+it deserves more regression testing first). :ref:`option_3` is especially
+useful if the patch needs some special handling to apply to an older kernel
+(e.g., if APIs have changed in the meantime).
+
+Note that for :ref:`option_3`, if the patch deviates from the original
+upstream patch (for example because it had to be backported) this must be very
+clearly documented and justified in the patch description.
The upstream commit ID must be specified with a separate line above the commit
text, like this:
+.. code-block:: none
+
commit <sha1> upstream.
Additionally, some patches submitted via Option 1 may have additional patch
prerequisites which can be cherry-picked. This can be specified in the following
format in the sign-off area:
+.. code-block:: none
+
Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
Cc: <stable@vger.kernel.org> # 3.3.x
- Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+The tag sequence has the meaning of:
+
+.. code-block:: none
- The tag sequence has the meaning of:
git cherry-pick a1f84a3
git cherry-pick 1b9508f
git cherry-pick fd21073
@@ -93,12 +122,17 @@ format in the sign-off area:
Also, some patches may have kernel version prerequisites. This can be
specified in the following format in the sign-off area:
+.. code-block:: none
+
Cc: <stable@vger.kernel.org> # 3.3.x-
- The tag has the meaning of:
+The tag has the meaning of:
+
+.. code-block:: none
+
git cherry-pick <this commit>
- For each "-stable" tree starting with the specified version.
+For each "-stable" tree starting with the specified version.
Following the submission:
@@ -109,7 +143,8 @@ Following the submission:
other developers and by the relevant subsystem maintainer.
-Review cycle:
+Review cycle
+------------
- When the -stable maintainers decide for a review cycle, the patches will be
sent to the review committee, and the maintainer of the affected area of
@@ -125,17 +160,22 @@ Review cycle:
security kernel team, and not go through the normal review cycle.
Contact the kernel security team for more details on this procedure.
-Trees:
+Trees
+-----
- The queues of patches, for both completed versions and in progress
versions can be found at:
+
http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
+
- The finalized and tagged releases of all stable kernels can be found
in separate branches per version at:
+
http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
-Review committee:
+Review committee
+----------------
- This is made up of a number of kernel developers who have volunteered for
this task, and a few that haven't.
diff --git a/Documentation/sysctl/README b/Documentation/sysctl/README
index 8c3306e01d52..91f54ffa0077 100644
--- a/Documentation/sysctl/README
+++ b/Documentation/sysctl/README
@@ -69,6 +69,7 @@ proc/ <empty>
sunrpc/ SUN Remote Procedure Call (NFS)
vm/ memory management tuning
buffer and cache management
+user/ Per user per user namespace limits
These are the subdirs I have on my system. There might be more
or other subdirs in another setup. If you see another dir, I'd
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 302b5ed616a6..35e17f748ca7 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -265,6 +265,13 @@ aio-nr can grow to.
==============================================================
+mount-max:
+
+This denotes the maximum number of mounts that may exist
+in a mount namespace.
+
+==============================================================
+
2. /proc/sys/fs/binfmt_misc
----------------------------------------------------------
diff --git a/Documentation/sysctl/user.txt b/Documentation/sysctl/user.txt
new file mode 100644
index 000000000000..1291c498f78f
--- /dev/null
+++ b/Documentation/sysctl/user.txt
@@ -0,0 +1,66 @@
+Documentation for /proc/sys/user/* kernel version 4.9.0
+ (c) 2016 Eric Biederman <ebiederm@xmission.com>
+
+==============================================================
+
+This file contains the documentation for the sysctl files in
+/proc/sys/user.
+
+The files in this directory can be used to override the default
+limits on the number of namespaces and other objects that have
+per user per user namespace limits.
+
+The primary purpose of these limits is to stop programs that
+malfunction and attempt to create a ridiculous number of objects,
+before the malfunction becomes a system wide problem. It is the
+intention that the defaults of these limits are set high enough that
+no program in normal operation should run into these limits.
+
+The creation of per user per user namespace objects is charged to
+the user in the user namespace who created the object and is
+verified to be below the per user limit in that user namespace.
+
+The creation of objects is also charged to each of the users who
+created the user namespaces that the object is created inside of
+(user namespaces can be nested) and is verified to be below the per
+user limits in the user namespaces of those users.
+
+This recursive counting of created objects ensures that creating a
+user namespace does not allow a user to escape their current limits.
+
+Currently, these files are in /proc/sys/user:
+
+- max_cgroup_namespaces
+
+ The maximum number of cgroup namespaces that any user in the current
+ user namespace may create.
+
+- max_ipc_namespaces
+
+ The maximum number of ipc namespaces that any user in the current
+ user namespace may create.
+
+- max_mnt_namespaces
+
+ The maximum number of mount namespaces that any user in the current
+ user namespace may create.
+
+- max_net_namespaces
+
+ The maximum number of network namespaces that any user in the
+ current user namespace may create.
+
+- max_pid_namespaces
+
+ The maximum number of pid namespaces that any user in the current
+ user namespace may create.
+
+- max_user_namespaces
+
+ The maximum number of user namespaces that any user in the current
+ user namespace may create.
+
+- max_uts_namespaces
+
+ The maximum number of uts namespaces that any user in the current
+ user namespace may create.
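
These limits are exposed as ordinary text files, so they can be read (and,
with sufficient privilege, written) from a program as well as from the
shell.  A minimal sketch reading one of the limits listed above:

    #include <stdio.h>

    /* Print the current per-user limit on user namespaces. */
    int main(void)
    {
            unsigned long limit;
            FILE *f = fopen("/proc/sys/user/max_user_namespaces", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%lu", &limit) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("max_user_namespaces: %lu\n", limit);
            return 0;
    }
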
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index a6b3705e62a6..185c39fea2a0 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -858,11 +858,11 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
When enabled, it will account time the task has been
scheduled out as part of the function call.
- graph-time - When running function graph tracer, to include the
- time to call nested functions. When this is not set,
- the time reported for the function will only include
- the time the function itself executed for, not the time
- for functions that it called.
+ graph-time - When running function profiler with function graph tracer,
+ to include the time to call nested functions. When this is
+ not set, the time reported for the function will only
+ include the time the function itself executed for, not the
+ time for functions that it called.
record-cmd - When any event or tracer is enabled, a hook is enabled
in the sched_switch trace point to fill comm cache
diff --git a/Documentation/trace/hwlat_detector.txt b/Documentation/trace/hwlat_detector.txt
new file mode 100644
index 000000000000..3207717a0d1a
--- /dev/null
+++ b/Documentation/trace/hwlat_detector.txt
@@ -0,0 +1,79 @@
+Introduction:
+-------------
+
+The tracer hwlat_detector is a special purpose tracer that is used to
+detect large system latencies induced by the behavior of certain underlying
+hardware or firmware, independent of Linux itself. The code was developed
+originally to detect SMIs (System Management Interrupts) on x86 systems,
+however there is nothing x86-specific about this tracer. It was
+originally written for use by the "RT" patch since the Real Time
+kernel is highly latency sensitive.
+
+SMIs are not serviced by the Linux kernel, which means that it does not
+even know that they are occurring. SMIs are instead set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes though, SMIs are used for
+other tasks and those tasks can spend an inordinate amount of time in the
+handler (sometimes measured in milliseconds). Obviously this is a problem if
+you are trying to keep event service latencies down in the microsecond range.
+
+The hardware latency detector works by hogging one of the cpus for configurable
+amounts of time (with interrupts disabled), polling the CPU Time Stamp Counter
+for some period, then looking for gaps in the TSC data. Any gap indicates a
+time when the polling was interrupted and since the interrupts are disabled,
+the only thing that could do that would be an SMI or other hardware hiccup
+(or an NMI, but those can be tracked).
+
+Note that the hwlat detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Write the ASCII text "hwlat" into the current_tracer file of the tracing system
+(mounted at /sys/kernel/tracing or /sys/kernel/debug/tracing). It is possible to
+redefine the threshold in microseconds (us) above which latency spikes will
+be taken into account.
+
+Example:
+
+ # echo hwlat > /sys/kernel/tracing/current_tracer
+ # echo 100 > /sys/kernel/tracing/tracing_thresh
+
+The /sys/kernel/tracing/hwlat_detector interface contains the following files:
+
+width - time period to sample with CPUs held (usecs)
+ must be less than the total window size (enforced)
+window - total period of sampling, width being inside (usecs)
+
+By default the width is set to 500,000 and window to 1,000,000, meaning that
+for every 1,000,000 usecs (1s) the hwlat detector will spin for 500,000 usecs
+(0.5s). If tracing_thresh contains zero when hwlat tracer is enabled, it will
+change to a default of 10 usecs. If any latency that exceeds the threshold
+is observed, the data will be written to the tracing ring buffer.
+
+The minimum sleep time between periods is 1 millisecond; even if window
+exceeds width by less than 1 millisecond, the detector still sleeps for at
+least 1 millisecond so that the system is not totally starved.
+
+If tracing_thresh was zero when hwlat detector was started, it will be set
+back to zero if another tracer is loaded. Note, the last value in
+tracing_thresh that hwlat detector had will be saved and this value will
+be restored in tracing_thresh if it is still zero when hwlat detector is
+started again.
+
+The following tracing directory files are used by the hwlat_detector:
+
+in /sys/kernel/tracing:
+
+ tracing_thresh - minimum latency value to be considered (usecs)
+ tracing_max_latency - maximum hardware latency actually observed (usecs)
+ tracing_cpumask - the CPUs to move the hwlat thread across
+ hwlat_detector/width - specified amount of time to spin within window (usecs)
+ hwlat_detector/window - amount of time between (width) runs (usecs)
+
+The hwlat detector's kernel thread will migrate across each CPU specified in
+tracing_cpumask between each window. To limit the migration, either modify
+tracing_cpumask, or modify the hwlat kernel thread (named [hwlatd]) CPU
+affinity directly, and the migration will stop.
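
The same tracefs files can of course be driven from a program instead of
the shell.  The sketch below configures the width, window, and threshold
described above and then enables the tracer; it assumes tracefs is mounted
at /sys/kernel/tracing and that the chosen values (in microseconds) are
sensible for the system under test:

    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            /* width must stay below window; the kernel enforces this. */
            write_str("/sys/kernel/tracing/hwlat_detector/width", "500000");
            write_str("/sys/kernel/tracing/hwlat_detector/window", "1000000");
            write_str("/sys/kernel/tracing/tracing_thresh", "100");
            write_str("/sys/kernel/tracing/current_tracer", "hwlat");
            return 0;
    }
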
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
new file mode 100644
index 000000000000..6081a5b7fc1e
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
@@ -0,0 +1,38 @@
+ARM Virtual Interrupt Translation Service (ITS)
+===============================================
+
+Device types supported:
+ KVM_DEV_TYPE_ARM_VGIC_ITS ARM Interrupt Translation Service Controller
+
+The ITS allows MSI(-X) interrupts to be injected into guests. This extension is
+optional. Creating a virtual ITS controller also requires a host GICv3 (see
+arm-vgic-v3.txt), but does not depend on having physical ITS controllers.
+
+There can be multiple ITS controllers per guest, each of them has to have
+a separate, non-overlapping MMIO region.
+
+
+Groups:
+ KVM_DEV_ARM_VGIC_GRP_ADDR
+ Attributes:
+ KVM_VGIC_ITS_ADDR_TYPE (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3 ITS
+ control register frame.
+ This address needs to be 64K aligned and the region covers 128K.
+ Errors:
+ -E2BIG: Address outside of addressable IPA range
+ -EINVAL: Incorrectly aligned address
+ -EEXIST: Address already configured
+ -EFAULT: Invalid user pointer for attr->addr.
+ -ENODEV: Incorrect attribute or the ITS is not supported.
+
+
+ KVM_DEV_ARM_VGIC_GRP_CTRL
+ Attributes:
+ KVM_DEV_ARM_VGIC_CTRL_INIT
+ request the initialization of the ITS, no additional parameter in
+ kvm_device_attr.addr.
+ Errors:
+ -ENXIO: ITS not properly configured as required prior to setting
+ this attribute
+ -ENOMEM: Memory shortage when allocating ITS internal data
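
Putting the two groups above together, a VMM could create and initialize a
virtual ITS roughly as follows.  This is a sketch only: the base address is
an arbitrary example, error handling is reduced to early returns, and it
assumes <linux/kvm.h> pulls in the arm64 KVM_DEV_ARM_VGIC_* and
KVM_VGIC_ITS_ADDR_TYPE definitions on the build host:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_vgic_its(int vm_fd, __u64 its_base /* 64K aligned */)
    {
            struct kvm_create_device cd = {
                    .type = KVM_DEV_TYPE_ARM_VGIC_ITS,
            };
            struct kvm_device_attr attr;

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                    return -1;

            /* Place the 128K ITS control register frame in guest memory. */
            attr = (struct kvm_device_attr) {
                    .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                    .attr  = KVM_VGIC_ITS_ADDR_TYPE,
                    .addr  = (__u64)(unsigned long)&its_base,
            };
            if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            /* Request initialization of the ITS; no payload is needed. */
            attr = (struct kvm_device_attr) {
                    .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                    .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
            };
            return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
    }
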
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
new file mode 100644
index 000000000000..9348b3caccd7
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
@@ -0,0 +1,206 @@
+ARM Virtual Generic Interrupt Controller v3 and later (VGICv3)
+==============================================================
+
+
+Device types supported:
+ KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
+
+Only one VGIC instance may be instantiated through this API. The created VGIC
+will act as the VM interrupt controller, requiring emulated user-space devices
+to inject interrupts to the VGIC instead of directly to CPUs. It is not
+possible to create both a GICv3 and GICv2 on the same VM.
+
+Creating a guest GICv3 device requires a host GICv3 as well.
+
+
+Groups:
+ KVM_DEV_ARM_VGIC_GRP_ADDR
+ Attributes:
+ KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3 distributor
+ register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+ This address needs to be 64K aligned and the region covers 64 KByte.
+
+ KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3
+ redistributor register mappings. There are two 64K pages for each
+ VCPU and all of the redistributor pages are contiguous.
+ Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+ This address needs to be 64K aligned.
+ Errors:
+ -E2BIG: Address outside of addressable IPA range
+ -EINVAL: Incorrectly aligned address
+ -EEXIST: Address already configured
+ -ENXIO: The group or attribute is unknown/unsupported for this device
+ or hardware support is missing.
+ -EFAULT: Invalid user pointer for attr->addr.
+
+
+
+ KVM_DEV_ARM_VGIC_GRP_DIST_REGS
+ KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+ Attributes:
+ The attr field of kvm_device_attr encodes two values:
+ bits: | 63 .... 32 | 31 .... 0 |
+ values: | mpidr | offset |
+
+ All distributor regs are (rw, 32-bit) and kvm_device_attr.addr points to a
+ __u32 value. 64-bit registers must be accessed by separately accessing the
+ lower and higher word.
+
+ Writes to read-only registers are ignored by the kernel.
+
+ KVM_DEV_ARM_VGIC_GRP_DIST_REGS accesses the main distributor registers.
+ KVM_DEV_ARM_VGIC_GRP_REDIST_REGS accesses the redistributor of the CPU
+ specified by the mpidr.
+
+ The offset is relative to the "[Re]Distributor base address" as defined
+ in the GICv3/4 specs. Getting or setting such a register has the same
+ effect as reading or writing the register on real hardware, except for the
+ following registers: GICD_STATUSR, GICR_STATUSR, GICD_ISPENDR,
+ GICR_ISPENDR0, GICD_ICPENDR, and GICR_ICPENDR0. These registers behave
+ differently when accessed via this interface compared to their
+ architecturally defined behavior to allow software a full view of the
+ VGIC's internal state.
+
+ The mpidr field is used to specify which
+ redistributor is accessed. The mpidr is ignored for the distributor.
+
+ The mpidr encoding is based on the affinity information in the
+ architecture defined MPIDR, and the field is encoded as follows:
+ | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+ | Aff3 | Aff2 | Aff1 | Aff0 |
+
+ Note that distributor fields are not banked, but return the same value
+ regardless of the mpidr used to access the register.
+
+ The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such
+ that a write of a clear bit has no effect, whereas a write with a set bit
+ clears that value. To allow userspace to freely set the values of these two
+ registers, setting the attributes with the register offsets for these two
+ registers simply sets the non-reserved bits to the value written.
+
+
+ Accesses (reads and writes) to the GICD_ISPENDR register region and
+ GICR_ISPENDR0 registers get/set the value of the latched pending state for
+ the interrupts.
+
+ This is identical to the value returned by a guest read from ISPENDR for an
+ edge triggered interrupt, but may differ for level triggered interrupts.
+ For edge triggered interrupts, once an interrupt becomes pending (whether
+ because of an edge detected on the input line or because of a guest write
+ to ISPENDR) this state is "latched", and only cleared when either the
+ interrupt is activated or when the guest writes to ICPENDR. A level
+ triggered interrupt may be pending either because the level input is held
+ high by a device, or because of a guest write to the ISPENDR register. Only
+ ISPENDR writes are latched; if the device lowers the line level then the
+ interrupt is no longer pending unless the guest also wrote to ISPENDR, and
+ conversely writes to ICPENDR or activations of the interrupt do not clear
+ the pending status if the line level is still being held high. (These
+ rules are documented in the GICv3 specification descriptions of the ICPENDR
+ and ISPENDR registers.) For a level triggered interrupt the value accessed
+ here is that of the latch which is set by ISPENDR and cleared by ICPENDR or
+ interrupt activation, whereas the value returned by a guest read from
+ ISPENDR is the logical OR of the latch value and the input line level.
+
+ Raw access to the latch state is provided to userspace so that it can save
+ and restore the entire GIC internal state (which is defined by the
+ combination of the current input line level and the latch state, and cannot
+ be deduced from purely the line level and the value of the ISPENDR
+ registers).
+
+ Accesses to GICD_ICPENDR register region and GICR_ICPENDR0 registers have
+ RAZ/WI semantics, meaning that reads always return 0 and writes are always
+ ignored.
+
+ Errors:
+ -ENXIO: Getting or setting this register is not yet supported
+ -EBUSY: One or more VCPUs are running
+
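A sketch of how the attr encoding above might be built in userspace. The
GICD_ISPENDR1 and GICR_CTLR offsets come from the GICv3 architecture
specification, and the affinity values are illustrative:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Pack Aff3..Aff0 into bits 63..32 and a register offset into bits 31..0 */
    static uint64_t vgic_attr(uint8_t a3, uint8_t a2, uint8_t a1, uint8_t a0,
                              uint32_t offset)
    {
        uint64_t mpidr = ((uint64_t)a3 << 24) | (a2 << 16) | (a1 << 8) | a0;

        return (mpidr << 32) | offset;
    }

    /* Latched pending state of SPIs 32..63 (GICD_ISPENDR1, offset 0x0204) */
    static int read_gicd_ispendr1(int vgic_fd, uint32_t *val)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
            .attr  = vgic_attr(0, 0, 0, 0, 0x0204),  /* mpidr is ignored here */
            .addr  = (uint64_t)val,
        };

        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }

    /* GICR_CTLR (offset 0x0000) of the VCPU with affinity 0.0.0.1 */
    static int read_gicr_ctlr(int vgic_fd, uint32_t *val)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
            .attr  = vgic_attr(0, 0, 0, 1, 0x0000),
            .addr  = (uint64_t)val,
        };

        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }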
+
+ KVM_DEV_ARM_VGIC_CPU_SYSREGS
+ Attributes:
+ The attr field of kvm_device_attr encodes two values:
+ bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 |
+ values: | mpidr | RES | instr |
+
+ The mpidr field encodes the CPU ID based on the affinity information in the
+ architecture defined MPIDR, and the field is encoded as follows:
+ | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+ | Aff3 | Aff2 | Aff1 | Aff0 |
+
+ The instr field encodes the system register to access based on the fields
+ defined in the A64 instruction set encoding for system register access
+ (RES means the bits are reserved for future use and should be zero):
+
+ | 15 ... 14 | 13 ... 11 | 10 ... 7 | 6 ... 3 | 2 ... 0 |
+ | Op 0 | Op1 | CRn | CRm | Op2 |
+
+ All system regs accessed through this API are (rw, 64-bit) and
+ kvm_device_attr.addr points to a __u64 value.
+
+ KVM_DEV_ARM_VGIC_CPU_SYSREGS accesses the CPU interface registers for the
+ CPU specified by the mpidr field.
+
+ Errors:
+ -ENXIO: Getting or setting this register is not yet supported
+ -EBUSY: VCPU is running
+ -EINVAL: Invalid mpidr supplied
+
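A sketch of packing the instr field, using the ICC_CTLR_EL1 encoding
(Op0=3, Op1=0, CRn=12, CRm=12, Op2=4) from the ARM architecture
specification purely as an example; the group constant name is the one
used in this document:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static uint64_t vgic_sysreg_attr(uint64_t mpidr_aff, unsigned op0,
                                     unsigned op1, unsigned crn,
                                     unsigned crm, unsigned op2)
    {
        uint64_t instr = (op0 << 14) | (op1 << 11) | (crn << 7) |
                         (crm << 3) | op2;

        return (mpidr_aff << 32) | instr;  /* bits 31..16 stay zero (RES) */
    }

    static int read_icc_ctlr_el1(int vgic_fd, uint64_t mpidr_aff, uint64_t *val)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_CPU_SYSREGS,  /* group name as documented above */
            .attr  = vgic_sysreg_attr(mpidr_aff, 3, 0, 12, 12, 4),
            .addr  = (uint64_t)val,
        };

        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }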
+
+ KVM_DEV_ARM_VGIC_GRP_NR_IRQS
+ Attributes:
+ A value describing the number of interrupts (SGI, PPI and SPI) for
+ this GIC instance, ranging from 64 to 1024, in increments of 32.
+
+ kvm_device_attr.addr points to a __u32 value.
+
+ Errors:
+ -EINVAL: Value set is out of the expected range
+ -EBUSY: Value has already been set.
+
+
+ KVM_DEV_ARM_VGIC_GRP_CTRL
+ Attributes:
+ KVM_DEV_ARM_VGIC_CTRL_INIT
+ request the initialization of the VGIC, no additional parameter in
+ kvm_device_attr.addr.
+ Errors:
+ -ENXIO: VGIC not properly configured as required prior to setting
+ this attribute
+ -ENODEV: no online VCPU
+ -ENOMEM: memory shortage when allocating vgic internal data
+
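A sketch combining the two groups above, assuming vgic_fd refers to the VGIC
device and 128 is just an example IRQ count within the documented range:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int finalize_vgic(int vgic_fd)
    {
        uint32_t nr_irqs = 128;  /* illustrative: multiple of 32, within 64..1024 */
        struct kvm_device_attr nr_attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
            .addr  = (uint64_t)&nr_irqs,
        };
        struct kvm_device_attr init_attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
            .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
        };

        if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &nr_attr) < 0)
            return -1;  /* -EINVAL or -EBUSY as listed above */
        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &init_attr);
    }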
+
+ KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
+ Attributes:
+ The attr field of kvm_device_attr encodes the following values:
+ bits: | 63 .... 32 | 31 .... 10 | 9 .... 0 |
+ values: | mpidr | info | vINTID |
+
+ The vINTID specifies which set of IRQs is reported on.
+
+ The info field specifies which information userspace wants to get or set
+ using this interface. Currently we support the following info values:
+
+ VGIC_LEVEL_INFO_LINE_LEVEL:
+ Get/Set the input level of the IRQ line for a set of 32 contiguously
+ numbered interrupts.
+ vINTID must be a multiple of 32.
+
+ kvm_device_attr.addr points to a __u32 value which will contain a
+ bitmap where a set bit means the interrupt level is asserted.
+
+ Bit[n] indicates the status for interrupt vINTID + n.
+
+ SGIs and any interrupt with an ID higher than the number of interrupts
+ supported will be RAZ/WI. LPIs are always edge-triggered and are
+ therefore not supported by this interface.
+
+ PPIs are reported per VCPU as specified in the mpidr field, and SPIs are
+ reported with the same value regardless of the mpidr specified.
+
+ The mpidr field encodes the CPU ID based on the affinity information in the
+ architecture defined MPIDR, and the field is encoded as follows:
+ | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
+ | Aff3 | Aff2 | Aff1 | Aff0 |
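A sketch of reading the line level of SPIs 32..63 with this encoding; the
shift amounts follow the attr layout above, and the names are as documented:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int get_spi_line_levels(int vgic_fd, uint32_t *bitmap)
    {
        uint64_t mpidr_aff = 0;  /* SPIs: the mpidr is ignored */
        uint64_t info = VGIC_LEVEL_INFO_LINE_LEVEL;
        uint64_t vintid = 32;    /* must be a multiple of 32 */
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
            .attr  = (mpidr_aff << 32) | (info << 10) | vintid,
            .addr  = (uint64_t)bitmap,  /* bit n = level of INTID 32 + n */
        };

        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }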
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 89182f80cc7f..76e61c883347 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -1,24 +1,19 @@
-ARM Virtual Generic Interrupt Controller (VGIC)
-===============================================
+ARM Virtual Generic Interrupt Controller v2 (VGIC)
+==================================================
Device types supported:
KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0
- KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
- KVM_DEV_TYPE_ARM_VGIC_ITS ARM Interrupt Translation Service Controller
-Only one VGIC instance of the V2/V3 types above may be instantiated through
-either this API or the legacy KVM_CREATE_IRQCHIP api. The created VGIC will
-act as the VM interrupt controller, requiring emulated user-space devices to
-inject interrupts to the VGIC instead of directly to CPUs.
+Only one VGIC instance may be instantiated through either this API or the
+legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt
+controller, requiring emulated user-space devices to inject interrupts to the
+VGIC instead of directly to CPUs.
-Creating a guest GICv3 device requires a host GICv3 as well.
-GICv3 implementations with hardware compatibility support allow a guest GICv2
-as well.
+GICv3 implementations with hardware compatibility support allow creating a
+guest GICv2 through this interface. For information on creating a guest GICv3
+device and guest ITS devices, see arm-vgic-v3.txt. It is not possible to
+create both a GICv3 and GICv2 device on the same VM.
-Creating a virtual ITS controller requires a host GICv3 (but does not depend
-on having physical ITS controllers).
-There can be multiple ITS controllers per guest, each of them has to have
-a separate, non-overlapping MMIO region.
Groups:
KVM_DEV_ARM_VGIC_GRP_ADDR
@@ -32,26 +27,13 @@ Groups:
Base address in the guest physical address space of the GIC virtual cpu
interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
This address needs to be 4K aligned and the region covers 4 KByte.
-
- KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
- Base address in the guest physical address space of the GICv3 distributor
- register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
- This address needs to be 64K aligned and the region covers 64 KByte.
-
- KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit)
- Base address in the guest physical address space of the GICv3
- redistributor register mappings. There are two 64K pages for each
- VCPU and all of the redistributor pages are contiguous.
- Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
- This address needs to be 64K aligned.
-
- KVM_VGIC_V3_ADDR_TYPE_ITS (rw, 64-bit)
- Base address in the guest physical address space of the GICv3 ITS
- control register frame. The ITS allows MSI(-X) interrupts to be
- injected into guests. This extension is optional. If the kernel
- does not support the ITS, the call returns -ENODEV.
- Only valid for KVM_DEV_TYPE_ARM_VGIC_ITS.
- This address needs to be 64K aligned and the region covers 128K.
+ Errors:
+ -E2BIG: Address outside of addressable IPA range
+ -EINVAL: Incorrectly aligned address
+ -EEXIST: Address already configured
+ -ENXIO: The group or attribute is unknown/unsupported for this device
+ or hardware support is missing.
+ -EFAULT: Invalid user pointer for attr->addr.
KVM_DEV_ARM_VGIC_GRP_DIST_REGS
Attributes:
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
index c04165868faf..02f50686c418 100644
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ b/Documentation/virtual/kvm/devices/vcpu.txt
@@ -30,4 +30,6 @@ Returns: -ENODEV: PMUv3 not supported
attribute
-EBUSY: PMUv3 already initialized
-Request the initialization of the PMUv3.
+Request the initialization of the PMUv3. This must be done after creating the
+in-kernel irqchip. Creating a PMU with a userspace irqchip is currently not
+supported.
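A sketch of the corresponding userspace call sequence, assuming vcpu_fd came
from KVM_CREATE_VCPU and using the PMUv3 group/attribute names this file
documents:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int init_vcpu_pmu(int vcpu_fd)
    {
        struct kvm_device_attr attr = {
            .group = KVM_ARM_VCPU_PMU_V3_CTRL,
            .attr  = KVM_ARM_VCPU_PMU_V3_INIT,
        };

        if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) < 0)
            return -1;  /* -ENODEV: PMUv3 not supported */
        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }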
diff --git a/Documentation/vme_api.txt b/Documentation/vme_api.txt
index ca5b82797f6c..90006550f485 100644
--- a/Documentation/vme_api.txt
+++ b/Documentation/vme_api.txt
@@ -8,13 +8,14 @@ As with other subsystems within the Linux kernel, VME device drivers register
with the VME subsystem, typically called from the devices init routine. This is
achieved via a call to the following function:
- int vme_register_driver (struct vme_driver *driver);
+ int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
If driver registration is successful this function returns zero, if an error
occurred a negative error code will be returned.
A pointer to a structure of type 'vme_driver' must be provided to the
-registration function. The structure is as follows:
+registration function, along with ndevs, the number of devices your driver
+is able to support. The structure is as follows:
struct vme_driver {
struct list_head node;
@@ -32,8 +33,8 @@ At the minimum, the '.name', '.match' and '.probe' elements of this structure
should be correctly set. The '.name' element is a pointer to a string holding
the device driver's name.
-The '.match' function allows controlling the number of devices that need to
-be registered. The match function should return 1 if a device should be
+The '.match' function allows control over which VME devices should be registered
+with the driver. The match function should return 1 if a device should be
probed and 0 otherwise. This example match function (from vme_user.c) limits
the number of devices probed to one:
@@ -385,13 +386,13 @@ location monitor location. Each location monitor can monitor a number of
adjacent locations:
int vme_lm_attach(struct vme_resource *res, int num,
- void (*callback)(int));
+ void (*callback)(void *));
int vme_lm_detach(struct vme_resource *res, int num);
The callback function is declared as follows.
- void callback(int num);
+ void callback(void *data);
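A minimal sketch of a callback with the new signature, using the prototypes
exactly as shown above; my_lm_callback, my_lm_setup and monitor index 0 are
illustrative, and how the void * argument is supplied is not specified by
this document:

    #include <linux/module.h>
    #include <linux/vme.h>

    static void my_lm_callback(void *data)
    {
        /* data is whatever context was registered with this monitor */
        pr_info("VME location monitor triggered\n");
    }

    static int my_lm_setup(struct vme_resource *lm_res)
    {
        /* monitor location 0 of a previously requested LM resource */
        return vme_lm_attach(lm_res, 0, my_lm_callback);
    }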
Slot Detection
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 8c7dd5957ae1..5724092db811 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,13 +12,13 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
... unused hole ...
-ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
+ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
... unused hole ...
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
-ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
+ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
... unused hole ...
-ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
+ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 0312fe66475c..222a2c6748e6 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -3,15 +3,8 @@ MMUv3 initialization sequence.
The code in the initialize_mmu macro sets up MMUv3 memory mapping
identically to MMUv2 fixed memory mapping. Depending on
CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is
-located in one of the following address ranges:
-
- 0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
- typically ROM)
- 0x00000000..0x07FFFFFF (system RAM; this code is actually linked
- at 0xD0000000..0xD7FFFFFF [cached]
- or 0xD8000000..0xDFFFFFFF [uncached];
- in any case, initially runs elsewhere
- than linked, so have to be careful)
+located either at the addresses it was linked for (symbol undefined) or
+elsewhere (symbol defined), in which case it needs to be position-independent.
The code has the following assumptions:
This code fragment is run only on an MMU v3.
@@ -28,24 +21,26 @@ TLB setup proceeds along the following steps.
PA = physical address (two upper nibbles of it);
pc = physical range that contains this code;
-After step 2, we jump to virtual address in 0x40000000..0x5fffffff
-that corresponds to next instruction to execute in this code.
-After step 4, we jump to intended (linked) address of this code.
-
- Step 0 Step1 Step 2 Step3 Step 4 Step5
- ============ ===== ============ ===== ============ =====
- VA PA PA VA PA PA VA PA PA
- ------ -- -- ------ -- -- ------ -- --
- E0..FF -> E0 -> E0 E0..FF -> E0 F0..FF -> F0 -> F0
- C0..DF -> C0 -> C0 C0..DF -> C0 E0..EF -> F0 -> F0
- A0..BF -> A0 -> A0 A0..BF -> A0 D8..DF -> 00 -> 00
- 80..9F -> 80 -> 80 80..9F -> 80 D0..D7 -> 00 -> 00
- 60..7F -> 60 -> 60 60..7F -> 60
- 40..5F -> 40 40..5F -> pc -> pc 40..5F -> pc
- 20..3F -> 20 -> 20 20..3F -> 20
- 00..1F -> 00 -> 00 00..1F -> 00
-
-The default location of IO peripherals is above 0xf0000000. This may change
+After step 2, we jump to a virtual address in the range 0x40000000..0x5fffffff
+or 0x00000000..0x1fffffff, depending on whether the kernel was loaded below
+0x40000000 or above. That address corresponds to the next instruction to
+execute in this code. After step 4, we jump to the intended (linked) address
+of this code.
+The scheme below assumes that the kernel is loaded below 0x40000000.
+
+ Step0 Step1 Step2 Step3 Step4 Step5
+ ===== ===== ===== ===== ===== =====
+ VA PA PA PA PA VA PA PA
+ ------ -- -- -- -- ------ -- --
+ E0..FF -> E0 -> E0 -> E0 F0..FF -> F0 -> F0
+ C0..DF -> C0 -> C0 -> C0 E0..EF -> F0 -> F0
+ A0..BF -> A0 -> A0 -> A0 D8..DF -> 00 -> 00
+ 80..9F -> 80 -> 80 -> 80 D0..D7 -> 00 -> 00
+ 60..7F -> 60 -> 60 -> 60
+ 40..5F -> 40 -> pc -> pc 40..5F -> pc
+ 20..3F -> 20 -> 20 -> 20
+ 00..1F -> 00 -> 00 -> 00
+
+The default location of IO peripherals is above 0xf0000000. This may be changed
using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, §6.5
for details on the syntax and semantic of simple-bus nodes. The following
limitations apply:
@@ -62,3 +57,127 @@ limitations apply:
6. The IO area covers the entire 256MB segment of parent-bus-address; the
"ranges" triplet length field is ignored
+
+
+MMUv3 address space layouts.
+============================
+
+Default MMUv2-compatible layout.
+
+ Symbol VADDR Size
++------------------+
+| Userspace | 0x00000000 TASK_SIZE
++------------------+ 0x40000000
++------------------+
+| Page table | 0x80000000
++------------------+ 0x80400000
++------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0xbffff000
++------------------+
+| VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB
++------------------+ VMALLOC_END
+| Cache aliasing | TLBTEMP_BASE_1 0xc7ff0000 DCACHE_WAY_SIZE
+| remap area 1 |
++------------------+
+| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
+| remap area 2 |
++------------------+
++------------------+
+| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB
++------------------+
+| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB
++------------------+
+| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
++------------------+
+| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
++------------------+
+
+
+256MB cached + 256MB uncached layout.
+
+ Symbol VADDR Size
++------------------+
+| Userspace | 0x00000000 TASK_SIZE
++------------------+ 0x40000000
++------------------+
+| Page table | 0x80000000
++------------------+ 0x80400000
++------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0x9ffff000
++------------------+
+| VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB
++------------------+ VMALLOC_END
+| Cache aliasing | TLBTEMP_BASE_1 0xa7ff0000 DCACHE_WAY_SIZE
+| remap area 1 |
++------------------+
+| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
+| remap area 2 |
++------------------+
++------------------+
+| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB
++------------------+
+| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB
++------------------+
++------------------+
+| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
++------------------+
+| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
++------------------+
+
+
+512MB cached + 512MB uncached layout.
+
+ Symbol VADDR Size
++------------------+
+| Userspace | 0x00000000 TASK_SIZE
++------------------+ 0x40000000
++------------------+
+| Page table | 0x80000000
++------------------+ 0x80400000
++------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0x8ffff000
++------------------+
+| VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB
++------------------+ VMALLOC_END
+| Cache aliasing | TLBTEMP_BASE_1 0x97ff0000 DCACHE_WAY_SIZE
+| remap area 1 |
++------------------+
+| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
+| remap area 2 |
++------------------+
++------------------+
+| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB
++------------------+
+| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB
++------------------+
+| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
++------------------+
+| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
++------------------+
diff --git a/MAINTAINERS b/MAINTAINERS
index bbfe20ca52b3..a009e004f8f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -636,6 +636,15 @@ F: drivers/tty/serial/altera_jtaguart.c
F: include/linux/altera_uart.h
F: include/linux/altera_jtaguart.h
+AMAZON ETHERNET DRIVERS
+M: Netanel Belgazal <netanel@annapurnalabs.com>
+R: Saeed Bishara <saeed@annapurnalabs.com>
+R: Zorik Machulsky <zorik@annapurnalabs.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/networking/ena.txt
+F: drivers/net/ethernet/amazon/
+
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com>
M: Gary Hook <gary.hook@amd.com>
@@ -810,11 +819,11 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/aoa/
-APEX EMBEDDED SYSTEMS STX104 DAC DRIVER
+APEX EMBEDDED SYSTEMS STX104 IIO DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: drivers/iio/dac/stx104.c
+F: drivers/iio/adc/stx104.c
APM DRIVER
M: Jiri Kosina <jikos@kernel.org>
@@ -857,6 +866,13 @@ F: drivers/net/phy/mdio-xgene.c
F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
+APPLIED MICRO (APM) X-GENE SOC PMU
+M: Tai Nguyen <ttnguyen@apm.com>
+S: Supported
+F: drivers/perf/xgene_pmu.c
+F: Documentation/perf/xgene-pmu.txt
+F: Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
+
APTINA CAMERA SENSOR PLL
M: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -994,6 +1010,7 @@ M: Chen-Yu Tsai <wens@csie.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
N: sun[x456789]i
+F: arch/arm/boot/dts/ntc-gr8*
ARM/Allwinner SoC Clock Support
M: Emilio López <emilio@elopez.com.ar>
@@ -1450,6 +1467,7 @@ F: arch/arm/mach-orion5x/ts78xx-*
ARM/OXNAS platform support
M: Neil Armstrong <narmstrong@baylibre.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-oxnas/
F: arch/arm/boot/dts/oxnas*
@@ -1847,8 +1865,10 @@ F: arch/arm/mach-uniphier/
F: arch/arm/mm/cache-uniphier.c
F: arch/arm64/boot/dts/socionext/
F: drivers/bus/uniphier-system-bus.c
+F: drivers/clk/uniphier/
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/pinctrl/uniphier/
+F: drivers/reset/reset-uniphier.c
F: drivers/tty/serial/8250/8250_uniphier.c
N: uniphier
@@ -1983,6 +2003,13 @@ S: Maintained
F: drivers/media/i2c/as3645a.c
F: include/media/i2c/as3645a.h
+ASAHI KASEI AK8974 DRIVER
+M: Linus Walleij <linus.walleij@linaro.org>
+L: linux-iio@vger.kernel.org
+W: http://www.akm.com/
+S: Supported
+F: drivers/iio/magnetometer/ak8974.c
+
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: linux-hwmon@vger.kernel.org
@@ -2121,11 +2148,6 @@ M: Ludovic Desroches <ludovic.desroches@atmel.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c
-ATMEL AT91 / AT32 SERIAL DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
-S: Supported
-F: drivers/tty/serial/atmel_serial.c
-
ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
@@ -2460,6 +2482,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: block/
F: kernel/trace/blktrace.c
+F: lib/sbitmap.c
BLOCK2MTD DRIVER
M: Joern Engel <joern@lazybastard.org>
@@ -2583,6 +2606,13 @@ F: arch/arm/mach-bcm/bcm_5301x.c
F: arch/arm/boot/dts/bcm5301x*.dtsi
F: arch/arm/boot/dts/bcm470*
+BROADCOM BCM53573 ARM ARCHITECTURE
+M: Rafał Miłecki <rafal@milecki.pl>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: arch/arm/boot/dts/bcm53573*
+F: arch/arm/boot/dts/bcm47189*
+
BROADCOM BCM63XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: bcm-kernel-feedback-list@broadcom.com
@@ -2896,6 +2926,14 @@ S: Maintained
F: drivers/iio/light/cm*
F: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+CAVIUM I2C DRIVER
+M: Jan Glauber <jglauber@cavium.com>
+M: David Daney <david.daney@cavium.com>
+W: http://www.cavium.com
+S: Supported
+F: drivers/i2c/busses/i2c-octeon*
+F: drivers/i2c/busses/i2c-thunderx*
+
CAVIUM LIQUIDIO NETWORK DRIVER
M: Derek Chickles <derek.chickles@caviumnetworks.com>
M: Satanand Burla <satananda.burla@caviumnetworks.com>
@@ -3143,7 +3181,7 @@ L: cocci@systeme.lip6.fr (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
W: http://coccinelle.lip6.fr/
S: Supported
-F: Documentation/coccinelle.txt
+F: Documentation/dev-tools/coccinelle.rst
F: scripts/coccinelle/
F: scripts/coccicheck
@@ -3169,6 +3207,7 @@ COMMON CLK FRAMEWORK
M: Michael Turquette <mturquette@baylibre.com>
M: Stephen Boyd <sboyd@codeaurora.org>
L: linux-clk@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-clk/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
F: Documentation/devicetree/bindings/clock/
@@ -3470,6 +3509,7 @@ L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org
S: Supported
F: drivers/infiniband/hw/cxgb3/
+F: include/uapi/rdma/cxgb3-abi.h
CXGB4 ETHERNET DRIVER (CXGB4)
M: Hariprasad S <hariprasad@chelsio.com>
@@ -3491,6 +3531,7 @@ L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org
S: Supported
F: drivers/infiniband/hw/cxgb4/
+F: include/uapi/rdma/cxgb4-abi.h
CXGB4VF ETHERNET DRIVER (CXGB4VF)
M: Casey Leedom <leedom@chelsio.com>
@@ -3501,14 +3542,14 @@ F: drivers/net/ethernet/chelsio/cxgb4vf/
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
M: Ian Munsie <imunsie@au1.ibm.com>
-M: Michael Neuling <mikey@neuling.org>
+M: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
+F: arch/powerpc/platforms/powernv/pci-cxl.c
F: drivers/misc/cxl/
F: include/misc/cxl*
F: include/uapi/misc/cxl.h
F: Documentation/powerpc/cxl.txt
-F: Documentation/powerpc/cxl.txt
F: Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
@@ -3771,8 +3812,8 @@ F: drivers/leds/leds-da90??.c
F: drivers/mfd/da903x.c
F: drivers/mfd/da90??-*.c
F: drivers/mfd/da91??-*.c
-F: drivers/power/da9052-battery.c
-F: drivers/power/da91??-*.c
+F: drivers/power/supply/da9052-battery.c
+F: drivers/power/supply/da91??-*.c
F: drivers/regulator/da903x.c
F: drivers/regulator/da9???-regulator.[ch]
F: drivers/rtc/rtc-da90??.c
@@ -3788,6 +3829,12 @@ F: include/linux/regulator/da9211.h
F: include/sound/da[79]*.h
F: sound/soc/codecs/da[79]*.[ch]
+DIAMOND SYSTEMS GPIO-MM GPIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-gpio-mm.c
+
DIGI NEO AND CLASSIC PCI PRODUCTS
M: Lidza Louina <lidza.louina@gmail.com>
M: Mark Hounschell <markh@compro.net>
@@ -4413,7 +4460,6 @@ F: Documentation/filesystems/ecryptfs.txt
F: fs/ecryptfs/
EDAC-CORE
-M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -4426,14 +4472,12 @@ F: drivers/edac/
F: include/linux/edac.h
EDAC-AMD64
-M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/amd64_edac*
EDAC-CALXEDA
-M: Doug Thompson <dougthompson@xmission.com>
M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
@@ -4449,17 +4493,21 @@ F: drivers/edac/octeon_edac*
EDAC-E752X
M: Mark Gross <mark.gross@intel.com>
-M: Doug Thompson <dougthompson@xmission.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/e752x_edac.c
EDAC-E7XXX
-M: Doug Thompson <dougthompson@xmission.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/e7xxx_edac.c
+EDAC-FSL_DDR
+M: York Sun <york.sun@nxp.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/fsl_ddr_edac.*
+
EDAC-GHES
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -4474,13 +4522,11 @@ S: Maintained
F: drivers/edac/i82443bxgx_edac.c
EDAC-I3000
-M: Jason Uhlenkott <juhlenko@akamai.com>
L: linux-edac@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/edac/i3000_edac.c
EDAC-I5000
-M: Doug Thompson <dougthompson@xmission.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i5000_edac.c
@@ -4866,6 +4912,7 @@ F: tools/firewire/
FIRMWARE LOADER (request_firmware)
M: Ming Lei <ming.lei@canonical.com>
+M: Luis R. Rodriguez <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
@@ -5086,10 +5133,9 @@ F: include/linux/fscrypto.h
F2FS FILE SYSTEM
M: Jaegeuk Kim <jaegeuk@kernel.org>
-M: Changman Lee <cm224.lee@samsung.com>
-R: Chao Yu <yuchao0@huawei.com>
+M: Chao Yu <yuchao0@huawei.com>
L: linux-f2fs-devel@lists.sourceforge.net
-W: http://en.wikipedia.org/wiki/F2FS
+W: https://f2fs.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
S: Maintained
F: Documentation/filesystems/f2fs.txt
@@ -5151,7 +5197,7 @@ GCOV BASED KERNEL PROFILING
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
S: Maintained
F: kernel/gcov/
-F: Documentation/gcov.txt
+F: Documentation/dev-tools/gcov.rst
GDT SCSI DISK ARRAY CONTROLLER DRIVER
M: Achim Leubner <achim_leubner@adaptec.com>
@@ -5268,6 +5314,13 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/goodix.c
+GPIO MOCKUP DRIVER
+M: Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-mockup.c
+F: tools/testing/selftests/gpio/
+
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Alexandre Courbot <gnurou@gmail.com>
@@ -5299,6 +5352,77 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/aeroflex/
+GREYBUS SUBSYSTEM
+M: Johan Hovold <johan@kernel.org>
+M: Alex Elder <elder@kernel.org>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S: Maintained
+F: drivers/staging/greybus/
+
+GREYBUS AUDIO PROTOCOLS DRIVERS
+M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
+M: Mark Greer <mgreer@animalcreek.com>
+S: Maintained
+F: drivers/staging/greybus/audio_apbridgea.c
+F: drivers/staging/greybus/audio_apbridgea.h
+F: drivers/staging/greybus/audio_codec.c
+F: drivers/staging/greybus/audio_codec.h
+F: drivers/staging/greybus/audio_gb.c
+F: drivers/staging/greybus/audio_manager.c
+F: drivers/staging/greybus/audio_manager.h
+F: drivers/staging/greybus/audio_manager_module.c
+F: drivers/staging/greybus/audio_manager_private.h
+F: drivers/staging/greybus/audio_manager_sysfs.c
+F: drivers/staging/greybus/audio_module.c
+F: drivers/staging/greybus/audio_topology.c
+
+GREYBUS PROTOCOLS DRIVERS
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+S: Maintained
+F: drivers/staging/greybus/sdio.c
+F: drivers/staging/greybus/light.c
+F: drivers/staging/greybus/gpio.c
+F: drivers/staging/greybus/power_supply.c
+F: drivers/staging/greybus/spi.c
+F: drivers/staging/greybus/spilib.c
+
+GREYBUS PROTOCOLS DRIVERS
+M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
+S: Maintained
+F: drivers/staging/greybus/loopback.c
+F: drivers/staging/greybus/timesync.c
+F: drivers/staging/greybus/timesync_platform.c
+
+GREYBUS PROTOCOLS DRIVERS
+M: Viresh Kumar <vireshk@kernel.org>
+S: Maintained
+F: drivers/staging/greybus/authentication.c
+F: drivers/staging/greybus/bootrom.c
+F: drivers/staging/greybus/firmware.h
+F: drivers/staging/greybus/fw-core.c
+F: drivers/staging/greybus/fw-download.c
+F: drivers/staging/greybus/fw-managament.c
+F: drivers/staging/greybus/greybus_authentication.h
+F: drivers/staging/greybus/greybus_firmware.h
+F: drivers/staging/greybus/hid.c
+F: drivers/staging/greybus/i2c.c
+F: drivers/staging/greybus/spi.c
+F: drivers/staging/greybus/spilib.c
+F: drivers/staging/greybus/spilib.h
+
+GREYBUS PROTOCOLS DRIVERS
+M: David Lin <dtwlin@gmail.com>
+S: Maintained
+F: drivers/staging/greybus/uart.c
+F: drivers/staging/greybus/log.c
+
+GREYBUS PLATFORM DRIVERS
+M: Vaibhav Hiremath <hvaibhav.linux@gmail.com>
+S: Maintained
+F: drivers/staging/greybus/arche-platform.c
+F: drivers/staging/greybus/arche-apb-ctrl.c
+F: drivers/staging/greybus/arche_platform.h
+
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
@@ -5590,6 +5714,14 @@ S: Maintained
F: drivers/net/ethernet/hisilicon/
F: Documentation/devicetree/bindings/net/hisilicon*.txt
+HISILICON ROCE DRIVER
+M: Lijun Ou <oulijun@huawei.com>
+M: Wei Hu(Xavier) <xavier.huwei@huawei.com>
+L: linux-rdma@vger.kernel.org
+S: Maintained
+F: drivers/infiniband/hw/hns/
+F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+
HISILICON SAS Controller
M: John Garry <john.garry@huawei.com>
W: http://www.hisilicon.com
@@ -5599,10 +5731,9 @@ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
HOST AP DRIVER
M: Jouni Malinen <j@w1.fi>
-L: hostap@shmoo.com (subscribers-only)
L: linux-wireless@vger.kernel.org
-W: http://hostap.epitest.fi/
-S: Maintained
+W: http://w1.fi/hostap-driver.html
+S: Obsolete
F: drivers/net/wireless/intersil/hostap/
HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
@@ -5639,7 +5770,7 @@ M: Sebastian Reichel <sre@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-hsi
-F: Documentation/hsi.txt
+F: Documentation/device-drivers/serial-interfaces.rst
F: drivers/hsi/
F: include/linux/hsi/
F: include/uapi/linux/hsi/
@@ -5693,6 +5824,8 @@ S: Maintained
F: Documentation/i2c/i2c-topology
F: Documentation/i2c/muxes/
F: Documentation/devicetree/bindings/i2c/i2c-mux*
+F: Documentation/devicetree/bindings/i2c/i2c-arb*
+F: Documentation/devicetree/bindings/i2c/i2c-gate*
F: drivers/i2c/i2c-mux.c
F: drivers/i2c/muxes/
F: include/linux/i2c-mux.h
@@ -6110,6 +6243,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
S: Supported
F: drivers/idle/intel_idle.c
+INTEL INTEGRATED SENSOR HUB DRIVER
+M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+M: Jiri Kosina <jikos@kernel.org>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/intel-ish-hid/
+
INTEL PSTATE DRIVER
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
M: Len Brown <lenb@kernel.org>
@@ -6620,7 +6760,7 @@ L: kasan-dev@googlegroups.com
S: Maintained
F: arch/*/include/asm/kasan.h
F: arch/*/mm/kasan_init*
-F: Documentation/kasan.txt
+F: Documentation/dev-tools/kasan.rst
F: include/linux/kasan*.h
F: lib/test_kasan.c
F: mm/kasan/
@@ -6836,7 +6976,7 @@ KMEMCHECK
M: Vegard Nossum <vegardno@ifi.uio.no>
M: Pekka Enberg <penberg@kernel.org>
S: Maintained
-F: Documentation/kmemcheck.txt
+F: Documentation/dev-tools/kmemcheck.rst
F: arch/x86/include/asm/kmemcheck.h
F: arch/x86/mm/kmemcheck/
F: include/linux/kmemcheck.h
@@ -6845,7 +6985,7 @@ F: mm/kmemcheck.c
KMEMLEAK
M: Catalin Marinas <catalin.marinas@arm.com>
S: Maintained
-F: Documentation/kmemleak.txt
+F: Documentation/dev-tools/kmemleak.rst
F: include/linux/kmemleak.h
F: mm/kmemleak.c
F: mm/kmemleak-test.c
@@ -7458,9 +7598,8 @@ F: Documentation/hwmon/max20751
F: drivers/hwmon/max20751.c
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M: "Hans J. Koch" <hjk@hansjkoch.de>
L: linux-hwmon@vger.kernel.org
-S: Maintained
+S: Orphan
F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
@@ -7485,8 +7624,8 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-pm@vger.kernel.org
S: Supported
-F: drivers/power/max14577_charger.c
-F: drivers/power/max77693_charger.c
+F: drivers/power/supply/max14577_charger.c
+F: drivers/power/supply/max77693_charger.c
MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
M: Javier Martinez Canillas <javier@osg.samsung.com>
@@ -7531,6 +7670,12 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/iio/potentiometer/mcp4531.c
+MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/dac/cio-dac.c
+
MEDIA DRIVERS FOR RENESAS - FCP
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -7683,6 +7828,13 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+MELLANOX MLXCPLD LED DRIVER
+M: Vadim Pasternak <vadimp@mellanox.com>
+L: linux-leds@vger.kernel.org
+S: Supported
+F: drivers/leds/leds-mlxcpld.c
+F: Documentation/leds/leds-mlxcpld.txt
+
MELLANOX PLATFORM DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
@@ -7774,6 +7926,12 @@ T: git git://git.monstr.eu/linux-2.6-microblaze.git
S: Supported
F: arch/microblaze/
+MICROCHIP / ATMEL AT91 / AT32 SERIAL DRIVER
+M: Richard Genoud <richard.genoud@gmail.com>
+S: Maintained
+F: drivers/tty/serial/atmel_serial.c
+F: include/linux/atmel_serial.h
+
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
M: Chen Yu <yu.c.chen@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -7822,6 +7980,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
S: Supported
F: drivers/net/ethernet/mellanox/mlx4/
F: include/linux/mlx4/
+F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX4 IB driver
M: Yishai Hadas <yishaih@mellanox.com>
@@ -7842,6 +8001,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
+F: include/uapi/rdma/mlx5-abi.h
MELLANOX MLX5 IB driver
M: Matan Barak <matanb@mellanox.com>
@@ -7860,6 +8020,18 @@ W: http://www.melexis.com
S: Supported
F: drivers/iio/temperature/mlx90614.c
+MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
+M: Don Brace <don.brace@microsemi.com>
+L: esc.storagedev@microsemi.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/smartpqi/smartpqi*.[ch]
+F: drivers/scsi/smartpqi/Kconfig
+F: drivers/scsi/smartpqi/Makefile
+F: include/linux/cciss*.h
+F: include/uapi/linux/cciss*.h
+F: Documentation/scsi/smartpqi.txt
+
MN88472 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -7985,6 +8157,7 @@ MULTIFUNCTION DEVICES (MFD)
M: Lee Jones <lee.jones@linaro.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
S: Supported
+F: Documentation/devicetree/bindings/mfd/
F: drivers/mfd/
F: include/linux/mfd/
@@ -8071,20 +8244,16 @@ M: Michael Schmitz <schmitzmic@gmail.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: Documentation/scsi/g_NCR5380.txt
-F: Documentation/scsi/dtc3x80.txt
F: drivers/scsi/NCR5380.*
F: drivers/scsi/arm/cumana_1.c
F: drivers/scsi/arm/oak.c
F: drivers/scsi/atari_scsi.*
F: drivers/scsi/dmx3191d.c
-F: drivers/scsi/dtc.*
F: drivers/scsi/g_NCR5380.*
F: drivers/scsi/g_NCR5380_mmio.c
F: drivers/scsi/mac_scsi.*
-F: drivers/scsi/pas16.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
-F: drivers/scsi/t128.*
NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -8105,6 +8274,7 @@ L: linux-rdma@vger.kernel.org
W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
S: Supported
F: drivers/infiniband/hw/nes/
+F: include/uapi/rdma/nes-abi.h
NETEM NETWORK EMULATOR
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -8373,11 +8543,11 @@ R: Pali Rohár <pali.rohar@gmail.com>
F: include/linux/power/bq2415x_charger.h
F: include/linux/power/bq27xxx_battery.h
F: include/linux/power/isp1704_charger.h
-F: drivers/power/bq2415x_charger.c
-F: drivers/power/bq27xxx_battery.c
-F: drivers/power/bq27xxx_battery_i2c.c
-F: drivers/power/isp1704_charger.c
-F: drivers/power/rx51_battery.c
+F: drivers/power/supply/bq2415x_charger.c
+F: drivers/power/supply/bq27xxx_battery.c
+F: drivers/power/supply/bq27xxx_battery_i2c.c
+F: drivers/power/supply/isp1704_charger.c
+F: drivers/power/supply/rx51_battery.c
NTB DRIVER CORE
M: Jon Mason <jdmason@kudzu.us>
@@ -9134,6 +9304,15 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
F: drivers/pci/host/pcie-hisi.c
+PCIE DRIVER FOR ROCKCHIP
+M: Shawn Lin <shawn.lin@rock-chips.com>
+M: Wenrui Li <wenrui.li@rock-chips.com>
+L: linux-pci@vger.kernel.org
+L: linux-rockchip@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+F: drivers/pci/host/pcie-rockchip.c
+
PCIE DRIVER FOR QUALCOMM MSM
M: Stanimir Varbanov <svarbanov@mm-sol.com>
L: linux-pci@vger.kernel.org
@@ -9287,6 +9466,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/samsung/
+F: include/dt-bindings/pinctrl/samsung.h
+F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
PIN CONTROLLER - SINGLE
M: Tony Lindgren <tony@atomide.com>
@@ -9377,16 +9558,12 @@ F: drivers/powercap/
POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
M: Sebastian Reichel <sre@kernel.org>
-M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M: David Woodhouse <dwmw2@infradead.org>
L: linux-pm@vger.kernel.org
-T: git git://git.infradead.org/battery-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
S: Maintained
-F: Documentation/devicetree/bindings/power/
-F: Documentation/devicetree/bindings/power_supply/
+F: Documentation/devicetree/bindings/power/supply/
F: include/linux/power_supply.h
-F: drivers/power/
-X: drivers/power/avs/
+F: drivers/power/supply/
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
@@ -9722,6 +9899,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath10k/
+QUALCOMM EMAC GIGABIT ETHERNET DRIVER
+M: Timur Tabi <timur@codeaurora.org>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/qualcomm/emac/
+
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -9942,6 +10125,12 @@ F: drivers/rpmsg/
F: Documentation/rpmsg.txt
F: include/linux/rpmsg.h
+RENESAS CLOCK DRIVERS
+M: Geert Uytterhoeven <geert+renesas@glider.be>
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: drivers/clk/renesas/
+
RENESAS ETHERNET DRIVERS
R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
L: netdev@vger.kernel.org
@@ -9977,6 +10166,7 @@ F: net/rfkill/
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
+M: Herbert Xu <herbert@gondor.apana.org.au>
L: netdev@vger.kernel.org
S: Maintained
F: lib/rhashtable.c
@@ -10120,8 +10310,8 @@ S: Supported
F: drivers/s390/cio/
S390 DASD DRIVER
-M: Stefan Weinhuber <wein@de.ibm.com>
-M: Stefan Haberland <stefan.haberland@de.ibm.com>
+M: Stefan Haberland <sth@linux.vnet.ibm.com>
+M: Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -10285,9 +10475,12 @@ F: drivers/nfc/s3fwrn5
SAMSUNG SOC CLOCK DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Tomasz Figa <tomasz.figa@gmail.com>
+M: Chanwoo Choi <cw00.choi@samsung.com>
S: Supported
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
F: drivers/clk/samsung/
+F: include/dt-bindings/clock/exynos*.h
+F: Documentation/devicetree/bindings/clock/exynos*.txt
SAMSUNG SPI DRIVERS
M: Kukjin Kim <kgene@kernel.org>
@@ -10385,8 +10578,8 @@ F: drivers/thunderbolt/
TI BQ27XXX POWER SUPPLY DRIVER
R: Andrew F. Davis <afd@ti.com>
F: include/linux/power/bq27xxx_battery.h
-F: drivers/power/bq27xxx_battery.c
-F: drivers/power/bq27xxx_battery_i2c.c
+F: drivers/power/supply/bq27xxx_battery.c
+F: drivers/power/supply/bq27xxx_battery_i2c.c
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <john.stultz@linaro.org>
@@ -10614,12 +10807,12 @@ S: Maintained
F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
-SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
-M: Ketan Mukadam <ketan.mukadam@avagotech.com>
-M: John Soni Jose <sony.john@avagotech.com>
+Emulex 10Gbps iSCSI - OneConnect DRIVER
+M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
+M: Ketan Mukadam <ketan.mukadam@broadcom.com>
+M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
L: linux-scsi@vger.kernel.org
-W: http://www.avagotech.com
+W: http://www.broadcom.com
S: Supported
F: drivers/scsi/be2iscsi/
@@ -10641,6 +10834,7 @@ L: linux-rdma@vger.kernel.org
W: http://www.emulex.com
S: Supported
F: drivers/infiniband/hw/ocrdma/
+F: include/uapi/rdma/ocrdma-abi.h
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
@@ -11146,6 +11340,7 @@ F: Documentation/spi/
F: drivers/spi/
F: include/linux/spi/
F: include/uapi/linux/spi/
+F: tools/spi/
SPIDERNET NETWORK DRIVER for CELL
M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
@@ -11216,6 +11411,7 @@ F: drivers/staging/media/lirc/
STAGING - LUSTRE PARALLEL FILESYSTEM
M: Oleg Drokin <oleg.drokin@intel.com>
M: Andreas Dilger <andreas.dilger@intel.com>
+M: James Simmons <jsimmons@infradead.org>
L: lustre-devel@lists.lustre.org (moderated for non-subscribers)
W: http://wiki.lustre.org/
S: Maintained
@@ -11242,13 +11438,6 @@ M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
S: Odd Fixes
F: drivers/staging/rtl8712/
-STAGING - REALTEK RTL8723U WIRELESS DRIVER
-M: Larry Finger <Larry.Finger@lwfinger.net>
-M: Jes Sorensen <Jes.Sorensen@redhat.com>
-L: linux-wireless@vger.kernel.org
-S: Maintained
-F: drivers/staging/rtl8723au/
-
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
@@ -11388,6 +11577,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
S: Supported
F: drivers/mfd/syscon.c
+SYSTEM RESET/SHUTDOWN DRIVERS
+M: Sebastian Reichel <sre@kernel.org>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
+S: Maintained
+F: Documentation/devicetree/bindings/power/reset/
+F: drivers/power/reset/
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
@@ -11736,7 +11933,7 @@ F: include/linux/platform_data/lp855x.h
TI LP8727 CHARGER DRIVER
M: Milo Kim <milo.kim@ti.com>
S: Maintained
-F: drivers/power/lp8727_charger.c
+F: drivers/power/supply/lp8727_charger.c
F: include/linux/platform_data/lp8727.h
TI LP8788 MFD DRIVER
@@ -11745,7 +11942,7 @@ S: Maintained
F: drivers/iio/adc/lp8788_adc.c
F: drivers/leds/leds-lp8788.c
F: drivers/mfd/lp8788*.c
-F: drivers/power/lp8788-charger.c
+F: drivers/power/supply/lp8788-charger.c
F: drivers/regulator/lp8788-*.c
F: include/linux/mfd/lp8788*.h
@@ -12014,12 +12211,6 @@ S: Maintained
F: drivers/tc/
F: include/linux/tc.h
-U14-34F SCSI DRIVER
-M: Dario Ballabio <ballabio_dario@emc.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/u14-34f.c
-
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
M: Artem Bityutskiy <dedekind1@gmail.com>
@@ -12215,7 +12406,7 @@ S: Maintained
F: drivers/net/usb/lan78xx.*
USB MASS STORAGE DRIVER
-M: Matthew Dharm <mdharm-usb@one-eyed-alien.net>
+M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
L: usb-storage@lists.one-eyed-alien.net
S: Maintained
@@ -12299,6 +12490,7 @@ F: drivers/net/usb/rtl8150.c
USB SERIAL SUBSYSTEM
M: Johan Hovold <johan@kernel.org>
L: linux-usb@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial.git
S: Maintained
F: Documentation/usb/usb-serial.txt
F: drivers/usb/serial/
@@ -12312,6 +12504,7 @@ F: drivers/net/usb/smsc75xx.*
USB SMSC95XX ETHERNET DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
+M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/usb/smsc95xx.*
@@ -12418,7 +12611,6 @@ F: fs/hostfs/
F: fs/hppfs/
USERSPACE I/O (UIO)
-M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
@@ -12746,12 +12938,6 @@ F: drivers/watchdog/
F: include/linux/watchdog.h
F: include/uapi/linux/watchdog.h
-WD7000 SCSI DRIVER
-M: Miroslav Zagorac <zaga@fly.cc.fer.hr>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/wd7000.c
-
WIIMOTE HID DRIVER
M: David Herrmann <dh.herrmann@googlemail.com>
L: linux-input@vger.kernel.org
@@ -12821,7 +13007,7 @@ F: drivers/input/touchscreen/wm97*.c
F: drivers/mfd/arizona*
F: drivers/mfd/wm*.c
F: drivers/mfd/cs47l24*
-F: drivers/power/wm83*.c
+F: drivers/power/supply/wm83*.c
F: drivers/rtc/rtc-wm83*.c
F: drivers/regulator/wm8*.c
F: drivers/video/backlight/wm83*_bl.c
@@ -12973,11 +13159,10 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
-P: Silicon Graphics Inc
M: Dave Chinner <david@fromorbit.com>
-M: xfs@oss.sgi.com
-L: xfs@oss.sgi.com
-W: http://oss.sgi.com/projects/xfs
+M: linux-xfs@vger.kernel.org
+L: linux-xfs@vger.kernel.org
+W: http://xfs.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
S: Supported
F: Documentation/filesystems/xfs.txt
diff --git a/Makefile b/Makefile
index 80b8671d5c46..addb235b537c 100644
--- a/Makefile
+++ b/Makefile
@@ -1425,7 +1425,7 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
-DOC_TARGETS := xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs
+DOC_TARGETS := xmldocs sgmldocs psdocs latexdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs
PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
diff --git a/README b/README
index e8c8a6dc1c2b..09f34f78f2bb 100644
--- a/README
+++ b/README
@@ -229,10 +229,6 @@ CONFIGURING the kernel:
under some circumstances lead to problems: probing for a
nonexistent controller card may confuse your other controllers
- - Compiling the kernel with "Processor type" set higher than 386
- will result in a kernel that does NOT work on a 386. The
- kernel will detect this on bootup, and give up.
-
- A kernel with math-emulation compiled in will still use the
coprocessor if one is present: the math emulation will just
never get used in that case. The kernel will be slightly larger,
@@ -289,7 +285,7 @@ COMPILING the kernel:
LOCALVERSION can be set in the "General Setup" menu.
- In order to boot your new kernel, you'll need to copy the kernel
- image (e.g. .../linux/arch/i386/boot/bzImage after compilation)
+ image (e.g. .../linux/arch/x86/boot/bzImage after compilation)
to the place where your regular bootable kernel is found.
- Booting a kernel directly from a floppy without the assistance of a
@@ -391,7 +387,7 @@ IF SOMETHING GOES WRONG:
- Alternatively, you can use gdb on a running kernel. (read-only; i.e. you
cannot change values or set break points.) To do this, first compile the
- kernel with -g; edit arch/i386/Makefile appropriately, then do a "make
+ kernel with -g; edit arch/x86/Makefile appropriately, then do a "make
clean". You'll also need to enable CONFIG_PROC_FS (via "make config").
After you've rebooted with the new kernel, do "gdb vmlinux /proc/kcore".
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 7f312d80b43b..0e49d39ea74a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -15,7 +15,6 @@ config ALPHA
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select AUDIT_ARCH
select GENERIC_CLOCKEVENTS
select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 647b84c15382..cebecfb76fbf 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 0b10ef2a4372..c332604606dd 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
+generic-y += msi.h
generic-y += param.h
generic-y += parport.h
generic-y += pci.h
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 36611072305f..f35ed578e007 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -89,6 +89,7 @@ SECTIONS
_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3cd9042fbb62..b5d529fdffab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,7 +2,6 @@ config ARM
bool
default y
select ARCH_CLOCKSOURCE_DATA
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -279,10 +278,9 @@ config PHYS_OFFSET
ARCH_INTEGRATOR || \
ARCH_IOP13XX || \
ARCH_KS8695 || \
- (ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET)
+ ARCH_REALVIEW
default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
default 0x20000000 if ARCH_S5PV210
- default 0x70000000 if REALVIEW_HIGH_PHYS_OFFSET
default 0xc0000000 if ARCH_SA1100
help
Please provide the physical address corresponding to the
@@ -338,6 +336,7 @@ config ARCH_MULTIPLATFORM
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select MULTI_IRQ_HANDLER
+ select PCI_DOMAINS if PCI
select SPARSE_IRQ
select USE_OF
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a9693b6987a6..d83f7c369e51 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -186,10 +186,11 @@ choice
config DEBUG_BRCMSTB_UART
bool "Use BRCMSTB UART for low-level debug"
depends on ARCH_BRCMSTB
- select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
- their output to the first serial port on these devices.
+ their output to the first serial port on these devices. The
+ UART physical and virtual address is automatically provided
+ based on the chip identification register value.
If you have a Broadcom STB chip and would like early print
messages to appear over the UART, select this option.
@@ -861,12 +862,12 @@ choice
via SCIF2 on Renesas R-Car H1 (R8A7779).
config DEBUG_RCAR_GEN2_SCIF0
- bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7793"
- depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7793
+ bool "Kernel low-level debugging messages via SCIF0 on R8A7790/R8A7791/R8A7792/R8A7793"
+ depends on ARCH_R8A7790 || ARCH_R8A7791 || ARCH_R8A7792 || ARCH_R8A7793
help
Say Y here if you want kernel low-level debugging support
- via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), or
- M2-N (R8A7793).
+ via SCIF0 on Renesas R-Car H2 (R8A7790), M2-W (R8A7791), V2H
+ (R8A7792), or M2-N (R8A7793).
config DEBUG_RCAR_GEN2_SCIF2
bool "Kernel low-level debugging messages via SCIF2 on R8A7794"
@@ -1430,6 +1431,7 @@ config DEBUG_LL_INCLUDE
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "debug/brcmstb.S" if DEBUG_BRCMSTB_UART
default "mach/debug-macro.S"
# Compatibility options for PL01x
@@ -1520,7 +1522,6 @@ config DEBUG_UART_PHYS
default 0xe6e60000 if DEBUG_RCAR_GEN2_SCIF0
default 0xe8008000 if DEBUG_R7S72100_SCIF2
default 0xf0000be0 if ARCH_EBSA110
- default 0xf040ab00 if DEBUG_BRCMSTB_UART
default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE
default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE
default 0xf7fc9000 if DEBUG_BERLIN_UART
@@ -1604,7 +1605,6 @@ config DEBUG_UART_VIRT
default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
default 0xfb00c000 if DEBUG_AT91_SAMA5D4_USART3
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
- default 0xfc40ab00 if DEBUG_BRCMSTB_UART
default 0xfc705000 if DEBUG_ZTE_ZX
default 0xfcfe8600 if DEBUG_BCM63XX_UART
default 0xfd000000 if DEBUG_SPEAR3XX || DEBUG_SPEAR13XX
@@ -1677,8 +1677,7 @@ config DEBUG_UART_8250_WORD
DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
DEBUG_DAVINCI_DA8XX_UART2 || \
- DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
- DEBUG_BRCMSTB_UART
+ DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2
config DEBUG_UART_8250_PALMCHIP
bool "8250 UART is Palmchip BK-310x"
@@ -1697,7 +1696,8 @@ config DEBUG_UNCOMPRESS
bool
depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
- (!DEBUG_TEGRA_UART || !ZBOOT_ROM)
+ (!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
+ !DEBUG_BRCMSTB_UART
help
This option influences the normal decompressor output for
multiplatform kernels. Normally, multiplatform kernels disable
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 61f6ccc19cfa..6be9ee148b78 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -23,7 +23,6 @@ ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
endif
-OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index bdc1d5af03d2..50f8d1be7fcb 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -11,6 +11,8 @@
# Copyright (C) 1995-2002 Russell King
#
+OBJCOPYFLAGS :=-O binary -R .comment -S
+
ifneq ($(MACHINE),)
include $(MACHINE)/Makefile.boot
endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index faacd52370d2..befcd2619902 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -69,7 +69,8 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
bcm2835-rpi-b-rev2.dtb \
bcm2835-rpi-b-plus.dtb \
bcm2835-rpi-a-plus.dtb \
- bcm2836-rpi-2-b.dtb
+ bcm2836-rpi-2-b.dtb \
+ bcm2835-rpi-zero.dtb
dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm4708-asus-rt-ac56u.dtb \
bcm4708-asus-rt-ac68u.dtb \
@@ -102,8 +103,13 @@ dtb-$(CONFIG_ARCH_BCM_MOBILE) += \
bcm21664-garnet.dtb \
bcm23550-sparrow.dtb
dtb-$(CONFIG_ARCH_BCM_NSP) += \
+ bcm958522er.dtb \
+ bcm958525er.dtb \
bcm958525xmc.dtb \
+ bcm958622hr.dtb \
+ bcm958623hr.dtb \
bcm958625hr.dtb \
+ bcm988312hr.dtb \
bcm958625k.dtb
dtb-$(CONFIG_ARCH_BERLIN) += \
berlin2-sony-nsz-gs7.dtb \
@@ -114,6 +120,7 @@ dtb-$(CONFIG_ARCH_BRCMSTB) += \
dtb-$(CONFIG_ARCH_CLPS711X) += \
ep7211-edb7211.dtb
dtb-$(CONFIG_ARCH_DAVINCI) += \
+ da850-lcdk.dtb \
da850-enbw-cmc.dtb \
da850-evm.dtb
dtb-$(CONFIG_ARCH_DIGICOLOR) += \
@@ -315,6 +322,7 @@ dtb-$(CONFIG_SOC_IMX53) += \
imx53-smd.dtb \
imx53-tx53-x03x.dtb \
imx53-tx53-x13x.dtb \
+ imx53-usbarmory.dtb \
imx53-voipac-bsb.dtb
dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-apf6dev.dtb \
@@ -330,6 +338,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-gw54xx.dtb \
imx6dl-gw551x.dtb \
imx6dl-gw552x.dtb \
+ imx6dl-gw553x.dtb \
imx6dl-hummingboard.dtb \
imx6dl-nit6xlite.dtb \
imx6dl-nitrogen6x.dtb \
@@ -339,6 +348,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-sabreauto.dtb \
imx6dl-sabrelite.dtb \
imx6dl-sabresd.dtb \
+ imx6dl-ts4900.dtb \
imx6dl-tx6dl-comtft.dtb \
imx6dl-tx6s-8034.dtb \
imx6dl-tx6s-8035.dtb \
@@ -368,6 +378,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-gw54xx.dtb \
imx6q-gw551x.dtb \
imx6q-gw552x.dtb \
+ imx6q-gw553x.dtb \
imx6q-h100.dtb \
imx6q-hummingboard.dtb \
imx6q-icore-rqs.dtb \
@@ -382,6 +393,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-sabresd.dtb \
imx6q-sbc6x.dtb \
imx6q-tbs2910.dtb \
+ imx6q-ts4900.dtb \
imx6q-tx6q-1010.dtb \
imx6q-tx6q-1010-comtft.dtb \
imx6q-tx6q-1020.dtb \
@@ -407,6 +419,7 @@ dtb-$(CONFIG_SOC_IMX6SX) += \
imx6sx-sdb.dtb
dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-14x14-evk.dtb \
+ imx6ul-geam-kit.dtb \
imx6ul-pico-hobbit.dtb \
imx6ul-tx6ul-0010.dtb \
imx6ul-tx6ul-0011.dtb \
@@ -417,7 +430,8 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-nitrogen7.dtb \
imx7d-sbc-imx7.dtb \
imx7d-sdb.dtb \
- imx7s-colibri-eval-v3.dtb
+ imx7s-colibri-eval-v3.dtb \
+ imx7s-warp.dtb
dtb-$(CONFIG_SOC_LS1021A) += \
ls1021a-qds.dtb \
ls1021a-twr.dtb
@@ -570,6 +584,7 @@ dtb-$(CONFIG_SOC_OMAP5) += \
omap5-uevm.dtb
dtb-$(CONFIG_SOC_DRA7XX) += \
am57xx-beagle-x15.dtb \
+ am57xx-beagle-x15-revb1.dtb \
am57xx-cl-som-am57x.dtb \
am57xx-sbc-am57x.dtb \
am572x-idk.dtb \
@@ -584,6 +599,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
orion5x-linkstation-lswtgl.dtb \
orion5x-lswsgl.dtb \
orion5x-maxtor-shared-storage-2.dtb \
+ orion5x-netgear-wnr854t.dtb \
orion5x-rd88f5182-nas.dtb
dtb-$(CONFIG_ARCH_PRIMA2) += \
prima2-evb.dtb
@@ -603,14 +619,19 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-ipq8064-ap148.dtb \
qcom-msm8660-surf.dtb \
qcom-msm8960-cdp.dtb \
+ qcom-msm8974-lge-nexus5-hammerhead.dtb \
qcom-msm8974-sony-xperia-honami.dtb
dtb-$(CONFIG_ARCH_REALVIEW) += \
arm-realview-pb1176.dtb \
arm-realview-pb11mp.dtb \
arm-realview-eb.dtb \
+ arm-realview-eb-bbrevd.dtb \
arm-realview-eb-11mp.dtb \
- arm-realview-eb-11mp-revb.dtb \
+ arm-realview-eb-11mp-bbrevd.dtb \
+ arm-realview-eb-11mp-ctrevb.dtb \
+ arm-realview-eb-11mp-bbrevd-ctrevb.dtb \
arm-realview-eb-a9mp.dtb \
+ arm-realview-eb-a9mp-bbrevd.dtb \
arm-realview-pba8.dtb \
arm-realview-pbx-a9.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += \
@@ -624,8 +645,10 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3229-evb.dtb \
rk3288-evb-act8846.dtb \
rk3288-evb-rk808.dtb \
+ rk3288-fennec.dtb \
rk3288-firefly-beta.dtb \
rk3288-firefly.dtb \
+ rk3288-firefly-reload.dtb \
rk3288-miqi.dtb \
rk3288-popmetal.dtb \
rk3288-r89.dtb \
@@ -651,6 +674,7 @@ dtb-$(CONFIG_ARCH_S5PV210) += \
dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
emev2-kzm9d.dtb \
r7s72100-genmai.dtb \
+ r7s72100-rskrza1.dtb \
r8a73a4-ape6evm.dtb \
r8a7740-armadillo800eva.dtb \
r8a7778-bockw.dtb \
@@ -659,6 +683,7 @@ dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
r8a7791-koelsch.dtb \
r8a7791-porter.dtb \
r8a7792-blanche.dtb \
+ r8a7792-wheat.dtb \
r8a7793-gose.dtb \
r8a7794-alt.dtb \
r8a7794-silk.dtb \
@@ -686,6 +711,7 @@ dtb-$(CONFIG_ARCH_SPEAR6XX) += \
dtb-$(CONFIG_ARCH_STI) += \
stih407-b2120.dtb \
stih410-b2120.dtb \
+ stih410-b2260.dtb \
stih415-b2000.dtb \
stih415-b2020.dtb \
stih416-b2000.dtb \
@@ -719,6 +745,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-pcduino2.dtb \
sun4i-a10-pov-protab2-ips9.dtb
dtb-$(CONFIG_MACH_SUN5I) += \
+ ntc-gr8-evb.dtb \
sun5i-a10s-auxtek-t003.dtb \
sun5i-a10s-auxtek-t004.dtb \
sun5i-a10s-mk802.dtb \
@@ -727,6 +754,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a10s-wobo-i5.dtb \
sun5i-a13-difrnce-dit4350.dtb \
sun5i-a13-empire-electronix-d709.dtb \
+ sun5i-a13-empire-electronix-m712.dtb \
sun5i-a13-hsg-h702.dtb \
sun5i-a13-inet-98v-rev2.dtb \
sun5i-a13-olinuxino.dtb \
@@ -743,6 +771,7 @@ dtb-$(CONFIG_MACH_SUN6I) += \
sun6i-a31-mele-a1000g-quad.dtb \
sun6i-a31s-colorfly-e708-q1.dtb \
sun6i-a31s-cs908.dtb \
+ sun6i-a31s-inet-q972.dtb \
sun6i-a31s-primo81.dtb \
sun6i-a31s-sina31s.dtb \
sun6i-a31s-sinovoip-bpi-m2.dtb \
@@ -782,16 +811,22 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a23-q8-tablet.dtb \
sun8i-a33-et-q8-v1.6.dtb \
sun8i-a33-ga10h-v1.1.dtb \
+ sun8i-a33-inet-d978-rev2.dtb \
sun8i-a33-ippo-q8h-v1.2.dtb \
+ sun8i-a33-olinuxino.dtb \
sun8i-a33-q8-tablet.dtb \
sun8i-a33-sinlinx-sina33.dtb \
sun8i-a83t-allwinner-h8homlet-v2.dtb \
sun8i-a83t-cubietruck-plus.dtb \
sun8i-h3-bananapi-m2-plus.dtb \
+ sun8i-h3-nanopi-neo.dtb \
sun8i-h3-orangepi-2.dtb \
+ sun8i-h3-orangepi-lite.dtb \
sun8i-h3-orangepi-one.dtb \
sun8i-h3-orangepi-pc.dtb \
+ sun8i-h3-orangepi-pc-plus.dtb \
sun8i-h3-orangepi-plus.dtb \
+ sun8i-h3-orangepi-plus2e.dtb \
sun8i-r16-parrot.dtb
dtb-$(CONFIG_MACH_SUN9I) += \
sun9i-a80-optimus.dtb \
@@ -836,15 +871,15 @@ dtb-$(CONFIG_ARCH_U8500) += \
ste-ccu8540.dtb \
ste-ccu9540.dtb
dtb-$(CONFIG_ARCH_UNIPHIER) += \
- uniphier-ph1-ld4-ref.dtb \
- uniphier-ph1-ld6b-ref.dtb \
- uniphier-ph1-pro4-ace.dtb \
- uniphier-ph1-pro4-ref.dtb \
- uniphier-ph1-pro4-sanji.dtb \
- uniphier-ph1-sld3-ref.dtb \
- uniphier-ph1-sld8-ref.dtb \
- uniphier-proxstream2-gentil.dtb \
- uniphier-proxstream2-vodka.dtb
+ uniphier-ld4-ref.dtb \
+ uniphier-ld6b-ref.dtb \
+ uniphier-pro4-ace.dtb \
+ uniphier-pro4-ref.dtb \
+ uniphier-pro4-sanji.dtb \
+ uniphier-pxs2-gentil.dtb \
+ uniphier-pxs2-vodka.dtb \
+ uniphier-sld3-ref.dtb \
+ uniphier-sld8-ref.dtb
dtb-$(CONFIG_ARCH_VERSATILE) += \
versatile-ab.dtb \
versatile-pb.dtb
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index b689172632ef..dd45d172a892 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -24,12 +24,12 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -37,7 +37,7 @@
regulator-boot-on;
};
- wl12xx_vmmc: fixedregulator@2 {
+ wl12xx_vmmc: fixedregulator2 {
pinctrl-names = "default";
pinctrl-0 = <&wl12xx_gpio>;
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index 58a05f7d0b7c..c2bee452dab8 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -29,13 +29,13 @@
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "base:red:user";
gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; /* gpio1_21 */
default-state = "off";
};
- led@1 {
+ led1 {
label = "base:green:user";
gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; /* gpio2_0 */
default-state = "off";
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index e247c15e5176..007b5e5a51a9 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -13,7 +13,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
@@ -28,28 +28,28 @@
compatible = "gpio-leds";
- led@2 {
+ led2 {
label = "beaglebone:green:heartbeat";
gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
default-state = "off";
};
- led@3 {
+ led3 {
label = "beaglebone:green:mmc0";
gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "mmc0";
default-state = "off";
};
- led@4 {
+ led4 {
label = "beaglebone:green:usr2";
gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "cpu0";
default-state = "off";
};
- led@5 {
+ led5 {
label = "beaglebone:green:usr3";
gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "mmc1";
@@ -57,7 +57,7 @@
};
};
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index ca721670bd91..55c0e954b146 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -33,17 +33,6 @@
status = "okay";
};
-&cpu0_opp_table {
- /*
- * All PG 2.0 silicon may not support 1GHz but some of the early
- * BeagleBone Blacks have PG 2.0 silicon which is guaranteed
- * to support 1GHz OPP so enable it for PG 2.0 on this board.
- */
- oppnitro@1000000000 {
- opp-supported-hw = <0x06 0x0100>;
- };
-};
-
&am33xx_pinmux {
nxp_hdmi_bonelt_pins: nxp_hdmi_bonelt_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index 1d647358f1c1..f9ee5859c154 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -19,7 +19,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts
index 817b1dec0683..947c81b7aaaf 100644
--- a/arch/arm/boot/dts/am335x-cm-t335.dts
+++ b/arch/arm/boot/dts/am335x-cm-t335.dts
@@ -17,7 +17,7 @@
model = "CompuLab CM-T335";
compatible = "compulab,cm-t335", "ti,am33xx";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x8000000>; /* 128 MB */
};
@@ -26,7 +26,7 @@
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&gpio_led_pins>;
- led@0 {
+ led0 {
label = "cm_t335:green";
gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; /* gpio2_0 */
linux,default-trigger = "heartbeat";
@@ -34,7 +34,7 @@
};
/* regulator for mmc */
- vmmc_fixed: fixedregulator@0 {
+ vmmc_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmc_fixed";
regulator-min-microvolt = <3300000>;
@@ -42,7 +42,7 @@
};
/* Regulator for WiFi */
- vwlan_fixed: fixedregulator@2 {
+ vwlan_fixed: fixedregulator2 {
compatible = "regulator-fixed";
regulator-name = "vwlan_fixed";
gpio = <&gpio0 20 GPIO_ACTIVE_HIGH>; /* gpio0_20 */
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 5d28712ad253..e82432c79f85 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -20,12 +20,12 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -33,13 +33,13 @@
regulator-boot-on;
};
- lis3_reg: fixedregulator@1 {
+ lis3_reg: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "lis3_reg";
regulator-boot-on;
};
- wlan_en_reg: fixedregulator@2 {
+ wlan_en_reg: fixedregulator2 {
compatible = "regulator-fixed";
regulator-name = "wlan-en-regulator";
regulator-min-microvolt = <1800000>;
@@ -53,7 +53,7 @@
enable-active-high;
};
- matrix_keypad: matrix_keypad@0 {
+ matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
col-scan-delay-us = <2>;
@@ -73,20 +73,20 @@
0x0201006c>; /* DOWN */
};
- gpio_keys: volume_keys@0 {
+ gpio_keys: volume_keys0 {
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
autorepeat;
- switch@9 {
+ switch9 {
label = "volume-up";
linux,code = <115>;
gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
wakeup-source;
};
- switch@10 {
+ switch10 {
label = "volume-down";
linux,code = <114>;
gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
@@ -497,6 +497,8 @@
&lcdc {
status = "okay";
+
+ blue-and-red-wiring = "crossed";
};
&elm {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 09308d66645b..975c36e332a2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -27,12 +27,12 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -40,13 +40,13 @@
regulator-boot-on;
};
- lis3_reg: fixedregulator@1 {
+ lis3_reg: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "lis3_reg";
regulator-boot-on;
};
- wl12xx_vmmc: fixedregulator@2 {
+ wl12xx_vmmc: fixedregulator2 {
pinctrl-names = "default";
pinctrl-0 = <&wl12xx_gpio>;
compatible = "regulator-fixed";
@@ -58,7 +58,7 @@
enable-active-high;
};
- vtt_fixed: fixedregulator@3 {
+ vtt_fixed: fixedregulator3 {
compatible = "regulator-fixed";
regulator-name = "vtt";
regulator-min-microvolt = <1500000>;
@@ -75,26 +75,26 @@
compatible = "gpio-leds";
- led@1 {
+ led1 {
label = "evmsk:green:usr0";
gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "evmsk:green:usr1";
gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@3 {
+ led3 {
label = "evmsk:green:mmc0";
gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "mmc0";
default-state = "off";
};
- led@4 {
+ led4 {
label = "evmsk:green:heartbeat";
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -102,31 +102,31 @@
};
};
- gpio_buttons: gpio_buttons@0 {
+ gpio_buttons: gpio_buttons0 {
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- switch@1 {
+ switch1 {
label = "button0";
linux,code = <0x100>;
gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
};
- switch@2 {
+ switch2 {
label = "button1";
linux,code = <0x101>;
gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
};
- switch@3 {
+ switch3 {
label = "button2";
linux,code = <0x102>;
gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
wakeup-source;
};
- switch@4 {
+ switch4 {
label = "button3";
linux,code = <0x103>;
gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
@@ -170,29 +170,29 @@
pinctrl-1 = <&lcd_pins_sleep>;
status = "okay";
panel-info {
- ac-bias = <255>;
- ac-bias-intrpt = <0>;
- dma-burst-sz = <16>;
- bpp = <32>;
- fdd = <0x80>;
- sync-edge = <0>;
- sync-ctrl = <1>;
- raster-order = <0>;
- fifo-th = <0>;
+ ac-bias = <255>;
+ ac-bias-intrpt = <0>;
+ dma-burst-sz = <16>;
+ bpp = <32>;
+ fdd = <0x80>;
+ sync-edge = <0>;
+ sync-ctrl = <1>;
+ raster-order = <0>;
+ fifo-th = <0>;
};
display-timings {
480x272 {
- hactive = <480>;
- vactive = <272>;
- hback-porch = <43>;
- hfront-porch = <8>;
- hsync-len = <4>;
- vback-porch = <12>;
- vfront-porch = <4>;
- vsync-len = <10>;
+ hactive = <480>;
+ vactive = <272>;
+ hback-porch = <43>;
+ hfront-porch = <8>;
+ hsync-len = <4>;
+ vback-porch = <12>;
+ vfront-porch = <4>;
+ vsync-len = <10>;
clock-frequency = <9000000>;
- hsync-active = <0>;
- vsync-active = <0>;
+ hsync-active = <0>;
+ vsync-active = <0>;
};
};
};
@@ -711,5 +711,7 @@
};
&lcdc {
- status = "okay";
+ status = "okay";
+
+ blue-and-red-wiring = "crossed";
};
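The blue-and-red-wiring property added to both EVM boards is a hint for the am33xx LCD controller (tilcdc) driver: it records whether the board routes the red and blue data lanes straight or crossed, so the driver can choose a matching output format instead of displaying swapped colours. The usage is simply a property on the enabled lcdc node, as in the hunks above (sketch, not a new hunk from this patch):

	&lcdc {
		status = "okay";

		/* board wires red/blue swapped relative to the LCDC pins */
		blue-and-red-wiring = "crossed";
	};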
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 7d8b8fefdf08..85e04c205542 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -19,12 +19,12 @@
model = "TI AM3359 ICE-V2";
compatible = "ti,am3359-icev2", "ti,am33xx";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -32,7 +32,7 @@
regulator-boot-on;
};
- vtt_fixed: fixedregulator@1 {
+ vtt_fixed: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vtt";
regulator-min-microvolt = <1500000>;
@@ -43,52 +43,52 @@
enable-active-high;
};
- leds@0 {
+ leds0 {
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "out0";
gpios = <&tpic2810 0 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@1 {
+ led1 {
label = "out1";
gpios = <&tpic2810 1 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "out2";
gpios = <&tpic2810 2 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@3 {
+ led3 {
label = "out3";
gpios = <&tpic2810 3 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@4 {
+ led4 {
label = "out4";
gpios = <&tpic2810 4 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@5 {
+ led5 {
label = "out5";
gpios = <&tpic2810 5 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@6 {
+ led6 {
label = "out6";
gpios = <&tpic2810 6 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@7 {
+ led7 {
label = "out7";
gpios = <&tpic2810 7 GPIO_ACTIVE_HIGH>;
default-state = "off";
@@ -96,49 +96,58 @@
};
/* Tricolor status LEDs */
- leds@1 {
+ leds1 {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&user_leds>;
- led@0 {
+ led0 {
label = "status0:red:cpu0";
gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
default-state = "off";
linux,default-trigger = "cpu0";
};
- led@1 {
+ led1 {
label = "status0:green:usr";
gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "status0:yellow:usr";
gpios = <&gpio3 9 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@3 {
+ led3 {
label = "status1:red:mmc0";
gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
default-state = "off";
linux,default-trigger = "mmc0";
};
- led@4 {
+ led4 {
label = "status1:green:usr";
gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@5 {
+ led5 {
label = "status1:yellow:usr";
gpios = <&gpio0 19 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
+ gpio-decoder {
+ compatible = "gpio-decoder";
+ gpios = <&pca9536 3 GPIO_ACTIVE_HIGH>,
+ <&pca9536 2 GPIO_ACTIVE_HIGH>,
+ <&pca9536 1 GPIO_ACTIVE_HIGH>,
+ <&pca9536 0 GPIO_ACTIVE_HIGH>;
+ linux,axis = <0>; /* ABS_X */
+ decoder-max-value = <9>;
+ };
};
&am33xx_pinmux {
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index e7d9ca1305fa..a5769a8f5fc8 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -20,7 +20,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
@@ -31,14 +31,14 @@
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "com:green:user";
gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -46,7 +46,7 @@
regulator-boot-on;
};
- vmmc: fixedregulator@0 {
+ vmmc: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index d97b0efa43f3..1d6c6fa703e4 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -19,13 +19,13 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
/* Power supply provides a fixed 5V @2A */
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -34,7 +34,7 @@
};
/* Power supply provides a fixed 3.3V @3A */
- vmmcsd_fixed: fixedregulator@1 {
+ vmmcsd_fixed: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index f313999c503e..483d585c8908 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -19,7 +19,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
@@ -27,7 +27,7 @@
leds {
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "nanobone:green:usr1";
gpios = <&gpio1 5 0>;
default-state = "off";
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 8867aaaec54d..30e2f8770aaf 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -20,7 +20,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
@@ -41,15 +41,15 @@
compatible = "ti,da830-evm-audio";
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
};
- v3v3c_reg: fixedregulator@1 {
+ v3v3c_reg: fixedregulator1 {
compatible = "regulator-fixed";
};
- vdd5_reg: fixedregulator@2 {
+ vdd5_reg: fixedregulator2 {
compatible = "regulator-fixed";
};
};
@@ -595,14 +595,14 @@
pinctrl-names = "default";
pinctrl-0 = <&user_leds_pins>;
- led@0 {
+ led0 {
label = "pepper:user0:blue";
gpios = <&gpio1 20 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "none";
default-state = "off";
};
- led@1 {
+ led1 {
label = "pepper:user1:red";
gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "none";
@@ -616,21 +616,21 @@
#address-cells = <1>;
#size-cells = <0>;
- button@0 {
+ button0 {
label = "home";
linux,code = <KEY_HOME>;
gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
wakeup-source;
};
- button@1 {
+ button1 {
label = "menu";
linux,code = <KEY_MENU>;
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
wakeup-source;
};
- buttons@2 {
+ buttons2 {
label = "power";
linux,code = <KEY_POWER>;
gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 1263c9d4cba3..75e24add3f13 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -25,7 +25,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
@@ -33,7 +33,7 @@
regulators {
compatible = "simple-bus";
- vcc5v: fixedregulator@0 {
+ vcc5v: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vcc5v";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 837d5b80ea1d..bf8727a19ece 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -64,50 +64,50 @@
compatible = "gpio-leds";
- led@1 {
+ led1 {
label = "shc:power:red";
gpios = <&gpio0 23 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "shc:power:bl";
gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "timer";
default-state = "on";
};
- led@3 {
+ led3 {
label = "shc:lan:red";
gpios = <&gpio0 26 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@4 {
+ led4 {
label = "shc:lan:bl";
gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@5 {
+ led5 {
label = "shc:cloud:red";
gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- led@6 {
+ led6 {
label = "shc:cloud:bl";
gpios = <&gpio1 18 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index a6efbe6eda3b..b0dfa6f14cd5 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -19,6 +19,11 @@
};
};
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+
chosen {
stdout-path = &uart0;
};
@@ -28,25 +33,25 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins>;
- led@0 {
+ led0 {
label = "sl50:green:usr0";
gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@1 {
+ led1 {
label = "sl50:red:usr1";
gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "sl50:green:usr2";
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@3 {
+ led3 {
label = "sl50:red:usr3";
gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
default-state = "off";
@@ -103,7 +108,7 @@
reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
};
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
index 282f6d4b27bc..02c67365c4e1 100644
--- a/arch/arm/boot/dts/am335x-wega.dtsi
+++ b/arch/arm/boot/dts/am335x-wega.dtsi
@@ -11,10 +11,14 @@
model = "Phytec AM335x phyBOARD-WEGA";
compatible = "phytec,am335x-wega", "phytec,am335x-phycore-som", "ti,am33xx";
+ sound: sound_iface {
+ compatible = "ti,da830-evm-audio";
+ };
+
regulators {
compatible = "simple-bus";
- vcc3v3: fixedregulator@1 {
+ vcc3v3: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
@@ -24,6 +28,58 @@
};
};
+/* Audio */
+&am33xx_pinmux {
+ mcasp0_pins: pinmux_mcasp0 {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x9AC, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_ahclkx.mcasp0_ahclkx */
+ AM33XX_IOPAD(0x990, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
+ AM33XX_IOPAD(0x994, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
+ AM33XX_IOPAD(0x998, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr0.mcasp0_axr0 */
+ AM33XX_IOPAD(0x9A8, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr1.mcasp0_axr1 */
+ >;
+ };
+};
+
+&i2c0 {
+ tlv320aic3007: tlv320aic3007@18 {
+ compatible = "ti,tlv320aic3007";
+ reg = <0x18>;
+ AVDD-supply = <&vcc3v3>;
+ IOVDD-supply = <&vcc3v3>;
+ DRVDD-supply = <&vcc3v3>;
+ DVDD-supply = <&vdig1_reg>;
+ status = "okay";
+ };
+};
+
+&mcasp0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcasp0_pins>;
+ op-mode = <0>; /* DAVINCI_MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ serial-dir = <
+ 2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
+ >;
+ tx-num-evt = <16>;
+	rx-num-evt = <16>;
+ status = "okay";
+};
+
+&sound {
+ ti,model = "AM335x-Wega";
+ ti,audio-codec = <&tlv320aic3007>;
+ ti,mcasp-controller = <&mcasp0>;
+ ti,audio-routing =
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In";
+ clocks = <&mcasp0_fck>;
+ clock-names = "mclk";
+ status = "okay";
+};
+
/* CAN Busses */
&am33xx_pinmux {
dcan1_pins: pinmux_dcan1 {
@@ -99,6 +155,12 @@
status = "okay";
};
+/* Power */
+&vdig1_reg {
+ regulator-boot-on;
+ regulator-always-on;
+};
+
/* UARTs */
&am33xx_pinmux {
uart0_pins: pinmux_uart0 {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 98748c61ed99..194d884c9de1 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -11,11 +11,11 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/am33xx.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,am33xx";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c0;
@@ -45,9 +45,19 @@
device_type = "cpu";
reg = <0>;
- operating-points-v2 = <&cpu0_opp_table>;
- ti,syscon-efuse = <&scm_conf 0x7fc 0x1fff 0>;
- ti,syscon-rev = <&scm_conf 0x600>;
+ /*
+ * To consider voltage drop between PMIC and SoC,
+ * tolerance value is reduced to 2% from 4% and
+ * voltage value is increased as a precaution.
+ */
+ operating-points = <
+ /* kHz uV */
+ 720000 1285000
+ 600000 1225000
+ 500000 1125000
+ 275000 1125000
+ >;
+ voltage-tolerance = <2>; /* 2 percentage */
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
@@ -56,78 +66,6 @@
};
};
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
-
- /*
- * The three following nodes are marked with opp-suspend
- * because the can not be enabled simultaneously on a
- * single SoC.
- */
- opp50@300000000 {
- opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <950000 931000 969000>;
- opp-supported-hw = <0x06 0x0010>;
- opp-suspend;
- };
-
- opp100@275000000 {
- opp-hz = /bits/ 64 <275000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x01 0x00FF>;
- opp-suspend;
- };
-
- opp100@300000000 {
- opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x06 0x0020>;
- opp-suspend;
- };
-
- opp100@500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- opp100@600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x06 0x0040>;
- };
-
- opp120@600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <1200000 1176000 1224000>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- opp120@720000000 {
- opp-hz = /bits/ 64 <720000000>;
- opp-microvolt = <1200000 1176000 1224000>;
- opp-supported-hw = <0x06 0x0080>;
- };
-
- oppturbo@720000000 {
- opp-hz = /bits/ 64 <720000000>;
- opp-microvolt = <1260000 1234800 1285200>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- oppturbo@800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1260000 1234800 1285200>;
- opp-supported-hw = <0x06 0x0100>;
- };
-
- oppnitro@1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <1325000 1298500 1351500>;
- opp-supported-hw = <0x04 0x0200>;
- };
- };
-
pmu {
compatible = "arm,cortex-a8-pmu";
interrupts = <3>;
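With the operating-points-v2 table reverted to the legacy operating-points list above, the generic cpufreq-dt driver can take its frequency/voltage pairs straight from the cpu0 node and scale the supply referenced by cpu0-supply within the stated voltage-tolerance. A board powering the MPU rail from a PMIC ties this together roughly as follows (sketch only; the regulator label is board specific and not part of this patch):

	&cpu0 {
		/* MPU rail from the board's PMIC; label is illustrative */
		cpu0-supply = <&vdd_mpu_reg>;
	};

The am57xx-beagle-x15 files further down do the same thing with voltage-tolerance = <1> against the Palmas smps12 regulator.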
diff --git a/arch/arm/boot/dts/am3517-craneboard.dts b/arch/arm/boot/dts/am3517-craneboard.dts
index f9d8f3948c4a..083ff5073435 100644
--- a/arch/arm/boot/dts/am3517-craneboard.dts
+++ b/arch/arm/boot/dts/am3517-craneboard.dts
@@ -15,7 +15,7 @@
model = "TI AM3517 CraneBoard (TMDSEVM3517)";
compatible = "ti,am3517-craneboard", "ti,am3517", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index b4127c6493a2..0e4a125f78e3 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -13,7 +13,7 @@
model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)";
compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 5e3f5e86ffcf..0db19d39d24c 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -15,7 +15,7 @@
serial3 = &uart4;
};
- ocp {
+ ocp@68000000 {
am35x_otg_hs: am35x_otg_hs@5c040000 {
compatible = "ti,omap3-musb";
ti,hwmods = "am35x_otg_hs";
diff --git a/arch/arm/boot/dts/am3517_mt_ventoux.dts b/arch/arm/boot/dts/am3517_mt_ventoux.dts
index fdf5ce63c8e6..3395783c5b4e 100644
--- a/arch/arm/boot/dts/am3517_mt_ventoux.dts
+++ b/arch/arm/boot/dts/am3517_mt_ventoux.dts
@@ -13,7 +13,7 @@
model = "TeeJet Mt.Ventoux";
compatible = "teejet,mt_ventoux", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 0fadae5396e1..a275fa956813 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -11,12 +11,16 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,am4372", "ti,am43";
interrupt-parent = <&wakeupgen>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0>;
+ };
aliases {
i2c0 = &i2c0;
@@ -132,7 +136,7 @@
cache-level = <2>;
};
- ocp {
+ ocp@44000000 {
compatible = "ti,am4372-l3-noc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts
index 9551c4713173..9e92d480576b 100644
--- a/arch/arm/boot/dts/am437x-cm-t43.dts
+++ b/arch/arm/boot/dts/am437x-cm-t43.dts
@@ -209,7 +209,6 @@
#interrupt-cells = <2>;
dcdc1: regulator-dcdc1 {
- compatible = "ti,tps65218-dcdc1";
regulator-name = "vdd_core";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1144000>;
@@ -218,7 +217,6 @@
};
dcdc2: regulator-dcdc2 {
- compatible = "ti,tps65218-dcdc2";
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1378000>;
@@ -227,7 +225,6 @@
};
dcdc3: regulator-dcdc3 {
- compatible = "ti,tps65218-dcdc3";
regulator-name = "vdcdc3";
regulator-suspend-enable;
regulator-min-microvolt = <1500000>;
@@ -237,7 +234,6 @@
};
dcdc5: regulator-dcdc5 {
- compatible = "ti,tps65218-dcdc5";
regulator-name = "v1_0bat";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
@@ -246,7 +242,6 @@
};
dcdc6: regulator-dcdc6 {
- compatible = "ti,tps65218-dcdc6";
regulator-name = "v1_8bat";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -255,7 +250,6 @@
};
ldo1: regulator-ldo1 {
- compatible = "ti,tps65218-ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 14677d599595..957840cc7b78 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -58,7 +58,7 @@
default-brightness-level = <8>;
};
- matrix_keypad: matrix_keypad@0 {
+ matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
col-scan-delay-us = <2>;
@@ -513,7 +513,6 @@
#interrupt-cells = <2>;
dcdc1: regulator-dcdc1 {
- compatible = "ti,tps65218-dcdc1";
regulator-name = "vdd_core";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1144000>;
@@ -522,7 +521,6 @@
};
dcdc2: regulator-dcdc2 {
- compatible = "ti,tps65218-dcdc2";
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1378000>;
@@ -531,33 +529,42 @@
};
dcdc3: regulator-dcdc3 {
- compatible = "ti,tps65218-dcdc3";
regulator-name = "vdcdc3";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ regulator-state-disk {
+ regulator-off-in-suspend;
+ };
};
+
dcdc5: regulator-dcdc5 {
- compatible = "ti,tps65218-dcdc5";
regulator-name = "v1_0bat";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-boot-on;
regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
};
dcdc6: regulator-dcdc6 {
- compatible = "ti,tps65218-dcdc6";
regulator-name = "v1_8bat";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
};
ldo1: regulator-ldo1 {
- compatible = "ti,tps65218-ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
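The regulator-state-mem and regulator-state-disk subnodes added here use the generic regulator suspend-state binding: they tell the PMIC driver whether a rail should stay powered during suspend-to-RAM and suspend-to-disk respectively. The general shape, with an illustrative rail name not taken from this patch:

	vdds_example: regulator-example {
		regulator-name = "vdds_example";
		regulator-min-microvolt = <1500000>;
		regulator-max-microvolt = <1500000>;
		regulator-always-on;

		/* keep the rail powered across suspend-to-RAM */
		regulator-state-mem {
			regulator-on-in-suspend;
		};

		/* allow it to be switched off for suspend-to-disk */
		regulator-state-disk {
			regulator-off-in-suspend;
		};
	};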
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index 12a69518383e..25ce611c6568 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -104,7 +104,7 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch0 {
label = "power-button";
linux,code = <KEY_POWER>;
gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 5687d6b4da60..319d94205350 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -64,7 +64,7 @@
};
};
- matrix_keypad: matrix_keypad@0 {
+ matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
pinctrl-names = "default";
@@ -93,28 +93,28 @@
pinctrl-names = "default";
pinctrl-0 = <&leds_pins>;
- led@0 {
+ led0 {
label = "am437x-sk:red:heartbeat";
gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>; /* Bank 5, pin 0 */
linux,default-trigger = "heartbeat";
default-state = "off";
};
- led@1 {
+ led1 {
label = "am437x-sk:green:mmc1";
gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; /* Bank 5, pin 1 */
linux,default-trigger = "mmc0";
default-state = "off";
};
- led@2 {
+ led2 {
label = "am437x-sk:blue:cpu0";
gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>; /* Bank 5, pin 2 */
linux,default-trigger = "cpu0";
default-state = "off";
};
- led@3 {
+ led3 {
label = "am437x-sk:blue:usr3";
gpios = <&gpio5 3 GPIO_ACTIVE_HIGH>; /* Bank 5, pin 3 */
default-state = "off";
@@ -428,7 +428,6 @@
#interrupt-cells = <2>;
dcdc1: regulator-dcdc1 {
- compatible = "ti,tps65218-dcdc1";
/* VDD_CORE limits min of OPP50 and max of OPP100 */
regulator-name = "vdd_core";
regulator-min-microvolt = <912000>;
@@ -438,7 +437,6 @@
};
dcdc2: regulator-dcdc2 {
- compatible = "ti,tps65218-dcdc2";
/* VDD_MPU limits min of OPP50 and max of OPP_NITRO */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912000>;
@@ -448,16 +446,20 @@
};
dcdc3: regulator-dcdc3 {
- compatible = "ti,tps65218-dcdc3";
regulator-name = "vdds_ddr";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ regulator-state-disk {
+ regulator-off-in-suspend;
+ };
};
dcdc4: regulator-dcdc4 {
- compatible = "ti,tps65218-dcdc4";
regulator-name = "v3_3d";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -465,8 +467,31 @@
regulator-always-on;
};
+ dcdc5: regulator-dcdc5 {
+ compatible = "ti,tps65218-dcdc5";
+ regulator-name = "v1_0bat";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ dcdc6: regulator-dcdc6 {
+ compatible = "ti,tps65218-dcdc6";
+ regulator-name = "v1_8bat";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
ldo1: regulator-ldo1 {
- compatible = "ti,tps65218-ldo1";
regulator-name = "v1_8d";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index ad32e55532f8..9d35c3f07cad 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -32,7 +32,7 @@
enable-active-high;
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -67,7 +67,7 @@
};
};
- matrix_keypad: matrix_keypad@0 {
+ matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
col-scan-delay-us = <2>;
@@ -421,7 +421,6 @@
#interrupt-cells = <2>;
dcdc1: regulator-dcdc1 {
- compatible = "ti,tps65218-dcdc1";
regulator-name = "vdd_core";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1144000>;
@@ -430,7 +429,6 @@
};
dcdc2: regulator-dcdc2 {
- compatible = "ti,tps65218-dcdc2";
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <1378000>;
@@ -439,7 +437,6 @@
};
dcdc3: regulator-dcdc3 {
- compatible = "ti,tps65218-dcdc3";
regulator-name = "vdcdc3";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
@@ -448,7 +445,6 @@
};
dcdc4: regulator-dcdc4 {
- compatible = "ti,tps65218-dcdc4";
regulator-name = "vdcdc4";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -457,21 +453,18 @@
};
dcdc5: regulator-dcdc5 {
- compatible = "ti,tps65218-dcdc5";
regulator-name = "v1_0bat";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
};
dcdc6: regulator-dcdc6 {
- compatible = "ti,tps65218-dcdc6";
regulator-name = "v1_8bat";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
ldo1: regulator-ldo1 {
- compatible = "ti,tps65218-ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index e3acb99703e1..87bbc66f0f21 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -18,7 +18,7 @@
compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74",
"ti,dra7";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x80000000>;
};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
new file mode 100644
index 000000000000..6df7829a2c15
--- /dev/null
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2014-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dra74x.dtsi"
+#include "am57xx-commercial-grade.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "ti,am572x-beagle-x15", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7";
+
+ aliases {
+ rtc0 = &mcp_rtc;
+ rtc1 = &tps659038_rtc;
+ rtc2 = &rtc;
+ display0 = &hdmi0;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ vdd_3v3: fixedregulator-vdd_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_3v3";
+ vin-supply = <&regen1>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ aic_dvdd: fixedregulator-aic_dvdd {
+ compatible = "regulator-fixed";
+ regulator-name = "aic_dvdd_fixed";
+ vin-supply = <&vdd_3v3>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vtt_fixed: fixedregulator-vtt {
+ /* TPS51200 */
+ compatible = "regulator-fixed";
+ regulator-name = "vtt_fixed";
+ vin-supply = <&smps3_reg>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "beagle-x15:usr0";
+ gpios = <&gpio7 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led1 {
+ label = "beagle-x15:usr1";
+ gpios = <&gpio7 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ default-state = "off";
+ };
+
+ led2 {
+ label = "beagle-x15:usr2";
+ gpios = <&gpio7 14 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led3 {
+ label = "beagle-x15:usr3";
+ gpios = <&gpio7 15 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ default-state = "off";
+ };
+ };
+
+ gpio_fan: gpio_fan {
+ /* Based on 5v 500mA AFB02505HHB */
+ compatible = "gpio-fan";
+ gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
+ gpio-fan,speed-map = <0 0>,
+ <13000 1>;
+ #cooling-cells = <2>;
+ };
+
+ hdmi0: connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&tpd12s015_out>;
+ };
+ };
+ };
+
+ tpd12s015: encoder {
+ compatible = "ti,tpd12s015";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tpd12s015_in: endpoint {
+ remote-endpoint = <&hdmi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ tpd12s015_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+
+ sound0: sound0 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "BeagleBoard-X15";
+ simple-audio-card,widgets =
+ "Line", "Line Out",
+ "Line", "Line In";
+ simple-audio-card,routing =
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT",
+ "MIC2L", "Line In",
+ "MIC2R", "Line In";
+ simple-audio-card,format = "dsp_b";
+ simple-audio-card,bitclock-master = <&sound0_master>;
+ simple-audio-card,frame-master = <&sound0_master>;
+ simple-audio-card,bitclock-inversion;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp3>;
+ };
+
+ sound0_master: simple-audio-card,codec {
+ sound-dai = <&tlv320aic3104>;
+ clocks = <&clkout2_clk>;
+ };
+ };
+};
+
+&dra7_pmx_core {
+ mmc1_pins_default: mmc1_pins_default {
+ pinctrl-single,pins = <
+ DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14) /* mmc1sdcd.gpio219 */
+ DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
+ DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
+ DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
+ DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
+ DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
+ DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
+ >;
+ };
+
+ mmc2_pins_default: mmc2_pins_default {
+ pinctrl-single,pins = <
+ DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
+ DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
+ DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
+ DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
+ DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
+ DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
+ DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
+ DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
+ DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
+ DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
+ >;
+ };
+};
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps659038: tps659038@58 {
+ compatible = "ti,tps659038";
+ reg = <0x58>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ ti,system-power-controller;
+
+ tps659038_pmic {
+ compatible = "ti,tps659038-pmic";
+
+ regulators {
+ smps12_reg: smps12 {
+ /* VDD_MPU */
+ regulator-name = "smps12";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ smps3_reg: smps3 {
+ /* VDD_DDR */
+ regulator-name = "smps3";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ smps45_reg: smps45 {
+ /* VDD_DSPEVE, VDD_IVA, VDD_GPU */
+ regulator-name = "smps45";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ smps6_reg: smps6 {
+ /* VDD_CORE */
+ regulator-name = "smps6";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* SMPS7 unused */
+
+ smps8_reg: smps8 {
+ /* VDD_1V8 */
+ regulator-name = "smps8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* SMPS9 unused */
+
+ ldo1_reg: ldo1 {
+ /* VDD_SD / VDDSHV8 */
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ /* VDD_SHV5 */
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo3_reg: ldo3 {
+ /* VDDA_1V8_PHYA */
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo4_reg: ldo4 {
+ /* VDDA_1V8_PHYB */
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo9_reg: ldo9 {
+ /* VDD_RTC */
+ regulator-name = "ldo9";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldoln_reg: ldoln {
+ /* VDDA_1V8_PLL */
+ regulator-name = "ldoln";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
+ regulator-name = "ldousb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ regen1: regen1 {
+ /* VDD_3V3_ON */
+ regulator-name = "regen1";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ tps659038_rtc: tps659038_rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&tps659038>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ tps659038_pwr_button: tps659038_pwr_button {
+ compatible = "ti,palmas-pwrbutton";
+ interrupt-parent = <&tps659038>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ ti,palmas-long-press-seconds = <12>;
+ };
+
+ tps659038_gpio: tps659038_gpio {
+ compatible = "ti,palmas-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ extcon_usb2: tps659038_usb {
+ compatible = "ti,palmas-usb-vid";
+ ti,enable-vbus-detection;
+ vbus-gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+ };
+
+ };
+
+ tmp102: tmp102@48 {
+ compatible = "ti,tmp102";
+ reg = <0x48>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ tlv320aic3104: tlv320aic3104@18 {
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic3104";
+ reg = <0x18>;
+ assigned-clocks = <&clkoutmux2_clk_mux>;
+ assigned-clock-parents = <&sys_clk2_dclk_div>;
+
+ status = "okay";
+ adc-settle-ms = <40>;
+
+ AVDD-supply = <&vdd_3v3>;
+ IOVDD-supply = <&vdd_3v3>;
+ DRVDD-supply = <&vdd_3v3>;
+ DVDD-supply = <&aic_dvdd>;
+ };
+
+ eeprom: eeprom@50 {
+ compatible = "at,24c32";
+ reg = <0x50>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ mcp_rtc: rtc@6f {
+ compatible = "microchip,mcp7941x";
+ reg = <0x6f>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+ <&dra7_pmx_core 0x424>;
+ interrupt-names = "irq", "wakeup";
+
+ vcc-supply = <&vdd_3v3>;
+ wakeup-source;
+ };
+};
+
+&gpio7 {
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+};
+
+&cpu0 {
+ cpu0-supply = <&smps12_reg>;
+ voltage-tolerance = <1>;
+};
+
+&uart3 {
+ status = "okay";
+ interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <&dra7_pmx_core 0x3f8>;
+};
+
+&mac {
+ status = "okay";
+ dual_emac;
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <2>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <2>;
+};
+
+&mmc1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_default>;
+
+ bus-width = <4>;
+ cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+};
+
+&mmc2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins_default>;
+
+ vmmc-supply = <&vdd_3v3>;
+ bus-width = <8>;
+ ti,non-removable;
+ cap-mmc-dual-data-rate;
+};
+
+&sata {
+ status = "okay";
+};
+
+&usb2_phy1 {
+ phy-supply = <&ldousb_reg>;
+};
+
+&usb2_phy2 {
+ phy-supply = <&ldousb_reg>;
+};
+
+&usb1 {
+ dr_mode = "host";
+};
+
+&omap_dwc3_2 {
+ extcon = <&extcon_usb2>;
+};
+
+&usb2 {
+ /*
+ * Stand alone usage is peripheral only.
+ * However, with some resistor modifications
+ * this port can be used via expansion connectors
+ * as "host" or "dual-role". If so, provide
+ * the necessary dr_mode override in the expansion
+ * board's DT.
+ */
+ dr_mode = "peripheral";
+};
+
+&cpu_trips {
+ cpu_alert1: cpu_alert1 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+};
+
+&cpu_cooling_maps {
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+};
+
+&thermal_zones {
+ board_thermal: board_thermal {
+ polling-delay-passive = <1250>; /* milliseconds */
+ polling-delay = <1500>; /* milliseconds */
+
+ /* sensor ID */
+ thermal-sensors = <&tmp102 0>;
+
+ board_trips: trips {
+ board_alert0: board_alert {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "active";
+ };
+
+ board_crit: board_crit {
+ temperature = <105000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ board_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&board_alert0>;
+ cooling-device =
+ <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
+
+&dss {
+ status = "ok";
+
+ vdda_video-supply = <&ldoln_reg>;
+};
+
+&hdmi {
+ status = "ok";
+ vdda-supply = <&ldo4_reg>;
+
+ port {
+ hdmi_out: endpoint {
+ remote-endpoint = <&tpd12s015_in>;
+ };
+ };
+};
+
+&pcie1 {
+ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+};
+
+&mcasp3 {
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&mcasp3_ahclkx_mux>;
+ assigned-clock-parents = <&sys_clkin2>;
+ status = "okay";
+
+ op-mode = <0>; /* MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ /* 4 serializers */
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 1 2 0 0
+ >;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+};
+
+&mailbox5 {
+ status = "okay";
+ mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ status = "okay";
+ };
+ mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ status = "okay";
+ };
+};
+
+&mailbox6 {
+ status = "okay";
+ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ status = "okay";
+ };
+ mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
new file mode 100644
index 000000000000..ca85570629fd
--- /dev/null
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "am57xx-beagle-x15-common.dtsi"
+
+/ {
+ model = "TI AM5728 BeagleBoard-X15 rev B1";
+};
+
+&tpd12s015 {
+ gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>, /* gpio7_10, CT CP HPD */
+ <&gpio2 30 GPIO_ACTIVE_HIGH>, /* gpio2_30, LS OE */
+ <&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
+};
+
+&mmc1 {
+ vmmc-supply = <&vdd_3v3>;
+ vmmc-aux-supply = <&ldo1_reg>;
+};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index c4d04c5293b9..8c66f2efd283 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -1,822 +1,24 @@
/*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014-2016 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-/dts-v1/;
-#include "dra74x.dtsi"
-#include "am57xx-commercial-grade.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "am57xx-beagle-x15-common.dtsi"
/ {
+ /* NOTE: This describes the "original" pre-production A2 revision */
model = "TI AM5728 BeagleBoard-X15";
- compatible = "ti,am572x-beagle-x15", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7";
-
- aliases {
- rtc0 = &mcp_rtc;
- rtc1 = &tps659038_rtc;
- rtc2 = &rtc;
- display0 = &hdmi0;
- };
-
- memory {
- device_type = "memory";
- reg = <0x0 0x80000000 0x0 0x80000000>;
- };
-
- vdd_3v3: fixedregulator-vdd_3v3 {
- compatible = "regulator-fixed";
- regulator-name = "vdd_3v3";
- vin-supply = <&regen1>;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- aic_dvdd: fixedregulator-aic_dvdd {
- compatible = "regulator-fixed";
- regulator-name = "aic_dvdd_fixed";
- vin-supply = <&vdd_3v3>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- vtt_fixed: fixedregulator-vtt {
- /* TPS51200 */
- compatible = "regulator-fixed";
- regulator-name = "vtt_fixed";
- vin-supply = <&smps3_reg>;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- enable-active-high;
- gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
- };
-
- leds {
- compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_pins_default>;
-
- led@0 {
- label = "beagle-x15:usr0";
- gpios = <&gpio7 9 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
-
- led@1 {
- label = "beagle-x15:usr1";
- gpios = <&gpio7 8 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "cpu0";
- default-state = "off";
- };
-
- led@2 {
- label = "beagle-x15:usr2";
- gpios = <&gpio7 14 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- default-state = "off";
- };
-
- led@3 {
- label = "beagle-x15:usr3";
- gpios = <&gpio7 15 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "disk-activity";
- default-state = "off";
- };
- };
-
- gpio_fan: gpio_fan {
- /* Based on 5v 500mA AFB02505HHB */
- compatible = "gpio-fan";
- gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
- gpio-fan,speed-map = <0 0>,
- <13000 1>;
- #cooling-cells = <2>;
- };
-
- hdmi0: connector {
- compatible = "hdmi-connector";
- label = "hdmi";
-
- type = "a";
-
- port {
- hdmi_connector_in: endpoint {
- remote-endpoint = <&tpd12s015_out>;
- };
- };
- };
-
- tpd12s015: encoder {
- compatible = "ti,tpd12s015";
-
- pinctrl-names = "default";
- pinctrl-0 = <&tpd12s015_pins>;
-
- gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>, /* gpio7_10, CT CP HPD */
- <&gpio6 28 GPIO_ACTIVE_HIGH>, /* gpio6_28, LS OE */
- <&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- tpd12s015_in: endpoint {
- remote-endpoint = <&hdmi_out>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- tpd12s015_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
- };
- };
- };
-
- sound0: sound0 {
- compatible = "simple-audio-card";
- simple-audio-card,name = "BeagleBoard-X15";
- simple-audio-card,widgets =
- "Line", "Line Out",
- "Line", "Line In";
- simple-audio-card,routing =
- "Line Out", "LLOUT",
- "Line Out", "RLOUT",
- "MIC2L", "Line In",
- "MIC2R", "Line In";
- simple-audio-card,format = "dsp_b";
- simple-audio-card,bitclock-master = <&sound0_master>;
- simple-audio-card,frame-master = <&sound0_master>;
- simple-audio-card,bitclock-inversion;
-
- simple-audio-card,cpu {
- sound-dai = <&mcasp3>;
- };
-
- sound0_master: simple-audio-card,codec {
- sound-dai = <&tlv320aic3104>;
- clocks = <&clkout2_clk>;
- };
- };
};
-&dra7_pmx_core {
- leds_pins_default: leds_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x37a8, PIN_OUTPUT | MUX_MODE14) /* spi1_d1.gpio7_8 */
- DRA7XX_CORE_IOPAD(0x37ac, PIN_OUTPUT | MUX_MODE14) /* spi1_d0.gpio7_9 */
- DRA7XX_CORE_IOPAD(0x37c0, PIN_OUTPUT | MUX_MODE14) /* spi2_sclk.gpio7_14 */
- DRA7XX_CORE_IOPAD(0x37c4, PIN_OUTPUT | MUX_MODE14) /* spi2_d1.gpio7_15 */
- >;
- };
-
- i2c1_pins_default: i2c1_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3800, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda.sda */
- DRA7XX_CORE_IOPAD(0x3804, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl.scl */
- >;
- };
-
- hdmi_pins: pinmux_hdmi_pins {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3808, PIN_INPUT | MUX_MODE1) /* i2c2_sda.hdmi1_ddc_scl */
- DRA7XX_CORE_IOPAD(0x380c, PIN_INPUT | MUX_MODE1) /* i2c2_scl.hdmi1_ddc_sda */
- >;
- };
-
- i2c3_pins_default: i2c3_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x36a4, PIN_INPUT| MUX_MODE10) /* mcasp1_aclkx.i2c3_sda */
- DRA7XX_CORE_IOPAD(0x36a8, PIN_INPUT| MUX_MODE10) /* mcasp1_fsx.i2c3_scl */
- >;
- };
-
- uart3_pins_default: uart3_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */
- DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */
- >;
- };
-
- mmc1_pins_default: mmc1_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14) /* mmc1sdcd.gpio219 */
- DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
- DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
- DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
- DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
- DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
- DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
- >;
- };
-
- mmc2_pins_default: mmc2_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
- DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
- DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
- DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
- DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
- DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
- DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
- DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
- DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
- DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
- >;
- };
-
- cpsw_pins_default: cpsw_pins_default {
- pinctrl-single,pins = <
- /* Slave 1 */
- DRA7XX_CORE_IOPAD(0x3650, PIN_OUTPUT | MUX_MODE0) /* rgmii1_tclk */
- DRA7XX_CORE_IOPAD(0x3654, PIN_OUTPUT | MUX_MODE0) /* rgmii1_tctl */
- DRA7XX_CORE_IOPAD(0x3658, PIN_OUTPUT | MUX_MODE0) /* rgmii1_td3 */
- DRA7XX_CORE_IOPAD(0x365c, PIN_OUTPUT | MUX_MODE0) /* rgmii1_td2 */
- DRA7XX_CORE_IOPAD(0x3660, PIN_OUTPUT | MUX_MODE0) /* rgmii1_td1 */
- DRA7XX_CORE_IOPAD(0x3664, PIN_OUTPUT | MUX_MODE0) /* rgmii1_td0 */
- DRA7XX_CORE_IOPAD(0x3668, PIN_INPUT | MUX_MODE0) /* rgmii1_rclk */
- DRA7XX_CORE_IOPAD(0x366c, PIN_INPUT | MUX_MODE0) /* rgmii1_rctl */
- DRA7XX_CORE_IOPAD(0x3670, PIN_INPUT | MUX_MODE0) /* rgmii1_rd3 */
- DRA7XX_CORE_IOPAD(0x3674, PIN_INPUT | MUX_MODE0) /* rgmii1_rd2 */
- DRA7XX_CORE_IOPAD(0x3678, PIN_INPUT | MUX_MODE0) /* rgmii1_rd1 */
- DRA7XX_CORE_IOPAD(0x367c, PIN_INPUT | MUX_MODE0) /* rgmii1_rd0 */
-
- /* Slave 2 */
- DRA7XX_CORE_IOPAD(0x3598, PIN_OUTPUT | MUX_MODE3) /* rgmii2_tclk */
- DRA7XX_CORE_IOPAD(0x359c, PIN_OUTPUT | MUX_MODE3) /* rgmii2_tctl */
- DRA7XX_CORE_IOPAD(0x35a0, PIN_OUTPUT | MUX_MODE3) /* rgmii2_td3 */
- DRA7XX_CORE_IOPAD(0x35a4, PIN_OUTPUT | MUX_MODE3) /* rgmii2_td2 */
- DRA7XX_CORE_IOPAD(0x35a8, PIN_OUTPUT | MUX_MODE3) /* rgmii2_td1 */
- DRA7XX_CORE_IOPAD(0x35ac, PIN_OUTPUT | MUX_MODE3) /* rgmii2_td0 */
- DRA7XX_CORE_IOPAD(0x35b0, PIN_INPUT | MUX_MODE3) /* rgmii2_rclk */
- DRA7XX_CORE_IOPAD(0x35b4, PIN_INPUT | MUX_MODE3) /* rgmii2_rctl */
- DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT | MUX_MODE3) /* rgmii2_rd3 */
- DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT | MUX_MODE3) /* rgmii2_rd2 */
- DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT | MUX_MODE3) /* rgmii2_rd1 */
- DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT | MUX_MODE3) /* rgmii2_rd0 */
- >;
-
- };
-
- cpsw_pins_sleep: cpsw_pins_sleep {
- pinctrl-single,pins = <
- /* Slave 1 */
- DRA7XX_CORE_IOPAD(0x3650, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3654, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3658, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x365c, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3660, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3664, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3668, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x366c, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3670, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3674, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3678, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x367c, PIN_INPUT | MUX_MODE15)
-
- /* Slave 2 */
- DRA7XX_CORE_IOPAD(0x3598, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x359c, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35a0, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35a4, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35a8, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35ac, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35b0, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35b4, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT | MUX_MODE15)
- >;
- };
-
- davinci_mdio_pins_default: davinci_mdio_pins_default {
- pinctrl-single,pins = <
- /* MDIO */
- DRA7XX_CORE_IOPAD(0x363c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_mclk */
- DRA7XX_CORE_IOPAD(0x3640, PIN_INPUT_PULLUP | MUX_MODE0) /* mdio_d */
- >;
- };
-
- davinci_mdio_pins_sleep: davinci_mdio_pins_sleep {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x363c, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3640, PIN_INPUT | MUX_MODE15)
- >;
- };
-
- tps659038_pins_default: tps659038_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3818, PIN_INPUT_PULLUP | MUX_MODE14) /* wakeup0.gpio1_0 */
- >;
- };
-
- tmp102_pins_default: tmp102_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x37c8, PIN_INPUT_PULLUP | MUX_MODE14) /* spi2_d0.gpio7_16 */
- >;
- };
-
- mcp79410_pins_default: mcp79410_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3824, PIN_INPUT_PULLUP | MUX_MODE1) /* wakeup3.sys_nirq1 */
- >;
- };
-
- usb1_pins: pinmux_usb1_pins {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */
- >;
- };
-
- tpd12s015_pins: pinmux_tpd12s015_pins {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
- DRA7XX_CORE_IOPAD(0x37b8, PIN_INPUT_PULLDOWN | MUX_MODE14) /* gpio7_12 HPD */
- DRA7XX_CORE_IOPAD(0x3770, PIN_OUTPUT | MUX_MODE14) /* gpio6_28 LS_OE */
- >;
- };
-
- clkout2_pins_default: clkout2_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3694, PIN_OUTPUT_PULLDOWN | MUX_MODE9) /* xref_clk0.clkout2 */
- >;
- };
-
- clkout2_pins_sleep: clkout2_pins_sleep {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3694, PIN_INPUT | MUX_MODE15) /* xref_clk0.clkout2 */
- >;
- };
-
- mcasp3_pins_default: mcasp3_pins_default {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3724, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp3_aclkx.mcasp3_aclkx */
- DRA7XX_CORE_IOPAD(0x3728, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp3_fsx.mcasp3_fsx */
- DRA7XX_CORE_IOPAD(0x372c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp3_axr0.mcasp3_axr0 */
- DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp3_axr1.mcasp3_axr1 */
- >;
- };
-
- mcasp3_pins_sleep: mcasp3_pins_sleep {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x3724, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3728, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x372c, PIN_INPUT | MUX_MODE15)
- DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT | MUX_MODE15)
- >;
- };
-};
-
-&i2c1 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_default>;
- clock-frequency = <400000>;
-
- tps659038: tps659038@58 {
- compatible = "ti,tps659038";
- reg = <0x58>;
- interrupt-parent = <&gpio1>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&tps659038_pins_default>;
-
- #interrupt-cells = <2>;
- interrupt-controller;
-
- ti,system-power-controller;
-
- tps659038_pmic {
- compatible = "ti,tps659038-pmic";
-
- regulators {
- smps12_reg: smps12 {
- /* VDD_MPU */
- regulator-name = "smps12";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1250000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- smps3_reg: smps3 {
- /* VDD_DDR */
- regulator-name = "smps3";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- smps45_reg: smps45 {
- /* VDD_DSPEVE, VDD_IVA, VDD_GPU */
- regulator-name = "smps45";
- regulator-min-microvolt = < 850000>;
- regulator-max-microvolt = <1250000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- smps6_reg: smps6 {
- /* VDD_CORE */
- regulator-name = "smps6";
- regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1150000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- /* SMPS7 unused */
-
- smps8_reg: smps8 {
- /* VDD_1V8 */
- regulator-name = "smps8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- /* SMPS9 unused */
-
- ldo1_reg: ldo1 {
- /* VDD_SD / VDDSHV8 */
- regulator-name = "ldo1";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- ldo2_reg: ldo2 {
- /* VDD_SHV5 */
- regulator-name = "ldo2";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo3_reg: ldo3 {
- /* VDDA_1V8_PHYA */
- regulator-name = "ldo3";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo4_reg: ldo4 {
- /* VDDA_1V8_PHYB */
- regulator-name = "ldo4";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo9_reg: ldo9 {
- /* VDD_RTC */
- regulator-name = "ldo9";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldoln_reg: ldoln {
- /* VDDA_1V8_PLL */
- regulator-name = "ldoln";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldousb_reg: ldousb {
- /* VDDA_3V_USB: VDDA_USBHS33 */
- regulator-name = "ldousb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- };
-
- regen1: regen1 {
- /* VDD_3V3_ON */
- regulator-name = "regen1";
- regulator-boot-on;
- regulator-always-on;
- };
- };
- };
-
- tps659038_rtc: tps659038_rtc {
- compatible = "ti,palmas-rtc";
- interrupt-parent = <&tps659038>;
- interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
- wakeup-source;
- };
-
- tps659038_pwr_button: tps659038_pwr_button {
- compatible = "ti,palmas-pwrbutton";
- interrupt-parent = <&tps659038>;
- interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
- wakeup-source;
- ti,palmas-long-press-seconds = <12>;
- };
-
- tps659038_gpio: tps659038_gpio {
- compatible = "ti,palmas-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- extcon_usb2: tps659038_usb {
- compatible = "ti,palmas-usb-vid";
- ti,enable-vbus-detection;
- vbus-gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
- };
-
- };
-
- tmp102: tmp102@48 {
- compatible = "ti,tmp102";
- reg = <0x48>;
- pinctrl-names = "default";
- pinctrl-0 = <&tmp102_pins_default>;
- interrupt-parent = <&gpio7>;
- interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
- #thermal-sensor-cells = <1>;
- };
-
- tlv320aic3104: tlv320aic3104@18 {
- #sound-dai-cells = <0>;
- compatible = "ti,tlv320aic3104";
- reg = <0x18>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&clkout2_pins_default>;
- pinctrl-1 = <&clkout2_pins_sleep>;
- assigned-clocks = <&clkoutmux2_clk_mux>;
- assigned-clock-parents = <&sys_clk2_dclk_div>;
-
- status = "okay";
- adc-settle-ms = <40>;
-
- AVDD-supply = <&vdd_3v3>;
- IOVDD-supply = <&vdd_3v3>;
- DRVDD-supply = <&vdd_3v3>;
- DVDD-supply = <&aic_dvdd>;
- };
-
- eeprom: eeprom@50 {
- compatible = "at,24c32";
- reg = <0x50>;
- };
-};
-
-&i2c3 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_pins_default>;
- clock-frequency = <400000>;
-
- mcp_rtc: rtc@6f {
- compatible = "microchip,mcp7941x";
- reg = <0x6f>;
- interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
- <&dra7_pmx_core 0x424>;
- interrupt-names = "irq", "wakeup";
-
- pinctrl-names = "default";
- pinctrl-0 = <&mcp79410_pins_default>;
-
- vcc-supply = <&vdd_3v3>;
- wakeup-source;
- };
-};
-
-&gpio7 {
- ti,no-reset-on-init;
- ti,no-idle-on-init;
-};
-
-&cpu0 {
- cpu0-supply = <&smps12_reg>;
- voltage-tolerance = <1>;
-};
-
-&uart3 {
- status = "okay";
- interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
- <&dra7_pmx_core 0x3f8>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&uart3_pins_default>;
-};
-
-&mac {
- status = "okay";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&cpsw_pins_default>;
- pinctrl-1 = <&cpsw_pins_sleep>;
- dual_emac;
-};
-
-&cpsw_emac0 {
- phy_id = <&davinci_mdio>, <1>;
- phy-mode = "rgmii";
- dual_emac_res_vlan = <1>;
-};
-
-&cpsw_emac1 {
- phy_id = <&davinci_mdio>, <2>;
- phy-mode = "rgmii";
- dual_emac_res_vlan = <2>;
-};
-
-&davinci_mdio {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&davinci_mdio_pins_default>;
- pinctrl-1 = <&davinci_mdio_pins_sleep>;
+&tpd12s015 {
+ gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>, /* gpio7_10, CT CP HPD */
+ <&gpio6 28 GPIO_ACTIVE_HIGH>, /* gpio6_28, LS OE */
+ <&gpio7 12 GPIO_ACTIVE_HIGH>; /* gpio7_12/sp1_cs2, HPD */
};
&mmc1 {
- status = "okay";
-
- pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pins_default>;
-
vmmc-supply = <&ldo1_reg>;
- bus-width = <4>;
- cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
-};
-
-&mmc2 {
- status = "okay";
-
- pinctrl-names = "default";
- pinctrl-0 = <&mmc2_pins_default>;
-
- vmmc-supply = <&vdd_3v3>;
- bus-width = <8>;
- ti,non-removable;
- cap-mmc-dual-data-rate;
-};
-
-&sata {
- status = "okay";
-};
-
-&usb2_phy1 {
- phy-supply = <&ldousb_reg>;
-};
-
-&usb2_phy2 {
- phy-supply = <&ldousb_reg>;
-};
-
-&usb1 {
- dr_mode = "host";
- pinctrl-names = "default";
- pinctrl-0 = <&usb1_pins>;
-};
-
-&omap_dwc3_2 {
- extcon = <&extcon_usb2>;
-};
-
-&usb2 {
- /*
- * Stand alone usage is peripheral only.
- * However, with some resistor modifications
- * this port can be used via expansion connectors
- * as "host" or "dual-role". If so, provide
- * the necessary dr_mode override in the expansion
- * board's DT.
- */
- dr_mode = "peripheral";
-};
-
-&cpu_trips {
- cpu_alert1: cpu_alert1 {
- temperature = <50000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "active";
- };
-};
-
-&cpu_cooling_maps {
- map1 {
- trip = <&cpu_alert1>;
- cooling-device = <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
-};
-
-&thermal_zones {
- board_thermal: board_thermal {
- polling-delay-passive = <1250>; /* milliseconds */
- polling-delay = <1500>; /* milliseconds */
-
- /* sensor ID */
- thermal-sensors = <&tmp102 0>;
-
- board_trips: trips {
- board_alert0: board_alert {
- temperature = <40000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "active";
- };
-
- board_crit: board_crit {
- temperature = <105000>; /* millicelsius */
- hysteresis = <0>; /* millicelsius */
- type = "critical";
- };
- };
-
- board_cooling_maps: cooling-maps {
- map0 {
- trip = <&board_alert0>;
- cooling-device =
- <&gpio_fan THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
- };
-};
-
-&dss {
- status = "ok";
-
- vdda_video-supply = <&ldoln_reg>;
-};
-
-&hdmi {
- status = "ok";
- vdda-supply = <&ldo4_reg>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&hdmi_pins>;
-
- port {
- hdmi_out: endpoint {
- remote-endpoint = <&tpd12s015_in>;
- };
- };
-};
-
-&pcie1 {
- gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
-};
-
-&mcasp3 {
- #sound-dai-cells = <0>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&mcasp3_pins_default>;
- pinctrl-1 = <&mcasp3_pins_sleep>;
- assigned-clocks = <&mcasp3_ahclkx_mux>;
- assigned-clock-parents = <&sys_clkin2>;
- status = "okay";
-
- op-mode = <0>; /* MCASP_IIS_MODE */
- tdm-slots = <2>;
- /* 4 serializers */
- serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
- 1 2 0 0
- >;
- tx-num-evt = <32>;
- rx-num-evt = <32>;
-};
-
-&mailbox5 {
- status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
-};
-
-&mailbox6 {
- status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
- status = "okay";
- };
};
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 378b142ef88c..203266f88480 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -19,7 +19,7 @@
model = "CompuLab CL-SOM-AM57x";
compatible = "compulab,cl-som-am57x", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x20000000>; /* 512 MB - minimal configuration */
};
@@ -29,7 +29,7 @@
pinctrl-names = "default";
pinctrl-0 = <&leds_pins_default>;
- led@0 {
+ led0 {
label = "cl-som-am57x:green";
gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0e63b9dff6e7..03cec62260e1 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -304,3 +304,52 @@
ti,non-removable;
max-frequency = <96000000>;
};
+
+&qspi {
+ status = "okay";
+
+ spi-max-frequency = <76800000>;
+ m25p80@0 {
+ compatible = "s25fl256s1", "jedec,spi-nor";
+ spi-max-frequency = <76800000>;
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* MTD partition table.
+ * The ROM checks the first four physical blocks
+ * for a valid file to boot and the flash here has a
+ * 64KiB block size.
+ */
+ partition@0 {
+ label = "QSPI.SPL";
+ reg = <0x00000000 0x000040000>;
+ };
+ partition@1 {
+ label = "QSPI.u-boot";
+ reg = <0x00040000 0x00100000>;
+ };
+ partition@2 {
+ label = "QSPI.u-boot-spl-os";
+ reg = <0x00140000 0x00080000>;
+ };
+ partition@3 {
+ label = "QSPI.u-boot-env";
+ reg = <0x001c0000 0x00010000>;
+ };
+ partition@4 {
+ label = "QSPI.u-boot-env.backup1";
+ reg = <0x001d0000 0x0010000>;
+ };
+ partition@5 {
+ label = "QSPI.kernel";
+ reg = <0x001e0000 0x0800000>;
+ };
+ partition@6 {
+ label = "QSPI.file-system";
+ reg = <0x009e0000 0x01620000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd-ctrevb.dts b/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd-ctrevb.dts
new file mode 100644
index 000000000000..e18769df9fd9
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd-ctrevb.dts
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "arm-realview-eb-11mp-ctrevb.dts"
+#include "arm-realview-eb-bbrevd.dtsi"
+
+/*
+ * This is the EB with the new Revision D baseboard with SMSC9118 ethernet and
+ * the Rev B core tile.
+ */
+/ {
+ model = "ARM RealView Emulation Baseboard Rev D with ARM11MPCore Core Tile Rev B";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd.dts b/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd.dts
new file mode 100644
index 000000000000..26b1c69e9f43
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp-bbrevd.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "arm-realview-eb-11mp.dts"
+#include "arm-realview-eb-bbrevd.dtsi"
+
+/ {
+ model = "ARM RealView Emulation Baseboard Rev D with ARM11MPCore Rev C Core Tile";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts b/arch/arm/boot/dts/arm-realview-eb-11mp-ctrevb.dts
index e68527b0d552..e68527b0d552 100644
--- a/arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp-ctrevb.dts
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp.dts b/arch/arm/boot/dts/arm-realview-eb-11mp.dts
index 87ff602a2a2d..aac1edd4b227 100644
--- a/arch/arm/boot/dts/arm-realview-eb-11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp.dts
@@ -24,7 +24,7 @@
#include "arm-realview-eb-mp.dtsi"
/ {
- model = "ARM RealView Emulation Baseboard with ARM11MPCore Rev C";
+ model = "ARM RealView Emulation Baseboard with ARM11MPCore Rev C Core Tile";
arm,hbi = <0x146>;
/*
diff --git a/arch/arm/boot/dts/arm-realview-eb-a9mp-bbrevd.dts b/arch/arm/boot/dts/arm-realview-eb-a9mp-bbrevd.dts
new file mode 100644
index 000000000000..42efac7496ef
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-a9mp-bbrevd.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "arm-realview-eb-a9mp.dts"
+#include "arm-realview-eb-bbrevd.dtsi"
+
+/ {
+ model = "ARM RealView EB Baseboard Rev D Cortex A9 MPCore";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-bbrevd.dts b/arch/arm/boot/dts/arm-realview-eb-bbrevd.dts
new file mode 100644
index 000000000000..f533c8b49d97
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-bbrevd.dts
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* This derives from the Realview Baseboard, and overlays the new ethernet */
+#include "arm-realview-eb.dts"
+#include "arm-realview-eb-bbrevd.dtsi"
+
+/ {
+ model = "ARM RealView Emulation Baseboard Rev D";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-bbrevd.dtsi b/arch/arm/boot/dts/arm-realview-eb-bbrevd.dtsi
new file mode 100644
index 000000000000..a79e1d1d30a7
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-bbrevd.dtsi
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/ {
+ /* Introduce a fixed regulator for the new ethernet controller */
+ veth: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "veth";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+};
+
+/*
+ * The revision D has a different ethernet controller than the older boards:
+ * the older boards use LAN91C111 but the new one uses LAN9118.
+ */
+&ethernet {
+ compatible = "smsc,lan9118", "smsc,lan9115";
+ phy-mode = "mii";
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ vdd33a-supply = <&veth>;
+ vddvario-supply = <&veth>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index 1c6a040218e3..e2e9599596e2 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -51,14 +51,6 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "veth";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- };
-
xtal24mhz: xtal24mhz@24M {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -134,16 +126,15 @@
bank-width = <4>;
};
- /* SMSC 9118 ethernet with PHY and EEPROM */
+ /* SMSC LAN91C111 ethernet with PHY and EEPROM */
ethernet: ethernet@4e000000 {
- compatible = "smsc,lan9118", "smsc,lan9115";
+ compatible = "smsc,lan91c111";
reg = <0x4e000000 0x10000>;
- phy-mode = "mii";
- reg-io-width = <4>;
- smsc,irq-active-high;
- smsc,irq-push-pull;
- vdd33a-supply = <&veth>;
- vddvario-supply = <&veth>;
+ /*
+ * This means the adapter can be accessed with 8, 16 or
+ * 32 bit reads/writes.
+ */
+ reg-io-width = <7>;
};
usb: usb@4f000000 {
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 2364fc56ae13..033fa63544f7 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -155,20 +155,6 @@
status = "okay";
};
- spi0: spi@10600 {
- pinctrl-0 = <&spi0_pins2>;
- pinctrl-names = "default";
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "mx25l25635e", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <50000000>;
- };
- };
-
nand@d0000 {
status = "okay";
num-cs = <1>;
@@ -274,3 +260,18 @@
compatible = "linux,spdif-dir";
};
};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pins2>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mx25l25635e", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <50000000>;
+ };
+};
+
diff --git a/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi b/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
index 1aba08e4377c..01cded310cbc 100644
--- a/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
+++ b/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
@@ -68,26 +68,6 @@
phy-mode = "rgmii-id";
};
- spi@10600 {
- status = "okay";
- pinctrl-0 = <&spi0_pins2>;
- pinctrl-names = "default";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- /* MX25L8006E */
- compatible = "mxicy,mx25l8005", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <50000000>;
-
- partition@0 {
- label = "u-boot";
- reg = <0x0 0x100000>;
- };
- };
- };
-
usb@50000 {
status = "okay";
};
@@ -176,3 +156,23 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "okay";
+ pinctrl-0 = <&spi0_pins2>;
+ pinctrl-names = "default";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* MX25L8006E */
+ compatible = "mxicy,mx25l8005", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <50000000>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-synology-ds213j.dts b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
index 8ca7a4340c0f..a9cc42776874 100644
--- a/arch/arm/boot/dts/armada-370-synology-ds213j.dts
+++ b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
@@ -87,62 +87,6 @@
status = "disabled";
};
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "micron,n25q064", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <20000000>;
-
- /*
- * Warning!
- *
- * Synology u-boot uses its compiled-in environment
- * and it seems Synology did not care to change u-boot
- * default configuration in order to allow saving a
- * modified environment at a sensible location. So,
- * if you do a 'saveenv' under u-boot, your modified
- * environment will be saved at 1MB after the start
- * of the flash, i.e. in the middle of the uImage.
- * For that reason, it is strongly advised not to
- * change the default environment, unless you know
- * what you are doing.
- */
- partition@00000000 { /* u-boot */
- label = "RedBoot";
- reg = <0x00000000 0x000c0000>; /* 768KB */
- };
-
- partition@000c0000 { /* uImage */
- label = "zImage";
- reg = <0x000c0000 0x002d0000>; /* 2880KB */
- };
-
- partition@00390000 { /* uInitramfs */
- label = "rd.gz";
- reg = <0x00390000 0x00440000>; /* 4250KB */
- };
-
- partition@007d0000 { /* MAC address and serial number */
- label = "vendor";
- reg = <0x007d0000 0x00010000>; /* 64KB */
- };
-
- partition@007e0000 {
- label = "RedBoot config";
- reg = <0x007e0000 0x00010000>; /* 64KB */
- };
-
- partition@007f0000 {
- label = "FIS directory";
- reg = <0x007f0000 0x00010000>; /* 64KB */
- };
- };
- };
-
i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
pinctrl-0 = <&i2c0_pins>;
@@ -347,3 +291,59 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "micron,n25q064", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <20000000>;
+
+ /*
+ * Warning!
+ *
+ * Synology u-boot uses its compiled-in environment
+ * and it seems Synology did not care to change u-boot
+ * default configuration in order to allow saving a
+ * modified environment at a sensible location. So,
+ * if you do a 'saveenv' under u-boot, your modified
+ * environment will be saved at 1MB after the start
+ * of the flash, i.e. in the middle of the uImage.
+ * For that reason, it is strongly advised not to
+ * change the default environment, unless you know
+ * what you are doing.
+ */
+ partition@00000000 { /* u-boot */
+ label = "RedBoot";
+ reg = <0x00000000 0x000c0000>; /* 768KB */
+ };
+
+ partition@000c0000 { /* uImage */
+ label = "zImage";
+ reg = <0x000c0000 0x002d0000>; /* 2880KB */
+ };
+
+ partition@00390000 { /* uInitramfs */
+ label = "rd.gz";
+ reg = <0x00390000 0x00440000>; /* 4250KB */
+ };
+
+ partition@007d0000 { /* MAC address and serial number */
+ label = "vendor";
+ reg = <0x007d0000 0x00010000>; /* 64KB */
+ };
+
+ partition@007e0000 {
+ label = "RedBoot config";
+ reg = <0x007e0000 0x00010000>; /* 64KB */
+ };
+
+ partition@007f0000 {
+ label = "FIS directory";
+ reg = <0x007f0000 0x00010000>; /* 64KB */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index a718866ba52d..3ccedc9dffb2 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -148,26 +148,6 @@
interrupts = <50>;
};
- spi0: spi@10600 {
- reg = <0x10600 0x28>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- interrupts = <30>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
- spi1: spi@10680 {
- reg = <0x10680 0x28>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- interrupts = <92>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
i2c0: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
#address-cells = <1>;
@@ -320,6 +300,42 @@
status = "disabled";
};
};
+
+ spi0: spi@10600 {
+ reg = <MBUS_ID(0xf0, 0x01) 0x10600 0x28>, /* control */
+ <MBUS_ID(0x01, 0x1e) 0 0xffffffff>, /* CS0 */
+ <MBUS_ID(0x01, 0x5e) 0 0xffffffff>, /* CS1 */
+ <MBUS_ID(0x01, 0x9e) 0 0xffffffff>, /* CS2 */
+ <MBUS_ID(0x01, 0xde) 0 0xffffffff>, /* CS3 */
+ <MBUS_ID(0x01, 0x1f) 0 0xffffffff>, /* CS4 */
+ <MBUS_ID(0x01, 0x5f) 0 0xffffffff>, /* CS5 */
+ <MBUS_ID(0x01, 0x9f) 0 0xffffffff>, /* CS6 */
+ <MBUS_ID(0x01, 0xdf) 0 0xffffffff>; /* CS7 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <30>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ spi1: spi@10680 {
+ reg = <MBUS_ID(0xf0, 0x01) 0x10680 0x28>, /* control */
+ <MBUS_ID(0x01, 0x1a) 0 0xffffffff>, /* CS0 */
+ <MBUS_ID(0x01, 0x5a) 0 0xffffffff>, /* CS1 */
+ <MBUS_ID(0x01, 0x9a) 0 0xffffffff>, /* CS2 */
+ <MBUS_ID(0x01, 0xda) 0 0xffffffff>, /* CS3 */
+ <MBUS_ID(0x01, 0x1b) 0 0xffffffff>, /* CS4 */
+ <MBUS_ID(0x01, 0x5b) 0 0xffffffff>, /* CS5 */
+ <MBUS_ID(0x01, 0x9b) 0 0xffffffff>, /* CS6 */
+ <MBUS_ID(0x01, 0xdb) 0 0xffffffff>; /* CS7 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <92>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
};
clocks {
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 3b06aa835448..b4258105e91f 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -134,24 +134,6 @@
wt-override;
};
- /*
- * Default SPI pinctrl setting, can be overwritten on
- * board level if a different configuration is used.
- */
- spi0: spi@10600 {
- compatible = "marvell,armada-370-spi",
- "marvell,orion-spi";
- pinctrl-0 = <&spi0_pins1>;
- pinctrl-names = "default";
- };
-
- spi1: spi@10680 {
- compatible = "marvell,armada-370-spi",
- "marvell,orion-spi";
- pinctrl-0 = <&spi1_pins>;
- pinctrl-names = "default";
- };
-
i2c0: i2c@11000 {
reg = <0x11000 0x20>;
};
@@ -447,3 +429,19 @@
marvell,function = "ge1";
};
};
+
+/*
+ * Default SPI pinctrl setting, can be overwritten on
+ * board level if a different configuration is used.
+ */
+&spi0 {
+ compatible = "marvell,armada-370-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi0_pins1>;
+ pinctrl-names = "default";
+};
+
+&spi1 {
+ compatible = "marvell,armada-370-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 2d3fd6e76e2c..db5b9f6b615d 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -65,20 +65,6 @@
MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
- spi1: spi@10680 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_pins>;
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p128", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <54000000>;
- };
- };
-
i2c0: i2c@11000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
@@ -155,6 +141,10 @@
bm,pool-short = <3>;
};
+ usb@58000 {
+ status = "okay";
+ };
+
/* CON4 */
ethernet@70000 {
pinctrl-names = "default";
@@ -178,15 +168,35 @@
nfc: flash@d0000 {
status = "okay";
- #address-cells = <1>;
- #size-cells = <1>;
-
num-cs = <1>;
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
marvell,nand-keep-config;
marvell,nand-enable-arbiter;
nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x00000000 0x00800000>;
+ read-only;
+ };
+
+ partition@800000 {
+ label = "uImage";
+ reg = <0x00800000 0x00400000>;
+ read-only;
+ };
+
+ partition@c00000 {
+ label = "Root";
+ reg = <0x00c00000 0x3f400000>;
+ };
+ };
};
usb3@f0000 {
@@ -239,3 +249,17 @@
gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
};
};
+
+&spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p128", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <54000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 22f7a13e20b4..8f0e508f64ae 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -62,11 +62,6 @@
MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
internal-regs {
-
- spi@10600 {
- status = "disabled";
- };
-
i2c@11000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
@@ -332,3 +327,7 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index d3e6bd805006..71ce201c903e 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -315,30 +315,6 @@
status = "okay";
};
- spi@10680 {
- /*
- * We don't seem to have the W25Q32 on the
- * A1 Rev 2.0 boards, so disable SPI.
- * CS0: W25Q32 (doesn't appear to be present)
- * CS1:
- * CS2: mikrobus
- */
- pinctrl-0 = <&spi1_pins
- &clearfog_spi1_cs_pins
- &mikro_spi_pins>;
- pinctrl-names = "default";
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "w25q32", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <3000000>;
- status = "disabled";
- };
- };
-
usb@58000 {
/* CON3, nearest power. */
status = "okay";
@@ -444,3 +420,27 @@
};
};
};
+
+&spi1 {
+ /*
+ * We don't seem to have the W25Q32 on the
+ * A1 Rev 2.0 boards, so disable SPI.
+ * CS0: W25Q32 (doesn't appear to be present)
+ * CS1:
+ * CS2: mikrobus
+ */
+ pinctrl-0 = <&spi1_pins
+ &clearfog_spi1_cs_pins
+ &mikro_spi_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "w25q32", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <3000000>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/armada-388-db.dts b/arch/arm/boot/dts/armada-388-db.dts
index ea93ed727030..de26c762239c 100644
--- a/arch/arm/boot/dts/armada-388-db.dts
+++ b/arch/arm/boot/dts/armada-388-db.dts
@@ -70,18 +70,6 @@
MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
- spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "w25q32", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <108000000>;
- };
- };
-
i2c@11000 {
status = "okay";
clock-frequency = <100000>;
@@ -201,3 +189,16 @@
};
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "w25q32", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+ };
+};
+
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index fd75e5e9550f..895fa6cfa15a 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -64,21 +64,6 @@
MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
- spi@10600 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_pins>;
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p128", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <50000000>;
- m25p,fast-read;
- };
- };
-
i2c@11000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
@@ -433,3 +418,18 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p128", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-388-rd.dts b/arch/arm/boot/dts/armada-388-rd.dts
index 853f9735cc70..dd3462ddb6b9 100644
--- a/arch/arm/boot/dts/armada-388-rd.dts
+++ b/arch/arm/boot/dts/armada-388-rd.dts
@@ -70,18 +70,6 @@
MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
internal-regs {
- spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p128", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <108000000>;
- };
- };
-
i2c@11000 {
status = "okay";
clock-frequency = <100000>;
@@ -142,3 +130,16 @@
};
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p128", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+ };
+};
+
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 3312be6c82cc..2d7668848c5a 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -170,30 +170,6 @@
<0xc100 0x100>;
};
- spi0: spi@10600 {
- compatible = "marvell,armada-380-spi",
- "marvell,orion-spi";
- reg = <0x10600 0x50>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
- spi1: spi@10680 {
- compatible = "marvell,armada-380-spi",
- "marvell,orion-spi";
- reg = <0x10680 0x50>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
i2c0: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
@@ -287,6 +263,15 @@
marvell,function = "spi1";
};
+ nand_pins: nand-pins {
+ marvell,pins = "mpp22", "mpp34", "mpp23",
+ "mpp33", "mpp38", "mpp28",
+ "mpp40", "mpp42", "mpp35",
+ "mpp36", "mpp25", "mpp30",
+ "mpp32";
+ marvell,function = "dev";
+ };
+
uart0_pins: uart-pins-0 {
marvell,pins = "mpp0", "mpp1";
marvell,function = "ua0";
@@ -649,6 +634,30 @@
no-memory-wc;
status = "disabled";
};
+
+ spi0: spi@10600 {
+ compatible = "marvell,armada-380-spi",
+ "marvell,orion-spi";
+ reg = <MBUS_ID(0xf0, 0x01) 0x10600 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ spi1: spi@10680 {
+ compatible = "marvell,armada-380-spi",
+ "marvell,orion-spi";
+ reg = <MBUS_ID(0xf0, 0x01) 0x10680 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
};
clocks {
diff --git a/arch/arm/boot/dts/armada-390-db.dts b/arch/arm/boot/dts/armada-390-db.dts
new file mode 100644
index 000000000000..34e279d973c8
--- /dev/null
+++ b/arch/arm/boot/dts/armada-390-db.dts
@@ -0,0 +1,175 @@
+/*
+ * Device Tree file for Marvell Armada 390 Development Board
+ * (DB-88F6920)
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Grzegorz Jaszczyk <jaz@semihalf.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "armada-390.dtsi"
+
+/ {
+ model = "Marvell Armada 390 Development Board";
+ compatible = "marvell,a390-db", "marvell,armada390";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000>; /* 2 GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>;
+
+ internal-regs {
+ i2c@11000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ /* CON104 */
+ serial@12000 {
+ status = "okay";
+ };
+
+ /* CON97 */
+ usb@58000 {
+ status = "okay";
+ };
+
+ flash@d0000 {
+ status = "okay";
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x800000>;
+ };
+ partition@800000 {
+ label = "Linux";
+ reg = <0x800000 0x800000>;
+ };
+ partition@1000000 {
+ label = "Filesystem";
+ reg = <0x1000000 0x3f000000>;
+ };
+ };
+ };
+
+ /* CON98 */
+ usb3@f8000 {
+ status = "okay";
+ };
+ };
+
+ pcie-controller {
+ status = "okay";
+
+ /* CON30 */
+ pcie@1,0 {
+ status = "okay";
+ };
+
+ /* CON44 */
+ pcie@2,0 {
+ status = "okay";
+ };
+
+ /* CON61 */
+ pcie@3,0 {
+ status = "okay";
+ };
+ };
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+
+ spi-flash@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "n25q128a13",
+ "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x400000>;
+ };
+ partition@400000 {
+ label = "Filesystem";
+ reg = <0x400000 0xc00000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-390.dtsi b/arch/arm/boot/dts/armada-390.dtsi
index 094e39c66039..6cd18d8aaac7 100644
--- a/arch/arm/boot/dts/armada-390.dtsi
+++ b/arch/arm/boot/dts/armada-390.dtsi
@@ -47,6 +47,8 @@
#include "armada-39x.dtsi"
/ {
+ compatible = "marvell,armada390";
+
soc {
internal-regs {
pinctrl@18000 {
@@ -54,4 +56,5 @@
reg = <0x18000 0x20>;
};
};
+ };
};
diff --git a/arch/arm/boot/dts/armada-395-gp.dts b/arch/arm/boot/dts/armada-395-gp.dts
new file mode 100644
index 000000000000..2cdbba804c1e
--- /dev/null
+++ b/arch/arm/boot/dts/armada-395-gp.dts
@@ -0,0 +1,163 @@
+/*
+ * Device Tree file for Marvell Armada 395 GP board
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Grzegorz Jaszczyk <jaz@semihalf.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "armada-395.dtsi"
+
+/ {
+ model = "Marvell Armada 395 GP Board";
+ compatible = "marvell,a395-gp", "marvell,armada395",
+ "marvell,armada390";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1 GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>;
+
+ internal-regs {
+ i2c@11000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ };
+ };
+
+ serial@12000 {
+ /*
+ * Exported on the micro USB connector CON17
+ * through an FTDI
+ */
+ status = "okay";
+ };
+
+ /* CON1 */
+ usb@58000 {
+ status = "okay";
+ };
+
+ /* CON2 */
+ sata@a8000 {
+ status = "okay";
+ };
+
+ flash@d0000 {
+ status = "okay";
+ pinctrl-0 = <&nand_pins>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x00000000 0x00600000>;
+ read-only;
+ };
+
+ partition@800000 {
+ label = "uImage";
+ reg = <0x00600000 0x00400000>;
+ read-only;
+ };
+
+ partition@1000000 {
+ label = "Root";
+ reg = <0x00a00000 0x3f600000>;
+ };
+ };
+ };
+
+ /* CON18 */
+ sdhci@d8000 {
+ clock-frequency = <200000000>;
+ broken-cd;
+ wp-inverted;
+ bus-width = <8>;
+ status = "okay";
+ no-1-8-v;
+ };
+
+ /* CON4 */
+ usb3@f0000 {
+ status = "okay";
+ };
+ };
+
+ pcie-controller {
+ status = "okay";
+
+ /*
+ * The two PCIe units are accessible through the
+ * mini PCIe slots on the board.
+ */
+
+ /* CON7 */
+ pcie@2,0 {
+ /* Port 1, Lane 0 */
+ status = "okay";
+ };
+
+ /* CON8 */
+ pcie@4,0 {
+ /* Port 3, Lane 0 */
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-395.dtsi b/arch/arm/boot/dts/armada-395.dtsi
new file mode 100644
index 000000000000..ab5dc49f2bff
--- /dev/null
+++ b/arch/arm/boot/dts/armada-395.dtsi
@@ -0,0 +1,76 @@
+/*
+ * Device Tree Include file for Marvell Armada 395 SoC.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Grzegorz Jaszczyk <jaz@semihalf.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "armada-39x.dtsi"
+
+/ {
+ compatible = "marvell,armada395", "marvell,armada390";
+
+ soc {
+ internal-regs {
+ pinctrl@18000 {
+ compatible = "marvell,mv88f6925-pinctrl";
+ reg = <0x18000 0x20>;
+ };
+
+ sata@a8000 {
+ compatible = "marvell,armada-380-ahci";
+ reg = <0xa8000 0x2000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 15>;
+ status = "disabled";
+ };
+
+ usb3@f0000 {
+ compatible = "marvell,armada-380-xhci";
+ reg = <0xf0000 0x4000>,<0xf4000 0x4000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 9>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-398-db.dts b/arch/arm/boot/dts/armada-398-db.dts
index 788c3badb681..268c8349c884 100644
--- a/arch/arm/boot/dts/armada-398-db.dts
+++ b/arch/arm/boot/dts/armada-398-db.dts
@@ -65,30 +65,6 @@
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>;
internal-regs {
- spi@10680 {
- status = "okay";
- pinctrl-0 = <&spi1_pins>;
- pinctrl-names = "default";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "n25q128a13", "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <108000000>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0 0x400000>;
- };
-
- partition@400000 {
- label = "Filesystem";
- reg = <0x400000 0x1000000>;
- };
- };
- };
-
i2c@11000 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
@@ -108,6 +84,10 @@
status = "okay";
};
+ usb@58000 {
+ status = "okay";
+ };
+
flash@d0000 {
status = "okay";
pinctrl-0 = <&nand_pins>;
@@ -132,6 +112,10 @@
reg = <0x1000000 0x3f000000>;
};
};
+
+ usb3@f8000 {
+ status = "okay";
+ };
};
pcie-controller {
@@ -151,3 +135,27 @@
};
};
};
+
+&spi1 {
+ status = "okay";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "n25q128a13", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <108000000>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x400000>;
+ };
+
+ partition@400000 {
+ label = "Filesystem";
+ reg = <0x400000 0x1000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-398.dtsi b/arch/arm/boot/dts/armada-398.dtsi
index fdc25914e3a3..234a99891a29 100644
--- a/arch/arm/boot/dts/armada-398.dtsi
+++ b/arch/arm/boot/dts/armada-398.dtsi
@@ -44,7 +44,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "armada-39x.dtsi"
+#include "armada-395.dtsi"
/ {
compatible = "marvell,armada398", "marvell,armada390";
@@ -55,6 +55,14 @@
compatible = "marvell,mv88f6928-pinctrl";
reg = <0x18000 0x20>;
};
+
+ sata@e0000 {
+ compatible = "marvell,armada-380-ahci";
+ reg = <0xe0000 0x2000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 30>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index dc6efd386dbc..34cba87f9200 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -55,6 +55,8 @@
compatible = "marvell,armada390";
aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -78,6 +80,11 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts-extended = <&mpic 3>;
+ };
+
soc {
compatible = "marvell,armada390-mbus", "marvell,armadaxp-mbus",
"simple-bus";
@@ -131,30 +138,6 @@
<0xc100 0x100>;
};
- spi0: spi@10600 {
- compatible = "marvell,armada-390-spi",
- "marvell,orion-spi";
- reg = <0x10600 0x50>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
- spi1: spi@10680 {
- compatible = "marvell,armada-390-spi",
- "marvell,orion-spi";
- reg = <0x10680 0x50>;
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&coreclk 0>;
- status = "disabled";
- };
-
i2c0: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
@@ -269,6 +252,34 @@
};
};
+ gpio0: gpio@18100 {
+ compatible = "marvell,orion-gpio";
+ reg = <0x18100 0x40>;
+ ngpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gpio1: gpio@18140 {
+ compatible = "marvell,orion-gpio";
+ reg = <0x18140 0x40>;
+ ngpios = <28>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
system-controller@18200 {
compatible = "marvell,armada-390-system-controller",
"marvell,armada-370-xp-system-controller";
@@ -317,11 +328,29 @@
clock-names = "nbclk", "fixed";
};
+ watchdog@20300 {
+ compatible = "marvell,armada-380-wdt";
+ reg = <0x20300 0x34>, <0x20704 0x4>,
+ <0x18260 0x4>;
+ clocks = <&coreclk 2>, <&refclk>;
+ clock-names = "nbclk", "fixed";
+ };
+
cpurst@20800 {
compatible = "marvell,armada-370-cpu-reset";
reg = <0x20800 0x10>;
};
+ mpcore-soc-ctrl@20d20 {
+ compatible = "marvell,armada-380-mpcore-soc-ctrl";
+ reg = <0x20d20 0x6c>;
+ };
+
+ coherency-fabric@21010 {
+ compatible = "marvell,armada-380-coherency-fabric";
+ reg = <0x21010 0x1c>;
+ };
+
pmsu@22000 {
compatible = "marvell,armada-390-pmsu",
"marvell,armada-380-pmsu";
@@ -368,6 +397,13 @@
};
};
+ rtc@a3800 {
+ compatible = "marvell,armada-380-rtc";
+ reg = <0xa3800 0x20>, <0x184a0 0x0c>;
+ reg-names = "rtc", "rtc-soc";
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
flash@d0000 {
compatible = "marvell,armada370-nand";
reg = <0xd0000 0x54>;
@@ -380,7 +416,10 @@
sdhci@d8000 {
compatible = "marvell,armada-380-sdhci";
- reg = <0xd8000 0x1000>, <0xdc000 0x100>;
+ reg-names = "sdhci", "mbus", "conf-sdio3";
+ reg = <0xd8000 0x1000>,
+ <0xdc000 0x100>,
+ <0x18454 0x4>;
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 17>;
mrvl,clk-delay-cycles = <0x1F>;
@@ -395,6 +434,12 @@
clocks = <&mainpll>;
clock-output-names = "nand";
};
+
+ thermal@e8078 {
+ compatible = "marvell,armada380-thermal";
+ reg = <0xe4078 0x4>, <0xe4074 0x4>;
+ status = "okay";
+ };
};
pcie-controller {
@@ -501,6 +546,30 @@
status = "disabled";
};
};
+
+ spi0: spi@10600 {
+ compatible = "marvell,armada-390-spi",
+ "marvell,orion-spi";
+ reg = <MBUS_ID(0xf0, 0x01) 0x10600 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ spi1: spi@10680 {
+ compatible = "marvell,armada-390-spi",
+ "marvell,orion-spi";
+ reg = <MBUS_ID(0xf0, 0x01) 0x10680 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
};
clocks {
@@ -510,5 +579,12 @@
#clock-cells = <0>;
clock-frequency = <1000000000>;
};
+
+ /* 25 MHz reference crystal */
+ refclk: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
index 5c21b236721f..ce152719bc28 100644
--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
@@ -135,18 +135,6 @@
phy = <&phy1>;
phy-mode = "rgmii-id";
};
-
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "n25q128a13", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <108000000>;
- };
- };
};
};
@@ -179,3 +167,15 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "n25q128a13", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index 62422a90aeb2..075120bc3ec4 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -231,18 +231,6 @@
status = "okay";
};
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "m25p64", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <20000000>;
- };
- };
-
nand@d0000 {
status = "okay";
num-cs = <1>;
@@ -277,3 +265,15 @@
};
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p64", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <20000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 061f4237760e..190e4eccb180 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -232,18 +232,6 @@
status = "okay";
};
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "n25q128a13", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <108000000>;
- };
- };
-
bm@c0000 {
status = "okay";
};
@@ -262,3 +250,15 @@
};
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "n25q128a13", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <108000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 7a461541ce50..076f27f22c3b 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -279,18 +279,6 @@
reg = <0x180000 0x780000>; /* 7.5MB */
};
};
-
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "everspin,mr25h256";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <40000000>;
- };
- };
};
};
@@ -398,3 +386,15 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "everspin,mr25h256";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <40000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
index d17dab0a6f51..ae286736b90a 100644
--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
@@ -110,62 +110,6 @@
status = "disabled";
};
- spi0: spi@10600 {
- status = "okay";
-
- spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "micron,n25q064", "jedec,spi-nor";
- reg = <0>; /* Chip select 0 */
- spi-max-frequency = <20000000>;
-
- /*
- * Warning!
- *
- * Synology u-boot uses its compiled-in environment
- * and it seems Synology did not care to change u-boot
- * default configuration in order to allow saving a
- * modified environment at a sensible location. So,
- * if you do a 'saveenv' under u-boot, your modified
- * environment will be saved at 1MB after the start
- * of the flash, i.e. in the middle of the uImage.
- * For that reason, it is strongly advised not to
- * change the default environment, unless you know
- * what you are doing.
- */
- partition@00000000 { /* u-boot */
- label = "RedBoot";
- reg = <0x00000000 0x000d0000>; /* 832KB */
- };
-
- partition@000c0000 { /* uImage */
- label = "zImage";
- reg = <0x000d0000 0x002d0000>; /* 2880KB */
- };
-
- partition@003a0000 { /* uInitramfs */
- label = "rd.gz";
- reg = <0x003a0000 0x00430000>; /* 4250KB */
- };
-
- partition@007d0000 { /* MAC address and serial number */
- label = "vendor";
- reg = <0x007d0000 0x00010000>; /* 64KB */
- };
-
- partition@007e0000 {
- label = "RedBoot config";
- reg = <0x007e0000 0x00010000>; /* 64KB */
- };
-
- partition@007f0000 {
- label = "FIS directory";
- reg = <0x007f0000 0x00010000>; /* 64KB */
- };
- };
- };
-
i2c@11000 {
clock-frequency = <400000>;
status = "okay";
@@ -362,3 +306,59 @@
marvell,function = "gpio";
};
};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "micron,n25q064", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <20000000>;
+
+ /*
+ * Warning!
+ *
+ * Synology u-boot uses its compiled-in environment
+ * and it seems Synology did not care to change u-boot
+ * default configuration in order to allow saving a
+ * modified environment at a sensible location. So,
+ * if you do a 'saveenv' under u-boot, your modified
+ * environment will be saved at 1MB after the start
+ * of the flash, i.e. in the middle of the uImage.
+ * For that reason, it is strongly advised not to
+ * change the default environment, unless you know
+ * what you are doing.
+ */
+ partition@00000000 { /* u-boot */
+ label = "RedBoot";
+ reg = <0x00000000 0x000d0000>; /* 832KB */
+ };
+
+ partition@000c0000 { /* uImage */
+ label = "zImage";
+ reg = <0x000d0000 0x002d0000>; /* 2880KB */
+ };
+
+ partition@003a0000 { /* uInitramfs */
+ label = "rd.gz";
+ reg = <0x003a0000 0x00430000>; /* 4250KB */
+ };
+
+ partition@007d0000 { /* MAC address and serial number */
+ label = "vendor";
+ reg = <0x007d0000 0x00010000>; /* 64KB */
+ };
+
+ partition@007e0000 {
+ label = "RedBoot config";
+ reg = <0x007e0000 0x00010000>; /* 64KB */
+ };
+
+ partition@007f0000 {
+ label = "FIS directory";
+ reg = <0x007f0000 0x00010000>; /* 64KB */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 553349c07f28..4a5f99e65b51 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -84,19 +84,6 @@
wt-override;
};
- spi0: spi@10600 {
- compatible = "marvell,armada-xp-spi",
- "marvell,orion-spi";
- pinctrl-0 = <&spi0_pins>;
- pinctrl-names = "default";
- };
-
- spi1: spi@10680 {
- compatible = "marvell,armada-xp-spi",
- "marvell,orion-spi";
- };
-
-
i2c0: i2c@11000 {
compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
reg = <0x11000 0x100>;
@@ -362,6 +349,12 @@
marvell,function = "spi0";
};
+ spi1_pins: spi1-pins {
+ marvell,pins = "mpp13", "mpp14",
+ "mpp16", "mpp17";
+ marvell,function = "spi1";
+ };
+
uart2_pins: uart2-pins {
marvell,pins = "mpp42", "mpp43";
marvell,function = "uart2";
@@ -372,3 +365,15 @@
marvell,function = "uart3";
};
};
+
+&spi0 {
+ compatible = "marvell,armada-xp-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi0_pins>;
+ pinctrl-names = "default";
+};
+
+&spi1 {
+ compatible = "marvell,armada-xp-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/armv7-m.dtsi b/arch/arm/boot/dts/armv7-m.dtsi
index 16331aa79775..ba332e399be4 100644
--- a/arch/arm/boot/dts/armv7-m.dtsi
+++ b/arch/arm/boot/dts/armv7-m.dtsi
@@ -1,5 +1,3 @@
-#include "skeleton.dtsi"
-
/ {
nvic: interrupt-controller@e000e100 {
compatible = "arm,armv7m-nvic";
diff --git a/arch/arm/boot/dts/artpec6.dtsi b/arch/arm/boot/dts/artpec6.dtsi
index 3fac4c4d0007..3489019cc0dc 100644
--- a/arch/arm/boot/dts/artpec6.dtsi
+++ b/arch/arm/boot/dts/artpec6.dtsi
@@ -41,6 +41,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/axis,artpec6-clkctrl.h>
#include "skeleton.dtsi"
/ {
@@ -109,14 +110,14 @@
compatible = "arm,cortex-a9-global-timer";
reg = <0xfaf00200 0x20>;
interrupts = <GIC_PPI 11 0xf01>;
- clocks = <&clkctrl 1>;
+ clocks = <&clkctrl ARTPEC6_CLK_CPU_PERIPH>;
};
timer@faf00600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xfaf00600 0x20>;
interrupts = <GIC_PPI 13 0xf04>;
- clocks = <&clkctrl 1>;
+ clocks = <&clkctrl ARTPEC6_CLK_CPU_PERIPH>;
status = "disabled";
};
@@ -136,12 +137,20 @@
arm,data-latency = <1 1 1>;
arm,tag-latency = <1 1 1>;
arm,filter-ranges = <0x0 0x80000000>;
+ arm,double-linefill = <1>;
+ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+ prefetch-data = <1>;
+ prefetch-instr = <1>;
+ arm,prefetch-offset = <0>;
+ arm,prefetch-drop = <1>;
};
pmu {
compatible = "arm,cortex-a9-pmu";
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
interrupt-parent = <&intc>;
};
@@ -157,7 +166,7 @@
ethernet: ethernet@f8010000 {
clock-names = "phy_ref_clk", "apb_pclk";
clocks = <&eth_phy_ref_clk>,
- <&clkctrl 4>;
+ <&clkctrl ARTPEC6_CLK_ETH_ACLK>;
compatible = "snps,dwc-qos-ethernet-4.10";
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
@@ -175,8 +184,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0xf8036000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkctrl 13>,
- <&clkctrl 12>;
+ clocks = <&clkctrl ARTPEC6_CLK_UART_REFCLK>,
+ <&clkctrl ARTPEC6_CLK_UART_PCLK>;
clock-names = "uart_clk", "apb_pclk";
status = "disabled";
};
@@ -184,8 +193,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0xf8037000 0x1000>;
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkctrl 13>,
- <&clkctrl 12>;
+ clocks = <&clkctrl ARTPEC6_CLK_UART_REFCLK>,
+ <&clkctrl ARTPEC6_CLK_UART_PCLK>;
clock-names = "uart_clk", "apb_pclk";
status = "disabled";
};
@@ -193,8 +202,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0xf8038000 0x1000>;
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkctrl 13>,
- <&clkctrl 12>;
+ clocks = <&clkctrl ARTPEC6_CLK_UART_REFCLK>,
+ <&clkctrl ARTPEC6_CLK_UART_PCLK>;
clock-names = "uart_clk", "apb_pclk";
status = "disabled";
};
@@ -202,8 +211,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0xf8039000 0x1000>;
interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkctrl 13>,
- <&clkctrl 12>;
+ clocks = <&clkctrl ARTPEC6_CLK_UART_REFCLK>,
+ <&clkctrl ARTPEC6_CLK_UART_PCLK>;
clock-names = "uart_clk", "apb_pclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/axp209.dtsi b/arch/arm/boot/dts/axp209.dtsi
index afbe89c01df5..675bb0f30825 100644
--- a/arch/arm/boot/dts/axp209.dtsi
+++ b/arch/arm/boot/dts/axp209.dtsi
@@ -53,6 +53,12 @@
interrupt-controller;
#interrupt-cells = <1>;
+ axp_gpio: gpio {
+ compatible = "x-powers,axp209-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
regulators {
/* Default work frequency for buck regulators */
x-powers,dcdc-freq = <1500>;
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index c3bf7d23f136..7c9e0fae9bb9 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -209,6 +209,24 @@
#dma-cells = <1>;
};
+ amac0: ethernet@22000 {
+ compatible = "brcm,nsp-amac";
+ reg = <0x022000 0x1000>,
+ <0x110000 0x1000>;
+ reg-names = "amac_base", "idm_base";
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ amac1: ethernet@23000 {
+ compatible = "brcm,nsp-amac";
+ reg = <0x023000 0x1000>,
+ <0x111000 0x1000>;
+ reg-names = "amac_base", "idm_base";
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
nand: nand@26000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x026000 0x600>,
@@ -223,6 +241,14 @@
brcm,nand-has-wp;
};
+ pwm: pwm@31000 {
+ compatible = "brcm,iproc-pwm";
+ reg = <0x31000 0x28>;
+ clocks = <&osc>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
rng: rng@33000 {
compatible = "brcm,bcm-nsp-rng";
reg = <0x33000 0x14>;
@@ -246,6 +272,17 @@
clock-names = "apb_pclk";
};
+ srab: srab@36000 {
+ compatible = "brcm,nsp-srab";
+ reg = <0x36000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ /* ports are defined in board DTS */
+ };
+
i2c0: i2c@38000 {
compatible = "brcm,iproc-i2c";
reg = <0x38000 0x50>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
index 35ff4e7a4aac..f7f9db355d98 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,model-a-plus", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
index 306a84ee9898..8be102f5d826 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,model-a", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index d5fdb8e761a3..35cde65c975e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -2,6 +2,7 @@
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
#include "bcm283x-rpi-smsc9514.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,model-b-plus", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index bfc4bd9b7733..84df85ea6296 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -2,6 +2,7 @@
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
#include "bcm283x-rpi-smsc9512.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,model-b-rev2", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index 0371bb7374b8..8e626a80fe24 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -2,6 +2,7 @@
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
#include "bcm283x-rpi-smsc9512.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,model-b", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
new file mode 100644
index 000000000000..60e359fafc5b
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Stefan Wahren <stefan.wahren@i2se.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "bcm2835.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
+
+/ {
+ compatible = "raspberrypi,model-zero", "brcm,bcm2835";
+ model = "Raspberry Pi Zero";
+
+ leds {
+ act {
+ gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&gpio {
+ pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
+
+ /* I2S interface */
+ i2s_alt0: i2s_alt0 {
+ brcm,pins = <18 19 20 21>;
+ brcm,function = <BCM2835_FSEL_ALT0>;
+ };
+};
+
+&hdmi {
+ hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index 29e1cfe8eb14..39dccf62ac96 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -2,6 +2,7 @@
#include "bcm2836.dtsi"
#include "bcm2835-rpi.dtsi"
#include "bcm283x-rpi-smsc9514.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
/ {
compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-host.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-host.dtsi
new file mode 100644
index 000000000000..73f4ece8dcd0
--- /dev/null
+++ b/arch/arm/boot/dts/bcm283x-rpi-usb-host.dtsi
@@ -0,0 +1,3 @@
+&usb {
+ dr_mode = "host";
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 445624a1a1de..46d46d894a44 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -290,6 +290,8 @@
interrupts = <1 9>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clk_usb>;
+ clock-names = "otg";
};
v3d: v3d@7ec00000 {
@@ -317,5 +319,12 @@
clock-frequency = <19200000>;
};
+ clk_usb: clock@4 {
+ compatible = "fixed-clock";
+ reg = <4>;
+ #clock-cells = <0>;
+ clock-output-names = "otg";
+ clock-frequency = <480000000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 8af47913b3b4..ae4b3880616d 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -140,6 +140,15 @@
};
};
+ usb2_phy: usb2-phy {
+ compatible = "brcm,ns-usb2-phy";
+ reg = <0x1800c000 0x1000>;
+ reg-names = "dmu";
+ #phy-cells = <0>;
+ clocks = <&genpll BCM_NSP_GENPLL_USB_PHY_REF_CLK>;
+ clock-names = "phy-ref-clk";
+ };
+
axi@18000000 {
compatible = "brcm,bus-axi";
reg = <0x18000000 0x1000>;
@@ -232,6 +241,8 @@
#address-cells = <1>;
#size-cells = <1>;
+
+ phys = <&usb2_phy>;
};
usb3: usb3@23000 {
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
new file mode 100644
index 000000000000..a21b0fd21f4e
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -0,0 +1,130 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "NorthStar Plus SVK (BCM958522ER)";
+ compatible = "brcm,bcm58522", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+/* USB 2/3 support needed to be complete */
+
+&amac0 {
+ status = "okay";
+};
+
+
+&amac1 {
+ status = "okay";
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
new file mode 100644
index 000000000000..be7f2f8ecf39
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -0,0 +1,142 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "NorthStar Plus SVK (BCM958525ER)";
+ compatible = "brcm,bcm58525", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+/* USB 2/3 support needed to be complete */
+
+&amac0 {
+ status = "okay";
+};
+
+
+&amac1 {
+ status = "okay";
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
+
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata_phy1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index d257e83dedfc..959cde911c3c 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -33,6 +33,7 @@
/dts-v1/;
#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "NorthStar Plus XMC (BCM958525xmc)";
@@ -45,6 +46,35 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x40000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+&i2c0 {
+ temperature-sensor@4c {
+ compatible = "adi,adt7461a";
+ reg = <0x4c>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ rtc@68 {
+ compatible = "st,m41t81";
+ reg = <0x68>;
+ };
};
&nand {
@@ -85,7 +115,7 @@
};
};
-/* XHCI, SATA, MMC, and Ethernet support needed to be complete */
+/* XHCI, MMC, and Ethernet support needed to be complete */
&uart0 {
status = "okay";
@@ -99,6 +129,18 @@
status = "okay";
};
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata_phy1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&nand_sel>;
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
new file mode 100644
index 000000000000..ad2aa87dd15a
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -0,0 +1,170 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "NorthStar Plus SVK (BCM958622HR)";
+ compatible = "brcm,bcm58622", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+/* USB 2/3 and SLIC support needed to be complete */
+
+&amac0 {
+ status = "okay";
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
+
+&srab {
+ compatible = "brcm,bcm58622-srab", "brcm,nsp-srab";
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "port0";
+ reg = <0>;
+ };
+
+ port@1 {
+ label = "port1";
+ reg = <1>;
+ };
+
+ port@2 {
+ label = "port2";
+ reg = <2>;
+ };
+
+ port@3 {
+ label = "port3";
+ reg = <3>;
+ };
+
+ port@4 {
+ label = "port4";
+ reg = <4>;
+ };
+
+ port@5 {
+ ethernet = <&amac0>;
+ label = "cpu";
+ reg = <5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
new file mode 100644
index 000000000000..4ceb8fef8041
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -0,0 +1,178 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "NorthStar Plus SVK (BCM958623HR)";
+ compatible = "brcm,bcm58623", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+/* USB 2/3 and SLIC support needed to be complete */
+
+&amac0 {
+ status = "okay";
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
+
+&srab {
+ compatible = "brcm,bcm58623-srab", "brcm,nsp-srab";
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "port0";
+ reg = <0>;
+ };
+
+ port@1 {
+ label = "port1";
+ reg = <1>;
+ };
+
+ port@2 {
+ label = "port2";
+ reg = <2>;
+ };
+
+ port@3 {
+ label = "port3";
+ reg = <3>;
+ };
+
+ port@4 {
+ label = "port4";
+ reg = <4>;
+ };
+
+ port@5 {
+ ethernet = <&amac0>;
+ label = "cpu";
+ reg = <5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
+
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 03b8bbeb694f..442002597063 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -33,6 +33,7 @@
/dts-v1/;
#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "NorthStar Plus SVK (BCM958625HR)";
@@ -47,7 +48,14 @@
};
memory {
- reg = <0x60000000 0x20000000>;
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
};
};
@@ -109,3 +117,64 @@
groups = "nand_grp";
};
};
+
+&amac0 {
+ status = "okay";
+};
+
+&srab {
+ compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "port0";
+ reg = <0>;
+ };
+
+ port@1 {
+ label = "port1";
+ reg = <1>;
+ };
+
+ port@2 {
+ label = "port2";
+ reg = <2>;
+ };
+
+ port@3 {
+ label = "port3";
+ reg = <3>;
+ };
+
+ port@4 {
+ label = "port4";
+ reg = <4>;
+ };
+
+ port@5 {
+ ethernet = <&amac0>;
+ label = "cpu";
+ reg = <5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
+
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata_phy1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index 2d8422632b2b..05c5f98c8782 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -46,6 +46,11 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
};
&uart0 {
@@ -56,6 +61,14 @@
status = "okay";
};
+&amac0 {
+ status = "okay";
+};
+
+&amac1 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
new file mode 100644
index 000000000000..104afe98a43b
--- /dev/null
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -0,0 +1,182 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "NorthStar Plus SVK (BCM988312HR)";
+ compatible = "brcm,bcm88312", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
+ };
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+};
+
+/* USB 2/3 support needed to be complete */
+
+&amac0 {
+ status = "okay";
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
+
+&sata_phy0 {
+ status = "okay";
+};
+
+&sata_phy1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&srab {
+ compatible = "brcm,bcm88312-srab", "brcm,nsp-srab";
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "port0";
+ reg = <0>;
+ };
+
+ port@1 {
+ label = "port1";
+ reg = <1>;
+ };
+
+ port@2 {
+ label = "port2";
+ reg = <2>;
+ };
+
+ port@3 {
+ label = "port3";
+ reg = <3>;
+ };
+
+ port@4 {
+ label = "port4";
+ reg = <4>;
+ };
+
+ port@5 {
+ ethernet = <&amac0>;
+ label = "cpu";
+ reg = <5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/berlin2-sony-nsz-gs7.dts b/arch/arm/boot/dts/berlin2-sony-nsz-gs7.dts
index 3c0907b87fd6..1c475796d17f 100644
--- a/arch/arm/boot/dts/berlin2-sony-nsz-gs7.dts
+++ b/arch/arm/boot/dts/berlin2-sony-nsz-gs7.dts
@@ -49,7 +49,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/berlin2.dtsi b/arch/arm/boot/dts/berlin2.dtsi
index ae81009741ff..425c48971abe 100644
--- a/arch/arm/boot/dts/berlin2.dtsi
+++ b/arch/arm/boot/dts/berlin2.dtsi
@@ -39,13 +39,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/berlin2.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
model = "Marvell Armada 1500 (BG2) SoC";
compatible = "marvell,berlin2", "marvell,berlin";
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
serial0 = &uart0;
@@ -89,7 +90,7 @@
clock-frequency = <25000000>;
};
- soc {
+ soc@f7000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -447,7 +448,6 @@
reg = <0x2000 0x100>;
clocks = <&refclk>;
interrupts = <1>;
- status = "disabled";
};
wdt2: watchdog@3000 {
@@ -455,7 +455,6 @@
reg = <0x3000 0x100>;
clocks = <&refclk>;
interrupts = <2>;
- status = "disabled";
};
sm_gpio1: gpio@5000 {
diff --git a/arch/arm/boot/dts/berlin2cd-google-chromecast.dts b/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
index 8ba8b50ce997..ca24def0ce13 100644
--- a/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
+++ b/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
@@ -50,7 +50,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/berlin2cd.dtsi b/arch/arm/boot/dts/berlin2cd.dtsi
index 6d06b6118d83..4fe1574d08c3 100644
--- a/arch/arm/boot/dts/berlin2cd.dtsi
+++ b/arch/arm/boot/dts/berlin2cd.dtsi
@@ -39,13 +39,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/berlin2.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
model = "Marvell Armada 1500-mini (BG2CD) SoC";
compatible = "marvell,berlin2cd", "marvell,berlin";
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
serial0 = &uart0;
@@ -78,7 +79,7 @@
clock-frequency = <25000000>;
};
- soc {
+ soc@f7000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index 33b28757b8f6..f485308840ab 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -43,7 +43,7 @@
model = "Marvell BG2-Q DMP";
compatible = "marvell,berlin2q-dmp", "marvell,berlin2q", "marvell,berlin";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x80000000>;
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 2c34bfb13632..e548229697fc 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -37,11 +37,11 @@
#include <dt-bindings/clock/berlin2q.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
-
/ {
model = "Marvell Armada 1500 pro (BG2-Q) SoC";
compatible = "marvell,berlin2q", "marvell,berlin";
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
serial0 = &uart0;
@@ -99,7 +99,7 @@
clock-frequency = <25000000>;
};
- soc {
+ soc@f7000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -525,7 +525,6 @@
reg = <0x2000 0x100>;
clocks = <&refclk>;
interrupts = <1>;
- status = "disabled";
};
wdt2: watchdog@3000 {
@@ -533,7 +532,6 @@
reg = <0x3000 0x100>;
clocks = <&refclk>;
interrupts = <2>;
- status = "disabled";
};
sm_gpio1: gpio@5000 {
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 1a15db8e376b..41de15fe15a2 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -29,6 +29,20 @@
0x04 0x00011000 0x000ff000
>;
};
+ nand_pins: nand_pins {
+ pinctrl-single,bits = <
+ /* EMA_WAIT[0], EMA_OE, EMA_WE, EMA_CS[4], EMA_CS[3] */
+ 0x1c 0x10110110 0xf0ff0ff0
+ /*
+ * EMA_D[0], EMA_D[1], EMA_D[2],
+ * EMA_D[3], EMA_D[4], EMA_D[5],
+ * EMA_D[6], EMA_D[7]
+ */
+ 0x24 0x11111111 0xffffffff
+ /* EMA_A[1], EMA_A[2] */
+ 0x30 0x01100000 0x0ff00000
+ >;
+ };
};
serial0: serial@42000 {
status = "okay";
@@ -131,12 +145,7 @@
status = "okay";
};
};
- nand_cs3@62000000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&nand_cs3_pins>;
- };
- vbat: fixedregulator@0 {
+ vbat: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
@@ -250,3 +259,33 @@
&edma1 {
ti,edma-reserved-slot-ranges = <32 90>;
};
+
+&aemif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins>;
+ status = "ok";
+ cs3 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ clock-ranges;
+ ranges;
+
+ ti,cs-chipselect = <3>;
+
+ nand@2000000,0 {
+ compatible = "ti,davinci-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x02000000 0x02000000
+ 1 0x00000000 0x00008000>;
+
+ ti,davinci-chipselect = <1>;
+ ti,davinci-mask-ale = <0>;
+ ti,davinci-mask-cle = <0>;
+ ti,davinci-mask-chipsel = <0>;
+ ti,davinci-ecc-mode = "hw";
+ ti,davinci-ecc-bits = <4>;
+ ti,davinci-nand-use-bbt;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
new file mode 100644
index 000000000000..7b8ab21fed6c
--- /dev/null
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2016 BayLibre, Inc.
+ *
+ * Licensed under GPLv2.
+ */
+/dts-v1/;
+#include "da850.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "DA850/AM1808/OMAP-L138 LCDK";
+ compatible = "ti,da850-lcdk", "ti,da850";
+
+ aliases {
+ serial2 = &serial2;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0xc0000000 0x08000000>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out";
+ simple-audio-card,routing =
+ "LINE1L", "Line In",
+ "LINE1R", "Line In",
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT";
+ simple-audio-card,format = "dsp_b";
+ simple-audio-card,bitclock-master = <&link0_codec>;
+ simple-audio-card,frame-master = <&link0_codec>;
+ simple-audio-card,bitclock-inversion;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp0>;
+ system-clock-frequency = <24576000>;
+ };
+
+ link0_codec: simple-audio-card,codec {
+ sound-dai = <&tlv320aic3106>;
+ system-clock-frequency = <24576000>;
+ };
+ };
+};
+
+&pmx_core {
+ status = "okay";
+
+ mcasp0_pins: pinmux_mcasp0_pins {
+ pinctrl-single,bits = <
+ /* AHCLKX AFSX ACLKX */
+ 0x00 0x00101010 0x00f0f0f0
+ /* ARX13 ARX14 */
+ 0x04 0x00000110 0x00000ff0
+ >;
+ };
+
+ nand_pins: nand_pins {
+ pinctrl-single,bits = <
+ /* EMA_WAIT[0], EMA_OE, EMA_WE, EMA_CS[3] */
+ 0x1c 0x10110010 0xf0ff00f0
+ /*
+ * EMA_D[0], EMA_D[1], EMA_D[2],
+ * EMA_D[3], EMA_D[4], EMA_D[5],
+ * EMA_D[6], EMA_D[7]
+ */
+ 0x24 0x11111111 0xffffffff
+ /*
+ * EMA_D[8], EMA_D[9], EMA_D[10],
+ * EMA_D[11], EMA_D[12], EMA_D[13],
+ * EMA_D[14], EMA_D[15]
+ */
+ 0x20 0x11111111 0xffffffff
+ /* EMA_A[1], EMA_A[2] */
+ 0x30 0x01100000 0x0ff00000
+ >;
+ };
+};
+
+&serial2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&serial2_rxtx_pins>;
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
+
+&rtc0 {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio_pins>;
+ bus_freq = <2200000>;
+ status = "okay";
+};
+
+&eth0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mii_pins>;
+ status = "okay";
+};
+
+&mmc0 {
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ cd-gpios = <&gpio 64 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ tlv320aic3106: tlv320aic3106@18 {
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic3106";
+ reg = <0x18>;
+ status = "okay";
+ };
+};
+
+&mcasp0 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcasp0_pins>;
+ status = "okay";
+
+ op-mode = <0>; /* DAVINCI_MCASP_IIS_MODE */
+ tdm-slots = <2>;
+ serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 1 2 0
+ >;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+};
+
+&aemif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins>;
+ status = "okay";
+ cs3 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ clock-ranges;
+ ranges;
+
+ ti,cs-chipselect = <3>;
+
+ nand@2000000,0 {
+ compatible = "ti,davinci-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x02000000 0x02000000
+ 1 0x00000000 0x00008000>;
+
+ ti,davinci-chipselect = <1>;
+ ti,davinci-mask-ale = <0>;
+ ti,davinci-mask-cle = <0>;
+ ti,davinci-mask-chipsel = <0>;
+
+ ti,davinci-nand-buswidth = <16>;
+ ti,davinci-ecc-mode = "hw";
+ ti,davinci-ecc-bits = <4>;
+ ti,davinci-nand-use-bbt;
+
+ /*
+ * The OMAP-L132/L138 Bootloader doc SPRAB41E reads:
+ * "To boot from NAND Flash, the AIS should be written
+ * to NAND block 1 (NAND block 0 is not used by default)".
+ * The same doc mentions that for ROM "Silicon Revision 2.1",
+ * "Updated NAND boot mode to offer boot from block 0 or block 1".
+ * However the limitation is left here by default for compatibility
+ * with older silicon and because it needs new boot pin settings
+ * not possible on a stock LCDK.
+ */
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot env";
+ reg = <0 0x020000>;
+ };
+ partition@0x020000 {
+ /* The LCDK defaults to booting from this partition */
+ label = "u-boot";
+ reg = <0x020000 0x080000>;
+ };
+ partition@0x0a0000 {
+ label = "free space";
+ reg = <0x0a0000 0>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 25f0f8e6dde5..f79e1b91c680 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -41,20 +41,40 @@
pinctrl-single,function-mask = <0xf>;
status = "disabled";
- nand_cs3_pins: pinmux_nand_pins {
+ serial0_rtscts_pins: pinmux_serial0_rtscts_pins {
pinctrl-single,bits = <
- /* EMA_OE, EMA_WE */
- 0x1c 0x00110000 0x00ff0000
- /* EMA_CS[4],EMA_CS[3]*/
- 0x1c 0x00000110 0x00000ff0
- /*
- * EMA_D[0], EMA_D[1], EMA_D[2],
- * EMA_D[3], EMA_D[4], EMA_D[5],
- * EMA_D[6], EMA_D[7]
- */
- 0x24 0x11111111 0xffffffff
- /* EMA_A[1], EMA_A[2] */
- 0x30 0x01100000 0x0ff00000
+ /* UART0_RTS UART0_CTS */
+ 0x0c 0x22000000 0xff000000
+ >;
+ };
+ serial0_rxtx_pins: pinmux_serial0_rxtx_pins {
+ pinctrl-single,bits = <
+ /* UART0_TXD UART0_RXD */
+ 0x0c 0x00220000 0x00ff0000
+ >;
+ };
+ serial1_rtscts_pins: pinmux_serial1_rtscts_pins {
+ pinctrl-single,bits = <
+ /* UART1_CTS UART1_RTS */
+ 0x00 0x00440000 0x00ff0000
+ >;
+ };
+ serial1_rxtx_pins: pinmux_serial1_rxtx_pins {
+ pinctrl-single,bits = <
+ /* UART1_TXD UART1_RXD */
+ 0x10 0x22000000 0xff000000
+ >;
+ };
+ serial2_rtscts_pins: pinmux_serial2_rtscts_pins {
+ pinctrl-single,bits = <
+ /* UART2_CTS UART2_RTS */
+ 0x00 0x44000000 0xff000000
+ >;
+ };
+ serial2_rxtx_pins: pinmux_serial2_rxtx_pins {
+ pinctrl-single,bits = <
+ /* UART2_TXD UART2_RXD */
+ 0x10 0x00220000 0x00ff0000
>;
};
i2c0_pins: pinmux_i2c0_pins {
@@ -274,31 +294,36 @@
status = "disabled";
};
ehrpwm0: pwm@300000 {
- compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
+ compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x300000 0x2000>;
status = "disabled";
};
ehrpwm1: pwm@302000 {
- compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
+ compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x302000 0x2000>;
status = "disabled";
};
ecap0: ecap@306000 {
- compatible = "ti,da850-ecap", "ti,am33xx-ecap";
+ compatible = "ti,da850-ecap", "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x306000 0x80>;
status = "disabled";
};
ecap1: ecap@307000 {
- compatible = "ti,da850-ecap", "ti,am33xx-ecap";
+ compatible = "ti,da850-ecap", "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x307000 0x80>;
status = "disabled";
};
ecap2: ecap@308000 {
- compatible = "ti,da850-ecap", "ti,am33xx-ecap";
+ compatible = "ti,da850-ecap", "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x308000 0x80>;
status = "disabled";
@@ -375,17 +400,14 @@
dma-names = "tx", "rx";
};
};
- nand_cs3@62000000 {
- compatible = "ti,davinci-nand";
- reg = <0x62000000 0x807ff
- 0x68000000 0x8000>;
- ti,davinci-chipselect = <1>;
- ti,davinci-mask-ale = <0>;
- ti,davinci-mask-cle = <0>;
- ti,davinci-mask-chipsel = <0>;
- ti,davinci-ecc-mode = "hw";
- ti,davinci-ecc-bits = <4>;
- ti,davinci-nand-use-bbt;
+ aemif: aemif@68000000 {
+ compatible = "ti,da850-aemif";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ reg = <0x68000000 0x00008000>;
+ ranges = <0 0 0x60000000 0x08000000
+ 1 0 0x68000000 0x00008000>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index 4128fa91823c..d6657b3bae84 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -12,13 +12,13 @@
model = "DM8148 EVM";
compatible = "ti,dm8148-evm", "ti,dm8148";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
/* MIC94060YC6 controlled by SD1_POW pin */
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 3f184863e0c5..63883b3479f9 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -11,7 +11,7 @@
model = "HP t410 Smart Zero Client";
compatible = "hp,t410", "ti,dm8148";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
@@ -27,7 +27,7 @@
regulator-always-on;
};
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 68e412c9863c..ff90a6ce6bdc 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -7,11 +7,11 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/dm814x.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,dm814";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index f50348bdd857..0bf55fa72dea 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -12,14 +12,14 @@
model = "DM8168 EVM";
compatible = "ti,dm8168-evm", "ti,dm8168";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000 /* 1 GB */
0xc0000000 0x40000000>; /* 1 GB */
};
/* FDC6331L controlled by SD_POW pin */
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 44e39c743b53..f1e0f771ff29 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -7,11 +7,11 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/omap.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,dm816";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
index f820573f4a4a..155eb32ee213 100644
--- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts
+++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
@@ -12,13 +12,13 @@
model = "DRA62x J5 Eco EVM";
compatible = "ti,dra62x-j5eco-evm", "ti,dra62x", "ti,dm8148";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
/* MIC94060YC6 controlled by SD1_POW pin */
- vmmcsd_fixed: fixedregulator@0 {
+ vmmcsd_fixed: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index bafcfac067ec..132f2be10889 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -16,7 +16,7 @@
model = "TI DRA742";
compatible = "ti,dra7-evm", "ti,dra742", "ti,dra74", "ti,dra7";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x60000000>; /* 1536 MB */
};
@@ -105,25 +105,25 @@
leds {
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "dra7:usr1";
gpios = <&pcf_lcd 4 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@1 {
+ led1 {
label = "dra7:usr2";
gpios = <&pcf_lcd 5 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@2 {
+ led2 {
label = "dra7:usr3";
gpios = <&pcf_lcd 6 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- led@3 {
+ led3 {
label = "dra7:usr4";
gpios = <&pcf_lcd 7 GPIO_ACTIVE_LOW>;
default-state = "off";
@@ -664,10 +664,10 @@
&qspi {
status = "okay";
- spi-max-frequency = <64000000>;
+ spi-max-frequency = <76800000>;
m25p80@0 {
compatible = "s25fl256s1";
- spi-max-frequency = <64000000>;
+ spi-max-frequency = <76800000>;
reg = <0>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index d9bfb94a2992..d4fcd68f6349 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -10,8 +10,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/dra.h>
-#include "skeleton.dtsi"
-
#define MAX_SOURCES 400
/ {
@@ -82,9 +80,11 @@
compatible = "arm,cortex-a15";
reg = <0>;
- operating-points-v2 = <&cpu0_opp_table>;
- ti,syscon-efuse = <&scm_wkup 0x20c 0xf80000 19>;
- ti,syscon-rev = <&scm_wkup 0x204>;
+ operating-points = <
+ /* kHz uV */
+ 1000000 1060000
+ 1176000 1160000
+ >;
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
@@ -98,24 +98,6 @@
};
};
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp_nom@1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <1060000 850000 1150000>;
- opp-supported-hw = <0xFF 0x01>;
- opp-suspend;
- };
-
- opp_od@1176000000 {
- opp-hz = /bits/ 64 <1176000000>;
- opp-microvolt = <1160000 885000 1160000>;
- opp-supported-hw = <0xFF 0x02>;
- };
- };
-
/*
* The soc node represents the soc top level view. It is used for IPs
* that are not memory mapped in the MPU view or for the MPU itself.
@@ -301,6 +283,7 @@
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
#interrupt-cells = <1>;
num-lanes = <1>;
+ linux,pci-domain = <0>;
ti,hwmods = "pcie1";
phys = <&pcie1_phy>;
phy-names = "pcie-phy0";
@@ -336,6 +319,7 @@
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
#interrupt-cells = <1>;
num-lanes = <1>;
+ linux,pci-domain = <1>;
ti,hwmods = "pcie2";
phys = <&pcie2_phy>;
phy-names = "pcie-phy0";
@@ -1413,7 +1397,7 @@
ti,hwmods = "ocp2scp1";
usb2_phy1: phy@4a084000 {
- compatible = "ti,omap-usb2";
+ compatible = "ti,dra7x-usb2", "ti,omap-usb2";
reg = <0x4a084000 0x400>;
syscon-phy-power = <&scm_conf 0x300>;
clocks = <&usb_phy1_always_on_clk32k>,
@@ -1717,7 +1701,7 @@
mac: ethernet@48484000 {
compatible = "ti,dra7-cpsw","ti,cpsw";
ti,hwmods = "gmac";
- clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
+ clocks = <&gmac_main_clk>, <&gmac_rft_clk_mux>;
clock-names = "fck", "cpts";
cpdma_channels = <8>;
ale_entries = <1024>;
@@ -1726,7 +1710,7 @@
mac_control = <0x20>;
slaves = <2>;
active_slave = <0>;
- cpts_clock_mult = <0x80000000>;
+ cpts_clock_mult = <0x784CFE14>;
cpts_clock_shift = <29>;
reg = <0x48484000 0x1000
0x48485200 0x2E00>;
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index 9d3cf50ca37e..c94d8d64710d 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -681,10 +681,10 @@
&qspi {
status = "okay";
- spi-max-frequency = <64000000>;
+ spi-max-frequency = <76800000>;
m25p80@0 {
compatible = "s25fl256s1";
- spi-max-frequency = <64000000>;
+ spi-max-frequency = <76800000>;
reg = <0>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index f9cfd3bb4dc2..064b322a7a04 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -11,7 +11,7 @@
/ {
model = "TI DRA722 Rev C EVM";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */
};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index cc1d32ca4a8a..e3a9b6985693 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -9,7 +9,7 @@
/ {
model = "TI DRA722";
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x40000000>; /* 1024 MB */
};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 8987b3e180a1..0a78347e6615 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -17,7 +17,6 @@
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <1>;
- operating-points-v2 = <&cpu0_opp_table>;
};
};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 8378b44ee567..3330738e4c6e 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1003,6 +1003,14 @@
ti,index-power-of-two;
};
+ gmac_main_clk: gmac_main_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&gmac_250m_dclk_div>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+
l3init_480m_dclk_div: l3init_480m_dclk_div@1ac {
#clock-cells = <0>;
compatible = "ti,divider-clock";
@@ -1718,13 +1726,12 @@
reg = <0x0c00>;
};
- gmac_gmii_ref_clk_div: gmac_gmii_ref_clk_div@13d0 {
+ rmii_50mhz_clk_mux: rmii_50mhz_clk_mux@13d0 {
#clock-cells = <0>;
- compatible = "ti,divider-clock";
- clocks = <&dpll_gmac_m2_ck>;
+ compatible = "ti,mux-clock";
+ clocks = <&dpll_gmac_h11x2_ck>, <&rmii_clk_ck>;
ti,bit-shift = <24>;
reg = <0x13d0>;
- ti,dividers = <2>;
};
gmac_rft_clk_mux: gmac_rft_clk_mux@13d0 {
diff --git a/arch/arm/boot/dts/efm32gg-dk3750.dts b/arch/arm/boot/dts/efm32gg-dk3750.dts
index 504cf45d3cb8..98fc667d22c7 100644
--- a/arch/arm/boot/dts/efm32gg-dk3750.dts
+++ b/arch/arm/boot/dts/efm32gg-dk3750.dts
@@ -16,7 +16,8 @@
bootargs = "console=ttyefm4,115200 init=/linuxrc ignore_loglevel ihash_entries=64 dhash_entries=64 earlyprintk uclinux.physaddr=0x8c400000 root=/dev/mtdblock0";
};
- memory {
+ memory@88000000 {
+ device_type = "memory";
reg = <0x88000000 0x400000>;
};
@@ -74,7 +75,7 @@
status = "ok";
};
- boardfpga: boardfpga {
+ boardfpga: boardfpga@80000000 {
compatible = "efm32board";
reg = <0x80000000 0x400>;
irq-gpios = <&gpio 64 1>;
diff --git a/arch/arm/boot/dts/efm32gg.dtsi b/arch/arm/boot/dts/efm32gg.dtsi
index c747983771c7..b78c57e51ed5 100644
--- a/arch/arm/boot/dts/efm32gg.dtsi
+++ b/arch/arm/boot/dts/efm32gg.dtsi
@@ -4,10 +4,14 @@
* Documentation available from
* http://www.silabs.com/Support%20Documents/TechnicalDocs/EFM32GG-RM.pdf
*/
+
#include "armv7-m.dtsi"
#include "dt-bindings/clock/efm32-cmu.h"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
aliases {
i2c0 = &i2c0;
i2c1 = &i2c1;
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
index 130e946f1414..a70819b1b739 100644
--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -24,7 +24,8 @@
stdout-path = &serial_2;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x1ff00000>;
};
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 8c8906266310..66f04f6ba6bb 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -27,7 +27,8 @@
i2c7 = &i2c_max77836;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x1ff00000>;
};
diff --git a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
index 40ea7de44933..ec331169c3d9 100644
--- a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
@@ -12,58 +12,46 @@
* published by the Free Software Foundation.
*/
-#define PIN_PULL_NONE 0
-#define PIN_PULL_DOWN 1
-#define PIN_PULL_UP 3
-
-#define PIN_DRV_LV1 0
-#define PIN_DRV_LV2 2
-#define PIN_DRV_LV3 1
-#define PIN_DRV_LV4 3
-
-#define PIN_PDN_OUT0 0
-#define PIN_PDN_OUT1 1
-#define PIN_PDN_INPUT 2
-#define PIN_PDN_PREV 3
-
-#define PIN_IN(_pin, _pull, _drv) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-function = <0>; \
- samsung,pin-pud = <PIN_PULL_ ##_pull>; \
- samsung,pin-drv = <PIN_DRV_ ##_drv>; \
+#include <dt-bindings/pinctrl/samsung.h>
+
+#define PIN_IN(_pin, _pull, _drv) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>; \
+ samsung,pin-pud = <EXYNOS_PIN_PULL_ ##_pull>; \
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_ ##_drv>; \
}
-#define PIN_OUT(_pin, _drv) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-function = <1>; \
- samsung,pin-pud = <0>; \
- samsung,pin-drv = <PIN_DRV_ ##_drv>; \
+#define PIN_OUT(_pin, _drv) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>; \
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; \
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_ ##_drv>; \
}
-#define PIN_OUT_SET(_pin, _val, _drv) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-function = <1>; \
- samsung,pin-pud = <0>; \
- samsung,pin-drv = <PIN_DRV_ ##_drv>; \
- samsung,pin-val = <_val>; \
+#define PIN_OUT_SET(_pin, _val, _drv) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>; \
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; \
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_ ##_drv>; \
+ samsung,pin-val = <_val>; \
}
-#define PIN_CFG(_pin, _sel, _pull, _drv) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-function = <_sel>; \
- samsung,pin-pud = <PIN_PULL_ ##_pull>; \
- samsung,pin-drv = <PIN_DRV_ ##_drv>; \
+#define PIN_CFG(_pin, _sel, _pull, _drv) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-function = <_sel>; \
+ samsung,pin-pud = <EXYNOS_PIN_PULL_ ##_pull>; \
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_ ##_drv>; \
}
-#define PIN_SLP(_pin, _mode, _pull) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-con-pdn = <PIN_PDN_ ##_mode>; \
- samsung,pin-pud-pdn = <PIN_PULL_ ##_pull>; \
+#define PIN_SLP(_pin, _mode, _pull) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_ ##_mode>; \
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_ ##_pull>; \
}
&pinctrl_0 {
@@ -125,158 +113,158 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpb-0", "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpb-4", "gpb-6", "gpb-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpc1-3", "gpc1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpd0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpd0-2", "gpd0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpd0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
mipi0_clk: mipi0-clk {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -408,164 +396,164 @@
sd0_clk: sd0-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_rdqs: sd0-rdqs {
samsung,pins = "gpk0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpl0-0", "gpl0-1", "gpl0-2", "gpl0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpk1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpk1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpk1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpk1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpk2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpk2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpk2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpk2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpk2-4", "gpk2-5", "gpk2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_io: cam-port-b-io {
samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
"gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
"gpm1-0", "gpm1-1", "gpm2-0", "gpm2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_b_clk_active: cam-port-b-clk-active {
samsung,pins = "gpm2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_clk_idle: cam-port-b-clk-idle {
samsung,pins = "gpm2-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c0: fimc-is-i2c0 {
samsung,pins = "gpm4-0", "gpm4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c1: fimc-is-i2c1 {
samsung,pins = "gpm4-2", "gpm4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_uart: fimc-is-uart {
samsung,pins = "gpm3-5", "gpm3-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index a92181368e5b..3967ee5f7752 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -27,7 +27,8 @@
i2c7 = &i2c_max77836;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x1ff00000>;
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 70e3aceab3a9..e9d2556c0dfd 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -17,7 +17,6 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include "exynos4-cpu-thermal.dtsi"
#include "exynos-syscon-restart.dtsi"
#include <dt-bindings/clock/exynos3250.h>
@@ -25,6 +24,8 @@
/ {
compatible = "samsung,exynos3250";
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
pinctrl0 = &pinctrl_0;
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 32f22e12c70b..5f034eb5a5e2 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -21,11 +21,12 @@
#include <dt-bindings/clock/exynos4.h>
#include <dt-bindings/clock/exynos-audss-clk.h>
-#include "skeleton.dtsi"
#include "exynos-syscon-restart.dtsi"
/ {
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
spi0 = &spi_0;
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index be2751eebaf8..a2c6a13fe67b 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -24,7 +24,8 @@
model = "Insignal Origen evaluation board based on Exynos4210";
compatible = "insignal,origen", "samsung,exynos4210", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x10000000
0x50000000 0x10000000
0x60000000 0x10000000
diff --git a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
index 9331c6252eff..d9b6d25e4abe 100644
--- a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
@@ -14,6 +14,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
/ {
pinctrl@11400000 {
gpa0: gpa0 {
@@ -146,245 +148,245 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart_audio_a: uart-audio-a {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart_audio_b: uart-audio-b {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpb-0", "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpb-4", "gpb-6", "gpb-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpb-6", "gpb-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ac97_bus: ac97-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spdif_bus: spdif-bus {
samsung,pins = "gpc1-0", "gpc1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpc1-3", "gpc1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpc1-1", "gpc1-2", "gpc1-3", "gpc1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpd0-2", "gpd0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpd0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpd0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_ctrl: lcd-ctrl {
samsung,pins = "gpd0-0", "gpd0-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_sync: lcd-sync {
samsung,pins = "gpf0-0", "gpf0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_en: lcd-en {
samsung,pins = "gpe3-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_clk: lcd-clk {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data16: lcd-data-width16 {
@@ -392,9 +394,9 @@
"gpf1-3", "gpf1-6", "gpf1-7", "gpf2-0",
"gpf2-1", "gpf2-2", "gpf2-3", "gpf2-7",
"gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data18: lcd-data-width18 {
@@ -403,9 +405,9 @@
"gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
"gpf2-6", "gpf2-7", "gpf3-0", "gpf3-1",
"gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data24: lcd-data-width24 {
@@ -415,9 +417,9 @@
"gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
"gpf2-4", "gpf2-5", "gpf2-6", "gpf2-7",
"gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -569,263 +571,263 @@
sd0_clk: sd0-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_clk: sd4-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_cmd: sd4-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_cd: sd4-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus1: sd4-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus4: sd4-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus8: sd4-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <4>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpk1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpk1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpk1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpk1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpk2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpk2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpk2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpk2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpk2-3", "gpk2-4", "gpk2-5", "gpk2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus8: sd2-bus-width8 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_clk: sd3-clk {
samsung,pins = "gpk3-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cmd: sd3-cmd {
samsung,pins = "gpk3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cd: sd3-cd {
samsung,pins = "gpk3-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus1: sd3-bus-width1 {
samsung,pins = "gpk3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus4: sd3-bus-width4 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
eint0: ext-int0 {
samsung,pins = "gpx0-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint8: ext-int8 {
samsung,pins = "gpx1-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint15: ext-int15 {
samsung,pins = "gpx1-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint16: ext-int16 {
samsung,pins = "gpx2-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint31: ext-int31 {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_io: cam-port-a-io {
samsung,pins = "gpj0-0", "gpj0-1", "gpj0-2", "gpj0-3",
"gpj0-4", "gpj0-5", "gpj0-6", "gpj0-7",
"gpj1-0", "gpj1-1", "gpj1-2", "gpj1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_clk_active: cam-port-a-clk-active {
samsung,pins = "gpj1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_a_clk_idle: cam-port-a-clk-idle {
samsung,pins = "gpj1-3";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
hdmi_cec: hdmi-cec {
samsung,pins = "gpx3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -838,17 +840,17 @@
i2s0_bus: i2s0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4", "gpz-5", "gpz-6";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm0_bus: pcm0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4";
- samsung,pin-function = <0x3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
};
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 847fae3dd1f1..9c98a3724396 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -23,7 +23,8 @@
model = "Samsung smdkv310 evaluation board based on Exynos4210";
compatible = "samsung,smdkv310", "samsung,exynos4210", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -136,17 +137,17 @@
&pinctrl_1 {
keypad_rows: keypad-rows {
samsung,pins = "gpx2-0", "gpx2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_cols: keypad-cols {
samsung,pins = "gpx1-0", "gpx1-1", "gpx1-2", "gpx1-3",
"gpx1-4", "gpx1-5", "gpx1-6", "gpx1-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 79d983036560..0ca1b4d355f2 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -20,7 +20,8 @@
model = "Samsung Trats based on Exynos4210";
compatible = "samsung,trats", "samsung,exynos4210", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x10000000
0x50000000 0x10000000
0x60000000 0x10000000
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 9a75e3effbc9..0c89ea99de54 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -20,7 +20,8 @@
model = "Samsung Universal C210 based on Exynos4210 rev0";
compatible = "samsung,universal_c210", "samsung,exynos4210", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x10000000
0x50000000 0x10000000>;
};
@@ -269,7 +270,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd>;
hdmi-en-supply = <&hdmi_en>;
@@ -521,16 +522,16 @@
&pinctrl_1 {
hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7";
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
};
&pinctrl_0 {
i2c_ddc_bus: i2c-ddc-bus {
samsung,pins = "gpe4-2", "gpe4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 58ad48e7b8f7..8aa19ba14436 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -163,26 +163,26 @@
/* RSTN signal for eMMC */
&sd1_cd {
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
max77686_irq: max77686-irq {
samsung,pins = "gpx3-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7";
- samsung,pin-pud = <1>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
};
};
@@ -227,7 +227,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd>;
vdd-supply = <&ldo8_reg>;
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index d73aa6c58fe3..99634c54dca9 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -18,7 +18,8 @@
model = "Hardkernel ODROID-U3 board based on Exynos4412";
compatible = "hardkernel,odroid-u3", "samsung,exynos4412", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x7FF00000>;
};
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index 2af235151301..61906b35ea7a 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -18,7 +18,8 @@
model = "Hardkernel ODROID-X board based on Exynos4412";
compatible = "hardkernel,odroid-x", "samsung,exynos4412", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x3FF00000>;
};
@@ -83,7 +84,7 @@
&pinctrl_1 {
gpio_home_key: home_key {
samsung,pins = "gpx2-2";
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-odroidx2.dts b/arch/arm/boot/dts/exynos4412-odroidx2.dts
index 3e3584270e00..4d228858f172 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx2.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx2.dts
@@ -17,7 +17,8 @@
model = "Hardkernel ODROID-X2 board based on Exynos4412";
compatible = "hardkernel,odroid-x2", "samsung,exynos4412", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x7FF00000>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index 26a36fed9652..a1ab6f94bb64 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -22,7 +22,8 @@
model = "Insignal Origen evaluation board based on Exynos4412";
compatible = "insignal,origen4412", "samsung,exynos4412", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
@@ -500,16 +501,16 @@
&pinctrl_1 {
keypad_rows: keypad-rows {
samsung,pins = "gpx2-0", "gpx2-1", "gpx2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_cols: keypad-cols {
samsung,pins = "gpx1-0", "gpx1-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
index 231ffbdbf9d0..7fcb43431b59 100644
--- a/arch/arm/boot/dts/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
@@ -20,7 +20,8 @@
model = "Samsung SMDK evaluation board based on Exynos4412";
compatible = "samsung,smdk4412", "samsung,exynos4412", "samsung,exynos4";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
@@ -115,17 +116,17 @@
&pinctrl_1 {
keypad_rows: keypad-rows {
samsung,pins = "gpx2-0", "gpx2-1", "gpx2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_cols: keypad-cols {
samsung,pins = "gpx1-0", "gpx1-1", "gpx1-2", "gpx1-3",
"gpx1-4", "gpx1-5", "gpx1-6", "gpx1-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-tiny4412.dts b/arch/arm/boot/dts/exynos4412-tiny4412.dts
index 4840bbdaa9ec..5504398e6e37 100644
--- a/arch/arm/boot/dts/exynos4412-tiny4412.dts
+++ b/arch/arm/boot/dts/exynos4412-tiny4412.dts
@@ -23,7 +23,8 @@
stdout-path = &serial_0;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 129e973a06a6..41ecd6d465a7 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -30,7 +30,8 @@
i2c12 = &i2c_max77693_fuel;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/exynos4415-pinctrl.dtsi b/arch/arm/boot/dts/exynos4415-pinctrl.dtsi
index 75af9c56123e..76cfd872ead3 100644
--- a/arch/arm/boot/dts/exynos4415-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4415-pinctrl.dtsi
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl_0 {
gpa0: gpa0 {
gpio-controller;
@@ -94,180 +96,180 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpb-0", "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpb-4", "gpb-6", "gpb-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpc1-3", "gpc1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpc1-1", "gpc1-3", "gpc1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpd0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpd0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpd0-2", "gpd0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -392,165 +394,165 @@
sd0_clk: sd0-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_rdqs: sd0-rdqs {
samsung,pins = "gpk0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpl0-0", "gpl0-1", "gpl0-2", "gpl0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpk1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpk1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpk1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpk1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpk2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <4>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpk2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <4>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpk2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpk2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <4>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpk2-4", "gpk2-5", "gpk2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <4>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_io: cam-port-b-io {
samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
"gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
"gpm1-0", "gpm1-1", "gpm2-0", "gpm2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_b_clk_active: cam-port-b-clk-active {
samsung,pins = "gpm2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_clk_idle: cam-port-b-clk-idle {
samsung,pins = "gpm2-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c0: fimc-is-i2c0 {
samsung,pins = "gpm4-0", "gpm4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c1: fimc-is-i2c1 {
samsung,pins = "gpm4-2", "gpm4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_uart: fimc-is-uart {
samsung,pins = "gpm3-5", "gpm3-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -566,8 +568,8 @@
i2s0_bus: i2s0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4", "gpz-5", "gpz-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4415.dtsi b/arch/arm/boot/dts/exynos4415.dtsi
index 28b04b6795c9..3c40f8a956dd 100644
--- a/arch/arm/boot/dts/exynos4415.dtsi
+++ b/arch/arm/boot/dts/exynos4415.dtsi
@@ -16,13 +16,14 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/exynos4415.h>
#include <dt-bindings/clock/exynos-audss-clk.h>
/ {
compatible = "samsung,exynos4415";
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
pinctrl0 = &pinctrl_0;
diff --git a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
index 856b29254374..a56bf9b1a412 100644
--- a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
@@ -12,20 +12,13 @@
* published by the Free Software Foundation.
*/
-#define PIN_PULL_NONE 0
-#define PIN_PULL_DOWN 1
-#define PIN_PULL_UP 3
-
-#define PIN_PDN_OUT0 0
-#define PIN_PDN_OUT1 1
-#define PIN_PDN_INPUT 2
-#define PIN_PDN_PREV 3
-
-#define PIN_SLP(_pin, _mode, _pull) \
- _pin { \
- samsung,pins = #_pin; \
- samsung,pin-con-pdn = <PIN_PDN_ ##_mode>; \
- samsung,pin-pud-pdn = <PIN_PULL_ ##_pull>; \
+#include <dt-bindings/pinctrl/samsung.h>
+
+#define PIN_SLP(_pin, _mode, _pull) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_ ##_mode>; \
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_ ##_pull>; \
}
/ {
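The PIN_SLP() helper above keeps its shape but now resolves to the shared EXYNOS_PIN_PDN_* and EXYNOS_PIN_PULL_* constants instead of the removed file-local defines. A hypothetical invocation and its expansion, assuming the shared EXYNOS_PIN_PDN_* values mirror the removed local PIN_PDN_* ones (the pin name here is illustrative, not taken from this hunk):

	PIN_SLP(gpa0-0, INPUT, NONE);

	/* expands to the sleep-state pin node: */
	gpa0-0 {
		samsung,pins = "gpa0-0";
		samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
		samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
	};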
@@ -136,245 +129,245 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart_audio_a: uart-audio-a {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart_audio_b: uart-audio-b {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpb-0", "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpb-4", "gpb-6", "gpb-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ac97_bus: ac97-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spdif_bus: spdif-bus {
samsung,pins = "gpc1-0", "gpc1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpc1-3", "gpc1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpc1-1", "gpc1-3", "gpc1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpd0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_ctrl: lcd-ctrl {
samsung,pins = "gpd0-0", "gpd0-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpd0-2", "gpd0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpd0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
mipi0_clk: mipi0-clk {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
mipi1_clk: mipi1-clk {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_clk: lcd-clk {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data16: lcd-data-width16 {
@@ -382,9 +375,9 @@
"gpf1-3", "gpf1-6", "gpf1-7", "gpf2-0",
"gpf2-1", "gpf2-2", "gpf2-3", "gpf2-7",
"gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data18: lcd-data-width18 {
@@ -393,9 +386,9 @@
"gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
"gpf2-6", "gpf2-7", "gpf3-0", "gpf3-1",
"gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data24: lcd-data-width24 {
@@ -405,39 +398,39 @@
"gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
"gpf2-4", "gpf2-5", "gpf2-6", "gpf2-7",
"gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_ldi: lcd-ldi {
samsung,pins = "gpf3-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_io: cam-port-a-io {
samsung,pins = "gpj0-0", "gpj0-1", "gpj0-2", "gpj0-3",
"gpj0-4", "gpj0-5", "gpj0-6", "gpj0-7",
"gpj1-0", "gpj1-1", "gpj1-2", "gpj1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_clk_active: cam-port-a-clk-active {
samsung,pins = "gpj1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_a_clk_idle: cam-port-a-clk-idle {
samsung,pins = "gpj1-3";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -613,284 +606,284 @@
sd0_clk: sd0-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_clk: sd4-clk {
samsung,pins = "gpk0-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_cmd: sd4-cmd {
samsung,pins = "gpk0-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_cd: sd4-cd {
samsung,pins = "gpk0-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus1: sd4-bus-width1 {
samsung,pins = "gpk0-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus4: sd4-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd4_bus8: sd4-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpk1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpk1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpk1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpk1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpk2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpk2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpk2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpk2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpk2-3", "gpk2-4", "gpk2-5", "gpk2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus8: sd2-bus-width8 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_clk: sd3-clk {
samsung,pins = "gpk3-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cmd: sd3-cmd {
samsung,pins = "gpk3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cd: sd3-cd {
samsung,pins = "gpk3-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus1: sd3-bus-width1 {
samsung,pins = "gpk3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus4: sd3-bus-width4 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_io: cam-port-b-io {
samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
"gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
"gpm1-0", "gpm1-1", "gpm2-0", "gpm2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_b_clk_active: cam-port-b-clk-active {
samsung,pins = "gpm2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_clk_idle: cam-port-b-clk-idle {
samsung,pins = "gpm2-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint0: ext-int0 {
samsung,pins = "gpx0-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint8: ext-int8 {
samsung,pins = "gpx1-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint15: ext-int15 {
samsung,pins = "gpx1-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint16: ext-int16 {
samsung,pins = "gpx2-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint31: ext-int31 {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c0: fimc-is-i2c0 {
samsung,pins = "gpm4-0", "gpm4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_i2c1: fimc-is-i2c1 {
samsung,pins = "gpm4-2", "gpm4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
fimc_is_uart: fimc-is-uart {
samsung,pins = "gpm3-5", "gpm3-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
hdmi_cec: hdmi-cec {
samsung,pins = "gpx3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -906,17 +899,17 @@
i2s0_bus: i2s0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4", "gpz-5", "gpz-6";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm0_bus: pcm0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4";
- samsung,pin-function = <0x3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -971,9 +964,9 @@
"gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3",
"gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7",
"gpv4-0", "gpv4-1";
- samsung,pin-function = <0x2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
};
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index c452499ae8c9..3394bdcf10ae 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -157,7 +157,9 @@
<&clock CLK_MOUT_MPLL_USER_T>,
<&clock CLK_FIMC_ISP>, <&clock CLK_FIMC_DRC>,
<&clock CLK_FIMC_FD>, <&clock CLK_MCUISP>,
- <&clock CLK_DIV_ISP0>,<&clock CLK_DIV_ISP1>,
+ <&clock CLK_GICISP>, <&clock CLK_MCUCTL_ISP>,
+ <&clock CLK_PWM_ISP>,
+ <&clock CLK_DIV_ISP0>, <&clock CLK_DIV_ISP1>,
<&clock CLK_DIV_MCUISP0>,
<&clock CLK_DIV_MCUISP1>,
<&clock CLK_UART_ISP_SCLK>,
@@ -167,6 +169,7 @@
clock-names = "lite0", "lite1", "ppmuispx",
"ppmuispmx", "mpll", "isp",
"drc", "fd", "mcuisp",
+ "gicisp", "mcuctl_isp", "pwm_isp",
"ispdiv0", "ispdiv1", "mcuispdiv0",
"mcuispdiv1", "uart", "aclk200",
"div_aclk200", "aclk400mcuisp",
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index cab91782e20c..8f06609879f5 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -13,11 +13,12 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include "exynos-syscon-restart.dtsi"
/ {
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c_0;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index ea70603f660d..6098dacd09f1 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -20,7 +20,8 @@
model = "Insignal Arndale evaluation board based on EXYNOS5250";
compatible = "insignal,arndale", "samsung,exynos5250", "samsung,exynos5";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -152,7 +153,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_LOW>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_LOW>;
vdd_osc-supply = <&ldo10_reg>;
vdd_pll-supply = <&ldo8_reg>;
vdd-supply = <&ldo8_reg>;
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index 880917e508b2..2f6ab32b5954 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl_0 {
gpa0: gpa0 {
gpio-controller;
@@ -200,392 +202,392 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_hs_bus: i2c2-hs-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c3_hs_bus: i2c3-hs-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpa2-0", "gpa2-2", "gpa2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpa2-0", "gpa2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpa2-2", "gpa2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ac97_bus: ac97-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
"gpb1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
"gpb1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spdif_bus: spdif-bus {
samsung,pins = "gpb1-0", "gpb1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpb1-3", "gpb1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpb2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpb2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpb2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpb2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpb2-2", "gpb2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpb3-0", "gpb3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpb3-2", "gpb3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_hs_bus: i2c0-hs-bus {
samsung,pins = "gpb3-0", "gpb3-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_hs_bus: i2c1-hs-bus {
samsung,pins = "gpb3-2", "gpb3-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
sd0_clk: sd0-clk {
samsung,pins = "gpc0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpc0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpc0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpc0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpc0-3", "gpc0-4", "gpc0-5", "gpc0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpc2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpc2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpc2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpc2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpc2-3", "gpc2-4", "gpc2-5", "gpc2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpc3-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpc3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpc3-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpc3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpc3-3", "gpc3-4", "gpc3-5", "gpc3-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus8: sd2-bus-width8 {
samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_clk: sd3-clk {
samsung,pins = "gpc4-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cmd: sd3-cmd {
samsung,pins = "gpc4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cd: sd3-cd {
samsung,pins = "gpc4-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus1: sd3-bus-width1 {
samsung,pins = "gpc4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus4: sd3-bus-width4 {
samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
uart1_data: uart1-data {
samsung,pins = "gpd0-0", "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpd0-2", "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
dp_hpd: dp_hpd {
samsung,pins = "gpx0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -666,52 +668,52 @@
samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
"gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
"gpe1-0", "gpe1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_gpio_b: cam-gpio-b {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
"gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_i2c2_bus: cam-i2c2-bus {
samsung,pins = "gpe0-6", "gpe1-0";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_spi1_bus: cam-spi1-bus {
samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_i2c1_bus: cam-i2c1-bus {
samsung,pins = "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_i2c0_bus: cam-i2c0-bus {
samsung,pins = "gpf0-0", "gpf0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_spi0_bus: cam-spi0-bus {
samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_bayrgb_bus: cam-bayrgb-bus {
@@ -720,18 +722,18 @@
"gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
"gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
"gpg2-0", "gpg2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a: cam-port-a {
samsung,pins = "gph0-0", "gph0-1", "gph0-2", "gph0-3",
"gph1-0", "gph1-1", "gph1-2", "gph1-3",
"gph1-4", "gph1-5", "gph1-6", "gph1-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -781,9 +783,9 @@
"gpv0-4", "gpv0-5", "gpv0-6", "gpv0-7",
"gpv1-0", "gpv1-1", "gpv1-2", "gpv1-3",
"gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
c2c_txd: c2c-txd {
@@ -791,9 +793,9 @@
"gpv2-4", "gpv2-5", "gpv2-6", "gpv2-7",
"gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3",
"gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -809,8 +811,8 @@
i2s0_bus: i2s0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4", "gpz-5", "gpz-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 381af134c4c8..a97a785ccc6b 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -22,7 +22,8 @@
aliases {
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -116,7 +117,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
};
&i2c_0 {
@@ -416,8 +417,8 @@
&pinctrl_0 {
max77686_irq: max77686-irq {
samsung,pins = "gpx3-2";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index fadbea744e1a..d5d51916bb74 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -19,7 +19,8 @@
i2c104 = &i2c_104;
};
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -260,7 +261,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
phy = <&hdmiphy>;
@@ -440,7 +441,7 @@
* double-pulling gets us out of spec in some cases.
*/
&i2c2_bus {
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
&i2c_2 {
@@ -572,81 +573,81 @@
&pinctrl_0 {
wifi_en: wifi-en {
samsung,pins = "gpx0-1";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
wifi_rst: wifi-rst {
samsung,pins = "gpx0-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
power_key_irq: power-key-irq {
samsung,pins = "gpx1-3";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ec_irq: ec-irq {
samsung,pins = "gpx1-6";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
tps65090_irq: tps65090-irq {
samsung,pins = "gpx2-6";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
usb3_vbus_en: usb3-vbus-en {
samsung,pins = "gpx2-7";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
max77686_irq: max77686-irq {
samsung,pins = "gpx3-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lid_irq: lid-irq {
samsung,pins = "gpx3-5";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
&pinctrl_1 {
arb_their_claim: arb-their-claim {
samsung,pins = "gpe0-4";
- samsung,pin-function = <0>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
arb_our_claim: arb-our-claim {
samsung,pins = "gpf0-3";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
@@ -657,16 +658,16 @@
};
&sd3_bus4 {
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&sd3_clk {
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&sd3_cmd {
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&spi_1 {
diff --git a/arch/arm/boot/dts/exynos5250-snow-rev5.dts b/arch/arm/boot/dts/exynos5250-snow-rev5.dts
index f811dc800660..90560c316f64 100644
--- a/arch/arm/boot/dts/exynos5250-snow-rev5.dts
+++ b/arch/arm/boot/dts/exynos5250-snow-rev5.dts
@@ -40,8 +40,8 @@
&pinctrl_0 {
max98090_irq: max98090-irq {
samsung,pins = "gpx0-4";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index 995c7ce6c12b..df48f2cc96f7 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -36,8 +36,8 @@
&pinctrl_0 {
max98095_en: max98095-en {
samsung,pins = "gpx1-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 44f4292bfef6..4d7bdb735ed3 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -20,7 +20,8 @@
model = "Google Spring";
compatible = "google,spring", "samsung,exynos5250", "samsung,exynos5";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -91,7 +92,7 @@
};
&hdmi {
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
phy = <&hdmiphy>;
@@ -357,7 +358,7 @@
* double-pulling gets us out of spec in some cases.
*/
&i2c2_bus {
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
&i2c_2 {
@@ -460,92 +461,92 @@
&pinctrl_0 {
s5m8767_dvs: s5m8767-dvs {
samsung,pins = "gpd1-0", "gpd1-1", "gpd1-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
dp_hpd_gpio: dp-hpd-gpio {
samsung,pins = "gpc3-0";
- samsung,pin-function = <0>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
trackpad_irq: trackpad-irq {
samsung,pins = "gpx1-2";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
power_key_irq: power-key-irq {
samsung,pins = "gpx1-3";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ec_irq: ec-irq {
samsung,pins = "gpx1-6";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
s5m8767_ds: s5m8767-ds {
samsung,pins = "gpx2-3", "gpx2-4", "gpx2-5";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
s5m8767_irq: s5m8767-irq {
samsung,pins = "gpx3-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lid_irq: lid-irq {
samsung,pins = "gpx3-5";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
&pinctrl_1 {
hsic_reset: hsic-reset {
samsung,pins = "gpe1-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
&sd1_bus4 {
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&sd1_cd {
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&sd1_clk {
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&sd1_cmd {
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&spi_1 {
diff --git a/arch/arm/boot/dts/exynos5260-pinctrl.dtsi b/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
index f6ee55ea0708..1b911a219a27 100644
--- a/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
@@ -12,9 +12,7 @@
* published by the Free Software Foundation.
*/
-#define PIN_PULL_NONE 0
-#define PIN_PULL_DOWN 1
-#define PIN_PULL_UP 3
+#include <dt-bindings/pinctrl/samsung.h>
&pinctrl_0 {
gpa0: gpa0 {
@@ -187,217 +185,217 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpa2-0", "gpa2-2", "gpa2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
usb3_vbus0_en: usb3-vbus0-en {
samsung,pins = "gpa2-4";
- samsung,pin-function = <1>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
spdif1_bus: spdif1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpb1-0", "gpb1-2", "gpb1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c0_hs_bus: i2c0-hs-bus {
samsung,pins = "gpb3-0", "gpb3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c1_hs_bus: i2c1-hs-bus {
samsung,pins = "gpb3-2", "gpb3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c2_hs_bus: i2c2-hs-bus {
samsung,pins = "gpb3-4", "gpb3-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c3_hs_bus: i2c3-hs-bus {
samsung,pins = "gpb3-6", "gpb3-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c4_bus: i2c4-bus {
samsung,pins = "gpb4-0", "gpb4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c5_bus: i2c5-bus {
samsung,pins = "gpb4-2", "gpb4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c6_bus: i2c6-bus {
samsung,pins = "gpb4-4", "gpb4-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c7_bus: i2c7-bus {
samsung,pins = "gpb4-6", "gpb4-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c8_bus: i2c8-bus {
samsung,pins = "gpb5-0", "gpb5-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c9_bus: i2c9-bus {
samsung,pins = "gpb5-2", "gpb5-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c10_bus: i2c10-bus {
samsung,pins = "gpb5-4", "gpb5-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
i2c11_bus: i2c11-bus {
samsung,pins = "gpb5-6", "gpb5-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_gpio_a: cam-gpio-a {
samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
"gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
"gpe1-0", "gpe1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_gpio_b: cam-gpio-b {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
"gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_i2c1_bus: cam-i2c1-bus {
samsung,pins = "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_i2c0_bus: cam-i2c0-bus {
samsung,pins = "gpf0-0", "gpf0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_spi0_bus: cam-spi0-bus {
samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
cam_spi1_bus: cam-spi1-bus {
samsung,pins = "gpf1-4", "gpf1-5", "gpf1-6", "gpf1-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
};
@@ -444,114 +442,114 @@
sd0_clk: sd0-clk {
samsung,pins = "gpc0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpc0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpc0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpc0-3", "gpc0-4", "gpc0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd0_rdqs: sd0-rdqs {
samsung,pins = "gpc0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpc1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpc1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpc1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpc1-3", "gpc1-4", "gpc1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd1_bus8: sd1-bus-width8 {
samsung,pins = "gpc4-0", "gpc4-1", "gpc4-2", "gpc4-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpc2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpc2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpc2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpc2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV6>;
};
};
diff --git a/arch/arm/boot/dts/exynos5260-xyref5260.dts b/arch/arm/boot/dts/exynos5260-xyref5260.dts
index 3daef94bee38..d0cc300cfb4b 100644
--- a/arch/arm/boot/dts/exynos5260-xyref5260.dts
+++ b/arch/arm/boot/dts/exynos5260-xyref5260.dts
@@ -16,7 +16,8 @@
model = "SAMSUNG XYREF5260 board based on EXYNOS5260";
compatible = "samsung,xyref5260", "samsung,exynos5260", "samsung,exynos5";
- memory {
+ memory@20000000 {
+ device_type = "memory";
reg = <0x20000000 0x80000000>;
};
@@ -42,9 +43,9 @@
&pinctrl_0 {
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5260_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
index 36da38e29000..a86a4898d077 100644
--- a/arch/arm/boot/dts/exynos5260.dtsi
+++ b/arch/arm/boot/dts/exynos5260.dtsi
@@ -9,13 +9,13 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
-
#include <dt-bindings/clock/exynos5260-clk.h>
/ {
compatible = "samsung,exynos5260", "samsung,exynos5";
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
pinctrl0 = &pinctrl_0;
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index f6d135245a4b..3c271cb4b2be 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -21,12 +21,13 @@
model = "Hardkernel Odroid XU";
compatible = "hardkernel,odroid-xu", "samsung,exynos5410", "samsung,exynos5";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x7ea00000>;
};
chosen {
- linux,stdout-path = &serial_2;
+ stdout-path = "serial2:115200n8";
};
emmc_pwrseq: pwrseq {
@@ -473,38 +474,38 @@
&pinctrl_0 {
emmc_nrst_pin: emmc-nrst {
samsung,pins = "gpd1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_3: pmic-dvs-3 {
samsung,pins = "gpx0-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_2: pmic-dvs-2 {
samsung,pins = "gpx0-1";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_1: pmic-dvs-1 {
samsung,pins = "gpx0-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
samsung,pin-val = <1>;
};
max77802_irq: max77802-irq {
samsung,pins = "gpx0-4";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
index b58a0f29f42c..a083d23fdee3 100644
--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl_0 {
gpa0: gpa0 {
gpio-controller;
@@ -280,212 +282,212 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c4_hs_bus: i2c4-hs-bus {
samsung,pins = "gpa2-0", "gpa2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c5_hs_bus: i2c5-hs-bus {
samsung,pins = "gpa2-2", "gpa2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c6_hs_bus: i2c6-hs-bus {
samsung,pins = "gpb1-3", "gpb1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpb2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpb2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpb2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpb2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c7_hs_bus: i2c7-hs-bus {
samsung,pins = "gpb2-2", "gpb2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpb3-0", "gpb3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpb3-2", "gpb3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
sd0_clk: sd0-clk {
samsung,pins = "gpc0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpc0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpc0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpc0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpc0-4", "gpc0-5", "gpc0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpc2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpc2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpc2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpc2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
};
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index 777fcf2edd79..6cc74d97daae 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -16,7 +16,8 @@
model = "Samsung SMDK5410 board based on EXYNOS5410";
compatible = "samsung,smdk5410", "samsung,exynos5410", "samsung,exynos5";
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -66,8 +67,8 @@
srom_ctl: srom-ctl {
samsung,pins = "gpy0-3", "gpy0-4", "gpy0-5",
"gpy1-0", "gpy1-1", "gpy1-2", "gpy1-3";
- samsung,pin-function = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
srom_ebi: srom-ebi {
@@ -77,9 +78,9 @@
"gpy5-4", "gpy5-5", "gpy5-6", "gpy5-7",
"gpy6-0", "gpy6-1", "gpy6-2", "gpy6-3",
"gpy6-4", "gpy6-5", "gpy6-6", "gpy6-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 39a3b81478fd..9cc83c51c925 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -22,7 +22,8 @@
model = "Insignal Arndale Octa evaluation board based on EXYNOS5420";
compatible = "insignal,arndale-octa", "samsung,exynos5420", "samsung,exynos5";
- memory {
+ memory@20000000 {
+ device_type = "memory";
reg = <0x20000000 0x80000000>;
};
@@ -70,6 +71,15 @@
status = "disabled";
};
+&hdmi {
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ vdd_osc-supply = <&ldo7_reg>;
+ vdd_pll-supply = <&ldo6_reg>;
+ vdd-supply = <&ldo6_reg>;
+ ddc = <&i2c_2>;
+ status = "okay";
+};
+
&hsi2c_4 {
status = "okay";
@@ -347,6 +357,10 @@
};
};
+&i2c_2 {
+ status = "okay";
+};
+
&mmc_0 {
status = "okay";
broken-cd;
@@ -378,9 +392,9 @@
&pinctrl_0 {
s2mps11_irq: s2mps11-irq {
samsung,pins = "gpx3-2";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index fe4e0915c0c6..ec4a00f1ce01 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -79,7 +79,8 @@
};
};
- memory {
+ memory@20000000 {
+ device_type = "memory";
reg = <0x20000000 0x80000000>;
};
@@ -179,7 +180,7 @@
&hdmi {
status = "okay";
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
ddc = <&i2c_2>;
@@ -753,171 +754,171 @@
wifi_en: wifi-en {
samsung,pins = "gpx0-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
max98090_irq: max98090-irq {
samsung,pins = "gpx0-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
/* We need GPX0_6 to be low at sleep time; just keep it low always */
mask_tpm_reset: mask-tpm-reset {
samsung,pins = "gpx0-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
samsung,pin-val = <0>;
};
tpm_irq: tpm-irq {
samsung,pins = "gpx1-0";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
trackpad_irq: trackpad-irq {
samsung,pins = "gpx1-1";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
power_key_irq: power-key-irq {
samsung,pins = "gpx1-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
ec_irq: ec-irq {
samsung,pins = "gpx1-5";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
tps65090_irq: tps65090-irq {
samsung,pins = "gpx2-5";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
dp_hpd_gpio: dp_hpd_gpio {
samsung,pins = "gpx2-6";
- samsung,pin-function = <0>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
max77802_irq: max77802-irq {
samsung,pins = "gpx3-1";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
lid_irq: lid-irq {
samsung,pins = "gpx3-4";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_1: pmic-dvs-1 {
samsung,pins = "gpy7-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_1 {
/* Adjust WiFi drive strengths lower for EMI */
sd1_clk: sd1-clk {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_cmd: sd1-cmd {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus1: sd1-bus-width1 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus4: sd1-bus-width4 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus8: sd1-bus-width8 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
};
&pinctrl_2 {
pmic_dvs_2: pmic-dvs-2 {
samsung,pins = "gpj4-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_3: pmic-dvs-3 {
samsung,pins = "gpj4-3";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_3 {
/* Drive SPI lines at x2 for better integrity */
spi2-bus {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
/* Drive SPI chip select at x2 for better integrity */
ec_spi_cs: ec-spi-cs {
samsung,pins = "gpb1-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
usb300_vbus_en: usb300-vbus-en {
samsung,pins = "gph0-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
usb301_vbus_en: usb301-vbus-en {
samsung,pins = "gph0-1";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_selb: pmic-selb {
samsung,pins = "gph0-2", "gph0-3", "gph0-4", "gph0-5",
"gph0-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
index 14beb7e07323..3924b4fafe72 100644
--- a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl_0 {
gpy7: gpy7 {
gpio-controller;
@@ -61,9 +63,9 @@
dp_hpd: dp_hpd {
samsung,pins = "gpx0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
@@ -153,135 +155,135 @@
sd0_clk: sd0-clk {
samsung,pins = "gpc0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpc0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpc0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpc0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpc0-4", "gpc0-5", "gpc0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd0_rclk: sd0-rclk {
samsung,pins = "gpc0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpc1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpc1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpc1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_int: sd1-int {
samsung,pins = "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpc1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpc1-4", "gpc1-5", "gpc1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd1_bus8: sd1-bus-width8 {
samsung,pins = "gpd1-4", "gpd1-5", "gpd1-6", "gpd1-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpc2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpc2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpc2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpc2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV4>;
};
};
@@ -354,52 +356,52 @@
samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
"gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
"gpe1-0", "gpe1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_gpio_b: cam-gpio-b {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
"gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_i2c2_bus: cam-i2c2-bus {
samsung,pins = "gpf0-4", "gpf0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_spi1_bus: cam-spi1-bus {
samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_i2c1_bus: cam-i2c1-bus {
samsung,pins = "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_i2c0_bus: cam-i2c0-bus {
samsung,pins = "gpf0-0", "gpf0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_spi0_bus: cam-spi0-bus {
samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
cam_bayrgb_bus: cam-bayrgb-bus {
@@ -408,9 +410,9 @@
"gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
"gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
"gpg2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
@@ -489,216 +491,216 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c3_bus: i2c3-bus {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-4", "gpa1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpa2-0", "gpa2-1", "gpa2-2", "gpa2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c4_hs_bus: i2c4-hs-bus {
samsung,pins = "gpa2-0", "gpa2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c5_hs_bus: i2c5-hs-bus {
samsung,pins = "gpa2-2", "gpa2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
"gpb0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
"gpb1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
"gpb1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
spdif_bus: spdif-bus {
samsung,pins = "gpb1-0", "gpb1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c6_hs_bus: i2c6-hs-bus {
samsung,pins = "gpb1-3", "gpb1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpb2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpb2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpb2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpb2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c7_hs_bus: i2c7-hs-bus {
samsung,pins = "gpb2-2", "gpb2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpb3-0", "gpb3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpb3-2", "gpb3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c8_hs_bus: i2c8-hs-bus {
samsung,pins = "gpb3-4", "gpb3-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c9_hs_bus: i2c9-hs-bus {
samsung,pins = "gpb3-6", "gpb3-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
i2c10_hs_bus: i2c10-hs-bus {
samsung,pins = "gpb4-0", "gpb4-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
@@ -714,8 +716,8 @@
i2s0_bus: i2s0-bus {
samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
"gpz-4", "gpz-5", "gpz-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
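The hunks above replace raw pinctrl numbers with the named constants pulled in via dt-bindings/pinctrl/samsung.h. For reference, a sketch of the values those macros are expected to carry, inferred purely from the one-to-one replacements shown in these hunks (the canonical definitions live in include/dt-bindings/pinctrl/samsung.h):

	/* Pull configuration */
	#define EXYNOS_PIN_PULL_NONE		0
	#define EXYNOS_PIN_PULL_DOWN		1
	#define EXYNOS_PIN_PULL_UP		3

	/* Pin function selection */
	#define EXYNOS_PIN_FUNC_INPUT		0
	#define EXYNOS_PIN_FUNC_OUTPUT		1
	#define EXYNOS_PIN_FUNC_2		2
	#define EXYNOS_PIN_FUNC_3		3
	#define EXYNOS_PIN_FUNC_4		4
	#define EXYNOS_PIN_FUNC_5		5
	#define EXYNOS_PIN_FUNC_F		0xf

	/* Exynos5420 drive-strength levels seen in these hunks */
	#define EXYNOS5420_PIN_DRV_LV1		0
	#define EXYNOS5420_PIN_DRV_LV3		2
	#define EXYNOS5420_PIN_DRV_LV4		3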
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index ed8f3426911b..aaccd0da41e5 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -19,7 +19,8 @@
model = "Samsung SMDK5420 board based on EXYNOS5420";
compatible = "samsung,smdk5420", "samsung,exynos5420", "samsung,exynos5";
- memory {
+ memory@20000000 {
+ device_type = "memory";
reg = <0x20000000 0x80000000>;
};
@@ -130,7 +131,7 @@
&hdmi {
status = "okay";
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
};
@@ -386,25 +387,25 @@
&pinctrl_0 {
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_2 {
usb300_vbus_en: usb300-vbus-en {
samsung,pins = "gpg0-5";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
usb301_vbus_en: usb301-vbus-en {
samsung,pins = "gpg1-4";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index d56253049ccb..246d298557f5 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -21,12 +21,13 @@
#include "exynos-mfc-reserved-memory.dtsi"
/ {
- memory {
+ memory@40000000 {
+ device_type = "memory";
reg = <0x40000000 0x7EA00000>;
};
chosen {
- linux,stdout-path = &serial_2;
+ stdout-path = "serial2:115200n8";
};
firmware@02073000 {
@@ -250,7 +251,7 @@
&hdmi {
status = "okay";
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
@@ -548,25 +549,25 @@
&pinctrl_0 {
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
s2mps11_irq: s2mps11-irq {
samsung,pins = "gpx0-4";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_1 {
emmc_nrst_pin: emmc-nrst {
samsung,pins = "gpd1-0";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
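Across the board files in this series the bare memory node gains a unit-address and an explicit device_type (formerly inherited from the now-removed skeleton.dtsi), and linux,stdout-path is replaced by the standard stdout-path form. A minimal sketch of the resulting pattern, using the Odroid XU3 values from the hunk above:

	/ {
		memory@40000000 {
			device_type = "memory";
			reg = <0x40000000 0x7EA00000>;
		};

		chosen {
			stdout-path = "serial2:115200n8";
		};
	};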
diff --git a/arch/arm/boot/dts/exynos5440-sd5v1.dts b/arch/arm/boot/dts/exynos5440-sd5v1.dts
index a98501bab6fc..ad6f533b3f40 100644
--- a/arch/arm/boot/dts/exynos5440-sd5v1.dts
+++ b/arch/arm/boot/dts/exynos5440-sd5v1.dts
@@ -20,6 +20,12 @@
bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel earlyprintk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
};
+ /* FIXME: set reg property with correct start address and size */
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
fixed-rate-clocks {
xtal {
compatible = "samsung,clock-xtal";
diff --git a/arch/arm/boot/dts/exynos5440-ssdk5440.dts b/arch/arm/boot/dts/exynos5440-ssdk5440.dts
index 6a0d802e87c8..92bd2c6f7631 100644
--- a/arch/arm/boot/dts/exynos5440-ssdk5440.dts
+++ b/arch/arm/boot/dts/exynos5440-ssdk5440.dts
@@ -21,6 +21,12 @@
bootargs = "root=/dev/sda2 rw rootwait ignore_loglevel earlyprintk no_console_suspend mem=2048M@0x80000000 mem=6144M@0x100000000 console=ttySAC0,115200";
};
+ /* FIXME: set reg property with correct start address and size */
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
fixed-rate-clocks {
xtal {
compatible = "samsung,clock-xtal";
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index fd176819b4bf..e6bffd13cedd 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -10,12 +10,13 @@
*/
#include <dt-bindings/clock/exynos5440.h>
-#include "skeleton.dtsi"
/ {
compatible = "samsung,exynos5440", "samsung,exynos5";
interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
serial0 = &serial_0;
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
index 06a604911e87..9d31cdce1959 100644
--- a/arch/arm/boot/dts/exynos54xx.dtsi
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -14,7 +14,6 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include "exynos5.dtsi"
/ {
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 5ec71e2400fd..01f466816fea 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -79,7 +79,8 @@
};
- memory {
+ memory@20000000 {
+ device_type = "memory";
reg = <0x20000000 0x80000000>;
};
@@ -179,7 +180,7 @@
&hdmi {
status = "okay";
- hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
+ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_irq>;
ddc = <&i2c_2>;
@@ -722,171 +723,171 @@
wifi_en: wifi-en {
samsung,pins = "gpx0-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
max98091_irq: max98091-irq {
samsung,pins = "gpx0-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
/* We need GPX0_6 to be low at sleep time; just keep it low always */
mask_tpm_reset: mask-tpm-reset {
samsung,pins = "gpx0-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
samsung,pin-val = <0>;
};
tpm_irq: tpm-irq {
samsung,pins = "gpx1-0";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
trackpad_irq: trackpad-irq {
samsung,pins = "gpx1-1";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
power_key_irq: power-key-irq {
samsung,pins = "gpx1-2";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
ec_irq: ec-irq {
samsung,pins = "gpx1-5";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
tps65090_irq: tps65090-irq {
samsung,pins = "gpx2-5";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
dp_hpd_gpio: dp_hpd_gpio {
samsung,pins = "gpx2-6";
- samsung,pin-function = <0>;
- samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
max77802_irq: max77802-irq {
samsung,pins = "gpx3-1";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
lid_irq: lid-irq {
samsung,pins = "gpx3-4";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
hdmi_hpd_irq: hdmi-hpd-irq {
samsung,pins = "gpx3-7";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_1: pmic-dvs-1 {
samsung,pins = "gpy7-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_1 {
/* Adjust WiFi drive strengths lower for EMI */
sd1_clk: sd1-clk {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_cmd: sd1-cmd {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus1: sd1-bus-width1 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus4: sd1-bus-width4 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
sd1_bus8: sd1-bus-width8 {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
};
&pinctrl_2 {
pmic_dvs_2: pmic-dvs-2 {
samsung,pins = "gpj4-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_dvs_3: pmic-dvs-3 {
samsung,pins = "gpj4-3";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
&pinctrl_3 {
/* Drive SPI lines at x2 for better integrity */
spi2-bus {
- samsung,pin-drv = <2>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
/* Drive SPI chip select at x2 for better integrity */
ec_spi_cs: ec-spi-cs {
samsung,pins = "gpb1-2";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV3>;
};
usb300_vbus_en: usb300-vbus-en {
samsung,pins = "gph0-0";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
usb301_vbus_en: usb301-vbus-en {
samsung,pins = "gph0-1";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
pmic_selb: pmic-selb {
samsung,pins = "gph0-2", "gph0-3", "gph0-4", "gph0-5",
"gph0-6";
- samsung,pin-function = <1>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 490b7b44f1e7..f812d586c5ce 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -309,6 +309,13 @@
status = "disabled";
};
+ iim@53ff0000 {
+ compatible = "fsl,imx35-iim";
+ reg = <0x53ff0000 0x4000>;
+ interrupts = <19>;
+ clocks = <&clks 80>;
+ };
+
usbotg: usb@53ff4000 {
compatible = "fsl,imx35-usb", "fsl,imx27-usb";
reg = <0x53ff4000 0x0200>;
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index e2457138311f..8fe8beeb68a4 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -227,6 +227,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 151 28>;
};
gpio2: gpio@53f88000 {
@@ -237,6 +238,10 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 75 8>, <&iomuxc 8 100 8>,
+ <&iomuxc 16 83 1>, <&iomuxc 17 85 1>,
+ <&iomuxc 18 87 1>, <&iomuxc 19 84 1>,
+ <&iomuxc 20 88 1>, <&iomuxc 21 86 1>;
};
gpio3: gpio@53f8c000 {
@@ -247,6 +252,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 108 32>;
};
gpio4: gpio@53f90000 {
@@ -257,6 +263,8 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 8 8>, <&iomuxc 8 45 12>,
+ <&iomuxc 20 140 11>;
};
wdog1: wdog@53f98000 {
@@ -346,6 +354,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 57 18>, <&iomuxc 18 89 11>;
};
gpio6: gpio@53fe0000 {
@@ -356,6 +365,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 27 18>, <&iomuxc 18 16 11>;
};
i2c3: i2c@53fec000 {
diff --git a/arch/arm/boot/dts/imx53-usbarmory.dts b/arch/arm/boot/dts/imx53-usbarmory.dts
new file mode 100644
index 000000000000..6782d7fc5961
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-usbarmory.dts
@@ -0,0 +1,224 @@
+/*
+ * USB armory MkI device tree file
+ * https://inversepath.com/usbarmory
+ *
+ * Copyright (C) 2015, Inverse Path
+ * Andrej Rosano <andrej@inversepath.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx53.dtsi"
+
+/ {
+ model = "Inverse Path USB armory";
+ compatible = "inversepath,imx53-usbarmory", "fsl,imx53";
+};
+
+/ {
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ memory {
+ reg = <0x70000000 0x20000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+ user {
+ label = "LED";
+ gpios = <&gpio4 27 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+/*
+ * Not every i.MX53 P/N supports clocks above 800 MHz.
+ * As the USB armory does not mount a specific P/N, set a safe upper clock limit.
+ */
+&cpu0 {
+ operating-points = <
+ /* kHz */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ >;
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ MX53_PAD_SD1_DATA0__ESDHC1_DAT0 0x1d5
+ MX53_PAD_SD1_DATA1__ESDHC1_DAT1 0x1d5
+ MX53_PAD_SD1_DATA2__ESDHC1_DAT2 0x1d5
+ MX53_PAD_SD1_DATA3__ESDHC1_DAT3 0x1d5
+ MX53_PAD_SD1_CMD__ESDHC1_CMD 0x1d5
+ MX53_PAD_SD1_CLK__ESDHC1_CLK 0x1d5
+ >;
+ };
+
+ pinctrl_i2c1_pmic: i2c1grp {
+ fsl,pins = <
+ MX53_PAD_EIM_D21__I2C1_SCL 0x80
+ MX53_PAD_EIM_D28__I2C1_SDA 0x80
+ >;
+ };
+
+ pinctrl_led: ledgrp {
+ fsl,pins = <
+ MX53_PAD_DISP0_DAT6__GPIO4_27 0x1e4
+ >;
+ };
+
+ /*
+ * UART mode pin header configuration
+ * 3 - GPIO5[26], pull-down 100K
+ * 4 - GPIO5[27], pull-down 100K
+ * 5 - TX, pull-up 100K
+ * 6 - RX, pull-up 100K
+ * 7 - GPIO5[30], pull-down 100K
+ */
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX53_PAD_CSI0_DAT8__GPIO5_26 0xc0
+ MX53_PAD_CSI0_DAT9__GPIO5_27 0xc0
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
+ MX53_PAD_CSI0_DAT12__GPIO5_30 0xc0
+ >;
+ };
+};
+
+&i2c1 {
+ pinctrl-0 = <&pinctrl_i2c1_pmic>;
+ status = "okay";
+
+ ltc3589: pmic@34 {
+ compatible = "lltc,ltc3589-2";
+ reg = <0x34>;
+
+ regulators {
+ sw1_reg: sw1 {
+ regulator-min-microvolt = <591930>;
+ regulator-max-microvolt = <1224671>;
+ lltc,fb-voltage-divider = <100000 158000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <704123>;
+ regulator-max-microvolt = <1456803>;
+ lltc,fb-voltage-divider = <180000 191000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: sw3 {
+ regulator-min-microvolt = <1341250>;
+ regulator-max-microvolt = <2775000>;
+ lltc,fb-voltage-divider = <270000 100000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ bb_out_reg: bb-out {
+ regulator-min-microvolt = <3387341>;
+ regulator-max-microvolt = <3387341>;
+ lltc,fb-voltage-divider = <511000 158000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: ldo1 {
+ regulator-min-microvolt = <1306329>;
+ regulator-max-microvolt = <1306329>;
+ lltc,fb-voltage-divider = <100000 158000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <704123>;
+ regulator-max-microvolt = <1456806>;
+ lltc,fb-voltage-divider = <180000 191000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-boot-on;
+ };
+
+ ldo4_reg: ldo4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3200000>;
+ };
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&usbotg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
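As a reading aid for the &cpu0 override in this new file: operating-points entries are <frequency-kHz voltage-uV> pairs (the legacy OPP v1 binding), so the table caps the board at 800 MHz / 1.05 V. The same table, annotated:

	&cpu0 {
		operating-points = <
			/*  kHz     uV */
			166666  850000	/* 166 MHz @ 0.85 V */
			400000  900000	/* 400 MHz @ 0.90 V */
			800000 1050000	/* 800 MHz @ 1.05 V */
		>;
	};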
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index cd170376eaca..0777b41cdfe8 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -136,6 +136,14 @@
clock-names = "bus", "di0", "di1";
resets = <&src 2>;
+ ipu_csi0: port@0 {
+ reg = <0>;
+ };
+
+ ipu_csi1: port@1 {
+ reg = <1>;
+ };
+
ipu_di0: port@2 {
#address-cells = <1>;
#size-cells = <0>;
@@ -217,6 +225,8 @@
clocks = <&clks IMX5_CLK_UART3_IPG_GATE>,
<&clks IMX5_CLK_UART3_PER_GATE>;
clock-names = "ipg", "per";
+ dmas = <&sdma 42 4 0>, <&sdma 43 4 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -498,6 +508,8 @@
clocks = <&clks IMX5_CLK_UART1_IPG_GATE>,
<&clks IMX5_CLK_UART1_PER_GATE>;
clock-names = "ipg", "per";
+ dmas = <&sdma 18 4 0>, <&sdma 19 4 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -508,6 +520,8 @@
clocks = <&clks IMX5_CLK_UART2_IPG_GATE>,
<&clks IMX5_CLK_UART2_PER_GATE>;
clock-names = "ipg", "per";
+ dmas = <&sdma 12 4 0>, <&sdma 13 4 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -591,6 +605,8 @@
clocks = <&clks IMX5_CLK_UART4_IPG_GATE>,
<&clks IMX5_CLK_UART4_PER_GATE>;
clock-names = "ipg", "per";
+ dmas = <&sdma 2 4 0>, <&sdma 3 4 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
};
@@ -621,6 +637,8 @@
clocks = <&clks IMX5_CLK_UART5_IPG_GATE>,
<&clks IMX5_CLK_UART5_PER_GATE>;
clock-names = "ipg", "per";
+ dmas = <&sdma 16 4 0>, <&sdma 17 4 0>;
+ dma-names = "rx", "tx";
status = "disabled";
};
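The dmas/dma-names additions to the imx53.dtsi UART nodes use the i.MX SDMA specifier, which per the fsl,imx-sdma binding is <&sdma request-id peripheral-type priority>, with peripheral type 4 selecting the UART script. Annotated for uart1, with the request IDs taken from the hunk above:

	&uart1 {
		dmas = <&sdma 18 4 0>,	/* RX: SDMA event 18, UART script, priority 0 */
		       <&sdma 19 4 0>;	/* TX: SDMA event 19 */
		dma-names = "rx", "tx";
	};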
diff --git a/arch/arm/boot/dts/imx6dl-gw553x.dts b/arch/arm/boot/dts/imx6dl-gw553x.dts
new file mode 100644
index 000000000000..59b8afc36e66
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw553x.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw553x.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite/Solo GW553X";
+ compatible = "gw,imx6dl-gw553x", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 2becd7cd6544..75d73437adf7 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -376,18 +376,18 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x0a0b1 /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0 /* AR8035 pin strapping: IO voltage: pull up */
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x130b0 /* AR8035 pin strapping: PHYADDR#0: pull down */
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x130b0 /* AR8035 pin strapping: PHYADDR#1: pull down */
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0 /* AR8035 pin strapping: MODE#1: pull up */
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0 /* AR8035 pin strapping: MODE#3: pull up */
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030 /* AR8035 pin strapping: IO voltage: pull up */
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030 /* AR8035 pin strapping: PHYADDR#0: pull down */
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030 /* AR8035 pin strapping: PHYADDR#1: pull down */
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 /* AR8035 pin strapping: MODE#1: pull up */
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 /* AR8035 pin strapping: MODE#3: pull up */
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x130b0 /* AR8035 pin strapping: MODE#0: pull down */
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 /* GPIO16 -> AR8035 25MHz */
MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x130b0 /* RGMII_nRST */
diff --git a/arch/arm/boot/dts/imx6dl-ts4900.dts b/arch/arm/boot/dts/imx6dl-ts4900.dts
new file mode 100644
index 000000000000..85eddeb30e21
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-ts4900.dts
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 Technologic Systems
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-ts4900.dtsi"
+
+/ {
+ model = "Technologic Systems i.MX6 Solo/DualLite TS-4900 (Default Device Tree)";
+ compatible = "technologic,imx6dl-ts4900", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 9a4c22c2dade..1ade1951e620 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -111,6 +111,59 @@
};
};
+&gpio1 {
+ gpio-ranges = <&iomuxc 0 131 2>, <&iomuxc 2 137 8>, <&iomuxc 10 189 2>,
+ <&iomuxc 12 194 1>, <&iomuxc 13 193 1>, <&iomuxc 14 192 1>,
+ <&iomuxc 15 191 1>, <&iomuxc 16 185 2>, <&iomuxc 18 184 1>,
+ <&iomuxc 19 187 1>, <&iomuxc 20 183 1>, <&iomuxc 21 188 1>,
+ <&iomuxc 22 123 3>, <&iomuxc 25 121 1>, <&iomuxc 26 127 1>,
+ <&iomuxc 27 126 1>, <&iomuxc 28 128 1>, <&iomuxc 29 130 1>,
+ <&iomuxc 30 129 1>, <&iomuxc 31 122 1>;
+};
+
+&gpio2 {
+ gpio-ranges = <&iomuxc 0 161 8>, <&iomuxc 8 208 8>, <&iomuxc 16 74 1>,
+ <&iomuxc 17 73 1>, <&iomuxc 18 72 1>, <&iomuxc 19 71 1>,
+ <&iomuxc 20 70 1>, <&iomuxc 21 69 1>, <&iomuxc 22 68 1>,
+ <&iomuxc 23 79 2>, <&iomuxc 25 118 2>, <&iomuxc 27 117 1>,
+ <&iomuxc 28 113 4>;
+};
+
+&gpio3 {
+ gpio-ranges = <&iomuxc 0 97 2>, <&iomuxc 2 105 8>, <&iomuxc 10 99 6>,
+ <&iomuxc 16 81 16>;
+};
+
+&gpio4 {
+ gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
+ <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
+ <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+ <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
+ <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
+};
+
+&gpio5 {
+ gpio-ranges = <&iomuxc 0 120 1>, <&iomuxc 2 77 1>, <&iomuxc 4 76 1>,
+ <&iomuxc 5 47 9>, <&iomuxc 14 57 4>, <&iomuxc 18 37 1>,
+ <&iomuxc 19 36 1>, <&iomuxc 20 35 1>, <&iomuxc 21 38 1>,
+ <&iomuxc 22 29 6>, <&iomuxc 28 19 4>;
+};
+
+&gpio6 {
+ gpio-ranges = <&iomuxc 0 23 6>, <&iomuxc 6 75 1>, <&iomuxc 7 156 1>,
+ <&iomuxc 8 155 1>, <&iomuxc 9 170 1>, <&iomuxc 10 169 1>,
+ <&iomuxc 11 157 1>, <&iomuxc 14 158 3>, <&iomuxc 17 204 1>,
+ <&iomuxc 18 203 1>, <&iomuxc 19 182 1>, <&iomuxc 20 177 4>,
+ <&iomuxc 24 175 1>, <&iomuxc 25 171 1>, <&iomuxc 26 181 1>,
+ <&iomuxc 27 172 3>, <&iomuxc 30 176 1>, <&iomuxc 31 78 1>;
+};
+
+&gpio7 {
+ gpio-ranges = <&iomuxc 0 202 1>, <&iomuxc 1 201 1>, <&iomuxc 2 196 1>,
+ <&iomuxc 3 195 1>, <&iomuxc 4 197 4>, <&iomuxc 8 205 1>,
+ <&iomuxc 9 207 1>, <&iomuxc 10 206 1>, <&iomuxc 11 133 3>;
+};
+
&gpt {
compatible = "fsl,imx6dl-gpt";
};
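The gpio-ranges additions in imx50.dtsi and imx6dl.dtsi above hand the GPIO-to-pin mapping to the GPIO core; each entry has the form <&pinctrl gpio-offset pin-offset count>. An annotated reading of the first imx6dl gpio2 entries:

	&gpio2 {
		/* GPIOs 0..7 of this bank map to iomuxc pins 161..168,
		 * GPIOs 8..15 to pins 208..215, GPIO 16 to pin 74, ... */
		gpio-ranges = <&iomuxc 0 161 8>, <&iomuxc 8 208 8>, <&iomuxc 16 74 1>;
	};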
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index d8acf15611e4..4989d0bff10f 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -79,19 +79,19 @@
fsl,pins = <
MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
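The recurring pad-value change in these enet pinmux groups (0x1b0b0 -> 0x1b030, 0x130b0 -> 0x13030, and later 0x100b0 -> 0x10030) clears only bit 7 of the IOMUXC pad control word. On i.MX6Q/DL that bit sits in the two-bit SPEED field (bits 7:6), so the RGMII pads move to a lower pad-speed setting while the pull, keeper, drive-strength and hysteresis bits stay untouched. A sketch of the delta, using hypothetical helper names:

	/* Hypothetical names, only to spell out the bit arithmetic: */
	#define MX6_PAD_CTL_SPEED_SHIFT	6	/* SPEED field: bits 7:6 */
	#define MX6_PAD_CTL_SPEED_MASK	(0x3 << MX6_PAD_CTL_SPEED_SHIFT)

	/* 0x1b0b0 ^ 0x1b030 == 0x00080: only bit 7 differs, i.e. the
	 * SPEED field drops from 0b10 to 0b00 (the lowest setting). */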
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index f0a2be5268e3..78bfc1a307d6 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -89,3 +89,19 @@
};
};
};
+
+&pca9539 {
+ P04 {
+ gpio-hog;
+ gpios = <4 0>;
+ output-low;
+ line-name = "PCA9539-P04";
+ };
+
+ P05 {
+ gpio-hog;
+ gpios = <5 0>;
+ output-low;
+ line-name = "PCA9539-P05";
+ };
+};
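The pca9539 child nodes added here are GPIO hogs: gpio-hog makes the GPIO core claim the line itself at probe time, gpios = <offset flags> names the expander line (flag 0 being active-high), output-low drives it low, and line-name is informational only. Annotated:

	&pca9539 {
		P04 {
			gpio-hog;			/* claimed by the controller at probe */
			gpios = <4 0>;			/* expander line 4, active-high */
			output-low;			/* ...and driven low */
			line-name = "PCA9539-P04";
		};
	};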
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 33cb71acadcc..d85388725426 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -89,3 +89,12 @@
};
};
};
+
+&pca9539 {
+ P05 {
+ gpio-hog;
+ gpios = <5 0>;
+ output-low;
+ line-name = "PCA9539-P05";
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index f2adc60723da..308e11cea1db 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -448,19 +448,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* FEC Reset */
MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0
/* AR8033 Interrupt */
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index cf3fd31e3406..e4a415fd899b 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -183,6 +183,76 @@
interrupt-controller;
interrupt-parent = <&gpio2>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ P06 {
+ gpio-hog;
+ gpios = <6 0>;
+ output-low;
+ line-name = "PCA9539-P06";
+ };
+
+ P07 {
+ gpio-hog;
+ gpios = <7 0>;
+ output-low;
+ line-name = "PCA9539-P07";
+ };
+
+ P10 {
+ gpio-hog;
+ gpios = <8 0>;
+ output-low;
+ line-name = "PCA9539-P10";
+ };
+
+ P11 {
+ gpio-hog;
+ gpios = <9 0>;
+ output-low;
+ line-name = "PCA9539-P11";
+ };
+
+ P12 {
+ gpio-hog;
+ gpios = <10 0>;
+ output-low;
+ line-name = "PCA9539-P12";
+ };
+
+ P13 {
+ gpio-hog;
+ gpios = <11 0>;
+ output-low;
+ line-name = "PCA9539-P13";
+ };
+
+ P14 {
+ gpio-hog;
+ gpios = <12 0>;
+ output-low;
+ line-name = "PCA9539-P14";
+ };
+
+ P15 {
+ gpio-hog;
+ gpios = <13 0>;
+ output-low;
+ line-name = "PCA9539-P15";
+ };
+
+ P16 {
+ gpio-hog;
+ gpios = <14 0>;
+ output-low;
+ line-name = "PCA9539-P16";
+ };
+
+ P17 {
+ gpio-hog;
+ gpios = <15 0>;
+ output-low;
+ line-name = "PCA9539-P17";
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
index b5de7e620905..59bc5a4dce17 100644
--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -168,18 +168,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
index 905907325f3b..908dab68bdca 100644
--- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
@@ -324,18 +324,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index 4fa56019225e..6de21ff47c3a 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -139,6 +139,9 @@
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-reset-gpios = <&gpio1 25 0>;
+ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
@@ -303,21 +306,22 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x4001b0a8
MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0
+ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
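The imx6q-evi change routes the FEC interrupt through GPIO1_IO06 (note the added MX6QDL_PAD_GPIO_6__ENET_IRQ mux entry) in addition to the GIC and flags the board with fsl,err006687-workaround-present, presumably the usual workaround for the i.MX6 ENET erratum in which the native ENET interrupt cannot wake the SoC from low-power wait mode. The assumed pattern, restated:

	&fec {
		/* GPIO1_IO06 is listed first so a wake-capable GPIO IRQ is used;
		 * 119 is the SoC's native ENET interrupt on the GIC. */
		interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
				      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
		fsl,err006687-workaround-present;
	};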
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
index 0511137d1e23..747bc104ad00 100644
--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -421,18 +421,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6q-gw553x.dts b/arch/arm/boot/dts/imx6q-gw553x.dts
new file mode 100644
index 000000000000..e9c224cea752
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw553x.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw553x.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Dual/Quad GW553X";
+ compatible = "gw,imx6q-gw553x", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
index 3f8013c85fb9..f7995c513b67 100644
--- a/arch/arm/boot/dts/imx6q-marsboard.dts
+++ b/arch/arm/boot/dts/imx6q-marsboard.dts
@@ -252,26 +252,26 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
/* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x0a0b1
/* AR8035 pin strapping: IO voltage: pull up */
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
/* AR8035 pin strapping: PHYADDR#0: pull down */
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x130b0
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030
/* AR8035 pin strapping: PHYADDR#1: pull down */
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x130b0
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030
/* AR8035 pin strapping: MODE#1: pull up */
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
/* AR8035 pin strapping: MODE#3: pull up */
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
/* AR8035 pin strapping: MODE#0: pull down */
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x130b0
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030
/* GPIO16 -> AR8035 25MHz */
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
/* RGMII_nRST */
diff --git a/arch/arm/boot/dts/imx6q-novena.dts b/arch/arm/boot/dts/imx6q-novena.dts
index 5acd0c63b33b..1723e89e3acc 100644
--- a/arch/arm/boot/dts/imx6q-novena.dts
+++ b/arch/arm/boot/dts/imx6q-novena.dts
@@ -549,12 +549,12 @@
MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b028
MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b028
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
/* Ethernet reset */
MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6q-sbc6x.dts b/arch/arm/boot/dts/imx6q-sbc6x.dts
index 86cf09364664..255733063ea4 100644
--- a/arch/arm/boot/dts/imx6q-sbc6x.dts
+++ b/arch/arm/boot/dts/imx6q-sbc6x.dts
@@ -31,19 +31,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
>;
};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index d7c8ccb2da95..06f492e17ca7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -284,19 +284,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b059
>;
diff --git a/arch/arm/boot/dts/imx6q-ts4900.dts b/arch/arm/boot/dts/imx6q-ts4900.dts
new file mode 100644
index 000000000000..9b81ebc8b0d4
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-ts4900.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Technologic Systems
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-ts4900.dtsi"
+
+/ {
+ model = "Technologic Systems i.MX6 Quad TS-4900 (Default Device Tree)";
+ compatible = "technologic,imx6q-ts4900", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index c30c8368cae0..e9a5d0b8c7b0 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -209,6 +209,43 @@
};
};
+&gpio1 {
+ gpio-ranges = <&iomuxc 0 136 2>, <&iomuxc 2 141 1>, <&iomuxc 3 139 1>,
+ <&iomuxc 4 142 2>, <&iomuxc 6 140 1>, <&iomuxc 7 144 2>,
+ <&iomuxc 9 138 1>, <&iomuxc 10 213 3>, <&iomuxc 13 20 1>,
+ <&iomuxc 14 19 1>, <&iomuxc 15 21 1>, <&iomuxc 16 208 1>,
+ <&iomuxc 17 207 1>, <&iomuxc 18 210 3>, <&iomuxc 21 209 1>,
+ <&iomuxc 22 116 10>;
+};
+
+&gpio2 {
+ gpio-ranges = <&iomuxc 0 191 16>, <&iomuxc 16 55 14>, <&iomuxc 30 35 1>,
+ <&iomuxc 31 44 1>;
+};
+
+&gpio3 {
+ gpio-ranges = <&iomuxc 0 69 16>, <&iomuxc 16 36 8>, <&iomuxc 24 45 8>;
+};
+
+&gpio4 {
+ gpio-ranges = <&iomuxc 5 149 1>, <&iomuxc 6 126 10>, <&iomuxc 16 87 16>;
+};
+
+&gpio5 {
+ gpio-ranges = <&iomuxc 0 85 1>, <&iomuxc 2 34 1>, <&iomuxc 4 53 1>,
+ <&iomuxc 5 103 13>, <&iomuxc 18 150 14>;
+};
+
+&gpio6 {
+ gpio-ranges = <&iomuxc 0 164 6>, <&iomuxc 6 54 1>, <&iomuxc 7 181 5>,
+ <&iomuxc 14 186 3>, <&iomuxc 17 170 2>, <&iomuxc 19 22 12>,
+ <&iomuxc 31 86 1>;
+};
+
+&gpio7 {
+ gpio-ranges = <&iomuxc 0 172 9>, <&iomuxc 9 189 2>, <&iomuxc 11 146 3>;
+};
+
&hdmi {
compatible = "fsl,imx6q-hdmi";
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 315e033ff1d8..99e323b57261 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -586,19 +586,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* Ethernet PHY reset */
MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x000b0
/* Ethernet PHY interrupt */
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index da1341d47b14..b2c083d57598 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -67,18 +67,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 9d7ab6cdc9a6..afec2c7628ef 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -228,22 +228,28 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6qdl-gw51xx {
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
@@ -364,5 +370,11 @@
MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0 /* OTG_PWR_EN */
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 7191b84770b9..a7100f99123e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -315,6 +315,8 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -353,6 +355,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6qdl-gw52xx {
pinctrl_audmux: audmuxgrp {
@@ -376,18 +384,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
@@ -487,6 +495,7 @@
fsl,pins = <
MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x4001b0b1 /* TEN */
>;
};
@@ -549,5 +558,11 @@
MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170f9
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 40d06b09deba..8953eba0573d 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -312,6 +312,8 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -351,6 +353,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6qdl-gw53xx {
pinctrl_audmux: audmuxgrp {
@@ -365,18 +373,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
@@ -476,6 +484,7 @@
fsl,pins = <
MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x4001b0b1 /* TEN */
>;
};
@@ -539,5 +548,11 @@
MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170f9
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index d6dbe2a88ee6..6ac41c7ed32e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -414,6 +414,8 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -453,6 +455,17 @@
status = "okay";
};
+&wdog1 {
+ status = "disabled";
+};
+
+&wdog2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
&iomuxc {
imx6qdl-gw54xx {
pinctrl_audmux: audmuxgrp {
@@ -467,18 +480,18 @@
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
@@ -592,6 +605,7 @@
fsl,pins = <
MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x4001b0b1 /* TEN */
>;
};
@@ -654,5 +668,11 @@
MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170f9
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_DAT3__WDOG2_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
index 118bea524dab..4b9fef834822 100644
--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
@@ -239,6 +239,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6qdl-gw51xx {
pinctrl_flexcan1: flexcan1grp {
@@ -333,5 +339,11 @@
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
index f27f184558fb..805e23674a94 100644
--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
@@ -197,6 +197,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6qdl-gw552x {
pinctrl_gpio_leds: gpioledsgrp {
@@ -286,5 +292,11 @@
MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
new file mode 100644
index 000000000000..86cec0527f73
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2016 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ led0 = &led0;
+ led1 = &led1;
+ nand = &gpmi;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ };
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x20000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pps>;
+ gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P0V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v: regulator-5p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "5P0V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ status = "okay";
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ ddc-i2c-bus = <&i2c3>;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ gpio: pca9555@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+ status = "disabled";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+ status = "disabled";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>; /* MX6_DIO3 */
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
+&iomuxc {
+ pinctrl_gpmi_nand: gpminandgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
+ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
+ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
+ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
+ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
+ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
+ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
+ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
+ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
+ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
+ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
+ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
+ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
+ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
+ >;
+ };
+
+ pinctrl_hdmi: hdmigrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1f8b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x1b0b0
+ MX6QDL_PAD_KEY_ROW2__GPIO4_IO11 0x1b0b0
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x4001b0b0 /* PCIESKT_WDIS# */
+ >;
+ };
+
+ pinctrl_pps: ppsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_DAT2__PWM2_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0 /* OTG_PWR_EN */
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x17059 /* CD */
+ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x170b9 /* CD */
+ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x170f9 /* CD */
+ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170f9
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
index f8d945a56525..d5c3aa88adbe 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
@@ -254,19 +254,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
index cfd50ea1ed48..880bd782a5b7 100644
--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
@@ -361,12 +361,12 @@
MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* Phy reset */
MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x0f0b0
MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 9677bf323823..b0b3220a1fd9 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -484,19 +484,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* Phy reset */
MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x0f0b0
MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 97d9c333902b..db868bc42c0f 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -394,19 +394,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* Phy reset */
MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x000b0
MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index d6d98d426384..e0280cac2484 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -231,19 +231,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
>;
};
@@ -379,6 +379,18 @@
status = "disabled";
};
+&reg_arm {
+ vin-supply = <&vddcore_reg>;
+};
+
+&reg_pu {
+ vin-supply = <&vddsoc_reg>;
+};
+
+&reg_soc {
+ vin-supply = <&vddsoc_reg>;
+};
+
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index cacf5933707d..17704a5c1bcb 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -196,19 +196,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
/* Phy reset */
MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x000b0
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 6aa193fb283f..e000e6f12bf5 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -345,19 +345,19 @@
fsl,pins = <
MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index f65fdfc2536d..81dd6cd1937d 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -359,19 +359,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
/* Phy reset */
MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x000b0
MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index d77ea9423bbc..8e9e0d98db2f 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -380,19 +380,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-ts4900.dtsi b/arch/arm/boot/dts/imx6qdl-ts4900.dtsi
new file mode 100644
index 000000000000..5c26b26e851a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-ts4900.dtsi
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2015 Technologic Systems
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ aliases {
+ ethernet0 = &fec;
+ };
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds1>;
+ compatible = "gpio-leds";
+
+ green-led {
+ label = "green-led";
+ gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+ red-led {
+ label = "red-led";
+ gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3p3v";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ status = "okay";
+};
+
+&ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ n25q064: flash@0 {
+ compatible = "micron,n25q064", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&ecspi2 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio6 2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio3 28 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ isl12022: rtc@6f {
+ compatible = "isil,isl12022";
+ reg = <0x6f>;
+ };
+
+ gpio8: gpio@28 {
+ compatible = "technologic,ts4900-gpio";
+ reg = <0x28>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ ngpio = <32>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x100b1 /* Onboard flash CS1# */
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__ECSPI2_SCLK 0x100b1
+ MX6QDL_PAD_CSI0_DAT9__ECSPI2_MOSI 0x100b1
+ MX6QDL_PAD_CSI0_DAT10__ECSPI2_MISO 0x100b1
+ MX6QDL_PAD_CSI0_DAT11__GPIO5_IO29 0x100b1 /* Offboard CS0# */
+ MX6QDL_PAD_CSI0_DAT16__GPIO6_IO02 0x100b1 /* FPGA CS1# */
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x1b0b1 /* FPGA_RESET# */
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b0b1 /* FPGA_DONE */
+ MX6QDL_PAD_GPIO_3__XTALOSC_REF_CLK_24M 0x10 /* FPGA 24MHZ */
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b1 /* FPGA_IRQ */
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x4001b0a8
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b1
+ MX6QDL_PAD_DI0_PIN4__GPIO4_IO20 0x1b0b1 /* ETH_PHY_RESET */
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b0b1
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x1b0b1
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x1b0b1
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x1b0b1 /* OFF_BD_RESET# */
+ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x1b0b1 /* EN_USB_5V# */
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x1b0b1 /* EN_LCD_3.3V */
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0 /* Audio CLK */
+ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x1b0b1 /* DIO_1 */
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b1 /* DIO_2 */
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b1 /* DIO_3 */
+ MX6QDL_PAD_GPIO_16__GPIO7_IO11 0x1b0b1 /* DIO_4 */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b1 /* DIO_5 */
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b1 /* DIO_7 */
+ MX6QDL_PAD_CSI0_MCLK__GPIO5_IO19 0x1b0b1 /* DIO_8 */
+ MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18 0x1b0b1 /* DIO_9 */
+ MX6QDL_PAD_CSI0_DAT12__GPIO5_IO30 0x1b0b1 /* DIO_0 */
+ MX6QDL_PAD_CSI0_DAT13__GPIO5_IO31 0x1b0b1 /* DIO_6 */
+ MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03 0x1b0b1 /* CPU_DIO_A */
+ MX6QDL_PAD_SD4_DAT7__GPIO2_IO15 0x1b0b1 /* DIO_2 */
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x1b0b1 /* CPU_DIO_B */
+ MX6QDL_PAD_EIM_LBA__GPIO2_IO27 0x1b0b1 /* BUS_ALE# */
+ MX6QDL_PAD_EIM_OE__GPIO2_IO25 0x1b0b1 /* DIO_15 */
+ MX6QDL_PAD_EIM_RW__GPIO2_IO26 0x1b0b1 /* BUS_DIR */
+ MX6QDL_PAD_EIM_CS0__GPIO2_IO23 0x1b0b1 /* BUS_CS# */
+ MX6QDL_PAD_EIM_A18__GPIO2_IO20 0x1b0b1 /* DIO_14 */
+ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x1b0b1 /* DIO_16 */
+ MX6QDL_PAD_EIM_A21__GPIO2_IO17 0x1b0b1 /* DIO_12 */
+ MX6QDL_PAD_EIM_A22__GPIO2_IO16 0x1b0b1 /* DIO_18 */
+ MX6QDL_PAD_EIM_A23__GPIO6_IO06 0x1b0b1 /* DIO_19 */
+ MX6QDL_PAD_EIM_A24__GPIO5_IO04 0x1b0b1 /* DIO_20 */
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b1 /* BUS_BHE# */
+ MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x1b0b1 /* DIO_13 */
+ MX6QDL_PAD_EIM_WAIT__GPIO5_IO00 0x1b0b1 /* EIM_WAIT# */
+ MX6QDL_PAD_EIM_EB1__GPIO2_IO29 0x1b0b1 /* DIO_10 */
+ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x1b0b1 /* MUX_AD_00 */
+ MX6QDL_PAD_EIM_DA1__GPIO3_IO01 0x1b0b1 /* MUX_AD_01 */
+ MX6QDL_PAD_EIM_DA2__GPIO3_IO02 0x1b0b1 /* MUX_AD_02 */
+ MX6QDL_PAD_EIM_DA3__GPIO3_IO03 0x1b0b1 /* MUX_AD_03 */
+ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x1b0b1 /* MUX_AD_04 */
+ MX6QDL_PAD_EIM_DA5__GPIO3_IO05 0x1b0b1 /* MUX_AD_05 */
+ MX6QDL_PAD_EIM_DA6__GPIO3_IO06 0x1b0b1 /* MUX_AD_06 */
+ MX6QDL_PAD_EIM_DA7__GPIO3_IO07 0x1b0b1 /* MUX_AD_07 */
+ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x1b0b1 /* MUX_AD_08 */
+ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x1b0b1 /* MUX_AD_09 */
+ MX6QDL_PAD_EIM_DA10__GPIO3_IO10 0x1b0b1 /* MUX_AD_10 */
+ MX6QDL_PAD_EIM_DA11__GPIO3_IO11 0x1b0b1 /* MUX_AD_11 */
+ MX6QDL_PAD_EIM_DA12__GPIO3_IO12 0x1b0b1 /* MUX_AD_12 */
+ MX6QDL_PAD_EIM_DA13__GPIO3_IO13 0x1b0b1 /* MUX_AD_13 */
+ MX6QDL_PAD_EIM_DA14__GPIO3_IO14 0x1b0b1 /* MUX_AD_14 */
+ MX6QDL_PAD_EIM_DA15__GPIO3_IO15 0x1b0b1 /* MUX_AD_15 */
+ MX6QDL_PAD_DI0_DISP_CLK__GPIO4_IO16 0x1b0b1 /* LCD_CLK */
+ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x1b0b1 /* DE */
+ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x1b0b1 /* Hsync */
+ MX6QDL_PAD_DI0_PIN3__GPIO4_IO19 0x1b0b1 /* Vsync */
+ MX6QDL_PAD_DISP0_DAT0__GPIO4_IO21 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT1__GPIO4_IO22 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT2__GPIO4_IO23 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT4__GPIO4_IO25 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT6__GPIO4_IO27 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT7__GPIO4_IO28 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT8__GPIO4_IO29 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT10__GPIO4_IO31 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT12__GPIO5_IO06 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT13__GPIO5_IO07 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT14__GPIO5_IO08 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT15__GPIO5_IO09 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT16__GPIO5_IO10 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT17__GPIO5_IO11 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT20__GPIO5_IO14 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16 0x1b0b1
+ MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17 0x1b0b1
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x4001b8b1
+ >;
+ };
+
+ pinctrl_leds1: leds1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b1 /* RED_LED# */
+ MX6QDL_PAD_EIM_CS1__GPIO2_IO24 0x1b0b1 /* GREEN_LED# */
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_GPIO_8__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT6__UART2_CTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT5__UART2_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x17059 /* WIFI IRQ */
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ MX6QDL_PAD_EIM_EB0__GPIO2_IO28 0x1b0b1 /* EN_SD_POWER# */
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ >;
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
+/* SD */
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ vmmc-supply = <&reg_3p3v>;
+ bus-width = <4>;
+ fsl,wp-controller;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ vmmc-supply = <&reg_3p3v>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index 3bee2f910067..c96c91d83678 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -132,18 +132,18 @@
imx6q-udoo {
pinctrl_enet: enetgrp {
fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 3ffe00c557f1..2b9c2be436f9 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -109,19 +109,19 @@
fsl,pins = <
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 542515089b1e..02378db3f5fc 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -375,6 +375,12 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 22 1>, <&iomuxc 1 20 2>,
+ <&iomuxc 3 23 1>, <&iomuxc 4 25 1>,
+ <&iomuxc 5 24 1>, <&iomuxc 6 19 1>,
+ <&iomuxc 7 36 2>, <&iomuxc 9 44 8>,
+ <&iomuxc 17 38 6>, <&iomuxc 23 68 4>,
+ <&iomuxc 27 64 4>, <&iomuxc 31 52 1>;
};
gpio2: gpio@020a0000 {
@@ -386,6 +392,13 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 53 3>, <&iomuxc 3 72 2>,
+ <&iomuxc 5 34 2>, <&iomuxc 7 57 4>,
+ <&iomuxc 11 56 1>, <&iomuxc 12 61 3>,
+ <&iomuxc 15 107 1>, <&iomuxc 16 132 2>,
+ <&iomuxc 18 135 1>, <&iomuxc 19 134 1>,
+ <&iomuxc 20 108 2>, <&iomuxc 22 120 1>,
+ <&iomuxc 23 125 7>, <&iomuxc 30 110 2>;
};
gpio3: gpio@020a4000 {
@@ -397,6 +410,14 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 112 8>, <&iomuxc 8 121 4>,
+ <&iomuxc 12 97 4>, <&iomuxc 16 166 3>,
+ <&iomuxc 19 85 2>, <&iomuxc 21 137 2>,
+ <&iomuxc 23 136 1>, <&iomuxc 24 91 1>,
+ <&iomuxc 25 99 1>, <&iomuxc 26 92 1>,
+ <&iomuxc 27 100 1>, <&iomuxc 28 93 1>,
+ <&iomuxc 29 101 1>, <&iomuxc 30 94 1>,
+ <&iomuxc 31 102 1>;
};
gpio4: gpio@020a8000 {
@@ -408,6 +429,21 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 95 1>, <&iomuxc 1 103 1>,
+ <&iomuxc 2 96 1>, <&iomuxc 3 104 1>,
+ <&iomuxc 4 97 1>, <&iomuxc 5 105 1>,
+ <&iomuxc 6 98 1>, <&iomuxc 7 106 1>,
+ <&iomuxc 8 28 1>, <&iomuxc 9 27 1>,
+ <&iomuxc 10 26 1>, <&iomuxc 11 29 1>,
+ <&iomuxc 12 32 1>, <&iomuxc 13 31 1>,
+ <&iomuxc 14 30 1>, <&iomuxc 15 33 1>,
+ <&iomuxc 16 84 1>, <&iomuxc 17 79 2>,
+ <&iomuxc 19 78 1>, <&iomuxc 20 76 1>,
+ <&iomuxc 21 81 2>, <&iomuxc 23 75 1>,
+ <&iomuxc 24 83 1>, <&iomuxc 25 74 1>,
+ <&iomuxc 26 77 1>, <&iomuxc 27 159 1>,
+ <&iomuxc 28 154 1>, <&iomuxc 29 157 1>,
+ <&iomuxc 30 152 1>, <&iomuxc 31 156 1>;
};
gpio5: gpio@020ac000 {
@@ -419,6 +455,17 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 158 1>, <&iomuxc 1 151 1>,
+ <&iomuxc 2 155 1>, <&iomuxc 3 153 1>,
+ <&iomuxc 4 150 1>, <&iomuxc 5 149 1>,
+ <&iomuxc 6 144 1>, <&iomuxc 7 147 1>,
+ <&iomuxc 8 142 1>, <&iomuxc 9 146 1>,
+ <&iomuxc 10 148 1>, <&iomuxc 11 141 1>,
+ <&iomuxc 12 145 1>, <&iomuxc 13 143 1>,
+ <&iomuxc 14 140 1>, <&iomuxc 15 139 1>,
+ <&iomuxc 16 164 2>, <&iomuxc 18 160 1>,
+ <&iomuxc 19 162 1>, <&iomuxc 20 163 1>,
+ <&iomuxc 21 161 1>;
};
kpp: kpp@020b8000 {
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index bb9c6b78cb97..42c4c800feea 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -308,6 +308,20 @@
#define MX6SX_PAD_ENET1_RX_CLK__VDEC_DEBUG_35 0x008C 0x03D4 0x0000 0x8 0x0
#define MX6SX_PAD_ENET1_RX_CLK__PCIE_CTRL_DEBUG_29 0x008C 0x03D4 0x0000 0x9 0x0
#define MX6SX_PAD_ENET1_TX_CLK__ENET1_TX_CLK 0x0090 0x03D8 0x0000 0x0 0x0
+/*
+ * The SION bit is necessary for ENET1_REF_CLK1 (ENET2_REF_CLK2 is untested)
+ * when it is used as the clock output of IMX6SX_CLK_ENET_REF (ENET1_TX_CLK),
+ * e.g. to supply a PHY in RMII mode. This configuration is valid if:
+ * - bit 1 in field IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK is set
+ * - bit 1 in field IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK is unset
+ * It appears to be a silicon bug that the ENET1_TX reference clock is not
+ * provided automatically in this configuration, even though the i.MX6SX
+ * reference manual (IOMUXC_GPR_GPR1 field descriptions: ENET1_CLK_SEL,
+ * Rev. 0 from 2/2015) says it should be.
+ * Setting SION may have unwanted side effects for other hardware units that
+ * are also connected to this pad and use the respective function as input
+ * (e.g. UART1's DTR handling on MX6SX_PAD_ENET1_TX_CLK__UART1_DTR_B).
+ */
#define MX6SX_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x0090 0x03D8 0x0760 0x1 0x1
#define MX6SX_PAD_ENET1_TX_CLK__AUDMUX_AUD4_RXD 0x0090 0x03D8 0x0644 0x2 0x1
#define MX6SX_PAD_ENET1_TX_CLK__UART1_DTR_B 0x0090 0x03D8 0x0000 0x3 0x0
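For context: with the i.MX pinctrl binding, the SION bit described in the comment above is requested from a board device tree by OR-ing 0x40000000 (bit 30) into the pad configuration cell of an fsl,pins entry; the imx6ul-geam.dtsi added later in this series does exactly that for its ENET reference clock pads (config value 0x4001b031). A minimal sketch for a hypothetical i.MX6SX board, with the group name and pad-control bits chosen only for illustration:

    &iomuxc {
            pinctrl_enet1_refclk: enet1refclkgrp {
                    fsl,pins = <
                            /* bit 30 (0x40000000) sets SION on this pad */
                            MX6SX_PAD_ENET1_TX_CLK__ENET1_REF_CLK1  0x4001b031
                    >;
            };
    };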
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 2863c52be6f5..1a473e83efbf 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -159,6 +159,16 @@
arm,data-latency = <4 2 3>;
};
+ gpu: gpu@01800000 {
+ compatible = "vivante,gc";
+ reg = <0x01800000 0x4000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_GPU>,
+ <&clks IMX6SX_CLK_GPU>,
+ <&clks IMX6SX_CLK_GPU>;
+ clock-names = "bus", "core", "shader";
+ };
+
dma_apbh: dma-apbh@01804000 {
compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
@@ -438,6 +448,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 5 26>;
};
gpio2: gpio@020a0000 {
@@ -449,6 +460,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 31 20>;
};
gpio3: gpio@020a4000 {
@@ -460,6 +472,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 51 29>;
};
gpio4: gpio@020a8000 {
@@ -471,6 +484,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 80 32>;
};
gpio5: gpio@020ac000 {
@@ -482,6 +496,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 112 24>;
};
gpio6: gpio@020b0000 {
@@ -493,6 +508,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 136 12>, <&iomuxc 12 158 11>;
};
gpio7: gpio@020b4000 {
@@ -504,6 +520,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 148 10>, <&iomuxc 10 169 2>;
};
kpp: kpp@020b8000 {
@@ -1273,4 +1290,9 @@
status = "disabled";
};
};
+
+ gpu-subsystem {
+ compatible = "fsl,imx-gpu-subsystem";
+ cores = <&gpu>;
+ };
};
diff --git a/arch/arm/boot/dts/imx6ul-geam-kit.dts b/arch/arm/boot/dts/imx6ul-geam-kit.dts
new file mode 100644
index 000000000000..4c4af76143e3
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-geam-kit.dts
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "imx6ul-geam.dtsi"
+
+/ {
+ model = "Engicam GEAM6UL";
+ compatible = "engicam,imx6ul-geam", "fsl,imx6ul";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&can2 {
+ status = "okay";
+};
+
+&lcdif {
+ display = <&display0>;
+ status = "okay";
+
+ display0: display {
+ bits-per-pixel = <16>;
+ bus-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <28000000>;
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <30>;
+ hback-porch = <30>;
+ hsync-len = <64>;
+ vback-porch = <5>;
+ vfront-porch = <5>;
+ vsync-len = <20>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ status = "okay";
+};
+
+&tsc {
+ measure-delay-time = <0x1ffff>;
+ pre-charge-time = <0x1fff>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6ul-geam.dtsi b/arch/arm/boot/dts/imx6ul-geam.dtsi
new file mode 100644
index 000000000000..64eb9ed59b9c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-geam.dtsi
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "imx6ul.dtsi"
+
+/ {
+ memory {
+ reg = <0x80000000 0x08000000>;
+ };
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_3p3v>;
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ xceiver-supply = <&reg_3p3v>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ nand-on-flash-bbt;
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcdif_dat
+ &pinctrl_lcdif_ctrl>;
+ display = <&display0>;
+};
+
+&tsc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tsc>;
+ xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <4>;
+ cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO07__ENET2_MDC 0x1b0b0
+ MX6UL_PAD_GPIO1_IO06__ENET2_MDIO 0x1b0b0
+ MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_RX_ER__GPIO2_IO15 0x1b0b0 /* ENET_nRST */
+ MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0
+ MX6UL_PAD_GPIO1_IO05__ENET2_REF_CLK2 0x4001b031
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x1b020
+ MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x1b020
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_RTS_B__FLEXCAN2_RX 0x1b020
+ MX6UL_PAD_UART2_CTS_B__FLEXCAN2_TX 0x1b020
+ >;
+ };
+
+ pinctrl_gpmi_nand: gpmi-nand {
+ fsl,pins = <
+ MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0xb0b1
+ MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0xb0b1
+ MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B 0xb0b1
+ MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B 0xb000
+ MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B 0xb0b1
+ MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B 0xb0b1
+ MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B 0xb0b1
+ MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00 0xb0b1
+ MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01 0xb0b1
+ MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02 0xb0b1
+ MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03 0xb0b1
+ MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04 0xb0b1
+ MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05 0xb0b1
+ MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06 0xb0b1
+ MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07 0xb0b1
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
+ MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART5_TX_DATA__I2C2_SCL 0x4001b8b0
+ MX6UL_PAD_UART5_RX_DATA__I2C2_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_lcdif_ctrl: lcdifctrlgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79
+ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79
+ MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79
+ MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79
+ >;
+ };
+
+ pinctrl_lcdif_dat: lcdifdatgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79
+ MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79
+ MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79
+ MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79
+ MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79
+ MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79
+ MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79
+ MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79
+ MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79
+ MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79
+ MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79
+ MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79
+ MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79
+ MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79
+ MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79
+ MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79
+ MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79
+ MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79
+ >;
+ };
+
+ pinctrl_tsc: tscgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
+ MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
+ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
+ MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0xb0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1
+ MX6UL_PAD_UART3_RX_DATA__UART2_DCE_RTS 0x1b0b1
+ MX6UL_PAD_UART3_TX_DATA__UART2_DCE_CTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x10059
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_VSYNC__USDHC2_CLK 0x17070
+ MX6UL_PAD_CSI_HSYNC__USDHC2_CMD 0x10070
+ MX6UL_PAD_CSI_DATA00__USDHC2_DATA0 0x17070
+ MX6UL_PAD_CSI_DATA01__USDHC2_DATA1 0x17070
+ MX6UL_PAD_CSI_DATA02__USDHC2_DATA2 0x17070
+ MX6UL_PAD_CSI_DATA03__USDHC2_DATA3 0x17070
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
index 86f68faded0e..827d9e8fc74e 100644
--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
@@ -100,6 +100,18 @@
gpio = <&gpio1 6 0>;
};
+ reg_brcm: regulator-brcm {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_brcm_reg>;
+ regulator-name = "brcm_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <200000>;
+ };
+
sound {
compatible = "fsl,imx-audio-sgtl5000";
model = "imx6ul-sgtl5000";
@@ -325,12 +337,27 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
no-1-8-v;
+ non-removable;
keep-power-in-suspend;
wakeup-source;
+ vmmc-supply = <&reg_brcm>;
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
+ pinctrl_brcm_reg: brcmreggrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_DATA06__GPIO4_IO08 0x10b0 /* WL_REG_ON */
+ MX6UL_PAD_NAND_DATA04__GPIO4_IO06 0x10b0 /* WL_HOST_WAKE */
+ >;
+ };
+
pinctrl_enet2: enet2grp {
fsl,pins = <
MX6UL_PAD_ENET1_TX_DATA1__ENET2_MDIO 0x1b0b0
@@ -513,4 +540,10 @@
MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_RESET__WDOG1_WDOG_ANY 0x30b0
+ >;
+ };
};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 33b95d78831a..c5c05fdccc78 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -411,6 +411,8 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 23 10>, <&iomuxc 10 17 6>,
+ <&iomuxc 16 33 16>;
};
gpio2: gpio@020a0000 {
@@ -422,6 +424,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 49 16>, <&iomuxc 16 111 6>;
};
gpio3: gpio@020a4000 {
@@ -433,6 +436,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 65 29>;
};
gpio4: gpio@020a8000 {
@@ -444,6 +448,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 94 17>, <&iomuxc 17 117 12>;
};
gpio5: gpio@020ac000 {
@@ -455,6 +460,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 7 10>, <&iomuxc 10 5 2>;
};
fec2: ethernet@020b4000 {
@@ -644,7 +650,8 @@
};
gpr: iomuxc-gpr@020e4000 {
- compatible = "fsl,imx6ul-iomuxc-gpr", "syscon";
+ compatible = "fsl,imx6ul-iomuxc-gpr",
+ "fsl,imx6q-iomuxc-gpr", "syscon";
reg = <0x020e4000 0x4000>;
};
diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
index 1545661df583..373ee19196a6 100644
--- a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
@@ -138,10 +138,6 @@
};
&usdhc1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_cd_usdhc1>;
- no-1-8-v;
- cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
keep-power-in-suspend;
wakeup-source;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index 0a9d3a822fc0..a9cc65725f19 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -46,12 +46,18 @@
pwms = <&pwm1 0 5000000>;
};
- reg_3p3v: regulator-3p3v {
+ reg_module_3v3: regulator-module-3v3 {
compatible = "regulator-fixed";
- regulator-name = "3P3V";
+ regulator-name = "+V3.3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_module_3v3_avdd: regulator-module-3v3-avdd {
+ compatible = "regulator-fixed";
+ regulator-name = "+V3.3_AVDD_AUDIO";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
};
reg_vref_1v8: regulator-vref-1v8 {
@@ -60,6 +66,22 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "imx7-sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+ };
+ };
};
&adc1 {
@@ -97,6 +119,18 @@
pinctrl-0 = <&pinctrl_i2c1 &pinctrl_i2c1_int>;
status = "okay";
+ codec: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ #sound-dai-cells = <0>;
+ reg = <0x0a>;
+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1_mclk>;
+ VDDA-supply = <&reg_module_3v3_avdd>;
+ VDDIO-supply = <&reg_module_3v3>;
+ VDDD-supply = <&reg_DCDC3>;
+ };
+
ad7879@2c {
compatible = "adi,ad7879-1";
reg = <0x2c>;
@@ -217,6 +251,12 @@
vin-supply = <&reg_DCDC3>;
};
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
&snvs_pwrkey {
status = "disabled";
};
@@ -251,6 +291,14 @@
dr_mode = "host";
};
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_cd_usdhc1>;
+ no-1-8-v;
+ cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ disable-wp;
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio1 &pinctrl_gpio2 &pinctrl_gpio3 &pinctrl_gpio4>;
@@ -528,13 +576,18 @@
pinctrl_sai1: sai1-grp {
fsl,pins = <
- MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f
MX7D_PAD_SAI1_TX_SYNC__SAI1_TX_SYNC 0x1f
MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30
MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f
>;
};
+
+ pinctrl_sai1_mclk: sai1grp_mclk {
+ fsl,pins = <
+ MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
+ >;
+ };
};
&iomuxc_lpsr {
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 51c13cbdffb7..f6dee41a05d9 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -45,30 +45,42 @@
/ {
cpus {
+ cpu0: cpu@0 {
+ operating-points = <
+ /* KHz uV */
+ 996000 1075000
+ 792000 975000
+ >;
+ clock-frequency = <996000000>;
+ };
+
cpu1: cpu@1 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <1>;
+ clock-frequency = <996000000>;
};
};
- etm@3007d000 {
- compatible = "arm,coresight-etm3x", "arm,primecell";
- reg = <0x3007d000 0x1000>;
+ soc {
+ etm@3007d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x3007d000 0x1000>;
- /*
- * System will hang if added nosmp in kernel command line
- * without arm,primecell-periphid because amba bus try to
- * read id and core1 power off at this time.
- */
- arm,primecell-periphid = <0xbb956>;
- cpu = <&cpu1>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
+ /*
+ * The system will hang if "nosmp" is added to the kernel
+ * command line without arm,primecell-periphid, because the
+ * amba bus tries to read the ID while core1 is powered off.
+ */
+ arm,primecell-periphid = <0xbb956>;
+ cpu = <&cpu1>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- port {
- etm1_out_port: endpoint {
- remote-endpoint = <&ca_funnel_in_port1>;
+ port {
+ etm1_out_port: endpoint {
+ remote-endpoint = <&ca_funnel_in_port1>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts
new file mode 100644
index 000000000000..0345267f3390
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-warp.dts
@@ -0,0 +1,446 @@
+/*
+ * Copyright (C) 2016 NXP Semiconductors.
+ * Author: Fabio Estevam <fabio.estevam@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include "imx7s.dtsi"
+
+/ {
+ model = "Warp i.MX7 Board";
+ compatible = "warp,imx7s-warp", "fsl,imx7s";
+
+ memory {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&pinctrl_gpio>;
+ autorepeat;
+
+ back {
+ label = "Back";
+ gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_BACK>;
+ wakeup-source;
+ };
+ };
+
+ reg_brcm: regulator-brcm {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio5 10 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_brcm_reg>;
+ regulator-name = "brcm_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <200000>;
+ };
+
+ reg_bt: regulator-bt {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_bt_reg>;
+ enable-active-high;
+ gpio = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+ regulator-name = "bt_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "imx7-sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+ };
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <884736000>;
+};
+
+&cpu0 {
+ arm-supply = <&sw1a_reg>;
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic: pfuze3000@08 {
+ compatible = "fsl,pfuze3000";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1a {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ /* use sw1c_reg to align with pfuze100/pfuze200 */
+ sw1c_reg: sw1b {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ codec: sgtl5000@0a {
+ #sound-dai-cells = <0>;
+ reg = <0x0a>;
+ compatible = "fsl,sgtl5000";
+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1_mclk>;
+ VDDA-supply = <&vgen4_reg>;
+ VDDIO-supply = <&vgen4_reg>;
+ VDDD-supply = <&vgen2_reg>;
+ };
+
+ mpl3115@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+};
+
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ assigned-clocks = <&clks IMX7D_SAI1_ROOT_SRC>,
+ <&clks IMX7D_SAI1_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <0>, <36864000>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clks IMX7D_UART1_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clks IMX7D_UART3_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ keep-power-in-suspend;
+ no-1-8-v;
+ non-removable;
+ vmmc-supply = <&reg_brcm>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ fsl,tuning-step = <2>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_brcm_reg: brcmreggrp {
+ fsl,pins = <
+ MX7D_PAD_SD2_WP__GPIO5_IO10 0x14 /* WL_REG_ON */
+ >;
+ };
+
+ pinctrl_bt_reg: btreggrp {
+ fsl,pins = <
+ MX7D_PAD_SD2_DATA3__GPIO5_IO17 0x14 /* BT_REG_ON */
+ >;
+ };
+
+ pinctrl_gpio: gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_RGMII_RD1__GPIO7_IO1 0x14
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
+ MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f
+ MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX7D_PAD_I2C4_SCL__I2C4_SCL 0x4000007f
+ MX7D_PAD_I2C4_SDA__I2C4_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_RX_DATA__SAI1_RX_DATA0 0x1f
+ MX7D_PAD_SAI1_TX_BCLK__SAI1_TX_BCLK 0x1f
+ MX7D_PAD_SAI1_TX_SYNC__SAI1_TX_SYNC 0x1f
+ MX7D_PAD_SAI1_TX_DATA__SAI1_TX_DATA0 0x30
+ >;
+ };
+
+ pinctrl_sai1_mclk: sai1mclkgrp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79
+ MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX7D_PAD_UART3_TX_DATA__UART3_DCE_TX 0x79
+ MX7D_PAD_UART3_RX_DATA__UART3_DCE_RX 0x79
+ MX7D_PAD_UART3_CTS_B__UART3_DCE_CTS 0x79
+ MX7D_PAD_UART3_RTS_B__UART3_DCE_RTS 0x79
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x59
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x19
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59
+ MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x14 /* WL_HOST_WAKE */
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x59
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x19
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
+ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x19
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a
+ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x1a
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b
+ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x1b
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO00__WDOD1_WDOG_B 0x74
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 1e90bdbe3a6e..0d7d5ac6257b 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -85,26 +85,12 @@
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <0>;
- operating-points = <
- /* KHz uV */
- 996000 1075000
- 792000 975000
- >;
+ clock-frequency = <792000000>;
clock-latency = <61036>; /* two CLK32 periods */
clocks = <&clks IMX7D_CLK_ARM>;
};
};
- intc: interrupt-controller@31001000 {
- compatible = "arm,cortex-a7-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x31001000 0x1000>,
- <0x31002000 0x1000>,
- <0x31004000 0x2000>,
- <0x31006000 0x2000>;
- };
-
ckil: clock-cki {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -119,195 +105,205 @@
clock-output-names = "osc";
};
- timer {
- compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
interrupt-parent = <&intc>;
- };
+ ranges;
+
+ funnel@30041000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x30041000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- etr@30086000 {
- compatible = "arm,coresight-tmc", "arm,primecell";
- reg = <0x30086000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
+ ca_funnel_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port {
- etr_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&replicator_out_port1>;
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ ca_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out_port>;
+ };
+ };
+
+ /* funnel output port */
+ port@2 {
+ reg = <0>;
+ ca_funnel_out_port0: endpoint {
+ remote-endpoint = <&hugo_funnel_in_port0>;
+ };
+ };
+
+ /* the other input ports are not connected to anything */
};
};
- };
- tpiu@30087000 {
- compatible = "arm,coresight-tpiu", "arm,primecell";
- reg = <0x30087000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
+ etm@3007c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x3007c000 0x1000>;
+ cpu = <&cpu0>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- port {
- tpiu_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&replicator_out_port1>;
+ port {
+ etm0_out_port: endpoint {
+ remote-endpoint = <&ca_funnel_in_port0>;
+ };
};
};
- };
- replicator {
- /*
- * non-configurable replicators don't show up on the
- * AMBA bus. As such no need to add "arm,primecell"
- */
- compatible = "arm,coresight-replicator";
+ funnel@30083000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x30083000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- /* replicator output ports */
- port@0 {
- reg = <0>;
- replicator_out_port0: endpoint {
- remote-endpoint = <&tpiu_in_port>;
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ hugo_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ca_funnel_out_port0>;
+ };
};
- };
- port@1 {
- reg = <1>;
- replicator_out_port1: endpoint {
- remote-endpoint = <&etr_in_port>;
+ port@1 {
+ reg = <1>;
+ hugo_funnel_in_port1: endpoint {
+ slave-mode; /* M4 input */
+ };
};
- };
- /* replicator input port */
- port@2 {
- reg = <0>;
- replicator_in_port0: endpoint {
- slave-mode;
- remote-endpoint = <&etf_out_port>;
+ port@2 {
+ reg = <0>;
+ hugo_funnel_out_port0: endpoint {
+ remote-endpoint = <&etf_in_port>;
+ };
};
+
+ /* the other input ports are not connected to anything */
};
};
- };
- etf@30084000 {
- compatible = "arm,coresight-tmc", "arm,primecell";
- reg = <0x30084000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
+ etf@30084000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x30084000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@0 {
- reg = <0>;
- etf_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&hugo_funnel_out_port0>;
+ port@0 {
+ reg = <0>;
+ etf_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&hugo_funnel_out_port0>;
+ };
};
- };
- port@1 {
- reg = <0>;
- etf_out_port: endpoint {
- remote-endpoint = <&replicator_in_port0>;
+ port@1 {
+ reg = <0>;
+ etf_out_port: endpoint {
+ remote-endpoint = <&replicator_in_port0>;
+ };
};
};
};
- };
-
- funnel@30083000 {
- compatible = "arm,coresight-funnel", "arm,primecell";
- reg = <0x30083000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ etr@30086000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x30086000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- /* funnel input ports */
- port@0 {
- reg = <0>;
- hugo_funnel_in_port0: endpoint {
+ port {
+ etr_in_port: endpoint {
slave-mode;
- remote-endpoint = <&ca_funnel_out_port0>;
+ remote-endpoint = <&replicator_out_port1>;
};
};
+ };
- port@1 {
- reg = <1>;
- hugo_funnel_in_port1: endpoint {
- slave-mode; /* M4 input */
- };
- };
+ tpiu@30087000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x30087000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
- port@2 {
- reg = <0>;
- hugo_funnel_out_port0: endpoint {
- remote-endpoint = <&etf_in_port>;
+ port {
+ tpiu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
};
};
-
- /* the other input ports are not connect to anything */
};
- };
- funnel@30041000 {
- compatible = "arm,coresight-funnel", "arm,primecell";
- reg = <0x30041000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
+ replicator {
+ /*
+ * Non-configurable replicators don't show up on the AMBA
+ * bus, so there is no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
- ca_funnel_ports: ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- /* funnel input ports */
- port@0 {
- reg = <0>;
- ca_funnel_in_port0: endpoint {
- slave-mode;
- remote-endpoint = <&etm0_out_port>;
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
};
- };
- /* funnel output port */
- port@2 {
- reg = <0>;
- ca_funnel_out_port0: endpoint {
- remote-endpoint = <&hugo_funnel_in_port0>;
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etr_in_port>;
+ };
};
- };
- /* the other input ports are not connect to anything */
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out_port>;
+ };
+ };
+ };
};
- };
- etm@3007c000 {
- compatible = "arm,coresight-etm3x", "arm,primecell";
- reg = <0x3007c000 0x1000>;
- cpu = <&cpu0>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- port {
- etm0_out_port: endpoint {
- remote-endpoint = <&ca_funnel_in_port0>;
- };
+ intc: interrupt-controller@31001000 {
+ compatible = "arm,cortex-a7-gic";
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x31001000 0x1000>,
+ <0x31002000 0x2000>,
+ <0x31004000 0x2000>,
+ <0x31006000 0x2000>;
};
- };
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&intc>;
- ranges;
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
aips1: aips-bus@30000000 {
compatible = "fsl,aips-bus", "simple-bus";
@@ -325,6 +321,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc_lpsr 0 0 8>, <&iomuxc 8 5 8>;
};
gpio2: gpio@30210000 {
@@ -336,6 +333,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 13 32>;
};
gpio3: gpio@30220000 {
@@ -347,6 +345,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 45 29>;
};
gpio4: gpio@30230000 {
@@ -358,6 +357,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 74 24>;
};
gpio5: gpio@30240000 {
@@ -369,6 +369,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 98 18>;
};
gpio6: gpio@30250000 {
@@ -380,6 +381,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 116 23>;
};
gpio7: gpio@30260000 {
@@ -391,6 +393,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 139 16>;
};
wdog1: wdog@30280000 {
@@ -723,6 +726,51 @@
status = "disabled";
};
+ sai1: sai@308a0000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx7d-sai", "fsl,imx6sx-sai";
+ reg = <0x308a0000 0x10000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_SAI1_IPG_CLK>,
+ <&clks IMX7D_SAI1_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "rx", "tx";
+ dmas = <&sdma 8 24 0>, <&sdma 9 24 0>;
+ status = "disabled";
+ };
+
+ sai2: sai@308b0000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx7d-sai", "fsl,imx6sx-sai";
+ reg = <0x308b0000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_SAI2_IPG_CLK>,
+ <&clks IMX7D_SAI2_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "rx", "tx";
+ dmas = <&sdma 10 24 0>, <&sdma 11 24 0>;
+ status = "disabled";
+ };
+
+ sai3: sai@308c0000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx7d-sai", "fsl,imx6sx-sai";
+ reg = <0x308c0000 0x10000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_SAI3_IPG_CLK>,
+ <&clks IMX7D_SAI3_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "rx", "tx";
+ dmas = <&sdma 12 24 0>, <&sdma 13 24 0>;
+ status = "disabled";
+ };
+
flexcan1: can@30a00000 {
compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
reg = <0x30a00000 0x10000>;
@@ -911,6 +959,17 @@
status = "disabled";
};
+ sdma: sdma@30bd0000 {
+ compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
+ reg = <0x30bd0000 0x10000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_SDMA_CORE_CLK>,
+ <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
fec1: ethernet@30be0000 {
compatible = "fsl,imx7d-fec", "fsl,imx6sx-fec";
reg = <0x30be0000 0x10000>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 4b34b54e09a1..6f16d09dc5a4 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -19,7 +19,7 @@
bootargs = "root=/dev/ram0 console=ttyAM0,38400n8 earlyprintk";
};
- /* 24 MHz chrystal on the core module */
+ /* 24 MHz crystal on the Integrator/AP development board */
xtal24mhz: xtal24mhz@24M {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -39,6 +39,34 @@
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <14745600>;
+ clocks = <&xtal24mhz>;
+ };
+
+ core-module@10000000 {
+ /* 24 MHz crystal on the core module */
+ cm24mhz: cm24mhz@24M {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+
+ /* Oscillator on the core module, clocks the CPU core */
+ cmosc: cmosc@24M {
+ compatible = "arm,syscon-icst525-integratorap-cm";
+ #clock-cells = <0>;
+ lock-offset = <0x14>;
+ vco-offset = <0x08>;
+ clocks = <&cm24mhz>;
+ };
+
+ /* Auxiliary oscillator on the core module, 32.369 MHz at boot */
+ auxosc: auxosc@24M {
+ compatible = "arm,syscon-icst525";
+ #clock-cells = <0>;
+ lock-offset = <0x14>;
+ vco-offset = <0x1c>;
+ clocks = <&cm24mhz>;
+ };
};
syscon {
@@ -47,6 +75,27 @@
interrupt-parent = <&pic>;
/* These are the logical module IRQs */
interrupts = <9>, <10>, <11>, <12>;
+
+ /*
+ * SYSCLK clocks the PCIv3 bridge, the system controller and the
+ * logic modules.
+ */
+ sysclk: apsys@24M {
+ compatible = "arm,syscon-icst525-integratorap-sys";
+ #clock-cells = <0>;
+ lock-offset = <0x1c>;
+ vco-offset = <0x04>;
+ clocks = <&xtal24mhz>;
+ };
+
+ /* One-bit control for the PCI bus clock (33 or 25 MHz) */
+ pciclk: pciclk@24M {
+ compatible = "arm,syscon-icst525-integratorap-pci";
+ #clock-cells = <0>;
+ lock-offset = <0x1c>;
+ vco-offset = <0x04>;
+ clocks = <&xtal24mhz>;
+ };
};
timer0: timer@13000000 {
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index 79430fbfec3b..1b5e4b006b72 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -58,20 +58,37 @@
core-module@10000000 {
/* 24 MHz crystal on the core module */
- xtal24mhz: xtal24mhz@24M {
+ cm24mhz: cm24mhz@24M {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
- /*
- * External oscillator on the core module, usually used
- * to drive video circuitry. Driven from the 24MHz clock.
- */
- auxosc: cm_aux_osc@25M {
+ /* Oscillator on the core module, clocks the CPU core */
+ cmcore: cmosc@24M {
+ compatible = "arm,syscon-icst525-integratorcp-cm-core";
+ #clock-cells = <0>;
+ lock-offset = <0x14>;
+ vco-offset = <0x08>;
+ clocks = <&cm24mhz>;
+ };
+
+ /* Oscillator on the core module, clocks the memory bus */
+ cmmem: cmosc@24M {
+ compatible = "arm,syscon-icst525-integratorcp-cm-mem";
+ #clock-cells = <0>;
+ lock-offset = <0x14>;
+ vco-offset = <0x08>;
+ clocks = <&cm24mhz>;
+ };
+
+ /* Auxiliary oscillator on the core module, clocks the CLCD */
+ auxosc: auxosc@24M {
+ compatible = "arm,syscon-icst525";
#clock-cells = <0>;
- compatible = "arm,integrator-cm-auxosc";
- clocks = <&xtal24mhz>;
+ lock-offset = <0x14>;
+ vco-offset = <0x1c>;
+ clocks = <&cm24mhz>;
};
/* The KMI clock is the 24 MHz oscillator divided to 8MHz */
@@ -80,7 +97,7 @@
compatible = "fixed-factor-clock";
clock-div = <3>;
clock-mult = <1>;
- clocks = <&xtal24mhz>;
+ clocks = <&cm24mhz>;
};
/* The timer clock is the 24 MHz oscillator divided to 1MHz */
@@ -89,7 +106,7 @@
compatible = "fixed-factor-clock";
clock-div = <24>;
clock-mult = <1>;
- clocks = <&xtal24mhz>;
+ clocks = <&cm24mhz>;
};
};
@@ -209,7 +226,42 @@
reg = <0xC0000000 0x1000>;
interrupts = <22>;
clocks = <&auxosc>, <&pclk>;
- clock-names = "clcd", "apb_pclk";
+ clock-names = "clcdclk", "apb_pclk";
+
+ port {
+ /*
+ * The VGA connector is implemented with a
+ * THS8134A triple DAC that can be run in 24bit
+ * or 16bit RGB mode.
+ */
+ clcd_pads: endpoint {
+ remote-endpoint = <&clcd_panel>;
+ arm,pl11x,tft-r0g0b0-pads = <1 7 13>;
+ };
+ };
+
+ panel {
+ compatible = "panel-dpi";
+
+ port {
+ clcd_panel: endpoint {
+ remote-endpoint = <&clcd_pads>;
+ };
+ };
+
+ /* Standard 640x480 VGA timings */
+ panel-timing {
+ clock-frequency = <25175000>;
+ hactive = <640>;
+ hback-porch = <48>;
+ hfront-porch = <16>;
+ hsync-len = <96>;
+ vactive = <480>;
+ vback-porch = <33>;
+ vfront-porch = <10>;
+ vsync-len = <2>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/keystone-k2e-evm.dts b/arch/arm/boot/dts/keystone-k2e-evm.dts
index 4c32ebc1425a..ae1ebe7ee021 100644
--- a/arch/arm/boot/dts/keystone-k2e-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2e-evm.dts
@@ -47,18 +47,26 @@
status = "okay";
};
-&usb {
+&keystone_usb0 {
status = "okay";
};
+&usb0 {
+ dr_mode = "host";
+};
+
&usb1_phy {
status = "okay";
};
-&usb1 {
+&keystone_usb1 {
status = "okay";
};
+&usb1 {
+ dr_mode = "peripheral";
+};
+
&i2c0 {
dtt@50 {
compatible = "at,24c1024";
diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi
index 9a51b8c88581..497c417db5b6 100644
--- a/arch/arm/boot/dts/keystone-k2e.dtsi
+++ b/arch/arm/boot/dts/keystone-k2e.dtsi
@@ -61,7 +61,7 @@
status = "disabled";
};
- usb1: usb@25000000 {
+ keystone_usb1: usb@25000000 {
compatible = "ti,keystone-dwc3";
#address-cells = <1>;
#size-cells = <1>;
@@ -74,7 +74,7 @@
dma-ranges;
status = "disabled";
- dwc3@25010000 {
+ usb1: dwc3@25010000 {
compatible = "synopsys,dwc3";
reg = <0x25010000 0x70000>;
interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index 3372615b885c..2919c5190653 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -83,6 +83,11 @@
pinctrl-single,function-mask = <0x001b0007>;
};
+ devctrl: device-state-control@02620000 {
+ compatible = "ti,keystone-devctrl", "syscon";
+ reg = <0x02620000 0x1000>;
+ };
+
uart0: serial@02530c00 {
compatible = "ns16550a";
current-speed = <115200>;
@@ -93,5 +98,32 @@
clock-frequency = <200000000>;
status = "disabled";
};
+
+ kirq0: keystone_irq@026202a0 {
+ compatible = "ti,keystone-irq";
+ interrupts = <GIC_SPI 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ ti,syscon-dev = <&devctrl 0x2a0>;
+ };
+
+ dspgpio0: keystone_dsp_gpio@02620240 {
+ compatible = "ti,keystone-dsp-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio,syscon-dev = <&devctrl 0x240>;
+ };
+
+ msgmgr: msgmgr@02a00000 {
+ compatible = "ti,k2g-message-manager";
+ #mbox-cells = <2>;
+ reg-names = "queue_proxy_region",
+ "queue_state_debug_region";
+ reg = <0x02a00000 0x400000>, <0x028c3400 0x400>;
+ interrupt-names = "rx_005",
+ "rx_057";
+ interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
};
diff --git a/arch/arm/boot/dts/keystone-k2hk-evm.dts b/arch/arm/boot/dts/keystone-k2hk-evm.dts
index b38b3441818b..2156ff92d08f 100644
--- a/arch/arm/boot/dts/keystone-k2hk-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2hk-evm.dts
@@ -83,10 +83,14 @@
status = "okay";
};
-&usb {
+&keystone_usb0 {
status = "okay";
};
+&usb0 {
+ dr_mode = "host";
+};
+
&aemif {
cs0 {
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/keystone-k2l-evm.dts b/arch/arm/boot/dts/keystone-k2l-evm.dts
index 7f9c2e94d605..056b42f99d7a 100644
--- a/arch/arm/boot/dts/keystone-k2l-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2l-evm.dts
@@ -32,10 +32,14 @@
status = "okay";
};
-&usb {
+&keystone_usb0 {
status = "okay";
};
+&usb0 {
+ dr_mode = "host";
+};
+
&i2c0 {
dtt@50 {
compatible = "at,24c1024";
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e23f46d15c80..02708ba2d4f4 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -188,7 +188,7 @@
status = "disabled";
};
- usb: usb@2680000 {
+ keystone_usb0: usb@2680000 {
compatible = "ti,keystone-dwc3";
#address-cells = <1>;
#size-cells = <1>;
@@ -201,7 +201,7 @@
dma-ranges;
status = "disabled";
- dwc3@2690000 {
+ usb0: dwc3@2690000 {
compatible = "synopsys,dwc3";
reg = <0x2690000 0x70000>;
interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index 0db0e3edc88f..94e49f32d5f9 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -41,7 +41,7 @@
};
pinctrl: pin-controller@10000 {
- pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+ pinctrl-0 = <&pmx_dip_switches>;
pinctrl-names = "default";
pmx_uart0: pmx-uart0 {
@@ -174,3 +174,10 @@
phy-handle = <&ethphy0>;
};
};
+
+&gpio0 {
+ status = "okay";
+
+ pinctrl-0 = <&pmx_gpio_header>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 1c2c74655416..731ec37aed5b 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -13,6 +13,11 @@
};
};
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
leds {
compatible = "gpio-leds";
user0 {
diff --git a/arch/arm/boot/dts/lpc18xx.dtsi b/arch/arm/boot/dts/lpc18xx.dtsi
index fdb736c82045..7cae9c5e27db 100644
--- a/arch/arm/boot/dts/lpc18xx.dtsi
+++ b/arch/arm/boot/dts/lpc18xx.dtsi
@@ -20,6 +20,9 @@
#define LPC_GPIO(port, pin) (port * 32 + pin)
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -186,6 +189,10 @@
clock-names = "stmmaceth";
resets = <&rgu 22>;
reset-names = "stmmaceth";
+ rx-fifo-depth = <256>;
+ tx-fifo-depth = <256>;
+ snps,pbl = <4>; /* 32 (8x mode) */
+ snps,force_thresh_dma_mode;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index e295e1ec82a5..b5841fab51c1 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -51,9 +51,19 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
- ranges = <0x20000000 0x20000000 0x30000000>,
+ ranges = <0x00000000 0x00000000 0x10000000>,
+ <0x20000000 0x20000000 0x30000000>,
<0xe0000000 0xe0000000 0x04000000>;
+ iram: sram@08000000 {
+ compatible = "mmio-sram";
+ reg = <0x08000000 0x20000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x08000000 0x20000>;
+ };
+
/*
* Enable either SLC or MLC
*/
diff --git a/arch/arm/boot/dts/lpc4337-ciaa.dts b/arch/arm/boot/dts/lpc4337-ciaa.dts
index 5cfadb06c8df..7c16d639a1b4 100644
--- a/arch/arm/boot/dts/lpc4337-ciaa.dts
+++ b/arch/arm/boot/dts/lpc4337-ciaa.dts
@@ -30,7 +30,7 @@
stdout-path = &uart2;
};
- memory {
+ memory@28000000 {
device_type = "memory";
reg = <0x28000000 0x0800000>; /* 8 MB */
};
diff --git a/arch/arm/boot/dts/lpc4350-hitex-eval.dts b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
index 6c9048d4d03c..874c75d44013 100644
--- a/arch/arm/boot/dts/lpc4350-hitex-eval.dts
+++ b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
@@ -33,7 +33,7 @@
stdout-path = &uart0;
};
- memory {
+ memory@28000000 {
device_type = "memory";
reg = <0x28000000 0x800000>; /* 8 MB */
};
@@ -424,7 +424,7 @@
/* NXP SE97BTP with temperature sensor + eeprom */
sensor@18 {
- compatible = "nxp,jc42";
+ compatible = "nxp,se97", "jedec,jc-42.4-temp";
reg = <0x18>;
};
diff --git a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
index 1919be4dab2b..9b5fad622522 100644
--- a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
+++ b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
@@ -33,7 +33,7 @@
stdout-path = &uart0;
};
- memory {
+ memory@28000000 {
device_type = "memory";
reg = <0x28000000 0x2000000>; /* 32 MB */
};
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 75ecaed32ae5..a8b148ad1dd2 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -108,12 +108,23 @@
panel: panel {
compatible = "nec,nl4827hc19-05b";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dcu_out>;
+ };
+ };
};
};
&dcu {
- fsl,panel = <&panel>;
status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
};
&dspi1 {
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index fc4080de4b7b..41fd53671859 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -162,6 +162,27 @@
reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
};
+ pwm_ab: pwm@8550 {
+ compatible = "amlogic,meson8b-pwm";
+ reg = <0xc1108550 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_cd: pwm@8650 {
+ compatible = "amlogic,meson8b-pwm";
+ reg = <0xc1108650 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_ef: pwm@86c0 {
+ compatible = "amlogic,meson8b-pwm";
+ reg = <0xc11086c0 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
pinctrl_cbus: pinctrl@c1109880 {
compatible = "amlogic,meson8b-cbus-pinctrl";
reg = <0xc1109880 0x10>;
diff --git a/arch/arm/boot/dts/mps2.dtsi b/arch/arm/boot/dts/mps2.dtsi
index e3fed8d34558..efb8a03cb970 100644
--- a/arch/arm/boot/dts/mps2.dtsi
+++ b/arch/arm/boot/dts/mps2.dtsi
@@ -42,6 +42,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "skeleton.dtsi"
#include "armv7-m.dtsi"
/ {
diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/ntc-gr8-evb.dts
new file mode 100644
index 000000000000..4b622f3b5220
--- /dev/null
+++ b/arch/arm/boot/dts/ntc-gr8-evb.dts
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2016 Free Electrons
+ * Copyright 2016 NextThing Co
+ *
+ * Mylène Josserand <mylene.josserand@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "ntc-gr8.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "NextThing GR8-EVB";
+ compatible = "nextthing,gr8-evb", "nextthing,gr8";
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 10000 0>;
+ enable-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>;
+
+ brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <8>;
+ };
+};
+
+&be0 {
+ status = "okay";
+};
+
+&codec {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ axp209: pmic@34 {
+ reg = <0x34>;
+
+ /*
+ * The interrupt is routed through the "External Fast
+ * Interrupt Request" pin (ball G13 of the module)
+ * directly to the main interrupt controller, without
+ * any other controller interfering.
+ */
+ interrupts = <0>;
+ };
+};
+
+#include "axp209.dtsi"
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+
+ wm8978: codec@1a {
+ #sound-dai-cells = <0>;
+ compatible = "wlf,wm8978";
+ reg = <0x1a>;
+ };
+
+ pcf8563: rtc@51 {
+ compatible = "phg,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ status = "okay";
+};
+
+&i2s0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_mclk_pins_a>, <&i2s0_data_pins_a>;
+ status = "okay";
+};
+
+&ir0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir0_rx_pins_a>;
+ status = "okay";
+};
+
+&lradc {
+ vref-supply = <&reg_ldo2>;
+ status = "okay";
+
+ button@190 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <190000>;
+ };
+
+ button@390 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <390000>;
+ };
+
+ button@600 {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ channel = <0>;
+ voltage = <600000>;
+ };
+
+ button@800 {
+ label = "Search";
+ linux,code = <KEY_SEARCH>;
+ channel = <0>;
+ voltage = <800000>;
+ };
+
+ button@980 {
+ label = "Home";
+ linux,code = <KEY_HOMEPAGE>;
+ channel = <0>;
+ voltage = <980000>;
+ };
+
+ button@1180 {
+ label = "Esc";
+ linux,code = <KEY_ESC>;
+ channel = <0>;
+ voltage = <1180000>;
+ };
+
+ button@1400 {
+ label = "Enter";
+ linux,code = <KEY_ENTER>;
+ channel = <0>;
+ voltage = <1400000>;
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_gr8_evb>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
+ cd-inverted;
+ status = "okay";
+};
+
+&nfc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
+
+ /* MLC NAND support is not yet reliable, so keep the controller disabled */
+ status = "disabled";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&otg_sram {
+ status = "okay";
+};
+
+&pio {
+ mmc0_cd_pin_gr8_evb: mmc0-cd-pin@0 {
+ allwinner,pins = "PG0";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ usb0_id_pin_gr8_evb: usb0-id-pin@0 {
+ allwinner,pins = "PG2";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ usb0_vbus_det_pin_gr8_evb: usb0-vbus-det-pin@0 {
+ allwinner,pins = "PG1";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ usb1_vbus_pin_gr8_evb: usb1-vbus-pin@0 {
+ allwinner,pins = "PG13";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins_a>;
+ status = "okay";
+};
+
+&reg_dcdc2 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+ regulator-always-on;
+};
+
+&reg_dcdc3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vdd-sys";
+ regulator-always-on;
+};
+
+&reg_ldo1 {
+ regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc";
+ regulator-always-on;
+};
+
+&reg_usb1_vbus {
+ pinctrl-0 = <&usb1_vbus_pin_gr8_evb>;
+ gpio = <&pio 6 13 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&rtp {
+ allwinner,ts-attached;
+};
+
+&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx_pins_a>;
+ status = "okay";
+};
+
+&tve0 {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins_a>, <&uart1_cts_rts_pins_a>;
+ status = "okay";
+};
+
+&usb_otg {
+ /*
+ * The GR8-EVB has a somewhat interesting design. There's a
+ * pin supposed to control VBUS, an ID pin, a VBUS detect pin,
+ * so everything should work just fine.
+ *
+ * Except that the pin supposed to control VBUS is not
+ * connected to any controllable output, neither to the SoC
+ * through a GPIO nor to the PMIC, and it is pulled down,
+ * meaning that we will never be able to enable VBUS on this
+ * board.
+ */
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_pin_gr8_evb>, <&usb0_vbus_det_pin_gr8_evb>;
+ usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb1_vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/ntc-gr8.dtsi
new file mode 100644
index 000000000000..ca54e03ef366
--- /dev/null
+++ b/arch/arm/boot/dts/ntc-gr8.dtsi
@@ -0,0 +1,1087 @@
+/*
+ * Copyright 2016 Mylène Josserand
+ *
+ * Mylène Josserand <mylene.josserand@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/sun4i-a10-pll2.h>
+#include <dt-bindings/dma/sun4i-a10.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a8";
+ reg = <0x0>;
+ clocks = <&cpu>;
+ };
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * This is a dummy clock, to be used as a placeholder in
+ * other mux clocks when a specific parent clock is not
+ * yet implemented. It should be dropped when the driver
+ * is complete.
+ */
+ dummy: dummy {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ };
+
+ osc24M: clk@01c20050 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-osc-clk";
+ reg = <0x01c20050 0x4>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc24M";
+ };
+
+ osc3M: osc3M-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clock-div = <8>;
+ clock-mult = <1>;
+ clocks = <&osc24M>;
+ clock-output-names = "osc3M";
+ };
+
+ osc32k: clk@0 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "osc32k";
+ };
+
+ pll1: clk@01c20000 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-pll1-clk";
+ reg = <0x01c20000 0x4>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll1";
+ };
+
+ pll2: clk@01c20008 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun5i-a13-pll2-clk";
+ reg = <0x01c20008 0x8>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll2-1x", "pll2-2x",
+ "pll2-4x", "pll2-8x";
+ };
+
+ pll3: clk@01c20010 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-pll3-clk";
+ reg = <0x01c20010 0x4>;
+ clocks = <&osc3M>;
+ clock-output-names = "pll3";
+ };
+
+ pll3x2: pll3x2-clk {
+ compatible = "allwinner,sun4i-a10-pll3-2x-clk";
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <2>;
+ clocks = <&pll3>;
+ clock-output-names = "pll3-2x";
+ };
+
+ pll4: clk@01c20018 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-pll1-clk";
+ reg = <0x01c20018 0x4>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll4";
+ };
+
+ pll5: clk@01c20020 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-pll5-clk";
+ reg = <0x01c20020 0x4>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll5_ddr", "pll5_other";
+ };
+
+ pll6: clk@01c20028 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-pll6-clk";
+ reg = <0x01c20028 0x4>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll6_sata", "pll6_other", "pll6";
+ };
+
+ pll7: clk@01c20030 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-pll3-clk";
+ reg = <0x01c20030 0x4>;
+ clocks = <&osc3M>;
+ clock-output-names = "pll7";
+ };
+
+ pll7x2: pll7x2-clk {
+ compatible = "allwinner,sun4i-a10-pll3-2x-clk";
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <2>;
+ clocks = <&pll7>;
+ clock-output-names = "pll7-2x";
+ };
+
+ /* dummy is 200M */
+ cpu: cpu@01c20054 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-cpu-clk";
+ reg = <0x01c20054 0x4>;
+ clocks = <&osc32k>, <&osc24M>, <&pll1>, <&dummy>;
+ clock-output-names = "cpu";
+ };
+
+ axi: axi@01c20054 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-axi-clk";
+ reg = <0x01c20054 0x4>;
+ clocks = <&cpu>;
+ clock-output-names = "axi";
+ };
+
+ ahb: ahb@01c20054 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun5i-a13-ahb-clk";
+ reg = <0x01c20054 0x4>;
+ clocks = <&axi>, <&cpu>, <&pll6 1>;
+ clock-output-names = "ahb";
+ /*
+ * Use PLL6 as parent, instead of CPU/AXI
+ * which has rate changes due to cpufreq
+ */
+ assigned-clocks = <&ahb>;
+ assigned-clock-parents = <&pll6 1>;
+ };
+
+ apb0: apb0@01c20054 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-apb0-clk";
+ reg = <0x01c20054 0x4>;
+ clocks = <&ahb>;
+ clock-output-names = "apb0";
+ };
+
+ apb1: clk@01c20058 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-apb1-clk";
+ reg = <0x01c20058 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
+ clock-output-names = "apb1";
+ };
+
+ axi_gates: clk@01c2005c {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-gates-clk";
+ reg = <0x01c2005c 0x4>;
+ clocks = <&axi>;
+ clock-indices = <0>;
+ clock-output-names = "axi_dram";
+ };
+
+ ahb_gates: clk@01c20060 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun5i-a13-ahb-gates-clk";
+ reg = <0x01c20060 0x8>;
+ clocks = <&ahb>;
+ clock-indices = <0>, <1>,
+ <2>, <5>, <6>,
+ <7>, <8>, <9>,
+ <10>, <13>,
+ <14>, <17>, <20>,
+ <21>, <22>,
+ <28>, <32>, <34>,
+ <36>, <40>, <44>,
+ <46>, <51>,
+ <52>;
+ clock-output-names = "ahb_usbotg", "ahb_ehci",
+ "ahb_ohci", "ahb_ss", "ahb_dma",
+ "ahb_bist", "ahb_mmc0", "ahb_mmc1",
+ "ahb_mmc2", "ahb_nand",
+ "ahb_sdram", "ahb_emac", "ahb_spi0",
+ "ahb_spi1", "ahb_spi2",
+ "ahb_hstimer", "ahb_ve", "ahb_tve",
+ "ahb_lcd", "ahb_csi", "ahb_de_be",
+ "ahb_de_fe", "ahb_iep",
+ "ahb_mali400";
+ };
+
+ apb0_gates: clk@01c20068 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-gates-clk";
+ reg = <0x01c20068 0x4>;
+ clocks = <&apb0>;
+ clock-indices = <0>, <3>,
+ <5>, <6>;
+ clock-output-names = "apb0_codec", "apb0_i2s0",
+ "apb0_pio", "apb0_ir";
+ };
+
+ apb1_gates: clk@01c2006c {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-gates-clk";
+ reg = <0x01c2006c 0x4>;
+ clocks = <&apb1>;
+ clock-indices = <0>, <1>,
+ <2>, <17>,
+ <18>, <19>;
+ clock-output-names = "apb1_i2c0", "apb1_i2c1",
+ "apb1_i2c2", "apb1_uart1",
+ "apb1_uart2", "apb1_uart3";
+ };
+
+ nand_clk: clk@01c20080 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c20080 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "nand";
+ };
+
+ ms_clk: clk@01c20084 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c20084 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "ms";
+ };
+
+ mmc0_clk: clk@01c20088 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-mmc-clk";
+ reg = <0x01c20088 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "mmc0",
+ "mmc0_output",
+ "mmc0_sample";
+ };
+
+ mmc1_clk: clk@01c2008c {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-mmc-clk";
+ reg = <0x01c2008c 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "mmc1",
+ "mmc1_output",
+ "mmc1_sample";
+ };
+
+ mmc2_clk: clk@01c20090 {
+ #clock-cells = <1>;
+ compatible = "allwinner,sun4i-a10-mmc-clk";
+ reg = <0x01c20090 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "mmc2",
+ "mmc2_output",
+ "mmc2_sample";
+ };
+
+ ts_clk: clk@01c20098 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c20098 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "ts";
+ };
+
+ ss_clk: clk@01c2009c {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c2009c 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "ss";
+ };
+
+ spi0_clk: clk@01c200a0 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c200a0 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "spi0";
+ };
+
+ spi1_clk: clk@01c200a4 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c200a4 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "spi1";
+ };
+
+ spi2_clk: clk@01c200a8 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c200a8 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "spi2";
+ };
+
+ ir0_clk: clk@01c200b0 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod0-clk";
+ reg = <0x01c200b0 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "ir0";
+ };
+
+ i2s0_clk: clk@01c200b8 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200b8 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "i2s0";
+ };
+
+ spdif_clk: clk@01c200c0 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200c0 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "spdif";
+ };
+
+ usb_clk: clk@01c200cc {
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun5i-a13-usb-clk";
+ reg = <0x01c200cc 0x4>;
+ clocks = <&pll6 1>;
+ clock-output-names = "usb_ohci0", "usb_phy";
+ };
+
+ dram_gates: clk@01c20100 {
+ #clock-cells = <1>;
+ compatible = "nextthing,gr8-dram-gates-clk",
+ "allwinner,sun4i-a10-gates-clk";
+ reg = <0x01c20100 0x4>;
+ clocks = <&pll5 0>;
+ clock-indices = <0>,
+ <1>,
+ <25>,
+ <26>,
+ <29>,
+ <31>;
+ clock-output-names = "dram_ve",
+ "dram_csi",
+ "dram_de_fe",
+ "dram_de_be",
+ "dram_ace",
+ "dram_iep";
+ };
+
+ de_be_clk: clk@01c20104 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20104 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-be";
+ };
+
+ de_fe_clk: clk@01c2010c {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c2010c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-fe";
+ };
+
+ tcon_ch0_clk: clk@01c20118 {
+ #clock-cells = <0>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+ reg = <0x01c20118 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon-ch0-sclk";
+ };
+
+ tcon_ch1_clk: clk@01c2012c {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+ reg = <0x01c2012c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon-ch1-sclk";
+ };
+
+ codec_clk: clk@01c20140 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-codec-clk";
+ reg = <0x01c20140 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "codec";
+ };
+
+ mbus_clk: clk@01c2015c {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun5i-a13-mbus-clk";
+ reg = <0x01c2015c 0x4>;
+ clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
+ clock-output-names = "mbus";
+ };
+ };
+
+ display-engine {
+ compatible = "allwinner,sun5i-a13-display-engine";
+ allwinner,pipelines = <&fe0>;
+ };
+
+ soc@01c00000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ sram-controller@01c00000 {
+ compatible = "allwinner,sun4i-a10-sram-controller";
+ reg = <0x01c00000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ sram_a: sram@00000000 {
+ compatible = "mmio-sram";
+ reg = <0x00000000 0xc000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x00000000 0xc000>;
+ };
+
+ sram_d: sram@00010000 {
+ compatible = "mmio-sram";
+ reg = <0x00010000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x00010000 0x1000>;
+
+ otg_sram: sram-section@0000 {
+ compatible = "allwinner,sun4i-a10-sram-d";
+ reg = <0x0000 0x1000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ dma: dma-controller@01c02000 {
+ compatible = "allwinner,sun4i-a10-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <27>;
+ clocks = <&ahb_gates 6>;
+ #dma-cells = <2>;
+ };
+
+ nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <37>;
+ clocks = <&ahb_gates 13>, <&nand_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 3>;
+ dma-names = "rxtx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi0: spi@01c05000 {
+ compatible = "allwinner,sun4i-a10-spi";
+ reg = <0x01c05000 0x1000>;
+ interrupts = <10>;
+ clocks = <&ahb_gates 20>, <&spi0_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 27>,
+ <&dma SUN4I_DMA_DEDICATED 26>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi1: spi@01c06000 {
+ compatible = "allwinner,sun4i-a10-spi";
+ reg = <0x01c06000 0x1000>;
+ interrupts = <11>;
+ clocks = <&ahb_gates 21>, <&spi1_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 9>,
+ <&dma SUN4I_DMA_DEDICATED 8>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ tve0: tv-encoder@01c0a000 {
+ compatible = "allwinner,sun4i-a10-tv-encoder";
+ reg = <0x01c0a000 0x1000>;
+ clocks = <&ahb_gates 34>;
+ resets = <&tcon_ch0_clk 0>;
+ status = "disabled";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tve0_in_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_out_tve0>;
+ };
+ };
+ };
+
+ tcon0: lcd-controller@01c0c000 {
+ compatible = "allwinner,sun5i-a13-tcon";
+ reg = <0x01c0c000 0x1000>;
+ interrupts = <44>;
+ resets = <&tcon_ch0_clk 1>;
+ reset-names = "lcd";
+ clocks = <&ahb_gates 36>,
+ <&tcon_ch0_clk>,
+ <&tcon_ch1_clk>;
+ clock-names = "ahb",
+ "tcon-ch0",
+ "tcon-ch1";
+ clock-output-names = "tcon-pixel-clock";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tcon0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ tcon0_in_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_out_tcon0>;
+ };
+ };
+
+ tcon0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ tcon0_out_tve0: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&tve0_in_tcon0>;
+ };
+ };
+ };
+ };
+
+ mmc0: mmc@01c0f000 {
+ compatible = "allwinner,sun5i-a13-mmc";
+ reg = <0x01c0f000 0x1000>;
+ clocks = <&ahb_gates 8>,
+ <&mmc0_clk 0>,
+ <&mmc0_clk 1>,
+ <&mmc0_clk 2>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+ interrupts = <32>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc1: mmc@01c10000 {
+ compatible = "allwinner,sun5i-a13-mmc";
+ reg = <0x01c10000 0x1000>;
+ clocks = <&ahb_gates 9>,
+ <&mmc1_clk 0>,
+ <&mmc1_clk 1>,
+ <&mmc1_clk 2>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+ interrupts = <33>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc2: mmc@01c11000 {
+ compatible = "allwinner,sun5i-a13-mmc";
+ reg = <0x01c11000 0x1000>;
+ clocks = <&ahb_gates 10>,
+ <&mmc2_clk 0>,
+ <&mmc2_clk 1>,
+ <&mmc2_clk 2>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+ interrupts = <34>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usb_otg: usb@01c13000 {
+ compatible = "allwinner,sun4i-a10-musb";
+ reg = <0x01c13000 0x0400>;
+ clocks = <&ahb_gates 0>;
+ interrupts = <38>;
+ interrupt-names = "mc";
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ extcon = <&usbphy 0>;
+ allwinner,sram = <&otg_sram 1>;
+ status = "disabled";
+
+ dr_mode = "otg";
+ };
+
+ usbphy: phy@01c13400 {
+ #phy-cells = <1>;
+ compatible = "allwinner,sun5i-a13-usb-phy";
+ reg = <0x01c13400 0x10 0x01c14800 0x4>;
+ reg-names = "phy_ctrl", "pmu1";
+ clocks = <&usb_clk 8>;
+ clock-names = "usb_phy";
+ resets = <&usb_clk 0>, <&usb_clk 1>;
+ reset-names = "usb0_reset", "usb1_reset";
+ status = "disabled";
+ };
+
+ ehci0: usb@01c14000 {
+ compatible = "allwinner,sun5i-a13-ehci", "generic-ehci";
+ reg = <0x01c14000 0x100>;
+ interrupts = <39>;
+ clocks = <&ahb_gates 1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci0: usb@01c14400 {
+ compatible = "allwinner,sun5i-a13-ohci", "generic-ohci";
+ reg = <0x01c14400 0x100>;
+ interrupts = <40>;
+ clocks = <&usb_clk 6>, <&ahb_gates 2>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ spi2: spi@01c17000 {
+ compatible = "allwinner,sun4i-a10-spi";
+ reg = <0x01c17000 0x1000>;
+ interrupts = <12>;
+ clocks = <&ahb_gates 22>, <&spi2_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 29>,
+ <&dma SUN4I_DMA_DEDICATED 28>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ intc: interrupt-controller@01c20400 {
+ compatible = "allwinner,sun4i-a10-ic";
+ reg = <0x01c20400 0x400>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ pio: pinctrl@01c20800 {
+ compatible = "nextthing,gr8-pinctrl";
+ reg = <0x01c20800 0x400>;
+ interrupts = <28>;
+ clocks = <&apb0_gates 5>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #gpio-cells = <3>;
+
+ i2c0_pins_a: i2c0@0 {
+ allwinner,pins = "PB0", "PB1";
+ allwinner,function = "i2c0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2c1_pins_a: i2c1@0 {
+ allwinner,pins = "PB15", "PB16";
+ allwinner,function = "i2c1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2c2_pins_a: i2c2@0 {
+ allwinner,pins = "PB17", "PB18";
+ allwinner,function = "i2c2";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2s0_data_pins_a: i2s0-data@0 {
+ allwinner,pins = "PB6", "PB7", "PB8", "PB9";
+ allwinner,function = "i2s0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2s0_mclk_pins_a: i2s0-mclk@0 {
+ allwinner,pins = "PB6", "PB7", "PB8", "PB9";
+ allwinner,function = "i2s0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ ir0_rx_pins_a: ir0@0 {
+ allwinner,pins = "PB4";
+ allwinner,function = "ir0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ lcd_rgb666_pins: lcd-rgb666@0 {
+ allwinner,pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
+ "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
+ "PD24", "PD25", "PD26", "PD27";
+ allwinner,function = "lcd0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_pins_a: mmc0@0 {
+ allwinner,pins = "PF0", "PF1", "PF2", "PF3",
+ "PF4", "PF5";
+ allwinner,function = "mmc0";
+ allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ nand_pins_a: nand-base0@0 {
+ allwinner,pins = "PC0", "PC1", "PC2",
+ "PC5", "PC8", "PC9", "PC10",
+ "PC11", "PC12", "PC13", "PC14",
+ "PC15";
+ allwinner,function = "nand0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ nand_cs0_pins_a: nand-cs@0 {
+ allwinner,pins = "PC4";
+ allwinner,function = "nand0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ nand_rb0_pins_a: nand-rb@0 {
+ allwinner,pins = "PC6";
+ allwinner,function = "nand0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ pwm0_pins_a: pwm0@0 {
+ allwinner,pins = "PB2";
+ allwinner,function = "pwm0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ spdif_tx_pins_a: spdif@0 {
+ allwinner,pins = "PB10";
+ allwinner,function = "spdif";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ uart1_pins_a: uart1@1 {
+ allwinner,pins = "PG3", "PG4";
+ allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ uart1_cts_rts_pins_a: uart1-cts-rts@0 {
+ allwinner,pins = "PG5", "PG6";
+ allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+ };
+
+ pwm: pwm@01c20e00 {
+ compatible = "allwinner,sun5i-a10s-pwm";
+ reg = <0x01c20e00 0xc>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ timer@01c20c00 {
+ compatible = "allwinner,sun4i-a10-timer";
+ reg = <0x01c20c00 0x90>;
+ interrupts = <22>;
+ clocks = <&osc24M>;
+ };
+
+ wdt: watchdog@01c20c90 {
+ compatible = "allwinner,sun4i-a10-wdt";
+ reg = <0x01c20c90 0x10>;
+ };
+
+ spdif: spdif@01c21000 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-spdif";
+ reg = <0x01c21000 0x400>;
+ interrupts = <13>;
+ clocks = <&apb0_gates 1>, <&spdif_clk>;
+ clock-names = "apb", "spdif";
+ dmas = <&dma SUN4I_DMA_NORMAL 2>,
+ <&dma SUN4I_DMA_NORMAL 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ir0: ir@01c21800 {
+ compatible = "allwinner,sun4i-a10-ir";
+ clocks = <&apb0_gates 6>, <&ir0_clk>;
+ clock-names = "apb", "ir";
+ interrupts = <5>;
+ reg = <0x01c21800 0x40>;
+ status = "disabled";
+ };
+
+ i2s0: i2s@01c22400 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c22400 0x400>;
+ interrupts = <16>;
+ clocks = <&apb0_gates 3>, <&i2s0_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 3>,
+ <&dma SUN4I_DMA_NORMAL 3>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ lradc: lradc@01c22800 {
+ compatible = "allwinner,sun4i-a10-lradc-keys";
+ reg = <0x01c22800 0x100>;
+ interrupts = <31>;
+ status = "disabled";
+ };
+
+ codec: codec@01c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-codec";
+ reg = <0x01c22c00 0x40>;
+ interrupts = <30>;
+ clocks = <&apb0_gates 0>, <&codec_clk>;
+ clock-names = "apb", "codec";
+ dmas = <&dma SUN4I_DMA_NORMAL 19>,
+ <&dma SUN4I_DMA_NORMAL 19>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ rtp: rtp@01c25000 {
+ compatible = "allwinner,sun5i-a13-ts";
+ reg = <0x01c25000 0x100>;
+ interrupts = <29>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ uart1: serial@01c28400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28400 0x400>;
+ interrupts = <2>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 17>;
+ status = "disabled";
+ };
+
+ uart2: serial@01c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+ interrupts = <3>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&apb1_gates 18>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@01c2ac00 {
+ compatible = "allwinner,sun4i-a10-i2c";
+ reg = <0x01c2ac00 0x400>;
+ interrupts = <7>;
+ clocks = <&apb1_gates 0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@01c2b000 {
+ compatible = "allwinner,sun4i-a10-i2c";
+ reg = <0x01c2b000 0x400>;
+ interrupts = <8>;
+ clocks = <&apb1_gates 1>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2: i2c@01c2b400 {
+ compatible = "allwinner,sun4i-a10-i2c";
+ reg = <0x01c2b400 0x400>;
+ interrupts = <9>;
+ clocks = <&apb1_gates 2>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ timer@01c60000 {
+ compatible = "allwinner,sun5i-a13-hstimer";
+ reg = <0x01c60000 0x1000>;
+ interrupts = <82>, <83>;
+ clocks = <&ahb_gates 28>;
+ };
+
+ fe0: display-frontend@01e00000 {
+ compatible = "allwinner,sun5i-a13-display-frontend";
+ reg = <0x01e00000 0x20000>;
+ interrupts = <47>;
+ clocks = <&ahb_gates 46>, <&de_fe_clk>,
+ <&dram_gates 25>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&de_fe_clk>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fe0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ fe0_out_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_in_fe0>;
+ };
+ };
+ };
+ };
+
+ be0: display-backend@01e60000 {
+ compatible = "allwinner,sun5i-a13-display-backend";
+ reg = <0x01e60000 0x10000>;
+ clocks = <&ahb_gates 44>, <&de_be_clk>,
+ <&dram_gates 26>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&de_be_clk>;
+ status = "disabled";
+
+ assigned-clocks = <&de_be_clk>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ be0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ be0_in_fe0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&fe0_out_be0>;
+ };
+ };
+
+ be0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ be0_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_be0>;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 578fa2a54dce..4f793a025a72 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -12,11 +12,11 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/omap.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,omap2430", "ti,omap2420", "ti,omap2";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
serial0 = &uart1;
diff --git a/arch/arm/boot/dts/omap2420-h4.dts b/arch/arm/boot/dts/omap2420-h4.dts
index 34cdecb4fdda..9265c0b9c3f3 100644
--- a/arch/arm/boot/dts/omap2420-h4.dts
+++ b/arch/arm/boot/dts/omap2420-h4.dts
@@ -13,7 +13,7 @@
model = "TI OMAP2420 H4 board";
compatible = "ti,omap2420-h4", "ti,omap2420", "ti,omap2";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index db95aadcca70..7e5ffc583c90 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -1,7 +1,7 @@
#include "omap2420.dtsi"
/ {
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x8000000>; /* 128 MB */
};
diff --git a/arch/arm/boot/dts/omap2430-sdp.dts b/arch/arm/boot/dts/omap2430-sdp.dts
index 6b36ede58488..4f7d9d7c00c7 100644
--- a/arch/arm/boot/dts/omap2430-sdp.dts
+++ b/arch/arm/boot/dts/omap2430-sdp.dts
@@ -13,7 +13,7 @@
model = "TI OMAP2430 SDP";
compatible = "ti,omap2430-sdp", "ti,omap2430", "ti,omap2";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x8000000>; /* 128 MB */
};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 8ffde06281ad..85e297ed0ea1 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -19,7 +19,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a19d907d4850..4be85ce59dd1 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -19,7 +19,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index 6a0df13fa0f3..57b9a028a49a 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -4,7 +4,7 @@
/ {
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
index 586010179752..f330c69cc683 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -10,7 +10,7 @@
#include "omap34xx.dtsi"
/ {
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index ed449827c3d3..4f9a76544602 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -15,7 +15,7 @@
model = "TI OMAP37XX EVM (TMDSEVM3730)";
compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-evm.dts b/arch/arm/boot/dts/omap3-evm.dts
index e10dcd0fa539..99b2bfcd1059 100644
--- a/arch/arm/boot/dts/omap3-evm.dts
+++ b/arch/arm/boot/dts/omap3-evm.dts
@@ -14,7 +14,7 @@
model = "TI OMAP35XX EVM (TMDSEVM3530)";
compatible = "ti,omap3-evm", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index c09a0574af90..b3a8b1f24499 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -21,7 +21,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
@@ -102,7 +102,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm11 0 2000000 0>;
+ pwms = <&pwm11 0 12000000 0>;
pwm-names = "backlight";
brightness-levels = <0 11 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <9>; /* => 90 */
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index f4f2ce46d681..54c4c07bbe4a 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -13,7 +13,7 @@
#include "omap36xx.dtsi"
/ {
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts
index 2f353dadfa40..e28fe13cb007 100644
--- a/arch/arm/boot/dts/omap3-ldp.dts
+++ b/arch/arm/boot/dts/omap3-ldp.dts
@@ -15,7 +15,7 @@
model = "TI OMAP3430 LDP (Zoom1 Labrador)";
compatible = "ti,omap3-ldp", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x8000000>; /* 128 MB */
};
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index eff816e0bc0a..fa611a5e4850 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -17,7 +17,7 @@
bootargs = "console=ttyO0,115200n8 vt.global_cursor_default=0 consoleblank=0";
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x8000000>; /* 128 MB */
};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 2a6078a8422c..87ca50b53002 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -54,7 +54,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 927b17fc4ed8..5d8c4b4a4205 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -24,12 +24,12 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
- vemmc: fixedregulator@0 {
+ vemmc: fixedregulator0 {
compatible = "regulator-fixed";
regulator-name = "VEMMC";
regulator-min-microvolt = <2900000>;
@@ -39,7 +39,7 @@
enable-active-high;
};
- vwlan_fixed: fixedregulator@2 {
+ vwlan_fixed: fixedregulator2 {
compatible = "regulator-fixed";
regulator-name = "VWLAN";
gpio = <&gpio2 3 GPIO_ACTIVE_HIGH>; /* gpio 35 */
diff --git a/arch/arm/boot/dts/omap3-overo-alto35-common.dtsi b/arch/arm/boot/dts/omap3-overo-alto35-common.dtsi
index 3b3a75997f81..99a7eee6e61f 100644
--- a/arch/arm/boot/dts/omap3-overo-alto35-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-alto35-common.dtsi
@@ -44,7 +44,7 @@
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&button_pins>;
- button0@10 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; /* gpio_10 */
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index 3e946cac55f3..401fae838fe9 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -11,6 +11,12 @@
*/
/ {
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
pwmleds {
compatible = "pwm-leds";
diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
index 4f4c6efbd518..56dbd113430e 100644
--- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
@@ -37,13 +37,13 @@
pinctrl-0 = <&button_pins>;
#address-cells = <1>;
#size-cells = <0>;
- button0@23 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; /* gpio_23 */
wakeup-source;
};
- button1@14 {
+ button1 {
label = "button1";
linux,code = <BTN_1>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; /* gpio_14 */
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index ca86da68220c..854117dc0b77 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -119,7 +119,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mcspi1_pins>;
- lcd0: display {
+ lcd0: display@1 {
compatible = "lgphilips,lb035q02";
label = "lcd35";
diff --git a/arch/arm/boot/dts/omap3-overo-gallop43-common.dtsi b/arch/arm/boot/dts/omap3-overo-gallop43-common.dtsi
index 250cc7fe5d5e..286f5baddf07 100644
--- a/arch/arm/boot/dts/omap3-overo-gallop43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-gallop43-common.dtsi
@@ -37,13 +37,13 @@
pinctrl-0 = <&button_pins>;
#address-cells = <1>;
#size-cells = <0>;
- button0@23 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; /* gpio_23 */
wakeup-source;
};
- button1@14 {
+ button1 {
label = "button1";
linux,code = <BTN_1>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; /* gpio_14 */
diff --git a/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi b/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi
index 8df7ec35d17d..a8020fb42464 100644
--- a/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi
@@ -37,13 +37,13 @@
pinctrl-0 = <&button_pins>;
#address-cells = <1>;
#size-cells = <0>;
- button0@23 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; /* gpio_23 */
wakeup-source;
};
- button1@14 {
+ button1 {
label = "button1";
linux,code = <BTN_1>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; /* gpio_14 */
diff --git a/arch/arm/boot/dts/omap3-overo-palo43-common.dtsi b/arch/arm/boot/dts/omap3-overo-palo43-common.dtsi
index 0ea2c451c809..11965737e2c9 100644
--- a/arch/arm/boot/dts/omap3-overo-palo43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-palo43-common.dtsi
@@ -37,13 +37,13 @@
pinctrl-0 = <&button_pins>;
#address-cells = <1>;
#size-cells = <0>;
- button0@23 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; /* gpio_23 */
wakeup-source;
};
- button1@14 {
+ button1 {
label = "button1";
linux,code = <BTN_1>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; /* gpio_14 */
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index dbc4dc721cc2..53e007abdc71 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -18,7 +18,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
@@ -45,28 +45,28 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins>;
- led@1 {
+ led1 {
label = "pandora::sd1";
gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>; /* GPIO_128 */
linux,default-trigger = "mmc0";
default-state = "off";
};
- led@2 {
+ led2 {
label = "pandora::sd2";
gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; /* GPIO_129 */
linux,default-trigger = "mmc1";
default-state = "off";
};
- led@3 {
+ led3 {
label = "pandora::bluetooth";
gpios = <&gpio5 30 GPIO_ACTIVE_HIGH>; /* GPIO_158 */
linux,default-trigger = "heartbeat";
default-state = "off";
};
- led@4 {
+ led4 {
label = "pandora::wifi";
gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>; /* GPIO_159 */
linux,default-trigger = "mmc2";
diff --git a/arch/arm/boot/dts/omap3-sniper.dts b/arch/arm/boot/dts/omap3-sniper.dts
index 78a1184cb312..bc4498e77bc9 100644
--- a/arch/arm/boot/dts/omap3-sniper.dts
+++ b/arch/arm/boot/dts/omap3-sniper.dts
@@ -20,7 +20,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index 644d3c8ea66a..dc80886b5329 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -26,7 +26,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index c29b41dc7b95..45e2ce0803de 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -20,7 +20,7 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 4c3c471d2a83..353d818ce5a6 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -12,11 +12,11 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/omap.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,omap3430", "ti,omap3";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c1;
@@ -78,7 +78,7 @@
* the moment, just use a fake OCP bus entry to represent the whole bus
* hierarchy.
*/
- ocp {
+ ocp@68000000 {
compatible = "ti,omap3-l3-smx", "simple-bus";
reg = <0x68000000 0x10000>;
interrupts = <9 10>;
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index a0dc8d854142..abd6921143be 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -13,7 +13,7 @@
model = "TI OMAP3430 SDP";
compatible = "ti,omap3430-sdp", "ti,omap3";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x10000000>; /* 256 MB */
};
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index e44656258225..e41c52d3b113 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -28,7 +28,7 @@
};
};
- ocp {
+ ocp@68000000 {
omap3_pmx_core2: pinmux@480025d8 {
compatible = "ti,omap3-padconf", "pinctrl-single";
reg = <0x480025d8 0x24>;
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 8b7979153008..718fa88407cd 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -30,7 +30,7 @@
};
};
- ocp {
+ ocp@68000000 {
uart4: serial@49042000 {
compatible = "ti,omap3-uart";
reg = <0x49042000 0x400>;
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index 6b39808b8313..1b825128a7b9 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -32,7 +32,7 @@
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- button0@121 {
+ button0 {
label = "button0";
linux,code = <BTN_0>;
gpios = <&gpio4 25 GPIO_ACTIVE_LOW>; /* gpio_121 */
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index a90b582e4c3f..ec0bd9779e1a 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -12,7 +12,7 @@
model = "Gumstix Duovero";
compatible = "gumstix,omap4-duovero", "ti,omap4430", "ti,omap4";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/omap4-kc1.dts b/arch/arm/boot/dts/omap4-kc1.dts
index 2251bd54e4e6..e3763ac75719 100644
--- a/arch/arm/boot/dts/omap4-kc1.dts
+++ b/arch/arm/boot/dts/omap4-kc1.dts
@@ -13,7 +13,7 @@
model = "Amazon Kindle Fire (first generation)";
compatible = "amazon,omap4-kc1", "ti,omap4430", "ti,omap4";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index f8f13952cfeb..1673689e6705 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -8,7 +8,7 @@
#include "elpida_ecb240abacn.dtsi"
/ {
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
@@ -446,6 +446,8 @@
pinctrl-names = "default";
pinctrl-0 = <&wl12xx_pins>;
vmmc-supply = <&wl12xx_vmmc>;
+ interrupts-extended = <&wakeupgen GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH
+ &omap4_pmx_core 0x10e>;
non-removable;
bus-width = <4>;
cap-power-off-card;
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 10d73a784050..d728ec963111 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -14,7 +14,7 @@
model = "TI OMAP4 SDP board";
compatible = "ti,omap4-sdp", "ti,omap4430", "ti,omap4";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 873cfc87260c..758b6eb7ae43 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -12,7 +12,7 @@
model = "Variscite VAR-SOM-OM44";
compatible = "variscite,var-som-om44", "ti,omap4460", "ti,omap4";
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 3fdc51cd0fad..0ced079b7ae3 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -10,11 +10,11 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/omap.h>
-#include "skeleton.dtsi"
-
/ {
compatible = "ti,omap4430", "ti,omap4";
interrupt-parent = <&wakeupgen>;
+ #address-cells = <1>;
+ #size-cells = <1>;
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 5196113202a2..6365635fea5c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -77,16 +77,6 @@
reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
};
- leds {
- compatible = "gpio-leds";
- led@1 {
- label = "omap5:blue:usr1";
- gpios = <&gpio5 25 GPIO_ACTIVE_HIGH>; /* gpio5_153 D1 LED */
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
- };
-
tpd12s015: encoder {
compatible = "ti,tpd12s015";
@@ -332,7 +322,7 @@
wlcore_irq_pin: pinmux_wlcore_irq_pin {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
+ OMAP5_IOPAD(0x40, PIN_INPUT | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
>;
};
};
@@ -355,15 +345,17 @@
non-removable;
cap-power-off-card;
pinctrl-names = "default";
- pinctrl-0 = <&mmc3_pins &wlcore_irq_pin>;
- interrupts-extended = <&gic GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
- &omap5_pmx_core 0x168>;
+ pinctrl-0 = <&mmc3_pins>;
+ interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
+ &omap5_pmx_core 0x16a>;
#address-cells = <1>;
#size-cells = <0>;
wlcore: wlcore@2 {
compatible = "ti,wl1271";
reg = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlcore_irq_pin>;
interrupt-parent = <&gpio1>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>; /* gpio 14 */
ref-clock-frequency = <26000000>;
@@ -391,14 +383,23 @@
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
+ ti,mux-pad1 = <0xa1>;
+ ti,mux-pad2 = <0x1b>;
pinctrl-names = "default";
pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
+ palmas_gpio: gpio {
+ compatible = "ti,palmas-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
extcon_usb3: palmas_usb {
compatible = "ti,palmas-usb-vid";
ti,enable-vbus-detection;
ti,enable-id-detection;
ti,wakeup;
+ id-gpios = <&palmas_gpio 0 GPIO_ACTIVE_HIGH>;
};
clk32kgaudio: palmas_clk32k@1 {
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index a9765605d53b..b153f604932a 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -11,9 +11,9 @@
model = "CompuLab CM-T54";
compatible = "compulab,omap5-cm-t54", "ti,omap5";
- memory {
+ memory@80000000 {
device_type = "memory";
- reg = <0x80000000 0x7F000000>; /* 2048 MB */
+ reg = <0 0x80000000 0 0x7f000000>; /* 2048 MB */
};
aliases {
@@ -72,7 +72,7 @@
leds {
compatible = "gpio-leds";
- led@1 {
+ led1 {
label = "Heartbeat";
gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>; /* gpio3_80 ACT_LED */
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/omap5-igep0050.dts b/arch/arm/boot/dts/omap5-igep0050.dts
index f75ce02fb398..8fc19218057e 100644
--- a/arch/arm/boot/dts/omap5-igep0050.dts
+++ b/arch/arm/boot/dts/omap5-igep0050.dts
@@ -7,15 +7,47 @@
*/
/dts-v1/;
+#include <dt-bindings/input/input.h>
#include "omap5-board-common.dtsi"
/ {
model = "IGEPv5";
compatible = "isee,omap5-igep0050", "ti,omap5";
- memory {
+ memory@80000000 {
device_type = "memory";
- reg = <0x80000000 0x7f000000>; /* 2032 MB */
+ reg = <0x0 0x80000000 0 0x7f000000>; /* 2032 MB */
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&power_button_pin>;
+ pinctrl-names = "default";
+
+ power-button {
+ label = "Power Button";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led@1 {
+ label = "board:green:usr0";
+ gpios = <&tca6416 1 0>;
+ default-state = "off";
+ };
+ led@2 {
+ label = "board:red:usr1";
+ gpios = <&tca6416 2 0>;
+ default-state = "off";
+ };
+ led@3 {
+ label = "board:blue:usr1";
+ gpios = <&tca6416 3 0>;
+ default-state = "off";
+ };
};
};
@@ -58,6 +90,12 @@
OMAP5_IOPAD(0x0fa, PIN_INPUT | MUX_MODE0) /* i2c4_sda */
>;
};
+
+ power_button_pin: pinctrl_power_button_pin {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x086, PIN_INPUT | MUX_MODE6) /* gpio4_118 */
+ >;
+ };
};
&tpd12s015 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index a51e60518eb6..53d31a87b44b 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -13,9 +13,19 @@
model = "TI OMAP5 uEVM board";
compatible = "ti,omap5-uevm", "ti,omap5";
- memory {
+ memory@80000000 {
device_type = "memory";
- reg = <0x80000000 0x7F000000>; /* 2032 MB */
+ reg = <0 0x80000000 0 0x7f000000>; /* 2032 MB */
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led1 {
+ label = "omap5:blue:usr1";
+ gpios = <&gpio5 25 GPIO_ACTIVE_HIGH>; /* gpio5_153 D1 LED */
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
};
};
@@ -61,3 +71,7 @@
OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
>;
};
+
+&wlcore {
+ compatible = "ti,wl1837";
+};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 84c10195e79b..25262118ec3d 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -11,11 +11,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/omap.h>
-#include "skeleton.dtsi"
-
/ {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
compatible = "ti,omap5";
interrupt-parent = <&wakeupgen>;
@@ -92,10 +90,10 @@
compatible = "arm,cortex-a15-gic";
interrupt-controller;
#interrupt-cells = <3>;
- reg = <0x48211000 0x1000>,
- <0x48212000 0x1000>,
- <0x48214000 0x2000>,
- <0x48216000 0x2000>;
+ reg = <0 0x48211000 0 0x1000>,
+ <0 0x48212000 0 0x1000>,
+ <0 0x48214000 0 0x2000>,
+ <0 0x48216000 0 0x2000>;
interrupt-parent = <&gic>;
};
@@ -103,7 +101,7 @@
compatible = "ti,omap5-wugen-mpu", "ti,omap4-wugen-mpu";
interrupt-controller;
#interrupt-cells = <3>;
- reg = <0x48281000 0x1000>;
+ reg = <0 0x48281000 0 0x1000>;
interrupt-parent = <&gic>;
};
@@ -131,11 +129,11 @@
compatible = "ti,omap5-l3-noc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
+ ranges = <0 0 0 0xc0000000>;
ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
- reg = <0x44000000 0x2000>,
- <0x44800000 0x3000>,
- <0x45000000 0x4000>;
+ reg = <0 0x44000000 0 0x2000>,
+ <0 0x44800000 0 0x3000>,
+ <0 0x45000000 0 0x4000>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -865,7 +863,7 @@
#size-cells = <1>;
utmi-mode = <2>;
ranges;
- dwc3@4a030000 {
+ dwc3: dwc3@4a030000 {
compatible = "snps,dwc3";
reg = <0x4a030000 0x10000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/orion5x-mv88f5181.dtsi b/arch/arm/boot/dts/orion5x-mv88f5181.dtsi
new file mode 100644
index 000000000000..f667012b26ca
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-mv88f5181.dtsi
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Jamie Lentin <jm@lentin.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "orion5x.dtsi"
+
+/ {
+ compatible = "marvell,orion5x-88f5181", "marvell,orion5x";
+
+ soc {
+ compatible = "marvell,orion5x-88f5181-mbus", "simple-bus";
+
+ internal-regs {
+ pinctrl: pinctrl@10000 {
+ compatible = "marvell,88f5181-pinctrl";
+ reg = <0x10000 0x8>, <0x10050 0x4>;
+ };
+
+ core_clk: core-clocks@10030 {
+ compatible = "marvell,mv88f5181-core-clock";
+ reg = <0x10010 0x4>;
+ #clock-cells = <1>;
+ };
+
+ mbusc: mbus-controller@20000 {
+ compatible = "marvell,mbus-controller";
+ reg = <0x20000 0x100>, <0x1500 0x20>;
+ };
+ };
+ };
+};
+
+&pinctrl {
+ pmx_ge: pmx-ge {
+ marvell,pins = "mpp8", "mpp9", "mpp10", "mpp11",
+ "mpp12", "mpp13", "mpp14", "mpp15",
+ "mpp16", "mpp17", "mpp18", "mpp19";
+ marvell,function = "ge";
+ };
+};
+
+&eth {
+ pinctrl-0 = <&pmx_ge>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts
new file mode 100644
index 000000000000..9f6ae4e1de06
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2016 Jamie Lentin <jm@lentin.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "orion5x-mv88f5181.dtsi"
+
+/ {
+ model = "Netgear WNR854-t";
+ compatible = "netgear,wnr854t", "marvell,orion5x-88f5181",
+ "marvell,orion5x";
+ aliases {
+ serial0 = &uart0;
+ };
+
+ memory {
+ reg = <0x00000000 0x2000000>; /* 32 MB */
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+ <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+ <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x800000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&pmx_reset_button>;
+ pinctrl-names = "default";
+
+ reset {
+ label = "Reset Button";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pmx_power_led &pmx_power_led_blink &pmx_wan_led>;
+ pinctrl-names = "default";
+
+ led@0 {
+ label = "wnr854t:green:power";
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+
+ led@1 {
+ label = "wnr854t:blink:power";
+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ };
+
+ led@2 {
+ label = "wnr854t:green:wan";
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&devbus_bootcs {
+ status = "okay";
+
+ devbus,keep-config;
+
+ flash@0 {
+ compatible = "cfi-flash";
+ reg = <0 0x800000>;
+ bank-width = <2>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "kernel";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "rootfs";
+ reg = <0x100000 0x660000>;
+ };
+
+ partition@760000 {
+ label = "uboot_env";
+ reg = <0x760000 0x20000>;
+ };
+
+ partition@780000 {
+ label = "uboot";
+ reg = <0x780000 0x80000>;
+ read-only;
+ };
+ };
+ };
+};
+
+&mdio {
+ status = "okay";
+
+ switch: switch@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-handle = <&lan3phy>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-handle = <&lan4phy>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "wan";
+ phy-handle = <&wanphy>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "cpu";
+ ethernet = <&ethport>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "lan1";
+ phy-handle = <&lan1phy>;
+ };
+
+ port@7 {
+ reg = <7>;
+ label = "lan2";
+ phy-handle = <&lan2phy>;
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lan3phy: ethernet-phy@0 {
+ /* Marvell 88E1121R (port 1) */
+ compatible = "ethernet-phy-id0141.0cb0",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ marvell,reg-init = <3 16 0 0x1777 3 17 0 0x15>;
+ };
+
+ lan4phy: ethernet-phy@1 {
+ /* Marvell 88E1121R (port 2) */
+ compatible = "ethernet-phy-id0141.0cb0",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ marvell,reg-init = <3 16 0 0x1777 3 17 0 0x15>;
+ };
+
+ wanphy: ethernet-phy@2 {
+ /* Marvell 88E1121R (port 1) */
+ compatible = "ethernet-phy-id0141.0cb0",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <2>;
+ marvell,reg-init = <3 16 0 0x1777 3 17 0 0x15>;
+ };
+
+ lan1phy: ethernet-phy@5 {
+ /* Marvell 88E1112 */
+ compatible = "ethernet-phy-id0141.0cb0",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <5>;
+ marvell,reg-init = <3 16 0 0x1777 3 17 0 0x15>;
+ };
+
+ lan2phy: ethernet-phy@7 {
+ /* Marvell 88E1112 */
+ compatible = "ethernet-phy-id0141.0cb0",
+ "ethernet-phy-ieee802.3-c22";
+ reg = <7>;
+ marvell,reg-init = <3 16 0 0x1777 3 17 0 0x15>;
+ };
+ };
+ };
+};
+
+&eth {
+ status = "okay";
+
+ ethernet-port@0 {
+ /* Hardwired to DSA switch */
+ speed = <1000>;
+ duplex = <1>;
+ };
+};
+
+&pinctrl {
+ pinctrl-0 = <&pmx_pci_gpios>;
+ pinctrl-names = "default";
+
+ pmx_power_led: pmx-power-led {
+ marvell,pins = "mpp0";
+ marvell,function = "gpio";
+ };
+
+ pmx_reset_button: pmx-reset-button {
+ marvell,pins = "mpp1";
+ marvell,function = "gpio";
+ };
+
+ pmx_power_led_blink: pmx-power-led-blink {
+ marvell,pins = "mpp2";
+ marvell,function = "gpio";
+ };
+
+ pmx_wan_led: pmx-wan-led {
+ marvell,pins = "mpp3";
+ marvell,function = "gpio";
+ };
+
+ pmx_pci_gpios: pmx-pci-gpios {
+ marvell,pins = "mpp4";
+ marvell,function = "gpio";
+ };
+};
+
+&uart0 {
+ /* Pin 1: Tx, Pin 7: Rx, Pin 8: Gnd */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index e1b6d2a2ac49..fbccfbbab223 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -144,9 +144,10 @@
wdt: wdt@20300 {
compatible = "marvell,orion-wdt";
- reg = <0x20300 0x28>;
+ reg = <0x20300 0x28>, <0x20108 0x4>;
interrupt-parent = <&bridge_intc>;
interrupts = <3>;
+ clocks = <&core_clk 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index 0abc93e5bb00..6c0038398ef2 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -239,6 +239,45 @@
};
};
};
+
+ led@48 {
+ /*
+ * The keypad LED @0x48 is routed to
+ * the sensor board where it is
+ * connected to an infrared LED
+ * SFH4650 (60mW, @850nm) next to the
+ * ambient light and proximity sensor
+ * Capella Microsystems CM3605.
+ */
+ compatible = "qcom,pm8058-keypad-led";
+ reg = <0x48>;
+ label = "pm8058:infrared:proximitysensor";
+ default-state = "off";
+ };
+ led@131 {
+ compatible = "qcom,pm8058-led";
+ reg = <0x131>;
+ label = "pm8058:red";
+ default-state = "off";
+ };
+ led@132 {
+ /*
+ * This is actually green too on my
+ * board, but documented as yellow.
+ */
+ compatible = "qcom,pm8058-led";
+ reg = <0x132>;
+ label = "pm8058:yellow";
+ default-state = "off";
+ linux,default-trigger = "mmc0";
+ };
+ led@133 {
+ compatible = "qcom,pm8058-led";
+ reg = <0x133>;
+ label = "pm8058:green";
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index 7b05f072bfc2..b72e09506448 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -253,6 +253,7 @@
vddcx-supply = <&pm8921_s3>;
v3p3-supply = <&pm8921_l3>;
v1p8-supply = <&pm8921_l4>;
+ dr_mode = "otg";
};
gadget@12500000 {
@@ -272,5 +273,19 @@
vqmmc-supply = <&pm8921_s4>;
};
};
+
+ imem@2a03f000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x2a03f000 0x1000>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x65c>;
+
+ mode-normal = <0x77665501>;
+ mode-bootloader = <0x77665500>;
+ mode-recovery = <0x77665502>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 74a9b6c394f5..1dbe697b2e90 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/soc/qcom,gsbi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
model = "Qualcomm APQ8064";
@@ -86,6 +87,92 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 7>;
+ coefficients = <1199 0>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 8>;
+ coefficients = <1132 0>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 9>;
+ coefficients = <1199 0>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 10>;
+ coefficients = <1132 0>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 10 0x304>;
@@ -559,22 +646,50 @@
compatible = "qcom,pm8921-gpio",
"qcom,ssbi-gpio";
reg = <0x150>;
- interrupts = <192 1>, <193 1>, <194 1>,
- <195 1>, <196 1>, <197 1>,
- <198 1>, <199 1>, <200 1>,
- <201 1>, <202 1>, <203 1>,
- <204 1>, <205 1>, <206 1>,
- <207 1>, <208 1>, <209 1>,
- <210 1>, <211 1>, <212 1>,
- <213 1>, <214 1>, <215 1>,
- <216 1>, <217 1>, <218 1>,
- <219 1>, <220 1>, <221 1>,
- <222 1>, <223 1>, <224 1>,
- <225 1>, <226 1>, <227 1>,
- <228 1>, <229 1>, <230 1>,
- <231 1>, <232 1>, <233 1>,
- <234 1>, <235 1>;
-
+ interrupts = <192 IRQ_TYPE_NONE>,
+ <193 IRQ_TYPE_NONE>,
+ <194 IRQ_TYPE_NONE>,
+ <195 IRQ_TYPE_NONE>,
+ <196 IRQ_TYPE_NONE>,
+ <197 IRQ_TYPE_NONE>,
+ <198 IRQ_TYPE_NONE>,
+ <199 IRQ_TYPE_NONE>,
+ <200 IRQ_TYPE_NONE>,
+ <201 IRQ_TYPE_NONE>,
+ <202 IRQ_TYPE_NONE>,
+ <203 IRQ_TYPE_NONE>,
+ <204 IRQ_TYPE_NONE>,
+ <205 IRQ_TYPE_NONE>,
+ <206 IRQ_TYPE_NONE>,
+ <207 IRQ_TYPE_NONE>,
+ <208 IRQ_TYPE_NONE>,
+ <209 IRQ_TYPE_NONE>,
+ <210 IRQ_TYPE_NONE>,
+ <211 IRQ_TYPE_NONE>,
+ <212 IRQ_TYPE_NONE>,
+ <213 IRQ_TYPE_NONE>,
+ <214 IRQ_TYPE_NONE>,
+ <215 IRQ_TYPE_NONE>,
+ <216 IRQ_TYPE_NONE>,
+ <217 IRQ_TYPE_NONE>,
+ <218 IRQ_TYPE_NONE>,
+ <219 IRQ_TYPE_NONE>,
+ <220 IRQ_TYPE_NONE>,
+ <221 IRQ_TYPE_NONE>,
+ <222 IRQ_TYPE_NONE>,
+ <223 IRQ_TYPE_NONE>,
+ <224 IRQ_TYPE_NONE>,
+ <225 IRQ_TYPE_NONE>,
+ <226 IRQ_TYPE_NONE>,
+ <227 IRQ_TYPE_NONE>,
+ <228 IRQ_TYPE_NONE>,
+ <229 IRQ_TYPE_NONE>,
+ <230 IRQ_TYPE_NONE>,
+ <231 IRQ_TYPE_NONE>,
+ <232 IRQ_TYPE_NONE>,
+ <233 IRQ_TYPE_NONE>,
+ <234 IRQ_TYPE_NONE>,
+ <235 IRQ_TYPE_NONE>;
gpio-controller;
#gpio-cells = <2>;
@@ -587,9 +702,18 @@
gpio-controller;
#gpio-cells = <2>;
interrupts =
- <128 1>, <129 1>, <130 1>, <131 1>,
- <132 1>, <133 1>, <134 1>, <135 1>,
- <136 1>, <137 1>, <138 1>, <139 1>;
+ <128 IRQ_TYPE_NONE>,
+ <129 IRQ_TYPE_NONE>,
+ <130 IRQ_TYPE_NONE>,
+ <131 IRQ_TYPE_NONE>,
+ <132 IRQ_TYPE_NONE>,
+ <133 IRQ_TYPE_NONE>,
+ <134 IRQ_TYPE_NONE>,
+ <135 IRQ_TYPE_NONE>,
+ <136 IRQ_TYPE_NONE>,
+ <137 IRQ_TYPE_NONE>,
+ <138 IRQ_TYPE_NONE>,
+ <139 IRQ_TYPE_NONE>;
};
rtc@11d {
@@ -611,11 +735,28 @@
};
};
+ qfprom: qfprom@700000 {
+ compatible = "qcom,qfprom";
+ reg = <0x00700000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ tsens_calib: calib {
+ reg = <0x404 0x10>;
+ };
+ tsens_backup: backup_calib {
+ reg = <0x414 0x10>;
+ };
+ };
+
gcc: clock-controller@900000 {
compatible = "qcom,gcc-apq8064";
reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
#clock-cells = <1>;
#reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
};
lcc: clock-controller@28000000 {
@@ -712,7 +853,6 @@
reg = <0x12500000 0x400>;
interrupts = <GIC_SPI 100 IRQ_TYPE_NONE>;
status = "disabled";
- dr_mode = "host";
clocks = <&gcc USB_HS1_XCVR_CLK>,
<&gcc USB_HS1_H_CLK>;
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 7c2df062a025..39eb7a4ed16a 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -94,6 +94,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -150,6 +232,27 @@
reg = <0xf9011000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064-ap148.dts b/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
index d501382493e3..348503d1a1c1 100644
--- a/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
@@ -95,6 +95,7 @@
};
sata@29000000 {
+ ports-implemented = <0x1>;
status = "ok";
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index acbe71febe13..8c65e0d82559 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -2,6 +2,7 @@
/include/ "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8660.h>
#include <dt-bindings/soc/qcom,gsbi.h>
@@ -159,21 +160,50 @@
"qcom,ssbi-gpio";
reg = <0x150>;
interrupt-parent = <&pmicintc>;
- interrupts = <192 1>, <193 1>, <194 1>,
- <195 1>, <196 1>, <197 1>,
- <198 1>, <199 1>, <200 1>,
- <201 1>, <202 1>, <203 1>,
- <204 1>, <205 1>, <206 1>,
- <207 1>, <208 1>, <209 1>,
- <210 1>, <211 1>, <212 1>,
- <213 1>, <214 1>, <215 1>,
- <216 1>, <217 1>, <218 1>,
- <219 1>, <220 1>, <221 1>,
- <222 1>, <223 1>, <224 1>,
- <225 1>, <226 1>, <227 1>,
- <228 1>, <229 1>, <230 1>,
- <231 1>, <232 1>, <233 1>,
- <234 1>, <235 1>;
+ interrupts = <192 IRQ_TYPE_NONE>,
+ <193 IRQ_TYPE_NONE>,
+ <194 IRQ_TYPE_NONE>,
+ <195 IRQ_TYPE_NONE>,
+ <196 IRQ_TYPE_NONE>,
+ <197 IRQ_TYPE_NONE>,
+ <198 IRQ_TYPE_NONE>,
+ <199 IRQ_TYPE_NONE>,
+ <200 IRQ_TYPE_NONE>,
+ <201 IRQ_TYPE_NONE>,
+ <202 IRQ_TYPE_NONE>,
+ <203 IRQ_TYPE_NONE>,
+ <204 IRQ_TYPE_NONE>,
+ <205 IRQ_TYPE_NONE>,
+ <206 IRQ_TYPE_NONE>,
+ <207 IRQ_TYPE_NONE>,
+ <208 IRQ_TYPE_NONE>,
+ <209 IRQ_TYPE_NONE>,
+ <210 IRQ_TYPE_NONE>,
+ <211 IRQ_TYPE_NONE>,
+ <212 IRQ_TYPE_NONE>,
+ <213 IRQ_TYPE_NONE>,
+ <214 IRQ_TYPE_NONE>,
+ <215 IRQ_TYPE_NONE>,
+ <216 IRQ_TYPE_NONE>,
+ <217 IRQ_TYPE_NONE>,
+ <218 IRQ_TYPE_NONE>,
+ <219 IRQ_TYPE_NONE>,
+ <220 IRQ_TYPE_NONE>,
+ <221 IRQ_TYPE_NONE>,
+ <222 IRQ_TYPE_NONE>,
+ <223 IRQ_TYPE_NONE>,
+ <224 IRQ_TYPE_NONE>,
+ <225 IRQ_TYPE_NONE>,
+ <226 IRQ_TYPE_NONE>,
+ <227 IRQ_TYPE_NONE>,
+ <228 IRQ_TYPE_NONE>,
+ <229 IRQ_TYPE_NONE>,
+ <230 IRQ_TYPE_NONE>,
+ <231 IRQ_TYPE_NONE>,
+ <232 IRQ_TYPE_NONE>,
+ <233 IRQ_TYPE_NONE>,
+ <234 IRQ_TYPE_NONE>,
+ <235 IRQ_TYPE_NONE>;
gpio-controller;
#gpio-cells = <2>;
@@ -187,9 +217,18 @@
#gpio-cells = <2>;
interrupt-parent = <&pmicintc>;
interrupts =
- <128 1>, <129 1>, <130 1>, <131 1>,
- <132 1>, <133 1>, <134 1>, <135 1>,
- <136 1>, <137 1>, <138 1>, <139 1>;
+ <128 IRQ_TYPE_NONE>,
+ <129 IRQ_TYPE_NONE>,
+ <130 IRQ_TYPE_NONE>,
+ <131 IRQ_TYPE_NONE>,
+ <132 IRQ_TYPE_NONE>,
+ <133 IRQ_TYPE_NONE>,
+ <134 IRQ_TYPE_NONE>,
+ <135 IRQ_TYPE_NONE>,
+ <136 IRQ_TYPE_NONE>,
+ <137 IRQ_TYPE_NONE>,
+ <138 IRQ_TYPE_NONE>,
+ <139 IRQ_TYPE_NONE>;
};
pwrkey@1c {
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
new file mode 100644
index 000000000000..c0fb4a698c56
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -0,0 +1,262 @@
+#include "qcom-msm8974.dtsi"
+#include "qcom-pm8841.dtsi"
+#include "qcom-pm8941.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+ model = "LGE MSM 8974 HAMMERHEAD";
+ compatible = "lge,hammerhead", "qcom,msm8974";
+
+ aliases {
+ serial0 = &blsp1_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ smd {
+ rpm {
+ rpm_requests {
+ pm8841-regulators {
+ s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <815000>;
+ regulator-max-microvolt = <900000>;
+ };
+ };
+
+ pm8941-regulators {
+ vdd_l1_l3-supply = <&pm8941_s1>;
+ vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
+ vdd_l4_l11-supply = <&pm8941_s1>;
+ vdd_l5_l7-supply = <&pm8941_s2>;
+ vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
+ vdd_l8_l16_l18_l19-supply = <&vreg_vph_pwr>;
+ vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
+ vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
+ vdd_l21-supply = <&vreg_boost>;
+
+ s1 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ s2 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+
+ regulator-boot-on;
+ };
+
+ s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l3 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-boot-on;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-boot-on;
+ };
+
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-boot-on;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-boot-on;
+ };
+
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-boot-on;
+ };
+
+ l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+ };
+};
+
+&soc {
+ serial@f991d000 {
+ status = "ok";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pin_a>;
+
+ volume-up {
+ label = "volume_up";
+ gpios = <&pm8941_gpios 2 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down {
+ label = "volume_down";
+ gpios = <&pm8941_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+};
+
+&spmi_bus {
+ pm8941@0 {
+ gpios@c000 {
+ gpio_keys_pin_a: gpio-keys-active {
+ pins = "gpio2", "gpio3";
+ function = "normal";
+
+ bias-pull-up;
+ power-source = <PM8941_GPIO_S3>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index 3fb4dada6b0d..e7c1577d56f4 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -257,23 +257,6 @@
};
};
};
-
- vreg_boost: vreg-boost {
- compatible = "regulator-fixed";
-
- regulator-name = "vreg-boost";
- regulator-min-microvolt = <3150000>;
- regulator-max-microvolt = <3150000>;
-
- regulator-always-on;
- regulator-boot-on;
-
- gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
- enable-active-high;
-
- pinctrl-names = "default";
- pinctrl-0 = <&boost_bypass_n_pin>;
- };
};
&soc {
@@ -311,6 +294,45 @@
pinctrl-0 = <&blsp1_uart2_pin_a>;
};
+ i2c@f9924000 {
+ status = "ok";
+
+ clock-frequency = <355000>;
+ qcom,src-freq = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+
+ synaptics@2c {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x2c>;
+
+ interrupts-extended = <&msmgpio 61 IRQ_TYPE_EDGE_FALLING>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd-supply = <&pm8941_l22>;
+ vio-supply = <&pm8941_lvs3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_pin>;
+
+ syna,startup-delay-ms = <10>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ touchscreen-inverted-x;
+ syna,sensor-type = <1>;
+ };
+ };
+ };
+
pinctrl@fd510000 {
blsp1_uart2_pin_a: blsp1-uart2-pin-active {
rx {
@@ -330,6 +352,16 @@
};
};
+ i2c2_pins: i2c2 {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c2";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
sdhc1_pin_a: sdhc1-pin-active {
clk {
pins = "sdc1_clk";
@@ -366,6 +398,16 @@
};
};
+ ts_int_pin: touch-int {
+ pin {
+ pins = "gpio61";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ input-enable;
+ };
+ };
};
dma-controller@f9944000 {
@@ -387,11 +429,6 @@
};
gpios@c000 {
- boost_bypass_n_pin: boost-bypass {
- pins = "gpio21";
- function = "normal";
- };
-
gpio_keys_pin_a: gpio-keys-active {
pins = "gpio2", "gpio3", "gpio4", "gpio5";
function = "normal";
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 561d4d136762..d2109475bdfd 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -2,6 +2,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
+#include <dt-bindings/gpio/gpio.h>
#include "skeleton.dtsi"
/ {
@@ -131,6 +132,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -287,6 +370,27 @@
reg = <0xf9011000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -430,6 +534,15 @@
reg = <0xfc428000 0x4000>;
};
+ blsp1_uart1: serial@f991d000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf991d000 0x1000>;
+ interrupts = <0 107 0x0>;
+ clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
blsp1_uart2: serial@f991e000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf991e000 0x1000>;
@@ -615,4 +728,30 @@
};
};
};
+
+ vreg_boost: vreg-boost {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vreg-boost";
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3150000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pm8941_gpios 21 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&boost_bypass_n_pin>;
+ };
+ vreg_vph_pwr: vreg-vph-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "vph-pwr";
+
+ regulator-min-microvolt = <3600000>;
+ regulator-max-microvolt = <3600000>;
+
+ regulator-always-on;
+ };
};
diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi
index d95edb6f6265..f8eb5e31c920 100644
--- a/arch/arm/boot/dts/qcom-pm8941.dtsi
+++ b/arch/arm/boot/dts/qcom-pm8941.dtsi
@@ -88,6 +88,11 @@
<0 0xe1 0 IRQ_TYPE_NONE>,
<0 0xe2 0 IRQ_TYPE_NONE>,
<0 0xe3 0 IRQ_TYPE_NONE>;
+
+ boost_bypass_n_pin: boost-bypass {
+ pins = "gpio21";
+ function = "normal";
+ };
};
pm8941_mpps: mpps@a000 {
diff --git a/arch/arm/boot/dts/r7s72100-rskrza1.dts b/arch/arm/boot/dts/r7s72100-rskrza1.dts
new file mode 100644
index 000000000000..e5dea5bb4032
--- /dev/null
+++ b/arch/arm/boot/dts/r7s72100-rskrza1.dts
@@ -0,0 +1,61 @@
+/*
+ * Device Tree Source for the RZ/A1H RSK board
+ *
+ * Copyright (C) 2016 Renesas Electronics
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r7s72100.dtsi"
+
+/ {
+ model = "RSKRZA1";
+ compatible = "renesas,rskrza1", "renesas,r7s72100";
+
+ aliases {
+ serial0 = &scif2;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@8000000 {
+ device_type = "memory";
+ reg = <0x08000000 0x02000000>;
+ };
+
+ lbsc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <13330000>;
+};
+
+&usb_x1_clk {
+ clock-frequency = <48000000>;
+};
+
+&mtu2 {
+ status = "okay";
+};
+
+&ether {
+ status = "okay";
+ renesas,no-ether-link;
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&scif2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index e8e2a5d71976..fb9ef9ca120e 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -108,6 +108,15 @@
clock-output-names = "scif0", "scif1", "scif2", "scif3", "scif4", "scif5", "scif6", "scif7";
};
+ mstp7_clks: mstp7_clks@fcfe0430 {
+ #clock-cells = <1>;
+ compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xfcfe0430 4>;
+ clocks = <&p0_clk>;
+ clock-indices = <R7S72100_CLK_ETHER>;
+ clock-output-names = "ether";
+ };
+
mstp9_clks: mstp9_clks@fcfe0438 {
#clock-cells = <1>;
compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -419,4 +428,17 @@
power-domains = <&cpg_clocks>;
status = "disabled";
};
+
+ ether: ethernet@e8203000 {
+ compatible = "renesas,ether-r7s72100";
+ reg = <0xe8203000 0x800>,
+ <0xe8204800 0x200>;
+ interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R7S72100_CLK_ETHER>;
+ power-domains = <&cpg_clocks>;
+ phy-mode = "mii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index d18558f21102..351fcc2f87df 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -944,11 +944,6 @@
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
-
- renesas,has-sru;
- renesas,#rpf = <5>;
- renesas,#uds = <1>;
- renesas,#wpf = <4>;
};
vsp1@fe928000 {
@@ -957,12 +952,6 @@
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
-
- renesas,has-lut;
- renesas,has-sru;
- renesas,#rpf = <5>;
- renesas,#uds = <3>;
- renesas,#wpf = <4>;
};
vsp1@fe930000 {
@@ -971,12 +960,6 @@
interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
-
- renesas,has-lif;
- renesas,has-lut;
- renesas,#rpf = <4>;
- renesas,#uds = <1>;
- renesas,#wpf = <4>;
};
vsp1@fe938000 {
@@ -985,12 +968,6 @@
interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
-
- renesas,has-lif;
- renesas,has-lut;
- renesas,#rpf = <4>;
- renesas,#uds = <1>;
- renesas,#wpf = <4>;
};
du: display@feb00000 {
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8f0086bbd96b..162b55c665a3 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -983,12 +983,6 @@
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
-
- renesas,has-lut;
- renesas,has-sru;
- renesas,#rpf = <5>;
- renesas,#uds = <3>;
- renesas,#wpf = <4>;
};
vsp1@fe930000 {
@@ -997,12 +991,6 @@
interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
-
- renesas,has-lif;
- renesas,has-lut;
- renesas,#rpf = <4>;
- renesas,#uds = <1>;
- renesas,#wpf = <4>;
};
vsp1@fe938000 {
@@ -1011,12 +999,6 @@
interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
-
- renesas,has-lif;
- renesas,has-lut;
- renesas,#rpf = <4>;
- renesas,#uds = <1>;
- renesas,#wpf = <4>;
};
du: display@feb00000 {
diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
index e7b40f0e7da6..f3ea43b7b724 100644
--- a/arch/arm/boot/dts/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/r8a7792-blanche.dts
@@ -11,6 +11,8 @@
/dts-v1/;
#include "r8a7792.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
model = "Blanche";
@@ -50,6 +52,139 @@
reg-io-width = <4>;
vddvario-supply = <&d3_3v>;
vdd33a-supply = <&d3_3v>;
+
+ pinctrl-0 = <&lan89218_pins>;
+ pinctrl-names = "default";
+ };
+
+ vga-encoder {
+ compatible = "adi,adv7123";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7123_in: endpoint {
+ remote-endpoint = <&du_out_rgb1>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ adv7123_out: endpoint {
+ remote-endpoint = <&vga_in>;
+ };
+ };
+ };
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7511_out>;
+ };
+ };
+ };
+
+ vga {
+ compatible = "vga-connector";
+
+ port {
+ vga_in: endpoint {
+ remote-endpoint = <&adv7123_out>;
+ };
+ };
+ };
+
+ x1_clk: x1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <74250000>;
+ };
+
+ x2_clk: x2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <65000000>;
+ };
+
+ keyboard {
+ compatible = "gpio-keys";
+
+ key-1 {
+ linux,code = <KEY_1>;
+ label = "SW2-1";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 10 GPIO_ACTIVE_LOW>;
+ };
+ key-2 {
+ linux,code = <KEY_2>;
+ label = "SW2-2";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ };
+ key-3 {
+ linux,code = <KEY_3>;
+ label = "SW2-3";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+ };
+ key-4 {
+ linux,code = <KEY_4>;
+ label = "SW2-4";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
+ };
+ key-a {
+ linux,code = <KEY_A>;
+ label = "SW24";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 20 GPIO_ACTIVE_LOW>;
+ };
+ key-b {
+ linux,code = <KEY_B>;
+ label = "SW25";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio11 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led17 {
+ gpios = <&gpio10 10 GPIO_ACTIVE_HIGH>;
+ };
+ led18 {
+ gpios = <&gpio10 11 GPIO_ACTIVE_HIGH>;
+ };
+ led19 {
+ gpios = <&gpio10 12 GPIO_ACTIVE_HIGH>;
+ };
+ led20 {
+ gpios = <&gpio10 23 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ vcc_sdhi0: regulator-vcc-sdhi0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio11 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
};
};
@@ -57,10 +192,139 @@
clock-frequency = <20000000>;
};
+&can_clk {
+ clock-frequency = <48000000>;
+};
+
+&pfc {
+ scif0_pins: scif0 {
+ groups = "scif0_data";
+ function = "scif0";
+ };
+
+ scif3_pins: scif3 {
+ groups = "scif3_data";
+ function = "scif3";
+ };
+
+ lan89218_pins: lan89218 {
+ intc {
+ groups = "intc_irq0";
+ function = "intc";
+ };
+ lbsc {
+ groups = "lbsc_ex_cs0";
+ function = "lbsc";
+ };
+ };
+
+ can0_pins: can0 {
+ groups = "can0_data", "can_clk";
+ function = "can0";
+ };
+
+ sdhi0_pins: sdhi0 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ };
+
+ du0_pins: du0 {
+ groups = "du0_rgb888", "du0_sync", "du0_disp";
+ function = "du0";
+ };
+
+ du1_pins: du1 {
+ groups = "du1_rgb666", "du1_sync", "du1_disp";
+ function = "du1";
+ };
+};
+
&scif0 {
+ pinctrl-0 = <&scif0_pins>;
+ pinctrl-names = "default";
+
status = "okay";
};
&scif3 {
+ pinctrl-0 = <&scif3_pins>;
+ pinctrl-names = "default";
+
status = "okay";
};
+
+&can0 {
+ pinctrl-0 = <&can0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ cd-gpios = <&gpio11 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ hdmi@39 {
+ compatible = "adi,adv7511w";
+ reg = <0x39>;
+ interrupt-parent = <&irqc>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511_in: endpoint {
+ remote-endpoint = <&du_out_rgb0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+};
+
+&du {
+ pinctrl-0 = <&du0_pins &du1_pins>;
+ pinctrl-names = "default";
+
+ clocks = <&mstp7_clks R8A7792_CLK_DU0>, <&mstp7_clks R8A7792_CLK_DU1>,
+ <&x1_clk>, <&x2_clk>;
+ clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
+ status = "okay";
+
+ ports {
+ port@0 {
+ endpoint {
+ remote-endpoint = <&adv7511_in>;
+ };
+ };
+ port@1 {
+ endpoint {
+ remote-endpoint = <&adv7123_in>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7792-wheat.dts b/arch/arm/boot/dts/r8a7792-wheat.dts
new file mode 100644
index 000000000000..6dbb94114a93
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7792-wheat.dts
@@ -0,0 +1,199 @@
+/*
+ * Device Tree Source for the Wheat board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7792.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Wheat";
+ compatible = "renesas,wheat", "renesas,r8a7792";
+
+ aliases {
+ serial0 = &scif0;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+ d3_3v: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "D3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ethernet@18000000 {
+ compatible = "smsc,lan89218", "smsc,lan9115";
+ reg = <0 0x18000000 0 0x100>;
+ phy-mode = "mii";
+ interrupt-parent = <&irqc>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ smsc,irq-push-pull;
+ smsc,save-mac-address;
+ reg-io-width = <4>;
+ vddvario-supply = <&d3_3v>;
+ vdd33a-supply = <&d3_3v>;
+
+ pinctrl-0 = <&lan89218_pins>;
+ pinctrl-names = "default";
+ };
+
+ keyboard {
+ compatible = "gpio-keys";
+
+ key-a {
+ linux,code = <KEY_A>;
+ label = "SW2";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio3 20 GPIO_ACTIVE_LOW>;
+ };
+ key-b {
+ linux,code = <KEY_B>;
+ label = "SW3";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio11 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ vcc_sdhi0: regulator-vcc-sdhi0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio11 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <20000000>;
+};
+
+&pfc {
+ scif0_pins: scif0 {
+ groups = "scif0_data";
+ function = "scif0";
+ };
+
+ lan89218_pins: lan89218 {
+ intc {
+ groups = "intc_irq0";
+ function = "intc";
+ };
+ lbsc {
+ groups = "lbsc_ex_cs0";
+ function = "lbsc";
+ };
+ };
+
+ can0_pins: can0 {
+ groups = "can0_data";
+ function = "can0";
+ };
+
+ can1_pins: can1 {
+ groups = "can1_data";
+ function = "can1";
+ };
+
+ sdhi0_pins: sdhi0 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ };
+
+ qspi_pins: qspi {
+ groups = "qspi_ctrl", "qspi_data4";
+ function = "qspi";
+ };
+};
+
+&scif0 {
+ pinctrl-0 = <&scif0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&can0 {
+ pinctrl-0 = <&can0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-0 = <&can1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ cd-gpios = <&gpio11 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&qspi {
+ pinctrl-0 = <&qspi_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ flash@0 {
+ compatible = "spansion,s25fl512s", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <30000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ spi-cpol;
+ spi-cpha;
+ m25p,fast-read;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "loader";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "user";
+ reg = <0x00040000 0x00400000>;
+ read-only;
+ };
+ partition@440000 {
+ label = "flash";
+ reg = <0x00440000 0x03bc0000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 3fd61d7ab906..713141d38b3e 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -18,6 +18,22 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ spi0 = &qspi;
+ vin0 = &vin0;
+ vin1 = &vin1;
+ vin2 = &vin2;
+ vin3 = &vin3;
+ vin4 = &vin4;
+ vin5 = &vin5;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -108,6 +124,179 @@
#power-domain-cells = <1>;
};
+ pfc: pin-controller@e6060000 {
+ compatible = "renesas,pfc-r8a7792";
+ reg = <0 0xe6060000 0 0x144>;
+ };
+
+ gpio0: gpio@e6050000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6050000 0 0x50>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 0 29>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO0>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio1: gpio@e6051000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6051000 0 0x50>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 32 23>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO1>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio2: gpio@e6052000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6052000 0 0x50>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 64 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO2>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6053000 0 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 96 28>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO3>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio4: gpio@e6054000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6054000 0 0x50>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 128 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO4>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio5: gpio@e6055000 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055000 0 0x50>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 160 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO5>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio6: gpio@e6055100 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055100 0 0x50>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 192 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO6>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio7: gpio@e6055200 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055200 0 0x50>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 224 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO7>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio8: gpio@e6055300 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055300 0 0x50>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 256 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO8>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio9: gpio@e6055400 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055400 0 0x50>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 288 17>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO9>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio10: gpio@e6055500 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055500 0 0x50>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 320 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO10>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ gpio11: gpio@e6055600 {
+ compatible = "renesas,gpio-r8a7792",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055600 0 0x50>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 352 30>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&mstp9_clks R8A7792_CLK_GPIO11>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a7792",
"renesas,rcar-dmac";
@@ -262,6 +451,18 @@
status = "disabled";
};
+ sdhi0: sd@ee100000 {
+ compatible = "renesas,sdhi-r8a7792";
+ reg = <0 0xee100000 0 0x328>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
+ clocks = <&mstp3_clks R8A7792_CLK_SDHI0>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
jpu: jpeg-codec@fe980000 {
compatible = "renesas,jpu-r8a7792",
"renesas,rcar-gen2-jpu";
@@ -271,6 +472,242 @@
power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
};
+ avb: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a7792",
+ "renesas,etheravb-rcar-gen2";
+ reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_ETHERAVB>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ /* I2C doesn't need pinmux */
+ i2c0: i2c@e6508000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6508000 0 0x40>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C0>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@e6518000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6518000 0 0x40>;
+ interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C1>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@e6530000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6530000 0 0x40>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C2>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@e6540000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6540000 0 0x40>;
+ interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C3>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@e6520000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6520000 0 0x40>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C4>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <6>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@e6528000 {
+ compatible = "renesas,i2c-r8a7792";
+ reg = <0 0xe6528000 0 0x40>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_I2C5>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ qspi: spi@e6b10000 {
+ compatible = "renesas,qspi-r8a7792", "renesas,qspi";
+ reg = <0 0xe6b10000 0 0x2c>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_QSPI_MOD>;
+ dmas = <&dmac0 0x17>, <&dmac0 0x18>,
+ <&dmac1 0x17>, <&dmac1 0x18>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a7792";
+ reg = <0 0xfeb00000 0 0x40000>;
+ reg-names = "du";
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_DU0>,
+ <&mstp7_clks R8A7792_CLK_DU1>;
+ clock-names = "du.0", "du.1";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_rgb0: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ du_out_rgb1: endpoint {
+ };
+ };
+ };
+ };
+
+ can0: can@e6e80000 {
+ compatible = "renesas,can-r8a7792",
+ "renesas,rcar-gen2-can";
+ reg = <0 0xe6e80000 0 0x1000>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_CAN0>,
+ <&rcan_clk>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ can1: can@e6e88000 {
+ compatible = "renesas,can-r8a7792",
+ "renesas,rcar-gen2-can";
+ reg = <0 0xe6e88000 0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7792_CLK_CAN1>,
+ <&rcan_clk>, <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin0: video@e6ef0000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef0000 0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN0>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin1: video@e6ef1000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef1000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN1>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin2: video@e6ef2000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef2000 0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN2>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin3: video@e6ef3000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef3000 0 0x1000>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN3>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin4: video@e6ef4000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef4000 0 0x1000>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN4>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vin5: video@e6ef5000 {
+ compatible = "renesas,vin-r8a7792",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef5000 0 0x1000>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7792_CLK_VIN5>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ vsp1@fe928000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe928000 0 0x8000>;
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7792_CLK_VSP1_SY>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ vsp1@fe930000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe930000 0 0x8000>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7792_CLK_VSP1DU0>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ vsp1@fe938000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe938000 0 0x8000>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7792_CLK_VSP1DU1>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
/* Special CPG clocks */
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7792-cpg-clocks",
@@ -291,6 +728,13 @@
clock-div = <2>;
clock-mult = <1>;
};
+ zx_clk: zx {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <3>;
+ clock-mult = <1>;
+ };
zs_clk: zs {
compatible = "fixed-factor-clock";
clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
@@ -298,6 +742,13 @@
clock-div = <6>;
clock-mult = <1>;
};
+ hp_clk: hp {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <12>;
+ clock-mult = <1>;
+ };
p_clk: p {
compatible = "fixed-factor-clock";
clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
@@ -319,16 +770,42 @@
clock-div = <8>;
clock-mult = <1>;
};
+ sd_clk: sd {
+ compatible = "fixed-factor-clock";
+ clocks = <&pll1_div2_clk>;
+ #clock-cells = <0>;
+ clock-div = <8>;
+ clock-mult = <1>;
+ };
+ rcan_clk: rcan {
+ compatible = "fixed-factor-clock";
+ clocks = <&pll1_div2_clk>;
+ #clock-cells = <0>;
+ clock-div = <49>;
+ clock-mult = <1>;
+ };
+ zg_clk: zg {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <5>;
+ clock-mult = <1>;
+ };
/* Gate clocks */
mstp1_clks: mstp1_clks@e6150134 {
compatible = "renesas,r8a7792-mstp-clocks",
"renesas,cpg-mstp-clocks";
reg = <0 0xe6150134 0 4>, <0 0xe6150038 0 4>;
- clocks = <&m2_clk>;
+ clocks = <&m2_clk>, <&zs_clk>, <&zs_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7792_CLK_JPU>;
- clock-output-names = "jpu";
+ clock-indices = <
+ R8A7792_CLK_JPU
+ R8A7792_CLK_VSP1DU1 R8A7792_CLK_VSP1DU0
+ R8A7792_CLK_VSP1_SY
+ >;
+ clock-output-names = "jpu", "vsp1du1", "vsp1du0",
+ "vsp1-sy";
};
mstp2_clks: mstp2_clks@e6150138 {
compatible = "renesas,r8a7792-mstp-clocks",
@@ -341,6 +818,15 @@
>;
clock-output-names = "sys-dmac1", "sys-dmac0";
};
+ mstp3_clks: mstp3_clks@e615013c {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
+ clocks = <&sd_clk>;
+ #clock-cells = <1>;
+ renesas,clock-indices = <R8A7792_CLK_SDHI0>;
+ clock-output-names = "sdhi0";
+ };
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a7792-mstp-clocks",
"renesas,cpg-mstp-clocks";
@@ -355,15 +841,65 @@
"renesas,cpg-mstp-clocks";
reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
clocks = <&zs_clk>, <&zs_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>;
+ <&p_clk>, <&p_clk>, <&zx_clk>, <&zx_clk>;
#clock-cells = <1>;
clock-indices = <
R8A7792_CLK_HSCIF1 R8A7792_CLK_HSCIF0
R8A7792_CLK_SCIF3 R8A7792_CLK_SCIF2
R8A7792_CLK_SCIF1 R8A7792_CLK_SCIF0
+ R8A7792_CLK_DU1 R8A7792_CLK_DU0
>;
clock-output-names = "hscif1", "hscif0", "scif3",
- "scif2", "scif1", "scif0";
+ "scif2", "scif1", "scif0",
+ "du1", "du0";
+ };
+ mstp8_clks: mstp8_clks@e6150990 {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150990 0 4>, <0 0xe61509a0 0 4>;
+ clocks = <&zg_clk>, <&zg_clk>, <&zg_clk>, <&zg_clk>,
+ <&zg_clk>, <&zg_clk>, <&hp_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7792_CLK_VIN5 R8A7792_CLK_VIN4
+ R8A7792_CLK_VIN3 R8A7792_CLK_VIN2
+ R8A7792_CLK_VIN1 R8A7792_CLK_VIN0
+ R8A7792_CLK_ETHERAVB
+ >;
+ clock-output-names = "vin5", "vin4", "vin3", "vin2",
+ "vin1", "vin0", "etheravb";
+ };
+ mstp9_clks: mstp9_clks@e6150994 {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
+ clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
+ <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
+ <&cp_clk>, <&cp_clk>, <&p_clk>, <&p_clk>,
+ <&cpg_clocks R8A7792_CLK_QSPI>,
+ <&cp_clk>, <&cp_clk>, <&hp_clk>, <&hp_clk>,
+ <&hp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7792_CLK_GPIO7 R8A7792_CLK_GPIO6
+ R8A7792_CLK_GPIO5 R8A7792_CLK_GPIO4
+ R8A7792_CLK_GPIO3 R8A7792_CLK_GPIO2
+ R8A7792_CLK_GPIO1 R8A7792_CLK_GPIO0
+ R8A7792_CLK_GPIO11 R8A7792_CLK_GPIO10
+ R8A7792_CLK_CAN1 R8A7792_CLK_CAN0
+ R8A7792_CLK_QSPI_MOD
+ R8A7792_CLK_GPIO9 R8A7792_CLK_GPIO8
+ R8A7792_CLK_I2C5 R8A7792_CLK_I2C4
+ R8A7792_CLK_I2C3 R8A7792_CLK_I2C2
+ R8A7792_CLK_I2C1 R8A7792_CLK_I2C0
+ >;
+ clock-output-names =
+ "gpio7", "gpio6", "gpio5", "gpio4",
+ "gpio3", "gpio2", "gpio1", "gpio0",
+ "gpio11", "gpio10", "can1", "can0",
+ "qspi_mod", "gpio9", "gpio8",
+ "i2c5", "i2c4", "i2c3", "i2c2",
+ "i2c1", "i2c0";
};
};
@@ -382,4 +918,12 @@
/* This value must be overridden by the board. */
clock-frequency = <0>;
};
+
+ /* External CAN clock */
+ can_clk: can {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 1ad37d431a2a..8d1b35afaf82 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -10,6 +10,7 @@
/dts-v1/;
#include "r8a7794.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "Alt";
@@ -29,6 +30,63 @@
reg = <0 0x40000000 0 0x40000000>;
};
+ d3_3v: regulator-d3-3v {
+ compatible = "regulator-fixed";
+ regulator-name = "D3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_sdhi0: regulator-vcc-sdhi0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio2 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhi0: regulator-vccq-sdhi0 {
+ compatible = "regulator-gpio";
+
+ regulator-name = "SDHI0 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1
+ 1800000 0>;
+ };
+
+ vcc_sdhi1: regulator-vcc-sdhi1 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI1 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio4 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhi1: regulator-vccq-sdhi1 {
+ compatible = "regulator-gpio";
+
+ regulator-name = "SDHI1 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1
+ 1800000 0>;
+ };
+
lbsc {
#address-cells = <1>;
#size-cells = <1>;
@@ -140,6 +198,21 @@
groups = "vin0_data8", "vin0_clk";
function = "vin0";
};
+
+ mmcif0_pins: mmcif0 {
+ groups = "mmc_data8", "mmc_ctrl";
+ function = "mmc";
+ };
+
+ sdhi0_pins: sd0 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ };
+
+ sdhi1_pins: sd1 {
+ groups = "sdhi1_data4", "sdhi1_ctrl";
+ function = "sdhi1";
+ };
};
&cmt0 {
@@ -169,6 +242,39 @@
};
};
+&mmcif0 {
+ pinctrl-0 = <&mmcif0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&d3_3v>;
+ vqmmc-supply = <&d3_3v>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ vqmmc-supply = <&vccq_sdhi0>;
+ cd-gpios = <&gpio6 6 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&sdhi1 {
+ pinctrl-0 = <&sdhi1_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&vcc_sdhi1>;
+ vqmmc-supply = <&vccq_sdhi1>;
+ cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
&i2c1 {
pinctrl-0 = <&i2c1_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index cf24f45fff22..cf880ac06f4b 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -10,6 +10,17 @@
* kind, whether express or implied.
*/
+/*
+ * SSI-AK4643
+ *
+ * SW1: 2-1: AK4643
+ * 2-3: ADV7511
+ *
+ * This command is required before playback/capture:
+ *
+ * amixer set "LINEOUT Mixer DACL" on
+ */
+
/dts-v1/;
#include "r8a7794.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -119,6 +130,29 @@
#clock-cells = <0>;
clock-frequency = <74250000>;
};
+
+ x9_clk: audio_clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12288000>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&soundcodec>;
+ simple-audio-card,frame-master = <&soundcodec>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ soundcodec: simple-audio-card,codec {
+ sound-dai = <&ak4643>;
+ clocks = <&x9_clk>;
+ };
+ };
};
&extal_clk {
@@ -193,6 +227,16 @@
groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_clk0_out";
function = "du1";
};
+
+ ssi_pins: sound {
+ groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+ function = "ssi";
+ };
+
+ audio_clk_pins: audio_clk {
+ groups = "audio_clkc";
+ function = "audio_clk";
+ };
};
&scif2 {
@@ -230,6 +274,12 @@
status = "okay";
clock-frequency = <400000>;
+ ak4643: codec@12 {
+ compatible = "asahi-kasei,ak4643";
+ #sound-dai-cells = <0>;
+ reg = <0x12>;
+ };
+
composite-in@20 {
compatible = "adi,adv7180";
reg = <0x20>;
@@ -392,3 +442,23 @@
};
};
};
+
+&rcar_sound {
+ pinctrl-0 = <&ssi_pins &audio_clk_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ /* Single DAI */
+ #sound-dai-cells = <0>;
+
+ rcar_sound,dai {
+ dai0 {
+ playback = <&ssi0>;
+ capture = <&ssi1>;
+ };
+ };
+};
+
+&ssi1 {
+ shared-pin;
+};
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 685f986cf962..9365580a194f 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -296,6 +296,34 @@
dma-channels = <15>;
};
+ audma0: dma-controller@ec700000 {
+ compatible = "renesas,dmac-r8a7794", "renesas,rcar-dmac";
+ reg = <0 0xec700000 0 0x10000>;
+ interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3", "ch4", "ch5",
+ "ch6", "ch7", "ch8", "ch9", "ch10", "ch11",
+ "ch12";
+ clocks = <&mstp5_clks R8A7794_CLK_AUDIO_DMAC0>;
+ clock-names = "fck";
+ power-domains = <&cpg_clocks>;
+ #dma-cells = <1>;
+ dma-channels = <13>;
+ };
+
scifa0: serial@e6c40000 {
compatible = "renesas,scifa-r8a7794",
"renesas,rcar-gen2-scifa", "renesas,scifa";
@@ -697,7 +725,7 @@
sdhi0: sd@ee100000 {
compatible = "renesas,sdhi-r8a7794";
- reg = <0 0xee100000 0 0x200>;
+ reg = <0 0xee100000 0 0x328>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
@@ -866,6 +894,22 @@
};
};
+ vsp1@fe928000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe928000 0 0x8000>;
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7794_CLK_VSP1_S>;
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ };
+
+ vsp1@fe930000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe930000 0 0x8000>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7794_CLK_VSP1_DU0>;
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ };
+
du: display@feb00000 {
compatible = "renesas,du-r8a7794";
reg = <0 0xfeb00000 0 0x40000>;
@@ -952,6 +996,27 @@
clock-frequency = <0>;
};
+ /*
+ * The external audio clocks are configured as 0 Hz fixed
+ * frequency clocks by default. Boards that provide audio
+ * clocks should override them.
+ */
+ audio_clka: audio_clka {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+ audio_clkb: audio_clkb {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+ audio_clkc: audio_clkc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
/* Special CPG clocks */
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7794-cpg-clocks",
@@ -1183,6 +1248,15 @@
clock-indices = <R8A7794_CLK_IRQC>;
clock-output-names = "irqc";
};
+ mstp5_clks: mstp5_clks@e6150144 {
+ compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
+ clocks = <&hp_clk>, <&p_clk>;
+ #clock-cells = <1>;
+ clock-indices = <R8A7794_CLK_AUDIO_DMAC0
+ R8A7794_CLK_PWM>;
+ clock-output-names = "audmac0", "pwm";
+ };
mstp7_clks: mstp7_clks@e615014c {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
@@ -1237,6 +1311,58 @@
"gpio1", "gpio0", "rcan1", "rcan0", "qspi_mod",
"i2c5", "i2c4", "i2c3", "i2c2", "i2c1", "i2c0";
};
+ mstp10_clks: mstp10_clks@e6150998 {
+ compatible = "renesas,r8a7794-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
+ clocks = <&p_clk>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&p_clk>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>,
+ <&mstp10_clks R8A7794_CLK_SCU_ALL>;
+ #clock-cells = <1>;
+ clock-indices = <R8A7794_CLK_SSI_ALL
+ R8A7794_CLK_SSI9 R8A7794_CLK_SSI8
+ R8A7794_CLK_SSI7 R8A7794_CLK_SSI6
+ R8A7794_CLK_SSI5 R8A7794_CLK_SSI4
+ R8A7794_CLK_SSI3 R8A7794_CLK_SSI2
+ R8A7794_CLK_SSI1 R8A7794_CLK_SSI0
+ R8A7794_CLK_SCU_ALL
+ R8A7794_CLK_SCU_DVC1
+ R8A7794_CLK_SCU_DVC0
+ R8A7794_CLK_SCU_CTU1_MIX1
+ R8A7794_CLK_SCU_CTU0_MIX0
+ R8A7794_CLK_SCU_SRC6
+ R8A7794_CLK_SCU_SRC5
+ R8A7794_CLK_SCU_SRC4
+ R8A7794_CLK_SCU_SRC3
+ R8A7794_CLK_SCU_SRC2
+ R8A7794_CLK_SCU_SRC1>;
+ clock-output-names = "ssi-all", "ssi9", "ssi8", "ssi7",
+ "ssi6", "ssi5", "ssi4", "ssi3",
+ "ssi2", "ssi1", "ssi0",
+ "scu-all", "scu-dvc1", "scu-dvc0",
+ "scu-ctu1-mix1", "scu-ctu0-mix0",
+ "scu-src6", "scu-src5", "scu-src4",
+ "scu-src3", "scu-src2", "scu-src1";
+ };
mstp11_clks: mstp11_clks@e615099c {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe615099c 0 4>, <0 0xe61509ac 0 4>;
@@ -1306,4 +1432,185 @@
#iommu-cells = <1>;
status = "disabled";
};
+
+ rcar_sound: sound@ec500000 {
+ /*
+ * #sound-dai-cells is required
+ *
+ * Single DAI : #sound-dai-cells = <0>; <&rcar_sound>;
+ * Multi DAI : #sound-dai-cells = <1>; <&rcar_sound N>;
+ */
+ compatible = "renesas,rcar_sound-r8a7794",
+ "renesas,rcar_sound-gen2";
+ reg = <0 0xec500000 0 0x1000>, /* SCU */
+ <0 0xec5a0000 0 0x100>, /* ADG */
+ <0 0xec540000 0 0x1000>, /* SSIU */
+ <0 0xec541000 0 0x280>, /* SSI */
+ <0 0xec740000 0 0x200>; /* Audio DMAC peri peri */
+ reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+ clocks = <&mstp10_clks R8A7794_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7794_CLK_SSI9>,
+ <&mstp10_clks R8A7794_CLK_SSI8>,
+ <&mstp10_clks R8A7794_CLK_SSI7>,
+ <&mstp10_clks R8A7794_CLK_SSI6>,
+ <&mstp10_clks R8A7794_CLK_SSI5>,
+ <&mstp10_clks R8A7794_CLK_SSI4>,
+ <&mstp10_clks R8A7794_CLK_SSI3>,
+ <&mstp10_clks R8A7794_CLK_SSI2>,
+ <&mstp10_clks R8A7794_CLK_SSI1>,
+ <&mstp10_clks R8A7794_CLK_SSI0>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC6>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC5>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC4>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC3>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC2>,
+ <&mstp10_clks R8A7794_CLK_SCU_SRC1>,
+ <&mstp10_clks R8A7794_CLK_SCU_CTU0_MIX0>,
+ <&mstp10_clks R8A7794_CLK_SCU_CTU1_MIX1>,
+ <&mstp10_clks R8A7794_CLK_SCU_CTU0_MIX0>,
+ <&mstp10_clks R8A7794_CLK_SCU_CTU1_MIX1>,
+ <&mstp10_clks R8A7794_CLK_SCU_DVC0>,
+ <&mstp10_clks R8A7794_CLK_SCU_DVC1>,
+ <&audio_clka>, <&audio_clkb>, <&audio_clkc>,
+ <&m2_clk>;
+ clock-names = "ssi-all",
+ "ssi.9", "ssi.8", "ssi.7", "ssi.6", "ssi.5",
+ "ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
+ "src.6", "src.5", "src.4", "src.3", "src.2",
+ "src.1",
+ "ctu.0", "ctu.1",
+ "mix.0", "mix.1",
+ "dvc.0", "dvc.1",
+ "clk_a", "clk_b", "clk_c", "clk_i";
+ power-domains = <&cpg_clocks>;
+
+ status = "disabled";
+
+ rcar_sound,dvc {
+ dvc0: dvc@0 {
+ dmas = <&audma0 0xbc>;
+ dma-names = "tx";
+ };
+ dvc1: dvc@1 {
+ dmas = <&audma0 0xbe>;
+ dma-names = "tx";
+ };
+ };
+
+ rcar_sound,mix {
+ mix0: mix@0 { };
+ mix1: mix@1 { };
+ };
+
+ rcar_sound,ctu {
+ ctu00: ctu@0 { };
+ ctu01: ctu@1 { };
+ ctu02: ctu@2 { };
+ ctu03: ctu@3 { };
+ ctu10: ctu@4 { };
+ ctu11: ctu@5 { };
+ ctu12: ctu@6 { };
+ ctu13: ctu@7 { };
+ };
+
+ rcar_sound,src {
+ src@0 {
+ status = "disabled";
+ };
+ src1: src@1 {
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x87>, <&audma0 0x9c>;
+ dma-names = "rx", "tx";
+ };
+ src2: src@2 {
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x89>, <&audma0 0x9e>;
+ dma-names = "rx", "tx";
+ };
+ src3: src@3 {
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8b>, <&audma0 0xa0>;
+ dma-names = "rx", "tx";
+ };
+ src4: src@4 {
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8d>, <&audma0 0xb0>;
+ dma-names = "rx", "tx";
+ };
+ src5: src@5 {
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8f>, <&audma0 0xb2>;
+ dma-names = "rx", "tx";
+ };
+ src6: src@6 {
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x91>, <&audma0 0xb4>;
+ dma-names = "rx", "tx";
+ };
+ };
+
+ rcar_sound,ssi {
+ ssi0: ssi@0 {
+ interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x01>, <&audma0 0x02>,
+ <&audma0 0x15>, <&audma0 0x16>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi1: ssi@1 {
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x03>, <&audma0 0x04>,
+ <&audma0 0x49>, <&audma0 0x4a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi2: ssi@2 {
+ interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x05>, <&audma0 0x06>,
+ <&audma0 0x63>, <&audma0 0x64>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi3: ssi@3 {
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x07>, <&audma0 0x08>,
+ <&audma0 0x6f>, <&audma0 0x70>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi4: ssi@4 {
+ interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x09>, <&audma0 0x0a>,
+ <&audma0 0x71>, <&audma0 0x72>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi5: ssi@5 {
+ interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0b>, <&audma0 0x0c>,
+ <&audma0 0x73>, <&audma0 0x74>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi6: ssi@6 {
+ interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0d>, <&audma0 0x0e>,
+ <&audma0 0x75>, <&audma0 0x76>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi7: ssi@7 {
+ interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0f>, <&audma0 0x10>,
+ <&audma0 0x79>, <&audma0 0x7a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi8: ssi@8 {
+ interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x11>, <&audma0 0x12>,
+ <&audma0 0x7b>, <&audma0 0x7c>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi9: ssi@9 {
+ interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x13>, <&audma0 0x14>,
+ <&audma0 0x7d>, <&audma0 0x7e>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 843d2be2e4e9..a935523a1eb8 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -43,6 +43,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/clock/rk3036-cru.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
#include "skeleton.dtsi"
/ {
@@ -313,8 +314,17 @@
};
grf: syscon@20008000 {
- compatible = "rockchip,rk3036-grf", "syscon";
+ compatible = "rockchip,rk3036-grf", "syscon", "simple-mfd";
reg = <0x20008000 0x1000>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x1d8>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
};
acodec: acodec-ana@20030000 {
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index 452ca2441e84..041dd5d2d18c 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -206,6 +206,10 @@
};
};
+&panel {
+ power-supply = <&vcc_lcd>;
+};
+
&pinctrl {
lcd {
lcd_en: lcd-en {
diff --git a/arch/arm/boot/dts/rk3288-evb-rk808.dts b/arch/arm/boot/dts/rk3288-evb-rk808.dts
index 736b08b0bfdd..44ebc6e59b3a 100644
--- a/arch/arm/boot/dts/rk3288-evb-rk808.dts
+++ b/arch/arm/boot/dts/rk3288-evb-rk808.dts
@@ -233,3 +233,7 @@
};
};
};
+
+&panel {
+ power-supply = <&vcc_lcd>;
+};
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 963365d12208..d59208b5eb6c 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -48,7 +48,7 @@
reg = <0x0 0x80000000>;
};
- backlight {
+ backlight: backlight {
compatible = "pwm-backlight";
brightness-levels = <
0 1 2 3 4 5 6 7
@@ -97,6 +97,21 @@
#clock-cells = <0>;
};
+ panel: panel {
+ compatible ="lg,lp079qx1-sp0v", "simple-panel";
+ backlight = <&backlight>;
+ enable-gpios = <&gpio7 4 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&lcd_cs>;
+
+ ports {
+ panel_in: port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
autorepeat;
@@ -170,6 +185,28 @@
cpu0-supply = <&vdd_cpu>;
};
+&edp {
+ force-hpd;
+ status = "okay";
+
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_out_panel: endpoint {
+ reg = <0>;
+ remote-endpoint = <&panel_in_edp>;
+ };
+ };
+ };
+};
+
+&edp_phy {
+ status = "okay";
+};
+
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
@@ -280,6 +317,12 @@
};
};
+ lcd {
+ lcd_cs: lcd-cs {
+ rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int: pmic-int {
rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts
new file mode 100644
index 000000000000..2e3c34135ed8
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-fennec.dts
@@ -0,0 +1,382 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk3288.dtsi"
+
+/ {
+ model = "Rockchip RK3288 Fennec Board";
+ compatible = "rockchip,rk3288-fennec", "rockchip,rk3288";
+
+ memory {
+ reg = <0x0 0x80000000>;
+ device_type = "memory";
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ };
+
+ vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
+ status = "okay";
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>, <&phy_rst>, <&phy_pmeb>, <&phy_int>;
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-gpio = <&gpio4 8 GPIO_ACTIVE_LOW>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int &global_pwroff>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_io>;
+ vcc9-supply = <&vcc_io>;
+ vcc10-supply = <&vcc_io>;
+ vcc11-supply = <&vcc_io>;
+ vcc12-supply = <&vcc_io>;
+ vddio-supply = <&vcc_io>;
+
+ regulators {
+ vdd_cpu: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "vdd_arm";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd_gpu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_io";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_pmu: LDO_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_pmu";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_33: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_33";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd_10";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_wl: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_wl";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd10_lcd: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd10_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_18: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_18";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc18_lcd: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sd: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_sd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_lan: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_lan";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&pinctrl {
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ gmac {
+ phy_int: phy-int {
+ rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_pmeb: phy-pmeb {
+ rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_rst: phy-rst {
+ rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usbphy {
+ host_drv: host-drv {
+ rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&host_drv>;
+ vbus_drv-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host1 {
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+};
+
+&usb_hsic {
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
new file mode 100644
index 000000000000..ec418c99de95
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
@@ -0,0 +1,310 @@
+/*
+ * Device tree file for Firefly Rockchip RK3288 Core board
+ * Copyright (c) 2016 Randy Li <ayaka@soulik.info>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+#include "rk3288.dtsi"
+
+/ {
+ memory {
+ device_type = "memory";
+ reg = <0 0x80000000>;
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ };
+
+ vcc_flash: flash-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_flash";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_io>;
+ };
+};
+
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc_flash>;
+ status = "okay";
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>, <&phy_rst>, <&phy_pmeb>, <&phy_int>;
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-gpio = <&gpio4 8 GPIO_ACTIVE_LOW>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ vdd_cpu: syr827@40 {
+ compatible = "silergy,syr827";
+ fcs,suspend-voltage-selector = <1>;
+ reg = <0x40>;
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <300>;
+ regulator-ramp-delay = <8000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vdd_gpu: syr828@41 {
+ compatible = "silergy,syr828";
+ fcs,suspend-voltage-selector = <1>;
+ reg = <0x41>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ act8846: act8846@5a {
+ compatible = "active-semi,act8846";
+ reg = <0x5a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_vsel>, <&pwr_hold>;
+ system-power-controller;
+
+ vp1-supply = <&vcc_sys>;
+ vp2-supply = <&vcc_sys>;
+ vp3-supply = <&vcc_sys>;
+ vp4-supply = <&vcc_sys>;
+ inl1-supply = <&vcc_sys>;
+ inl2-supply = <&vcc_sys>;
+ inl3-supply = <&vcc_20>;
+
+ regulators {
+ vcc_ddr: REG1 {
+ regulator-name = "vcc_ddr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ vcc_io: REG2 {
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdd_log: REG3 {
+ regulator-name = "vdd_log";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ vcc_20: REG4 {
+ regulator-name = "vcc_20";
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ };
+
+ vccio_sd: REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd10_lcd: REG6 {
+ regulator-name = "vdd10_lcd";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vcca_18: REG7 {
+ regulator-name = "vcca_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vcca_33: REG8 {
+ regulator-name = "vcca_33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vcc_lan: REG9 {
+ regulator-name = "vcca_lan";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd_10: REG10 {
+ regulator-name = "vdd_10";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ vccio_wl: vcc_18: REG11 {
+ regulator-name = "vcc_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vcc18_lcd: REG12 {
+ regulator-name = "vcc18_lcd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+};
+
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&vccio_wl>;
+ bb-supply = <&vcc_io>;
+ dvp-supply = <&dovdd_1v8>;
+ flash0-supply = <&vcc_flash>;
+ flash1-supply = <&vcc_lan>;
+ gpio30-supply = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
+ lcdc-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vccio_wl>;
+};
+
+&pinctrl {
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+
+ pcfg_pull_up_drv_12ma: pcfg-pull-up-drv-12ma {
+ bias-pull-up;
+ drive-strength = <12>;
+ };
+
+ act8846 {
+ pwr_hold: pwr-hold {
+ rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+
+ pmic_vsel: pmic-vsel {
+ rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_output_low>;
+ };
+ };
+
+ gmac {
+ phy_int: phy-int {
+ rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_pmeb: phy-pmeb {
+ rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_rst: phy-rst {
+ rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
new file mode 100644
index 000000000000..751bee81128e
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -0,0 +1,403 @@
+/*
+ * Device tree file for Firefly Rockchip RK3288 Reload board
+ * Copyright (c) 2016 Randy Li <ayaka@soulik.info>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3288-firefly-reload-core.dtsi"
+
+/ {
+ model = "Firefly-RK3288-reload";
+ compatible = "firefly,firefly-rk3288-reload", "rockchip,rk3288";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ wakeup-source;
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ label = "GPIO Power";
+ linux,code = <KEY_POWER>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_key>;
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ gpios = <&gpio8 2 GPIO_ACTIVE_LOW>;
+ label = "firefly:blue:power";
+ pinctrl-names = "default";
+ pinctrl-0 = <&power_led>;
+ panic-indicator;
+ };
+
+ work {
+ gpios = <&gpio8 1 GPIO_ACTIVE_LOW>;
+ label = "firefly:blue:user";
+ linux,default-trigger = "rc-feedback";
+ pinctrl-names = "default";
+ pinctrl-0 = <&work_led>;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&hym8563>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable>;
+ reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "SPDIF";
+ simple-audio-card,dai-link@1 { /* S/PDIF - S/PDIF */
+ cpu { sound-dai = <&spdif>; };
+ codec { sound-dai = <&spdif_out>; };
+ };
+ };
+
+ spdif_out: spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ vcc_host_5v: usb-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host_vbus_drv>;
+ regulator-name = "vcc_host_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&vcc_5v>;
+ };
+
+ vcc_5v: vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_sd: sdmmc-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio7 11 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_pwr>;
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <100000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_otg_5v: usb-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&otg_vbus_drv>;
+ regulator-name = "vcc_otg_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&vcc_5v>;
+ };
+
+ dovdd_1v8: dovdd-1v8-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dvp_pwr>;
+ regulator-name = "dovdd_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc28_dvp: vcc28-dvp-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dvp_pwr>;
+ regulator-name = "vcc28_dvp";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ af_28: af_28-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dvp_pwr>;
+ regulator-name = "af_28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ dvdd_1v2: dvdd-1v2-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cif_pwr>;
+ regulator-name = "dvdd_1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc_io>;
+ };
+
+ vbat_wl: wifi-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vbat_wl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_io>;
+ };
+};
+
+&i2c0 {
+ hym8563: hym8563@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ interrupt-parent = <&gpio7>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ codec: es8328@10 {
+ compatible = "everest,es8328";
+ DVDD-supply = <&vcca_33>;
+ AVDD-supply = <&vcca_33>;
+ PVDD-supply = <&vcca_33>;
+ HPVDD-supply = <&vcca_33>;
+ clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
+ clock-names = "i2s_hclk", "i2s_clk";
+ reg = <0x10>;
+ };
+};
+
+&i2s {
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ disable-wp;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&sdio0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ disable-wp;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4>, <&sdio0_cmd>, <&sdio0_clk>, <&sdio0_int>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-ddr50;
+ vmmc-supply = <&vbat_wl>;
+ vqmmc-supply = <&vccio_wl>;
+ status = "okay";
+};
+
+&spdif {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>, <&uart0_cts>, <&uart0_rts>;
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
+
+&usb_host1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbhub_rst>;
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+};
+
+&pinctrl {
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <7 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ dvp {
+ dvp_pwr: dvp-pwr {
+ rockchip,pins = <0 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cif_pwr: cif-pwr {
+ rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hym8563 {
+ rtc_int: rtc-int {
+ rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ keys {
+ pwr_key: pwr-key {
+ rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ power_led: power-led {
+ rockchip,pins = <8 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ work_led: work-led {
+ rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ /*
+ * The default drive strength isn't enough to achieve even
+ * high-speed mode on the firefly board, so bump it up to 12 mA.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ };
+
+ sdmmc_pwr: sdmmc-pwr {
+ rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio {
+ wifi_enable: wifi-enable {
+ rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb_host {
+ host_vbus_drv: host-vbus-drv {
+ rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbhub_rst: usbhub-rst {
+ rockchip,pins = <8 3 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+
+ usb_otg {
+ otg_vbus_drv: otg-vbus-drv {
+ rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index dda8d259bb6d..56dd377d5658 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -387,12 +387,16 @@
interrupts = <1 IRQ_TYPE_EDGE_RISING>;
pinctrl-names = "default";
pinctrl-0 = <&comp_int>;
+ vdd-supply = <&vcc_io>;
+ vid-supply = <&vcc_io>;
};
- l3g4200d: l3g4200d@68 {
+ l3g4200d: l3g4200d@69 {
compatible = "st,l3g4200d-gyro";
st,drdy-int-pin = <2>;
- reg = <0x6b>;
+ reg = <0x69>;
+ vdd-supply = <&vcc_io>;
+ vddio-supply = <&vcc_io>;
};
mma8452: mma8452@1d {
@@ -525,3 +529,7 @@
&usbphy {
status = "okay";
};
+
+&usb_otg {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 91c4b3c7a8d5..17ec2e2d7a60 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -45,6 +45,7 @@
#include <dt-bindings/clock/rk3288-cru.h>
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/power/rk3288-power.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
#include "skeleton.dtsi"
/ {
@@ -793,6 +794,15 @@
clocks = <&cru ACLK_GPU>;
};
};
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x94>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
};
sgrf: syscon@ff740000 {
@@ -834,6 +844,37 @@
compatible = "rockchip,rk3288-io-voltage-domain";
status = "disabled";
};
+
+ usbphy: usbphy {
+ compatible = "rockchip,rk3288-usb-phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ usbphy0: usb-phy@320 {
+ #phy-cells = <0>;
+ reg = <0x320>;
+ clocks = <&cru SCLK_OTGPHY0>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ };
+
+ usbphy1: usb-phy@334 {
+ #phy-cells = <0>;
+ reg = <0x334>;
+ clocks = <&cru SCLK_OTGPHY1>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ };
+
+ usbphy2: usb-phy@348 {
+ #phy-cells = <0>;
+ reg = <0x348>;
+ clocks = <&cru SCLK_OTGPHY2>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ };
+ };
};
wdt: watchdog@ff800000 {
@@ -1087,38 +1128,6 @@
};
};
- usbphy: phy {
- compatible = "rockchip,rk3288-usb-phy";
- rockchip,grf = <&grf>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- usbphy0: usb-phy@320 {
- #phy-cells = <0>;
- reg = <0x320>;
- clocks = <&cru SCLK_OTGPHY0>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
-
- usbphy1: usb-phy@334 {
- #phy-cells = <0>;
- reg = <0x334>;
- clocks = <&cru SCLK_OTGPHY1>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
-
- usbphy2: usb-phy@348 {
- #phy-cells = <0>;
- reg = <0x348>;
- clocks = <&cru SCLK_OTGPHY2>;
- clock-names = "phyclk";
- #clock-cells = <0>;
- };
- };
-
pinctrl: pinctrl {
compatible = "rockchip,rk3288-pinctrl";
rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index e2cd683b4e4b..e15beb3c671e 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -43,6 +43,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
#include "skeleton.dtsi"
/ {
@@ -246,8 +247,17 @@
};
pmu: pmu@20004000 {
- compatible = "rockchip,rk3066-pmu", "syscon";
+ compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
reg = <0x20004000 0x100>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x40>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
};
grf: grf@20008000 {
diff --git a/arch/arm/boot/dts/s3c2416-pinctrl.dtsi b/arch/arm/boot/dts/s3c2416-pinctrl.dtsi
index 527e3193817f..6274359fb323 100644
--- a/arch/arm/boot/dts/s3c2416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s3c2416-pinctrl.dtsi
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl_0 {
/*
* Pin banks
@@ -83,91 +85,91 @@
uart0_data: uart0-data {
samsung,pins = "gph-0", "gph-1";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gph-8", "gph-9";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart1_data: uart1-data {
samsung,pins = "gph-2", "gph-3";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gph-10", "gph-11";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart2_data: uart2-data {
samsung,pins = "gph-4", "gph-5";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gph-6", "gph-7";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
uart3_data: uart3-data {
samsung,pins = "gph-6", "gph-7";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
extuart_clk: extuart-clk {
samsung,pins = "gph-12";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpe-14", "gpe-15";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpe-11", "gpe-12", "gpe-13";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd0_clk: sd0-clk {
samsung,pins = "gpe-5";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpe-6";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd0_bus1: sd0-bus1 {
samsung,pins = "gpe-7";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd0_bus4: sd0-bus4 {
samsung,pins = "gpe-8", "gpe-9", "gpe-10";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpl-8";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpl-9";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd1_bus1: sd1-bus1 {
samsung,pins = "gpl-0";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
sd1_bus4: sd1-bus4 {
samsung,pins = "gpl-1", "gpl-2", "gpl-3";
- samsung,pin-function = <2>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
};
};
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
index a25debb50401..f4afda3594f8 100644
--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
@@ -201,13 +201,13 @@
&pinctrl0 {
gpio_leds: gpio-leds {
samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7";
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
gpio_keys: gpio-keys {
samsung,pins = "gpn-0", "gpn-1", "gpn-2", "gpn-3",
"gpn-4", "gpn-5", "gpl-11", "gpl-12";
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
};
diff --git a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
index b1197d8b04de..4e8e802b4ee1 100644
--- a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
@@ -12,9 +12,7 @@
* published by the Free Software Foundation.
*/
-#define PIN_PULL_NONE 0
-#define PIN_PULL_DOWN 1
-#define PIN_PULL_UP 2
+#include <dt-bindings/pinctrl/samsung.h>
&pinctrl0 {
/*
@@ -138,514 +136,514 @@
uart0_data: uart0-data {
samsung,pins = "gpa-0", "gpa-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa-2", "gpa-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
uart1_data: uart1-data {
samsung,pins = "gpa-4", "gpa-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa-6", "gpa-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
uart2_data: uart2-data {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
uart3_data: uart3-data {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
ext_dma_0: ext-dma-0 {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
ext_dma_1: ext-dma-1 {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
irda_data_0: irda-data-0 {
samsung,pins = "gpb-0", "gpb-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
irda_data_1: irda-data-1 {
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
irda_sdbw: irda-sdbw {
samsung,pins = "gpb-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpb-5", "gpb-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
i2c1_bus: i2c1-bus {
/* S3C6410-only */
samsung,pins = "gpb-2", "gpb-3";
- samsung,pin-function = <6>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_6>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpc-0", "gpc-1", "gpc-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
spi0_cs: spi0-cs {
samsung,pins = "gpc-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpc-4", "gpc-5", "gpc-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
spi1_cs: spi1-cs {
samsung,pins = "gpc-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpg-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd0_clk: sd0-clk {
samsung,pins = "gpg-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd0_bus1: sd0-bus1 {
samsung,pins = "gpg-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd0_bus4: sd0-bus4 {
samsung,pins = "gpg-2", "gpg-3", "gpg-4", "gpg-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpg-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gph-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd1_clk: sd1-clk {
samsung,pins = "gph-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd1_bus1: sd1-bus1 {
samsung,pins = "gph-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd1_bus4: sd1-bus4 {
samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd1_bus8: sd1-bus8 {
samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5",
"gph-6", "gph-7", "gph-8", "gph-9";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpg-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_UP>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpc-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpc-5";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd2_bus1: sd2-bus1 {
samsung,pins = "gph-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
sd2_bus4: sd2-bus4 {
samsung,pins = "gph-6", "gph-7", "gph-8", "gph-9";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s0_bus: i2s0-bus {
samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s0_cdclk: i2s0-cdclk {
samsung,pins = "gpd-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s1_cdclk: i2s1-cdclk {
samsung,pins = "gpe-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s2_bus: i2s2-bus {
/* S3C6410-only */
samsung,pins = "gpc-4", "gpc-5", "gpc-6", "gph-6",
"gph-8", "gph-9";
- samsung,pin-function = <5>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
i2s2_cdclk: i2s2-cdclk {
/* S3C6410-only */
samsung,pins = "gph-7";
- samsung,pin-function = <5>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pcm0_bus: pcm0-bus {
samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pcm0_extclk: pcm0-extclk {
samsung,pins = "gpd-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pcm1_extclk: pcm1-extclk {
samsung,pins = "gpe-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
ac97_bus_0: ac97-bus-0 {
samsung,pins = "gpd-0", "gpd-1", "gpd-2", "gpd-3", "gpd-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
ac97_bus_1: ac97-bus-1 {
samsung,pins = "gpe-0", "gpe-1", "gpe-2", "gpe-3", "gpe-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
cam_port: cam-port {
samsung,pins = "gpf-0", "gpf-1", "gpf-2", "gpf-4",
"gpf-5", "gpf-6", "gpf-7", "gpf-8",
"gpf-9", "gpf-10", "gpf-11", "gpf-12";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
cam_rst: cam-rst {
samsung,pins = "gpf-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
cam_field: cam-field {
/* S3C6410-only */
samsung,pins = "gpb-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pwm_extclk: pwm-extclk {
samsung,pins = "gpf-13";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpf-14";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpf-15";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
clkout0: clkout-0 {
samsung,pins = "gpf-14";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col0_0: keypad-col0-0 {
samsung,pins = "gph-0";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col1_0: keypad-col1-0 {
samsung,pins = "gph-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col2_0: keypad-col2-0 {
samsung,pins = "gph-2";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col3_0: keypad-col3-0 {
samsung,pins = "gph-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col4_0: keypad-col4-0 {
samsung,pins = "gph-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col5_0: keypad-col5-0 {
samsung,pins = "gph-5";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col6_0: keypad-col6-0 {
samsung,pins = "gph-6";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col7_0: keypad-col7-0 {
samsung,pins = "gph-7";
- samsung,pin-function = <4>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col0_1: keypad-col0-1 {
samsung,pins = "gpl-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col1_1: keypad-col1-1 {
samsung,pins = "gpl-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col2_1: keypad-col2-1 {
samsung,pins = "gpl-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col3_1: keypad-col3-1 {
samsung,pins = "gpl-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col4_1: keypad-col4-1 {
samsung,pins = "gpl-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col5_1: keypad-col5-1 {
samsung,pins = "gpl-5";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col6_1: keypad-col6-1 {
samsung,pins = "gpl-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_col7_1: keypad-col7-1 {
samsung,pins = "gpl-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row0_0: keypad-row0-0 {
samsung,pins = "gpk-8";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row1_0: keypad-row1-0 {
samsung,pins = "gpk-9";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row2_0: keypad-row2-0 {
samsung,pins = "gpk-10";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row3_0: keypad-row3-0 {
samsung,pins = "gpk-11";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row4_0: keypad-row4-0 {
samsung,pins = "gpk-12";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row5_0: keypad-row5-0 {
samsung,pins = "gpk-13";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row6_0: keypad-row6-0 {
samsung,pins = "gpk-14";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row7_0: keypad-row7-0 {
samsung,pins = "gpk-15";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row0_1: keypad-row0-1 {
samsung,pins = "gpn-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row1_1: keypad-row1-1 {
samsung,pins = "gpn-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row2_1: keypad-row2-1 {
samsung,pins = "gpn-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row3_1: keypad-row3-1 {
samsung,pins = "gpn-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row4_1: keypad-row4-1 {
samsung,pins = "gpn-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row5_1: keypad-row5-1 {
samsung,pins = "gpn-5";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row6_1: keypad-row6-1 {
samsung,pins = "gpn-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
keypad_row7_1: keypad-row7-1 {
samsung,pins = "gpn-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
lcd_ctrl: lcd-ctrl {
samsung,pins = "gpj-8", "gpj-9", "gpj-10", "gpj-11";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
lcd_data16: lcd-data-width16 {
@@ -653,8 +651,8 @@
"gpi-7", "gpi-10", "gpi-11", "gpi-12",
"gpi-13", "gpi-14", "gpi-15", "gpj-3",
"gpj-4", "gpj-5", "gpj-6", "gpj-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
lcd_data18: lcd-data-width18 {
@@ -663,8 +661,8 @@
"gpi-12", "gpi-13", "gpi-14", "gpi-15",
"gpj-2", "gpj-3", "gpj-4", "gpj-5",
"gpj-6", "gpj-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
lcd_data24: lcd-data-width24 {
@@ -674,14 +672,14 @@
"gpi-12", "gpi-13", "gpi-14", "gpi-15",
"gpj-0", "gpj-1", "gpj-2", "gpj-3",
"gpj-4", "gpj-5", "gpj-6", "gpj-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
hsi_bus: hsi-bus {
samsung,pins = "gpk-0", "gpk-1", "gpk-2", "gpk-3",
"gpk-4", "gpk-5", "gpk-6", "gpk-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <PIN_PULL_NONE>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
};
diff --git a/arch/arm/boot/dts/s5pv210-aquila.dts b/arch/arm/boot/dts/s5pv210-aquila.dts
index da24ab570b0e..40139923eef0 100644
--- a/arch/arm/boot/dts/s5pv210-aquila.dts
+++ b/arch/arm/boot/dts/s5pv210-aquila.dts
@@ -29,7 +29,7 @@
bootargs = "console=ttySAC2,115200n8 root=/dev/mmcblk1p5 rw rootwait ignore_loglevel earlyprintk";
};
- memory {
+ memory@30000000 {
device_type = "memory";
reg = <0x30000000 0x05000000
0x40000000 0x18000000>;
@@ -387,7 +387,7 @@
&pinctrl0 {
t_flash_detect: t-flash-detect {
samsung,pins = "gph3-4";
- samsung,pin-function = <0>;
- samsung,pin-pud = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
};
diff --git a/arch/arm/boot/dts/s5pv210-goni.dts b/arch/arm/boot/dts/s5pv210-goni.dts
index 0a33d402138e..c56f51ee7897 100644
--- a/arch/arm/boot/dts/s5pv210-goni.dts
+++ b/arch/arm/boot/dts/s5pv210-goni.dts
@@ -29,7 +29,7 @@
bootargs = "console=ttySAC0,115200n8 root=/dev/mmcblk0p5 rw rootwait ignore_loglevel earlyprintk";
};
- memory {
+ memory@30000000 {
device_type = "memory";
reg = <0x30000000 0x05000000
0x40000000 0x10000000
diff --git a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
index 8c714088e3c6..9a3e851e2e22 100644
--- a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
@@ -19,6 +19,8 @@
* published by the Free Software Foundation.
*/
+#include <dt-bindings/pinctrl/samsung.h>
+
&pinctrl0 {
gpa0: gpa0 {
gpio-controller;
@@ -270,559 +272,559 @@
uart0_data: uart0-data {
samsung,pins = "gpa0-0", "gpa0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart0_fctl: uart0-fctl {
samsung,pins = "gpa0-2", "gpa0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_data: uart1-data {
samsung,pins = "gpa0-4", "gpa0-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart1_fctl: uart1-fctl {
samsung,pins = "gpa0-6", "gpa0-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_data: uart2-data {
samsung,pins = "gpa1-0", "gpa1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart2_fctl: uart2-fctl {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart3_data: uart3-data {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
uart_audio: uart-audio {
samsung,pins = "gpa1-2", "gpa1-3";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi0_bus: spi0-bus {
samsung,pins = "gpb-0", "gpb-2", "gpb-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi1_bus: spi1-bus {
samsung,pins = "gpb-4", "gpb-6", "gpb-7";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s0_bus: i2s0-bus {
samsung,pins = "gpi-0", "gpi-1", "gpi-2", "gpi-3",
"gpi-4", "gpi-5", "gpi-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s1_bus: i2s1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm1_bus: pcm1-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
ac97_bus: ac97-bus {
samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
"gpc0-4";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2s2_bus: i2s2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pcm2_bus: pcm2-bus {
samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
"gpc1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spdif_bus: spdif-bus {
samsung,pins = "gpc1-0", "gpc1-1";
- samsung,pin-function = <4>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
spi2_bus: spi2-bus {
samsung,pins = "gpc1-1", "gpc1-2", "gpc1-3", "gpc1-4";
- samsung,pin-function = <5>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c0_bus: i2c0-bus {
samsung,pins = "gpd1-0", "gpd1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c1_bus: i2c1-bus {
samsung,pins = "gpd1-2", "gpd1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
i2c2_bus: i2c2-bus {
samsung,pins = "gpd1-4", "gpd1-5";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm0_out: pwm0-out {
samsung,pins = "gpd0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm1_out: pwm1-out {
samsung,pins = "gpd0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm2_out: pwm2-out {
samsung,pins = "gpd0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
pwm3_out: pwm3-out {
samsung,pins = "gpd0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row0: keypad-row-0 {
samsung,pins = "gph3-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row1: keypad-row-1 {
samsung,pins = "gph3-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row2: keypad-row-2 {
samsung,pins = "gph3-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row3: keypad-row-3 {
samsung,pins = "gph3-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row4: keypad-row-4 {
samsung,pins = "gph3-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row5: keypad-row-5 {
samsung,pins = "gph3-5";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row6: keypad-row-6 {
samsung,pins = "gph3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_row7: keypad-row-7 {
samsung,pins = "gph3-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col0: keypad-col-0 {
samsung,pins = "gph2-0";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col1: keypad-col-1 {
samsung,pins = "gph2-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col2: keypad-col-2 {
samsung,pins = "gph2-2";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col3: keypad-col-3 {
samsung,pins = "gph2-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col4: keypad-col-4 {
samsung,pins = "gph2-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col5: keypad-col-5 {
samsung,pins = "gph2-5";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col6: keypad-col-6 {
samsung,pins = "gph2-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
keypad_col7: keypad-col-7 {
samsung,pins = "gph2-7";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
sd0_clk: sd0-clk {
samsung,pins = "gpg0-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpg0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpg0-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpg0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpg0-3", "gpg0-4", "gpg0-5", "gpg0-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpg1-3", "gpg1-4", "gpg1-5", "gpg1-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpg1-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpg1-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpg1-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpg1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpg1-3", "gpg1-4", "gpg1-5", "gpg1-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpg2-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpg2-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpg2-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpg2-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpg2-3", "gpg2-4", "gpg2-5", "gpg2-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd2_bus8: sd2-bus-width8 {
samsung,pins = "gpg3-3", "gpg3-4", "gpg3-5", "gpg3-6";
- samsung,pin-function = <3>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_clk: sd3-clk {
samsung,pins = "gpg3-0";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cmd: sd3-cmd {
samsung,pins = "gpg3-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_cd: sd3-cd {
samsung,pins = "gpg3-2";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus1: sd3-bus-width1 {
samsung,pins = "gpg3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
sd3_bus4: sd3-bus-width4 {
samsung,pins = "gpg3-3", "gpg3-4", "gpg3-5", "gpg3-6";
- samsung,pin-function = <2>;
- samsung,pin-pud = <2>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
eint0: ext-int0 {
samsung,pins = "gph0-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint8: ext-int8 {
samsung,pins = "gph1-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint15: ext-int15 {
samsung,pins = "gph1-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint16: ext-int16 {
samsung,pins = "gph2-0";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
eint31: ext-int31 {
samsung,pins = "gph3-7";
- samsung,pin-function = <0xf>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_io: cam-port-a-io {
samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
"gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
"gpe1-0", "gpe1-1", "gpe1-2", "gpe1-4";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_a_clk_active: cam-port-a-clk-active {
samsung,pins = "gpe1-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_a_clk_idle: cam-port-a-clk-idle {
samsung,pins = "gpe1-3";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_b_io: cam-port-b-io {
samsung,pins = "gpj0-0", "gpj0-1", "gpj0-2", "gpj0-3",
"gpj0-4", "gpj0-5", "gpj0-6", "gpj0-7",
"gpj1-0", "gpj1-1", "gpj1-2", "gpj1-4";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
cam_port_b_clk_active: cam-port-b-clk-active {
samsung,pins = "gpj1-3";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <3>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
};
cam_port_b_clk_idle: cam-port-b-clk-idle {
samsung,pins = "gpj1-3";
- samsung,pin-function = <0>;
- samsung,pin-pud = <1>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_ctrl: lcd-ctrl {
samsung,pins = "gpd0-0", "gpd0-1";
- samsung,pin-function = <3>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_sync: lcd-sync {
samsung,pins = "gpf0-0", "gpf0-1";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_clk: lcd-clk {
samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
lcd_data24: lcd-data-width24 {
@@ -832,8 +834,8 @@
"gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
"gpf2-4", "gpf2-5", "gpf2-6", "gpf2-7",
"gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
- samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
};
diff --git a/arch/arm/boot/dts/s5pv210-smdkc110.dts b/arch/arm/boot/dts/s5pv210-smdkc110.dts
index 1eedab7ffe94..5d14da911aa5 100644
--- a/arch/arm/boot/dts/s5pv210-smdkc110.dts
+++ b/arch/arm/boot/dts/s5pv210-smdkc110.dts
@@ -29,7 +29,7 @@
bootargs = "console=ttySAC0,115200n8 root=/dev/mmcblk0p1 rw rootwait ignore_loglevel earlyprintk";
};
- memory {
+ memory@20000000 {
device_type = "memory";
reg = <0x20000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts
index 9eb6aff3e38f..75398318ed57 100644
--- a/arch/arm/boot/dts/s5pv210-smdkv210.dts
+++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts
@@ -29,7 +29,7 @@
bootargs = "console=ttySAC0,115200n8 root=/dev/mmcblk0p1 rw rootwait ignore_loglevel earlyprintk";
};
- memory {
+ memory@20000000 {
device_type = "memory";
reg = <0x20000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/s5pv210-torbreck.dts b/arch/arm/boot/dts/s5pv210-torbreck.dts
index 622599fd2cfa..7cb50bcee888 100644
--- a/arch/arm/boot/dts/s5pv210-torbreck.dts
+++ b/arch/arm/boot/dts/s5pv210-torbreck.dts
@@ -29,7 +29,7 @@
bootargs = "console=ttySAC0,115200n8 root=/dev/mmcblk0p1 rw rootwait ignore_loglevel earlyprintk";
};
- memory {
+ memory@20000000 {
device_type = "memory";
reg = <0x20000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index ffc36bd24d2f..a853918be43f 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -19,11 +19,13 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/s5pv210.h>
#include <dt-bindings/clock/s5pv210-audss.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
aliases {
csis0 = &csis0;
fimc0 = &fimc0;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 353d0e5ec83b..7173ec9059a1 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -77,6 +77,35 @@
interrupts = <2 IRQ_TYPE_LEVEL_HIGH 0>;
};
+ etb {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0x740000 0x1000>;
+
+ clocks = <&mck>;
+ clock-names = "apb_pclk";
+
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm_out>;
+ };
+ };
+ };
+
+ etm {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x73C000 0x1000>;
+
+ clocks = <&mck>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm_out: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+ };
+
memory {
reg = <0x20000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
index b41d241de2cd..28b81d60b407 100644
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ b/arch/arm/boot/dts/skeleton.dtsi
@@ -1,4 +1,8 @@
/*
+ * This file is deprecated, and will be removed once existing users have been
+ * updated. New dts{,i} files should *not* include skeleton.dtsi, and should
+ * instead explicitly provide the below nodes only as required.
+ *
* Skeleton device tree; the bare minimum needed to boot; just include and
* add a compatible value. The bootloader will typically populate the memory
* node.
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 94000cbe576b..f520cbff5e1c 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -639,6 +639,22 @@
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>,
<37 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ dma-ecc@ff8c8000 {
+ compatible = "altr,socfpga-dma-ecc";
+ reg = <0xff8c8000 0x400>;
+ altr,ecc-parent = <&pdma>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>,
+ <42 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usb0-ecc@ff8c8800 {
+ compatible = "altr,socfpga-usb-ecc";
+ reg = <0xff8c8800 0x400>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+ <34 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
rst: rstmgr@ffd05000 {
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
index 8a7dfa473e98..040a164ba148 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
@@ -25,3 +25,15 @@
broken-cd;
bus-width = <4>;
};
+
+&eccmgr {
+ sdmmca-ecc@ff8c2c00 {
+ compatible = "altr,socfpga-sdmmc-ecc";
+ reg = <0xff8c2c00 0x400>;
+ altr,ecc-parent = <&mmc>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH>,
+ <47 IRQ_TYPE_LEVEL_HIGH>,
+ <16 IRQ_TYPE_LEVEL_HIGH>,
+ <48 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index d35aa88791ad..1ec46a794a4d 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -140,6 +140,10 @@
0x03020067 // Up
0x0303006c>; // Down
};
+ stmpe0_pwm: stmpe_pwm {
+ compatible = "st,stmpe-pwm";
+ #pwm-cells = <2>;
+ };
};
stmpe1: stmpe2401@44 {
compatible = "st,stmpe2401";
@@ -172,6 +176,50 @@
};
amba {
+ clcd@10120000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&clcd_24bit_mux>;
+ port {
+ nomadik_clcd_pads: endpoint {
+ remote-endpoint = <&nomadik_clcd_panel>;
+ arm,pl11x,tft-r0g0b0-pads = <16 8 0>;
+ };
+ };
+
+ /*
+ * WVGA connector 21
+ * WVGA (800x480): 4.3" TPG110 TDO43MTEA2 24-bit RGB
+ * with TPO touch screen.
+ */
+ panel {
+ compatible = "tpo,tpg110", "panel-dpi";
+ grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
+ scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ backlight = <&bl>;
+
+ port {
+ nomadik_clcd_panel: endpoint {
+ remote-endpoint = <&nomadik_clcd_pads>;
+ };
+ };
+
+ panel-timing {
+ clock-frequency = <33200000>;
+ hactive = <800>;
+ hback-porch = <216>;
+ hfront-porch = <40>;
+ hsync-len = <1>;
+ vactive = <480>;
+ vback-porch = <35>;
+ vfront-porch = <10>;
+ vsync-len = <1>;
+ };
+ };
+ };
+
/* Activate RX/TX and CTS/RTS on UART 0 */
uart0: uart@101fd000 {
pinctrl-names = "default";
@@ -183,4 +231,24 @@
wp-gpios = <&stmpe_gpio44 18 GPIO_ACTIVE_HIGH>;
};
};
+
+ bl: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&stmpe0_pwm 0 500000>;
+ pwm-names = "backlight";
+ brightness-levels = <
+ 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57 58 59
+ 60 61 62 63 64 65 66 67 68 69
+ 70 71 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87 88 89
+ 90 91 92 93 94 95 96 97 98 99
+ 100
+ >;
+ default-brightness-level = <100>;
+ };
};
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d2d532a9d783..adb1c0998b81 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -166,6 +166,24 @@
};
};
};
+ clcd {
+ /*
+ * This should be activated to use the additional
+ * 8 lines for bits 16 thru 23 from the CLCD block.
+ */
+ clcd_24bit_mux: clcd_mux {
+ clcd_24bit_mux {
+ function = "clcd";
+ groups = "clcd_16_23_b_1";
+ };
+ };
+ };
+ };
+
+ /* Power Management Unit */
+ pmu: pmu@101e9000 {
+ compatible = "stericsson,nomadik-pmu", "syscon";
+ reg = <0x101e0000 0x1000>;
};
src: src@101e0000 {
@@ -726,6 +744,16 @@
#size-cells = <1>;
ranges;
+ clcd@10120000 {
+ compatible = "arm,pl110", "arm,primecell";
+ reg = <0x10120000 0x1000>;
+ interrupt-names = "combined";
+ interrupts = <14>;
+ clocks = <&clcdclk>, <&hclkclcd>;
+ clock-names = "clcdclk", "apb_pclk";
+ status = "disabled";
+ };
+
vica: intc@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
diff --git a/arch/arm/boot/dts/stih407-clock.dtsi b/arch/arm/boot/dts/stih407-clock.dtsi
index ad45f5e8fac7..13029c03d7c6 100644
--- a/arch/arm/boot/dts/stih407-clock.dtsi
+++ b/arch/arm/boot/dts/stih407-clock.dtsi
@@ -42,7 +42,7 @@
clockgen_a9_pll: clockgen-a9-pll {
#clock-cells = <1>;
- compatible = "st,stih407-plls-c32-a9", "st,clkgen-plls-c32";
+ compatible = "st,stih407-clkgen-plla9";
clocks = <&clk_sysin>;
@@ -55,7 +55,7 @@
*/
clk_m_a9: clk-m-a9@92b0000 {
#clock-cells = <0>;
- compatible = "st,stih407-clkgen-a9-mux", "st,clkgen-mux";
+ compatible = "st,stih407-clkgen-a9-mux";
reg = <0x92b0000 0x10000>;
clocks = <&clockgen_a9_pll 0>,
@@ -96,7 +96,7 @@
clk_s_a0_pll: clk-s-a0-pll {
#clock-cells = <1>;
- compatible = "st,stih407-plls-c32-a0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -117,7 +117,7 @@
clk_s_c0_quadfs: clk-s-c0-quadfs@9103000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-C", "st,quadfs";
+ compatible = "st,quadfs-pll";
reg = <0x9103000 0x1000>;
clocks = <&clk_sysin>;
@@ -134,7 +134,7 @@
clk_s_c0_pll0: clk-s-c0-pll0 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -143,7 +143,7 @@
clk_s_c0_pll1: clk-s-c0-pll1 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll1";
clocks = <&clk_sysin>;
@@ -199,7 +199,7 @@
clk_s_d0_quadfs: clk-s-d0-quadfs@9104000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9104000 0x1000>;
clocks = <&clk_sysin>;
@@ -216,7 +216,7 @@
clk_s_d0_flexgen: clk-s-d0-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-audio", "st,flexgen";
clocks = <&clk_s_d0_quadfs 0>,
<&clk_s_d0_quadfs 1>,
@@ -233,7 +233,7 @@
clk_s_d2_quadfs: clk-s-d2-quadfs@9106000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9106000 0x1000>;
clocks = <&clk_sysin>;
@@ -256,7 +256,7 @@
clk_s_d2_flexgen: clk-s-d2-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-video", "st,flexgen";
clocks = <&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 1>,
@@ -287,7 +287,7 @@
clk_s_d3_quadfs: clk-s-d3-quadfs@9107000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9107000 0x1000>;
clocks = <&clk_sysin>;
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 8b063ab10c19..5430747c6b73 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -554,7 +554,6 @@
clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
bus-width = <8>;
- non-removable;
};
mmc1: sdhci@09080000 {
@@ -610,6 +609,8 @@
clock-names = "ahci_clk";
clocks = <&clk_s_c0_flexgen CLK_ICN_REG>;
+ ports-implemented = <0x1>;
+
status = "disabled";
};
@@ -633,6 +634,8 @@
clock-names = "ahci_clk";
clocks = <&clk_s_c0_flexgen CLK_ICN_REG>;
+ ports-implemented = <0x1>;
+
status = "disabled";
};
@@ -669,6 +672,7 @@
compatible = "st,sti-pwm";
#pwm-cells = <2>;
reg = <0x9810000 0x68>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm0_chan0_default>;
clock-names = "pwm";
@@ -823,5 +827,172 @@
clock-frequency = <600000000>;
st,syscfg = <&syscfg_core 0x224>;
};
+
+ /* fdma audio */
+ fdma0: dma-controller@8e20000 {
+ compatible = "st,stih407-fdma-mpe31-11", "st,slim-rproc";
+ reg = <0x8e20000 0x8000>,
+ <0x8e30000 0x3000>,
+ <0x8e37000 0x1000>,
+ <0x8e38000 0x8000>;
+ reg-names = "slimcore", "dmem", "peripherals", "imem";
+ clocks = <&clk_s_c0_flexgen CLK_FDMA>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_NONE>;
+ dma-channels = <16>;
+ #dma-cells = <3>;
+ };
+
+ /* fdma app */
+ fdma1: dma-controller@8e40000 {
+ compatible = "st,stih407-fdma-mpe31-12", "st,slim-rproc";
+ reg = <0x8e40000 0x8000>,
+ <0x8e50000 0x3000>,
+ <0x8e57000 0x1000>,
+ <0x8e58000 0x8000>;
+ reg-names = "slimcore", "dmem", "peripherals", "imem";
+ clocks = <&clk_s_c0_flexgen CLK_FDMA>,
+ <&clk_s_c0_flexgen CLK_TX_ICN_DMU>,
+ <&clk_s_c0_flexgen CLK_TX_ICN_DMU>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+
+ interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>;
+ dma-channels = <16>;
+ #dma-cells = <3>;
+ };
+
+ /* fdma free running */
+ fdma2: dma-controller@8e60000 {
+ compatible = "st,stih407-fdma-mpe31-13", "st,slim-rproc";
+ reg = <0x8e60000 0x8000>,
+ <0x8e70000 0x3000>,
+ <0x8e77000 0x1000>,
+ <0x8e78000 0x8000>;
+ reg-names = "slimcore", "dmem", "peripherals", "imem";
+ interrupts = <GIC_SPI 9 IRQ_TYPE_NONE>;
+ dma-channels = <16>;
+ #dma-cells = <3>;
+ clocks = <&clk_s_c0_flexgen CLK_FDMA>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>,
+ <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+ <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+ };
+
+ sti_sasg_codec: sti-sasg-codec {
+ compatible = "st,stih407-sas-codec";
+ #sound-dai-cells = <1>;
+ status = "disabled";
+ st,syscfg = <&syscfg_core>;
+ };
+
+ sti_uni_player0: sti-uni-player@8d80000 {
+ compatible = "st,sti-uni-player";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_0>;
+ assigned-clocks = <&clk_s_d0_quadfs 0>, <&clk_s_d0_flexgen CLK_PCM_0>;
+ assigned-clock-parents = <0>, <&clk_s_d0_quadfs 0>;
+ assigned-clock-rates = <50000000>;
+ reg = <0x8d80000 0x158>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 2 0 1>;
+ dai-name = "Uni Player #0 (HDMI)";
+ dma-names = "tx";
+ st,uniperiph-id = <0>;
+ st,version = <5>;
+ st,mode = "HDMI";
+
+ status = "disabled";
+ };
+
+ sti_uni_player1: sti-uni-player@8d81000 {
+ compatible = "st,sti-uni-player";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_1>;
+ assigned-clocks = <&clk_s_d0_quadfs 1>, <&clk_s_d0_flexgen CLK_PCM_1>;
+ assigned-clock-parents = <0>, <&clk_s_d0_quadfs 1>;
+ assigned-clock-rates = <50000000>;
+ reg = <0x8d81000 0x158>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 3 0 1>;
+ dai-name = "Uni Player #1 (PIO)";
+ dma-names = "tx";
+ st,uniperiph-id = <1>;
+ st,version = <5>;
+ st,mode = "PCM";
+
+ status = "disabled";
+ };
+
+ sti_uni_player2: sti-uni-player@8d82000 {
+ compatible = "st,sti-uni-player";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+ assigned-clocks = <&clk_s_d0_quadfs 2>, <&clk_s_d0_flexgen CLK_PCM_2>;
+ assigned-clock-parents = <0>, <&clk_s_d0_quadfs 2>;
+ assigned-clock-rates = <50000000>;
+ reg = <0x8d82000 0x158>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 4 0 1>;
+ dai-name = "Uni Player #1 (DAC)";
+ dma-names = "tx";
+ st,uniperiph-id = <2>;
+ st,version = <5>;
+ st,mode = "PCM";
+
+ status = "disabled";
+ };
+
+ sti_uni_player3: sti-uni-player@8d85000 {
+ compatible = "st,sti-uni-player";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
+ assigned-clocks = <&clk_s_d0_quadfs 3>, <&clk_s_d0_flexgen CLK_SPDIFF>;
+ assigned-clock-parents = <0>, <&clk_s_d0_quadfs 3>;
+ assigned-clock-rates = <50000000>;
+ reg = <0x8d85000 0x158>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 7 0 1>;
+ dma-names = "tx";
+ dai-name = "Uni Player #1 (PIO)";
+ st,uniperiph-id = <3>;
+ st,version = <5>;
+ st,mode = "SPDIF";
+
+ status = "disabled";
+ };
+
+ sti_uni_reader0: sti-uni-reader@8d83000 {
+ compatible = "st,sti-uni-reader";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ reg = <0x8d83000 0x158>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 5 0 1>;
+ dma-names = "rx";
+ dai-name = "Uni Reader #0 (PCM IN)";
+ st,version = <3>;
+
+ status = "disabled";
+ };
+
+ sti_uni_reader1: sti-uni-reader@8d84000 {
+ compatible = "st,sti-uni-reader";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ reg = <0x8d84000 0x158>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 6 0 1>;
+ dma-names = "rx";
+ dai-name = "Uni Reader #1 (HDMI RX)";
+ st,version = <3>;
+
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi
index a538ae52d32b..c325cc059ae4 100644
--- a/arch/arm/boot/dts/stih407-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi
@@ -58,7 +58,7 @@
pio0: gpio@09610000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0x100>;
@@ -66,7 +66,7 @@
};
pio1: gpio@09611000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -74,7 +74,7 @@
};
pio2: gpio@09612000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -82,7 +82,7 @@
};
pio3: gpio@09613000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -90,7 +90,7 @@
};
pio4: gpio@09614000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -99,7 +99,7 @@
pio5: gpio@09615000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -230,6 +230,13 @@
};
};
+ pinctrl_rgmii1_mdio_1: rgmii1-mdio-1 {
+ st,pins {
+ mdio = <&pio1 0 ALT1 OUT BYPASS 0>;
+ mdc = <&pio1 1 ALT1 OUT NICLK 0 CLK_A>;
+ };
+ };
+
pinctrl_mii1: mii1 {
st,pins {
txd0 = <&pio0 0 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
@@ -289,10 +296,12 @@
pinctrl_pwm1_chan0_default: pwm1-0-default {
st,pins {
pwm-out = <&pio3 0 ALT1 OUT>;
+ pwm-capturein = <&pio3 2 ALT1 IN>;
};
};
pinctrl_pwm1_chan1_default: pwm1-1-default {
st,pins {
+ pwm-capturein = <&pio4 3 ALT1 IN>;
pwm-out = <&pio4 4 ALT1 OUT>;
};
};
@@ -373,7 +382,7 @@
pio10: pio@09200000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0x100>;
@@ -381,7 +390,7 @@
};
pio11: pio@09201000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -389,7 +398,7 @@
};
pio12: pio@09202000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -397,7 +406,7 @@
};
pio13: pio@09203000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -405,7 +414,7 @@
};
pio14: pio@09204000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -413,7 +422,7 @@
};
pio15: pio@09205000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -421,7 +430,7 @@
};
pio16: pio@09206000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x6000 0x100>;
@@ -429,7 +438,7 @@
};
pio17: pio@09207000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x7000 0x100>;
@@ -437,7 +446,7 @@
};
pio18: pio@09208000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x8000 0x100>;
@@ -445,7 +454,7 @@
};
pio19: pio@09209000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x9000 0x100>;
@@ -523,6 +532,13 @@
scl = <&pio15 5 ALT2 BIDIR>;
};
};
+
+ pinctrl_i2c2_alt2_1: i2c2-alt2-1 {
+ st,pins {
+ sda = <&pio12 6 ALT2 BIDIR>;
+ scl = <&pio12 5 ALT2 BIDIR>;
+ };
+ };
};
i2c3 {
@@ -916,6 +932,15 @@
interrupt-names = "irqmux";
ranges = <0 0x09210000 0x10000>;
+ pio20: pio@09210000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x0 0x100>;
+ st,bank-name = "PIO20";
+ };
+
tsin4 {
pinctrl_tsin4_serial_alt1: tsin4_serial_alt1 {
st,pins {
@@ -927,15 +952,6 @@
};
};
};
-
- pio20: pio@09210000 {
- gpio-controller;
- #gpio-cells = <1>;
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x0 0x100>;
- st,bank-name = "PIO20";
- };
};
pin-controller-rear {
@@ -951,7 +967,7 @@
pio30: gpio@09220000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0x100>;
@@ -959,7 +975,7 @@
};
pio31: gpio@09221000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -967,7 +983,7 @@
};
pio32: gpio@09222000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -975,7 +991,7 @@
};
pio33: gpio@09223000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -983,7 +999,7 @@
};
pio34: gpio@09224000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -991,7 +1007,7 @@
};
pio35: gpio@09225000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -1030,6 +1046,7 @@
pwm0 {
pinctrl_pwm0_chan0_default: pwm0-0-default {
st,pins {
+ pwm-capturein = <&pio31 0 ALT1 IN>;
pwm-out = <&pio31 1 ALT1 OUT>;
};
};
@@ -1067,6 +1084,61 @@
};
};
+ i2s_out {
+ pinctrl_i2s_8ch_out: i2s_8ch_out{
+ st,pins {
+ mclk = <&pio33 5 ALT1 OUT>;
+ lrclk = <&pio33 7 ALT1 OUT>;
+ sclk = <&pio33 6 ALT1 OUT>;
+ data0 = <&pio33 4 ALT1 OUT>;
+ data1 = <&pio34 0 ALT1 OUT>;
+ data2 = <&pio34 1 ALT1 OUT>;
+ data3 = <&pio34 2 ALT1 OUT>;
+ };
+ };
+
+ pinctrl_i2s_2ch_out: i2s_2ch_out{
+ st,pins {
+ mclk = <&pio33 5 ALT1 OUT>;
+ lrclk = <&pio33 7 ALT1 OUT>;
+ sclk = <&pio33 6 ALT1 OUT>;
+ data0 = <&pio33 4 ALT1 OUT>;
+ };
+ };
+ };
+
+ i2s_in {
+ pinctrl_i2s_8ch_in: i2s_8ch_in{
+ st,pins {
+ mclk = <&pio32 5 ALT1 IN>;
+ lrclk = <&pio32 7 ALT1 IN>;
+ sclk = <&pio32 6 ALT1 IN>;
+ data0 = <&pio32 4 ALT1 IN>;
+ data1 = <&pio33 0 ALT1 IN>;
+ data2 = <&pio33 1 ALT1 IN>;
+ data3 = <&pio33 2 ALT1 IN>;
+ data4 = <&pio33 3 ALT1 IN>;
+ };
+ };
+
+ pinctrl_i2s_2ch_in: i2s_2ch_in{
+ st,pins {
+ mclk = <&pio32 5 ALT1 IN>;
+ lrclk = <&pio32 7 ALT1 IN>;
+ sclk = <&pio32 6 ALT1 IN>;
+ data0 = <&pio32 4 ALT1 IN>;
+ };
+ };
+ };
+
+ spdif_out {
+ pinctrl_spdif_out: spdif_out{
+ st,pins {
+ spdif_out = <&pio34 7 ALT1 OUT>;
+ };
+ };
+ };
+
serial3 {
pinctrl_serial3: serial3-0 {
st,pins {
@@ -1090,7 +1162,7 @@
pio40: gpio@09230000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -1098,7 +1170,7 @@
};
pio41: gpio@09231000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -1106,7 +1178,7 @@
};
pio42: gpio@09232000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index d60f0d8add26..291ffacbd2e0 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -16,7 +16,10 @@
#size-cells = <1>;
assigned-clocks = <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>,
+ <&clk_s_c0_pll1 0>,
+ <&clk_s_c0_flexgen CLK_COMPO_DVP>,
+ <&clk_s_c0_flexgen CLK_MAIN_DISP>,
<&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>,
<&clk_s_d2_flexgen CLK_PIX_AUX_DISP>,
<&clk_s_d2_flexgen CLK_PIX_GDP1>,
@@ -26,14 +29,21 @@
assigned-clock-parents = <0>,
<0>,
+ <0>,
+ <&clk_s_c0_pll1 0>,
+ <&clk_s_c0_pll1 0>,
<&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>;
- assigned-clock-rates = <297000000>, <297000000>;
+ assigned-clock-rates = <297000000>,
+ <108000000>,
+ <0>,
+ <400000000>,
+ <400000000>;
ranges;
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
new file mode 100644
index 000000000000..ef2ff2f518f6
--- /dev/null
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited.
+ * Author: Patrice Chotard <patrice.chotard@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+#include "stih410.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "STiH410 B2260";
+ compatible = "st,stih410-b2260", "st,stih410";
+
+ chosen {
+ bootargs = "console=ttyAS1,115200 clk_ignore_unused";
+ linux,stdout-path = &uart1;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x40000000 0x40000000>;
+ };
+
+ aliases {
+ ttyAS1 = &uart1;
+ ethernet0 = &ethernet0;
+ };
+
+ soc {
+
+ leds {
+ compatible = "gpio-leds";
+ user_green_1 {
+ label = "User_green_1";
+ gpios = <&pio1 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ user_green_2 {
+ label = "User_green_2";
+ gpios = <&pio4 1 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ user_green_3 {
+ label = "User_green_3";
+ gpios = <&pio2 1 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ user_green_4 {
+ label = "User_green_4";
+ gpios = <&pio2 5 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ /* Low speed expansion connector */
+ uart0: serial@9830000 {
+ label = "LS-UART0";
+ status = "okay";
+ };
+
+ /* Low speed expansion connector */
+ uart1: serial@9831000 {
+ label = "LS-UART1";
+ status = "okay";
+ };
+
+ /* Low speed expansion connector */
+ spi0: spi@9844000 {
+ label = "LS-SPI0";
+ cs-gpio = <&pio30 3 0>;
+ status = "okay";
+ };
+
+ /* Low speed expansion connector */
+ i2c0: i2c@9840000 {
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ /* Low speed expansion connector */
+ i2c1: i2c@9841000 {
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ /* high speed expansion connector */
+ i2c2: i2c@9842000 {
+ label = "HS-I2C2";
+ pinctrl-0 = <&pinctrl_i2c2_alt2_1>;
+ status = "okay";
+ };
+
+ /* high speed expansion connector */
+ i2c3: i2c@9843000 {
+ label = "HS-I2C3";
+ pinctrl-0 = <&pinctrl_i2c3_alt3_0>;
+ status = "okay";
+ };
+
+ mmc0: sdhci@09060000 {
+ pinctrl-0 = <&pinctrl_sd0>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ /* high speed expansion connector */
+ mmc1: sdhci@09080000 {
+ status = "okay";
+ };
+
+ pwm0: pwm@9810000 {
+ status = "okay";
+ };
+
+ pwm1: pwm@9510000 {
+ status = "okay";
+ };
+
+ usb2_picophy1: phy2 {
+ status = "okay";
+ };
+
+ usb2_picophy2: phy3 {
+ status = "okay";
+ };
+
+ ohci0: usb@9a03c00 {
+ status = "okay";
+ };
+
+ ehci0: usb@9a03e00 {
+ status = "okay";
+ };
+
+ ohci1: usb@9a83c00 {
+ status = "okay";
+ };
+
+ ehci1: usb@9a83e00 {
+ status = "okay";
+ };
+
+ st_dwc3: dwc3@8f94000 {
+ status = "okay";
+ };
+
+ ethernet0: dwmac@9630000 {
+ phy-mode = "rgmii";
+ pinctrl-0 = <&pinctrl_rgmii1 &pinctrl_rgmii1_mdio_1>;
+
+ snps,phy-bus-name = "stmmac";
+ snps,phy-bus-id = <0>;
+ snps,phy-addr = <0>;
+ snps,reset-gpio = <&pio0 7 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+
+ status = "okay";
+ };
+
+ /* SSC11 to HDMI */
+ hdmiddc: i2c@9541000 {
+ /* HDMI V1.3a supports Standard mode only */
+ clock-frequency = <100000>;
+ st,i2c-min-scl-pulse-width-us = <0>;
+ st,i2c-min-sda-pulse-width-us = <5>;
+ status = "okay";
+ };
+
+ sti-display-subsystem {
+ sti_hdmi: sti-hdmi@8d04000 {
+ status = "okay";
+ };
+ };
+
+ miphy28lp_phy: miphy28lp@9b22000 {
+
+ phy_port1: port@9b2a000 {
+ st,osc-force-ext;
+ };
+ };
+
+ sata1: sata@9b28000 {
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/stih410-clock.dtsi b/arch/arm/boot/dts/stih410-clock.dtsi
index fd5049682181..8598effd6c01 100644
--- a/arch/arm/boot/dts/stih410-clock.dtsi
+++ b/arch/arm/boot/dts/stih410-clock.dtsi
@@ -44,7 +44,7 @@
clockgen_a9_pll: clockgen-a9-pll {
#clock-cells = <1>;
- compatible = "st,stih407-plls-c32-a9", "st,clkgen-plls-c32";
+ compatible = "st,stih407-clkgen-plla9";
clocks = <&clk_sysin>;
@@ -98,7 +98,7 @@
clk_s_a0_pll: clk-s-a0-pll {
#clock-cells = <1>;
- compatible = "st,stih407-plls-c32-a0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -122,7 +122,7 @@
clk_s_c0_quadfs: clk-s-c0-quadfs@9103000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-C", "st,quadfs";
+ compatible = "st,quadfs-pll";
reg = <0x9103000 0x1000>;
clocks = <&clk_sysin>;
@@ -140,7 +140,7 @@
clk_s_c0_pll0: clk-s-c0-pll0 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -150,7 +150,7 @@
clk_s_c0_pll1: clk-s-c0-pll1 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll1";
clocks = <&clk_sysin>;
@@ -218,7 +218,7 @@
clk_s_d0_quadfs: clk-s-d0-quadfs@9104000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9104000 0x1000>;
clocks = <&clk_sysin>;
@@ -235,7 +235,7 @@
clk_s_d0_flexgen: clk-s-d0-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-audio", "st,flexgen";
clocks = <&clk_s_d0_quadfs 0>,
<&clk_s_d0_quadfs 1>,
@@ -254,7 +254,7 @@
clk_s_d2_quadfs: clk-s-d2-quadfs@9106000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9106000 0x1000>;
clocks = <&clk_sysin>;
@@ -277,7 +277,7 @@
clk_s_d2_flexgen: clk-s-d2-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-video", "st,flexgen";
clocks = <&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 1>,
@@ -308,7 +308,7 @@
clk_s_d3_quadfs: clk-s-d3-quadfs@9107000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9107000 0x1000>;
clocks = <&clk_sysin>;
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 40318869c733..a3ef7341c051 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -107,7 +107,10 @@
#size-cells = <1>;
assigned-clocks = <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>,
+ <&clk_s_c0_pll1 0>,
+ <&clk_s_c0_flexgen CLK_COMPO_DVP>,
+ <&clk_s_c0_flexgen CLK_MAIN_DISP>,
<&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>,
<&clk_s_d2_flexgen CLK_PIX_AUX_DISP>,
<&clk_s_d2_flexgen CLK_PIX_GDP1>,
@@ -117,14 +120,21 @@
assigned-clock-parents = <0>,
<0>,
+ <0>,
+ <&clk_s_c0_pll1 0>,
+ <&clk_s_c0_pll1 0>,
<&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>;
- assigned-clock-rates = <297000000>, <297000000>;
+ assigned-clock-rates = <297000000>,
+ <108000000>,
+ <0>,
+ <400000000>,
+ <400000000>;
ranges;
@@ -231,5 +241,23 @@
clock-names = "bdisp";
clocks = <&clk_s_c0_flexgen CLK_IC_BDISP_0>;
};
+
+ hva@8c85000 {
+ compatible = "st,st-hva";
+ reg = <0x8c85000 0x400>, <0x6000000 0x40000>;
+ reg-names = "hva_registers", "hva_esram";
+ interrupts = <GIC_SPI 58 IRQ_TYPE_NONE>,
+ <GIC_SPI 59 IRQ_TYPE_NONE>;
+ clock-names = "clk_hva";
+ clocks = <&clk_s_c0_flexgen CLK_HVA>;
+ };
+
+ thermal@91a0000 {
+ compatible = "st,stih407-thermal";
+ reg = <0x91a0000 0x28>;
+ clock-names = "thermal";
+ clocks = <&clk_sysin>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
+ };
};
};
diff --git a/arch/arm/boot/dts/stih415-pinctrl.dtsi b/arch/arm/boot/dts/stih415-pinctrl.dtsi
index 3791ad95dbaf..bd028ce98b61 100644
--- a/arch/arm/boot/dts/stih415-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih415-pinctrl.dtsi
@@ -54,7 +54,7 @@
pio0: gpio@fe610000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -62,7 +62,7 @@
};
pio1: gpio@fe611000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -70,7 +70,7 @@
};
pio2: gpio@fe612000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -78,7 +78,7 @@
};
pio3: gpio@fe613000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -86,7 +86,7 @@
};
pio4: gpio@fe614000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -208,7 +208,7 @@
pio5: gpio@fee00000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -216,7 +216,7 @@
};
pio6: gpio@fee01000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -224,7 +224,7 @@
};
pio7: gpio@fee02000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -232,7 +232,7 @@
};
pio8: gpio@fee03000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -240,7 +240,7 @@
};
pio9: gpio@fee04000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -248,7 +248,7 @@
};
pio10: gpio@fee05000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -256,7 +256,7 @@
};
pio11: gpio@fee06000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x6000 0x100>;
@@ -264,7 +264,7 @@
};
pio12: gpio@fee07000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x7000 0x100>;
@@ -303,7 +303,7 @@
pio13: gpio@fe820000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -311,7 +311,7 @@
};
pio14: gpio@fe821000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -319,7 +319,7 @@
};
pio15: gpio@fe822000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -327,7 +327,7 @@
};
pio16: gpio@fe823000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -335,7 +335,7 @@
};
pio17: gpio@fe824000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -343,7 +343,7 @@
};
pio18: gpio@fe825000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -465,7 +465,7 @@
pio100: gpio@fd6b0000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -473,7 +473,7 @@
};
pio101: gpio@fd6b1000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -481,7 +481,7 @@
};
pio102: gpio@fd6b2000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -502,7 +502,7 @@
pio103: gpio@fd330000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -510,7 +510,7 @@
};
pio104: gpio@fd331000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -518,7 +518,7 @@
};
pio105: gpio@fd332000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -526,7 +526,7 @@
};
pio106: gpio@fd333000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -534,7 +534,7 @@
};
pio107: gpio@fd334000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts
index f1ceee192a0e..de320cd067de 100644
--- a/arch/arm/boot/dts/stih416-b2020e.dts
+++ b/arch/arm/boot/dts/stih416-b2020e.dts
@@ -9,6 +9,7 @@
/dts-v1/;
#include "stih416.dtsi"
#include "stih41x-b2020.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "STiH416 B2020 REV-E";
compatible = "st,stih416-b2020", "st,stih416";
@@ -17,13 +18,12 @@
leds {
compatible = "gpio-leds";
red {
- #gpio-cells = <1>;
label = "Front Panel LED";
- gpios = <&pio4 1>;
+ gpios = <&pio4 1 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
green {
- gpios = <&pio1 3>;
+ gpios = <&pio1 3 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 051fc16f3706..9c97f7e651a0 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -58,7 +58,7 @@
pio0: gpio@fe610000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -66,7 +66,7 @@
};
pio1: gpio@fe611000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -74,7 +74,7 @@
};
pio2: gpio@fe612000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -82,7 +82,7 @@
};
pio3: gpio@fe613000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -90,7 +90,7 @@
};
pio4: gpio@fe614000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -98,7 +98,7 @@
};
pio40: gpio@fe615000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -221,11 +221,14 @@
pinctrl_pwm1_chan0_default: pwm1-0-default {
st,pins {
pwm-out = <&pio3 0 ALT1 OUT>;
+ pwm-capturein = <&pio3 2 ALT1 IN>;
+
};
};
pinctrl_pwm1_chan1_default: pwm1-1-default {
st,pins {
pwm-out = <&pio4 4 ALT1 OUT>;
+ pwm-capturein = <&pio4 3 ALT1 IN>;
};
};
pinctrl_pwm1_chan2_default: pwm1-2-default {
@@ -254,7 +257,7 @@
pio5: gpio@fee00000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -262,7 +265,7 @@
};
pio6: gpio@fee01000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -270,7 +273,7 @@
};
pio7: gpio@fee02000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -278,7 +281,7 @@
};
pio8: gpio@fee03000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -286,7 +289,7 @@
};
pio9: gpio@fee04000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -294,7 +297,7 @@
};
pio10: gpio@fee05000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -302,7 +305,7 @@
};
pio11: gpio@fee06000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x6000 0x100>;
@@ -310,7 +313,7 @@
};
pio12: gpio@fee07000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x7000 0x100>;
@@ -318,7 +321,7 @@
};
pio30: gpio@fee08000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x8000 0x100>;
@@ -326,7 +329,7 @@
};
pio31: gpio@fee09000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x9000 0x100>;
@@ -337,6 +340,7 @@
pinctrl_pwm0_chan0_default: pwm0-0-default {
st,pins {
pwm-out = <&pio9 7 ALT2 OUT>;
+ pwm-capturein = <&pio9 6 ALT2 IN>;
};
};
};
@@ -404,7 +408,7 @@
pio13: gpio@fe820000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -412,7 +416,7 @@
};
pio14: gpio@fe821000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -420,7 +424,7 @@
};
pio15: gpio@fe822000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -428,7 +432,7 @@
};
pio16: gpio@fe823000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -436,7 +440,7 @@
};
pio17: gpio@fe824000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
@@ -444,7 +448,7 @@
};
pio18: gpio@fe825000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x5000 0x100>;
@@ -576,6 +580,7 @@
pinctrl_pwm0_chan1_default: pwm0-1-default {
st,pins {
pwm-out = <&pio13 2 ALT2 OUT>;
+ pwm-capturein = <&pio13 1 ALT2 IN>;
};
};
pinctrl_pwm0_chan2_default: pwm0-2-default {
@@ -605,7 +610,7 @@
pio100: gpio@fd6b0000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -613,7 +618,7 @@
};
pio101: gpio@fd6b1000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -621,7 +626,7 @@
};
pio102: gpio@fd6b2000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -642,7 +647,7 @@
pio103: gpio@fd330000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0 0x100>;
@@ -650,7 +655,7 @@
};
pio104: gpio@fd331000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x1000 0x100>;
@@ -658,7 +663,7 @@
};
pio105: gpio@fd332000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2000 0x100>;
@@ -666,7 +671,7 @@
};
pio106: gpio@fd333000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x3000 0x100>;
@@ -675,7 +680,7 @@
pio107: gpio@fd334000 {
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x4000 0x100>;
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 9e3170ccd18c..fe1f9cf770e4 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -474,6 +474,7 @@
status = "disabled";
#pwm-cells = <2>;
reg = <0xfed10000 0x68>;
+ interrupts = <GIC_SPI 200 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm0_chan0_default
@@ -481,9 +482,11 @@
&pinctrl_pwm0_chan2_default
&pinctrl_pwm0_chan3_default>;
- clock-names = "pwm";
- clocks = <&clk_sysin>;
+ clock-names = "pwm", "capture";
+ clocks = <&clk_sysin>, <&clk_s_a0_ls CLK_ICN_REG>;
+
st,pwm-num-chan = <4>;
+ st,capture-num-chan = <2>;
};
/* SBC PWM Module */
@@ -492,6 +495,7 @@
status = "disabled";
#pwm-cells = <2>;
reg = <0xfe510000 0x68>;
+ interrupts = <GIC_SPI 202 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1_chan0_default
diff --git a/arch/arm/boot/dts/stih418-b2199.dts b/arch/arm/boot/dts/stih418-b2199.dts
index 772d2bb07e5f..438e54c585b1 100644
--- a/arch/arm/boot/dts/stih418-b2199.dts
+++ b/arch/arm/boot/dts/stih418-b2199.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include "stih418.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "STiH418 B2199";
compatible = "st,stih418-b2199", "st,stih418";
@@ -35,14 +36,12 @@
leds {
compatible = "gpio-leds";
red {
- #gpio-cells = <2>;
label = "Front Panel LED";
- gpios = <&pio4 1 0>;
+ gpios = <&pio4 1 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
green {
- #gpio-cells = <2>;
- gpios = <&pio1 3 0>;
+ gpios = <&pio1 3 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
@@ -86,6 +85,7 @@
sd-uhs-sdr50;
sd-uhs-sdr104;
sd-uhs-ddr50;
+ non-removable;
};
miphy28lp_phy: miphy28lp@9b22000 {
diff --git a/arch/arm/boot/dts/stih418-clock.dtsi b/arch/arm/boot/dts/stih418-clock.dtsi
index ae6d9978ea19..ee6614b79f7d 100644
--- a/arch/arm/boot/dts/stih418-clock.dtsi
+++ b/arch/arm/boot/dts/stih418-clock.dtsi
@@ -44,7 +44,7 @@
clockgen_a9_pll: clockgen-a9-pll {
#clock-cells = <1>;
- compatible = "st,stih418-plls-c28-a9", "st,clkgen-plls-c32";
+ compatible = "st,stih418-clkgen-plla9";
clocks = <&clk_sysin>;
@@ -98,7 +98,7 @@
clk_s_a0_pll: clk-s-a0-pll {
#clock-cells = <1>;
- compatible = "st,stih407-plls-c32-a0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -120,7 +120,7 @@
clk_s_c0_quadfs: clk-s-c0-quadfs@9103000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-C", "st,quadfs";
+ compatible = "st,quadfs-pll";
reg = <0x9103000 0x1000>;
clocks = <&clk_sysin>;
@@ -137,7 +137,7 @@
clk_s_c0_pll0: clk-s-c0-pll0 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll0";
clocks = <&clk_sysin>;
@@ -146,7 +146,7 @@
clk_s_c0_pll1: clk-s-c0-pll1 {
#clock-cells = <1>;
- compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
+ compatible = "st,clkgen-pll1";
clocks = <&clk_sysin>;
@@ -212,7 +212,7 @@
clk_s_d0_quadfs: clk-s-d0-quadfs@9104000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9104000 0x1000>;
clocks = <&clk_sysin>;
@@ -229,7 +229,7 @@
clk_s_d0_flexgen: clk-s-d0-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-audio", "st,flexgen";
clocks = <&clk_s_d0_quadfs 0>,
<&clk_s_d0_quadfs 1>,
@@ -248,7 +248,7 @@
clk_s_d2_quadfs: clk-s-d2-quadfs@9106000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9106000 0x1000>;
clocks = <&clk_sysin>;
@@ -271,7 +271,7 @@
clk_s_d2_flexgen: clk-s-d2-flexgen {
#clock-cells = <1>;
- compatible = "st,flexgen";
+ compatible = "st,flexgen-video", "st,flexgen";
clocks = <&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 1>,
@@ -309,7 +309,7 @@
clk_s_d3_quadfs: clk-s-d3-quadfs@9107000 {
#clock-cells = <1>;
- compatible = "st,stih407-quadfs660-D", "st,quadfs";
+ compatible = "st,quadfs";
reg = <0x9107000 0x1000>;
clocks = <&clk_sysin>;
diff --git a/arch/arm/boot/dts/stih41x-b2000.dtsi b/arch/arm/boot/dts/stih41x-b2000.dtsi
index 5f91f455f05b..9bfa0674b452 100644
--- a/arch/arm/boot/dts/stih41x-b2000.dtsi
+++ b/arch/arm/boot/dts/stih41x-b2000.dtsi
@@ -7,6 +7,8 @@
* publishhed by the Free Software Foundation.
*/
#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
/ {
memory{
@@ -33,9 +35,8 @@
leds {
compatible = "gpio-leds";
fp_led {
- #gpio-cells = <1>;
label = "Front Panel LED";
- gpios = <&pio105 7>;
+ gpios = <&pio105 7 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/stih41x-b2020.dtsi b/arch/arm/boot/dts/stih41x-b2020.dtsi
index 487d7d87dbef..322e0e95176c 100644
--- a/arch/arm/boot/dts/stih41x-b2020.dtsi
+++ b/arch/arm/boot/dts/stih41x-b2020.dtsi
@@ -7,6 +7,7 @@
* publishhed by the Free Software Foundation.
*/
#include "stih41x-b2020x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
memory{
device_type = "memory";
@@ -30,13 +31,12 @@
leds {
compatible = "gpio-leds";
red {
- #gpio-cells = <1>;
label = "Front Panel LED";
- gpios = <&pio4 1>;
+ gpios = <&pio4 1 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
green {
- gpios = <&pio4 7>;
+ gpios = <&pio4 7 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
index 133375bc8aa5..ed2b7a99ecff 100644
--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
+++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
@@ -18,14 +18,12 @@
leds {
compatible = "gpio-leds";
red {
- #gpio-cells = <2>;
label = "Front Panel LED";
- gpios = <&pio4 1 0>;
+ gpios = <&pio4 1 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
green {
- #gpio-cells = <2>;
- gpios = <&pio1 3 0>;
+ gpios = <&pio1 3 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
};
@@ -65,6 +63,7 @@
};
mmc0: sdhci@09060000 {
+ non-removable;
status = "okay";
};
@@ -135,5 +134,50 @@
dvb-card = <STV0367_TDA18212_NIMA_1>;
};
};
+
+ sti_uni_player2: sti-uni-player@8d82000 {
+ status = "okay";
+ };
+
+ sti_uni_player3: sti-uni-player@8d85000 {
+ status = "okay";
+ };
+
+ sti_sasg_codec: sti-sasg-codec {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spdif_out>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "sti audio card";
+ status = "okay";
+
+ simple-audio-card,dai-link@0 {
+ /* DAC */
+ format = "i2s";
+ mclk-fs = <256>;
+ cpu {
+ sound-dai = <&sti_uni_player2>;
+ };
+
+ codec {
+ sound-dai = <&sti_sasg_codec 1>;
+ };
+ };
+ simple-audio-card,dai-link@1 {
+ /* SPDIF */
+ format = "left_j";
+ mclk-fs = <128>;
+ cpu {
+ sound-dai = <&sti_uni_player3>;
+ };
+
+ codec {
+ sound-dai = <&sti_sasg_codec 0>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 1a189d44ad38..336ee4fb587d 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -45,6 +45,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
@@ -334,6 +335,7 @@
};
rcc: rcc@40023810 {
+ #reset-cells = <1>;
#clock-cells = <2>;
compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
reg = <0x40023800 0x400>;
diff --git a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
index f3cb297fd1db..5f98582232d6 100644
--- a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
+++ b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
@@ -121,10 +121,6 @@
status = "okay";
};
-&ohci1 {
- status = "okay";
-};
-
&otg_sram {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-m712.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-m712.dts
new file mode 100644
index 000000000000..b1e2afd9de52
--- /dev/null
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-m712.dts
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun5i-a13.dtsi"
+#include "sun5i-reference-design-tablet.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Empire Electronix M712 tablet";
+ compatible = "empire-electronix,m712", "allwinner,sun5i-a13";
+};
diff --git a/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts b/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
index 1b11ec95ae53..439ae3b537df 100644
--- a/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
+++ b/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
@@ -42,171 +42,9 @@
/dts-v1/;
#include "sun5i-a13.dtsi"
-#include "sunxi-common-regulators.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sun5i-reference-design-tablet.dtsi"
/ {
model = "INet-98V Rev 02";
compatible = "primux,inet98v-rev2", "allwinner,sun5i-a13";
-
- aliases {
- serial0 = &uart1;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
-};
-
-&cpu0 {
- cpu-supply = <&reg_dcdc2>;
-};
-
-&ehci0 {
- status = "okay";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-
- axp209: pmic@34 {
- reg = <0x34>;
- interrupts = <0>;
- };
-};
-
-#include "axp209.dtsi"
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
- status = "okay";
-
- pcf8563: rtc@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- };
-};
-
-&lradc {
- vref-supply = <&reg_ldo2>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-};
-
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_inet98fv2>;
- vmmc-supply = <&reg_vcc3v3>;
- bus-width = <4>;
- cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
- cd-inverted;
- status = "okay";
-};
-
-&otg_sram {
- status = "okay";
-};
-
-&pio {
- mmc0_cd_pin_inet98fv2: mmc0_cd_pin@0 {
- allwinner,pins = "PG0";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
- allwinner,pins = "PG1";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
- };
-
- usb0_id_detect_pin: usb0_id_detect_pin@0 {
- allwinner,pins = "PG2";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <1250000>;
- regulator-name = "vdd-int-pll";
-};
-
-&reg_ldo1 {
- regulator-name = "vdd-rtc";
-};
-
-&reg_ldo2 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "avcc";
-};
-
-&reg_ldo3 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
-};
-
-&reg_usb0_vbus {
- gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
- status = "okay";
-};
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_b>;
- status = "okay";
-};
-
-&usb_otg {
- dr_mode = "otg";
- status = "okay";
-};
-
-&usb0_vbus_pin_a {
- allwinner,pins = "PG12";
-};
-
-&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
- usb0_vbus-supply = <&reg_usb0_vbus>;
- usb1_vbus-supply = <&reg_ldo3>;
- status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index f694482bdeb6..b68a12374b35 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -64,6 +64,16 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ status {
+ label = "chip:white:status";
+ gpios = <&axp_gpio 2 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
};
&be0 {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 1867af24ff52..ce1960453a0b 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -47,7 +47,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/clock/sun6i-a31-ccu.h>
#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/reset/sun6i-a31-ccu.h>
/ {
interrupt-parent = <&gic>;
@@ -65,7 +67,10 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll6 0>;
+ clocks = <&ccu CLK_AHB1_BE0>, <&ccu CLK_AHB1_LCD0>,
+ <&ccu CLK_AHB1_HDMI>, <&ccu CLK_DRAM_BE0>,
+ <&ccu CLK_IEP_DRC0>, <&ccu CLK_BE0>,
+ <&ccu CLK_LCD0_CH1>, <&ccu CLK_HDMI>;
status = "disabled";
};
@@ -73,7 +78,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll6 0>;
+ clocks = <&ccu CLK_AHB1_BE0>, <&ccu CLK_AHB1_LCD0>,
+ <&ccu CLK_DRAM_BE0>, <&ccu CLK_IEP_DRC0>,
+ <&ccu CLK_BE0>, <&ccu CLK_LCD0_CH0>;
status = "disabled";
};
};
@@ -97,7 +104,7 @@
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <0>;
- clocks = <&cpu>;
+ clocks = <&ccu CLK_CPU>;
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
@@ -192,235 +199,6 @@
clock-output-names = "osc32k";
};
- pll1: clk@01c20000 {
- #clock-cells = <0>;
- compatible = "allwinner,sun6i-a31-pll1-clk";
- reg = <0x01c20000 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll1";
- };
-
- pll6: clk@01c20028 {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-pll6-clk";
- reg = <0x01c20028 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll6", "pll6x2";
- };
-
- cpu: cpu@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-cpu-clk";
- reg = <0x01c20050 0x4>;
-
- /*
- * PLL1 is listed twice here.
- * While it looks suspicious, it's actually documented
- * that way both in the datasheet and in the code from
- * Allwinner.
- */
- clocks = <&osc32k>, <&osc24M>, <&pll1>, <&pll1>;
- clock-output-names = "cpu";
- };
-
- axi: axi@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-axi-clk";
- reg = <0x01c20050 0x4>;
- clocks = <&cpu>;
- clock-output-names = "axi";
- };
-
- ahb1: ahb1@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun6i-a31-ahb1-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
- clock-output-names = "ahb1";
-
- /*
- * Clock AHB1 from PLL6, instead of CPU/AXI which
- * has rate changes due to cpufreq. Also the DMA
- * controller requires AHB1 clocked from PLL6.
- */
- assigned-clocks = <&ahb1>;
- assigned-clock-parents = <&pll6 0>;
- };
-
- ahb1_gates: clk@01c20060 {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-ahb1-gates-clk";
- reg = <0x01c20060 0x8>;
- clocks = <&ahb1>;
- clock-indices = <1>, <5>,
- <6>, <8>, <9>,
- <10>, <11>, <12>,
- <13>, <14>,
- <17>, <18>, <19>,
- <20>, <21>, <22>,
- <23>, <24>, <26>,
- <27>, <29>,
- <30>, <31>, <32>,
- <36>, <37>, <40>,
- <43>, <44>, <45>,
- <46>, <47>, <50>,
- <52>, <55>, <56>,
- <57>, <58>;
- clock-output-names = "ahb1_mipidsi", "ahb1_ss",
- "ahb1_dma", "ahb1_mmc0", "ahb1_mmc1",
- "ahb1_mmc2", "ahb1_mmc3", "ahb1_nand1",
- "ahb1_nand0", "ahb1_sdram",
- "ahb1_gmac", "ahb1_ts", "ahb1_hstimer",
- "ahb1_spi0", "ahb1_spi1", "ahb1_spi2",
- "ahb1_spi3", "ahb1_otg", "ahb1_ehci0",
- "ahb1_ehci1", "ahb1_ohci0",
- "ahb1_ohci1", "ahb1_ohci2", "ahb1_ve",
- "ahb1_lcd0", "ahb1_lcd1", "ahb1_csi",
- "ahb1_hdmi", "ahb1_de0", "ahb1_de1",
- "ahb1_fe0", "ahb1_fe1", "ahb1_mp",
- "ahb1_gpu", "ahb1_deu0", "ahb1_deu1",
- "ahb1_drc0", "ahb1_drc1";
- };
-
- apb1: apb1@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb0-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&ahb1>;
- clock-output-names = "apb1";
- };
-
- apb1_gates: clk@01c20068 {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-apb1-gates-clk";
- reg = <0x01c20068 0x4>;
- clocks = <&apb1>;
- clock-indices = <0>, <4>,
- <5>, <12>,
- <13>;
- clock-output-names = "apb1_codec", "apb1_digital_mic",
- "apb1_pio", "apb1_daudio0",
- "apb1_daudio1";
- };
-
- apb2: clk@01c20058 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb1-clk";
- reg = <0x01c20058 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
- clock-output-names = "apb2";
- };
-
- apb2_gates: clk@01c2006c {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-apb2-gates-clk";
- reg = <0x01c2006c 0x4>;
- clocks = <&apb2>;
- clock-indices = <0>, <1>,
- <2>, <3>, <16>,
- <17>, <18>, <19>,
- <20>, <21>;
- clock-output-names = "apb2_i2c0", "apb2_i2c1",
- "apb2_i2c2", "apb2_i2c3",
- "apb2_uart0", "apb2_uart1",
- "apb2_uart2", "apb2_uart3",
- "apb2_uart4", "apb2_uart5";
- };
-
- mmc0_clk: clk@01c20088 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20088 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc0",
- "mmc0_output",
- "mmc0_sample";
- };
-
- mmc1_clk: clk@01c2008c {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c2008c 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc1",
- "mmc1_output",
- "mmc1_sample";
- };
-
- mmc2_clk: clk@01c20090 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20090 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc2",
- "mmc2_output",
- "mmc2_sample";
- };
-
- mmc3_clk: clk@01c20094 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20094 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc3",
- "mmc3_output",
- "mmc3_sample";
- };
-
- ss_clk: clk@01c2009c {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c2009c 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "ss";
- };
-
- spi0_clk: clk@01c200a0 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c200a0 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "spi0";
- };
-
- spi1_clk: clk@01c200a4 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c200a4 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "spi1";
- };
-
- spi2_clk: clk@01c200a8 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c200a8 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "spi2";
- };
-
- spi3_clk: clk@01c200ac {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c200ac 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "spi3";
- };
-
- usb_clk: clk@01c200cc {
- #clock-cells = <1>;
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-usb-clk";
- reg = <0x01c200cc 0x4>;
- clocks = <&osc24M>;
- clock-indices = <8>, <9>, <10>,
- <16>, <17>,
- <18>;
- clock-output-names = "usb_phy0", "usb_phy1", "usb_phy2",
- "usb_ohci0", "usb_ohci1",
- "usb_ohci2";
- };
-
/*
* The following two are dummy clocks, placeholders
* used in the gmac_tx clock. The gmac driver will
@@ -463,23 +241,23 @@
compatible = "allwinner,sun6i-a31-dma";
reg = <0x01c02000 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 6>;
- resets = <&ahb1_rst 6>;
+ clocks = <&ccu CLK_AHB1_DMA>;
+ resets = <&ccu RST_AHB1_DMA>;
#dma-cells = <1>;
};
mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c0f000 0x1000>;
- clocks = <&ahb1_gates 8>,
- <&mmc0_clk 0>,
- <&mmc0_clk 1>,
- <&mmc0_clk 2>;
+ clocks = <&ccu CLK_AHB1_MMC0>,
+ <&ccu CLK_MMC0>,
+ <&ccu CLK_MMC0_OUTPUT>,
+ <&ccu CLK_MMC0_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 8>;
+ resets = <&ccu RST_AHB1_MMC0>;
reset-names = "ahb";
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -488,17 +266,17 @@
};
mmc1: mmc@01c10000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c10000 0x1000>;
- clocks = <&ahb1_gates 9>,
- <&mmc1_clk 0>,
- <&mmc1_clk 1>,
- <&mmc1_clk 2>;
+ clocks = <&ccu CLK_AHB1_MMC1>,
+ <&ccu CLK_MMC1>,
+ <&ccu CLK_MMC1_OUTPUT>,
+ <&ccu CLK_MMC1_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 9>;
+ resets = <&ccu RST_AHB1_MMC1>;
reset-names = "ahb";
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -507,17 +285,17 @@
};
mmc2: mmc@01c11000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c11000 0x1000>;
- clocks = <&ahb1_gates 10>,
- <&mmc2_clk 0>,
- <&mmc2_clk 1>,
- <&mmc2_clk 2>;
+ clocks = <&ccu CLK_AHB1_MMC2>,
+ <&ccu CLK_MMC2>,
+ <&ccu CLK_MMC2_OUTPUT>,
+ <&ccu CLK_MMC2_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 10>;
+ resets = <&ccu RST_AHB1_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -526,17 +304,17 @@
};
mmc3: mmc@01c12000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c12000 0x1000>;
- clocks = <&ahb1_gates 11>,
- <&mmc3_clk 0>,
- <&mmc3_clk 1>,
- <&mmc3_clk 2>;
+ clocks = <&ccu CLK_AHB1_MMC3>,
+ <&ccu CLK_MMC3>,
+ <&ccu CLK_MMC3_OUTPUT>,
+ <&ccu CLK_MMC3_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 11>;
+ resets = <&ccu RST_AHB1_MMC3>;
reset-names = "ahb";
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -547,8 +325,8 @@
usb_otg: usb@01c19000 {
compatible = "allwinner,sun6i-a31-musb";
reg = <0x01c19000 0x0400>;
- clocks = <&ahb1_gates 24>;
- resets = <&ahb1_rst 24>;
+ clocks = <&ccu CLK_AHB1_OTG>;
+ resets = <&ccu RST_AHB1_OTG>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "mc";
phys = <&usbphy 0>;
@@ -565,15 +343,15 @@
reg-names = "phy_ctrl",
"pmu1",
"pmu2";
- clocks = <&usb_clk 8>,
- <&usb_clk 9>,
- <&usb_clk 10>;
+ clocks = <&ccu CLK_USB_PHY0>,
+ <&ccu CLK_USB_PHY1>,
+ <&ccu CLK_USB_PHY2>;
clock-names = "usb0_phy",
"usb1_phy",
"usb2_phy";
- resets = <&usb_clk 0>,
- <&usb_clk 1>,
- <&usb_clk 2>;
+ resets = <&ccu RST_USB_PHY0>,
+ <&ccu RST_USB_PHY1>,
+ <&ccu RST_USB_PHY2>;
reset-names = "usb0_reset",
"usb1_reset",
"usb2_reset";
@@ -585,8 +363,8 @@
compatible = "allwinner,sun6i-a31-ehci", "generic-ehci";
reg = <0x01c1a000 0x100>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 26>;
- resets = <&ahb1_rst 26>;
+ clocks = <&ccu CLK_AHB1_EHCI0>;
+ resets = <&ccu RST_AHB1_EHCI0>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
@@ -596,8 +374,8 @@
compatible = "allwinner,sun6i-a31-ohci", "generic-ohci";
reg = <0x01c1a400 0x100>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 29>, <&usb_clk 16>;
- resets = <&ahb1_rst 29>;
+ clocks = <&ccu CLK_AHB1_OHCI0>, <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_AHB1_OHCI0>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
@@ -607,8 +385,8 @@
compatible = "allwinner,sun6i-a31-ehci", "generic-ehci";
reg = <0x01c1b000 0x100>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 27>;
- resets = <&ahb1_rst 27>;
+ clocks = <&ccu CLK_AHB1_EHCI1>;
+ resets = <&ccu RST_AHB1_EHCI1>;
phys = <&usbphy 2>;
phy-names = "usb";
status = "disabled";
@@ -618,8 +396,8 @@
compatible = "allwinner,sun6i-a31-ohci", "generic-ohci";
reg = <0x01c1b400 0x100>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 30>, <&usb_clk 17>;
- resets = <&ahb1_rst 30>;
+ clocks = <&ccu CLK_AHB1_OHCI1>, <&ccu CLK_USB_OHCI1>;
+ resets = <&ccu RST_AHB1_OHCI1>;
phys = <&usbphy 2>;
phy-names = "usb";
status = "disabled";
@@ -629,11 +407,20 @@
compatible = "allwinner,sun6i-a31-ohci", "generic-ohci";
reg = <0x01c1c400 0x100>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 31>, <&usb_clk 18>;
- resets = <&ahb1_rst 31>;
+ clocks = <&ccu CLK_AHB1_OHCI2>, <&ccu CLK_USB_OHCI2>;
+ resets = <&ccu RST_AHB1_OHCI2>;
status = "disabled";
};
+ ccu: clock@01c20000 {
+ compatible = "allwinner,sun6i-a31-ccu";
+ reg = <0x01c20000 0x400>;
+ clocks = <&osc24M>, <&osc32k>;
+ clock-names = "hosc", "losc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
pio: pinctrl@01c20800 {
compatible = "allwinner,sun6i-a31-pinctrl";
reg = <0x01c20800 0x400>;
@@ -641,7 +428,7 @@
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb1_gates 5>;
+ clocks = <&ccu CLK_APB1_PIO>;
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
@@ -762,24 +549,6 @@
};
};
- ahb1_rst: reset@01c202c0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-ahb1-reset";
- reg = <0x01c202c0 0xc>;
- };
-
- apb1_rst: reset@01c202d0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d0 0x4>;
- };
-
- apb2_rst: reset@01c202d8 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d8 0x4>;
- };
-
timer@01c20c00 {
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0xa0>;
@@ -816,8 +585,8 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 16>;
- resets = <&apb2_rst 16>;
+ clocks = <&ccu CLK_APB2_UART0>;
+ resets = <&ccu RST_APB2_UART0>;
dmas = <&dma 6>, <&dma 6>;
dma-names = "rx", "tx";
status = "disabled";
@@ -829,8 +598,8 @@
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 17>;
- resets = <&apb2_rst 17>;
+ clocks = <&ccu CLK_APB2_UART1>;
+ resets = <&ccu RST_APB2_UART1>;
dmas = <&dma 7>, <&dma 7>;
dma-names = "rx", "tx";
status = "disabled";
@@ -842,8 +611,8 @@
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 18>;
- resets = <&apb2_rst 18>;
+ clocks = <&ccu CLK_APB2_UART2>;
+ resets = <&ccu RST_APB2_UART2>;
dmas = <&dma 8>, <&dma 8>;
dma-names = "rx", "tx";
status = "disabled";
@@ -855,8 +624,8 @@
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 19>;
- resets = <&apb2_rst 19>;
+ clocks = <&ccu CLK_APB2_UART3>;
+ resets = <&ccu RST_APB2_UART3>;
dmas = <&dma 9>, <&dma 9>;
dma-names = "rx", "tx";
status = "disabled";
@@ -868,8 +637,8 @@
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 20>;
- resets = <&apb2_rst 20>;
+ clocks = <&ccu CLK_APB2_UART4>;
+ resets = <&ccu RST_APB2_UART4>;
dmas = <&dma 10>, <&dma 10>;
dma-names = "rx", "tx";
status = "disabled";
@@ -881,8 +650,8 @@
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 21>;
- resets = <&apb2_rst 21>;
+ clocks = <&ccu CLK_APB2_UART5>;
+ resets = <&ccu RST_APB2_UART5>;
dmas = <&dma 22>, <&dma 22>;
dma-names = "rx", "tx";
status = "disabled";
@@ -892,8 +661,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2ac00 0x400>;
interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 0>;
- resets = <&apb2_rst 0>;
+ clocks = <&ccu CLK_APB2_I2C0>;
+ resets = <&ccu RST_APB2_I2C0>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -903,8 +672,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2b000 0x400>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 1>;
- resets = <&apb2_rst 1>;
+ clocks = <&ccu CLK_APB2_I2C1>;
+ resets = <&ccu RST_APB2_I2C1>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -914,8 +683,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2b400 0x400>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 2>;
- resets = <&apb2_rst 2>;
+ clocks = <&ccu CLK_APB2_I2C2>;
+ resets = <&ccu RST_APB2_I2C2>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -925,8 +694,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2b800 0x400>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 3>;
- resets = <&apb2_rst 3>;
+ clocks = <&ccu CLK_APB2_I2C3>;
+ resets = <&ccu RST_APB2_I2C3>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -937,9 +706,9 @@
reg = <0x01c30000 0x1054>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
- clocks = <&ahb1_gates 17>, <&gmac_tx_clk>;
+ clocks = <&ccu CLK_AHB1_EMAC>, <&gmac_tx_clk>;
clock-names = "stmmaceth", "allwinner_gmac_tx";
- resets = <&ahb1_rst 17>;
+ resets = <&ccu RST_AHB1_EMAC>;
reset-names = "stmmaceth";
snps,pbl = <2>;
snps,fixed-burst;
@@ -953,9 +722,9 @@
compatible = "allwinner,sun4i-a10-crypto";
reg = <0x01c15000 0x1000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 5>, <&ss_clk>;
+ clocks = <&ccu CLK_AHB1_SS>, <&ccu CLK_SS>;
clock-names = "ahb", "mod";
- resets = <&ahb1_rst 5>;
+ resets = <&ccu RST_AHB1_SS>;
reset-names = "ahb";
};
@@ -967,19 +736,19 @@
<GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 19>;
- resets = <&ahb1_rst 19>;
+ clocks = <&ccu CLK_AHB1_HSTIMER>;
+ resets = <&ccu RST_AHB1_HSTIMER>;
};
spi0: spi@01c68000 {
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c68000 0x1000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 20>, <&spi0_clk>;
+ clocks = <&ccu CLK_AHB1_SPI0>, <&ccu CLK_SPI0>;
clock-names = "ahb", "mod";
dmas = <&dma 23>, <&dma 23>;
dma-names = "rx", "tx";
- resets = <&ahb1_rst 20>;
+ resets = <&ccu RST_AHB1_SPI0>;
status = "disabled";
};
@@ -987,11 +756,11 @@
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c69000 0x1000>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 21>, <&spi1_clk>;
+ clocks = <&ccu CLK_AHB1_SPI1>, <&ccu CLK_SPI1>;
clock-names = "ahb", "mod";
dmas = <&dma 24>, <&dma 24>;
dma-names = "rx", "tx";
- resets = <&ahb1_rst 21>;
+ resets = <&ccu RST_AHB1_SPI1>;
status = "disabled";
};
@@ -999,11 +768,11 @@
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c6a000 0x1000>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 22>, <&spi2_clk>;
+ clocks = <&ccu CLK_AHB1_SPI2>, <&ccu CLK_SPI2>;
clock-names = "ahb", "mod";
dmas = <&dma 25>, <&dma 25>;
dma-names = "rx", "tx";
- resets = <&ahb1_rst 22>;
+ resets = <&ccu RST_AHB1_SPI2>;
status = "disabled";
};
@@ -1011,11 +780,11 @@
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c6b000 0x1000>;
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 23>, <&spi3_clk>;
+ clocks = <&ccu CLK_AHB1_SPI3>, <&ccu CLK_SPI3>;
clock-names = "ahb", "mod";
dmas = <&dma 26>, <&dma 26>;
dma-names = "rx", "tx";
- resets = <&ahb1_rst 23>;
+ resets = <&ccu RST_AHB1_SPI3>;
status = "disabled";
};
@@ -1052,8 +821,9 @@
ar100: ar100_clk {
compatible = "allwinner,sun6i-a31-ar100-clk";
#clock-cells = <0>;
- clocks = <&osc32k>, <&osc24M>, <&pll6 0>,
- <&pll6 0>;
+ clocks = <&osc32k>, <&osc24M>,
+ <&ccu CLK_PLL_PERIPH>,
+ <&ccu CLK_PLL_PERIPH>;
clock-output-names = "ar100";
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts b/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts
index e182eec6d878..882a4d89fa22 100644
--- a/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts
@@ -42,32 +42,11 @@
/dts-v1/;
#include "sun6i-a31s.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sun6i-reference-design-tablet.dtsi"
/ {
model = "Colorfly E708 Q1 tablet";
compatible = "colorfly,e708-q1", "allwinner,sun6i-a31s";
-
- aliases {
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
-&cpu0 {
- cpu-supply = <&reg_dcdc3>;
-};
-
-&ehci0 {
- /* rtl8188etv wifi is connected here */
- status = "okay";
};
&lradc {
@@ -82,103 +61,6 @@
};
};
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_e708_q1>;
- vmmc-supply = <&reg_dcdc1>;
- bus-width = <4>;
- cd-gpios = <&pio 0 8 GPIO_ACTIVE_HIGH>; /* PA8 */
- cd-inverted;
- status = "okay";
-};
-
-&pio {
- mma8452_int_e708_q1: mma8452_int_pin@0 {
- allwinner,pins = "PA9";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- mmc0_cd_pin_e708_q1: mmc0_cd_pin@0 {
- allwinner,pins = "PA8";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&p2wi {
- status = "okay";
-
- axp22x: pmic@68 {
- compatible = "x-powers,axp221";
- reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- };
-};
-
-#include "axp22x.dtsi"
-
-&reg_aldo3 {
- regulator-always-on;
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "avcc";
-};
-
-&reg_dc1sw {
- regulator-name = "vcc-lcd";
-};
-
-&reg_dc5ldo {
- regulator-always-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
- regulator-name = "vdd-cpus"; /* This is an educated guess */
-};
-
-&reg_dcdc1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
-};
-
-&reg_dcdc2 {
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
- regulator-name = "vdd-gpu";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc4 {
- regulator-always-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
- regulator-name = "vdd-sys-dll";
-};
-
-&reg_dcdc5 {
- regulator-always-on;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-name = "vcc-dram";
-};
-
-&reg_dldo1 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
-};
-
&reg_dldo2 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -186,23 +68,5 @@
};
&simplefb_lcd {
- vcc-lcd-supply = <&reg_dc1sw>;
vcc-pg-supply = <&reg_dldo2>;
};
-
-/*
- * FIXME for now we only support host mode and rely on u-boot to have
- * turned on Vbus which is controlled by the axp221 pmic on the board.
- *
- * Once we have axp221 power-supply and vbus-usb support we should switch
- * to fully supporting otg.
- */
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
-&usbphy {
- usb1_vbus-supply = <&reg_dldo1>;
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sun6i-a31s-inet-q972.dts b/arch/arm/boot/dts/sun6i-a31s-inet-q972.dts
new file mode 100644
index 000000000000..e584e6b186a7
--- /dev/null
+++ b/arch/arm/boot/dts/sun6i-a31s-inet-q972.dts
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun6i-a31s.dtsi"
+#include "sun6i-reference-design-tablet.dtsi"
+
+/ {
+ model = "iNet Q972 tablet";
+ compatible = "inet-tek,inet-q972", "allwinner,sun6i-a31s";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+
+ ft5406ee8: touchscreen@38 {
+ compatible = "edt,edt-ft5406";
+ reg = <0x38>;
+ interrupt-parent = <&pio>;
+ interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
+ touchscreen-size-x = <768>;
+ touchscreen-size-y = <1024>;
+ touchscreen-swapped-x-y;
+ };
+};
+
+&lradc {
+ vref-supply = <&reg_aldo3>;
+ status = "okay";
+
+ button@200 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <200000>;
+ };
+
+ button@900 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <900000>;
+ };
+
+ button@1200 {
+ label = "Back";
+ linux,code = <KEY_BACK>;
+ channel = <0>;
+ voltage = <1200000>;
+ };
+};
+
+&ohci1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
new file mode 100644
index 000000000000..0c434304e040
--- /dev/null
+++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc3>;
+};
+
+&ehci0 {
+ /* Wifi is connected here */
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_e708_q1>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 0 8 GPIO_ACTIVE_HIGH>; /* PA8 */
+ cd-inverted;
+ status = "okay";
+};
+
+&pio {
+ mmc0_cd_pin_e708_q1: mmc0_cd_pin@0 {
+ allwinner,pins = "PA8";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ usb0_id_detect_pin: usb0_id_detect_pin@0 {
+ allwinner,pins = "PA15";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+};
+
+&p2wi {
+ status = "okay";
+
+ axp22x: pmic@68 {
+ compatible = "x-powers,axp221";
+ reg = <0x68>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ drivevbus-supply = <&reg_vcc5v0>;
+ x-powers,drive-vbus-en;
+ };
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc";
+};
+
+&reg_dc1sw {
+ regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpus"; /* This is an educated guess */
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-gpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-sys-dll";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_drivevbus {
+ regulator-name = "usb0-vbus";
+ status = "okay";
+};
+
+&simplefb_lcd {
+ vcc-lcd-supply = <&reg_dc1sw>;
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_detect_pin>;
+ usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
+ usb1_vbus-supply = <&reg_dldo1>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index bd0c47660243..94cf5a1c7172 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -905,7 +905,7 @@
};
mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c0f000 0x1000>;
clocks = <&ahb_gates 8>,
<&mmc0_clk 0>,
@@ -922,7 +922,7 @@
};
mmc1: mmc@01c10000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c10000 0x1000>;
clocks = <&ahb_gates 9>,
<&mmc1_clk 0>,
@@ -939,7 +939,7 @@
};
mmc2: mmc@01c11000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c11000 0x1000>;
clocks = <&ahb_gates 10>,
<&mmc2_clk 0>,
@@ -956,7 +956,7 @@
};
mmc3: mmc@01c12000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c12000 0x1000>;
clocks = <&ahb_gates 11>,
<&mmc3_clk 0>,
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 7e05e09e61c7..48fc24f36fcb 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -46,7 +46,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/sun8i-a23-a33-ccu.h>
#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/reset/sun8i-a23-a33-ccu.h>
/ {
interrupt-parent = <&gic>;
@@ -60,7 +62,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll6 0>;
+ clocks = <&ccu CLK_BUS_LCD>, <&ccu CLK_BUS_DE_BE>,
+ <&ccu CLK_LCD_CH0>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_DRC>;
status = "disabled";
};
};
@@ -111,143 +115,6 @@
clock-frequency = <32768>;
clock-output-names = "osc32k";
};
-
- pll1: clk@01c20000 {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-pll1-clk";
- reg = <0x01c20000 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll1";
- };
-
- /* dummy clock until actually implemented */
- pll5: pll5_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <0>;
- clock-output-names = "pll5";
- };
-
- pll6: clk@01c20028 {
- #clock-cells = <1>;
- compatible = "allwinner,sun6i-a31-pll6-clk";
- reg = <0x01c20028 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "pll6", "pll6x2";
- };
-
- cpu: cpu_clk@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-cpu-clk";
- reg = <0x01c20050 0x4>;
-
- /*
- * PLL1 is listed twice here.
- * While it looks suspicious, it's actually documented
- * that way both in the datasheet and in the code from
- * Allwinner.
- */
- clocks = <&osc32k>, <&osc24M>, <&pll1>, <&pll1>;
- clock-output-names = "cpu";
- };
-
- axi: axi_clk@01c20050 {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-axi-clk";
- reg = <0x01c20050 0x4>;
- clocks = <&cpu>;
- clock-output-names = "axi";
- };
-
- ahb1: ahb1_clk@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun6i-a31-ahb1-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
- clock-output-names = "ahb1";
- };
-
- apb1: apb1_clk@01c20054 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb0-clk";
- reg = <0x01c20054 0x4>;
- clocks = <&ahb1>;
- clock-output-names = "apb1";
- };
-
- apb1_gates: clk@01c20068 {
- #clock-cells = <1>;
- compatible = "allwinner,sun8i-a23-apb1-gates-clk";
- reg = <0x01c20068 0x4>;
- clocks = <&apb1>;
- clock-indices = <0>, <5>,
- <12>, <13>;
- clock-output-names = "apb1_codec", "apb1_pio",
- "apb1_daudio0", "apb1_daudio1";
- };
-
- apb2: clk@01c20058 {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-apb1-clk";
- reg = <0x01c20058 0x4>;
- clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
- clock-output-names = "apb2";
- };
-
- apb2_gates: clk@01c2006c {
- #clock-cells = <1>;
- compatible = "allwinner,sun8i-a23-apb2-gates-clk";
- reg = <0x01c2006c 0x4>;
- clocks = <&apb2>;
- clock-indices = <0>, <1>,
- <2>, <16>,
- <17>, <18>,
- <19>, <20>;
- clock-output-names = "apb2_i2c0", "apb2_i2c1",
- "apb2_i2c2", "apb2_uart0",
- "apb2_uart1", "apb2_uart2",
- "apb2_uart3", "apb2_uart4";
- };
-
- mmc0_clk: clk@01c20088 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20088 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc0",
- "mmc0_output",
- "mmc0_sample";
- };
-
- mmc1_clk: clk@01c2008c {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c2008c 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc1",
- "mmc1_output",
- "mmc1_sample";
- };
-
- mmc2_clk: clk@01c20090 {
- #clock-cells = <1>;
- compatible = "allwinner,sun4i-a10-mmc-clk";
- reg = <0x01c20090 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "mmc2",
- "mmc2_output",
- "mmc2_sample";
- };
-
- usb_clk: clk@01c200cc {
- #clock-cells = <1>;
- #reset-cells = <1>;
- compatible = "allwinner,sun8i-a23-usb-clk";
- reg = <0x01c200cc 0x4>;
- clocks = <&osc24M>;
- clock-output-names = "usb_phy0", "usb_phy1", "usb_hsic",
- "usb_hsic_12M", "usb_ohci0";
- };
};
soc@01c00000 {
@@ -260,23 +127,23 @@
compatible = "allwinner,sun8i-a23-dma";
reg = <0x01c02000 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 6>;
- resets = <&ahb1_rst 6>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
#dma-cells = <1>;
};
mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c0f000 0x1000>;
- clocks = <&ahb1_gates 8>,
- <&mmc0_clk 0>,
- <&mmc0_clk 1>,
- <&mmc0_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC0>,
+ <&ccu CLK_MMC0>,
+ <&ccu CLK_MMC0_OUTPUT>,
+ <&ccu CLK_MMC0_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 8>;
+ resets = <&ccu RST_BUS_MMC0>;
reset-names = "ahb";
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -285,17 +152,17 @@
};
mmc1: mmc@01c10000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c10000 0x1000>;
- clocks = <&ahb1_gates 9>,
- <&mmc1_clk 0>,
- <&mmc1_clk 1>,
- <&mmc1_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC1>,
+ <&ccu CLK_MMC1>,
+ <&ccu CLK_MMC1_OUTPUT>,
+ <&ccu CLK_MMC1_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 9>;
+ resets = <&ccu RST_BUS_MMC1>;
reset-names = "ahb";
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -304,17 +171,17 @@
};
mmc2: mmc@01c11000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c11000 0x1000>;
- clocks = <&ahb1_gates 10>,
- <&mmc2_clk 0>,
- <&mmc2_clk 1>,
- <&mmc2_clk 2>;
+ clocks = <&ccu CLK_BUS_MMC2>,
+ <&ccu CLK_MMC2>,
+ <&ccu CLK_MMC2_OUTPUT>,
+ <&ccu CLK_MMC2_SAMPLE>;
clock-names = "ahb",
"mmc",
"output",
"sample";
- resets = <&ahb1_rst 10>;
+ resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
@@ -322,12 +189,55 @@
#size-cells = <0>;
};
+ nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
+ clock-names = "ahb", "mod";
+ resets = <&ccu RST_BUS_NAND>;
+ reset-names = "ahb";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usb_otg: usb@01c19000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01c19000 0x0400>;
+ clocks = <&ccu CLK_BUS_OTG>;
+ resets = <&ccu RST_BUS_OTG>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mc";
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ extcon = <&usbphy 0>;
+ status = "disabled";
+ };
+
+ usbphy: phy@01c19400 {
+ /*
+ * compatible and address regions get set in
+ * SoC specific dtsi file
+ */
+ clocks = <&ccu CLK_USB_PHY0>,
+ <&ccu CLK_USB_PHY1>;
+ clock-names = "usb0_phy",
+ "usb1_phy";
+ resets = <&ccu RST_USB_PHY0>,
+ <&ccu RST_USB_PHY1>;
+ reset-names = "usb0_reset",
+ "usb1_reset";
+ status = "disabled";
+ #phy-cells = <1>;
+ };
+
ehci0: usb@01c1a000 {
compatible = "allwinner,sun8i-a23-ehci", "generic-ehci";
reg = <0x01c1a000 0x100>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 26>;
- resets = <&ahb1_rst 26>;
+ clocks = <&ccu CLK_BUS_EHCI>;
+ resets = <&ccu RST_BUS_EHCI>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
@@ -337,18 +247,26 @@
compatible = "allwinner,sun8i-a23-ohci", "generic-ohci";
reg = <0x01c1a400 0x100>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 29>, <&usb_clk 16>;
- resets = <&ahb1_rst 29>;
+ clocks = <&ccu CLK_BUS_OHCI>, <&ccu CLK_USB_OHCI>;
+ resets = <&ccu RST_BUS_OHCI>;
phys = <&usbphy 1>;
phy-names = "usb";
status = "disabled";
};
+ ccu: clock@01c20000 {
+ reg = <0x01c20000 0x400>;
+ clocks = <&osc24M>, <&osc32k>;
+ clock-names = "hosc", "losc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
pio: pinctrl@01c20800 {
/* compatible gets set in SoC specific dtsi file */
reg = <0x01c20800 0x400>;
/* interrupts get set in SoC specific dtsi file */
- clocks = <&apb1_gates 5>;
+ clocks = <&ccu CLK_BUS_PIO>;
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
@@ -361,6 +279,16 @@
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
+ uart1_pins_a: uart1@0 {
+ allwinner,pins = "PG6", "PG7";
+ allwinner,function = "uart1";
+ };
+
+ uart1_pins_cts_rts_a: uart1-cts-rts@0 {
+ allwinner,pins = "PG8", "PG9";
+ allwinner,function = "uart1";
+ };
+
mmc0_pins_a: mmc0@0 {
allwinner,pins = "PF0", "PF1", "PF2",
"PF3", "PF4", "PF5";
@@ -414,24 +342,16 @@
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- };
- ahb1_rst: reset@01c202c0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202c0 0xc>;
- };
-
- apb1_rst: reset@01c202d0 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d0 0x4>;
- };
-
- apb2_rst: reset@01c202d8 {
- #reset-cells = <1>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- reg = <0x01c202d8 0x4>;
+ lcd_rgb666_pins: lcd-rgb666@0 {
+ allwinner,pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
+ "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
+ "PD24", "PD25", "PD26", "PD27";
+ allwinner,function = "lcd0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
};
timer@01c20c00 {
@@ -469,8 +389,8 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 16>;
- resets = <&apb2_rst 16>;
+ clocks = <&ccu CLK_BUS_UART0>;
+ resets = <&ccu RST_BUS_UART0>;
dmas = <&dma 6>, <&dma 6>;
dma-names = "rx", "tx";
status = "disabled";
@@ -482,8 +402,8 @@
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 17>;
- resets = <&apb2_rst 17>;
+ clocks = <&ccu CLK_BUS_UART1>;
+ resets = <&ccu RST_BUS_UART1>;
dmas = <&dma 7>, <&dma 7>;
dma-names = "rx", "tx";
status = "disabled";
@@ -495,8 +415,8 @@
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 18>;
- resets = <&apb2_rst 18>;
+ clocks = <&ccu CLK_BUS_UART2>;
+ resets = <&ccu RST_BUS_UART2>;
dmas = <&dma 8>, <&dma 8>;
dma-names = "rx", "tx";
status = "disabled";
@@ -508,8 +428,8 @@
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 19>;
- resets = <&apb2_rst 19>;
+ clocks = <&ccu CLK_BUS_UART3>;
+ resets = <&ccu RST_BUS_UART3>;
dmas = <&dma 9>, <&dma 9>;
dma-names = "rx", "tx";
status = "disabled";
@@ -521,8 +441,8 @@
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&apb2_gates 20>;
- resets = <&apb2_rst 20>;
+ clocks = <&ccu CLK_BUS_UART4>;
+ resets = <&ccu RST_BUS_UART4>;
dmas = <&dma 10>, <&dma 10>;
dma-names = "rx", "tx";
status = "disabled";
@@ -532,8 +452,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2ac00 0x400>;
interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 0>;
- resets = <&apb2_rst 0>;
+ clocks = <&ccu CLK_BUS_I2C0>;
+ resets = <&ccu RST_BUS_I2C0>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -543,8 +463,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2b000 0x400>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 1>;
- resets = <&apb2_rst 1>;
+ clocks = <&ccu CLK_BUS_I2C1>;
+ resets = <&ccu RST_BUS_I2C1>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -554,8 +474,8 @@
compatible = "allwinner,sun6i-a31-i2c";
reg = <0x01c2b400 0x400>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb2_gates 2>;
- resets = <&apb2_rst 2>;
+ clocks = <&ccu CLK_BUS_I2C2>;
+ resets = <&ccu RST_BUS_I2C2>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts b/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
index b2ce284a65a2..e3c7a25ca37d 100644
--- a/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
+++ b/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
@@ -42,70 +42,27 @@
/dts-v1/;
#include "sun8i-a23.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-#include <dt-bindings/pwm/pwm.h>
+#include "sun8i-reference-design-tablet.dtsi"
/ {
model = "Allwinner GT90H Dual Core Tablet (v4)";
compatible = "allwinner,gt90h-v4", "allwinner,sun8i-a23";
-
- aliases {
- serial0 = &r_uart;
- };
-
- backlight: backlight {
- compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&bl_en_pin_gt90h>;
- pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
- default-brightness-level = <8>;
- enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
};
&ehci0 {
status = "okay";
};
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-};
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
+&touchscreen {
+ reg = <0x40>;
+ compatible = "silead,gsl3675";
+ firmware-name = "gsl3675-gt90h.fw";
+ touchscreen-size-x = <1792>;
+ touchscreen-size-y = <1024>;
status = "okay";
};
&lradc {
- vref-supply = <&reg_vcc3v0>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-
button@600 {
label = "Back";
linux,code = <KEY_BACK>;
@@ -114,144 +71,6 @@
};
};
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_gt90h>;
- vmmc-supply = <&reg_aldo1>;
- bus-width = <4>;
- cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
- cd-inverted;
- status = "okay";
-};
-
-&pio {
- bl_en_pin_gt90h: bl_en_pin@0 {
- allwinner,pins = "PH6";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- mmc0_cd_pin_gt90h: mmc0_cd_pin@0 {
- allwinner,pins = "PB4";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins>;
- status = "okay";
-};
-
-&r_rsb {
- status = "okay";
-
- axp22x: pmic@3a3 {
- compatible = "x-powers,axp223";
- reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- eldoin-supply = <&reg_dcdc1>;
- };
-};
-
-&r_uart {
- pinctrl-names = "default";
- pinctrl-0 = <&r_uart_pins_a>;
- status = "okay";
-};
-
-#include "axp22x.dtsi"
-
-&reg_aldo1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-io";
-};
-
-&reg_aldo2 {
- regulator-always-on;
- regulator-min-microvolt = <2350000>;
- regulator-max-microvolt = <2650000>;
- regulator-name = "vdd-dll";
-};
-
-&reg_aldo3 {
- regulator-always-on;
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-pll-avcc";
-};
-
-&reg_dc1sw {
- regulator-name = "vcc-lcd";
-};
-
-&reg_dc5ldo {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpus";
-};
-
-&reg_dcdc1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-sys";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc5 {
- regulator-always-on;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-name = "vcc-dram";
-};
-
-&reg_dldo1 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
-};
-
-&reg_rtc_ldo {
- regulator-name = "vcc-rtc";
-};
-
-&simplefb_lcd {
- vcc-lcd-supply = <&reg_dc1sw>;
-};
-
-/*
- * FIXME for now we only support host mode and rely on u-boot to have
- * turned on Vbus which is controlled by the axp223 pmic on the board.
- *
- * Once we have axp223 support we should switch to fully supporting otg.
- */
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
&usbphy {
usb1_vbus-supply = <&reg_dldo1>;
- status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a23-inet86dz.dts b/arch/arm/boot/dts/sun8i-a23-inet86dz.dts
index 0f9f71b14047..d4405752a414 100644
--- a/arch/arm/boot/dts/sun8i-a23-inet86dz.dts
+++ b/arch/arm/boot/dts/sun8i-a23-inet86dz.dts
@@ -53,6 +53,15 @@
status = "okay";
};
+&touchscreen {
+ reg = <0x40>;
+ compatible = "silead,gsl1680";
+ firmware-name = "gsl1680-inet86dz.fw";
+ touchscreen-size-x = <960>;
+ touchscreen-size-y = <640>;
+ status = "okay";
+};
+
&usbphy {
usb1_vbus-supply = <&reg_dldo1>;
};
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
index e3004428e7a7..a86cbedda34c 100644
--- a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
@@ -47,4 +47,72 @@
/ {
model = "Polaroid MID2407PXE03 tablet";
compatible = "polaroid,mid2407pxe03", "allwinner,sun8i-a23";
+
+ aliases {
+ ethernet0 = &esp8089;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_pwrseq_pin_mid2407>;
+ reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL6 */
+ /* The esp8089 needs 200 ms after driving wifi-en high */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&i2c1 {
+ mma7660: accelerometer@4c {
+ reg = <0x4c>;
+ compatible = "fsl,mma7660";
+ };
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_dldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ esp8089: sdio_wifi@1 {
+ compatible = "esp,esp8089";
+ reg = <1>;
+ esp,crystal-26M-en = <2>;
+ };
+};
+
+&mmc1_pins_a {
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&r_pio {
+ wifi_pwrseq_pin_mid2407: wifi_pwrseq_pin@0 {
+ allwinner,pins = "PL6";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&reg_ldo_io1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-touchscreen";
+ status = "okay";
+};
+
+&touchscreen {
+ reg = <0x40>;
+ compatible = "silead,gsl1680";
+ firmware-name = "gsl1680-polaroid-mid2407pxe03.fw";
+ touchscreen-size-x = <960>;
+ touchscreen-size-y = <640>;
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ vddio-supply = <&reg_ldo_io1>;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
index 6d06e24d446b..9955f85f9147 100644
--- a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
@@ -47,4 +47,55 @@
/ {
model = "Polaroid MID2809PXE04 tablet";
compatible = "polaroid,mid2809pxe04", "allwinner,sun8i-a23";
+
+ aliases {
+ ethernet0 = &esp8089;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_pwrseq_pin_mid2809>;
+ reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL6 */
+ /* The esp8089 needs 200 ms after driving wifi-en high */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_dldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ esp8089: sdio_wifi@1 {
+ compatible = "esp,esp8089";
+ reg = <1>;
+ esp,crystal-26M-en = <2>;
+ };
+};
+
+&mmc1_pins_a {
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&r_pio {
+ wifi_pwrseq_pin_mid2809: wifi_pwrseq_pin@0 {
+ allwinner,pins = "PL6";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&touchscreen {
+ reg = <0x40>;
+ compatible = "silead,gsl3670";
+ firmware-name = "gsl3670-polaroid-mid2809pxe04.fw";
+ touchscreen-size-x = <1660>;
+ touchscreen-size-y = <890>;
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 92e6616979ea..54d045dab825 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -48,74 +48,10 @@
memory {
reg = <0x40000000 0x40000000>;
};
+};
- clocks {
- ahb1_gates: clk@01c20060 {
- #clock-cells = <1>;
- compatible = "allwinner,sun8i-a23-ahb1-gates-clk";
- reg = <0x01c20060 0x8>;
- clocks = <&ahb1>;
- clock-indices = <1>, <6>,
- <8>, <9>, <10>,
- <13>, <14>,
- <19>, <20>,
- <21>, <24>, <26>,
- <29>, <32>, <36>,
- <40>, <44>, <46>,
- <52>, <53>,
- <54>, <57>;
- clock-output-names = "ahb1_mipidsi", "ahb1_dma",
- "ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2",
- "ahb1_nand", "ahb1_sdram",
- "ahb1_hstimer", "ahb1_spi0",
- "ahb1_spi1", "ahb1_otg", "ahb1_ehci",
- "ahb1_ohci", "ahb1_ve", "ahb1_lcd",
- "ahb1_csi", "ahb1_be", "ahb1_fe",
- "ahb1_gpu", "ahb1_msgbox",
- "ahb1_spinlock", "ahb1_drc";
- };
-
- mbus_clk: clk@01c2015c {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-mbus-clk";
- reg = <0x01c2015c 0x4>;
- clocks = <&osc24M>, <&pll6 1>, <&pll5>;
- clock-output-names = "mbus";
- };
- };
-
- soc@01c00000 {
- usb_otg: usb@01c19000 {
- compatible = "allwinner,sun6i-a31-musb";
- reg = <0x01c19000 0x0400>;
- clocks = <&ahb1_gates 24>;
- resets = <&ahb1_rst 24>;
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "mc";
- phys = <&usbphy 0>;
- phy-names = "usb";
- extcon = <&usbphy 0>;
- status = "disabled";
- };
-
- usbphy: phy@01c19400 {
- compatible = "allwinner,sun8i-a23-usb-phy";
- reg = <0x01c19400 0x10>,
- <0x01c1a800 0x4>;
- reg-names = "phy_ctrl",
- "pmu1";
- clocks = <&usb_clk 8>,
- <&usb_clk 9>;
- clock-names = "usb0_phy",
- "usb1_phy";
- resets = <&usb_clk 0>,
- <&usb_clk 1>;
- reset-names = "usb0_reset",
- "usb1_reset";
- status = "disabled";
- #phy-cells = <1>;
- };
- };
+&ccu {
+ compatible = "allwinner,sun8i-a23-ccu";
};
&pio {
@@ -124,3 +60,13 @@
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
};
+
+&usb_otg {
+ compatible = "allwinner,sun6i-a31-musb";
+};
+
+&usbphy {
+ compatible = "allwinner,sun8i-a23-usb-phy";
+ reg = <0x01c19400 0x10>, <0x01c1a800 0x4>;
+ reg-names = "phy_ctrl", "pmu1";
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
index 65660324005c..f71159987cac 100644
--- a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
+++ b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
@@ -47,12 +47,27 @@
/ {
model = "Allwinner GA10H Quad Core Tablet (v1.1)";
compatible = "allwinner,ga10h-v1.1", "allwinner,sun8i-a33";
+
+ aliases {
+ /* Make u-boot set mac-address for rtl8703as (no eeprom) */
+ ethernet0 = &rtl8703as;
+ };
};
&ehci0 {
status = "okay";
};
+&touchscreen {
+ reg = <0x40>;
+ compatible = "silead,gsl3675";
+ firmware-name = "gsl3675-ga10h.fw";
+ touchscreen-size-x = <1630>;
+ touchscreen-size-y = <990>;
+ touchscreen-inverted-y;
+ status = "okay";
+};
+
&lradc {
button@600 {
label = "Back";
@@ -62,6 +77,19 @@
};
};
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_dldo1>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ rtl8703as: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
&ohci0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts b/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts
new file mode 100644
index 000000000000..fb4665576dff
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Hans de Goede <hdegoede@redhat.com>
+ * Copyright 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a33.dtsi"
+#include "sun8i-reference-design-tablet.dtsi"
+
+/ {
+ model = "INet-D978 Rev 02";
+ compatible = "primux,inet-d978-rev2", "allwinner,sun8i-a33";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ chosen {
+ /* Delete debug UART as serial0 is the UART for bluetooth */
+ /delete-property/stdout-path;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_d978>;
+
+ home {
+ label = "d978:blue:home";
+ gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */
+ };
+ };
+};
+
+&mmc1_pins_a {
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_dldo1>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ rtl8723bs: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
+&r_pio {
+ led_pin_d978: led_pin_d978@0 {
+ allwinner,pins = "PL5";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_20_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_uart {
+ status = "disabled";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins_a>,
+ <&uart1_pins_cts_rts_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
new file mode 100644
index 000000000000..9ea637e82b2d
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2016 - Stefan Mavrodiev <stefan.mavrodiev@gmail.com>
+ * Olimex LTD. <support@olimex.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a33.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Olimex A33-OLinuXino";
+ compatible = "olimex,a33-olinuxino","allwinner,sun8i-a33";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_olinuxino>;
+
+ green {
+ label = "a33-olinuxino:green:usr";
+ gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_olinuxino>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+ cd-inverted;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ led_pin_olinuxino: led_pins@0 {
+ allwinner,pins = "PB7";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_cd_pin_olinuxino: mmc0_cd_pin@0 {
+ allwinner,pins = "PB4";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ usb0_id_detect_pin: usb0_id_detect_pin@0 {
+ allwinner,pins = "PB3";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp22x: pmic@3a3 {
+ compatible = "x-powers,axp223";
+ reg = <0x3a3>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ eldoin-supply = <&reg_dcdc1>;
+ x-powers,drive-vbus-en;
+ };
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-io";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <2350000>;
+ regulator-max-microvolt = <2650000>;
+ regulator-name = "vdd-dll";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-avcc";
+};
+
+&reg_dc1sw {
+ regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_drivevbus {
+ regulator-name = "usb0-vbus";
+ status = "okay";
+};
+
+&reg_rtc_ldo {
+ regulator-name = "vcc-rtc";
+};
+
+&simplefb_lcd {
+ vcc-lcd-supply = <&reg_dc1sw>;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_b>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_detect_pin>;
+ usb0_id_det-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 001d8402ca18..fd1e1cddd4a8 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -59,107 +59,179 @@
};
};
+ de: display-engine {
+ compatible = "allwinner,sun8i-a33-display-engine";
+ allwinner,pipelines = <&fe0>;
+ status = "disabled";
+ };
+
memory {
reg = <0x40000000 0x80000000>;
};
- clocks {
- /* Dummy clock for pll11 (DDR1) until actually implemented */
- pll11: pll11_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <0>;
- clock-output-names = "pll11";
- };
+ soc@01c00000 {
+ tcon0: lcd-controller@01c0c000 {
+ compatible = "allwinner,sun8i-a33-tcon";
+ reg = <0x01c0c000 0x1000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_LCD>,
+ <&ccu CLK_LCD_CH0>;
+ clock-names = "ahb",
+ "tcon-ch0";
+ clock-output-names = "tcon-pixel-clock";
+ resets = <&ccu RST_BUS_LCD>;
+ reset-names = "lcd";
+ status = "disabled";
- ahb1_gates: clk@01c20060 {
- #clock-cells = <1>;
- compatible = "allwinner,sun8i-a33-ahb1-gates-clk";
- reg = <0x01c20060 0x8>;
- clocks = <&ahb1>;
- clock-indices = <1>, <5>,
- <6>, <8>, <9>,
- <10>, <13>, <14>,
- <19>, <20>,
- <21>, <24>, <26>,
- <29>, <32>, <36>,
- <40>, <44>, <46>,
- <52>, <53>,
- <54>, <57>,
- <58>;
- clock-output-names = "ahb1_mipidsi", "ahb1_ss",
- "ahb1_dma","ahb1_mmc0", "ahb1_mmc1",
- "ahb1_mmc2", "ahb1_nand", "ahb1_sdram",
- "ahb1_hstimer", "ahb1_spi0",
- "ahb1_spi1", "ahb1_otg", "ahb1_ehci",
- "ahb1_ohci", "ahb1_ve", "ahb1_lcd",
- "ahb1_csi", "ahb1_be", "ahb1_fe",
- "ahb1_gpu", "ahb1_msgbox",
- "ahb1_spinlock", "ahb1_drc",
- "ahb1_sat";
- };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- ss_clk: clk@01c2009c {
- #clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01c2009c 0x4>;
- clocks = <&osc24M>, <&pll6 0>;
- clock-output-names = "ss";
- };
+ tcon0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
- mbus_clk: clk@01c2015c {
- #clock-cells = <0>;
- compatible = "allwinner,sun8i-a23-mbus-clk";
- reg = <0x01c2015c 0x4>;
- clocks = <&osc24M>, <&pll6 1>, <&pll5>, <&pll11>;
- clock-output-names = "mbus";
+ tcon0_in_drc0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&drc0_out_tcon0>;
+ };
+ };
+
+ tcon0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
};
- };
- soc@01c00000 {
crypto: crypto-engine@01c15000 {
compatible = "allwinner,sun4i-a10-crypto";
reg = <0x01c15000 0x1000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ahb1_gates 5>, <&ss_clk>;
+ clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>;
clock-names = "ahb", "mod";
- resets = <&ahb1_rst 5>;
+ resets = <&ccu RST_BUS_SS>;
reset-names = "ahb";
};
- usb_otg: usb@01c19000 {
- compatible = "allwinner,sun8i-a33-musb";
- reg = <0x01c19000 0x0400>;
- clocks = <&ahb1_gates 24>;
- resets = <&ahb1_rst 24>;
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "mc";
- phys = <&usbphy 0>;
- phy-names = "usb";
- extcon = <&usbphy 0>;
+ fe0: display-frontend@01e00000 {
+ compatible = "allwinner,sun8i-a33-display-frontend";
+ reg = <0x01e00000 0x20000>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DE_FE>, <&ccu CLK_DE_FE>,
+ <&ccu CLK_DRAM_DE_FE>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&ccu RST_BUS_DE_FE>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fe0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ fe0_out_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_in_fe0>;
+ };
+ };
+ };
};
- usbphy: phy@01c19400 {
- compatible = "allwinner,sun8i-a33-usb-phy";
- reg = <0x01c19400 0x14>,
- <0x01c1a800 0x4>;
- reg-names = "phy_ctrl",
- "pmu1";
- clocks = <&usb_clk 8>,
- <&usb_clk 9>;
- clock-names = "usb0_phy",
- "usb1_phy";
- resets = <&usb_clk 0>,
- <&usb_clk 1>;
- reset-names = "usb0_reset",
- "usb1_reset";
- status = "disabled";
- #phy-cells = <1>;
+ be0: display-backend@01e60000 {
+ compatible = "allwinner,sun8i-a33-display-backend";
+ reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>;
+ reg-names = "be", "sat";
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>;
+ clock-names = "ahb", "mod",
+ "ram", "sat";
+ resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>;
+ reset-names = "be", "sat";
+ assigned-clocks = <&ccu CLK_DE_BE>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ be0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ be0_in_fe0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&fe0_out_be0>;
+ };
+ };
+
+ be0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ be0_out_drc0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&drc0_in_be0>;
+ };
+ };
+ };
+ };
+
+ drc0: drc@01e70000 {
+ compatible = "allwinner,sun8i-a33-drc";
+ reg = <0x01e70000 0x10000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DRC>, <&ccu CLK_DRC>,
+ <&ccu CLK_DRAM_DRC>;
+ clock-names = "ahb", "mod", "ram";
+ resets = <&ccu RST_BUS_DRC>;
+
+ assigned-clocks = <&ccu CLK_DRC>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ drc0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ drc0_in_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_out_drc0>;
+ };
+ };
+
+ drc0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ drc0_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_drc0>;
+ };
+ };
+ };
};
};
};
+&ccu {
+ compatible = "allwinner,sun8i-a33-ccu";
+};
+
&pio {
compatible = "allwinner,sun8i-a33-pinctrl";
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
@@ -173,3 +245,13 @@
};
};
+
+&usb_otg {
+ compatible = "allwinner,sun8i-a33-musb";
+};
+
+&usbphy {
+ compatible = "allwinner,sun8i-a33-usb-phy";
+ reg = <0x01c19400 0x14>, <0x01c1a800 0x4>;
+ reg-names = "phy_ctrl", "pmu1";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
index f3b1d5f6dbd2..06fddaae8edd 100644
--- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
@@ -185,7 +185,7 @@
&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_a>;
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
new file mode 100644
index 000000000000..3d64cafc1e90
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 James Pettigrew <james@innovum.com.au>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ model = "FriendlyARM NanoPi NEO";
+ compatible = "friendlyarm,nanopi-neo", "allwinner,sun8i-h3";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
+
+ pwr {
+ label = "nanopi:green:pwr";
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
+ default-state = "on";
+ };
+
+ status {
+ label = "nanopi:blue:status";
+ gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
+ };
+ };
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+ cd-inverted;
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&pio {
+ leds_opc: led-pins {
+ allwinner,pins = "PA10";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_pio {
+ leds_r_opc: led-pins {
+ allwinner,pins = "PL10";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
+
+&usbphy {
+ /* USB VBUS is always on */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index f93f5d1695c4..e5bcaba3e87f 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -54,6 +54,8 @@
aliases {
serial0 = &uart0;
+ /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet1 = &rtl8189;
};
chosen {
@@ -131,6 +133,14 @@
bus-width = <4>;
non-removable;
status = "okay";
+
+ /*
+ * Explicitly define the sdio device, so that we can add an ethernet
+ * alias for it (which e.g. makes u-boot set a mac-address).
+ */
+ rtl8189: sdio_wifi@1 {
+ reg = <1>;
+ };
};
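/*
 * Illustrative note, not part of this patch: because ethernet1 above is
 * aliased to this sdio node, a boot loader that follows the ethernetN
 * alias convention may inject a MAC address into the node, so the booted
 * kernel could end up seeing something like (example address only):
 *
 *	rtl8189: sdio_wifi@1 {
 *		reg = <1>;
 *		local-mac-address = [02 0a 0b 0c 0d 0e];
 *	};
 */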
&pio {
@@ -176,6 +186,24 @@
status = "okay";
};
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "disabled";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "disabled";
+};
+
&usb1_vbus_pin_a {
allwinner,pins = "PG13";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
new file mode 100644
index 000000000000..1550fee1ec68
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ model = "Xunlong Orange Pi Lite";
+ compatible = "xunlong,orangepi-lite", "allwinner,sun8i-h3";
+
+ aliases {
+ /* The H3 emac is not used so the wifi is ethernet0 */
+ ethernet0 = &rtl8189ftv;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
+
+ pwr_led {
+ label = "orangepi:green:pwr";
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+ status_led {
+ label = "orangepi:red:status";
+ gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ r_gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sw_r_opc>;
+
+ sw4 {
+ label = "sw4";
+ linux,code = <BTN_0>;
+ gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ir {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_pins_a>;
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+ cd-inverted;
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ /*
+ * Explicitly define the sdio device, so that we can add an ethernet
+ * alias for it (which e.g. makes u-boot set a mac-address).
+ */
+ rtl8189ftv: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&pio {
+ leds_opc: led_pins@0 {
+ allwinner,pins = "PA15";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_pio {
+ leds_r_opc: led_pins@0 {
+ allwinner,pins = "PL10";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ sw_r_opc: key_pins@0 {
+ allwinner,pins = "PL3";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
+
+&usbphy {
+ /* USB VBUS is always on */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 0adf932fd923..5c9b5bfa5c21 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -139,6 +139,24 @@
status = "okay";
};
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "disabled";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "disabled";
+};
+
&usbphy {
/* USB VBUS is always on */
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
new file mode 100644
index 000000000000..851fd2c2cc8c
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* The Orange Pi PC Plus is an extended version of the regular PC */
+#include "sun8i-h3-orangepi-pc.dts"
+
+/ {
+ model = "Xunlong Orange Pi PC Plus";
+ compatible = "xunlong,orangepi-pc-plus", "allwinner,sun8i-h3";
+
+ aliases {
+ /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet1 = &rtl8189ftv;
+ };
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ /*
+ * Explicitly define the sdio device, so that we can add an ethernet
+ * alias for it (which e.g. makes u-boot set a mac-address).
+ */
+ rtl8189ftv: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&mmc2_8bit_pins {
+ /* Increase drive strength for DDR modes */
+ allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+ /* eMMC is missing pull-ups */
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index daf50b9a6657..3ec971285aa3 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -161,6 +161,24 @@
status = "okay";
};
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "disabled";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "disabled";
+};
+
&usbphy {
/* USB VBUS is always on */
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index b0cb41787e09..bb585918cf54 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -44,7 +44,7 @@
#include "sun8i-h3-orangepi-2.dts"
/ {
- model = "Xunlong Orange Pi Plus";
+ model = "Xunlong Orange Pi Plus / Plus 2";
compatible = "xunlong,orangepi-plus", "allwinner,sun8i-h3";
reg_usb3_vbus: usb3-vbus {
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
new file mode 100644
index 000000000000..5851a47a3089
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The Orange Pi Plus 2E is an extended version of the Orange Pi PC Plus,
+ * with 2 GiB RAM and an external gigabit ethernet PHY.
+ */
+
+#include "sun8i-h3-orangepi-pc-plus.dts"
+
+/ {
+ model = "Xunlong Orange Pi Plus 2E";
+ compatible = "xunlong,orangepi-plus2e", "allwinner,sun8i-h3";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index fdf9fdbda267..75a865406d3e 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -150,7 +150,7 @@
};
mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c0f000 0x1000>;
clocks = <&ccu CLK_BUS_MMC0>,
<&ccu CLK_MMC0>,
@@ -169,7 +169,7 @@
};
mmc1: mmc@01c10000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c10000 0x1000>;
clocks = <&ccu CLK_BUS_MMC1>,
<&ccu CLK_MMC1>,
@@ -188,7 +188,7 @@
};
mmc2: mmc@01c11000 {
- compatible = "allwinner,sun5i-a13-mmc";
+ compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c11000 0x1000>;
clocks = <&ccu CLK_BUS_MMC2>,
<&ccu CLK_MMC2>,
@@ -327,6 +327,27 @@
interrupt-controller;
#interrupt-cells = <3>;
+ i2c0_pins: i2c0 {
+ allwinner,pins = "PA11", "PA12";
+ allwinner,function = "i2c0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2c1_pins: i2c1 {
+ allwinner,pins = "PA18", "PA19";
+ allwinner,function = "i2c1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ i2c2_pins: i2c2 {
+ allwinner,pins = "PE12", "PE13";
+ allwinner,function = "i2c2";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
mmc0_pins_a: mmc0@0 {
allwinner,pins = "PF0", "PF1", "PF2", "PF3",
"PF4", "PF5";
@@ -367,12 +388,33 @@
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart1_pins_a: uart1@0 {
- allwinner,pins = "PG6", "PG7", "PG8", "PG9";
+ uart1_pins: uart1 {
+ allwinner,pins = "PG6", "PG7";
+ allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ uart1_rts_cts_pins: uart1_rts_cts {
+ allwinner,pins = "PG8", "PG9";
allwinner,function = "uart1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
+
+ uart2_pins: uart2 {
+ allwinner,pins = "PA0", "PA1";
+ allwinner,function = "uart2";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ uart3_pins: uart3 {
+ allwinner,pins = "PG13", "PG14";
+ allwinner,function = "uart3";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
};
timer@01c20c00 {
@@ -389,6 +431,14 @@
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
};
+ pwm: pwm@01c21400 {
+ compatible = "allwinner,sun8i-h3-pwm";
+ reg = <0x01c21400 0x8>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
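/*
 * Illustrative sketch, not part of this patch: with #pwm-cells = <3> a
 * consumer refers to this controller as <&pwm channel period-ns flags>.
 * A hypothetical backlight node in a board dts (all values below are
 * placeholders) could look like:
 *
 *	backlight {
 *		compatible = "pwm-backlight";
 *		pwms = <&pwm 0 50000 0>;
 *		brightness-levels = <0 10 20 40 80 160 255>;
 *		default-brightness-level = <5>;
 *	};
 */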
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
@@ -441,6 +491,45 @@
status = "disabled";
};
+ i2c0: i2c@01c2ac00 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2ac00 0x400>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C0>;
+ resets = <&ccu RST_BUS_I2C0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@01c2b000 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2b000 0x400>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C1>;
+ resets = <&ccu RST_BUS_I2C1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2: i2c@01c2b400 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2b400 0x400>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C2>;
+ resets = <&ccu RST_BUS_I2C2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
gic: interrupt-controller@01c81000 {
compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
reg = <0x01c81000 0x1000>,
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi
index 60fa9585022b..29f837a47771 100644
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
@@ -42,10 +42,59 @@
#include "sunxi-reference-design-tablet.dtsi"
#include "sun8i-reference-design-tablet.dtsi"
+/ {
+ aliases {
+ serial0 = &r_uart;
+ /* Make u-boot set mac-address for wifi without an eeprom */
+ ethernet0 = &sdio_wifi;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ /*
+ * Q8 boards use various PL# pins as wifi-en. On other boards
+ * these may be connected to a wifi module output pin. To avoid
+ * short-circuits we configure these as inputs with pull-ups via
+ * pinctrl, instead of listing them as active-low reset-gpios.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_pwrseq_pin_q8>;
+ /* The esp8089 needs 200 ms after driving wifi-en high */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
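/*
 * Illustrative sketch, not part of this patch: on a board where the pin
 * is known to really be a dedicated wifi enable input to the module, the
 * same power sequence could instead be described with reset-gpios on the
 * mmc-pwrseq-simple node (the PL6 pin below is an assumption used only
 * for illustration):
 *
 *	wifi_pwrseq: wifi_pwrseq {
 *		compatible = "mmc-pwrseq-simple";
 *		reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>;
 *		post-power-on-delay-ms = <200>;
 *	};
 *
 * The Q8 designs deliberately avoid this, since the PL pin wiring varies
 * per board and driving a module output pin could cause a short-circuit,
 * as the comment above explains.
 */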
&ehci0 {
status = "okay";
};
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_dldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ sdio_wifi: sdio_wifi@1 {
+ reg = <1>;
+ };
+};
+
+&mmc1_pins_a {
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&r_pio {
+ wifi_pwrseq_pin_q8: wifi_pwrseq_pin@0 {
+ allwinner,pins = "PL6", "PL7", "PL11";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+};
+
&usbphy {
usb1_vbus-supply = <&reg_dldo1>;
};
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 9d9036140511..08cd00143635 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -63,6 +63,25 @@
};
};
+&i2c0 {
+ /*
+ * The gsl1680 is rated at 400 kHz and it will not work reliably at
+ * 100 kHz; this has been confirmed on multiple different q8 tablets.
+ * The gsl1680 is the only device on this bus.
+ */
+ clock-frequency = <400000>;
+
+ touchscreen: touchscreen@0 {
+ interrupt-parent = <&pio>;
+ interrupts = <1 5 IRQ_TYPE_EDGE_FALLING>; /* PB5 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_power_pin>;
+ power-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
+ /* Tablet dts must provide reg and compatible */
+ status = "disabled";
+ };
+};
+
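/*
 * Illustrative sketch, not part of this patch: a tablet dts enables this
 * shared node by supplying the missing compatible and reg (the gsl1680
 * compatible, the 0x40 address and the firmware name below are assumed
 * values for illustration only; each tablet must provide its own):
 *
 *	&touchscreen {
 *		compatible = "silead,gsl1680";
 *		reg = <0x40>;
 *		firmware-name = "gsl1680-example-tablet.fw";
 *		touchscreen-size-x = <960>;
 *		touchscreen-size-y = <640>;
 *		status = "okay";
 *	};
 */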
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
@@ -88,6 +107,13 @@
allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
};
+ ts_power_pin: ts_power_pin@0 {
+ allwinner,pins = "PH1";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
usb0_id_detect_pin: usb0_id_detect_pin@0 {
allwinner,pins = "PH8";
allwinner,function = "gpio_in";
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 1526b41c70f1..439847acd41e 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -103,6 +103,11 @@
allwinner,drive = <SUN4I_PINCTRL_40_MA>;
};
+&osc32k {
+ /* osc32k input is from AC100 */
+ clocks = <&ac100_rtc 0>;
+};
+
&pio {
led_pins_cubieboard4: led-pins@0 {
allwinner,pins = "PH6", "PH17";
@@ -248,6 +253,146 @@
reg_rtc_ldo: rtc_ldo {
regulator-name = "vcc-rtc-vdd1v8-io";
};
+
+ sw {
+ /* unused */
+ };
+ };
+ };
+
+ axp806: pmic@745 {
+ compatible = "x-powers,axp806";
+ reg = <0x745>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ bldoin-supply = <&reg_dcdce>;
+
+ regulators {
+ reg_s_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+ };
+
+ aldo2 {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's aldos
+ */
+ regulator-name = "s_aldo2";
+ };
+
+ aldo3 {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's aldos
+ */
+ regulator-name = "s_aldo3";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-name = "vcc18-efuse-adc-display-csi";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-name =
+ "vdd18-drampll-vcc18-pll-cpvdd";
+ };
+
+ bldo3 {
+ /* unused */
+ };
+
+ reg_bldo4: bldo4 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vcc12-hsic";
+ };
+
+ reg_cldo1: cldo1 {
+ /*
+ * This was 3V in the original design, but
+ * 3.3V is the recommended supply voltage
+ * for the Ethernet PHY.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-gmac-phy";
+ };
+
+ reg_cldo2: cldo2 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "afvcc-cam";
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-io-wifi-codec-io2";
+ };
+
+ reg_dcdca: dcdca {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpub";
+ };
+
+ reg_dcdcd: dcdcd {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-vpu";
+ };
+
+ reg_dcdce: dcdce {
+ regulator-always-on;
+ regulator-min-microvolt = <2100000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-name = "vcc-bldo-codec-ldoin";
+ };
+
+ sw {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's sw
+ */
+ regulator-name = "s_sw";
+ };
+ };
+ };
+
+ ac100: codec@e89 {
+ compatible = "x-powers,ac100";
+ reg = <0xe89>;
+
+ ac100_codec: codec {
+ compatible = "x-powers,ac100-codec";
+ interrupt-parent = <&r_pio>;
+ interrupts = <0 9 IRQ_TYPE_LEVEL_LOW>; /* PL9 */
+ #clock-cells = <0>;
+ clock-output-names = "4M_adda";
+ };
+
+ ac100_rtc: rtc {
+ compatible = "x-powers,ac100-rtc";
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&ac100_codec>;
+ #clock-cells = <1>;
+ clock-output-names = "cko1_rtc",
+ "cko2_rtc",
+ "cko3_rtc";
};
};
};
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 7fd22e888602..ceb6ef15d669 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -112,7 +112,8 @@
};
&ehci1 {
- status = "okay";
+ /* Enable if HSIC peripheral is connected */
+ status = "disabled";
};
&ehci2 {
@@ -152,6 +153,11 @@
status = "okay";
};
+&osc32k {
+ /* osc32k input is from AC100 */
+ clocks = <&ac100_rtc 0>;
+};
+
&pio {
led_pins_optimus: led-pins@0 {
allwinner,pins = "PH0", "PH1";
@@ -320,6 +326,146 @@
reg_rtc_ldo: rtc_ldo {
regulator-name = "vcc-rtc-vdd1v8-io";
};
+
+ sw {
+ /* unused */
+ };
+ };
+ };
+
+ axp806: pmic@745 {
+ compatible = "x-powers,axp806";
+ reg = <0x745>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ bldoin-supply = <&reg_dcdce>;
+
+ regulators {
+ reg_s_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+ };
+
+ aldo2 {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's aldos
+ */
+ regulator-name = "s_aldo2";
+ };
+
+ aldo3 {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's aldos
+ */
+ regulator-name = "s_aldo3";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-name = "vcc18-efuse-adc-display-csi";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-name =
+ "vdd18-drampll-vcc18-pll-cpvdd";
+ };
+
+ bldo3 {
+ /* unused */
+ };
+
+ reg_bldo4: bldo4 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vcc12-hsic";
+ };
+
+ reg_cldo1: cldo1 {
+ /*
+ * This was 3V in the original design, but
+ * 3.3V is the recommended supply voltage
+ * for the Ethernet PHY.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-gmac-phy";
+ };
+
+ reg_cldo2: cldo2 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "afvcc-cam";
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-io-wifi-codec-io2";
+ };
+
+ reg_dcdca: dcdca {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpub";
+ };
+
+ reg_dcdcd: dcdcd {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-vpu";
+ };
+
+ reg_dcdce: dcdce {
+ regulator-always-on;
+ regulator-min-microvolt = <2100000>;
+ regulator-max-microvolt = <2100000>;
+ regulator-name = "vcc-bldo-codec-ldoin";
+ };
+
+ sw {
+ /*
+ * unused, but use a different name to
+ * avoid name clash with axp809's sw
+ */
+ regulator-name = "s_sw";
+ };
+ };
+ };
+
+ ac100: codec@e89 {
+ compatible = "x-powers,ac100";
+ reg = <0xe89>;
+
+ ac100_codec: codec {
+ compatible = "x-powers,ac100-codec";
+ interrupt-parent = <&r_pio>;
+ interrupts = <0 9 IRQ_TYPE_LEVEL_LOW>; /* PL9 */
+ #clock-cells = <0>;
+ clock-output-names = "4M_adda";
+ };
+
+ ac100_rtc: rtc {
+ compatible = "x-powers,ac100-rtc";
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&ac100_codec>;
+ #clock-cells = <1>;
+ clock-output-names = "cko1_rtc",
+ "cko2_rtc",
+ "cko3_rtc";
};
};
};
@@ -338,7 +484,9 @@
};
&usbphy2 {
- status = "okay";
+ phy-supply = <&reg_bldo4>;
+ /* Enable if HSIC peripheral is connected */
+ status = "disabled";
};
&usbphy3 {
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index f68b3242b33a..3c5214cbe4e6 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -148,15 +148,14 @@
/*
* The 32k clock is from an external source, normally the
- * AC100 codec/RTC chip. This clock is by default enabled
- * and clocked at 32768 Hz, from the oscillator connected
- * to the AC100. It is configurable, but no such driver or
- * bindings exist yet.
+ * AC100 codec/RTC chip. This serves as a placeholder for
+ * board dts files to specify the source.
*/
osc32k: osc32k_clk {
#clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
+ compatible = "fixed-factor-clock";
+ clock-div = <1>;
+ clock-mult = <1>;
clock-output-names = "osc32k";
};
@@ -899,8 +898,7 @@
resets = <&apbs_rst 0>;
gpio-controller;
interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
+ #interrupt-cells = <3>;
#gpio-cells = <3>;
r_ir_pins: r_ir {
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index cb9393a53422..8932ea3afd5f 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -672,7 +672,7 @@
};
usb@7d000000 {
- compatible = "nvidia,tegra30-ehci", "usb-ehci";
+ compatible = "nvidia,tegra114-ehci", "nvidia,tegra30-ehci", "usb-ehci";
reg = <0x7d000000 0x4000>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
phy_type = "utmi";
@@ -684,7 +684,7 @@
};
phy1: usb-phy@7d000000 {
- compatible = "nvidia,tegra30-usb-phy";
+ compatible = "nvidia,tegra114-usb-phy", "nvidia,tegra30-usb-phy";
reg = <0x7d000000 0x4000 0x7d000000 0x4000>;
phy_type = "utmi";
clocks = <&tegra_car TEGRA114_CLK_USBD>,
@@ -708,7 +708,7 @@
};
usb@7d008000 {
- compatible = "nvidia,tegra30-ehci", "usb-ehci";
+ compatible = "nvidia,tegra114-ehci", "nvidia,tegra30-ehci", "usb-ehci";
reg = <0x7d008000 0x4000>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
phy_type = "utmi";
@@ -720,7 +720,7 @@
};
phy3: usb-phy@7d008000 {
- compatible = "nvidia,tegra30-usb-phy";
+ compatible = "nvidia,tegra114-usb-phy", "nvidia,tegra30-usb-phy";
reg = <0x7d008000 0x4000 0x7d000000 0x4000>;
phy_type = "utmi";
clocks = <&tegra_car TEGRA114_CLK_USB3>,
diff --git a/arch/arm/boot/dts/uniphier-common32.dtsi b/arch/arm/boot/dts/uniphier-common32.dtsi
index 03f60ec340b5..8c8a85176b64 100644
--- a/arch/arm/boot/dts/uniphier-common32.dtsi
+++ b/arch/arm/boot/dts/uniphier-common32.dtsi
@@ -1,7 +1,8 @@
/*
* Device Tree Source commonly used by UniPhier ARM SoCs
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,6 +46,11 @@
/include/ "skeleton.dtsi"
/ {
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
clocks {
refclk: ref {
#clock-cells = <0>;
@@ -66,7 +72,7 @@
interrupts = <0 33 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 0>;
};
serial1: serial@54006900 {
@@ -76,7 +82,7 @@
interrupts = <0 35 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 1>;
};
serial2: serial@54006a00 {
@@ -86,7 +92,7 @@
interrupts = <0 37 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 2>;
};
serial3: serial@54006b00 {
@@ -96,7 +102,7 @@
interrupts = <0 177 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 3>;
};
system_bus: system-bus@58c00000 {
@@ -114,6 +120,34 @@
reg = <0x59801000 0x400>;
};
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ mio_clk: clock {
+ #clock-cells = <1>;
+ };
+
+ mio_rst: reset {
+ #reset-cells = <1>;
+ };
+ };
+
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-perictrl",
+ "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ peri_clk: clock {
+ #clock-cells = <1>;
+ };
+
+ peri_rst: reset {
+ #reset-cells = <1>;
+ };
+ };
+
timer@60000200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x60000200 0x20>;
@@ -137,11 +171,26 @@
};
soc-glue@5f800000 {
- compatible = "simple-mfd", "syscon";
+ compatible = "socionext,uniphier-soc-glue",
+ "simple-mfd", "syscon";
reg = <0x5f800000 0x2000>;
pinctrl: pinctrl {
- /* specify compatible in each SoC DTSI */
+ /* specify compatible in each SoC DTSI */
+ };
+ };
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ sys_clk: clock {
+ #clock-cells = <1>;
+ };
+
+ sys_rst: reset {
+ #reset-cells = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts b/arch/arm/boot/dts/uniphier-ld4-ref.dts
index ec94b7a661f2..110031bc0e7e 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld4-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD4 Reference Board
+ * Device Tree Source for UniPhier LD4 Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-ld4.dtsi"
+/include/ "uniphier-ld4.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-LD4 Reference Board";
- compatible = "socionext,ph1-ld4-ref", "socionext,ph1-ld4";
+ model = "UniPhier LD4 Reference Board";
+ compatible = "socionext,uniphier-ld4-ref", "socionext,uniphier-ld4";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index debad7ffef05..95f342c9d9c1 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD4 SoC
+ * Device Tree Source for UniPhier LD4 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,7 +46,7 @@
/include/ "uniphier-common32.dtsi"
/ {
- compatible = "socionext,ph1-ld4";
+ compatible = "socionext,uniphier-ld4";
cpus {
#address-cells = <1>;
@@ -55,6 +56,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
@@ -65,18 +67,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <36864000>;
- };
-
- iobus_clk: iobus_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
};
};
@@ -101,7 +91,7 @@
interrupts = <0 41 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -114,7 +104,7 @@
interrupts = <0 42 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -127,7 +117,7 @@
interrupts = <0 43 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <400000>;
};
@@ -140,7 +130,7 @@
interrupts = <0 44 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -151,6 +141,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
+ clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
};
usb1: usb@5a810100 {
@@ -160,6 +152,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
+ clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
};
usb2: usb@5a820100 {
@@ -169,6 +163,8 @@
interrupts = <0 82 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
+ clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
};
};
@@ -181,6 +177,31 @@
interrupts = <0 29 4>;
};
+&mio_clk {
+ compatible = "socionext,uniphier-ld4-mio-clock";
+};
+
+&mio_rst {
+ compatible = "socionext,uniphier-ld4-mio-reset";
+ resets = <&sys_rst 7>;
+};
+
+&peri_clk {
+ compatible = "socionext,uniphier-ld4-peri-clock";
+};
+
+&peri_rst {
+ compatible = "socionext,uniphier-ld4-peri-reset";
+};
+
&pinctrl {
compatible = "socionext,uniphier-ld4-pinctrl";
};
+
+&sys_clk {
+ compatible = "socionext,uniphier-ld4-clock";
+};
+
+&sys_rst {
+ compatible = "socionext,uniphier-ld4-reset";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
index b8134c6e094b..c05d631dcf02 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD6b Reference Board
+ * Device Tree Source for UniPhier LD6b Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-ld6b.dtsi"
+/include/ "uniphier-ld6b.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-LD6b Reference Board";
- compatible = "socionext,ph1-ld6b-ref", "socionext,ph1-ld6b";
+ model = "UniPhier LD6b Reference Board";
+ compatible = "socionext,uniphier-ld6b-ref", "socionext,uniphier-ld6b";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi b/arch/arm/boot/dts/uniphier-ld6b.dtsi
index 19c107c66bae..905c77d499eb 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld6b.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD6b SoC
+ * Device Tree Source for UniPhier LD6b SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,14 +44,14 @@
*/
/*
- * PH1-LD6b consists of two silicon dies: D-chip and A-chip.
- * The D-chip (digital chip) is the same as the ProXstream2 die.
- * Reuse the ProXstream2 device tree with some properties overridden.
+ * LD6b consists of two silicon dies: D-chip and A-chip.
+ * The D-chip (digital chip) is the same as the PXs2 die.
+ * Reuse the PXs2 device tree with some properties overridden.
*/
-/include/ "uniphier-proxstream2.dtsi"
+/include/ "uniphier-pxs2.dtsi"
/ {
- compatible = "socionext,ph1-ld6b";
+ compatible = "socionext,uniphier-ld6b";
};
/* UART3 unavailable: the pads are not wired to the package balls */
@@ -59,7 +60,7 @@
};
/*
- * PH1-LD6b and ProXstream2 have completely different packages,
+ * LD6b and PXs2 have completely different packages,
* which makes the pinctrl driver unshareable.
*/
&pinctrl {
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4-ace.dts b/arch/arm/boot/dts/uniphier-pro4-ace.dts
index d34358632bec..0ab0a40c041e 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4-ace.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ace.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-Pro4 Ace Board
+ * Device Tree Source for UniPhier Pro4 Ace Board
*
- * Copyright (C) 2016 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,11 +44,11 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-pro4.dtsi"
+/include/ "uniphier-pro4.dtsi"
/ {
- model = "UniPhier PH1-Pro4 Ace Board";
- compatible = "socionext,ph1-pro4-ace", "socionext,ph1-pro4";
+ model = "UniPhier Pro4 Ace Board";
+ compatible = "socionext,uniphier-pro4-ace", "socionext,uniphier-pro4";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts b/arch/arm/boot/dts/uniphier-pro4-ref.dts
index 95f631a3de35..9e92e60d25ce 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-Pro4 Reference Board
+ * Device Tree Source for UniPhier Pro4 Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-pro4.dtsi"
+/include/ "uniphier-pro4.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-Pro4 Reference Board";
- compatible = "socionext,ph1-pro4-ref", "socionext,ph1-pro4";
+ model = "UniPhier Pro4 Reference Board";
+ compatible = "socionext,uniphier-pro4-ref", "socionext,uniphier-pro4";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4-sanji.dts b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
index 7c3a1fcc9f3c..dc4ea8832ce2 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4-sanji.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-Pro4 Sanji Board
+ * Device Tree Source for UniPhier Pro4 Sanji Board
*
- * Copyright (C) 2016 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,11 +44,11 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-pro4.dtsi"
+/include/ "uniphier-pro4.dtsi"
/ {
- model = "UniPhier PH1-Pro4 Sanji Board";
- compatible = "socionext,ph1-pro4-sanji", "socionext,ph1-pro4";
+ model = "UniPhier Pro4 Sanji Board";
+ compatible = "socionext,uniphier-pro4-sanji", "socionext,uniphier-pro4";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index 7b9da0852005..ba700267ad66 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-Pro4 SoC
+ * Device Tree Source for UniPhier Pro4 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,17 +46,17 @@
/include/ "uniphier-common32.dtsi"
/ {
- compatible = "socionext,ph1-pro4";
+ compatible = "socionext,uniphier-pro4";
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "socionext,uniphier-smp";
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -63,6 +64,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
@@ -73,18 +75,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <73728000>;
- };
-
- i2c_clk: i2c_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
- };
};
};
@@ -109,7 +99,7 @@
interrupts = <0 41 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -122,7 +112,7 @@
interrupts = <0 42 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -135,7 +125,7 @@
interrupts = <0 43 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <100000>;
};
@@ -148,7 +138,7 @@
interrupts = <0 44 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -161,7 +151,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 25 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 9>;
clock-frequency = <400000>;
};
@@ -172,7 +162,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 26 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 10>;
clock-frequency = <400000>;
};
@@ -183,6 +173,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
+ clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
};
usb3: usb@5a810100 {
@@ -192,6 +184,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb3>;
+ clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
};
};
@@ -199,6 +193,31 @@
clock-frequency = <25000000>;
};
+&mio_clk {
+ compatible = "socionext,uniphier-pro4-mio-clock";
+};
+
+&mio_rst {
+ compatible = "socionext,uniphier-pro4-mio-reset";
+ resets = <&sys_rst 7>;
+};
+
+&peri_clk {
+ compatible = "socionext,uniphier-pro4-peri-clock";
+};
+
+&peri_rst {
+ compatible = "socionext,uniphier-pro4-peri-reset";
+};
+
&pinctrl {
compatible = "socionext,uniphier-pro4-pinctrl";
};
+
+&sys_clk {
+ compatible = "socionext,uniphier-pro4-clock";
+};
+
+&sys_rst {
+ compatible = "socionext,uniphier-pro4-reset";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 7e4aa2fde719..2c49c3614bda 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-Pro5 SoC
+ * Device Tree Source for UniPhier Pro5 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,17 +46,17 @@
/include/ "uniphier-common32.dtsi"
/ {
- compatible = "socionext,ph1-pro5";
+ compatible = "socionext,uniphier-pro5";
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "socionext,uniphier-smp";
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -63,6 +64,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
@@ -73,18 +75,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <73728000>;
- };
-
- i2c_clk: i2c_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
- };
};
};
@@ -121,7 +111,7 @@
interrupts = <0 41 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -134,7 +124,7 @@
interrupts = <0 42 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -147,7 +137,7 @@
interrupts = <0 43 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <100000>;
};
@@ -160,7 +150,7 @@
interrupts = <0 44 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -173,7 +163,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 25 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 9>;
clock-frequency = <400000>;
};
@@ -184,7 +174,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 26 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 10>;
clock-frequency = <400000>;
};
};
@@ -193,6 +183,30 @@
clock-frequency = <20000000>;
};
+&mio_clk {
+ compatible = "socionext,uniphier-pro5-mio-clock";
+};
+
+&mio_rst {
+ compatible = "socionext,uniphier-pro5-mio-reset";
+};
+
+&peri_clk {
+ compatible = "socionext,uniphier-pro5-peri-clock";
+};
+
+&peri_rst {
+ compatible = "socionext,uniphier-pro5-peri-reset";
+};
+
&pinctrl {
compatible = "socionext,uniphier-pro5-pinctrl";
};
+
+&sys_clk {
+ compatible = "socionext,uniphier-pro5-clock";
+};
+
+&sys_rst {
+ compatible = "socionext,uniphier-pro5-reset";
+};
diff --git a/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
index 98d895b7af1d..373818ace086 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier ProXstream2 Gentil Board
+ * Device Tree Source for UniPhier PXs2 Gentil Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,11 +44,12 @@
*/
/dts-v1/;
-/include/ "uniphier-proxstream2.dtsi"
+/include/ "uniphier-pxs2.dtsi"
/ {
- model = "UniPhier ProXstream2 Gentil Board";
- compatible = "socionext,proxstream2-gentil", "socionext,proxstream2";
+ model = "UniPhier PXs2 Gentil Board";
+ compatible = "socionext,uniphier-pxs2-gentil",
+ "socionext,uniphier-pxs2";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
index 1fb8bd7bb686..51a3eacddfc6 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier ProXstream2 Vodka Board
+ * Device Tree Source for UniPhier PXs2 Vodka Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,11 +44,11 @@
*/
/dts-v1/;
-/include/ "uniphier-proxstream2.dtsi"
+/include/ "uniphier-pxs2.dtsi"
/ {
- model = "UniPhier ProXstream2 Vodka Board";
- compatible = "socionext,proxstream2-vodka", "socionext,proxstream2";
+ model = "UniPhier PXs2 Vodka Board";
+ compatible = "socionext,uniphier-pxs2-vodka", "socionext,uniphier-pxs2";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-proxstream2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index d00d6f5c2668..8789cd518933 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier ProXstream2 SoC
+ * Device Tree Source for UniPhier PXs2 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,17 +46,17 @@
/include/ "uniphier-common32.dtsi"
/ {
- compatible = "socionext,proxstream2";
+ compatible = "socionext,uniphier-pxs2";
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "socionext,uniphier-smp";
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -63,6 +64,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -70,6 +72,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <2>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -77,6 +80,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <3>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
@@ -87,18 +91,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <88900000>;
- };
-
- i2c_clk: i2c_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
- };
};
};
@@ -123,7 +115,7 @@
interrupts = <0 41 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -136,7 +128,7 @@
interrupts = <0 42 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -149,7 +141,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
interrupts = <0 43 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <100000>;
};
@@ -162,7 +154,7 @@
interrupts = <0 44 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -173,7 +165,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 45 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 8>;
clock-frequency = <400000>;
};
@@ -184,7 +176,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 25 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 9>;
clock-frequency = <400000>;
};
@@ -195,7 +187,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 26 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 10>;
clock-frequency = <400000>;
};
};
@@ -204,6 +196,30 @@
clock-frequency = <25000000>;
};
+&mio_clk {
+ compatible = "socionext,uniphier-pxs2-mio-clock";
+};
+
+&mio_rst {
+ compatible = "socionext,uniphier-pxs2-mio-reset";
+};
+
+&peri_clk {
+ compatible = "socionext,uniphier-pxs2-peri-clock";
+};
+
+&peri_rst {
+ compatible = "socionext,uniphier-pxs2-peri-reset";
+};
+
&pinctrl {
compatible = "socionext,uniphier-pxs2-pinctrl";
};
+
+&sys_clk {
+ compatible = "socionext,uniphier-pxs2-clock";
+};
+
+&sys_rst {
+ compatible = "socionext,uniphier-pxs2-reset";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts b/arch/arm/boot/dts/uniphier-sld3-ref.dts
index acb420492b36..ac792ae07ae0 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld3-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-sLD3 Reference Board
+ * Device Tree Source for UniPhier sLD3 Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-sld3.dtsi"
+/include/ "uniphier-sld3.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-sLD3 Reference Board";
- compatible = "socionext,ph1-sld3-ref", "socionext,ph1-sld3";
+ model = "UniPhier sLD3 Reference Board";
+ compatible = "socionext,uniphier-sld3-ref", "socionext,uniphier-sld3";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi b/arch/arm/boot/dts/uniphier-sld3.dtsi
index 03292f443305..5fa96c939b5c 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld3.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-sLD3 SoC
+ * Device Tree Source for UniPhier sLD3 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,17 +46,17 @@
/include/ "skeleton.dtsi"
/ {
- compatible = "socionext,ph1-sld3";
+ compatible = "socionext,uniphier-sld3";
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "socionext,uniphier-smp";
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
@@ -63,10 +64,16 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
clocks {
refclk: ref {
#clock-cells = <0>;
@@ -79,18 +86,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <36864000>;
- };
-
- iobus_clk: iobus_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
};
soc {
@@ -139,7 +134,7 @@
status = "disabled";
reg = <0x54006800 0x40>;
interrupts = <0 33 4>;
- clocks = <&uart_clk>;
+ clocks = <&sys_clk 0>;
fifo-size = <64>;
};
@@ -148,7 +143,7 @@
status = "disabled";
reg = <0x54006900 0x40>;
interrupts = <0 35 4>;
- clocks = <&uart_clk>;
+ clocks = <&sys_clk 0>;
fifo-size = <64>;
};
@@ -157,7 +152,7 @@
status = "disabled";
reg = <0x54006a00 0x40>;
interrupts = <0 37 4>;
- clocks = <&uart_clk>;
+ clocks = <&sys_clk 0>;
fifo-size = <64>;
};
@@ -168,7 +163,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 41 1>;
- clocks = <&iobus_clk>;
+ clocks = <&sys_clk 1>;
clock-frequency = <100000>;
};
@@ -179,7 +174,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 42 1>;
- clocks = <&iobus_clk>;
+ clocks = <&sys_clk 1>;
clock-frequency = <100000>;
};
@@ -190,7 +185,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 43 1>;
- clocks = <&iobus_clk>;
+ clocks = <&sys_clk 1>;
clock-frequency = <100000>;
};
@@ -201,7 +196,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 44 1>;
- clocks = <&iobus_clk>;
+ clocks = <&sys_clk 1>;
clock-frequency = <100000>;
};
@@ -212,7 +207,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 45 1>;
- clocks = <&iobus_clk>;
+ clocks = <&sys_clk 1>;
clock-frequency = <400000>;
};
@@ -229,6 +224,22 @@
reg = <0x59801000 0x400>;
};
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ mio_clk: clock {
+ compatible = "socionext,uniphier-sld3-mio-clock";
+ #clock-cells = <1>;
+ };
+
+ mio_rst: reset {
+ compatible = "socionext,uniphier-sld3-mio-reset";
+ #reset-cells = <1>;
+ };
+ };
+
usb0: usb@5a800100 {
compatible = "socionext,uniphier-ehci", "generic-ehci";
status = "disabled";
@@ -256,5 +267,21 @@
reg = <0x5a830100 0x100>;
interrupts = <0 83 4>;
};
+
+ sysctrl@f1840000 {
+ compatible = "socionext,uniphier-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0xf1840000 0x4000>;
+
+ sys_clk: clock {
+ compatible = "socionext,uniphier-sld3-clock";
+ #clock-cells = <1>;
+ };
+
+ sys_rst: reset {
+ compatible = "socionext,uniphier-sld3-reset";
+ #reset-cells = <1>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts b/arch/arm/boot/dts/uniphier-sld8-ref.dts
index d594f40e7f76..a8291f988066 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld8-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-sLD8 Reference Board
+ * Device Tree Source for UniPhier sLD8 Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-sld8.dtsi"
+/include/ "uniphier-sld8.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-sLD8 Reference Board";
- compatible = "socionext,ph1-sld8-ref", "socionext,ph1-sld8";
+ model = "UniPhier sLD8 Reference Board";
+ compatible = "socionext,uniphier-sld8-ref", "socionext,uniphier-sld8";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index 467f9d8e9873..d8cf0e7e11ea 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-sLD8 SoC
+ * Device Tree Source for UniPhier sLD8 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,7 +46,7 @@
/include/ "uniphier-common32.dtsi"
/ {
- compatible = "socionext,ph1-sld8";
+ compatible = "socionext,uniphier-sld8";
cpus {
#address-cells = <1>;
@@ -55,6 +56,7 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&l2>;
};
};
@@ -65,18 +67,6 @@
compatible = "fixed-clock";
clock-frequency = <50000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <80000000>;
- };
-
- iobus_clk: iobus_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
};
};
@@ -101,7 +91,7 @@
interrupts = <0 41 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -114,7 +104,7 @@
interrupts = <0 42 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -127,7 +117,7 @@
interrupts = <0 43 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <400000>;
};
@@ -140,7 +130,7 @@
interrupts = <0 44 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&iobus_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -151,6 +141,8 @@
interrupts = <0 80 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
+ clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
};
usb1: usb@5a810100 {
@@ -160,6 +152,8 @@
interrupts = <0 81 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
+ clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
};
usb2: usb@5a820100 {
@@ -169,6 +163,8 @@
interrupts = <0 82 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
+ clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
};
};
@@ -180,6 +176,31 @@
interrupts = <0 29 4>;
};
+&mio_clk {
+ compatible = "socionext,uniphier-sld8-mio-clock";
+};
+
+&mio_rst {
+ compatible = "socionext,uniphier-sld8-mio-reset";
+ resets = <&sys_rst 7>;
+};
+
+&peri_clk {
+ compatible = "socionext,uniphier-sld8-peri-clock";
+};
+
+&peri_rst {
+ compatible = "socionext,uniphier-sld8-peri-reset";
+};
+
&pinctrl {
compatible = "socionext,uniphier-sld8-pinctrl";
};
+
+&sys_clk {
+ compatible = "socionext,uniphier-sld8-clock";
+};
+
+&sys_rst {
+ compatible = "socionext,uniphier-sld8-reset";
+};
diff --git a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
index a8a8e434fb27..1e0b823f7e8f 100644
--- a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
@@ -53,6 +53,12 @@
panel: panel {
compatible = "edt,et057090dhu";
backlight = <&bl>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dcu_out>;
+ };
+ };
};
reg_3v3: regulator-3v3 {
@@ -91,8 +97,13 @@
&dcu0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dcu0_1>;
- fsl,panel = <&panel>;
status = "okay";
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
};
&dspi1 {
diff --git a/arch/arm/boot/dts/vf610m4.dtsi b/arch/arm/boot/dts/vf610m4.dtsi
index 9ffe2eb68ed4..9f2c731839f2 100644
--- a/arch/arm/boot/dts/vf610m4.dtsi
+++ b/arch/arm/boot/dts/vf610m4.dtsi
@@ -42,6 +42,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include "vfxxx.dtsi"
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
index 3f47f1203c6b..6053f64c3752 100644
--- a/arch/arm/common/bL_switcher_dummy_if.c
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -56,16 +56,4 @@ static struct miscdevice bL_switcher_device = {
"b.L_switcher",
&bL_switcher_fops
};
-
-static int __init bL_switcher_dummy_if_init(void)
-{
- return misc_register(&bL_switcher_device);
-}
-
-static void __exit bL_switcher_dummy_if_exit(void)
-{
- misc_deregister(&bL_switcher_device);
-}
-
-module_init(bL_switcher_dummy_if_init);
-module_exit(bL_switcher_dummy_if_exit);
+module_misc_device(bL_switcher_device);
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 2e076c492005..4ecd5120fce7 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -15,6 +15,7 @@
* from machine specific code with proper arguments when required.
*/
#include <linux/module.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -107,6 +108,7 @@ struct sa1111 {
spinlock_t lock;
void __iomem *base;
struct sa1111_platform_data *pdata;
+ struct gpio_chip gc;
#ifdef CONFIG_PM
void *saved_state;
#endif
@@ -231,132 +233,44 @@ static void sa1111_irq_handler(struct irq_desc *desc)
#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
-static void sa1111_ack_irq(struct irq_data *d)
-{
-}
-
-static void sa1111_mask_lowirq(struct irq_data *d)
+static u32 sa1111_irqmask(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned long ie0;
- ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
- ie0 &= ~SA1111_IRQMASK_LO(d->irq);
- writel(ie0, mapbase + SA1111_INTEN0);
+ return BIT((d->irq - sachip->irq_base) & 31);
}
-static void sa1111_unmask_lowirq(struct irq_data *d)
+static int sa1111_irqbank(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned long ie0;
-
- ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
- ie0 |= SA1111_IRQMASK_LO(d->irq);
- sa1111_writel(ie0, mapbase + SA1111_INTEN0);
-}
-
-/*
- * Attempt to re-trigger the interrupt. The SA1111 contains a register
- * (INTSET) which claims to do this. However, in practice no amount of
- * manipulation of INTEN and INTSET guarantees that the interrupt will
- * be triggered. In fact, its very difficult, if not impossible to get
- * INTSET to re-trigger the interrupt.
- */
-static int sa1111_retrigger_lowirq(struct irq_data *d)
-{
- struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_LO(d->irq);
- unsigned long ip0;
- int i;
-
- ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
- for (i = 0; i < 8; i++) {
- sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
- sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
- if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
- break;
- }
- if (i == 8)
- pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
- d->irq);
- return i == 8 ? -1 : 0;
+ return ((d->irq - sachip->irq_base) / 32) * 4;
}
-static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
-{
- struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_LO(d->irq);
- unsigned long ip0;
-
- if (flags == IRQ_TYPE_PROBE)
- return 0;
-
- if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
- return -EINVAL;
-
- ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
- if (flags & IRQ_TYPE_EDGE_RISING)
- ip0 &= ~mask;
- else
- ip0 |= mask;
- sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
- sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0);
-
- return 0;
-}
-
-static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
+static void sa1111_ack_irq(struct irq_data *d)
{
- struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_LO(d->irq);
- unsigned long we0;
-
- we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
- if (on)
- we0 |= mask;
- else
- we0 &= ~mask;
- sa1111_writel(we0, mapbase + SA1111_WAKEEN0);
-
- return 0;
}
-static struct irq_chip sa1111_low_chip = {
- .name = "SA1111-l",
- .irq_ack = sa1111_ack_irq,
- .irq_mask = sa1111_mask_lowirq,
- .irq_unmask = sa1111_unmask_lowirq,
- .irq_retrigger = sa1111_retrigger_lowirq,
- .irq_set_type = sa1111_type_lowirq,
- .irq_set_wake = sa1111_wake_lowirq,
-};
-
-static void sa1111_mask_highirq(struct irq_data *d)
+static void sa1111_mask_irq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned long ie1;
+ void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
+ u32 ie;
- ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
- ie1 &= ~SA1111_IRQMASK_HI(d->irq);
- sa1111_writel(ie1, mapbase + SA1111_INTEN1);
+ ie = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie &= ~sa1111_irqmask(d);
+ sa1111_writel(ie, mapbase + SA1111_INTEN0);
}
-static void sa1111_unmask_highirq(struct irq_data *d)
+static void sa1111_unmask_irq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned long ie1;
+ void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
+ u32 ie;
- ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
- ie1 |= SA1111_IRQMASK_HI(d->irq);
- sa1111_writel(ie1, mapbase + SA1111_INTEN1);
+ ie = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie |= sa1111_irqmask(d);
+ sa1111_writel(ie, mapbase + SA1111_INTEN0);
}
/*
@@ -366,19 +280,18 @@ static void sa1111_unmask_highirq(struct irq_data *d)
 * be triggered. In fact, it's very difficult, if not impossible, to get
* INTSET to re-trigger the interrupt.
*/
-static int sa1111_retrigger_highirq(struct irq_data *d)
+static int sa1111_retrigger_irq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_HI(d->irq);
- unsigned long ip1;
+ void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
+ u32 ip, mask = sa1111_irqmask(d);
int i;
- ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
+ ip = sa1111_readl(mapbase + SA1111_INTPOL0);
for (i = 0; i < 8; i++) {
- sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1);
- sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
- if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
+ sa1111_writel(ip ^ mask, mapbase + SA1111_INTPOL0);
+ sa1111_writel(ip, mapbase + SA1111_INTPOL0);
+ if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
break;
}
@@ -388,12 +301,11 @@ static int sa1111_retrigger_highirq(struct irq_data *d)
return i == 8 ? -1 : 0;
}
-static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
+static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_HI(d->irq);
- unsigned long ip1;
+ void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
+ u32 ip, mask = sa1111_irqmask(d);
if (flags == IRQ_TYPE_PROBE)
return 0;
@@ -401,42 +313,41 @@ static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
- ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
+ ip = sa1111_readl(mapbase + SA1111_INTPOL0);
if (flags & IRQ_TYPE_EDGE_RISING)
- ip1 &= ~mask;
+ ip &= ~mask;
else
- ip1 |= mask;
- sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
- sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1);
+ ip |= mask;
+ sa1111_writel(ip, mapbase + SA1111_INTPOL0);
+ sa1111_writel(ip, mapbase + SA1111_WAKEPOL0);
return 0;
}
-static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
+static int sa1111_wake_irq(struct irq_data *d, unsigned int on)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
- void __iomem *mapbase = sachip->base + SA1111_INTC;
- unsigned int mask = SA1111_IRQMASK_HI(d->irq);
- unsigned long we1;
+ void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
+ u32 we, mask = sa1111_irqmask(d);
- we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
+ we = sa1111_readl(mapbase + SA1111_WAKEEN0);
if (on)
- we1 |= mask;
+ we |= mask;
else
- we1 &= ~mask;
- sa1111_writel(we1, mapbase + SA1111_WAKEEN1);
+ we &= ~mask;
+ sa1111_writel(we, mapbase + SA1111_WAKEEN0);
return 0;
}
-static struct irq_chip sa1111_high_chip = {
- .name = "SA1111-h",
+static struct irq_chip sa1111_irq_chip = {
+ .name = "SA1111",
.irq_ack = sa1111_ack_irq,
- .irq_mask = sa1111_mask_highirq,
- .irq_unmask = sa1111_unmask_highirq,
- .irq_retrigger = sa1111_retrigger_highirq,
- .irq_set_type = sa1111_type_highirq,
- .irq_set_wake = sa1111_wake_highirq,
+ .irq_mask = sa1111_mask_irq,
+ .irq_unmask = sa1111_unmask_irq,
+ .irq_retrigger = sa1111_retrigger_irq,
+ .irq_set_type = sa1111_type_irq,
+ .irq_set_wake = sa1111_wake_irq,
};
static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
@@ -482,16 +393,14 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
irq = sachip->irq_base + i;
- irq_set_chip_and_handler(irq, &sa1111_low_chip,
- handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, sachip);
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
irq = sachip->irq_base + i;
- irq_set_chip_and_handler(irq, &sa1111_high_chip,
- handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, sachip);
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
@@ -509,6 +418,181 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
return 0;
}
+static void sa1111_remove_irq(struct sa1111 *sachip)
+{
+ void __iomem *irqbase = sachip->base + SA1111_INTC;
+
+ /* disable all IRQs */
+ sa1111_writel(0, irqbase + SA1111_INTEN0);
+ sa1111_writel(0, irqbase + SA1111_INTEN1);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN0);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+
+ if (sachip->irq != NO_IRQ) {
+ irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
+ irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
+
+ release_mem_region(sachip->phys + SA1111_INTC, 512);
+ }
+}
+
+enum {
+ SA1111_GPIO_PXDDR = (SA1111_GPIO_PADDR - SA1111_GPIO_PADDR),
+ SA1111_GPIO_PXDRR = (SA1111_GPIO_PADRR - SA1111_GPIO_PADDR),
+ SA1111_GPIO_PXDWR = (SA1111_GPIO_PADWR - SA1111_GPIO_PADDR),
+ SA1111_GPIO_PXSDR = (SA1111_GPIO_PASDR - SA1111_GPIO_PADDR),
+ SA1111_GPIO_PXSSR = (SA1111_GPIO_PASSR - SA1111_GPIO_PADDR),
+};
+
+static struct sa1111 *gc_to_sa1111(struct gpio_chip *gc)
+{
+ return container_of(gc, struct sa1111, gc);
+}
+
+static void __iomem *sa1111_gpio_map_reg(struct sa1111 *sachip, unsigned offset)
+{
+ void __iomem *reg = sachip->base + SA1111_GPIO;
+
+ if (offset < 4)
+ return reg + SA1111_GPIO_PADDR;
+ if (offset < 10)
+ return reg + SA1111_GPIO_PBDDR;
+ if (offset < 18)
+ return reg + SA1111_GPIO_PCDDR;
+ return NULL;
+}
+
+static u32 sa1111_gpio_map_bit(unsigned offset)
+{
+ if (offset < 4)
+ return BIT(offset);
+ if (offset < 10)
+ return BIT(offset - 4);
+ if (offset < 18)
+ return BIT(offset - 10);
+ return 0;
+}
+
+static void sa1111_gpio_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = readl_relaxed(reg);
+ val &= ~mask;
+ val |= mask & set;
+ writel_relaxed(val, reg);
+}
+
+static int sa1111_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
+ u32 mask = sa1111_gpio_map_bit(offset);
+
+ return !!(readl_relaxed(reg + SA1111_GPIO_PXDDR) & mask);
+}
+
+static int sa1111_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ unsigned long flags;
+ void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
+ u32 mask = sa1111_gpio_map_bit(offset);
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, mask);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, mask);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+
+ return 0;
+}
+
+static int sa1111_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ unsigned long flags;
+ void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
+ u32 mask = sa1111_gpio_map_bit(offset);
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, 0);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, 0);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+
+ return 0;
+}
+
+static int sa1111_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
+ u32 mask = sa1111_gpio_map_bit(offset);
+
+ return !!(readl_relaxed(reg + SA1111_GPIO_PXDRR) & mask);
+}
+
+static void sa1111_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ unsigned long flags;
+ void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
+ u32 mask = sa1111_gpio_map_bit(offset);
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+
+static void sa1111_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+ unsigned long flags;
+ void __iomem *reg = sachip->base + SA1111_GPIO;
+ u32 msk, val;
+
+ msk = *mask;
+ val = *bits;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PADWR, msk & 15, val);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PASSR, msk & 15, val);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PBDWR, (msk >> 4) & 255, val >> 4);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PBSSR, (msk >> 4) & 255, val >> 4);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PCDWR, (msk >> 12) & 255, val >> 12);
+ sa1111_gpio_modify(reg + SA1111_GPIO_PCSSR, (msk >> 12) & 255, val >> 12);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+
+static int sa1111_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct sa1111 *sachip = gc_to_sa1111(gc);
+
+ return sachip->irq_base + offset;
+}
+
+static int sa1111_setup_gpios(struct sa1111 *sachip)
+{
+ sachip->gc.label = "sa1111";
+ sachip->gc.parent = sachip->dev;
+ sachip->gc.owner = THIS_MODULE;
+ sachip->gc.get_direction = sa1111_gpio_get_direction;
+ sachip->gc.direction_input = sa1111_gpio_direction_input;
+ sachip->gc.direction_output = sa1111_gpio_direction_output;
+ sachip->gc.get = sa1111_gpio_get;
+ sachip->gc.set = sa1111_gpio_set;
+ sachip->gc.set_multiple = sa1111_gpio_set_multiple;
+ sachip->gc.to_irq = sa1111_gpio_to_irq;
+ sachip->gc.base = -1;
+ sachip->gc.ngpio = 18;
+
+ return devm_gpiochip_add_data(sachip->dev, &sachip->gc, sachip);
+}
+
/*
* Bring the SA1111 out of reset. This requires a set procedure:
* 1. nRESET asserted (by hardware)
@@ -607,7 +691,7 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
static void sa1111_dev_release(struct device *_dev)
{
- struct sa1111_dev *dev = SA1111_DEV(_dev);
+ struct sa1111_dev *dev = to_sa1111_device(_dev);
kfree(dev);
}
@@ -696,19 +780,17 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
if (!pd)
return -EINVAL;
- sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL);
+ sachip = devm_kzalloc(me, sizeof(struct sa1111), GFP_KERNEL);
if (!sachip)
return -ENOMEM;
- sachip->clk = clk_get(me, "SA1111_CLK");
- if (IS_ERR(sachip->clk)) {
- ret = PTR_ERR(sachip->clk);
- goto err_free;
- }
+ sachip->clk = devm_clk_get(me, "SA1111_CLK");
+ if (IS_ERR(sachip->clk))
+ return PTR_ERR(sachip->clk);
ret = clk_prepare(sachip->clk);
if (ret)
- goto err_clkput;
+ return ret;
spin_lock_init(&sachip->lock);
@@ -757,6 +839,11 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
goto err_clk;
}
+ /* Setup the GPIOs - should really be done after the IRQ setup */
+ ret = sa1111_setup_gpios(sachip);
+ if (ret)
+ goto err_irq;
+
#ifdef CONFIG_ARCH_SA1100
{
unsigned int val;
@@ -799,22 +886,22 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
return 0;
+ err_irq:
+ sa1111_remove_irq(sachip);
err_clk:
clk_disable(sachip->clk);
err_unmap:
iounmap(sachip->base);
err_clk_unprep:
clk_unprepare(sachip->clk);
- err_clkput:
- clk_put(sachip->clk);
- err_free:
- kfree(sachip);
return ret;
}
static int sa1111_remove_one(struct device *dev, void *data)
{
- struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_dev *sadev = to_sa1111_device(dev);
+ if (dev->bus != &sa1111_bus_type)
+ return 0;
device_del(&sadev->dev);
release_resource(&sadev->res);
put_device(&sadev->dev);
@@ -823,29 +910,14 @@ static int sa1111_remove_one(struct device *dev, void *data)
static void __sa1111_remove(struct sa1111 *sachip)
{
- void __iomem *irqbase = sachip->base + SA1111_INTC;
-
device_for_each_child(sachip->dev, NULL, sa1111_remove_one);
- /* disable all IRQs */
- sa1111_writel(0, irqbase + SA1111_INTEN0);
- sa1111_writel(0, irqbase + SA1111_INTEN1);
- sa1111_writel(0, irqbase + SA1111_WAKEEN0);
- sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+ sa1111_remove_irq(sachip);
clk_disable(sachip->clk);
clk_unprepare(sachip->clk);
- if (sachip->irq != NO_IRQ) {
- irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
- irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
-
- release_mem_region(sachip->phys + SA1111_INTC, 512);
- }
-
iounmap(sachip->base);
- clk_put(sachip->clk);
- kfree(sachip);
}
struct sa1111_save_data {
@@ -1285,6 +1357,14 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
}
EXPORT_SYMBOL(sa1111_disable_device);
+int sa1111_get_irq(struct sa1111_dev *sadev, unsigned num)
+{
+ if (num >= ARRAY_SIZE(sadev->irq))
+ return -EINVAL;
+ return sadev->irq[num];
+}
+EXPORT_SYMBOL_GPL(sa1111_get_irq);
+
/*
* SA1111 "Register Access Bus."
*
@@ -1293,7 +1373,7 @@ EXPORT_SYMBOL(sa1111_disable_device);
*/
static int sa1111_match(struct device *_dev, struct device_driver *_drv)
{
- struct sa1111_dev *dev = SA1111_DEV(_dev);
+ struct sa1111_dev *dev = to_sa1111_device(_dev);
struct sa1111_driver *drv = SA1111_DRV(_drv);
return !!(dev->devid & drv->devid);
@@ -1301,7 +1381,7 @@ static int sa1111_match(struct device *_dev, struct device_driver *_drv)
static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
{
- struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
@@ -1312,7 +1392,7 @@ static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
static int sa1111_bus_resume(struct device *dev)
{
- struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
@@ -1326,12 +1406,12 @@ static void sa1111_bus_shutdown(struct device *dev)
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
if (drv && drv->shutdown)
- drv->shutdown(SA1111_DEV(dev));
+ drv->shutdown(to_sa1111_device(dev));
}
static int sa1111_bus_probe(struct device *dev)
{
- struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = -ENODEV;
@@ -1342,7 +1422,7 @@ static int sa1111_bus_probe(struct device *dev)
static int sa1111_bus_remove(struct device *dev)
{
- struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
@@ -1407,7 +1487,7 @@ static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
void *data)
{
- struct sa1111_dev *dev = SA1111_DEV(data);
+ struct sa1111_dev *dev = to_sa1111_device(data);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index 0b9211b2b73b..3146ad055716 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -83,7 +83,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=8
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_PHYLIB=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index f33d042b1273..5e5dd6bc5ed9 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -1,8 +1,8 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -14,16 +14,16 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_DAVINCI=y
CONFIG_ARCH_DAVINCI_DM644x=y
CONFIG_ARCH_DAVINCI_DM355=y
CONFIG_ARCH_DAVINCI_DM646x=y
-CONFIG_ARCH_DAVINCI_DM365=y
CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
-CONFIG_MACH_DA8XX_DT=y
+CONFIG_ARCH_DAVINCI_DM365=y
CONFIG_MACH_SFFSDR=y
CONFIG_MACH_NEUROS_OSD2=y
CONFIG_MACH_DM355_LEOPARD=y
@@ -32,13 +32,8 @@ CONFIG_MACH_OMAPL138_HAWKBOARD=y
CONFIG_DAVINCI_MUX_DEBUG=y
CONFIG_DAVINCI_MUX_WARNINGS=y
CONFIG_DAVINCI_RESET_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_LEDS=y
-CONFIG_USE_OF=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
@@ -50,22 +45,18 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=m
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_IDLE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=m
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
CONFIG_MTD_CFI_INTELEXT=m
@@ -75,7 +66,8 @@ CONFIG_MTD_M25P80=m
CONFIG_MTD_NAND=m
CONFIG_MTD_NAND_DAVINCI=m
CONFIG_MTD_SPI_NOR=m
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BLOCK=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
@@ -86,25 +78,23 @@ CONFIG_BLK_DEV_PALMCHIP_BK3710=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
CONFIG_TUN=m
+CONFIG_DM9000=y
+CONFIG_TI_DAVINCI_EMAC=y
CONFIG_LXT_PHY=y
+CONFIG_SMSC_PHY=y
CONFIG_LSI_ET1011C_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_TI_DAVINCI_EMAC=y
-CONFIG_DM9000=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
CONFIG_PPP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_NETCONSOLE=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_ATKBD=m
CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
CONFIG_KEYBOARD_XTKBD=m
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
@@ -115,8 +105,9 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=3
-# CONFIG_HW_RANDOM is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_DAVINCI=y
@@ -124,26 +115,25 @@ CONFIG_SPI=y
CONFIG_SPI_DAVINCI=m
CONFIG_PINCTRL_SINGLE=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCF857X=y
+CONFIG_GPIO_PCA953X=y
CONFIG_WATCHDOG=y
CONFIG_DAVINCI_WATCHDOG=m
CONFIG_MFD_DM355EVM_MSP=y
CONFIG_TPS6507X=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_TPS6507X=y
CONFIG_FB=y
-CONFIG_FB_DA8XX=y
CONFIG_FIRMWARE_EDID=y
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_FB_DA8XX=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SOC=m
-CONFIG_SND_DAVINCI_SOC=m
-CONFIG_SND_DAVINCI_SOC_EVM=m
-CONFIG_SND_DM6467_SOC_EVM=m
+CONFIG_SND_EDMA_SOC=m
+CONFIG_SND_DA850_SOC_EVM=m
+CONFIG_SND_SIMPLE_CARD=m
CONFIG_HID=m
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
@@ -163,10 +153,9 @@ CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
CONFIG_USB=m
CONFIG_USB_MON=m
+CONFIG_USB_STORAGE=m
CONFIG_USB_MUSB_HDRC=m
-CONFIG_USB_GADGET_MUSB_HDRC=y
CONFIG_MUSB_PIO_ONLY=y
-CONFIG_USB_STORAGE=m
CONFIG_USB_TEST=m
CONFIG_USB_GADGET=m
CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -188,8 +177,11 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_OMAP=m
CONFIG_DMADEVICES=y
CONFIG_TI_EDMA=y
+CONFIG_MEMORY=y
+CONFIG_TI_AEMIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
@@ -198,30 +190,22 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=m
+CONFIG_UBIFS_FS=m
CONFIG_CRAMFS=y
CONFIG_MINIX_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_ARM_UNWIND is not set
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=m
-CONFIG_GPIO_PCA953X=y
-CONFIG_KEYBOARD_GPIO_POLLED=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 36cc7cc012f9..4e484f406419 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -17,6 +17,7 @@ CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_CMA=y
+CONFIG_SECCOMP=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
@@ -35,6 +36,8 @@ CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -219,6 +222,12 @@ CONFIG_CROS_EC_CHARDEV=y
CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_MAX77802=y
CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_ARM_EXYNOS_BUS_DEVFREQ=y
+CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP=y
CONFIG_EXTCON=y
CONFIG_EXTCON_MAX14577=y
CONFIG_EXTCON_MAX77693=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 9083399a8ab1..5f013c9fc1ed 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -22,14 +22,13 @@ CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_MXC=y
-CONFIG_MACH_SCB9328=y
-CONFIG_MACH_APF9328=y
CONFIG_MACH_MX21ADS=y
CONFIG_MACH_MX27ADS=y
CONFIG_MACH_MX27_3DS=y
CONFIG_MACH_IMX27_VISSTRIM_M10=y
CONFIG_MACH_PCA100=y
CONFIG_MACH_IMX27_DT=y
+CONFIG_SOC_IMX1=y
CONFIG_SOC_IMX25=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 3219480b9dbf..8ec4dbbb50b0 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -1,6 +1,5 @@
CONFIG_KERNEL_LZO=y
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -72,7 +71,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_CAN=y
CONFIG_CAN_FLEXCAN=y
@@ -228,7 +226,6 @@ CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
-CONFIG_VIDEO_MX3=y
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_CODA=y
CONFIG_SOC_CAMERA_OV2640=y
@@ -241,6 +238,7 @@ CONFIG_DRM_IMX_PARALLEL_DISPLAY=y
CONFIG_DRM_IMX_TVE=y
CONFIG_DRM_IMX_LDB=y
CONFIG_DRM_IMX_HDMI=y
+CONFIG_DRM_ETNAVIV=y
CONFIG_FB_MXS=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
@@ -264,6 +262,7 @@ CONFIG_SND_SOC_IMX_MC13783=y
CONFIG_SND_SOC_FSL_ASOC_CARD=y
CONFIG_SND_SOC_CS42XX8_I2C=y
CONFIG_SND_SOC_TLV320AIC3X=y
+CONFIG_SND_SOC_WM8960=y
CONFIG_SND_SIMPLE_CARD=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
@@ -276,6 +275,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_TEST=m
CONFIG_USB_EHSET_TEST_FIXTURE=m
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MXS_PHY=y
@@ -293,7 +293,14 @@ CONFIG_USB_CONFIGFS_EEM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_LB_SS=y
CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_USB_CONFIGFS_F_PRINTER=y
CONFIG_USB_ZERO=m
+CONFIG_USB_AUDIO=m
CONFIG_USB_ETH=m
CONFIG_USB_G_NCM=m
CONFIG_USB_GADGETFS=m
@@ -329,13 +336,12 @@ CONFIG_FSL_EDMA=y
CONFIG_IMX_SDMA=y
CONFIG_MXS_DMA=y
CONFIG_STAGING=y
-# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_VF610_ADC=y
+CONFIG_MPL3115=y
CONFIG_PWM=y
CONFIG_PWM_FSL_FTM=y
CONFIG_PWM_IMX=y
-CONFIG_NVMEM=y
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -355,7 +361,6 @@ CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 71f14675d009..869faae67201 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -13,15 +13,8 @@ CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
-CONFIG_ARCH_INTEGRATOR_CP=y
CONFIG_INTEGRATOR_IMPD1=y
-CONFIG_CPU_ARM720T=y
-CONFIG_CPU_ARM920T=y
-CONFIG_CPU_ARM922T=y
-CONFIG_CPU_ARM926T=y
-CONFIG_CPU_ARM1020=y
-CONFIG_CPU_ARM1022=y
-CONFIG_CPU_ARM1026=y
+CONFIG_ARCH_INTEGRATOR_CP=y
CONFIG_PCI=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
@@ -33,7 +26,6 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_FPE_NWFPE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -51,7 +43,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -71,6 +63,7 @@ CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
diff --git a/arch/arm/configs/lpd270_defconfig b/arch/arm/configs/lpd270_defconfig
index 1c8c9ee71d31..a9dd1e93b556 100644
--- a/arch/arm/configs/lpd270_defconfig
+++ b/arch/arm/configs/lpd270_defconfig
@@ -31,7 +31,6 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_CFI_I1 is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_BLK_DEV_NBD=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
index 433eebb4103f..9a6390c172d6 100644
--- a/arch/arm/configs/multi_v4t_defconfig
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -20,9 +20,7 @@ CONFIG_INTEGRATOR_CM720T=y
CONFIG_INTEGRATOR_CM920T=y
CONFIG_INTEGRATOR_CM922T_XA10=y
CONFIG_ARCH_MXC=y
-CONFIG_MACH_SCB9328=y
-CONFIG_MACH_APF9328=y
-CONFIG_MACH_IMX1_DT=y
+CONFIG_SOC_IMX1=y
CONFIG_ARCH_NSPIRE=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 58459105cadc..437d0740dec6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -126,9 +126,11 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=16
CONFIG_HIGHPTE=y
CONFIG_CMA=y
+CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_KEXEC=y
+CONFIG_EFI=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
@@ -136,6 +138,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_IMX6Q_CPUFREQ=y
CONFIG_QORIQ_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
@@ -220,6 +223,7 @@ CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_AHCI_BRCM=y
CONFIG_AHCI_ST=y
+CONFIG_AHCI_IMX=y
CONFIG_AHCI_SUNXI=y
CONFIG_AHCI_TEGRA=y
CONFIG_SATA_HIGHBANK=y
@@ -277,7 +281,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_TOUCHSCREEN_MMS114=m
CONFIG_TOUCHSCREEN_ST1232=m
CONFIG_TOUCHSCREEN_STMPE=y
@@ -499,6 +503,7 @@ CONFIG_MFD_TPS65910=y
CONFIG_REGULATOR_ACT8945A=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ANATOP=y
CONFIG_REGULATOR_AS3711=y
CONFIG_REGULATOR_AS3722=y
CONFIG_REGULATOR_AXP20X=m
@@ -593,6 +598,7 @@ CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_STI=m
CONFIG_DRM_VC4=y
CONFIG_FB_ARMCLCD=y
+CONFIG_FB_EFI=y
CONFIG_FB_WM8505=y
CONFIG_FB_SH_MOBILE_LCDC=y
CONFIG_FB_SIMPLE=y
@@ -750,6 +756,7 @@ CONFIG_RTC_DRV_S35390A=m
CONFIG_RTC_DRV_RX8581=m
CONFIG_RTC_DRV_EM3027=y
CONFIG_RTC_DRV_DA9063=m
+CONFIG_RTC_DRV_EFI=m
CONFIG_RTC_DRV_DIGICOLOR=m
CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_S3C=m
@@ -867,6 +874,8 @@ CONFIG_NVMEM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 6051c51ca188..f7f6039419aa 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -91,10 +91,7 @@ CONFIG_SATA_AHCI=y
CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
-CONFIG_NET_DSA_MV88E6171=y
-CONFIG_NET_DSA_MV88E6352=y
+CONFIG_NET_DSA_MV88E6XXX=y
CONFIG_MV643XX_ETH=y
CONFIG_R8169=y
CONFIG_MARVELL_PHY=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 486a4cabb0dd..f1a0e2503cbe 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -48,6 +48,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -59,6 +60,7 @@ CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_PXA3xx=y
CONFIG_MTD_SPI_NOR=y
CONFIG_SRAM=y
+CONFIG_MTD_UBI=y
CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
@@ -140,6 +142,7 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 487c6c3b13fd..53e1a884a1ea 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -139,7 +139,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_SENSORS_TSL2550=m
-CONFIG_BMP085_I2C=m
CONFIG_SRAM=y
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_I2C=m
@@ -428,6 +427,7 @@ CONFIG_EXTCON_USB_GPIO=m
CONFIG_TI_EMIF=m
CONFIG_IIO=m
CONFIG_TI_AM335X_ADC=m
+CONFIG_BMP280=m
CONFIG_PWM=y
CONFIG_PWM_OMAP_DMTIMER=m
CONFIG_PWM_TIECAP=m
diff --git a/arch/arm/configs/pxa255-idp_defconfig b/arch/arm/configs/pxa255-idp_defconfig
index 917a070b4bb9..088627ad875f 100644
--- a/arch/arm/configs/pxa255-idp_defconfig
+++ b/arch/arm/configs/pxa255-idp_defconfig
@@ -28,7 +28,6 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
# CONFIG_MTD_CFI_I1 is not set
CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMC91X=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index dc5517eaf09f..a016ecc0084b 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -26,8 +26,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_LDM_PARTITION=y
CONFIG_CMDLINE_PARTITION=y
CONFIG_ARCH_PXA=y
-CONFIG_MACH_PXA27X_DT=y
-CONFIG_MACH_PXA3XX_DT=y
CONFIG_ARCH_LUBBOCK=y
CONFIG_MACH_MAINSTONE=y
CONFIG_MACH_ZYLONITE300=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index b3ade552a2a5..bc4bfe02e611 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -76,7 +76,6 @@ CONFIG_TCP_CONG_LP=m
CONFIG_TCP_CONG_VENO=m
CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 0ada29d568ec..492f7f3eb4ac 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -94,8 +94,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=8
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index dfe4002812da..a8088290b778 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
#include <linux/io.h>
#include <asm/barrier.h>
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
+#include <asm/cp15.h>
#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
@@ -99,68 +97,129 @@
#define ICH_AP1R2 __AP1Rx(2)
#define ICH_AP1R3 __AP1Rx(3)
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64) \
+static inline void write_ ## a64(u32 val) \
+{ \
+ write_sysreg(val, a32); \
+} \
+static inline u32 read_ ## a64(void) \
+{ \
+ return read_sysreg(a32); \
+} \
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
+static inline void write_ ## a64(u64 val) \
+{ \
+ write_sysreg(lower_32_bits(val), a32lo);\
+ write_sysreg(upper_32_bits(val), a32hi);\
+} \
+static inline u64 read_ ## a64(void) \
+{ \
+ u64 val = read_sysreg(a32lo); \
+ \
+ val |= (u64)read_sysreg(a32hi) << 32; \
+ \
+ return val; \
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r) read_##r()
+#define write_gicreg(v, r) write_##r(v)
+
/* Low-level accessors */
static inline void gic_write_eoir(u32 irq)
{
- asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+ write_sysreg(irq, ICC_EOIR1);
isb();
}
static inline void gic_write_dir(u32 val)
{
- asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+ write_sysreg(val, ICC_DIR);
isb();
}
static inline u32 gic_read_iar(void)
{
- u32 irqstat;
+ u32 irqstat = read_sysreg(ICC_IAR1);
- asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
dsb(sy);
+
return irqstat;
}
static inline void gic_write_pmr(u32 val)
{
- asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+ write_sysreg(val, ICC_PMR);
}
static inline void gic_write_ctlr(u32 val)
{
- asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+ write_sysreg(val, ICC_CTLR);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+ write_sysreg(val, ICC_IGRPEN1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+ write_sysreg(val, ICC_SGI1R);
}
static inline u32 gic_read_sre(void)
{
- u32 val;
-
- asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
- return val;
+ return read_sysreg(ICC_SRE);
}
static inline void gic_write_sre(u32 val)
{
- asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+ write_sysreg(val, ICC_SRE);
isb();
}
static inline void gic_write_bpr1(u32 val)
{
- asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+ write_sysreg(val, ICC_BPR1);
}
/*
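
The CPUIF_MAP/CPUIF_MAP_LO_HI block added above generates a write_*/read_* accessor pair per AArch32 GIC register under its AArch64 name, so the common VGIC code can keep using read_gicreg()/write_gicreg() unchanged on 32-bit ARM. A minimal userspace sketch of the same token-pasting pattern, with an invented plain variable (fake_ICH_HCR) standing in for the real cp15 register:

    /* Userspace sketch of the CPUIF_MAP token-pasting pattern: each invocation
     * emits a write_<a64>()/read_<a64>() pair backed by the AArch32 name.
     * fake_ICH_HCR is an invented stand-in for the real cp15 register. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_ICH_HCR;

    #define CPUIF_MAP(a32, a64)                                             \
    static inline void write_ ## a64(uint32_t val) { fake_ ## a32 = val; }  \
    static inline uint32_t read_ ## a64(void)      { return fake_ ## a32; }

    CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)

    #define read_gicreg(r)          read_##r()
    #define write_gicreg(v, r)      write_##r(v)

    int main(void)
    {
            write_gicreg(0x1, ICH_HCR_EL2);  /* generic VGIC code uses the arm64 name */
            printf("ICH_HCR = %#x\n", (unsigned)read_gicreg(ICH_HCR_EL2));
            return 0;
    }
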
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4eaea2173bf8..68b06f9c65de 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -159,7 +159,11 @@
.endm
.macro save_and_disable_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
mrs \oldcpsr, cpsr
+#endif
disable_irq_notrace
.endm
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9156fc303afd..bdd283bc5842 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -501,21 +501,4 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
-/**
- * secure_flush_area - ensure coherency across the secure boundary
- * @addr: virtual address
- * @size: size of region
- *
- * Ensure that the specified area of memory is coherent across the secure
- * boundary from the non-secure side. This is used when calling secure
- * firmware where the secure firmware does not ensure coherency.
- */
-static inline void secure_flush_area(const void *addr, size_t size)
-{
- phys_addr_t phys = __pa(addr);
-
- __cpuc_flush_dcache_area((void *)addr, size);
- outer_flush_range(phys, phys + size);
-}
-
#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index 7ea78144ae22..01509ae0bbec 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -56,4 +56,43 @@ static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}
+#define CSSELR_ICACHE 1
+#define CSSELR_DCACHE 0
+
+#define CSSELR_L1 (0 << 1)
+#define CSSELR_L2 (1 << 1)
+#define CSSELR_L3 (2 << 1)
+#define CSSELR_L4 (3 << 1)
+#define CSSELR_L5 (4 << 1)
+#define CSSELR_L6 (5 << 1)
+#define CSSELR_L7 (6 << 1)
+
+#ifndef CONFIG_CPU_V7M
+static inline void set_csselr(unsigned int cache_selector)
+{
+ asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (cache_selector));
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ unsigned int val;
+
+ asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+#else /* CONFIG_CPU_V7M */
+#include <linux/io.h>
+#include "asm/v7m.h"
+
+static inline void set_csselr(unsigned int cache_selector)
+{
+ writel(cache_selector, BASEADDR_V7M_SCB + V7M_SCB_CTR);
+}
+
+static inline unsigned int read_ccsidr(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CCSIDR);
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f11524f10c..dbdbce1b3a72 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
#ifdef CONFIG_CPU_CP15
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
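
With this change __ACCESS_CP15() expands to a four-item tuple (read mnemonic, write mnemonic, operand string, type), and read_sysreg()/write_sysreg() pick the item they need out of that tuple through __VA_ARGS__. A standalone sketch of that dispatch, with printf() standing in for the asm volatile() and DEMO_REG using an invented encoding:

    /* Sketch of the read_sysreg()/write_sysreg() dispatch: the register macro
     * expands to "read-op, write-op, operand string, type" and the accessor
     * picks what it needs via __VA_ARGS__.  printf() stands in for the real
     * asm volatile(), and DEMO_REG uses an invented encoding. */
    #include <stdio.h>
    #include <stdint.h>

    #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
            "mrc", "mcr", #Op1 ", %0, " #CRn ", " #CRm ", " #Op2, uint32_t

    #define DEMO_REG        __ACCESS_CP15(c4, 0, c6, 0)

    #define __write_sysreg(v, r, w, c, t) \
            printf("%s p15, %s   <- %#x\n", w, c, (unsigned)(t)(v))
    #define write_sysreg(v, ...)    __write_sysreg(v, __VA_ARGS__)

    int main(void)
    {
            write_sysreg(0xf0, DEMO_REG);   /* selects the "mcr" half of the tuple */
            return 0;
    }
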
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c716a7f..522b5feb4eaa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,11 +55,13 @@
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_DEC 0x44
#define ARM_CPU_IMP_INTEL 0x69
/* ARM implemented processors */
@@ -76,6 +78,17 @@
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_MASK 0xff00fff0
+/* DEC implemented cores */
+#define ARM_CPU_PART_SA1100 0x4400a110
+
+/* Intel implemented cores */
+#define ARM_CPU_PART_SA1110 0x6900b110
+#define ARM_CPU_REV_SA1110_A0 0
+#define ARM_CPU_REV_SA1110_B0 4
+#define ARM_CPU_REV_SA1110_B1 5
+#define ARM_CPU_REV_SA1110_B2 6
+#define ARM_CPU_REV_SA1110_B4 8
+
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
@@ -152,6 +165,11 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return read_cpuid(CPUID_CACHETYPE);
+}
+
#elif defined(CONFIG_CPU_V7M)
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -159,6 +177,11 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
+}
+
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -173,6 +196,11 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
return (read_cpuid_id() & 0xFF000000) >> 24;
}
+static inline unsigned int __attribute_const__ read_cpuid_revision(void)
+{
+ return read_cpuid_id() & 0x0000000f;
+}
+
/*
* The CPU part number is meaningless without referring to the CPU
* implementer: implementers are free to define their own part numbers
@@ -193,11 +221,6 @@ static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}
-static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
-{
- return read_cpuid(CPUID_CACHETYPE);
-}
-
static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
return read_cpuid(CPUID_TCM);
@@ -208,6 +231,10 @@ static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
return read_cpuid(CPUID_MPIDR);
}
+/* StrongARM-11x0 CPUs */
+#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
+#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
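
The new read_cpuid_revision() helper plus the SA-1100/SA-1110 part numbers above let callers key fixups off the MIDR fields, for instance through cpu_is_sa1110(). A rough illustration of how a MIDR value splits into implementer, part and revision (the sample value is invented; the mask mirrors ARM_CPU_PART_MASK from this header):

    /* Split a MIDR/CPUID value the way cputype.h does.  0x6900b116 is an
     * invented sample (Intel implementer, SA-1110 part, revision 6). */
    #include <stdio.h>
    #include <stdint.h>

    #define ARM_CPU_PART_MASK       0xff00fff0
    #define ARM_CPU_PART_SA1110     0x6900b110

    int main(void)
    {
            uint32_t midr = 0x6900b116;

            unsigned implementer = (midr & 0xff000000) >> 24;  /* read_cpuid_implementor() */
            unsigned part        = midr & ARM_CPU_PART_MASK;   /* read_cpuid_part() */
            unsigned revision    = midr & 0x0000000f;          /* read_cpuid_revision() */

            printf("impl=%#x part=%#x rev=%u sa1110=%d\n",
                   implementer, part, revision, part == ARM_CPU_PART_SA1110);
            return 0;
    }
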
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b7a428154355..b1ce037e4380 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -10,7 +10,7 @@
#include <asm/param.h> /* HZ */
#define MAX_UDELAY_MS 2
-#define UDELAY_MULT UL(2047 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
#define UDELAY_SHIFT 31
#ifndef __ASSEMBLY__
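
The corrected multiplier is the fixed-point factor HZ * 2^31 / 10^6 (about 2147.483648 * HZ) used together with UDELAY_SHIFT = 31, split into an integer part (2147 * HZ) and a fractional part (483648 * HZ / 1000000) so it can be evaluated without overflow; the old 2047 made udelay() run roughly 5% short. A standalone check of the arithmetic:

    /* Sanity-check the fixed-point udelay multiplier: HZ * 2^31 / 10^6,
     * expressed as 2147 * HZ + 483648 * HZ / 1000000 to avoid overflow. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned hz = 100;      /* example HZ */
            uint64_t exact = (uint64_t)hz * (1ULL << 31) / 1000000;
            uint64_t fixed = 2147ULL * hz + 483648ULL * hz / 1000000;
            uint64_t old   = 2047ULL * hz + 483648ULL * hz / 1000000;

            printf("exact=%llu new=%llu old=%llu\n",
                   (unsigned long long)exact, (unsigned long long)fixed,
                   (unsigned long long)old);
            return 0;
    }
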
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index e847d23351ed..acf1d14b89a6 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -8,8 +8,9 @@
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
-#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
+#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
+ ({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
+#define flat_put_addr_at_rp(rp, val, relval) __put_user_unaligned(val, rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cab07f69382d..01c3d92624e5 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -118,11 +118,7 @@
#endif
#if defined(CONFIG_CPU_V7M)
-# ifdef _CACHE
# define MULTI_CACHE 1
-# else
-# define _CACHE nop
-# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3a5ec1c25659..736292b42fca 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -87,6 +87,15 @@
#define L310_CACHE_ID_RTL_R3P2 0x08
#define L310_CACHE_ID_RTL_R3P3 0x09
+#define L2X0_EVENT_CNT_CTRL_ENABLE BIT(0)
+
+#define L2X0_EVENT_CNT_CFG_SRC_SHIFT 2
+#define L2X0_EVENT_CNT_CFG_SRC_MASK 0xf
+#define L2X0_EVENT_CNT_CFG_SRC_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_DISABLED 0
+#define L2X0_EVENT_CNT_CFG_INT_INCR 1
+#define L2X0_EVENT_CNT_CFG_INT_OVERFLOW 2
+
/* L2C auxiliary control register - bits common to L2C-210/220/310 */
#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
@@ -157,6 +166,16 @@ static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
}
#endif
+#ifdef CONFIG_CACHE_L2X0_PMU
+void l2x0_pmu_register(void __iomem *base, u32 part);
+void l2x0_pmu_suspend(void);
+void l2x0_pmu_resume(void);
+#else
+static inline void l2x0_pmu_register(void __iomem *base, u32 part) {}
+static inline void l2x0_pmu_suspend(void) {}
+static inline void l2x0_pmu_resume(void) {}
+#endif
+
struct l2x0_regs {
unsigned long phy_base;
unsigned long aux_ctrl;
diff --git a/arch/arm/include/asm/hardware/cache-uniphier.h b/arch/arm/include/asm/hardware/cache-uniphier.h
index 102e3fbe1e10..eaa60da7dac3 100644
--- a/arch/arm/include/asm/hardware/cache-uniphier.h
+++ b/arch/arm/include/asm/hardware/cache-uniphier.h
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,28 +20,11 @@
#ifdef CONFIG_CACHE_UNIPHIER
int uniphier_cache_init(void);
-int uniphier_cache_l2_is_enabled(void);
-void uniphier_cache_l2_touch_range(unsigned long start, unsigned long end);
-void uniphier_cache_l2_set_locked_ways(u32 way_mask);
#else
static inline int uniphier_cache_init(void)
{
return -ENODEV;
}
-
-static inline int uniphier_cache_l2_is_enabled(void)
-{
- return 0;
-}
-
-static inline void uniphier_cache_l2_touch_range(unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
-}
#endif
#endif /* __CACHE_UNIPHIER_H */
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 7c2bbc7f0be1..8979fa3bbf2d 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -420,7 +420,7 @@ struct sa1111_dev {
u64 dma_mask;
};
-#define SA1111_DEV(_d) container_of((_d), struct sa1111_dev, dev)
+#define to_sa1111_device(x) container_of(x, struct sa1111_dev, dev)
#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
@@ -446,6 +446,8 @@ struct sa1111_driver {
int sa1111_enable_device(struct sa1111_dev *);
void sa1111_disable_device(struct sa1111_dev *);
+int sa1111_get_irq(struct sa1111_dev *, unsigned num);
+
unsigned int sa1111_pll_clock(struct sa1111_dev *);
#define SA1111_AUDIO_ACLINK 0
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 8e427c7b4425..afcaf8bf971b 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -114,7 +114,6 @@ struct notifier_block;
struct perf_event;
struct pmu;
-extern struct pmu perf_ops_bp;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 1bd9510de1b9..e53638c8ed8a 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -36,8 +36,9 @@ extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
#ifdef CONFIG_SMP
-extern void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
static inline int nr_legacy_irqs(void)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5f1eb2..d7ea6bcb29bf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
#include <asm/virt.h>
+#define ARM_EXIT_WITH_ABORT_BIT 31
+#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
#define ARM_EXCEPTION_SOFTWARE 2
@@ -68,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */
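
ARM_EXIT_WITH_ABORT_BIT lets the hyp code fold a pending abort into the exit reason it hands back, and the two macros split that combined value apart again. A minimal decode sketch (the macros are restated from the hunk above; ARM_EXCEPTION_HVC and its value are an invented placeholder for whichever class is being returned):

    /* Decode a combined exit code: bit 31 flags a pending abort, the low
     * bits carry the exception class.  ARM_EXCEPTION_HVC and its value are
     * an invented placeholder. */
    #include <stdio.h>
    #include <stdint.h>

    #define ARM_EXIT_WITH_ABORT_BIT 31
    #define ARM_EXCEPTION_CODE(x)   ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
    #define ARM_ABORT_PENDING(x)    (!!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)))
    #define ARM_EXCEPTION_HVC       7

    int main(void)
    {
            uint32_t exit_code = ARM_EXCEPTION_HVC | (1U << ARM_EXIT_WITH_ABORT_BIT);

            printf("class=%u abort_pending=%d\n",
                   ARM_EXCEPTION_CODE(exit_code), ARM_ABORT_PENDING(exit_code));
            return 0;
    }
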
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..9a8a45aaf19a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,29 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
*vcpu_reg(vcpu, reg_num) = val;
}
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr = HCR_GUEST_MASK;
}
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.hcr;
}
@@ -61,7 +72,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
vcpu->arch.hcr = hcr;
}
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return 1;
}
@@ -71,9 +82,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+ return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +104,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
	return cpsr_mode > USR_MODE;
}
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ if (hsr & HSR_CV)
+ return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+ return -1;
+}
+
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index de338d93d11b..2d19e02d03fd 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
#include <kvm/arm_vgic.h>
+
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
#define KVM_REQ_VCPU_EXIT 8
@@ -183,15 +188,15 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 hvc_exit_stat;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
u64 mmio_exit_user;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28f2ff3..343135ede5fa 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
+#include <asm/cp15.h>
#include <asm/kvm_mmu.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
- "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
- "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
#define __ACCESS_VFP(CRn) \
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({ \
- t __val; \
- asm volatile(r " " c : "=r" (__val)); \
- __val; \
-})
-#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
-
#define write_special(v, r) \
asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
#define read_special(r) ({ \
@@ -119,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3bb803d6814b..74a44727f8e1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
*pmd = new_pmd;
- flush_pmd_entry(pmd);
+ dsb(ishst);
}
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
- /*
- * flush_pmd_entry just takes a void pointer and cleans the necessary
- * cache entries, so we can reuse the function for ptes.
- */
- flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
- clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
- clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
- clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
- clean_pte_table(pte);
+ dsb(ishst);
}
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 31c07a2cc100..76cbd9c674df 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -159,13 +159,8 @@
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*/
-#if defined(__virt_to_phys)
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
-
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
+#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
@@ -182,10 +177,6 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
-#define virt_to_pfn(kaddr) \
- ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
- PHYS_PFN_OFFSET)
-
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
@@ -257,12 +248,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
return x - PHYS_OFFSET + PAGE_OFFSET;
}
+#endif
+
#define virt_to_pfn(kaddr) \
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
-#endif
-
/*
* These are *only* valid on the kernel direct mapped RAM memory.
* Note: Drivers should NOT use these. They are the wrong
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e358b7966c06..464748b9fd7d 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -23,10 +23,8 @@ struct mod_arch_specific {
struct unwind_table *unwind[ARM_SEC_MAX];
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
- struct elf32_shdr *core_plt;
- struct elf32_shdr *init_plt;
- int core_plt_count;
- int init_plt_count;
+ struct elf32_shdr *plt;
+ int plt_count;
#endif
};
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 615781c61627..1fd775c1bc5d 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -24,6 +24,9 @@
#define V7M_SCB_CCR 0x14
#define V7M_SCB_CCR_STKALIGN (1 << 9)
+#define V7M_SCB_CCR_DC (1 << 16)
+#define V7M_SCB_CCR_IC (1 << 17)
+#define V7M_SCB_CCR_BP (1 << 18)
#define V7M_SCB_SHPR2 0x1c
#define V7M_SCB_SHPR3 0x20
@@ -47,6 +50,25 @@
#define EXC_RET_STACK_MASK 0x00000004
#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+/* Cache related definitions */
+
+#define V7M_SCB_CLIDR 0x78 /* Cache Level ID register */
+#define V7M_SCB_CTR 0x7c /* Cache Type register */
+#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
+#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */
+
+/* Cache operations */
+#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
+#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
+#define V7M_SCB_DCIMVAC 0x25c /* D-cache invalidate by MVA to PoC */
+#define V7M_SCB_DCISW 0x260 /* D-cache invalidate by set-way */
+#define V7M_SCB_DCCMVAU 0x264 /* D-cache clean by MVA to PoU */
+#define V7M_SCB_DCCMVAC 0x268 /* D-cache clean by MVA to PoC */
+#define V7M_SCB_DCCSW 0x26c /* D-cache clean by set-way */
+#define V7M_SCB_DCCIMVAC 0x270 /* D-cache clean and invalidate by MVA to PoC */
+#define V7M_SCB_DCCISW 0x274 /* D-cache clean and invalidate by set-way */
+#define V7M_SCB_BPIALL 0x278 /* Branch predictor invalidate all */
+
#ifndef __ASSEMBLY__
enum reboot_mode;
diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S
new file mode 100644
index 000000000000..9113d7b33ae0
--- /dev/null
+++ b/arch/arm/include/debug/brcmstb.S
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/serial_reg.h>
+
+/* Physical register offset and virtual register offset */
+#define REG_PHYS_BASE 0xf0000000
+#define REG_VIRT_BASE 0xfc000000
+#define REG_PHYS_ADDR(x) ((x) + REG_PHYS_BASE)
+
+/* Product id can be read from here */
+#define SUN_TOP_CTRL_BASE REG_PHYS_ADDR(0x404000)
+
+#define UARTA_3390 REG_PHYS_ADDR(0x40a900)
+#define UARTA_7250 REG_PHYS_ADDR(0x40b400)
+#define UARTA_7268 REG_PHYS_ADDR(0x40c000)
+#define UARTA_7271 UARTA_7268
+#define UARTA_7364 REG_PHYS_ADDR(0x40b000)
+#define UARTA_7366 UARTA_7364
+#define UARTA_74371 REG_PHYS_ADDR(0x406b00)
+#define UARTA_7439 REG_PHYS_ADDR(0x40a900)
+#define UARTA_7445 REG_PHYS_ADDR(0x40ab00)
+
+#define UART_SHIFT 2
+
+#define checkuart(rp, rv, family_id, family) \
+ /* Load family id */ \
+ ldr rp, =family_id ; \
+ /* Compare SUN_TOP_CTRL value against it */ \
+ cmp rp, rv ; \
+ /* Passed test, load address */ \
+ ldreq rp, =UARTA_##family ; \
+ /* Jump to save UART address */ \
+ beq 91f
+
+ .macro addruart, rp, rv, tmp
+ adr \rp, 99f @ actual addr of 99f
+ ldr \rv, [\rp] @ linked addr is stored there
+ sub \rv, \rv, \rp @ offset between the two
+ ldr \rp, [\rp, #4] @ linked brcmstb_uart_config
+ sub \tmp, \rp, \rv @ actual brcmstb_uart_config
+ ldr \rp, [\tmp] @ Load brcmstb_uart_config
+ cmp \rp, #1 @ needs initialization?
+ bne 100f @ no; go load the addresses
+ mov \rv, #0 @ yes; record init is done
+ str \rv, [\tmp]
+
+ /* Check SUN_TOP_CTRL base */
+ ldr \rp, =SUN_TOP_CTRL_BASE @ load SUN_TOP_CTRL PA
+ ldr \rv, [\rp, #0] @ get register contents
+ and \rv, \rv, #0xffffff00 @ strip revision bits [7:0]
+
+ /* Chip specific detection starts here */
+20: checkuart(\rp, \rv, 0x33900000, 3390)
+21: checkuart(\rp, \rv, 0x72500000, 7250)
+22: checkuart(\rp, \rv, 0x72680000, 7268)
+23: checkuart(\rp, \rv, 0x72710000, 7271)
+24: checkuart(\rp, \rv, 0x73640000, 7364)
+25: checkuart(\rp, \rv, 0x73660000, 7366)
+26: checkuart(\rp, \rv, 0x07437100, 74371)
+27: checkuart(\rp, \rv, 0x74390000, 7439)
+28: checkuart(\rp, \rv, 0x74450000, 7445)
+
+ /* No valid UART found */
+90: mov \rp, #0
+ /* fall through */
+
+ /* Record whichever UART we chose */
+91: str \rp, [\tmp, #4] @ Store in brcmstb_uart_phys
+ cmp \rp, #0 @ Valid UART address?
+ bne 92f @ Yes, go process it
+ str \rp, [\tmp, #8] @ Store 0 in brcmstb_uart_virt
+ b 100f @ Done
+92: and \rv, \rp, #0xffffff @ offset within 16MB section
+ add \rv, \rv, #REG_VIRT_BASE
+ str \rv, [\tmp, #8] @ Store in brcmstb_uart_virt
+ b 100f
+
+ .align
+99: .word .
+ .word brcmstb_uart_config
+ .ltorg
+
+ /* Load previously selected UART address */
+100: ldr \rp, [\tmp, #4] @ Load brcmstb_uart_phys
+ ldr \rv, [\tmp, #8] @ Load brcmstb_uart_virt
+ .endm
+
+ .macro store, rd, rx:vararg
+ str \rd, \rx
+ .endm
+
+ .macro load, rd, rx:vararg
+ ldr \rd, \rx
+ .endm
+
+ .macro senduart,rd,rx
+ store \rd, [\rx, #UART_TX << UART_SHIFT]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: load \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1002b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+/*
+ * Storage for the state maintained by the macros above.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-bcm/brcmstb.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the symbol/storage right here, since common.c
+ * isn't included in the decompressor build. This symbol gets put in .text
+ * even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+#if defined(ZIMAGE)
+brcmstb_uart_config:
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#endif
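
The addruart macro above probes SUN_TOP_CTRL once, caches the chosen UART's physical address in brcmstb_uart_config, and derives the virtual address from the offset within the 16 MB debug mapping. A C sketch of that probe-and-cache flow under the same REG_PHYS_BASE/REG_VIRT_BASE assumptions, with only two family entries kept for brevity:

    /* C sketch of the addruart flow above: probe once, cache the physical
     * UART address, derive the virtual one from the offset inside the 16 MB
     * debug mapping.  Only two family entries are kept for brevity. */
    #include <stdio.h>
    #include <stdint.h>

    #define REG_PHYS_BASE   0xf0000000u
    #define REG_VIRT_BASE   0xfc000000u

    static struct { uint32_t needs_init, phys, virt; } uart_cfg = { 1, 0, 0 };

    static void probe(uint32_t family)  /* SUN_TOP_CTRL value, rev bits stripped */
    {
            uint32_t phys = 0;

            if (family == 0x72500000)       phys = REG_PHYS_BASE + 0x40b400; /* 7250 */
            else if (family == 0x74450000)  phys = REG_PHYS_BASE + 0x40ab00; /* 7445 */

            uart_cfg.needs_init = 0;
            uart_cfg.phys = phys;
            uart_cfg.virt = phys ? REG_VIRT_BASE + (phys & 0x00ffffff) : 0;
    }

    int main(void)
    {
            probe(0x74450000);
            printf("phys=%#x virt=%#x\n", (unsigned)uart_cfg.phys,
                   (unsigned)uart_cfg.virt);
            return 0;
    }
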
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a2b3eb313a25..b38c10c73579 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
+/* Supported VGICv3 address types */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+
+#define KVM_VGIC_V3_DIST_SIZE SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 7dccc964d75f..a3308ad1a024 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
__used __section(__cpuidle_method_of_table_end);
-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
/**
* arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index fb1a69eb49c1..6b4eb27b8758 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -158,7 +158,21 @@ __after_proc_init:
bic r0, r0, #CR_V
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
-#endif /* CONFIG_CPU_CP15 */
+#elif defined (CONFIG_CPU_V7M)
+ /* For V7M systems we want to modify the CCR similarly to the SCTLR */
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_DC
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_BP
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_IC
+#endif
+ movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ str r0, [r3]
+#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
.ltorg
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 0c7efc3446c0..3a5cba90c971 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -9,6 +9,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
@@ -30,154 +31,198 @@ struct plt_entries {
u32 lit[PLT_ENT_COUNT];
};
-static bool in_init(const struct module *mod, u32 addr)
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
- return addr - (u32)mod->init_layout.base < mod->init_layout.size;
+ struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+ int idx = 0;
+
+ /*
+ * Look for an existing entry pointing to 'val'. Given that the
+ * relocations are sorted, this will be the last entry we allocated.
+ * (if one exists).
+ */
+ if (mod->arch.plt_count > 0) {
+ plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+ idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+
+ idx = (idx + 1) % PLT_ENT_COUNT;
+ if (!idx)
+ plt++;
+ }
+
+ mod->arch.plt_count++;
+ BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+
+ if (!idx)
+ /* Populate a new set of entries */
+ *plt = (struct plt_entries){
+ { [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
+ { val, }
+ };
+ else
+ plt->lit[idx] = val;
+
+ return (u32)&plt->ldr[idx];
}
-u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rel(const void *a, const void *b)
{
- struct plt_entries *plt, *plt_end;
- int c, *count;
-
- if (in_init(mod, loc)) {
- plt = (void *)mod->arch.init_plt->sh_addr;
- plt_end = (void *)plt + mod->arch.init_plt->sh_size;
- count = &mod->arch.init_plt_count;
- } else {
- plt = (void *)mod->arch.core_plt->sh_addr;
- plt_end = (void *)plt + mod->arch.core_plt->sh_size;
- count = &mod->arch.core_plt_count;
- }
+ const Elf32_Rel *x = a, *y = b;
+ int i;
- /* Look for an existing entry pointing to 'val' */
- for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
- int i;
-
- if (!c) {
- /* Populate a new set of entries */
- *plt = (struct plt_entries){
- { [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
- { val, }
- };
- ++*count;
- return (u32)plt->ldr;
- }
- for (i = 0; i < PLT_ENT_COUNT; i++) {
- if (!plt->lit[i]) {
- plt->lit[i] = val;
- ++*count;
- }
- if (plt->lit[i] == val)
- return (u32)&plt->ldr[i];
- }
+ /* sort by type and symbol index */
+ i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
+ if (i == 0)
+ i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
+ return i;
+}
+
+static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
+{
+ u32 *tval = (u32 *)(base + rel->r_offset);
+
+ /*
+ * Do a bitwise compare on the raw addend rather than fully decoding
+ * the offset and doing an arithmetic comparison.
+ * Note that a zero-addend jump/call relocation is encoded taking the
+ * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
+ */
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ u16 upper, lower;
+
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
+ lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
+
+ return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
+
+ case R_ARM_CALL:
+ case R_ARM_PC24:
+ case R_ARM_JUMP24:
+ return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
}
BUG();
}
-static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
- u32 mask)
+static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
- u32 *loc1, *loc2;
- int i;
+ const Elf32_Rel *prev;
- for (i = 0; i < num; i++) {
- if (rel[i].r_info != rel[num].r_info)
- continue;
+ /*
+ * Entries are sorted by type and symbol index. That means that,
+ * if a duplicate entry exists, it must be in the preceding
+ * slot.
+ */
+ if (!num)
+ return false;
- /*
- * Identical relocation types against identical symbols can
- * still result in different PLT entries if the addend in the
- * place is different. So resolve the target of the relocation
- * to compare the values.
- */
- loc1 = (u32 *)(base + rel[i].r_offset);
- loc2 = (u32 *)(base + rel[num].r_offset);
- if (((*loc1 ^ *loc2) & mask) == 0)
- return 1;
- }
- return 0;
+ prev = rel + num - 1;
+ return cmp_rel(rel + num, prev) == 0 &&
+ is_zero_addend_relocation(base, prev);
}
/* Count how many PLT entries we may need */
-static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+ const Elf32_Rel *rel, int num)
{
unsigned int ret = 0;
+ const Elf32_Sym *s;
int i;
- /*
- * Sure, this is order(n^2), but it's usually short, and not
- * time critical
- */
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_ARM_CALL:
case R_ARM_PC24:
case R_ARM_JUMP24:
- if (!duplicate_rel(base, rel, i,
- __opcode_to_mem_arm(0x00ffffff)))
- ret++;
- break;
-#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- if (!duplicate_rel(base, rel, i,
- __opcode_to_mem_thumb32(0x07ff2fff)))
+ /*
+ * We only have to consider branch targets that resolve
+ * to undefined symbols. This is not simply a heuristic,
+ * it is a fundamental limitation, since the PLT itself
+ * is part of the module, and needs to be within range
+ * as well, so modules can never grow beyond that limit.
+ */
+ s = syms + ELF32_R_SYM(rel[i].r_info);
+ if (s->st_shndx != SHN_UNDEF)
+ break;
+
+ /*
+ * Jump relocations with non-zero addends against
+ * undefined symbols are supported by the ELF spec, but
+ * do not occur in practice (e.g., 'jump n bytes past
+ * the entry point of undefined function symbol f').
+ * So we need to support them, but there is no need to
+ * take them into consideration when trying to optimize
+ * this code. So let's only check for duplicates when
+ * the addend is zero.
+ */
+ if (!is_zero_addend_relocation(base, rel + i) ||
+ !duplicate_rel(base, rel, i))
ret++;
-#endif
}
+ }
return ret;
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long core_plts = 0, init_plts = 0;
+ unsigned long plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+ Elf32_Sym *syms = NULL;
/*
* To store the PLTs, we expand the .text section for core module code
- * and the .init.text section for initialization code.
+ * and for initialization code.
*/
- for (s = sechdrs; s < sechdrs_end; ++s)
- if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
- mod->arch.core_plt = s;
- else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
- mod->arch.init_plt = s;
-
- if (!mod->arch.core_plt || !mod->arch.init_plt) {
- pr_err("%s: sections missing\n", mod->name);
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ if (strcmp(".plt", secstrings + s->sh_name) == 0)
+ mod->arch.plt = s;
+ else if (s->sh_type == SHT_SYMTAB)
+ syms = (Elf32_Sym *)s->sh_addr;
+ }
+
+ if (!mod->arch.plt) {
+ pr_err("%s: module PLT section missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!syms) {
+ pr_err("%s: module symtab section missing\n", mod->name);
return -ENOEXEC;
}
for (s = sechdrs + 1; s < sechdrs_end; ++s) {
- const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
+ Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
int numrels = s->sh_size / sizeof(Elf32_Rel);
Elf32_Shdr *dstsec = sechdrs + s->sh_info;
if (s->sh_type != SHT_REL)
continue;
- if (strstr(secstrings + s->sh_name, ".init"))
- init_plts += count_plts(dstsec->sh_addr, rels, numrels);
- else
- core_plts += count_plts(dstsec->sh_addr, rels, numrels);
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dstsec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ /* sort by type and symbol index */
+ sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+
+ plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
}
- mod->arch.core_plt->sh_type = SHT_NOBITS;
- mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
- mod->arch.core_plt_count = 0;
-
- mod->arch.init_plt->sh_type = SHT_NOBITS;
- mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
- mod->arch.init_plt_count = 0;
- pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
- mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
+ mod->arch.plt->sh_type = SHT_NOBITS;
+ mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
+ mod->arch.plt_count = 0;
+
+ pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
return 0;
}
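
The reworked module-plts.c sorts each REL section by (type, symbol index) so that duplicate zero-addend branch relocations land next to each other, turning duplicate detection into a single compare against the previous entry instead of the old O(n^2) scan. A standalone sketch of that counting idea, with a simplified record standing in for Elf32_Rel and every entry treated as a zero-addend branch to an undefined symbol:

    /* Sketch of the "sort, then compare with the previous entry" counting
     * idea from the hunk above.  A simplified record stands in for Elf32_Rel
     * and every entry is treated as a zero-addend branch to an undefined
     * symbol, so deduplication is the only thing shown. */
    #include <stdio.h>
    #include <stdlib.h>

    struct rel { unsigned type, sym; };

    static int cmp_rel(const void *a, const void *b)
    {
            const struct rel *x = a, *y = b;

            if (x->type != y->type)
                    return x->type < y->type ? -1 : 1;
            return x->sym < y->sym ? -1 : (x->sym > y->sym);
    }

    static unsigned count_plts(struct rel *rels, int num)
    {
            unsigned plts = 0;
            int i;

            qsort(rels, num, sizeof(*rels), cmp_rel);
            for (i = 0; i < num; i++)
                    if (i == 0 || cmp_rel(&rels[i], &rels[i - 1]) != 0)
                            plts++;         /* first reference to this (type, sym) */
            return plts;
    }

    int main(void)
    {
            struct rel rels[] = { { 28, 5 }, { 28, 3 }, { 28, 5 }, { 30, 5 } };

            printf("plt entries needed: %u\n", count_plts(rels, 4));  /* prints 3 */
            return 0;
    }
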
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 3682fa107918..05881e2b414c 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,4 +1,3 @@
SECTIONS {
- .core.plt : { BYTE(0) }
- .init.plt : { BYTE(0) }
+ .plt : { BYTE(0) }
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index df7f2a75e769..34e3f3c45634 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -114,19 +114,19 @@ EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
-struct processor processor __read_mostly;
+struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb __read_mostly;
+struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
-struct cpu_user_fns cpu_user __read_mostly;
+struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache __read_mostly;
+struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache __read_mostly;
+struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif
@@ -290,12 +290,9 @@ static int cpu_has_aliasing_icache(unsigned int arch)
/* arch specifies the register format */
switch (arch) {
case CPU_ARCH_ARMv7:
- asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
- : /* No output operands */
- : "r" (1));
+ set_csselr(CSSELR_ICACHE | CSSELR_L1);
isb();
- asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
- : "=r" (id_reg));
+ id_reg = read_ccsidr();
line_size = 4 << ((id_reg & 0x7) + 2);
num_sets = ((id_reg >> 13) & 0x7fff) + 1;
aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
@@ -315,11 +312,12 @@ static void __init cacheid_init(void)
{
unsigned int arch = cpu_architecture();
- if (arch == CPU_ARCH_ARMv7M) {
- cacheid = 0;
- } else if (arch >= CPU_ARCH_ARMv6) {
+ if (arch >= CPU_ARCH_ARMv6) {
unsigned int cachetype = read_cpuid_cachetype();
- if ((cachetype & (7 << 29)) == 4 << 29) {
+
+ if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+ cacheid = 0;
+ } else if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
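
The rewritten cpu_has_aliasing_icache() path selects the L1 I-cache via set_csselr() and then decodes CCSIDR to check whether line_size * num_sets exceeds PAGE_SIZE. A worked decode using the same field extraction (the CCSIDR value is invented for illustration):

    /* Decode CCSIDR the way the hunk above does.  The sample value is
     * invented; real values come from read_ccsidr() after set_csselr(). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t ccsidr = 0x201fe00a;   /* hypothetical L1 I-cache CCSIDR */
            unsigned page_size = 4096;

            unsigned line_size = 4 << ((ccsidr & 0x7) + 2);       /* bytes per line */
            unsigned num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;

            printf("line=%u sets=%u aliasing=%d\n",
                   line_size, num_sets, line_size * num_sets > page_size);
            return 0;
    }
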
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 861521606c6d..7dd14e8395e6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -82,7 +82,7 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
-static struct smp_operations smp_ops;
+static struct smp_operations smp_ops __ro_after_init;
void __init smp_set_ops(const struct smp_operations *ops)
{
@@ -748,19 +748,10 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- /*
- * Generate the backtrace directly if we are running in a calling
- * context that is not preemptible by the backtrace IPI. Note
- * that nmi_cpu_backtrace() automatically removes the current cpu
- * from mask.
- */
- if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
- nmi_cpu_backtrace(NULL);
-
smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
- nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index a0affd14086a..53cf86cf2d1a 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -39,7 +40,7 @@
static struct page **vdso_text_pagelist;
/* Total number of pages needed for the data and text portions of the VDSO. */
-unsigned int vdso_total_pages __read_mostly;
+unsigned int vdso_total_pages __ro_after_init;
/*
* The VDSO data page.
@@ -47,13 +48,13 @@ unsigned int vdso_total_pages __read_mostly;
static union vdso_data_store vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;
-static struct page *vdso_data_page;
-static struct vm_special_mapping vdso_data_mapping = {
+static struct page *vdso_data_page __ro_after_init;
+static const struct vm_special_mapping vdso_data_mapping = {
.name = "[vvar]",
.pages = &vdso_data_page,
};
-static struct vm_special_mapping vdso_text_mapping = {
+static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
.name = "[vdso]",
};
@@ -67,7 +68,7 @@ struct elfinfo {
/* Cached result of boot-time check for whether the arch timer exists,
* and if so, whether the virtual counter is useable.
*/
-static bool cntvct_ok __read_mostly;
+static bool cntvct_ok __ro_after_init;
static bool __init cntvct_functional(void)
{
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index cba1ec899a69..7fa487ef7e2f 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -98,6 +98,7 @@ SECTIONS
IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.warning)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index d24e5dd2aa7a..f7f55df0bf7b 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -111,6 +111,7 @@ SECTIONS
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
KPROBES_TEXT
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 10d77a66cad5..f19842ea5418 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,13 +21,16 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-v3.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c94b90d43772..03e9273f1876 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -144,6 +144,16 @@ out_fail_alloc:
return ret;
}
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -1176,6 +1186,10 @@ static int init_common_resources(void)
return -ENOMEM;
}
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
return 0;
}
@@ -1241,10 +1255,6 @@ static void teardown_hyp_mode(void)
static int init_vhe_mode(void)
{
- /* set size of VMID supported by CPU */
- kvm_vmid_bits = kvm_get_vmid_bits();
- kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
kvm_info("VHE mode initialized successfully\n");
return 0;
}
@@ -1328,10 +1338,6 @@ static int init_hyp_mode(void)
}
}
- /* set size of VMID supported by CPU */
- kvm_vmid_bits = kvm_get_vmid_bits();
- kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
kvm_info("Hyp mode initialized successfully\n");
return 0;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 1bb2b79c01ff..3e5e4194ef86 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -228,6 +228,35 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
return true;
}
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ u64 reg;
+
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+	reg |= *vcpu_reg(vcpu, p->Rt1);
+
+ vgic_v3_dispatch_sgi(vcpu, reg);
+
+ return true;
+}
+
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
+{
+ if (p->is_write)
+ return ignore_write(vcpu, p);
+
+ *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+
+ return true;
+}
+
/*
* We could trap ID_DFR0 and tell the guest we don't support performance
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
@@ -361,10 +390,16 @@ static const struct coproc_reg cp15_regs[] = {
{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
access_vm_reg, reset_unknown, c10_AMAIR1},
+ /* ICC_SGI1R */
+ { CRm64(12), Op1( 0), is64, access_gic_sgi},
+
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c12_VBAR, 0x00000000 },
+ /* ICC_SRE */
+ { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
+
/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
access_vm_reg, reset_val, c13_CID, 0x00000000 },
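
access_gic_sgi() reassembles the 64-bit operand of the trapped mcrr from the two 32-bit guest registers (Rt1 carries the low word, Rt2 the high word) before handing it to vgic_v3_dispatch_sgi(). A trivial sketch of that reassembly with invented register contents:

    /* Reassemble the 64-bit mcrr operand from its two 32-bit halves, as
     * access_gic_sgi() does.  The register contents are invented. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t rt1 = 0x00010003;      /* low word  (p->Rt1) */
            uint32_t rt2 = 0x00000001;      /* high word (p->Rt2) */
            uint64_t reg = ((uint64_t)rt2 << 32) | rt1;

            printf("ICC_SGI1R value: %#llx\n", (unsigned long long)reg);
            return 0;
    }
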
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index af93e3ffc9f3..0064b86a2c87 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -161,105 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
}
}
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr, cond, insn;
-
- /*
- * Exception Code 0 can only happen if we set HCR.TGE to 1, to
- * catch undefined instructions, and then we won't get past
- * the arm_exit_handlers test anyway.
- */
- BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
-
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_hsr(vcpu) >> 30)
- return true;
-
- cpsr = *vcpu_cpsr(vcpu);
-
- /* Is condition field valid? */
- if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
- cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
- else {
- /* This can happen in Thumb mode: examine IT state. */
- unsigned long it;
-
- it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
- /* it == 0 => unconditional. */
- if (it == 0)
- return true;
-
- /* The cond for this insn works out as the top 4 bits. */
- cond = (it >> 4);
- }
-
- /* Shift makes it look like an ARM-mode instruction */
- insn = cond << 28;
- return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu: The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
- unsigned long itbits, cond;
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & PSR_T_BIT);
-
- BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
-
- if (!(cpsr & PSR_IT_MASK))
- return;
-
- cond = (cpsr & 0xe000) >> 13;
- itbits = (cpsr & 0x1c00) >> (10 - 2);
- itbits |= (cpsr & (0x3 << 25)) >> 25;
-
- /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
- if ((itbits & 0x7) == 0)
- itbits = cond = 0;
- else
- itbits = (itbits << 1) & 0x1f;
-
- cpsr &= ~PSR_IT_MASK;
- cpsr |= cond << 13;
- cpsr |= (itbits & 0x1c) << (10 - 2);
- cpsr |= (itbits & 0x3) << 25;
- *vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- bool is_thumb;
-
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- if (is_thumb && !is_wide_instr)
- *vcpu_pc(vcpu) += 2;
- else
- *vcpu_pc(vcpu) += 4;
- kvm_adjust_itstate(vcpu);
-}
-
-
/******************************************************************************
* Inject exceptions into the guest
*/
@@ -402,3 +303,15 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
inject_abt(vcpu, true, addr);
}
+
+/**
+ * kvm_inject_vabt - inject an async abort / SError into the guest
+ * @vcpu: The VCPU to receive the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+{
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+}
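
kvm_inject_vabt() above simply latches HCR.VA, so the core delivers a virtual (asynchronous) abort to the guest on its next entry; the __deactivate_traps() hunk in switch.c further down keeps the bit set until the hardware has actually taken the abort. For reference, HCR.VA is bit 8 (value shown here as an assumption; the in-tree definition lives in asm/kvm_arm.h):

    #define HCR_VA          (1U << 8)       /* virtual abort pending (assumed value) */
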
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3f1ef0dbc899..4e40d1955e35 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -28,14 +28,6 @@
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* SVC called from Hyp mode should never get here */
- kvm_debug("SVC called from Hyp mode shouldn't go here\n");
- BUG();
- return -EINVAL; /* Squash warning */
-}
-
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
@@ -59,22 +51,6 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* The hypervisor should never cause aborts */
- kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
- kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
- return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /* This is either an error in the ws. code or an external abort */
- kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
- kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
- return -EFAULT;
-}
-
/**
* kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
* @vcpu: the vcpu pointer
@@ -112,13 +88,10 @@ static exit_handle_fn arm_exit_handlers[] = {
[HSR_EC_CP14_64] = kvm_handle_cp14_access,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
- [HSR_EC_SVC_HYP] = handle_svc_hyp,
[HSR_EC_HVC] = handle_hvc,
[HSR_EC_SMC] = handle_smc,
[HSR_EC_IABT] = kvm_handle_guest_abort,
- [HSR_EC_IABT_HYP] = handle_pabt_hyp,
[HSR_EC_DABT] = kvm_handle_guest_abort,
- [HSR_EC_DABT_HYP] = handle_dabt_hyp,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -144,6 +117,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
exit_handle_fn exit_handler;
+ if (ARM_ABORT_PENDING(exception_index)) {
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+ /*
+ * HVC/SMC already have an adjusted PC, which we need
+ * to wind back so that the guest returns to the trapping
+ * instruction after the injected abort has been handled.
+ */
+ if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
+ u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+ *vcpu_pc(vcpu) -= adj;
+ }
+
+ kvm_inject_vabt(vcpu);
+ return 1;
+ }
+
+ exception_index = ARM_EXCEPTION_CODE(exception_index);
+
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
@@ -160,6 +152,9 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
+ case ARM_EXCEPTION_DATA_ABORT:
+ kvm_inject_vabt(vcpu);
+ return 1;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
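
handle_exit() above relies on the exit code carrying an abort-pending flag that the hyp_dabt vector (see the hyp-entry.S hunk below) ORs into r0 before returning to the host. A minimal sketch of that encoding, assuming bit 31 is the flag as the "r0[31] will be set" comment in entry.S suggests (the in-tree definitions live in asm/kvm_asm.h):

    #define ARM_EXIT_WITH_ABORT_BIT     31
    #define ARM_EXCEPTION_CODE(x)       ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
    #define ARM_ABORT_PENDING(x)        (!!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)))
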
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8dfa5f7f9290..3023bb530edf 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -5,6 +5,7 @@
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
index 21c238871c9e..60783f3b57cc 100644
--- a/arch/arm/kvm/hyp/entry.S
+++ b/arch/arm/kvm/hyp/entry.S
@@ -18,6 +18,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
.arch_extension virt
@@ -63,6 +64,36 @@ ENTRY(__guest_exit)
ldr lr, [r0, #4]
mov r0, r1
+ mrs r1, SPSR
+ mrs r2, ELR_hyp
+ mrc p15, 4, r3, c5, c2, 0 @ HSR
+
+ /*
+ * Force loads and stores to complete before unmasking aborts
+ * and forcing the delivery of the exception. This gives us a
+ * single instruction window, which the handler will try to
+ * match.
+ */
+ dsb sy
+ cpsie a
+
+ .global abort_guest_exit_start
+abort_guest_exit_start:
+
+ isb
+
+ .global abort_guest_exit_end
+abort_guest_exit_end:
+
+ /*
+ * If we took an abort, r0[31] will be set, and cmp will set
+ * the N bit in PSTATE.
+ */
+ cmp r0, #0
+ msrmi SPSR_cxsf, r1
+ msrmi ELR_hyp, r2
+ mcrmi p15, 4, r3, c5, c2, 0 @ HSR
+
bx lr
ENDPROC(__guest_exit)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 78091383a5d9..96beb53934c9 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -81,7 +81,6 @@ __kvm_hyp_vector:
invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
- invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT
invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
ENTRY(__hyp_do_panic)
@@ -164,6 +163,21 @@ hyp_irq:
load_vcpu r0 @ Load VCPU pointer to r0
b __guest_exit
+hyp_dabt:
+ push {r0, r1}
+ mrs r0, ELR_hyp
+ ldr r1, =abort_guest_exit_start
+THUMB( add r1, r1, #1)
+ cmp r0, r1
+ ldrne r1, =abort_guest_exit_end
+THUMB( addne r1, r1, #1)
+ cmpne r0, r1
+ pop {r0, r1}
+ bne __hyp_panic
+
+ orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
+ eret
+
.ltorg
.popsection
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index b13caa90cd44..92678b7bd046 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -14,6 +14,7 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/jump_label.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
@@ -54,6 +55,15 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
u32 val;
+ /*
+ * If we pended a virtual abort, preserve it until it gets
+ * cleared. See B1.9.9 (Virtual Abort exception) for details,
+ * but the crucial bit is the zeroing of HCR.VA in the
+ * pseudocode.
+ */
+ if (vcpu->arch.hcr & HCR_VA)
+ vcpu->arch.hcr = read_sysreg(HCR);
+
write_sysreg(0, HCR);
write_sysreg(0, HSTR);
val = read_sysreg(HDCR);
@@ -74,14 +84,21 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
write_sysreg(read_sysreg(MIDR), VPIDR);
}
+
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
- __vgic_v2_save_state(vcpu);
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_save_state(vcpu);
+ else
+ __vgic_v2_save_state(vcpu);
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
- __vgic_v2_restore_state(vcpu);
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_restore_state(vcpu);
+ else
+ __vgic_v2_restore_state(vcpu);
}
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
@@ -134,7 +151,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -191,8 +208,6 @@ again:
return exit_code;
}
-__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
static const char * const __hyp_panic_string[] = {
[ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
[ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
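
The vgic save/restore hunks above select the GICv2 or GICv3 world-switch code with a static key rather than an indirect call, so the common case costs only a patched branch. A standalone sketch of the pattern, using a file-local key instead of the in-tree kvm_vgic_global_state field (hypothetical names throughout):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(gicv3_cpuif);    /* default: GICv2 path */

    static void select_cpuif(bool have_gicv3)
    {
            if (have_gicv3)
                    static_branch_enable(&gicv3_cpuif);     /* flip once at probe time */
    }

    static void save_state(void)
    {
            if (static_branch_unlikely(&gicv3_cpuif))
                    ;       /* __vgic_v3_save_state(...) */
            else
                    ;       /* __vgic_v2_save_state(...) */
    }
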
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index a2636001e616..729652854f90 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -34,7 +34,7 @@
* As v7 does not support flushing per IPA, just nuke the whole TLB
* instead, ignoring the ipa value.
*/
-static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
dsb(ishst);
@@ -50,21 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, VTTBR);
}
-__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
-
-static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
- __tlb_flush_vmid(kvm);
+ __kvm_tlb_flush_vmid(kvm);
}
-__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
- phys_addr_t ipa);
-
-static void __hyp_text __tlb_flush_vm_context(void)
+void __hyp_text __kvm_flush_vm_context(void)
{
write_sysreg(0, TLBIALLNSNHIS);
write_sysreg(0, ICIALLUIS);
dsb(ish);
}
-
-__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
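
The tlb.c change gives the hyp functions their exported names directly instead of defining static helpers and re-exporting them through __alias(). For reference, the removed indirection boils down to the GCC alias attribute, roughly as follows (a generic sketch; the kernel's wrapper lives in compiler-gcc.h):

    #define __alias(sym)    __attribute__((alias(#sym)))

    static void __do_flush(void) { /* ... */ }
    __alias(__do_flush) void exported_flush(void);  /* second linker-visible name */
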
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 10f80a6c797a..b6e715fd3c90 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -126,12 +126,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
int access_size;
bool sign_extend;
- if (kvm_vcpu_dabt_isextabt(vcpu)) {
- /* cache operation on I/O addr, tell guest unsupported */
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
- }
-
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index e9a5c0e0c115..a5265edbeeab 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -744,7 +744,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
- kvm_clean_pgd(pgd);
kvm->arch.pgd = pgd;
return 0;
}
@@ -936,7 +935,6 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
pte = mmu_memory_cache_alloc(cache);
- kvm_clean_pte(pte);
pmd_populate_kernel(NULL, pmd, pte);
get_page(virt_to_page(pmd));
}
@@ -1434,6 +1432,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
int ret, idx;
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
+ kvm_inject_vabt(vcpu);
+ return 1;
+ }
+
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
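
Together with the mmio.c hunk above, this makes kvm_handle_guest_abort() the single place where external (SError-class) data aborts are turned into a virtual abort for the guest instead of being misreported as MMIO. The resulting decision flow, condensed into a sketch with a hypothetical helper for the unchanged path:

    static int guest_dabt_flow(struct kvm_vcpu *vcpu)
    {
            if (kvm_vcpu_dabt_isextabt(vcpu)) {     /* external abort on the access */
                    kvm_inject_vabt(vcpu);          /* forward as a virtual abort */
                    return 1;                       /* resume the guest */
            }
            /* otherwise: stage-2 fault fixup or MMIO emulation, as before */
            return handle_stage2_or_mmio(vcpu);     /* hypothetical helper */
    }
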
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 8044591dca72..2cef11884857 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -29,7 +29,7 @@
/*
* Default to the loop-based delay implementation.
*/
-struct arm_delay_ops arm_delay_ops = {
+struct arm_delay_ops arm_delay_ops __ro_after_init = {
.delay = __loop_delay,
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
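
Marking arm_delay_ops as __ro_after_init keeps it writable while early boot code may still switch to a timer-based delay implementation, after which the kernel write-protects it along with the rest of that section. The attribute reduces to a section annotation, roughly as below (an assumption; see include/linux/cache.h for the real definition):

    #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
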
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 5204395efda8..841e924143f9 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -55,7 +55,6 @@ config SOC_AT91RM9200
select ATMEL_ST
select CPU_ARM920T
select HAVE_AT91_USB_CLK
- select MIGHT_HAVE_PCI
select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
diff --git a/arch/arm/mach-axxia/Kconfig b/arch/arm/mach-axxia/Kconfig
index 6c6d5e76565b..fe627cbcfdc5 100644
--- a/arch/arm/mach-axxia/Kconfig
+++ b/arch/arm/mach-axxia/Kconfig
@@ -7,8 +7,6 @@ config ARCH_AXXIA
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER
select MFD_SYSCON
- select MIGHT_HAVE_PCI
- select PCI_DOMAINS if PCI
select ZONE_DMA
help
This enables support for the LSI Axxia devices.
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 34f0fca0b847..a0e66d8200c5 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -15,7 +15,6 @@ config ARCH_BCM_IPROC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select ARM_GLOBAL_TIMER
- select COMMON_CLK_IPROC
select CLKSRC_MMIO
select GPIOLIB
select ARM_AMBA
@@ -159,6 +158,20 @@ config ARCH_BCM2835
This enables support for the Broadcom BCM2835 and BCM2836 SoCs.
This SoC is used in the Raspberry Pi and Roku 2 devices.
+config ARCH_BCM_53573
+ bool "Broadcom BCM53573 SoC series support"
+ depends on ARCH_MULTI_V7
+ select ARCH_BCM_IPROC
+ select HAVE_ARM_ARCH_TIMER
+ help
+ The BCM53573 series is a set of SoCs based on ARM Cortex-A7 CPUs with
+ wireless networking embedded in the chipset.
+ This SoC line is mostly used in home routers and is a cheaper
+ alternative to the Northstar family.
+
+ The base chip is BCM53573; BCM47189 and BCM47452 are packaging
+ variants of it.
+
config ARCH_BCM_63XX
bool "Broadcom BCM63xx DSL SoC"
depends on ARCH_MULTI_V7
diff --git a/arch/arm/mach-bcm/brcmstb.c b/arch/arm/mach-bcm/brcmstb.c
index 99a67cfb7c0d..07e3a86c6466 100644
--- a/arch/arm/mach-bcm/brcmstb.c
+++ b/arch/arm/mach-bcm/brcmstb.c
@@ -19,6 +19,22 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+/*
+ * Storage for debug-macro.S's state.
+ *
+ * This must be in .data not .bss so that it gets initialized each time the
+ * kernel is loaded. The data is declared here rather than debug-macro.S so
+ * that multiple inclusions of debug-macro.S point at the same data.
+ */
+u32 brcmstb_uart_config[3] = {
+ /* Debug UART initialization required */
+ 1,
+ /* Debug UART physical address */
+ 0,
+ /* Debug UART virtual address */
+ 0,
+};
+
static void __init brcmstb_init_irq(void)
{
irqchip_init();
diff --git a/arch/arm/mach-clps711x/Makefile.boot b/arch/arm/mach-clps711x/Makefile.boot
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/arm/mach-clps711x/Makefile.boot
+++ /dev/null
diff --git a/arch/arm/mach-clps711x/board-autcpu12.c b/arch/arm/mach-clps711x/board-autcpu12.c
deleted file mode 100644
index ba3d7d1b28f8..000000000000
--- a/arch/arm/mach-clps711x/board-autcpu12.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/autcpu12.c
- *
- * (c) 2001 Thomas Gleixner, autronix automation <gleixner@autronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/plat-ram.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand-gpio.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/driver.h>
-
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices.h"
-
-/* NOR flash */
-#define AUTCPU12_FLASH_BASE (CS0_PHYS_BASE)
-
-/* Board specific hardware definitions */
-#define AUTCPU12_CHAR_LCD_BASE (CS1_PHYS_BASE + 0x00000000)
-#define AUTCPU12_CSAUX1_BASE (CS1_PHYS_BASE + 0x04000000)
-#define AUTCPU12_CAN_BASE (CS1_PHYS_BASE + 0x08000000)
-#define AUTCPU12_TOUCH_BASE (CS1_PHYS_BASE + 0x0a000000)
-#define AUTCPU12_IO_BASE (CS1_PHYS_BASE + 0x0c000000)
-#define AUTCPU12_LPT_BASE (CS1_PHYS_BASE + 0x0e000000)
-
-/* NVRAM */
-#define AUTCPU12_NVRAM_BASE (CS1_PHYS_BASE + 0x02000000)
-
-/* SmartMedia flash */
-#define AUTCPU12_SMC_BASE (CS1_PHYS_BASE + 0x06000000)
-#define AUTCPU12_SMC_SEL_BASE (AUTCPU12_SMC_BASE + 0x10)
-
-/* Ethernet */
-#define AUTCPU12_CS8900_BASE (CS2_PHYS_BASE + 0x300)
-#define AUTCPU12_CS8900_IRQ (IRQ_EINT3)
-
-/* NAND flash */
-#define AUTCPU12_MMGPIO_BASE (CLPS711X_NR_GPIO)
-#define AUTCPU12_SMC_NCE (AUTCPU12_MMGPIO_BASE + 0) /* Bit 0 */
-#define AUTCPU12_SMC_RDY CLPS711X_GPIO(1, 2)
-#define AUTCPU12_SMC_ALE CLPS711X_GPIO(1, 3)
-#define AUTCPU12_SMC_CLE CLPS711X_GPIO(1, 4)
-
-/* LCD contrast digital potentiometer */
-#define AUTCPU12_DPOT_CS CLPS711X_GPIO(4, 0)
-#define AUTCPU12_DPOT_CLK CLPS711X_GPIO(4, 1)
-#define AUTCPU12_DPOT_UD CLPS711X_GPIO(4, 2)
-
-static struct resource autcpu12_cs8900_resource[] __initdata = {
- DEFINE_RES_MEM(AUTCPU12_CS8900_BASE, SZ_1K),
- DEFINE_RES_IRQ(AUTCPU12_CS8900_IRQ),
-};
-
-static struct resource autcpu12_nand_resource[] __initdata = {
- DEFINE_RES_MEM(AUTCPU12_SMC_BASE, SZ_16),
-};
-
-static struct mtd_partition autcpu12_nand_parts[] __initdata = {
- {
- .name = "Flash partition 1",
- .offset = 0,
- .size = SZ_8M,
- },
- {
- .name = "Flash partition 2",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static void __init autcpu12_adjust_parts(struct gpio_nand_platdata *pdata,
- size_t sz)
-{
- switch (sz) {
- case SZ_16M:
- case SZ_32M:
- break;
- case SZ_64M:
- case SZ_128M:
- pdata->parts[0].size = SZ_16M;
- break;
- default:
- pr_warn("Unsupported SmartMedia device size %u\n", sz);
- break;
- }
-}
-
-static struct gpio_nand_platdata autcpu12_nand_pdata __initdata = {
- .gpio_rdy = AUTCPU12_SMC_RDY,
- .gpio_nce = AUTCPU12_SMC_NCE,
- .gpio_ale = AUTCPU12_SMC_ALE,
- .gpio_cle = AUTCPU12_SMC_CLE,
- .gpio_nwp = -1,
- .chip_delay = 20,
- .parts = autcpu12_nand_parts,
- .num_parts = ARRAY_SIZE(autcpu12_nand_parts),
- .adjust_parts = autcpu12_adjust_parts,
-};
-
-static struct platform_device autcpu12_nand_pdev __initdata = {
- .name = "gpio-nand",
- .id = -1,
- .resource = autcpu12_nand_resource,
- .num_resources = ARRAY_SIZE(autcpu12_nand_resource),
- .dev = {
- .platform_data = &autcpu12_nand_pdata,
- },
-};
-
-static struct resource autcpu12_mmgpio_resource[] __initdata = {
- DEFINE_RES_MEM_NAMED(AUTCPU12_SMC_SEL_BASE, SZ_1, "dat"),
-};
-
-static struct bgpio_pdata autcpu12_mmgpio_pdata __initdata = {
- .base = AUTCPU12_MMGPIO_BASE,
- .ngpio = 8,
-};
-
-static struct platform_device autcpu12_mmgpio_pdev __initdata = {
- .name = "basic-mmio-gpio",
- .id = -1,
- .resource = autcpu12_mmgpio_resource,
- .num_resources = ARRAY_SIZE(autcpu12_mmgpio_resource),
- .dev = {
- .platform_data = &autcpu12_mmgpio_pdata,
- },
-};
-
-static const struct gpio const autcpu12_gpios[] __initconst = {
- { AUTCPU12_DPOT_CS, GPIOF_OUT_INIT_HIGH, "DPOT CS" },
- { AUTCPU12_DPOT_CLK, GPIOF_OUT_INIT_LOW, "DPOT CLK" },
- { AUTCPU12_DPOT_UD, GPIOF_OUT_INIT_LOW, "DPOT UD" },
-};
-
-static struct mtd_partition autcpu12_flash_partitions[] = {
- {
- .name = "NOR.0",
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct physmap_flash_data autcpu12_flash_pdata = {
- .width = 4,
- .parts = autcpu12_flash_partitions,
- .nr_parts = ARRAY_SIZE(autcpu12_flash_partitions),
-};
-
-static struct resource autcpu12_flash_resources[] __initdata = {
- DEFINE_RES_MEM(AUTCPU12_FLASH_BASE, SZ_8M),
-};
-
-static struct platform_device autcpu12_flash_pdev __initdata = {
- .name = "physmap-flash",
- .id = 0,
- .resource = autcpu12_flash_resources,
- .num_resources = ARRAY_SIZE(autcpu12_flash_resources),
- .dev = {
- .platform_data = &autcpu12_flash_pdata,
- },
-};
-
-static struct resource autcpu12_nvram_resource[] __initdata = {
- DEFINE_RES_MEM(AUTCPU12_NVRAM_BASE, 0),
-};
-
-static struct platdata_mtd_ram autcpu12_nvram_pdata = {
- .bankwidth = 4,
-};
-
-static struct platform_device autcpu12_nvram_pdev __initdata = {
- .name = "mtd-ram",
- .id = 0,
- .resource = autcpu12_nvram_resource,
- .num_resources = ARRAY_SIZE(autcpu12_nvram_resource),
- .dev = {
- .platform_data = &autcpu12_nvram_pdata,
- },
-};
-
-static void __init autcpu12_nvram_init(void)
-{
- void __iomem *nvram;
- unsigned int save[2];
- resource_size_t nvram_size = SZ_128K;
-
- /*
- * Check for 32K/128K
- * Read ofs 0K
- * Read ofs 64K
- * Write complement to ofs 64K
- * Read and check result on ofs 0K
- * Restore contents
- */
- nvram = ioremap(autcpu12_nvram_resource[0].start, SZ_128K);
- if (nvram) {
- save[0] = readl(nvram + 0);
- save[1] = readl(nvram + SZ_64K);
- writel(~save[0], nvram + SZ_64K);
- if (readl(nvram + 0) != save[0]) {
- writel(save[0], nvram + 0);
- nvram_size = SZ_32K;
- } else
- writel(save[1], nvram + SZ_64K);
- iounmap(nvram);
-
- autcpu12_nvram_resource[0].end =
- autcpu12_nvram_resource[0].start + nvram_size - 1;
- platform_device_register(&autcpu12_nvram_pdev);
- } else
- pr_err("Failed to remap NVRAM resource\n");
-}
-
-static void __init autcpu12_init(void)
-{
- clps711x_devices_init();
- platform_device_register(&autcpu12_flash_pdev);
- platform_device_register_simple("video-clps711x", 0, NULL, 0);
- platform_device_register_simple("cs89x0", 0, autcpu12_cs8900_resource,
- ARRAY_SIZE(autcpu12_cs8900_resource));
- platform_device_register(&autcpu12_mmgpio_pdev);
- autcpu12_nvram_init();
-}
-
-static void __init autcpu12_init_late(void)
-{
- gpio_request_array(autcpu12_gpios, ARRAY_SIZE(autcpu12_gpios));
- platform_device_register(&autcpu12_nand_pdev);
-}
-
-MACHINE_START(AUTCPU12, "autronix autcpu12")
- /* Maintainer: Thomas Gleixner */
- .atag_offset = 0x20000,
- .map_io = clps711x_map_io,
- .init_irq = clps711x_init_irq,
- .init_time = clps711x_timer_init,
- .init_machine = autcpu12_init,
- .init_late = autcpu12_init_late,
- .restart = clps711x_restart,
-MACHINE_END
-
diff --git a/arch/arm/mach-clps711x/board-cdb89712.c b/arch/arm/mach-clps711x/board-cdb89712.c
deleted file mode 100644
index 972abdb10028..000000000000
--- a/arch/arm/mach-clps711x/board-cdb89712.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/cdb89712.c
- *
- * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/plat-ram.h>
-#include <linux/mtd/partitions.h>
-
-#include <mach/hardware.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices.h"
-
-#define CDB89712_CS8900_BASE (CS2_PHYS_BASE + 0x300)
-#define CDB89712_CS8900_IRQ (IRQ_EINT3)
-
-static struct resource cdb89712_cs8900_resource[] __initdata = {
- DEFINE_RES_MEM(CDB89712_CS8900_BASE, SZ_1K),
- DEFINE_RES_IRQ(CDB89712_CS8900_IRQ),
-};
-
-static struct mtd_partition cdb89712_flash_partitions[] __initdata = {
- {
- .name = "Flash",
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct physmap_flash_data cdb89712_flash_pdata __initdata = {
- .width = 4,
- .probe_type = "map_rom",
- .parts = cdb89712_flash_partitions,
- .nr_parts = ARRAY_SIZE(cdb89712_flash_partitions),
-};
-
-static struct resource cdb89712_flash_resources[] __initdata = {
- DEFINE_RES_MEM(CS0_PHYS_BASE, SZ_8M),
-};
-
-static struct platform_device cdb89712_flash_pdev __initdata = {
- .name = "physmap-flash",
- .id = 0,
- .resource = cdb89712_flash_resources,
- .num_resources = ARRAY_SIZE(cdb89712_flash_resources),
- .dev = {
- .platform_data = &cdb89712_flash_pdata,
- },
-};
-
-static struct mtd_partition cdb89712_bootrom_partitions[] __initdata = {
- {
- .name = "BootROM",
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct physmap_flash_data cdb89712_bootrom_pdata __initdata = {
- .width = 4,
- .probe_type = "map_rom",
- .parts = cdb89712_bootrom_partitions,
- .nr_parts = ARRAY_SIZE(cdb89712_bootrom_partitions),
-};
-
-static struct resource cdb89712_bootrom_resources[] __initdata = {
- DEFINE_RES_NAMED(CS7_PHYS_BASE, SZ_128, "BOOTROM", IORESOURCE_MEM |
- IORESOURCE_READONLY),
-};
-
-static struct platform_device cdb89712_bootrom_pdev __initdata = {
- .name = "physmap-flash",
- .id = 1,
- .resource = cdb89712_bootrom_resources,
- .num_resources = ARRAY_SIZE(cdb89712_bootrom_resources),
- .dev = {
- .platform_data = &cdb89712_bootrom_pdata,
- },
-};
-
-static struct platdata_mtd_ram cdb89712_sram_pdata __initdata = {
- .bankwidth = 4,
-};
-
-static struct resource cdb89712_sram_resources[] __initdata = {
- DEFINE_RES_MEM(CLPS711X_SRAM_BASE, CLPS711X_SRAM_SIZE),
-};
-
-static struct platform_device cdb89712_sram_pdev __initdata = {
- .name = "mtd-ram",
- .id = 0,
- .resource = cdb89712_sram_resources,
- .num_resources = ARRAY_SIZE(cdb89712_sram_resources),
- .dev = {
- .platform_data = &cdb89712_sram_pdata,
- },
-};
-
-static void __init cdb89712_init(void)
-{
- clps711x_devices_init();
- platform_device_register(&cdb89712_flash_pdev);
- platform_device_register(&cdb89712_bootrom_pdev);
- platform_device_register(&cdb89712_sram_pdev);
- platform_device_register_simple("cs89x0", 0, cdb89712_cs8900_resource,
- ARRAY_SIZE(cdb89712_cs8900_resource));
-}
-
-MACHINE_START(CDB89712, "Cirrus-CDB89712")
- /* Maintainer: Ray Lehtiniemi */
- .atag_offset = 0x100,
- .map_io = clps711x_map_io,
- .init_irq = clps711x_init_irq,
- .init_time = clps711x_timer_init,
- .init_machine = cdb89712_init,
- .restart = clps711x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-clps711x/board-clep7312.c b/arch/arm/mach-clps711x/board-clep7312.c
deleted file mode 100644
index f9ca22b646bf..000000000000
--- a/arch/arm/mach-clps711x/board-clep7312.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/clep7312.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/memblock.h>
-
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "devices.h"
-
-static void __init
-fixup_clep7312(struct tag *tags, char **cmdline)
-{
- memblock_add(0xc0000000, 0x01000000);
-}
-
-MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312")
- /* Maintainer: Nobody */
- .atag_offset = 0x0100,
- .fixup = fixup_clep7312,
- .map_io = clps711x_map_io,
- .init_irq = clps711x_init_irq,
- .init_time = clps711x_timer_init,
- .init_machine = clps711x_devices_init,
- .restart = clps711x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c
deleted file mode 100644
index f33979784f38..000000000000
--- a/arch/arm/mach-clps711x/board-edb7211.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2000, 2001 Blue Mug, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/memblock.h>
-#include <linux/types.h>
-#include <linux/i2c-gpio.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/pwm_backlight.h>
-#include <linux/memblock.h>
-
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/setup.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-#include <asm/mach-types.h>
-
-#include <video/platform_lcd.h>
-
-#include <mach/hardware.h>
-
-#include "common.h"
-#include "devices.h"
-
-#define VIDEORAM_SIZE SZ_128K
-
-#define EDB7211_LCD_DC_DC_EN CLPS711X_GPIO(3, 1)
-#define EDB7211_LCDEN CLPS711X_GPIO(3, 2)
-#define EDB7211_LCDBL CLPS711X_GPIO(3, 3)
-
-#define EDB7211_I2C_SDA CLPS711X_GPIO(3, 4)
-#define EDB7211_I2C_SCL CLPS711X_GPIO(3, 5)
-
-#define EDB7211_FLASH0_BASE (CS0_PHYS_BASE)
-#define EDB7211_FLASH1_BASE (CS1_PHYS_BASE)
-
-#define EDB7211_CS8900_BASE (CS2_PHYS_BASE + 0x300)
-#define EDB7211_CS8900_IRQ (IRQ_EINT3)
-
-/* The extra 8 lines of the keyboard matrix */
-#define EDB7211_EXTKBD_BASE (CS3_PHYS_BASE)
-
-static struct i2c_gpio_platform_data edb7211_i2c_pdata __initdata = {
- .sda_pin = EDB7211_I2C_SDA,
- .scl_pin = EDB7211_I2C_SCL,
- .scl_is_output_only = 1,
-};
-
-static struct resource edb7211_cs8900_resource[] __initdata = {
- DEFINE_RES_MEM(EDB7211_CS8900_BASE, SZ_1K),
- DEFINE_RES_IRQ(EDB7211_CS8900_IRQ),
-};
-
-static struct mtd_partition edb7211_flash_partitions[] __initdata = {
- {
- .name = "Flash",
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct physmap_flash_data edb7211_flash_pdata __initdata = {
- .width = 4,
- .parts = edb7211_flash_partitions,
- .nr_parts = ARRAY_SIZE(edb7211_flash_partitions),
-};
-
-static struct resource edb7211_flash_resources[] __initdata = {
- DEFINE_RES_MEM(EDB7211_FLASH0_BASE, SZ_8M),
- DEFINE_RES_MEM(EDB7211_FLASH1_BASE, SZ_8M),
-};
-
-static struct platform_device edb7211_flash_pdev __initdata = {
- .name = "physmap-flash",
- .id = 0,
- .resource = edb7211_flash_resources,
- .num_resources = ARRAY_SIZE(edb7211_flash_resources),
- .dev = {
- .platform_data = &edb7211_flash_pdata,
- },
-};
-
-static void edb7211_lcd_power_set(struct plat_lcd_data *pd, unsigned int power)
-{
- if (power) {
- gpio_set_value(EDB7211_LCDEN, 1);
- udelay(100);
- gpio_set_value(EDB7211_LCD_DC_DC_EN, 1);
- } else {
- gpio_set_value(EDB7211_LCD_DC_DC_EN, 0);
- udelay(100);
- gpio_set_value(EDB7211_LCDEN, 0);
- }
-}
-
-static struct plat_lcd_data edb7211_lcd_power_pdata = {
- .set_power = edb7211_lcd_power_set,
-};
-
-static struct pwm_lookup edb7211_pwm_lookup[] = {
- PWM_LOOKUP("clps711x-pwm", 0, "pwm-backlight.0", NULL,
- 0, PWM_POLARITY_NORMAL),
-};
-
-static struct platform_pwm_backlight_data pwm_bl_pdata = {
- .dft_brightness = 0x01,
- .max_brightness = 0x0f,
- .enable_gpio = EDB7211_LCDBL,
-};
-
-static struct resource clps711x_pwm_res =
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + PMPCON, SZ_4);
-
-static struct gpio edb7211_gpios[] __initconst = {
- { EDB7211_LCD_DC_DC_EN, GPIOF_OUT_INIT_LOW, "LCD DC-DC" },
- { EDB7211_LCDEN, GPIOF_OUT_INIT_LOW, "LCD POWER" },
-};
-
-/* Reserve screen memory region at the start of main system memory. */
-static void __init edb7211_reserve(void)
-{
- memblock_reserve(PHYS_OFFSET, VIDEORAM_SIZE);
-}
-
-static void __init
-fixup_edb7211(struct tag *tags, char **cmdline)
-{
- /*
- * Bank start addresses are not present in the information
- * passed in from the boot loader. We could potentially
- * detect them, but instead we hard-code them.
- *
- * Banks sizes _are_ present in the param block, but we're
- * not using that information yet.
- */
- memblock_add(0xc0000000, SZ_8M);
- memblock_add(0xc1000000, SZ_8M);
-}
-
-static void __init edb7211_init_late(void)
-{
- gpio_request_array(edb7211_gpios, ARRAY_SIZE(edb7211_gpios));
-
- platform_device_register(&edb7211_flash_pdev);
-
- platform_device_register_data(NULL, "platform-lcd", 0,
- &edb7211_lcd_power_pdata,
- sizeof(edb7211_lcd_power_pdata));
-
- platform_device_register_simple("clps711x-pwm", PLATFORM_DEVID_NONE,
- &clps711x_pwm_res, 1);
- pwm_add_table(edb7211_pwm_lookup, ARRAY_SIZE(edb7211_pwm_lookup));
-
- platform_device_register_data(&platform_bus, "pwm-backlight", 0,
- &pwm_bl_pdata, sizeof(pwm_bl_pdata));
-
- platform_device_register_simple("video-clps711x", 0, NULL, 0);
- platform_device_register_simple("cs89x0", 0, edb7211_cs8900_resource,
- ARRAY_SIZE(edb7211_cs8900_resource));
- platform_device_register_data(NULL, "i2c-gpio", 0,
- &edb7211_i2c_pdata,
- sizeof(edb7211_i2c_pdata));
-}
-
-MACHINE_START(EDB7211, "CL-EDB7211 (EP7211 eval board)")
- /* Maintainer: Jon McClintock */
- .atag_offset = VIDEORAM_SIZE + 0x100,
- .fixup = fixup_edb7211,
- .reserve = edb7211_reserve,
- .map_io = clps711x_map_io,
- .init_irq = clps711x_init_irq,
- .init_time = clps711x_timer_init,
- .init_machine = clps711x_devices_init,
- .init_late = edb7211_init_late,
- .restart = clps711x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-clps711x/board-p720t.c b/arch/arm/mach-clps711x/board-p720t.c
deleted file mode 100644
index 80a16a8b3776..000000000000
--- a/arch/arm/mach-clps711x/board-p720t.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/p720t.c
- *
- * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/leds.h>
-#include <linux/sizes.h>
-#include <linux/backlight.h>
-#include <linux/gpio/driver.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand-gpio.h>
-
-#include <mach/hardware.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <video/platform_lcd.h>
-
-#include "common.h"
-#include "devices.h"
-
-#define P720T_USERLED CLPS711X_GPIO(3, 0)
-#define P720T_NAND_CLE CLPS711X_GPIO(4, 0)
-#define P720T_NAND_ALE CLPS711X_GPIO(4, 1)
-#define P720T_NAND_NCE CLPS711X_GPIO(4, 2)
-
-#define P720T_NAND_BASE (CLPS711X_SDRAM1_BASE)
-
-#define P720T_MMGPIO_BASE (CLPS711X_NR_GPIO)
-
-#define SYSPLD_PHYS_BASE IOMEM(CS1_PHYS_BASE)
-
-#define PLD_INT (SYSPLD_PHYS_BASE + 0x000000)
-#define PLD_INT_MMGPIO_BASE (P720T_MMGPIO_BASE + 0)
-#define PLD_INT_PENIRQ (PLD_INT_MMGPIO_BASE + 5)
-#define PLD_INT_UCB_IRQ (PLD_INT_MMGPIO_BASE + 1)
-#define PLD_INT_KBD_ATN (PLD_INT_MMGPIO_BASE + 0) /* EINT1 */
-
-#define PLD_PWR (SYSPLD_PHYS_BASE + 0x000004)
-#define PLD_PWR_MMGPIO_BASE (P720T_MMGPIO_BASE + 8)
-#define PLD_PWR_EXT (PLD_PWR_MMGPIO_BASE + 5)
-#define PLD_PWR_MODE (PLD_PWR_MMGPIO_BASE + 4) /* 1 = PWM, 0 = PFM */
-#define PLD_S4_ON (PLD_PWR_MMGPIO_BASE + 3) /* LCD bias voltage enable */
-#define PLD_S3_ON (PLD_PWR_MMGPIO_BASE + 2) /* LCD backlight enable */
-#define PLD_S2_ON (PLD_PWR_MMGPIO_BASE + 1) /* LCD 3V3 supply enable */
-#define PLD_S1_ON (PLD_PWR_MMGPIO_BASE + 0) /* LCD 3V supply enable */
-
-#define PLD_KBD (SYSPLD_PHYS_BASE + 0x000008)
-#define PLD_KBD_MMGPIO_BASE (P720T_MMGPIO_BASE + 16)
-#define PLD_KBD_WAKE (PLD_KBD_MMGPIO_BASE + 1)
-#define PLD_KBD_EN (PLD_KBD_MMGPIO_BASE + 0)
-
-#define PLD_SPI (SYSPLD_PHYS_BASE + 0x00000c)
-#define PLD_SPI_MMGPIO_BASE (P720T_MMGPIO_BASE + 24)
-#define PLD_SPI_EN (PLD_SPI_MMGPIO_BASE + 0)
-
-#define PLD_IO (SYSPLD_PHYS_BASE + 0x000010)
-#define PLD_IO_MMGPIO_BASE (P720T_MMGPIO_BASE + 32)
-#define PLD_IO_BOOTSEL (PLD_IO_MMGPIO_BASE + 6) /* Boot sel switch */
-#define PLD_IO_USER (PLD_IO_MMGPIO_BASE + 5) /* User defined switch */
-#define PLD_IO_LED3 (PLD_IO_MMGPIO_BASE + 4)
-#define PLD_IO_LED2 (PLD_IO_MMGPIO_BASE + 3)
-#define PLD_IO_LED1 (PLD_IO_MMGPIO_BASE + 2)
-#define PLD_IO_LED0 (PLD_IO_MMGPIO_BASE + 1)
-#define PLD_IO_LEDEN (PLD_IO_MMGPIO_BASE + 0)
-
-#define PLD_IRDA (SYSPLD_PHYS_BASE + 0x000014)
-#define PLD_IRDA_MMGPIO_BASE (P720T_MMGPIO_BASE + 40)
-#define PLD_IRDA_EN (PLD_IRDA_MMGPIO_BASE + 0)
-
-#define PLD_COM2 (SYSPLD_PHYS_BASE + 0x000018)
-#define PLD_COM2_MMGPIO_BASE (P720T_MMGPIO_BASE + 48)
-#define PLD_COM2_EN (PLD_COM2_MMGPIO_BASE + 0)
-
-#define PLD_COM1 (SYSPLD_PHYS_BASE + 0x00001c)
-#define PLD_COM1_MMGPIO_BASE (P720T_MMGPIO_BASE + 56)
-#define PLD_COM1_EN (PLD_COM1_MMGPIO_BASE + 0)
-
-#define PLD_AUD (SYSPLD_PHYS_BASE + 0x000020)
-#define PLD_AUD_MMGPIO_BASE (P720T_MMGPIO_BASE + 64)
-#define PLD_AUD_DIV1 (PLD_AUD_MMGPIO_BASE + 6)
-#define PLD_AUD_DIV0 (PLD_AUD_MMGPIO_BASE + 5)
-#define PLD_AUD_CLK_SEL1 (PLD_AUD_MMGPIO_BASE + 4)
-#define PLD_AUD_CLK_SEL0 (PLD_AUD_MMGPIO_BASE + 3)
-#define PLD_AUD_MIC_PWR (PLD_AUD_MMGPIO_BASE + 2)
-#define PLD_AUD_MIC_GAIN (PLD_AUD_MMGPIO_BASE + 1)
-#define PLD_AUD_CODEC_EN (PLD_AUD_MMGPIO_BASE + 0)
-
-#define PLD_CF (SYSPLD_PHYS_BASE + 0x000024)
-#define PLD_CF_MMGPIO_BASE (P720T_MMGPIO_BASE + 72)
-#define PLD_CF2_SLEEP (PLD_CF_MMGPIO_BASE + 5)
-#define PLD_CF1_SLEEP (PLD_CF_MMGPIO_BASE + 4)
-#define PLD_CF2_nPDREQ (PLD_CF_MMGPIO_BASE + 3)
-#define PLD_CF1_nPDREQ (PLD_CF_MMGPIO_BASE + 2)
-#define PLD_CF2_nIRQ (PLD_CF_MMGPIO_BASE + 1)
-#define PLD_CF1_nIRQ (PLD_CF_MMGPIO_BASE + 0)
-
-#define PLD_SDC (SYSPLD_PHYS_BASE + 0x000028)
-#define PLD_SDC_MMGPIO_BASE (P720T_MMGPIO_BASE + 80)
-#define PLD_SDC_INT_EN (PLD_SDC_MMGPIO_BASE + 2)
-#define PLD_SDC_WP (PLD_SDC_MMGPIO_BASE + 1)
-#define PLD_SDC_CD (PLD_SDC_MMGPIO_BASE + 0)
-
-#define PLD_CODEC (SYSPLD_PHYS_BASE + 0x400000)
-#define PLD_CODEC_MMGPIO_BASE (P720T_MMGPIO_BASE + 88)
-#define PLD_CODEC_IRQ3 (PLD_CODEC_MMGPIO_BASE + 4)
-#define PLD_CODEC_IRQ2 (PLD_CODEC_MMGPIO_BASE + 3)
-#define PLD_CODEC_IRQ1 (PLD_CODEC_MMGPIO_BASE + 2)
-#define PLD_CODEC_EN (PLD_CODEC_MMGPIO_BASE + 0)
-
-#define PLD_BRITE (SYSPLD_PHYS_BASE + 0x400004)
-#define PLD_BRITE_MMGPIO_BASE (P720T_MMGPIO_BASE + 96)
-#define PLD_BRITE_UP (PLD_BRITE_MMGPIO_BASE + 1)
-#define PLD_BRITE_DN (PLD_BRITE_MMGPIO_BASE + 0)
-
-#define PLD_LCDEN (SYSPLD_PHYS_BASE + 0x400008)
-#define PLD_LCDEN_MMGPIO_BASE (P720T_MMGPIO_BASE + 104)
-#define PLD_LCDEN_EN (PLD_LCDEN_MMGPIO_BASE + 0)
-
-#define PLD_TCH (SYSPLD_PHYS_BASE + 0x400010)
-#define PLD_TCH_MMGPIO_BASE (P720T_MMGPIO_BASE + 112)
-#define PLD_TCH_PENIRQ (PLD_TCH_MMGPIO_BASE + 1)
-#define PLD_TCH_EN (PLD_TCH_MMGPIO_BASE + 0)
-
-#define PLD_GPIO (SYSPLD_PHYS_BASE + 0x400014)
-#define PLD_GPIO_MMGPIO_BASE (P720T_MMGPIO_BASE + 120)
-#define PLD_GPIO2 (PLD_GPIO_MMGPIO_BASE + 2)
-#define PLD_GPIO1 (PLD_GPIO_MMGPIO_BASE + 1)
-#define PLD_GPIO0 (PLD_GPIO_MMGPIO_BASE + 0)
-
-static struct gpio p720t_gpios[] __initconst = {
- { PLD_S1_ON, GPIOF_OUT_INIT_LOW, "PLD_S1_ON" },
- { PLD_S2_ON, GPIOF_OUT_INIT_LOW, "PLD_S2_ON" },
- { PLD_S3_ON, GPIOF_OUT_INIT_LOW, "PLD_S3_ON" },
- { PLD_S4_ON, GPIOF_OUT_INIT_LOW, "PLD_S4_ON" },
- { PLD_KBD_EN, GPIOF_OUT_INIT_LOW, "PLD_KBD_EN" },
- { PLD_SPI_EN, GPIOF_OUT_INIT_LOW, "PLD_SPI_EN" },
- { PLD_IO_USER, GPIOF_OUT_INIT_LOW, "PLD_IO_USER" },
- { PLD_IO_LED0, GPIOF_OUT_INIT_LOW, "PLD_IO_LED0" },
- { PLD_IO_LED1, GPIOF_OUT_INIT_LOW, "PLD_IO_LED1" },
- { PLD_IO_LED2, GPIOF_OUT_INIT_LOW, "PLD_IO_LED2" },
- { PLD_IO_LED3, GPIOF_OUT_INIT_LOW, "PLD_IO_LED3" },
- { PLD_IO_LEDEN, GPIOF_OUT_INIT_LOW, "PLD_IO_LEDEN" },
- { PLD_IRDA_EN, GPIOF_OUT_INIT_LOW, "PLD_IRDA_EN" },
- { PLD_COM1_EN, GPIOF_OUT_INIT_HIGH, "PLD_COM1_EN" },
- { PLD_COM2_EN, GPIOF_OUT_INIT_HIGH, "PLD_COM2_EN" },
- { PLD_CODEC_EN, GPIOF_OUT_INIT_LOW, "PLD_CODEC_EN" },
- { PLD_LCDEN_EN, GPIOF_OUT_INIT_LOW, "PLD_LCDEN_EN" },
- { PLD_TCH_EN, GPIOF_OUT_INIT_LOW, "PLD_TCH_EN" },
- { P720T_USERLED,GPIOF_OUT_INIT_LOW, "USER_LED" },
-};
-
-static struct resource p720t_mmgpio_resource[] __initdata = {
- DEFINE_RES_MEM_NAMED(0, 4, "dat"),
-};
-
-static struct bgpio_pdata p720t_mmgpio_pdata = {
- .ngpio = 8,
-};
-
-static struct platform_device p720t_mmgpio __initdata = {
- .name = "basic-mmio-gpio",
- .id = -1,
- .resource = p720t_mmgpio_resource,
- .num_resources = ARRAY_SIZE(p720t_mmgpio_resource),
- .dev = {
- .platform_data = &p720t_mmgpio_pdata,
- },
-};
-
-static void __init p720t_mmgpio_init(void __iomem *addrbase, int gpiobase)
-{
- p720t_mmgpio_resource[0].start = (unsigned long)addrbase;
- p720t_mmgpio_pdata.base = gpiobase;
-
- platform_device_register(&p720t_mmgpio);
-}
-
-static struct {
- void __iomem *addrbase;
- int gpiobase;
-} mmgpios[] __initconst = {
- { PLD_INT, PLD_INT_MMGPIO_BASE },
- { PLD_PWR, PLD_PWR_MMGPIO_BASE },
- { PLD_KBD, PLD_KBD_MMGPIO_BASE },
- { PLD_SPI, PLD_SPI_MMGPIO_BASE },
- { PLD_IO, PLD_IO_MMGPIO_BASE },
- { PLD_IRDA, PLD_IRDA_MMGPIO_BASE },
- { PLD_COM2, PLD_COM2_MMGPIO_BASE },
- { PLD_COM1, PLD_COM1_MMGPIO_BASE },
- { PLD_AUD, PLD_AUD_MMGPIO_BASE },
- { PLD_CF, PLD_CF_MMGPIO_BASE },
- { PLD_SDC, PLD_SDC_MMGPIO_BASE },
- { PLD_CODEC, PLD_CODEC_MMGPIO_BASE },
- { PLD_BRITE, PLD_BRITE_MMGPIO_BASE },
- { PLD_LCDEN, PLD_LCDEN_MMGPIO_BASE },
- { PLD_TCH, PLD_TCH_MMGPIO_BASE },
- { PLD_GPIO, PLD_GPIO_MMGPIO_BASE },
-};
-
-static struct resource p720t_nand_resource[] __initdata = {
- DEFINE_RES_MEM(P720T_NAND_BASE, SZ_4),
-};
-
-static struct mtd_partition p720t_nand_parts[] __initdata = {
- {
- .name = "Flash partition 1",
- .offset = 0,
- .size = SZ_2M,
- },
- {
- .name = "Flash partition 2",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct gpio_nand_platdata p720t_nand_pdata __initdata = {
- .gpio_rdy = -1,
- .gpio_nce = P720T_NAND_NCE,
- .gpio_ale = P720T_NAND_ALE,
- .gpio_cle = P720T_NAND_CLE,
- .gpio_nwp = -1,
- .chip_delay = 15,
- .parts = p720t_nand_parts,
- .num_parts = ARRAY_SIZE(p720t_nand_parts),
-};
-
-static struct platform_device p720t_nand_pdev __initdata = {
- .name = "gpio-nand",
- .id = -1,
- .resource = p720t_nand_resource,
- .num_resources = ARRAY_SIZE(p720t_nand_resource),
- .dev = {
- .platform_data = &p720t_nand_pdata,
- },
-};
-
-static void p720t_lcd_power_set(struct plat_lcd_data *pd, unsigned int power)
-{
- if (power) {
- gpio_set_value(PLD_LCDEN_EN, 1);
- gpio_set_value(PLD_S1_ON, 1);
- gpio_set_value(PLD_S2_ON, 1);
- gpio_set_value(PLD_S4_ON, 1);
- } else {
- gpio_set_value(PLD_S1_ON, 0);
- gpio_set_value(PLD_S2_ON, 0);
- gpio_set_value(PLD_S4_ON, 0);
- gpio_set_value(PLD_LCDEN_EN, 0);
- }
-}
-
-static struct plat_lcd_data p720t_lcd_power_pdata = {
- .set_power = p720t_lcd_power_set,
-};
-
-static void p720t_lcd_backlight_set_intensity(int intensity)
-{
- gpio_set_value(PLD_S3_ON, intensity);
-}
-
-static struct generic_bl_info p720t_lcd_backlight_pdata = {
- .name = "lcd-backlight.0",
- .default_intensity = 0x01,
- .max_intensity = 0x01,
- .set_bl_intensity = p720t_lcd_backlight_set_intensity,
-};
-
-static void __init
-fixup_p720t(struct tag *tag, char **cmdline)
-{
- /*
- * Our bootloader doesn't setup any tags (yet).
- */
- if (tag->hdr.tag != ATAG_CORE) {
- tag->hdr.tag = ATAG_CORE;
- tag->hdr.size = tag_size(tag_core);
- tag->u.core.flags = 0;
- tag->u.core.pagesize = PAGE_SIZE;
- tag->u.core.rootdev = 0x0100;
-
- tag = tag_next(tag);
- tag->hdr.tag = ATAG_MEM;
- tag->hdr.size = tag_size(tag_mem32);
- tag->u.mem.size = 4096;
- tag->u.mem.start = PHYS_OFFSET;
-
- tag = tag_next(tag);
- tag->hdr.tag = ATAG_NONE;
- tag->hdr.size = 0;
- }
-}
-
-static struct gpio_led p720t_gpio_leds[] = {
- {
- .name = "User LED",
- .default_trigger = "heartbeat",
- .gpio = P720T_USERLED,
- },
-};
-
-static struct gpio_led_platform_data p720t_gpio_led_pdata __initdata = {
- .leds = p720t_gpio_leds,
- .num_leds = ARRAY_SIZE(p720t_gpio_leds),
-};
-
-static void __init p720t_init(void)
-{
- int i;
-
- clps711x_devices_init();
-
- for (i = 0; i < ARRAY_SIZE(mmgpios); i++)
- p720t_mmgpio_init(mmgpios[i].addrbase, mmgpios[i].gpiobase);
-
- platform_device_register(&p720t_nand_pdev);
-}
-
-static void __init p720t_init_late(void)
-{
- WARN_ON(gpio_request_array(p720t_gpios, ARRAY_SIZE(p720t_gpios)));
-
- platform_device_register_data(NULL, "platform-lcd", 0,
- &p720t_lcd_power_pdata,
- sizeof(p720t_lcd_power_pdata));
- platform_device_register_data(NULL, "generic-bl", 0,
- &p720t_lcd_backlight_pdata,
- sizeof(p720t_lcd_backlight_pdata));
- platform_device_register_simple("video-clps711x", 0, NULL, 0);
- platform_device_register_data(NULL, "leds-gpio", 0,
- &p720t_gpio_led_pdata,
- sizeof(p720t_gpio_led_pdata));
-}
-
-MACHINE_START(P720T, "ARM-Prospector720T")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .fixup = fixup_p720t,
- .map_io = clps711x_map_io,
- .init_irq = clps711x_init_irq,
- .init_time = clps711x_timer_init,
- .init_machine = p720t_init,
- .init_late = p720t_init_late,
- .restart = clps711x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
deleted file mode 100644
index 6466da8f3c11..000000000000
--- a/arch/arm/mach-clps711x/common.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/core.c
- *
- * Core support for the CLPS711x-based machines.
- *
- * Copyright (C) 2001,2011 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/sizes.h>
-
-#include <asm/mach/map.h>
-#include <asm/system_misc.h>
-
-#include <mach/hardware.h>
-
-#include "common.h"
-
-/*
- * This maps the generic CLPS711x registers
- */
-static struct map_desc clps711x_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)CLPS711X_VIRT_BASE,
- .pfn = __phys_to_pfn(CLPS711X_PHYS_BASE),
- .length = 48 * SZ_1K,
- .type = MT_DEVICE,
- }
-};
-
-void __init clps711x_map_io(void)
-{
- iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
-}
-
-void __init clps711x_init_irq(void)
-{
- clps711x_intc_init(CLPS711X_PHYS_BASE, SZ_16K);
-}
-
-void __init clps711x_timer_init(void)
-{
- clps711x_clk_init(CLPS711X_VIRT_BASE);
- clps711x_clksrc_init(CLPS711X_VIRT_BASE + TC1D,
- CLPS711X_VIRT_BASE + TC2D, IRQ_TC2OI);
-}
-
-void clps711x_restart(enum reboot_mode mode, const char *cmd)
-{
- soft_restart(0);
-}
diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h
deleted file mode 100644
index 370200b26333..000000000000
--- a/arch/arm/mach-clps711x/common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/common.h
- *
- * Common bits.
- */
-
-#include <linux/reboot.h>
-
-#define CLPS711X_NR_GPIO (4 * 8 + 3)
-#define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit))
-
-extern void clps711x_map_io(void);
-extern void clps711x_init_irq(void);
-extern void clps711x_timer_init(void);
-extern void clps711x_restart(enum reboot_mode mode, const char *cmd);
-
-/* drivers/irqchip/irq-clps711x.c */
-void clps711x_intc_init(phys_addr_t, resource_size_t);
-/* drivers/clk/clk-clps711x.c */
-void clps711x_clk_init(void __iomem *base);
-/* drivers/clocksource/clps711x-timer.c */
-void clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
- unsigned int irq);
diff --git a/arch/arm/mach-clps711x/devices.c b/arch/arm/mach-clps711x/devices.c
deleted file mode 100644
index 77a9617c216d..000000000000
--- a/arch/arm/mach-clps711x/devices.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * CLPS711X common devices definitions
- *
- * Author: Alexander Shiyan <shc_work@mail.ru>, 2013-2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/io.h>
-#include <linux/of_fdt.h>
-#include <linux/platform_device.h>
-#include <linux/random.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/sys_soc.h>
-
-#include <asm/system_info.h>
-
-#include <mach/hardware.h>
-
-static const struct resource clps711x_cpuidle_res __initconst =
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + HALT, SZ_128);
-
-static void __init clps711x_add_cpuidle(void)
-{
- platform_device_register_simple("clps711x-cpuidle", PLATFORM_DEVID_NONE,
- &clps711x_cpuidle_res, 1);
-}
-
-static const phys_addr_t clps711x_gpios[][2] __initconst = {
- { PADR, PADDR },
- { PBDR, PBDDR },
- { PCDR, PCDDR },
- { PDDR, PDDDR },
- { PEDR, PEDDR },
-};
-
-static void __init clps711x_add_gpio(void)
-{
- unsigned i;
- struct resource gpio_res[2];
-
- memset(gpio_res, 0, sizeof(gpio_res));
-
- gpio_res[0].flags = IORESOURCE_MEM;
- gpio_res[1].flags = IORESOURCE_MEM;
-
- for (i = 0; i < ARRAY_SIZE(clps711x_gpios); i++) {
- gpio_res[0].start = CLPS711X_PHYS_BASE + clps711x_gpios[i][0];
- gpio_res[0].end = gpio_res[0].start;
- gpio_res[1].start = CLPS711X_PHYS_BASE + clps711x_gpios[i][1];
- gpio_res[1].end = gpio_res[1].start;
-
- platform_device_register_simple("clps711x-gpio", i,
- gpio_res, ARRAY_SIZE(gpio_res));
- }
-}
-
-const struct resource clps711x_syscon_res[] __initconst = {
- /* SYSCON1, SYSFLG1 */
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + SYSCON1, SZ_128),
- /* SYSCON2, SYSFLG2 */
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + SYSCON2, SZ_128),
- /* SYSCON3 */
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + SYSCON3, SZ_64),
-};
-
-static void __init clps711x_add_syscon(void)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(clps711x_syscon_res); i++)
- platform_device_register_simple("syscon", i + 1,
- &clps711x_syscon_res[i], 1);
-}
-
-static const struct resource clps711x_uart1_res[] __initconst = {
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR1, SZ_128),
- DEFINE_RES_IRQ(IRQ_UTXINT1),
- DEFINE_RES_IRQ(IRQ_URXINT1),
-};
-
-static const struct resource clps711x_uart2_res[] __initconst = {
- DEFINE_RES_MEM(CLPS711X_PHYS_BASE + UARTDR2, SZ_128),
- DEFINE_RES_IRQ(IRQ_UTXINT2),
- DEFINE_RES_IRQ(IRQ_URXINT2),
-};
-
-static void __init clps711x_add_uart(void)
-{
- platform_device_register_simple("clps711x-uart", 0, clps711x_uart1_res,
- ARRAY_SIZE(clps711x_uart1_res));
- platform_device_register_simple("clps711x-uart", 1, clps711x_uart2_res,
- ARRAY_SIZE(clps711x_uart2_res));
-};
-
-static void __init clps711x_soc_init(void)
-{
- struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
- void __iomem *base;
- u32 id[5];
-
- base = ioremap(CLPS711X_PHYS_BASE, SZ_32K);
- if (!base)
- return;
-
- id[0] = readl(base + UNIQID);
- id[1] = readl(base + RANDID0);
- id[2] = readl(base + RANDID1);
- id[3] = readl(base + RANDID2);
- id[4] = readl(base + RANDID3);
- system_rev = SYSFLG1_VERID(readl(base + SYSFLG1));
-
- add_device_randomness(id, sizeof(id));
-
- system_serial_low = id[0];
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- goto out_unmap;
-
- soc_dev_attr->machine = of_flat_dt_get_machine_name();
- soc_dev_attr->family = "Cirrus Logic CLPS711X";
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u", system_rev);
- soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%08x", id[0]);
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr->revision);
- kfree(soc_dev_attr->soc_id);
- kfree(soc_dev_attr);
- }
-
-out_unmap:
- iounmap(base);
-}
-
-void __init clps711x_devices_init(void)
-{
- clps711x_add_cpuidle();
- clps711x_add_gpio();
- clps711x_add_syscon();
- clps711x_add_uart();
- clps711x_soc_init();
-}
diff --git a/arch/arm/mach-clps711x/devices.h b/arch/arm/mach-clps711x/devices.h
deleted file mode 100644
index a5efc1744b84..000000000000
--- a/arch/arm/mach-clps711x/devices.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * CLPS711X common devices definitions
- *
- * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-void clps711x_devices_init(void);
diff --git a/arch/arm/mach-cns3xxx/Kconfig b/arch/arm/mach-cns3xxx/Kconfig
index eb14a0ff0093..5fd836be2701 100644
--- a/arch/arm/mach-cns3xxx/Kconfig
+++ b/arch/arm/mach-cns3xxx/Kconfig
@@ -2,7 +2,6 @@ menuconfig ARCH_CNS3XXX
bool "Cavium Networks CNS3XXX family"
depends on ARCH_MULTI_V6
select ARM_GIC
- select PCI_DOMAINS if PCI
help
Support for Cavium Networks CNS3XXX platform.
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 0d046accb35b..ed3d0e9f72ac 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -501,6 +501,7 @@ static struct clk_lookup da850_clks[] = {
CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("da830-mmc.0", NULL, &mmcsd0_clk),
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
+ CLK("ti-aemif", NULL, &aemif_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "usb11", &usb11_clk),
CLK(NULL, "usb20", &usb20_clk),
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index 754f478110b4..c9f7e9274aa8 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -37,6 +37,7 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
NULL),
OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
+ OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL),
{}
};
@@ -49,6 +50,7 @@ static void __init da850_init_machine(void)
static const char *const da850_boards_compat[] __initconst = {
"enbw,cmc",
+ "ti,da850-lcdk",
"ti,da850-evm",
"ti,da850",
NULL,
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 8f820de890b4..0bb63b8d21e7 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -12,6 +12,7 @@ menuconfig ARCH_EXYNOS
depends on ARCH_MULTI_V7
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select COMMON_CLK_SAMSUNG
@@ -126,8 +127,6 @@ config SOC_EXYNOS5440
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
select HAVE_ARM_ARCH_TIMER
select AUTO_ZRELADDR
- select MIGHT_HAVE_PCI
- select PCI_DOMAINS if PCI
select PINCTRL_EXYNOS5440
select PM_OPP
help
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index acabf0bffc5d..757fc11de30d 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -30,25 +30,10 @@
static struct map_desc exynos4_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S5P_VA_CMU,
- .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
- .length = SZ_128K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_COREPERI_BASE,
.pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
.length = SZ_8K,
.type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_DMC0,
- .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_DMC1,
- .pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
- .length = SZ_64K,
- .type = MT_DEVICE,
},
};
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index c48ba4fbdfd2..5fb0040cc6d3 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -18,11 +18,6 @@
#define EXYNOS_PA_CHIPID 0x10000000
-#define EXYNOS4_PA_CMU 0x10030000
-
-#define EXYNOS4_PA_DMC0 0x10400000
-#define EXYNOS4_PA_DMC1 0x10410000
-
#define EXYNOS4_PA_COREPERI 0x10500000
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h
index 02f6d7a706b1..20d5ad781fe2 100644
--- a/arch/arm/mach-footbridge/include/mach/hardware.h
+++ b/arch/arm/mach-footbridge/include/mach/hardware.h
@@ -59,7 +59,7 @@
#define XBUS_SWITCH_J17_11 ((*XBUS_SWITCH) & (1 << 5))
#define XBUS_SWITCH_J17_9 ((*XBUS_SWITCH) & (1 << 6))
-#define UNCACHEABLE_ADDR (ARMCSR_BASE + 0x108)
+#define UNCACHEABLE_ADDR (ARMCSR_BASE + 0x108) /* CSR_ROMBASEMASK */
/* PIC irq control */
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index ee9a318cab31..9155b639c9aa 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -64,13 +64,6 @@ config IMX_HAVE_IOMUX_V1
config ARCH_MXC_IOMUX_V3
bool
-config SOC_IMX1
- bool
- select CPU_ARM920T
- select IMX_HAVE_IOMUX_V1
- select MXC_AVIC
- select PINCTRL_IMX1
-
config SOC_IMX21
bool
select CPU_ARM926T
@@ -88,7 +81,6 @@ config SOC_IMX31
bool
select CPU_V6
select MXC_AVIC
- select SMP_ON_UP if SMP
config SOC_IMX35
bool
@@ -96,35 +88,6 @@ config SOC_IMX35
select HAVE_EPIT
select MXC_AVIC
select PINCTRL_IMX35
- select SMP_ON_UP if SMP
-
-if ARCH_MULTI_V4T
-
-comment "MX1 platforms:"
-
-config MACH_SCB9328
- bool "Synertronixx scb9328"
- select IMX_HAVE_PLATFORM_IMX_UART
- select SOC_IMX1
- help
- Say Y here if you are using a Synertronixx scb9328 board
-
-config MACH_APF9328
- bool "APF9328"
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select SOC_IMX1
- help
- Say Yes here if you are using the Armadeus APF9328 development board
-
-config MACH_IMX1_DT
- bool "Support i.MX1 platforms from device tree"
- select SOC_IMX1
- help
- Include support for Freescale i.MX1 based platforms
- using the device tree for discovery.
-
-endif
if ARCH_MULTI_V5
@@ -461,6 +424,18 @@ endif
comment "Device tree only"
+if ARCH_MULTI_V4T
+
+config SOC_IMX1
+ bool "i.MX1 support"
+ select CPU_ARM920T
+ select MXC_AVIC
+ select PINCTRL_IMX1
+ help
+ This enables support for the Freescale i.MX1 processor
+
+endif
+
if ARCH_MULTI_V5
config SOC_IMX25
@@ -523,7 +498,6 @@ config SOC_IMX6Q
select ARM_ERRATA_764369 if SMP
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD
- select PCI_DOMAINS if PCI
select PINCTRL_IMX6Q
select SOC_IMX6
@@ -569,7 +543,6 @@ config SOC_LS1021A
bool "Freescale LS1021A support"
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
- select PCI_DOMAINS if PCI
select ZONE_DMA if ARM_LPAE
help
This enables support for Freescale LS1021A processor.
@@ -585,7 +558,6 @@ config SOC_VF610
select ARM_GIC if ARCH_MULTI_V7
select PINCTRL_VF610
select PL310_ERRATA_769419 if CACHE_L2X0
- select SMP_ON_UP if SMP
help
This enables support for Freescale Vybrid VF610 processor.
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 9f5fffd62702..cab128913e72 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,6 +1,5 @@
obj-y := cpu.o system.o irq-common.o
-obj-$(CONFIG_SOC_IMX1) += mm-imx1.o
obj-$(CONFIG_SOC_IMX21) += mm-imx21.o
obj-$(CONFIG_SOC_IMX25) += cpu-imx25.o mach-imx25.o pm-imx25.o
@@ -28,6 +27,7 @@ obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o
+obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o
endif
ifdef CONFIG_SND_IMX_SOC
@@ -35,11 +35,6 @@ obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
endif
-# i.MX1 based machines
-obj-$(CONFIG_MACH_SCB9328) += mach-scb9328.o
-obj-$(CONFIG_MACH_APF9328) += mach-apf9328.o
-obj-$(CONFIG_MACH_IMX1_DT) += imx1-dt.o
-
# i.MX21 based machines
obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
@@ -93,6 +88,7 @@ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
obj-$(CONFIG_SOC_IMX50) += mach-imx50.o
obj-$(CONFIG_SOC_IMX51) += mach-imx51.o
obj-$(CONFIG_SOC_IMX53) += mach-imx53.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index a8f469333027..c4436d9c52ff 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -21,29 +21,24 @@ struct device_node;
enum mxc_cpu_pwr_mode;
struct of_device_id;
-void mx1_map_io(void);
void mx21_map_io(void);
void mx27_map_io(void);
void mx31_map_io(void);
void mx35_map_io(void);
-void imx1_init_early(void);
void imx21_init_early(void);
void imx27_init_early(void);
void imx31_init_early(void);
void imx35_init_early(void);
void mxc_init_irq(void __iomem *);
-void mx1_init_irq(void);
void mx21_init_irq(void);
void mx27_init_irq(void);
void mx31_init_irq(void);
void mx35_init_irq(void);
-void imx1_soc_init(void);
void imx21_soc_init(void);
void imx27_soc_init(void);
void imx31_soc_init(void);
void imx35_soc_init(void);
void epit_timer_init(void __iomem *base, int irq);
-int mx1_clocks_init(unsigned long fref);
int mx21_clocks_init(unsigned long lref, unsigned long fref);
int mx27_clocks_init(unsigned long fref);
int mx31_clocks_init(unsigned long fref);
@@ -109,7 +104,7 @@ void imx_anatop_init(void);
void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
-void imx6q_set_int_mem_clk_lpm(bool enable);
+void imx6_set_int_mem_clk_lpm(bool enable);
void imx6sl_set_wait_clk(bool enter);
int imx_mmdc_get_ddr_type(void);
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index db0f48c4b17e..bfeb25aaf9a2 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused);
int __init imx6q_cpuidle_init(void)
{
/* Set INT_MEM_CLK_LPM bit to get a reliable WAIT mode support */
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
return cpuidle_register(&imx6q_cpuidle_driver, NULL);
}
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 3c6672b3796b..c5a5c3a70ab1 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -9,6 +9,7 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
+#include <asm/cacheflush.h>
#include <asm/cpuidle.h>
#include <asm/suspend.h>
@@ -17,6 +18,15 @@
static int imx6sx_idle_finish(unsigned long val)
{
+ /*
+ * The Cortex-A7 has an internal L2 cache which needs to be
+ * flushed before powering down the ARM platform. Since
+ * flushing the L1 cache here again adds only a small
+ * overhead compared to adding conditional code for the
+ * L2 cache type, just call flush_cache_all().
+ */
+ flush_cache_all();
cpu_do_idle();
return 0;
@@ -90,6 +100,7 @@ static struct cpuidle_driver imx6sx_cpuidle_driver = {
int __init imx6sx_cpuidle_init(void)
{
+ imx6_set_int_mem_clk_lpm(true);
imx6_enable_rbc(false);
/*
* set ARM power up/down timing to the fastest,
diff --git a/arch/arm/mach-imx/devices-imx1.h b/arch/arm/mach-imx/devices-imx1.h
deleted file mode 100644
index f9b5afc6bcd1..000000000000
--- a/arch/arm/mach-imx/devices-imx1.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include "devices/devices-common.h"
-
-extern const struct imx_imx_fb_data imx1_imx_fb_data;
-#define imx1_add_imx_fb(pdata) \
- imx_add_imx_fb(&imx1_imx_fb_data, pdata)
-
-extern const struct imx_imx_i2c_data imx1_imx_i2c_data;
-#define imx1_add_imx_i2c(pdata) \
- imx_add_imx_i2c(&imx1_imx_i2c_data, pdata)
-
-extern const struct imx_imx_uart_3irq_data imx1_imx_uart_data[];
-#define imx1_add_imx_uart(id, pdata) \
- imx_add_imx_uart_3irq(&imx1_imx_uart_data[id], pdata)
-#define imx1_add_imx_uart0(pdata) imx1_add_imx_uart(0, pdata)
-#define imx1_add_imx_uart1(pdata) imx1_add_imx_uart(1, pdata)
-
-extern const struct imx_spi_imx_data imx1_cspi_data[];
-#define imx1_add_cspi(id, pdata) \
- imx_add_spi_imx(&imx1_cspi_data[id], pdata)
-
-#define imx1_add_spi_imx0(pdata) imx1_add_cspi(0, pdata)
-#define imx1_add_spi_imx1(pdata) imx1_add_cspi(1, pdata)
diff --git a/arch/arm/mach-imx/devices/Makefile b/arch/arm/mach-imx/devices/Makefile
index e5cf587bc1a0..aa6cee870795 100644
--- a/arch/arm/mach-imx/devices/Makefile
+++ b/arch/arm/mach-imx/devices/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_MX2_CAMERA) += platform-mx2-camera.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_EHCI) += platform-mxc-ehci.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_MMC) += platform-mxc-mmc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_NAND) += platform-mxc_nand.o
-obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_RNGA) += platform-mxc_rnga.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_RTC) += platform-mxc_rtc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_W1) += platform-mxc_w1.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX) += platform-sdhci-esdhc-imx.o
diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h
index 09cebd8cef2b..6920e356f4e5 100644
--- a/arch/arm/mach-imx/devices/devices-common.h
+++ b/arch/arm/mach-imx/devices/devices-common.h
@@ -154,18 +154,6 @@ struct platform_device *__init imx_add_imx_ssi(
const struct imx_ssi_platform_data *pdata);
#include <linux/platform_data/serial-imx.h>
-struct imx_imx_uart_3irq_data {
- int id;
- resource_size_t iobase;
- resource_size_t iosize;
- resource_size_t irqrx;
- resource_size_t irqtx;
- resource_size_t irqrts;
-};
-struct platform_device *__init imx_add_imx_uart_3irq(
- const struct imx_imx_uart_3irq_data *data,
- const struct imxuart_platform_data *pdata);
-
struct imx_imx_uart_1irq_data {
int id;
resource_size_t iobase;
diff --git a/arch/arm/mach-imx/devices/platform-imx-fb.c b/arch/arm/mach-imx/devices/platform-imx-fb.c
index 7df6328306f9..aa00272252e0 100644
--- a/arch/arm/mach-imx/devices/platform-imx-fb.c
+++ b/arch/arm/mach-imx/devices/platform-imx-fb.c
@@ -19,11 +19,6 @@
.irq = soc ## _INT_LCDC, \
}
-#ifdef CONFIG_SOC_IMX1
-const struct imx_imx_fb_data imx1_imx_fb_data __initconst =
- imx_imx_fb_data_entry_single(MX1, "imx1-fb", SZ_4K);
-#endif /* ifdef CONFIG_SOC_IMX1 */
-
#ifdef CONFIG_SOC_IMX21
const struct imx_imx_fb_data imx21_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX21, "imx21-fb", SZ_4K);
diff --git a/arch/arm/mach-imx/devices/platform-imx-i2c.c b/arch/arm/mach-imx/devices/platform-imx-i2c.c
index ae9791522fc8..9822bedb5d09 100644
--- a/arch/arm/mach-imx/devices/platform-imx-i2c.c
+++ b/arch/arm/mach-imx/devices/platform-imx-i2c.c
@@ -21,11 +21,6 @@
#define imx_imx_i2c_data_entry(soc, _devid, _id, _hwid, _size) \
[_id] = imx_imx_i2c_data_entry_single(soc, _devid, _id, _hwid, _size)
-#ifdef CONFIG_SOC_IMX1
-const struct imx_imx_i2c_data imx1_imx_i2c_data __initconst =
- imx_imx_i2c_data_entry_single(MX1, "imx1-i2c", 0, , SZ_4K);
-#endif /* ifdef CONFIG_SOC_IMX1 */
-
#ifdef CONFIG_SOC_IMX21
const struct imx_imx_i2c_data imx21_imx_i2c_data __initconst =
imx_imx_i2c_data_entry_single(MX21, "imx21-i2c", 0, , SZ_4K);
diff --git a/arch/arm/mach-imx/devices/platform-imx-uart.c b/arch/arm/mach-imx/devices/platform-imx-uart.c
index 6962cff4a950..e3c89e9caf93 100644
--- a/arch/arm/mach-imx/devices/platform-imx-uart.c
+++ b/arch/arm/mach-imx/devices/platform-imx-uart.c
@@ -27,15 +27,6 @@
.irq = soc ## _INT_UART ## _hwid, \
}
-#ifdef CONFIG_SOC_IMX1
-const struct imx_imx_uart_3irq_data imx1_imx_uart_data[] __initconst = {
-#define imx1_imx_uart_data_entry(_id, _hwid) \
- imx_imx_uart_3irq_data_entry(MX1, _id, _hwid, 0xd0)
- imx1_imx_uart_data_entry(0, 1),
- imx1_imx_uart_data_entry(1, 2),
-};
-#endif /* ifdef CONFIG_SOC_IMX1 */
-
#ifdef CONFIG_SOC_IMX21
const struct imx_imx_uart_1irq_data imx21_imx_uart_data[] __initconst = {
#define imx21_imx_uart_data_entry(_id, _hwid) \
@@ -82,34 +73,6 @@ const struct imx_imx_uart_1irq_data imx35_imx_uart_data[] __initconst = {
};
#endif /* ifdef CONFIG_SOC_IMX35 */
-struct platform_device *__init imx_add_imx_uart_3irq(
- const struct imx_imx_uart_3irq_data *data,
- const struct imxuart_platform_data *pdata)
-{
- struct resource res[] = {
- {
- .start = data->iobase,
- .end = data->iobase + data->iosize - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = data->irqrx,
- .end = data->irqrx,
- .flags = IORESOURCE_IRQ,
- }, {
- .start = data->irqtx,
- .end = data->irqtx,
- .flags = IORESOURCE_IRQ,
- }, {
- .start = data->irqrts,
- .end = data->irqrx,
- .flags = IORESOURCE_IRQ,
- },
- };
-
- return imx_add_platform_device("imx1-uart", data->id, res,
- ARRAY_SIZE(res), pdata, sizeof(*pdata));
-}
-
struct platform_device *__init imx_add_imx_uart_1irq(
const struct imx_imx_uart_1irq_data *data,
const struct imxuart_platform_data *pdata)
diff --git a/arch/arm/mach-imx/devices/platform-spi_imx.c b/arch/arm/mach-imx/devices/platform-spi_imx.c
index 5e9707b47f92..d93c446c9c02 100644
--- a/arch/arm/mach-imx/devices/platform-spi_imx.c
+++ b/arch/arm/mach-imx/devices/platform-spi_imx.c
@@ -21,15 +21,6 @@
#define imx_spi_imx_data_entry(soc, type, devid, id, hwid, size) \
[id] = imx_spi_imx_data_entry_single(soc, type, devid, id, hwid, size)
-#ifdef CONFIG_SOC_IMX1
-const struct imx_spi_imx_data imx1_cspi_data[] __initconst = {
-#define imx1_cspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX1, CSPI, "imx1-cspi", _id, _hwid, SZ_4K)
- imx1_cspi_data_entry(0, 1),
- imx1_cspi_data_entry(1, 2),
-};
-#endif
-
#ifdef CONFIG_SOC_IMX21
const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
#define imx21_cspi_data_entry(_id, _hwid) \
diff --git a/arch/arm/mach-imx/hardware.h b/arch/arm/mach-imx/hardware.h
index d737f95ebb07..90e10cbd8fd1 100644
--- a/arch/arm/mach-imx/hardware.h
+++ b/arch/arm/mach-imx/hardware.h
@@ -112,7 +112,6 @@
#include "mx2x.h"
#include "mx21.h"
#include "mx27.h"
-#include "mx1.h"
#define imx_map_entry(soc, name, _type) { \
.virtual = soc ## _IO_P2V(soc ## _ ## name ## _BASE_ADDR), \
@@ -121,7 +120,7 @@
.type = _type, \
}
-/* There's a off-by-one betweem the gpio bank number and the gpiochip */
+/* There's an off-by-one between the gpio bank number and the gpiochip */
/* range e.g. GPIO_1_5 is gpio 5 under linux */
#define IMX_GPIO_NR(bank, nr) (((bank) - 1) * 32 + (nr))
diff --git a/arch/arm/mach-imx/iomux-mx1.h b/arch/arm/mach-imx/iomux-mx1.h
deleted file mode 100644
index 95f4681d85d7..000000000000
--- a/arch/arm/mach-imx/iomux-mx1.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-#ifndef __MACH_IOMUX_MX1_H__
-#define __MACH_IOMUX_MX1_H__
-
-#include "iomux-v1.h"
-
-#define PA0_AIN_SPI2_CLK (GPIO_PORTA | GPIO_AIN | GPIO_OUT | 0)
-#define PA0_AF_ETMTRACESYNC (GPIO_PORTA | GPIO_AF | 0)
-#define PA1_AOUT_SPI2_RXD (GPIO_PORTA | GPIO_AOUT | GPIO_IN | 1)
-#define PA1_PF_TIN (GPIO_PORTA | GPIO_PF | 1)
-#define PA2_PF_PWM0 (GPIO_PORTA | GPIO_PF | GPIO_OUT | 2)
-#define PA3_PF_CSI_MCLK (GPIO_PORTA | GPIO_PF | 3)
-#define PA4_PF_CSI_D0 (GPIO_PORTA | GPIO_PF | 4)
-#define PA5_PF_CSI_D1 (GPIO_PORTA | GPIO_PF | 5)
-#define PA6_PF_CSI_D2 (GPIO_PORTA | GPIO_PF | 6)
-#define PA7_PF_CSI_D3 (GPIO_PORTA | GPIO_PF | 7)
-#define PA8_PF_CSI_D4 (GPIO_PORTA | GPIO_PF | 8)
-#define PA9_PF_CSI_D5 (GPIO_PORTA | GPIO_PF | 9)
-#define PA10_PF_CSI_D6 (GPIO_PORTA | GPIO_PF | 10)
-#define PA11_PF_CSI_D7 (GPIO_PORTA | GPIO_PF | 11)
-#define PA12_PF_CSI_VSYNC (GPIO_PORTA | GPIO_PF | 12)
-#define PA13_PF_CSI_HSYNC (GPIO_PORTA | GPIO_PF | 13)
-#define PA14_PF_CSI_PIXCLK (GPIO_PORTA | GPIO_PF | 14)
-#define PA15_PF_I2C_SDA (GPIO_PORTA | GPIO_PF | GPIO_OUT | 15)
-#define PA16_PF_I2C_SCL (GPIO_PORTA | GPIO_PF | GPIO_OUT | 16)
-#define PA17_AF_ETMTRACEPKT4 (GPIO_PORTA | GPIO_AF | 17)
-#define PA17_AIN_SPI2_SS (GPIO_PORTA | GPIO_AIN | GPIO_OUT | 17)
-#define PA18_AF_ETMTRACEPKT5 (GPIO_PORTA | GPIO_AF | 18)
-#define PA19_AF_ETMTRACEPKT6 (GPIO_PORTA | GPIO_AF | 19)
-#define PA20_AF_ETMTRACEPKT7 (GPIO_PORTA | GPIO_AF | 20)
-#define PA21_PF_A0 (GPIO_PORTA | GPIO_PF | 21)
-#define PA22_PF_CS4 (GPIO_PORTA | GPIO_PF | 22)
-#define PA23_PF_CS5 (GPIO_PORTA | GPIO_PF | 23)
-#define PA24_PF_A16 (GPIO_PORTA | GPIO_PF | 24)
-#define PA24_AF_ETMTRACEPKT0 (GPIO_PORTA | GPIO_AF | 24)
-#define PA25_PF_A17 (GPIO_PORTA | GPIO_PF | 25)
-#define PA25_AF_ETMTRACEPKT1 (GPIO_PORTA | GPIO_AF | 25)
-#define PA26_PF_A18 (GPIO_PORTA | GPIO_PF | 26)
-#define PA26_AF_ETMTRACEPKT2 (GPIO_PORTA | GPIO_AF | 26)
-#define PA27_PF_A19 (GPIO_PORTA | GPIO_PF | 27)
-#define PA27_AF_ETMTRACEPKT3 (GPIO_PORTA | GPIO_AF | 27)
-#define PA28_PF_A20 (GPIO_PORTA | GPIO_PF | 28)
-#define PA28_AF_ETMPIPESTAT0 (GPIO_PORTA | GPIO_AF | 28)
-#define PA29_PF_A21 (GPIO_PORTA | GPIO_PF | 29)
-#define PA29_AF_ETMPIPESTAT1 (GPIO_PORTA | GPIO_AF | 29)
-#define PA30_PF_A22 (GPIO_PORTA | GPIO_PF | 30)
-#define PA30_AF_ETMPIPESTAT2 (GPIO_PORTA | GPIO_AF | 30)
-#define PA31_PF_A23 (GPIO_PORTA | GPIO_PF | 31)
-#define PA31_AF_ETMTRACECLK (GPIO_PORTA | GPIO_AF | 31)
-#define PB8_PF_SD_DAT0 (GPIO_PORTB | GPIO_PF | GPIO_PUEN | 8)
-#define PB8_AF_MS_PIO (GPIO_PORTB | GPIO_AF | 8)
-#define PB9_PF_SD_DAT1 (GPIO_PORTB | GPIO_PF | GPIO_PUEN | 9)
-#define PB9_AF_MS_PI1 (GPIO_PORTB | GPIO_AF | 9)
-#define PB10_PF_SD_DAT2 (GPIO_PORTB | GPIO_PF | GPIO_PUEN | 10)
-#define PB10_AF_MS_SCLKI (GPIO_PORTB | GPIO_AF | 10)
-#define PB11_PF_SD_DAT3 (GPIO_PORTB | GPIO_PF | 11)
-#define PB11_AF_MS_SDIO (GPIO_PORTB | GPIO_AF | 11)
-#define PB12_PF_SD_CLK (GPIO_PORTB | GPIO_PF | 12)
-#define PB12_AF_MS_SCLK0 (GPIO_PORTB | GPIO_AF | 12)
-#define PB13_PF_SD_CMD (GPIO_PORTB | GPIO_PF | GPIO_PUEN | 13)
-#define PB13_AF_MS_BS (GPIO_PORTB | GPIO_AF | 13)
-#define PB14_AF_SSI_RXFS (GPIO_PORTB | GPIO_AF | 14)
-#define PB15_AF_SSI_RXCLK (GPIO_PORTB | GPIO_AF | 15)
-#define PB16_AF_SSI_RXDAT (GPIO_PORTB | GPIO_AF | GPIO_IN | 16)
-#define PB17_AF_SSI_TXDAT (GPIO_PORTB | GPIO_AF | GPIO_OUT | 17)
-#define PB18_AF_SSI_TXFS (GPIO_PORTB | GPIO_AF | 18)
-#define PB19_AF_SSI_TXCLK (GPIO_PORTB | GPIO_AF | 19)
-#define PB20_PF_USBD_AFE (GPIO_PORTB | GPIO_PF | 20)
-#define PB21_PF_USBD_OE (GPIO_PORTB | GPIO_PF | 21)
-#define PB22_PF_USBD_RCV (GPIO_PORTB | GPIO_PF | 22)
-#define PB23_PF_USBD_SUSPND (GPIO_PORTB | GPIO_PF | 23)
-#define PB24_PF_USBD_VP (GPIO_PORTB | GPIO_PF | 24)
-#define PB25_PF_USBD_VM (GPIO_PORTB | GPIO_PF | 25)
-#define PB26_PF_USBD_VPO (GPIO_PORTB | GPIO_PF | 26)
-#define PB27_PF_USBD_VMO (GPIO_PORTB | GPIO_PF | 27)
-#define PB28_PF_UART2_CTS (GPIO_PORTB | GPIO_PF | GPIO_OUT | 28)
-#define PB29_PF_UART2_RTS (GPIO_PORTB | GPIO_PF | GPIO_IN | 29)
-#define PB30_PF_UART2_TXD (GPIO_PORTB | GPIO_PF | GPIO_OUT | 30)
-#define PB31_PF_UART2_RXD (GPIO_PORTB | GPIO_PF | GPIO_IN | 31)
-#define PC3_PF_SSI_RXFS (GPIO_PORTC | GPIO_PF | 3)
-#define PC4_PF_SSI_RXCLK (GPIO_PORTC | GPIO_PF | 4)
-#define PC5_PF_SSI_RXDAT (GPIO_PORTC | GPIO_PF | GPIO_IN | 5)
-#define PC6_PF_SSI_TXDAT (GPIO_PORTC | GPIO_PF | GPIO_OUT | 6)
-#define PC7_PF_SSI_TXFS (GPIO_PORTC | GPIO_PF | 7)
-#define PC8_PF_SSI_TXCLK (GPIO_PORTC | GPIO_PF | 8)
-#define PC9_PF_UART1_CTS (GPIO_PORTC | GPIO_PF | GPIO_OUT | 9)
-#define PC10_PF_UART1_RTS (GPIO_PORTC | GPIO_PF | GPIO_IN | 10)
-#define PC11_PF_UART1_TXD (GPIO_PORTC | GPIO_PF | GPIO_OUT | 11)
-#define PC12_PF_UART1_RXD (GPIO_PORTC | GPIO_PF | GPIO_IN | 12)
-#define PC13_PF_SPI1_SPI_RDY (GPIO_PORTC | GPIO_PF | 13)
-#define PC14_PF_SPI1_SCLK (GPIO_PORTC | GPIO_PF | 14)
-#define PC15_PF_SPI1_SS (GPIO_PORTC | GPIO_PF | 15)
-#define PC16_PF_SPI1_MISO (GPIO_PORTC | GPIO_PF | 16)
-#define PC17_PF_SPI1_MOSI (GPIO_PORTC | GPIO_PF | 17)
-#define PC24_BIN_UART3_RI (GPIO_PORTC | GPIO_BIN | GPIO_OUT | 24)
-#define PC25_BIN_UART3_DSR (GPIO_PORTC | GPIO_BIN | GPIO_OUT | 25)
-#define PC26_AOUT_UART3_DTR (GPIO_PORTC | GPIO_AOUT | GPIO_IN | 26)
-#define PC27_BIN_UART3_DCD (GPIO_PORTC | GPIO_BIN | GPIO_OUT | 27)
-#define PC28_BIN_UART3_CTS (GPIO_PORTC | GPIO_BIN | GPIO_OUT | 28)
-#define PC29_AOUT_UART3_RTS (GPIO_PORTC | GPIO_AOUT | GPIO_IN | 29)
-#define PC30_BIN_UART3_TX (GPIO_PORTC | GPIO_BIN | 30)
-#define PC31_AOUT_UART3_RX (GPIO_PORTC | GPIO_AOUT | GPIO_IN | 31)
-#define PD6_PF_LSCLK (GPIO_PORTD | GPIO_PF | GPIO_OUT | 6)
-#define PD7_PF_REV (GPIO_PORTD | GPIO_PF | 7)
-#define PD7_AF_UART2_DTR (GPIO_PORTD | GPIO_AF | GPIO_IN | 7)
-#define PD7_AIN_SPI2_SCLK (GPIO_PORTD | GPIO_AIN | 7)
-#define PD8_PF_CLS (GPIO_PORTD | GPIO_PF | 8)
-#define PD8_AF_UART2_DCD (GPIO_PORTD | GPIO_AF | GPIO_OUT | 8)
-#define PD8_AIN_SPI2_SS (GPIO_PORTD | GPIO_AIN | 8)
-#define PD9_PF_PS (GPIO_PORTD | GPIO_PF | 9)
-#define PD9_AF_UART2_RI (GPIO_PORTD | GPIO_AF | GPIO_OUT | 9)
-#define PD9_AOUT_SPI2_RXD (GPIO_PORTD | GPIO_AOUT | GPIO_IN | 9)
-#define PD10_PF_SPL_SPR (GPIO_PORTD | GPIO_PF | GPIO_OUT | 10)
-#define PD10_AF_UART2_DSR (GPIO_PORTD | GPIO_AF | GPIO_OUT | 10)
-#define PD10_AIN_SPI2_TXD (GPIO_PORTD | GPIO_AIN | GPIO_OUT | 10)
-#define PD11_PF_CONTRAST (GPIO_PORTD | GPIO_PF | GPIO_OUT | 11)
-#define PD12_PF_ACD_OE (GPIO_PORTD | GPIO_PF | GPIO_OUT | 12)
-#define PD13_PF_LP_HSYNC (GPIO_PORTD | GPIO_PF | GPIO_OUT | 13)
-#define PD14_PF_FLM_VSYNC (GPIO_PORTD | GPIO_PF | GPIO_OUT | 14)
-#define PD15_PF_LD0 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 15)
-#define PD16_PF_LD1 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 16)
-#define PD17_PF_LD2 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 17)
-#define PD18_PF_LD3 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 18)
-#define PD19_PF_LD4 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 19)
-#define PD20_PF_LD5 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 20)
-#define PD21_PF_LD6 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 21)
-#define PD22_PF_LD7 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 22)
-#define PD23_PF_LD8 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 23)
-#define PD24_PF_LD9 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 24)
-#define PD25_PF_LD10 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 25)
-#define PD26_PF_LD11 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 26)
-#define PD27_PF_LD12 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 27)
-#define PD28_PF_LD13 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 28)
-#define PD29_PF_LD14 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 29)
-#define PD30_PF_LD15 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 30)
-#define PD31_PF_TMR2OUT (GPIO_PORTD | GPIO_PF | 31)
-#define PD31_BIN_SPI2_TXD (GPIO_PORTD | GPIO_BIN | 31)
-
-#endif /* ifndef __MACH_IOMUX_MX1_H__ */
diff --git a/arch/arm/mach-imx/iomux-mx3.h b/arch/arm/mach-imx/iomux-mx3.h
index 2e4a0ddca76c..368667b32760 100644
--- a/arch/arm/mach-imx/iomux-mx3.h
+++ b/arch/arm/mach-imx/iomux-mx3.h
@@ -598,10 +598,7 @@ enum iomux_pins {
#define MX31_PIN_CONTRAST__CONTRAST IOMUX_MODE(MX31_PIN_CONTRAST, IOMUX_CONFIG_FUNC)
#define MX31_PIN_D3_SPL__D3_SPL IOMUX_MODE(MX31_PIN_D3_SPL, IOMUX_CONFIG_FUNC)
#define MX31_PIN_D3_CLS__D3_CLS IOMUX_MODE(MX31_PIN_D3_CLS, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_LCS0__GPI03_23 IOMUX_MODE(MX31_PIN_LCS0, IOMUX_CONFIG_GPIO)
#define MX31_PIN_GPIO1_1__GPIO IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_I2C_CLK__SCL IOMUX_MODE(MX31_PIN_I2C_CLK, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_I2C_DAT__SDA IOMUX_MODE(MX31_PIN_I2C_DAT, IOMUX_CONFIG_FUNC)
#define MX31_PIN_DCD_DTE1__I2C2_SDA IOMUX_MODE(MX31_PIN_DCD_DTE1, IOMUX_CONFIG_ALT2)
#define MX31_PIN_RI_DTE1__I2C2_SCL IOMUX_MODE(MX31_PIN_RI_DTE1, IOMUX_CONFIG_ALT2)
#define MX31_PIN_CSPI2_SS2__I2C3_SDA IOMUX_MODE(MX31_PIN_CSPI2_SS2, IOMUX_CONFIG_ALT1)
@@ -665,37 +662,6 @@ enum iomux_pins {
#define MX31_PIN_USB_OC__GPIO1_30 IOMUX_MODE(MX31_PIN_USB_OC, IOMUX_CONFIG_GPIO)
#define MX31_PIN_I2C_DAT__I2C1_SDA IOMUX_MODE(MX31_PIN_I2C_DAT, IOMUX_CONFIG_FUNC)
#define MX31_PIN_I2C_CLK__I2C1_SCL IOMUX_MODE(MX31_PIN_I2C_CLK, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_DCD_DTE1__I2C2_SDA IOMUX_MODE(MX31_PIN_DCD_DTE1, IOMUX_CONFIG_ALT2)
-#define MX31_PIN_RI_DTE1__I2C2_SCL IOMUX_MODE(MX31_PIN_RI_DTE1, IOMUX_CONFIG_ALT2)
-#define MX31_PIN_ATA_CS0__GPIO3_26 IOMUX_MODE(MX31_PIN_ATA_CS0, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_ATA_CS1__GPIO3_27 IOMUX_MODE(MX31_PIN_ATA_CS1, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_PC_PWRON__SD2_DATA3 IOMUX_MODE(MX31_PIN_PC_PWRON, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_PC_VS1__SD2_DATA2 IOMUX_MODE(MX31_PIN_PC_VS1, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_PC_READY__SD2_DATA1 IOMUX_MODE(MX31_PIN_PC_READY, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_PC_WAIT_B__SD2_DATA0 IOMUX_MODE(MX31_PIN_PC_WAIT_B, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_PC_CD2_B__SD2_CLK IOMUX_MODE(MX31_PIN_PC_CD2_B, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_PC_CD1_B__SD2_CMD IOMUX_MODE(MX31_PIN_PC_CD1_B, IOMUX_CONFIG_ALT1)
-#define MX31_PIN_ATA_DIOR__GPIO3_28 IOMUX_MODE(MX31_PIN_ATA_DIOR, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_ATA_DIOW__GPIO3_29 IOMUX_MODE(MX31_PIN_ATA_DIOW, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_CSI_D4__CSI_D4 IOMUX_MODE(MX31_PIN_CSI_D4, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D5__CSI_D5 IOMUX_MODE(MX31_PIN_CSI_D5, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D6__CSI_D6 IOMUX_MODE(MX31_PIN_CSI_D6, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D7__CSI_D7 IOMUX_MODE(MX31_PIN_CSI_D7, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D8__CSI_D8 IOMUX_MODE(MX31_PIN_CSI_D8, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D9__CSI_D9 IOMUX_MODE(MX31_PIN_CSI_D9, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D10__CSI_D10 IOMUX_MODE(MX31_PIN_CSI_D10, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D11__CSI_D11 IOMUX_MODE(MX31_PIN_CSI_D11, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D12__CSI_D12 IOMUX_MODE(MX31_PIN_CSI_D12, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D13__CSI_D13 IOMUX_MODE(MX31_PIN_CSI_D13, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D14__CSI_D14 IOMUX_MODE(MX31_PIN_CSI_D14, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_D15__CSI_D15 IOMUX_MODE(MX31_PIN_CSI_D15, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_HSYNC__CSI_HSYNC IOMUX_MODE(MX31_PIN_CSI_HSYNC, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_MCLK__CSI_MCLK IOMUX_MODE(MX31_PIN_CSI_MCLK, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_PIXCLK__CSI_PIXCLK IOMUX_MODE(MX31_PIN_CSI_PIXCLK, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_CSI_VSYNC__CSI_VSYNC IOMUX_MODE(MX31_PIN_CSI_VSYNC, IOMUX_CONFIG_FUNC)
-#define MX31_PIN_GPIO3_0__GPIO3_0 IOMUX_MODE(MX31_PIN_GPIO3_0, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_GPIO3_1__GPIO3_1 IOMUX_MODE(MX31_PIN_GPIO3_1, IOMUX_CONFIG_GPIO)
-#define MX31_PIN_TXD2__GPIO1_28 IOMUX_MODE(MX31_PIN_TXD2, IOMUX_CONFIG_GPIO)
#define MX31_PIN_GPIO1_0__GPIO1_0 IOMUX_MODE(MX31_PIN_GPIO1_0, IOMUX_CONFIG_GPIO)
#define MX31_PIN_SVEN0__GPIO2_0 IOMUX_MODE(MX31_PIN_SVEN0, IOMUX_CONFIG_GPIO)
#define MX31_PIN_STX0__GPIO2_1 IOMUX_MODE(MX31_PIN_STX0, IOMUX_CONFIG_GPIO)
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
deleted file mode 100644
index ebbb5ab63529..000000000000
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * linux/arch/arm/mach-imx/mach-apf9328.c
- *
- * Copyright (c) 2005-2011 ARMadeus systems <support@armadeus.com>
- *
- * This work is based on mach-scb9328.c which is:
- * Copyright (c) 2004 Sascha Hauer <saschahauer@web.de>
- * Copyright (c) 2006-2008 Juergen Beisert <jbeisert@netscape.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/dm9000.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#include "common.h"
-#include "devices-imx1.h"
-#include "hardware.h"
-#include "iomux-mx1.h"
-
-static const int apf9328_pins[] __initconst = {
- /* UART1 */
- PC9_PF_UART1_CTS,
- PC10_PF_UART1_RTS,
- PC11_PF_UART1_TXD,
- PC12_PF_UART1_RXD,
- /* UART2 */
- PB28_PF_UART2_CTS,
- PB29_PF_UART2_RTS,
- PB30_PF_UART2_TXD,
- PB31_PF_UART2_RXD,
- /* I2C */
- PA15_PF_I2C_SDA,
- PA16_PF_I2C_SCL,
-};
-
-/*
- * The APF9328 can have up to 32MB NOR Flash
- */
-static struct resource flash_resource = {
- .start = MX1_CS0_PHYS,
- .end = MX1_CS0_PHYS + SZ_32M - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct physmap_flash_data apf9328_flash_data = {
- .width = 2,
-};
-
-static struct platform_device apf9328_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &apf9328_flash_data,
- },
- .resource = &flash_resource,
- .num_resources = 1,
-};
-
-/*
- * APF9328 has a DM9000 Ethernet controller
- */
-static struct dm9000_plat_data dm9000_setup = {
- .flags = DM9000_PLATF_16BITONLY
-};
-
-static struct resource dm9000_resources[] = {
- {
- .start = MX1_CS4_PHYS + 0x00C00000,
- .end = MX1_CS4_PHYS + 0x00C00001,
- .flags = IORESOURCE_MEM,
- }, {
- .start = MX1_CS4_PHYS + 0x00C00002,
- .end = MX1_CS4_PHYS + 0x00C00003,
- .flags = IORESOURCE_MEM,
- }, {
- /* irq number is run-time assigned */
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
- },
-};
-
-static struct platform_device dm9000x_device = {
- .name = "dm9000",
- .id = 0,
- .num_resources = ARRAY_SIZE(dm9000_resources),
- .resource = dm9000_resources,
- .dev = {
- .platform_data = &dm9000_setup,
- }
-};
-
-static const struct imxuart_platform_data uart1_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static const struct imxi2c_platform_data apf9328_i2c_data __initconst = {
- .bitrate = 100000,
-};
-
-static struct platform_device *devices[] __initdata = {
- &apf9328_flash_device,
- &dm9000x_device,
-};
-
-static void __init apf9328_init(void)
-{
- imx1_soc_init();
-
- mxc_gpio_setup_multiple_pins(apf9328_pins,
- ARRAY_SIZE(apf9328_pins),
- "APF9328");
-
- imx1_add_imx_uart0(NULL);
- imx1_add_imx_uart1(&uart1_pdata);
-
- imx1_add_imx_i2c(&apf9328_i2c_data);
-
- dm9000_resources[2].start = gpio_to_irq(IMX_GPIO_NR(2, 14));
- dm9000_resources[2].end = gpio_to_irq(IMX_GPIO_NR(2, 14));
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-static void __init apf9328_timer_init(void)
-{
- mx1_clocks_init(32768);
-}
-
-MACHINE_START(APF9328, "Armadeus APF9328")
- /* Maintainer: Gwenhael Goavec-Merou, ARMadeus Systems */
- .map_io = mx1_map_io,
- .init_early = imx1_init_early,
- .init_irq = mx1_init_irq,
- .init_time = apf9328_timer_init,
- .init_machine = apf9328_init,
- .restart = mxc_restart,
-MACHINE_END
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index eaee47a2fcc0..17a97ba2cecf 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -493,24 +493,12 @@ static void __init armadillo5x0_init(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- armadillo5x0_smc911x_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
- armadillo5x0_smc911x_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
- platform_add_devices(devices, ARRAY_SIZE(devices));
- imx_add_gpio_keys(&armadillo5x0_button_data);
imx31_add_imx_i2c1(NULL);
/* Register UART */
imx31_add_imx_uart0(&uart_pdata);
imx31_add_imx_uart1(&uart_pdata);
- /* SMSC9118 IRQ pin */
- gpio_direction_input(MX31_PIN_GPIO1_0);
-
- /* Register SDHC */
- imx31_add_mxc_mmc(0, &sdhc_pdata);
-
/* Register FB */
imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
@@ -527,21 +515,39 @@ static void __init armadillo5x0_init(void)
/* set NAND page size to 2k if not configured via boot mode pins */
imx_writel(imx_readl(mx3_ccm_base + MXC_CCM_RCSR) | (1 << 30),
mx3_ccm_base + MXC_CCM_RCSR);
+}
+
+static void __init armadillo5x0_late(void)
+{
+ armadillo5x0_smc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ armadillo5x0_smc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ imx_add_gpio_keys(&armadillo5x0_button_data);
+
+ /* SMSC9118 IRQ pin */
+ gpio_direction_input(MX31_PIN_GPIO1_0);
+
+ /* Register SDHC */
+ imx31_add_mxc_mmc(0, &sdhc_pdata);
/* RTC */
/* Get RTC IRQ and register the chip */
- if (gpio_request(ARMADILLO5X0_RTC_GPIO, "rtc") == 0) {
- if (gpio_direction_input(ARMADILLO5X0_RTC_GPIO) == 0)
- armadillo5x0_i2c_rtc.irq = gpio_to_irq(ARMADILLO5X0_RTC_GPIO);
+ if (!gpio_request(ARMADILLO5X0_RTC_GPIO, "rtc")) {
+ if (!gpio_direction_input(ARMADILLO5X0_RTC_GPIO))
+ armadillo5x0_i2c_rtc.irq =
+ gpio_to_irq(ARMADILLO5X0_RTC_GPIO);
else
gpio_free(ARMADILLO5X0_RTC_GPIO);
}
+
if (armadillo5x0_i2c_rtc.irq == 0)
pr_warn("armadillo5x0_init: failed to get RTC IRQ\n");
i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1);
/* USB */
-
usbotg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
ULPI_OTG_DRVVBUS_EXT);
if (usbotg_pdata.otg)
@@ -565,5 +571,6 @@ MACHINE_START(ARMADILLO5X0, "Armadillo-500")
.init_irq = mx31_init_irq,
.init_time = armadillo5x0_timer_init,
.init_machine = armadillo5x0_init,
+ .init_late = armadillo5x0_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/imx1-dt.c b/arch/arm/mach-imx/mach-imx1.c
index 6f915b0961c4..de5ab8d88549 100644
--- a/arch/arm/mach-imx/imx1-dt.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -9,8 +9,27 @@
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
#include "common.h"
+#include "hardware.h"
+
+#define MX1_AVIC_ADDR 0x00223000
+
+static void __init imx1_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX1);
+}
+
+static void __init imx1_init_irq(void)
+{
+ void __iomem *avic_addr;
+
+ avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
+ WARN_ON(!avic_addr);
+
+ mxc_init_irq(avic_addr);
+}
static const char * const imx1_dt_board_compat[] __initconst = {
"fsl,imx1",
@@ -18,9 +37,9 @@ static const char * const imx1_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
- .map_io = mx1_map_io,
+ .map_io = debug_ll_io_init,
.init_early = imx1_init_early,
- .init_irq = mx1_init_irq,
+ .init_irq = imx1_init_irq,
.dt_compat = imx1_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index ede2bdbb5dd5..dd75a4756761 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -540,7 +540,6 @@ static void __init visstrim_m10_revision(void)
static void __init visstrim_m10_board_init(void)
{
int ret;
- int mo_version;
imx27_soc_init();
visstrim_m10_revision();
@@ -550,11 +549,6 @@ static void __init visstrim_m10_board_init(void)
if (ret)
pr_err("Failed to setup pins (%d)\n", ret);
- ret = gpio_request_array(visstrim_m10_gpios,
- ARRAY_SIZE(visstrim_m10_gpios));
- if (ret)
- pr_err("Failed to request gpios (%d)\n", ret);
-
imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
imx27_add_imx_uart0(&uart_pdata);
@@ -566,12 +560,26 @@ static void __init visstrim_m10_board_init(void)
imx27_add_mxc_mmc(0, &visstrim_m10_sdhc_pdata);
imx27_add_mxc_ehci_otg(&visstrim_m10_usbotg_pdata);
imx27_add_fec(NULL);
- imx_add_gpio_keys(&visstrim_gpio_keys_platform_data);
+
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+}
+
+static void __init visstrim_m10_late_init(void)
+{
+ int mo_version, ret;
+
+ ret = gpio_request_array(visstrim_m10_gpios,
+ ARRAY_SIZE(visstrim_m10_gpios));
+ if (ret)
+ pr_err("Failed to request gpios (%d)\n", ret);
+
+ imx_add_gpio_keys(&visstrim_gpio_keys_platform_data);
+
imx_add_platform_device("mx27vis", 0, NULL, 0, &snd_mx27vis_pdata,
sizeof(snd_mx27vis_pdata));
platform_device_register_resndata(NULL, "soc-camera-pdrv", 0, NULL, 0,
&iclink_tvp5150, sizeof(iclink_tvp5150));
+
gpio_led_register_device(0, &visstrim_m10_led_data);
/* Use mother board version to decide what video devices we shall use */
@@ -591,6 +599,7 @@ static void __init visstrim_m10_board_init(void)
visstrim_deinterlace_init();
visstrim_analog_camera_init();
}
+
visstrim_coda_init();
}
@@ -607,5 +616,6 @@ MACHINE_START(IMX27_VISSTRIM_M10, "Vista Silicon Visstrim_M10")
.init_irq = mx27_init_irq,
.init_time = visstrim_m10_timer_init,
.init_machine = visstrim_m10_board_init,
+ .init_late = visstrim_m10_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 6bb7d9cf1e38..58a2b88233e6 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -16,6 +16,7 @@
#include <asm/mach/map.h>
#include "common.h"
+#include "cpuidle.h"
static void __init imx6ul_enet_clk_init(void)
{
@@ -80,6 +81,8 @@ static void __init imx6ul_init_irq(void)
static void __init imx6ul_init_late(void)
{
+ imx6sx_cpuidle_init();
+
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
}
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 31df4361996f..ab847e2c822a 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -63,7 +63,7 @@
*/
#define KZM_ARM11_16550 (MX31_CS4_BASE_ADDR + 0x1050)
-#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_8250)
/*
* KZM-ARM11-01 has an external UART on FPGA
*/
@@ -141,7 +141,7 @@ static inline int kzm_init_ext_uart(void)
/*
* SMSC LAN9118
*/
-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#if IS_ENABLED(CONFIG_SMSC911X)
static struct smsc911x_platform_config kzm_smsc9118_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
@@ -201,7 +201,7 @@ static inline int kzm_init_smsc9118(void)
}
#endif
-#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
+#if IS_ENABLED(CONFIG_SERIAL_IMX)
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -245,13 +245,17 @@ static void __init kzm_board_init(void)
mxc_iomux_setup_multiple_pins(kzm_pins,
ARRAY_SIZE(kzm_pins), "kzm");
- kzm_init_ext_uart();
- kzm_init_smsc9118();
kzm_init_imx_uart();
pr_info("Clock input source is 26MHz\n");
}
+static void __init kzm_late_init(void)
+{
+ kzm_init_ext_uart();
+ kzm_init_smsc9118();
+}
+
/*
* This structure defines static mappings for the kzm-arm11-01 board.
*/
@@ -291,5 +295,6 @@ MACHINE_START(KZM_ARM11_01, "Kyoto Microcomputer Co., Ltd. KZM-ARM11-01")
.init_irq = mx31_init_irq,
.init_time = kzm_timer_init,
.init_machine = kzm_board_init,
+ .init_late = kzm_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 9986f9a697c8..5e366824814f 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -302,12 +302,16 @@ static void __init mx21ads_board_init(void)
imx21_add_imx_uart0(&uart_pdata_rts);
imx21_add_imx_uart2(&uart_pdata_norts);
imx21_add_imx_uart3(&uart_pdata_rts);
- imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
imx21_add_mxc_nand(&mx21ads_nand_board_info);
- platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
-
imx21_add_imx_fb(&mx21ads_fb_data);
+}
+
+static void __init mx21ads_late_init(void)
+{
+ imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
+
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
mx21ads_cs8900_resources[1].start =
gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
@@ -328,6 +332,7 @@ MACHINE_START(MX21ADS, "Freescale i.MX21ADS")
.init_early = imx21_init_early,
.init_irq = mx21_init_irq,
.init_time = mx21ads_timer_init,
- .init_machine = mx21ads_board_init,
+ .init_machine = mx21ads_board_init,
+ .init_late = mx21ads_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 9ef4640f3660..7ba651a9b5b8 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -485,17 +485,32 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = {
static void __init mx27pdk_init(void)
{
- int ret;
imx27_soc_init();
mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
"mx27pdk");
- mx27_3ds_sdhc1_enable_level_translator();
imx27_add_imx_uart0(&uart_pdata);
imx27_add_fec(NULL);
imx27_add_imx_keypad(&mx27_3ds_keymap_data);
- imx27_add_mxc_mmc(0, &sdhc1_pdata);
imx27_add_imx2_wdt();
+
+ imx27_add_spi_imx1(&spi2_pdata);
+ imx27_add_spi_imx0(&spi1_pdata);
+
+ imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ imx27_add_imx_fb(&mx27_3ds_fb_data);
+
+ imx27_add_imx_ssi(0, &mx27_3ds_ssi_pdata);
+}
+
+static void __init mx27pdk_late_init(void)
+{
+ int ret;
+
+ mx27_3ds_sdhc1_enable_level_translator();
+ imx27_add_mxc_mmc(0, &sdhc1_pdata);
+
otg_phy_init();
if (otg_mode_host) {
@@ -509,17 +524,12 @@ static void __init mx27pdk_init(void)
if (!otg_mode_host)
imx27_add_fsl_usb2_udc(&otg_device_pdata);
- imx27_add_spi_imx1(&spi2_pdata);
- imx27_add_spi_imx0(&spi1_pdata);
mx27_3ds_spi_devs[0].irq = gpio_to_irq(PMIC_INT);
spi_register_board_info(mx27_3ds_spi_devs,
- ARRAY_SIZE(mx27_3ds_spi_devs));
+ ARRAY_SIZE(mx27_3ds_spi_devs));
if (mxc_expio_init(MX27_CS5_BASE_ADDR, IMX_GPIO_NR(3, 28)))
pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
- imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
- platform_add_devices(devices, ARRAY_SIZE(devices));
- imx27_add_imx_fb(&mx27_3ds_fb_data);
ret = gpio_request_array(mx27_3ds_camera_gpios,
ARRAY_SIZE(mx27_3ds_camera_gpios));
@@ -529,7 +539,6 @@ static void __init mx27pdk_init(void)
}
imx27_add_mx2_camera(&mx27_3ds_cam_pdata);
- imx27_add_imx_ssi(0, &mx27_3ds_ssi_pdata);
imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
}
@@ -547,5 +556,6 @@ MACHINE_START(MX27_3DS, "Freescale MX27PDK")
.init_irq = mx27_init_irq,
.init_time = mx27pdk_timer_init,
.init_machine = mx27pdk_init,
+ .init_late = mx27pdk_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index a4c389eae31a..a04bb094ded1 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -352,14 +352,20 @@ static void __init mx27ads_board_init(void)
i2c_register_board_info(1, mx27ads_i2c_devices,
ARRAY_SIZE(mx27ads_i2c_devices));
imx27_add_imx_i2c(1, &mx27ads_i2c1_data);
- mx27ads_regulator_init();
imx27_add_imx_fb(&mx27ads_fb_data);
+
+ imx27_add_fec(NULL);
+ imx27_add_mxc_w1();
+}
+
+static void __init mx27ads_late_init(void)
+{
+ mx27ads_regulator_init();
+
imx27_add_mxc_mmc(0, &sdhc1_pdata);
imx27_add_mxc_mmc(1, &sdhc2_pdata);
- imx27_add_fec(NULL);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- imx27_add_mxc_w1();
}
static void __init mx27ads_timer_init(void)
@@ -395,5 +401,6 @@ MACHINE_START(MX27ADS, "Freescale i.MX27ADS")
.init_irq = mx27_init_irq,
.init_time = mx27ads_timer_init,
.init_machine = mx27ads_board_init,
+ .init_late = mx27ads_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 65a0dc06a97c..12b8a52c9cb4 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -694,8 +694,6 @@ static struct platform_device *devices[] __initdata = {
static void __init mx31_3ds_init(void)
{
- int ret;
-
imx31_soc_init();
/* Configure SPI1 IOMUX */
@@ -708,14 +706,31 @@ static void __init mx31_3ds_init(void)
imx31_add_mxc_nand(&mx31_3ds_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+
+ imx31_add_imx_keypad(&mx31_3ds_keymap_data);
+
+ imx31_add_imx2_wdt();
+ imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
+
+ imx31_add_spi_imx0(&spi0_pdata);
+ imx31_add_ipu_core();
+ imx31_add_mx3_sdc_fb(&mx3fb_pdata);
+
+ imx31_add_imx_ssi(0, &mx31_3ds_ssi_pdata);
+
+ imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
+}
+
+static void __init mx31_3ds_late(void)
+{
+ int ret;
+
mx31_3ds_spi_devs[0].irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(mx31_3ds_spi_devs,
- ARRAY_SIZE(mx31_3ds_spi_devs));
+ ARRAY_SIZE(mx31_3ds_spi_devs));
platform_add_devices(devices, ARRAY_SIZE(devices));
- imx31_add_imx_keypad(&mx31_3ds_keymap_data);
-
mx31_3ds_usbotg_init();
if (otg_mode_host) {
otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
@@ -733,14 +748,9 @@ static void __init mx31_3ds_init(void)
if (mxc_expio_init(MX31_CS5_BASE_ADDR, IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)))
printk(KERN_WARNING "Init of the debug board failed, all "
- "devices on the debug board are unusable.\n");
- imx31_add_imx2_wdt();
- imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
- imx31_add_mxc_mmc(0, &sdhc1_pdata);
+ "devices on the debug board are unusable.\n");
- imx31_add_spi_imx0(&spi0_pdata);
- imx31_add_ipu_core();
- imx31_add_mx3_sdc_fb(&mx3fb_pdata);
+ imx31_add_mxc_mmc(0, &sdhc1_pdata);
/* CSI */
/* Camera power: default - off */
@@ -752,10 +762,6 @@ static void __init mx31_3ds_init(void)
}
mx31_3ds_init_camera();
-
- imx31_add_imx_ssi(0, &mx31_3ds_ssi_pdata);
-
- imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
}
static void __init mx31_3ds_timer_init(void)
@@ -778,6 +784,7 @@ MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
.init_irq = mx31_init_irq,
.init_time = mx31_3ds_timer_init,
.init_machine = mx31_3ds_init,
+ .init_late = mx31_3ds_late,
.reserve = mx31_3ds_reserve,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 4f2c56d44ba1..766b8b93fb97 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -554,20 +554,19 @@ static void __init mx31ads_map_io(void)
iotable_init(mx31ads_io_desc, ARRAY_SIZE(mx31ads_io_desc));
}
-static void __init mx31ads_init_irq(void)
-{
- mx31_init_irq();
- mx31ads_init_expio();
-}
-
static void __init mx31ads_init(void)
{
imx31_soc_init();
- mxc_init_extuart();
mxc_init_imx_uart();
- mxc_init_i2c();
mxc_init_audio();
+}
+
+static void __init mx31ads_late(void)
+{
+ mx31ads_init_expio();
+ mxc_init_extuart();
+ mxc_init_i2c();
mxc_init_ext_ethernet();
}
@@ -581,8 +580,9 @@ MACHINE_START(MX31ADS, "Freescale MX31ADS")
.atag_offset = 0x100,
.map_io = mx31ads_map_io,
.init_early = imx31_init_early,
- .init_irq = mx31ads_init_irq,
+ .init_irq = mx31_init_irq,
.init_time = mx31ads_timer_init,
.init_machine = mx31ads_init,
+ .init_late = mx31ads_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index e9549a3c0223..6fd463642954 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -56,6 +56,26 @@
* appropriate baseboard support code.
*/
+static unsigned int mx31lilly_pins[] __initdata = {
+ MX31_PIN_CTS1__CTS1,
+ MX31_PIN_RTS1__RTS1,
+ MX31_PIN_TXD1__TXD1,
+ MX31_PIN_RXD1__RXD1,
+ MX31_PIN_CTS2__CTS2,
+ MX31_PIN_RTS2__RTS2,
+ MX31_PIN_TXD2__TXD2,
+ MX31_PIN_RXD2__RXD2,
+ MX31_PIN_CSPI3_MOSI__RXD3,
+ MX31_PIN_CSPI3_MISO__TXD3,
+ MX31_PIN_CSPI3_SCLK__RTS3,
+ MX31_PIN_CSPI3_SPI_RDY__CTS3,
+};
+
+/* UART */
+static const struct imxuart_platform_data uart_pdata __initconst = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
/* SMSC ethernet support */
static struct resource smsc91x_resources[] = {
@@ -252,16 +272,12 @@ static void __init mx31lilly_board_init(void)
{
imx31_soc_init();
- switch (mx31lilly_baseboard) {
- case MX31LILLY_NOBOARD:
- break;
- case MX31LILLY_DB:
- mx31lilly_db_init();
- break;
- default:
- printk(KERN_ERR "Illegal mx31lilly_baseboard type %d\n",
- mx31lilly_baseboard);
- }
+ mxc_iomux_setup_multiple_pins(mx31lilly_pins,
+ ARRAY_SIZE(mx31lilly_pins), "mx31lilly");
+
+ imx31_add_imx_uart0(&uart_pdata);
+ imx31_add_imx_uart1(&uart_pdata);
+ imx31_add_imx_uart2(&uart_pdata);
mxc_iomux_alloc_pin(MX31_PIN_CS4__CS4, "Ethernet CS");
@@ -284,10 +300,17 @@ static void __init mx31lilly_board_init(void)
imx31_add_spi_imx0(&spi0_pdata);
imx31_add_spi_imx1(&spi1_pdata);
- mc13783_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
- spi_register_board_info(&mc13783_dev, 1);
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+}
+
+static void __init mx31lilly_late_init(void)
+{
+ if (mx31lilly_baseboard == MX31LILLY_DB)
+ mx31lilly_db_init();
+
+ mc13783_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
+ spi_register_board_info(&mc13783_dev, 1);
smsc91x_resources[1].start =
gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
@@ -310,6 +333,7 @@ MACHINE_START(LILLY1131, "INCO startec LILLY-1131")
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
.init_time = mx31lilly_timer_init,
- .init_machine = mx31lilly_board_init,
+ .init_machine = mx31lilly_board_init,
+ .init_late = mx31lilly_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index 4822a1738de4..f033a57d5694 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -52,6 +52,19 @@
*/
static unsigned int mx31lite_pins[] = {
+ /* UART1 */
+ MX31_PIN_CTS1__CTS1,
+ MX31_PIN_RTS1__RTS1,
+ MX31_PIN_TXD1__TXD1,
+ MX31_PIN_RXD1__RXD1,
+ /* SPI 0 */
+ MX31_PIN_CSPI1_SCLK__SCLK,
+ MX31_PIN_CSPI1_MOSI__MOSI,
+ MX31_PIN_CSPI1_MISO__MISO,
+ MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
+ MX31_PIN_CSPI1_SS0__SS0,
+ MX31_PIN_CSPI1_SS1__SS1,
+ MX31_PIN_CSPI1_SS2__SS2,
/* LAN9117 IRQ pin */
IOMUX_MODE(MX31_PIN_SFS6, IOMUX_CONFIG_GPIO),
/* SPI 1 */
@@ -64,6 +77,23 @@ static unsigned int mx31lite_pins[] = {
MX31_PIN_CSPI2_SS2__SS2,
};
+/* UART */
+static const struct imxuart_platform_data uart_pdata __initconst = {
+ .flags = IMXUART_HAVE_RTSCTS,
+};
+
+/* SPI */
+static int spi0_internal_chipselect[] = {
+ MXC_SPI_CS(0),
+ MXC_SPI_CS(1),
+ MXC_SPI_CS(2),
+};
+
+static const struct spi_imx_master spi0_pdata __initconst = {
+ .chipselect = spi0_internal_chipselect,
+ .num_chipselect = ARRAY_SIZE(spi0_internal_chipselect),
+};
+
static const struct mxc_nand_platform_data
mx31lite_nand_board_info __initconst = {
.width = 1,
@@ -103,13 +133,13 @@ static struct platform_device smsc911x_device = {
* The MC13783 is the only hard-wired SPI device on the module.
*/
-static int spi_internal_chipselect[] = {
+static int spi1_internal_chipselect[] = {
MXC_SPI_CS(0),
};
static const struct spi_imx_master spi1_pdata __initconst = {
- .chipselect = spi_internal_chipselect,
- .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
+ .chipselect = spi1_internal_chipselect,
+ .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect),
};
static struct mc13xxx_platform_data mc13783_pdata __initdata = {
@@ -200,8 +230,6 @@ static struct platform_device physmap_flash_device = {
.num_resources = 1,
};
-
-
/*
* This structure defines the MX31 memory map.
*/
@@ -233,29 +261,30 @@ static struct regulator_consumer_supply dummy_supplies[] = {
static void __init mx31lite_init(void)
{
- int ret;
-
imx31_soc_init();
- switch (mx31lite_baseboard) {
- case MX31LITE_NOBOARD:
- break;
- case MX31LITE_DB:
- mx31lite_db_init();
- break;
- default:
- printk(KERN_ERR "Illegal mx31lite_baseboard type %d\n",
- mx31lite_baseboard);
- }
-
mxc_iomux_setup_multiple_pins(mx31lite_pins, ARRAY_SIZE(mx31lite_pins),
"mx31lite");
+ imx31_add_imx_uart0(&uart_pdata);
+ imx31_add_spi_imx0(&spi0_pdata);
+
/* NOR and NAND flash */
platform_device_register(&physmap_flash_device);
imx31_add_mxc_nand(&mx31lite_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+}
+
+static void __init mx31lite_late(void)
+{
+ int ret;
+
+ if (mx31lite_baseboard == MX31LITE_DB)
+ mx31lite_db_init();
+
mc13783_spi_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(&mc13783_spi_dev, 1);
@@ -265,8 +294,6 @@ static void __init mx31lite_init(void)
if (usbh2_pdata.otg)
imx31_add_mxc_ehci_hs(2, &usbh2_pdata);
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
/* SMSC9117 IRQ pin */
ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq");
if (ret)
@@ -294,5 +321,6 @@ MACHINE_START(MX31LITE, "LogicPD i.MX31 SOM")
.init_irq = mx31_init_irq,
.init_time = mx31lite_timer_init,
.init_machine = mx31lite_init,
+ .init_late = mx31lite_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index 4f2d99888afd..cc867682520e 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -526,11 +526,9 @@ static void __init mx31moboard_init(void)
"moboard");
platform_add_devices(devices, ARRAY_SIZE(devices));
- gpio_led_register_device(-1, &mx31moboard_led_pdata);
imx31_add_imx2_wdt();
- moboard_uart0_init();
imx31_add_imx_uart0(&uart0_pdata);
imx31_add_imx_uart4(&uart4_pdata);
@@ -540,6 +538,19 @@ static void __init mx31moboard_init(void)
imx31_add_spi_imx1(&moboard_spi1_pdata);
imx31_add_spi_imx2(&moboard_spi2_pdata);
+ mx31moboard_init_cam();
+
+ imx31_add_imx_ssi(0, &moboard_ssi_pdata);
+
+ pm_power_off = mx31moboard_poweroff;
+}
+
+static void __init mx31moboard_late(void)
+{
+ gpio_led_register_device(-1, &mx31moboard_led_pdata);
+
+ moboard_uart0_init();
+
gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq");
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
moboard_spi_board_info[0].irq =
@@ -549,18 +560,11 @@ static void __init mx31moboard_init(void)
imx31_add_mxc_mmc(0, &sdhc1_pdata);
- mx31moboard_init_cam();
-
usb_xcvr_reset();
-
moboard_usbh2_init();
- imx31_add_imx_ssi(0, &moboard_ssi_pdata);
-
imx_add_platform_device("imx_mc13783", 0, NULL, 0, NULL, 0);
- pm_power_off = mx31moboard_poweroff;
-
switch (mx31moboard_baseboard) {
case MX31NOBOARD:
break;
@@ -601,5 +605,6 @@ MACHINE_START(MX31MOBOARD, "EPFL Mobots mx31moboard")
.init_irq = mx31_init_irq,
.init_time = mx31moboard_timer_init,
.init_machine = mx31moboard_init,
+ .init_late = mx31moboard_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 7e315f00648d..c8c2e0956048 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -555,8 +555,6 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = {
*/
static void __init mx35_3ds_init(void)
{
- struct platform_device *imx35_fb_pdev;
-
imx35_soc_init();
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
@@ -579,9 +577,6 @@ static void __init mx35_3ds_init(void)
imx35_add_mxc_nand(&mx35pdk_nand_board_info);
imx35_add_sdhci_esdhc_imx(0, NULL);
- if (mxc_expio_init(MX35_CS5_BASE_ADDR, IMX_GPIO_NR(1, 1)))
- pr_warn("Init of the debugboard failed, all "
- "devices on the debugboard are unusable.\n");
imx35_add_imx_i2c0(&mx35_3ds_i2c0_data);
i2c_register_board_info(
@@ -590,6 +585,15 @@ static void __init mx35_3ds_init(void)
imx35_add_ipu_core();
platform_device_register(&mx35_3ds_ov2640);
imx35_3ds_init_camera();
+}
+
+static void __init mx35_3ds_late_init(void)
+{
+ struct platform_device *imx35_fb_pdev;
+
+ if (mxc_expio_init(MX35_CS5_BASE_ADDR, IMX_GPIO_NR(1, 1)))
+ pr_warn("Init of the debugboard failed, all "
+ "devices on the debugboard are unusable.\n");
imx35_fb_pdev = imx35_add_mx3_sdc_fb(&mx3fb_pdata);
mx35_3ds_lcd.dev.parent = &imx35_fb_pdev->dev;
@@ -618,6 +622,7 @@ MACHINE_START(MX35_3DS, "Freescale MX35PDK")
.init_irq = mx35_init_irq,
.init_time = mx35pdk_timer_init,
.init_machine = mx35_3ds_init,
+ .init_late = mx35_3ds_late_init,
.reserve = mx35_3ds_reserve,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 2d1c50bd8bdf..ed675863655b 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -362,12 +362,8 @@ static void __init pca100_init(void)
if (ret)
printk(KERN_ERR "pca100: Failed to setup pins (%d)\n", ret);
- imx27_add_imx_ssi(0, &pca100_ssi_pdata);
-
imx27_add_imx_uart0(&uart_pdata);
- imx27_add_mxc_mmc(1, &sdhc_pdata);
-
imx27_add_mxc_nand(&pca100_nand_board_info);
/* only the i2c master 1 is used on this CPU card */
@@ -382,6 +378,19 @@ static void __init pca100_init(void)
ARRAY_SIZE(pca100_spi_board_info));
imx27_add_spi_imx0(&pca100_spi0_data);
+ imx27_add_imx_fb(&pca100_fb_data);
+
+ imx27_add_fec(NULL);
+ imx27_add_imx2_wdt();
+ imx27_add_mxc_w1();
+}
+
+static void __init pca100_late_init(void)
+{
+ imx27_add_imx_ssi(0, &pca100_ssi_pdata);
+
+ imx27_add_mxc_mmc(1, &sdhc_pdata);
+
gpio_request(OTG_PHY_CS_GPIO, "usb-otg-cs");
gpio_direction_output(OTG_PHY_CS_GPIO, 1);
gpio_request(USBH2_PHY_CS_GPIO, "usb-host2-cs");
@@ -403,12 +412,6 @@ static void __init pca100_init(void)
if (usbh2_pdata.otg)
imx27_add_mxc_ehci_hs(2, &usbh2_pdata);
-
- imx27_add_imx_fb(&pca100_fb_data);
-
- imx27_add_fec(NULL);
- imx27_add_imx2_wdt();
- imx27_add_mxc_w1();
}
static void __init pca100_timer_init(void)
@@ -421,7 +424,8 @@ MACHINE_START(PCA100, "phyCARD-i.MX27")
.map_io = mx27_map_io,
.init_early = imx27_init_early,
.init_irq = mx27_init_irq,
- .init_machine = pca100_init,
+ .init_machine = pca100_init,
+ .init_late = pca100_late_init,
.init_time = pca100_timer_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 6d879417db49..9f0f55b0422c 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -149,7 +149,7 @@ static unsigned int pcm037_pins[] = {
MX31_PIN_CONTRAST__CONTRAST,
MX31_PIN_D3_SPL__D3_SPL,
MX31_PIN_D3_CLS__D3_CLS,
- MX31_PIN_LCS0__GPI03_23,
+ MX31_PIN_LCS0__GPIO3_23,
/* CSI */
IOMUX_MODE(MX31_PIN_CSI_D5, IOMUX_CONFIG_GPIO),
MX31_PIN_CSI_D6__CSI_D6,
@@ -576,8 +576,6 @@ static struct regulator_consumer_supply dummy_supplies[] = {
*/
static void __init pcm037_init(void)
{
- int ret;
-
imx31_soc_init();
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
@@ -621,20 +619,6 @@ static void __init pcm037_init(void)
imx31_add_mxc_w1();
- /* LAN9217 IRQ pin */
- ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1), "lan9217-irq");
- if (ret)
- pr_warn("could not get LAN irq gpio\n");
- else {
- gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- smsc911x_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- smsc911x_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
- platform_device_register(&pcm037_eth);
- }
-
-
/* I2C adapters and devices */
i2c_register_board_info(1, pcm037_i2c_devices,
ARRAY_SIZE(pcm037_i2c_devices));
@@ -643,26 +627,9 @@ static void __init pcm037_init(void)
imx31_add_imx_i2c2(&pcm037_i2c2_data);
imx31_add_mxc_nand(&pcm037_nand_board_info);
- imx31_add_mxc_mmc(0, &sdhc_pdata);
imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
- /* CSI */
- /* Camera power: default - off */
- ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), "mt9t031-power");
- if (!ret)
- gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
- else
- iclink_mt9t031.power = NULL;
-
- pcm037_init_camera();
-
- pcm970_sja1000_resources[1].start =
- gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
- pcm970_sja1000_resources[1].end =
- gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
- platform_device_register(&pcm970_sja1000);
-
if (otg_mode_host) {
otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
ULPI_OTG_DRVVBUS_EXT);
@@ -677,7 +644,6 @@ static void __init pcm037_init(void)
if (!otg_mode_host)
imx31_add_fsl_usb2_udc(&otg_device_pdata);
-
}
static void __init pcm037_timer_init(void)
@@ -694,6 +660,39 @@ static void __init pcm037_reserve(void)
static void __init pcm037_init_late(void)
{
+ int ret;
+
+ /* LAN9217 IRQ pin */
+ ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1), "lan9217-irq");
+ if (!ret) {
+ gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ platform_device_register(&pcm037_eth);
+ } else {
+ pr_warn("could not get LAN irq gpio\n");
+ }
+
+ imx31_add_mxc_mmc(0, &sdhc_pdata);
+
+ /* CSI */
+ /* Camera power: default - off */
+ ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), "mt9t031-power");
+ if (!ret)
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
+ else
+ iclink_mt9t031.power = NULL;
+
+ pcm037_init_camera();
+
+ pcm970_sja1000_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
+ pcm970_sja1000_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
+ platform_device_register(&pcm970_sja1000);
+
pcm037_eet_init_devices();
}
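
The blocks that moved into pcm037_init_late() all follow the same idiom: claim a GPIO, configure it as an input, translate it to a Linux IRQ number with gpio_to_irq(), patch that number into the device's IRQ resource, and only then register the platform device. A stripped-down sketch of that idiom follows; the device name, GPIO number and MMIO window are made-up placeholders, not values from this patch.

/*
 * Sketch of wiring a GPIO-backed interrupt into a platform device's
 * resources before registering it. EXAMPLE_IRQ_GPIO, the MMIO window and
 * the "example-eth" device are placeholders.
 */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#define EXAMPLE_IRQ_GPIO	42	/* assumed valid GPIO number */

static struct resource example_eth_resources[] = {
	{
		.start	= 0x10000000,
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_IRQ,	/* filled in at run time */
	},
};

static struct platform_device example_eth_device = {
	.name		= "example-eth",
	.id		= -1,
	.resource	= example_eth_resources,
	.num_resources	= ARRAY_SIZE(example_eth_resources),
};

static void __init example_wire_eth_irq(void)
{
	if (gpio_request(EXAMPLE_IRQ_GPIO, "example-eth-irq")) {
		pr_warn("could not get ethernet irq gpio\n");
		return;
	}
	gpio_direction_input(EXAMPLE_IRQ_GPIO);
	example_eth_resources[1].start = gpio_to_irq(EXAMPLE_IRQ_GPIO);
	example_eth_resources[1].end = example_eth_resources[1].start;
	platform_device_register(&example_eth_device);
}
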
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index e447e59c0604..78e2bf8dcd96 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -363,7 +363,6 @@ static void __init pcm043_init(void)
imx35_add_imx_uart0(&uart_pdata);
imx35_add_mxc_nand(&pcm037_nand_board_info);
- imx35_add_imx_ssi(0, &pcm043_ssi_pdata);
imx35_add_imx_uart1(&uart_pdata);
@@ -387,6 +386,12 @@ static void __init pcm043_init(void)
imx35_add_fsl_usb2_udc(&otg_device_pdata);
imx35_add_flexcan1();
+}
+
+static void __init pcm043_late_init(void)
+{
+ imx35_add_imx_ssi(0, &pcm043_ssi_pdata);
+
imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
}
@@ -402,6 +407,7 @@ MACHINE_START(PCM043, "Phytec Phycore pcm043")
.init_early = imx35_init_early,
.init_irq = mx35_init_irq,
.init_time = pcm043_timer_init,
- .init_machine = pcm043_init,
+ .init_machine = pcm043_init,
+ .init_late = pcm043_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index 34df64f133ed..8c2cbd693d21 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -251,7 +251,6 @@ static void __init qong_init(void)
mxc_init_imx_uart();
qong_init_nor_mtd();
- qong_init_fpga();
imx31_add_imx2_wdt();
}
@@ -268,5 +267,6 @@ MACHINE_START(QONG, "Dave/DENX QongEVB-LITE")
.init_irq = mx31_init_irq,
.init_time = qong_timer_init,
.init_machine = qong_init,
+ .init_late = qong_init_fpga,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
deleted file mode 100644
index 1f6bc3f7ae14..000000000000
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * linux/arch/arm/mach-mx1/mach-scb9328.c
- *
- * Copyright (c) 2004 Sascha Hauer <saschahauer@web.de>
- * Copyright (c) 2006-2008 Juergen Beisert <jbeisert@netscape.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/interrupt.h>
-#include <linux/dm9000.h>
-#include <linux/gpio.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#include "common.h"
-#include "devices-imx1.h"
-#include "hardware.h"
-#include "iomux-mx1.h"
-
-/*
- * This scb9328 has a 32MiB flash
- */
-static struct resource flash_resource = {
- .start = MX1_CS0_PHYS,
- .end = MX1_CS0_PHYS + (32 * 1024 * 1024) - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct physmap_flash_data scb_flash_data = {
- .width = 2,
-};
-
-static struct platform_device scb_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &scb_flash_data,
- },
- .resource = &flash_resource,
- .num_resources = 1,
-};
-
-/*
- * scb9328 has a DM9000 network controller
- * connected to CS5, with 16 bit data path
- * and interrupt connected to GPIO 3
- */
-
-/*
- * internal datapath is fixed 16 bit
- */
-static struct dm9000_plat_data dm9000_platdata = {
- .flags = DM9000_PLATF_16BITONLY,
-};
-
-/*
- * the DM9000 drivers wants two defined address spaces
- * to gain access to address latch registers and the data path.
- */
-static struct resource dm9000x_resources[] = {
- {
- .name = "address area",
- .start = MX1_CS5_PHYS,
- .end = MX1_CS5_PHYS + 1,
- .flags = IORESOURCE_MEM, /* address access */
- }, {
- .name = "data area",
- .start = MX1_CS5_PHYS + 4,
- .end = MX1_CS5_PHYS + 5,
- .flags = IORESOURCE_MEM, /* data access */
- }, {
- /* irq number is run-time assigned */
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
- },
-};
-
-static struct platform_device dm9000x_device = {
- .name = "dm9000",
- .id = 0,
- .num_resources = ARRAY_SIZE(dm9000x_resources),
- .resource = dm9000x_resources,
- .dev = {
- .platform_data = &dm9000_platdata,
- }
-};
-
-static const int mxc_uart1_pins[] = {
- PC9_PF_UART1_CTS,
- PC10_PF_UART1_RTS,
- PC11_PF_UART1_TXD,
- PC12_PF_UART1_RXD,
-};
-
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
-static struct platform_device *devices[] __initdata = {
- &scb_flash_device,
- &dm9000x_device,
-};
-
-/*
- * scb9328_init - Init the CPU card itself
- */
-static void __init scb9328_init(void)
-{
- imx1_soc_init();
-
- mxc_gpio_setup_multiple_pins(mxc_uart1_pins,
- ARRAY_SIZE(mxc_uart1_pins), "UART1");
-
- imx1_add_imx_uart0(&uart_pdata);
-
- printk(KERN_INFO"Scb9328: Adding devices\n");
- dm9000x_resources[2].start = gpio_to_irq(IMX_GPIO_NR(3, 3));
- dm9000x_resources[2].end = gpio_to_irq(IMX_GPIO_NR(3, 3));
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-static void __init scb9328_timer_init(void)
-{
- mx1_clocks_init(32000);
-}
-
-MACHINE_START(SCB9328, "Synertronixx scb9328")
- /* Sascha Hauer */
- .atag_offset = 100,
- .map_io = mx1_map_io,
- .init_early = imx1_init_early,
- .init_irq = mx1_init_irq,
- .init_time = scb9328_timer_init,
- .init_machine = scb9328_init,
- .restart = mxc_restart,
-MACHINE_END
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index 27a8f7e3ec08..5ff154c9a086 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -268,6 +268,22 @@ static void __init vpr200_board_init(void)
imx35_add_fec(NULL);
imx35_add_imx2_wdt();
+
+ imx35_add_imx_uart0(NULL);
+ imx35_add_imx_uart2(NULL);
+
+ imx35_add_ipu_core();
+ imx35_add_mx3_sdc_fb(&mx3fb_pdata);
+
+ imx35_add_fsl_usb2_udc(&otg_device_pdata);
+ imx35_add_mxc_ehci_hs(&usb_host_pdata);
+
+ imx35_add_mxc_nand(&vpr200_nand_board_info);
+ imx35_add_sdhci_esdhc_imx(0, NULL);
+}
+
+static void __init vpr200_late_init(void)
+{
imx_add_gpio_keys(&vpr200_gpio_keys_data);
platform_add_devices(devices, ARRAY_SIZE(devices));
@@ -282,18 +298,6 @@ static void __init vpr200_board_init(void)
else
gpio_direction_input(GPIO_PMIC_INT);
- imx35_add_imx_uart0(NULL);
- imx35_add_imx_uart2(NULL);
-
- imx35_add_ipu_core();
- imx35_add_mx3_sdc_fb(&mx3fb_pdata);
-
- imx35_add_fsl_usb2_udc(&otg_device_pdata);
- imx35_add_mxc_ehci_hs(&usb_host_pdata);
-
- imx35_add_mxc_nand(&vpr200_nand_board_info);
- imx35_add_sdhci_esdhc_imx(0, NULL);
-
vpr200_i2c_devices[1].irq = gpio_to_irq(GPIO_PMIC_INT);
i2c_register_board_info(0, vpr200_i2c_devices,
ARRAY_SIZE(vpr200_i2c_devices));
@@ -313,5 +317,6 @@ MACHINE_START(VPR200, "VPR200")
.init_irq = mx35_init_irq,
.init_time = vpr200_timer_init,
.init_machine = vpr200_board_init,
+ .init_late = vpr200_late_init,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
deleted file mode 100644
index 9a42f19be81e..000000000000
--- a/arch/arm/mach-imx/mm-imx1.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * author: Sascha Hauer
- * Created: april 20th, 2004
- * Copyright: Synertronixx GmbH
- *
- * Common code for i.MX1 machines
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/pinctrl/machine.h>
-
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "devices/devices-common.h"
-#include "hardware.h"
-#include "iomux-v1.h"
-
-static struct map_desc imx_io_desc[] __initdata = {
- imx_map_entry(MX1, IO, MT_DEVICE),
-};
-
-void __init mx1_map_io(void)
-{
- iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc));
-}
-
-void __init imx1_init_early(void)
-{
- mxc_set_cpu_type(MXC_CPU_MX1);
- imx_iomuxv1_init(MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR),
- MX1_NUM_GPIO_PORT);
-}
-
-void __init mx1_init_irq(void)
-{
- mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
-}
-
-void __init imx1_soc_init(void)
-{
- imx1_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
- mxc_device_init();
-
- mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
- MX1_GPIO_INT_PORTA, 0);
- mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256,
- MX1_GPIO_INT_PORTB, 0);
- mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256,
- MX1_GPIO_INT_PORTC, 0);
- mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256,
- MX1_GPIO_INT_PORTD, 0);
- imx_add_imx_dma("imx1-dma", MX1_DMA_BASE_ADDR,
- MX1_DMA_INT, MX1_DMA_ERR);
- pinctrl_provide_dummies();
-}
diff --git a/arch/arm/mach-imx/mx1.h b/arch/arm/mach-imx/mx1.h
deleted file mode 100644
index 45bd31cc34d6..000000000000
--- a/arch/arm/mach-imx/mx1.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 1997,1998 Russell King
- * Copyright (C) 1999 ARM Limited
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (c) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MX1_H__
-#define __MACH_MX1_H__
-
-/*
- * Memory map
- */
-#define MX1_IO_BASE_ADDR 0x00200000
-#define MX1_IO_SIZE SZ_1M
-
-#define MX1_CS0_PHYS 0x10000000
-#define MX1_CS0_SIZE 0x02000000
-
-#define MX1_CS1_PHYS 0x12000000
-#define MX1_CS1_SIZE 0x01000000
-
-#define MX1_CS2_PHYS 0x13000000
-#define MX1_CS2_SIZE 0x01000000
-
-#define MX1_CS3_PHYS 0x14000000
-#define MX1_CS3_SIZE 0x01000000
-
-#define MX1_CS4_PHYS 0x15000000
-#define MX1_CS4_SIZE 0x01000000
-
-#define MX1_CS5_PHYS 0x16000000
-#define MX1_CS5_SIZE 0x01000000
-
-/*
- * Register BASEs, based on OFFSETs
- */
-#define MX1_AIPI1_BASE_ADDR (0x00000 + MX1_IO_BASE_ADDR)
-#define MX1_WDT_BASE_ADDR (0x01000 + MX1_IO_BASE_ADDR)
-#define MX1_TIM1_BASE_ADDR (0x02000 + MX1_IO_BASE_ADDR)
-#define MX1_TIM2_BASE_ADDR (0x03000 + MX1_IO_BASE_ADDR)
-#define MX1_RTC_BASE_ADDR (0x04000 + MX1_IO_BASE_ADDR)
-#define MX1_LCDC_BASE_ADDR (0x05000 + MX1_IO_BASE_ADDR)
-#define MX1_UART1_BASE_ADDR (0x06000 + MX1_IO_BASE_ADDR)
-#define MX1_UART2_BASE_ADDR (0x07000 + MX1_IO_BASE_ADDR)
-#define MX1_PWM_BASE_ADDR (0x08000 + MX1_IO_BASE_ADDR)
-#define MX1_DMA_BASE_ADDR (0x09000 + MX1_IO_BASE_ADDR)
-#define MX1_AIPI2_BASE_ADDR (0x10000 + MX1_IO_BASE_ADDR)
-#define MX1_SIM_BASE_ADDR (0x11000 + MX1_IO_BASE_ADDR)
-#define MX1_USBD_BASE_ADDR (0x12000 + MX1_IO_BASE_ADDR)
-#define MX1_CSPI1_BASE_ADDR (0x13000 + MX1_IO_BASE_ADDR)
-#define MX1_MMC_BASE_ADDR (0x14000 + MX1_IO_BASE_ADDR)
-#define MX1_ASP_BASE_ADDR (0x15000 + MX1_IO_BASE_ADDR)
-#define MX1_BTA_BASE_ADDR (0x16000 + MX1_IO_BASE_ADDR)
-#define MX1_I2C_BASE_ADDR (0x17000 + MX1_IO_BASE_ADDR)
-#define MX1_SSI_BASE_ADDR (0x18000 + MX1_IO_BASE_ADDR)
-#define MX1_CSPI2_BASE_ADDR (0x19000 + MX1_IO_BASE_ADDR)
-#define MX1_MSHC_BASE_ADDR (0x1A000 + MX1_IO_BASE_ADDR)
-#define MX1_CCM_BASE_ADDR (0x1B000 + MX1_IO_BASE_ADDR)
-#define MX1_SCM_BASE_ADDR (0x1B804 + MX1_IO_BASE_ADDR)
-#define MX1_GPIO_BASE_ADDR (0x1C000 + MX1_IO_BASE_ADDR)
-#define MX1_GPIO1_BASE_ADDR (0x1C000 + MX1_IO_BASE_ADDR)
-#define MX1_GPIO2_BASE_ADDR (0x1C100 + MX1_IO_BASE_ADDR)
-#define MX1_GPIO3_BASE_ADDR (0x1C200 + MX1_IO_BASE_ADDR)
-#define MX1_GPIO4_BASE_ADDR (0x1C300 + MX1_IO_BASE_ADDR)
-#define MX1_EIM_BASE_ADDR (0x20000 + MX1_IO_BASE_ADDR)
-#define MX1_SDRAMC_BASE_ADDR (0x21000 + MX1_IO_BASE_ADDR)
-#define MX1_MMA_BASE_ADDR (0x22000 + MX1_IO_BASE_ADDR)
-#define MX1_AVIC_BASE_ADDR (0x23000 + MX1_IO_BASE_ADDR)
-#define MX1_CSI_BASE_ADDR (0x24000 + MX1_IO_BASE_ADDR)
-
-/* macro to get at IO space when running virtually */
-#define MX1_IO_P2V(x) IMX_IO_P2V(x)
-#define MX1_IO_ADDRESS(x) IOMEM(MX1_IO_P2V(x))
-
-/* fixed interrput numbers */
-#include <asm/irq.h>
-#define MX1_INT_SOFTINT (NR_IRQS_LEGACY + 0)
-#define MX1_INT_CSI (NR_IRQS_LEGACY + 6)
-#define MX1_DSPA_MAC_INT (NR_IRQS_LEGACY + 7)
-#define MX1_DSPA_INT (NR_IRQS_LEGACY + 8)
-#define MX1_COMP_INT (NR_IRQS_LEGACY + 9)
-#define MX1_MSHC_XINT (NR_IRQS_LEGACY + 10)
-#define MX1_GPIO_INT_PORTA (NR_IRQS_LEGACY + 11)
-#define MX1_GPIO_INT_PORTB (NR_IRQS_LEGACY + 12)
-#define MX1_GPIO_INT_PORTC (NR_IRQS_LEGACY + 13)
-#define MX1_INT_LCDC (NR_IRQS_LEGACY + 14)
-#define MX1_SIM_INT (NR_IRQS_LEGACY + 15)
-#define MX1_SIM_DATA_INT (NR_IRQS_LEGACY + 16)
-#define MX1_RTC_INT (NR_IRQS_LEGACY + 17)
-#define MX1_RTC_SAMINT (NR_IRQS_LEGACY + 18)
-#define MX1_INT_UART2PFERR (NR_IRQS_LEGACY + 19)
-#define MX1_INT_UART2RTS (NR_IRQS_LEGACY + 20)
-#define MX1_INT_UART2DTR (NR_IRQS_LEGACY + 21)
-#define MX1_INT_UART2UARTC (NR_IRQS_LEGACY + 22)
-#define MX1_INT_UART2TX (NR_IRQS_LEGACY + 23)
-#define MX1_INT_UART2RX (NR_IRQS_LEGACY + 24)
-#define MX1_INT_UART1PFERR (NR_IRQS_LEGACY + 25)
-#define MX1_INT_UART1RTS (NR_IRQS_LEGACY + 26)
-#define MX1_INT_UART1DTR (NR_IRQS_LEGACY + 27)
-#define MX1_INT_UART1UARTC (NR_IRQS_LEGACY + 28)
-#define MX1_INT_UART1TX (NR_IRQS_LEGACY + 29)
-#define MX1_INT_UART1RX (NR_IRQS_LEGACY + 30)
-#define MX1_VOICE_DAC_INT (NR_IRQS_LEGACY + 31)
-#define MX1_VOICE_ADC_INT (NR_IRQS_LEGACY + 32)
-#define MX1_PEN_DATA_INT (NR_IRQS_LEGACY + 33)
-#define MX1_PWM_INT (NR_IRQS_LEGACY + 34)
-#define MX1_SDHC_INT (NR_IRQS_LEGACY + 35)
-#define MX1_INT_I2C (NR_IRQS_LEGACY + 39)
-#define MX1_INT_CSPI2 (NR_IRQS_LEGACY + 40)
-#define MX1_INT_CSPI1 (NR_IRQS_LEGACY + 41)
-#define MX1_SSI_TX_INT (NR_IRQS_LEGACY + 42)
-#define MX1_SSI_TX_ERR_INT (NR_IRQS_LEGACY + 43)
-#define MX1_SSI_RX_INT (NR_IRQS_LEGACY + 44)
-#define MX1_SSI_RX_ERR_INT (NR_IRQS_LEGACY + 45)
-#define MX1_TOUCH_INT (NR_IRQS_LEGACY + 46)
-#define MX1_INT_USBD0 (NR_IRQS_LEGACY + 47)
-#define MX1_INT_USBD1 (NR_IRQS_LEGACY + 48)
-#define MX1_INT_USBD2 (NR_IRQS_LEGACY + 49)
-#define MX1_INT_USBD3 (NR_IRQS_LEGACY + 50)
-#define MX1_INT_USBD4 (NR_IRQS_LEGACY + 51)
-#define MX1_INT_USBD5 (NR_IRQS_LEGACY + 52)
-#define MX1_INT_USBD6 (NR_IRQS_LEGACY + 53)
-#define MX1_BTSYS_INT (NR_IRQS_LEGACY + 55)
-#define MX1_BTTIM_INT (NR_IRQS_LEGACY + 56)
-#define MX1_BTWUI_INT (NR_IRQS_LEGACY + 57)
-#define MX1_TIM2_INT (NR_IRQS_LEGACY + 58)
-#define MX1_TIM1_INT (NR_IRQS_LEGACY + 59)
-#define MX1_DMA_ERR (NR_IRQS_LEGACY + 60)
-#define MX1_DMA_INT (NR_IRQS_LEGACY + 61)
-#define MX1_GPIO_INT_PORTD (NR_IRQS_LEGACY + 62)
-#define MX1_WDT_INT (NR_IRQS_LEGACY + 63)
-
-/* DMA */
-#define MX1_DMA_REQ_UART3_T 2
-#define MX1_DMA_REQ_UART3_R 3
-#define MX1_DMA_REQ_SSI2_T 4
-#define MX1_DMA_REQ_SSI2_R 5
-#define MX1_DMA_REQ_CSI_STAT 6
-#define MX1_DMA_REQ_CSI_R 7
-#define MX1_DMA_REQ_MSHC 8
-#define MX1_DMA_REQ_DSPA_DCT_DOUT 9
-#define MX1_DMA_REQ_DSPA_DCT_DIN 10
-#define MX1_DMA_REQ_DSPA_MAC 11
-#define MX1_DMA_REQ_EXT 12
-#define MX1_DMA_REQ_SDHC 13
-#define MX1_DMA_REQ_SPI1_R 14
-#define MX1_DMA_REQ_SPI1_T 15
-#define MX1_DMA_REQ_SSI_T 16
-#define MX1_DMA_REQ_SSI_R 17
-#define MX1_DMA_REQ_ASP_DAC 18
-#define MX1_DMA_REQ_ASP_ADC 19
-#define MX1_DMA_REQ_USP_EP(x) (20 + (x))
-#define MX1_DMA_REQ_SPI2_R 26
-#define MX1_DMA_REQ_SPI2_T 27
-#define MX1_DMA_REQ_UART2_T 28
-#define MX1_DMA_REQ_UART2_R 29
-#define MX1_DMA_REQ_UART1_T 30
-#define MX1_DMA_REQ_UART1_R 31
-
-/*
- * This doesn't depend on IMX_NEEDS_DEPRECATED_SYMBOLS
- * to not break drivers/usb/gadget/imx_udc. Should go
- * away after this driver uses the new name.
- */
-#define USBD_INT0 MX1_INT_USBD0
-
-#endif /* ifndef __MACH_MX1_H__ */
diff --git a/arch/arm/mach-imx/mx31lilly-db.c b/arch/arm/mach-imx/mx31lilly-db.c
index 649fe49ce85e..231f900a1de7 100644
--- a/arch/arm/mach-imx/mx31lilly-db.c
+++ b/arch/arm/mach-imx/mx31lilly-db.c
@@ -43,18 +43,6 @@
*/
static unsigned int lilly_db_board_pins[] __initdata = {
- MX31_PIN_CTS1__CTS1,
- MX31_PIN_RTS1__RTS1,
- MX31_PIN_TXD1__TXD1,
- MX31_PIN_RXD1__RXD1,
- MX31_PIN_CTS2__CTS2,
- MX31_PIN_RTS2__RTS2,
- MX31_PIN_TXD2__TXD2,
- MX31_PIN_RXD2__RXD2,
- MX31_PIN_CSPI3_MOSI__RXD3,
- MX31_PIN_CSPI3_MISO__TXD3,
- MX31_PIN_CSPI3_SCLK__RTS3,
- MX31_PIN_CSPI3_SPI_RDY__CTS3,
MX31_PIN_SD1_DATA3__SD1_DATA3,
MX31_PIN_SD1_DATA2__SD1_DATA2,
MX31_PIN_SD1_DATA1__SD1_DATA1,
@@ -86,11 +74,6 @@ static unsigned int lilly_db_board_pins[] __initdata = {
MX31_PIN_CONTRAST__CONTRAST,
};
-/* UART */
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
/* MMC support */
static int mxc_mmc1_get_ro(struct device *dev)
@@ -203,9 +186,6 @@ void __init mx31lilly_db_init(void)
mxc_iomux_setup_multiple_pins(lilly_db_board_pins,
ARRAY_SIZE(lilly_db_board_pins),
"development board pins");
- imx31_add_imx_uart0(&uart_pdata);
- imx31_add_imx_uart1(&uart_pdata);
- imx31_add_imx_uart2(&uart_pdata);
imx31_add_mxc_mmc(0, &mmc_pdata);
mx31lilly_init_fb();
}
diff --git a/arch/arm/mach-imx/mx31lite-db.c b/arch/arm/mach-imx/mx31lite-db.c
index 5a160b7e4fce..c66a006bf2fd 100644
--- a/arch/arm/mach-imx/mx31lite-db.c
+++ b/arch/arm/mach-imx/mx31lite-db.c
@@ -45,19 +45,6 @@
*/
static unsigned int litekit_db_board_pins[] __initdata = {
- /* UART1 */
- MX31_PIN_CTS1__CTS1,
- MX31_PIN_RTS1__RTS1,
- MX31_PIN_TXD1__TXD1,
- MX31_PIN_RXD1__RXD1,
- /* SPI 0 */
- MX31_PIN_CSPI1_SCLK__SCLK,
- MX31_PIN_CSPI1_MOSI__MOSI,
- MX31_PIN_CSPI1_MISO__MISO,
- MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
- MX31_PIN_CSPI1_SS0__SS0,
- MX31_PIN_CSPI1_SS1__SS1,
- MX31_PIN_CSPI1_SS2__SS2,
/* SDHC1 */
MX31_PIN_SD1_DATA0__SD1_DATA0,
MX31_PIN_SD1_DATA1__SD1_DATA1,
@@ -67,11 +54,6 @@ static unsigned int litekit_db_board_pins[] __initdata = {
MX31_PIN_SD1_CMD__SD1_CMD,
};
-/* UART */
-static const struct imxuart_platform_data uart_pdata __initconst = {
- .flags = IMXUART_HAVE_RTSCTS,
-};
-
/* MMC */
static int gpio_det, gpio_wp;
@@ -146,19 +128,6 @@ static const struct imxmmc_platform_data mmc_pdata __initconst = {
.exit = mxc_mmc1_exit,
};
-/* SPI */
-
-static int spi_internal_chipselect[] = {
- MXC_SPI_CS(0),
- MXC_SPI_CS(1),
- MXC_SPI_CS(2),
-};
-
-static const struct spi_imx_master spi0_pdata __initconst = {
- .chipselect = spi_internal_chipselect,
- .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
-};
-
/* GPIO LEDs */
static const struct gpio_led litekit_leds[] __initconst = {
@@ -187,9 +156,7 @@ void __init mx31lite_db_init(void)
mxc_iomux_setup_multiple_pins(litekit_db_board_pins,
ARRAY_SIZE(litekit_db_board_pins),
"development board pins");
- imx31_add_imx_uart0(&uart_pdata);
imx31_add_mxc_mmc(0, &mmc_pdata);
- imx31_add_spi_imx0(&spi0_pdata);
gpio_led_register_device(-1, &litekit_led_platform_data);
imx31_add_imx2_wdt();
imx31_add_mxc_rtc();
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index fe708e26d021..1515e498d348 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -217,7 +217,7 @@ struct imx6_cpu_pm_info {
u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */
} __aligned(8);
-void imx6q_set_int_mem_clk_lpm(bool enable)
+void imx6_set_int_mem_clk_lpm(bool enable)
{
u32 val = readl_relaxed(ccm_base + CGPR);
@@ -367,7 +367,7 @@ static int imx6q_pm_enter(suspend_state_t state)
switch (state) {
case PM_SUSPEND_STANDBY:
imx6_set_lpm(STOP_POWER_ON);
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
imx_gpc_pre_suspend(false);
if (cpu_is_imx6sl())
imx6sl_set_wait_clk(true);
@@ -380,7 +380,7 @@ static int imx6q_pm_enter(suspend_state_t state)
break;
case PM_SUSPEND_MEM:
imx6_set_lpm(STOP_POWER_OFF);
- imx6q_set_int_mem_clk_lpm(false);
+ imx6_set_int_mem_clk_lpm(false);
imx6q_enable_wb(true);
/*
* For suspend into ocram, asm code already take care of
@@ -398,7 +398,7 @@ static int imx6q_pm_enter(suspend_state_t state)
imx_gpc_post_resume();
imx6_enable_rbc(false);
imx6q_enable_wb(false);
- imx6q_set_int_mem_clk_lpm(true);
+ imx6_set_int_mem_clk_lpm(true);
imx6_set_lpm(WAIT_CLOCKED);
break;
default:
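
For context on the imx6q_set_int_mem_clk_lpm() to imx6_set_int_mem_clk_lpm() rename above: the helper is a plain bool-gated read-modify-write of one CCM register bit. A self-contained sketch of that shape is below; EXAMPLE_CGPR, EXAMPLE_INT_MEM_CLK_LPM and example_ccm_base are placeholder names standing in for the real offsets and mapping, which this hunk does not show.

/*
 * Sketch: toggling a single clock-controller bit from a bool, in the style
 * of the helper renamed above. All names and values are placeholders; only
 * readl_relaxed()/writel_relaxed() are real kernel APIs.
 */
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CGPR			0x64
#define EXAMPLE_INT_MEM_CLK_LPM		(1 << 17)

static void __iomem *example_ccm_base;	/* assumed mapped during init */

static void example_set_int_mem_clk_lpm(bool enable)
{
	u32 val = readl_relaxed(example_ccm_base + EXAMPLE_CGPR);

	if (enable)
		val |= EXAMPLE_INT_MEM_CLK_LPM;
	else
		val &= ~EXAMPLE_INT_MEM_CLK_LPM;
	writel_relaxed(val, example_ccm_base + EXAMPLE_CGPR);
}
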
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index 599f973e10d8..cefe44f6889b 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -21,7 +21,6 @@ if ARCH_INTEGRATOR
config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
select INTEGRATOR_AP_TIMER
- select MIGHT_HAVE_PCI
select SERIAL_AMBA_PL010 if TTY
select SERIAL_AMBA_PL010_CONSOLE if TTY
select SOC_BUS
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index c7bb83205f5b..23b98fd414bf 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -17,33 +17,19 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/string.h>
#include <linux/syscore_ops.h>
#include <linux/amba/bus.h>
-#include <linux/amba/kmi.h>
#include <linux/io.h>
#include <linux/irqchip.h>
-#include <linux/platform_data/clk-integrator.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/stat.h>
#include <linux/termios.h>
-#include <asm/setup.h>
-#include <asm/param.h> /* HZ */
-#include <asm/mach-types.h>
-
#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
#include <asm/mach/map.h>
-#include <asm/mach/time.h>
#include "hardware.h"
#include "cm.h"
@@ -68,14 +54,8 @@ static void __iomem *ebi_base;
/*
* Logical Physical
- * ef000000 Cache flush
- * f1100000 11000000 System controller registers
- * f1300000 13000000 Counter/Timer
* f1400000 14000000 Interrupt controller
* f1600000 16000000 UART 0
- * f1700000 17000000 UART 1
- * f1a00000 1a000000 Debug LEDs
- * f1b00000 1b000000 GPIO
*/
static struct map_desc ap_io_desc[] __initdata __maybe_unused = {
@@ -89,16 +69,6 @@ static struct map_desc ap_io_desc[] __initdata __maybe_unused = {
.pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE),
.length = SZ_4K,
.type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(INTEGRATOR_DBG_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_DBG_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(INTEGRATOR_AP_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_AP_GPIO_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
}
};
@@ -196,16 +166,10 @@ static void __init ap_init_irq_of(void)
/* For the Device Tree, add in the UART callbacks as AUXDATA */
static struct of_dev_auxdata ap_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_RTC_BASE,
- "rtc", NULL),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART0_BASE,
"uart0", &ap_uart_data),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART1_BASE,
"uart1", &ap_uart_data),
- OF_DEV_AUXDATA("arm,primecell", KMI0_BASE,
- "kmi0", NULL),
- OF_DEV_AUXDATA("arm,primecell", KMI1_BASE,
- "kmi1", NULL),
{ /* sentinel */ },
};
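
The OF_DEV_AUXDATA() entries that remain in this table exist to force a fixed device name (used for clock lookups, per the comment above the table) and to attach legacy platform data to device-tree nodes matched by compatible string and unit address. A minimal sketch of how such a table is declared and consumed is below; the platform-data type, the 0x16000000 address and my_board_init() are invented for illustration.

/*
 * Sketch: an auxdata table handed to the OF platform core. The pdata type,
 * address and board function are illustrative only.
 */
#include <linux/init.h>
#include <linux/of_platform.h>

struct my_uart_pdata {
	int dummy;			/* placeholder platform data */
};

static struct my_uart_pdata my_uart_data;

static struct of_dev_auxdata my_auxdata_lookup[] __initdata = {
	/* compatible, unit address, forced device name, platform data */
	OF_DEV_AUXDATA("arm,primecell", 0x16000000, "uart0", &my_uart_data),
	{ /* sentinel */ },
};

static void __init my_board_init(void)
{
	of_platform_default_populate(NULL, my_auxdata_lookup, NULL);
}
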
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 825298349bf5..772a7cf2010e 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -7,67 +7,40 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
-#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/kmi.h>
-#include <linux/amba/clcd.h>
-#include <linux/platform_data/video-clcd-versatile.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <linux/irqchip.h>
-#include <linux/gfp.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/sched_clock.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
#include <asm/mach/map.h>
-#include <asm/mach/time.h>
#include "hardware.h"
#include "cm.h"
#include "common.h"
+/* Base address to the core module header */
+static struct regmap *cm_map;
/* Base address to the CP controller */
static void __iomem *intcp_con_base;
-#define INTCP_PA_CLCD_BASE 0xc0000000
+#define CM_COUNTER_OFFSET 0x28
/*
* Logical Physical
- * f1000000 10000000 Core module registers
- * f1300000 13000000 Counter/Timer
* f1400000 14000000 Interrupt controller
* f1600000 16000000 UART 0
- * f1700000 17000000 UART 1
- * f1a00000 1a000000 Debug LEDs
- * fc900000 c9000000 GPIO
* fca00000 ca000000 SIC
*/
static struct map_desc intcp_io_desc[] __initdata __maybe_unused = {
{
- .virtual = IO_ADDRESS(INTEGRATOR_HDR_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_HDR_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(INTEGRATOR_CT_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_CT_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
.virtual = IO_ADDRESS(INTEGRATOR_IC_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_IC_BASE),
.length = SZ_4K,
@@ -78,16 +51,6 @@ static struct map_desc intcp_io_desc[] __initdata __maybe_unused = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(INTEGRATOR_DBG_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_DBG_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(INTEGRATOR_CP_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_CP_GPIO_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
.virtual = IO_ADDRESS(INTEGRATOR_CP_SIC_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
.length = SZ_4K,
@@ -121,66 +84,20 @@ static struct mmci_platform_data mmc_data = {
.gpio_cd = -1,
};
-/*
- * CLCD support
- */
-/*
- * Ensure VGA is selected.
- */
-static void cp_clcd_enable(struct clcd_fb *fb)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
- | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
-
- if (var->bits_per_pixel <= 8 ||
- (var->bits_per_pixel == 16 && var->green.length == 5))
- /* Pseudocolor, RGB555, BGR555 */
- val |= CM_CTRL_LCDMUXSEL_VGA555_TFT555;
- else if (fb->fb.var.bits_per_pixel <= 16)
- /* truecolor RGB565 */
- val |= CM_CTRL_LCDMUXSEL_VGA565_TFT555;
- else
- val = 0; /* no idea for this, don't trust the docs */
-
- cm_control(CM_CTRL_LCDMUXSEL_MASK|
- CM_CTRL_LCDEN0|
- CM_CTRL_LCDEN1|
- CM_CTRL_STATIC1|
- CM_CTRL_STATIC2|
- CM_CTRL_STATIC|
- CM_CTRL_n24BITEN, val);
-}
-
-static int cp_clcd_setup(struct clcd_fb *fb)
-{
- fb->panel = versatile_clcd_get_panel("VGA");
- if (!fb->panel)
- return -EINVAL;
-
- return versatile_clcd_setup_dma(fb, SZ_1M);
-}
-
-static struct clcd_board clcd_data = {
- .name = "Integrator/CP",
- .caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 | CLCD_CAP_888,
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .enable = cp_clcd_enable,
- .setup = cp_clcd_setup,
- .mmap = versatile_clcd_mmap_dma,
- .remove = versatile_clcd_remove_dma,
-};
-
-#define REFCOUNTER (__io_address(INTEGRATOR_HDR_BASE) + 0x28)
-
static u64 notrace intcp_read_sched_clock(void)
{
- return readl(REFCOUNTER);
+ unsigned int val;
+
+ /* MMIO so discard return code */
+ regmap_read(cm_map, CM_COUNTER_OFFSET, &val);
+ return val;
}
static void __init intcp_init_early(void)
{
+ cm_map = syscon_regmap_lookup_by_compatible("arm,core-module-integrator");
+ if (IS_ERR(cm_map))
+ return;
sched_clock_register(intcp_read_sched_clock, 32, 24000000);
}
@@ -195,22 +112,8 @@ static void __init intcp_init_irq_of(void)
* and enforce the bus names since these are used for clock lookups.
*/
static struct of_dev_auxdata intcp_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_RTC_BASE,
- "rtc", NULL),
- OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART0_BASE,
- "uart0", NULL),
- OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART1_BASE,
- "uart1", NULL),
- OF_DEV_AUXDATA("arm,primecell", KMI0_BASE,
- "kmi0", NULL),
- OF_DEV_AUXDATA("arm,primecell", KMI1_BASE,
- "kmi1", NULL),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_CP_MMC_BASE,
"mmci", &mmc_data),
- OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_CP_AACI_BASE,
- "aaci", &mmc_data),
- OF_DEV_AUXDATA("arm,primecell", INTCP_PA_CLCD_BASE,
- "clcd", &clcd_data),
{ /* sentinel */ },
};
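
Condensing the new Integrator/CP code above: instead of dereferencing a statically mapped core-module base, the counter is now read through a syscon regmap looked up by its compatible string, and the sched_clock source is registered only if that lookup succeeds. The same logic pulled together in one place, with the surrounding board code trimmed (offset, compatible string and clock rate are taken from the hunk):

/*
 * Condensed sketch of the regmap-backed sched_clock source added above.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>

#define CM_COUNTER_OFFSET	0x28

static struct regmap *cm_map;

static u64 notrace cm_read_sched_clock(void)
{
	unsigned int val = 0;

	/* MMIO-backed regmap, so the return code is deliberately ignored */
	regmap_read(cm_map, CM_COUNTER_OFFSET, &val);
	return val;
}

static void __init cm_sched_clock_init(void)
{
	cm_map = syscon_regmap_lookup_by_compatible("arm,core-module-integrator");
	if (IS_ERR(cm_map))
		return;
	sched_clock_register(cm_read_sched_clock, 32, 24000000);
}
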
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index 8ff61be1a29f..24bd64dabdfc 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -8,8 +8,6 @@ config ARCH_KEYSTONE
select COMMON_CLK_KEYSTONE
select ARCH_SUPPORTS_BIG_ENDIAN
select ZONE_DMA if ARM_LPAE
- select MIGHT_HAVE_PCI
- select PCI_DOMAINS if PCI
select PINCTRL
help
Support for boards based on the Texas Instruments Keystone family of
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 6af5430d0d97..f72e1e9f5fc5 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -219,7 +219,6 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ,
MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
@@ -242,9 +241,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
eth_data->duplex = DUPLEX_FULL;
}
- orion_ge10_init(eth_data,
- GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
- NO_IRQ);
+ orion_ge10_init(eth_data, GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM);
}
@@ -266,9 +263,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
eth_data->duplex = DUPLEX_FULL;
}
- orion_ge11_init(eth_data,
- GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
- NO_IRQ);
+ orion_ge11_init(eth_data, GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM);
}
/*****************************************************************************
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 0b7fe74ff46d..e4f21086b42b 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -268,80 +268,6 @@ static void __init apx4devkit_init(void)
apx4devkit_phy_fixup);
}
-#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
-#define ENET0_MDIO__GPIO_4_1 MXS_GPIO_NR(4, 1)
-#define ENET0_RX_EN__GPIO_4_2 MXS_GPIO_NR(4, 2)
-#define ENET0_RXD0__GPIO_4_3 MXS_GPIO_NR(4, 3)
-#define ENET0_RXD1__GPIO_4_4 MXS_GPIO_NR(4, 4)
-#define ENET0_TX_EN__GPIO_4_6 MXS_GPIO_NR(4, 6)
-#define ENET0_TXD0__GPIO_4_7 MXS_GPIO_NR(4, 7)
-#define ENET0_TXD1__GPIO_4_8 MXS_GPIO_NR(4, 8)
-#define ENET_CLK__GPIO_4_16 MXS_GPIO_NR(4, 16)
-
-#define TX28_FEC_PHY_POWER MXS_GPIO_NR(3, 29)
-#define TX28_FEC_PHY_RESET MXS_GPIO_NR(4, 13)
-#define TX28_FEC_nINT MXS_GPIO_NR(4, 5)
-
-static const struct gpio const tx28_gpios[] __initconst = {
- { ENET0_MDC__GPIO_4_0, GPIOF_OUT_INIT_LOW, "GPIO_4_0" },
- { ENET0_MDIO__GPIO_4_1, GPIOF_OUT_INIT_LOW, "GPIO_4_1" },
- { ENET0_RX_EN__GPIO_4_2, GPIOF_OUT_INIT_LOW, "GPIO_4_2" },
- { ENET0_RXD0__GPIO_4_3, GPIOF_OUT_INIT_LOW, "GPIO_4_3" },
- { ENET0_RXD1__GPIO_4_4, GPIOF_OUT_INIT_LOW, "GPIO_4_4" },
- { ENET0_TX_EN__GPIO_4_6, GPIOF_OUT_INIT_LOW, "GPIO_4_6" },
- { ENET0_TXD0__GPIO_4_7, GPIOF_OUT_INIT_LOW, "GPIO_4_7" },
- { ENET0_TXD1__GPIO_4_8, GPIOF_OUT_INIT_LOW, "GPIO_4_8" },
- { ENET_CLK__GPIO_4_16, GPIOF_OUT_INIT_LOW, "GPIO_4_16" },
- { TX28_FEC_PHY_POWER, GPIOF_OUT_INIT_LOW, "fec-phy-power" },
- { TX28_FEC_PHY_RESET, GPIOF_OUT_INIT_LOW, "fec-phy-reset" },
- { TX28_FEC_nINT, GPIOF_DIR_IN, "fec-int" },
-};
-
-static void __init tx28_post_init(void)
-{
- struct device_node *np;
- struct platform_device *pdev;
- struct pinctrl *pctl;
- int ret;
-
- enable_clk_enet_out();
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx28-fec");
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- pr_err("%s: failed to find fec device\n", __func__);
- return;
- }
-
- pctl = pinctrl_get_select(&pdev->dev, "gpio_mode");
- if (IS_ERR(pctl)) {
- pr_err("%s: failed to get pinctrl state\n", __func__);
- return;
- }
-
- ret = gpio_request_array(tx28_gpios, ARRAY_SIZE(tx28_gpios));
- if (ret) {
- pr_err("%s: failed to request gpios: %d\n", __func__, ret);
- return;
- }
-
- /* Power up fec phy */
- gpio_set_value(TX28_FEC_PHY_POWER, 1);
- msleep(26); /* 25ms according to data sheet */
-
- /* Mode strap pins */
- gpio_set_value(ENET0_RX_EN__GPIO_4_2, 1);
- gpio_set_value(ENET0_RXD0__GPIO_4_3, 1);
- gpio_set_value(ENET0_RXD1__GPIO_4_4, 1);
-
- udelay(100); /* minimum assertion time for nRST */
-
- /* Deasserting FEC PHY RESET */
- gpio_set_value(TX28_FEC_PHY_RESET, 1);
-
- pinctrl_put(pctl);
-}
-
static void __init crystalfontz_init(void)
{
update_fec_mac_prop(OUI_CRYSTALFONTZ);
@@ -501,9 +427,6 @@ static void __init mxs_machine_init(void)
of_platform_default_populate(NULL, NULL, parent);
mxs_restart_init();
-
- if (of_machine_is_compatible("karo,tx28"))
- tx28_post_init();
}
#define MXS_CLKCTRL_RESET_CHIP (1 << 1)
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index b7e9801fdaa4..3ae45b8d7b0a 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -7,6 +7,7 @@ menuconfig ARCH_NOMADIK
select CLKSRC_NOMADIK_MTU_SCHED_CLOCK
select CPU_ARM926T
select GPIOLIB
+ select MFD_SYSCON
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
select PINCTRL_NOMADIK
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index 7119ef28e0ad..357be2debc9d 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -19,7 +19,7 @@
#include "board-h2.h"
#include "mmc.h"
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
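
The OMAP1/OMAP2 hunks in the rest of this patch repeatedly collapse the open-coded "built-in or module?" preprocessor test into IS_ENABLED(). IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, is true when the option is =y or =m, which is exactly what the old two-term defined() test spelled out; IS_BUILTIN() and IS_MODULE() cover the narrower cases. A short sketch of the equivalence, with CONFIG_FOO as a stand-in option name:

/* Sketch of the conversion; CONFIG_FOO is a placeholder option name. */
#include <linux/kconfig.h>

#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
/* old spelling: option built in (=y) or built as a module (=m) */
#endif

#if IS_ENABLED(CONFIG_FOO)
/* same condition, one macro */
#endif

#if IS_BUILTIN(CONFIG_FOO)	/* =y only */
#endif

#if IS_MODULE(CONFIG_FOO)	/* =m only */
#endif
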
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index cd146ed0538d..675254ee4b1e 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -349,7 +349,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
-#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#elif IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* needs OTG cable, or NONSTANDARD (B-to-MiniB) */
.hmc_mode = 20, /* 1:dev|otg(off) 1:host 2:disabled */
#endif
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index 43aab63cbc39..4f58bfa5e754 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -20,7 +20,7 @@
#include "board-h3.h"
#include "mmc.h"
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f7c8c63dd532..e62f9d454f10 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -368,7 +368,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
-#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#elif IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
.hmc_mode = 20, /* 1:dev|otg(off) 1:host 2:disabled */
#endif
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 9525ef9bc6c0..e424df901dbd 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -401,7 +401,7 @@ static struct platform_device lcd_device = {
};
/* MMC Card */
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static struct omap_mmc_platform_data htc_mmc1_data = {
.nr_slots = 1,
.switch_slot = NULL,
@@ -586,7 +586,7 @@ static void __init htcherald_init(void)
omap_register_i2c_bus(1, 100, NULL, 0);
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
htc_mmc_data[0] = &htc_mmc1_data;
omap1_init_mmc(htc_mmc_data, 1);
#endif
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index ae90bd02b3bf..67e188271643 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -315,7 +315,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
-#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#elif IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
.hmc_mode = 20, /* 1:dev|otg(off) 1:host 2:disabled */
#endif
@@ -328,7 +328,7 @@ static struct omap_lcd_config innovator1610_lcd_config __initdata = {
};
#endif
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dd3a3ad797ea..ee8d9f553db4 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -159,7 +159,7 @@ static struct omap_usb_config nokia770_usb_config __initdata = {
.extcon = "tahvo-usb",
};
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
#define NOKIA770_GPIO_MMC_POWER 41
#define NOKIA770_GPIO_MMC_SWITCH 23
@@ -216,7 +216,7 @@ static inline void nokia770_mmc_init(void)
}
#endif
-#if defined(CONFIG_I2C_CBUS_GPIO) || defined(CONFIG_I2C_CBUS_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_I2C_CBUS_GPIO)
static struct i2c_cbus_platform_data nokia770_cbus_data = {
.clk_gpio = OMAP_MPUIO(9),
.dat_gpio = OMAP_MPUIO(10),
diff --git a/arch/arm/mach-omap1/board-sx1-mmc.c b/arch/arm/mach-omap1/board-sx1-mmc.c
index a9373570bbb1..79f0af8bfae0 100644
--- a/arch/arm/mach-omap1/board-sx1-mmc.c
+++ b/arch/arm/mach-omap1/board-sx1-mmc.c
@@ -20,7 +20,7 @@
#include "mmc.h"
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 8c8be861fff2..baaf902b7016 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -33,7 +33,7 @@
#include "mmc.h"
#include "sram.h"
-#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_OMAP)
#define OMAP_RTC_BASE 0xfffb4800
@@ -72,7 +72,7 @@ static inline void omap_init_mbox(void) { }
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
int controller_nr)
@@ -230,7 +230,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
/*-------------------------------------------------------------------------*/
/* OMAP7xx SPI support */
-#if defined(CONFIG_SPI_OMAP_100K) || defined(CONFIG_SPI_OMAP_100K_MODULE)
+#if IS_ENABLED(CONFIG_SPI_OMAP_100K)
struct platform_device omap_spi1 = {
.name = "omap1_spi100k",
@@ -312,7 +312,7 @@ static inline void omap_init_sti(void) {}
* mcbsp1..3 = 5..7
*/
-#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
+#if IS_ENABLED(CONFIG_SPI_OMAP_UWIRE)
#define OMAP_UWIRE_BASE 0xfffb3000
@@ -418,7 +418,7 @@ static int __init omap1_init_devices(void)
}
arch_initcall(omap1_init_devices);
-#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+#if IS_ENABLED(CONFIG_OMAP_WATCHDOG)
static struct resource wdt_resources[] = {
{
diff --git a/arch/arm/mach-omap1/fb.c b/arch/arm/mach-omap1/fb.c
index c770d45c7226..ddab04087b7a 100644
--- a/arch/arm/mach-omap1/fb.c
+++ b/arch/arm/mach-omap1/fb.c
@@ -33,7 +33,7 @@
#include <asm/mach/map.h>
-#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_FB_OMAP)
static bool omapfb_lcd_configured;
static struct omapfb_platform_data omapfb_config;
diff --git a/arch/arm/mach-omap1/include/mach/usb.h b/arch/arm/mach-omap1/include/mach/usb.h
index 2c263051dc51..a7c5559caef2 100644
--- a/arch/arm/mach-omap1/include/mach/usb.h
+++ b/arch/arm/mach-omap1/include/mach/usb.h
@@ -12,7 +12,7 @@
void omap_otg_init(struct omap_usb_config *config);
-#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
+#if IS_ENABLED(CONFIG_USB)
void omap1_usb_init(struct omap_usb_config *pdata);
#else
static inline void omap1_usb_init(struct omap_usb_config *pdata)
diff --git a/arch/arm/mach-omap1/mmc.h b/arch/arm/mach-omap1/mmc.h
index 39c2b13de884..d7b46880e4ca 100644
--- a/arch/arm/mach-omap1/mmc.h
+++ b/arch/arm/mach-omap1/mmc.h
@@ -7,7 +7,7 @@
#define OMAP1_MMC1_BASE 0xfffb7800
#define OMAP1_MMC2_BASE 0xfffb7c00 /* omap16xx only */
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP)
void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers);
#else
diff --git a/arch/arm/mach-omap1/usb.c b/arch/arm/mach-omap1/usb.c
index 4118db50d5e8..2506e598a067 100644
--- a/arch/arm/mach-omap1/usb.c
+++ b/arch/arm/mach-omap1/usb.c
@@ -136,7 +136,7 @@ omap_otg_init(struct omap_usb_config *config)
}
#endif
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
if (config->otg || config->register_host) {
struct platform_device *ohci_device = config->ohci_device;
int status;
@@ -221,7 +221,7 @@ static inline void udc_device_init(struct omap_usb_config *pdata)
#endif
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32)0;
@@ -612,7 +612,7 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
}
#endif
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
if (config->register_host) {
int status;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 5a0b380a8166..a9afeebd59f2 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -192,12 +192,6 @@ config MACH_OMAP2_TUSB6010
depends on ARCH_OMAP2 && SOC_OMAP2420
default y if MACH_NOKIA_N8X0
-config MACH_OMAP_LDP
- bool "OMAP3 LDP board"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
depends on ARCH_OMAP3
@@ -222,12 +216,6 @@ config MACH_NOKIA_N8X0
select MACH_NOKIA_N810
select MACH_NOKIA_N810_WIMAX
-config MACH_NOKIA_RX51
- bool "Nokia N900 (RX-51) phone"
- depends on ARCH_OMAP3
- default y
- select OMAP_PACKAGE_CBB
-
config OMAP3_SDRC_AC_TIMING
bool "Enable SDRC AC timing register changes"
depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index a7f2d051f524..5b37ec29996e 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -231,11 +231,7 @@ obj-$(CONFIG_SOC_OMAP2420) += msdi.o
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o pdata-quirks.o
-obj-$(CONFIG_MACH_OMAP_LDP) += board-ldp.o
obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
-obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
-obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
# Platform specific device init code
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index 70b21cc279ba..2188dc30e232 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -81,8 +81,7 @@ __init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
pr_err("Unable to register NOR device\n");
}
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
static struct omap_onenand_platform_data board_onenand_data = {
.dma_channel = -1, /* disable DMA in OMAP OneNAND driver */
};
@@ -97,10 +96,9 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
gpmc_onenand_init(&board_onenand_data);
}
-#endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
+#endif /* IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) */
-#if defined(CONFIG_MTD_NAND_OMAP2) || \
- defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
/* Note that all values in this struct are in nanoseconds */
struct gpmc_timings nand_default_timings[1] = {
@@ -144,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_SW;
gpmc_nand_init(&board_nand_data, gpmc_t);
}
-#endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
+#endif /* IS_ENABLED(CONFIG_MTD_NAND_OMAP2) */
/**
* get_gpmc0_type - Reads the FPGA DIP_SWITCH_INPUT_REGISTER2 to get
diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h
index ea9aaebe11e7..8b39eec07318 100644
--- a/arch/arm/mach-omap2/board-flash.h
+++ b/arch/arm/mach-omap2/board-flash.h
@@ -23,10 +23,7 @@ struct flash_partitions {
int nr_parts;
};
-#if defined(CONFIG_MTD_NAND_OMAP2) || \
- defined(CONFIG_MTD_NAND_OMAP2_MODULE) || \
- defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) || IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
extern void board_flash_init(struct flash_partitions [],
char chip_sel[][GPMC_CS_NUM], int nand_type);
#else
@@ -36,8 +33,7 @@ static inline void board_flash_init(struct flash_partitions part[],
}
#endif
-#if defined(CONFIG_MTD_NAND_OMAP2) || \
- defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
extern void board_nand_init(struct mtd_partition *nand_parts,
u8 nr_parts, u8 cs, int nand_type, struct gpmc_timings *gpmc_t);
extern struct gpmc_timings nand_default_timings[];
@@ -49,8 +45,7 @@ static inline void board_nand_init(struct mtd_partition *nand_parts,
#define nand_default_timings NULL
#endif
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
extern void board_onenand_init(struct mtd_partition *nand_parts,
u8 nr_parts, u8 cs);
#else
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
deleted file mode 100644
index 390795b334c3..000000000000
--- a/arch/arm/mach-omap2/board-ldp.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-ldp.c
- *
- * Copyright (C) 2008 Texas Instruments Inc.
- * Nishant Kamat <nskamat@ti.com>
- *
- * Modified from mach-omap2/board-3430sdp.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/gpio_keys.h>
-#include <linux/workqueue.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/i2c/twl.h>
-#include <linux/io.h>
-#include <linux/smsc911x.h>
-#include <linux/mmc/host.h>
-#include <linux/usb/phy.h>
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "gpmc.h"
-#include "gpmc-smsc911x.h"
-
-#include <linux/platform_data/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "board-flash.h"
-#include "mux.h"
-#include "hsmmc.h"
-#include "control.h"
-#include "common-board-devices.h"
-#include "display.h"
-
-#define LDP_SMSC911X_CS 1
-#define LDP_SMSC911X_GPIO 152
-#define DEBUG_BASE 0x08000000
-#define LDP_ETHR_START DEBUG_BASE
-
-static uint32_t board_keymap[] = {
- KEY(0, 0, KEY_1),
- KEY(1, 0, KEY_2),
- KEY(2, 0, KEY_3),
- KEY(0, 1, KEY_4),
- KEY(1, 1, KEY_5),
- KEY(2, 1, KEY_6),
- KEY(3, 1, KEY_F5),
- KEY(0, 2, KEY_7),
- KEY(1, 2, KEY_8),
- KEY(2, 2, KEY_9),
- KEY(3, 2, KEY_F6),
- KEY(0, 3, KEY_F7),
- KEY(1, 3, KEY_0),
- KEY(2, 3, KEY_F8),
- PERSISTENT_KEY(4, 5),
- KEY(4, 4, KEY_VOLUMEUP),
- KEY(5, 5, KEY_VOLUMEDOWN),
- 0
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data ldp_kp_twl4030_data = {
- .keymap_data = &board_map_data,
- .rows = 6,
- .cols = 6,
- .rep = 1,
-};
-
-static struct gpio_keys_button ldp_gpio_keys_buttons[] = {
- [0] = {
- .code = KEY_ENTER,
- .gpio = 101,
- .desc = "enter sw",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [1] = {
- .code = KEY_F1,
- .gpio = 102,
- .desc = "func 1",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [2] = {
- .code = KEY_F2,
- .gpio = 103,
- .desc = "func 2",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [3] = {
- .code = KEY_F3,
- .gpio = 104,
- .desc = "func 3",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [4] = {
- .code = KEY_F4,
- .gpio = 105,
- .desc = "func 4",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [5] = {
- .code = KEY_LEFT,
- .gpio = 106,
- .desc = "left sw",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [6] = {
- .code = KEY_RIGHT,
- .gpio = 107,
- .desc = "right sw",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [7] = {
- .code = KEY_UP,
- .gpio = 108,
- .desc = "up sw",
- .active_low = 1,
- .debounce_interval = 30,
- },
- [8] = {
- .code = KEY_DOWN,
- .gpio = 109,
- .desc = "down sw",
- .active_low = 1,
- .debounce_interval = 30,
- },
-};
-
-static struct gpio_keys_platform_data ldp_gpio_keys = {
- .buttons = ldp_gpio_keys_buttons,
- .nbuttons = ARRAY_SIZE(ldp_gpio_keys_buttons),
- .rep = 1,
-};
-
-static struct platform_device ldp_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &ldp_gpio_keys,
- },
-};
-
-static struct omap_smsc911x_platform_data smsc911x_cfg = {
- .cs = LDP_SMSC911X_CS,
- .gpio_irq = LDP_SMSC911X_GPIO,
- .gpio_reset = -EINVAL,
- .flags = SMSC911X_USE_32BIT,
-};
-
-static inline void __init ldp_init_smsc911x(void)
-{
- gpmc_smsc911x_init(&smsc911x_cfg);
-}
-
-/* LCD */
-
-#define LCD_PANEL_RESET_GPIO 55
-#define LCD_PANEL_QVGA_GPIO 56
-
-static const struct display_timing ldp_lcd_videomode = {
- .pixelclock = { 0, 5400000, 0 },
-
- .hactive = { 0, 240, 0 },
- .hfront_porch = { 0, 3, 0 },
- .hback_porch = { 0, 39, 0 },
- .hsync_len = { 0, 3, 0 },
-
- .vactive = { 0, 320, 0 },
- .vfront_porch = { 0, 2, 0 },
- .vback_porch = { 0, 7, 0 },
- .vsync_len = { 0, 1, 0 },
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
-};
-
-static struct panel_dpi_platform_data ldp_lcd_pdata = {
- .name = "lcd",
- .source = "dpi.0",
-
- .data_lines = 18,
-
- .display_timing = &ldp_lcd_videomode,
-
- .enable_gpio = -1, /* filled in code */
- .backlight_gpio = -1, /* filled in code */
-};
-
-static struct platform_device ldp_lcd_device = {
- .name = "panel-dpi",
- .id = 0,
- .dev.platform_data = &ldp_lcd_pdata,
-};
-
-static struct omap_dss_board_info ldp_dss_data = {
- .default_display_name = "lcd",
-};
-
-static void __init ldp_display_init(void)
-{
- int r;
-
- static struct gpio gpios[] __initdata = {
- {LCD_PANEL_RESET_GPIO, GPIOF_OUT_INIT_HIGH, "LCD RESET"},
- {LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "LCD QVGA"},
- };
-
- r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
- if (r) {
- pr_err("Cannot request LCD GPIOs, error %d\n", r);
- return;
- }
-
- omap_display_init(&ldp_dss_data);
-}
-
-static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
-{
- int res;
-
- /* LCD enable GPIO */
- ldp_lcd_pdata.enable_gpio = gpio + 7;
-
- /* Backlight enable GPIO */
- ldp_lcd_pdata.backlight_gpio = gpio + 15;
-
- res = platform_device_register(&ldp_lcd_device);
- if (res)
- pr_err("Unable to register LCD: %d\n", res);
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data ldp_gpio_data = {
- .setup = ldp_twl_gpio_setup,
-};
-
-static struct regulator_consumer_supply ldp_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data ldp_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ldp_vmmc1_supply),
- .consumer_supplies = ldp_vmmc1_supply,
-};
-
-/* ads7846 on SPI */
-static struct regulator_consumer_supply ldp_vaux1_supplies[] = {
- REGULATOR_SUPPLY("vcc", "spi1.0"),
-};
-
-/* VAUX1 */
-static struct regulator_init_data ldp_vaux1 = {
- .constraints = {
- .min_uV = 3000000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ldp_vaux1_supplies),
- .consumer_supplies = ldp_vaux1_supplies,
-};
-
-static struct regulator_consumer_supply ldp_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
-};
-
-static struct regulator_init_data ldp_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ldp_vpll2_supplies),
- .consumer_supplies = ldp_vpll2_supplies,
-};
-
-static struct twl4030_platform_data ldp_twldata = {
- /* platform_data for children goes here */
- .vmmc1 = &ldp_vmmc1,
- .vaux1 = &ldp_vaux1,
- .vpll2 = &ldp_vpll2,
- .gpio = &ldp_gpio_data,
- .keypad = &ldp_kp_twl4030_data,
-};
-
-static int __init omap_i2c_init(void)
-{
- omap3_pmic_get_config(&ldp_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC, 0);
- omap3_pmic_init("twl4030", &ldp_twldata);
- omap_register_i2c_bus(2, 400, NULL, 0);
- omap_register_i2c_bus(3, 400, NULL, 0);
- return 0;
-}
-
-static struct omap2_hsmmc_info mmc[] __initdata = {
- {
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- },
- {} /* Terminator */
-};
-
-static struct platform_device *ldp_devices[] __initdata = {
- &ldp_gpio_keys_device,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct mtd_partition ldp_nand_partitions[] = {
- /* All the partition sizes are listed in terms of NAND block size */
- {
- .name = "X-Loader-NAND",
- .offset = 0,
- .size = 4 * (64 * 2048), /* 512KB, 0x80000 */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "U-Boot-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
- .size = 10 * (64 * 2048), /* 1.25MB, 0x140000 */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "Boot Env-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x1c0000 */
- .size = 2 * (64 * 2048), /* 256KB, 0x40000 */
- },
- {
- .name = "Kernel-NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x0200000*/
- .size = 240 * (64 * 2048), /* 30M, 0x1E00000 */
- },
- {
- .name = "File System - NAND",
- .offset = MTDPART_OFS_APPEND, /* Offset = 0x2000000 */
- .size = MTDPART_SIZ_FULL, /* 96MB, 0x6000000 */
- },
-
-};
-
-static struct regulator_consumer_supply dummy_supplies[] = {
- REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-static void __init omap_ldp_init(void)
-{
- regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- ldp_init_smsc911x();
- omap_i2c_init();
- platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
- omap_ads7846_init(1, 54, 310, NULL);
- omap_serial_init();
- omap_sdrc_init(NULL, NULL);
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(NULL);
- board_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),
- 0, 0, nand_default_timings);
-
- omap_hsmmc_init(mmc);
- ldp_display_init();
-}
-
-MACHINE_START(OMAP_LDP, "OMAP LDP board")
- .atag_offset = 0x100,
- .reserve = omap_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3430_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = omap_ldp_init,
- .init_late = omap3430_init_late,
- .init_time = omap_init_time,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index b6443a4e0c78..6b6fda65fb3b 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -66,7 +66,7 @@ static void board_check_revision(void)
pr_err("Unknown board\n");
}
-#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
/*
* Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
* 1.5 V voltage regulators of PM companion chip. Companion chip will then
@@ -163,8 +163,7 @@ static struct spi_board_info n800_spi_board_info[] __initdata = {
},
};
-#if defined(CONFIG_MENELAUS) && \
- (defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE))
+#if defined(CONFIG_MENELAUS) && IS_ENABLED(CONFIG_MMC_OMAP)
/*
* On both N800 and N810, only the first of the two MMC controllers is in use.
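The change above, and many hunks later in this patch, replace the open-coded
"defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" guard with
IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h>, which evaluates to 1 when the
option is built in (=y) or built as a module (=m). A minimal sketch of the
equivalence, for illustration only (not part of the patch; the symbol is simply
the one used in the hunk above):

#include <linux/kconfig.h>

#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/* old spelling: block is compiled when the driver is =y or =m */
#endif

#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
/* new spelling: IS_ENABLED() is 1 for =y or =m, 0 otherwise */
#endif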
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
deleted file mode 100644
index a5ab712c1a59..000000000000
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-rx51-peripherals.c
- *
- * Copyright (C) 2008-2009 Nokia
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/spi/spi.h>
-#include <linux/wl12xx.h>
-#include <linux/spi/tsc2005.h>
-#include <linux/i2c.h>
-#include <linux/i2c/twl.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/regulator/machine.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio/machine.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mmc/host.h>
-#include <linux/power/isp1704_charger.h>
-#include <linux/platform_data/spi-omap2-mcspi.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <plat/dmtimer.h>
-
-#include <asm/system_info.h>
-
-#include "common.h"
-#include <linux/omap-dma.h>
-
-#include "board-rx51.h"
-
-#include <sound/tlv320aic3x.h>
-#include <sound/tpa6130a2-plat.h>
-#include <linux/platform_data/media/si4713.h>
-#include <linux/platform_data/leds-lp55xx.h>
-
-#include <linux/platform_data/tsl2563.h>
-#include <linux/lis3lv02d.h>
-
-#include <video/omap-panel-data.h>
-
-#include <linux/platform_data/pwm_omap_dmtimer.h>
-#include <linux/platform_data/media/ir-rx51.h>
-
-#include "mux.h"
-#include "omap-pm.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-#include "soc.h"
-#include "omap-secure.h"
-
-#define SYSTEM_REV_B_USES_VAUX3 0x1699
-#define SYSTEM_REV_S_USES_VAUX3 0x8
-
-#define RX51_WL1251_POWER_GPIO 87
-#define RX51_WL1251_IRQ_GPIO 42
-#define RX51_FMTX_RESET_GPIO 163
-#define RX51_FMTX_IRQ 53
-#define RX51_LP5523_CHIP_EN_GPIO 41
-
-#define RX51_USB_TRANSCEIVER_RST_GPIO 67
-
-#define RX51_TSC2005_RESET_GPIO 104
-#define RX51_TSC2005_IRQ_GPIO 100
-
-#define LIS302_IRQ1_GPIO 181
-#define LIS302_IRQ2_GPIO 180 /* Not yet in use */
-
-/* List all SPI devices here. Note that the list/probe order seems to matter! */
-enum {
- RX51_SPI_WL1251,
- RX51_SPI_TSC2005, /* Touch Controller */
- RX51_SPI_MIPID, /* LCD panel */
-};
-
-static struct wl1251_platform_data wl1251_pdata;
-static struct tsc2005_platform_data tsc2005_pdata;
-
-#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
-static int lis302_setup(void)
-{
- int err;
- int irq1 = LIS302_IRQ1_GPIO;
- int irq2 = LIS302_IRQ2_GPIO;
-
- /* gpio for interrupt pin 1 */
- err = gpio_request(irq1, "lis3lv02dl_irq1");
- if (err) {
- printk(KERN_ERR "lis3lv02dl: gpio request failed\n");
- goto out;
- }
-
- /* gpio for interrupt pin 2 */
- err = gpio_request(irq2, "lis3lv02dl_irq2");
- if (err) {
- gpio_free(irq1);
- printk(KERN_ERR "lis3lv02dl: gpio request failed\n");
- goto out;
- }
-
- gpio_direction_input(irq1);
- gpio_direction_input(irq2);
-
-out:
- return err;
-}
-
-static int lis302_release(void)
-{
- gpio_free(LIS302_IRQ1_GPIO);
- gpio_free(LIS302_IRQ2_GPIO);
-
- return 0;
-}
-
-static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
- .click_flags = LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y |
- LIS3_CLICK_SINGLE_Z,
- /* Limits are 0.5g * value */
- .click_thresh_x = 8,
- .click_thresh_y = 8,
- .click_thresh_z = 10,
- /* Click must be longer than time limit */
- .click_time_limit = 9,
- /* Kind of debounce filter */
- .click_latency = 50,
-
-	/* Limits for all axes. millig-value / 18 to get HW values */
- .wakeup_flags = LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_HI,
- .wakeup_thresh = 800 / 18,
- .wakeup_flags2 = LIS3_WAKEUP_Z_HI ,
- .wakeup_thresh2 = 900 / 18,
-
- .hipass_ctrl = LIS3_HIPASS1_DISABLE | LIS3_HIPASS2_DISABLE,
-
- /* Interrupt line 2 for click detection, line 1 for thresholds */
- .irq_cfg = LIS3_IRQ2_CLICK | LIS3_IRQ1_FF_WU_12,
-
- .axis_x = LIS3_DEV_X,
- .axis_y = LIS3_INV_DEV_Y,
- .axis_z = LIS3_INV_DEV_Z,
- .setup_resources = lis302_setup,
- .release_resources = lis302_release,
- .st_min_limits = {-32, 3, 3},
- .st_max_limits = {-3, 32, 32},
-};
-#endif
-
-#if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
-static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
- .cover_comp_gain = 16,
-};
-#endif
-
-#if defined(CONFIG_LEDS_LP5523) || defined(CONFIG_LEDS_LP5523_MODULE)
-static struct lp55xx_led_config rx51_lp5523_led_config[] = {
- {
- .name = "lp5523:kb1",
- .chan_nr = 0,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:kb2",
- .chan_nr = 1,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:kb3",
- .chan_nr = 2,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:kb4",
- .chan_nr = 3,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:b",
- .chan_nr = 4,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:g",
- .chan_nr = 5,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:r",
- .chan_nr = 6,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:kb5",
- .chan_nr = 7,
- .led_current = 50,
- .max_current = 100,
- }, {
- .name = "lp5523:kb6",
- .chan_nr = 8,
- .led_current = 50,
- .max_current = 100,
- }
-};
-
-static struct lp55xx_platform_data rx51_lp5523_platform_data = {
- .led_config = rx51_lp5523_led_config,
- .num_channels = ARRAY_SIZE(rx51_lp5523_led_config),
- .clock_mode = LP55XX_CLOCK_AUTO,
- .enable_gpio = RX51_LP5523_CHIP_EN_GPIO,
-};
-#endif
-
-#define RX51_LCD_RESET_GPIO 90
-
-static struct panel_acx565akm_platform_data acx_pdata = {
- .name = "lcd",
- .source = "sdi.0",
- .reset_gpio = RX51_LCD_RESET_GPIO,
- .datapairs = 2,
-};
-
-static struct omap2_mcspi_device_config wl1251_mcspi_config = {
- .turbo_mode = 0,
-};
-
-static struct omap2_mcspi_device_config mipid_mcspi_config = {
- .turbo_mode = 0,
-};
-
-static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
- .turbo_mode = 0,
-};
-
-static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
- [RX51_SPI_WL1251] = {
- .modalias = "wl1251",
- .bus_num = 4,
- .chip_select = 0,
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_3,
- .controller_data = &wl1251_mcspi_config,
- .platform_data = &wl1251_pdata,
- },
- [RX51_SPI_MIPID] = {
- .modalias = "acx565akm",
- .bus_num = 1,
- .chip_select = 2,
- .max_speed_hz = 6000000,
- .controller_data = &mipid_mcspi_config,
- .platform_data = &acx_pdata,
- },
- [RX51_SPI_TSC2005] = {
- .modalias = "tsc2005",
- .bus_num = 1,
- .chip_select = 0,
- .max_speed_hz = 6000000,
- .controller_data = &tsc2005_mcspi_config,
- .platform_data = &tsc2005_pdata,
- },
-};
-
-static struct platform_device rx51_battery_device = {
- .name = "rx51-battery",
- .id = -1,
-};
-
-static void rx51_charger_set_power(bool on)
-{
- gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
-}
-
-static struct isp1704_charger_data rx51_charger_data = {
- .set_power = rx51_charger_set_power,
-};
-
-static struct platform_device rx51_charger_device = {
- .name = "isp1704_charger",
- .dev = {
- .platform_data = &rx51_charger_data,
- },
-};
-
-static void __init rx51_charger_init(void)
-{
- WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
-
- platform_device_register(&rx51_battery_device);
- platform_device_register(&rx51_charger_device);
-}
-
-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
-
-#define RX51_GPIO_CAMERA_LENS_COVER 110
-#define RX51_GPIO_CAMERA_FOCUS 68
-#define RX51_GPIO_CAMERA_CAPTURE 69
-#define RX51_GPIO_KEYPAD_SLIDE 71
-#define RX51_GPIO_LOCK_BUTTON 113
-#define RX51_GPIO_PROXIMITY 89
-
-#define RX51_GPIO_DEBOUNCE_TIMEOUT 10
-
-static struct gpio_keys_button rx51_gpio_keys[] = {
- {
- .desc = "Camera Lens Cover",
- .type = EV_SW,
- .code = SW_CAMERA_LENS_COVER,
- .gpio = RX51_GPIO_CAMERA_LENS_COVER,
- .active_low = 1,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }, {
- .desc = "Camera Focus",
- .type = EV_KEY,
- .code = KEY_CAMERA_FOCUS,
- .gpio = RX51_GPIO_CAMERA_FOCUS,
- .active_low = 1,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }, {
- .desc = "Camera Capture",
- .type = EV_KEY,
- .code = KEY_CAMERA,
- .gpio = RX51_GPIO_CAMERA_CAPTURE,
- .active_low = 1,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }, {
- .desc = "Lock Button",
- .type = EV_KEY,
- .code = KEY_SCREENLOCK,
- .gpio = RX51_GPIO_LOCK_BUTTON,
- .active_low = 1,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }, {
- .desc = "Keypad Slide",
- .type = EV_SW,
- .code = SW_KEYPAD_SLIDE,
- .gpio = RX51_GPIO_KEYPAD_SLIDE,
- .active_low = 1,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }, {
- .desc = "Proximity Sensor",
- .type = EV_SW,
- .code = SW_FRONT_PROXIMITY,
- .gpio = RX51_GPIO_PROXIMITY,
- .active_low = 0,
- .debounce_interval = RX51_GPIO_DEBOUNCE_TIMEOUT,
- }
-};
-
-static struct gpio_keys_platform_data rx51_gpio_keys_data = {
- .buttons = rx51_gpio_keys,
- .nbuttons = ARRAY_SIZE(rx51_gpio_keys),
-};
-
-static struct platform_device rx51_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &rx51_gpio_keys_data,
- },
-};
-
-static void __init rx51_add_gpio_keys(void)
-{
- platform_device_register(&rx51_gpio_keys_device);
-}
-#else
-static void __init rx51_add_gpio_keys(void)
-{
-}
-#endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */
-
-static uint32_t board_keymap[] = {
- /*
-	 * Note that KEY(x, 8, KEY_XXX) entries represent the "entire row
-	 * connected to the ground" matrix state.
- */
- KEY(0, 0, KEY_Q),
- KEY(0, 1, KEY_O),
- KEY(0, 2, KEY_P),
- KEY(0, 3, KEY_COMMA),
- KEY(0, 4, KEY_BACKSPACE),
- KEY(0, 6, KEY_A),
- KEY(0, 7, KEY_S),
-
- KEY(1, 0, KEY_W),
- KEY(1, 1, KEY_D),
- KEY(1, 2, KEY_F),
- KEY(1, 3, KEY_G),
- KEY(1, 4, KEY_H),
- KEY(1, 5, KEY_J),
- KEY(1, 6, KEY_K),
- KEY(1, 7, KEY_L),
-
- KEY(2, 0, KEY_E),
- KEY(2, 1, KEY_DOT),
- KEY(2, 2, KEY_UP),
- KEY(2, 3, KEY_ENTER),
- KEY(2, 5, KEY_Z),
- KEY(2, 6, KEY_X),
- KEY(2, 7, KEY_C),
- KEY(2, 8, KEY_F9),
-
- KEY(3, 0, KEY_R),
- KEY(3, 1, KEY_V),
- KEY(3, 2, KEY_B),
- KEY(3, 3, KEY_N),
- KEY(3, 4, KEY_M),
- KEY(3, 5, KEY_SPACE),
- KEY(3, 6, KEY_SPACE),
- KEY(3, 7, KEY_LEFT),
-
- KEY(4, 0, KEY_T),
- KEY(4, 1, KEY_DOWN),
- KEY(4, 2, KEY_RIGHT),
- KEY(4, 4, KEY_LEFTCTRL),
- KEY(4, 5, KEY_RIGHTALT),
- KEY(4, 6, KEY_LEFTSHIFT),
- KEY(4, 8, KEY_F10),
-
- KEY(5, 0, KEY_Y),
- KEY(5, 8, KEY_F11),
-
- KEY(6, 0, KEY_U),
-
- KEY(7, 0, KEY_I),
- KEY(7, 1, KEY_F7),
- KEY(7, 2, KEY_F8),
-};
-
-static struct matrix_keymap_data board_map_data = {
- .keymap = board_keymap,
- .keymap_size = ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data rx51_kp_data = {
- .keymap_data = &board_map_data,
- .rows = 8,
- .cols = 8,
- .rep = 1,
-};
-
-/* Enable input logic and pull all lines up when eMMC is on. */
-static struct omap_board_mux rx51_mmc2_on_mux[] = {
- OMAP3_MUX(SDMMC2_CMD, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT0, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT1, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT2, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT3, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT4, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT5, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT6, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT7, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-
-/* Disable input logic and pull all lines down when eMMC is off. */
-static struct omap_board_mux rx51_mmc2_off_mux[] = {
- OMAP3_MUX(SDMMC2_CMD, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT0, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT1, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT2, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT3, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT4, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT5, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT6, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- OMAP3_MUX(SDMMC2_DAT7, OMAP_PULL_ENA | OMAP_MUX_MODE0),
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-
-static struct omap_mux_partition *partition;
-
-/*
- * Current flows to eMMC when eMMC is off and the data lines are pulled up,
- * so pull them down. N.B. we pull 8 lines because we are using 8 lines.
- */
-static void rx51_mmc2_remux(struct device *dev, int power_on)
-{
- if (power_on)
- omap_mux_write_array(partition, rx51_mmc2_on_mux);
- else
- omap_mux_write_array(partition, rx51_mmc2_off_mux);
-}
-
-static struct omap2_hsmmc_info mmc[] __initdata = {
- {
- .name = "external",
- .mmc = 1,
- .caps = MMC_CAP_4_BIT_DATA,
- .cover_only = true,
- .gpio_cd = 160,
- .gpio_wp = -EINVAL,
- },
- {
- .name = "internal",
- .mmc = 2,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
- /* See also rx51_mmc2_remux */
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- .nonremovable = true,
- .remux = rx51_mmc2_remux,
- },
- {} /* Terminator */
-};
-
-static struct regulator_consumer_supply rx51_vmmc1_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply rx51_vaux2_supply[] = {
- REGULATOR_SUPPLY("vdds_csib", "omap3isp"),
-};
-
-static struct regulator_consumer_supply rx51_vaux3_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-static struct regulator_consumer_supply rx51_vsim_supply[] = {
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
-};
-
-static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
- /* tlv320aic3x analog supplies */
- REGULATOR_SUPPLY("AVDD", "2-0018"),
- REGULATOR_SUPPLY("DRVDD", "2-0018"),
- REGULATOR_SUPPLY("AVDD", "2-0019"),
- REGULATOR_SUPPLY("DRVDD", "2-0019"),
- /* tpa6130a2 */
- REGULATOR_SUPPLY("Vdd", "2-0060"),
- /* Keep vmmc as last item. It is not iterated for newer boards */
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
-};
-
-static struct regulator_consumer_supply rx51_vio_supplies[] = {
- /* tlv320aic3x digital supplies */
- REGULATOR_SUPPLY("IOVDD", "2-0018"),
- REGULATOR_SUPPLY("DVDD", "2-0018"),
- REGULATOR_SUPPLY("IOVDD", "2-0019"),
- REGULATOR_SUPPLY("DVDD", "2-0019"),
- /* Si4713 IO supply */
- REGULATOR_SUPPLY("vio", "2-0063"),
- /* lis3lv02d */
- REGULATOR_SUPPLY("Vdd_IO", "3-001d"),
-};
-
-static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
- REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
- REGULATOR_SUPPLY("vdds_sdi", "omapdss_sdi.0"),
- /* Si4713 supply */
- REGULATOR_SUPPLY("vdd", "2-0063"),
- /* lis3lv02d */
- REGULATOR_SUPPLY("Vdd", "3-001d"),
-};
-
-static struct regulator_init_data rx51_vaux1 = {
- .constraints = {
- .name = "V28",
- .min_uV = 2800000,
- .max_uV = 2800000,
-		.always_on		= true, /* due to battery cover sensor */
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vaux1_consumers),
- .consumer_supplies = rx51_vaux1_consumers,
-};
-
-static struct regulator_init_data rx51_vaux2 = {
- .constraints = {
- .name = "VCSI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vaux2_supply),
- .consumer_supplies = rx51_vaux2_supply,
-};
-
-/* VAUX3 - adds more power to VIO_18 rail */
-static struct regulator_init_data rx51_vaux3_cam = {
- .constraints = {
- .name = "VCAM_DIG_18",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data rx51_vaux3_mmc = {
- .constraints = {
- .name = "VMMC2_30",
- .min_uV = 2800000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vaux3_supply),
- .consumer_supplies = rx51_vaux3_supply,
-};
-
-static struct regulator_init_data rx51_vaux4 = {
- .constraints = {
- .name = "VCAM_ANA_28",
- .min_uV = 2800000,
- .max_uV = 2800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data rx51_vmmc1 = {
- .constraints = {
- .min_uV = 1850000,
- .max_uV = 3150000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vmmc1_supply),
- .consumer_supplies = rx51_vmmc1_supply,
-};
-
-static struct regulator_init_data rx51_vmmc2 = {
- .constraints = {
- .name = "V28_A",
- .min_uV = 2800000,
- .max_uV = 3000000,
-		.always_on		= true, /* due to VIO leak to AIC34 VDDs */
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vmmc2_supplies),
- .consumer_supplies = rx51_vmmc2_supplies,
-};
-
-static struct regulator_init_data rx51_vpll1 = {
- .constraints = {
- .name = "VPLL",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .always_on = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE,
- },
-};
-
-static struct regulator_init_data rx51_vpll2 = {
- .constraints = {
- .name = "VSDI_CSI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .always_on = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE,
- },
-};
-
-static struct regulator_init_data rx51_vsim = {
- .constraints = {
- .name = "VMMC2_IO_18",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vsim_supply),
- .consumer_supplies = rx51_vsim_supply,
-};
-
-static struct regulator_init_data rx51_vio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(rx51_vio_supplies),
- .consumer_supplies = rx51_vio_supplies,
-};
-
-static struct regulator_init_data rx51_vintana1 = {
- .constraints = {
- .name = "VINTANA1",
- .min_uV = 1500000,
- .max_uV = 1500000,
- .always_on = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE,
- },
-};
-
-static struct regulator_init_data rx51_vintana2 = {
- .constraints = {
- .name = "VINTANA2",
- .min_uV = 2750000,
- .max_uV = 2750000,
- .apply_uV = true,
- .always_on = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE,
- },
-};
-
-static struct regulator_init_data rx51_vintdig = {
- .constraints = {
- .name = "VINTDIG",
- .min_uV = 1500000,
- .max_uV = 1500000,
- .always_on = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE,
- },
-};
-
-static struct gpiod_lookup_table rx51_fmtx_gpios_table = {
- .dev_id = "2-0063",
- .table = {
- GPIO_LOOKUP("gpio.6", 3, "reset", GPIO_ACTIVE_HIGH), /* 163 */
- { },
- },
-};
-
-static __init void rx51_gpio_init(void)
-{
- gpiod_add_lookup_table(&rx51_fmtx_gpios_table);
-}
-
-static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
-{
- /* FIXME this gpio setup is just a placeholder for now */
- gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
- gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
-
- return 0;
-}
-
-static struct twl4030_gpio_platform_data rx51_gpio_data = {
- .pulldowns = BIT(0) | BIT(1) | BIT(2) | BIT(3)
- | BIT(4) | BIT(5)
- | BIT(8) | BIT(9) | BIT(10) | BIT(11)
- | BIT(12) | BIT(13) | BIT(14) | BIT(15)
- | BIT(16) | BIT(17) ,
- .setup = rx51_twlgpio_setup,
-};
-
-static struct twl4030_ins sleep_on_seq[] __initdata = {
-/*
- * Turn off everything
- */
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2},
-};
-
-static struct twl4030_script sleep_on_script __initdata = {
- .script = sleep_on_seq,
- .size = ARRAY_SIZE(sleep_on_seq),
- .flags = TWL4030_SLEEP_SCRIPT,
-};
-
-static struct twl4030_ins wakeup_seq[] __initdata = {
-/*
- * Reenable everything
- */
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
-};
-
-static struct twl4030_script wakeup_script __initdata = {
- .script = wakeup_seq,
- .size = ARRAY_SIZE(wakeup_seq),
- .flags = TWL4030_WAKEUP12_SCRIPT,
-};
-
-static struct twl4030_ins wakeup_p3_seq[] __initdata = {
-/*
- * Reenable everything
- */
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
-};
-
-static struct twl4030_script wakeup_p3_script __initdata = {
- .script = wakeup_p3_seq,
- .size = ARRAY_SIZE(wakeup_p3_seq),
- .flags = TWL4030_WAKEUP3_SCRIPT,
-};
-
-static struct twl4030_ins wrst_seq[] __initdata = {
-/*
- * Reset twl4030.
- * Reset VDD1 regulator.
- * Reset VDD2 regulator.
- * Reset VPLL1 regulator.
- * Enable sysclk output.
- * Reenable twl4030.
- */
- {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 2},
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_ACTIVE),
- 0x13},
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x13},
- {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x13},
- {MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x13},
- {MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x35},
- {MSG_SINGULAR(DEV_GRP_P3, RES_HFCLKOUT, RES_STATE_ACTIVE), 2},
- {MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 2},
-};
-
-static struct twl4030_script wrst_script __initdata = {
- .script = wrst_seq,
- .size = ARRAY_SIZE(wrst_seq),
- .flags = TWL4030_WRST_SCRIPT,
-};
-
-static struct twl4030_script *twl4030_scripts[] __initdata = {
-	/* The wakeup12 script should be loaded before the sleep script;
-	   otherwise the board might hit retention before the wakeup script
-	   has been loaded, which can cause timing-dependent boot failures.
-	 */
- &wakeup_script,
- &sleep_on_script,
- &wakeup_p3_script,
- &wrst_script,
-};
-
-static struct twl4030_resconfig twl4030_rconfig[] __initdata = {
- { .resource = RES_VDD1, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
- .remap_sleep = RES_STATE_OFF
- },
- { .resource = RES_VDD2, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
- .remap_sleep = RES_STATE_OFF
- },
- { .resource = RES_VPLL1, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
- .remap_sleep = RES_STATE_OFF
- },
- { .resource = RES_VPLL2, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VAUX1, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VAUX2, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VAUX3, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VAUX4, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VMMC1, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VMMC2, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VDAC, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VSIM, .devgroup = -1,
- .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_VIO, .devgroup = DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = 1, .type2 = -1 , .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_32KCLKOUT, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_RESET, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { .resource = RES_MAIN_REF, .devgroup = -1,
- .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
- },
- { 0, 0},
-};
-
-static struct twl4030_power_data rx51_t2scripts_data __initdata = {
- .scripts = twl4030_scripts,
- .num = ARRAY_SIZE(twl4030_scripts),
- .resource_config = twl4030_rconfig,
-};
-
-static struct twl4030_vibra_data rx51_vibra_data __initdata = {
- .coexist = 0,
-};
-
-static struct twl4030_audio_data rx51_audio_data __initdata = {
- .audio_mclk = 26000000,
- .vibra = &rx51_vibra_data,
-};
-
-static struct twl4030_platform_data rx51_twldata __initdata = {
- /* platform_data for children goes here */
- .gpio = &rx51_gpio_data,
- .keypad = &rx51_kp_data,
- .power = &rx51_t2scripts_data,
- .audio = &rx51_audio_data,
-
- .vaux1 = &rx51_vaux1,
- .vaux2 = &rx51_vaux2,
- .vaux4 = &rx51_vaux4,
- .vmmc1 = &rx51_vmmc1,
- .vpll1 = &rx51_vpll1,
- .vpll2 = &rx51_vpll2,
- .vsim = &rx51_vsim,
- .vintana1 = &rx51_vintana1,
- .vintana2 = &rx51_vintana2,
- .vintdig = &rx51_vintdig,
- .vio = &rx51_vio,
-};
-
-static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata_or_module = {
- .power_gpio = 98,
-};
-
-/* Audio setup data */
-static struct aic3x_setup_data rx51_aic34_setup = {
- .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
- .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT,
-};
-
-static struct aic3x_pdata rx51_aic3x_data = {
- .setup = &rx51_aic34_setup,
- .gpio_reset = 60,
-};
-
-static struct aic3x_pdata rx51_aic3x_data2 = {
- .gpio_reset = 60,
-};
-
-#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
-static struct si4713_platform_data rx51_si4713_platform_data = {
- .is_platform_device = true
-};
-#endif
-
-static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
-#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
- {
- I2C_BOARD_INFO("si4713", 0x63),
- .platform_data = &rx51_si4713_platform_data,
- },
-#endif
- {
- I2C_BOARD_INFO("tlv320aic3x", 0x18),
- .platform_data = &rx51_aic3x_data,
- },
- {
- I2C_BOARD_INFO("tlv320aic3x", 0x19),
- .platform_data = &rx51_aic3x_data2,
- },
-#if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
- {
- I2C_BOARD_INFO("tsl2563", 0x29),
- .platform_data = &rx51_tsl2563_platform_data,
- },
-#endif
-#if defined(CONFIG_LEDS_LP5523) || defined(CONFIG_LEDS_LP5523_MODULE)
- {
- I2C_BOARD_INFO("lp5523", 0x32),
- .platform_data = &rx51_lp5523_platform_data,
- },
-#endif
- {
- I2C_BOARD_INFO("bq27200", 0x55),
- },
- {
- I2C_BOARD_INFO("tpa6130a2", 0x60),
- .platform_data = &rx51_tpa6130a2_data,
- }
-};
-
-static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
-#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
- {
- I2C_BOARD_INFO("lis3lv02d", 0x1d),
- .platform_data = &rx51_lis3lv02d_data,
- },
-#endif
-};
-
-static int __init rx51_i2c_init(void)
-{
-#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
- int err;
-#endif
-
- if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
- system_rev >= SYSTEM_REV_B_USES_VAUX3) {
- rx51_twldata.vaux3 = &rx51_vaux3_mmc;
- /* Only older boards use VMMC2 for internal MMC */
- rx51_vmmc2.num_consumer_supplies--;
- } else {
- rx51_twldata.vaux3 = &rx51_vaux3_cam;
- }
- rx51_twldata.vmmc2 = &rx51_vmmc2;
- omap3_pmic_get_config(&rx51_twldata,
- TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC,
- TWL_COMMON_REGULATOR_VDAC);
-
- rx51_twldata.vdac->constraints.apply_uV = true;
- rx51_twldata.vdac->constraints.name = "VDAC";
-
- omap_pmic_init(1, 2200, "twl5030", 7 + OMAP_INTC_START, &rx51_twldata);
-#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
- err = gpio_request_one(RX51_FMTX_IRQ, GPIOF_DIR_IN, "si4713 irq");
- if (err) {
- printk(KERN_ERR "Cannot request si4713 irq gpio. %d\n", err);
- return err;
- }
- rx51_peripherals_i2c_board_info_2[0].irq = gpio_to_irq(RX51_FMTX_IRQ);
-#endif
- omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
- ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
-#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
- rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
- rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
-#endif
- omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
- ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
- return 0;
-}
-
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
- defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
-
-static struct mtd_partition onenand_partitions[] = {
- {
- .name = "bootloader",
- .offset = 0,
- .size = 0x20000,
- .mask_flags = MTD_WRITEABLE, /* Force read-only */
- },
- {
- .name = "config",
- .offset = MTDPART_OFS_APPEND,
- .size = 0x60000,
- },
- {
- .name = "log",
- .offset = MTDPART_OFS_APPEND,
- .size = 0x40000,
- },
- {
- .name = "kernel",
- .offset = MTDPART_OFS_APPEND,
- .size = 0x200000,
- },
- {
- .name = "initfs",
- .offset = MTDPART_OFS_APPEND,
- .size = 0x200000,
- },
- {
- .name = "rootfs",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct omap_onenand_platform_data board_onenand_data[] = {
- {
- .cs = 0,
- .gpio_irq = 65,
- .parts = onenand_partitions,
- .nr_parts = ARRAY_SIZE(onenand_partitions),
- .flags = ONENAND_SYNC_READWRITE,
- }
-};
-#endif
-
-static struct gpio rx51_wl1251_gpios[] __initdata = {
- { RX51_WL1251_IRQ_GPIO, GPIOF_IN, "wl1251 irq" },
-};
-
-static void __init rx51_init_wl1251(void)
-{
- int irq, ret;
-
- ret = gpio_request_array(rx51_wl1251_gpios,
- ARRAY_SIZE(rx51_wl1251_gpios));
- if (ret < 0)
- goto error;
-
- irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO);
- if (irq < 0)
- goto err_irq;
-
- wl1251_pdata.power_gpio = RX51_WL1251_POWER_GPIO;
- rx51_peripherals_spi_board_info[RX51_SPI_WL1251].irq = irq;
-
- return;
-
-err_irq:
- gpio_free(RX51_WL1251_IRQ_GPIO);
-error:
- printk(KERN_ERR "wl1251 board initialisation failed\n");
- wl1251_pdata.power_gpio = -1;
-
- /*
- * Now rx51_peripherals_spi_board_info[1].irq is zero and
- * set_power is null, and wl1251_probe() will fail.
- */
-}
-
-static struct tsc2005_platform_data tsc2005_pdata = {
- .ts_pressure_max = 2048,
- .ts_pressure_fudge = 2,
- .ts_x_max = 4096,
- .ts_x_fudge = 4,
- .ts_y_max = 4096,
- .ts_y_fudge = 7,
- .ts_x_plate_ohm = 280,
- .esd_timeout_ms = 8000,
-};
-
-static struct gpio rx51_tsc2005_gpios[] __initdata = {
- { RX51_TSC2005_IRQ_GPIO, GPIOF_IN, "tsc2005 IRQ" },
- { RX51_TSC2005_RESET_GPIO, GPIOF_OUT_INIT_HIGH, "tsc2005 reset" },
-};
-
-static void rx51_tsc2005_set_reset(bool enable)
-{
- gpio_set_value(RX51_TSC2005_RESET_GPIO, enable);
-}
-
-static void __init rx51_init_tsc2005(void)
-{
- int r;
-
- omap_mux_init_gpio(RX51_TSC2005_RESET_GPIO, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(RX51_TSC2005_IRQ_GPIO, OMAP_PIN_INPUT_PULLUP);
-
- r = gpio_request_array(rx51_tsc2005_gpios,
- ARRAY_SIZE(rx51_tsc2005_gpios));
- if (r < 0) {
- printk(KERN_ERR "tsc2005 board initialization failed\n");
- tsc2005_pdata.esd_timeout_ms = 0;
- return;
- }
-
- tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
- rx51_peripherals_spi_board_info[RX51_SPI_TSC2005].irq =
- gpio_to_irq(RX51_TSC2005_IRQ_GPIO);
-}
-
-#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
-static struct pwm_omap_dmtimer_pdata __maybe_unused pwm_dmtimer_pdata = {
- .request_by_node = omap_dm_timer_request_by_node,
- .request_specific = omap_dm_timer_request_specific,
- .request = omap_dm_timer_request,
- .set_source = omap_dm_timer_set_source,
- .get_irq = omap_dm_timer_get_irq,
- .set_int_enable = omap_dm_timer_set_int_enable,
- .set_int_disable = omap_dm_timer_set_int_disable,
- .free = omap_dm_timer_free,
- .enable = omap_dm_timer_enable,
- .disable = omap_dm_timer_disable,
- .get_fclk = omap_dm_timer_get_fclk,
- .start = omap_dm_timer_start,
- .stop = omap_dm_timer_stop,
- .set_load = omap_dm_timer_set_load,
- .set_match = omap_dm_timer_set_match,
- .set_pwm = omap_dm_timer_set_pwm,
- .set_prescaler = omap_dm_timer_set_prescaler,
- .read_counter = omap_dm_timer_read_counter,
- .write_counter = omap_dm_timer_write_counter,
- .read_status = omap_dm_timer_read_status,
- .write_status = omap_dm_timer_write_status,
-};
-#endif
-
-#if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
-static struct lirc_rx51_platform_data rx51_lirc_data = {
- .set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat,
-};
-
-static struct platform_device rx51_lirc_device = {
- .name = "lirc_rx51",
- .id = -1,
- .dev = {
- .platform_data = &rx51_lirc_data,
- },
-};
-
-static void __init rx51_init_lirc(void)
-{
- platform_device_register(&rx51_lirc_device);
-}
-#else
-static void __init rx51_init_lirc(void)
-{
-}
-#endif
-
-static struct platform_device madc_hwmon = {
- .name = "twl4030_madc_hwmon",
- .id = -1,
-};
-
-static void __init rx51_init_twl4030_hwmon(void)
-{
- platform_device_register(&madc_hwmon);
-}
-
-static struct platform_device omap3_rom_rng_device = {
- .name = "omap3-rom-rng",
- .id = -1,
- .dev = {
- .platform_data = rx51_secure_rng_call,
- },
-};
-
-static void __init rx51_init_omap3_rom_rng(void)
-{
- if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
- pr_info("RX-51: Registering OMAP3 HWRNG device\n");
- platform_device_register(&omap3_rom_rng_device);
- }
-}
-
-void __init rx51_peripherals_init(void)
-{
- rx51_gpio_init();
- rx51_i2c_init();
- regulator_has_full_constraints();
- gpmc_onenand_init(board_onenand_data);
- rx51_add_gpio_keys();
- rx51_init_wl1251();
- rx51_init_tsc2005();
- rx51_init_lirc();
- spi_register_board_info(rx51_peripherals_spi_board_info,
- ARRAY_SIZE(rx51_peripherals_spi_board_info));
-
- partition = omap_mux_get("core");
- if (partition)
- omap_hsmmc_init(mmc);
-
- rx51_charger_init();
- rx51_init_twl4030_hwmon();
- rx51_init_omap3_rom_rng();
-}
-
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
deleted file mode 100644
index 180c6aa633bd..000000000000
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-rx51-video.c
- *
- * Copyright (C) 2010 Nokia
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/mm.h>
-#include <asm/mach-types.h>
-#include <linux/platform_data/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include "soc.h"
-#include "board-rx51.h"
-#include "display.h"
-
-#include "mux.h"
-
-#define RX51_LCD_RESET_GPIO 90
-
-#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
-
-static struct connector_atv_platform_data rx51_tv_pdata = {
- .name = "tv",
- .source = "venc.0",
- .invert_polarity = false,
-};
-
-static struct platform_device rx51_tv_connector_device = {
- .name = "connector-analog-tv",
- .id = 0,
- .dev.platform_data = &rx51_tv_pdata,
-};
-
-static struct omap_dss_board_info rx51_dss_board_info = {
- .default_display_name = "lcd",
-};
-
-static int __init rx51_video_init(void)
-{
- if (!machine_is_nokia_rx51())
- return 0;
-
- if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
- pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
- return 0;
- }
-
- omap_display_init(&rx51_dss_board_info);
-
- platform_device_register(&rx51_tv_connector_device);
-
- return 0;
-}
-
-omap_subsys_initcall(rx51_video_init);
-#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
deleted file mode 100644
index 41161ca97d74..000000000000
--- a/arch/arm/mach-omap2/board-rx51.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Board support file for Nokia N900 (aka RX-51).
- *
- * Copyright (C) 2007, 2008 Nokia
- * Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
- * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/usb/phy.h>
-#include <linux/usb/musb.h>
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <linux/omap-dma.h>
-
-#include "common.h"
-#include "mux.h"
-#include "gpmc.h"
-#include "pm.h"
-#include "soc.h"
-#include "sdram-nokia.h"
-#include "omap-secure.h"
-
-#define RX51_GPIO_SLEEP_IND 162
-
-static struct gpio_led gpio_leds[] = {
- {
- .name = "sleep_ind",
- .gpio = RX51_GPIO_SLEEP_IND,
- },
-};
-
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
-};
-
-/*
- * cpuidle C-states definition for rx51.
- *
- * The 'exit_latency' field is the sum of sleep
- * and wake-up latencies.
-
- ---------------------------------------------
- | state | exit_latency | target_residency |
- ---------------------------------------------
- | C1 | 110 + 162 | 5 |
- | C2 | 106 + 180 | 309 |
- | C3 | 107 + 410 | 46057 |
- | C4 | 121 + 3374 | 46057 |
- | C5 | 855 + 1146 | 46057 |
- | C6 | 7580 + 4134 | 484329 |
- | C7 | 7505 + 15274 | 484329 |
- ---------------------------------------------
-
-*/
-
-extern void __init rx51_peripherals_init(void);
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
- { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct omap_musb_board_data musb_board_data = {
- .interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_OTG,
- .power = 0,
-};
-
-static void __init rx51_init(void)
-{
- struct omap_sdrc_params *sdrc_params;
-
- omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_serial_init();
-
- sdrc_params = nokia_get_sdram_timings();
- omap_sdrc_init(sdrc_params, sdrc_params);
-
- usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
- usb_musb_init(&musb_board_data);
- rx51_peripherals_init();
-
- if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
-#ifdef CONFIG_ARM_ERRATA_430973
- pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
- /* set IBE to 1 */
- rx51_secure_update_aux_cr(BIT(6), 0);
-#endif
- }
-
- /* Ensure SDRC pins are mux'd for self-refresh */
- omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-
- platform_device_register(&leds_gpio);
-}
-
-static void __init rx51_reserve(void)
-{
- omap_reserve();
-}
-
-MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
- /* Maintainer: Lauri Leukkunen <lauri.leukkunen@nokia.com> */
- .atag_offset = 0x100,
- .reserve = rx51_reserve,
- .map_io = omap3_map_io,
- .init_early = omap3430_init_early,
- .init_irq = omap3_init_irq,
- .init_machine = rx51_init,
- .init_late = omap3430_init_late,
- .init_time = omap_init_time,
- .restart = omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51.h b/arch/arm/mach-omap2/board-rx51.h
deleted file mode 100644
index b76f49e7eed5..000000000000
--- a/arch/arm/mach-omap2/board-rx51.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Defines for rx51 boards
- */
-
-#ifndef _OMAP_BOARD_RX51_H
-#define _OMAP_BOARD_RX51_H
-
-extern void __init rx51_peripherals_init(void);
-extern void __init rx51_video_mem_init(void);
-
-#endif
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index d246efd9f734..5388fcd3de72 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -29,8 +29,7 @@
#include "common.h"
#include "common-board-devices.h"
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
- defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_ADS7846)
static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
};
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index d7f1d69daf6d..60a20f3b44de 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -67,7 +67,7 @@ omap_postcore_initcall(omap3_l3_init);
static inline void omap_init_sti(void) {}
-#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
+#if IS_ENABLED(CONFIG_SPI_OMAP24XX)
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -163,9 +163,8 @@ static void __init omap_init_aes(void)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
- defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
-#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_OMAP2_VOUT)
+#if IS_ENABLED(CONFIG_FB_OMAP2)
static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
};
#else
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
index facd7406a03d..44fef961bb70 100644
--- a/arch/arm/mach-omap2/drm.c
+++ b/arch/arm/mach-omap2/drm.c
@@ -28,7 +28,7 @@
#include "soc.h"
#include "display.h"
-#if defined(CONFIG_DRM_OMAP) || defined(CONFIG_DRM_OMAP_MODULE)
+#if IS_ENABLED(CONFIG_DRM_OMAP)
static struct omap_drm_platform_data platform_data;
diff --git a/arch/arm/mach-omap2/fb.c b/arch/arm/mach-omap2/fb.c
index 1f1ecf8807eb..ecd00b63181e 100644
--- a/arch/arm/mach-omap2/fb.c
+++ b/arch/arm/mach-omap2/fb.c
@@ -90,7 +90,7 @@ int __init omap_init_vrfb(void)
int __init omap_init_vrfb(void) { return 0; }
#endif
-#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+#if IS_ENABLED(CONFIG_FB_OMAP2)
static u64 omap_fb_dma_mask = ~(u32)0;
static struct omapfb_platform_data omapfb_config;
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.h b/arch/arm/mach-omap2/gpmc-smsc911x.h
index ea6c9c88c725..99a05b8412fa 100644
--- a/arch/arm/mach-omap2/gpmc-smsc911x.h
+++ b/arch/arm/mach-omap2/gpmc-smsc911x.h
@@ -21,7 +21,7 @@ struct omap_smsc911x_platform_data {
u32 flags;
};
-#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#if IS_ENABLED(CONFIG_SMSC911X)
extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d);
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index cff079e563f4..478097741bce 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -26,7 +26,7 @@
#include "hsmmc.h"
#include "control.h"
-#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
static u16 control_pbias_offset;
static u16 control_devconf1_offset;
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 148cd9b15499..69b619ddc765 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -28,7 +28,7 @@ struct omap2_hsmmc_info {
void (*init_card)(struct mmc_card *card);
};
-#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
void omap_hsmmc_init(struct omap2_hsmmc_info *);
void omap_hsmmc_late_init(struct omap2_hsmmc_info *);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 292eca0e78ed..a72738eab009 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -532,8 +532,7 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
}
#endif /* CONFIG_ARCH_OMAP4 */
-#if defined(CONFIG_SND_OMAP_SOC_OMAP_TWL4030) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP_TWL4030_MODULE)
+#if IS_ENABLED(CONFIG_SND_OMAP_SOC_OMAP_TWL4030)
#include <linux/platform_data/omap-twl4030.h>
/* Commonly used configuration */
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 058994e99570..04910764c385 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -105,9 +105,9 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
/*****************************************************************************
* Ethernet switch
****************************************************************************/
-void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq)
+void __init orion5x_eth_switch_init(struct dsa_platform_data *d)
{
- orion_ge00_switch_init(d, irq);
+ orion_ge00_switch_init(d);
}
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index cd0389c6e822..8a4115bd441d 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -41,7 +41,7 @@ void orion5x_setup_wins(void);
void orion5x_ehci0_init(void);
void orion5x_ehci1_init(void);
void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
-void orion5x_eth_switch_init(struct dsa_platform_data *d, int irq);
+void orion5x_eth_switch_init(struct dsa_platform_data *d);
void orion5x_i2c_init(void);
void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
void orion5x_spi_init(void);
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index c742e7b40b0d..dccadf68ea2b 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -101,7 +101,7 @@ static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = {
.port_names[7] = "lan3",
};
-static struct dsa_platform_data rd88f5181l_fxo_switch_plat_data = {
+static struct dsa_platform_data __initdata rd88f5181l_fxo_switch_plat_data = {
.nr_chips = 1,
.chip = &rd88f5181l_fxo_switch_chip_data,
};
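The switch platform data above is now tagged __initdata, presumably because it
is only referenced from __init board-setup code, so the kernel can discard it
together with the rest of .init.data once boot completes. A minimal sketch of
that pattern under the same assumption (names are illustrative, not from this
patch):

#include <linux/init.h>

/* init-only data: placed in .init.data and freed after boot */
static int example_setup_param __initdata = 42;

static int __init example_board_init(void)
{
	/* only __init code should reference __initdata objects */
	return example_setup_param ? 0 : -EINVAL;
}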
@@ -120,7 +120,7 @@ static void __init rd88f5181l_fxo_init(void)
*/
orion5x_ehci0_init();
orion5x_eth_init(&rd88f5181l_fxo_eth_data);
- orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data, NO_IRQ);
+ orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data);
orion5x_uart0_init();
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index 7e977b794b0c..affe5ec825de 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -102,7 +102,7 @@ static struct dsa_chip_data rd88f5181l_ge_switch_chip_data = {
.port_names[7] = "lan3",
};
-static struct dsa_platform_data rd88f5181l_ge_switch_plat_data = {
+static struct dsa_platform_data __initdata rd88f5181l_ge_switch_plat_data = {
.nr_chips = 1,
.chip = &rd88f5181l_ge_switch_chip_data,
};
@@ -125,8 +125,7 @@ static void __init rd88f5181l_ge_init(void)
*/
orion5x_ehci0_init();
orion5x_eth_init(&rd88f5181l_ge_eth_data);
- orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data,
- gpio_to_irq(8));
+ orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data);
orion5x_i2c_init();
orion5x_uart0_init();
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 4bf80dd5478c..67ee8571b03c 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -40,7 +40,7 @@ static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = {
.port_names[5] = "cpu",
};
-static struct dsa_platform_data rd88f6183ap_ge_switch_plat_data = {
+static struct dsa_platform_data __initdata rd88f6183ap_ge_switch_plat_data = {
.nr_chips = 1,
.chip = &rd88f6183ap_ge_switch_chip_data,
};
@@ -71,7 +71,6 @@ static struct spi_board_info __initdata rd88f6183ap_ge_spi_slave_info[] = {
{
.modalias = "m25p80",
.platform_data = &rd88f6183ap_ge_spi_slave_data,
- .irq = NO_IRQ,
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = 0,
@@ -90,8 +89,7 @@ static void __init rd88f6183ap_ge_init(void)
*/
orion5x_ehci0_init();
orion5x_eth_init(&rd88f6183ap_ge_eth_data);
- orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data,
- gpio_to_irq(3));
+ orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data);
spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
orion5x_spi_init();
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index 4e1e5c8f6111..4dbcdbe1de7c 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -106,7 +106,7 @@ static struct dsa_chip_data wnr854t_switch_chip_data = {
.port_names[7] = "lan2",
};
-static struct dsa_platform_data wnr854t_switch_plat_data = {
+static struct dsa_platform_data __initdata wnr854t_switch_plat_data = {
.nr_chips = 1,
.chip = &wnr854t_switch_chip_data,
};
@@ -124,7 +124,7 @@ static void __init wnr854t_init(void)
* Configure peripherals.
*/
orion5x_eth_init(&wnr854t_eth_data);
- orion5x_eth_switch_init(&wnr854t_switch_plat_data, NO_IRQ);
+ orion5x_eth_switch_init(&wnr854t_switch_plat_data);
orion5x_uart0_init();
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 61e9027ef224..a6a8c4648d74 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -191,7 +191,7 @@ static struct dsa_chip_data wrt350n_v2_switch_chip_data = {
.port_names[7] = "lan4",
};
-static struct dsa_platform_data wrt350n_v2_switch_plat_data = {
+static struct dsa_platform_data __initdata wrt350n_v2_switch_plat_data = {
.nr_chips = 1,
.chip = &wrt350n_v2_switch_chip_data,
};
@@ -210,7 +210,7 @@ static void __init wrt350n_v2_init(void)
*/
orion5x_ehci0_init();
orion5x_eth_init(&wrt350n_v2_eth_data);
- orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data, NO_IRQ);
+ orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data);
orion5x_uart0_init();
mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index cd894d69e766..76fbc115ec33 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -4,6 +4,17 @@ menu "Intel PXA2xx/PXA3xx Implementations"
comment "Intel/Marvell Dev Platforms (sorted by hardware release time)"
+config MACH_PXA25X_DT
+ bool "Support PXA25x platforms from device tree"
+ select PINCTRL
+ select POWER_SUPPLY
+ select PXA25x
+ select USE_OF
+ help
+ Include support for Marvell PXA25x based platforms using
+ the device tree. There is no need to select any other
+ machine type when MACH_PXA25X_DT is enabled.
+
config MACH_PXA27X_DT
bool "Support PXA27x platforms from device tree"
select PINCTRL
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 2ceed407eda9..ef25dc597f30 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -19,8 +19,9 @@ obj-$(CONFIG_CPU_PXA930) += pxa930.o
# NOTE: keep the order of boards in accordance to their order in Kconfig
# Device Tree support
-obj-$(CONFIG_MACH_PXA3XX_DT) += pxa-dt.o
+obj-$(CONFIG_MACH_PXA25X_DT) += pxa-dt.o
obj-$(CONFIG_MACH_PXA27X_DT) += pxa-dt.o
+obj-$(CONFIG_MACH_PXA3XX_DT) += pxa-dt.o
# Intel/Marvell Dev Platforms
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index d9206811be9b..c71c483f410e 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -131,16 +131,11 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
return is_resume;
}
-static unsigned long corgi_charger_wakeup(void)
+static bool corgi_charger_wakeup(void)
{
- unsigned long ret;
-
- ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN))
- | (!gpio_get_value(CORGI_GPIO_KEY_INT)
- << GPIO_bit(CORGI_GPIO_KEY_INT))
- | (!gpio_get_value(CORGI_GPIO_WAKEUP)
- << GPIO_bit(CORGI_GPIO_WAKEUP));
- return ret;
+ return !gpio_get_value(CORGI_GPIO_AC_IN) ||
+ !gpio_get_value(CORGI_GPIO_KEY_INT) ||
+ !gpio_get_value(CORGI_GPIO_WAKEUP);
}
unsigned long corgipm_read_devdata(int type)
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 4a13c32fb705..04580c407276 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -54,3 +54,4 @@ extern struct platform_device pxa3xx_device_gpio;
extern struct platform_device pxa93x_device_gpio;
void __init pxa_register_device(struct platform_device *dev, void *data);
+void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 0b1dbb54871a..75e3f611e5d8 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -33,14 +33,12 @@ extern void __init pxa26x_init_irq(void);
#define pxa27x_handle_irq ichp_handle_irq
extern int __init pxa27x_clocks_init(void);
-extern void __init pxa27x_dt_init_irq(void);
extern unsigned pxa27x_get_clk_frequency_khz(int);
extern void __init pxa27x_init_irq(void);
extern void __init pxa27x_map_io(void);
#define pxa3xx_handle_irq ichp_handle_irq
extern int __init pxa3xx_clocks_init(void);
-extern void __init pxa3xx_dt_init_irq(void);
extern void __init pxa3xx_init_irq(void);
extern void __init pxa3xx_map_io(void);
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 4a2f9aba93ea..66184f5cbe40 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -24,10 +24,10 @@
#include <linux/input.h>
#include <linux/input/navpoint.h>
#include <linux/lcd.h>
-#include <linux/mfd/htc-egpio.h>
#include <linux/mfd/asic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
+#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/driver.h>
diff --git a/arch/arm/mach-pxa/include/mach/dma.h b/arch/arm/mach-pxa/include/mach/dma.h
index 5bd55894a48d..20026bdc6b24 100644
--- a/arch/arm/mach-pxa/include/mach/dma.h
+++ b/arch/arm/mach-pxa/include/mach/dma.h
@@ -17,5 +17,4 @@
/* DMA Controller Registers Definitions */
#define DMAC_REGS_VIRT io_p2v(0x40000000)
-#include <plat/dma.h>
#endif /* _ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index abc918169367..b413e36506af 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -20,10 +20,10 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
-#include <linux/mfd/htc-egpio.h>
#include <linux/mfd/htc-pasic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
+#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/driver.h>
@@ -121,10 +121,6 @@ static unsigned long magician_pin_config[] __initdata = {
GPIO107_GPIO, /* DS1WM_IRQ */
GPIO108_GPIO, /* GSM_READY */
GPIO115_GPIO, /* nPEN_IRQ */
-
- /* I2C */
- GPIO117_I2C_SCL,
- GPIO118_I2C_SDA,
};
/*
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 388463b99090..e7450fb49d24 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -104,8 +104,9 @@ static int __init pxa_pm_init(void)
return -EINVAL;
}
- sleep_save = kmalloc(pxa_cpu_pm_fns->save_count * sizeof(unsigned long),
- GFP_KERNEL);
+ sleep_save = kmalloc_array(pxa_cpu_pm_fns->save_count,
+ sizeof(*sleep_save),
+ GFP_KERNEL);
if (!sleep_save) {
printk(KERN_ERR "failed to alloc memory for pm save\n");
return -ENOMEM;
diff --git a/arch/arm/mach-pxa/pxa-dt.c b/arch/arm/mach-pxa/pxa-dt.c
index f128133a8f30..aa9b255f5570 100644
--- a/arch/arm/mach-pxa/pxa-dt.c
+++ b/arch/arm/mach-pxa/pxa-dt.c
@@ -18,20 +18,16 @@
#include "generic.h"
-#ifdef CONFIG_PXA3xx
-static const char *const pxa3xx_dt_board_compat[] __initconst = {
- "marvell,pxa300",
- "marvell,pxa310",
- "marvell,pxa320",
+#ifdef CONFIG_PXA25x
+static const char * const pxa25x_dt_board_compat[] __initconst = {
+ "marvell,pxa250",
NULL,
};
-DT_MACHINE_START(PXA_DT, "Marvell PXA3xx (Device Tree Support)")
- .map_io = pxa3xx_map_io,
- .init_irq = pxa3xx_dt_init_irq,
- .handle_irq = pxa3xx_handle_irq,
+DT_MACHINE_START(PXA25X_DT, "Marvell PXA25x (Device Tree Support)")
+ .map_io = pxa25x_map_io,
.restart = pxa_restart,
- .dt_compat = pxa3xx_dt_board_compat,
+ .dt_compat = pxa25x_dt_board_compat,
MACHINE_END
#endif
@@ -41,11 +37,24 @@ static const char * const pxa27x_dt_board_compat[] __initconst = {
NULL,
};
-DT_MACHINE_START(PXA27X_DT, "Marvell PXA2xx (Device Tree Support)")
+DT_MACHINE_START(PXA27X_DT, "Marvell PXA27x (Device Tree Support)")
.map_io = pxa27x_map_io,
- .init_irq = pxa27x_dt_init_irq,
- .handle_irq = pxa27x_handle_irq,
.restart = pxa_restart,
.dt_compat = pxa27x_dt_board_compat,
MACHINE_END
#endif
+
+#ifdef CONFIG_PXA3xx
+static const char *const pxa3xx_dt_board_compat[] __initconst = {
+ "marvell,pxa300",
+ "marvell,pxa310",
+ "marvell,pxa320",
+ NULL,
+};
+
+DT_MACHINE_START(PXA_DT, "Marvell PXA3xx (Device Tree Support)")
+ .map_io = pxa3xx_map_io,
+ .restart = pxa_restart,
+ .dt_compat = pxa3xx_dt_board_compat,
+MACHINE_END
+#endif
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 823504f48f80..12b94357fbc1 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -25,6 +25,7 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
@@ -151,6 +152,16 @@ void __init pxa26x_init_irq(void)
}
#endif
+static int __init
+pxa25x_dt_init_irq(struct device_node *node, struct device_node *parent)
+{
+ pxa_dt_irq_init(pxa25x_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
+}
+IRQCHIP_DECLARE(pxa25x_intc, "marvell,pxa-intc", pxa25x_dt_init_irq);
+
static struct map_desc pxa25x_io_desc[] __initdata = {
{ /* Mem Ctl */
.virtual = (unsigned long)SMEMC_VIRT,
@@ -198,20 +209,17 @@ static int __init pxa25x_init(void)
reset_status = RCSR;
- if ((ret = pxa_init_dma(IRQ_DMA, 16)))
- return ret;
-
pxa25x_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
- pxa2xx_set_dmac_info(16, 40);
- pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
- ret = platform_add_devices(pxa25x_devices,
- ARRAY_SIZE(pxa25x_devices));
- if (ret)
- return ret;
+ if (!of_have_populated_dt()) {
+ pxa2xx_set_dmac_info(16, 40);
+ pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
+ ret = platform_add_devices(pxa25x_devices,
+ ARRAY_SIZE(pxa25x_devices));
+ }
}
return ret;
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2eaa341dd3f8..c0185c5c5a08 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqchip.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
@@ -233,11 +234,15 @@ void __init pxa27x_init_irq(void)
pxa_init_irq(34, pxa27x_set_wake);
}
-void __init pxa27x_dt_init_irq(void)
+static int __init
+pxa27x_dt_init_irq(struct device_node *node, struct device_node *parent)
{
- if (IS_ENABLED(CONFIG_OF))
- pxa_dt_irq_init(pxa27x_set_wake);
+ pxa_dt_irq_init(pxa27x_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
}
+IRQCHIP_DECLARE(pxa27x_intc, "marvell,pxa-intc", pxa27x_dt_init_irq);
static struct map_desc pxa27x_io_desc[] __initdata = {
{ /* Mem Ctl */
@@ -300,9 +305,6 @@ static int __init pxa27x_init(void)
reset_status = RCSR;
- if ((ret = pxa_init_dma(IRQ_DMA, 32)))
- return ret;
-
pxa27x_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 3c9184d1d6b9..87acc96388c7 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -19,6 +19,7 @@
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
@@ -356,11 +357,16 @@ void __init pxa3xx_init_irq(void)
}
#ifdef CONFIG_OF
-void __init pxa3xx_dt_init_irq(void)
+static int __init
+pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
__pxa3xx_init_irq();
pxa_dt_irq_init(pxa3xx_set_wake);
+ set_handle_irq(ichp_handle_irq);
+
+ return 0;
}
+IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif /* CONFIG_OF */
static struct map_desc pxa3xx_io_desc[] __initdata = {
@@ -438,9 +444,6 @@ static int __init pxa3xx_init(void)
*/
NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
- if ((ret = pxa_init_dma(IRQ_DMA, 32)))
- return ret;
-
pxa3xx_init_pm();
register_syscore_ops(&pxa_irq_syscore_ops);
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index 2385052b0ce1..e362f865fcd2 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
unsigned long pending;
unsigned int bit;
- pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
- for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
- generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+ do {
+ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
+ generic_handle_irq(irq_find_mapping(fpga->irqdomain,
+ bit));
+ }
+ } while (pending);
return IRQ_HANDLED;
}
-static void cplds_irq_mask_ack(struct irq_data *d)
+static void cplds_irq_mask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int set, bit = BIT(cplds_irq);
+ unsigned int bit = BIT(cplds_irq);
fpga->irq_mask &= ~bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
- set = readl(fpga->base + FPGA_IRQ_SET_CLR);
- writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
}
static void cplds_irq_unmask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int bit = BIT(cplds_irq);
+ unsigned int set, bit = BIT(cplds_irq);
+
+ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
fpga->irq_mask |= bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
@@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d)
static struct irq_chip cplds_irq_chip = {
.name = "pxa_cplds",
- .irq_mask_ack = cplds_irq_mask_ack,
+ .irq_ack = cplds_irq_mask,
+ .irq_mask = cplds_irq_mask,
.irq_unmask = cplds_irq_unmask,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index b80eab9993c5..249b7bd5fbc4 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -744,7 +744,7 @@ static int sharpsl_off_charge_battery(void)
time = RCNR;
while (1) {
/* Check if any wakeup event had occurred */
- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ if (sharpsl_pm.machinfo->charger_wakeup())
return 0;
/* Check for timeout */
if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.h b/arch/arm/mach-pxa/sharpsl_pm.h
index 905be6755f04..fa75b6df8134 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.h
+++ b/arch/arm/mach-pxa/sharpsl_pm.h
@@ -34,7 +34,7 @@ struct sharpsl_charger_machinfo {
#define SHARPSL_STATUS_LOCK 5
#define SHARPSL_STATUS_CHRGFULL 6
#define SHARPSL_STATUS_FATAL 7
- unsigned long (*charger_wakeup)(void);
+ bool (*charger_wakeup)(void);
int (*should_wakeup)(unsigned int resume_on_alarm);
void (*backlight_limit)(int);
int (*backlight_get_status) (void);
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index ea9f9034cb54..4e64a140252e 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -165,13 +165,10 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm)
return is_resume;
}
-static unsigned long spitz_charger_wakeup(void)
+static bool spitz_charger_wakeup(void)
{
- unsigned long ret;
- ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT)
- << GPIO_bit(SPITZ_GPIO_KEY_INT))
- | gpio_get_value(SPITZ_GPIO_SYNC));
- return ret;
+ return !gpio_get_value(SPITZ_GPIO_KEY_INT) ||
+ gpio_get_value(SPITZ_GPIO_SYNC);
}
unsigned long spitzpm_read_devdata(int type)
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index e324375fa919..12878e9a2c0c 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,2 +1 @@
-obj-y := board.o
obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 70ab4a25a5f8..1d7c83e73ffb 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -2,34 +2,29 @@ menuconfig ARCH_REALVIEW
bool "ARM Ltd. RealView family"
depends on ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
select ARM_AMBA
+ select ARM_GIC
select ARM_TIMER_SP804
+ select CLK_SP810
select COMMON_CLK_VERSATILE
select GPIO_PL061 if GPIOLIB
- select ICST
- select PLAT_VERSATILE
- select PLAT_VERSATILE_SCHED_CLOCK
- help
- This enables support for ARM Ltd RealView boards.
-
-if ARCH_REALVIEW
-
-config REALVIEW_DT
- bool "Support RealView(R) Device Tree based boot"
- select ARM_GIC
- select CLK_SP810
- select HAVE_SMP
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select HAVE_PATA_PLATFORM
+ select HAVE_TCM
select ICST
select MACH_REALVIEW_EB if ARCH_MULTI_V5
select MFD_SYSCON
+ select PLAT_VERSATILE
+ select PLAT_VERSATILE_SCHED_CLOCK
select POWER_RESET
select POWER_RESET_VERSATILE
select POWER_SUPPLY
- select SMP_ON_UP if SMP
select SOC_REALVIEW
select USE_OF
help
- Include support for booting the ARM(R) RealView(R) evaluation
- boards using a device tree machine description.
+ This enables support for ARM Ltd RealView boards.
+
+if ARCH_REALVIEW
config MACH_REALVIEW_EB
bool "Support RealView(R) Emulation Baseboard"
@@ -60,8 +55,6 @@ config REALVIEW_EB_ARM1176
config REALVIEW_EB_A9MP
bool "Support Multicore Cortex-A9 Tile"
depends on MACH_REALVIEW_EB && ARCH_MULTI_V7
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
@@ -71,30 +64,15 @@ config REALVIEW_EB_A9MP
config REALVIEW_EB_ARM11MP
bool "Support ARM11MPCore Tile"
depends on MACH_REALVIEW_EB && ARCH_MULTI_V6
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
Enable support for the ARM11MPCore tile fitted to the Realview(R)
Emulation Baseboard platform.
-config REALVIEW_EB_ARM11MP_REVB
- bool "Support ARM11MPCore RevB Tile"
- depends on REALVIEW_EB_ARM11MP && ARCH_MULTI_V6
- help
- Enable support for the ARM11MPCore Revision B tile on the
- Realview(R) Emulation Baseboard platform. Since there are device
- address differences, a kernel built with this option enabled is
- not compatible with other revisions of the ARM11MPCore tile.
-
config MACH_REALVIEW_PB11MP
bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
depends on ARCH_MULTI_V6
- select ARM_GIC
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select HAVE_PATA_PLATFORM
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
@@ -106,7 +84,6 @@ config MACH_REALVIEW_PB11MP
config MACH_REALVIEW_PB1176
bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
depends on ARCH_MULTI_V6
- select ARM_GIC
select CPU_V6
select HAVE_TCM
select MIGHT_HAVE_CACHE_L2X0
@@ -114,20 +91,9 @@ config MACH_REALVIEW_PB1176
Include support for the ARM(R) RealView(R) Platform Baseboard for
ARM1176JZF-S.
-config REALVIEW_PB1176_SECURE_FLASH
- bool "Allow access to the secure flash memory block"
- depends on MACH_REALVIEW_PB1176
- default n
- help
- Select this option if Linux will only run in secure mode on the
- RealView PB1176 platform and access to the secure flash memory
- block (64MB @ 0x3c000000) is required.
-
config MACH_REALVIEW_PBA8
bool "Support RealView(R) Platform Baseboard for Cortex(tm)-A8 platform"
depends on ARCH_MULTI_V7
- select ARM_GIC
- select HAVE_PATA_PLATFORM
help
Include support for the ARM(R) RealView Platform Baseboard for
Cortex(tm)-A8. This platform has an on-board Cortex-A8 and has
@@ -136,10 +102,6 @@ config MACH_REALVIEW_PBA8
config MACH_REALVIEW_PBX
bool "Support RealView(R) Platform Baseboard Explore for Cortex-A9"
depends on ARCH_MULTI_V7
- select ARM_GIC
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select HAVE_PATA_PLATFORM
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
select ZONE_DMA
@@ -147,16 +109,4 @@ config MACH_REALVIEW_PBX
Include support for the ARM(R) RealView(R) Platform Baseboard
Explore.
-config REALVIEW_HIGH_PHYS_OFFSET
- bool "High physical base address for the RealView platform"
- depends on MMU && !MACH_REALVIEW_PB1176
- default y
- help
- RealView boards other than PB1176 have the RAM available at
- 0x70000000, 256MB of which being mirrored at 0x00000000. If
- the board supports 512MB of RAM, this option allows the
- memory to be accessed contiguously at the high physical
- offset. On the PBX board, disabling this option allows 1GB of
- RAM to be used with HIGHMEM.
-
endif
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index 404882130956..adf39ad71cc3 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -3,16 +3,6 @@
#
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
-obj-y := core.o
-obj-$(CONFIG_REALVIEW_DT) += realview-dt.o
+obj-y += realview-dt.o
obj-$(CONFIG_SMP) += platsmp-dt.o
-
-ifdef CONFIG_ATAGS
-obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o
-obj-$(CONFIG_MACH_REALVIEW_PB11MP) += realview_pb11mp.o
-obj-$(CONFIG_MACH_REALVIEW_PB1176) += realview_pb1176.o
-obj-$(CONFIG_MACH_REALVIEW_PBA8) += realview_pba8.o
-obj-$(CONFIG_MACH_REALVIEW_PBX) += realview_pbx.o
-obj-$(CONFIG_SMP) += platsmp.o
-endif
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-realview/board-eb.h b/arch/arm/mach-realview/board-eb.h
deleted file mode 100644
index a850ae6945b0..000000000000
--- a/arch/arm/mach-realview/board-eb.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2007 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __ASM_ARCH_BOARD_EB_H
-#define __ASM_ARCH_BOARD_EB_H
-
-#include "platform.h"
-
-/*
- * RealView EB + ARM11MPCore peripheral addresses
- */
-#define REALVIEW_EB_UART0_BASE 0x10009000 /* UART 0 */
-#define REALVIEW_EB_UART1_BASE 0x1000A000 /* UART 1 */
-#define REALVIEW_EB_UART2_BASE 0x1000B000 /* UART 2 */
-#define REALVIEW_EB_UART3_BASE 0x1000C000 /* UART 3 */
-#define REALVIEW_EB_SSP_BASE 0x1000D000 /* Synchronous Serial Port */
-#define REALVIEW_EB_WATCHDOG_BASE 0x10010000 /* watchdog interface */
-#define REALVIEW_EB_TIMER0_1_BASE 0x10011000 /* Timer 0 and 1 */
-#define REALVIEW_EB_TIMER2_3_BASE 0x10012000 /* Timer 2 and 3 */
-#define REALVIEW_EB_GPIO0_BASE 0x10013000 /* GPIO port 0 */
-#define REALVIEW_EB_RTC_BASE 0x10017000 /* Real Time Clock */
-#define REALVIEW_EB_CLCD_BASE 0x10020000 /* CLCD */
-#define REALVIEW_EB_GIC_CPU_BASE 0x10040000 /* Generic interrupt controller CPU interface */
-#define REALVIEW_EB_GIC_DIST_BASE 0x10041000 /* Generic interrupt controller distributor */
-#define REALVIEW_EB_SMC_BASE 0x10080000 /* Static memory controller */
-
-#define REALVIEW_EB_FLASH_BASE 0x40000000
-#define REALVIEW_EB_FLASH_SIZE SZ_64M
-#define REALVIEW_EB_ETH_BASE 0x4E000000 /* Ethernet */
-#define REALVIEW_EB_USB_BASE 0x4F000000 /* USB */
-
-#ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB
-#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x10100000
-#define REALVIEW_EB11MP_L220_BASE 0x10102000 /* L220 registers */
-#define REALVIEW_EB11MP_SYS_PLD_CTRL1 0xD8 /* Register offset for MPCore sysctl */
-#else
-#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x1F000000
-#define REALVIEW_EB11MP_L220_BASE 0x1F002000 /* L220 registers */
-#define REALVIEW_EB11MP_SYS_PLD_CTRL1 0x74 /* Register offset for MPCore sysctl */
-#endif
-
-#define REALVIEW_EB11MP_PRIV_MEM_SIZE SZ_8K
-#define REALVIEW_EB11MP_PRIV_MEM_OFF(x) (REALVIEW_EB11MP_PRIV_MEM_BASE + (x))
-
-#define REALVIEW_EB11MP_SCU_BASE REALVIEW_EB11MP_PRIV_MEM_OFF(0) /* SCU registers */
-#define REALVIEW_EB11MP_GIC_CPU_BASE REALVIEW_EB11MP_PRIV_MEM_OFF(0x0100) /* Generic interrupt controller CPU interface */
-#define REALVIEW_EB11MP_TWD_BASE REALVIEW_EB11MP_PRIV_MEM_OFF(0x0600)
-#define REALVIEW_EB11MP_GIC_DIST_BASE REALVIEW_EB11MP_PRIV_MEM_OFF(0x1000) /* Generic interrupt controller distributor */
-
-/*
- * Core tile identification (REALVIEW_SYS_PROCID)
- */
-#define REALVIEW_EB_PROC_MASK 0xFF000000
-#define REALVIEW_EB_PROC_ARM7TDMI 0x00000000
-#define REALVIEW_EB_PROC_ARM9 0x02000000
-#define REALVIEW_EB_PROC_ARM11 0x04000000
-#define REALVIEW_EB_PROC_ARM11MP 0x06000000
-#define REALVIEW_EB_PROC_A9MP 0x0C000000
-
-#define check_eb_proc(proc_type) \
- ((readl(__io_address(REALVIEW_SYS_PROCID)) & REALVIEW_EB_PROC_MASK) \
- == proc_type)
-
-#ifdef CONFIG_REALVIEW_EB_ARM11MP
-#define core_tile_eb11mp() check_eb_proc(REALVIEW_EB_PROC_ARM11MP)
-#else
-#define core_tile_eb11mp() 0
-#endif
-
-#ifdef CONFIG_REALVIEW_EB_A9MP
-#define core_tile_a9mp() check_eb_proc(REALVIEW_EB_PROC_A9MP)
-#else
-#define core_tile_a9mp() 0
-#endif
-
-#define machine_is_realview_eb_mp() \
- (machine_is_realview_eb() && (core_tile_eb11mp() || core_tile_a9mp()))
-
-#endif /* __ASM_ARCH_BOARD_EB_H */
diff --git a/arch/arm/mach-realview/board-pb1176.h b/arch/arm/mach-realview/board-pb1176.h
deleted file mode 100644
index 29c04a9e1344..000000000000
--- a/arch/arm/mach-realview/board-pb1176.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __ASM_ARCH_BOARD_PB1176_H
-#define __ASM_ARCH_BOARD_PB1176_H
-
-#include "platform.h"
-
-/*
- * Peripheral addresses
- */
-#define REALVIEW_PB1176_UART4_BASE 0x10009000 /* UART 4 */
-#define REALVIEW_PB1176_SCTL_BASE 0x10100000 /* System controller */
-#define REALVIEW_PB1176_SMC_BASE 0x10111000 /* SMC */
-#define REALVIEW_PB1176_DMC_BASE 0x10109000 /* DMC configuration */
-#define REALVIEW_PB1176_SDRAM67_BASE 0x70000000 /* SDRAM banks 6 and 7 */
-#define REALVIEW_PB1176_FLASH_BASE 0x30000000
-#define REALVIEW_PB1176_FLASH_SIZE SZ_64M
-#define REALVIEW_PB1176_SEC_FLASH_BASE 0x3C000000 /* Secure flash */
-#define REALVIEW_PB1176_SEC_FLASH_SIZE SZ_64M
-
-#define REALVIEW_PB1176_TIMER0_1_BASE 0x10104000 /* Timer 0 and 1 */
-#define REALVIEW_PB1176_TIMER2_3_BASE 0x10105000 /* Timer 2 and 3 */
-#define REALVIEW_PB1176_TIMER4_5_BASE 0x10106000 /* Timer 4 and 5 */
-#define REALVIEW_PB1176_WATCHDOG_BASE 0x10107000 /* watchdog interface */
-#define REALVIEW_PB1176_RTC_BASE 0x10108000 /* Real Time Clock */
-#define REALVIEW_PB1176_GPIO0_BASE 0x1010A000 /* GPIO port 0 */
-#define REALVIEW_PB1176_SSP_BASE 0x1010B000 /* Synchronous Serial Port */
-#define REALVIEW_PB1176_UART0_BASE 0x1010C000 /* UART 0 */
-#define REALVIEW_PB1176_UART1_BASE 0x1010D000 /* UART 1 */
-#define REALVIEW_PB1176_UART2_BASE 0x1010E000 /* UART 2 */
-#define REALVIEW_PB1176_UART3_BASE 0x1010F000 /* UART 3 */
-#define REALVIEW_PB1176_CLCD_BASE 0x10112000 /* CLCD */
-#define REALVIEW_PB1176_ETH_BASE 0x3A000000 /* Ethernet */
-#define REALVIEW_PB1176_USB_BASE 0x3B000000 /* USB */
-
-/*
- * PCI regions
- */
-#define REALVIEW_PB1176_PCI_BASE 0x60000000 /* PCI self config */
-#define REALVIEW_PB1176_PCI_CFG_BASE 0x61000000 /* PCI config */
-#define REALVIEW_PB1176_PCI_IO_BASE0 0x62000000 /* PCI IO region */
-#define REALVIEW_PB1176_PCI_MEM_BASE0 0x63000000 /* Memory region 1 */
-#define REALVIEW_PB1176_PCI_MEM_BASE1 0x64000000 /* Memory region 2 */
-#define REALVIEW_PB1176_PCI_MEM_BASE2 0x68000000 /* Memory region 3 */
-
-#define REALVIEW_PB1176_PCI_BASE_SIZE 0x01000000 /* 16MB */
-#define REALVIEW_PB1176_PCI_CFG_BASE_SIZE 0x01000000 /* 16MB */
-#define REALVIEW_PB1176_PCI_IO_BASE0_SIZE 0x01000000 /* 16MB */
-#define REALVIEW_PB1176_PCI_MEM_BASE0_SIZE 0x01000000 /* 16MB */
-#define REALVIEW_PB1176_PCI_MEM_BASE1_SIZE 0x04000000 /* 64MB */
-#define REALVIEW_PB1176_PCI_MEM_BASE2_SIZE 0x08000000 /* 128MB */
-
-#define REALVIEW_DC1176_GIC_CPU_BASE 0x10120000 /* GIC CPU interface, on devchip */
-#define REALVIEW_DC1176_GIC_DIST_BASE 0x10121000 /* GIC distributor, on devchip */
-#define REALVIEW_DC1176_ROM_BASE 0x10200000 /* 16KiB NRAM pseudo-ROM, on devchip */
-#define REALVIEW_PB1176_GIC_CPU_BASE 0x10040000 /* GIC CPU interface, on FPGA */
-#define REALVIEW_PB1176_GIC_DIST_BASE 0x10041000 /* GIC distributor, on FPGA */
-#define REALVIEW_PB1176_L220_BASE 0x10110000 /* L220 registers */
-
-/*
- * Control register SYS_RESETCTL Bit 8 is set to 1 to force a soft reset
- */
-#define REALVIEW_PB1176_SYS_SOFT_RESET 0x0100
-
-#endif /* __ASM_ARCH_BOARD_PB1176_H */
diff --git a/arch/arm/mach-realview/board-pb11mp.h b/arch/arm/mach-realview/board-pb11mp.h
deleted file mode 100644
index b16e6e85e92d..000000000000
--- a/arch/arm/mach-realview/board-pb11mp.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __ASM_ARCH_BOARD_PB11MP_H
-#define __ASM_ARCH_BOARD_PB11MP_H
-
-#include "platform.h"
-
-/*
- * Peripheral addresses
- */
-#define REALVIEW_PB11MP_UART0_BASE 0x10009000 /* UART 0 */
-#define REALVIEW_PB11MP_UART1_BASE 0x1000A000 /* UART 1 */
-#define REALVIEW_PB11MP_UART2_BASE 0x1000B000 /* UART 2 */
-#define REALVIEW_PB11MP_UART3_BASE 0x1000C000 /* UART 3 */
-#define REALVIEW_PB11MP_SSP_BASE 0x1000D000 /* Synchronous Serial Port */
-#define REALVIEW_PB11MP_WATCHDOG0_BASE 0x1000F000 /* Watchdog 0 */
-#define REALVIEW_PB11MP_WATCHDOG_BASE 0x10010000 /* watchdog interface */
-#define REALVIEW_PB11MP_TIMER0_1_BASE 0x10011000 /* Timer 0 and 1 */
-#define REALVIEW_PB11MP_TIMER2_3_BASE 0x10012000 /* Timer 2 and 3 */
-#define REALVIEW_PB11MP_GPIO0_BASE 0x10013000 /* GPIO port 0 */
-#define REALVIEW_PB11MP_RTC_BASE 0x10017000 /* Real Time Clock */
-#define REALVIEW_PB11MP_TIMER4_5_BASE 0x10018000 /* Timer 4/5 */
-#define REALVIEW_PB11MP_TIMER6_7_BASE 0x10019000 /* Timer 6/7 */
-#define REALVIEW_PB11MP_SCTL_BASE 0x1001A000 /* System Controller */
-#define REALVIEW_PB11MP_CLCD_BASE 0x10020000 /* CLCD */
-#define REALVIEW_PB11MP_ONB_SRAM_BASE 0x10060000 /* On-board SRAM */
-#define REALVIEW_PB11MP_DMC_BASE 0x100E0000 /* DMC configuration */
-#define REALVIEW_PB11MP_SMC_BASE 0x100E1000 /* SMC configuration */
-#define REALVIEW_PB11MP_CAN_BASE 0x100E2000 /* CAN bus */
-#define REALVIEW_PB11MP_CF_BASE 0x18000000 /* Compact flash */
-#define REALVIEW_PB11MP_CF_MEM_BASE 0x18003000 /* SMC for Compact flash */
-#define REALVIEW_PB11MP_GIC_CPU_BASE 0x1E000000 /* Generic interrupt controller CPU interface */
-#define REALVIEW_PB11MP_FLASH0_BASE 0x40000000
-#define REALVIEW_PB11MP_FLASH0_SIZE SZ_64M
-#define REALVIEW_PB11MP_FLASH1_BASE 0x44000000
-#define REALVIEW_PB11MP_FLASH1_SIZE SZ_64M
-#define REALVIEW_PB11MP_ETH_BASE 0x4E000000 /* Ethernet */
-#define REALVIEW_PB11MP_USB_BASE 0x4F000000 /* USB */
-#define REALVIEW_PB11MP_GIC_DIST_BASE 0x1E001000 /* Generic interrupt controller distributor */
-#define REALVIEW_PB11MP_LT_BASE 0xC0000000 /* Logic Tile expansion */
-#define REALVIEW_PB11MP_SDRAM6_BASE 0x70000000 /* SDRAM bank 6 256MB */
-#define REALVIEW_PB11MP_SDRAM7_BASE 0x80000000 /* SDRAM bank 7 256MB */
-
-#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74
-
-/*
- * PB11MPCore PCI regions
- */
-#define REALVIEW_PB11MP_PCI_BASE 0x90040000 /* PCI-X Unit base */
-#define REALVIEW_PB11MP_PCI_IO_BASE 0x90050000 /* IO Region on AHB */
-#define REALVIEW_PB11MP_PCI_MEM_BASE 0xA0000000 /* MEM Region on AHB */
-
-#define REALVIEW_PB11MP_PCI_BASE_SIZE 0x10000 /* 16 Kb */
-#define REALVIEW_PB11MP_PCI_IO_SIZE 0x1000 /* 4 Kb */
-#define REALVIEW_PB11MP_PCI_MEM_SIZE 0x20000000 /* 512 MB */
-
-/*
- * Testchip peripheral and fpga gic regions
- */
-#define REALVIEW_TC11MP_PRIV_MEM_BASE 0x1F000000
-#define REALVIEW_TC11MP_PRIV_MEM_SIZE SZ_8K
-#define REALVIEW_TC11MP_SCU_BASE 0x1F000000 /* IRQ, Test chip */
-#define REALVIEW_TC11MP_GIC_CPU_BASE 0x1F000100 /* Test chip interrupt controller CPU interface */
-#define REALVIEW_TC11MP_TWD_BASE 0x1F000600
-#define REALVIEW_TC11MP_GIC_DIST_BASE 0x1F001000 /* Test chip interrupt controller distributor */
-#define REALVIEW_TC11MP_L220_BASE 0x1F002000 /* L220 registers */
-
- /*
- * Values for REALVIEW_SYS_RESET_CTRL
- */
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_CONFIGCLR 0x01
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_CONFIGINIT 0x02
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_DLLRESET 0x03
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_PLLRESET 0x04
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_POR 0x05
-#define REALVIEW_PB11MP_SYS_CTRL_RESET_DoC 0x06
-
-#define REALVIEW_PB11MP_SYS_CTRL_LED (1 << 0)
-
-#endif /* __ASM_ARCH_BOARD_PB11MP_H */
diff --git a/arch/arm/mach-realview/board-pba8.h b/arch/arm/mach-realview/board-pba8.h
deleted file mode 100644
index 6a1391f50373..000000000000
--- a/arch/arm/mach-realview/board-pba8.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __ASM_ARCH_BOARD_PBA8_H
-#define __ASM_ARCH_BOARD_PBA8_H
-
-#include "platform.h"
-
-/*
- * Peripheral addresses
- */
-#define REALVIEW_PBA8_UART0_BASE 0x10009000 /* UART 0 */
-#define REALVIEW_PBA8_UART1_BASE 0x1000A000 /* UART 1 */
-#define REALVIEW_PBA8_UART2_BASE 0x1000B000 /* UART 2 */
-#define REALVIEW_PBA8_UART3_BASE 0x1000C000 /* UART 3 */
-#define REALVIEW_PBA8_SSP_BASE 0x1000D000 /* Synchronous Serial Port */
-#define REALVIEW_PBA8_WATCHDOG0_BASE 0x1000F000 /* Watchdog 0 */
-#define REALVIEW_PBA8_WATCHDOG_BASE 0x10010000 /* watchdog interface */
-#define REALVIEW_PBA8_TIMER0_1_BASE 0x10011000 /* Timer 0 and 1 */
-#define REALVIEW_PBA8_TIMER2_3_BASE 0x10012000 /* Timer 2 and 3 */
-#define REALVIEW_PBA8_GPIO0_BASE 0x10013000 /* GPIO port 0 */
-#define REALVIEW_PBA8_RTC_BASE 0x10017000 /* Real Time Clock */
-#define REALVIEW_PBA8_TIMER4_5_BASE 0x10018000 /* Timer 4/5 */
-#define REALVIEW_PBA8_TIMER6_7_BASE 0x10019000 /* Timer 6/7 */
-#define REALVIEW_PBA8_SCTL_BASE 0x1001A000 /* System Controller */
-#define REALVIEW_PBA8_CLCD_BASE 0x10020000 /* CLCD */
-#define REALVIEW_PBA8_ONB_SRAM_BASE 0x10060000 /* On-board SRAM */
-#define REALVIEW_PBA8_DMC_BASE 0x100E0000 /* DMC configuration */
-#define REALVIEW_PBA8_SMC_BASE 0x100E1000 /* SMC configuration */
-#define REALVIEW_PBA8_CAN_BASE 0x100E2000 /* CAN bus */
-#define REALVIEW_PBA8_GIC_CPU_BASE 0x1E000000 /* Generic interrupt controller CPU interface */
-#define REALVIEW_PBA8_FLASH0_BASE 0x40000000
-#define REALVIEW_PBA8_FLASH0_SIZE SZ_64M
-#define REALVIEW_PBA8_FLASH1_BASE 0x44000000
-#define REALVIEW_PBA8_FLASH1_SIZE SZ_64M
-#define REALVIEW_PBA8_ETH_BASE 0x4E000000 /* Ethernet */
-#define REALVIEW_PBA8_USB_BASE 0x4F000000 /* USB */
-#define REALVIEW_PBA8_GIC_DIST_BASE 0x1E001000 /* Generic interrupt controller distributor */
-#define REALVIEW_PBA8_LT_BASE 0xC0000000 /* Logic Tile expansion */
-#define REALVIEW_PBA8_SDRAM6_BASE 0x70000000 /* SDRAM bank 6 256MB */
-#define REALVIEW_PBA8_SDRAM7_BASE 0x80000000 /* SDRAM bank 7 256MB */
-
-#define REALVIEW_PBA8_SYS_PLD_CTRL1 0x74
-
-/*
- * PBA8 PCI regions
- */
-#define REALVIEW_PBA8_PCI_BASE 0x90040000 /* PCI-X Unit base */
-#define REALVIEW_PBA8_PCI_IO_BASE 0x90050000 /* IO Region on AHB */
-#define REALVIEW_PBA8_PCI_MEM_BASE 0xA0000000 /* MEM Region on AHB */
-
-#define REALVIEW_PBA8_PCI_BASE_SIZE 0x10000 /* 16 Kb */
-#define REALVIEW_PBA8_PCI_IO_SIZE 0x1000 /* 4 Kb */
-#define REALVIEW_PBA8_PCI_MEM_SIZE 0x20000000 /* 512 MB */
-
-#endif /* __ASM_ARCH_BOARD_PBA8_H */
diff --git a/arch/arm/mach-realview/board-pbx.h b/arch/arm/mach-realview/board-pbx.h
deleted file mode 100644
index 5cda480b12bb..000000000000
--- a/arch/arm/mach-realview/board-pbx.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2009 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARCH_BOARD_PBX_H
-#define __ASM_ARCH_BOARD_PBX_H
-
-#include "platform.h"
-
-/*
- * Peripheral addresses
- */
-#define REALVIEW_PBX_UART0_BASE 0x10009000 /* UART 0 */
-#define REALVIEW_PBX_UART1_BASE 0x1000A000 /* UART 1 */
-#define REALVIEW_PBX_UART2_BASE 0x1000B000 /* UART 2 */
-#define REALVIEW_PBX_UART3_BASE 0x1000C000 /* UART 3 */
-#define REALVIEW_PBX_SSP_BASE 0x1000D000 /* Synchronous Serial Port */
-#define REALVIEW_PBX_WATCHDOG0_BASE 0x1000F000 /* Watchdog 0 */
-#define REALVIEW_PBX_WATCHDOG_BASE 0x10010000 /* watchdog interface */
-#define REALVIEW_PBX_TIMER0_1_BASE 0x10011000 /* Timer 0 and 1 */
-#define REALVIEW_PBX_TIMER2_3_BASE 0x10012000 /* Timer 2 and 3 */
-#define REALVIEW_PBX_GPIO0_BASE 0x10013000 /* GPIO port 0 */
-#define REALVIEW_PBX_RTC_BASE 0x10017000 /* Real Time Clock */
-#define REALVIEW_PBX_TIMER4_5_BASE 0x10018000 /* Timer 4/5 */
-#define REALVIEW_PBX_TIMER6_7_BASE 0x10019000 /* Timer 6/7 */
-#define REALVIEW_PBX_SCTL_BASE 0x1001A000 /* System Controller */
-#define REALVIEW_PBX_CLCD_BASE 0x10020000 /* CLCD */
-#define REALVIEW_PBX_ONB_SRAM_BASE 0x10060000 /* On-board SRAM */
-#define REALVIEW_PBX_DMC_BASE 0x100E0000 /* DMC configuration */
-#define REALVIEW_PBX_SMC_BASE 0x100E1000 /* SMC configuration */
-#define REALVIEW_PBX_CAN_BASE 0x100E2000 /* CAN bus */
-#define REALVIEW_PBX_GIC_CPU_BASE 0x1E000000 /* Generic interrupt controller CPU interface */
-#define REALVIEW_PBX_FLASH0_BASE 0x40000000
-#define REALVIEW_PBX_FLASH0_SIZE SZ_64M
-#define REALVIEW_PBX_FLASH1_BASE 0x44000000
-#define REALVIEW_PBX_FLASH1_SIZE SZ_64M
-#define REALVIEW_PBX_ETH_BASE 0x4E000000 /* Ethernet */
-#define REALVIEW_PBX_USB_BASE 0x4F000000 /* USB */
-#define REALVIEW_PBX_GIC_DIST_BASE 0x1E001000 /* Generic interrupt controller distributor */
-#define REALVIEW_PBX_LT_BASE 0xC0000000 /* Logic Tile expansion */
-#define REALVIEW_PBX_SDRAM6_BASE 0x70000000 /* SDRAM bank 6 256MB */
-#define REALVIEW_PBX_SDRAM7_BASE 0x80000000 /* SDRAM bank 7 256MB */
-
-/*
- * Tile-specific addresses
- */
-#define REALVIEW_PBX_TILE_SCU_BASE 0x1F000000 /* SCU registers */
-#define REALVIEW_PBX_TILE_GIC_CPU_BASE 0x1F000100 /* Private Generic interrupt controller CPU interface */
-#define REALVIEW_PBX_TILE_TWD_BASE 0x1F000600
-#define REALVIEW_PBX_TILE_TWD_PERCPU_BASE 0x1F000700
-#define REALVIEW_PBX_TILE_TWD_SIZE 0x00000100
-#define REALVIEW_PBX_TILE_GIC_DIST_BASE 0x1F001000 /* Private Generic interrupt controller distributor */
-#define REALVIEW_PBX_TILE_L220_BASE 0x1F002000 /* L220 registers */
-
-#define REALVIEW_PBX_SYS_PLD_CTRL1 0x74
-
-/*
- * PBX PCI regions
- */
-#define REALVIEW_PBX_PCI_BASE 0x90040000 /* PCI-X Unit base */
-#define REALVIEW_PBX_PCI_IO_BASE 0x90050000 /* IO Region on AHB */
-#define REALVIEW_PBX_PCI_MEM_BASE 0xA0000000 /* MEM Region on AHB */
-
-#define REALVIEW_PBX_PCI_BASE_SIZE 0x10000 /* 16 Kb */
-#define REALVIEW_PBX_PCI_IO_SIZE 0x1000 /* 4 Kb */
-#define REALVIEW_PBX_PCI_MEM_SIZE 0x20000000 /* 512 MB */
-
-/*
- * Core tile identification (REALVIEW_SYS_PROCID)
- */
-#define REALVIEW_PBX_PROC_MASK 0xFF000000
-#define REALVIEW_PBX_PROC_ARM7TDMI 0x00000000
-#define REALVIEW_PBX_PROC_ARM9 0x02000000
-#define REALVIEW_PBX_PROC_ARM11 0x04000000
-#define REALVIEW_PBX_PROC_ARM11MP 0x06000000
-#define REALVIEW_PBX_PROC_A9MP 0x0C000000
-#define REALVIEW_PBX_PROC_A8 0x0E000000
-
-#define check_pbx_proc(proc_type) \
- ((readl(__io_address(REALVIEW_SYS_PROCID)) & REALVIEW_PBX_PROC_MASK) \
- == proc_type)
-
-#ifdef CONFIG_MACH_REALVIEW_PBX
-#define core_tile_pbx11mp() check_pbx_proc(REALVIEW_PBX_PROC_ARM11MP)
-#define core_tile_pbxa9mp() check_pbx_proc(REALVIEW_PBX_PROC_A9MP)
-#define core_tile_pbxa8() check_pbx_proc(REALVIEW_PBX_PROC_A8)
-#else
-#define core_tile_pbx11mp() 0
-#define core_tile_pbxa9mp() 0
-#define core_tile_pbxa8() 0
-#endif
-
-#endif /* __ASM_ARCH_BOARD_PBX_H */
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
deleted file mode 100644
index a0ead0ae23d6..000000000000
--- a/arch/arm/mach-realview/core.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/core.c
- *
- * Copyright (C) 1999 - 2003 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/platform_data/video-clcd-versatile.h>
-#include <linux/io.h>
-#include <linux/smsc911x.h>
-#include <linux/smc91x.h>
-#include <linux/ata_platform.h>
-#include <linux/amba/mmci.h>
-#include <linux/gfp.h>
-#include <linux/mtd/physmap.h>
-#include <linux/memblock.h>
-
-#include <clocksource/timer-sp804.h>
-#include "hardware.h"
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/hardware/icst.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-
-#include "platform.h"
-
-#include <plat/sched_clock.h>
-
-#include "core.h"
-
-#define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET)
-
-static void realview_flash_set_vpp(struct platform_device *pdev, int on)
-{
- u32 val;
-
- val = __raw_readl(REALVIEW_FLASHCTRL);
- if (on)
- val |= REALVIEW_FLASHPROG_FLVPPEN;
- else
- val &= ~REALVIEW_FLASHPROG_FLVPPEN;
- __raw_writel(val, REALVIEW_FLASHCTRL);
-}
-
-static struct physmap_flash_data realview_flash_data = {
- .width = 4,
- .set_vpp = realview_flash_set_vpp,
-};
-
-struct platform_device realview_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &realview_flash_data,
- },
-};
-
-int realview_flash_register(struct resource *res, u32 num)
-{
- realview_flash_device.resource = res;
- realview_flash_device.num_resources = num;
- return platform_device_register(&realview_flash_device);
-}
-
-static struct smsc911x_platform_config smsc911x_config = {
- .flags = SMSC911X_USE_32BIT,
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
- .phy_interface = PHY_INTERFACE_MODE_MII,
-};
-
-static struct smc91x_platdata smc91x_platdata = {
- .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
- SMC91X_NOWAIT,
-};
-
-static struct platform_device realview_eth_device = {
- .name = "smsc911x",
- .id = 0,
- .num_resources = 2,
-};
-
-int realview_eth_register(const char *name, struct resource *res)
-{
- if (name)
- realview_eth_device.name = name;
- realview_eth_device.resource = res;
- if (strcmp(realview_eth_device.name, "smsc911x") == 0)
- realview_eth_device.dev.platform_data = &smsc911x_config;
- else
- realview_eth_device.dev.platform_data = &smc91x_platdata;
-
- return platform_device_register(&realview_eth_device);
-}
-
-struct platform_device realview_usb_device = {
- .name = "isp1760",
- .num_resources = 2,
-};
-
-int realview_usb_register(struct resource *res)
-{
- realview_usb_device.resource = res;
- return platform_device_register(&realview_usb_device);
-}
-
-static struct pata_platform_info pata_platform_data = {
- .ioport_shift = 1,
-};
-
-static struct resource pata_resources[] = {
- [0] = {
- .start = REALVIEW_CF_BASE,
- .end = REALVIEW_CF_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = REALVIEW_CF_BASE + 0x100,
- .end = REALVIEW_CF_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-struct platform_device realview_cf_device = {
- .name = "pata_platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(pata_resources),
- .resource = pata_resources,
- .dev = {
- .platform_data = &pata_platform_data,
- },
-};
-
-static struct resource realview_leds_resources[] = {
- {
- .start = REALVIEW_SYS_BASE + REALVIEW_SYS_LED_OFFSET,
- .end = REALVIEW_SYS_BASE + REALVIEW_SYS_LED_OFFSET + 4,
- .flags = IORESOURCE_MEM,
- },
-};
-
-struct platform_device realview_leds_device = {
- .name = "versatile-leds",
- .id = -1,
- .num_resources = ARRAY_SIZE(realview_leds_resources),
- .resource = realview_leds_resources,
-};
-
-static struct resource realview_i2c_resource = {
- .start = REALVIEW_I2C_BASE,
- .end = REALVIEW_I2C_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
-};
-
-struct platform_device realview_i2c_device = {
- .name = "versatile-i2c",
- .id = 0,
- .num_resources = 1,
- .resource = &realview_i2c_resource,
-};
-
-static struct i2c_board_info realview_i2c_board_info[] = {
- {
- I2C_BOARD_INFO("ds1338", 0xd0 >> 1),
- },
-};
-
-static int __init realview_i2c_init(void)
-{
- return i2c_register_board_info(0, realview_i2c_board_info,
- ARRAY_SIZE(realview_i2c_board_info));
-}
-arch_initcall(realview_i2c_init);
-
-#define REALVIEW_SYSMCI (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_MCI_OFFSET)
-
-/*
- * This is only used if GPIOLIB support is disabled
- */
-static unsigned int realview_mmc_status(struct device *dev)
-{
- struct amba_device *adev = container_of(dev, struct amba_device, dev);
- u32 mask;
-
- if (machine_is_realview_pb1176()) {
- static bool inserted = false;
-
- /*
- * The PB1176 does not have the status register,
- * assume it is inserted at startup, then invert
- * for each call so card insertion/removal will
- * be detected anyway. This will not be called if
- * GPIO on PL061 is active, which is the proper
- * way to do this on the PB1176.
- */
- inserted = !inserted;
- return inserted ? 0 : 1;
- }
-
- if (adev->res.start == REALVIEW_MMCI0_BASE)
- mask = 1;
- else
- mask = 2;
-
- return readl(REALVIEW_SYSMCI) & mask;
-}
-
-struct mmci_platform_data realview_mmc0_plat_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .status = realview_mmc_status,
- .gpio_wp = 17,
- .gpio_cd = 16,
- .cd_invert = true,
-};
-
-struct mmci_platform_data realview_mmc1_plat_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .status = realview_mmc_status,
- .gpio_wp = 19,
- .gpio_cd = 18,
- .cd_invert = true,
-};
-
-void __init realview_init_early(void)
-{
- void __iomem *sys = __io_address(REALVIEW_SYS_BASE);
-
- versatile_sched_clock_init(sys + REALVIEW_SYS_24MHz_OFFSET, 24000000);
-}
-
-/*
- * CLCD support.
- */
-#define SYS_CLCD_NLCDIOON (1 << 2)
-#define SYS_CLCD_VDDPOSSWITCH (1 << 3)
-#define SYS_CLCD_PWR3V5SWITCH (1 << 4)
-#define SYS_CLCD_ID_MASK (0x1f << 8)
-#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8)
-#define SYS_CLCD_ID_UNKNOWN_8_4 (0x01 << 8)
-#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8)
-#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
-#define SYS_CLCD_ID_VGA (0x1f << 8)
-
-/*
- * Disable all display connectors on the interface module.
- */
-static void realview_clcd_disable(struct clcd_fb *fb)
-{
- void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
- u32 val;
-
- val = readl(sys_clcd);
- val &= ~SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH;
- writel(val, sys_clcd);
-}
-
-/*
- * Enable the relevant connector on the interface module.
- */
-static void realview_clcd_enable(struct clcd_fb *fb)
-{
- void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
- u32 val;
-
- /*
- * Enable the PSUs
- */
- val = readl(sys_clcd);
- val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH;
- writel(val, sys_clcd);
-}
-
-/*
- * Detect which LCD panel is connected, and return the appropriate
- * clcd_panel structure. Note: we do not have any information on
- * the required timings for the 8.4in panel, so we presently assume
- * VGA timings.
- */
-static int realview_clcd_setup(struct clcd_fb *fb)
-{
- void __iomem *sys_clcd = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_CLCD_OFFSET;
- const char *panel_name, *vga_panel_name;
- unsigned long framesize;
- u32 val;
-
- if (machine_is_realview_eb()) {
- /* VGA, 16bpp */
- framesize = 640 * 480 * 2;
- vga_panel_name = "VGA";
- } else {
- /* XVGA, 16bpp */
- framesize = 1024 * 768 * 2;
- vga_panel_name = "XVGA";
- }
-
- val = readl(sys_clcd) & SYS_CLCD_ID_MASK;
- if (val == SYS_CLCD_ID_SANYO_3_8)
- panel_name = "Sanyo TM38QV67A02A";
- else if (val == SYS_CLCD_ID_SANYO_2_5)
- panel_name = "Sanyo QVGA Portrait";
- else if (val == SYS_CLCD_ID_EPSON_2_2)
- panel_name = "Epson L2F50113T00";
- else if (val == SYS_CLCD_ID_VGA)
- panel_name = vga_panel_name;
- else {
- pr_err("CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val);
- panel_name = vga_panel_name;
- }
-
- fb->panel = versatile_clcd_get_panel(panel_name);
- if (!fb->panel)
- return -EINVAL;
-
- return versatile_clcd_setup_dma(fb, framesize);
-}
-
-struct clcd_board clcd_plat_data = {
- .name = "RealView",
- .caps = CLCD_CAP_ALL,
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .disable = realview_clcd_disable,
- .enable = realview_clcd_enable,
- .setup = realview_clcd_setup,
- .mmap = versatile_clcd_mmap_dma,
- .remove = versatile_clcd_remove_dma,
-};
-
-/*
- * Where is the timer (VA)?
- */
-void __iomem *timer0_va_base;
-void __iomem *timer1_va_base;
-void __iomem *timer2_va_base;
-void __iomem *timer3_va_base;
-
-/*
- * Set up the clock source and clock events devices
- */
-void __init realview_timer_init(unsigned int timer_irq)
-{
- u32 val;
-
- /*
- * set clock frequency:
- * REALVIEW_REFCLK is 32KHz
- * REALVIEW_TIMCLK is 1MHz
- */
- val = readl(__io_address(REALVIEW_SCTL_BASE));
- writel((REALVIEW_TIMCLK << REALVIEW_TIMER1_EnSel) |
- (REALVIEW_TIMCLK << REALVIEW_TIMER2_EnSel) |
- (REALVIEW_TIMCLK << REALVIEW_TIMER3_EnSel) |
- (REALVIEW_TIMCLK << REALVIEW_TIMER4_EnSel) | val,
- __io_address(REALVIEW_SCTL_BASE));
-
- /*
- * Initialise to a known state (all timers off)
- */
- sp804_timer_disable(timer0_va_base);
- sp804_timer_disable(timer1_va_base);
- sp804_timer_disable(timer2_va_base);
- sp804_timer_disable(timer3_va_base);
-
- sp804_clocksource_init(timer3_va_base, "timer3");
- sp804_clockevents_init(timer0_va_base, timer_irq, "timer0");
-}
-
-/*
- * Setup the memory banks.
- */
-void realview_fixup(struct tag *tags, char **from)
-{
- /*
- * Most RealView platforms have 512MB contiguous RAM at 0x70000000.
- * Half of this is mirrored at 0.
- */
-#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
- memblock_add(0x70000000, SZ_512M);
-#else
- memblock_add(0, SZ_256M);
-#endif
-}
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
deleted file mode 100644
index 05a995ea16d3..000000000000
--- a/arch/arm/mach-realview/core.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2004 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARCH_REALVIEW_H
-#define __ASM_ARCH_REALVIEW_H
-
-#include <linux/amba/bus.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-
-#define APB_DEVICE(name, busid, base, plat) \
-static AMBA_APB_DEVICE(name, busid, 0, REALVIEW_##base##_BASE, base##_IRQ, plat)
-
-#define AHB_DEVICE(name, busid, base, plat) \
-static AMBA_AHB_DEVICE(name, busid, 0, REALVIEW_##base##_BASE, base##_IRQ, plat)
-
-struct machine_desc;
-
-extern struct platform_device realview_flash_device;
-extern struct platform_device realview_cf_device;
-extern struct platform_device realview_leds_device;
-extern struct platform_device realview_i2c_device;
-extern struct mmci_platform_data realview_mmc0_plat_data;
-extern struct mmci_platform_data realview_mmc1_plat_data;
-extern struct clcd_board clcd_plat_data;
-extern void __iomem *timer0_va_base;
-extern void __iomem *timer1_va_base;
-extern void __iomem *timer2_va_base;
-extern void __iomem *timer3_va_base;
-
-extern void realview_timer_init(unsigned int timer_irq);
-extern int realview_flash_register(struct resource *res, u32 num);
-extern int realview_eth_register(const char *name, struct resource *res);
-extern int realview_usb_register(struct resource *res);
-extern void realview_init_early(void);
-extern void realview_fixup(struct tag *tags, char **from);
-
-extern const struct smp_operations realview_smp_ops;
-extern void realview_cpu_die(unsigned int cpu);
-
-#endif
diff --git a/arch/arm/mach-realview/hardware.h b/arch/arm/mach-realview/hardware.h
deleted file mode 100644
index 957a230aadf4..000000000000
--- a/arch/arm/mach-realview/hardware.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This file contains the hardware definitions of the RealView boards.
- *
- * Copyright (C) 2003 ARM Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <asm/sizes.h>
-
-/* macro to get at IO space when running virtually */
-#ifdef CONFIG_MMU
-/*
- * Statically mapped addresses:
- *
- * 10xx xxxx -> fbxx xxxx
- * 1exx xxxx -> fdxx xxxx
- * 1fxx xxxx -> fexx xxxx
- */
-#define IO_ADDRESS(x) (((x) & 0x03ffffff) + 0xfb000000)
-#else
-#define IO_ADDRESS(x) (x)
-#endif
-#define __io_address(n) IOMEM(IO_ADDRESS(n))
-
-#endif
diff --git a/arch/arm/mach-realview/hotplug.h b/arch/arm/mach-realview/hotplug.h
new file mode 100644
index 000000000000..eacd7a4dad2f
--- /dev/null
+++ b/arch/arm/mach-realview/hotplug.h
@@ -0,0 +1 @@
+void realview_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-realview/irqs-eb.h b/arch/arm/mach-realview/irqs-eb.h
deleted file mode 100644
index 61e31680a749..000000000000
--- a/arch/arm/mach-realview/irqs-eb.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2007 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_IRQS_EB_H
-#define __MACH_IRQS_EB_H
-
-#define IRQ_LOCALTIMER 29
-#define IRQ_EB_GIC_START 32
-
-/*
- * RealView EB interrupt sources
- */
-#define IRQ_EB_WDOG (IRQ_EB_GIC_START + 0) /* Watchdog timer */
-#define IRQ_EB_SOFT (IRQ_EB_GIC_START + 1) /* Software interrupt */
-#define IRQ_EB_COMMRx (IRQ_EB_GIC_START + 2) /* Debug Comm Rx interrupt */
-#define IRQ_EB_COMMTx (IRQ_EB_GIC_START + 3) /* Debug Comm Tx interrupt */
-#define IRQ_EB_TIMER0_1 (IRQ_EB_GIC_START + 4) /* Timer 0 and 1 */
-#define IRQ_EB_TIMER2_3 (IRQ_EB_GIC_START + 5) /* Timer 2 and 3 */
-#define IRQ_EB_GPIO0 (IRQ_EB_GIC_START + 6) /* GPIO 0 */
-#define IRQ_EB_GPIO1 (IRQ_EB_GIC_START + 7) /* GPIO 1 */
-#define IRQ_EB_GPIO2 (IRQ_EB_GIC_START + 8) /* GPIO 2 */
- /* 9 reserved */
-#define IRQ_EB_RTC (IRQ_EB_GIC_START + 10) /* Real Time Clock */
-#define IRQ_EB_SSP (IRQ_EB_GIC_START + 11) /* Synchronous Serial Port */
-#define IRQ_EB_UART0 (IRQ_EB_GIC_START + 12) /* UART 0 on development chip */
-#define IRQ_EB_UART1 (IRQ_EB_GIC_START + 13) /* UART 1 on development chip */
-#define IRQ_EB_UART2 (IRQ_EB_GIC_START + 14) /* UART 2 on development chip */
-#define IRQ_EB_UART3 (IRQ_EB_GIC_START + 15) /* UART 3 on development chip */
-#define IRQ_EB_SCI (IRQ_EB_GIC_START + 16) /* Smart Card Interface */
-#define IRQ_EB_MMCI0A (IRQ_EB_GIC_START + 17) /* Multimedia Card 0A */
-#define IRQ_EB_MMCI0B (IRQ_EB_GIC_START + 18) /* Multimedia Card 0B */
-#define IRQ_EB_AACI (IRQ_EB_GIC_START + 19) /* Audio Codec */
-#define IRQ_EB_KMI0 (IRQ_EB_GIC_START + 20) /* Keyboard/Mouse port 0 */
-#define IRQ_EB_KMI1 (IRQ_EB_GIC_START + 21) /* Keyboard/Mouse port 1 */
-#define IRQ_EB_CHARLCD (IRQ_EB_GIC_START + 22) /* Character LCD */
-#define IRQ_EB_CLCD (IRQ_EB_GIC_START + 23) /* CLCD controller */
-#define IRQ_EB_DMA (IRQ_EB_GIC_START + 24) /* DMA controller */
-#define IRQ_EB_PWRFAIL (IRQ_EB_GIC_START + 25) /* Power failure */
-#define IRQ_EB_PISMO (IRQ_EB_GIC_START + 26) /* PISMO interface */
-#define IRQ_EB_DoC (IRQ_EB_GIC_START + 27) /* Disk on Chip memory controller */
-#define IRQ_EB_ETH (IRQ_EB_GIC_START + 28) /* Ethernet controller */
-#define IRQ_EB_USB (IRQ_EB_GIC_START + 29) /* USB controller */
-#define IRQ_EB_TSPEN (IRQ_EB_GIC_START + 30) /* Touchscreen pen */
-#define IRQ_EB_TSKPAD (IRQ_EB_GIC_START + 31) /* Touchscreen keypad */
-
-/*
- * RealView EB + ARM11MPCore interrupt sources (primary GIC on the core tile)
- */
-#define IRQ_EB11MP_AACI (IRQ_EB_GIC_START + 0)
-#define IRQ_EB11MP_TIMER0_1 (IRQ_EB_GIC_START + 1)
-#define IRQ_EB11MP_TIMER2_3 (IRQ_EB_GIC_START + 2)
-#define IRQ_EB11MP_USB (IRQ_EB_GIC_START + 3)
-#define IRQ_EB11MP_UART0 (IRQ_EB_GIC_START + 4)
-#define IRQ_EB11MP_UART1 (IRQ_EB_GIC_START + 5)
-#define IRQ_EB11MP_RTC (IRQ_EB_GIC_START + 6)
-#define IRQ_EB11MP_KMI0 (IRQ_EB_GIC_START + 7)
-#define IRQ_EB11MP_KMI1 (IRQ_EB_GIC_START + 8)
-#define IRQ_EB11MP_ETH (IRQ_EB_GIC_START + 9)
-#define IRQ_EB11MP_EB_IRQ1 (IRQ_EB_GIC_START + 10) /* main GIC */
-#define IRQ_EB11MP_EB_IRQ2 (IRQ_EB_GIC_START + 11) /* tile GIC */
-#define IRQ_EB11MP_EB_FIQ1 (IRQ_EB_GIC_START + 12) /* main GIC */
-#define IRQ_EB11MP_EB_FIQ2 (IRQ_EB_GIC_START + 13) /* tile GIC */
-#define IRQ_EB11MP_MMCI0A (IRQ_EB_GIC_START + 14)
-#define IRQ_EB11MP_MMCI0B (IRQ_EB_GIC_START + 15)
-
-#define IRQ_EB11MP_PMU_CPU0 (IRQ_EB_GIC_START + 17)
-#define IRQ_EB11MP_PMU_CPU1 (IRQ_EB_GIC_START + 18)
-#define IRQ_EB11MP_PMU_CPU2 (IRQ_EB_GIC_START + 19)
-#define IRQ_EB11MP_PMU_CPU3 (IRQ_EB_GIC_START + 20)
-#define IRQ_EB11MP_PMU_SCU0 (IRQ_EB_GIC_START + 21)
-#define IRQ_EB11MP_PMU_SCU1 (IRQ_EB_GIC_START + 22)
-#define IRQ_EB11MP_PMU_SCU2 (IRQ_EB_GIC_START + 23)
-#define IRQ_EB11MP_PMU_SCU3 (IRQ_EB_GIC_START + 24)
-#define IRQ_EB11MP_PMU_SCU4 (IRQ_EB_GIC_START + 25)
-#define IRQ_EB11MP_PMU_SCU5 (IRQ_EB_GIC_START + 26)
-#define IRQ_EB11MP_PMU_SCU6 (IRQ_EB_GIC_START + 27)
-#define IRQ_EB11MP_PMU_SCU7 (IRQ_EB_GIC_START + 28)
-
-#define IRQ_EB11MP_L220_EVENT (IRQ_EB_GIC_START + 29)
-#define IRQ_EB11MP_L220_SLAVE (IRQ_EB_GIC_START + 30)
-#define IRQ_EB11MP_L220_DECODE (IRQ_EB_GIC_START + 31)
-
-/*
- * The 11MPcore tile leaves the following unconnected.
- */
-#define IRQ_EB11MP_UART2 0
-#define IRQ_EB11MP_UART3 0
-#define IRQ_EB11MP_CLCD 0
-#define IRQ_EB11MP_DMA 0
-#define IRQ_EB11MP_WDOG 0
-#define IRQ_EB11MP_GPIO0 0
-#define IRQ_EB11MP_GPIO1 0
-#define IRQ_EB11MP_GPIO2 0
-#define IRQ_EB11MP_SCI 0
-#define IRQ_EB11MP_SSP 0
-
-#define NR_GIC_EB11MP 2
-
-#endif /* __MACH_IRQS_EB_H */
diff --git a/arch/arm/mach-realview/irqs-pb1176.h b/arch/arm/mach-realview/irqs-pb1176.h
deleted file mode 100644
index 778edfd430e7..000000000000
--- a/arch/arm/mach-realview/irqs-pb1176.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_IRQS_PB1176_H
-#define __MACH_IRQS_PB1176_H
-
-#define IRQ_DC1176_GIC_START 32
-#define IRQ_PB1176_GIC_START 64
-
-/*
- * ARM1176 DevChip interrupt sources (primary GIC)
- */
-#define IRQ_DC1176_WATCHDOG (IRQ_DC1176_GIC_START + 0) /* Watchdog timer */
-#define IRQ_DC1176_SOFTINT (IRQ_DC1176_GIC_START + 1) /* Software interrupt */
-#define IRQ_DC1176_COMMRx (IRQ_DC1176_GIC_START + 2) /* Debug Comm Rx interrupt */
-#define IRQ_DC1176_COMMTx (IRQ_DC1176_GIC_START + 3) /* Debug Comm Tx interrupt */
-#define IRQ_DC1176_CORE_PMU (IRQ_DC1176_GIC_START + 7) /* Core PMU interrupt */
-#define IRQ_DC1176_TIMER0 (IRQ_DC1176_GIC_START + 8) /* Timer 0 */
-#define IRQ_DC1176_TIMER1 (IRQ_DC1176_GIC_START + 9) /* Timer 1 */
-#define IRQ_DC1176_TIMER2 (IRQ_DC1176_GIC_START + 10) /* Timer 2 */
-#define IRQ_DC1176_APC (IRQ_DC1176_GIC_START + 11)
-#define IRQ_DC1176_IEC (IRQ_DC1176_GIC_START + 12)
-#define IRQ_DC1176_L2CC (IRQ_DC1176_GIC_START + 13)
-#define IRQ_DC1176_RTC (IRQ_DC1176_GIC_START + 14)
-#define IRQ_DC1176_CLCD (IRQ_DC1176_GIC_START + 15) /* CLCD controller */
-#define IRQ_DC1176_GPIO0 (IRQ_DC1176_GIC_START + 16)
-#define IRQ_DC1176_SSP (IRQ_DC1176_GIC_START + 17) /* SSP port */
-#define IRQ_DC1176_UART0 (IRQ_DC1176_GIC_START + 18) /* UART 0 on development chip */
-#define IRQ_DC1176_UART1 (IRQ_DC1176_GIC_START + 19) /* UART 1 on development chip */
-#define IRQ_DC1176_UART2 (IRQ_DC1176_GIC_START + 20) /* UART 2 on development chip */
-#define IRQ_DC1176_UART3 (IRQ_DC1176_GIC_START + 21) /* UART 3 on development chip */
-
-#define IRQ_DC1176_PB_IRQ2 (IRQ_DC1176_GIC_START + 30) /* tile GIC */
-#define IRQ_DC1176_PB_IRQ1 (IRQ_DC1176_GIC_START + 31) /* main GIC */
-
-/*
- * RealView PB1176 interrupt sources (secondary GIC)
- */
-#define IRQ_PB1176_MMCI0A (IRQ_PB1176_GIC_START + 1) /* Multimedia Card 0A */
-#define IRQ_PB1176_MMCI0B (IRQ_PB1176_GIC_START + 2) /* Multimedia Card 0A */
-#define IRQ_PB1176_KMI0 (IRQ_PB1176_GIC_START + 3) /* Keyboard/Mouse port 0 */
-#define IRQ_PB1176_KMI1 (IRQ_PB1176_GIC_START + 4) /* Keyboard/Mouse port 1 */
-#define IRQ_PB1176_SCI (IRQ_PB1176_GIC_START + 5)
-#define IRQ_PB1176_UART4 (IRQ_PB1176_GIC_START + 6) /* UART 4 on baseboard */
-#define IRQ_PB1176_CHARLCD (IRQ_PB1176_GIC_START + 7) /* Character LCD */
-#define IRQ_PB1176_GPIO1 (IRQ_PB1176_GIC_START + 8)
-#define IRQ_PB1176_GPIO2 (IRQ_PB1176_GIC_START + 9)
-#define IRQ_PB1176_ETH (IRQ_PB1176_GIC_START + 10) /* Ethernet controller */
-#define IRQ_PB1176_USB (IRQ_PB1176_GIC_START + 11) /* USB controller */
-
-#define IRQ_PB1176_PISMO (IRQ_PB1176_GIC_START + 16)
-
-#define IRQ_PB1176_AACI (IRQ_PB1176_GIC_START + 19) /* Audio Codec */
-
-#define IRQ_PB1176_TIMER0_1 (IRQ_PB1176_GIC_START + 22)
-#define IRQ_PB1176_TIMER2_3 (IRQ_PB1176_GIC_START + 23)
-#define IRQ_PB1176_DMAC (IRQ_PB1176_GIC_START + 24) /* DMA controller */
-#define IRQ_PB1176_RTC (IRQ_PB1176_GIC_START + 25) /* Real Time Clock */
-
-#define IRQ_PB1176_SCTL -1
-
-#endif /* __MACH_IRQS_PB1176_H */
diff --git a/arch/arm/mach-realview/irqs-pb11mp.h b/arch/arm/mach-realview/irqs-pb11mp.h
deleted file mode 100644
index 938898a3df9f..000000000000
--- a/arch/arm/mach-realview/irqs-pb11mp.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_IRQS_PB11MP_H
-#define __MACH_IRQS_PB11MP_H
-
-#define IRQ_LOCALTIMER 29
-#define IRQ_TC11MP_GIC_START 32
-#define IRQ_PB11MP_GIC_START 64
-
-/*
- * ARM11MPCore test chip interrupt sources (primary GIC on the test chip)
- */
-#define IRQ_TC11MP_AACI (IRQ_TC11MP_GIC_START + 0)
-#define IRQ_TC11MP_TIMER0_1 (IRQ_TC11MP_GIC_START + 1)
-#define IRQ_TC11MP_TIMER2_3 (IRQ_TC11MP_GIC_START + 2)
-#define IRQ_TC11MP_USB (IRQ_TC11MP_GIC_START + 3)
-#define IRQ_TC11MP_UART0 (IRQ_TC11MP_GIC_START + 4)
-#define IRQ_TC11MP_UART1 (IRQ_TC11MP_GIC_START + 5)
-#define IRQ_TC11MP_RTC (IRQ_TC11MP_GIC_START + 6)
-#define IRQ_TC11MP_KMI0 (IRQ_TC11MP_GIC_START + 7)
-#define IRQ_TC11MP_KMI1 (IRQ_TC11MP_GIC_START + 8)
-#define IRQ_TC11MP_ETH (IRQ_TC11MP_GIC_START + 9)
-#define IRQ_TC11MP_PB_IRQ1 (IRQ_TC11MP_GIC_START + 10) /* main GIC */
-#define IRQ_TC11MP_PB_IRQ2 (IRQ_TC11MP_GIC_START + 11) /* tile GIC */
-#define IRQ_TC11MP_PB_FIQ1 (IRQ_TC11MP_GIC_START + 12) /* main GIC */
-#define IRQ_TC11MP_PB_FIQ2 (IRQ_TC11MP_GIC_START + 13) /* tile GIC */
-#define IRQ_TC11MP_MMCI0A (IRQ_TC11MP_GIC_START + 14)
-#define IRQ_TC11MP_MMCI0B (IRQ_TC11MP_GIC_START + 15)
-
-#define IRQ_TC11MP_PMU_CPU0 (IRQ_TC11MP_GIC_START + 17)
-#define IRQ_TC11MP_PMU_CPU1 (IRQ_TC11MP_GIC_START + 18)
-#define IRQ_TC11MP_PMU_CPU2 (IRQ_TC11MP_GIC_START + 19)
-#define IRQ_TC11MP_PMU_CPU3 (IRQ_TC11MP_GIC_START + 20)
-#define IRQ_TC11MP_PMU_SCU0 (IRQ_TC11MP_GIC_START + 21)
-#define IRQ_TC11MP_PMU_SCU1 (IRQ_TC11MP_GIC_START + 22)
-#define IRQ_TC11MP_PMU_SCU2 (IRQ_TC11MP_GIC_START + 23)
-#define IRQ_TC11MP_PMU_SCU3 (IRQ_TC11MP_GIC_START + 24)
-#define IRQ_TC11MP_PMU_SCU4 (IRQ_TC11MP_GIC_START + 25)
-#define IRQ_TC11MP_PMU_SCU5 (IRQ_TC11MP_GIC_START + 26)
-#define IRQ_TC11MP_PMU_SCU6 (IRQ_TC11MP_GIC_START + 27)
-#define IRQ_TC11MP_PMU_SCU7 (IRQ_TC11MP_GIC_START + 28)
-
-#define IRQ_TC11MP_L220_EVENT (IRQ_TC11MP_GIC_START + 29)
-#define IRQ_TC11MP_L220_SLAVE (IRQ_TC11MP_GIC_START + 30)
-#define IRQ_TC11MP_L220_DECODE (IRQ_TC11MP_GIC_START + 31)
-
-/*
- * RealView PB11MPCore GIC interrupt sources (secondary GIC on the board)
- */
-#define IRQ_PB11MP_WATCHDOG (IRQ_PB11MP_GIC_START + 0) /* Watchdog timer */
-#define IRQ_PB11MP_SOFT (IRQ_PB11MP_GIC_START + 1) /* Software interrupt */
-#define IRQ_PB11MP_COMMRx (IRQ_PB11MP_GIC_START + 2) /* Debug Comm Rx interrupt */
-#define IRQ_PB11MP_COMMTx (IRQ_PB11MP_GIC_START + 3) /* Debug Comm Tx interrupt */
-#define IRQ_PB11MP_GPIO0 (IRQ_PB11MP_GIC_START + 6) /* GPIO 0 */
-#define IRQ_PB11MP_GPIO1 (IRQ_PB11MP_GIC_START + 7) /* GPIO 1 */
-#define IRQ_PB11MP_GPIO2 (IRQ_PB11MP_GIC_START + 8) /* GPIO 2 */
- /* 9 reserved */
-#define IRQ_PB11MP_RTC_GIC1 (IRQ_PB11MP_GIC_START + 10) /* Real Time Clock */
-#define IRQ_PB11MP_SSP (IRQ_PB11MP_GIC_START + 11) /* Synchronous Serial Port */
-#define IRQ_PB11MP_UART0_GIC1 (IRQ_PB11MP_GIC_START + 12) /* UART 0 on development chip */
-#define IRQ_PB11MP_UART1_GIC1 (IRQ_PB11MP_GIC_START + 13) /* UART 1 on development chip */
-#define IRQ_PB11MP_UART2 (IRQ_PB11MP_GIC_START + 14) /* UART 2 on development chip */
-#define IRQ_PB11MP_UART3 (IRQ_PB11MP_GIC_START + 15) /* UART 3 on development chip */
-#define IRQ_PB11MP_SCI (IRQ_PB11MP_GIC_START + 16) /* Smart Card Interface */
-#define IRQ_PB11MP_MMCI0A_GIC1 (IRQ_PB11MP_GIC_START + 17) /* Multimedia Card 0A */
-#define IRQ_PB11MP_MMCI0B_GIC1 (IRQ_PB11MP_GIC_START + 18) /* Multimedia Card 0B */
-#define IRQ_PB11MP_AACI_GIC1 (IRQ_PB11MP_GIC_START + 19) /* Audio Codec */
-#define IRQ_PB11MP_KMI0_GIC1 (IRQ_PB11MP_GIC_START + 20) /* Keyboard/Mouse port 0 */
-#define IRQ_PB11MP_KMI1_GIC1 (IRQ_PB11MP_GIC_START + 21) /* Keyboard/Mouse port 1 */
-#define IRQ_PB11MP_CHARLCD (IRQ_PB11MP_GIC_START + 22) /* Character LCD */
-#define IRQ_PB11MP_CLCD (IRQ_PB11MP_GIC_START + 23) /* CLCD controller */
-#define IRQ_PB11MP_DMAC (IRQ_PB11MP_GIC_START + 24) /* DMA controller */
-#define IRQ_PB11MP_PWRFAIL (IRQ_PB11MP_GIC_START + 25) /* Power failure */
-#define IRQ_PB11MP_PISMO (IRQ_PB11MP_GIC_START + 26) /* PISMO interface */
-#define IRQ_PB11MP_DoC (IRQ_PB11MP_GIC_START + 27) /* Disk on Chip memory controller */
-#define IRQ_PB11MP_ETH_GIC1 (IRQ_PB11MP_GIC_START + 28) /* Ethernet controller */
-#define IRQ_PB11MP_USB_GIC1 (IRQ_PB11MP_GIC_START + 29) /* USB controller */
-#define IRQ_PB11MP_TSPEN (IRQ_PB11MP_GIC_START + 30) /* Touchscreen pen */
-#define IRQ_PB11MP_TSKPAD (IRQ_PB11MP_GIC_START + 31) /* Touchscreen keypad */
-
-#endif /* __MACH_IRQS_PB11MP_H */
diff --git a/arch/arm/mach-realview/irqs-pba8.h b/arch/arm/mach-realview/irqs-pba8.h
deleted file mode 100644
index 262e321938b8..000000000000
--- a/arch/arm/mach-realview/irqs-pba8.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_IRQS_PBA8_H
-#define __MACH_IRQS_PBA8_H
-
-#define IRQ_PBA8_GIC_START 32
-
-/*
- * PB-A8 on-board gic irq sources
- */
-#define IRQ_PBA8_WATCHDOG (IRQ_PBA8_GIC_START + 0) /* Watchdog timer */
-#define IRQ_PBA8_SOFT (IRQ_PBA8_GIC_START + 1) /* Software interrupt */
-#define IRQ_PBA8_COMMRx (IRQ_PBA8_GIC_START + 2) /* Debug Comm Rx interrupt */
-#define IRQ_PBA8_COMMTx (IRQ_PBA8_GIC_START + 3) /* Debug Comm Tx interrupt */
-#define IRQ_PBA8_TIMER0_1 (IRQ_PBA8_GIC_START + 4) /* Timer 0/1 (default timer) */
-#define IRQ_PBA8_TIMER2_3 (IRQ_PBA8_GIC_START + 5) /* Timer 2/3 */
-#define IRQ_PBA8_GPIO0 (IRQ_PBA8_GIC_START + 6) /* GPIO 0 */
-#define IRQ_PBA8_GPIO1 (IRQ_PBA8_GIC_START + 7) /* GPIO 1 */
-#define IRQ_PBA8_GPIO2 (IRQ_PBA8_GIC_START + 8) /* GPIO 2 */
- /* 9 reserved */
-#define IRQ_PBA8_RTC (IRQ_PBA8_GIC_START + 10) /* Real Time Clock */
-#define IRQ_PBA8_SSP (IRQ_PBA8_GIC_START + 11) /* Synchronous Serial Port */
-#define IRQ_PBA8_UART0 (IRQ_PBA8_GIC_START + 12) /* UART 0 on development chip */
-#define IRQ_PBA8_UART1 (IRQ_PBA8_GIC_START + 13) /* UART 1 on development chip */
-#define IRQ_PBA8_UART2 (IRQ_PBA8_GIC_START + 14) /* UART 2 on development chip */
-#define IRQ_PBA8_UART3 (IRQ_PBA8_GIC_START + 15) /* UART 3 on development chip */
-#define IRQ_PBA8_SCI (IRQ_PBA8_GIC_START + 16) /* Smart Card Interface */
-#define IRQ_PBA8_MMCI0A (IRQ_PBA8_GIC_START + 17) /* Multimedia Card 0A */
-#define IRQ_PBA8_MMCI0B (IRQ_PBA8_GIC_START + 18) /* Multimedia Card 0B */
-#define IRQ_PBA8_AACI (IRQ_PBA8_GIC_START + 19) /* Audio Codec */
-#define IRQ_PBA8_KMI0 (IRQ_PBA8_GIC_START + 20) /* Keyboard/Mouse port 0 */
-#define IRQ_PBA8_KMI1 (IRQ_PBA8_GIC_START + 21) /* Keyboard/Mouse port 1 */
-#define IRQ_PBA8_CHARLCD (IRQ_PBA8_GIC_START + 22) /* Character LCD */
-#define IRQ_PBA8_CLCD (IRQ_PBA8_GIC_START + 23) /* CLCD controller */
-#define IRQ_PBA8_DMAC (IRQ_PBA8_GIC_START + 24) /* DMA controller */
-#define IRQ_PBA8_PWRFAIL (IRQ_PBA8_GIC_START + 25) /* Power failure */
-#define IRQ_PBA8_PISMO (IRQ_PBA8_GIC_START + 26) /* PISMO interface */
-#define IRQ_PBA8_DoC (IRQ_PBA8_GIC_START + 27) /* Disk on Chip memory controller */
-#define IRQ_PBA8_ETH (IRQ_PBA8_GIC_START + 28) /* Ethernet controller */
-#define IRQ_PBA8_USB (IRQ_PBA8_GIC_START + 29) /* USB controller */
-#define IRQ_PBA8_TSPEN (IRQ_PBA8_GIC_START + 30) /* Touchscreen pen */
-#define IRQ_PBA8_TSKPAD (IRQ_PBA8_GIC_START + 31) /* Touchscreen keypad */
-
-#define IRQ_PBA8_PMU (IRQ_PBA8_GIC_START + 47) /* Cortex-A8 PMU */
-
-/* ... */
-#define IRQ_PBA8_PCI0 (IRQ_PBA8_GIC_START + 50)
-#define IRQ_PBA8_PCI1 (IRQ_PBA8_GIC_START + 51)
-#define IRQ_PBA8_PCI2 (IRQ_PBA8_GIC_START + 52)
-#define IRQ_PBA8_PCI3 (IRQ_PBA8_GIC_START + 53)
-
-#define IRQ_PBA8_SMC -1
-#define IRQ_PBA8_SCTL -1
-
-#endif /* __MACH_IRQS_PBA8_H */
diff --git a/arch/arm/mach-realview/irqs-pbx.h b/arch/arm/mach-realview/irqs-pbx.h
deleted file mode 100644
index 4ef0567dec32..000000000000
--- a/arch/arm/mach-realview/irqs-pbx.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2009 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_IRQS_PBX_H
-#define __MACH_IRQS_PBX_H
-
-#define IRQ_LOCALTIMER 29
-#define IRQ_PBX_GIC_START 32
-
-/*
- * PBX on-board gic irq sources
- */
-#define IRQ_PBX_WATCHDOG (IRQ_PBX_GIC_START + 0) /* Watchdog timer */
-#define IRQ_PBX_SOFT (IRQ_PBX_GIC_START + 1) /* Software interrupt */
-#define IRQ_PBX_COMMRx (IRQ_PBX_GIC_START + 2) /* Debug Comm Rx interrupt */
-#define IRQ_PBX_COMMTx (IRQ_PBX_GIC_START + 3) /* Debug Comm Tx interrupt */
-#define IRQ_PBX_TIMER0_1 (IRQ_PBX_GIC_START + 4) /* Timer 0/1 (default timer) */
-#define IRQ_PBX_TIMER2_3 (IRQ_PBX_GIC_START + 5) /* Timer 2/3 */
-#define IRQ_PBX_GPIO0 (IRQ_PBX_GIC_START + 6) /* GPIO 0 */
-#define IRQ_PBX_GPIO1 (IRQ_PBX_GIC_START + 7) /* GPIO 1 */
-#define IRQ_PBX_GPIO2 (IRQ_PBX_GIC_START + 8) /* GPIO 2 */
- /* 9 reserved */
-#define IRQ_PBX_RTC (IRQ_PBX_GIC_START + 10) /* Real Time Clock */
-#define IRQ_PBX_SSP (IRQ_PBX_GIC_START + 11) /* Synchronous Serial Port */
-#define IRQ_PBX_UART0 (IRQ_PBX_GIC_START + 12) /* UART 0 on development chip */
-#define IRQ_PBX_UART1 (IRQ_PBX_GIC_START + 13) /* UART 1 on development chip */
-#define IRQ_PBX_UART2 (IRQ_PBX_GIC_START + 14) /* UART 2 on development chip */
-#define IRQ_PBX_UART3 (IRQ_PBX_GIC_START + 15) /* UART 3 on development chip */
-#define IRQ_PBX_SCI (IRQ_PBX_GIC_START + 16) /* Smart Card Interface */
-#define IRQ_PBX_MMCI0A (IRQ_PBX_GIC_START + 17) /* Multimedia Card 0A */
-#define IRQ_PBX_MMCI0B (IRQ_PBX_GIC_START + 18) /* Multimedia Card 0B */
-#define IRQ_PBX_AACI (IRQ_PBX_GIC_START + 19) /* Audio Codec */
-#define IRQ_PBX_KMI0 (IRQ_PBX_GIC_START + 20) /* Keyboard/Mouse port 0 */
-#define IRQ_PBX_KMI1 (IRQ_PBX_GIC_START + 21) /* Keyboard/Mouse port 1 */
-#define IRQ_PBX_CHARLCD (IRQ_PBX_GIC_START + 22) /* Character LCD */
-#define IRQ_PBX_CLCD (IRQ_PBX_GIC_START + 23) /* CLCD controller */
-#define IRQ_PBX_DMAC (IRQ_PBX_GIC_START + 24) /* DMA controller */
-#define IRQ_PBX_PWRFAIL (IRQ_PBX_GIC_START + 25) /* Power failure */
-#define IRQ_PBX_PISMO (IRQ_PBX_GIC_START + 26) /* PISMO interface */
-#define IRQ_PBX_DoC (IRQ_PBX_GIC_START + 27) /* Disk on Chip memory controller */
-#define IRQ_PBX_ETH (IRQ_PBX_GIC_START + 28) /* Ethernet controller */
-#define IRQ_PBX_USB (IRQ_PBX_GIC_START + 29) /* USB controller */
-#define IRQ_PBX_TSPEN (IRQ_PBX_GIC_START + 30) /* Touchscreen pen */
-#define IRQ_PBX_TSKPAD (IRQ_PBX_GIC_START + 31) /* Touchscreen keypad */
-
-#define IRQ_PBX_PMU_SCU0 (IRQ_PBX_GIC_START + 32) /* SCU PMU Interrupts (11mp) */
-#define IRQ_PBX_PMU_SCU1 (IRQ_PBX_GIC_START + 33)
-#define IRQ_PBX_PMU_SCU2 (IRQ_PBX_GIC_START + 34)
-#define IRQ_PBX_PMU_SCU3 (IRQ_PBX_GIC_START + 35)
-#define IRQ_PBX_PMU_SCU4 (IRQ_PBX_GIC_START + 36)
-#define IRQ_PBX_PMU_SCU5 (IRQ_PBX_GIC_START + 37)
-#define IRQ_PBX_PMU_SCU6 (IRQ_PBX_GIC_START + 38)
-#define IRQ_PBX_PMU_SCU7 (IRQ_PBX_GIC_START + 39)
-
-#define IRQ_PBX_WATCHDOG1 (IRQ_PBX_GIC_START + 40) /* Watchdog1 timer */
-#define IRQ_PBX_TIMER4_5 (IRQ_PBX_GIC_START + 41) /* Timer 0/1 (default timer) */
-#define IRQ_PBX_TIMER6_7 (IRQ_PBX_GIC_START + 42) /* Timer 2/3 */
-/* ... */
-#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
-#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 45)
-#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 46)
-#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 47)
-
-/* ... */
-#define IRQ_PBX_PCI0 (IRQ_PBX_GIC_START + 50)
-#define IRQ_PBX_PCI1 (IRQ_PBX_GIC_START + 51)
-#define IRQ_PBX_PCI2 (IRQ_PBX_GIC_START + 52)
-#define IRQ_PBX_PCI3 (IRQ_PBX_GIC_START + 53)
-
-#define IRQ_PBX_SMC -1
-#define IRQ_PBX_SCTL -1
-
-#endif /* __MACH_IRQS_PBX_H */
diff --git a/arch/arm/mach-realview/platform.h b/arch/arm/mach-realview/platform.h
deleted file mode 100644
index 11121739d371..000000000000
--- a/arch/arm/mach-realview/platform.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) ARM Limited 2003. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ASM_ARCH_PLATFORM_H
-#define __ASM_ARCH_PLATFORM_H
-
-/*
- * Memory definitions
- */
-#define REALVIEW_BOOT_ROM_LO 0x30000000 /* DoC Base (64Mb)...*/
-#define REALVIEW_BOOT_ROM_HI 0x30000000
-#define REALVIEW_BOOT_ROM_BASE REALVIEW_BOOT_ROM_HI /* Normal position */
-#define REALVIEW_BOOT_ROM_SIZE SZ_64M
-
-#define REALVIEW_SSRAM_BASE /* REALVIEW_SSMC_BASE ? */
-#define REALVIEW_SSRAM_SIZE SZ_2M
-
-/*
- * SDRAM
- */
-#define REALVIEW_SDRAM_BASE 0x00000000
-
-/*
- * Logic expansion modules
- *
- */
-
-
-/* ------------------------------------------------------------------------
- * RealView Registers
- * ------------------------------------------------------------------------
- *
- */
-#define REALVIEW_SYS_ID_OFFSET 0x00
-#define REALVIEW_SYS_SW_OFFSET 0x04
-#define REALVIEW_SYS_LED_OFFSET 0x08
-#define REALVIEW_SYS_OSC0_OFFSET 0x0C
-
-#define REALVIEW_SYS_OSC1_OFFSET 0x10
-#define REALVIEW_SYS_OSC2_OFFSET 0x14
-#define REALVIEW_SYS_OSC3_OFFSET 0x18
-#define REALVIEW_SYS_OSC4_OFFSET 0x1C /* OSC1 for RealView/AB */
-
-#define REALVIEW_SYS_LOCK_OFFSET 0x20
-#define REALVIEW_SYS_100HZ_OFFSET 0x24
-#define REALVIEW_SYS_CFGDATA1_OFFSET 0x28
-#define REALVIEW_SYS_CFGDATA2_OFFSET 0x2C
-#define REALVIEW_SYS_FLAGS_OFFSET 0x30
-#define REALVIEW_SYS_FLAGSSET_OFFSET 0x30
-#define REALVIEW_SYS_FLAGSCLR_OFFSET 0x34
-#define REALVIEW_SYS_NVFLAGS_OFFSET 0x38
-#define REALVIEW_SYS_NVFLAGSSET_OFFSET 0x38
-#define REALVIEW_SYS_NVFLAGSCLR_OFFSET 0x3C
-#define REALVIEW_SYS_RESETCTL_OFFSET 0x40
-#define REALVIEW_SYS_PCICTL_OFFSET 0x44
-#define REALVIEW_SYS_MCI_OFFSET 0x48
-#define REALVIEW_SYS_FLASH_OFFSET 0x4C
-#define REALVIEW_SYS_CLCD_OFFSET 0x50
-#define REALVIEW_SYS_CLCDSER_OFFSET 0x54
-#define REALVIEW_SYS_BOOTCS_OFFSET 0x58
-#define REALVIEW_SYS_24MHz_OFFSET 0x5C
-#define REALVIEW_SYS_MISC_OFFSET 0x60
-#define REALVIEW_SYS_IOSEL_OFFSET 0x70
-#define REALVIEW_SYS_PROCID_OFFSET 0x84
-#define REALVIEW_SYS_TEST_OSC0_OFFSET 0xC0
-#define REALVIEW_SYS_TEST_OSC1_OFFSET 0xC4
-#define REALVIEW_SYS_TEST_OSC2_OFFSET 0xC8
-#define REALVIEW_SYS_TEST_OSC3_OFFSET 0xCC
-#define REALVIEW_SYS_TEST_OSC4_OFFSET 0xD0
-
-#define REALVIEW_SYS_BASE 0x10000000
-#define REALVIEW_SYS_ID (REALVIEW_SYS_BASE + REALVIEW_SYS_ID_OFFSET)
-#define REALVIEW_SYS_SW (REALVIEW_SYS_BASE + REALVIEW_SYS_SW_OFFSET)
-#define REALVIEW_SYS_LED (REALVIEW_SYS_BASE + REALVIEW_SYS_LED_OFFSET)
-#define REALVIEW_SYS_OSC0 (REALVIEW_SYS_BASE + REALVIEW_SYS_OSC0_OFFSET)
-#define REALVIEW_SYS_OSC1 (REALVIEW_SYS_BASE + REALVIEW_SYS_OSC1_OFFSET)
-
-#define REALVIEW_SYS_LOCK (REALVIEW_SYS_BASE + REALVIEW_SYS_LOCK_OFFSET)
-#define REALVIEW_SYS_100HZ (REALVIEW_SYS_BASE + REALVIEW_SYS_100HZ_OFFSET)
-#define REALVIEW_SYS_CFGDATA1 (REALVIEW_SYS_BASE + REALVIEW_SYS_CFGDATA1_OFFSET)
-#define REALVIEW_SYS_CFGDATA2 (REALVIEW_SYS_BASE + REALVIEW_SYS_CFGDATA2_OFFSET)
-#define REALVIEW_SYS_FLAGS (REALVIEW_SYS_BASE + REALVIEW_SYS_FLAGS_OFFSET)
-#define REALVIEW_SYS_FLAGSSET (REALVIEW_SYS_BASE + REALVIEW_SYS_FLAGSSET_OFFSET)
-#define REALVIEW_SYS_FLAGSCLR (REALVIEW_SYS_BASE + REALVIEW_SYS_FLAGSCLR_OFFSET)
-#define REALVIEW_SYS_NVFLAGS (REALVIEW_SYS_BASE + REALVIEW_SYS_NVFLAGS_OFFSET)
-#define REALVIEW_SYS_NVFLAGSSET (REALVIEW_SYS_BASE + REALVIEW_SYS_NVFLAGSSET_OFFSET)
-#define REALVIEW_SYS_NVFLAGSCLR (REALVIEW_SYS_BASE + REALVIEW_SYS_NVFLAGSCLR_OFFSET)
-#define REALVIEW_SYS_RESETCTL (REALVIEW_SYS_BASE + REALVIEW_SYS_RESETCTL_OFFSET)
-#define REALVIEW_SYS_PCICTL (REALVIEW_SYS_BASE + REALVIEW_SYS_PCICTL_OFFSET)
-#define REALVIEW_SYS_MCI (REALVIEW_SYS_BASE + REALVIEW_SYS_MCI_OFFSET)
-#define REALVIEW_SYS_FLASH (REALVIEW_SYS_BASE + REALVIEW_SYS_FLASH_OFFSET)
-#define REALVIEW_SYS_CLCD (REALVIEW_SYS_BASE + REALVIEW_SYS_CLCD_OFFSET)
-#define REALVIEW_SYS_CLCDSER (REALVIEW_SYS_BASE + REALVIEW_SYS_CLCDSER_OFFSET)
-#define REALVIEW_SYS_BOOTCS (REALVIEW_SYS_BASE + REALVIEW_SYS_BOOTCS_OFFSET)
-#define REALVIEW_SYS_24MHz (REALVIEW_SYS_BASE + REALVIEW_SYS_24MHz_OFFSET)
-#define REALVIEW_SYS_MISC (REALVIEW_SYS_BASE + REALVIEW_SYS_MISC_OFFSET)
-#define REALVIEW_SYS_IOSEL (REALVIEW_SYS_BASE + REALVIEW_SYS_IOSEL_OFFSET)
-#define REALVIEW_SYS_PROCID (REALVIEW_SYS_BASE + REALVIEW_SYS_PROCID_OFFSET)
-#define REALVIEW_SYS_TEST_OSC0 (REALVIEW_SYS_BASE + REALVIEW_SYS_TEST_OSC0_OFFSET)
-#define REALVIEW_SYS_TEST_OSC1 (REALVIEW_SYS_BASE + REALVIEW_SYS_TEST_OSC1_OFFSET)
-#define REALVIEW_SYS_TEST_OSC2 (REALVIEW_SYS_BASE + REALVIEW_SYS_TEST_OSC2_OFFSET)
-#define REALVIEW_SYS_TEST_OSC3 (REALVIEW_SYS_BASE + REALVIEW_SYS_TEST_OSC3_OFFSET)
-#define REALVIEW_SYS_TEST_OSC4 (REALVIEW_SYS_BASE + REALVIEW_SYS_TEST_OSC4_OFFSET)
-
-/* ------------------------------------------------------------------------
- * RealView control registers
- * ------------------------------------------------------------------------
- */
-
-/*
- * REALVIEW_IDFIELD
- *
- * 31:24 = manufacturer (0x41 = ARM)
- * 23:16 = architecture (0x08 = AHB system bus, ASB processor bus)
- * 15:12 = FPGA (0x3 = XVC600 or XVC600E)
- * 11:4 = build value
- * 3:0 = revision number (0x1 = rev B (AHB))
- */
-
-/*
- * REALVIEW_SYS_LOCK
- * control access to SYS_OSCx, SYS_CFGDATAx, SYS_RESETCTL,
- * SYS_CLD, SYS_BOOTCS
- */
-#define REALVIEW_SYS_LOCK_LOCKED (1 << 16)
-#define REALVIEW_SYS_LOCK_VAL 0xA05F /* Enable write access */
-
-/*
- * REALVIEW_SYS_FLASH
- */
-#define REALVIEW_FLASHPROG_FLVPPEN (1 << 0) /* Enable writing to flash */
-
-/*
- * REALVIEW_INTREG
- * - used to acknowledge and control MMCI and UART interrupts
- */
-#define REALVIEW_INTREG_WPROT 0x00 /* MMC protection status (no interrupt generated) */
-#define REALVIEW_INTREG_RI0 0x01 /* Ring indicator UART0 is asserted, */
-#define REALVIEW_INTREG_CARDIN 0x08 /* MMCI card in detect */
- /* write 1 to acknowledge and clear */
-#define REALVIEW_INTREG_RI1 0x02 /* Ring indicator UART1 is asserted, */
-#define REALVIEW_INTREG_CARDINSERT 0x03 /* Signal insertion of MMC card */
-
-/*
- * RealView common peripheral addresses
- */
-#define REALVIEW_SCTL_BASE 0x10001000 /* System controller */
-#define REALVIEW_I2C_BASE 0x10002000 /* I2C control */
-#define REALVIEW_AACI_BASE 0x10004000 /* Audio */
-#define REALVIEW_MMCI0_BASE 0x10005000 /* MMC interface */
-#define REALVIEW_KMI0_BASE 0x10006000 /* KMI interface */
-#define REALVIEW_KMI1_BASE 0x10007000 /* KMI 2nd interface */
-#define REALVIEW_CHAR_LCD_BASE 0x10008000 /* Character LCD */
-#define REALVIEW_SCI_BASE 0x1000E000 /* Smart card controller */
-#define REALVIEW_GPIO1_BASE 0x10014000 /* GPIO port 1 */
-#define REALVIEW_GPIO2_BASE 0x10015000 /* GPIO port 2 */
-#define REALVIEW_DMC_BASE 0x10018000 /* DMC configuration */
-#define REALVIEW_DMAC_BASE 0x10030000 /* DMA controller */
-
-/* PCI space */
-#define REALVIEW_PCI_BASE 0x41000000 /* PCI Interface */
-#define REALVIEW_PCI_CFG_BASE 0x42000000
-#define REALVIEW_PCI_MEM_BASE0 0x44000000
-#define REALVIEW_PCI_MEM_BASE1 0x50000000
-#define REALVIEW_PCI_MEM_BASE2 0x60000000
-/* Sizes of above maps */
-#define REALVIEW_PCI_BASE_SIZE 0x01000000
-#define REALVIEW_PCI_CFG_BASE_SIZE 0x02000000
-#define REALVIEW_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
-#define REALVIEW_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
-#define REALVIEW_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
-
-#define REALVIEW_SDRAM67_BASE 0x70000000 /* SDRAM banks 6 and 7 */
-#define REALVIEW_LT_BASE 0x80000000 /* Logic Tile expansion */
-
-/*
- * CompactFlash
- */
-#define REALVIEW_CF_BASE 0x18000000 /* CompactFlash */
-#define REALVIEW_CF_MEM_BASE 0x18003000 /* SMC for CompactFlash */
-
-/*
- * Disk on Chip
- */
-#define REALVIEW_DOC_BASE 0x2C000000
-#define REALVIEW_DOC_SIZE (16 << 20)
-#define REALVIEW_DOC_PAGE_SIZE 512
-#define REALVIEW_DOC_TOTAL_PAGES (DOC_SIZE / PAGE_SIZE)
-
-#define ERASE_UNIT_PAGES 32
-#define START_PAGE 0x80
-
-/*
- * LED settings, bits [7:0]
- */
-#define REALVIEW_SYS_LED0 (1 << 0)
-#define REALVIEW_SYS_LED1 (1 << 1)
-#define REALVIEW_SYS_LED2 (1 << 2)
-#define REALVIEW_SYS_LED3 (1 << 3)
-#define REALVIEW_SYS_LED4 (1 << 4)
-#define REALVIEW_SYS_LED5 (1 << 5)
-#define REALVIEW_SYS_LED6 (1 << 6)
-#define REALVIEW_SYS_LED7 (1 << 7)
-
-#define ALL_LEDS 0xFF
-
-#define LED_BANK REALVIEW_SYS_LED
-
-/*
- * Control registers
- */
-#define REALVIEW_IDFIELD_OFFSET 0x0 /* RealView build information */
-#define REALVIEW_FLASHPROG_OFFSET 0x4 /* Flash devices */
-#define REALVIEW_INTREG_OFFSET 0x8 /* Interrupt control */
-#define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */
-
-/*
- * System controller bit assignment
- */
-#define REALVIEW_REFCLK 0
-#define REALVIEW_TIMCLK 1
-
-#define REALVIEW_TIMER1_EnSel 15
-#define REALVIEW_TIMER2_EnSel 17
-#define REALVIEW_TIMER3_EnSel 19
-#define REALVIEW_TIMER4_EnSel 21
-
-
-#define REALVIEW_CSR_BASE 0x10000000
-#define REALVIEW_CSR_SIZE 0x10000000
-
-#endif /* __ASM_ARCH_PLATFORM_H */
diff --git a/arch/arm/mach-realview/platsmp-dt.c b/arch/arm/mach-realview/platsmp-dt.c
index 6964e8876061..70ca99eb52c6 100644
--- a/arch/arm/mach-realview/platsmp-dt.c
+++ b/arch/arm/mach-realview/platsmp-dt.c
@@ -17,8 +17,7 @@
#include <asm/smp_scu.h>

#include <plat/platsmp.h>
-
-#include "core.h"
+#include "hotplug.h"

#define REALVIEW_SYS_FLAGSSET_OFFSET 0x30

diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
deleted file mode 100644
index e8ab69c7abfb..000000000000
--- a/arch/arm/mach-realview/platsmp.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/platsmp.c
- *
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include "hardware.h"
-#include <asm/mach-types.h>
-#include <asm/smp_scu.h>
-
-#include "board-eb.h"
-#include "board-pb11mp.h"
-#include "board-pbx.h"
-
-#include <plat/platsmp.h>
-
-#include "core.h"
-
-static void __iomem *scu_base_addr(void)
-{
- if (machine_is_realview_eb_mp())
- return __io_address(REALVIEW_EB11MP_SCU_BASE);
- else if (machine_is_realview_pb11mp())
- return __io_address(REALVIEW_TC11MP_SCU_BASE);
- else if (machine_is_realview_pbx() &&
- (core_tile_pbx11mp() || core_tile_pbxa9mp()))
- return __io_address(REALVIEW_PBX_TILE_SCU_BASE);
- else
- return (void __iomem *)0;
-}
-
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-static void __init realview_smp_init_cpus(void)
-{
- void __iomem *scu_base = scu_base_addr();
- unsigned int i, ncores;
-
- ncores = scu_base ? scu_get_core_count(scu_base) : 1;
-
- /* sanity check */
- if (ncores > nr_cpu_ids) {
- pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
- ncores, nr_cpu_ids);
- ncores = nr_cpu_ids;
- }
-
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
-static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
-{
-
- scu_enable(scu_base_addr());
-
- /*
- * Write the address of secondary startup into the
- * system-wide flags register. The BootMonitor waits
- * until it receives a soft interrupt, and then the
- * secondary CPU branches to this address.
- */
- __raw_writel(virt_to_phys(versatile_secondary_startup),
- __io_address(REALVIEW_SYS_FLAGSSET));
-}
-
-const struct smp_operations realview_smp_ops __initconst = {
- .smp_init_cpus = realview_smp_init_cpus,
- .smp_prepare_cpus = realview_smp_prepare_cpus,
- .smp_secondary_init = versatile_secondary_init,
- .smp_boot_secondary = versatile_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = realview_cpu_die,
-#endif
-};
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
deleted file mode 100644
index b442fa61e943..000000000000
--- a/arch/arm/mach-realview/realview_eb.c
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/realview_eb.c
- *
- * Copyright (C) 2004 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
-#include <linux/amba/mmci.h>
-#include <linux/amba/pl022.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-realview.h>
-#include <linux/reboot.h>
-
-#include "hardware.h"
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/pgtable.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/smp_twd.h>
-#include <asm/system_info.h>
-#include <asm/outercache.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include "board-eb.h"
-#include "irqs-eb.h"
-
-#include "core.h"
-
-static struct map_desc realview_eb_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_EB_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_EB_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_EB_TIMER0_1_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB_TIMER0_1_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_EB_TIMER2_3_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB_TIMER2_3_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = IO_ADDRESS(REALVIEW_EB_UART0_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB_UART0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }
-#endif
-};
-
-static struct map_desc realview_eb11mp_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_EB11MP_PRIV_MEM_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB11MP_PRIV_MEM_BASE),
- .length = REALVIEW_EB11MP_PRIV_MEM_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_EB11MP_L220_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB11MP_L220_BASE),
- .length = SZ_8K,
- .type = MT_DEVICE,
- }
-};
-
-static void __init realview_eb_map_io(void)
-{
- iotable_init(realview_eb_io_desc, ARRAY_SIZE(realview_eb_io_desc));
- if (core_tile_eb11mp() || core_tile_a9mp())
- iotable_init(realview_eb11mp_io_desc, ARRAY_SIZE(realview_eb11mp_io_desc));
-}
-
-static struct pl061_platform_data gpio0_plat_data = {
- .gpio_base = 0,
-};
-
-static struct pl061_platform_data gpio1_plat_data = {
- .gpio_base = 8,
-};
-
-static struct pl061_platform_data gpio2_plat_data = {
- .gpio_base = 16,
-};
-
-static struct pl022_ssp_controller ssp0_plat_data = {
- .bus_id = 0,
- .enable_dma = 0,
- .num_chipselect = 1,
-};
-
-/*
- * RealView EB AMBA devices
- */
-
-/*
- * These devices are connected via the core APB bridge
- */
-#define GPIO2_IRQ { IRQ_EB_GPIO2 }
-#define GPIO3_IRQ { IRQ_EB_GPIO3 }
-
-#define AACI_IRQ { IRQ_EB_AACI }
-#define MMCI0_IRQ { IRQ_EB_MMCI0A, IRQ_EB_MMCI0B }
-#define KMI0_IRQ { IRQ_EB_KMI0 }
-#define KMI1_IRQ { IRQ_EB_KMI1 }
-
-/*
- * These devices are connected directly to the multi-layer AHB switch
- */
-#define EB_SMC_IRQ { }
-#define MPMC_IRQ { }
-#define EB_CLCD_IRQ { IRQ_EB_CLCD }
-#define DMAC_IRQ { IRQ_EB_DMA }
-
-/*
- * These devices are connected via the core APB bridge
- */
-#define SCTL_IRQ { }
-#define EB_WATCHDOG_IRQ { IRQ_EB_WDOG }
-#define EB_GPIO0_IRQ { IRQ_EB_GPIO0 }
-#define GPIO1_IRQ { IRQ_EB_GPIO1 }
-#define EB_RTC_IRQ { IRQ_EB_RTC }
-
-/*
- * These devices are connected via the DMA APB bridge
- */
-#define SCI_IRQ { IRQ_EB_SCI }
-#define EB_UART0_IRQ { IRQ_EB_UART0 }
-#define EB_UART1_IRQ { IRQ_EB_UART1 }
-#define EB_UART2_IRQ { IRQ_EB_UART2 }
-#define EB_UART3_IRQ { IRQ_EB_UART3 }
-#define EB_SSP_IRQ { IRQ_EB_SSP }
-
-/* FPGA Primecells */
-APB_DEVICE(aaci, "fpga:aaci", AACI, NULL);
-APB_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
-APB_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
-APB_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
-APB_DEVICE(uart3, "fpga:uart3", EB_UART3, NULL);
-
-/* DevChip Primecells */
-AHB_DEVICE(smc, "dev:smc", EB_SMC, NULL);
-AHB_DEVICE(clcd, "dev:clcd", EB_CLCD, &clcd_plat_data);
-AHB_DEVICE(dmac, "dev:dmac", DMAC, NULL);
-AHB_DEVICE(sctl, "dev:sctl", SCTL, NULL);
-APB_DEVICE(wdog, "dev:wdog", EB_WATCHDOG, NULL);
-APB_DEVICE(gpio0, "dev:gpio0", EB_GPIO0, &gpio0_plat_data);
-APB_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
-APB_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
-APB_DEVICE(rtc, "dev:rtc", EB_RTC, NULL);
-APB_DEVICE(sci0, "dev:sci0", SCI, NULL);
-APB_DEVICE(uart0, "dev:uart0", EB_UART0, NULL);
-APB_DEVICE(uart1, "dev:uart1", EB_UART1, NULL);
-APB_DEVICE(uart2, "dev:uart2", EB_UART2, NULL);
-APB_DEVICE(ssp0, "dev:ssp0", EB_SSP, &ssp0_plat_data);
-
-static struct amba_device *amba_devs[] __initdata = {
- &dmac_device,
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &uart3_device,
- &smc_device,
- &clcd_device,
- &sctl_device,
- &wdog_device,
- &gpio0_device,
- &gpio1_device,
- &gpio2_device,
- &rtc_device,
- &sci0_device,
- &ssp0_device,
- &aaci_device,
- &mmc0_device,
- &kmi0_device,
- &kmi1_device,
-};
-
-/*
- * RealView EB platform devices
- */
-static struct resource realview_eb_flash_resource = {
- .start = REALVIEW_EB_FLASH_BASE,
- .end = REALVIEW_EB_FLASH_BASE + REALVIEW_EB_FLASH_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct resource realview_eb_eth_resources[] = {
- [0] = {
- .start = REALVIEW_EB_ETH_BASE,
- .end = REALVIEW_EB_ETH_BASE + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_EB_ETH,
- .end = IRQ_EB_ETH,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- },
-};
-
-/*
- * Detect and register the correct Ethernet device. RealView/EB rev D
- * platforms use the newer SMSC LAN9118 Ethernet chip
- */
-static int eth_device_register(void)
-{
- void __iomem *eth_addr = ioremap(REALVIEW_EB_ETH_BASE, SZ_4K);
- const char *name = NULL;
- u32 idrev;
-
- if (!eth_addr)
- return -ENOMEM;
-
- idrev = readl(eth_addr + 0x50);
- if ((idrev & 0xFFFF0000) != 0x01180000)
- /* SMSC LAN9118 not present, use LAN91C111 instead */
- name = "smc91x";
-
- iounmap(eth_addr);
- return realview_eth_register(name, realview_eb_eth_resources);
-}
-
-static struct resource realview_eb_isp1761_resources[] = {
- [0] = {
- .start = REALVIEW_EB_USB_BASE,
- .end = REALVIEW_EB_USB_BASE + SZ_128K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_EB_USB,
- .end = IRQ_EB_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pmu_resources[] = {
- [0] = {
- .start = IRQ_EB11MP_PMU_CPU0,
- .end = IRQ_EB11MP_PMU_CPU0,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = IRQ_EB11MP_PMU_CPU1,
- .end = IRQ_EB11MP_PMU_CPU1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_EB11MP_PMU_CPU2,
- .end = IRQ_EB11MP_PMU_CPU2,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_EB11MP_PMU_CPU3,
- .end = IRQ_EB11MP_PMU_CPU3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device pmu_device = {
- .id = -1,
- .num_resources = ARRAY_SIZE(pmu_resources),
- .resource = pmu_resources,
-};
-
-static struct resource char_lcd_resources[] = {
- {
- .start = REALVIEW_CHAR_LCD_BASE,
- .end = (REALVIEW_CHAR_LCD_BASE + SZ_4K - 1),
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_EB_CHARLCD,
- .end = IRQ_EB_CHARLCD,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device char_lcd_device = {
- .name = "arm-charlcd",
- .id = -1,
- .num_resources = ARRAY_SIZE(char_lcd_resources),
- .resource = char_lcd_resources,
-};
-
-static void __init gic_init_irq(void)
-{
- if (core_tile_eb11mp() || core_tile_a9mp()) {
- unsigned int pldctrl;
-
- /* new irq mode */
- writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
- pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + REALVIEW_EB11MP_SYS_PLD_CTRL1);
- pldctrl |= 0x00800000;
- writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + REALVIEW_EB11MP_SYS_PLD_CTRL1);
- writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
-
- /* core tile GIC, primary */
- gic_init(0, 29, __io_address(REALVIEW_EB11MP_GIC_DIST_BASE),
- __io_address(REALVIEW_EB11MP_GIC_CPU_BASE));
-
-#ifndef CONFIG_REALVIEW_EB_ARM11MP_REVB
- /* board GIC, secondary */
- gic_init(1, 96, __io_address(REALVIEW_EB_GIC_DIST_BASE),
- __io_address(REALVIEW_EB_GIC_CPU_BASE));
- gic_cascade_irq(1, IRQ_EB11MP_EB_IRQ1);
-#endif
- } else {
- /* board GIC, primary */
- gic_init(0, 29, __io_address(REALVIEW_EB_GIC_DIST_BASE),
- __io_address(REALVIEW_EB_GIC_CPU_BASE));
- }
-}
-
-/*
- * Fix up the IRQ numbers for the RealView EB/ARM11MPCore tile
- */
-static void realview_eb11mp_fixup(void)
-{
- /* AMBA devices */
- dmac_device.irq[0] = IRQ_EB11MP_DMA;
- uart0_device.irq[0] = IRQ_EB11MP_UART0;
- uart1_device.irq[0] = IRQ_EB11MP_UART1;
- uart2_device.irq[0] = IRQ_EB11MP_UART2;
- uart3_device.irq[0] = IRQ_EB11MP_UART3;
- clcd_device.irq[0] = IRQ_EB11MP_CLCD;
- wdog_device.irq[0] = IRQ_EB11MP_WDOG;
- gpio0_device.irq[0] = IRQ_EB11MP_GPIO0;
- gpio1_device.irq[0] = IRQ_EB11MP_GPIO1;
- gpio2_device.irq[0] = IRQ_EB11MP_GPIO2;
- rtc_device.irq[0] = IRQ_EB11MP_RTC;
- sci0_device.irq[0] = IRQ_EB11MP_SCI;
- ssp0_device.irq[0] = IRQ_EB11MP_SSP;
- aaci_device.irq[0] = IRQ_EB11MP_AACI;
- mmc0_device.irq[0] = IRQ_EB11MP_MMCI0A;
- mmc0_device.irq[1] = IRQ_EB11MP_MMCI0B;
- kmi0_device.irq[0] = IRQ_EB11MP_KMI0;
- kmi1_device.irq[0] = IRQ_EB11MP_KMI1;
-
- /* platform devices */
- realview_eb_eth_resources[1].start = IRQ_EB11MP_ETH;
- realview_eb_eth_resources[1].end = IRQ_EB11MP_ETH;
- realview_eb_isp1761_resources[1].start = IRQ_EB11MP_USB;
- realview_eb_isp1761_resources[1].end = IRQ_EB11MP_USB;
-}
-
-#ifdef CONFIG_HAVE_ARM_TWD
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
- REALVIEW_EB11MP_TWD_BASE,
- IRQ_LOCALTIMER);
-
-static void __init realview_eb_twd_init(void)
-{
- if (core_tile_eb11mp() || core_tile_a9mp()) {
- int err = twd_local_timer_register(&twd_local_timer);
- if (err)
- pr_err("twd_local_timer_register failed %d\n", err);
- }
-}
-#else
-#define realview_eb_twd_init() do { } while(0)
-#endif
-
-static void __init realview_eb_timer_init(void)
-{
- unsigned int timer_irq;
-
- timer0_va_base = __io_address(REALVIEW_EB_TIMER0_1_BASE);
- timer1_va_base = __io_address(REALVIEW_EB_TIMER0_1_BASE) + 0x20;
- timer2_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE);
- timer3_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE) + 0x20;
-
- if (core_tile_eb11mp() || core_tile_a9mp())
- timer_irq = IRQ_EB11MP_TIMER0_1;
- else
- timer_irq = IRQ_EB_TIMER0_1;
-
- realview_clk_init(__io_address(REALVIEW_SYS_BASE), false);
- realview_timer_init(timer_irq);
- realview_eb_twd_init();
-}
-
-static void realview_eb_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
- void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
-
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- if (core_tile_eb11mp())
- __raw_writel(0x0008, reset_ctrl);
- dsb();
-}
-
-static void __init realview_eb_init(void)
-{
- int i;
-
- if (core_tile_eb11mp() || core_tile_a9mp()) {
- realview_eb11mp_fixup();
-
-#ifdef CONFIG_CACHE_L2X0
- /*
- * The PL220 needs to be manually configured as the hardware
- * doesn't report the correct sizes.
- * 1MB (128KB/way), 8-way associativity, event monitor and
- * parity enabled, ignore share bit, no force write allocate
- * Bits: .... ...0 0111 1001 0000 .... .... ....
- */
- l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
-
- /*
- * due to a bug in the l220 cache controller, we must not call
- * the sync function. stub it out here instead!
- */
- outer_cache.sync = NULL;
-#endif
- pmu_device.name = core_tile_a9mp() ? "armv7-pmu" : "armv6-pmu";
- platform_device_register(&pmu_device);
- }
-
- realview_flash_register(&realview_eb_flash_resource, 1);
- platform_device_register(&realview_i2c_device);
- platform_device_register(&char_lcd_device);
- platform_device_register(&realview_leds_device);
- eth_device_register();
- realview_usb_register(realview_eb_isp1761_resources);
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-}
-
-MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .smp = smp_ops(realview_smp_ops),
- .fixup = realview_fixup,
- .map_io = realview_eb_map_io,
- .init_early = realview_init_early,
- .init_irq = gic_init_irq,
- .init_time = realview_eb_timer_init,
- .init_machine = realview_eb_init,
-#ifdef CONFIG_ZONE_DMA
- .dma_zone_size = SZ_256M,
-#endif
- .restart = realview_eb_restart,
-MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
deleted file mode 100644
index 537f3878d501..000000000000
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/realview_pb1176.c
- *
- * Copyright (C) 2008 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
-#include <linux/amba/mmci.h>
-#include <linux/amba/pl022.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-realview.h>
-#include <linux/reboot.h>
-#include <linux/memblock.h>
-
-#include "hardware.h"
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/pgtable.h>
-#include <asm/hardware/cache-l2x0.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include "board-pb1176.h"
-#include "irqs-pb1176.h"
-
-#include "core.h"
-
-static struct map_desc realview_pb1176_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_DC1176_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_DC1176_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_DC1176_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_DC1176_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_TIMER0_1_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_TIMER0_1_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_TIMER2_3_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_TIMER2_3_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_L220_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_L220_BASE),
- .length = SZ_8K,
- .type = MT_DEVICE,
- },
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = IO_ADDRESS(REALVIEW_PB1176_UART0_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB1176_UART0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#endif
-};
-
-static void __init realview_pb1176_map_io(void)
-{
- iotable_init(realview_pb1176_io_desc, ARRAY_SIZE(realview_pb1176_io_desc));
-}
-
-static struct pl061_platform_data gpio0_plat_data = {
- .gpio_base = 0,
-};
-
-static struct pl061_platform_data gpio1_plat_data = {
- .gpio_base = 8,
-};
-
-static struct pl061_platform_data gpio2_plat_data = {
- .gpio_base = 16,
-};
-
-static struct pl022_ssp_controller ssp0_plat_data = {
- .bus_id = 0,
- .enable_dma = 0,
- .num_chipselect = 1,
-};
-
-/*
- * RealView PB1176 AMBA devices
- */
-#define GPIO2_IRQ { IRQ_PB1176_GPIO2 }
-#define GPIO3_IRQ { IRQ_PB1176_GPIO3 }
-#define AACI_IRQ { IRQ_PB1176_AACI }
-#define MMCI0_IRQ { IRQ_PB1176_MMCI0A, IRQ_PB1176_MMCI0B }
-#define KMI0_IRQ { IRQ_PB1176_KMI0 }
-#define KMI1_IRQ { IRQ_PB1176_KMI1 }
-#define PB1176_SMC_IRQ { }
-#define MPMC_IRQ { }
-#define PB1176_CLCD_IRQ { IRQ_DC1176_CLCD }
-#define SCTL_IRQ { }
-#define PB1176_WATCHDOG_IRQ { IRQ_DC1176_WATCHDOG }
-#define PB1176_GPIO0_IRQ { IRQ_DC1176_GPIO0 }
-#define GPIO1_IRQ { IRQ_PB1176_GPIO1 }
-#define PB1176_RTC_IRQ { IRQ_DC1176_RTC }
-#define SCI_IRQ { IRQ_PB1176_SCI }
-#define PB1176_UART0_IRQ { IRQ_DC1176_UART0 }
-#define PB1176_UART1_IRQ { IRQ_DC1176_UART1 }
-#define PB1176_UART2_IRQ { IRQ_DC1176_UART2 }
-#define PB1176_UART3_IRQ { IRQ_DC1176_UART3 }
-#define PB1176_UART4_IRQ { IRQ_PB1176_UART4 }
-#define PB1176_SSP_IRQ { IRQ_DC1176_SSP }
-
-/* FPGA Primecells */
-APB_DEVICE(aaci, "fpga:aaci", AACI, NULL);
-APB_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
-APB_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
-APB_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
-APB_DEVICE(uart4, "fpga:uart4", PB1176_UART4, NULL);
-
-/* DevChip Primecells */
-AHB_DEVICE(smc, "dev:smc", PB1176_SMC, NULL);
-AHB_DEVICE(sctl, "dev:sctl", SCTL, NULL);
-APB_DEVICE(wdog, "dev:wdog", PB1176_WATCHDOG, NULL);
-APB_DEVICE(gpio0, "dev:gpio0", PB1176_GPIO0, &gpio0_plat_data);
-APB_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
-APB_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
-APB_DEVICE(rtc, "dev:rtc", PB1176_RTC, NULL);
-APB_DEVICE(sci0, "dev:sci0", SCI, NULL);
-APB_DEVICE(uart0, "dev:uart0", PB1176_UART0, NULL);
-APB_DEVICE(uart1, "dev:uart1", PB1176_UART1, NULL);
-APB_DEVICE(uart2, "dev:uart2", PB1176_UART2, NULL);
-APB_DEVICE(uart3, "dev:uart3", PB1176_UART3, NULL);
-APB_DEVICE(ssp0, "dev:ssp0", PB1176_SSP, &ssp0_plat_data);
-AHB_DEVICE(clcd, "dev:clcd", PB1176_CLCD, &clcd_plat_data);
-
-static struct amba_device *amba_devs[] __initdata = {
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &uart3_device,
- &uart4_device,
- &smc_device,
- &clcd_device,
- &sctl_device,
- &wdog_device,
- &gpio0_device,
- &gpio1_device,
- &gpio2_device,
- &rtc_device,
- &sci0_device,
- &ssp0_device,
- &aaci_device,
- &mmc0_device,
- &kmi0_device,
- &kmi1_device,
-};
-
-/*
- * RealView PB1176 platform devices
- */
-static struct resource realview_pb1176_flash_resources[] = {
- {
- .start = REALVIEW_PB1176_FLASH_BASE,
- .end = REALVIEW_PB1176_FLASH_BASE + REALVIEW_PB1176_FLASH_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
- {
- .start = REALVIEW_PB1176_SEC_FLASH_BASE,
- .end = REALVIEW_PB1176_SEC_FLASH_BASE + REALVIEW_PB1176_SEC_FLASH_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-#endif
-};
-
-static struct physmap_flash_data pb1176_rom_pdata = {
- .probe_type = "map_rom",
- .width = 4,
- .nr_parts = 0,
-};
-
-static struct resource pb1176_rom_resources[] = {
- /*
- * This exposes the PB1176 DevChip ROM as an MTD ROM mapping.
- * The reference manual states that this is actually a pseudo-ROM
- * programmed in NVRAM.
- */
- {
- .start = REALVIEW_DC1176_ROM_BASE,
- .end = REALVIEW_DC1176_ROM_BASE + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device pb1176_rom_device = {
- .name = "physmap-flash",
- .id = -1,
- .num_resources = ARRAY_SIZE(pb1176_rom_resources),
- .resource = pb1176_rom_resources,
- .dev = {
- .platform_data = &pb1176_rom_pdata,
- },
-};
-
-static struct resource realview_pb1176_smsc911x_resources[] = {
- [0] = {
- .start = REALVIEW_PB1176_ETH_BASE,
- .end = REALVIEW_PB1176_ETH_BASE + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PB1176_ETH,
- .end = IRQ_PB1176_ETH,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource realview_pb1176_isp1761_resources[] = {
- [0] = {
- .start = REALVIEW_PB1176_USB_BASE,
- .end = REALVIEW_PB1176_USB_BASE + SZ_128K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PB1176_USB,
- .end = IRQ_PB1176_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pmu_resource = {
- .start = IRQ_DC1176_CORE_PMU,
- .end = IRQ_DC1176_CORE_PMU,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct platform_device pmu_device = {
- .name = "armv6-pmu",
- .id = -1,
- .num_resources = 1,
- .resource = &pmu_resource,
-};
-
-static struct resource char_lcd_resources[] = {
- {
- .start = REALVIEW_CHAR_LCD_BASE,
- .end = (REALVIEW_CHAR_LCD_BASE + SZ_4K - 1),
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_PB1176_CHARLCD,
- .end = IRQ_PB1176_CHARLCD,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device char_lcd_device = {
- .name = "arm-charlcd",
- .id = -1,
- .num_resources = ARRAY_SIZE(char_lcd_resources),
- .resource = char_lcd_resources,
-};
-
-static void __init gic_init_irq(void)
-{
- /* ARM1176 DevChip GIC, primary */
- gic_init(0, IRQ_DC1176_GIC_START,
- __io_address(REALVIEW_DC1176_GIC_DIST_BASE),
- __io_address(REALVIEW_DC1176_GIC_CPU_BASE));
-
- /* board GIC, secondary */
- gic_init(1, IRQ_PB1176_GIC_START,
- __io_address(REALVIEW_PB1176_GIC_DIST_BASE),
- __io_address(REALVIEW_PB1176_GIC_CPU_BASE));
- gic_cascade_irq(1, IRQ_DC1176_PB_IRQ1);
-}
-
-static void __init realview_pb1176_timer_init(void)
-{
- timer0_va_base = __io_address(REALVIEW_PB1176_TIMER0_1_BASE);
- timer1_va_base = __io_address(REALVIEW_PB1176_TIMER0_1_BASE) + 0x20;
- timer2_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE);
- timer3_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE) + 0x20;
-
- realview_clk_init(__io_address(REALVIEW_SYS_BASE), true);
- realview_timer_init(IRQ_DC1176_TIMER0);
-}
-
-static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
- void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
- __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- __raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
- dsb();
-}
-
-static void realview_pb1176_fixup(struct tag *tags, char **from)
-{
- /*
- * RealView PB1176 only has 128MB of RAM mapped at 0.
- */
- memblock_add(0, SZ_128M);
-}
-
-static void __init realview_pb1176_init(void)
-{
- int i;
-
-#ifdef CONFIG_CACHE_L2X0
- /*
- * The PL220 needs to be manually configured as the hardware
- * doesn't report the correct sizes.
- * 128kB (16kB/way), 8-way associativity, event monitor and
- * parity enabled, ignore share bit, no force write allocate
- * Bits: .... ...0 0111 0011 0000 .... .... ....
- */
- l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
-#endif
-
- realview_flash_register(realview_pb1176_flash_resources,
- ARRAY_SIZE(realview_pb1176_flash_resources));
- platform_device_register(&pb1176_rom_device);
- realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
- platform_device_register(&realview_i2c_device);
- realview_usb_register(realview_pb1176_isp1761_resources);
- platform_device_register(&pmu_device);
- platform_device_register(&char_lcd_device);
- platform_device_register(&realview_leds_device);
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-}
-
-MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .fixup = realview_pb1176_fixup,
- .map_io = realview_pb1176_map_io,
- .init_early = realview_init_early,
- .init_irq = gic_init_irq,
- .init_time = realview_pb1176_timer_init,
- .init_machine = realview_pb1176_init,
-#ifdef CONFIG_ZONE_DMA
- .dma_zone_size = SZ_256M,
-#endif
- .restart = realview_pb1176_restart,
-MACHINE_END
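
As a side note (not something the patch itself contains): the l2x0_init() call in the PB1176 file removed above programs the L220 auxiliary control register with the magic constant 0x00730000, which its comment decodes as 16kB/way, 8-way associativity, event monitor and parity enabled, ignore share bit. A minimal sketch of how that constant is composed follows; the field positions are taken from the L2x0 TRM and are an assumption here, not stated anywhere in this diff (the PB11MPCore file below uses the same layout with a 128kB way size, giving 0x00790000).

/*
 * Illustrative decomposition of the PB1176 L220 aux value (assumed
 * field layout: associativity [16:13], way size [19:17], event monitor
 * bus enable bit 20, parity enable bit 21, share override bit 22).
 */
#define L220_ASSOC_8WAY		(8 << 13)
#define L220_WAY_SIZE_16K	(1 << 17)	/* 16kB/way, 128kB total */
#define L220_EVTMON_ENABLE	(1 << 20)
#define L220_PARITY_ENABLE	(1 << 21)
#define L220_SHARE_OVERRIDE	(1 << 22)	/* "ignore share bit" */

#define PB1176_L220_AUX_VAL	(L220_ASSOC_8WAY | L220_WAY_SIZE_16K | \
				 L220_EVTMON_ENABLE | L220_PARITY_ENABLE | \
				 L220_SHARE_OVERRIDE)

_Static_assert(PB1176_L220_AUX_VAL == 0x00730000, "matches the magic number");
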
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
deleted file mode 100644
index a90a0752f157..000000000000
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/realview_pb11mp.c
- *
- * Copyright (C) 2008 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
-#include <linux/amba/mmci.h>
-#include <linux/amba/pl022.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-realview.h>
-#include <linux/reboot.h>
-
-#include "hardware.h"
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/pgtable.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/smp_twd.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-#include <asm/outercache.h>
-
-#include "board-pb11mp.h"
-#include "irqs-pb11mp.h"
-
-#include "core.h"
-
-static struct map_desc realview_pb11mp_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB11MP_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB11MP_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB11MP_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB11MP_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, { /* Maps the SCU, GIC CPU interface, TWD, GIC DIST */
- .virtual = IO_ADDRESS(REALVIEW_TC11MP_PRIV_MEM_BASE),
- .pfn = __phys_to_pfn(REALVIEW_TC11MP_PRIV_MEM_BASE),
- .length = REALVIEW_TC11MP_PRIV_MEM_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB11MP_TIMER0_1_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB11MP_TIMER0_1_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PB11MP_TIMER2_3_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB11MP_TIMER2_3_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_TC11MP_L220_BASE),
- .pfn = __phys_to_pfn(REALVIEW_TC11MP_L220_BASE),
- .length = SZ_8K,
- .type = MT_DEVICE,
- },
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PB11MP_UART0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#endif
-};
-
-static void __init realview_pb11mp_map_io(void)
-{
- iotable_init(realview_pb11mp_io_desc, ARRAY_SIZE(realview_pb11mp_io_desc));
-}
-
-static struct pl061_platform_data gpio0_plat_data = {
- .gpio_base = 0,
-};
-
-static struct pl061_platform_data gpio1_plat_data = {
- .gpio_base = 8,
-};
-
-static struct pl061_platform_data gpio2_plat_data = {
- .gpio_base = 16,
-};
-
-static struct pl022_ssp_controller ssp0_plat_data = {
- .bus_id = 0,
- .enable_dma = 0,
- .num_chipselect = 1,
-};
-
-/*
- * RealView PB11MPCore AMBA devices
- */
-
-#define GPIO2_IRQ { IRQ_PB11MP_GPIO2 }
-#define GPIO3_IRQ { IRQ_PB11MP_GPIO3 }
-#define AACI_IRQ { IRQ_TC11MP_AACI }
-#define MMCI0_IRQ { IRQ_TC11MP_MMCI0A, IRQ_TC11MP_MMCI0B }
-#define KMI0_IRQ { IRQ_TC11MP_KMI0 }
-#define KMI1_IRQ { IRQ_TC11MP_KMI1 }
-#define PB11MP_SMC_IRQ { }
-#define MPMC_IRQ { }
-#define PB11MP_CLCD_IRQ { IRQ_PB11MP_CLCD }
-#define DMAC_IRQ { IRQ_PB11MP_DMAC }
-#define SCTL_IRQ { }
-#define PB11MP_WATCHDOG_IRQ { IRQ_PB11MP_WATCHDOG }
-#define PB11MP_GPIO0_IRQ { IRQ_PB11MP_GPIO0 }
-#define GPIO1_IRQ { IRQ_PB11MP_GPIO1 }
-#define PB11MP_RTC_IRQ { IRQ_TC11MP_RTC }
-#define SCI_IRQ { IRQ_PB11MP_SCI }
-#define PB11MP_UART0_IRQ { IRQ_TC11MP_UART0 }
-#define PB11MP_UART1_IRQ { IRQ_TC11MP_UART1 }
-#define PB11MP_UART2_IRQ { IRQ_PB11MP_UART2 }
-#define PB11MP_UART3_IRQ { IRQ_PB11MP_UART3 }
-#define PB11MP_SSP_IRQ { IRQ_PB11MP_SSP }
-
-/* FPGA Primecells */
-APB_DEVICE(aaci, "fpga:aaci", AACI, NULL);
-APB_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
-APB_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
-APB_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
-APB_DEVICE(uart3, "fpga:uart3", PB11MP_UART3, NULL);
-
-/* DevChip Primecells */
-AHB_DEVICE(smc, "dev:smc", PB11MP_SMC, NULL);
-AHB_DEVICE(sctl, "dev:sctl", SCTL, NULL);
-APB_DEVICE(wdog, "dev:wdog", PB11MP_WATCHDOG, NULL);
-APB_DEVICE(gpio0, "dev:gpio0", PB11MP_GPIO0, &gpio0_plat_data);
-APB_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
-APB_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
-APB_DEVICE(rtc, "dev:rtc", PB11MP_RTC, NULL);
-APB_DEVICE(sci0, "dev:sci0", SCI, NULL);
-APB_DEVICE(uart0, "dev:uart0", PB11MP_UART0, NULL);
-APB_DEVICE(uart1, "dev:uart1", PB11MP_UART1, NULL);
-APB_DEVICE(uart2, "dev:uart2", PB11MP_UART2, NULL);
-APB_DEVICE(ssp0, "dev:ssp0", PB11MP_SSP, &ssp0_plat_data);
-
-/* Primecells on the NEC ISSP chip */
-AHB_DEVICE(clcd, "issp:clcd", PB11MP_CLCD, &clcd_plat_data);
-AHB_DEVICE(dmac, "issp:dmac", DMAC, NULL);
-
-static struct amba_device *amba_devs[] __initdata = {
- &dmac_device,
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &uart3_device,
- &smc_device,
- &clcd_device,
- &sctl_device,
- &wdog_device,
- &gpio0_device,
- &gpio1_device,
- &gpio2_device,
- &rtc_device,
- &sci0_device,
- &ssp0_device,
- &aaci_device,
- &mmc0_device,
- &kmi0_device,
- &kmi1_device,
-};
-
-/*
- * RealView PB11MPCore platform devices
- */
-static struct resource realview_pb11mp_flash_resource[] = {
- [0] = {
- .start = REALVIEW_PB11MP_FLASH0_BASE,
- .end = REALVIEW_PB11MP_FLASH0_BASE + REALVIEW_PB11MP_FLASH0_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = REALVIEW_PB11MP_FLASH1_BASE,
- .end = REALVIEW_PB11MP_FLASH1_BASE + REALVIEW_PB11MP_FLASH1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct resource realview_pb11mp_smsc911x_resources[] = {
- [0] = {
- .start = REALVIEW_PB11MP_ETH_BASE,
- .end = REALVIEW_PB11MP_ETH_BASE + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_TC11MP_ETH,
- .end = IRQ_TC11MP_ETH,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource realview_pb11mp_isp1761_resources[] = {
- [0] = {
- .start = REALVIEW_PB11MP_USB_BASE,
- .end = REALVIEW_PB11MP_USB_BASE + SZ_128K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_TC11MP_USB,
- .end = IRQ_TC11MP_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pmu_resources[] = {
- [0] = {
- .start = IRQ_TC11MP_PMU_CPU0,
- .end = IRQ_TC11MP_PMU_CPU0,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = IRQ_TC11MP_PMU_CPU1,
- .end = IRQ_TC11MP_PMU_CPU1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_TC11MP_PMU_CPU2,
- .end = IRQ_TC11MP_PMU_CPU2,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_TC11MP_PMU_CPU3,
- .end = IRQ_TC11MP_PMU_CPU3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device pmu_device = {
- .name = "armv6-pmu",
- .id = -1,
- .num_resources = ARRAY_SIZE(pmu_resources),
- .resource = pmu_resources,
-};
-
-static void __init gic_init_irq(void)
-{
- unsigned int pldctrl;
-
- /* new irq mode with no DCC */
- writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
- pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + REALVIEW_PB11MP_SYS_PLD_CTRL1);
- pldctrl |= 2 << 22;
- writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + REALVIEW_PB11MP_SYS_PLD_CTRL1);
- writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
-
- /* ARM11MPCore test chip GIC, primary */
- gic_init(0, 29, __io_address(REALVIEW_TC11MP_GIC_DIST_BASE),
- __io_address(REALVIEW_TC11MP_GIC_CPU_BASE));
-
- /* board GIC, secondary */
- gic_init(1, IRQ_PB11MP_GIC_START,
- __io_address(REALVIEW_PB11MP_GIC_DIST_BASE),
- __io_address(REALVIEW_PB11MP_GIC_CPU_BASE));
- gic_cascade_irq(1, IRQ_TC11MP_PB_IRQ1);
-}
-
-#ifdef CONFIG_HAVE_ARM_TWD
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
- REALVIEW_TC11MP_TWD_BASE,
- IRQ_LOCALTIMER);
-
-static void __init realview_pb11mp_twd_init(void)
-{
- int err = twd_local_timer_register(&twd_local_timer);
- if (err)
- pr_err("twd_local_timer_register failed %d\n", err);
-}
-#else
-#define realview_pb11mp_twd_init() do {} while(0)
-#endif
-
-static void __init realview_pb11mp_timer_init(void)
-{
- timer0_va_base = __io_address(REALVIEW_PB11MP_TIMER0_1_BASE);
- timer1_va_base = __io_address(REALVIEW_PB11MP_TIMER0_1_BASE) + 0x20;
- timer2_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE);
- timer3_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE) + 0x20;
-
- realview_clk_init(__io_address(REALVIEW_SYS_BASE), false);
- realview_timer_init(IRQ_TC11MP_TIMER0_1);
- realview_pb11mp_twd_init();
-}
-
-static void realview_pb11mp_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
- void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
-
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- __raw_writel(0x0000, reset_ctrl);
- __raw_writel(0x0004, reset_ctrl);
- dsb();
-}
-
-static void __init realview_pb11mp_init(void)
-{
- int i;
-
-#ifdef CONFIG_CACHE_L2X0
- /*
- * The PL220 needs to be manually configured as the hardware
- * doesn't report the correct sizes.
- * 1MB (128KB/way), 8-way associativity, event monitor and
- * parity enabled, ignore share bit, no force write allocate
- * Bits: .... ...0 0111 1001 0000 .... .... ....
- */
- l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
- /*
- * due to a bug in the l220 cache controller, we must not call
- * the sync function. stub it out here instead!
- */
- outer_cache.sync = NULL;
-#endif
-
- realview_flash_register(realview_pb11mp_flash_resource,
- ARRAY_SIZE(realview_pb11mp_flash_resource));
- realview_eth_register(NULL, realview_pb11mp_smsc911x_resources);
- platform_device_register(&realview_i2c_device);
- platform_device_register(&realview_cf_device);
- platform_device_register(&realview_leds_device);
- realview_usb_register(realview_pb11mp_isp1761_resources);
- platform_device_register(&pmu_device);
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-}
-
-MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .smp = smp_ops(realview_smp_ops),
- .fixup = realview_fixup,
- .map_io = realview_pb11mp_map_io,
- .init_early = realview_init_early,
- .init_irq = gic_init_irq,
- .init_time = realview_pb11mp_timer_init,
- .init_machine = realview_pb11mp_init,
-#ifdef CONFIG_ZONE_DMA
- .dma_zone_size = SZ_256M,
-#endif
- .restart = realview_pb11mp_restart,
-MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
deleted file mode 100644
index ddafb67c2b6f..000000000000
--- a/arch/arm/mach-realview/realview_pba8.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/realview_pba8.c
- *
- * Copyright (C) 2008 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
-#include <linux/amba/mmci.h>
-#include <linux/amba/pl022.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-realview.h>
-#include <linux/reboot.h>
-
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/pgtable.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include "hardware.h"
-#include "board-pba8.h"
-#include "irqs-pba8.h"
-
-#include "core.h"
-
-static struct map_desc realview_pba8_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBA8_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBA8_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBA8_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBA8_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBA8_TIMER0_1_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBA8_TIMER0_1_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBA8_TIMER2_3_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBA8_TIMER2_3_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = IO_ADDRESS(REALVIEW_PBA8_UART0_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBA8_UART0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#endif
-};
-
-static void __init realview_pba8_map_io(void)
-{
- iotable_init(realview_pba8_io_desc, ARRAY_SIZE(realview_pba8_io_desc));
-}
-
-static struct pl061_platform_data gpio0_plat_data = {
- .gpio_base = 0,
-};
-
-static struct pl061_platform_data gpio1_plat_data = {
- .gpio_base = 8,
-};
-
-static struct pl061_platform_data gpio2_plat_data = {
- .gpio_base = 16,
-};
-
-static struct pl022_ssp_controller ssp0_plat_data = {
- .bus_id = 0,
- .enable_dma = 0,
- .num_chipselect = 1,
-};
-
-/*
- * RealView PBA8Core AMBA devices
- */
-
-#define GPIO2_IRQ { IRQ_PBA8_GPIO2 }
-#define GPIO3_IRQ { IRQ_PBA8_GPIO3 }
-#define AACI_IRQ { IRQ_PBA8_AACI }
-#define MMCI0_IRQ { IRQ_PBA8_MMCI0A, IRQ_PBA8_MMCI0B }
-#define KMI0_IRQ { IRQ_PBA8_KMI0 }
-#define KMI1_IRQ { IRQ_PBA8_KMI1 }
-#define PBA8_SMC_IRQ { }
-#define MPMC_IRQ { }
-#define PBA8_CLCD_IRQ { IRQ_PBA8_CLCD }
-#define DMAC_IRQ { IRQ_PBA8_DMAC }
-#define SCTL_IRQ { }
-#define PBA8_WATCHDOG_IRQ { IRQ_PBA8_WATCHDOG }
-#define PBA8_GPIO0_IRQ { IRQ_PBA8_GPIO0 }
-#define GPIO1_IRQ { IRQ_PBA8_GPIO1 }
-#define PBA8_RTC_IRQ { IRQ_PBA8_RTC }
-#define SCI_IRQ { IRQ_PBA8_SCI }
-#define PBA8_UART0_IRQ { IRQ_PBA8_UART0 }
-#define PBA8_UART1_IRQ { IRQ_PBA8_UART1 }
-#define PBA8_UART2_IRQ { IRQ_PBA8_UART2 }
-#define PBA8_UART3_IRQ { IRQ_PBA8_UART3 }
-#define PBA8_SSP_IRQ { IRQ_PBA8_SSP }
-
-/* FPGA Primecells */
-APB_DEVICE(aaci, "fpga:aaci", AACI, NULL);
-APB_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
-APB_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
-APB_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
-APB_DEVICE(uart3, "fpga:uart3", PBA8_UART3, NULL);
-
-/* DevChip Primecells */
-AHB_DEVICE(smc, "dev:smc", PBA8_SMC, NULL);
-AHB_DEVICE(sctl, "dev:sctl", SCTL, NULL);
-APB_DEVICE(wdog, "dev:wdog", PBA8_WATCHDOG, NULL);
-APB_DEVICE(gpio0, "dev:gpio0", PBA8_GPIO0, &gpio0_plat_data);
-APB_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
-APB_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
-APB_DEVICE(rtc, "dev:rtc", PBA8_RTC, NULL);
-APB_DEVICE(sci0, "dev:sci0", SCI, NULL);
-APB_DEVICE(uart0, "dev:uart0", PBA8_UART0, NULL);
-APB_DEVICE(uart1, "dev:uart1", PBA8_UART1, NULL);
-APB_DEVICE(uart2, "dev:uart2", PBA8_UART2, NULL);
-APB_DEVICE(ssp0, "dev:ssp0", PBA8_SSP, &ssp0_plat_data);
-
-/* Primecells on the NEC ISSP chip */
-AHB_DEVICE(clcd, "issp:clcd", PBA8_CLCD, &clcd_plat_data);
-AHB_DEVICE(dmac, "issp:dmac", DMAC, NULL);
-
-static struct amba_device *amba_devs[] __initdata = {
- &dmac_device,
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &uart3_device,
- &smc_device,
- &clcd_device,
- &sctl_device,
- &wdog_device,
- &gpio0_device,
- &gpio1_device,
- &gpio2_device,
- &rtc_device,
- &sci0_device,
- &ssp0_device,
- &aaci_device,
- &mmc0_device,
- &kmi0_device,
- &kmi1_device,
-};
-
-/*
- * RealView PB-A8 platform devices
- */
-static struct resource realview_pba8_flash_resource[] = {
- [0] = {
- .start = REALVIEW_PBA8_FLASH0_BASE,
- .end = REALVIEW_PBA8_FLASH0_BASE + REALVIEW_PBA8_FLASH0_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = REALVIEW_PBA8_FLASH1_BASE,
- .end = REALVIEW_PBA8_FLASH1_BASE + REALVIEW_PBA8_FLASH1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct resource realview_pba8_smsc911x_resources[] = {
- [0] = {
- .start = REALVIEW_PBA8_ETH_BASE,
- .end = REALVIEW_PBA8_ETH_BASE + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PBA8_ETH,
- .end = IRQ_PBA8_ETH,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource realview_pba8_isp1761_resources[] = {
- [0] = {
- .start = REALVIEW_PBA8_USB_BASE,
- .end = REALVIEW_PBA8_USB_BASE + SZ_128K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PBA8_USB,
- .end = IRQ_PBA8_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pmu_resource = {
- .start = IRQ_PBA8_PMU,
- .end = IRQ_PBA8_PMU,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct platform_device pmu_device = {
- .name = "armv7-pmu",
- .id = -1,
- .num_resources = 1,
- .resource = &pmu_resource,
-};
-
-static void __init gic_init_irq(void)
-{
- /* ARM PB-A8 on-board GIC */
- gic_init(0, IRQ_PBA8_GIC_START,
- __io_address(REALVIEW_PBA8_GIC_DIST_BASE),
- __io_address(REALVIEW_PBA8_GIC_CPU_BASE));
-}
-
-static void __init realview_pba8_timer_init(void)
-{
- timer0_va_base = __io_address(REALVIEW_PBA8_TIMER0_1_BASE);
- timer1_va_base = __io_address(REALVIEW_PBA8_TIMER0_1_BASE) + 0x20;
- timer2_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE);
- timer3_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE) + 0x20;
-
- realview_clk_init(__io_address(REALVIEW_SYS_BASE), false);
- realview_timer_init(IRQ_PBA8_TIMER0_1);
-}
-
-static void realview_pba8_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
- void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
-
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- __raw_writel(0x0000, reset_ctrl);
- __raw_writel(0x0004, reset_ctrl);
- dsb();
-}
-
-static void __init realview_pba8_init(void)
-{
- int i;
-
- realview_flash_register(realview_pba8_flash_resource,
- ARRAY_SIZE(realview_pba8_flash_resource));
- realview_eth_register(NULL, realview_pba8_smsc911x_resources);
- platform_device_register(&realview_i2c_device);
- platform_device_register(&realview_cf_device);
- platform_device_register(&realview_leds_device);
- realview_usb_register(realview_pba8_isp1761_resources);
- platform_device_register(&pmu_device);
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-}
-
-MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .fixup = realview_fixup,
- .map_io = realview_pba8_map_io,
- .init_early = realview_init_early,
- .init_irq = gic_init_irq,
- .init_time = realview_pba8_timer_init,
- .init_machine = realview_pba8_init,
-#ifdef CONFIG_ZONE_DMA
- .dma_zone_size = SZ_256M,
-#endif
- .restart = realview_pba8_restart,
-MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
deleted file mode 100644
index be1cec5fe3ad..000000000000
--- a/arch/arm/mach-realview/realview_pbx.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * arch/arm/mach-realview/realview_pbx.c
- *
- * Copyright (C) 2009 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/pl061.h>
-#include <linux/amba/mmci.h>
-#include <linux/amba/pl022.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-realview.h>
-#include <linux/reboot.h>
-#include <linux/memblock.h>
-
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-#include <asm/smp_twd.h>
-#include <asm/pgtable.h>
-#include <asm/hardware/cache-l2x0.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include "hardware.h"
-#include "board-pbx.h"
-#include "irqs-pbx.h"
-
-#include "core.h"
-
-static struct map_desc realview_pbx_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
- .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_TIMER0_1_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TIMER0_1_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_TIMER2_3_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TIMER2_3_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = IO_ADDRESS(REALVIEW_PBX_UART0_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_UART0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-#endif
-};
-
-static struct map_desc realview_local_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_SCU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_SCU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_GIC_DIST_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_GIC_DIST_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_L220_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_L220_BASE),
- .length = SZ_8K,
- .type = MT_DEVICE,
- }
-};
-
-static void __init realview_pbx_map_io(void)
-{
- iotable_init(realview_pbx_io_desc, ARRAY_SIZE(realview_pbx_io_desc));
- if (core_tile_pbx11mp() || core_tile_pbxa9mp())
- iotable_init(realview_local_io_desc, ARRAY_SIZE(realview_local_io_desc));
-}
-
-static struct pl061_platform_data gpio0_plat_data = {
- .gpio_base = 0,
-};
-
-static struct pl061_platform_data gpio1_plat_data = {
- .gpio_base = 8,
-};
-
-static struct pl061_platform_data gpio2_plat_data = {
- .gpio_base = 16,
-};
-
-static struct pl022_ssp_controller ssp0_plat_data = {
- .bus_id = 0,
- .enable_dma = 0,
- .num_chipselect = 1,
-};
-
-/*
- * RealView PBXCore AMBA devices
- */
-
-#define GPIO2_IRQ { IRQ_PBX_GPIO2 }
-#define GPIO3_IRQ { IRQ_PBX_GPIO3 }
-#define AACI_IRQ { IRQ_PBX_AACI }
-#define MMCI0_IRQ { IRQ_PBX_MMCI0A, IRQ_PBX_MMCI0B }
-#define KMI0_IRQ { IRQ_PBX_KMI0 }
-#define KMI1_IRQ { IRQ_PBX_KMI1 }
-#define PBX_SMC_IRQ { }
-#define MPMC_IRQ { }
-#define PBX_CLCD_IRQ { IRQ_PBX_CLCD }
-#define DMAC_IRQ { IRQ_PBX_DMAC }
-#define SCTL_IRQ { }
-#define PBX_WATCHDOG_IRQ { IRQ_PBX_WATCHDOG }
-#define PBX_GPIO0_IRQ { IRQ_PBX_GPIO0 }
-#define GPIO1_IRQ { IRQ_PBX_GPIO1 }
-#define PBX_RTC_IRQ { IRQ_PBX_RTC }
-#define SCI_IRQ { IRQ_PBX_SCI }
-#define PBX_UART0_IRQ { IRQ_PBX_UART0 }
-#define PBX_UART1_IRQ { IRQ_PBX_UART1 }
-#define PBX_UART2_IRQ { IRQ_PBX_UART2 }
-#define PBX_UART3_IRQ { IRQ_PBX_UART3 }
-#define PBX_SSP_IRQ { IRQ_PBX_SSP }
-
-/* FPGA Primecells */
-APB_DEVICE(aaci, "fpga:aaci", AACI, NULL);
-APB_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
-APB_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
-APB_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
-APB_DEVICE(uart3, "fpga:uart3", PBX_UART3, NULL);
-
-/* DevChip Primecells */
-AHB_DEVICE(smc, "dev:smc", PBX_SMC, NULL);
-AHB_DEVICE(sctl, "dev:sctl", SCTL, NULL);
-APB_DEVICE(wdog, "dev:wdog", PBX_WATCHDOG, NULL);
-APB_DEVICE(gpio0, "dev:gpio0", PBX_GPIO0, &gpio0_plat_data);
-APB_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
-APB_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
-APB_DEVICE(rtc, "dev:rtc", PBX_RTC, NULL);
-APB_DEVICE(sci0, "dev:sci0", SCI, NULL);
-APB_DEVICE(uart0, "dev:uart0", PBX_UART0, NULL);
-APB_DEVICE(uart1, "dev:uart1", PBX_UART1, NULL);
-APB_DEVICE(uart2, "dev:uart2", PBX_UART2, NULL);
-APB_DEVICE(ssp0, "dev:ssp0", PBX_SSP, &ssp0_plat_data);
-
-/* Primecells on the NEC ISSP chip */
-AHB_DEVICE(clcd, "issp:clcd", PBX_CLCD, &clcd_plat_data);
-AHB_DEVICE(dmac, "issp:dmac", DMAC, NULL);
-
-static struct amba_device *amba_devs[] __initdata = {
- &dmac_device,
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &uart3_device,
- &smc_device,
- &clcd_device,
- &sctl_device,
- &wdog_device,
- &gpio0_device,
- &gpio1_device,
- &gpio2_device,
- &rtc_device,
- &sci0_device,
- &ssp0_device,
- &aaci_device,
- &mmc0_device,
- &kmi0_device,
- &kmi1_device,
-};
-
-/*
- * RealView PB-X platform devices
- */
-static struct resource realview_pbx_flash_resources[] = {
- [0] = {
- .start = REALVIEW_PBX_FLASH0_BASE,
- .end = REALVIEW_PBX_FLASH0_BASE + REALVIEW_PBX_FLASH0_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = REALVIEW_PBX_FLASH1_BASE,
- .end = REALVIEW_PBX_FLASH1_BASE + REALVIEW_PBX_FLASH1_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct resource realview_pbx_smsc911x_resources[] = {
- [0] = {
- .start = REALVIEW_PBX_ETH_BASE,
- .end = REALVIEW_PBX_ETH_BASE + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PBX_ETH,
- .end = IRQ_PBX_ETH,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource realview_pbx_isp1761_resources[] = {
- [0] = {
- .start = REALVIEW_PBX_USB_BASE,
- .end = REALVIEW_PBX_USB_BASE + SZ_128K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PBX_USB,
- .end = IRQ_PBX_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-#ifdef CONFIG_CACHE_L2X0
-static struct resource pmu_resources[] = {
- [0] = {
- .start = IRQ_PBX_PMU_CPU0,
- .end = IRQ_PBX_PMU_CPU0,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = IRQ_PBX_PMU_CPU1,
- .end = IRQ_PBX_PMU_CPU1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_PBX_PMU_CPU2,
- .end = IRQ_PBX_PMU_CPU2,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = IRQ_PBX_PMU_CPU3,
- .end = IRQ_PBX_PMU_CPU3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device pmu_device = {
- .name = "armv7-pmu",
- .id = -1,
- .num_resources = ARRAY_SIZE(pmu_resources),
- .resource = pmu_resources,
-};
-#endif
-
-static void __init gic_init_irq(void)
-{
- /* ARM PBX on-board GIC */
- if (core_tile_pbx11mp() || core_tile_pbxa9mp()) {
- gic_init(0, 29, __io_address(REALVIEW_PBX_TILE_GIC_DIST_BASE),
- __io_address(REALVIEW_PBX_TILE_GIC_CPU_BASE));
- } else {
- gic_init(0, IRQ_PBX_GIC_START,
- __io_address(REALVIEW_PBX_GIC_DIST_BASE),
- __io_address(REALVIEW_PBX_GIC_CPU_BASE));
- }
-}
-
-#ifdef CONFIG_HAVE_ARM_TWD
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
- REALVIEW_PBX_TILE_TWD_BASE,
- IRQ_LOCALTIMER);
-
-static void __init realview_pbx_twd_init(void)
-{
- int err = twd_local_timer_register(&twd_local_timer);
- if (err)
- pr_err("twd_local_timer_register failed %d\n", err);
-}
-#else
-#define realview_pbx_twd_init() do { } while(0)
-#endif
-
-static void __init realview_pbx_timer_init(void)
-{
- timer0_va_base = __io_address(REALVIEW_PBX_TIMER0_1_BASE);
- timer1_va_base = __io_address(REALVIEW_PBX_TIMER0_1_BASE) + 0x20;
- timer2_va_base = __io_address(REALVIEW_PBX_TIMER2_3_BASE);
- timer3_va_base = __io_address(REALVIEW_PBX_TIMER2_3_BASE) + 0x20;
-
- realview_clk_init(__io_address(REALVIEW_SYS_BASE), false);
- realview_timer_init(IRQ_PBX_TIMER0_1);
- realview_pbx_twd_init();
-}
-
-static void realview_pbx_fixup(struct tag *tags, char **from)
-{
-#ifdef CONFIG_SPARSEMEM
- /*
- * Memory configuration with SPARSEMEM enabled on RealView PBX (see
- * asm/mach/memory.h for more information).
- */
-
- memblock_add(0, SZ_256M);
- memblock_add(0x20000000, SZ_512M);
- memblock_add(0x80000000, SZ_256M);
-#else
- realview_fixup(tags, from);
-#endif
-}
-
-static void realview_pbx_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
- void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
-
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- __raw_writel(0x00F0, reset_ctrl);
- __raw_writel(0x00F4, reset_ctrl);
- dsb();
-}
-
-static void __init realview_pbx_init(void)
-{
- int i;
-
-#ifdef CONFIG_CACHE_L2X0
- if (core_tile_pbxa9mp()) {
- void __iomem *l2x0_base =
- __io_address(REALVIEW_PBX_TILE_L220_BASE);
-
- /* set RAM latencies to 1 cycle for eASIC */
- writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
- writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
-
- /* 16KB way size, 8-way associativity, parity disabled
- * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
- l2x0_init(l2x0_base, 0x02520000, 0xc0000fff);
- platform_device_register(&pmu_device);
- }
-#endif
-
- realview_flash_register(realview_pbx_flash_resources,
- ARRAY_SIZE(realview_pbx_flash_resources));
- realview_eth_register(NULL, realview_pbx_smsc911x_resources);
- platform_device_register(&realview_i2c_device);
- platform_device_register(&realview_cf_device);
- platform_device_register(&realview_leds_device);
- realview_usb_register(realview_pbx_isp1761_resources);
-
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- struct amba_device *d = amba_devs[i];
- amba_device_register(d, &iomem_resource);
- }
-}
-
-MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .smp = smp_ops(realview_smp_ops),
- .fixup = realview_pbx_fixup,
- .map_io = realview_pbx_map_io,
- .init_early = realview_init_early,
- .init_irq = gic_init_irq,
- .init_time = realview_pbx_timer_init,
- .init_machine = realview_pbx_init,
-#ifdef CONFIG_ZONE_DMA
- .dma_zone_size = SZ_256M,
-#endif
- .restart = realview_pbx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index 257166b21f3d..aa79fa47373a 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -40,7 +40,7 @@
#define SCREEN_END 0xdfc00000
#define SCREEN_BASE 0xdf800000
-#define UNCACHEABLE_ADDR 0xdf010000
+#define UNCACHEABLE_ADDR (FLUSH_BASE + 0x10000)
/*
* IO Addresses
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index bf50328107bd..f6c3f151d0d4 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -21,7 +21,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_data/dma-s3c24xx.h>
+#include <linux/dmaengine.h>
#include <mach/hardware.h>
#include <mach/regs-clock.h>
@@ -304,6 +305,8 @@ struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
},
};
+#define s3c24xx_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
+
#if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \
defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442)
static struct resource s3c2410_dma_resource[] = {
@@ -354,7 +357,9 @@ struct platform_device s3c2410_device_dma = {
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2410_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2410_dma_platdata,
},
};
#endif
@@ -395,7 +400,9 @@ struct platform_device s3c2412_device_dma = {
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2412_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2412_dma_platdata,
},
};
#endif
@@ -439,10 +446,44 @@ static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
};
+static const struct dma_slave_map s3c2440_dma_slave_map[] = {
+ /* TODO: DMACH_XD0 */
+ /* TODO: DMACH_XD1 */
+ { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
+ { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0 },
+ { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0 },
+ { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1 },
+ { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1 },
+ { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+ { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+ { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
+ { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
+ /* TODO: DMACH_TIMER */
+ { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+ { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+ { "samsung-ac97", "rx", (void *)DMACH_PCM_IN },
+ { "samsung-ac97", "tx", (void *)DMACH_PCM_OUT },
+ { "samsung-ac97", "rx", (void *)DMACH_MIC_IN },
+ { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+ { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
static struct s3c24xx_dma_platdata s3c2440_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2440_dma_channels,
.num_channels = DMACH_MAX,
+ .slave_map = s3c2440_dma_slave_map,
+ .slavecnt = ARRAY_SIZE(s3c2440_dma_slave_map),
};
struct platform_device s3c2440_device_dma = {
@@ -451,7 +492,9 @@ struct platform_device s3c2440_device_dma = {
.num_resources = ARRAY_SIZE(s3c2410_dma_resource),
.resource = s3c2410_dma_resource,
.dev = {
- .platform_data = &s3c2440_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2440_dma_platdata,
},
};
#endif
@@ -503,7 +546,9 @@ struct platform_device s3c2443_device_dma = {
.num_resources = ARRAY_SIZE(s3c2443_dma_resource),
.resource = s3c2443_dma_resource,
.dev = {
- .platform_data = &s3c2443_dma_platdata,
+ .dma_mask = &s3c24xx_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c2443_dma_platdata,
},
};
#endif
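
The dma_slave_map table added to the S3C2440 DMA platform data above is what lets client drivers request their channel by device name plus a role string ("rx"/"tx", or "rx0".."tx3" for the HS-UDC endpoints) instead of carrying DMACH_* numbers in board code. A hedged sketch of the consumer side, using the standard dmaengine request API; the helper name and the PIO fallback policy are illustrative, not taken from any of the drivers named in the map:

#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Sketch: how a client (e.g. the "s3c2440-uart.0" platform device)
 * would pick up the channel routed through the slave map above.
 */
static struct dma_chan *example_request_tx_chan(struct device *dev)
{
	struct dma_chan *chan;

	/* Matched against the { "<dev name>", "tx", ... } map entries */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return NULL;	/* caller may fall back to PIO */

	return chan;
}
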
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index a8521684a7f5..ec60bd4a1646 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -497,9 +497,28 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
},
};
+static struct uda134x_platform_data s3c24xx_uda134x = {
+ .l3 = {
+ .gpio_clk = S3C2410_GPB(4),
+ .gpio_data = S3C2410_GPB(3),
+ .gpio_mode = S3C2410_GPB(2),
+ .use_gpios = 1,
+ .data_hold = 1,
+ .data_setup = 1,
+ .clock_high = 1,
+ .mode_hold = 1,
+ .mode = 1,
+ .mode_setup = 1,
+ },
+ .model = UDA134X_UDA1341,
+};
+
static struct platform_device uda1340_codec = {
.name = "uda134x-codec",
.id = -1,
+ .dev = {
+ .platform_data = &s3c24xx_uda134x,
+ },
};
static struct platform_device *mini2440_devices[] __initdata = {
@@ -516,6 +535,7 @@ static struct platform_device *mini2440_devices[] __initdata = {
&mini2440_button_device,
&s3c_device_nand,
&s3c_device_sdi,
+ &s3c2440_device_dma,
&s3c_device_iis,
&uda1340_codec,
&mini2440_audio,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 571f95cc5a53..ccc3ab8d58e7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -393,8 +393,7 @@ static const struct i2c_device_id wlf_gf_module_id[] = {
static struct i2c_driver wlf_gf_module_driver = {
.driver = {
- .name = "wlf-gf-module",
- .owner = THIS_MODULE,
+ .name = "wlf-gf-module"
},
.probe = wlf_gf_module_probe,
.id_table = wlf_gf_module_id,
diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c
index b1d4faa12f9a..b69e76614d5b 100644
--- a/arch/arm/mach-sa1100/h3xxx.c
+++ b/arch/arm/mach-sa1100/h3xxx.c
@@ -14,9 +14,9 @@
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
-#include <linux/mfd/htc-egpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
diff --git a/arch/arm/mach-sa1100/include/mach/hardware.h b/arch/arm/mach-sa1100/include/mach/hardware.h
index cbedd75a9d65..d944fd7e464f 100644
--- a/arch/arm/mach-sa1100/include/mach/hardware.h
+++ b/arch/arm/mach-sa1100/include/mach/hardware.h
@@ -13,7 +13,7 @@
#define __ASM_ARCH_HARDWARE_H
-#define UNCACHEABLE_ADDR 0xfa050000
+#define UNCACHEABLE_ADDR 0xfa050000 /* ICIP */
/*
@@ -36,28 +36,10 @@
#define io_v2p( x ) \
( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START )
-#define CPU_SA1110_A0 (0)
-#define CPU_SA1110_B0 (4)
-#define CPU_SA1110_B1 (5)
-#define CPU_SA1110_B2 (6)
-#define CPU_SA1110_B4 (8)
-
-#define CPU_SA1100_ID (0x4401a110)
-#define CPU_SA1100_MASK (0xfffffff0)
-#define CPU_SA1110_ID (0x6901b110)
-#define CPU_SA1110_MASK (0xfffffff0)
-
#define __MREG(x) IOMEM(io_p2v(x))
#ifndef __ASSEMBLY__
-#include <asm/cputype.h>
-
-#define CPU_REVISION (read_cpuid_id() & 15)
-
-#define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID)
-#define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID)
-
# define __REG(x) (*((volatile unsigned long __iomem *)io_p2v(x)))
# define __PREG(x) (io_v2p((unsigned long)&(x)))
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index c0b1f5bafae4..0a2ca9be00e6 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/delay.h>
+#include <linux/gpio/machine.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
@@ -217,9 +218,22 @@ static struct platform_device jornada_ssp_device = {
.id = -1,
};
+static struct resource jornada_kbd_resources[] = {
+ DEFINE_RES_IRQ(IRQ_GPIO0),
+};
+
static struct platform_device jornada_kbd_device = {
.name = "jornada720_kbd",
.id = -1,
+ .num_resources = ARRAY_SIZE(jornada_kbd_resources),
+ .resource = jornada_kbd_resources,
+};
+
+static struct gpiod_lookup_table jornada_ts_gpiod_table = {
+ .dev_id = "jornada_ts",
+ .table = {
+ GPIO_LOOKUP("gpio", 9, "penup", GPIO_ACTIVE_HIGH),
+ },
};
static struct platform_device jornada_ts_device = {
@@ -250,6 +264,8 @@ static int __init jornada720_init(void)
GPSR = GPIO_GPIO20; /* restart gpio20 */
udelay(20); /* give it some time to restart */
+ gpiod_add_lookup_table(&jornada_ts_gpiod_table);
+
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
}
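
The gpiod lookup table registered above maps line 9 of the "gpio" chip to a consumer device named "jornada_ts" under the connection ID "penup", so the touchscreen driver no longer needs a hard-coded GPIO number. A minimal sketch of the consumer side via the descriptor API; the probe function and how it uses the line are illustrative, not lifted from the real jornada720_ts driver:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Sketch: resolve the "penup" line registered by the lookup table. */
static int example_ts_probe(struct platform_device *pdev)
{
	struct gpio_desc *penup;

	penup = devm_gpiod_get(&pdev->dev, "penup", GPIOD_IN);
	if (IS_ERR(penup))
		return PTR_ERR(penup);

	/* gpiod_get_value(penup) then reports the current pen state */
	return 0;
}
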
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 4a48c9f5f725..09817bae4558 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -22,7 +22,6 @@ config ARCH_RCAR_GEN2
select PM_GENERIC_DOMAINS
select RENESAS_IRQC
select SYS_SUPPORTS_SH_CMT
- select PCI_DOMAINS if PCI
config ARCH_RMOBILE
bool
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index 3506327e0bed..78d3e859bd64 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -28,7 +28,7 @@ static const char * const r8a7790_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
- .smp_init = shmobile_smp_init_fallback_ops,
+ .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
.smp = smp_ops(r8a7790_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
index 110e8b588e56..26e2d181a190 100644
--- a/arch/arm/mach-shmobile/setup-r8a7791.c
+++ b/arch/arm/mach-shmobile/setup-r8a7791.c
@@ -29,7 +29,7 @@ static const char *const r8a7791_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R8A7791_DT, "Generic R8A7791 (Flattened Device Tree)")
- .smp_init = shmobile_smp_init_fallback_ops,
+ .smp_init = smp_init_ops(shmobile_smp_init_fallback_ops),
.smp = smp_ops(r8a7791_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index b7260c2b510c..1b6cae5e78f4 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -20,7 +20,6 @@ config ARCH_SPEAR13XX
select HAVE_ARM_TWD if SMP
select PINCTRL
select MFD_SYSCON
- select MIGHT_HAVE_PCI
help
Supports for ARM's SPEAR13XX family
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 096ed216c6d5..b9863f9a35fa 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -32,6 +32,7 @@ config MACH_SUN7I
default ARCH_SUNXI
select ARM_GIC
select ARM_PSCI
+ select ARCH_SUPPORTS_BIG_ENDIAN
select HAVE_ARM_ARCH_TIMER
select SUN5I_HSTIMER
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 95dca8c2c9ed..2e2bde271205 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -22,6 +22,7 @@ static const char * const sunxi_board_dt_compat[] = {
"allwinner,sun5i-a10s",
"allwinner,sun5i-a13",
"allwinner,sun5i-r8",
+ "nextthing,gr8",
NULL,
};
diff --git a/arch/arm/mach-uniphier/Makefile b/arch/arm/mach-uniphier/Makefile
index 396afe109e67..6bea3d3a2dd7 100644
--- a/arch/arm/mach-uniphier/Makefile
+++ b/arch/arm/mach-uniphier/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj- += dummy.o
diff --git a/arch/arm/mach-uniphier/headsmp.S b/arch/arm/mach-uniphier/headsmp.S
deleted file mode 100644
index c819dff84546..000000000000
--- a/arch/arm/mach-uniphier/headsmp.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cp15.h>
-
-ENTRY(uniphier_smp_trampoline)
-ARM_BE8(setend be) @ ensure we are in BE8 mode
- mrc p15, 0, r0, c0, c0, 5 @ MPIDR (Multiprocessor Affinity Reg)
- and r2, r0, #0x3 @ CPU ID
- ldr r1, uniphier_smp_trampoline_jump
- ldr r3, uniphier_smp_trampoline_poll_addr
- mrc p15, 0, r0, c1, c0, 0 @ SCTLR (System Control Register)
- orr r0, r0, #CR_I @ Enable ICache
- bic r0, r0, #(CR_C | CR_M) @ Disable MMU and Dcache
- mcr p15, 0, r0, c1, c0, 0
- b 1f @ cache the following 5 instructions
-0: wfe
-1: ldr r0, [r3]
- cmp r0, r2
- bxeq r1 @ branch to secondary_startup
- b 0b
- .globl uniphier_smp_trampoline_jump
-uniphier_smp_trampoline_jump:
- .word 0 @ set virt_to_phys(secondary_startup)
- .globl uniphier_smp_trampoline_poll_addr
-uniphier_smp_trampoline_poll_addr:
- .word 0 @ set CPU ID to be kicked to this reg
- .globl uniphier_smp_trampoline_end
-uniphier_smp_trampoline_end:
-ENDPROC(uniphier_smp_trampoline)
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
deleted file mode 100644
index 9978c41128f6..000000000000
--- a/arch/arm/mach-uniphier/platsmp.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "uniphier: " fmt
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/sizes.h>
-#include <asm/cacheflush.h>
-#include <asm/hardware/cache-uniphier.h>
-#include <asm/pgtable.h>
-#include <asm/smp.h>
-#include <asm/smp_scu.h>
-
-/*
- * The secondary CPUs check this register from the boot ROM for the jump
- * destination. After that, it can be reused as a scratch register.
- */
-#define UNIPHIER_SMPCTRL_ROM_RSV2 0x208
-
-static void __iomem *uniphier_smp_rom_boot_rsv2;
-static unsigned int uniphier_smp_max_cpus;
-
-extern char uniphier_smp_trampoline;
-extern char uniphier_smp_trampoline_jump;
-extern char uniphier_smp_trampoline_poll_addr;
-extern char uniphier_smp_trampoline_end;
-
-/*
- * Copy trampoline code to the tail of the 1st section of the page table used
- * in the boot ROM. This area is directly accessible by the secondary CPUs
- * for all the UniPhier SoCs.
- */
-static const phys_addr_t uniphier_smp_trampoline_dest_end = SECTION_SIZE;
-static phys_addr_t uniphier_smp_trampoline_dest;
-
-static int __init uniphier_smp_copy_trampoline(phys_addr_t poll_addr)
-{
- size_t trmp_size;
- static void __iomem *trmp_base;
-
- if (!uniphier_cache_l2_is_enabled()) {
- pr_warn("outer cache is needed for SMP, but not enabled\n");
- return -ENODEV;
- }
-
- uniphier_cache_l2_set_locked_ways(1);
-
- outer_flush_all();
-
- trmp_size = &uniphier_smp_trampoline_end - &uniphier_smp_trampoline;
- uniphier_smp_trampoline_dest = uniphier_smp_trampoline_dest_end -
- trmp_size;
-
- uniphier_cache_l2_touch_range(uniphier_smp_trampoline_dest,
- uniphier_smp_trampoline_dest_end);
-
- trmp_base = ioremap_cache(uniphier_smp_trampoline_dest, trmp_size);
- if (!trmp_base) {
- pr_err("failed to map trampoline destination area\n");
- return -ENOMEM;
- }
-
- memcpy(trmp_base, &uniphier_smp_trampoline, trmp_size);
-
- writel(virt_to_phys(secondary_startup),
- trmp_base + (&uniphier_smp_trampoline_jump -
- &uniphier_smp_trampoline));
-
- writel(poll_addr, trmp_base + (&uniphier_smp_trampoline_poll_addr -
- &uniphier_smp_trampoline));
-
- flush_cache_all(); /* flush out trampoline code to outer cache */
-
- iounmap(trmp_base);
-
- return 0;
-}
-
-static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
-{
- struct device_node *np;
- struct resource res;
- phys_addr_t rom_rsv2_phys;
- int ret;
-
- np = of_find_compatible_node(NULL, NULL, "socionext,uniphier-smpctrl");
- ret = of_address_to_resource(np, 0, &res);
- of_node_put(np);
- if (ret) {
- pr_err("failed to get resource of SMP control\n");
- return ret;
- }
-
- rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
-
- ret = uniphier_smp_copy_trampoline(rom_rsv2_phys);
- if (ret)
- return ret;
-
- uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
- if (!uniphier_smp_rom_boot_rsv2) {
- pr_err("failed to map ROM_BOOT_RSV2 register\n");
- return -ENOMEM;
- }
-
- writel(uniphier_smp_trampoline_dest, uniphier_smp_rom_boot_rsv2);
- asm("sev"); /* Bring up all secondary CPUs to the trampoline code */
-
- uniphier_smp_max_cpus = max_cpus; /* save for later use */
-
- return 0;
-}
-
-static void __init uniphier_smp_unprepare_trampoline(void)
-{
- iounmap(uniphier_smp_rom_boot_rsv2);
-
- if (uniphier_smp_trampoline_dest)
- outer_inv_range(uniphier_smp_trampoline_dest,
- uniphier_smp_trampoline_dest_end);
-
- uniphier_cache_l2_set_locked_ways(0);
-}
-
-static int __init uniphier_smp_enable_scu(void)
-{
- unsigned long scu_base_phys = 0;
- void __iomem *scu_base;
-
- if (scu_a9_has_base())
- scu_base_phys = scu_a9_get_base();
-
- if (!scu_base_phys) {
- pr_err("failed to get scu base\n");
- return -ENODEV;
- }
-
- scu_base = ioremap(scu_base_phys, SZ_128);
- if (!scu_base) {
- pr_err("failed to map scu base\n");
- return -ENOMEM;
- }
-
- scu_enable(scu_base);
- iounmap(scu_base);
-
- return 0;
-}
-
-static void __init uniphier_smp_prepare_cpus(unsigned int max_cpus)
-{
- static cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
- int ret;
-
- ret = uniphier_smp_prepare_trampoline(max_cpus);
- if (ret)
- goto err;
-
- ret = uniphier_smp_enable_scu();
- if (ret)
- goto err;
-
- return;
-err:
- pr_warn("disabling SMP\n");
- init_cpu_present(&only_cpu_0);
- uniphier_smp_unprepare_trampoline();
-}
-
-static int __init uniphier_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
-{
- if (WARN_ON_ONCE(!uniphier_smp_rom_boot_rsv2))
- return -EFAULT;
-
- writel(cpu, uniphier_smp_rom_boot_rsv2);
- readl(uniphier_smp_rom_boot_rsv2); /* relax */
-
- asm("sev"); /* wake up secondary CPUs sleeping in the trampoline */
-
- if (cpu == uniphier_smp_max_cpus - 1) {
- /* clean up resources if this is the last CPU */
- uniphier_smp_unprepare_trampoline();
- }
-
- return 0;
-}
-
-static const struct smp_operations uniphier_smp_ops __initconst = {
- .smp_prepare_cpus = uniphier_smp_prepare_cpus,
- .smp_boot_secondary = uniphier_smp_boot_secondary,
-};
-CPU_METHOD_OF_DECLARE(uniphier_smp, "socionext,uniphier-smp",
- &uniphier_smp_ops);
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index b0cc26284fc9..c257d40ca51d 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -9,7 +9,6 @@ config ARCH_VERSATILE
select CPU_ARM926T
select ICST
select MFD_SYSCON
- select MIGHT_HAVE_PCI
select PLAT_VERSATILE
select POWER_RESET
select POWER_RESET_VERSATILE
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index d15a7fe51618..c1799dd1d0d9 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -403,6 +403,7 @@ config CPU_V7M
bool
select CPU_32v7M
select CPU_ABRT_NOMMU
+ select CPU_CACHE_V7M
select CPU_CACHE_NOP
select CPU_PABRT_LEGACY
select CPU_THUMBONLY
@@ -518,6 +519,9 @@ config CPU_CACHE_VIPT
config CPU_CACHE_FA
bool
+config CPU_CACHE_V7M
+ bool
+
if MMU
# The copy-page model
config CPU_COPY_V4WT
@@ -750,14 +754,14 @@ config CPU_HIGH_VECTOR
config CPU_ICACHE_DISABLE
bool "Disable I-Cache (I-bit)"
- depends on CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
+ depends on (CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)) || CPU_V7M
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache (C-bit)"
- depends on CPU_CP15 && !SMP
+ depends on (CPU_CP15 && !SMP) || CPU_V7M
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
@@ -792,7 +796,7 @@ config CPU_CACHE_ROUND_ROBIN
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
- depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+ depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 || CPU_V7M
help
Say Y here to disable branch prediction. If unsure, say N.
@@ -916,6 +920,13 @@ config CACHE_L2X0
help
This option enables the L2x0 PrimeCell.
+config CACHE_L2X0_PMU
+ bool "L2x0 performance monitor support" if CACHE_L2X0
+ depends on PERF_EVENTS
+ help
+ This option enables support for the performance monitoring features
+ of the L220 and PL310 outer cache controllers.
+
if CACHE_L2X0
config PL310_ERRATA_588369
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 7f76d96ce546..e8698241ece9 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -43,9 +43,11 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
+obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
AFLAGS_cache-v6.o :=-Wa,-march=armv6
AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
+AFLAGS_cache-v7m.o :=-Wa,-march=armv7-m
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
@@ -101,6 +103,7 @@ AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
+obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
new file mode 100644
index 000000000000..976d3057272e
--- /dev/null
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -0,0 +1,584 @@
+/*
+ * L220/L310 cache controller support
+ *
+ * Copyright (C) 2016 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/errno.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/hardware/cache-l2x0.h>
+
+#define PMU_NR_COUNTERS 2
+
+static void __iomem *l2x0_base;
+static struct pmu *l2x0_pmu;
+static cpumask_t pmu_cpu;
+
+static const char *l2x0_name;
+
+static ktime_t l2x0_pmu_poll_period;
+static struct hrtimer l2x0_pmu_hrtimer;
+
+/*
+ * The L220/PL310 PMU has two equivalent counters, Counter1 and Counter0.
+ * Registers controlling these are laid out in pairs, in descending order, i.e.
+ * the register for Counter1 comes first, followed by the register for
+ * Counter0.
+ * We ensure that idx 0 -> Counter0 and idx 1 -> Counter1.
+ */
+static struct perf_event *events[PMU_NR_COUNTERS];
+
+/* Find an unused counter */
+static int l2x0_pmu_find_idx(void)
+{
+ int i;
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++) {
+ if (!events[i])
+ return i;
+ }
+
+ return -1;
+}
+
+/* How many counters are allocated? */
+static int l2x0_pmu_num_active_counters(void)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++) {
+ if (events[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void l2x0_pmu_counter_config_write(int idx, u32 val)
+{
+ writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * idx);
+}
+
+static u32 l2x0_pmu_counter_read(int idx)
+{
+ return readl_relaxed(l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
+}
+
+static void l2x0_pmu_counter_write(int idx, u32 val)
+{
+ writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
+}
+
+static void __l2x0_pmu_enable(void)
+{
+ u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
+ val |= L2X0_EVENT_CNT_CTRL_ENABLE;
+ writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static void __l2x0_pmu_disable(void)
+{
+ u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
+ val &= ~L2X0_EVENT_CNT_CTRL_ENABLE;
+ writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static void l2x0_pmu_enable(struct pmu *pmu)
+{
+ if (l2x0_pmu_num_active_counters() == 0)
+ return;
+
+ __l2x0_pmu_enable();
+}
+
+static void l2x0_pmu_disable(struct pmu *pmu)
+{
+ if (l2x0_pmu_num_active_counters() == 0)
+ return;
+
+ __l2x0_pmu_disable();
+}
+
+static void warn_if_saturated(u32 count)
+{
+ if (count != 0xffffffff)
+ return;
+
+ pr_warn_ratelimited("L2X0 counter saturated. Poll period too long\n");
+}
+
+static void l2x0_pmu_event_read(struct perf_event *event)
+{
+ struct hw_perf_event *hw = &event->hw;
+ u64 prev_count, new_count, mask;
+
+ do {
+ prev_count = local64_read(&hw->prev_count);
+ new_count = l2x0_pmu_counter_read(hw->idx);
+ } while (local64_xchg(&hw->prev_count, new_count) != prev_count);
+
+ mask = GENMASK_ULL(31, 0);
+ local64_add((new_count - prev_count) & mask, &event->count);
+
+ warn_if_saturated(new_count);
+}
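
A minimal stand-alone sketch of the read path above, which folds the 32-bit hardware counter into a 64-bit software total via masked deltas, using hypothetical names and plain variables instead of local64_t (so it is single-threaded only, unlike the driver):

    #include <stdint.h>

    struct sw_counter {
            uint32_t prev;   /* last raw hardware value seen */
            uint64_t total;  /* accumulated 64-bit count */
    };

    static void fold_counter(struct sw_counter *c, uint32_t hw_now)
    {
            /* 32-bit subtraction already wraps mod 2^32; the explicit mask
             * mirrors the GENMASK_ULL(31, 0) used by the driver above. */
            uint64_t delta = (uint64_t)(hw_now - c->prev) & 0xffffffffULL;

            c->prev = hw_now;
            c->total += delta;
    }

    int main(void)
    {
            struct sw_counter c = { 0, 0 };

            fold_counter(&c, 100);
            fold_counter(&c, 250);          /* total is now 250 */
            return c.total == 250 ? 0 : 1;
    }
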
+
+static void l2x0_pmu_event_configure(struct perf_event *event)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ /*
+ * The L2X0 counters saturate at 0xffffffff rather than wrapping, so we
+ * will *always* lose some number of events when a counter saturates,
+ * and have no way of detecting how many were lost.
+ *
+ * To minimize the impact of this, we try to maximize the period by
+ * always starting counters at zero. To ensure that group ratios are
+ * representative, we poll periodically to avoid counters saturating.
+ * See l2x0_pmu_poll().
+ */
+ local64_set(&hw->prev_count, 0);
+ l2x0_pmu_counter_write(hw->idx, 0);
+}
+
+static enum hrtimer_restart l2x0_pmu_poll(struct hrtimer *hrtimer)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ __l2x0_pmu_disable();
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++) {
+ struct perf_event *event = events[i];
+
+ if (!event)
+ continue;
+
+ l2x0_pmu_event_read(event);
+ l2x0_pmu_event_configure(event);
+ }
+
+ __l2x0_pmu_enable();
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, l2x0_pmu_poll_period);
+ return HRTIMER_RESTART;
+}
+
+
+static void __l2x0_pmu_event_enable(int idx, u32 event)
+{
+ u32 val;
+
+ val = event << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
+ val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
+ l2x0_pmu_counter_config_write(idx, val);
+}
+
+static void l2x0_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
+ l2x0_pmu_event_configure(event);
+ }
+
+ hw->state = 0;
+
+ __l2x0_pmu_event_enable(hw->idx, hw->config_base);
+}
+
+static void __l2x0_pmu_event_disable(int idx)
+{
+ u32 val;
+
+ val = L2X0_EVENT_CNT_CFG_SRC_DISABLED << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
+ val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
+ l2x0_pmu_counter_config_write(idx, val);
+}
+
+static void l2x0_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ if (WARN_ON_ONCE(event->hw.state & PERF_HES_STOPPED))
+ return;
+
+ __l2x0_pmu_event_disable(hw->idx);
+
+ hw->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_UPDATE) {
+ l2x0_pmu_event_read(event);
+ hw->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int l2x0_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+ int idx = l2x0_pmu_find_idx();
+
+ if (idx == -1)
+ return -EAGAIN;
+
+ /*
+ * Pin the timer, so that the overflows are handled by the chosen
+ * event->cpu (this is the same one as presented in "cpumask"
+ * attribute).
+ */
+ if (l2x0_pmu_num_active_counters() == 0)
+ hrtimer_start(&l2x0_pmu_hrtimer, l2x0_pmu_poll_period,
+ HRTIMER_MODE_REL_PINNED);
+
+ events[idx] = event;
+ hw->idx = idx;
+
+ l2x0_pmu_event_configure(event);
+
+ hw->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ l2x0_pmu_event_start(event, 0);
+
+ return 0;
+}
+
+static void l2x0_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ l2x0_pmu_event_stop(event, PERF_EF_UPDATE);
+
+ events[hw->idx] = NULL;
+ hw->idx = -1;
+
+ if (l2x0_pmu_num_active_counters() == 0)
+ hrtimer_cancel(&l2x0_pmu_hrtimer);
+}
+
+static bool l2x0_pmu_group_is_valid(struct perf_event *event)
+{
+ struct pmu *pmu = event->pmu;
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int num_hw = 0;
+
+ if (leader->pmu == pmu)
+ num_hw++;
+ else if (!is_software_event(leader))
+ return false;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (sibling->pmu == pmu)
+ num_hw++;
+ else if (!is_software_event(sibling))
+ return false;
+ }
+
+ return num_hw <= PMU_NR_COUNTERS;
+}
+
+static int l2x0_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ if (event->attr.type != l2x0_pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) ||
+ event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->attr.config & ~L2X0_EVENT_CNT_CFG_SRC_MASK)
+ return -EINVAL;
+
+ hw->config_base = event->attr.config;
+
+ if (!l2x0_pmu_group_is_valid(event))
+ return -EINVAL;
+
+ event->cpu = cpumask_first(&pmu_cpu);
+
+ return 0;
+}
+
+struct l2x0_event_attribute {
+ struct device_attribute attr;
+ unsigned int config;
+ bool pl310_only;
+};
+
+#define L2X0_EVENT_ATTR(_name, _config, _pl310_only) \
+ (&((struct l2x0_event_attribute[]) {{ \
+ .attr = __ATTR(_name, S_IRUGO, l2x0_pmu_event_show, NULL), \
+ .config = _config, \
+ .pl310_only = _pl310_only, \
+ }})[0].attr.attr)
+
+#define L220_PLUS_EVENT_ATTR(_name, _config) \
+ L2X0_EVENT_ATTR(_name, _config, false)
+
+#define PL310_EVENT_ATTR(_name, _config) \
+ L2X0_EVENT_ATTR(_name, _config, true)
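
The L2X0_EVENT_ATTR macro above leans on a C99 detail: a compound literal at file scope has static storage duration, so taking the address of one of its members is a valid constant initializer. A small stand-alone illustration of the same trick, with hypothetical names rather than the kernel's attribute types:

    #include <stdio.h>

    struct ev_attr {
            const char  *name;
            unsigned int config;
    };

    /* Each expansion mints a distinct anonymous one-element array and
     * yields a pointer usable in a static initializer. */
    #define EV_ATTR(_name, _config) \
            (&((struct ev_attr[]) {{ .name = #_name, .config = _config }})[0])

    static struct ev_attr *demo_attrs[] = {
            EV_ATTR(drhit, 0x2),
            EV_ATTR(drreq, 0x3),
            NULL,
    };

    int main(void)
    {
            for (struct ev_attr **a = demo_attrs; *a; a++)
                    printf("%s -> config=0x%x\n", (*a)->name, (*a)->config);
            return 0;
    }
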
+
+static ssize_t l2x0_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct l2x0_event_attribute *lattr;
+
+ lattr = container_of(attr, typeof(*lattr), attr);
+ return snprintf(buf, PAGE_SIZE, "config=0x%x\n", lattr->config);
+}
+
+static umode_t l2x0_pmu_event_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct l2x0_event_attribute *lattr;
+
+ lattr = container_of(attr, typeof(*lattr), attr.attr);
+
+ if (!lattr->pl310_only || strcmp("l2c_310", pmu->name) == 0)
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute *l2x0_pmu_event_attrs[] = {
+ L220_PLUS_EVENT_ATTR(co, 0x1),
+ L220_PLUS_EVENT_ATTR(drhit, 0x2),
+ L220_PLUS_EVENT_ATTR(drreq, 0x3),
+ L220_PLUS_EVENT_ATTR(dwhit, 0x4),
+ L220_PLUS_EVENT_ATTR(dwreq, 0x5),
+ L220_PLUS_EVENT_ATTR(dwtreq, 0x6),
+ L220_PLUS_EVENT_ATTR(irhit, 0x7),
+ L220_PLUS_EVENT_ATTR(irreq, 0x8),
+ L220_PLUS_EVENT_ATTR(wa, 0x9),
+ PL310_EVENT_ATTR(ipfalloc, 0xa),
+ PL310_EVENT_ATTR(epfhit, 0xb),
+ PL310_EVENT_ATTR(epfalloc, 0xc),
+ PL310_EVENT_ATTR(srrcvd, 0xd),
+ PL310_EVENT_ATTR(srconf, 0xe),
+ PL310_EVENT_ATTR(epfrcvd, 0xf),
+ NULL
+};
+
+static struct attribute_group l2x0_pmu_event_attrs_group = {
+ .name = "events",
+ .attrs = l2x0_pmu_event_attrs,
+ .is_visible = l2x0_pmu_event_attr_is_visible,
+};
+
+static ssize_t l2x0_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &pmu_cpu);
+}
+
+static struct device_attribute l2x0_pmu_cpumask_attr =
+ __ATTR(cpumask, S_IRUGO, l2x0_pmu_cpumask_show, NULL);
+
+static struct attribute *l2x0_pmu_cpumask_attrs[] = {
+ &l2x0_pmu_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group l2x0_pmu_cpumask_attr_group = {
+ .attrs = l2x0_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *l2x0_pmu_attr_groups[] = {
+ &l2x0_pmu_event_attrs_group,
+ &l2x0_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static void l2x0_pmu_reset(void)
+{
+ int i;
+
+ __l2x0_pmu_disable();
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++)
+ __l2x0_pmu_event_disable(i);
+}
+
+static int l2x0_pmu_offline_cpu(unsigned int cpu)
+{
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(l2x0_pmu, cpu, target);
+ cpumask_set_cpu(target, &pmu_cpu);
+
+ return 0;
+}
+
+void l2x0_pmu_suspend(void)
+{
+ int i;
+
+ if (!l2x0_pmu)
+ return;
+
+ l2x0_pmu_disable(l2x0_pmu);
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++) {
+ if (events[i])
+ l2x0_pmu_event_stop(events[i], PERF_EF_UPDATE);
+ }
+
+}
+
+void l2x0_pmu_resume(void)
+{
+ int i;
+
+ if (!l2x0_pmu)
+ return;
+
+ l2x0_pmu_reset();
+
+ for (i = 0; i < PMU_NR_COUNTERS; i++) {
+ if (events[i])
+ l2x0_pmu_event_start(events[i], PERF_EF_RELOAD);
+ }
+
+ l2x0_pmu_enable(l2x0_pmu);
+}
+
+void __init l2x0_pmu_register(void __iomem *base, u32 part)
+{
+ /*
+ * Determine whether we support the PMU, and choose the name for sysfs.
+ * This is also used by l2x0_pmu_event_attr_is_visible to determine
+ * which events to display, as the PL310 PMU supports a superset of
+ * L220 events.
+ *
+ * The L210 PMU has a different programmer's interface, and is not
+ * supported by this driver.
+ *
+ * We must defer registering the PMU until the perf subsystem is up and
+ * running, so just stash the name and base, and leave that to another
+ * initcall.
+ */
+ switch (part & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L220:
+ l2x0_name = "l2c_220";
+ break;
+ case L2X0_CACHE_ID_PART_L310:
+ l2x0_name = "l2c_310";
+ break;
+ default:
+ return;
+ }
+
+ l2x0_base = base;
+}
+
+static __init int l2x0_pmu_init(void)
+{
+ int ret;
+
+ if (!l2x0_base)
+ return 0;
+
+ l2x0_pmu = kzalloc(sizeof(*l2x0_pmu), GFP_KERNEL);
+ if (!l2x0_pmu) {
+ pr_warn("Unable to allocate L2x0 PMU\n");
+ return -ENOMEM;
+ }
+
+ *l2x0_pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = l2x0_pmu_enable,
+ .pmu_disable = l2x0_pmu_disable,
+ .read = l2x0_pmu_event_read,
+ .start = l2x0_pmu_event_start,
+ .stop = l2x0_pmu_event_stop,
+ .add = l2x0_pmu_event_add,
+ .del = l2x0_pmu_event_del,
+ .event_init = l2x0_pmu_event_init,
+ .attr_groups = l2x0_pmu_attr_groups,
+ };
+
+ l2x0_pmu_reset();
+
+ /*
+ * We always use a hrtimer rather than an interrupt.
+ * See comments in l2x0_pmu_event_configure and l2x0_pmu_poll.
+ *
+ * Polling once a second allows the counters to fill up to 1/128th on a
+ * quad-core test chip with cores clocked at 400MHz. Hopefully this
+ * leaves sufficient headroom to avoid overflow on production silicon
+ * at higher frequencies.
+ */
+ l2x0_pmu_poll_period = ms_to_ktime(1000);
+ hrtimer_init(&l2x0_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ l2x0_pmu_hrtimer.function = l2x0_pmu_poll;
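
Taking the figures in the comment above at face value: filling to 1/128th of the range per one-second poll is roughly 2^32 / 128, about 33.5 million events per second, so a counter would take around two minutes to saturate, leaving a wide margin for the 1 s poll. A trivial check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long range = 0xffffffffULL;      /* saturation value */
            unsigned long long rate  = (range + 1) / 128;  /* quoted fill rate */

            printf("~%llu events/s, ~%llu s to saturate\n", rate, range / rate);
            return 0;
    }
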
+
+ cpumask_set_cpu(0, &pmu_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ "AP_PERF_ARM_L2X0_ONLINE", NULL,
+ l2x0_pmu_offline_cpu);
+ if (ret)
+ goto out_pmu;
+
+ ret = perf_pmu_register(l2x0_pmu, l2x0_name, -1);
+ if (ret)
+ goto out_cpuhp;
+
+ return 0;
+
+out_cpuhp:
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE);
+out_pmu:
+ kfree(l2x0_pmu);
+ l2x0_pmu = NULL;
+ return ret;
+}
+device_initcall(l2x0_pmu_init);
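
Once registration succeeds, the PMU should appear under /sys/bus/event_source/devices/<name>/ with the "events" and "cpumask" attribute groups defined above, and the counters can then be driven from userspace with perf; for example, assuming a PL310 (so the PMU is named l2c_310), something like "perf stat -a -e l2c_310/drhit/,l2c_310/drreq/ sleep 1" would count data-read hits against data-read requests system-wide.
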
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index cc12905ae6f8..d1870c777c6e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -142,6 +142,8 @@ static void l2c_disable(void)
{
void __iomem *base = l2x0_base;
+ l2x0_pmu_suspend();
+
outer_cache.flush_all();
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
@@ -159,6 +161,8 @@ static void l2c_resume(void)
/* Do not touch the controller if already enabled. */
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_data->num_lock);
+
+ l2x0_pmu_resume();
}
/*
@@ -709,9 +713,8 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
u32 val = l2x0_saved_regs.prefetch_ctrl;
- /* I don't think bit23 is required here... but iMX6 does so */
- if (val & (BIT(30) | BIT(23))) {
- val &= ~(BIT(30) | BIT(23));
+ if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
+ val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
@@ -892,6 +895,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
+ l2x0_pmu_register(l2x0_base, cache_id);
+
return 0;
}
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index c8e2f4947223..dfe97b409916 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -43,27 +44,15 @@
#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM 0x248 /* Cache Operation Queue Mode */
-#define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
-#define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
-#define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
-#define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
#define UNIPHIER_SSCOQM_CE BIT(15) /* notify completion */
#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
-#define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
-#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
-#define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
-#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
-#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
#define UNIPHIER_SSCOQAD 0x24c /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ 0x250 /* Cache Operation Queue Size */
-#define UNIPHIER_SSCOQMASK 0x254 /* Cache Operation Queue Address Mask */
-#define UNIPHIER_SSCOQWN 0x258 /* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF 0x25c /* Cache Operation Queue Set Complete*/
#define UNIPHIER_SSCOPPQSEF_FE BIT(1)
#define UNIPHIER_SSCOPPQSEF_OE BIT(0)
@@ -72,9 +61,6 @@
#define UNIPHIER_SSCOLPQS_EST BIT(1)
#define UNIPHIER_SSCOLPQS_QST BIT(0)
-/* Is the touch/pre-fetch destination specified by ways? */
-#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
- ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
@@ -178,11 +164,6 @@ static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
}
-
- /* set target ways if needed */
- if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
- writel_relaxed(data->way_locked_mask,
- data->op_base + UNIPHIER_SSCOQWN);
} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
(UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
@@ -338,46 +319,8 @@ static void uniphier_cache_sync(void)
__uniphier_cache_sync(data);
}
-int __init uniphier_cache_l2_is_enabled(void)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (!data)
- return 0;
-
- return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
- UNIPHIER_SSCC_ON);
-}
-
-void __init uniphier_cache_l2_touch_range(unsigned long start,
- unsigned long end)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (data)
- __uniphier_cache_maint_range(data, start, end,
- UNIPHIER_SSCOQM_TID_WAY |
- UNIPHIER_SSCOQM_CM_TOUCH);
-}
-
-void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
-{
- struct uniphier_cache_data *data;
-
- data = list_first_entry_or_null(&uniphier_cache_list,
- struct uniphier_cache_data, list);
- if (data)
- __uniphier_cache_set_locked_ways(data, way_mask);
-}
-
static const struct of_device_id uniphier_cache_match[] __initconst = {
- {
- .compatible = "socionext,uniphier-system-cache",
- },
+ { .compatible = "socionext,uniphier-system-cache" },
{ /* sentinel */ }
};
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
new file mode 100644
index 000000000000..816a7e44e6f1
--- /dev/null
+++ b/arch/arm/mm/cache-v7m.S
@@ -0,0 +1,453 @@
+/*
+ * linux/arch/arm/mm/cache-v7m.S
+ *
+ * Based on linux/arch/arm/mm/cache-v7.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the ARMv7M processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/unwind.h>
+#include <asm/v7m.h>
+
+#include "proc-macros.S"
+
+/* Generic V7M read/write macros for memory mapped cache operations */
+.macro v7m_cache_read, rt, reg
+ movw \rt, #:lower16:BASEADDR_V7M_SCB + \reg
+ movt \rt, #:upper16:BASEADDR_V7M_SCB + \reg
+ ldr \rt, [\rt]
+.endm
+
+.macro v7m_cacheop, rt, tmp, op, c = al
+ movw\c \tmp, #:lower16:BASEADDR_V7M_SCB + \op
+ movt\c \tmp, #:upper16:BASEADDR_V7M_SCB + \op
+ str\c \rt, [\tmp]
+.endm
+
+
+.macro read_ccsidr, rt
+ v7m_cache_read \rt, V7M_SCB_CCSIDR
+.endm
+
+.macro read_clidr, rt
+ v7m_cache_read \rt, V7M_SCB_CLIDR
+.endm
+
+.macro write_csselr, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
+.endm
+
+/*
+ * dcisw: Invalidate data cache by set/way
+ */
+.macro dcisw, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
+.endm
+
+/*
+ * dccisw: Clean and invalidate data cache by set/way
+ */
+.macro dccisw, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
+.endm
+
+/*
+ * dccimvac: Clean and invalidate data cache line by MVA to PoC.
+ */
+.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dccimvac\c, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
+.endm
+.endr
+
+/*
+ * dcimvac: Invalidate data cache line by MVA to PoC
+ */
+.macro dcimvac, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.endm
+
+/*
+ * dccmvau: Clean data cache line by MVA to PoU
+ */
+.macro dccmvau, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
+.endm
+
+/*
+ * dccmvac: Clean data cache line by MVA to PoC
+ */
+.macro dccmvac, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
+.endm
+
+/*
+ * icimvau: Invalidate instruction caches by MVA to PoU
+ */
+.macro icimvau, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
+.endm
+
+/*
+ * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
+ * rt data ignored by ICIALLU(IS), so can be used for the address
+ */
+.macro invalidate_icache, rt
+ v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
+ mov \rt, #0
+.endm
+
+/*
+ * Invalidate the BTB, inner shareable if SMP.
+ * rt data ignored by BPIALL, so it can be used for the address
+ */
+.macro invalidate_bp, rt
+ v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
+ mov \rt, #0
+.endm
+
+ENTRY(v7m_invalidate_l1)
+ mov r0, #0
+
+ write_csselr r0, r1
+ read_ccsidr r0
+
+ movw r1, #0x7fff
+ and r2, r1, r0, lsr #13
+
+ movw r1, #0x3ff
+
+ and r3, r1, r0, lsr #3 @ NumWays - 1
+ add r2, r2, #1 @ NumSets
+
+ and r0, r0, #0x7
+ add r0, r0, #4 @ SetShift
+
+ clz r1, r3 @ WayShift
+ add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+ mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+ mov r5, r3, lsl r1
+ mov r6, r2, lsl r0
+ orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ dcisw r5, r6
+ bgt 2b
+ cmp r2, #0
+ bgt 1b
+ dsb st
+ isb
+ ret lr
+ENDPROC(v7m_invalidate_l1)
+
+/*
+ * v7m_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * Registers:
+ * r0 - set to 0
+ */
+ENTRY(v7m_flush_icache_all)
+ invalidate_icache r0
+ ret lr
+ENDPROC(v7m_flush_icache_all)
+
+/*
+ * v7m_flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: r0-r7, r9-r11
+ */
+ENTRY(v7m_flush_dcache_all)
+ dmb @ ensure ordering with previous memory accesses
+ read_clidr r0
+ mov r3, r0, lsr #23 @ move LoC into position
+ ands r3, r3, #7 << 1 @ extract LoC*2 from clidr
+ beq finished @ if loc is 0, then no need to clean
+start_flush_levels:
+ mov r10, #0 @ start clean at cache level 0
+flush_levels:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
+#ifdef CONFIG_PREEMPT
+ save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
+#endif
+ write_csselr r10, r1 @ set current cache level
+ isb @ isb to sync the new cssr&csidr
+ read_ccsidr r1 @ read the new csidr
+#ifdef CONFIG_PREEMPT
+ restore_irqs_notrace r9
+#endif
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ movw r4, #0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ movw r7, #0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+loop1:
+ mov r9, r7 @ create working copy of max index
+loop2:
+ lsl r6, r4, r5
+ orr r11, r10, r6 @ factor way and cache number into r11
+ lsl r6, r9, r2
+ orr r11, r11, r6 @ factor index number into r11
+ dccisw r11, r6 @ clean/invalidate by set/way
+ subs r9, r9, #1 @ decrement the index
+ bge loop2
+ subs r4, r4, #1 @ decrement the way
+ bge loop1
+skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt flush_levels
+finished:
+ mov r10, #0 @ switch back to cache level 0
+ write_csselr r10, r3 @ select current cache level in cssr
+ dsb st
+ isb
+ ret lr
+ENDPROC(v7m_flush_dcache_all)
+
+/*
+ * v7m_flush_kern_cache_all()
+ *
+ * Flush the entire cache system.
+ * The data cache flush is now achieved using atomic clean / invalidates
+ * working outwards from L1 cache. This is done using Set/Way based cache
+ * maintenance instructions.
+ * The instruction cache can still be invalidated back to the point of
+ * unification in a single instruction.
+ *
+ */
+ENTRY(v7m_flush_kern_cache_all)
+ stmfd sp!, {r4-r7, r9-r11, lr}
+ bl v7m_flush_dcache_all
+ invalidate_icache r0
+ ldmfd sp!, {r4-r7, r9-r11, lr}
+ ret lr
+ENDPROC(v7m_flush_kern_cache_all)
+
+/*
+ * v7m_flush_user_cache_all()
+ *
+ * Flush all cache entries in a particular address space
+ *
+ * - mm - mm_struct describing address space
+ */
+ENTRY(v7m_flush_user_cache_all)
+ /*FALLTHROUGH*/
+
+/*
+ * v7m_flush_user_cache_range(start, end, flags)
+ *
+ * Flush a range of cache entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - flags - vm_area_struct flags describing address space
+ *
+ * It is assumed that:
+ * - we have a VIPT cache.
+ */
+ENTRY(v7m_flush_user_cache_range)
+ ret lr
+ENDPROC(v7m_flush_user_cache_all)
+ENDPROC(v7m_flush_user_cache_range)
+
+/*
+ * v7m_coherent_kern_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ *
+ * It is assumed that:
+ * - the Icache does not read data from the write buffer
+ */
+ENTRY(v7m_coherent_kern_range)
+ /* FALLTHROUGH */
+
+/*
+ * v7m_coherent_user_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ *
+ * It is assumed that:
+ * - the Icache does not read data from the write buffer
+ */
+ENTRY(v7m_coherent_user_range)
+ UNWIND(.fnstart )
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r12, r0, r3
+1:
+/*
+ * We use open coded version of dccmvau otherwise USER() would
+ * point at movw instruction.
+ */
+ dccmvau r12, r3
+ add r12, r12, r2
+ cmp r12, r1
+ blo 1b
+ dsb ishst
+ icache_line_size r2, r3
+ sub r3, r2, #1
+ bic r12, r0, r3
+2:
+ icimvau r12, r3
+ add r12, r12, r2
+ cmp r12, r1
+ blo 2b
+ invalidate_bp r0
+ dsb ishst
+ isb
+ ret lr
+ UNWIND(.fnend )
+ENDPROC(v7m_coherent_kern_range)
+ENDPROC(v7m_coherent_user_range)
+
+/*
+ * v7m_flush_kern_dcache_area(void *addr, size_t size)
+ *
+ * Ensure that the data held in the page kaddr is written back
+ * to the page in question.
+ *
+ * - addr - kernel address
+ * - size - region size
+ */
+ENTRY(v7m_flush_kern_dcache_area)
+ dcache_line_size r2, r3
+ add r1, r0, r1
+ sub r3, r2, #1
+ bic r0, r0, r3
+1:
+ dccimvac r0, r3 @ clean & invalidate D line / unified line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb st
+ ret lr
+ENDPROC(v7m_flush_kern_dcache_area)
+
+/*
+ * v7m_dma_inv_range(start,end)
+ *
+ * Invalidate the data cache within the specified region; we will
+ * be performing a DMA operation in this region and we want to
+ * purge old data in the cache.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+v7m_dma_inv_range:
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ tst r0, r3
+ bic r0, r0, r3
+ dccimvacne r0, r3
+ subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
+ tst r1, r3
+ bic r1, r1, r3
+ dccimvacne r1, r3
+1:
+ dcimvac r0, r3
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb st
+ ret lr
+ENDPROC(v7m_dma_inv_range)
+
+/*
+ * v7m_dma_clean_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+v7m_dma_clean_range:
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r0, r0, r3
+1:
+ dccmvac r0, r3 @ clean D / U line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb st
+ ret lr
+ENDPROC(v7m_dma_clean_range)
+
+/*
+ * v7m_dma_flush_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(v7m_dma_flush_range)
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ bic r0, r0, r3
+1:
+ dccimvac r0, r3 @ clean & invalidate D / U line
+ add r0, r0, r2
+ cmp r0, r1
+ blo 1b
+ dsb st
+ ret lr
+ENDPROC(v7m_dma_flush_range)
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7m_dma_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_FROM_DEVICE
+ beq v7m_dma_inv_range
+ b v7m_dma_clean_range
+ENDPROC(v7m_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7m_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v7m_dma_inv_range
+ ret lr
+ENDPROC(v7m_dma_unmap_area)
+
+ .globl v7m_flush_kern_cache_louis
+ .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
+
+ __INITDATA
+
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v7m
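
Unlike ARMv7-A, ARMv7-M has no CP15; every cache maintenance operation above is performed by writing an address (or set/way value) to a memory-mapped register in the System Control Block. A rough bare-metal C sketch of the clean-by-MVA pattern used by v7m_dma_clean_range, assuming the SCB register addresses given in the ARMv7-M ARM (the kernel takes them from asm/v7m.h, which is not part of this hunk):

    #include <stdint.h>

    #define SCB_CTR     (*(volatile uint32_t *)0xE000ED7CUL) /* Cache Type Register */
    #define SCB_DCCMVAC (*(volatile uint32_t *)0xE000EF68UL) /* clean D line by MVA to PoC */

    static void clean_dcache_range(uintptr_t start, uintptr_t end)
    {
            /* CTR[19:16] (DminLine) is log2(words per line), so bytes = 4 << enc */
            uint32_t line = 4U << ((SCB_CTR >> 16) & 0xfU);
            uintptr_t mva = start & ~(uintptr_t)(line - 1);

            for (; mva < end; mva += line)
                    SCB_DCCMVAC = mva;              /* the write triggers the op */

            __asm__ volatile ("dsb" ::: "memory");  /* wait for completion */
    }
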
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c6834c0cfd1c..ab4f74536057 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -436,7 +436,7 @@ static int __init atomic_pool_init(void)
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
(void *)PAGE_SHIFT);
- pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+ pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
atomic_pool_size / 1024);
return 0;
}
@@ -445,7 +445,7 @@ destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
out:
- pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+ pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
atomic_pool_size / 1024);
return -ENOMEM;
}
@@ -2014,6 +2014,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
__free_iova(mapping, iova, len);
}
+/**
+ * arm_iommu_map_resource - map a device resource for DMA
+ * @dev: valid struct device pointer
+ * @phys_addr: physical address of resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static dma_addr_t arm_iommu_map_resource(struct device *dev,
+ phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+ dma_addr_t dma_addr;
+ int ret, prot;
+ phys_addr_t addr = phys_addr & PAGE_MASK;
+ unsigned int offset = phys_addr & ~PAGE_MASK;
+ size_t len = PAGE_ALIGN(size + offset);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+
+ ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_resource - unmap a device DMA resource
+ * @dev: valid struct device pointer
+ * @dma_handle: DMA address to resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+ dma_addr_t iova = dma_handle & PAGE_MASK;
+ unsigned int offset = dma_handle & ~PAGE_MASK;
+ size_t len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
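
Both resource helpers above round the mapping to whole pages and carry the sub-page offset in the returned or passed DMA address. A worked example of that arithmetic, assuming 4 KiB pages (the values and standalone macros below are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE     4096ULL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            uint64_t phys = 0x40001234ULL, size = 0x100ULL;
            uint64_t addr   = phys & PAGE_MASK;          /* 0x40001000 */
            uint64_t offset = phys & ~PAGE_MASK;         /* 0x234 */
            uint64_t len    = PAGE_ALIGN(size + offset); /* 0x1000 (one page) */

            printf("map %#llx + %#llx, hand back iova + %#llx\n",
                   (unsigned long long)addr, (unsigned long long)len,
                   (unsigned long long)offset);
            return 0;
    }
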
+
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
@@ -2057,6 +2114,9 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .map_resource = arm_iommu_map_resource,
+ .unmap_resource = arm_iommu_unmap_resource,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -2070,6 +2130,9 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .map_resource = arm_iommu_map_resource,
+ .unmap_resource = arm_iommu_unmap_resource,
};
/**
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 30fe03f95c85..4001dd15818d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -243,7 +243,7 @@ __setup("noalign", noalign_setup);
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
-static struct mem_type mem_types[] = {
+static struct mem_type mem_types[] __ro_after_init = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index c671f345266a..0d40c285bd86 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -7,6 +7,10 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_CPU_V7M
+#include <asm/v7m.h>
+#endif
+
/*
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
*/
@@ -70,7 +74,13 @@
* on ARMv7.
*/
.macro dcache_line_size, reg, tmp
+#ifdef CONFIG_CPU_V7M
+ movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ ldr \tmp, [\tmp]
+#else
mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+#endif
lsr \tmp, \tmp, #16
and \tmp, \tmp, #0xf @ cache line size encoding
mov \reg, #4 @ bytes per word
@@ -82,7 +92,13 @@
* on ARMv7.
*/
.macro icache_line_size, reg, tmp
+#ifdef CONFIG_CPU_V7M
+ movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ ldr \tmp, [\tmp]
+#else
mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+#endif
and \tmp, \tmp, #0xf @ cache line size encoding
mov \reg, #4 @ bytes per word
mov \reg, \reg, lsl \tmp @ actual cache line size
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 7229d8d0be1a..f6d333f09bfe 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -74,14 +74,42 @@ ENTRY(cpu_v7m_do_resume)
ENDPROC(cpu_v7m_do_resume)
#endif
+ENTRY(cpu_cm7_dcache_clean_area)
+ dcache_line_size r2, r3
+ movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
+ movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
+
+1: str r0, [r3] @ clean D entry
+ add r0, r0, r2
+ subs r1, r1, r2
+ bhi 1b
+ dsb
+ ret lr
+ENDPROC(cpu_cm7_dcache_clean_area)
+
+ENTRY(cpu_cm7_proc_fin)
+ movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ ldr r0, [r2]
+ bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
+ str r0, [r2]
+ ret lr
+ENDPROC(cpu_cm7_proc_fin)
+
.section ".text.init", #alloc, #execinstr
+__v7m_cm7_setup:
+ mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC | V7M_SCB_CCR_BP)
+ b __v7m_setup_cont
/*
* __v7m_setup
*
* This should be able to cover all ARMv7-M cores.
*/
__v7m_setup:
+ mov r8, 0
+
+__v7m_setup_cont:
@ Configure the vector table base address
ldr r0, =BASEADDR_V7M_SCB
ldr r12, =vector_table
@@ -104,6 +132,7 @@ __v7m_setup:
badr r1, 1f
ldr r5, [r12, #11 * 4] @ read the SVC vector entry
str r1, [r12, #11 * 4] @ write the temporary SVC vector entry
+ dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
cpsie i
@@ -116,15 +145,32 @@ __v7m_setup:
mov r1, #1
msr control, r1 @ Thread mode has unprivileged access
+ @ Configure caches (if implemented)
+ teq r8, #0
+ stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ blne v7m_invalidate_l1
+ teq r8, #0 @ re-evaluate condition
+ ldmneia r12, {r0-r6, lr}
+
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.
- ldr r12, [r0, V7M_SCB_CCR] @ system control register
- orr r12, #V7M_SCB_CCR_STKALIGN
- str r12, [r0, V7M_SCB_CCR]
+ ldr r0, [r0, V7M_SCB_CCR] @ system control register
+ orr r0, #V7M_SCB_CCR_STKALIGN
+ orr r0, r0, r8
+
ret lr
ENDPROC(__v7m_setup)
+/*
+ * Cortex-M7 processor functions
+ */
+ globl_equ cpu_cm7_proc_init, cpu_v7m_proc_init
+ globl_equ cpu_cm7_reset, cpu_v7m_reset
+ globl_equ cpu_cm7_do_idle, cpu_v7m_do_idle
+ globl_equ cpu_cm7_switch_mm, cpu_v7m_switch_mm
+
define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
+ define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
string cpu_arch_name, "armv7m"
@@ -133,6 +179,50 @@ ENDPROC(__v7m_setup)
.section ".proc.info.init", #alloc
+.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
+ .long 0 /* proc_info_list.__cpu_mm_mmu_flags */
+ .long 0 /* proc_info_list.__cpu_io_mmu_flags */
+ initfn \initfunc, \name
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps
+ .long cpu_v7m_name
+ .long \proc_fns
+ .long 0 /* proc_info_list.tlb */
+ .long 0 /* proc_info_list.user */
+ .long \cache_fns
+.endm
+
+ /*
+ * Match ARM Cortex-M7 processor.
+ */
+ .type __v7m_cm7_proc_info, #object
+__v7m_cm7_proc_info:
+ .long 0x410fc270 /* ARM Cortex-M7 0xC27 */
+ .long 0xff0ffff0 /* Mask off revision, patch release */
+ __v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
+ .size __v7m_cm7_proc_info, . - __v7m_cm7_proc_info
+
+ /*
+ * Match ARM Cortex-M4 processor.
+ */
+ .type __v7m_cm4_proc_info, #object
+__v7m_cm4_proc_info:
+ .long 0x410fc240 /* ARM Cortex-M4 0xC24 */
+ .long 0xff0ffff0 /* Mask off revision, patch release */
+ __v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
+ .size __v7m_cm4_proc_info, . - __v7m_cm4_proc_info
+
+ /*
+ * Match ARM Cortex-M3 processor.
+ */
+ .type __v7m_cm3_proc_info, #object
+__v7m_cm3_proc_info:
+ .long 0x410fc230 /* ARM Cortex-M3 0xC23 */
+ .long 0xff0ffff0 /* Mask off revision, patch release */
+ __v7m_proc __v7m_cm3_proc_info, __v7m_setup
+ .size __v7m_cm3_proc_info, . - __v7m_cm3_proc_info
+
/*
* Match any ARMv7-M processor core.
*/
@@ -140,16 +230,6 @@ ENDPROC(__v7m_setup)
__v7m_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- .long 0 @ proc_info_list.__cpu_mm_mmu_flags
- .long 0 @ proc_info_list.__cpu_io_mmu_flags
- initfn __v7m_setup, __v7m_proc_info @ proc_info_list.__cpu_flush
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
- .long cpu_v7m_name
- .long v7m_processor_functions @ proc_info_list.proc
- .long 0 @ proc_info_list.tlb
- .long 0 @ proc_info_list.user
- .long nop_cache_fns @ proc_info_list.cache
+ __v7m_proc __v7m_proc_info, __v7m_setup
.size __v7m_proc_info, . - __v7m_proc_info
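
The value/mask pairs in these proc_info entries are matched against the CPUID (MIDR) at boot; the masks above drop the revision and patch-release fields so any stepping of the part is accepted. A small stand-alone check of that logic (the MIDR value below is made up for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool proc_info_matches(uint32_t midr, uint32_t val, uint32_t mask)
    {
            return (midr & mask) == val;
    }

    int main(void)
    {
            uint32_t midr = 0x411fc272;  /* e.g. a Cortex-M7 r1p2-style ID */

            printf("Cortex-M7 entry: %d\n",
                   proc_info_matches(midr, 0x410fc270, 0xff0ffff0));
            printf("generic v7m:     %d\n",
                   proc_info_matches(midr, 0x000f0000, 0x000f0000));
            return 0;
    }
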
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 78c8bf4043c0..272f49b2c68f 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -52,21 +52,27 @@ void __init orion_clkdev_init(struct clk *tclk)
static void fill_resources(struct platform_device *device,
struct resource *resources,
resource_size_t mapbase,
- resource_size_t size,
- unsigned int irq)
+ resource_size_t size)
{
device->resource = resources;
device->num_resources = 1;
resources[0].flags = IORESOURCE_MEM;
resources[0].start = mapbase;
resources[0].end = mapbase + size;
+}
- if (irq != NO_IRQ) {
- device->num_resources++;
- resources[1].flags = IORESOURCE_IRQ;
- resources[1].start = irq;
- resources[1].end = irq;
- }
+static void fill_resources_irq(struct platform_device *device,
+ struct resource *resources,
+ resource_size_t mapbase,
+ resource_size_t size,
+ unsigned int irq)
+{
+ fill_resources(device, resources, mapbase, size);
+
+ device->num_resources++;
+ resources[1].flags = IORESOURCE_IRQ;
+ resources[1].start = irq;
+ resources[1].end = irq;
}
/*****************************************************************************
@@ -93,7 +99,7 @@ static void __init uart_complete(
data->uartclk = uart_get_clk_rate(clk);
orion_uart->dev.platform_data = data;
- fill_resources(orion_uart, resources, mapbase, 0xff, irq);
+ fill_resources_irq(orion_uart, resources, mapbase, 0xff, irq);
platform_device_register(orion_uart);
}
@@ -305,8 +311,8 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned int tx_csum_limit)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
- fill_resources(&orion_ge_mvmdio, orion_ge_mvmdio_resources,
+ mapbase + 0x2000, SZ_16K - 1);
+ fill_resources_irq(&orion_ge_mvmdio, orion_ge_mvmdio_resources,
mapbase + 0x2004, 0x84 - 1, irq_err);
orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge00_shared_data,
@@ -354,11 +360,10 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
unsigned int tx_csum_limit)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
+ mapbase + 0x2000, SZ_16K - 1);
orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
@@ -404,11 +409,10 @@ static struct platform_device orion_ge10 = {
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
- unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq)
{
fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
+ mapbase + 0x2000, SZ_16K - 1);
ge_complete(&orion_ge10_shared_data,
orion_ge10_resources, irq, &orion_ge10_shared,
NULL,
@@ -453,11 +457,10 @@ static struct platform_device orion_ge11 = {
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
- unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq)
{
fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
- mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
+ mapbase + 0x2000, SZ_16K - 1);
ge_complete(&orion_ge11_shared_data,
orion_ge11_resources, irq, &orion_ge11_shared,
NULL,
@@ -467,37 +470,15 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
/*****************************************************************************
* Ethernet switch
****************************************************************************/
-static struct resource orion_switch_resources[] = {
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device orion_switch_device = {
- .name = "dsa",
- .id = 0,
- .num_resources = 0,
- .resource = orion_switch_resources,
-};
-
-void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
+void __init orion_ge00_switch_init(struct dsa_platform_data *d)
{
int i;
- if (irq != NO_IRQ) {
- orion_switch_resources[0].start = irq;
- orion_switch_resources[0].end = irq;
- orion_switch_device.num_resources = 1;
- }
-
d->netdev = &orion_ge00.dev;
for (i = 0; i < d->nr_chips; i++)
d->chip[i].host_dev = &orion_ge_mvmdio.dev;
- orion_switch_device.dev.platform_data = d;
- platform_device_register(&orion_switch_device);
+ platform_device_register_data(NULL, "dsa", 0, d, sizeof(d));
}
/*****************************************************************************
@@ -538,7 +519,7 @@ void __init orion_i2c_init(unsigned long mapbase,
unsigned long freq_m)
{
orion_i2c_pdata.freq_m = freq_m;
- fill_resources(&orion_i2c, orion_i2c_resources, mapbase,
+ fill_resources_irq(&orion_i2c, orion_i2c_resources, mapbase,
SZ_32 - 1, irq);
platform_device_register(&orion_i2c);
}
@@ -548,7 +529,7 @@ void __init orion_i2c_1_init(unsigned long mapbase,
unsigned long freq_m)
{
orion_i2c_1_pdata.freq_m = freq_m;
- fill_resources(&orion_i2c_1, orion_i2c_1_resources, mapbase,
+ fill_resources_irq(&orion_i2c_1, orion_i2c_1_resources, mapbase,
SZ_32 - 1, irq);
platform_device_register(&orion_i2c_1);
}
@@ -576,14 +557,14 @@ static struct platform_device orion_spi_1 = {
void __init orion_spi_init(unsigned long mapbase)
{
fill_resources(&orion_spi, &orion_spi_resources,
- mapbase, SZ_512 - 1, NO_IRQ);
+ mapbase, SZ_512 - 1);
platform_device_register(&orion_spi);
}
void __init orion_spi_1_init(unsigned long mapbase)
{
fill_resources(&orion_spi_1, &orion_spi_1_resources,
- mapbase, SZ_512 - 1, NO_IRQ);
+ mapbase, SZ_512 - 1);
platform_device_register(&orion_spi_1);
}
@@ -741,7 +722,7 @@ void __init orion_ehci_init(unsigned long mapbase,
enum orion_ehci_phy_ver phy_version)
{
orion_ehci_data.phy_version = phy_version;
- fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
+ fill_resources_irq(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
irq);
platform_device_register(&orion_ehci);
@@ -765,7 +746,7 @@ static struct platform_device orion_ehci_1 = {
void __init orion_ehci_1_init(unsigned long mapbase,
unsigned long irq)
{
- fill_resources(&orion_ehci_1, orion_ehci_1_resources,
+ fill_resources_irq(&orion_ehci_1, orion_ehci_1_resources,
mapbase, SZ_4K - 1, irq);
platform_device_register(&orion_ehci_1);
@@ -789,7 +770,7 @@ static struct platform_device orion_ehci_2 = {
void __init orion_ehci_2_init(unsigned long mapbase,
unsigned long irq)
{
- fill_resources(&orion_ehci_2, orion_ehci_2_resources,
+ fill_resources_irq(&orion_ehci_2, orion_ehci_2_resources,
mapbase, SZ_4K - 1, irq);
platform_device_register(&orion_ehci_2);
@@ -819,7 +800,7 @@ void __init orion_sata_init(struct mv_sata_platform_data *sata_data,
unsigned long irq)
{
orion_sata.dev.platform_data = sata_data;
- fill_resources(&orion_sata, orion_sata_resources,
+ fill_resources_irq(&orion_sata, orion_sata_resources,
mapbase, 0x5000 - 1, irq);
platform_device_register(&orion_sata);
@@ -849,7 +830,7 @@ void __init orion_crypto_init(unsigned long mapbase,
unsigned long sram_size,
unsigned long irq)
{
- fill_resources(&orion_crypto, orion_crypto_resources,
+ fill_resources_irq(&orion_crypto, orion_crypto_resources,
mapbase, 0xffff, irq);
orion_crypto.num_resources = 3;
orion_crypto_resources[2].start = srambase;
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index 9e6d76ad48a9..9347f3c58a6d 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -47,21 +47,17 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
unsigned int tx_csum_limit);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
- unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq);
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
- unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq);
-void __init orion_ge00_switch_init(struct dsa_platform_data *d,
- int irq);
+void __init orion_ge00_switch_init(struct dsa_platform_data *d);
void __init orion_i2c_init(unsigned long mapbase,
unsigned long irq,
diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile
index 557b134db772..2f06a2e8b1dd 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/arch/arm/plat-pxa/Makefile
@@ -3,8 +3,6 @@
#
ccflags-$(CONFIG_ARCH_MMP) := -I$(srctree)/$(src)/include
-obj-$(CONFIG_ARCH_PXA) := dma.o
-
obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o
diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c
deleted file mode 100644
index de2b061889ec..000000000000
--- a/arch/arm/plat-pxa/dma.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * linux/arch/arm/plat-pxa/dma.c
- *
- * PXA DMA registration and IRQ dispatching
- *
- * Author: Nicolas Pitre
- * Created: Nov 15, 2001
- * Copyright: MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/irq.h>
-#include <asm/memory.h>
-#include <mach/hardware.h>
-#include <mach/dma.h>
-
-#define DMA_DEBUG_NAME "pxa_dma"
-#define DMA_MAX_REQUESTERS 64
-
-struct dma_channel {
- char *name;
- pxa_dma_prio prio;
- void (*irq_handler)(int, void *);
- void *data;
- spinlock_t lock;
-};
-
-static struct dma_channel *dma_channels;
-static int num_dma_channels;
-
-/*
- * Debug fs
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-
-static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
-
-static int dbg_show_requester_chan(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- int i;
- u32 drcmr;
-
- seq_printf(s, "DMA channel %d requesters list :\n", chan);
- for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
- drcmr = DRCMR(i);
- if ((drcmr & DRCMR_CHLNUM) == chan)
- seq_printf(s, "\tRequester %d (MAPVLD=%d)\n",
- i, !!(drcmr & DRCMR_MAPVLD));
- }
-
- return 0;
-}
-
-static inline int dbg_burst_from_dcmd(u32 dcmd)
-{
- int burst = (dcmd >> 16) & 0x3;
-
- return burst ? 4 << burst : 0;
-}
-
-static int is_phys_valid(unsigned long addr)
-{
- return pfn_valid(__phys_to_pfn(addr));
-}
-
-#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
-#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
-
-static int dbg_show_descriptors(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- int i, max_show = 20, burst, width;
- u32 dcmd;
- unsigned long phys_desc;
- struct pxa_dma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&dma_channels[chan].lock, flags);
- phys_desc = DDADR(chan);
-
- seq_printf(s, "DMA channel %d descriptors :\n", chan);
- seq_printf(s, "[%03d] First descriptor unknown\n", 0);
- for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
- desc = phys_to_virt(phys_desc);
- dcmd = desc->dcmd;
- burst = dbg_burst_from_dcmd(dcmd);
- width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
-
- seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
- i, phys_desc, desc);
- seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
- seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
- seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
- seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width,
- dcmd & DCMD_LENGTH);
- phys_desc = desc->ddadr;
- }
- if (i == max_show)
- seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
- i, phys_desc);
- else
- seq_printf(s, "[%03d] Desc at %08lx is %s\n",
- i, phys_desc, phys_desc == DDADR_STOP ?
- "DDADR_STOP" : "invalid");
-
- spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
-
- return 0;
-}
-
-static int dbg_show_chan_state(struct seq_file *s, void *p)
-{
- int chan = (int)s->private;
- u32 dcsr, dcmd;
- int burst, width;
- static char *str_prio[] = { "high", "normal", "low" };
-
- dcsr = DCSR(chan);
- dcmd = DCMD(chan);
- burst = dbg_burst_from_dcmd(dcmd);
- width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
-
- seq_printf(s, "DMA channel %d\n", chan);
- seq_printf(s, "\tPriority : %s\n", str_prio[dma_channels[chan].prio]);
- seq_printf(s, "\tUnaligned transfer bit: %s\n",
- DALGN & (1 << chan) ? "yes" : "no");
- seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
- dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
- DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
- DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
- DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
- DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
- DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
- DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
-
- seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
- dcmd,
- DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
- DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
- DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
- DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
- seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
- seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
- seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
-
- return 0;
-}
-
-static int dbg_show_state(struct seq_file *s, void *p)
-{
- /* basic device status */
- seq_puts(s, "DMA engine status\n");
- seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
-
- return 0;
-}
-
-#define DBGFS_FUNC_DECL(name) \
-static int dbg_open_##name(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, dbg_show_##name, inode->i_private); \
-} \
-static const struct file_operations dbg_fops_##name = { \
- .owner = THIS_MODULE, \
- .open = dbg_open_##name, \
- .llseek = seq_lseek, \
- .read = seq_read, \
- .release = single_release, \
-}
-
-DBGFS_FUNC_DECL(state);
-DBGFS_FUNC_DECL(chan_state);
-DBGFS_FUNC_DECL(descriptors);
-DBGFS_FUNC_DECL(requester_chan);
-
-static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
-{
- char chan_name[11];
- struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
- struct dentry *chan_reqs = NULL;
- void *dt;
-
- scnprintf(chan_name, sizeof(chan_name), "%d", ch);
- chan = debugfs_create_dir(chan_name, chandir);
- dt = (void *)ch;
-
- if (chan)
- chan_state = debugfs_create_file("state", 0400, chan, dt,
- &dbg_fops_chan_state);
- if (chan_state)
- chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
- &dbg_fops_descriptors);
- if (chan_descr)
- chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
- &dbg_fops_requester_chan);
- if (!chan_reqs)
- goto err_state;
-
- return chan;
-
-err_state:
- debugfs_remove_recursive(chan);
- return NULL;
-}
-
-static void pxa_dma_init_debugfs(void)
-{
- int i;
- struct dentry *chandir;
-
- dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
- if (IS_ERR(dbgfs_root) || !dbgfs_root)
- goto err_root;
-
- dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
- &dbg_fops_state);
- if (!dbgfs_state)
- goto err_state;
-
- dbgfs_chan = kmalloc(sizeof(*dbgfs_chan) * num_dma_channels,
- GFP_KERNEL);
- if (!dbgfs_chan)
- goto err_alloc;
-
- chandir = debugfs_create_dir("channels", dbgfs_root);
- if (!chandir)
- goto err_chandir;
-
- for (i = 0; i < num_dma_channels; i++) {
- dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
- if (!dbgfs_chan[i])
- goto err_chans;
- }
-
- return;
-err_chans:
-err_chandir:
- kfree(dbgfs_chan);
-err_alloc:
-err_state:
- debugfs_remove_recursive(dbgfs_root);
-err_root:
- pr_err("pxa_dma: debugfs is not available\n");
-}
-
-static void __exit pxa_dma_cleanup_debugfs(void)
-{
- debugfs_remove_recursive(dbgfs_root);
-}
-#else
-static inline void pxa_dma_init_debugfs(void) {}
-static inline void pxa_dma_cleanup_debugfs(void) {}
-#endif
-
-int pxa_request_dma (char *name, pxa_dma_prio prio,
- void (*irq_handler)(int, void *),
- void *data)
-{
- unsigned long flags;
- int i, found = 0;
-
- /* basic sanity checks */
- if (!name || !irq_handler)
- return -EINVAL;
-
- local_irq_save(flags);
-
- do {
- /* try grabbing a DMA channel with the requested priority */
- for (i = 0; i < num_dma_channels; i++) {
- if ((dma_channels[i].prio == prio) &&
- !dma_channels[i].name &&
- !pxad_toggle_reserved_channel(i)) {
- found = 1;
- break;
- }
- }
- /* if requested prio group is full, try a higher priority */
- } while (!found && prio--);
-
- if (found) {
- DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
- dma_channels[i].name = name;
- dma_channels[i].irq_handler = irq_handler;
- dma_channels[i].data = data;
- } else {
- printk (KERN_WARNING "No more available DMA channels for %s\n", name);
- i = -ENODEV;
- }
-
- local_irq_restore(flags);
- return i;
-}
-EXPORT_SYMBOL(pxa_request_dma);
-
-void pxa_free_dma (int dma_ch)
-{
- unsigned long flags;
-
- if (!dma_channels[dma_ch].name) {
- printk (KERN_CRIT
- "%s: trying to free channel %d which is already freed\n",
- __func__, dma_ch);
- return;
- }
-
- local_irq_save(flags);
- DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
- dma_channels[dma_ch].name = NULL;
- pxad_toggle_reserved_channel(dma_ch);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(pxa_free_dma);
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- int i, dint = DINT, done = 0;
- struct dma_channel *channel;
-
- while (dint) {
- i = __ffs(dint);
- dint &= (dint - 1);
- channel = &dma_channels[i];
- if (channel->name && channel->irq_handler) {
- channel->irq_handler(i, channel->data);
- done++;
- }
- }
- if (done)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-int __init pxa_init_dma(int irq, int num_ch)
-{
- int i, ret;
-
- dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
- if (dma_channels == NULL)
- return -ENOMEM;
-
- /* dma channel priorities on pxa2xx processors:
- * ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
- * ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
- * ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
- */
- for (i = 0; i < num_ch; i++) {
- DCSR(i) = 0;
- dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
- spin_lock_init(&dma_channels[i].lock);
- }
-
- ret = request_irq(irq, dma_irq_handler, IRQF_SHARED, "DMA",
- dma_channels);
- if (ret) {
- printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
- kfree(dma_channels);
- return ret;
- }
- num_dma_channels = num_ch;
-
- pxa_dma_init_debugfs();
-
- return 0;
-}
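
For reference, the channel-to-priority rule documented in the removed pxa_init_dma() above reduces to one expression per channel: min((chan & 0xf) >> 2, DMA_PRIO_LOW). The following user-space sketch is illustrative only and not part of the kernel tree; the DMA_PRIO_* values mirror the plat/dma.h deleted next, while the helper name, main() and the printf() loop are assumptions made purely for demonstration.

/* pxa_prio_demo.c - standalone sketch of the PXA2xx DMA channel priority map */
#include <stdio.h>

enum pxa_dma_prio_demo {              /* same numeric values as the removed header */
	DMA_PRIO_HIGH   = 0,          /* channels 0-3 and 16-19  */
	DMA_PRIO_MEDIUM = 1,          /* channels 4-7 and 20-23  */
	DMA_PRIO_LOW    = 2,          /* channels 8-15 and 24-31 */
};

static int pxa_chan_prio(int chan)
{
	int prio = (chan & 0xf) >> 2;   /* 0..3 inside each bank of 16 channels */

	return prio < DMA_PRIO_LOW ? prio : DMA_PRIO_LOW;   /* clamp 3 to "low" */
}

int main(void)
{
	int chan;

	for (chan = 0; chan < 32; chan++)
		printf("channel %2d -> priority %d\n", chan, pxa_chan_prio(chan));
	return 0;
}

Compiled with any C compiler, the output matches the comment block in pxa_init_dma(), which is why the removed code could get away with the terse min((i & 0xf) >> 2, DMA_PRIO_LOW).
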
diff --git a/arch/arm/plat-pxa/include/plat/dma.h b/arch/arm/plat-pxa/include/plat/dma.h
deleted file mode 100644
index ceba3e4184fc..000000000000
--- a/arch/arm/plat-pxa/include/plat/dma.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __PLAT_DMA_H
-#define __PLAT_DMA_H
-
-#define DMAC_REG(x) (*((volatile u32 *)(DMAC_REGS_VIRT + (x))))
-
-#define DCSR(n) DMAC_REG((n) << 2)
-#define DALGN DMAC_REG(0x00a0) /* DMA Alignment Register */
-#define DINT DMAC_REG(0x00f0) /* DMA Interrupt Register */
-#define DDADR(n) DMAC_REG(0x0200 + ((n) << 4))
-#define DSADR(n) DMAC_REG(0x0204 + ((n) << 4))
-#define DTADR(n) DMAC_REG(0x0208 + ((n) << 4))
-#define DCMD(n) DMAC_REG(0x020c + ((n) << 4))
-#define DRCMR(n) DMAC_REG((((n) < 64) ? 0x0100 : 0x1100) + \
- (((n) & 0x3f) << 2))
-
-#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
-#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
-#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
-#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
-#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
-#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
-#define DCSR_EORINTR (1 << 9) /* The end of Receive */
-
-#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
-#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
-
-#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
-#define DDADR_STOP (1 << 0) /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
-#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
-#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
-#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
-#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
-#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
-#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
-#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
-#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
-#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
-#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
-
-/*
- * Descriptor structure for PXA's DMA engine
- * Note: this structure must always be aligned to a 16-byte boundary.
- */
-
-typedef struct pxa_dma_desc {
- volatile u32 ddadr; /* Points to the next descriptor + flags */
- volatile u32 dsadr; /* DSADR value for the current transfer */
- volatile u32 dtadr; /* DTADR value for the current transfer */
- volatile u32 dcmd; /* DCMD value for the current transfer */
-} pxa_dma_desc;
-
-typedef enum {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-} pxa_dma_prio;
-
-/*
- * DMA registration
- */
-
-int __init pxa_init_dma(int irq, int num_ch);
-
-int pxa_request_dma (char *name,
- pxa_dma_prio prio,
- void (*irq_handler)(int, void *),
- void *data);
-
-void pxa_free_dma (int dma_ch);
-
-/*
- * Cooperation with pxa_dma + dmaengine while there remains at least one pxa
- * driver not converted to dmaengine.
- */
-#if defined(CONFIG_PXA_DMA)
-extern int pxad_toggle_reserved_channel(int legacy_channel);
-#else
-static inline int pxad_toggle_reserved_channel(int legacy_channel)
-{
- return 0;
-}
-#endif
-
-extern void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
-
-#endif /* __PLAT_DMA_H */
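
As a cross-check on the DCMD bit fields above, the burst and width encodings decode exactly the way the removed debugfs helpers (dbg_burst_from_dcmd() and the width computation in dbg_show_descriptors()) did. The sketch below is a user-space illustration only; the shift and mask values come from the deleted header, and everything else (function names, the sample descriptor word, main()) is assumed for the example.

/* dcmd_decode_demo.c - decode the burst/width/length fields of a PXA DCMD word */
#include <stdio.h>
#include <stdint.h>

static int dcmd_burst_bytes(uint32_t dcmd)
{
	int field = (dcmd >> 16) & 0x3;     /* 1 -> 8, 2 -> 16, 3 -> 32 bytes */

	return field ? 4 << field : 0;      /* 0 means no burst size encoded */
}

static int dcmd_width_bytes(uint32_t dcmd)
{
	return (1 << ((dcmd >> 14) & 0x3)) >> 1;    /* 0, 1, 2 or 4 bytes */
}

int main(void)
{
	/* DCMD_BURST32 | DCMD_WIDTH4 | a 256-byte transfer length */
	uint32_t dcmd = (3u << 16) | (3u << 14) | 256;

	printf("burst=%d width=%d len=%u\n",
	       dcmd_burst_bytes(dcmd), dcmd_width_bytes(dcmd),
	       dcmd & 0x1fff);              /* DCMD_LENGTH mask */
	return 0;
}

Running it prints "burst=32 width=4 len=256", the same decoding the deleted debugfs "descriptors" file performed for each descriptor it walked.
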
diff --git a/arch/arm/plat-samsung/include/plat/map-s5p.h b/arch/arm/plat-samsung/include/plat/map-s5p.h
index b63aeebb93f3..0fe2828f9354 100644
--- a/arch/arm/plat-samsung/include/plat/map-s5p.h
+++ b/arch/arm/plat-samsung/include/plat/map-s5p.h
@@ -14,10 +14,6 @@
#define __ASM_PLAT_MAP_S5P_H __FILE__
#define S5P_VA_CHIPID S3C_ADDR(0x02000000)
-#define S5P_VA_CMU S3C_ADDR(0x02100000)
-
-#define S5P_VA_DMC0 S3C_ADDR(0x02440000)
-#define S5P_VA_DMC1 S3C_ADDR(0x02480000)
#define S5P_VA_COREPERI_BASE S3C_ADDR(0x02800000)
#define S5P_VA_COREPERI(x) (S5P_VA_COREPERI_BASE + (x))
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 17c14a1d9112..30398dbc940a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,19 +4,19 @@ config ARM64
select ACPI_GENERIC_GSI if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ACPI_MCFG if ACPI
+ select ACPI_SPCR_TABLE if ACPI
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
- select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
select ARCH_HAS_UBSAN_SANITIZE_ALL
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index a8c77c72a831..cfbdf02ef566 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -3,6 +3,8 @@ menu "Platform selection"
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select GENERIC_IRQ_CHIP
+ select PINCTRL
+ select PINCTRL_SUN50I_A64
help
This enables support for Allwinner sunxi based SoCs like the A64.
@@ -15,8 +17,8 @@ config ARCH_ALPINE
config ARCH_BCM2835
bool "Broadcom BCM2835 family"
- select ARCH_REQUIRE_GPIOLIB
select CLKSRC_OF
+ select GPIOLIB
select PINCTRL
select PINCTRL_BCM2835
select ARM_AMBA
@@ -29,19 +31,26 @@ config ARCH_BCM2835
config ARCH_BCM_IPROC
bool "Broadcom iProc SoC Family"
select COMMON_CLK_IPROC
+ select GPIOLIB
select PINCTRL
- select ARCH_REQUIRE_GPIOLIB
help
This enables support for Broadcom iProc based SoCs
config ARCH_BERLIN
bool "Marvell Berlin SoC Family"
- select ARCH_REQUIRE_GPIOLIB
select DW_APB_ICTL
+ select GPIOLIB
select PINCTRL
help
This enables support for Marvell Berlin SoC Family
+config ARCH_BRCMSTB
+ bool "Broadcom Set-Top-Box SoCs"
+ select BRCMSTB_L2_IRQ
+ select GENERIC_IRQ_CHIP
+ help
+ This enables support for Broadcom's ARMv8 Set Top Box SoCs
+
config ARCH_EXYNOS
bool "ARMv8 based Samsung Exynos SoC family"
select COMMON_CLK_SAMSUNG
@@ -55,6 +64,7 @@ config ARCH_EXYNOS
config ARCH_LAYERSCAPE
bool "ARMv8 based Freescale Layerscape SoC family"
+ select EDAC_SUPPORT
help
This enables support for the Freescale Layerscape SoC family.
@@ -67,6 +77,7 @@ config ARCH_HISI
bool "Hisilicon SoC Family"
select ARM_TIMER_SP804
select HISILICON_IRQ_MBIGEN if PCI
+ select PINCTRL
help
This enables support for Hisilicon ARMv8 SoC family
@@ -109,7 +120,7 @@ config ARCH_QCOM
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select PINCTRL
select PINCTRL_ROCKCHIP
select ROCKCHIP_TIMER
@@ -155,12 +166,14 @@ config ARCH_STRATIX10
config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"
select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select PINCTRL
+ select PM
+ select PM_GENERIC_DOMAINS
select RESET_CONTROLLER
help
This enables support for the NVIDIA Tegra SoC family.
@@ -183,8 +196,8 @@ config ARCH_UNIPHIER
config ARCH_VEXPRESS
bool "ARMv8 software model (Versatile Express)"
- select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_VERSATILE
+ select GPIOLIB
select PM
select PM_GENERIC_DOMAINS
select POWER_RESET_VEXPRESS
@@ -204,6 +217,11 @@ config ARCH_XGENE
help
This enables support for AppliedMicro X-Gene SOC Family
+config ARCH_ZX
+ bool "ZTE ZX SoC Family"
+ help
+ This enables support for ZTE ZX SoC Family
+
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
help
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 6e199c903676..6684f97c2722 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -19,6 +19,7 @@ dts-dirs += socionext
dts-dirs += sprd
dts-dirs += xilinx
dts-dirs += lg
+dts-dirs += zte
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 90a84c514d3d..e6e3491d48a5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -87,3 +87,14 @@
pinctrl-names = "default";
};
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&i2c_A {
+ status = "okay";
+ pinctrl-0 = <&i2c_a_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
index 62979076e250..03e3d76626dd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
@@ -50,3 +50,9 @@
compatible = "amlogic,p200", "amlogic,meson-gxbb";
model = "Amlogic Meson GXBB P200 Development Board";
};
+
+&i2c_B {
+ status = "okay";
+ pinctrl-0 = <&i2c_b_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index f4f30f674b4c..06a34dc6002f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -57,6 +57,19 @@
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
+
+ usb_pwr: regulator-usb-pwrs {
+ compatible = "regulator-fixed";
+
+ regulator-name = "USB_PWR";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ /* signal name in schematic: USB_PWR_EN */
+ gpio = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -72,3 +85,25 @@
pinctrl-names = "default";
};
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&usb0_phy {
+ status = "okay";
+ phy-supply = <&usb_pwr>;
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 54bb7c739089..73f159370188 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -52,6 +52,19 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ usb_vbus: regulator-usb0-vbus {
+ compatible = "regulator-fixed";
+
+ regulator-name = "USB0_VBUS";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
};
&uart_AO {
@@ -60,3 +73,32 @@
pinctrl-names = "default";
};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+};
+
+&usb0_phy {
+ status = "okay";
+ phy-supply = <&usb_vbus>;
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index bf6c8d051002..610e0e1c3cee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -45,6 +45,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/gpio/meson-gxbb-gpio.h>
#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
+#include <dt-bindings/clock/gxbb-clkc.h>
+#include <dt-bindings/clock/gxbb-aoclkc.h>
+#include <dt-bindings/reset/gxbb-aoclkc.h>
/ {
compatible = "amlogic,meson-gxbb";
@@ -99,6 +102,30 @@
method = "smc";
};
+ firmware {
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+ };
+ };
+
+ efuse: efuse {
+ compatible = "amlogic,meson-gxbb-efuse";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sn: sn@14 {
+ reg = <0x14 0x10>;
+ };
+
+ eth_mac: eth_mac@34 {
+ reg = <0x34 0x10>;
+ };
+
+ bid: bid@46 {
+ reg = <0x46 0x30>;
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
@@ -124,6 +151,25 @@
#size-cells = <2>;
ranges;
+ usb0_phy: phy@c0000000 {
+ compatible = "amlogic,meson-gxbb-usb2-phy";
+ #phy-cells = <0>;
+ reg = <0x0 0xc0000000 0x0 0x20>;
+ resets = <&reset RESET_USB_OTG>;
+ clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
+ clock-names = "usb_general", "usb";
+ status = "disabled";
+ };
+
+ usb1_phy: phy@c0000020 {
+ compatible = "amlogic,meson-gxbb-usb2-phy";
+ #phy-cells = <0>;
+ reg = <0x0 0xc0000020 0x0 0x20>;
+ clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1>;
+ clock-names = "usb_general", "usb";
+ status = "disabled";
+ };
+
cbus: cbus@c1100000 {
compatible = "simple-bus";
reg = <0x0 0xc1100000 0x0 0x100000>;
@@ -153,6 +199,27 @@
status = "disabled";
};
+ pwm_ab: pwm@8550 {
+ compatible = "amlogic,meson-gxbb-pwm";
+ reg = <0x0 0x08550 0x0 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_cd: pwm@8650 {
+ compatible = "amlogic,meson-gxbb-pwm";
+ reg = <0x0 0x08650 0x0 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm_ef: pwm@86c0 {
+ compatible = "amlogic,meson-gxbb-pwm";
+ reg = <0x0 0x086c0 0x0 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
uart_C: serial@8700 {
compatible = "amlogic,meson-uart";
reg = <0x0 0x8700 0x0 0x14>;
@@ -160,6 +227,51 @@
clocks = <&xtal>;
status = "disabled";
};
+
+ watchdog@98d0 {
+ compatible = "amlogic,meson-gxbb-wdt";
+ reg = <0x0 0x098d0 0x0 0x10>;
+ clocks = <&xtal>;
+ };
+
+ spifc: spi@8c80 {
+ compatible = "amlogic,meson-gxbb-spifc";
+ reg = <0x0 0x08c80 0x0 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc CLKID_SPI>;
+ status = "disabled";
+ };
+
+ i2c_A: i2c@8500 {
+ compatible = "amlogic,meson-gxbb-i2c";
+ reg = <0x0 0x08500 0x0 0x20>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_I2C>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c_B: i2c@87c0 {
+ compatible = "amlogic,meson-gxbb-i2c";
+ reg = <0x0 0x087c0 0x0 0x20>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_I2C>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c_C: i2c@87e0 {
+ compatible = "amlogic,meson-gxbb-i2c";
+ reg = <0x0 0x087e0 0x0 0x20>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_I2C>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
gic: interrupt-controller@c4301000 {
@@ -203,6 +315,56 @@
function = "uart_ao";
};
};
+
+ remote_input_ao_pins: remote_input_ao {
+ mux {
+ groups = "remote_input_ao";
+ function = "remote_input_ao";
+ };
+ };
+
+ i2c_ao_pins: i2c_ao {
+ mux {
+ groups = "i2c_sck_ao",
+ "i2c_sda_ao";
+ function = "i2c_ao";
+ };
+ };
+
+ pwm_ao_a_3_pins: pwm_ao_a_3 {
+ mux {
+ groups = "pwm_ao_a_3";
+ function = "pwm_ao_a_3";
+ };
+ };
+
+ pwm_ao_a_6_pins: pwm_ao_a_6 {
+ mux {
+ groups = "pwm_ao_a_6";
+ function = "pwm_ao_a_6";
+ };
+ };
+
+ pwm_ao_a_12_pins: pwm_ao_a_12 {
+ mux {
+ groups = "pwm_ao_a_12";
+ function = "pwm_ao_a_12";
+ };
+ };
+
+ pwm_ao_b_pins: pwm_ao_b {
+ mux {
+ groups = "pwm_ao_b";
+ function = "pwm_ao_b";
+ };
+ };
+ };
+
+ clkc_AO: clock-controller@040 {
+ compatible = "amlogic,gxbb-aoclkc";
+ reg = <0x0 0x00040 0x0 0x4>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
};
uart_AO: serial@4c0 {
@@ -212,6 +374,30 @@
clocks = <&xtal>;
status = "disabled";
};
+
+ ir: ir@580 {
+ compatible = "amlogic,meson-gxbb-ir";
+ reg = <0x0 0x00580 0x0 0x40>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>;
+ status = "disabled";
+ };
+
+ pwm_ab_AO: pwm@550 {
+ compatible = "amlogic,meson-gxbb-pwm";
+ reg = <0x0 0x0550 0x0 0x10>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ i2c_AO: i2c@500 {
+ compatible = "amlogic,meson-gxbb-i2c";
+ reg = <0x0 0x500 0x0 0x20>;
+ interrupts = <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc CLKID_AO_I2C>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
periphs: periphs@c8834000 {
@@ -251,6 +437,16 @@
};
};
+ nor_pins: nor {
+ mux {
+ groups = "nor_d",
+ "nor_q",
+ "nor_c",
+ "nor_cs";
+ function = "nor";
+ };
+ };
+
sdcard_pins: sdcard {
mux {
groups = "sdcard_d0",
@@ -263,6 +459,25 @@
};
};
+ sdio_pins: sdio {
+ mux {
+ groups = "sdio_d0",
+ "sdio_d1",
+ "sdio_d2",
+ "sdio_d3",
+ "sdio_cmd",
+ "sdio_clk";
+ function = "sdio";
+ };
+ };
+
+ sdio_irq_pins: sdio_irq {
+ mux {
+ groups = "sdio_irq";
+ function = "sdio";
+ };
+ };
+
uart_a_pins: uart_a {
mux {
groups = "uart_tx_a",
@@ -287,6 +502,30 @@
};
};
+ i2c_a_pins: i2c_a {
+ mux {
+ groups = "i2c_sck_a",
+ "i2c_sda_a";
+ function = "i2c_a";
+ };
+ };
+
+ i2c_b_pins: i2c_b {
+ mux {
+ groups = "i2c_sck_b",
+ "i2c_sda_b";
+ function = "i2c_b";
+ };
+ };
+
+ i2c_c_pins: i2c_c {
+ mux {
+ groups = "i2c_sck_c",
+ "i2c_sda_c";
+ function = "i2c_c";
+ };
+ };
+
eth_pins: eth_c {
mux {
groups = "eth_mdio",
@@ -306,6 +545,55 @@
function = "eth";
};
};
+
+ pwm_a_x_pins: pwm_a_x {
+ mux {
+ groups = "pwm_a_x";
+ function = "pwm_a_x";
+ };
+ };
+
+ pwm_a_y_pins: pwm_a_y {
+ mux {
+ groups = "pwm_a_y";
+ function = "pwm_a_y";
+ };
+ };
+
+ pwm_b_pins: pwm_b {
+ mux {
+ groups = "pwm_b";
+ function = "pwm_b";
+ };
+ };
+
+ pwm_d_pins: pwm_d {
+ mux {
+ groups = "pwm_d";
+ function = "pwm_d";
+ };
+ };
+
+ pwm_e_pins: pwm_e {
+ mux {
+ groups = "pwm_e";
+ function = "pwm_e";
+ };
+ };
+
+ pwm_f_x_pins: pwm_f_x {
+ mux {
+ groups = "pwm_f_x";
+ function = "pwm_f_x";
+ };
+ };
+
+ pwm_f_y_pins: pwm_f_y {
+ mux {
+ groups = "pwm_f_y";
+ function = "pwm_f_y";
+ };
+ };
};
};
@@ -321,6 +609,15 @@
#clock-cells = <1>;
reg = <0x0 0x0 0x0 0x3db>;
};
+
+ mailbox: mailbox@404 {
+ compatible = "amlogic,meson-gxbb-mhu";
+ reg = <0 0x404 0 0x4c>;
+ interrupts = <0 208 IRQ_TYPE_EDGE_RISING>,
+ <0 209 IRQ_TYPE_EDGE_RISING>,
+ <0 210 IRQ_TYPE_EDGE_RISING>;
+ #mbox-cells = <1>;
+ };
};
apb: apb@d0000000 {
@@ -331,14 +628,40 @@
ranges = <0x0 0x0 0x0 0xd0000000 0x0 0x200000>;
};
+ usb0: usb@c9000000 {
+ compatible = "amlogic,meson-gxbb-usb", "snps,dwc2";
+ reg = <0x0 0xc9000000 0x0 0x40000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB0_DDR_BRIDGE>;
+ clock-names = "otg";
+ phys = <&usb0_phy>;
+ phy-names = "usb2-phy";
+ dr_mode = "host";
+ status = "disabled";
+ };
+
+ usb1: usb@c9100000 {
+ compatible = "amlogic,meson-gxbb-usb", "snps,dwc2";
+ reg = <0x0 0xc9100000 0x0 0x40000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "otg";
+ phys = <&usb1_phy>;
+ phy-names = "usb2-phy";
+ dr_mode = "host";
+ status = "disabled";
+ };
+
ethmac: ethernet@c9410000 {
- compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+ compatible = "amlogic,meson-gxbb-dwmac", "snps,dwmac";
reg = <0x0 0xc9410000 0x0 0x10000
0x0 0xc8834540 0x0 0x4>;
interrupts = <0 8 1>;
interrupt-names = "macirq";
- clocks = <&xtal>;
- clock-names = "stmmaceth";
+ clocks = <&clkc CLKID_ETH>,
+ <&clkc CLKID_FCLK_DIV2>,
+ <&clkc CLKID_MPLL2>;
+ clock-names = "stmmaceth", "clkin0", "clkin1";
phy-mode = "rgmii";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index b7fb5d9295c2..32a961c5e98a 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -74,6 +74,7 @@
&xgenet {
status = "ok";
+ rxlos-gpios = <&sbgpio 12 1>;
};
&mmc0 {
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index 1425ed41620c..72720e9132a1 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -26,6 +26,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_0>;
+ #clock-cells = <1>;
+ clocks = <&pmd0clk 0>;
};
cpu@001 {
device_type = "cpu";
@@ -34,6 +36,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_0>;
+ #clock-cells = <1>;
+ clocks = <&pmd0clk 0>;
};
cpu@100 {
device_type = "cpu";
@@ -42,6 +46,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_1>;
+ #clock-cells = <1>;
+ clocks = <&pmd1clk 0>;
};
cpu@101 {
device_type = "cpu";
@@ -50,6 +56,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_1>;
+ #clock-cells = <1>;
+ clocks = <&pmd1clk 0>;
};
cpu@200 {
device_type = "cpu";
@@ -58,6 +66,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_2>;
+ #clock-cells = <1>;
+ clocks = <&pmd2clk 0>;
};
cpu@201 {
device_type = "cpu";
@@ -66,6 +76,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_2>;
+ #clock-cells = <1>;
+ clocks = <&pmd2clk 0>;
};
cpu@300 {
device_type = "cpu";
@@ -74,6 +86,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_3>;
+ #clock-cells = <1>;
+ clocks = <&pmd3clk 0>;
};
cpu@301 {
device_type = "cpu";
@@ -82,6 +96,8 @@
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
next-level-cache = <&xgene_L2_3>;
+ #clock-cells = <1>;
+ clocks = <&pmd3clk 0>;
};
xgene_L2_0: l2-cache-0 {
compatible = "cache";
@@ -223,6 +239,46 @@
clock-output-names = "refclk";
};
+ pmdpll: pmdpll@170000f0 {
+ compatible = "apm,xgene-pcppll-v2-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ reg = <0x0 0x170000f0 0x0 0x10>;
+ clock-output-names = "pmdpll";
+ };
+
+ pmd0clk: pmd0clk@7e200200 {
+ compatible = "apm,xgene-pmd-clock";
+ #clock-cells = <1>;
+ clocks = <&pmdpll 0>;
+ reg = <0x0 0x7e200200 0x0 0x10>;
+ clock-output-names = "pmd0clk";
+ };
+
+ pmd1clk: pmd1clk@7e200210 {
+ compatible = "apm,xgene-pmd-clock";
+ #clock-cells = <1>;
+ clocks = <&pmdpll 0>;
+ reg = <0x0 0x7e200210 0x0 0x10>;
+ clock-output-names = "pmd1clk";
+ };
+
+ pmd2clk: pmd2clk@7e200220 {
+ compatible = "apm,xgene-pmd-clock";
+ #clock-cells = <1>;
+ clocks = <&pmdpll 0>;
+ reg = <0x0 0x7e200220 0x0 0x10>;
+ clock-output-names = "pmd2clk";
+ };
+
+ pmd3clk: pmd3clk@7e200230 {
+ compatible = "apm,xgene-pmd-clock";
+ #clock-cells = <1>;
+ clocks = <&pmdpll 0>;
+ reg = <0x0 0x7e200230 0x0 0x10>;
+ clock-output-names = "pmd3clk";
+ };
+
socpll: socpll@17000120 {
compatible = "apm,xgene-socpll-v2-clock";
#clock-cells = <1>;
@@ -453,6 +509,64 @@
};
};
+ pmu: pmu@78810000 {
+ compatible = "apm,xgene-pmu-v2";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ regmap-csw = <&csw>;
+ regmap-mcba = <&mcba>;
+ regmap-mcbb = <&mcbb>;
+ reg = <0x0 0x78810000 0x0 0x1000>;
+ interrupts = <0x0 0x22 0x4>;
+
+ pmul3c@7e610000 {
+ compatible = "apm,xgene-pmu-l3c";
+ reg = <0x0 0x7e610000 0x0 0x1000>;
+ };
+
+ pmuiob@7e940000 {
+ compatible = "apm,xgene-pmu-iob";
+ reg = <0x0 0x7e940000 0x0 0x1000>;
+ };
+
+ pmucmcb@7e710000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e710000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmcb@7e730000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e730000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e810000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e810000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmc@7e850000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e850000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e890000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e890000 0x0 0x1000>;
+ enable-bit-index = <2>;
+ };
+
+ pmucmc@7e8d0000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e8d0000 0x0 0x1000>;
+ enable-bit-index = <3>;
+ };
+ };
+
mailbox: mailbox@10540000 {
compatible = "apm,xgene-slimpro-mbox";
reg = <0x0 0x10540000 0x0 0x8000>;
@@ -472,6 +586,11 @@
mboxes = <&mailbox 0>;
};
+ hwmonslimpro {
+ compatible = "apm,xgene-slimpro-hwmon";
+ mboxes = <&mailbox 7>;
+ };
+
serial0: serial@10600000 {
device_type = "serial";
compatible = "ns16550";
@@ -508,10 +627,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 0x10 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x11 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 0x12 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 0x13 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 0x10 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x11 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 0x12 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 0x13 0x4>;
dma-coherent;
clocks = <&pcie0clk 0>;
msi-parent = <&v2m0>;
@@ -533,10 +652,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 0x16 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x17 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 0x18 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 0x19 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 0x16 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 0x17 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 0x18 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 0x19 0x4>;
dma-coherent;
clocks = <&pcie1clk 0>;
msi-parent = <&v2m0>;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index c29dab9d1834..63be8e51eaa8 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -553,6 +553,64 @@
};
};
+ pmu: pmu@78810000 {
+ compatible = "apm,xgene-pmu-v2";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ regmap-csw = <&csw>;
+ regmap-mcba = <&mcba>;
+ regmap-mcbb = <&mcbb>;
+ reg = <0x0 0x78810000 0x0 0x1000>;
+ interrupts = <0x0 0x22 0x4>;
+
+ pmul3c@7e610000 {
+ compatible = "apm,xgene-pmu-l3c";
+ reg = <0x0 0x7e610000 0x0 0x1000>;
+ };
+
+ pmuiob@7e940000 {
+ compatible = "apm,xgene-pmu-iob";
+ reg = <0x0 0x7e940000 0x0 0x1000>;
+ };
+
+ pmucmcb@7e710000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e710000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmcb@7e730000 {
+ compatible = "apm,xgene-pmu-mcb";
+ reg = <0x0 0x7e730000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e810000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e810000 0x0 0x1000>;
+ enable-bit-index = <0>;
+ };
+
+ pmucmc@7e850000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e850000 0x0 0x1000>;
+ enable-bit-index = <1>;
+ };
+
+ pmucmc@7e890000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e890000 0x0 0x1000>;
+ enable-bit-index = <2>;
+ };
+
+ pmucmc@7e8d0000 {
+ compatible = "apm,xgene-pmu-mc";
+ reg = <0x0 0x7e8d0000 0x0 0x1000>;
+ enable-bit-index = <3>;
+ };
+ };
+
pcie0: pcie@1f2b0000 {
status = "disabled";
device_type = "pci";
@@ -569,10 +627,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x4>;
dma-coherent;
clocks = <&pcie0clk 0>;
msi-parent = <&msi>;
@@ -594,10 +652,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x4>;
dma-coherent;
clocks = <&pcie1clk 0>;
msi-parent = <&msi>;
@@ -619,10 +677,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x4>;
dma-coherent;
clocks = <&pcie2clk 0>;
msi-parent = <&msi>;
@@ -644,10 +702,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x4>;
dma-coherent;
clocks = <&pcie3clk 0>;
msi-parent = <&msi>;
@@ -669,10 +727,10 @@
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x4
+ 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x4
+ 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x4
+ 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x4>;
dma-coherent;
clocks = <&pcie4clk 0>;
msi-parent = <&msi>;
@@ -697,6 +755,11 @@
mboxes = <&mailbox 0>;
};
+ hwmonslimpro {
+ compatible = "apm,xgene-slimpro-hwmon";
+ mboxes = <&mailbox 7>;
+ };
+
serial0: serial@1c020000 {
status = "disabled";
device_type = "serial";
@@ -923,7 +986,7 @@
/* mac address will be overwritten by the bootloader */
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "rgmii";
- phy-handle = <&menet0phy>,<&menetphy>;
+ phy-handle = <&menetphy>,<&menet0phy>;
mdio {
compatible = "apm,xgene-mdio";
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index d4a12fad8afd..d95dc408629a 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -299,6 +299,14 @@
};
};
+ pwm: pwm@66010000 {
+ compatible = "brcm,iproc-pwm";
+ reg = <0x66010000 0x28>;
+ clocks = <&osc>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
mdio_mux_iproc: mdio-mux@6602023c {
compatible = "brcm,mdio-mux-iproc";
reg = <0x6602023c 0x14>;
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 162831546e18..6328a66ed97e 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/clock/exynos7-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
compatible = "samsung,exynos7";
@@ -473,10 +474,14 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xff08>,
- <1 14 0xff08>,
- <1 11 0xff08>,
- <1 10 0xff08>;
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
pmu_system_controller: system-controller@105c0000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index 9d3e9fe1c87c..dd9e91941df4 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -56,12 +56,14 @@
gpio1 = &gpio2;
gpio2 = &gpio3;
gpio3 = &gpio4;
- serial0 = &lpuart0;
- serial1 = &lpuart1;
- serial2 = &lpuart2;
- serial3 = &lpuart3;
- serial4 = &lpuart4;
- serial5 = &lpuart5;
+ serial0 = &duart0;
+ serial1 = &duart1;
+ serial2 = &duart2;
+ serial3 = &duart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index 40846319be69..d2313e05fd22 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -52,6 +52,14 @@
aliases {
crypto = &crypto;
+ serial0 = &duart0;
+ serial1 = &duart1;
+ serial2 = &duart2;
+ serial3 = &duart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index a67e210e2019..58635f7f4668 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -247,6 +247,13 @@
bus-width = <4>;
};
+ ddr: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+ interrupts = <0 144 0x4>;
+ big-endian;
+ };
+
dspi0: dspi@2100000 {
compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi";
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
index e8801faca996..b0dd010979e7 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
@@ -57,6 +57,9 @@
serial1 = &serial1;
};
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
};
&esdhc {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
index e127f0baab19..ad0ebb8a1949 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
@@ -56,6 +56,10 @@
serial0 = &serial0;
serial1 = &serial1;
};
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
};
&esdhc {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e3b6034ea5d9..d1059765dfee 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -588,6 +588,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <4>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -612,6 +613,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <4>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -636,6 +638,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <8>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -660,6 +663,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <4>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -715,4 +719,18 @@
interrupts = <0 12 4>;
};
};
+
+ ddr1: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+ interrupts = <0 17 0x4>;
+ little-endian;
+ };
+
+ ddr2: memory-controller@1090000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1090000 0x0 0x1000>;
+ interrupts = <0 18 0x4>;
+ little-endian;
+ };
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 593c7e43de79..dba3c131c62c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -29,16 +29,56 @@
* Reserve below regions from memory node:
*
* 0x05e0,0000 - 0x05ef,ffff: used by MCU firmware at runtime
+ * 0x05f0,1000 - 0x05f0,1fff: Reboot reason
* 0x06df,f000 - 0x06df,ffff: Mailbox message data
* 0x0740,f000 - 0x0740,ffff: MCU firmware section
+ * 0x21f0,0000 - 0x21ff,ffff: pstore/ramoops buffer
* 0x3e00,0000 - 0x3fff,ffff: OP-TEE
*/
memory@0 {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x05e00000>,
- <0x00000000 0x05f00000 0x00000000 0x00eff000>,
+ <0x00000000 0x05f00000 0x00000000 0x00001000>,
+ <0x00000000 0x05f02000 0x00000000 0x00efd000>,
<0x00000000 0x06e00000 0x00000000 0x0060f000>,
- <0x00000000 0x07410000 0x00000000 0x36bf0000>;
+ <0x00000000 0x07410000 0x00000000 0x1aaf0000>,
+ <0x00000000 0x22000000 0x00000000 0x1c000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ramoops@0x21f00000 {
+ compatible = "ramoops";
+ reg = <0x0 0x21f00000 0x0 0x00100000>;
+ record-size = <0x00020000>;
+ console-size = <0x00020000>;
+ ftrace-size = <0x00020000>;
+ };
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00000000 0x08000000>;
+ linux,cma-default;
+ };
+ };
+
+ reboot-mode-syscon@5f01000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x05f01000 0x0 0x00001000>;
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x0>;
+
+ mode-normal = <0x77665501>;
+ mode-bootloader = <0x77665500>;
+ mode-recovery = <0x77665502>;
+ };
};
soc {
@@ -55,6 +95,8 @@
};
uart1: uart@f7111000 {
+ assigned-clocks = <&sys_ctrl HI6220_UART1_SRC>;
+ assigned-clock-rates = <150000000>;
status = "ok";
};
@@ -372,3 +414,43 @@
&uart3 {
label = "LS-UART1";
};
+
+&ade {
+ status = "ok";
+};
+
+&dsi {
+ status = "ok";
+
+ ports {
+ /* 1 for output port */
+ port@1 {
+ reg = <1>;
+
+ dsi_out0: endpoint@0 {
+ remote-endpoint = <&adv7533_in>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "ok";
+
+ adv7533: adv7533@39 {
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 2>;
+ pd-gpio = <&gpio0 4 0>;
+ adi,dsi-lanes = <4>;
+
+ port {
+ adv7533_in: endpoint {
+ remote-endpoint = <&dsi_out0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 4f270410a5cb..17839db585d5 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -262,6 +262,11 @@
#clock-cells = <1>;
};
+ medianoc_ade: medianoc_ade@f4520000 {
+ compatible = "syscon";
+ reg = <0x0 0xf4520000 0x0 0x4000>;
+ };
+
stub_clock: stub_clock {
compatible = "hisilicon,hi6220-stub-clk";
hisilicon,hi6220-clk-sram = <&sram>;
@@ -766,6 +771,7 @@
interrupts = <0x0 0x48 0x4>;
clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
bus-width = <0x8>;
vmmc-supply = <&ldo19>;
pinctrl-names = "default";
@@ -779,12 +785,16 @@
card-detect-delay = <200>;
hisilicon,peripheral-syscon = <&ao_ctrl>;
cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
reg = <0x0 0xf723e000 0x0 0x1000>;
interrupts = <0x0 0x49 0x4>;
#address-cells = <0x1>;
#size-cells = <0x0>;
clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
vqmmc-supply = <&ldo7>;
vmmc-supply = <&ldo10>;
bus-width = <0x4>;
@@ -802,6 +812,7 @@
interrupts = <0x0 0x4a 0x4>;
clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
bus-width = <0x4>;
broken-cd;
pinctrl-names = "default", "idle";
@@ -850,5 +861,55 @@
};
};
};
+
+ ade: ade@f4100000 {
+ compatible = "hisilicon,hi6220-ade";
+ reg = <0x0 0xf4100000 0x0 0x7800>;
+ reg-names = "ade_base";
+ hisilicon,noc-syscon = <&medianoc_ade>;
+ resets = <&media_ctrl MEDIA_ADE>;
+ interrupts = <0 115 4>; /* ldi interrupt */
+
+ clocks = <&media_ctrl HI6220_ADE_CORE>,
+ <&media_ctrl HI6220_CODEC_JPEG>,
+ <&media_ctrl HI6220_ADE_PIX_SRC>;
+ /*clock name*/
+ clock-names = "clk_ade_core",
+ "clk_codec_jpeg",
+ "clk_ade_pix";
+
+ assigned-clocks = <&media_ctrl HI6220_ADE_CORE>,
+ <&media_ctrl HI6220_CODEC_JPEG>;
+ assigned-clock-rates = <360000000>, <288000000>;
+ dma-coherent;
+ status = "disabled";
+
+ port {
+ ade_out: endpoint {
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+ };
+
+ dsi: dsi@f4107800 {
+ compatible = "hisilicon,hi6220-dsi";
+ reg = <0x0 0xf4107800 0x0 0x100>;
+ clocks = <&media_ctrl HI6220_DSI_PCLK>;
+ clock-names = "pclk";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 0 for input port */
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&ade_out>;
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index bf322ed038b8..4b472a302cd8 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -300,11 +300,6 @@
clock-frequency = <200000000>;
};
- peri_c_subctrl: syscon@80000000 {
- compatible = "hisilicon,hip05-perisubc", "syscon";
- reg = < 0x0 0x80000000 0x0 0x10000>;
- };
-
uart0: uart@80300000 {
compatible = "snps,dw-apb-uart";
reg = <0x0 0x80300000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
deleted file mode 100644
index b6a130c2e5a4..000000000000
--- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
+++ /dev/null
@@ -1,180 +0,0 @@
-soc0: soc@000000000 {
- #address-cells = <2>;
- #size-cells = <2>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0x0 0x0 0x0 0x1 0x0>;
- chip-id = <0>;
-
- soc0_mdio0: mdio@803c0000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "hisilicon,hns-mdio";
- reg = <0x0 0x803c0000 0x0 0x10000>;
- subctrl-vbase = <&peri_c_subctrl>;
-
- soc0_phy0: ethernet-phy@0 {
- reg = <0x0>;
- compatible = "ethernet-phy-ieee802.3-c22";
- };
- soc0_phy1: ethernet-phy@1 {
- reg = <0x1>;
- compatible = "ethernet-phy-ieee802.3-c22";
- };
- };
-
- dsaf0: dsa@c7000000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "hisilicon,hns-dsaf-v1";
- mode = "6port-16rss";
- interrupt-parent = <&mbigen_dsa>;
-
- reg = <0x0 0xc5000000 0x0 0x890000
- 0x0 0xc7000000 0x0 0x60000
- >;
-
- reg-names = "ppe-base","dsaf-base";
- subctrl-syscon = <&dsaf_subctrl>;
- reset-field-offset = <0>;
- interrupts = <
- /* [14] ge fifo err 8 / xge 6**/
- 149 0x4 150 0x4 151 0x4 152 0x4
- 153 0x4 154 0x4 26 0x4 27 0x4
- 155 0x4 156 0x4 157 0x4 158 0x4 159 0x4 160 0x4
- /* [12] rcb com 4*3**/
- 0x6 0x4 0x7 0x4 0x8 0x4 0x9 0x4
- 16 0x4 17 0x4 18 0x4 19 0x4
- 22 0x4 23 0x4 24 0x4 25 0x4
- /* [8] ppe tnl 0-7***/
- 0x0 0x4 0x1 0x4 0x2 0x4 0x3 0x4
- 0x4 0x4 0x5 0x4 12 0x4 13 0x4
- /* [21] dsaf event int 3+18**/
- 128 0x4 129 0x4 130 0x4
- 0x83 0x4 0x84 0x4 0x85 0x4 0x86 0x4 0x87 0x4 0x88 0x4
- 0x89 0x4 0x8a 0x4 0x8b 0x4 0x8c 0x4 0x8d 0x4 0x8e 0x4
- 0x8f 0x4 0x90 0x4 0x91 0x4 0x92 0x4 0x93 0x4 0x94 0x4
- /* [4] debug rcb 2*2*/
- 0xe 0x1 0xf 0x1 0x14 0x1 0x15 0x1
- /* [256] service rcb 2*128*/
- 0x180 0x1 0x181 0x1 0x182 0x1 0x183 0x1
- 0x184 0x1 0x185 0x1 0x186 0x1 0x187 0x1
- 0x188 0x1 0x189 0x1 0x18a 0x1 0x18b 0x1
- 0x18c 0x1 0x18d 0x1 0x18e 0x1 0x18f 0x1
- 0x190 0x1 0x191 0x1 0x192 0x1 0x193 0x1
- 0x194 0x1 0x195 0x1 0x196 0x1 0x197 0x1
- 0x198 0x1 0x199 0x1 0x19a 0x1 0x19b 0x1
- 0x19c 0x1 0x19d 0x1 0x19e 0x1 0x19f 0x1
- 0x1a0 0x1 0x1a1 0x1 0x1a2 0x1 0x1a3 0x1
- 0x1a4 0x1 0x1a5 0x1 0x1a6 0x1 0x1a7 0x1
- 0x1a8 0x1 0x1a9 0x1 0x1aa 0x1 0x1ab 0x1
- 0x1ac 0x1 0x1ad 0x1 0x1ae 0x1 0x1af 0x1
- 0x1b0 0x1 0x1b1 0x1 0x1b2 0x1 0x1b3 0x1
- 0x1b4 0x1 0x1b5 0x1 0x1b6 0x1 0x1b7 0x1
- 0x1b8 0x1 0x1b9 0x1 0x1ba 0x1 0x1bb 0x1
- 0x1bc 0x1 0x1bd 0x1 0x1be 0x1 0x1bf 0x1
- 0x1c0 0x1 0x1c1 0x1 0x1c2 0x1 0x1c3 0x1
- 0x1c4 0x1 0x1c5 0x1 0x1c6 0x1 0x1c7 0x1
- 0x1c8 0x1 0x1c9 0x1 0x1ca 0x1 0x1cb 0x1
- 0x1cc 0x1 0x1cd 0x1 0x1ce 0x1 0x1cf 0x1
- 0x1d0 0x1 0x1d1 0x1 0x1d2 0x1 0x1d3 0x1
- 0x1d4 0x1 0x1d5 0x1 0x1d6 0x1 0x1d7 0x1
- 0x1d8 0x1 0x1d9 0x1 0x1da 0x1 0x1db 0x1
- 0x1dc 0x1 0x1dd 0x1 0x1de 0x1 0x1df 0x1
- 0x1e0 0x1 0x1e1 0x1 0x1e2 0x1 0x1e3 0x1
- 0x1e4 0x1 0x1e5 0x1 0x1e6 0x1 0x1e7 0x1
- 0x1e8 0x1 0x1e9 0x1 0x1ea 0x1 0x1eb 0x1
- 0x1ec 0x1 0x1ed 0x1 0x1ee 0x1 0x1ef 0x1
- 0x1f0 0x1 0x1f1 0x1 0x1f2 0x1 0x1f3 0x1
- 0x1f4 0x1 0x1f5 0x1 0x1f6 0x1 0x1f7 0x1
- 0x1f8 0x1 0x1f9 0x1 0x1fa 0x1 0x1fb 0x1
- 0x1fc 0x1 0x1fd 0x1 0x1fe 0x1 0x1ff 0x1
- 0x200 0x1 0x201 0x1 0x202 0x1 0x203 0x1
- 0x204 0x1 0x205 0x1 0x206 0x1 0x207 0x1
- 0x208 0x1 0x209 0x1 0x20a 0x1 0x20b 0x1
- 0x20c 0x1 0x20d 0x1 0x20e 0x1 0x20f 0x1
- 0x210 0x1 0x211 0x1 0x212 0x1 0x213 0x1
- 0x214 0x1 0x215 0x1 0x216 0x1 0x217 0x1
- 0x218 0x1 0x219 0x1 0x21a 0x1 0x21b 0x1
- 0x21c 0x1 0x21d 0x1 0x21e 0x1 0x21f 0x1
- 0x220 0x1 0x221 0x1 0x222 0x1 0x223 0x1
- 0x224 0x1 0x225 0x1 0x226 0x1 0x227 0x1
- 0x228 0x1 0x229 0x1 0x22a 0x1 0x22b 0x1
- 0x22c 0x1 0x22d 0x1 0x22e 0x1 0x22f 0x1
- 0x230 0x1 0x231 0x1 0x232 0x1 0x233 0x1
- 0x234 0x1 0x235 0x1 0x236 0x1 0x237 0x1
- 0x238 0x1 0x239 0x1 0x23a 0x1 0x23b 0x1
- 0x23c 0x1 0x23d 0x1 0x23e 0x1 0x23f 0x1
- 0x240 0x1 0x241 0x1 0x242 0x1 0x243 0x1
- 0x244 0x1 0x245 0x1 0x246 0x1 0x247 0x1
- 0x248 0x1 0x249 0x1 0x24a 0x1 0x24b 0x1
- 0x24c 0x1 0x24d 0x1 0x24e 0x1 0x24f 0x1
- 0x250 0x1 0x251 0x1 0x252 0x1 0x253 0x1
- 0x254 0x1 0x255 0x1 0x256 0x1 0x257 0x1
- 0x258 0x1 0x259 0x1 0x25a 0x1 0x25b 0x1
- 0x25c 0x1 0x25d 0x1 0x25e 0x1 0x25f 0x1
- 0x260 0x1 0x261 0x1 0x262 0x1 0x263 0x1
- 0x264 0x1 0x265 0x1 0x266 0x1 0x267 0x1
- 0x268 0x1 0x269 0x1 0x26a 0x1 0x26b 0x1
- 0x26c 0x1 0x26d 0x1 0x26e 0x1 0x26f 0x1
- 0x270 0x1 0x271 0x1 0x272 0x1 0x273 0x1
- 0x274 0x1 0x275 0x1 0x276 0x1 0x277 0x1
- 0x278 0x1 0x279 0x1 0x27a 0x1 0x27b 0x1
- 0x27c 0x1 0x27d 0x1 0x27e 0x1 0x27f 0x1>;
- buf-size = <4096>;
- desc-num = <1024>;
- dma-coherent;
-
- port@0 {
- reg = <0>;
- serdes-syscon = <&serdes_ctrl0>;
- };
- port@1 {
- reg = <1>;
- serdes-syscon = <&serdes_ctrl0>;
- };
- port@4 {
- reg = <4>;
- phy-handle = <&soc0_phy0>;
- serdes-syscon = <&serdes_ctrl1>;
- };
- port@5 {
- reg = <5>;
- phy-handle = <&soc0_phy1>;
- serdes-syscon = <&serdes_ctrl1>;
- };
- };
-
- eth0: ethernet@0{
- compatible = "hisilicon,hns-nic-v1";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <0>;
- local-mac-address = [00 00 00 01 00 58];
- status = "disabled";
- dma-coherent;
- };
- eth1: ethernet@1{
- compatible = "hisilicon,hns-nic-v1";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <1>;
- local-mac-address = [00 00 00 01 00 59];
- status = "disabled";
- dma-coherent;
- };
- eth2: ethernet@4{
- compatible = "hisilicon,hns-nic-v1";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <4>;
- local-mac-address = [00 00 00 01 00 5a];
- status = "disabled";
- dma-coherent;
- };
- eth3: ethernet@5{
- compatible = "hisilicon,hns-nic-v1";
- ae-handle = <&dsaf0>;
- port-idx-in-ae = <5>;
- local-mac-address = [00 00 00 01 00 5b];
- status = "disabled";
- dma-coherent;
- };
-};
diff --git a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
index f3e5323e430b..f54b28359607 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
@@ -25,6 +25,34 @@
chosen { };
};
+&eth0 {
+ status = "ok";
+};
+
+&eth1 {
+ status = "ok";
+};
+
+&eth2 {
+ status = "ok";
+};
+
+&eth3 {
+ status = "ok";
+};
+
+&sas0 {
+ status = "ok";
+};
+
+&sas1 {
+ status = "ok";
+};
+
+&sas2 {
+ status = "ok";
+};
+
&usb_ohci {
status = "ok";
};
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index 5927bc472f1b..b548763366dd 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -277,6 +277,39 @@
#interrupt-cells = <2>;
num-pins = <2>;
};
+
+ mbigen_sas1: intc_sas1 {
+ msi-parent = <&its_dsa 0x40000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
+
+ mbigen_sas2: intc_sas2 {
+ msi-parent = <&its_dsa 0x40040>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
+ };
+
+ mbigen_dsa@c0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x0 0xc0080000 0x0 0x10000>;
+
+ mbigen_dsaf0: intc_dsaf0 {
+ msi-parent = <&its_dsa 0x40800>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <409>;
+ };
+
+ mbigen_sas0: intc-sas0 {
+ msi-parent = <&its_dsa 0x40900>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
};
soc {
@@ -302,6 +335,338 @@
dma-coherent;
status = "disabled";
};
+
+ peri_c_subctrl: sub_ctrl_c@60000000 {
+ compatible = "hisilicon,peri-subctrl","syscon";
+ reg = <0 0x60000000 0x0 0x10000>;
+ };
+
+ dsa_subctrl: dsa_subctrl@c0000000 {
+ compatible = "hisilicon,dsa-subctrl", "syscon";
+ reg = <0x0 0xc0000000 0x0 0x10000>;
+ };
+
+ pcie_subctl: pcie_subctl@a0000000 {
+ compatible = "hisilicon,pcie-sas-subctrl", "syscon";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ };
+
+ serdes_ctrl: sds_ctrl@c2200000 {
+ compatible = "syscon";
+ reg = <0 0xc2200000 0x0 0x80000>;
+ };
+
+ mdio@603c0000 {
+ compatible = "hisilicon,hns-mdio";
+ reg = <0x0 0x603c0000 0x0 0x1000>;
+ subctrl-vbase = <&peri_c_subctrl 0x338 0xa38 0x531c 0x5a1c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+
+ dsaf0: dsa@c7000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "hisilicon,hns-dsaf-v2";
+ mode = "6port-16rss";
+ reg = <0x0 0xc5000000 0x0 0x890000
+ 0x0 0xc7000000 0x0 0x600000>;
+ reg-names = "ppe-base", "dsaf-base";
+ interrupt-parent = <&mbigen_dsaf0>;
+ subctrl-syscon = <&dsa_subctrl>;
+ reset-field-offset = <0>;
+ interrupts =
+ <576 1>, <577 1>, <578 1>, <579 1>, <580 1>,
+ <581 1>, <582 1>, <583 1>, <584 1>, <585 1>,
+ <586 1>, <587 1>, <588 1>, <589 1>, <590 1>,
+ <591 1>, <592 1>, <593 1>, <594 1>, <595 1>,
+ <596 1>, <597 1>, <598 1>, <599 1>, <600 1>,
+ <960 1>, <961 1>, <962 1>, <963 1>, <964 1>,
+ <965 1>, <966 1>, <967 1>, <968 1>, <969 1>,
+ <970 1>, <971 1>, <972 1>, <973 1>, <974 1>,
+ <975 1>, <976 1>, <977 1>, <978 1>, <979 1>,
+ <980 1>, <981 1>, <982 1>, <983 1>, <984 1>,
+ <985 1>, <986 1>, <987 1>, <988 1>, <989 1>,
+ <990 1>, <991 1>, <992 1>, <993 1>, <994 1>,
+ <995 1>, <996 1>, <997 1>, <998 1>, <999 1>,
+ <1000 1>, <1001 1>, <1002 1>, <1003 1>, <1004 1>,
+ <1005 1>, <1006 1>, <1007 1>, <1008 1>, <1009 1>,
+ <1010 1>, <1011 1>, <1012 1>, <1013 1>, <1014 1>,
+ <1015 1>, <1016 1>, <1017 1>, <1018 1>, <1019 1>,
+ <1020 1>, <1021 1>, <1022 1>, <1023 1>, <1024 1>,
+ <1025 1>, <1026 1>, <1027 1>, <1028 1>, <1029 1>,
+ <1030 1>, <1031 1>, <1032 1>, <1033 1>, <1034 1>,
+ <1035 1>, <1036 1>, <1037 1>, <1038 1>, <1039 1>,
+ <1040 1>, <1041 1>, <1042 1>, <1043 1>, <1044 1>,
+ <1045 1>, <1046 1>, <1047 1>, <1048 1>, <1049 1>,
+ <1050 1>, <1051 1>, <1052 1>, <1053 1>, <1054 1>,
+ <1055 1>, <1056 1>, <1057 1>, <1058 1>, <1059 1>,
+ <1060 1>, <1061 1>, <1062 1>, <1063 1>, <1064 1>,
+ <1065 1>, <1066 1>, <1067 1>, <1068 1>, <1069 1>,
+ <1070 1>, <1071 1>, <1072 1>, <1073 1>, <1074 1>,
+ <1075 1>, <1076 1>, <1077 1>, <1078 1>, <1079 1>,
+ <1080 1>, <1081 1>, <1082 1>, <1083 1>, <1084 1>,
+ <1085 1>, <1086 1>, <1087 1>, <1088 1>, <1089 1>,
+ <1090 1>, <1091 1>, <1092 1>, <1093 1>, <1094 1>,
+ <1095 1>, <1096 1>, <1097 1>, <1098 1>, <1099 1>,
+ <1100 1>, <1101 1>, <1102 1>, <1103 1>, <1104 1>,
+ <1105 1>, <1106 1>, <1107 1>, <1108 1>, <1109 1>,
+ <1110 1>, <1111 1>, <1112 1>, <1113 1>, <1114 1>,
+ <1115 1>, <1116 1>, <1117 1>, <1118 1>, <1119 1>,
+ <1120 1>, <1121 1>, <1122 1>, <1123 1>, <1124 1>,
+ <1125 1>, <1126 1>, <1127 1>, <1128 1>, <1129 1>,
+ <1130 1>, <1131 1>, <1132 1>, <1133 1>, <1134 1>,
+ <1135 1>, <1136 1>, <1137 1>, <1138 1>, <1139 1>,
+ <1140 1>, <1141 1>, <1142 1>, <1143 1>, <1144 1>,
+ <1145 1>, <1146 1>, <1147 1>, <1148 1>, <1149 1>,
+ <1150 1>, <1151 1>, <1152 1>, <1153 1>, <1154 1>,
+ <1155 1>, <1156 1>, <1157 1>, <1158 1>, <1159 1>,
+ <1160 1>, <1161 1>, <1162 1>, <1163 1>, <1164 1>,
+ <1165 1>, <1166 1>, <1167 1>, <1168 1>, <1169 1>,
+ <1170 1>, <1171 1>, <1172 1>, <1173 1>, <1174 1>,
+ <1175 1>, <1176 1>, <1177 1>, <1178 1>, <1179 1>,
+ <1180 1>, <1181 1>, <1182 1>, <1183 1>, <1184 1>,
+ <1185 1>, <1186 1>, <1187 1>, <1188 1>, <1189 1>,
+ <1190 1>, <1191 1>, <1192 1>, <1193 1>, <1194 1>,
+ <1195 1>, <1196 1>, <1197 1>, <1198 1>, <1199 1>,
+ <1200 1>, <1201 1>, <1202 1>, <1203 1>, <1204 1>,
+ <1205 1>, <1206 1>, <1207 1>, <1208 1>, <1209 1>,
+ <1210 1>, <1211 1>, <1212 1>, <1213 1>, <1214 1>,
+ <1215 1>, <1216 1>, <1217 1>, <1218 1>, <1219 1>,
+ <1220 1>, <1221 1>, <1222 1>, <1223 1>, <1224 1>,
+ <1225 1>, <1226 1>, <1227 1>, <1228 1>, <1229 1>,
+ <1230 1>, <1231 1>, <1232 1>, <1233 1>, <1234 1>,
+ <1235 1>, <1236 1>, <1237 1>, <1238 1>, <1239 1>,
+ <1240 1>, <1241 1>, <1242 1>, <1243 1>, <1244 1>,
+ <1245 1>, <1246 1>, <1247 1>, <1248 1>, <1249 1>,
+ <1250 1>, <1251 1>, <1252 1>, <1253 1>, <1254 1>,
+ <1255 1>, <1256 1>, <1257 1>, <1258 1>, <1259 1>,
+ <1260 1>, <1261 1>, <1262 1>, <1263 1>, <1264 1>,
+ <1265 1>, <1266 1>, <1267 1>, <1268 1>, <1269 1>,
+ <1270 1>, <1271 1>, <1272 1>, <1273 1>, <1274 1>,
+ <1275 1>, <1276 1>, <1277 1>, <1278 1>, <1279 1>,
+ <1280 1>, <1281 1>, <1282 1>, <1283 1>, <1284 1>,
+ <1285 1>, <1286 1>, <1287 1>, <1288 1>, <1289 1>,
+ <1290 1>, <1291 1>, <1292 1>, <1293 1>, <1294 1>,
+ <1295 1>, <1296 1>, <1297 1>, <1298 1>, <1299 1>,
+ <1300 1>, <1301 1>, <1302 1>, <1303 1>, <1304 1>,
+ <1305 1>, <1306 1>, <1307 1>, <1308 1>, <1309 1>,
+ <1310 1>, <1311 1>, <1312 1>, <1313 1>, <1314 1>,
+ <1315 1>, <1316 1>, <1317 1>, <1318 1>, <1319 1>,
+ <1320 1>, <1321 1>, <1322 1>, <1323 1>, <1324 1>,
+ <1325 1>, <1326 1>, <1327 1>, <1328 1>, <1329 1>,
+ <1330 1>, <1331 1>, <1332 1>, <1333 1>, <1334 1>,
+ <1335 1>, <1336 1>, <1337 1>, <1338 1>, <1339 1>,
+ <1340 1>, <1341 1>, <1342 1>, <1343 1>;
+
+ desc-num = <0x400>;
+ buf-size = <0x1000>;
+ dma-coherent;
+
+ port@0 {
+ reg = <0>;
+ serdes-syscon = <&serdes_ctrl>;
+ port-rst-offset = <0>;
+ port-mode-offset = <0>;
+ media-type = "fiber";
+ };
+
+ port@1 {
+ reg = <1>;
+ serdes-syscon = <&serdes_ctrl>;
+ port-rst-offset = <1>;
+ port-mode-offset = <1>;
+ media-type = "fiber";
+ };
+
+ port@4 {
+ reg = <4>;
+ phy-handle = <&phy0>;
+ serdes-syscon = <&serdes_ctrl>;
+ port-rst-offset = <4>;
+ port-mode-offset = <2>;
+ media-type = "copper";
+ };
+
+ port@5 {
+ reg = <5>;
+ phy-handle = <&phy1>;
+ serdes-syscon = <&serdes_ctrl>;
+ port-rst-offset = <5>;
+ port-mode-offset = <3>;
+ media-type = "copper";
+ };
+ };
+
+ eth0: ethernet@4 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <4>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth1: ethernet@5 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <5>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth2: ethernet@0 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <0>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth3: ethernet@1 {
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <1>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ sas0: sas@c3000000 {
+ compatible = "hisilicon,hip06-sas-v2";
+ reg = <0 0xc3000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&dsa_subctrl>;
+ ctrl-reset-reg = <0xa60>;
+ ctrl-reset-sts-reg = <0x5a30>;
+ ctrl-clock-ena-reg = <0x338>;
+ queue-count = <16>;
+ phy-count = <8>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas0>;
+ interrupts = <64 4>,<65 4>,<66 4>,<67 4>,<68 4>,
+ <69 4>,<70 4>,<71 4>,<72 4>,<73 4>,
+ <75 4>,<76 4>,<77 4>,<78 4>,<79 4>,
+ <80 4>,<81 4>,<82 4>,<83 4>,<84 4>,
+ <85 4>,<86 4>,<87 4>,<88 4>,<89 4>,
+ <90 4>,<91 4>,<92 4>,<93 4>,<94 4>,
+ <95 4>,<96 4>,<97 4>,<98 4>,<99 4>,
+ <100 4>,<101 4>,<102 4>,<103 4>,<104 4>,
+ <105 4>,<106 4>,<107 4>,<108 4>,<109 4>,
+ <110 4>,<111 4>,<112 4>,<113 4>,<114 4>,
+ <115 4>,<116 4>,<117 4>,<118 4>,<119 4>,
+ <120 4>,<121 4>,<122 4>,<123 4>,<124 4>,
+ <125 4>,<126 4>,<127 4>,<128 4>,<129 4>,
+ <130 4>,<131 4>,<132 4>,<133 4>,<134 4>,
+ <135 4>,<136 4>,<137 4>,<138 4>,<139 4>,
+ <140 4>,<141 4>,<142 4>,<143 4>,<144 4>,
+ <145 4>,<146 4>,<147 4>,<148 4>,<149 4>,
+ <150 4>,<151 4>,<152 4>,<153 4>,<154 4>,
+ <155 4>,<156 4>,<157 4>,<158 4>,<159 4>,
+ <160 4>,<601 1>,<602 1>,<603 1>,<604 1>,
+ <605 1>,<606 1>,<607 1>,<608 1>,<609 1>,
+ <610 1>,<611 1>,<612 1>,<613 1>,<614 1>,
+ <615 1>,<616 1>,<617 1>,<618 1>,<619 1>,
+ <620 1>,<621 1>,<622 1>,<623 1>,<624 1>,
+ <625 1>,<626 1>,<627 1>,<628 1>,<629 1>,
+ <630 1>,<631 1>,<632 1>;
+ status = "disabled";
+ };
+
+ sas1: sas@a2000000 {
+ compatible = "hisilicon,hip06-sas-v2";
+ reg = <0 0xa2000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&pcie_subctl>;
+ am-max-trans;
+ ctrl-reset-reg = <0xa18>;
+ ctrl-reset-sts-reg = <0x5a0c>;
+ ctrl-clock-ena-reg = <0x318>;
+ queue-count = <16>;
+ phy-count = <8>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas1>;
+ interrupts = <64 4>,<65 4>,<66 4>,<67 4>,<68 4>,
+ <69 4>,<70 4>,<71 4>,<72 4>,<73 4>,
+ <74 4>,<75 4>,<76 4>,<77 4>,<78 4>,
+ <79 4>,<80 4>,<81 4>,<82 4>,<83 4>,
+ <84 4>,<85 4>,<86 4>,<87 4>,<88 4>,
+ <89 4>,<90 4>,<91 4>,<92 4>,<93 4>,
+ <94 4>,<95 4>,<96 4>,<97 4>,<98 4>,
+ <99 4>,<100 4>,<101 4>,<102 4>,<103 4>,
+ <104 4>,<105 4>,<106 4>,<107 4>,<108 4>,
+ <109 4>,<110 4>,<111 4>,<112 4>,<113 4>,
+ <114 4>,<115 4>,<116 4>,<117 4>,<118 4>,
+ <119 4>,<120 4>,<121 4>,<122 4>,<123 4>,
+ <124 4>,<125 4>,<126 4>,<127 4>,<128 4>,
+ <129 4>,<130 4>,<131 4>,<132 4>,<133 4>,
+ <134 4>,<135 4>,<136 4>,<137 4>,<138 4>,
+ <139 4>,<140 4>,<141 4>,<142 4>,<143 4>,
+ <144 4>,<145 4>,<146 4>,<147 4>,<148 4>,
+ <149 4>,<150 4>,<151 4>,<152 4>,<153 4>,
+ <154 4>,<155 4>,<156 4>,<157 4>,<158 4>,
+ <159 4>,<576 1>,<577 1>,<578 1>,<579 1>,
+ <580 1>,<581 1>,<582 1>,<583 1>,<584 1>,
+ <585 1>,<586 1>,<587 1>,<588 1>,<589 1>,
+ <590 1>,<591 1>,<592 1>,<593 1>,<594 1>,
+ <595 1>,<596 1>,<597 1>,<598 1>,<599 1>,
+ <600 1>,<601 1>,<602 1>,<603 1>,<604 1>,
+ <605 1>,<606 1>,<607 1>;
+ status = "disabled";
+ };
+
+ sas2: sas@a3000000 {
+ compatible = "hisilicon,hip06-sas-v2";
+ reg = <0 0xa3000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&pcie_subctl>;
+ ctrl-reset-reg = <0xae0>;
+ ctrl-reset-sts-reg = <0x5a70>;
+ ctrl-clock-ena-reg = <0x3a8>;
+ queue-count = <16>;
+ phy-count = <9>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas2>;
+ interrupts = <192 4>,<193 4>,<194 4>,<195 4>,<196 4>,
+ <197 4>,<198 4>,<199 4>,<200 4>,<201 4>,
+ <202 4>,<203 4>,<204 4>,<205 4>,<206 4>,
+ <207 4>,<208 4>,<209 4>,<210 4>,<211 4>,
+ <212 4>,<213 4>,<214 4>,<215 4>,<216 4>,
+ <217 4>,<218 4>,<219 4>,<220 4>,<221 4>,
+ <222 4>,<223 4>,<224 4>,<225 4>,<226 4>,
+ <227 4>,<228 4>,<229 4>,<230 4>,<231 4>,
+ <232 4>,<233 4>,<234 4>,<235 4>,<236 4>,
+ <237 4>,<238 4>,<239 4>,<240 4>,<241 4>,
+ <242 4>,<243 4>,<244 4>,<245 4>,<246 4>,
+ <247 4>,<248 4>,<249 4>,<250 4>,<251 4>,
+ <252 4>,<253 4>,<254 4>,<255 4>,<256 4>,
+ <257 4>,<258 4>,<259 4>,<260 4>,<261 4>,
+ <262 4>,<263 4>,<264 4>,<265 4>,<266 4>,
+ <267 4>,<268 4>,<269 4>,<270 4>,<271 4>,
+ <272 4>,<273 4>,<274 4>,<275 4>,<276 4>,
+ <277 4>,<278 4>,<279 4>,<280 4>,<281 4>,
+ <282 4>,<283 4>,<284 4>,<285 4>,<286 4>,
+ <287 4>,<608 1>,<609 1>,<610 1>,<611 1>,
+ <612 1>,<613 1>,<614 1>,<615 1>,<616 1>,
+ <617 1>,<618 1>,<619 1>,<620 1>,<621 1>,
+ <622 1>,<623 1>,<624 1>,<625 1>,<626 1>,
+ <627 1>,<628 1>,<629 1>,<630 1>,<631 1>,
+ <632 1>,<633 1>,<634 1>,<635 1>,<636 1>,
+ <637 1>,<638 1>,<639 1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index 308468d377d0..cf3953124cef 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -5,6 +5,7 @@ dtb-$(CONFIG_ARCH_BERLIN) += berlin4ct-stb.dtb
# Mvebu SoC Family
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/marvell/armada-8020.dtsi b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
index 3753c1c6d54d..048e5cf5160e 100644
--- a/arch/arm64/boot/dts/marvell/armada-8020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
@@ -47,6 +47,7 @@
#include "armada-ap806-dual.dtsi"
#include "armada-cp110-master.dtsi"
+#include "armada-cp110-slave.dtsi"
/ {
model = "Marvell Armada 8020";
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
new file mode 100644
index 000000000000..6e6f182fb297
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Device Tree file for Marvell Armada 8040 Development board platform
+ */
+
+#include "armada-8040.dtsi"
+
+/ {
+ model = "Marvell Armada 8040 DB board";
+ compatible = "marvell,armada8040-db", "marvell,armada8040",
+ "marvell,armada-ap806-quad", "marvell,armada-ap806";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@00000000 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x200000>;
+ };
+ partition@400000 {
+ label = "Filesystem";
+ reg = <0x200000 0xce0000>;
+ };
+ };
+ };
+};
+
+/* Accessible over the mini-USB CON9 connector on the main board */
+&uart0 {
+ status = "okay";
+};
+
+
+/* CON5 on CP0 expansion */
+&cpm_pcie2 {
+ status = "okay";
+};
+
+&cpm_i2c0 {
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+/* CON4 on CP0 expansion */
+&cpm_sata0 {
+ status = "okay";
+};
+
+/* CON9 on CP0 expansion */
+&cpm_usb3_0 {
+ status = "okay";
+};
+
+/* CON10 on CP0 expansion */
+&cpm_usb3_1 {
+ status = "okay";
+};
+
+/* CON5 on CP1 expansion */
+&cps_pcie2 {
+ status = "okay";
+};
+
+&cps_i2c0 {
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+/* CON4 on CP1 expansion */
+&cps_sata0 {
+ status = "okay";
+};
+
+/* CON9 on CP1 expansion */
+&cps_usb3_0 {
+ status = "okay";
+};
+
+/* CON10 on CP1 expansion */
+&cps_usb3_1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
index 8bd0d8f8ad4c..9c1b28c47683 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
@@ -47,6 +47,7 @@
#include "armada-ap806-quad.dtsi"
#include "armada-cp110-master.dtsi"
+#include "armada-cp110-slave.dtsi"
/ {
model = "Marvell Armada 8040";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index c2a6745f168c..7b6136182ad0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -128,6 +128,12 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
+ pmu {
+ compatible = "arm,cortex-a72-pmu";
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+
odmi: odmi@300000 {
compatible = "marvell,odmi-controller";
interrupt-controller;
@@ -140,6 +146,14 @@
marvell,spi-base = <128>, <136>, <144>, <152>;
};
+ pic: interrupt-controller@3f0100 {
+ compatible = "marvell,armada-8k-pic";
+ reg = <0x3f0100 0x10>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
xor@400000 {
compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x400000 0x1000>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index da31bbbbb59e..e5e3ed678b6f 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -176,6 +176,7 @@
#interrupt-cells = <1>;
device_type = "pci";
dma-coherent;
+ msi-parent = <&gic_v2m0>;
bus-range = <0 0xff>;
ranges =
@@ -201,6 +202,7 @@
#interrupt-cells = <1>;
device_type = "pci";
dma-coherent;
+ msi-parent = <&gic_v2m0>;
bus-range = <0 0xff>;
ranges =
@@ -227,6 +229,7 @@
#interrupt-cells = <1>;
device_type = "pci";
dma-coherent;
+ msi-parent = <&gic_v2m0>;
bus-range = <0 0xff>;
ranges =
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
new file mode 100644
index 000000000000..842fb333285c
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Device Tree file for Marvell Armada CP110 Slave.
+ */
+
+/ {
+ cp110-slave {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ config-space {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges = <0x0 0x0 0xf4000000 0x2000000>;
+
+ cps_syscon0: system-controller@440000 {
+ compatible = "marvell,cp110-system-controller0",
+ "syscon";
+ reg = <0x440000 0x1000>;
+ #clock-cells = <2>;
+ core-clock-output-names =
+ "cps-apll", "cps-ppv2-core", "cps-eip",
+ "cps-core", "cps-nand-core";
+ gate-clock-output-names =
+ "cps-audio", "cps-communit", "cps-nand",
+ "cps-ppv2", "cps-sdio", "cps-mg-domain",
+ "cps-mg-core", "cps-xor1", "cps-xor0",
+ "cps-gop-dp", "none", "cps-pcie_x10",
+ "cps-pcie_x11", "cps-pcie_x4", "cps-pcie-xor",
+ "cps-sata", "cps-sata-usb", "cps-main",
+ "cps-sd-mmc", "none", "none",
+ "cps-slow-io", "cps-usb3h0", "cps-usb3h1",
+ "cps-usb3dev", "cps-eip150", "cps-eip197";
+ };
+
+ cps_sata0: sata@540000 {
+ compatible = "marvell,armada-8k-ahci";
+ reg = <0x540000 0x30000>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 15>;
+ status = "disabled";
+ };
+
+ cps_usb3_0: usb3@500000 {
+ compatible = "marvell,armada-8k-xhci",
+ "generic-xhci";
+ reg = <0x500000 0x4000>;
+ dma-coherent;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 22>;
+ status = "disabled";
+ };
+
+ cps_usb3_1: usb3@510000 {
+ compatible = "marvell,armada-8k-xhci",
+ "generic-xhci";
+ reg = <0x510000 0x4000>;
+ dma-coherent;
+ interrupts = <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 23>;
+ status = "disabled";
+ };
+
+ cps_xor0: xor@6a0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6a0000 0x1000>,
+ <0x6b0000 0x1000>;
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+ clocks = <&cps_syscon0 1 8>;
+ };
+
+ cps_xor1: xor@6c0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6c0000 0x1000>,
+ <0x6d0000 0x1000>;
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+ clocks = <&cps_syscon0 1 7>;
+ };
+
+ cps_spi0: spi@700600 {
+ compatible = "marvell,armada-380-spi";
+ reg = <0x700600 0x50>;
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ cell-index = <1>;
+ clocks = <&cps_syscon0 0 3>;
+ status = "disabled";
+ };
+
+ cps_spi1: spi@700680 {
+ compatible = "marvell,armada-380-spi";
+ reg = <0x700680 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ clocks = <&cps_syscon0 1 21>;
+ status = "disabled";
+ };
+
+ cps_i2c0: i2c@701000 {
+ compatible = "marvell,mv78230-i2c";
+ reg = <0x701000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 21>;
+ status = "disabled";
+ };
+
+ cps_i2c1: i2c@701100 {
+ compatible = "marvell,mv78230-i2c";
+ reg = <0x701100 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cps_syscon0 1 21>;
+ status = "disabled";
+ };
+ };
+
+ cps_pcie0: pcie@f4600000 {
+ compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+ reg = <0 0xf4600000 0 0x10000>,
+ <0 0xfaf00000 0 0x80000>;
+ reg-names = "ctrl", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+
+ bus-range = <0 0xff>;
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0xfd000000 0 0xfd000000 0 0x10000
+ /* non-prefetchable memory */
+ 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ num-lanes = <1>;
+ clocks = <&cps_syscon0 1 13>;
+ status = "disabled";
+ };
+
+ cps_pcie1: pcie@f4620000 {
+ compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+ reg = <0 0xf4620000 0 0x10000>,
+ <0 0xfbf00000 0 0x80000>;
+ reg-names = "ctrl", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+
+ bus-range = <0 0xff>;
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0xfd010000 0 0xfd010000 0 0x10000
+ /* non-prefetchable memory */
+ 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+
+ num-lanes = <1>;
+ clocks = <&cps_syscon0 1 11>;
+ status = "disabled";
+ };
+
+ cps_pcie2: pcie@f4640000 {
+ compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+ reg = <0 0xf4640000 0 0x10000>,
+ <0 0xfcf00000 0 0x80000>;
+ reg-names = "ctrl", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+
+ bus-range = <0 0xff>;
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0xfd020000 0 0xfd020000 0 0x10000
+ /* non-prefetchable memory */
+ 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>;
+
+ num-lanes = <1>;
+ clocks = <&cps_syscon0 1 12>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
index 099ad93bb49b..85c23facb9fe 100644
--- a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
+++ b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
@@ -68,6 +68,7 @@
device_type = "cpu";
reg = <0x0>;
enable-method = "psci";
+ next-level-cache = <&l2>;
cpu-idle-states = <&CPU_SLEEP_0>;
};
@@ -76,6 +77,7 @@
device_type = "cpu";
reg = <0x1>;
enable-method = "psci";
+ next-level-cache = <&l2>;
cpu-idle-states = <&CPU_SLEEP_0>;
};
@@ -84,6 +86,7 @@
device_type = "cpu";
reg = <0x2>;
enable-method = "psci";
+ next-level-cache = <&l2>;
cpu-idle-states = <&CPU_SLEEP_0>;
};
@@ -92,9 +95,14 @@
device_type = "cpu";
reg = <0x3>;
enable-method = "psci";
+ next-level-cache = <&l2>;
cpu-idle-states = <&CPU_SLEEP_0>;
};
+ l2: cache {
+ compatible = "cache";
+ };
+
idle-states {
entry-method = "psci";
CPU_SLEEP_0: cpu-sleep-0 {
@@ -115,7 +123,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
@@ -279,7 +287,6 @@
reg = <0x4000 0x100>;
clocks = <&osc>;
interrupts = <1>;
- status = "disabled";
};
wdt2: watchdog@5000 {
@@ -287,7 +294,6 @@
reg = <0x5000 0x100>;
clocks = <&osc>;
interrupts = <2>;
- status = "disabled";
};
sm_gpio0: gpio@8000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 7453a47b3047..2a7f731c7759 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -42,6 +42,44 @@
gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "d";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+ };
+};
+
+&cec {
+ status = "okay";
+};
+
+&dpi0 {
+ status = "okay";
+};
+
+&hdmi_phy {
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
};
&i2c1 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 10f638f4e7d8..1c71e256601d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -254,6 +254,16 @@
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ hdmi_pin: xxx {
+
+ /* hdmi htplg pin */
+ pins1 {
+ pinmux = <MT8173_PIN_21_HTPLG__FUNC_HTPLG>;
+ input-enable;
+ bias-pull-down;
+ };
+ };
+
i2c0_pins_a: i2c0 {
pins1 {
pinmux = <MT8173_PIN_45_SDA0__FUNC_SDA0>,
@@ -341,6 +351,14 @@
clock-names = "spi", "wrap";
};
+ cec: cec@10013000 {
+ compatible = "mediatek,mt8173-cec";
+ reg = <0 0x10013000 0 0xbc>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_CEC>;
+ status = "disabled";
+ };
+
vpu: vpu@10020000 {
compatible = "mediatek,mt8173-vpu";
reg = <0 0x10020000 0 0x30000>,
@@ -383,6 +401,19 @@
#clock-cells = <1>;
};
+ hdmi_phy: hdmi-phy@10209100 {
+ compatible = "mediatek,mt8173-hdmi-phy";
+ reg = <0 0x10209100 0 0x24>;
+ clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
+ clock-names = "pll_ref";
+ clock-output-names = "hdmitx_dig_cts";
+ mediatek,ibias = <0xa>;
+ mediatek,ibias_up = <0x1c>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
mipi_tx0: mipi-dphy@10215000 {
compatible = "mediatek,mt8173-mipi-tx";
reg = <0 0x10215000 0 0x1000>;
@@ -577,6 +608,14 @@
status = "disabled";
};
+ hdmiddc0: i2c@11012000 {
+ compatible = "mediatek,mt8173-hdmi-ddc";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0 0x11012000 0 0x1C>;
+ clocks = <&pericfg CLK_PERI_I2C5>;
+ clock-names = "ddc-i2c";
+ };
+
i2c6: i2c@11013000 {
compatible = "mediatek,mt8173-i2c";
reg = <0 0x11013000 0 0x70>,
@@ -885,6 +924,12 @@
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
status = "disabled";
+
+ port {
+ dpi0_out: endpoint {
+ remote-endpoint = <&hdmi0_in>;
+ };
+ };
};
pwm0: pwm@1401e000 {
@@ -942,6 +987,38 @@
clocks = <&mmsys CLK_MM_DISP_OD>;
};
+ hdmi0: hdmi@14025000 {
+ compatible = "mediatek,mt8173-hdmi";
+ reg = <0 0x14025000 0 0x400>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&mmsys CLK_MM_HDMI_PIXEL>,
+ <&mmsys CLK_MM_HDMI_PLLCK>,
+ <&mmsys CLK_MM_HDMI_AUDIO>,
+ <&mmsys CLK_MM_HDMI_SPDIF>;
+ clock-names = "pixel", "pll", "bclk", "spdif";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pin>;
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi";
+ mediatek,syscon-hdmi = <&mmsys 0x900>;
+ assigned-clocks = <&topckgen CLK_TOP_HDMI_SEL>;
+ assigned-clock-parents = <&hdmi_phy>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi0_in: endpoint {
+ remote-endpoint = <&dpi0_out>;
+ };
+ };
+ };
+ };
+
larb4: larb@14027000 {
compatible = "mediatek,mt8173-smi-larb";
reg = <0 0x14027000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 431266a48e9c..c2becb603e11 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -27,6 +27,12 @@
reg = <0x0 0x80000000 0x0 0xc0000000>;
};
+ host1x@50000000 {
+ dpaux: dpaux@545c0000 {
+ status = "okay";
+ };
+ };
+
pinmux: pinmux@700008d4 {
pinctrl-names = "boot";
pinctrl-0 = <&state_boot>;
@@ -1556,6 +1562,46 @@
};
};
+ i2c@7000d100 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ nau8825@1a {
+ compatible = "nuvoton,nau8825";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(E, 6) IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
+ clock-names = "mclk";
+
+ nuvoton,jkdet-enable;
+ nuvoton,jkdet-polarity = <GPIO_ACTIVE_LOW>;
+ nuvoton,vref-impedance = <2>;
+ nuvoton,micbias-voltage = <6>;
+ nuvoton,sar-threshold-num = <4>;
+ nuvoton,sar-threshold = <0xc 0x1e 0x38 0x60>;
+ nuvoton,sar-hysteresis = <1>;
+ nuvoton,sar-voltage = <0>;
+ nuvoton,sar-compare-time = <0>;
+ nuvoton,sar-sampling-time = <0>;
+ nuvoton,short-key-debounce = <2>;
+ nuvoton,jack-insert-debounce = <7>;
+ nuvoton,jack-eject-debounce = <7>;
+ status = "okay";
+ };
+
+ audio-codec@2d {
+ compatible = "realtek,rt5677";
+ reg = <0x2d>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(X, 0) IRQ_TYPE_LEVEL_HIGH>;
+ realtek,reset-gpio = <&gpio TEGRA_GPIO(BB, 3) GPIO_ACTIVE_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "okay";
+ };
+ };
+
pmc@7000e400 {
nvidia,invert-interrupt;
nvidia,suspend-mode = <0>;
@@ -1568,12 +1614,81 @@
status = "okay";
};
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-6}>;
+ phy-names = "usb2-0", "usb3-0";
+
+ dvddio-pex-supply = <&avddio_1v05>;
+ hvddio-pex-supply = <&pp1800>;
+ avdd-usb-supply = <&pp3300>;
+ avdd-pll-utmip-supply = <&pp1800>;
+ avdd-pll-uerefe-supply = <&pp1050_avdd>;
+ dvdd-pex-pll-supply = <&avddio_1v05>;
+ hvdd-pex-pll-e-supply = <&pp1800>;
+
+ status = "okay";
+ };
+
+ padctl@7009f000 {
+ status = "okay";
+
+ pads {
+ usb2 {
+ status = "okay";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ pcie {
+ status = "okay";
+
+ lanes {
+ pcie-6 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "okay";
+ vbus-supply = <&usbc_vbus>;
+ mode = "otg";
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <0>;
+ status = "okay";
+ };
+ };
+ };
+
sdhci@700b0600 {
bus-width = <8>;
non-removable;
status = "okay";
};
+ aconnect@702c0000 {
+ status = "okay";
+
+ dma@702e2000 {
+ status = "okay";
+ };
+
+ agic@702f9000 {
+ status = "okay";
+ };
+ };
+
clocks {
compatible = "simple-bus";
#address-cells = <1>;
@@ -1646,6 +1761,11 @@
};
};
+ max98357a {
+ compatible = "maxim,max98357a";
+ status = "okay";
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index c4cfdcf60d26..f6739797150a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -34,6 +34,7 @@
clock-names = "dpaux", "parent";
resets = <&tegra_car 207>;
reset-names = "dpaux";
+ power-domains = <&pd_sor>;
status = "disabled";
state_dpaux1_aux: pinmux-aux {
@@ -108,6 +109,7 @@
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 48>;
reset-names = "dsi";
+ power-domains = <&pd_sor>;
nvidia,mipi-calibrate = <&mipi 0x0c0>; /* DSIA & DSIB pads */
status = "disabled";
@@ -137,6 +139,7 @@
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>;
reset-names = "dsi";
+ power-domains = <&pd_sor>;
nvidia,mipi-calibrate = <&mipi 0x300>; /* DSIC & DSID pads */
status = "disabled";
@@ -178,6 +181,7 @@
pinctrl-1 = <&state_dpaux_i2c>;
pinctrl-2 = <&state_dpaux_off>;
pinctrl-names = "aux", "i2c", "off";
+ power-domains = <&pd_sor>;
status = "disabled";
};
@@ -197,6 +201,7 @@
pinctrl-1 = <&state_dpaux1_i2c>;
pinctrl-2 = <&state_dpaux1_off>;
pinctrl-names = "aux", "i2c", "off";
+ power-domains = <&pd_sor>;
status = "disabled";
};
@@ -209,6 +214,7 @@
clock-names = "dpaux", "parent";
resets = <&tegra_car 181>;
reset-names = "dpaux";
+ power-domains = <&pd_sor>;
status = "disabled";
state_dpaux_aux: pinmux-aux {
@@ -325,7 +331,7 @@
};
gpio: gpio@6000d000 {
- compatible = "nvidia,tegra210-gpio", "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
+ compatible = "nvidia,tegra210-gpio", "nvidia,tegra30-gpio";
reg = <0x0 0x6000d000 0x0 0x1000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
@@ -648,27 +654,41 @@
#power-domain-cells = <0>;
};
+ pd_sor: sor {
+ clocks = <&tegra_car TEGRA210_CLK_SOR0>,
+ <&tegra_car TEGRA210_CLK_SOR1>,
+ <&tegra_car TEGRA210_CLK_CSI>,
+ <&tegra_car TEGRA210_CLK_DSIA>,
+ <&tegra_car TEGRA210_CLK_DSIB>,
+ <&tegra_car TEGRA210_CLK_DPAUX>,
+ <&tegra_car TEGRA210_CLK_DPAUX1>,
+ <&tegra_car TEGRA210_CLK_MIPI_CAL>;
+ resets = <&tegra_car TEGRA210_CLK_SOR0>,
+ <&tegra_car TEGRA210_CLK_SOR1>,
+ <&tegra_car TEGRA210_CLK_CSI>,
+ <&tegra_car TEGRA210_CLK_DSIA>,
+ <&tegra_car TEGRA210_CLK_DSIB>,
+ <&tegra_car TEGRA210_CLK_DPAUX>,
+ <&tegra_car TEGRA210_CLK_DPAUX1>,
+ <&tegra_car TEGRA210_CLK_MIPI_CAL>;
+ #power-domain-cells = <0>;
+ };
+
pd_xusbss: xusba {
clocks = <&tegra_car TEGRA210_CLK_XUSB_SS>;
- clock-names = "xusb-ss";
resets = <&tegra_car TEGRA210_CLK_XUSB_SS>;
- reset-names = "xusb-ss";
#power-domain-cells = <0>;
};
pd_xusbdev: xusbb {
clocks = <&tegra_car TEGRA210_CLK_XUSB_DEV>;
- clock-names = "xusb-dev";
resets = <&tegra_car 95>;
- reset-names = "xusb-dev";
#power-domain-cells = <0>;
};
pd_xusbhost: xusbc {
clocks = <&tegra_car TEGRA210_CLK_XUSB_HOST>;
- clock-names = "xusb-host";
resets = <&tegra_car TEGRA210_CLK_XUSB_HOST>;
- reset-names = "xusb-host";
#power-domain-cells = <0>;
};
};
@@ -948,6 +968,7 @@
reg = <0x0 0x700e3000 0x0 0x100>;
clocks = <&tegra_car TEGRA210_CLK_MIPI_CAL>;
clock-names = "mipi-cal";
+ power-domains = <&pd_sor>;
#nvidia,mipi-calibrate-cells = <1>;
};
@@ -961,6 +982,50 @@
#size-cells = <1>;
ranges = <0x702c0000 0x0 0x702c0000 0x00040000>;
status = "disabled";
+
+ adma: dma@702e2000 {
+ compatible = "nvidia,tegra210-adma";
+ reg = <0x702e2000 0x2000>;
+ interrupt-parent = <&agic>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+ clock-names = "d_audio";
+ status = "disabled";
+ };
+
+ agic: agic@702f9000 {
+ compatible = "nvidia,tegra210-agic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x702f9000 0x2000>,
+ <0x702fa000 0x2000>;
+ interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&tegra_car TEGRA210_CLK_APE>;
+ clock-names = "clk";
+ status = "disabled";
+ };
};
spi@70410000 {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index fa1f661ccccf..5dd05de5619b 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,5 +1,6 @@
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index ee828a8a8236..e1e6c6b5c489 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -24,4 +24,52 @@
bias-pull-up;
};
};
+
+ adv7533_int_active: adv7533_int_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_int_suspend: adv7533_int_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_active: adv7533_switch_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_suspend: adv7533_switch_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 18639bc0a506..bb062b547110 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -63,6 +63,47 @@
/* On High speed expansion */
label = "HS-I2C2";
status = "okay";
+
+ adv_bridge: bridge@39 {
+ status = "okay";
+
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <31 2>;
+
+ adi,dsi-lanes = <4>;
+
+ pd-gpios = <&msmgpio 32 0>;
+
+ avdd-supply = <&pm8916_l6>;
+ v1p2-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+
+ pinctrl-names = "default","sleep";
+ pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>;
+ pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7533_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7533_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
};
i2c@78ba000 {
@@ -180,6 +221,36 @@
lpass@07708000 {
status = "okay";
};
+
+ mdss@1a00000 {
+ status = "okay";
+
+ mdp@1a01000 {
+ status = "okay";
+ };
+
+ dsi@1a98000 {
+ status = "okay";
+
+ vdda-supply = <&pm8916_l2>;
+ vddio-supply = <&pm8916_l6>;
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&adv7533_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+ };
+
+ dsi-phy@1a98300 {
+ status = "okay";
+
+ vddio-supply = <&pm8916_l6>;
+ };
+ };
};
usb2513 {
@@ -194,6 +265,17 @@
pinctrl-names = "default";
pinctrl-0 = <&usb_id_default>;
};
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7533_out>;
+ };
+ };
+ };
};
&smd_rpm_regulators {
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
new file mode 100644
index 000000000000..24552f19b3fa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&msmgpio {
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+};
diff --git a/arch/arm/mach-qcom/board.c b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index d8060dfd1a21..230e9c8484ac 100644
--- a/arch/arm/mach-qcom/board.c
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010-2014 The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,22 +11,11 @@
* GNU General Public License for more details.
*/
-#include <linux/init.h>
+/dts-v1/;
-#include <asm/mach/arch.h>
+#include "apq8096-db820c.dtsi"
-static const char * const qcom_dt_match[] __initconst = {
- "qcom,apq8064",
- "qcom,apq8074-dragonboard",
- "qcom,apq8084",
- "qcom,ipq8062",
- "qcom,ipq8064",
- "qcom,msm8660-surf",
- "qcom,msm8960-cdp",
- "qcom,mdm9615",
- NULL
+/ {
+ model = "Qualcomm Technologies, Inc. DB820c";
+ compatible = "arrow,apq8096-db820c", "qcom,apq8096-sbc";
};
-
-DT_MACHINE_START(QCOM_DT, "Qualcomm (Flattened Device Tree)")
- .dt_compat = qcom_dt_match,
-MACHINE_END
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
new file mode 100644
index 000000000000..afb218cffc60
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "apq8096-db820c-pins.dtsi"
+
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ serial1 = &blsp2_uart2;
+ i2c0 = &blsp1_i2c2;
+ i2c1 = &blsp2_i2c1;
+ i2c2 = &blsp2_i2c0;
+ spi0 = &blsp1_spi0;
+ spi1 = &blsp2_spi5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ soc {
+ serial@75b0000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+ };
+
+ serial@75b1000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+ };
+
+ i2c@07577000 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+ };
+
+ i2c@075b6000 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+ };
+
+ spi@07575000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+
+ i2c@075b5000 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+ };
+
+ spi@075ba000 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+ };
+
+ sdhci@74a4900 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 11bdc24cfc74..466ca5705c99 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -86,6 +86,11 @@
reg = <0x0 0x89300000 0x0 0x600000>;
no-map;
};
+
+ mba_mem: mba@8ea00000 {
+ no-map;
+ reg = <0 0x8ea00000 0 0x100000>;
+ };
};
cpus {
@@ -155,6 +160,49 @@
interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>;
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 4>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 3>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -187,10 +235,11 @@
};
firmware {
- scm {
+ scm: scm {
compatible = "qcom,scm";
clocks = <&gcc GCC_CRYPTO_CLK>, <&gcc GCC_CRYPTO_AXI_CLK>, <&gcc GCC_CRYPTO_AHB_CLK>;
clock-names = "core", "bus", "iface";
+ #reset-cells = <1>;
};
};
@@ -228,6 +277,11 @@
reg = <0x1905000 0x20000>;
};
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-msm8916", "syscon";
+ reg = <0x1937000 0x30000>;
+ };
+
tcsr_mutex: hwlock {
compatible = "qcom,tcsr-mutex";
syscon = <&tcsr_mutex_regs 0 0x1000>;
@@ -483,7 +537,7 @@
compatible = "qcom,ci-hdrc";
reg = <0x78d9000 0x400>;
dr_mode = "peripheral";
- interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb_otg>;
status = "disabled";
};
@@ -491,7 +545,7 @@
usb_host: ehci@78d9000 {
compatible = "qcom,ehci-host";
reg = <0x78d9000 0x400>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb_otg>;
status = "disabled";
};
@@ -499,8 +553,8 @@
usb_otg: phy@78d9000 {
compatible = "qcom,usb-otg-snps";
reg = <0x78d9000 0x400>;
- interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_BOTH>,
- <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
qcom,vdd-levels = <500000 1000000 1320000>;
qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
@@ -594,7 +648,7 @@
<0x200a000 0x002100>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
- interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
qcom,ee = <0>;
qcom,channel = <0>;
#address-cells = <2>;
@@ -609,6 +663,144 @@
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
+
+ qfprom: qfprom@5c000 {
+ compatible = "qcom,qfprom";
+ reg = <0x5c000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ tsens_caldata: caldata@d0 {
+ reg = <0xd0 0x8>;
+ };
+ tsens_calsel: calsel@ec {
+ reg = <0xec 0x4>;
+ };
+ };
+
+ tsens: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8916-tsens";
+ reg = <0x4a8000 0x2000>;
+ nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+ nvmem-cell-names = "calib", "calib_sel";
+ #thermal-sensor-cells = <1>;
+ };
+
+ mdss: mdss@1a00000 {
+ compatible = "qcom,mdss";
+ reg = <0x1a00000 0x1000>,
+ <0x1ac8000 0x3000>;
+ reg-names = "mdss_phys", "vbif_phys";
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "vsync_clk";
+
+ interrupts = <0 72 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@1a01000 {
+ compatible = "qcom,mdp5";
+ reg = <0x1a01000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "vsync_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@1a98000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0x1a98000 0x25c>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4 0>;
+
+ assigned-clocks = <&gcc BYTE0_CLK_SRC>,
+ <&gcc PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&dsi_phy0 0>,
+ <&dsi_phy0 1>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>;
+ clock-names = "mdp_core_clk",
+ "iface_clk",
+ "bus_clk",
+ "byte_clk",
+ "pixel_clk",
+ "core_clk";
+ phys = <&dsi_phy0>;
+ phy-names = "dsi-phy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp5_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi_phy0: dsi-phy@1a98300 {
+ compatible = "qcom,dsi-phy-28nm-lp";
+ reg = <0x1a98300 0xd4>,
+ <0x1a98500 0x280>,
+ <0x1a98780 0x30>;
+ reg-names = "dsi_pll",
+ "dsi_phy",
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ };
+ };
};
smd {
@@ -657,6 +849,88 @@
};
};
};
+
+ hexagon-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupts = <0 27 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ hexagon_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ hexagon_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ wcnss-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <451>, <431>;
+
+ interrupts = <0 143 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+
+ wcnss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ wcnss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smsm {
+ compatible = "qcom,smsm";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ipc-1 = <&apcs 0 13>;
+ qcom,ipc-6 = <&apcs 0 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+
+ #qcom,smem-state-cells = <1>;
+ };
+
+ hexagon_smsm: hexagon@1 {
+ reg = <1>;
+ interrupts = <0 26 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wcnss_smsm: wcnss@6 {
+ reg = <6>;
+ interrupts = <0 144 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
};
#include "msm8916-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 55ec3e8326b7..338f82a7fdc7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -97,6 +97,92 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cpu_alert1: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit1: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu_alert2: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit2: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu_alert3: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit3: trip1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -181,6 +267,12 @@
status = "disabled";
};
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
blsp2_uart1: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x75b0000 0x1000>;
@@ -339,7 +431,7 @@
<0x400a000 0x002100>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
- interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
qcom,ee = <0>;
qcom,channel = <0>;
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 17139f7003a6..eb72830ec9eb 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,4 +1,4 @@
-dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb
always := $(dtb-y)
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
new file mode 100644
index 000000000000..bcb11a868343
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -0,0 +1,328 @@
+/*
+ * Device Tree Source for the H3ULCB board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7795.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Renesas H3ULCB board based on r8a7795";
+ compatible = "renesas,h3ulcb", "renesas,r8a7795";
+
+ aliases {
+ serial0 = &scif2;
+ ethernet0 = &avb;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0x38000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led5 {
+ gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+ };
+ led6 {
+ gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ keyboard {
+ compatible = "gpio-keys";
+
+ key-1 {
+ linux,code = <KEY_1>;
+ label = "SW3";
+ wakeup-source;
+ debounce-interval = <20>;
+ gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ x12_clk: x12 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ vcc_sdhi0: regulator-vcc-sdhi0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vccq_sdhi0: regulator-vccq-sdhi0 {
+ compatible = "regulator-gpio";
+
+ regulator-name = "SDHI0 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1
+ 1800000 0>;
+ };
+
+ audio_clkout: audio-clkout {
+ /*
+ * This is the same as <&rcar_sound 0>,
+ * but it is needed to avoid a cs2000/rcar_sound probe deadlock.
+ */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <11289600>;
+ };
+
+ rsnd_ak4613: sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcpu>;
+ simple-audio-card,frame-master = <&sndcpu>;
+
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&ak4613>;
+ };
+ };
+};
+
+&extal_clk {
+ clock-frequency = <16666666>;
+};
+
+&extalr_clk {
+ clock-frequency = <32768>;
+};
+
+&pfc {
+ pinctrl-0 = <&scif_clk_pins>;
+ pinctrl-names = "default";
+
+ scif2_pins: scif2 {
+ groups = "scif2_data_a";
+ function = "scif2";
+ };
+
+ scif_clk_pins: scif_clk {
+ groups = "scif_clk_a";
+ function = "scif_clk";
+ };
+
+ i2c2_pins: i2c2 {
+ groups = "i2c2_a";
+ function = "i2c2";
+ };
+
+ avb_pins: avb {
+ groups = "avb_mdc";
+ function = "avb";
+ };
+
+ sdhi0_pins_3v3: sd0_3v3 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ power-source = <3300>;
+ };
+
+ sdhi0_pins_1v8: sd0_1v8 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ power-source = <1800>;
+ };
+
+ sound_pins: sound {
+ groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
+ function = "ssi";
+ };
+
+ sound_clk_pins: sound-clk {
+ groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
+ "audio_clkout_a", "audio_clkout3_a";
+ function = "audio_clk";
+ };
+
+ usb1_pins: usb1 {
+ groups = "usb1";
+ function = "usb1";
+ };
+};
+
+&scif2 {
+ pinctrl-0 = <&scif2_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&scif_clk {
+ clock-frequency = <14745600>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ clock-frequency = <100000>;
+
+ ak4613: codec@10 {
+ compatible = "asahi-kasei,ak4613";
+ #sound-dai-cells = <0>;
+ reg = <0x10>;
+ clocks = <&rcar_sound 3>;
+
+ asahi-kasei,in1-single-end;
+ asahi-kasei,in2-single-end;
+ asahi-kasei,out1-single-end;
+ asahi-kasei,out2-single-end;
+ asahi-kasei,out3-single-end;
+ asahi-kasei,out4-single-end;
+ asahi-kasei,out5-single-end;
+ asahi-kasei,out6-single-end;
+ };
+
+ cs2000: clk-multiplier@4f {
+ #clock-cells = <0>;
+ compatible = "cirrus,cs2000-cp";
+ reg = <0x4f>;
+ clocks = <&audio_clkout>, <&x12_clk>;
+ clock-names = "clk_in", "ref_clk";
+
+ assigned-clocks = <&cs2000>;
+ assigned-clock-rates = <24576000>; /* 1/1 divide */
+ };
+};
+
+&rcar_sound {
+ pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-names = "default";
+
+ /* Single DAI */
+ #sound-dai-cells = <0>;
+
+ /* audio_clkout0/1/2/3 */
+ #clock-cells = <1>;
+ clock-frequency = <11289600>;
+
+ status = "okay";
+
+ /* update <audio_clk_b> to <cs2000> */
+ clocks = <&cpg CPG_MOD 1005>,
+ <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+ <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+ <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+ <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+ <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+ <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+ <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+ <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+ <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+ <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+ <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+ <&audio_clk_a>, <&cs2000>,
+ <&audio_clk_c>,
+ <&cpg CPG_CORE R8A7795_CLK_S0D4>;
+
+ rcar_sound,dai {
+ dai0 {
+ playback = <&ssi0 &src0 &dvc0>;
+ capture = <&ssi1 &src1 &dvc1>;
+ };
+ };
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins_3v3>;
+ pinctrl-1 = <&sdhi0_pins_1v8>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ vqmmc-supply = <&vccq_sdhi0>;
+ cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ status = "okay";
+};
+
+&ssi1 {
+ shared-pin;
+};
+
+&wdt0 {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
+&audio_clk_a {
+ clock-frequency = <22579200>;
+};
+
+&avb {
+ pinctrl-0 = <&avb_pins>;
+ pinctrl-names = "default";
+ renesas,no-ether-link;
+ phy-handle = <&phy0>;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <900>;
+ rxdv-skew-ps = <0>;
+ rxd0-skew-ps = <0>;
+ rxd1-skew-ps = <0>;
+ rxd2-skew-ps = <0>;
+ rxd3-skew-ps = <0>;
+ txc-skew-ps = <900>;
+ txen-skew-ps = <0>;
+ txd0-skew-ps = <0>;
+ txd1-skew-ps = <0>;
+ txd2-skew-ps = <0>;
+ txd3-skew-ps = <0>;
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&usb2_phy1 {
+ pinctrl-0 = <&usb1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index 98f02631a0f0..b1eab6876f8c 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -110,6 +110,17 @@
1800000 0>;
};
+ vbus0_usb2: regulator-vbus0-usb2 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "USB20_VBUS0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio6 16 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
audio_clkout: audio_clkout {
/*
* This is the same as <&rcar_sound 0>
@@ -135,6 +146,52 @@
sound-dai = <&ak4613>;
};
};
+
+ vga-encoder {
+ compatible = "adi,adv7123";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7123_in: endpoint {
+ remote-endpoint = <&du_out_rgb>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ adv7123_out: endpoint {
+ remote-endpoint = <&vga_in>;
+ };
+ };
+ };
+ };
+
+ vga {
+ compatible = "vga-connector";
+
+ port {
+ vga_in: endpoint {
+ remote-endpoint = <&adv7123_out>;
+ };
+ };
+ };
+};
+
+&du {
+ pinctrl-0 = <&du_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ ports {
+ port@0 {
+ endpoint {
+ remote-endpoint = <&adv7123_in>;
+ };
+ };
+ };
};
&extal_clk {
@@ -172,6 +229,11 @@
function = "avb";
};
+ du_pins: du {
+ groups = "du_rgb888", "du_sync", "du_oddf", "du_clk_out_0";
+ function = "du";
+ };
+
sdhi0_pins: sd0 {
groups = "sdhi0_data4", "sdhi0_ctrl";
function = "sdhi0";
@@ -193,6 +255,11 @@
function = "audio_clk";
};
+ usb0_pins: usb0 {
+ groups = "usb0";
+ function = "usb0";
+ };
+
usb1_pins: usb1 {
groups = "usb1";
function = "usb1";
@@ -369,6 +436,14 @@
status = "okay";
};
+&usb2_phy0 {
+ pinctrl-0 = <&usb0_pins>;
+ pinctrl-names = "default";
+
+ vbus-supply = <&vbus0_usb2>;
+ status = "okay";
+};
+
&usb2_phy1 {
pinctrl-0 = <&usb1_pins>;
pinctrl-names = "default";
@@ -383,6 +458,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&ehci1 {
status = "okay";
};
@@ -391,6 +470,10 @@
status = "okay";
};
+&ohci0 {
+ status = "okay";
+};
+
&ohci1 {
status = "okay";
};
@@ -399,6 +482,10 @@
status = "okay";
};
+&hsusb {
+ status = "okay";
+};
+
&pcie_bus_clk {
clock-frequency = <100000000>;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index b902356873c2..8c15040f2540 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -328,7 +328,8 @@
};
audma0: dma-controller@ec700000 {
- compatible = "renesas,rcar-dmac";
+ compatible = "renesas,dmac-r8a7795",
+ "renesas,rcar-dmac";
reg = <0 0xec700000 0 0x10000>;
interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
@@ -360,7 +361,8 @@
};
audma1: dma-controller@ec720000 {
- compatible = "renesas,rcar-dmac";
+ compatible = "renesas,dmac-r8a7795",
+ "renesas,rcar-dmac";
reg = <0 0xec720000 0 0x10000>;
interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
@@ -1098,6 +1100,7 @@
reg = <0 0xee100000 0 0x2000>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 314>;
+ max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -1107,6 +1110,7 @@
reg = <0 0xee120000 0 0x2000>;
interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 313>;
+ max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -1116,8 +1120,8 @@
reg = <0 0xee140000 0 0x2000>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 312>;
+ max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- cap-mmc-highspeed;
status = "disabled";
};
@@ -1126,8 +1130,8 @@
reg = <0 0xee160000 0 0x2000>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 311>;
+ max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- cap-mmc-highspeed;
status = "disabled";
};
@@ -1224,6 +1228,23 @@
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
status = "disabled";
};
+
+ hsusb: usb@e6590000 {
+ compatible = "renesas,usbhs-r8a7795",
+ "renesas,rcar-gen3-usbhs";
+ reg = <0 0xe6590000 0 0x100>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>;
+ dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
+ <&usb_dmac1 0>, <&usb_dmac1 1>;
+ dma-names = "ch0", "ch1", "ch2", "ch3";
+ renesas,buswait = <11>;
+ phys = <&usb2_phy0>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
pciec0: pcie@fe000000 {
compatible = "renesas,pcie-r8a7795";
reg = <0 0xfe000000 0 0x80000>;
@@ -1273,5 +1294,252 @@
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
status = "disabled";
};
+
+ vspbc: vsp@fe920000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe920000 0 0x8000>;
+ interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 624>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+
+ renesas,fcp = <&fcpvb1>;
+ };
+
+ fcpvb1: fcp@fe92f000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfe92f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 606>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ fcpf0: fcp@fe950000 {
+ compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+ reg = <0 0xfe950000 0 0x200>;
+ clocks = <&cpg CPG_MOD 615>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ fcpf1: fcp@fe951000 {
+ compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+ reg = <0 0xfe951000 0 0x200>;
+ clocks = <&cpg CPG_MOD 614>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ fcpf2: fcp@fe952000 {
+ compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+ reg = <0 0xfe952000 0 0x200>;
+ clocks = <&cpg CPG_MOD 613>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ vspbd: vsp@fe960000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe960000 0 0x8000>;
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 626>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+
+ renesas,fcp = <&fcpvb0>;
+ };
+
+ fcpvb0: fcp@fe96f000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfe96f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 607>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ vspi0: vsp@fe9a0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe9a0000 0 0x8000>;
+ interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 631>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+
+ renesas,fcp = <&fcpvi0>;
+ };
+
+ fcpvi0: fcp@fe9af000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfe9af000 0 0x200>;
+ clocks = <&cpg CPG_MOD 611>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ vspi1: vsp@fe9b0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe9b0000 0 0x8000>;
+ interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 630>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+
+ renesas,fcp = <&fcpvi1>;
+ };
+
+ fcpvi1: fcp@fe9bf000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfe9bf000 0 0x200>;
+ clocks = <&cpg CPG_MOD 610>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ vspi2: vsp@fe9c0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe9c0000 0 0x8000>;
+ interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 629>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+
+ renesas,fcp = <&fcpvi2>;
+ };
+
+ fcpvi2: fcp@fe9cf000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfe9cf000 0 0x200>;
+ clocks = <&cpg CPG_MOD 609>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ };
+
+ vspd0: vsp@fea20000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea20000 0 0x4000>;
+ interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 623>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+
+ renesas,fcp = <&fcpvd0>;
+ };
+
+ fcpvd0: fcp@fea27000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfea27000 0 0x200>;
+ clocks = <&cpg CPG_MOD 603>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ };
+
+ vspd1: vsp@fea28000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea28000 0 0x4000>;
+ interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 622>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+
+ renesas,fcp = <&fcpvd1>;
+ };
+
+ fcpvd1: fcp@fea2f000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfea2f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 602>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ };
+
+ vspd2: vsp@fea30000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea30000 0 0x4000>;
+ interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 621>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+
+ renesas,fcp = <&fcpvd2>;
+ };
+
+ fcpvd2: fcp@fea37000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfea37000 0 0x200>;
+ clocks = <&cpg CPG_MOD 601>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ };
+
+ vspd3: vsp@fea38000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea38000 0 0x4000>;
+ interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 620>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+
+ renesas,fcp = <&fcpvd3>;
+ };
+
+ fcpvd3: fcp@fea3f000 {
+ compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+ reg = <0 0xfea3f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 600>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ };
+
+ fdp1@fe940000 {
+ compatible = "renesas,fdp1";
+ reg = <0 0xfe940000 0 0x2400>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 119>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ renesas,fcp = <&fcpf0>;
+ };
+
+ fdp1@fe944000 {
+ compatible = "renesas,fdp1";
+ reg = <0 0xfe944000 0 0x2400>;
+ interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 118>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ renesas,fcp = <&fcpf1>;
+ };
+
+ fdp1@fe948000 {
+ compatible = "renesas,fdp1";
+ reg = <0 0xfe948000 0 0x2400>;
+ interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 117>;
+ power-domains = <&sysc R8A7795_PD_A3VP>;
+ renesas,fcp = <&fcpf2>;
+ };
+
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a7795";
+ reg = <0 0xfeb00000 0 0x80000>,
+ <0 0xfeb90000 0 0x14>;
+ reg-names = "du", "lvds.0";
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 722>,
+ <&cpg CPG_MOD 721>,
+ <&cpg CPG_MOD 727>;
+ clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0";
+ status = "disabled";
+
+ vsps = <&vspd0 &vspd1 &vspd2 &vspd3>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_rgb: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ du_out_hdmi0: endpoint {
+ };
+ };
+ port@2 {
+ reg = <2>;
+ du_out_hdmi1: endpoint {
+ };
+ };
+ port@3 {
+ reg = <3>;
+ du_out_lvds0: endpoint {
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
index e72be3856d79..13db7d61c26c 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -31,11 +31,27 @@
};
};
+&pfc {
+ pinctrl-0 = <&scif_clk_pins>;
+ pinctrl-names = "default";
+
+ scif2_pins: scif2 {
+ groups = "scif2_data_a";
+ function = "scif2";
+ };
+ scif_clk_pins: scif_clk {
+ groups = "scif_clk_a";
+ function = "scif_clk";
+ };
+};
+
&extal_clk {
clock-frequency = <16666666>;
};
&scif2 {
+ pinctrl-0 = <&scif2_pins>;
+ pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index 1edf82440d78..9217da983525 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -107,6 +107,123 @@
status = "disabled";
};
+ gpio0: gpio@e6050000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6050000 0 0x50>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 0 16>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 912>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio1: gpio@e6051000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6051000 0 0x50>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 32 29>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 911>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio2: gpio@e6052000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6052000 0 0x50>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 64 15>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 910>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6053000 0 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 96 16>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio4: gpio@e6054000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6054000 0 0x50>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 128 18>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 908>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio5: gpio@e6055000 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055000 0 0x50>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 160 26>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 907>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio6: gpio@e6055400 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055400 0 0x50>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 192 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 906>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ gpio7: gpio@e6055800 {
+ compatible = "renesas,gpio-r8a7796",
+ "renesas,gpio-rcar";
+ reg = <0 0xe6055800 0 0x50>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 224 4>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 905>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
+ pfc: pin-controller@e6060000 {
+ compatible = "renesas,pfc-r8a7796";
+ reg = <0 0xe6060000 0 0x50c>;
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7796-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 7037a161b6ef..87669f656454 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,5 +1,6 @@
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-orion-r68-meta.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
new file mode 100644
index 000000000000..5797933ef80e
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016 Matthias Brugger <mbrugger@suse.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include "rk3368.dtsi"
+
+/ {
+ model = "Rockchip Orion R68";
+ compatible = "tronsmart,orion-r68-meta", "rockchip,rk3368";
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ pinctrl-0 = <&emmc_reset>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ };
+
+ keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_key>;
+
+ power {
+ wakeup-source;
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ label = "GPIO Power";
+ linux,code = <KEY_POWER>;
+ };
+ };
+
+ leds: gpio-leds {
+ compatible = "gpio-leds";
+
+ red {
+ gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+ label = "orion:red:led";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_ctl>;
+ default-state = "on";
+ };
+
+ blue {
+ gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+ label = "orion:blue:led";
+ pinctrl-names = "default";
+ pinctrl-0 = <&stby_pwren>;
+ default-state = "off";
+ };
+ };
+
+ vcc_18: vcc18-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ /* supplies both host and otg */
+ vcc_host: vcc-host-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio0 4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host_vbus_drv>;
+ regulator-name = "vcc_host";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_io: vcc-io-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_lan: vcc-lan-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_lan";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_sd: vcc-sd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sd";
+ gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_sys: vcc-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vccio_sd: vcc-io-sd-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vccio_wl: vccio-wl-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vccio_wl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vdd_10: vdd-10-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_10";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&emmc_pwrseq>;
+ mmc-hs200-1_2v;
+ mmc-hs200-1_8v;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ status = "okay";
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio3 12 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: syr827@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-enable-ramp-delay = <300>;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <8000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ hym8563: hym8563@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ /* rtc_int is not connected */
+ };
+};
+
+&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ bias-disable;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ emmc {
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ };
+
+ emmc-clk {
+ rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ };
+
+ emmc-cmd {
+ rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ };
+
+ emmc_reset: emmc-reset {
+ rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ keys {
+ pwr_key: pwr-key {
+ rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ leds {
+ stby_pwren: stby-pwren {
+ rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ led_ctl: led-ctl {
+ rockchip,pins = <3 29 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <2 9 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_cd: sdmmc-cd {
+ rockchip,pins = <2 11 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_bus1: sdmmc-bus1 {
+ rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <2 6 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <2 7 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <2 8 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+ };
+
+ usb {
+ host_vbus_drv: host-vbus-drv {
+ rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&saradc {
+ vref-supply = <&vcc_18>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ clock-frequency = <50000000>;
+ clock-freq-min-max = <400000 50000000>;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ keep-power-in-suspend;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_xfer>;
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 82a32e5591e2..eed1ef6669ff 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -248,7 +248,6 @@
&sdio0 {
assigned-clocks = <&cru SCLK_SDIO0>;
assigned-clock-parents = <&cru PLL_CPLL>;
- broken-cd;
bus-width = <4>;
cap-sd-highspeed;
cap-sdio-irq;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 4f44d1191bfd..0fcb2147c9f9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -45,6 +45,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -641,6 +642,15 @@
compatible = "rockchip,rk3368-pmu-io-voltage-domain";
status = "disabled";
};
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x200>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
};
cru: clock-controller@ff760000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index d33aa06d46f5..8e82497925fe 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -49,6 +49,13 @@
compatible = "rockchip,rk3399-evb", "rockchip,rk3399",
"google,rk3399evb-rev2";
+ clkin_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "clkin_gmac";
+ #clock-cells = <0>;
+ };
+
vdd_center: vdd-center {
compatible = "pwm-regulator";
pwms = <&pwm3 0 25000 0>;
@@ -69,18 +76,61 @@
regulator-max-microvolt = <3300000>;
};
+ vcc5v0_sys: vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 25 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
vcc_phy: vcc-phy-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc_phy";
regulator-always-on;
regulator-boot-on;
};
+
+ vcc_phy: vcc-phy-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_phy";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
};
&emmc_phy {
status = "okay";
};
+&gmac {
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ assigned-clock-parents = <&clkin_gmac>;
+ clock_in_out = "input";
+ phy-supply = <&vcc_phy>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio3 15 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+ tx_delay = <0x28>;
+ rx_delay = <0x11>;
+ status = "okay";
+};
+
&pwm0 {
status = "okay";
};
@@ -101,6 +151,36 @@
status = "okay";
};
+&pcie_phy {
+ status = "disabled";
+};
+
+&pcie0 {
+ ep-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn>;
+ status = "disabled";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
&uart2 {
status = "okay";
};
@@ -133,4 +213,11 @@
<1 18 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
+
+ usb2 {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins =
+ <4 25 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index a6dd623a8845..b65c193dc64e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -45,6 +45,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -152,6 +153,16 @@
};
};
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster0>;
+ };
+
+ pmu_a72 {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW &ppi_cluster1>;
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -159,10 +170,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
};
xin24m: xin24m {
@@ -181,8 +192,8 @@
dmac_bus: dma-controller@ff6d0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xff6d0000 0x0 0x4000>;
- interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <1>;
clocks = <&cru ACLK_DMAC0_PERILP>;
clock-names = "apb_pclk";
@@ -191,19 +202,39 @@
dmac_peri: dma-controller@ff6e0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xff6e0000 0x0 0x4000>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <1>;
clocks = <&cru ACLK_DMAC1_PERILP>;
clock-names = "apb_pclk";
};
};
+ gmac: ethernet@fe300000 {
+ compatible = "rockchip,rk3399-gmac";
+ reg = <0x0 0xfe300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>,
+ <&cru SCLK_MAC_TX>, <&cru SCLK_MACREF>,
+ <&cru SCLK_MACREF_OUT>, <&cru ACLK_GMAC>,
+ <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac";
+ power-domains = <&power RK3399_PD_GMAC>;
+ resets = <&cru SRST_A_GMAC>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
sdio0: dwmmc@fe310000 {
compatible = "rockchip,rk3399-dw-mshc",
"rockchip,rk3288-dw-mshc";
reg = <0x0 0xfe310000 0x0 0x4000>;
- interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH 0>;
clock-freq-min-max = <400000 150000000>;
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
@@ -216,7 +247,7 @@
compatible = "rockchip,rk3399-dw-mshc",
"rockchip,rk3288-dw-mshc";
reg = <0x0 0xfe320000 0x0 0x4000>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH 0>;
clock-freq-min-max = <400000 150000000>;
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
@@ -228,7 +259,7 @@
sdhci: sdhci@fe330000 {
compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1";
reg = <0x0 0xfe330000 0x0 0x10000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH 0>;
arasan,soc-ctl-syscon = <&grf>;
assigned-clocks = <&cru SCLK_EMMC>;
assigned-clock-rates = <200000000>;
@@ -241,19 +272,60 @@
status = "disabled";
};
+ pcie0: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie";
+ reg = <0x0 0xf8000000 0x0 0x2000000>,
+ <0x0 0xfd000000 0x0 0x1000000>;
+ reg-names = "axi-base", "apb-base";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ bus-range = <0x0 0x1>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "legacy", "client";
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie0_intc 0>,
+ <0 0 0 2 &pcie0_intc 1>,
+ <0 0 0 3 &pcie0_intc 2>,
+ <0 0 0 4 &pcie0_intc 3>;
+ msi-map = <0x0 &its 0x0 0x1000>;
+ phys = <&pcie_phy>;
+ phy-names = "pcie-phy";
+ ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
+ 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+ status = "disabled";
+
+ pcie0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
usb_host0_ehci: usb@fe380000 {
compatible = "generic-ehci";
reg = <0x0 0xfe380000 0x0 0x20000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>;
clock-names = "hclk_host0", "hclk_host0_arb";
+ phys = <&u2phy0_host>;
+ phy-names = "usb";
status = "disabled";
};
usb_host0_ohci: usb@fe3a0000 {
compatible = "generic-ohci";
reg = <0x0 0xfe3a0000 0x0 0x20000>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>;
clock-names = "hclk_host0", "hclk_host0_arb";
status = "disabled";
@@ -262,16 +334,18 @@
usb_host1_ehci: usb@fe3c0000 {
compatible = "generic-ehci";
reg = <0x0 0xfe3c0000 0x0 0x20000>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>;
clock-names = "hclk_host1", "hclk_host1_arb";
+ phys = <&u2phy1_host>;
+ phy-names = "usb";
status = "disabled";
};
usb_host1_ohci: usb@fe3e0000 {
compatible = "generic-ohci";
reg = <0x0 0xfe3e0000 0x0 0x20000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>;
clock-names = "hclk_host1", "hclk_host1_arb";
status = "disabled";
@@ -279,7 +353,7 @@
gic: interrupt-controller@fee00000 {
compatible = "arm,gic-v3";
- #interrupt-cells = <3>;
+ #interrupt-cells = <4>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -290,12 +364,34 @@
<0x0 0xfff00000 0 0x10000>, /* GICC */
<0x0 0xfff10000 0 0x10000>, /* GICH */
<0x0 0xfff20000 0 0x10000>; /* GICV */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
its: interrupt-controller@fee20000 {
compatible = "arm,gic-v3-its";
msi-controller;
reg = <0x0 0xfee20000 0x0 0x20000>;
};
+
+ ppi-partitions {
+ ppi_cluster0: interrupt-partition-0 {
+ affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>;
+ };
+
+ ppi_cluster1: interrupt-partition-1 {
+ affinity = <&cpu_b0 &cpu_b1>;
+ };
+ };
+ };
+
+ saradc: saradc@ff100000 {
+ compatible = "rockchip,rk3399-saradc";
+ reg = <0x0 0xff100000 0x0 0x100>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH 0>;
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_P_SARADC>;
+ reset-names = "saradc-apb";
+ status = "disabled";
};
i2c1: i2c@ff110000 {
@@ -305,7 +401,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c1_xfer>;
#address-cells = <1>;
@@ -320,7 +416,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c2_xfer>;
#address-cells = <1>;
@@ -335,7 +431,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c3_xfer>;
#address-cells = <1>;
@@ -350,7 +446,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c5_xfer>;
#address-cells = <1>;
@@ -365,7 +461,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c6_xfer>;
#address-cells = <1>;
@@ -380,7 +476,7 @@
assigned-clock-rates = <200000000>;
clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c7_xfer>;
#address-cells = <1>;
@@ -393,7 +489,7 @@
reg = <0x0 0xff180000 0x0 0x100>;
clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH 0>;
reg-shift = <2>;
reg-io-width = <4>;
pinctrl-names = "default";
@@ -406,7 +502,7 @@
reg = <0x0 0xff190000 0x0 0x100>;
clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH 0>;
reg-shift = <2>;
reg-io-width = <4>;
pinctrl-names = "default";
@@ -419,7 +515,7 @@
reg = <0x0 0xff1a0000 0x0 0x100>;
clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH 0>;
reg-shift = <2>;
reg-io-width = <4>;
pinctrl-names = "default";
@@ -432,7 +528,7 @@
reg = <0x0 0xff1b0000 0x0 0x100>;
clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH 0>;
reg-shift = <2>;
reg-io-width = <4>;
pinctrl-names = "default";
@@ -445,7 +541,7 @@
reg = <0x0 0xff1c0000 0x0 0x1000>;
clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
#address-cells = <1>;
@@ -458,7 +554,7 @@
reg = <0x0 0xff1d0000 0x0 0x1000>;
clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
#address-cells = <1>;
@@ -471,7 +567,7 @@
reg = <0x0 0xff1e0000 0x0 0x1000>;
clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
#address-cells = <1>;
@@ -484,7 +580,7 @@
reg = <0x0 0xff1f0000 0x0 0x1000>;
clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>;
#address-cells = <1>;
@@ -497,7 +593,7 @@
reg = <0x0 0xff200000 0x0 0x1000>;
clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>;
#address-cells = <1>;
@@ -577,7 +673,7 @@
tsadc: tsadc@ff260000 {
compatible = "rockchip,rk3399-tsadc";
reg = <0x0 0xff260000 0x0 0x100>;
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH 0>;
assigned-clocks = <&cru SCLK_TSADC>;
assigned-clock-rates = <750000>;
clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
@@ -594,6 +690,203 @@
status = "disabled";
};
+ qos_gmac: qos@ffa5c000 {
+ compatible = "syscon";
+ reg = <0x0 0xffa5c000 0x0 0x20>;
+ };
+
+ qos_hdcp: qos@ffa90000 {
+ compatible = "syscon";
+ reg = <0x0 0xffa90000 0x0 0x20>;
+ };
+
+ qos_iep: qos@ffa98000 {
+ compatible = "syscon";
+ reg = <0x0 0xffa98000 0x0 0x20>;
+ };
+
+ qos_isp0_m0: qos@ffaa0000 {
+ compatible = "syscon";
+ reg = <0x0 0xffaa0000 0x0 0x20>;
+ };
+
+ qos_isp0_m1: qos@ffaa0080 {
+ compatible = "syscon";
+ reg = <0x0 0xffaa0080 0x0 0x20>;
+ };
+
+ qos_isp1_m0: qos@ffaa8000 {
+ compatible = "syscon";
+ reg = <0x0 0xffaa8000 0x0 0x20>;
+ };
+
+ qos_isp1_m1: qos@ffaa8080 {
+ compatible = "syscon";
+ reg = <0x0 0xffaa8080 0x0 0x20>;
+ };
+
+ qos_rga_r: qos@ffab0000 {
+ compatible = "syscon";
+ reg = <0x0 0xffab0000 0x0 0x20>;
+ };
+
+ qos_rga_w: qos@ffab0080 {
+ compatible = "syscon";
+ reg = <0x0 0xffab0080 0x0 0x20>;
+ };
+
+ qos_video_m0: qos@ffab8000 {
+ compatible = "syscon";
+ reg = <0x0 0xffab8000 0x0 0x20>;
+ };
+
+ qos_video_m1_r: qos@ffac0000 {
+ compatible = "syscon";
+ reg = <0x0 0xffac0000 0x0 0x20>;
+ };
+
+ qos_video_m1_w: qos@ffac0080 {
+ compatible = "syscon";
+ reg = <0x0 0xffac0080 0x0 0x20>;
+ };
+
+ qos_vop_big_r: qos@ffac8000 {
+ compatible = "syscon";
+ reg = <0x0 0xffac8000 0x0 0x20>;
+ };
+
+ qos_vop_big_w: qos@ffac8080 {
+ compatible = "syscon";
+ reg = <0x0 0xffac8080 0x0 0x20>;
+ };
+
+ qos_vop_little: qos@ffad0000 {
+ compatible = "syscon";
+ reg = <0x0 0xffad0000 0x0 0x20>;
+ };
+
+ qos_gpu: qos@ffae0000 {
+ compatible = "syscon";
+ reg = <0x0 0xffae0000 0x0 0x20>;
+ };
+
+ pmu: power-management@ff310000 {
+ compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd";
+ reg = <0x0 0xff310000 0x0 0x1000>;
+
+ /*
+ * Note: RK3399 supports 6 voltage domains including VD_CORE_L,
+ * VD_CORE_B, VD_CENTER, VD_GPU, VD_LOGIC and VD_PMU.
+ * The power domains are grouped by the voltage domain they
+ * belong to, as detailed below.
+ */
+ power: power-controller {
+ compatible = "rockchip,rk3399-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* These power domains are grouped by VD_CENTER */
+ pd_iep@RK3399_PD_IEP {
+ reg = <RK3399_PD_IEP>;
+ clocks = <&cru ACLK_IEP>,
+ <&cru HCLK_IEP>;
+ pm_qos = <&qos_iep>;
+ };
+ pd_rga@RK3399_PD_RGA {
+ reg = <RK3399_PD_RGA>;
+ clocks = <&cru ACLK_RGA>,
+ <&cru HCLK_RGA>;
+ pm_qos = <&qos_rga_r>,
+ <&qos_rga_w>;
+ };
+ pd_vcodec@RK3399_PD_VCODEC {
+ reg = <RK3399_PD_VCODEC>;
+ clocks = <&cru ACLK_VCODEC>,
+ <&cru HCLK_VCODEC>;
+ pm_qos = <&qos_video_m0>;
+ };
+ pd_vdu@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+ <&cru HCLK_VDU>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ };
+
+ /* These power domains are grouped by VD_GPU */
+ pd_gpu@RK3399_PD_GPU {
+ reg = <RK3399_PD_GPU>;
+ clocks = <&cru ACLK_GPU>;
+ pm_qos = <&qos_gpu>;
+ };
+
+ /* These power domains are grouped by VD_LOGIC */
+ pd_gmac@RK3399_PD_GMAC {
+ reg = <RK3399_PD_GMAC>;
+ clocks = <&cru ACLK_GMAC>;
+ pm_qos = <&qos_gmac>;
+ };
+ pd_vio@RK3399_PD_VIO {
+ reg = <RK3399_PD_VIO>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pd_hdcp@RK3399_PD_HDCP {
+ reg = <RK3399_PD_HDCP>;
+ clocks = <&cru ACLK_HDCP>,
+ <&cru HCLK_HDCP>,
+ <&cru PCLK_HDCP>;
+ pm_qos = <&qos_hdcp>;
+ };
+ pd_isp0@RK3399_PD_ISP0 {
+ reg = <RK3399_PD_ISP0>;
+ clocks = <&cru ACLK_ISP0>,
+ <&cru HCLK_ISP0>;
+ pm_qos = <&qos_isp0_m0>,
+ <&qos_isp0_m1>;
+ };
+ pd_isp1@RK3399_PD_ISP1 {
+ reg = <RK3399_PD_ISP1>;
+ clocks = <&cru ACLK_ISP1>,
+ <&cru HCLK_ISP1>;
+ pm_qos = <&qos_isp1_m0>,
+ <&qos_isp1_m1>;
+ };
+ pd_tcpc0@RK3399_PD_TCPC0 {
+ reg = <RK3399_PD_TCPD0>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ };
+ pd_tcpc1@RK3399_PD_TCPC1 {
+ reg = <RK3399_PD_TCPD1>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ };
+ pd_vo@RK3399_PD_VO {
+ reg = <RK3399_PD_VO>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pd_vopb@RK3399_PD_VOPB {
+ reg = <RK3399_PD_VOPB>;
+ clocks = <&cru ACLK_VOP0>,
+ <&cru HCLK_VOP0>;
+ pm_qos = <&qos_vop_big_r>,
+ <&qos_vop_big_w>;
+ };
+ pd_vopl@RK3399_PD_VOPL {
+ reg = <RK3399_PD_VOPL>;
+ clocks = <&cru ACLK_VOP1>,
+ <&cru HCLK_VOP1>;
+ pm_qos = <&qos_vop_little>;
+ };
+ };
+ };
+ };
+ };
+
pmugrf: syscon@ff320000 {
compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd";
reg = <0x0 0xff320000 0x0 0x1000>;
@@ -611,7 +904,7 @@
reg = <0x0 0xff350000 0x0 0x1000>;
clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>;
clock-names = "spiclk", "apb_pclk";
- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>;
#address-cells = <1>;
@@ -624,7 +917,7 @@
reg = <0x0 0xff370000 0x0 0x100>;
clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>;
clock-names = "baudclk", "apb_pclk";
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH 0>;
reg-shift = <2>;
reg-io-width = <4>;
pinctrl-names = "default";
@@ -639,7 +932,7 @@
assigned-clock-rates = <200000000>;
clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c0_xfer>;
#address-cells = <1>;
@@ -654,7 +947,7 @@
assigned-clock-rates = <200000000>;
clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c4_xfer>;
#address-cells = <1>;
@@ -669,7 +962,7 @@
assigned-clock-rates = <200000000>;
clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>;
clock-names = "i2c", "pclk";
- interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
pinctrl-names = "default";
pinctrl-0 = <&i2c8_xfer>;
#address-cells = <1>;
@@ -721,6 +1014,35 @@
status = "disabled";
};
+ efuse0: efuse@ff690000 {
+ compatible = "rockchip,rk3399-efuse";
+ reg = <0x0 0xff690000 0x0 0x80>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru PCLK_EFUSE1024NS>;
+ clock-names = "pclk_efuse";
+
+ /* Data cells */
+ cpub_leakage: cpu-leakage@17 {
+ reg = <0x17 0x1>;
+ };
+ gpu_leakage: gpu-leakage@18 {
+ reg = <0x18 0x1>;
+ };
+ center_leakage: center-leakage@19 {
+ reg = <0x19 0x1>;
+ };
+ cpul_leakage: cpu-leakage@1a {
+ reg = <0x1a 0x1>;
+ };
+ logic_leakage: logic-leakage@1b {
+ reg = <0x1b 0x1>;
+ };
+ wafer_info: wafer-info@1c {
+ reg = <0x1c 0x1>;
+ };
+ };
+
pmucru: pmu-clock-controller@ff750000 {
compatible = "rockchip,rk3399-pmucru";
reg = <0x0 0xff750000 0x0 0x1000>;
@@ -741,7 +1063,7 @@
<&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>,
<&cru PCLK_PERIHP>,
<&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>,
- <&cru PCLK_PERILP0>,
+ <&cru PCLK_PERILP0>, <&cru ACLK_CCI>,
<&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>;
assigned-clock-rates =
<594000000>, <800000000>,
@@ -749,7 +1071,7 @@
<150000000>, <75000000>,
<37500000>,
<100000000>, <100000000>,
- <50000000>,
+ <50000000>, <600000000>,
<100000000>, <50000000>;
};
@@ -764,6 +1086,40 @@
status = "disabled";
};
+ u2phy0: usb2-phy@e450 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe450 0x10>;
+ clocks = <&cru SCLK_USB2PHY0_REF>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ clock-output-names = "clk_usbphy0_480m";
+ status = "disabled";
+
+ u2phy0_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ status = "disabled";
+ };
+ };
+
+ u2phy1: usb2-phy@e460 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe460 0x10>;
+ clocks = <&cru SCLK_USB2PHY1_REF>;
+ clock-names = "phyclk";
+ #clock-cells = <0>;
+ clock-output-names = "clk_usbphy1_480m";
+ status = "disabled";
+
+ u2phy1_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ status = "disabled";
+ };
+ };
+
emmc_phy: phy@f780 {
compatible = "rockchip,rk3399-emmc-phy";
reg = <0xf780 0x24>;
@@ -772,19 +1128,85 @@
#phy-cells = <0>;
status = "disabled";
};
+
+ pcie_phy: pcie-phy {
+ compatible = "rockchip,rk3399-pcie-phy";
+ clocks = <&cru SCLK_PCIEPHY_REF>;
+ clock-names = "refclk";
+ #phy-cells = <0>;
+ resets = <&cru SRST_PCIEPHY>;
+ reset-names = "phy";
+ status = "disabled";
+ };
};
- watchdog@ff840000 {
+ tcphy0: phy@ff7c0000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff7c0000 0x0 0x40000>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ resets = <&cru SRST_UPHY0>,
+ <&cru SRST_UPHY0_PIPE_L00>,
+ <&cru SRST_P_UPHY0_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,grf = <&grf>;
+ rockchip,typec-conn-dir = <0xe580 0 16>;
+ rockchip,usb3tousb2-en = <0xe580 3 19>;
+ rockchip,external-psm = <0xe588 14 30>;
+ rockchip,pipe-status = <0xe5c0 0 0>;
+ status = "disabled";
+
+ tcphy0_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy0_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
+
+ tcphy1: phy@ff800000 {
+ compatible = "rockchip,rk3399-typec-phy";
+ reg = <0x0 0xff800000 0x0 0x40000>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ clock-names = "tcpdcore", "tcpdphy-ref";
+ assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>;
+ assigned-clock-rates = <50000000>;
+ resets = <&cru SRST_UPHY1>,
+ <&cru SRST_UPHY1_PIPE_L00>,
+ <&cru SRST_P_UPHY1_TCPHY>;
+ reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,grf = <&grf>;
+ rockchip,typec-conn-dir = <0xe58c 0 16>;
+ rockchip,usb3tousb2-en = <0xe58c 3 19>;
+ rockchip,external-psm = <0xe594 14 30>;
+ rockchip,pipe-status = <0xe5c0 16 16>;
+ status = "disabled";
+
+ tcphy1_dp: dp-port {
+ #phy-cells = <0>;
+ };
+
+ tcphy1_usb3: usb3-port {
+ #phy-cells = <0>;
+ };
+ };
+
+ watchdog@ff848000 {
compatible = "snps,dw-wdt";
- reg = <0x0 0xff840000 0x0 0x100>;
+ reg = <0x0 0xff848000 0x0 0x100>;
clocks = <&cru PCLK_WDT>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH 0>;
};
rktimer: rktimer@ff850000 {
compatible = "rockchip,rk3399-timer";
reg = <0x0 0xff850000 0x0 0x1000>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
clock-names = "pclk", "timer";
};
@@ -792,7 +1214,7 @@
spdif: spdif@ff870000 {
compatible = "rockchip,rk3399-spdif";
reg = <0x0 0xff870000 0x0 0x1000>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dmac_bus 7>;
dma-names = "tx";
clock-names = "mclk", "hclk";
@@ -806,7 +1228,7 @@
compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
reg = <0x0 0xff880000 0x0 0x1000>;
rockchip,grf = <&grf>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dmac_bus 0>, <&dmac_bus 1>;
dma-names = "tx", "rx";
clock-names = "i2s_clk", "i2s_hclk";
@@ -819,7 +1241,7 @@
i2s1: i2s@ff890000 {
compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
reg = <0x0 0xff890000 0x0 0x1000>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dmac_bus 2>, <&dmac_bus 3>;
dma-names = "tx", "rx";
clock-names = "i2s_clk", "i2s_hclk";
@@ -832,7 +1254,7 @@
i2s2: i2s@ff8a0000 {
compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
reg = <0x0 0xff8a0000 0x0 0x1000>;
- interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dmac_bus 4>, <&dmac_bus 5>;
dma-names = "tx", "rx";
clock-names = "i2s_clk", "i2s_hclk";
@@ -852,7 +1274,7 @@
compatible = "rockchip,gpio-bank";
reg = <0x0 0xff720000 0x0 0x100>;
clocks = <&pmucru PCLK_GPIO0_PMU>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <0x2>;
@@ -865,7 +1287,7 @@
compatible = "rockchip,gpio-bank";
reg = <0x0 0xff730000 0x0 0x100>;
clocks = <&pmucru PCLK_GPIO1_PMU>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <0x2>;
@@ -878,7 +1300,7 @@
compatible = "rockchip,gpio-bank";
reg = <0x0 0xff780000 0x0 0x100>;
clocks = <&cru PCLK_GPIO2>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <0x2>;
@@ -891,7 +1313,7 @@
compatible = "rockchip,gpio-bank";
reg = <0x0 0xff788000 0x0 0x100>;
clocks = <&cru PCLK_GPIO3>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <0x2>;
@@ -904,7 +1326,7 @@
compatible = "rockchip,gpio-bank";
reg = <0x0 0xff790000 0x0 0x100>;
clocks = <&cru PCLK_GPIO4>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
gpio-controller;
#gpio-cells = <0x2>;
@@ -955,6 +1377,72 @@
drive-strength = <13>;
};
+ clock {
+ clk_32k: clk-32k {
+ rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
+ gmac {
+ rgmii_pins: rgmii-pins {
+ rockchip,pins =
+ /* mac_txclk */
+ <3 17 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_rxclk */
+ <3 14 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_mdio */
+ <3 13 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_txen */
+ <3 12 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_clk */
+ <3 11 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxdv */
+ <3 9 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_mdc */
+ <3 8 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxd1 */
+ <3 7 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxd0 */
+ <3 6 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <3 5 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_txd0 */
+ <3 4 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_rxd3 */
+ <3 3 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxd2 */
+ <3 2 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_txd3 */
+ <3 1 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_txd2 */
+ <3 0 RK_FUNC_1 &pcfg_pull_none_13ma>;
+ };
+
+ rmii_pins: rmii-pins {
+ rockchip,pins =
+ /* mac_mdio */
+ <3 13 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_txen */
+ <3 12 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_clk */
+ <3 11 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxer */
+ <3 10 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxdv */
+ <3 9 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_mdc */
+ <3 8 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxd1 */
+ <3 7 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_rxd0 */
+ <3 6 RK_FUNC_1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <3 5 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ /* mac_txd0 */
+ <3 4 RK_FUNC_1 &pcfg_pull_none_13ma>;
+ };
+ };
+
i2c0 {
i2c0_xfer: i2c0-xfer {
rockchip,pins =
@@ -1326,5 +1814,18 @@
<1 14 RK_FUNC_1 &pcfg_pull_none>;
};
};
+
+ pcie {
+ pcie_clkreqn: pci-clkreqn {
+ rockchip,pins =
+ <2 26 RK_FUNC_2 &pcfg_pull_none>;
+ };
+
+ pcie_clkreqnb: pci-clkreqnb {
+ rockchip,pins =
+ <4 24 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
};
};
diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile
index 299b67ec4d44..5538598b302c 100644
--- a/arch/arm64/boot/dts/socionext/Makefile
+++ b/arch/arm64/boot/dts/socionext/Makefile
@@ -1,4 +1,6 @@
-dtb-$(CONFIG_ARCH_UNIPHIER) += uniphier-ph1-ld20-ref.dtb
+dtb-$(CONFIG_ARCH_UNIPHIER) += \
+ uniphier-ld11-ref.dtb \
+ uniphier-ld20-ref.dtb
always := $(dtb-y)
clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
new file mode 100644
index 000000000000..7168cf818ad8
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -0,0 +1,100 @@
+/*
+ * Device Tree Source for UniPhier LD11 Reference Board
+ *
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+/include/ "uniphier-ld11.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
+/include/ "uniphier-support-card.dtsi"
+
+/ {
+ model = "UniPhier LD11 Reference Board";
+ compatible = "socionext,uniphier-ld11-ref", "socionext,uniphier-ld11";
+
+ memory {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ };
+};
+
+&ethsc {
+ interrupts = <0 48 4>;
+};
+
+&serial0 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
+
+&usb2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
new file mode 100644
index 000000000000..3eb4c42ce7b9
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -0,0 +1,338 @@
+/*
+ * Device Tree Source for UniPhier LD11 SoC
+ *
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/memreserve/ 0x80000000 0x00000008; /* cpu-release-addr */
+
+/ {
+ compatible = "socionext,uniphier-ld11";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 0x000>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x80000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 0x001>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x80000000>;
+ };
+ };
+
+ clocks {
+ refclk: ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 4>,
+ <1 14 4>,
+ <1 11 4>,
+ <1 10 4>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+
+ serial0: serial@54006800 {
+ compatible = "socionext,uniphier-uart";
+ status = "disabled";
+ reg = <0x54006800 0x40>;
+ interrupts = <0 33 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ clocks = <&peri_clk 0>;
+ };
+
+ serial1: serial@54006900 {
+ compatible = "socionext,uniphier-uart";
+ status = "disabled";
+ reg = <0x54006900 0x40>;
+ interrupts = <0 35 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ clocks = <&peri_clk 1>;
+ };
+
+ serial2: serial@54006a00 {
+ compatible = "socionext,uniphier-uart";
+ status = "disabled";
+ reg = <0x54006a00 0x40>;
+ interrupts = <0 37 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ clocks = <&peri_clk 2>;
+ };
+
+ serial3: serial@54006b00 {
+ compatible = "socionext,uniphier-uart";
+ status = "disabled";
+ reg = <0x54006b00 0x40>;
+ interrupts = <0 177 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ clocks = <&peri_clk 3>;
+ };
+
+ i2c0: i2c@58780000 {
+ compatible = "socionext,uniphier-fi2c";
+ status = "disabled";
+ reg = <0x58780000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 41 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c0>;
+ clocks = <&peri_clk 4>;
+ clock-frequency = <100000>;
+ };
+
+ i2c1: i2c@58781000 {
+ compatible = "socionext,uniphier-fi2c";
+ status = "disabled";
+ reg = <0x58781000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 42 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clocks = <&peri_clk 5>;
+ clock-frequency = <100000>;
+ };
+
+ i2c2: i2c@58782000 {
+ compatible = "socionext,uniphier-fi2c";
+ reg = <0x58782000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 43 4>;
+ clocks = <&peri_clk 6>;
+ clock-frequency = <400000>;
+ };
+
+ i2c3: i2c@58783000 {
+ compatible = "socionext,uniphier-fi2c";
+ status = "disabled";
+ reg = <0x58783000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 44 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ clocks = <&peri_clk 7>;
+ clock-frequency = <100000>;
+ };
+
+ i2c4: i2c@58784000 {
+ compatible = "socionext,uniphier-fi2c";
+ status = "disabled";
+ reg = <0x58784000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 45 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ clocks = <&peri_clk 8>;
+ clock-frequency = <100000>;
+ };
+
+ i2c5: i2c@58785000 {
+ compatible = "socionext,uniphier-fi2c";
+ reg = <0x58785000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 25 4>;
+ clocks = <&peri_clk 9>;
+ clock-frequency = <400000>;
+ };
+
+ system_bus: system-bus@58c00000 {
+ compatible = "socionext,uniphier-system-bus";
+ status = "disabled";
+ reg = <0x58c00000 0x400>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_system_bus>;
+ };
+
+ smpctrl@59800000 {
+ compatible = "socionext,uniphier-smpctrl";
+ reg = <0x59801000 0x400>;
+ };
+
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-perictrl",
+ "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ peri_clk: clock {
+ compatible = "socionext,uniphier-ld11-peri-clock";
+ #clock-cells = <1>;
+ };
+
+ peri_rst: reset {
+ compatible = "socionext,uniphier-ld11-peri-reset";
+ #reset-cells = <1>;
+ };
+ };
+
+ usb0: usb@5a800100 {
+ compatible = "socionext,uniphier-ehci", "generic-ehci";
+ status = "disabled";
+ reg = <0x5a800100 0x100>;
+ interrupts = <0 243 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0>;
+ clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+ resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
+ };
+
+ usb1: usb@5a810100 {
+ compatible = "socionext,uniphier-ehci", "generic-ehci";
+ status = "disabled";
+ reg = <0x5a810100 0x100>;
+ interrupts = <0 244 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>;
+ clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+ resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
+ };
+
+ usb2: usb@5a820100 {
+ compatible = "socionext,uniphier-ehci", "generic-ehci";
+ status = "disabled";
+ reg = <0x5a820100 0x100>;
+ interrupts = <0 245 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb2>;
+ clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+ resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
+ };
+
+ mioctrl@5b3e0000 {
+ compatible = "socionext,uniphier-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x5b3e0000 0x800>;
+
+ mio_clk: clock {
+ compatible = "socionext,uniphier-ld11-mio-clock";
+ #clock-cells = <1>;
+ };
+
+ mio_rst: reset {
+ compatible = "socionext,uniphier-ld11-mio-reset";
+ #reset-cells = <1>;
+ resets = <&sys_rst 7>;
+ };
+ };
+
+ soc-glue@5f800000 {
+ compatible = "socionext,uniphier-soc-glue",
+ "simple-mfd", "syscon";
+ reg = <0x5f800000 0x2000>;
+
+ pinctrl: pinctrl {
+ compatible = "socionext,uniphier-ld11-pinctrl";
+ };
+ };
+
+ gic: interrupt-controller@5fe00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x5fe00000 0x10000>, /* GICD */
+ <0x5fe40000 0x80000>; /* GICR */
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <1 9 4>;
+ };
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-ld11-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ sys_clk: clock {
+ compatible = "socionext,uniphier-ld11-clock";
+ #clock-cells = <1>;
+ };
+
+ sys_rst: reset {
+ compatible = "socionext,uniphier-ld11-reset";
+ #reset-cells = <1>;
+ };
+ };
+ };
+};
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index 2adad8c8cd27..609162a1a322 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD20 Reference Board
+ * Device Tree Source for UniPhier LD20 Reference Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -43,13 +44,13 @@
*/
/dts-v1/;
-/include/ "uniphier-ph1-ld20.dtsi"
+/include/ "uniphier-ld20.dtsi"
/include/ "uniphier-ref-daughter.dtsi"
/include/ "uniphier-support-card.dtsi"
/ {
- model = "UniPhier PH1-LD20 Reference Board";
- compatible = "socionext,ph1-ld20-ref", "socionext,ph1-ld20";
+ model = "UniPhier LD20 Reference Board";
+ compatible = "socionext,uniphier-ld20-ref", "socionext,uniphier-ld20";
memory {
device_type = "memory";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index d73bdc8c9115..08fd7cf7769c 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -1,7 +1,8 @@
/*
- * Device Tree Source for UniPhier PH1-LD20 SoC
+ * Device Tree Source for UniPhier LD20 SoC
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -45,7 +46,7 @@
/memreserve/ 0x80000000 0x00000008; /* cpu-release-addr */
/ {
- compatible = "socionext,ph1-ld20";
+ compatible = "socionext,uniphier-ld20";
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&gic>;
@@ -113,18 +114,6 @@
#clock-cells = <0>;
clock-frequency = <25000000>;
};
-
- uart_clk: uart_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <58820000>;
- };
-
- i2c_clk: i2c_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <50000000>;
- };
};
timer {
@@ -148,7 +137,7 @@
interrupts = <0 33 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 0>;
};
serial1: serial@54006900 {
@@ -158,7 +147,7 @@
interrupts = <0 35 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 1>;
};
serial2: serial@54006a00 {
@@ -168,7 +157,7 @@
interrupts = <0 37 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 2>;
};
serial3: serial@54006b00 {
@@ -178,7 +167,7 @@
interrupts = <0 177 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- clocks = <&uart_clk>;
+ clocks = <&peri_clk 3>;
};
i2c0: i2c@58780000 {
@@ -190,7 +179,7 @@
interrupts = <0 41 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 4>;
clock-frequency = <100000>;
};
@@ -203,7 +192,7 @@
interrupts = <0 42 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 5>;
clock-frequency = <100000>;
};
@@ -213,7 +202,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 43 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 6>;
clock-frequency = <400000>;
};
@@ -226,7 +215,7 @@
interrupts = <0 44 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 7>;
clock-frequency = <100000>;
};
@@ -239,7 +228,7 @@
interrupts = <0 45 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 8>;
clock-frequency = <100000>;
};
@@ -249,7 +238,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 25 4>;
- clocks = <&i2c_clk>;
+ clocks = <&peri_clk 9>;
clock-frequency = <400000>;
};
@@ -259,6 +248,8 @@
reg = <0x58c00000 0x400>;
#address-cells = <2>;
#size-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_system_bus>;
};
smpctrl@59800000 {
@@ -266,12 +257,45 @@
reg = <0x59801000 0x400>;
};
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-mioctrl",
+ "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ mio_clk: clock {
+ compatible = "socionext,uniphier-ld20-mio-clock";
+ #clock-cells = <1>;
+ };
+
+ mio_rst: reset {
+ compatible = "socionext,uniphier-ld20-mio-reset";
+ #reset-cells = <1>;
+ };
+ };
+
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-perictrl",
+ "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ peri_clk: clock {
+ compatible = "socionext,uniphier-ld20-peri-clock";
+ #clock-cells = <1>;
+ };
+
+ peri_rst: reset {
+ compatible = "socionext,uniphier-ld20-peri-reset";
+ #reset-cells = <1>;
+ };
+ };
+
soc-glue@5f800000 {
- compatible = "simple-mfd", "syscon";
+ compatible = "socionext,uniphier-soc-glue",
+ "simple-mfd", "syscon";
reg = <0x5f800000 0x2000>;
pinctrl: pinctrl {
- compatible = "socionext,uniphier-ld20-pinctrl";
+ compatible = "socionext,uniphier-ld20-pinctrl";
};
};
@@ -283,6 +307,22 @@
#interrupt-cells = <3>;
interrupts = <1 9 4>;
};
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ sys_clk: clock {
+ compatible = "socionext,uniphier-ld20-clock";
+ #clock-cells = <1>;
+ };
+
+ sys_rst: reset {
+ compatible = "socionext,uniphier-ld20-reset";
+ #reset-cells = <1>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index acb0527fdc4a..358089687a69 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -29,7 +29,7 @@
memory {
device_type = "memory";
- reg = <0x0 0x0 0x40000000>;
+ reg = <0x0 0x0 0x0 0x40000000>;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 3e2e51fbd2bc..68a908334c7b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -14,7 +14,7 @@
/ {
compatible = "xlnx,zynqmp";
#address-cells = <2>;
- #size-cells = <1>;
+ #size-cells = <2>;
cpus {
#address-cells = <1>;
@@ -51,6 +51,7 @@
pmu {
compatible = "arm,armv8-pmuv3";
+ interrupt-parent = <&gic>;
interrupts = <0 143 4>,
<0 144 4>,
<0 145 4>,
@@ -75,15 +76,15 @@
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
- ranges;
+ ranges = <0 0 0 0 0xffffffff>;
gic: interrupt-controller@f9010000 {
compatible = "arm,gic-400", "arm,cortex-a15-gic";
#interrupt-cells = <3>;
reg = <0x0 0xf9010000 0x10000>,
- <0x0 0xf902f000 0x2000>,
+ <0x0 0xf9020000 0x20000>,
<0x0 0xf9040000 0x20000>,
- <0x0 0xf906f000 0x2000>;
+ <0x0 0xf9060000 0x20000>;
interrupt-controller;
interrupt-parent = <&gic>;
interrupts = <1 9 0xf04>;
@@ -93,14 +94,14 @@
amba: amba {
compatible = "simple-bus";
#address-cells = <2>;
- #size-cells = <1>;
+ #size-cells = <2>;
ranges;
can0: can@ff060000 {
compatible = "xlnx,zynq-can-1.0";
status = "disabled";
clock-names = "can_clk", "pclk";
- reg = <0x0 0xff060000 0x1000>;
+ reg = <0x0 0xff060000 0x0 0x1000>;
interrupts = <0 23 4>;
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
@@ -111,7 +112,7 @@
compatible = "xlnx,zynq-can-1.0";
status = "disabled";
clock-names = "can_clk", "pclk";
- reg = <0x0 0xff070000 0x1000>;
+ reg = <0x0 0xff070000 0x0 0x1000>;
interrupts = <0 24 4>;
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
@@ -123,7 +124,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 57 4>, <0 57 4>;
- reg = <0x0 0xff0b0000 0x1000>;
+ reg = <0x0 0xff0b0000 0x0 0x1000>;
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -134,7 +135,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 59 4>, <0 59 4>;
- reg = <0x0 0xff0c0000 0x1000>;
+ reg = <0x0 0xff0c0000 0x0 0x1000>;
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -145,7 +146,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 61 4>, <0 61 4>;
- reg = <0x0 0xff0d0000 0x1000>;
+ reg = <0x0 0xff0d0000 0x0 0x1000>;
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -156,7 +157,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 63 4>, <0 63 4>;
- reg = <0x0 0xff0e0000 0x1000>;
+ reg = <0x0 0xff0e0000 0x0 0x1000>;
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -170,7 +171,7 @@
interrupts = <0 16 4>;
interrupt-controller;
#interrupt-cells = <2>;
- reg = <0x0 0xff0a0000 0x1000>;
+ reg = <0x0 0xff0a0000 0x0 0x1000>;
};
i2c0: i2c@ff020000 {
@@ -178,7 +179,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
- reg = <0x0 0xff020000 0x1000>;
+ reg = <0x0 0xff020000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
};
@@ -188,15 +189,54 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
- reg = <0x0 0xff030000 0x1000>;
+ reg = <0x0 0xff030000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
};
+ pcie: pcie@fd0e0000 {
+ compatible = "xlnx,nwl-pcie-2.11";
+ status = "disabled";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ msi-controller;
+ device_type = "pci";
+ interrupt-parent = <&gic>;
+ interrupts = <0 118 4>,
+ <0 117 4>,
+ <0 116 4>,
+ <0 115 4>, /* MSI_1 [63...32] */
+ <0 114 4>; /* MSI_0 [31...0] */
+ interrupt-names = "misc", "dummy", "intx",
+ "msi1", "msi0";
+ msi-parent = <&pcie>;
+ reg = <0x0 0xfd0e0000 0x0 0x1000>,
+ <0x0 0xfd480000 0x0 0x1000>,
+ <0x80 0x00000000 0x0 0x1000000>;
+ reg-names = "breg", "pcireg", "cfg";
+ ranges = <0x02000000 0x00000000 0xe0000000 0x00000000
+ 0xe0000000 0x00000000 0x10000000
+ /* non-prefetchable memory */
+ 0x43000000 0x00000006 0x00000000 0x00000006
+ 0x00000000 0x00000002 0x00000000>;
+ /* prefetchable memory */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &pcie_intc 0x1>,
+ <0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
+ <0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
+ <0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+ pcie_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
sata: ahci@fd0c0000 {
compatible = "ceva,ahci-1v84";
status = "disabled";
- reg = <0x0 0xfd0c0000 0x2000>;
+ reg = <0x0 0xfd0c0000 0x0 0x2000>;
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
};
@@ -206,7 +246,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 48 4>;
- reg = <0x0 0xff160000 0x1000>;
+ reg = <0x0 0xff160000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
};
@@ -215,13 +255,13 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 49 4>;
- reg = <0x0 0xff170000 0x1000>;
+ reg = <0x0 0xff170000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
};
smmu: smmu@fd800000 {
compatible = "arm,mmu-500";
- reg = <0x0 0xfd800000 0x20000>;
+ reg = <0x0 0xfd800000 0x0 0x20000>;
#global-interrupts = <1>;
interrupt-parent = <&gic>;
interrupts = <0 157 4>,
@@ -236,7 +276,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 19 4>;
- reg = <0x0 0xff040000 0x1000>;
+ reg = <0x0 0xff040000 0x0 0x1000>;
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
@@ -247,7 +287,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 20 4>;
- reg = <0x0 0xff050000 0x1000>;
+ reg = <0x0 0xff050000 0x0 0x1000>;
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
@@ -258,7 +298,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
- reg = <0x0 0xff110000 0x1000>;
+ reg = <0x0 0xff110000 0x0 0x1000>;
timer-width = <32>;
};
@@ -267,7 +307,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
- reg = <0x0 0xff120000 0x1000>;
+ reg = <0x0 0xff120000 0x0 0x1000>;
timer-width = <32>;
};
@@ -276,7 +316,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
- reg = <0x0 0xff130000 0x1000>;
+ reg = <0x0 0xff130000 0x0 0x1000>;
timer-width = <32>;
};
@@ -285,7 +325,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
- reg = <0x0 0xff140000 0x1000>;
+ reg = <0x0 0xff140000 0x0 0x1000>;
timer-width = <32>;
};
@@ -294,7 +334,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 21 4>;
- reg = <0x0 0xff000000 0x1000>;
+ reg = <0x0 0xff000000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
};
@@ -303,7 +343,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 22 4>;
- reg = <0x0 0xff010000 0x1000>;
+ reg = <0x0 0xff010000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
};
@@ -312,7 +352,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 65 4>;
- reg = <0x0 0xfe200000 0x40000>;
+ reg = <0x0 0xfe200000 0x0 0x40000>;
clock-names = "clk_xin", "clk_ahb";
};
@@ -321,7 +361,7 @@
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 70 4>;
- reg = <0x0 0xfe300000 0x40000>;
+ reg = <0x0 0xfe300000 0x0 0x40000>;
clock-names = "clk_xin", "clk_ahb";
};
@@ -329,8 +369,8 @@
compatible = "cdns,wdt-r1p2";
status = "disabled";
interrupt-parent = <&gic>;
- interrupts = <0 52 1>;
- reg = <0x0 0xfd4d0000 0x1000>;
+ interrupts = <0 113 1>;
+ reg = <0x0 0xfd4d0000 0x0 0x1000>;
timeout-sec = <10>;
};
};
diff --git a/arch/arm64/boot/dts/zte/Makefile b/arch/arm64/boot/dts/zte/Makefile
new file mode 100644
index 000000000000..667806620f59
--- /dev/null
+++ b/arch/arm64/boot/dts/zte/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_ZX) += zx296718-evb.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/zte/zx296718-evb.dts b/arch/arm64/boot/dts/zte/zx296718-evb.dts
new file mode 100644
index 000000000000..e164ff6de5fc
--- /dev/null
+++ b/arch/arm64/boot/dts/zte/zx296718-evb.dts
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 ZTE Corporation.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "zx296718.dtsi"
+
+/ {
+ model = "ZTE zx296718 evaluation board";
+ compatible = "zte,zx296718-evb", "zte,zx296718";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x40000000>;
+ };
+
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
new file mode 100644
index 000000000000..a223066f24ce
--- /dev/null
+++ b/arch/arm64/boot/dts/zte/zx296718.dtsi
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2016 ZTE Corporation.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "zte,zx296718";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&gic>;
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ core2 {
+ cpu = <&cpu2>;
+ };
+ core3 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ clk24k: clk-24k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000>;
+ clock-output-names = "rtcclk";
+ };
+
+ osc32k: clk-osc32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32000>;
+ clock-output-names = "osc32k";
+ };
+
+ osc12m: clk-osc12m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12000000>;
+ clock-output-names = "osc12m";
+ };
+
+ osc24m: clk-osc24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc24m";
+ };
+
+ osc25m: clk-osc25m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "osc25m";
+ };
+
+ osc60m: clk-osc60m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <60000000>;
+ clock-output-names = "osc60m";
+ };
+
+ osc99m: clk-osc99m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <99000000>;
+ clock-output-names = "osc99m";
+ };
+
+ osc125m: clk-osc125m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "osc125m";
+ };
+
+ osc198m: clk-osc198m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <198000000>;
+ clock-output-names = "osc198m";
+ };
+
+ pll_audio: clk-pll-884m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <884000000>;
+ clock-output-names = "pll_audio";
+ };
+
+ pll_ddr: clk-pll-932m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <932000000>;
+ clock-output-names = "pll_ddr";
+ };
+
+ pll_hsic: clk-pll-960m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <960000000>;
+ clock-output-names = "pll_hsic";
+ };
+
+ pll_mac: clk-pll-1000m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000000>;
+ clock-output-names = "pll_mac";
+ };
+
+ pll_vga: clk-pll-1073m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1073000000>;
+ clock-output-names = "pll_vga";
+ };
+
+ pll_mm0: clk-pll-1188m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1188000000>;
+ clock-output-names = "pll_mm0";
+ };
+
+ pll_mm1: clk-pll-1296m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1296000000>;
+ clock-output-names = "pll_mm1";
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ gic: interrupt-controller@2a00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ #redistributor-regions = <6>;
+ redistributor-stride = <0x0 0x40000>;
+ interrupt-controller;
+ reg = <0x02a00000 0x10000>,
+ <0x02b00000 0x20000>,
+ <0x02b20000 0x20000>,
+ <0x02b40000 0x20000>,
+ <0x02b60000 0x20000>,
+ <0x02b80000 0x20000>,
+ <0x02ba0000 0x20000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges;
+
+ aon_sysctrl: aon-sysctrl@116000 {
+ compatible = "zte,zx296718-aon-sysctrl", "syscon";
+ reg = <0x116000 0x1000>;
+ };
+
+ uart0: uart@11f000 {
+ compatible = "arm,pl011", "arm,primecell";
+ arm,primecell-periphid = <0x001feffe>;
+ reg = <0x11f000 0x1000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24m>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ dma: dma-controller@1460000 {
+ compatible = "zte,zx296702-dma";
+ reg = <0x01460000 0x1000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24m>;
+ clock-names = "dmaclk";
+ #dma-cells = <1>;
+ dma-channels = <32>;
+ dma-requests = <32>;
+ };
+
+ sysctrl: sysctrl@1463000 {
+ compatible = "zte,zx296718-sysctrl", "syscon";
+ reg = <0x1463000 0x1000>;
+ };
+ };
+};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index eadf4855ad2d..dab2cb0c1f1c 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -57,10 +57,12 @@ CONFIG_ARCH_UNIPHIER=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VULCAN=y
CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZX=y
CONFIG_ARCH_ZYNQMP=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_IOV=y
+CONFIG_PCI_AARDVARK=y
CONFIG_PCIE_RCAR=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
@@ -82,6 +84,7 @@ CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPUFREQ_DT=y
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_NET=y
@@ -120,6 +123,14 @@ CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
CONFIG_BPF_JIT=y
+CONFIG_BT=m
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
@@ -139,7 +150,8 @@ CONFIG_VIRTIO_BLK=y
CONFIG_SRAM=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -160,6 +172,8 @@ CONFIG_VIRTIO_NET=y
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
CONFIG_MACB=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IGBVF=y
@@ -186,6 +200,7 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
@@ -211,12 +226,12 @@ CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_SERIAL_MVEBU_UART=y
CONFIG_VIRTIO_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
CONFIG_I2C_QUP=y
CONFIG_I2C_TEGRA=y
@@ -224,6 +239,7 @@ CONFIG_I2C_UNIPHIER_F=y
CONFIG_I2C_RCAR=y
CONFIG_I2C_CROS_EC_TUNNEL=y
CONFIG_SPI=y
+CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
@@ -234,6 +250,7 @@ CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
CONFIG_PINCTRL_MSM8916=y
CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_QDF2XXX=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
@@ -252,10 +269,14 @@ CONFIG_SENSORS_INA2XX=m
CONFIG_SENSORS_ARM_SCPI=y
CONFIG_THERMAL=y
CONFIG_THERMAL_EMULATION=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_RENESAS_WDT=y
CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
CONFIG_MFD_MAX77620=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_SEC_CORE=y
@@ -264,6 +285,7 @@ CONFIG_REGULATOR=y
CONFIG_MFD_CROS_EC=y
CONFIG_MFD_CROS_EC_I2C=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI655X=y
CONFIG_REGULATOR_MAX77620=y
CONFIG_REGULATOR_PWM=y
@@ -274,6 +296,8 @@ CONFIG_DRM=m
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_TEGRA=m
CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_HISI_KIRIN=m
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
CONFIG_BACKLIGHT_GENERIC=m
@@ -291,6 +315,7 @@ CONFIG_SND_SOC_AK4613=y
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_XHCI_RCAR=y
CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_XHCI_TEGRA=y
@@ -300,6 +325,7 @@ CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC2=y
CONFIG_USB_DWC3=y
@@ -311,15 +337,18 @@ CONFIG_USB_HSIC_USB3503=y
CONFIG_USB_MSM_OTG=y
CONFIG_USB_ULPI=y
CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_TEGRA=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_MMC_DW_K3=y
@@ -345,6 +374,8 @@ CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
CONFIG_TEGRA20_APB_DMA=y
CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
CONFIG_RCAR_DMAC=y
CONFIG_VFIO=y
CONFIG_VFIO_PCI=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index fc2a0cb47b2c..f8ae6d6e4767 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -80,6 +80,19 @@
#include <linux/stringify.h>
#include <asm/barrier.h>
+#define read_gicreg(r) \
+ ({ \
+ u64 reg; \
+ asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+ reg; \
+ })
+
+#define write_gicreg(v,r) \
+ do { \
+ u64 __val = (v); \
+ asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+ } while (0)
+
/*
* Low-level accessors
*
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4b5c977af465..2a2752b5b6aa 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -50,7 +50,7 @@
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (UL(1) << 10)
#define HCR_FB (UL(1) << 9)
-#define HCR_VA (UL(1) << 8)
+#define HCR_VSE (UL(1) << 8)
#define HCR_VI (UL(1) << 7)
#define HCR_VF (UL(1) << 6)
#define HCR_AMO (UL(1) << 5)
@@ -80,7 +80,7 @@
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7561f63f1c28..18f746551bf6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,10 +20,15 @@
#include <asm/virt.h>
+#define ARM_EXIT_WITH_SERROR_BIT 31
+#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
+#define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
+
#define ARM_EXCEPTION_IRQ 0
-#define ARM_EXCEPTION_TRAP 1
+#define ARM_EXCEPTION_EL1_SERROR 1
+#define ARM_EXCEPTION_TRAP 2
/* The hyp-stub will return this for any kvm_call_hyp() call */
-#define ARM_EXCEPTION_HYP_GONE 2
+#define ARM_EXCEPTION_HYP_GONE 3
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4cdeae3b17c6..fd9d5fd788f5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -38,6 +38,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -147,6 +148,16 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.esr_el2;
}
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+
+ if (esr & ESR_ELx_CV)
+ return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+
+ return -1;
+}
+
static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.far_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3eda975837d0..bd94e6766759 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -290,15 +290,15 @@ struct kvm_vcpu_arch {
#endif
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 hvc_exit_stat;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
u64 mmio_exit_user;
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index cff510574fae..b18e852d27e8 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -123,6 +123,7 @@ typeof(orig) * __hyp_text fname(void) \
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index dff109871f2a..a79b969c26fc 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -162,12 +162,6 @@ void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
-static inline void kvm_clean_pgd(pgd_t *pgd) {}
-static inline void kvm_clean_pmd(pmd_t *pmd) {}
-static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
-static inline void kvm_clean_pte(pte_t *pte) {}
-static inline void kvm_clean_pte_entry(pte_t *pte) {}
-
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
pte_val(pte) |= PTE_S2_RDWR;
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 3e4f1a45b125..252a6d9c1da5 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -24,6 +24,7 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/smp.h>
+#include <linux/serial_core.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
@@ -206,7 +207,7 @@ void __init acpi_boot_table_init(void)
if (param_acpi_off ||
(!param_acpi_on && !param_acpi_force &&
of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
- return;
+ goto done;
/*
* ACPI is disabled at this point. Enable it in order to parse
@@ -226,6 +227,14 @@ void __init acpi_boot_table_init(void)
if (!param_acpi_force)
disable_acpi();
}
+
+done:
+ if (acpi_disabled) {
+ if (earlycon_init_is_deferred)
+ early_init_dt_scan_chosen_stdout();
+ } else {
+ parse_spcr(earlycon_init_is_deferred);
+ }
}
#ifdef CONFIG_ACPI_APEI
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5ce9b2929e0d..1105aab1e6d6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -122,6 +122,7 @@ SECTIONS
ENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
HYPERVISOR_TEXT
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 9c9edc98d271..6eaf12c1d627 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -16,7 +16,7 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
-config KVM_ARM_VGIC_V3
+config KVM_ARM_VGIC_V3_ITS
bool
config KVM
@@ -34,7 +34,7 @@ config KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
- select KVM_ARM_VGIC_V3
+ select KVM_ARM_VGIC_V3_ITS
select KVM_ARM_PMU if HW_PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 695eb3c7ef41..d50a82a16ff6 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -16,9 +16,10 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/e
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
-kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
+kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fa96fe2bd469..a204adf29f0a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -170,9 +170,32 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
exit_handle_fn exit_handler;
+ if (ARM_SERROR_PENDING(exception_index)) {
+ u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+
+ /*
+ * HVC/SMC already have an adjusted PC, which we need
+ * to correct in order to return to after having
+ * injected the SError.
+ */
+ if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
+ hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+ u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+ *vcpu_pc(vcpu) -= adj;
+ }
+
+ kvm_inject_vabt(vcpu);
+ return 1;
+ }
+
+ exception_index = ARM_EXCEPTION_CODE(exception_index);
+
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
+ case ARM_EXCEPTION_EL1_SERROR:
+ kvm_inject_vabt(vcpu);
+ return 1;
case ARM_EXCEPTION_TRAP:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 0c85febcc1eb..aaf42ae8d8c3 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -5,9 +5,9 @@
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 33342a776ec7..4ba5c9095d03 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -131,9 +131,7 @@ void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
}
-static u32 __hyp_text __debug_read_mdcr_el2(void)
+u32 __hyp_text __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
-
-__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ce9e5e5f28cf..12ee62d6d410 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -55,79 +55,111 @@
*/
ENTRY(__guest_enter)
// x0: vcpu
- // x1: host/guest context
- // x2-x18: clobbered by macros
+ // x1: host context
+ // x2-x17: clobbered by macros
+ // x18: guest context
// Store the host regs
save_callee_saved_regs x1
- // Preserve vcpu & host_ctxt for use at exit time
- stp x0, x1, [sp, #-16]!
+ // Store the host_ctxt for use at exit time
+ str x1, [sp, #-16]!
- add x1, x0, #VCPU_CONTEXT
+ add x18, x0, #VCPU_CONTEXT
- // Prepare x0-x1 for later restore by pushing them onto the stack
- ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
- stp x2, x3, [sp, #-16]!
+ // Restore guest regs x0-x17
+ ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
+ ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
+ ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
+ ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
+ ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
+ ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
+ ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
+ ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
+ ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
- // x2-x18
- ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
- ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
- ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
- ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
- ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
- ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
- ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
- ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
- ldr x18, [x1, #CPU_XREG_OFFSET(18)]
-
- // x19-x29, lr
- restore_callee_saved_regs x1
-
- // Last bits of the 64bit state
- ldp x0, x1, [sp], #16
+ // Restore guest regs x19-x29, lr
+ restore_callee_saved_regs x18
+
+ // Restore guest reg x18
+ ldr x18, [x18, #CPU_XREG_OFFSET(18)]
// Do not touch any register after this!
eret
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
- // x0: vcpu
- // x1: return code
- // x2-x3: free
- // x4-x29,lr: vcpu regs
- // vcpu x0-x3 on the stack
+ // x0: return code
+ // x1: vcpu
+ // x2-x29,lr: vcpu regs
+ // vcpu x0-x1 on the stack
- add x2, x0, #VCPU_CONTEXT
+ add x1, x1, #VCPU_CONTEXT
- stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
- stp x6, x7, [x2, #CPU_XREG_OFFSET(6)]
- stp x8, x9, [x2, #CPU_XREG_OFFSET(8)]
- stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
- stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
- stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
- stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
- str x18, [x2, #CPU_XREG_OFFSET(18)]
+ ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
- ldp x6, x7, [sp], #16 // x2, x3
- ldp x4, x5, [sp], #16 // x0, x1
+ // Store the guest regs x2 and x3
+ stp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
- stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
- stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+ // Retrieve the guest regs x0-x1 from the stack
+ ldp x2, x3, [sp], #16 // x0, x1
+
+ // Store the guest regs x0-x1 and x4-x18
+ stp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
+ stp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
+ stp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
+ stp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
+ stp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
+ stp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
+ stp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
+ stp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
+ str x18, [x1, #CPU_XREG_OFFSET(18)]
+
+ // Store the guest regs x19-x29, lr
+ save_callee_saved_regs x1
- save_callee_saved_regs x2
+ // Restore the host_ctxt from the stack
+ ldr x2, [sp], #16
- // Restore vcpu & host_ctxt from the stack
- // (preserving return code in x1)
- ldp x0, x2, [sp], #16
// Now restore the host regs
restore_callee_saved_regs x2
- mov x0, x1
- ret
+ // If we have a pending asynchronous abort, now is the
+ // time to find out. From your VAXorcist book, page 666:
+ // "Threaten me not, oh Evil one! For I speak with
+ // the power of DEC, and I command thee to show thyself!"
+ mrs x2, elr_el2
+ mrs x3, esr_el2
+ mrs x4, spsr_el2
+ mov x5, x0
+
+ dsb sy // Synchronize against in-flight ld/st
+ msr daifclr, #4 // Unmask aborts
+
+ // This is our single instruction exception window. A pending
+ // SError is guaranteed to occur at the earliest when we unmask
+ // it, and at the latest just after the ISB.
+ .global abort_guest_exit_start
+abort_guest_exit_start:
+
+ isb
+
+ .global abort_guest_exit_end
+abort_guest_exit_end:
+
+ // If the exception took place, restore the EL1 exception
+ // context so that we can report some information.
+ // Merge the exception code with the SError pending bit.
+ tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ msr elr_el2, x2
+ msr esr_el2, x3
+ msr spsr_el2, x4
+ orr x0, x0, x5
+1: ret
ENDPROC(__guest_exit)
ENTRY(__fpsimd_guest_restore)
+ stp x2, x3, [sp, #-16]!
stp x4, lr, [sp, #-16]!
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index f6d9694ae3b1..4e92399f7105 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -27,16 +27,6 @@
.text
.pushsection .hyp.text, "ax"
-.macro save_x0_to_x3
- stp x0, x1, [sp, #-16]!
- stp x2, x3, [sp, #-16]!
-.endm
-
-.macro restore_x0_to_x3
- ldp x2, x3, [sp], #16
- ldp x0, x1, [sp], #16
-.endm
-
.macro do_el2_call
/*
* Shuffle the parameters before calling the function
@@ -79,23 +69,23 @@ ENTRY(__kvm_hyp_teardown)
ENDPROC(__kvm_hyp_teardown)
el1_sync: // Guest trapped into EL2
- save_x0_to_x3
+ stp x0, x1, [sp, #-16]!
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x1, esr_el2
alternative_else
mrs x1, esr_el1
alternative_endif
- lsr x2, x1, #ESR_ELx_EC_SHIFT
+ lsr x0, x1, #ESR_ELx_EC_SHIFT
- cmp x2, #ESR_ELx_EC_HVC64
+ cmp x0, #ESR_ELx_EC_HVC64
b.ne el1_trap
- mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
- cbnz x3, el1_trap // called HVC
+ mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
+ cbnz x1, el1_trap // called HVC
/* Here, we're pretty sure the host called HVC. */
- restore_x0_to_x3
+ ldp x0, x1, [sp], #16
cmp x0, #HVC_GET_VECTORS
b.ne 1f
@@ -113,24 +103,51 @@ alternative_endif
el1_trap:
/*
- * x1: ESR
- * x2: ESR_EC
+ * x0: ESR_EC
*/
/* Guest accessed VFP/SIMD registers, save host, restore Guest */
- cmp x2, #ESR_ELx_EC_FP_ASIMD
+ cmp x0, #ESR_ELx_EC_FP_ASIMD
b.eq __fpsimd_guest_restore
- mrs x0, tpidr_el2
- mov x1, #ARM_EXCEPTION_TRAP
+ mrs x1, tpidr_el2
+ mov x0, #ARM_EXCEPTION_TRAP
b __guest_exit
el1_irq:
- save_x0_to_x3
- mrs x0, tpidr_el2
- mov x1, #ARM_EXCEPTION_IRQ
+ stp x0, x1, [sp, #-16]!
+ mrs x1, tpidr_el2
+ mov x0, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+el1_error:
+ stp x0, x1, [sp, #-16]!
+ mrs x1, tpidr_el2
+ mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
+el2_error:
+ /*
+ * Only two possibilities:
+ * 1) Either we come from the exit path, having just unmasked
+ * PSTATE.A: change the return code to an EL2 fault, and
+ * carry on, as we're already in a sane state to handle it.
+ * 2) Or we come from anywhere else, and that's a bug: we panic.
+ *
+ * For (1), x0 contains the original return code and x1 doesn't
+ * contain anything meaningful at that stage. We can reuse them
+ * as temp registers.
+ * For (2), who cares?
+ */
+ mrs x0, elr_el2
+ adr x1, abort_guest_exit_start
+ cmp x0, x1
+ adr x1, abort_guest_exit_end
+ ccmp x0, x1, #4, ne
+ b.ne __hyp_panic
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
+ eret
+
ENTRY(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
@@ -155,11 +172,9 @@ ENDPROC(\label)
invalid_vector el2h_sync_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
- invalid_vector el2h_error_invalid
invalid_vector el1_sync_invalid
invalid_vector el1_irq_invalid
invalid_vector el1_fiq_invalid
- invalid_vector el1_error_invalid
.ltorg
@@ -174,15 +189,15 @@ ENTRY(__kvm_hyp_vector)
ventry el2h_sync_invalid // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h
- ventry el2h_error_invalid // Error EL2h
+ ventry el2_error // Error EL2h
ventry el1_sync // Synchronous 64-bit EL1
ventry el1_irq // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
- ventry el1_error_invalid // Error 64-bit EL1
+ ventry el1_error // Error 64-bit EL1
ventry el1_sync // Synchronous 32-bit EL1
ventry el1_irq // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
- ventry el1_error_invalid // Error 32-bit EL1
+ ventry el1_error // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
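The el2_error path above only tolerates an SError whose ELR_EL2 lands on the single-instruction window opened in __guest_exit. A minimal C sketch of that disposition logic, assuming the window labels are passed in as plain addresses; the function name is illustrative and the real logic is the cmp/ccmp sequence in the hunk, not this helper:

static unsigned long example_el2_error_disposition(unsigned long elr_el2,
						   unsigned long window_start,
						   unsigned long window_end)
{
	/* Anything outside abort_guest_exit_start..abort_guest_exit_end is a
	 * hypervisor bug: the assembly branches to __hyp_panic in that case. */
	if (elr_el2 != window_start && elr_el2 != window_end)
		return 0;

	/* Otherwise the SError is merged into the exit code by __guest_exit. */
	return 1UL << ARM_EXIT_WITH_SERROR_BIT;
}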
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 5a84b4562603..83037cd62d01 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -16,7 +16,10 @@
*/
#include <linux/types.h>
+#include <linux/jump_label.h>
+
#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
static bool __hyp_text __fpsimd_enabled_nvhe(void)
@@ -109,6 +112,15 @@ static hyp_alternate_select(__deactivate_traps_arch,
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
+ /*
+ * If we pended a virtual abort, preserve it until it gets
+ * cleared. See D1.14.3 (Virtual Interrupts) for details, but
+ * the crucial bit is "On taking a vSError interrupt,
+ * HCR_EL2.VSE is cleared to 0."
+ */
+ if (vcpu->arch.hcr_el2 & HCR_VSE)
+ vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+
__deactivate_traps_arch()();
write_sysreg(0, hstr_el2);
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
@@ -126,17 +138,13 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
write_sysreg(0, vttbr_el2);
}
-static hyp_alternate_select(__vgic_call_save_state,
- __vgic_v2_save_state, __vgic_v3_save_state,
- ARM64_HAS_SYSREG_GIC_CPUIF);
-
-static hyp_alternate_select(__vgic_call_restore_state,
- __vgic_v2_restore_state, __vgic_v3_restore_state,
- ARM64_HAS_SYSREG_GIC_CPUIF);
-
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
- __vgic_call_save_state()(vcpu);
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_save_state(vcpu);
+ else
+ __vgic_v2_save_state(vcpu);
+
write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}
@@ -149,7 +157,10 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
val |= vcpu->arch.irq_lines;
write_sysreg(val, hcr_el2);
- __vgic_call_restore_state()(vcpu);
+ if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ __vgic_v3_restore_state(vcpu);
+ else
+ __vgic_v2_restore_state(vcpu);
}
static bool __hyp_text __true_value(void)
@@ -232,7 +243,22 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+{
+ *vcpu_pc(vcpu) = read_sysreg_el2(elr);
+
+ if (vcpu_mode_is_32bit(vcpu)) {
+ vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
+ } else {
+ *vcpu_pc(vcpu) += 4;
+ }
+
+ write_sysreg_el2(*vcpu_pc(vcpu), elr);
+}
+
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -267,9 +293,43 @@ again:
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
+ /*
+ * We're using the raw exception code in order to only process
+ * the trap if no SError is pending. We will come back to the
+ * same PC once the SError has been injected, and replay the
+ * trapping instruction.
+ */
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
goto again;
+ if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+ exit_code == ARM_EXCEPTION_TRAP) {
+ bool valid;
+
+ valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
+ kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_dabt_isextabt(vcpu) &&
+ !kvm_vcpu_dabt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+
+ if (ret == 1) {
+ __skip_instr(vcpu);
+ goto again;
+ }
+
+ if (ret == -1) {
+ /* Promote an illegal access to an SError */
+ __skip_instr(vcpu);
+ exit_code = ARM_EXCEPTION_EL1_SERROR;
+ }
+
+ /* 0 falls through to be handled out of EL2 */
+ }
+ }
+
fp_enabled = __fpsimd_enabled();
__sysreg_save_guest_state(guest_ctxt);
@@ -293,8 +353,6 @@ again:
return exit_code;
}
-__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
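The hunks above replace the hyp_alternate_select() indirection with a static key (kvm_vgic_global_state.gicv3_cpuif). A minimal sketch of that pattern under illustrative names; the probe-side code that flips the key is not part of this diff:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_gicv3_cpuif);

static void example_vgic_probe(bool have_gicv3_cpuif)
{
	/* Flipped once at init; later checks become a patched branch/NOP. */
	if (have_gicv3_cpuif)
		static_branch_enable(&example_gicv3_cpuif);
}

static void example_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&example_gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);
}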
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index be8177cdd3bf..9cc0ea784ae6 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -17,7 +17,7 @@
#include <asm/kvm_hyp.h>
-static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
@@ -48,10 +48,7 @@ static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
write_sysreg(0, vttbr_el2);
}
-__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
- phys_addr_t ipa);
-
-static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
dsb(ishst);
@@ -67,14 +64,10 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
}
-__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
-
-static void __hyp_text __tlb_flush_vm_context(void)
+void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
asm volatile("tlbi alle1is \n"
"ic ialluis ": : );
dsb(ish);
}
-
-__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 898c0e6aedd4..da6a8cfa54a0 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -231,3 +231,15 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
else
inject_undef64(vcpu);
}
+
+/**
+ * kvm_inject_vabt - inject an async abort / SError into the guest
+ * @vcpu: The VCPU to receive the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+{
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
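A hedged usage sketch for kvm_inject_vabt(): a consumer in the arm64 exit path would typically convert a guest SError exit into a virtual abort and resume the guest. Only kvm_inject_vabt() and ARM_EXCEPTION_EL1_SERROR come from the hunks above; the handler name and its return convention are illustrative assumptions.

static int example_handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	if (exception_index == ARM_EXCEPTION_EL1_SERROR) {
		/* Sets HCR_EL2.VSE; __deactivate_traps() above preserves it
		 * until the guest actually takes the vSError. */
		kvm_inject_vabt(vcpu);
		return 1;	/* resume the guest */
	}

	return 0;	/* let the rest of the exit handling run */
}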
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index 682b2478691a..eb4a3fcfbaff 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -11,7 +11,8 @@
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/module.h> /* print_modules */
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index a4589176bed5..17f2730eb497 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
KPROBES_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index a4b7edac8f10..b3977e9208a3 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -10,7 +10,7 @@
*/
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/pagemap.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 28c63fea786d..3c1bd640042a 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -26,7 +26,7 @@ config BLACKFIN
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select ARCH_HAVE_CUSTOM_GPIO_H
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select HAVE_UID16
select HAVE_UNDERSCORE_SYMBOL_PREFIX
select VIRT_TO_BUS
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index d920b959ff3a..68069a120055 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS
#ifndef CONFIG_SCHEDULE_L1
SCHED_TEXT
#endif
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 78ecb50bafc8..8a2543c654b3 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -59,18 +59,7 @@ static struct miscdevice coreb_dev = {
.name = "coreb",
.fops = &coreb_fops,
};
-
-static int __init bf561_coreb_init(void)
-{
- return misc_register(&coreb_dev);
-}
-module_init(bf561_coreb_init);
-
-static void __exit bf561_coreb_exit(void)
-{
- misc_deregister(&coreb_dev);
-}
-module_exit(bf561_coreb_exit);
+module_misc_device(coreb_dev);
MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
MODULE_DESCRIPTION("BF561 Core B Support");
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
index 50bc10f97bcb..a1a5c166bc9b 100644
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -70,6 +70,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 7552c2557506..979586261520 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -43,6 +43,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.text.__*)
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 7e958d829ec9..aa6e573d57da 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
*(.text..tlbmiss)
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
#ifdef CONFIG_DEBUG_INFO
INIT_TEXT
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index cb5dfb02c88d..7f11da1b895e 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
_stext = . ;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
#if defined(CONFIG_ROMKERNEL)
*(.int_redirect)
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 5f268c1071b3..ec87e67feb19 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -50,6 +50,7 @@ SECTIONS
_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f565ad376142..65d4bb2b6685 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -269,6 +269,22 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long c, old, dec;
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
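The ia64 hunk above adds atomic64_dec_if_positive(). A standalone C11 illustration of the same compare-and-swap loop semantics, written against <stdatomic.h> rather than the kernel primitives so it can be compiled and run on its own:

#include <stdatomic.h>
#include <stdio.h>

static long long dec_if_positive(_Atomic long long *v)
{
	long long c = atomic_load(v);

	for (;;) {
		long long dec = c - 1;

		if (dec < 0)
			return dec;	/* would go negative: leave *v untouched */
		if (atomic_compare_exchange_weak(v, &c, dec))
			return dec;	/* success */
		/* on failure, c has been reloaded with the current value */
	}
}

int main(void)
{
	_Atomic long long v = 1;

	printf("%lld\n", dec_if_positive(&v));	/* 0, and v is now 0 */
	printf("%lld\n", dec_if_positive(&v));	/* -1, and v stays 0 */
	return 0;
}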
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index dc506b05ffbd..f89d20c97412 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS {
__end_ivt_text = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.gnu.linkonce.t*)
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 018e4a711d79..ad1fe56455aa 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -31,6 +31,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 498b567f007b..d140206d5d29 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -14,7 +14,6 @@ config M68K
select GENERIC_IOMAP
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 967260f2eb1c..d2219f30b78f 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -62,6 +62,7 @@ config MCPU32
config M68020
bool "68020 support"
depends on MMU
+ select FPU
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68020
@@ -72,6 +73,7 @@ config M68020
config M68030
bool "68030 support"
depends on MMU && !MMU_SUN3
+ select FPU
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68030
@@ -81,6 +83,7 @@ config M68030
config M68040
bool "68040 support"
depends on MMU && !MMU_SUN3
+ select FPU
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68LC040
@@ -91,6 +94,7 @@ config M68040
config M68060
bool "68060 support"
depends on MMU && !MMU_SUN3
+ select FPU
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68060
@@ -259,6 +263,7 @@ config M547x
bool "MCF547x"
select M54xx
select MMU_COLDFIRE if MMU
+ select FPU if MMU
select HAVE_CACHE_CB
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
@@ -268,6 +273,7 @@ config M547x
config M548x
bool "MCF548x"
select MMU_COLDFIRE if MMU
+ select FPU if MMU
select M54xx
select HAVE_CACHE_CB
select HAVE_MBAR
diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
index fddfdccae63b..1e3c7e9193d1 100644
--- a/arch/m68k/coldfire/clk.c
+++ b/arch/m68k/coldfire/clk.c
@@ -101,6 +101,10 @@ EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
unsigned long flags;
+
+ if (!clk)
+ return;
+
spin_lock_irqsave(&clk_lock, flags);
if ((--clk->enabled == 0) && clk->clk_ops)
clk->clk_ops->disable(clk);
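The clk.c hunk above makes clk_disable() a no-op for a NULL clock, matching what most clk API users expect. A minimal usage sketch; the function name below is illustrative:

#include <linux/clk.h>

static void example_teardown(struct clk *optional_clk)
{
	/* Safe even if the optional clock was never obtained (NULL). */
	clk_disable(optional_clk);
}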
diff --git a/arch/m68k/coldfire/head.S b/arch/m68k/coldfire/head.S
index 73d92ea0ce65..bdb472c48401 100644
--- a/arch/m68k/coldfire/head.S
+++ b/arch/m68k/coldfire/head.S
@@ -280,10 +280,10 @@ _clear_bss:
movel %d0,m68k_cputype /* Mark us as a ColdFire */
movel #MMU_COLDFIRE,%d0
movel %d0,m68k_mmutype
- movel #FPU_COLDFIRE,%d0
- movel %d0,m68k_fputype
- movel #MACH_M54XX,%d0
- movel %d0,m68k_machtype /* Mark us as a 54xx machine */
+ movel #FPUTYPE,%d0
+ movel %d0,m68k_fputype /* Mark FPU type */
+ movel #MACHINE,%d0
+ movel %d0,m68k_machtype /* Mark machine type */
lea init_task,%a2 /* Set "current" init task */
#endif
diff --git a/arch/m68k/coldfire/m528x.c b/arch/m68k/coldfire/m528x.c
index 45e947aeade4..12f9e370d8dd 100644
--- a/arch/m68k/coldfire/m528x.c
+++ b/arch/m68k/coldfire/m528x.c
@@ -102,14 +102,14 @@ void wildfiremod_halt(void)
printk(KERN_INFO "WildFireMod hibernating...\n");
/* Set portE.5 to Digital IO */
- MCF5282_GPIO_PEPAR &= ~(1 << (5 * 2));
+ writew(readw(MCFGPIO_PEPAR) & ~(1 << (5 * 2)), MCFGPIO_PEPAR);
/* Make portE.5 an output */
- MCF5282_GPIO_DDRE |= (1 << 5);
+ writeb(readb(MCFGPIO_PDDR_E) | (1 << 5), MCFGPIO_PDDR_E);
/* Now toggle portE.5 from low to high */
- MCF5282_GPIO_PORTE &= ~(1 << 5);
- MCF5282_GPIO_PORTE |= (1 << 5);
+ writeb(readb(MCFGPIO_PODR_E) & ~(1 << 5), MCFGPIO_PODR_E);
+ writeb(readb(MCFGPIO_PODR_E) | (1 << 5), MCFGPIO_PODR_E);
printk(KERN_EMERG "Failed to hibernate. Halting!\n");
}
diff --git a/arch/m68k/coldfire/m53xx.c b/arch/m68k/coldfire/m53xx.c
index 80879a7fe3d5..2502f63960bc 100644
--- a/arch/m68k/coldfire/m53xx.c
+++ b/arch/m68k/coldfire/m53xx.c
@@ -271,9 +271,6 @@ void __init config_BSP(char *commandp, int size)
#define NAND_FLASH_ADDRESS (0xD0000000)
-int sys_clk_khz = 0;
-int sys_clk_mhz = 0;
-
void wtm_init(void);
void scm_init(void);
void gpio_init(void);
@@ -286,9 +283,8 @@ int get_sys_clock (void);
asmlinkage void __init sysinit(void)
{
- sys_clk_khz = clock_pll(0, 0);
- sys_clk_mhz = sys_clk_khz/1000;
-
+ clock_pll(0, 0);
+
wtm_init();
scm_init();
gpio_init();
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index c32f76791f48..386df3b68cdf 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -25,7 +25,6 @@
#include <asm/m54xxgpt.h>
#ifdef CONFIG_MMU
#include <asm/mmu_context.h>
-#include <linux/pfn.h>
#endif
/***************************************************************************/
@@ -78,47 +77,10 @@ static void mcf54xx_reset(void)
/***************************************************************************/
-#ifdef CONFIG_MMU
-
-unsigned long num_pages;
-
-static void __init mcf54xx_bootmem_alloc(void)
-{
- unsigned long start_pfn;
- unsigned long memstart;
-
- /* _rambase and _ramend will be naturally page aligned */
- m68k_memory[0].addr = _rambase;
- m68k_memory[0].size = _ramend - _rambase;
-
- /* compute total pages in system */
- num_pages = PFN_DOWN(_ramend - _rambase);
-
- /* page numbers */
- memstart = PAGE_ALIGN(_ramstart);
- min_low_pfn = PFN_DOWN(_rambase);
- start_pfn = PFN_DOWN(memstart);
- max_pfn = max_low_pfn = PFN_DOWN(_ramend);
- high_memory = (void *)_ramend;
-
- m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
- module_fixup(NULL, __start_fixup, __stop_fixup);
-
- /* setup bootmem data */
- m68k_setup_node(0);
- memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
- min_low_pfn, max_low_pfn);
- free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
-}
-
-#endif /* CONFIG_MMU */
-
-/***************************************************************************/
-
void __init config_BSP(char *commandp, int size)
{
#ifdef CONFIG_MMU
- mcf54xx_bootmem_alloc();
+ cf_bootmem_alloc();
mmu_context_init();
#endif
mach_reset = mcf54xx_reset;
diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h
index 8e213267f8e7..81c91af8ec6c 100644
--- a/arch/m68k/include/asm/bootinfo.h
+++ b/arch/m68k/include/asm/bootinfo.h
@@ -22,6 +22,12 @@ extern void save_bootinfo(const struct bi_record *bi);
static inline void save_bootinfo(const struct bi_record *bi) {}
#endif
+#ifdef CONFIG_UBOOT
+void process_uboot_commandline(char *commandp, int size);
+#else
+static inline void process_uboot_commandline(char *commandp, int size) {}
+#endif
+
#endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h
index cc798ab9524b..64f60be47066 100644
--- a/arch/m68k/include/asm/m5441xsim.h
+++ b/arch/m68k/include/asm/m5441xsim.h
@@ -10,6 +10,10 @@
#define CPU_NAME "COLDFIRE(m5441x)"
#define CPU_INSTR_PER_JIFFY 2
#define MCF_BUSCLK (MCF_CLK / 2)
+#define MACHINE MACH_M5441X
+#define FPUTYPE 0
+#define IOMEMBASE 0xe0000000
+#define IOMEMSIZE 0x20000000
#include <asm/m54xxacr.h>
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 59e171063c2f..c6ac05cda282 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -94,7 +94,7 @@
* register region as non-cacheable. And then we map all our RAM as
* cacheable and supervisor access only.
*/
-#define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+#define ACR0_MODE (ACR_BA(IOMEMBASE)+ACR_ADMSK(IOMEMSIZE)+ \
ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
#if defined(CONFIG_CACHE_COPYBACK)
#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
diff --git a/arch/m68k/include/asm/m54xxsim.h b/arch/m68k/include/asm/m54xxsim.h
index a5fbd17ab0a5..73d937ff36eb 100644
--- a/arch/m68k/include/asm/m54xxsim.h
+++ b/arch/m68k/include/asm/m54xxsim.h
@@ -8,6 +8,10 @@
#define CPU_NAME "COLDFIRE(m54xx)"
#define CPU_INSTR_PER_JIFFY 2
#define MCF_BUSCLK (MCF_CLK / 2)
+#define MACHINE MACH_M54XX
+#define FPUTYPE FPU_COLDFIRE
+#define IOMEMBASE MCF_MBAR
+#define IOMEMSIZE 0x01000000
#include <asm/m54xxacr.h>
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
index 8824236e303f..10f9930ec49a 100644
--- a/arch/m68k/include/asm/mcfmmu.h
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -105,6 +105,7 @@ static inline void mmu_write(u32 a, u32 v)
__asm__ __volatile__ ("nop");
}
+void cf_bootmem_alloc(void);
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
#endif
diff --git a/arch/m68k/include/asm/nettel.h b/arch/m68k/include/asm/nettel.h
index 2a7a7667d807..926375d538ec 100644
--- a/arch/m68k/include/asm/nettel.h
+++ b/arch/m68k/include/asm/nettel.h
@@ -92,7 +92,7 @@ static __inline__ unsigned int mcf_getppdata(void)
static __inline__ void mcf_setppdata(unsigned int mask, unsigned int bits)
{
- write((readw(MCFSIM_PBDAT) & ~mask) | bits, MCFSIM_PBDAT);
+ writew((readw(MCFSIM_PBDAT) & ~mask) | bits, MCFSIM_PBDAT);
}
#endif
diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h
index cdeb26a015b0..a48cf544c762 100644
--- a/arch/m68k/include/uapi/asm/bootinfo.h
+++ b/arch/m68k/include/uapi/asm/bootinfo.h
@@ -81,6 +81,7 @@ struct mem_info {
#define MACH_Q40 10
#define MACH_SUN3X 11
#define MACH_M54XX 12
+#define MACH_M5441X 13
/*
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index e47778f8588d..8a1c4d3f91c8 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_HAS_DMA) += dma.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o
+obj-$(CONFIG_UBOOT) += uboot.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index c55ff719fa72..4ba1ae7345c3 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -203,11 +203,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
}
/* Fill in the fpu structure for a core dump. */
-#ifdef CONFIG_FPU
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
- char fpustate[216];
-
if (FPU_IS_EMU) {
int i;
@@ -222,37 +219,40 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
return 1;
}
- /* First dump the fpu context to avoid protocol violation. */
- asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
- if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
- return 0;
+ if (IS_ENABLED(CONFIG_FPU)) {
+ char fpustate[216];
- if (CPU_IS_COLDFIRE) {
- asm volatile ("fmovel %/fpiar,%0\n\t"
- "fmovel %/fpcr,%1\n\t"
- "fmovel %/fpsr,%2\n\t"
- "fmovemd %/fp0-%/fp7,%3"
- :
- : "m" (fpu->fpcntl[0]),
- "m" (fpu->fpcntl[1]),
- "m" (fpu->fpcntl[2]),
- "m" (fpu->fpregs[0])
- : "memory");
- } else {
- asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
- :
- : "m" (fpu->fpcntl[0])
- : "memory");
- asm volatile ("fmovemx %/fp0-%/fp7,%0"
- :
- : "m" (fpu->fpregs[0])
- : "memory");
+ /* First dump the fpu context to avoid protocol violation. */
+ asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+ return 0;
+
+ if (CPU_IS_COLDFIRE) {
+ asm volatile ("fmovel %/fpiar,%0\n\t"
+ "fmovel %/fpcr,%1\n\t"
+ "fmovel %/fpsr,%2\n\t"
+ "fmovemd %/fp0-%/fp7,%3"
+ :
+ : "m" (fpu->fpcntl[0]),
+ "m" (fpu->fpcntl[1]),
+ "m" (fpu->fpcntl[2]),
+ "m" (fpu->fpregs[0])
+ : "memory");
+ } else {
+ asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+ :
+ : "m" (fpu->fpcntl[0])
+ : "memory");
+ asm volatile ("fmovemx %/fp0-%/fp7,%0"
+ :
+ : "m" (fpu->fpregs[0])
+ : "memory");
+ }
}
return 1;
}
EXPORT_SYMBOL(dump_fpu);
-#endif /* CONFIG_FPU */
unsigned long get_wchan(struct task_struct *p)
{
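The dump_fpu() rework above trades an #ifdef CONFIG_FPU block for IS_ENABLED(CONFIG_FPU), so the FPU path is always parsed and type-checked but still discarded on FPU-less builds. A minimal sketch of the pattern; the helper below is illustrative:

#include <linux/kconfig.h>

static int example_fpu_frame_size(void)
{
	/* Both arms compile; the dead one is eliminated as a constant branch. */
	if (IS_ENABLED(CONFIG_FPU))
		return 216;	/* size of the fsave buffer used in dump_fpu() */

	return 0;
}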
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 50633c38f1e2..7a2c21212820 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -245,7 +245,7 @@ void __init setup_arch(char **cmdline_p)
* We should really do our own FPU check at startup.
* [what do we do with buggy 68LC040s? if we have problems
* with them, we should add a test to check_bugs() below] */
-#ifndef CONFIG_M68KFPU_EMU_ONLY
+#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU_ONLY)
/* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0;
@@ -274,6 +274,7 @@ void __init setup_arch(char **cmdline_p)
strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
m68k_command_line[CL_SIZE - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
+ process_uboot_commandline(&m68k_command_line[0], CL_SIZE);
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
@@ -341,6 +342,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_COLDFIRE
case MACH_M54XX:
+ case MACH_M5441X:
config_BSP(NULL, 0);
break;
#endif
@@ -548,7 +550,7 @@ module_init(proc_hardware_init);
void check_bugs(void)
{
-#ifndef CONFIG_M68KFPU_EMU
+#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
if (m68k_fputype == 0) {
pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 9309789215a8..8afe6f651c1c 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -34,6 +34,7 @@
#include <linux/rtc.h>
#include <asm/setup.h>
+#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
@@ -82,69 +83,6 @@ void (*mach_power_off)(void);
#define CPU_INSTR_PER_JIFFY 16
#endif
-#if defined(CONFIG_UBOOT)
-/*
- * parse_uboot_commandline
- *
- * Copies u-boot commandline arguments and store them in the proper linux
- * variables.
- *
- * Assumes:
- * _init_sp global contains the address in the stack pointer when the
- * kernel starts (see head.S::_start)
- *
- * U-Boot calling convention:
- * (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
- *
- * _init_sp can be parsed as such
- *
- * _init_sp+00 = u-boot cmd after jsr into kernel (skip)
- * _init_sp+04 = &kernel board_info (residual data)
- * _init_sp+08 = &initrd_start
- * _init_sp+12 = &initrd_end
- * _init_sp+16 = &cmd_start
- * _init_sp+20 = &cmd_end
- *
- * This also assumes that the memory locations pointed to are still
- * unmodified. U-boot places them near the end of external SDRAM.
- *
- * Argument(s):
- * commandp = the linux commandline arg container to fill.
- * size = the sizeof commandp.
- *
- * Returns:
- */
-static void __init parse_uboot_commandline(char *commandp, int size)
-{
- extern unsigned long _init_sp;
- unsigned long *sp;
- unsigned long uboot_kbd;
- unsigned long uboot_initrd_start, uboot_initrd_end;
- unsigned long uboot_cmd_start, uboot_cmd_end;
-
-
- sp = (unsigned long *)_init_sp;
- uboot_kbd = sp[1];
- uboot_initrd_start = sp[2];
- uboot_initrd_end = sp[3];
- uboot_cmd_start = sp[4];
- uboot_cmd_end = sp[5];
-
- if (uboot_cmd_start && uboot_cmd_end)
- strncpy(commandp, (const char *)uboot_cmd_start, size);
-#if defined(CONFIG_BLK_DEV_INITRD)
- if (uboot_initrd_start && uboot_initrd_end &&
- (uboot_initrd_end > uboot_initrd_start)) {
- initrd_start = uboot_initrd_start;
- initrd_end = uboot_initrd_end;
- ROOT_DEV = Root_RAM0;
- printk(KERN_INFO "initrd at 0x%lx:0x%lx\n",
- initrd_start, initrd_end);
- }
-#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
-}
-#endif /* #if defined(CONFIG_UBOOT) */
-
void __init setup_arch(char **cmdline_p)
{
int bootmap_size;
@@ -164,53 +102,38 @@ void __init setup_arch(char **cmdline_p)
command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
-#if defined(CONFIG_UBOOT)
- /* CONFIG_UBOOT and CONFIG_BOOTPARAM defined, concatenate cmdline */
- #if defined(CONFIG_BOOTPARAM)
- /* Add the whitespace separator */
- command_line[strlen(CONFIG_BOOTPARAM_STRING)] = ' ';
- /* Parse uboot command line into the rest of the buffer */
- parse_uboot_commandline(
- &command_line[(strlen(CONFIG_BOOTPARAM_STRING)+1)],
- (sizeof(command_line) -
- (strlen(CONFIG_BOOTPARAM_STRING)+1)));
- /* Only CONFIG_UBOOT defined, create cmdline */
- #else
- parse_uboot_commandline(&command_line[0], sizeof(command_line));
- #endif /* CONFIG_BOOTPARAM */
- command_line[sizeof(command_line) - 1] = 0;
-#endif /* CONFIG_UBOOT */
+ process_uboot_commandline(&command_line[0], sizeof(command_line));
- printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU_NAME "\n");
+ pr_info("uClinux with CPU " CPU_NAME "\n");
#ifdef CONFIG_UCDIMM
- printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n");
+ pr_info("uCdimm by Lineo, Inc. <www.lineo.com>\n");
#endif
#ifdef CONFIG_M68VZ328
- printk(KERN_INFO "M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n");
+ pr_info("M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n");
#endif
#ifdef CONFIG_COLDFIRE
- printk(KERN_INFO "COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n");
+ pr_info("COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n");
#ifdef CONFIG_M5307
- printk(KERN_INFO "Modified for M5307 by Dave Miller, dmiller@intellistor.com\n");
+ pr_info("Modified for M5307 by Dave Miller, dmiller@intellistor.com\n");
#endif
#ifdef CONFIG_ELITE
- printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
+ pr_info("Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
#endif
#endif
- printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
+ pr_info("Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
- printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n");
+ pr_info("TRG SuperPilot FLASH card support <info@trgnet.com>\n");
#endif
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
- printk(KERN_INFO "PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
+ pr_info("PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
#endif
#ifdef CONFIG_DRAGEN2
- printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
+ pr_info("DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
- printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
+ pr_info("Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
new file mode 100644
index 000000000000..b3536a82a262
--- /dev/null
+++ b/arch/m68k/kernel/uboot.c
@@ -0,0 +1,107 @@
+/*
+ * uboot.c -- uboot arguments support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/rtc.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+/*
+ * parse_uboot_commandline
+ *
+ * Copies u-boot commandline arguments and stores them in the proper linux
+ * variables.
+ *
+ * Assumes:
+ * _init_sp global contains the address in the stack pointer when the
+ * kernel starts (see head.S::_start)
+ *
+ * U-Boot calling convention:
+ * (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
+ *
+ * _init_sp can be parsed as such
+ *
+ * _init_sp+00 = u-boot cmd after jsr into kernel (skip)
+ * _init_sp+04 = &kernel board_info (residual data)
+ * _init_sp+08 = &initrd_start
+ * _init_sp+12 = &initrd_end
+ * _init_sp+16 = &cmd_start
+ * _init_sp+20 = &cmd_end
+ *
+ * This also assumes that the memory locations pointed to are still
+ * unmodified. U-boot places them near the end of external SDRAM.
+ *
+ * Argument(s):
+ * commandp = the linux commandline arg container to fill.
+ * size = the sizeof commandp.
+ *
+ * Returns:
+ */
+static void __init parse_uboot_commandline(char *commandp, int size)
+{
+ extern unsigned long _init_sp;
+ unsigned long *sp;
+ unsigned long uboot_kbd;
+ unsigned long uboot_initrd_start, uboot_initrd_end;
+ unsigned long uboot_cmd_start, uboot_cmd_end;
+
+ sp = (unsigned long *)_init_sp;
+ uboot_kbd = sp[1];
+ uboot_initrd_start = sp[2];
+ uboot_initrd_end = sp[3];
+ uboot_cmd_start = sp[4];
+ uboot_cmd_end = sp[5];
+
+ if (uboot_cmd_start && uboot_cmd_end)
+ strncpy(commandp, (const char *)uboot_cmd_start, size);
+#if defined(CONFIG_BLK_DEV_INITRD)
+ if (uboot_initrd_start && uboot_initrd_end &&
+ (uboot_initrd_end > uboot_initrd_start)) {
+ initrd_start = uboot_initrd_start;
+ initrd_end = uboot_initrd_end;
+ ROOT_DEV = Root_RAM0;
+ printk(KERN_INFO "initrd at 0x%lx:0x%lx\n",
+ initrd_start, initrd_end);
+ }
+#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
+}
+
+__init void process_uboot_commandline(char *commandp, int size)
+{
+ int len, n;
+
+ n = strnlen(commandp, size);
+ commandp += n;
+ len = size - n;
+ if (len) {
+ /* Add the whitespace separator */
+ *commandp++ = ' ';
+ len--;
+ }
+
+ parse_uboot_commandline(commandp, len);
+ commandp[size - 1] = 0;
+}
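The _init_sp layout documented above can be pictured as a small structure. The real code indexes sp[1]..sp[5] directly; the struct below is purely illustrative and does not exist in the kernel sources:

struct example_uboot_args {
	unsigned long return_addr;	/* sp[0]: u-boot return address, skipped */
	unsigned long board_info;	/* sp[1]: &kernel board_info (residual)  */
	unsigned long initrd_start;	/* sp[2] */
	unsigned long initrd_end;	/* sp[3] */
	unsigned long cmd_start;	/* sp[4] */
	unsigned long cmd_end;		/* sp[5] */
};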
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 06a763f49fd3..d2c8abf1c8c4 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -45,6 +45,7 @@ SECTIONS {
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
. = ALIGN(16);
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index d0993594f558..5b5ce1e4d1ed 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8080469ee6c1..fe5ea1974b16 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index f58fafe7e4c9..87131cd3bc8f 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -27,7 +27,7 @@ mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern unsigned long num_pages;
+unsigned long num_pages;
/*
* ColdFire paging_init derived from sun3.
@@ -150,6 +150,35 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
return 0;
}
+void __init cf_bootmem_alloc(void)
+{
+ unsigned long start_pfn;
+ unsigned long memstart;
+
+ /* _rambase and _ramend will be naturally page aligned */
+ m68k_memory[0].addr = _rambase;
+ m68k_memory[0].size = _ramend - _rambase;
+
+ /* compute total pages in system */
+ num_pages = PFN_DOWN(_ramend - _rambase);
+
+ /* page numbers */
+ memstart = PAGE_ALIGN(_ramstart);
+ min_low_pfn = PFN_DOWN(_rambase);
+ start_pfn = PFN_DOWN(memstart);
+ max_pfn = max_low_pfn = PFN_DOWN(_ramend);
+ high_memory = (void *)_ramend;
+
+ m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+
+ /* setup bootmem data */
+ m68k_setup_node(0);
+ memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
+ min_low_pfn, max_low_pfn);
+ free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
+}
+
/*
* Initialize the context management stuff.
* The following was taken from arch/ppc/mmu_context.c
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S
index 150ace92c7ad..e6c700eaf207 100644
--- a/arch/metag/kernel/vmlinux.lds.S
+++ b/arch/metag/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 0a47f0410554..289d0e7f3e3a 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS {
EXIT_TEXT
EXIT_CALL
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 81556b843a8e..7f696f97f9dd 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -632,10 +632,10 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
}
}
-/* Decide whether to display the domain number in /proc */
+/* Display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
- return 0;
+ return pci_domain_nr(bus);
}
/* This header fixup will do the resource fixup for all devices as they are
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 212ff92920d2..1a322c807f22 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -30,7 +30,6 @@ config MIPS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON64
select GENERIC_ATOMIC64 if !64BIT
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_DMA_CONTIGUOUS
select HAVE_DMA_API_DEBUG
select GENERIC_IRQ_PROBE
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 15e0fecbc300..6bf10e796553 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -51,7 +51,8 @@ extern int cp0_fdc_irq;
extern int get_c0_fdc_int(void);
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif /* _ASM_IRQ_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index b54bcadd8aec..07f58cfc1ab9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -107,35 +107,49 @@
#define KVM_INVALID_INST 0xdeadbeef
#define KVM_INVALID_ADDR 0xdeadbeef
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD (-1UL)
+#define KVM_HVA_ERR_RO_BAD (-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return IS_ERR_VALUE(addr);
+}
+
extern atomic_t kvm_mips_instance;
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
- u32 wait_exits;
- u32 cache_exits;
- u32 signal_exits;
- u32 int_exits;
- u32 cop_unusable_exits;
- u32 tlbmod_exits;
- u32 tlbmiss_ld_exits;
- u32 tlbmiss_st_exits;
- u32 addrerr_st_exits;
- u32 addrerr_ld_exits;
- u32 syscall_exits;
- u32 resvd_inst_exits;
- u32 break_inst_exits;
- u32 trap_inst_exits;
- u32 msa_fpe_exits;
- u32 fpe_exits;
- u32 msa_disabled_exits;
- u32 flush_dcache_exits;
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
+ u64 wait_exits;
+ u64 cache_exits;
+ u64 signal_exits;
+ u64 int_exits;
+ u64 cop_unusable_exits;
+ u64 tlbmod_exits;
+ u64 tlbmiss_ld_exits;
+ u64 tlbmiss_st_exits;
+ u64 addrerr_st_exits;
+ u64 addrerr_ld_exits;
+ u64 syscall_exits;
+ u64 resvd_inst_exits;
+ u64 break_inst_exits;
+ u64 trap_inst_exits;
+ u64 msa_fpe_exits;
+ u64 fpe_exits;
+ u64 msa_disabled_exits;
+ u64 flush_dcache_exits;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
};
struct kvm_arch_memory_slot {
@@ -314,6 +328,9 @@ struct kvm_vcpu_arch {
u32 guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm;
+ /* Guest ASID of last user mode execution */
+ unsigned int last_user_gasid;
+
int last_sched_cpu;
/* WAIT executed */
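The kvm_host.h hunk above swaps the PAGE_OFFSET-based bad-HVA test for IS_ERR_VALUE(), because under EVA a legitimate user VA can sit above PAGE_OFFSET. A minimal sketch, written against the definitions added above, of what the new check accepts and rejects; the helper and the sample address are illustrative:

static bool example_hva_checks(void)
{
	unsigned long user_va = 0x80001000UL;	/* may exceed PAGE_OFFSET under EVA */

	/* Only the top error range (-1UL, -2UL, ...) is treated as bad. */
	return !kvm_is_error_hva(user_va) &&
	       kvm_is_error_hva(KVM_HVA_ERR_BAD) &&
	       kvm_is_error_hva(KVM_HVA_ERR_RO_BAD);
}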
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 70128d3f770a..9e9e94415d08 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -673,8 +673,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
#endif
/*
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
index 70a4a2f173ff..b86d1ae07125 100644
--- a/arch/mips/include/asm/uprobes.h
+++ b/arch/mips/include/asm/uprobes.h
@@ -42,16 +42,4 @@ struct arch_uprobe_task {
unsigned long saved_trap_nr;
};
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
- struct mm_struct *mm, unsigned long addr);
-extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-extern int arch_uprobe_exception_notify(struct notifier_block *self,
- unsigned long val, void *data);
-extern void arch_uprobe_abort_xol(struct arch_uprobe *aup,
- struct pt_regs *regs);
-extern unsigned long arch_uretprobe_hijack_return_addr(
- unsigned long trampoline_vaddr, struct pt_regs *regs);
-
#endif /* __ASM_UPROBES_H */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d2d061520a23..9514e5f2209f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -569,9 +569,16 @@ static void arch_dump_stack(void *info)
dump_stack();
}
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
- smp_call_function(arch_dump_stack, NULL, 1);
+ long this_cpu = get_cpu();
+
+ if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
+ dump_stack();
+
+ smp_call_function_many(mask, arch_dump_stack, NULL, 1);
+
+ put_cpu();
}
int mips_get_process_fp_mode(struct task_struct *task)
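One detail worth noting in the MIPS implementation above: smp_call_function_many() never runs the callback on the calling CPU, which is why the local dump_stack() is issued explicitly when this CPU is in the mask and not excluded. A hedged caller sketch; the wrapper name is illustrative:

static void example_backtrace_other_cpus(void)
{
	/* Ask every online CPU except ourselves for a backtrace. */
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
}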
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index a82c178d0bb9..d5de67591735 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -55,6 +55,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index e788515f766b..4db4c0370859 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -846,6 +846,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
return EMULATE_FAIL;
}
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu: VCPU with changed mappings.
+ * @tlb: TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ int cpu, i;
+ bool user;
+
+ /* No need to flush for entries which are already invalid */
+ if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+ return;
+ /* User address space doesn't need flushing for KSeg2/3 changes */
+ user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+ preempt_disable();
+
+ /*
+ * Probe the shadow host TLB for the entry being overwritten, if one
+ * matches, invalidate it
+ */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ /* Invalidate the whole ASID on other CPUs */
+ cpu = smp_processor_id();
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (user)
+ vcpu->arch.guest_user_asid[i] = 0;
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ }
+
+ preempt_enable();
+}
+
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
@@ -865,11 +906,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
}
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -898,11 +936,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -1026,6 +1060,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
+ int cpu, i;
/*
* Update PC and hold onto current PC in case there is
@@ -1127,16 +1162,31 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
u32 nasid =
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
- if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
- ((kvm_read_c0_guest_entryhi(cop0) &
+ if (((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
& KVM_ENTRYHI_ASID,
nasid);
- /* Blow away the shadow host TLBs */
- kvm_mips_flush_host_tlb(1);
+ /*
+ * Regenerate/invalidate kernel MMU
+ * context.
+ * The user MMU context will be
+ * regenerated lazily on re-entry to
+ * guest user if the guest ASID actually
+ * changes.
+ */
+ preempt_disable();
+ cpu = smp_processor_id();
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+ cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a6ea084b4d9d..ce961495b5e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -140,6 +140,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
void kvm_mips_free_vcpus(struct kvm *kvm)
{
unsigned int i;
@@ -411,6 +421,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
+/* Must be called with preemption disabled, just before entering guest */
+static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int cpu = smp_processor_id();
+ unsigned int gasid;
+
+ /*
+ * Lazy host ASID regeneration for guest user mode.
+ * If the guest ASID has changed since the last guest usermode
+ * execution, regenerate the host ASID so as to invalidate stale TLB
+ * entries.
+ */
+ if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+ gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+ if (gasid != vcpu->arch.last_user_gasid) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
+ vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ vcpu->arch.last_user_gasid = gasid;
+ }
+ }
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r = 0;
@@ -438,6 +473,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
htw_stop();
trace_kvm_enter(vcpu);
+
+ kvm_mips_check_asids(vcpu);
+
r = vcpu->arch.vcpu_run(run, vcpu);
trace_kvm_out(vcpu);
@@ -1551,6 +1589,8 @@ skip_emul:
if (ret == RESUME_GUEST) {
trace_kvm_reenter(vcpu);
+ kvm_mips_check_asids(vcpu);
+
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
* is live), restore FCR31 / MSACSR.
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 121008c0fcc9..03883ba806e2 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -250,15 +250,27 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ }
+
+ if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+ asid_version_mask(cpu)) {
+ u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+ KVM_ENTRYHI_ASID;
+
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
+ vcpu->arch.last_user_gasid = gasid;
newasid++;
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
vcpu->arch.guest_user_asid[cpu]);
}
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 091553942bcb..3a5484f9aa50 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -175,6 +175,24 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
+ } else if (KVM_GUEST_KERNEL_MODE(vcpu)
+ && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+ /*
+ * With EVA we may get a TLB exception instead of an address
+ * error when the guest performs MMIO to KSeg1 addresses.
+ */
+ kvm_debug("Emulate %s MMIO space\n",
+ store ? "Store to" : "Load from");
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Emulate %s MMIO space failed\n",
+ store ? "Store to" : "Load from");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ }
} else {
kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
store ? "ST" : "LD", cause, opc, badvaddr);
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 13c4814c29f8..2d5f1c3f1afb 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -30,6 +30,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
index 3e411c644824..f362b2224ee7 100755
--- a/arch/nios2/boot/dts/10m50_devboard.dts
+++ b/arch/nios2/boot/dts/10m50_devboard.dts
@@ -83,6 +83,7 @@
fifo-size = <32>;
reg-io-width = <4>;
reg-shift = <2>;
+ tx-threshold = <16>;
};
sysid: sysid@18001528 {
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
index e23e89539967..6a8045bb1a77 100644
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -37,6 +37,7 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index d936de4c07ca..d68b9ede8423 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -47,6 +47,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index af12c2db9bb8..71c4a3aa3752 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,12 +10,12 @@ config PARISC
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
+ select HAVE_MEMBLOCK
+ select NO_BOOTMEM
select BUG
select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select BROKEN_RODATA
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -24,6 +24,7 @@ config PARISC
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_ARCH_HARDENED_USERCOPY
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h
new file mode 100644
index 000000000000..8fe7d6b2cc42
--- /dev/null
+++ b/arch/parisc/include/asm/dwarf.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Helge Deller <deller@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PARISC_DWARF_H
+#define _ASM_PARISC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_REGISTER .cfi_register
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_UNDEFINED .cfi_undefined
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PARISC_DWARF_H */
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index 0b19a7242d0c..67e6b433d399 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -1,6 +1,8 @@
#ifndef __ASM_PARISC_LINKAGE_H
#define __ASM_PARISC_LINKAGE_H
+#include <asm/dwarf.h>
+
#ifndef __ALIGN
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
@@ -10,6 +12,8 @@
 * In parisc assembly a semicolon marks a comment while an
 * exclamation mark is used to separate independent lines.
*/
+#define ASM_NL !
+
#ifdef __ASSEMBLY__
#define ENTRY(name) \
@@ -26,6 +30,14 @@ name:
END(name)
#endif
+#define ENTRY_CFI(name) \
+ ENTRY(name) ASM_NL\
+ CFI_STARTPROC
+
+#define ENDPROC_CFI(name) \
+ ENDPROC(name) ASM_NL\
+ CFI_ENDPROC
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_LINKAGE_H */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 291cee28ccb6..e44bdb9078a5 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
#else
#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 482847865dac..9a2aee1b90fc 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -11,6 +11,7 @@
#include <linux/bug.h>
#include <linux/string.h>
+#include <linux/thread_info.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -201,10 +202,12 @@ extern long lstrnlen_user(const char __user *, long);
#define clear_user lclear_user
#define __clear_user lclear_user
-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
-#define __copy_to_user copy_to_user
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
+unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
+ unsigned long len);
+unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
+ unsigned long len);
+unsigned long copy_in_user(void __user *dst, const void __user *src,
+ unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
@@ -217,23 +220,40 @@ static inline void copy_user_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
{
- int sz = __compiletime_object_size(to);
- unsigned long ret = n;
+ int sz = __compiletime_object_size(to);
+ unsigned long ret = n;
- if (likely(sz == -1 || sz >= n))
- ret = __copy_from_user(to, from, n);
- else if (!__builtin_constant_p(n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ ret = __copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
- __bad_copy_user();
+ __bad_copy_user();
if (unlikely(ret))
memset(to + (n - ret), 0, ret);
- return ret;
+
+ return ret;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = __copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
}
struct pt_regs;
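The uaccess change above follows the hardened-usercopy pattern: when the compiler can prove the size of the kernel object, an over-long copy is rejected before the raw copy runs, and any bytes left uncopied are zeroed. A simplified userspace sketch of that gate logic, with check_object_size() and the raw copier replaced by illustrative stubs and the compile-time/run-time failure paths folded into one message:

#include <stdio.h>
#include <string.h>

/* Illustrative stub standing in for the kernel's check_object_size(). */
static void check_object_size_stub(const void *ptr, unsigned long n, int to_user)
{
	/* The real helper verifies that ptr..ptr+n stays inside one object. */
	(void)ptr; (void)n; (void)to_user;
}

/* Illustrative stub standing in for the raw __copy_from_user(). */
static unsigned long raw_copy_stub(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;			/* bytes NOT copied */
}

/*
 * Gate logic mirroring the new copy_from_user(): sz < 0 means "object size
 * unknown at compile time", so only provably-too-small destinations are
 * rejected before the copy.
 */
unsigned long copy_from_user_sketch(void *to, const void *from,
				    unsigned long n, long sz)
{
	unsigned long ret = n;

	if (sz < 0 || (unsigned long)sz >= n) {
		check_object_size_stub(to, n, 0);
		ret = raw_copy_stub(to, from, n);
	} else {
		fprintf(stderr, "buffer overflow detected (%ld < %lu)\n", sz, n);
	}

	if (ret)	/* zero the bytes that were not copied, as the kernel does */
		memset((char *)to + (n - ret), 0, ret);

	return ret;
}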
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index baa3d9d6e971..4fcff2dcc9c3 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -766,7 +766,7 @@ ENTRY(end_fault_vector)
* copy_thread moved args into task save area.
*/
-ENTRY(ret_from_kernel_thread)
+ENTRY_CFI(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
@@ -782,7 +782,7 @@ ENTRY(ret_from_kernel_thread)
copy %r31, %r2
b finish_child_return
nop
-ENDPROC(ret_from_kernel_thread)
+ENDPROC_CFI(ret_from_kernel_thread)
/*
@@ -790,7 +790,7 @@ ENDPROC(ret_from_kernel_thread)
* struct task_struct *next)
*
* switch kernel stacks and return prev */
-ENTRY(_switch_to)
+ENTRY_CFI(_switch_to)
STREG %r2, -RP_OFFSET(%r30)
callee_save_float
@@ -815,7 +815,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
-ENDPROC(_switch_to)
+ENDPROC_CFI(_switch_to)
/*
* Common rfi return path for interruptions, kernel execve, and
@@ -833,7 +833,7 @@ ENDPROC(_switch_to)
.align PAGE_SIZE
-ENTRY(syscall_exit_rfi)
+ENTRY_CFI(syscall_exit_rfi)
mfctl %cr30,%r16
LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
ldo TASK_REGS(%r16),%r16
@@ -1037,12 +1037,12 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
-ENDPROC(syscall_exit_rfi)
+ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
-ENTRY(intr_save) /* for os_hpmc */
+ENTRY_CFI(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
@@ -1117,7 +1117,7 @@ skip_save_ior:
b handle_interruption
ldo R%intr_check_sig(%r2), %r2
-ENDPROC(intr_save)
+ENDPROC_CFI(intr_save)
/*
@@ -1720,7 +1720,7 @@ dtlb_fault:
.endm
.macro fork_like name
-ENTRY(sys_\name\()_wrapper)
+ENTRY_CFI(sys_\name\()_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
@@ -1728,7 +1728,7 @@ ENTRY(sys_\name\()_wrapper)
ldil L%sys_\name, %r31
be R%sys_\name(%sr4,%r31)
STREG %r28, PT_CR27(%r1)
-ENDPROC(sys_\name\()_wrapper)
+ENDPROC_CFI(sys_\name\()_wrapper)
.endm
fork_like clone
@@ -1736,7 +1736,7 @@ fork_like fork
fork_like vfork
/* Set the return value for the child */
-ENTRY(child_return)
+ENTRY_CFI(child_return)
BL schedule_tail, %r2
nop
finish_child_return:
@@ -1748,9 +1748,9 @@ finish_child_return:
reg_restore %r1
b syscall_exit
copy %r0,%r28
-ENDPROC(child_return)
+ENDPROC_CFI(child_return)
-ENTRY(sys_rt_sigreturn_wrapper)
+ENTRY_CFI(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
@@ -1778,9 +1778,9 @@ ENTRY(sys_rt_sigreturn_wrapper)
*/
bv %r0(%r2)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
-ENDPROC(sys_rt_sigreturn_wrapper)
+ENDPROC_CFI(sys_rt_sigreturn_wrapper)
-ENTRY(syscall_exit)
+ENTRY_CFI(syscall_exit)
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
@@ -1979,7 +1979,7 @@ syscall_do_resched:
#else
nop
#endif
-ENDPROC(syscall_exit)
+ENDPROC_CFI(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER
@@ -2023,7 +2023,7 @@ ENDPROC(mcount)
.align 8
.globl return_to_handler
.type return_to_handler, @function
-ENTRY(return_to_handler)
+ENTRY_CFI(return_to_handler)
.proc
.callinfo caller,frame=FRAME_SIZE
.entry
@@ -2067,7 +2067,7 @@ parisc_return_to_handler:
LDREGM -FRAME_SIZE(%sp),%r3
.exit
.procend
-ENDPROC(return_to_handler)
+ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2076,7 +2076,7 @@ ENDPROC(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
-ENTRY(call_on_stack)
+ENTRY_CFI(call_on_stack)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
@@ -2112,10 +2112,10 @@ ENTRY(call_on_stack)
bv (%rp)
LDREG -68(%sp), %sp
# endif /* CONFIG_64BIT */
-ENDPROC(call_on_stack)
+ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
-get_register:
+ENTRY_CFI(get_register)
/*
* get_register is used by the non access tlb miss handlers to
* copy the value of the general register specified in r8 into
@@ -2192,9 +2192,10 @@ get_register:
copy %r30,%r1
bv %r0(%r25) /* r31 */
copy %r31,%r1
+ENDPROC_CFI(get_register)
-set_register:
+ENTRY_CFI(set_register)
/*
* set_register is used by the non access tlb miss handlers to
* copy the value of r1 into the general register specified in
@@ -2266,4 +2267,5 @@ set_register:
copy %r1,%r30
bv %r0(%r25) /* r31 */
copy %r1,%r31
+ENDPROC_CFI(set_register)
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index e158b6fbf1b4..0fbd0a0e1cda 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -41,12 +41,12 @@
*/
.level 1.1
- .data
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <linux/linkage.h>
+#include <linux/init.h>
/*
* stack for os_hpmc, the HPMC handler.
@@ -55,22 +55,26 @@
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
+ __PAGE_ALIGNED_BSS
.align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
+ __PAGE_ALIGNED_BSS
.align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
+ .section .bss
.align 8
hpmc_raddr:
.block 128
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
+ .section .bss
.align 8
ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
@@ -79,7 +83,7 @@ END(hpmc_pim_data)
.text
.import intr_save, code
-ENTRY(os_hpmc)
+ENTRY_CFI(os_hpmc)
.os_hpmc:
/*
@@ -295,11 +299,11 @@ os_hpmc_6:
b .
nop
-ENDPROC(os_hpmc)
+ENDPROC_CFI(os_hpmc)
.os_hpmc_end:
- nop
-.data
-.align 4
+
+
+ __INITRODATA
.export os_hpmc_size
os_hpmc_size:
.word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index f0b6722fc706..545f9d2fe711 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -506,7 +506,7 @@ add_system_map_addresses(struct parisc_device *dev, int num_addrs,
long status;
struct pdc_system_map_addr_info addr_result;
- dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
+ dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __func__);
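The inventory.c change swaps an open-coded num * size multiplication for kmalloc_array(), which fails the allocation instead of silently wrapping on overflow. A small userspace sketch of that overflow guard, with a malloc-based stand-in for the allocator:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for kmalloc_array(): NULL if n * size would overflow. */
void *kmalloc_array_sketch(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* refuse instead of wrapping the multiplication */
	return malloc(n * size);
}

/* Usage mirroring the hunk above (names taken from the patch context):
 *	dev->addr = kmalloc_array_sketch(num_addrs, sizeof(*dev->addr));
 */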
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index b743a80eaba0..985e06da37f5 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -41,7 +41,7 @@
.text
.align 128
-ENTRY(flush_tlb_all_local)
+ENTRY_CFI(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
@@ -190,11 +190,11 @@ fdtdone:
.exit
.procend
-ENDPROC(flush_tlb_all_local)
+ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
-ENTRY(flush_instruction_cache_local)
+ENTRY_CFI(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
@@ -257,11 +257,11 @@ fisync:
.exit
.procend
-ENDPROC(flush_instruction_cache_local)
+ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
-ENTRY(flush_data_cache_local)
+ENTRY_CFI(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
@@ -325,7 +325,7 @@ fdsync:
.exit
.procend
-ENDPROC(flush_data_cache_local)
+ENDPROC_CFI(flush_data_cache_local)
.align 16
@@ -356,7 +356,7 @@ ENDPROC(flush_data_cache_local)
/* Clear page using kernel mapping. */
-ENTRY(clear_page_asm)
+ENTRY_CFI(clear_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -422,11 +422,11 @@ ENTRY(clear_page_asm)
.exit
.procend
-ENDPROC(clear_page_asm)
+ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
-ENTRY(copy_page_asm)
+ENTRY_CFI(copy_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -540,7 +540,7 @@ ENTRY(copy_page_asm)
.exit
.procend
-ENDPROC(copy_page_asm)
+ENDPROC_CFI(copy_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -573,11 +573,17 @@ ENDPROC(copy_page_asm)
.endm
/*
- * We can't do this since copy_user_page is used to bring in
- * file data that might have instructions. Since the data would
- * then need to be flushed out so the i-fetch can see it, it
- * makes more sense to just copy through the kernel translation
- * and flush it.
+ * copy_user_page_asm() performs a page copy using mappings
+ * equivalent to the user page mappings. It can be used to
+ * implement copy_user_page() but unfortunately both the `from'
+ * and `to' pages need to be flushed through mappings equivalent
+ * to the user mappings after the copy because the kernel accesses
+ * the `from' page through the kmap kernel mapping and the `to'
+ * page needs to be flushed since code can be copied. As a
+ * result, this implementation is less efficient than the simpler
+ * copy using the kernel mapping. It only needs the `from' page
+ * to be flushed via the user mapping. The kunmap routines handle
+ * the flushes needed for the kernel mapping.
*
* I'm still keeping this around because it may be possible to
* use it if more information is passed into copy_user_page().
@@ -586,7 +592,7 @@ ENDPROC(copy_page_asm)
*
*/
-ENTRY(copy_user_page_asm)
+ENTRY_CFI(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -742,9 +748,9 @@ ENTRY(copy_user_page_asm)
.exit
.procend
-ENDPROC(copy_user_page_asm)
+ENDPROC_CFI(copy_user_page_asm)
-ENTRY(clear_user_page_asm)
+ENTRY_CFI(clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -828,9 +834,9 @@ ENTRY(clear_user_page_asm)
.exit
.procend
-ENDPROC(clear_user_page_asm)
+ENDPROC_CFI(clear_user_page_asm)
-ENTRY(flush_dcache_page_asm)
+ENTRY_CFI(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -904,9 +910,9 @@ ENTRY(flush_dcache_page_asm)
.exit
.procend
-ENDPROC(flush_dcache_page_asm)
+ENDPROC_CFI(flush_dcache_page_asm)
-ENTRY(flush_icache_page_asm)
+ENTRY_CFI(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -982,9 +988,9 @@ ENTRY(flush_icache_page_asm)
.exit
.procend
-ENDPROC(flush_icache_page_asm)
+ENDPROC_CFI(flush_icache_page_asm)
-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY_CFI(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1025,9 +1031,9 @@ ENTRY(flush_kernel_dcache_page_asm)
.exit
.procend
-ENDPROC(flush_kernel_dcache_page_asm)
+ENDPROC_CFI(flush_kernel_dcache_page_asm)
-ENTRY(purge_kernel_dcache_page_asm)
+ENTRY_CFI(purge_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1067,9 +1073,9 @@ ENTRY(purge_kernel_dcache_page_asm)
.exit
.procend
-ENDPROC(purge_kernel_dcache_page_asm)
+ENDPROC_CFI(purge_kernel_dcache_page_asm)
-ENTRY(flush_user_dcache_range_asm)
+ENTRY_CFI(flush_user_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1088,9 +1094,9 @@ ENTRY(flush_user_dcache_range_asm)
.exit
.procend
-ENDPROC(flush_user_dcache_range_asm)
+ENDPROC_CFI(flush_user_dcache_range_asm)
-ENTRY(flush_kernel_dcache_range_asm)
+ENTRY_CFI(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1110,9 +1116,9 @@ ENTRY(flush_kernel_dcache_range_asm)
.exit
.procend
-ENDPROC(flush_kernel_dcache_range_asm)
+ENDPROC_CFI(flush_kernel_dcache_range_asm)
-ENTRY(flush_user_icache_range_asm)
+ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1131,9 +1137,9 @@ ENTRY(flush_user_icache_range_asm)
.exit
.procend
-ENDPROC(flush_user_icache_range_asm)
+ENDPROC_CFI(flush_user_icache_range_asm)
-ENTRY(flush_kernel_icache_page)
+ENTRY_CFI(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
@@ -1174,9 +1180,9 @@ ENTRY(flush_kernel_icache_page)
.exit
.procend
-ENDPROC(flush_kernel_icache_page)
+ENDPROC_CFI(flush_kernel_icache_page)
-ENTRY(flush_kernel_icache_range_asm)
+ENTRY_CFI(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1194,13 +1200,13 @@ ENTRY(flush_kernel_icache_range_asm)
nop
.exit
.procend
-ENDPROC(flush_kernel_icache_range_asm)
+ENDPROC_CFI(flush_kernel_icache_range_asm)
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
.align 256
-ENTRY(disable_sr_hashing_asm)
+ENTRY_CFI(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -1289,6 +1295,6 @@ srdis_done:
.exit
.procend
-ENDPROC(disable_sr_hashing_asm)
+ENDPROC_CFI(disable_sr_hashing_asm)
.end
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 5f3d3a1f9037..1db58e546230 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -61,7 +61,7 @@ save_cr_end:
* iodc_fn is the IODC function to call
*/
-ENTRY(real32_call_asm)
+ENTRY_CFI(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
@@ -119,14 +119,14 @@ ric_ret:
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
-ENDPROC(real32_call_asm)
+ENDPROC_CFI(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
.text
-save_control_regs:
+ENTRY_CFI(save_control_regs)
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
PUSH_CR(%cr25, %r28)
@@ -139,8 +139,9 @@ save_control_regs:
PUSH_CR(%cr15, %r28)
bv 0(%r2)
nop
+ENDPROC_CFI(save_control_regs)
-restore_control_regs:
+ENTRY_CFI(restore_control_regs)
load32 PA(save_cr_end), %r26
POP_CR(%cr15, %r26)
POP_CR(%cr31, %r26)
@@ -153,13 +154,14 @@ restore_control_regs:
POP_CR(%cr24, %r26)
bv 0(%r2)
nop
+ENDPROC_CFI(restore_control_regs)
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
.text
.align 128
-rfi_virt2real:
+ENTRY_CFI(rfi_virt2real)
/* switch to real mode... */
rsm PSW_SM_I,%r0
load32 PA(rfi_v2r_1), %r1
@@ -191,10 +193,11 @@ rfi_v2r_1:
tophys_r1 %r2
bv 0(%r2)
nop
+ENDPROC_CFI(rfi_virt2real)
.text
.align 128
-rfi_real2virt:
+ENTRY_CFI(rfi_real2virt)
rsm PSW_SM_I,%r0
load32 (rfi_r2v_1), %r1
nop
@@ -225,6 +228,7 @@ rfi_r2v_1:
tovirt_r1 %r2
bv 0(%r2)
nop
+ENDPROC_CFI(rfi_real2virt)
#ifdef CONFIG_64BIT
@@ -238,7 +242,7 @@ rfi_r2v_1:
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
-ENTRY(real64_call_asm)
+ENTRY_CFI(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
@@ -284,7 +288,7 @@ r64_ret:
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
-ENDPROC(real64_call_asm)
+ENDPROC_CFI(real64_call_asm)
#endif
@@ -293,12 +297,12 @@ ENDPROC(real64_call_asm)
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
-ENTRY(__canonicalize_funcptr_for_compare)
+ENTRY_CFI(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
-ENDPROC(__canonicalize_funcptr_for_compare)
+ENDPROC_CFI(__canonicalize_funcptr_for_compare)
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index f7ea626e29c9..81d6f6391944 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -38,6 +38,7 @@
#include <linux/export.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/machdep.h> /* for pa7300lc_init() proto */
@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
#endif
printk(KERN_CONT ".\n");
+ /*
+ * Check if initial kernel page mappings are sufficient.
+	 * Panic early if not, otherwise we may end up accessing kernel
+	 * functions and variables that can't be reached.
+ */
+ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+ panic("KERNEL_INITIAL_ORDER too small!");
pdc_console_init();
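The new setup_arch() check confirms that the linked kernel image still fits inside the initially mapped region before anything outside that region is touched. A compact sketch of the comparison, assuming the usual KERNEL_INITIAL_SIZE = 1UL << KERNEL_INITIAL_ORDER convention; the helper and numbers below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/*
 * Boundary check mirroring the new setup_arch() test: if the physical
 * address of _end is at or beyond the initially mapped size, later
 * accesses to kernel data could fault before full page tables exist.
 */
bool kernel_fits_initial_mapping(unsigned long pa_end, unsigned long initial_size)
{
	return pa_end < initial_size;
}

int main(void)
{
	unsigned long initial_size = 1UL << 24;	/* 16 MB, the 32-bit UP case */

	printf("17 MB image fits: %d\n",
	       kernel_fits_initial_mapping(17UL << 20, initial_size));
	printf("15 MB image fits: %d\n",
	       kernel_fits_initial_mapping(15UL << 20, initial_size));
	return 0;
}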
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index c2a9cc55a62f..75dab2871346 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -412,8 +412,8 @@ void smp_cpus_done(unsigned int cpu_max)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- if (cpu != 0 && cpu < parisc_max_cpus)
- smp_boot_one_cpu(cpu, tidle);
+ if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
+ return -ENOSYS;
return cpu_online(cpu) ? 0 : -ENOSYS;
}
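The __cpu_up() change above propagates a failure from the boot helper immediately instead of relying only on the later cpu_online() test. A small sketch of that error-propagation shape, with stub helpers standing in for smp_boot_one_cpu() and cpu_online():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real boot and online checks. */
static int smp_boot_one_cpu_stub(unsigned int cpu)
{
	return cpu == 3 ? -EIO : 0;	/* pretend CPU 3 refuses to start */
}

static bool cpu_online_stub(unsigned int cpu)
{
	return cpu != 3;
}

int cpu_up_sketch(unsigned int cpu, unsigned int max_cpus)
{
	if (cpu != 0 && cpu < max_cpus && smp_boot_one_cpu_stub(cpu))
		return -ENOSYS;		/* boot failed: report it right away */
	return cpu_online_stub(cpu) ? 0 : -ENOSYS;
}

int main(void)
{
	printf("cpu 1: %d\n", cpu_up_sketch(1, 4));
	printf("cpu 3: %d\n", cpu_up_sketch(3, 4));
	return 0;
}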
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4b0b963d52a7..9b63b876a13a 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -226,12 +226,6 @@ void __init start_cpu_itimer(void)
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
- /* With multiple 64bit CPUs online, the cr16's are not syncronized. */
- if (cpu != 0)
- clear_sched_clock_stable();
-#endif
-
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index f3ead0b6ce46..b37787dbe775 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -69,6 +69,7 @@ SECTIONS
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
@@ -138,8 +139,6 @@ SECTIONS
/* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
- /* bootmap is allocated in setup_bootmem() directly behind bss. */
-
. = ALIGN(HUGEPAGE_SIZE);
_end = . ;
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 1052b747e011..a5b72f22c7a6 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -65,34 +65,34 @@
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY(fixup_get_user_skip_1)
+ENTRY_CFI(fixup_get_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
-ENDPROC(fixup_get_user_skip_1)
+ENDPROC_CFI(fixup_get_user_skip_1)
-ENTRY(fixup_get_user_skip_2)
+ENTRY_CFI(fixup_get_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
-ENDPROC(fixup_get_user_skip_2)
+ENDPROC_CFI(fixup_get_user_skip_2)
/* put_user() fixups, store -EFAULT in r8 */
-ENTRY(fixup_put_user_skip_1)
+ENTRY_CFI(fixup_put_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_1)
+ENDPROC_CFI(fixup_put_user_skip_1)
-ENTRY(fixup_put_user_skip_2)
+ENTRY_CFI(fixup_put_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_2)
+ENDPROC_CFI(fixup_put_user_skip_2)
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index a512f07d4feb..56845de6b5df 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -67,7 +67,7 @@
* otherwise, returns number of bytes not transferred.
*/
-ENTRY(lclear_user)
+ENTRY_CFI(lclear_user)
.proc
.callinfo NO_CALLS
.entry
@@ -81,7 +81,7 @@ $lclu_done:
bv %r0(%r2)
copy %r25,%r28
.exit
-ENDPROC(lclear_user)
+ENDPROC_CFI(lclear_user)
.section .fixup,"ax"
2: fixup_branch $lclu_done
@@ -100,7 +100,7 @@ ENDPROC(lclear_user)
* else strlen + 1 (i.e. includes zero byte).
*/
-ENTRY(lstrnlen_user)
+ENTRY_CFI(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
@@ -120,7 +120,7 @@ $lslen_done:
$lslen_nzero:
b $lslen_done
ldo 1(%r26),%r26 /* special case for N == 0 */
-ENDPROC(lstrnlen_user)
+ENDPROC_CFI(lstrnlen_user)
.section .fixup,"ax"
3: fixup_branch $lslen_done
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index b2b441b32341..f82ff10ed974 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -489,20 +489,23 @@ static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
}
#ifdef __KERNEL__
-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
+unsigned long __copy_to_user(void __user *dst, const void *src,
+ unsigned long len)
{
mtsp(get_kernel_space(), 1);
mtsp(get_user_space(), 2);
return pa_memcpy((void __force *)dst, src, len);
}
+EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__copy_from_user);
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
+unsigned long __copy_from_user(void *dst, const void __user *src,
+ unsigned long len)
{
mtsp(get_user_space(), 1);
mtsp(get_kernel_space(), 2);
return pa_memcpy(dst, (void __force *)src, len);
}
+EXPORT_SYMBOL(__copy_from_user);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
{
@@ -520,8 +523,6 @@ void * memcpy(void * dst,const void *src, size_t count)
return dst;
}
-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(memcpy);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 163af2c31d76..47a6ca4c9e40 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -168,6 +168,43 @@ int fixup_exception(struct pt_regs *regs)
}
/*
+ * parisc hardware trap list
+ *
+ * Documented in section 3 "Addressing and Access Control" of the
+ * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
+ * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
+ *
+ * For implementation see handle_interruption() in traps.c
+ */
+static const char * const trap_description[] = {
+ [1] "High-priority machine check (HPMC)",
+ [2] "Power failure interrupt",
+ [3] "Recovery counter trap",
+ [5] "Low-priority machine check",
+ [6] "Instruction TLB miss fault",
+ [7] "Instruction access rights / protection trap",
+ [8] "Illegal instruction trap",
+ [9] "Break instruction trap",
+ [10] "Privileged operation trap",
+ [11] "Privileged register trap",
+ [12] "Overflow trap",
+ [13] "Conditional trap",
+ [14] "FP Assist Exception trap",
+ [15] "Data TLB miss fault",
+ [16] "Non-access ITLB miss fault",
+ [17] "Non-access DTLB miss fault",
+ [18] "Data memory protection/unaligned access trap",
+ [19] "Data memory break trap",
+ [20] "TLB dirty bit trap",
+ [21] "Page reference trap",
+ [22] "Assist emulation trap",
+ [25] "Taken branch trap",
+ [26] "Data memory access rights trap",
+ [27] "Data memory protection ID trap",
+ [28] "Unaligned data reference trap",
+};
+
+/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
*/
@@ -176,6 +213,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
unsigned long address, struct task_struct *tsk,
struct vm_area_struct *vma)
{
+ const char *trap_name = NULL;
+
if (!unhandled_signal(tsk, SIGSEGV))
return;
@@ -186,8 +225,15 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
tsk->comm, code, address);
print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+
+ if (code < ARRAY_SIZE(trap_description))
+ trap_name = trap_description[code];
+ pr_warn(KERN_CONT " trap #%lu: %s%c", code,
+ trap_name ? trap_name : "unknown",
+ vma ? ',':'\n');
+
if (vma)
- pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ pr_warn(KERN_CONT " vm_start = 0x%08lx, vm_end = 0x%08lx\n",
vma->vm_start, vma->vm_end);
show_regs(regs);
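The fault.c addition keeps the trap table sparse by using designated initializers and guards the lookup with an ARRAY_SIZE() bound so unknown codes print as "unknown". A condensed sketch of that lookup pattern (table contents abbreviated, macro name is a local stand-in):

#include <stdio.h>

#define ARRAY_SIZE_SKETCH(a) (sizeof(a) / sizeof((a)[0]))

/* Sparse table: unlisted indices are implicitly NULL. */
static const char *const trap_names[] = {
	[6]  = "Instruction TLB miss fault",
	[15] = "Data TLB miss fault",
	[28] = "Unaligned data reference trap",
};

const char *trap_name_sketch(unsigned long code)
{
	const char *name = NULL;

	if (code < ARRAY_SIZE_SKETCH(trap_names))
		name = trap_names[code];
	return name ? name : "unknown";
}

int main(void)
{
	printf("trap 15: %s\n", trap_name_sketch(15));
	printf("trap 99: %s\n", trap_name_sketch(99));
	return 0;
}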
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 6b3e7c6ee096..356f38473b5d 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -79,6 +80,34 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
+/*
+ * get_memblock() allocates pages via memblock.
+ * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
+ * doesn't allocate from bottom to top, which is needed because we only created
+ * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
+ */
+static void * __init get_memblock(unsigned long size)
+{
+ static phys_addr_t search_addr __initdata;
+ phys_addr_t phys;
+
+ if (!search_addr)
+ search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
+ search_addr = ALIGN(search_addr, size);
+ while (!memblock_is_region_memory(search_addr, size) ||
+ memblock_is_region_reserved(search_addr, size)) {
+ search_addr += size;
+ }
+ phys = search_addr;
+
+ if (phys)
+ memblock_reserve(phys, size);
+ else
+ panic("get_memblock() failed.\n");
+
+ return __va(phys);
+}
+
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !CONFIG_64BIT */
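get_memblock() above walks upward from _end so that early page-table pages stay inside the region mapped by the boot code. A simplified userspace model of that bottom-up search, with the memblock predicates replaced by a toy reservation bitmap (entirely illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REGION_SLOTS 16

/* Toy model: each slot is one allocation-sized chunk of mapped memory. */
static bool reserved[REGION_SLOTS];

/* Bottom-up search mirroring get_memblock(): take the first free slot. */
static long get_block_sketch(void)
{
	for (size_t i = 0; i < REGION_SLOTS; i++) {
		if (!reserved[i]) {
			reserved[i] = true;	/* memblock_reserve() equivalent */
			return (long)i;
		}
	}
	return -1;	/* nothing free below the mapped limit: the kernel panics here */
}

int main(void)
{
	reserved[0] = true;	/* pretend the kernel image occupies slot 0 */
	printf("first alloc  -> slot %ld\n", get_block_sketch());
	printf("second alloc -> slot %ld\n", get_block_sketch());
	return 0;
}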
@@ -118,11 +147,7 @@ static void __init mem_limit_func(void)
static void __init setup_bootmem(void)
{
- unsigned long bootmap_size;
unsigned long mem_max;
- unsigned long bootmap_pages;
- unsigned long bootmap_start_pfn;
- unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
int npmem_holes;
@@ -178,33 +203,29 @@ static void __init setup_bootmem(void)
}
#endif
- if (npmem_ranges > 1) {
-
- /* Print the memory ranges */
+ /* Print the memory ranges */
+ pr_info("Memory Ranges:\n");
- printk(KERN_INFO "Memory Ranges:\n");
+ for (i = 0; i < npmem_ranges; i++) {
+ struct resource *res = &sysram_resources[i];
+ unsigned long start;
+ unsigned long size;
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long start;
- unsigned long size;
+ size = (pmem_ranges[i].pages << PAGE_SHIFT);
+ start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
+ i, start, start + (size - 1), size >> 20);
- size = (pmem_ranges[i].pages << PAGE_SHIFT);
- start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
- printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
- i,start, start + (size - 1), size >> 20);
- }
- }
-
- sysram_resource_count = npmem_ranges;
- for (i = 0; i < sysram_resource_count; i++) {
- struct resource *res = &sysram_resources[i];
+ /* request memory resource */
res->name = "System RAM";
- res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
- res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
+ res->start = start;
+ res->end = start + size - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
+ sysram_resource_count = npmem_ranges;
+
/*
* For 32 bit kernels we limit the amount of memory we can
* support, in order to preserve enough kernel address space
@@ -263,16 +284,9 @@ static void __init setup_bootmem(void)
}
#endif
- bootmap_pages = 0;
- for (i = 0; i < npmem_ranges; i++)
- bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
-
- bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
-
#ifdef CONFIG_DISCONTIGMEM
for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bootmem_node_data[i];
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
@@ -284,28 +298,24 @@ static void __init setup_bootmem(void)
/*
* Initialize and free the full range of memory in each range.
- * Note that the only writing these routines do are to the bootmap,
- * and we've made sure to locate the bootmap properly so that they
- * won't be writing over anything important.
*/
- bootmap_pfn = bootmap_start_pfn;
max_pfn = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long start_pfn;
unsigned long npages;
+ unsigned long start;
+ unsigned long size;
start_pfn = pmem_ranges[i].start_pfn;
npages = pmem_ranges[i].pages;
- bootmap_size = init_bootmem_node(NODE_DATA(i),
- bootmap_pfn,
- start_pfn,
- (start_pfn + npages) );
- free_bootmem_node(NODE_DATA(i),
- (start_pfn << PAGE_SHIFT),
- (npages << PAGE_SHIFT) );
- bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = start_pfn << PAGE_SHIFT;
+ size = npages << PAGE_SHIFT;
+
+ /* add system RAM memblock */
+ memblock_add(start, size);
+
if ((start_pfn + npages) > max_pfn)
max_pfn = start_pfn + npages;
}
@@ -317,32 +327,22 @@ static void __init setup_bootmem(void)
*/
max_low_pfn = max_pfn;
- /* bootmap sizing messed up? */
- BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
-
/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
#define PDC_CONSOLE_IO_IODC_SIZE 32768
- reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free +
- PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
- (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
- BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
+ PDC_CONSOLE_IO_IODC_SIZE));
+ memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
+ (unsigned long)(_end - KERNEL_BINARY_TEXT_START));
#ifndef CONFIG_DISCONTIGMEM
/* reserve the holes */
for (i = 0; i < npmem_holes; i++) {
- reserve_bootmem_node(NODE_DATA(0),
- (pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
+ (pmem_holes[i].pages << PAGE_SHIFT));
}
#endif
@@ -360,8 +360,7 @@ static void __init setup_bootmem(void)
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
- reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- initrd_reserve, BOOTMEM_DEFAULT);
+ memblock_reserve(__pa(initrd_start), initrd_reserve);
}
}
#endif
@@ -439,7 +438,7 @@ static void __init map_pages(unsigned long start_vaddr,
*/
if (!pmd) {
- pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+ pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
pmd = (pmd_t *) __pa(pmd);
}
@@ -458,8 +457,7 @@ static void __init map_pages(unsigned long start_vaddr,
pg_table = (pte_t *)pmd_address(*pmd);
if (!pg_table) {
- pg_table = (pte_t *)
- alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+ pg_table = (pte_t *) get_memblock(PAGE_SIZE);
pg_table = (pte_t *) __pa(pg_table);
}
@@ -737,7 +735,7 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = get_memblock(PAGE_SIZE);
}
static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 927d2ab2ce08..65fba4c34cd7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -12,11 +12,6 @@ config 64BIT
bool
default y if PPC64
-config WORD_SIZE
- int
- default 64 if PPC64
- default 32 if !PPC64
-
config ARCH_PHYS_ADDR_T_64BIT
def_bool PPC64 || PHYS_64BIT
@@ -101,7 +96,7 @@ config PPC
select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_KPROBES
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
@@ -113,7 +108,6 @@ config PPC
select HAVE_DEBUG_KMEMLEAK
select ARCH_HAS_SG_CHAIN
select GENERIC_ATOMIC64 if PPC32
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -167,6 +161,7 @@ config PPC
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_ARCH_HARDENED_USERCOPY
+ select HAVE_KERNEL_GZIP
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -637,7 +632,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 8 9 if PPC64 && PPC_64K_PAGES
default "9" if PPC64 && PPC_64K_PAGES
- range 9 13 if PPC64 && !PPC_64K_PAGES
+ range 13 13 if PPC64 && !PPC_64K_PAGES
default "13" if PPC64 && !PPC_64K_PAGES
range 9 64 if PPC32 && PPC_16K_PAGES
default "9" if PPC32 && PPC_16K_PAGES
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1934707bf321..50d020ac0f48 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -43,31 +43,24 @@ NM := $(NM) --synthetic
endif
endif
-ifeq ($(CONFIG_PPC64),y)
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-OLDARCH := ppc64le
-else
-OLDARCH := ppc64
-endif
-else
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-OLDARCH := ppcle
-else
-OLDARCH := ppc
-endif
-endif
+# BITS is used as extension for files which are available in a 32 bit
+# and a 64 bit version to simplify shared Makefiles.
+# e.g.: obj-y += foo_$(BITS).o
+export BITS
-# It seems there are times we use this Makefile without
-# including the config file, but this replicates the old behaviour
-ifeq ($(CONFIG_WORD_SIZE),)
-CONFIG_WORD_SIZE := 32
+ifdef CONFIG_PPC64
+ BITS := 64
+else
+ BITS := 32
endif
-UTS_MACHINE := $(OLDARCH)
+machine-y = ppc
+machine-$(CONFIG_PPC64) += 64
+machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
+UTS_MACHINE := $(subst $(space),,$(machine-y))
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override LD += -EL
-override CROSS32AS += -mlittle-endian
LDEMULATION := lppc
GNUTARGET := powerpcle
MULTIPLEWORD := -mno-multiple
@@ -89,10 +82,10 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
ifeq ($(HAS_BIARCH),y)
-override AS += -a$(CONFIG_WORD_SIZE)
-override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
-override CC += -m$(CONFIG_WORD_SIZE)
-override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-$(GNUTARGET) $(AR)
+override AS += -a$(BITS)
+override LD += -m elf$(BITS)$(LDEMULATION)
+override CC += -m$(BITS)
+override AR := GNUTARGET=elf$(BITS)-$(GNUTARGET) $(AR)
endif
LDFLAGS_vmlinux-y := -Bstatic
@@ -179,7 +172,7 @@ KBUILD_CFLAGS += $(call cc-option,-msoft-float)
KBUILD_CFLAGS += -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
-CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
+CHECKFLAGS += -m$(BITS) -D__powerpc__ -D__powerpc$(BITS)__
ifdef CONFIG_CPU_BIG_ENDIAN
CHECKFLAGS += -D__BIG_ENDIAN__
else
@@ -234,7 +227,7 @@ KBUILD_CFLAGS += $(cpu-as-y)
KBUILD_AFLAGS += $(aflags-y)
KBUILD_CFLAGS += $(cflags-y)
-head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
+head-y := arch/powerpc/kernel/head_$(BITS).o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o
head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 1a2a6e8dc40d..eae2dc8bc218 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -19,10 +19,15 @@
all: $(obj)/zImage
+compress-$(CONFIG_KERNEL_GZIP) := CONFIG_KERNEL_GZIP
+compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ
+
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -Os -msoft-float -pipe \
-fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
- -isystem $(shell $(CROSS32CC) -print-file-name=include)
+ -isystem $(shell $(CROSS32CC) -print-file-name=include) \
+ -D$(compress-y)
+
ifdef CONFIG_PPC64_BOOT_WRAPPER
BOOTCFLAGS += -m64
endif
@@ -59,13 +64,30 @@ $(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-akebono.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
+# The pre-boot decompressors pull in a lot of kernel headers and other source
+# files. This creates a bit of a dependency headache since we need to copy
+# these files into the build dir, fix up any includes and ensure that dependent
+# files are copied in the right order.
+
+# These need to be separate variables because they are copied out of different
+# directories in the kernel tree. Sure, you could merge them, but it's a
+# cure-is-worse-than-the-disease situation.
+zlib-decomp-$(CONFIG_KERNEL_GZIP) := decompress_inflate.c
+zlib-$(CONFIG_KERNEL_GZIP) := inffast.c inflate.c inftrees.c
+zlibheader-$(CONFIG_KERNEL_GZIP) := inffast.h inffixed.h inflate.h inftrees.h infutil.h
+zliblinuxheader-$(CONFIG_KERNEL_GZIP) := zlib.h zconf.h zutil.h
-zlib := inffast.c inflate.c inftrees.c
-zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
-zliblinuxheader := zlib.h zconf.h zutil.h
+$(addprefix $(obj)/, decompress.o): \
+ $(addprefix $(obj)/,$(zlib-decomp-y))
-$(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
- $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
+$(addprefix $(obj)/, $(zlib-decomp-y)): \
+ $(addprefix $(obj)/,$(zliblinuxheader-y)) \
+ $(addprefix $(obj)/,$(zlibheader-y)) \
+ $(addprefix $(obj)/,$(zlib-y))
+
+$(addprefix $(obj)/,$(zlib-y)): \
+ $(addprefix $(obj)/,$(zliblinuxheader-y)) \
+ $(addprefix $(obj)/,$(zlibheader-y))
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
@@ -73,10 +95,10 @@ libfdtheader := fdt.h libfdt.h libfdt_internal.h
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
$(addprefix $(obj)/,$(libfdtheader))
-src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
+src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
$(libfdt) libfdt-wrapper.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
- gunzip_util.c elf_util.c $(zlib) devtree.c stdlib.c \
+ elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
uartlite.c mpc52xx-psc.c opal.c opal-calls.S
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
@@ -125,23 +147,20 @@ obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib))))
obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat))))
obj-plat: $(libfdt)
-quiet_cmd_copy_zlib = COPY $@
- cmd_copy_zlib = sed "s@__used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
+quiet_cmd_copy_kern_src = COPY $@
+ cmd_copy_kern_src = sed -f $(srctree)/arch/powerpc/boot/fixup-headers.sed $< > $@
-quiet_cmd_copy_zlibheader = COPY $@
- cmd_copy_zlibheader = sed "s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
-# stddef.h for NULL
-quiet_cmd_copy_zliblinuxheader = COPY $@
- cmd_copy_zliblinuxheader = sed "s@<linux/string.h>@\"string.h\"@;s@<linux/kernel.h>@<stddef.h>@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
+$(addprefix $(obj)/,$(zlib-y)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
+ $(call cmd,copy_kern_src)
-$(addprefix $(obj)/,$(zlib)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
- $(call cmd,copy_zlib)
+$(addprefix $(obj)/,$(zlibheader-y)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
+ $(call cmd,copy_kern_src)
-$(addprefix $(obj)/,$(zlibheader)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
- $(call cmd,copy_zlibheader)
+$(addprefix $(obj)/,$(zliblinuxheader-y)): $(obj)/%: $(srctree)/include/linux/%
+ $(call cmd,copy_kern_src)
-$(addprefix $(obj)/,$(zliblinuxheader)): $(obj)/%: $(srctree)/include/linux/%
- $(call cmd,copy_zliblinuxheader)
+$(addprefix $(obj)/,$(zlib-decomp-y)): $(obj)/%: $(srctree)/lib/%
+ $(call cmd,copy_kern_src)
quiet_cmd_copy_libfdt = COPY $@
cmd_copy_libfdt = cp $< $@
@@ -150,17 +169,17 @@ $(addprefix $(obj)/,$(libfdt) $(libfdtheader)): $(obj)/%: $(srctree)/scripts/dtc
$(call cmd,copy_libfdt)
$(obj)/empty.c:
- @touch $@
+ $(Q)touch $@
$(obj)/zImage.lds: $(obj)/%: $(srctree)/$(src)/%.S
$(CROSS32CC) $(cpp_flags) -E -Wp,-MD,$(depfile) -P -Upowerpc \
-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
- @cp $< $@
+ $(Q)cp $< $@
-clean-files := $(zlib) $(zlibheader) $(zliblinuxheader) \
- $(libfdt) $(libfdtheader) \
+clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \
+ $(zlib-decomp-) $(libfdt) $(libfdtheader) \
empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
quiet_cmd_bootcc = BOOTCC $@
@@ -207,10 +226,14 @@ CROSSWRAP := -C "$(CROSS_COMPILE)"
endif
endif
+compressor-$(CONFIG_KERNEL_GZIP) := gz
+compressor-$(CONFIG_KERNEL_XZ) := xz
+
# args (to if_changed): 1 = (this rule), 2 = platform, 3 = dts 4=dtb 5=initrd
quiet_cmd_wrap = WRAP $@
- cmd_wrap =$(CONFIG_SHELL) $(wrapper) -c -o $@ -p $2 $(CROSSWRAP) \
- $(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux
+ cmd_wrap =$(CONFIG_SHELL) $(wrapper) -Z $(compressor-y) -c -o $@ -p $2 \
+ $(CROSSWRAP) $(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) \
+ vmlinux
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
image-$(CONFIG_PPC_POWERNV) += zImage.pseries
@@ -391,9 +414,9 @@ image-y := vmlinux.strip
endif
$(obj)/zImage: $(addprefix $(obj)/, $(image-y))
- @rm -f $@; ln $< $@
+ $(Q)rm -f $@; ln $< $@
$(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
- @rm -f $@; ln $< $@
+ $(Q)rm -f $@; ln $< $@
# Only install the vmlinux
install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
@@ -410,8 +433,9 @@ clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage.maple simpleImage.* otheros.bld *.dtb
# clean up files cached by wrapper
-clean-kernel := vmlinux.strip vmlinux.bin
-clean-kernel += $(addsuffix .gz,$(clean-kernel))
+clean-kernel-base := vmlinux.strip vmlinux.bin
+clean-kernel := $(addsuffix .gz,$(clean-kernel-base))
+clean-kernel += $(addsuffix .xz,$(clean-kernel-base))
# If not absolute clean-files are relative to $(obj).
clean-files += $(addprefix $(objtree)/, $(clean-kernel))
diff --git a/arch/powerpc/boot/cuboot-c2k.c b/arch/powerpc/boot/cuboot-c2k.c
index e43594950ba3..9309c51f1d65 100644
--- a/arch/powerpc/boot/cuboot-c2k.c
+++ b/arch/powerpc/boot/cuboot-c2k.c
@@ -18,7 +18,6 @@
#include "io.h"
#include "ops.h"
#include "elf.h"
-#include "gunzip_util.h"
#include "mv64x60.h"
#include "cuboot.h"
#include "ppcboot.h"
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
new file mode 100644
index 000000000000..3aff4423ad01
--- /dev/null
+++ b/arch/powerpc/boot/decompress.c
@@ -0,0 +1,148 @@
+/*
+ * Wrapper around the kernel's pre-boot decompression library.
+ *
+ * Copyright (C) IBM Corporation 2016.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "elf.h"
+#include "page.h"
+#include "string.h"
+#include "stdio.h"
+#include "ops.h"
+#include "reg.h"
+#include "types.h"
+
+/*
+ * The decompressor_*.c files play #ifdef games so they can be used in both
+ * pre-boot and regular kernel code. We need these definitions to make the
+ * includes work.
+ */
+
+#define STATIC static
+#define INIT
+#define __always_inline inline
+
+/*
+ * The build process will copy the required zlib source files and headers
+ * out of lib/ and "fix" the includes so they do not pull in other kernel
+ * headers.
+ */
+
+#ifdef CONFIG_KERNEL_GZIP
+# include "decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+# include "xz_config.h"
+# include "../../../lib/decompress_unxz.c"
+#endif
+
+/* globals for tracking the state of the decompression */
+static unsigned long decompressed_bytes;
+static unsigned long limit;
+static unsigned long skip;
+static char *output_buffer;
+
+/*
+ * flush() is called by __decompress() when the decompressor's scratch buffer is
+ * full.
+ */
+static long flush(void *v, unsigned long buffer_size)
+{
+ unsigned long end = decompressed_bytes + buffer_size;
+ unsigned long size = buffer_size;
+ unsigned long offset = 0;
+ char *in = v;
+ char *out;
+
+ /*
+ * if we hit our decompression limit, we need to fake an error to abort
+ * the in-progress decompression.
+ */
+ if (decompressed_bytes >= limit)
+ return -1;
+
+ /* skip this entire block */
+ if (end <= skip) {
+ decompressed_bytes += buffer_size;
+ return buffer_size;
+ }
+
+ /* skip some data at the start, but keep the rest of the block */
+ if (decompressed_bytes < skip && end > skip) {
+ offset = skip - decompressed_bytes;
+
+ in += offset;
+ size -= offset;
+ decompressed_bytes += offset;
+ }
+
+ out = &output_buffer[decompressed_bytes - skip];
+ size = min(decompressed_bytes + size, limit) - decompressed_bytes;
+
+ memcpy(out, in, size);
+ decompressed_bytes += size;
+
+ return buffer_size;
+}
+
+static void print_err(char *s)
+{
+ /* suppress the "error" when we terminate the decompressor */
+ if (decompressed_bytes >= limit)
+ return;
+
+ printf("Decompression error: '%s'\n\r", s);
+}
+
+/**
+ * partial_decompress - decompresses part or all of a compressed buffer
+ * @inbuf: input buffer
+ * @input_size: length of the input buffer
+ * @outbuf: output buffer
+ * @output_size: length of the output buffer
+ * @skip: number of output bytes to ignore
+ *
+ * This function takes compressed data from inbuf, decompresses it and writes it
+ * to outbuf. Once output_size bytes are written to the output buffer, or the
+ * stream is exhausted, the function will return the number of bytes that were
+ * decompressed. Otherwise it will return whatever error code the decompressor
+ * reported (NB: this is specific to each decompressor type).
+ *
+ * The skip functionality is mainly there so the program can discover
+ * the size of the compressed image so that it can ask firmware (if present)
+ * for an appropriately sized buffer.
+ */
+long partial_decompress(void *inbuf, unsigned long input_size,
+ void *outbuf, unsigned long output_size, unsigned long _skip)
+{
+ int ret;
+
+ /*
+	 * The skipped bytes need to be included in the size of the data we want
+ * to decompress.
+ */
+ output_size += _skip;
+
+ decompressed_bytes = 0;
+ output_buffer = outbuf;
+ limit = output_size;
+ skip = _skip;
+
+ ret = __decompress(inbuf, input_size, NULL, flush, outbuf,
+ output_size, NULL, print_err);
+
+ /*
+ * If decompression was aborted due to an actual error rather than
+ * a fake error that we used to abort, then we should report it.
+ */
+ if (decompressed_bytes < limit)
+ return ret;
+
+ return decompressed_bytes - skip;
+}
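The flush() callback above implements a window over the decompressed stream: bytes before `skip` are discarded, bytes past `limit` abort the run with a fake error, and everything in between lands in the output buffer. A standalone sketch of that windowing arithmetic, independent of any real decompressor (buffer size and test data are made up):

#include <stdio.h>
#include <string.h>

#define OUT_CAP 64

static unsigned long done, limit, skip;
static char outbuf[OUT_CAP];

/* Mirrors flush(): returns bytes consumed, or -1 once the limit is reached. */
static long flush_sketch(const char *in, unsigned long n)
{
	unsigned long off = 0, size = n;

	if (done >= limit)
		return -1;			/* fake error: stop the decompressor */
	if (done + n <= skip) {			/* block lies entirely before the window */
		done += n;
		return (long)n;
	}
	if (done < skip) {			/* trim the part before the window */
		off = skip - done;
		size -= off;
		done += off;
	}
	if (done + size > limit)		/* clamp to the window end */
		size = limit - done;
	memcpy(&outbuf[done - skip], in + off, size);
	done += size;
	return (long)n;
}

int main(void)
{
	const char *stream = "0123456789abcdef";

	done = 0; skip = 4; limit = skip + 8;	/* want 8 bytes, starting at offset 4 */
	flush_sketch(stream, 16);
	printf("window: %.8s\n", outbuf);	/* prints "456789ab" */
	return 0;
}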
diff --git a/arch/powerpc/boot/fixup-headers.sed b/arch/powerpc/boot/fixup-headers.sed
new file mode 100644
index 000000000000..96362428eb37
--- /dev/null
+++ b/arch/powerpc/boot/fixup-headers.sed
@@ -0,0 +1,12 @@
+# Copyright 2016 IBM Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+
+s@#include <linux/decompress/mm\.h>@@;
+s@\"zlib_inflate/\([^\"]*\).*@"\1"@;
+s@<linux/kernel.h>@<stddef.h>@;
+
+s@__used@@;
+s@<linux/\([^>]*\).*@"\1"@;
diff --git a/arch/powerpc/boot/gunzip_util.c b/arch/powerpc/boot/gunzip_util.c
deleted file mode 100644
index 9dc52501de83..000000000000
--- a/arch/powerpc/boot/gunzip_util.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2007 David Gibson, IBM Corporation.
- * Based on earlier work, Copyright (C) Paul Mackerras 1997.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <stddef.h>
-#include "string.h"
-#include "stdio.h"
-#include "ops.h"
-#include "gunzip_util.h"
-
-#define HEAD_CRC 2
-#define EXTRA_FIELD 4
-#define ORIG_NAME 8
-#define COMMENT 0x10
-#define RESERVED 0xe0
-
-/**
- * gunzip_start - prepare to decompress gzip data
- * @state: decompressor state structure to be initialized
- * @src: buffer containing gzip compressed or uncompressed data
- * @srclen: size in bytes of the buffer at src
- *
- * If the buffer at @src contains a gzip header, this function
- * initializes zlib to decompress the data, storing the decompression
- * state in @state. The other functions in this file can then be used
- * to decompress data from the gzipped stream.
- *
- * If the buffer at @src does not contain a gzip header, it is assumed
- * to contain uncompressed data. The buffer information is recorded
- * in @state and the other functions in this file will simply copy
- * data from the uncompressed data stream at @src.
- *
- * Any errors, such as bad compressed data, cause an error to be
- * printed an the platform's exit() function to be called.
- */
-void gunzip_start(struct gunzip_state *state, void *src, int srclen)
-{
- char *hdr = src;
- int hdrlen = 0;
-
- memset(state, 0, sizeof(*state));
-
- /* Check for gzip magic number */
- if ((hdr[0] == 0x1f) && (hdr[1] == 0x8b)) {
- /* gzip data, initialize zlib parameters */
- int r, flags;
-
- state->s.workspace = state->scratch;
- if (zlib_inflate_workspacesize() > sizeof(state->scratch))
- fatal("insufficient scratch space for gunzip\n\r");
-
- /* skip header */
- hdrlen = 10;
- flags = hdr[3];
- if (hdr[2] != Z_DEFLATED || (flags & RESERVED) != 0)
- fatal("bad gzipped data\n\r");
- if ((flags & EXTRA_FIELD) != 0)
- hdrlen = 12 + hdr[10] + (hdr[11] << 8);
- if ((flags & ORIG_NAME) != 0)
- while (hdr[hdrlen++] != 0)
- ;
- if ((flags & COMMENT) != 0)
- while (hdr[hdrlen++] != 0)
- ;
- if ((flags & HEAD_CRC) != 0)
- hdrlen += 2;
- if (hdrlen >= srclen)
- fatal("gunzip_start: ran out of data in header\n\r");
-
- r = zlib_inflateInit2(&state->s, -MAX_WBITS);
- if (r != Z_OK)
- fatal("inflateInit2 returned %d\n\r", r);
- }
-
- state->s.total_in = hdrlen;
- state->s.next_in = src + hdrlen;
- state->s.avail_in = srclen - hdrlen;
-}
-
-/**
- * gunzip_partial - extract bytes from a gzip data stream
- * @state: gzip state structure previously initialized by gunzip_start()
- * @dst: buffer to store extracted data
- * @dstlen: maximum number of bytes to extract
- *
- * This function extracts at most @dstlen bytes from the data stream
- * previously associated with @state by gunzip_start(), decompressing
- * if necessary. Exactly @dstlen bytes are extracted unless the data
- * stream doesn't contain enough bytes, in which case the entire
- * remainder of the stream is decompressed.
- *
- * Returns the actual number of bytes extracted. If any errors occur,
- * such as a corrupted compressed stream, an error is printed an the
- * platform's exit() function is called.
- */
-int gunzip_partial(struct gunzip_state *state, void *dst, int dstlen)
-{
- int len;
-
- if (state->s.workspace) {
- /* gunzipping */
- int r;
-
- state->s.next_out = dst;
- state->s.avail_out = dstlen;
- r = zlib_inflate(&state->s, Z_FULL_FLUSH);
- if (r != Z_OK && r != Z_STREAM_END)
- fatal("inflate returned %d msg: %s\n\r", r, state->s.msg);
- len = state->s.next_out - (Byte *)dst;
- } else {
- /* uncompressed image */
- len = min(state->s.avail_in, (uLong)dstlen);
- memcpy(dst, state->s.next_in, len);
- state->s.next_in += len;
- state->s.avail_in -= len;
- }
- return len;
-}
-
-/**
- * gunzip_exactly - extract a fixed number of bytes from a gzip data stream
- * @state: gzip state structure previously initialized by gunzip_start()
- * @dst: buffer to store extracted data
- * @dstlen: number of bytes to extract
- *
- * This function extracts exactly @dstlen bytes from the data stream
- * previously associated with @state by gunzip_start(), decompressing
- * if necessary.
- *
- * If there are less @dstlen bytes available in the data stream, or if
- * any other errors occur, such as a corrupted compressed stream, an
- * error is printed an the platform's exit() function is called.
- */
-void gunzip_exactly(struct gunzip_state *state, void *dst, int dstlen)
-{
- int len;
-
- len = gunzip_partial(state, dst, dstlen);
- if (len < dstlen)
- fatal("\n\rgunzip_exactly: ran out of data!"
- " Wanted %d, got %d.\n\r", dstlen, len);
-}
-
-/**
- * gunzip_discard - discard bytes from a gzip data stream
- * @state: gzip state structure previously initialized by gunzip_start()
- * @len: number of bytes to discard
- *
- * This function extracts, then discards exactly @len bytes from the
- * data stream previously associated with @state by gunzip_start().
- * Subsequent gunzip_partial(), gunzip_exactly() or gunzip_finish()
- * calls will extract the data following the discarded bytes in the
- * data stream.
- *
- * If there are less @len bytes available in the data stream, or if
- * any other errors occur, such as a corrupted compressed stream, an
- * error is printed an the platform's exit() function is called.
- */
-void gunzip_discard(struct gunzip_state *state, int len)
-{
- static char discard_buf[128];
-
- while (len > sizeof(discard_buf)) {
- gunzip_exactly(state, discard_buf, sizeof(discard_buf));
- len -= sizeof(discard_buf);
- }
-
- if (len > 0)
- gunzip_exactly(state, discard_buf, len);
-}
-
-/**
- * gunzip_finish - extract all remaining bytes from a gzip data stream
- * @state: gzip state structure previously initialized by gunzip_start()
- * @dst: buffer to store extracted data
- * @dstlen: maximum number of bytes to extract
- *
- * This function extracts all remaining data, or at most @dstlen
- * bytes, from the stream previously associated with @state by
- * gunzip_start(). zlib is then shut down, so it is an error to use
- * any of the functions in this file on @state until it is
- * re-initialized with another call to gunzip_start().
- *
- * If any errors occur, such as a corrupted compressed stream, an
- * error is printed an the platform's exit() function is called.
- */
-int gunzip_finish(struct gunzip_state *state, void *dst, int dstlen)
-{
- int len;
-
- len = gunzip_partial(state, dst, dstlen);
-
- if (state->s.workspace) {
- zlib_inflateEnd(&state->s);
- }
-
- return len;
-}
diff --git a/arch/powerpc/boot/gunzip_util.h b/arch/powerpc/boot/gunzip_util.h
deleted file mode 100644
index b3dfa6e87b3a..000000000000
--- a/arch/powerpc/boot/gunzip_util.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Decompression convenience functions
- *
- * Copyright 2007 David Gibson, IBM Corporation.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#ifndef _PPC_BOOT_GUNZIP_UTIL_H_
-#define _PPC_BOOT_GUNZIP_UTIL_H_
-
-#include "zlib.h"
-
-/*
- * These functions are designed to make life easy for decompressing
- * kernel images, initrd images or any other gzip compressed image,
- * particularly if its useful to decompress part of the image (e.g. to
- * examine headers) before decompressing the remainder.
- *
- * To use:
- * - declare a gunzip_state structure
- * - use gunzip_start() to initialize the state, associating it
- * with a stream of compressed data
- * - use gunzip_partial(), gunzip_exactly() and gunzip_discard()
- * in any combination to extract pieces of data from the stream
- * - Finally use gunzip_finish() to extract the tail of the
- * compressed stream and wind up zlib
- */
-
-/* scratch space for gunzip; 46912 is from zlib_inflate_workspacesize() */
-#define GUNZIP_SCRATCH_SIZE 46912
-
-struct gunzip_state {
- z_stream s;
- char scratch[46912];
-};
-
-void gunzip_start(struct gunzip_state *state, void *src, int srclen);
-int gunzip_partial(struct gunzip_state *state, void *dst, int dstlen);
-void gunzip_exactly(struct gunzip_state *state, void *dst, int len);
-void gunzip_discard(struct gunzip_state *state, int len);
-int gunzip_finish(struct gunzip_state *state, void *dst, int len);
-
-#endif /* _PPC_BOOT_GUNZIP_UTIL_H_ */
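For reference, the header removed above documents a start/partial/exactly/discard/finish call sequence. A minimal sketch of that old pattern, with illustrative buffer and offset names taken from the prep_kernel() hunk below:

    /* Sketch of the removed interface; names and sizes are illustrative. */
    static struct gunzip_state gzstate;
    char elfheader[256];
    int len;

    gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size);       /* bind the stream      */
    gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));   /* peek at the header   */
    gunzip_discard(&gzstate, elfoffset - sizeof(elfheader));  /* skip to the payload  */
    len = gunzip_finish(&gzstate, load_addr, loadsize);       /* rest + zlib teardown */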
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d80161b633f4..f7a184b6c35b 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -15,11 +15,8 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
-#include "gunzip_util.h"
#include "reg.h"
-static struct gunzip_state gzstate;
-
struct addr_range {
void *addr;
unsigned long size;
@@ -30,15 +27,14 @@ struct addr_range {
static struct addr_range prep_kernel(void)
{
char elfheader[256];
- void *vmlinuz_addr = _vmlinux_start;
+ unsigned char *vmlinuz_addr = (unsigned char *)_vmlinux_start;
unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start;
void *addr = 0;
struct elf_info ei;
- int len;
+ long len;
- /* gunzip the ELF header of the kernel */
- gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size);
- gunzip_exactly(&gzstate, elfheader, sizeof(elfheader));
+ partial_decompress(vmlinuz_addr, vmlinuz_size,
+ elfheader, sizeof(elfheader), 0);
if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -51,7 +47,7 @@ static struct addr_range prep_kernel(void)
* the kernel bss must be claimed (it will be zero'd by the
* kernel itself)
*/
- printf("Allocating 0x%lx bytes for kernel ...\n\r", ei.memsize);
+ printf("Allocating 0x%lx bytes for kernel...\n\r", ei.memsize);
if (platform_ops.vmlinux_alloc) {
addr = platform_ops.vmlinux_alloc(ei.memsize);
@@ -71,16 +67,21 @@ static struct addr_range prep_kernel(void)
"device tree\n\r");
}
- /* Finally, gunzip the kernel */
- printf("gunzipping (0x%p <- 0x%p:0x%p)...", addr,
+ /* Finally, decompress the kernel */
+ printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
- /* discard up to the actual load data */
- gunzip_discard(&gzstate, ei.elfoffset - sizeof(elfheader));
- len = gunzip_finish(&gzstate, addr, ei.loadsize);
+
+ len = partial_decompress(vmlinuz_addr, vmlinuz_size,
+ addr, ei.loadsize, ei.elfoffset);
+
+ if (len < 0)
+ fatal("Decompression failed with error code %ld\n\r", len);
+
if (len != ei.loadsize)
- fatal("ran out of data! only got 0x%x of 0x%lx bytes.\n\r",
- len, ei.loadsize);
- printf("done 0x%x bytes\n\r", len);
+ fatal("Decompression error: got 0x%lx bytes, expected 0x%lx.\n\r",
+ len, ei.loadsize);
+
+ printf("Done! Decompressed 0x%lx bytes\n\r", len);
flush_cache(addr, ei.loadsize);
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index e19b64ef977a..309d1b127e96 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -260,4 +260,7 @@ int __ilog2_u32(u32 n)
return 31 - bit;
}
+long partial_decompress(void *inbuf, unsigned long input_size, void *outbuf,
+ unsigned long output_size, unsigned long skip);
+
#endif /* _PPC_BOOT_OPS_H_ */
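The declaration above replaces the whole gunzip_* family with a single entry point; the skip argument covers the old gunzip_discard() use case. A hedged sketch of the calling pattern, mirroring the prep_kernel() hunk with placeholder buffer names:

    char hdr[256];
    long n;

    /* Peek at the first bytes of the uncompressed stream (skip = 0). */
    n = partial_decompress(image, image_len, hdr, sizeof(hdr), 0);

    /* Extract loadsize bytes starting elfoffset bytes into the stream;
     * negative return values indicate a decompressor error. */
    n = partial_decompress(image, image_len, dest, loadsize, elfoffset);
    if (n < 0)
            fatal("Decompression failed with error code %ld\n\r", n);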
diff --git a/arch/powerpc/boot/stdbool.h b/arch/powerpc/boot/stdbool.h
new file mode 100644
index 000000000000..f818efb08891
--- /dev/null
+++ b/arch/powerpc/boot/stdbool.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) IBM Corporation 2016.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file is only necessary because some of the pre-boot decompressors
+ * expect stdbool.h to be available.
+ *
+ */
+
+#include "types.h"
diff --git a/arch/powerpc/boot/stdint.h b/arch/powerpc/boot/stdint.h
new file mode 100644
index 000000000000..c1c853be7490
--- /dev/null
+++ b/arch/powerpc/boot/stdint.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) IBM Corporation 2016.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file is only necessary because some of the pre-boot decompressors
+ * expect stdint.h to be available.
+ */
+
+#include "types.h"
diff --git a/arch/powerpc/boot/types.h b/arch/powerpc/boot/types.h
index 85565a89bcc2..af6b66b842c4 100644
--- a/arch/powerpc/boot/types.h
+++ b/arch/powerpc/boot/types.h
@@ -1,6 +1,8 @@
#ifndef _TYPES_H_
#define _TYPES_H_
+#include <stdbool.h>
+
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
typedef unsigned char u8;
@@ -34,4 +36,16 @@ typedef s64 int64_t;
(void) (&_x == &_y); \
_x > _y ? _x : _y; })
+#define min_t(type, a, b) min(((type) a), ((type) b))
+#define max_t(type, a, b) max(((type) a), ((type) b))
+
+typedef int bool;
+
+#ifndef true
+#define true 1
+#endif
+
+#ifndef false
+#define false 0
+#endif
#endif /* _TYPES_H_ */
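The min_t()/max_t() helpers added above exist to force both operands to one type before comparing, sidestepping the signed/unsigned traps that the plain min()/max() type check is there to catch. A small illustration with hypothetical values:

    unsigned long avail = 100000;
    int want = -1;

    /* Both sides are converted to long first, so the comparison is signed
     * and yields -1; a mixed signed/unsigned comparison would instead have
     * treated -1 as a very large value. */
    long n = min_t(long, avail, want);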
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6681ec3625c9..404b3aabdb4d 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -20,6 +20,8 @@
# -D dir specify directory containing data files used by script
# (default ./arch/powerpc/boot)
# -W dir specify working directory for temporary files (default .)
+# -z use gzip (legacy)
+# -Z zsuffix compression to use (gz, xz or none)
# Stop execution if any command fails
set -e
@@ -38,7 +40,7 @@ dtb=
dts=
cacheit=
binary=
-gzip=.gz
+compression=.gz
pie=
format=
@@ -59,7 +61,8 @@ tmpdir=.
usage() {
echo 'Usage: wrapper [-o output] [-p platform] [-i initrd]' >&2
echo ' [-d devtree] [-s tree.dts] [-c] [-C cross-prefix]' >&2
- echo ' [-D datadir] [-W workingdir] [--no-gzip] [vmlinux]' >&2
+ echo ' [-D datadir] [-W workingdir] [-Z (gz|xz|none)]' >&2
+ echo ' [--no-compression] [vmlinux]' >&2
exit 1
}
@@ -126,8 +129,24 @@ while [ "$#" -gt 0 ]; do
[ "$#" -gt 0 ] || usage
tmpdir="$1"
;;
+ -z)
+ compression=.gz
+ ;;
+ -Z)
+ shift
+ [ "$#" -gt 0 ] || usage
+ [ "$1" != "gz" -o "$1" != "xz" -o "$1" != "none" ] || usage
+
+ compression=".$1"
+
+ if [ $compression = ".none" ]; then
+ compression=
+ fi
+ ;;
--no-gzip)
- gzip=
+ # a "feature" of the the wrapper script is that it can be used outside
+ # the kernel tree. So keeping this around for backwards compatibility.
+ compression=
;;
-?)
usage
@@ -140,6 +159,7 @@ while [ "$#" -gt 0 ]; do
shift
done
+
if [ -n "$dts" ]; then
if [ ! -r "$dts" -a -r "$object/dts/$dts" ]; then
dts="$object/dts/$dts"
@@ -212,7 +232,7 @@ miboot|uboot*)
;;
cuboot*)
binary=y
- gzip=
+ compression=
case "$platform" in
*-mpc866ads|*-mpc885ads|*-adder875*|*-ep88xc)
platformo=$object/cuboot-8xx.o
@@ -243,7 +263,7 @@ cuboot*)
ps3)
platformo="$object/ps3-head.o $object/ps3-hvcall.o $object/ps3.o"
lds=$object/zImage.ps3.lds
- gzip=
+ compression=
ext=bin
objflags="-O binary --set-section-flags=.bss=contents,alloc,load,data"
ksection=.kernel:vmlinux.bin
@@ -310,27 +330,37 @@ mvme7100)
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
-if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
- ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
- strip_size=$(stat -c %s $vmz.$$)
+# Calculate the vmlinux.strip size
+${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
+strip_size=$(stat -c %s $vmz.$$)
- if [ -n "$gzip" ]; then
+if [ -z "$cacheit" -o ! -f "$vmz$compression" -o "$vmz$compression" -ot "$kernel" ]; then
+ # recompress the image if we need to
+ case $compression in
+ .xz)
+ xz --check=crc32 -f -6 "$vmz.$$"
+ ;;
+ .gz)
gzip -n -f -9 "$vmz.$$"
- fi
+ ;;
+ *)
+ # drop the compression suffix so the stripped vmlinux is used
+ compression=
+ ;;
+ esac
if [ -n "$cacheit" ]; then
- mv -f "$vmz.$$$gzip" "$vmz$gzip"
+ mv -f "$vmz.$$$compression" "$vmz$compression"
else
vmz="$vmz.$$"
fi
else
- # Calculate the vmlinux.strip size
- ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
- strip_size=$(stat -c %s $vmz.$$)
rm -f $vmz.$$
fi
+vmz="$vmz$compression"
+
if [ "$make_space" = "y" ]; then
# Round the size to next higher MB limit
round_size=$(((strip_size + 0xfffff) & 0xfff00000))
@@ -346,8 +376,6 @@ if [ "$make_space" = "y" ]; then
fi
fi
-vmz="$vmz$gzip"
-
# Extract kernel version information, some platforms want to include
# it in the image header
version=`${CROSS}strings "$kernel" | grep '^Linux version [-0-9.]' | \
@@ -417,6 +445,7 @@ if [ "$platform" != "miboot" ]; then
if [ -n "$link_address" ] ; then
text_start="-Ttext $link_address"
fi
+# link everything
${CROSS}ld -m $format -T $lds $text_start $pie -o "$ofile" \
$platformo $tmp $object/wrapper.a
rm $tmp
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
new file mode 100644
index 000000000000..5c6afdbca642
--- /dev/null
+++ b/arch/powerpc/boot/xz_config.h
@@ -0,0 +1,39 @@
+#ifndef __XZ_CONFIG_H__
+#define __XZ_CONFIG_H__
+
+/*
+ * most of this is copied from lib/xz/xz_private.h; we can't use their defines
+ * since the boot wrapper is not built in the same environment as the rest of
+ * the kernel.
+ */
+
+#include "types.h"
+#include "swab.h"
+
+static inline uint32_t swab32p(void *p)
+{
+ uint32_t *q = p;
+
+ return swab32(*q);
+}
+
+#ifdef __LITTLE_ENDIAN__
+#define get_le32(p) (*((uint32_t *) (p)))
+#else
+#define get_le32(p) swab32p(p)
+#endif
+
+#define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#define memzero(buf, size) memset(buf, 0, size)
+
+/* prevent the inclusion of the xz-preboot MM headers */
+#define DECOMPR_MM_H
+#define memmove memmove
+#define XZ_EXTERN static
+
+/* xz.h needs to be included directly since we need enum xz_mode */
+#include "../../../include/linux/xz.h"
+
+#undef XZ_EXTERN
+
+#endif
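get_le32() above resolves to a plain load on little-endian builds and to swab32p() on big-endian ones, so either way it reads a little-endian 32-bit value. A small hypothetical check:

    /* The LE32 value 0x11223344 is stored as the bytes 44 33 22 11. */
    unsigned char raw[4] = { 0x44, 0x33, 0x22, 0x11 };
    uint32_t v = get_le32(raw);   /* 0x11223344 on both BE and LE hosts */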
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index dce352e9153b..d98b6eb3254f 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -15,6 +15,8 @@ CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
@@ -95,7 +97,7 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AMD74XX=y
CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
@@ -107,7 +109,7 @@ CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_BE2ISCSI=m
CONFIG_SCSI_MPT2SAS=m
-CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_2=m
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
@@ -149,10 +151,10 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
-CONFIG_VORTEX=y
+CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_PCNET32=y
+CONFIG_PCNET32=m
CONFIG_TIGON3=y
CONFIG_BNX2X=m
CONFIG_CHELSIO_T1=m
@@ -163,6 +165,7 @@ CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IXGB=m
CONFIG_IXGBE=m
+CONFIG_I40E=m
CONFIG_MLX4_EN=m
CONFIG_MYRI10GE=m
CONFIG_QLGE=m
@@ -238,7 +241,7 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
+CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
@@ -253,10 +256,10 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=y
+CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
+CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -310,6 +313,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_DEV_NX=y
+CONFIG_CRYPTO_DEV_VMX=y
+CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 0a8d250cb97e..58a98d40086f 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -10,6 +10,8 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
@@ -90,7 +92,7 @@ CONFIG_BLK_DEV_AMD74XX=y
CONFIG_BLK_DEV_IDE_PMAC=y
CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
@@ -103,7 +105,7 @@ CONFIG_BE2ISCSI=m
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
-CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_2=m
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
@@ -149,10 +151,10 @@ CONFIG_NETCONSOLE=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
-CONFIG_VORTEX=y
+CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_PCNET32=y
+CONFIG_PCNET32=m
CONFIG_TIGON3=y
CONFIG_BNX2X=m
CONFIG_CHELSIO_T1=m
@@ -165,6 +167,7 @@ CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IXGB=m
CONFIG_IXGBE=m
+CONFIG_I40E=m
CONFIG_MLX4_EN=m
CONFIG_MYRI10GE=m
CONFIG_PASEMI_MAC=y
@@ -269,7 +272,7 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
+CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
@@ -284,10 +287,10 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=y
+CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
+CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -347,6 +350,8 @@ CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
+CONFIG_CRYPTO_DEV_VMX=y
+CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 654aeffc57ef..8a3bc016b732 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -15,6 +15,8 @@ CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
@@ -95,7 +97,7 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AMD74XX=y
CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
@@ -108,7 +110,7 @@ CONFIG_BE2ISCSI=m
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
-CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_2=m
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
@@ -150,10 +152,10 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
-CONFIG_VORTEX=y
+CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_PCNET32=y
+CONFIG_PCNET32=m
CONFIG_TIGON3=y
CONFIG_BNX2X=m
CONFIG_CHELSIO_T1=m
@@ -166,6 +168,7 @@ CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IXGB=m
CONFIG_IXGBE=m
+CONFIG_I40E=m
CONFIG_MLX4_EN=m
CONFIG_MYRI10GE=m
CONFIG_QLGE=m
@@ -241,7 +244,7 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
+CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
@@ -256,10 +259,10 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=y
+CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
+CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -314,6 +317,8 @@ CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
+CONFIG_CRYPTO_DEV_VMX=y
+CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index e71b9097594c..d1492736d852 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -15,6 +15,8 @@
#include <linux/threads.h>
#include <linux/kprobes.h>
+#include <uapi/asm/ucontext.h>
+
/* SMP */
extern struct thread_info *current_set[NR_CPUS];
extern struct thread_info *secondary_ti;
@@ -52,8 +54,8 @@ void SMIException(struct pt_regs *regs);
void handle_hmi_exception(struct pt_regs *regs);
void instruction_breakpoint_exception(struct pt_regs *regs);
void RunModeException(struct pt_regs *regs);
-void __kprobes single_step_exception(struct pt_regs *regs);
-void __kprobes program_check_exception(struct pt_regs *regs);
+void single_step_exception(struct pt_regs *regs);
+void program_check_exception(struct pt_regs *regs);
void alignment_exception(struct pt_regs *regs);
void StackOverflow(struct pt_regs *regs);
void nonrecoverable_exception(struct pt_regs *regs);
@@ -70,6 +72,41 @@ void unrecoverable_exception(struct pt_regs *regs);
void kernel_bad_stack(struct pt_regs *regs);
void system_reset_exception(struct pt_regs *regs);
void machine_check_exception(struct pt_regs *regs);
-void __kprobes emulation_assist_interrupt(struct pt_regs *regs);
+void emulation_assist_interrupt(struct pt_regs *regs);
+
+/* signals, syscalls and interrupts */
+#ifdef CONFIG_PPC64
+int sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx,
+ long ctx_size, long r6, long r7, long r8, struct pt_regs *regs);
+#else
+long sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx,
+ int ctx_size, int r6, int r7, int r8, struct pt_regs *regs);
+#endif
+long sys_switch_endian(void);
+notrace unsigned int __check_irq_replay(void);
+void notrace restore_interrupts(void);
+
+/* ptrace */
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+/* process */
+void restore_math(struct pt_regs *regs);
+void restore_tm_state(struct pt_regs *regs);
+
+/* prom_init (OpenFirmware) */
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+ unsigned long pp,
+ unsigned long r6, unsigned long r7,
+ unsigned long kbase);
+
+/* setup */
+void __init early_setup(unsigned long dt_ptr);
+void early_setup_secondary(void);
+
+/* time */
+void accumulate_stolen_time(void);
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index f08d567e0ca4..2b90335194a7 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -233,7 +233,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
- beq- 2f \n\
+ beq 2f \n\
add %0,%2,%0 \n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
@@ -539,7 +539,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # __atomic_add_unless\n\
cmpd 0,%0,%3 \n\
- beq- 2f \n\
+ beq 2f \n\
add %0,%2,%0 \n"
" stdcx. %0,0,%1 \n\
bne- 1b \n"
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 38b33dcfcc9d..6b8b2d57fdc8 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -223,7 +223,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 287a656ceb57..e407af2b7333 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -245,6 +245,43 @@ static inline int segment_shift(int ssize)
}
/*
+ * This array is indexed by the LP field of the HPTE second dword.
+ * Since this field may contain some RPN bits, some entries are
+ * replicated so that we get the same value irrespective of RPN.
+ * The top 4 bits are the page size index (MMU_PAGE_*) for the
+ * actual page size, the bottom 4 bits are the base page size.
+ */
+extern u8 hpte_page_sizes[1 << LP_BITS];
+
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+ bool is_base_size)
+{
+ unsigned int i, lp;
+
+ if (!(h & HPTE_V_LARGE))
+ return 1ul << 12;
+
+ /* Look at the 8 bit LP value */
+ lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ i = hpte_page_sizes[lp];
+ if (!i)
+ return 0;
+ if (!is_base_size)
+ i >>= 4;
+ return 1ul << mmu_psize_defs[i & 0xf].shift;
+}
+
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 1);
+}
+
+/*
* The current system page and segment sizes
*/
extern int mmu_kernel_ssize;
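The packing convention stated in the hpte_page_sizes[] comment (top nibble = actual page size index, bottom nibble = base page size index) is what both helpers above and the compute_tlbie_rb() change later in this patch rely on. A worked decode, with an illustrative table entry:

    /* If hpte_page_sizes[lp] held (MMU_PAGE_16M << 4) | MMU_PAGE_64K, the
     * HPTE maps an actual 16M page in a 64K base-page-size segment. */
    unsigned int i = hpte_page_sizes[lp];
    unsigned int base_idx   = i & 0xf;   /* bottom nibble: base page size   */
    unsigned int actual_idx = i >> 4;    /* top nibble:    actual page size */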
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 263bf39ced40..9fd77f8794a0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -6,6 +6,8 @@
*/
#define _PAGE_BIT_SWAP_TYPE 0
+#define _PAGE_RO 0
+
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
@@ -565,10 +567,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
* Generic functions with hash/radix callbacks
*/
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
if (radix_enabled())
- return radix__ptep_set_access_flags(ptep, entry);
+ return radix__ptep_set_access_flags(mm, ptep, entry);
return hash__ptep_set_access_flags(ptep, entry);
}
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index df294224e280..2a46dea8e1b1 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -11,6 +11,11 @@
#include <asm/book3s/64/radix-4k.h>
#endif
+#ifndef __ASSEMBLY__
+#include <asm/book3s/64/tlbflush-radix.h>
+#include <asm/cpu_has_feature.h>
+#endif
+
/* An empty PTE can still have a R or C writeback */
#define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
@@ -105,11 +110,8 @@
#define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
-static inline unsigned long radix__pte_update(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, unsigned long clr,
- unsigned long set,
- int huge)
+static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
+ unsigned long set)
{
pte_t pte;
unsigned long old_pte, new_pte;
@@ -121,9 +123,39 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
- /* We already do a sync in cmpxchg, is ptesync needed ?*/
+ return old_pte;
+}
+
+
+static inline unsigned long radix__pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ unsigned long set,
+ int huge)
+{
+ unsigned long old_pte;
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+
+ unsigned long new_pte;
+
+ old_pte = __radix_pte_update(ptep, ~0, 0);
+ asm volatile("ptesync" : : : "memory");
+ /*
+ * new value of pte
+ */
+ new_pte = (old_pte | set) & ~clr;
+
+ /*
+ * For now let's do heavy pid flush
+ * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+ */
+ radix__flush_tlb_mm(mm);
+
+ __radix_pte_update(ptep, 0, new_pte);
+ } else
+ old_pte = __radix_pte_update(ptep, clr, set);
asm volatile("ptesync" : : : "memory");
- /* huge pages use the old page table lock */
if (!huge)
assert_pte_locked(mm, addr);
@@ -134,20 +166,33 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
* Set the dirty and/or accessed bits atomically in a linux PTE, this
* function doesn't need to invalidate tlb.
*/
-static inline void radix__ptep_set_access_flags(pte_t *ptep, pte_t entry)
+static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
- pte_t pte;
- unsigned long old_pte, new_pte;
+
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
_PAGE_RW | _PAGE_EXEC);
- do {
- pte = READ_ONCE(*ptep);
- old_pte = pte_val(pte);
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+
+ unsigned long old_pte, new_pte;
+
+ old_pte = __radix_pte_update(ptep, ~0, 0);
+ asm volatile("ptesync" : : : "memory");
+ /*
+ * new value of pte
+ */
new_pte = old_pte | set;
- } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+ /*
+ * For now let's do heavy pid flush
+ * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+ */
+ radix__flush_tlb_mm(mm);
- /* We already do a sync in cmpxchg, is ptesync needed ?*/
+ __radix_pte_update(ptep, 0, new_pte);
+ } else
+ __radix_pte_update(ptep, 0, set);
asm volatile("ptesync" : : : "memory");
}
@@ -233,14 +278,19 @@ static inline unsigned long radix__get_tree_size(void)
{
unsigned long rts_field;
/*
- * we support 52 bits, hence 52-31 = 21, 0b10101
+ * We support 52 bits, hence:
+ * DD1 52-28 = 24, 0b11000
+ * Others 52-31 = 21, 0b10101
* RTS encoding details
* bits 0 - 3 of rts -> bits 6 - 8 unsigned long
* bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
*/
- rts_field = (0x5UL << 5); /* 6 - 8 bits */
- rts_field |= (0x2UL << 61);
-
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ rts_field = (0x3UL << 61);
+ else {
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+ }
return rts_field;
}
#endif /* __ASSEMBLY__ */
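The RTS comment in radix__get_tree_size() is easier to follow with the arithmetic written out; this only illustrates the masks the function builds, not the ISA's own bit numbering:

    unsigned long rts = 52 - 31;                /* 21 = 0b10101            */
    unsigned long field = ((rts & 0x7) << 5)    /* 0b101 -> (0x5UL << 5)   */
                        | ((rts >> 3) << 61);   /* 0b10  -> (0x2UL << 61)  */
    /* For DD1: rts = 52 - 28 = 24 = 0b11000, so only (0x3UL << 61) remains. */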
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 65037762b120..a9e19cb2f7c5 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -41,4 +41,5 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size);
extern void radix__flush_tlb_lpid(unsigned long lpid);
+extern void radix__flush_tlb_all(void);
#endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 82026b419341..f752e6f7cfbe 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -212,6 +212,7 @@ enum {
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
+#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#ifndef __ASSEMBLY__
@@ -472,6 +473,7 @@ enum {
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
+#define CPU_FTRS_POWER9_DD1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -490,7 +492,7 @@ enum {
(CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
- CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9)
+ CPU_FTRS_PA6T | CPU_FTR_VSX | CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1)
#endif
#else
enum {
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index bed66e5743b3..2e4e7d878c8e 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -34,6 +34,7 @@
* exception handlers (including pSeries LPAR) and iSeries LPAR
* implementations as possible.
*/
+#include <asm/head-64.h>
#define EX_R9 0
#define EX_R10 8
@@ -52,7 +53,6 @@
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
mtctr r12; \
@@ -84,13 +84,14 @@
/*
* We're short on space and time in the exception prolog, so we can't
- * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
- * low halfword of the address, but for Kdump we need the whole low
- * word.
+ * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
+ * Instead we get the base of the kernel from paca->kernelbase and or in the low
+ * part of label. This requires that the label be within 64KB of kernelbase, and
+ * that kernelbase be 64K aligned.
*/
#define LOAD_HANDLER(reg, label) \
- /* Handlers must be within 64K of kbase, which must be 64k aligned */ \
- ori reg,reg,(label)-_stext; /* virt addr of handler ... */
+ ld reg,PACAKBASE(r13); /* get high part of &label */ \
+ ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
/* Exception register prefixes */
#define EXC_HV H
@@ -175,7 +176,6 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
__EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
@@ -192,10 +192,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
-#define __KVMTEST(n) \
- lbz r10,HSTATE_IN_GUEST(r13); \
+#define __KVMTEST(h, n) \
+ lbz r10,HSTATE_IN_GUEST(r13); \
cmpwi r10,0; \
- bne do_kvm_##n
+ bne do_kvm_##h##n
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
@@ -208,8 +208,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
-#define __KVM_HANDLER(area, h, n) \
-do_kvm_##n: \
+#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
std r10,HSTATE_CFAR(r13); \
@@ -222,21 +221,23 @@ do_kvm_##n: \
stw r9,HSTATE_SCRATCH1(r13); \
ld r9,area+EX_R9(r13); \
std r12,HSTATE_SCRATCH0(r13); \
+
+#define __KVM_HANDLER(area, h, n) \
+ __KVM_HANDLER_PROLOG(area, n) \
li r12,n; \
b kvmppc_interrupt
#define __KVM_HANDLER_SKIP(area, h, n) \
-do_kvm_##n: \
cmpwi r10,KVM_GUEST_MODE_SKIP; \
ld r10,area+EX_R10(r13); \
beq 89f; \
- stw r9,HSTATE_SCRATCH1(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
BEGIN_FTR_SECTION_NESTED(948) \
ld r9,area+EX_PPR(r13); \
std r9,HSTATE_PPR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
ld r9,area+EX_R9(r13); \
- std r12,HSTATE_SCRATCH0(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
li r12,n; \
b kvmppc_interrupt; \
89: mtocrf 0x80,r9; \
@@ -244,12 +245,12 @@ do_kvm_##n: \
b kvmppc_skip_##h##interrupt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define KVMTEST(n) __KVMTEST(n)
+#define KVMTEST(h, n) __KVMTEST(h, n)
#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
#else
-#define KVMTEST(n)
+#define KVMTEST(h, n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif
@@ -333,94 +334,79 @@ do_kvm_##n: \
/*
* Exception vectors.
*/
-#define STD_EXCEPTION_PSERIES(vec, label) \
- . = vec; \
- .globl label##_pSeries; \
-label##_pSeries: \
+#define STD_EXCEPTION_PSERIES(vec, label) \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_STD, KVMTEST, vec)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
+ EXC_STD, KVMTEST_PR, vec); \
/* Version of above for when we have to branch out-of-line */
+#define __OOL_EXCEPTION(vec, label, hdlr) \
+ SET_SCRATCH0(r13) \
+ EXCEPTION_PROLOG_0(PACA_EXGEN) \
+ b hdlr;
+
#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
- .globl label##_pSeries; \
-label##_pSeries: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
-
-#define STD_EXCEPTION_HV(loc, vec, label) \
- . = loc; \
- .globl label##_hv; \
-label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+
+#define STD_EXCEPTION_HV(loc, vec, label) \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_HV, KVMTEST, vec)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
+ EXC_HV, KVMTEST_HV, vec);
-/* Version of above for when we have to branch out-of-line */
-#define STD_EXCEPTION_HV_OOL(vec, label) \
- .globl label##_hv; \
-label##_hv: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV)
+#define STD_EXCEPTION_HV_OOL(vec, label) \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
- . = loc; \
- .globl label##_relon_pSeries; \
-label##_relon_pSeries: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_STD, NOTEST, vec)
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
- .globl label##_relon_pSeries; \
-label##_relon_pSeries: \
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_STD)
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
- . = loc; \
- .globl label##_relon_hv; \
-label##_relon_hv: \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
- EXC_HV, NOTEST, vec)
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, EXC_HV, NOTEST, vec);
#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
- .globl label##_relon_hv; \
-label##_relon_hv: \
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
/* This associate vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
-#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
-#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x980 PACA_IRQ_DEC
#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
-#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
-#define SOFTEN_VALUE_0xe62 PACA_IRQ_HMI
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
-#define SOFTEN_VALUE_0xea2 PACA_IRQ_EE
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi r10,0; \
li r10,SOFTEN_VALUE_##vec; \
beq masked_##h##interrupt
+
#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
#define SOFTEN_TEST_PR(vec) \
- KVMTEST(vec); \
+ KVMTEST(EXC_STD, vec); \
_SOFTEN_TEST(EXC_STD, vec)
#define SOFTEN_TEST_HV(vec) \
- KVMTEST(vec); \
+ KVMTEST(EXC_HV, vec); \
_SOFTEN_TEST(EXC_HV, vec)
+#define KVMTEST_PR(vec) \
+ KVMTEST(EXC_STD, vec)
+
+#define KVMTEST_HV(vec) \
+ KVMTEST(EXC_HV, vec)
+
#define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec)
#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
@@ -428,58 +414,47 @@ label##_relon_hv: \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+ EXCEPTION_PROLOG_PSERIES_1(label, h);
#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
- . = loc; \
- .globl label##_pSeries; \
-label##_pSeries: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_TEST_PR)
+#define MASKABLE_EXCEPTION_PSERIES_OOL(vec, label) \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+
#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
- . = loc; \
- .globl label##_hv; \
-label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
- .globl label##_hv; \
-label##_hv: \
EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
-#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+
+#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
__MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)
#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label) \
- . = loc; \
- .globl label##_relon_pSeries; \
-label##_relon_pSeries: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_NOTEST_PR)
#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label) \
- . = loc; \
- .globl label##_relon_hv; \
-label##_relon_hv: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_NOTEST_HV)
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
- .globl label##_relon_hv; \
-label##_relon_hv: \
EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+ EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
/*
* Our exception common code can be passed various "additions"
@@ -505,9 +480,6 @@ BEGIN_FTR_SECTION \
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
/* Volatile regs are potentially clobbered here */ \
additions; \
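One detail worth spelling out from the new LOAD_HANDLER() comment: because kernelbase is 64K aligned its low 16 bits are zero, so OR-ing in the handler's low halfword reconstructs the full address provided the handler lies within 64K of kernelbase. A plain C illustration with made-up addresses:

    unsigned long kernelbase = 0xc000000000000000UL;   /* 64K aligned */
    unsigned long handler    = kernelbase + 0x4f00;    /* within 64K  */
    unsigned long loaded     = kernelbase | (handler & 0xffff);
    /* loaded == handler; with an offset >= 0x10000 the OR would lose bits. */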
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index b4407d0add27..0031806475f0 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -45,10 +45,6 @@
#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
-#ifndef ELF_CORE_EFLAGS
-#define ELF_CORE_EFLAGS 0
-#endif
-
/* Firmware provided dump sections */
#define FADUMP_CPU_STATE_DATA 0x0001
#define FADUMP_HPTE_REGION 0x0002
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
new file mode 100644
index 000000000000..ab90c2fa1ea6
--- /dev/null
+++ b/arch/powerpc/include/asm/head-64.h
@@ -0,0 +1,393 @@
+#ifndef _ASM_POWERPC_HEAD_64_H
+#define _ASM_POWERPC_HEAD_64_H
+
+#include <asm/cache.h>
+
+/*
+ * We can't do CPP stringification and concatination directly into the section
+ * name for some reason, so these macros can do it for us.
+ */
+.macro define_ftsec name
+ .section ".head.text.\name\()","ax",@progbits
+.endm
+.macro define_data_ftsec name
+ .section ".head.data.\name\()","a",@progbits
+.endm
+.macro use_ftsec name
+ .section ".head.text.\name\()"
+.endm
+
+/*
+ * Fixed (location) sections are used by opening fixed sections and emitting
+ * fixed section entries into them before closing them. Multiple fixed sections
+ * can be open at any time.
+ *
+ * Each fixed section created in a .S file must have corresponding linkage
+ * directives including location, added to arch/powerpc/kernel/vmlinux.lds.S
+ *
+ * For each fixed section, code is generated into it in the order in which it
+ * appears in the source. Fixed section entries can be placed at a fixed
+ * location within the section using _LOCATION postfix variants. These must
+ * be ordered according to their relative placements within the section.
+ *
+ * OPEN_FIXED_SECTION(section_name, start_address, end_address)
+ * FIXED_SECTION_ENTRY_BEGIN(section_name, label1)
+ *
+ * USE_FIXED_SECTION(section_name)
+ * label3:
+ * li r10,128
+ * mv r11,r10
+ *
+ * FIXED_SECTION_ENTRY_BEGIN_LOCATION(section_name, label2, start_address)
+ * FIXED_SECTION_ENTRY_END_LOCATION(section_name, label2, end_address)
+ * CLOSE_FIXED_SECTION(section_name)
+ *
+ * ZERO_FIXED_SECTION can be used to emit zeroed data.
+ *
+ * Troubleshooting:
+ * - If the build dies with "Error: attempt to move .org backwards" at
+ * CLOSE_FIXED_SECTION() or elsewhere, there may be something
+ * unexpected being added there. Remove the '. = x_len' line, rebuild, and
+ * check what is pushing the section down.
+ * - If the build dies in linking, check arch/powerpc/kernel/vmlinux.lds.S
+ * for instructions.
+ * - If the kernel crashes or hangs in very early boot, it could be linker
+ * stubs at the start of the main text.
+ */
+
+#define OPEN_FIXED_SECTION(sname, start, end) \
+ sname##_start = (start); \
+ sname##_end = (end); \
+ sname##_len = (end) - (start); \
+ define_ftsec sname; \
+ . = 0x0; \
+start_##sname:
+
+#define OPEN_TEXT_SECTION(start) \
+ text_start = (start); \
+ .section ".text","ax",@progbits; \
+ . = 0x0; \
+start_text:
+
+#define ZERO_FIXED_SECTION(sname, start, end) \
+ sname##_start = (start); \
+ sname##_end = (end); \
+ sname##_len = (end) - (start); \
+ define_data_ftsec sname; \
+ . = 0x0; \
+ . = sname##_len;
+
+#define USE_FIXED_SECTION(sname) \
+ fs_label = start_##sname; \
+ fs_start = sname##_start; \
+ use_ftsec sname;
+
+#define USE_TEXT_SECTION() \
+ fs_label = start_text; \
+ fs_start = text_start; \
+ .text
+
+#define CLOSE_FIXED_SECTION(sname) \
+ USE_FIXED_SECTION(sname); \
+ . = sname##_len; \
+end_##sname:
+
+
+#define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align) \
+ USE_FIXED_SECTION(sname); \
+ .align __align; \
+ .global name; \
+name:
+
+#define FIXED_SECTION_ENTRY_BEGIN(sname, name) \
+ __FIXED_SECTION_ENTRY_BEGIN(sname, name, 0)
+
+#define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start) \
+ USE_FIXED_SECTION(sname); \
+ name##_start = (start); \
+ .if (start) < sname##_start; \
+ .error "Fixed section underflow"; \
+ .abort; \
+ .endif; \
+ . = (start) - sname##_start; \
+ .global name; \
+name:
+
+#define FIXED_SECTION_ENTRY_END_LOCATION(sname, name, end) \
+ .if (end) > sname##_end; \
+ .error "Fixed section overflow"; \
+ .abort; \
+ .endif; \
+ .if (. - name > end - name##_start); \
+ .error "Fixed entry overflow"; \
+ .abort; \
+ .endif; \
+ . = ((end) - sname##_start); \
+
+
+/*
+ * These macros are used to change symbols in other fixed sections to be
+ * absolute or related to our current fixed section.
+ *
+ * - DEFINE_FIXED_SYMBOL / FIXED_SYMBOL_ABS_ADDR is used to find the
+ * absolute address of a symbol within a fixed section, from any section.
+ *
+ * - ABS_ADDR is used to find the absolute address of any symbol, from within
+ * a fixed section.
+ */
+#define DEFINE_FIXED_SYMBOL(label) \
+ label##_absolute = (label - fs_label + fs_start)
+
+#define FIXED_SYMBOL_ABS_ADDR(label) \
+ (label##_absolute)
+
+#define ABS_ADDR(label) (label - fs_label + fs_start)
+
+/*
+ * Following are the BOOK3S exception handler helper macros.
+ * Handlers come in a number of types, and each type has a number of varieties.
+ *
+ * EXC_REAL_* - real, unrelocated exception vectors
+ * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
+ * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
+ * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
+ * TRAMP_KVM - KVM handlers that get put into real, unrelocated
+ * EXC_COMMON_* - virt, relocated common handlers
+ *
+ * The EXC handlers are given a name, and branch to name_common, or the
+ * appropriate KVM or masking function. Vector handler varieties are as
+ * follows:
+ *
+ * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
+ *
+ * EXC_{REAL|VIRT} - standard exception
+ *
+ * EXC_{REAL|VIRT}_suffix
+ * where _suffix is:
+ * - _MASKABLE - maskable exception
+ * - _OOL - out of line with trampoline to common handler
+ * - _HV - HV exception
+ *
+ * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
+ *
+ * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
+ * an OOL vector that branches to a specified handler rather than the usual
+ * trampoline that goes to common. It, and other underscore macros, should
+ * be used with care.
+ *
+ * KVM handlers come in the following varieties:
+ * TRAMP_KVM
+ * TRAMP_KVM_SKIP
+ * TRAMP_KVM_HV
+ * TRAMP_KVM_HV_SKIP
+ *
+ * COMMON handlers come in the following varieties:
+ * EXC_COMMON_BEGIN/END - used to open-code the handler
+ * EXC_COMMON
+ * EXC_COMMON_ASYNC
+ * EXC_COMMON_HV
+ *
+ * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
+ * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
+ */
+
+#define EXC_REAL_BEGIN(name, start, end) \
+ FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start)
+
+#define EXC_REAL_END(name, start, end) \
+ FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, end)
+
+#define EXC_VIRT_BEGIN(name, start, end) \
+ FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start)
+
+#define EXC_VIRT_END(name, start, end) \
+ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, end)
+
+#define EXC_COMMON_BEGIN(name) \
+ USE_TEXT_SECTION(); \
+ .align 7; \
+ .global name; \
+ DEFINE_FIXED_SYMBOL(name); \
+name:
+
+#define TRAMP_REAL_BEGIN(name) \
+ FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
+
+#define TRAMP_VIRT_BEGIN(name) \
+ FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define TRAMP_KVM_BEGIN(name) \
+ TRAMP_REAL_BEGIN(name)
+#else
+#define TRAMP_KVM_BEGIN(name)
+#endif
+
+#define EXC_REAL_NONE(start, end) \
+ FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start); \
+ FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, end)
+
+#define EXC_VIRT_NONE(start, end) \
+ FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start); \
+ FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, end);
+
+
+#define EXC_REAL(name, start, end) \
+ EXC_REAL_BEGIN(name, start, end); \
+ STD_EXCEPTION_PSERIES(start, name##_common); \
+ EXC_REAL_END(name, start, end);
+
+#define EXC_VIRT(name, start, end, realvec) \
+ EXC_VIRT_BEGIN(name, start, end); \
+ STD_RELON_EXCEPTION_PSERIES(start, realvec, name##_common); \
+ EXC_VIRT_END(name, start, end);
+
+#define EXC_REAL_MASKABLE(name, start, end) \
+ EXC_REAL_BEGIN(name, start, end); \
+ MASKABLE_EXCEPTION_PSERIES(start, start, name##_common); \
+ EXC_REAL_END(name, start, end);
+
+#define EXC_VIRT_MASKABLE(name, start, end, realvec) \
+ EXC_VIRT_BEGIN(name, start, end); \
+ MASKABLE_RELON_EXCEPTION_PSERIES(start, realvec, name##_common); \
+ EXC_VIRT_END(name, start, end);
+
+#define EXC_REAL_HV(name, start, end) \
+ EXC_REAL_BEGIN(name, start, end); \
+ STD_EXCEPTION_HV(start, start, name##_common); \
+ EXC_REAL_END(name, start, end);
+
+#define EXC_VIRT_HV(name, start, end, realvec) \
+ EXC_VIRT_BEGIN(name, start, end); \
+ STD_RELON_EXCEPTION_HV(start, realvec, name##_common); \
+ EXC_VIRT_END(name, start, end);
+
+#define __EXC_REAL_OOL(name, start, end) \
+ EXC_REAL_BEGIN(name, start, end); \
+ __OOL_EXCEPTION(start, label, tramp_real_##name); \
+ EXC_REAL_END(name, start, end);
+
+#define __TRAMP_REAL_REAL_OOL(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ STD_EXCEPTION_PSERIES_OOL(vec, name##_common); \
+
+#define EXC_REAL_OOL(name, start, end) \
+ __EXC_REAL_OOL(name, start, end); \
+ __TRAMP_REAL_REAL_OOL(name, start);
+
+#define __EXC_REAL_OOL_MASKABLE(name, start, end) \
+ __EXC_REAL_OOL(name, start, end);
+
+#define __TRAMP_REAL_REAL_OOL_MASKABLE(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ MASKABLE_EXCEPTION_PSERIES_OOL(vec, name##_common); \
+
+#define EXC_REAL_OOL_MASKABLE(name, start, end) \
+ __EXC_REAL_OOL_MASKABLE(name, start, end); \
+ __TRAMP_REAL_REAL_OOL_MASKABLE(name, start);
+
+#define __EXC_REAL_OOL_HV_DIRECT(name, start, end, handler) \
+ EXC_REAL_BEGIN(name, start, end); \
+ __OOL_EXCEPTION(start, label, handler); \
+ EXC_REAL_END(name, start, end);
+
+#define __EXC_REAL_OOL_HV(name, start, end) \
+ __EXC_REAL_OOL(name, start, end);
+
+#define __TRAMP_REAL_REAL_OOL_HV(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ STD_EXCEPTION_HV_OOL(vec, name##_common); \
+
+#define EXC_REAL_OOL_HV(name, start, end) \
+ __EXC_REAL_OOL_HV(name, start, end); \
+ __TRAMP_REAL_REAL_OOL_HV(name, start);
+
+#define __EXC_REAL_OOL_MASKABLE_HV(name, start, end) \
+ __EXC_REAL_OOL(name, start, end);
+
+#define __TRAMP_REAL_REAL_OOL_MASKABLE_HV(name, vec) \
+ TRAMP_REAL_BEGIN(tramp_real_##name); \
+ MASKABLE_EXCEPTION_HV_OOL(vec, name##_common); \
+
+#define EXC_REAL_OOL_MASKABLE_HV(name, start, end) \
+ __EXC_REAL_OOL_MASKABLE_HV(name, start, end); \
+ __TRAMP_REAL_REAL_OOL_MASKABLE_HV(name, start);
+
+#define __EXC_VIRT_OOL(name, start, end) \
+ EXC_VIRT_BEGIN(name, start, end); \
+ __OOL_EXCEPTION(start, label, tramp_virt_##name); \
+ EXC_VIRT_END(name, start, end);
+
+#define __TRAMP_REAL_VIRT_OOL(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ STD_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common); \
+
+#define EXC_VIRT_OOL(name, start, end, realvec) \
+ __EXC_VIRT_OOL(name, start, end); \
+ __TRAMP_REAL_VIRT_OOL(name, realvec);
+
+#define __EXC_VIRT_OOL_MASKABLE(name, start, end) \
+ __EXC_VIRT_OOL(name, start, end);
+
+#define __TRAMP_REAL_VIRT_OOL_MASKABLE(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ MASKABLE_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common); \
+
+#define EXC_VIRT_OOL_MASKABLE(name, start, end, realvec) \
+ __EXC_VIRT_OOL_MASKABLE(name, start, end); \
+ __TRAMP_REAL_VIRT_OOL_MASKABLE(name, realvec);
+
+#define __EXC_VIRT_OOL_HV(name, start, end) \
+ __EXC_VIRT_OOL(name, start, end);
+
+#define __TRAMP_REAL_VIRT_OOL_HV(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ STD_RELON_EXCEPTION_HV_OOL(realvec, name##_common); \
+
+#define EXC_VIRT_OOL_HV(name, start, end, realvec) \
+ __EXC_VIRT_OOL_HV(name, start, end); \
+ __TRAMP_REAL_VIRT_OOL_HV(name, realvec);
+
+#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, end) \
+ __EXC_VIRT_OOL(name, start, end);
+
+#define __TRAMP_REAL_VIRT_OOL_MASKABLE_HV(name, realvec) \
+ TRAMP_VIRT_BEGIN(tramp_virt_##name); \
+ MASKABLE_RELON_EXCEPTION_HV_OOL(realvec, name##_common); \
+
+#define EXC_VIRT_OOL_MASKABLE_HV(name, start, end, realvec) \
+ __EXC_VIRT_OOL_MASKABLE_HV(name, start, end); \
+ __TRAMP_REAL_VIRT_OOL_MASKABLE_HV(name, realvec);
+
+#define TRAMP_KVM(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_##n); \
+ KVM_HANDLER(area, EXC_STD, n); \
+
+#define TRAMP_KVM_SKIP(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_##n); \
+ KVM_HANDLER_SKIP(area, EXC_STD, n); \
+
+/*
+ * HV variant exceptions get the 0x2 bit added to their trap number.
+ */
+#define TRAMP_KVM_HV(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_H##n); \
+ KVM_HANDLER(area, EXC_HV, n + 0x2); \
+
+#define TRAMP_KVM_HV_SKIP(area, n) \
+ TRAMP_KVM_BEGIN(do_kvm_H##n); \
+ KVM_HANDLER_SKIP(area, EXC_HV, n + 0x2); \
+
+#define EXC_COMMON(name, realvec, hdlr) \
+ EXC_COMMON_BEGIN(name); \
+ STD_EXCEPTION_COMMON(realvec, name, hdlr); \
+
+#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
+ EXC_COMMON_BEGIN(name); \
+ STD_EXCEPTION_COMMON_ASYNC(realvec, name, hdlr); \
+
+#define EXC_COMMON_HV(name, realvec, hdlr) \
+ EXC_COMMON_BEGIN(name); \
+ STD_EXCEPTION_COMMON(realvec + 0x2, name, hdlr); \
+
+#endif /* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 2fd1690b79d2..f6fda8482f60 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -241,6 +241,35 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val)
#endif
#endif /* __powerpc64__ */
+
+/*
+ * Simple Cache inhibited accessors
+ * Unlike the DEF_MMIO_* macros, these don't include any h/w memory
+ * barriers, callers need to manage memory barriers on their own.
+ * These can only be used in hypervisor real mode.
+ */
+
+static inline u32 _lwzcix(unsigned long addr)
+{
+ u32 ret;
+
+ __asm__ __volatile__("lwzcix %0,0, %1"
+ : "=r" (ret) : "r" (addr) : "memory");
+ return ret;
+}
+
+static inline void _stbcix(u64 addr, u8 val)
+{
+ __asm__ __volatile__("stbcix %0,0,%1"
+ : : "r" (val), "r" (addr) : "memory");
+}
+
+static inline void _stwcix(u64 addr, u32 val)
+{
+ __asm__ __volatile__("stwcix %0,0,%1"
+ : : "r" (val), "r" (addr) : "memory");
+}
+
/*
* Low level IO stream instructions are defined out of line for now
*/
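Because the accessors above deliberately omit barriers, ordering is the caller's job. A minimal sketch of real-mode usage; trig_addr and ack_addr stand in for MMIO addresses obtained elsewhere:

    _stbcix(trig_addr, 0xff);                     /* cache-inhibited store */
    __asm__ __volatile__("sync" : : : "memory");  /* caller-supplied order */
    ack = _lwzcix(ack_addr);                      /* cache-inhibited load  */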
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 5bca220bbb60..05cabed3d1bd 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -105,6 +105,15 @@
#define BOOK3S_INTERRUPT_FAC_UNAVAIL 0xf60
#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
+/* book3s_hv */
+
+/*
+ * Special trap used to indicate to host that this is a
+ * passthrough interrupt that could not be handled
+ * completely in the guest.
+ */
+#define BOOK3S_INTERRUPT_HV_RM_HARD 0x5555
+
#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
#define BOOK3S_IRQPRIO_INST_SEGMENT 2
@@ -136,6 +145,7 @@
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
#define RESUME_FLAG_ARCH1 (1<<2)
+#define RESUME_FLAG_ARCH2 (1<<3)
#define RESUME_GUEST 0
#define RESUME_GUEST_NV RESUME_FLAG_NV
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 8f39796c9da8..5cf306ae0ac3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -69,6 +69,43 @@ struct hpte_cache {
int pagesize;
};
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int num_threads;
+ int entry_exit_map;
+ int napping_threads;
+ int first_vcpuid;
+ u16 pcpu;
+ u16 last_cpu;
+ u8 vcore_state;
+ u8 in_guest;
+ struct kvmppc_vcore *master_vcore;
+ struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
+ struct list_head preempt_list;
+ spinlock_t lock;
+ struct swait_queue_head wq;
+ spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+ struct kvm *kvm;
+ u64 tb_offset; /* guest timebase - host timebase */
+ ulong lpcr;
+ u32 arch_compat;
+ ulong pcr;
+ ulong dpdes; /* doorbell state (POWER8) */
+ ulong vtb; /* virtual timebase */
+ ulong conferring_threads;
+ unsigned int halt_poll_ns;
+};
+
struct kvmppc_vcpu_book3s {
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct {
@@ -83,6 +120,7 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
+ u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
@@ -191,6 +229,7 @@ extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
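
To make the entry_exit_map layout documented in the kvmppc_vcore comment above concrete, here is a hedged sketch of how a thread could claim its entry bit only while the exit map (the upper 8 bits) is still zero. The helper name and the use of cmpxchg() are illustrative assumptions, not code from this diff.

/* Sketch only: relies on the bit layout described for entry_exit_map above. */
static bool example_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
	int old, new;

	do {
		old = vc->entry_exit_map;
		if (old >> 8)			/* a thread has already exited */
			return false;
		new = old | (1 << thread);	/* set our bit in the entry map */
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);

	return true;
}

The VCORE_ENTRY_MAP()/VCORE_EXIT_MAP() macros in kvm_host.h below extract exactly these two halves.
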
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 88d17b4ea9c8..848292176908 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,6 +20,8 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
+#include <asm/book3s/64/mmu-hash.h>
+
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
@@ -97,56 +99,20 @@ static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
hpte[0] = cpu_to_be64(hpte_v);
}
-static inline int __hpte_actual_psize(unsigned int lp, int psize)
-{
- int i, shift;
- unsigned int mask;
-
- /* start from 1 ignoring MMU_PAGE_4K */
- for (i = 1; i < MMU_PAGE_COUNT; i++) {
-
- /* invalid penc */
- if (mmu_psize_defs[psize].penc[i] == -1)
- continue;
- /*
- * encoding bits per actual page size
- * PTE LP actual page size
- * rrrr rrrz >=8KB
- * rrrr rrzz >=16KB
- * rrrr rzzz >=32KB
- * rrrr zzzz >=64KB
- * .......
- */
- shift = mmu_psize_defs[i].shift - LP_SHIFT;
- if (shift > LP_BITS)
- shift = LP_BITS;
- mask = (1 << shift) - 1;
- if ((lp & mask) == mmu_psize_defs[psize].penc[i])
- return i;
- }
- return -1;
-}
-
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
unsigned long pte_index)
{
- int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
+ int i, b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
unsigned int penc;
unsigned long rb = 0, va_low, sllp;
unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
if (v & HPTE_V_LARGE) {
- for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
-
- /* valid entries have a shift value */
- if (!mmu_psize_defs[b_psize].shift)
- continue;
-
- a_psize = __hpte_actual_psize(lp, b_psize);
- if (a_psize != -1)
- break;
- }
+ i = hpte_page_sizes[lp];
+ b_psize = i & 0xf;
+ a_psize = i >> 4;
}
+
/*
* Ignore the top 14 bits of va
* v have top two bits covering segment size, hence move
@@ -159,7 +125,6 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* This covers 14..54 bits of va*/
rb = (v & ~0x7fUL) << 16; /* AVA field */
- rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
/*
* AVA in v had cleared lower 23 bits. We need to derive
* that from pteg index
@@ -211,49 +176,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
break;
}
}
- rb |= (v >> 54) & 0x300; /* B field */
+ rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
return rb;
}
-static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
- bool is_base_size)
-{
-
- int size, a_psize;
- /* Look at the 8 bit LP value */
- unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
-
- /* only handle 4k, 64k and 16M pages for now */
- if (!(h & HPTE_V_LARGE))
- return 1ul << 12;
- else {
- for (size = 0; size < MMU_PAGE_COUNT; size++) {
- /* valid entries have a shift value */
- if (!mmu_psize_defs[size].shift)
- continue;
-
- a_psize = __hpte_actual_psize(lp, size);
- if (a_psize != -1) {
- if (is_base_size)
- return 1ul << mmu_psize_defs[size].shift;
- return 1ul << mmu_psize_defs[a_psize].shift;
- }
- }
-
- }
- return 0;
-}
-
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
-{
- return __hpte_page_size(h, l, 0);
-}
-
-static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
-{
- return __hpte_page_size(h, l, 1);
-}
-
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
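
The rewritten compute_tlbie_rb() above decodes a packed hpte_page_sizes[] entry: the base page-size index sits in the low nibble and the actual page-size index in the high nibble, which is why mmu.h below warns about changing the array's type if MMU_PAGE_COUNT ever exceeds 16. A minimal sketch of that decode, with an invented helper name:

/* Sketch only: mirrors the "i & 0xf" / "i >> 4" decode used above. */
static inline void example_decode_hpte_size(unsigned char packed,
					    int *base_psize, int *actual_psize)
{
	*base_psize   = packed & 0xf;	/* low nibble: base page size index */
	*actual_psize = packed >> 4;	/* high nibble: actual page size index */
}
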
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index ec35af34a3fb..28350a294b1e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -43,6 +43,8 @@
#include <asm/cputhreads.h>
#define KVM_MAX_VCPU_ID (threads_per_subcore * KVM_MAX_VCORES)
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
@@ -95,42 +97,49 @@ struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
- u32 sum_exits;
- u32 mmio_exits;
- u32 signal_exits;
- u32 light_exits;
+ u64 sum_exits;
+ u64 mmio_exits;
+ u64 signal_exits;
+ u64 light_exits;
/* Account for special types of light exits: */
- u32 itlb_real_miss_exits;
- u32 itlb_virt_miss_exits;
- u32 dtlb_real_miss_exits;
- u32 dtlb_virt_miss_exits;
- u32 syscall_exits;
- u32 isi_exits;
- u32 dsi_exits;
- u32 emulated_inst_exits;
- u32 dec_exits;
- u32 ext_intr_exits;
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 dbell_exits;
- u32 gdbell_exits;
- u32 ld;
- u32 st;
+ u64 itlb_real_miss_exits;
+ u64 itlb_virt_miss_exits;
+ u64 dtlb_real_miss_exits;
+ u64 dtlb_virt_miss_exits;
+ u64 syscall_exits;
+ u64 isi_exits;
+ u64 dsi_exits;
+ u64 emulated_inst_exits;
+ u64 dec_exits;
+ u64 ext_intr_exits;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_successful_wait;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 dbell_exits;
+ u64 gdbell_exits;
+ u64 ld;
+ u64 st;
#ifdef CONFIG_PPC_BOOK3S
- u32 pf_storage;
- u32 pf_instruc;
- u32 sp_storage;
- u32 sp_instruc;
- u32 queue_intr;
- u32 ld_slow;
- u32 st_slow;
+ u64 pf_storage;
+ u64 pf_instruc;
+ u64 sp_storage;
+ u64 sp_instruc;
+ u64 queue_intr;
+ u64 ld_slow;
+ u64 st_slow;
#endif
+ u64 pthru_all;
+ u64 pthru_host;
+ u64 pthru_bad_aff;
};
enum kvm_exit_types {
@@ -197,6 +206,8 @@ struct kvmppc_spapr_tce_table {
struct kvmppc_xics;
struct kvmppc_icp;
+struct kvmppc_passthru_irqmap;
+
/*
* The reverse mapping array has one entry for each HPTE,
* which stores the guest's view of the second word of the HPTE
@@ -267,6 +278,7 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
+ struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -275,41 +287,6 @@ struct kvm_arch {
#endif
};
-/*
- * Struct for a virtual core.
- * Note: entry_exit_map combines a bitmap of threads that have entered
- * in the bottom 8 bits and a bitmap of threads that have exited in the
- * next 8 bits. This is so that we can atomically set the entry bit
- * iff the exit map is 0 without taking a lock.
- */
-struct kvmppc_vcore {
- int n_runnable;
- int num_threads;
- int entry_exit_map;
- int napping_threads;
- int first_vcpuid;
- u16 pcpu;
- u16 last_cpu;
- u8 vcore_state;
- u8 in_guest;
- struct kvmppc_vcore *master_vcore;
- struct list_head runnable_threads;
- struct list_head preempt_list;
- spinlock_t lock;
- struct swait_queue_head wq;
- spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
- u64 stolen_tb;
- u64 preempt_tb;
- struct kvm_vcpu *runner;
- struct kvm *kvm;
- u64 tb_offset; /* guest timebase - host timebase */
- ulong lpcr;
- u32 arch_compat;
- ulong pcr;
- ulong dpdes; /* doorbell state (POWER8) */
- ulong conferring_threads;
-};
-
#define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)
#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
@@ -329,6 +306,7 @@ struct kvmppc_vcore {
#define VCORE_SLEEPING 3
#define VCORE_RUNNING 4
#define VCORE_EXITING 5
+#define VCORE_POLLING 6
/*
* Struct used to manage memory for a virtual processor area
@@ -397,6 +375,20 @@ struct kvmhv_tb_accumulator {
u64 tb_max; /* max time */
};
+#ifdef CONFIG_PPC_BOOK3S_64
+struct kvmppc_irq_map {
+ u32 r_hwirq;
+ u32 v_hwirq;
+ struct irq_desc *desc;
+};
+
+#define KVMPPC_PIRQ_MAPPED 1024
+struct kvmppc_passthru_irqmap {
+ int n_mapped;
+ struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
+};
+#endif
+
# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
@@ -483,7 +475,6 @@ struct kvm_vcpu_arch {
ulong purr;
ulong spurr;
ulong ic;
- ulong vtb;
ulong dscr;
ulong amr;
ulong uamor;
@@ -668,7 +659,6 @@ struct kvm_vcpu_arch {
long pgfault_index;
unsigned long pgfault_hpte[2];
- struct list_head run_list;
struct task_struct *run_task;
struct kvm_run *kvm_run;
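
The kvmppc_passthru_irqmap introduced above is a fixed-size table (up to KVMPPC_PIRQ_MAPPED entries) pairing a real hardware irq with its guest-visible irq and irq_desc. As a hedged sketch of how such a table might be searched, with an invented function name:

/* Sketch only: linear search of the passthrough map by real hwirq. */
static struct kvmppc_irq_map *example_find_irq_map(
		struct kvmppc_passthru_irqmap *pimap, u32 r_hwirq)
{
	int i;

	for (i = 0; i < pimap->n_mapped; i++)
		if (pimap->mapped[i].r_hwirq == r_hwirq)
			return &pimap->mapped[i];
	return NULL;
}
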
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2544edabe7f3..f6e49640dbe1 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -287,6 +287,10 @@ struct kvmppc_ops {
long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
unsigned long arg);
int (*hcall_implemented)(unsigned long hcall);
+ int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
+ void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
+ struct irq_bypass_producer *);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -453,8 +457,19 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
+
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+ struct kvm *kvm)
+{
+ if (kvm && kvm_irq_bypass)
+ return kvm->arch.pimap;
+ return NULL;
+}
+
extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
+extern void kvmppc_free_pimap(struct kvm *kvm);
+extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
@@ -464,10 +479,23 @@ extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
+extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ unsigned long host_irq);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, u32 xirr,
+ struct kvmppc_irq_map *irq_map,
+ struct kvmppc_passthru_irqmap *pimap);
extern int h_ipi_redirect;
#else
+static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
+ struct kvm *kvm)
+ { return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
+static inline void kvmppc_free_pimap(struct kvm *kvm) {};
+static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+ { return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 0420b388dd83..e02cbc6a6c70 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -61,7 +61,7 @@ struct machdep_calls {
void (*init_IRQ)(void);
- /* Return an irq, or NO_IRQ to indicate there are none pending. */
+ /* Return an irq, or 0 to indicate there are none pending. */
unsigned int (*get_irq)(void);
/* PCI stuff */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cd4f04a74802..b62a8d43a06c 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -313,6 +313,9 @@ extern int book3e_htw_mode;
* return 1, indicating that the tlb requires preloading.
*/
#define HUGETLB_NEED_PRELOAD
+
+#define mmu_cleanup_all NULL
+
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e2fb408f8398..e88368354e49 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -204,6 +204,10 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
* make it match the size of our bolted TLB area
*/
extern u64 ppc64_rma_size;
+
+/* Cleanup function used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
#endif /* CONFIG_PPC64 */
struct mm_struct;
@@ -271,6 +275,7 @@ static inline bool early_radix_enabled(void)
#define MMU_PAGE_16G 13
#define MMU_PAGE_64G 14
+/* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
#define MMU_PAGE_COUNT 15
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 9d2cd0c36ec2..5c451140660a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -18,6 +18,7 @@ extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
+extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem);
@@ -71,7 +72,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
/* Mark this context has been used on the new CPU */
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 7b589178be46..4d52ccfc2366 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -41,6 +41,9 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#ifdef CONFIG_FA_DUMP
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index d4f471fb1031..088420d8aa59 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -122,9 +122,9 @@ static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
* @msgr: the message register whose IRQ is to be returned
*
* Returns the IRQ number associated with the given message register.
- * NO_IRQ is returned if this message register is not capable of
- * receiving interrupts. What message register can and cannot receive
- * interrupts is specified in the device tree for the system.
+ * 0 is returned if this message register is not capable of receiving
+ * interrupts. What message register can and cannot receive interrupts is
+ * specified in the device tree for the system.
*/
static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
{
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 780847597514..c219ef7be53b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -267,7 +267,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index d4d808cf905e..653a1838469d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -300,7 +300,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
/* Set the dirty and/or accessed bits atomically in a linux PTE, this
* function doesn't need to flush the hash entry
*/
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index ee05bd203630..e958b7096f19 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -67,6 +67,7 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
uint64_t offset, uint32_t data);
int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
+int64_t opal_rm_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
int64_t opal_register_exception_handler(uint64_t opal_exception,
uint64_t handler_address,
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index a452968b29ea..6595ad1d18cc 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -28,7 +28,7 @@ static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
io1 = prop[1]; io2 = prop[2];
virq = irq_of_parse_and_map(np, 0);
- if (virq == NO_IRQ)
+ if (!virq)
continue;
if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 0cbd8134ce81..696438f09aea 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -12,10 +12,11 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/irq.h>
#include <misc/cxl-base.h>
#include <asm/opal-api.h>
-#define PCI_SLOT_ID_PREFIX 0x8000000000000000
+#define PCI_SLOT_ID_PREFIX (1UL << 63)
#define PCI_SLOT_ID(phb_id, bdfn) \
(PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
@@ -33,6 +34,8 @@ int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
int pnv_cxl_get_irq_count(struct pci_dev *dev);
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
+int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq);
+bool is_pnv_opal_msi(struct irq_chip *chip);
#ifdef CONFIG_CXL_BASE
int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
@@ -60,6 +63,8 @@ struct pnv_php_slot {
#define PNV_PHP_STATE_POPULATED 2
#define PNV_PHP_STATE_OFFLINE 3
int state;
+ int irq;
+ struct workqueue_struct *wq;
struct device_node *dn;
struct pci_dev *pdev;
struct pci_bus *bus;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 127ebf5862b4..54ff8ce7fa96 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -236,6 +236,7 @@
#define PPC_INST_STWU 0x94000000
#define PPC_INST_MFLR 0x7c0802a6
#define PPC_INST_MTLR 0x7c0803a6
+#define PPC_INST_MTCTR 0x7c0903a6
#define PPC_INST_CMPWI 0x2c000000
#define PPC_INST_CMPDI 0x2c200000
#define PPC_INST_CMPW 0x7c000000
@@ -250,6 +251,7 @@
#define PPC_INST_SUB 0x7c000050
#define PPC_INST_BLR 0x4e800020
#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_BCTR 0x4e800420
#define PPC_INST_MULLD 0x7c0001d2
#define PPC_INST_MULLW 0x7c0001d6
#define PPC_INST_MULHWU 0x7c000016
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index d5d5b5e348f2..c73750b0d9fa 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -201,14 +201,12 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#ifdef PPC64_ELF_ABI_v2
#define _GLOBAL(name) \
- .section ".text"; \
.align 2 ; \
.type name,@function; \
.globl name; \
name:
#define _GLOBAL_TOC(name) \
- .section ".text"; \
.align 2 ; \
.type name,@function; \
.globl name; \
@@ -217,13 +215,6 @@ name: \
addi r2,r2,(.TOC.-0b)@l; \
.localentry name,.-name
-#define _KPROBE(name) \
- .section ".kprobes.text","a"; \
- .align 2 ; \
- .type name,@function; \
- .globl name; \
-name:
-
#define DOTSYM(a) a
#else
@@ -232,35 +223,20 @@ name:
#define GLUE(a,b) XGLUE(a,b)
#define _GLOBAL(name) \
- .section ".text"; \
.align 2 ; \
.globl name; \
.globl GLUE(.,name); \
- .section ".opd","aw"; \
+ .pushsection ".opd","aw"; \
name: \
.quad GLUE(.,name); \
.quad .TOC.@tocbase; \
.quad 0; \
- .previous; \
+ .popsection; \
.type GLUE(.,name),@function; \
GLUE(.,name):
#define _GLOBAL_TOC(name) _GLOBAL(name)
-#define _KPROBE(name) \
- .section ".kprobes.text","a"; \
- .align 2 ; \
- .globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
-
#define DOTSYM(a) GLUE(.,a)
#endif
@@ -272,20 +248,28 @@ GLUE(.,name):
n:
#define _GLOBAL(n) \
- .text; \
.stabs __stringify(n:F-1),N_FUN,0,0,n;\
.globl n; \
n:
#define _GLOBAL_TOC(name) _GLOBAL(name)
-#define _KPROBE(n) \
- .section ".kprobes.text","a"; \
- .globl n; \
-n:
-
#endif
+/*
+ * __kprobes (the C annotation) puts the symbol into the .kprobes.text
+ * section, which gets emitted at the end of regular text.
+ *
+ * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
+ * a blacklist. The former is for core kprobe functions/data, the
+ * latter is for those that incidentally must be excluded from probing
+ * and allows them to be linked at a more optimal location within text.
+ */
+#define _ASM_NOKPROBE_SYMBOL(entry) \
+ .pushsection "_kprobe_blacklist","aw"; \
+ PPC_LONG (entry) ; \
+ .popsection
+
#define FUNC_START(name) _GLOBAL(name)
#define FUNC_END(name)
@@ -527,7 +511,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif
#define MTMSRD(r) mtmsr r
#define MTMSR_EERI(reg) mtmsr reg
-#define CLR_TOP32(r)
#endif
#endif /* __KERNEL__ */
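
For the C-side counterpart of the _ASM_NOKPROBE_SYMBOL comment above: NOKPROBE_SYMBOL() is placed after a function definition to put the symbol on the kprobe blacklist without moving it into .kprobes.text, so the linker can keep it with regular text. The function below is invented purely to show the usage.

#include <linux/kprobes.h>

/* Illustrative helper; only the NOKPROBE_SYMBOL() annotation matters here. */
static int example_not_probeable(int x)
{
	return x + 1;
}
NOKPROBE_SYMBOL(example_not_probeable);
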
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 68e3bf57b027..c07c31b0e89e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -147,7 +147,7 @@ typedef struct {
} mm_segment_t;
#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
+#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
/* FP and VSX 0-31 register set */
struct thread_fp_state {
@@ -257,6 +257,7 @@ struct thread_struct {
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u8 load_tm;
u64 tm_tfhar; /* Transaction fail handler addr */
u64 tm_texasr; /* Transaction exception & summary */
u64 tm_tfiar; /* Transaction fail instr address reg */
@@ -267,20 +268,17 @@ struct thread_struct {
unsigned long tm_dscr;
/*
- * Transactional FP and VSX 0-31 register set.
- * NOTE: the sense of these is the opposite of the integer ckpt_regs!
+ * Checkpointed FP and VSX 0-31 register set.
*
* When a transaction is active/signalled/scheduled etc., *regs is the
* most recent set of/speculated GPRs with ckpt_regs being the older
* checkpointed regs to which we roll back if transaction aborts.
*
- * However, fpr[] is the checkpointed 'base state' of FP regs, and
- * transact_fpr[] is the new set of transactional values.
- * VRs work the same way.
+ * These are analogous to how ckpt_regs and pt_regs work
*/
- struct thread_fp_state transact_fp;
- struct thread_vr_state transact_vr;
- unsigned long transact_vrsave;
+ struct thread_fp_state ckfp_state; /* Checkpointed FP state */
+ struct thread_vr_state ckvr_state; /* Checkpointed VR state */
+ unsigned long ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f69f40f1519a..2a620789954b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -475,6 +475,9 @@
#define HID0_POWER8_1TO4LPAR __MASK(51)
#define HID0_POWER8_DYNLPARDIS __MASK(48)
+/* POWER9 HID0 bits */
+#define HID0_POWER9_RADIX __MASK(63 - 8)
+
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#ifdef CONFIG_6xx
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
@@ -737,6 +740,7 @@
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCR2 785
+#define SPRN_UMMCR2 769
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -1247,7 +1251,7 @@ static inline void mtmsr_isync(unsigned long val)
: "memory")
#endif
-extern void msr_check_and_set(unsigned long bits);
+extern unsigned long msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
extern void __msr_check_and_clear(unsigned long bits);
static inline void msr_check_and_clear(unsigned long bits)
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 9322c28aebd2..5ff77722a52d 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -5,6 +5,4 @@
#include <uapi/asm/signal.h>
#include <uapi/asm/ptrace.h>
-extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
-
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index c22d704b6d41..82e06ca3a49b 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -9,11 +9,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void do_load_up_transact_fpu(struct thread_struct *thread);
-extern void do_load_up_transact_altivec(struct thread_struct *thread);
-#endif
-
extern void tm_enable(void);
extern void tm_reclaim(struct thread_struct *thread,
unsigned long orig_msr, uint8_t cause);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fe4c075bcf50..aded29ad2e8f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -31,8 +31,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
- udbg.o misc.o io.o dma.o \
- misc_$(CONFIG_WORD_SIZE).o \
+ udbg.o misc.o io.o dma.o misc_$(BITS).o \
of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
@@ -70,23 +69,23 @@ obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
ifeq ($(CONFIG_FSL_BOOKE),y)
obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
else
-obj-$(CONFIG_HIBERNATION) += swsusp_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o
endif
obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
-obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_MODULES) += module.o module_$(BITS).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o
obj-$(CONFIG_PPC_DOORBELL) += dbell.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-extra-y := head_$(CONFIG_WORD_SIZE).o
+extra-y := head_$(BITS).o
extra-$(CONFIG_40x) := head_40x.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-$(CONFIG_RELOCATABLE) += reloc_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
@@ -104,11 +103,11 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o
-obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
+obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \
pci-common.o pci_of_scan.o
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
- machine_kexec_$(CONFIG_WORD_SIZE).o
+ machine_kexec_$(BITS).o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b89d14c0352c..caec7bf3b99a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -142,12 +142,12 @@ int main(void)
DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
- DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
- transact_vr));
- DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
- transact_vrsave));
- DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
- transact_fp));
+ DEFINE(THREAD_CKVRSTATE, offsetof(struct thread_struct,
+ ckvr_state));
+ DEFINE(THREAD_CKVRSAVE, offsetof(struct thread_struct,
+ ckvrsave));
+ DEFINE(THREAD_CKFPSTATE, offsetof(struct thread_struct,
+ ckfp_state));
/* Local pt_regs on stack for Transactional Memory funcs. */
DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
sizeof(struct pt_regs) + 16);
@@ -506,7 +506,6 @@ int main(void)
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
- DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
@@ -557,6 +556,7 @@ int main(void)
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
+ DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 74248ab18e98..6c4646ac9234 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -506,6 +506,25 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power9 DD1*/
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x004e0100,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD1,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power9",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .flush_tlb = __flush_tlb_power9,
+ .platform = "power9",
+ },
{ /* Power9 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004e0000,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 7429556eb8df..f25731627d7f 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -116,6 +116,7 @@ struct eeh_ops *eeh_ops = NULL;
/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
+EXPORT_SYMBOL_GPL(confirm_error_lock);
/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);
@@ -1044,7 +1045,7 @@ int eeh_init(void)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
- pr_warn("EEH: No capable adapters found\n");
+ pr_info("EEH: No capable adapters found\n");
return ret;
}
@@ -1502,6 +1503,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
break;
case EEH_OPT_THAW_MMIO:
case EEH_OPT_THAW_DMA:
+ case EEH_OPT_FREEZE_PE:
if (!eeh_ops || !eeh_ops->set_option) {
ret = -ENOENT;
break;
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 5f36e8a70daa..a62be72da274 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -993,9 +993,17 @@ static void eeh_handle_special_event(void)
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
- bus = eeh_pe_bus_get(phb_pe);
eeh_pe_dev_traverse(pe,
eeh_report_failure, NULL);
+ bus = eeh_pe_bus_get(phb_pe);
+ if (!bus) {
+ pr_err("%s: Cannot find PCI bus for "
+ "PHB#%d-PE#%x\n",
+ __func__,
+ pe->phb->global_number,
+ pe->addr);
+ break;
+ }
pci_hp_remove_devices(bus);
}
pci_unlock_rescan_remove();
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index f0520da85759..de7d091c4c31 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -581,6 +581,7 @@ void eeh_pe_state_mark(struct eeh_pe *pe, int state)
{
eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
}
+EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
static void *__eeh_pe_dev_mode_mark(void *data, void *flag)
{
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9899032230b4..83428a283fa0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -654,7 +654,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SMP */
tophys(r0,r4)
- CLR_TOP32(r0)
mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
lwz r1,KSP(r4) /* Load new stack pointer */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5afd03e5e8b8..51df82b61084 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -139,7 +139,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- ld r11,PACAKMSR(r13)
+ li r11,MSR_RI
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -195,7 +195,6 @@ system_call: /* label this so stack traces look sane */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- ld r10,PACAKMSR(r13)
/*
* For performance reasons we clear RI the same time that we
* clear EE. We only need to clear RI just before we restore r13
@@ -203,8 +202,7 @@ system_call: /* label this so stack traces look sane */
* We have to be careful to restore RI if we branch anywhere from
* here (eg syscall_exit_work).
*/
- li r9,MSR_RI
- andc r11,r10,r9
+ li r11,0
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -221,13 +219,12 @@ system_call: /* label this so stack traces look sane */
#endif
2: addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
+ li r10,MSR_RI
mtmsrd r10,1 /* Restore RI */
#endif
bl restore_math
#ifdef CONFIG_PPC_BOOK3S
- ld r10,PACAKMSR(r13)
- li r9,MSR_RI
- andc r11,r10,r9 /* Re-clear RI */
+ li r11,0
mtmsrd r11,1
#endif
ld r8,_MSR(r1)
@@ -308,6 +305,7 @@ syscall_enosys:
syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
+ li r10,MSR_RI
mtmsrd r10,1 /* Restore RI */
#endif
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
@@ -354,7 +352,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- ld r10,PACAKMSR(r13)
+ li r10,MSR_RI
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -619,7 +617,7 @@ _GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ li r10,MSR_RI
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
@@ -751,7 +749,7 @@ resume_kernel:
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ li r10,MSR_RI
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
@@ -841,8 +839,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* userspace and we take an exception after restoring r13,
* we end up corrupting the userspace r13 value.
*/
- ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
- andc r4,r4,r0 /* r0 contains MSR_RI here */
+ li r4,0
mtmsrd r4,1
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index bffec73dbffc..08992f8f5036 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -16,72 +16,71 @@
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
+#include <asm/head-64.h>
/*
+ * There are a few constraints to be concerned with.
+ * - Real mode exceptions code/data must be located at their physical location.
+ * - Virtual mode exceptions must be mapped at their 0xc000... location.
+ * - Fixed location code must not call directly beyond the __end_interrupts
+ * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
+ * must be used.
+ * - LOAD_HANDLER targets must be within first 64K of physical 0 /
+ * virtual 0xc00...
+ * - Conditional branch targets must be within +/-32K of caller.
+ *
+ * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
+ * therefore don't have to run in physically located code or rfid to
+ * virtual mode kernel code. However on relocatable kernels they do have
+ * to branch to KERNELBASE offset because the rest of the kernel (outside
+ * the exception vectors) may be located elsewhere.
+ *
+ * Virtual exceptions correspond with physical, except their entry points
+ * are offset by 0xc000000000000000 and also tend to get an added 0x4000
+ * offset applied. Virtual exceptions are enabled with the Alternate
+ * Interrupt Location (AIL) bit set in the LPCR. However this does not
+ * guarantee they will be delivered virtually. Some conditions (see the ISA)
+ * cause exceptions to be delivered in real mode.
+ *
+ * It's impossible to receive interrupts below 0x300 via AIL.
+ *
+ * KVM: None of the virtual exceptions are from the guest. Anything that
+ * escalated to HV=1 from HV=0 is delivered via real mode handlers.
+ *
+ *
* We layout physical memory as follows:
* 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x17ff : pSeries Interrupt prologs
- * 0x1800 - 0x4000 : interrupt support common interrupt prologs
- * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
- * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
+ * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
+ * 0x1900 - 0x3fff : Real mode trampolines
+ * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
+ * 0x5900 - 0x6fff : Relon mode trampolines
* 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - 0x8fff : Initial (CPU0) segment table
- * 0x9000 - : Early init and support code
+ * 0x8000 - .... : Common interrupt handlers, remaining early
+ * setup code, rest of kernel.
+ *
+ * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
+ * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
+ * vectors there.
*/
- /* Syscall routine is used twice, in reloc-off and reloc-on paths */
-#define SYSCALL_PSERIES_1 \
-BEGIN_FTR_SECTION \
- cmpdi r0,0x1ebe ; \
- beq- 1f ; \
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
- mr r9,r13 ; \
- GET_PACA(r13) ; \
- mfspr r11,SPRN_SRR0 ; \
-0:
-
-#define SYSCALL_PSERIES_2_RFID \
- mfspr r12,SPRN_SRR1 ; \
- ld r10,PACAKBASE(r13) ; \
- LOAD_HANDLER(r10, system_call_entry) ; \
- mtspr SPRN_SRR0,r10 ; \
- ld r10,PACAKMSR(r13) ; \
- mtspr SPRN_SRR1,r10 ; \
- rfid ; \
- b . ; /* prevent speculative execution */
-
-#define SYSCALL_PSERIES_3 \
- /* Fast LE/BE switch system call */ \
-1: mfspr r12,SPRN_SRR1 ; \
- xori r12,r12,MSR_LE ; \
- mtspr SPRN_SRR1,r12 ; \
- rfid ; /* return to userspace */ \
- b . ; /* prevent speculative execution */
-
-#if defined(CONFIG_RELOCATABLE)
- /*
- * We can't branch directly so we do it via the CTR which
- * is volatile across system calls.
- */
-#define SYSCALL_PSERIES_2_DIRECT \
- mflr r10 ; \
- ld r12,PACAKBASE(r13) ; \
- LOAD_HANDLER(r12, system_call_entry) ; \
- mtctr r12 ; \
- mfspr r12,SPRN_SRR1 ; \
- /* Re-use of r13... No spare regs to do this */ \
- li r13,MSR_RI ; \
- mtmsrd r13,1 ; \
- GET_PACA(r13) ; /* get r13 back */ \
- bctr ;
+OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
+OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
+OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
+OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ * pseries and powernv need to keep the whole page from
+ * 0x7000 to 0x8000 free for use by the firmware
+ */
+ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
+OPEN_TEXT_SECTION(0x8000)
#else
- /* We can branch directly */
-#define SYSCALL_PSERIES_2_DIRECT \
- mfspr r12,SPRN_SRR1 ; \
- li r10,MSR_RI ; \
- mtmsrd r10,1 ; /* Set RI (EE=0) */ \
- b system_call_common ;
+OPEN_TEXT_SECTION(0x7000)
#endif
+USE_FIXED_SECTION(real_vectors)
+
/*
* This is the start of the interrupt handlers for pSeries
* This code runs with relocation off.
@@ -90,12 +89,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
* Therefore any relative branches in this section must only
* branch to labels in this section.
*/
- . = 0x100
.globl __start_interrupts
__start_interrupts:
- .globl system_reset_pSeries;
-system_reset_pSeries:
+/* No virt vectors corresponding with 0x0..0x100 */
+EXC_VIRT_NONE(0x4000, 0x4100)
+
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -136,9 +136,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option. Share common code.
+ */
+TRAMP_REAL_BEGIN(system_reset_fwnmi)
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
+#endif /* CONFIG_PPC_PSERIES */
+
- . = 0x200
-machine_check_pSeries_1:
+EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
/* This is moved out of line as it can be patched by FW, but
* some code path might still want to branch into the original
* vector
@@ -158,253 +171,9 @@ BEGIN_FTR_SECTION
FTR_SECTION_ELSE
b machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-
- . = 0x300
- .globl data_access_pSeries
-data_access_pSeries:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
- KVMTEST, 0x300)
-
- . = 0x380
- .globl data_access_slb_pSeries
-data_access_slb_pSeries:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_DAR
- mfspr r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
- . = 0x480
- .globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- /* We open code these as we can't have a ". = x" (even with
- * x = "." within a feature section
- */
- . = 0x500;
- .globl hardware_interrupt_pSeries;
- .globl hardware_interrupt_hv;
-hardware_interrupt_pSeries:
-hardware_interrupt_hv:
- BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
- EXC_HV, SOFTEN_TEST_HV)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
- FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
- EXC_STD, SOFTEN_TEST_PR)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-
- STD_EXCEPTION_PSERIES(0x600, alignment)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
-
- STD_EXCEPTION_PSERIES(0x700, program_check)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
-
- STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
-
- . = 0x900
- .globl decrementer_pSeries
-decrementer_pSeries:
- _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
-
- STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
-
- MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
-
- STD_EXCEPTION_PSERIES(0xb00, trap_0b)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
-
- . = 0xc00
- .globl system_call_pSeries
-system_call_pSeries:
- /*
- * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
- * that support it) before changing to HMT_MEDIUM. That allows the KVM
- * code to save that value into the guest state (it is the guest's PPR
- * value). Otherwise just change to HMT_MEDIUM as userspace has
- * already saved the PPR.
- */
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- std r9,PACA_EXGEN+EX_R9(r13)
- OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
- HMT_MEDIUM;
- std r10,PACA_EXGEN+EX_R10(r13)
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
- mfcr r9
- KVMTEST(0xc00)
- GET_SCRATCH0(r13)
-#else
- HMT_MEDIUM;
-#endif
- SYSCALL_PSERIES_1
- SYSCALL_PSERIES_2_RFID
- SYSCALL_PSERIES_3
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
-
- STD_EXCEPTION_PSERIES(0xd00, single_step)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
-
- /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
- * out of line to handle them
- */
- . = 0xe00
-hv_data_storage_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_data_storage_hv
-
- . = 0xe20
-hv_instr_storage_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_instr_storage_hv
-
- . = 0xe40
-emulation_assist_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b emulation_assist_hv
-
- . = 0xe60
-hv_exception_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hmi_exception_early
-
- . = 0xe80
-hv_doorbell_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_doorbell_hv
-
- . = 0xea0
-hv_virt_irq_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_virt_irq_hv
-
- /* We need to deal with the Altivec unavailable exception
- * here which is at 0xf20, thus in the middle of the
- * prolog code of the PerformanceMonitor one. A little
- * trickery is thus necessary
- */
- . = 0xf00
-performance_monitor_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b performance_monitor_pSeries
-
- . = 0xf20
-altivec_unavailable_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b altivec_unavailable_pSeries
-
- . = 0xf40
-vsx_unavailable_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b vsx_unavailable_pSeries
-
- . = 0xf60
-facility_unavailable_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_pSeries
-
- . = 0xf80
-hv_facility_unavailable_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_hv
-
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
-#endif /* CONFIG_CBE_RAS */
-
- STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
-
- . = 0x1500
- .global denorm_exception_hv
-denorm_exception_hv:
- mtspr SPRN_SPRG_HSCRATCH0,r13
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
-
-#ifdef CONFIG_PPC_DENORMALISATION
- mfspr r10,SPRN_HSRR1
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
- addi r11,r11,-4 /* HSRR0 is next instruction */
- bne+ denorm_assist
-#endif
-
- KVMTEST(0x1500)
- EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
-
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
-#endif /* CONFIG_CBE_RAS */
-
- STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
-
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
-#else
- . = 0x1800
-#endif /* CONFIG_CBE_RAS */
-
-
-/*** Out of line interrupts support ***/
-
- .align 7
- /* moved from 0x200 */
-machine_check_powernv_early:
+EXC_REAL_END(machine_check, 0x200, 0x300)
+EXC_VIRT_NONE(0x4200, 0x4300)
+TRAMP_REAL_BEGIN(machine_check_powernv_early)
BEGIN_FTR_SECTION
EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
/*
@@ -457,7 +226,6 @@ BEGIN_FTR_SECTION
mfmsr r11 /* get MSR value */
ori r11,r11,MSR_ME /* turn on ME bit */
ori r11,r11,MSR_RI /* turn on RI bit */
- ld r12,PACAKBASE(r13) /* get high part of &label */
LOAD_HANDLER(r12, machine_check_handle_early)
1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
@@ -470,7 +238,6 @@ BEGIN_FTR_SECTION
*/
addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
ld r11,PACAKMSR(r13)
- ld r12,PACAKBASE(r13)
LOAD_HANDLER(r12, unrecover_mce)
li r10,MSR_ME
andc r11,r11,r10 /* Turn off MSR_ME */
@@ -478,20 +245,19 @@ BEGIN_FTR_SECTION
b . /* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
-machine_check_pSeries:
+TRAMP_REAL_BEGIN(machine_check_pSeries)
.globl machine_check_fwnmi
machine_check_fwnmi:
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
- EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
+ EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
/*
* The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
* difference that MSR_RI is not enabled, because PACA_EXMC is being
* used, so nested machine check corrupts it. machine_check_common
* enables MSR_RI.
*/
- ld r12,PACAKBASE(r13)
ld r10,PACAKMSR(r13)
xori r10,r10,MSR_RI
mfspr r11,SPRN_SRR0
@@ -502,488 +268,13 @@ machine_check_pSeries_0:
rfid
b . /* prevent speculative execution */
- KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
- KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
- KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
-
-#ifdef CONFIG_PPC_DENORMALISATION
-denorm_assist:
-BEGIN_FTR_SECTION
-/*
- * To denormalise we need to move a copy of the register to itself.
- * For POWER6 do that here for all FP regs.
- */
- mfmsr r10
- ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
- xori r10,r10,(MSR_FE0|MSR_FE1)
- mtmsrd r10
- sync
-
-#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
-#define FMR4(n) FMR2(n) ; FMR2(n+2)
-#define FMR8(n) FMR4(n) ; FMR4(n+4)
-#define FMR16(n) FMR8(n) ; FMR8(n+8)
-#define FMR32(n) FMR16(n) ; FMR16(n+16)
- FMR32(0)
-
-FTR_SECTION_ELSE
-/*
- * To denormalise we need to move a copy of the register to itself.
- * For POWER7 do that here for the first 32 VSX registers only.
- */
- mfmsr r10
- oris r10,r10,MSR_VSX@h
- mtmsrd r10
- sync
-
-#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
-#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
-#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
-#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
-#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
- XVCPSGNDP32(0)
-
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
-
-BEGIN_FTR_SECTION
- b denorm_done
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-/*
- * To denormalise we need to move a copy of the register to itself.
- * For POWER8 we need to do that for all 64 VSX registers
- */
- XVCPSGNDP32(32)
-denorm_done:
- mtspr SPRN_HSRR0,r11
- mtcrf 0x80,r9
- ld r9,PACA_EXGEN+EX_R9(r13)
- RESTORE_PPR_PACA(PACA_EXGEN, r10)
-BEGIN_FTR_SECTION
- ld r10,PACA_EXGEN+EX_CFAR(r13)
- mtspr SPRN_CFAR,r10
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- ld r12,PACA_EXGEN+EX_R12(r13)
- ld r13,PACA_EXGEN+EX_R13(r13)
- HRFID
- b .
-#endif
-
- .align 7
- /* moved from 0xe00 */
- STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
- STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
- STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
- MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
-
- MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
-
- MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
-
- /* moved from 0xf00 */
- STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
- STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
- STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
- STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
- STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
-
-/*
- * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
- * - If it was a doorbell we return immediately since doorbells are edge
- * triggered and won't automatically refire.
- * - If it was a HMI we return immediately since we handled it in realmode
- * and it won't refire.
- * - else we hard disable and return.
- * This is called with r10 containing the value to OR to the paca field.
- */
-#define MASKED_INTERRUPT(_H) \
-masked_##_H##interrupt: \
- std r11,PACA_EXGEN+EX_R11(r13); \
- lbz r11,PACAIRQHAPPENED(r13); \
- or r11,r11,r10; \
- stb r11,PACAIRQHAPPENED(r13); \
- cmpwi r10,PACA_IRQ_DEC; \
- bne 1f; \
- lis r10,0x7fff; \
- ori r10,r10,0xffff; \
- mtspr SPRN_DEC,r10; \
- b 2f; \
-1: cmpwi r10,PACA_IRQ_DBELL; \
- beq 2f; \
- cmpwi r10,PACA_IRQ_HMI; \
- beq 2f; \
- mfspr r10,SPRN_##_H##SRR1; \
- rldicl r10,r10,48,1; /* clear MSR_EE */ \
- rotldi r10,r10,16; \
- mtspr SPRN_##_H##SRR1,r10; \
-2: mtcrf 0x80,r9; \
- ld r9,PACA_EXGEN+EX_R9(r13); \
- ld r10,PACA_EXGEN+EX_R10(r13); \
- ld r11,PACA_EXGEN+EX_R11(r13); \
- GET_SCRATCH0(r13); \
- ##_H##rfid; \
- b .
-
- MASKED_INTERRUPT()
- MASKED_INTERRUPT(H)
-
-/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
- * which kind of interrupt. MSR:EE is already off. We generate a
- * stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- */
-_GLOBAL(__replay_interrupt)
- /* We are going to jump to the exception common code which
- * will retrieve various register values from the PACA which
- * we don't give a damn about, so we don't bother storing them.
- */
- mfmsr r12
- mflr r11
- mfcr r9
- ori r12,r12,MSR_EE
- cmpwi r3,0x900
- beq decrementer_common
- cmpwi r3,0x500
- beq hardware_interrupt_common
-BEGIN_FTR_SECTION
- cmpwi r3,0xe80
- beq h_doorbell_common
- cmpwi r3,0xea0
- beq h_virt_irq_common
- cmpwi r3,0xe60
- beq hmi_exception_common
-FTR_SECTION_ELSE
- cmpwi r3,0xa00
- beq doorbell_super_common
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
- blr
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
- .globl system_reset_fwnmi
- .align 7
-system_reset_fwnmi:
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-kvmppc_skip_interrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- rfid
- b .
-
-kvmppc_skip_Hinterrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- hrfid
- b .
-#endif
-
-/*
- * Ensure that any handlers that get invoked from the exception prologs
- * above are below the first 64KB (0x10000) of the kernel image because
- * the prologs assemble the addresses of these handlers using the
- * LOAD_HANDLER macro, which uses an ori instruction.
- */
-
-/*** Common interrupt handlers ***/
-
- STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
-
- STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
- STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
- STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
-#ifdef CONFIG_PPC_DOORBELL
- STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
-#else
- STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
-#endif
- STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
- STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
- STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
- STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
-#ifdef CONFIG_PPC_DOORBELL
- STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
-#else
- STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
-#endif
- STD_EXCEPTION_COMMON_ASYNC(0xea0, h_virt_irq, do_IRQ)
- STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
- STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
- STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
-#ifdef CONFIG_ALTIVEC
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
-#else
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
-#endif
-
- /*
- * Relocation-on interrupts: A subset of the interrupts can be delivered
- * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
- * it. Addresses are the same as the original interrupt addresses, but
- * offset by 0xc000000000004000.
- * It's impossible to receive interrupts below 0x300 via this mechanism.
- * KVM: None of these traps are from the guest ; anything that escalated
- * to HV=1 from HV=0 is delivered via real mode handlers.
- */
-
- /*
- * This uses the standard macro, since the original 0x300 vector
- * only has extra guff for STAB-based processors -- which never
- * come here.
- */
- STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
- . = 0x4380
- .globl data_access_slb_relon_pSeries
-data_access_slb_relon_pSeries:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_DAR
- mfspr r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
- . = 0x4480
- .globl instruction_access_slb_relon_pSeries
-instruction_access_slb_relon_pSeries:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- . = 0x4500
- .globl hardware_interrupt_relon_pSeries;
- .globl hardware_interrupt_relon_hv;
-hardware_interrupt_relon_pSeries:
-hardware_interrupt_relon_hv:
- BEGIN_FTR_SECTION
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
- FTR_SECTION_ELSE
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
- STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
- STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
- STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
- MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
- STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
- MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
- STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
-
- . = 0x4c00
- .globl system_call_relon_pSeries
-system_call_relon_pSeries:
- HMT_MEDIUM
- SYSCALL_PSERIES_1
- SYSCALL_PSERIES_2_DIRECT
- SYSCALL_PSERIES_3
-
- STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
-
- . = 0x4e00
- b . /* Can't happen, see v2.07 Book III-S section 6.5 */
-
- . = 0x4e20
- b . /* Can't happen, see v2.07 Book III-S section 6.5 */
-
- . = 0x4e40
-emulation_assist_relon_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b emulation_assist_relon_hv
-
- . = 0x4e60
- b . /* Can't happen, see v2.07 Book III-S section 6.5 */
-
- . = 0x4e80
-h_doorbell_relon_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_doorbell_relon_hv
-
- . = 0x4ea0
-h_virt_irq_relon_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b h_virt_irq_relon_hv
-
- . = 0x4f00
-performance_monitor_relon_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b performance_monitor_relon_pSeries
-
- . = 0x4f20
-altivec_unavailable_relon_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b altivec_unavailable_relon_pSeries
-
- . = 0x4f40
-vsx_unavailable_relon_pseries_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b vsx_unavailable_relon_pSeries
-
- . = 0x4f60
-facility_unavailable_relon_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_relon_pSeries
-
- . = 0x4f80
-hv_facility_unavailable_relon_trampoline:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hv_facility_unavailable_relon_hv
-
- STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
-#ifdef CONFIG_PPC_DENORMALISATION
- . = 0x5500
- b denorm_exception_hv
-#endif
- STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
-
- .align 7
-system_call_entry:
- b system_call_common
-
-ppc64_runlatch_on_trampoline:
- b __ppc64_runlatch_on
-
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
- .align 7
- .globl data_access_common
-data_access_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
- RECONCILE_IRQ_STATE(r10, r11)
- ld r12,_MSR(r1)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- li r5,0x300
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
-BEGIN_MMU_FTR_SECTION
- b do_hash_page /* Try to handle as hpte fault */
-MMU_FTR_SECTION_ELSE
- b handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
-
- .align 7
- .globl h_data_storage_common
-h_data_storage_common:
- mfspr r10,SPRN_HDAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_HDSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
- b ret_from_except
-
- .align 7
- .globl instruction_access_common
-instruction_access_common:
- EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
- RECONCILE_IRQ_STATE(r10, r11)
- ld r12,_MSR(r1)
- ld r3,_NIP(r1)
- andis. r4,r12,0x5820
- li r5,0x400
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
-BEGIN_MMU_FTR_SECTION
- b do_hash_page /* Try to handle as hpte fault */
-MMU_FTR_SECTION_ELSE
- b handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
-
- STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
+TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
+EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
- .align 7
- .globl machine_check_common
-machine_check_common:
-
mfspr r10,SPRN_DAR
std r10,PACA_EXMC+EX_DAR(r13)
mfspr r10,SPRN_DSISR
@@ -1003,230 +294,6 @@ machine_check_common:
bl machine_check_exception
b ret_from_except
- .align 7
- .globl alignment_common
-alignment_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl alignment_exception
- b ret_from_except
-
- .align 7
- .globl program_check_common
-program_check_common:
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl program_check_exception
- b ret_from_except
-
- .align 7
- .globl fp_unavailable_common
-fp_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne 1f /* if from user, just load it up */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl kernel_fp_unavailable_exception
- BUG_OPCODE
-1:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
- * transaction), go do TM stuff
- */
- rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
- bne- 2f
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- bl load_up_fpu
- b fast_exception_return
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl fp_unavailable_tm
- b ret_from_except
-#endif
- .align 7
- .globl altivec_unavailable_common
-altivec_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- beq 1f
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- BEGIN_FTR_SECTION_NESTED(69)
- /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
- * transaction), go do TM stuff
- */
- rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
- bne- 2f
- END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
-#endif
- bl load_up_altivec
- b fast_exception_return
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl altivec_unavailable_tm
- b ret_from_except
-#endif
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl altivec_unavailable_exception
- b ret_from_except
-
- .align 7
- .globl vsx_unavailable_common
-vsx_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- beq 1f
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- BEGIN_FTR_SECTION_NESTED(69)
- /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
- * transaction), go do TM stuff
- */
- rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
- bne- 2f
- END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
-#endif
- b load_up_vsx
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl vsx_unavailable_tm
- b ret_from_except
-#endif
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl vsx_unavailable_exception
- b ret_from_except
-
- /* Equivalents to the above handlers for relocation-on interrupt vectors */
- STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
- MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
- MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
-
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
-
- /*
- * The __end_interrupts marker must be past the out-of-line (OOL)
- * handlers, so that they are copied to real address 0x100 when running
- * a relocatable kernel. This ensures they can be reached from the short
- * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
- * directly, without using LOAD_HANDLER().
- */
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
-	. = 0x7000
- .globl fwnmi_data_area
-fwnmi_data_area:
-
- /* pseries and powernv need to keep the whole page from
- * 0x7000 to 0x8000 free for use by the firmware
- */
- . = 0x8000
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
-
- STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
- STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
-
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
- STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
- STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
-
- .globl hmi_exception_early
-hmi_exception_early:
- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0xe62)
- mr r10,r1 /* Save r1 */
- ld r1,PACAEMERGSP(r13) /* Use emergency stack */
- subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- std r9,_CCR(r1) /* save CR in stackframe */
- mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
- std r11,_NIP(r1) /* save HSRR0 in stackframe */
- mfspr r12,SPRN_HSRR1 /* Save SRR1 */
- std r12,_MSR(r1) /* save SRR1 in stackframe */
- std r10,0(r1) /* make stack chain pointer */
- std r0,GPR0(r1) /* save r0 in stackframe */
- std r10,GPR1(r1) /* save r1 in stackframe */
- EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
- EXCEPTION_PROLOG_COMMON_3(0xe60)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl hmi_exception_realmode
- /* Windup the stack. */
- /* Move original HSRR0 and HSRR1 into the respective regs */
- ld r9,_MSR(r1)
- mtspr SPRN_HSRR1,r9
- ld r3,_NIP(r1)
- mtspr SPRN_HSRR0,r3
- ld r9,_CTR(r1)
- mtctr r9
- ld r9,_XER(r1)
- mtxer r9
- ld r9,_LINK(r1)
- mtlr r9
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
- REST_GPR(10, r1)
- ld r11,_CCR(r1)
- mtcr r11
- REST_GPR(11, r1)
- REST_2GPRS(12, r1)
- /* restore original r1. */
- ld r1,GPR1(r1)
-
- /*
- * Go to virtual mode and pull the HMI event information from
- * firmware.
- */
- .globl hmi_exception_after_realmode
-hmi_exception_after_realmode:
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXGEN)
- b hmi_exception_hv
-
-
#define MACHINE_CHECK_HANDLER_WINDUP \
/* Clear MSR_RI before setting SRR0 and SRR1. */\
li r0,MSR_RI; \
@@ -1262,9 +329,7 @@ hmi_exception_after_realmode:
* Handle machine check early in real mode. We come here with
* ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
*/
- .align 7
- .globl machine_check_handle_early
-machine_check_handle_early:
+EXC_COMMON_BEGIN(machine_check_handle_early)
std r0,GPR0(r1) /* Save r0 */
EXCEPTION_PROLOG_COMMON_3(0x200)
bl save_nvgprs
@@ -1353,7 +418,6 @@ machine_check_handle_early:
andi. r11,r12,MSR_RI
bne 2f
1: mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecover_mce)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
@@ -1391,7 +455,7 @@ machine_check_handle_early:
MACHINE_CHECK_HANDLER_WINDUP
b machine_check_pSeries
-unrecover_mce:
+EXC_COMMON_BEGIN(unrecover_mce)
/* Invoke machine_check_exception to print MCE event and panic. */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
@@ -1402,15 +466,155 @@ unrecover_mce:
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b 1b
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-slb_miss_realmode:
+
+
+EXC_REAL(data_access, 0x300, 0x380)
+EXC_VIRT(data_access, 0x4300, 0x4380, 0x300)
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
+
+EXC_COMMON_BEGIN(data_access_common)
+ /*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r12,_MSR(r1)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ li r5,0x300
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
+ b do_hash_page /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+ b handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+
+
+EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
+ mfspr r12,SPRN_SRR1
+ crset 4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+ b slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ LOAD_HANDLER(r10, slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+EXC_REAL_END(data_access_slb, 0x380, 0x400)
+
+EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
+ mfspr r12,SPRN_SRR1
+ crset 4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+ b slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ LOAD_HANDLER(r10, slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+EXC_VIRT_END(data_access_slb, 0x4380, 0x4400)
+TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
+
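
The relocatable-kernel path above cannot branch straight to slb_miss_realmode, so it forms the target from the runtime kernel base and jumps through the CTR; the non-relocatable prologs rely on LOAD_HANDLER, whose ori can only merge in a 16-bit offset. A minimal userspace C sketch of that constraint, with an invented kernel base and handler offset (not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for LOAD_HANDLER(): OR a handler offset into the
 * runtime kernel base, as a single ori instruction would. */
static uint64_t load_handler(uint64_t kbase, uint64_t handler_offset)
{
	assert(handler_offset < 0x10000);  /* must fit ori's 16-bit immediate */
	return kbase | (handler_offset & 0xffff);
}

int main(void)
{
	uint64_t kbase = 0xc000000002000000ull;  /* wherever the kernel landed */
	printf("handler at 0x%llx\n",
	       (unsigned long long)load_handler(kbase, 0x4f00));
	return 0;
}
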
+
+EXC_REAL(instruction_access, 0x400, 0x480)
+EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400)
+TRAMP_KVM(PACA_EXGEN, 0x400)
+
+EXC_COMMON_BEGIN(instruction_access_common)
+ EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r12,_MSR(r1)
+ ld r3,_NIP(r1)
+ andis. r4,r12,0x5820
+ li r5,0x400
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
+ b do_hash_page /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+ b handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+
+
+EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ mfspr r12,SPRN_SRR1
+ crclr 4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+ b slb_miss_realmode
+#else
+ mfctr r11
+ LOAD_HANDLER(r10, slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+EXC_REAL_END(instruction_access_slb, 0x480, 0x500)
+
+EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ mfspr r12,SPRN_SRR1
+ crclr 4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+ b slb_miss_realmode
+#else
+ mfctr r11
+ LOAD_HANDLER(r10, slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+EXC_VIRT_END(instruction_access_slb, 0x4480, 0x4500)
+TRAMP_KVM(PACA_EXSLB, 0x480)
+
+
+/* This handler is used by both 0x380 and 0x480 slb miss interrupts */
+EXC_COMMON_BEGIN(slb_miss_realmode)
+ /*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contains the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * cr6.eq is set for a D-SLB miss, clear for an I-SLB miss
+ * We assume we aren't going to take any exceptions during this
+ * procedure.
+ */
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
@@ -1418,29 +622,31 @@ slb_miss_realmode:
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+ std r3,PACA_EXSLB+EX_DAR(r13)
+ crset 4*cr0+eq
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
bl slb_allocate_realmode
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif
- /* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-
mtlr r10
+
+ beq 8f /* if bad address, make full stack frame */
+
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
-BEGIN_MMU_FTR_SECTION
beq- 2f
-FTR_SECTION_ELSE
- b 2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+
+ /* All done -- return from exception. */
.machine push
.machine "power4"
mtcrf 0x80,r9
+ mtcrf 0x02,r9 /* I/D indication is in cr6 */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
@@ -1454,7 +660,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
@@ -1462,7 +667,15 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
rfid
b .
-unrecov_slb:
+8: mfspr r11,SPRN_SRR0
+ LOAD_HANDLER(r10,bad_addr_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+EXC_COMMON_BEGIN(unrecov_slb)
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
RECONCILE_IRQ_STATE(r10, r11)
bl save_nvgprs
@@ -1470,9 +683,701 @@ unrecov_slb:
bl unrecoverable_exception
b 1b
+EXC_COMMON_BEGIN(bad_addr_slb)
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r3, PACA_EXSLB+EX_DAR(r13)
+ std r3, _DAR(r1)
+ beq cr6, 2f
+ li r10, 0x480 /* fix trap number for I-SLB miss */
+ std r10, _TRAP(r1)
+2: bl save_nvgprs
+ addi r3, r1, STACK_FRAME_OVERHEAD
+ bl slb_miss_bad_addr
+ b ret_from_except
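
For orientation, a hedged C rendering of the bad_addr_slb path above; is_dside stands in for the cr6.eq flag set at the 0x380/0x480 entry points, and slb_frame is a simplified stand-in for the exception frame:

/* Simplified sketch, not kernel code. */
struct slb_frame {
	unsigned long dar;   /* faulting address, from PACA_EXSLB+EX_DAR */
	unsigned long trap;
};

static void bad_addr_slb_sketch(struct slb_frame *f, int is_dside,
				unsigned long ea)
{
	f->dar = ea;
	f->trap = is_dside ? 0x380 : 0x480;  /* fix trap number for an I-SLB miss */
	/* ...then save non-volatile GPRs and call slb_miss_bad_addr(regs) */
}
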
+
+EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600)
+ .globl hardware_interrupt_hv;
+hardware_interrupt_hv:
+ BEGIN_FTR_SECTION
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
+ EXC_HV, SOFTEN_TEST_HV)
+do_kvm_H0x500:
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
+ FTR_SECTION_ELSE
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
+ EXC_STD, SOFTEN_TEST_PR)
+do_kvm_0x500:
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+EXC_REAL_END(hardware_interrupt, 0x500, 0x600)
+
+EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x4600)
+ .globl hardware_interrupt_relon_hv;
+hardware_interrupt_relon_hv:
+ BEGIN_FTR_SECTION
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_HV, SOFTEN_TEST_HV)
+ FTR_SECTION_ELSE
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+EXC_VIRT_END(hardware_interrupt, 0x4500, 0x4600)
+
+EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
+
+
+EXC_REAL(alignment, 0x600, 0x700)
+EXC_VIRT(alignment, 0x4600, 0x4700, 0x600)
+TRAMP_KVM(PACA_EXGEN, 0x600)
+EXC_COMMON_BEGIN(alignment_common)
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl alignment_exception
+ b ret_from_except
+
+
+EXC_REAL(program_check, 0x700, 0x800)
+EXC_VIRT(program_check, 0x4700, 0x4800, 0x700)
+TRAMP_KVM(PACA_EXGEN, 0x700)
+EXC_COMMON_BEGIN(program_check_common)
+ EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl program_check_exception
+ b ret_from_except
+
+
+EXC_REAL(fp_unavailable, 0x800, 0x900)
+EXC_VIRT(fp_unavailable, 0x4800, 0x4900, 0x800)
+TRAMP_KVM(PACA_EXGEN, 0x800)
+EXC_COMMON_BEGIN(fp_unavailable_common)
+ EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+ bne 1f /* if from user, just load it up */
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl kernel_fp_unavailable_exception
+ BUG_OPCODE
+1:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in
+ * a transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ bl load_up_fpu
+ b fast_exception_return
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl fp_unavailable_tm
+ b ret_from_except
+#endif
+
+
+EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
+EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
+TRAMP_KVM(PACA_EXGEN, 0x900)
+EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+
+
+EXC_REAL_HV(hdecrementer, 0x980, 0xa00)
+EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
+TRAMP_KVM_HV(PACA_EXGEN, 0x980)
+EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
+
+
+EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0xb00)
+EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x4b00, 0xa00)
+TRAMP_KVM(PACA_EXGEN, 0xa00)
+#ifdef CONFIG_PPC_DOORBELL
+EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
+#else
+EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
+#endif
+
+
+EXC_REAL(trap_0b, 0xb00, 0xc00)
+EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
+TRAMP_KVM(PACA_EXGEN, 0xb00)
+EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+
+
+#define LOAD_SYSCALL_HANDLER(reg) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(system_call_common))@l;
+
+/* Syscall routine is used twice, in reloc-off and reloc-on paths */
+#define SYSCALL_PSERIES_1 \
+BEGIN_FTR_SECTION \
+ cmpdi r0,0x1ebe ; \
+ beq- 1f ; \
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
+ mr r9,r13 ; \
+ GET_PACA(r13) ; \
+ mfspr r11,SPRN_SRR0 ; \
+0:
+
+#define SYSCALL_PSERIES_2_RFID \
+ mfspr r12,SPRN_SRR1 ; \
+ LOAD_SYSCALL_HANDLER(r10) ; \
+ mtspr SPRN_SRR0,r10 ; \
+ ld r10,PACAKMSR(r13) ; \
+ mtspr SPRN_SRR1,r10 ; \
+ rfid ; \
+ b . ; /* prevent speculative execution */
+
+#define SYSCALL_PSERIES_3 \
+ /* Fast LE/BE switch system call */ \
+1: mfspr r12,SPRN_SRR1 ; \
+ xori r12,r12,MSR_LE ; \
+ mtspr SPRN_SRR1,r12 ; \
+ rfid ; /* return to userspace */ \
+ b . ; /* prevent speculative execution */
+
+#if defined(CONFIG_RELOCATABLE)
+ /*
+ * We can't branch directly so we do it via the CTR which
+ * is volatile across system calls.
+ */
+#define SYSCALL_PSERIES_2_DIRECT \
+ LOAD_SYSCALL_HANDLER(r12) ; \
+ mtctr r12 ; \
+ mfspr r12,SPRN_SRR1 ; \
+ li r10,MSR_RI ; \
+ mtmsrd r10,1 ; \
+ bctr ;
+#else
+ /* We can branch directly */
+#define SYSCALL_PSERIES_2_DIRECT \
+ mfspr r12,SPRN_SRR1 ; \
+ li r10,MSR_RI ; \
+ mtmsrd r10,1 ; /* Set RI (EE=0) */ \
+ b system_call_common ;
+#endif
+
+EXC_REAL_BEGIN(system_call, 0xc00, 0xd00)
+ /*
+ * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
+ * that support it) before changing to HMT_MEDIUM. That allows the KVM
+ * code to save that value into the guest state (it is the guest's PPR
+ * value). Otherwise just change to HMT_MEDIUM as userspace has
+ * already saved the PPR.
+ */
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
+ HMT_MEDIUM;
+ std r10,PACA_EXGEN+EX_R10(r13)
+ OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
+ mfcr r9
+ KVMTEST_PR(0xc00)
+ GET_SCRATCH0(r13)
+#else
+ HMT_MEDIUM;
+#endif
+ SYSCALL_PSERIES_1
+ SYSCALL_PSERIES_2_RFID
+ SYSCALL_PSERIES_3
+EXC_REAL_END(system_call, 0xc00, 0xd00)
+
+EXC_VIRT_BEGIN(system_call, 0x4c00, 0x4d00)
+ HMT_MEDIUM
+ SYSCALL_PSERIES_1
+ SYSCALL_PSERIES_2_DIRECT
+ SYSCALL_PSERIES_3
+EXC_VIRT_END(system_call, 0x4c00, 0x4d00)
+
+TRAMP_KVM(PACA_EXGEN, 0xc00)
+
+
+EXC_REAL(single_step, 0xd00, 0xe00)
+EXC_VIRT(single_step, 0x4d00, 0x4e00, 0xd00)
+TRAMP_KVM(PACA_EXGEN, 0xd00)
+EXC_COMMON(single_step_common, 0xd00, single_step_exception)
+
+EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0xe20)
+EXC_VIRT_NONE(0x4e00, 0x4e20)
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
+EXC_COMMON_BEGIN(h_data_storage_common)
+ mfspr r10,SPRN_HDAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_HDSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unknown_exception
+ b ret_from_except
+
+
+EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0xe40)
+EXC_VIRT_NONE(0x4e20, 0x4e40)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
+EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
+
+
+EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0xe60)
+EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x4e60, 0xe40)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
+EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
+
+
+/*
+ * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
+ * first, and then eventually from there to the trampoline to get into virtual
+ * mode.
+ */
+__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0xe80, hmi_exception_early)
+__TRAMP_REAL_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60)
+EXC_VIRT_NONE(0x4e60, 0x4e80)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
+TRAMP_REAL_BEGIN(hmi_exception_early)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ std r9,_CCR(r1) /* save CR in stackframe */
+ mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
+ std r11,_NIP(r1) /* save HSRR0 in stackframe */
+ mfspr r12,SPRN_HSRR1 /* Save SRR1 */
+ std r12,_MSR(r1) /* save SRR1 in stackframe */
+ std r10,0(r1) /* make stack chain pointer */
+ std r0,GPR0(r1) /* save r0 in stackframe */
+ std r10,GPR1(r1) /* save r1 in stackframe */
+ EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
+ EXCEPTION_PROLOG_COMMON_3(0xe60)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl hmi_exception_realmode
+ /* Windup the stack. */
+ /* Move original HSRR0 and HSRR1 into the respective regs */
+ ld r9,_MSR(r1)
+ mtspr SPRN_HSRR1,r9
+ ld r3,_NIP(r1)
+ mtspr SPRN_HSRR0,r3
+ ld r9,_CTR(r1)
+ mtctr r9
+ ld r9,_XER(r1)
+ mtxer r9
+ ld r9,_LINK(r1)
+ mtlr r9
+ REST_GPR(0, r1)
+ REST_8GPRS(2, r1)
+ REST_GPR(10, r1)
+ ld r11,_CCR(r1)
+ mtcr r11
+ REST_GPR(11, r1)
+ REST_2GPRS(12, r1)
+ /* restore original r1. */
+ ld r1,GPR1(r1)
+
+ /*
+ * Go to virtual mode and pull the HMI event information from
+ * firmware.
+ */
+ .globl hmi_exception_after_realmode
+hmi_exception_after_realmode:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b tramp_real_hmi_exception
+
+EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception)
+
+
+EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0xea0)
+EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x4ea0, 0xe80)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
+#ifdef CONFIG_PPC_DOORBELL
+EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
+#else
+EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
+#endif
+
+
+EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0xec0)
+EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x4ec0, 0xea0)
+TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
+EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
+
+
+EXC_REAL_NONE(0xec0, 0xf00)
+EXC_VIRT_NONE(0x4ec0, 0x4f00)
+
+
+EXC_REAL_OOL(performance_monitor, 0xf00, 0xf20)
+EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x4f20, 0xf00)
+TRAMP_KVM(PACA_EXGEN, 0xf00)
+EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
+
+
+EXC_REAL_OOL(altivec_unavailable, 0xf20, 0xf40)
+EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x4f40, 0xf20)
+TRAMP_KVM(PACA_EXGEN, 0xf20)
+EXC_COMMON_BEGIN(altivec_unavailable_common)
+ EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ beq 1f
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION_NESTED(69)
+ /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in
+ * a transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+ END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
+#endif
+ bl load_up_altivec
+ b fast_exception_return
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl altivec_unavailable_tm
+ b ret_from_except
+#endif
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl altivec_unavailable_exception
+ b ret_from_except
+
+
+EXC_REAL_OOL(vsx_unavailable, 0xf40, 0xf60)
+EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x4f60, 0xf40)
+TRAMP_KVM(PACA_EXGEN, 0xf40)
+EXC_COMMON_BEGIN(vsx_unavailable_common)
+ EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ beq 1f
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION_NESTED(69)
+ /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was in
+ * a transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+ END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
+#endif
+ b load_up_vsx
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl vsx_unavailable_tm
+ b ret_from_except
+#endif
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl vsx_unavailable_exception
+ b ret_from_except
+
+
+EXC_REAL_OOL(facility_unavailable, 0xf60, 0xf80)
+EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x4f80, 0xf60)
+TRAMP_KVM(PACA_EXGEN, 0xf60)
+EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
+
+
+EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0)
+EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x4fa0, 0xf80)
+TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
+EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
+
+
+EXC_REAL_NONE(0xfa0, 0x1200)
+EXC_VIRT_NONE(0x4fa0, 0x5200)
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300)
+EXC_VIRT_NONE(0x5200, 0x5300)
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
+EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1200, 0x1300)
+EXC_VIRT_NONE(0x5200, 0x5300)
+#endif
+
+
+EXC_REAL(instruction_breakpoint, 0x1300, 0x1400)
+EXC_VIRT(instruction_breakpoint, 0x5300, 0x5400, 0x1300)
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
+EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
+
+EXC_REAL_NONE(0x1400, 0x1500)
+EXC_VIRT_NONE(0x5400, 0x5500)
+
+EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600)
+ mtspr SPRN_SPRG_HSCRATCH0,r13
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
+
+#ifdef CONFIG_PPC_DENORMALISATION
+ mfspr r10,SPRN_HSRR1
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ addi r11,r11,-4 /* HSRR0 is next instruction */
+ bne+ denorm_assist
+#endif
+
+ KVMTEST_PR(0x1500)
+ EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+EXC_REAL_END(denorm_exception_hv, 0x1500, 0x1600)
+
+#ifdef CONFIG_PPC_DENORMALISATION
+EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x5600)
+ b exc_real_0x1500_denorm_exception_hv
+EXC_VIRT_END(denorm_exception, 0x5500, 0x5600)
+#else
+EXC_VIRT_NONE(0x5500, 0x5600)
+#endif
+
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500)
+
+#ifdef CONFIG_PPC_DENORMALISATION
+TRAMP_REAL_BEGIN(denorm_assist)
+BEGIN_FTR_SECTION
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER6 do that here for all FP regs.
+ */
+ mfmsr r10
+ ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
+ xori r10,r10,(MSR_FE0|MSR_FE1)
+ mtmsrd r10
+ sync
+
+#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n) FMR2(n) ; FMR2(n+2)
+#define FMR8(n) FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+ FMR32(0)
+
+FTR_SECTION_ELSE
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER7 do that here for the first 32 VSX registers only.
+ */
+ mfmsr r10
+ oris r10,r10,MSR_VSX@h
+ mtmsrd r10
+ sync
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+ XVCPSGNDP32(0)
+
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ b denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+ XVCPSGNDP32(32)
+denorm_done:
+ mtspr SPRN_HSRR0,r11
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ RESTORE_PPR_PACA(PACA_EXGEN, r10)
+BEGIN_FTR_SECTION
+ ld r10,PACA_EXGEN+EX_CFAR(r13)
+ mtspr SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ HRFID
+ b .
+#endif
+
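The FMR*/XVCPSGNDP* helpers above reach 32 (or 64) registers through a chain of doubling macros. A purely illustrative C analogue of that pattern, with array slots standing in for FP/VSX registers:

#include <stdio.h>

#define TOUCH2(a, n)  do { (a)[(n)]++; (a)[(n) + 1]++; } while (0)
#define TOUCH4(a, n)  do { TOUCH2(a, n); TOUCH2(a, (n) + 2); } while (0)
#define TOUCH8(a, n)  do { TOUCH4(a, n); TOUCH4(a, (n) + 4); } while (0)
#define TOUCH16(a, n) do { TOUCH8(a, n); TOUCH8(a, (n) + 8); } while (0)
#define TOUCH32(a, n) do { TOUCH16(a, n); TOUCH16(a, (n) + 16); } while (0)

int main(void)
{
	int reg[64] = { 0 };

	TOUCH32(reg, 0);   /* like FMR32(0): covers "registers" 0..31 */
	printf("reg[0]=%d reg[31]=%d reg[32]=%d\n", reg[0], reg[31], reg[32]);
	return 0;
}
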
+EXC_COMMON_HV(denorm_common, 0x1500, unknown_exception)
+
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_maintenance, 0x1600, 0x1700)
+EXC_VIRT_NONE(0x5600, 0x5700)
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
+EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1600, 0x1700)
+EXC_VIRT_NONE(0x5600, 0x5700)
+#endif
+
+
+EXC_REAL(altivec_assist, 0x1700, 0x1800)
+EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700)
+TRAMP_KVM(PACA_EXGEN, 0x1700)
+#ifdef CONFIG_ALTIVEC
+EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
+#else
+EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
+#endif
+
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_thermal, 0x1800, 0x1900)
+EXC_VIRT_NONE(0x5800, 0x5900)
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
+EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1800, 0x1900)
+EXC_VIRT_NONE(0x5800, 0x5900)
+#endif
+
+
+/*
+ * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
+ * - If it was a doorbell we return immediately since doorbells are edge
+ * triggered and won't automatically refire.
+ * - If it was an HMI we return immediately since we handled it in realmode
+ * and it won't refire.
+ * - else we hard disable and return.
+ * This is called with r10 containing the value to OR to the paca field.
+ */
+#define MASKED_INTERRUPT(_H) \
+masked_##_H##interrupt: \
+ std r11,PACA_EXGEN+EX_R11(r13); \
+ lbz r11,PACAIRQHAPPENED(r13); \
+ or r11,r11,r10; \
+ stb r11,PACAIRQHAPPENED(r13); \
+ cmpwi r10,PACA_IRQ_DEC; \
+ bne 1f; \
+ lis r10,0x7fff; \
+ ori r10,r10,0xffff; \
+ mtspr SPRN_DEC,r10; \
+ b 2f; \
+1: cmpwi r10,PACA_IRQ_DBELL; \
+ beq 2f; \
+ cmpwi r10,PACA_IRQ_HMI; \
+ beq 2f; \
+ mfspr r10,SPRN_##_H##SRR1; \
+ rldicl r10,r10,48,1; /* clear MSR_EE */ \
+ rotldi r10,r10,16; \
+ mtspr SPRN_##_H##SRR1,r10; \
+2: mtcrf 0x80,r9; \
+ ld r9,PACA_EXGEN+EX_R9(r13); \
+ ld r10,PACA_EXGEN+EX_R10(r13); \
+ ld r11,PACA_EXGEN+EX_R11(r13); \
+ GET_SCRATCH0(r13); \
+ ##_H##rfid; \
+ b .
+
+/*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+ * cannot reach these if they are put there.
+ */
+USE_FIXED_SECTION(virt_trampolines)
+ MASKED_INTERRUPT()
+ MASKED_INTERRUPT(H)
+
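A minimal C sketch of the policy MASKED_INTERRUPT() encodes; the PACA_IRQ_* values and the MSR_EE bit position are assumptions for illustration, not the kernel's definitions:

#include <stdint.h>

#define PACA_IRQ_DEC	0x08	/* placeholder values */
#define PACA_IRQ_DBELL	0x10
#define PACA_IRQ_HMI	0x20
#define MSR_EE		(1ull << 15)

struct paca_sketch { uint8_t irq_happened; };

static void masked_interrupt_sketch(struct paca_sketch *p, uint8_t reason,
				    uint32_t *dec, uint64_t *srr1)
{
	p->irq_happened |= reason;		/* remember it for later replay */
	if (reason == PACA_IRQ_DEC)
		*dec = 0x7fffffff;		/* push the decrementer far out */
	else if (reason != PACA_IRQ_DBELL && reason != PACA_IRQ_HMI)
		*srr1 &= ~MSR_EE;		/* hard disable on return */
	/* doorbells and HMIs won't refire, so nothing more to do for them */
}
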
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+#endif
+
+/*
+ * Ensure that any handlers that get invoked from the exception prologs
+ * above are below the first 64KB (0x10000) of the kernel image because
+ * the prologs assemble the addresses of these handlers using the
+ * LOAD_HANDLER macro, which uses an ori instruction.
+ */
+
+/*** Common interrupt handlers ***/
+
+
+ /*
+ * Relocation-on interrupts: A subset of the interrupts can be delivered
+ * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
+ * it. Addresses are the same as the original interrupt addresses, but
+ * offset by 0xc000000000004000.
+ * It's impossible to receive interrupts below 0x300 via this mechanism.
+ * KVM: None of these traps are from the guest; anything that escalated
+ * to HV=1 from HV=0 is delivered via real mode handlers.
+ */
+
+ /*
+ * This uses the standard macro, since the original 0x300 vector
+ * only has extra guff for STAB-based processors -- which never
+ * come here.
+ */
+
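The relocation-on mapping described above is a fixed offset from the real-mode vectors; a tiny sketch, with the 0x900 decrementer vector as the example:

#include <stdint.h>
#include <stdio.h>

static uint64_t relon_vector(uint64_t real_vector)
{
	return 0xc000000000004000ull + real_vector;
}

int main(void)
{
	/* 0x900 -> 0xc000000000004900 */
	printf("0x%llx\n", (unsigned long long)relon_vector(0x900));
	return 0;
}
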
+EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
+ b __ppc64_runlatch_on
+
+USE_FIXED_SECTION(virt_trampolines)
+ /*
+ * The __end_interrupts marker must be past the out-of-line (OOL)
+ * handlers, so that they are copied to real address 0x100 when running
+ * a relocatable kernel. This ensures they can be reached from the short
+ * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
+ * directly, without using LOAD_HANDLER().
+ */
+ .align 7
+ .globl __end_interrupts
+__end_interrupts:
+DEFINE_FIXED_SYMBOL(__end_interrupts)
#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
+TRAMP_REAL_BEGIN(power4_fixup_nap)
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
@@ -1480,6 +1385,13 @@ power4_fixup_nap:
blr
#endif
+CLOSE_FIXED_SECTION(real_vectors);
+CLOSE_FIXED_SECTION(real_trampolines);
+CLOSE_FIXED_SECTION(virt_vectors);
+CLOSE_FIXED_SECTION(virt_trampolines);
+
+USE_TEXT_SECTION()
+
/*
* Hash table stuff
*/
@@ -1625,3 +1537,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_bad_stack
b 1b
+
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
+ * which kind of interrupt. MSR:EE is already off. We generate a
+ * stackframe as if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable interrupts.
+ */
+_GLOBAL(__replay_interrupt)
+ /* We are going to jump to the exception common code which
+ * will retrieve various register values from the PACA which
+ * we don't give a damn about, so we don't bother storing them.
+ */
+ mfmsr r12
+ mflr r11
+ mfcr r9
+ ori r12,r12,MSR_EE
+ cmpwi r3,0x900
+ beq decrementer_common
+ cmpwi r3,0x500
+ beq hardware_interrupt_common
+BEGIN_FTR_SECTION
+ cmpwi r3,0xe80
+ beq h_doorbell_common
+ cmpwi r3,0xea0
+ beq h_virt_irq_common
+ cmpwi r3,0xe60
+ beq hmi_exception_common
+FTR_SECTION_ELSE
+ cmpwi r3,0xa00
+ beq doorbell_super_common
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ blr
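
A hedged C rendering of the __replay_interrupt dispatch above; handler names are returned as strings purely for illustration:

#include <stdio.h>

static const char *replay_target(unsigned int vec, int cpu_has_hvmode)
{
	switch (vec) {
	case 0x900: return "decrementer_common";
	case 0x500: return "hardware_interrupt_common";
	case 0xe80: return cpu_has_hvmode ? "h_doorbell_common" : "none";
	case 0xea0: return cpu_has_hvmode ? "h_virt_irq_common" : "none";
	case 0xe60: return cpu_has_hvmode ? "hmi_exception_common" : "none";
	case 0xa00: return cpu_has_hvmode ? "none" : "doorbell_super_common";
	default:    return "none";
	}
}

int main(void)
{
	printf("%s\n", replay_target(0x900, 1));
	return 0;
}
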
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index b3a663333d36..8f0c7c5d93f2 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -333,6 +333,11 @@ int __init fadump_reserve_mem(void)
return 1;
}
+unsigned long __init arch_reserved_kernel_pages(void)
+{
+ return memblock_reserved_size() / PAGE_SIZE;
+}
+
/* Look for fadump= cmdline option. */
static int __init early_fadump_param(char *p)
{
@@ -778,7 +783,11 @@ static int fadump_init_elfcore_header(char *bufp)
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
- elf->e_flags = ELF_CORE_EFLAGS;
+#if defined(_CALL_ELF)
+ elf->e_flags = _CALL_ELF;
+#else
+ elf->e_flags = 0;
+#endif
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
@@ -1104,7 +1113,9 @@ static ssize_t fadump_release_memory_store(struct kobject *kobj,
* Take away the '/proc/vmcore'. We are releasing the dump
* memory, hence it will not be valid anymore.
*/
+#ifdef CONFIG_PROC_VMCORE
vmcore_cleanup();
+#endif
fadump_invalidate_release_mem();
} else
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 15da2b5df85e..08d14b096eb9 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -50,32 +50,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/* void do_load_up_transact_fpu(struct thread_struct *thread)
- *
- * This is similar to load_up_fpu but for the transactional version of the FP
- * register set. It doesn't mess with the task MSR or valid flags.
- * Furthermore, we don't do lazy FP with TM currently.
- */
-_GLOBAL(do_load_up_transact_fpu)
- mfmsr r6
- ori r5,r6,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- oris r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- SYNC
- MTMSRD(r5)
-
- addi r7,r3,THREAD_TRANSACT_FPSTATE
- lfd fr0,FPSTATE_FPSCR(r7)
- MTFSF_L(fr0)
- REST_32FPVSRS(0, R4, R7)
-
- blr
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
/*
* Load state from memory into FP registers including FPSCR.
* Assumes the caller has enabled FP in the MSR.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index dc0488b6f6e1..a3f821eb7e9a 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -266,7 +266,6 @@ __secondary_hold_acknowledge:
#define EXCEPTION_PROLOG_2 \
- CLR_TOP32(r11); \
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
@@ -862,7 +861,6 @@ __secondary_start:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
- CLR_TOP32(r4)
mtspr SPRN_SPRG_THREAD,r4
li r3,0
mtspr SPRN_SPRG_RTAS,r3 /* 0 => not in RTAS */
@@ -949,7 +947,6 @@ start_here:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
- CLR_TOP32(r4)
mtspr SPRN_SPRG_THREAD,r4
li r3,0
mtspr SPRN_SPRG_RTAS,r3 /* 0 => not in RTAS */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f765b0434731..79da0641bae2 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -28,6 +28,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
+#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
@@ -65,9 +66,14 @@
* 2. The kernel is entered at __start
*/
- .text
- .globl _stext
-_stext:
+OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
+USE_FIXED_SECTION(first_256B)
+ /*
+ * Offsets are relative to the start of the fixed section, and
+ * first_256B starts at 0. Offsets are a bit easier to use here
+ * than the fixed section entry macros.
+ */
+ . = 0x0
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
@@ -104,6 +110,7 @@ __secondary_hold_acknowledge:
. = 0x5c
.globl __run_at_load
__run_at_load:
+DEFINE_FIXED_SYMBOL(__run_at_load)
.long 0x72756e30 /* "run0" -- relocate to 0 by default */
#endif
@@ -133,7 +140,7 @@ __secondary_hold:
/* Tell the master cpu we're here */
/* Relocation is off & we are located at an address less */
/* than 0x100, so only need to grab low order offset. */
- std r24,__secondary_hold_acknowledge-_stext(0)
+ std r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
sync
li r26,0
@@ -141,7 +148,7 @@ __secondary_hold:
tovirt(r26,r26)
#endif
/* All secondary cpus wait here until told to start. */
-100: ld r12,__secondary_hold_spinloop-_stext(r26)
+100: ld r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
cmpdi 0,r12,0
beq 100b
@@ -166,12 +173,13 @@ __secondary_hold:
#else
BUG_OPCODE
#endif
+CLOSE_FIXED_SECTION(first_256B)
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
exception_marker:
.tc ID_72656773_68657265[TC],0x7265677368657265
- .text
+ .previous
/*
* On server, we include the exception vectors code here as it
@@ -180,8 +188,12 @@ exception_marker:
*/
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
+#else
+OPEN_TEXT_SECTION(0x100)
#endif
+USE_TEXT_SECTION()
+
#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -558,7 +570,7 @@ __after_prom_start:
#if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
- lwz r7,__run_at_load-_stext(r26)
+ lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
#if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26)
#endif
@@ -601,7 +613,7 @@ __after_prom_start:
#if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
- lwz r7,__run_at_load-_stext(r26)
+ lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
cmplwi cr0,r7,1
bne 3f
@@ -611,28 +623,35 @@ __after_prom_start:
sub r5,r5,r11
#else
/* just copy interrupts */
- LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+ LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
b 5f
3:
#endif
- lis r5,(copy_to_here - _stext)@ha
- addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
+ /* # bytes of memory to copy */
+ lis r5,(ABS_ADDR(copy_to_here))@ha
+ addi r5,r5,(ABS_ADDR(copy_to_here))@l
bl copy_and_flush /* copy the first n bytes */
/* this includes the code being */
/* executed here. */
- addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */
- addi r12,r8,(4f - _stext)@l /* that we just made */
+ /* Jump to the copy of this code that we just made */
+ addis r8,r3,(ABS_ADDR(4f))@ha
+ addi r12,r8,(ABS_ADDR(4f))@l
mtctr r12
bctr
.balign 8
-p_end: .llong _end - _stext
+p_end: .llong _end - copy_to_here
-4: /* Now copy the rest of the kernel up to _end */
- addis r5,r26,(p_end - _stext)@ha
- ld r5,(p_end - _stext)@l(r5) /* get _end */
+4:
+ /*
+ * Now copy the rest of the kernel up to _end, add
+ * _end - copy_to_here to the copy limit and run again.
+ */
+ addis r8,r26,(ABS_ADDR(p_end))@ha
+ ld r8,(ABS_ADDR(p_end))@l(r8)
+ add r5,r5,r8
5: bl copy_and_flush /* copy the rest */
9: b start_here_multiplatform
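
The copy-limit arithmetic in the rewritten loop above, restated in a few lines of C; the symbol addresses are invented placeholders:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0, copy_to_here = 0x8000, end = 0x1200000;
	unsigned long limit = copy_to_here - start;  /* first pass: up to copy_to_here */

	limit += end - copy_to_here;                 /* p_end added for the second pass */
	printf("final copy limit = 0x%lx (== _end offset)\n", limit);
	return 0;
}
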
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 43ddaae42baf..3a185c51ce8f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -151,7 +151,6 @@ turn_on_mmu:
#define EXCEPTION_PROLOG_2 \
- CLR_TOP32(r11); \
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index aec9a1b1d25b..9781c69eae57 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -206,7 +206,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
/*
* Handle debug exception notifications.
*/
-int __kprobes hw_breakpoint_handler(struct die_args *args)
+int hw_breakpoint_handler(struct die_args *args)
{
int rc = NOTIFY_STOP;
struct perf_event *bp;
@@ -290,11 +290,12 @@ out:
rcu_read_unlock();
return rc;
}
+NOKPROBE_SYMBOL(hw_breakpoint_handler);
/*
* Handle single-step exceptions following a DABR hit.
*/
-static int __kprobes single_step_dabr_instruction(struct die_args *args)
+static int single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
@@ -329,11 +330,12 @@ static int __kprobes single_step_dabr_instruction(struct die_args *args)
return NOTIFY_STOP;
}
+NOKPROBE_SYMBOL(single_step_dabr_instruction);
/*
* Handle debug exception notifications.
*/
-int __kprobes hw_breakpoint_exceptions_notify(
+int hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
{
int ret = NOTIFY_DONE;
@@ -349,6 +351,7 @@ int __kprobes hw_breakpoint_exceptions_notify(
return ret;
}
+NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
/*
* Release the user breakpoints used by ptrace
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c1ca9282f4a0..6ca9a2ffaac7 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -227,7 +227,7 @@ int ibmebus_request_irq(u32 ist, irq_handler_t handler,
{
unsigned int irq = irq_create_mapping(NULL, ist);
- if (irq == NO_IRQ)
+ if (!irq)
return -EINVAL;
return request_irq(irq, handler, irq_flags, devname, dev_id);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 08887cf2b20e..3c05c311e35e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -67,6 +67,7 @@
#include <asm/smp.h>
#include <asm/debug.h>
#include <asm/livepatch.h>
+#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -156,6 +157,15 @@ notrace unsigned int __check_irq_replay(void)
}
/*
+ * Check if a hypervisor Maintenance interrupt happened.
+ * This is a higher priority interrupt than the others, so
+ * replay it first.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HMI;
+ if (happened & PACA_IRQ_HMI)
+ return 0xe60;
+
+ /*
* We may have missed a decrementer interrupt. We check the
* decrementer itself rather than the paca irq_happened field
* in case we also had a rollover while hard disabled
@@ -190,11 +200,6 @@ notrace unsigned int __check_irq_replay(void)
}
#endif /* CONFIG_PPC_BOOK3E */
- /* Check if an hypervisor Maintenance interrupt happened */
- local_paca->irq_happened &= ~PACA_IRQ_HMI;
- if (happened & PACA_IRQ_HMI)
- return 0xe60;
-
/* There should be nothing left ! */
BUG_ON(local_paca->irq_happened != 0);
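
An abridged C sketch of the reordering above, showing only the two replay sources relevant to the change; the PACA_IRQ_* values are placeholders:

#include <stdint.h>

#define PACA_IRQ_HMI 0x20	/* placeholder values */
#define PACA_IRQ_DEC 0x08

static unsigned int check_irq_replay_sketch(uint8_t *irq_happened,
					    int decrementer_expired)
{
	uint8_t happened = *irq_happened;

	*irq_happened &= ~PACA_IRQ_HMI;
	if (happened & PACA_IRQ_HMI)
		return 0xe60;		/* replay the higher-priority HMI first */

	*irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_expired)
		return 0x900;		/* then the decrementer */

	return 0;
}
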
@@ -514,7 +519,7 @@ void __do_irq(struct pt_regs *regs)
may_hard_irq_enable();
/* And finally process it */
- if (unlikely(irq == NO_IRQ))
+ if (unlikely(!irq))
__this_cpu_inc(irq_stat.spurious_irqs);
else
generic_handle_irq(irq);
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 7b750c4ed5c7..bc525ea0dc09 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -193,10 +193,10 @@ static int __init add_legacy_soc_port(struct device_node *np,
*/
if (tsi && !strcmp(tsi->type, "tsi-bridge"))
return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
- NO_IRQ, legacy_port_flags, 0);
+ 0, legacy_port_flags, 0);
else
return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
- NO_IRQ, legacy_port_flags, 0);
+ 0, legacy_port_flags, 0);
}
static int __init add_legacy_isa_port(struct device_node *np,
@@ -242,7 +242,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
/* Add port, irq will be dealt with later */
return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
- taddr, NO_IRQ, legacy_port_flags, 0);
+ taddr, 0, legacy_port_flags, 0);
}
@@ -314,7 +314,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
- return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
+ return add_legacy_port(np, index, iotype, base, addr, 0,
legacy_port_flags, np != pci_dev);
}
#endif
@@ -462,14 +462,14 @@ static void __init fixup_port_irq(int index,
DBG("fixup_port_irq(%d)\n", index);
virq = irq_of_parse_and_map(np, 0);
- if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) {
+ if (!virq && legacy_serial_infos[index].irq_check_parent) {
np = of_get_parent(np);
if (np == NULL)
return;
virq = irq_of_parse_and_map(np, 0);
of_node_put(np);
}
- if (virq == NO_IRQ)
+ if (!virq)
return;
port->irq = virq;
@@ -543,7 +543,7 @@ static int __init serial_dev_init(void)
struct plat_serial8250_port *port = &legacy_serial_ports[i];
struct device_node *np = legacy_serial_infos[i].np;
- if (port->irq == NO_IRQ)
+ if (!port->irq)
fixup_port_irq(i, np, port);
if (port->iotype == UPIO_PORT)
fixup_port_pio(i, np, port);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 4c780a342282..a205fa3d9bf3 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -23,6 +23,7 @@
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
+#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
@@ -31,21 +32,6 @@
#include <asm/hw_breakpoint.h>
#include <asm/asm-prototypes.h>
-#ifdef CONFIG_PPC_BOOK3E
-int default_machine_kexec_prepare(struct kimage *image)
-{
- int i;
- /*
- * Since we use the kernel fault handlers and paging code to
- * handle the virtual mode, we must make sure no destination
- * overlaps kernel static data or bss.
- */
- for (i = 0; i < image->nr_segments; i++)
- if (image->segment[i].mem < __pa(_end))
- return -ETXTBSY;
- return 0;
-}
-#else
int default_machine_kexec_prepare(struct kimage *image)
{
int i;
@@ -55,9 +41,6 @@ int default_machine_kexec_prepare(struct kimage *image)
const unsigned long *basep;
const unsigned int *sizep;
- if (!mmu_hash_ops.hpte_clear_all)
- return -ENOENT;
-
/*
* Since we use the kernel fault handlers and paging code to
* handle the virtual mode, we must make sure no destination
@@ -67,31 +50,6 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;
- /*
- * For non-LPAR, we absolutely can not overwrite the mmu hash
- * table, since we are still using the bolted entries in it to
- * do the copy. Check that here.
- *
- * It is safe if the end is below the start of the blocked
- * region (end <= low), or if the beginning is after the
- * end of the blocked region (begin >= high). Use the
- * boolean identity !(a || b) === (!a && !b).
- */
-#ifdef CONFIG_PPC_STD_MMU_64
- if (htab_address) {
- low = __pa(htab_address);
- high = low + htab_size_bytes;
-
- for (i = 0; i < image->nr_segments; i++) {
- begin = image->segment[i].mem;
- end = begin + image->segment[i].memsz;
-
- if ((begin < high) && (end > low))
- return -ETXTBSY;
- }
- }
-#endif /* CONFIG_PPC_STD_MMU_64 */
-
/* We also should not overwrite the tce tables */
for_each_node_by_type(node, "pci") {
basep = of_get_property(node, "linux,tce-base", NULL);
@@ -113,7 +71,6 @@ int default_machine_kexec_prepare(struct kimage *image)
return 0;
}
-#endif /* !CONFIG_PPC_BOOK3E */
static void copy_segments(unsigned long ind)
{
@@ -332,11 +289,14 @@ struct paca_struct kexec_paca;
/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
void *image, void *control,
- void (*clear_all)(void)) __noreturn;
+ void (*clear_all)(void),
+ bool copy_with_mmu_off) __noreturn;
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
+ bool copy_with_mmu_off;
+
/* prepare control code if any */
/*
@@ -374,18 +334,29 @@ void default_machine_kexec(struct kimage *image)
/* XXX: If anyone does 'dynamic lppacas' this will also need to be
* switched to a static version!
*/
+ /*
+ * On Book3S, the copy must happen with the MMU off if we are either
+ * using Radix page tables or we are not in an LPAR since we can
+ * overwrite the page tables while copying.
+ *
+ * In an LPAR, we keep the MMU on; otherwise we can't access beyond
+ * the RMA. On BookE there is no real MMU off mode, so we have to
+ * keep it enabled as well (but then we have bolted TLB entries).
+ */
+#ifdef CONFIG_PPC_BOOK3E
+ copy_with_mmu_off = false;
+#else
+ copy_with_mmu_off = radix_enabled() ||
+ !(firmware_has_feature(FW_FEATURE_LPAR) ||
+ firmware_has_feature(FW_FEATURE_PS3_LV1));
+#endif
/* Some things are best done in assembly. Finding globals with
* a toc is easier in C, so pass in what we can.
*/
kexec_sequence(&kexec_stack, image->start, image,
- page_address(image->control_code_page),
-#ifdef CONFIG_PPC_STD_MMU
- mmu_hash_ops.hpte_clear_all
-#else
- NULL
-#endif
- );
+ page_address(image->control_code_page),
+ mmu_cleanup_all, copy_with_mmu_off);
/* NOTREACHED */
}
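
The machine_kexec_64.c change above folds the old Book3E/Book3S variants of default_machine_kexec_prepare() into one and decides at run time whether the image copy must run with the MMU off. The predicate is dense, so here is a standalone restatement of the same logic as a sketch (it assumes the usual radix_enabled() and firmware_has_feature() helpers from asm/mmu.h and asm/firmware.h):

/* Sketch: when does the kexec copy need to run in real mode? */
static bool kexec_copy_needs_mmu_off(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3E))
		return false;	/* BookE has no real MMU-off mode */
	if (radix_enabled())
		return true;	/* the copy could overwrite radix page tables */
	/* hash MMU: only under a hypervisor is it safe to keep the MMU on */
	return !(firmware_has_feature(FW_FEATURE_LPAR) ||
		 firmware_has_feature(FW_FEATURE_PS3_LV1));
}

In practice a bare-metal hash machine and any radix machine copy in real mode, while a hash LPAR keeps the MMU on so it can reach memory beyond the RMA.
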
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index d9c912b6e632..03756ffdcd71 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -328,7 +328,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
-_KPROBE(flush_icache_range)
+_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
@@ -358,6 +358,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
sync /* additional sync needed on g4 */
isync
blr
+_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index cb195157b318..9f0bed214bcb 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -66,7 +66,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/
-_KPROBE(flush_icache_range)
+_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
@@ -109,7 +109,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
bdnz 2b
isync
blr
- .previous .text
+_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+
/*
* Like above, but only do the D-cache.
*
@@ -591,7 +592,8 @@ real_mode: /* assume normal blr return */
#endif
/*
- * kexec_sequence(newstack, start, image, control, clear_all())
+ * kexec_sequence(newstack, start, image, control, clear_all(),
+ copy_with_mmu_off)
*
* does the grungy work with stack switching and real mode switches
* also does simple calls to other code
@@ -627,7 +629,7 @@ _GLOBAL(kexec_sequence)
mr r29,r5 /* image (virt) */
mr r28,r6 /* control, unused */
mr r27,r7 /* clear_all() fn desc */
- mr r26,r8 /* spare */
+ mr r26,r8 /* copy_with_mmu_off */
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
@@ -639,15 +641,24 @@ _GLOBAL(kexec_sequence)
mtmsrd r3,1
#endif
+ /* We need to turn the MMU off unless we are in hash mode
+ * under a hypervisor
+ */
+ cmpdi r26,0
+ beq 1f
+ bl real_mode
+1:
/* copy dest pages, flush whole dest image */
mr r3,r29
bl kexec_copy_flush /* (image) */
- /* turn off mmu */
+ /* turn off mmu now if not done earlier */
+ cmpdi r26,0
+ bne 1f
bl real_mode
/* copy 0x100 bytes starting at start to 0 */
- li r3,0
+1: li r3,0
mr r4,r30 /* start, aka phys mem offset */
li r5,0x100
li r6,0
@@ -659,7 +670,9 @@ _GLOBAL(kexec_sequence)
li r6,1
stw r6,kexec_flag-1b(5)
-#ifndef CONFIG_PPC_BOOK3E
+ cmpdi r27,0
+ beq 1f
+
/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
ld r12,0(r27) /* deref function descriptor */
@@ -668,7 +681,6 @@ _GLOBAL(kexec_sequence)
#endif
mtctr r12
bctrl /* mmu_hash_ops.hpte_clear_all(void); */
-#endif /* !CONFIG_PPC_BOOK3E */
/*
* kexec image calling is:
@@ -695,7 +707,7 @@ _GLOBAL(kexec_sequence)
* are the boot cpu ?????
* other device tree differences (prop sizes, va vs pa, etc)...
*/
- mr r3,r25 # my phys cpu
+1: mr r3,r25 # my phys cpu
mr r4,r30 # start, aka phys mem offset
mtlr 4
li r5,0
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index d1f1b35bf0c7..30b89d5cbb03 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -27,7 +27,7 @@
#include <linux/sort.h>
#include <asm/setup.h>
-LIST_HEAD(module_bug_list);
+static LIST_HEAD(module_bug_list);
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 64174bf95611..34d2c595de23 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -542,9 +542,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
time->tv_nsec = 0;
}
*buf = kmemdup(buff + hdr_size, length, GFP_KERNEL);
+ kfree(buff);
if (*buf == NULL)
return -ENOMEM;
- kfree(buff);
*ecc_notice_size = 0;
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
@@ -851,7 +851,7 @@ static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
}
}
-const struct file_operations nvram_fops = {
+static const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = dev_nvram_llseek,
.read = dev_nvram_read,
@@ -956,7 +956,7 @@ int __init nvram_remove_partition(const char *name, int sig,
/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
- strncpy(part->header.name, "wwwwwwwwwwww", 12);
+ memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
@@ -974,8 +974,8 @@ int __init nvram_remove_partition(const char *name, int sig,
}
if (prev) {
prev->header.length += part->header.length;
- prev->header.checksum = nvram_checksum(&part->header);
- rc = nvram_write_header(part);
+ prev->header.checksum = nvram_checksum(&prev->header);
+ rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
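
The first nvram_64.c hunk moves kfree(buff) ahead of the NULL check so the temporary buffer is released even when kmemdup() fails; with the old ordering that failure path leaked buff. The shape of the fix, sketched outside the pstore code with hypothetical names:

#include <linux/slab.h>

/* Sketch: duplicate the payload that follows a header and free the
 * source buffer whether or not the duplication succeeded.
 */
static void *dup_payload(void *src, size_t hdr_size, size_t len)
{
	void *copy = kmemdup((char *)src + hdr_size, len, GFP_KERNEL);

	kfree(src);
	return copy;	/* NULL here becomes -ENOMEM in the caller */
}
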
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e58908066b0e..95d3769a2e26 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -360,7 +360,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
line, pin);
virq = irq_create_mapping(NULL, line);
- if (virq != NO_IRQ)
+ if (virq)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
@@ -369,7 +369,8 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
virq = irq_create_of_mapping(&oirq);
}
- if(virq == NO_IRQ) {
+
+ if (!virq) {
pr_debug(" Failed to map !\n");
return -1;
}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 526ac6750e4d..ea3d98115b88 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -178,7 +178,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
/* Maybe do a default OF mapping here */
- dev->irq = NO_IRQ;
+ dev->irq = 0;
}
of_pci_parse_addrs(node, dev);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9ee2623e0f67..9e7c10fe205f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -59,6 +59,7 @@
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
+#include <asm/asm-prototypes.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -88,7 +89,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
set_thread_flag(TIF_RESTORE_TM);
}
}
+
+static inline bool msr_tm_active(unsigned long msr)
+{
+ return MSR_TM_ACTIVE(msr);
+}
#else
+static inline bool msr_tm_active(unsigned long msr) { return false; }
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -104,7 +111,7 @@ static int __init enable_strict_msr_control(char *str)
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
-void msr_check_and_set(unsigned long bits)
+unsigned long msr_check_and_set(unsigned long bits)
{
unsigned long oldmsr = mfmsr();
unsigned long newmsr;
@@ -118,6 +125,8 @@ void msr_check_and_set(unsigned long bits)
if (oldmsr != newmsr)
mtmsr_isync(newmsr);
+
+ return newmsr;
}
void __msr_check_and_clear(unsigned long bits)
@@ -196,19 +205,30 @@ EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
+ unsigned long cpumsr;
+
WARN_ON(preemptible());
- msr_check_and_set(MSR_FP);
+ cpumsr = msr_check_and_set(MSR_FP);
if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+ * checkpointed registers are on the CPU but have definitely
+ * been saved by the reclaim code. Don't need to and *cannot*
+ * giveup as this would save to the 'live' structure not the
+ * checkpointed structure.
+ */
+ if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ return;
__giveup_fpu(current);
}
}
EXPORT_SYMBOL(enable_kernel_fp);
static int restore_fp(struct task_struct *tsk) {
- if (tsk->thread.load_fp) {
+ if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
@@ -248,12 +268,23 @@ EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
+ unsigned long cpumsr;
+
WARN_ON(preemptible());
- msr_check_and_set(MSR_VEC);
+ cpumsr = msr_check_and_set(MSR_VEC);
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+ * checkpointed registers are on the CPU but have definitely
+ * been saved by the reclaim code. Don't need to and *cannot*
+ * giveup as this would save to the 'live' structure not the
+ * checkpointed structure.
+ */
+ if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ return;
__giveup_altivec(current);
}
}
@@ -278,7 +309,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
@@ -321,12 +353,23 @@ static void save_vsx(struct task_struct *tsk)
void enable_kernel_vsx(void)
{
+ unsigned long cpumsr;
+
WARN_ON(preemptible());
- msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+ cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+ * checkpointed registers are on the CPU but have definitely
+ * been saved by the reclaim code. Don't need to and *cannot*
+ * giveup as this would save to the 'live' structure not the
+ * checkpointed structure.
+ */
+ if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ return;
if (current->thread.regs->msr & MSR_FP)
__giveup_fpu(current);
if (current->thread.regs->msr & MSR_VEC)
@@ -438,6 +481,7 @@ void giveup_all(struct task_struct *tsk)
return;
msr_check_and_set(msr_all_available);
+ check_if_tm_restore_required(tsk);
#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
@@ -464,7 +508,8 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
- if (!current->thread.load_fp && !loadvec(current->thread))
+ if (!msr_tm_active(regs->msr) &&
+ !current->thread.load_fp && !loadvec(current->thread))
return;
msr = regs->msr;
@@ -767,29 +812,15 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+
+static inline bool tm_enabled(struct task_struct *tsk)
+{
+ return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
+}
+
static void tm_reclaim_thread(struct thread_struct *thr,
struct thread_info *ti, uint8_t cause)
{
- unsigned long msr_diff = 0;
-
- /*
- * If FP/VSX registers have been already saved to the
- * thread_struct, move them to the transact_fp array.
- * We clear the TIF_RESTORE_TM bit since after the reclaim
- * the thread will no longer be transactional.
- */
- if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
- msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
- if (msr_diff & MSR_FP)
- memcpy(&thr->transact_fp, &thr->fp_state,
- sizeof(struct thread_fp_state));
- if (msr_diff & MSR_VEC)
- memcpy(&thr->transact_vr, &thr->vr_state,
- sizeof(struct thread_vr_state));
- clear_ti_thread_flag(ti, TIF_RESTORE_TM);
- msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
- }
-
/*
* Use the current MSR TM suspended bit to track if we have
* checkpointed state outstanding.
@@ -808,15 +839,9 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
- tm_reclaim(thr, thr->regs->msr, cause);
+ giveup_all(container_of(thr, struct task_struct, thread));
- /* Having done the reclaim, we now have the checkpointed
- * FP/VSX values in the registers. These might be valid
- * even if we have previously called enable_kernel_fp() or
- * flush_fp_to_thread(), so update thr->regs->msr to
- * indicate their current validity.
- */
- thr->regs->msr |= msr_diff;
+ tm_reclaim(thr, thr->ckpt_regs.msr, cause);
}
void tm_reclaim_current(uint8_t cause)
@@ -832,8 +857,8 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
*
* In switching we need to maintain a 2nd register state as
* oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
- * checkpointed (tbegin) state in ckpt_regs and saves the transactional
- * (current) FPRs into oldtask->thread.transact_fpr[].
+ * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
+ * ckvr_state
*
* We also context switch (save) TFHAR/TEXASR/TFIAR in here.
*/
@@ -845,14 +870,6 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
if (!MSR_TM_ACTIVE(thr->regs->msr))
goto out_and_saveregs;
- /* Stash the original thread MSR, as giveup_fpu et al will
- * modify it. We hold onto it to see whether the task used
- * FP & vector regs. If the TIF_RESTORE_TM flag is set,
- * ckpt_regs.msr is already set.
- */
- if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
- thr->ckpt_regs.msr = thr->regs->msr;
-
TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
tsk->pid, thr->regs->nip,
@@ -881,6 +898,9 @@ void tm_recheckpoint(struct thread_struct *thread,
{
unsigned long flags;
+ if (!(thread->regs->msr & MSR_TM))
+ return;
+
/* We really can't be interrupted here as the TEXASR registers can't
* change and later in the trecheckpoint code, we have a userspace R1.
* So let's hard disable over this region.
@@ -910,10 +930,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
* If the task was using FP, we non-lazily reload both the original and
* the speculative FP register states. This is because the kernel
* doesn't see if/when a TM rollback occurs, so if we take an FP
- * unavoidable later, we are unable to determine which set of FP regs
+ * unavailable later, we are unable to determine which set of FP regs
* need to be restored.
*/
- if (!new->thread.regs)
+ if (!tm_enabled(new))
return;
if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
@@ -926,35 +946,35 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
"(new->msr 0x%lx, new->origmsr 0x%lx)\n",
new->pid, new->thread.regs->msr, msr);
- /* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&new->thread, msr);
- /* This loads the speculative FP/VEC state, if used */
- if (msr & MSR_FP) {
- do_load_up_transact_fpu(&new->thread);
- new->thread.regs->msr |=
- (MSR_FP | new->thread.fpexc_mode);
- }
-#ifdef CONFIG_ALTIVEC
- if (msr & MSR_VEC) {
- do_load_up_transact_altivec(&new->thread);
- new->thread.regs->msr |= MSR_VEC;
- }
-#endif
- /* We may as well turn on VSX too since all the state is restored now */
- if (msr & MSR_VSX)
- new->thread.regs->msr |= MSR_VSX;
+ /*
+ * The checkpointed state has been restored but the live state has
+ * not; ensure all the math functionality is turned off to trigger
+ * restore_math() to reload.
+ */
+ new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
TM_DEBUG("*** tm_recheckpoint of pid %d complete "
"(kernel msr 0x%lx)\n",
new->pid, mfmsr());
}
-static inline void __switch_to_tm(struct task_struct *prev)
+static inline void __switch_to_tm(struct task_struct *prev,
+ struct task_struct *new)
{
if (cpu_has_feature(CPU_FTR_TM)) {
- tm_enable();
- tm_reclaim_task(prev);
+ if (tm_enabled(prev) || tm_enabled(new))
+ tm_enable();
+
+ if (tm_enabled(prev)) {
+ prev->thread.load_tm++;
+ tm_reclaim_task(prev);
+ if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
+ prev->thread.regs->msr &= ~MSR_TM;
+ }
+
+ tm_recheckpoint_new_task(new);
}
}
@@ -976,6 +996,12 @@ void restore_tm_state(struct pt_regs *regs)
{
unsigned long msr_diff;
+ /*
+ * This is the only moment we should clear TIF_RESTORE_TM as
+ * it is here that ckpt_regs.msr and pt_regs.msr become the same
+ * again; anything else could lead to an incorrect ckpt_msr being
+ * saved and therefore incorrect signal contexts.
+ */
clear_thread_flag(TIF_RESTORE_TM);
if (!MSR_TM_ACTIVE(regs->msr))
return;
@@ -983,6 +1009,13 @@ void restore_tm_state(struct pt_regs *regs)
msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+ /* Ensure that restore_math() will restore */
+ if (msr_diff & MSR_FP)
+ current->thread.load_fp = 1;
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
+ current->thread.load_vec = 1;
+#endif
restore_math(regs);
regs->msr |= msr_diff;
@@ -990,7 +1023,7 @@ void restore_tm_state(struct pt_regs *regs)
#else
#define tm_recheckpoint_new_task(new)
-#define __switch_to_tm(prev)
+#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
@@ -1131,11 +1164,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
save_sprs(&prev->thread);
- __switch_to_tm(prev);
-
/* Save FPU, Altivec, VSX and SPE state */
giveup_all(prev);
+ __switch_to_tm(prev, new);
+
/*
* We can't take a PMU exception inside _switch() since there is a
* window where the kernel stack SLB and the kernel stack are out
@@ -1143,8 +1176,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
hard_irq_disable();
- tm_recheckpoint_new_task(new);
-
/*
* Call restore_sprs() before calling _switch(). If we move it after
* _switch() then we miss out on calling it for new tasks. The reason
@@ -1379,9 +1410,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
* transitions the CPU out of TM mode. Hence we need to call
* tm_recheckpoint_new_task() (on the same task) to restore the
* checkpointed state back and the TM mode.
+ *
+ * Can't pass dst because it isn't ready. Doesn't matter, passing
+ * dst is only important for __switch_to()
*/
- __switch_to_tm(src);
- tm_recheckpoint_new_task(src);
+ __switch_to_tm(src, src);
*dst = *src;
@@ -1623,8 +1656,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (cpu_has_feature(CPU_FTR_TM))
- regs->msr |= MSR_TM;
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
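
The enable_kernel_{fp,altivec,vsx}() hunks in process.c all follow one new pattern, enabled by msr_check_and_set() now returning the updated MSR: if the CPU is no longer transactional but the thread's saved MSR says it was, the checkpointed state has already been reclaimed and must not be given up again. A condensed sketch of that pattern (FP only; it relies on the process.c-local helpers msr_tm_active(), check_if_tm_restore_required() and __giveup_fpu() shown above):

/* Sketch: the common shape behind the three enable_kernel_*() changes. */
static void example_enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());
	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/* already reclaimed: don't overwrite the live FP state */
		if (!msr_tm_active(cpumsr) &&
		    msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
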
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d3eff99e938c..88ac964f4858 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -42,6 +42,7 @@
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/opal.h>
+#include <asm/asm-prototypes.h>
#include <linux/linux_logo.h>
@@ -2643,6 +2644,86 @@ static void __init fixup_device_tree_efika(void)
#define fixup_device_tree_efika()
#endif
+#ifdef CONFIG_PPC_PASEMI_NEMO
+/*
+ * CFE supplied on Nemo is broken in several ways, biggest
+ * problem is that it reassigns ISA interrupts to unused mpic ints.
+ * Add an interrupt-controller property for the io-bridge to use
+ * and correct the ints so we can attach them to an irq_domain
+ */
+static void __init fixup_device_tree_pasemi(void)
+{
+ u32 interrupts[2], parent, rval, val = 0;
+ char *name, *pci_name;
+ phandle iob, node;
+
+ /* Find the root pci node */
+ name = "/pxp@0,e0000000";
+ iob = call_prom("finddevice", 1, 1, ADDR(name));
+ if (!PHANDLE_VALID(iob))
+ return;
+
+ /* check if interrupt-controller node set yet */
+ if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
+ return;
+
+ prom_printf("adding interrupt-controller property for SB600...\n");
+
+ prom_setprop(iob, name, "interrupt-controller", &val, 0);
+
+ pci_name = "/pxp@0,e0000000/pci@11";
+ node = call_prom("finddevice", 1, 1, ADDR(pci_name));
+ parent = ADDR(iob);
+
+ for ( ; prom_next_node(&node); ) {
+ /* scan each node for one with an interrupt */
+ if (!PHANDLE_VALID(node))
+ continue;
+
+ rval = prom_getproplen(node, "interrupts");
+ if (rval == 0 || rval == PROM_ERROR)
+ continue;
+
+ prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
+ if ((interrupts[0] < 212) || (interrupts[0] > 222))
+ continue;
+
+ /* found a node, update both interrupts and interrupt-parent */
+ if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
+ interrupts[0] -= 203;
+ if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
+ interrupts[0] -= 213;
+ if (interrupts[0] == 221)
+ interrupts[0] = 14;
+ if (interrupts[0] == 222)
+ interrupts[0] = 8;
+
+ prom_setprop(node, pci_name, "interrupts", interrupts,
+ sizeof(interrupts));
+ prom_setprop(node, pci_name, "interrupt-parent", &parent,
+ sizeof(parent));
+ }
+
+ /*
+ * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
+ * so that generic isa-bridge code can add the SB600 and its on-board
+ * peripherals.
+ */
+ name = "/pxp@0,e0000000/io-bridge@0";
+ iob = call_prom("finddevice", 1, 1, ADDR(name));
+ if (!PHANDLE_VALID(iob))
+ return;
+
+ /* device_type is already set, just change it. */
+
+ prom_printf("Changing device_type of SB600 node...\n");
+
+ prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
+}
+#else /* !CONFIG_PPC_PASEMI_NEMO */
+static inline void fixup_device_tree_pasemi(void) { }
+#endif
+
static void __init fixup_device_tree(void)
{
fixup_device_tree_maple();
@@ -2650,6 +2731,7 @@ static void __init fixup_device_tree(void)
fixup_device_tree_chrp();
fixup_device_tree_pmac();
fixup_device_tree_efika();
+ fixup_device_tree_pasemi();
}
static void __init prom_find_boot_cpu(void)
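
The arithmetic in the Nemo fixup above is easier to follow pulled out on its own: CFE's reassigned MPIC numbers 212-222 are folded back into conventional ISA interrupt numbers. A sketch of the mapping as a pure function (hypothetical name, same ranges and offsets as the loop above):

#include <linux/types.h>

/* Sketch: 212..215 -> 9..12, 216..220 -> 3..7, 221 -> 14, 222 -> 8. */
static u32 nemo_remap_isa_int(u32 hw)
{
	if (hw >= 212 && hw <= 215)
		return hw - 203;
	if (hw >= 216 && hw <= 220)
		return hw - 213;
	if (hw == 221)
		return 14;
	if (hw == 222)
		return 8;
	return hw;		/* outside the range: left untouched */
}
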
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index bf91658a8a40..b1ec62f2cc31 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -39,6 +39,7 @@
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
+#include <asm/asm-prototypes.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -402,13 +403,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
}
/*
- * When the transaction is active, 'transact_fp' holds the current running
- * value of all FPR registers and 'fp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction. When transaction
- * is not active 'fp_state' holds the current running state of all the FPR
- * registers. So this function which returns the current running values of
- * all the FPR registers, needs to know whether any transaction is active
- * or not.
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
@@ -416,13 +413,6 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
* u64 fpr[32];
* u64 fpscr;
* };
- *
- * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
- * which determines the final code in this function. All the combinations of
- * these two config options are possible except the one below as transactional
- * memory config pulls in CONFIG_VSX automatically.
- *
- * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
*/
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -431,50 +421,29 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
#ifdef CONFIG_VSX
u64 buf[33];
int i;
-#endif
- flush_fp_to_thread(target);
-#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
- /* copy to local buffer then write that out */
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_TRANS_FPR(i);
- buf[32] = target->thread.transact_fp.fpscr;
- } else {
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
- }
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-#endif
+ flush_fp_to_thread(target);
-#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-#endif
-
-#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
+ flush_fp_to_thread(target);
+
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#endif
}
/*
- * When the transaction is active, 'transact_fp' holds the current running
- * value of all FPR registers and 'fp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction. When transaction
- * is not active 'fp_state' holds the current running state of all the FPR
- * registers. So this function which setss the current running values of
- * all the FPR registers, needs to know whether any transaction is active
- * or not.
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
@@ -483,12 +452,6 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
* u64 fpscr;
* };
*
- * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
- * which determines the final code in this function. All the combinations of
- * these two config options are possible except the one below as transactional
- * memory config pulls in CONFIG_VSX automatically.
- *
- * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
*/
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -497,44 +460,24 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
#ifdef CONFIG_VSX
u64 buf[33];
int i;
-#endif
+
flush_fp_to_thread(target);
-#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
- for (i = 0; i < 32 ; i++)
- target->thread.TS_TRANS_FPR(i) = buf[i];
- target->thread.transact_fp.fpscr = buf[32];
- } else {
- for (i = 0; i < 32 ; i++)
- target->thread.TS_FPR(i) = buf[i];
- target->thread.fp_state.fpscr = buf[32];
- }
- return 0;
-#endif
-
-#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
- /* copy to local buffer then write that out */
- i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
- if (i)
- return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
-#endif
-
-#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
+ flush_fp_to_thread(target);
+
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#endif
@@ -562,13 +505,10 @@ static int vr_active(struct task_struct *target,
}
/*
- * When the transaction is active, 'transact_vr' holds the current running
- * value of all the VMX registers and 'vr_state' holds the last checkpointed
- * value of all the VMX registers for the current transaction to fall back
- * on in case it aborts. When transaction is not active 'vr_state' holds
- * the current running state of all the VMX registers. So this function which
- * gets the current running values of all the VMX registers, needs to know
- * whether any transaction is active or not.
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
*
* Userspace interface buffer layout:
*
@@ -582,7 +522,6 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
@@ -590,19 +529,8 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- flush_fp_to_thread(target);
- flush_tmregs_to_thread(target);
- addr = &target->thread.transact_vr;
- } else {
- addr = &target->thread.vr_state;
- }
-#else
- addr = &target->thread.vr_state;
-#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- addr, 0,
+ &target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
@@ -614,14 +542,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- vrsave.word = target->thread.transact_vrsave;
- else
- vrsave.word = target->thread.vrsave;
-#else
vrsave.word = target->thread.vrsave;
-#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
@@ -631,13 +552,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
}
/*
- * When the transaction is active, 'transact_vr' holds the current running
- * value of all the VMX registers and 'vr_state' holds the last checkpointed
- * value of all the VMX registers for the current transaction to fall back
- * on in case it aborts. When transaction is not active 'vr_state' holds
- * the current running state of all the VMX registers. So this function which
- * sets the current running values of all the VMX registers, needs to know
- * whether any transaction is active or not.
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
*
* Userspace interface buffer layout:
*
@@ -651,7 +569,6 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
@@ -659,19 +576,8 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- flush_fp_to_thread(target);
- flush_tmregs_to_thread(target);
- addr = &target->thread.transact_vr;
- } else {
- addr = &target->thread.vr_state;
- }
-#else
- addr = &target->thread.vr_state;
-#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- addr, 0,
+ &target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
@@ -683,27 +589,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- vrsave.word = target->thread.transact_vrsave;
- else
- vrsave.word = target->thread.vrsave;
-#else
vrsave.word = target->thread.vrsave;
-#endif
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
- if (!ret) {
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- target->thread.transact_vrsave = vrsave.word;
- else
- target->thread.vrsave = vrsave.word;
-#else
+ if (!ret)
target->thread.vrsave = vrsave.word;
-#endif
- }
}
return ret;
@@ -725,13 +616,10 @@ static int vsr_active(struct task_struct *target,
}
/*
- * When the transaction is active, 'transact_fp' holds the current running
- * value of all FPR registers and 'fp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction. When transaction
- * is not active 'fp_state' holds the current running state of all the FPR
- * registers. So this function which returns the current running values of
- * all the FPR registers, needs to know whether any transaction is active
- * or not.
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
*
* Userspace interface buffer layout:
*
@@ -746,27 +634,14 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
u64 buf[32];
int ret, i;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
-#endif
flush_vsx_to_thread(target);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.
- transact_fp.fpr[i][TS_VSRLOWOFFSET];
- } else {
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.
- fp_state.fpr[i][TS_VSRLOWOFFSET];
- }
-#else
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
-#endif
+
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
@@ -774,12 +649,10 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
}
/*
- * When the transaction is active, 'transact_fp' holds the current running
- * value of all FPR registers and 'fp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction. When transaction
- * is not active 'fp_state' holds the current running state of all the FPR
- * registers. So this function which sets the current running values of all
- * the FPR registers, needs to know whether any transaction is active or not.
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
*
* Userspace interface buffer layout:
*
@@ -794,31 +667,16 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
u64 buf[32];
int ret,i;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
-#endif
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
- for (i = 0; i < 32 ; i++)
- target->thread.transact_fp.
- fpr[i][TS_VSRLOWOFFSET] = buf[i];
- } else {
+ if (!ret)
for (i = 0; i < 32 ; i++)
- target->thread.fp_state.
- fpr[i][TS_VSRLOWOFFSET] = buf[i];
- }
-#else
- for (i = 0; i < 32 ; i++)
- target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
-#endif
-
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
@@ -944,9 +802,9 @@ static int tm_cgpr_get(struct task_struct *target,
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
@@ -1009,9 +867,9 @@ static int tm_cgpr_set(struct task_struct *target,
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
@@ -1087,7 +945,7 @@ static int tm_cfpr_active(struct task_struct *target,
*
* This function gets in transaction checkpointed FPR registers.
*
- * When the transaction is active 'fp_state' holds the checkpointed
+ * When the transaction is active 'ckfp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed FPR registers.
* The userspace interface buffer layout is as follows.
@@ -1111,14 +969,14 @@ static int tm_cfpr_get(struct task_struct *target,
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}
@@ -1133,7 +991,7 @@ static int tm_cfpr_get(struct task_struct *target,
*
* This function sets in transaction checkpointed FPR registers.
*
- * When the transaction is active 'fp_state' holds the checkpointed
+ * When the transaction is active 'ckfp_state' holds the checkpointed
* FPR register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* FPR registers. The userspace interface buffer layout is as follows.
@@ -1157,17 +1015,17 @@ static int tm_cfpr_set(struct task_struct *target,
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
- target->thread.TS_FPR(i) = buf[i];
- target->thread.fp_state.fpscr = buf[32];
+ target->thread.TS_CKFPR(i) = buf[i];
+ target->thread.ckfp_state.fpscr = buf[32];
return 0;
}
@@ -1202,7 +1060,7 @@ static int tm_cvmx_active(struct task_struct *target,
*
* This function gets in transaction checkpointed VMX registers.
*
- * When the transaction is active 'vr_state' and 'vr_save' hold
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
@@ -1229,12 +1087,12 @@ static int tm_cvmx_get(struct task_struct *target,
return -ENODATA;
/* Flush the state */
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
+ &target->thread.ckvr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
@@ -1245,7 +1103,7 @@ static int tm_cvmx_get(struct task_struct *target,
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.vrsave;
+ vrsave.word = target->thread.ckvrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
@@ -1264,7 +1122,7 @@ static int tm_cvmx_get(struct task_struct *target,
*
* This function sets in transaction checkpointed VMX registers.
*
- * When the transaction is active 'vr_state' and 'vr_save' hold
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
@@ -1290,12 +1148,12 @@ static int tm_cvmx_set(struct task_struct *target,
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
+ &target->thread.ckvr_state, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
@@ -1306,11 +1164,11 @@ static int tm_cvmx_set(struct task_struct *target,
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.vrsave;
+ vrsave.word = target->thread.ckvrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
- target->thread.vrsave = vrsave.word;
+ target->thread.ckvrsave = vrsave.word;
}
return ret;
@@ -1348,7 +1206,7 @@ static int tm_cvsx_active(struct task_struct *target,
*
* This function gets in transaction checkpointed VSX registers.
*
- * When the transaction is active 'fp_state' holds the checkpointed
+ * When the transaction is active 'ckfp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed VSX registers.
* The userspace interface buffer layout is as follows.
@@ -1372,13 +1230,13 @@ static int tm_cvsx_get(struct task_struct *target,
return -ENODATA;
/* Flush the state */
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
@@ -1396,7 +1254,7 @@ static int tm_cvsx_get(struct task_struct *target,
*
* This function sets in transaction checkpointed VSX registers.
*
- * When the transaction is active 'fp_state' holds the checkpointed
+ * When the transaction is active 'ckfp_state' holds the checkpointed
* VSX register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* FPR registers. The userspace interface buffer layout is as follows.
@@ -1420,15 +1278,16 @@ static int tm_cvsx_set(struct task_struct *target,
return -ENODATA;
/* Flush the state */
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
- for (i = 0; i < 32 ; i++)
- target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ if (!ret)
+ for (i = 0; i < 32 ; i++)
+ target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
@@ -1484,9 +1343,9 @@ static int tm_spr_get(struct task_struct *target,
return -ENODEV;
/* Flush the states */
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -1540,9 +1399,9 @@ static int tm_spr_set(struct task_struct *target,
return -ENODEV;
/* Flush the states */
+ flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
- flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -2065,33 +1924,12 @@ static const struct user_regset_view user_ppc_native_view = {
static int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf, bool tm_active)
+ void *kbuf, void __user *ubuf,
+ unsigned long *regs)
{
- const unsigned long *regs = &target->thread.regs->gpr[0];
- const unsigned long *ckpt_regs;
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
- int i;
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- ckpt_regs = &target->thread.ckpt_regs.gpr[0];
-#endif
- if (tm_active) {
- regs = ckpt_regs;
- } else {
- if (target->thread.regs == NULL)
- return -EIO;
-
- if (!FULL_REGS(target->thread.regs)) {
- /*
- * We have a partial register set.
- * Fill 14-31 with bogus values.
- */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
- }
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -2133,29 +1971,13 @@ static int gpr32_get_common(struct task_struct *target,
static int gpr32_set_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf, bool tm_active)
+ const void *kbuf, const void __user *ubuf,
+ unsigned long *regs)
{
- unsigned long *regs = &target->thread.regs->gpr[0];
- unsigned long *ckpt_regs;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- ckpt_regs = &target->thread.ckpt_regs.gpr[0];
-#endif
-
- if (tm_active) {
- regs = ckpt_regs;
- } else {
- regs = &target->thread.regs->gpr[0];
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- CHECK_FULL_REGS(target->thread.regs);
- }
-
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -2220,7 +2042,8 @@ static int tm_cgpr32_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.ckpt_regs.gpr[0]);
}
static int tm_cgpr32_set(struct task_struct *target,
@@ -2228,7 +2051,8 @@ static int tm_cgpr32_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.ckpt_regs.gpr[0]);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -2237,7 +2061,21 @@ static int gpr32_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
+ int i;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ if (!FULL_REGS(target->thread.regs)) {
+ /*
+ * We have a partial register set.
+ * Fill 14-31 with bogus values.
+ */
+ for (i = 14; i < 32; i++)
+ target->thread.regs->gpr[i] = NV_REG_POISON;
+ }
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.regs->gpr[0]);
}
static int gpr32_set(struct task_struct *target,
@@ -2245,7 +2083,12 @@ static int gpr32_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.regs->gpr[0]);
}
/*
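
After this ptrace.c cleanup, fpr_get()/fpr_set() always operate on the running state in 'fp_state', and the checkpointed values are only reachable through the tm_* regsets backed by 'ckfp_state'/'ckvr_state'. A userspace sketch of reading the running FP set through the documented { u64 fpr[32]; u64 fpscr; } layout (error handling omitted; pid is assumed to be an already-stopped tracee):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_fpregs {
	uint64_t fpr[32];
	uint64_t fpscr;
};

static long read_fpregs(pid_t pid, struct ppc_fpregs *out)
{
	struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

	/* NT_PRFPREG selects the FP regset that fpr_get() exports */
	return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRFPREG, &iov);
}
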
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cb64d6feb45a..bbe77aed198d 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -99,22 +99,24 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
}
}
-static void do_signal(struct pt_regs *regs)
+static void do_signal(struct task_struct *tsk)
{
sigset_t *oldset = sigmask_to_save();
struct ksignal ksig;
int ret;
int is32 = is_32bit_task();
+ BUG_ON(tsk != current);
+
get_signal(&ksig);
/* Is there any syscall restart business here ? */
- check_syscall_restart(regs, &ksig.ka, ksig.sig > 0);
+ check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
if (ksig.sig <= 0) {
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
- regs->trap = 0;
+ tsk->thread.regs->trap = 0;
return; /* no signals delivered */
}
@@ -124,23 +126,22 @@ static void do_signal(struct pt_regs *regs)
* user space. The DABR will have been cleared if it
* triggered inside the kernel.
*/
- if (current->thread.hw_brk.address &&
- current->thread.hw_brk.type)
- __set_breakpoint(&current->thread.hw_brk);
+ if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
+ __set_breakpoint(&tsk->thread.hw_brk);
#endif
/* Re-enable the breakpoints for the signal stack */
- thread_change_pc(current, regs);
+ thread_change_pc(tsk, tsk->thread.regs);
if (is32) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
- ret = handle_rt_signal32(&ksig, oldset, regs);
+ ret = handle_rt_signal32(&ksig, oldset, tsk);
else
- ret = handle_signal32(&ksig, oldset, regs);
+ ret = handle_signal32(&ksig, oldset, tsk);
} else {
- ret = handle_rt_signal64(&ksig, oldset, regs);
+ ret = handle_rt_signal64(&ksig, oldset, tsk);
}
- regs->trap = 0;
+ tsk->thread.regs->trap = 0;
signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
@@ -151,8 +152,10 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
+ if (thread_info_flags & _TIF_SIGPENDING) {
+ BUG_ON(regs != current->thread.regs);
+ do_signal(current);
+ }
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
@@ -162,7 +165,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
user_enter();
}
-unsigned long get_tm_stackpointer(struct pt_regs *regs)
+unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
/* When in an active transaction that takes a signal, we need to be
* careful with the stack. It's possible that the stack has moved back
@@ -187,11 +190,13 @@ unsigned long get_tm_stackpointer(struct pt_regs *regs)
*/
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(regs->msr)) {
+ BUG_ON(tsk != current);
+
+ if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
tm_reclaim_current(TM_CAUSE_SIGNAL);
- if (MSR_TM_TRANSACTIONAL(regs->msr))
- return current->thread.ckpt_regs.gpr[1];
+ if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+ return tsk->thread.ckpt_regs.gpr[1];
}
#endif
- return regs->gpr[1];
+ return tsk->thread.regs->gpr[1];
}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index be305c858e51..7c59d88b9d86 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -16,39 +16,41 @@ extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
size_t frame_size, int is_32);
extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
- struct pt_regs *regs);
+ struct task_struct *tsk);
extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
- struct pt_regs *regs);
+ struct task_struct *tsk);
extern unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task);
-extern unsigned long copy_transact_fpr_to_user(void __user *to,
+extern unsigned long copy_ckfpr_to_user(void __user *to,
struct task_struct *task);
extern unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from);
-extern unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+extern unsigned long copy_ckfpr_from_user(struct task_struct *task,
void __user *from);
+extern unsigned long get_tm_stackpointer(struct task_struct *tsk);
+
#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task);
-extern unsigned long copy_transact_vsx_to_user(void __user *to,
+extern unsigned long copy_ckvsx_to_user(void __user *to,
struct task_struct *task);
extern unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from);
-extern unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
void __user *from);
#endif
#ifdef CONFIG_PPC64
extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs);
+ struct task_struct *tsk);
#else /* CONFIG_PPC64 */
static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
+ struct task_struct *tsk)
{
return -EFAULT;
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index a7daf749b97f..27aa913ac91d 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -44,6 +44,7 @@
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
+#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
@@ -315,7 +316,7 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-unsigned long copy_transact_fpr_to_user(void __user *to,
+unsigned long copy_ckfpr_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NFPREG];
@@ -323,12 +324,12 @@ unsigned long copy_transact_fpr_to_user(void __user *to,
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- buf[i] = task->thread.TS_TRANS_FPR(i);
- buf[i] = task->thread.transact_fp.fpscr;
+ buf[i] = task->thread.TS_CKFPR(i);
+ buf[i] = task->thread.ckfp_state.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
-unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+unsigned long copy_ckfpr_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NFPREG];
@@ -337,13 +338,13 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- task->thread.TS_TRANS_FPR(i) = buf[i];
- task->thread.transact_fp.fpscr = buf[i];
+ task->thread.TS_CKFPR(i) = buf[i];
+ task->thread.ckfp_state.fpscr = buf[i];
return 0;
}
-unsigned long copy_transact_vsx_to_user(void __user *to,
+unsigned long copy_ckvsx_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NVSRHALFREG];
@@ -351,11 +352,11 @@ unsigned long copy_transact_vsx_to_user(void __user *to,
/* save FPR copy to local buffer then write to the thread_struct */
for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
+ buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
-unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+unsigned long copy_ckvsx_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NVSRHALFREG];
@@ -364,7 +365,7 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -384,17 +385,17 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-inline unsigned long copy_transact_fpr_to_user(void __user *to,
+inline unsigned long copy_ckfpr_to_user(void __user *to,
struct task_struct *task)
{
- return __copy_to_user(to, task->thread.transact_fp.fpr,
+ return __copy_to_user(to, task->thread.ckfp_state.fpr,
ELF_NFPREG * sizeof(double));
}
-inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
void __user *from)
{
- return __copy_from_user(task->thread.transact_fp.fpr, from,
+ return __copy_from_user(task->thread.ckfp_state.fpr, from,
ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -525,9 +526,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
*/
regs->msr &= ~MSR_TS_MASK;
- /* Make sure floating point registers are stored in regs */
- flush_fp_to_thread(current);
-
/* Save both sets of general registers */
if (save_general_regs(&current->thread.ckpt_regs, frame)
|| save_general_regs(regs, tm_frame))
@@ -545,18 +543,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
- flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
+ if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
if (msr & MSR_VEC) {
if (__copy_to_user(&tm_frame->mc_vregs,
- &current->thread.transact_vr,
+ &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
} else {
if (__copy_to_user(&tm_frame->mc_vregs,
- &current->thread.vr_state,
+ &current->thread.ckvr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
}
@@ -573,28 +570,28 @@ static int save_tm_user_regs(struct pt_regs *regs,
* most significant bits of that same vector. --BenH
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.vrsave = mfspr(SPRN_VRSAVE);
- if (__put_user(current->thread.vrsave,
+ current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
+ if (__put_user(current->thread.ckvrsave,
(u32 __user *)&frame->mc_vregs[32]))
return 1;
if (msr & MSR_VEC) {
- if (__put_user(current->thread.transact_vrsave,
+ if (__put_user(current->thread.vrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
} else {
- if (__put_user(current->thread.vrsave,
+ if (__put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
}
#endif /* CONFIG_ALTIVEC */
- if (copy_fpr_to_user(&frame->mc_fregs, current))
+ if (copy_ckfpr_to_user(&frame->mc_fregs, current))
return 1;
if (msr & MSR_FP) {
- if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
+ if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
return 1;
} else {
- if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
+ if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
return 1;
}
@@ -606,15 +603,14 @@ static int save_tm_user_regs(struct pt_regs *regs,
* contains valid data
*/
if (current->thread.used_vsr) {
- flush_vsx_to_thread(current);
- if (copy_vsx_to_user(&frame->mc_vsregs, current))
+ if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
return 1;
if (msr & MSR_VSX) {
- if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
+ if (copy_vsx_to_user(&tm_frame->mc_vsregs,
current))
return 1;
} else {
- if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
+ if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
return 1;
}
@@ -698,6 +694,7 @@ static long restore_user_regs(struct pt_regs *regs,
if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
+ current->thread.used_vr = true;
} else if (current->thread.used_vr)
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
@@ -724,6 +721,7 @@ static long restore_user_regs(struct pt_regs *regs,
*/
if (copy_vsx_from_user(current, &sr->mc_vsregs))
return 1;
+ current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
@@ -743,6 +741,7 @@ static long restore_user_regs(struct pt_regs *regs,
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32)))
return 1;
+ current->thread.used_spe = true;
} else if (current->thread.used_spe)
memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
@@ -793,33 +792,34 @@ static long restore_tm_user_regs(struct pt_regs *regs,
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
+ if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
sizeof(sr->mc_vregs)) ||
- __copy_from_user(&current->thread.transact_vr,
+ __copy_from_user(&current->thread.vr_state,
&tm_sr->mc_vregs,
sizeof(sr->mc_vregs)))
return 1;
+ current->thread.used_vr = true;
} else if (current->thread.used_vr) {
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
- memset(&current->thread.transact_vr, 0,
+ memset(&current->thread.ckvr_state, 0,
ELF_NVRREG * sizeof(vector128));
}
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave,
+ if (__get_user(current->thread.ckvrsave,
(u32 __user *)&sr->mc_vregs[32]) ||
- __get_user(current->thread.transact_vrsave,
+ __get_user(current->thread.vrsave,
(u32 __user *)&tm_sr->mc_vregs[32]))
return 1;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mtspr(SPRN_VRSAVE, current->thread.vrsave);
+ mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
if (copy_fpr_from_user(current, &sr->mc_fregs) ||
- copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
+ copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
return 1;
#ifdef CONFIG_VSX
@@ -829,13 +829,14 @@ static long restore_tm_user_regs(struct pt_regs *regs,
* Restore VSX registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
- copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
+ if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
+ copy_ckvsx_from_user(current, &sr->mc_vsregs))
return 1;
+ current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif /* CONFIG_VSX */
@@ -848,6 +849,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32)))
return 1;
+ current->thread.used_spe = true;
} else if (current->thread.used_spe)
memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
@@ -877,13 +879,14 @@ static long restore_tm_user_regs(struct pt_regs *regs,
tm_recheckpoint(&current->thread, msr);
/* This loads the speculative FP/VEC state, if used */
+ msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
- do_load_up_transact_fpu(&current->thread);
+ load_fp_state(&current->thread.fp_state);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
- do_load_up_transact_altivec(&current->thread);
+ load_vr_state(&current->thread.vr_state);
regs->msr |= MSR_VEC;
}
#endif
@@ -971,7 +974,7 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
* (one which gets siginfo).
*/
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
- struct pt_regs *regs)
+ struct task_struct *tsk)
{
struct rt_sigframe __user *rt_sf;
struct mcontext __user *frame;
@@ -980,10 +983,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
unsigned long newsp = 0;
int sigret;
unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
+
+ BUG_ON(tsk != current);
/* Set up Signal Frame */
/* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
+ rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
addr = rt_sf;
if (unlikely(rt_sf == NULL))
goto badframe;
@@ -1000,9 +1006,9 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
+ if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
sigret = 0;
- tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+ tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
sigret = __NR_rt_sigreturn;
tramp = (unsigned long) frame->tramp;
@@ -1029,7 +1035,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
}
regs->link = tramp;
- current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
+ tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1054,7 +1060,7 @@ badframe:
printk_ratelimited(KERN_INFO
"%s[%d]: bad frame in handle_rt_signal32: "
"%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
+ tsk->comm, tsk->pid,
addr, regs->nip, regs->link);
return 1;
@@ -1410,7 +1416,8 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
/*
* OK, we're invoking a handler
*/
-int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs)
+int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ struct task_struct *tsk)
{
struct sigcontext __user *sc;
struct sigframe __user *frame;
@@ -1418,9 +1425,12 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
unsigned long newsp = 0;
int sigret;
unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
+
+ BUG_ON(tsk != current);
/* Set up Signal Frame */
- frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1);
+ frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
if (unlikely(frame == NULL))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
@@ -1439,9 +1449,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
|| __put_user(ksig->sig, &sc->signal))
goto badframe;
- if (vdso32_sigtramp && current->mm->context.vdso_base) {
+ if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
sigret = 0;
- tramp = current->mm->context.vdso_base + vdso32_sigtramp;
+ tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
} else {
sigret = __NR_sigreturn;
tramp = (unsigned long) frame->mctx.tramp;
@@ -1463,7 +1473,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs
regs->link = tramp;
- current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
+ tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
@@ -1483,7 +1493,7 @@ badframe:
printk_ratelimited(KERN_INFO
"%s[%d]: bad frame in handle_signal32: "
"%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
+ tsk->comm, tsk->pid,
frame, regs->nip, regs->link);
return 1;
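
The renames in signal_32.c above all follow one convention. The sketch below is illustrative only (a made-up struct, not the kernel's real thread_struct) and shows which side of the live/checkpointed split each field name now refers to.

/*
 * Hypothetical sketch of the naming convention assumed by the signal code
 * after this patch: unprefixed fields hold the live (transactional) state,
 * ck-prefixed fields hold the checkpointed state restored on TM failure.
 */
struct tm_state_sketch {
	double		fp_state[32];	/* live (transactional) FPRs */
	double		ckfp_state[32];	/* checkpointed FPRs */
	unsigned int	vrsave;		/* live VRSAVE */
	unsigned int	ckvrsave;	/* checkpointed VRSAVE */
};

At signal delivery the checkpointed set goes into the normal sigcontext and the live set into the transactional one; sigreturn reverses that mapping, which is why the to_user/from_user helper pairs above were swapped rather than simply renamed.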
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 70409bb90a95..96698fdf93b4 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -35,6 +35,7 @@
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
+#include <asm/asm-prototypes.h>
#include "signal.h"
@@ -90,9 +91,9 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
* Set up the sigcontext for the signal frame.
*/
-static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
- int signr, sigset_t *set, unsigned long handler,
- int ctx_has_vsx_region)
+static long setup_sigcontext(struct sigcontext __user *sc,
+ struct task_struct *tsk, int signr, sigset_t *set,
+ unsigned long handler, int ctx_has_vsx_region)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -106,17 +107,20 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
unsigned long vrsave;
#endif
+ struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
long err = 0;
+ BUG_ON(tsk != current);
+
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
/* save altivec registers */
- if (current->thread.used_vr) {
- flush_altivec_to_thread(current);
+ if (tsk->thread.used_vr) {
+ flush_altivec_to_thread(tsk);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, &current->thread.vr_state,
+ err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_regs
* contains valid data.
@@ -129,16 +133,16 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
vrsave = 0;
if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
vrsave = mfspr(SPRN_VRSAVE);
- current->thread.vrsave = vrsave;
+ tsk->thread.vrsave = vrsave;
}
err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
- flush_fp_to_thread(current);
+ flush_fp_to_thread(tsk);
/* copy fpr regs and fpscr */
- err |= copy_fpr_to_user(&sc->fp_regs, current);
+ err |= copy_fpr_to_user(&sc->fp_regs, tsk);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
@@ -151,10 +155,10 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* then out to userspace. Update v_regs to point after the
* VMX data.
*/
- if (current->thread.used_vsr && ctx_has_vsx_region) {
- flush_vsx_to_thread(current);
+ if (tsk->thread.used_vsr && ctx_has_vsx_region) {
+ flush_vsx_to_thread(tsk);
v_regs += ELF_NVRREG;
- err |= copy_vsx_to_user(v_regs, current);
+ err |= copy_vsx_to_user(v_regs, tsk);
/* set MSR_VSX in the MSR value in the frame to
* indicate that sc->vs_reg) contains valid data.
*/
@@ -187,7 +191,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
*/
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
- struct pt_regs *regs,
+ struct task_struct *tsk,
int signr, sigset_t *set, unsigned long handler)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
@@ -202,9 +206,12 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
- unsigned long msr = regs->msr;
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned long msr = tsk->thread.ckpt_regs.msr;
long err = 0;
+ BUG_ON(tsk != current);
+
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
@@ -214,28 +221,25 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
*/
regs->msr &= ~MSR_TS_MASK;
- flush_fp_to_thread(current);
-
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
/* save altivec registers */
- if (current->thread.used_vr) {
- flush_altivec_to_thread(current);
+ if (tsk->thread.used_vr) {
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, &current->thread.vr_state,
+ err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
33 * sizeof(vector128));
/* If VEC was enabled there are transactional VRs valid too,
* else they're a copy of the checkpointed VRs.
*/
if (msr & MSR_VEC)
err |= __copy_to_user(tm_v_regs,
- &current->thread.transact_vr,
+ &tsk->thread.vr_state,
33 * sizeof(vector128));
else
err |= __copy_to_user(tm_v_regs,
- &current->thread.vr_state,
+ &tsk->thread.ckvr_state,
33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate
@@ -247,13 +251,13 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
* use altivec.
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.vrsave = mfspr(SPRN_VRSAVE);
- err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+ tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
+ err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
if (msr & MSR_VEC)
- err |= __put_user(current->thread.transact_vrsave,
+ err |= __put_user(tsk->thread.vrsave,
(u32 __user *)&tm_v_regs[33]);
else
- err |= __put_user(current->thread.vrsave,
+ err |= __put_user(tsk->thread.ckvrsave,
(u32 __user *)&tm_v_regs[33]);
#else /* CONFIG_ALTIVEC */
@@ -262,11 +266,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
#endif /* CONFIG_ALTIVEC */
/* copy fpr regs and fpscr */
- err |= copy_fpr_to_user(&sc->fp_regs, current);
+ err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
if (msr & MSR_FP)
- err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
+ err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
else
- err |= copy_fpr_to_user(&tm_sc->fp_regs, current);
+ err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);
#ifdef CONFIG_VSX
/*
@@ -274,17 +278,16 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
* then out to userspace. Update v_regs to point after the
* VMX data.
*/
- if (current->thread.used_vsr) {
- flush_vsx_to_thread(current);
+ if (tsk->thread.used_vsr) {
v_regs += ELF_NVRREG;
tm_v_regs += ELF_NVRREG;
- err |= copy_vsx_to_user(v_regs, current);
+ err |= copy_ckvsx_to_user(v_regs, tsk);
if (msr & MSR_VSX)
- err |= copy_transact_vsx_to_user(tm_v_regs, current);
+ err |= copy_vsx_to_user(tm_v_regs, tsk);
else
- err |= copy_vsx_to_user(tm_v_regs, current);
+ err |= copy_ckvsx_to_user(tm_v_regs, tsk);
/* set MSR_VSX in the MSR value in the frame to
* indicate that sc->vs_regs contains valid data.
@@ -298,7 +301,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
WARN_ON(!FULL_REGS(regs));
err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->gp_regs,
- &current->thread.ckpt_regs, GP_REGS_SIZE);
+ &tsk->thread.ckpt_regs, GP_REGS_SIZE);
err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
err |= __put_user(signr, &sc->signal);
@@ -314,7 +317,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
* Restore the sigcontext from the signal frame.
*/
-static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
+static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
@@ -323,10 +326,13 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
unsigned long err = 0;
unsigned long save_r13 = 0;
unsigned long msr;
+ struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
int i;
#endif
+ BUG_ON(tsk != current);
+
/* If this is not a signal return, we preserve the TLS in r13 */
if (!sig)
save_r13 = regs->gpr[13];
@@ -356,7 +362,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
/*
* Force reload of FP/VEC.
- * This has to be done before copying stuff into current->thread.fpr/vr
+ * This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
@@ -368,21 +374,23 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
- if (v_regs != NULL && (msr & MSR_VEC) != 0)
- err |= __copy_from_user(&current->thread.vr_state, v_regs,
+ if (v_regs != NULL && (msr & MSR_VEC) != 0) {
+ err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
33 * sizeof(vector128));
- else if (current->thread.used_vr)
- memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+ tsk->thread.used_vr = true;
+ } else if (tsk->thread.used_vr) {
+ memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
+ }
/* Always get VRSAVE back */
if (v_regs != NULL)
- err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+ err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
else
- current->thread.vrsave = 0;
+ tsk->thread.vrsave = 0;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mtspr(SPRN_VRSAVE, current->thread.vrsave);
+ mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
- err |= copy_fpr_from_user(current, &sc->fp_regs);
+ err |= copy_fpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
@@ -390,11 +398,13 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
* buffer for formatting, then into the taskstruct.
*/
v_regs += ELF_NVRREG;
- if ((msr & MSR_VSX) != 0)
- err |= copy_vsx_from_user(current, v_regs);
- else
+ if ((msr & MSR_VSX) != 0) {
+ err |= copy_vsx_from_user(tsk, v_regs);
+ tsk->thread.used_vsr = true;
+ } else {
for (i = 0; i < 32 ; i++)
- current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ }
#endif
return err;
}
@@ -404,7 +414,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
* Restore the two sigcontexts from the frame of a transactional process.
*/
-static long restore_tm_sigcontexts(struct pt_regs *regs,
+static long restore_tm_sigcontexts(struct task_struct *tsk,
struct sigcontext __user *sc,
struct sigcontext __user *tm_sc)
{
@@ -413,12 +423,16 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
#endif
unsigned long err = 0;
unsigned long msr;
+ struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
int i;
#endif
+
+ BUG_ON(tsk != current);
+
/* copy the GPRs */
err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
- err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
+ err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
sizeof(regs->gpr));
/*
@@ -430,7 +444,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
* we don't need to re-copy them here.
*/
err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
- err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
+ err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -449,13 +463,13 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
- err |= __get_user(current->thread.ckpt_regs.ctr,
+ err |= __get_user(tsk->thread.ckpt_regs.ctr,
&sc->gp_regs[PT_CTR]);
- err |= __get_user(current->thread.ckpt_regs.link,
+ err |= __get_user(tsk->thread.ckpt_regs.link,
&sc->gp_regs[PT_LNK]);
- err |= __get_user(current->thread.ckpt_regs.xer,
+ err |= __get_user(tsk->thread.ckpt_regs.xer,
&sc->gp_regs[PT_XER]);
- err |= __get_user(current->thread.ckpt_regs.ccr,
+ err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
/* These regs are not checkpointed; they can go in 'regs'. */
@@ -466,7 +480,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
/*
* Force reload of FP/VEC.
- * This has to be done before copying stuff into current->thread.fpr/vr
+ * This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
@@ -483,32 +497,33 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
- err |= __copy_from_user(&current->thread.vr_state, v_regs,
+ err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
33 * sizeof(vector128));
- err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
+ err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
33 * sizeof(vector128));
+ tsk->thread.used_vr = true;
}
- else if (current->thread.used_vr) {
- memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
- memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
+ else if (tsk->thread.used_vr) {
+ memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
+ memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL && tm_v_regs != NULL) {
- err |= __get_user(current->thread.vrsave,
+ err |= __get_user(tsk->thread.ckvrsave,
(u32 __user *)&v_regs[33]);
- err |= __get_user(current->thread.transact_vrsave,
+ err |= __get_user(tsk->thread.vrsave,
(u32 __user *)&tm_v_regs[33]);
}
else {
- current->thread.vrsave = 0;
- current->thread.transact_vrsave = 0;
+ tsk->thread.vrsave = 0;
+ tsk->thread.ckvrsave = 0;
}
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mtspr(SPRN_VRSAVE, current->thread.vrsave);
+ mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
- err |= copy_fpr_from_user(current, &sc->fp_regs);
- err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
+ err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
+ err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
@@ -518,32 +533,31 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
if (v_regs && ((msr & MSR_VSX) != 0)) {
v_regs += ELF_NVRREG;
tm_v_regs += ELF_NVRREG;
- err |= copy_vsx_from_user(current, v_regs);
- err |= copy_transact_vsx_from_user(current, tm_v_regs);
+ err |= copy_vsx_from_user(tsk, tm_v_regs);
+ err |= copy_ckvsx_from_user(tsk, v_regs);
+ tsk->thread.used_vsr = true;
} else {
for (i = 0; i < 32 ; i++) {
- current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
- current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
+ tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+ tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
}
#endif
tm_enable();
/* Make sure the transaction is marked as failed */
- current->thread.tm_texasr |= TEXASR_FS;
+ tsk->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
- tm_recheckpoint(&current->thread, msr);
+ tm_recheckpoint(&tsk->thread, msr);
- /* This loads the speculative FP/VEC state, if used */
+ msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
- do_load_up_transact_fpu(&current->thread);
- regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ load_fp_state(&tsk->thread.fp_state);
+ regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
- do_load_up_transact_altivec(&current->thread);
+ load_vr_state(&tsk->thread.vr_state);
regs->msr |= MSR_VEC;
}
-#endif
return err;
}
@@ -594,6 +608,8 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
unsigned long new_msr = 0;
int ctx_has_vsx_region = 0;
+ BUG_ON(regs != current->thread.regs);
+
if (new_ctx &&
get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
return -EFAULT;
@@ -616,7 +632,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
if (old_ctx != NULL) {
if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
- || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
+ || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
ctx_has_vsx_region)
|| __copy_to_user(&old_ctx->uc_sigmask,
&current->blocked, sizeof(sigset_t)))
@@ -644,7 +660,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
do_exit(SIGSEGV);
set_current_blocked(&set);
- if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
+ if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
do_exit(SIGSEGV);
/* This returns like rt_sigreturn */
@@ -667,6 +683,8 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long msr;
#endif
+ BUG_ON(current->thread.regs != regs);
+
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -698,14 +716,14 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
struct ucontext __user *uc_transact;
if (__get_user(uc_transact, &uc->uc_link))
goto badframe;
- if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
}
else
/* Fall through, for non-TM restore */
#endif
- if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
+ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
goto badframe;
if (restore_altstack(&uc->uc_stack))
@@ -724,13 +742,17 @@ badframe:
return 0;
}
-int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ struct task_struct *tsk)
{
struct rt_sigframe __user *frame;
unsigned long newsp = 0;
long err = 0;
+ struct pt_regs *regs = tsk->thread.regs;
+
+ BUG_ON(tsk != current);
- frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
+ frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
if (unlikely(frame == NULL))
goto badframe;
@@ -751,14 +773,13 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
- regs, ksig->sig,
- NULL,
+ tsk, ksig->sig, NULL,
(unsigned long)ksig->ka.sa.sa_handler);
} else
#endif
{
err |= __put_user(0, &frame->uc.uc_link);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
NULL, (unsigned long)ksig->ka.sa.sa_handler,
1);
}
@@ -767,11 +788,11 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
- current->thread.fp_state.fpscr = 0;
+ tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
- regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
+ regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
@@ -821,7 +842,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
badframe:
if (show_unhandled_signals)
printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, "setup_rt_frame",
+ tsk->comm, tsk->pid, "setup_rt_frame",
(long)frame, regs->nip, regs->link);
return 1;
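
As a reading aid, here is a minimal sketch of the restore path the signal_64.c hunks converge on, using only helpers visible in this patch (msr_check_and_set(), load_fp_state(), load_vr_state()); the wrapper name is invented and this is not a drop-in replacement for the removed do_load_up_transact_fpu()/do_load_up_transact_altivec() calls.

/* Sketch only: load the live (transactional) FP/VMX state after
 * tm_recheckpoint(), mirroring the tail of restore_tm_sigcontexts().
 */
static void load_live_tm_state_sketch(struct task_struct *tsk,
				      struct pt_regs *regs, unsigned long msr)
{
	/* enable the facilities in the kernel MSR before touching them */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&tsk->thread.fp_state);	/* live FPRs + FPSCR */
		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
	}
	if (msr & MSR_VEC) {
		load_vr_state(&tsk->thread.vr_state);	/* live VRs + VSCR */
		regs->msr |= MSR_VEC;
	}
}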
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 5fa92706444b..644cce3d8dce 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -40,6 +40,7 @@
#include <asm/syscalls.h>
#include <asm/time.h>
#include <asm/unistd.h>
+#include <asm/asm-prototypes.h>
static inline unsigned long do_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3efbedefba6a..67859b7d1c97 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -73,6 +73,7 @@
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
+#include <asm/asm-prototypes.h>
/* powerpc clocksource/clockevent code */
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 298afcf3bf2a..3a2d04134da9 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -108,6 +108,7 @@ _GLOBAL(tm_reclaim)
/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
std r3, STK_PARAM(R3)(r1)
+ std r4, STK_PARAM(R4)(r1)
SAVE_NVGPRS(r1)
/* We need to setup MSR for VSX register save instructions. */
@@ -126,43 +127,6 @@ _GLOBAL(tm_reclaim)
mtmsrd r15
std r14, TM_FRAME_L0(r1)
- /* Stash the stack pointer away for use after reclaim */
- std r1, PACAR1(r13)
-
- /* ******************** FPR/VR/VSRs ************
- * Before reclaiming, capture the current/transactional FPR/VR
- * versions /if used/.
- *
- * (If VSX used, FP and VMX are implied. Or, we don't need to look
- * at MSR.VSX as copying FP regs if .FP, vector regs if .VMX covers it.)
- *
- * We're passed the thread's MSR as parameter 2.
- *
- * We enabled VEC/FP/VSX in the msr above, so we can execute these
- * instructions!
- */
- andis. r0, r4, MSR_VEC@h
- beq dont_backup_vec
-
- addi r7, r3, THREAD_TRANSACT_VRSTATE
- SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
- mfvscr v0
- li r6, VRSTATE_VSCR
- stvx v0, r7, r6
-dont_backup_vec:
- mfspr r0, SPRN_VRSAVE
- std r0, THREAD_TRANSACT_VRSAVE(r3)
-
- andi. r0, r4, MSR_FP
- beq dont_backup_fp
-
- addi r7, r3, THREAD_TRANSACT_FPSTATE
- SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */
-
- mffs fr0
- stfd fr0,FPSTATE_FPSCR(r7)
-
-dont_backup_fp:
/* Do sanity check on MSR to make sure we are suspended */
li r7, (MSR_TS_S)@higher
srdi r6, r14, 32
@@ -170,6 +134,9 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
+ /* Stash the stack pointer away for use after reclaim */
+ std r1, PACAR1(r13)
+
/* Clear MSR RI since we are about to change r1, EE is already off. */
li r4, 0
mtmsrd r4, 1
@@ -273,6 +240,43 @@ dont_backup_fp:
* MSR.
*/
+
+ /* ******************** FPR/VR/VSRs ************
+ * After reclaiming, capture the checkpointed FPRs/VRs /if used/.
+ *
+ * (If VSX is used, FP and VMX are implied, so we don't need to look at
+ * MSR.VSX: copying the FP regs if MSR.FP is set and the vector regs if
+ * MSR.VMX is set covers it.)
+ *
+ * We're passed the thread's MSR as the second parameter.
+ *
+ * We enabled VEC/FP/VSX in the msr above, so we can execute these
+ * instructions!
+ */
+ ld r4, STK_PARAM(R4)(r1) /* Second parameter, MSR * */
+ mr r3, r12
+ andis. r0, r4, MSR_VEC@h
+ beq dont_backup_vec
+
+ addi r7, r3, THREAD_CKVRSTATE
+ SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 ckvr state */
+ mfvscr v0
+ li r6, VRSTATE_VSCR
+ stvx v0, r7, r6
+dont_backup_vec:
+ mfspr r0, SPRN_VRSAVE
+ std r0, THREAD_CKVRSAVE(r3)
+
+ andi. r0, r4, MSR_FP
+ beq dont_backup_fp
+
+ addi r7, r3, THREAD_CKFPSTATE
+ SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 ckfp state */
+
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r7)
+
+dont_backup_fp:
+
/* TM regs, incl TEXASR -- these live in thread_struct. Note they've
* been updated by the treclaim, to explain to userland the failure
* cause (aborted).
@@ -288,6 +292,7 @@ dont_backup_fp:
/* Restore original MSR/IRQ state & clear TM mode */
ld r14, TM_FRAME_L0(r1) /* Orig MSR */
+
li r15, 0
rldimi r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
mtmsrd r14
@@ -356,28 +361,29 @@ _GLOBAL(__tm_recheckpoint)
mtmsr r5
#ifdef CONFIG_ALTIVEC
- /* FP and VEC registers: These are recheckpointed from thread.fpr[]
- * and thread.vr[] respectively. The thread.transact_fpr[] version
- * is more modern, and will be loaded subsequently by any FPUnavailable
- * trap.
+ /*
+ * FP and VEC registers: These are recheckpointed from
+ * thread.ckfp_state and thread.ckvr_state respectively. The
+ * thread.fp_state version holds the 'live' (transactional) state
+ * and will be loaded subsequently by any FPUnavailable trap.
*/
andis. r0, r4, MSR_VEC@h
beq dont_restore_vec
- addi r8, r3, THREAD_VRSTATE
+ addi r8, r3, THREAD_CKVRSTATE
li r5, VRSTATE_VSCR
lvx v0, r8, r5
mtvscr v0
REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
- ld r5, THREAD_VRSAVE(r3)
+ ld r5, THREAD_CKVRSAVE(r3)
mtspr SPRN_VRSAVE, r5
#endif
andi. r0, r4, MSR_FP
beq dont_restore_fp
- addi r8, r3, THREAD_FPSTATE
+ addi r8, r3, THREAD_CKFPSTATE
lfd fr0, FPSTATE_FPSCR(r8)
MTFSF_L(fr0)
REST_32FPRS_VSRS(0, R4, R8)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 62859ebe0062..a1f8f5641e9e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -117,7 +117,7 @@ static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
-static unsigned __kprobes long oops_begin(struct pt_regs *regs)
+static unsigned long oops_begin(struct pt_regs *regs)
{
int cpu;
unsigned long flags;
@@ -144,8 +144,9 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
pmac_backlight_unblank();
return flags;
}
+NOKPROBE_SYMBOL(oops_begin);
-static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+static void oops_end(unsigned long flags, struct pt_regs *regs,
int signr)
{
bust_spinlocks(0);
@@ -196,8 +197,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
panic("Fatal exception");
do_exit(signr);
}
+NOKPROBE_SYMBOL(oops_end);
-static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
@@ -221,6 +223,7 @@ static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
return 0;
}
+NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
@@ -802,7 +805,7 @@ void RunModeException(struct pt_regs *regs)
_exception(SIGTRAP, regs, 0, 0);
}
-void __kprobes single_step_exception(struct pt_regs *regs)
+void single_step_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
@@ -819,6 +822,7 @@ void __kprobes single_step_exception(struct pt_regs *regs)
bail:
exception_exit(prev_state);
}
+NOKPROBE_SYMBOL(single_step_exception);
/*
* After we have successfully emulated an instruction, we have to
@@ -1140,7 +1144,7 @@ static int emulate_math(struct pt_regs *regs)
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
-void __kprobes program_check_exception(struct pt_regs *regs)
+void program_check_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
@@ -1260,16 +1264,18 @@ sigill:
bail:
exception_exit(prev_state);
}
+NOKPROBE_SYMBOL(program_check_exception);
/*
* This occurs when running in hypervisor mode on POWER6 or later
* and an illegal instruction is encountered.
*/
-void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+void emulation_assist_interrupt(struct pt_regs *regs)
{
regs->msr |= REASON_ILLEGAL;
program_check_exception(regs);
}
+NOKPROBE_SYMBOL(emulation_assist_interrupt);
void alignment_exception(struct pt_regs *regs)
{
@@ -1310,6 +1316,18 @@ bail:
exception_exit(prev_state);
}
+void slb_miss_bad_addr(struct pt_regs *regs)
+{
+ enum ctx_state prev_state = exception_enter();
+
+ if (user_mode(regs))
+ _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
+ else
+ bad_page_fault(regs, regs->dar, SIGSEGV);
+
+ exception_exit(prev_state);
+}
+
void StackOverflow(struct pt_regs *regs)
{
printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
@@ -1372,6 +1390,22 @@ void vsx_unavailable_exception(struct pt_regs *regs)
}
#ifdef CONFIG_PPC64
+static void tm_unavailable(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (user_mode(regs)) {
+ current->thread.load_tm++;
+ regs->msr |= MSR_TM;
+ tm_enable();
+ tm_restore_sprs(&current->thread);
+ return;
+ }
+#endif
+ pr_emerg("Unrecoverable TM Unavailable Exception "
+ "%lx at %lx\n", regs->trap, regs->nip);
+ die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
+}
+
void facility_unavailable_exception(struct pt_regs *regs)
{
static char *facility_strings[] = {
@@ -1451,6 +1485,27 @@ void facility_unavailable_exception(struct pt_regs *regs)
return;
}
+ if (status == FSCR_TM_LG) {
+ /*
+ * If we're here then the hardware is TM aware because it
+ * generated an exception with FSCR_TM set.
+ *
+ * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
+ * told us not to do TM, or the kernel is not built with TM
+ * support.
+ *
+ * If both of those things are true, then userspace can spam the
+ * console by triggering the printk() below just by continually
+ * doing tbegin (or any TM instruction). So in that case just
+ * send the process a SIGILL immediately.
+ */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto out;
+
+ tm_unavailable(regs);
+ return;
+ }
+
if ((status < ARRAY_SIZE(facility_strings)) &&
facility_strings[status])
facility = facility_strings[status];
@@ -1463,6 +1518,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
+out:
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
@@ -1504,7 +1560,8 @@ void fp_unavailable_tm(struct pt_regs *regs)
/* If VMX is in use, get the transactional values back */
if (regs->msr & MSR_VEC) {
- do_load_up_transact_altivec(&current->thread);
+ msr_check_and_set(MSR_VEC);
+ load_vr_state(&current->thread.vr_state);
/* At this point all the VSX state is loaded, so enable it */
regs->msr |= MSR_VSX;
}
@@ -1525,7 +1582,8 @@ void altivec_unavailable_tm(struct pt_regs *regs)
current->thread.used_vr = 1;
if (regs->msr & MSR_FP) {
- do_load_up_transact_fpu(&current->thread);
+ msr_check_and_set(MSR_FP);
+ load_fp_state(&current->thread.fp_state);
regs->msr |= MSR_VSX;
}
}
@@ -1564,10 +1622,12 @@ void vsx_unavailable_tm(struct pt_regs *regs)
*/
tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+ msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));
+
if (orig_msr & MSR_FP)
- do_load_up_transact_fpu(&current->thread);
+ load_fp_state(&current->thread.fp_state);
if (orig_msr & MSR_VEC)
- do_load_up_transact_altivec(&current->thread);
+ load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -1656,7 +1716,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
-void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
+void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
current->thread.debug.dbsr = debug_status;
@@ -1717,6 +1777,7 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
} else
handle_debug(regs, debug_status);
}
+NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
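
The traps.c hunks above repeat one idiom: the __kprobes attribute is dropped from the handler and the symbol is excluded from kprobes with NOKPROBE_SYMBOL() after its definition instead. A minimal sketch of the pattern (example_handler is a hypothetical name):

#include <linux/kprobes.h>

static int example_handler(struct pt_regs *regs)
{
	/* handler body unchanged; only the no-kprobe annotation style differs */
	return 0;
}
NOKPROBE_SYMBOL(example_handler);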
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 366ae09b14c1..31107bf5a61f 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -31,15 +31,9 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S FORCE
- $(call if_changed_dep,vdso64as)
-
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
-quiet_cmd_vdso64as = VDSO64A $@
- cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 184a6ba7f283..abf17feffe40 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
bl V_LOCAL_FUNC(__get_datapage)
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
li r0,NR_syscalls
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index a76b4af37ef2..382021324883 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
bne cr0,99f
li r3,0
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
lis r5,CLOCK_REALTIME_RES@h
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 616a6d854638..bc85bdff4e01 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -7,31 +7,6 @@
#include <asm/page.h>
#include <asm/ptrace.h>
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/* void do_load_up_transact_altivec(struct thread_struct *thread)
- *
- * This is similar to load_up_altivec but for the transactional version of the
- * vector regs. It doesn't mess with the task MSR or valid flags.
- * Furthermore, VEC laziness is not supported with TM currently.
- */
-_GLOBAL(do_load_up_transact_altivec)
- mfmsr r6
- oris r5,r6,MSR_VEC@h
- MTMSRD(r5)
- isync
-
- li r4,1
- stw r4,THREAD_USED_VR(r3)
-
- li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
- lvx v0,r10,r3
- mtvscr v0
- addi r10,r3,THREAD_TRANSACT_VRSTATE
- REST_32VRS(0,r4,r10)
-
- blr
-#endif
-
/*
* Load state from memory into VMX registers including VSCR.
* Assumes the caller has enabled VMX in the MSR.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b5fba689fca6..8295f51c1a5f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -44,18 +44,68 @@ SECTIONS
* Text, read only data and other permanent read-only sections
*/
- /* Text and gots */
+ _text = .;
+ _stext = .;
+
+ /*
+ * Head text.
+ * This needs to be in its own output section to avoid ld placing
+ * branch trampoline stubs randomly throughout the fixed sections,
+ * which it will do (even if the branch comes from another section)
+ * in order to optimize stub generation.
+ */
+ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
+#ifdef CONFIG_PPC64
+ KEEP(*(.head.text.first_256B));
+#ifdef CONFIG_PPC_BOOK3E
+# define END_FIXED 0x100
+#else
+ KEEP(*(.head.text.real_vectors));
+ *(.head.text.real_trampolines);
+ KEEP(*(.head.text.virt_vectors));
+ *(.head.text.virt_trampolines);
+# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ KEEP(*(.head.data.fwnmi_page));
+# define END_FIXED 0x8000
+# else
+# define END_FIXED 0x7000
+# endif
+#endif
+ ASSERT((. == END_FIXED), "vmlinux.lds.S: fixed section overflow error");
+#else /* !CONFIG_PPC64 */
+ HEAD_TEXT
+#endif
+ } :kernel
+
+ /*
+ * If the build dies here, it's likely code in head_64.S is referencing
+ * labels it can't reach, and the linker inserting stubs without the
+ * assembler's knowledge. To debug, remove the above assert and
+ * rebuild. Look for branch stubs in the fixed section region.
+ *
+ * Linker stub generation could be allowed in "trampoline"
+ * sections if absolutely necessary, but this would require
+ * some rework of the fixed sections. Before resorting to this,
+ * consider references that have sufficient addressing range,
+ * (e.g., hand coded trampolines) so the linker does not have
+ * to add stubs.
+ *
+ * Linker stubs at the top of the main text section are currently not
+ * detected, and will result in a crash at boot due to offsets being
+ * wrong.
+ */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
ALIGN_FUNCTION();
- HEAD_TEXT
- _text = .;
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text .fixup __ftr_alt_* .ref.text)
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ MEM_KEEP(init.text)
+ MEM_KEEP(exit.text)
#ifdef CONFIG_PPC32
*(.got1)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index c2024ac9d4e8..029be26b5a17 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,6 +22,9 @@ config KVM
select ANON_INODES
select HAVE_KVM_EVENTFD
select SRCU
+ select KVM_VFIO
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
config KVM_BOOK3S_HANDLER
bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 855d4b95d752..7dd89b79d038 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -7,16 +7,16 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
- $(KVM)/eventfd.o
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o
common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
+common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
CFLAGS_e500_mmu.o := -I.
CFLAGS_e500_mmu_host.o := -I.
CFLAGS_emulate.o := -I.
CFLAGS_emulate_loadstore.o := -I.
-common-objs-y += powerpc.o emulate.o emulate_loadstore.o
+common-objs-y += powerpc.o emulate_loadstore.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
@@ -24,6 +24,7 @@ AFLAGS_booke_interrupts.o := -I$(objtree)/$(obj)
kvm-e500-objs := \
$(common-objs-y) \
+ emulate.o \
booke.o \
booke_emulate.o \
booke_interrupts.o \
@@ -35,6 +36,7 @@ kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
kvm-e500mc-objs := \
$(common-objs-y) \
+ emulate.o \
booke.o \
booke_emulate.o \
bookehv_interrupts.o \
@@ -61,9 +63,6 @@ kvm-pr-y := \
book3s_32_mmu.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
-kvm-book3s_64-module-objs := \
- $(KVM)/coalesced_mmio.o
-
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_rmhandlers.o
endif
@@ -89,11 +88,8 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
-kvm-book3s_64-module-objs += \
- $(KVM)/kvm_main.o \
- $(KVM)/eventfd.o \
- powerpc.o \
- emulate_loadstore.o \
+kvm-book3s_64-module-objs := \
+ $(common-objs-y) \
book3s.o \
book3s_64_vio.o \
book3s_rtas.o \
@@ -103,6 +99,7 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
kvm-book3s_32-objs := \
$(common-objs-y) \
+ emulate.o \
fpu.o \
book3s_paired_singles.o \
book3s.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 47018fcbf7d6..b6952dd23152 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -52,8 +52,12 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "queue_intr", VCPU_STAT(queue_intr) },
+ { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
+ { "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
+ { "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
+ { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "pf_storage", VCPU_STAT(pf_storage) },
@@ -64,6 +68,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "ld_slow", VCPU_STAT(ld_slow) },
{ "st", VCPU_STAT(st) },
{ "st_slow", VCPU_STAT(st_slow) },
+ { "pthru_all", VCPU_STAT(pthru_all) },
+ { "pthru_host", VCPU_STAT(pthru_host) },
+ { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
{ NULL }
};
@@ -592,9 +599,6 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_BESCR:
*val = get_reg_val(id, vcpu->arch.bescr);
break;
- case KVM_REG_PPC_VTB:
- *val = get_reg_val(id, vcpu->arch.vtb);
- break;
case KVM_REG_PPC_IC:
*val = get_reg_val(id, vcpu->arch.ic);
break;
@@ -666,9 +670,6 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_BESCR:
vcpu->arch.bescr = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_VTB:
- vcpu->arch.vtb = set_reg_val(id, *val);
- break;
case KVM_REG_PPC_IC:
vcpu->arch.ic = set_reg_val(id, *val);
break;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 2afdb9c0937d..8359752b3efc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -498,6 +498,7 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_MMCR0:
case SPRN_MMCR1:
case SPRN_MMCR2:
+ case SPRN_UMMCR2:
#endif
break;
unprivileged:
@@ -579,7 +580,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
*spr_val = vcpu->arch.spurr;
break;
case SPRN_VTB:
- *spr_val = vcpu->arch.vtb;
+ *spr_val = to_book3s(vcpu)->vtb;
break;
case SPRN_IC:
*spr_val = vcpu->arch.ic;
@@ -640,6 +641,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
case SPRN_MMCR0:
case SPRN_MMCR1:
case SPRN_MMCR2:
+ case SPRN_UMMCR2:
case SPRN_TIR:
#endif
*spr_val = 0;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2fd5580c8f6e..3686471be32b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,11 +53,15 @@
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
+#include <asm/pnv-pci.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
#include <linux/module.h>
+#include <linux/compiler.h>
#include "book3s.h"
@@ -70,6 +74,8 @@
/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1)
+/* Used to indicate that a guest passthrough interrupt needs to be handled */
+#define RESUME_PASSTHROUGH (RESUME_GUEST | RESUME_FLAG_ARCH2)
/* Used as a "null" value for timebase values */
#define TB_NIL (~(u64)0)
@@ -89,14 +95,55 @@ static struct kernel_param_ops module_param_ops = {
.get = param_get_int,
};
+module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");
+
module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
+/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
+static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
+module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
+
+/*
+ * Factor by which the vcore halt poll interval is grown, default is to
+ * double.
+ */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
+
+/*
+ * Factor by which the vcore halt poll interval is shrunk, default is to
+ * reset.
+ */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
+
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
+ int *ip)
+{
+ int i = *ip;
+ struct kvm_vcpu *vcpu;
+
+ while (++i < MAX_SMT_THREADS) {
+ vcpu = READ_ONCE(vc->runnable_threads[i]);
+ if (vcpu) {
+ *ip = i;
+ return vcpu;
+ }
+ }
+ return NULL;
+}
+
+/* Used to traverse the list of runnable threads for a given vcore */
+#define for_each_runnable_thread(i, vcpu, vc) \
+ for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
+
static bool kvmppc_ipi_thread(int cpu)
{
/* On POWER8 for IPIs to threads in the same core, use msgsnd */
@@ -991,6 +1038,9 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
break;
+ case BOOK3S_INTERRUPT_HV_RM_HARD:
+ r = RESUME_PASSTHROUGH;
+ break;
default:
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
@@ -1149,6 +1199,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DPDES:
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
+ case KVM_REG_PPC_VTB:
+ *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+ break;
case KVM_REG_PPC_DAWR:
*val = get_reg_val(id, vcpu->arch.dawr);
break;
@@ -1341,6 +1394,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DPDES:
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_VTB:
+ vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_DAWR:
vcpu->arch.dawr = set_reg_val(id, *val);
break;
@@ -1493,7 +1549,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
if (vcore == NULL)
return NULL;
- INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
init_swait_queue_head(&vcore->wq);
@@ -1802,7 +1857,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
spin_unlock_irq(&vcpu->arch.tbacct_lock);
--vc->n_runnable;
- list_del(&vcpu->arch.run_list);
+ WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
}
static int kvmppc_grab_hwthread(int cpu)
@@ -2048,66 +2103,6 @@ static void init_master_vcore(struct kvmppc_vcore *vc)
vc->conferring_threads = 0;
}
-/*
- * See if the existing subcores can be split into 3 (or fewer) subcores
- * of at most two threads each, so we can fit in another vcore. This
- * assumes there are at most two subcores and at most 6 threads in total.
- */
-static bool can_split_piggybacked_subcores(struct core_info *cip)
-{
- int sub, new_sub;
- int large_sub = -1;
- int thr;
- int n_subcores = cip->n_subcores;
- struct kvmppc_vcore *vc, *vcnext;
- struct kvmppc_vcore *master_vc = NULL;
-
- for (sub = 0; sub < cip->n_subcores; ++sub) {
- if (cip->subcore_threads[sub] <= 2)
- continue;
- if (large_sub >= 0)
- return false;
- large_sub = sub;
- vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
- preempt_list);
- if (vc->num_threads > 2)
- return false;
- n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
- }
- if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
- return false;
-
- /*
- * Seems feasible, so go through and move vcores to new subcores.
- * Note that when we have two or more vcores in one subcore,
- * all those vcores must have only one thread each.
- */
- new_sub = cip->n_subcores;
- thr = 0;
- sub = large_sub;
- list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) {
- if (thr >= 2) {
- list_del(&vc->preempt_list);
- list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]);
- /* vc->num_threads must be 1 */
- if (++cip->subcore_threads[new_sub] == 1) {
- cip->subcore_vm[new_sub] = vc->kvm;
- init_master_vcore(vc);
- master_vc = vc;
- ++cip->n_subcores;
- } else {
- vc->master_vcore = master_vc;
- ++new_sub;
- }
- }
- thr += vc->num_threads;
- }
- cip->subcore_threads[large_sub] = 2;
- cip->max_subcore_threads = 2;
-
- return true;
-}
-
static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
{
int n_threads = vc->num_threads;
@@ -2118,23 +2113,9 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (n_threads < cip->max_subcore_threads)
n_threads = cip->max_subcore_threads;
- if (subcore_config_ok(cip->n_subcores + 1, n_threads)) {
- cip->max_subcore_threads = n_threads;
- } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 &&
- vc->num_threads <= 2) {
- /*
- * We may be able to fit another subcore in by
- * splitting an existing subcore with 3 or 4
- * threads into two 2-thread subcores, or one
- * with 5 or 6 threads into three subcores.
- * We can only do this if those subcores have
- * piggybacked virtual cores.
- */
- if (!can_split_piggybacked_subcores(cip))
- return false;
- } else {
+ if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
return false;
- }
+ cip->max_subcore_threads = n_threads;
sub = cip->n_subcores;
++cip->n_subcores;
@@ -2148,43 +2129,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
return true;
}
-static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
- struct core_info *cip, int sub)
-{
- struct kvmppc_vcore *vc;
- int n_thr;
-
- vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
- preempt_list);
-
- /* require same VM and same per-core reg values */
- if (pvc->kvm != vc->kvm ||
- pvc->tb_offset != vc->tb_offset ||
- pvc->pcr != vc->pcr ||
- pvc->lpcr != vc->lpcr)
- return false;
-
- /* P8 guest with > 1 thread per core would see wrong TIR value */
- if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
- (vc->num_threads > 1 || pvc->num_threads > 1))
- return false;
-
- n_thr = cip->subcore_threads[sub] + pvc->num_threads;
- if (n_thr > cip->max_subcore_threads) {
- if (!subcore_config_ok(cip->n_subcores, n_thr))
- return false;
- cip->max_subcore_threads = n_thr;
- }
-
- cip->total_threads += pvc->num_threads;
- cip->subcore_threads[sub] = n_thr;
- pvc->master_vcore = vc;
- list_del(&pvc->preempt_list);
- list_add_tail(&pvc->preempt_list, &cip->vcs[sub]);
-
- return true;
-}
-
/*
* Work out whether it is possible to piggyback the execution of
* vcore *pvc onto the execution of the other vcores described in *cip.
@@ -2192,27 +2136,18 @@ static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
int target_threads)
{
- int sub;
-
if (cip->total_threads + pvc->num_threads > target_threads)
return false;
- for (sub = 0; sub < cip->n_subcores; ++sub)
- if (cip->subcore_threads[sub] &&
- can_piggyback_subcore(pvc, cip, sub))
- return true;
-
- if (can_dynamic_split(pvc, cip))
- return true;
- return false;
+ return can_dynamic_split(pvc, cip);
}
static void prepare_threads(struct kvmppc_vcore *vc)
{
- struct kvm_vcpu *vcpu, *vnext;
+ int i;
+ struct kvm_vcpu *vcpu;
- list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
- arch.run_list) {
+ for_each_runnable_thread(i, vcpu, vc) {
if (signal_pending(vcpu->arch.run_task))
vcpu->arch.ret = -EINTR;
else if (vcpu->arch.vpa.update_pending ||
@@ -2259,15 +2194,14 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
{
- int still_running = 0;
+ int still_running = 0, i;
u64 now;
long ret;
- struct kvm_vcpu *vcpu, *vnext;
+ struct kvm_vcpu *vcpu;
spin_lock(&vc->lock);
now = get_tb();
- list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
- arch.run_list) {
+ for_each_runnable_thread(i, vcpu, vc) {
/* cancel pending dec exception if dec is positive */
if (now < vcpu->arch.dec_expires &&
kvmppc_core_pending_dec(vcpu))
@@ -2307,8 +2241,8 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
}
if (vc->n_runnable > 0 && vc->runner == NULL) {
/* make sure there's a candidate runner awake */
- vcpu = list_first_entry(&vc->runnable_threads,
- struct kvm_vcpu, arch.run_list);
+ i = -1;
+ vcpu = next_runnable_thread(vc, &i);
wake_up(&vcpu->arch.cpu_run);
}
}
@@ -2361,7 +2295,7 @@ static inline void kvmppc_set_host_core(int cpu)
*/
static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
- struct kvm_vcpu *vcpu, *vnext;
+ struct kvm_vcpu *vcpu;
int i;
int srcu_idx;
struct core_info core_info;
@@ -2397,8 +2331,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
if ((threads_per_core > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
- list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
- arch.run_list) {
+ for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
kvmppc_remove_runnable(vc, vcpu);
wake_up(&vcpu->arch.cpu_run);
@@ -2477,8 +2410,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
active |= 1 << thr;
list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
pvc->pcpu = pcpu + thr;
- list_for_each_entry(vcpu, &pvc->runnable_threads,
- arch.run_list) {
+ for_each_runnable_thread(i, vcpu, pvc) {
kvmppc_start_thread(vcpu, pvc);
kvmppc_create_dtl_entry(vcpu, pvc);
trace_kvm_guest_enter(vcpu);
@@ -2604,34 +2536,92 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
finish_wait(&vcpu->arch.cpu_run, &wait);
}
+static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+ /* 10us base */
+ if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
+ vc->halt_poll_ns = 10000;
+ else
+ vc->halt_poll_ns *= halt_poll_ns_grow;
+
+ if (vc->halt_poll_ns > halt_poll_max_ns)
+ vc->halt_poll_ns = halt_poll_max_ns;
+}
+
+static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+ if (halt_poll_ns_shrink == 0)
+ vc->halt_poll_ns = 0;
+ else
+ vc->halt_poll_ns /= halt_poll_ns_shrink;
+}
+
+/* Check to see if any of the runnable vcpus on the vcore have pending
+ * exceptions or are no longer ceded
+ */
+static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ for_each_runnable_thread(i, vcpu, vc) {
+ if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded)
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* All the vcpus in this vcore are idle, so wait for a decrementer
* or external interrupt to one of the vcpus. vc->lock is held.
*/
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
- struct kvm_vcpu *vcpu;
+ ktime_t cur, start_poll, start_wait;
int do_sleep = 1;
+ u64 block_ns;
DECLARE_SWAITQUEUE(wait);
- prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ /* Poll for pending exceptions and ceded state */
+ cur = start_poll = ktime_get();
+ if (vc->halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
+ ++vc->runner->stat.halt_attempted_poll;
- /*
- * Check one last time for pending exceptions and ceded state after
- * we put ourselves on the wait queue
- */
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
- if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
- do_sleep = 0;
- break;
+ vc->vcore_state = VCORE_POLLING;
+ spin_unlock(&vc->lock);
+
+ do {
+ if (kvmppc_vcore_check_block(vc)) {
+ do_sleep = 0;
+ break;
+ }
+ cur = ktime_get();
+ } while (single_task_running() && ktime_before(cur, stop));
+
+ spin_lock(&vc->lock);
+ vc->vcore_state = VCORE_INACTIVE;
+
+ if (!do_sleep) {
+ ++vc->runner->stat.halt_successful_poll;
+ goto out;
}
}
- if (!do_sleep) {
+ prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+ if (kvmppc_vcore_check_block(vc)) {
finish_swait(&vc->wq, &wait);
- return;
+ do_sleep = 0;
+ /* If we polled, count this as a successful poll */
+ if (vc->halt_poll_ns)
+ ++vc->runner->stat.halt_successful_poll;
+ goto out;
}
+ start_wait = ktime_get();
+
vc->vcore_state = VCORE_SLEEPING;
trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
@@ -2640,13 +2630,52 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1);
+ ++vc->runner->stat.halt_successful_wait;
+
+ cur = ktime_get();
+
+out:
+ block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
+
+ /* Attribute wait time */
+ if (do_sleep) {
+ vc->runner->stat.halt_wait_ns +=
+ ktime_to_ns(cur) - ktime_to_ns(start_wait);
+ /* Attribute failed poll time */
+ if (vc->halt_poll_ns)
+ vc->runner->stat.halt_poll_fail_ns +=
+ ktime_to_ns(start_wait) -
+ ktime_to_ns(start_poll);
+ } else {
+ /* Attribute successful poll time */
+ if (vc->halt_poll_ns)
+ vc->runner->stat.halt_poll_success_ns +=
+ ktime_to_ns(cur) -
+ ktime_to_ns(start_poll);
+ }
+
+ /* Adjust poll time */
+ if (halt_poll_max_ns) {
+ if (block_ns <= vc->halt_poll_ns)
+ ;
+ /* We slept and blocked for longer than the max halt time */
+ else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+ shrink_halt_poll_ns(vc);
+ /* We slept and our poll time is too small */
+ else if (vc->halt_poll_ns < halt_poll_max_ns &&
+ block_ns < halt_poll_max_ns)
+ grow_halt_poll_ns(vc);
+ } else
+ vc->halt_poll_ns = 0;
+
+ trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
}
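The poll-window adjustment at the end of kvmppc_vcore_blocked() is easier to follow in isolation. The sketch below is a stand-alone model of that policy, purely illustrative: it mirrors the grow/shrink rules added above but uses arbitrary example parameter values rather than the real module defaults.

#include <stdio.h>

/* Example values only; the real ones come from the module parameters above */
static unsigned int halt_poll_max_ns = 500000;
static unsigned int halt_poll_ns_grow = 2;
static unsigned int halt_poll_ns_shrink;        /* 0 means reset to 0 */

static void grow(unsigned long *poll_ns)
{
        if (*poll_ns == 0 && halt_poll_ns_grow)
                *poll_ns = 10000;               /* 10us base, as in grow_halt_poll_ns() */
        else
                *poll_ns *= halt_poll_ns_grow;
        if (*poll_ns > halt_poll_max_ns)
                *poll_ns = halt_poll_max_ns;
}

static void shrink(unsigned long *poll_ns)
{
        if (halt_poll_ns_shrink == 0)
                *poll_ns = 0;
        else
                *poll_ns /= halt_poll_ns_shrink;
}

/* Mirror of the "Adjust poll time" branch in kvmppc_vcore_blocked() */
static void adjust(unsigned long *poll_ns, unsigned long block_ns)
{
        if (!halt_poll_max_ns) {
                *poll_ns = 0;
                return;
        }
        if (block_ns <= *poll_ns)
                ;                               /* window was big enough, keep it */
        else if (*poll_ns && block_ns > halt_poll_max_ns)
                shrink(poll_ns);                /* blocked for longer than the max */
        else if (*poll_ns < halt_poll_max_ns && block_ns < halt_poll_max_ns)
                grow(poll_ns);                  /* slept and the window is too small */
}

int main(void)
{
        unsigned long poll_ns = 0;
        unsigned long samples[] = { 50000, 50000, 50000, 2000000, 30000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                adjust(&poll_ns, samples[i]);
                printf("block %lu ns -> poll window %lu ns\n", samples[i], poll_ns);
        }
        return 0;
}

With these sample block times the window grows from 0 to 10000 and then doubles, collapses back to 0 after the long block (shrink factor 0 means reset), and starts growing again.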
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- int n_ceded;
+ int n_ceded, i;
struct kvmppc_vcore *vc;
- struct kvm_vcpu *v, *vn;
+ struct kvm_vcpu *v;
trace_kvmppc_run_vcpu_enter(vcpu);
@@ -2666,7 +2695,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
- list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+ WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
++vc->n_runnable;
/*
@@ -2706,8 +2735,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
continue;
}
- list_for_each_entry_safe(v, vn, &vc->runnable_threads,
- arch.run_list) {
+ for_each_runnable_thread(i, v, vc) {
kvmppc_core_prepare_to_enter(v);
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
@@ -2720,7 +2748,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
break;
n_ceded = 0;
- list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
+ for_each_runnable_thread(i, v, vc) {
if (!v->arch.pending_exceptions)
n_ceded += v->arch.ceded;
else
@@ -2759,8 +2787,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
/* Wake up some vcpu to run the core */
- v = list_first_entry(&vc->runnable_threads,
- struct kvm_vcpu, arch.run_list);
+ i = -1;
+ v = next_runnable_thread(vc, &i);
wake_up(&v->arch.cpu_run);
}
@@ -2818,7 +2846,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_book3s_hv_page_fault(run, vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- }
+ } else if (r == RESUME_PASSTHROUGH)
+ r = kvmppc_xics_rm_complete(vcpu, 0);
} while (is_kvmppc_resume_guest(r));
out:
@@ -3247,6 +3276,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmppc_free_vcores(kvm);
kvmppc_free_hpt(kvm);
+
+ kvmppc_free_pimap(kvm);
}
/* We don't need to emulate any privileged instructions or dcbz */
@@ -3282,6 +3313,184 @@ static int kvmppc_core_check_processor_compat_hv(void)
return 0;
}
+#ifdef CONFIG_KVM_XICS
+
+void kvmppc_free_pimap(struct kvm *kvm)
+{
+ kfree(kvm->arch.pimap);
+}
+
+static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
+{
+ return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
+}
+
+static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
+{
+ struct irq_desc *desc;
+ struct kvmppc_irq_map *irq_map;
+ struct kvmppc_passthru_irqmap *pimap;
+ struct irq_chip *chip;
+ int i;
+
+ if (!kvm_irq_bypass)
+ return 1;
+
+ desc = irq_to_desc(host_irq);
+ if (!desc)
+ return -EIO;
+
+ mutex_lock(&kvm->lock);
+
+ pimap = kvm->arch.pimap;
+ if (pimap == NULL) {
+ /* First call, allocate structure to hold IRQ map */
+ pimap = kvmppc_alloc_pimap();
+ if (pimap == NULL) {
+ mutex_unlock(&kvm->lock);
+ return -ENOMEM;
+ }
+ kvm->arch.pimap = pimap;
+ }
+
+ /*
+ * For now, we only support interrupts for which the EOI operation
+ * is an OPAL call followed by a write to XIRR, since that's
+ * what our real-mode EOI code does.
+ */
+ chip = irq_data_get_irq_chip(&desc->irq_data);
+ if (!chip || !is_pnv_opal_msi(chip)) {
+ pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
+ host_irq, guest_gsi);
+ mutex_unlock(&kvm->lock);
+ return -ENOENT;
+ }
+
+ /*
+ * See if we already have an entry for this guest IRQ number.
+ * If it's mapped to a hardware IRQ number, that's an error,
+ * otherwise re-use this entry.
+ */
+ for (i = 0; i < pimap->n_mapped; i++) {
+ if (guest_gsi == pimap->mapped[i].v_hwirq) {
+ if (pimap->mapped[i].r_hwirq) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+
+ if (i == KVMPPC_PIRQ_MAPPED) {
+ mutex_unlock(&kvm->lock);
+ return -EAGAIN; /* table is full */
+ }
+
+ irq_map = &pimap->mapped[i];
+
+ irq_map->v_hwirq = guest_gsi;
+ irq_map->desc = desc;
+
+ /*
+ * Order the above two stores before the store to r_hwirq below, to
+ * serialize with the KVM real mode handler.
+ */
+ smp_wmb();
+ irq_map->r_hwirq = desc->irq_data.hwirq;
+
+ if (i == pimap->n_mapped)
+ pimap->n_mapped++;
+
+ kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+
+ mutex_unlock(&kvm->lock);
+
+ return 0;
+}
+
+static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
+{
+ struct irq_desc *desc;
+ struct kvmppc_passthru_irqmap *pimap;
+ int i;
+
+ if (!kvm_irq_bypass)
+ return 0;
+
+ desc = irq_to_desc(host_irq);
+ if (!desc)
+ return -EIO;
+
+ mutex_lock(&kvm->lock);
+
+ if (kvm->arch.pimap == NULL) {
+ mutex_unlock(&kvm->lock);
+ return 0;
+ }
+ pimap = kvm->arch.pimap;
+
+ for (i = 0; i < pimap->n_mapped; i++) {
+ if (guest_gsi == pimap->mapped[i].v_hwirq)
+ break;
+ }
+
+ if (i == pimap->n_mapped) {
+ mutex_unlock(&kvm->lock);
+ return -ENODEV;
+ }
+
+ kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
+
+ /* invalidate the entry */
+ pimap->mapped[i].r_hwirq = 0;
+
+ /*
+ * We don't free this structure even when the count goes to
+ * zero. The structure is freed when we destroy the VM.
+ */
+
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ int ret = 0;
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ irqfd->producer = prod;
+
+ ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
+ if (ret)
+ pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
+ prod->irq, irqfd->gsi, ret);
+
+ return ret;
+}
+
+static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ int ret;
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ irqfd->producer = NULL;
+
+ /*
+ * When the producer for this consumer is unregistered, we change back
+ * to the default external interrupt handling mode - KVM real mode
+ * will switch back to the host.
+ */
+ ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
+ if (ret)
+ pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
+ prod->irq, irqfd->gsi, ret);
+}
+#endif
+
static long kvm_arch_vm_ioctl_hv(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -3400,6 +3609,10 @@ static struct kvmppc_ops kvm_ops_hv = {
.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
.hcall_implemented = kvmppc_hcall_impl_hv,
+#ifdef CONFIG_KVM_XICS
+ .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
+ .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
+#endif
};
static int kvm_init_subcore_bitmap(void)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 5f0380db3eab..0c84d6bc8356 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -25,6 +25,7 @@
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
+#include <asm/io.h>
#define KVM_CMA_CHUNK_ORDER 18
@@ -286,3 +287,158 @@ void kvmhv_commence_exit(int trap)
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
+
+#ifdef CONFIG_KVM_XICS
+static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
+ u32 xisr)
+{
+ int i;
+
+ /*
+ * We access the mapped array here without a lock. That
+ * is safe because we never reduce the number of entries
+ * in the array and we never change the v_hwirq field of
+ * an entry once it is set.
+ *
+ * We have also carefully ordered the stores in the writer
+ * and the loads here in the reader, so that if we find a matching
+ * hwirq here, the associated GSI and irq_desc fields are valid.
+ */
+ for (i = 0; i < pimap->n_mapped; i++) {
+ if (xisr == pimap->mapped[i].r_hwirq) {
+ /*
+ * Order subsequent reads in the caller to serialize
+ * with the writer.
+ */
+ smp_rmb();
+ return &pimap->mapped[i];
+ }
+ }
+ return NULL;
+}
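The lockless lookup above depends on pairing the smp_wmb() in kvmppc_set_passthru_irq() with the smp_rmb() here: r_hwirq acts as the publish flag, so a reader that matches it is guaranteed to see the earlier stores to v_hwirq and desc. A rough user-space analogue of that publish/lookup pattern, written with C11 release/acquire atomics instead of the kernel barrier macros and with illustrative names only, might look like:

#include <stdatomic.h>
#include <stdio.h>

struct mapping {
        unsigned int v_hwirq;           /* payload, written first          */
        const char *desc;               /* payload, written first          */
        _Atomic unsigned int r_hwirq;   /* publish flag, written/read last */
};

/* Writer: fill in the payload, then publish with a release store
 * (the kernel code uses plain stores plus smp_wmb() instead). */
static void publish(struct mapping *m, unsigned int v, const char *d, unsigned int r)
{
        m->v_hwirq = v;
        m->desc = d;
        atomic_store_explicit(&m->r_hwirq, r, memory_order_release);
}

/* Reader: match on r_hwirq with an acquire load
 * (the kernel code uses a plain load plus smp_rmb() after the match). */
static const struct mapping *lookup(const struct mapping *map, int n, unsigned int xisr)
{
        int i;

        for (i = 0; i < n; i++) {
                if (atomic_load_explicit(&map[i].r_hwirq,
                                         memory_order_acquire) == xisr)
                        return &map[i];
        }
        return NULL;
}

int main(void)
{
        static struct mapping map[4];
        const struct mapping *m;

        publish(&map[0], 42, "example device", 0x123);
        m = lookup(map, 4, 0x123);
        if (m)
                printf("hwirq 0x123 -> guest irq %u (%s)\n", m->v_hwirq, m->desc);
        return 0;
}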
+
+/*
+ * If we have an interrupt that's not an IPI, check if we have a
+ * passthrough adapter and if so, check if this external interrupt
+ * is for the adapter.
+ * We will attempt to deliver the IRQ directly to the target VCPU's
+ * ICP, the virtual ICP (based on affinity - the xive value in ICS).
+ *
+ * If the delivery fails or if this is not for a passthrough adapter,
+ * return to the host to handle this interrupt. We earlier
+ * saved a copy of the XIRR in the PACA; it will be picked up by
+ * the host ICP driver.
+ */
+static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+{
+ struct kvmppc_passthru_irqmap *pimap;
+ struct kvmppc_irq_map *irq_map;
+ struct kvm_vcpu *vcpu;
+
+ vcpu = local_paca->kvm_hstate.kvm_vcpu;
+ if (!vcpu)
+ return 1;
+ pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
+ if (!pimap)
+ return 1;
+ irq_map = get_irqmap(pimap, xisr);
+ if (!irq_map)
+ return 1;
+
+ /* We're handling this interrupt, generic code doesn't need to */
+ local_paca->kvm_hstate.saved_xirr = 0;
+
+ return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
+}
+
+#else
+static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+{
+ return 1;
+}
+#endif
+
+/*
+ * Determine what sort of external interrupt is pending (if any).
+ * Returns:
+ * 0 if no interrupt is pending
+ * 1 if an interrupt is pending that needs to be handled by the host
+ * 2 if a passthrough interrupt is pending that needs completion in the host
+ * -1 if there was a guest wakeup IPI (which has now been cleared)
+ * -2 if a PCI passthrough external interrupt was handled
+ */
+
+long kvmppc_read_intr(void)
+{
+ unsigned long xics_phys;
+ u32 h_xirr;
+ __be32 xirr;
+ u32 xisr;
+ u8 host_ipi;
+
+ /* see if a host IPI is pending */
+ host_ipi = local_paca->kvm_hstate.host_ipi;
+ if (host_ipi)
+ return 1;
+
+ /* Now read the interrupt from the ICP */
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ if (unlikely(!xics_phys))
+ return 1;
+
+ /*
+ * Save XIRR for later. Since we get control in reverse endian
+ * on LE systems, save it byte reversed and fetch it back in
+ * host endian. Note that xirr is the value read from the
+ * XIRR register, while h_xirr is the host endian version.
+ */
+ xirr = _lwzcix(xics_phys + XICS_XIRR);
+ h_xirr = be32_to_cpu(xirr);
+ local_paca->kvm_hstate.saved_xirr = h_xirr;
+ xisr = h_xirr & 0xffffff;
+ /*
+ * Ensure that the store/load complete to guarantee that all side
+ * effects of loading from XIRR have completed
+ */
+ smp_mb();
+
+ /* if nothing pending in the ICP */
+ if (!xisr)
+ return 0;
+
+ /* We found something in the ICP...
+ *
+ * If it is an IPI, clear the MFRR and EOI it.
+ */
+ if (xisr == XICS_IPI) {
+ _stbcix(xics_phys + XICS_MFRR, 0xff);
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+ /*
+ * Need to ensure side effects of above stores
+ * complete before proceeding.
+ */
+ smp_mb();
+
+ /*
+ * We need to re-check host IPI now in case it got set in the
+ * meantime. If it's clear, we bounce the interrupt to the
+ * guest
+ */
+ host_ipi = local_paca->kvm_hstate.host_ipi;
+ if (unlikely(host_ipi != 0)) {
+ /* We raced with the host,
+ * we need to resend that IPI, bummer
+ */
+ _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ /* Let side effects complete */
+ smp_mb();
+ return 1;
+ }
+
+ /* OK, it's an IPI for us */
+ local_paca->kvm_hstate.saved_xirr = 0;
+ return -1;
+ }
+
+ return kvmppc_check_passthru(xisr, xirr);
+}
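As the comments in kvmppc_read_intr() note, the raw XIRR is saved byte-reversed and fetched back in host endianness, with the low 24 bits forming the XISR and the top byte the CPPR. A short stand-alone sketch of that decode step, assuming a little-endian host where be32_to_cpu() amounts to a byte swap, and with an example value chosen so the XISR comes out as the IPI source:

#include <stdint.h>
#include <stdio.h>

#define XICS_IPI 2      /* illustrative; stands in for the IPI source checked above */

/* Byte swap standing in for be32_to_cpu() on a little-endian host */
static uint32_t be32_to_host(uint32_t be)
{
        return ((be & 0x000000ffu) << 24) | ((be & 0x0000ff00u) << 8) |
               ((be & 0x00ff0000u) >> 8)  | ((be & 0xff000000u) >> 24);
}

int main(void)
{
        uint32_t xirr = 0x020000ffu;            /* raw, byte-reversed XIRR */
        uint32_t h_xirr = be32_to_host(xirr);   /* host-endian value       */
        uint32_t xisr = h_xirr & 0xffffff;      /* interrupt source        */
        uint32_t cppr = h_xirr >> 24;           /* current priority        */

        printf("h_xirr=0x%08x xisr=0x%06x cppr=0x%02x ipi=%s\n",
               h_xirr, xisr, cppr, xisr == XICS_IPI ? "yes" : "no");
        return 0;
}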
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 980d8a6f7284..82ff5de8b1e7 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
+#include <linux/kernel_stat.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -18,7 +19,10 @@
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
+#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
#include "book3s_xics.h"
@@ -26,9 +30,12 @@
int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
+int kvm_irq_bypass = 1;
+EXPORT_SYMBOL(kvm_irq_bypass);
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u32 new_irq);
+static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);
/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
@@ -708,10 +715,123 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
icp->rm_action |= XICS_RM_NOTIFY_EOI;
icp->rm_eoied_irq = irq;
}
+
+ if (state->host_irq) {
+ ++vcpu->stat.pthru_all;
+ if (state->intr_cpu != -1) {
+ int pcpu = raw_smp_processor_id();
+
+ pcpu = cpu_first_thread_sibling(pcpu);
+ ++vcpu->stat.pthru_host;
+ if (state->intr_cpu != pcpu) {
+ ++vcpu->stat.pthru_bad_aff;
+ xics_opal_rm_set_server(state->host_irq, pcpu);
+ }
+ state->intr_cpu = -1;
+ }
+ }
bail:
return check_too_hard(xics, icp);
}
+unsigned long eoi_rc;
+
+static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
+{
+ unsigned long xics_phys;
+ int64_t rc;
+
+ rc = pnv_opal_pci_msi_eoi(c, hwirq);
+
+ if (rc)
+ eoi_rc = rc;
+
+ iosync();
+
+ /* EOI it */
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ _stwcix(xics_phys + XICS_XIRR, xirr);
+}
+
+static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
+{
+ unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;
+
+ return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
+}
+
+/*
+ * Increment a per-CPU 32-bit unsigned integer variable.
+ * Safe to call in real-mode. Handles vmalloc'ed addresses
+ *
+ * ToDo: Make this work for any integral type
+ */
+
+static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
+{
+ unsigned long l;
+ unsigned int *raddr;
+ int cpu = smp_processor_id();
+
+ raddr = per_cpu_ptr(addr, cpu);
+ l = (unsigned long)raddr;
+
+ if (REGION_ID(l) == VMALLOC_REGION_ID) {
+ l = vmalloc_to_phys(raddr);
+ raddr = (unsigned int *)l;
+ }
+ ++*raddr;
+}
+
+/*
+ * We don't try to update the flags in the irq_desc 'istate' field in
+ * here as would happen in the normal IRQ handling path for several reasons:
+ * - state flags represent internal IRQ state and are not expected to be
+ * updated outside the IRQ subsystem
+ * - more importantly, these are useful for edge-triggered interrupts,
+ *   IRQ probing, etc., but we are only handling MSI/MSI-X interrupts
+ *   here and these states shouldn't apply to us.
+ *
+ * However, we do update irq_stats - we somewhat duplicate the code in
+ * kstat_incr_irqs_this_cpu() for this since this function is defined
+ * in irq/internal.h which we don't want to include here.
+ * The only difference is that desc->kstat_irqs is an allocated per CPU
+ * variable and could have been vmalloc'ed, so we can't directly
+ * call __this_cpu_inc() on it. The kstat structure is a static
+ * per CPU variable and it should be accessible by real-mode KVM.
+ *
+ */
+static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
+{
+ this_cpu_inc_rm(desc->kstat_irqs);
+ __this_cpu_inc(kstat.irqs_sum);
+}
+
+long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
+ u32 xirr,
+ struct kvmppc_irq_map *irq_map,
+ struct kvmppc_passthru_irqmap *pimap)
+{
+ struct kvmppc_xics *xics;
+ struct kvmppc_icp *icp;
+ u32 irq;
+
+ irq = irq_map->v_hwirq;
+ xics = vcpu->kvm->arch.xics;
+ icp = vcpu->arch.icp;
+
+ kvmppc_rm_handle_irq_desc(irq_map->desc);
+ icp_rm_deliver_irq(xics, icp, irq);
+
+ /* EOI the interrupt */
+ icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);
+
+ if (check_too_hard(xics, icp) == H_TOO_HARD)
+ return 2;
+ else
+ return -2;
+}
+
/* --- Non-real mode XICS-related built-in routines --- */
/**
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 975655573844..c3c1d1bcfc67 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -221,6 +221,13 @@ kvmppc_primary_no_guest:
li r3, 0 /* Don't wake on privileged (OS) doorbell */
b kvm_do_nap
+/*
+ * kvm_novcpu_wakeup
+ * Entered from kvm_start_guest if kvm_hstate.napping is set
+ * to NAPPING_NOVCPU
+ * r2 = kernel TOC
+ * r13 = paca
+ */
kvm_novcpu_wakeup:
ld r1, HSTATE_HOST_R1(r13)
ld r5, HSTATE_KVM_VCORE(r13)
@@ -230,6 +237,13 @@ kvm_novcpu_wakeup:
/* check the wake reason */
bl kvmppc_check_wake_reason
+ /*
+ * Restore volatile registers since we could have called
+ * a C routine in kvmppc_check_wake_reason.
+ * r5 = VCORE
+ */
+ ld r5, HSTATE_KVM_VCORE(r13)
+
/* see if any other thread is already exiting */
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
@@ -322,6 +336,11 @@ kvm_start_guest:
/* Check the wake reason in SRR1 to see why we got here */
bl kvmppc_check_wake_reason
+ /*
+ * kvmppc_check_wake_reason could invoke a C routine, but we
+ * have no volatile registers to restore when we return.
+ */
+
cmpdi r3, 0
bge kvm_no_guest
@@ -625,9 +644,11 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
38:
BEGIN_FTR_SECTION
- /* DPDES is shared between threads */
+ /* DPDES and VTB are shared between threads */
ld r8, VCORE_DPDES(r5)
+ ld r7, VCORE_VTB(r5)
mtspr SPRN_DPDES, r8
+ mtspr SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Mark the subcore state as inside guest */
@@ -787,10 +808,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_CIABR, r7
mtspr SPRN_TAR, r8
ld r5, VCPU_IC(r4)
- ld r6, VCPU_VTB(r4)
- mtspr SPRN_IC, r5
- mtspr SPRN_VTB, r6
ld r8, VCPU_EBBHR(r4)
+ mtspr SPRN_IC, r5
mtspr SPRN_EBBHR, r8
ld r5, VCPU_EBBRR(r4)
ld r6, VCPU_BESCR(r4)
@@ -881,6 +900,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
cmpwi r3, 512 /* 1 microsecond */
blt hdec_soon
+deliver_guest_interrupt:
ld r6, VCPU_CTR(r4)
ld r7, VCPU_XER(r4)
@@ -895,7 +915,6 @@ kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
-deliver_guest_interrupt:
/* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
@@ -1155,10 +1174,54 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* set, we know the host wants us out so let's do it now
*/
bl kvmppc_read_intr
+
+ /*
+ * Restore the active volatile registers after returning from
+ * a C function.
+ */
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+
+ /*
+ * kvmppc_read_intr return codes:
+ *
+ * Exit to host (r3 > 0)
+ * 1 An interrupt is pending that needs to be handled by the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ *
+ * 2 Passthrough that needs completion in the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
+ * to indicate to the host to complete handling the interrupt
+ *
+ * Before returning to the guest, we check if any CPU is heading
+ * out to the host and if so, we head out also. This check is
+ * done for the return values <= 0 described below.
+ *
+ * Return to guest (r3 <= 0)
+ * 0 No external interrupt is pending
+ * -1 A guest wakeup IPI (which has now been cleared)
+ * In either case, we return to guest to deliver any pending
+ * guest interrupts.
+ *
+ * -2 A PCI passthrough external interrupt was handled
+ * (interrupt was delivered directly to guest)
+ * Return to guest to deliver any pending guest interrupts.
+ */
+
+ cmpdi r3, 1
+ ble 1f
+
+ /* Return code = 2 */
+ li r12, BOOK3S_INTERRUPT_HV_RM_HARD
+ stw r12, VCPU_TRAP(r9)
+ b guest_exit_cont
+
+1: /* Return code <= 1 */
cmpdi r3, 0
bgt guest_exit_cont
- /* Check if any CPU is heading out to the host, if so head out too */
+ /* Return code <= 0 */
4: ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
@@ -1271,10 +1334,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
stw r6, VCPU_PSPB(r9)
std r7, VCPU_FSCR(r9)
mfspr r5, SPRN_IC
- mfspr r6, SPRN_VTB
mfspr r7, SPRN_TAR
std r5, VCPU_IC(r9)
- std r6, VCPU_VTB(r9)
std r7, VCPU_TAR(r9)
mfspr r8, SPRN_EBBHR
std r8, VCPU_EBBHR(r9)
@@ -1501,9 +1562,11 @@ kvmhv_switch_to_host:
isync
BEGIN_FTR_SECTION
- /* DPDES is shared between threads */
+ /* DPDES and VTB are shared between threads */
mfspr r7, SPRN_DPDES
+ mfspr r8, SPRN_VTB
std r7, VCORE_DPDES(r5)
+ std r8, VCORE_VTB(r5)
/* clear DPDES so we don't get guest doorbells in the host */
li r8, 0
mtspr SPRN_DPDES, r8
@@ -2213,10 +2276,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
ld r29, VCPU_GPR(R29)(r4)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)
-
+
/* Check the wake reason in SRR1 to see why we got here */
bl kvmppc_check_wake_reason
+ /*
+ * Restore volatile registers since we could have called a
+ * C routine in kvmppc_check_wake_reason
+ * r4 = VCPU
+ * r3 tells us whether we need to return to host or not
+ * WARNING: it gets checked further down;
+ * do not modify r3 until this check is done.
+ */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
/* clear our bit in vcore->napping_threads */
34: ld r5,HSTATE_KVM_VCORE(r13)
lbz r7,HSTATE_PTID(r13)
@@ -2230,7 +2303,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
li r0,0
stb r0,HSTATE_NAPPING(r13)
- /* See if the wake reason means we need to exit */
+ /* See if the wake reason saved in r3 means we need to exit */
stw r12, VCPU_TRAP(r4)
mr r9, r4
cmpdi r3, 0
@@ -2297,10 +2370,14 @@ machine_check_realmode:
* 0 if nothing needs to be done
* 1 if something happened that needs to be handled by the host
* -1 if there was a guest wakeup (IPI or msgsnd)
+ * -2 if we handled a PCI passthrough interrupt (returned by
+ * kvmppc_read_intr only)
*
* Also sets r12 to the interrupt vector for any interrupt that needs
* to be handled now by the host (0x500 for external interrupt), or zero.
- * Modifies r0, r6, r7, r8.
+ * Modifies all volatile registers (since it may call a C function).
+ * This routine calls kvmppc_read_intr, a C function, if an external
+ * interrupt is pending.
*/
kvmppc_check_wake_reason:
mfspr r6, SPRN_SRR1
@@ -2310,8 +2387,7 @@ FTR_SECTION_ELSE
rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
cmpwi r6, 8 /* was it an external interrupt? */
- li r12, BOOK3S_INTERRUPT_EXTERNAL
- beq kvmppc_read_intr /* if so, see what it was */
+ beq 7f /* if so, see what it was */
li r3, 0
li r12, 0
cmpwi r6, 6 /* was it the decrementer? */
@@ -2350,83 +2426,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r3, 1
blr
-/*
- * Determine what sort of external interrupt is pending (if any).
- * Returns:
- * 0 if no interrupt is pending
- * 1 if an interrupt is pending that needs to be handled by the host
- * -1 if there was a guest wakeup IPI (which has now been cleared)
- * Modifies r0, r6, r7, r8, returns value in r3.
- */
-kvmppc_read_intr:
- /* see if a host IPI is pending */
- li r3, 1
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne 1f
+ /* external interrupt - create a stack frame so we can call C */
+7: mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -PPC_MIN_STKFRM(r1)
+ bl kvmppc_read_intr
+ nop
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+ cmpdi r3, 1
+ ble 1f
- /* Now read the interrupt from the ICP */
- ld r6, HSTATE_XICS_PHYS(r13)
- li r7, XICS_XIRR
- cmpdi r6, 0
- beq- 1f
- lwzcix r0, r6, r7
/*
- * Save XIRR for later. Since we get in in reverse endian on LE
- * systems, save it byte reversed and fetch it back in host endian.
- */
- li r3, HSTATE_SAVED_XIRR
- STWX_BE r0, r3, r13
-#ifdef __LITTLE_ENDIAN__
- lwz r3, HSTATE_SAVED_XIRR(r13)
-#else
- mr r3, r0
-#endif
- rlwinm. r3, r3, 0, 0xffffff
- sync
- beq 1f /* if nothing pending in the ICP */
-
- /* We found something in the ICP...
- *
- * If it's not an IPI, stash it in the PACA and return to
- * the host, we don't (yet) handle directing real external
- * interrupts directly to the guest
+ * A return code of 2 means a PCI passthrough interrupt, but
+ * we need to return to the host to complete handling the
+ * interrupt. The trap reason is expected in r12 by the guest
+ * exit code.
*/
- cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
- bne 42f
-
- /* It's an IPI, clear the MFRR and EOI it */
- li r3, 0xff
- li r8, XICS_MFRR
- stbcix r3, r6, r8 /* clear the IPI */
- stwcix r0, r6, r7 /* EOI it */
- sync
-
- /* We need to re-check host IPI now in case it got set in the
- * meantime. If it's clear, we bounce the interrupt to the
- * guest
- */
- lbz r0, HSTATE_HOST_IPI(r13)
- cmpwi r0, 0
- bne- 43f
-
- /* OK, it's an IPI for us */
- li r12, 0
- li r3, -1
-1: blr
-
-42: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
- * the PACA earlier, it will be picked up by the host ICP driver
- */
- li r3, 1
- b 1b
-
-43: /* We raced with the host, we need to resend that IPI, bummer */
- li r0, IPI_PRIORITY
- stbcix r0, r6, r8 /* set the IPI */
- sync
- li r3, 1
- b 1b
+ li r12, BOOK3S_INTERRUPT_HV_RM_HARD
+1:
+ ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
+ addi r1, r1, PPC_MIN_STKFRM
+ mtlr r0
+ blr
/*
* Save away FP, VMX and VSX registers.
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e76f79a45988..826c541a12af 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -226,7 +226,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
*/
vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
- vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
+ to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
if (cpu_has_feature(CPU_FTR_ARCH_207S))
vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
svcpu->in_use = false;
@@ -448,6 +448,8 @@ void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
case PVR_POWER7:
case PVR_POWER7p:
case PVR_POWER8:
+ case PVR_POWER8E:
+ case PVR_POWER8NVL:
vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
BOOK3S_HFLAG_NEW_TLBIE;
break;
@@ -1361,6 +1363,9 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
+ case KVM_REG_PPC_VTB:
+ *val = get_reg_val(id, to_book3s(vcpu)->vtb);
+ break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
/*
@@ -1397,6 +1402,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
to_book3s(vcpu)->hior = set_reg_val(id, *val);
to_book3s(vcpu)->hior_explicit = true;
break;
+ case KVM_REG_PPC_VTB:
+ to_book3s(vcpu)->vtb = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 05aa11399a78..3bdc639157c1 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -99,6 +99,10 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
return 0;
}
+ /* Record which CPU this arrived on for passed-through interrupts */
+ if (state->host_irq)
+ state->intr_cpu = raw_smp_processor_id();
+
/* Attempt delivery */
icp_deliver_irq(xics, NULL, irq);
@@ -812,7 +816,7 @@ static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
return H_SUCCESS;
}
-static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
struct kvmppc_icp *icp = vcpu->arch.icp;
@@ -841,6 +845,7 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
@@ -892,6 +897,21 @@ EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */
+static void xics_debugfs_irqmap(struct seq_file *m,
+ struct kvmppc_passthru_irqmap *pimap)
+{
+ int i;
+
+ if (!pimap)
+ return;
+ seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
+ pimap->n_mapped);
+ for (i = 0; i < pimap->n_mapped; i++) {
+ seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
+ pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
+ }
+}
+
static int xics_debug_show(struct seq_file *m, void *private)
{
struct kvmppc_xics *xics = m->private;
@@ -913,6 +933,8 @@ static int xics_debug_show(struct seq_file *m, void *private)
t_check_resend = 0;
t_reject = 0;
+ xics_debugfs_irqmap(m, kvm->arch.pimap);
+
seq_printf(m, "=========\nICP state\n=========\n");
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -1252,6 +1274,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
{
struct kvmppc_xics *xics = kvm->arch.xics;
+ if (!xics)
+ return -ENODEV;
return ics_deliver_irq(xics, irq, level);
}
@@ -1418,3 +1442,34 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
return pin;
}
+
+void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
+ unsigned long host_irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ u16 idx;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics)
+ return;
+
+ ics->irq_state[idx].host_irq = host_irq;
+ ics->irq_state[idx].intr_cpu = -1;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
+
+void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
+ unsigned long host_irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ u16 idx;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics)
+ return;
+
+ ics->irq_state[idx].host_irq = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index a46b954055c4..2a50320b55ca 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -42,6 +42,8 @@ struct ics_irq_state {
u8 lsi; /* level-sensitive interrupt */
u8 asserted; /* Only for LSI */
u8 exists;
+ int intr_cpu;
+ u32 host_irq;
};
/* Atomic ICP state, updated with a single compare & swap */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 02b4672f7347..df3f2706d3e5 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -2038,7 +2038,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (type == KVMPPC_DEBUG_NONE)
continue;
- if (type & !(KVMPPC_DEBUG_WATCH_READ |
+ if (type & ~(KVMPPC_DEBUG_WATCH_READ |
KVMPPC_DEBUG_WATCH_WRITE |
KVMPPC_DEBUG_BREAKPOINT))
return -EINVAL;
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 29911a07bcdb..ddbf8f0284c0 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -743,7 +743,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
char *virt;
struct page **pages;
struct tlbe_priv *privs[2] = {};
- u64 *g2h_bitmap = NULL;
+ u64 *g2h_bitmap;
size_t array_len;
u32 sets;
int num_pages, ret, i;
@@ -779,41 +779,44 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
cfg->array / PAGE_SIZE;
- pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+ pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
if (ret < 0)
- goto err_pages;
+ goto free_pages;
if (ret != num_pages) {
num_pages = ret;
ret = -EFAULT;
- goto err_put_page;
+ goto put_pages;
}
virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
if (!virt) {
ret = -ENOMEM;
- goto err_put_page;
+ goto put_pages;
}
- privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
- GFP_KERNEL);
- privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
- GFP_KERNEL);
+ privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
+ if (!privs[0]) {
+ ret = -ENOMEM;
+ goto put_pages;
+ }
- if (!privs[0] || !privs[1]) {
+ privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
+ if (!privs[1]) {
ret = -ENOMEM;
- goto err_privs;
+ goto free_privs_first;
}
- g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
- GFP_KERNEL);
+ g2h_bitmap = kcalloc(params.tlb_sizes[1],
+ sizeof(*g2h_bitmap),
+ GFP_KERNEL);
if (!g2h_bitmap) {
ret = -ENOMEM;
- goto err_privs;
+ goto free_privs_second;
}
free_gtlb(vcpu_e500);
@@ -845,16 +848,14 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
-
-err_privs:
- kfree(privs[0]);
+ free_privs_second:
kfree(privs[1]);
-
-err_put_page:
+ free_privs_first:
+ kfree(privs[0]);
+ put_pages:
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
-
-err_pages:
+ free_pages:
kfree(pages);
return ret;
}
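The relabelled error paths above follow the usual goto-unwind idiom: each label frees exactly what was acquired before the failing step, in reverse order of acquisition. A generic, self-contained skeleton of the idiom (plain malloc/free, names unrelated to this function):

#include <stdio.h>
#include <stdlib.h>

/* Generic goto-unwind skeleton: each failure jumps to a label that releases
 * everything acquired before the failing step, in reverse order. */
static int setup(void)
{
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
                goto out;

        b = malloc(16);
        if (!b)
                goto free_a;

        c = malloc(16);
        if (!c)
                goto free_b;

        /* Success: use a, b and c, then release them normally. */
        printf("all allocations succeeded\n");
        free(c);
        free(b);
        free(a);
        return 0;

free_b:
        free(b);
free_a:
        free(a);
out:
        return -1;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}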
@@ -904,11 +905,9 @@ static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
- int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
- int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
if (e500_mmu_host_init(vcpu_e500))
- goto err;
+ goto free_vcpu;
vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
@@ -920,37 +919,39 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
vcpu_e500->gtlb_params[1].sets = 1;
- vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
+ vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
+ KVM_E500_TLB1_SIZE,
+ sizeof(*vcpu_e500->gtlb_arch),
+ GFP_KERNEL);
if (!vcpu_e500->gtlb_arch)
return -ENOMEM;
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
- vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
- vcpu_e500->gtlb_params[0].entries,
+ vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
+ sizeof(struct tlbe_ref),
GFP_KERNEL);
if (!vcpu_e500->gtlb_priv[0])
- goto err;
+ goto free_vcpu;
- vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
- vcpu_e500->gtlb_params[1].entries,
+ vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
+ sizeof(struct tlbe_ref),
GFP_KERNEL);
if (!vcpu_e500->gtlb_priv[1])
- goto err;
+ goto free_vcpu;
- vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
- vcpu_e500->gtlb_params[1].entries,
+ vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
+ sizeof(*vcpu_e500->g2h_tlb1_map),
GFP_KERNEL);
if (!vcpu_e500->g2h_tlb1_map)
- goto err;
+ goto free_vcpu;
vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
-
-err:
+ free_vcpu:
free_gtlb(vcpu_e500);
return -1;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6ce40dd6fe51..70963c845e96 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/irqbypass.h>
+#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -436,6 +438,16 @@ err_out:
return -EINVAL;
}
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
unsigned int i;
@@ -739,6 +751,42 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * irq_bypass_add_producer and irq_bypass_del_producer are only
+ * useful if the architecture supports PCI passthrough.
+ * irq_bypass_stop and irq_bypass_start are not needed and so
+ * kvm_ops are not defined for them.
+ */
+bool kvm_arch_has_irq_bypass(void)
+{
+ return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
+ (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
+
+ if (kvm->arch.kvm_ops->irq_bypass_add_producer)
+ return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
+
+ return 0;
+}
+
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm *kvm = irqfd->kvm;
+
+ if (kvm->arch.kvm_ops->irq_bypass_del_producer)
+ kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
+}
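Both bypass callbacks recover the enclosing irqfd from the embedded consumer member via container_of(). A minimal user-space illustration of that pattern, with invented struct names and a local copy of the macro:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct consumer { int token; };

struct irqfd {
        int gsi;
        struct consumer consumer;       /* embedded member handed to callbacks */
};

static void callback(struct consumer *cons)
{
        struct irqfd *irqfd = container_of(cons, struct irqfd, consumer);

        printf("consumer %p belongs to irqfd with gsi %d\n",
               (void *)cons, irqfd->gsi);
}

int main(void)
{
        struct irqfd fd = { .gsi = 42, .consumer = { .token = 1 } };

        callback(&fd.consumer);
        return 0;
}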
+
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
@@ -1167,6 +1215,19 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+#ifdef CONFIG_KVM_MPIC
+ if (kvm->arch.mpic)
+ return true;
+#endif
+#ifdef CONFIG_KVM_XICS
+ if (kvm->arch.xics)
+ return true;
+#endif
+ return false;
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 33d9daff5783..fb21990c0fb4 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -432,6 +432,28 @@ TRACE_EVENT(kvmppc_vcore_blocked,
__entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
);
+TRACE_EVENT(kvmppc_vcore_wakeup,
+ TP_PROTO(int do_sleep, __u64 ns),
+
+ TP_ARGS(do_sleep, ns),
+
+ TP_STRUCT__entry(
+ __field(__u64, ns)
+ __field(int, waited)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->ns = ns;
+ __entry->waited = do_sleep;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s time %lld ns, tgid=%d",
+ __entry->waited ? "wait" : "poll",
+ __entry->ns, __entry->tgid)
+);
+
TRACE_EVENT(kvmppc_run_vcpu_enter,
TP_PROTO(struct kvm_vcpu *vcpu),
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index ba21be15310f..ad5290005ca4 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -22,7 +22,7 @@ obj64-$(CONFIG_SMP) += locks.o
obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
ifeq ($(CONFIG_GENERIC_CSUM),)
-obj-y += checksum_$(CONFIG_WORD_SIZE).o checksum_wrappers.o
+obj-y += checksum_$(BITS).o checksum_wrappers.o
endif
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 43435c6892fb..eda7a96161ab 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -37,6 +37,7 @@ _GLOBAL(memset)
clrldi r5,r5,58
mtctr r0
beq 5f
+ .balign 16
4: std r4,0(r6)
std r4,8(r6)
std r4,16(r6)
@@ -90,6 +91,7 @@ _GLOBAL(backwards_memcpy)
andi. r0,r6,3
mtctr r7
bne 5f
+ .balign 16
1: lwz r7,-4(r4)
lwzu r8,-8(r4)
stw r7,-4(r6)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index f2cea6d5e764..1a4e570f7894 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,17 +7,16 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y := fault.o mem.o pgtable.o mmap.o \
- init_$(CONFIG_WORD_SIZE).o \
- pgtable_$(CONFIG_WORD_SIZE).o
+ init_$(BITS).o pgtable_$(BITS).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
-obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(CONFIG_WORD_SIZE)e.o
+obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
obj-$(CONFIG_PPC_STD_MMU_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
-obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o
ifeq ($(CONFIG_PPC_STD_MMU_64),y)
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bb1ffc559f38..d0b137d96df1 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -205,7 +205,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
enum ctx_state prev_state = exception_enter();
@@ -498,8 +498,8 @@ bad_area_nosemaphore:
bail:
exception_exit(prev_state);
return rc;
-
}
+NOKPROBE_SYMBOL(do_page_fault);
/*
* bad_page_fault is called when we have a bad access from the kernel.
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 0e4e9654bd2c..83ddc0e171b0 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -493,36 +493,6 @@ static void native_hugepage_invalidate(unsigned long vsid,
}
#endif
-static inline int __hpte_actual_psize(unsigned int lp, int psize)
-{
- int i, shift;
- unsigned int mask;
-
- /* start from 1 ignoring MMU_PAGE_4K */
- for (i = 1; i < MMU_PAGE_COUNT; i++) {
-
- /* invalid penc */
- if (mmu_psize_defs[psize].penc[i] == -1)
- continue;
- /*
- * encoding bits per actual page size
- * PTE LP actual page size
- * rrrr rrrz >=8KB
- * rrrr rrzz >=16KB
- * rrrr rzzz >=32KB
- * rrrr zzzz >=64KB
- * .......
- */
- shift = mmu_psize_defs[i].shift - LP_SHIFT;
- if (shift > LP_BITS)
- shift = LP_BITS;
- mask = (1 << shift) - 1;
- if ((lp & mask) == mmu_psize_defs[psize].penc[i])
- return i;
- }
- return -1;
-}
-
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
@@ -538,16 +508,8 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
size = MMU_PAGE_4K;
a_size = MMU_PAGE_4K;
} else {
- for (size = 0; size < MMU_PAGE_COUNT; size++) {
-
- /* valid entries have a shift value */
- if (!mmu_psize_defs[size].shift)
- continue;
-
- a_size = __hpte_actual_psize(lp, size);
- if (a_size != -1)
- break;
- }
+ size = hpte_page_sizes[lp] & 0xf;
+ a_size = hpte_page_sizes[lp] >> 4;
}
/* This works for all page sizes, and for 256M and 1T segments */
if (cpu_has_feature(CPU_FTR_ARCH_300))
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 0821556e16f4..90480e23fd2c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -93,6 +93,9 @@ static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);
+u8 hpte_page_sizes[1 << LP_BITS];
+EXPORT_SYMBOL_GPL(hpte_page_sizes);
+
struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
@@ -564,8 +567,60 @@ static void __init htab_scan_page_sizes(void)
#endif /* CONFIG_HUGETLB_PAGE */
}
+/*
+ * Fill in the hpte_page_sizes[] array.
+ * We go through the mmu_psize_defs[] array looking for all the
+ * supported base/actual page size combinations. Each combination
+ * has a unique pagesize encoding (penc) value in the low bits of
+ * the LP field of the HPTE. For actual page sizes less than 1MB,
+ * some of the upper LP bits are used for RPN bits, meaning that
+ * we need to fill in several entries in hpte_page_sizes[].
+ *
+ * In diagrammatic form, with r = RPN bits and z = page size bits:
+ * PTE LP actual page size
+ * rrrr rrrz >=8KB
+ * rrrr rrzz >=16KB
+ * rrrr rzzz >=32KB
+ * rrrr zzzz >=64KB
+ * ...
+ *
+ * The zzzz bits are implementation-specific but are chosen so that
+ * no encoding for a larger page size uses the same value in its
+ * low-order N bits as the encoding for the 2^(12+N) byte page size
+ * (if it exists).
+ */
+static void init_hpte_page_sizes(void)
+{
+ long int ap, bp;
+ long int shift, penc;
+
+ for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) {
+ if (!mmu_psize_defs[bp].shift)
+ continue; /* not a supported page size */
+ for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) {
+ penc = mmu_psize_defs[bp].penc[ap];
+ if (penc == -1)
+ continue;
+ shift = mmu_psize_defs[ap].shift - LP_SHIFT;
+ if (shift <= 0)
+ continue; /* should never happen */
+ /*
+ * For page sizes less than 1MB, this loop
+ * replicates the entry for all possible values
+ * of the rrrr bits.
+ */
+ while (penc < (1 << LP_BITS)) {
+ hpte_page_sizes[penc] = (ap << 4) | bp;
+ penc += 1 << shift;
+ }
+ }
+ }
+}
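init_hpte_page_sizes() packs the (actual, base) page-size pair for each LP encoding into a single byte, which hpte_decode() later recovers with a mask and a shift. A stand-alone sketch of just that pack/unpack step, with made-up MMU_PAGE_* index values standing in for the real mmu_psize_defs[] table:

#include <stdint.h>
#include <stdio.h>

/* Illustrative page-size indices; the real ones come from mmu_psize_defs[] */
enum { MMU_PAGE_4K = 0, MMU_PAGE_64K = 3, MMU_PAGE_16M = 7 };

/* Pack as in init_hpte_page_sizes(): actual size in the high nibble,
 * base size in the low nibble. */
static uint8_t pack_sizes(int actual, int base)
{
        return (uint8_t)((actual << 4) | base);
}

/* Unpack as in hpte_decode(): low nibble is the base size,
 * high nibble is the actual size. */
static void unpack_sizes(uint8_t enc, int *actual, int *base)
{
        *base = enc & 0xf;
        *actual = enc >> 4;
}

int main(void)
{
        uint8_t enc = pack_sizes(MMU_PAGE_64K, MMU_PAGE_64K);
        int actual, base;

        unpack_sizes(enc, &actual, &base);
        printf("enc=0x%02x -> base=%d actual=%d\n", enc, base, actual);
        return 0;
}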
+
static void __init htab_init_page_sizes(void)
{
+ init_hpte_page_sizes();
+
if (!debug_pagealloc_enabled()) {
/*
* Pick a size for the linear mapping. Currently, we only
@@ -711,6 +766,29 @@ int remove_section_mapping(unsigned long start, unsigned long end)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
+static void update_hid_for_hash(void)
+{
+ unsigned long hid0;
+ unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
+
+ asm volatile("ptesync": : :"memory");
+ /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
+ asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
+ /*
+ * now switch the HID
+ */
+ hid0 = mfspr(SPRN_HID0);
+ hid0 &= ~HID0_POWER9_RADIX;
+ mtspr(SPRN_HID0, hid0);
+ asm volatile("isync": : :"memory");
+
+ /* Wait for it to happen */
+ while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
+ cpu_relax();
+}
+
static void __init hash_init_partition_table(phys_addr_t hash_table,
unsigned long htab_size)
{
@@ -737,6 +815,8 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
*/
partition_tb->patb1 = 0;
pr_info("Partition table %p\n", partition_tb);
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_hash();
/*
* update partition table control register,
* 64 K size.
@@ -1460,6 +1540,29 @@ out_exit:
local_irq_restore(flags);
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline void tm_flush_hash_page(int local)
+{
+ /*
+ * Transactions are not aborted by tlbiel, only tlbie. Without this,
+ * syncing a page back to a block device with PIO could pick up
+ * transactional data (bad!), so we force an abort here. Before the
+ * sync the page will be made read-only, which will flush_hash_page.
+ * BIG ISSUE here: if the kernel uses a page from userspace without
+ * unmapping it first, it may see the speculated version.
+ */
+ if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ tm_enable();
+ tm_abort(TM_CAUSE_TLBI);
+ }
+}
+#else
+static inline void tm_flush_hash_page(int local)
+{
+}
+#endif
+
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
@@ -1486,21 +1589,7 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
ssize, local);
} pte_iterate_hashed_end();
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Transactions are not aborted by tlbiel, only tlbie.
- * Without, syncing a page back to a block device w/ PIO could pick up
- * transactional data (bad!) so we force an abort here. Before the
- * sync the page will be made read-only, which will flush_hash_page.
- * BIG ISSUE here: if the kernel uses a page from userspace without
- * unmapping it first, it may see the speculated version.
- */
- if (local && cpu_has_feature(CPU_FTR_TM) &&
- current->thread.regs &&
- MSR_TM_ACTIVE(current->thread.regs->msr)) {
- tm_enable();
- tm_abort(TM_CAUSE_TLBI);
- }
-#endif
+ tm_flush_hash_page(local);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1557,22 +1646,7 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
MMU_PAGE_16M, ssize, local);
}
tm_abort:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Transactions are not aborted by tlbiel, only tlbie.
- * Without, syncing a page back to a block device w/ PIO could pick up
- * transactional data (bad!) so we force an abort here. Before the
- * sync the page will be made read-only, which will flush_hash_page.
- * BIG ISSUE here: if the kernel uses a page from userspace without
- * unmapping it first, it may see the speculated version.
- */
- if (local && cpu_has_feature(CPU_FTR_TM) &&
- current->thread.regs &&
- MSR_TM_ACTIVE(current->thread.regs->msr)) {
- tm_enable();
- tm_abort(TM_CAUSE_TLBI);
- }
-#endif
- return;
+ tm_flush_hash_page(local);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7372ee13eb1e..a5d3ecdabc44 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1019,8 +1019,15 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_READ;
+
+ /*
+ * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
+ * as 0 and _PAGE_RO has to be set when a page is not writable
+ */
if (write)
mask |= _PAGE_WRITE;
+ else
+ mask |= _PAGE_RO;
if ((pte_val(pte) & mask) != mask)
return 0;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 448685fbf27c..8a7c38b8d335 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -137,7 +137,7 @@ void __init MMU_init(void)
if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
memblock_enforce_memory_limit(memblock.memory.regions[0].size);
- printk(KERN_WARNING "Only using first contiguous memory region");
+ pr_warn("Only using first contiguous memory region\n");
#else
wii_memory_fixups();
#endif
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index da6a2168ae9e..e0f1c33601dd 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -15,6 +15,9 @@
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
+#include <linux/migrate.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
#include <asm/mmu_context.h>
static DEFINE_MUTEX(mem_list_mutex);
@@ -72,6 +75,55 @@ bool mm_iommu_preregistered(void)
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
+/*
+ * Taken from alloc_migrate_target with changes to remove CMA allocations
+ */
+struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
+ int **resultp)
+{
+ gfp_t gfp_mask = GFP_USER;
+ struct page *new_page;
+
+ if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+ return NULL;
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ /*
+ * We don't want the allocation to force an OOM if possible
+ */
+ new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
+ return new_page;
+}
+
+static int mm_iommu_move_page_from_cma(struct page *page)
+{
+ int ret = 0;
+ LIST_HEAD(cma_migrate_pages);
+
+ /* Ignore huge pages for now */
+ if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+ return -EBUSY;
+
+ lru_add_drain();
+ ret = isolate_lru_page(page);
+ if (ret)
+ return ret;
+
+ list_add(&page->lru, &cma_migrate_pages);
+ put_page(page); /* Drop the gup reference */
+
+ ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
+ NULL, 0, MIGRATE_SYNC, MR_CMA);
+ if (ret) {
+ if (!list_empty(&cma_migrate_pages))
+ putback_movable_pages(&cma_migrate_pages);
+ }
+
+ return 0;
+}
+
long mm_iommu_get(unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem)
{
@@ -124,15 +176,36 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
for (i = 0; i < entries; ++i) {
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
1/* pages */, 1/* iswrite */, &page)) {
+ ret = -EFAULT;
for (j = 0; j < i; ++j)
- put_page(pfn_to_page(
- mem->hpas[j] >> PAGE_SHIFT));
+ put_page(pfn_to_page(mem->hpas[j] >>
+ PAGE_SHIFT));
vfree(mem->hpas);
kfree(mem);
- ret = -EFAULT;
goto unlock_exit;
}
-
+ /*
+ * If we get a page from the CMA zone, since we are going to
+ * be pinning these entries, we might as well move them out
+ * of the CMA zone if possible. NOTE: faulting in + migration
+ * can be expensive. Batching can be considered later.
+ */
+ if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
+ if (mm_iommu_move_page_from_cma(page))
+ goto populate;
+ if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
+ 1/* pages */, 1/* iswrite */,
+ &page)) {
+ ret = -EFAULT;
+ for (j = 0; j < i; ++j)
+ put_page(pfn_to_page(mem->hpas[j] >>
+ PAGE_SHIFT));
+ vfree(mem->hpas);
+ kfree(mem);
+ goto unlock_exit;
+ }
+ }
+populate:
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
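Putting the two hunks above together, the per-entry pinning flow in mm_iommu_get() now reads roughly as below (a condensed sketch, error unwinding omitted; gup_one() is a hypothetical shorthand for the single-page get_user_pages_fast() call):

	for (i = 0; i < entries; ++i) {
		if (!gup_one(ua + (i << PAGE_SHIFT), &page))
			goto fail;			/* -EFAULT, undo earlier pins */

		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
			/* try to migrate out of CMA before pinning long-term;
			 * on failure just keep the original pinned page */
			if (!mm_iommu_move_page_from_cma(page) &&
			    !gup_one(ua + (i << PAGE_SHIFT), &page))
				goto fail;		/* re-pin of the copy failed */
		}

		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}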
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 34079302cc17..f4f437cbabf1 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -35,7 +35,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
#endif
changed = !pmd_same(*(pmdp), entry);
if (changed) {
- __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
+ __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
@@ -116,3 +116,12 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/* For use by kexec */
+void mmu_cleanup_all(void)
+{
+ if (radix_enabled())
+ radix__mmu_cleanup_all();
+ else if (mmu_hash_ops.hpte_clear_all)
+ mmu_hash_ops.hpte_clear_all();
+}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index af897d91d09f..ed7bddc456b7 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -294,6 +294,32 @@ found:
return;
}
+static void update_hid_for_radix(void)
+{
+ unsigned long hid0;
+ unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
+
+ asm volatile("ptesync": : :"memory");
+ /* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
+ /* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
+ asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
+ /*
+ * now switch the HID
+ */
+ hid0 = mfspr(SPRN_HID0);
+ hid0 |= HID0_POWER9_RADIX;
+ mtspr(SPRN_HID0, hid0);
+ asm volatile("isync": : :"memory");
+
+ /* Wait for it to happen */
+ while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
+ cpu_relax();
+}
+
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
@@ -345,6 +371,8 @@ void __init radix__early_init_mmu(void)
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
radix_init_native();
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ update_hid_for_radix();
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
@@ -368,6 +396,18 @@ void radix__early_init_mmu_secondary(void)
}
}
+void radix__mmu_cleanup_all(void)
+{
+ unsigned long lpcr;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
+ mtspr(SPRN_PTCR, 0);
+ radix__flush_tlb_all();
+ }
+}
+
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 0b6fb244d0a1..911fdfb63ec1 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
if (changed) {
if (!is_vm_hugetlb_page(vma))
assert_pte_locked(vma->vm_mm, address);
- __ptep_set_access_flags(ptep, entry);
+ __ptep_set_access_flags(vma->vm_mm, ptep, entry);
flush_tlb_page(vma, address);
}
return changed;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 9f1983404e1a..e2974fcd20f1 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -178,11 +178,9 @@ BEGIN_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
-8: /* invalid EA */
- li r10,0 /* BAD_VSID */
- li r9,0 /* BAD_VSID */
- li r11,SLB_VSID_USER /* flags don't much matter */
- b slb_finish_load
+8: /* invalid EA - return an error indication */
+ crset 4*cr0+eq /* indicate failure */
+ blr
/*
* Finish loading of an SLB entry and return
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 48df05ef5231..0e49ec541ab5 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -400,3 +400,27 @@ void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
+
+void radix__flush_tlb_all(void)
+{
+ unsigned long rb, prs, r, rs;
+ unsigned long ric = RIC_FLUSH_ALL;
+
+ rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
+ prs = 0; /* partition scoped */
+ r = 1; /* radix format */
+ rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */
+
+ asm volatile("ptesync": : :"memory");
+ /*
+ * now flush guest entries by passing PRS = 1 and LPID != 0
+ */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
+ /*
+ * now flush host entries by passing PRS = 0 and LPID == 0
+ */
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index d5301b6f20d0..89f70073dec8 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -40,6 +40,8 @@
#define PPC_BLR() EMIT(PPC_INST_BLR)
#define PPC_BLRL() EMIT(PPC_INST_BLRL)
#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | ___PPC_RT(r))
+#define PPC_BCTR() EMIT(PPC_INST_BCTR)
+#define PPC_MTCTR(r) EMIT(PPC_INST_MTCTR | ___PPC_RT(r))
#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | ___PPC_RT(d) | \
___PPC_RA(a) | IMM_L(i))
#define PPC_MR(d, a) PPC_OR(d, a, a)
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 5046d6f65c02..62fa7589db2b 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -16,30 +16,33 @@
/*
* Stack layout:
+ * Ensure the top half (up to local_tmp_var) stays consistent
+ * with our redzone usage.
*
* [ prev sp ] <-------------
* [ nv gpr save area ] 8*8 |
+ * [ tail_call_cnt ] 8 |
+ * [ local_tmp_var ] 8 |
* fp (r31) --> [ ebpf stack space ] 512 |
- * [ local/tmp var space ] 16 |
* [ frame header ] 32/112 |
* sp (r1) ---> [ stack pointer ] --------------
*/
-/* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS 16
/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
#define BPF_PPC_STACK_SAVE (8*8)
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS 16
/* Ensure this is quadword aligned */
-#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
- MAX_BPF_STACK + BPF_PPC_STACK_SAVE)
+#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
+ BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
#ifndef __ASSEMBLY__
/* BPF register usage */
-#define SKB_HLEN_REG (MAX_BPF_REG + 0)
-#define SKB_DATA_REG (MAX_BPF_REG + 1)
-#define TMP_REG_1 (MAX_BPF_REG + 2)
-#define TMP_REG_2 (MAX_BPF_REG + 3)
+#define SKB_HLEN_REG (MAX_BPF_JIT_REG + 0)
+#define SKB_DATA_REG (MAX_BPF_JIT_REG + 1)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 3)
/* BPF to ppc register mappings */
static const int b2p[] = {
@@ -59,12 +62,16 @@ static const int b2p[] = {
/* frame pointer aka BPF_REG_10 */
[BPF_REG_FP] = 31,
/* eBPF jit internal registers */
+ [BPF_REG_AX] = 2,
[SKB_HLEN_REG] = 25,
[SKB_DATA_REG] = 26,
[TMP_REG_1] = 9,
[TMP_REG_2] = 10
};
+/* PPC NVR range -- update this if we ever use NVRs below r24 */
+#define BPF_PPC_NVR_MIN 24
+
/* Assembly helpers */
#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
u64 func##_negative_offset(u64 r3, u64 r4); \
@@ -82,6 +89,7 @@ DECLARE_LOAD_FUNC(sk_load_byte);
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
#define SEEN_SKB 0x4000 /* uses sk_buff */
+#define SEEN_TAILCALL 0x8000 /* uses tail calls */
struct codegen_context {
/*
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 6073b78516f6..0fe98a567125 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -17,6 +17,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
+#include <linux/bpf.h>
#include "bpf_jit64.h"
@@ -58,6 +59,40 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}
+/*
+ * When not setting up our own stackframe, the redzone usage is:
+ *
+ * [ prev sp ] <-------------
+ * [ ... ] |
+ * sp (r1) ---> [ stack pointer ] --------------
+ * [ nv gpr save area ] 8*8
+ * [ tail_call_cnt ] 8
+ * [ local_tmp_var ] 8
+ * [ unused red zone ] 208 bytes protected
+ */
+static int bpf_jit_stack_local(struct codegen_context *ctx)
+{
+ if (bpf_has_stack_frame(ctx))
+ return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
+ else
+ return -(BPF_PPC_STACK_SAVE + 16);
+}
+
+static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
+{
+ return bpf_jit_stack_local(ctx) + 8;
+}
+
+static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+{
+ if (reg >= BPF_PPC_NVR_MIN && reg < 32)
+ return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
+ - (8 * (32 - reg));
+
+ pr_err("BPF JIT is asking about unknown registers");
+ BUG();
+}
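For a concrete sense of the offsets these three helpers produce, here is a small user-space sketch that evaluates them (assumed constants: MAX_BPF_STACK = 512, BPF_PPC_STACK_SAVE = 64, STACK_FRAME_MIN_SIZE = 32 for ELFv2 — 112 on ELFv1 — so treat the printed numbers as illustrative, not authoritative):

	#include <stdio.h>

	#define STACK_FRAME_MIN_SIZE 32			/* ELFv2; 112 on ELFv1 */
	#define MAX_BPF_STACK        512
	#define BPF_PPC_STACK_LOCALS 16
	#define BPF_PPC_STACK_SAVE   (8 * 8)
	#define BPF_PPC_STACKFRAME   (STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
				      BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

	int main(void)
	{
		int has_frame = 1;	/* SEEN_FUNC or BPF_REG_FP in use */
		int local = has_frame ? STACK_FRAME_MIN_SIZE + MAX_BPF_STACK
				      : -(BPF_PPC_STACK_SAVE + 16);

		printf("stack frame size : %d\n", BPF_PPC_STACKFRAME);	/* 624 */
		printf("local_tmp_var    : r1%+d\n", local);		/* +544 */
		printf("tail_call_cnt    : r1%+d\n", local + 8);	/* +552 */
		printf("r31 save slot    : r1%+d\n",
		       (has_frame ? BPF_PPC_STACKFRAME : 0) - 8 * (32 - 31));
		return 0;
	}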
+
static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
/*
@@ -73,36 +108,27 @@ static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
-#ifdef PPC64_ELF_ABI_v1
- /* func points to the function descriptor */
- PPC_LI64(b2p[TMP_REG_2], func);
- /* Load actual entry point from function descriptor */
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- PPC_MTLR(b2p[TMP_REG_1]);
+ int i;
+
/*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
+ * Initialize tail_call_cnt if we do tail calls.
+ * Otherwise, put in NOPs so that it can be skipped when we are
+ * invoked through a tail call.
*/
- PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
-#else
- /* We can clobber r12 */
- PPC_FUNC_ADDR(12, func);
- PPC_MTLR(12);
-#endif
- PPC_BLRL();
-}
+ if (ctx->seen & SEEN_TAILCALL) {
+ PPC_LI(b2p[TMP_REG_1], 0);
+ /* this goes in the redzone */
+ PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
+ } else {
+ PPC_NOP();
+ PPC_NOP();
+ }
-static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
-{
- int i;
- bool new_stack_frame = bpf_has_stack_frame(ctx);
+#define BPF_TAILCALL_PROLOGUE_SIZE 8
- if (new_stack_frame) {
+ if (bpf_has_stack_frame(ctx)) {
/*
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
@@ -122,9 +148,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
*/
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
if (bpf_is_seen_register(ctx, i))
- PPC_BPF_STL(b2p[i], 1,
- (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
- (8 * (32 - b2p[i])));
+ PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
/*
* Save additional non-volatile regs if we cache skb
@@ -132,53 +156,142 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
*/
if (ctx->seen & SEEN_SKB) {
PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
- BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
- BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
bpf_jit_emit_skb_loads(image, ctx);
}
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, BPF_REG_FP))
PPC_ADDI(b2p[BPF_REG_FP], 1,
- BPF_PPC_STACKFRAME - BPF_PPC_STACK_SAVE);
+ STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
int i;
- bool new_stack_frame = bpf_has_stack_frame(ctx);
-
- /* Move result to r3 */
- PPC_MR(3, b2p[BPF_REG_0]);
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
if (bpf_is_seen_register(ctx, i))
- PPC_BPF_LL(b2p[i], 1,
- (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
- (8 * (32 - b2p[i])));
+ PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
/* Restore non-volatile registers used for skb cache */
if (ctx->seen & SEEN_SKB) {
PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
- BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
- BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
}
/* Tear down our stack frame */
- if (new_stack_frame) {
+ if (bpf_has_stack_frame(ctx)) {
PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
if (ctx->seen & SEEN_FUNC) {
PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
PPC_MTLR(0);
}
}
+}
+
+static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ /* Move result to r3 */
+ PPC_MR(3, b2p[BPF_REG_0]);
PPC_BLR();
}
+static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+ /* func points to the function descriptor */
+ PPC_LI64(b2p[TMP_REG_2], func);
+ /* Load actual entry point from function descriptor */
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+ /* ... and move it to LR */
+ PPC_MTLR(b2p[TMP_REG_1]);
+ /*
+ * Load TOC from function descriptor at offset 8.
+ * We can clobber r2 since we get called through a
+ * function pointer (so caller will save/restore r2)
+ * and since we don't use a TOC ourselves.
+ */
+ PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+ /* We can clobber r12 */
+ PPC_FUNC_ADDR(12, func);
+ PPC_MTLR(12);
+#endif
+ PPC_BLRL();
+}
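For context on the PPC64_ELF_ABI_v1 branch above: under ELFv1 a function pointer addresses a three-doubleword function descriptor rather than code, which is why the JIT loads the entry point from offset 0 and the TOC from offset 8. A rough sketch of that layout (an illustration of the ABI convention, not a structure defined by this patch):

	struct elfv1_func_desc {
		unsigned long entry;	/* code address, loaded above and moved to LR */
		unsigned long toc;	/* TOC pointer, loaded into r2 (offset 8)     */
		unsigned long env;	/* environment pointer, unused by the JIT     */
	};

On ELFv2 there is no descriptor, so the #else branch simply materializes the address in r12 and branches through LR.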
+
+static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+{
+ /*
+ * By now, the eBPF program has already set up parameters in r3, r4 and r5:
+ * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
+ * r4/BPF_REG_2 - pointer to bpf_array
+ * r5/BPF_REG_3 - index in bpf_array
+ */
+ int b2p_bpf_array = b2p[BPF_REG_2];
+ int b2p_index = b2p[BPF_REG_3];
+
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+ PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
+ PPC_BCC(COND_GE, out);
+
+ /*
+ * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+ PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+ PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
+ PPC_BCC(COND_GT, out);
+
+ /*
+ * tail_call_cnt++;
+ */
+ PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
+ PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+
+ /* prog = array->ptrs[index]; */
+ PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
+ PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
+ PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+ PPC_CMPLDI(b2p[TMP_REG_1], 0);
+ PPC_BCC(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+#ifdef PPC64_ELF_ABI_v1
+ /* skip past the function descriptor */
+ PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
+ FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
+#else
+ PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
+#endif
+ PPC_MTCTR(b2p[TMP_REG_1]);
+
+ /* tear down stack, restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ PPC_BCTR();
+ /* out: */
+}
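One subtlety worth spelling out: BPF_TAILCALL_PROLOGUE_SIZE is the 8 bytes of the two instructions emitted at the top of bpf_jit_build_prologue(), so adding it to prog->bpf_func makes a tail call land just past the callee's tail_call_cnt initialization and the counter survives across the chain. A condensed view of the emitted layout (a sketch; the register and the -72 offset follow the b2p mapping and the redzone math above, assuming BPF_PPC_STACK_SAVE = 64):

	/* callee image, ELFv2 case:
	 *   +0 : li   r9, 0		\  tail_call_cnt init (or two nops if the
	 *   +4 : std  r9, -72(r1)	/  program was seen to make no tail calls)
	 *   +8 : ...rest of prologue...  <-- tail calls branch to bpf_func + 8
	 */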
+
/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx,
@@ -200,7 +313,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
- int stack_local_off;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -219,9 +331,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* optimization but everything else should work without
* any issues.
*/
- if (dst_reg >= 24 && dst_reg <= 31)
+ if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
bpf_set_seen_register(ctx, insn[i].dst_reg);
- if (src_reg >= 24 && src_reg <= 31)
+ if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
bpf_set_seen_register(ctx, insn[i].src_reg);
switch (code) {
@@ -490,25 +602,12 @@ bpf_alu32_trunc:
* Way easier and faster(?) to store the value
* into stack and then use ldbrx
*
- * First, determine where in stack we can store
- * this:
- * - if we have allotted a stack frame, then we
- * will utilize the area set aside by
- * BPF_PPC_STACK_LOCALS
- * - else, we use the area beneath the NV GPR
- * save area
- *
* ctx->seen will be reliable in pass2, but
* the instructions generated will remain the
* same across all passes
*/
- if (bpf_has_stack_frame(ctx))
- stack_local_off = STACK_FRAME_MIN_SIZE;
- else
- stack_local_off = -(BPF_PPC_STACK_SAVE + 8);
-
- PPC_STD(dst_reg, 1, stack_local_off);
- PPC_ADDI(b2p[TMP_REG_1], 1, stack_local_off);
+ PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+ PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
break;
}
@@ -668,7 +767,7 @@ emit_clear:
/* Save skb pointer if we need to re-cache skb data */
if (bpf_helper_changes_skb_data(func))
- PPC_BPF_STL(3, 1, STACK_FRAME_MIN_SIZE);
+ PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -678,7 +777,7 @@ emit_clear:
/* refresh skb cache */
if (bpf_helper_changes_skb_data(func)) {
/* reload skb pointer to r3 */
- PPC_BPF_LL(3, 1, STACK_FRAME_MIN_SIZE);
+ PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_skb_loads(image, ctx);
}
break;
@@ -837,9 +936,12 @@ common_load:
break;
/*
- * TODO: Tail call
+ * Tail call
*/
case BPF_JMP | BPF_CALL | BPF_X:
+ ctx->seen |= SEEN_TAILCALL;
+ bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ break;
default:
/*
@@ -872,21 +974,37 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
int pass;
int flen;
struct bpf_binary_header *bpf_hdr;
+ struct bpf_prog *org_fp = fp;
+ struct bpf_prog *tmp_fp;
+ bool bpf_blinded = false;
if (!bpf_jit_enable)
- return fp;
+ return org_fp;
+
+ tmp_fp = bpf_jit_blind_constants(org_fp);
+ if (IS_ERR(tmp_fp))
+ return org_fp;
+
+ if (tmp_fp != org_fp) {
+ bpf_blinded = true;
+ fp = tmp_fp;
+ }
flen = fp->len;
addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL)
- return fp;
+ if (addrs == NULL) {
+ fp = org_fp;
+ goto out;
+ }
+
+ memset(&cgctx, 0, sizeof(struct codegen_context));
- cgctx.idx = 0;
- cgctx.seen = 0;
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
/* We hit something illegal or unsupported. */
+ fp = org_fp;
goto out;
+ }
/*
* Pretend to build prologue, given the features we've seen. This will
@@ -901,8 +1019,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
bpf_jit_fill_ill_insns);
- if (!bpf_hdr)
+ if (!bpf_hdr) {
+ fp = org_fp;
goto out;
+ }
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
@@ -939,6 +1059,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
out:
kfree(addrs);
+
+ if (bpf_blinded)
+ bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
+
return fp;
}
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index b129d007e7fe..b19265de9178 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -43,7 +43,7 @@ static unsigned int profiling_interval;
#define SPU_PC_MASK 0xFFFF
DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
-unsigned long oprof_spu_smpl_arry_lck_flags;
+static unsigned long oprof_spu_smpl_arry_lck_flags;
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index ef2142ff7dbd..83d2b4ef7f0d 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -36,7 +36,7 @@
static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
-int spu_prof_num_nodes;
+static int spu_prof_num_nodes;
struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
@@ -88,7 +88,7 @@ static void spu_buff_add(unsigned long int value, int spu)
/* This function copies the per SPU buffers to the
* OProfile kernel buffer.
*/
-void sync_spu_buff(void)
+static void sync_spu_buff(void)
{
int spu;
unsigned long flags;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4ed377f0f7b2..72c27b8d2cf3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2158,7 +2158,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
irq_exit();
}
-int power_pmu_prepare_cpu(unsigned int cpu)
+static int power_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 7aa37236bb70..43fabb3cae0f 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -131,7 +131,7 @@ static const struct attribute_group *attr_groups[] = {
#define HGPCI_MAX_DATA_BYTES \
(HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
-DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
+static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
struct hv_gpci_request_buffer {
struct hv_get_perf_counter_info_params params;
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index a383c23a9070..7963658dbc22 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -416,7 +416,7 @@ static struct attribute *power7_pmu_format_attr[] = {
NULL,
};
-struct attribute_group power7_pmu_format_group = {
+static struct attribute_group power7_pmu_format_group = {
.name = "format",
.attrs = power7_pmu_format_attr,
};
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 5fde2b192fec..ab830d106ec5 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -204,7 +204,7 @@ static struct attribute *power8_pmu_format_attr[] = {
NULL,
};
-struct attribute_group power8_pmu_format_group = {
+static struct attribute_group power8_pmu_format_group = {
.name = "format",
.attrs = power8_pmu_format_attr,
};
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 788346303852..8e9a81967ff8 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -119,7 +119,7 @@ static struct attribute *power9_pmu_format_attr[] = {
NULL,
};
-struct attribute_group power9_pmu_format_group = {
+static struct attribute_group power9_pmu_format_group = {
.name = "format",
.attrs = power9_pmu_format_attr,
};
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 5ecce543103e..a886c2c22097 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -204,7 +204,7 @@ static void pika_setup_critical_temp(struct device_node *np,
i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
return;
}
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 0035d146df73..fe4d4eac7427 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
status |= (ignore | mask);
if (status == 0xff)
- return NO_IRQ;
+ return 0;
cpld_irq = ffz(status) + offset;
@@ -110,14 +110,14 @@ static void cpld_pic_cascade(struct irq_desc *desc)
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
- if (irq != NO_IRQ) {
+ if (irq) {
generic_handle_irq(irq);
return;
}
irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
&cpld_regs->misc_mask);
- if (irq != NO_IRQ) {
+ if (irq) {
generic_handle_irq(irq);
return;
}
@@ -177,7 +177,7 @@ mpc5121_ads_cpld_pic_init(void)
goto end;
cascade_irq = irq_of_parse_and_map(np, 0);
- if (cascade_irq == NO_IRQ)
+ if (!cascade_irq)
goto end;
/*
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index d93dd4acf40b..cec3f88f153d 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -473,7 +473,7 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
}
lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (lpbfifo.irq == NO_IRQ) {
+ if (!lpbfifo.irq) {
dev_err(&pdev->dev, "mapping irq failed\n");
ret = -ENODEV;
goto err0;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 4fe2074c88cb..fc98912f42cf 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -511,7 +511,7 @@ unsigned int mpc52xx_get_irq(void)
irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
} else {
- return NO_IRQ;
+ return 0;
}
return irq_linear_revmap(mpc52xx_irqhost, irq);
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 60e89fc9c753..8b065bdf7412 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -131,7 +131,7 @@ int __init pq2ads_pci_init_irq(void)
}
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "No interrupt in pci pic node.\n");
of_node_put(np);
goto out;
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 2ef03e7d248c..0d6a62fc5864 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -89,7 +89,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
goto err;
ret = of_irq_to_resource(np, 0, &res[1]);
- if (ret == NO_IRQ)
+ if (!ret)
goto err;
pdev = platform_device_alloc("mpc83xx_spi", i);
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index fcbea4b51a78..24717d060008 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -352,7 +352,7 @@ static int pmc_probe(struct platform_device *ofdev)
return -ENODEV;
pmc_irq = irq_of_parse_and_map(np, 0);
- if (pmc_irq != NO_IRQ) {
+ if (pmc_irq) {
ret = request_irq(pmc_irq, pmc_irq_handler, IRQF_SHARED,
"pmc", ofdev);
@@ -400,7 +400,7 @@ out_syscr:
out_pmc:
iounmap(pmc_regs);
out:
- if (pmc_irq != NO_IRQ)
+ if (pmc_irq)
free_irq(pmc_irq, ofdev);
return ret;
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 28720a4ded7b..954e5e8b14ef 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -76,7 +76,7 @@ void __init mpc85xx_cpm2_pic_init(void)
return;
}
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
of_node_put(np);
printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
return;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 62f171c71c4c..86f20156178e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -196,7 +196,7 @@ static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
{
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
/* handle an interrupt from the 8259 */
generic_handle_irq(cascade_irq);
@@ -247,7 +247,7 @@ static int mpc85xx_cds_8259_attach(void)
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
- if (cascade_irq == NO_IRQ) {
+ if (!cascade_irq) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
return -ENXIO;
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 6bc07d837b1c..ed69c7ee1829 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -51,7 +51,7 @@ static void mpc85xx_8259_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ) {
+ if (cascade_irq) {
generic_handle_irq(cascade_irq);
}
chip->irq_eoi(&desc->irq_data);
@@ -96,7 +96,7 @@ void __init mpc85xx_ds_pic_init(void)
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
- if (cascade_irq == NO_IRQ) {
+ if (!cascade_irq) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
return;
}
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index b02d6a5bb035..82f8490b5aa7 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -78,7 +78,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
break;
}
if (i == 3)
- return NO_IRQ;
+ return 0;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i));
@@ -103,7 +103,7 @@ static void socrates_fpga_pic_cascade(struct irq_desc *desc)
*/
cascade_irq = socrates_fpga_pic_get_irq(irq);
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
@@ -292,7 +292,7 @@ void socrates_fpga_pic_init(struct device_node *pic)
for (i = 0; i < 3; i++) {
socrates_fpga_irqs[i] = irq_of_parse_and_map(pic, i);
- if (socrates_fpga_irqs[i] == NO_IRQ) {
+ if (!socrates_fpga_irqs[i]) {
pr_warning("FPGA PIC: can't get irq%d.\n", i);
continue;
}
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 845defa1fd19..a6c695fa4da0 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -22,7 +22,7 @@ static void mpc86xx_8259_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
@@ -58,7 +58,7 @@ void __init mpc86xx_init_irq(void)
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
- if (cascade_irq == NO_IRQ) {
+ if (!cascade_irq) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
return;
}
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index b1ab6e96cb31..f81069f79a94 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -241,6 +241,6 @@ void __init mpc8xx_pics_init(void)
}
irq = cpm_pic_init();
- if (irq != NO_IRQ)
+ if (irq)
irq_set_chained_handler(irq, cpm_cascade);
}
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f32edec13fd1..ca2da30ad2ab 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -74,6 +74,7 @@ config PPC_BOOK3S_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
+ select HAVE_KERNEL_XZ
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -86,6 +87,7 @@ endchoice
choice
prompt "CPU selection"
depends on PPC64
+ default POWER8_CPU if CPU_LITTLE_ENDIAN
default GENERIC_CPU
help
This will create a kernel which is optimised for a particular CPU.
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index aed7714495c1..8b55c5f19d4c 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -271,7 +271,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
for_each_pci_msi_entry(entry, dev) {
virq = irq_create_direct_mapping(msic->irq_domain);
- if (virq == NO_IRQ) {
+ if (!virq) {
dev_warn(&dev->dev,
"axon_msi: virq allocation failed!\n");
return -1;
@@ -293,7 +293,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
for_each_pci_msi_entry(entry, dev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
irq_set_msi_desc(entry->irq, NULL);
@@ -375,7 +375,7 @@ static int axon_msi_probe(struct platform_device *device)
}
virq = irq_of_parse_and_map(dn, 0);
- if (virq == NO_IRQ) {
+ if (!virq) {
printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
dn->full_name);
goto out_free_fifo;
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 1428d583c238..b926438d73af 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -189,7 +189,7 @@ static struct device_node *cbe_get_be_node(int cpu_id)
return NULL;
}
-void __init cbe_fill_regs_map(struct cbe_regs_map *map)
+static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
if(map->be_node) {
struct device_node *be, *np;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 9f609fc8d331..a6bbbaba14a3 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -123,7 +123,7 @@ static void iic_ioexc_cascade(struct irq_desc *desc)
unsigned int cirq =
irq_linear_revmap(iic_host,
base | cascade);
- if (cirq != NO_IRQ)
+ if (cirq)
generic_handle_irq(cirq);
}
/* post-ack level interrupts */
@@ -153,10 +153,10 @@ static unsigned int iic_get_irq(void)
*(unsigned long *) &pending =
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
- return NO_IRQ;
+ return 0;
virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
- if (virq == NO_IRQ)
- return NO_IRQ;
+ if (!virq)
+ return 0;
iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
BUG_ON(iic->eoi_ptr > 15);
return virq;
@@ -187,18 +187,12 @@ void iic_message_pass(int cpu, int msg)
out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}
-struct irq_domain *iic_get_irq_host(int node)
-{
- return iic_host;
-}
-EXPORT_SYMBOL_GPL(iic_get_irq_host);
-
static void iic_request_ipi(int msg)
{
int virq;
virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
- if (virq == NO_IRQ) {
+ if (!virq) {
printk(KERN_ERR
"iic: failed to map IPI %s\n", smp_ipi_name[msg]);
return;
@@ -353,7 +347,7 @@ static int __init setup_iic(void)
cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
cascade |= IIC_UNIT_IIC;
cascade = irq_create_mapping(iic_host, cascade);
- if (cascade == NO_IRQ)
+ if (!cascade)
continue;
/*
* irq_data is a generic pointer that gets passed back
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index f7d1a4953ea0..7ff51f96a00e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -411,7 +411,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
virq = irq_create_mapping(NULL,
IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
- BUG_ON(virq == NO_IRQ);
+ BUG_ON(!virq);
ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
BUG_ON(ret);
@@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
-struct dma_map_ops dma_iommu_fixed_ops = {
+static struct dma_map_ops dma_iommu_fixed_ops = {
.alloc = dma_fixed_alloc_coherent,
.free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 348a27b12512..e3ad0c38f017 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -385,7 +385,7 @@ static int __init cbe_init_pm_irq(void)
for_each_online_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
- if (irq == NO_IRQ) {
+ if (!irq) {
printk("ERROR: Unable to allocate irq for node %d\n",
node);
return -EINVAL;
@@ -412,7 +412,7 @@ void cbe_sync_irq(int node)
IIC_IRQ_IOEX_PMI
| (node << IIC_IRQ_NODE_SHIFT));
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
"for node %d\n", irq, node);
return;
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 2d4f60c0119a..460ab392f0e7 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -298,7 +298,7 @@ int cbe_sysreset_hack(void)
}
#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */
-int __init cbe_ptcal_init(void)
+static int __init cbe_ptcal_init(void)
{
int ret;
ptcal_start_tok = rtas_token("ibm,cbe-start-ptcal");
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index d06dcac66fcb..ff924af00e78 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -207,11 +207,11 @@ static void spider_irq_cascade(struct irq_desc *desc)
cs = in_be32(pic->regs + TIR_CS) >> 24;
if (cs == SPIDER_IRQ_INVALID)
- virq = NO_IRQ;
+ virq = 0;
else
virq = irq_linear_revmap(pic->host, cs);
- if (virq != NO_IRQ)
+ if (virq)
generic_handle_irq(virq);
chip->irq_eoi(&desc->irq_data);
@@ -245,19 +245,19 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
/* Now do the horrible hacks */
tmp = of_get_property(of_node, "#interrupt-cells", NULL);
if (tmp == NULL)
- return NO_IRQ;
+ return 0;
intsize = *tmp;
imap = of_get_property(of_node, "interrupt-map", &imaplen);
if (imap == NULL || imaplen < (intsize + 1))
- return NO_IRQ;
+ return 0;
iic = of_find_node_by_phandle(imap[intsize]);
if (iic == NULL)
- return NO_IRQ;
+ return 0;
imap += intsize + 1;
tmp = of_get_property(iic, "#interrupt-cells", NULL);
if (tmp == NULL) {
of_node_put(iic);
- return NO_IRQ;
+ return 0;
}
intsize = *tmp;
/* Assume unit is last entry of interrupt specifier */
@@ -266,7 +266,7 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL);
if (tmp == NULL) {
of_node_put(iic);
- return NO_IRQ;
+ return 0;
}
/* ugly as hell but works for now */
pic->node_id = (*tmp) >> 1;
@@ -281,7 +281,7 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
(pic->node_id << IIC_IRQ_NODE_SHIFT) |
(2 << IIC_IRQ_CLASS_SHIFT) |
unit);
- if (virq == NO_IRQ)
+ if (!virq)
printk(KERN_ERR "spider_pic: failed to map cascade !");
return virq;
}
@@ -318,7 +318,7 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
/* Hook up the cascade interrupt to the iic and nodeid */
virq = spider_find_cascade_and_node(pic);
- if (virq == NO_IRQ)
+ if (!virq)
return;
irq_set_handler_data(virq, pic);
irq_set_chained_handler(virq, spider_irq_cascade);
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index bb4a8e07c229..e84d8fbc2e21 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -402,7 +402,7 @@ static int spu_request_irqs(struct spu *spu)
{
int ret = 0;
- if (spu->irqs[0] != NO_IRQ) {
+ if (spu->irqs[0]) {
snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
spu->number);
ret = request_irq(spu->irqs[0], spu_irq_class_0,
@@ -410,7 +410,7 @@ static int spu_request_irqs(struct spu *spu)
if (ret)
goto bail0;
}
- if (spu->irqs[1] != NO_IRQ) {
+ if (spu->irqs[1]) {
snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
spu->number);
ret = request_irq(spu->irqs[1], spu_irq_class_1,
@@ -418,7 +418,7 @@ static int spu_request_irqs(struct spu *spu)
if (ret)
goto bail1;
}
- if (spu->irqs[2] != NO_IRQ) {
+ if (spu->irqs[2]) {
snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
spu->number);
ret = request_irq(spu->irqs[2], spu_irq_class_2,
@@ -429,10 +429,10 @@ static int spu_request_irqs(struct spu *spu)
return 0;
bail2:
- if (spu->irqs[1] != NO_IRQ)
+ if (spu->irqs[1])
free_irq(spu->irqs[1], spu);
bail1:
- if (spu->irqs[0] != NO_IRQ)
+ if (spu->irqs[0])
free_irq(spu->irqs[0], spu);
bail0:
return ret;
@@ -440,11 +440,11 @@ bail0:
static void spu_free_irqs(struct spu *spu)
{
- if (spu->irqs[0] != NO_IRQ)
+ if (spu->irqs[0])
free_irq(spu->irqs[0], spu);
- if (spu->irqs[1] != NO_IRQ)
+ if (spu->irqs[1])
free_irq(spu->irqs[1], spu);
- if (spu->irqs[2] != NO_IRQ)
+ if (spu->irqs[2])
free_irq(spu->irqs[2], spu);
}
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 21b4bfb97200..672d310dcf14 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -105,7 +105,10 @@ static int __init spu_map_interrupts_old(struct spu *spu,
spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
/* Right now, we only fail if class 2 failed */
- return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+ if (!spu->irqs[2])
+ return -EINVAL;
+
+ return 0;
}
static void __iomem * __init spu_map_prop_old(struct spu *spu,
@@ -191,7 +194,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0],
oirq.np->full_name);
spu->irqs[i] = irq_create_of_mapping(&oirq);
- if (spu->irqs[i] == NO_IRQ) {
+ if (!spu->irqs[i]) {
pr_debug("spu_new: failed to map it !\n");
goto err;
}
@@ -202,7 +205,7 @@ err:
pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
spu->name);
for (; i >= 0; i--) {
- if (spu->irqs[i] != NO_IRQ)
+ if (spu->irqs[i])
irq_dispose_mapping(spu->irqs[i]);
}
return ret;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index bfb300633dfe..0ce1b45f02a8 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -368,7 +368,7 @@ static void chrp_8259_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
@@ -514,7 +514,7 @@ static void __init chrp_find_8259(void)
}
if (chrp_mpic != NULL) {
cascade_irq = irq_of_parse_and_map(pic, 0);
- if (cascade_irq == NO_IRQ)
+ if (!cascade_irq)
printk(KERN_ERR "i8259: failed to map cascade irq\n");
else
irq_set_chained_handler(cascade_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index b7866e01483d..ade83829d5e8 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -181,7 +181,7 @@ unsigned int flipper_pic_get_irq(void)
irq_status = in_be32(io_base + FLIPPER_ICR) &
in_be32(io_base + FLIPPER_IMR);
if (irq_status == 0)
- return NO_IRQ; /* no more IRQs pending */
+ return 0; /* no more IRQs pending */
irq = __ffs(irq_status);
return irq_linear_revmap(flipper_irq_host, irq);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9b7975706bfc..89c54de88b7a 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -114,7 +114,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
in_be32(io_base + HW_BROADWAY_IMR);
if (irq_status == 0)
- return NO_IRQ; /* no more IRQs pending */
+ return 0; /* no more IRQs pending */
irq = __ffs(irq_status);
return irq_linear_revmap(h, irq);
@@ -131,7 +131,7 @@ static void hlwd_pic_irq_cascade(struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
virq = __hlwd_pic_get_irq(irq_domain);
- if (virq != NO_IRQ)
+ if (virq)
generic_handle_irq(virq);
else
pr_err("spurious interrupt!\n");
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index ed7321d6772e..8e3590941960 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -47,7 +47,7 @@ static void mvme5100_8259_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
@@ -84,7 +84,7 @@ static void __init mvme5100_pic_init(void)
}
cirq = irq_of_parse_and_map(cp, 0);
- if (cirq == NO_IRQ) {
+ if (!cirq) {
pr_warn("mvme5100_pic_init: no cascade interrupt?\n");
return;
}
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index a2f89e6326ce..a0589aac4163 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -552,7 +552,7 @@ void maple_pci_irq_fixup(struct pci_dev *dev)
pci_bus_to_host(dev->bus) == u4_pcie) {
printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
dev->irq = irq_create_mapping(NULL, 1);
- if (dev->irq != NO_IRQ)
+ if (dev->irq)
irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
@@ -562,7 +562,7 @@ void maple_pci_irq_fixup(struct pci_dev *dev)
if (dev->vendor == PCI_VENDOR_ID_AMD &&
dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
(dev->class & 5) != 5) {
- dev->irq = NO_IRQ;
+ dev->irq = 0;
}
DBG(" <- maple_pci_irq_fixup\n");
@@ -648,7 +648,7 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
return defirq;
}
irq = irq_of_parse_and_map(np, channel & 0x1);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk("Failed to map onboard IDE interrupt for channel %d\n",
channel);
return defirq;
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 3c30c7a4534d..b7f937563827 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -156,7 +156,7 @@ static void __noreturn maple_halt(void)
}
#ifdef CONFIG_SMP
-struct smp_ops_t maple_smp_ops = {
+static struct smp_ops_t maple_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.kick_cpu = smp_generic_kick_cpu,
@@ -176,7 +176,7 @@ static void __init maple_use_rtas_reboot_and_halt_if_present(void)
}
}
-void __init maple_setup_arch(void)
+static void __init maple_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
index 00d4b28cbb60..c7f1dbe94de7 100644
--- a/arch/powerpc/platforms/pasemi/Kconfig
+++ b/arch/powerpc/platforms/pasemi/Kconfig
@@ -14,6 +14,16 @@ config PPC_PASEMI
menu "PA Semi PWRficient options"
depends on PPC_PASEMI
+config PPC_PASEMI_NEMO
+ bool "Nemo motherboard Support"
+ depends on PPC_PASEMI
+ select PPC_I8259
+ help
+ This option enables support for the 'Nemo' motherboard
+ used in A-Eon's Amigaone X1000. This consists of some
+ device tree patches and workarounds for the SB600 South
+ Bridge that provides SATA/USB/Audio.
+
config PPC_PASEMI_IOMMU
bool "PA Semi IOMMU support"
depends on PPC_PASEMI
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index ddf635000c6b..c23e60959aa8 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -306,7 +306,7 @@ static struct platform_driver gpio_mdio_driver =
},
};
-int gpio_mdio_init(void)
+static int gpio_mdio_init(void)
{
struct device_node *np;
@@ -326,7 +326,7 @@ int gpio_mdio_init(void)
}
module_init(gpio_mdio_init);
-void gpio_mdio_exit(void)
+static void gpio_mdio_exit(void)
{
platform_driver_unregister(&gpio_mdio_driver);
if (gpio_regs)
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index c61667e8bb06..e74adc4e7fd8 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -199,7 +199,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
-int __init iob_init(struct device_node *dn)
+static int __init iob_init(struct device_node *dn)
{
unsigned long tmp;
u32 regword;
diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c
index e0ab299763c1..8571e7bf78b6 100644
--- a/arch/powerpc/platforms/pasemi/misc.c
+++ b/arch/powerpc/platforms/pasemi/misc.c
@@ -76,7 +76,7 @@ static int __init pasemi_register_i2c_devices(void)
}
info.irq = irq_of_parse_and_map(node, 0);
- if (info.irq == NO_IRQ)
+ if (!info.irq)
info.irq = -1;
if (find_i2c_driver(node, &info) < 0)
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index d9af76342d99..d9cd510c8865 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -68,7 +68,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
@@ -109,7 +109,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
}
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n",
hwirq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq,
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index e86c1bd08f1f..3182400cf48f 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -59,7 +59,7 @@ struct mce_regs {
static struct mce_regs mce_regs[MAX_MCE_REGS];
static int num_mce_regs;
-static int nmi_virq = NO_IRQ;
+static int nmi_virq = 0;
static void __noreturn pas_restart(char *cmd)
@@ -105,7 +105,7 @@ static void pas_take_timebase(void)
arch_spin_unlock(&timebase_lock);
}
-struct smp_ops_t pas_smp_ops = {
+static struct smp_ops_t pas_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.kick_cpu = smp_generic_kick_cpu,
@@ -115,7 +115,7 @@ struct smp_ops_t pas_smp_ops = {
};
#endif /* CONFIG_SMP */
-void __init pas_setup_arch(void)
+static void __init pas_setup_arch(void)
{
#ifdef CONFIG_SMP
/* Setup SMP callback */
@@ -264,7 +264,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
srr0 = regs->nip;
srr1 = regs->msr;
- if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) {
+ if (nmi_virq && mpic_get_mcirq() == nmi_virq) {
printk(KERN_ERR "NMI delivered\n");
debugger(regs);
mpic_end_irq(irq_get_irq_data(nmi_virq));
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 6d6f277477aa..c8c217b7dd33 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -401,7 +401,7 @@ static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
{
struct pmac_i2c_host_kw *host = bus->hostdata;
u8 mode_reg = host->speed;
- int use_irq = host->irq != NO_IRQ && !bus->polled;
+ int use_irq = host->irq && !bus->polled;
/* Setup mode & subaddress if any */
switch(bus->mode) {
@@ -535,7 +535,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
break;
}
host->irq = irq_of_parse_and_map(np, 0);
- if (host->irq == NO_IRQ)
+ if (!host->irq)
printk(KERN_WARNING
"low_i2c: Failed to map interrupt for %s\n",
np->full_name);
@@ -557,7 +557,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
*/
if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
"keywest i2c", host))
- host->irq = NO_IRQ;
+ host->irq = 0;
printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
*addrp, host->irq, np->full_name);
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index e49d07f3d542..459138ed4571 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -26,7 +26,7 @@ static irqreturn_t macio_gpio_irq(int irq, void *data)
static int macio_do_gpio_irq_enable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
- if (irq == NO_IRQ)
+ if (!irq)
return -EINVAL;
return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
}
@@ -34,7 +34,7 @@ static int macio_do_gpio_irq_enable(struct pmf_function *func)
static int macio_do_gpio_irq_disable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
- if (irq == NO_IRQ)
+ if (!irq)
return -EINVAL;
free_irq(irq, func);
return 0;
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 43075081721f..695e8c4d4224 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -804,7 +804,7 @@ void pmf_unregister_driver(struct device_node *np)
}
EXPORT_SYMBOL_GPL(pmf_unregister_driver);
-struct pmf_function *__pmf_find_function(struct device_node *target,
+static struct pmf_function *__pmf_find_function(struct device_node *target,
const char *name, u32 flags)
{
struct device_node *actor = of_node_get(target);
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 981546345033..f5f9ad7c3398 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -251,7 +251,7 @@ static unsigned int pmac_pic_get_irq(void)
}
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
if (unlikely(irq < 0))
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(pmac_pic_host, irq);
}
@@ -389,7 +389,7 @@ static void __init pmac_pic_probe_oldstyle(void)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
- if (slave && pmac_irq_cascade != NO_IRQ)
+ if (slave && pmac_irq_cascade)
setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
@@ -444,7 +444,7 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
pswitch = of_find_node_by_name(NULL, "programmer-switch");
if (pswitch) {
nmi_irq = irq_of_parse_and_map(pswitch, 0);
- if (nmi_irq != NO_IRQ) {
+ if (nmi_irq) {
mpic_irq_set_priority(nmi_irq, 9);
setup_irq(nmi_irq, &xmon_action);
}
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 366e4f510fcf..c9eb7d6540ea 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -979,7 +979,7 @@ static void pmac_cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
/* Core99 Macs (dual G4s and G5s) */
-struct smp_ops_t core99_smp_ops = {
+static struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 86544ea85dc3..2354ea51e871 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -763,7 +763,8 @@ int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
* reset followed by hot reset on root bus. So we also
* need the PCI bus settlement delay.
*/
- rc = pnv_eeh_poll(phb->opal_id);
+ if (rc > 0)
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE) {
if (system_state < SYSTEM_RUNNING)
udelay(1000 * EEH_PE_RST_SETTLE_TIME);
@@ -806,7 +807,8 @@ static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
goto out;
/* Poll state of the PHB until the request is done */
- rc = pnv_eeh_poll(phb->opal_id);
+ if (rc > 0)
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE)
msleep(EEH_PE_RST_SETTLE_TIME);
out:
@@ -1090,10 +1092,16 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
}
}
- bus = eeh_pe_bus_get(pe);
if (pe->type & EEH_PE_VF)
return pnv_eeh_reset_vf_pe(pe, option);
+ bus = eeh_pe_bus_get(pe);
+ if (!bus) {
+ pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->addr);
+ return -EIO;
+ }
+
if (pci_is_root_bus(bus) ||
pci_is_root_bus(bus->parent))
return pnv_eeh_root_reset(hose, option);
@@ -1306,7 +1314,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
return;
}
- switch (data->type) {
+ switch (be16_to_cpu(data->type)) {
case OPAL_P7IOC_DIAG_TYPE_RGC:
pr_info("P7IOC diag-data for RGC\n\n");
pnv_eeh_dump_hub_diag_common(data);
@@ -1538,7 +1546,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
/* Try best to clear it */
opal_pci_eeh_freeze_clear(phb->opal_id,
- frozen_pe_no,
+ be64_to_cpu(frozen_pe_no),
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
ret = EEH_NEXT_ERR_NONE;
} else if ((*pe)->state & EEH_PE_ISOLATED ||
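Besides skipping the poll when the reset request itself failed (rc <= 0), the reordered hunk above lets the VF case return before the bus lookup and refuses to dereference a NULL result from eeh_pe_bus_get(). A condensed sketch of that guard, reusing the fields already printed in the patch and assumed to sit among eeh-powernv.c's existing includes; the function name is hypothetical:

    /* Hypothetical: resolve the PE's bus before choosing a reset path. */
    static int example_pick_reset_path(struct eeh_pe *pe)
    {
            struct pci_bus *bus = eeh_pe_bus_get(pe);

            if (!bus) {
                    pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
                           __func__, pe->phb->global_number, pe->addr);
                    return -EIO;
            }

            /* ... root-bus reset vs. bridge reset decided from 'bus' ... */
            return 0;
    }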
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 00e1a0195c78..aec85e778028 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev)
return 0;
}
-struct dma_map_ops dma_npu_ops = {
+static struct dma_map_ops dma_npu_ops = {
.map_page = dma_npu_map_page,
.map_sg = dma_npu_map_sg,
.alloc = dma_npu_alloc,
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index ed8bba68a162..998316bf2dad 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -222,7 +222,7 @@ int __init opal_event_init(void)
/* Get hardware and virtual IRQ */
irq = be32_to_cpup(irqs);
virq = irq_create_mapping(NULL, irq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_warn("Failed to map irq 0x%x\n", irq);
continue;
}
@@ -260,7 +260,7 @@ machine_arch_initcall(powernv, opal_event_init);
int opal_event_request(unsigned int opal_event_nr)
{
if (WARN_ON_ONCE(!opal_event_irqchip.domain))
- return NO_IRQ;
+ return 0;
return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 3d29d40eb0e9..44d2d842cee7 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -208,6 +208,7 @@ OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE);
OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD);
OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD);
OPAL_CALL(opal_set_xive, OPAL_SET_XIVE);
+OPAL_CALL_REAL(opal_rm_set_xive, OPAL_SET_XIVE);
OPAL_CALL(opal_get_xive, OPAL_GET_XIVE);
OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER);
OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS);
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index 1349a099c74c..94498a04558b 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -344,7 +344,7 @@ int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return (hwirq ? hwirq : -ENOMEM);
virq = irq_create_mapping(NULL, hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
pci_name(pdev));
return -ENOMEM;
@@ -374,7 +374,7 @@ void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
return;
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 38a5c657ffd3..d4b33dd2d9e7 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -133,9 +133,22 @@ static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
+ s64 rc;
+
phb->ioda.pe_array[pe_no].phb = phb;
phb->ioda.pe_array[pe_no].pe_number = pe_no;
+ /*
+ * Clear the PE frozen state as it might be put into frozen state
+ * in the last PCI remove path. It's not harmful to do so when the
+ * PE is already in unfrozen state.
+ */
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ if (rc != OPAL_SUCCESS)
+ pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
+ __func__, rc, phb->hose->global_number, pe_no);
+
return &phb->ioda.pe_array[pe_no];
}
@@ -417,7 +430,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
struct device_node *dn = hose->dn;
struct resource *res;
u32 m64_range[2], i;
- const u32 *r;
+ const __be32 *r;
u64 pci_addr;
if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
@@ -2718,15 +2731,21 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
#ifdef CONFIG_PCI_MSI
-static void pnv_ioda2_msi_eoi(struct irq_data *d)
+int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
{
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct irq_chip *chip = irq_data_get_irq_chip(d);
struct pnv_phb *phb = container_of(chip, struct pnv_phb,
ioda.irq_chip);
+
+ return opal_pci_msi_eoi(phb->opal_id, hw_irq);
+}
+
+static void pnv_ioda2_msi_eoi(struct irq_data *d)
+{
int64_t rc;
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
- rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
+ rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
WARN_ON_ONCE(rc);
icp_native_eoi(d);
@@ -2756,6 +2775,16 @@ void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
irq_set_chip(virq, &phb->ioda.irq_chip);
}
+/*
+ * Returns true iff chip is something that we could call
+ * pnv_opal_pci_msi_eoi for.
+ */
+bool is_pnv_opal_msi(struct irq_chip *chip)
+{
+ return chip->irq_eoi == pnv_ioda2_msi_eoi;
+}
+EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
+
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg)
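Splitting the EOI into pnv_opal_pci_msi_eoi() plus the exported is_pnv_opal_msi() type check lets code outside this file take the OPAL EOI path only for irq_chips that actually support it. A hypothetical caller, assuming nothing beyond the two new helpers and the existing irq_data accessors:

    /* Hypothetical external caller: only use the OPAL EOI when the chip matches. */
    static void example_eoi(struct irq_data *d)
    {
            struct irq_chip *chip = irq_data_get_irq_chip(d);
            unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

            if (chip && is_pnv_opal_msi(chip))
                    WARN_ON_ONCE(pnv_opal_pci_msi_eoi(chip, hw_irq));
    }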
@@ -3033,6 +3062,38 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
}
}
+#ifdef CONFIG_DEBUG_FS
+static int pnv_pci_diag_data_set(void *data, u64 val)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ s64 ret;
+
+ if (val != 1ULL)
+ return -EINVAL;
+
+ hose = (struct pci_controller *)data;
+ if (!hose || !hose->private_data)
+ return -ENODEV;
+
+ phb = hose->private_data;
+
+ /* Retrieve the diag data from firmware */
+ ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (ret != OPAL_SUCCESS)
+ return -EIO;
+
+ /* Print the diag data to the kernel log */
+ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
+ pnv_pci_diag_data_set, "%llu\n");
+
+#endif /* CONFIG_DEBUG_FS */
+
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
@@ -3048,9 +3109,14 @@ static void pnv_pci_ioda_create_dbgfs(void)
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
- if (!phb->dbgfs)
+ if (!phb->dbgfs) {
pr_warning("%s: Error on creating debugfs on PHB#%x\n",
__func__, hose->global_number);
+ continue;
+ }
+
+ debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
+ &pnv_pci_diag_data_fops);
}
#endif /* CONFIG_DEBUG_FS */
}
@@ -3763,10 +3829,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (rc)
pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
- /* If we're running in kdump kerenl, the previous kerenl never
+ /*
+ * If we're running in kdump kernel, the previous kernel never
* shutdown PCI devices correctly. We already got IODA table
* cleaned out. So we have to issue PHB reset to stop all PCI
- * transactions from previous kerenl.
+ * transactions from previous kernel.
*/
if (is_kdump_kernel()) {
pr_info(" Issue PHB reset ...\n");
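The new dump_diag_regs entry follows the standard debugfs one-shot trigger pattern: DEFINE_SIMPLE_ATTRIBUTE wraps a get/set pair into file_operations, and writing 1 fires the action. A generic sketch of the same pattern with hypothetical names:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/types.h>

    static int example_trigger_set(void *data, u64 val)
    {
            if (val != 1)
                    return -EINVAL;
            /* ... perform the one-shot action on 'data' ... */
            return 0;
    }

    /* Write-only attribute: no get() callback, "%llu\n" as the value format. */
    DEFINE_SIMPLE_ATTRIBUTE(example_trigger_fops, NULL, example_trigger_set, "%llu\n");

    static void example_add_trigger(struct dentry *parent, void *data)
    {
            debugfs_create_file("trigger", 0200, parent, data,
                                &example_trigger_fops);
    }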
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a21d831c1114..db7b8020f68e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -186,7 +186,7 @@ int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENOSPC;
}
virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_warn("%s: Failed to map MSI to linux irq\n",
pci_name(pdev));
msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
@@ -217,7 +217,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
return;
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
@@ -309,8 +309,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
be64_to_cpu(data->dma1ErrorLog1));
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
+ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
+ (be64_to_cpu(data->pestB[i]) >> 63) == 0)
continue;
pr_info("PE[%3d] A/B: %016llx %016llx\n",
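OPAL hands the P7IOC diag data back in big-endian layout, so the PEST words must go through be64_to_cpu() before their most-significant bit is tested or the value is printed; the hunk above adds exactly that. A generic sketch of the conversion idiom, with a hypothetical structure:

    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_diag {
            __be64 pest[8];                 /* as delivered by firmware */
    };

    static void example_dump_pest(const struct example_diag *d)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(d->pest); i++) {
                    u64 v = be64_to_cpu(d->pest[i]);        /* convert first */

                    if (v >> 63)                            /* entry populated? */
                            pr_info("PEST[%d]: %016llx\n", i, v);
            }
    }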
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 57caaf11a83f..e48462447ff0 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -62,7 +62,7 @@ static int __init ps3_register_lpm_devices(void)
&dev->lpm.rights);
if (result) {
- pr_debug("%s:%d: ps3_repository_read_lpm_privleges failed \n",
+ pr_debug("%s:%d: ps3_repository_read_lpm_privileges failed\n",
__func__, __LINE__);
goto fail_read_repo;
}
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index b831638e6f4a..98f8c3611133 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -192,7 +192,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
*virq = irq_create_mapping(NULL, outlet);
- if (*virq == NO_IRQ) {
+ if (!*virq) {
FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
@@ -339,7 +339,7 @@ int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
if (result) {
FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
- *virq = NO_IRQ;
+ *virq = 0;
return result;
}
@@ -418,7 +418,7 @@ int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
ps3_event_receive_port_destroy(*virq);
- *virq = NO_IRQ;
+ *virq = 0;
return result;
}
@@ -724,12 +724,12 @@ static unsigned int ps3_get_irq(void)
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
- if (unlikely(plug == NO_IRQ)) {
+ if (unlikely(!plug)) {
DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
- return NO_IRQ;
+ return 0;
}
#if defined(DEBUG)
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 3c7707af3384..60154d08debf 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -91,7 +91,7 @@ static void __init ps3_smp_probe(void)
result = smp_request_message_ipi(virqs[i], i);
if (result)
- virqs[i] = NO_IRQ;
+ virqs[i] = 0;
else
ps3_register_ipi_irq(cpu, virqs[i]);
}
@@ -112,7 +112,7 @@ void ps3_smp_cleanup_cpu(int cpu)
for (i = 0; i < MSG_COUNT; i++) {
/* Can't call free_irq from interrupt context. */
ps3_event_receive_port_destroy(virqs[i]);
- virqs[i] = NO_IRQ;
+ virqs[i] = 0;
}
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 492b2575e0d2..b54850845466 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -284,7 +284,7 @@ fail_alloc_2:
fail_alloc_1:
ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
- spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+ spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = 0;
return result;
}
@@ -334,7 +334,7 @@ static int ps3_destroy_spu(struct spu *spu)
ps3_spe_irq_destroy(spu->irqs[1]);
ps3_spe_irq_destroy(spu->irqs[0]);
- spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+ spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = 0;
spu_unmap(spu);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 4748124faa10..423e450efe07 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -27,7 +27,7 @@
#include <asm/uaccess.h>
#include <asm/rtas.h>
-struct workqueue_struct *pseries_hp_wq;
+static struct workqueue_struct *pseries_hp_wq;
struct pseries_hp_work {
struct work_struct work;
@@ -377,7 +377,7 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
return rc;
}
-void pseries_hp_work_fn(struct work_struct *work)
+static void pseries_hp_work_fn(struct work_struct *work)
{
struct pseries_hp_work *hp_work =
container_of(work, struct pseries_hp_work, work);
@@ -413,6 +413,7 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
queue_work(pseries_hp_wq, (struct work_struct *)work);
} else {
*rc = -ENOMEM;
+ kfree(hp_errlog_copy);
complete(hotplug_done);
}
}
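The added kfree() closes a leak: the copied error log is normally freed by the work handler, so the branch where no work item is ever queued has to release it itself. A condensed sketch of that ownership rule, mirroring the work-plus-payload layout used in this file but with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct example_work {
            struct work_struct work;
            void *payload;
    };

    static void example_work_fn(struct work_struct *work)
    {
            struct example_work *ew = container_of(work, struct example_work, work);

            /* ... consume ew->payload ... */
            kfree(ew->payload);
            kfree(ew);
    }

    static int example_queue_payload(struct workqueue_struct *wq, void *payload)
    {
            struct example_work *ew = kzalloc(sizeof(*ew), GFP_ATOMIC);

            if (!ew) {
                    kfree(payload);         /* nothing will consume it */
                    return -ENOMEM;
            }

            ew->payload = payload;
            INIT_WORK(&ew->work, example_work_fn);
            queue_work(wq, &ew->work);      /* example_work_fn() now owns it */
            return 0;
    }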
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index a6ddca833119..32187dc76730 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -34,7 +34,7 @@ void request_event_sources_irqs(struct device_node *np,
if (count > 15)
break;
virqs[count] = irq_create_of_mapping(&oirq);
- if (virqs[count] == NO_IRQ) {
+ if (!virqs[count]) {
pr_err("event-sources: Unable to allocate "
"interrupt number for %s\n",
np->full_name);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 543a6386f3eb..326ef0dd6038 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -119,7 +119,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
struct msi_desc *entry;
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
irq_set_msi_desc(entry->irq, NULL);
@@ -471,7 +471,7 @@ again:
virq = irq_create_mapping(NULL, hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
return -ENOSPC;
}
@@ -490,7 +490,7 @@ again:
static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
{
/* No LSI -> leave MSIs (if any) configured */
- if (pdev->irq == NO_IRQ) {
+ if (!pdev->irq) {
dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n");
return;
}
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index b502ab61aafa..7d28cabf1206 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -156,7 +156,7 @@ static int scanlog_release(struct inode * inode, struct file * file)
return 0;
}
-const struct file_operations scanlog_fops = {
+static const struct file_operations scanlog_fops = {
.owner = THIS_MODULE,
.read = scanlog_read,
.write = scanlog_write,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a39d20e8623d..97aa3f332f24 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -114,7 +114,7 @@ static void pseries_8259_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
@@ -141,7 +141,7 @@ static void __init pseries_setup_i8259_cascade(void)
}
cascade = irq_of_parse_and_map(found, 0);
- if (cascade == NO_IRQ) {
+ if (!cascade) {
printk(KERN_ERR "pic: failed to map cascade interrupt");
return;
}
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 9144204442eb..ada29eaed6e2 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -240,7 +240,7 @@ static int axon_ram_probe(struct platform_device *device)
device_add_disk(&device->dev, bank->disk);
bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
- if (bank->irq_id == NO_IRQ) {
+ if (!bank->irq_id) {
dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
rc = -EFAULT;
goto failed;
@@ -250,7 +250,7 @@ static int axon_ram_probe(struct platform_device *device)
AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
if (rc != 0) {
dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
- bank->irq_id = NO_IRQ;
+ bank->irq_id = 0;
rc = -EFAULT;
goto failed;
}
@@ -268,7 +268,7 @@ static int axon_ram_probe(struct platform_device *device)
failed:
if (bank != NULL) {
- if (bank->irq_id != NO_IRQ)
+ if (bank->irq_id)
free_irq(bank->irq_id, device);
if (bank->disk != NULL) {
if (bank->disk->major > 0)
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 81d49476c47e..3c0eb9b25535 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -132,7 +132,7 @@ unsigned int cpm_pic_init(void)
{
struct device_node *np = NULL;
struct resource res;
- unsigned int sirq = NO_IRQ, hwirq, eirq;
+ unsigned int sirq = 0, hwirq, eirq;
int ret;
pr_debug("cpm_pic_init\n");
@@ -154,7 +154,7 @@ unsigned int cpm_pic_init(void)
goto end;
sirq = irq_of_parse_and_map(np, 0);
- if (sirq == NO_IRQ)
+ if (!sirq)
goto end;
/* Initialize the CPM interrupt controller. */
@@ -168,7 +168,7 @@ unsigned int cpm_pic_init(void)
cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
if (cpm_pic_host == NULL) {
printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
- sirq = NO_IRQ;
+ sirq = 0;
goto end;
}
@@ -182,7 +182,7 @@ unsigned int cpm_pic_init(void)
}
eirq = irq_of_parse_and_map(np, 0);
- if (eirq == NO_IRQ)
+ if (!eirq)
goto end;
if (setup_irq(eirq, &cpm_error_irqaction))
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index bffcc7a486a1..48866e6c1efb 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -155,7 +155,7 @@ static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
.irq_set_type = ehv_pic_set_irq_type,
};
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
int irq;
@@ -168,7 +168,7 @@ unsigned int ehv_pic_get_irq(void)
ev_int_iack(0, &irq); /* legacy mode */
if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
- return NO_IRQ;
+ return 0;
/*
* this will also setup revmap[] in the slow path for the first
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 06ac3c61b3d0..a6f0b96ce2c9 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -406,7 +406,7 @@ static int __init fsl_gtm_init(void)
unsigned int irq;
irq = irq_of_parse_and_map(np, i);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("%s: not enough interrupts specified\n",
np->full_name);
goto err;
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index b83f32562a37..488ec453038a 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -115,8 +115,8 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
errint = __builtin_clz(eisr);
cascade_irq = irq_linear_revmap(mpic->irqhost,
mpic->err_int_vecs[errint]);
- WARN_ON(cascade_irq == NO_IRQ);
- if (cascade_irq != NO_IRQ) {
+ WARN_ON(!cascade_irq);
+ if (cascade_irq) {
generic_handle_irq(cascade_irq);
} else {
eimr |= 1 << (31 - errint);
@@ -134,7 +134,7 @@ void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
int ret;
virq = irq_create_mapping(mpic->irqhost, irqnum);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_err("Error interrupt setup failed\n");
return;
}
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 3a2be3676f43..8a244828782e 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -131,7 +131,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
irq_hw_number_t hwirq;
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
@@ -250,7 +250,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
virq = irq_create_mapping(msi_data->irqhost, hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
rc = -ENOSPC;
@@ -285,7 +285,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
msir_index = cascade_data->index;
if (msir_index >= NR_MSI_REG_MAX)
- cascade_irq = NO_IRQ;
+ cascade_irq = 0;
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
@@ -315,7 +315,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
cascade_irq = irq_linear_revmap(msi_data->irqhost,
msi_hwirq(msi_data, msir_index,
intr_index + have_shift));
- if (cascade_irq != NO_IRQ) {
+ if (cascade_irq) {
generic_handle_irq(cascade_irq);
ret = IRQ_HANDLED;
}
@@ -337,7 +337,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
if (msi->cascade_array[i]) {
virq = msi->cascade_array[i]->virq;
- BUG_ON(virq == NO_IRQ);
+ BUG_ON(!virq);
free_irq(virq, msi->cascade_array[i]);
kfree(msi->cascade_array[i]);
@@ -362,7 +362,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
int virt_msir, i, ret;
virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
- if (virt_msir == NO_IRQ) {
+ if (!virt_msir) {
dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
__func__, irq_index);
return 0;
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index d57b77573068..02553a8ce191 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -102,7 +102,7 @@ static void gef_pic_cascade(struct irq_desc *desc)
*/
cascade_irq = gef_pic_get_irq();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
@@ -206,7 +206,7 @@ void __init gef_pic_init(struct device_node *np)
/* Map controller */
gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
- if (gef_pic_cascade_irq == NO_IRQ) {
+ if (!gef_pic_cascade_irq) {
printk(KERN_ERR "SBC610: failed to map cascade interrupt");
return;
}
@@ -223,12 +223,12 @@ void __init gef_pic_init(struct device_node *np)
/*
* This is called when we receive an interrupt with apparently comes from this
- * chip - check, returning the highest interrupt generated or return NO_IRQ
+ * chip - check, returning the highest interrupt generated or return 0.
*/
unsigned int gef_pic_get_irq(void)
{
u32 cause, mask, active;
- unsigned int virq = NO_IRQ;
+ unsigned int virq = 0;
int hwirq;
cause = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_STATUS);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index aa2c186d3115..bafb014e1a7e 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -68,9 +68,9 @@ unsigned int i8259_irq(void)
if (!pci_intack)
outb(0x0B, 0x20); /* ISR register */
if(~inb(0x20) & 0x80)
- irq = NO_IRQ;
+ irq = 0;
} else if (irq == 0xff)
- irq = NO_IRQ;
+ irq = 0;
if (lock)
raw_spin_unlock(&i8259_lock);
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index f76ee39cb337..f267ee0afc08 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -853,7 +853,7 @@ void ipic_clear_mcp_status(u32 mask)
ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
}
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
int irq;
@@ -864,7 +864,7 @@ unsigned int ipic_get_irq(void)
irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
if (irq == 0) /* 0 --> no irq is pending */
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(primary_ipic->irqhost, irq);
}
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 69f5814ae6d4..c31f634f1973 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -89,7 +89,7 @@ static ssize_t mmio_nvram_write(char *buf, size_t count, loff_t *index)
return count;
}
-void mmio_nvram_write_val(int addr, unsigned char val)
+static void mmio_nvram_write_val(int addr, unsigned char val)
{
unsigned long flags;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index b7cf7abff2eb..3e828b20c21e 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -79,7 +79,7 @@ unsigned int mpc8xx_get_irq(void)
irq = in_be32(&siu_reg->sc_sivec) >> 26;
if (irq == PIC_VEC_SPURRIOUS)
- irq = NO_IRQ;
+ irq = 0;
return irq_linear_revmap(mpc8xx_pic_host, irq);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7de45b2df366..4d48cecfedd1 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1649,7 +1649,7 @@ void __init mpic_init(struct mpic *mpic)
/* Check if this MPIC is chained from a parent interrupt controller */
if (mpic->flags & MPIC_SECONDARY) {
int virq = irq_of_parse_and_map(mpic->node, 0);
- if (virq != NO_IRQ) {
+ if (virq) {
printk(KERN_INFO "%s: hooking up to IRQ %d\n",
mpic->node->full_name, virq);
irq_set_handler_data(virq, mpic);
@@ -1778,13 +1778,13 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
if (unlikely(src == mpic->spurious_vec)) {
if (mpic->flags & MPIC_SPV_EOI)
mpic_eoi(mpic);
- return NO_IRQ;
+ return 0;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
mpic->name, (int)src);
mpic_eoi(mpic);
- return NO_IRQ;
+ return 0;
}
return irq_linear_revmap(mpic->irqhost, src);
@@ -1817,17 +1817,17 @@ unsigned int mpic_get_coreint_irq(void)
if (unlikely(src == mpic->spurious_vec)) {
if (mpic->flags & MPIC_SPV_EOI)
mpic_eoi(mpic);
- return NO_IRQ;
+ return 0;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
mpic->name, (int)src);
- return NO_IRQ;
+ return 0;
}
return irq_linear_revmap(mpic->irqhost, src);
#else
- return NO_IRQ;
+ return 0;
#endif
}
@@ -1852,7 +1852,7 @@ void mpic_request_ipis(void)
for (i = 0; i < 4; i++) {
unsigned int vipi = irq_create_mapping(mpic->irqhost,
mpic->ipi_vecs[0] + i);
- if (vipi == NO_IRQ) {
+ if (!vipi) {
printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
continue;
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 3f165d972a0e..db2286be5d9a 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -238,7 +238,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
if (receive_mask & (1 << i)) {
msgr->irq = irq_of_parse_and_map(np, irq_index);
- if (msgr->irq == NO_IRQ) {
+ if (!msgr->irq) {
dev_err(&dev->dev,
"Missing interrupt specifier");
kfree(msgr);
@@ -246,7 +246,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
}
irq_index += 1;
} else {
- msgr->irq = NO_IRQ;
+ msgr->irq = 0;
}
mpic_msgrs[reg_number] = msgr;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 2cbc7e29b85f..cfc1c57d760f 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -110,7 +110,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
irq_hw_number_t hwirq;
for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
@@ -155,7 +155,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_hi = addr >> 32;
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
return -ENOSPC;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 0f842dd16bcd..a79953deb489 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -272,7 +272,7 @@ unsigned int mv64x60_get_irq(void)
u32 cause;
int level1;
irq_hw_number_t hwirq;
- int virq = NO_IRQ;
+ int virq = 0;
cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
if (cause & MV64X60_SELECT_CAUSE_HIGH) {
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 8a0b77a3ec0c..9ea6a221d9d5 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -158,7 +158,7 @@ static int pmi_of_probe(struct platform_device *dev)
data->dev = dev;
data->irq = irq_of_parse_and_map(np, 0);
- if (data->irq == NO_IRQ) {
+ if (!data->irq) {
printk(KERN_ERR "pmi: invalid interrupt.\n");
rc = -EFAULT;
goto error_cleanup_iomap;
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 52a93dcae262..9926ad67af76 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -60,7 +60,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
hwirq = ppc4xx_hsta_msi.irq_map[irq];
- if (hwirq == NO_IRQ) {
+ if (!hwirq) {
pr_err("%s: Failed mapping irq %d\n", __func__, irq);
return -EINVAL;
}
@@ -110,7 +110,7 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev)
int irq;
for_each_pci_msi_entry(entry, dev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
irq = hsta_find_hwirq_offset(entry->irq);
@@ -166,7 +166,7 @@ static int hsta_msi_probe(struct platform_device *pdev)
for (irq = 0; irq < irq_count; irq++) {
ppc4xx_hsta_msi.irq_map[irq] =
irq_of_parse_and_map(dev->of_node, irq);
- if (ppc4xx_hsta_msi.irq_map[irq] == NO_IRQ) {
+ if (!ppc4xx_hsta_msi.irq_map[irq]) {
dev_err(dev, "Unable to map IRQ\n");
ret = -EINVAL;
goto out2;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 8fb806135043..590dab4f47d6 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -102,7 +102,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
__func__);
}
virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
- if (virq == NO_IRQ) {
+ if (!virq) {
dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
return -ENOSPC;
@@ -129,7 +129,7 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
for_each_pci_msi_entry(entry, dev) {
- if (entry->irq == NO_IRQ)
+ if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
@@ -201,7 +201,7 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
for (i = 0; i < msi_irqs; i++) {
virq = msi->msi_virqs[i];
- if (virq != NO_IRQ)
+ if (virq)
irq_dispose_mapping(virq);
}
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 5c77c9ba33aa..d41134d2f786 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -109,7 +109,7 @@ static int __init ppc4xx_l2c_probe(void)
/* Get and map irq number from device tree */
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "irq_of_parse_and_map failed\n");
of_node_put(np);
return -ENODEV;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 379de955aae3..57c971b7839c 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -433,7 +433,7 @@ void tsi108_irq_cascade(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = get_pci_source();
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 6893d8f236df..a00949f3e378 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -319,7 +319,7 @@ void __init uic_init_tree(void)
}
}
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
u32 msr;
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index c1917cf67c3d..e7fa26c4ff73 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -112,10 +112,10 @@ static unsigned int icp_hv_get_irq(void)
unsigned int irq;
if (vec == XICS_IRQ_SPURIOUS)
- return NO_IRQ;
+ return 0;
irq = irq_find_mapping(xics_host, vec);
- if (likely(irq != NO_IRQ)) {
+ if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
@@ -126,7 +126,7 @@ static unsigned int icp_hv_get_irq(void)
/* We might learn about it later, so EOI it */
icp_hv_set_xirr(xirr);
- return NO_IRQ;
+ return 0;
}
static void icp_hv_set_cpu_priority(unsigned char cppr)
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index afdf62f2a695..8a6a043e239b 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -124,10 +124,10 @@ static unsigned int icp_native_get_irq(void)
unsigned int irq;
if (vec == XICS_IRQ_SPURIOUS)
- return NO_IRQ;
+ return 0;
irq = irq_find_mapping(xics_host, vec);
- if (likely(irq != NO_IRQ)) {
+ if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
@@ -138,7 +138,7 @@ static unsigned int icp_native_get_irq(void)
/* We might learn about it later, so EOI it */
icp_native_set_xirr(xirr);
- return NO_IRQ;
+ return 0;
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 9114243fa1b5..d38e86fd5720 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -51,14 +51,14 @@ static unsigned int icp_opal_get_irq(void)
rc = opal_int_get_xirr(&xirr, false);
if (rc < 0)
- return NO_IRQ;
+ return 0;
xirr = be32_to_cpu(xirr);
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
- return NO_IRQ;
+ return 0;
irq = irq_find_mapping(xics_host, vec);
- if (likely(irq != NO_IRQ)) {
+ if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
@@ -69,7 +69,7 @@ static unsigned int icp_opal_get_irq(void)
/* We might learn about it later, so EOI it */
opal_int_eoi(xirr);
- return NO_IRQ;
+ return 0;
}
static void icp_opal_set_cpu_priority(unsigned char cppr)
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 9d530f479588..69d858e51ac7 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -131,7 +131,7 @@ static void xics_request_ipi(void)
unsigned int ipi;
ipi = irq_create_mapping(xics_host, XICS_IPI);
- BUG_ON(ipi == NO_IRQ);
+ BUG_ON(!ipi);
/*
* IPIs are marked IRQF_PERCPU. The handler was set in map.
diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S
index 84ad74213c83..7d8b0e8ed6d9 100644
--- a/arch/powerpc/xmon/spr_access.S
+++ b/arch/powerpc/xmon/spr_access.S
@@ -2,12 +2,12 @@
/* unsigned long xmon_mfspr(sprn, default_value) */
_GLOBAL(xmon_mfspr)
- ld r5, .Lmfspr_table@got(r2)
+ PPC_LL r5, .Lmfspr_table@got(r2)
b xmon_mxspr
/* void xmon_mtspr(sprn, new_value) */
_GLOBAL(xmon_mtspr)
- ld r5, .Lmtspr_table@got(r2)
+ PPC_LL r5, .Lmtspr_table@got(r2)
b xmon_mxspr
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c109f073d454..426481d4cc86 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -67,12 +67,13 @@ config DEBUG_RODATA
config S390
def_bool y
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -109,6 +110,7 @@ config S390
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_PROT_NUMA_PROT_NONE
+ select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -136,6 +138,7 @@ config S390
select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 224b42734f0d..54e00526b8df 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -46,6 +46,8 @@ cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
+cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
+
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 15c94246b600..f587c4811faf 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -542,7 +542,7 @@ static int __init appldata_init(void)
rc = PTR_ERR(appldata_pdev);
goto out_driver;
}
- appldata_wq = create_singlethread_workqueue("appldata");
+ appldata_wq = alloc_ordered_workqueue("appldata", 0);
if (!appldata_wq) {
rc = -ENOMEM;
goto out_device;
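create_singlethread_workqueue() is the legacy interface; alloc_ordered_workqueue() keeps the one-item-at-a-time, in-order execution guarantee while using the current workqueue API. A minimal usage sketch (names hypothetical):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_setup(void)
    {
            /* Ordered: at most one work item runs at a time, in queueing order. */
            example_wq = alloc_ordered_workqueue("example", 0);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void example_teardown(void)
    {
            destroy_workqueue(example_wq);
    }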
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 33ba697c782d..0daa070d6c9d 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -17,6 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 412b1bd21029..45968686f918 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -260,7 +260,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -269,6 +268,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -281,7 +282,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -299,6 +299,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -359,6 +361,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -409,6 +412,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -428,6 +432,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -453,7 +458,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -495,6 +499,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index bec279eb4b93..1dd05e345c4d 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -15,6 +15,8 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -487,6 +493,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 1751446a5bbb..29d1178666f0 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -16,6 +16,8 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -488,6 +494,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 2ea18b050309..303d28eb03a2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,69 +31,29 @@
#include <crypto/xts.h>
#include <asm/cpacf.h>
-#define AES_KEYLEN_128 1
-#define AES_KEYLEN_192 2
-#define AES_KEYLEN_256 4
-
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
-static char keylen_flag;
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_aes_ctx {
u8 key[AES_MAX_KEY_SIZE];
- long enc;
- long dec;
int key_len;
+ unsigned long fc;
union {
struct crypto_skcipher *blk;
struct crypto_cipher *cip;
} fallback;
};
-struct pcc_param {
- u8 key[32];
- u8 tweak[16];
- u8 block[16];
- u8 bit[16];
- u8 xts[16];
-};
-
struct s390_xts_ctx {
u8 key[32];
u8 pcc_key[32];
- long enc;
- long dec;
int key_len;
+ unsigned long fc;
struct crypto_skcipher *fallback;
};
-/*
- * Check if the key_len is supported by the HW.
- * Returns 0 if it is, a positive number if it is not and software fallback is
- * required or a negative number in case the key size is not valid
- */
-static int need_fallback(unsigned int key_len)
-{
- switch (key_len) {
- case 16:
- if (!(keylen_flag & AES_KEYLEN_128))
- return 1;
- break;
- case 24:
- if (!(keylen_flag & AES_KEYLEN_192))
- return 1;
- break;
- case 32:
- if (!(keylen_flag & AES_KEYLEN_256))
- return 1;
- break;
- default:
- return -1;
- break;
- }
- return 0;
-}
-
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -117,72 +77,44 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret < 0) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KM_AES_128 :
+ (key_len == 24) ? CPACF_KM_AES_192 :
+ (key_len == 32) ? CPACF_KM_AES_256 : 0;
- sctx->key_len = key_len;
- if (!ret) {
- memcpy(sctx->key, in_key, key_len);
- return 0;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_cip(tfm, in_key, key_len);
- return setkey_fallback_cip(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- if (unlikely(need_fallback(sctx->key_len))) {
+ if (unlikely(!sctx->fc)) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
return;
}
-
- switch (sctx->key_len) {
- case 16:
- cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 24:
- cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 32:
- cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- }
+ cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- if (unlikely(need_fallback(sctx->key_len))) {
+ if (unlikely(!sctx->fc)) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
return;
}
-
- switch (sctx->key_len) {
- case 16:
- cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 24:
- cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 32:
- cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- }
+ cpacf_km(sctx->fc | CPACF_DECRYPT,
+ &sctx->key, out, in, AES_BLOCK_SIZE);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
@@ -291,50 +223,37 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret > 0) {
- sctx->key_len = key_len;
- return setkey_fallback_blk(tfm, in_key, key_len);
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KM_AES_128 :
+ (key_len == 24) ? CPACF_KM_AES_192 :
+ (key_len == 32) ? CPACF_KM_AES_256 : 0;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KM_AES_128_ENC;
- sctx->dec = CPACF_KM_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KM_AES_192_ENC;
- sctx->dec = CPACF_KM_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KM_AES_256_ENC;
- sctx->dec = CPACF_KM_AES_256_DEC;
- break;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n;
+ int ret;
- while ((nbytes = walk->nbytes)) {
+ ret = blkcipher_walk_virt(desc, walk);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_km(func, param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_km(sctx->fc | modifier, sctx->key,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
return ret;
@@ -347,11 +266,11 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+ return ecb_aes_crypt(desc, 0, &walk);
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
@@ -361,11 +280,11 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+ return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int fallback_init_blk(struct crypto_tfm *tfm)
@@ -420,64 +339,45 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret > 0) {
- sctx->key_len = key_len;
- return setkey_fallback_blk(tfm, in_key, key_len);
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KMC_AES_128 :
+ (key_len == 24) ? CPACF_KMC_AES_192 :
+ (key_len == 32) ? CPACF_KMC_AES_256 : 0;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KMC_AES_128_ENC;
- sctx->dec = CPACF_KMC_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KMC_AES_192_ENC;
- sctx->dec = CPACF_KMC_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KMC_AES_256_ENC;
- sctx->dec = CPACF_KMC_AES_256_DEC;
- break;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
+ unsigned int nbytes, n;
+ int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
} param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
memcpy(param.key, sctx->key, sctx->key_len);
- do {
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_kmc(func, &param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_kmc(sctx->fc | modifier, &param,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
-
-out:
return ret;
}
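
The CBC path additionally carries the chaining value in the KMC parameter block (IV followed by the key) and copies it back into walk->iv once the loop is done. A sketch of that marshalling, with the CPACF call replaced by a stub named fake_kmc() that only models the in-place IV update (a sketch under that assumption, not the real instruction semantics):

	#include <stdio.h>
	#include <string.h>

	#define AES_BLOCK_SIZE	16
	#define AES_MAX_KEY_SIZE 32

	struct cbc_param {
		unsigned char iv[AES_BLOCK_SIZE];	/* chaining value, updated in place */
		unsigned char key[AES_MAX_KEY_SIZE];
	};

	/* stand-in for cpacf_kmc(): only the chaining-value update is modelled */
	static void fake_kmc(struct cbc_param *param, unsigned int nblocks)
	{
		param->iv[0] += nblocks;
	}

	int main(void)
	{
		unsigned char walk_iv[AES_BLOCK_SIZE] = { 0 };
		unsigned char key[AES_MAX_KEY_SIZE] = { 0 };
		struct cbc_param param;

		memcpy(param.iv, walk_iv, AES_BLOCK_SIZE);
		memcpy(param.key, key, sizeof(key));
		fake_kmc(&param, 4);				/* "encrypt" 4 blocks */
		memcpy(walk_iv, param.iv, AES_BLOCK_SIZE);	/* propagate the chained IV */
		printf("iv[0] after chaining: %u\n", walk_iv[0]);
		return 0;
	}
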
@@ -488,11 +388,11 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, &walk);
+ return cbc_aes_crypt(desc, 0, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -502,11 +402,11 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, &walk);
+ return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -594,83 +494,67 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ unsigned long fc;
int err;
err = xts_check_key(tfm, in_key, key_len);
if (err)
return err;
- switch (key_len) {
- case 32:
- xts_ctx->enc = CPACF_KM_XTS_128_ENC;
- xts_ctx->dec = CPACF_KM_XTS_128_DEC;
- memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
- break;
- case 48:
- xts_ctx->enc = 0;
- xts_ctx->dec = 0;
- xts_fallback_setkey(tfm, in_key, key_len);
- break;
- case 64:
- xts_ctx->enc = CPACF_KM_XTS_256_ENC;
- xts_ctx->dec = CPACF_KM_XTS_256_DEC;
- memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc_key, in_key + 32, 32);
- break;
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+ (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+
+ /* Check if the function code is available */
+ xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!xts_ctx->fc)
+ return xts_fallback_setkey(tfm, in_key, key_len);
+
+ /* Split the XTS key into the two subkeys */
+ key_len = key_len / 2;
xts_ctx->key_len = key_len;
+ memcpy(xts_ctx->key, in_key, key_len);
+ memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
return 0;
}
-static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
- struct s390_xts_ctx *xts_ctx,
+static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
- unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
- unsigned int n;
- u8 *in, *out;
- struct pcc_param pcc_param;
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int offset, nbytes, n;
+ int ret;
+ struct {
+ u8 key[32];
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+ } pcc_param;
struct {
u8 key[32];
u8 init[16];
} xts_param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
+ offset = xts_ctx->key_len & 0x10;
memset(pcc_param.block, 0, sizeof(pcc_param.block));
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
- memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
- /* remove decipher modifier bit from 'func' and call PCC */
- ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
- if (ret < 0)
- return -EIO;
+ memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
+ cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
- memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
memcpy(xts_param.init, pcc_param.xts, 16);
- do {
+
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
-
- ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
-out:
+ cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
return ret;
}
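
As I read the XTS rewrite, the incoming key is split into two equal subkeys, and the parameter block is addressed at key + offset so that a 16-byte AES-128 key sits in the upper half of the 32-byte key field; that keeps the tweak and scratch fields that follow at the same position for both key sizes. A small sketch of just that arithmetic (illustrative values only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int key_lens[] = { 32, 64 };	/* xts(aes) key sizes in bytes */
		unsigned int i;

		for (i = 0; i < 2; i++) {
			unsigned int half = key_lens[i] / 2;	/* per-subkey length */
			unsigned int offset = half & 0x10;	/* 16 for AES-128, 0 for AES-256 */

			printf("xts key %2u -> subkeys of %2u bytes, param offset %u\n",
			       key_lens[i], half, offset);
		}
		return 0;
	}
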
@@ -681,11 +565,11 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(xts_ctx->key_len == 48))
+ if (unlikely(!xts_ctx->fc))
return xts_fallback_encrypt(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+ return xts_aes_crypt(desc, 0, &walk);
}
static int xts_aes_decrypt(struct blkcipher_desc *desc,
@@ -695,11 +579,11 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(xts_ctx->key_len == 48))
+ if (unlikely(!xts_ctx->fc))
return xts_fallback_decrypt(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+ return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int xts_fallback_init(struct crypto_tfm *tfm)
@@ -750,108 +634,79 @@ static struct crypto_alg xts_aes_alg = {
}
};
-static int xts_aes_alg_reg;
-
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ unsigned long fc;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KMCTR_AES_128_ENC;
- sctx->dec = CPACF_KMCTR_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KMCTR_AES_192_ENC;
- sctx->dec = CPACF_KMCTR_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KMCTR_AES_256_ENC;
- sctx->dec = CPACF_KMCTR_AES_256_DEC;
- break;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
+ (key_len == 24) ? CPACF_KMCTR_AES_192 :
+ (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
+
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
+ memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
+ ctrptr += AES_BLOCK_SIZE;
}
return n;
}
-static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
- struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
+ struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 buf[AES_BLOCK_SIZE], *ctrptr;
unsigned int n, nbytes;
- u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
- u8 *out, *in, *ctrptr = ctrbuf;
-
- if (!walk->nbytes)
- return ret;
+ int ret, locked;
- if (spin_trylock(&ctrblk_lock))
- ctrptr = ctrblk;
+ locked = spin_trylock(&ctrblk_lock);
- memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- while (nbytes >= AES_BLOCK_SIZE) {
- if (ctrptr == ctrblk)
- n = __ctrblk_init(ctrptr, nbytes);
- else
- n = AES_BLOCK_SIZE;
- ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
- if (ret < 0 || ret != n) {
- if (ctrptr == ctrblk)
- spin_unlock(&ctrblk_lock);
- return -EIO;
- }
- if (n > AES_BLOCK_SIZE)
- memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrptr, AES_BLOCK_SIZE);
- out += n;
- in += n;
- nbytes -= n;
- }
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = AES_BLOCK_SIZE;
+ if (nbytes >= 2*AES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ cpacf_kmctr(sctx->fc | modifier, sctx->key,
+ walk->dst.virt.addr, walk->src.virt.addr,
+ n, ctrptr);
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
- if (ctrptr == ctrblk) {
- if (nbytes)
- memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
- else
- memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ if (locked)
spin_unlock(&ctrblk_lock);
- } else {
- if (!nbytes)
- memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
- }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- ret = cpacf_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrbuf);
- if (ret < 0 || ret != AES_BLOCK_SIZE)
- return -EIO;
- memcpy(out, buf, nbytes);
- crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ cpacf_kmctr(sctx->fc | modifier, sctx->key,
+ buf, walk->src.virt.addr,
+ AES_BLOCK_SIZE, walk->iv);
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
- memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
return ret;
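
The reworked __ctrblk_init() copies the IV into the first slot of the shared page and derives each following slot by incrementing the previous one, so a single cpacf_kmctr() call can consume a whole run of counter blocks. A self-contained sketch of that fill loop; crypto_inc() is modelled by a plain big-endian increment and the byte-count/PAGE_SIZE capping is simplified to a block count:

	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE 16

	static void be_inc(unsigned char *ctr, unsigned int len)
	{
		while (len--)
			if (++ctr[len])
				break;		/* stop once there is no carry */
	}

	static void ctrblk_init(unsigned char *ctrptr, const unsigned char *iv,
				unsigned int nblocks)
	{
		unsigned int i;

		memcpy(ctrptr, iv, BLOCK_SIZE);
		for (i = nblocks - 1; i > 0; i--) {
			memcpy(ctrptr + BLOCK_SIZE, ctrptr, BLOCK_SIZE);
			be_inc(ctrptr + BLOCK_SIZE, BLOCK_SIZE);
			ctrptr += BLOCK_SIZE;
		}
	}

	int main(void)
	{
		unsigned char iv[BLOCK_SIZE] = { 0 };
		unsigned char buf[4 * BLOCK_SIZE];

		iv[BLOCK_SIZE - 1] = 0xfe;		/* force a carry into the third block */
		ctrblk_init(buf, iv, 4);
		printf("last byte of each block: %02x %02x %02x %02x\n",
		       buf[15], buf[31], buf[47], buf[63]);	/* fe ff 00 01 */
		return 0;
	}
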
@@ -864,8 +719,11 @@ static int ctr_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(!sctx->fc))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+ return ctr_aes_crypt(desc, 0, &walk);
}
static int ctr_aes_decrypt(struct blkcipher_desc *desc,
@@ -875,19 +733,25 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(!sctx->fc))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+ return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
.cra_priority = 400, /* combo: aes + ctr */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -900,89 +764,79 @@ static struct crypto_alg ctr_aes_alg = {
}
};
-static int ctr_aes_alg_reg;
+static struct crypto_alg *aes_s390_algs_ptr[5];
+static int aes_s390_algs_num;
-static int __init aes_s390_init(void)
+static int aes_s390_register_alg(struct crypto_alg *alg)
{
int ret;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
- keylen_flag |= AES_KEYLEN_128;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
- keylen_flag |= AES_KEYLEN_192;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
- keylen_flag |= AES_KEYLEN_256;
+ ret = crypto_register_alg(alg);
+ if (!ret)
+ aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
+ return ret;
+}
- if (!keylen_flag)
- return -EOPNOTSUPP;
+static void aes_s390_fini(void)
+{
+ while (aes_s390_algs_num--)
+ crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
+}
- /* z9 109 and z9 BC/EC only support 128 bit key length */
- if (keylen_flag == AES_KEYLEN_128)
- pr_info("AES hardware acceleration is only available for"
- " 128-bit keys\n");
+static int __init aes_s390_init(void)
+{
+ int ret;
- ret = crypto_register_alg(&aes_alg);
- if (ret)
- goto aes_err;
+ /* Query available functions for KM, KMC and KMCTR */
+ cpacf_query(CPACF_KM, &km_functions);
+ cpacf_query(CPACF_KMC, &kmc_functions);
+ cpacf_query(CPACF_KMCTR, &kmctr_functions);
- ret = crypto_register_alg(&ecb_aes_alg);
- if (ret)
- goto ecb_aes_err;
+ if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
+ cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
+ ret = aes_s390_register_alg(&aes_alg);
+ if (ret)
+ goto out_err;
+ ret = aes_s390_register_alg(&ecb_aes_alg);
+ if (ret)
+ goto out_err;
+ }
- ret = crypto_register_alg(&cbc_aes_alg);
- if (ret)
- goto cbc_aes_err;
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
+ ret = aes_s390_register_alg(&cbc_aes_alg);
+ if (ret)
+ goto out_err;
+ }
- if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
- cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
- ret = crypto_register_alg(&xts_aes_alg);
+ if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
+ ret = aes_s390_register_alg(&xts_aes_alg);
if (ret)
- goto xts_aes_err;
- xts_aes_alg_reg = 1;
+ goto out_err;
}
- if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
- goto ctr_aes_err;
+ goto out_err;
}
- ret = crypto_register_alg(&ctr_aes_alg);
- if (ret) {
- free_page((unsigned long) ctrblk);
- goto ctr_aes_err;
- }
- ctr_aes_alg_reg = 1;
+ ret = aes_s390_register_alg(&ctr_aes_alg);
+ if (ret)
+ goto out_err;
}
-out:
+ return 0;
+out_err:
+ aes_s390_fini();
return ret;
-
-ctr_aes_err:
- crypto_unregister_alg(&xts_aes_alg);
-xts_aes_err:
- crypto_unregister_alg(&cbc_aes_alg);
-cbc_aes_err:
- crypto_unregister_alg(&ecb_aes_alg);
-ecb_aes_err:
- crypto_unregister_alg(&aes_alg);
-aes_err:
- goto out;
-}
-
-static void __exit aes_s390_fini(void)
-{
- if (ctr_aes_alg_reg) {
- crypto_unregister_alg(&ctr_aes_alg);
- free_page((unsigned long) ctrblk);
- }
- if (xts_aes_alg_reg)
- crypto_unregister_alg(&xts_aes_alg);
- crypto_unregister_alg(&cbc_aes_alg);
- crypto_unregister_alg(&ecb_aes_alg);
- crypto_unregister_alg(&aes_alg);
}
module_cpu_feature_match(MSA, aes_s390_init);
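
All the AES setkey functions above now follow the same shape: map the key length to one function code, test that code against the mask returned at init time, and fall back to the software implementation otherwise. A userspace sketch of that pattern; the bit test mirrors cpacf_test_func(), while the mask contents are invented for the example (here only AES-128 is "installed"):

	#include <stdio.h>

	#define KM_AES_128	0x12
	#define KM_AES_192	0x13
	#define KM_AES_256	0x14

	struct mask { unsigned char bytes[16]; };

	static int test_func(const struct mask *m, unsigned int fc)
	{
		return (m->bytes[fc >> 3] & (0x80 >> (fc & 7))) != 0;
	}

	static unsigned long pick_fc(unsigned int key_len)
	{
		return (key_len == 16) ? KM_AES_128 :
		       (key_len == 24) ? KM_AES_192 :
		       (key_len == 32) ? KM_AES_256 : 0;
	}

	int main(void)
	{
		struct mask km = { { 0 } };
		unsigned int key_len;

		/* pretend the query said only AES-128 is available */
		km.bytes[KM_AES_128 >> 3] |= 0x80 >> (KM_AES_128 & 7);

		for (key_len = 16; key_len <= 32; key_len += 8) {
			unsigned long fc = pick_fc(key_len);

			printf("key_len %2u: %s\n", key_len,
			       (fc && test_func(&km, fc)) ? "use CPACF" : "use fallback");
		}
		return 0;
	}
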
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 2bad9d837029..992e630c227b 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -67,7 +67,7 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
\
kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
crc = ___crc32_vx(crc, data, aligned); \
- kernel_fpu_end(&vxstate); \
+ kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
\
if (remaining) \
crc = ___crc32_sw(crc, data + aligned, remaining); \
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 697e71a75fc2..8b83144206eb 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -27,6 +27,8 @@
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
@@ -36,12 +38,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
u32 tmp[DES_EXPKEY_WORDS];
/* check for weak keys */
- if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if (!des_ekey(tmp, key) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -53,14 +55,15 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT,
+ ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
@@ -82,61 +85,46 @@ static struct crypto_alg des_alg = {
}
};
-static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *key, struct blkcipher_walk *walk)
+static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
+ struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes;
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n;
+ int ret;
- while ((nbytes = walk->nbytes)) {
+ ret = blkcipher_walk_virt(desc, walk);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_km(func, key, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= DES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ cpacf_km(fc, ctx->key, walk->dst.virt.addr,
+ walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
-
return ret;
}
-static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
struct blkcipher_walk *walk)
{
struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
+ unsigned int nbytes, n;
+ int ret;
struct {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
} param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
- do {
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_kmc(func, &param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= DES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
+ n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ cpacf_kmc(fc, &param, walk->dst.virt.addr,
+ walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
-
-out:
return ret;
}
@@ -144,22 +132,20 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg ecb_des_alg = {
@@ -189,7 +175,7 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
@@ -199,7 +185,7 @@ static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -240,13 +226,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
memcpy(ctx->key, key, key_len);
@@ -257,14 +242,15 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT,
+ ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
@@ -290,22 +276,21 @@ static int ecb_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk);
}
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg ecb_des3_alg = {
@@ -335,7 +320,7 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
@@ -345,7 +330,8 @@ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -369,81 +355,54 @@ static struct crypto_alg cbc_des3_alg = {
}
};
-static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* align to block size, max. PAGE_SIZE */
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
- crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ memcpy(ctrptr, iv, DES_BLOCK_SIZE);
+ for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ ctrptr += DES_BLOCK_SIZE;
}
return n;
}
-static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx,
+static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 buf[DES_BLOCK_SIZE], *ctrptr;
unsigned int n, nbytes;
- u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
- u8 *out, *in, *ctrptr = ctrbuf;
-
- if (!walk->nbytes)
- return ret;
+ int ret, locked;
- if (spin_trylock(&ctrblk_lock))
- ctrptr = ctrblk;
+ locked = spin_trylock(&ctrblk_lock);
- memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- while (nbytes >= DES_BLOCK_SIZE) {
- if (ctrptr == ctrblk)
- n = __ctrblk_init(ctrptr, nbytes);
- else
- n = DES_BLOCK_SIZE;
- ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
- if (ret < 0 || ret != n) {
- if (ctrptr == ctrblk)
- spin_unlock(&ctrblk_lock);
- return -EIO;
- }
- if (n > DES_BLOCK_SIZE)
- memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrptr, DES_BLOCK_SIZE);
- out += n;
- in += n;
- nbytes -= n;
- }
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = DES_BLOCK_SIZE;
+ if (nbytes >= 2*DES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
+ walk->src.virt.addr, n, ctrptr);
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+ crypto_inc(walk->iv, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
- if (ctrptr == ctrblk) {
- if (nbytes)
- memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
- else
- memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ if (locked)
spin_unlock(&ctrblk_lock);
- } else {
- if (!nbytes)
- memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
- }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- ret = cpacf_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrbuf);
- if (ret < 0 || ret != DES_BLOCK_SIZE)
- return -EIO;
- memcpy(out, buf, nbytes);
- crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
+ DES_BLOCK_SIZE, walk->iv);
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
- memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
return ret;
}
@@ -452,22 +411,20 @@ static int ctr_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk);
}
static int ctr_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_des_alg = {
@@ -495,22 +452,21 @@ static int ctr_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk);
}
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg ctr_des3_alg = {
@@ -534,83 +490,87 @@ static struct crypto_alg ctr_des3_alg = {
}
};
+static struct crypto_alg *des_s390_algs_ptr[8];
+static int des_s390_algs_num;
+
+static int des_s390_register_alg(struct crypto_alg *alg)
+{
+ int ret;
+
+ ret = crypto_register_alg(alg);
+ if (!ret)
+ des_s390_algs_ptr[des_s390_algs_num++] = alg;
+ return ret;
+}
+
+static void des_s390_exit(void)
+{
+ while (des_s390_algs_num--)
+ crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
+}
+
static int __init des_s390_init(void)
{
int ret;
- if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
- !cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
- return -EOPNOTSUPP;
-
- ret = crypto_register_alg(&des_alg);
- if (ret)
- goto des_err;
- ret = crypto_register_alg(&ecb_des_alg);
- if (ret)
- goto ecb_des_err;
- ret = crypto_register_alg(&cbc_des_alg);
- if (ret)
- goto cbc_des_err;
- ret = crypto_register_alg(&des3_alg);
- if (ret)
- goto des3_err;
- ret = crypto_register_alg(&ecb_des3_alg);
- if (ret)
- goto ecb_des3_err;
- ret = crypto_register_alg(&cbc_des3_alg);
- if (ret)
- goto cbc_des3_err;
-
- if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
- ret = crypto_register_alg(&ctr_des_alg);
+ /* Query available functions for KM, KMC and KMCTR */
+ cpacf_query(CPACF_KM, &km_functions);
+ cpacf_query(CPACF_KMC, &kmc_functions);
+ cpacf_query(CPACF_KMCTR, &kmctr_functions);
+
+ if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
+ ret = des_s390_register_alg(&des_alg);
+ if (ret)
+ goto out_err;
+ ret = des_s390_register_alg(&ecb_des_alg);
if (ret)
- goto ctr_des_err;
- ret = crypto_register_alg(&ctr_des3_alg);
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
+ ret = des_s390_register_alg(&cbc_des_alg);
if (ret)
- goto ctr_des3_err;
+ goto out_err;
+ }
+ if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
+ ret = des_s390_register_alg(&des3_alg);
+ if (ret)
+ goto out_err;
+ ret = des_s390_register_alg(&ecb_des3_alg);
+ if (ret)
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
+ ret = des_s390_register_alg(&cbc_des3_alg);
+ if (ret)
+ goto out_err;
+ }
+
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
- goto ctr_mem_err;
+ goto out_err;
}
}
-out:
- return ret;
-ctr_mem_err:
- crypto_unregister_alg(&ctr_des3_alg);
-ctr_des3_err:
- crypto_unregister_alg(&ctr_des_alg);
-ctr_des_err:
- crypto_unregister_alg(&cbc_des3_alg);
-cbc_des3_err:
- crypto_unregister_alg(&ecb_des3_alg);
-ecb_des3_err:
- crypto_unregister_alg(&des3_alg);
-des3_err:
- crypto_unregister_alg(&cbc_des_alg);
-cbc_des_err:
- crypto_unregister_alg(&ecb_des_alg);
-ecb_des_err:
- crypto_unregister_alg(&des_alg);
-des_err:
- goto out;
-}
-
-static void __exit des_s390_exit(void)
-{
- if (ctrblk) {
- crypto_unregister_alg(&ctr_des_alg);
- crypto_unregister_alg(&ctr_des3_alg);
- free_page((unsigned long) ctrblk);
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
+ ret = des_s390_register_alg(&ctr_des_alg);
+ if (ret)
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
+ ret = des_s390_register_alg(&ctr_des3_alg);
+ if (ret)
+ goto out_err;
}
- crypto_unregister_alg(&cbc_des3_alg);
- crypto_unregister_alg(&ecb_des3_alg);
- crypto_unregister_alg(&des3_alg);
- crypto_unregister_alg(&cbc_des_alg);
- crypto_unregister_alg(&ecb_des_alg);
- crypto_unregister_alg(&des_alg);
+
+ return 0;
+out_err:
+ des_s390_exit();
+ return ret;
}
module_cpu_feature_match(MSA, des_s390_init);
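
Both drivers now keep a small array of the algorithms that actually registered, so one exit routine can unregister exactly those, last first, and the init error path simply calls it. A sketch of that bookkeeping with crypto_register_alg() replaced by a stub that can be told to fail:

	#include <stdio.h>

	#define MAX_ALGS 8

	static const char *registered[MAX_ALGS];
	static int num_registered;

	static int fake_register(const char *name, int fail)
	{
		if (fail)
			return -1;
		registered[num_registered++] = name;	/* remember what made it in */
		return 0;
	}

	static void unregister_all(void)
	{
		while (num_registered--)
			printf("unregister %s\n", registered[num_registered]);
	}

	int main(void)
	{
		if (fake_register("ecb(des)", 0) ||
		    fake_register("cbc(des)", 0) ||
		    fake_register("ctr(des)", 1))	/* third registration fails */
			unregister_all();		/* tears down the first two, last first */
		return 0;
	}
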
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index ab68de72e795..564616d48d8b 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -58,7 +58,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
u8 *buf = dctx->buffer;
- int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -71,18 +70,14 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
- GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
+ GHASH_BLOCK_SIZE);
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
- if (ret != n)
- return -EIO;
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
src += n;
srclen -= n;
}
@@ -98,17 +93,12 @@ static int ghash_update(struct shash_desc *desc,
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
- int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
memset(pos, 0, dctx->bytes);
-
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
-
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
dctx->bytes = 0;
}
@@ -146,7 +136,7 @@ static struct shash_alg ghash_alg = {
static int __init ghash_mod_init(void)
{
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
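
In the GHASH code above, dctx->bytes appears to count the free space left in the 16-byte buffer, which is why the flush zero-fills exactly that many trailing bytes before the last digest call. A tiny sketch of that padding step with made-up buffer contents:

	#include <stdio.h>
	#include <string.h>

	#define GHASH_BLOCK_SIZE 16

	int main(void)
	{
		unsigned char buf[GHASH_BLOCK_SIZE];
		unsigned int bytes = 5;				/* 5 bytes still free */
		unsigned char *pos = buf + (GHASH_BLOCK_SIZE - bytes);

		memset(buf, 0xaa, sizeof(buf));			/* 11 bytes of pending data */
		memset(pos, 0, bytes);				/* zero-padded by the flush */
		printf("byte 10: 0x%02x, byte 15: 0x%02x\n", buf[10], buf[15]);
		return 0;
	}
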
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 41527b113f5a..9cc050f9536c 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -135,12 +135,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
else
h = ebuf;
/* generate sha256 from this page */
- if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
- pg, PAGE_SIZE) != PAGE_SIZE) {
- prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
- ret = -EIO;
- goto out;
- }
+ cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@@ -148,7 +143,6 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
nbytes -= n;
}
-out:
free_page((unsigned long)pg);
return ret;
}
@@ -160,13 +154,11 @@ static void prng_tdes_add_entropy(void)
{
__u64 entropy[4];
unsigned int i;
- int ret;
for (i = 0; i < 16; i++) {
- ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
- (char *)entropy, (char *)entropy,
- sizeof(entropy));
- BUG_ON(ret < 0 || ret != sizeof(entropy));
+ cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+ (char *) entropy, (char *) entropy,
+ sizeof(entropy));
memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
}
@@ -303,21 +295,14 @@ static int __init prng_sha512_selftest(void)
0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
- int ret = 0;
u8 buf[sizeof(random)];
struct ppno_ws_s ws;
memset(&ws, 0, sizeof(ws));
/* initial seed */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
- seed, sizeof(seed));
- if (ret < 0) {
- pr_err("The prng self test seed operation for the "
- "SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &ws, NULL, 0, seed, sizeof(seed));
/* check working states V and C */
if (memcmp(ws.V, V0, sizeof(V0)) != 0
@@ -329,22 +314,10 @@ static int __init prng_sha512_selftest(void)
}
/* generate random bytes */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &ws, buf, sizeof(buf), NULL, 0);
- if (ret < 0) {
- pr_err("The prng self test generate operation for "
- "the SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &ws, buf, sizeof(buf), NULL, 0);
- if (ret < 0) {
- pr_err("The prng self test generate operation for "
- "the SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf), NULL, 0);
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf), NULL, 0);
/* check against expected data */
if (memcmp(buf, random, sizeof(random)) != 0) {
@@ -392,26 +365,16 @@ static int __init prng_sha512_instantiate(void)
get_tod_clock_ext(seed + 48);
/* initial seed of the ppno drng */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
- &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
- if (ret < 0) {
- prng_errorflag = PRNG_SEED_FAILED;
- ret = -EIO;
- goto outfree;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
/* if fips mode is enabled, generate a first block of random
bytes for the FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &prng_data->ppnows,
- prng_data->prev, prng_chunk_size, NULL, 0);
- if (ret < 0 || ret != prng_chunk_size) {
- prng_errorflag = PRNG_GEN_FAILED;
- ret = -EIO;
- goto outfree;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows,
+ prng_data->prev, prng_chunk_size, NULL, 0);
}
return 0;
@@ -440,12 +403,8 @@ static int prng_sha512_reseed(void)
return ret;
/* do a reseed of the ppno drng with this bytestring */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
- &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
- if (ret) {
- prng_errorflag = PRNG_RESEED_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
return 0;
}
@@ -463,12 +422,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
}
/* PPNO generate */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &prng_data->ppnows, buf, nbytes, NULL, 0);
- if (ret < 0 || ret != nbytes) {
- prng_errorflag = PRNG_GEN_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows, buf, nbytes, NULL, 0);
/* FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
@@ -479,7 +434,7 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
memcpy(prng_data->prev, buf, nbytes);
}
- return ret;
+ return nbytes;
}
@@ -494,7 +449,7 @@ static int prng_open(struct inode *inode, struct file *file)
static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
size_t nbytes, loff_t *ppos)
{
- int chunk, n, tmp, ret = 0;
+ int chunk, n, ret = 0;
/* lock prng_data struct */
if (mutex_lock_interruptible(&prng_data->mutex))
@@ -545,13 +500,9 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
*
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
- */
- tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
- prng_data->buf, prng_data->buf, n);
- if (tmp < 0 || tmp != n) {
- ret = -EIO;
- break;
- }
+ */
+ cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+ prng_data->buf, prng_data->buf, n);
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
@@ -806,13 +757,13 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
- if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
+ if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PPNO operations */
- if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
+ if (!cpacf_query_func(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5fbf91bbb478..c7de53d8da75 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 10aac0b11988..53c277999a28 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
{
int ret;
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -EOPNOTSUPP;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index ea85757be407..2f4caa1ef123 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -133,7 +133,7 @@ static int __init init(void)
{
int ret;
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 8e908166c3ee..c740f77285b2 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -22,8 +22,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
- unsigned int index;
- int ret;
+ unsigned int index, n;
/* how much is already in the buffer? */
index = ctx->count & (bsize - 1);
@@ -35,9 +34,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
- ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
- if (ret != bsize)
- return -EIO;
+ cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -45,12 +42,10 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
- ret = cpacf_kimd(ctx->func, ctx->state, data,
- len & ~(bsize - 1));
- if (ret != (len & ~(bsize - 1)))
- return -EIO;
- data += ret;
- len -= ret;
+ n = len & ~(bsize - 1);
+ cpacf_kimd(ctx->func, ctx->state, data, n);
+ data += n;
+ len -= n;
}
store:
if (len)
@@ -66,7 +61,6 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int index, end, plen;
- int ret;
/* SHA-512 uses 128 bit padding length */
plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
@@ -88,10 +82,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
*/
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
-
- ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
- if (ret != end)
- return -EIO;
+ cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
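
The buffering arithmetic in s390_sha_update() is easiest to see with concrete numbers: the low bits of the running byte count give the fill level of the partial block, the partial block is completed first, and then only whole blocks go to the digest instruction. A worked example with invented sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bsize = 64;			/* SHA-1/SHA-256 block size */
		unsigned long long count = 200;			/* bytes hashed so far */
		unsigned int len = 150;				/* bytes in this update */

		unsigned int index = count & (bsize - 1);	/* 8 bytes already buffered */
		unsigned int fill = bsize - index;		/* 56 bytes complete that block */
		unsigned int full = (len - fill) & ~(bsize - 1);/* 64 bytes digested in one go */
		unsigned int rest = (len - fill) - full;	/* 30 bytes stay buffered */

		printf("index=%u fill=%u full=%u rest=%u\n", index, fill, full, rest);
		return 0;
	}
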
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index d28621de8e0b..2c680db7e5c1 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,67 +28,51 @@
#define CPACF_PPNO 0xb93c /* MSA5 */
/*
- * Function codes for the KM (CIPHER MESSAGE)
- * instruction (0x80 is the decipher modifier bit)
+ * Decryption modifier bit
+ */
+#define CPACF_DECRYPT 0x80
+
+/*
+ * Function codes for the KM (CIPHER MESSAGE) instruction
*/
#define CPACF_KM_QUERY 0x00
-#define CPACF_KM_DEA_ENC 0x01
-#define CPACF_KM_DEA_DEC 0x81
-#define CPACF_KM_TDEA_128_ENC 0x02
-#define CPACF_KM_TDEA_128_DEC 0x82
-#define CPACF_KM_TDEA_192_ENC 0x03
-#define CPACF_KM_TDEA_192_DEC 0x83
-#define CPACF_KM_AES_128_ENC 0x12
-#define CPACF_KM_AES_128_DEC 0x92
-#define CPACF_KM_AES_192_ENC 0x13
-#define CPACF_KM_AES_192_DEC 0x93
-#define CPACF_KM_AES_256_ENC 0x14
-#define CPACF_KM_AES_256_DEC 0x94
-#define CPACF_KM_XTS_128_ENC 0x32
-#define CPACF_KM_XTS_128_DEC 0xb2
-#define CPACF_KM_XTS_256_ENC 0x34
-#define CPACF_KM_XTS_256_DEC 0xb4
+#define CPACF_KM_DEA 0x01
+#define CPACF_KM_TDEA_128 0x02
+#define CPACF_KM_TDEA_192 0x03
+#define CPACF_KM_AES_128 0x12
+#define CPACF_KM_AES_192 0x13
+#define CPACF_KM_AES_256 0x14
+#define CPACF_KM_XTS_128 0x32
+#define CPACF_KM_XTS_256 0x34
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KMC_QUERY 0x00
-#define CPACF_KMC_DEA_ENC 0x01
-#define CPACF_KMC_DEA_DEC 0x81
-#define CPACF_KMC_TDEA_128_ENC 0x02
-#define CPACF_KMC_TDEA_128_DEC 0x82
-#define CPACF_KMC_TDEA_192_ENC 0x03
-#define CPACF_KMC_TDEA_192_DEC 0x83
-#define CPACF_KMC_AES_128_ENC 0x12
-#define CPACF_KMC_AES_128_DEC 0x92
-#define CPACF_KMC_AES_192_ENC 0x13
-#define CPACF_KMC_AES_192_DEC 0x93
-#define CPACF_KMC_AES_256_ENC 0x14
-#define CPACF_KMC_AES_256_DEC 0x94
+#define CPACF_KMC_DEA 0x01
+#define CPACF_KMC_TDEA_128 0x02
+#define CPACF_KMC_TDEA_192 0x03
+#define CPACF_KMC_AES_128 0x12
+#define CPACF_KMC_AES_192 0x13
+#define CPACF_KMC_AES_256 0x14
#define CPACF_KMC_PRNG 0x43
/*
* Function codes for the KMCTR (CIPHER MESSAGE WITH COUNTER)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
-#define CPACF_KMCTR_QUERY 0x00
-#define CPACF_KMCTR_DEA_ENC 0x01
-#define CPACF_KMCTR_DEA_DEC 0x81
-#define CPACF_KMCTR_TDEA_128_ENC 0x02
-#define CPACF_KMCTR_TDEA_128_DEC 0x82
-#define CPACF_KMCTR_TDEA_192_ENC 0x03
-#define CPACF_KMCTR_TDEA_192_DEC 0x83
-#define CPACF_KMCTR_AES_128_ENC 0x12
-#define CPACF_KMCTR_AES_128_DEC 0x92
-#define CPACF_KMCTR_AES_192_ENC 0x13
-#define CPACF_KMCTR_AES_192_DEC 0x93
-#define CPACF_KMCTR_AES_256_ENC 0x14
-#define CPACF_KMCTR_AES_256_DEC 0x94
+#define CPACF_KMCTR_QUERY 0x00
+#define CPACF_KMCTR_DEA 0x01
+#define CPACF_KMCTR_TDEA_128 0x02
+#define CPACF_KMCTR_TDEA_192 0x03
+#define CPACF_KMCTR_AES_128 0x12
+#define CPACF_KMCTR_AES_192 0x13
+#define CPACF_KMCTR_AES_256 0x14
/*
* Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KIMD_QUERY 0x00
#define CPACF_KIMD_SHA_1 0x01
@@ -98,7 +82,7 @@
/*
* Function codes for the KLMD (COMPUTE LAST MESSAGE DIGEST)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KLMD_QUERY 0x00
#define CPACF_KLMD_SHA_1 0x01
@@ -107,7 +91,7 @@
/*
* function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KMAC_QUERY 0x00
#define CPACF_KMAC_DEA 0x01
@@ -116,12 +100,14 @@
/*
* Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_PPNO_QUERY 0x00
#define CPACF_PPNO_SHA512_DRNG_GEN 0x03
#define CPACF_PPNO_SHA512_DRNG_SEED 0x83
+typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+
/**
* cpacf_query() - check if a specific CPACF function is available
* @opcode: the opcode of the crypto instruction
@@ -132,55 +118,66 @@
*
* Returns 1 if @func is available for @opcode, 0 otherwise
*/
-static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
+static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
- typedef struct { unsigned char _[16]; } status_type;
register unsigned long r0 asm("0") = 0; /* query function */
- register unsigned long r1 asm("1") = (unsigned long) status;
+ register unsigned long r1 asm("1") = (unsigned long) mask;
asm volatile(
" spm 0\n" /* pckmo doesn't change the cc */
/* Parameter registers are ignored, but may not be 0 */
"0: .insn rrf,%[opc] << 16,2,2,2,0\n"
" brc 1,0b\n" /* handle partial completion */
- : "=m" (*(status_type *) status)
+ : "=m" (*mask)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
: "cc");
}
-static inline int cpacf_query(unsigned int opcode, unsigned int func)
+static inline int __cpacf_check_opcode(unsigned int opcode)
{
- unsigned char status[16];
-
switch (opcode) {
case CPACF_KMAC:
case CPACF_KM:
case CPACF_KMC:
case CPACF_KIMD:
case CPACF_KLMD:
- if (!test_facility(17)) /* check for MSA */
- return 0;
- break;
+ return test_facility(17); /* check for MSA */
case CPACF_PCKMO:
- if (!test_facility(76)) /* check for MSA3 */
- return 0;
- break;
+ return test_facility(76); /* check for MSA3 */
case CPACF_KMF:
case CPACF_KMO:
case CPACF_PCC:
case CPACF_KMCTR:
- if (!test_facility(77)) /* check for MSA4 */
- return 0;
- break;
+ return test_facility(77); /* check for MSA4 */
case CPACF_PPNO:
- if (!test_facility(57)) /* check for MSA5 */
- return 0;
- break;
+ return test_facility(57); /* check for MSA5 */
default:
BUG();
}
- __cpacf_query(opcode, status);
- return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+{
+ if (__cpacf_check_opcode(opcode)) {
+ __cpacf_query(opcode, mask);
+ return 1;
+ }
+ memset(mask, 0, sizeof(*mask));
+ return 0;
+}
+
+static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
+{
+ return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+{
+ cpacf_mask_t mask;
+
+ if (cpacf_query(opcode, &mask))
+ return cpacf_test_func(&mask, func);
+ return 0;
}
/**
@@ -194,7 +191,7 @@ static inline int cpacf_query(unsigned int opcode, unsigned int func)
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_km(long func, void *param,
+static inline int cpacf_km(unsigned long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -224,7 +221,7 @@ static inline int cpacf_km(long func, void *param,
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_kmc(long func, void *param,
+static inline int cpacf_kmc(unsigned long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -250,11 +247,9 @@ static inline int cpacf_kmc(long func, void *param,
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
- *
- * Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kimd(long func, void *param,
- const u8 *src, long src_len)
+static inline void cpacf_kimd(unsigned long func, void *param,
+ const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -267,8 +262,6 @@ static inline int cpacf_kimd(long func, void *param,
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
: "cc", "memory");
-
- return src_len - r3;
}
/**
@@ -277,11 +270,9 @@ static inline int cpacf_kimd(long func, void *param,
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
- *
- * Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_klmd(long func, void *param,
- const u8 *src, long src_len)
+static inline void cpacf_klmd(unsigned long func, void *param,
+ const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -294,8 +285,6 @@ static inline int cpacf_klmd(long func, void *param,
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
: "cc", "memory");
-
- return src_len - r3;
}
/**
@@ -308,7 +297,7 @@ static inline int cpacf_klmd(long func, void *param,
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(long func, void *param,
+static inline int cpacf_kmac(unsigned long func, void *param,
const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -338,7 +327,7 @@ static inline int cpacf_kmac(long func, void *param,
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_kmctr(long func, void *param, u8 *dest,
+static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
const u8 *src, long src_len, u8 *counter)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -368,13 +357,10 @@ static inline int cpacf_kmctr(long func, void *param, u8 *dest,
* @dest_len: size of destination memory area in bytes
* @seed: address of seed data
* @seed_len: size of seed data in bytes
- *
- * Returns 0 for the query func, number of random bytes stored in
- * dest buffer for generate function
*/
-static inline int cpacf_ppno(long func, void *param,
- u8 *dest, long dest_len,
- const u8 *seed, long seed_len)
+static inline void cpacf_ppno(unsigned long func, void *param,
+ u8 *dest, long dest_len,
+ const u8 *seed, long seed_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -390,8 +376,6 @@ static inline int cpacf_ppno(long func, void *param,
: [fc] "d" (r0), [pba] "a" (r1),
[seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PPNO)
: "cc", "memory");
-
- return dest_len - r3;
}
/**
@@ -399,10 +383,8 @@ static inline int cpacf_ppno(long func, void *param,
* instruction
* @func: the function code passed to PCC; see CPACF_KM_xxx defines
* @param: address of parameter block; see POP for details on each func
- *
- * Returns 0.
*/
-static inline int cpacf_pcc(long func, void *param)
+static inline void cpacf_pcc(unsigned long func, void *param)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -413,8 +395,6 @@ static inline int cpacf_pcc(long func, void *param)
:
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
: "cc", "memory");
-
- return 0;
}
#endif /* _ASM_S390_CPACF_H */
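
The header change replaces the per-direction _ENC/_DEC defines with one code per algorithm plus an explicit CPACF_DECRYPT modifier; ORing the modifier in reproduces the old decrypt codes. A small sketch of that arithmetic using the values defined above:

	#include <stdio.h>

	#define CPACF_DECRYPT	 0x80
	#define CPACF_KM_DEA	 0x01
	#define CPACF_KM_AES_128 0x12
	#define CPACF_KM_XTS_256 0x34

	int main(void)
	{
		printf("KM_DEA decrypt:     0x%02x\n", CPACF_KM_DEA | CPACF_DECRYPT);	/* 0x81 */
		printf("KM_AES_128 decrypt: 0x%02x\n", CPACF_KM_AES_128 | CPACF_DECRYPT);	/* 0x92 */
		printf("KM_XTS_256 decrypt: 0x%02x\n", CPACF_KM_XTS_256 | CPACF_DECRYPT);	/* 0xb4 */
		return 0;
	}
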
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
index 4917728e5828..3b758f66e48b 100644
--- a/arch/s390/include/asm/facilities_src.h
+++ b/arch/s390/include/asm/facilities_src.h
@@ -55,4 +55,28 @@ static struct facility_def facility_defs[] = {
-1 /* END */
}
},
+ {
+ .name = "FACILITIES_KVM",
+ .bits = (int[]){
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+ 2, /* z/Arch mode active */
+ 3, /* DAT-enhancement */
+ 4, /* idte segment table */
+ 5, /* idte region table */
+ 6, /* ASN-and-LX reuse */
+ 7, /* stfle */
+ 8, /* enhanced-DAT 1 */
+ 9, /* sense-running-status */
+ 10, /* conditional sske */
+ 13, /* ipte-range */
+ 14, /* nonquiescing key-setting */
+ 73, /* transactional execution */
+ 75, /* access-exception-fetch/store indication */
+ 76, /* msa extension 3 */
+ 77, /* msa extension 4 */
+ 78, /* enhanced-DAT 2 */
+ -1 /* END */
+ }
+ },
};
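The bit numbers listed above are only input to the build-time generator; a short sketch of the packing it is assumed to perform (facility bit 0 is the most significant bit of the first doubleword, matching the STFLE layout):

/* Assumed equivalent of the generator's bit packing, for illustration only. */
static void set_facility_bit(u64 *words, int nr)
{
	words[nr / 64] |= 1ULL << (63 - (nr % 64));
}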
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 6aba6fc406ad..02124d66bfb5 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -64,18 +64,18 @@ static inline int test_fp_ctl(u32 fpc)
return rc;
}
-#define KERNEL_VXR_V0V7 1
-#define KERNEL_VXR_V8V15 2
-#define KERNEL_VXR_V16V23 4
-#define KERNEL_VXR_V24V31 8
-#define KERNEL_FPR 16
-#define KERNEL_FPC 256
+#define KERNEL_FPC 1
+#define KERNEL_VXR_V0V7 2
+#define KERNEL_VXR_V8V15 4
+#define KERNEL_VXR_V16V23 8
+#define KERNEL_VXR_V24V31 16
#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
-#define KERNEL_FPU_MASK (KERNEL_VXR_LOW|KERNEL_VXR_HIGH|KERNEL_FPR)
+#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
struct kernel_fpu;
@@ -87,18 +87,28 @@ struct kernel_fpu;
* Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
*/
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
-void __kernel_fpu_end(struct kernel_fpu *state);
+void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
preempt_disable();
- __kernel_fpu_begin(state, flags);
+ state->mask = S390_lowcore.fpu_flags;
+ if (!test_cpu_flag(CIF_FPU))
+ /* Save user space FPU state and register contents */
+ save_fpu_regs();
+ else if (state->mask & flags)
+ /* Save FPU/vector registers in use by the kernel */

+ __kernel_fpu_begin(state, flags);
+ S390_lowcore.fpu_flags |= flags;
}
-static inline void kernel_fpu_end(struct kernel_fpu *state)
+static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
- __kernel_fpu_end(state);
+ S390_lowcore.fpu_flags = state->mask;
+ if (state->mask & flags)
+ /* Restore FPU/vector registers in use by the kernel */
+ __kernel_fpu_end(state, flags);
preempt_enable();
}
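A minimal usage sketch (helper name and flag choice are illustrative): the same flag set has to be passed to kernel_fpu_begin() and kernel_fpu_end() so that only the register classes the section actually clobbers are saved and restored.

static void vector_section_sketch(void)
{
	struct kernel_fpu fpu;

	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR_LOW);
	/* ... inline assembly clobbering the FPC and %v0..%v15 ... */
	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR_LOW);
}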
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8e5daf7a76ce..a41faf34b034 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -28,7 +28,7 @@
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
-#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
+#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32
/*
@@ -245,72 +245,72 @@ struct sie_page {
} __packed;
struct kvm_vcpu_stat {
- u32 exit_userspace;
- u32 exit_null;
- u32 exit_external_request;
- u32 exit_external_interrupt;
- u32 exit_stop_request;
- u32 exit_validity;
- u32 exit_instruction;
- u32 exit_pei;
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 instruction_lctl;
- u32 instruction_lctlg;
- u32 instruction_stctl;
- u32 instruction_stctg;
- u32 exit_program_interruption;
- u32 exit_instr_and_program;
- u32 exit_operation_exception;
- u32 deliver_external_call;
- u32 deliver_emergency_signal;
- u32 deliver_service_signal;
- u32 deliver_virtio_interrupt;
- u32 deliver_stop_signal;
- u32 deliver_prefix_signal;
- u32 deliver_restart_signal;
- u32 deliver_program_int;
- u32 deliver_io_int;
- u32 exit_wait_state;
- u32 instruction_pfmf;
- u32 instruction_stidp;
- u32 instruction_spx;
- u32 instruction_stpx;
- u32 instruction_stap;
- u32 instruction_storage_key;
- u32 instruction_ipte_interlock;
- u32 instruction_stsch;
- u32 instruction_chsc;
- u32 instruction_stsi;
- u32 instruction_stfl;
- u32 instruction_tprot;
- u32 instruction_sie;
- u32 instruction_essa;
- u32 instruction_sthyi;
- u32 instruction_sigp_sense;
- u32 instruction_sigp_sense_running;
- u32 instruction_sigp_external_call;
- u32 instruction_sigp_emergency;
- u32 instruction_sigp_cond_emergency;
- u32 instruction_sigp_start;
- u32 instruction_sigp_stop;
- u32 instruction_sigp_stop_store_status;
- u32 instruction_sigp_store_status;
- u32 instruction_sigp_store_adtl_status;
- u32 instruction_sigp_arch;
- u32 instruction_sigp_prefix;
- u32 instruction_sigp_restart;
- u32 instruction_sigp_init_cpu_reset;
- u32 instruction_sigp_cpu_reset;
- u32 instruction_sigp_unknown;
- u32 diagnose_10;
- u32 diagnose_44;
- u32 diagnose_9c;
- u32 diagnose_258;
- u32 diagnose_308;
- u32 diagnose_500;
+ u64 exit_userspace;
+ u64 exit_null;
+ u64 exit_external_request;
+ u64 exit_external_interrupt;
+ u64 exit_stop_request;
+ u64 exit_validity;
+ u64 exit_instruction;
+ u64 exit_pei;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 instruction_lctl;
+ u64 instruction_lctlg;
+ u64 instruction_stctl;
+ u64 instruction_stctg;
+ u64 exit_program_interruption;
+ u64 exit_instr_and_program;
+ u64 exit_operation_exception;
+ u64 deliver_external_call;
+ u64 deliver_emergency_signal;
+ u64 deliver_service_signal;
+ u64 deliver_virtio_interrupt;
+ u64 deliver_stop_signal;
+ u64 deliver_prefix_signal;
+ u64 deliver_restart_signal;
+ u64 deliver_program_int;
+ u64 deliver_io_int;
+ u64 exit_wait_state;
+ u64 instruction_pfmf;
+ u64 instruction_stidp;
+ u64 instruction_spx;
+ u64 instruction_stpx;
+ u64 instruction_stap;
+ u64 instruction_storage_key;
+ u64 instruction_ipte_interlock;
+ u64 instruction_stsch;
+ u64 instruction_chsc;
+ u64 instruction_stsi;
+ u64 instruction_stfl;
+ u64 instruction_tprot;
+ u64 instruction_sie;
+ u64 instruction_essa;
+ u64 instruction_sthyi;
+ u64 instruction_sigp_sense;
+ u64 instruction_sigp_sense_running;
+ u64 instruction_sigp_external_call;
+ u64 instruction_sigp_emergency;
+ u64 instruction_sigp_cond_emergency;
+ u64 instruction_sigp_start;
+ u64 instruction_sigp_stop;
+ u64 instruction_sigp_stop_store_status;
+ u64 instruction_sigp_store_status;
+ u64 instruction_sigp_store_adtl_status;
+ u64 instruction_sigp_arch;
+ u64 instruction_sigp_prefix;
+ u64 instruction_sigp_restart;
+ u64 instruction_sigp_init_cpu_reset;
+ u64 instruction_sigp_cpu_reset;
+ u64 instruction_sigp_unknown;
+ u64 diagnose_10;
+ u64 diagnose_44;
+ u64 diagnose_9c;
+ u64 diagnose_258;
+ u64 diagnose_308;
+ u64 diagnose_500;
};
#define PGM_OPERATION 0x01
@@ -577,7 +577,7 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_arch_memory_slot {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index d79ba7cf75b0..7b93b78f423c 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -129,7 +129,8 @@ struct lowcore {
__u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
- __u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */
+ __u32 fpu_flags; /* 0x03a4 */
+ __u8 pad_0x03a8[0x0400-0x03a8]; /* 0x03a8 */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 6d39329c894b..bea785d7f853 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
struct list_head pgtable_list;
spinlock_t gmap_lock;
struct list_head gmap_list;
+ unsigned long gmap_asce;
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c6a088c91aee..515fea5a3fc4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
+ mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 0da91c4d30fd..6611f798d2be 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -11,6 +11,7 @@
#include <asm-generic/pci.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
+#include <asm/sclp.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -117,6 +118,7 @@ struct zpci_dev {
spinlock_t iommu_bitmap_lock;
unsigned long *iommu_bitmap;
+ unsigned long *lazy_bitmap;
unsigned long iommu_size;
unsigned long iommu_pages;
unsigned int next_bit;
@@ -216,6 +218,9 @@ void zpci_debug_init_device(struct zpci_dev *, const char *);
void zpci_debug_exit_device(struct zpci_dev *);
void zpci_debug_info(struct zpci_dev *, struct seq_file *);
+/* Error reporting */
+int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *);
+
#ifdef CONFIG_NUMA
/* Returns the node based on PCI bus */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 72c7f60bfe83..0362cd5fa187 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -874,35 +874,31 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
- unsigned long pto = (unsigned long) ptep;
-
- /* Invalidation + global TLB flush for the pte */
- asm volatile(
- " ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
-}
+#define IPTE_GLOBAL 0
+#define IPTE_LOCAL 1
-static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
unsigned long pto = (unsigned long) ptep;
- /* Invalidation + local TLB flush for the pte */
+ /* Invalidation + TLB flush for the pte */
asm volatile(
- " .insn rrf,0xb2210000,%2,%3,0,1"
- : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+ " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
+ [m4] "i" (local));
}
-static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+static inline void __ptep_ipte_range(unsigned long address, int nr,
+ pte_t *ptep, int local)
{
unsigned long pto = (unsigned long) ptep;
- /* Invalidate a range of ptes + global TLB flush of the ptes */
+ /* Invalidate a range of ptes + TLB flush of the ptes */
do {
asm volatile(
- " .insn rrf,0xb2210000,%2,%0,%1,0"
- : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+ " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+ : [r2] "+a" (address), [r3] "+a" (nr)
+ : [r1] "a" (pto), [m4] "i" (local) : "memory");
} while (nr != 255);
}
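A sketch of how a caller is expected to pick between the two variants, based on the pattern the old _local helpers were used with; preemption must be disabled so the cpumask check stays valid, and the last argument has to be a compile-time constant because of the "i" constraint:

static inline void ptep_ipte_sketch(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);	/* this CPU only */
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);	/* broadcast */
}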
@@ -1239,53 +1235,33 @@ static inline void __pmdp_csp(pmd_t *pmdp)
pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}
-static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
-{
- unsigned long sto;
-
- sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
- asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,0"
- : "=m" (*pmdp)
- : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
- : "cc" );
-}
-
-static inline void __pudp_idte(unsigned long address, pud_t *pudp)
-{
- unsigned long r3o;
-
- r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
- r3o |= _ASCE_TYPE_REGION3;
- asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,0"
- : "=m" (*pudp)
- : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
- : "cc");
-}
+#define IDTE_GLOBAL 0
+#define IDTE_LOCAL 1
-static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
unsigned long sto;
sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,1"
- : "=m" (*pmdp)
- : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pmdp)
+ : [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
+ [m4] "i" (local)
: "cc" );
}
-static inline void __pudp_idte_local(unsigned long address, pud_t *pudp)
+static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
unsigned long r3o;
r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
r3o |= _ASCE_TYPE_REGION3;
asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,1"
- : "=m" (*pudp)
- : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pudp)
+ : [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
+ [m4] "i" (local)
: "cc");
}
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1a691ef740cf..39846100682a 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -26,17 +26,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
: : "a" (2048), "a" (asce) : "cc");
}
-/*
- * Flush TLB entries for a specific ASCE on the local CPU
- */
-static inline void __tlb_flush_idte_local(unsigned long asce)
-{
- /* Local TLB flush for the mm */
- asm volatile(
- " .insn rrf,0xb98e0000,0,%0,%1,1"
- : : "a" (2048), "a" (asce) : "cc");
-}
-
#ifdef CONFIG_SMP
void smp_ptlb_all(void);
@@ -65,35 +54,33 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
/* Global TLB flush */
__tlb_flush_global();
/* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
}
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
{
+ unsigned long gmap_asce;
+
+ /*
+ * If the machine has IDTE, we prefer a per-mm flush on all
+ * CPUs over a local flush, even if the mm only ran on the
+ * local CPU.
+ */
preempt_disable();
atomic_inc(&mm->context.flush_count);
- if (MACHINE_HAS_TLB_LC &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- __tlb_flush_idte_local(asce);
+ gmap_asce = READ_ONCE(mm->context.gmap_asce);
+ if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+ if (gmap_asce)
+ __tlb_flush_idte(gmap_asce);
+ __tlb_flush_idte(mm->context.asce);
} else {
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte(asce);
- else
- __tlb_flush_global();
- /* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
+ __tlb_flush_full(mm);
}
+ /* Reset TLB flush mask */
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
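The flush above relies on a small encoding of mm->context.gmap_asce: 0 means no guest ASCE is attached, -1UL means the set of guest ASCEs is unknown or larger than one (forcing a full flush), and any other value is the single guest ASCE to flush together with the mm ASCE. A sketch of how the gmap code is assumed to maintain the field, under the gmap lock:

static void gmap_asce_update_sketch(struct mm_struct *mm, unsigned long asce)
{
	unsigned long gmap_asce;

	if (list_empty(&mm->context.gmap_list))
		gmap_asce = 0;			/* no gmap left */
	else if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = asce;		/* exactly one gmap */
	else
		gmap_asce = -1UL;		/* several gmaps */
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
}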
@@ -112,36 +99,17 @@ static inline void __tlb_flush_kernel(void)
/*
* Flush TLB entries for a specific ASCE on all CPUs.
*/
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
static inline void __tlb_flush_kernel(void)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(init_mm.context.asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
#endif
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
- /*
- * If the machine has IDTE we prefer to do a per mm flush
- * on all cpus instead of doing a local flush if the mm
- * only ran on the local cpu.
- */
- if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, mm->context.asce);
- else
- __tlb_flush_full(mm);
-}
-
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
if (mm->context.flush_mm) {
diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h
index 1411dff7fea7..658393c65d7e 100644
--- a/arch/s390/include/asm/uprobes.h
+++ b/arch/s390/include/asm/uprobes.h
@@ -29,14 +29,4 @@ struct arch_uprobe {
struct arch_uprobe_task {
};
-int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
- unsigned long addr);
-int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
-bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
-int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
- void *data);
-void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
-unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
- struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 4a3135620f5e..49c24a2afce0 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -16,15 +16,13 @@
/* Macros to generate vector instruction byte code */
-#define REG_NUM_INVALID 255
-
/* GR_NUM - Retrieve general-purpose register number
*
* @opd: Operand to store register number
* @r64: String designation of the register in the format "%rN"
*/
.macro GR_NUM opd gr
- \opd = REG_NUM_INVALID
+ \opd = 255
.ifc \gr,%r0
\opd = 0
.endif
@@ -73,14 +71,11 @@
.ifc \gr,%r15
\opd = 15
.endif
- .if \opd == REG_NUM_INVALID
- .error "Invalid general-purpose register designation: \gr"
+ .if \opd == 255
+ \opd = \gr
.endif
.endm
-/* VX_R() - Macro to encode the VX_NUM into the instruction */
-#define VX_R(v) (v & 0x0F)
-
/* VX_NUM - Retrieve vector register number
*
* @opd: Operand to store register number
@@ -88,11 +83,10 @@
*
* The vector register number is used as the input number to the
* instruction and to compute the RXB field of the
- * instruction. To encode the particular vector register number,
- * use the VX_R(v) macro to extract the instruction opcode.
+ * instruction.
*/
.macro VX_NUM opd vxr
- \opd = REG_NUM_INVALID
+ \opd = 255
.ifc \vxr,%v0
\opd = 0
.endif
@@ -189,8 +183,8 @@
.ifc \vxr,%v31
\opd = 31
.endif
- .if \opd == REG_NUM_INVALID
- .error "Invalid vector register designation: \vxr"
+ .if \opd == 255
+ \opd = \vxr
.endif
.endm
@@ -251,7 +245,7 @@
/* VECTOR GENERATE BYTE MASK */
.macro VGBM vr imm2
VX_NUM v1, \vr
- .word (0xE700 | (VX_R(v1) << 4))
+ .word (0xE700 | ((v1&15) << 4))
.word \imm2
MRXBOPC 0, 0x44, v1
.endm
@@ -267,7 +261,7 @@
VX_NUM v1, \v
GR_NUM b2, "%r0"
GR_NUM r3, \gr
- .word 0xE700 | (VX_R(v1) << 4) | r3
+ .word 0xE700 | ((v1&15) << 4) | r3
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x22, v1
.endm
@@ -284,12 +278,21 @@
VLVG \v, \gr, \index, 3
.endm
+/* VECTOR LOAD REGISTER */
+.macro VLR v1, v2
+ VX_NUM v1, \v1
+ VX_NUM v2, \v2
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word 0
+ MRXBOPC 0, 0x56, v1, v2
+.endm
+
/* VECTOR LOAD */
.macro VL v, disp, index="%r0", base
VX_NUM v1, \v
GR_NUM x2, \index
GR_NUM b2, \base
- .word 0xE700 | (VX_R(v1) << 4) | x2
+ .word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x06, v1
.endm
@@ -299,7 +302,7 @@
VX_NUM v1, \vr1
GR_NUM x2, \index
GR_NUM b2, \base
- .word 0xE700 | (VX_R(v1) << 4) | x2
+ .word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC \m3, \opc, v1
.endm
@@ -319,7 +322,7 @@
/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro VLEIx vr1, imm2, m3, opc
VX_NUM v1, \vr1
- .word 0xE700 | (VX_R(v1) << 4)
+ .word 0xE700 | ((v1&15) << 4)
.word \imm2
MRXBOPC \m3, \opc, v1
.endm
@@ -341,7 +344,7 @@
GR_NUM r1, \gr
GR_NUM b2, \base
VX_NUM v3, \vr
- .word 0xE700 | (r1 << 4) | VX_R(v3)
+ .word 0xE700 | (r1 << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x21, v3
.endm
@@ -363,7 +366,7 @@
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x36, v1, v3
.endm
@@ -373,7 +376,7 @@
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x3E, v1, v3
.endm
@@ -384,16 +387,16 @@
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
- MRXBOPC VX_R(v4), 0x8C, v1, v2, v3, v4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
.endm
/* VECTOR UNPACK LOGICAL LOW */
.macro VUPLL vr1, vr2, m3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
.word 0x0000
MRXBOPC \m3, 0xD4, v1, v2
.endm
@@ -410,13 +413,23 @@
/* Vector integer instructions */
+/* VECTOR AND */
+.macro VN vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x68, v1, v2, v3
+.endm
+
/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC 0, 0x6D, v1, v2, v3
.endm
@@ -425,8 +438,8 @@
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC \m4, 0xB4, v1, v2, v3
.endm
.macro VGFMB vr1, vr2, vr3
@@ -448,9 +461,9 @@
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12) | (\m5 << 8)
- MRXBOPC VX_R(v4), 0xBC, v1, v2, v3, v4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\m5 << 8)
+ MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro VGFMAB vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 0
@@ -470,11 +483,78 @@
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC 0, 0x7D, v1, v2, v3
.endm
+/* VECTOR REPLICATE IMMEDIATE */
+.macro VREPI vr1, imm2, m3
+ VX_NUM v1, \vr1
+ .word 0xE700 | ((v1&15) << 4)
+ .word \imm2
+ MRXBOPC \m3, 0x45, v1
+.endm
+.macro VREPIB vr1, imm2
+ VREPI \vr1, \imm2, 0
+.endm
+.macro VREPIH vr1, imm2
+ VREPI \vr1, \imm2, 1
+.endm
+.macro VREPIF vr1, imm2
+ VREPI \vr1, \imm2, 2
+.endm
+.macro VREPIG vr1, imm2
+ VREPI \vr1, \imm2, 3
+.endm
+
+/* VECTOR ADD */
+.macro VA vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0xF3, v1, v2, v3
+.endm
+.macro VAB vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 0
+.endm
+.macro VAH vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 1
+.endm
+.macro VAF vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 2
+.endm
+.macro VAG vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 3
+.endm
+.macro VAQ vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 4
+.endm
+
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+.macro VESRAV vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x7A, v1, v2, v3
+.endm
+
+.macro VESRAVB vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 0
+.endm
+.macro VESRAVH vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 1
+.endm
+.macro VESRAVF vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 2
+.endm
+.macro VESRAVG vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 3
+.endm
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_H */
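Because plain register numbers are now accepted as designations, the macros can also be used from C inline assembly once the header has been pulled into the assembler; a minimal sketch (helper name and buffer size are illustrative, mirroring the pattern used by arch/s390/kernel/fpu.c further down):

asm(".include \"asm/vx-insn.h\"\n");

static inline void save_low_vxrs_sketch(void *vxrs)
{
	asm volatile(
		"	la	1,%0\n"		/* save area address into %r1 */
		"	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		: "=Q" (*(char (*)[256]) vxrs) : : "1", "cc");
}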
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 08fe6dad9026..cc44b09c25fc 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -6,6 +6,7 @@ header-y += bitsperlong.h
header-y += byteorder.h
header-y += chpid.h
header-y += chsc.h
+header-y += clp.h
header-y += cmb.h
header-y += dasd.h
header-y += debug.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 3234817c7d47..72ccc41444dc 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,9 @@ AFLAGS_head.o += -march=z900
endif
GCOV_PROFILE_sclp.o := n
GCOV_PROFILE_als.o := n
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 1f95cc1faeb7..f3df9e0a5dec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -125,6 +125,7 @@ int main(void)
OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
+ OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 437e61159279..0f9cd90c11af 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -189,7 +189,7 @@ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info
kgid_t kgid;
for (i = 0; i < group_info->ngroups; i++) {
- kgid = GROUP_AT(group_info, i);
+ kgid = group_info->gid[i];
group = (u16)from_kgid_munged(user_ns, kgid);
if (put_user(group, grouplist+i))
return -EFAULT;
@@ -213,7 +213,7 @@ static int groups16_from_user(struct group_info *group_info, u16 __user *groupli
if (!gid_valid(kgid))
return -EINVAL;
- GROUP_AT(group_info, i) = kgid;
+ group_info->gid[i] = kgid;
}
return 0;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 29df8484282b..f9293bfefb7f 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -71,9 +71,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
*/
struct save_area * __init save_area_boot_cpu(void)
{
- if (list_empty(&dump_save_areas))
- return NULL;
- return list_first_entry(&dump_save_areas, struct save_area, list);
+ return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
}
/*
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 717b03aa16b5..2374c5b46bbc 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -13,7 +13,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 81d1d1887507..1235b9438df4 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -10,240 +10,167 @@
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>
-/*
- * Per-CPU variable to maintain FPU register ranges that are in use
- * by the kernel.
- */
-static DEFINE_PER_CPU(u32, kernel_fpu_state);
-
-#define KERNEL_FPU_STATE_MASK (KERNEL_FPU_MASK|KERNEL_FPC)
-
+asm(".include \"asm/vx-insn.h\"\n");
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
- if (!__this_cpu_read(kernel_fpu_state)) {
- /*
- * Save user space FPU state and register contents. Multiple
- * calls because of interruptions do not matter and return
- * immediately. This also sets CIF_FPU to lazy restore FP/VX
- * register contents when returning to user space.
- */
- save_fpu_regs();
- }
-
- /* Update flags to use the vector facility for KERNEL_FPR */
- if (MACHINE_HAS_VX && (state->mask & KERNEL_FPR)) {
- flags |= KERNEL_VXR_LOW | KERNEL_FPC;
- flags &= ~KERNEL_FPR;
- }
-
- /* Save and update current kernel VX state */
- state->mask = __this_cpu_read(kernel_fpu_state);
- __this_cpu_or(kernel_fpu_state, flags & KERNEL_FPU_STATE_MASK);
-
/*
- * If this is the first call to __kernel_fpu_begin(), no additional
- * work is required.
+ * Limit the save to the FPU/vector registers already
+ * in use by the previous context
*/
- if (!(state->mask & KERNEL_FPU_STATE_MASK))
- return;
+ flags &= state->mask;
- /*
- * If KERNEL_FPR is still set, the vector facility is not available
- * and, thus, save floating-point control and registers only.
- */
- if (state->mask & KERNEL_FPR) {
- asm volatile("stfpc %0" : "=Q" (state->fpc));
- asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
- asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
- asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
- asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
- asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
- asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
- asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
- asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
- asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
- asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
- asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
- asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
- asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
- asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
- asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
- asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
+ if (flags & KERNEL_FPC)
+ /* Save floating point control */
+ asm volatile("stfpc %0" : "=m" (state->fpc));
+
+ if (!MACHINE_HAS_VX) {
+ if (flags & KERNEL_VXR_V0V7) {
+ /* Save floating-point registers */
+ asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
+ asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
+ asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
+ asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
+ asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
+ asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
+ asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
+ asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
+ }
return;
}
- /*
- * If this is a nested call to __kernel_fpu_begin(), check the saved
- * state mask to save and later restore the vector registers that
- * are already in use. Let's start with checking floating-point
- * controls.
- */
- if (state->mask & KERNEL_FPC)
- asm volatile("stfpc %0" : "=m" (state->fpc));
-
/* Test and save vector registers */
asm volatile (
/*
* Test if any vector register must be saved and, if so,
* test if all register can be saved.
*/
- " tmll %[m],15\n" /* KERNEL_VXR_MASK */
- " jz 20f\n" /* no work -> done */
" la 1,%[vxrs]\n" /* load save area */
- " jo 18f\n" /* -> save V0..V31 */
-
+ " tmll %[m],30\n" /* KERNEL_VXR */
+ " jz 7f\n" /* no work -> done */
+ " jo 5f\n" /* -> save V0..V31 */
/*
- * Test if V8..V23 can be saved at once... this speeds up
- * for KERNEL_fpu_MID only. Otherwise continue to split the
- * range of vector registers into two halves and test them
- * separately.
+ * Test for the special case KERNEL_VXR_MID only. In this
+ * case a single vstm of V8..V23 is the best instruction.
*/
- " tmll %[m],6\n" /* KERNEL_VXR_MID */
- " jo 17f\n" /* -> save V8..V23 */
-
+ " chi %[m],12\n" /* KERNEL_VXR_MID */
+ " jne 0f\n" /* -> save V8..V23 */
+ " VSTM 8,23,128,1\n" /* vstm %v8,%v23,128(%r1) */
+ " j 7f\n"
/* Test and save the first half of 16 vector registers */
- "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
- " jz 10f\n" /* -> KERNEL_VXR_HIGH */
+ "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
+ " jz 3f\n" /* -> KERNEL_VXR_HIGH */
" jo 2f\n" /* 11 -> save V0..V15 */
- " brc 4,3f\n" /* 01 -> save V0..V7 */
- " brc 2,4f\n" /* 10 -> save V8..V15 */
-
+ " brc 2,1f\n" /* 10 -> save V8..V15 */
+ " VSTM 0,7,0,1\n" /* vstm %v0,%v7,0(%r1) */
+ " j 3f\n"
+ "1: VSTM 8,15,128,1\n" /* vstm %v8,%v15,128(%r1) */
+ " j 3f\n"
+ "2: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
/* Test and save the second half of 16 vector registers */
- "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
- " jo 19f\n" /* 11 -> save V16..V31 */
- " brc 4,11f\n" /* 01 -> save V16..V23 */
- " brc 2,12f\n" /* 10 -> save V24..V31 */
- " j 20f\n" /* 00 -> done */
-
- /*
- * Below are the vstm combinations to save multiple vector
- * registers at once.
- */
- "2: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "3: .word 0xe707,0x1000,0x003e\n" /* vstm 0,7,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "4: .word 0xe78f,0x1080,0x003e\n" /* vstm 8,15,128(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "\n"
- "11: .word 0xe707,0x1100,0x0c3e\n" /* vstm 16,23,256(1) */
- " j 20f\n" /* -> done */
- "12: .word 0xe78f,0x1180,0x0c3e\n" /* vstm 24,31,384(1) */
- " j 20f\n" /* -> done */
- "\n"
- "17: .word 0xe787,0x1080,0x043e\n" /* vstm 8,23,128(1) */
- " nill %[m],249\n" /* m &= ~VXR_MID */
- " j 1b\n" /* -> VXR_LOW */
- "\n"
- "18: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- "19: .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- "20:"
+ "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
+ " jz 7f\n"
+ " jo 6f\n" /* 11 -> save V16..V31 */
+ " brc 2,4f\n" /* 10 -> save V24..V31 */
+ " VSTM 16,23,256,1\n" /* vstm %v16,%v23,256(%r1) */
+ " j 7f\n"
+ "4: VSTM 24,31,384,1\n" /* vstm %v24,%v31,384(%r1) */
+ " j 7f\n"
+ "5: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
+ "6: VSTM 16,31,256,1\n" /* vstm %v16,%v31,256(%r1) */
+ "7:"
: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
- : [m] "d" (state->mask)
+ : [m] "d" (flags)
: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);
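For readers decoding the immediates above: 30, 12, 6 and 24 are KERNEL_VXR, KERNEL_VXR_MID, KERNEL_VXR_LOW and KERNEL_VXR_HIGH in the new flag encoding. An illustrative C model of the same decision tree; save_vxrs() is a hypothetical stand-in for the vstm sequences:

static void save_vxrs(struct kernel_fpu *state, int from, int to); /* hypothetical */

static void kernel_fpu_save_model(struct kernel_fpu *state, u32 flags)
{
	if (!(flags & KERNEL_VXR))			/* tmll %[m],30 ; jz */
		return;
	if ((flags & KERNEL_VXR) == KERNEL_VXR) {	/* jo: all of V0..V31 */
		save_vxrs(state, 0, 31);
		return;
	}
	if (flags == KERNEL_VXR_MID) {			/* chi %[m],12 */
		save_vxrs(state, 8, 23);
		return;
	}
	if ((flags & KERNEL_VXR_LOW) == KERNEL_VXR_LOW)	/* tmll %[m],6 */
		save_vxrs(state, 0, 15);
	else if (flags & KERNEL_VXR_V0V7)
		save_vxrs(state, 0, 7);
	else if (flags & KERNEL_VXR_V8V15)
		save_vxrs(state, 8, 15);
	if ((flags & KERNEL_VXR_HIGH) == KERNEL_VXR_HIGH) /* tmll %[m],24 */
		save_vxrs(state, 16, 31);
	else if (flags & KERNEL_VXR_V24V31)
		save_vxrs(state, 24, 31);
	else if (flags & KERNEL_VXR_V16V23)
		save_vxrs(state, 16, 23);
}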
-void __kernel_fpu_end(struct kernel_fpu *state)
+void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
- /* Just update the per-CPU state if there is nothing to restore */
- if (!(state->mask & KERNEL_FPU_STATE_MASK))
- goto update_fpu_state;
-
/*
- * If KERNEL_FPR is specified, the vector facility is not available
- * and, thus, restore floating-point control and registers only.
+ * Limit the restore to the FPU/vector registers of the
+ * previous context that have been overwritten by the
+ * current context.
*/
- if (state->mask & KERNEL_FPR) {
- asm volatile("lfpc %0" : : "Q" (state->fpc));
- asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
- asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
- asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
- asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
- asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
- asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
- asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
- asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
- asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
- asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
- asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
- asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
- asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
- asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
- asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
- asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
- goto update_fpu_state;
- }
+ flags &= state->mask;
- /* Test and restore floating-point controls */
- if (state->mask & KERNEL_FPC)
+ if (flags & KERNEL_FPC)
+ /* Restore floating-point controls */
asm volatile("lfpc %0" : : "Q" (state->fpc));
+ if (!MACHINE_HAS_VX) {
+ if (flags & KERNEL_VXR_V0V7) {
+ /* Restore floating-point registers */
+ asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
+ asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
+ asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
+ asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
+ asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
+ asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
+ asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
+ asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
+ }
+ return;
+ }
+
/* Test and restore (load) vector registers */
asm volatile (
/*
- * Test if any vector registers must be loaded and, if so,
+ * Test if any vector register must be loaded and, if so,
* test if all registers can be loaded at once.
*/
- " tmll %[m],15\n" /* KERNEL_VXR_MASK */
- " jz 20f\n" /* no work -> done */
- " la 1,%[vxrs]\n" /* load load area */
- " jo 18f\n" /* -> load V0..V31 */
-
- /*
- * Test if V8..V23 can be restored at once... this speeds up
- * for KERNEL_VXR_MID only. Otherwise continue to split the
- * range of vector registers into two halves and test them
- * separately.
- */
- " tmll %[m],6\n" /* KERNEL_VXR_MID */
- " jo 17f\n" /* -> load V8..V23 */
-
- /* Test and load the first half of 16 vector registers */
- "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
- " jz 10f\n" /* -> KERNEL_VXR_HIGH */
- " jo 2f\n" /* 11 -> load V0..V15 */
- " brc 4,3f\n" /* 01 -> load V0..V7 */
- " brc 2,4f\n" /* 10 -> load V8..V15 */
-
- /* Test and load the second half of 16 vector registers */
- "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
- " jo 19f\n" /* 11 -> load V16..V31 */
- " brc 4,11f\n" /* 01 -> load V16..V23 */
- " brc 2,12f\n" /* 10 -> load V24..V31 */
- " j 20f\n" /* 00 -> done */
-
+ " la 1,%[vxrs]\n" /* load restore area */
+ " tmll %[m],30\n" /* KERNEL_VXR */
+ " jz 7f\n" /* no work -> done */
+ " jo 5f\n" /* -> restore V0..V31 */
/*
- * Below are the vstm combinations to load multiple vector
- * registers at once.
+ * Test for the special case KERNEL_VXR_MID only. In this
+ * case a single vlm of V8..V23 is the best instruction.
*/
- "2: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "3: .word 0xe707,0x1000,0x0036\n" /* vlm 0,7,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "4: .word 0xe78f,0x1080,0x0036\n" /* vlm 8,15,128(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "\n"
- "11: .word 0xe707,0x1100,0x0c36\n" /* vlm 16,23,256(1) */
- " j 20f\n" /* -> done */
- "12: .word 0xe78f,0x1180,0x0c36\n" /* vlm 24,31,384(1) */
- " j 20f\n" /* -> done */
- "\n"
- "17: .word 0xe787,0x1080,0x0436\n" /* vlm 8,23,128(1) */
- " nill %[m],249\n" /* m &= ~VXR_MID */
- " j 1b\n" /* -> VXR_LOW */
- "\n"
- "18: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- "19: .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
- "20:"
- :
- : [vxrs] "Q" (*(struct vx_array *) &state->vxrs),
- [m] "d" (state->mask)
+ " chi %[m],12\n" /* KERNEL_VXR_MID */
+ " jne 0f\n" /* -> restore V8..V23 */
+ " VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */
+ " j 7f\n"
+ /* Test and restore the first half of 16 vector registers */
+ "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
+ " jz 3f\n" /* -> KERNEL_VXR_HIGH */
+ " jo 2f\n" /* 11 -> restore V0..V15 */
+ " brc 2,1f\n" /* 10 -> restore V8..V15 */
+ " VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */
+ " j 3f\n"
+ "1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */
+ " j 3f\n"
+ "2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
+ /* Test and restore the second half of 16 vector registers */
+ "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
+ " jz 7f\n"
+ " jo 6f\n" /* 11 -> restore V16..V31 */
+ " brc 2,4f\n" /* 10 -> restore V24..V31 */
+ " VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */
+ " j 7f\n"
+ "4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */
+ " j 7f\n"
+ "5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
+ "6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */
+ "7:"
+ : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
+ : [m] "d" (flags)
: "1", "cc");
-
-update_fpu_state:
- /* Update current kernel VX state */
- __this_cpu_write(kernel_fpu_state, state->mask);
}
EXPORT_SYMBOL(__kernel_fpu_end);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index dd6306c51bd6..fdb40424acfe 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -26,12 +26,14 @@
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
+#include <asm/uaccess.h>
#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 29376f0e725c..9a32f7419d78 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
* returns 0 if all registers could be validated
* returns 1 otherwise
*/
-static int notrace s390_validate_registers(union mci mci)
+static int notrace s390_validate_registers(union mci mci, int umode)
{
int kill_task;
u64 zero;
@@ -110,26 +110,41 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.gr) {
/*
* General purpose registers couldn't be restored and have
- * unknown contents. Process needs to be terminated.
+ * unknown contents. Stop system or terminate process.
*/
+ if (!umode)
+ s390_handle_damage();
kill_task = 1;
}
if (!mci.fp) {
/*
- * Floating point registers can't be restored and
- * therefore the process needs to be terminated.
+ * Floating point registers can't be restored. If the
+ * kernel currently uses floating point registers, the
+ * system is stopped. If the process has its floating
+ * point registers loaded, it is terminated.
+ * Otherwise just revalidate the registers.
*/
- kill_task = 1;
+ if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
+ s390_handle_damage();
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
}
fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
- * Task will be terminated.
+ * If the kernel currently uses the floating point
+ * registers and needs the FPC register, the system is
+ * stopped. If the process has its floating point
+ * registers loaded, it is terminated. Otherwise the
+ * FPC is just revalidated.
*/
+ if (S390_lowcore.fpu_flags & KERNEL_FPC)
+ s390_handle_damage();
asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
} else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
@@ -159,10 +174,16 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.vr) {
/*
- * Vector registers can't be restored and therefore
- * the process needs to be terminated.
+ * Vector registers can't be restored. If the kernel
+ * currently uses vector registers the system is
+ * stopped. If the process has its vector registers
+ * loaded it is terminated. Otherwise just revalidate
+ * the registers.
*/
- kill_task = 1;
+ if (S390_lowcore.fpu_flags & KERNEL_VXR)
+ s390_handle_damage();
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
}
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
@@ -250,13 +271,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
struct mcck_struct *mcck;
unsigned long long tmp;
union mci mci;
- int umode;
nmi_enter();
inc_irq_stat(NMI_NMI);
mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
- umode = user_mode(regs);
if (mci.sd) {
/* System damage -> stopping machine */
@@ -297,22 +316,14 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage();
}
}
- if (s390_validate_registers(mci)) {
- if (umode) {
- /*
- * Couldn't restore all register contents while in
- * user mode -> mark task for termination.
- */
- mcck->kill_task = 1;
- mcck->mcck_code = mci.val;
- set_cpu_flag(CIF_MCCK_PENDING);
- } else {
- /*
- * Couldn't restore all register contents while in
- * kernel mode -> stopping machine.
- */
- s390_handle_damage();
- }
+ if (s390_validate_registers(mci, user_mode(regs))) {
+ /*
+ * Couldn't restore all register contents for the
+ * user space process -> mark task for termination.
+ */
+ mcck->kill_task = 1;
+ mcck->mcck_code = mci.val;
+ set_cpu_flag(CIF_MCCK_PENDING);
}
if (mci.cd) {
/* Timing facility damage */
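The validation changes above all follow the same three-way policy; an illustrative helper (not part of the patch) that captures it for one register class, where class_flags would be KERNEL_FPC, KERNEL_VXR_V0V7 or KERNEL_VXR:

static int validate_class_sketch(u32 class_flags)
{
	if (S390_lowcore.fpu_flags & class_flags)
		s390_handle_damage();	/* kernel state lost -> stop the system */
	if (!test_cpu_flag(CIF_FPU))
		return 1;		/* live user registers lost -> kill the task */
	return 0;			/* lazily saved user state -> just revalidate */
}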
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 050b8d067d3b..bfda6aa40280 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -454,7 +454,7 @@ void s390_adjust_jiffies(void)
: "Q" (info->capability), "d" (10000000), "d" (0)
: "cc"
);
- kernel_fpu_end(&fpu);
+ kernel_fpu_end(&fpu, KERNEL_FPR);
} else
/*
* Really old machine without stsi block for basic
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4e9949800562..0bfcc492987e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -50,10 +50,6 @@
#include <asm/cio.h>
#include "entry.h"
-/* change this if you have some constant time drift */
-#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
-#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
-
u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc);
@@ -282,13 +278,8 @@ extern struct timezone sys_tz;
void update_vsyscall_tz(void)
{
- /* Make userspace gettimeofday spin until we're done. */
- ++vdso_data->tb_update_count;
- smp_wmb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- smp_wmb();
- ++vdso_data->tb_update_count;
}
/*
@@ -318,51 +309,12 @@ void __init time_init(void)
vtime_init();
}
-/*
- * The time is "clock". old is what we think the time is.
- * Adjust the value by a multiple of jiffies and add the delta to ntp.
- * "delay" is an approximation how long the synchronization took. If
- * the time correction is positive, then "delay" is subtracted from
- * the time difference and only the remaining part is passed to ntp.
- */
-static unsigned long long adjust_time(unsigned long long old,
- unsigned long long clock,
- unsigned long long delay)
-{
- unsigned long long delta, ticks;
- struct timex adjust;
-
- if (clock > old) {
- /* It is later than we thought. */
- delta = ticks = clock - old;
- delta = ticks = (delta < delay) ? 0 : delta - delay;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- adjust.offset = ticks * (1000000 / HZ);
- } else {
- /* It is earlier than we thought. */
- delta = ticks = old - clock;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- delta = -delta;
- adjust.offset = -ticks * (1000000 / HZ);
- }
- sched_clock_base_cc += delta;
- if (adjust.offset != 0) {
- pr_notice("The ETR interface has adjusted the clock "
- "by %li microseconds\n", adjust.offset);
- adjust.modes = ADJ_OFFSET_SINGLESHOT;
- do_adjtimex(&adjust);
- }
- return delta;
-}
-
static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;
-#define CLOCK_SYNC_HAS_ETR 0
-#define CLOCK_SYNC_HAS_STP 1
-#define CLOCK_SYNC_ETR 2
-#define CLOCK_SYNC_STP 3
+#define CLOCK_SYNC_HAS_STP 0
+#define CLOCK_SYNC_STP 1
/*
* The get_clock function for the physical clock. It will get the current
@@ -384,34 +336,32 @@ int get_phys_clock(unsigned long long *clock)
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
- if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
- !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+ if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
- if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
- !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
+ if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
/*
- * Make get_sync_clock return -EAGAIN.
+ * Make get_phys_clock() return -EAGAIN.
*/
static void disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/*
- * Clear the in-sync bit 2^31. All get_sync_clock calls will
+ * Clear the in-sync bit 2^31. All get_phys_clock calls will
* fail until the sync bit is turned back on. In addition
* increase the "sequence" counter to avoid the race of an
- * etr event and the complete recovery against get_sync_clock.
+ * stp event and the complete recovery against get_phys_clock.
*/
atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
- * Make get_sync_clock return 0 again.
+ * Make get_phys_clock() return 0 again.
* Needs to be called from a context disabled for preemption.
*/
static void enable_sync_clock(void)
@@ -434,7 +384,7 @@ static inline int check_sync_clock(void)
return rc;
}
-/* Single threaded workqueue used for etr and stp sync events */
+/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void)
@@ -448,20 +398,12 @@ struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
- int etr_port;
- struct etr_aib *etr_aib;
};
static void clock_sync_cpu(struct clock_sync_data *sync)
{
atomic_dec(&sync->cpus);
enable_sync_clock();
- /*
- * This looks like a busy wait loop but it isn't. etr_sync_cpus
- * is called on all other cpus while the TOD clocks is stopped.
- * __udelay will stop the cpu on an enabled wait psw until the
- * TOD is running again.
- */
while (sync->in_sync == 0) {
__udelay(1);
/*
@@ -582,7 +524,7 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
static int first;
- unsigned long long old_clock, delta, new_clock, clock_delta;
+ unsigned long long clock_delta;
struct clock_sync_data *stp_sync;
struct ptff_qto qto;
int rc;
@@ -605,18 +547,18 @@ static int stp_sync_clock(void *data)
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
- old_clock = get_tod_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
- new_clock = old_clock + clock_delta;
- delta = adjust_time(old_clock, new_clock, 0);
+ /* fixup the monotonic sched clock */
+ sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
- fixup_clock_comparator(delta);
+ stp_sync->fixup_cc = clock_delta;
+ fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index dd97a3e8a34a..d0539f76fd24 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -14,11 +14,12 @@
*/
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <asm/uaccess.h>
#include <asm/fpu/api.h>
#include "entry.h"
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 68145456fee2..6cc947896c77 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -24,8 +24,9 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
+# Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 0b0fd22c869a..2d54c18089eb 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,8 +24,9 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
+# Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 429bfd111961..000e6e91f6a0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -35,6 +35,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 54200208bf24..4aa8a7e2a1da 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -495,6 +495,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
switch (code) {
+ case PGM_PROTECTION:
+ switch (prot) {
+ case PROT_TYPE_ALC:
+ tec->b60 = 1;
+ /* FALL THROUGH */
+ case PROT_TYPE_DAT:
+ tec->b61 = 1;
+ break;
+ default: /* LA and KEYC set b61 to 0, other params undefined */
+ return code;
+ }
+ /* FALL THROUGH */
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
@@ -504,8 +516,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
/*
* op_access_id only applies to MOVE_PAGE -> set bit 61
* exc_access_id has to be set to 0 for some instructions. Both
- * cases have to be handled by the caller. We can always store
- * exc_access_id, as it is undefined for non-ar cases.
+ * cases have to be handled by the caller.
*/
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
@@ -516,25 +527,13 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
case PGM_ASTE_VALIDITY:
case PGM_ASTE_SEQUENCE:
case PGM_EXTENDED_AUTHORITY:
+ /*
+ * We can always store exc_access_id, as it is
+ * undefined for non-ar cases. It is undefined for
+ * most DAT protection exceptions.
+ */
pgm->exc_access_id = ar;
break;
- case PGM_PROTECTION:
- switch (prot) {
- case PROT_TYPE_ALC:
- tec->b60 = 1;
- /* FALL THROUGH */
- case PROT_TYPE_DAT:
- tec->b61 = 1;
- tec->addr = gva >> PAGE_SHIFT;
- tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
- tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
- /* exc_access_id is undefined for most cases */
- pgm->exc_access_id = ar;
- break;
- default: /* LA and KEYC set b61 to 0, other params undefined */
- break;
- }
- break;
}
return code;
}
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 31a05330d11c..d7c6a7f53ced 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -206,7 +206,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
+ int ret = 0, nr_wp = 0, nr_bp = 0, i;
struct kvm_hw_breakpoint *bp_data = NULL;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -216,17 +216,10 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
return -EINVAL;
- size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
- bp_data = kmalloc(size, GFP_KERNEL);
- if (!bp_data) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
- ret = -EFAULT;
- goto error;
- }
+ bp_data = memdup_user(dbg->arch.hw_bp,
+ sizeof(*bp_data) * dbg->arch.nr_hw_bp);
+ if (IS_ERR(bp_data))
+ return PTR_ERR(bp_data);
for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
switch (bp_data[i].type) {
@@ -241,17 +234,19 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
}
}
- size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
- if (size > 0) {
- wp_info = kmalloc(size, GFP_KERNEL);
+ if (nr_wp > 0) {
+ wp_info = kmalloc_array(nr_wp,
+ sizeof(*wp_info),
+ GFP_KERNEL);
if (!wp_info) {
ret = -ENOMEM;
goto error;
}
}
- size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
- if (size > 0) {
- bp_info = kmalloc(size, GFP_KERNEL);
+ if (nr_bp > 0) {
+ bp_info = kmalloc_array(nr_bp,
+ sizeof(*bp_info),
+ GFP_KERNEL);
if (!bp_info) {
ret = -ENOMEM;
goto error;
@@ -382,14 +377,20 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
+#define PER_CODE_MASK (PER_EVENT_MASK >> 24)
+#define PER_CODE_BRANCH (PER_EVENT_BRANCH >> 24)
+#define PER_CODE_IFETCH (PER_EVENT_IFETCH >> 24)
+#define PER_CODE_STORE (PER_EVENT_STORE >> 24)
+#define PER_CODE_STORE_REAL (PER_EVENT_STORE_REAL >> 24)
+
#define per_bp_event(code) \
- (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
+ (code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
- (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
+ (code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
- u32 perc = (vcpu->arch.sie_block->perc << 24);
+ u8 perc = vcpu->arch.sie_block->perc;
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -444,7 +445,7 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
const u8 ilen = kvm_s390_get_ilen(vcpu);
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_PER,
- .per_code = PER_EVENT_IFETCH >> 24,
+ .per_code = PER_CODE_IFETCH,
.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
};
@@ -458,33 +459,33 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
- u32 perc = vcpu->arch.sie_block->perc << 24;
+ const u8 perc = vcpu->arch.sie_block->perc;
u64 peraddr = vcpu->arch.sie_block->peraddr;
u64 addr = vcpu->arch.sie_block->gpsw.addr;
u64 cr9 = vcpu->arch.sie_block->gcr[9];
u64 cr10 = vcpu->arch.sie_block->gcr[10];
u64 cr11 = vcpu->arch.sie_block->gcr[11];
/* filter all events, demanded by the guest */
- u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
+ u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
if (!guest_per_enabled(vcpu))
guest_perc = 0;
/* filter "successful-branching" events */
- if (guest_perc & PER_EVENT_BRANCH &&
+ if (guest_perc & PER_CODE_BRANCH &&
cr9 & PER_CONTROL_BRANCH_ADDRESS &&
!in_addr_range(addr, cr10, cr11))
- guest_perc &= ~PER_EVENT_BRANCH;
+ guest_perc &= ~PER_CODE_BRANCH;
/* filter "instruction-fetching" events */
- if (guest_perc & PER_EVENT_IFETCH &&
+ if (guest_perc & PER_CODE_IFETCH &&
!in_addr_range(peraddr, cr10, cr11))
- guest_perc &= ~PER_EVENT_IFETCH;
+ guest_perc &= ~PER_CODE_IFETCH;
/* All other PER events will be given to the guest */
/* TODO: Check altered address/address space */
- vcpu->arch.sie_block->perc = guest_perc >> 24;
+ vcpu->arch.sie_block->perc = guest_perc;
if (!guest_perc)
vcpu->arch.sie_block->iprcc &= ~PGM_PER;
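
The guestdbg.c hunks above replace an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), and switch the remaining allocations to kmalloc_array(), which rejects a count * size overflow before allocating. A minimal kernel-style sketch of the same idiom, assuming an illustrative struct dbg_bp rather than the real KVM breakpoint structures:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>

struct dbg_bp { u64 addr; u32 type; };

/* memdup_user() = allocate + copy_from_user() + cleanup on failure */
static struct dbg_bp *import_bp_array(const void __user *uptr, u32 count)
{
	/* on failure this returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) */
	return memdup_user(uptr, sizeof(struct dbg_bp) * count);
}

/* kmalloc_array() checks count * sizeof() for overflow up front */
static struct dbg_bp *alloc_bp_array(u32 count)
{
	return kmalloc_array(count, sizeof(struct dbg_bp), GFP_KERNEL);
}

One practical consequence, visible in the hunk above: a failed user copy is cleaned up inside memdup_user() itself, so the caller only needs IS_ERR()/PTR_ERR() instead of the old error label for that path.
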
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index dfd0ca2638fa..1cab8a177d0e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -29,6 +29,7 @@ static const intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
[0x82] = kvm_s390_handle_lpsw,
[0x83] = kvm_s390_handle_diag,
+ [0xaa] = kvm_s390_handle_aa,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb6] = kvm_s390_handle_stctl,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 24524c0f3ef8..be4db07f70d3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -24,6 +24,8 @@
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
+#include <asm/switch_to.h>
+#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -40,6 +42,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
return 0;
+ BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -68,6 +71,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
+ BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -109,6 +113,8 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
+ if (!kvm_s390_use_sca_entries())
+ return;
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
@@ -400,12 +406,78 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
+static int __write_machine_check(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mchk_info *mchk)
+{
+ unsigned long ext_sa_addr;
+ freg_t fprs[NUM_FPRS];
+ union mci mci;
+ int rc;
+
+ mci.val = mchk->mcic;
+ /* take care of lazy register loading via vcpu load/put */
+ save_fpu_regs();
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ /* Extended save area */
+ rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
+ sizeof(unsigned long));
+ /* Only bits 0-53 are used for address formation */
+ ext_sa_addr &= ~0x3ffUL;
+ if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
+ if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
+ 512))
+ mci.vr = 0;
+ } else {
+ mci.vr = 0;
+ }
+
+ /* General interruption information */
+ rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
+
+ /* Register-save areas */
+ if (MACHINE_HAS_VX) {
+ convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
+ rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
+ } else {
+ rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
+ vcpu->run->s.regs.fprs, 128);
+ }
+ rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
+ vcpu->run->s.regs.gprs, 128);
+ rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+ (u32 __user *) __LC_FP_CREG_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
+ (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
+ (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
+ (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
+ rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
+ &vcpu->run->s.regs.acrs, 64);
+ rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
+ &vcpu->arch.sie_block->gcr, 128);
+
+ /* Extended interruption information */
+ rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
+ (u32 __user *) __LC_EXT_DAMAGE_CODE);
+ rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
+ sizeof(mchk->fixed_logout));
+ return rc ? -EFAULT : 0;
+}
+
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info mchk = {};
- unsigned long adtl_status_addr;
int deliver = 0;
int rc = 0;
@@ -446,29 +518,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_MCHK,
mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu,
- KVM_S390_STORE_STATUS_PREFIXED);
- rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
- &adtl_status_addr,
- sizeof(unsigned long));
- rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
- adtl_status_addr);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout,
- sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __write_machine_check(vcpu, &mchk);
}
- return rc ? -EFAULT : 0;
+ return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
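
The new __write_machine_check() above strings many lowcore stores together with rc |= put_guest_lc(...)/write_guest_lc(...) and converts any failure into a single -EFAULT at the end, instead of checking each store individually. A self-contained sketch of that error-accumulation style, with a toy guest_store() standing in for the guest-access helpers and made-up offsets instead of the architected __LC_* values:

#include <errno.h>
#include <string.h>

#define LOWCORE_SIZE 512

static unsigned char lowcore[LOWCORE_SIZE];

/* toy stand-in for put_guest_lc()/write_guest_lc(): 0 on success, -1 on fault */
static int guest_store(unsigned long off, const void *src, size_t len)
{
	if (off + len > LOWCORE_SIZE)
		return -1;
	memcpy(lowcore + off, src, len);
	return 0;
}

static int write_mchk_areas(const unsigned long gprs[16], unsigned long mcic)
{
	int rc = 0;

	rc |= guest_store(0x00, &mcic, sizeof(mcic));
	rc |= guest_store(0x08, gprs, 16 * sizeof(gprs[0]));
	/* ...further save areas are appended the same way... */
	return rc ? -EFAULT : 0;	/* one error code for the whole sequence */
}
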
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 607ec91966c7..9c7a1ecfe6bd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* upper facilities limit for kvm */
-unsigned long kvm_s390_fac_list_mask[16] = {
- 0xffe6000000000000UL,
- 0x005e000000000000UL,
-};
+unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
unsigned long kvm_s390_fac_list_mask_size(void)
{
@@ -248,22 +245,33 @@ static void kvm_s390_cpu_feat_init(void)
PTFF_QAF);
if (test_facility(17)) { /* MSA */
- __cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
- __cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
- __cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
- __cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
- __cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
+ __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmac);
+ __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmc);
+ __cpacf_query(CPACF_KM, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.km);
+ __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kimd);
+ __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.klmd);
}
if (test_facility(76)) /* MSA3 */
- __cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
+ __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.pckmo);
if (test_facility(77)) { /* MSA4 */
- __cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
- __cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
- __cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
- __cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
+ __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmctr);
+ __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmf);
+ __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmo);
+ __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.pcc);
}
if (test_facility(57)) /* MSA5 */
- __cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
+ __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.ppno);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
@@ -376,7 +384,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_S390_BSCA_CPU_SLOTS;
- if (sclp.has_esca && sclp.has_64bscao)
+ if (!kvm_s390_use_sca_entries())
+ r = KVM_MAX_VCPUS;
+ else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
@@ -1490,6 +1500,16 @@ out_err:
return rc;
}
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
@@ -1553,6 +1573,8 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
+ if (!kvm_s390_use_sca_entries())
+ return;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1570,6 +1592,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
+ if (!kvm_s390_use_sca_entries()) {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+ /* we still need the basic sca for the ipte control */
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ }
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1650,6 +1679,11 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
int rc;
+ if (!kvm_s390_use_sca_entries()) {
+ if (id < KVM_MAX_VCPUS)
+ return true;
+ return false;
+ }
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
if (!sclp.has_esca || !sclp.has_64bscao)
@@ -1938,8 +1972,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= 0x10000000U;
- if (test_kvm_facility(vcpu->kvm, 64))
- vcpu->arch.sie_block->ecb3 |= 0x01;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
vcpu->arch.sie_block->ecd |= 0x20000000;
@@ -2696,6 +2728,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
+ /*
+ * If userspace sets the riccb (e.g. after migration) to a valid state,
+ * we should enable RI here instead of doing the lazy enablement.
+ */
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
+ test_kvm_facility(vcpu->kvm, 64)) {
+ struct runtime_instr_cb *riccb =
+ (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+
+ if (riccb->valid)
+ vcpu->arch.sie_block->ecb3 |= 0x01;
+ }
+
kvm_run->kvm_dirty_regs = 0;
}
@@ -2839,38 +2884,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
-/*
- * store additional status at address
- */
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
- unsigned long gpa)
-{
- /* Only bits 0-53 are used for address formation */
- if (!(gpa & ~0x3ff))
- return 0;
-
- return write_guest_abs(vcpu, gpa & ~0x3ff,
- (void *)&vcpu->run->s.regs.vrs, 512);
-}
-
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- if (!test_kvm_facility(vcpu->kvm, 129))
- return 0;
-
- /*
- * The guest VXRS are in the host VXRs due to the lazy
- * copying in vcpu load/put. We can simply call save_fpu_regs()
- * to save the current register state because we are in the
- * middle of a load/put cycle.
- *
- * Let's update our copies before we save it into the save area.
- */
- save_fpu_regs();
-
- return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
-}
-
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
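
In sca_add_vcpu() above, even when SCA entries are not used, the basic SCA origin is still written into the 32-bit scaoh/scaol fields, because the ipte control in the SCA must stay reachable. The split of a 64-bit block address into two 32-bit halves, and its inverse, reduce to this self-contained sketch (struct sca_origin is illustrative, not the SIE block layout):

#include <stdint.h>

struct sca_origin { uint32_t hi; uint32_t lo; };

static void set_origin(struct sca_origin *o, uint64_t addr)
{
	o->hi = (uint32_t)(addr >> 32);		/* scaoh: upper half */
	o->lo = (uint32_t)addr;			/* scaol: lower half */
}

static uint64_t get_origin(const struct sca_origin *o)
{
	return ((uint64_t)o->hi << 32) | o->lo;
}
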
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index b8432862a817..3a4e97f1a9e6 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
+#include <asm/sclp.h>
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
@@ -245,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -273,10 +275,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
- unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -389,4 +388,13 @@ static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
return &sca->ipte_control;
}
+static inline int kvm_s390_use_sca_entries(void)
+{
+ /*
+ * Without SIGP interpretation, only SRS interpretation (if available)
+ * might use the entries. By not setting the entries and keeping them
+ * invalid, hardware will not access them but intercept.
+ */
+ return sclp.has_sigpif;
+}
#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 46160388e996..e18435355c16 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -32,6 +32,24 @@
#include "kvm-s390.h"
#include "trace.h"
+static int handle_ri(struct kvm_vcpu *vcpu)
+{
+ if (test_kvm_facility(vcpu->kvm, 64)) {
+ vcpu->arch.sie_block->ecb3 |= 0x01;
+ kvm_s390_retry_instr(vcpu);
+ return 0;
+ } else
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
+ return handle_ri(vcpu);
+ else
+ return -EOPNOTSUPP;
+}
+
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -1093,6 +1111,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
static const intercept_handler_t eb_handlers[256] = {
[0x2f] = handle_lctlg,
[0x25] = handle_stctg,
+ [0x60] = handle_ri,
+ [0x61] = handle_ri,
+ [0x62] = handle_ri,
};
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
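
The priv.c additions above hook the runtime-instrumentation opcodes into the existing opcode-indexed handler table and enable the facility lazily: handle_ri() sets the ecb3 bit on first use and retries the instruction, so later RI instructions no longer have to be handled here; the sync_regs() hunk in kvm-s390.c does the same when userspace hands in an already valid riccb. A self-contained sketch of the two idioms follows; struct vm and dispatch_eb() are illustrative names, and the 0x60-0x62 entries are simply copied from the table above.

struct vm { int ri_enabled; };

typedef int (*handler_t)(struct vm *);

static int handle_ri_sketch(struct vm *vm)
{
	if (!vm->ri_enabled) {
		vm->ri_enabled = 1;	/* flip the hardware-assist bit ... */
		return 1;		/* ... and signal "retry the instruction" */
	}
	return 0;			/* hardware handles it from now on */
}

/* designated initializers give a sparse 256-entry dispatch table */
static const handler_t eb_handlers_sketch[256] = {
	[0x60] = handle_ri_sketch,
	[0x61] = handle_ri_sketch,
	[0x62] = handle_ri_sketch,
};

static int dispatch_eb(struct vm *vm, unsigned char opcode)
{
	return eb_handlers_sketch[opcode] ? eb_handlers_sketch[opcode](vm) : -1;
}
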
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cbb73fabc91e..661d9fe63c43 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -24,7 +24,7 @@
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2ce6bb3bab32..3ba622702ce4 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -94,6 +94,7 @@ out:
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
struct gmap *gmap;
+ unsigned long gmap_asce;
gmap = gmap_alloc(limit);
if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
gmap->mm = mm;
spin_lock(&mm->context.gmap_lock);
list_add_rcu(&gmap->list, &mm->context.gmap_list);
+ if (list_is_singular(&mm->context.gmap_list))
+ gmap_asce = gmap->asce;
+ else
+ gmap_asce = -1UL;
+ WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
spin_unlock(&mm->context.gmap_lock);
return gmap;
}
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
void gmap_remove(struct gmap *gmap)
{
struct gmap *sg, *next;
+ unsigned long gmap_asce;
/* Remove all shadow gmaps linked to this gmap */
if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
/* Remove gmap from the pre-mm list */
spin_lock(&gmap->mm->context.gmap_lock);
list_del_rcu(&gmap->list);
+ if (list_empty(&gmap->mm->context.gmap_list))
+ gmap_asce = 0;
+ else if (list_is_singular(&gmap->mm->context.gmap_list))
+ gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
+ struct gmap, list)->asce;
+ else
+ gmap_asce = -1UL;
+ WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
spin_unlock(&gmap->mm->context.gmap_lock);
synchronize_rcu();
/* Put reference */
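
The gmap.c hunks above keep mm->context.gmap_asce in one of three states: 0 while no gmap is attached, the ASCE of the single gmap when there is exactly one, and -1UL as soon as the list holds more, published with WRITE_ONCE() so readers can tell which case applies without taking gmap_lock. The list_is_singular() based updates reduce to this sketch, where struct mm_ctx and the nr_gmaps counter stand in for the real list bookkeeping:

#include <stddef.h>

/* simplified stand-in for the kernel's WRITE_ONCE() */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

struct mm_ctx {
	size_t nr_gmaps;		/* length of context.gmap_list */
	unsigned long first_asce;	/* ASCE of the only gmap, if any */
	unsigned long gmap_asce;	/* published for lockless readers */
};

static void update_gmap_asce(struct mm_ctx *ctx)
{
	unsigned long asce;

	if (ctx->nr_gmaps == 0)
		asce = 0;		/* no gmap attached */
	else if (ctx->nr_gmaps == 1)
		asce = ctx->first_asce;	/* exactly one: its ASCE is usable */
	else
		asce = -1UL;		/* several: no single ASCE applies */
	WRITE_ONCE(ctx->gmap_asce, asce);
}
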
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index af7cf28cf97e..44f150312a16 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -309,11 +309,11 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
int i;
if (test_facility(13)) {
- __ptep_ipte_range(address, nr - 1, pte);
+ __ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
return;
}
for (i = 0; i < nr; i++) {
- __ptep_ipte(address, pte);
+ __ptep_ipte(address, pte, IPTE_GLOBAL);
address += PAGE_SIZE;
pte++;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5f092015aaa7..7a1897c51c54 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -35,9 +35,9 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __ptep_ipte_local(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_LOCAL);
else
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -56,7 +56,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
pte_val(*ptep) |= _PAGE_INVALID;
mm->context.flush_mm = 1;
} else
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -301,9 +301,9 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pmdp_idte_local(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_LOCAL);
else
- __pmdp_idte(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -322,7 +322,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
} else if (MACHINE_HAS_IDTE)
- __pmdp_idte(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
atomic_dec(&mm->context.flush_count);
@@ -374,9 +374,9 @@ static inline pud_t pudp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pudp_idte_local(addr, pudp);
+ __pudp_idte(addr, pudp, IDTE_LOCAL);
else
- __pudp_idte(addr, pudp);
+ __pudp_idte(addr, pudp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -620,7 +620,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
pgste = pgste_pte_notify(mm, addr, ptep, pgste);
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT;
else
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 871af75c69c2..15ffc19c8c0c 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -854,6 +854,15 @@ void zpci_stop_device(struct zpci_dev *zdev)
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
+int zpci_report_error(struct pci_dev *pdev,
+ struct zpci_report_error_header *report)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ return sclp_pci_report(report, zdev->fh, zdev->fid);
+}
+EXPORT_SYMBOL(zpci_report_error);
+
static inline int barsize(u8 size)
{
return (size) ? (1 << size) >> 10 : 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7297fce9bf80..7350c8bc13a2 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -129,12 +129,11 @@ void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
entry_clr_protected(entry);
}
-static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
- dma_addr_t dma_addr, size_t size, int flags)
+static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
- dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
unsigned long *entry;
int i, rc = 0;
@@ -145,7 +144,7 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
if (!zdev->dma_table) {
rc = -EINVAL;
- goto no_refresh;
+ goto out_unlock;
}
for (i = 0; i < nr_pages; i++) {
@@ -159,20 +158,6 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
dma_addr += PAGE_SIZE;
}
- /*
- * With zdev->tlb_refresh == 0, rpcit is not required to establish new
- * translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
- * entries, but a global rpcit is then required before any address can
- * be re-used, i.e. after each iommu bitmap wrap-around.
- */
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- goto no_refresh;
-
- rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
undo_cpu_trans:
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
flags = ZPCI_PTE_INVALID;
@@ -185,12 +170,46 @@ undo_cpu_trans:
dma_update_cpu_trans(entry, page_addr, flags);
}
}
-
-no_refresh:
+out_unlock:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
return rc;
}
+static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
+ size_t size, int flags)
+{
+ /*
+ * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+ * translations when previously invalid translation-table entries are
+ * validated. With lazy unmap, it also is skipped for previously valid
+ * entries, but a global rpcit is then required before any address can
+ * be re-used, i.e. after each iommu bitmap wrap-around.
+ */
+ if (!zdev->tlb_refresh &&
+ (!s390_iommu_strict ||
+ ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
+ return 0;
+
+ return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ int rc;
+
+ rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (rc)
+ return rc;
+
+ rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
+
+ return rc;
+}
+
void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
@@ -230,45 +249,54 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
boundary_size, 0);
}
-static unsigned long dma_alloc_iommu(struct device *dev, int size)
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long offset, flags;
- int wrap = 0;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ /* global flush before DMA addresses are reused */
+ if (zpci_refresh_global(zdev))
+ goto out_error;
+
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ }
/* wrap-around */
offset = __dma_alloc_iommu(dev, 0, size);
- wrap = 1;
+ if (offset == -1)
+ goto out_error;
}
+ zdev->next_bit = offset + size;
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- if (offset != -1) {
- zdev->next_bit = offset + size;
- if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
- /* global flush after wrap-around with lazy unmap */
- zpci_refresh_global(zdev);
- }
+ return zdev->start_dma + offset * PAGE_SIZE;
+
+out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return offset;
+ return DMA_ERROR_CODE;
}
-static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long flags;
+ unsigned long flags, offset;
+
+ offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
- bitmap_clear(zdev->iommu_bitmap, offset, size);
- /*
- * Lazy flush for unmap: need to move next_bit to avoid address re-use
- * until wrap-around.
- */
- if (!s390_iommu_strict && offset >= zdev->next_bit)
- zdev->next_bit = offset + size;
+
+ if (zdev->tlb_refresh || s390_iommu_strict)
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ else
+ bitmap_set(zdev->lazy_bitmap, offset, size);
+
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
@@ -289,16 +317,16 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long nr_pages, iommu_page_index;
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
+ unsigned long nr_pages;
dma_addr_t dma_addr;
int ret;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
- iommu_page_index = dma_alloc_iommu(dev, nr_pages);
- if (iommu_page_index == -1) {
+ dma_addr = dma_alloc_address(dev, nr_pages);
+ if (dma_addr == DMA_ERROR_CODE) {
ret = -ENOSPC;
goto out_err;
}
@@ -306,12 +334,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
- dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma) {
- ret = -ERANGE;
- goto out_free;
- }
-
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
@@ -323,7 +345,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
return dma_addr + (offset & ~PAGE_MASK);
out_free:
- dma_free_iommu(dev, iommu_page_index, nr_pages);
+ dma_free_address(dev, dma_addr, nr_pages);
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
@@ -335,7 +357,6 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long iommu_page_index;
int npages, ret;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -349,8 +370,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
}
atomic64_add(npages, &zdev->unmapped_pages);
- iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
- dma_free_iommu(dev, iommu_page_index, npages);
+ dma_free_address(dev, dma_addr, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
@@ -394,37 +414,98 @@ static void s390_dma_free(struct device *dev, size_t size,
free_pages((unsigned long) pa, get_order(size));
}
-static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nr_elements, enum dma_data_direction dir,
- unsigned long attrs)
+/* Map a segment into a contiguous dma address area */
+static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
{
- int mapped_elements = 0;
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ dma_addr_t dma_addr_base, dma_addr;
+ int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- int i;
+ unsigned long pa;
+ int ret;
- for_each_sg(sg, s, nr_elements, i) {
- struct page *page = sg_page(s);
- s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, 0);
- if (!dma_mapping_error(dev, s->dma_address)) {
- s->dma_length = s->length;
- mapped_elements++;
- } else
+ size = PAGE_ALIGN(size);
+ dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ if (dma_addr_base == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ dma_addr = dma_addr_base;
+ if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
+ pa = page_to_phys(sg_page(s)) + s->offset;
+ ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ if (ret)
goto unmap;
+
+ dma_addr += s->length;
}
-out:
- return mapped_elements;
+ ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
+ if (ret)
+ goto unmap;
+
+ *handle = dma_addr_base;
+ atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+
+ return ret;
unmap:
- for_each_sg(sg, s, mapped_elements, i) {
- if (s->dma_address)
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, 0);
- s->dma_address = 0;
+ dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
+ ZPCI_PTE_INVALID);
+ dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ zpci_err("map error:\n");
+ zpci_err_dma(ret, pa);
+ return ret;
+}
+
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s = sg, *start = sg, *dma = sg;
+ unsigned int max = dma_get_max_seg_size(dev);
+ unsigned int size = s->offset + s->length;
+ unsigned int offset = s->offset;
+ int count = 0, i;
+
+ for (i = 1; i < nr_elements; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) ||
+ size + s->length > max) {
+ if (__s390_dma_map_sg(dev, start, size,
+ &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count++;
+ }
+ size += s->length;
}
- mapped_elements = 0;
- goto out;
+ if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count + 1;
+unmap:
+ for_each_sg(sg, s, count, i)
+ s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
+ dir, attrs);
+
+ return 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -435,8 +516,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
- 0);
+ if (s->dma_length)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, attrs);
s->dma_address = 0;
s->dma_length = 0;
}
@@ -482,7 +564,14 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ if (!zdev->lazy_bitmap) {
+ rc = -ENOMEM;
+ goto free_bitmap;
+ }
+ }
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
@@ -492,6 +581,8 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
free_bitmap:
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
free_dma_table:
dma_free_cpu_table(zdev->dma_table);
zdev->dma_table = NULL;
@@ -513,6 +604,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
zdev->dma_table = NULL;
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
+
zdev->next_bit = 0;
}
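
The pci_dma.c rework above separates updating the translation tables (__dma_update_trans()) from TLB maintenance (__dma_purge_tlb()), and changes lazy unmapping: freed DMA addresses are now recorded in a second bitmap (zdev->lazy_bitmap) instead of being cleared right away, and only when the address allocator wraps around is a single global flush issued and the lazily freed range handed back. A kernel-style sketch of that bookkeeping, with struct toy_dev standing in for struct zpci_dev and the flush call itself omitted:

#include <linux/bitmap.h>

struct toy_dev {
	unsigned long *iommu_bitmap;	/* DMA addresses currently allocated */
	unsigned long *lazy_bitmap;	/* freed, but TLB not yet flushed */
	unsigned long iommu_pages;
};

static void lazy_free(struct toy_dev *d, unsigned long offset, int pages)
{
	/* defer the expensive flush: just remember what was freed */
	bitmap_set(d->lazy_bitmap, offset, pages);
}

static void reclaim_on_wraparound(struct toy_dev *d)
{
	/* after one global flush, return all lazily freed pages at once */
	bitmap_andnot(d->iommu_bitmap, d->iommu_bitmap,
		      d->lazy_bitmap, d->iommu_pages);
	bitmap_zero(d->lazy_bitmap, d->iommu_pages);
}
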
diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S
index 7274b5c4287e..4117890b1db1 100644
--- a/arch/score/kernel/vmlinux.lds.S
+++ b/arch/score/kernel/vmlinux.lds.S
@@ -40,6 +40,7 @@ SECTIONS
_text = .; /* Text and read-only data */
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.text.*)
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 95eccd49672f..53783978162e 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -139,7 +139,7 @@ static void ftrace_mod_code(void)
clear_mod_flag();
}
-void ftrace_nmi_enter(void)
+void arch_ftrace_nmi_enter(void)
{
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
smp_rmb();
@@ -150,7 +150,7 @@ void ftrace_nmi_enter(void)
smp_mb();
}
-void ftrace_nmi_exit(void)
+void arch_ftrace_nmi_exit(void)
{
/* Finish all executions before clearing nmi_running */
smp_mb();
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 235a4101999f..5b9a3cc90c58 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f5d60f14a0bc..b23c76b42d6e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -22,7 +22,6 @@ config SPARC
select HAVE_ARCH_TRACEHOOK
select HAVE_EXIT_THREAD
select SYSCTL_EXCEPTION_TRACE
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
select RTC_SYSTOHC
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index f5b6537306f0..666d5ba230d2 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -1744,6 +1744,7 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
#define HV_PCI_MAP_ATTR_READ 0x01
#define HV_PCI_MAP_ATTR_WRITE 0x02
+#define HV_PCI_MAP_ATTR_RELAXED_ORDER 0x04
#define HV_PCI_DEVICE_BUILD(b,d,f) \
((((b) & 0xff) << 16) | \
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3f70f900e834..1d51a11fb261 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -86,8 +86,9 @@ static inline unsigned long get_softint(void)
return retval;
}
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index cd83be527586..b0377db12d83 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/context_tracking.h>
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 61c6f935accc..db57d8acdc01 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -30,8 +30,19 @@
#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "
-static unsigned long vpci_major = 1;
-static unsigned long vpci_minor = 1;
+static unsigned long vpci_major;
+static unsigned long vpci_minor;
+
+struct vpci_version {
+ unsigned long major;
+ unsigned long minor;
+};
+
+/* Ordered from largest major to lowest */
+static struct vpci_version vpci_versions[] = {
+ { .major = 2, .minor = 0 },
+ { .major = 1, .minor = 1 },
+};
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
@@ -67,6 +78,10 @@ static long iommu_batch_flush(struct iommu_batch *p)
u64 *pglist = p->pglist;
unsigned long npages = p->npages;
+ /* VPCI maj=1, min=[0,1] only supports read and write */
+ if (vpci_major < 2)
+ prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
+
while (npages != 0) {
long num;
@@ -133,6 +148,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
unsigned long flags, order, first_page, npages, n;
+ unsigned long prot = 0;
struct iommu *iommu;
struct page *page;
void *ret;
@@ -146,6 +162,9 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
npages = size >> IO_PAGE_SHIFT;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
@@ -169,7 +188,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
local_irq_save(flags);
iommu_batch_start(dev,
- (HV_PCI_MAP_ATTR_READ |
+ (HV_PCI_MAP_ATTR_READ | prot |
HV_PCI_MAP_ATTR_WRITE),
entry);
@@ -266,6 +285,9 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
@@ -344,6 +366,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
+ if (attrs & DMA_ATTR_WEAK_ORDERING)
+ prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
+
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
@@ -907,22 +932,27 @@ static int pci_sun4v_probe(struct platform_device *op)
struct device_node *dp;
struct iommu *iommu;
u32 devhandle;
- int i, err;
+ int i, err = -ENODEV;
dp = op->dev.of_node;
if (!hvapi_negotiated++) {
- err = sun4v_hvapi_register(HV_GRP_PCI,
- vpci_major,
- &vpci_minor);
+ for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
+ vpci_major = vpci_versions[i].major;
+ vpci_minor = vpci_versions[i].minor;
+
+ err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
+ &vpci_minor);
+ if (!err)
+ break;
+ }
if (err) {
- printk(KERN_ERR PFX "Could not register hvapi, "
- "err=%d\n", err);
+ pr_err(PFX "Could not register hvapi, err=%d\n", err);
return err;
}
- printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
- vpci_major, vpci_minor);
+ pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
+ vpci_major, vpci_minor);
dma_ops = &sun4v_dma_ops;
}
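
pci_sun4v.c above stops hard-coding VPCI 1.1 and instead walks vpci_versions[] from the newest major downwards until sun4v_hvapi_register() accepts one; negotiating major 2 is what makes the new HV_PCI_MAP_ATTR_RELAXED_ORDER attribute usable, and iommu_batch_flush() masks it off again when only major 1 was registered. The negotiation loop reduces to this self-contained sketch, where hv_register() is a stub for the hypervisor call:

struct api_version { unsigned long major, minor; };

/* ordered from largest major to lowest, as in vpci_versions[] */
static const struct api_version versions[] = {
	{ 2, 0 },
	{ 1, 1 },
};

/* stub: pretend the hypervisor only speaks major 1 */
static int hv_register(unsigned long major, unsigned long *minor)
{
	(void)minor;
	return major <= 1 ? 0 : -1;
}

static int negotiate(unsigned long *major, unsigned long *minor)
{
	unsigned int i;
	int err = -1;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		*major = versions[i].major;
		*minor = versions[i].minor;
		err = hv_register(*major, minor);
		if (!err)
			break;	/* highest version both sides support */
	}
	return err;
}
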
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index fa14402b33f9..47ff5588e521 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
}
}
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
@@ -255,15 +255,15 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
- if (include_self)
+ if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
__global_reg_self(tp, regs, this_cpu);
smp_fetch_global_regs();
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, mask) {
struct global_reg_snapshot *gp;
- if (!include_self && cpu == this_cpu)
+ if (exclude_self && cpu == this_cpu)
continue;
gp = &global_cpu_snapshot[cpu].reg;
@@ -300,7 +300,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
static void sysrq_handle_globreg(int key)
{
- arch_trigger_all_cpu_backtrace(true);
+ trigger_all_cpu_backtrace();
}
static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d21cd625c0de..4094a51b1970 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -8,7 +8,7 @@
* I like traps on v9, :))))
*/
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 9aacb9159262..52c00d90d4b4 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -11,7 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index d79b3b734245..572db686f845 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -49,6 +49,7 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 3f291d8c57f7..643c149a3151 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -14,7 +14,7 @@
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 7ac6b62fb7c1..439784b7b7ac 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -5,7 +5,7 @@
* Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index ad143c13bdc0..6d8dc2aa94ce 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -16,9 +16,8 @@ static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
static int num_obio_ranges;
/* Adjust register values based upon the ranges parameters. */
-static void
-prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
- struct linux_prom_ranges *rangep, int nranges)
+static void prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
+ struct linux_prom_ranges *rangep, int nranges)
{
int regc, rngc;
@@ -34,33 +33,30 @@ prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
}
}
-static void
-prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
- struct linux_prom_ranges *ranges2, int nranges2)
+static void prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
+ struct linux_prom_ranges *ranges2, int nranges2)
{
int rng1c, rng2c;
- for(rng1c=0; rng1c < nranges1; rng1c++) {
- for(rng2c=0; rng2c < nranges2; rng2c++)
- if(ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space &&
+ for (rng1c = 0; rng1c < nranges1; rng1c++) {
+ for (rng2c = 0; rng2c < nranges2; rng2c++)
+ if (ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space &&
ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base &&
ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U)
break;
- if(rng2c == nranges2) /* oops */
+ if (rng2c == nranges2) /* oops */
prom_printf("adjust_ranges: Could not find matching bus type...\n");
else if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size)
- ranges1[rng1c].or_size =
- ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base;
+ ranges1[rng1c].or_size = ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base;
ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
}
}
/* Apply probed obio ranges to registers passed, if no ranges return. */
-void
-prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
+void prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
{
- if(num_obio_ranges)
+ if (num_obio_ranges)
prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
}
EXPORT_SYMBOL(prom_apply_obio_ranges);
@@ -76,40 +72,40 @@ void __init prom_ranges_init(void)
node = prom_getchild(prom_root_node);
obio_node = prom_searchsiblings(node, "obio");
- if(obio_node) {
+ if (obio_node) {
success = prom_getproperty(obio_node, "ranges",
(char *) promlib_obio_ranges,
sizeof(promlib_obio_ranges));
- if(success != -1)
- num_obio_ranges = (success/sizeof(struct linux_prom_ranges));
+ if (success != -1)
+ num_obio_ranges = (success / sizeof(struct linux_prom_ranges));
}
- if(num_obio_ranges)
+ if (num_obio_ranges)
prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
}
void prom_apply_generic_ranges(phandle node, phandle parent,
- struct linux_prom_registers *regs, int nregs)
+ struct linux_prom_registers *regs, int nregs)
{
int success;
int num_ranges;
struct linux_prom_ranges ranges[PROMREG_MAX];
-
+
success = prom_getproperty(node, "ranges",
(char *) ranges,
- sizeof (ranges));
+ sizeof(ranges));
if (success != -1) {
- num_ranges = (success/sizeof(struct linux_prom_ranges));
+ num_ranges = (success / sizeof(struct linux_prom_ranges));
if (parent) {
struct linux_prom_ranges parent_ranges[PROMREG_MAX];
int num_parent_ranges;
-
+
success = prom_getproperty(parent, "ranges",
- (char *) parent_ranges,
- sizeof (parent_ranges));
+ (char *) parent_ranges,
+ sizeof(parent_ranges));
if (success != -1) {
- num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
- prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
+ num_parent_ranges = (success / sizeof(struct linux_prom_ranges));
+ prom_adjust_ranges(ranges, num_ranges, parent_ranges, num_parent_ranges);
}
}
prom_adjust_regs(regs, nregs, ranges, num_ranges);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 78da75b670bc..4583c0320059 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -3,7 +3,6 @@
config TILE
def_bool y
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 84a924034bdb..1fa1f2544ff9 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -79,8 +79,9 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type);
void setup_irq_regs(void);
#ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
#endif /* _ASM_TILE_IRQ_H */
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 670a3569450f..101de132e363 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap)
* When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
* as a result return to the function that called _cpu_idle().
*/
-STD_ENTRY(_cpu_idle)
+STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
movei r1, 1
IRQ_ENABLE_LOAD(r2, r3)
mtspr INTERRUPT_CRITICAL_SECTION, r1
diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c
index db62cc34b955..81cf8743a3f3 100644
--- a/arch/tile/kernel/pmc.c
+++ b/arch/tile/kernel/pmc.c
@@ -16,7 +16,6 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/atomic.h>
-#include <linux/interrupt.h>
#include <asm/processor.h>
#include <asm/pmc.h>
@@ -29,9 +28,7 @@ int handle_perf_interrupt(struct pt_regs *regs, int fault)
if (!perf_irq)
panic("Unexpected PERF_COUNT interrupt %d\n", fault);
- nmi_enter();
retval = perf_irq(regs, fault);
- nmi_exit();
return retval;
}
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index a465d8372edd..9f37106ef93a 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
-#include <linux/hardirq.h>
+#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
@@ -594,66 +594,18 @@ void show_regs(struct pt_regs *regs)
tile_show_stack(&kbt);
}
-/* To ensure stack dump on tiles occurs one by one. */
-static DEFINE_SPINLOCK(backtrace_lock);
-/* To ensure no backtrace occurs before all of the stack dump are done. */
-static atomic_t backtrace_cpus;
-/* The cpu mask to avoid reentrance. */
-static struct cpumask backtrace_mask;
-
-void do_nmi_dump_stack(struct pt_regs *regs)
-{
- int is_idle = is_idle_task(current) && !in_interrupt();
- int cpu;
-
- nmi_enter();
- cpu = smp_processor_id();
- if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
- goto done;
-
- spin_lock(&backtrace_lock);
- if (is_idle)
- pr_info("CPU: %d idle\n", cpu);
- else
- show_regs(regs);
- spin_unlock(&backtrace_lock);
- atomic_dec(&backtrace_cpus);
-done:
- nmi_exit();
-}
-
#ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self)
+void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
{
struct cpumask mask;
HV_Coord tile;
unsigned int timeout;
int cpu;
- int ongoing;
HV_NMI_Info info[NR_CPUS];
- ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
- if (ongoing != 0) {
- pr_err("Trying to do all-cpu backtrace.\n");
- pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
- ongoing);
- if (self) {
- pr_err("Reporting the stack on this cpu only.\n");
- dump_stack();
- }
- return;
- }
-
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
- cpumask_copy(&backtrace_mask, &mask);
-
- /* Backtrace for myself first. */
- if (self)
- dump_stack();
-
/* Tentatively dump stack on remote tiles via NMI. */
timeout = 100;
+ cpumask_copy(&mask, in_mask);
while (!cpumask_empty(&mask) && timeout) {
for_each_cpu(cpu, &mask) {
tile.x = cpu_x(cpu);
@@ -664,12 +616,17 @@ void arch_trigger_all_cpu_backtrace(bool self)
}
mdelay(10);
+ touch_softlockup_watchdog();
timeout--;
}
- /* Warn about cpus stuck in ICS and decrement their counts here. */
+ /* Warn about cpus stuck in ICS. */
if (!cpumask_empty(&mask)) {
for_each_cpu(cpu, &mask) {
+
+ /* Clear the bit as if nmi_cpu_backtrace() ran. */
+ cpumask_clear_cpu(cpu, in_mask);
+
switch (info[cpu].result) {
case HV_NMI_RESULT_FAIL_ICS:
pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
@@ -680,16 +637,20 @@ void arch_trigger_all_cpu_backtrace(bool self)
cpu);
break;
case HV_ENOSYS:
- pr_warn("Hypervisor too old to allow remote stack dumps.\n");
- goto skip_for_each;
+ WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
+ break;
default: /* should not happen */
pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
cpu, info[cpu].result, info[cpu].pc);
break;
}
}
-skip_for_each:
- atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
}
}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self,
+ nmi_raise_cpu_backtrace);
+}
#endif /* __tilegx__ */
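
The tile changes above drop the hand-rolled backtrace machinery (backtrace_lock, backtrace_cpus, backtrace_mask) and keep only an NMI "raise" hook, nmi_raise_cpu_backtrace(); mask handling, self-exclusion and serialization move into the generic nmi_trigger_cpumask_backtrace()/nmi_cpu_backtrace() pair, and the sparc and x86 hunks in this patch switch to the same arch_trigger_cpumask_backtrace() interface. A toy, runnable sketch of that split, using a one-word cpumask and illustrative names:

#include <stdio.h>

typedef unsigned long cpumask_t;	/* toy mask: one bit per cpu */

/* arch hook: only responsible for delivering the NMI to the cpus in the mask */
static void raise_backtrace_nmi(cpumask_t *mask)
{
	int cpu;

	for (cpu = 0; cpu < (int)(8 * sizeof(*mask)); cpu++)
		if (*mask & (1UL << cpu))
			printf("send backtrace NMI to cpu %d\n", cpu);
}

/* generic core: owns the mask bookkeeping, the arch code never touches it */
static void trigger_cpumask_backtrace(cpumask_t mask, int exclude_self,
				      int this_cpu,
				      void (*raise)(cpumask_t *))
{
	if (exclude_self)
		mask &= ~(1UL << this_cpu);
	raise(&mask);
}

int main(void)
{
	trigger_cpumask_backtrace(0x0fUL, 1, 0, raise_backtrace_nmi);
	return 0;
}
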
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 4d9651c5b1ad..39f427bb0de2 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -20,6 +20,8 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/nmi.h>
#include <asm/stack.h>
#include <asm/traps.h>
#include <asm/setup.h>
@@ -392,14 +394,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
{
+ nmi_enter();
switch (reason) {
+#ifdef arch_trigger_cpumask_backtrace
case TILE_NMI_DUMP_STACK:
- do_nmi_dump_stack(regs);
+ nmi_cpu_backtrace(regs);
break;
+#endif
default:
panic("Unexpected do_nmi type %ld", reason);
- return;
}
+ nmi_exit();
}
/* Deprecated function currently only used here. */
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 9d449caf8910..e1baf094fba4 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -42,6 +42,7 @@ SECTIONS
.text : AT (ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 2d0266d0254d..3282787bbcfb 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -175,27 +175,4 @@ static struct miscdevice harddog_miscdev = {
.name = "watchdog",
.fops = &harddog_fops,
};
-
-static char banner[] __initdata = KERN_INFO "UML Watchdog Timer\n";
-
-static int __init harddog_init(void)
-{
- int ret;
-
- ret = misc_register(&harddog_miscdev);
-
- if (ret)
- return ret;
-
- printk(banner);
-
- return 0;
-}
-
-static void __exit harddog_exit(void)
-{
- misc_deregister(&harddog_miscdev);
-}
-
-module_init(harddog_init);
-module_exit(harddog_exit);
+module_misc_device(harddog_miscdev);
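
The UML watchdog conversion above removes an init/exit pair whose only work was misc_register()/misc_deregister() and replaces it with module_misc_device(). To a first approximation (the exact macro lives in <linux/miscdevice.h> and builds on module_driver()), the one-liner expands back into the boilerplate it just deleted:

#include <linux/miscdevice.h>
#include <linux/module.h>

/* roughly what module_misc_device(harddog_miscdev) generates; the miscdevice
 * itself is the harddog_miscdev declared earlier in this file */
static int __init harddog_miscdev_init(void)
{
	return misc_register(&harddog_miscdev);
}
module_init(harddog_miscdev_init);

static void __exit harddog_miscdev_exit(void)
{
	misc_deregister(&harddog_miscdev);
}
module_exit(harddog_miscdev_exit);

The only behavioural difference visible in the hunk is the dropped "UML Watchdog Timer" banner message.
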
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index adde088aeeff..4fdbcf958cd5 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -68,6 +68,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.stub .text.* .gnu.linkonce.t.*)
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 6899195602b7..1840f55ed042 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -28,6 +28,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
/* .gnu.warning sections are handled specially by elf32.em. */
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 77e407e49a63..56e788e8ee83 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -37,6 +37,7 @@ SECTIONS
.text : { /* Real text segment */
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9b2d50a73a11..bada636d1065 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,11 +23,11 @@ config X86
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE if X86_64
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_MMIO_FLUSH
@@ -2757,19 +2757,6 @@ config PMC_ATOM
def_bool y
depends on PCI
-config VMD
- depends on PCI_MSI
- tristate "Volume Management Device Driver"
- default N
- ---help---
- Adds support for the Intel Volume Management Device (VMD). VMD is a
- secondary PCI host bridge that allows PCI Express root ports,
- and devices attached to them, to be removed from the default
- PCI domain and placed within the VMD domain. This provides
- more bus resources than are otherwise possible with a
- single domain. If you know your system provides one of these and
- has devices attached to it, say Y; if you are not sure, say N.
-
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 830ed391e7ef..2d449337a360 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -163,11 +163,12 @@ asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
+avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index fee1d95902b5..c98ec2efd750 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1148,7 +1148,7 @@ END(error_entry)
/*
- * On entry, EBS is a "return to kernel mode" flag:
+ * On entry, EBX is a "return to kernel mode" flag:
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 94d54d0defa7..02223cb4bcfd 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -129,7 +129,7 @@ static notrace cycle_t vread_pvclock(int *mode)
return 0;
}
- ret = __pvclock_read_cycles(pvti);
+ ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
} while (pvclock_read_retry(pvti, version));
/* refer to vread_tsc() comment for rationale */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index e7de5c9a4fbd..16d3fa211962 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -50,8 +50,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
#ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index b77f5edb03b0..ac7692dcfa2e 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -4,6 +4,10 @@
#include <asm/processor-flags.h>
#ifndef __ASSEMBLY__
+
+/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
/*
* Interrupt control:
*/
@@ -44,12 +48,12 @@ static inline void native_irq_enable(void)
asm volatile("sti": : :"memory");
}
-static inline void native_safe_halt(void)
+static inline __cpuidle void native_safe_halt(void)
{
asm volatile("sti; hlt": : :"memory");
}
-static inline void native_halt(void)
+static inline __cpuidle void native_halt(void)
{
asm volatile("hlt": : :"memory");
}
@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void)
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
-static inline void arch_safe_halt(void)
+static inline __cpuidle void arch_safe_halt(void)
{
native_safe_halt();
}
@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void)
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
-static inline void halt(void)
+static inline __cpuidle void halt(void)
{
native_halt();
}
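
The CPUIDLE_TEXT additions to the various linker scripts and the __cpuidle annotations above collect the idle entry points into a dedicated .cpuidle.text region, so other code (for example the NMI backtrace path) can test whether a CPU's instruction pointer lies inside it. A minimal userspace sketch of that idea follows; it is illustrative only, uses a made-up section name because GCC only auto-generates __start_/__stop_ symbols for identifier-like section names, and is not the kernel implementation, which bounds .cpuidle.text with explicit linker-script symbols.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __cpuidle: place the function in a named code section. */
#define __cpuidle_demo __attribute__((__section__("cpuidle_demo")))

/* The linker provides these for orphan sections with identifier names. */
extern const char __start_cpuidle_demo[], __stop_cpuidle_demo[];

static void __cpuidle_demo fake_halt(void)
{
        /* stand-in for native_safe_halt()/native_halt() */
}

static int in_idle_text(uintptr_t pc)
{
        return pc >= (uintptr_t)__start_cpuidle_demo &&
               pc <  (uintptr_t)__stop_cpuidle_demo;
}

int main(void)
{
        printf("fake_halt in idle text: %d\n", in_idle_text((uintptr_t)fake_halt));
        printf("main in idle text:      %d\n", in_idle_text((uintptr_t)main));
        return 0;
}
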
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 33ae3a4d0159..4b20f7304b9c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -568,6 +568,7 @@ struct kvm_vcpu_arch {
struct kvm_steal_time steal;
} st;
+ u64 tsc_offset;
u64 last_guest_tsc;
u64 last_host_tsc;
u64 tsc_offset_adjustment;
@@ -701,6 +702,8 @@ struct kvm_hv {
/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
u64 hv_crash_ctl;
+
+ HV_REFERENCE_TSC_PAGE tsc_ref;
};
struct kvm_arch {
@@ -781,54 +784,56 @@ struct kvm_arch {
bool disabled_lapic_found;
/* Struct members for AVIC */
+ u32 avic_vm_id;
u32 ldr_mode;
struct page *avic_logical_id_table_page;
struct page *avic_physical_id_table_page;
+ struct hlist_node hnode;
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
};
struct kvm_vm_stat {
- u32 mmu_shadow_zapped;
- u32 mmu_pte_write;
- u32 mmu_pte_updated;
- u32 mmu_pde_zapped;
- u32 mmu_flooded;
- u32 mmu_recycled;
- u32 mmu_cache_miss;
- u32 mmu_unsync;
- u32 remote_tlb_flush;
- u32 lpages;
+ ulong mmu_shadow_zapped;
+ ulong mmu_pte_write;
+ ulong mmu_pte_updated;
+ ulong mmu_pde_zapped;
+ ulong mmu_flooded;
+ ulong mmu_recycled;
+ ulong mmu_cache_miss;
+ ulong mmu_unsync;
+ ulong remote_tlb_flush;
+ ulong lpages;
};
struct kvm_vcpu_stat {
- u32 pf_fixed;
- u32 pf_guest;
- u32 tlb_flush;
- u32 invlpg;
-
- u32 exits;
- u32 io_exits;
- u32 mmio_exits;
- u32 signal_exits;
- u32 irq_window_exits;
- u32 nmi_window_exits;
- u32 halt_exits;
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 request_irq_exits;
- u32 irq_exits;
- u32 host_state_reload;
- u32 efer_reload;
- u32 fpu_reload;
- u32 insn_emulation;
- u32 insn_emulation_fail;
- u32 hypercalls;
- u32 irq_injections;
- u32 nmi_injections;
+ u64 pf_fixed;
+ u64 pf_guest;
+ u64 tlb_flush;
+ u64 invlpg;
+
+ u64 exits;
+ u64 io_exits;
+ u64 mmio_exits;
+ u64 signal_exits;
+ u64 irq_window_exits;
+ u64 nmi_window_exits;
+ u64 halt_exits;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 request_irq_exits;
+ u64 irq_exits;
+ u64 host_state_reload;
+ u64 efer_reload;
+ u64 fpu_reload;
+ u64 insn_emulation;
+ u64 insn_emulation_fail;
+ u64 hypercalls;
+ u64 irq_injections;
+ u64 nmi_injections;
};
struct x86_instruction_info;
@@ -951,7 +956,6 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
- u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 9ab7507ca1c2..1411dbed5e5e 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -23,6 +23,9 @@ struct pci_sysdata {
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void *fwnode; /* IRQ domain for MSI assignment */
#endif
+#if IS_ENABLED(CONFIG_VMD)
+ bool vmd_domain; /* True if in Intel VMD domain */
+#endif
};
extern int pci_routeirq;
@@ -56,6 +59,17 @@ static inline void *_pci_root_bus_fwnode(struct pci_bus *bus)
#define pci_root_bus_fwnode _pci_root_bus_fwnode
#endif
+static inline bool is_vmd(struct pci_bus *bus)
+{
+#if IS_ENABLED(CONFIG_VMD)
+ struct pci_sysdata *sd = bus->sysdata;
+
+ return sd->vmd_domain;
+#else
+ return false;
+#endif
+}
+
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f1218f512f62..8b4de22d6429 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -439,8 +439,6 @@ extern pgprot_t pgprot_writethrough(pgprot_t prot);
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d019f0cc80ec..3ad741b84072 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -87,9 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
}
static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src)
+cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+ u64 tsc)
{
- u64 delta = rdtsc_ordered() - src->tsc_timestamp;
+ u64 delta = tsc - src->tsc_timestamp;
cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
src->tsc_shift);
return src->system_time + offset;
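
__pvclock_read_cycles() now takes the TSC sample from its caller instead of reading it internally, so the vDSO and pvclock_clocksource_read() keep passing rdtsc_ordered() while host-side code (see the get_kvmclock_ns() changes to x86.c further down) can pass a scaled guest TSC instead. A standalone sketch of the same arithmetic, using mock types and made-up parameters, shows that only the delta term depends on which sample the caller supplies:

#include <stdint.h>
#include <stdio.h>

/* Local mirror of the pvclock fields used by the formula (mock only). */
struct mock_pvclock_vcpu_time_info {
        uint64_t tsc_timestamp;
        uint64_t system_time;
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
};

/* Same math as pvclock_scale_delta(): shift, then (delta * mul) >> 32. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;
        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

static uint64_t read_cycles(const struct mock_pvclock_vcpu_time_info *src,
                            uint64_t tsc)
{
        return src->system_time +
               scale_delta(tsc - src->tsc_timestamp,
                           src->tsc_to_system_mul, src->tsc_shift);
}

int main(void)
{
        struct mock_pvclock_vcpu_time_info src = {
                .tsc_timestamp     = 1000000,
                .system_time       = 50000,
                .tsc_to_system_mul = 0x80000000u,   /* 0.5 ns per tick */
                .tsc_shift         = 0,
        };

        /* Two different TSC samples give two different readings. */
        printf("%llu\n", (unsigned long long)read_cycles(&src, 1000200)); /* 50100 */
        printf("%llu\n", (unsigned long long)read_cycles(&src, 1002000)); /* 51000 */
        return 0;
}
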
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index e6911caf5bbf..608a79d5a466 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -20,15 +20,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
/* No need for a barrier -- XCHG is a barrier on x86. */
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
-extern int xen_have_vector_callback;
-
-/*
- * Events delivered via platform PCI interrupts are always
- * routed to vcpu 0 and hence cannot be rebound.
- */
-static inline bool xen_support_evtchn_rebind(void)
-{
- return (!xen_hvm_domain() || xen_have_vector_callback);
-}
-
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 45257cf84370..4dd5d500eb60 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index bdfad642123f..af15f4444330 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
-void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
struct cstate_entry *percpu_entry;
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index f29501e1a5c1..c73c9fb281e1 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -26,32 +26,32 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
}
#endif
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
static void nmi_raise_cpu_backtrace(cpumask_t *mask)
{
apic->send_IPI_mask(mask, NMI_VECTOR);
}
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
- nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
+ nmi_trigger_cpumask_backtrace(mask, exclude_self,
+ nmi_raise_cpu_backtrace);
}
-static int
-arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
+static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
if (nmi_cpu_backtrace(regs))
return NMI_HANDLED;
return NMI_DONE;
}
-NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
+NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler);
-static int __init register_trigger_all_cpu_backtrace(void)
+static int __init register_nmi_cpu_backtrace_handler(void)
{
- register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+ register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler,
0, "arch_bt");
return 0;
}
-early_initcall(register_trigger_all_cpu_backtrace);
+early_initcall(register_nmi_cpu_backtrace_handler);
#endif
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
new file mode 100644
index 000000000000..e9d252d873aa
--- /dev/null
+++ b/arch/x86/kernel/livepatch.c
@@ -0,0 +1,65 @@
+/*
+ * livepatch.c - x86-specific Kernel Live Patching Core
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/livepatch.h>
+#include <asm/text-patching.h>
+
+/* Apply per-object alternatives. Based on x86 module_finalize() */
+void arch_klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ int cnt;
+ struct klp_modinfo *info;
+ Elf_Shdr *s, *alt = NULL, *para = NULL;
+ void *aseg, *pseg;
+ const char *objname;
+ char sec_objname[MODULE_NAME_LEN];
+ char secname[KSYM_NAME_LEN];
+
+ info = patch->mod->klp_info;
+ objname = obj->name ? obj->name : "vmlinux";
+
+ /* See livepatch core code for BUILD_BUG_ON() explanation */
+ BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+
+ for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) {
+ /* Apply per-object .klp.arch sections */
+ cnt = sscanf(info->secstrings + s->sh_name,
+ ".klp.arch.%55[^.].%127s",
+ sec_objname, secname);
+ if (cnt != 2)
+ continue;
+ if (strcmp(sec_objname, objname))
+ continue;
+ if (!strcmp(".altinstructions", secname))
+ alt = s;
+ if (!strcmp(".parainstructions", secname))
+ para = s;
+ }
+
+ if (alt) {
+ aseg = (void *) alt->sh_addr;
+ apply_alternatives(aseg, aseg + alt->sh_size);
+ }
+
+ if (para) {
+ pseg = (void *) para->sh_addr;
+ apply_paravirt(pseg, pseg + para->sh_size);
+ }
+}
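
The sscanf() pattern in arch_klp_init_object_loaded() splits a ".klp.arch.<objname>.<secname>" section name into its two parts; the literal dot after the first conversion means the stored section name keeps its own leading dot, which is what the later strcmp() against ".altinstructions" and ".parainstructions" expects. A small userspace demonstration of just the parse, with invented section names:

#include <stdio.h>

int main(void)
{
        const char *names[] = {
                ".klp.arch.vmlinux..altinstructions",
                ".klp.arch.ext4..parainstructions",
                ".klp.rela.vmlinux..text",        /* wrong prefix: no match */
        };
        char objname[56], secname[128];
        int i;

        for (i = 0; i < 3; i++) {
                int cnt = sscanf(names[i], ".klp.arch.%55[^.].%127s",
                                 objname, secname);
                if (cnt == 2)
                        printf("obj=%s sec=%s\n", objname, secname);
                else
                        printf("skip %s\n", names[i]);
        }
        return 0;
}
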
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4002b475171c..28cea7802ecb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -302,7 +302,7 @@ void arch_cpu_idle(void)
/*
* We use this if we don't have any better idle routine..
*/
-void default_idle(void)
+void __cpuidle default_idle(void)
{
trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt();
@@ -417,7 +417,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/
-static void mwait_idle(void)
+static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 3599404e3089..5b2cc889ce34 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -80,7 +80,7 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
do {
version = pvclock_read_begin(src);
- ret = __pvclock_read_cycles(src);
+ ret = __pvclock_read_cycles(src, rdtsc_ordered());
flags = src->flags;
} while (pvclock_read_retry(src, version));
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9297a002d8e5..dbf67f64d5ec 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,6 +97,7 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
+ CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
ENTRY_TEXT
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 464fa477afbf..3bff20710471 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -13,7 +13,7 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o page_track.o
+ hyperv.o page_track.o debugfs.o
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3235e0fe7792..afa7bbb596cd 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -366,7 +366,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
- F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB);
+ F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
+ F(AVX512BW) | F(AVX512VL);
/* cpuid 0xD.1.eax */
const u32 kvm_cpuid_D_1_eax_x86_features =
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
new file mode 100644
index 000000000000..c19c7ede9bd6
--- /dev/null
+++ b/arch/x86/kvm/debugfs.c
@@ -0,0 +1,69 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * Copyright 2016 Red Hat, Inc. and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+#include <linux/kvm_host.h>
+#include <linux/debugfs.h>
+
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return true;
+}
+
+static int vcpu_get_tsc_offset(void *data, u64 *val)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ *val = vcpu->arch.tsc_offset;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_offset_fops, vcpu_get_tsc_offset, NULL, "%lld\n");
+
+static int vcpu_get_tsc_scaling_ratio(void *data, u64 *val)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ *val = vcpu->arch.tsc_scaling_ratio;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_fops, vcpu_get_tsc_scaling_ratio, NULL, "%llu\n");
+
+static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
+{
+ *val = kvm_tsc_scaling_ratio_frac_bits;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ struct dentry *ret;
+
+ ret = debugfs_create_file("tsc-offset", 0444,
+ vcpu->debugfs_dentry,
+ vcpu, &vcpu_tsc_offset_fops);
+ if (!ret)
+ return -ENOMEM;
+
+ if (kvm_has_tsc_control) {
+ ret = debugfs_create_file("tsc-scaling-ratio", 0444,
+ vcpu->debugfs_dentry,
+ vcpu, &vcpu_tsc_scaling_fops);
+ if (!ret)
+ return -ENOMEM;
+ ret = debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
+ vcpu->debugfs_dentry,
+ vcpu, &vcpu_tsc_scaling_frac_fops);
+ if (!ret)
+ return -ENOMEM;
+
+ }
+
+ return 0;
+}
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 01bd7b7a6866..42b1c83741c8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -386,7 +386,21 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic)
static u64 get_time_ref_counter(struct kvm *kvm)
{
- return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_vcpu *vcpu;
+ u64 tsc;
+
+ /*
+ * If the guest has not set up the TSC page or the clock isn't
+ * stable, fall back to get_kvmclock_ns.
+ */
+ if (!hv->tsc_ref.tsc_sequence)
+ return div_u64(get_kvmclock_ns(kvm), 100);
+
+ vcpu = kvm_get_vcpu(kvm, 0);
+ tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
+ + hv->tsc_ref.tsc_offset;
}
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -756,6 +770,129 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
return 0;
}
+/*
+ * The kvmclock and Hyper-V TSC page use similar formulas, and converting
+ * between them is possible:
+ *
+ * kvmclock formula:
+ * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
+ * + system_time
+ *
+ * Hyper-V formula:
+ * nsec/100 = ticks * scale / 2^64 + offset
+ *
+ * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
+ * By dividing the kvmclock formula by 100 and equating what's left we get:
+ * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
+ * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
+ * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
+ *
+ * Now expand the kvmclock formula and divide by 100:
+ * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
+ * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
+ * + system_time
+ * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
+ * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
+ * + system_time / 100
+ *
+ * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
+ * nsec/100 = ticks * scale / 2^64
+ * - tsc_timestamp * scale / 2^64
+ * + system_time / 100
+ *
+ * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
+ * offset = system_time / 100 - tsc_timestamp * scale / 2^64
+ *
+ * These two equivalencies are implemented in this function.
+ */
+static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
+ HV_REFERENCE_TSC_PAGE *tsc_ref)
+{
+ u64 max_mul;
+
+ if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
+ return false;
+
+ /*
+ * Check if scale would overflow; if so, we use the time ref counter:
+ * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
+ * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
+ * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
+ */
+ max_mul = 100ull << (32 - hv_clock->tsc_shift);
+ if (hv_clock->tsc_to_system_mul >= max_mul)
+ return false;
+
+ /*
+ * Otherwise compute the scale and offset according to the formulas
+ * derived above.
+ */
+ tsc_ref->tsc_scale =
+ mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
+ hv_clock->tsc_to_system_mul,
+ 100);
+
+ tsc_ref->tsc_offset = hv_clock->system_time;
+ do_div(tsc_ref->tsc_offset, 100);
+ tsc_ref->tsc_offset -=
+ mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
+ return true;
+}
+
+void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ struct pvclock_vcpu_time_info *hv_clock)
+{
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+ u32 tsc_seq;
+ u64 gfn;
+
+ BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
+ BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);
+
+ if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ return;
+
+ gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+ /*
+ * Because the TSC parameters only vary when there is a
+ * change in the master clock, do not bother with caching.
+ */
+ if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
+ &tsc_seq, sizeof(tsc_seq))))
+ return;
+
+ /*
+ * While we're computing and writing the parameters, force the
+ * guest to use the time reference count MSR.
+ */
+ hv->tsc_ref.tsc_sequence = 0;
+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+ return;
+
+ if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
+ return;
+
+ /* Ensure sequence is zero before writing the rest of the struct. */
+ smp_wmb();
+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
+ return;
+
+ /*
+ * Now switch to the TSC page mechanism by writing the sequence.
+ */
+ tsc_seq++;
+ if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
+ tsc_seq = 1;
+
+ /* Write the struct entirely before the non-zero sequence. */
+ smp_wmb();
+
+ hv->tsc_ref.tsc_sequence = tsc_seq;
+ kvm_write_guest(kvm, gfn_to_gpa(gfn),
+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+}
+
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
@@ -793,23 +930,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
mark_page_dirty(kvm, gfn);
break;
}
- case HV_X64_MSR_REFERENCE_TSC: {
- u64 gfn;
- HV_REFERENCE_TSC_PAGE tsc_ref;
-
- memset(&tsc_ref, 0, sizeof(tsc_ref));
+ case HV_X64_MSR_REFERENCE_TSC:
hv->hv_tsc_page = data;
- if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
- break;
- gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
- if (kvm_write_guest(
- kvm,
- gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
- &tsc_ref, sizeof(tsc_ref)))
- return 1;
- mark_page_dirty(kvm, gfn);
+ if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
break;
- }
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
return kvm_hv_msr_set_crash_data(vcpu,
msr - HV_X64_MSR_CRASH_P0,
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 60eccd4bd1d3..cd1119538add 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -84,4 +84,7 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
+void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ struct pvclock_vcpu_time_info *hv_clock);
+
#endif
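
As a sanity check on the derivation in the comment above compute_tsc_page_parameters(), the standalone program below evaluates both the kvmclock formula (divided by 100) and the Hyper-V scale/offset formula for the same tick count and gets the same result. The clock parameters are made-up sample values for a 4 GHz guest TSC, and the offset computation deliberately relies on 64-bit wraparound (the subtraction can dip below zero), which cancels out in the final addition:

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;

static uint64_t mul_shr64(uint64_t a, uint64_t b)      /* (a * b) >> 64 */
{
        return (uint64_t)(((u128)a * b) >> 64);
}

int main(void)
{
        /* Sample kvmclock parameters: 0.25 ns per tick, i.e. tsc_shift = -1 */
        uint32_t tsc_to_system_mul = 0x80000000u;
        int      tsc_shift         = -1;
        uint64_t tsc_timestamp     = 8000000000ull;
        uint64_t system_time       = 1000000000ull;     /* 1 s */
        uint64_t ticks             = tsc_timestamp + 800;

        /* kvmclock: nsec = scaled delta + system_time */
        uint64_t delta  = (ticks - tsc_timestamp) >> -tsc_shift;
        uint64_t kvm_ns = (uint64_t)(((u128)delta * tsc_to_system_mul) >> 32)
                          + system_time;

        /* Hyper-V: scale = mul * 2^(32+shift) / 100, offset as derived above */
        uint64_t scale  = (uint64_t)(((u128)tsc_to_system_mul
                                      << (32 + tsc_shift)) / 100);
        uint64_t offset = system_time / 100 - mul_shr64(tsc_timestamp, scale);
        uint64_t hv     = mul_shr64(ticks, scale) + offset;

        printf("kvmclock nsec/100 = %llu\n", (unsigned long long)(kvm_ns / 100));
        printf("Hyper-V  nsec/100 = %llu\n", (unsigned long long)hv);
        return 0;
}
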
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b62c85229711..23b99f305382 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1761,9 +1761,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
if (value & MSR_IA32_APICBASE_ENABLE) {
kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
static_key_slow_dec_deferred(&apic_hw_disabled);
- } else
+ } else {
static_key_slow_inc(&apic_hw_disabled.key);
- recalculate_apic_map(vcpu->kvm);
+ recalculate_apic_map(vcpu->kvm);
+ }
}
if ((old_value ^ value) & X2APIC_ENABLE) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3d4cc8cc56a3..d9c7e986b4e4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1207,7 +1207,7 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
*
* Return true if tlb need be flushed.
*/
-static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
+static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
u64 spte = *sptep;
@@ -1233,12 +1233,12 @@ static bool __rmap_write_protect(struct kvm *kvm,
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
- flush |= spte_write_protect(kvm, sptep, pt_protect);
+ flush |= spte_write_protect(sptep, pt_protect);
return flush;
}
-static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
+static bool spte_clear_dirty(u64 *sptep)
{
u64 spte = *sptep;
@@ -1256,12 +1256,12 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
- flush |= spte_clear_dirty(kvm, sptep);
+ flush |= spte_clear_dirty(sptep);
return flush;
}
-static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
+static bool spte_set_dirty(u64 *sptep)
{
u64 spte = *sptep;
@@ -1279,7 +1279,7 @@ static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
- flush |= spte_set_dirty(kvm, sptep);
+ flush |= spte_set_dirty(sptep);
return flush;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1e6b84b96ea6..f8157a36ab09 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -34,6 +34,8 @@
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
+#include <linux/amd-iommu.h>
+#include <linux/hashtable.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -41,6 +43,7 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
+#include <asm/irq_remapping.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -96,6 +99,19 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
+/* AVIC GATAG is encoded using VM and VCPU IDs */
+#define AVIC_VCPU_ID_BITS 8
+#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
+
+#define AVIC_VM_ID_BITS 24
+#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS)
+#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
+
+#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
+ (y & AVIC_VCPU_ID_MASK))
+#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
+#define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)
+
static bool erratum_383_found __read_mostly;
static const u32 host_save_user_msrs[] = {
@@ -185,6 +201,23 @@ struct vcpu_svm {
struct page *avic_backing_page;
u64 *avic_physical_id_cache;
bool avic_is_running;
+
+ /*
+ * Per-vcpu list of struct amd_svm_iommu_ir:
+ * This is used mainly to store interrupt remapping information used
+ * when updating the vcpu affinity. This avoids the need to scan for
+ * IRTE and try to match ga_tag in the IOMMU driver.
+ */
+ struct list_head ir_list;
+ spinlock_t ir_list_lock;
+};
+
+/*
+ * This is a wrapper of struct amd_ir_data.
+ */
+struct amd_svm_iommu_ir {
+ struct list_head node; /* Used by SVM for per-vcpu ir_list */
+ void *data; /* Storing pointer to struct amd_ir_data */
};
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
@@ -242,6 +275,10 @@ static int avic;
module_param(avic, int, S_IRUGO);
#endif
+/* AVIC VM ID bit masks and lock */
+static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
+static DEFINE_SPINLOCK(avic_vm_id_lock);
+
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -928,6 +965,55 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
+/* Note:
+ * This hash table is used to map VM_ID to a struct kvm_arch
+ * when handling an AMD IOMMU GALOG notification to schedule in
+ * a particular vCPU.
+ */
+#define SVM_VM_DATA_HASH_BITS 8
+DECLARE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
+static spinlock_t svm_vm_data_hash_lock;
+
+/* Note:
+ * This function is called from the IOMMU driver to notify
+ * SVM to schedule in a particular vCPU of a particular VM.
+ */
+static int avic_ga_log_notifier(u32 ga_tag)
+{
+ unsigned long flags;
+ struct kvm_arch *ka = NULL;
+ struct kvm_vcpu *vcpu = NULL;
+ u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
+ u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+
+ pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+ hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+ struct kvm *kvm = container_of(ka, struct kvm, arch);
+ struct kvm_arch *vm_data = &kvm->arch;
+
+ if (vm_data->avic_vm_id != vm_id)
+ continue;
+ vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+ break;
+ }
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
+ if (!vcpu)
+ return 0;
+
+ /* Note:
+ * At this point, the IOMMU should have already set the pending
+ * bit in the vAPIC backing page. So, we just need to schedule
+ * in the vcpu.
+ */
+ if (vcpu->mode == OUTSIDE_GUEST_MODE)
+ kvm_vcpu_wake_up(vcpu);
+
+ return 0;
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -986,10 +1072,15 @@ static __init int svm_hardware_setup(void)
if (avic) {
if (!npt_enabled ||
!boot_cpu_has(X86_FEATURE_AVIC) ||
- !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+ !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
avic = false;
- else
+ } else {
pr_info("AVIC enabled\n");
+
+ hash_init(svm_vm_data_hash);
+ spin_lock_init(&svm_vm_data_hash_lock);
+ amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
+ }
}
return 0;
@@ -1028,13 +1119,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- return svm->vmcb->control.tsc_offset;
-}
-
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1280,19 +1364,55 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+static inline int avic_get_next_vm_id(void)
+{
+ int id;
+
+ spin_lock(&avic_vm_id_lock);
+
+ /* AVIC VM ID is one-based. */
+ id = find_next_zero_bit(avic_vm_id_bitmap, AVIC_VM_ID_NR, 1);
+ if (id <= AVIC_VM_ID_MASK)
+ __set_bit(id, avic_vm_id_bitmap);
+ else
+ id = -EAGAIN;
+
+ spin_unlock(&avic_vm_id_lock);
+ return id;
+}
+
+static inline int avic_free_vm_id(int id)
+{
+ if (id <= 0 || id > AVIC_VM_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&avic_vm_id_lock);
+ __clear_bit(id, avic_vm_id_bitmap);
+ spin_unlock(&avic_vm_id_lock);
+ return 0;
+}
+
static void avic_vm_destroy(struct kvm *kvm)
{
+ unsigned long flags;
struct kvm_arch *vm_data = &kvm->arch;
+ avic_free_vm_id(vm_data->avic_vm_id);
+
if (vm_data->avic_logical_id_table_page)
__free_page(vm_data->avic_logical_id_table_page);
if (vm_data->avic_physical_id_table_page)
__free_page(vm_data->avic_physical_id_table_page);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+ hash_del(&vm_data->hnode);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}
static int avic_vm_init(struct kvm *kvm)
{
- int err = -ENOMEM;
+ unsigned long flags;
+ int vm_id, err = -ENOMEM;
struct kvm_arch *vm_data = &kvm->arch;
struct page *p_page;
struct page *l_page;
@@ -1300,6 +1420,11 @@ static int avic_vm_init(struct kvm *kvm)
if (!avic)
return 0;
+ vm_id = avic_get_next_vm_id();
+ if (vm_id < 0)
+ return vm_id;
+ vm_data->avic_vm_id = (u32)vm_id;
+
/* Allocating physical APIC ID table (4KB) */
p_page = alloc_page(GFP_KERNEL);
if (!p_page)
@@ -1316,6 +1441,10 @@ static int avic_vm_init(struct kvm *kvm)
vm_data->avic_logical_id_table_page = l_page;
clear_page(page_address(l_page));
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+ hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
return 0;
free_avic:
@@ -1323,31 +1452,34 @@ free_avic:
return err;
}
-/**
- * This function is called during VCPU halt/unhalt.
- */
-static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+static inline int
+avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
- u64 entry;
- int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
+ int ret = 0;
+ unsigned long flags;
+ struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
- if (!kvm_vcpu_apicv_active(vcpu))
- return;
-
- svm->avic_is_running = is_run;
+ if (!kvm_arch_has_assigned_device(vcpu->kvm))
+ return 0;
- /* ID = 0xff (broadcast), ID > 0xff (reserved) */
- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
- return;
+ /*
+ * Here, we go through the per-vcpu ir_list to update all existing
+ * interrupt remapping table entries targeting this vcpu.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
- entry = READ_ONCE(*(svm->avic_physical_id_cache));
- WARN_ON(is_run == !!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
+ if (list_empty(&svm->ir_list))
+ goto out;
- entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- if (is_run)
- entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ list_for_each_entry(ir, &svm->ir_list, node) {
+ ret = amd_iommu_update_ga(cpu, r, ir->data);
+ if (ret)
+ break;
+ }
+out:
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ return ret;
}
static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1374,6 +1506,8 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
+ svm->avic_is_running);
}
static void avic_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1385,10 +1519,27 @@ static void avic_vcpu_put(struct kvm_vcpu *vcpu)
return;
entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+ avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
+/**
+ * This function is called during VCPU halt/unhalt.
+ */
+static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ svm->avic_is_running = is_run;
+ if (is_run)
+ avic_vcpu_load(vcpu, vcpu->cpu);
+ else
+ avic_vcpu_put(vcpu);
+}
+
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1450,6 +1601,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
err = avic_init_backing_page(&svm->vcpu);
if (err)
goto free_page4;
+
+ INIT_LIST_HEAD(&svm->ir_list);
+ spin_lock_init(&svm->ir_list_lock);
}
/* We initialize this flag to true to make sure that the is_running
@@ -4246,6 +4400,209 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_vcpu_wake_up(vcpu);
}
+static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+{
+ unsigned long flags;
+ struct amd_svm_iommu_ir *cur;
+
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+ list_for_each_entry(cur, &svm->ir_list, node) {
+ if (cur->data != pi->ir_data)
+ continue;
+ list_del(&cur->node);
+ kfree(cur);
+ break;
+ }
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+}
+
+static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct amd_svm_iommu_ir *ir;
+
+ /**
+ * In some cases, the existing irte is updated and re-set,
+ * so we need to check here if it's already been added
+ * to the ir_list.
+ */
+ if (pi->ir_data && (pi->prev_ga_tag != 0)) {
+ struct kvm *kvm = svm->vcpu.kvm;
+ u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
+ struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+ struct vcpu_svm *prev_svm;
+
+ if (!prev_vcpu) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ prev_svm = to_svm(prev_vcpu);
+ svm_ir_list_del(prev_svm, pi);
+ }
+
+ /**
+ * Allocating a new amd_svm_iommu_ir entry, which will get
+ * added to the per-vcpu ir_list.
+ */
+ ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
+ if (!ir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ir->data = pi->ir_data;
+
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+ list_add(&ir->node, &svm->ir_list);
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+out:
+ return ret;
+}
+
+/**
+ * Note:
+ * The HW cannot support posting multicast/broadcast
+ * interrupts to a vCPU. So, we still use legacy interrupt
+ * remapping for these kinds of interrupts.
+ *
+ * For lowest-priority interrupts, we only support
+ * those with single CPU as the destination, e.g. user
+ * configures the interrupts via /proc/irq or uses
+ * irqbalance to make the interrupts single-CPU.
+ */
+static int
+get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
+ struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
+{
+ struct kvm_lapic_irq irq;
+ struct kvm_vcpu *vcpu = NULL;
+
+ kvm_set_msi_irq(kvm, e, &irq);
+
+ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
+ pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
+ __func__, irq.vector);
+ return -1;
+ }
+
+ pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
+ irq.vector);
+ *svm = to_svm(vcpu);
+ vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
+ vcpu_info->vector = irq.vector;
+
+ return 0;
+}
+
+/*
+ * svm_update_pi_irte - set IRTE for Posted-Interrupts
+ *
+ * @kvm: kvm
+ * @host_irq: host irq of the interrupt
+ * @guest_irq: gsi of the interrupt
+ * @set: set or unset PI
+ * returns 0 on success, < 0 on failure
+ */
+static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ struct kvm_kernel_irq_routing_entry *e;
+ struct kvm_irq_routing_table *irq_rt;
+ int idx, ret = -EINVAL;
+
+ if (!kvm_arch_has_assigned_device(kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP))
+ return 0;
+
+ pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
+ __func__, host_irq, guest_irq, set);
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
+
+ hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+ struct vcpu_data vcpu_info;
+ struct vcpu_svm *svm = NULL;
+
+ if (e->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+
+ /**
+ * Here, we set up legacy mode in the following cases:
+ * 1. When the interrupt cannot be targeted to a specific vcpu.
+ * 2. Unsetting posted interrupt.
+ * 3. APIC virtualization is disabled for the vcpu.
+ */
+ if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
+ kvm_vcpu_apicv_active(&svm->vcpu)) {
+ struct amd_iommu_pi_data pi;
+
+ /* Try to enable guest_mode in IRTE */
+ pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
+ pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
+ svm->vcpu.vcpu_id);
+ pi.is_guest_mode = true;
+ pi.vcpu_data = &vcpu_info;
+ ret = irq_set_vcpu_affinity(host_irq, &pi);
+
+ /**
+ * Here, we have successfully set up vcpu affinity in
+ * IOMMU guest mode. Now, we need to store the posted
+ * interrupt information in a per-vcpu ir_list so that
+ * we can reference it directly when we update vcpu
+ * scheduling information in IOMMU irte.
+ */
+ if (!ret && pi.is_guest_mode)
+ svm_ir_list_add(svm, &pi);
+ } else {
+ /* Use legacy mode in IRTE */
+ struct amd_iommu_pi_data pi;
+
+ /**
+ * Here, pi is used to:
+ * - Tell IOMMU to use legacy mode for this interrupt.
+ * - Retrieve ga_tag of prior interrupt remapping data.
+ */
+ pi.is_guest_mode = false;
+ ret = irq_set_vcpu_affinity(host_irq, &pi);
+
+ /**
+ * Check if the posted interrupt was previously
+ * set up with guest_mode by checking if the ga_tag
+ * was cached. If so, we need to clean up the per-vcpu
+ * ir_list.
+ */
+ if (!ret && pi.prev_ga_tag) {
+ int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
+ struct kvm_vcpu *vcpu;
+
+ vcpu = kvm_get_vcpu_by_id(kvm, id);
+ if (vcpu)
+ svm_ir_list_del(to_svm(vcpu), &pi);
+ }
+ }
+
+ if (!ret && svm) {
+ trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
+ host_irq, e->gsi,
+ vcpu_info.vector,
+ vcpu_info.pi_desc_addr, set);
+ }
+
+ if (ret < 0) {
+ pr_err("%s: failed to update PI IRTE\n", __func__);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return ret;
+}
+
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -5064,7 +5421,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
- .read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
.read_l1_tsc = svm_read_l1_tsc,
@@ -5078,6 +5434,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = svm_deliver_avic_intr,
+ .update_pi_irte = svm_update_pi_irte,
};
static int __init svm_init(void)
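
The AVIC GATAG macros added near the top of svm.c pack a 24-bit VM ID and an 8-bit vCPU ID into the guest tag that later comes back through avic_ga_log_notifier(), which unpacks it to find the right VM in svm_vm_data_hash and the right vCPU inside it. A trivial round-trip check of the packing (the IDs below are arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AVIC_VCPU_ID_BITS       8
#define AVIC_VCPU_ID_MASK       ((1 << AVIC_VCPU_ID_BITS) - 1)
#define AVIC_VM_ID_BITS         24
#define AVIC_VM_ID_MASK         ((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)        (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
                                 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)   ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)

int main(void)
{
        uint32_t vm_id   = 0xabcde;     /* one-based, <= AVIC_VM_ID_MASK */
        uint32_t vcpu_id = 3;
        uint32_t ga_tag  = AVIC_GATAG(vm_id, vcpu_id);

        printf("ga_tag = %#x\n", ga_tag);               /* 0xabcde03 */
        assert(AVIC_GATAG_TO_VMID(ga_tag) == vm_id);
        assert(AVIC_GATAG_TO_VCPUID(ga_tag) == vcpu_id);
        return 0;
}
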
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 121fdf6e9ed0..cf1b16dbc98a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -927,6 +927,8 @@ static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
+static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
+static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
@@ -939,6 +941,7 @@ static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
int size;
int order;
+ u32 basic_cap;
u32 revision_id;
u32 pin_based_exec_ctrl;
u32 cpu_based_exec_ctrl;
@@ -1215,6 +1218,11 @@ static inline bool cpu_has_vmx_ple(void)
SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}
+static inline bool cpu_has_vmx_basic_inout(void)
+{
+ return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
+}
+
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
return flexpriority_enabled && lapic_in_kernel(vcpu);
@@ -2518,10 +2526,17 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
else if (cpu_has_secondary_exec_ctrls() &&
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
- else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
+ if (is_long_mode(vcpu))
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+ else
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ } else {
+ if (is_long_mode(vcpu))
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
+ else
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
+ }
} else {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode;
@@ -2603,11 +2618,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
return host_tsc + tsc_offset;
}
-static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
-{
- return vmcs_read64(TSC_OFFSET);
-}
-
/*
* writes 'offset' into guest's timestamp counter offset register
*/
@@ -2877,6 +2887,8 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+ if (cpu_has_vmx_basic_inout())
+ *pdata |= VMX_BASIC_INOUT;
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
@@ -3457,7 +3469,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
return -EIO;
vmcs_conf->size = vmx_msr_high & 0x1fff;
- vmcs_conf->order = get_order(vmcs_config.size);
+ vmcs_conf->order = get_order(vmcs_conf->size);
+ vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
vmcs_conf->revision_id = vmx_msr_low;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
@@ -4678,28 +4691,49 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
msr, MSR_TYPE_R | MSR_TYPE_W);
}
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
{
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ if (apicv_active) {
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_R);
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_R);
+ } else {
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ msr, MSR_TYPE_R);
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ msr, MSR_TYPE_R);
+ }
}
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ if (apicv_active) {
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_R);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_R);
+ } else {
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ msr, MSR_TYPE_R);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ msr, MSR_TYPE_R);
+ }
}
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
+ if (apicv_active) {
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ msr, MSR_TYPE_W);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ msr, MSR_TYPE_W);
+ } else {
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ msr, MSR_TYPE_W);
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ msr, MSR_TYPE_W);
+ }
}
static bool vmx_get_enable_apicv(void)
@@ -5279,29 +5313,30 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (is_guest_mode(vcpu))
- return;
+ if (!is_guest_mode(vcpu)) {
+ if (!cpu_has_virtual_nmis()) {
+ /*
+ * Tracking the NMI-blocked state in software is built upon
+ * finding the next open IRQ window. This, in turn, depends on
+ * well-behaving guests: They have to keep IRQs disabled at
+ * least as long as the NMI handler runs. Otherwise we may
+ * cause NMI nesting, maybe breaking the guest. But as this is
+ * highly unlikely, we can live with the residual risk.
+ */
+ vmx->soft_vnmi_blocked = 1;
+ vmx->vnmi_blocked_time = 0;
+ }
- if (!cpu_has_virtual_nmis()) {
- /*
- * Tracking the NMI-blocked state in software is built upon
- * finding the next open IRQ window. This, in turn, depends on
- * well-behaving guests: They have to keep IRQs disabled at
- * least as long as the NMI handler runs. Otherwise we may
- * cause NMI nesting, maybe breaking the guest. But as this is
- * highly unlikely, we can live with the residual risk.
- */
- vmx->soft_vnmi_blocked = 1;
- vmx->vnmi_blocked_time = 0;
+ ++vcpu->stat.nmi_injections;
+ vmx->nmi_known_unmasked = false;
}
- ++vcpu->stat.nmi_injections;
- vmx->nmi_known_unmasked = false;
if (vmx->rmode.vm86_active) {
if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
+
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
}
@@ -6109,7 +6144,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
gla_validity = (exit_qualification >> 7) & 0x3;
- if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
+ if (gla_validity == 0x2) {
printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
@@ -6360,22 +6395,32 @@ static __init int hardware_setup(void)
if (!vmx_msr_bitmap_legacy_x2apic)
goto out2;
+ vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
+ goto out3;
+
vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode)
- goto out3;
+ goto out4;
vmx_msr_bitmap_longmode_x2apic =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
+ goto out5;
+
+ vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
+ goto out6;
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
- goto out6;
+ goto out7;
vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
- goto out7;
+ goto out8;
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -6394,7 +6439,7 @@ static __init int hardware_setup(void)
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out8;
+ goto out9;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -6461,20 +6506,35 @@ static __init int hardware_setup(void)
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+ /*
+ * enable_apicv && kvm_vcpu_apicv_active()
+ */
for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
+ vmx_disable_intercept_msr_read_x2apic(msr, true);
/* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
+ vmx_enable_intercept_msr_read_x2apic(0x839, true);
/* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
+ vmx_disable_intercept_msr_write_x2apic(0x808, true);
/* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
+ vmx_disable_intercept_msr_write_x2apic(0x80b, true);
/* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
+ vmx_disable_intercept_msr_write_x2apic(0x83f, true);
+
+ /*
+ * (enable_apicv && !kvm_vcpu_apicv_active()) ||
+ * !enable_apicv
+ */
+ /* TPR */
+ vmx_disable_intercept_msr_read_x2apic(0x808, false);
+ vmx_disable_intercept_msr_write_x2apic(0x808, false);
if (enable_ept) {
kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
@@ -6521,14 +6581,18 @@ static __init int hardware_setup(void)
return alloc_kvm_area();
-out8:
+out9:
free_page((unsigned long)vmx_vmwrite_bitmap);
-out7:
+out8:
free_page((unsigned long)vmx_vmread_bitmap);
+out7:
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
out6:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
+out5:
free_page((unsigned long)vmx_msr_bitmap_longmode);
+out4:
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
out3:
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
out2:
@@ -6544,7 +6608,9 @@ out:
static __exit void hardware_unsetup(void)
{
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
free_page((unsigned long)vmx_msr_bitmap_legacy);
free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
@@ -6726,7 +6792,7 @@ static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
/* TODO: not to reset guest simply here. */
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+ pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
@@ -7013,7 +7079,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.vmcs02_num = 0;
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_PINNED);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
vmx->nested.vmxon = true;
@@ -8435,12 +8501,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
return;
}
- /*
- * There is not point to enable virtualize x2apic without enable
- * apicv
- */
- if (!cpu_has_vmx_virtualize_x2apic_mode() ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (!cpu_has_vmx_virtualize_x2apic_mode())
return;
if (!cpu_need_tpr_shadow(vcpu))
@@ -9598,7 +9659,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
addr_field, maxphyaddr, count, addr);
return -EINVAL;
@@ -9671,13 +9732,13 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
for (i = 0; i < count; i++) {
if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
&e, sizeof(e))) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s cannot read MSR entry (%u, 0x%08llx)\n",
__func__, i, gpa + i * sizeof(e));
goto fail;
}
if (nested_vmx_load_msr_check(vcpu, &e)) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s check failed (%u, 0x%x, 0x%x)\n",
__func__, i, e.index, e.reserved);
goto fail;
@@ -9685,7 +9746,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
msr.index = e.index;
msr.data = e.value;
if (kvm_set_msr(vcpu, &msr)) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
goto fail;
@@ -9706,13 +9767,13 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
if (kvm_vcpu_read_guest(vcpu,
gpa + i * sizeof(e),
&e, 2 * sizeof(u32))) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s cannot read MSR entry (%u, 0x%08llx)\n",
__func__, i, gpa + i * sizeof(e));
return -EINVAL;
}
if (nested_vmx_store_msr_check(vcpu, &e)) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s check failed (%u, 0x%x, 0x%x)\n",
__func__, i, e.index, e.reserved);
return -EINVAL;
@@ -9720,7 +9781,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
msr_info.host_initiated = false;
msr_info.index = e.index;
if (kvm_get_msr(vcpu, &msr_info)) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s cannot read MSR (%u, 0x%x)\n",
__func__, i, e.index);
return -EINVAL;
@@ -9729,7 +9790,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
gpa + i * sizeof(e) +
offsetof(struct vmx_msr_entry, value),
&msr_info.data, sizeof(msr_info.data))) {
- pr_warn_ratelimited(
+ pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, msr_info.data);
return -EINVAL;
@@ -10500,6 +10561,9 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
+ if (nested_cpu_has_ept(vmcs12))
+ vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+
if (nested_cpu_has_vid(vmcs12))
vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
@@ -10793,7 +10857,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
* We are now running in L2, mmu_notifier will force to reload the
* page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
*/
- kvm_vcpu_reload_apic_access_page(vcpu);
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
/*
* Exiting from L2 to L1, we're now back to L1 which thinks it just
@@ -11274,7 +11338,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
.read_l1_tsc = vmx_read_l1_tsc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 699f8726539a..6c633de84dd7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1367,7 +1367,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
- u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+ u64 curr_offset = vcpu->arch.tsc_offset;
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
@@ -1413,6 +1413,12 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ kvm_x86_ops->write_tsc_offset(vcpu, offset);
+ vcpu->arch.tsc_offset = offset;
+}
+
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
@@ -1425,7 +1431,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_compute_tsc_offset(vcpu, data);
- ns = get_kernel_ns();
+ ns = ktime_get_boot_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
if (vcpu->arch.virtual_tsc_khz) {
@@ -1522,7 +1528,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
- kvm_x86_ops->write_tsc_offset(vcpu, offset);
+ kvm_vcpu_write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
@@ -1716,6 +1722,88 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
#endif
}
+static u64 __get_kvmclock_ns(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
+ struct kvm_arch *ka = &kvm->arch;
+ s64 ns;
+
+ if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
+ u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
+ } else {
+ ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+ }
+
+ return ns;
+}
+
+u64 get_kvmclock_ns(struct kvm *kvm)
+{
+ unsigned long flags;
+ s64 ns;
+
+ local_irq_save(flags);
+ ns = __get_kvmclock_ns(kvm);
+ local_irq_restore(flags);
+
+ return ns;
+}
+
+static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
+{
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+ struct pvclock_vcpu_time_info guest_hv_clock;
+
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return;
+
+ /* This VCPU is paused, but it's legal for a guest to read another
+ * VCPU's kvmclock, so we really have to follow the specification where
+ * it says that version is odd if data is being modified, and even after
+ * it is consistent.
+ *
+ * Version field updates must be kept separate. This is because
+ * kvm_write_guest_cached might use a "rep movs" instruction, and
+ * writes within a string instruction are weakly ordered. So there
+ * are three writes overall.
+ *
+ * As a small optimization, only write the version field in the first
+ * and third write. The vcpu->pv_time cache is still valid, because the
+ * version field is the first in the struct.
+ */
+ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+
+ vcpu->hv_clock.version = guest_hv_clock.version + 1;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
+
+ smp_wmb();
+
+ /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
+ vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
+
+ if (vcpu->pvclock_set_guest_stopped_request) {
+ vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+ vcpu->pvclock_set_guest_stopped_request = false;
+ }
+
+ trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
+
+ smp_wmb();
+
+ vcpu->hv_clock.version++;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
+}
+
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, tgt_tsc_khz;
@@ -1723,7 +1811,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1752,7 +1839,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
}
if (!use_master_clock) {
host_tsc = rdtsc();
- kernel_ns = get_kernel_ns();
+ kernel_ns = ktime_get_boot_ns();
}
tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
@@ -1777,8 +1864,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->pv_time_enabled)
- return 0;
+ /* With all the info we got, fill in the values */
if (kvm_has_tsc_control)
tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
@@ -1790,64 +1876,21 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hw_tsc_khz = tgt_tsc_khz;
}
- /* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_guest_tsc = tsc_timestamp;
- if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
- &guest_hv_clock, sizeof(guest_hv_clock))))
- return 0;
-
- /* This VCPU is paused, but it's legal for a guest to read another
- * VCPU's kvmclock, so we really have to follow the specification where
- * it says that version is odd if data is being modified, and even after
- * it is consistent.
- *
- * Version field updates must be kept separate. This is because
- * kvm_write_guest_cached might use a "rep movs" instruction, and
- * writes within a string instruction are weakly ordered. So there
- * are three writes overall.
- *
- * As a small optimization, only write the version field in the first
- * and third write. The vcpu->pv_time cache is still valid, because the
- * version field is the first in the struct.
- */
- BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
-
- vcpu->hv_clock.version = guest_hv_clock.version + 1;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
-
- smp_wmb();
-
- /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
-
- if (vcpu->pvclock_set_guest_stopped_request) {
- pvclock_flags |= PVCLOCK_GUEST_STOPPED;
- vcpu->pvclock_set_guest_stopped_request = false;
- }
-
/* If the host uses TSC clocksource, then it is stable */
+ pvclock_flags = 0;
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
- trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
-
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- smp_wmb();
-
- vcpu->hv_clock.version++;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
+ if (vcpu->pv_time_enabled)
+ kvm_setup_pvclock_page(v);
+ if (v == kvm_get_vcpu(v->kvm, 0))
+ kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
}
@@ -2746,7 +2789,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
- kvm_x86_ops->write_tsc_offset(vcpu, offset);
+ kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
if (kvm_lapic_hv_timer_in_use(vcpu) &&
@@ -4039,7 +4082,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
- s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
@@ -4051,10 +4093,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
local_irq_disable();
- now_ns = get_kernel_ns();
- delta = user_ns.clock - now_ns;
+ now_ns = __get_kvmclock_ns(kvm);
+ kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
local_irq_enable();
- kvm->arch.kvmclock_offset = delta;
kvm_gen_update_masterclock(kvm);
break;
}
@@ -4062,10 +4103,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
- local_irq_disable();
- now_ns = get_kernel_ns();
- user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
- local_irq_enable();
+ now_ns = get_kvmclock_ns(kvm);
+ user_ns.clock = now_ns;
user_ns.flags = 0;
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
@@ -6700,7 +6739,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_put_guest_xcr0(vcpu);
- /* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);
++vcpu->stat.exits;
@@ -7530,7 +7568,7 @@ int kvm_arch_hardware_enable(void)
* before any KVM threads can be running. Unfortunately, we can't
* bring the TSCs fully up to date with real time, as we aren't yet far
* enough into CPU bringup that we know how much real time has actually
- * elapsed; our helper function, get_kernel_ns() will be using boot
+ * elapsed; our helper function, ktime_get_boot_ns() will be using boot
* variables that haven't been updated yet.
*
* So we simply find the maximum observed TSC above, then record the
@@ -7765,6 +7803,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
mutex_init(&kvm->arch.apic_map_lock);
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+ kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
pvclock_update_vm_gtod_copy(kvm);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
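
The comment block in kvm_setup_pvclock_page() above describes a three-write publication protocol. As a minimal sketch of that protocol in isolation (a hypothetical helper, publish_time_info(), using plain stores instead of the kvm_write_guest_cached() calls the real code goes through), it looks roughly like this:

#include <linux/types.h>
#include <asm/barrier.h>	/* smp_wmb() */
#include <asm/pvclock-abi.h>	/* struct pvclock_vcpu_time_info */

static void publish_time_info(struct pvclock_vcpu_time_info *shared,
			      struct pvclock_vcpu_time_info scratch,
			      u32 guest_version)
{
	/* 1st write: make the guest-visible version odd ("update in progress") */
	scratch.version = guest_version + 1;
	shared->version = scratch.version;
	smp_wmb();

	/* 2nd write: the full payload, still carrying the odd version */
	*shared = scratch;
	smp_wmb();

	/* 3rd write: the version becomes even again, publishing the new data */
	shared->version = scratch.version + 1;
}

A guest that samples the page retries whenever it observes an odd version, or a version that changed across its copy, so the barriers between the three separate writes are what keep a half-updated payload from ever being consumed.
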
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a82ca466b62e..e8ff3e4ce38a 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -148,11 +148,6 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
return kvm_register_write(vcpu, reg, val);
}
-static inline u64 get_kernel_ns(void)
-{
- return ktime_get_boot_ns();
-}
-
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
return !(kvm->arch.disabled_quirks & quirk);
@@ -164,6 +159,7 @@ void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+u64 get_kvmclock_ns(struct kvm *kvm);
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 97062a635b77..5c6fc3577a49 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -23,8 +23,6 @@ obj-y += bus_numa.o
obj-$(CONFIG_AMD_NB) += amd_bus.o
obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
-obj-$(CONFIG_VMD) += vmd.o
-
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7b6a9d14c8c0..a4fdfa7dcc1b 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -677,6 +677,12 @@ static void set_dma_domain_ops(struct pci_dev *pdev)
static void set_dma_domain_ops(struct pci_dev *pdev) {}
#endif
+static void set_dev_domain_options(struct pci_dev *pdev)
+{
+ if (is_vmd(pdev->bus))
+ pdev->hotplug_user_indicators = 1;
+}
+
int pcibios_add_device(struct pci_dev *dev)
{
struct setup_data *data;
@@ -707,6 +713,7 @@ int pcibios_add_device(struct pci_dev *dev)
iounmap(data);
}
set_dma_domain_ops(dev);
+ set_dev_domain_options(dev);
return 0;
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 3a483cb5ac81..bedfab98077a 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -456,7 +456,7 @@ void __init xen_msi_init(void)
int __init pci_xen_hvm_init(void)
{
- if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
+ if (!xen_feature(XENFEAT_hvm_pirqs))
return 0;
#ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f1d2182e071f..c0fdd57da7aa 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -137,8 +137,10 @@ struct shared_info xen_dummy_shared_info;
void *xen_initial_gdt;
RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+
+static int xen_cpu_up_prepare(unsigned int cpu);
+static int xen_cpu_up_online(unsigned int cpu);
+static int xen_cpu_dead(unsigned int cpu);
/*
* Point at some empty memory to start with. We map the real shared_info
@@ -1519,10 +1521,7 @@ static void __init xen_pvh_early_guest_init(void)
if (!xen_feature(XENFEAT_auto_translated_physmap))
return;
- if (!xen_feature(XENFEAT_hvm_callback_vector))
- return;
-
- xen_have_vector_callback = 1;
+ BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
xen_pvh_early_cpu_init(0, false);
xen_pvh_set_cr_flags(0);
@@ -1538,6 +1537,24 @@ static void __init xen_dom0_set_legacy_features(void)
x86_platform.legacy.rtc = 1;
}
+static int xen_cpuhp_setup(void)
+{
+ int rc;
+
+ rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
+ "XEN_HVM_GUEST_PREPARE",
+ xen_cpu_up_prepare, xen_cpu_dead);
+ if (rc >= 0) {
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "XEN_HVM_GUEST_ONLINE",
+ xen_cpu_up_online, NULL);
+ if (rc < 0)
+ cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
+ }
+
+ return rc >= 0 ? 0 : rc;
+}
+
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
@@ -1639,6 +1656,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
possible map and a non-dummy shared_info. */
per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+ WARN_ON(xen_cpuhp_setup());
+
local_irq_disable();
early_boot_irqs_disabled = true;
@@ -1819,31 +1838,54 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN;
}
-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int xen_cpu_up_prepare(unsigned int cpu)
{
- int cpu = (long)hcpu;
- switch (action) {
- case CPU_UP_PREPARE:
+ int rc;
+
+ if (xen_hvm_domain()) {
+ /*
+ * This can happen if CPU was offlined earlier and
+ * offlining timed out in common_cpu_die().
+ */
+ if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ }
+
if (cpu_acpi_id(cpu) != U32_MAX)
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
else
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_vcpu_setup(cpu);
- if (xen_have_vector_callback) {
- if (xen_feature(XENFEAT_hvm_safe_pvclock))
- xen_setup_timer(cpu);
- }
- break;
- default:
- break;
}
- return NOTIFY_OK;
+
+ if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+
+ rc = xen_smp_intr_init(cpu);
+ if (rc) {
+ WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+ cpu, rc);
+ return rc;
+ }
+ return 0;
}
-static struct notifier_block xen_hvm_cpu_notifier = {
- .notifier_call = xen_hvm_cpu_notify,
-};
+static int xen_cpu_dead(unsigned int cpu)
+{
+ xen_smp_intr_free(cpu);
+
+ if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_teardown_timer(cpu);
+
+ return 0;
+}
+
+static int xen_cpu_up_online(unsigned int cpu)
+{
+ xen_init_lock_cpu(cpu);
+ return 0;
+}
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
@@ -1871,10 +1913,10 @@ static void __init xen_hvm_guest_init(void)
xen_panic_handler_init();
- if (xen_feature(XENFEAT_hvm_callback_vector))
- xen_have_vector_callback = 1;
+ BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+
xen_hvm_smp_init();
- register_cpu_notifier(&xen_hvm_cpu_notifier);
+ WARN_ON(xen_cpuhp_setup());
xen_unplug_emulated_devices();
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
@@ -1910,7 +1952,7 @@ bool xen_hvm_need_lapic(void)
return false;
if (!xen_hvm_domain())
return false;
- if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+ if (xen_feature(XENFEAT_hvm_pirqs))
return false;
return true;
}
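
The hunks above convert Xen's HVM CPU notifier to the hotplug state machine (cpuhp_setup_state_nocalls() and friends). For reference, a minimal, hypothetical module using the same API family, with a dynamically allocated online state, would look roughly like this:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_state;

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: CPU %u is coming online\n", cpu);
	return 0;	/* a negative return would abort the bring-up */
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: CPU %u is going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN requests a dynamically allocated state slot */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;	/* the allocated state number */
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The _nocalls variants used in xen_cpuhp_setup() install the callbacks without running them for CPUs that are already online.
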
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index de4144c24f1c..809b6c812654 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -89,7 +89,7 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
{
- area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+ area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);
if (area->ptes == NULL)
return -ENOMEM;
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index d37a0c7f82cb..90d1b83cf35f 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -61,7 +61,7 @@ static int check_platform_magic(void)
}
break;
default:
- printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version");
+ printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version\n");
return XEN_PLATFORM_ERR_PROTOCOL;
}
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 32bdc2c90297..b9fc52556bcc 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -547,8 +547,11 @@ void xen_pmu_init(int cpu)
return;
fail:
- pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
- cpu, err);
+ if (err == -EOPNOTSUPP || err == -ENOSYS)
+ pr_info_once("VPMU disabled by hypervisor.\n");
+ else
+ pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
+ cpu, err);
free_pages((unsigned long)xenpmu_data, 0);
}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0b4d04c8ab4d..9fa27ceeecfd 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -87,6 +87,12 @@ static void cpu_bringup(void)
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
+ /*
+ * identify_cpu() may have set logical_pkg_id to -1 due
+ * to incorrect phys_proc_id. Let's re-compute it.
+ */
+ topology_update_package_map(apic->cpu_present_to_apicid(cpu), cpu);
+
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
@@ -115,7 +121,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
{
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +165,7 @@ static void xen_smp_intr_free(unsigned int cpu)
per_cpu(xen_pmu_irq, cpu).name = NULL;
}
};
-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
{
int rc;
char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -475,8 +481,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
common_cpu_up(cpu, idle);
xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
- xen_init_lock_cpu(cpu);
/*
* PV VCPUs are always successfully taken down (see 'while' loop
@@ -495,10 +499,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
xen_pmu_init(cpu);
- rc = xen_smp_intr_init(cpu);
- if (rc)
- return rc;
-
rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
BUG_ON(rc);
@@ -769,47 +769,10 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
xen_init_lock_cpu(0);
}
-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
- int rc;
-
- /*
- * This can happen if CPU was offlined earlier and
- * offlining timed out in common_cpu_die().
- */
- if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- }
-
- /*
- * xen_smp_intr_init() needs to run before native_cpu_up()
- * so that IPI vectors are set up on the booting CPU before
- * it is marked online in native_cpu_up().
- */
- rc = xen_smp_intr_init(cpu);
- WARN_ON(rc);
- if (!rc)
- rc = native_cpu_up(cpu, tidle);
-
- /*
- * We must initialize the slowpath CPU kicker _after_ the native
- * path has executed. If we initialized it before none of the
- * unlocker IPI kicks would reach the booting CPU as the booting
- * CPU had not set itself 'online' in cpu_online_mask. That mask
- * is checked when IPIs are sent (on HVM at least).
- */
- xen_init_lock_cpu(cpu);
- return rc;
-}
-
void __init xen_hvm_smp_init(void)
{
- if (!xen_have_vector_callback)
- return;
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
- smp_ops.cpu_up = xen_hvm_cpu_up;
smp_ops.cpu_die = xen_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 963d62a35c82..c5c16dc4f694 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -1,5 +1,6 @@
#ifndef _XEN_SMP_H
+#ifdef CONFIG_SMP
extern void xen_send_IPI_mask(const struct cpumask *mask,
int vector);
extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
@@ -8,6 +9,18 @@ extern void xen_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_XEN_PVH
extern void xen_pvh_early_cpu_init(int cpu, bool entry);
#else
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 67356d29d74d..33d8f6a7829d 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -432,11 +432,6 @@ static void xen_hvm_setup_cpu_clockevents(void)
void __init xen_hvm_init_time_ops(void)
{
- /* vector callback is needed otherwise we cannot receive interrupts
- * on cpu > 0 and at this point we don't know how many cpus are
- * available */
- if (!xen_have_vector_callback)
- return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
"disable pv timer\n");
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 64336f666fb6..f61058617ada 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -13,16 +13,19 @@ config XTENSA
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
+ select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_MEMBLOCK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
select PERF_USE_VMALLOC
select VIRT_TO_BUS
help
@@ -209,7 +212,8 @@ config HOTPLUG_CPU
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
- default y
+ depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
+ default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
help
Earlier version initialized the MMU in the exception vector
before jumping to _startup in head.S and had an advantage that
@@ -236,6 +240,71 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
If in doubt, say Y.
+config KSEG_PADDR
+ hex "Physical address of the KSEG mapping"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
+ default 0x00000000
+ help
+ This is the physical address where KSEG is mapped. Please refer to
+ the chosen KSEG layout help for the required address alignment.
+ Unpacked kernel image (including vectors) must be located completely
+ within KSEG.
+ Physical memory below this address is not available to linux.
+
+ If unsure, leave the default value here.
+
+config KERNEL_LOAD_ADDRESS
+ hex "Kernel load address"
+ default 0x60003000 if !MMU
+ default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ This is the address where the kernel is loaded.
+ It is a virtual address for MMUv2 configurations and a physical address
+ for all other configurations.
+
+ If unsure, leave the default value here.
+
+config VECTORS_OFFSET
+ hex "Kernel vectors offset"
+ default 0x00003000
+ help
+ This is the offset of the kernel image from the relocatable vectors
+ base.
+
+ If unsure, leave the default value here.
+
+choice
+ prompt "KSEG layout"
+ depends on MMU
+ default XTENSA_KSEG_MMU_V2
+
+config XTENSA_KSEG_MMU_V2
+ bool "MMUv2: 128MB cached + 128MB uncached"
+ help
+ MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
+ at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
+ without cache.
+ KSEG_PADDR must be aligned to 128MB.
+
+config XTENSA_KSEG_256M
+ bool "256MB cached + 256MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+config XTENSA_KSEG_512M
+ bool "512MB cached + 512MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+endchoice
+
config HIGHMEM
bool "High Memory Support"
depends on MMU
@@ -331,7 +400,7 @@ config XTENSA_PLATFORM_XT2000
config XTENSA_PLATFORM_XTFPGA
bool "XTFPGA"
select ETHOC if ETHERNET
- select PLATFORM_WANT_DEFAULT_MEM
+ select PLATFORM_WANT_DEFAULT_MEM if !MMU
select SERIAL_CONSOLE
select XTENSA_CALIBRATE_CCOUNT
help
@@ -369,6 +438,7 @@ config USE_OF
bool "Flattened Device Tree support"
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
help
Include support for flattened device tree machine descriptions.
@@ -439,16 +509,9 @@ config DEFAULT_MEM_START
default 0x00000000 if MMU
default 0x60000000 if !MMU
help
- This is a fallback start address of the default memory area, it is
- used when no physical memory size is passed through DTB or through
- boot parameter from bootloader.
-
- In noMMU configuration the following parameters are derived from it:
- - kernel load address;
- - kernel entry point address;
- - relocatable vectors base address;
- - uBoot load address;
- - TASK_SIZE.
+ This is the base address of the default memory area.
+ The default memory area has a platform-specific meaning; it may be
+ used, e.g., for early cache initialization.
If unsure, leave the default value here.
@@ -457,11 +520,9 @@ config DEFAULT_MEM_SIZE
depends on PLATFORM_WANT_DEFAULT_MEM
default 0x04000000
help
- This is a fallback size of the default memory area, it is used when
- no physical memory size is passed through DTB or through boot
- parameter from bootloader.
-
- It's also used for TASK_SIZE calculation in noMMU configuration.
+ This is the size of the default memory area.
+ The default memory area has a platform-specific meaning; it may be
+ used, e.g., for early cache initialization.
If unsure, leave the default value here.
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index e54f2c9df63a..a30993054e9c 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -23,7 +23,7 @@ SECTIONS
*(.ResetVector.text)
}
- .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
+ .image KERNELOFFSET: AT (CONFIG_KERNEL_LOAD_ADDRESS)
{
_image_start = .;
*(image)
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index e6bf313613cf..b6aa85328ac0 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -35,7 +35,12 @@ _ResetVector:
.align 4
RomInitAddr:
- .word LOAD_MEMORY_ADDRESS
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ .word CONFIG_KERNEL_LOAD_ADDRESS
+#else
+ .word KERNELOFFSET
+#endif
RomBootParam:
.word _bootparam
_bootparam:
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index 403fcf23405c..0f4c417b4196 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,15 +4,7 @@
# for more details.
#
-ifdef CONFIG_MMU
-ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
-UIMAGE_LOADADDR = 0x00003000
-else
-UIMAGE_LOADADDR = 0xd0003000
-endif
-else
-UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) )
-endif
+UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS)
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/boot/dts/csp.dts b/arch/xtensa/boot/dts/csp.dts
new file mode 100644
index 000000000000..4082f26716b9
--- /dev/null
+++ b/arch/xtensa/boot/dts/csp.dts
@@ -0,0 +1,54 @@
+/dts-v1/;
+
+/ {
+ compatible = "cdns,xtensa-xtfpga";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "earlycon=cdns,0xfd000000,115200 console=tty0 console=ttyPS0,115200 root=/dev/ram0 rw earlyprintk xilinx_uartps.rx_trigger_level=32 loglevel=8 nohz=off ignore_loglevel";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "cdns,xtensa-cpu";
+ reg = <0>;
+ };
+ };
+
+ pic: pic {
+ compatible = "cdns,xtensa-pic";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ clocks {
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ uart0: serial@0d000000 {
+ compatible = "xlnx,xuartps", "cdns,uart-r1p8";
+ clocks = <&osc>, <&osc>;
+ clock-names = "uart_clk", "pclk";
+ reg = <0x0d000000 0x1000>;
+ interrupts = <0 1>;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index cd45f9c2c448..91616a9d79df 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -19,9 +19,7 @@
cpu@0 {
compatible = "cdns,xtensa-cpu";
reg = <0>;
- /* Filled in by platform_setup from FPGA register
- * clock-frequency = <100000000>;
- */
+ clocks = <&osc>;
};
};
@@ -36,11 +34,6 @@
};
clocks {
- osc: main-oscillator {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- };
-
clk54: clk54 {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -54,6 +47,12 @@
compatible = "simple-bus";
ranges = <0x00000000 0xf0000000 0x10000000>;
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "cdns,xtfpga-clock";
+ reg = <0x0d020004 0x4>;
+ };
+
serial0: serial@0d050020 {
device_type = "serial";
compatible = "ns16550a";
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index c4904db15582..8d16925765cb 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -33,7 +33,7 @@ CONFIG_HIGHMEM=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705"
# CONFIG_COMPACTION is not set
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
new file mode 100644
index 000000000000..f2d3094aa1d1
--- /dev/null
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -0,0 +1,122 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_USELIB=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="$$KERNEL_INITRAMFS_SOURCE"
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="csp"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="csp"
+# CONFIG_COMPACTION is not set
+CONFIG_XTFPGA_LCD=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_RCU_TRACE=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_S32C1I_SELFTEST is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index 721df1214bc3..4bcc76b02109 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -1,204 +1,15 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11-rc2
-# Tue Mar 1 16:36:53 2005
-#
-# CONFIG_FRAME_POINTER is not set
-CONFIG_XTENSA=y
-# CONFIG_UID16 is not set
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_HAVE_DEC_LOCK=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-CONFIG_KOBJECT_UEVENT=y
-# CONFIG_IKCONFIG is not set
-# CONFIG_EXPERT is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
-# CONFIG_MODULE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-CONFIG_XTENSA_ARCH_LINUX_BE=y
-# CONFIG_XTENSA_ARCH_LINUX_LE is not set
-# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
-# CONFIG_XTENSA_ARCH_S5 is not set
-# CONFIG_XTENSA_CUSTOM is not set
-CONFIG_MMU=y
-# CONFIG_XTENSA_UNALIGNED_USER is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-
-#
-# Platform options
-#
-# CONFIG_XTENSA_PLATFORM_ISS is not set
CONFIG_XTENSA_PLATFORM_XT2000=y
-CONFIG_XTENSA_CALIBRATE_CCOUNT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target"
-
-#
-# Bus options
-#
-CONFIG_PCI=y
-# CONFIG_PCI_LEGACY_PROC is not set
-# CONFIG_PCI_NAMES is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-# CONFIG_PCCARD is not set
-
-#
-# PC-card bridges
-#
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Exectuable file formats
-#
-CONFIG_KCORE_ELF=y
-CONFIG_BINFMT_ELF=y
+CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target memmap=128M@0"
CONFIG_BINFMT_MISC=y
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
-# CONFIG_BLK_DEV_RAM is not set
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Networking support
-#
CONFIG_NET=y
-
-#
-# Networking options
-#
-# CONFIG_PACKET is not set
-# CONFIG_NETLINK_DEV is not set
CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -209,47 +20,10 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
-# CONFIG_NET_SCH_HFSC is not set
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
@@ -257,399 +31,24 @@ CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
-# CONFIG_NET_SCH_NETEM is not set
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_QOS=y
-CONFIG_NET_ESTIMATOR=y
-CONFIG_NET_CLS=y
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
-# CONFIG_CLS_U32_PERF is not set
-# CONFIG_NET_CLS_IND is not set
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
-CONFIG_XT2000_SONIC=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
# CONFIG_NET_VENDOR_3COM is not set
-
-#
-# Tulip family network device support
-#
-# CONFIG_NET_TULIP is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_PCI is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-# CONFIG_ACENIC is not set
-# CONFIG_DL2K is not set
-# CONFIG_E1000 is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-# CONFIG_TIGON3 is not set
-
-#
-# Ethernet (10000 Mbit)
-#
-# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-CONFIG_NET_RADIO=y
-
-#
-# Obsolete Wireless cards support (pre-802.11)
-#
-CONFIG_STRIP=m
-
-#
-# Wireless 802.11b ISA/PCI cards support
-#
-CONFIG_HERMES=m
-# CONFIG_PLX_HERMES is not set
-# CONFIG_TMD_HERMES is not set
-# CONFIG_PCI_HERMES is not set
-# CONFIG_ATMEL is not set
-
-#
-# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-#
-# CONFIG_PRISM54 is not set
-CONFIG_NET_WIRELESS=y
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-# CONFIG_SERIO_RAW is not set
-
-#
-# Input Device Drivers
-#
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
+# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Console display driver support
-#
# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# InfiniBand support
-#
-# CONFIG_INFINIBAND is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-# CONFIG_PROC_KCORE is not set
-CONFIG_SYSFS=y
-CONFIG_DEVFS_FS=y
-# CONFIG_DEVFS_MOUNT is not set
-# CONFIG_DEVFS_DEBUG is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_EXPORTFS is not set
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_FRAME_POINTER is not set
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_KGDB is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Hardware crypto devices
-#
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC32 is not set
-# CONFIG_LIBCRC32C is not set
+CONFIG_DEBUG_KERNEL=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index d9444f01f4da..744adeaf2945 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -32,7 +32,7 @@ CONFIG_HIGHMEM=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705"
# CONFIG_COMPACTION is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 44c6764d9146..4bb5b76d9524 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -1,758 +1,34 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.34-rc6
-# Tue Aug 3 00:10:54 2010
-#
-# CONFIG_FRAME_POINTER is not set
-CONFIG_ZONE_DMA=y
-CONFIG_XTENSA=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT_MAP=y
-CONFIG_HZ=100
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-CONFIG_CONSTRUCTORS=y
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
-
-#
-# RCU Subsystem
-#
-CONFIG_TREE_RCU=y
-# CONFIG_TREE_PREEMPT_RCU is not set
-# CONFIG_TINY_RCU is not set
-# CONFIG_RCU_TRACE is not set
-CONFIG_RCU_FANOUT=32
-# CONFIG_RCU_FANOUT_EXACT is not set
-# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
-# CONFIG_SYSFS_DEPRECATED_V2 is not set
-# CONFIG_RELAY is not set
-# CONFIG_NAMESPACES is not set
-# CONFIG_BLK_DEV_INITRD is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SYSCTL=y
-CONFIG_ANON_INODES=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-
-#
-# Kernel Performance Events And Counters
-#
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-CONFIG_COMPAT_BRK=y
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-# CONFIG_SLOB is not set
-# CONFIG_PROFILING is not set
-
-#
-# GCOV-based kernel profiling
-#
-# CONFIG_SLOW_WORK is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-# CONFIG_MODULES is not set
-CONFIG_BLOCK=y
-CONFIG_LBDAF=y
-CONFIG_BLK_DEV_BSG=y
-# CONFIG_BLK_DEV_INTEGRITY is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
-CONFIG_DEFAULT_NOOP=y
-CONFIG_DEFAULT_IOSCHED="noop"
-# CONFIG_INLINE_SPIN_TRYLOCK is not set
-# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK is not set
-# CONFIG_INLINE_SPIN_LOCK_BH is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
-# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-# CONFIG_UNINLINE_SPIN_UNLOCK is not set
-# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
-CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
-# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_READ_TRYLOCK is not set
-# CONFIG_INLINE_READ_LOCK is not set
-# CONFIG_INLINE_READ_LOCK_BH is not set
-# CONFIG_INLINE_READ_LOCK_IRQ is not set
-# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
-CONFIG_INLINE_READ_UNLOCK=y
-# CONFIG_INLINE_READ_UNLOCK_BH is not set
-CONFIG_INLINE_READ_UNLOCK_IRQ=y
-# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
-# CONFIG_INLINE_WRITE_TRYLOCK is not set
-# CONFIG_INLINE_WRITE_LOCK is not set
-# CONFIG_INLINE_WRITE_LOCK_BH is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
-# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
-CONFIG_INLINE_WRITE_UNLOCK=y
-# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
-CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
-# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
-# CONFIG_MUTEX_SPIN_ON_OWNER is not set
-# CONFIG_FREEZER is not set
-CONFIG_MMU=y
-# CONFIG_VARIANT_IRQ_SWITCH is not set
-
-#
-# Processor type and features
-#
-CONFIG_XTENSA_VARIANT_FSF=y
-# CONFIG_XTENSA_VARIANT_DC232B is not set
-# CONFIG_XTENSA_UNALIGNED_USER is not set
-# CONFIG_PREEMPT is not set
-CONFIG_XTENSA_CALIBRATE_CCOUNT=y
-CONFIG_SERIAL_CONSOLE=y
-CONFIG_XTENSA_ISS_NETWORK=y
-
-#
-# Bus options
-#
# CONFIG_PCI is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Platform options
-#
-CONFIG_XTENSA_PLATFORM_ISS=y
-# CONFIG_XTENSA_PLATFORM_XT2000 is not set
-# CONFIG_GENERIC_CALIBRATE_DELAY is not set
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_PHYS_ADDR_T_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-# CONFIG_KSM is not set
-CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
-
-#
-# Executable file formats
-#
-CONFIG_KCORE_ELF=y
-CONFIG_BINFMT_ELF=y
+CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target memmap=128M@0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_HAVE_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
-
-#
-# Networking options
-#
CONFIG_PACKET=y
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
-# CONFIG_NET_KEY is not set
CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_LRO=y
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_RDS is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_PHONET is not set
-# CONFIG_IEEE802154 is not set
-# CONFIG_NET_SCHED is not set
-# CONFIG_DCB is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-# CONFIG_LIB80211 is not set
-
-#
-# CFG80211 needs to be enabled for MAC80211
-#
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
# CONFIG_STANDALONE is not set
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
-
-#
-# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
-#
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-# CONFIG_BLK_DEV_HD is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_HAVE_IDE is not set
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI_MOD=y
-# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
-# CONFIG_ISDN is not set
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
-# CONFIG_INPUT_SPARSEKMAP is not set
-
-#
-# Userland interfaces
-#
# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
-CONFIG_DEVKMEM=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-# CONFIG_SERIAL_TIMBERDALE is not set
-CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_IPMI_HANDLER is not set
-CONFIG_HW_RANDOM=y
-# CONFIG_HW_RANDOM_TIMERIOMEM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-# CONFIG_SPI is not set
-
-#
-# PPS support
-#
-# CONFIG_PPS is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-CONFIG_HWMON=y
-# CONFIG_HWMON_VID is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Native drivers
-#
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_F71882FG is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_SHT15 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
-
-#
-# Watchdog Device Drivers
-#
CONFIG_SOFT_WATCHDOG=y
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_TMIO is not set
-# CONFIG_REGULATOR is not set
-# CONFIG_MEDIA_SUPPORT is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Console display driver support
-#
# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_SOUND is not set
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-# CONFIG_HIDRAW is not set
-# CONFIG_HID_PID is not set
-
-#
-# Special HID drivers
-#
-CONFIG_USB_SUPPORT=y
-# CONFIG_USB_ARCH_HAS_HCD is not set
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB_ARCH_HAS_EHCI is not set
-# CONFIG_USB_OTG_WHITELIST is not set
-# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-
-#
-# Enable Host or Gadget support to see Inventra options
-#
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# OTG and related infrastructure
-#
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_ACCESSIBILITY is not set
-# CONFIG_RTC_CLASS is not set
-# CONFIG_DMADEVICES is not set
-# CONFIG_AUXDISPLAY is not set
-# CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
-# CONFIG_STAGING is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
-# CONFIG_NILFS2_FS is not set
-CONFIG_FILE_LOCKING=y
-CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY is not set
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-
-#
-# Caches
-#
-# CONFIG_FSCACHE is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_SYSFS=y
CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-CONFIG_MISC_FILESYSTEMS=y
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_LOGFS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_SQUASHFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_OMFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CEPH_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
-# CONFIG_DLM is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_WARN_DEPRECATED=y
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_STRIP_ASM_SYMS is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_FRAME_POINTER is not set
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-# CONFIG_SLUB_STATS is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_DEBUG_CREDENTIALS is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_DETECTOR=y
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_SAMPLES is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
-# CONFIG_DEFAULT_SECURITY_SELINUX is not set
-# CONFIG_DEFAULT_SECURITY_SMACK is not set
-# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
-CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_DEFAULT_SECURITY=""
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-# CONFIG_CRYPTO_FIPS is not set
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-# CONFIG_CRYPTO_MANAGER is not set
-# CONFIG_CRYPTO_MANAGER2 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-# CONFIG_CRYPTO_AUTHENC is not set
-
-#
-# Authenticated Encryption with Associated Data
-#
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_SEQIV is not set
-
-#
-# Block modes
-#
-# CONFIG_CRYPTO_CBC is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_CTS is not set
-# CONFIG_CRYPTO_ECB is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_XTS is not set
-
-#
-# Hash modes
-#
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_VMAC is not set
-
-#
-# Digest
-#
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_GHASH is not set
-# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_RMD128 is not set
-# CONFIG_CRYPTO_RMD160 is not set
-# CONFIG_CRYPTO_RMD256 is not set
-# CONFIG_CRYPTO_RMD320 is not set
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_WP512 is not set
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_FCRYPT is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_SALSA20 is not set
-# CONFIG_CRYPTO_SEED is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-
-#
-# Compression
-#
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_ZLIB is not set
-# CONFIG_CRYPTO_LZO is not set
-
-#
-# Random Number Generation
-#
CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_HW=y
-# CONFIG_BINARY_PRINTF is not set
-
-#
-# Library routines
-#
-CONFIG_GENERIC_FIND_LAST_BIT=y
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
-# CONFIG_CRC32 is not set
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_DMA=y
-CONFIG_NLATTR=y
-CONFIG_LD_NO_RELAX=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 337d5ba2d285..78c2529d0459 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -37,7 +37,7 @@ CONFIG_PREEMPT=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="earlycon=uart8250,mmio32,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705_nommu"
CONFIG_DEFAULT_MEM_SIZE=0x10000000
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 61f943c95619..14e3ca353ac8 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -36,7 +36,7 @@ CONFIG_HOTPLUG_CPU=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="lx200mx"
# CONFIG_COMPACTION is not set
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 3f44fa2a53e9..d3490189792b 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -48,7 +48,7 @@ static inline int ffz(unsigned long x)
* __ffs: Find first bit set in word. Return 0 for bit 0
*/
-static inline int __ffs(unsigned long x)
+static inline unsigned long __ffs(unsigned long x)
{
return 31 - __cntlz(x & -x);
}
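
The __ffs() hunk above changes only the return type, presumably to match the prototype generic code expects (unsigned long, as in the asm-generic version) and avoid type mismatches in callers. A rough worked example of the computation, assuming a 32-bit word and that __cntlz() counts leading zeros:

    /*
     * x          = 0x18
     * x & -x     = 0x08   (isolate the lowest set bit)
     * __cntlz()  = 28     (leading zeros of 0x08 in 32 bits)
     * 31 - 28    = 3      so __ffs(0x18) == 3
     */
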
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index e0f9e1109c83..2041abb10a23 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -69,26 +69,23 @@
.endm
-#if XCHAL_DCACHE_LINE_LOCKABLE
-
.macro ___unlock_dcache_all ar at
-#if XCHAL_DCACHE_SIZE
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
#endif
.endm
-#endif
-
-#if XCHAL_ICACHE_LINE_LOCKABLE
.macro ___unlock_icache_all ar at
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
-#endif
+
.macro ___flush_invalidate_dcache_all ar at
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 62b507deea9d..0d30403b6c95 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -59,6 +59,11 @@ enum fixed_addresses {
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
+ /* Check if this memory layout is broken because fixmap overlaps page
+ * table.
+ */
+ BUILD_BUG_ON(FIXADDR_START <
+ XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 01cef6b40829..6e070db1022e 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -68,6 +68,11 @@ void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE <
+ XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 7a1e075969a3..42410f253597 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -77,13 +77,16 @@
.align 4
1: movi a2, 0x10000000
- movi a3, 0x18000000
- add a2, a2, a0
-9: bgeu a2, a3, 9b /* PC is out of the expected range */
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
- movi a2, 0x40000000 | XCHAL_SPANNING_WAY
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
idtlb a2
iitlb a2
isync
@@ -95,14 +98,14 @@
srli a3, a0, 27
slli a3, a3, 27
addi a3, a3, CA_BYPASS
- addi a7, a2, -1
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
wdtlb a3, a7
witlb a3, a7
isync
slli a4, a0, 5
srli a4, a4, 5
- addi a5, a2, -6
+ addi a5, a2, -XCHAL_SPANNING_WAY
add a4, a4, a5
jx a4
@@ -116,35 +119,48 @@
add a5, a5, a4
bne a5, a2, 3b
- /* Step 4: Setup MMU with the old V2 mappings. */
+ /* Step 4: Setup MMU with the requested static mappings. */
+
movi a6, 0x01000000
wsr a6, ITLBCFG
wsr a6, DTLBCFG
isync
- movi a5, 0xd0000005
- movi a4, CA_WRITEBACK
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
- movi a5, 0xd8000005
- movi a4, CA_BYPASS
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
- movi a5, XCHAL_KIO_CACHED_VADDR + 6
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
- movi a5, XCHAL_KIO_BYPASS_VADDR + 6
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
isync
- /* Jump to self, using MMU v2 mappings. */
+ /* Jump to self, using final mappings. */
movi a4, 1f
jx a4
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000000..561f8729bcde
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,74 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#else
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+
+#endif
+
+#endif
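
The new kmem_layout.h above becomes the single place where the kernel's fixed virtual-layout constants are chosen. A minimal sketch of how the constants resolve for one configuration (hypothetical values, assuming CONFIG_XTENSA_KSEG_256M=y and CONFIG_KSEG_PADDR=0x60000000):

    /*
     * XCHAL_KSEG_CACHED_VADDR = 0xb0000000
     * XCHAL_KSEG_BYPASS_VADDR = 0xc0000000
     * XCHAL_KSEG_SIZE         = 0x10000000   (256 MiB)
     * XCHAL_KSEG_ALIGNMENT    = 0x10000000
     * XCHAL_KSEG_PADDR        = 0x60000000   (256 MiB aligned, so the
     *                                          #error check passes)
     *
     * i.e. RAM at 0x60000000..0x6fffffff appears cached at
     * 0xb0000000..0xbfffffff and uncached at 0xc0000000..0xcfffffff.
     */
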
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index ad38500471fa..976b1d70edbc 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -15,15 +15,7 @@
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
-
-/*
- * Fixed TLB translations in the processor.
- */
-
-#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
-#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
-#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
-#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#include <asm/kmem_layout.h>
/*
* PAGE_SHIFT determines the page size
@@ -35,10 +27,13 @@
#ifdef CONFIG_MMU
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN XCHAL_KSEG_SIZE
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
#else
-#define PAGE_OFFSET __XTENSA_UL_CONST(0)
-#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START
+#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
#define PGTABLE_START 0x80000000
@@ -167,10 +162,12 @@ void copy_user_highpage(struct page *to, struct page *from,
* addresses.
*/
-#define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
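
With a configurable KSEG physical base, the linear-map conversion can no longer be a bare +/- PAGE_OFFSET; the updated __pa()/__va() subtract one offset and add the other. A short worked example using the hypothetical 256 MiB layout sketched above (PAGE_OFFSET = 0xb0000000, PHYS_OFFSET = 0x60000000):

    /*
     * __pa(0xb0001000) = 0xb0001000 - 0xb0000000 + 0x60000000 = 0x60001000
     * __va(0x60001000) = 0x60001000 - 0x60000000 + 0xb0000000 = 0xb0001000
     *
     * For the traditional layout (PAGE_OFFSET = 0xd0000000, PHYS_OFFSET = 0)
     * this reduces to the old +/- PAGE_OFFSET behaviour.
     */
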
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index fb02fdc5ecee..8aa0e0d9cbb2 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -13,6 +13,7 @@
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
+#include <asm/kmem_layout.h>
/*
* We only use two ring levels, user and kernel space.
@@ -68,9 +69,9 @@
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
*/
-#define VMALLOC_START 0xC0000000
-#define VMALLOC_END 0xC7FEFFFF
-#define TLBTEMP_BASE_1 0xC7FF0000
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
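
VMALLOC_START is now derived from the KSEG virtual base instead of being hard-coded. For the default MMUv2-style layout (XCHAL_KSEG_CACHED_VADDR = 0xd0000000) the derived values are identical to the constants that were removed:

    /*
     * VMALLOC_START  = 0xd0000000 - 0x10000000 = 0xc0000000
     * VMALLOC_END    = 0xc0000000 + 0x07feffff = 0xc7feffff
     * TLBTEMP_BASE_1 = 0xc7feffff + 1          = 0xc7ff0000
     */
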
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 32e98f27ce97..f8fbef67bc5f 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -69,4 +69,10 @@ extern int platform_pcibios_fixup (void);
*/
extern void platform_calibrate_ccount (void);
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index d2e40d39c615..b42d68bfe3cf 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -37,7 +37,7 @@
#ifdef CONFIG_MMU
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
#else
-#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
#endif
#define STACK_TOP TASK_SIZE
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
index c015c5c8e3f7..552cdfd8590e 100644
--- a/arch/xtensa/include/asm/sysmem.h
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -11,27 +11,8 @@
#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H
-#define SYSMEM_BANKS_MAX 31
+#include <linux/memblock.h>
-struct meminfo {
- unsigned long start;
- unsigned long end;
-};
-
-/*
- * Bank array is sorted by .start.
- * Banks don't overlap and there's at least one page gap
- * between adjacent bank entries.
- */
-struct sysmem_info {
- int nr_banks;
- struct meminfo bank[SYSMEM_BANKS_MAX];
-};
-
-extern struct sysmem_info sysmem;
-
-int add_sysmem_bank(unsigned long start, unsigned long end);
-int mem_reserve(unsigned long, unsigned long, int);
void bootmem_init(void);
void zones_init(void);
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 288c776736d3..77d41cc7a688 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -20,6 +20,7 @@
#include <variant/core.h>
#include <platform/hardware.h>
+#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
@@ -47,61 +48,42 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#if defined(CONFIG_MMU)
-/* Will Become VECBASE */
-#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
-
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* Image Virtual Start Address */
-#define KERNELOFFSET 0xD0003000
-
-#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
- /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
- #define LOAD_MEMORY_ADDRESS 0x00003000
+#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
+ CONFIG_KERNEL_LOAD_ADDRESS - \
+ XCHAL_KSEG_PADDR)
#else
- /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
- #define LOAD_MEMORY_ADDRESS 0xD0003000
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
-#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \
- XCHAL_RESET_VECTOR1_PADDR)
-
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
- /* VECBASE */
- #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000)
+/* Location of the start of the kernel text, _start */
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
- /* Location of the start of the kernel text, _start */
- #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000)
-
- /* Loaded just above possibly live vectors */
- #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
-
-#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#endif /* CONFIG_MMU */
-#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
-
-/* Used to set VECBASE register */
-#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
-#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
-#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
-#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
-#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
-#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
-#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
-#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
-#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
-#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
-
-#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
-#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
-
-#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
/*
* These XCHAL_* #defines from varian/core.h
@@ -109,7 +91,6 @@ static inline unsigned long xtensa_get_kio_paddr(void)
* constants are defined above and should be used.
*/
#undef XCHAL_VECBASE_RESET_VADDR
-#undef XCHAL_RESET_VECTOR0_VADDR
#undef XCHAL_USER_VECTOR_VADDR
#undef XCHAL_KERNEL_VECTOR_VADDR
#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
@@ -119,9 +100,8 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#undef XCHAL_INTLEVEL4_VECTOR_VADDR
#undef XCHAL_INTLEVEL5_VECTOR_VADDR
#undef XCHAL_INTLEVEL6_VECTOR_VADDR
-#undef XCHAL_DEBUG_VECTOR_VADDR
-#undef XCHAL_NMI_VECTOR_VADDR
#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
#else
@@ -134,6 +114,7 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
#endif
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
index 87ec7ae73cb1..2efc921506c4 100644
--- a/arch/xtensa/include/uapi/asm/types.h
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -18,7 +18,8 @@
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
-# define __XTENSA_UL_CONST(x) x##UL
+# define ___XTENSA_UL_CONST(x) x##UL
+# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x)
#endif
#ifndef __ASSEMBLY__
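
The extra level of indirection in __XTENSA_UL_CONST() lets a macro argument (for example CONFIG_KSEG_PADDR, which kmem_layout.h now feeds through it) expand before the UL suffix is pasted on. A minimal stand-alone illustration with hypothetical names, outside the kernel:

    #define PADDR_RAW 0x60000000

    /* one level: the argument is not macro-expanded before ## */
    #define UL1(x)   x##UL       /* UL1(PADDR_RAW) -> PADDR_RAWUL (undefined) */

    /* two levels: x expands first, then UL is pasted */
    #define __UL2(x) x##UL
    #define UL2(x)   __UL2(x)    /* UL2(PADDR_RAW) -> 0x60000000UL */
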
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index b95c30594355..de9b14b2d348 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -754,7 +754,20 @@ __SYSCALL(340, sys_bpf, 3)
#define __NR_execveat 341
__SYSCALL(341, sys_execveat, 5)
-#define __NR_syscall_count 342
+#define __NR_userfaultfd 342
+__SYSCALL(342, sys_userfaultfd, 1)
+#define __NR_membarrier 343
+__SYSCALL(343, sys_membarrier, 2)
+#define __NR_mlock2 344
+__SYSCALL(344, sys_mlock2, 3)
+#define __NR_copy_file_range 345
+__SYSCALL(345, sys_copy_file_range, 6)
+#define __NR_preadv2 346
+__SYSCALL(346, sys_preadv2, 6)
+#define __NR_pwritev2 347
+__SYSCALL(347, sys_pwritev2, 6)
+
+#define __NR_syscall_count 348
/*
* sysxtensa syscall handler
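
The unistd.h hunk above wires up six syscalls that already exist in generic code (userfaultfd through pwritev2) and bumps __NR_syscall_count accordingly. A hedged usage sketch, not part of the patch: until the C library grows wrappers, the new entries can be reached through syscall(2), assuming a toolchain whose headers already export the new __NR_* numbers:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* hypothetical helper; cmd/flags follow the membarrier(2) ABI */
    static inline long my_membarrier(int cmd, int flags)
    {
            return syscall(__NR_membarrier, cmd, flags);
    }
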
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fe8f7e7efb9d..fa04d9d368a7 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1632,10 +1632,11 @@ ENTRY(fast_second_level_miss)
* The messy computation for 'pteval' above really simplifies
* into the following:
*
- * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
+ * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
+ * | PAGE_DIRECTORY
*/
- movi a1, (-PAGE_OFFSET) & 0xffffffff
+ movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
add a0, a0, a1 # pmdval - PAGE_OFFSET
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bc4f4bf05099..23ce62e60435 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -113,7 +113,7 @@ ENTRY(_startup)
movi a0, 0
#if XCHAL_HAVE_VECBASE
- movi a2, VECBASE_RESET_VADDR
+ movi a2, VECBASE_VADDR
wsr a2, vecbase
#endif
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 143251ede897..88a044af7504 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -22,7 +23,6 @@
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
-#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -114,7 +114,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
- return add_sysmem_bank(mi->start, mi->end);
+ return memblock_add(mi->start, mi->end - mi->start);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -188,7 +188,6 @@ static int __init parse_bootparam(const bp_tag_t* tag)
}
#ifdef CONFIG_OF
-bool __initdata dt_memory_scan = false;
#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -228,11 +227,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- if (!dt_memory_scan)
- return;
-
size &= PAGE_MASK;
- add_sysmem_bank(base, base + size);
+ memblock_add(base, size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -242,9 +238,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
void __init early_init_devtree(void *params)
{
- if (sysmem.nr_banks == 0)
- dt_memory_scan = true;
-
early_init_dt_scan(params);
of_scan_flat_dt(xtensa_dt_io_area, NULL);
@@ -252,14 +245,6 @@ void __init early_init_devtree(void *params)
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
-static int __init xtensa_device_probe(void)
-{
- of_clk_init(NULL);
- return 0;
-}
-
-device_initcall(xtensa_device_probe);
-
#endif /* CONFIG_OF */
/*
@@ -277,12 +262,6 @@ void __init init_arch(bp_tag_t *bp_start)
early_init_devtree(dtb_start);
#endif
- if (sysmem.nr_banks == 0) {
- add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
- PLATFORM_DEFAULT_MEM_START +
- PLATFORM_DEFAULT_MEM_SIZE);
- }
-
#ifdef CONFIG_CMDLINE_BOOL
if (!command_line[0])
strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
@@ -452,6 +431,10 @@ static int __init check_s32c1i(void)
early_initcall(check_s32c1i);
#endif /* CONFIG_S32C1I_SELFTEST */
+static inline int mem_reserve(unsigned long start, unsigned long end)
+{
+ return memblock_reserve(start, end - start);
+}
void __init setup_arch(char **cmdline_p)
{
@@ -463,54 +446,54 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
- __pa(initrd_end), 0) == 0;
+ __pa(initrd_end)) == 0;
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
}
#endif
- mem_reserve(__pa(&_stext),__pa(&_end), 1);
+ mem_reserve(__pa(&_stext), __pa(&_end));
mem_reserve(__pa(&_WindowVectors_text_start),
- __pa(&_WindowVectors_text_end), 0);
+ __pa(&_WindowVectors_text_end));
mem_reserve(__pa(&_DebugInterruptVector_literal_start),
- __pa(&_DebugInterruptVector_text_end), 0);
+ __pa(&_DebugInterruptVector_text_end));
mem_reserve(__pa(&_KernelExceptionVector_literal_start),
- __pa(&_KernelExceptionVector_text_end), 0);
+ __pa(&_KernelExceptionVector_text_end));
mem_reserve(__pa(&_UserExceptionVector_literal_start),
- __pa(&_UserExceptionVector_text_end), 0);
+ __pa(&_UserExceptionVector_text_end));
mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
- __pa(&_DoubleExceptionVector_text_end), 0);
+ __pa(&_DoubleExceptionVector_text_end));
#if XCHAL_EXCM_LEVEL >= 2
mem_reserve(__pa(&_Level2InterruptVector_text_start),
- __pa(&_Level2InterruptVector_text_end), 0);
+ __pa(&_Level2InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 3
mem_reserve(__pa(&_Level3InterruptVector_text_start),
- __pa(&_Level3InterruptVector_text_end), 0);
+ __pa(&_Level3InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 4
mem_reserve(__pa(&_Level4InterruptVector_text_start),
- __pa(&_Level4InterruptVector_text_end), 0);
+ __pa(&_Level4InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 5
mem_reserve(__pa(&_Level5InterruptVector_text_start),
- __pa(&_Level5InterruptVector_text_end), 0);
+ __pa(&_Level5InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 6
mem_reserve(__pa(&_Level6InterruptVector_text_start),
- __pa(&_Level6InterruptVector_text_end), 0);
+ __pa(&_Level6InterruptVector_text_end));
#endif
#ifdef CONFIG_SMP
mem_reserve(__pa(&_SecondaryResetVector_text_start),
- __pa(&_SecondaryResetVector_text_end), 0);
+ __pa(&_SecondaryResetVector_text_end));
#endif
parse_early_param();
bootmem_init();
@@ -555,6 +538,137 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);
+void cpu_reset(void)
+{
+#if XCHAL_HAVE_PTP_MMU
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+ * be flushed.
+ * Way 4 is not currently used by linux.
+ * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
+ * Way 5 shall be flushed and way 6 shall be set to identity mapping
+ * on MMUv3.
+ */
+ local_flush_tlb_all();
+ invalidate_page_directory();
+#if XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 */
+ {
+ unsigned long vaddr = (unsigned long)cpu_reset;
+ unsigned long paddr = __pa(vaddr);
+ unsigned long tmpaddr = vaddr + SZ_512M;
+ unsigned long tmp0, tmp1, tmp2, tmp3;
+
+ /*
+ * Find a place for the temporary mapping. It must not be
+ * in the same 512MB region with vaddr or paddr, otherwise
+ * there may be multihit exception either on entry to the
+ * temporary mapping, or on entry to the identity mapping.
+ * (512MB is the biggest page size supported by TLB.)
+ */
+ while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
+ tmpaddr += SZ_512M;
+
+ /* Invalidate mapping in the selected temporary area */
+ if (itlb_probe(tmpaddr) & 0x8)
+ invalidate_itlb_entry(itlb_probe(tmpaddr));
+ if (itlb_probe(tmpaddr + PAGE_SIZE) & 0x8)
+ invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
+
+ /*
+ * Map two consecutive pages starting at the physical address
+ * of this function to the temporary mapping area.
+ */
+ write_itlb_entry(__pte((paddr & PAGE_MASK) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ tmpaddr & PAGE_MASK);
+ write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ (tmpaddr & PAGE_MASK) + PAGE_SIZE);
+
+ /* Reinitialize TLB */
+ __asm__ __volatile__ ("movi %0, 1f\n\t"
+ "movi %3, 2f\n\t"
+ "add %0, %0, %4\n\t"
+ "add %3, %3, %5\n\t"
+ "jx %0\n"
+ /*
+ * No literal, data or stack access
+ * below this point
+ */
+ "1:\n\t"
+ /* Initialize *tlbcfg */
+ "movi %0, 0\n\t"
+ "wsr %0, itlbcfg\n\t"
+ "wsr %0, dtlbcfg\n\t"
+ /* Invalidate TLB way 5 */
+ "movi %0, 4\n\t"
+ "movi %1, 5\n"
+ "1:\n\t"
+ "iitlb %1\n\t"
+ "idtlb %1\n\t"
+ "add %1, %1, %6\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ /* Initialize TLB way 6 */
+ "movi %0, 7\n\t"
+ "addi %1, %9, 3\n\t"
+ "addi %2, %9, 6\n"
+ "1:\n\t"
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ "add %1, %1, %7\n\t"
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+ /* Complete way 6 initialization */
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ /* Invalidate temporary mapping */
+ "sub %0, %9, %7\n\t"
+ "iitlb %0\n\t"
+ "add %0, %0, %8\n\t"
+ "iitlb %0"
+ : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
+ "=&a"(tmp3)
+ : "a"(tmpaddr - vaddr),
+ "a"(paddr - vaddr),
+ "a"(SZ_128M), "a"(SZ_512M),
+ "a"(PAGE_SIZE),
+ "a"((tmpaddr + SZ_512M) & PAGE_MASK)
+ : "memory");
+ }
+#endif
+#endif
+ __asm__ __volatile__ ("movi a2, 0\n\t"
+ "wsr a2, icountlevel\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+#if XCHAL_HAVE_LOOPS
+ "wsr a2, lcount\n\t"
+#endif
+ "movi a2, 0x1f\n\t"
+ "wsr a2, ps\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2");
+ for (;;)
+ ;
+}
+
void machine_restart(char * cmd)
{
platform_restart();
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index b9ad9feadc2d..9a5bcd0381a7 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -12,6 +12,8 @@
* Chris Zankel <chris@zankel.net>
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
@@ -134,16 +136,52 @@ void local_timer_setup(unsigned cpu)
0xf, 0xffffffff);
}
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+#ifdef CONFIG_OF
+static void __init calibrate_ccount(void)
+{
+ struct device_node *cpu;
+ struct clk *clk;
+
+ cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+ if (cpu) {
+ clk = of_clk_get(cpu, 0);
+ if (!IS_ERR(clk)) {
+ ccount_freq = clk_get_rate(clk);
+ return;
+ } else {
+ pr_warn("%s: CPU input clock not found\n",
+ __func__);
+ }
+ } else {
+ pr_warn("%s: CPU node not found in the device tree\n",
+ __func__);
+ }
+
+ platform_calibrate_ccount();
+}
+#else
+static inline void calibrate_ccount(void)
+{
+ platform_calibrate_ccount();
+}
+#endif
+#endif
+
void __init time_init(void)
{
+ of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
- platform_calibrate_ccount();
+ calibrate_ccount();
printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
(int)(ccount_freq/10000)%100);
#else
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
+ WARN(!ccount_freq,
+ "%s: CPU clock frequency is not set up correctly\n",
+ __func__);
clocksource_register_hz(&ccount_clocksource, ccount_freq);
local_timer_setup(0);
setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
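
time_init() now calls of_clk_init() itself and takes the CCOUNT frequency from the clock attached to the CPU node, falling back to platform_calibrate_ccount() when no usable clock is described. A hedged sketch of the device-tree shape this assumes (node and label names are illustrative, not taken from the patch):

    /*
     *   osc: main-oscillator { ... };          // some clock provider
     *
     *   cpus {
     *           cpu@0 {
     *                   compatible = "cdns,xtensa-cpu";
     *                   clocks = <&osc>;
     *           };
     *   };
     *
     * calibrate_ccount() does of_clk_get() on that cpu node and uses
     * clk_get_rate() as ccount_freq.
     */
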
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index c417cbe4ec87..31411fc82662 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -30,10 +30,6 @@ jiffies = jiffies_64 + 4;
jiffies = jiffies_64;
#endif
-#ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0003000
-#endif
-
/* Note: In the following macros, it would be nice to specify only the
vector name and section kind and construct "sym" and "section" using
CPP concatenation, but that does not work reliably. Concatenating a
@@ -93,6 +89,9 @@ SECTIONS
VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .;
+ VMLINUX_SYMBOL(__cpuidle_text_start) = .;
+ *(.cpuidle.literal .cpuidle.text)
+ VMLINUX_SYMBOL(__cpuidle_text_end) = .;
VMLINUX_SYMBOL(__lock_text_start) = .;
*(.spinlock.literal .spinlock.text)
VMLINUX_SYMBOL(__lock_text_end) = .;
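
The linker-script hunk adds a .cpuidle.text region alongside the scheduler and spinlock text sections. A hedged sketch of what ends up there, assuming the generic __cpuidle section annotation from <linux/cpu.h> of this era:

    /* illustrative only: a function grouped into .cpuidle.text */
    static void __cpuidle my_wait_for_interrupt(void)
    {
            /* architecture-specific low-power wait would go here */
    }

Grouping idle functions this way lets code such as the NMI backtrace logic recognize CPUs that are merely idling.
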
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 9a9a5935bd36..80e4cfb2471a 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,7 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
- * Copyright (C) 2014 Cadence Design Systems Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -25,284 +25,43 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/of_fdt.h>
#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>
-struct sysmem_info sysmem __initdata;
-
-static void __init sysmem_dump(void)
-{
- unsigned i;
-
- pr_debug("Sysmem:\n");
- for (i = 0; i < sysmem.nr_banks; ++i)
- pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
- sysmem.bank[i].start, sysmem.bank[i].end,
- (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
-}
-
-/*
- * Find bank with maximal .start such that bank.start <= start
- */
-static inline struct meminfo * __init find_bank(unsigned long start)
-{
- unsigned i;
- struct meminfo *it = NULL;
-
- for (i = 0; i < sysmem.nr_banks; ++i)
- if (sysmem.bank[i].start <= start)
- it = sysmem.bank + i;
- else
- break;
- return it;
-}
-
-/*
- * Move all memory banks starting at 'from' to a new place at 'to',
- * adjust nr_banks accordingly.
- * Both 'from' and 'to' must be inside the sysmem.bank.
- *
- * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
- */
-static int __init move_banks(struct meminfo *to, struct meminfo *from)
-{
- unsigned n = sysmem.nr_banks - (from - sysmem.bank);
-
- if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
- return -ENOMEM;
- if (to != from)
- memmove(to, from, n * sizeof(struct meminfo));
- sysmem.nr_banks += to - from;
- return 0;
-}
-
-/*
- * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
- * original sysmem and the new bank.
- *
- * Returns: 0 (success), < 0 (error)
- */
-int __init add_sysmem_bank(unsigned long start, unsigned long end)
-{
- unsigned i;
- struct meminfo *it = NULL;
- unsigned long sz;
- unsigned long bank_sz = 0;
-
- if (start == end ||
- (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
- pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
- start, end - start);
- return -EINVAL;
- }
-
- start = PAGE_ALIGN(start);
- end &= PAGE_MASK;
- sz = end - start;
-
- it = find_bank(start);
-
- if (it)
- bank_sz = it->end - it->start;
-
- if (it && bank_sz >= start - it->start) {
- if (end - it->start > bank_sz)
- it->end = end;
- else
- return 0;
- } else {
- if (!it)
- it = sysmem.bank;
- else
- ++it;
-
- if (it - sysmem.bank < sysmem.nr_banks &&
- it->start - start <= sz) {
- it->start = start;
- if (it->end - it->start < sz)
- it->end = end;
- else
- return 0;
- } else {
- if (move_banks(it + 1, it) < 0) {
- pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
- start, end - start);
- return -EINVAL;
- }
- it->start = start;
- it->end = end;
- return 0;
- }
- }
- sz = it->end - it->start;
- for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
- if (sysmem.bank[i].start - it->start <= sz) {
- if (sz < sysmem.bank[i].end - it->start)
- it->end = sysmem.bank[i].end;
- } else {
- break;
- }
-
- move_banks(it + 1, sysmem.bank + i);
- return 0;
-}
-
-/*
- * mem_reserve(start, end, must_exist)
- *
- * Reserve some memory from the memory pool.
- * If must_exist is set and a part of the region being reserved does not exist
- * memory map is not altered.
- *
- * Parameters:
- * start Start of region,
- * end End of region,
- * must_exist Must exist in memory pool.
- *
- * Returns:
- * 0 (success)
- * < 0 (error)
- */
-
-int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
-{
- struct meminfo *it;
- struct meminfo *rm = NULL;
- unsigned long sz;
- unsigned long bank_sz = 0;
-
- start = start & PAGE_MASK;
- end = PAGE_ALIGN(end);
- sz = end - start;
- if (!sz)
- return -EINVAL;
-
- it = find_bank(start);
-
- if (it)
- bank_sz = it->end - it->start;
-
- if ((!it || end - it->start > bank_sz) && must_exist) {
- pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
- start, end);
- return -EINVAL;
- }
-
- if (it && start - it->start <= bank_sz) {
- if (start == it->start) {
- if (end - it->start < bank_sz) {
- it->start = end;
- return 0;
- } else {
- rm = it;
- }
- } else {
- it->end = start;
- if (end - it->start < bank_sz)
- return add_sysmem_bank(end,
- it->start + bank_sz);
- ++it;
- }
- }
-
- if (!it)
- it = sysmem.bank;
-
- for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
- if (it->end - start <= sz) {
- if (!rm)
- rm = it;
- } else {
- if (it->start - start < sz)
- it->start = end;
- break;
- }
- }
-
- if (rm)
- move_banks(rm, it);
-
- return 0;
-}
-
-
/*
* Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
{
- unsigned long pfn;
- unsigned long bootmap_start, bootmap_size;
- int i;
-
- /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
+ /* Reserve all memory below PHYS_OFFSET, as memory
* accounting doesn't work for pages below that address.
*
- * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0:
+ * If PHYS_OFFSET is zero reserve page at address 0:
* successfull allocations should never return NULL.
*/
- if (PLATFORM_DEFAULT_MEM_START)
- mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0);
+ if (PHYS_OFFSET)
+ memblock_reserve(0, PHYS_OFFSET);
else
- mem_reserve(0, 1, 0);
+ memblock_reserve(0, 1);
- sysmem_dump();
- max_low_pfn = max_pfn = 0;
- min_low_pfn = ~0;
-
- for (i=0; i < sysmem.nr_banks; i++) {
- pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
- if (pfn < min_low_pfn)
- min_low_pfn = pfn;
- pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
- if (pfn > max_pfn)
- max_pfn = pfn;
- }
+ early_init_fdt_scan_reserved_mem();
- if (min_low_pfn > max_pfn)
+ if (!memblock_phys_mem_size())
panic("No memory found!\n");
- max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
- max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
+ min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+ min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(max_pfn, MAX_LOW_PFN);
- /* Find an area to use for the bootmem bitmap. */
-
- bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
- bootmap_size <<= PAGE_SHIFT;
- bootmap_start = ~0;
-
- for (i=0; i<sysmem.nr_banks; i++)
- if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
- bootmap_start = sysmem.bank[i].start;
- break;
- }
-
- if (bootmap_start == ~0UL)
- panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
- /* Reserve the bootmem bitmap area */
-
- mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
- bootmap_size = init_bootmem_node(NODE_DATA(0),
- bootmap_start >> PAGE_SHIFT,
- min_low_pfn,
- max_low_pfn);
-
- /* Add all remaining memory pieces into the bootmem map */
-
- for (i = 0; i < sysmem.nr_banks; i++) {
- if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
- unsigned long end = min(max_low_pfn << PAGE_SHIFT,
- sysmem.bank[i].end);
- free_bootmem(sysmem.bank[i].start,
- end - sysmem.bank[i].start);
- }
- }
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ memblock_dump_all();
}
@@ -344,7 +103,7 @@ void __init mem_init(void)
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
#ifdef CONFIG_MMU
- " vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
" lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
#ifdef CONFIG_HIGHMEM
@@ -395,16 +154,16 @@ static void __init parse_memmap_one(char *p)
switch (*p) {
case '@':
start_at = memparse(p + 1, &p);
- add_sysmem_bank(start_at, start_at + mem_size);
+ memblock_add(start_at, mem_size);
break;
case '$':
start_at = memparse(p + 1, &p);
- mem_reserve(start_at, start_at + mem_size, 0);
+ memblock_reserve(start_at, mem_size);
break;
case 0:
- mem_reserve(mem_size, 0, 0);
+ memblock_reserve(mem_size, -mem_size);
break;
default:
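
With the sysmem bank array gone, the memmap= grammar in parse_memmap_one() maps directly onto memblock. Worked examples for the three forms handled by the switch above:

    /*
     * memmap=256M@0x60000000  ->  memblock_add(0x60000000, 0x10000000)
     * memmap=16M$0x3f000000   ->  memblock_reserve(0x3f000000, 0x01000000)
     * memmap=96M              ->  memblock_reserve(0x06000000, -0x06000000)
     *                             (reserve everything from 96 MiB up,
     *                              i.e. cap usable memory at 96 MiB)
     */

The first form is what the updated nommu_kc705 and smp_lx200 defconfigs earlier in this diff pass on the kernel command line.
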
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index 12b15ad1e586..27d7a528b41a 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -76,6 +76,11 @@ static inline int __simc(int a, int b, int c, int d)
return ret;
}
+static inline int simc_exit(int exit_code)
+{
+ return __simc(SYS_exit, exit_code, 0, 0);
+}
+
static inline int simc_open(const char *file, int flags, int mode)
{
return __simc(SYS_open, (int) file, flags, mode);
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index 391820539f0a..379aeddcc638 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -32,6 +32,8 @@
#include <asm/platform.h>
#include <asm/bootparam.h>
+#include <platform/simcall.h>
+
void __init platform_init(bp_tag_t* bootparam)
{
@@ -41,37 +43,19 @@ void __init platform_init(bp_tag_t* bootparam)
void platform_halt(void)
{
pr_info(" ** Called platform_halt() **\n");
- __asm__ __volatile__("movi a2, 1\nsimcall\n");
+ simc_exit(0);
}
void platform_power_off(void)
{
pr_info(" ** Called platform_power_off() **\n");
- __asm__ __volatile__("movi a2, 1\nsimcall\n");
+ simc_exit(0);
}
void platform_restart(void)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
-
- __asm__ __volatile__("movi a2, 15\n\t"
- "wsr a2, icountlevel\n\t"
- "movi a2, 0\n\t"
- "wsr a2, icount\n\t"
-#if XCHAL_NUM_IBREAK > 0
- "wsr a2, ibreakenable\n\t"
-#endif
-#if XCHAL_HAVE_LOOPS
- "wsr a2, lcount\n\t"
-#endif
- "movi a2, 0x1f\n\t"
- "wsr a2, ps\n\t"
- "isync\n\t"
- "jx %0\n\t"
- :
- : "a" (XCHAL_RESET_VECTOR_VADDR)
- : "a2");
-
+ cpu_reset();
/* control never gets here */
}
@@ -98,7 +82,7 @@ void platform_heartbeat(void)
static int
iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- __asm__ __volatile__("movi a2, -1; simcall\n");
+ simc_exit(1);
return NOTIFY_DONE;
}
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index f58a4e6472cb..ede04cca30dd 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -86,6 +86,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
unsigned long io;
simc_lseek(dev->fd, offset, SEEK_SET);
+ READ_ONCE(*buffer);
if (write)
io = simc_write(dev->fd, buffer, nbytes);
else
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 4904c5c16918..9c2f1fb960d0 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -64,26 +64,7 @@ void platform_restart(void)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
-
- __asm__ __volatile__ ("movi a2, 15\n\t"
- "wsr a2, icountlevel\n\t"
- "movi a2, 0\n\t"
- "wsr a2, icount\n\t"
-#if XCHAL_NUM_IBREAK > 0
- "wsr a2, ibreakenable\n\t"
-#endif
-#if XCHAL_HAVE_LOOPS
- "wsr a2, lcount\n\t"
-#endif
- "movi a2, 0x1f\n\t"
- "wsr a2, ps\n\t"
- "isync\n\t"
- "jx %0\n\t"
- :
- : "a" (XCHAL_RESET_VECTOR_VADDR)
- : "a2"
- );
-
+ cpu_reset();
/* control never gets here */
}
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index b509d1f55ed5..779be723eb2b 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -26,6 +26,8 @@
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
#include <asm/timex.h>
#include <asm/processor.h>
@@ -54,58 +56,63 @@ void platform_restart(void)
{
/* Flush and reset the mmu, simulate a processor reset, and
* jump to the reset vector. */
+ cpu_reset();
+ /* control never gets here */
+}
+void __init platform_setup(char **cmdline)
+{
+}
- __asm__ __volatile__ ("movi a2, 15\n\t"
- "wsr a2, icountlevel\n\t"
- "movi a2, 0\n\t"
- "wsr a2, icount\n\t"
-#if XCHAL_NUM_IBREAK > 0
- "wsr a2, ibreakenable\n\t"
-#endif
-#if XCHAL_HAVE_LOOPS
- "wsr a2, lcount\n\t"
-#endif
- "movi a2, 0x1f\n\t"
- "wsr a2, ps\n\t"
- "isync\n\t"
- "jx %0\n\t"
- :
- : "a" (XCHAL_RESET_VECTOR_VADDR)
- : "a2"
- );
+/* early initialization */
- /* control never gets here */
+void __init platform_init(bp_tag_t *first)
+{
}
-void __init platform_setup(char **cmdline)
+/* Heartbeat. */
+
+void platform_heartbeat(void)
+{
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+
+void __init platform_calibrate_ccount(void)
{
+ ccount_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
}
+#endif
+
#ifdef CONFIG_OF
-static void __init update_clock_frequency(struct device_node *node)
+static void __init xtfpga_clk_setup(struct device_node *np)
{
- struct property *newfreq;
+ void __iomem *base = of_iomap(np, 0);
+ struct clk *clk;
u32 freq;
- if (!of_property_read_u32(node, "clock-frequency", &freq) && freq != 0)
+ if (!base) {
+ pr_err("%s: invalid address\n", np->name);
return;
+ }
- newfreq = kzalloc(sizeof(*newfreq) + sizeof(u32), GFP_KERNEL);
- if (!newfreq)
- return;
- newfreq->value = newfreq + 1;
- newfreq->length = sizeof(freq);
- newfreq->name = kstrdup("clock-frequency", GFP_KERNEL);
- if (!newfreq->name) {
- kfree(newfreq);
+ freq = __raw_readl(base);
+ iounmap(base);
+ clk = clk_register_fixed_rate(NULL, np->name, NULL, 0, freq);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: clk registration failed\n", np->name);
return;
}
- *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
- of_update_property(node, newfreq);
+ if (of_clk_add_provider(np, of_clk_src_simple_get, clk)) {
+ pr_err("%s: clk provider registration failed\n", np->name);
+ return;
+ }
}
+CLK_OF_DECLARE(xtfpga_clk, "cdns,xtfpga-clock", xtfpga_clk_setup);
#define MAC_LEN 6
static void __init update_local_mac(struct device_node *node)
@@ -137,56 +144,15 @@ static void __init update_local_mac(struct device_node *node)
static int __init machine_setup(void)
{
- struct device_node *clock;
struct device_node *eth = NULL;
- for_each_node_by_name(clock, "main-oscillator")
- update_clock_frequency(clock);
-
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
return 0;
}
arch_initcall(machine_setup);
-#endif
-
-/* early initialization */
-
-void __init platform_init(bp_tag_t *first)
-{
-}
-
-/* Heartbeat. */
-
-void platform_heartbeat(void)
-{
-}
-
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-
-void __init platform_calibrate_ccount(void)
-{
- long clk_freq = 0;
-#ifdef CONFIG_OF
- struct device_node *cpu =
- of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
- if (cpu) {
- u32 freq;
- update_clock_frequency(cpu);
- if (!of_property_read_u32(cpu, "clock-frequency", &freq))
- clk_freq = freq;
- }
-#endif
- if (!clk_freq)
- clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
-
- ccount_freq = clk_freq;
-}
-
-#endif
-
-#ifndef CONFIG_OF
+#else
#include <linux/serial_8250.h>
#include <linux/if.h>
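
The xtfpga clock handling is reworked above: instead of patching a clock-frequency property into the device tree, the board oscillator is registered early as a fixed-rate clock provider via CLK_OF_DECLARE, and platform_calibrate_ccount() is reduced to reading the frequency register directly. A hedged sketch of the device-tree node the provider expects (the real node lives in the xtfpga .dts files; the label and reg value here are illustrative):

    /*
     *   osc: main-oscillator {
     *           #clock-cells = <0>;
     *           compatible = "cdns,xtfpga-clock";
     *           reg = <0xfd020004 0x4>;
     *   };
     *
     * xtfpga_clk_setup() maps 'reg', reads the frequency, registers a
     * fixed-rate clock named after the node and exposes it through
     * of_clk_add_provider(), so consumers can reference it as
     * clocks = <&osc>;.
     */
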
diff --git a/arch/xtensa/variants/csp/include/variant/core.h b/arch/xtensa/variants/csp/include/variant/core.h
new file mode 100644
index 000000000000..ccd81f07eaf3
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/core.h
@@ -0,0 +1,575 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 4 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 16 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "xt_lnx" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00057D54 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C857D54 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 64 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 64 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 6 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 6 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 65536 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 6
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 16
+#define XCHAL_DCACHE_ACCESS_SIZE 16
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00001140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00008000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F11FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F1FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 2
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F1000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000033F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL4_NUM 15
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 2) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 1 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
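The cache constants above are consistent with the identity stated in this header, size == ways * 2^(linewidth + setwidth); a quick standalone check, illustrative only:

#include <assert.h>

int main(void)
{
	assert(4u * (1u << (6 + 8)) == 65536u);	/* I-cache: 4 ways, 2^6-byte lines, 2^8 sets */
	assert(4u * (1u << (6 + 6)) == 16384u);	/* D-cache: 4 ways, 2^6-byte lines, 2^6 sets */
	return 0;
}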
diff --git a/arch/xtensa/variants/csp/include/variant/tie-asm.h b/arch/xtensa/variants/csp/include/variant/tie-asm.h
new file mode 100644
index 000000000000..ba773c4a26a2
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/tie-asm.h
@@ -0,0 +1,194 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ rsr.BR \at1 // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_store
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.BR \at1 // boolean option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
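A hedged sketch, not from this patch, of how a context-save path would invoke the macros above; register choices and the save-area setup are assumptions:

	// Save then restore the non-coprocessor optional/TIE state described above.
	// The pointer register is clobbered by each macro, so it is re-established
	// before the load.  This core needs only one temporary register
	// (XCHAL_NCP_NUM_ATMPS == 1); a4..a6 are passed but left unused.
	mov	a2, a7			// a7 = base of a 4-byte-aligned save area (assumed)
	xchal_ncp_store	a2, a3, a4, a5, a6	// stores THREADPTR, ACCLO/ACCHI, BR, SCOMPARE1, M0..M3
	// ... run other code, e.g. switch threads ...
	mov	a2, a7
	xchal_ncp_load	a2, a3, a4, a5, a6	// restores the same state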
diff --git a/arch/xtensa/variants/csp/include/variant/tie.h b/arch/xtensa/variants/csp/include/variant/tie.h
new file mode 100644
index 000000000000..3ce391cfe449
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/tie.h
@@ -0,0 +1,161 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 36
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 48 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 9
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
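An illustrative expansion of the save-area list above, not part of this patch: defining XCHAL_SA_REG trivially (with the GNU named-variadic signature documented in this header) turns XCHAL_NCP_SA_LIST() into a compile-time count of its entries.

#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize, \
		     dbnum,base,regnum,bitsz,gapsz,reset,x...)  + 1
static const int ncp_sa_entries = 0 XCHAL_NCP_SA_LIST(0);	/* == XCHAL_NCP_SA_NUM == 9 */
#undef XCHAL_SA_REG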
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..1d4d624492fc 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select SBITMAP
help
Provide block layer support for the kernel.
@@ -124,4 +125,9 @@ config BLOCK_COMPAT
depends on BLOCK && COMPAT
default y
+config BLK_MQ_PCI
+ bool
+ depends on BLOCK && PCI
+ default y
+
source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 9eda2322b2d4..36acdd7545be 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o \
- blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
+ blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
@@ -22,4 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
-
+obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
diff --git a/block/bio.c b/block/bio.c
index aa7354088008..db85c5753a76 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1068,7 +1068,7 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
return 0;
}
-static void bio_free_pages(struct bio *bio)
+void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
int i;
@@ -1076,6 +1076,7 @@ static void bio_free_pages(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i)
__free_page(bvec->bv_page);
}
+EXPORT_SYMBOL(bio_free_pages);
/**
* bio_uncopy_user - finish previously mapped bio
@@ -1274,7 +1275,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
nr_pages += end - start;
/*
- * buffer must be aligned to at least hardsector size for now
+ * buffer must be aligned to at least logical block size for now
*/
if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
diff --git a/block/blk-core.c b/block/blk-core.c
index 36c7ac328d8c..14d7c0740dc0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- cancel_delayed_work_sync(&hctx->run_work);
+ cancel_work_sync(&hctx->run_work);
cancel_delayed_work_sync(&hctx->delay_work);
}
} else {
@@ -3097,6 +3097,12 @@ int kblockd_schedule_work(struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
+int kblockd_schedule_work_on(int cpu, struct work_struct *work)
+{
+ return queue_work_on(cpu, kblockd_workqueue, work);
+}
+EXPORT_SYMBOL(kblockd_schedule_work_on);
+
int kblockd_schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
@@ -3301,19 +3307,23 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_plug *plug;
long state;
+ unsigned int queue_num;
+ struct blk_mq_hw_ctx *hctx;
if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;
+ queue_num = blk_qc_t_to_queue_num(cookie);
+ hctx = q->queue_hw_ctx[queue_num];
+ hctx->poll_considered++;
+
plug = current->plug;
if (plug)
blk_flush_plug_list(plug, false);
state = current->state;
while (!need_resched()) {
- unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
- struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
int ret;
hctx->poll_invoked++;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d308def812db..6a14b68b9135 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
}
@@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
flush_rq->tag = first_rq->tag;
fq->orig_rq = first_rq;
- hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
@@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
/*
* After populating an empty queue, kick it to avoid stall. Read
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
deleted file mode 100644
index bb3ed488f7b5..000000000000
--- a/block/blk-mq-cpu.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * CPU notifier helper code for blk-mq
- *
- * Copyright (C) 2013-2014 Jens Axboe
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/list.h>
-#include <linux/llist.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-
-#include <linux/blk-mq.h>
-#include "blk-mq.h"
-
-static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
-
-static int blk_mq_main_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long) hcpu;
- struct blk_mq_cpu_notifier *notify;
- int ret = NOTIFY_OK;
-
- raw_spin_lock(&blk_mq_cpu_notify_lock);
-
- list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
- ret = notify->notify(notify->data, action, cpu);
- if (ret != NOTIFY_OK)
- break;
- }
-
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
- return ret;
-}
-
-void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
-{
- BUG_ON(!notifier->notify);
-
- raw_spin_lock(&blk_mq_cpu_notify_lock);
- list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-}
-
-void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
-{
- raw_spin_lock(&blk_mq_cpu_notify_lock);
- list_del(&notifier->list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-}
-
-void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
- int (*fn)(void *, unsigned long, unsigned int),
- void *data)
-{
- notifier->notify = fn;
- notifier->data = data;
-}
-
-void __init blk_mq_cpu_init(void)
-{
- hotcpu_notifier(blk_mq_main_cpu_notify, 0);
-}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index d0634bcf322f..19b1d9c5f07e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask)
+int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
+ unsigned int *map = set->mq_map;
+ unsigned int nr_queues = set->nr_hw_queues;
+ const struct cpumask *online_mask = cpu_online_mask;
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
- return 1;
+ return -ENOMEM;
cpumask_clear(cpus);
nr_cpus = nr_uniq_cpus = 0;
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
return 0;
}
-unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
-{
- unsigned int *map;
-
- /* If cpus are offline, map them to first hctx */
- map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
- set->numa_node);
- if (!map)
- return NULL;
-
- if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
- return map;
-
- kfree(map);
- return NULL;
-}
-
/*
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
new file mode 100644
index 000000000000..966c2169762e
--- /dev/null
+++ b/block/blk-mq-pci.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kobject.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
+/**
+ * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
+ * @set: tagset to provide the mapping for
+ * @pdev: PCI device associated with @set.
+ *
+ * This function assumes the PCI device @pdev has at least as many available
+ * interrupt vectors as @set has queues. It will then query the vector
+ * corresponding to each queue for its affinity mask and build a queue mapping
+ * that maps a queue to the CPUs that have irq affinity for the corresponding
+ * vector.
+ */
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue);
+ if (!mask)
+ return -EINVAL;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
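A hedged sketch of how a PCI blk-mq driver would wire this helper into its queue mapping, assuming the ->map_queues hook on blk_mq_ops introduced elsewhere in this series; the "foo" driver names are hypothetical:

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct foo_dev {			/* hypothetical per-device state */
	struct pci_dev *pdev;
};

static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;

	/* One hardware queue per PCI IRQ vector, mapped by vector affinity. */
	return blk_mq_pci_map_queues(set, foo->pdev);
}

static struct blk_mq_ops foo_mq_ops = {
	/* .queue_rq and the other mandatory callbacks are omitted here */
	.map_queues	= foo_map_queues,
};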
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index fe822aa5b8e4..01fb455d3377 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -176,7 +176,17 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
- return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
+ return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
+ hctx->poll_considered, hctx->poll_invoked,
+ hctx->poll_success);
+}
+
+static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
+ const char *page, size_t size)
+{
+ hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
+
+ return size;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
@@ -198,12 +208,14 @@ static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
- for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
- unsigned long d = 1U << (i - 1);
+ for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
+ unsigned int d = 1U << (i - 1);
- page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
+ page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
}
+ page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
+ hctx->dispatched[i]);
return page - start_page;
}
@@ -301,8 +313,9 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
- .attr = {.name = "io_poll", .mode = S_IRUGO },
+ .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
+ .store = blk_mq_hw_sysfs_poll_store,
};
static struct attribute *default_hw_ctx_attrs[] = {
@@ -380,9 +393,8 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
-static void __blk_mq_unregister_disk(struct gendisk *disk)
+static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
- struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int i, j;
@@ -400,15 +412,15 @@ static void __blk_mq_unregister_disk(struct gendisk *disk)
kobject_del(&q->mq_kobj);
kobject_put(&q->mq_kobj);
- kobject_put(&disk_to_dev(disk)->kobj);
+ kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
}
-void blk_mq_unregister_disk(struct gendisk *disk)
+void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
blk_mq_disable_hotplug();
- __blk_mq_unregister_disk(disk);
+ __blk_mq_unregister_dev(dev, q);
blk_mq_enable_hotplug();
}
@@ -430,10 +442,8 @@ static void blk_mq_sysfs_init(struct request_queue *q)
}
}
-int blk_mq_register_disk(struct gendisk *disk)
+int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
- struct device *dev = disk_to_dev(disk);
- struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
int ret, i;
@@ -454,7 +464,7 @@ int blk_mq_register_disk(struct gendisk *disk)
}
if (ret)
- __blk_mq_unregister_disk(disk);
+ __blk_mq_unregister_dev(dev, q);
else
q->mq_sysfs_init_done = true;
out:
@@ -462,7 +472,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(blk_mq_register_disk);
+EXPORT_SYMBOL_GPL(blk_mq_register_dev);
void blk_mq_sysfs_unregister(struct request_queue *q)
{
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 729bac3a673b..dcf5ce3ba4bf 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,58 +1,24 @@
/*
- * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
- * over multiple cachelines to avoid ping-pong between multiple submitters
- * or submitter and completer. Uses rolling wakeups to avoid falling of
- * the scaling cliff when we run out of tags and have to start putting
- * submitters to sleep.
- *
- * Uses active queue tracking to support fairer distribution of tags
- * between multiple submitters when a shared tag map is used.
+ * Tag allocation using scalable bitmaps. Uses active queue tracking to support
+ * fairer distribution of tags between multiple submitters when a shared tag map
+ * is used.
*
* Copyright (C) 2013-2014 Jens Axboe
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/random.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
-static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
-{
- int i;
-
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
- int ret;
-
- ret = find_first_zero_bit(&bm->word, bm->depth);
- if (ret < bm->depth)
- return true;
- }
-
- return false;
-}
-
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
if (!tags)
return true;
- return bt_has_free_tags(&tags->bitmap_tags);
-}
-
-static inline int bt_index_inc(int index)
-{
- return (index + 1) & (BT_WAIT_QUEUES - 1);
-}
-
-static inline void bt_index_atomic_inc(atomic_t *index)
-{
- int old = atomic_read(index);
- int new = bt_index_inc(old);
- atomic_cmpxchg(index, old, new);
+ return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}
/*
@@ -72,29 +38,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
*/
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
- struct blk_mq_bitmap_tags *bt;
- int i, wake_index;
-
- /*
- * Make sure all changes prior to this are visible from other CPUs.
- */
- smp_mb();
- bt = &tags->bitmap_tags;
- wake_index = atomic_read(&bt->wake_index);
- for (i = 0; i < BT_WAIT_QUEUES; i++) {
- struct bt_wait_state *bs = &bt->bs[wake_index];
-
- if (waitqueue_active(&bs->wait))
- wake_up(&bs->wait);
-
- wake_index = bt_index_inc(wake_index);
- }
-
- if (include_reserve) {
- bt = &tags->breserved_tags;
- if (waitqueue_active(&bt->bs[0].wait))
- wake_up(&bt->bs[0].wait);
- }
+ sbitmap_queue_wake_all(&tags->bitmap_tags);
+ if (include_reserve)
+ sbitmap_queue_wake_all(&tags->breserved_tags);
}
/*
@@ -118,7 +64,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
* and attempt to provide a fair share of the tag depth for each of them.
*/
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_bitmap_tags *bt)
+ struct sbitmap_queue *bt)
{
unsigned int depth, users;
@@ -130,7 +76,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
/*
* Don't try dividing an ant
*/
- if (bt->depth == 1)
+ if (bt->sb.depth == 1)
return true;
users = atomic_read(&hctx->tags->active_queues);
@@ -140,142 +86,36 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
/*
* Allow at least some tags
*/
- depth = max((bt->depth + users - 1) / users, 4U);
+ depth = max((bt->sb.depth + users - 1) / users, 4U);
return atomic_read(&hctx->nr_active) < depth;
}
-static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
- bool nowrap)
-{
- int tag, org_last_tag = last_tag;
-
- while (1) {
- tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
- if (unlikely(tag >= bm->depth)) {
- /*
- * We started with an offset, and we didn't reset the
- * offset to 0 in a failure case, so start from 0 to
- * exhaust the map.
- */
- if (org_last_tag && last_tag && !nowrap) {
- last_tag = org_last_tag = 0;
- continue;
- }
- return -1;
- }
-
- if (!test_and_set_bit(tag, &bm->word))
- break;
-
- last_tag = tag + 1;
- if (last_tag >= bm->depth - 1)
- last_tag = 0;
- }
-
- return tag;
-}
-
-#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
-
-/*
- * Straight forward bitmap tag implementation, where each bit is a tag
- * (cleared == free, and set == busy). The small twist is using per-cpu
- * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
- * contexts. This enables us to drastically limit the space searched,
- * without dirtying an extra shared cacheline like we would if we stored
- * the cache value inside the shared blk_mq_bitmap_tags structure. On top
- * of that, each word of tags is in a separate cacheline. This means that
- * multiple users will tend to stick to different cachelines, at least
- * until the map is exhausted.
- */
-static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
- unsigned int *tag_cache, struct blk_mq_tags *tags)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
- unsigned int last_tag, org_last_tag;
- int index, i, tag;
-
if (!hctx_may_queue(hctx, bt))
return -1;
-
- last_tag = org_last_tag = *tag_cache;
- index = TAG_TO_INDEX(bt, last_tag);
-
- for (i = 0; i < bt->map_nr; i++) {
- tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
- BT_ALLOC_RR(tags));
- if (tag != -1) {
- tag += (index << bt->bits_per_word);
- goto done;
- }
-
- /*
- * Jump to next index, and reset the last tag to be the
- * first tag of that index
- */
- index++;
- last_tag = (index << bt->bits_per_word);
-
- if (index >= bt->map_nr) {
- index = 0;
- last_tag = 0;
- }
- }
-
- *tag_cache = 0;
- return -1;
-
- /*
- * Only update the cache from the allocation path, if we ended
- * up using the specific cached tag.
- */
-done:
- if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
- last_tag = tag + 1;
- if (last_tag >= bt->depth - 1)
- last_tag = 0;
-
- *tag_cache = last_tag;
- }
-
- return tag;
+ return __sbitmap_queue_get(bt);
}
-static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
- struct blk_mq_hw_ctx *hctx)
+static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
+ struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
- struct bt_wait_state *bs;
- int wait_index;
-
- if (!hctx)
- return &bt->bs[0];
-
- wait_index = atomic_read(&hctx->wait_index);
- bs = &bt->bs[wait_index];
- bt_index_atomic_inc(&hctx->wait_index);
- return bs;
-}
-
-static int bt_get(struct blk_mq_alloc_data *data,
- struct blk_mq_bitmap_tags *bt,
- struct blk_mq_hw_ctx *hctx,
- unsigned int *last_tag, struct blk_mq_tags *tags)
-{
- struct bt_wait_state *bs;
+ struct sbq_wait_state *ws;
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
return tag;
if (data->flags & BLK_MQ_REQ_NOWAIT)
return -1;
- bs = bt_wait_ptr(bt, hctx);
+ ws = bt_wait_ptr(bt, hctx);
do {
- prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
@@ -292,7 +132,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, last_tag, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
@@ -301,20 +141,18 @@ static int bt_get(struct blk_mq_alloc_data *data,
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = data->q->mq_ops->map_queue(data->q,
- data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
- last_tag = &data->ctx->last_tag;
hctx = data->hctx;
bt = &hctx->tags->bitmap_tags;
}
- finish_wait(&bs->wait, &wait);
- bs = bt_wait_ptr(bt, hctx);
+ finish_wait(&ws->wait, &wait);
+ ws = bt_wait_ptr(bt, hctx);
} while (1);
- finish_wait(&bs->wait, &wait);
+ finish_wait(&ws->wait, &wait);
return tag;
}
@@ -323,7 +161,7 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
- &data->ctx->last_tag, data->hctx->tags);
+ data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
@@ -332,15 +170,15 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
- int tag, zero = 0;
+ int tag;
if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
- tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
- data->hctx->tags);
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
+ data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
@@ -354,55 +192,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
return __blk_mq_get_tag(data);
}
-static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
-{
- int i, wake_index;
-
- wake_index = atomic_read(&bt->wake_index);
- for (i = 0; i < BT_WAIT_QUEUES; i++) {
- struct bt_wait_state *bs = &bt->bs[wake_index];
-
- if (waitqueue_active(&bs->wait)) {
- int o = atomic_read(&bt->wake_index);
- if (wake_index != o)
- atomic_cmpxchg(&bt->wake_index, o, wake_index);
-
- return bs;
- }
-
- wake_index = bt_index_inc(wake_index);
- }
-
- return NULL;
-}
-
-static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
-{
- const int index = TAG_TO_INDEX(bt, tag);
- struct bt_wait_state *bs;
- int wait_cnt;
-
- clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
-
- /* Ensure that the wait list checks occur after clear_bit(). */
- smp_mb();
-
- bs = bt_wake_ptr(bt);
- if (!bs)
- return;
-
- wait_cnt = atomic_dec_return(&bs->wait_cnt);
- if (unlikely(wait_cnt < 0))
- wait_cnt = atomic_inc_return(&bs->wait_cnt);
- if (wait_cnt == 0) {
- atomic_add(bt->wake_cnt, &bs->wait_cnt);
- bt_index_atomic_inc(&bt->wake_index);
- wake_up(&bs->wait);
- }
-}
-
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
- unsigned int *last_tag)
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ unsigned int tag)
{
struct blk_mq_tags *tags = hctx->tags;
@@ -410,67 +201,92 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
const int real_tag = tag - tags->nr_reserved_tags;
BUG_ON(real_tag >= tags->nr_tags);
- bt_clear_tag(&tags->bitmap_tags, real_tag);
- if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
- *last_tag = real_tag;
+ sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
- bt_clear_tag(&tags->breserved_tags, tag);
+ sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
}
}
-static void bt_for_each(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_bitmap_tags *bt, unsigned int off,
- busy_iter_fn *fn, void *data, bool reserved)
+struct bt_iter_data {
+ struct blk_mq_hw_ctx *hctx;
+ busy_iter_fn *fn;
+ void *data;
+ bool reserved;
+};
+
+static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
+ struct bt_iter_data *iter_data = data;
+ struct blk_mq_hw_ctx *hctx = iter_data->hctx;
+ struct blk_mq_tags *tags = hctx->tags;
+ bool reserved = iter_data->reserved;
struct request *rq;
- int bit, i;
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
+ if (!reserved)
+ bitnr += tags->nr_reserved_tags;
+ rq = tags->rqs[bitnr];
- for (bit = find_first_bit(&bm->word, bm->depth);
- bit < bm->depth;
- bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
- rq = hctx->tags->rqs[off + bit];
- if (rq->q == hctx->queue)
- fn(hctx, rq, data, reserved);
- }
+ if (rq->q == hctx->queue)
+ iter_data->fn(hctx, rq, iter_data->data, reserved);
+ return true;
+}
- off += (1 << bt->bits_per_word);
- }
+static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
+ busy_iter_fn *fn, void *data, bool reserved)
+{
+ struct bt_iter_data iter_data = {
+ .hctx = hctx,
+ .fn = fn,
+ .data = data,
+ .reserved = reserved,
+ };
+
+ sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
-static void bt_tags_for_each(struct blk_mq_tags *tags,
- struct blk_mq_bitmap_tags *bt, unsigned int off,
- busy_tag_iter_fn *fn, void *data, bool reserved)
+struct bt_tags_iter_data {
+ struct blk_mq_tags *tags;
+ busy_tag_iter_fn *fn;
+ void *data;
+ bool reserved;
+};
+
+static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
+ struct bt_tags_iter_data *iter_data = data;
+ struct blk_mq_tags *tags = iter_data->tags;
+ bool reserved = iter_data->reserved;
struct request *rq;
- int bit, i;
- if (!tags->rqs)
- return;
- for (i = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
-
- for (bit = find_first_bit(&bm->word, bm->depth);
- bit < bm->depth;
- bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
- rq = tags->rqs[off + bit];
- fn(rq, data, reserved);
- }
+ if (!reserved)
+ bitnr += tags->nr_reserved_tags;
+ rq = tags->rqs[bitnr];
- off += (1 << bt->bits_per_word);
- }
+ iter_data->fn(rq, iter_data->data, reserved);
+ return true;
+}
+
+static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
+ busy_tag_iter_fn *fn, void *data, bool reserved)
+{
+ struct bt_tags_iter_data iter_data = {
+ .tags = tags,
+ .fn = fn,
+ .data = data,
+ .reserved = reserved,
+ };
+
+ if (tags->rqs)
+ sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv)
{
if (tags->nr_reserved_tags)
- bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
- bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
- false);
+ bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
+ bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
@@ -529,124 +345,40 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
continue;
if (tags->nr_reserved_tags)
- bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
- bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
- false);
+ bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
+ bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
}
-static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
+static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
- unsigned int i, used;
-
- for (i = 0, used = 0; i < bt->map_nr; i++) {
- struct blk_align_bitmap *bm = &bt->map[i];
-
- used += bitmap_weight(&bm->word, bm->depth);
- }
-
- return bt->depth - used;
+ return bt->sb.depth - sbitmap_weight(&bt->sb);
}
-static void bt_update_count(struct blk_mq_bitmap_tags *bt,
- unsigned int depth)
+static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
+ bool round_robin, int node)
{
- unsigned int tags_per_word = 1U << bt->bits_per_word;
- unsigned int map_depth = depth;
-
- if (depth) {
- int i;
-
- for (i = 0; i < bt->map_nr; i++) {
- bt->map[i].depth = min(map_depth, tags_per_word);
- map_depth -= bt->map[i].depth;
- }
- }
-
- bt->wake_cnt = BT_WAIT_BATCH;
- if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
- bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
-
- bt->depth = depth;
-}
-
-static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
- int node, bool reserved)
-{
- int i;
-
- bt->bits_per_word = ilog2(BITS_PER_LONG);
-
- /*
- * Depth can be zero for reserved tags, that's not a failure
- * condition.
- */
- if (depth) {
- unsigned int nr, tags_per_word;
-
- tags_per_word = (1 << bt->bits_per_word);
-
- /*
- * If the tag space is small, shrink the number of tags
- * per word so we spread over a few cachelines, at least.
- * If less than 4 tags, just forget about it, it's not
- * going to work optimally anyway.
- */
- if (depth >= 4) {
- while (tags_per_word * 4 > depth) {
- bt->bits_per_word--;
- tags_per_word = (1 << bt->bits_per_word);
- }
- }
-
- nr = ALIGN(depth, tags_per_word) / tags_per_word;
- bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
- GFP_KERNEL, node);
- if (!bt->map)
- return -ENOMEM;
-
- bt->map_nr = nr;
- }
-
- bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
- if (!bt->bs) {
- kfree(bt->map);
- bt->map = NULL;
- return -ENOMEM;
- }
-
- bt_update_count(bt, depth);
-
- for (i = 0; i < BT_WAIT_QUEUES; i++) {
- init_waitqueue_head(&bt->bs[i].wait);
- atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
- }
-
- return 0;
-}
-
-static void bt_free(struct blk_mq_bitmap_tags *bt)
-{
- kfree(bt->map);
- kfree(bt->bs);
+ return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
+ node);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
- tags->alloc_policy = alloc_policy;
-
- if (bt_alloc(&tags->bitmap_tags, depth, node, false))
- goto enomem;
- if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
- goto enomem;
+ if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
+ goto free_tags;
+ if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
+ node))
+ goto free_bitmap_tags;
return tags;
-enomem:
- bt_free(&tags->bitmap_tags);
+free_bitmap_tags:
+ sbitmap_queue_free(&tags->bitmap_tags);
+free_tags:
kfree(tags);
return NULL;
}
@@ -666,11 +398,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
if (!tags)
return NULL;
- if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
- kfree(tags);
- return NULL;
- }
-
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
@@ -679,19 +406,11 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
- bt_free(&tags->bitmap_tags);
- bt_free(&tags->breserved_tags);
- free_cpumask_var(tags->cpumask);
+ sbitmap_queue_free(&tags->bitmap_tags);
+ sbitmap_queue_free(&tags->breserved_tags);
kfree(tags);
}
-void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
-{
- unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
-
- *tag = prandom_u32() % depth;
-}
-
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
tdepth -= tags->nr_reserved_tags;
@@ -702,7 +421,8 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
* Don't need (or can't) update reserved tags here, they remain
* static and should never need resizing.
*/
- bt_update_count(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+
blk_mq_tag_wakeup_all(tags, false);
return 0;
}
@@ -726,7 +446,7 @@ u32 blk_mq_unique_tag(struct request *rq)
int hwq = 0;
if (q->mq_ops) {
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
@@ -746,7 +466,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
- tags->bitmap_tags.bits_per_word);
+ 1U << tags->bitmap_tags.sb.shift);
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
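The hunks above replace the open-coded blk_mq_bitmap_tags allocator with the generic sbitmap_queue from lib/sbitmap. As a minimal sketch (not part of the patch) of that API, using only the calls visible in these hunks — example_tag_cycle() and the depth of 128 are illustrative names/values:

#include <linux/sbitmap.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Illustrative only -- exercises the sbitmap_queue calls used above. */
static int example_tag_cycle(int numa_node)
{
	struct sbitmap_queue sbq;
	int tag, ret;

	/* depth 128, default shift (-1), no round-robin allocation */
	ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL, numa_node);
	if (ret)
		return ret;

	tag = __sbitmap_queue_get(&sbq);	/* returns -1 when no bit is free */
	if (tag >= 0)
		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);
	return 0;
}

The per-cpu allocation hint and wake-up batching live inside sbitmap_queue, which is why the old last_tag cache and the wake_cnt/wake_index bookkeeping disappear in this patch.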
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index d468a79f2c4a..d1662734dc53 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -3,31 +3,6 @@
#include "blk-mq.h"
-enum {
- BT_WAIT_QUEUES = 8,
- BT_WAIT_BATCH = 8,
-};
-
-struct bt_wait_state {
- atomic_t wait_cnt;
- wait_queue_head_t wait;
-} ____cacheline_aligned_in_smp;
-
-#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
-#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
-
-struct blk_mq_bitmap_tags {
- unsigned int depth;
- unsigned int wake_cnt;
- unsigned int bits_per_word;
-
- unsigned int map_nr;
- struct blk_align_bitmap *map;
-
- atomic_t wake_index;
- struct bt_wait_state *bs;
-};
-
/*
* Tag address space map.
*/
@@ -37,14 +12,11 @@ struct blk_mq_tags {
atomic_t active_queues;
- struct blk_mq_bitmap_tags bitmap_tags;
- struct blk_mq_bitmap_tags breserved_tags;
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
struct request **rqs;
struct list_head page_list;
-
- int alloc_policy;
- cpumask_var_t cpumask;
};
@@ -52,15 +24,23 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ unsigned int tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
-extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv);
+static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
+ struct blk_mq_hw_ctx *hctx)
+{
+ if (!hctx)
+ return &bt->ws[0];
+ return sbq_wait_ptr(bt, &hctx->wait_index);
+}
+
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
BLK_MQ_TAG_CACHE_MAX = 64,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c207fa9870eb..ddc2eed64771 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
+#include <linux/prefetch.h>
#include <trace/events/block.h>
@@ -33,49 +34,28 @@
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
- unsigned int i;
-
- for (i = 0; i < hctx->ctx_map.size; i++)
- if (hctx->ctx_map.map[i].word)
- return true;
-
- return false;
+ return sbitmap_any_bit_set(&hctx->ctx_map);
}
-static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
-{
- return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
-}
-
-#define CTX_TO_BIT(hctx, ctx) \
- ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
-
/*
* Mark this ctx as having pending work in this hardware queue
*/
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- struct blk_align_bitmap *bm = get_bm(hctx, ctx);
-
- if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
- set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+ if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
+ sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- struct blk_align_bitmap *bm = get_bm(hctx, ctx);
-
- clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+ sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}
void blk_mq_freeze_queue_start(struct request_queue *q)
@@ -244,21 +224,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
- if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
- __blk_mq_run_hw_queue(hctx);
- blk_mq_put_ctx(ctx);
-
- ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
- ctx = alloc_data.ctx;
- }
blk_mq_put_ctx(ctx);
+
if (!rq) {
blk_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
@@ -333,7 +303,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- blk_mq_put_tag(hctx, tag, &ctx->last_tag);
+ blk_mq_put_tag(hctx, ctx, tag);
blk_queue_exit(q);
}
@@ -349,11 +319,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
void blk_mq_free_request(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
-
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
- blk_mq_free_hctx_request(hctx, rq);
+ blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -513,7 +479,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
static void blk_mq_requeue_work(struct work_struct *work)
{
struct request_queue *q =
- container_of(work, struct request_queue, requeue_work);
+ container_of(work, struct request_queue, requeue_work.work);
LIST_HEAD(rq_list);
struct request *rq, *next;
unsigned long flags;
@@ -568,16 +534,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_cancel_requeue_work(struct request_queue *q)
{
- cancel_work_sync(&q->requeue_work);
+ cancel_delayed_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
- kblockd_schedule_work(&q->requeue_work);
+ kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+ unsigned long msecs)
+{
+ kblockd_schedule_delayed_work(&q->requeue_work,
+ msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
void blk_mq_abort_requeue_list(struct request_queue *q)
{
unsigned long flags;
@@ -600,8 +574,10 @@ EXPORT_SYMBOL(blk_mq_abort_requeue_list);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
- if (tag < tags->nr_tags)
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
return tags->rqs[tag];
+ }
return NULL;
}
@@ -756,38 +732,44 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
return false;
}
+struct flush_busy_ctx_data {
+ struct blk_mq_hw_ctx *hctx;
+ struct list_head *list;
+};
+
+static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
+{
+ struct flush_busy_ctx_data *flush_data = data;
+ struct blk_mq_hw_ctx *hctx = flush_data->hctx;
+ struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+
+ sbitmap_clear_bit(sb, bitnr);
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(&ctx->rq_list, flush_data->list);
+ spin_unlock(&ctx->lock);
+ return true;
+}
+
/*
* Process software queues that have been marked busy, splicing them
* to the for-dispatch
*/
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
- struct blk_mq_ctx *ctx;
- int i;
-
- for (i = 0; i < hctx->ctx_map.size; i++) {
- struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
- unsigned int off, bit;
-
- if (!bm->word)
- continue;
+ struct flush_busy_ctx_data data = {
+ .hctx = hctx,
+ .list = list,
+ };
- bit = 0;
- off = i * hctx->ctx_map.bits_per_word;
- do {
- bit = find_next_bit(&bm->word, bm->depth, bit);
- if (bit >= bm->depth)
- break;
+ sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
+}
- ctx = hctx->ctxs[bit + off];
- clear_bit(bit, &bm->word);
- spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_list, list);
- spin_unlock(&ctx->lock);
+static inline unsigned int queued_to_index(unsigned int queued)
+{
+ if (!queued)
+ return 0;
- bit++;
- } while (1);
- }
+ return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
/*
@@ -878,10 +860,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
dptr = &driver_list;
}
- if (!queued)
- hctx->dispatched[0]++;
- else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
- hctx->dispatched[ilog2(queued) + 1]++;
+ hctx->dispatched[queued_to_index(queued)]++;
/*
* Any items that need requeuing? Stuff them into hctx->dispatch,
@@ -937,7 +916,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
!blk_mq_hw_queue_mapped(hctx)))
return;
- if (!async) {
+ if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
@@ -948,8 +927,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
put_cpu();
}
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
- &hctx->run_work, 0);
+ kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
}
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -970,7 +948,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
- cancel_delayed_work(&hctx->run_work);
+ cancel_work(&hctx->run_work);
cancel_delayed_work(&hctx->delay_work);
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
@@ -1023,7 +1001,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
- hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+ hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
__blk_mq_run_hw_queue(hctx);
}
@@ -1076,9 +1054,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
@@ -1095,12 +1071,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
bool from_schedule)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
trace_block_unplug(q, depth, !from_schedule);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
@@ -1234,26 +1208,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
trace_block_getrq(q, bio, op);
- blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
+ blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- if (unlikely(!rq)) {
- __blk_mq_run_hw_queue(hctx);
- blk_mq_put_ctx(ctx);
- trace_block_sleeprq(q, bio, op);
-
- ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- ctx = alloc_data.ctx;
- hctx = alloc_data.hctx;
- }
hctx->queued++;
data->hctx = hctx;
@@ -1265,8 +1227,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
- rq->mq_ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
struct blk_mq_queue_data bd = {
.rq = rq,
.list = NULL,
@@ -1470,15 +1431,6 @@ run_queue:
return cookie;
}
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
- return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
@@ -1606,42 +1558,18 @@ fail:
return NULL;
}
-static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
-{
- kfree(bitmap->map);
-}
-
-static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
-{
- unsigned int bpw = 8, total, num_maps, i;
-
- bitmap->bits_per_word = bpw;
-
- num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
- bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
- GFP_KERNEL, node);
- if (!bitmap->map)
- return -ENOMEM;
-
- total = nr_cpu_ids;
- for (i = 0; i < num_maps; i++) {
- bitmap->map[i].depth = min(total, bitmap->bits_per_word);
- total -= bitmap->map[i].depth;
- }
-
- return 0;
-}
-
/*
* 'cpu' is going away. splice any existing rq_list entries from this
* software queue to the hw queue dispatch list, and ensure that it
* gets run.
*/
-static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
+static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
+ struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
+ hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
spin_lock(&ctx->lock);
@@ -1652,30 +1580,20 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
spin_unlock(&ctx->lock);
if (list_empty(&tmp))
- return NOTIFY_OK;
+ return 0;
spin_lock(&hctx->lock);
list_splice_tail_init(&tmp, &hctx->dispatch);
spin_unlock(&hctx->lock);
blk_mq_run_hw_queue(hctx, true);
- return NOTIFY_OK;
+ return 0;
}
-static int blk_mq_hctx_notify(void *data, unsigned long action,
- unsigned int cpu)
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
- struct blk_mq_hw_ctx *hctx = data;
-
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- return blk_mq_hctx_cpu_offline(hctx, cpu);
-
- /*
- * In case of CPU online, tags may be reallocated
- * in blk_mq_map_swqueue() after mapping is updated.
- */
-
- return NOTIFY_OK;
+ cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+ &hctx->cpuhp_dead);
}
/* hctx->ctxs will be freed in queue's release handler */
@@ -1695,9 +1613,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
- blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
- blk_mq_free_bitmap(&hctx->ctx_map);
+ sbitmap_free(&hctx->ctx_map);
}
static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -1734,7 +1652,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (node == NUMA_NO_NODE)
node = hctx->numa_node = set->numa_node;
- INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
@@ -1742,9 +1660,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue_num = hctx_idx;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
- blk_mq_hctx_notify, hctx);
- blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
hctx->tags = set->tags[hctx_idx];
@@ -1757,7 +1673,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (!hctx->ctxs)
goto unregister_cpu_notifier;
- if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
+ node))
goto free_ctxs;
hctx->nr_ctx = 0;
@@ -1784,12 +1701,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
- blk_mq_free_bitmap(&hctx->ctx_map);
+ sbitmap_free(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
unregister_cpu_notifier:
- blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-
+ blk_mq_remove_cpuhp(hctx);
return -1;
}
@@ -1812,7 +1728,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
if (!cpu_online(i))
continue;
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
/*
* Set local node, IFF we have more than one hw queue. If
@@ -1850,7 +1766,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
@@ -1860,8 +1776,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
- struct blk_mq_ctxmap *map = &hctx->ctx_map;
-
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
@@ -1881,13 +1795,12 @@ static void blk_mq_map_swqueue(struct request_queue *q,
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
- cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
/*
* Set the map size to the number of mapped software queues.
* This is more accurate and more efficient than looping
* over all possibly mapped software queues.
*/
- map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
+ sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
/*
* Initialize batch roundrobin counts
@@ -1975,7 +1888,6 @@ void blk_mq_release(struct request_queue *q)
kfree(hctx);
}
- kfree(q->mq_map);
q->mq_map = NULL;
kfree(q->queue_hw_ctx);
@@ -2074,9 +1986,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->queue_hw_ctx)
goto err_percpu;
- q->mq_map = blk_mq_make_queue_map(set);
- if (!q->mq_map)
- goto err_map;
+ q->mq_map = set->mq_map;
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
@@ -2094,7 +2004,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->sg_reserved_size = INT_MAX;
- INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+ INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->requeue_list);
spin_lock_init(&q->requeue_lock);
@@ -2126,8 +2036,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
return q;
err_hctxs:
- kfree(q->mq_map);
-err_map:
kfree(q->queue_hw_ctx);
err_percpu:
free_percpu(q->queue_ctx);
@@ -2159,8 +2067,6 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_sysfs_unregister(q);
- blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
-
/*
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
* we should change hctx numa_node according to new topology (this
@@ -2172,50 +2078,18 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_sysfs_register(q);
}
-static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+/*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumask as global, as cpu-hotplug operations are invoked
+ * one-by-one and dynamically allocating it could result in a failure.
+ */
+static struct cpumask cpuhp_online_new;
+
+static void blk_mq_queue_reinit_work(void)
{
struct request_queue *q;
- int cpu = (unsigned long)hcpu;
- /*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
- static struct cpumask online_new;
-
- /*
- * Before hotadded cpu starts handling requests, new mappings must
- * be established. Otherwise, these requests in hw queue might
- * never be dispatched.
- *
- * For example, there is a single hw queue (hctx) and two CPU queues
- * (ctx0 for CPU0, and ctx1 for CPU1).
- *
- * Now CPU1 is just onlined and a request is inserted into
- * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
- * still zero.
- *
- * And then while running hw queue, flush_busy_ctxs() finds bit0 is
- * set in pending bitmap and tries to retrieve requests in
- * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0,
- * so the request in ctx1->rq_list is ignored.
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- cpumask_copy(&online_new, cpu_online_mask);
- break;
- case CPU_UP_PREPARE:
- cpumask_copy(&online_new, cpu_online_mask);
- cpumask_set_cpu(cpu, &online_new);
- break;
- default:
- return NOTIFY_OK;
- }
mutex_lock(&all_q_mutex);
-
/*
* We need to freeze and reinit all existing queues. Freezing
* involves synchronous wait for an RCU grace period and doing it
@@ -2236,13 +2110,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
}
list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q, &online_new);
+ blk_mq_queue_reinit(q, &cpuhp_online_new);
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_unfreeze_queue(q);
mutex_unlock(&all_q_mutex);
- return NOTIFY_OK;
+}
+
+static int blk_mq_queue_reinit_dead(unsigned int cpu)
+{
+ cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+ blk_mq_queue_reinit_work();
+ return 0;
+}
+
+/*
+ * Before hotadded cpu starts handling requests, new mappings must be
+ * established. Otherwise, these requests in hw queue might never be
+ * dispatched.
+ *
+ * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
+ * for CPU0, and ctx1 for CPU1).
+ *
+ * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
+ * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
+ *
+ * And then while running hw queue, flush_busy_ctxs() finds bit0 is set in
+ * pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
+ * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
+ * is ignored.
+ */
+static int blk_mq_queue_reinit_prepare(unsigned int cpu)
+{
+ cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+ cpumask_set_cpu(cpu, &cpuhp_online_new);
+ blk_mq_queue_reinit_work();
+ return 0;
}
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
@@ -2299,12 +2203,6 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
}
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
-{
- return tags->cpumask;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
-
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -2313,6 +2211,8 @@ EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ int ret;
+
BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
if (!set->nr_hw_queues)
@@ -2322,7 +2222,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
@@ -2351,17 +2251,35 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (!set->tags)
return -ENOMEM;
- if (blk_mq_alloc_rq_maps(set))
- goto enomem;
+ ret = -ENOMEM;
+ set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+ GFP_KERNEL, set->numa_node);
+ if (!set->mq_map)
+ goto out_free_tags;
+
+ if (set->ops->map_queues)
+ ret = set->ops->map_queues(set);
+ else
+ ret = blk_mq_map_queues(set);
+ if (ret)
+ goto out_free_mq_map;
+
+ ret = blk_mq_alloc_rq_maps(set);
+ if (ret)
+ goto out_free_mq_map;
mutex_init(&set->tag_list_lock);
INIT_LIST_HEAD(&set->tag_list);
return 0;
-enomem:
+
+out_free_mq_map:
+ kfree(set->mq_map);
+ set->mq_map = NULL;
+out_free_tags:
kfree(set->tags);
set->tags = NULL;
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -2374,6 +2292,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
blk_mq_free_rq_map(set, set->tags[i], i);
}
+ kfree(set->mq_map);
+ set->mq_map = NULL;
+
kfree(set->tags);
set->tags = NULL;
}
@@ -2444,10 +2365,12 @@ void blk_mq_enable_hotplug(void)
static int __init blk_mq_init(void)
{
- blk_mq_cpu_init();
-
- hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
+ cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
+ blk_mq_hctx_notify_dead);
+ cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
+ blk_mq_queue_reinit_prepare,
+ blk_mq_queue_reinit_dead);
return 0;
}
subsys_initcall(blk_mq_init);
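The blk-mq-specific CPU notifier is converted above to the multi-instance cpuhp state machine: one state registered at init time, plus one hlist node per hctx. A condensed sketch of that pattern, not part of the patch — my_ctx, my_dead_cb and my_subsys_init are made-up names, and CPUHP_BLK_MQ_DEAD is simply the state id this patch registers:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

struct my_ctx {
	struct hlist_node cpuhp_dead;
};

static int my_dead_cb(unsigned int cpu, struct hlist_node *node)
{
	struct my_ctx *ctx = hlist_entry_safe(node, struct my_ctx, cpuhp_dead);

	/* drain whatever @ctx owned on the now-offline @cpu */
	(void)ctx;
	return 0;
}

static int __init my_subsys_init(void)
{
	/* once per subsystem: only a "dead" (teardown) callback here */
	return cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead",
				       NULL, my_dead_cb);
}

/* per object, e.g. at hctx init/exit time:
 *	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &ctx->cpuhp_dead);
 *	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, &ctx->cpuhp_dead);
 */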
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9087b11037b7..e5d25249028c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -12,8 +12,6 @@ struct blk_mq_ctx {
unsigned int cpu;
unsigned int index_hw;
- unsigned int last_tag ____cacheline_aligned_in_smp;
-
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
unsigned long rq_merged;
@@ -34,24 +32,21 @@ void blk_mq_wake_waiters(struct request_queue *q);
/*
* CPU hotplug helpers
*/
-struct blk_mq_cpu_notifier;
-void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
- int (*fn)(void *, unsigned long, unsigned int),
- void *data);
-void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
-void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
-void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
*/
-extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask);
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
/*
* sysfs helpers
*/
@@ -63,15 +58,6 @@ extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
void blk_mq_release(struct request_queue *q);
-/*
- * Basic implementation of sparser bitmap, allowing the user to spread
- * the bits over more cachelines.
- */
-struct blk_align_bitmap {
- unsigned long word;
- unsigned long depth;
-} ____cacheline_aligned_in_smp;
-
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f87a7e747d36..9cc8d7c5439a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -704,7 +704,7 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_ADD);
if (q->mq_ops)
- blk_mq_register_disk(disk);
+ blk_mq_register_dev(dev, q);
if (!q->request_fn)
return 0;
@@ -729,7 +729,7 @@ void blk_unregister_queue(struct gendisk *disk)
return;
if (q->mq_ops)
- blk_mq_unregister_disk(disk);
+ blk_mq_unregister_dev(disk_to_dev(disk), q);
if (q->request_fn)
elv_unregister_queue(q);
diff --git a/block/blk.h b/block/blk.h
index c37492f5edaa..74444c49078f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue *blk_get_flush_queue(
struct request_queue *q, struct blk_mq_ctx *ctx)
{
- struct blk_mq_hw_ctx *hctx;
-
- if (!q->mq_ops)
- return q->fq;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- return hctx->fq;
+ if (q->mq_ops)
+ return blk_mq_map_queue(q, ctx->cpu)->fq;
+ return q->fq;
}
static inline void __blk_get_queue(struct request_queue *q)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cc2f6dbd4303..5e24d880306c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3042,7 +3042,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (ktime_get_ns() < rq->fifo_time)
rq = NULL;
- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@@ -3420,6 +3419,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
+ if (cfq_cfqq_must_dispatch(cfqq))
+ return true;
+
/*
* Drain async requests before we start sync IO
*/
@@ -3511,15 +3513,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+ rq = cfq_check_fifo(cfqq);
+ if (rq)
+ cfq_mark_cfqq_must_dispatch(cfqq);
+
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
- rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
+ else
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
/*
* insert request into driver dispatch list
@@ -3989,7 +3996,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
return true;
/*
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 08b3ac68952b..f83de99d7d71 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
-
- return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
@@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
-
- return NULL;
+ tx = NULL;
}
+ dmaengine_unmap_put(unmap);
+
+ return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
diff --git a/drivers/Makefile b/drivers/Makefile
index 53abb4a5f736..f0afdfb3c7df 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_SFI) += sfi/
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
obj-y += amba/
+
+obj-y += clk/
# Many drivers will want to use DMA so this has to be made available
# really early.
obj-$(CONFIG_DMADEVICES) += dma/
@@ -142,8 +144,6 @@ obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
-#common clk code
-obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 33201d4de633..535e7828445a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -77,6 +77,9 @@ config ACPI_DEBUGGER_USER
endif
+config ACPI_SPCR_TABLE
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 313f970888e4..9ed087853dee 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
+obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
# processor has its own "processor." module_param namespace
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index f096ab3cb54d..ec4f507b524f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -938,7 +938,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
- .flags = PSTORE_FLAGS_FRAGILE,
+ .flags = PSTORE_FLAGS_DMESG,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 60746ef904e4..f0a029e68d3e 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -457,7 +457,7 @@ static void ghes_do_proc(struct ghes *ghes,
devfn = PCI_DEVFN(pcie_err->device_id.device,
pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(sev);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
/*
* If firmware reset the component to contain
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cea52528aa18..2237d3f24f0e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -31,6 +31,7 @@
#include <linux/sched.h> /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
+#include <linux/cpu.h>
#include <acpi/processor.h>
/*
@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
-static void acpi_safe_halt(void)
+static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
safe_halt();
@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
*
* Caller disables interrupt before call and enables interrupt after return.
*/
-static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
new file mode 100644
index 000000000000..e8d7bc7d4da8
--- /dev/null
+++ b/drivers/acpi/spcr.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012, Intel Corporation
+ * Copyright (c) 2015, Red Hat, Inc.
+ * Copyright (c) 2015, 2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: SPCR: " fmt
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+
+/**
+ * parse_spcr() - parse ACPI SPCR table and add preferred console
+ *
+ * @earlycon: set up earlycon for the console specified by the table
+ *
+ * For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be
+ * defined to parse ACPI SPCR table. As a result of the parsing preferred
+ * console is registered and if @earlycon is true, earlycon is set up.
+ *
+ * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
+ * from arch initialization code as soon as the DT/ACPI decision is made.
+ *
+ */
+int __init parse_spcr(bool earlycon)
+{
+ static char opts[64];
+ struct acpi_table_spcr *table;
+ acpi_size table_size;
+ acpi_status status;
+ char *uart;
+ char *iotype;
+ int baud_rate;
+ int err;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
+ (struct acpi_table_header **)&table,
+ &table_size);
+
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ if (table->header.revision < 2) {
+ err = -ENOENT;
+ pr_err("wrong table version\n");
+ goto done;
+ }
+
+ iotype = table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY ?
+ "mmio" : "io";
+
+ switch (table->interface_type) {
+ case ACPI_DBG2_ARM_SBSA_32BIT:
+ iotype = "mmio32";
+ /* fall through */
+ case ACPI_DBG2_ARM_PL011:
+ case ACPI_DBG2_ARM_SBSA_GENERIC:
+ case ACPI_DBG2_BCM2835:
+ uart = "pl011";
+ break;
+ case ACPI_DBG2_16550_COMPATIBLE:
+ case ACPI_DBG2_16550_SUBSET:
+ uart = "uart";
+ break;
+ default:
+ err = -ENOENT;
+ goto done;
+ }
+
+ switch (table->baud_rate) {
+ case 3:
+ baud_rate = 9600;
+ break;
+ case 4:
+ baud_rate = 19200;
+ break;
+ case 6:
+ baud_rate = 57600;
+ break;
+ case 7:
+ baud_rate = 115200;
+ break;
+ default:
+ err = -ENOENT;
+ goto done;
+ }
+
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
+ table->serial_port.address, baud_rate);
+
+ pr_info("console: %s\n", opts);
+
+ if (earlycon)
+ setup_earlycon(opts);
+
+ err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
+
+done:
+ early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+ return err;
+}
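For reference, with a memory-mapped PL011 (interface type ACPI_DBG2_ARM_PL011, a SYSTEM_MEMORY address space and baud-rate selector 7), the snprintf() above produces an option string of the form below; the address is illustrative only:

	pl011,mmio,0x9000000,115200

setup_earlycon() is handed the whole string, while add_preferred_console() receives "pl011" plus everything after the first comma ("mmio,0x9000000,115200").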
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a5b5c87e2114..a56fa2a1e9aa 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -19,6 +19,7 @@
#include <linux/amba/bus.h>
#include <linux/sizes.h>
#include <linux/limits.h>
+#include <linux/clk/clk-conf.h>
#include <asm/irq.h>
@@ -237,6 +238,10 @@ static int amba_probe(struct device *dev)
int ret;
do {
+ ret = of_clk_set_defaults(dev->of_node, false);
+ if (ret < 0)
+ break;
+
ret = dev_pm_domain_attach(dev, true);
if (ret == -EPROBE_DEFER)
break;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 16288e777ec3..562af94bec35 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -59,7 +59,6 @@ static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -3227,7 +3226,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
- queue_work(binder_deferred_workqueue, &binder_deferred_work);
+ schedule_work(&binder_deferred_work);
}
mutex_unlock(&binder_deferred_lock);
}
@@ -3679,10 +3678,6 @@ static int __init binder_init(void)
{
int ret;
- binder_deferred_workqueue = create_singlethread_workqueue("binder");
- if (!binder_deferred_workqueue)
- return -ENOMEM;
-
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 6339efd32697..f2aaf9e32a36 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1845,8 +1845,9 @@ static int eni_start(struct atm_dev *dev)
/* initialize memory management */
buffer_mem = eni_dev->mem - (buf - eni_dev->ram);
eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2;
- eni_dev->free_list = kmalloc(
- sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL);
+ eni_dev->free_list = kmalloc_array(eni_dev->free_list_size + 1,
+ sizeof(*eni_dev->free_list),
+ GFP_KERNEL);
if (!eni_dev->free_list) {
printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
dev->number);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 75dde903b238..81aaa505862c 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2489,7 +2489,7 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
const struct firmware *firmware;
struct device *device;
- struct fw_header *fw_header;
+ const struct fw_header *fw_header;
const __le32 *fw_data;
u32 fw_size;
u32 __iomem *load_addr;
@@ -2511,9 +2511,9 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e)
return err;
}
- fw_data = (__le32 *) firmware->data;
+ fw_data = (const __le32 *)firmware->data;
fw_size = firmware->size / sizeof(u32);
- fw_header = (struct fw_header *) firmware->data;
+ fw_header = (const struct fw_header *)firmware->data;
load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 0f5cb37636bc..31b513a23ae0 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -779,8 +779,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
G0_RBPS_BS + (group * 32));
/* bitmap table */
- he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
- * sizeof(unsigned long), GFP_KERNEL);
+ he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
+ sizeof(*he_dev->rbpl_table),
+ GFP_KERNEL);
if (!he_dev->rbpl_table) {
hprintk("unable to allocate rbpl bitmap table\n");
return -ENOMEM;
@@ -788,8 +789,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
/* rbpl_virt 64-bit pointers */
- he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
- * sizeof(struct he_buff *), GFP_KERNEL);
+ he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
+ sizeof(*he_dev->rbpl_virt),
+ GFP_KERNEL);
if (!he_dev->rbpl_virt) {
hprintk("unable to allocate rbpl virt table\n");
goto out_free_rbpl_table;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 809dd1e02091..b2756765950e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1885,9 +1885,9 @@ static int open_tx(struct atm_vcc *vcc)
if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
return ret;
}
- }
- else
- printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
+ } else {
+ printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
+ }
iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
IF_EVENT(printk("ia open_tx returning \n");)
@@ -1975,7 +1975,9 @@ static int tx_init(struct atm_dev *dev)
buf_desc_ptr++;
tx_pkt_start += iadev->tx_buf_sz;
}
- iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
+ iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
+ sizeof(*iadev->tx_buf),
+ GFP_KERNEL);
if (!iadev->tx_buf) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_dle;
@@ -1995,8 +1997,9 @@ static int tx_init(struct atm_dev *dev)
sizeof(*cpcs),
DMA_TO_DEVICE);
}
- iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
- sizeof(struct desc_tbl_t), GFP_KERNEL);
+ iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
+ sizeof(*iadev->desc_tbl),
+ GFP_KERNEL);
if (!iadev->desc_tbl) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_all_tx_bufs;
@@ -2124,7 +2127,9 @@ static int tx_init(struct atm_dev *dev)
memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
- iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
+ iadev->testTable = kmalloc_array(iadev->num_vc,
+ sizeof(*iadev->testTable),
+ GFP_KERNEL);
if (!iadev->testTable) {
printk("Get freepage failed\n");
goto err_free_desc_tbl;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 700ed15c2362..c7296b583787 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -370,7 +370,8 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
+ card = kmalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
printk
("nicstar%d: can't allocate memory for device structure.\n",
i);
@@ -611,7 +612,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
for (j = 0; j < card->rct_size; j++)
ns_write_sram(card, j * 4, u32d, 4);
- memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
+ memset(card->vcmap, 0, sizeof(card->vcmap));
for (j = 0; j < NS_FRSCD_NUM; j++)
card->scd2vc[j] = NULL;
@@ -862,7 +863,7 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
return NULL;
- scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
+ scq = kmalloc(sizeof(*scq), GFP_KERNEL);
if (!scq)
return NULL;
scq->org = dma_alloc_coherent(&card->pcidev->dev,
@@ -871,8 +872,9 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
kfree(scq);
return NULL;
}
- scq->skb = kmalloc(sizeof(struct sk_buff *) *
- (size / NS_SCQE_SIZE), GFP_KERNEL);
+ scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
+ sizeof(*scq->skb),
+ GFP_KERNEL);
if (!scq->skb) {
dma_free_coherent(&card->pcidev->dev,
2 * size, scq->org, scq->dma);
@@ -2021,7 +2023,8 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
cell = skb->data;
for (i = ns_rsqe_cellcount(rsqe); i; i--) {
- if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
+ sb = dev_alloc_skb(NS_SMSKBSIZE);
+ if (!sb) {
printk
("nicstar%d: Can't allocate buffers for aal0.\n",
card->index);
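
The nicstar.c hunks, like the eni/he/iphase ones above, size allocations and memsets by the object itself, e.g. sizeof(*card) and sizeof(card->vcmap), rather than by a hand-written type and count product. A tiny sketch of that idiom with made-up types:

#include <stdlib.h>
#include <string.h>

struct vc_map { int tx; int rx; };		/* made-up element type */
struct card   { struct vc_map vcmap[64]; };	/* made-up container   */

/* Sizing by the object keeps both calls correct even if the element
 * type or the array length changes later. */
static struct card *card_alloc(void)
{
	struct card *card = malloc(sizeof(*card));

	if (card)
		memset(card->vcmap, 0, sizeof(card->vcmap));
	return card;
}
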
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index cecfb943762f..d3dc95484161 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -598,12 +598,13 @@ static void close_rx(struct atm_vcc *vcc)
static int start_rx(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev;
- int size,i;
+ int i;
-DPRINTK("start_rx\n");
+ DPRINTK("start_rx\n");
zatm_dev = ZATM_DEV(dev);
- size = sizeof(struct atm_vcc *)*zatm_dev->chans;
- zatm_dev->rx_map = kzalloc(size,GFP_KERNEL);
+ zatm_dev->rx_map = kcalloc(zatm_dev->chans,
+ sizeof(*zatm_dev->rx_map),
+ GFP_KERNEL);
if (!zatm_dev->rx_map) return -ENOMEM;
/* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
@@ -998,8 +999,9 @@ static int start_tx(struct atm_dev *dev)
DPRINTK("start_tx\n");
zatm_dev = ZATM_DEV(dev);
- zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)*
- zatm_dev->chans,GFP_KERNEL);
+ zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
+ sizeof(*zatm_dev->tx_map),
+ GFP_KERNEL);
if (!zatm_dev->tx_map) return -ENOMEM;
zatm_dev->tx_bw = ATM_OC3_PCR;
zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
@@ -1398,7 +1400,7 @@ static int zatm_open(struct atm_vcc *vcc)
DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
vcc->vci);
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- zatm_vcc = kmalloc(sizeof(struct zatm_vcc),GFP_KERNEL);
+ zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
if (!zatm_vcc) {
clear_bit(ATM_VF_ADDR,&vcc->flags);
return -ENOMEM;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 98504ec99c7d..fdf44cac08e6 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -212,6 +212,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
+config DEBUG_TEST_DRIVER_REMOVE
+ bool "Test driver remove calls during probe"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the Driver core to test driver remove functions
+ by calling probe, remove, probe. This tests the remove path without
+ having to unbind the driver or unload the driver module.
+
+ If you are unsure about this, say N here.
+
config SYS_HYPERVISOR
bool
default n
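
The help text above describes the probe, remove, probe cycle the new option enables. A stand-alone sketch of that control flow, with made-up fake_* names; the real logic lands in really_probe() in the dd.c hunks further down:

#include <stdbool.h>

struct fake_dev { bool bound; };		/* made-up device type */

static int  fake_probe(struct fake_dev *d)  { d->bound = true;  return 0; }
static void fake_remove(struct fake_dev *d) { d->bound = false; }

/* Exercise the teardown path once during bring-up: probe, remove,
 * then probe again, so remove() bugs surface without a manual unbind. */
static int probe_with_remove_test(struct fake_dev *d, bool test_remove)
{
	int ret;

re_probe:
	ret = fake_probe(d);
	if (ret)
		return ret;

	if (test_remove) {
		test_remove = false;	/* loop exactly once */
		fake_remove(d);
		goto re_probe;
	}
	return 0;
}
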
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 2ba4cac080c5..95e3ef82f3b7 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -243,7 +243,7 @@ attribute_container_remove_device(struct device *dev,
* @dev: The generic device to run the trigger for
* @fn the function to execute for each classdev.
*
- * This funcion is for executing a trigger when you need to know both
+ * This function is for executing a trigger when you need to know both
* the container and the classdev. If you only care about the
* container, then use attribute_container_trigger() instead.
*/
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 70c5be5b03a7..ce057a568673 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -836,11 +836,29 @@ static struct kobject *get_device_parent(struct device *dev,
return NULL;
}
+static inline bool live_in_glue_dir(struct kobject *kobj,
+ struct device *dev)
+{
+ if (!kobj || !dev->class ||
+ kobj->kset != &dev->class->p->glue_dirs)
+ return false;
+ return true;
+}
+
+static inline struct kobject *get_glue_dir(struct device *dev)
+{
+ return dev->kobj.parent;
+}
+
+/*
+ * Make sure cleaning up the glue dir is the last step; the kobject's
+ * .release handler needs to run while the global lock is held.
+ */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
/* see if we live in a "glue" directory */
- if (!glue_dir || !dev->class ||
- glue_dir->kset != &dev->class->p->glue_dirs)
+ if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
@@ -848,11 +866,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
mutex_unlock(&gdp_mutex);
}
-static void cleanup_device_parent(struct device *dev)
-{
- cleanup_glue_dir(dev, dev->kobj.parent);
-}
-
static int device_add_class_symlinks(struct device *dev)
{
struct device_node *of_node = dev_of_node(dev);
@@ -1028,6 +1041,7 @@ int device_add(struct device *dev)
struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
+ struct kobject *glue_dir = NULL;
dev = get_device(dev);
if (!dev)
@@ -1072,8 +1086,10 @@ int device_add(struct device *dev)
/* first, register with generic layer. */
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
- if (error)
+ if (error) {
+ glue_dir = get_glue_dir(dev);
goto Error;
+ }
/* notify platform of device entry */
if (platform_notify)
@@ -1154,9 +1170,10 @@ done:
device_remove_file(dev, &dev_attr_uevent);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
Error:
- cleanup_device_parent(dev);
+ cleanup_glue_dir(dev, glue_dir);
put_device(parent);
name_error:
kfree(dev->p);
@@ -1232,6 +1249,7 @@ EXPORT_SYMBOL_GPL(put_device);
void device_del(struct device *dev)
{
struct device *parent = dev->parent;
+ struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
/* Notify clients of device removal. This call must come
@@ -1277,8 +1295,9 @@ void device_del(struct device *dev)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_REMOVED_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
- cleanup_device_parent(dev);
+ glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
+ cleanup_glue_dir(dev, glue_dir);
put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
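
The core.c change above records the glue directory before kobject_del() tears the device's kobject down, and only cleans the directory up afterwards. A deliberately simplified sketch of that capture-then-cleanup ordering; plain refcounted structs stand in for kobjects, this is not the driver-core code:

#include <stdlib.h>

struct dir { int refcount; };
struct obj { struct dir *parent; };

static void dir_put(struct dir *dir)
{
	if (dir && --dir->refcount == 0)
		free(dir);
}

/* Capture the parent first, detach the child, and only then drop the
 * parent: cleanup always runs last, on a pointer taken before the
 * child was deleted. */
static void obj_del(struct obj *obj)
{
	struct dir *glue_dir = obj->parent;	/* capture */

	obj->parent = NULL;			/* delete the child's link */
	dir_put(glue_dir);			/* clean up last */
}
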
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 691eeea2f19a..4c28e1a09786 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -371,12 +371,13 @@ int register_cpu(struct cpu *cpu, int num)
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
error = device_register(&cpu->dev);
- if (!error)
- per_cpu(cpu_sys_devices, num) = &cpu->dev;
- if (!error)
- register_cpu_under_node(num, cpu_to_node(num));
+ if (error)
+ return error;
- return error;
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
+ register_cpu_under_node(num, cpu_to_node(num));
+
+ return 0;
}
struct device *get_cpu_device(unsigned cpu)
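
register_cpu() above is reshaped so the error path returns early and the success path reads straight through, instead of chaining repeated if (!error) checks. The same shape in isolation, with stubbed-out steps:

static int  step_one(void) { return 0; }	/* stubs for illustration */
static void step_two(void) { }

static int setup(void)
{
	int error;

	error = step_one();
	if (error)
		return error;	/* bail out early on failure */

	step_two();		/* success path stays linear and unindented */
	return 0;
}
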
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 16688f50729c..d22a7260f42b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -51,7 +51,6 @@
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
-static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/*
@@ -175,7 +174,7 @@ static void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(deferred_wq, &deferred_probe_work);
+ schedule_work(&deferred_probe_work);
}
/**
@@ -211,14 +210,10 @@ void device_unblock_probing(void)
*/
static int deferred_probe_initcall(void)
{
- deferred_wq = create_singlethread_workqueue("deferwq");
- if (WARN_ON(!deferred_wq))
- return -ENOMEM;
-
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
- flush_workqueue(deferred_wq);
+ flush_work(&deferred_probe_work);
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -329,6 +324,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
if (defer_all_probes) {
/*
@@ -346,6 +342,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
drv->bus->name, __func__, drv->name, dev_name(dev));
WARN_ON(!list_empty(&dev->devres_head));
+re_probe:
dev->driver = drv;
/* If using pinctrl, bind pins now before probing */
@@ -383,6 +380,25 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ if (test_remove) {
+ test_remove = false;
+
+ if (dev->bus && dev->bus->remove)
+ dev->bus->remove(dev);
+ else if (drv->remove)
+ drv->remove(dev);
+
+ devres_release_all(dev);
+ driver_sysfs_remove(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+ dev->pm_domain->dismiss(dev);
+ pm_runtime_reinit(dev);
+
+ goto re_probe;
+ }
+
pinctrl_init_done(dev);
if (dev->pm_domain && dev->pm_domain->sync)
@@ -460,8 +476,7 @@ int driver_probe_done(void)
void wait_for_device_probe(void)
{
/* wait for the deferred probe workqueue to finish */
- if (driver_deferred_probe_enable)
- flush_workqueue(deferred_wq);
+ flush_work(&deferred_probe_work);
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bdf28f7dd5e8..640a7e63c453 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -165,6 +165,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
int order = get_order(size);
unsigned long flags;
int pageno;
+ int dma_memory_map;
if (!dev)
return 0;
@@ -187,11 +188,12 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
- if (mem->flags & DMA_MEMORY_MAP)
+ dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
+ spin_unlock_irqrestore(&mem->spinlock, flags);
+ if (dma_memory_map)
memset(*ret, 0, size);
else
memset_io(*ret, 0, size);
- spin_unlock_irqrestore(&mem->spinlock, flags);
return 1;
@@ -261,8 +263,8 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- int count = size >> PAGE_SHIFT;
+ int user_count = vma_pages(vma);
+ int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
*ret = -ENXIO;
if (off < count && user_count <= count - off) {
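
The dma-coherent.c hunk moves the potentially large memset out from under mem->spinlock: the flag the decision needs is sampled while the lock is held, and the clearing happens after the unlock. A sketch of that pattern using a POSIX mutex (the kernel uses a spinlock with interrupts disabled, which is exactly why long work under it is a problem):

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool mapped_flag;	/* stands in for mem->flags & DMA_MEMORY_MAP */

/* Sample the shared state under the lock, drop the lock, then do the
 * expensive clearing outside the critical section. */
static void alloc_and_clear(void *buf, size_t size)
{
	bool mapped;

	pthread_mutex_lock(&lock);
	mapped = mapped_flag;		/* decision made under the lock */
	pthread_mutex_unlock(&lock);

	if (mapped)
		memset(buf, 0, size);	/* long work, no lock held */
}
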
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index d799662f19eb..8f8b68c80986 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -198,10 +198,13 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
flags);
- if (rc == 0)
+ if (rc) {
devres_add(dev, res);
- else
+ rc = 0;
+ } else {
devres_free(res);
+ rc = -ENOMEM;
+ }
return rc;
}
@@ -247,7 +250,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
{
int ret = -ENXIO;
#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
unsigned long off = vma->vm_pgoff;
@@ -334,7 +337,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
return;
}
- unmap_kernel_range((unsigned long)cpu_addr, size);
+ unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
vunmap(cpu_addr);
}
#endif
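
Both dma-coherent.c and dma-mapping.c above now compute the mapping length in pages with vma_pages() and PAGE_ALIGN(size) >> PAGE_SHIFT, so a size that is not a multiple of the page size still rounds up to a full page. A userspace sketch of the same rounding, with a 4096-byte page assumed for the example:

#include <stddef.h>

#define PAGE_SIZE  4096UL	/* assumed page size for the example */
#define PAGE_SHIFT 12

/* Round the byte count up to whole pages before converting to a page
 * count, so e.g. 4097 bytes covers two pages, not one. This mirrors
 * PAGE_ALIGN(size) >> PAGE_SHIFT in the hunks above. */
static unsigned long bytes_to_pages(size_t size)
{
	size_t aligned = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	return aligned >> PAGE_SHIFT;
}
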
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dc75de9059cd..62c63c0c5c22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -361,8 +361,11 @@ store_mem_state(struct device *dev,
err:
unlock_device_hotplug();
- if (ret)
+ if (ret < 0)
return ret;
+ if (ret)
+ return -EINVAL;
+
return count;
}
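
The memory.c fix distinguishes a negative result, which is an errno and is passed through, from any other non-zero result, which becomes -EINVAL; only a clean zero reports the full byte count back to user space. A compact sketch of that convention, with a hypothetical do_state_change() standing in for the real online/offline handler:

#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-in for the real state-change handler. */
static int do_state_change(const char *buf)
{
	(void)buf;
	return 0;
}

/* sysfs-style store: a negative result is an errno and is passed
 * through, any other non-zero result becomes -EINVAL, and success
 * reports the whole write as consumed. */
static long store_state(const char *buf, size_t count)
{
	int ret = do_state_change(buf);

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;
	return (long)count;
}
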
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 076297592754..5917b4b5fb99 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -91,9 +91,13 @@ cleanup_alloc:
devm_kfree(dev, dev->pins);
dev->pins = NULL;
- /* Only return deferrals */
- if (ret != -EPROBE_DEFER)
- ret = 0;
+ /* Return deferrals */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ /* Return serious errors */
+ if (ret == -EINVAL)
+ return ret;
+ /* We ignore errors like -ENOENT meaning no pinctrl state */
- return ret;
+ return 0;
}
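
The pinctrl hook above now propagates probe deferrals and hard -EINVAL failures but still swallows "no pinctrl state" style errors, since default pin states are optional. The same decision table in isolation; EPROBE_DEFER is the kernel-internal value 517 and does not exist in userspace errno.h, so it is redefined here for the sketch:

#include <errno.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, redefined for this sketch */

/* Propagate deferrals and malformed-description errors; treat
 * everything else (typically -ENOENT, no default state) as optional. */
static int filter_pinctrl_result(int ret)
{
	if (ret == -EPROBE_DEFER)
		return ret;
	if (ret == -EINVAL)
		return ret;
	return 0;
}
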
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6482d47deb50..c4af00385502 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -97,7 +97,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = of_irq_get(dev->dev.of_node, num);
- if (ret >= 0 || ret == -EPROBE_DEFER)
+ if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
}
@@ -108,9 +108,14 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
* IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
* settings.
*/
- if (r && r->flags & IORESOURCE_BITS)
- irqd_set_trigger_type(irq_get_irq_data(r->start),
- r->flags & IORESOURCE_BITS);
+ if (r && r->flags & IORESOURCE_BITS) {
+ struct irq_data *irqd;
+
+ irqd = irq_get_irq_data(r->start);
+ if (!irqd)
+ return -ENXIO;
+ irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
+ }
return r ? r->start : -ENXIO;
#endif
@@ -175,7 +180,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
int ret;
ret = of_irq_get_byname(dev->dev.of_node, name);
- if (ret >= 0 || ret == -EPROBE_DEFER)
+ if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
}
@@ -434,6 +439,7 @@ void platform_device_del(struct platform_device *pdev)
int i;
if (pdev) {
+ device_remove_properties(&pdev->dev);
device_del(&pdev->dev);
if (pdev->id_auto) {
@@ -446,8 +452,6 @@ void platform_device_del(struct platform_device *pdev)
if (r->parent)
release_resource(r);
}
-
- device_remove_properties(&pdev->dev);
}
}
EXPORT_SYMBOL_GPL(platform_device_del);
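
platform_get_irq() and platform_get_irq_byname() above now treat 0 from the OF lookup as "not found" rather than a valid interrupt, since IRQ 0 is not a usable Linux IRQ number. A sketch of that check with a hypothetical lookup helper:

#include <errno.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, redefined for this sketch */

static int lookup_irq(void)	/* hypothetical stand-in for of_irq_get() */
{
	return 0;		/* "nothing found" */
}

/* Only strictly positive values are usable IRQ numbers; 0 falls
 * through to the fallback path (or -ENXIO here). */
static int get_irq(void)
{
	int ret = lookup_irq();

	if (ret > 0 || ret == -EPROBE_DEFER)
		return ret;
	return -ENXIO;
}
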
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index a0380338946a..2a4435d76028 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -105,8 +105,8 @@ struct regmap {
bool defer_caching;
- u8 read_flag_mask;
- u8 write_flag_mask;
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
/* number of bits to (left) shift the reg value when formatting*/
int reg_shift;
@@ -173,6 +173,7 @@ struct regcache_ops {
int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
};
+bool regmap_cached(struct regmap *map, unsigned int reg);
bool regmap_writeable(struct regmap *map, unsigned int reg);
bool regmap_readable(struct regmap *map, unsigned int reg);
bool regmap_volatile(struct regmap *map, unsigned int reg);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 1ee3d40861c7..36ce3511c733 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -77,6 +77,17 @@ static void regmap_debugfs_free_dump_cache(struct regmap *map)
}
}
+static bool regmap_printable(struct regmap *map, unsigned int reg)
+{
+ if (regmap_precious(map, reg))
+ return false;
+
+ if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
+ return false;
+
+ return true;
+}
+
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
@@ -105,8 +116,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
if (list_empty(&map->debugfs_off_cache)) {
for (; i <= map->max_register; i += map->reg_stride) {
/* Skip unprinted registers, closing off cache entry */
- if (!regmap_readable(map, i) ||
- regmap_precious(map, i)) {
+ if (!regmap_printable(map, i)) {
if (c) {
c->max = p - 1;
c->max_reg = i - map->reg_stride;
@@ -204,7 +214,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
for (i = start_reg; i <= to; i += map->reg_stride) {
- if (!regmap_readable(map, i))
+ if (!regmap_readable(map, i) && !regmap_cached(map, i))
continue;
if (regmap_precious(map, i))
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index e964d068874d..ae63bb0875ea 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -93,6 +93,29 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
return true;
}
+bool regmap_cached(struct regmap *map, unsigned int reg)
+{
+ int ret;
+ unsigned int val;
+
+ if (map->cache == REGCACHE_NONE)
+ return false;
+
+ if (!map->cache_ops)
+ return false;
+
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ map->lock(map->lock_arg);
+ ret = regcache_read(map, reg, &val);
+ map->unlock(map->lock_arg);
+ if (ret)
+ return false;
+
+ return true;
+}
+
bool regmap_readable(struct regmap *map, unsigned int reg)
{
if (!map->reg_read)
@@ -749,6 +772,9 @@ struct regmap *__regmap_init(struct device *dev,
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_16_be;
break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_16_le;
+ break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_reg = regmap_format_16_native;
break;
@@ -768,6 +794,9 @@ struct regmap *__regmap_init(struct device *dev,
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_32_be;
break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_32_le;
+ break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_reg = regmap_format_32_native;
break;
@@ -782,6 +811,9 @@ struct regmap *__regmap_init(struct device *dev,
case REGMAP_ENDIAN_BIG:
map->format.format_reg = regmap_format_64_be;
break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_64_le;
+ break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_reg = regmap_format_64_native;
break;
@@ -1296,12 +1328,26 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
return 0;
}
+static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
+ unsigned long mask)
+{
+ u8 *buf;
+ int i;
+
+ if (!mask || !map->work_buf)
+ return;
+
+ buf = map->work_buf;
+
+ for (i = 0; i < max_bytes; i++)
+ buf[i] |= (mask >> (8 * i)) & 0xff;
+}
+
int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
struct regmap_range_node *range;
unsigned long flags;
- u8 *u8 = map->work_buf;
void *work_val = map->work_buf + map->format.reg_bytes +
map->format.pad_bytes;
void *buf;
@@ -1370,8 +1416,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
map->format.format_reg(map->work_buf, reg, map->reg_shift);
-
- u8[0] |= map->write_flag_mask;
+ regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+ map->write_flag_mask);
/*
* Essentially all I/O mechanisms will be faster with a single
@@ -2251,7 +2297,6 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
struct regmap_range_node *range;
- u8 *u8 = map->work_buf;
int ret;
WARN_ON(!map->bus);
@@ -2268,15 +2313,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
}
map->format.format_reg(map->work_buf, reg, map->reg_shift);
-
- /*
- * Some buses or devices flag reads by setting the high bits in the
- * register address; since it's always the high bits for all
- * current formats we can do this here rather than in
- * formatting. This may break if we get interesting formats.
- */
- u8[0] |= map->read_flag_mask;
-
+ regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+ map->read_flag_mask);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->bus->read(map->bus_context, map->work_buf,
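
The regmap change widens read_flag_mask and write_flag_mask to unsigned long and ORs the mask into the first reg_bytes of the work buffer one byte at a time, instead of assuming the flag fits in the single top byte. The loop in isolation, in plain C without the regmap types; the extra bound on sizeof(mask) is only there to keep the shift well defined in a standalone sketch:

#include <stddef.h>
#include <stdint.h>

/* OR a little-endian view of `mask` into the first max_bytes of buf:
 * byte i receives bits 8*i .. 8*i+7 of the mask. */
static void set_flag_mask(uint8_t *buf, size_t max_bytes, unsigned long mask)
{
	size_t i;

	if (!mask || !buf)
		return;

	for (i = 0; i < max_bytes && i < sizeof(mask); i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}
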
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 75b98aad6faf..b63f23e6ad61 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -6,7 +6,6 @@
*/
#include <linux/sysfs.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/slab.h>
@@ -160,11 +159,3 @@ static int __init soc_bus_register(void)
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
-
-static void __exit soc_bus_unregister(void)
-{
- ida_destroy(&soc_ida);
-
- bus_unregister(&soc_bus_type);
-}
-module_exit(soc_bus_unregister);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 921ce1834673..b4f6520e74f0 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -36,12 +36,31 @@ u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
}
EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);
+static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
+ WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
 +		/* 53573B0 and 53573B1 have a buggy PMU watchdog: it can
 +		 * be enabled but the timer can't be bumped. Use the CC
 +		 * one instead.
 +		 */
+ return false;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
u32 nb;
- if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bcma_core_cc_has_pmu_watchdog(cc)) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
nb = 32;
else if (cc->core->id.rev < 26)
@@ -95,9 +114,16 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
struct bcm47xx_wdt wdt = {};
struct platform_device *pdev;
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
+ bus->chipinfo.rev <= 1) {
+ pr_debug("No watchdog on 53573A0 / 53573A1\n");
+ return 0;
+ }
+
wdt.driver_data = cc;
wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
@@ -105,7 +131,7 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
- cc->core->bus->num, &wdt,
+ bus->num, &wdt,
sizeof(wdt));
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -217,7 +243,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
u32 maxt;
maxt = bcma_chipco_watchdog_get_max_timer(cc);
- if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bcma_core_cc_has_pmu_watchdog(cc)) {
if (ticks == 1)
ticks = 2;
else if (ticks > maxt)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 1f635471f318..2c1798e38abd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -209,6 +209,8 @@ static void bcma_of_fill_device(struct platform_device *parent,
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
+
+ of_dma_configure(&core->dev, node);
}
unsigned int bcma_core_irq(struct bcma_device *core, int num)
@@ -248,12 +250,12 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
- core->dev.dma_mask = &core->dev.coherent_dma_mask;
- if (bus->host_pdev) {
+ if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
core->dma_dev = &bus->host_pdev->dev;
core->dev.parent = &bus->host_pdev->dev;
bcma_of_fill_device(bus->host_pdev, core);
} else {
+ core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
}
break;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9f2107f7095..cbdb3b162718 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1703,7 +1703,6 @@ static int loop_init_request(void *data, struct request *rq,
static struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = loop_init_request,
};
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 2aca98e8e427..3cfd879267b2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3686,7 +3686,7 @@ static int mtip_block_open(struct block_device *dev, fmode_t mode)
return -ENODEV;
}
-void mtip_block_release(struct gendisk *disk, fmode_t mode)
+static void mtip_block_release(struct gendisk *disk, fmode_t mode)
{
}
@@ -3895,7 +3895,6 @@ exit_handler:
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
.complete = mtip_softirq_done_fn,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a9e398019f38..ba405b55329f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -34,33 +34,29 @@
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/blk-mq.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
+#define NBD_TIMEDOUT 0
+#define NBD_DISCONNECT_REQUESTED 1
+
struct nbd_device {
u32 flags;
+ unsigned long runtime_flags;
struct socket * sock; /* If == NULL, device is not ready, yet */
int magic;
- spinlock_t queue_lock;
- struct list_head queue_head; /* Requests waiting result */
- struct request *active_req;
- wait_queue_head_t active_wq;
- struct list_head waiting_queue; /* Requests to be sent */
- wait_queue_head_t waiting_wq;
+ struct blk_mq_tag_set tag_set;
struct mutex tx_lock;
struct gendisk *disk;
int blksize;
loff_t bytesize;
- int xmit_timeout;
- bool timedout;
- bool disconnect; /* a disconnect has been requested by user */
- struct timer_list timeout_timer;
/* protects initialization and shutdown of the socket */
spinlock_t sock_lock;
struct task_struct *task_recv;
@@ -71,6 +67,11 @@ struct nbd_device {
#endif
};
+struct nbd_cmd {
+ struct nbd_device *nbd;
+ struct list_head list;
+};
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
@@ -83,18 +84,6 @@ static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
-/*
- * Use just one lock (or at most 1 per NIC). Two arguments for this:
- * 1. Each NIC is essentially a synchronization point for all servers
- * accessed through that NIC so there's no need to have more locks
- * than NICs anyway.
- * 2. More locks lead to more "Dirty cache line bouncing" which will slow
- * down each lock to the point where they're actually slower than just
- * a single lock.
- * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
- */
-static DEFINE_SPINLOCK(nbd_lock);
-
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
return disk_to_dev(nbd->disk);
@@ -153,18 +142,16 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
return 0;
}
-static void nbd_end_request(struct nbd_device *nbd, struct request *req)
+static void nbd_end_request(struct nbd_cmd *cmd)
{
+ struct nbd_device *nbd = cmd->nbd;
+ struct request *req = blk_mq_rq_from_pdu(cmd);
int error = req->errors ? -EIO : 0;
- struct request_queue *q = req->q;
- unsigned long flags;
- dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+ dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
error ? "failed" : "done");
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_end_request_all(req, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_complete_request(req, error);
}
/*
@@ -172,40 +159,49 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- spin_lock_irq(&nbd->sock_lock);
+ struct socket *sock;
+
+ spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
spin_unlock_irq(&nbd->sock_lock);
return;
}
+ sock = nbd->sock;
dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
- sockfd_put(nbd->sock);
nbd->sock = NULL;
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
- del_timer(&nbd->timeout_timer);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sockfd_put(sock);
}
-static void nbd_xmit_timeout(unsigned long arg)
+static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ bool reserved)
{
- struct nbd_device *nbd = (struct nbd_device *)arg;
- unsigned long flags;
-
- if (list_empty(&nbd->queue_head))
- return;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ struct socket *sock = NULL;
- spin_lock_irqsave(&nbd->sock_lock, flags);
+ spin_lock(&nbd->sock_lock);
- nbd->timedout = true;
+ set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
- if (nbd->sock)
- kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+ if (nbd->sock) {
+ sock = nbd->sock;
+ get_file(sock->file);
+ }
- spin_unlock_irqrestore(&nbd->sock_lock, flags);
+ spin_unlock(&nbd->sock_lock);
+ if (sock) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sockfd_put(sock);
+ }
+ req->errors++;
dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+ return BLK_EH_HANDLED;
}
/*
@@ -255,9 +251,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
tsk_restore_flags(current, pflags, PF_MEMALLOC);
- if (!send && nbd->xmit_timeout)
- mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
-
return result;
}
@@ -273,8 +266,9 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
}
/* always call with the tx_lock held */
-static int nbd_send_req(struct nbd_device *nbd, struct request *req)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
int result, flags;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
@@ -298,10 +292,10 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &req, sizeof(req));
+ memcpy(request.handle, &req->tag, sizeof(req->tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
- req, nbdcmd_to_ascii(type),
+ cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, 1, &request, sizeof(request),
(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
@@ -323,7 +317,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
- req, bvec.bv_len);
+ cmd, bvec.bv_len);
result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
@@ -336,29 +330,6 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
return 0;
}
-static struct request *nbd_find_request(struct nbd_device *nbd,
- struct request *xreq)
-{
- struct request *req, *tmp;
- int err;
-
- err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
- if (unlikely(err))
- return ERR_PTR(err);
-
- spin_lock(&nbd->queue_lock);
- list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
- if (req != xreq)
- continue;
- list_del_init(&req->queuelist);
- spin_unlock(&nbd->queue_lock);
- return req;
- }
- spin_unlock(&nbd->queue_lock);
-
- return ERR_PTR(-ENOENT);
-}
-
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
int result;
@@ -370,11 +341,14 @@ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
}
/* NULL returned = something went wrong, inform userspace */
-static struct request *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
{
int result;
struct nbd_reply reply;
- struct request *req;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
+ u16 hwq;
+ int tag;
reply.magic = 0;
result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
@@ -390,25 +364,27 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-EPROTO);
}
- req = nbd_find_request(nbd, *(struct request **)reply.handle);
- if (IS_ERR(req)) {
- result = PTR_ERR(req);
- if (result != -ENOENT)
- return ERR_PTR(result);
+ memcpy(&tag, reply.handle, sizeof(int));
- dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
- reply.handle);
- return ERR_PTR(-EBADR);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < nbd->tag_set.nr_hw_queues)
+ req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(tag));
+ if (!req || !blk_mq_request_started(req)) {
+ dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
+ tag, req);
+ return ERR_PTR(-ENOENT);
}
+ cmd = blk_mq_rq_to_pdu(req);
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
req->errors++;
- return req;
+ return cmd;
}
- dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
@@ -419,13 +395,13 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
req->errors++;
- return req;
+ return cmd;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
- req, bvec.bv_len);
+ cmd, bvec.bv_len);
}
}
- return req;
+ return cmd;
}
static ssize_t pid_show(struct device *dev,
@@ -444,7 +420,7 @@ static struct device_attribute pid_attr = {
static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
- struct request *req;
+ struct nbd_cmd *cmd;
int ret;
BUG_ON(nbd->magic != NBD_MAGIC);
@@ -460,13 +436,13 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_update(nbd, bdev);
while (1) {
- req = nbd_read_stat(nbd);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
+ cmd = nbd_read_stat(nbd);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
break;
}
- nbd_end_request(nbd, req);
+ nbd_end_request(cmd);
}
nbd_size_clear(nbd, bdev);
@@ -475,44 +451,37 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
return ret;
}
-static void nbd_clear_que(struct nbd_device *nbd)
+static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
- struct request *req;
+ struct nbd_cmd *cmd;
+
+ if (!blk_mq_request_started(req))
+ return;
+ cmd = blk_mq_rq_to_pdu(req);
+ req->errors++;
+ nbd_end_request(cmd);
+}
+static void nbd_clear_que(struct nbd_device *nbd)
+{
BUG_ON(nbd->magic != NBD_MAGIC);
/*
* Because we have set nbd->sock to NULL under the tx_lock, all
- * modifications to the list must have completed by now. For
- * the same reason, the active_req must be NULL.
- *
- * As a consequence, we don't need to take the spin lock while
- * purging the list here.
+ * modifications to the list must have completed by now.
*/
BUG_ON(nbd->sock);
- BUG_ON(nbd->active_req);
- while (!list_empty(&nbd->queue_head)) {
- req = list_entry(nbd->queue_head.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- req->errors++;
- nbd_end_request(nbd, req);
- }
-
- while (!list_empty(&nbd->waiting_queue)) {
- req = list_entry(nbd->waiting_queue.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- req->errors++;
- nbd_end_request(nbd, req);
- }
+ blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
-static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
+static void nbd_handle_cmd(struct nbd_cmd *cmd)
{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct nbd_device *nbd = cmd->nbd;
+
if (req->cmd_type != REQ_TYPE_FS)
goto error_out;
@@ -526,6 +495,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
req->errors = 0;
mutex_lock(&nbd->tx_lock);
+ nbd->task_send = current;
if (unlikely(!nbd->sock)) {
mutex_unlock(&nbd->tx_lock);
dev_err(disk_to_dev(nbd->disk),
@@ -533,106 +503,30 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
goto error_out;
}
- nbd->active_req = req;
-
- if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
- mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
-
- if (nbd_send_req(nbd, req) != 0) {
+ if (nbd_send_cmd(nbd, cmd) != 0) {
dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
req->errors++;
- nbd_end_request(nbd, req);
- } else {
- spin_lock(&nbd->queue_lock);
- list_add_tail(&req->queuelist, &nbd->queue_head);
- spin_unlock(&nbd->queue_lock);
+ nbd_end_request(cmd);
}
- nbd->active_req = NULL;
+ nbd->task_send = NULL;
mutex_unlock(&nbd->tx_lock);
- wake_up_all(&nbd->active_wq);
return;
error_out:
req->errors++;
- nbd_end_request(nbd, req);
+ nbd_end_request(cmd);
}
-static int nbd_thread_send(void *data)
+static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct nbd_device *nbd = data;
- struct request *req;
-
- nbd->task_send = current;
-
- set_user_nice(current, MIN_NICE);
- while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
- /* wait for something to do */
- wait_event_interruptible(nbd->waiting_wq,
- kthread_should_stop() ||
- !list_empty(&nbd->waiting_queue));
-
- /* extract request */
- if (list_empty(&nbd->waiting_queue))
- continue;
-
- spin_lock_irq(&nbd->queue_lock);
- req = list_entry(nbd->waiting_queue.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- spin_unlock_irq(&nbd->queue_lock);
-
- /* handle request */
- nbd_handle_req(nbd, req);
- }
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- nbd->task_send = NULL;
-
- return 0;
-}
-
-/*
- * We always wait for result of write, for now. It would be nice to make it optional
- * in future
- * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
- * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
- */
-
-static void nbd_request_handler(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
-{
- struct request *req;
-
- while ((req = blk_fetch_request(q)) != NULL) {
- struct nbd_device *nbd;
-
- spin_unlock_irq(q->queue_lock);
-
- nbd = req->rq_disk->private_data;
-
- BUG_ON(nbd->magic != NBD_MAGIC);
-
- dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
- req, req->cmd_type);
-
- if (unlikely(!nbd->sock)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
- req->errors++;
- nbd_end_request(nbd, req);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_lock_irq(&nbd->queue_lock);
- list_add_tail(&req->queuelist, &nbd->waiting_queue);
- spin_unlock_irq(&nbd->queue_lock);
-
- wake_up(&nbd->waiting_wq);
-
- spin_lock_irq(q->queue_lock);
- }
+ blk_mq_start_request(bd->rq);
+ nbd_handle_cmd(cmd);
+ return BLK_MQ_RQ_QUEUE_OK;
}
static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
@@ -657,15 +551,13 @@ out:
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
- nbd->disconnect = false;
- nbd->timedout = false;
+ nbd->runtime_flags = 0;
nbd->blksize = 1024;
nbd->bytesize = 0;
set_capacity(nbd->disk, 0);
nbd->flags = 0;
- nbd->xmit_timeout = 0;
+ nbd->tag_set.timeout = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
- del_timer_sync(&nbd->timeout_timer);
}
static void nbd_bdev_reset(struct block_device *bdev)
@@ -700,33 +592,37 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
{
switch (cmd) {
case NBD_DISCONNECT: {
- struct request sreq;
+ struct request *sreq;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
if (!nbd->sock)
return -EINVAL;
+ sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
+ if (!sreq)
+ return -ENOMEM;
+
mutex_unlock(&nbd->tx_lock);
fsync_bdev(bdev);
mutex_lock(&nbd->tx_lock);
- blk_rq_init(NULL, &sreq);
- sreq.cmd_type = REQ_TYPE_DRV_PRIV;
+ sreq->cmd_type = REQ_TYPE_DRV_PRIV;
/* Check again after getting mutex back. */
- if (!nbd->sock)
+ if (!nbd->sock) {
+ blk_mq_free_request(sreq);
return -EINVAL;
+ }
- nbd->disconnect = true;
+ set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
- nbd_send_req(nbd, &sreq);
+ nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
+ blk_mq_free_request(sreq);
return 0;
}
case NBD_CLEAR_SOCK:
sock_shutdown(nbd);
nbd_clear_que(nbd);
- BUG_ON(!list_empty(&nbd->queue_head));
- BUG_ON(!list_empty(&nbd->waiting_queue));
kill_bdev(bdev);
return 0;
@@ -758,13 +654,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
case NBD_SET_TIMEOUT:
- nbd->xmit_timeout = arg * HZ;
- if (arg)
- mod_timer(&nbd->timeout_timer,
- jiffies + nbd->xmit_timeout);
- else
- del_timer_sync(&nbd->timeout_timer);
-
+ nbd->tag_set.timeout = arg * HZ;
return 0;
case NBD_SET_FLAGS:
@@ -772,7 +662,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_DO_IT: {
- struct task_struct *thread;
int error;
if (nbd->task_recv)
@@ -786,18 +675,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_parse_flags(nbd, bdev);
- thread = kthread_run(nbd_thread_send, nbd, "%s",
- nbd_name(nbd));
- if (IS_ERR(thread)) {
- mutex_lock(&nbd->tx_lock);
- nbd->task_recv = NULL;
- return PTR_ERR(thread);
- }
-
nbd_dev_dbg_init(nbd);
error = nbd_thread_recv(nbd, bdev);
nbd_dev_dbg_close(nbd);
- kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
nbd->task_recv = NULL;
@@ -807,9 +687,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kill_bdev(bdev);
nbd_bdev_reset(bdev);
- if (nbd->disconnect) /* user requested, ignore socket errors */
+ /* user requested, ignore socket errors */
+ if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
error = 0;
- if (nbd->timedout)
+ if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
error = -ETIMEDOUT;
nbd_reset(nbd);
@@ -825,10 +706,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_PRINT_DEBUG:
- dev_info(disk_to_dev(nbd->disk),
- "next = %p, prev = %p, head = %p\n",
- nbd->queue_head.next, nbd->queue_head.prev,
- &nbd->queue_head);
+ /*
+ * For compatibility only, we no longer keep a list of
+ * outstanding requests.
+ */
return 0;
}
return -ENOTTY;
@@ -935,7 +816,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
- debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
@@ -987,6 +868,23 @@ static void nbd_dbg_close(void)
#endif
+static int nbd_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->nbd = data;
+ INIT_LIST_HEAD(&cmd->list);
+ return 0;
+}
+
+static struct blk_mq_ops nbd_mq_ops = {
+ .queue_rq = nbd_queue_rq,
+ .init_request = nbd_init_request,
+ .timeout = nbd_xmit_timeout,
+};
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
@@ -1035,16 +933,34 @@ static int __init nbd_init(void)
if (!disk)
goto out;
nbd_dev[i].disk = disk;
+
+ nbd_dev[i].tag_set.ops = &nbd_mq_ops;
+ nbd_dev[i].tag_set.nr_hw_queues = 1;
+ nbd_dev[i].tag_set.queue_depth = 128;
+ nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
+ nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+ nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
+
+ err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
+ if (err) {
+ put_disk(disk);
+ goto out;
+ }
+
/*
* The new linux 2.5 block layer implementation requires
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
+ disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
if (!disk->queue) {
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
+
/*
* Tell the block layer that we are not a rotational device
*/
@@ -1069,16 +985,8 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
- INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
- spin_lock_init(&nbd_dev[i].queue_lock);
spin_lock_init(&nbd_dev[i].sock_lock);
- INIT_LIST_HEAD(&nbd_dev[i].queue_head);
mutex_init(&nbd_dev[i].tx_lock);
- init_timer(&nbd_dev[i].timeout_timer);
- nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
- nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
- init_waitqueue_head(&nbd_dev[i].active_wq);
- init_waitqueue_head(&nbd_dev[i].waiting_wq);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
@@ -1091,6 +999,7 @@ static int __init nbd_init(void)
return 0;
out:
while (i--) {
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
@@ -1110,6 +1019,7 @@ static void __exit nbd_cleanup(void)
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
}
}
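
With the blk-mq conversion above, nbd stops stuffing a request pointer into the 8-byte wire handle and stores the request tag instead, recovering the request from the tag set when the reply arrives. A tiny round-trip sketch of that encoding, not the nbd wire code itself:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Store a 32-bit tag in an 8-byte opaque handle and get it back. */
static void handle_from_tag(uint8_t handle[8], uint32_t tag)
{
	memset(handle, 0, 8);
	memcpy(handle, &tag, sizeof(tag));
}

static uint32_t tag_from_handle(const uint8_t handle[8])
{
	uint32_t tag;

	memcpy(&tag, handle, sizeof(tag));
	return tag;
}

int main(void)
{
	uint8_t handle[8];

	handle_from_tag(handle, 1234);
	assert(tag_from_handle(handle) == 1234);
	return 0;
}
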
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 75a7f88d6717..ba6f4a2e73db 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -34,6 +34,7 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
+ struct nvm_dev *ndev;
struct blk_mq_tag_set tag_set;
struct hrtimer timer;
unsigned int queue_depth;
@@ -393,7 +394,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
};
@@ -414,23 +414,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-static void null_del_dev(struct nullb *nullb)
-{
- list_del_init(&nullb->list);
-
- if (use_lightnvm)
- nvm_unregister(nullb->disk_name);
- else
- del_gendisk(nullb->disk);
- blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_tag_set(&nullb->tag_set);
- if (!use_lightnvm)
- put_disk(nullb->disk);
- cleanup_queues(nullb);
- kfree(nullb);
-}
-
#ifdef CONFIG_NVM
static void null_lnvm_end_io(struct request *rq, int error)
@@ -564,10 +547,58 @@ static struct nvm_dev_ops null_lnvm_dev_ops = {
/* Simulate nvme protocol restriction */
.max_phys_sect = 64,
};
+
+static int null_nvm_register(struct nullb *nullb)
+{
+ struct nvm_dev *dev;
+ int rv;
+
+ dev = nvm_alloc_dev(0);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->q = nullb->q;
+ memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
+ dev->ops = &null_lnvm_dev_ops;
+
+ rv = nvm_register(dev);
+ if (rv) {
+ kfree(dev);
+ return rv;
+ }
+ nullb->ndev = dev;
+ return 0;
+}
+
+static void null_nvm_unregister(struct nullb *nullb)
+{
+ nvm_unregister(nullb->ndev);
+}
#else
-static struct nvm_dev_ops null_lnvm_dev_ops;
+static int null_nvm_register(struct nullb *nullb)
+{
+ return -EINVAL;
+}
+static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */
+static void null_del_dev(struct nullb *nullb)
+{
+ list_del_init(&nullb->list);
+
+ if (use_lightnvm)
+ null_nvm_unregister(nullb);
+ else
+ del_gendisk(nullb->disk);
+ blk_cleanup_queue(nullb->q);
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_tag_set(&nullb->tag_set);
+ if (!use_lightnvm)
+ put_disk(nullb->disk);
+ cleanup_queues(nullb);
+ kfree(nullb);
+}
+
static int null_open(struct block_device *bdev, fmode_t mode)
{
return 0;
@@ -640,11 +671,32 @@ static int init_driver_queues(struct nullb *nullb)
return 0;
}
-static int null_add_dev(void)
+static int null_gendisk_register(struct nullb *nullb)
{
struct gendisk *disk;
- struct nullb *nullb;
sector_t size;
+
+ disk = nullb->disk = alloc_disk_node(1, home_node);
+ if (!disk)
+ return -ENOMEM;
+ size = gb * 1024 * 1024 * 1024ULL;
+ set_capacity(disk, size >> 9);
+
+ disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
+ disk->major = null_major;
+ disk->first_minor = nullb->index;
+ disk->fops = &null_fops;
+ disk->private_data = nullb;
+ disk->queue = nullb->q;
+ strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
+ add_disk(disk);
+ return 0;
+}
+
+static int null_add_dev(void)
+{
+ struct nullb *nullb;
int rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
@@ -716,42 +768,19 @@ static int null_add_dev(void)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (use_lightnvm) {
- rv = nvm_register(nullb->q, nullb->disk_name,
- &null_lnvm_dev_ops);
- if (rv)
- goto out_cleanup_blk_queue;
- goto done;
- }
-
- disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk) {
- rv = -ENOMEM;
- goto out_cleanup_lightnvm;
- }
- size = gb * 1024 * 1024 * 1024ULL;
- set_capacity(disk, size >> 9);
-
- disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
- disk->major = null_major;
- disk->first_minor = nullb->index;
- disk->fops = &null_fops;
- disk->private_data = nullb;
- disk->queue = nullb->q;
- strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (use_lightnvm)
+ rv = null_nvm_register(nullb);
+ else
+ rv = null_gendisk_register(nullb);
- add_disk(disk);
+ if (rv)
+ goto out_cleanup_blk_queue;
-done:
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
-
-out_cleanup_lightnvm:
- if (use_lightnvm)
- nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6c6519f6492a..c1f84df7838b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3621,7 +3621,6 @@ static int rbd_init_request(void *data, struct request *rq,
static struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = rbd_init_request,
};
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 93b1aaa5ba3b..2dc5c96c186a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -542,7 +542,6 @@ static int virtblk_init_request(void *data, struct request *rq,
static struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
};
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 88ef6d4729b4..9908597c5209 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -909,7 +909,6 @@ out_busy:
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
- .map_queue = blk_mq_map_queue,
};
static void blkif_set_queue_limits(struct blkfront_info *info)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index cf50fd2e96df..3cc9bff9d99d 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -180,6 +180,17 @@ config BT_HCIUART_AG6XX
Say Y here to compile support for Intel AG6XX protocol.
+config BT_HCIUART_MRVL
+ bool "Marvell protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ help
+	  Marvell is a serial protocol for communication between the
+	  Bluetooth device and the host. It is required for most Marvell
+	  Bluetooth devices with a UART interface.
+
+ Say Y here to compile support for HCI MRVL protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
@@ -331,4 +342,16 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_QCOMSMD
+ tristate "Qualcomm SMD based HCI support"
+ depends on QCOM_SMD && QCOM_WCNSS_CTRL
+ select BT_QCA
+ help
+ Qualcomm SMD based HCI driver.
+ This driver is used to bridge HCI data onto the shared memory
+ channels to the WCNSS core.
+
+ Say Y here to compile support for HCI over Qualcomm SMD into the
+ kernel or say M to compile as a module.
+
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 9c18939fc5c9..b1fc29a697b7 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
@@ -37,6 +38,7 @@ hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
+hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5b0ef7bbe8ac..5ce6d4176dc3 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -185,10 +185,8 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
data->state = BCM203X_LOAD_MINIDRV;
data->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->urb) {
- BT_ERR("Can't allocate URB");
+ if (!data->urb)
return -ENOMEM;
- }
if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
BT_ERR("Mini driver request failed");
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 4a6208168850..28afd5d585f9 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -55,8 +55,8 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
@@ -224,8 +224,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
new file mode 100644
index 000000000000..08c2c93887c1
--- /dev/null
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+#include <linux/platform_device.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+struct btqcomsmd {
+ struct hci_dev *hdev;
+
+ struct qcom_smd_channel *acl_channel;
+ struct qcom_smd_channel *cmd_channel;
+};
+
+static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type,
+ const void *data, size_t count)
+{
+ struct sk_buff *skb;
+
+ /* Use GFP_ATOMIC as we're in IRQ context */
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ return -ENOMEM;
+ }
+
+ hci_skb_pkt_type(skb) = type;
+ memcpy(skb_put(skb, count), data, count);
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ btq->hdev->stat.byte_rx += count;
+ return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count);
+}
+
+static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count);
+}
+
+static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btqcomsmd *btq = hci_get_drvdata(hdev);
+ int ret;
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_ACLDATA_PKT:
+ ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len);
+ hdev->stat.acl_tx++;
+ hdev->stat.byte_tx += skb->len;
+ break;
+ case HCI_COMMAND_PKT:
+ ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len);
+ hdev->stat.cmd_tx++;
+ break;
+ default:
+ ret = -EILSEQ;
+ break;
+ }
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
+static int btqcomsmd_open(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_close(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_probe(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq;
+ struct hci_dev *hdev;
+ void *wcnss;
+ int ret;
+
+ btq = devm_kzalloc(&pdev->dev, sizeof(*btq), GFP_KERNEL);
+ if (!btq)
+ return -ENOMEM;
+
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL",
+ btqcomsmd_acl_callback);
+ if (IS_ERR(btq->acl_channel))
+ return PTR_ERR(btq->acl_channel);
+
+ btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ btqcomsmd_cmd_callback);
+ if (IS_ERR(btq->cmd_channel))
+ return PTR_ERR(btq->cmd_channel);
+
+ qcom_smd_set_drvdata(btq->acl_channel, btq);
+ qcom_smd_set_drvdata(btq->cmd_channel, btq);
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hci_set_drvdata(hdev, btq);
+ btq->hdev = hdev;
+ SET_HCIDEV_DEV(hdev, &pdev->dev);
+
+ hdev->bus = HCI_SMD;
+ hdev->open = btqcomsmd_open;
+ hdev->close = btqcomsmd_close;
+ hdev->send = btqcomsmd_send;
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+ if (ret < 0) {
+ hci_free_dev(hdev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, btq);
+
+ return 0;
+}
+
+static int btqcomsmd_remove(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq = platform_get_drvdata(pdev);
+
+ hci_unregister_dev(btq->hdev);
+ hci_free_dev(btq->hdev);
+
+ return 0;
+}
+
+static const struct of_device_id btqcomsmd_of_match[] = {
+ { .compatible = "qcom,wcnss-bt", },
+ { },
+};
+
+static struct platform_driver btqcomsmd_driver = {
+ .probe = btqcomsmd_probe,
+ .remove = btqcomsmd_remove,
+ .driver = {
+ .name = "btqcomsmd",
+ .of_match_table = btqcomsmd_of_match,
+ },
+};
+
+module_platform_driver(btqcomsmd_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD HCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 84288938f7f2..fc9b25703c67 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -33,6 +33,7 @@
#define RTL_ROM_LMP_8723B 0x8723
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
+#define RTL_ROM_LMP_8822B 0x8822
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
@@ -78,11 +79,15 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
const unsigned char *patch_length_base, *patch_offset_base;
u32 patch_offset = 0;
u16 patch_length, num_patches;
- const u16 project_id_to_lmp_subver[] = {
- RTL_ROM_LMP_8723A,
- RTL_ROM_LMP_8723B,
- RTL_ROM_LMP_8821A,
- RTL_ROM_LMP_8761A
+ static const struct {
+ __u16 lmp_subver;
+ __u8 id;
+ } project_id_to_lmp_subver[] = {
+ { RTL_ROM_LMP_8723A, 0 },
+ { RTL_ROM_LMP_8723B, 1 },
+ { RTL_ROM_LMP_8821A, 2 },
+ { RTL_ROM_LMP_8761A, 3 },
+ { RTL_ROM_LMP_8822B, 8 },
};
ret = rtl_read_rom_version(hdev, &rom_version);
@@ -134,14 +139,20 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
return -EINVAL;
}
- if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+ /* Find project_id in table */
+ for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
+ if (project_id == project_id_to_lmp_subver[i].id)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
BT_ERR("%s: unknown project id %d", hdev->name, project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+ if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[project_id], lmp_subver);
+ project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
return -EINVAL;
}
@@ -257,6 +268,26 @@ out:
return ret;
}
+static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading %s", hdev->name, name);
+ ret = request_firmware(&fw, name, &hdev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load %s", hdev->name, name);
+ return ret;
+ }
+
+ ret = fw->size;
+ *buff = kmemdup(fw->data, ret, GFP_KERNEL);
+ if (!*buff)
+ ret = -ENOMEM;
+
+ release_firmware(fw);
+
+ return ret;
+}
+
static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
{
const struct firmware *fw;
@@ -296,25 +327,74 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
unsigned char *fw_data = NULL;
const struct firmware *fw;
int ret;
+ int cfg_sz;
+ u8 *cfg_buff = NULL;
+ u8 *tbuff;
+ char *cfg_name = NULL;
+
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8723B:
+ cfg_name = "rtl_bt/rtl8723b_config.bin";
+ break;
+ case RTL_ROM_LMP_8821A:
+ cfg_name = "rtl_bt/rtl8821a_config.bin";
+ break;
+ case RTL_ROM_LMP_8761A:
+ cfg_name = "rtl_bt/rtl8761a_config.bin";
+ break;
+ case RTL_ROM_LMP_8822B:
+ cfg_name = "rtl_bt/rtl8822b_config.bin";
+ break;
+ default:
+ BT_ERR("%s: rtl: no config according to lmp_subver %04x",
+ hdev->name, lmp_subver);
+ break;
+ }
+
+ if (cfg_name) {
+ cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
+ if (cfg_sz < 0)
+ cfg_sz = 0;
+ } else
+ cfg_sz = 0;
BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- return ret;
+ goto err_req_fw;
}
ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0)
goto out;
+ if (cfg_sz) {
+ tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (!tbuff) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(tbuff, fw_data, ret);
+ kfree(fw_data);
+
+ memcpy(tbuff + ret, cfg_buff, cfg_sz);
+ ret += cfg_sz;
+
+ fw_data = tbuff;
+ }
+
+ BT_INFO("cfg_sz %d, total size %d", cfg_sz, ret);
+
ret = rtl_download_firmware(hdev, fw_data, ret);
- kfree(fw_data);
- if (ret < 0)
- goto out;
out:
release_firmware(fw);
+ kfree(fw_data);
+err_req_fw:
+ if (cfg_sz)
+ kfree(cfg_buff);
return ret;
}
@@ -377,6 +457,9 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
case RTL_ROM_LMP_8761A:
return btrtl_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8761a_fw.bin");
+ case RTL_ROM_LMP_8822B:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8822b_fw.bin");
default:
BT_INFO("rtl: assuming no firmware upload needed.");
return 0;
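
The btrtl.c hunk above replaces direct indexing of project_id_to_lmp_subver[] with a search over an id/lmp_subver table, because the new RTL8822B reports project id 8 rather than the next free array slot. A minimal user-space sketch of that lookup follows; the values are copied from the hunk, and RTL_ROM_LMP_8723A is left out because its value is defined outside this diff:

  #include <stdint.h>
  #include <stdio.h>

  static const struct {
      uint16_t lmp_subver;
      uint8_t id;
  } project_id_to_lmp_subver[] = {
      { 0x8723, 1 },  /* RTL_ROM_LMP_8723B */
      { 0x8821, 2 },  /* RTL_ROM_LMP_8821A */
      { 0x8761, 3 },  /* RTL_ROM_LMP_8761A */
      { 0x8822, 8 },  /* RTL_ROM_LMP_8822B: id 8, so direct indexing breaks */
  };

  /* Return 0 and fill *subver if project_id is known, -1 otherwise. */
  static int project_id_to_subver(uint8_t project_id, uint16_t *subver)
  {
      unsigned int i;

      for (i = 0; i < sizeof(project_id_to_lmp_subver) /
                      sizeof(project_id_to_lmp_subver[0]); i++) {
          if (project_id_to_lmp_subver[i].id == project_id) {
              *subver = project_id_to_lmp_subver[i].lmp_subver;
              return 0;
          }
      }
      return -1;  /* unknown project id */
  }

  int main(void)
  {
      uint16_t subver;

      if (!project_id_to_subver(8, &subver))
          printf("project id 8 -> lmp_subver %04x\n", subver);
      return 0;
  }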
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 811f9b97e360..6bd63b84abd0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -62,6 +62,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_REALTEK 0x20000
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
+#define BTUSB_CW6622 0x100000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -248,9 +249,11 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -290,7 +293,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC },
/* CONWISE Technology based adapters with buggy SCO support */
- { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
+ { USB_DEVICE(0x0e5e, 0x6622),
+ .driver_info = BTUSB_BROKEN_ISOC | BTUSB_CW6622},
/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
{ USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
@@ -2221,9 +2225,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Firmware loading interrupted", hdev->name);
- err = -EINTR;
goto done;
}
@@ -2275,7 +2278,7 @@ done:
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Device boot interrupted", hdev->name);
return -EINTR;
}
@@ -2845,6 +2848,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ if (id->driver_info & BTUSB_CW6622)
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+
if (id->driver_info & BTUSB_BCM2045)
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
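
The two btusb.c hunks above (and the matching ones in hci_intel.c further down) change the interruption check after wait_on_bit_timeout() from `err == 1` to `err == -EINTR`, which implies the helper now reports a pending signal as -EINTR; zero still means the bit cleared in time, and any other non-zero value keeps being treated as a timeout. A small stand-alone sketch of that convention; only the 0 and -EINTR cases are actually confirmed by the hunks:

  #include <errno.h>
  #include <stdio.h>

  /* Map a wait_on_bit_timeout()-style result to how the new code treats it. */
  static const char *classify_wait_result(int err)
  {
      if (err == 0)
          return "bit cleared before the timeout";
      if (err == -EINTR)
          return "interrupted by a signal";
      return "treated as a timeout/failure";
  }

  int main(void)
  {
      printf("%d -> %s\n", 0, classify_wait_result(0));
      printf("%d -> %s\n", -EINTR, classify_wait_result(-EINTR));
      printf("%d -> %s\n", 1, classify_wait_result(1)); /* the old check value */
      return 0;
  }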
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 485281b3f167..ef51c9c864c5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -245,6 +245,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
struct ti_st *hst;
long len;
+ int pkt_type;
hst = hci_get_drvdata(hdev);
@@ -258,6 +259,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
* Freeing skb memory is taken care in shared transport layer,
* so don't free skb memory here.
*/
+ pkt_type = hci_skb_pkt_type(skb);
len = hst->st_write(skb);
if (len < 0) {
kfree_skb(skb);
@@ -268,7 +270,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* ST accepted our skb. So, Go ahead and do rest */
hdev->stat.byte_tx += len;
- ti_st_tx_complete(hst, hci_skb_pkt_type(skb));
+ ti_st_tx_complete(hst, pkt_type);
return 0;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 1c97eda8bae3..5ccb90ef0146 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -798,7 +798,7 @@ static int bcm_remove(struct platform_device *pdev)
static const struct hci_uart_proto bcm_proto = {
.id = HCI_UART_BCM,
- .name = "BCM",
+ .name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
.oper_speed = 4000000,
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d7d23ceba4d1..a2c921faaa12 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -90,7 +90,8 @@ struct bcsp_struct {
/* ---- BCSP CRC calculation ---- */
/* Table for calculating CRC for polynomial 0x1021, LSB processed first,
-initial value 0xffff, bits shifted in reverse order. */
+ * initial value 0xffff, bits shifted in reverse order.
+ */
static const u16 crc_table[] = {
0x0000, 0x1081, 0x2102, 0x3183,
@@ -174,7 +175,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
}
static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
- int len, int pkt_type)
+ int len, int pkt_type)
{
struct sk_buff *nskb;
u8 hdr[4], chan;
@@ -213,6 +214,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Vendor specific commands */
if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) {
u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
+
if ((desc & 0xf0) == 0xc0) {
data += HCI_COMMAND_HDR_SIZE + 1;
len -= HCI_COMMAND_HDR_SIZE + 1;
@@ -271,8 +273,8 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Put CRC */
if (bcsp->use_crc) {
bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc);
- bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
- bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff));
+ bcsp_slip_one_byte(nskb, (u8)((bcsp_txmsg_crc >> 8) & 0x00ff));
+ bcsp_slip_one_byte(nskb, (u8)(bcsp_txmsg_crc & 0x00ff));
}
bcsp_slip_msgdelim(nskb);
@@ -287,7 +289,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
struct sk_buff *skb;
/* First of all, check for unreliable messages in the queue,
- since they have priority */
+ * since they have priority
+ */
skb = skb_dequeue(&bcsp->unrel);
if (skb != NULL) {
@@ -414,7 +417,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
/* spot "conf" pkts and reply with a "conf rsp" pkt */
if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
- !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) {
+ !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) {
struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC);
BT_DBG("Found a LE conf pkt");
@@ -428,7 +431,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
}
/* Spot "sync" pkts. If we find one...disaster! */
else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
- !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) {
+ !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) {
BT_ERR("Found a LE sync pkt, card has reset");
}
}
@@ -446,7 +449,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
default:
memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, byte);
bcsp->rx_count--;
}
@@ -457,7 +460,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
case 0xdc:
memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xc0);
bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
bcsp->rx_count--;
@@ -466,7 +469,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
case 0xdd:
memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xdb);
bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
bcsp->rx_count--;
@@ -485,13 +488,28 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
static void bcsp_complete_rx_pkt(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
- int pass_up;
+ int pass_up = 0;
if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */
BT_DBG("Received seqno %u from card", bcsp->rxseq_txack);
- bcsp->rxseq_txack++;
- bcsp->rxseq_txack %= 0x8;
- bcsp->txack_req = 1;
+
+ /* check the rx sequence number is as expected */
+ if ((bcsp->rx_skb->data[0] & 0x07) == bcsp->rxseq_txack) {
+ bcsp->rxseq_txack++;
+ bcsp->rxseq_txack %= 0x8;
+ } else {
+ /* handle a re-transmitted packet or the case where
+ * a packet was missed
+ */
+ BT_ERR("Out-of-order packet arrived, got %u expected %u",
+ bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
+
+ /* do not process out-of-order packet payload */
+ pass_up = 2;
+ }
+
+ /* acknowledge every received reliable packet with the current txack value */
+ bcsp->txack_req = 1;
/* If needed, transmit an ack pkt */
hci_uart_tx_wakeup(hu);
@@ -500,26 +518,33 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07;
BT_DBG("Request for pkt %u from card", bcsp->rxack);
+ /* handle received ACK indications,
+ * including those from out-of-order packets
+ */
bcsp_pkt_cull(bcsp);
- if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
- bcsp->rx_skb->data[0] & 0x80) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
- bcsp->rx_skb->data[0] & 0x80) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
- !(bcsp->rx_skb->data[0] & 0x80)) {
- bcsp_handle_le_pkt(hu);
- pass_up = 0;
- } else
- pass_up = 0;
-
- if (!pass_up) {
+
+ if (pass_up != 2) {
+ if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
+ (bcsp->rx_skb->data[0] & 0x80)) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
+ (bcsp->rx_skb->data[0] & 0x80)) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
+ !(bcsp->rx_skb->data[0] & 0x80)) {
+ bcsp_handle_le_pkt(hu);
+ pass_up = 0;
+ } else {
+ pass_up = 0;
+ }
+ }
+
+ if (pass_up == 0) {
struct hci_event_hdr hdr;
u8 desc = (bcsp->rx_skb->data[1] & 0x0f);
@@ -537,18 +562,23 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
hci_recv_frame(hu->hdev, bcsp->rx_skb);
} else {
BT_ERR("Packet for unknown channel (%u %s)",
- bcsp->rx_skb->data[1] & 0x0f,
- bcsp->rx_skb->data[0] & 0x80 ?
- "reliable" : "unreliable");
+ bcsp->rx_skb->data[1] & 0x0f,
+ bcsp->rx_skb->data[0] & 0x80 ?
+ "reliable" : "unreliable");
kfree_skb(bcsp->rx_skb);
}
} else
kfree_skb(bcsp->rx_skb);
- } else {
+ } else if (pass_up == 1) {
/* Pull out BCSP hdr */
skb_pull(bcsp->rx_skb, 4);
hci_recv_frame(hu->hdev, bcsp->rx_skb);
+ } else {
+ /* ignore packet payload of already ACKed re-transmitted
+ * packets or when a packet was missed in the BCSP window
+ */
+ kfree_skb(bcsp->rx_skb);
}
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -567,7 +597,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
const unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
- hu, count, bcsp->rx_state, bcsp->rx_count);
+ hu, count, bcsp->rx_state, bcsp->rx_count);
ptr = data;
while (count) {
@@ -586,24 +616,14 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
switch (bcsp->rx_state) {
case BCSP_W4_BCSP_HDR:
- if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
- bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
+ if ((0xff & (u8)~(bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
+ bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
}
- if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */
- && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
- BT_ERR("Out-of-order packet arrived, got %u expected %u",
- bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
-
- kfree_skb(bcsp->rx_skb);
- bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
- bcsp->rx_count = 0;
- continue;
- }
bcsp->rx_state = BCSP_W4_DATA;
bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
(bcsp->rx_skb->data[2] << 4); /* May be 0 */
@@ -620,8 +640,8 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
case BCSP_W4_CRC:
if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
BT_ERR("Checksum failed: computed %04x received %04x",
- bitrev16(bcsp->message_crc),
- bscp_get_crc(bcsp));
+ bitrev16(bcsp->message_crc),
+ bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -679,7 +699,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
/* Arrange to retransmit all messages in the relq. */
static void bcsp_timed_event(unsigned long arg)
{
- struct hci_uart *hu = (struct hci_uart *) arg;
+ struct hci_uart *hu = (struct hci_uart *)arg;
struct bcsp_struct *bcsp = hu->priv;
struct sk_buff *skb;
unsigned long flags;
@@ -715,7 +735,7 @@ static int bcsp_open(struct hci_uart *hu)
init_timer(&bcsp->tbcsp);
bcsp->tbcsp.function = bcsp_timed_event;
- bcsp->tbcsp.data = (u_long) hu;
+ bcsp->tbcsp.data = (u_long)hu;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
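
The hci_bcsp.c changes above move the reliable-packet sequence check from bcsp_recv() into bcsp_complete_rx_pkt(): an in-sequence packet advances the 3-bit rxseq_txack counter and its payload is passed up, while an out-of-order (retransmitted or missed) packet is still acknowledged but its payload is dropped (pass_up == 2). A user-space sketch of that window logic, using the hypothetical helper name bcsp_accept_reliable():

  #include <stdbool.h>
  #include <stdio.h>

  struct bcsp_rx_state {
      unsigned int rxseq_txack;   /* next expected sequence number, 0..7 */
      bool txack_req;
  };

  /* Returns true if the payload of a reliable packet should be passed up. */
  static bool bcsp_accept_reliable(struct bcsp_rx_state *s, unsigned char hdr0)
  {
      unsigned int seq = hdr0 & 0x07;

      s->txack_req = true;        /* always ack reliable packets */

      if (seq == s->rxseq_txack) {
          s->rxseq_txack = (s->rxseq_txack + 1) % 8;
          return true;
      }

      /* retransmitted or missed packet: ack only, drop the payload */
      return false;
  }

  int main(void)
  {
      struct bcsp_rx_state s = { .rxseq_txack = 0 };

      printf("seq 0: pass_up=%d\n", bcsp_accept_reliable(&s, 0x80 | 0));
      printf("seq 0 again: pass_up=%d\n", bcsp_accept_reliable(&s, 0x80 | 0));
      printf("seq 1: pass_up=%d\n", bcsp_accept_reliable(&s, 0x80 | 1));
      return 0;
  }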
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index ed0a4201b551..9e271286c5e5 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -128,7 +128,7 @@ static int intel_wait_booting(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "Device boot interrupted");
return -EINTR;
}
@@ -151,7 +151,7 @@ static int intel_wait_lpm_transaction(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "LPM transaction interrupted");
return -EINTR;
}
@@ -813,7 +813,7 @@ static int intel_setup(struct hci_uart *hu)
err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hdev, "Firmware loading interrupted");
err = -EINTR;
goto done;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index dda97398c59a..9497c469efd2 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -697,34 +697,36 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
case HCIUARTSETPROTO:
if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
err = hci_uart_set_proto(hu, arg);
- if (err) {
+ if (err)
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
- return err;
- }
} else
- return -EBUSY;
+ err = -EBUSY;
break;
case HCIUARTGETPROTO:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return hu->proto->id;
- return -EUNATCH;
+ err = hu->proto->id;
+ else
+ err = -EUNATCH;
+ break;
case HCIUARTGETDEVICE:
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
- return hu->hdev->id;
- return -EUNATCH;
+ err = hu->hdev->id;
+ else
+ err = -EUNATCH;
+ break;
case HCIUARTSETFLAGS:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return -EBUSY;
- err = hci_uart_set_flags(hu, arg);
- if (err)
- return err;
+ err = -EBUSY;
+ else
+ err = hci_uart_set_flags(hu, arg);
break;
case HCIUARTGETFLAGS:
- return hu->hdev_flags;
+ err = hu->hdev_flags;
+ break;
default:
err = n_tty_ioctl_helper(tty, file, cmd, arg);
@@ -810,6 +812,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_init();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_init();
+#endif
return 0;
}
@@ -845,6 +850,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
new file mode 100644
index 000000000000..bbc4b39b1dbf
--- /dev/null
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -0,0 +1,387 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Marvell devices
+ *
+ * Copyright (C) 2016 Marvell International Ltd.
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_FW_REQ_PKT 0xA5
+#define HCI_CHIP_VER_PKT 0xAA
+
+#define MRVL_ACK 0x5A
+#define MRVL_NAK 0xBF
+#define MRVL_RAW_DATA 0x1F
+
+enum {
+ STATE_CHIP_VER_PENDING,
+ STATE_FW_REQ_PENDING,
+};
+
+struct mrvl_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ struct sk_buff_head rawq;
+ unsigned long flags;
+ unsigned int tx_len;
+ u8 id, rev;
+};
+
+struct hci_mrvl_pkt {
+ __le16 lhs;
+ __le16 rhs;
+} __packed;
+#define HCI_MRVL_PKT_SIZE 4
+
+static int mrvl_open(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl;
+
+ BT_DBG("hu %p", hu);
+
+ mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
+ if (!mrvl)
+ return -ENOMEM;
+
+ skb_queue_head_init(&mrvl->txq);
+ skb_queue_head_init(&mrvl->rawq);
+
+ set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+
+ hu->priv = mrvl;
+ return 0;
+}
+
+static int mrvl_close(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+ kfree_skb(mrvl->rx_skb);
+ kfree(mrvl);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int mrvl_flush(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+
+ return 0;
+}
+
+static struct sk_buff *mrvl_dequeue(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mrvl->txq);
+ if (!skb) {
+ /* Any raw data ? */
+ skb = skb_dequeue(&mrvl->rawq);
+ } else {
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ return skb;
+}
+
+static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ return 0;
+}
+
+static void mrvl_send_ack(struct hci_uart *hu, unsigned char type)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ /* No H4 payload, only 1 byte header */
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet");
+ return;
+ }
+ hci_skb_pkt_type(skb) = type;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ hci_uart_tx_wakeup(hu);
+}
+
+static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected firmware request");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mrvl->tx_len = le16_to_cpu(pkt->lhs);
+
+ clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ u16 version = le16_to_cpu(pkt->lhs);
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected chip version");
+ goto done;
+ }
+
+ mrvl->id = version;
+ mrvl->rev = version >> 8;
+
+ bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev);
+
+ clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+#define HCI_RECV_CHIP_VER \
+ .type = HCI_CHIP_VER_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+#define HCI_RECV_FW_REQ \
+ .type = HCI_FW_REQ_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+static const struct h4_recv_pkt mrvl_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req },
+ { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver },
+};
+
+static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
+ if (IS_ERR(mrvl->rx_skb)) {
+ int err = PTR_ERR(mrvl->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ mrvl->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int mrvl_load_firmware(struct hci_dev *hdev, const char *name)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ const struct firmware *fw = NULL;
+ const u8 *fw_ptr, *fw_max;
+ int err;
+
+ err = request_firmware(&fw, name, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file %s", name);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_max = fw->data + fw->size;
+
+ bt_dev_info(hdev, "Loading %s", name);
+
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ while (fw_ptr <= fw_max) {
+ struct sk_buff *skb;
+
+ /* Controller drives the firmware load by sending firmware
+ * request packets containing the expected fragment size.
+ */
+ err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(2000));
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Firmware load interrupted");
+ break;
+ } else if (err) {
+ bt_dev_err(hdev, "Firmware request timeout");
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ bt_dev_dbg(hdev, "Firmware request, expecting %d bytes",
+ mrvl->tx_len);
+
+ if (fw_ptr == fw_max) {
+ /* Controller requests a null size once firmware is
+ * fully loaded. If controller expects more data, there
+ * is an issue.
+ */
+ if (!mrvl->tx_len) {
+ bt_dev_info(hdev, "Firmware loading complete");
+ } else {
+ bt_dev_err(hdev, "Firmware loading failure");
+ err = -EINVAL;
+ }
+ break;
+ }
+
+ if (fw_ptr + mrvl->tx_len > fw_max) {
+ mrvl->tx_len = fw_max - fw_ptr;
+ bt_dev_dbg(hdev, "Adjusting tx_len to %d",
+ mrvl->tx_len);
+ }
+
+ skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hdev, "Failed to alloc mem for FW packet");
+ err = -ENOMEM;
+ break;
+ }
+ bt_cb(skb)->pkt_type = MRVL_RAW_DATA;
+
+ memcpy(skb_put(skb, mrvl->tx_len), fw_ptr, mrvl->tx_len);
+ fw_ptr += mrvl->tx_len;
+
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ skb_queue_tail(&mrvl->rawq, skb);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ release_firmware(fw);
+ return err;
+}
+
+static int mrvl_setup(struct hci_uart *hu)
+{
+ int err;
+
+ hci_uart_set_flow_control(hu, true);
+
+ err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin");
+ if (err) {
+ bt_dev_err(hu->hdev, "Unable to download firmware helper");
+ return -EINVAL;
+ }
+
+ hci_uart_set_baudrate(hu, 3000000);
+ hci_uart_set_flow_control(hu, false);
+
+ err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin");
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct hci_uart_proto mrvl_proto = {
+ .id = HCI_UART_MRVL,
+ .name = "Marvell",
+ .init_speed = 115200,
+ .open = mrvl_open,
+ .close = mrvl_close,
+ .flush = mrvl_flush,
+ .setup = mrvl_setup,
+ .recv = mrvl_recv,
+ .enqueue = mrvl_enqueue,
+ .dequeue = mrvl_dequeue,
+};
+
+int __init mrvl_init(void)
+{
+ return hci_uart_register_proto(&mrvl_proto);
+}
+
+int __exit mrvl_deinit(void)
+{
+ return hci_uart_unregister_proto(&mrvl_proto);
+}
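
In the new hci_mrvl.c driver above, the controller drives the firmware download: each HCI_FW_REQ_PKT carries the next expected fragment length in lhs with its one's complement in rhs (validated via lhs ^ rhs == 0xffff), the driver clamps the final fragment to the bytes remaining, and a zero-length request at end of file ends the transfer. A user-space simulation of that loop; the fixed 252-byte request size is an arbitrary value for illustration only:

  #include <stddef.h>
  #include <stdio.h>

  /* Count how many fragments a firmware image of fw_size bytes needs when the
   * controller requests req_len bytes per round, clamping the last fragment. */
  static unsigned int send_firmware(size_t fw_size, size_t req_len)
  {
      size_t off = 0;
      unsigned int fragments = 0;

      while (off < fw_size) {
          size_t tx_len = req_len;

          if (off + tx_len > fw_size)
              tx_len = fw_size - off;   /* clamp the last fragment */

          /* the real driver queues tx_len bytes on its raw TX queue here */
          off += tx_len;
          fragments++;
      }

      /* a final zero-length request from the controller ends the download */
      return fragments;
  }

  int main(void)
  {
      printf("1000-byte image sent in %u fragments\n",
             send_firmware(1000, 252));
      return 0;
  }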
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 683c2b642057..6c867fbc56a7 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -397,7 +397,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
spin_lock_init(&qca->hci_ibs_lock);
- qca->workqueue = create_singlethread_workqueue("qca_wq");
+ qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
BT_ERR("QCA Workqueue not initialized properly");
kfree(qca);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 839bad1d8152..070139513e65 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 10
+#define HCI_UART_MAX_PROTO 12
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -47,6 +47,8 @@
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
#define HCI_UART_AG6XX 9
+#define HCI_UART_NOKIA 10
+#define HCI_UART_MRVL 11
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -189,3 +191,8 @@ int qca_deinit(void);
int ag6xx_init(void);
int ag6xx_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_MRVL
+int mrvl_init(void);
+int mrvl_deinit(void);
+#endif
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3ff229b2e7f3..c4a75a18dcae 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -377,21 +377,7 @@ static struct miscdevice vhci_miscdev = {
.fops = &vhci_fops,
.minor = VHCI_MINOR,
};
-
-static int __init vhci_init(void)
-{
- BT_INFO("Virtual HCI driver ver %s", VERSION);
-
- return misc_register(&vhci_miscdev);
-}
-
-static void __exit vhci_exit(void)
-{
- misc_deregister(&vhci_miscdev);
-}
-
-module_init(vhci_init);
-module_exit(vhci_exit);
+module_misc_device(vhci_miscdev);
module_param(amp, bool, 0644);
MODULE_PARM_DESC(amp, "Create AMP controller device");
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 3b205e212337..7010dcac9328 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -108,6 +108,14 @@ config OMAP_OCP2SCP
OCP2SCP and in OMAP5, both USB PHY and SATA PHY is connected via
OCP2SCP.
+config QCOM_EBI2
+ bool "Qualcomm External Bus Interface 2 (EBI2)"
+ depends on HAS_IOMEM
+ help
+ Say Y here to enable support for the Qualcomm External Bus
+ Interface 2, which can be used to connect devices such as NAND flash,
+ SRAM, Ethernet adapters, FPGAs and LCD displays.
+
config SIMPLE_PM_BUS
bool "Simple Power-Managed Bus Driver"
depends on OF && PM
@@ -132,12 +140,8 @@ config SUNXI_RSB
with various RSB based devices, such as AXP223, AXP8XX PMICs,
and AC100/AC200 ICs.
-# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence
-# the driver will fail to build as a module. However there are patches to
-# address that queued for v4.8, so this can be turned into a tristate symbol
-# after v4.8-rc1.
config TEGRA_ACONNECT
- bool "Tegra ACONNECT Bus Driver"
+ tristate "Tegra ACONNECT Bus Driver"
depends on ARCH_TEGRA_210_SOC
depends on OF && PM
select PM_CLK
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ac84cc4348e3..c6cfa6b2606e 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
new file mode 100644
index 000000000000..a6444244c411
--- /dev/null
+++ b/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,408 @@
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
+ */
+#define EBI2_CS0_ENABLE_MASK (BIT(0) | BIT(1))
+#define EBI2_CS1_ENABLE_MASK (BIT(2) | BIT(3))
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK (BIT(6) | BIT(7))
+#define EBI2_CS5_ENABLE_MASK (BIT(8) | BIT(9))
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ * memory continues to drive the data bus after OE is de-asserted.
+ * Inserted when reading one CS and switching to another CS or read
+ * followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ * every write minimum 1. The data out is driven from the time WE is
+ * asserted until CS is asserted. With a hold of 1, the CS stays
+ * active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ * write to a page or burst memory
+ * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
+ * read to a page or burst memory
+ * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT 28
+#define EBI2_XMEM_WR_HOLD_SHIFT 24
+#define EBI2_XMEM_WR_DELTA_SHIFT 16
+#define EBI2_XMEM_RD_DELTA_SHIFT 8
+#define EBI2_XMEM_WR_WAIT_SHIFT 4
+#define EBI2_XMEM_RD_WAIT_SHIFT 0
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ * transfer. For a single read transfer this will be the time
+ * from CS assertion to OE assertion.
+ * Bits 23-18: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ * assertion, with respect to the cycle where ADV is asserted.
+ * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bit 5: ADDR_HOLD_ENA, the address is held for an extra cycle to meet
+ * hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT 24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
+ */
+struct cs_data {
+ u32 enable_mask;
+ u16 slow_cfg;
+ u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+ {
+ /* CS0 */
+ .enable_mask = EBI2_CS0_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+ },
+ {
+ /* CS1 */
+ .enable_mask = EBI2_CS1_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+ },
+ {
+ /* CS2 */
+ .enable_mask = EBI2_CS2_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+ },
+ {
+ /* CS3 */
+ .enable_mask = EBI2_CS3_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+ },
+ {
+ /* CS4 */
+ .enable_mask = EBI2_CS4_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+ },
+ {
+ /* CS5 */
+ .enable_mask = EBI2_CS5_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+ },
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+ const char *prop;
+ u32 max;
+ bool slowreg;
+ u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+ {
+ .prop = "qcom,xmem-recovery-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-hold-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_HOLD_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-address-hold-enable",
+ .max = 1, /* boolean prop */
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+ .max = 3,
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-hold-cycles",
+ .max = 15,
+ .slowreg = false,
+ .shift = EBI2_XMEM_RD_HOLD_SHIFT,
+ },
+};
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+ struct device *dev,
+ void __iomem *ebi2_base,
+ void __iomem *ebi2_xmem,
+ u32 csindex)
+{
+ const struct cs_data *csd;
+ u32 slowcfg, fastcfg;
+ u32 val;
+ int ret;
+ int i;
+
+ csd = &cs_info[csindex];
+ val = readl(ebi2_base);
+ val |= csd->enable_mask;
+ writel(val, ebi2_base);
+ dev_dbg(dev, "enabled CS%u\n", csindex);
+
+ /* Next set up the XMEMC */
+ slowcfg = 0;
+ fastcfg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+ const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, xp->prop, &val);
+ if (ret) {
+ dev_dbg(dev, "could not read %s for CS%d\n",
+ xp->prop, csindex);
+ continue;
+ }
+
+ /* First check boolean props */
+ if (xp->max == 1 && val) {
+ if (xp->slowreg)
+ slowcfg |= BIT(xp->shift);
+ else
+ fastcfg |= BIT(xp->shift);
+ dev_dbg(dev, "set %s flag\n", xp->prop);
+ continue;
+ }
+
+ /* We're dealing with an u32 */
+ if (val > xp->max) {
+ dev_err(dev,
+ "too high value for %s: %u, capped at %u\n",
+ xp->prop, val, xp->max);
+ val = xp->max;
+ }
+ if (xp->slowreg)
+ slowcfg |= (val << xp->shift);
+ else
+ fastcfg |= (val << xp->shift);
+ dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+ }
+
+ dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+ csindex, slowcfg, fastcfg);
+
+ if (slowcfg)
+ writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+ if (fastcfg)
+ writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *ebi2_base;
+ void __iomem *ebi2_xmem;
+ struct clk *ebi2xclk;
+ struct clk *ebi2clk;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ ebi2xclk = devm_clk_get(dev, "ebi2x");
+ if (IS_ERR(ebi2xclk))
+ return PTR_ERR(ebi2xclk);
+
+ ret = clk_prepare_enable(ebi2xclk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+ return ret;
+ }
+
+ ebi2clk = devm_clk_get(dev, "ebi2");
+ if (IS_ERR(ebi2clk)) {
+ ret = PTR_ERR(ebi2clk);
+ goto err_disable_2x_clk;
+ }
+
+ ret = clk_prepare_enable(ebi2clk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2 clk\n");
+ goto err_disable_2x_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ebi2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_base)) {
+ ret = PTR_ERR(ebi2_base);
+ goto err_disable_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ebi2_xmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_xmem)) {
+ ret = PTR_ERR(ebi2_xmem);
+ goto err_disable_clk;
+ }
+
+ /* Allegedly this turns the power save mode off */
+ writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+ /* Disable all chipselects */
+ val = readl(ebi2_base);
+ val &= ~EBI2_CSN_MASK;
+ writel(val, ebi2_base);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ u32 csindex;
+
+ /* Figure out the chipselect */
+ ret = of_property_read_u32(child, "reg", &csindex);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+
+ if (csindex > 5) {
+ dev_err(dev,
+ "invalid chipselect %u, we only support 0-5\n",
+ csindex);
+ continue;
+ }
+
+ qcom_ebi2_setup_chipselect(child,
+ dev,
+ ebi2_base,
+ ebi2_xmem,
+ csindex);
+
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+ clk_disable_unprepare(ebi2xclk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+ { .compatible = "qcom,msm8660-ebi2", },
+ { .compatible = "qcom,apq8060-ebi2", },
+ { }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+ .probe = qcom_ebi2_probe,
+ .driver = {
+ .name = "qcom-ebi2",
+ .of_match_table = qcom_ebi2_of_match,
+ },
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 7e4104b74fa8..084ae286fa23 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -15,24 +15,6 @@
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
-static int tegra_aconnect_add_clock(struct device *dev, char *name)
-{
- struct clk *clk;
- int ret;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk)) {
- dev_err(dev, "%s clock not found\n", name);
- return PTR_ERR(clk);
- }
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret)
- clk_put(clk);
-
- return ret;
-}
-
static int tegra_aconnect_probe(struct platform_device *pdev)
{
int ret;
@@ -44,11 +26,11 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = tegra_aconnect_add_clock(&pdev->dev, "ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "ape");
if (ret)
goto clk_destroy;
- ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
if (ret)
goto clk_destroy;
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index 44660f1c4849..35d46da754d7 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -230,45 +230,7 @@ static struct miscdevice bfin_otp_misc_device = {
.name = DRIVER_NAME,
.fops = &bfin_otp_fops,
};
-
-/**
- * bfin_otp_init - Initialize module
- *
- * Registers the device and notifier handler. Actual device
- * initialization is handled by bfin_otp_open().
- */
-static int __init bfin_otp_init(void)
-{
- int ret;
-
- stampit();
-
- ret = misc_register(&bfin_otp_misc_device);
- if (ret) {
- pr_init(KERN_ERR PFX "unable to register a misc device\n");
- return ret;
- }
-
- pr_init(KERN_INFO PFX "initialized\n");
-
- return 0;
-}
-
-/**
- * bfin_otp_exit - Deinitialize module
- *
- * Unregisters the device and notifier handler. Actual device
- * deinitialization is handled by bfin_otp_close().
- */
-static void __exit bfin_otp_exit(void)
-{
- stampit();
-
- misc_deregister(&bfin_otp_misc_device);
-}
-
-module_init(bfin_otp_init);
-module_exit(bfin_otp_exit);
+module_misc_device(bfin_otp_misc_device);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("Blackfin OTP Memory Interface");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 699b7259f5d7..c19e23d22b36 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -26,7 +26,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define SDCRNG_CTL_REG 0x00
#define SDCRNG_CTL_FVLD_M 0x0000f000
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a33163dbb913..5bb1985ec484 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,6 +381,9 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
+ if (!pfn_valid(PFN_DOWN(p)))
+ return -EIO;
+
read = 0;
if (p < (unsigned long) high_memory) {
low_count = count;
@@ -509,6 +512,9 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
+ if (!pfn_valid(PFN_DOWN(p)))
+ return -EIO;
+
if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count,
(unsigned long)high_memory - p);
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 28740046bc83..972c40a19629 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -124,7 +124,7 @@ static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
MKBYTE(rSlaveControl));
rSlaveControl_Save = rSlaveControl;
- rSlaveControl.ConfigMode = TRUE;
+ rSlaveControl.ConfigMode = true;
PRINTK_2(TRACE_3780I,
"3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
@@ -155,7 +155,7 @@ unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
rSlaveControl_Save = rSlaveControl;
- rSlaveControl.ConfigMode = TRUE;
+ rSlaveControl.ConfigMode = true;
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
ucValue = InByteDsp(DSP_ConfigData);
@@ -230,7 +230,7 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rUartCfg1.BaseIO = 3;
break;
}
- rUartCfg2.Enable = TRUE;
+ rUartCfg2.Enable = true;
}
rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0;
@@ -238,7 +238,7 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse;
rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq];
rHBridgeCfg1.AccessMode = 1;
- rHBridgeCfg2.Enable = TRUE;
+ rHBridgeCfg2.Enable = true;
rBusmasterCfg2.Reserved = 0;
@@ -278,8 +278,8 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
* soft-reset active for 10ms.
*/
rSlaveControl.ClockControl = 0;
- rSlaveControl.SoftReset = TRUE;
- rSlaveControl.ConfigMode = FALSE;
+ rSlaveControl.SoftReset = true;
+ rSlaveControl.ConfigMode = false;
rSlaveControl.Reserved = 0;
PRINTK_4(TRACE_3780I,
@@ -302,7 +302,7 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
for (i = 0; i < 11; i++)
udelay(2000);
- rSlaveControl.SoftReset = FALSE;
+ rSlaveControl.SoftReset = false;
OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
@@ -326,10 +326,10 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
}
- rHBridgeControl.EnableDspInt = FALSE;
- rHBridgeControl.MemAutoInc = TRUE;
- rHBridgeControl.IoAutoInc = FALSE;
- rHBridgeControl.DiagnosticMode = FALSE;
+ rHBridgeControl.EnableDspInt = false;
+ rHBridgeControl.MemAutoInc = true;
+ rHBridgeControl.IoAutoInc = false;
+ rHBridgeControl.DiagnosticMode = false;
PRINTK_3(TRACE_3780I,
"3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
@@ -345,7 +345,7 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
ChipID = ReadMsaCfg(DSP_ChipID);
PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP exiting bRC=TRUE, ChipID %x\n",
+ "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
ChipID);
return 0;
@@ -361,8 +361,8 @@ int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
rSlaveControl.ClockControl = 0;
- rSlaveControl.SoftReset = TRUE;
- rSlaveControl.ConfigMode = FALSE;
+ rSlaveControl.SoftReset = true;
+ rSlaveControl.ConfigMode = false;
rSlaveControl.Reserved = 0;
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
@@ -398,14 +398,14 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
MKWORD(rHBridgeControl));
- rHBridgeControl.EnableDspInt = FALSE;
+ rHBridgeControl.EnableDspInt = false;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
/* Reset the core via the boot domain register */
- rBootDomain.ResetCore = TRUE;
- rBootDomain.Halt = TRUE;
- rBootDomain.NMI = TRUE;
+ rBootDomain.ResetCore = true;
+ rBootDomain.Halt = true;
+ rBootDomain.NMI = true;
rBootDomain.Reserved = 0;
PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
@@ -438,26 +438,26 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
/* Transition the core to a running state */
- rBootDomain.ResetCore = TRUE;
- rBootDomain.Halt = FALSE;
- rBootDomain.NMI = TRUE;
+ rBootDomain.ResetCore = true;
+ rBootDomain.Halt = false;
+ rBootDomain.NMI = true;
rBootDomain.Reserved = 0;
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
udelay(5);
- rBootDomain.ResetCore = FALSE;
+ rBootDomain.ResetCore = false;
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
udelay(5);
- rBootDomain.NMI = FALSE;
+ rBootDomain.NMI = false;
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
udelay(5);
/* Enable DSP to PC interrupt */
spin_lock_irqsave(&dsp_lock, flags);
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
- rHBridgeControl.EnableDspInt = TRUE;
+ rHBridgeControl.EnableDspInt = true;
PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
MKWORD(rHBridgeControl));
@@ -466,7 +466,7 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=TRUE\n");
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
return 0;
}
@@ -508,7 +508,7 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore exit bRC=TRUE\n");
+ "3780I::dsp3780I_ReadDStore exit bRC=true\n");
return 0;
}
@@ -550,7 +550,7 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadAndClearDStore exit bRC=TRUE\n");
+ "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
return 0;
}
@@ -592,7 +592,7 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
PRINTK_1(TRACE_3780I,
- "3780I::dsp3780D_WriteDStore exit bRC=TRUE\n");
+ "3780I::dsp3780D_WriteDStore exit bRC=true\n");
return 0;
}
@@ -640,7 +640,7 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
}
PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore exit bRC=TRUE\n");
+ "3780I::dsp3780I_ReadIStore exit bRC=true\n");
return 0;
}
@@ -689,7 +689,7 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
}
PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore exit bRC=TRUE\n");
+ "3780I::dsp3780I_WriteIStore exit bRC=true\n");
return 0;
}
@@ -713,7 +713,7 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
*/
spin_lock_irqsave(&dsp_lock, flags);
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
- rHBridgeControl.EnableDspInt = FALSE;
+ rHBridgeControl.EnableDspInt = false;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
*pusIPCSource = InWordDsp(DSP_Interrupt);
@@ -725,7 +725,7 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
- rHBridgeControl.EnableDspInt = TRUE;
+ rHBridgeControl.EnableDspInt = true;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index fba6ab1160ce..9ccb6b270b07 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -101,7 +101,7 @@ typedef struct {
} DSP_UART_CFG_1;
typedef struct {
- unsigned char Enable:1; /* RW: Enable I/O and IRQ: 0=FALSE, 1=TRUE */
+ unsigned char Enable:1; /* RW: Enable I/O and IRQ: 0=false, 1=true */
unsigned char Reserved:7; /* 0: Reserved */
} DSP_UART_CFG_2;
@@ -114,7 +114,7 @@ typedef struct {
} DSP_HBRIDGE_CFG_1;
typedef struct {
- unsigned char Enable:1; /* RW: enable I/O and IRQ: 0=FALSE, 1=TRUE */
+ unsigned char Enable:1; /* RW: enable I/O and IRQ: 0=false, 1=true */
unsigned char Reserved:7; /* 0: Reserved */
} DSP_HBRIDGE_CFG_2;
@@ -133,12 +133,12 @@ typedef struct {
typedef struct {
- unsigned char GateIOCHRDY:1; /* RW: Enable IOCHRDY gating: 0=FALSE, 1=TRUE */
+ unsigned char GateIOCHRDY:1; /* RW: Enable IOCHRDY gating: 0=false, 1=true */
unsigned char Reserved:7; /* 0: Reserved */
} DSP_ISA_PROT_CFG;
typedef struct {
- unsigned char Enable:1; /* RW: Enable low power suspend/resume 0=FALSE, 1=TRUE */
+ unsigned char Enable:1; /* RW: Enable low power suspend/resume 0=false, 1=true */
unsigned char Reserved:7; /* 0: Reserved */
} DSP_POWER_MGMT_CFG;
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 164544afd680..3a3ff2eb6cba 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -296,8 +296,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
- pDrvData->IPCs[ipcnum].bIsHere = FALSE;
- pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
+ pDrvData->IPCs[ipcnum].bIsHere = false;
+ pDrvData->IPCs[ipcnum].bIsEnabled = true;
mutex_unlock(&mwave_mutex);
PRINTK_2(TRACE_MWAVE,
@@ -324,7 +324,7 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
- if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
DECLARE_WAITQUEUE(wait, current);
PRINTK_2(TRACE_MWAVE,
@@ -332,7 +332,7 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" ipc %x going to sleep\n",
ipcnum);
add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
- pDrvData->IPCs[ipcnum].bIsHere = TRUE;
+ pDrvData->IPCs[ipcnum].bIsHere = true;
set_current_state(TASK_INTERRUPTIBLE);
/* check whether an event was signalled by */
/* the interrupt handler while we were gone */
@@ -355,7 +355,7 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" application\n",
ipcnum);
}
- pDrvData->IPCs[ipcnum].bIsHere = FALSE;
+ pDrvData->IPCs[ipcnum].bIsHere = false;
remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
set_current_state(TASK_RUNNING);
PRINTK_2(TRACE_MWAVE,
@@ -384,9 +384,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
return -EINVAL;
}
mutex_lock(&mwave_mutex);
- if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
- pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
- if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+ pDrvData->IPCs[ipcnum].bIsEnabled = false;
+ if (pDrvData->IPCs[ipcnum].bIsHere == true) {
wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
}
}
@@ -541,7 +541,7 @@ static void mwave_exit(void)
if (pDrvData->device_registered) {
device_unregister(&mwave_device);
- pDrvData->device_registered = FALSE;
+ pDrvData->device_registered = false;
}
#endif
@@ -576,16 +576,16 @@ static int __init mwave_init(void)
memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
- pDrvData->bBDInitialized = FALSE;
- pDrvData->bResourcesClaimed = FALSE;
- pDrvData->bDSPEnabled = FALSE;
- pDrvData->bDSPReset = FALSE;
- pDrvData->bMwaveDevRegistered = FALSE;
+ pDrvData->bBDInitialized = false;
+ pDrvData->bResourcesClaimed = false;
+ pDrvData->bDSPEnabled = false;
+ pDrvData->bDSPReset = false;
+ pDrvData->bMwaveDevRegistered = false;
pDrvData->sLine = -1;
for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
- pDrvData->IPCs[i].bIsEnabled = FALSE;
- pDrvData->IPCs[i].bIsHere = FALSE;
+ pDrvData->IPCs[i].bIsEnabled = false;
+ pDrvData->IPCs[i].bIsHere = false;
pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */
init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
}
@@ -601,7 +601,7 @@ static int __init mwave_init(void)
" Failed to initialize board data\n");
goto cleanup_error;
}
- pDrvData->bBDInitialized = TRUE;
+ pDrvData->bBDInitialized = true;
retval = tp3780I_CalcResources(&pDrvData->rBDData);
PRINTK_2(TRACE_MWAVE,
@@ -626,7 +626,7 @@ static int __init mwave_init(void)
" Failed to claim resources\n");
goto cleanup_error;
}
- pDrvData->bResourcesClaimed = TRUE;
+ pDrvData->bResourcesClaimed = true;
retval = tp3780I_EnableDSP(&pDrvData->rBDData);
PRINTK_2(TRACE_MWAVE,
@@ -639,7 +639,7 @@ static int __init mwave_init(void)
" Failed to enable DSP\n");
goto cleanup_error;
}
- pDrvData->bDSPEnabled = TRUE;
+ pDrvData->bDSPEnabled = true;
if (misc_register(&mwave_misc_dev) < 0) {
PRINTK_ERROR(KERN_ERR_MWAVE
@@ -647,7 +647,7 @@ static int __init mwave_init(void)
" Failed to register misc device\n");
goto cleanup_error;
}
- pDrvData->bMwaveDevRegistered = TRUE;
+ pDrvData->bMwaveDevRegistered = true;
pDrvData->sLine = register_serial_portandirq(
pDrvData->rBDData.rDspSettings.usUartBaseIO,
@@ -668,7 +668,7 @@ static int __init mwave_init(void)
if (device_register(&mwave_device))
goto cleanup_error;
- pDrvData->device_registered = TRUE;
+ pDrvData->device_registered = true;
for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
PRINTK_ERROR(KERN_ERR_MWAVE
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 7e0d530e2e07..37e0a4926080 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -125,8 +125,8 @@ extern int mwave_uart_io;
typedef struct _MWAVE_IPC {
unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
- BOOLEAN bIsEnabled;
- BOOLEAN bIsHere;
+ bool bIsEnabled;
+ bool bIsHere;
/* entry spin lock */
wait_queue_head_t ipc_wait_queue;
} MWAVE_IPC;
@@ -135,12 +135,12 @@ typedef struct _MWAVE_DEVICE_DATA {
THINKPAD_BD_DATA rBDData; /* board driver's data area */
unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
- BOOLEAN bBDInitialized;
- BOOLEAN bResourcesClaimed;
- BOOLEAN bDSPEnabled;
- BOOLEAN bDSPReset;
+ bool bBDInitialized;
+ bool bResourcesClaimed;
+ bool bDSPEnabled;
+ bool bDSPReset;
MWAVE_IPC IPCs[16];
- BOOLEAN bMwaveDevRegistered;
+ bool bMwaveDevRegistered;
short sLine;
int nr_registered_attrs;
int device_registered;
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 6187fd14b3fe..8c5411a8f33f 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -493,7 +493,7 @@ exit_smapi_request_error:
}
-int smapi_set_DSP_power_state(BOOLEAN bOn)
+int smapi_set_DSP_power_state(bool bOn)
{
int bRC = -EIO;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
@@ -556,7 +556,7 @@ int smapi_init(void)
PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
} else {
PRINTK_2(TRACE_SMAPI,
- "smapi::smapi_init, exit TRUE g_usSmapiPort %x\n",
+ "smapi::smapi_init, exit true g_usSmapiPort %x\n",
g_usSmapiPort);
retval = 0;
//SmapiQuerySystemID();
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
index 64b2ec1420e3..ebc206b000b9 100644
--- a/drivers/char/mwave/smapi.h
+++ b/drivers/char/mwave/smapi.h
@@ -49,10 +49,6 @@
#ifndef _LINUX_SMAPI_H
#define _LINUX_SMAPI_H
-#define TRUE 1
-#define FALSE 0
-#define BOOLEAN int
-
typedef struct {
int bDSPPresent;
int bDSPEnabled;
@@ -74,7 +70,7 @@ typedef struct {
int smapi_init(void);
int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
int smapi_set_DSP_cfg(void);
-int smapi_set_DSP_power_state(BOOLEAN bOn);
+int smapi_set_DSP_power_state(bool bOn);
#endif
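With the driver-private TRUE/FALSE/BOOLEAN macros gone from smapi.h, the mwave code relies on the kernel's native bool from <linux/types.h>. A minimal sketch of the resulting style, assuming an illustrative struct and helper that are not part of the driver:

#include <linux/types.h>	/* bool, true, false */

struct example_state {		/* stand-in for a driver-private structure */
	bool enabled;		/* was: BOOLEAN holding TRUE/FALSE */
};

static bool example_enable(struct example_state *st)
{
	st->enabled = true;	/* no TRUE macro needed */
	return st->enabled;	/* bool is tested and returned directly */
}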
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index 04e6d6a27994..5e1618a76b2a 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -80,13 +80,13 @@ static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
MKWORD(rGpioDriverEnable) = 0;
- rGpioDriverEnable.Enable10 = TRUE;
- rGpioDriverEnable.Mask10 = TRUE;
+ rGpioDriverEnable.Enable10 = true;
+ rGpioDriverEnable.Mask10 = true;
WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable));
MKWORD(rGpioOutputData) = 0;
rGpioOutputData.Latch10 = 0;
- rGpioOutputData.Mask10 = TRUE;
+ rGpioOutputData.Mask10 = true;
WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
@@ -127,7 +127,7 @@ static irqreturn_t DspInterrupt(int irq, void *dev_id)
PRINTK_2(TRACE_TP3780I,
"tp3780i::DspInterrupt usIntCount %x\n",
pDrvData->IPCs[usPCNum - 1].usIntCount);
- if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == TRUE) {
+ if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
PRINTK_2(TRACE_TP3780I,
"tp3780i::DspInterrupt, waking up usPCNum %x\n",
usPCNum - 1);
@@ -160,8 +160,8 @@ int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
- pBDData->bDSPEnabled = FALSE;
- pSettings->bInterruptClaimed = FALSE;
+ pBDData->bDSPEnabled = false;
+ pSettings->bInterruptClaimed = false;
retval = smapi_init();
if (retval) {
@@ -269,7 +269,7 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
if (pSettings->bInterruptClaimed) {
free_irq(pSettings->usDspIrq, NULL);
- pSettings->bInterruptClaimed = FALSE;
+ pSettings->bInterruptClaimed = false;
}
PRINTK_2(TRACE_TP3780I,
@@ -283,7 +283,7 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
{
DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
- BOOLEAN bDSPPoweredUp = FALSE, bInterruptAllocated = FALSE;
+ bool bDSPPoweredUp = false, bInterruptAllocated = false;
PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
@@ -336,14 +336,14 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
}
}
- pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = TRUE;
- pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = TRUE;
+ pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = true;
+ pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = true;
if (pBDData->bShareDspIrq) {
- pSettings->bDspIrqActiveLow = FALSE;
+ pSettings->bDspIrqActiveLow = false;
}
if (pBDData->bShareUartIrq) {
- pSettings->bUartIrqActiveLow = FALSE;
+ pSettings->bUartIrqActiveLow = false;
}
pSettings->usNumTransfers = TP_CFG_NumTransfers;
@@ -373,16 +373,16 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
PRINTK_3(TRACE_TP3780I,
"tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
pSettings->usDspIrq, pBDData->bShareDspIrq);
- bInterruptAllocated = TRUE;
- pSettings->bInterruptClaimed = TRUE;
+ bInterruptAllocated = true;
+ pSettings->bInterruptClaimed = true;
}
- smapi_set_DSP_power_state(FALSE);
- if (smapi_set_DSP_power_state(TRUE)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(TRUE) failed\n");
+ smapi_set_DSP_power_state(false);
+ if (smapi_set_DSP_power_state(true)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
goto exit_cleanup;
} else {
- bDSPPoweredUp = TRUE;
+ bDSPPoweredUp = true;
}
if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
@@ -392,7 +392,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
EnableSRAM(pBDData);
- pBDData->bDSPEnabled = TRUE;
+ pBDData->bDSPEnabled = true;
PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
@@ -401,10 +401,10 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
exit_cleanup:
PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
if (bDSPPoweredUp)
- smapi_set_DSP_power_state(FALSE);
+ smapi_set_DSP_power_state(false);
if (bInterruptAllocated) {
free_irq(pSettings->usDspIrq, NULL);
- pSettings->bInterruptClaimed = FALSE;
+ pSettings->bInterruptClaimed = false;
}
return -EIO;
}
@@ -421,10 +421,10 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
dsp3780I_DisableDSP(&pBDData->rDspSettings);
if (pSettings->bInterruptClaimed) {
free_irq(pSettings->usDspIrq, NULL);
- pSettings->bInterruptClaimed = FALSE;
+ pSettings->bInterruptClaimed = false;
}
- smapi_set_DSP_power_state(FALSE);
- pBDData->bDSPEnabled = FALSE;
+ smapi_set_DSP_power_state(false);
+ pBDData->bDSPEnabled = false;
}
PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
@@ -516,7 +516,7 @@ int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
int retval = 0;
DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
- BOOLEAN bRC = 0;
+ bool bRC = false;
PRINTK_6(TRACE_TP3780I,
"tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
@@ -552,7 +552,7 @@ int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
int retval = 0;
DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
- BOOLEAN bRC = 0;
+ bool bRC = false;
PRINTK_6(TRACE_TP3780I,
"tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f8a483c67b07..d23368874710 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -286,7 +286,7 @@ static int register_device(int minor, struct pp_struct *pp)
struct parport *port;
struct pardevice *pdev = NULL;
char *name;
- int fl;
+ struct pardev_cb ppdev_cb;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -299,9 +299,11 @@ static int register_device(int minor, struct pp_struct *pp)
return -ENXIO;
}
- fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
- pdev = parport_register_device(port, name, NULL,
- NULL, pp_irq, fl, pp);
+ memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ ppdev_cb.irq_func = pp_irq;
+ ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ ppdev_cb.private = pp;
+ pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
if (!pdev) {
@@ -799,10 +801,23 @@ static void pp_detach(struct parport *port)
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
}
+static int pp_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+ int len = strlen(drv->name);
+
+ if (strncmp(par_dev->name, drv->name, len))
+ return -ENODEV;
+
+ return 0;
+}
+
static struct parport_driver pp_driver = {
.name = CHRDEV,
- .attach = pp_attach,
+ .probe = pp_probe,
+ .match_port = pp_attach,
.detach = pp_detach,
+ .devmodel = true,
};
static int __init ppdev_init(void)
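The ppdev hunks above move the character device onto the parport device model: the irq handler, flags and private pointer go into a zeroed struct pardev_cb, the device is created with parport_register_dev_model(), and the parport_driver gains .probe/.match_port plus .devmodel = true. A hedged sketch of that registration pattern for a hypothetical driver called "mydev" (all names below are placeholders, not ppdev code):

#include <linux/module.h>
#include <linux/parport.h>
#include <linux/string.h>

static void my_attach(struct parport *port)
{
	struct pardev_cb cb;
	struct pardevice *pdev;

	memset(&cb, 0, sizeof(cb));		/* unused callbacks stay NULL */
	cb.flags = PARPORT_FLAG_EXCL;		/* exclusive claim, like ppdev's PP_EXCL */
	pdev = parport_register_dev_model(port, "mydev", &cb, 0);
	if (!pdev)
		pr_err("mydev: registration on %s failed\n", port->name);
	/* a real driver keeps pdev and calls parport_unregister_device()
	 * from its .detach callback */
}

static void my_detach(struct parport *port)
{
}

static struct parport_driver my_driver = {
	.name		= "mydev",
	.match_port	= my_attach,	/* called once per discovered port */
	.detach		= my_detach,
	.devmodel	= true,
};

static int __init my_init(void)
{
	return parport_register_driver(&my_driver);
}

static void __exit my_exit(void)
{
	parport_unregister_driver(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");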
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 94006f9c2e43..10e56323f390 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -385,13 +385,18 @@ scdrv_init(void)
event_nasid = ia64_sn_get_console_nasid();
+ snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
+ if (IS_ERR(snsc_class)) {
+ printk("%s: failed to allocate class\n", __func__);
+ return PTR_ERR(snsc_class);
+ }
+
if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
SYSCTL_BASENAME) < 0) {
printk("%s: failed to register SN system controller device\n",
__func__);
return -ENODEV;
}
- snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
for (cnode = 0; cnode < num_cnodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 69f6b4acc377..398800edb2cc 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -331,13 +331,11 @@ static const struct file_operations srom_fops = {
/**
* srom_setup_minor() - Initialize per-minor information.
* @srom: Per-device SROM state.
- * @index: Device to set up.
+ * @devhdl: Partition device handle.
*/
-static int srom_setup_minor(struct srom_dev *srom, int index)
+static int srom_setup_minor(struct srom_dev *srom, int devhdl)
{
- struct device *dev;
- int devhdl = srom->hv_devhdl;
-
+ srom->hv_devhdl = devhdl;
mutex_init(&srom->lock);
if (_srom_read(devhdl, &srom->total_size,
@@ -350,9 +348,7 @@ static int srom_setup_minor(struct srom_dev *srom, int index)
SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
return -EIO;
- dev = device_create(srom_class, &srom_parent->dev,
- MKDEV(srom_major, index), srom, "%d", index);
- return PTR_ERR_OR_ZERO(dev);
+ return 0;
}
/** srom_init() - Initialize the driver's module. */
@@ -365,7 +361,7 @@ static int srom_init(void)
* Start with a plausible number of partitions; the krealloc() call
* below will yield about log(srom_devs) additional allocations.
*/
- srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
+ srom_devices = kmalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
/* Discover the number of srom partitions. */
for (i = 0; ; i++) {
@@ -373,7 +369,7 @@ static int srom_init(void)
char buf[20];
struct srom_dev *new_srom_devices =
krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!new_srom_devices) {
result = -ENOMEM;
goto fail_mem;
@@ -387,7 +383,9 @@ static int srom_init(void)
i, devhdl);
break;
}
- srom_devices[i].hv_devhdl = devhdl;
+ result = srom_setup_minor(&srom_devices[i], devhdl);
+ if (result != 0)
+ goto fail_mem;
}
srom_devs = i;
@@ -431,9 +429,13 @@ static int srom_init(void)
srom_class->dev_groups = srom_dev_groups;
srom_class->devnode = srom_devnode;
- /* Do per-partition initialization */
+ /* Create per-partition devices */
for (i = 0; i < srom_devs; i++) {
- result = srom_setup_minor(srom_devices + i, i);
+ struct device *dev =
+ device_create(srom_class, &srom_parent->dev,
+ MKDEV(srom_major, i), srom_devices + i,
+ "%d", i);
+ result = PTR_ERR_OR_ZERO(dev);
if (result < 0)
goto fail_class;
}
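The tile-srom changes above separate partition discovery from device creation: srom_setup_minor() now receives the hypervisor handle directly, and the partition array grows through krealloc() into a temporary pointer so a failed reallocation neither leaks nor clobbers the existing array. A small sketch of that grow-safely idiom, using a stand-in structure rather than the driver's types:

#include <linux/slab.h>

struct part_state {			/* stand-in for struct srom_dev */
	int hv_devhdl;
};

/* Grow the array by one element; on failure the caller still owns
 * (and must eventually free) the original allocation. */
static struct part_state *grow_parts(struct part_state *parts, int count)
{
	struct part_state *tmp;

	tmp = krealloc(parts, (count + 1) * sizeof(*parts), GFP_KERNEL);
	if (!tmp)
		return NULL;		/* parts is untouched and still valid */
	return tmp;
}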
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index c2ee30451e41..6f060c76217b 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -589,8 +589,6 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
chip->flags |= TPM_CHIP_FLAG_IRQ;
disable_irq_nosync(tpm_dev->irq);
-
- tpm_gen_interrupt(chip);
}
return tpm_chip_register(chip);
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index f5d452151c6b..912ad30be585 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -145,7 +145,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EPIPE;
}
out_size = tpm_transmit(priv->chip, priv->data_buffer,
- sizeof(priv->data_buffer));
+ sizeof(priv->data_buffer), 0);
tpm_put_ops(priv->chip);
if (out_size < 0) {
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1abe2d7a2610..8de61876f633 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -330,13 +330,16 @@ EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/*
* Internal kernel interface to transmit TPM commands
*/
-ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- size_t bufsiz)
+ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
+ unsigned int flags)
{
ssize_t rc;
u32 count, ordinal;
unsigned long stop;
+ if (bufsiz < TPM_HEADER_SIZE)
+ return -EINVAL;
+
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
@@ -350,7 +353,8 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
return -E2BIG;
}
- mutex_lock(&chip->tpm_mutex);
+ if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ mutex_lock(&chip->tpm_mutex);
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
@@ -393,20 +397,21 @@ out_recv:
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
- mutex_unlock(&chip->tpm_mutex);
+ if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ mutex_unlock(&chip->tpm_mutex);
return rc;
}
#define TPM_DIGEST_SIZE 20
#define TPM_RET_CODE_IDX 6
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd,
- int len, const char *desc)
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
+ int len, unsigned int flags, const char *desc)
{
- struct tpm_output_header *header;
+ const struct tpm_output_header *header;
int err;
- len = tpm_transmit(chip, (u8 *) cmd, len);
+ len = tpm_transmit(chip, (const u8 *)cmd, len, flags);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
@@ -453,26 +458,13 @@ ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
+ desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
}
-
-void tpm_gen_interrupt(struct tpm_chip *chip)
-{
- struct tpm_cmd_t tpm_cmd;
- ssize_t rc;
-
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
-
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the timeouts");
-}
-EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
+EXPORT_SYMBOL_GPL(tpm_getcap);
#define TPM_ORD_STARTUP cpu_to_be32(153)
#define TPM_ST_CLEAR cpu_to_be16(1)
@@ -490,7 +482,7 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
start_cmd.header.in = tpm_startup_header;
start_cmd.params.startup_in.startup_type = startup_type;
- return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
+ return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
"attempting to start the TPM");
}
@@ -521,7 +513,8 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
+ NULL);
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
@@ -535,7 +528,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- NULL);
+ 0, NULL);
}
if (rc) {
dev_err(&chip->dev,
@@ -596,7 +589,7 @@ duration:
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
"attempting to determine the durations");
if (rc)
return rc;
@@ -633,7 +626,7 @@ EXPORT_SYMBOL_GPL(tpm_get_timeouts);
#define TPM_ORD_CONTINUE_SELFTEST 83
#define CONTINUE_SELFTEST_RESULT_SIZE 10
-static struct tpm_input_header continue_selftest_header = {
+static const struct tpm_input_header continue_selftest_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(10),
.ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
@@ -652,14 +645,14 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
struct tpm_cmd_t cmd;
cmd.header.in = continue_selftest_header;
- rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, 0,
"continue selftest");
return rc;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
-static struct tpm_input_header pcrread_header = {
+static const struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORDINAL_PCRREAD
@@ -672,7 +665,7 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
- rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, 0,
"attempting to read a pcr value");
if (rc == 0)
@@ -745,7 +738,7 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_RESULT_SIZE 34
-static struct tpm_input_header pcrextend_header = {
+static const struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
.ordinal = TPM_ORD_PCR_EXTEND
@@ -770,7 +763,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
tpm_put_ops(chip);
@@ -792,7 +785,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
unsigned int loops;
unsigned int delay_msec = 100;
unsigned long duration;
- struct tpm_cmd_t cmd;
+ u8 dummy[TPM_DIGEST_SIZE];
duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
@@ -807,9 +800,8 @@ int tpm_do_selftest(struct tpm_chip *chip)
do {
/* Attempt to read a PCR value */
- cmd.header.in = pcrread_header;
- cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
- rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+ rc = tpm_pcr_read_dev(chip, 0, dummy);
+
/* Some buggy TPMs will not respond to tpm_tis_ready() for
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
@@ -824,7 +816,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
if (rc < TPM_HEADER_SIZE)
return -EFAULT;
- rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
@@ -879,7 +870,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
if (chip == NULL)
return -ENODEV;
- rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
+ rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
tpm_put_ops(chip);
return rc;
@@ -949,7 +940,7 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10
-static struct tpm_input_header savestate_header = {
+static const struct tpm_input_header savestate_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(10),
.ordinal = TPM_ORD_SAVESTATE
@@ -981,14 +972,15 @@ int tpm_pm_suspend(struct device *dev)
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
for (try = 0; try < TPM_RETRY; try++) {
cmd.header.in = savestate_header;
- rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, 0,
+ NULL);
/*
* If the TPM indicates that it is too busy to respond to
@@ -1032,7 +1024,7 @@ int tpm_pm_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_pm_resume);
#define TPM_GETRANDOM_RESULT_SIZE 18
-static struct tpm_input_header tpm_getrandom_header = {
+static const struct tpm_input_header tpm_getrandom_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORD_GET_RANDOM
@@ -1072,8 +1064,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
err = tpm_transmit_cmd(chip, &tpm_cmd,
- TPM_GETRANDOM_RESULT_SIZE + num_bytes,
- "attempting get random");
+ TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+ 0, "attempting get random");
if (err)
break;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index b46cf70c8b16..a76ab4af9fb2 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -22,7 +22,7 @@
#define READ_PUBEK_RESULT_SIZE 314
#define TPM_ORD_READPUBEK cpu_to_be32(124)
-static struct tpm_input_header tpm_readpubek_header = {
+static const struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(30),
.ordinal = TPM_ORD_READPUBEK
@@ -39,7 +39,7 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
struct tpm_chip *chip = to_tpm_chip(dev);
tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
if (err)
goto out;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 3e32d5bd2dc6..4d183c97f6a6 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -476,32 +476,35 @@ extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
extern struct idr dev_nums_idr;
+enum tpm_transmit_flags {
+ TPM_TRANSMIT_UNLOCKED = BIT(0),
+};
+
+ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
+ unsigned int flags);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
+ unsigned int flags, const char *desc);
ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
const char *desc);
-ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- size_t bufsiz);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len,
- const char *desc);
-extern int tpm_get_timeouts(struct tpm_chip *);
-extern void tpm_gen_interrupt(struct tpm_chip *);
+int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
-extern int tpm_do_selftest(struct tpm_chip *);
-extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
-extern int tpm_pm_suspend(struct device *);
-extern int tpm_pm_resume(struct device *);
-extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
- wait_queue_head_t *, bool);
+int tpm_do_selftest(struct tpm_chip *chip);
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm_pm_suspend(struct device *dev);
+int tpm_pm_resume(struct device *dev);
+int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue, bool check_cancel);
struct tpm_chip *tpm_chip_find_get(int chip_num);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
-extern struct tpm_chip *tpm_chip_alloc(struct device *dev,
- const struct tpm_class_ops *ops);
-extern struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
- const struct tpm_class_ops *ops);
-extern int tpm_chip_register(struct tpm_chip *chip);
-extern void tpm_chip_unregister(struct tpm_chip *chip);
+struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops);
+int tpm_chip_register(struct tpm_chip *chip);
+void tpm_chip_unregister(struct tpm_chip *chip);
void tpm_sysfs_add_device(struct tpm_chip *chip);
@@ -528,8 +531,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
int tpm2_auto_startup(struct tpm_chip *chip);
-extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32);
-extern int tpm2_gen_interrupt(struct tpm_chip *chip);
-extern int tpm2_probe(struct tpm_chip *chip);
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm2_probe(struct tpm_chip *chip);
#endif
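The flags argument added to tpm_transmit()/tpm_transmit_cmd() above exists so a caller that already holds chip->tpm_mutex can issue several dependent commands without dropping the lock, which is what tpm2_unseal_trusted() does below with TPM_TRANSMIT_UNLOCKED. A hedged sketch of that calling convention (the helper name and command buffers are placeholders, and the snippet only builds inside drivers/char/tpm/):

#include <linux/mutex.h>
#include "tpm.h"	/* tpm_transmit_cmd(), TPM_TRANSMIT_UNLOCKED */

/* Run two dependent commands back to back so no other user can
 * slip a command in between them. */
static int my_two_step_op(struct tpm_chip *chip, void *cmd1, int len1,
			  void *cmd2, int len2)
{
	int rc;

	mutex_lock(&chip->tpm_mutex);
	rc = tpm_transmit_cmd(chip, cmd1, len1, TPM_TRANSMIT_UNLOCKED,
			      "first step");
	if (rc)
		goto out;
	rc = tpm_transmit_cmd(chip, cmd2, len2, TPM_TRANSMIT_UNLOCKED,
			      "second step");
out:
	mutex_unlock(&chip->tpm_mutex);
	return rc;
}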
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 0c75c3f1689f..7df55d58c939 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -282,7 +282,7 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
sizeof(cmd.params.pcrread_in.pcr_select));
cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting to read a pcr value");
if (rc == 0) {
buf = cmd.params.pcrread_out.digest;
@@ -330,7 +330,7 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting extend a PCR value");
return rc;
@@ -376,7 +376,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
cmd.header.in = tpm2_getrandom_header;
cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
- err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting get random");
if (err)
break;
@@ -434,12 +434,12 @@ static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
}
/**
- * tpm2_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
*
- * Returns < 0 on error and 0 on success.
+ * Return: < 0 on error and 0 on success.
*/
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -512,7 +512,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "sealing data");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, "sealing data");
if (rc)
goto out;
@@ -538,10 +538,18 @@ out:
return rc;
}
-static int tpm2_load(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 *blob_handle)
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: same as with tpm_transmit_cmd
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle, unsigned int flags)
{
struct tpm_buf buf;
unsigned int private_len;
@@ -576,7 +584,7 @@ static int tpm2_load(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "loading blob");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "loading blob");
if (!rc)
*blob_handle = be32_to_cpup(
(__be32 *) &buf.data[TPM_HEADER_SIZE]);
@@ -590,7 +598,16 @@ out:
return rc;
}
-static void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
+/**
+ * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
+ * @chip: TPM chip to use
+ * @handle: context handle of the object to flush
+ * @flags: tpm transmit flags
+ *
+ * The function returns void; a failed flush is only reported via dev_warn().
+ */
+static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
+ unsigned int flags)
{
struct tpm_buf buf;
int rc;
@@ -604,7 +621,8 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
+ "flushing context");
if (rc)
dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
@@ -612,10 +630,18 @@ static void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_buf_destroy(&buf);
}
-static int tpm2_unseal(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 blob_handle)
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: same as with tpm_transmit_cmd
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle, unsigned int flags)
{
struct tpm_buf buf;
u16 data_len;
@@ -635,7 +661,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "unsealing");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "unsealing");
if (rc > 0)
rc = -EPERM;
@@ -654,12 +680,12 @@ static int tpm2_unseal(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ * @chip: TPM chip to use
* @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
*
- * Returns < 0 on error and 0 on success.
+ * Return: < 0 on error and 0 on success.
*/
int tpm2_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -668,14 +694,17 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
u32 blob_handle;
int rc;
- rc = tpm2_load(chip, payload, options, &blob_handle);
+ mutex_lock(&chip->tpm_mutex);
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle,
+ TPM_TRANSMIT_UNLOCKED);
if (rc)
- return rc;
-
- rc = tpm2_unseal(chip, payload, options, blob_handle);
-
- tpm2_flush_context(chip, blob_handle);
+ goto out;
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle,
+ TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED);
+out:
+ mutex_unlock(&chip->tpm_mutex);
return rc;
}
@@ -701,12 +730,13 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, desc);
if (!rc)
*value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
return rc;
}
+EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
#define TPM2_STARTUP_IN_SIZE \
(sizeof(struct tpm_input_header) + \
@@ -735,7 +765,7 @@ static int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
cmd.header.in = tpm2_startup_header;
cmd.params.startup_in.startup_type = cpu_to_be16(startup_type);
- return tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting to start the TPM");
}
@@ -763,7 +793,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
cmd.header.in = tpm2_shutdown_header;
cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), "stopping the TPM");
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "stopping the TPM");
/* In places where shutdown command is sent there's not much we can do
* except print the error code on a system failure.
@@ -828,7 +858,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = full;
- rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, 0,
"continue selftest");
/* At least some prototype chips seem to give RC_TESTING error
@@ -880,7 +910,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
cmd.params.pcrread_in.pcr_select[1] = 0x00;
cmd.params.pcrread_in.pcr_select[2] = 0x00;
- rc = tpm_transmit_cmd(chip, (u8 *) &cmd, sizeof(cmd), NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL);
if (rc < 0)
break;
@@ -895,23 +925,6 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
}
/**
- * tpm2_gen_interrupt() - generate an interrupt
- * @chip: TPM chip to use
- *
- * 0 is returned when the operation is successful. If a negative number is
- * returned it remarks a POSIX error code. If a positive number is returned
- * it remarks a TPM error.
- */
-int tpm2_gen_interrupt(struct tpm_chip *chip)
-{
- u32 dummy;
-
- return tpm2_get_tpm_pt(chip, 0x100, &dummy,
- "attempting to generate an interrupt");
-}
-EXPORT_SYMBOL_GPL(tpm2_gen_interrupt);
-
-/**
* tpm2_probe() - probe TPM 2.0
* @chip: TPM chip to use
*
@@ -928,11 +941,9 @@ int tpm2_probe(struct tpm_chip *chip)
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit(chip, (const char *) &cmd, sizeof(cmd));
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL);
if (rc < 0)
return rc;
- else if (rc < TPM_HEADER_SIZE)
- return -EFAULT;
if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS)
chip->flags |= TPM_CHIP_FLAG_TPM2;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 018c382554ba..a7c870af916c 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,7 +19,6 @@
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -34,14 +33,14 @@ enum crb_defaults {
CRB_ACPI_START_INDEX = 1,
};
-enum crb_ca_request {
- CRB_CA_REQ_GO_IDLE = BIT(0),
- CRB_CA_REQ_CMD_READY = BIT(1),
+enum crb_ctrl_req {
+ CRB_CTRL_REQ_CMD_READY = BIT(0),
+ CRB_CTRL_REQ_GO_IDLE = BIT(1),
};
-enum crb_ca_status {
- CRB_CA_STS_ERROR = BIT(0),
- CRB_CA_STS_TPM_IDLE = BIT(1),
+enum crb_ctrl_sts {
+ CRB_CTRL_STS_ERROR = BIT(0),
+ CRB_CTRL_STS_TPM_IDLE = BIT(1),
};
enum crb_start {
@@ -67,7 +66,7 @@ struct crb_control_area {
} __packed;
enum crb_status {
- CRB_STS_COMPLETE = BIT(0),
+ CRB_DRV_STS_COMPLETE = BIT(0),
};
enum crb_flags {
@@ -81,6 +80,7 @@ struct crb_priv {
struct crb_control_area __iomem *cca;
u8 __iomem *cmd;
u8 __iomem *rsp;
+ u32 cmd_size;
};
static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
@@ -92,7 +92,7 @@ static u8 crb_status(struct tpm_chip *chip)
if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) !=
CRB_START_INVOKE)
- sts |= CRB_STS_COMPLETE;
+ sts |= CRB_DRV_STS_COMPLETE;
return sts;
}
@@ -106,7 +106,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < 6)
return -EIO;
- if (ioread32(&priv->cca->sts) & CRB_CA_STS_ERROR)
+ if (ioread32(&priv->cca->sts) & CRB_CTRL_STS_ERROR)
return -EIO;
memcpy_fromio(buf, priv->rsp, 6);
@@ -142,11 +142,14 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
- if (len > ioread32(&priv->cca->cmd_size)) {
- dev_err(&chip->dev,
- "invalid command count value %x %zx\n",
- (unsigned int) len,
- (size_t) ioread32(&priv->cca->cmd_size));
+ /* Zero the cancel register so that the next command will not get
+ * canceled.
+ */
+ iowrite32(0, &priv->cca->cancel);
+
+ if (len > priv->cmd_size) {
+ dev_err(&chip->dev, "invalid command count value %zd %d\n",
+ len, priv->cmd_size);
return -E2BIG;
}
@@ -156,7 +159,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
wmb();
if (priv->flags & CRB_FL_CRB_START)
- iowrite32(cpu_to_le32(CRB_START_INVOKE), &priv->cca->start);
+ iowrite32(CRB_START_INVOKE, &priv->cca->start);
if (priv->flags & CRB_FL_ACPI_START)
rc = crb_do_acpi_start(chip);
@@ -168,15 +171,10 @@ static void crb_cancel(struct tpm_chip *chip)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
- iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel);
-
- /* Make sure that cmd is populated before issuing cancel. */
- wmb();
+ iowrite32(CRB_CANCEL_INVOKE, &priv->cca->cancel);
if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
-
- iowrite32(0, &priv->cca->cancel);
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
@@ -194,8 +192,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
- .req_complete_mask = CRB_STS_COMPLETE,
- .req_complete_val = CRB_STS_COMPLETE,
+ .req_complete_mask = CRB_DRV_STS_COMPLETE,
+ .req_complete_val = CRB_DRV_STS_COMPLETE,
};
static int crb_init(struct acpi_device *device, struct crb_priv *priv)
@@ -265,8 +263,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
acpi_dev_free_resource_list(&resources);
if (resource_type(&io_res) != IORESOURCE_MEM) {
- dev_err(dev,
- FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
}
@@ -302,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
return -EINVAL;
}
+ priv->cmd_size = cmd_size;
priv->rsp = priv->cmd;
return 0;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index d66f51b3648e..e3bf31b37138 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -440,7 +440,6 @@ static int probe_itpm(struct tpm_chip *chip)
0x00, 0x00, 0x00, 0xf1
};
size_t len = sizeof(cmd_getticks);
- bool itpm;
u16 vendor;
rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
@@ -451,8 +450,6 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;
- itpm = false;
-
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0)
goto out;
@@ -460,8 +457,6 @@ static int probe_itpm(struct tpm_chip *chip)
tpm_tis_ready(chip);
release_locality(chip, priv->locality, 0);
- itpm = true;
-
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
dev_info(&chip->dev, "Detected an iTPM.\n");
@@ -526,6 +521,18 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
+static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+{
+ const char *desc = "attempting to generate an interrupt";
+ u32 cap2;
+ cap_t cap;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ else
+ return tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc);
+}
+
/* Register the IRQ and issue a command that will cause an interrupt. If an
* irq is seen then leave the chip setup for IRQ operation, otherwise reverse
* everything and leave in polling mode. Returns 0 on success.
@@ -575,10 +582,9 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_gen_interrupt(chip);
- else
- tpm_gen_interrupt(chip);
+ rc = tpm_tis_gen_interrupt(chip);
+ if (rc < 0)
+ return rc;
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index b098d2d0b7c4..67549ce88cc9 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -31,60 +31,53 @@ static struct ttyprintk_port tpk_port;
* printk messages (also suitable for logging service):
* - any cr is replaced by nl
* - adds a ttyprintk source tag in front of each line
- * - too long message is fragmeted, with '\'nl between fragments
- * - TPK_STR_SIZE isn't really the write_room limiting factor, bcause
+ * - too long message is fragmented, with '\'nl between fragments
+ * - TPK_STR_SIZE isn't really the write_room limiting factor, because
* it is emptied on the fly during preformatting.
*/
#define TPK_STR_SIZE 508 /* should be bigger than max expected line length */
#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
-static const char *tpk_tag = "[U] "; /* U for User */
static int tpk_curr;
+static char tpk_buffer[TPK_STR_SIZE + 4];
+
+static void tpk_flush(void)
+{
+ if (tpk_curr > 0) {
+ tpk_buffer[tpk_curr] = '\0';
+ pr_info("[U] %s\n", tpk_buffer);
+ tpk_curr = 0;
+ }
+}
+
static int tpk_printk(const unsigned char *buf, int count)
{
- static char tmp[TPK_STR_SIZE + 4];
int i = tpk_curr;
if (buf == NULL) {
- /* flush tmp[] */
- if (tpk_curr > 0) {
- /* non nl or cr terminated message - add nl */
- tmp[tpk_curr + 0] = '\n';
- tmp[tpk_curr + 1] = '\0';
- printk(KERN_INFO "%s%s", tpk_tag, tmp);
- tpk_curr = 0;
- }
+ tpk_flush();
return i;
}
for (i = 0; i < count; i++) {
- tmp[tpk_curr] = buf[i];
- if (tpk_curr < TPK_STR_SIZE) {
- switch (buf[i]) {
- case '\r':
- /* replace cr with nl */
- tmp[tpk_curr + 0] = '\n';
- tmp[tpk_curr + 1] = '\0';
- printk(KERN_INFO "%s%s", tpk_tag, tmp);
- tpk_curr = 0;
- if ((i + 1) < count && buf[i + 1] == '\n')
- i++;
- break;
- case '\n':
- tmp[tpk_curr + 1] = '\0';
- printk(KERN_INFO "%s%s", tpk_tag, tmp);
- tpk_curr = 0;
- break;
- default:
- tpk_curr++;
- }
- } else {
+ if (tpk_curr >= TPK_STR_SIZE) {
/* end of tmp buffer reached: cut the message in two */
- tmp[tpk_curr + 1] = '\\';
- tmp[tpk_curr + 2] = '\n';
- tmp[tpk_curr + 3] = '\0';
- printk(KERN_INFO "%s%s", tpk_tag, tmp);
- tpk_curr = 0;
+ tpk_buffer[tpk_curr++] = '\\';
+ tpk_flush();
+ }
+
+ switch (buf[i]) {
+ case '\r':
+ tpk_flush();
+ if ((i + 1) < count && buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
+ tpk_flush();
+ break;
+ default:
+ tpk_buffer[tpk_curr++] = buf[i];
+ break;
}
}
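The ttyprintk rewrite above routes every line termination through one tpk_flush() helper. The same buffering logic, reduced to a standalone user-space program so it can be compiled and traced in isolation; the buffer size and the "[U] " tag mirror the driver, everything else is illustrative:

#include <stdio.h>
#include <string.h>

#define TPK_STR_SIZE 508			/* same limit the driver uses */

static char buffer[TPK_STR_SIZE + 4];
static int curr;

static void flush(void)
{
	if (curr > 0) {
		buffer[curr] = '\0';
		printf("[U] %s\n", buffer);	/* the driver uses pr_info() */
		curr = 0;
	}
}

static void feed(const char *buf, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (curr >= TPK_STR_SIZE) {
			/* overlong line: cut it, mark continuation with '\' */
			buffer[curr++] = '\\';
			flush();
		}
		switch (buf[i]) {
		case '\r':
			flush();
			if (i + 1 < count && buf[i + 1] == '\n')
				i++;		/* swallow the \n of a \r\n pair */
			break;
		case '\n':
			flush();
			break;
		default:
			buffer[curr++] = buf[i];
		}
	}
}

int main(void)
{
	const char *msg = "hello\r\nworld\n";

	feed(msg, (int)strlen(msg));
	flush();				/* mimic the NULL-buffer flush path */
	return 0;
}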
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5da47e26a012..8114744bf30c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -889,7 +889,7 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
return 0;
/* Try lock this page */
- if (buf->ops->steal(pipe, buf) == 0) {
+ if (pipe_buf_steal(pipe, buf) == 0) {
/* Get reference and unlock page for moving */
get_page(buf->page);
unlock_page(buf->page);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index dcd19f3f182e..b6c9cdead7f3 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -655,10 +655,10 @@ static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
version = channel->wr_buffers[0]->addr;
- /* Check version number. Accept anything below 0x82 for now. */
+ /* Check version number. Reject anything above 0x82. */
if (*version > 0x82) {
dev_err(endpoint->dev,
- "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgarde. Aborting.\n",
+ "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
*version);
return -ENODEV;
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e2d9bd760c84..6a8ac04bedeb 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -31,22 +31,12 @@ config COMMON_CLK_WM831X
source "drivers/clk/versatile/Kconfig"
-config COMMON_CLK_MAX_GEN
- bool
-
config COMMON_CLK_MAX77686
- tristate "Clock driver for Maxim 77686 MFD"
- depends on MFD_MAX77686
- select COMMON_CLK_MAX_GEN
- ---help---
- This driver supports Maxim 77686 crystal oscillator clock.
-
-config COMMON_CLK_MAX77802
- tristate "Clock driver for Maxim 77802 PMIC"
- depends on MFD_MAX77686
- select COMMON_CLK_MAX_GEN
+ tristate "Clock driver for Maxim 77620/77686/77802 MFD"
+ depends on MFD_MAX77686 || MFD_MAX77620
---help---
- This driver supports Maxim 77802 crystal oscillator clock.
+ This driver supports Maxim 77620/77686/77802 crystal oscillator
+ clock.
config COMMON_CLK_RK808
tristate "Clock driver for RK808/RK818"
@@ -210,6 +200,7 @@ config COMMON_CLK_OXNAS
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
+source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
@@ -218,5 +209,6 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
+source "drivers/clk/uniphier/Kconfig"
endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 3b6f9cf3464a..925081ec14c0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -26,10 +26,7 @@ obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
-obj-$(CONFIG_MACH_LOONGSON32) += clk-ls1x.o
-obj-$(CONFIG_COMMON_CLK_MAX_GEN) += clk-max-gen.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_COMMON_CLK_MAX77802) += clk-max77802.o
obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
@@ -63,6 +60,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
+obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
obj-$(CONFIG_MACH_PIC32) += microchip/
@@ -86,6 +84,7 @@ obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
+obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 7f6bec8837ea..4e1cd5aa69d8 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -233,14 +233,16 @@ static void clk_generated_startup(struct clk_generated *gck)
>> AT91_PMC_PCR_GCKDIV_OFFSET;
}
-static struct clk * __init
-at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
- *name, const char **parent_names, u8 num_parents,
- u8 id, const struct clk_range *range)
+static struct clk_hw * __init
+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id,
+ const struct clk_range *range)
{
struct clk_generated *gck;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
gck = kzalloc(sizeof(*gck), GFP_KERNEL);
if (!gck)
@@ -258,13 +260,15 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
gck->lock = lock;
gck->range = *range;
- clk = clk_register(NULL, &gck->hw);
- if (IS_ERR(clk))
+ hw = &gck->hw;
+ ret = clk_hw_register(NULL, &gck->hw);
+ if (ret) {
kfree(gck);
- else
+ hw = ERR_PTR(ret);
+ } else
clk_generated_startup(gck);
- return clk;
+ return hw;
}
static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
@@ -272,7 +276,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
int num;
u32 id;
const char *name;
- struct clk *clk;
+ struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[GENERATED_SOURCE_MAX];
struct device_node *gcknp;
@@ -306,13 +310,13 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
&range);
- clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
parent_names, num_parents,
id, &range);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
continue;
- of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw);
}
}
CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 8e20c8a76db7..e0daa4a31f88 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -92,7 +92,7 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
struct clk_init_data init;
const char *parent_name;
struct regmap *regmap;
- struct clk *clk;
+ int ret;
regmap = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(regmap))
@@ -113,13 +113,13 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
h32mxclk->hw.init = &init;
h32mxclk->regmap = regmap;
- clk = clk_register(NULL, &h32mxclk->hw);
- if (IS_ERR(clk)) {
+ ret = clk_hw_register(NULL, &h32mxclk->hw);
+ if (ret) {
kfree(h32mxclk);
return;
}
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, &h32mxclk->hw);
}
CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
of_sama5d4_clk_h32mx_setup);
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 58b5baca670c..c813c27f2e58 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -128,15 +128,16 @@ static const struct clk_ops main_osc_ops = {
.is_prepared = clk_main_osc_is_prepared,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_main_osc(struct regmap *regmap,
const char *name,
const char *parent_name,
bool bypass)
{
struct clk_main_osc *osc;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name || !parent_name)
return ERR_PTR(-EINVAL);
@@ -160,16 +161,19 @@ at91_clk_register_main_osc(struct regmap *regmap,
AT91_PMC_MOSCEN,
AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
+ hw = &osc->hw;
+ ret = clk_hw_register(NULL, &osc->hw);
+ if (ret) {
kfree(osc);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
@@ -183,11 +187,11 @@ static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
- if (IS_ERR(clk))
+ hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
of_at91rm9200_clk_main_osc_setup);
@@ -271,14 +275,15 @@ static const struct clk_ops main_rc_osc_ops = {
.recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_main_rc_osc(struct regmap *regmap,
const char *name,
u32 frequency, u32 accuracy)
{
struct clk_main_rc_osc *osc;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name || !frequency)
return ERR_PTR(-EINVAL);
@@ -298,16 +303,19 @@ at91_clk_register_main_rc_osc(struct regmap *regmap,
osc->frequency = frequency;
osc->accuracy = accuracy;
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
+ hw = &osc->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
kfree(osc);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
u32 frequency = 0;
u32 accuracy = 0;
const char *name = np->name;
@@ -321,11 +329,11 @@ static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
- if (IS_ERR(clk))
+ hw = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
of_at91sam9x5_clk_main_rc_osc_setup);
@@ -395,14 +403,15 @@ static const struct clk_ops rm9200_main_ops = {
.recalc_rate = clk_rm9200_main_recalc_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
const char *parent_name)
{
struct clk_rm9200_main *clkmain;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name)
return ERR_PTR(-EINVAL);
@@ -423,16 +432,19 @@ at91_clk_register_rm9200_main(struct regmap *regmap,
clkmain->hw.init = &init;
clkmain->regmap = regmap;
- clk = clk_register(NULL, &clkmain->hw);
- if (IS_ERR(clk))
+ hw = &clkmain->hw;
+ ret = clk_hw_register(NULL, &clkmain->hw);
+ if (ret) {
kfree(clkmain);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
@@ -444,11 +456,11 @@ static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_rm9200_main(regmap, name, parent_name);
- if (IS_ERR(clk))
+ hw = at91_clk_register_rm9200_main(regmap, name, parent_name);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
of_at91rm9200_clk_main_setup);
@@ -529,16 +541,17 @@ static const struct clk_ops sam9x5_main_ops = {
.get_parent = clk_sam9x5_main_get_parent,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_sam9x5_main(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
{
struct clk_sam9x5_main *clkmain;
- struct clk *clk = NULL;
struct clk_init_data init;
unsigned int status;
+ struct clk_hw *hw;
+ int ret;
if (!name)
return ERR_PTR(-EINVAL);
@@ -561,16 +574,19 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
- clk = clk_register(NULL, &clkmain->hw);
- if (IS_ERR(clk))
+ hw = &clkmain->hw;
+ ret = clk_hw_register(NULL, &clkmain->hw);
+ if (ret) {
kfree(clkmain);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_names[2];
unsigned int num_parents;
const char *name = np->name;
@@ -587,12 +603,12 @@ static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_sam9x5_main(regmap, name, parent_names,
+ hw = at91_clk_register_sam9x5_main(regmap, name, parent_names,
num_parents);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
of_at91sam9x5_clk_main_setup);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index d1021e106191..e9cba9fc26d7 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -120,7 +120,7 @@ static const struct clk_ops master_ops = {
.get_parent = clk_master_get_parent,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
@@ -128,8 +128,9 @@ at91_clk_register_master(struct regmap *regmap,
const struct clk_master_characteristics *characteristics)
{
struct clk_master *master;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name || !num_parents || !parent_names)
return ERR_PTR(-EINVAL);
@@ -149,12 +150,14 @@ at91_clk_register_master(struct regmap *regmap,
master->characteristics = characteristics;
master->regmap = regmap;
- clk = clk_register(NULL, &master->hw);
- if (IS_ERR(clk)) {
+ hw = &master->hw;
+ ret = clk_hw_register(NULL, &master->hw);
+ if (ret) {
kfree(master);
+ hw = ERR_PTR(ret);
}
- return clk;
+ return hw;
}
@@ -198,7 +201,7 @@ static void __init
of_at91_clk_master_setup(struct device_node *np,
const struct clk_master_layout *layout)
{
- struct clk *clk;
+ struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[MASTER_SOURCE_MAX];
const char *name = np->name;
@@ -221,13 +224,13 @@ of_at91_clk_master_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_master(regmap, name, num_parents,
+ hw = at91_clk_register_master(regmap, name, num_parents,
parent_names, layout,
characteristics);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto out_free_characteristics;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
return;
out_free_characteristics:
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index fd160728e990..dc29fd979d3f 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -104,13 +104,14 @@ static const struct clk_ops peripheral_ops = {
.is_enabled = clk_peripheral_is_enabled,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id)
{
struct clk_peripheral *periph;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name || !parent_name || id > PERIPHERAL_ID_MAX)
return ERR_PTR(-EINVAL);
@@ -129,11 +130,14 @@ at91_clk_register_peripheral(struct regmap *regmap, const char *name,
periph->hw.init = &init;
periph->regmap = regmap;
- clk = clk_register(NULL, &periph->hw);
- if (IS_ERR(clk))
+ hw = &periph->hw;
+ ret = clk_hw_register(NULL, &periph->hw);
+ if (ret) {
kfree(periph);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
@@ -327,14 +331,15 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.set_rate = clk_sam9x5_peripheral_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range)
{
struct clk_sam9x5_peripheral *periph;
- struct clk *clk = NULL;
struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
if (!name || !parent_name)
return ERR_PTR(-EINVAL);
@@ -357,13 +362,15 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
periph->auto_div = true;
periph->range = *range;
- clk = clk_register(NULL, &periph->hw);
- if (IS_ERR(clk))
+ hw = &periph->hw;
+ ret = clk_hw_register(NULL, &periph->hw);
+ if (ret) {
kfree(periph);
- else
+ hw = ERR_PTR(ret);
+ } else
clk_sam9x5_peripheral_autodiv(periph);
- return clk;
+ return hw;
}
static void __init
@@ -371,7 +378,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
{
int num;
u32 id;
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name;
struct device_node *periphclknp;
@@ -400,7 +407,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
name = periphclknp->name;
if (type == PERIPHERAL_AT91RM9200) {
- clk = at91_clk_register_peripheral(regmap, name,
+ hw = at91_clk_register_peripheral(regmap, name,
parent_name, id);
} else {
struct clk_range range = CLK_RANGE(0, 0);
@@ -409,17 +416,17 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
"atmel,clk-output-range",
&range);
- clk = at91_clk_register_sam9x5_peripheral(regmap,
+ hw = at91_clk_register_sam9x5_peripheral(regmap,
&pmc_pcr_lock,
name,
parent_name,
id, &range);
}
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
continue;
- of_clk_add_provider(periphclknp, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(periphclknp, of_clk_hw_simple_get, hw);
}
}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index fb2e0b56d4b7..45ad168e1496 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -296,17 +296,18 @@ static const struct clk_ops pll_ops = {
.set_rate = clk_pll_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_pll(struct regmap *regmap, const char *name,
const char *parent_name, u8 id,
const struct clk_pll_layout *layout,
const struct clk_pll_characteristics *characteristics)
{
struct clk_pll *pll;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
int offset = PLL_REG(id);
unsigned int pllr;
+ int ret;
if (id > PLL_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -330,12 +331,14 @@ at91_clk_register_pll(struct regmap *regmap, const char *name,
pll->div = PLL_DIV(pllr);
pll->mul = PLL_MUL(pllr, layout);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
+ hw = &pll->hw;
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (ret) {
kfree(pll);
+ hw = ERR_PTR(ret);
}
- return clk;
+ return hw;
}
@@ -465,7 +468,7 @@ of_at91_clk_pll_setup(struct device_node *np,
const struct clk_pll_layout *layout)
{
u32 id;
- struct clk *clk;
+ struct clk_hw *hw;
struct regmap *regmap;
const char *parent_name;
const char *name = np->name;
@@ -486,12 +489,12 @@ of_at91_clk_pll_setup(struct device_node *np,
if (!characteristics)
return;
- clk = at91_clk_register_pll(regmap, name, parent_name, id, layout,
+ hw = at91_clk_register_pll(regmap, name, parent_name, id, layout,
characteristics);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto out_free_characteristics;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
return;
out_free_characteristics:
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index 2bed26481027..b4afaf22f3fd 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -75,13 +75,14 @@ static const struct clk_ops plldiv_ops = {
.set_rate = clk_plldiv_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct clk_plldiv *plldiv;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
plldiv = kzalloc(sizeof(*plldiv), GFP_KERNEL);
if (!plldiv)
@@ -96,18 +97,20 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
plldiv->hw.init = &init;
plldiv->regmap = regmap;
- clk = clk_register(NULL, &plldiv->hw);
-
- if (IS_ERR(clk))
+ hw = &plldiv->hw;
+ ret = clk_hw_register(NULL, &plldiv->hw);
+ if (ret) {
kfree(plldiv);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init
of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
@@ -120,12 +123,11 @@ of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_plldiv(regmap, name, parent_name);
- if (IS_ERR(clk))
+ hw = at91_clk_register_plldiv(regmap, name, parent_name);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- return;
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
of_at91sam9x5_clk_plldiv_setup);
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 25d5906640c3..190122e64a3a 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -170,15 +170,16 @@ static const struct clk_ops programmable_ops = {
.set_rate = clk_programmable_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
const struct clk_programmable_layout *layout)
{
struct clk_programmable *prog;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
if (id > PROG_ID_MAX)
return ERR_PTR(-EINVAL);
@@ -198,11 +199,14 @@ at91_clk_register_programmable(struct regmap *regmap,
prog->hw.init = &init;
prog->regmap = regmap;
- clk = clk_register(NULL, &prog->hw);
- if (IS_ERR(clk))
+ hw = &prog->hw;
+ ret = clk_hw_register(NULL, &prog->hw);
+ if (ret) {
kfree(prog);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static const struct clk_programmable_layout at91rm9200_programmable_layout = {
@@ -229,7 +233,7 @@ of_at91_clk_prog_setup(struct device_node *np,
{
int num;
u32 id;
- struct clk *clk;
+ struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
const char *name;
@@ -257,13 +261,13 @@ of_at91_clk_prog_setup(struct device_node *np,
if (of_property_read_string(np, "clock-output-names", &name))
name = progclknp->name;
- clk = at91_clk_register_programmable(regmap, name,
+ hw = at91_clk_register_programmable(regmap, name,
parent_names, num_parents,
id, layout);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
continue;
- of_clk_add_provider(progclknp, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(progclknp, of_clk_hw_simple_get, hw);
}
}
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 61090b1146cf..560a8b9abf93 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -13,42 +13,11 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include "pmc.h"
-#include "sckc.h"
-
-#define SLOW_CLOCK_FREQ 32768
-#define SLOWCK_SW_CYCLES 5
-#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
- SLOW_CLOCK_FREQ)
-
-#define AT91_SCKC_CR 0x00
-#define AT91_SCKC_RCEN (1 << 0)
-#define AT91_SCKC_OSC32EN (1 << 1)
-#define AT91_SCKC_OSC32BYP (1 << 2)
-#define AT91_SCKC_OSCSEL (1 << 3)
-
-struct clk_slow_osc {
- struct clk_hw hw;
- void __iomem *sckcr;
- unsigned long startup_usec;
-};
-
-#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw)
-
-struct clk_slow_rc_osc {
- struct clk_hw hw;
- void __iomem *sckcr;
- unsigned long frequency;
- unsigned long accuracy;
- unsigned long startup_usec;
-};
-
-#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw)
struct clk_sam9260_slow {
struct clk_hw hw;
@@ -57,328 +26,6 @@ struct clk_sam9260_slow {
#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
-struct clk_sam9x5_slow {
- struct clk_hw hw;
- void __iomem *sckcr;
- u8 parent;
-};
-
-#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
-
-static int clk_slow_osc_prepare(struct clk_hw *hw)
-{
- struct clk_slow_osc *osc = to_clk_slow_osc(hw);
- void __iomem *sckcr = osc->sckcr;
- u32 tmp = readl(sckcr);
-
- if (tmp & AT91_SCKC_OSC32BYP)
- return 0;
-
- writel(tmp | AT91_SCKC_OSC32EN, sckcr);
-
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
-
- return 0;
-}
-
-static void clk_slow_osc_unprepare(struct clk_hw *hw)
-{
- struct clk_slow_osc *osc = to_clk_slow_osc(hw);
- void __iomem *sckcr = osc->sckcr;
- u32 tmp = readl(sckcr);
-
- if (tmp & AT91_SCKC_OSC32BYP)
- return;
-
- writel(tmp & ~AT91_SCKC_OSC32EN, sckcr);
-}
-
-static int clk_slow_osc_is_prepared(struct clk_hw *hw)
-{
- struct clk_slow_osc *osc = to_clk_slow_osc(hw);
- void __iomem *sckcr = osc->sckcr;
- u32 tmp = readl(sckcr);
-
- if (tmp & AT91_SCKC_OSC32BYP)
- return 1;
-
- return !!(tmp & AT91_SCKC_OSC32EN);
-}
-
-static const struct clk_ops slow_osc_ops = {
- .prepare = clk_slow_osc_prepare,
- .unprepare = clk_slow_osc_unprepare,
- .is_prepared = clk_slow_osc_is_prepared,
-};
-
-static struct clk * __init
-at91_clk_register_slow_osc(void __iomem *sckcr,
- const char *name,
- const char *parent_name,
- unsigned long startup,
- bool bypass)
-{
- struct clk_slow_osc *osc;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
- if (!sckcr || !name || !parent_name)
- return ERR_PTR(-EINVAL);
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
- if (!osc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &slow_osc_ops;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.flags = CLK_IGNORE_UNUSED;
-
- osc->hw.init = &init;
- osc->sckcr = sckcr;
- osc->startup_usec = startup;
-
- if (bypass)
- writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP,
- sckcr);
-
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
- kfree(osc);
-
- return clk;
-}
-
-void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
- void __iomem *sckcr)
-{
- struct clk *clk;
- const char *parent_name;
- const char *name = np->name;
- u32 startup;
- bool bypass;
-
- parent_name = of_clk_get_parent_name(np, 0);
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
-
- clk = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
- bypass);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
-
- return osc->frequency;
-}
-
-static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw,
- unsigned long parent_acc)
-{
- struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
-
- return osc->accuracy;
-}
-
-static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
-{
- struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
- void __iomem *sckcr = osc->sckcr;
-
- writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
-
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
-
- return 0;
-}
-
-static void clk_slow_rc_osc_unprepare(struct clk_hw *hw)
-{
- struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
- void __iomem *sckcr = osc->sckcr;
-
- writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr);
-}
-
-static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw)
-{
- struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
-
- return !!(readl(osc->sckcr) & AT91_SCKC_RCEN);
-}
-
-static const struct clk_ops slow_rc_osc_ops = {
- .prepare = clk_slow_rc_osc_prepare,
- .unprepare = clk_slow_rc_osc_unprepare,
- .is_prepared = clk_slow_rc_osc_is_prepared,
- .recalc_rate = clk_slow_rc_osc_recalc_rate,
- .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy,
-};
-
-static struct clk * __init
-at91_clk_register_slow_rc_osc(void __iomem *sckcr,
- const char *name,
- unsigned long frequency,
- unsigned long accuracy,
- unsigned long startup)
-{
- struct clk_slow_rc_osc *osc;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
- if (!sckcr || !name)
- return ERR_PTR(-EINVAL);
-
- osc = kzalloc(sizeof(*osc), GFP_KERNEL);
- if (!osc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &slow_rc_osc_ops;
- init.parent_names = NULL;
- init.num_parents = 0;
- init.flags = CLK_IGNORE_UNUSED;
-
- osc->hw.init = &init;
- osc->sckcr = sckcr;
- osc->frequency = frequency;
- osc->accuracy = accuracy;
- osc->startup_usec = startup;
-
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
- kfree(osc);
-
- return clk;
-}
-
-void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
- void __iomem *sckcr)
-{
- struct clk *clk;
- u32 frequency = 0;
- u32 accuracy = 0;
- u32 startup = 0;
- const char *name = np->name;
-
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &frequency);
- of_property_read_u32(np, "clock-accuracy", &accuracy);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
-
- clk = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
- startup);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
-{
- struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
- void __iomem *sckcr = slowck->sckcr;
- u32 tmp;
-
- if (index > 1)
- return -EINVAL;
-
- tmp = readl(sckcr);
-
- if ((!index && !(tmp & AT91_SCKC_OSCSEL)) ||
- (index && (tmp & AT91_SCKC_OSCSEL)))
- return 0;
-
- if (index)
- tmp |= AT91_SCKC_OSCSEL;
- else
- tmp &= ~AT91_SCKC_OSCSEL;
-
- writel(tmp, sckcr);
-
- usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
-
- return 0;
-}
-
-static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw)
-{
- struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
-
- return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL);
-}
-
-static const struct clk_ops sam9x5_slow_ops = {
- .set_parent = clk_sam9x5_slow_set_parent,
- .get_parent = clk_sam9x5_slow_get_parent,
-};
-
-static struct clk * __init
-at91_clk_register_sam9x5_slow(void __iomem *sckcr,
- const char *name,
- const char **parent_names,
- int num_parents)
-{
- struct clk_sam9x5_slow *slowck;
- struct clk *clk = NULL;
- struct clk_init_data init;
-
- if (!sckcr || !name || !parent_names || !num_parents)
- return ERR_PTR(-EINVAL);
-
- slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
- if (!slowck)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &sam9x5_slow_ops;
- init.parent_names = parent_names;
- init.num_parents = num_parents;
- init.flags = 0;
-
- slowck->hw.init = &init;
- slowck->sckcr = sckcr;
- slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL);
-
- clk = clk_register(NULL, &slowck->hw);
- if (IS_ERR(clk))
- kfree(slowck);
-
- return clk;
-}
-
-void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
- void __iomem *sckcr)
-{
- struct clk *clk;
- const char *parent_names[2];
- unsigned int num_parents;
- const char *name = np->name;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > 2)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- clk = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
- return;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
{
struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
@@ -393,15 +40,16 @@ static const struct clk_ops sam9260_slow_ops = {
.get_parent = clk_sam9260_slow_get_parent,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_sam9260_slow(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
{
struct clk_sam9260_slow *slowck;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
if (!name)
return ERR_PTR(-EINVAL);
@@ -422,16 +70,19 @@ at91_clk_register_sam9260_slow(struct regmap *regmap,
slowck->hw.init = &init;
slowck->regmap = regmap;
- clk = clk_register(NULL, &slowck->hw);
- if (IS_ERR(clk))
+ hw = &slowck->hw;
+ ret = clk_hw_register(NULL, &slowck->hw);
+ if (ret) {
kfree(slowck);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_names[2];
unsigned int num_parents;
const char *name = np->name;
@@ -448,12 +99,12 @@ static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_sam9260_slow(regmap, name, parent_names,
+ hw = at91_clk_register_sam9260_slow(regmap, name, parent_names,
num_parents);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index 3c04b069d5b8..965c662b90a5 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -111,13 +111,14 @@ static const struct clk_ops at91sam9x5_smd_ops = {
.set_rate = at91sam9x5_clk_smd_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_smd *smd;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
smd = kzalloc(sizeof(*smd), GFP_KERNEL);
if (!smd)
@@ -132,16 +133,19 @@ at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
smd->hw.init = &init;
smd->regmap = regmap;
- clk = clk_register(NULL, &smd->hw);
- if (IS_ERR(clk))
+ hw = &smd->hw;
+ ret = clk_hw_register(NULL, &smd->hw);
+ if (ret) {
kfree(smd);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
@@ -159,12 +163,12 @@ static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91sam9x5_clk_register_smd(regmap, name, parent_names,
+ hw = at91sam9x5_clk_register_smd(regmap, name, parent_names,
num_parents);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
of_at91sam9x5_clk_smd_setup);
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 8f35d8172909..86a36809765d 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -88,13 +88,14 @@ static const struct clk_ops system_ops = {
.is_prepared = clk_system_is_prepared,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
const char *parent_name, u8 id)
{
struct clk_system *sys;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
if (!parent_name || id > SYSTEM_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -113,18 +114,21 @@ at91_clk_register_system(struct regmap *regmap, const char *name,
sys->hw.init = &init;
sys->regmap = regmap;
- clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk))
+ hw = &sys->hw;
+ ret = clk_hw_register(NULL, &sys->hw);
+ if (ret) {
kfree(sys);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
{
int num;
u32 id;
- struct clk *clk;
+ struct clk_hw *hw;
const char *name;
struct device_node *sysclknp;
const char *parent_name;
@@ -147,11 +151,11 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
parent_name = of_clk_get_parent_name(sysclknp, 0);
- clk = at91_clk_register_system(regmap, name, parent_name, id);
- if (IS_ERR(clk))
+ hw = at91_clk_register_system(regmap, name, parent_name, id);
+ if (IS_ERR(hw))
continue;
- of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(sysclknp, of_clk_hw_simple_get, hw);
}
}
CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index d80bdb0a8b02..791770a563fc 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -192,13 +192,14 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.set_rate = at91sam9x5_clk_usb_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_usb *usb;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
@@ -214,20 +215,24 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
- clk = clk_register(NULL, &usb->hw);
- if (IS_ERR(clk))
+ hw = &usb->hw;
+ ret = clk_hw_register(NULL, &usb->hw);
+ if (ret) {
kfree(usb);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
-static struct clk * __init
+static struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct at91sam9x5_clk_usb *usb;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
@@ -242,11 +247,14 @@ at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
- clk = clk_register(NULL, &usb->hw);
- if (IS_ERR(clk))
+ hw = &usb->hw;
+ ret = clk_hw_register(NULL, &usb->hw);
+ if (ret) {
kfree(usb);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
@@ -334,13 +342,14 @@ static const struct clk_ops at91rm9200_usb_ops = {
.set_rate = at91rm9200_clk_usb_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors)
{
struct at91rm9200_clk_usb *usb;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
@@ -356,16 +365,19 @@ at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
usb->regmap = regmap;
memcpy(usb->divisors, divisors, sizeof(usb->divisors));
- clk = clk_register(NULL, &usb->hw);
- if (IS_ERR(clk))
+ hw = &usb->hw;
+ ret = clk_hw_register(NULL, &usb->hw);
+ if (ret) {
kfree(usb);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
@@ -383,19 +395,19 @@ static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91sam9x5_clk_register_usb(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(clk))
+ hw = at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
of_at91sam9x5_clk_usb_setup);
static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
@@ -410,18 +422,18 @@ static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91sam9n12_clk_register_usb(regmap, name, parent_name);
- if (IS_ERR(clk))
+ hw = at91sam9n12_clk_register_usb(regmap, name, parent_name);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
of_at91sam9n12_clk_usb_setup);
static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
@@ -440,12 +452,11 @@ static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
regmap = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(regmap))
return;
-
- clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
- if (IS_ERR(clk))
+ hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
of_at91rm9200_clk_usb_setup);
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index 61fcf399e58c..aadabd9d1e2b 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -77,13 +77,14 @@ static const struct clk_ops utmi_ops = {
.recalc_rate = clk_utmi_recalc_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
at91_clk_register_utmi(struct regmap *regmap,
const char *name, const char *parent_name)
{
struct clk_utmi *utmi;
- struct clk *clk = NULL;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
utmi = kzalloc(sizeof(*utmi), GFP_KERNEL);
if (!utmi)
@@ -98,16 +99,19 @@ at91_clk_register_utmi(struct regmap *regmap,
utmi->hw.init = &init;
utmi->regmap = regmap;
- clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk))
+ hw = &utmi->hw;
+ ret = clk_hw_register(NULL, &utmi->hw);
+ if (ret) {
kfree(utmi);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
@@ -120,11 +124,11 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- clk = at91_clk_register_utmi(regmap, name, parent_name);
- if (IS_ERR(clk))
+ hw = at91_clk_register_utmi(regmap, name, parent_name);
+ if (IS_ERR(hw))
return;
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
return;
}
CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 1184d76a7ab7..ab6ecefc49ad 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,11 +12,382 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
-#include "sckc.h"
+#define SLOW_CLOCK_FREQ 32768
+#define SLOWCK_SW_CYCLES 5
+#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+
+#define AT91_SCKC_CR 0x00
+#define AT91_SCKC_RCEN (1 << 0)
+#define AT91_SCKC_OSC32EN (1 << 1)
+#define AT91_SCKC_OSC32BYP (1 << 2)
+#define AT91_SCKC_OSCSEL (1 << 3)
+
+struct clk_slow_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw)
+
+struct clk_sama5d4_slow_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long startup_usec;
+ bool prepared;
+};
+
+#define to_clk_sama5d4_slow_osc(hw) container_of(hw, struct clk_sama5d4_slow_osc, hw)
+
+struct clk_slow_rc_osc {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ unsigned long frequency;
+ unsigned long accuracy;
+ unsigned long startup_usec;
+};
+
+#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw)
+
+struct clk_sam9x5_slow {
+ struct clk_hw hw;
+ void __iomem *sckcr;
+ u8 parent;
+};
+
+#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+
+static int clk_slow_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & (AT91_SCKC_OSC32BYP | AT91_SCKC_OSC32EN))
+ return 0;
+
+ writel(tmp | AT91_SCKC_OSC32EN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return;
+
+ writel(tmp & ~AT91_SCKC_OSC32EN, sckcr);
+}
+
+static int clk_slow_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_osc *osc = to_clk_slow_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+ u32 tmp = readl(sckcr);
+
+ if (tmp & AT91_SCKC_OSC32BYP)
+ return 1;
+
+ return !!(tmp & AT91_SCKC_OSC32EN);
+}
+
+static const struct clk_ops slow_osc_ops = {
+ .prepare = clk_slow_osc_prepare,
+ .unprepare = clk_slow_osc_unprepare,
+ .is_prepared = clk_slow_osc_is_prepared,
+};
+
+static struct clk_hw * __init
+at91_clk_register_slow_osc(void __iomem *sckcr,
+ const char *name,
+ const char *parent_name,
+ unsigned long startup,
+ bool bypass)
+{
+ struct clk_slow_osc *osc;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (!sckcr || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_osc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->startup_usec = startup;
+
+ if (bypass)
+ writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP,
+ sckcr);
+
+ hw = &osc->hw;
+ ret = clk_hw_register(NULL, &osc->hw);
+ if (ret) {
+ kfree(osc);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static void __init
+of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, void __iomem *sckcr)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 startup;
+ bool bypass;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
+ bypass);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+
+static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->frequency;
+}
+
+static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_acc)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return osc->accuracy;
+}
+
+static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+
+ return 0;
+}
+
+static void clk_slow_rc_osc_unprepare(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+ void __iomem *sckcr = osc->sckcr;
+
+ writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr);
+}
+
+static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw);
+
+ return !!(readl(osc->sckcr) & AT91_SCKC_RCEN);
+}
+
+static const struct clk_ops slow_rc_osc_ops = {
+ .prepare = clk_slow_rc_osc_prepare,
+ .unprepare = clk_slow_rc_osc_unprepare,
+ .is_prepared = clk_slow_rc_osc_is_prepared,
+ .recalc_rate = clk_slow_rc_osc_recalc_rate,
+ .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy,
+};
+
+static struct clk_hw * __init
+at91_clk_register_slow_rc_osc(void __iomem *sckcr,
+ const char *name,
+ unsigned long frequency,
+ unsigned long accuracy,
+ unsigned long startup)
+{
+ struct clk_slow_rc_osc *osc;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (!sckcr || !name)
+ return ERR_PTR(-EINVAL);
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &slow_rc_osc_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = sckcr;
+ osc->frequency = frequency;
+ osc->accuracy = accuracy;
+ osc->startup_usec = startup;
+
+ hw = &osc->hw;
+ ret = clk_hw_register(NULL, &osc->hw);
+ if (ret) {
+ kfree(osc);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static void __init
+of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, void __iomem *sckcr)
+{
+ struct clk_hw *hw;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ u32 startup = 0;
+ const char *name = np->name;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+ of_property_read_u32(np, "atmel,startup-time-usec", &startup);
+
+ hw = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
+ startup);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+
+static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+ void __iomem *sckcr = slowck->sckcr;
+ u32 tmp;
+
+ if (index > 1)
+ return -EINVAL;
+
+ tmp = readl(sckcr);
+
+ if ((!index && !(tmp & AT91_SCKC_OSCSEL)) ||
+ (index && (tmp & AT91_SCKC_OSCSEL)))
+ return 0;
+
+ if (index)
+ tmp |= AT91_SCKC_OSCSEL;
+ else
+ tmp &= ~AT91_SCKC_OSCSEL;
+
+ writel(tmp, sckcr);
+
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+
+ return 0;
+}
+
+static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw)
+{
+ struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
+
+ return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL);
+}
+
+static const struct clk_ops sam9x5_slow_ops = {
+ .set_parent = clk_sam9x5_slow_set_parent,
+ .get_parent = clk_sam9x5_slow_get_parent,
+};
+
+static struct clk_hw * __init
+at91_clk_register_sam9x5_slow(void __iomem *sckcr,
+ const char *name,
+ const char **parent_names,
+ int num_parents)
+{
+ struct clk_sam9x5_slow *slowck;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (!sckcr || !name || !parent_names || !num_parents)
+ return ERR_PTR(-EINVAL);
+
+ slowck = kzalloc(sizeof(*slowck), GFP_KERNEL);
+ if (!slowck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_slow_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ slowck->hw.init = &init;
+ slowck->sckcr = sckcr;
+ slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL);
+
+ hw = &slowck->hw;
+ ret = clk_hw_register(NULL, &slowck->hw);
+ if (ret) {
+ kfree(slowck);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static void __init
+of_at91sam9x5_clk_slow_setup(struct device_node *np, void __iomem *sckcr)
+{
+ struct clk_hw *hw;
+ const char *parent_names[2];
+ unsigned int num_parents;
+ const char *name = np->name;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > 2)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ hw = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
static const struct of_device_id sckc_clk_ids[] __initconst = {
/* Slow clock */
@@ -55,3 +426,94 @@ static void __init of_at91sam9x5_sckc_setup(struct device_node *np)
}
CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc",
of_at91sam9x5_sckc_setup);
+
+static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
+{
+ struct clk_sama5d4_slow_osc *osc = to_clk_sama5d4_slow_osc(hw);
+
+ if (osc->prepared)
+ return 0;
+
+ /*
+ * Assume that if it has already been selected (for example by the
+ * bootloader), enough time has already passed.
+ */
+ if ((readl(osc->sckcr) & AT91_SCKC_OSCSEL)) {
+ osc->prepared = true;
+ return 0;
+ }
+
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ osc->prepared = true;
+
+ return 0;
+}
+
+static int clk_sama5d4_slow_osc_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sama5d4_slow_osc *osc = to_clk_sama5d4_slow_osc(hw);
+
+ return osc->prepared;
+}
+
+static const struct clk_ops sama5d4_slow_osc_ops = {
+ .prepare = clk_sama5d4_slow_osc_prepare,
+ .is_prepared = clk_sama5d4_slow_osc_is_prepared,
+};
+
+static void __init of_sama5d4_sckc_setup(struct device_node *np)
+{
+ void __iomem *regbase = of_iomap(np, 0);
+ struct clk_hw *hw;
+ struct clk_sama5d4_slow_osc *osc;
+ struct clk_init_data init;
+ const char *xtal_name;
+ const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
+ bool bypass;
+ int ret;
+
+ if (!regbase)
+ return;
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0],
+ NULL, 0, 32768,
+ 250000000);
+ if (IS_ERR(hw))
+ return;
+
+ xtal_name = of_clk_get_parent_name(np, 0);
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ osc = kzalloc(sizeof(*osc), GFP_KERNEL);
+ if (!osc)
+ return;
+
+ init.name = parent_names[1];
+ init.ops = &sama5d4_slow_osc_ops;
+ init.parent_names = &xtal_name;
+ init.num_parents = 1;
+ init.flags = CLK_IGNORE_UNUSED;
+
+ osc->hw.init = &init;
+ osc->sckcr = regbase;
+ osc->startup_usec = 1200000;
+
+ if (bypass)
+ writel((readl(regbase) | AT91_SCKC_OSC32BYP), regbase);
+
+ hw = &osc->hw;
+ ret = clk_hw_register(NULL, &osc->hw);
+ if (ret) {
+ kfree(osc);
+ return;
+ }
+
+ hw = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, 2);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(sama5d4_clk_sckc, "atmel,sama5d4-sckc",
+ of_sama5d4_sckc_setup);
diff --git a/drivers/clk/at91/sckc.h b/drivers/clk/at91/sckc.h
deleted file mode 100644
index 836fcf59820f..000000000000
--- a/drivers/clk/at91/sckc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * drivers/clk/at91/sckc.h
- *
- * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __AT91_SCKC_H_
-#define __AT91_SCKC_H_
-
-extern void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np,
- void __iomem *sckcr);
-extern void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np,
- void __iomem *sckcr);
-extern void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
- void __iomem *sckcr);
-
-#endif /* __AT91_SCKC_H_ */
diff --git a/drivers/clk/axis/clk-artpec6.c b/drivers/clk/axis/clk-artpec6.c
index ffc988b098e4..da1a073c2236 100644
--- a/drivers/clk/axis/clk-artpec6.c
+++ b/drivers/clk/axis/clk-artpec6.c
@@ -113,8 +113,8 @@ static void of_artpec6_clkctrl_setup(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clkdata->clk_data);
}
-CLK_OF_DECLARE(artpec6_clkctrl, "axis,artpec6-clkctrl",
- of_artpec6_clkctrl_setup);
+CLK_OF_DECLARE_DRIVER(artpec6_clkctrl, "axis,artpec6-clkctrl",
+ of_artpec6_clkctrl_setup);
static int artpec6_clkctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index f2878459199a..f21e9b7afd1a 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -19,8 +19,36 @@ config CLK_BCM_KONA
in the BCM281xx and BCM21664 families.
config COMMON_CLK_IPROC
- bool
+ bool "Broadcom iProc clock support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on COMMON_CLK
+ default ARCH_BCM_IPROC
help
Enable common clock framework support for Broadcom SoCs
based on the iProc architecture
+
+if COMMON_CLK_IPROC
+
+config CLK_BCM_CYGNUS
+ bool "Broadcom Cygnus clock support"
+ depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ default ARCH_BCM_CYGNUS
+ help
+ Enable common clock framework support for the Broadcom Cygnus SoC
+
+config CLK_BCM_NSP
+ bool "Broadcom Northstar/Northstar Plus clock support"
+ depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
+ default ARCH_BCM_5301X || ARCH_BCM_NSP
+ help
+ Enable common clock framework support for the Broadcom Northstar and
+ Northstar Plus SoCs
+
+config CLK_BCM_NS2
+ bool "Broadcom Northstar 2 clock support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ default ARCH_BCM_IPROC
+ help
+ Enable common clock framework support for the Broadcom Northstar 2 SoC
+
+endif
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index 1d79bd2c36f0..d9dc848f18c9 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o
obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o
-obj-$(CONFIG_COMMON_CLK_IPROC) += clk-ns2.o
-obj-$(CONFIG_ARCH_BCM_CYGNUS) += clk-cygnus.o
-obj-$(CONFIG_ARCH_BCM_NSP) += clk-nsp.o
-obj-$(CONFIG_ARCH_BCM_5301X) += clk-nsp.o
+obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o
+obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o
+obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o
+obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 3a177ade6e6c..bd750cf2238d 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -25,7 +25,7 @@
static int bcm2835_aux_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct clk_onecell_data *onecell;
+ struct clk_hw_onecell_data *onecell;
const char *parent;
struct clk *parent_clk;
struct resource *res;
@@ -41,28 +41,24 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
+ onecell = devm_kmalloc(dev, sizeof(*onecell) + sizeof(*onecell->hws) *
+ BCM2835_AUX_CLOCK_COUNT, GFP_KERNEL);
if (!onecell)
return -ENOMEM;
- onecell->clk_num = BCM2835_AUX_CLOCK_COUNT;
- onecell->clks = devm_kcalloc(dev, BCM2835_AUX_CLOCK_COUNT,
- sizeof(*onecell->clks), GFP_KERNEL);
- if (!onecell->clks)
- return -ENOMEM;
+ onecell->num = BCM2835_AUX_CLOCK_COUNT;
gate = reg + BCM2835_AUXENB;
- onecell->clks[BCM2835_AUX_CLOCK_UART] =
- clk_register_gate(dev, "aux_uart", parent, 0, gate, 0, 0, NULL);
-
- onecell->clks[BCM2835_AUX_CLOCK_SPI1] =
- clk_register_gate(dev, "aux_spi1", parent, 0, gate, 1, 0, NULL);
+ onecell->hws[BCM2835_AUX_CLOCK_UART] =
+ clk_hw_register_gate(dev, "aux_uart", parent, 0, gate, 0, 0, NULL);
- onecell->clks[BCM2835_AUX_CLOCK_SPI2] =
- clk_register_gate(dev, "aux_spi2", parent, 0, gate, 2, 0, NULL);
+ onecell->hws[BCM2835_AUX_CLOCK_SPI1] =
+ clk_hw_register_gate(dev, "aux_spi1", parent, 0, gate, 1, 0, NULL);
- of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get, onecell);
+ onecell->hws[BCM2835_AUX_CLOCK_SPI2] =
+ clk_hw_register_gate(dev, "aux_spi2", parent, 0, gate, 2, 0, NULL);
- return 0;
+ return of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ onecell);
}
static const struct of_device_id bcm2835_aux_clk_of_match[] = {
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 7a7970865c2d..b68bf573dcfb 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -36,6 +36,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk/bcm2835.h>
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -302,8 +303,8 @@ struct bcm2835_cprman {
spinlock_t regs_lock; /* spinlock for all clocks */
const char *osc_name;
- struct clk_onecell_data onecell;
- struct clk *clks[];
+ /* Must be last */
+ struct clk_hw_onecell_data onecell;
};
static inline void cprman_write(struct bcm2835_cprman *cprman, u32 reg, u32 val)
@@ -344,24 +345,24 @@ static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
*/
void __init bcm2835_init_clocks(void)
{
- struct clk *clk;
+ struct clk_hw *hw;
int ret;
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
- if (IS_ERR(clk))
+ hw = clk_hw_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
+ if (IS_ERR(hw))
pr_err("apb_pclk not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
- if (IS_ERR(clk))
+ hw = clk_hw_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
+ if (IS_ERR(hw))
pr_err("uart0_pclk not registered\n");
- ret = clk_register_clkdev(clk, NULL, "20201000.uart");
+ ret = clk_hw_register_clkdev(hw, NULL, "20201000.uart");
if (ret)
pr_err("uart0_pclk alias not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
- if (IS_ERR(clk))
+ hw = clk_hw_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
+ if (IS_ERR(hw))
pr_err("uart1_pclk not registered\n");
- ret = clk_register_clkdev(clk, NULL, "20215000.uart");
+ ret = clk_hw_register_clkdev(hw, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
}
@@ -443,6 +444,8 @@ struct bcm2835_clock_data {
/* Number of fractional bits in the divider */
u32 frac_bits;
+ u32 flags;
+
bool is_vpu_clock;
bool is_mash_clock;
};
@@ -1006,16 +1009,28 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
return 0;
}
+static bool
+bcm2835_clk_is_pllc(struct clk_hw *hw)
+{
+ if (!hw)
+ return false;
+
+ return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
+}
+
static int bcm2835_clock_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct clk_hw *parent, *best_parent = NULL;
+ bool current_parent_is_pllc;
unsigned long rate, best_rate = 0;
unsigned long prate, best_prate = 0;
size_t i;
u32 div;
+ current_parent_is_pllc = bcm2835_clk_is_pllc(clk_hw_get_parent(hw));
+
/*
* Select parent clock that results in the closest but lower rate
*/
@@ -1023,6 +1038,17 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
+
+ /*
+ * Don't choose a PLLC-derived clock as our parent
+ * unless it had been manually set that way. PLLC's
+ * frequency gets adjusted by the firmware due to
+ * over-temp or under-voltage conditions, without
+ * prior notification to our clock consumer.
+ */
+ if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
+ continue;
+
prate = clk_hw_get_rate(parent);
div = bcm2835_clock_choose_div(hw, req->rate, prate, true);
rate = bcm2835_clock_rate_from_divisor(clock, prate, div);
@@ -1121,11 +1147,12 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
.debug_init = bcm2835_clock_debug_init,
};
-static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
- const struct bcm2835_pll_data *data)
+static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ const struct bcm2835_pll_data *data)
{
struct bcm2835_pll *pll;
struct clk_init_data init;
+ int ret;
memset(&init, 0, sizeof(init));
@@ -1144,17 +1171,20 @@ static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
pll->data = data;
pll->hw.init = &init;
- return devm_clk_register(cprman->dev, &pll->hw);
+ ret = devm_clk_hw_register(cprman->dev, &pll->hw);
+ if (ret)
+ return NULL;
+ return &pll->hw;
}
-static struct clk *
+static struct clk_hw *
bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
const struct bcm2835_pll_divider_data *data)
{
struct bcm2835_pll_divider *divider;
struct clk_init_data init;
- struct clk *clk;
const char *divider_name;
+ int ret;
if (data->fixed_divider != 1) {
divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL,
@@ -1188,32 +1218,33 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
divider->cprman = cprman;
divider->data = data;
- clk = devm_clk_register(cprman->dev, &divider->div.hw);
- if (IS_ERR(clk))
- return clk;
+ ret = devm_clk_hw_register(cprman->dev, &divider->div.hw);
+ if (ret)
+ return ERR_PTR(ret);
/*
* PLLH's channels have a fixed divide by 10 afterwards, which
* is what our consumers are actually using.
*/
if (data->fixed_divider != 1) {
- return clk_register_fixed_factor(cprman->dev, data->name,
- divider_name,
- CLK_SET_RATE_PARENT,
- 1,
- data->fixed_divider);
+ return clk_hw_register_fixed_factor(cprman->dev, data->name,
+ divider_name,
+ CLK_SET_RATE_PARENT,
+ 1,
+ data->fixed_divider);
}
- return clk;
+ return &divider->div.hw;
}
-static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
+static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
const struct bcm2835_clock_data *data)
{
struct bcm2835_clock *clock;
struct clk_init_data init;
const char *parents[1 << CM_SRC_BITS];
size_t i;
+ int ret;
/*
* Replace our "xosc" references with the oscillator's
@@ -1230,13 +1261,19 @@ static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
init.parent_names = parents;
init.num_parents = data->num_mux_parents;
init.name = data->name;
- init.flags = CLK_IGNORE_UNUSED;
+ init.flags = data->flags | CLK_IGNORE_UNUSED;
if (data->is_vpu_clock) {
init.ops = &bcm2835_vpu_clock_clk_ops;
} else {
init.ops = &bcm2835_clock_clk_ops;
init.flags |= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ /* If the clock wasn't actually enabled at boot, it's not
+ * critical.
+ */
+ if (!(cprman_read(cprman, data->ctl_reg) & CM_ENABLE))
+ init.flags &= ~CLK_IS_CRITICAL;
}
clock = devm_kzalloc(cprman->dev, sizeof(*clock), GFP_KERNEL);
@@ -1247,7 +1284,10 @@ static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
clock->data = data;
clock->hw.init = &init;
- return devm_clk_register(cprman->dev, &clock->hw);
+ ret = devm_clk_hw_register(cprman->dev, &clock->hw);
+ if (ret)
+ return ERR_PTR(ret);
+ return &clock->hw;
}
static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
@@ -1259,8 +1299,8 @@ static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
CM_GATE_BIT, 0, &cprman->regs_lock);
}
-typedef struct clk *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
- const void *data);
+typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
+ const void *data);
struct bcm2835_clk_desc {
bcm2835_clk_register clk_register;
const void *data;
@@ -1649,6 +1689,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.div_reg = CM_VPUDIV,
.int_bits = 12,
.frac_bits = 8,
+ .flags = CLK_IS_CRITICAL,
.is_vpu_clock = true),
/* clocks with per parent mux */
@@ -1705,13 +1746,15 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.div_reg = CM_GP1DIV,
.int_bits = 12,
.frac_bits = 12,
+ .flags = CLK_IS_CRITICAL,
.is_mash_clock = true),
[BCM2835_CLOCK_GP2] = REGISTER_PER_CLK(
.name = "gp2",
.ctl_reg = CM_GP2CTL,
.div_reg = CM_GP2DIV,
.int_bits = 12,
- .frac_bits = 12),
+ .frac_bits = 12,
+ .flags = CLK_IS_CRITICAL),
/* HDMI state machine */
[BCM2835_CLOCK_HSM] = REGISTER_PER_CLK(
@@ -1790,18 +1833,38 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.ctl_reg = CM_PERIICTL),
};
+/*
+ * Permanently take a reference on the parent of the SDRAM clock.
+ *
+ * While the SDRAM is being driven by its dedicated PLL most of the
+ * time, there is a little loop running in the firmware that
+ * periodically switches the SDRAM to using our CM clock to do PVT
+ * recalibration, with the assumption that the previously configured
+ * SDRAM parent is still enabled and running.
+ */
+static int bcm2835_mark_sdc_parent_critical(struct clk *sdc)
+{
+ struct clk *parent = clk_get_parent(sdc);
+
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ return clk_prepare_enable(parent);
+}
+
static int bcm2835_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct clk **clks;
+ struct clk_hw **hws;
struct bcm2835_cprman *cprman;
struct resource *res;
const struct bcm2835_clk_desc *desc;
const size_t asize = ARRAY_SIZE(clk_desc_array);
size_t i;
+ int ret;
- cprman = devm_kzalloc(dev,
- sizeof(*cprman) + asize * sizeof(*clks),
+ cprman = devm_kzalloc(dev, sizeof(*cprman) +
+ sizeof(*cprman->onecell.hws) * asize,
GFP_KERNEL);
if (!cprman)
return -ENOMEM;
@@ -1819,18 +1882,21 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cprman);
- cprman->onecell.clk_num = asize;
- cprman->onecell.clks = cprman->clks;
- clks = cprman->clks;
+ cprman->onecell.num = asize;
+ hws = cprman->onecell.hws;
for (i = 0; i < asize; i++) {
desc = &clk_desc_array[i];
if (desc->clk_register && desc->data)
- clks[i] = desc->clk_register(cprman, desc->data);
+ hws[i] = desc->clk_register(cprman, desc->data);
}
- return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
- &cprman->onecell);
+ ret = bcm2835_mark_sdc_parent_critical(hws[BCM2835_CLOCK_SDRAM]->clk);
+ if (ret)
+ return ret;
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &cprman->onecell);
}
static const struct of_device_id bcm2835_clk_of_match[] = {
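The cprman change above is also why the clk_hw_onecell_data member is marked "Must be last": its hws[] flexible array is carved out of the same allocation as the containing structure. A minimal sketch of that idiom, using a hypothetical foo_clk_provider structure and clock count:

    #include <linux/clk-provider.h>
    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo_clk_provider {
    	void __iomem *regs;
    	/* must be last: hws[] extends past the end of the struct */
    	struct clk_hw_onecell_data onecell;
    };

    static struct foo_clk_provider *foo_clk_alloc(struct device *dev,
    					      size_t num_clks)
    {
    	struct foo_clk_provider *p;

    	/* one allocation covers the struct plus num_clks hw pointers */
    	p = devm_kzalloc(dev, sizeof(*p) +
    			 sizeof(*p->onecell.hws) * num_clks, GFP_KERNEL);
    	if (!p)
    		return NULL;

    	p->onecell.num = num_clks;
    	return p;
    }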
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
new file mode 100644
index 000000000000..36eb3716ffb0
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define PMU_XTAL_FREQ_RATIO 0x66c
+#define XTAL_ALP_PER_4ILP 0x00001fff
+#define XTAL_CTL_EN 0x80000000
+#define PMU_SLOW_CLK_PERIOD 0x6dc
+
+struct bcm53573_ilp {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+static int bcm53573_ilp_enable(struct clk_hw *hw)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+
+ regmap_write(ilp->regmap, PMU_SLOW_CLK_PERIOD, 0x10199);
+ regmap_write(ilp->regmap, 0x674, 0x10000);
+
+ return 0;
+}
+
+static void bcm53573_ilp_disable(struct clk_hw *hw)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+
+ regmap_write(ilp->regmap, PMU_SLOW_CLK_PERIOD, 0);
+ regmap_write(ilp->regmap, 0x674, 0);
+}
+
+static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+ struct regmap *regmap = ilp->regmap;
+ u32 last_val, cur_val;
+ int sum = 0, num = 0, loop_num = 0;
+ int avg;
+
+ /* Enable measurement */
+ regmap_write(regmap, PMU_XTAL_FREQ_RATIO, XTAL_CTL_EN);
+
+ /* Read initial value */
+ regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &last_val);
+ last_val &= XTAL_ALP_PER_4ILP;
+
+ /*
	 * At minimum we should loop for a bit to let the hardware do the
	 * measurement. This isn't very accurate, however, so for better
	 * precision let's collect 20 different values and use their average.
+ */
+ while (num < 20) {
+ regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
+ cur_val &= XTAL_ALP_PER_4ILP;
+
+ if (cur_val != last_val) {
+ /* Got different value, use it */
+ sum += cur_val;
+ num++;
+ loop_num = 0;
+ last_val = cur_val;
+ } else if (++loop_num > 5000) {
+ /* Same value over and over, give up */
+ sum += cur_val;
+ num++;
+ break;
+ }
+
+ cpu_relax();
+ }
+
+ /* Disable measurement to save power */
+ regmap_write(regmap, PMU_XTAL_FREQ_RATIO, 0x0);
+
+ avg = sum / num;
+
+ return parent_rate * 4 / avg;
+}
+
+static const struct clk_ops bcm53573_ilp_clk_ops = {
+ .enable = bcm53573_ilp_enable,
+ .disable = bcm53573_ilp_disable,
+ .recalc_rate = bcm53573_ilp_recalc_rate,
+};
+
+static void bcm53573_ilp_init(struct device_node *np)
+{
+ struct bcm53573_ilp *ilp;
+ struct clk_init_data init = { };
+ const char *parent_name;
+ int err;
+
+ ilp = kzalloc(sizeof(*ilp), GFP_KERNEL);
+ if (!ilp)
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name) {
+ err = -ENOENT;
+ goto err_free_ilp;
+ }
+
+ ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(ilp->regmap)) {
+ err = PTR_ERR(ilp->regmap);
+ goto err_free_ilp;
+ }
+
+ init.name = np->name;
+ init.ops = &bcm53573_ilp_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ ilp->hw.init = &init;
+ err = clk_hw_register(NULL, &ilp->hw);
+ if (err)
+ goto err_free_ilp;
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &ilp->hw);
+ if (err)
+ goto err_clk_hw_unregister;
+
+ return;
+
+err_clk_hw_unregister:
+ clk_hw_unregister(&ilp->hw);
+err_free_ilp:
+ kfree(ilp);
+ pr_err("Failed to init ILP clock: %d\n", err);
+}
+
+/* We need it very early for arch code, before the device model is ready */
+CLK_OF_DECLARE(bcm53573_ilp_clk, "brcm,bcm53573-ilp", bcm53573_ilp_init);
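The recalc_rate hook above derives the ILP rate from the averaged XTAL_ALP_PER_4ILP reading, which counts parent (ALP) cycles per four ILP cycles. A purely illustrative calculation, with made-up values rather than numbers measured on real hardware:

    /* Illustrative only; neither value is taken from actual BCM53573 hardware. */
    static unsigned long ilp_rate_example(void)
    {
    	unsigned long parent_rate = 40000000;	/* hypothetical 40 MHz ALP clock */
    	unsigned long avg = 4878;		/* hypothetical averaged register reading */

    	/* four ILP cycles take 'avg' parent cycles */
    	return parent_rate * 4 / avg;		/* ~32800 Hz */
    }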
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index 526b0b0e9a9f..c37a7f0e83aa 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -586,8 +586,8 @@ static u32 *parent_process(const char *clocks[],
}
/* There is at least one parent, so allocate a selector array */
-
- parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL);
+ parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
+ GFP_KERNEL);
if (!parent_sel) {
pr_err("%s: error allocating %u parent selectors\n", __func__,
parent_count);
@@ -696,77 +696,69 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
bcm_clk->type = bcm_clk_none;
}
-static void kona_clk_teardown(struct clk *clk)
+static void kona_clk_teardown(struct clk_hw *hw)
{
- struct clk_hw *hw;
struct kona_clk *bcm_clk;
- if (!clk)
+ if (!hw)
return;
- hw = __clk_get_hw(clk);
- if (!hw) {
- pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
- return;
- }
- clk_unregister(clk);
+ clk_hw_unregister(hw);
bcm_clk = to_kona_clk(hw);
bcm_clk_teardown(bcm_clk);
}
-struct clk *kona_clk_setup(struct kona_clk *bcm_clk)
+static int kona_clk_setup(struct kona_clk *bcm_clk)
{
+ int ret;
struct clk_init_data *init_data = &bcm_clk->init_data;
- struct clk *clk = NULL;
switch (bcm_clk->type) {
case bcm_clk_peri:
- if (peri_clk_setup(bcm_clk->u.data, init_data))
- return NULL;
+ ret = peri_clk_setup(bcm_clk->u.data, init_data);
+ if (ret)
+ return ret;
break;
default:
pr_err("%s: clock type %d invalid for %s\n", __func__,
(int)bcm_clk->type, init_data->name);
- return NULL;
+ return -EINVAL;
}
/* Make sure everything makes sense before we set it up */
if (!kona_clk_valid(bcm_clk)) {
pr_err("%s: clock data invalid for %s\n", __func__,
init_data->name);
+ ret = -EINVAL;
goto out_teardown;
}
bcm_clk->hw.init = init_data;
- clk = clk_register(NULL, &bcm_clk->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: error registering clock %s (%ld)\n", __func__,
- init_data->name, PTR_ERR(clk));
+ ret = clk_hw_register(NULL, &bcm_clk->hw);
+ if (ret) {
+ pr_err("%s: error registering clock %s (%d)\n", __func__,
+ init_data->name, ret);
goto out_teardown;
}
- BUG_ON(!clk);
- return clk;
+ return 0;
out_teardown:
bcm_clk_teardown(bcm_clk);
- return NULL;
+ return ret;
}
static void ccu_clks_teardown(struct ccu_data *ccu)
{
u32 i;
- for (i = 0; i < ccu->clk_data.clk_num; i++)
- kona_clk_teardown(ccu->clk_data.clks[i]);
- kfree(ccu->clk_data.clks);
+ for (i = 0; i < ccu->clk_num; i++)
+ kona_clk_teardown(&ccu->kona_clks[i].hw);
}
static void kona_ccu_teardown(struct ccu_data *ccu)
{
- kfree(ccu->clk_data.clks);
- ccu->clk_data.clks = NULL;
if (!ccu->base)
return;
@@ -793,6 +785,20 @@ static bool ccu_data_valid(struct ccu_data *ccu)
return true;
}
+static struct clk_hw *
+of_clk_kona_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct ccu_data *ccu = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ccu->clk_num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ccu->kona_clks[idx].hw;
+}
+
/*
* Set up a CCU. Call the provided ccu_clks_setup callback to
* initialize the array of clocks provided by the CCU.
@@ -805,18 +811,6 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu,
unsigned int i;
int ret;
- if (ccu->clk_data.clk_num) {
- size_t size;
-
- size = ccu->clk_data.clk_num * sizeof(*ccu->clk_data.clks);
- ccu->clk_data.clks = kzalloc(size, GFP_KERNEL);
- if (!ccu->clk_data.clks) {
- pr_err("%s: unable to allocate %u clocks for %s\n",
- __func__, ccu->clk_data.clk_num, node->name);
- return;
- }
- }
-
ret = of_address_to_resource(node, 0, &res);
if (ret) {
pr_err("%s: no valid CCU registers found for %s\n", __func__,
@@ -851,13 +845,13 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu,
* the clock framework clock array (in ccu->data). Then
* register as a provider for these clocks.
*/
- for (i = 0; i < ccu->clk_data.clk_num; i++) {
+ for (i = 0; i < ccu->clk_num; i++) {
if (!ccu->kona_clks[i].ccu)
continue;
- ccu->clk_data.clks[i] = kona_clk_setup(&ccu->kona_clks[i]);
+ kona_clk_setup(&ccu->kona_clks[i]);
}
- ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->clk_data);
+ ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu);
if (ret) {
pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
node->name, ret);
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 3a15347b4233..eee64b9e5d10 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1256,19 +1256,18 @@ bool __init kona_ccu_init(struct ccu_data *ccu)
{
unsigned long flags;
unsigned int which;
- struct clk **clks = ccu->clk_data.clks;
struct kona_clk *kona_clks = ccu->kona_clks;
bool success = true;
flags = ccu_lock(ccu);
__ccu_write_enable(ccu);
- for (which = 0; which < ccu->clk_data.clk_num; which++) {
- struct kona_clk *bcm_clk;
+ for (which = 0; which < ccu->clk_num; which++) {
+ struct kona_clk *bcm_clk = &kona_clks[which];
- if (!clks[which])
+ if (!bcm_clk->ccu)
continue;
- bcm_clk = &kona_clks[which];
+
success &= __kona_clk_init(bcm_clk);
}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 906576ec97b6..f4b39bb5558a 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -481,7 +481,7 @@ struct ccu_data {
bool write_enabled; /* write access is currently enabled */
struct ccu_policy policy;
struct device_node *node;
- struct clk_onecell_data clk_data;
+ size_t clk_num;
const char *name;
u32 range; /* byte range of address space */
struct kona_clk kona_clks[]; /* must be last */
@@ -491,9 +491,7 @@ struct ccu_data {
#define KONA_CCU_COMMON(_prefix, _name, _ccuname) \
.name = #_name "_ccu", \
.lock = __SPIN_LOCK_UNLOCKED(_name ## _ccu_data.lock), \
- .clk_data = { \
- .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT, \
- }
+ .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT
/* Exported globals */
@@ -505,7 +503,6 @@ extern u64 scaled_div_max(struct bcm_clk_div *div);
extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
u32 billionths);
-extern struct clk *kona_clk_setup(struct kona_clk *bcm_clk);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
extern bool __init kona_ccu_init(struct ccu_data *ccu);
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index fd0f26c38465..cfcae468e989 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -188,7 +188,7 @@ static const struct clk_ops berlin2_avpll_vco_ops = {
.recalc_rate = berlin2_avpll_vco_recalc_rate,
};
-struct clk * __init berlin2_avpll_vco_register(void __iomem *base,
+int __init berlin2_avpll_vco_register(void __iomem *base,
const char *name, const char *parent_name,
u8 vco_flags, unsigned long flags)
{
@@ -197,7 +197,7 @@ struct clk * __init berlin2_avpll_vco_register(void __iomem *base,
vco = kzalloc(sizeof(*vco), GFP_KERNEL);
if (!vco)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
vco->base = base;
vco->flags = vco_flags;
@@ -208,7 +208,7 @@ struct clk * __init berlin2_avpll_vco_register(void __iomem *base,
init.num_parents = 1;
init.flags = flags;
- return clk_register(NULL, &vco->hw);
+ return clk_hw_register(NULL, &vco->hw);
}
struct berlin2_avpll_channel {
@@ -364,7 +364,7 @@ static const struct clk_ops berlin2_avpll_channel_ops = {
*/
static const u8 quirk_index[] __initconst = { 0, 6, 5, 4, 3, 2, 1, 7 };
-struct clk * __init berlin2_avpll_channel_register(void __iomem *base,
+int __init berlin2_avpll_channel_register(void __iomem *base,
const char *name, u8 index, const char *parent_name,
u8 ch_flags, unsigned long flags)
{
@@ -373,7 +373,7 @@ struct clk * __init berlin2_avpll_channel_register(void __iomem *base,
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ch->base = base;
if (ch_flags & BERLIN2_AVPLL_SCRAMBLE_QUIRK)
@@ -389,5 +389,5 @@ struct clk * __init berlin2_avpll_channel_register(void __iomem *base,
init.num_parents = 1;
init.flags = flags;
- return clk_register(NULL, &ch->hw);
+ return clk_hw_register(NULL, &ch->hw);
}
diff --git a/drivers/clk/berlin/berlin2-avpll.h b/drivers/clk/berlin/berlin2-avpll.h
index a37f5068d299..17e311153b42 100644
--- a/drivers/clk/berlin/berlin2-avpll.h
+++ b/drivers/clk/berlin/berlin2-avpll.h
@@ -19,17 +19,13 @@
#ifndef __BERLIN2_AVPLL_H
#define __BERLIN2_AVPLL_H
-struct clk;
-
#define BERLIN2_AVPLL_BIT_QUIRK BIT(0)
#define BERLIN2_AVPLL_SCRAMBLE_QUIRK BIT(1)
-struct clk * __init
-berlin2_avpll_vco_register(void __iomem *base, const char *name,
+int berlin2_avpll_vco_register(void __iomem *base, const char *name,
const char *parent_name, u8 vco_flags, unsigned long flags);
-struct clk * __init
-berlin2_avpll_channel_register(void __iomem *base, const char *name,
+int berlin2_avpll_channel_register(void __iomem *base, const char *name,
u8 index, const char *parent_name, u8 ch_flags,
unsigned long flags);
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 81ff97f8aa0b..41ab2d392c57 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -234,7 +234,7 @@ static const struct clk_ops berlin2_div_mux_ops = {
.get_parent = berlin2_div_get_parent,
};
-struct clk * __init
+struct clk_hw * __init
berlin2_div_register(const struct berlin2_div_map *map,
void __iomem *base, const char *name, u8 div_flags,
const char **parent_names, int num_parents,
@@ -259,7 +259,7 @@ berlin2_div_register(const struct berlin2_div_map *map,
if ((div_flags & BERLIN2_DIV_HAS_MUX) == 0)
mux_ops = NULL;
- return clk_register_composite(NULL, name, parent_names, num_parents,
+ return clk_hw_register_composite(NULL, name, parent_names, num_parents,
&div->hw, mux_ops, &div->hw, rate_ops,
&div->hw, gate_ops, flags);
}
diff --git a/drivers/clk/berlin/berlin2-div.h b/drivers/clk/berlin/berlin2-div.h
index 15e3384f3116..e835ddf8374a 100644
--- a/drivers/clk/berlin/berlin2-div.h
+++ b/drivers/clk/berlin/berlin2-div.h
@@ -19,7 +19,7 @@
#ifndef __BERLIN2_DIV_H
#define __BERLIN2_DIV_H
-struct clk;
+struct clk_hw;
#define BERLIN2_DIV_HAS_GATE BIT(0)
#define BERLIN2_DIV_HAS_MUX BIT(1)
@@ -80,7 +80,7 @@ struct berlin2_div_data {
u8 div_flags;
};
-struct clk * __init
+struct clk_hw *
berlin2_div_register(const struct berlin2_div_map *map,
void __iomem *base, const char *name, u8 div_flags,
const char **parent_names, int num_parents,
diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c
index 1c2294d3ba85..4ffbe80f6323 100644
--- a/drivers/clk/berlin/berlin2-pll.c
+++ b/drivers/clk/berlin/berlin2-pll.c
@@ -84,7 +84,7 @@ static const struct clk_ops berlin2_pll_ops = {
.recalc_rate = berlin2_pll_recalc_rate,
};
-struct clk * __init
+int __init
berlin2_pll_register(const struct berlin2_pll_map *map,
void __iomem *base, const char *name,
const char *parent_name, unsigned long flags)
@@ -94,7 +94,7 @@ berlin2_pll_register(const struct berlin2_pll_map *map,
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
/* copy pll_map to allow __initconst */
memcpy(&pll->map, map, sizeof(*map));
@@ -106,5 +106,5 @@ berlin2_pll_register(const struct berlin2_pll_map *map,
init.num_parents = 1;
init.flags = flags;
- return clk_register(NULL, &pll->hw);
+ return clk_hw_register(NULL, &pll->hw);
}
diff --git a/drivers/clk/berlin/berlin2-pll.h b/drivers/clk/berlin/berlin2-pll.h
index 8831ce27ac1e..583e024b9bed 100644
--- a/drivers/clk/berlin/berlin2-pll.h
+++ b/drivers/clk/berlin/berlin2-pll.h
@@ -19,8 +19,6 @@
#ifndef __BERLIN2_PLL_H
#define __BERLIN2_PLL_H
-struct clk;
-
struct berlin2_pll_map {
const u8 vcodiv[16];
u8 mult;
@@ -29,9 +27,8 @@ struct berlin2_pll_map {
u8 divsel_shift;
};
-struct clk * __init
-berlin2_pll_register(const struct berlin2_pll_map *map,
- void __iomem *base, const char *name,
- const char *parent_name, unsigned long flags);
+int berlin2_pll_register(const struct berlin2_pll_map *map,
+ void __iomem *base, const char *name,
+ const char *parent_name, unsigned long flags);
#endif /* __BERLIN2_PLL_H */
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 23e0e3be6c37..edf3b96b3b73 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -92,8 +92,7 @@
*/
#define MAX_CLKS 41
-static struct clk *clks[MAX_CLKS];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_data;
static DEFINE_SPINLOCK(lock);
static void __iomem *gbase;
@@ -505,8 +504,17 @@ static void __init berlin2_clock_setup(struct device_node *np)
struct device_node *parent_np = of_get_parent(np);
const char *parent_names[9];
struct clk *clk;
+ struct clk_hw *hw;
+ struct clk_hw **hws;
u8 avpll_flags = 0;
- int n;
+ int n, ret;
+
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+ if (!clk_data)
+ return;
+ clk_data->num = MAX_CLKS;
+ hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase)
@@ -526,118 +534,118 @@ static void __init berlin2_clock_setup(struct device_node *np)
}
/* simple register PLLs */
- clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_SYSPLLCTL0,
+ ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_SYSPLLCTL0,
clk_names[SYSPLL], clk_names[REFCLK], 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
- clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_MEMPLLCTL0,
+ ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_MEMPLLCTL0,
clk_names[MEMPLL], clk_names[REFCLK], 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
- clk = berlin2_pll_register(&bg2_pll_map, gbase + REG_CPUPLLCTL0,
+ ret = berlin2_pll_register(&bg2_pll_map, gbase + REG_CPUPLLCTL0,
clk_names[CPUPLL], clk_names[REFCLK], 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
if (of_device_is_compatible(np, "marvell,berlin2-global-register"))
avpll_flags |= BERLIN2_AVPLL_SCRAMBLE_QUIRK;
/* audio/video VCOs */
- clk = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL0, "avpll_vcoA",
+ ret = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL0, "avpll_vcoA",
clk_names[REFCLK], avpll_flags, 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
for (n = 0; n < 8; n++) {
- clk = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL0,
+ ret = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL0,
clk_names[AVPLL_A1 + n], n, "avpll_vcoA",
avpll_flags, 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
}
- clk = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL31, "avpll_vcoB",
+ ret = berlin2_avpll_vco_register(gbase + REG_AVPLLCTL31, "avpll_vcoB",
clk_names[REFCLK], BERLIN2_AVPLL_BIT_QUIRK |
avpll_flags, 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
for (n = 0; n < 8; n++) {
- clk = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL31,
+ ret = berlin2_avpll_channel_register(gbase + REG_AVPLLCTL31,
clk_names[AVPLL_B1 + n], n, "avpll_vcoB",
BERLIN2_AVPLL_BIT_QUIRK | avpll_flags, 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2_fail;
}
/* reference clock bypass switches */
parent_names[0] = clk_names[SYSPLL];
parent_names[1] = clk_names[REFCLK];
- clk = clk_register_mux(NULL, "syspll_byp", parent_names, 2,
+ hw = clk_hw_register_mux(NULL, "syspll_byp", parent_names, 2,
0, gbase + REG_CLKSWITCH0, 0, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
- clk_names[SYSPLL] = __clk_get_name(clk);
+ clk_names[SYSPLL] = clk_hw_get_name(hw);
parent_names[0] = clk_names[MEMPLL];
parent_names[1] = clk_names[REFCLK];
- clk = clk_register_mux(NULL, "mempll_byp", parent_names, 2,
+ hw = clk_hw_register_mux(NULL, "mempll_byp", parent_names, 2,
0, gbase + REG_CLKSWITCH0, 1, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
- clk_names[MEMPLL] = __clk_get_name(clk);
+ clk_names[MEMPLL] = clk_hw_get_name(hw);
parent_names[0] = clk_names[CPUPLL];
parent_names[1] = clk_names[REFCLK];
- clk = clk_register_mux(NULL, "cpupll_byp", parent_names, 2,
+ hw = clk_hw_register_mux(NULL, "cpupll_byp", parent_names, 2,
0, gbase + REG_CLKSWITCH0, 2, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
- clk_names[CPUPLL] = __clk_get_name(clk);
+ clk_names[CPUPLL] = clk_hw_get_name(hw);
/* clock muxes */
parent_names[0] = clk_names[AVPLL_B3];
parent_names[1] = clk_names[AVPLL_A3];
- clk = clk_register_mux(NULL, clk_names[AUDIO1_PLL], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[AUDIO1_PLL], parent_names, 2,
0, gbase + REG_CLKSELECT2, 29, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
parent_names[0] = clk_names[VIDEO0_PLL];
parent_names[1] = clk_names[VIDEO_EXT0];
- clk = clk_register_mux(NULL, clk_names[VIDEO0_IN], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[VIDEO0_IN], parent_names, 2,
0, gbase + REG_CLKSELECT3, 4, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
parent_names[0] = clk_names[VIDEO1_PLL];
parent_names[1] = clk_names[VIDEO_EXT0];
- clk = clk_register_mux(NULL, clk_names[VIDEO1_IN], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[VIDEO1_IN], parent_names, 2,
0, gbase + REG_CLKSELECT3, 6, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
parent_names[0] = clk_names[AVPLL_A2];
parent_names[1] = clk_names[AVPLL_B2];
- clk = clk_register_mux(NULL, clk_names[VIDEO1_PLL], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[VIDEO1_PLL], parent_names, 2,
0, gbase + REG_CLKSELECT3, 7, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
parent_names[0] = clk_names[VIDEO2_PLL];
parent_names[1] = clk_names[VIDEO_EXT0];
- clk = clk_register_mux(NULL, clk_names[VIDEO2_IN], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[VIDEO2_IN], parent_names, 2,
0, gbase + REG_CLKSELECT3, 9, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
parent_names[0] = clk_names[AVPLL_B1];
parent_names[1] = clk_names[AVPLL_A5];
- clk = clk_register_mux(NULL, clk_names[VIDEO2_PLL], parent_names, 2,
+ hw = clk_hw_register_mux(NULL, clk_names[VIDEO2_PLL], parent_names, 2,
0, gbase + REG_CLKSELECT3, 10, 1, 0, &lock);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
goto bg2_fail;
/* clock divider cells */
@@ -648,7 +656,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
for (k = 0; k < dd->num_parents; k++)
parent_names[k] = clk_names[dd->parent_ids[k]];
- clks[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
+ hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
dd->name, dd->div_flags, parent_names,
dd->num_parents, dd->flags, &lock);
}
@@ -657,18 +665,18 @@ static void __init berlin2_clock_setup(struct device_node *np)
for (n = 0; n < ARRAY_SIZE(bg2_gates); n++) {
const struct berlin2_gate_data *gd = &bg2_gates[n];
- clks[CLKID_GETH0 + n] = clk_register_gate(NULL, gd->name,
+ hws[CLKID_GETH0 + n] = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags, gbase + REG_CLKENABLE,
gd->bit_idx, 0, &lock);
}
/* twdclk is derived from cpu/3 */
- clks[CLKID_TWD] =
- clk_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);
+ hws[CLKID_TWD] =
+ clk_hw_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);
/* check for errors on leaf clocks */
for (n = 0; n < MAX_CLKS; n++) {
- if (!IS_ERR(clks[n]))
+ if (!IS_ERR(hws[n]))
continue;
pr_err("%s: Unable to register leaf clock %d\n",
@@ -677,9 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
}
/* register clk-provider */
- clk_data.clks = clks;
- clk_data.clk_num = MAX_CLKS;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index f144547cf76c..0718e831475f 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -46,8 +46,7 @@
#define REG_SDIO1XIN_CLKCTL 0x015c
#define MAX_CLKS 28
-static struct clk *clks[MAX_CLKS];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_data;
static DEFINE_SPINLOCK(lock);
static void __iomem *gbase;
static void __iomem *cpupll_base;
@@ -293,7 +292,15 @@ static void __init berlin2q_clock_setup(struct device_node *np)
struct device_node *parent_np = of_get_parent(np);
const char *parent_names[9];
struct clk *clk;
- int n;
+ struct clk_hw **hws;
+ int n, ret;
+
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+ if (!clk_data)
+ return;
+ clk_data->num = MAX_CLKS;
+ hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
@@ -317,14 +324,14 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}
/* simple register PLLs */
- clk = berlin2_pll_register(&bg2q_pll_map, gbase + REG_SYSPLLCTL0,
+ ret = berlin2_pll_register(&bg2q_pll_map, gbase + REG_SYSPLLCTL0,
clk_names[SYSPLL], clk_names[REFCLK], 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2q_fail;
- clk = berlin2_pll_register(&bg2q_pll_map, cpupll_base,
+ ret = berlin2_pll_register(&bg2q_pll_map, cpupll_base,
clk_names[CPUPLL], clk_names[REFCLK], 0);
- if (IS_ERR(clk))
+ if (ret)
goto bg2q_fail;
/* TODO: add BG2Q AVPLL */
@@ -342,7 +349,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
for (k = 0; k < dd->num_parents; k++)
parent_names[k] = clk_names[dd->parent_ids[k]];
- clks[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
+ hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
dd->name, dd->div_flags, parent_names,
dd->num_parents, dd->flags, &lock);
}
@@ -351,22 +358,22 @@ static void __init berlin2q_clock_setup(struct device_node *np)
for (n = 0; n < ARRAY_SIZE(bg2q_gates); n++) {
const struct berlin2_gate_data *gd = &bg2q_gates[n];
- clks[CLKID_GFX2DAXI + n] = clk_register_gate(NULL, gd->name,
+ hws[CLKID_GFX2DAXI + n] = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags, gbase + REG_CLKENABLE,
gd->bit_idx, 0, &lock);
}
/* cpuclk divider is fixed to 1 */
- clks[CLKID_CPU] =
- clk_register_fixed_factor(NULL, "cpu", clk_names[CPUPLL],
+ hws[CLKID_CPU] =
+ clk_hw_register_fixed_factor(NULL, "cpu", clk_names[CPUPLL],
0, 1, 1);
/* twdclk is derived from cpu/3 */
- clks[CLKID_TWD] =
- clk_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);
+ hws[CLKID_TWD] =
+ clk_hw_register_fixed_factor(NULL, "twd", "cpu", 0, 1, 3);
/* check for errors on leaf clocks */
for (n = 0; n < MAX_CLKS; n++) {
- if (!IS_ERR(clks[n]))
+ if (!IS_ERR(hws[n]))
continue;
pr_err("%s: Unable to register leaf clock %d\n",
@@ -375,9 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}
/* register clk-provider */
- clk_data.clks = clks;
- clk_data.clk_num = MAX_CLKS;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 90897af8d9f7..ea8568536193 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -68,8 +68,7 @@
#define HW_LCDCLKDIV 0x01fc
#define HW_ADCANACLKDIV 0x0200
-static struct clk *clks[MAX_CLKS];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_data;
static DEFINE_SPINLOCK(asm9260_clk_lock);
struct asm9260_div_clk {
@@ -267,12 +266,20 @@ static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
static void __init asm9260_acc_init(struct device_node *np)
{
- struct clk *clk;
+ struct clk_hw *hw;
+ struct clk_hw **hws;
const char *ref_clk, *pll_clk = "pll";
u32 rate;
int n;
u32 accuracy = 0;
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+ if (!clk_data)
+ return;
+ clk_data->num = MAX_CLKS;
+ hws = clk_data->hws;
+
base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(base))
panic("%s: unable to map resource", np->name);
@@ -282,10 +289,10 @@ static void __init asm9260_acc_init(struct device_node *np)
ref_clk = of_clk_get_parent_name(np, 0);
accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
- clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, pll_clk,
ref_clk, 0, rate, accuracy);
- if (IS_ERR(clk))
+ if (IS_ERR(hw))
panic("%s: can't register REFCLK. Check DT!", np->name);
for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
@@ -293,7 +300,7 @@ static void __init asm9260_acc_init(struct device_node *np)
mc->parent_names[0] = ref_clk;
mc->parent_names[1] = pll_clk;
- clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
+ hw = clk_hw_register_mux_table(NULL, mc->name, mc->parent_names,
mc->num_parents, mc->flags, base + mc->offset,
0, mc->mask, 0, mc->table, &asm9260_clk_lock);
}
@@ -302,7 +309,7 @@ static void __init asm9260_acc_init(struct device_node *np)
for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
- clk = clk_register_gate(NULL, gd->name,
+ hw = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
}
@@ -311,7 +318,7 @@ static void __init asm9260_acc_init(struct device_node *np)
for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
- clks[dc->idx] = clk_register_divider(NULL, dc->name,
+ hws[dc->idx] = clk_hw_register_divider(NULL, dc->name,
dc->parent_name, CLK_SET_RATE_PARENT,
base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
&asm9260_clk_lock);
@@ -321,14 +328,14 @@ static void __init asm9260_acc_init(struct device_node *np)
for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
- clks[gd->idx] = clk_register_gate(NULL, gd->name,
+ hws[gd->idx] = clk_hw_register_gate(NULL, gd->name,
gd->parent_name, gd->flags, base + gd->reg,
gd->bit_idx, 0, &asm9260_clk_lock);
}
/* check for errors on leaf clocks */
for (n = 0; n < MAX_CLKS; n++) {
- if (!IS_ERR(clks[n]))
+ if (!IS_ERR(hws[n]))
continue;
pr_err("%s: Unable to register leaf clock %d\n",
@@ -337,9 +344,7 @@ static void __init asm9260_acc_init(struct device_node *np)
}
/* register clk-provider */
- clk_data.clks = clks;
- clk_data.clk_num = MAX_CLKS;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
fail:
iounmap(base);
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 3294db3b4e4e..5e918e7afaba 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -392,8 +392,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
const char *parent_names[2];
const char *clk_name;
struct resource *mem;
- struct clk *clk;
unsigned int i;
+ int ret;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -433,12 +433,12 @@ static int axi_clkgen_probe(struct platform_device *pdev)
axi_clkgen_mmcm_enable(axi_clkgen, false);
axi_clkgen->clk_hw.init = &init;
- clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &axi_clkgen->clk_hw);
+ if (ret)
+ return ret;
- return of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
- clk);
+ return of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_simple_get,
+ &axi_clkgen->clk_hw);
}
static int axi_clkgen_remove(struct platform_device *pdev)
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index c7c91a5ecf8b..5d7ae333257e 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -516,6 +516,19 @@ static struct axxia_clk *axmclk_clocks[] = {
[AXXIA_CLK_MMC] = &clk_mmc_mux.aclk,
};
+static struct clk_hw *
+of_clk_axmclk_get(struct of_phandle_args *clkspec, void *unused)
+{
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(axmclk_clocks)) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &axmclk_clocks[idx]->hw;
+}
+
static const struct regmap_config axmclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -530,21 +543,14 @@ static const struct of_device_id axmclk_match_table[] = {
};
MODULE_DEVICE_TABLE(of, axmclk_match_table);
-struct axmclk_priv {
- struct clk_onecell_data onecell;
- struct clk *clks[];
-};
-
static int axmclk_probe(struct platform_device *pdev)
{
void __iomem *base;
struct resource *res;
int i, ret;
struct device *dev = &pdev->dev;
- struct clk *clk;
struct regmap *regmap;
size_t num_clks;
- struct axmclk_priv *priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
@@ -557,29 +563,18 @@ static int axmclk_probe(struct platform_device *pdev)
num_clks = ARRAY_SIZE(axmclk_clocks);
pr_info("axmclk: supporting %zu clocks\n", num_clks);
- priv = devm_kzalloc(dev, sizeof(*priv) + sizeof(*priv->clks) * num_clks,
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->onecell.clks = priv->clks;
- priv->onecell.clk_num = num_clks;
/* Update each entry with the allocated regmap and register the clock
* with the common clock framework
*/
for (i = 0; i < num_clks; i++) {
axmclk_clocks[i]->regmap = regmap;
- clk = devm_clk_register(dev, &axmclk_clocks[i]->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- priv->clks[i] = clk;
+ ret = devm_clk_hw_register(dev, &axmclk_clocks[i]->hw);
+ if (ret)
+ return ret;
}
- ret = of_clk_add_provider(dev->of_node,
- of_clk_src_onecell_get, &priv->onecell);
-
- return ret;
+ return of_clk_add_hw_provider(dev->of_node, of_clk_axmclk_get, NULL);
}
static int axmclk_remove(struct platform_device *pdev)
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index 01877f64eff6..f21d9092564f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -71,7 +71,6 @@ struct cdce706_hw_data {
struct cdce706_dev_data *dev_data;
unsigned idx;
unsigned parent;
- struct clk *clk;
struct clk_hw hw;
unsigned div;
unsigned mul;
@@ -81,8 +80,6 @@ struct cdce706_hw_data {
struct cdce706_dev_data {
struct i2c_client *client;
struct regmap *regmap;
- struct clk_onecell_data onecell;
- struct clk *clks[6];
struct clk *clkin_clk[2];
const char *clkin_name[2];
struct cdce706_hw_data clkin[1];
@@ -455,18 +452,19 @@ static int cdce706_register_hw(struct cdce706_dev_data *cdce,
struct clk_init_data *init)
{
unsigned i;
+ int ret;
for (i = 0; i < num_hw; ++i, ++hw) {
init->name = clk_names[i];
hw->dev_data = cdce;
hw->idx = i;
hw->hw.init = init;
- hw->clk = devm_clk_register(&cdce->client->dev,
+ ret = devm_clk_hw_register(&cdce->client->dev,
&hw->hw);
- if (IS_ERR(hw->clk)) {
+ if (ret) {
dev_err(&cdce->client->dev, "Failed to register %s\n",
clk_names[i]);
- return PTR_ERR(hw->clk);
+ return ret;
}
}
return 0;
@@ -613,13 +611,23 @@ static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
cdce->clkout[i].parent);
}
- ret = cdce706_register_hw(cdce, cdce->clkout,
- ARRAY_SIZE(cdce->clkout),
- cdce706_clkout_name, &init);
- for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
- cdce->clks[i] = cdce->clkout[i].clk;
+ return cdce706_register_hw(cdce, cdce->clkout,
+ ARRAY_SIZE(cdce->clkout),
+ cdce706_clkout_name, &init);
+}
- return ret;
+static struct clk_hw *
+of_clk_cdce_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct cdce706_dev_data *cdce = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(cdce->clkout)) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cdce->clkout[idx].hw;
}
static int cdce706_probe(struct i2c_client *client,
@@ -657,12 +665,8 @@ static int cdce706_probe(struct i2c_client *client,
ret = cdce706_register_clkouts(cdce);
if (ret < 0)
return ret;
- cdce->onecell.clks = cdce->clks;
- cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
- ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
- &cdce->onecell);
-
- return ret;
+ return of_clk_add_hw_provider(client->dev.of_node, of_clk_cdce_get,
+ cdce);
}
static int cdce706_remove(struct i2c_client *client)
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 089bf88ffa8d..b8459c14a1b7 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -62,8 +62,6 @@ struct clk_cdce925_chip {
struct i2c_client *i2c_client;
struct clk_cdce925_pll pll[NUMBER_OF_PLLS];
struct clk_cdce925_output clk[NUMBER_OF_OUTPUTS];
- struct clk *dt_clk[NUMBER_OF_OUTPUTS];
- struct clk_onecell_data onecell;
};
/* ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** */
@@ -557,6 +555,20 @@ static int cdce925_regmap_i2c_read(void *context,
return -EIO;
}
+static struct clk_hw *
+of_clk_cdce925_get(struct of_phandle_args *clkspec, void *_data)
+{
+ struct clk_cdce925_chip *data = _data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(data->clk)) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &data->clk[idx].hw;
+}
+
/* The CDCE925 uses a funky way to read/write registers. Bulk mode is
* just weird, so just use the single byte mode exclusively. */
static struct regmap_bus regmap_cdce925_bus = {
@@ -572,7 +584,6 @@ static int cdce925_probe(struct i2c_client *client,
const char *parent_name;
const char *pll_clk_name[NUMBER_OF_PLLS] = {NULL,};
struct clk_init_data init;
- struct clk *clk;
u32 value;
int i;
int err;
@@ -622,10 +633,9 @@ static int cdce925_probe(struct i2c_client *client,
data->pll[i].chip = data;
data->pll[i].hw.init = &init;
data->pll[i].index = i;
- clk = devm_clk_register(&client->dev, &data->pll[i].hw);
- if (IS_ERR(clk)) {
+ err = devm_clk_hw_register(&client->dev, &data->pll[i].hw);
+ if (err) {
dev_err(&client->dev, "Failed register PLL %d\n", i);
- err = PTR_ERR(clk);
goto error;
}
sprintf(child_name, "PLL%d", i+1);
@@ -634,7 +644,7 @@ static int cdce925_probe(struct i2c_client *client,
continue;
if (!of_property_read_u32(np_output,
"clock-frequency", &value)) {
- err = clk_set_rate(clk, value);
+ err = clk_set_rate(data->pll[i].hw.clk, value);
if (err)
dev_err(&client->dev,
"unable to set PLL frequency %ud\n",
@@ -663,14 +673,12 @@ static int cdce925_probe(struct i2c_client *client,
data->clk[0].hw.init = &init;
data->clk[0].index = 0;
data->clk[0].pdiv = 1;
- clk = devm_clk_register(&client->dev, &data->clk[0].hw);
+ err = devm_clk_hw_register(&client->dev, &data->clk[0].hw);
kfree(init.name); /* clock framework made a copy of the name */
- if (IS_ERR(clk)) {
+ if (err) {
dev_err(&client->dev, "clock registration Y1 failed\n");
- err = PTR_ERR(clk);
goto error;
}
- data->dt_clk[0] = clk;
/* Register output clocks Y2 .. Y5*/
init.ops = &cdce925_clk_ops;
@@ -695,21 +703,17 @@ static int cdce925_probe(struct i2c_client *client,
init.parent_names = &pll_clk_name[1];
break;
}
- clk = devm_clk_register(&client->dev, &data->clk[i].hw);
+ err = devm_clk_hw_register(&client->dev, &data->clk[i].hw);
kfree(init.name); /* clock framework made a copy of the name */
- if (IS_ERR(clk)) {
+ if (err) {
dev_err(&client->dev, "clock registration failed\n");
- err = PTR_ERR(clk);
goto error;
}
- data->dt_clk[i] = clk;
}
/* Register the output clocks */
- data->onecell.clk_num = NUMBER_OF_OUTPUTS;
- data->onecell.clks = data->dt_clk;
- err = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
- &data->onecell);
+ err = of_clk_add_hw_provider(client->dev.of_node, of_clk_cdce925_get,
+ data);
if (err)
dev_err(&client->dev, "unable to add OF clock provider\n");
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index adaf109f2fe2..9193f64561f6 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -40,9 +40,8 @@ static const struct clk_div_table timer_div_table[] = {
};
struct clps711x_clk {
- struct clk_onecell_data clk_data;
- spinlock_t lock;
- struct clk *clks[CLPS711X_CLK_MAX];
+ spinlock_t lock;
+ struct clk_hw_onecell_data clk_data;
};
static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
@@ -55,7 +54,9 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
if (!base)
return ERR_PTR(-ENOMEM);
- clps711x_clk = kzalloc(sizeof(*clps711x_clk), GFP_KERNEL);
+ clps711x_clk = kzalloc(sizeof(*clps711x_clk) +
+ sizeof(*clps711x_clk->clk_data.hws) * CLPS711X_CLK_MAX,
+ GFP_KERNEL);
if (!clps711x_clk)
return ERR_PTR(-ENOMEM);
@@ -106,40 +107,40 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
writel(tmp, base + CLPS711X_SYSCON1);
- clps711x_clk->clks[CLPS711X_CLK_DUMMY] =
- clk_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
- clps711x_clk->clks[CLPS711X_CLK_CPU] =
- clk_register_fixed_rate(NULL, "cpu", NULL, 0, f_cpu);
- clps711x_clk->clks[CLPS711X_CLK_BUS] =
- clk_register_fixed_rate(NULL, "bus", NULL, 0, f_bus);
- clps711x_clk->clks[CLPS711X_CLK_PLL] =
- clk_register_fixed_rate(NULL, "pll", NULL, 0, f_pll);
- clps711x_clk->clks[CLPS711X_CLK_TIMERREF] =
- clk_register_fixed_rate(NULL, "timer_ref", NULL, 0, f_tim);
- clps711x_clk->clks[CLPS711X_CLK_TIMER1] =
- clk_register_divider_table(NULL, "timer1", "timer_ref", 0,
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_DUMMY] =
+ clk_hw_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_CPU] =
+ clk_hw_register_fixed_rate(NULL, "cpu", NULL, 0, f_cpu);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_BUS] =
+ clk_hw_register_fixed_rate(NULL, "bus", NULL, 0, f_bus);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_PLL] =
+ clk_hw_register_fixed_rate(NULL, "pll", NULL, 0, f_pll);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMERREF] =
+ clk_hw_register_fixed_rate(NULL, "timer_ref", NULL, 0, f_tim);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1] =
+ clk_hw_register_divider_table(NULL, "timer1", "timer_ref", 0,
base + CLPS711X_SYSCON1, 5, 1, 0,
timer_div_table, &clps711x_clk->lock);
- clps711x_clk->clks[CLPS711X_CLK_TIMER2] =
- clk_register_divider_table(NULL, "timer2", "timer_ref", 0,
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2] =
+ clk_hw_register_divider_table(NULL, "timer2", "timer_ref", 0,
base + CLPS711X_SYSCON1, 7, 1, 0,
timer_div_table, &clps711x_clk->lock);
- clps711x_clk->clks[CLPS711X_CLK_PWM] =
- clk_register_fixed_rate(NULL, "pwm", NULL, 0, f_pwm);
- clps711x_clk->clks[CLPS711X_CLK_SPIREF] =
- clk_register_fixed_rate(NULL, "spi_ref", NULL, 0, f_spi);
- clps711x_clk->clks[CLPS711X_CLK_SPI] =
- clk_register_divider_table(NULL, "spi", "spi_ref", 0,
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM] =
+ clk_hw_register_fixed_rate(NULL, "pwm", NULL, 0, f_pwm);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_SPIREF] =
+ clk_hw_register_fixed_rate(NULL, "spi_ref", NULL, 0, f_spi);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_SPI] =
+ clk_hw_register_divider_table(NULL, "spi", "spi_ref", 0,
base + CLPS711X_SYSCON1, 16, 2, 0,
spi_div_table, &clps711x_clk->lock);
- clps711x_clk->clks[CLPS711X_CLK_UART] =
- clk_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
- clps711x_clk->clks[CLPS711X_CLK_TICK] =
- clk_register_fixed_rate(NULL, "tick", NULL, 0, 64);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_UART] =
+ clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
+ clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] =
+ clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64);
for (i = 0; i < CLPS711X_CLK_MAX; i++)
- if (IS_ERR(clps711x_clk->clks[i]))
+ if (IS_ERR(clps711x_clk->clk_data.hws[i]))
pr_err("clk %i: register failed with %ld\n",
- i, PTR_ERR(clps711x_clk->clks[i]));
+ i, PTR_ERR(clps711x_clk->clk_data.hws[i]));
return clps711x_clk;
}
@@ -153,17 +154,17 @@ void __init clps711x_clk_init(void __iomem *base)
BUG_ON(IS_ERR(clps711x_clk));
/* Clocksource */
- clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER1],
+ clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1],
NULL, "clps711x-timer.0");
- clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER2],
+ clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2],
NULL, "clps711x-timer.1");
/* Drivers */
- clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_PWM],
+ clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM],
NULL, "clps711x-pwm");
- clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+ clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
NULL, "clps711x-uart.0");
- clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+ clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART],
NULL, "clps711x-uart.1");
}
@@ -179,10 +180,9 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
clps711x_clk = _clps711x_clk_init(base, fref);
BUG_ON(IS_ERR(clps711x_clk));
- clps711x_clk->clk_data.clks = clps711x_clk->clks;
- clps711x_clk->clk_data.clk_num = CLPS711X_CLK_MAX;
- of_clk_add_provider(np, of_clk_src_onecell_get,
- &clps711x_clk->clk_data);
+ clps711x_clk->clk_data.num = CLPS711X_CLK_MAX;
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+ &clps711x_clk->clk_data);
}
CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
#endif
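The clk_hw_register_clkdev() calls above publish lookups keyed on device name, so the non-DT consumers resolve their clock with a plain clk_get(). A sketch of the consumer side, assuming a platform device whose dev_name() is "clps711x-uart.0" (matching the clkdev entry registered above):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int clps711x_uart_clk_example(struct device *dev)
    {
    	/* matched against the "clps711x-uart.0" clkdev entry via dev_name(dev) */
    	struct clk *uart_clk = clk_get(dev, NULL);
    	int ret;

    	if (IS_ERR(uart_clk))
    		return PTR_ERR(uart_clk);

    	ret = clk_prepare_enable(uart_clk);
    	if (ret) {
    		clk_put(uart_clk);
    		return ret;
    	}

    	/* ... use the peripheral ... */

    	clk_disable_unprepare(uart_clk);
    	clk_put(uart_clk);
    	return 0;
    }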
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 7379de8dc894..021f3daf34e1 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -59,7 +59,6 @@ struct cs2000_priv {
struct i2c_client *client;
struct clk *clk_in;
struct clk *ref_clk;
- struct clk *clk_out;
};
static const struct of_device_id cs2000_of_match[] = {
@@ -371,7 +370,6 @@ static int cs2000_clk_register(struct cs2000_priv *priv)
struct device_node *np = dev->of_node;
struct clk_init_data init;
const char *name = np->name;
- struct clk *clk;
static const char *parent_names[CLK_MAX];
int ch = 0; /* it uses ch0 only at this point */
int rate;
@@ -400,18 +398,16 @@ static int cs2000_clk_register(struct cs2000_priv *priv)
priv->hw.init = &init;
- clk = clk_register(dev, &priv->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ret;
- ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
if (ret < 0) {
- clk_unregister(clk);
+ clk_hw_unregister(&priv->hw);
return ret;
}
- priv->clk_out = clk;
-
return 0;
}
@@ -454,7 +450,7 @@ static int cs2000_remove(struct i2c_client *client)
of_clk_del_provider(np);
- clk_unregister(priv->clk_out);
+ clk_hw_unregister(&priv->hw);
return 0;
}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a0f55bc1ad3d..96386ffc8483 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -352,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- bestdiv = readl(divider->reg) >> divider->shift;
+ bestdiv = clk_readl(divider->reg) >> divider->shift;
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 22e4c659704e..8802a2dd56ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -10,24 +10,31 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/efm32-cmu.h>
#define CMU_HFPERCLKEN0 0x44
+#define CMU_MAX_CLKS 37
-static struct clk *clk[37];
-static struct clk_onecell_data clk_data = {
- .clks = clk,
- .clk_num = ARRAY_SIZE(clk),
-};
+static struct clk_hw_onecell_data *clk_data;
static void __init efm32gg_cmu_init(struct device_node *np)
{
int i;
void __iomem *base;
+ struct clk_hw **hws;
- for (i = 0; i < ARRAY_SIZE(clk); ++i)
- clk[i] = ERR_PTR(-ENOENT);
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * CMU_MAX_CLKS, GFP_KERNEL);
+
+ if (!clk_data)
+ return;
+	clk_data->num = CMU_MAX_CLKS;
+	hws = clk_data->hws;
+
+ for (i = 0; i < CMU_MAX_CLKS; ++i)
+ hws[i] = ERR_PTR(-ENOENT);
base = of_iomap(np, 0);
if (!base) {
@@ -35,46 +42,46 @@ static void __init efm32gg_cmu_init(struct device_node *np)
return;
}
- clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
- 0, 48000000);
+ hws[clk_HFXO] = clk_hw_register_fixed_rate(NULL, "HFXO", NULL, 0,
+ 48000000);
- clk[clk_HFPERCLKUSART0] = clk_register_gate(NULL, "HFPERCLK.USART0",
+ hws[clk_HFPERCLKUSART0] = clk_hw_register_gate(NULL, "HFPERCLK.USART0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
- clk[clk_HFPERCLKUSART1] = clk_register_gate(NULL, "HFPERCLK.USART1",
+ hws[clk_HFPERCLKUSART1] = clk_hw_register_gate(NULL, "HFPERCLK.USART1",
"HFXO", 0, base + CMU_HFPERCLKEN0, 1, 0, NULL);
- clk[clk_HFPERCLKUSART2] = clk_register_gate(NULL, "HFPERCLK.USART2",
+ hws[clk_HFPERCLKUSART2] = clk_hw_register_gate(NULL, "HFPERCLK.USART2",
"HFXO", 0, base + CMU_HFPERCLKEN0, 2, 0, NULL);
- clk[clk_HFPERCLKUART0] = clk_register_gate(NULL, "HFPERCLK.UART0",
+ hws[clk_HFPERCLKUART0] = clk_hw_register_gate(NULL, "HFPERCLK.UART0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 3, 0, NULL);
- clk[clk_HFPERCLKUART1] = clk_register_gate(NULL, "HFPERCLK.UART1",
+ hws[clk_HFPERCLKUART1] = clk_hw_register_gate(NULL, "HFPERCLK.UART1",
"HFXO", 0, base + CMU_HFPERCLKEN0, 4, 0, NULL);
- clk[clk_HFPERCLKTIMER0] = clk_register_gate(NULL, "HFPERCLK.TIMER0",
+ hws[clk_HFPERCLKTIMER0] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 5, 0, NULL);
- clk[clk_HFPERCLKTIMER1] = clk_register_gate(NULL, "HFPERCLK.TIMER1",
+ hws[clk_HFPERCLKTIMER1] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER1",
"HFXO", 0, base + CMU_HFPERCLKEN0, 6, 0, NULL);
- clk[clk_HFPERCLKTIMER2] = clk_register_gate(NULL, "HFPERCLK.TIMER2",
+ hws[clk_HFPERCLKTIMER2] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER2",
"HFXO", 0, base + CMU_HFPERCLKEN0, 7, 0, NULL);
- clk[clk_HFPERCLKTIMER3] = clk_register_gate(NULL, "HFPERCLK.TIMER3",
+ hws[clk_HFPERCLKTIMER3] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER3",
"HFXO", 0, base + CMU_HFPERCLKEN0, 8, 0, NULL);
- clk[clk_HFPERCLKACMP0] = clk_register_gate(NULL, "HFPERCLK.ACMP0",
+ hws[clk_HFPERCLKACMP0] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 9, 0, NULL);
- clk[clk_HFPERCLKACMP1] = clk_register_gate(NULL, "HFPERCLK.ACMP1",
+ hws[clk_HFPERCLKACMP1] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP1",
"HFXO", 0, base + CMU_HFPERCLKEN0, 10, 0, NULL);
- clk[clk_HFPERCLKI2C0] = clk_register_gate(NULL, "HFPERCLK.I2C0",
+ hws[clk_HFPERCLKI2C0] = clk_hw_register_gate(NULL, "HFPERCLK.I2C0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 11, 0, NULL);
- clk[clk_HFPERCLKI2C1] = clk_register_gate(NULL, "HFPERCLK.I2C1",
+ hws[clk_HFPERCLKI2C1] = clk_hw_register_gate(NULL, "HFPERCLK.I2C1",
"HFXO", 0, base + CMU_HFPERCLKEN0, 12, 0, NULL);
- clk[clk_HFPERCLKGPIO] = clk_register_gate(NULL, "HFPERCLK.GPIO",
+ hws[clk_HFPERCLKGPIO] = clk_hw_register_gate(NULL, "HFPERCLK.GPIO",
"HFXO", 0, base + CMU_HFPERCLKEN0, 13, 0, NULL);
- clk[clk_HFPERCLKVCMP] = clk_register_gate(NULL, "HFPERCLK.VCMP",
+ hws[clk_HFPERCLKVCMP] = clk_hw_register_gate(NULL, "HFPERCLK.VCMP",
"HFXO", 0, base + CMU_HFPERCLKEN0, 14, 0, NULL);
- clk[clk_HFPERCLKPRS] = clk_register_gate(NULL, "HFPERCLK.PRS",
+ hws[clk_HFPERCLKPRS] = clk_hw_register_gate(NULL, "HFPERCLK.PRS",
"HFXO", 0, base + CMU_HFPERCLKEN0, 15, 0, NULL);
- clk[clk_HFPERCLKADC0] = clk_register_gate(NULL, "HFPERCLK.ADC0",
+ hws[clk_HFPERCLKADC0] = clk_hw_register_gate(NULL, "HFPERCLK.ADC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 16, 0, NULL);
- clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0",
+ hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ clk_data->num = CMU_MAX_CLKS;
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
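The of_clk_hw_onecell_get() helper used by this provider essentially bounds-checks the consumer's cell index against clk_data->num and returns the stored clk_hw pointer, which is why the slots are pre-filled with ERR_PTR(-ENOENT) and why ->num has to be set before registration. A minimal sketch of that lookup, written against the same clk_hw_onecell_data layout, looks roughly like this:

static struct clk_hw *
onecell_get_sketch(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];	/* #clock-cells = <1> index */

	if (idx >= hw_data->num)		/* reject out-of-range requests */
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];		/* may still be ERR_PTR(-ENOENT) */
}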
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 4db3be214077..a5d402de5584 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
/*
* DOC: basic fixed multiplier and divider clock that cannot gate
@@ -147,27 +148,25 @@ static const struct of_device_id set_rate_parent_matches[] = {
{ /* Sentinel */ },
};
-/**
- * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
- */
-void __init of_fixed_factor_clk_setup(struct device_node *node)
+static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
unsigned long flags = 0;
u32 div, mult;
+ int ret;
if (of_property_read_u32(node, "clock-div", &div)) {
pr_err("%s Fixed factor clock <%s> must have a clock-div property\n",
__func__, node->name);
- return;
+ return ERR_PTR(-EIO);
}
if (of_property_read_u32(node, "clock-mult", &mult)) {
pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n",
__func__, node->name);
- return;
+ return ERR_PTR(-EIO);
}
of_property_read_string(node, "clock-output-names", &clk_name);
@@ -178,10 +177,67 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
mult, div);
- if (!IS_ERR(clk))
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret) {
+ clk_unregister(clk);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+/**
+ * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
+ */
+void __init of_fixed_factor_clk_setup(struct device_node *node)
+{
+ _of_fixed_factor_clk_setup(node);
}
-EXPORT_SYMBOL_GPL(of_fixed_factor_clk_setup);
CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
of_fixed_factor_clk_setup);
+
+static int of_fixed_factor_clk_remove(struct platform_device *pdev)
+{
+ struct clk *clk = platform_get_drvdata(pdev);
+
+ clk_unregister_fixed_factor(clk);
+
+ return 0;
+}
+
+static int of_fixed_factor_clk_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /*
+ * This probe path is only taken when of_fixed_factor_clk_setup()
+ * did not succeed earlier, so retry the registration here.
+ */
+ clk = _of_fixed_factor_clk_setup(pdev->dev.of_node);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ platform_set_drvdata(pdev, clk);
+
+ return 0;
+}
+
+static const struct of_device_id of_fixed_factor_clk_ids[] = {
+ { .compatible = "fixed-factor-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_fixed_factor_clk_ids);
+
+static struct platform_driver of_fixed_factor_clk_driver = {
+ .driver = {
+ .name = "of_fixed_factor_clk",
+ .of_match_table = of_fixed_factor_clk_ids,
+ },
+ .probe = of_fixed_factor_clk_probe,
+ .remove = of_fixed_factor_clk_remove,
+};
+builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
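As a rough consumer-side sketch (the probe function and the device-tree values named in the comment are hypothetical), a fixed-factor clock simply scales its parent rate by mult/div, so once either the early setup path or the platform driver above has registered it, the derived rate is visible through the usual clk API:

/* hypothetical consumer probe; assumes a "fixed-factor-clock" node with
 * clock-mult = <1>, clock-div = <4> and a 100 MHz parent */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_get_rate() reports parent_rate * mult / div, here 25 MHz */
	dev_info(&pdev->dev, "rate: %lu\n", clk_get_rate(clk));
	return 0;
}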
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 2edb39342a02..b5c46b3f8764 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
/*
* DOC: basic fixed-rate clock that cannot gate
@@ -157,18 +158,16 @@ void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
#ifdef CONFIG_OF
-/**
- * of_fixed_clk_setup() - Setup function for simple fixed rate clock
- */
-void of_fixed_clk_setup(struct device_node *node)
+static struct clk *_of_fixed_clk_setup(struct device_node *node)
{
struct clk *clk;
const char *clk_name = node->name;
u32 rate;
u32 accuracy = 0;
+ int ret;
if (of_property_read_u32(node, "clock-frequency", &rate))
- return;
+ return ERR_PTR(-EIO);
of_property_read_u32(node, "clock-accuracy", &accuracy);
@@ -176,9 +175,66 @@ void of_fixed_clk_setup(struct device_node *node)
clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
0, rate, accuracy);
- if (!IS_ERR(clk))
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret) {
+ clk_unregister(clk);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+/**
+ * of_fixed_clk_setup() - Setup function for simple fixed rate clock
+ */
+void __init of_fixed_clk_setup(struct device_node *node)
+{
+ _of_fixed_clk_setup(node);
}
-EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
+
+static int of_fixed_clk_remove(struct platform_device *pdev)
+{
+ struct clk *clk = platform_get_drvdata(pdev);
+
+ clk_unregister_fixed_rate(clk);
+
+ return 0;
+}
+
+static int of_fixed_clk_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /*
+ * This probe path is only taken when of_fixed_clk_setup()
+ * did not succeed earlier, so retry the registration here.
+ */
+ clk = _of_fixed_clk_setup(pdev->dev.of_node);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ platform_set_drvdata(pdev, clk);
+
+ return 0;
+}
+
+static const struct of_device_id of_fixed_clk_ids[] = {
+ { .compatible = "fixed-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_fixed_clk_ids);
+
+static struct platform_driver of_fixed_clk_driver = {
+ .driver = {
+ .name = "of_fixed_clk",
+ .of_match_table = of_fixed_clk_ids,
+ },
+ .probe = of_fixed_clk_probe,
+ .remove = of_fixed_clk_remove,
+};
+builtin_platform_driver(of_fixed_clk_driver);
#endif
diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c
deleted file mode 100644
index 5097831387ff..000000000000
--- a/drivers/clk/clk-ls1x.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2012 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-
-#include <loongson1.h>
-
-#define OSC (33 * 1000000)
-#define DIV_APB 2
-
-static DEFINE_SPINLOCK(_lock);
-
-static int ls1x_pll_clk_enable(struct clk_hw *hw)
-{
- return 0;
-}
-
-static void ls1x_pll_clk_disable(struct clk_hw *hw)
-{
-}
-
-static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u32 pll, rate;
-
- pll = __raw_readl(LS1X_CLK_PLL_FREQ);
- rate = 12 + (pll & 0x3f) + (((pll >> 8) & 0x3ff) >> 10);
- rate *= OSC;
- rate >>= 1;
-
- return rate;
-}
-
-static const struct clk_ops ls1x_pll_clk_ops = {
- .enable = ls1x_pll_clk_enable,
- .disable = ls1x_pll_clk_disable,
- .recalc_rate = ls1x_pll_recalc_rate,
-};
-
-static struct clk *__init clk_register_pll(struct device *dev,
- const char *name,
- const char *parent_name,
- unsigned long flags)
-{
- struct clk_hw *hw;
- struct clk *clk;
- struct clk_init_data init;
-
- /* allocate the divider */
- hw = kzalloc(sizeof(struct clk_hw), GFP_KERNEL);
- if (!hw) {
- pr_err("%s: could not allocate clk_hw\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- init.name = name;
- init.ops = &ls1x_pll_clk_ops;
- init.flags = flags | CLK_IS_BASIC;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- hw->init = &init;
-
- /* register the clock */
- clk = clk_register(dev, hw);
-
- if (IS_ERR(clk))
- kfree(hw);
-
- return clk;
-}
-
-static const char * const cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", };
-static const char * const ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", };
-static const char * const dc_parents[] = { "dc_clk_div", "osc_33m_clk", };
-
-void __init ls1x_clk_init(void)
-{
- struct clk *clk;
-
- clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, 0, OSC);
- clk_register_clkdev(clk, "osc_33m_clk", NULL);
-
- /* clock derived from 33 MHz OSC clk */
- clk = clk_register_pll(NULL, "pll_clk", "osc_33m_clk", 0);
- clk_register_clkdev(clk, "pll_clk", NULL);
-
- /* clock derived from PLL clk */
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ CPU CLK
- * \___ PLL ___ CPU DIV ___| |
- * |_____|
- */
- clk = clk_register_divider(NULL, "cpu_clk_div", "pll_clk",
- CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
- DIV_CPU_SHIFT, DIV_CPU_WIDTH,
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST, &_lock);
- clk_register_clkdev(clk, "cpu_clk_div", NULL);
- clk = clk_register_mux(NULL, "cpu_clk", cpu_parents,
- ARRAY_SIZE(cpu_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock);
- clk_register_clkdev(clk, "cpu_clk", NULL);
-
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ DC CLK
- * \___ PLL ___ DC DIV ___| |
- * |_____|
- */
- clk = clk_register_divider(NULL, "dc_clk_div", "pll_clk",
- 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
- DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_register_clkdev(clk, "dc_clk_div", NULL);
- clk = clk_register_mux(NULL, "dc_clk", dc_parents,
- ARRAY_SIZE(dc_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock);
- clk_register_clkdev(clk, "dc_clk", NULL);
-
- /* _____
- * _______________________| |
- * OSC ___/ | MUX |___ DDR CLK
- * \___ PLL ___ DDR DIV ___| |
- * |_____|
- */
- clk = clk_register_divider(NULL, "ahb_clk_div", "pll_clk",
- 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
- DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED,
- &_lock);
- clk_register_clkdev(clk, "ahb_clk_div", NULL);
- clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
- ARRAY_SIZE(ahb_parents),
- CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
- BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock);
- clk_register_clkdev(clk, "ahb_clk", NULL);
- clk_register_clkdev(clk, "stmmaceth", NULL);
-
- /* clock derived from AHB clk */
- /* APB clk is always half of the AHB clk */
- clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
- DIV_APB);
- clk_register_clkdev(clk, "apb_clk", NULL);
- clk_register_clkdev(clk, "ls1x_i2c", NULL);
- clk_register_clkdev(clk, "ls1x_pwmtimer", NULL);
- clk_register_clkdev(clk, "ls1x_spi", NULL);
- clk_register_clkdev(clk, "ls1x_wdt", NULL);
- clk_register_clkdev(clk, "serial8250", NULL);
-}
diff --git a/drivers/clk/clk-max-gen.c b/drivers/clk/clk-max-gen.c
deleted file mode 100644
index 35af9cb6da4f..000000000000
--- a/drivers/clk/clk-max-gen.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * clk-max-gen.c - Generic clock driver for Maxim PMICs clocks
- *
- * Copyright (C) 2014 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electornics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on clk-max77686.c
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/regmap.h>
-#include <linux/platform_device.h>
-#include <linux/clk-provider.h>
-#include <linux/mutex.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-#include <linux/export.h>
-
-#include "clk-max-gen.h"
-
-struct max_gen_clk {
- struct regmap *regmap;
- u32 mask;
- u32 reg;
- struct clk_hw hw;
-};
-
-static struct max_gen_clk *to_max_gen_clk(struct clk_hw *hw)
-{
- return container_of(hw, struct max_gen_clk, hw);
-}
-
-static int max_gen_clk_prepare(struct clk_hw *hw)
-{
- struct max_gen_clk *max_gen = to_max_gen_clk(hw);
-
- return regmap_update_bits(max_gen->regmap, max_gen->reg,
- max_gen->mask, max_gen->mask);
-}
-
-static void max_gen_clk_unprepare(struct clk_hw *hw)
-{
- struct max_gen_clk *max_gen = to_max_gen_clk(hw);
-
- regmap_update_bits(max_gen->regmap, max_gen->reg,
- max_gen->mask, ~max_gen->mask);
-}
-
-static int max_gen_clk_is_prepared(struct clk_hw *hw)
-{
- struct max_gen_clk *max_gen = to_max_gen_clk(hw);
- int ret;
- u32 val;
-
- ret = regmap_read(max_gen->regmap, max_gen->reg, &val);
-
- if (ret < 0)
- return -EINVAL;
-
- return val & max_gen->mask;
-}
-
-static unsigned long max_gen_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- return 32768;
-}
-
-struct clk_ops max_gen_clk_ops = {
- .prepare = max_gen_clk_prepare,
- .unprepare = max_gen_clk_unprepare,
- .is_prepared = max_gen_clk_is_prepared,
- .recalc_rate = max_gen_recalc_rate,
-};
-EXPORT_SYMBOL_GPL(max_gen_clk_ops);
-
-static struct clk *max_gen_clk_register(struct device *dev,
- struct max_gen_clk *max_gen)
-{
- struct clk *clk;
- struct clk_hw *hw = &max_gen->hw;
- int ret;
-
- clk = devm_clk_register(dev, hw);
- if (IS_ERR(clk))
- return clk;
-
- ret = clk_register_clkdev(clk, hw->init->name, NULL);
-
- if (ret)
- return ERR_PTR(ret);
-
- return clk;
-}
-
-int max_gen_clk_probe(struct platform_device *pdev, struct regmap *regmap,
- u32 reg, struct clk_init_data *clks_init, int num_init)
-{
- int i, ret;
- struct max_gen_clk *max_gen_clks;
- struct clk **clocks;
- struct device *dev = pdev->dev.parent;
- const char *clk_name;
- struct clk_init_data *init;
-
- clocks = devm_kzalloc(dev, sizeof(struct clk *) * num_init, GFP_KERNEL);
- if (!clocks)
- return -ENOMEM;
-
- max_gen_clks = devm_kzalloc(dev, sizeof(struct max_gen_clk)
- * num_init, GFP_KERNEL);
- if (!max_gen_clks)
- return -ENOMEM;
-
- for (i = 0; i < num_init; i++) {
- max_gen_clks[i].regmap = regmap;
- max_gen_clks[i].mask = 1 << i;
- max_gen_clks[i].reg = reg;
-
- init = devm_kzalloc(dev, sizeof(*init), GFP_KERNEL);
- if (!init)
- return -ENOMEM;
-
- if (dev->of_node &&
- !of_property_read_string_index(dev->of_node,
- "clock-output-names",
- i, &clk_name))
- init->name = clk_name;
- else
- init->name = clks_init[i].name;
-
- init->ops = clks_init[i].ops;
- init->flags = clks_init[i].flags;
-
- max_gen_clks[i].hw.init = init;
-
- clocks[i] = max_gen_clk_register(dev, &max_gen_clks[i]);
- if (IS_ERR(clocks[i])) {
- ret = PTR_ERR(clocks[i]);
- dev_err(dev, "failed to register %s\n",
- max_gen_clks[i].hw.init->name);
- return ret;
- }
- }
-
- platform_set_drvdata(pdev, clocks);
-
- if (dev->of_node) {
- struct clk_onecell_data *of_data;
-
- of_data = devm_kzalloc(dev, sizeof(*of_data), GFP_KERNEL);
- if (!of_data)
- return -ENOMEM;
-
- of_data->clks = clocks;
- of_data->clk_num = num_init;
- ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
- of_data);
-
- if (ret) {
- dev_err(dev, "failed to register OF clock provider\n");
- return ret;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(max_gen_clk_probe);
-
-int max_gen_clk_remove(struct platform_device *pdev, int num_init)
-{
- struct device *dev = pdev->dev.parent;
-
- if (dev->of_node)
- of_clk_del_provider(dev->of_node);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(max_gen_clk_remove);
diff --git a/drivers/clk/clk-max-gen.h b/drivers/clk/clk-max-gen.h
deleted file mode 100644
index 997e86fc3f4d..000000000000
--- a/drivers/clk/clk-max-gen.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * clk-max-gen.h - Generic clock driver for Maxim PMICs clocks
- *
- * Copyright (C) 2014 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __CLK_MAX_GEN_H__
-#define __CLK_MAX_GEN_H__
-
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/clkdev.h>
-#include <linux/regmap.h>
-#include <linux/platform_device.h>
-
-int max_gen_clk_probe(struct platform_device *pdev, struct regmap *regmap,
- u32 reg, struct clk_init_data *clks_init, int num_init);
-int max_gen_clk_remove(struct platform_device *pdev, int num_init);
-extern struct clk_ops max_gen_clk_ops;
-
-#endif /* __CLK_MAX_GEN_H__ */
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 9b6f2772e948..b637f5979023 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -1,5 +1,5 @@
/*
- * clk-max77686.c - Clock driver for Maxim 77686
+ * clk-max77686.c - Clock driver for Maxim 77686/MAX77802
*
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
@@ -25,46 +25,284 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/mfd/max77620.h>
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <linux/clk-provider.h>
#include <linux/mutex.h>
#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
#include <dt-bindings/clock/maxim,max77686.h>
-#include "clk-max-gen.h"
+#include <dt-bindings/clock/maxim,max77802.h>
+#include <dt-bindings/clock/maxim,max77620.h>
-static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
+#define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3
+
+enum max77686_chip_name {
+ CHIP_MAX77686,
+ CHIP_MAX77802,
+ CHIP_MAX77620,
+};
+
+struct max77686_hw_clk_info {
+ const char *name;
+ u32 clk_reg;
+ u32 clk_enable_mask;
+ u32 flags;
+};
+
+struct max77686_clk_init_data {
+ struct regmap *regmap;
+ struct clk_hw hw;
+ struct clk_init_data clk_idata;
+ const struct max77686_hw_clk_info *clk_info;
+};
+
+struct max77686_clk_driver_data {
+ enum max77686_chip_name chip;
+ struct max77686_clk_init_data *max_clk_data;
+ size_t num_clks;
+};
+
+static const struct
+max77686_hw_clk_info max77686_hw_clks_info[MAX77686_CLKS_NUM] = {
[MAX77686_CLK_AP] = {
.name = "32khz_ap",
- .ops = &max_gen_clk_ops,
+ .clk_reg = MAX77686_REG_32KHZ,
+ .clk_enable_mask = BIT(MAX77686_CLK_AP),
},
[MAX77686_CLK_CP] = {
.name = "32khz_cp",
- .ops = &max_gen_clk_ops,
+ .clk_reg = MAX77686_REG_32KHZ,
+ .clk_enable_mask = BIT(MAX77686_CLK_CP),
},
[MAX77686_CLK_PMIC] = {
.name = "32khz_pmic",
- .ops = &max_gen_clk_ops,
+ .clk_reg = MAX77686_REG_32KHZ,
+ .clk_enable_mask = BIT(MAX77686_CLK_PMIC),
+ },
+};
+
+static const struct
+max77686_hw_clk_info max77802_hw_clks_info[MAX77802_CLKS_NUM] = {
+ [MAX77802_CLK_32K_AP] = {
+ .name = "32khz_ap",
+ .clk_reg = MAX77802_REG_32KHZ,
+ .clk_enable_mask = BIT(MAX77802_CLK_32K_AP),
+ },
+ [MAX77802_CLK_32K_CP] = {
+ .name = "32khz_cp",
+ .clk_reg = MAX77802_REG_32KHZ,
+ .clk_enable_mask = BIT(MAX77802_CLK_32K_CP),
+ },
+};
+
+static const struct
+max77686_hw_clk_info max77620_hw_clks_info[MAX77620_CLKS_NUM] = {
+ [MAX77620_CLK_32K_OUT0] = {
+ .name = "32khz_out0",
+ .clk_reg = MAX77620_REG_CNFG1_32K,
+ .clk_enable_mask = MAX77620_CNFG1_32K_OUT0_EN,
},
};
+static struct max77686_clk_init_data *to_max77686_clk_init_data(
+ struct clk_hw *hw)
+{
+ return container_of(hw, struct max77686_clk_init_data, hw);
+}
+
+static int max77686_clk_prepare(struct clk_hw *hw)
+{
+ struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
+
+ return regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg,
+ max77686->clk_info->clk_enable_mask,
+ max77686->clk_info->clk_enable_mask);
+}
+
+static void max77686_clk_unprepare(struct clk_hw *hw)
+{
+ struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
+
+ regmap_update_bits(max77686->regmap, max77686->clk_info->clk_reg,
+ max77686->clk_info->clk_enable_mask,
+ ~max77686->clk_info->clk_enable_mask);
+}
+
+static int max77686_clk_is_prepared(struct clk_hw *hw)
+{
+ struct max77686_clk_init_data *max77686 = to_max77686_clk_init_data(hw);
+ int ret;
+ u32 val;
+
+ ret = regmap_read(max77686->regmap, max77686->clk_info->clk_reg, &val);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return val & max77686->clk_info->clk_enable_mask;
+}
+
+static unsigned long max77686_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 32768;
+}
+
+static struct clk_ops max77686_clk_ops = {
+ .prepare = max77686_clk_prepare,
+ .unprepare = max77686_clk_unprepare,
+ .is_prepared = max77686_clk_is_prepared,
+ .recalc_rate = max77686_recalc_rate,
+};
+
+static struct clk_hw *
+of_clk_max77686_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct max77686_clk_driver_data *drv_data = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= drv_data->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &drv_data->max_clk_data[idx].hw;
+}
+
static int max77686_clk_probe(struct platform_device *pdev)
{
- struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct max77686_clk_driver_data *drv_data;
+ const struct max77686_hw_clk_info *hw_clks;
+ struct regmap *regmap;
+ int i, ret, num_clks;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ regmap = dev_get_regmap(parent, NULL);
+ if (!regmap) {
+ dev_err(dev, "Failed to get rtc regmap\n");
+ return -ENODEV;
+ }
+
+ drv_data->chip = id->driver_data;
+
+ switch (drv_data->chip) {
+ case CHIP_MAX77686:
+ num_clks = MAX77686_CLKS_NUM;
+ hw_clks = max77686_hw_clks_info;
+ break;
+
+ case CHIP_MAX77802:
+ num_clks = MAX77802_CLKS_NUM;
+ hw_clks = max77802_hw_clks_info;
+ break;
+
+ case CHIP_MAX77620:
+ num_clks = MAX77620_CLKS_NUM;
+ hw_clks = max77620_hw_clks_info;
+ break;
- return max_gen_clk_probe(pdev, iodev->regmap, MAX77686_REG_32KHZ,
- max77686_clks_init, MAX77686_CLKS_NUM);
+ default:
+ dev_err(dev, "Unknown Chip ID\n");
+ return -EINVAL;
+ }
+
+ drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
+ sizeof(*drv_data->max_clk_data),
+ GFP_KERNEL);
+ if (!drv_data->max_clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clks; i++) {
+ struct max77686_clk_init_data *max_clk_data;
+ const char *clk_name;
+
+ max_clk_data = &drv_data->max_clk_data[i];
+
+ max_clk_data->regmap = regmap;
+ max_clk_data->clk_info = &hw_clks[i];
+ max_clk_data->clk_idata.flags = hw_clks[i].flags;
+ max_clk_data->clk_idata.ops = &max77686_clk_ops;
+
+ if (parent->of_node &&
+ !of_property_read_string_index(parent->of_node,
+ "clock-output-names",
+ i, &clk_name))
+ max_clk_data->clk_idata.name = clk_name;
+ else
+ max_clk_data->clk_idata.name = hw_clks[i].name;
+
+ max_clk_data->hw.init = &max_clk_data->clk_idata;
+
+ ret = devm_clk_hw_register(dev, &max_clk_data->hw);
+ if (ret) {
+ dev_err(dev, "Failed to clock register: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_hw_register_clkdev(&max_clk_data->hw,
+ max_clk_data->clk_idata.name, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to clkdev register: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (parent->of_node) {
+ ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get,
+ drv_data);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to register OF clock provider: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* MAX77802: Enable low-jitter mode on the 32khz clocks. */
+ if (drv_data->chip == CHIP_MAX77802) {
+ ret = regmap_update_bits(regmap, MAX77802_REG_32KHZ,
+ 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT,
+ 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
+ if (ret < 0) {
+ dev_err(dev, "Failed to config low-jitter: %d\n", ret);
+ goto remove_of_clk_provider;
+ }
+ }
+
+ return 0;
+
+remove_of_clk_provider:
+ if (parent->of_node)
+ of_clk_del_provider(parent->of_node);
+
+ return ret;
}
static int max77686_clk_remove(struct platform_device *pdev)
{
- return max_gen_clk_remove(pdev, MAX77686_CLKS_NUM);
+ struct device *parent = pdev->dev.parent;
+
+ if (parent->of_node)
+ of_clk_del_provider(parent->of_node);
+
+ return 0;
}
static const struct platform_device_id max77686_clk_id[] = {
- { "max77686-clk", 0},
- { },
+ { "max77686-clk", .driver_data = CHIP_MAX77686, },
+ { "max77802-clk", .driver_data = CHIP_MAX77802, },
+ { "max77620-clock", .driver_data = CHIP_MAX77620, },
+ {},
};
MODULE_DEVICE_TABLE(platform, max77686_clk_id);
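A hedged usage sketch: device tree consumers pick a clock by index through the provider above (using the dt-bindings constants such as MAX77686_CLK_AP), while the per-clock clkdev lookups keep name-based clk_get() working; the function and device below are illustrative only.

/* illustrative consumer; assumes clocks = <&max77686 MAX77686_CLK_AP>
 * in the consumer node, or the "32khz_ap" clkdev lookup on non-DT boards */
static int foo_enable_32k(struct device *dev)
{
	struct clk *clk32k = devm_clk_get(dev, NULL);

	if (IS_ERR(clk32k))
		clk32k = clk_get(NULL, "32khz_ap");
	if (IS_ERR(clk32k))
		return PTR_ERR(clk32k);

	/* prepare sets the enable bit through the shared regmap */
	return clk_prepare_enable(clk32k);
}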
diff --git a/drivers/clk/clk-max77802.c b/drivers/clk/clk-max77802.c
deleted file mode 100644
index 355dd2e522c3..000000000000
--- a/drivers/clk/clk-max77802.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * clk-max77802.c - Clock driver for Maxim 77802
- *
- * Copyright (C) 2014 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electornics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on clk-max77686.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/clk-provider.h>
-#include <linux/mutex.h>
-#include <linux/clkdev.h>
-
-#include <dt-bindings/clock/maxim,max77802.h>
-#include "clk-max-gen.h"
-
-#define MAX77802_CLOCK_OPMODE_MASK 0x1
-#define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3
-
-static struct clk_init_data max77802_clks_init[MAX77802_CLKS_NUM] = {
- [MAX77802_CLK_32K_AP] = {
- .name = "32khz_ap",
- .ops = &max_gen_clk_ops,
- },
- [MAX77802_CLK_32K_CP] = {
- .name = "32khz_cp",
- .ops = &max_gen_clk_ops,
- },
-};
-
-static int max77802_clk_probe(struct platform_device *pdev)
-{
- struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- int ret;
-
- ret = max_gen_clk_probe(pdev, iodev->regmap, MAX77802_REG_32KHZ,
- max77802_clks_init, MAX77802_CLKS_NUM);
-
- if (ret) {
- dev_err(&pdev->dev, "generic probe failed %d\n", ret);
- return ret;
- }
-
- /* Enable low-jitter mode on the 32khz clocks. */
- ret = regmap_update_bits(iodev->regmap, MAX77802_REG_32KHZ,
- 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT,
- 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT);
- if (ret < 0)
- dev_err(&pdev->dev, "failed to enable low-jitter mode\n");
-
- return ret;
-}
-
-static int max77802_clk_remove(struct platform_device *pdev)
-{
- return max_gen_clk_remove(pdev, MAX77802_CLKS_NUM);
-}
-
-static const struct platform_device_id max77802_clk_id[] = {
- { "max77802-clk", 0},
- { },
-};
-MODULE_DEVICE_TABLE(platform, max77802_clk_id);
-
-static struct platform_driver max77802_clk_driver = {
- .driver = {
- .name = "max77802-clk",
- },
- .probe = max77802_clk_probe,
- .remove = max77802_clk_remove,
- .id_table = max77802_clk_id,
-};
-
-module_platform_driver(max77802_clk_driver);
-
-MODULE_DESCRIPTION("MAXIM 77802 Clock Driver");
-MODULE_AUTHOR("Javier Martinez Canillas <javier@osg.samsung.com");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c
index e0817754ca3e..2a83a3ff1d09 100644
--- a/drivers/clk/clk-mb86s7x.c
+++ b/drivers/clk/clk-mb86s7x.c
@@ -327,10 +327,11 @@ static struct clk_ops clk_clc_ops = {
.set_rate = clc_set_rate,
};
-struct clk *mb86s7x_clclk_register(struct device *cpu_dev)
+static struct clk_hw *mb86s7x_clclk_register(struct device *cpu_dev)
{
struct clk_init_data init;
struct cl_clk *clc;
+ int ret;
clc = kzalloc(sizeof(*clc), GFP_KERNEL);
if (!clc)
@@ -344,14 +345,17 @@ struct clk *mb86s7x_clclk_register(struct device *cpu_dev)
init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
- return devm_clk_register(cpu_dev, &clc->hw);
+ ret = devm_clk_hw_register(cpu_dev, &clc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+ return &clc->hw;
}
static int mb86s7x_clclk_of_init(void)
{
int cpu, ret = -ENODEV;
struct device_node *np;
- struct clk *clk;
+ struct clk_hw *hw;
np = of_find_compatible_node(NULL, NULL, "fujitsu,mb86s70-scb-1.0");
if (!np || !of_device_is_available(np))
@@ -365,12 +369,12 @@ static int mb86s7x_clclk_of_init(void)
continue;
}
- clk = mb86s7x_clclk_register(cpu_dev);
- if (IS_ERR(clk)) {
+ hw = mb86s7x_clclk_register(cpu_dev);
+ if (IS_ERR(hw)) {
pr_err("failed to register cpu%d clock\n", cpu);
continue;
}
- if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+ if (clk_hw_register_clkdev(hw, NULL, dev_name(cpu_dev))) {
pr_err("failed to register cpu%d clock lookup\n", cpu);
continue;
}
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
index f37f719643ec..b86dac851116 100644
--- a/drivers/clk/clk-moxart.c
+++ b/drivers/clk/clk-moxart.c
@@ -19,7 +19,8 @@
static void __init moxart_of_pll_clk_init(struct device_node *node)
{
static void __iomem *base;
- struct clk *clk, *ref_clk;
+ struct clk_hw *hw;
+ struct clk *ref_clk;
unsigned int mul;
const char *name = node->name;
const char *parent_name;
@@ -42,14 +43,14 @@ static void __init moxart_of_pll_clk_init(struct device_node *node)
return;
}
- clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
- if (IS_ERR(clk)) {
+ hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
+ if (IS_ERR(hw)) {
pr_err("%s: failed to register clock\n", node->full_name);
return;
}
- clk_register_clkdev(clk, NULL, name);
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_hw_register_clkdev(hw, NULL, name);
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
moxart_of_pll_clk_init);
@@ -57,7 +58,8 @@ CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
static void __init moxart_of_apb_clk_init(struct device_node *node)
{
static void __iomem *base;
- struct clk *clk, *pll_clk;
+ struct clk_hw *hw;
+ struct clk *pll_clk;
unsigned int div, val;
unsigned int div_idx[] = { 2, 3, 4, 6, 8};
const char *name = node->name;
@@ -85,14 +87,14 @@ static void __init moxart_of_apb_clk_init(struct device_node *node)
return;
}
- clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
- if (IS_ERR(clk)) {
+ hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
+ if (IS_ERR(hw)) {
pr_err("%s: failed to register clock\n", node->full_name);
return;
}
- clk_register_clkdev(clk, NULL, name);
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_hw_register_clkdev(hw, NULL, name);
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
moxart_of_apb_clk_init);
diff --git a/drivers/clk/clk-nspire.c b/drivers/clk/clk-nspire.c
index 64f196a90816..f861011d5d21 100644
--- a/drivers/clk/clk-nspire.c
+++ b/drivers/clk/clk-nspire.c
@@ -69,7 +69,7 @@ static void __init nspire_ahbdiv_setup(struct device_node *node,
{
u32 val;
void __iomem *io;
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
const char *parent_name;
struct nspire_clk_info info;
@@ -85,10 +85,10 @@ static void __init nspire_ahbdiv_setup(struct device_node *node,
of_property_read_string(node, "clock-output-names", &clk_name);
parent_name = of_clk_get_parent_name(node, 0);
- clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0,
- 1, info.base_ahb_ratio);
- if (!IS_ERR(clk))
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ hw = clk_hw_register_fixed_factor(NULL, clk_name, parent_name, 0,
+ 1, info.base_ahb_ratio);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
}
static void __init nspire_ahbdiv_setup_cx(struct device_node *node)
@@ -111,7 +111,7 @@ static void __init nspire_clk_setup(struct device_node *node,
{
u32 val;
void __iomem *io;
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
struct nspire_clk_info info;
@@ -125,9 +125,10 @@ static void __init nspire_clk_setup(struct device_node *node,
of_property_read_string(node, "clock-output-names", &clk_name);
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, info.base_clock);
- if (!IS_ERR(clk))
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ hw = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0,
+ info.base_clock);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
else
return;
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 8328863cb0e0..31f590cea493 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -41,7 +41,6 @@ struct palmas_clk32k_desc {
struct palmas_clock_info {
struct device *dev;
- struct clk *clk;
struct clk_hw hw;
struct palmas *palmas;
const struct palmas_clk32k_desc *clk_desc;
@@ -218,7 +217,7 @@ static int palmas_clks_init_configure(struct palmas_clock_info *cinfo)
}
if (cinfo->ext_control_pin) {
- ret = clk_prepare(cinfo->clk);
+ ret = clk_prepare(cinfo->hw.clk);
if (ret < 0) {
dev_err(cinfo->dev, "Clock prep failed, %d\n", ret);
return ret;
@@ -242,7 +241,6 @@ static int palmas_clks_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const struct palmas_clks_of_match_data *match_data;
struct palmas_clock_info *cinfo;
- struct clk *clk;
int ret;
match_data = of_device_get_match_data(&pdev->dev);
@@ -261,22 +259,20 @@ static int palmas_clks_probe(struct platform_device *pdev)
cinfo->clk_desc = &match_data->desc;
cinfo->hw.init = &match_data->init;
- clk = devm_clk_register(&pdev->dev, &cinfo->hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &cinfo->hw);
+ if (ret) {
dev_err(&pdev->dev, "Fail to register clock %s, %d\n",
match_data->desc.clk_name, ret);
return ret;
}
- cinfo->clk = clk;
ret = palmas_clks_init_configure(cinfo);
if (ret < 0) {
dev_err(&pdev->dev, "Clock config failed, %d\n", ret);
return ret;
}
- ret = of_clk_add_provider(node, of_clk_src_simple_get, cinfo->clk);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &cinfo->hw);
if (ret < 0)
dev_err(&pdev->dev, "Fail to add clock driver, %d\n", ret);
return ret;
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 1630a1f085f7..8cb9d117fdbf 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -61,7 +61,6 @@ static int clk_pwm_probe(struct platform_device *pdev)
struct pwm_device *pwm;
struct pwm_args pargs;
const char *clk_name;
- struct clk *clk;
int ret;
clk_pwm = devm_kzalloc(&pdev->dev, sizeof(*clk_pwm), GFP_KERNEL);
@@ -107,11 +106,11 @@ static int clk_pwm_probe(struct platform_device *pdev)
clk_pwm->pwm = pwm;
clk_pwm->hw.init = &init;
- clk = devm_clk_register(&pdev->dev, &clk_pwm->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &clk_pwm->hw);
+ if (ret)
+ return ret;
- return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return of_clk_add_hw_provider(node, of_clk_hw_simple_get, &clk_pwm->hw);
}
static int clk_pwm_remove(struct platform_device *pdev)
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 58566a17944a..20b105584f82 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -766,7 +766,11 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
if (!hwc)
return NULL;
- hwc->reg = cg->regs + 0x20 * idx;
+ if (cg->info.flags & CG_VER3)
+ hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
+ else
+ hwc->reg = cg->regs + 0x20 * idx;
+
hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
/*
diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c
index 74383039761e..6461f2820a5b 100644
--- a/drivers/clk/clk-rk808.c
+++ b/drivers/clk/clk-rk808.c
@@ -22,11 +22,8 @@
#include <linux/mfd/rk808.h>
#include <linux/i2c.h>
-#define RK808_NR_OUTPUT 2
-
struct rk808_clkout {
struct rk808 *rk808;
- struct clk_onecell_data clk_data;
struct clk_hw clkout1_hw;
struct clk_hw clkout2_hw;
};
@@ -85,14 +82,28 @@ static const struct clk_ops rk808_clkout2_ops = {
.recalc_rate = rk808_clkout_recalc_rate,
};
+static struct clk_hw *
+of_clk_rk808_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct rk808_clkout *rk808_clkout = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= 2) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return idx ? &rk808_clkout->clkout2_hw : &rk808_clkout->clkout1_hw;
+}
+
static int rk808_clkout_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
struct i2c_client *client = rk808->i2c;
struct device_node *node = client->dev.of_node;
struct clk_init_data init = {};
- struct clk **clk_table;
struct rk808_clkout *rk808_clkout;
+ int ret;
rk808_clkout = devm_kzalloc(&client->dev,
sizeof(*rk808_clkout), GFP_KERNEL);
@@ -101,11 +112,6 @@ static int rk808_clkout_probe(struct platform_device *pdev)
rk808_clkout->rk808 = rk808;
- clk_table = devm_kcalloc(&client->dev, RK808_NR_OUTPUT,
- sizeof(struct clk *), GFP_KERNEL);
- if (!clk_table)
- return -ENOMEM;
-
init.parent_names = NULL;
init.num_parents = 0;
init.name = "rk808-clkout1";
@@ -116,10 +122,9 @@ static int rk808_clkout_probe(struct platform_device *pdev)
of_property_read_string_index(node, "clock-output-names",
0, &init.name);
- clk_table[0] = devm_clk_register(&client->dev,
- &rk808_clkout->clkout1_hw);
- if (IS_ERR(clk_table[0]))
- return PTR_ERR(clk_table[0]);
+ ret = devm_clk_hw_register(&client->dev, &rk808_clkout->clkout1_hw);
+ if (ret)
+ return ret;
init.name = "rk808-clkout2";
init.ops = &rk808_clkout2_ops;
@@ -129,16 +134,11 @@ static int rk808_clkout_probe(struct platform_device *pdev)
of_property_read_string_index(node, "clock-output-names",
1, &init.name);
- clk_table[1] = devm_clk_register(&client->dev,
- &rk808_clkout->clkout2_hw);
- if (IS_ERR(clk_table[1]))
- return PTR_ERR(clk_table[1]);
-
- rk808_clkout->clk_data.clks = clk_table;
- rk808_clkout->clk_data.clk_num = RK808_NR_OUTPUT;
+ ret = devm_clk_hw_register(&client->dev, &rk808_clkout->clkout2_hw);
+ if (ret)
+ return ret;
- return of_clk_add_provider(node, of_clk_src_onecell_get,
- &rk808_clkout->clk_data);
+ return of_clk_add_hw_provider(node, of_clk_rk808_get, rk808_clkout);
}
static int rk808_clkout_remove(struct platform_device *pdev)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 6962ee5d1e9a..2a3e9d8e88b0 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -146,13 +146,13 @@ static const struct of_device_id scpi_clk_match[] = {
{}
};
-static struct clk *
+static int
scpi_clk_ops_init(struct device *dev, const struct of_device_id *match,
struct scpi_clk *sclk, const char *name)
{
struct clk_init_data init;
- struct clk *clk;
unsigned long min = 0, max = 0;
+ int ret;
init.name = name;
init.flags = 0;
@@ -164,18 +164,18 @@ scpi_clk_ops_init(struct device *dev, const struct of_device_id *match,
if (init.ops == &scpi_dvfs_ops) {
sclk->info = sclk->scpi_ops->dvfs_get_info(sclk->id);
if (IS_ERR(sclk->info))
- return NULL;
+ return PTR_ERR(sclk->info);
} else if (init.ops == &scpi_clk_ops) {
if (sclk->scpi_ops->clk_get_range(sclk->id, &min, &max) || !max)
- return NULL;
+ return -EINVAL;
} else {
- return NULL;
+ return -EINVAL;
}
- clk = devm_clk_register(dev, &sclk->hw);
- if (!IS_ERR(clk) && max)
+ ret = devm_clk_hw_register(dev, &sclk->hw);
+ if (!ret && max)
clk_hw_set_rate_range(&sclk->hw, min, max);
- return clk;
+ return ret;
}
struct scpi_clk_data {
@@ -183,7 +183,7 @@ struct scpi_clk_data {
unsigned int clk_num;
};
-static struct clk *
+static struct clk_hw *
scpi_of_clk_src_get(struct of_phandle_args *clkspec, void *data)
{
struct scpi_clk *sclk;
@@ -193,7 +193,7 @@ scpi_of_clk_src_get(struct of_phandle_args *clkspec, void *data)
for (count = 0; count < clk_data->clk_num; count++) {
sclk = clk_data->clk[count];
if (idx == sclk->id)
- return sclk->hw.clk;
+ return &sclk->hw;
}
return ERR_PTR(-EINVAL);
@@ -202,8 +202,7 @@ scpi_of_clk_src_get(struct of_phandle_args *clkspec, void *data)
static int scpi_clk_add(struct device *dev, struct device_node *np,
const struct of_device_id *match)
{
- struct clk **clks;
- int idx, count;
+ int idx, count, err;
struct scpi_clk_data *clk_data;
count = of_property_count_strings(np, "clock-output-names");
@@ -222,10 +221,6 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
if (!clk_data->clk)
return -ENOMEM;
- clks = devm_kcalloc(dev, count, sizeof(*clks), GFP_KERNEL);
- if (!clks)
- return -ENOMEM;
-
for (idx = 0; idx < count; idx++) {
struct scpi_clk *sclk;
const char *name;
@@ -249,15 +244,15 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
sclk->id = val;
- clks[idx] = scpi_clk_ops_init(dev, match, sclk, name);
- if (IS_ERR_OR_NULL(clks[idx]))
+ err = scpi_clk_ops_init(dev, match, sclk, name);
+ if (err)
dev_err(dev, "failed to register clock '%s'\n", name);
else
dev_dbg(dev, "Registered clock '%s'\n", name);
clk_data->clk[idx] = sclk;
}
- return of_clk_add_provider(np, scpi_of_clk_src_get, clk_data);
+ return of_clk_add_hw_provider(np, scpi_of_clk_src_get, clk_data);
}
static int scpi_clocks_remove(struct platform_device *pdev)
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index ceef25b0990b..09b6718956bd 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -305,7 +305,6 @@ static int si514_probe(struct i2c_client *client,
{
struct clk_si514 *data;
struct clk_init_data init;
- struct clk *clk;
int err;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
@@ -330,13 +329,13 @@ static int si514_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
- clk = devm_clk_register(&client->dev, &data->hw);
- if (IS_ERR(clk)) {
+ err = devm_clk_hw_register(&client->dev, &data->hw);
+ if (err) {
dev_err(&client->dev, "clock registration failed\n");
- return PTR_ERR(clk);
+ return err;
}
- err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get,
- clk);
+ err = of_clk_add_hw_provider(client->dev.of_node, of_clk_hw_simple_get,
+ &data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index b1bc12c045d3..b051db43fae1 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -54,7 +54,6 @@ struct si5351_driver_data {
enum si5351_variant variant;
struct i2c_client *client;
struct regmap *regmap;
- struct clk_onecell_data onecell;
struct clk *pxtal;
const char *pxtal_name;
@@ -66,6 +65,7 @@ struct si5351_driver_data {
struct si5351_hw_data pll[2];
struct si5351_hw_data *msynth;
struct si5351_hw_data *clkout;
+ size_t num_clkout;
};
static const char * const si5351_input_names[] = {
@@ -1307,11 +1307,31 @@ put_child:
of_node_put(child);
return -EINVAL;
}
+
+static struct clk_hw *
+si53351_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct si5351_driver_data *drvdata = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= drvdata->num_clkout) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &drvdata->clkout[idx].hw;
+}
#else
static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
{
return 0;
}
+
+static struct clk_hw *
+si53351_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
static int si5351_i2c_probe(struct i2c_client *client,
@@ -1321,7 +1341,6 @@ static int si5351_i2c_probe(struct i2c_client *client,
struct si5351_platform_data *pdata;
struct si5351_driver_data *drvdata;
struct clk_init_data init;
- struct clk *clk;
const char *parent_names[4];
u8 num_parents, num_clocks;
int ret, n;
@@ -1438,10 +1457,9 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.num_parents = 1;
}
drvdata->xtal.init = &init;
- clk = devm_clk_register(&client->dev, &drvdata->xtal);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev, &drvdata->xtal);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
@@ -1456,11 +1474,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.num_parents = 1;
}
drvdata->clkin.init = &init;
- clk = devm_clk_register(&client->dev, &drvdata->clkin);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev, &drvdata->clkin);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
}
@@ -1480,10 +1497,9 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.flags = 0;
init.parent_names = parent_names;
init.num_parents = num_parents;
- clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev, &drvdata->pll[0].hw);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
@@ -1505,10 +1521,9 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.parent_names = parent_names;
init.num_parents = num_parents;
}
- clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev, &drvdata->pll[1].hw);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
@@ -1524,13 +1539,9 @@ static int si5351_i2c_probe(struct i2c_client *client,
sizeof(*drvdata->msynth), GFP_KERNEL);
drvdata->clkout = devm_kzalloc(&client->dev, num_clocks *
sizeof(*drvdata->clkout), GFP_KERNEL);
+ drvdata->num_clkout = num_clocks;
- drvdata->onecell.clk_num = num_clocks;
- drvdata->onecell.clks = devm_kzalloc(&client->dev,
- num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
-
- if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
- !drvdata->onecell.clks)) {
+ if (WARN_ON(!drvdata->msynth || !drvdata->clkout)) {
ret = -ENOMEM;
goto err_clk;
}
@@ -1547,11 +1558,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.flags |= CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
init.num_parents = 2;
- clk = devm_clk_register(&client->dev, &drvdata->msynth[n].hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev,
+ &drvdata->msynth[n].hw);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
}
@@ -1575,19 +1586,19 @@ static int si5351_i2c_probe(struct i2c_client *client,
init.flags |= CLK_SET_RATE_PARENT;
init.parent_names = parent_names;
init.num_parents = num_parents;
- clk = devm_clk_register(&client->dev, &drvdata->clkout[n].hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(&client->dev,
+ &drvdata->clkout[n].hw);
+ if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
- ret = PTR_ERR(clk);
goto err_clk;
}
- drvdata->onecell.clks[n] = clk;
/* set initial clkout rate */
if (pdata->clkout[n].rate != 0) {
int ret;
- ret = clk_set_rate(clk, pdata->clkout[n].rate);
+ ret = clk_set_rate(drvdata->clkout[n].hw.clk,
+ pdata->clkout[n].rate);
if (ret != 0) {
dev_err(&client->dev, "Cannot set rate : %d\n",
ret);
@@ -1595,8 +1606,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
}
}
- ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
- &drvdata->onecell);
+ ret = of_clk_add_hw_provider(client->dev.of_node, si53351_of_clk_get,
+ drvdata);
if (ret) {
dev_err(&client->dev, "unable to add clk provider\n");
goto err_clk;
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index d56648521a95..646af1d1898d 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -408,7 +408,6 @@ static int si570_probe(struct i2c_client *client,
{
struct clk_si570 *data;
struct clk_init_data init;
- struct clk *clk;
u32 initial_fout, factory_fout, stability;
int err;
enum clk_si570_variant variant = id->driver_data;
@@ -462,13 +461,13 @@ static int si570_probe(struct i2c_client *client,
if (err)
return err;
- clk = devm_clk_register(&client->dev, &data->hw);
- if (IS_ERR(clk)) {
+ err = devm_clk_hw_register(&client->dev, &data->hw);
+ if (err) {
dev_err(&client->dev, "clock registration failed\n");
- return PTR_ERR(clk);
+ return err;
}
- err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get,
- clk);
+ err = of_clk_add_hw_provider(client->dev.of_node, of_clk_hw_simple_get,
+ &data->hw);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
return err;
@@ -477,7 +476,7 @@ static int si570_probe(struct i2c_client *client,
/* Read the requested initial output frequency from device tree */
if (!of_property_read_u32(client->dev.of_node, "clock-frequency",
&initial_fout)) {
- err = clk_set_rate(clk, initial_fout);
+ err = clk_set_rate(data->hw.clk, initial_fout);
if (err) {
of_clk_del_provider(client->dev.of_node);
return err;
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 697c66757400..7b222a5db931 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -26,60 +26,73 @@
#include <linux/mfd/twl6040.h>
#include <linux/clk-provider.h>
-struct twl6040_clk {
+struct twl6040_pdmclk {
struct twl6040 *twl6040;
struct device *dev;
- struct clk_hw mcpdm_fclk;
- struct clk *clk;
+ struct clk_hw pdmclk_hw;
int enabled;
};
-static int twl6040_bitclk_is_enabled(struct clk_hw *hw)
+static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
{
- struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk,
- mcpdm_fclk);
- return twl6040_clk->enabled;
+ struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
+ pdmclk_hw);
+
+ return pdmclk->enabled;
}
-static int twl6040_bitclk_prepare(struct clk_hw *hw)
+static int twl6040_pdmclk_prepare(struct clk_hw *hw)
{
- struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk,
- mcpdm_fclk);
+ struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
+ pdmclk_hw);
int ret;
- ret = twl6040_power(twl6040_clk->twl6040, 1);
+ ret = twl6040_power(pdmclk->twl6040, 1);
if (!ret)
- twl6040_clk->enabled = 1;
+ pdmclk->enabled = 1;
return ret;
}
-static void twl6040_bitclk_unprepare(struct clk_hw *hw)
+static void twl6040_pdmclk_unprepare(struct clk_hw *hw)
{
- struct twl6040_clk *twl6040_clk = container_of(hw, struct twl6040_clk,
- mcpdm_fclk);
+ struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
+ pdmclk_hw);
int ret;
- ret = twl6040_power(twl6040_clk->twl6040, 0);
+ ret = twl6040_power(pdmclk->twl6040, 0);
if (!ret)
- twl6040_clk->enabled = 0;
+ pdmclk->enabled = 0;
+
}
-static const struct clk_ops twl6040_mcpdm_ops = {
- .is_enabled = twl6040_bitclk_is_enabled,
- .prepare = twl6040_bitclk_prepare,
- .unprepare = twl6040_bitclk_unprepare,
+static unsigned long twl6040_pdmclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
+ pdmclk_hw);
+
+ return twl6040_get_sysclk(pdmclk->twl6040);
+}
+
+static const struct clk_ops twl6040_pdmclk_ops = {
+ .is_prepared = twl6040_pdmclk_is_prepared,
+ .prepare = twl6040_pdmclk_prepare,
+ .unprepare = twl6040_pdmclk_unprepare,
+ .recalc_rate = twl6040_pdmclk_recalc_rate,
};
-static struct clk_init_data wm831x_clkout_init = {
- .name = "mcpdm_fclk",
- .ops = &twl6040_mcpdm_ops,
+static struct clk_init_data twl6040_pdmclk_init = {
+ .name = "pdmclk",
+ .ops = &twl6040_pdmclk_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
};
-static int twl6040_clk_probe(struct platform_device *pdev)
+static int twl6040_pdmclk_probe(struct platform_device *pdev)
{
struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent);
- struct twl6040_clk *clkdata;
+ struct twl6040_pdmclk *clkdata;
+ int ret;
clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL);
if (!clkdata)
@@ -88,26 +101,28 @@ static int twl6040_clk_probe(struct platform_device *pdev)
clkdata->dev = &pdev->dev;
clkdata->twl6040 = twl6040;
- clkdata->mcpdm_fclk.init = &wm831x_clkout_init;
- clkdata->clk = devm_clk_register(&pdev->dev, &clkdata->mcpdm_fclk);
- if (IS_ERR(clkdata->clk))
- return PTR_ERR(clkdata->clk);
+ clkdata->pdmclk_hw.init = &twl6040_pdmclk_init;
+ ret = devm_clk_hw_register(&pdev->dev, &clkdata->pdmclk_hw);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, clkdata);
- return 0;
+ return of_clk_add_hw_provider(pdev->dev.parent->of_node,
+ of_clk_hw_simple_get,
+ &clkdata->pdmclk_hw);
}
-static struct platform_driver twl6040_clk_driver = {
+static struct platform_driver twl6040_pdmclk_driver = {
.driver = {
- .name = "twl6040-clk",
+ .name = "twl6040-pdmclk",
},
- .probe = twl6040_clk_probe,
+ .probe = twl6040_pdmclk_probe,
};
-module_platform_driver(twl6040_clk_driver);
+module_platform_driver(twl6040_pdmclk_driver);
MODULE_DESCRIPTION("TWL6040 clock driver for McPDM functional clock");
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
-MODULE_ALIAS("platform:twl6040-clk");
+MODULE_ALIAS("platform:twl6040-pdmclk");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 37368a399ff9..4161a6f25741 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -232,7 +232,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
static __init void vtwm_device_clk_init(struct device_node *node)
{
u32 en_reg, div_reg;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_device *dev_clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -301,13 +301,14 @@ static __init void vtwm_device_clk_init(struct device_node *node)
dev_clk->hw.init = &init;
- clk = clk_register(NULL, &dev_clk->hw);
- if (WARN_ON(IS_ERR(clk))) {
+ hw = &dev_clk->hw;
+ rc = clk_hw_register(NULL, hw);
+ if (WARN_ON(rc)) {
kfree(dev_clk);
return;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
+ clk_hw_register_clkdev(hw, clk_name, NULL);
}
CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
@@ -681,7 +682,7 @@ static const struct clk_ops vtwm_pll_ops = {
static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
{
u32 reg;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_pll *pll_clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -714,13 +715,14 @@ static __init void vtwm_pll_clk_init(struct device_node *node, int pll_type)
pll_clk->hw.init = &init;
- clk = clk_register(NULL, &pll_clk->hw);
- if (WARN_ON(IS_ERR(clk))) {
+ hw = &pll_clk->hw;
+ rc = clk_hw_register(NULL, &pll_clk->hw);
+ if (WARN_ON(rc)) {
kfree(pll_clk);
return;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
+ clk_hw_register_clkdev(hw, clk_name, NULL);
}
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 88def4b2761c..f4fdac55727c 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -24,9 +24,6 @@ struct wm831x_clk {
struct clk_hw xtal_hw;
struct clk_hw fll_hw;
struct clk_hw clkout_hw;
- struct clk *xtal;
- struct clk *fll;
- struct clk *clkout;
bool xtal_ena;
};
@@ -370,19 +367,19 @@ static int wm831x_clk_probe(struct platform_device *pdev)
clkdata->xtal_ena = ret & WM831X_XTAL_ENA;
clkdata->xtal_hw.init = &wm831x_xtal_init;
- clkdata->xtal = devm_clk_register(&pdev->dev, &clkdata->xtal_hw);
- if (IS_ERR(clkdata->xtal))
- return PTR_ERR(clkdata->xtal);
+ ret = devm_clk_hw_register(&pdev->dev, &clkdata->xtal_hw);
+ if (ret)
+ return ret;
clkdata->fll_hw.init = &wm831x_fll_init;
- clkdata->fll = devm_clk_register(&pdev->dev, &clkdata->fll_hw);
- if (IS_ERR(clkdata->fll))
- return PTR_ERR(clkdata->fll);
+ ret = devm_clk_hw_register(&pdev->dev, &clkdata->fll_hw);
+ if (ret)
+ return ret;
clkdata->clkout_hw.init = &wm831x_clkout_init;
- clkdata->clkout = devm_clk_register(&pdev->dev, &clkdata->clkout_hw);
- if (IS_ERR(clkdata->clkout))
- return PTR_ERR(clkdata->clkout);
+ ret = devm_clk_hw_register(&pdev->dev, &clkdata->clkout_hw);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, clkdata);
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 343313250c58..5daddf5ecc4b 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -217,6 +217,226 @@ static void xgene_pcppllclk_init(struct device_node *np)
xgene_pllclk_init(np, PLL_TYPE_PCP);
}
+/**
+ * struct xgene_clk_pmd - PMD clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the fractional scale multiplier (scaler)
+ * @shift: shift to the unit bit field
+ * @mask: mask of the unit bit field
+ * @denom: 1/denominator unit
+ * @flags: PMD clock flags, see below
+ * @lock: register lock
+ * Flags:
+ * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
+ * from the register plus one. For example,
+ * 0 for (0 + 1) / denom,
+ * 1 for (1 + 1) / denom and so on.
+ * If this flag is set, it is
+ * 0 for (denom - 0) / denom,
+ * 1 for (denom - 1) / denom and so on.
+ *
+ */
+struct xgene_clk_pmd {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u32 mask;
+ u64 denom;
+ u32 flags;
+ spinlock_t *lock;
+};
+
+#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)
+
+#define XGENE_CLK_PMD_SCALE_INVERTED BIT(0)
+#define XGENE_CLK_PMD_SHIFT 8
+#define XGENE_CLK_PMD_WIDTH 3
+
+static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
+ unsigned long flags = 0;
+ u64 ret, scale;
+ u32 val;
+
+ if (fd->lock)
+ spin_lock_irqsave(fd->lock, flags);
+ else
+ __acquire(fd->lock);
+
+ val = clk_readl(fd->reg);
+
+ if (fd->lock)
+ spin_unlock_irqrestore(fd->lock, flags);
+ else
+ __release(fd->lock);
+
+ ret = (u64)parent_rate;
+
+ scale = (val & fd->mask) >> fd->shift;
+ if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
+ scale = fd->denom - scale;
+ else
+ scale++;
+
+ /* freq = parent_rate * scaler / denom */
+ do_div(ret, fd->denom);
+ ret *= scale;
+ if (ret == 0)
+ ret = (u64)parent_rate;
+
+ return ret;
+}
+
+static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
+ u64 ret, scale;
+
+ if (!rate || rate >= *parent_rate)
+ return *parent_rate;
+
+ /* freq = parent_rate * scaler / denom */
+ ret = rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+
+ ret = (u64)*parent_rate * scale;
+ do_div(ret, fd->denom);
+
+ return ret;
+}
+
+static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
+ unsigned long flags = 0;
+ u64 scale, ret;
+ u32 val;
+
+ /*
+ * Compute the scaler:
+ *
+ * freq = parent_rate * scaler / denom, or
+ * scaler = freq * denom / parent_rate
+ */
+ ret = rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);
+
+ /* Check if inverted */
+ if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
+ scale = fd->denom - scale;
+ else
+ scale--;
+
+ if (fd->lock)
+ spin_lock_irqsave(fd->lock, flags);
+ else
+ __acquire(fd->lock);
+
+ val = clk_readl(fd->reg);
+ val &= ~fd->mask;
+ val |= (scale << fd->shift);
+ clk_writel(val, fd->reg);
+
+ if (fd->lock)
+ spin_unlock_irqrestore(fd->lock, flags);
+ else
+ __release(fd->lock);
+
+ return 0;
+}
+
+static const struct clk_ops xgene_clk_pmd_ops = {
+ .recalc_rate = xgene_clk_pmd_recalc_rate,
+ .round_rate = xgene_clk_pmd_round_rate,
+ .set_rate = xgene_clk_pmd_set_rate,
+};
+
+static struct clk *
+xgene_register_clk_pmd(struct device *dev,
+ const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u8 shift,
+ u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
+{
+ struct xgene_clk_pmd *fd;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+ if (!fd)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &xgene_clk_pmd_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ fd->reg = reg;
+ fd->shift = shift;
+ fd->mask = (BIT(width) - 1) << shift;
+ fd->denom = denom;
+ fd->flags = clk_flags;
+ fd->lock = lock;
+ fd->hw.init = &init;
+
+ clk = clk_register(dev, &fd->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: could not register clk %s\n", __func__, name);
+ kfree(fd);
+ return NULL;
+ }
+
+ return clk;
+}
+
+static void xgene_pmdclk_init(struct device_node *np)
+{
+ const char *clk_name = np->full_name;
+ void __iomem *csr_reg;
+ struct resource res;
+ struct clk *clk;
+ u64 denom;
+ u32 flags = 0;
+ int rc;
+
+ /* Check if the entry is disabled */
+ if (!of_device_is_available(np))
+ return;
+
+ /* Parse the DTS register for resource */
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc != 0) {
+ pr_err("no DTS register for %s\n", np->full_name);
+ return;
+ }
+ csr_reg = of_iomap(np, 0);
+ if (!csr_reg) {
+ pr_err("Unable to map resource for %s\n", np->full_name);
+ return;
+ }
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ denom = BIT(XGENE_CLK_PMD_WIDTH);
+ flags |= XGENE_CLK_PMD_SCALE_INVERTED;
+
+ clk = xgene_register_clk_pmd(NULL, clk_name,
+ of_clk_get_parent_name(np, 0), 0,
+ csr_reg, XGENE_CLK_PMD_SHIFT,
+ XGENE_CLK_PMD_WIDTH, denom,
+ flags, &clk_lock);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ pr_debug("Add %s clock\n", clk_name);
+ } else {
+ if (csr_reg)
+ iounmap(csr_reg);
+ }
+}
+
/* IP Clock */
struct xgene_dev_parameters {
void __iomem *csr_reg; /* CSR for IP clock */
@@ -543,6 +763,7 @@ err:
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
+CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
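
As a quick sanity check on the PMD fractional-scaler arithmetic introduced above, a small host-side C sketch with invented numbers (denom = 2^3 as registered by xgene_pmdclk_init(), scaler field = 2, inverted scale):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent_rate = 100000000;	/* hypothetical 100 MHz parent */
	uint64_t denom = 1ULL << 3;		/* XGENE_CLK_PMD_WIDTH = 3 -> 8 */
	uint32_t field = 2;			/* value read from the scaler bits */
	uint64_t scale, rate;

	/* the clock is registered with XGENE_CLK_PMD_SCALE_INVERTED */
	scale = denom - field;			/* 8 - 2 = 6 */

	/* same order as xgene_clk_pmd_recalc_rate(): divide, then multiply */
	rate = parent_rate / denom * scale;	/* 12.5 MHz * 6 = 75 MHz */

	printf("rate = %llu Hz\n", (unsigned long long)rate);
	return 0;
}
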
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 820a939fb6bb..0fb39fe217d1 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1908,10 +1908,6 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
- /* bail early if nothing to do */
- if (degrees == clk->core->phase)
- goto out;
-
trace_clk_set_phase(clk->core, degrees);
if (clk->core->ops->set_phase)
@@ -1922,7 +1918,6 @@ int clk_set_phase(struct clk *clk, int degrees)
if (!ret)
clk->core->phase = degrees;
-out:
clk_prepare_unlock();
return ret;
@@ -2449,8 +2444,16 @@ static int __clk_core_init(struct clk_core *core)
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
struct clk_core *parent = __clk_init_parent(orphan);
- if (parent)
- clk_core_reparent(orphan, parent);
+ /*
+ * we could call __clk_set_parent, but that would result in a
+ * redundant call to the .set_rate op, if it exists
+ */
+ if (parent) {
+ __clk_set_parent_before(orphan, parent);
+ __clk_set_parent_after(orphan, parent, NULL);
+ __clk_recalc_accuracies(orphan);
+ __clk_recalc_rates(orphan, 0);
+ }
}
/*
@@ -2491,7 +2494,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
/* This is to allow this function to be chained to others */
if (IS_ERR_OR_NULL(hw))
- return (struct clk *) hw;
+ return ERR_CAST(hw);
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk)
@@ -3166,19 +3169,14 @@ __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
{
struct clk *clk;
- struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
- if (provider->get_hw) {
- hw = provider->get_hw(clkspec, provider->data);
- } else if (provider->get) {
- clk = provider->get(clkspec, provider->data);
- if (!IS_ERR(clk))
- hw = __clk_get_hw(clk);
- else
- hw = ERR_CAST(clk);
- }
+ if (provider->get_hw)
+ return provider->get_hw(clkspec, provider->data);
- return hw;
+ clk = provider->get(clkspec, provider->data);
+ if (IS_ERR(clk))
+ return ERR_CAST(clk);
+ return __clk_get_hw(clk);
}
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
@@ -3186,7 +3184,7 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
- struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
+ struct clk_hw *hw;
if (!clkspec)
return ERR_PTR(-EINVAL);
@@ -3194,12 +3192,13 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
/* Check if we have such a provider in our array */
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
- if (provider->node == clkspec->np)
+ if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
- if (!IS_ERR(hw)) {
clk = __clk_create_clk(hw, dev_id, con_id);
+ }
- if (!IS_ERR(clk) && !__clk_get(clk)) {
+ if (!IS_ERR(clk)) {
+ if (!__clk_get(clk)) {
__clk_free_clk(clk);
clk = ERR_PTR(-ENOENT);
}
@@ -3451,6 +3450,10 @@ void __init of_clk_init(const struct of_device_id *matches)
&clk_provider_list, node) {
if (force || parent_ready(clk_provider->np)) {
+ /* Don't populate platform devices */
+ of_node_set_flag(clk_provider->np,
+ OF_POPULATED);
+
clk_provider->clk_init_cb(clk_provider->np);
of_clk_set_defaults(clk_provider->np, true);
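
One practical consequence of the OF_POPULATED marking added above: a clock node handled via CLK_OF_DECLARE() no longer gets a platform device from of_platform_default_populate(). A hypothetical init hook that still wants one would have to clear the flag itself, roughly like this (compatible string and function name invented):

#include <linux/clk-provider.h>
#include <linux/of.h>

static void __init example_clk_of_init(struct device_node *np)
{
	/* ... register the clocks described by np ... */

	/* let of_platform_default_populate() create a device for np again */
	of_node_clear_flag(np, OF_POPULATED);
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", example_clk_of_init);
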
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index 4bf44a25d950..715b882205a8 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -14,7 +14,7 @@ static DEFINE_SPINLOCK(clklock);
static void __init h8300_div_clk_setup(struct device_node *node)
{
unsigned int num_parents;
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
const char *parent_name;
void __iomem *divcr = NULL;
@@ -38,15 +38,15 @@ static void __init h8300_div_clk_setup(struct device_node *node)
parent_name = of_clk_get_parent_name(node, 0);
of_property_read_u32(node, "renesas,width", &width);
- clk = clk_register_divider(NULL, clk_name, parent_name,
+ hw = clk_hw_register_divider(NULL, clk_name, parent_name,
CLK_SET_RATE_GATE, divcr, offset, width,
CLK_DIVIDER_POWER_OF_TWO, &clklock);
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (!IS_ERR(hw)) {
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
return;
}
pr_err("%s: failed to register %s div clock (%ld)\n",
- __func__, clk_name, PTR_ERR(clk));
+ __func__, clk_name, PTR_ERR(hw));
error:
if (divcr)
iounmap(divcr);
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index c9c2fd575ef7..a26312460621 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -84,11 +84,11 @@ static const struct clk_ops pll_ops = {
static void __init h8s2678_pll_clk_setup(struct device_node *node)
{
unsigned int num_parents;
- struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
struct pll_clock *pll_clock;
struct clk_init_data init;
+ int ret;
num_parents = of_clk_get_parent_count(node);
if (!num_parents) {
@@ -121,14 +121,14 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
init.num_parents = 1;
pll_clock->hw.init = &init;
- clk = clk_register(NULL, &pll_clock->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register %s div clock (%ld)\n",
- __func__, clk_name, PTR_ERR(clk));
+ ret = clk_hw_register(NULL, &pll_clock->hw);
+ if (ret) {
+ pr_err("%s: failed to register %s div clock (%d)\n",
+ __func__, clk_name, ret);
goto unmap_pllcr;
}
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clock->hw);
return;
unmap_pllcr:
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c
index 99cf802fa51f..eaa462ad09e8 100644
--- a/drivers/clk/imx/clk-imx1.c
+++ b/drivers/clk/imx/clk-imx1.c
@@ -45,10 +45,13 @@ static void __iomem *ccm __initdata;
#define CCM_PCDR (ccm + 0x0020)
#define SCM_GCCR (ccm + 0x0810)
-static void __init _mx1_clocks_init(unsigned long fref)
+static void __init mx1_clocks_init_dt(struct device_node *np)
{
+ ccm = of_iomap(np, 0);
+ BUG_ON(!ccm);
+
clk[IMX1_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", fref);
+ clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", 32768);
clk[IMX1_CLK_CLK16M_EXT] = imx_clk_fixed("clk16m_ext", 16000000);
clk[IMX1_CLK_CLK16M] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
clk[IMX1_CLK_CLK32_PREMULT] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
@@ -74,45 +77,6 @@ static void __init _mx1_clocks_init(unsigned long fref)
clk[IMX1_CLK_USBD_GATE] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
imx_check_clocks(clk, ARRAY_SIZE(clk));
-}
-
-int __init mx1_clocks_init(unsigned long fref)
-{
- ccm = ioremap(MX1_CCM_BASE_ADDR, SZ_4K);
- BUG_ON(!ccm);
-
- _mx1_clocks_init(fref);
-
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx-gpt.0");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[IMX1_CLK_DMA_GATE], "ahb", "imx1-dma");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-dma");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.0");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.0");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.1");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.1");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.2");
- clk_register_clkdev(clk[IMX1_CLK_UART3_GATE], "ipg", "imx1-uart.2");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], NULL, "imx1-i2c.0");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.0");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.1");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.1");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-fb.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-fb.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ahb", "imx1-fb.0");
-
- mxc_timer_init(MX1_TIM1_BASE_ADDR, MX1_TIM1_INT, GPT_TYPE_IMX1);
-
- return 0;
-}
-
-static void __init mx1_clocks_init_dt(struct device_node *np)
-{
- ccm = of_iomap(np, 0);
- BUG_ON(!ccm);
-
- _mx1_clocks_init(32768);
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index b0978d3b83e2..203cad6c9aab 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -66,20 +66,22 @@ static const char *std_sel[] = {"ppll", "arm"};
static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
enum mx35_clks {
- ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
- arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
- esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
- spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
- ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
- audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
- edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
- esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
- gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
- kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
- rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
- ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
- wdog_gate, max_gate, admux_gate, csi_gate, csi_div, csi_sel, iim_gate,
- gpu2d_gate, ckil, clk_max
+ /* 0 */ ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb,
+ /* 9 */ ipg, arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div,
+ /* 15 */ esdhc_sel, esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel,
+ /* 20 */ spdif_div_pre, spdif_div_post, ssi_sel, ssi1_div_pre,
+ /* 24 */ ssi1_div_post, ssi2_div_pre, ssi2_div_post, usb_sel, usb_div,
+ /* 29 */ nfc_div, asrc_gate, pata_gate, audmux_gate, can1_gate,
+ /* 34 */ can2_gate, cspi1_gate, cspi2_gate, ect_gate, edio_gate,
+ /* 39 */ emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
+ /* 44 */ esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate,
+ /* 49 */ gpio3_gate, gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate,
+ /* 54 */ iomuxc_gate, ipu_gate, kpp_gate, mlb_gate, mshc_gate,
+ /* 59 */ owire_gate, pwm_gate, rngc_gate, rtc_gate, rtic_gate, scc_gate,
+ /* 65 */ sdma_gate, spba_gate, spdif_gate, ssi1_gate, ssi2_gate,
+ /* 70 */ uart1_gate, uart2_gate, uart3_gate, usbotg_gate, wdog_gate,
+ /* 75 */ max_gate, admux_gate, csi_gate, csi_div, csi_sel, iim_gate,
+ /* 81 */ gpu2d_gate, ckil, clk_max
};
static struct clk *clk[clk_max];
@@ -115,7 +117,7 @@ static void __init _mx35_clocks_init(void)
}
clk[ckih] = imx_clk_fixed("ckih", 24000000);
- clk[ckil] = imx_clk_fixed("ckih", 32768);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index 29d4c44ef356..1e3c9ea5f9dc 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -126,6 +126,7 @@ static const char *spdif0_com_sel[] = { "spdif0_podf", "ssi1_root_gate", };
static const char *mx51_spdif1_com_sel[] = { "spdif1_podf", "ssi2_root_gate", };
static const char *step_sels[] = { "lp_apm", };
static const char *cpu_podf_sels[] = { "pll1_sw", "step_sel" };
+static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_clk */, "dummy" /* fec_phy_clk */ };
static struct clk *clk[IMX5_CLK_END];
static struct clk_onecell_data clk_data;
@@ -543,6 +544,25 @@ static void __init mx53_clocks_init(struct device_node *np)
clk[IMX5_CLK_I2C3_GATE] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
clk[IMX5_CLK_SATA_GATE] = imx_clk_gate2("sata_gate", "ipg", MXC_CCM_CCGR4, 2);
+ clk[IMX5_CLK_FIRI_SEL] = imx_clk_mux("firi_sel", MXC_CCM_CSCMR2, 12, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_FIRI_PRED] = imx_clk_divider("firi_pred", "firi_sel", MXC_CCM_CSCDR3, 6, 3);
+ clk[IMX5_CLK_FIRI_PODF] = imx_clk_divider("firi_podf", "firi_pred", MXC_CCM_CSCDR3, 0, 6);
+ clk[IMX5_CLK_FIRI_SERIAL_GATE] = imx_clk_gate2("firi_serial_gate", "firi_podf", MXC_CCM_CCGR1, 28);
+ clk[IMX5_CLK_FIRI_IPG_GATE] = imx_clk_gate2("firi_ipg_gate", "ipg", MXC_CCM_CCGR1, 26);
+
+ clk[IMX5_CLK_CSI0_MCLK1_SEL] = imx_clk_mux("csi0_mclk1_sel", MXC_CCM_CSCMR2, 22, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_CSI0_MCLK1_PRED] = imx_clk_divider("csi0_mclk1_pred", "csi0_mclk1_sel", MXC_CCM_CSCDR4, 6, 3);
+ clk[IMX5_CLK_CSI0_MCLK1_PODF] = imx_clk_divider("csi0_mclk1_podf", "csi0_mclk1_pred", MXC_CCM_CSCDR4, 0, 6);
+ clk[IMX5_CLK_CSI0_MCLK1_GATE] = imx_clk_gate2("csi0_mclk1_serial_gate", "csi0_mclk1_podf", MXC_CCM_CCGR6, 4);
+
+ clk[IMX5_CLK_IEEE1588_SEL] = imx_clk_mux("ieee1588_sel", MXC_CCM_CSCMR2, 14, 2,
+ ieee1588_sels, ARRAY_SIZE(ieee1588_sels));
+ clk[IMX5_CLK_IEEE1588_PRED] = imx_clk_divider("ieee1588_pred", "ieee1588_sel", MXC_CCM_CSCDR2, 6, 3);
+ clk[IMX5_CLK_IEEE1588_PODF] = imx_clk_divider("ieee1588_podf", "ieee1588_pred", MXC_CCM_CSCDR2, 0, 6);
+ clk[IMX5_CLK_IEEE1588_GATE] = imx_clk_gate2("ieee1588_serial_gate", "ieee1588_podf", MXC_CCM_CCGR7, 6);
+
clk[IMX5_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
clk[IMX5_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ba1c1ae72ac2..ce8ea10407e4 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -318,11 +318,16 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels));
clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2));
+ } else if (clk_on_imx6dl()) {
+ clk[IMX6QDL_CLK_MLB_SEL] = imx_clk_mux("mlb_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
} else {
clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
}
clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
- clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ else
+ clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
@@ -400,9 +405,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
}
- clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_MLB_PODF] = imx_clk_divider("mlb_podf", "mlb_sel", base + 0x18, 23, 3);
+ else
+ clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
- clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 29, 3);
+ else
+ clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
@@ -473,14 +484,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
clk[IMX6QDL_CLK_GPT_IPG] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
clk[IMX6QDL_CLK_GPT_IPG_PER] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
- if (clk_on_imx6dl())
- /*
- * The multiplexer and divider of imx6q clock gpu3d_shader get
- * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl.
- */
- clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24);
- else
- clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
@@ -511,7 +515,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* The multiplexer and divider of the imx6q clock gpu2d get
* redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl.
*/
- clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18);
+ clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18);
else
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
@@ -629,6 +633,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
+ /*
+ * Initialize the GPU clock muxes, so that the maximum specified clock
+ * rates for the respective SoC are not exceeded.
+ */
+ if (clk_on_imx6dl()) {
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ } else if (clk_on_imx6q()) {
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
+ clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
+ }
+
imx_register_uart_clocks(uart_clks);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
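
To make the i.MX6Q/i.MX6DL register reuse above easier to follow, a summary of how the same CCM fields are named on each SoC after this change (derived purely from the hunks; offsets are relative to the CCM base):

/*
 *   base + 0x18, bits 16..17:  i.MX6Q gpu2d_core_sel    i.MX6DL mlb_sel
 *   base + 0x18, bits  8..9:   i.MX6Q gpu3d_shader_sel  i.MX6DL gpu2d_core_sel
 *   base + 0x18, bits 23..25:  i.MX6Q gpu2d_core_podf   i.MX6DL mlb_podf
 *   base + 0x18, bits 29..31:  i.MX6Q gpu3d_shader      i.MX6DL gpu2d_core_podf
 *
 * With the dividers named per SoC, the gpu2d_core gate (base + 0x6c, bit 24)
 * can take "gpu2d_core_podf" as parent on both parts, and the i.MX6DL mlb
 * gate (base + 0x74, bit 18) takes "mlb_podf".
 */
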
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 6ed4f8fa0667..e7c7353a86fc 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -22,43 +22,63 @@
#include "clk.h"
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+
+static struct clk_div_table test_div_table[] = {
+ { .val = 3, .div = 1, },
+ { .val = 2, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 4, },
+ { }
+};
+
+static struct clk_div_table post_div_table[] = {
+ { .val = 3, .div = 4, },
+ { .val = 2, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 1, },
+ { }
+};
+
static struct clk *clks[IMX7D_CLK_END];
static const char *arm_a7_sel[] = { "osc", "pll_arm_main_clk",
"pll_enet_500m_clk", "pll_dram_main_clk",
- "pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_audio_main_clk",
+ "pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_audio_post_div",
"pll_usb_main_clk", };
static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_250m_clk", "pll_sys_pfd2_270m_clk",
- "pll_dram_533m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
static const char *arm_m0_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_125m_clk", "pll_sys_pfd2_135m_clk",
- "pll_dram_533m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_sys_pfd7_clk", };
+ "pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
static const char *disp_axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd6_clk",
- "pll_sys_pfd7_clk", "pll_audio_main_clk", "pll_video_main_clk", };
+ "pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_main_clk", };
static const char *enet_axi_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk",
- "pll_sys_main_240m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_sys_pfd4_clk", };
static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_main_240m_clk",
"pll_sys_pfd2_135m_clk", "pll_sys_pfd6_clk", "pll_enet_250m_clk",
- "pll_audio_main_clk", };
+ "pll_audio_post_div", };
static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk",
- "pll_enet_125m_clk", "pll_usb_main_clk", "pll_audio_main_clk",
+ "pll_enet_125m_clk", "pll_usb_main_clk", "pll_audio_post_div",
"pll_video_main_clk", };
static const char *dram_phym_sel[] = { "pll_dram_main_clk",
@@ -69,13 +89,13 @@ static const char *dram_sel[] = { "pll_dram_main_clk",
static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
- "pll_usb_main_clk", "pll_sys_pfd7_clk", "pll_audio_main_clk",
+ "pll_usb_main_clk", "pll_sys_pfd7_clk", "pll_audio_post_div",
"pll_video_main_clk", };
static const char *dram_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_sys_pfd0_392m_clk",
- "pll_audio_main_clk", "pll_sys_pfd2_270m_clk", };
+ "pll_audio_post_div", "pll_sys_pfd2_270m_clk", };
static const char *usb_hsic_sel[] = { "osc", "pll_sys_main_clk",
"pll_usb_main_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
@@ -101,53 +121,53 @@ static const char *lcdif_pixel_sel[] = { "osc", "pll_sys_pfd5_clk",
static const char *mipi_dsi_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_main_clk", };
+ "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
static const char *mipi_csi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_main_clk", };
+ "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
static const char *mipi_dphy_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_pfd5_clk", "ref_1m_clk", "ext_clk_2",
"pll_video_main_clk", "ext_clk_3", };
static const char *sai1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_main_clk", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_main_clk", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai3_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_main_clk", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_3", };
static const char *spdif_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_main_clk", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_3_clk", };
static const char *enet1_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
"ext_clk_4", };
static const char *enet1_time_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_audio_main_clk", "ext_clk_1", "ext_clk_2", "ext_clk_3",
+ "pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
"ext_clk_4", "pll_video_main_clk", };
static const char *enet2_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
"ext_clk_4", };
static const char *enet2_time_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_audio_main_clk", "ext_clk_1", "ext_clk_2", "ext_clk_3",
+ "pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
"ext_clk_4", "pll_video_main_clk", };
static const char *enet_phy_ref_sel[] = { "osc", "pll_enet_25m_clk",
"pll_enet_50m_clk", "pll_enet_125m_clk",
- "pll_dram_533m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_sys_pfd3_clk", };
static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -188,22 +208,22 @@ static const char *can2_sel[] = { "osc", "pll_sys_main_120m_clk",
static const char *i2c1_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c2_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c3_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c4_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *uart1_sel[] = { "osc", "pll_sys_main_240m_clk",
@@ -262,32 +282,32 @@ static const char *ecspi4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_usb_main_clk", };
static const char *pwm1_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
static const char *pwm2_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
static const char *pwm3_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
static const char *pwm4_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
static const char *flextimer1_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
static const char *flextimer2_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_main_clk",
+ "pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
"ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_usb_main_clk", "pll_audio_main_clk", "pll_enet_125m_clk",
+ "pll_usb_main_clk", "pll_audio_post_div", "pll_enet_125m_clk",
"pll_sys_pfd7_clk", };
static const char *sim2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -297,19 +317,19 @@ static const char *sim2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *gpt1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
- "ref_1m_clk", "pll_audio_main_clk", "ext_clk_1", };
+ "ref_1m_clk", "pll_audio_post_div", "ext_clk_1", };
static const char *gpt2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
- "ref_1m_clk", "pll_audio_main_clk", "ext_clk_2", };
+ "ref_1m_clk", "pll_audio_post_div", "ext_clk_2", };
static const char *gpt3_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
- "ref_1m_clk", "pll_audio_main_clk", "ext_clk_3", };
+ "ref_1m_clk", "pll_audio_post_div", "ext_clk_3", };
static const char *gpt4_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
- "ref_1m_clk", "pll_audio_main_clk", "ext_clk_4", };
+ "ref_1m_clk", "pll_audio_post_div", "ext_clk_4", };
static const char *trace_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
@@ -323,12 +343,12 @@ static const char *wdog_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *csi_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
static const char *audio_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_main_clk", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
static const char *wrclk_sel[] = { "osc", "pll_enet_40m_clk",
@@ -342,13 +362,13 @@ static const char *clko1_sel[] = { "osc", "pll_sys_main_clk",
static const char *clko2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_sys_pfd0_392m_clk", "pll_sys_pfd1_166m_clk", "pll_sys_pfd4_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "ckil", };
+ "pll_audio_post_div", "pll_video_main_clk", "ckil", };
static const char *lvds1_sel[] = { "pll_arm_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd1_332m_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd5_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "pll_enet_500m_clk",
+ "pll_audio_post_div", "pll_video_main_clk", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_enet_125m_clk", "pll_enet_100m_clk",
"pll_enet_50m_clk", "pll_enet_40m_clk", "pll_enet_25m_clk",
"pll_dram_main_clk", };
@@ -430,6 +450,11 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13);
clks[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13);
+ clks[IMX7D_PLL_AUDIO_TEST_DIV] = clk_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 19, 2, 0, test_div_table, &imx_ccm_lock);
+ clks[IMX7D_PLL_AUDIO_POST_DIV] = clk_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 22, 2, 0, post_div_table, &imx_ccm_lock);
+
clks[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0);
clks[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1);
clks[IMX7D_PLL_SYS_PFD2_270M_CLK] = imx_clk_pfd("pll_sys_pfd2_270m_clk", "pll_sys_main_clk", base + 0xc0, 2);
@@ -779,6 +804,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0);
clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0);
+ clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
@@ -786,9 +812,12 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
- clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate4("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0);
- clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate4("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0);
- clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate4("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0);
+ clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate2_shared2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0, &share_count_sai1);
+ clks[IMX7D_SAI1_IPG_CLK] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_root_clk", base + 0x48c0, 0, &share_count_sai1);
+ clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate2_shared2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0, &share_count_sai2);
+ clks[IMX7D_SAI2_IPG_CLK] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root_clk", base + 0x48d0, 0, &share_count_sai2);
+ clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate2_shared2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0, &share_count_sai3);
+ clks[IMX7D_SAI3_IPG_CLK] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root_clk", base + 0x48e0, 0, &share_count_sai3);
clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0);
clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate4("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0);
clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate4("enet1_time_root_clk", "enet1_time_post_div", base + 0x44f0, 0);
@@ -860,8 +889,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
/* use old gpt clk setting, gpt1 root clk must be twice the gpt counter freq */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
- clk_set_parent(clks[IMX7D_ENET_AXI_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_250M_CLK]);
-
/* set uart module clock's parent clock source, which must be greater than 80MHz */
clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
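
A small host-side sketch of how the new pll_audio_test_div / pll_audio_post_div tables above resolve; the register field values and the 786432000 Hz PLL rate are just example inputs:

#include <stdio.h>

struct div_entry { unsigned int val, div; };

static const struct div_entry test_div[] = { {3, 1}, {2, 1}, {1, 2}, {0, 4} };
static const struct div_entry post_div[] = { {3, 4}, {2, 1}, {1, 2}, {0, 1} };

static unsigned int lookup(const struct div_entry *t, unsigned int n,
			   unsigned int val)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (t[i].val == val)
			return t[i].div;
	return 1;	/* cannot happen for a 2-bit field */
}

int main(void)
{
	unsigned long long pll_audio = 786432000ULL;	/* example PLL rate */
	unsigned int test_val = 1, post_val = 3;	/* example register fields */
	unsigned long long rate;

	rate = pll_audio / lookup(test_div, 4, test_val);	/* /2 */
	rate /= lookup(post_div, 4, post_val);			/* /4 */

	printf("pll_audio_post_div = %llu Hz\n", rate);		/* 98304000 */
	return 0;
}
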
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index a81c0385ed64..3799ff82a9b4 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -134,6 +134,15 @@ static inline struct clk *imx_clk_gate2_shared(const char *name,
shift, 0x3, 0, &imx_ccm_lock, share_count);
}
+static inline struct clk *imx_clk_gate2_shared2(const char *name,
+ const char *parent, void __iomem *reg, u8 shift,
+ unsigned int *share_count)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0,
+ &imx_ccm_lock, share_count);
+}
+
static inline struct clk *imx_clk_gate2_cgr(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
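
A minimal usage sketch of the new imx_clk_gate2_shared2() helper above; the clock names, register offset and share counter are invented:

static unsigned int share_count_example;

static void example_register_shared_gates(struct clk **clks,
					   void __iomem *base)
{
	/*
	 * Both logical clocks sit behind the same gate bits; the shared
	 * counter keeps the gate enabled until the last user is disabled.
	 */
	clks[0] = imx_clk_gate2_shared2("example_root_clk", "example_post_div",
					base + 0x4000, 0, &share_count_example);
	clks[1] = imx_clk_gate2_shared2("example_ipg_clk", "ipg_root_clk",
					base + 0x4000, 0, &share_count_example);
}
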
diff --git a/drivers/clk/loongson1/Makefile b/drivers/clk/loongson1/Makefile
new file mode 100644
index 000000000000..b7f6a16390e0
--- /dev/null
+++ b/drivers/clk/loongson1/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk.o
+obj-$(CONFIG_LOONGSON1_LS1B) += clk-loongson1b.o
+obj-$(CONFIG_LOONGSON1_LS1C) += clk-loongson1c.o
diff --git a/drivers/clk/loongson1/clk-loongson1b.c b/drivers/clk/loongson1/clk-loongson1b.c
new file mode 100644
index 000000000000..f36a97e993c0
--- /dev/null
+++ b/drivers/clk/loongson1/clk-loongson1b.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <loongson1.h>
+#include "clk.h"
+
+#define OSC (33 * 1000000)
+#define DIV_APB 2
+
+static DEFINE_SPINLOCK(_lock);
+
+static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 pll, rate;
+
+ pll = __raw_readl(LS1X_CLK_PLL_FREQ);
+ rate = 12 + (pll & GENMASK(5, 0));
+ rate *= OSC;
+ rate >>= 1;
+
+ return rate;
+}
+
+static const struct clk_ops ls1x_pll_clk_ops = {
+ .recalc_rate = ls1x_pll_recalc_rate,
+};
+
+static const char *const cpu_parents[] = { "cpu_clk_div", "osc_clk", };
+static const char *const ahb_parents[] = { "ahb_clk_div", "osc_clk", };
+static const char *const dc_parents[] = { "dc_clk_div", "osc_clk", };
+
+void __init ls1x_clk_init(void)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "osc_clk", NULL, 0, OSC);
+ clk_hw_register_clkdev(hw, "osc_clk", NULL);
+
+ /* clock derived from 33 MHz OSC clk */
+ hw = clk_hw_register_pll(NULL, "pll_clk", "osc_clk",
+ &ls1x_pll_clk_ops, 0);
+ clk_hw_register_clkdev(hw, "pll_clk", NULL);
+
+ /* clock derived from PLL clk */
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ CPU CLK
+ * \___ PLL ___ CPU DIV ___| |
+ * |_____|
+ */
+ hw = clk_hw_register_divider(NULL, "cpu_clk_div", "pll_clk",
+ CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
+ DIV_CPU_SHIFT, DIV_CPU_WIDTH,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST, &_lock);
+ clk_hw_register_clkdev(hw, "cpu_clk_div", NULL);
+ hw = clk_hw_register_mux(NULL, "cpu_clk", cpu_parents,
+ ARRAY_SIZE(cpu_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock);
+ clk_hw_register_clkdev(hw, "cpu_clk", NULL);
+
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ DC CLK
+ * \___ PLL ___ DC DIV ___| |
+ * |_____|
+ */
+ hw = clk_hw_register_divider(NULL, "dc_clk_div", "pll_clk",
+ 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
+ DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
+ clk_hw_register_clkdev(hw, "dc_clk_div", NULL);
+ hw = clk_hw_register_mux(NULL, "dc_clk", dc_parents,
+ ARRAY_SIZE(dc_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock);
+ clk_hw_register_clkdev(hw, "dc_clk", NULL);
+
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ DDR CLK
+ * \___ PLL ___ DDR DIV ___| |
+ * |_____|
+ */
+ hw = clk_hw_register_divider(NULL, "ahb_clk_div", "pll_clk",
+ 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
+ DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED,
+ &_lock);
+ clk_hw_register_clkdev(hw, "ahb_clk_div", NULL);
+ hw = clk_hw_register_mux(NULL, "ahb_clk", ahb_parents,
+ ARRAY_SIZE(ahb_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock);
+ clk_hw_register_clkdev(hw, "ahb_clk", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-dma", NULL);
+ clk_hw_register_clkdev(hw, "stmmaceth", NULL);
+
+ /* clock derived from AHB clk */
+ /* APB clk is always half of the AHB clk */
+ hw = clk_hw_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ DIV_APB);
+ clk_hw_register_clkdev(hw, "apb_clk", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-ac97", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-i2c", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-nand", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-pwmtimer", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-spi", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-wdt", NULL);
+ clk_hw_register_clkdev(hw, "serial8250", NULL);
+}
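
A host-side check of the LS1B PLL formula above, using a made-up LS1X_CLK_PLL_FREQ value:

#include <stdio.h>

int main(void)
{
	unsigned int osc = 33 * 1000000;	/* OSC as defined above */
	unsigned int pll_freq = 0x14;		/* hypothetical register value */
	unsigned int rate;

	rate = 12 + (pll_freq & 0x3f);		/* 12 + 20 = 32 */
	rate *= osc;				/* 32 * 33 MHz = 1056 MHz */
	rate >>= 1;				/* pll_clk = 528 MHz */

	printf("pll_clk = %u Hz\n", rate);	/* 528000000 */
	return 0;
}
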
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
new file mode 100644
index 000000000000..3466f7320b40
--- /dev/null
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+
+#include <loongson1.h>
+#include "clk.h"
+
+#define OSC (24 * 1000000)
+#define DIV_APB 1
+
+static DEFINE_SPINLOCK(_lock);
+
+static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 pll, rate;
+
+ pll = __raw_readl(LS1X_CLK_PLL_FREQ);
+ rate = ((pll >> 8) & 0xff) + ((pll >> 16) & 0xff);
+ rate *= OSC;
+ rate >>= 2;
+
+ return rate;
+}
+
+static const struct clk_ops ls1x_pll_clk_ops = {
+ .recalc_rate = ls1x_pll_recalc_rate,
+};
+
+static const struct clk_div_table ahb_div_table[] = {
+ [0] = { .val = 0, .div = 2 },
+ [1] = { .val = 1, .div = 4 },
+ [2] = { .val = 2, .div = 3 },
+ [3] = { .val = 3, .div = 3 },
+};
+
+void __init ls1x_clk_init(void)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "osc_clk", NULL, 0, OSC);
+ clk_hw_register_clkdev(hw, "osc_clk", NULL);
+
+ /* clock derived from 24 MHz OSC clk */
+ hw = clk_hw_register_pll(NULL, "pll_clk", "osc_clk",
+ &ls1x_pll_clk_ops, 0);
+ clk_hw_register_clkdev(hw, "pll_clk", NULL);
+
+ hw = clk_hw_register_divider(NULL, "cpu_clk_div", "pll_clk",
+ CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
+ DIV_CPU_SHIFT, DIV_CPU_WIDTH,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST, &_lock);
+ clk_hw_register_clkdev(hw, "cpu_clk_div", NULL);
+ hw = clk_hw_register_fixed_factor(NULL, "cpu_clk", "cpu_clk_div",
+ 0, 1, 1);
+ clk_hw_register_clkdev(hw, "cpu_clk", NULL);
+
+ hw = clk_hw_register_divider(NULL, "dc_clk_div", "pll_clk",
+ 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
+ DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
+ clk_hw_register_clkdev(hw, "dc_clk_div", NULL);
+ hw = clk_hw_register_fixed_factor(NULL, "dc_clk", "dc_clk_div",
+ 0, 1, 1);
+ clk_hw_register_clkdev(hw, "dc_clk", NULL);
+
+ hw = clk_hw_register_divider_table(NULL, "ahb_clk_div", "cpu_clk_div",
+ 0, LS1X_CLK_PLL_FREQ, DIV_DDR_SHIFT,
+ DIV_DDR_WIDTH, CLK_DIVIDER_ALLOW_ZERO,
+ ahb_div_table, &_lock);
+ clk_hw_register_clkdev(hw, "ahb_clk_div", NULL);
+ hw = clk_hw_register_fixed_factor(NULL, "ahb_clk", "ahb_clk_div",
+ 0, 1, 1);
+ clk_hw_register_clkdev(hw, "ahb_clk", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-dma", NULL);
+ clk_hw_register_clkdev(hw, "stmmaceth", NULL);
+
+ /* clock derived from AHB clk */
+ hw = clk_hw_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ DIV_APB);
+ clk_hw_register_clkdev(hw, "apb_clk", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-ac97", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-i2c", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-nand", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-pwmtimer", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-spi", NULL);
+ clk_hw_register_clkdev(hw, "ls1x-wdt", NULL);
+ clk_hw_register_clkdev(hw, "serial8250", NULL);
+}
diff --git a/drivers/clk/loongson1/clk.c b/drivers/clk/loongson1/clk.c
new file mode 100644
index 000000000000..cfcfd143fccb
--- /dev/null
+++ b/drivers/clk/loongson1/clk.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+struct clk_hw *__init clk_hw_register_pll(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ const struct clk_ops *ops,
+ unsigned long flags)
+{
+ int ret;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+
+ /* allocate the clk_hw for the PLL */
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ hw->init = &init;
+
+ /* register the clock */
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(hw);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
diff --git a/drivers/clk/loongson1/clk.h b/drivers/clk/loongson1/clk.h
new file mode 100644
index 000000000000..085d74b5d496
--- /dev/null
+++ b/drivers/clk/loongson1/clk.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LOONGSON1_CLK_H
+#define __LOONGSON1_CLK_H
+
+struct clk_hw *clk_hw_register_pll(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ const struct clk_ops *ops,
+ unsigned long flags);
+
+#endif /* __LOONGSON1_CLK_H */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
new file mode 100644
index 000000000000..380c372d528e
--- /dev/null
+++ b/drivers/clk/mediatek/Kconfig
@@ -0,0 +1,21 @@
+#
+# MediaTek SoC drivers
+#
+config COMMON_CLK_MEDIATEK
+ bool
+ ---help---
+ Mediatek SoCs' clock support.
+
+config COMMON_CLK_MT8135
+ bool "Clock driver for Mediatek MT8135"
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports Mediatek MT8135 clocks.
+
+config COMMON_CLK_MT8173
+ bool "Clock driver for Mediatek MT8173"
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports Mediatek MT8173 clocks.
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 95fdfacb2ebf..32e7222e7305 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,4 +1,4 @@
-obj-y += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
-obj-y += clk-mt8135.o
-obj-y += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
+obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 2a76901bf04b..d8787bf444eb 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -97,7 +97,7 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.disable = mtk_cg_disable_inv,
};
-struct clk * __init mtk_clk_register_gate(
+struct clk *mtk_clk_register_gate(
const char *name,
const char *parent_name,
struct regmap *regmap,
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 10c986018a08..0ac3aee87726 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -1074,8 +1074,10 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
}
mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
- if (!clk_data)
+ if (!clk_data) {
+ iounmap(base);
return;
+ }
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 5ada644e6200..bb30f7063569 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -24,7 +24,7 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-struct clk_onecell_data * __init mtk_alloc_clk_data(unsigned int clk_num)
+struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
{
int i;
struct clk_onecell_data *clk_data;
@@ -49,7 +49,7 @@ err_out:
return NULL;
}
-void __init mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
+void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
int num, struct clk_onecell_data *clk_data)
{
int i;
@@ -72,7 +72,7 @@ void __init mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
}
}
-void __init mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
+void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
int num, struct clk_onecell_data *clk_data)
{
int i;
@@ -95,7 +95,7 @@ void __init mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
}
}
-int __init mtk_clk_register_gates(struct device_node *node,
+int mtk_clk_register_gates(struct device_node *node,
const struct mtk_gate *clks,
int num, struct clk_onecell_data *clk_data)
{
@@ -135,7 +135,7 @@ int __init mtk_clk_register_gates(struct device_node *node,
return 0;
}
-struct clk * __init mtk_clk_register_composite(const struct mtk_composite *mc,
+struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
void __iomem *base, spinlock_t *lock)
{
struct clk *clk;
@@ -222,7 +222,7 @@ err_out:
return ERR_PTR(ret);
}
-void __init mtk_clk_register_composites(const struct mtk_composite *mcs,
+void mtk_clk_register_composites(const struct mtk_composite *mcs,
int num, void __iomem *base, spinlock_t *lock,
struct clk_onecell_data *clk_data)
{
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 966cab1348da..0c2deac17ce9 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -313,7 +313,7 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
return clk;
}
-void __init mtk_clk_register_plls(struct device_node *node,
+void mtk_clk_register_plls(struct device_node *node,
const struct mtk_pll_data *plls, int num_plls, struct clk_onecell_data *clk_data)
{
void __iomem *base;
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 197e40175166..349583405b7c 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -3,5 +3,5 @@
#
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o
-obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b-clkc.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o
+obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 53326c32e853..9bb70e7a7d6a 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -98,7 +98,7 @@ struct meson_clk_mpll {
};
#define MESON_GATE(_name, _reg, _bit) \
-struct clk_gate gxbb_##_name = { \
+struct clk_gate _name = { \
.reg = (void __iomem *) _reg, \
.bit_idx = (_bit), \
.lock = &clk_lock, \
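The clkc.h hunk above drops the hard-coded gxbb_ prefix from MESON_GATE, so each SoC driver now passes a fully prefixed variable name (gxbb_* in gxbb.c, meson8b_* in meson8b.c further down). A minimal sketch of what the visible part of the macro now produces for one invocation; the remaining clk_gate fields set by the macro are outside this hunk and assumed unchanged:

	/* static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0) expands to roughly: */
	static struct clk_gate gxbb_ddr = {
		.reg = (void __iomem *) HHI_GCLK_MPEG0,	/* offset only; probe adds the mapped base */
		.bit_idx = (0),
		.lock = &clk_lock,
		/* ...hw.init and the rest of the macro body follow... */
	};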
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
new file mode 100644
index 000000000000..b45c5fba7e35
--- /dev/null
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -0,0 +1,191 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/init.h>
+#include <dt-bindings/clock/gxbb-aoclkc.h>
+#include <dt-bindings/reset/gxbb-aoclkc.h>
+
+static DEFINE_SPINLOCK(gxbb_aoclk_lock);
+
+struct gxbb_aoclk_reset_controller {
+ struct reset_controller_dev reset;
+ unsigned int *data;
+ void __iomem *base;
+};
+
+static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct gxbb_aoclk_reset_controller *reset =
+ container_of(rcdev, struct gxbb_aoclk_reset_controller, reset);
+
+ writel(BIT(reset->data[id]), reset->base);
+
+ return 0;
+}
+
+static const struct reset_control_ops gxbb_aoclk_reset_ops = {
+ .reset = gxbb_aoclk_do_reset,
+};
+
+#define GXBB_AO_GATE(_name, _bit) \
+static struct clk_gate _name##_ao = { \
+ .reg = (void __iomem *)0, \
+ .bit_idx = (_bit), \
+ .lock = &gxbb_aoclk_lock, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name "_ao", \
+ .ops = &clk_gate_ops, \
+ .parent_names = (const char *[]){ "clk81" }, \
+ .num_parents = 1, \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
+}
+
+GXBB_AO_GATE(remote, 0);
+GXBB_AO_GATE(i2c_master, 1);
+GXBB_AO_GATE(i2c_slave, 2);
+GXBB_AO_GATE(uart1, 3);
+GXBB_AO_GATE(uart2, 5);
+GXBB_AO_GATE(ir_blaster, 6);
+
+static unsigned int gxbb_aoclk_reset[] = {
+ [RESET_AO_REMOTE] = 16,
+ [RESET_AO_I2C_MASTER] = 18,
+ [RESET_AO_I2C_SLAVE] = 19,
+ [RESET_AO_UART1] = 17,
+ [RESET_AO_UART2] = 22,
+ [RESET_AO_IR_BLASTER] = 23,
+};
+
+static struct clk_gate *gxbb_aoclk_gate[] = {
+ [CLKID_AO_REMOTE] = &remote_ao,
+ [CLKID_AO_I2C_MASTER] = &i2c_master_ao,
+ [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
+ [CLKID_AO_UART1] = &uart1_ao,
+ [CLKID_AO_UART2] = &uart2_ao,
+ [CLKID_AO_IR_BLASTER] = &ir_blaster_ao,
+};
+
+static struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
+ .hws = {
+ [CLKID_AO_REMOTE] = &remote_ao.hw,
+ [CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
+ [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao.hw,
+ [CLKID_AO_UART1] = &uart1_ao.hw,
+ [CLKID_AO_UART2] = &uart2_ao.hw,
+ [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
+ },
+ .num = ARRAY_SIZE(gxbb_aoclk_gate),
+};
+
+static int gxbb_aoclkc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+ int ret, clkid;
+ struct device *dev = &pdev->dev;
+ struct gxbb_aoclk_reset_controller *rstc;
+
+ rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return -ENOMEM;
+
+ /* Generic clocks */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* Reset Controller */
+ rstc->base = base;
+ rstc->data = gxbb_aoclk_reset;
+ rstc->reset.ops = &gxbb_aoclk_reset_ops;
+ rstc->reset.nr_resets = ARRAY_SIZE(gxbb_aoclk_reset);
+ rstc->reset.of_node = dev->of_node;
+ ret = devm_reset_controller_register(dev, &rstc->reset);
+
+ /*
+ * Populate base address and register all clks
+ */
+ for (clkid = 0; clkid < gxbb_aoclk_onecell_data.num; clkid++) {
+ gxbb_aoclk_gate[clkid]->reg = base;
+
+ ret = devm_clk_hw_register(dev,
+ gxbb_aoclk_onecell_data.hws[clkid]);
+ if (ret)
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &gxbb_aoclk_onecell_data);
+}
+
+static const struct of_device_id gxbb_aoclkc_match_table[] = {
+ { .compatible = "amlogic,gxbb-aoclkc" },
+ { }
+};
+
+static struct platform_driver gxbb_aoclkc_driver = {
+ .probe = gxbb_aoclkc_probe,
+ .driver = {
+ .name = "gxbb-aoclkc",
+ .of_match_table = gxbb_aoclkc_match_table,
+ },
+};
+builtin_platform_driver(gxbb_aoclkc_driver);
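The probe above hands gxbb_aoclk_onecell_data to of_clk_add_hw_provider() with of_clk_hw_onecell_get, so a device-tree clock cell is simply an index into the hws[] array (the CLKID_AO_* values from dt-bindings/clock/gxbb-aoclkc.h). A minimal sketch of that lookup, assuming the generic helper's usual bounds-checked behaviour:

	/* Rough equivalent of what of_clk_hw_onecell_get() does with one cell: */
	static struct clk_hw *aoclk_lookup(struct clk_hw_onecell_data *data,
					   unsigned int idx)
	{
		if (idx >= data->num)		/* e.g. idx = CLKID_AO_UART1 */
			return ERR_PTR(-EINVAL);
		return data->hws[idx];
	}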
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index a4c6684b3019..9d9af446bafc 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -565,90 +565,93 @@ static struct clk_gate gxbb_clk81 = {
};
/* Everything Else (EE) domain gates */
-static MESON_GATE(ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(sar_adc, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(aiu_glue, HHI_GCLK_MPEG1, 6);
-static MESON_GATE(iec958, HHI_GCLK_MPEG1, 7);
-static MESON_GATE(i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_GATE(amclk, HHI_GCLK_MPEG1, 9);
-static MESON_GATE(aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_GATE(mixer, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_GATE(adc, HHI_GCLK_MPEG1, 13);
-static MESON_GATE(blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(sana, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(clk81_a53, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(gclk_venci_int0, HHI_GCLK_OTHER, 8);
-static MESON_GATE(gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(gclk_venci_int1, HHI_GCLK_OTHER, 22);
-static MESON_GATE(vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(vclk_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(edp, HHI_GCLK_OTHER, 31);
+static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
+static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
+static MESON_GATE(gxbb_isa, HHI_GCLK_MPEG0, 5);
+static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
+static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
+static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
+static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
+static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
+static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
+static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
+static MESON_GATE(gxbb_sdhc, HHI_GCLK_MPEG0, 14);
+static MESON_GATE(gxbb_stream, HHI_GCLK_MPEG0, 15);
+static MESON_GATE(gxbb_async_fifo, HHI_GCLK_MPEG0, 16);
+static MESON_GATE(gxbb_sdio, HHI_GCLK_MPEG0, 17);
+static MESON_GATE(gxbb_abuf, HHI_GCLK_MPEG0, 18);
+static MESON_GATE(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19);
+static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
+static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
+static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
+static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
+static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
+
+static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
+static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
+static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
+static MESON_GATE(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6);
+static MESON_GATE(gxbb_iec958, HHI_GCLK_MPEG1, 7);
+static MESON_GATE(gxbb_i2s_out, HHI_GCLK_MPEG1, 8);
+static MESON_GATE(gxbb_amclk, HHI_GCLK_MPEG1, 9);
+static MESON_GATE(gxbb_aififo2, HHI_GCLK_MPEG1, 10);
+static MESON_GATE(gxbb_mixer, HHI_GCLK_MPEG1, 11);
+static MESON_GATE(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12);
+static MESON_GATE(gxbb_adc, HHI_GCLK_MPEG1, 13);
+static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
+static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
+static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
+static MESON_GATE(gxbb_g2d, HHI_GCLK_MPEG1, 20);
+static MESON_GATE(gxbb_usb0, HHI_GCLK_MPEG1, 21);
+static MESON_GATE(gxbb_usb1, HHI_GCLK_MPEG1, 22);
+static MESON_GATE(gxbb_reset, HHI_GCLK_MPEG1, 23);
+static MESON_GATE(gxbb_nand, HHI_GCLK_MPEG1, 24);
+static MESON_GATE(gxbb_dos_parser, HHI_GCLK_MPEG1, 25);
+static MESON_GATE(gxbb_usb, HHI_GCLK_MPEG1, 26);
+static MESON_GATE(gxbb_vdin1, HHI_GCLK_MPEG1, 28);
+static MESON_GATE(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29);
+static MESON_GATE(gxbb_efuse, HHI_GCLK_MPEG1, 30);
+static MESON_GATE(gxbb_boot_rom, HHI_GCLK_MPEG1, 31);
+
+static MESON_GATE(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1);
+static MESON_GATE(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
+static MESON_GATE(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
+static MESON_GATE(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4);
+static MESON_GATE(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
+static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
+static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
+static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
+static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22);
+static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
+static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
+
+static MESON_GATE(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1);
+static MESON_GATE(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2);
+static MESON_GATE(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3);
+static MESON_GATE(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4);
+static MESON_GATE(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8);
+static MESON_GATE(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9);
+static MESON_GATE(gxbb_dac_clk, HHI_GCLK_OTHER, 10);
+static MESON_GATE(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14);
+static MESON_GATE(gxbb_iec958_gate, HHI_GCLK_OTHER, 16);
+static MESON_GATE(gxbb_enc480p, HHI_GCLK_OTHER, 20);
+static MESON_GATE(gxbb_rng1, HHI_GCLK_OTHER, 21);
+static MESON_GATE(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22);
+static MESON_GATE(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
+static MESON_GATE(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25);
+static MESON_GATE(gxbb_vclk_other, HHI_GCLK_OTHER, 26);
+static MESON_GATE(gxbb_edp, HHI_GCLK_OTHER, 31);
/* Always On (AO) domain gates */
-static MESON_GATE(ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(ao_i2c, HHI_GCLK_AO, 4);
+static MESON_GATE(gxbb_ao_media_cpu, HHI_GCLK_AO, 0);
+static MESON_GATE(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1);
+static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
+static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
+static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
/* Array of all clocks provided by this provider */
@@ -748,6 +751,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_AO_AHB_BUS] = &gxbb_ao_ahb_bus.hw,
[CLKID_AO_IFACE] = &gxbb_ao_iface.hw,
[CLKID_AO_I2C] = &gxbb_ao_i2c.hw,
+ [CLKID_SD_EMMC_A] = &gxbb_emmc_a.hw,
+ [CLKID_SD_EMMC_B] = &gxbb_emmc_b.hw,
+ [CLKID_SD_EMMC_C] = &gxbb_emmc_c.hw,
},
.num = NR_CLKS,
};
@@ -847,6 +853,9 @@ static struct clk_gate *gxbb_clk_gates[] = {
&gxbb_ao_ahb_bus,
&gxbb_ao_iface,
&gxbb_ao_i2c,
+ &gxbb_emmc_a,
+ &gxbb_emmc_b,
+ &gxbb_emmc_c,
};
static int gxbb_clkc_probe(struct platform_device *pdev)
@@ -937,8 +946,4 @@ static struct platform_driver gxbb_driver = {
},
};
-static int __init gxbb_clkc_init(void)
-{
- return platform_driver_register(&gxbb_driver);
-}
-device_initcall(gxbb_clkc_init);
+builtin_platform_driver(gxbb_driver);
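Both Meson drivers (and the Armada 3700 drivers added later in this series) replace their open-coded registration with builtin_platform_driver(). As a reminder of what that helper boils down to, it generates roughly the same device-level initcall the removed wrapper provided, for a driver that is never unregistered:

	/* builtin_platform_driver(gxbb_driver) is roughly equivalent to: */
	static int __init gxbb_driver_init(void)
	{
		return platform_driver_register(&gxbb_driver);
	}
	device_initcall(gxbb_driver_init);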
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index a2adf3448b59..0252939ba58f 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -170,11 +170,11 @@
*/
#define CLKID_SYS_PLL 0
/* CLKID_CPUCLK */
-#define CLKID_HDMI_PLL 2
+/* CLKID_HDMI_PLL */
#define CLKID_FIXED_PLL 3
-#define CLKID_FCLK_DIV2 4
-#define CLKID_FCLK_DIV3 5
-#define CLKID_FCLK_DIV4 6
+/* CLKID_FCLK_DIV2 */
+/* CLKID_FCLK_DIV3 */
+/* CLKID_FCLK_DIV4 */
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
#define CLKID_GP0_PLL 9
@@ -183,14 +183,14 @@
/* CLKID_CLK81 */
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
-#define CLKID_MPLL2 15
+/* CLKID_MPLL2 */
#define CLKID_DDR 16
#define CLKID_DOS 17
#define CLKID_ISA 18
#define CLKID_PL301 19
#define CLKID_PERIPHS 20
#define CLKID_SPICC 21
-#define CLKID_I2C 22
+/* CLKID_I2C */
#define CLKID_SAR_ADC 23
#define CLKID_SMART_CARD 24
#define CLKID_RNG0 25
@@ -202,7 +202,7 @@
#define CLKID_ABUF 31
#define CLKID_HIU_IFACE 32
#define CLKID_ASSIST_MISC 33
-#define CLKID_SPI 34
+/* CLKID_SPI */
#define CLKID_I2S_SPDIF 35
#define CLKID_ETH 36
#define CLKID_DEMUX 37
@@ -218,12 +218,12 @@
#define CLKID_AIU 47
#define CLKID_UART1 48
#define CLKID_G2D 49
-#define CLKID_USB0 50
-#define CLKID_USB1 51
+/* CLKID_USB0 */
+/* CLKID_USB1 */
#define CLKID_RESET 52
#define CLKID_NAND 53
#define CLKID_DOS_PARSER 54
-#define CLKID_USB 55
+/* CLKID_USB */
#define CLKID_VDIN1 56
#define CLKID_AHB_ARB0 57
#define CLKID_EFUSE 58
@@ -232,8 +232,8 @@
#define CLKID_AHB_CTRL_BUS 61
#define CLKID_HDMI_INTR_SYNC 62
#define CLKID_HDMI_PCLK 63
-#define CLKID_USB1_DDR_BRIDGE 64
-#define CLKID_USB0_DDR_BRIDGE 65
+/* CLKID_USB1_DDR_BRIDGE */
+/* CLKID_USB0_DDR_BRIDGE */
#define CLKID_MMC_PCLK 66
#define CLKID_DVIN 67
#define CLKID_UART2 68
@@ -261,9 +261,12 @@
#define CLKID_AO_AHB_SRAM 90
#define CLKID_AO_AHB_BUS 91
#define CLKID_AO_IFACE 92
-#define CLKID_AO_I2C 93
+/* CLKID_AO_I2C */
+/* CLKID_SD_EMMC_A */
+/* CLKID_SD_EMMC_B */
+/* CLKID_SD_EMMC_C */
-#define NR_CLKS 94
+#define NR_CLKS 97
/* include the CLKIDs that have been made part of the stable DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b.c
index 4c9413cdf373..3f1be46cbb33 100644
--- a/drivers/clk/meson/meson8b-clkc.c
+++ b/drivers/clk/meson/meson8b.c
@@ -23,27 +23,11 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
-#include <dt-bindings/clock/meson8b-clkc.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include "clkc.h"
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the HardKernel[0] data sheet are listed in comment
- * blocks below. Those offsets must be multiplied by 4 before adding them to
- * the base address to get the right value
- *
- * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
- */
-#define MESON8B_REG_SYS_CPU_CNTL1 0x015c /* 0x57 offset in data sheet */
-#define MESON8B_REG_HHI_MPEG 0x0174 /* 0x5d offset in data sheet */
-#define MESON8B_REG_MALI 0x01b0 /* 0x6c offset in data sheet */
-#define MESON8B_REG_PLL_FIXED 0x0280
-#define MESON8B_REG_PLL_SYS 0x0300
-#define MESON8B_REG_PLL_VID 0x0320
+#include "meson8b.h"
static DEFINE_SPINLOCK(clk_lock);
@@ -128,17 +112,17 @@ static struct clk_fixed_rate meson8b_xtal = {
static struct meson_clk_pll meson8b_fixed_pll = {
.m = {
- .reg_off = MESON8B_REG_PLL_FIXED,
+ .reg_off = HHI_MPLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
- .reg_off = MESON8B_REG_PLL_FIXED,
+ .reg_off = HHI_MPLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
- .reg_off = MESON8B_REG_PLL_FIXED,
+ .reg_off = HHI_MPLL_CNTL,
.shift = 16,
.width = 2,
},
@@ -154,17 +138,17 @@ static struct meson_clk_pll meson8b_fixed_pll = {
static struct meson_clk_pll meson8b_vid_pll = {
.m = {
- .reg_off = MESON8B_REG_PLL_VID,
+ .reg_off = HHI_VID_PLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
- .reg_off = MESON8B_REG_PLL_VID,
+ .reg_off = HHI_VID_PLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
- .reg_off = MESON8B_REG_PLL_VID,
+ .reg_off = HHI_VID_PLL_CNTL,
.shift = 16,
.width = 2,
},
@@ -180,17 +164,17 @@ static struct meson_clk_pll meson8b_vid_pll = {
static struct meson_clk_pll meson8b_sys_pll = {
.m = {
- .reg_off = MESON8B_REG_PLL_SYS,
+ .reg_off = HHI_SYS_PLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
- .reg_off = MESON8B_REG_PLL_SYS,
+ .reg_off = HHI_SYS_PLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
- .reg_off = MESON8B_REG_PLL_SYS,
+ .reg_off = HHI_SYS_PLL_CNTL,
.shift = 16,
.width = 2,
},
@@ -267,7 +251,7 @@ static struct clk_fixed_factor meson8b_fclk_div7 = {
* forthcoming coordinated clock rates feature
*/
static struct meson_clk_cpu meson8b_cpu_clk = {
- .reg_off = MESON8B_REG_SYS_CPU_CNTL1,
+ .reg_off = HHI_SYS_CPU_CLK_CNTL1,
.div_table = cpu_div_table,
.clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
.hw.init = &(struct clk_init_data){
@@ -281,7 +265,7 @@ static struct meson_clk_cpu meson8b_cpu_clk = {
static u32 mux_table_clk81[] = { 6, 5, 7 };
struct clk_mux meson8b_mpeg_clk_sel = {
- .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
.flags = CLK_MUX_READ_ONLY,
@@ -303,7 +287,7 @@ struct clk_mux meson8b_mpeg_clk_sel = {
};
struct clk_divider meson8b_mpeg_clk_div = {
- .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
@@ -317,7 +301,7 @@ struct clk_divider meson8b_mpeg_clk_div = {
};
struct clk_gate meson8b_clk81 = {
- .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
.bit_idx = 7,
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
@@ -329,6 +313,92 @@ struct clk_gate meson8b_clk81 = {
},
};
+/* Everything Else (EE) domain gates */
+
+static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
+static MESON_GATE(meson8b_dos, HHI_GCLK_MPEG0, 1);
+static MESON_GATE(meson8b_isa, HHI_GCLK_MPEG0, 5);
+static MESON_GATE(meson8b_pl301, HHI_GCLK_MPEG0, 6);
+static MESON_GATE(meson8b_periphs, HHI_GCLK_MPEG0, 7);
+static MESON_GATE(meson8b_spicc, HHI_GCLK_MPEG0, 8);
+static MESON_GATE(meson8b_i2c, HHI_GCLK_MPEG0, 9);
+static MESON_GATE(meson8b_sar_adc, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(meson8b_smart_card, HHI_GCLK_MPEG0, 11);
+static MESON_GATE(meson8b_rng0, HHI_GCLK_MPEG0, 12);
+static MESON_GATE(meson8b_uart0, HHI_GCLK_MPEG0, 13);
+static MESON_GATE(meson8b_sdhc, HHI_GCLK_MPEG0, 14);
+static MESON_GATE(meson8b_stream, HHI_GCLK_MPEG0, 15);
+static MESON_GATE(meson8b_async_fifo, HHI_GCLK_MPEG0, 16);
+static MESON_GATE(meson8b_sdio, HHI_GCLK_MPEG0, 17);
+static MESON_GATE(meson8b_abuf, HHI_GCLK_MPEG0, 18);
+static MESON_GATE(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19);
+static MESON_GATE(meson8b_assist_misc, HHI_GCLK_MPEG0, 23);
+static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
+
+static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
+static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
+static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
+static MESON_GATE(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6);
+static MESON_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
+static MESON_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
+static MESON_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
+static MESON_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
+static MESON_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
+static MESON_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
+static MESON_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
+static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
+static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
+static MESON_GATE(meson8b_g2d, HHI_GCLK_MPEG1, 20);
+static MESON_GATE(meson8b_usb0, HHI_GCLK_MPEG1, 21);
+static MESON_GATE(meson8b_usb1, HHI_GCLK_MPEG1, 22);
+static MESON_GATE(meson8b_reset, HHI_GCLK_MPEG1, 23);
+static MESON_GATE(meson8b_nand, HHI_GCLK_MPEG1, 24);
+static MESON_GATE(meson8b_dos_parser, HHI_GCLK_MPEG1, 25);
+static MESON_GATE(meson8b_usb, HHI_GCLK_MPEG1, 26);
+static MESON_GATE(meson8b_vdin1, HHI_GCLK_MPEG1, 28);
+static MESON_GATE(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29);
+static MESON_GATE(meson8b_efuse, HHI_GCLK_MPEG1, 30);
+static MESON_GATE(meson8b_boot_rom, HHI_GCLK_MPEG1, 31);
+
+static MESON_GATE(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1);
+static MESON_GATE(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
+static MESON_GATE(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
+static MESON_GATE(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4);
+static MESON_GATE(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
+static MESON_GATE(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11);
+static MESON_GATE(meson8b_dvin, HHI_GCLK_MPEG2, 12);
+static MESON_GATE(meson8b_uart2, HHI_GCLK_MPEG2, 15);
+static MESON_GATE(meson8b_sana, HHI_GCLK_MPEG2, 22);
+static MESON_GATE(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
+static MESON_GATE(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29);
+
+static MESON_GATE(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1);
+static MESON_GATE(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2);
+static MESON_GATE(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3);
+static MESON_GATE(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4);
+static MESON_GATE(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8);
+static MESON_GATE(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9);
+static MESON_GATE(meson8b_dac_clk, HHI_GCLK_OTHER, 10);
+static MESON_GATE(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14);
+static MESON_GATE(meson8b_iec958_gate, HHI_GCLK_OTHER, 16);
+static MESON_GATE(meson8b_enc480p, HHI_GCLK_OTHER, 20);
+static MESON_GATE(meson8b_rng1, HHI_GCLK_OTHER, 21);
+static MESON_GATE(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22);
+static MESON_GATE(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
+static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
+static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
+static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+
+/* Always On (AO) domain gates */
+
+static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
+static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
+static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
+static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+
static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
.hws = {
[CLKID_XTAL] = &meson8b_xtal.hw,
@@ -344,6 +414,83 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
[CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
[CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_DDR] = &meson8b_ddr.hw,
+ [CLKID_DOS] = &meson8b_dos.hw,
+ [CLKID_ISA] = &meson8b_isa.hw,
+ [CLKID_PL301] = &meson8b_pl301.hw,
+ [CLKID_PERIPHS] = &meson8b_periphs.hw,
+ [CLKID_SPICC] = &meson8b_spicc.hw,
+ [CLKID_I2C] = &meson8b_i2c.hw,
+ [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
+ [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
+ [CLKID_RNG0] = &meson8b_rng0.hw,
+ [CLKID_UART0] = &meson8b_uart0.hw,
+ [CLKID_SDHC] = &meson8b_sdhc.hw,
+ [CLKID_STREAM] = &meson8b_stream.hw,
+ [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
+ [CLKID_SDIO] = &meson8b_sdio.hw,
+ [CLKID_ABUF] = &meson8b_abuf.hw,
+ [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
+ [CLKID_SPI] = &meson8b_spi.hw,
+ [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
+ [CLKID_ETH] = &meson8b_eth.hw,
+ [CLKID_DEMUX] = &meson8b_demux.hw,
+ [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
+ [CLKID_IEC958] = &meson8b_iec958.hw,
+ [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
+ [CLKID_AMCLK] = &meson8b_amclk.hw,
+ [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
+ [CLKID_MIXER] = &meson8b_mixer.hw,
+ [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
+ [CLKID_ADC] = &meson8b_adc.hw,
+ [CLKID_BLKMV] = &meson8b_blkmv.hw,
+ [CLKID_AIU] = &meson8b_aiu.hw,
+ [CLKID_UART1] = &meson8b_uart1.hw,
+ [CLKID_G2D] = &meson8b_g2d.hw,
+ [CLKID_USB0] = &meson8b_usb0.hw,
+ [CLKID_USB1] = &meson8b_usb1.hw,
+ [CLKID_RESET] = &meson8b_reset.hw,
+ [CLKID_NAND] = &meson8b_nand.hw,
+ [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
+ [CLKID_USB] = &meson8b_usb.hw,
+ [CLKID_VDIN1] = &meson8b_vdin1.hw,
+ [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
+ [CLKID_EFUSE] = &meson8b_efuse.hw,
+ [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
+ [CLKID_DVIN] = &meson8b_dvin.hw,
+ [CLKID_UART2] = &meson8b_uart2.hw,
+ [CLKID_SANA] = &meson8b_sana.hw,
+ [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
+ [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
+ [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
+ [CLKID_ENC480P] = &meson8b_enc480p.hw,
+ [CLKID_RNG1] = &meson8b_rng1.hw,
+ [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
+ [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
+ [CLKID_EDP] = &meson8b_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
},
.num = CLK_NR_CLKS,
};
@@ -354,6 +501,87 @@ static struct meson_clk_pll *const meson8b_clk_plls[] = {
&meson8b_sys_pll,
};
+static struct clk_gate *meson8b_clk_gates[] = {
+ &meson8b_clk81,
+ &meson8b_ddr,
+ &meson8b_dos,
+ &meson8b_isa,
+ &meson8b_pl301,
+ &meson8b_periphs,
+ &meson8b_spicc,
+ &meson8b_i2c,
+ &meson8b_sar_adc,
+ &meson8b_smart_card,
+ &meson8b_rng0,
+ &meson8b_uart0,
+ &meson8b_sdhc,
+ &meson8b_stream,
+ &meson8b_async_fifo,
+ &meson8b_sdio,
+ &meson8b_abuf,
+ &meson8b_hiu_iface,
+ &meson8b_assist_misc,
+ &meson8b_spi,
+ &meson8b_i2s_spdif,
+ &meson8b_eth,
+ &meson8b_demux,
+ &meson8b_aiu_glue,
+ &meson8b_iec958,
+ &meson8b_i2s_out,
+ &meson8b_amclk,
+ &meson8b_aififo2,
+ &meson8b_mixer,
+ &meson8b_mixer_iface,
+ &meson8b_adc,
+ &meson8b_blkmv,
+ &meson8b_aiu,
+ &meson8b_uart1,
+ &meson8b_g2d,
+ &meson8b_usb0,
+ &meson8b_usb1,
+ &meson8b_reset,
+ &meson8b_nand,
+ &meson8b_dos_parser,
+ &meson8b_usb,
+ &meson8b_vdin1,
+ &meson8b_ahb_arb0,
+ &meson8b_efuse,
+ &meson8b_boot_rom,
+ &meson8b_ahb_data_bus,
+ &meson8b_ahb_ctrl_bus,
+ &meson8b_hdmi_intr_sync,
+ &meson8b_hdmi_pclk,
+ &meson8b_usb1_ddr_bridge,
+ &meson8b_usb0_ddr_bridge,
+ &meson8b_mmc_pclk,
+ &meson8b_dvin,
+ &meson8b_uart2,
+ &meson8b_sana,
+ &meson8b_vpu_intr,
+ &meson8b_sec_ahb_ahb3_bridge,
+ &meson8b_clk81_a9,
+ &meson8b_vclk2_venci0,
+ &meson8b_vclk2_venci1,
+ &meson8b_vclk2_vencp0,
+ &meson8b_vclk2_vencp1,
+ &meson8b_gclk_venci_int,
+ &meson8b_gclk_vencp_int,
+ &meson8b_dac_clk,
+ &meson8b_aoclk_gate,
+ &meson8b_iec958_gate,
+ &meson8b_enc480p,
+ &meson8b_rng1,
+ &meson8b_gclk_vencl_int,
+ &meson8b_vclk2_venclmcc,
+ &meson8b_vclk2_vencl,
+ &meson8b_vclk2_other,
+ &meson8b_edp,
+ &meson8b_ao_media_cpu,
+ &meson8b_ao_ahb_sram,
+ &meson8b_ao_ahb_bus,
+ &meson8b_ao_iface,
+};
+
static int meson8b_clkc_probe(struct platform_device *pdev)
{
void __iomem *clk_base;
@@ -381,6 +609,11 @@ static int meson8b_clkc_probe(struct platform_device *pdev)
meson8b_mpeg_clk_div.reg = clk_base + (u32)meson8b_mpeg_clk_div.reg;
meson8b_clk81.reg = clk_base + (u32)meson8b_clk81.reg;
+ /* Populate base address for gates */
+ for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++)
+ meson8b_clk_gates[i]->reg = clk_base +
+ (u32)meson8b_clk_gates[i]->reg;
+
/*
* register all clks
* CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
@@ -440,8 +673,4 @@ static struct platform_driver meson8b_driver = {
},
};
-static int __init meson8b_clkc_init(void)
-{
- return platform_driver_register(&meson8b_driver);
-}
-device_initcall(meson8b_clkc_init);
+builtin_platform_driver(meson8b_driver);
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
new file mode 100644
index 000000000000..010e9582888d
--- /dev/null
+++ b/drivers/clk/meson/meson8b.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Michael Turquette <mturquette@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MESON8B_H
+#define __MESON8B_H
+
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the HardKernel[0] data sheet are listed in comment
+ * blocks below. Those offsets must be multiplied by 4 before adding them to
+ * the base address to get the right value
+ *
+ * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
+ */
+#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
+#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
+#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
+#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
+#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
+#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
+#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
+#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
+#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * Migrate them out of this header and into the DT header file when they need
+ * to be exposed to client nodes in DT: include/dt-bindings/clock/meson8b-clkc.h
+ */
+
+/* CLKID_UNUSED */
+/* CLKID_XTAL */
+/* CLKID_PLL_FIXED */
+/* CLKID_PLL_VID */
+/* CLKID_PLL_SYS */
+/* CLKID_FCLK_DIV2 */
+/* CLKID_FCLK_DIV3 */
+/* CLKID_FCLK_DIV4 */
+/* CLKID_FCLK_DIV5 */
+/* CLKID_FCLK_DIV7 */
+/* CLKID_CLK81 */
+/* CLKID_MALI */
+/* CLKID_CPUCLK */
+/* CLKID_ZERO */
+/* CLKID_MPEG_SEL */
+/* CLKID_MPEG_DIV */
+#define CLKID_DDR 16
+#define CLKID_DOS 17
+#define CLKID_ISA 18
+#define CLKID_PL301 19
+#define CLKID_PERIPHS 20
+#define CLKID_SPICC 21
+#define CLKID_I2C 22
+#define CLKID_SAR_ADC 23
+#define CLKID_SMART_CARD 24
+#define CLKID_RNG0 25
+#define CLKID_UART0 26
+#define CLKID_SDHC 27
+#define CLKID_STREAM 28
+#define CLKID_ASYNC_FIFO 29
+#define CLKID_SDIO 30
+#define CLKID_ABUF 31
+#define CLKID_HIU_IFACE 32
+#define CLKID_ASSIST_MISC 33
+#define CLKID_SPI 34
+#define CLKID_I2S_SPDIF 35
+#define CLKID_ETH 36
+#define CLKID_DEMUX 37
+#define CLKID_AIU_GLUE 38
+#define CLKID_IEC958 39
+#define CLKID_I2S_OUT 40
+#define CLKID_AMCLK 41
+#define CLKID_AIFIFO2 42
+#define CLKID_MIXER 43
+#define CLKID_MIXER_IFACE 44
+#define CLKID_ADC 45
+#define CLKID_BLKMV 46
+#define CLKID_AIU 47
+#define CLKID_UART1 48
+#define CLKID_G2D 49
+#define CLKID_USB0 50
+#define CLKID_USB1 51
+#define CLKID_RESET 52
+#define CLKID_NAND 53
+#define CLKID_DOS_PARSER 54
+#define CLKID_USB 55
+#define CLKID_VDIN1 56
+#define CLKID_AHB_ARB0 57
+#define CLKID_EFUSE 58
+#define CLKID_BOOT_ROM 59
+#define CLKID_AHB_DATA_BUS 60
+#define CLKID_AHB_CTRL_BUS 61
+#define CLKID_HDMI_INTR_SYNC 62
+#define CLKID_HDMI_PCLK 63
+#define CLKID_USB1_DDR_BRIDGE 64
+#define CLKID_USB0_DDR_BRIDGE 65
+#define CLKID_MMC_PCLK 66
+#define CLKID_DVIN 67
+#define CLKID_UART2 68
+#define CLKID_SANA 69
+#define CLKID_VPU_INTR 70
+#define CLKID_SEC_AHB_AHB3_BRIDGE 71
+#define CLKID_CLK81_A9 72
+#define CLKID_VCLK2_VENCI0 73
+#define CLKID_VCLK2_VENCI1 74
+#define CLKID_VCLK2_VENCP0 75
+#define CLKID_VCLK2_VENCP1 76
+#define CLKID_GCLK_VENCI_INT 77
+#define CLKID_GCLK_VENCP_INT 78
+#define CLKID_DAC_CLK 79
+#define CLKID_AOCLK_GATE 80
+#define CLKID_IEC958_GATE 81
+#define CLKID_ENC480P 82
+#define CLKID_RNG1 83
+#define CLKID_GCLK_VENCL_INT 84
+#define CLKID_VCLK2_VENCLMCC 85
+#define CLKID_VCLK2_VENCL 86
+#define CLKID_VCLK2_OTHER 87
+#define CLKID_EDP 88
+#define CLKID_AO_MEDIA_CPU 89
+#define CLKID_AO_AHB_SRAM 90
+#define CLKID_AO_AHB_BUS 91
+#define CLKID_AO_IFACE 92
+
+#define CLK_NR_CLKS 93
+
+/* include the CLKIDs that have been made part of the stable DT binding */
+#include <dt-bindings/clock/meson8b-clkc.h>
+
+#endif /* __MESON8B_H */
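The register defines above follow the rule stated in the header comment: the data-sheet value is a word index that must be multiplied by 4 to get the byte offset used here. A quick worked check against two entries taken from the table above:

	/* HHI_SYS_CPU_CLK_CNTL1: 0x57 * 4 = 0x15c (matches the #define above)
	 * HHI_MPLL_CNTL:        0xa0 * 4 = 0x280 (matches the #define above)
	 */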
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index ca85cea17839..c3b301463425 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -199,9 +199,9 @@ static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
spin_unlock_irqrestore(&pb->core->reg_lock, flags);
- /* wait again, for pbdivready */
- err = readl_poll_timeout_atomic(pb->ctrl_reg, v, v & PB_DIV_READY,
- 1, LOCK_TIMEOUT_US);
+ /* wait again for DIV_READY */
+ err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
+ 1, LOCK_TIMEOUT_US);
if (err)
return err;
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 51f54380474b..9f734779be92 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -118,6 +118,7 @@ static const struct pic32_sec_osc_data sosc_clk = {
.status_reg = 0x1d0,
.enable_mask = BIT(1),
.status_mask = BIT(4),
+ .fixed_rate = 32768,
.init_data = {
.name = "sosc_clk",
.parent_names = NULL,
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 383f6a4f64f0..038023483b98 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/clk/mmp.h>
#include "clk.h"
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 3165da77d525..fddc8ac5faff 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -24,6 +24,9 @@ config ARMADA_39X_CLK
bool
select MVEBU_CLK_COMMON
+config ARMADA_37XX_CLK
+ bool
+
config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 7172ef65693d..d9ae97fb43c4 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -6,6 +6,9 @@ obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o
obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o
obj-$(CONFIG_ARMADA_39X_CLK) += armada-39x.o
+obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-xtal.o
+obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-tbg.o
+obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-periph.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
obj-$(CONFIG_ARMADA_AP806_SYSCON) += ap806-system-controller.o
obj-$(CONFIG_ARMADA_CP110_SYSCON) += cp110-system-controller.o
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
new file mode 100644
index 000000000000..45905fc0d75b
--- /dev/null
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -0,0 +1,447 @@
+/*
+ * Marvell Armada 37xx SoC Peripheral clocks
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2 or later. This program is licensed "as is"
+ * without any warranty of any kind, whether express or implied.
+ *
+ * Most of the peripheral clocks can be modelled like this:
+ * _____ _______ _______
+ * TBG-A-P --| | | | | | ______
+ * TBG-B-P --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
+ * TBG-A-S --| | | | | | |______|
+ * TBG-B-S --|_____| |_______| |_______|
+ *
+ * However some clocks may use only one or two block or and use the
+ * xtal clock as parent.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define TBG_SEL 0x0
+#define DIV_SEL0 0x4
+#define DIV_SEL1 0x8
+#define DIV_SEL2 0xC
+#define CLK_SEL 0x10
+#define CLK_DIS 0x14
+
+struct clk_periph_driver_data {
+ struct clk_hw_onecell_data *hw_data;
+ spinlock_t lock;
+};
+
+struct clk_double_div {
+ struct clk_hw hw;
+ void __iomem *reg1;
+ u8 shift1;
+ void __iomem *reg2;
+ u8 shift2;
+};
+
+#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
+
+struct clk_periph_data {
+ const char *name;
+ const char * const *parent_names;
+ int num_parents;
+ struct clk_hw *mux_hw;
+ struct clk_hw *rate_hw;
+ struct clk_hw *gate_hw;
+ bool is_double_div;
+};
+
+static const struct clk_div_table clk_table6[] = {
+ { .val = 1, .div = 1, },
+ { .val = 2, .div = 2, },
+ { .val = 3, .div = 3, },
+ { .val = 4, .div = 4, },
+ { .val = 5, .div = 5, },
+ { .val = 6, .div = 6, },
+ { .val = 0, .div = 0, }, /* last entry */
+};
+
+static const struct clk_div_table clk_table1[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 0, }, /* last entry */
+};
+
+static const struct clk_div_table clk_table2[] = {
+ { .val = 0, .div = 2, },
+ { .val = 1, .div = 4, },
+ { .val = 0, .div = 0, }, /* last entry */
+};
+static const struct clk_ops clk_double_div_ops;
+
+#define PERIPH_GATE(_name, _bit) \
+struct clk_gate gate_##_name = { \
+ .reg = (void *)CLK_DIS, \
+ .bit_idx = _bit, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_gate_ops, \
+ } \
+};
+
+#define PERIPH_MUX(_name, _shift) \
+struct clk_mux mux_##_name = { \
+ .reg = (void *)TBG_SEL, \
+ .shift = _shift, \
+ .mask = 3, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_mux_ro_ops, \
+ } \
+};
+
+#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2) \
+struct clk_double_div rate_##_name = { \
+ .reg1 = (void *)_reg1, \
+ .reg2 = (void *)_reg2, \
+ .shift1 = _shift1, \
+ .shift2 = _shift2, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_double_div_ops, \
+ } \
+};
+
+#define PERIPH_DIV(_name, _reg, _shift, _table) \
+struct clk_divider rate_##_name = { \
+ .reg = (void *)_reg, \
+ .table = _table, \
+ .shift = _shift, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_divider_ro_ops, \
+ } \
+};
+
+#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
+static PERIPH_GATE(_name, _bit); \
+static PERIPH_MUX(_name, _shift); \
+static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);
+
+#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table) \
+static PERIPH_GATE(_name, _bit); \
+static PERIPH_MUX(_name, _shift); \
+static PERIPH_DIV(_name, _reg, _shift1, _table);
+
+#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table) \
+static PERIPH_GATE(_name, _bit); \
+static PERIPH_DIV(_name, _reg, _shift, _table);
+
+#define PERIPH_CLK_MUX_DIV(_name, _shift, _reg, _shift_div, _table) \
+static PERIPH_MUX(_name, _shift); \
+static PERIPH_DIV(_name, _reg, _shift_div, _table);
+
+#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
+static PERIPH_MUX(_name, _shift); \
+static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);
+
+#define REF_CLK_FULL(_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ "TBG-A-P", \
+ "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
+ .num_parents = 4, \
+ .mux_hw = &mux_##_name.hw, \
+ .gate_hw = &gate_##_name.hw, \
+ .rate_hw = &rate_##_name.hw, \
+ }
+
+#define REF_CLK_FULL_DD(_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ "TBG-A-P", \
+ "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
+ .num_parents = 4, \
+ .mux_hw = &mux_##_name.hw, \
+ .gate_hw = &gate_##_name.hw, \
+ .rate_hw = &rate_##_name.hw, \
+ .is_double_div = true, \
+ }
+
+#define REF_CLK_GATE(_name, _parent_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ _parent_name}, \
+ .num_parents = 1, \
+ .gate_hw = &gate_##_name.hw, \
+ }
+
+#define REF_CLK_GATE_DIV(_name, _parent_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ _parent_name}, \
+ .num_parents = 1, \
+ .gate_hw = &gate_##_name.hw, \
+ .rate_hw = &rate_##_name.hw, \
+ }
+
+#define REF_CLK_MUX_DIV(_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ "TBG-A-P", \
+ "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
+ .num_parents = 4, \
+ .mux_hw = &mux_##_name.hw, \
+ .rate_hw = &rate_##_name.hw, \
+ }
+
+#define REF_CLK_MUX_DD(_name) \
+ { .name = #_name, \
+ .parent_names = (const char *[]){ "TBG-A-P", \
+ "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
+ .num_parents = 4, \
+ .mux_hw = &mux_##_name.hw, \
+ .rate_hw = &rate_##_name.hw, \
+ .is_double_div = true, \
+ }
+
+/* NB periph clocks */
+PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
+PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
+PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
+PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
+PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
+PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
+static PERIPH_GATE(avs, 11);
+PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
+PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
+static PERIPH_GATE(i2c_2, 16);
+static PERIPH_GATE(i2c_1, 17);
+PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
+PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
+PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
+PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
+PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
+PERIPH_CLK_MUX_DIV(cpu, 22, DIV_SEL0, 28, clk_table6);
+
+static struct clk_periph_data data_nb[] ={
+ REF_CLK_FULL_DD(mmc),
+ REF_CLK_FULL_DD(sata_host),
+ REF_CLK_FULL_DD(sec_at),
+ REF_CLK_FULL_DD(sec_dap),
+ REF_CLK_FULL_DD(tscem),
+ REF_CLK_FULL(tscem_tmx),
+ REF_CLK_GATE(avs, "xtal"),
+ REF_CLK_FULL_DD(sqf),
+ REF_CLK_FULL_DD(pwm),
+ REF_CLK_GATE(i2c_2, "xtal"),
+ REF_CLK_GATE(i2c_1, "xtal"),
+ REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
+ REF_CLK_FULL_DD(ddr_fclk),
+ REF_CLK_FULL(trace),
+ REF_CLK_FULL(counter),
+ REF_CLK_FULL_DD(eip97),
+ REF_CLK_MUX_DIV(cpu),
+ { },
+};
+
+/* SB periph clocks */
+PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
+PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
+PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
+static PERIPH_GATE(gbe1_50, 0);
+static PERIPH_GATE(gbe0_50, 1);
+static PERIPH_GATE(gbe1_125, 2);
+static PERIPH_GATE(gbe0_125, 3);
+PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
+PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
+PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
+PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
+PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
+PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);
+
+static struct clk_periph_data data_sb[] = {
+ REF_CLK_MUX_DD(gbe_50),
+ REF_CLK_MUX_DD(gbe_core),
+ REF_CLK_MUX_DD(gbe_125),
+ REF_CLK_GATE(gbe1_50, "gbe_50"),
+ REF_CLK_GATE(gbe0_50, "gbe_50"),
+ REF_CLK_GATE(gbe1_125, "gbe_125"),
+ REF_CLK_GATE(gbe0_125, "gbe_125"),
+ REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
+ REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
+ REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
+ REF_CLK_FULL_DD(sdio),
+ REF_CLK_FULL_DD(usb32_usb2_sys),
+ REF_CLK_FULL_DD(usb32_ss_sys),
+ { },
+};
+
+static unsigned int get_div(void __iomem *reg, int shift)
+{
+ u32 val;
+
+ val = (readl(reg) >> shift) & 0x7;
+ if (val > 6)
+ return 0;
+ return val;
+}
+
+static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_double_div *double_div = to_clk_double_div(hw);
+ unsigned int div;
+
+ div = get_div(double_div->reg1, double_div->shift1);
+ div *= get_div(double_div->reg2, double_div->shift2);
+
+ return DIV_ROUND_UP_ULL((u64)parent_rate, div);
+}
+
+static const struct clk_ops clk_double_div_ops = {
+ .recalc_rate = clk_double_div_recalc_rate,
+};
+
+static const struct of_device_id armada_3700_periph_clock_of_match[] = {
+ { .compatible = "marvell,armada-3700-periph-clock-nb",
+ .data = data_nb, },
+ { .compatible = "marvell,armada-3700-periph-clock-sb",
+ .data = data_sb, },
+ { }
+};
+static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
+ void __iomem *reg, spinlock_t *lock,
+ struct device *dev, struct clk_hw *hw)
+{
+ const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
+ *rate_ops = NULL;
+ struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;
+
+ if (data->mux_hw) {
+ struct clk_mux *mux;
+
+ mux_hw = data->mux_hw;
+ mux = to_clk_mux(mux_hw);
+ mux->lock = lock;
+ mux_ops = mux_hw->init->ops;
+ mux->reg = reg + (u64)mux->reg;
+ }
+
+ if (data->gate_hw) {
+ struct clk_gate *gate;
+
+ gate_hw = data->gate_hw;
+ gate = to_clk_gate(gate_hw);
+ gate->lock = lock;
+ gate_ops = gate_hw->init->ops;
+ gate->reg = reg + (u64)gate->reg;
+ }
+
+ if (data->rate_hw) {
+ rate_hw = data->rate_hw;
+ rate_ops = rate_hw->init->ops;
+ if (data->is_double_div) {
+ struct clk_double_div *rate;
+
+ rate = to_clk_double_div(rate_hw);
+ rate->reg1 = reg + (u64)rate->reg1;
+ rate->reg2 = reg + (u64)rate->reg2;
+ } else {
+ struct clk_divider *rate = to_clk_divider(rate_hw);
+ const struct clk_div_table *clkt;
+ int table_size = 0;
+
+ rate->reg = reg + (u64)rate->reg;
+ for (clkt = rate->table; clkt->div; clkt++)
+ table_size++;
+ rate->width = order_base_2(table_size);
+ rate->lock = lock;
+ }
+ }
+
+ hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ data->num_parents, mux_hw,
+ mux_ops, rate_hw, rate_ops,
+ gate_hw, gate_ops, CLK_IGNORE_UNUSED);
+
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return 0;
+}
+
+static int armada_3700_periph_clock_probe(struct platform_device *pdev)
+{
+ struct clk_periph_driver_data *driver_data;
+ struct device_node *np = pdev->dev.of_node;
+ const struct clk_periph_data *data;
+ struct device *dev = &pdev->dev;
+ int num_periph = 0, i, ret;
+ struct resource *res;
+ void __iomem *reg;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ while (data[num_periph].name)
+ num_periph++;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
+ if (!driver_data)
+ return -ENOMEM;
+
+ driver_data->hw_data = devm_kzalloc(dev, sizeof(*driver_data->hw_data) +
+ sizeof(*driver_data->hw_data->hws) * num_periph,
+ GFP_KERNEL);
+ if (!driver_data->hw_data)
+ return -ENOMEM;
+ driver_data->hw_data->num = num_periph;
+
+ spin_lock_init(&driver_data->lock);
+
+ for (i = 0; i < num_periph; i++) {
+ struct clk_hw *hw = driver_data->hw_data->hws[i];
+
+ if (armada_3700_add_composite_clk(&data[i], reg,
+ &driver_data->lock, dev, hw))
+ dev_err(dev, "Can't register periph clock %s\n",
+ data[i].name);
+
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+ driver_data->hw_data);
+ if (ret) {
+ for (i = 0; i < num_periph; i++)
+ clk_hw_unregister(driver_data->hw_data->hws[i]);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, driver_data);
+ return 0;
+}
+
+static int armada_3700_periph_clock_remove(struct platform_device *pdev)
+{
+ struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *hw_data = data->hw_data;
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < hw_data->num; i++)
+ clk_hw_unregister(hw_data->hws[i]);
+
+ return 0;
+}
+
+static struct platform_driver armada_3700_periph_clock_driver = {
+ .probe = armada_3700_periph_clock_probe,
+ .remove = armada_3700_periph_clock_remove,
+ .driver = {
+ .name = "marvell-armada-3700-periph-clock",
+ .of_match_table = armada_3700_periph_clock_of_match,
+ },
+};
+
+builtin_platform_driver(armada_3700_periph_clock_driver);
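The comment block at the top of armada-37xx-periph.c above describes the mux -> /div1 -> /div2 -> gate chain, and clk_double_div_recalc_rate() implements the divider part by multiplying the two fields read by get_div(). A stand-alone worked example with hypothetical field values, not taken from real registers:

	#include <stdio.h>

	/* rate = parent / (div1 * div2), rounded up, as in clk_double_div_recalc_rate() */
	int main(void)
	{
		unsigned long long parent = 1000000000ULL;	/* hypothetical 1 GHz TBG output */
		unsigned int div1 = 2, div2 = 5;		/* two 3-bit fields, each expected in 1..6 */
		unsigned long long rate =
			(parent + (div1 * div2) - 1) / (div1 * div2);

		printf("%llu Hz\n", rate);			/* 100000000 Hz */
		return 0;
	}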
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
new file mode 100644
index 000000000000..aa80db11f543
--- /dev/null
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -0,0 +1,158 @@
+/*
+ * Marvell Armada 37xx SoC Time Base Generator clocks
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2 or later. This program is licensed "as is"
+ * without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define NUM_TBG 4
+
+#define TBG_CTRL0 0x4
+#define TBG_CTRL1 0x8
+#define TBG_CTRL7 0x20
+#define TBG_CTRL8 0x30
+
+#define TBG_DIV_MASK 0x1FF
+
+#define TBG_A_REFDIV 0
+#define TBG_B_REFDIV 16
+
+#define TBG_A_FBDIV 2
+#define TBG_B_FBDIV 18
+
+#define TBG_A_VCODIV_SE 0
+#define TBG_B_VCODIV_SE 16
+
+#define TBG_A_VCODIV_DIFF 1
+#define TBG_B_VCODIV_DIFF 17
+
+struct tbg_def {
+ char *name;
+ u32 refdiv_offset;
+ u32 fbdiv_offset;
+ u32 vcodiv_reg;
+ u32 vcodiv_offset;
+};
+
+static const struct tbg_def tbg[NUM_TBG] = {
+ {"TBG-A-P", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL8, TBG_A_VCODIV_DIFF},
+ {"TBG-B-P", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL8, TBG_B_VCODIV_DIFF},
+ {"TBG-A-S", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL1, TBG_A_VCODIV_SE},
+ {"TBG-B-S", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL1, TBG_B_VCODIV_SE},
+};
+
+static unsigned int tbg_get_mult(void __iomem *reg, const struct tbg_def *ptbg)
+{
+ u32 val;
+
+ val = readl(reg + TBG_CTRL0);
+
+ return ((val >> ptbg->fbdiv_offset) & TBG_DIV_MASK) << 2;
+}
+
+static unsigned int tbg_get_div(void __iomem *reg, const struct tbg_def *ptbg)
+{
+ u32 val;
+ unsigned int div;
+
+ val = readl(reg + TBG_CTRL7);
+
+ div = (val >> ptbg->refdiv_offset) & TBG_DIV_MASK;
+ if (div == 0)
+ div = 1;
+ val = readl(reg + ptbg->vcodiv_reg);
+
+ div *= 1 << ((val >> ptbg->vcodiv_offset) & TBG_DIV_MASK);
+
+ return div;
+}
+
+
+static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_hw_onecell_data *hw_tbg_data;
+ struct device *dev = &pdev->dev;
+ const char *parent_name;
+ struct resource *res;
+ struct clk *parent;
+ void __iomem *reg;
+ int i, ret;
+
+ hw_tbg_data = devm_kzalloc(&pdev->dev, sizeof(*hw_tbg_data)
+ + sizeof(*hw_tbg_data->hws) * NUM_TBG,
+ GFP_KERNEL);
+ if (!hw_tbg_data)
+ return -ENOMEM;
+ hw_tbg_data->num = NUM_TBG;
+ platform_set_drvdata(pdev, hw_tbg_data);
+
+ parent = devm_clk_get(dev, NULL);
+ if (IS_ERR(parent)) {
+ dev_err(dev, "Could get the clock parent\n");
+ return -EINVAL;
+ }
+ parent_name = __clk_get_name(parent);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ for (i = 0; i < NUM_TBG; i++) {
+ const char *name;
+ unsigned int mult, div;
+
+ name = tbg[i].name;
+ mult = tbg_get_mult(reg, &tbg[i]);
+ div = tbg_get_div(reg, &tbg[i]);
+ hw_tbg_data->hws[i] = clk_hw_register_fixed_factor(NULL, name,
+ parent_name, 0, mult, div);
+ if (IS_ERR(hw_tbg_data->hws[i]))
+ dev_err(dev, "Can't register TBG clock %s\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
+
+ return ret;
+}
+
+static int armada_3700_tbg_clock_remove(struct platform_device *pdev)
+{
+ int i;
+ struct clk_hw_onecell_data *hw_tbg_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+ for (i = 0; i < hw_tbg_data->num; i++)
+ clk_hw_unregister_fixed_factor(hw_tbg_data->hws[i]);
+
+ return 0;
+}
+
+static const struct of_device_id armada_3700_tbg_clock_of_match[] = {
+ { .compatible = "marvell,armada-3700-tbg-clock", },
+ { }
+};
+
+static struct platform_driver armada_3700_tbg_clock_driver = {
+ .probe = armada_3700_tbg_clock_probe,
+ .remove = armada_3700_tbg_clock_remove,
+ .driver = {
+ .name = "marvell-armada-3700-tbg-clock",
+ .of_match_table = armada_3700_tbg_clock_of_match,
+ },
+};
+
+builtin_platform_driver(armada_3700_tbg_clock_driver);
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
new file mode 100644
index 000000000000..612d65ede10a
--- /dev/null
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -0,0 +1,91 @@
+/*
+ * Marvell Armada 37xx SoC xtal clocks
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define NB_GPIO1_LATCH 0xC
+#define XTAL_MODE BIT(31)
+
+static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const char *xtal_name = "xtal";
+ struct device_node *parent;
+ struct regmap *regmap;
+ struct clk_hw *xtal_hw;
+ unsigned int rate;
+ u32 reg;
+ int ret;
+
+ xtal_hw = devm_kzalloc(&pdev->dev, sizeof(*xtal_hw), GFP_KERNEL);
+ if (!xtal_hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xtal_hw);
+
+ parent = np->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "no parent\n");
+ return -ENODEV;
+ }
+
+ regmap = syscon_node_to_regmap(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "cannot get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_read(regmap, NB_GPIO1_LATCH, &reg);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot read from regmap\n");
+ return ret;
+ }
+
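+ /* Bit 31 of the NB GPIO1 latch selects a 40 MHz xtal; otherwise 25 MHz */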
+ if (reg & XTAL_MODE)
+ rate = 40000000;
+ else
+ rate = 25000000;
+
+ of_property_read_string_index(np, "clock-output-names", 0, &xtal_name);
+ xtal_hw = clk_hw_register_fixed_rate(NULL, xtal_name, NULL, 0, rate);
+ if (IS_ERR(xtal_hw))
+ return PTR_ERR(xtal_hw);
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, xtal_hw);
+
+ return ret;
+}
+
+static int armada_3700_xtal_clock_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
+ { .compatible = "marvell,armada-3700-xtal-clock", },
+ { }
+};
+
+static struct platform_driver armada_3700_xtal_clock_driver = {
+ .probe = armada_3700_xtal_clock_probe,
+ .remove = armada_3700_xtal_clock_remove,
+ .driver = {
+ .name = "marvell-armada-3700-xtal-clock",
+ .of_match_table = armada_3700_xtal_clock_of_match,
+ },
+};
+
+builtin_platform_driver(armada_3700_xtal_clock_driver);
diff --git a/drivers/clk/mvebu/armada-39x.c b/drivers/clk/mvebu/armada-39x.c
index efb974df9822..4fdfd32247a9 100644
--- a/drivers/clk/mvebu/armada-39x.c
+++ b/drivers/clk/mvebu/armada-39x.c
@@ -142,6 +142,8 @@ static const struct clk_gating_soc_desc armada_39x_gating_desc[] __initconst = {
{ "pex3", NULL, 7 },
{ "pex0", NULL, 8 },
{ "usb3h0", NULL, 9 },
+ { "usb3h1", NULL, 10 },
+ { "sata0", NULL, 15 },
{ "sdio", NULL, 17 },
{ "xor0", NULL, 22 },
{ "xor1", NULL, 28 },
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 7fa42d6b2b92..f2303da7fda7 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -81,13 +81,6 @@ enum {
#define CP110_GATE_EIP150 25
#define CP110_GATE_EIP197 26
-static struct clk *cp110_clks[CP110_CLK_NUM];
-
-static struct clk_onecell_data cp110_clk_data = {
- .clks = cp110_clks,
- .clk_num = CP110_CLK_NUM,
-};
-
struct cp110_gate_clk {
struct clk_hw hw;
struct regmap *regmap;
@@ -142,6 +135,8 @@ static struct clk *cp110_register_gate(const char *name,
if (!gate)
return ERR_PTR(-ENOMEM);
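+ /* init is a stack variable: clear it so fields not set below are zero */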
+ memset(&init, 0, sizeof(init));
+
init.name = name;
init.ops = &cp110_gate_ops;
init.parent_names = &parent_name;
@@ -194,7 +189,8 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
struct regmap *regmap;
struct device_node *np = pdev->dev.of_node;
const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
- struct clk *clk;
+ struct clk_onecell_data *cp110_clk_data;
+ struct clk *clk, **cp110_clks;
u32 nand_clk_ctrl;
int i, ret;
@@ -207,6 +203,20 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
+ cp110_clks = devm_kcalloc(&pdev->dev, CP110_CLK_NUM,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!cp110_clks)
+ return -ENOMEM;
+
+ cp110_clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*cp110_clk_data),
+ GFP_KERNEL);
+ if (!cp110_clk_data)
+ return -ENOMEM;
+
+ cp110_clk_data->clks = cp110_clks;
+ cp110_clk_data->clk_num = CP110_CLK_NUM;
+
/* Register the APLL which is the root of the clk tree */
of_property_read_string_index(np, "core-clock-output-names",
CP110_CORE_APLL, &apll_name);
@@ -334,10 +344,12 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk;
}
- ret = of_clk_add_provider(np, cp110_of_clk_get, &cp110_clk_data);
+ ret = of_clk_add_provider(np, cp110_of_clk_get, cp110_clk_data);
if (ret)
goto fail_clk_add;
+ platform_set_drvdata(pdev, cp110_clks);
+
return 0;
fail_clk_add:
@@ -364,6 +376,7 @@ fail0:
static int cp110_syscon_clk_remove(struct platform_device *pdev)
{
+ struct clk **cp110_clks = platform_get_drvdata(pdev);
int i;
of_clk_del_provider(pdev->dev.of_node);
diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c
index fd129566c1ce..a6e5bee23385 100644
--- a/drivers/clk/mvebu/orion.c
+++ b/drivers/clk/mvebu/orion.c
@@ -21,6 +21,76 @@ static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = {
};
/*
+ * Orion 5181
+ */
+
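+/*
+ * TCLK, CPU frequency and the core-clock ratio are decoded from fields of
+ * the Sample-At-Reset (SAR) register.
+ */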
+#define SAR_MV88F5181_TCLK_FREQ 8
+#define SAR_MV88F5181_TCLK_FREQ_MASK 0x3
+
+static u32 __init mv88f5181_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_TCLK_FREQ) &
+ SAR_MV88F5181_TCLK_FREQ_MASK;
+ if (opt == 0)
+ return 133333333;
+ else if (opt == 1)
+ return 150000000;
+ else if (opt == 2)
+ return 166666667;
+ else
+ return 0;
+}
+
+#define SAR_MV88F5181_CPU_FREQ 4
+#define SAR_MV88F5181_CPU_FREQ_MASK 0xf
+
+static u32 __init mv88f5181_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0)
+ return 333333333;
+ else if (opt == 1 || opt == 2)
+ return 400000000;
+ else if (opt == 3)
+ return 500000000;
+ else
+ return 0;
+}
+
+static void __init mv88f5181_get_clk_ratio(void __iomem *sar, int id,
+ int *mult, int *div)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0 || opt == 1) {
+ *mult = 1;
+ *div = 2;
+ } else if (opt == 2 || opt == 3) {
+ *mult = 1;
+ *div = 3;
+ } else {
+ *mult = 0;
+ *div = 1;
+ }
+}
+
+static const struct coreclk_soc_desc mv88f5181_coreclks = {
+ .get_tclk_freq = mv88f5181_get_tclk_freq,
+ .get_cpu_freq = mv88f5181_get_cpu_freq,
+ .get_clk_ratio = mv88f5181_get_clk_ratio,
+ .ratios = orion_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
+};
+
+static void __init mv88f5181_clk_init(struct device_node *np)
+{
+ return mvebu_coreclk_setup(np, &mv88f5181_coreclks);
+}
+
+CLK_OF_DECLARE(mv88f5181_clk, "marvell,mv88f5181-core-clock", mv88f5181_clk_init);
+
+/*
* Orion 5182
*/
diff --git a/drivers/clk/nxp/clk-lpc18xx-creg.c b/drivers/clk/nxp/clk-lpc18xx-creg.c
index 9e35749dafdf..c6e802e7e6ec 100644
--- a/drivers/clk/nxp/clk-lpc18xx-creg.c
+++ b/drivers/clk/nxp/clk-lpc18xx-creg.c
@@ -184,7 +184,8 @@ static void __init lpc18xx_creg_clk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_creg_early_data);
}
-CLK_OF_DECLARE(lpc18xx_creg_clk, "nxp,lpc1850-creg-clk", lpc18xx_creg_clk_init);
+CLK_OF_DECLARE_DRIVER(lpc18xx_creg_clk, "nxp,lpc1850-creg-clk",
+ lpc18xx_creg_clk_init);
static struct clk *clk_creg[CREG_CLK_MAX];
static struct clk_onecell_data clk_creg_data = {
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 90d740a2fc0d..34c97353cdeb 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -1513,6 +1513,7 @@ static void __init lpc32xx_clk_init(struct device_node *np)
if (IS_ERR(clk_regmap)) {
pr_err("failed to regmap system control block: %ld\n",
PTR_ERR(clk_regmap));
+ iounmap(base);
return;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 95e3b3e0fa1c..0146d3c2547f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -87,6 +87,23 @@ config MSM_LCC_8960
Say Y if you want to use audio devices such as i2s, pcm,
SLIMBus, etc.
+config MDM_GCC_9615
+ tristate "MDM9615 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on mdm9615 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_LCC_9615
+ tristate "MDM9615 LPASS Clock Controller"
+ select MDM_GCC_9615
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the LPASS clock controller on mdm9615 devices.
+ Say Y if you want to use audio devices such as i2s, pcm,
+ SLIMBus, etc.
+
config MSM_MMCC_8960
tristate "MSM8960 Multimedia Clock Controller"
select MSM_GCC_8960
@@ -117,6 +134,7 @@ config MSM_MMCC_8974
config MSM_GCC_8996
tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8996 devices.
@@ -126,6 +144,7 @@ config MSM_GCC_8996
config MSM_MMCC_8996
tristate "MSM8996 Multimedia Clock Controller"
select MSM_GCC_8996
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8996 devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 2a25f4e75f49..1fb1f5476cb0 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -12,17 +12,20 @@ clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += reset.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+# Keep alphabetically sorted by config
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
-obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index a58ba39a900c..1c856d330733 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -101,14 +101,13 @@ EXPORT_SYMBOL_GPL(clk_disable_regmap);
* clk_regmap struct via this function so that the regmap is initialized
* and so that the clock is registered with the common clock framework.
*/
-struct clk *devm_clk_register_regmap(struct device *dev,
- struct clk_regmap *rclk)
+int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk)
{
if (dev && dev_get_regmap(dev, NULL))
rclk->regmap = dev_get_regmap(dev, NULL);
else if (dev && dev->parent)
rclk->regmap = dev_get_regmap(dev->parent, NULL);
- return devm_clk_register(dev, &rclk->hw);
+ return devm_clk_hw_register(dev, &rclk->hw);
}
EXPORT_SYMBOL_GPL(devm_clk_register_regmap);
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 491a63d537df..90d95cd11ec6 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -39,7 +39,6 @@ struct clk_regmap {
int clk_is_enabled_regmap(struct clk_hw *hw);
int clk_enable_regmap(struct clk_hw *hw);
void clk_disable_regmap(struct clk_hw *hw);
-struct clk *
-devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
+int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
#endif
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index f7c226ab4307..fffcbaf0fba7 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -27,8 +27,8 @@
struct qcom_cc {
struct qcom_reset_controller reset;
- struct clk_onecell_data data;
- struct clk *clks[];
+ struct clk_regmap **rclks;
+ size_t num_rclks;
};
const
@@ -102,8 +102,8 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
struct device_node *clocks_node;
struct clk_fixed_factor *factor;
struct clk_fixed_rate *fixed;
- struct clk *clk;
struct clk_init_data init_data = { };
+ int ret;
clocks_node = of_find_node_by_path("/clocks");
if (clocks_node)
@@ -121,9 +121,9 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
init_data.name = path;
init_data.ops = &clk_fixed_rate_ops;
- clk = devm_clk_register(dev, &fixed->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(dev, &fixed->hw);
+ if (ret)
+ return ret;
}
of_node_put(node);
@@ -141,9 +141,9 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
init_data.flags = 0;
init_data.ops = &clk_fixed_factor_ops;
- clk = devm_clk_register(dev, &factor->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(dev, &factor->hw);
+ if (ret)
+ return ret;
}
return 0;
@@ -174,42 +174,48 @@ int qcom_cc_register_sleep_clk(struct device *dev)
}
EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct qcom_cc *cc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= cc->num_rclks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+}
+
int qcom_cc_really_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
int i, ret;
struct device *dev = &pdev->dev;
- struct clk *clk;
- struct clk_onecell_data *data;
- struct clk **clks;
struct qcom_reset_controller *reset;
struct qcom_cc *cc;
struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
- cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
- GFP_KERNEL);
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
if (!cc)
return -ENOMEM;
- clks = cc->clks;
- data = &cc->data;
- data->clks = clks;
- data->clk_num = num_clks;
+ cc->rclks = rclks;
+ cc->num_rclks = num_clks;
for (i = 0; i < num_clks; i++) {
- if (!rclks[i]) {
- clks[i] = ERR_PTR(-ENOENT);
+ if (!rclks[i])
continue;
- }
- clk = devm_clk_register_regmap(dev, rclks[i]);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- clks[i] = clk;
+
+ ret = devm_clk_register_regmap(dev, rclks[i]);
+ if (ret)
+ return ret;
}
- ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 3cd1af0af0d9..b593065de8db 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -1332,7 +1332,6 @@ static struct platform_driver gcc_ipq4019_driver = {
.probe = gcc_ipq4019_probe,
.driver = {
.name = "qcom,gcc-ipq4019",
- .owner = THIS_MODULE,
.of_match_table = gcc_ipq4019_match_table,
},
};
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
new file mode 100644
index 000000000000..581a17f67379
--- /dev/null
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -0,0 +1,1727 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-mdm9615.h>
+#include <dt-bindings/reset/qcom,gcc-mdm9615.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_fixed_factor cxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "cxo",
+ .parent_names = (const char *[]){ "cxo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_pll pll0 = {
+ .l_reg = 0x30c4,
+ .m_reg = 0x30c8,
+ .n_reg = 0x30cc,
+ .config_reg = 0x30d4,
+ .mode_reg = 0x30c0,
+ .status_reg = 0x30d8,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll0",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll0_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll0_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_regmap pll4_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll4_vote",
+ .parent_names = (const char *[]){ "pll4" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll pll8 = {
+ .l_reg = 0x3144,
+ .m_reg = 0x3148,
+ .n_reg = 0x314c,
+ .config_reg = 0x3154,
+ .mode_reg = 0x3140,
+ .status_reg = 0x3158,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll8",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll8_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll8_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll pll14 = {
+ .l_reg = 0x31c4,
+ .m_reg = 0x31c8,
+ .n_reg = 0x31cc,
+ .config_reg = 0x31d4,
+ .mode_reg = 0x31c0,
+ .status_reg = 0x31d8,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll14",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll14_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll14_vote",
+ .parent_names = (const char *[]){ "pll14" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+enum {
+ P_CXO,
+ P_PLL8,
+ P_PLL14,
+};
+
+static const struct parent_map gcc_cxo_pll8_map[] = {
+ { P_CXO, 0 },
+ { P_PLL8, 3 }
+};
+
+static const char * const gcc_cxo_pll8[] = {
+ "cxo",
+ "pll8_vote",
+};
+
+static const struct parent_map gcc_cxo_pll14_map[] = {
+ { P_CXO, 0 },
+ { P_PLL14, 4 }
+};
+
+static const char * const gcc_cxo_pll14[] = {
+ "cxo",
+ "pll14_vote",
+};
+
+static const struct parent_map gcc_cxo_map[] = {
+ { P_CXO, 0 },
+};
+
+static const char * const gcc_cxo[] = {
+ "cxo",
+};
+
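+/* Frequency table entries are { rate, source, pre-divider, M, N } */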
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
+ { 1843200, P_PLL8, 2, 6, 625 },
+ { 3686400, P_PLL8, 2, 12, 625 },
+ { 7372800, P_PLL8, 2, 24, 625 },
+ { 14745600, P_PLL8, 2, 48, 625 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 40000000, P_PLL8, 1, 5, 48 },
+ { 46400000, P_PLL8, 1, 29, 240 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { 56000000, P_PLL8, 1, 7, 48 },
+ { 58982400, P_PLL8, 1, 96, 625 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { }
+};
+
+static struct clk_rcg gsbi1_uart_src = {
+ .ns_reg = 0x29d4,
+ .md_reg = 0x29d0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi1_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+ .ns_reg = 0x29f4,
+ .md_reg = 0x29f0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi2_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_uart_src = {
+ .ns_reg = 0x2a14,
+ .md_reg = 0x2a10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi3_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+ .ns_reg = 0x2a34,
+ .md_reg = 0x2a30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi4_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+ .ns_reg = 0x2a54,
+ .md_reg = 0x2a50,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi5_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 960000, P_CXO, 4, 1, 5 },
+ { 4800000, P_CXO, 4, 0, 1 },
+ { 9600000, P_CXO, 2, 0, 1 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+ .ns_reg = 0x29cc,
+ .md_reg = 0x29c8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_clk",
+ .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+ .ns_reg = 0x29ec,
+ .md_reg = 0x29e8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_clk",
+ .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_qup_src = {
+ .ns_reg = 0x2a0c,
+ .md_reg = 0x2a08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_clk",
+ .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+ .ns_reg = 0x2a2c,
+ .md_reg = 0x2a28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_clk",
+ .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+ .ns_reg = 0x2a4c,
+ .md_reg = 0x2a48,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_clk",
+ .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_gp[] = {
+ { 9600000, P_CXO, 2, 0, 0 },
+ { 19200000, P_CXO, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg gp0_src = {
+ .ns_reg = 0x2d24,
+ .md_reg = 0x2d00,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_src",
+ .parent_names = gcc_cxo,
+ .num_parents = 1,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp0_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_clk",
+ .parent_names = (const char *[]){ "gp0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp1_src = {
+ .ns_reg = 0x2d44,
+ .md_reg = 0x2d40,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_src",
+ .parent_names = gcc_cxo,
+ .num_parents = 1,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp1_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_clk",
+ .parent_names = (const char *[]){ "gp1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp2_src = {
+ .ns_reg = 0x2d64,
+ .md_reg = 0x2d60,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_src",
+ .parent_names = gcc_cxo,
+ .num_parents = 1,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_clk",
+ .parent_names = (const char *[]){ "gp2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch pmem_clk = {
+ .hwcg_reg = 0x25a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x25a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmem_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_rcg prng_src = {
+ .ns_reg = 0x2e80,
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch prng_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_clk",
+ .parent_names = (const char *[]){ "prng_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+ { 144000, P_CXO, 1, 1, 133 },
+ { 400000, P_PLL8, 4, 1, 240 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 17070000, P_PLL8, 1, 2, 45 },
+ { 20210000, P_PLL8, 1, 1, 19 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 38400000, P_PLL8, 2, 1, 5 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 64000000, P_PLL8, 3, 1, 2 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { }
+};
+
+static struct clk_rcg sdc1_src = {
+ .ns_reg = 0x282c,
+ .md_reg = 0x2828,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc1_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_clk",
+ .parent_names = (const char *[]){ "sdc1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc2_src = {
+ .ns_reg = 0x284c,
+ .md_reg = 0x2848,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc2_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_clk",
+ .parent_names = (const char *[]){ "sdc2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_src = {
+ .ns_reg = 0x290c,
+ .md_reg = 0x2908,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_clk",
+ .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_hsic_xcvr_fs_src = {
+ .ns_reg = 0x2928,
+ .md_reg = 0x2924,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hsic_xcvr_fs_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_clk",
+ .parent_names =
+ (const char *[]){ "usb_hsic_xcvr_fs_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb_hs1_system[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_system_src = {
+ .ns_reg = 0x36a4,
+ .md_reg = 0x36a0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb_hs1_system,
+ .clkr = {
+ .enable_reg = 0x36a4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_system_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_system_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x36a4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .parent_names =
+ (const char *[]){ "usb_hs1_system_src" },
+ .num_parents = 1,
+ .name = "usb_hs1_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb_hsic_system[] = {
+ { 64000000, P_PLL8, 1, 1, 6 },
+ { }
+};
+
+static struct clk_rcg usb_hsic_system_src = {
+ .ns_reg = 0x2b58,
+ .md_reg = 0x2b54,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb_hsic_system,
+ .clkr = {
+ .enable_reg = 0x2b58,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_system_src",
+ .parent_names = gcc_cxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hsic_system_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2b58,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .parent_names =
+ (const char *[]){ "usb_hsic_system_src" },
+ .num_parents = 1,
+ .name = "usb_hsic_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb_hsic_hsic[] = {
+ { 48000000, P_PLL14, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg usb_hsic_hsic_src = {
+ .ns_reg = 0x2b50,
+ .md_reg = 0x2b4c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_cxo_pll14_map,
+ },
+ .freq_tbl = clk_tbl_usb_hsic_hsic,
+ .clkr = {
+ .enable_reg = 0x2b50,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_hsic_src",
+ .parent_names = gcc_cxo_pll14,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hsic_hsic_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2b50,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "usb_hsic_hsic_src" },
+ .num_parents = 1,
+ .name = "usb_hsic_hsic_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_hsio_cal_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2b48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .name = "usb_hsic_hsio_cal_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch ce1_core_clk = {
+ .hwcg_reg = 0x2724,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2724,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_core_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch ce1_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2720,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch dma_bam_h_clk = {
+ .hwcg_reg = 0x25c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x25c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "dma_bam_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_h_clk = {
+ .hwcg_reg = 0x29c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x29c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+ .hwcg_reg = 0x29e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x29e0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_h_clk = {
+ .hwcg_reg = 0x2a00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2a00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+ .hwcg_reg = 0x2a20,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2a20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+ .hwcg_reg = 0x2a40,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2a40,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+ .hwcg_reg = 0x2900,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2900,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 28,
+ .clkr = {
+ .enable_reg = 0x2920,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch sdc1_h_clk = {
+ .hwcg_reg = 0x2820,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2820,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch sdc2_h_clk = {
+ .hwcg_reg = 0x2840,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2840,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch adm0_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+ .hwcg_reg = 0x2208,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_pbus_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb0_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb1_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_ssbi2_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+ .hwcg_reg = 0x27e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "rpm_msg_ram_h_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
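+/*
+ * Clocks that are not regmap-backed (only the cxo fixed factor here) are
+ * registered directly in probe via devm_clk_hw_register().
+ */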
+static struct clk_hw *gcc_mdm9615_hws[] = {
+ &cxo.hw,
+};
+
+static struct clk_regmap *gcc_mdm9615_clks[] = {
+ [PLL0] = &pll0.clkr,
+ [PLL0_VOTE] = &pll0_vote,
+ [PLL4_VOTE] = &pll4_vote,
+ [PLL8] = &pll8.clkr,
+ [PLL8_VOTE] = &pll8_vote,
+ [PLL14] = &pll14.clkr,
+ [PLL14_VOTE] = &pll14_vote,
+ [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+ [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+ [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+ [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+ [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+ [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+ [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+ [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+ [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+ [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+ [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+ [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+ [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+ [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+ [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+ [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+ [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+ [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+ [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+ [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+ [GP0_SRC] = &gp0_src.clkr,
+ [GP0_CLK] = &gp0_clk.clkr,
+ [GP1_SRC] = &gp1_src.clkr,
+ [GP1_CLK] = &gp1_clk.clkr,
+ [GP2_SRC] = &gp2_src.clkr,
+ [GP2_CLK] = &gp2_clk.clkr,
+ [PMEM_A_CLK] = &pmem_clk.clkr,
+ [PRNG_SRC] = &prng_src.clkr,
+ [PRNG_CLK] = &prng_clk.clkr,
+ [SDC1_SRC] = &sdc1_src.clkr,
+ [SDC1_CLK] = &sdc1_clk.clkr,
+ [SDC2_SRC] = &sdc2_src.clkr,
+ [SDC2_CLK] = &sdc2_clk.clkr,
+ [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+ [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+ [USB_HS1_SYSTEM_CLK_SRC] = &usb_hs1_system_src.clkr,
+ [USB_HS1_SYSTEM_CLK] = &usb_hs1_system_clk.clkr,
+ [USB_HSIC_XCVR_FS_SRC] = &usb_hsic_xcvr_fs_src.clkr,
+ [USB_HSIC_XCVR_FS_CLK] = &usb_hsic_xcvr_fs_clk.clkr,
+ [USB_HSIC_SYSTEM_CLK_SRC] = &usb_hsic_system_src.clkr,
+ [USB_HSIC_SYSTEM_CLK] = &usb_hsic_system_clk.clkr,
+ [USB_HSIC_HSIC_CLK_SRC] = &usb_hsic_hsic_src.clkr,
+ [USB_HSIC_HSIC_CLK] = &usb_hsic_hsic_clk.clkr,
+ [USB_HSIC_HSIO_CAL_CLK] = &usb_hsic_hsio_cal_clk.clkr,
+ [CE1_CORE_CLK] = &ce1_core_clk.clkr,
+ [CE1_H_CLK] = &ce1_h_clk.clkr,
+ [DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+ [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+ [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+ [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+ [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+ [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+ [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+ [USB_HSIC_H_CLK] = &usb_hsic_h_clk.clkr,
+ [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+ [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+ [ADM0_CLK] = &adm0_clk.clkr,
+ [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+ [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+ [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+ [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+ [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_mdm9615_resets[] = {
+ [DMA_BAM_RESET] = { 0x25c0, 7 },
+ [CE1_H_RESET] = { 0x2720, 7 },
+ [CE1_CORE_RESET] = { 0x2724, 7 },
+ [SDC1_RESET] = { 0x2830 },
+ [SDC2_RESET] = { 0x2850 },
+ [ADM0_C2_RESET] = { 0x220c, 4 },
+ [ADM0_C1_RESET] = { 0x220c, 3 },
+ [ADM0_C0_RESET] = { 0x220c, 2 },
+ [ADM0_PBUS_RESET] = { 0x220c, 1 },
+ [ADM0_RESET] = { 0x220c },
+ [USB_HS1_RESET] = { 0x2910 },
+ [USB_HSIC_RESET] = { 0x2934 },
+ [GSBI1_RESET] = { 0x29dc },
+ [GSBI2_RESET] = { 0x29fc },
+ [GSBI3_RESET] = { 0x2a1c },
+ [GSBI4_RESET] = { 0x2a3c },
+ [GSBI5_RESET] = { 0x2a5c },
+ [PDM_RESET] = { 0x2cc0, 12 },
+};
+
+static const struct regmap_config gcc_mdm9615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3660,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_mdm9615_desc = {
+ .config = &gcc_mdm9615_regmap_config,
+ .clks = gcc_mdm9615_clks,
+ .num_clks = ARRAY_SIZE(gcc_mdm9615_clks),
+ .resets = gcc_mdm9615_resets,
+ .num_resets = ARRAY_SIZE(gcc_mdm9615_resets),
+};
+
+static const struct of_device_id gcc_mdm9615_match_table[] = {
+ { .compatible = "qcom,gcc-mdm9615" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_mdm9615_match_table);
+
+static int gcc_mdm9615_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret;
+ int i;
+
+ regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ for (i = 0; i < ARRAY_SIZE(gcc_mdm9615_hws); i++) {
+ ret = devm_clk_hw_register(dev, gcc_mdm9615_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
+}
+
+static struct platform_driver gcc_mdm9615_driver = {
+ .probe = gcc_mdm9615_probe,
+ .driver = {
+ .name = "gcc-mdm9615",
+ .of_match_table = gcc_mdm9615_match_table,
+ },
+};
+
+static int __init gcc_mdm9615_init(void)
+{
+ return platform_driver_register(&gcc_mdm9615_driver);
+}
+core_initcall(gcc_mdm9615_init);
+
+static void __exit gcc_mdm9615_exit(void)
+{
+ platform_driver_unregister(&gcc_mdm9615_driver);
+}
+module_exit(gcc_mdm9615_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MDM9615 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-mdm9615");
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index bbf732bbc3fd..fe03e6fbc7df 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2592,9 +2592,9 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
};
static struct clk_branch gcc_pcie_2_pipe_clk = {
- .halt_reg = 0x6e108,
+ .halt_reg = 0x6e018,
.clkr = {
- .enable_reg = 0x6e108,
+ .enable_reg = 0x6e018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_pipe_clk",
@@ -3329,6 +3329,8 @@ static const struct qcom_reset_map gcc_msm8996_resets[] = {
[GCC_USB_20_BCR] = { 0x12000 },
[GCC_QUSB2PHY_PRIM_BCR] = { 0x12038 },
[GCC_QUSB2PHY_SEC_BCR] = { 0x1203c },
+ [GCC_USB3_PHY_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x50024 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
[GCC_SDCC1_BCR] = { 0x13000 },
[GCC_SDCC2_BCR] = { 0x14000 },
@@ -3404,6 +3406,8 @@ static const struct qcom_reset_map gcc_msm8996_resets[] = {
[GCC_PCIE_2_BCR] = { 0x6e000 },
[GCC_PCIE_2_PHY_BCR] = { 0x6e038 },
[GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f014 },
+ [GCC_PCIE_PHY_COM_NOCSR_BCR] = { 0x6f00c },
[GCC_DCD_BCR] = { 0x70000 },
[GCC_OBT_ODT_BCR] = { 0x73000 },
[GCC_UFS_BCR] = { 0x75000 },
@@ -3447,9 +3451,8 @@ MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table);
static int gcc_msm8996_probe(struct platform_device *pdev)
{
- struct clk *clk;
struct device *dev = &pdev->dev;
- int i;
+ int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
@@ -3463,9 +3466,9 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
- clk = devm_clk_register(dev, gcc_msm8996_hws[i]);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(dev, gcc_msm8996_hws[i]);
+ if (ret)
+ return ret;
}
return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
diff --git a/drivers/clk/qcom/lcc-mdm9615.c b/drivers/clk/qcom/lcc-mdm9615.c
new file mode 100644
index 000000000000..3237ef4c1197
--- /dev/null
+++ b/drivers/clk/qcom/lcc-mdm9615.c
@@ -0,0 +1,580 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-mdm9615.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+ .l_reg = 0x4,
+ .m_reg = 0x8,
+ .n_reg = 0xc,
+ .config_reg = 0x14,
+ .mode_reg = 0x0,
+ .status_reg = 0x18,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll4",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+enum {
+ P_CXO,
+ P_PLL4,
+};
+
+static const struct parent_map lcc_cxo_pll4_map[] = {
+ { P_CXO, 0 },
+ { P_PLL4, 2 }
+};
+
+static const char * const lcc_cxo_pll4[] = {
+ "cxo",
+ "pll4_vote",
+};
+
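+/*
+ * Audio OSR frequency tables: the _492 table assumes PLL4 running at
+ * 491.52 MHz, the _393 table assumes 393.216 MHz.
+ */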
+static struct freq_tbl clk_tbl_aif_osr_492[] = {
+ { 512000, P_PLL4, 4, 1, 240 },
+ { 768000, P_PLL4, 4, 1, 160 },
+ { 1024000, P_PLL4, 4, 1, 120 },
+ { 1536000, P_PLL4, 4, 1, 80 },
+ { 2048000, P_PLL4, 4, 1, 60 },
+ { 3072000, P_PLL4, 4, 1, 40 },
+ { 4096000, P_PLL4, 4, 1, 30 },
+ { 6144000, P_PLL4, 4, 1, 20 },
+ { 8192000, P_PLL4, 4, 1, 15 },
+ { 12288000, P_PLL4, 4, 1, 10 },
+ { 24576000, P_PLL4, 4, 1, 5 },
+ { 27000000, P_CXO, 1, 0, 0 },
+ { }
+};
+
+static struct freq_tbl clk_tbl_aif_osr_393[] = {
+ { 512000, P_PLL4, 4, 1, 192 },
+ { 768000, P_PLL4, 4, 1, 128 },
+ { 1024000, P_PLL4, 4, 1, 96 },
+ { 1536000, P_PLL4, 4, 1, 64 },
+ { 2048000, P_PLL4, 4, 1, 48 },
+ { 3072000, P_PLL4, 4, 1, 32 },
+ { 4096000, P_PLL4, 4, 1, 24 },
+ { 6144000, P_PLL4, 4, 1, 16 },
+ { 8192000, P_PLL4, 4, 1, 12 },
+ { 12288000, P_PLL4, 4, 1, 8 },
+ { 24576000, P_PLL4, 4, 1, 4 },
+ { 27000000, P_CXO, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+ .ns_reg = 0x48,
+ .md_reg = 0x4c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = lcc_cxo_pll4_map,
+ },
+ .freq_tbl = clk_tbl_aif_osr_393,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "mi2s_osr_src",
+ .parent_names = lcc_cxo_pll4,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ },
+};
+
+static const char * const lcc_mi2s_parents[] = {
+ "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+ .halt_reg = 0x50,
+ .halt_bit = 1,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "mi2s_osr_clk",
+ .parent_names = lcc_mi2s_parents,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+ .reg = 0x48,
+ .shift = 10,
+ .width = 4,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "mi2s_div_clk",
+ .parent_names = lcc_mi2s_parents,
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+ .halt_reg = 0x50,
+ .halt_bit = 0,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "mi2s_bit_div_clk",
+ .parent_names = (const char *[]){ "mi2s_div_clk" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+ .reg = 0x48,
+ .shift = 14,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mi2s_bit_clk",
+ .parent_names = (const char *[]){
+ "mi2s_bit_div_clk",
+ "mi2s_codec_clk",
+ },
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
+static struct clk_rcg prefix##_osr_src = { \
+ .ns_reg = _ns, \
+ .md_reg = _md, \
+ .mn = { \
+ .mnctr_en_bit = 8, \
+ .mnctr_reset_bit = 7, \
+ .mnctr_mode_shift = 5, \
+ .n_val_shift = 24, \
+ .m_val_shift = 8, \
+ .width = 8, \
+ }, \
+ .p = { \
+ .pre_div_shift = 3, \
+ .pre_div_width = 2, \
+ }, \
+ .s = { \
+ .src_sel_shift = 0, \
+ .parent_map = lcc_cxo_pll4_map, \
+ }, \
+ .freq_tbl = clk_tbl_aif_osr_393, \
+ .clkr = { \
+ .enable_reg = _ns, \
+ .enable_mask = BIT(9), \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #prefix "_osr_src", \
+ .parent_names = lcc_cxo_pll4, \
+ .num_parents = 2, \
+ .ops = &clk_rcg_ops, \
+ .flags = CLK_SET_RATE_GATE, \
+ }, \
+ }, \
+}; \
+ \
+static const char * const lcc_##prefix##_parents[] = { \
+ #prefix "_osr_src", \
+}; \
+ \
+static struct clk_branch prefix##_osr_clk = { \
+ .halt_reg = hr, \
+ .halt_bit = 1, \
+ .halt_check = BRANCH_HALT_ENABLE, \
+ .clkr = { \
+ .enable_reg = _ns, \
+ .enable_mask = BIT(21), \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #prefix "_osr_clk", \
+ .parent_names = lcc_##prefix##_parents, \
+ .num_parents = 1, \
+ .ops = &clk_branch_ops, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }, \
+ }, \
+}; \
+ \
+static struct clk_regmap_div prefix##_div_clk = { \
+ .reg = _ns, \
+ .shift = 10, \
+ .width = 8, \
+ .clkr = { \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #prefix "_div_clk", \
+ .parent_names = lcc_##prefix##_parents, \
+ .num_parents = 1, \
+ .ops = &clk_regmap_div_ops, \
+ }, \
+ }, \
+}; \
+ \
+static struct clk_branch prefix##_bit_div_clk = { \
+ .halt_reg = hr, \
+ .halt_bit = 0, \
+ .halt_check = BRANCH_HALT_ENABLE, \
+ .clkr = { \
+ .enable_reg = _ns, \
+ .enable_mask = BIT(19), \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #prefix "_bit_div_clk", \
+ .parent_names = (const char *[]){ \
+ #prefix "_div_clk" \
+ }, \
+ .num_parents = 1, \
+ .ops = &clk_branch_ops, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }, \
+ }, \
+}; \
+ \
+static struct clk_regmap_mux prefix##_bit_clk = { \
+ .reg = _ns, \
+ .shift = 18, \
+ .width = 1, \
+ .clkr = { \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #prefix "_bit_clk", \
+ .parent_names = (const char *[]){ \
+ #prefix "_bit_div_clk", \
+ #prefix "_codec_clk", \
+ }, \
+ .num_parents = 2, \
+ .ops = &clk_regmap_mux_closest_ops, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }, \
+ }, \
+}
+
+CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
+CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
+CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
+CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
+
+static struct freq_tbl clk_tbl_pcm_492[] = {
+ { 256000, P_PLL4, 4, 1, 480 },
+ { 512000, P_PLL4, 4, 1, 240 },
+ { 768000, P_PLL4, 4, 1, 160 },
+ { 1024000, P_PLL4, 4, 1, 120 },
+ { 1536000, P_PLL4, 4, 1, 80 },
+ { 2048000, P_PLL4, 4, 1, 60 },
+ { 3072000, P_PLL4, 4, 1, 40 },
+ { 4096000, P_PLL4, 4, 1, 30 },
+ { 6144000, P_PLL4, 4, 1, 20 },
+ { 8192000, P_PLL4, 4, 1, 15 },
+ { 12288000, P_PLL4, 4, 1, 10 },
+ { 24576000, P_PLL4, 4, 1, 5 },
+ { 27000000, P_CXO, 1, 0, 0 },
+ { }
+};
+
+static struct freq_tbl clk_tbl_pcm_393[] = {
+ { 256000, P_PLL4, 4, 1, 384 },
+ { 512000, P_PLL4, 4, 1, 192 },
+ { 768000, P_PLL4, 4, 1, 128 },
+ { 1024000, P_PLL4, 4, 1, 96 },
+ { 1536000, P_PLL4, 4, 1, 64 },
+ { 2048000, P_PLL4, 4, 1, 48 },
+ { 3072000, P_PLL4, 4, 1, 32 },
+ { 4096000, P_PLL4, 4, 1, 24 },
+ { 6144000, P_PLL4, 4, 1, 16 },
+ { 8192000, P_PLL4, 4, 1, 12 },
+ { 12288000, P_PLL4, 4, 1, 8 },
+ { 24576000, P_PLL4, 4, 1, 4 },
+ { 27000000, P_CXO, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg pcm_src = {
+ .ns_reg = 0x54,
+ .md_reg = 0x58,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = lcc_cxo_pll4_map,
+ },
+ .freq_tbl = clk_tbl_pcm_393,
+ .clkr = {
+ .enable_reg = 0x54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcm_src",
+ .parent_names = lcc_cxo_pll4,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ },
+};
+
+static struct clk_branch pcm_clk_out = {
+ .halt_reg = 0x5c,
+ .halt_bit = 0,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0x54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcm_clk_out",
+ .parent_names = (const char *[]){ "pcm_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+ .reg = 0x54,
+ .shift = 10,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "pcm_clk",
+ .parent_names = (const char *[]){
+ "pcm_clk_out",
+ "pcm_codec_clk",
+ },
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg slimbus_src = {
+ .ns_reg = 0xcc,
+ .md_reg = 0xd0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = lcc_cxo_pll4_map,
+ },
+ .freq_tbl = clk_tbl_aif_osr_393,
+ .clkr = {
+ .enable_reg = 0xcc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "slimbus_src",
+ .parent_names = lcc_cxo_pll4,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ },
+};
+
+static const char * const lcc_slimbus_parents[] = {
+ "slimbus_src",
+};
+
+static struct clk_branch audio_slimbus_clk = {
+ .halt_reg = 0xd4,
+ .halt_bit = 0,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0xcc,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "audio_slimbus_clk",
+ .parent_names = lcc_slimbus_parents,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch sps_slimbus_clk = {
+ .halt_reg = 0xd4,
+ .halt_bit = 1,
+ .halt_check = BRANCH_HALT_ENABLE,
+ .clkr = {
+ .enable_reg = 0xcc,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "sps_slimbus_clk",
+ .parent_names = lcc_slimbus_parents,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap *lcc_mdm9615_clks[] = {
+ [PLL4] = &pll4.clkr,
+ [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+ [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+ [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+ [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+ [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+ [PCM_SRC] = &pcm_src.clkr,
+ [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+ [PCM_CLK] = &pcm_clk.clkr,
+ [SLIMBUS_SRC] = &slimbus_src.clkr,
+ [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
+ [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
+ [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
+ [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
+ [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
+ [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
+ [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
+ [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
+ [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
+ [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
+ [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
+ [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
+ [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
+ [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
+ [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
+ [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
+ [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
+ [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
+ [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
+ [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
+ [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
+ [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
+};
+
+static const struct regmap_config lcc_mdm9615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xfc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc lcc_mdm9615_desc = {
+ .config = &lcc_mdm9615_regmap_config,
+ .clks = lcc_mdm9615_clks,
+ .num_clks = ARRAY_SIZE(lcc_mdm9615_clks),
+};
+
+static const struct of_device_id lcc_mdm9615_match_table[] = {
+ { .compatible = "qcom,lcc-mdm9615" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lcc_mdm9615_match_table);
+
+static int lcc_mdm9615_probe(struct platform_device *pdev)
+{
+ u32 val;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &lcc_mdm9615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Use the correct frequency plan depending on speed of PLL4 */
+ regmap_read(regmap, 0x4, &val);
+ if (val == 0x12) {
+ slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
+ mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+ pcm_src.freq_tbl = clk_tbl_pcm_492;
+ }
+ /* Enable PLL4 source on the LPASS Primary PLL Mux */
+ regmap_write(regmap, 0xc4, 0x1);
+
+ return qcom_cc_really_probe(pdev, &lcc_mdm9615_desc, regmap);
+}
+
+static struct platform_driver lcc_mdm9615_driver = {
+ .probe = lcc_mdm9615_probe,
+ .driver = {
+ .name = "lcc-mdm9615",
+ .of_match_table = lcc_mdm9615_match_table,
+ },
+};
+module_platform_driver(lcc_mdm9615_driver);
+
+MODULE_DESCRIPTION("QCOM LCC MDM9615 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-mdm9615");
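The driver above carries two frequency tables per M/N counter and picks between them at probe time by reading PLL4's L register (a value of 0x12 selects the *_492 tables). Each row follows the usual RCG relation rate = parent / pre_div * m / n; the 393.216 MHz and 491.52 MHz PLL4 rates used below are assumptions inferred from the tables themselves, not values stated in the patch. A small sketch of that arithmetic:

/*
 * Sketch, not driver code: the rate relation the freq_tbl rows encode.
 * n == 0 marks a bypass row (no M/N counter), as in the CXO entries.
 */
static unsigned long example_rcg_rate(unsigned long parent,
				      unsigned int pre_div,
				      unsigned int m, unsigned int n)
{
	if (!n)
		return parent / pre_div;

	return (unsigned long)((unsigned long long)parent / pre_div * m / n);
}

/*
 * Examples, using the assumed PLL4 rates:
 *   example_rcg_rate(393216000, 4, 1, 4) == 24576000  ({ 24576000, P_PLL4, 4, 1, 4 } in clk_tbl_aif_osr_393)
 *   example_rcg_rate(491520000, 4, 1, 5) == 24576000  ({ 24576000, P_PLL4, 4, 1, 5 } in clk_tbl_aif_osr_492)
 */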
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 847dd9dadeca..ca97e1151797 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2888,6 +2888,14 @@ static struct clk_hw *mmcc_msm8996_hws[] = {
&gpll0_div.hw,
};
+static struct gdsc mmagic_bimc_gdsc = {
+ .gdscr = 0x529c,
+ .pd = {
+ .name = "mmagic_bimc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc mmagic_video_gdsc = {
.gdscr = 0x119c,
.gds_hw_ctrl = 0x120c,
@@ -3201,6 +3209,7 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
};
static struct gdsc *mmcc_msm8996_gdscs[] = {
+ [MMAGIC_BIMC_GDSC] = &mmagic_bimc_gdsc,
[MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
[MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
[MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
@@ -3305,9 +3314,8 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);
static int mmcc_msm8996_probe(struct platform_device *pdev)
{
- struct clk *clk;
struct device *dev = &pdev->dev;
- int i;
+ int i, ret;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
@@ -3320,9 +3328,9 @@ static int mmcc_msm8996_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x5054, BIT(15), 0);
for (i = 0; i < ARRAY_SIZE(mmcc_msm8996_hws); i++) {
- clk = devm_clk_register(dev, mmcc_msm8996_hws[i]);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(dev, mmcc_msm8996_hws[i]);
+ if (ret)
+ return ret;
}
return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5093a250650d..9375777776d9 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -167,7 +167,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
unsigned int i;
group = kzalloc(sizeof(*group), GFP_KERNEL);
- clks = kmalloc(MSTP_MAX_CLOCKS * sizeof(*clks), GFP_KERNEL);
+ clks = kmalloc_array(MSTP_MAX_CLOCKS, sizeof(*clks), GFP_KERNEL);
if (group == NULL || clks == NULL) {
kfree(group);
kfree(clks);
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index f6312c62f16b..5adb934326d1 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -25,10 +25,31 @@ struct rz_cpg {
#define CPG_FRQCR 0x10
#define CPG_FRQCR2 0x14
+#define PPR0 0xFCFE3200
+#define PIBC0 0xFCFE7000
+
+#define MD_CLK(x) ((x >> 2) & 1) /* P0_2 */
+
/* -----------------------------------------------------------------------------
* Initialization
*/
+static u16 __init rz_cpg_read_mode_pins(void)
+{
+ void __iomem *ppr0, *pibc0;
+ u16 modes;
+
+ ppr0 = ioremap_nocache(PPR0, 2);
+ pibc0 = ioremap_nocache(PIBC0, 2);
+ BUG_ON(!ppr0 || !pibc0);
+ iowrite16(4, pibc0); /* enable input buffer */
+ modes = ioread16(ppr0);
+ iounmap(ppr0);
+ iounmap(pibc0);
+
+ return modes;
+}
+
static struct clk * __init
rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
{
@@ -37,8 +58,7 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
static const unsigned frqcr_tab[4] = { 3, 2, 0, 1 };
if (strcmp(name, "pll") == 0) {
- /* FIXME: cpg_mode should be read from GPIO. But no GPIO support yet */
- unsigned cpg_mode = 0; /* hardcoded to EXTAL for now */
+ unsigned int cpg_mode = MD_CLK(rz_cpg_read_mode_pins());
const char *parent_name = of_clk_get_parent_name(np, cpg_mode);
mult = cpg_mode ? (32 / 4) : 30;
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index e38bf60c0ff4..f255e451e8ca 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -123,6 +123,10 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S3D1),
+ DEF_MOD("cmt3", 300, R8A7795_CLK_R),
+ DEF_MOD("cmt2", 301, R8A7795_CLK_R),
+ DEF_MOD("cmt1", 302, R8A7795_CLK_R),
+ DEF_MOD("cmt0", 303, R8A7795_CLK_R),
DEF_MOD("scif2", 310, R8A7795_CLK_S3D4),
DEF_MOD("sdif3", 311, R8A7795_CLK_SD3),
DEF_MOD("sdif2", 312, R8A7795_CLK_SD2),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index c84b549c14d2..eb347ed265f2 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -45,6 +45,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RINT,
/* Module Clocks */
MOD_CLK_BASE
@@ -69,6 +70,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -92,13 +94,42 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1),
+ DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, CLK_SDSRC, 0x0268),
+ DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x026c),
+
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
+ DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_BASE("r", R8A7796_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("cmt3", 300, R8A7796_CLK_R),
+ DEF_MOD("cmt2", 301, R8A7796_CLK_R),
+ DEF_MOD("cmt1", 302, R8A7796_CLK_R),
+ DEF_MOD("cmt0", 303, R8A7796_CLK_R),
DEF_MOD("scif2", 310, R8A7796_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A7796_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A7796_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A7796_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A7796_CLK_SD0),
+ DEF_MOD("rwdt0", 402, R8A7796_CLK_R),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A7796_CLK_CP),
+ DEF_MOD("etheravb", 812, R8A7796_CLK_S0D6),
+ DEF_MOD("gpio7", 905, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A7796_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A7796_CLK_S3D4),
};
static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index f47a2fa962d2..b5f2c8ed12e1 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-pll.o
obj-y += clk-cpu.o
obj-y += clk-inverter.o
obj-y += clk-mmc-phase.o
+obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-y += clk-rk3036.o
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
new file mode 100644
index 000000000000..8feba93672c5
--- /dev/null
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <soc/rockchip/rockchip_sip.h>
+#include "clk.h"
+
+struct rockchip_ddrclk {
+ struct clk_hw hw;
+ void __iomem *reg_base;
+ int mux_offset;
+ int mux_shift;
+ int mux_width;
+ int div_shift;
+ int div_width;
+ int ddr_flag;
+ spinlock_t *lock;
+};
+
+#define to_rockchip_ddrclk_hw(hw) container_of(hw, struct rockchip_ddrclk, hw)
+
+static int rockchip_ddrclk_sip_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
+ unsigned long flags;
+ struct arm_smccc_res res;
+
+ spin_lock_irqsave(ddrclk->lock, flags);
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, drate, 0,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE,
+ 0, 0, 0, 0, &res);
+ spin_unlock_irqrestore(ddrclk->lock, flags);
+
+ return res.a0;
+}
+
+static unsigned long
+rockchip_ddrclk_sip_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+ ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE,
+ 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0,
+ ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE,
+ 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
+{
+ struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 val;
+
+ val = clk_readl(ddrclk->reg_base +
+ ddrclk->mux_offset) >> ddrclk->mux_shift;
+ val &= GENMASK(ddrclk->mux_width - 1, 0);
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static const struct clk_ops rockchip_ddrclk_sip_ops = {
+ .recalc_rate = rockchip_ddrclk_sip_recalc_rate,
+ .set_rate = rockchip_ddrclk_sip_set_rate,
+ .round_rate = rockchip_ddrclk_sip_round_rate,
+ .get_parent = rockchip_ddrclk_get_parent,
+};
+
+struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
+ const char *const *parent_names,
+ u8 num_parents, int mux_offset,
+ int mux_shift, int mux_width,
+ int div_shift, int div_width,
+ int ddr_flag, void __iomem *reg_base,
+ spinlock_t *lock)
+{
+ struct rockchip_ddrclk *ddrclk;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ ddrclk = kzalloc(sizeof(*ddrclk), GFP_KERNEL);
+ if (!ddrclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ init.flags = flags;
+ init.flags |= CLK_SET_RATE_NO_REPARENT;
+
+ switch (ddr_flag) {
+ case ROCKCHIP_DDRCLK_SIP:
+ init.ops = &rockchip_ddrclk_sip_ops;
+ break;
+ default:
+ pr_err("%s: unsupported ddrclk type %d\n", __func__, ddr_flag);
+ kfree(ddrclk);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ddrclk->reg_base = reg_base;
+ ddrclk->lock = lock;
+ ddrclk->hw.init = &init;
+ ddrclk->mux_offset = mux_offset;
+ ddrclk->mux_shift = mux_shift;
+ ddrclk->mux_width = mux_width;
+ ddrclk->div_shift = div_shift;
+ ddrclk->div_width = div_width;
+ ddrclk->ddr_flag = ddr_flag;
+
+ clk = clk_register(NULL, &ddrclk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: could not register ddrclk %s\n", __func__, name);
+ kfree(ddrclk);
+ return NULL;
+ }
+
+ return clk;
+}
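The new ddrclk type never programs a divider or mux register itself for rate changes: a clk_set_rate() on it lands in rockchip_ddrclk_sip_set_rate(), which traps into the bl31 firmware over SMCCC and returns whatever the firmware reports in res.a0. A hedged consumer-side sketch of how such a clock would be driven (the device handle and the "dmc_clk" lookup name are illustrative assumptions, not taken from this patch):

/*
 * Usage sketch only: a hypothetical DRAM-frequency consumer asking the
 * clock framework for a new rate; the request ends up in the SIP call
 * implemented by rockchip_ddrclk_sip_set_rate() above.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_set_dram_rate(struct device *dev, unsigned long hz)
{
	struct clk *ddr;

	ddr = devm_clk_get(dev, "dmc_clk");	/* assumed lookup name */
	if (IS_ERR(ddr))
		return PTR_ERR(ddr);

	/* Forwarded via clk_ops to the secure firmware. */
	return clk_set_rate(ddr, hz);
}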
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index db81e454166b..9c1373e81683 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -837,7 +837,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
u8 num_parents, int con_offset, int grf_lock_offset,
int lock_shift, int mode_offset, int mode_shift,
struct rockchip_pll_rate_table *rate_table,
- u8 clk_pll_flags)
+ unsigned long flags, u8 clk_pll_flags)
{
const char *pll_parents[3];
struct clk_init_data init;
@@ -892,7 +892,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
init.name = pll_name;
/* keep all plls untouched for now */
- init.flags = CLK_IGNORE_UNUSED;
+ init.flags = flags | CLK_IGNORE_UNUSED;
init.parent_names = &parent_names[0];
init.num_parents = 1;
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index cdfabeb9a034..8387c7a40bda 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -100,8 +100,10 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE( 297000000, 1, 99, 4, 2, 1, 0),
RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0),
RK3036_PLL_RATE( 148500000, 1, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE( 106500000, 1, 71, 4, 4, 1, 0),
RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0),
RK3036_PLL_RATE( 74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE( 65000000, 1, 65, 6, 4, 1, 0),
RK3036_PLL_RATE( 54000000, 1, 54, 6, 4, 1, 0),
RK3036_PLL_RATE( 27000000, 1, 27, 6, 4, 1, 0),
{ /* sentinel */ },
@@ -118,6 +120,10 @@ PNAME(mux_armclkb_p) = { "clk_core_b_lpll_src",
"clk_core_b_bpll_src",
"clk_core_b_dpll_src",
"clk_core_b_gpll_src" };
+PNAME(mux_ddrclk_p) = { "clk_ddrc_lpll_src",
+ "clk_ddrc_bpll_src",
+ "clk_ddrc_dpll_src",
+ "clk_ddrc_gpll_src" };
PNAME(mux_aclk_cci_p) = { "cpll_aclk_cci_src",
"gpll_aclk_cci_src",
"npll_aclk_cci_src",
@@ -373,6 +379,7 @@ static struct rockchip_cpuclk_rate_table rk3399_cpuclkb_rates[] __initdata = {
RK3399_CPUCLKB_RATE(2184000000, 1, 11, 11),
RK3399_CPUCLKB_RATE(2088000000, 1, 10, 10),
RK3399_CPUCLKB_RATE(2040000000, 1, 10, 10),
+ RK3399_CPUCLKB_RATE(2016000000, 1, 9, 9),
RK3399_CPUCLKB_RATE(1992000000, 1, 9, 9),
RK3399_CPUCLKB_RATE(1896000000, 1, 9, 9),
RK3399_CPUCLKB_RATE(1800000000, 1, 8, 8),
@@ -578,7 +585,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_spdif_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 13, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", 0,
RK3399_CLKSEL_CON(99), 0,
RK3399_CLKGATE_CON(8), 14, GFLAGS,
&rk3399_spdif_fracmux),
@@ -592,7 +599,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(28), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 3, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", 0,
RK3399_CLKSEL_CON(96), 0,
RK3399_CLKGATE_CON(8), 4, GFLAGS,
&rk3399_i2s0_fracmux),
@@ -602,7 +609,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s1_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(29), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 6, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", 0,
RK3399_CLKSEL_CON(97), 0,
RK3399_CLKGATE_CON(8), 7, GFLAGS,
&rk3399_i2s1_fracmux),
@@ -612,7 +619,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE(0, "clk_i2s2_div", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(30), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_CLKGATE_CON(8), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", 0,
RK3399_CLKSEL_CON(98), 0,
RK3399_CLKGATE_CON(8), 10, GFLAGS,
&rk3399_i2s2_fracmux),
@@ -631,7 +638,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "clk_uart0_div", "clk_uart0_src", 0,
RK3399_CLKSEL_CON(33), 0, 7, DFLAGS,
RK3399_CLKGATE_CON(9), 0, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", 0,
RK3399_CLKSEL_CON(100), 0,
RK3399_CLKGATE_CON(9), 1, GFLAGS,
&rk3399_uart0_fracmux),
@@ -641,7 +648,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "clk_uart1_div", "clk_uart_src", 0,
RK3399_CLKSEL_CON(34), 0, 7, DFLAGS,
RK3399_CLKGATE_CON(9), 2, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", 0,
RK3399_CLKSEL_CON(101), 0,
RK3399_CLKGATE_CON(9), 3, GFLAGS,
&rk3399_uart1_fracmux),
@@ -649,7 +656,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "clk_uart2_div", "clk_uart_src", 0,
RK3399_CLKSEL_CON(35), 0, 7, DFLAGS,
RK3399_CLKGATE_CON(9), 4, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", 0,
RK3399_CLKSEL_CON(102), 0,
RK3399_CLKGATE_CON(9), 5, GFLAGS,
&rk3399_uart2_fracmux),
@@ -657,7 +664,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "clk_uart3_div", "clk_uart_src", 0,
RK3399_CLKSEL_CON(36), 0, 7, DFLAGS,
RK3399_CLKGATE_CON(9), 6, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_div", 0,
RK3399_CLKSEL_CON(103), 0,
RK3399_CLKGATE_CON(9), 7, GFLAGS,
&rk3399_uart3_fracmux),
@@ -846,9 +853,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(14), 12, 2, DFLAGS,
RK3399_CLKGATE_CON(5), 4, GFLAGS),
- GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", CLK_IGNORE_UNUSED,
+ GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", 0,
RK3399_CLKGATE_CON(20), 2, GFLAGS),
- GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", CLK_IGNORE_UNUSED,
+ GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", 0,
RK3399_CLKGATE_CON(20), 10, GFLAGS),
GATE(0, "aclk_perihp_noc", "aclk_perihp", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(20), 12, GFLAGS),
@@ -1161,7 +1168,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(49), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3399_CLKGATE_CON(10), 12, GFLAGS),
- COMPOSITE_FRACMUX_NOGATE(0, "dclk_vop0_frac", "dclk_vop0_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX_NOGATE(DCLK_VOP0_FRAC, "dclk_vop0_frac", "dclk_vop0_div", 0,
RK3399_CLKSEL_CON(106), 0,
&rk3399_dclk_vop0_fracmux),
@@ -1191,7 +1198,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3399_CLKGATE_CON(10), 13, GFLAGS),
- COMPOSITE_FRACMUX_NOGATE(0, "dclk_vop1_frac", "dclk_vop1_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX_NOGATE(DCLK_VOP1_FRAC, "dclk_vop1_frac", "dclk_vop1_div", 0,
RK3399_CLKSEL_CON(107), 0,
&rk3399_dclk_vop1_fracmux),
@@ -1305,7 +1312,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* testout */
MUX(0, "clk_test_pre", mux_pll_src_cpll_gpll_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(58), 7, 1, MFLAGS),
- COMPOSITE_FRAC(0, "clk_test_frac", "clk_test_pre", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRAC(0, "clk_test_frac", "clk_test_pre", 0,
RK3399_CLKSEL_CON(105), 0,
RK3399_CLKGATE_CON(13), 9, GFLAGS),
@@ -1377,6 +1384,18 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "clk_test", "clk_test_pre", CLK_IGNORE_UNUSED,
RK3368_CLKSEL_CON(58), 0, 5, DFLAGS,
RK3368_CLKGATE_CON(13), 11, GFLAGS),
+
+ /* ddrc */
+ GATE(0, "clk_ddrc_lpll_src", "lpll", 0, RK3399_CLKGATE_CON(3),
+ 0, GFLAGS),
+ GATE(0, "clk_ddrc_bpll_src", "bpll", 0, RK3399_CLKGATE_CON(3),
+ 1, GFLAGS),
+ GATE(0, "clk_ddrc_dpll_src", "dpll", 0, RK3399_CLKGATE_CON(3),
+ 2, GFLAGS),
+ GATE(0, "clk_ddrc_gpll_src", "gpll", 0, RK3399_CLKGATE_CON(3),
+ 3, GFLAGS),
+ COMPOSITE_DDRCLK(SCLK_DDRC, "sclk_ddrc", mux_ddrclk_p, 0,
+ RK3399_CLKSEL_CON(6), 4, 2, 0, 0, ROCKCHIP_DDRCLK_SIP),
};
static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
@@ -1398,7 +1417,7 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
RK3399_PMU_CLKSEL_CON(1), 13, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_PMU_CLKGATE_CON(0), 8, GFLAGS),
- COMPOSITE_FRACMUX_NOGATE(0, "clk_wifi_frac", "clk_wifi_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX_NOGATE(0, "clk_wifi_frac", "clk_wifi_div", 0,
RK3399_PMU_CLKSEL_CON(7), 0,
&rk3399_pmuclk_wifi_fracmux),
@@ -1426,7 +1445,7 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
RK3399_PMU_CLKSEL_CON(5), 10, 1, MFLAGS, 0, 7, DFLAGS,
RK3399_PMU_CLKGATE_CON(0), 5, GFLAGS),
- COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_div", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_div", 0,
RK3399_PMU_CLKSEL_CON(6), 0,
RK3399_PMU_CLKGATE_CON(0), 6, GFLAGS,
&rk3399_uart4_pmu_fracmux),
@@ -1468,6 +1487,9 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_cci_pre",
"aclk_gic",
"aclk_gic_noc",
+ "aclk_hdcp_noc",
+ "hclk_hdcp_noc",
+ "pclk_hdcp_noc",
"pclk_perilp0",
"pclk_perilp0",
"hclk_perilp0",
@@ -1488,6 +1510,10 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"gpll_hclk_perilp1_src",
"gpll_aclk_perilp0_src",
"gpll_aclk_perihp_src",
+ "aclk_vio_noc",
+
+ /* ddrc */
+ "sclk_ddrc"
};
static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
diff --git a/drivers/clk/rockchip/clk-rockchip.c b/drivers/clk/rockchip/clk-rockchip.c
index 4cf838d52ef6..2c9bb81144c9 100644
--- a/drivers/clk/rockchip/clk-rockchip.c
+++ b/drivers/clk/rockchip/clk-rockchip.c
@@ -49,14 +49,19 @@ static void __init rk2928_gate_clk_init(struct device_node *node)
}
reg = of_iomap(node, 0);
+ if (!reg)
+ return;
clk_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ iounmap(reg);
return;
+ }
clk_data->clks = kzalloc(qty * sizeof(struct clk *), GFP_KERNEL);
if (!clk_data->clks) {
kfree(clk_data);
+ iounmap(reg);
return;
}
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 7ffd134995f2..b886be30f34f 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -385,7 +385,7 @@ void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
list->con_offset, grf_lock_offset,
list->lock_shift, list->mode_offset,
list->mode_shift, list->rate_table,
- list->pll_flags);
+ list->flags, list->pll_flags);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
@@ -484,6 +484,15 @@ void __init rockchip_clk_register_branches(
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &ctx->lock);
break;
+ case branch_ddrclk:
+ clk = rockchip_clk_register_ddrclk(
+ list->name, list->flags,
+ list->parent_names, list->num_parents,
+ list->muxdiv_offset, list->mux_shift,
+ list->mux_width, list->div_shift,
+ list->div_width, list->div_flags,
+ ctx->reg_base, &ctx->lock);
+ break;
}
/* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 2194ffa8c9fd..1653edd792a5 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -238,7 +238,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
u8 num_parents, int con_offset, int grf_lock_offset,
int lock_shift, int mode_offset, int mode_shift,
struct rockchip_pll_rate_table *rate_table,
- u8 clk_pll_flags);
+ unsigned long flags, u8 clk_pll_flags);
struct rockchip_cpuclk_clksel {
int reg;
@@ -281,6 +281,20 @@ struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg, int shift);
+/*
+ * DDRCLK flags, including method of setting the rate
+ * ROCKCHIP_DDRCLK_SIP: use SIP call to bl31 to change ddrclk rate.
+ */
+#define ROCKCHIP_DDRCLK_SIP BIT(0)
+
+struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
+ const char *const *parent_names,
+ u8 num_parents, int mux_offset,
+ int mux_shift, int mux_width,
+ int div_shift, int div_width,
+ int ddr_flags, void __iomem *reg_base,
+ spinlock_t *lock);
+
#define ROCKCHIP_INVERTER_HIWORD_MASK BIT(0)
struct clk *rockchip_clk_register_inverter(const char *name,
@@ -299,6 +313,7 @@ enum rockchip_clk_branch_type {
branch_mmc,
branch_inverter,
branch_factor,
+ branch_ddrclk,
};
struct rockchip_clk_branch {
@@ -488,6 +503,24 @@ struct rockchip_clk_branch {
.child = ch, \
}
+#define COMPOSITE_DDRCLK(_id, cname, pnames, f, mo, ms, mw, \
+ ds, dw, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_ddrclk, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
#define MUX(_id, cname, pnames, f, o, s, w, mf) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index bdf8b971f332..51d152f735cc 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -14,18 +14,13 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/syscore_ops.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/exynos-audss-clk.h>
-enum exynos_audss_clk_type {
- TYPE_EXYNOS4210,
- TYPE_EXYNOS5250,
- TYPE_EXYNOS5420,
-};
-
static DEFINE_SPINLOCK(lock);
static struct clk **clk_table;
static void __iomem *reg_base;
@@ -44,9 +39,9 @@ static struct clk *epll;
#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
- {ASS_CLK_SRC, 0},
- {ASS_CLK_DIV, 0},
- {ASS_CLK_GATE, 0},
+ { ASS_CLK_SRC, 0 },
+ { ASS_CLK_DIV, 0 },
+ { ASS_CLK_GATE, 0 },
};
static int exynos_audss_clk_suspend(void)
@@ -73,14 +68,43 @@ static struct syscore_ops exynos_audss_clk_syscore_ops = {
};
#endif /* CONFIG_PM_SLEEP */
+struct exynos_audss_clk_drvdata {
+ unsigned int has_adma_clk:1;
+ unsigned int has_mst_clk:1;
+ unsigned int enable_epll:1;
+ unsigned int num_clks;
+};
+
+static const struct exynos_audss_clk_drvdata exynos4210_drvdata = {
+ .num_clks = EXYNOS_AUDSS_MAX_CLKS - 1,
+};
+
+static const struct exynos_audss_clk_drvdata exynos5410_drvdata = {
+ .num_clks = EXYNOS_AUDSS_MAX_CLKS - 1,
+ .has_mst_clk = 1,
+};
+
+static const struct exynos_audss_clk_drvdata exynos5420_drvdata = {
+ .num_clks = EXYNOS_AUDSS_MAX_CLKS,
+ .has_adma_clk = 1,
+ .enable_epll = 1,
+};
+
static const struct of_device_id exynos_audss_clk_of_match[] = {
- { .compatible = "samsung,exynos4210-audss-clock",
- .data = (void *)TYPE_EXYNOS4210, },
- { .compatible = "samsung,exynos5250-audss-clock",
- .data = (void *)TYPE_EXYNOS5250, },
- { .compatible = "samsung,exynos5420-audss-clock",
- .data = (void *)TYPE_EXYNOS5420, },
- {},
+ {
+ .compatible = "samsung,exynos4210-audss-clock",
+ .data = &exynos4210_drvdata,
+ }, {
+ .compatible = "samsung,exynos5250-audss-clock",
+ .data = &exynos4210_drvdata,
+ }, {
+ .compatible = "samsung,exynos5410-audss-clock",
+ .data = &exynos5410_drvdata,
+ }, {
+ .compatible = "samsung,exynos5420-audss-clock",
+ .data = &exynos5420_drvdata,
+ },
+ { },
};
static void exynos_audss_clk_teardown(void)
@@ -106,19 +130,17 @@ static void exynos_audss_clk_teardown(void)
/* register exynos_audss clocks */
static int exynos_audss_clk_probe(struct platform_device *pdev)
{
- int i, ret = 0;
- struct resource *res;
const char *mout_audss_p[] = {"fin_pll", "fout_epll"};
const char *mout_i2s_p[] = {"mout_audss", "cdclk0", "sclk_audio0"};
const char *sclk_pcm_p = "sclk_pcm0";
struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
- const struct of_device_id *match;
- enum exynos_audss_clk_type variant;
+ const struct exynos_audss_clk_drvdata *variant;
+ struct resource *res;
+ int i, ret = 0;
- match = of_match_node(exynos_audss_clk_of_match, pdev->dev.of_node);
- if (!match)
+ variant = of_device_get_match_data(&pdev->dev);
+ if (!variant)
return -EINVAL;
- variant = (enum exynos_audss_clk_type)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(&pdev->dev, res);
@@ -126,7 +148,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to map audss registers\n");
return PTR_ERR(reg_base);
}
- /* EPLL don't have to be enabled for boards other than Exynos5420 */
+
epll = ERR_PTR(-ENODEV);
clk_table = devm_kzalloc(&pdev->dev,
@@ -136,10 +158,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
return -ENOMEM;
clk_data.clks = clk_table;
- if (variant == TYPE_EXYNOS5420)
- clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
- else
- clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS - 1;
+ clk_data.clk_num = variant->num_clks;
pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
pll_in = devm_clk_get(&pdev->dev, "pll_in");
@@ -148,13 +167,13 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
if (!IS_ERR(pll_in)) {
mout_audss_p[1] = __clk_get_name(pll_in);
- if (variant == TYPE_EXYNOS5420) {
+ if (variant->enable_epll) {
epll = pll_in;
ret = clk_prepare_enable(epll);
if (ret) {
dev_err(&pdev->dev,
- "failed to prepare the epll clock\n");
+ "failed to prepare the epll clock\n");
return ret;
}
}
@@ -210,7 +229,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
- if (variant == TYPE_EXYNOS5420) {
+ if (variant->has_adma_clk) {
clk_table[EXYNOS_ADMA] = clk_register_gate(NULL, "adma",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 9, 0, &lock);
@@ -234,9 +253,6 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif
-
- dev_info(&pdev->dev, "setup completed\n");
-
return 0;
unregister:
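The audss conversion above drops the TYPE_EXYNOS* enum in favour of per-variant driver data resolved with of_device_get_match_data(), so probe no longer needs of_match_node() plus a cast. A generic sketch of that pattern, with made-up compatible strings and fields standing in for the real ones:

/*
 * Illustrative sketch only: per-variant data hung off of_device_id.data
 * and fetched in probe. Names here are placeholders, not driver code.
 */
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant {
	unsigned int has_extra_clk:1;
	unsigned int num_clks;
};

static const struct example_variant example_v1 = { .num_clks = 9 };
static const struct example_variant example_v2 = {
	.has_extra_clk = 1,
	.num_clks = 10,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1 },
	{ .compatible = "vendor,example-v2", .data = &example_v2 },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	/* ... use variant->num_clks and variant->has_extra_clk ... */
	return 0;
}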
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index a43642c36039..fd1d9bfc151b 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -131,21 +131,21 @@ static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
EN_IP_AUD, 4, 0, 0),
};
+static const struct samsung_cmu_info aud_cmu __initconst = {
+ .mux_clks = aud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
+ .div_clks = aud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(aud_div_clks),
+ .gate_clks = aud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
+ .nr_clk_ids = AUD_NR_CLK,
+ .clk_regs = aud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+};
+
static void __init exynos5260_clk_aud_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = aud_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
- cmu.div_clks = aud_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(aud_div_clks);
- cmu.gate_clks = aud_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(aud_gate_clks);
- cmu.nr_clk_ids = AUD_NR_CLK;
- cmu.clk_regs = aud_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &aud_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
@@ -321,21 +321,21 @@ static const struct samsung_gate_clock disp_gate_clks[] __initconst = {
EN_IP_DISP, 25, 0, 0),
};
+static const struct samsung_cmu_info disp_cmu __initconst = {
+ .mux_clks = disp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(disp_mux_clks),
+ .div_clks = disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(disp_div_clks),
+ .gate_clks = disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(disp_gate_clks),
+ .nr_clk_ids = DISP_NR_CLK,
+ .clk_regs = disp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(disp_clk_regs),
+};
+
static void __init exynos5260_clk_disp_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = disp_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
- cmu.div_clks = disp_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(disp_div_clks);
- cmu.gate_clks = disp_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(disp_gate_clks);
- cmu.nr_clk_ids = DISP_NR_CLK;
- cmu.clk_regs = disp_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &disp_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
@@ -385,21 +385,21 @@ static const struct samsung_pll_clock egl_pll_clks[] __initconst = {
pll2550_24mhz_tbl),
};
+static const struct samsung_cmu_info egl_cmu __initconst = {
+ .pll_clks = egl_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(egl_pll_clks),
+ .mux_clks = egl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(egl_mux_clks),
+ .div_clks = egl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(egl_div_clks),
+ .nr_clk_ids = EGL_NR_CLK,
+ .clk_regs = egl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(egl_clk_regs),
+};
+
static void __init exynos5260_clk_egl_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.pll_clks = egl_pll_clks;
- cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks);
- cmu.mux_clks = egl_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(egl_mux_clks);
- cmu.div_clks = egl_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(egl_div_clks);
- cmu.nr_clk_ids = EGL_NR_CLK;
- cmu.clk_regs = egl_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &egl_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
@@ -487,19 +487,19 @@ static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
EN_IP_FSYS_SECURE_SMMU_RTIC, 12, 0, 0),
};
+static const struct samsung_cmu_info fsys_cmu __initconst = {
+ .mux_clks = fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .nr_clk_ids = FSYS_NR_CLK,
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+};
+
static void __init exynos5260_clk_fsys_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = fsys_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
- cmu.gate_clks = fsys_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(fsys_gate_clks);
- cmu.nr_clk_ids = FSYS_NR_CLK;
- cmu.clk_regs = fsys_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &fsys_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
@@ -576,21 +576,21 @@ static const struct samsung_gate_clock g2d_gate_clks[] __initconst = {
EN_IP_G2D_SECURE_SMMU_G2D, 15, 0, 0),
};
+static const struct samsung_cmu_info g2d_cmu __initconst = {
+ .mux_clks = g2d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g2d_mux_clks),
+ .div_clks = g2d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g2d_div_clks),
+ .gate_clks = g2d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g2d_gate_clks),
+ .nr_clk_ids = G2D_NR_CLK,
+ .clk_regs = g2d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g2d_clk_regs),
+};
+
static void __init exynos5260_clk_g2d_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = g2d_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
- cmu.div_clks = g2d_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(g2d_div_clks);
- cmu.gate_clks = g2d_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(g2d_gate_clks);
- cmu.nr_clk_ids = G2D_NR_CLK;
- cmu.clk_regs = g2d_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &g2d_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
@@ -637,23 +637,23 @@ static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
pll2550_24mhz_tbl),
};
+static const struct samsung_cmu_info g3d_cmu __initconst = {
+ .pll_clks = g3d_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
+ .mux_clks = g3d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
+ .div_clks = g3d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g3d_div_clks),
+ .gate_clks = g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
+ .nr_clk_ids = G3D_NR_CLK,
+ .clk_regs = g3d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+};
+
static void __init exynos5260_clk_g3d_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.pll_clks = g3d_pll_clks;
- cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks);
- cmu.mux_clks = g3d_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(g3d_mux_clks);
- cmu.div_clks = g3d_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(g3d_div_clks);
- cmu.gate_clks = g3d_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(g3d_gate_clks);
- cmu.nr_clk_ids = G3D_NR_CLK;
- cmu.clk_regs = g3d_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &g3d_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
@@ -772,21 +772,21 @@ static const struct samsung_gate_clock gscl_gate_clks[] __initconst = {
EN_IP_GSCL_SECURE_SMMU_MSCL1, 20, 0, 0),
};
+static const struct samsung_cmu_info gscl_cmu __initconst = {
+ .mux_clks = gscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(gscl_mux_clks),
+ .div_clks = gscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(gscl_div_clks),
+ .gate_clks = gscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(gscl_gate_clks),
+ .nr_clk_ids = GSCL_NR_CLK,
+ .clk_regs = gscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(gscl_clk_regs),
+};
+
static void __init exynos5260_clk_gscl_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = gscl_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
- cmu.div_clks = gscl_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(gscl_div_clks);
- cmu.gate_clks = gscl_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(gscl_gate_clks);
- cmu.nr_clk_ids = GSCL_NR_CLK;
- cmu.clk_regs = gscl_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &gscl_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
@@ -891,21 +891,21 @@ static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
EN_SCLK_ISP, 9, CLK_SET_RATE_PARENT, 0),
};
+static const struct samsung_cmu_info isp_cmu __initconst = {
+ .mux_clks = isp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(isp_mux_clks),
+ .div_clks = isp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(isp_div_clks),
+ .gate_clks = isp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(isp_gate_clks),
+ .nr_clk_ids = ISP_NR_CLK,
+ .clk_regs = isp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+};
+
static void __init exynos5260_clk_isp_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = isp_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
- cmu.div_clks = isp_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(isp_div_clks);
- cmu.gate_clks = isp_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(isp_gate_clks);
- cmu.nr_clk_ids = ISP_NR_CLK;
- cmu.clk_regs = isp_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &isp_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
@@ -955,21 +955,21 @@ static const struct samsung_pll_clock kfc_pll_clks[] __initconst = {
pll2550_24mhz_tbl),
};
+static const struct samsung_cmu_info kfc_cmu __initconst = {
+ .pll_clks = kfc_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(kfc_pll_clks),
+ .mux_clks = kfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(kfc_mux_clks),
+ .div_clks = kfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(kfc_div_clks),
+ .nr_clk_ids = KFC_NR_CLK,
+ .clk_regs = kfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(kfc_clk_regs),
+};
+
static void __init exynos5260_clk_kfc_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.pll_clks = kfc_pll_clks;
- cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks);
- cmu.mux_clks = kfc_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(kfc_mux_clks);
- cmu.div_clks = kfc_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(kfc_div_clks);
- cmu.nr_clk_ids = KFC_NR_CLK;
- cmu.clk_regs = kfc_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &kfc_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
@@ -1011,21 +1011,21 @@ static const struct samsung_gate_clock mfc_gate_clks[] __initconst = {
EN_IP_MFC_SECURE_SMMU2_MFC, 7, 0, 0),
};
+static const struct samsung_cmu_info mfc_cmu __initconst = {
+ .mux_clks = mfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
+ .div_clks = mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfc_div_clks),
+ .gate_clks = mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfc_gate_clks),
+ .nr_clk_ids = MFC_NR_CLK,
+ .clk_regs = mfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+};
+
static void __init exynos5260_clk_mfc_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = mfc_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
- cmu.div_clks = mfc_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(mfc_div_clks);
- cmu.gate_clks = mfc_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(mfc_gate_clks);
- cmu.nr_clk_ids = MFC_NR_CLK;
- cmu.clk_regs = mfc_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &mfc_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
@@ -1158,23 +1158,23 @@ static const struct samsung_pll_clock mif_pll_clks[] __initconst = {
pll2550_24mhz_tbl),
};
+static const struct samsung_cmu_info mif_cmu __initconst = {
+ .pll_clks = mif_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(mif_pll_clks),
+ .mux_clks = mif_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mif_mux_clks),
+ .div_clks = mif_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mif_div_clks),
+ .gate_clks = mif_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mif_gate_clks),
+ .nr_clk_ids = MIF_NR_CLK,
+ .clk_regs = mif_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mif_clk_regs),
+};
+
static void __init exynos5260_clk_mif_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.pll_clks = mif_pll_clks;
- cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks);
- cmu.mux_clks = mif_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(mif_mux_clks);
- cmu.div_clks = mif_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(mif_div_clks);
- cmu.gate_clks = mif_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(mif_gate_clks);
- cmu.nr_clk_ids = MIF_NR_CLK;
- cmu.clk_regs = mif_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &mif_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
@@ -1366,21 +1366,21 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
EN_IP_PERI_SECURE_TZPC, 20, 0, 0),
};
+static const struct samsung_cmu_info peri_cmu __initconst = {
+ .mux_clks = peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
+ .div_clks = peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peri_div_clks),
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .nr_clk_ids = PERI_NR_CLK,
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+};
+
static void __init exynos5260_clk_peri_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.mux_clks = peri_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
- cmu.div_clks = peri_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(peri_div_clks);
- cmu.gate_clks = peri_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(peri_gate_clks);
- cmu.nr_clk_ids = PERI_NR_CLK;
- cmu.clk_regs = peri_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &peri_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
@@ -1818,25 +1818,25 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
pll2650_24mhz_tbl),
};
+static const struct samsung_cmu_info top_cmu __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .fixed_clks = fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
static void __init exynos5260_clk_top_init(struct device_node *np)
{
- struct samsung_cmu_info cmu = { NULL };
-
- cmu.pll_clks = top_pll_clks;
- cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks);
- cmu.mux_clks = top_mux_clks;
- cmu.nr_mux_clks = ARRAY_SIZE(top_mux_clks);
- cmu.div_clks = top_div_clks;
- cmu.nr_div_clks = ARRAY_SIZE(top_div_clks);
- cmu.gate_clks = top_gate_clks;
- cmu.nr_gate_clks = ARRAY_SIZE(top_gate_clks);
- cmu.fixed_clks = fixed_rate_clks;
- cmu.nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks);
- cmu.nr_clk_ids = TOP_NR_CLK;
- cmu.clk_regs = top_clk_regs;
- cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
-
- samsung_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &top_cmu);
}
CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
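
The exynos5260 hunks above all apply one refactor: fields that used to be filled in at runtime inside each *_clk_*_init() function become a single static const samsung_cmu_info descriptor, and the init function shrinks to one samsung_cmu_register_one() call. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real Samsung clock structures:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Simplified stand-ins for the driver's clock tables and descriptor. */
struct gate_clk { const char *name; };

struct cmu_info {
	const struct gate_clk *gate_clks;
	size_t nr_gate_clks;
	unsigned int nr_clk_ids;
};

static const struct gate_clk aud_gate_clks[] = {
	{ "clk_aud_uart" },
	{ "clk_aud_i2s" },
};

/* Descriptor built once at compile time instead of field-by-field at init. */
static const struct cmu_info aud_cmu = {
	.gate_clks = aud_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
	.nr_clk_ids = 16,
};

/* Stand-in for samsung_cmu_register_one(): consumes the whole descriptor. */
static void cmu_register_one(const struct cmu_info *cmu)
{
	for (size_t i = 0; i < cmu->nr_gate_clks; i++)
		printf("registering gate %s\n", cmu->gate_clks[i].name);
}

int main(void)
{
	cmu_register_one(&aud_cmu);	/* the init function reduces to this */
	return 0;
}
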
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 54ec486a5e45..fc471a49e8f4 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -14,6 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include "clk.h"
@@ -21,6 +22,8 @@
#define APLL_CON0 0x100
#define CPLL_LOCK 0x10020
#define CPLL_CON0 0x10120
+#define EPLL_LOCK 0x10040
+#define EPLL_CON0 0x10130
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define BPLL_LOCK 0x20010
@@ -58,7 +61,7 @@
/* list of PLLs */
enum exynos5410_plls {
- apll, cpll, mpll,
+ apll, cpll, epll, mpll,
bpll, kpll,
nr_plls /* number of PLLs */
};
@@ -67,6 +70,7 @@ enum exynos5410_plls {
PNAME(apll_p) = { "fin_pll", "fout_apll", };
PNAME(bpll_p) = { "fin_pll", "fout_bpll", };
PNAME(cpll_p) = { "fin_pll", "fout_cpll" };
+PNAME(epll_p) = { "fin_pll", "fout_epll" };
PNAME(mpll_p) = { "fin_pll", "fout_mpll", };
PNAME(kpll_p) = { "fin_pll", "fout_kpll", };
@@ -95,6 +99,8 @@ static const struct samsung_mux_clock exynos5410_mux_clks[] __initconst = {
MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
MUX(0, "sclk_bpll_muxed", bpll_user_p, SRC_TOP2, 24, 1),
+ MUX(0, "sclk_epll", epll_p, SRC_TOP2, 12, 1),
+
MUX(0, "sclk_cpll", cpll_p, SRC_TOP2, 8, 1),
MUX(0, "sclk_mpll_bpll", mpll_bpll_p, SRC_TOP1, 20, 1),
@@ -176,6 +182,8 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
GATE(CLK_MMC0, "sdmmc0", "aclk200", GATE_BUS_FSYS0, 12, 0, 0),
GATE(CLK_MMC1, "sdmmc1", "aclk200", GATE_BUS_FSYS0, 13, 0, 0),
GATE(CLK_MMC2, "sdmmc2", "aclk200", GATE_BUS_FSYS0, 14, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "aclk200", GATE_BUS_FSYS0, 2, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk200", GATE_BUS_FSYS0, 1, 0, 0),
GATE(CLK_SCLK_USBPHY301, "sclk_usbphy301", "dout_usbphy301",
GATE_TOP_SCLK_FSYS, 7, CLK_SET_RATE_PARENT, 0),
@@ -217,11 +225,26 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_IP_FSYS, 20, 0, 0),
};
-static const struct samsung_pll_clock exynos5410_plls[nr_plls] __initconst = {
+static const struct samsung_pll_rate_table exynos5410_pll2550x_24mhz_tbl[] __initconst = {
+ PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(333000000U, 111, 2, 2, 0),
+ PLL_36XX_RATE(300000000U, 100, 2, 2, 0),
+ PLL_36XX_RATE(266000000U, 266, 3, 3, 0),
+ PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(192000000U, 192, 3, 3, 0),
+ PLL_36XX_RATE(166000000U, 166, 3, 3, 0),
+ PLL_36XX_RATE(133000000U, 266, 3, 4, 0),
+ PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(66000000U, 176, 2, 5, 0),
+};
+
+static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = {
[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
[cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
CPLL_CON0, NULL),
+ [epll] = PLL(pll_2650x, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, NULL),
[mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
MPLL_CON0, NULL),
[bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
@@ -230,29 +253,27 @@ static const struct samsung_pll_clock exynos5410_plls[nr_plls] __initconst = {
KPLL_CON0, NULL),
};
+static const struct samsung_cmu_info cmu __initconst = {
+ .pll_clks = exynos5410_plls,
+ .nr_pll_clks = ARRAY_SIZE(exynos5410_plls),
+ .mux_clks = exynos5410_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos5410_mux_clks),
+ .div_clks = exynos5410_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5410_div_clks),
+ .gate_clks = exynos5410_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5410_gate_clks),
+ .nr_clk_ids = CLK_NR_CLKS,
+};
+
/* register exynos5410 clocks */
static void __init exynos5410_clk_init(struct device_node *np)
{
- struct samsung_clk_provider *ctx;
- void __iomem *reg_base;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
-
- ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-
- samsung_clk_register_pll(ctx, exynos5410_plls,
- ARRAY_SIZE(exynos5410_plls), reg_base);
+ struct clk *xxti = of_clk_get(np, 0);
- samsung_clk_register_mux(ctx, exynos5410_mux_clks,
- ARRAY_SIZE(exynos5410_mux_clks));
- samsung_clk_register_div(ctx, exynos5410_div_clks,
- ARRAY_SIZE(exynos5410_div_clks));
- samsung_clk_register_gate(ctx, exynos5410_gate_clks,
- ARRAY_SIZE(exynos5410_gate_clks));
+ if (!IS_ERR(xxti) && clk_get_rate(xxti) == 24 * MHZ)
+ exynos5410_plls[epll].rate_table = exynos5410_pll2550x_24mhz_tbl;
- samsung_clk_of_add_provider(np, ctx);
+ samsung_cmu_register_one(np, &cmu);
pr_debug("Exynos5410: clock setup completed.\n");
}
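
The exynos5410 change above adds an EPLL whose rate table is only attached when the external oscillator is confirmed to run at 24 MHz, since the table entries were computed for that reference; without a table the PLL is registered with read-only recalc ops. A small standalone sketch of that decision, with invented names standing in for the kernel clock API:

#include <stdio.h>

#define MHZ 1000000UL

struct pll_rate { unsigned long rate; unsigned int m, p, s; };

struct pll {
	const char *name;
	const struct pll_rate *rate_table;	/* NULL => rate is read-only */
};

static const struct pll_rate epll_24mhz_tbl[] = {
	{ 400000000UL, 200, 3, 2 },
	{ 200000000UL, 200, 3, 3 },
};

/* Stand-in for clk_get_rate(of_clk_get(np, 0)) on the xxti input. */
static unsigned long oscillator_rate(void)
{
	return 24 * MHZ;
}

int main(void)
{
	struct pll epll = { .name = "fout_epll", .rate_table = NULL };

	/* Only trust the precomputed table for the reference it was built for. */
	if (oscillator_rate() == 24 * MHZ)
		epll.rate_table = epll_24mhz_tbl;

	printf("%s: %s\n", epll.name,
	       epll.rate_table ? "rate setting enabled" : "read-only");
	return 0;
}
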
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index bb196ca21a77..8c8b495cbf0d 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -131,6 +131,9 @@
#define TOP_SPARE2 0x10b08
#define BPLL_LOCK 0x20010
#define BPLL_CON0 0x20110
+#define SRC_CDREX 0x20200
+#define DIV_CDREX0 0x20500
+#define DIV_CDREX1 0x20504
#define KPLL_LOCK 0x28000
#define KPLL_CON0 0x28100
#define SRC_KFC 0x28200
@@ -244,6 +247,9 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
GATE_TOP_SCLK_FSYS,
GATE_TOP_SCLK_PERIC,
TOP_SPARE2,
+ SRC_CDREX,
+ DIV_CDREX0,
+ DIV_CDREX1,
SRC_KFC,
DIV_KFC0,
};
@@ -448,6 +454,8 @@ PNAME(mout_maudio0_p) = {"fin_pll", "maudio_clk", "mout_sclk_dpll",
"mout_sclk_epll", "mout_sclk_rpll"};
PNAME(mout_mau_epll_clk_p) = {"mout_sclk_epll", "mout_sclk_dpll",
"mout_sclk_mpll", "mout_sclk_spll"};
+PNAME(mout_mclk_cdrex_p) = {"mout_bpll", "mout_mx_mspll_ccore"};
+
/* List of parents specific to exynos5800 */
PNAME(mout_epll2_5800_p) = { "mout_sclk_epll", "ff_dout_epll2" };
PNAME(mout_group1_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
@@ -465,6 +473,9 @@ PNAME(mout_group6_5800_p) = { "mout_sclk_ipll", "mout_sclk_dpll",
PNAME(mout_group7_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll",
"mout_sclk_mpll", "mout_sclk_spll",
"mout_epll2", "mout_sclk_ipll" };
+PNAME(mout_mx_mspll_ccore_p) = {"sclk_bpll", "mout_sclk_dpll",
+ "mout_sclk_mpll", "ff_dout_spll2",
+ "mout_sclk_spll", "mout_sclk_epll"};
PNAME(mout_mau_epll_clk_5800_p) = { "mout_sclk_epll", "mout_sclk_dpll",
"mout_sclk_mpll",
"ff_dout_spll2" };
@@ -523,6 +534,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(0, "mout_aclk300_disp1", mout_group5_5800_p, SRC_TOP2, 24, 2),
MUX(0, "mout_aclk300_gscl", mout_group5_5800_p, SRC_TOP2, 28, 2),
+ MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
+ mout_mx_mspll_ccore_p, SRC_TOP7, 16, 2),
MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, SRC_TOP7,
20, 2),
MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1),
@@ -601,6 +614,8 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
MUX(0, "mout_aclk300_disp1", mout_group1_p, SRC_TOP2, 24, 2),
MUX(0, "mout_aclk300_gscl", mout_group1_p, SRC_TOP2, 28, 2),
+ MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
+ mout_group5_5800_p, SRC_TOP7, 16, 2),
MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2),
MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
@@ -744,6 +759,12 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_fimd1_final", mout_fimd1_final_p, TOP_SPARE2, 8, 1),
+ /* CDREX block */
+ MUX_F(CLK_MOUT_MCLK_CDREX, "mout_mclk_cdrex", mout_mclk_cdrex_p,
+ SRC_CDREX, 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_CDREX, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+
/* MAU Block */
MUX(CLK_MOUT_MAUDIO0, "mout_maudio0", mout_maudio0_p, SRC_MAU, 28, 3),
@@ -836,6 +857,21 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(CLK_DOUT_ACLK400_DISP1, "dout_aclk400_disp1",
"mout_aclk400_disp1", DIV_TOP2, 4, 3),
+ /* CDREX Block */
+ DIV(CLK_DOUT_PCLK_CDREX, "dout_pclk_cdrex", "dout_aclk_cdrex1",
+ DIV_CDREX0, 28, 3),
+ DIV_F(CLK_DOUT_SCLK_CDREX, "dout_sclk_cdrex", "mout_mclk_cdrex",
+ DIV_CDREX0, 24, 3, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_ACLK_CDREX1, "dout_aclk_cdrex1", "dout_clk2x_phy0",
+ DIV_CDREX0, 16, 3),
+ DIV(CLK_DOUT_CCLK_DREX0, "dout_cclk_drex0", "dout_clk2x_phy0",
+ DIV_CDREX0, 8, 3),
+ DIV(CLK_DOUT_CLK2X_PHY0, "dout_clk2x_phy0", "dout_sclk_cdrex",
+ DIV_CDREX0, 3, 5),
+
+ DIV(CLK_DOUT_PCLK_CORE_MEM, "dout_pclk_core_mem", "mout_mclk_cdrex",
+ DIV_CDREX1, 8, 3),
+
/* Audio Block */
DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
DIV(0, "dout_maupcm0", "dout_maudio0", DIV_MAU, 24, 8),
@@ -1364,6 +1400,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
if (_get_rate("fin_pll") == 24 * MHZ) {
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+ exynos5x_plls[bpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
}
samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls),
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index a57d01b99b76..a80f3ef20801 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -112,6 +112,11 @@ static struct notifier_block exynos5440_clk_restart_handler = {
.priority = 128,
};
+static const struct samsung_pll_clock exynos5440_plls[] __initconst = {
+ PLL(pll_2550x, CLK_CPLLA, "cplla", "xtal", 0, 0x4c, NULL),
+ PLL(pll_2550x, CLK_CPLLB, "cpllb", "xtal", 0, 0x50, NULL),
+};
+
/* register exynos5440 clocks */
static void __init exynos5440_clk_init(struct device_node *np)
{
@@ -129,8 +134,8 @@ static void __init exynos5440_clk_init(struct device_node *np)
samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
- samsung_clk_register_pll2550x("cplla", "xtal", reg_base + 0x1c, 0x10);
- samsung_clk_register_pll2550x("cpllb", "xtal", reg_base + 0x20, 0x10);
+ samsung_clk_register_pll(ctx, exynos5440_plls,
+ ARRAY_SIZE(exynos5440_plls), ctx->reg_base);
samsung_clk_register_fixed_rate(ctx, exynos5440_fixed_rate_clks,
ARRAY_SIZE(exynos5440_fixed_rate_clks));
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 48139bd510f1..9617825daabb 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -890,22 +890,14 @@ static const struct clk_ops samsung_s3c2440_mpll_clk_ops = {
#define PLL2550X_M_SHIFT (4)
#define PLL2550X_S_SHIFT (0)
-struct samsung_clk_pll2550x {
- struct clk_hw hw;
- const void __iomem *reg_base;
- unsigned long offset;
-};
-
-#define to_clk_pll2550x(_hw) container_of(_hw, struct samsung_clk_pll2550x, hw)
-
static unsigned long samsung_pll2550x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll2550x *pll = to_clk_pll2550x(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 r, p, m, s, pll_stat;
u64 fvco = parent_rate;
- pll_stat = readl_relaxed(pll->reg_base + pll->offset * 3);
+ pll_stat = readl_relaxed(pll->con_reg);
r = (pll_stat >> PLL2550X_R_SHIFT) & PLL2550X_R_MASK;
if (!r)
return 0;
@@ -923,43 +915,6 @@ static const struct clk_ops samsung_pll2550x_clk_ops = {
.recalc_rate = samsung_pll2550x_recalc_rate,
};
-struct clk * __init samsung_clk_register_pll2550x(const char *name,
- const char *pname, const void __iomem *reg_base,
- const unsigned long offset)
-{
- struct samsung_clk_pll2550x *pll;
- struct clk *clk;
- struct clk_init_data init;
-
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
- }
-
- init.name = name;
- init.ops = &samsung_pll2550x_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
-
- pll->hw.init = &init;
- pll->reg_base = reg_base;
- pll->offset = offset;
-
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
- }
-
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
-
- return clk;
-}
-
/*
* PLL2550xx Clock Type
*/
@@ -1063,6 +1018,102 @@ static const struct clk_ops samsung_pll2550xx_clk_min_ops = {
};
/*
+ * PLL2650x Clock Type
+ */
+
+/* Maximum lock time can be 3000 * PDIV cycles */
+#define PLL2650X_LOCK_FACTOR 3000
+
+#define PLL2650X_M_MASK 0x1ff
+#define PLL2650X_P_MASK 0x3f
+#define PLL2650X_S_MASK 0x7
+#define PLL2650X_K_MASK 0xffff
+#define PLL2650X_LOCK_STAT_MASK 0x1
+#define PLL2650X_M_SHIFT 16
+#define PLL2650X_P_SHIFT 8
+#define PLL2650X_S_SHIFT 0
+#define PLL2650X_K_SHIFT 0
+#define PLL2650X_LOCK_STAT_SHIFT 29
+#define PLL2650X_PLL_ENABLE_SHIFT 31
+
+static unsigned long samsung_pll2650x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u64 fout = parent_rate;
+ u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+ s16 kdiv;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ mdiv = (pll_con0 >> PLL2650X_M_SHIFT) & PLL2650X_M_MASK;
+ pdiv = (pll_con0 >> PLL2650X_P_SHIFT) & PLL2650X_P_MASK;
+ sdiv = (pll_con0 >> PLL2650X_S_SHIFT) & PLL2650X_S_MASK;
+
+ pll_con1 = readl_relaxed(pll->con_reg + 4);
+ kdiv = (s16)((pll_con1 >> PLL2650X_K_SHIFT) & PLL2650X_K_MASK);
+
+ fout *= (mdiv << 16) + kdiv;
+ do_div(fout, (pdiv << sdiv));
+ fout >>= 16;
+
+ return (unsigned long)fout;
+}
+
+static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con1;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ con0 = readl_relaxed(pll->con_reg);
+ con1 = readl_relaxed(pll->con_reg + 4);
+
+ /* Set PLL lock time. */
+ writel_relaxed(rate->pdiv * PLL2650X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Change PLL PMS values */
+ con0 &= ~((PLL2650X_M_MASK << PLL2650X_M_SHIFT) |
+ (PLL2650X_P_MASK << PLL2650X_P_SHIFT) |
+ (PLL2650X_S_MASK << PLL2650X_S_SHIFT));
+ con0 |= (rate->mdiv << PLL2650X_M_SHIFT) |
+ (rate->pdiv << PLL2650X_P_SHIFT) |
+ (rate->sdiv << PLL2650X_S_SHIFT);
+ con0 |= (1 << PLL2650X_PLL_ENABLE_SHIFT);
+ writel_relaxed(con0, pll->con_reg);
+
+ con1 &= ~(PLL2650X_K_MASK << PLL2650X_K_SHIFT);
+ con1 |= ((rate->kdiv & PLL2650X_K_MASK) << PLL2650X_K_SHIFT);
+ writel_relaxed(con1, pll->con_reg + 4);
+
+ do {
+ cpu_relax();
+ con0 = readl_relaxed(pll->con_reg);
+ } while (!(con0 & (PLL2650X_LOCK_STAT_MASK
+ << PLL2650X_LOCK_STAT_SHIFT)));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll2650x_clk_ops = {
+ .recalc_rate = samsung_pll2650x_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll2650x_set_rate,
+};
+
+static const struct clk_ops samsung_pll2650x_clk_min_ops = {
+ .recalc_rate = samsung_pll2650x_recalc_rate,
+};
+
+/*
* PLL2650XX Clock Type
*/
@@ -1263,12 +1314,21 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_s3c2440_mpll_clk_ops;
break;
+ case pll_2550x:
+ init.ops = &samsung_pll2550x_clk_ops;
+ break;
case pll_2550xx:
if (!pll->rate_table)
init.ops = &samsung_pll2550xx_clk_min_ops;
else
init.ops = &samsung_pll2550xx_clk_ops;
break;
+ case pll_2650x:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll2650x_clk_min_ops;
+ else
+ init.ops = &samsung_pll2650x_clk_ops;
+ break;
case pll_2650xx:
if (!pll->rate_table)
init.ops = &samsung_pll2650xx_clk_min_ops;
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 213de9af8b4f..a1ca0233cb4b 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -31,7 +31,9 @@ enum samsung_pll_type {
pll_s3c2410_mpll,
pll_s3c2410_upll,
pll_s3c2440_mpll,
+ pll_2550x,
pll_2550xx,
+ pll_2650x,
pll_2650xx,
pll_1450x,
pll_1451x,
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 546bd79c8e3a..a485f3b284b9 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -15,6 +15,11 @@
#include <linux/of.h>
#include <linux/of_address.h>
+struct clkgen_data {
+ unsigned long flags;
+ bool mode;
+};
+
struct flexgen {
struct clk_hw hw;
@@ -28,9 +33,14 @@ struct flexgen {
struct clk_gate fgate;
/* Final divisor */
struct clk_divider fdiv;
+ /* Asynchronous mode control */
+ struct clk_gate sync;
+ /* hw control flags */
+ bool control_mode;
};
#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
static int flexgen_enable(struct clk_hw *hw)
{
@@ -139,12 +149,21 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
struct flexgen *flexgen = to_flexgen(hw);
struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+ struct clk_hw *sync_hw = &flexgen->sync.hw;
+ struct clk_gate *config = to_clk_gate(sync_hw);
unsigned long div = 0;
int ret = 0;
+ u32 reg;
__clk_hw_set_clk(pdiv_hw, hw);
__clk_hw_set_clk(fdiv_hw, hw);
+ if (flexgen->control_mode) {
+ reg = readl(config->reg);
+ reg &= ~BIT(config->bit_idx);
+ writel(reg, config->reg);
+ }
+
div = clk_best_div(parent_rate, rate);
/*
@@ -178,7 +197,7 @@ static const struct clk_ops flexgen_ops = {
static struct clk *clk_register_flexgen(const char *name,
const char **parent_names, u8 num_parents,
void __iomem *reg, spinlock_t *lock, u32 idx,
- unsigned long flexgen_flags) {
+ unsigned long flexgen_flags, bool mode) {
struct flexgen *fgxbar;
struct clk *clk;
struct clk_init_data init;
@@ -227,6 +246,13 @@ static struct clk *clk_register_flexgen(const char *name,
fgxbar->fdiv.reg = fdiv_reg;
fgxbar->fdiv.width = 6;
+ /* Final divider sync config */
+ fgxbar->sync.lock = lock;
+ fgxbar->sync.reg = fdiv_reg;
+ fgxbar->sync.bit_idx = 7;
+
+ fgxbar->control_mode = mode;
+
fgxbar->hw.init = &init;
clk = clk_register(NULL, &fgxbar->hw);
@@ -259,6 +285,27 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
return parents;
}
+static const struct clkgen_data clkgen_audio = {
+ .flags = CLK_SET_RATE_PARENT,
+};
+
+static const struct clkgen_data clkgen_video = {
+ .flags = CLK_SET_RATE_PARENT,
+ .mode = 1,
+};
+
+static const struct of_device_id flexgen_of_match[] = {
+ {
+ .compatible = "st,flexgen-audio",
+ .data = &clkgen_audio,
+ },
+ {
+ .compatible = "st,flexgen-video",
+ .data = &clkgen_video,
+ },
+ {}
+};
+
static void __init st_of_flexgen_setup(struct device_node *np)
{
struct device_node *pnode;
@@ -267,7 +314,11 @@ static void __init st_of_flexgen_setup(struct device_node *np)
const char **parents;
int num_parents, i;
spinlock_t *rlock = NULL;
+ const struct of_device_id *match;
+ struct clkgen_data *data = NULL;
+ unsigned long flex_flags = 0;
int ret;
+ bool clk_mode = 0;
pnode = of_get_parent(np);
if (!pnode)
@@ -281,6 +332,13 @@ static void __init st_of_flexgen_setup(struct device_node *np)
if (!parents)
return;
+ match = of_match_node(flexgen_of_match, np);
+ if (match) {
+ data = (struct clkgen_data *)match->data;
+ flex_flags = data->flags;
+ clk_mode = data->mode;
+ }
+
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
goto err;
@@ -307,7 +365,6 @@ static void __init st_of_flexgen_setup(struct device_node *np)
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
const char *clk_name;
- unsigned long flex_flags = 0;
if (of_property_read_string_index(np, "clock-output-names",
i, &clk_name)) {
@@ -323,7 +380,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
continue;
clk = clk_register_flexgen(clk_name, parents, num_parents,
- reg, rlock, i, flex_flags);
+ reg, rlock, i, flex_flags, clk_mode);
if (IS_ERR(clk))
goto err;
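
The flexgen change above replaces the single generic compatible with per-variant entries ("st,flexgen-audio", "st,flexgen-video") whose match data carries the clock flags and whether the divider-sync bit must be managed on rate changes. A standalone sketch of that compatible-to-config lookup, with simplified data structures in place of the real of_device_id machinery:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct clkgen_cfg {
	const char *compatible;
	unsigned long flags;	/* e.g. propagate set_rate to the parent */
	bool control_mode;	/* manage the divider sync bit on set_rate */
};

static const struct clkgen_cfg flexgen_cfgs[] = {
	{ "st,flexgen-audio", 0x1, false },
	{ "st,flexgen-video", 0x1, true },
};

/* Stand-in for of_match_node(): find the config for a node's compatible. */
static const struct clkgen_cfg *match_cfg(const char *compatible)
{
	for (size_t i = 0; i < sizeof(flexgen_cfgs) / sizeof(flexgen_cfgs[0]); i++)
		if (!strcmp(flexgen_cfgs[i].compatible, compatible))
			return &flexgen_cfgs[i];
	return NULL;	/* unknown variant: fall back to default behaviour */
}

int main(void)
{
	const struct clkgen_cfg *cfg = match_cfg("st,flexgen-video");

	if (cfg)
		printf("flags=%#lx control_mode=%d\n", cfg->flags, cfg->control_mode);
	return 0;
}
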
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 09afeb85109c..14819d919df1 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -42,79 +42,6 @@ struct stm_fs {
unsigned long nsdiv;
};
-static const struct stm_fs fs216c65_rtbl[] = {
- { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 312.5 Khz */
- { .mdiv = 0x17, .pe = 0x25ed, .sdiv = 0x1, .nsdiv = 0 }, /* 27 MHz */
- { .mdiv = 0x1a, .pe = 0x7b36, .sdiv = 0x2, .nsdiv = 1 }, /* 36.87 MHz */
- { .mdiv = 0x13, .pe = 0x0, .sdiv = 0x2, .nsdiv = 1 }, /* 48 MHz */
- { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x1, .nsdiv = 1 }, /* 108 MHz */
-};
-
-static const struct stm_fs fs432c65_rtbl[] = {
- { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 625 Khz */
- { .mdiv = 0x13, .pe = 0x777c, .sdiv = 0x4, .nsdiv = 1 }, /* 25.175 MHz */
- { .mdiv = 0x19, .pe = 0x4d35, .sdiv = 0x2, .nsdiv = 0 }, /* 25.200 MHz */
- { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x4, .nsdiv = 1 }, /* 27.000 MHz */
- { .mdiv = 0x17, .pe = 0x28f5, .sdiv = 0x2, .nsdiv = 0 }, /* 27.027 MHz */
- { .mdiv = 0x16, .pe = 0x3359, .sdiv = 0x2, .nsdiv = 0 }, /* 28.320 MHz */
- { .mdiv = 0x1f, .pe = 0x2083, .sdiv = 0x3, .nsdiv = 1 }, /* 30.240 MHz */
- { .mdiv = 0x1e, .pe = 0x430d, .sdiv = 0x3, .nsdiv = 1 }, /* 31.500 MHz */
- { .mdiv = 0x17, .pe = 0x0, .sdiv = 0x3, .nsdiv = 1 }, /* 40.000 MHz */
- { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x1, .nsdiv = 0 }, /* 49.500 MHz */
- { .mdiv = 0x13, .pe = 0x6667, .sdiv = 0x3, .nsdiv = 1 }, /* 50.000 MHz */
- { .mdiv = 0x10, .pe = 0x1ee6, .sdiv = 0x3, .nsdiv = 1 }, /* 57.284 MHz */
- { .mdiv = 0x1d, .pe = 0x3b14, .sdiv = 0x2, .nsdiv = 1 }, /* 65.000 MHz */
- { .mdiv = 0x12, .pe = 0x7c65, .sdiv = 0x1, .nsdiv = 0 }, /* 71.000 MHz */
- { .mdiv = 0x19, .pe = 0xecd, .sdiv = 0x2, .nsdiv = 1 }, /* 74.176 MHz */
- { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x2, .nsdiv = 1 }, /* 74.250 MHz */
- { .mdiv = 0x19, .pe = 0x3334, .sdiv = 0x2, .nsdiv = 1 }, /* 75.000 MHz */
- { .mdiv = 0x18, .pe = 0x5138, .sdiv = 0x2, .nsdiv = 1 }, /* 78.800 MHz */
- { .mdiv = 0x1d, .pe = 0x77d, .sdiv = 0x0, .nsdiv = 0 }, /* 85.500 MHz */
- { .mdiv = 0x1c, .pe = 0x13d5, .sdiv = 0x0, .nsdiv = 0 }, /* 88.750 MHz */
- { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x2, .nsdiv = 1 }, /* 108.000 MHz */
- { .mdiv = 0x17, .pe = 0x28f5, .sdiv = 0x0, .nsdiv = 0 }, /* 108.108 MHz */
- { .mdiv = 0x10, .pe = 0x6e26, .sdiv = 0x2, .nsdiv = 1 }, /* 118.963 MHz */
- { .mdiv = 0x15, .pe = 0x3e63, .sdiv = 0x0, .nsdiv = 0 }, /* 119.000 MHz */
- { .mdiv = 0x1c, .pe = 0x471d, .sdiv = 0x1, .nsdiv = 1 }, /* 135.000 MHz */
- { .mdiv = 0x19, .pe = 0xecd, .sdiv = 0x1, .nsdiv = 1 }, /* 148.352 MHz */
- { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x1, .nsdiv = 1 }, /* 148.500 MHz */
- { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */
-};
-
-static const struct stm_fs fs660c32_rtbl[] = {
- { .mdiv = 0x14, .pe = 0x376b, .sdiv = 0x4, .nsdiv = 1 }, /* 25.175 MHz */
- { .mdiv = 0x14, .pe = 0x30c3, .sdiv = 0x4, .nsdiv = 1 }, /* 25.200 MHz */
- { .mdiv = 0x10, .pe = 0x71c7, .sdiv = 0x4, .nsdiv = 1 }, /* 27.000 MHz */
- { .mdiv = 0x00, .pe = 0x47af, .sdiv = 0x3, .nsdiv = 0 }, /* 27.027 MHz */
- { .mdiv = 0x0e, .pe = 0x4e1a, .sdiv = 0x4, .nsdiv = 1 }, /* 28.320 MHz */
- { .mdiv = 0x0b, .pe = 0x534d, .sdiv = 0x4, .nsdiv = 1 }, /* 30.240 MHz */
- { .mdiv = 0x17, .pe = 0x6fbf, .sdiv = 0x2, .nsdiv = 0 }, /* 31.500 MHz */
- { .mdiv = 0x01, .pe = 0x0, .sdiv = 0x4, .nsdiv = 1 }, /* 40.000 MHz */
- { .mdiv = 0x15, .pe = 0x2aab, .sdiv = 0x3, .nsdiv = 1 }, /* 49.500 MHz */
- { .mdiv = 0x14, .pe = 0x6666, .sdiv = 0x3, .nsdiv = 1 }, /* 50.000 MHz */
- { .mdiv = 0x1d, .pe = 0x395f, .sdiv = 0x1, .nsdiv = 0 }, /* 57.284 MHz */
- { .mdiv = 0x08, .pe = 0x4ec5, .sdiv = 0x3, .nsdiv = 1 }, /* 65.000 MHz */
- { .mdiv = 0x05, .pe = 0x1770, .sdiv = 0x3, .nsdiv = 1 }, /* 71.000 MHz */
- { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x3, .nsdiv = 1 }, /* 74.176 MHz */
- { .mdiv = 0x0f, .pe = 0x3426, .sdiv = 0x1, .nsdiv = 0 }, /* 74.250 MHz */
- { .mdiv = 0x0e, .pe = 0x7777, .sdiv = 0x1, .nsdiv = 0 }, /* 75.000 MHz */
- { .mdiv = 0x01, .pe = 0x4053, .sdiv = 0x3, .nsdiv = 1 }, /* 78.800 MHz */
- { .mdiv = 0x09, .pe = 0x15b5, .sdiv = 0x1, .nsdiv = 0 }, /* 85.500 MHz */
- { .mdiv = 0x1b, .pe = 0x3f19, .sdiv = 0x2, .nsdiv = 1 }, /* 88.750 MHz */
- { .mdiv = 0x10, .pe = 0x71c7, .sdiv = 0x2, .nsdiv = 1 }, /* 108.000 MHz */
- { .mdiv = 0x00, .pe = 0x47af, .sdiv = 0x1, .nsdiv = 0 }, /* 108.108 MHz */
- { .mdiv = 0x0c, .pe = 0x3118, .sdiv = 0x2, .nsdiv = 1 }, /* 118.963 MHz */
- { .mdiv = 0x0c, .pe = 0x2f54, .sdiv = 0x2, .nsdiv = 1 }, /* 119.000 MHz */
- { .mdiv = 0x07, .pe = 0xe39, .sdiv = 0x2, .nsdiv = 1 }, /* 135.000 MHz */
- { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x2, .nsdiv = 1 }, /* 148.352 MHz */
- { .mdiv = 0x0f, .pe = 0x3426, .sdiv = 0x0, .nsdiv = 0 }, /* 148.500 MHz */
- { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x1, .nsdiv = 1 }, /* 296.704 MHz */
- { .mdiv = 0x03, .pe = 0x471c, .sdiv = 0x1, .nsdiv = 1 }, /* 297.000 MHz */
- { .mdiv = 0x00, .pe = 0x295f, .sdiv = 0x1, .nsdiv = 1 }, /* 326.700 MHz */
- { .mdiv = 0x1f, .pe = 0x3633, .sdiv = 0x0, .nsdiv = 1 }, /* 333.000 MHz */
- { .mdiv = 0x1c, .pe = 0x0, .sdiv = 0x0, .nsdiv = 1 }, /* 352.000 Mhz */
-};
-
struct clkgen_quadfs_data {
bool reset_present;
bool bwfilter_present;
@@ -138,174 +65,18 @@ struct clkgen_quadfs_data {
struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
const struct clk_ops *pll_ops;
- const struct stm_fs *rtbl;
- u8 rtbl_cnt;
+ int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
int (*get_rate)(unsigned long , const struct stm_fs *,
unsigned long *);
};
-static const struct clk_ops st_quadfs_pll_c65_ops;
static const struct clk_ops st_quadfs_pll_c32_ops;
-static const struct clk_ops st_quadfs_fs216c65_ops;
-static const struct clk_ops st_quadfs_fs432c65_ops;
static const struct clk_ops st_quadfs_fs660c32_ops;
-static int clk_fs216c65_get_rate(unsigned long, const struct stm_fs *,
- unsigned long *);
-static int clk_fs432c65_get_rate(unsigned long, const struct stm_fs *,
- unsigned long *);
+static int clk_fs660c32_dig_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
unsigned long *);
-/*
- * Values for all of the standalone instances of this clock
- * generator found in STiH415 and STiH416 SYSCFG register banks. Note
- * that the individual channel standby control bits (nsb) are in the
- * first register along with the PLL control bits.
- */
-static const struct clkgen_quadfs_data st_fs216c65_416 = {
- /* 416 specific */
- .npda = CLKGEN_FIELD(0x0, 0x1, 14),
- .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
- CLKGEN_FIELD(0x0, 0x1, 11),
- CLKGEN_FIELD(0x0, 0x1, 12),
- CLKGEN_FIELD(0x0, 0x1, 13) },
- .nsdiv_present = true,
- .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
- CLKGEN_FIELD(0x0, 0x1, 19),
- CLKGEN_FIELD(0x0, 0x1, 20),
- CLKGEN_FIELD(0x0, 0x1, 21) },
- .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
- CLKGEN_FIELD(0x14, 0x1f, 0),
- CLKGEN_FIELD(0x24, 0x1f, 0),
- CLKGEN_FIELD(0x34, 0x1f, 0) },
- .en = { CLKGEN_FIELD(0x10, 0x1, 0),
- CLKGEN_FIELD(0x20, 0x1, 0),
- CLKGEN_FIELD(0x30, 0x1, 0),
- CLKGEN_FIELD(0x40, 0x1, 0) },
- .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
- .bwfilter_present = true,
- .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
- .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
- CLKGEN_FIELD(0x18, 0xffff, 0),
- CLKGEN_FIELD(0x28, 0xffff, 0),
- CLKGEN_FIELD(0x38, 0xffff, 0) },
- .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
- CLKGEN_FIELD(0x1C, 0x7, 0),
- CLKGEN_FIELD(0x2C, 0x7, 0),
- CLKGEN_FIELD(0x3C, 0x7, 0) },
- .pll_ops = &st_quadfs_pll_c65_ops,
- .rtbl = fs216c65_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs216c65_rtbl),
- .get_rate = clk_fs216c65_get_rate,
-};
-
-static const struct clkgen_quadfs_data st_fs432c65_416 = {
- .npda = CLKGEN_FIELD(0x0, 0x1, 14),
- .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
- CLKGEN_FIELD(0x0, 0x1, 11),
- CLKGEN_FIELD(0x0, 0x1, 12),
- CLKGEN_FIELD(0x0, 0x1, 13) },
- .nsdiv_present = true,
- .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
- CLKGEN_FIELD(0x0, 0x1, 19),
- CLKGEN_FIELD(0x0, 0x1, 20),
- CLKGEN_FIELD(0x0, 0x1, 21) },
- .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
- CLKGEN_FIELD(0x14, 0x1f, 0),
- CLKGEN_FIELD(0x24, 0x1f, 0),
- CLKGEN_FIELD(0x34, 0x1f, 0) },
- .en = { CLKGEN_FIELD(0x10, 0x1, 0),
- CLKGEN_FIELD(0x20, 0x1, 0),
- CLKGEN_FIELD(0x30, 0x1, 0),
- CLKGEN_FIELD(0x40, 0x1, 0) },
- .ndiv = CLKGEN_FIELD(0x0, 0x1, 15),
- .bwfilter_present = true,
- .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16),
- .pe = { CLKGEN_FIELD(0x8, 0xffff, 0),
- CLKGEN_FIELD(0x18, 0xffff, 0),
- CLKGEN_FIELD(0x28, 0xffff, 0),
- CLKGEN_FIELD(0x38, 0xffff, 0) },
- .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0),
- CLKGEN_FIELD(0x1C, 0x7, 0),
- CLKGEN_FIELD(0x2C, 0x7, 0),
- CLKGEN_FIELD(0x3C, 0x7, 0) },
- .pll_ops = &st_quadfs_pll_c65_ops,
- .rtbl = fs432c65_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs432c65_rtbl),
- .get_rate = clk_fs432c65_get_rate,
-};
-
-static const struct clkgen_quadfs_data st_fs660c32_E_416 = {
- .npda = CLKGEN_FIELD(0x0, 0x1, 14),
- .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
- CLKGEN_FIELD(0x0, 0x1, 11),
- CLKGEN_FIELD(0x0, 0x1, 12),
- CLKGEN_FIELD(0x0, 0x1, 13) },
- .nsdiv_present = true,
- .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
- CLKGEN_FIELD(0x0, 0x1, 19),
- CLKGEN_FIELD(0x0, 0x1, 20),
- CLKGEN_FIELD(0x0, 0x1, 21) },
- .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
- CLKGEN_FIELD(0x14, 0x1f, 0),
- CLKGEN_FIELD(0x24, 0x1f, 0),
- CLKGEN_FIELD(0x34, 0x1f, 0) },
- .en = { CLKGEN_FIELD(0x10, 0x1, 0),
- CLKGEN_FIELD(0x20, 0x1, 0),
- CLKGEN_FIELD(0x30, 0x1, 0),
- CLKGEN_FIELD(0x40, 0x1, 0) },
- .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
- .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
- CLKGEN_FIELD(0x18, 0x7fff, 0),
- CLKGEN_FIELD(0x28, 0x7fff, 0),
- CLKGEN_FIELD(0x38, 0x7fff, 0) },
- .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
- CLKGEN_FIELD(0x1C, 0xf, 0),
- CLKGEN_FIELD(0x2C, 0xf, 0),
- CLKGEN_FIELD(0x3C, 0xf, 0) },
- .lockstatus_present = true,
- .lock_status = CLKGEN_FIELD(0xAC, 0x1, 0),
- .pll_ops = &st_quadfs_pll_c32_ops,
- .rtbl = fs660c32_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
- .get_rate = clk_fs660c32_dig_get_rate,
-};
-
-static const struct clkgen_quadfs_data st_fs660c32_F_416 = {
- .npda = CLKGEN_FIELD(0x0, 0x1, 14),
- .nsb = { CLKGEN_FIELD(0x0, 0x1, 10),
- CLKGEN_FIELD(0x0, 0x1, 11),
- CLKGEN_FIELD(0x0, 0x1, 12),
- CLKGEN_FIELD(0x0, 0x1, 13) },
- .nsdiv_present = true,
- .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18),
- CLKGEN_FIELD(0x0, 0x1, 19),
- CLKGEN_FIELD(0x0, 0x1, 20),
- CLKGEN_FIELD(0x0, 0x1, 21) },
- .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0),
- CLKGEN_FIELD(0x14, 0x1f, 0),
- CLKGEN_FIELD(0x24, 0x1f, 0),
- CLKGEN_FIELD(0x34, 0x1f, 0) },
- .en = { CLKGEN_FIELD(0x10, 0x1, 0),
- CLKGEN_FIELD(0x20, 0x1, 0),
- CLKGEN_FIELD(0x30, 0x1, 0),
- CLKGEN_FIELD(0x40, 0x1, 0) },
- .ndiv = CLKGEN_FIELD(0x0, 0x7, 15),
- .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0),
- CLKGEN_FIELD(0x18, 0x7fff, 0),
- CLKGEN_FIELD(0x28, 0x7fff, 0),
- CLKGEN_FIELD(0x38, 0x7fff, 0) },
- .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0),
- CLKGEN_FIELD(0x1C, 0xf, 0),
- CLKGEN_FIELD(0x2C, 0xf, 0),
- CLKGEN_FIELD(0x3C, 0xf, 0) },
- .lockstatus_present = true,
- .lock_status = CLKGEN_FIELD(0xEC, 0x1, 0),
- .pll_ops = &st_quadfs_pll_c32_ops,
- .rtbl = fs660c32_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
- .get_rate = clk_fs660c32_dig_get_rate,
-};
static const struct clkgen_quadfs_data st_fs660c32_C = {
.nrst_present = true,
@@ -345,8 +116,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C = {
.powerup_polarity = 1,
.standby_polarity = 1,
.pll_ops = &st_quadfs_pll_c32_ops,
- .rtbl = fs660c32_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
+ .get_params = clk_fs660c32_dig_get_params,
.get_rate = clk_fs660c32_dig_get_rate,
};
@@ -388,8 +158,7 @@ static const struct clkgen_quadfs_data st_fs660c32_D = {
.powerup_polarity = 1,
.standby_polarity = 1,
.pll_ops = &st_quadfs_pll_c32_ops,
- .rtbl = fs660c32_rtbl,
- .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl),
+ .get_params = clk_fs660c32_dig_get_params,
.get_rate = clk_fs660c32_dig_get_rate,
};
/**
@@ -605,12 +374,6 @@ static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static const struct clk_ops st_quadfs_pll_c65_ops = {
- .enable = quadfs_pll_enable,
- .disable = quadfs_pll_disable,
- .is_enabled = quadfs_pll_is_enabled,
-};
-
static const struct clk_ops st_quadfs_pll_c32_ops = {
.enable = quadfs_pll_enable,
.disable = quadfs_pll_disable,
@@ -797,48 +560,6 @@ static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
return fs->data->standby_polarity ? !nsb : !!nsb;
}
-#define P15 (uint64_t)(1 << 15)
-
-static int clk_fs216c65_get_rate(unsigned long input, const struct stm_fs *fs,
- unsigned long *rate)
-{
- uint64_t res;
- unsigned long ns;
- unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */
- unsigned long s;
- long m;
-
- m = fs->mdiv - 32;
- s = 1 << (fs->sdiv + 1);
- ns = (fs->nsdiv ? 1 : 3);
-
- res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33));
- res = res - (s * ns * fs->pe);
- *rate = div64_u64(P15 * nd * input * 32, res);
-
- return 0;
-}
-
-static int clk_fs432c65_get_rate(unsigned long input, const struct stm_fs *fs,
- unsigned long *rate)
-{
- uint64_t res;
- unsigned long nd = 16; /* ndiv value; stuck at 0 (30Mhz input) */
- long m;
- unsigned long sd;
- unsigned long ns;
-
- m = fs->mdiv - 32;
- sd = 1 << (fs->sdiv + 1);
- ns = (fs->nsdiv ? 1 : 3);
-
- res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33));
- res = res - (sd * ns * fs->pe);
- *rate = div64_u64(P15 * nd * input * 32, res);
-
- return 0;
-}
-
#define P20 (uint64_t)(1 << 20)
static int clk_fs660c32_dig_get_rate(unsigned long input,
@@ -864,6 +585,107 @@ static int clk_fs660c32_dig_get_rate(unsigned long input,
return 0;
}
+
+static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
+ signed long input, unsigned long output, uint64_t *p,
+ struct stm_fs *fs)
+{
+ unsigned long new_freq, new_deviation;
+ struct stm_fs fs_tmp;
+ uint64_t val;
+
+ val = (uint64_t)output << si;
+
+ *p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);
+
+ *p = div64_u64(*p, val);
+
+ if (*p > 32767LL)
+ return 1;
+
+ fs_tmp.mdiv = (unsigned long) m;
+ fs_tmp.pe = (unsigned long)*p;
+ fs_tmp.sdiv = si;
+ fs_tmp.nsdiv = 1;
+
+ clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
+
+ new_deviation = abs(output - new_freq);
+
+ if (new_deviation < *deviation) {
+ fs->mdiv = m;
+ fs->pe = (unsigned long)*p;
+ fs->sdiv = si;
+ fs->nsdiv = 1;
+ *deviation = new_deviation;
+ }
+ return 0;
+}
+
+static int clk_fs660c32_dig_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs)
+{
+ int si; /* sdiv_reg (8 downto 0) */
+ int m; /* md value */
+ unsigned long new_freq, new_deviation;
+ /* initial condition to say: "infinite deviation" */
+ unsigned long deviation = ~0;
+ uint64_t p, p1, p2; /* pe value */
+ int r1, r2;
+
+ struct stm_fs fs_tmp;
+
+ for (si = 0; (si <= 8) && deviation; si++) {
+
+ /* Boundary test to avoid useless iteration */
+ r1 = clk_fs660c32_get_pe(0, si, &deviation,
+ input, output, &p1, fs);
+ r2 = clk_fs660c32_get_pe(31, si, &deviation,
+ input, output, &p2, fs);
+
+ /* No solution */
+ if (r1 && r2 && (p1 > p2))
+ continue;
+
+ /* Try to find best deviation */
+ for (m = 1; (m < 31) && deviation; m++)
+ clk_fs660c32_get_pe(m, si, &deviation,
+ input, output, &p, fs);
+
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -1;
+
+ /* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
+ if (deviation) {
+ fs_tmp.mdiv = fs->mdiv;
+ fs_tmp.sdiv = fs->sdiv;
+ fs_tmp.nsdiv = fs->nsdiv;
+
+ if (fs->pe > 2)
+ p2 = fs->pe - 2;
+ else
+ p2 = 0;
+
+ for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
+ fs_tmp.pe = (unsigned long)p2;
+
+ clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
+
+ new_deviation = abs(output - new_freq);
+
+ /* Check if this is a better solution */
+ if (new_deviation < deviation) {
+ fs->pe = (unsigned long)p2;
+ deviation = new_deviation;
+
+ }
+ }
+ }
+ return 0;
+}
+
static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
struct stm_fs *params)
{
@@ -899,38 +721,14 @@ static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
int (*clk_fs_get_rate)(unsigned long ,
const struct stm_fs *, unsigned long *);
- struct stm_fs prev_params;
- unsigned long prev_rate, rate = 0;
- unsigned long diff_rate, prev_diff_rate = ~0;
- int index;
+ int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
+ unsigned long rate = 0;
clk_fs_get_rate = fs->data->get_rate;
+ clk_fs_get_params = fs->data->get_params;
- for (index = 0; index < fs->data->rtbl_cnt; index++) {
- prev_rate = rate;
-
- *params = fs->data->rtbl[index];
- prev_params = *params;
-
- clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate);
-
- diff_rate = abs(drate - rate);
-
- if (diff_rate > prev_diff_rate) {
- rate = prev_rate;
- *params = prev_params;
- break;
- }
-
- prev_diff_rate = diff_rate;
-
- if (drate == rate)
- return rate;
- }
-
-
- if (index == fs->data->rtbl_cnt)
- *params = prev_params;
+ if (!clk_fs_get_params(prate, drate, params))
+ clk_fs_get_rate(prate, params, &rate);
return rate;
}
@@ -1063,34 +861,6 @@ static struct clk * __init st_clk_register_quadfs_fsynth(
return clk;
}
-static const struct of_device_id quadfs_of_match[] = {
- {
- .compatible = "st,stih416-quadfs216",
- .data = &st_fs216c65_416
- },
- {
- .compatible = "st,stih416-quadfs432",
- .data = &st_fs432c65_416
- },
- {
- .compatible = "st,stih416-quadfs660-E",
- .data = &st_fs660c32_E_416
- },
- {
- .compatible = "st,stih416-quadfs660-F",
- .data = &st_fs660c32_F_416
- },
- {
- .compatible = "st,stih407-quadfs660-C",
- .data = &st_fs660c32_C
- },
- {
- .compatible = "st,stih407-quadfs660-D",
- .data = &st_fs660c32_D
- },
- {}
-};
-
static void __init st_of_create_quadfs_fsynths(
struct device_node *np, const char *pll_name,
struct clkgen_quadfs_data *quadfs, void __iomem *reg,
@@ -1150,18 +920,14 @@ static void __init st_of_create_quadfs_fsynths(
of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}
-static void __init st_of_quadfs_setup(struct device_node *np)
+static void __init st_of_quadfs_setup(struct device_node *np,
+ struct clkgen_quadfs_data *data)
{
- const struct of_device_id *match;
struct clk *clk;
const char *pll_name, *clk_parent_name;
void __iomem *reg;
spinlock_t *lock;
- match = of_match_node(quadfs_of_match, np);
- if (WARN_ON(!match))
- return;
-
reg = of_iomap(np, 0);
if (!reg)
return;
@@ -1180,8 +946,8 @@ static void __init st_of_quadfs_setup(struct device_node *np)
spin_lock_init(lock);
- clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name,
- (struct clkgen_quadfs_data *) match->data, reg, lock);
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
+ reg, lock);
if (IS_ERR(clk))
goto err_exit;
else
@@ -1190,11 +956,20 @@ static void __init st_of_quadfs_setup(struct device_node *np)
__clk_get_name(clk_get_parent(clk)),
(unsigned int)clk_get_rate(clk));
- st_of_create_quadfs_fsynths(np, pll_name,
- (struct clkgen_quadfs_data *)match->data,
- reg, lock);
+ st_of_create_quadfs_fsynths(np, pll_name, data, reg, lock);
err_exit:
kfree(pll_name); /* No longer need local copy of the PLL name */
}
-CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup);
+
+static void __init st_of_quadfs660C_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_C);
+}
+CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);
+
+static void __init st_of_quadfs660D_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_D);
+}
+CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);
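
The clkgen-fsyn change drops the hard-coded fs216/fs432/fs660 rate tables and instead computes fractional-synthesizer parameters on demand: clk_fs660c32_dig_get_params() walks the sdiv/mdiv space, derives a pe value for each candidate, and keeps whichever setting minimizes the deviation from the requested rate. A much simplified standalone sketch of that "keep the lowest deviation" search, using a toy rate formula rather than the real FS660 equations:

#include <stdio.h>

struct params { int m; int s; };

/* Toy synthesizer model: rate = input * (m + 32) >> s.  The real driver
 * uses the FS660C32 equation and additionally tunes a fractional pe term. */
static unsigned long toy_rate(unsigned long input, int m, int s)
{
	return (input * (unsigned long)(m + 32)) >> s;
}

static int get_params(unsigned long input, unsigned long target,
		      struct params *best)
{
	unsigned long deviation = ~0UL;

	for (int s = 0; s <= 8 && deviation; s++) {
		for (int m = 0; m <= 31 && deviation; m++) {
			unsigned long rate = toy_rate(input, m, s);
			unsigned long d = rate > target ? rate - target
							: target - rate;

			if (d < deviation) {	/* keep the closest match so far */
				deviation = d;
				best->m = m;
				best->s = s;
			}
		}
	}
	return deviation == ~0UL ? -1 : 0;
}

int main(void)
{
	struct params p;

	if (!get_params(30000000UL, 148500000UL, &p))
		printf("m=%d s=%d -> %lu Hz\n", p.m, p.s,
		       toy_rate(30000000UL, p.m, p.s));
	return 0;
}
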
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index b1e10ffe7a44..c514d39760cb 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -19,9 +19,6 @@
#include <linux/clk-provider.h>
#include "clkgen.h"
-static DEFINE_SPINLOCK(clkgena_divmux_lock);
-static DEFINE_SPINLOCK(clkgenf_lock);
-
static const char ** __init clkgen_mux_get_parents(struct device_node *np,
int *num_parents)
{
@@ -40,498 +37,6 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np,
return parents;
}
-/**
- * DOC: Clock mux with a programmable divider on each of its three inputs.
- * The mux has an input setting which effectively gates its output.
- *
- * Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gating
- * rate - set rate is supported
- * parent - set/get parent
- */
-
-#define NUM_INPUTS 3
-
-struct clkgena_divmux {
- struct clk_hw hw;
- /* Subclassed mux and divider structures */
- struct clk_mux mux;
- struct clk_divider div[NUM_INPUTS];
- /* Enable/running feedback register bits for each input */
- void __iomem *feedback_reg[NUM_INPUTS];
- int feedback_bit_idx;
-
- u8 muxsel;
-};
-
-#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)
-
-struct clkgena_divmux_data {
- int num_outputs;
- int mux_offset;
- int mux_offset2;
- int mux_start_bit;
- int div_offsets[NUM_INPUTS];
- int fb_offsets[NUM_INPUTS];
- int fb_start_bit_idx;
-};
-
-#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3
-
-static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
-{
- u32 regval = readl(mux->feedback_reg[mux->muxsel]);
- u32 running = regval & BIT(mux->feedback_bit_idx);
- return !!running;
-}
-
-static int clkgena_divmux_enable(struct clk_hw *hw)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *mux_hw = &genamux->mux.hw;
- unsigned long timeout;
- int ret = 0;
-
- __clk_hw_set_clk(mux_hw, hw);
-
- ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
- if (ret)
- return ret;
-
- timeout = jiffies + msecs_to_jiffies(10);
-
- while (!clkgena_divmux_is_running(genamux)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cpu_relax();
- }
-
- return 0;
-}
-
-static void clkgena_divmux_disable(struct clk_hw *hw)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *mux_hw = &genamux->mux.hw;
-
- __clk_hw_set_clk(mux_hw, hw);
-
- clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
-}
-
-static int clkgena_divmux_is_enabled(struct clk_hw *hw)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *mux_hw = &genamux->mux.hw;
-
- __clk_hw_set_clk(mux_hw, hw);
-
- return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
-}
-
-static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *mux_hw = &genamux->mux.hw;
-
- __clk_hw_set_clk(mux_hw, hw);
-
- genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
- if ((s8)genamux->muxsel < 0) {
- pr_debug("%s: %s: Invalid parent, setting to default.\n",
- __func__, clk_hw_get_name(hw));
- genamux->muxsel = 0;
- }
-
- return genamux->muxsel;
-}
-
-static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
-
- if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
- return -EINVAL;
-
- genamux->muxsel = index;
-
- /*
- * If the mux is already enabled, call enable directly to set the
- * new mux position and wait for it to start running again. Otherwise
- * do nothing.
- */
- if (clkgena_divmux_is_enabled(hw))
- clkgena_divmux_enable(hw);
-
- return 0;
-}
-
-static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
-
- __clk_hw_set_clk(div_hw, hw);
-
- return clk_divider_ops.recalc_rate(div_hw, parent_rate);
-}
-
-static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
-
- __clk_hw_set_clk(div_hw, hw);
-
- return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
-}
-
-static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
- struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
-
- __clk_hw_set_clk(div_hw, hw);
-
- return clk_divider_ops.round_rate(div_hw, rate, prate);
-}
-
-static const struct clk_ops clkgena_divmux_ops = {
- .enable = clkgena_divmux_enable,
- .disable = clkgena_divmux_disable,
- .is_enabled = clkgena_divmux_is_enabled,
- .get_parent = clkgena_divmux_get_parent,
- .set_parent = clkgena_divmux_set_parent,
- .round_rate = clkgena_divmux_round_rate,
- .recalc_rate = clkgena_divmux_recalc_rate,
- .set_rate = clkgena_divmux_set_rate,
-};
-
-/**
- * clk_register_genamux - register a genamux clock with the clock framework
- */
-static struct clk * __init clk_register_genamux(const char *name,
- const char **parent_names, u8 num_parents,
- void __iomem *reg,
- const struct clkgena_divmux_data *muxdata,
- u32 idx)
-{
- /*
- * Fixed constants across all ClockgenA variants
- */
- const int mux_width = 2;
- const int divider_width = 5;
- struct clkgena_divmux *genamux;
- struct clk *clk;
- struct clk_init_data init;
- int i;
-
- genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
- if (!genamux)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &clkgena_divmux_ops;
- init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
- init.parent_names = parent_names;
- init.num_parents = num_parents;
-
- genamux->mux.lock = &clkgena_divmux_lock;
- genamux->mux.mask = BIT(mux_width) - 1;
- genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
- if (genamux->mux.shift > 31) {
- /*
- * We have spilled into the second mux register so
- * adjust the register address and the bit shift accordingly
- */
- genamux->mux.reg = reg + muxdata->mux_offset2;
- genamux->mux.shift -= 32;
- } else {
- genamux->mux.reg = reg + muxdata->mux_offset;
- }
-
- for (i = 0; i < NUM_INPUTS; i++) {
- /*
- * Divider config for each input
- */
- void __iomem *divbase = reg + muxdata->div_offsets[i];
- genamux->div[i].width = divider_width;
- genamux->div[i].reg = divbase + (idx * sizeof(u32));
-
- /*
- * Mux enabled/running feedback register for each input.
- */
- genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
- }
-
- genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
- genamux->hw.init = &init;
-
- clk = clk_register(NULL, &genamux->hw);
- if (IS_ERR(clk)) {
- kfree(genamux);
- goto err;
- }
-
- pr_debug("%s: parent %s rate %lu\n",
- __clk_get_name(clk),
- __clk_get_name(clk_get_parent(clk)),
- clk_get_rate(clk));
-err:
- return clk;
-}
-
-static struct clkgena_divmux_data st_divmux_c65hs = {
- .num_outputs = 4,
- .mux_offset = 0x14,
- .mux_start_bit = 0,
- .div_offsets = { 0x800, 0x900, 0xb00 },
- .fb_offsets = { 0x18, 0x1c, 0x20 },
- .fb_start_bit_idx = 0,
-};
-
-static struct clkgena_divmux_data st_divmux_c65ls = {
- .num_outputs = 14,
- .mux_offset = 0x14,
- .mux_offset2 = 0x24,
- .mux_start_bit = 8,
- .div_offsets = { 0x810, 0xa10, 0xb10 },
- .fb_offsets = { 0x18, 0x1c, 0x20 },
- .fb_start_bit_idx = 4,
-};
-
-static struct clkgena_divmux_data st_divmux_c32odf0 = {
- .num_outputs = 8,
- .mux_offset = 0x1c,
- .mux_start_bit = 0,
- .div_offsets = { 0x800, 0x900, 0xa60 },
- .fb_offsets = { 0x2c, 0x24, 0x28 },
- .fb_start_bit_idx = 0,
-};
-
-static struct clkgena_divmux_data st_divmux_c32odf1 = {
- .num_outputs = 8,
- .mux_offset = 0x1c,
- .mux_start_bit = 16,
- .div_offsets = { 0x820, 0x980, 0xa80 },
- .fb_offsets = { 0x2c, 0x24, 0x28 },
- .fb_start_bit_idx = 8,
-};
-
-static struct clkgena_divmux_data st_divmux_c32odf2 = {
- .num_outputs = 8,
- .mux_offset = 0x20,
- .mux_start_bit = 0,
- .div_offsets = { 0x840, 0xa20, 0xb10 },
- .fb_offsets = { 0x2c, 0x24, 0x28 },
- .fb_start_bit_idx = 16,
-};
-
-static struct clkgena_divmux_data st_divmux_c32odf3 = {
- .num_outputs = 8,
- .mux_offset = 0x20,
- .mux_start_bit = 16,
- .div_offsets = { 0x860, 0xa40, 0xb30 },
- .fb_offsets = { 0x2c, 0x24, 0x28 },
- .fb_start_bit_idx = 24,
-};
-
-static const struct of_device_id clkgena_divmux_of_match[] = {
- {
- .compatible = "st,clkgena-divmux-c65-hs",
- .data = &st_divmux_c65hs,
- },
- {
- .compatible = "st,clkgena-divmux-c65-ls",
- .data = &st_divmux_c65ls,
- },
- {
- .compatible = "st,clkgena-divmux-c32-odf0",
- .data = &st_divmux_c32odf0,
- },
- {
- .compatible = "st,clkgena-divmux-c32-odf1",
- .data = &st_divmux_c32odf1,
- },
- {
- .compatible = "st,clkgena-divmux-c32-odf2",
- .data = &st_divmux_c32odf2,
- },
- {
- .compatible = "st,clkgena-divmux-c32-odf3",
- .data = &st_divmux_c32odf3,
- },
- {}
-};
-
-static void __iomem * __init clkgen_get_register_base(struct device_node *np)
-{
- struct device_node *pnode;
- void __iomem *reg;
-
- pnode = of_get_parent(np);
- if (!pnode)
- return NULL;
-
- reg = of_iomap(pnode, 0);
-
- of_node_put(pnode);
- return reg;
-}
-
-static void __init st_of_clkgena_divmux_setup(struct device_node *np)
-{
- const struct of_device_id *match;
- const struct clkgena_divmux_data *data;
- struct clk_onecell_data *clk_data;
- void __iomem *reg;
- const char **parents;
- int num_parents = 0, i;
-
- match = of_match_node(clkgena_divmux_of_match, np);
- if (WARN_ON(!match))
- return;
-
- data = match->data;
-
- reg = clkgen_get_register_base(np);
- if (!reg)
- return;
-
- parents = clkgen_mux_get_parents(np, &num_parents);
- if (IS_ERR(parents))
- goto err_parents;
-
- clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
- if (!clk_data)
- goto err_alloc;
-
- clk_data->clk_num = data->num_outputs;
- clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
- GFP_KERNEL);
-
- if (!clk_data->clks)
- goto err_alloc_clks;
-
- for (i = 0; i < clk_data->clk_num; i++) {
- struct clk *clk;
- const char *clk_name;
-
- if (of_property_read_string_index(np, "clock-output-names",
- i, &clk_name))
- break;
-
- /*
- * If we read an empty clock name then the output is unused
- */
- if (*clk_name == '\0')
- continue;
-
- clk = clk_register_genamux(clk_name, parents, num_parents,
- reg, data, i);
-
- if (IS_ERR(clk))
- goto err;
-
- clk_data->clks[i] = clk;
- }
-
- kfree(parents);
-
- of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
- return;
-err:
- kfree(clk_data->clks);
-err_alloc_clks:
- kfree(clk_data);
-err_alloc:
- kfree(parents);
-err_parents:
- iounmap(reg);
-}
-CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
-
-struct clkgena_prediv_data {
- u32 offset;
- u8 shift;
- struct clk_div_table *table;
-};
-
-static struct clk_div_table prediv_table16[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 16 },
- { .div = 0 },
-};
-
-static struct clkgena_prediv_data prediv_c65_data = {
- .offset = 0x4c,
- .shift = 31,
- .table = prediv_table16,
-};
-
-static struct clkgena_prediv_data prediv_c32_data = {
- .offset = 0x50,
- .shift = 1,
- .table = prediv_table16,
-};
-
-static const struct of_device_id clkgena_prediv_of_match[] = {
- { .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
- { .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
- {}
-};
-
-static void __init st_of_clkgena_prediv_setup(struct device_node *np)
-{
- const struct of_device_id *match;
- void __iomem *reg;
- const char *parent_name, *clk_name;
- struct clk *clk;
- const struct clkgena_prediv_data *data;
-
- match = of_match_node(clkgena_prediv_of_match, np);
- if (!match) {
- pr_err("%s: No matching data\n", __func__);
- return;
- }
-
- data = match->data;
-
- reg = clkgen_get_register_base(np);
- if (!reg)
- return;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- goto err;
-
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &clk_name))
- goto err;
-
- clk = clk_register_divider_table(NULL, clk_name, parent_name,
- CLK_GET_RATE_NOCACHE,
- reg + data->offset, data->shift, 1,
- 0, data->table, NULL);
- if (IS_ERR(clk))
- goto err;
-
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- pr_debug("%s: parent %s rate %u\n",
- __clk_get_name(clk),
- __clk_get_name(clk_get_parent(clk)),
- (unsigned int)clk_get_rate(clk));
-
- return;
-err:
- iounmap(reg);
-}
-CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
-
struct clkgen_mux_data {
u32 offset;
u8 shift;
@@ -541,49 +46,6 @@ struct clkgen_mux_data {
u8 mux_flags;
};
-static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
- .offset = 0,
- .shift = 0,
- .width = 1,
-};
-
-static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
- .offset = 0,
- .shift = 0,
- .width = 1,
-};
-
-static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
- .offset = 0,
- .shift = 0,
- .width = 1,
-};
-
-static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
- .offset = 0,
- .shift = 16,
- .width = 1,
- .lock = &clkgenf_lock,
-};
-
-static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
- .offset = 0,
- .shift = 17,
- .width = 1,
- .lock = &clkgenf_lock,
-};
-
-static struct clkgen_mux_data stih415_a9_mux_data = {
- .offset = 0,
- .shift = 1,
- .width = 2,
- .lock = &clkgen_a9_lock,
-};
-static struct clkgen_mux_data stih416_a9_mux_data = {
- .offset = 0,
- .shift = 0,
- .width = 2,
-};
static struct clkgen_mux_data stih407_a9_mux_data = {
.offset = 0x1a4,
.shift = 0,
@@ -591,58 +53,13 @@ static struct clkgen_mux_data stih407_a9_mux_data = {
.lock = &clkgen_a9_lock,
};
-static const struct of_device_id mux_of_match[] = {
- {
- .compatible = "st,stih416-clkgenc-vcc-hd",
- .data = &clkgen_mux_c_vcc_hd_416,
- },
- {
- .compatible = "st,stih416-clkgenf-vcc-fvdp",
- .data = &clkgen_mux_f_vcc_fvdp_416,
- },
- {
- .compatible = "st,stih416-clkgenf-vcc-hva",
- .data = &clkgen_mux_f_vcc_hva_416,
- },
- {
- .compatible = "st,stih416-clkgenf-vcc-hd",
- .data = &clkgen_mux_f_vcc_hd_416,
- },
- {
- .compatible = "st,stih416-clkgenf-vcc-sd",
- .data = &clkgen_mux_c_vcc_sd_416,
- },
- {
- .compatible = "st,stih415-clkgen-a9-mux",
- .data = &stih415_a9_mux_data,
- },
- {
- .compatible = "st,stih416-clkgen-a9-mux",
- .data = &stih416_a9_mux_data,
- },
- {
- .compatible = "st,stih407-clkgen-a9-mux",
- .data = &stih407_a9_mux_data,
- },
- {}
-};
-
-static void __init st_of_clkgen_mux_setup(struct device_node *np)
+static void __init st_of_clkgen_mux_setup(struct device_node *np,
+ struct clkgen_mux_data *data)
{
- const struct of_device_id *match;
struct clk *clk;
void __iomem *reg;
const char **parents;
- int num_parents;
- const struct clkgen_mux_data *data;
-
- match = of_match_node(mux_of_match, np);
- if (!match) {
- pr_err("%s: No matching data\n", __func__);
- return;
- }
-
- data = match->data;
+ int num_parents = 0;
reg = of_iomap(np, 0);
if (!reg) {
@@ -679,161 +96,10 @@ err:
err_parents:
iounmap(reg);
}
-CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
-
-#define VCC_MAX_CHANNELS 16
-
-#define VCC_GATE_OFFSET 0x0
-#define VCC_MUX_OFFSET 0x4
-#define VCC_DIV_OFFSET 0x8
-
-struct clkgen_vcc_data {
- spinlock_t *lock;
- unsigned long clk_flags;
-};
-
-static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
- .clk_flags = CLK_SET_RATE_PARENT,
-};
-
-static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
- .lock = &clkgenf_lock,
-};
-
-static const struct of_device_id vcc_of_match[] = {
- { .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
- { .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
- {}
-};
-static void __init st_of_clkgen_vcc_setup(struct device_node *np)
+static void __init st_of_clkgen_a9_mux_setup(struct device_node *np)
{
- const struct of_device_id *match;
- void __iomem *reg;
- const char **parents;
- int num_parents, i;
- struct clk_onecell_data *clk_data;
- const struct clkgen_vcc_data *data;
-
- match = of_match_node(vcc_of_match, np);
- if (WARN_ON(!match))
- return;
- data = match->data;
-
- reg = of_iomap(np, 0);
- if (!reg)
- return;
-
- parents = clkgen_mux_get_parents(np, &num_parents);
- if (IS_ERR(parents))
- goto err_parents;
-
- clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
- if (!clk_data)
- goto err_alloc;
-
- clk_data->clk_num = VCC_MAX_CHANNELS;
- clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
- GFP_KERNEL);
-
- if (!clk_data->clks)
- goto err_alloc_clks;
-
- for (i = 0; i < clk_data->clk_num; i++) {
- struct clk *clk;
- const char *clk_name;
- struct clk_gate *gate;
- struct clk_divider *div;
- struct clk_mux *mux;
-
- if (of_property_read_string_index(np, "clock-output-names",
- i, &clk_name))
- break;
-
- /*
- * If we read an empty clock name then the output is unused
- */
- if (*clk_name == '\0')
- continue;
-
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- goto err;
-
- div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div) {
- kfree(gate);
- goto err;
- }
-
- mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- kfree(gate);
- kfree(div);
- goto err;
- }
-
- gate->reg = reg + VCC_GATE_OFFSET;
- gate->bit_idx = i;
- gate->flags = CLK_GATE_SET_TO_DISABLE;
- gate->lock = data->lock;
-
- div->reg = reg + VCC_DIV_OFFSET;
- div->shift = 2 * i;
- div->width = 2;
- div->flags = CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST;
-
- mux->reg = reg + VCC_MUX_OFFSET;
- mux->shift = 2 * i;
- mux->mask = 0x3;
-
- clk = clk_register_composite(NULL, clk_name, parents,
- num_parents,
- &mux->hw, &clk_mux_ops,
- &div->hw, &clk_divider_ops,
- &gate->hw, &clk_gate_ops,
- data->clk_flags |
- CLK_GET_RATE_NOCACHE);
- if (IS_ERR(clk)) {
- kfree(gate);
- kfree(div);
- kfree(mux);
- goto err;
- }
-
- pr_debug("%s: parent %s rate %u\n",
- __clk_get_name(clk),
- __clk_get_name(clk_get_parent(clk)),
- (unsigned int)clk_get_rate(clk));
-
- clk_data->clks[i] = clk;
- }
-
- kfree(parents);
-
- of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
- return;
-
-err:
- for (i = 0; i < clk_data->clk_num; i++) {
- struct clk_composite *composite;
-
- if (!clk_data->clks[i])
- continue;
-
- composite = to_clk_composite(__clk_get_hw(clk_data->clks[i]));
- kfree(to_clk_gate(composite->gate_hw));
- kfree(to_clk_divider(composite->rate_hw));
- kfree(to_clk_mux(composite->mux_hw));
- }
-
- kfree(clk_data->clks);
-err_alloc_clks:
- kfree(clk_data);
-err_alloc:
- kfree(parents);
-err_parents:
- iounmap(reg);
+ st_of_clkgen_mux_setup(np, &stih407_a9_mux_data);
}
-CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);
+CLK_OF_DECLARE(clkgen_a9mux, "st,stih407-clkgen-a9-mux",
+ st_of_clkgen_a9_mux_setup);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 0b5990e82e0d..25bda48a5d35 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -26,14 +26,6 @@ static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
DEFINE_SPINLOCK(clkgen_a9_lock);
/*
- * Common PLL configuration register bits for PLL800 and PLL1600 C65
- */
-#define C65_MDIV_PLL800_MASK (0xff)
-#define C65_MDIV_PLL1600_MASK (0x7)
-#define C65_NDIV_MASK (0xff)
-#define C65_PDIV_MASK (0x7)
-
-/*
* PLL configuration register bits for PLL3200 C32
*/
#define C32_NDIV_MASK (0xff)
@@ -70,144 +62,10 @@ struct clkgen_pll_data {
const struct clk_ops *ops;
};
-static const struct clk_ops st_pll1600c65_ops;
-static const struct clk_ops st_pll800c65_ops;
static const struct clk_ops stm_pll3200c32_ops;
static const struct clk_ops stm_pll3200c32_a9_ops;
-static const struct clk_ops st_pll1200c32_ops;
static const struct clk_ops stm_pll4600c28_ops;
-static const struct clkgen_pll_data st_pll1600c65_ax = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
- .pdn_ctrl = CLKGEN_FIELD(0x10, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
- .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0),
- .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
- .ops = &st_pll1600c65_ops
-};
-
-static const struct clkgen_pll_data st_pll800c65_ax = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19),
- .pdn_ctrl = CLKGEN_FIELD(0xC, 0x1, 1),
- .locked_status = CLKGEN_FIELD(0x0, 0x1, 31),
- .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0),
- .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8),
- .pdiv = CLKGEN_FIELD(0x0, C65_PDIV_MASK, 16),
- .ops = &st_pll800c65_ops
-};
-
-static const struct clkgen_pll_data st_pll3200c32_a1x_0 = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 31),
- .pdn_ctrl = CLKGEN_FIELD(0x18, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x4, 0x1, 31),
- .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0),
- .idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0),
- .num_odfs = 4,
- .odf = { CLKGEN_FIELD(0x54, C32_ODF_MASK, 4),
- CLKGEN_FIELD(0x54, C32_ODF_MASK, 10),
- CLKGEN_FIELD(0x54, C32_ODF_MASK, 16),
- CLKGEN_FIELD(0x54, C32_ODF_MASK, 22) },
- .odf_gate = { CLKGEN_FIELD(0x54, 0x1, 0),
- CLKGEN_FIELD(0x54, 0x1, 1),
- CLKGEN_FIELD(0x54, 0x1, 2),
- CLKGEN_FIELD(0x54, 0x1, 3) },
- .ops = &stm_pll3200c32_ops,
-};
-
-static const struct clkgen_pll_data st_pll3200c32_a1x_1 = {
- .pdn_status = CLKGEN_FIELD(0xC, 0x1, 31),
- .pdn_ctrl = CLKGEN_FIELD(0x18, 0x1, 1),
- .locked_status = CLKGEN_FIELD(0x10, 0x1, 31),
- .ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0),
- .idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0),
- .num_odfs = 4,
- .odf = { CLKGEN_FIELD(0x58, C32_ODF_MASK, 4),
- CLKGEN_FIELD(0x58, C32_ODF_MASK, 10),
- CLKGEN_FIELD(0x58, C32_ODF_MASK, 16),
- CLKGEN_FIELD(0x58, C32_ODF_MASK, 22) },
- .odf_gate = { CLKGEN_FIELD(0x58, 0x1, 0),
- CLKGEN_FIELD(0x58, 0x1, 1),
- CLKGEN_FIELD(0x58, 0x1, 2),
- CLKGEN_FIELD(0x58, 0x1, 3) },
- .ops = &stm_pll3200c32_ops,
-};
-
-/* 415 specific */
-static const struct clkgen_pll_data st_pll3200c32_a9_415 = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
- .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
- .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22),
- .num_odfs = 1,
- .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 3) },
- .odf_gate = { CLKGEN_FIELD(0x0, 0x1, 28) },
- .ops = &stm_pll3200c32_ops,
-};
-
-static const struct clkgen_pll_data st_pll3200c32_ddr_415 = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
- .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x100, 0x1, 0),
- .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
- .num_odfs = 2,
- .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
- CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
- .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
- CLKGEN_FIELD(0x4, 0x1, 29) },
- .ops = &stm_pll3200c32_ops,
-};
-
-static const struct clkgen_pll_data st_pll1200c32_gpu_415 = {
- .pdn_status = CLKGEN_FIELD(0x4, 0x1, 0),
- .pdn_ctrl = CLKGEN_FIELD(0x4, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x168, 0x1, 0),
- .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
- .num_odfs = 0,
- .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
- .ops = &st_pll1200c32_ops,
-};
-
-/* 416 specific */
-static const struct clkgen_pll_data st_pll3200c32_a9_416 = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
- .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0),
- .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
- .num_odfs = 1,
- .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8) },
- .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28) },
- .ops = &stm_pll3200c32_ops,
-};
-
-static const struct clkgen_pll_data st_pll3200c32_ddr_416 = {
- .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0),
- .pdn_ctrl = CLKGEN_FIELD(0x0, 0x1, 0),
- .locked_status = CLKGEN_FIELD(0x10C, 0x1, 0),
- .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25),
- .num_odfs = 2,
- .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8),
- CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) },
- .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28),
- CLKGEN_FIELD(0x4, 0x1, 29) },
- .ops = &stm_pll3200c32_ops,
-};
-
-static const struct clkgen_pll_data st_pll1200c32_gpu_416 = {
- .pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3),
- .pdn_ctrl = CLKGEN_FIELD(0x8E4, 0x1, 3),
- .locked_status = CLKGEN_FIELD(0x90C, 0x1, 0),
- .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3),
- .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0),
- .num_odfs = 0,
- .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) },
- .ops = &st_pll1200c32_ops,
-};
-
static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
/* 407 A0 */
.pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
@@ -410,57 +268,6 @@ static void clkgen_pll_disable(struct clk_hw *hw)
spin_unlock_irqrestore(pll->lock, flags);
}
-static unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clkgen_pll *pll = to_clkgen_pll(hw);
- unsigned long mdiv, ndiv, pdiv;
- unsigned long rate;
- uint64_t res;
-
- if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
- return 0;
-
- pdiv = CLKGEN_READ(pll, pdiv);
- mdiv = CLKGEN_READ(pll, mdiv);
- ndiv = CLKGEN_READ(pll, ndiv);
-
- if (!mdiv)
- mdiv++; /* mdiv=0 or 1 => MDIV=1 */
-
- res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv;
- rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv));
-
- pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
-
- return rate;
-
-}
-
-static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clkgen_pll *pll = to_clkgen_pll(hw);
- unsigned long mdiv, ndiv;
- unsigned long rate;
-
- if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
- return 0;
-
- mdiv = CLKGEN_READ(pll, mdiv);
- ndiv = CLKGEN_READ(pll, ndiv);
-
- if (!mdiv)
- mdiv = 1;
-
- /* Note: input is divided by 1000 to avoid overflow */
- rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000;
-
- pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
-
- return rate;
-}
-
static int clk_pll3200c32_get_params(unsigned long input, unsigned long output,
struct stm_pll *pll)
{
@@ -608,33 +415,6 @@ static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clkgen_pll *pll = to_clkgen_pll(hw);
- unsigned long odf, ldf, idf;
- unsigned long rate;
-
- if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
- return 0;
-
- odf = CLKGEN_READ(pll, odf[0]);
- ldf = CLKGEN_READ(pll, ldf);
- idf = CLKGEN_READ(pll, idf);
-
- if (!idf) /* idf==0 means 1 */
- idf = 1;
- if (!odf) /* odf==0 means 1 */
- odf = 1;
-
- /* Note: input is divided by 1000 to avoid overflow */
- rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000;
-
- pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
-
- return rate;
-}
-
/* PLL output structure
* FVCO >> /2 >> FVCOBY2 (no output)
* |> Divider (ODF) >> PHI
@@ -792,20 +572,6 @@ static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static const struct clk_ops st_pll1600c65_ops = {
- .enable = clkgen_pll_enable,
- .disable = clkgen_pll_disable,
- .is_enabled = clkgen_pll_is_enabled,
- .recalc_rate = recalc_stm_pll1600c65,
-};
-
-static const struct clk_ops st_pll800c65_ops = {
- .enable = clkgen_pll_enable,
- .disable = clkgen_pll_disable,
- .is_enabled = clkgen_pll_is_enabled,
- .recalc_rate = recalc_stm_pll800c65,
-};
-
static const struct clk_ops stm_pll3200c32_ops = {
.enable = clkgen_pll_enable,
.disable = clkgen_pll_disable,
@@ -822,13 +588,6 @@ static const struct clk_ops stm_pll3200c32_a9_ops = {
.set_rate = set_rate_stm_pll3200c32,
};
-static const struct clk_ops st_pll1200c32_ops = {
- .enable = clkgen_pll_enable,
- .disable = clkgen_pll_disable,
- .is_enabled = clkgen_pll_is_enabled,
- .recalc_rate = recalc_stm_pll1200c32,
-};
-
static const struct clk_ops stm_pll4600c28_ops = {
.enable = clkgen_pll_enable,
.disable = clkgen_pll_disable,
@@ -877,22 +636,6 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
return clk;
}
-static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name,
- const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2);
- if (IS_ERR(clk))
- return clk;
-
- pr_debug("%s: parent %s rate %lu\n",
- __clk_get_name(clk),
- __clk_get_name(clk_get_parent(clk)),
- clk_get_rate(clk));
- return clk;
-}
-
static void __iomem * __init clkgen_get_register_base(
struct device_node *np)
{
@@ -909,89 +652,6 @@ static void __iomem * __init clkgen_get_register_base(
return reg;
}
-#define CLKGENAx_PLL0_OFFSET 0x0
-#define CLKGENAx_PLL1_OFFSET 0x4
-
-static void __init clkgena_c65_pll_setup(struct device_node *np)
-{
- const int num_pll_outputs = 3;
- struct clk_onecell_data *clk_data;
- const char *parent_name;
- void __iomem *reg;
- const char *clk_name;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- return;
-
- reg = clkgen_get_register_base(np);
- if (!reg)
- return;
-
- clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
- if (!clk_data)
- return;
-
- clk_data->clk_num = num_pll_outputs;
- clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
- GFP_KERNEL);
-
- if (!clk_data->clks)
- goto err;
-
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &clk_name))
- goto err;
-
- /*
- * PLL0 HS (high speed) output
- */
- clk_data->clks[0] = clkgen_pll_register(parent_name,
- (struct clkgen_pll_data *) &st_pll1600c65_ax,
- reg + CLKGENAx_PLL0_OFFSET, 0, clk_name, NULL);
-
- if (IS_ERR(clk_data->clks[0]))
- goto err;
-
- if (of_property_read_string_index(np, "clock-output-names",
- 1, &clk_name))
- goto err;
-
- /*
- * PLL0 LS (low speed) output, which is a fixed divide by 2 of the
- * high speed output.
- */
- clk_data->clks[1] = clkgen_c65_lsdiv_register(__clk_get_name
- (clk_data->clks[0]),
- clk_name);
-
- if (IS_ERR(clk_data->clks[1]))
- goto err;
-
- if (of_property_read_string_index(np, "clock-output-names",
- 2, &clk_name))
- goto err;
-
- /*
- * PLL1 output
- */
- clk_data->clks[2] = clkgen_pll_register(parent_name,
- (struct clkgen_pll_data *) &st_pll800c65_ax,
- reg + CLKGENAx_PLL1_OFFSET, 0, clk_name, NULL);
-
- if (IS_ERR(clk_data->clks[2]))
- goto err;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
- return;
-
-err:
- kfree(clk_data->clks);
- kfree(clk_data);
-}
-CLK_OF_DECLARE(clkgena_c65_plls,
- "st,clkgena-plls-c65", clkgena_c65_pll_setup);
-
static struct clk * __init clkgen_odf_register(const char *parent_name,
void __iomem *reg,
struct clkgen_pll_data *pll_data,
@@ -1042,72 +702,17 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
return clk;
}
-static const struct of_device_id c32_pll_of_match[] = {
- {
- .compatible = "st,plls-c32-a1x-0",
- .data = &st_pll3200c32_a1x_0,
- },
- {
- .compatible = "st,plls-c32-a1x-1",
- .data = &st_pll3200c32_a1x_1,
- },
- {
- .compatible = "st,stih415-plls-c32-a9",
- .data = &st_pll3200c32_a9_415,
- },
- {
- .compatible = "st,stih415-plls-c32-ddr",
- .data = &st_pll3200c32_ddr_415,
- },
- {
- .compatible = "st,stih416-plls-c32-a9",
- .data = &st_pll3200c32_a9_416,
- },
- {
- .compatible = "st,stih416-plls-c32-ddr",
- .data = &st_pll3200c32_ddr_416,
- },
- {
- .compatible = "st,stih407-plls-c32-a0",
- .data = &st_pll3200c32_407_a0,
- },
- {
- .compatible = "st,plls-c32-cx_0",
- .data = &st_pll3200c32_cx_0,
- },
- {
- .compatible = "st,plls-c32-cx_1",
- .data = &st_pll3200c32_cx_1,
- },
- {
- .compatible = "st,stih407-plls-c32-a9",
- .data = &st_pll3200c32_407_a9,
- },
- {
- .compatible = "st,stih418-plls-c28-a9",
- .data = &st_pll4600c28_418_a9,
- },
- {}
-};
-static void __init clkgen_c32_pll_setup(struct device_node *np)
+static void __init clkgen_c32_pll_setup(struct device_node *np,
+ struct clkgen_pll_data *data)
{
- const struct of_device_id *match;
struct clk *clk;
const char *parent_name, *pll_name;
void __iomem *pll_base;
int num_odfs, odf;
struct clk_onecell_data *clk_data;
- struct clkgen_pll_data *data;
unsigned long pll_flags = 0;
- match = of_match_node(c32_pll_of_match, np);
- if (!match) {
- pr_err("%s: No matching data\n", __func__);
- return;
- }
-
- data = (struct clkgen_pll_data *) match->data;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -1166,59 +771,30 @@ err:
kfree(clk_data->clks);
kfree(clk_data);
}
-CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup);
-
-static const struct of_device_id c32_gpu_pll_of_match[] = {
- {
- .compatible = "st,stih415-gpu-pll-c32",
- .data = &st_pll1200c32_gpu_415,
- },
- {
- .compatible = "st,stih416-gpu-pll-c32",
- .data = &st_pll1200c32_gpu_416,
- },
- {}
-};
-
-static void __init clkgengpu_c32_pll_setup(struct device_node *np)
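+/*
+ * One init function per compatible string, each handing the matching
+ * clkgen_pll_data straight to clkgen_c32_pll_setup() instead of looking
+ * it up in an of_device_id table.
+ */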
+static void __init clkgen_c32_pll0_setup(struct device_node *np)
{
- const struct of_device_id *match;
- struct clk *clk;
- const char *parent_name;
- void __iomem *reg;
- const char *clk_name;
- struct clkgen_pll_data *data;
-
- match = of_match_node(c32_gpu_pll_of_match, np);
- if (!match) {
- pr_err("%s: No matching data\n", __func__);
- return;
- }
-
- data = (struct clkgen_pll_data *)match->data;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- return;
-
- reg = clkgen_get_register_base(np);
- if (!reg)
- return;
-
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &clk_name))
- return;
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data *) &st_pll3200c32_cx_0);
+}
+CLK_OF_DECLARE(c32_pll0, "st,clkgen-pll0", clkgen_c32_pll0_setup);
- /*
- * PLL 1200MHz output
- */
- clk = clkgen_pll_register(parent_name, data, reg,
- 0, clk_name, data->lock);
+static void __init clkgen_c32_pll1_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data *) &st_pll3200c32_cx_1);
+}
+CLK_OF_DECLARE(c32_pll1, "st,clkgen-pll1", clkgen_c32_pll1_setup);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+static void __init clkgen_c32_plla9_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data *) &st_pll3200c32_407_a9);
+}
+CLK_OF_DECLARE(c32_plla9, "st,stih407-clkgen-plla9", clkgen_c32_plla9_setup);
- return;
+static void __init clkgen_c28_plla9_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data *) &st_pll4600c28_418_a9);
}
-CLK_OF_DECLARE(clkgengpu_c32_pll,
- "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup);
+CLK_OF_DECLARE(c28_plla9, "st,stih418-clkgen-plla9", clkgen_c28_plla9_setup);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 2afcbd39e41e..254d9526c018 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -1,5 +1,6 @@
config SUNXI_CCU
bool "Clock support for Allwinner SoCs"
+ depends on ARCH_SUNXI || COMPILE_TEST
default ARCH_SUNXI
if SUNXI_CCU
@@ -19,6 +20,10 @@ config SUNXI_CCU_GATE
config SUNXI_CCU_MUX
bool
+config SUNXI_CCU_MULT
+ bool
+ select SUNXI_CCU_MUX
+
config SUNXI_CCU_PHASE
bool
@@ -51,6 +56,40 @@ config SUNXI_CCU_MP
# SoC Drivers
+config SUN6I_A31_CCU
+ bool "Support for the Allwinner A31/A31s CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default MACH_SUN6I
+
+config SUN8I_A23_CCU
+ bool "Support for the Allwinner A23 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_MULT
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default MACH_SUN8I
+
+config SUN8I_A33_CCU
+ bool "Support for the Allwinner A33 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_MULT
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default MACH_SUN8I
+
config SUN8I_H3_CCU
bool "Support for the Allwinner H3 CCU"
select SUNXI_CCU_DIV
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 633ce642ffae..106cba27c331 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SUNXI_CCU_DIV) += ccu_div.o
obj-$(CONFIG_SUNXI_CCU_FRAC) += ccu_frac.o
obj-$(CONFIG_SUNXI_CCU_GATE) += ccu_gate.o
obj-$(CONFIG_SUNXI_CCU_MUX) += ccu_mux.o
+obj-$(CONFIG_SUNXI_CCU_MULT) += ccu_mult.o
obj-$(CONFIG_SUNXI_CCU_PHASE) += ccu_phase.o
# Multi-factor clocks
@@ -17,4 +18,7 @@ obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o
obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o
# SoC support
+obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
+obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
+obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
new file mode 100644
index 000000000000..79596463e0d9
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -0,0 +1,1239 @@
+/*
+ * Copyright (c) 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on ccu-sun8i-h3.c by Maxime Ripard.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_mux.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun6i-a31.h"
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_cpu_clk, "pll-cpu",
+ "osc24M", 0x000,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
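+/*
+ * The fixed /4 appears twice below: sun6i_a31_ccu_setup() forces the
+ * divider field of this register to 4, and the pll-audio fixed-factor
+ * clocks near the end of this file assume the same ratio.
+ */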
+#define SUN6I_A31_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph_clk, "pll-periph",
+ "osc24M", 0x028,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 2, /* post-div */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The MIPI PLL has 2 modes: "MIPI" and "HDMI".
+ *
+ * The MIPI mode is a standard NKM-style clock. The HDMI mode is an
+ * integer / fractional clock with switchable multipliers and dividers.
+ * This is not supported here. We hardcode the PLL to MIPI mode.
+ */
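+/*
+ * The mode is forced at probe time: sun6i_a31_ccu_setup() rewrites
+ * SUN6I_A31_PLL_MIPI_REG before any of these clocks are registered.
+ */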
+#define SUN6I_A31_PLL_MIPI_REG 0x040
+
+static const char * const pll_mipi_parents[] = { "pll-video0", "pll-video1" };
+static SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(pll_mipi_clk, "pll-mipi",
+ pll_mipi_parents, 0x040,
+ 8, 4, /* N */
+ 4, 2, /* K */
+ 0, 4, /* M */
+ 21, 0, /* mux */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll9_clk, "pll9",
+ "osc24M", 0x044,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll10_clk, "pll10",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+ "pll-cpu", "pll-cpu" };
+static SUNXI_CCU_MUX(cpu_clk, "cpu", cpux_parents,
+ 0x050, 16, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static struct clk_div_table axi_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 4 },
+ { .val = 5, .div = 4 },
+ { .val = 6, .div = 4 },
+ { .val = 7, .div = 4 },
+ { /* Sentinel */ },
+};
+
+static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
+ 0x050, 0, 3, axi_div_table, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi", "pll-periph" };
+
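+/*
+ * AHB1 is a power-of-two divider behind a 4-way mux; when parent 3
+ * (pll-periph) is selected, an extra 2-bit pre-divider at bit 6 is
+ * applied first (CCU_FEATURE_VARIABLE_PREDIV).
+ */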
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_GATE(ahb1_mipidsi_clk, "ahb1-mipidsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(ahb1_ss_clk, "ahb1-ss", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(ahb1_dma_clk, "ahb1-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(ahb1_mmc0_clk, "ahb1-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
+ 0x060, BIT(12), 0);
+static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(ahb1_emac_clk, "ahb1-emac", "ahb1",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(ahb1_ts_clk, "ahb1-ts", "ahb1",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(ahb1_hstimer_clk, "ahb1-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(ahb1_spi0_clk, "ahb1-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(ahb1_spi1_clk, "ahb1-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(ahb1_spi2_clk, "ahb1-spi2", "ahb1",
+ 0x060, BIT(22), 0);
+static SUNXI_CCU_GATE(ahb1_spi3_clk, "ahb1-spi3", "ahb1",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(ahb1_otg_clk, "ahb1-otg", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(ahb1_ehci0_clk, "ahb1-ehci0", "ahb1",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(ahb1_ehci1_clk, "ahb1-ehci1", "ahb1",
+ 0x060, BIT(27), 0);
+static SUNXI_CCU_GATE(ahb1_ohci0_clk, "ahb1-ohci0", "ahb1",
+ 0x060, BIT(29), 0);
+static SUNXI_CCU_GATE(ahb1_ohci1_clk, "ahb1-ohci1", "ahb1",
+ 0x060, BIT(30), 0);
+static SUNXI_CCU_GATE(ahb1_ohci2_clk, "ahb1-ohci2", "ahb1",
+ 0x060, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ahb1_ve_clk, "ahb1-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(ahb1_lcd0_clk, "ahb1-lcd0", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(ahb1_lcd1_clk, "ahb1-lcd1", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(ahb1_csi_clk, "ahb1-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(ahb1_hdmi_clk, "ahb1-hdmi", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(ahb1_be0_clk, "ahb1-be0", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(ahb1_be1_clk, "ahb1-be1", "ahb1",
+ 0x064, BIT(13), 0);
+static SUNXI_CCU_GATE(ahb1_fe0_clk, "ahb1-fe0", "ahb1",
+ 0x064, BIT(14), 0);
+static SUNXI_CCU_GATE(ahb1_fe1_clk, "ahb1-fe1", "ahb1",
+ 0x064, BIT(15), 0);
+static SUNXI_CCU_GATE(ahb1_mp_clk, "ahb1-mp", "ahb1",
+ 0x064, BIT(18), 0);
+static SUNXI_CCU_GATE(ahb1_gpu_clk, "ahb1-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(ahb1_deu0_clk, "ahb1-deu0", "ahb1",
+ 0x064, BIT(23), 0);
+static SUNXI_CCU_GATE(ahb1_deu1_clk, "ahb1-deu1", "ahb1",
+ 0x064, BIT(24), 0);
+static SUNXI_CCU_GATE(ahb1_drc0_clk, "ahb1-drc0", "ahb1",
+ 0x064, BIT(25), 0);
+static SUNXI_CCU_GATE(ahb1_drc1_clk, "ahb1-drc1", "ahb1",
+ 0x064, BIT(26), 0);
+
+static SUNXI_CCU_GATE(apb1_codec_clk, "apb1-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(apb1_spdif_clk, "apb1-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(apb1_digital_mic_clk, "apb1-digital-mic", "apb1",
+ 0x068, BIT(4), 0);
+static SUNXI_CCU_GATE(apb1_pio_clk, "apb1-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(apb1_daudio0_clk, "apb1-daudio0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(apb1_daudio1_clk, "apb1-daudio1", "apb1",
+ 0x068, BIT(13), 0);
+
+static SUNXI_CCU_GATE(apb2_i2c0_clk, "apb2-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(apb2_i2c1_clk, "apb2-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(apb2_i2c2_clk, "apb2-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(apb2_i2c3_clk, "apb2-i2c3", "apb2",
+ 0x06c, BIT(3), 0);
+static SUNXI_CCU_GATE(apb2_uart0_clk, "apb2-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(apb2_uart1_clk, "apb2-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(apb2_uart2_clk, "apb2-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(apb2_uart3_clk, "apb2-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(apb2_uart4_clk, "apb2-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+static SUNXI_CCU_GATE(apb2_uart5_clk, "apb2-uart5", "apb2",
+ 0x06c, BIT(21), 0);
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", mod0_default_parents,
+ 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", mod0_default_parents,
+ 0x084,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents,
+ 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents,
+ 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
+ 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2",
+ 0x090, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2",
+ 0x090, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents,
+ 0x094,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc3_sample_clk, "mmc3_sample", "mmc3",
+ 0x094, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc3_output_clk, "mmc3_output", "mmc3",
+ 0x094, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", mod0_default_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, 0x0ac,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const daudio_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(daudio0_clk, "daudio0", daudio_parents,
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents,
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M",
+ 0x0cc, BIT(16), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc24M",
+ 0x0cc, BIT(17), 0);
+static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M",
+ 0x0cc, BIT(18), 0);
+
+/* TODO emac clk not supported yet */
+
+static const char * const dram_parents[] = { "pll-ddr", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mdfs_clk, "mdfs", dram_parents, 0x0f0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M_WITH_MUX(sdram0_clk, "sdram0", dram_parents,
+ 0x0f4, 0, 4, 4, 1, CLK_IS_CRITICAL);
+static SUNXI_CCU_M_WITH_MUX(sdram1_clk, "sdram1", dram_parents,
+ 0x0f4, 8, 4, 12, 1, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "mdfs",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_isp_clk, "dram-csi-isp", "mdfs",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "mdfs",
+ 0x100, BIT(3), 0);
+static SUNXI_CCU_GATE(dram_drc0_clk, "dram-drc0", "mdfs",
+ 0x100, BIT(16), 0);
+static SUNXI_CCU_GATE(dram_drc1_clk, "dram-drc1", "mdfs",
+ 0x100, BIT(17), 0);
+static SUNXI_CCU_GATE(dram_deu0_clk, "dram-deu0", "mdfs",
+ 0x100, BIT(18), 0);
+static SUNXI_CCU_GATE(dram_deu1_clk, "dram-deu1", "mdfs",
+ 0x100, BIT(19), 0);
+static SUNXI_CCU_GATE(dram_fe0_clk, "dram-fe0", "mdfs",
+ 0x100, BIT(24), 0);
+static SUNXI_CCU_GATE(dram_fe1_clk, "dram-fe1", "mdfs",
+ 0x100, BIT(25), 0);
+static SUNXI_CCU_GATE(dram_be0_clk, "dram-be0", "mdfs",
+ 0x100, BIT(26), 0);
+static SUNXI_CCU_GATE(dram_be1_clk, "dram-be1", "mdfs",
+ 0x100, BIT(27), 0);
+static SUNXI_CCU_GATE(dram_mp_clk, "dram-mp", "mdfs",
+ 0x100, BIT(28), 0);
+
+static const char * const de_parents[] = { "pll-video0", "pll-video1",
+ "pll-periph-2x", "pll-gpu",
+ "pll9", "pll10" };
+static SUNXI_CCU_M_WITH_MUX_GATE(be0_clk, "be0", de_parents,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(be1_clk, "be1", de_parents,
+ 0x108, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(fe0_clk, "fe0", de_parents,
+ 0x10c, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(fe1_clk, "fe1", de_parents,
+ 0x110, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const mp_parents[] = { "pll-video0", "pll-video1",
+ "pll9", "pll10" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", mp_parents,
+ 0x114, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const lcd_ch0_parents[] = { "pll-video0", "pll-video1",
+ "pll-video0-2x",
+ "pll-video1-2x", "pll-mipi" };
+static SUNXI_CCU_MUX_WITH_GATE(lcd0_ch0_clk, "lcd0-ch0", lcd_ch0_parents,
+ 0x118, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(lcd1_ch0_clk, "lcd1-ch0", lcd_ch0_parents,
+ 0x11c, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char * const lcd_ch1_parents[] = { "pll-video0", "pll-video1",
+ "pll-video0-2x",
+ "pll-video1-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
+ 0x12c, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
+ 0x12c, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
+ "pll9", "pll10", "pll-mipi",
+ "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi0_sclk_clk, "csi0-sclk", csi_sclk_parents,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "pll-video0", "pll-video1",
+ "osc24M" };
+static const u8 csi_mclk_table[] = { 0, 1, 5 };
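+/*
+ * The CSI MCLK muxes use a sparse table: register values 0, 1 and 5
+ * select pll-video0, pll-video1 and osc24M respectively.
+ */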
+static struct ccu_div csi0_mclk_clk = {
+ .enable = BIT(15),
+ .div = _SUNXI_CCU_DIV(0, 4),
+ .mux = _SUNXI_CCU_MUX_TABLE(8, 3, csi_mclk_table),
+ .common = {
+ .reg = 0x134,
+ .hw.init = CLK_HW_INIT_PARENTS("csi0-mclk",
+ csi_mclk_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div csi1_mclk_clk = {
+ .enable = BIT(15),
+ .div = _SUNXI_CCU_DIV(0, 4),
+ .mux = _SUNXI_CCU_MUX_TABLE(8, 3, csi_mclk_table),
+ .common = {
+ .reg = 0x138,
+ .hw.init = CLK_HW_INIT_PARENTS("csi1-mclk",
+ csi_mclk_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+static SUNXI_CCU_GATE(digital_mic_clk, "digital-mic", "pll-audio",
+ 0x148, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph",
+ "pll-ddr" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mbus0_clk, "mbus0", mbus_parents, 0x15c,
+ 0, 3, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mbus1_clk, "mbus1", mbus_parents, 0x160,
+ 0, 3, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(mipi_dsi_clk, "mipi-dsi", lcd_ch1_parents,
+ 0x168, 16, 3, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(mipi_dsi_dphy_clk, "mipi-dsi-dphy",
+ lcd_ch1_parents, 0x168, 0, 3, 8, 2,
+ BIT(15), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(mipi_csi_dphy_clk, "mipi-csi-dphy",
+ lcd_ch1_parents, 0x16c, 0, 3, 8, 2,
+ BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(iep_drc0_clk, "iep-drc0", de_parents,
+ 0x180, 0, 3, 24, 2, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(iep_drc1_clk, "iep-drc1", de_parents,
+ 0x184, 0, 3, 24, 2, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(iep_deu0_clk, "iep-deu0", de_parents,
+ 0x188, 0, 3, 24, 2, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(iep_deu1_clk, "iep-deu1", de_parents,
+ 0x18c, 0, 3, 24, 2, BIT(31), 0);
+
+static const char * const gpu_parents[] = { "pll-gpu", "pll-periph-2x",
+ "pll-video0", "pll-video1",
+ "pll9", "pll10" };
+static const struct ccu_mux_fixed_prediv gpu_predivs[] = {
+ { .index = 1, .div = 3, },
+};
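+/*
+ * Parent 1 (pll-periph-2x) is divided by 3 before it reaches the GPU
+ * dividers; the three GPU clocks below share this fixed pre-divider.
+ */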
+
+static struct ccu_div gpu_core_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV(0, 3),
+ .mux = {
+ .shift = 24,
+ .width = 3,
+ .fixed_predivs = gpu_predivs,
+ .n_predivs = ARRAY_SIZE(gpu_predivs),
+ },
+ .common = {
+ .reg = 0x1a0,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("gpu-core",
+ gpu_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div gpu_memory_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV(0, 3),
+ .mux = {
+ .shift = 24,
+ .width = 3,
+ .fixed_predivs = gpu_predivs,
+ .n_predivs = ARRAY_SIZE(gpu_predivs),
+ },
+ .common = {
+ .reg = 0x1a4,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("gpu-memory",
+ gpu_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div gpu_hyd_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV(0, 3),
+ .mux = {
+ .shift = 24,
+ .width = 3,
+ .fixed_predivs = gpu_predivs,
+ .n_predivs = ARRAY_SIZE(gpu_predivs),
+ },
+ .common = {
+ .reg = 0x1a8,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("gpu-hyd",
+ gpu_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_M_WITH_MUX_GATE(ats_clk, "ats", mod0_default_parents, 0x1b0,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(trace_clk, "trace", mod0_default_parents,
+ 0x1b0,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const clk_out_parents[] = { "osc24M", "osc32k", "osc24M",
+ "axi", "ahb1" };
+static const u8 clk_out_table[] = { 0, 1, 2, 11, 13 };
+
+static const struct ccu_mux_fixed_prediv clk_out_predivs[] = {
+ { .index = 0, .div = 750, },
+ { .index = 3, .div = 4, },
+ { .index = 4, .div = 4, },
+};
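+/*
+ * clk_out_table maps register values 0, 1, 2, 11 and 13 onto the five
+ * parents above. Parent 0 is osc24M pre-divided by 750 (32 kHz), while
+ * parent 2 is the undivided 24 MHz oscillator.
+ */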
+
+static struct ccu_mp out_a_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 4,
+ .table = clk_out_table,
+ .fixed_predivs = clk_out_predivs,
+ .n_predivs = ARRAY_SIZE(clk_out_predivs),
+ },
+ .common = {
+ .reg = 0x300,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("out-a",
+ clk_out_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_mp out_b_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 4,
+ .table = clk_out_table,
+ .fixed_predivs = clk_out_predivs,
+ .n_predivs = ARRAY_SIZE(clk_out_predivs),
+ },
+ .common = {
+ .reg = 0x304,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("out-b",
+ clk_out_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_mp out_c_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 4,
+ .table = clk_out_table,
+ .fixed_predivs = clk_out_predivs,
+ .n_predivs = ARRAY_SIZE(clk_out_predivs),
+ },
+ .common = {
+ .reg = 0x308,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("out-c",
+ clk_out_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_common *sun6i_a31_ccu_clks[] = {
+ &pll_cpu_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr_clk.common,
+ &pll_periph_clk.common,
+ &pll_video1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll9_clk.common,
+ &pll10_clk.common,
+ &cpu_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &ahb1_mipidsi_clk.common,
+ &ahb1_ss_clk.common,
+ &ahb1_dma_clk.common,
+ &ahb1_mmc0_clk.common,
+ &ahb1_mmc1_clk.common,
+ &ahb1_mmc2_clk.common,
+ &ahb1_mmc3_clk.common,
+ &ahb1_nand1_clk.common,
+ &ahb1_nand0_clk.common,
+ &ahb1_sdram_clk.common,
+ &ahb1_emac_clk.common,
+ &ahb1_ts_clk.common,
+ &ahb1_hstimer_clk.common,
+ &ahb1_spi0_clk.common,
+ &ahb1_spi1_clk.common,
+ &ahb1_spi2_clk.common,
+ &ahb1_spi3_clk.common,
+ &ahb1_otg_clk.common,
+ &ahb1_ehci0_clk.common,
+ &ahb1_ehci1_clk.common,
+ &ahb1_ohci0_clk.common,
+ &ahb1_ohci1_clk.common,
+ &ahb1_ohci2_clk.common,
+ &ahb1_ve_clk.common,
+ &ahb1_lcd0_clk.common,
+ &ahb1_lcd1_clk.common,
+ &ahb1_csi_clk.common,
+ &ahb1_hdmi_clk.common,
+ &ahb1_be0_clk.common,
+ &ahb1_be1_clk.common,
+ &ahb1_fe0_clk.common,
+ &ahb1_fe1_clk.common,
+ &ahb1_mp_clk.common,
+ &ahb1_gpu_clk.common,
+ &ahb1_deu0_clk.common,
+ &ahb1_deu1_clk.common,
+ &ahb1_drc0_clk.common,
+ &ahb1_drc1_clk.common,
+ &apb1_codec_clk.common,
+ &apb1_spdif_clk.common,
+ &apb1_digital_mic_clk.common,
+ &apb1_pio_clk.common,
+ &apb1_daudio0_clk.common,
+ &apb1_daudio1_clk.common,
+ &apb2_i2c0_clk.common,
+ &apb2_i2c1_clk.common,
+ &apb2_i2c2_clk.common,
+ &apb2_i2c3_clk.common,
+ &apb2_uart0_clk.common,
+ &apb2_uart1_clk.common,
+ &apb2_uart2_clk.common,
+ &apb2_uart3_clk.common,
+ &apb2_uart4_clk.common,
+ &apb2_uart5_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &mmc2_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc2_output_clk.common,
+ &mmc3_clk.common,
+ &mmc3_sample_clk.common,
+ &mmc3_output_clk.common,
+ &ts_clk.common,
+ &ss_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spi3_clk.common,
+ &daudio0_clk.common,
+ &daudio1_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_phy2_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_ohci2_clk.common,
+ &mdfs_clk.common,
+ &sdram0_clk.common,
+ &sdram1_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_isp_clk.common,
+ &dram_ts_clk.common,
+ &dram_drc0_clk.common,
+ &dram_drc1_clk.common,
+ &dram_deu0_clk.common,
+ &dram_deu1_clk.common,
+ &dram_fe0_clk.common,
+ &dram_fe1_clk.common,
+ &dram_be0_clk.common,
+ &dram_be1_clk.common,
+ &dram_mp_clk.common,
+ &be0_clk.common,
+ &be1_clk.common,
+ &fe0_clk.common,
+ &fe1_clk.common,
+ &mp_clk.common,
+ &lcd0_ch0_clk.common,
+ &lcd1_ch0_clk.common,
+ &lcd0_ch1_clk.common,
+ &lcd1_ch1_clk.common,
+ &csi0_sclk_clk.common,
+ &csi0_mclk_clk.common,
+ &csi1_mclk_clk.common,
+ &ve_clk.common,
+ &codec_clk.common,
+ &avs_clk.common,
+ &digital_mic_clk.common,
+ &hdmi_clk.common,
+ &hdmi_ddc_clk.common,
+ &ps_clk.common,
+ &mbus0_clk.common,
+ &mbus1_clk.common,
+ &mipi_dsi_clk.common,
+ &mipi_dsi_dphy_clk.common,
+ &mipi_csi_dphy_clk.common,
+ &iep_drc0_clk.common,
+ &iep_drc1_clk.common,
+ &iep_deu0_clk.common,
+ &iep_deu1_clk.common,
+ &gpu_core_clk.common,
+ &gpu_memory_clk.common,
+ &gpu_hyd_clk.common,
+ &ats_clk.common,
+ &trace_clk.common,
+ &out_a_clk.common,
+ &out_b_clk.common,
+ &out_c_clk.common,
+};
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x",
+ "pll-periph", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x",
+ "pll-video0", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x",
+ "pll-video1", 1, 2, CLK_SET_RATE_PARENT);
+
+static struct clk_hw_onecell_data sun6i_a31_hw_clks = {
+ .hws = {
+ [CLK_PLL_CPU] = &pll_cpu_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw,
+ [CLK_PLL_PERIPH_2X] = &pll_periph_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL9] = &pll9_clk.common.hw,
+ [CLK_PLL10] = &pll10_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_AHB1_MIPIDSI] = &ahb1_mipidsi_clk.common.hw,
+ [CLK_AHB1_SS] = &ahb1_ss_clk.common.hw,
+ [CLK_AHB1_DMA] = &ahb1_dma_clk.common.hw,
+ [CLK_AHB1_MMC0] = &ahb1_mmc0_clk.common.hw,
+ [CLK_AHB1_MMC1] = &ahb1_mmc1_clk.common.hw,
+ [CLK_AHB1_MMC2] = &ahb1_mmc2_clk.common.hw,
+ [CLK_AHB1_MMC3] = &ahb1_mmc3_clk.common.hw,
+ [CLK_AHB1_NAND1] = &ahb1_nand1_clk.common.hw,
+ [CLK_AHB1_NAND0] = &ahb1_nand0_clk.common.hw,
+ [CLK_AHB1_SDRAM] = &ahb1_sdram_clk.common.hw,
+ [CLK_AHB1_EMAC] = &ahb1_emac_clk.common.hw,
+ [CLK_AHB1_TS] = &ahb1_ts_clk.common.hw,
+ [CLK_AHB1_HSTIMER] = &ahb1_hstimer_clk.common.hw,
+ [CLK_AHB1_SPI0] = &ahb1_spi0_clk.common.hw,
+ [CLK_AHB1_SPI1] = &ahb1_spi1_clk.common.hw,
+ [CLK_AHB1_SPI2] = &ahb1_spi2_clk.common.hw,
+ [CLK_AHB1_SPI3] = &ahb1_spi3_clk.common.hw,
+ [CLK_AHB1_OTG] = &ahb1_otg_clk.common.hw,
+ [CLK_AHB1_EHCI0] = &ahb1_ehci0_clk.common.hw,
+ [CLK_AHB1_EHCI1] = &ahb1_ehci1_clk.common.hw,
+ [CLK_AHB1_OHCI0] = &ahb1_ohci0_clk.common.hw,
+ [CLK_AHB1_OHCI1] = &ahb1_ohci1_clk.common.hw,
+ [CLK_AHB1_OHCI2] = &ahb1_ohci2_clk.common.hw,
+ [CLK_AHB1_VE] = &ahb1_ve_clk.common.hw,
+ [CLK_AHB1_LCD0] = &ahb1_lcd0_clk.common.hw,
+ [CLK_AHB1_LCD1] = &ahb1_lcd1_clk.common.hw,
+ [CLK_AHB1_CSI] = &ahb1_csi_clk.common.hw,
+ [CLK_AHB1_HDMI] = &ahb1_hdmi_clk.common.hw,
+ [CLK_AHB1_BE0] = &ahb1_be0_clk.common.hw,
+ [CLK_AHB1_BE1] = &ahb1_be1_clk.common.hw,
+ [CLK_AHB1_FE0] = &ahb1_fe0_clk.common.hw,
+ [CLK_AHB1_FE1] = &ahb1_fe1_clk.common.hw,
+ [CLK_AHB1_MP] = &ahb1_mp_clk.common.hw,
+ [CLK_AHB1_GPU] = &ahb1_gpu_clk.common.hw,
+ [CLK_AHB1_DEU0] = &ahb1_deu0_clk.common.hw,
+ [CLK_AHB1_DEU1] = &ahb1_deu1_clk.common.hw,
+ [CLK_AHB1_DRC0] = &ahb1_drc0_clk.common.hw,
+ [CLK_AHB1_DRC1] = &ahb1_drc1_clk.common.hw,
+ [CLK_APB1_CODEC] = &apb1_codec_clk.common.hw,
+ [CLK_APB1_SPDIF] = &apb1_spdif_clk.common.hw,
+ [CLK_APB1_DIGITAL_MIC] = &apb1_digital_mic_clk.common.hw,
+ [CLK_APB1_PIO] = &apb1_pio_clk.common.hw,
+ [CLK_APB1_DAUDIO0] = &apb1_daudio0_clk.common.hw,
+ [CLK_APB1_DAUDIO1] = &apb1_daudio1_clk.common.hw,
+ [CLK_APB2_I2C0] = &apb2_i2c0_clk.common.hw,
+ [CLK_APB2_I2C1] = &apb2_i2c1_clk.common.hw,
+ [CLK_APB2_I2C2] = &apb2_i2c2_clk.common.hw,
+ [CLK_APB2_I2C3] = &apb2_i2c3_clk.common.hw,
+ [CLK_APB2_UART0] = &apb2_uart0_clk.common.hw,
+ [CLK_APB2_UART1] = &apb2_uart1_clk.common.hw,
+ [CLK_APB2_UART2] = &apb2_uart2_clk.common.hw,
+ [CLK_APB2_UART3] = &apb2_uart3_clk.common.hw,
+ [CLK_APB2_UART4] = &apb2_uart4_clk.common.hw,
+ [CLK_APB2_UART5] = &apb2_uart5_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_MMC3] = &mmc3_clk.common.hw,
+ [CLK_MMC3_SAMPLE] = &mmc3_sample_clk.common.hw,
+ [CLK_MMC3_OUTPUT] = &mmc3_output_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_SS] = &ss_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_DAUDIO0] = &daudio0_clk.common.hw,
+ [CLK_DAUDIO1] = &daudio1_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_PHY2] = &usb_phy2_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
+ [CLK_MDFS] = &mdfs_clk.common.hw,
+ [CLK_SDRAM0] = &sdram0_clk.common.hw,
+ [CLK_SDRAM1] = &sdram1_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI_ISP] = &dram_csi_isp_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DRAM_DRC0] = &dram_drc0_clk.common.hw,
+ [CLK_DRAM_DRC1] = &dram_drc1_clk.common.hw,
+ [CLK_DRAM_DEU0] = &dram_deu0_clk.common.hw,
+ [CLK_DRAM_DEU1] = &dram_deu1_clk.common.hw,
+ [CLK_DRAM_FE0] = &dram_fe0_clk.common.hw,
+ [CLK_DRAM_FE1] = &dram_fe1_clk.common.hw,
+ [CLK_DRAM_BE0] = &dram_be0_clk.common.hw,
+ [CLK_DRAM_BE1] = &dram_be1_clk.common.hw,
+ [CLK_DRAM_MP] = &dram_mp_clk.common.hw,
+ [CLK_BE0] = &be0_clk.common.hw,
+ [CLK_BE1] = &be1_clk.common.hw,
+ [CLK_FE0] = &fe0_clk.common.hw,
+ [CLK_FE1] = &fe1_clk.common.hw,
+ [CLK_MP] = &mp_clk.common.hw,
+ [CLK_LCD0_CH0] = &lcd0_ch0_clk.common.hw,
+ [CLK_LCD1_CH0] = &lcd1_ch0_clk.common.hw,
+ [CLK_LCD0_CH1] = &lcd0_ch1_clk.common.hw,
+ [CLK_LCD1_CH1] = &lcd1_ch1_clk.common.hw,
+ [CLK_CSI0_SCLK] = &csi0_sclk_clk.common.hw,
+ [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
+ [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_CODEC] = &codec_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_DIGITAL_MIC] = &digital_mic_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw,
+ [CLK_PS] = &ps_clk.common.hw,
+ [CLK_MBUS0] = &mbus0_clk.common.hw,
+ [CLK_MBUS1] = &mbus1_clk.common.hw,
+ [CLK_MIPI_DSI] = &mipi_dsi_clk.common.hw,
+ [CLK_MIPI_DSI_DPHY] = &mipi_dsi_dphy_clk.common.hw,
+ [CLK_MIPI_CSI_DPHY] = &mipi_csi_dphy_clk.common.hw,
+ [CLK_IEP_DRC0] = &iep_drc0_clk.common.hw,
+ [CLK_IEP_DRC1] = &iep_drc1_clk.common.hw,
+ [CLK_IEP_DEU0] = &iep_deu0_clk.common.hw,
+ [CLK_IEP_DEU1] = &iep_deu1_clk.common.hw,
+ [CLK_GPU_CORE] = &gpu_core_clk.common.hw,
+ [CLK_GPU_MEMORY] = &gpu_memory_clk.common.hw,
+ [CLK_GPU_HYD] = &gpu_hyd_clk.common.hw,
+ [CLK_ATS] = &ats_clk.common.hw,
+ [CLK_TRACE] = &trace_clk.common.hw,
+ [CLK_OUT_A] = &out_a_clk.common.hw,
+ [CLK_OUT_B] = &out_b_clk.common.hw,
+ [CLK_OUT_C] = &out_c_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun6i_a31_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_PHY2] = { 0x0cc, BIT(2) },
+
+ [RST_AHB1_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_AHB1_SS] = { 0x2c0, BIT(5) },
+ [RST_AHB1_DMA] = { 0x2c0, BIT(6) },
+ [RST_AHB1_MMC0] = { 0x2c0, BIT(8) },
+ [RST_AHB1_MMC1] = { 0x2c0, BIT(9) },
+ [RST_AHB1_MMC2] = { 0x2c0, BIT(10) },
+ [RST_AHB1_MMC3] = { 0x2c0, BIT(11) },
+ [RST_AHB1_NAND1] = { 0x2c0, BIT(12) },
+ [RST_AHB1_NAND0] = { 0x2c0, BIT(13) },
+ [RST_AHB1_SDRAM] = { 0x2c0, BIT(14) },
+ [RST_AHB1_EMAC] = { 0x2c0, BIT(17) },
+ [RST_AHB1_TS] = { 0x2c0, BIT(18) },
+ [RST_AHB1_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_AHB1_SPI0] = { 0x2c0, BIT(20) },
+ [RST_AHB1_SPI1] = { 0x2c0, BIT(21) },
+ [RST_AHB1_SPI2] = { 0x2c0, BIT(22) },
+ [RST_AHB1_SPI3] = { 0x2c0, BIT(23) },
+ [RST_AHB1_OTG] = { 0x2c0, BIT(24) },
+ [RST_AHB1_EHCI0] = { 0x2c0, BIT(26) },
+ [RST_AHB1_EHCI1] = { 0x2c0, BIT(27) },
+ [RST_AHB1_OHCI0] = { 0x2c0, BIT(29) },
+ [RST_AHB1_OHCI1] = { 0x2c0, BIT(30) },
+ [RST_AHB1_OHCI2] = { 0x2c0, BIT(31) },
+
+ [RST_AHB1_VE] = { 0x2c4, BIT(0) },
+ [RST_AHB1_LCD0] = { 0x2c4, BIT(4) },
+ [RST_AHB1_LCD1] = { 0x2c4, BIT(5) },
+ [RST_AHB1_CSI] = { 0x2c4, BIT(8) },
+ [RST_AHB1_HDMI] = { 0x2c4, BIT(11) },
+ [RST_AHB1_BE0] = { 0x2c4, BIT(12) },
+ [RST_AHB1_BE1] = { 0x2c4, BIT(13) },
+ [RST_AHB1_FE0] = { 0x2c4, BIT(14) },
+ [RST_AHB1_FE1] = { 0x2c4, BIT(15) },
+ [RST_AHB1_MP] = { 0x2c4, BIT(18) },
+ [RST_AHB1_GPU] = { 0x2c4, BIT(20) },
+ [RST_AHB1_DEU0] = { 0x2c4, BIT(23) },
+ [RST_AHB1_DEU1] = { 0x2c4, BIT(24) },
+ [RST_AHB1_DRC0] = { 0x2c4, BIT(25) },
+ [RST_AHB1_DRC1] = { 0x2c4, BIT(26) },
+ [RST_AHB1_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_APB1_CODEC] = { 0x2d0, BIT(0) },
+ [RST_APB1_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_APB1_DIGITAL_MIC] = { 0x2d0, BIT(4) },
+ [RST_APB1_DAUDIO0] = { 0x2d0, BIT(12) },
+ [RST_APB1_DAUDIO1] = { 0x2d0, BIT(13) },
+
+ [RST_APB2_I2C0] = { 0x2d8, BIT(0) },
+ [RST_APB2_I2C1] = { 0x2d8, BIT(1) },
+ [RST_APB2_I2C2] = { 0x2d8, BIT(2) },
+ [RST_APB2_I2C3] = { 0x2d8, BIT(3) },
+ [RST_APB2_UART0] = { 0x2d8, BIT(16) },
+ [RST_APB2_UART1] = { 0x2d8, BIT(17) },
+ [RST_APB2_UART2] = { 0x2d8, BIT(18) },
+ [RST_APB2_UART3] = { 0x2d8, BIT(19) },
+ [RST_APB2_UART4] = { 0x2d8, BIT(20) },
+ [RST_APB2_UART5] = { 0x2d8, BIT(21) },
+};
+
+static const struct sunxi_ccu_desc sun6i_a31_ccu_desc = {
+ .ccu_clks = sun6i_a31_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun6i_a31_ccu_clks),
+
+ .hw_clks = &sun6i_a31_hw_clks,
+
+ .resets = sun6i_a31_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun6i_a31_ccu_resets),
+};
+
+static struct ccu_mux_nb sun6i_a31_cpu_nb = {
+ .common = &cpu_clk.common,
+ .cm = &cpu_clk.mux,
+ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
+ .bypass_index = 1, /* index of 24 MHz oscillator */
+};
+
+static void __init sun6i_a31_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
+
+ /* Force PLL-MIPI to MIPI mode */
+ val = readl(reg + SUN6I_A31_PLL_MIPI_REG);
+ val &= ~BIT(16);
+ writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
+
+ sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
+
+ ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
+ &sun6i_a31_cpu_nb);
+}
+CLK_OF_DECLARE(sun6i_a31_ccu, "allwinner,sun6i-a31-ccu",
+ sun6i_a31_ccu_setup);
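
Aside (not part of the patch): the _ccu_setup() function above, like its A23/A33 counterparts further down, applies its early fixups with an open-coded read-modify-write on a 32-bit CCU register. A minimal sketch of that pattern, using a hypothetical helper name, for readers less used to the GENMASK()/BIT() idiom:

/*
 * Sketch only: ccu_force_field() is a hypothetical helper illustrating the
 * read-modify-write done in sun6i_a31_ccu_setup().  "val" must already be
 * shifted into the field position described by "mask".
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/io.h>

static void ccu_force_field(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~mask;		/* clear the whole field */
	tmp |= val;		/* insert the new, pre-shifted value */
	writel(tmp, reg);
}

/*
 * Equivalent to the two fixups above:
 *   ccu_force_field(reg + SUN6I_A31_PLL_AUDIO_REG, GENMASK(19, 16), 3 << 16);
 *   ccu_force_field(reg + SUN6I_A31_PLL_MIPI_REG, BIT(16), 0);
 * where, following the usual N+1 encoding, a register value of 3 in
 * bits [19:16] selects a divide-by-4.
 */
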
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
new file mode 100644
index 000000000000..4e434011e9e7
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN6I_A31_H_
+#define _CCU_SUN6I_A31_H_
+
+#include <dt-bindings/clock/sun6i-a31-ccu.h>
+#include <dt-bindings/reset/sun6i-a31-ccu.h>
+
+#define CLK_PLL_CPU 0
+#define CLK_PLL_AUDIO_BASE 1
+#define CLK_PLL_AUDIO 2
+#define CLK_PLL_AUDIO_2X 3
+#define CLK_PLL_AUDIO_4X 4
+#define CLK_PLL_AUDIO_8X 5
+#define CLK_PLL_VIDEO0 6
+#define CLK_PLL_VIDEO0_2X 7
+#define CLK_PLL_VE 8
+#define CLK_PLL_DDR 9
+
+/* The PLL_PERIPH clock is exported */
+
+#define CLK_PLL_PERIPH_2X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VIDEO1_2X 13
+#define CLK_PLL_GPU 14
+#define CLK_PLL_MIPI 15
+#define CLK_PLL9 16
+#define CLK_PLL10 17
+
+/* The CPUX clock is exported */
+
+#define CLK_AXI 19
+#define CLK_AHB1 20
+#define CLK_APB1 21
+#define CLK_APB2 22
+
+/* All the bus gates are exported */
+
+/* The first bunch of module clocks are exported */
+
+/* EMAC clock is not implemented */
+
+#define CLK_MDFS 107
+#define CLK_SDRAM0 108
+#define CLK_SDRAM1 109
+
+/* All the DRAM gates are exported */
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS0 141
+#define CLK_MBUS1 142
+
+/* Some more module clocks and external clock outputs are exported */
+
+#define CLK_NUMBER (CLK_OUT_C + 1)
+
+#endif /* _CCU_SUN6I_A31_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
new file mode 100644
index 000000000000..62c0f8d49ef8
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN8I_A23_A33_H_
+#define _CCU_SUN8I_A23_A33_H_
+
+#include <dt-bindings/clock/sun8i-a23-a33-ccu.h>
+#include <dt-bindings/reset/sun8i-a23-a33-ccu.h>
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_AUDIO_BASE 1
+#define CLK_PLL_AUDIO 2
+#define CLK_PLL_AUDIO_2X 3
+#define CLK_PLL_AUDIO_4X 4
+#define CLK_PLL_AUDIO_8X 5
+#define CLK_PLL_VIDEO 6
+#define CLK_PLL_VIDEO_2X 7
+#define CLK_PLL_VE 8
+#define CLK_PLL_DDR0 9
+#define CLK_PLL_PERIPH 10
+#define CLK_PLL_PERIPH_2X 11
+#define CLK_PLL_GPU 12
+#define CLK_PLL_MIPI 13
+#define CLK_PLL_HSIC 14
+#define CLK_PLL_DE 15
+#define CLK_PLL_DDR1 16
+#define CLK_PLL_DDR 17
+
+/* The CPUX clock is exported */
+
+#define CLK_AXI 19
+#define CLK_AHB1 20
+#define CLK_APB1 21
+#define CLK_APB2 22
+
+/* All the bus gates are exported */
+
+/* The first part of the mod clocks is exported */
+
+#define CLK_DRAM 79
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS 95
+
+/* And the last module clocks are exported */
+
+#define CLK_NUMBER (CLK_ATS + 1)
+
+#endif /* _CCU_SUN8I_A23_A33_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
new file mode 100644
index 000000000000..2646d980087b
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun8i-a23-a33.h"
+
+
+static struct ccu_nkmp pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_nkmp_ops,
+ 0),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names
+ */
+#define SUN8I_A23_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph_clk, "pll-periph",
+ "osc24M", 0x028,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 2, /* post-div */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The MIPI PLL has 2 modes: "MIPI" and "HDMI".
+ *
+ * The MIPI mode is a standard NKM-style clock. The HDMI mode is an
+ * integer / fractional clock with switchable multipliers and dividers.
+ * This is not supported here. We hardcode the PLL to MIPI mode.
+ */
+#define SUN8I_A23_PLL_MIPI_REG 0x040
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
+ "pll-video", 0x040,
+ 8, 4, /* N */
+ 4, 2, /* K */
+ 0, 4, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic",
+ "osc24M", 0x044,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+ "pll-cpux" , "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x050, 16, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi" , "pll-periph" };
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph" , "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci_clk, "bus-ehci", "ahb1",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_ohci_clk, "bus-ohci", "ahb1",
+ 0x060, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_lcd_clk, "bus-lcd", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_de_be_clk, "bus-de-be", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_de_fe_clk, "bus-de-fe", "ahb1",
+ 0x064, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+static SUNXI_CCU_GATE(bus_drc_clk, "bus-drc", "ahb1",
+ 0x064, BIT(25), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2",
+ 0x090, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2",
+ 0x090, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), 0);
+
+/* TODO: the parent for most of the USB clocks is not known */
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_hsic_12M_clk, "usb-hsic-12M", "osc24M",
+ 0x0cc, BIT(11), 0);
+static SUNXI_CCU_GATE(usb_ohci_clk, "usb-ohci", "osc24M",
+ 0x0cc, BIT(16), 0);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "pll-ddr",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_drc_clk, "dram-drc", "pll-ddr",
+ 0x100, BIT(16), 0);
+static SUNXI_CCU_GATE(dram_de_fe_clk, "dram-de-fe", "pll-ddr",
+ 0x100, BIT(24), 0);
+static SUNXI_CCU_GATE(dram_de_be_clk, "dram-de-be", "pll-ddr",
+ 0x100, BIT(26), 0);
+
+static const char * const de_parents[] = { "pll-video", "pll-periph-2x",
+ "pll-gpu", "pll-de" };
+static const u8 de_table[] = { 0, 2, 3, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_be_clk, "de-be",
+ de_parents, de_table,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_fe_clk, "de-fe",
+ de_parents, de_table,
+ 0x10c, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const lcd_ch0_parents[] = { "pll-video", "pll-video-2x",
+ "pll-mipi" };
+static const u8 lcd_ch0_table[] = { 0, 2, 4 };
+static SUNXI_CCU_MUX_TABLE_WITH_GATE(lcd_ch0_clk, "lcd-ch0",
+ lcd_ch0_parents, lcd_ch0_table,
+ 0x118, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const lcd_ch1_parents[] = { "pll-video", "pll-video-2x" };
+static const u8 lcd_ch1_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(lcd_ch1_clk, "lcd-ch1",
+ lcd_ch1_parents, lcd_ch1_table,
+ 0x12c, 0, 4, 24, 2, BIT(31), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-video", "pll-de",
+ "pll-mipi", "pll-ve" };
+static const u8 csi_sclk_table[] = { 0, 3, 4, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_sclk_clk, "csi-sclk",
+ csi_sclk_parents, csi_sclk_table,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "pll-video", "pll-de",
+ "osc24M" };
+static const u8 csi_mclk_table[] = { 0, 3, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
+ csi_mclk_parents, csi_mclk_table,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), 0);
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph-2x",
+ "pll-ddr" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
+
+static const char * const dsi_sclk_parents[] = { "pll-video", "pll-video-2x" };
+static const u8 dsi_sclk_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_sclk_clk, "dsi-sclk",
+ dsi_sclk_parents, dsi_sclk_table,
+ 0x168, 16, 4, 24, 2, BIT(31), 0);
+
+static const char * const dsi_dphy_parents[] = { "pll-video", "pll-periph" };
+static const u8 dsi_dphy_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy",
+ dsi_dphy_parents, dsi_dphy_table,
+ 0x168, 0, 4, 8, 2, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(drc_clk, "drc",
+ de_parents, de_table,
+ 0x180, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), 0);
+
+static const char * const ats_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ats_clk, "ats", ats_parents,
+ 0x1b0, 0, 3, 24, 2, BIT(31), 0);
+
+static struct ccu_common *sun8i_a23_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr_clk.common,
+ &pll_periph_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll_hsic_clk.common,
+ &pll_de_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci_clk.common,
+ &bus_ohci_clk.common,
+ &bus_ve_clk.common,
+ &bus_lcd_clk.common,
+ &bus_csi_clk.common,
+ &bus_de_fe_clk.common,
+ &bus_de_be_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_drc_clk.common,
+ &bus_codec_clk.common,
+ &bus_pio_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &mmc2_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc2_output_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_hsic_clk.common,
+ &usb_hsic_12M_clk.common,
+ &usb_ohci_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_drc_clk.common,
+ &dram_de_fe_clk.common,
+ &dram_de_be_clk.common,
+ &de_be_clk.common,
+ &de_fe_clk.common,
+ &lcd_ch0_clk.common,
+ &lcd_ch1_clk.common,
+ &csi_sclk_clk.common,
+ &csi_mclk_clk.common,
+ &ve_clk.common,
+ &ac_dig_clk.common,
+ &avs_clk.common,
+ &mbus_clk.common,
+ &dsi_sclk_clk.common,
+ &dsi_dphy_clk.common,
+ &drc_clk.common,
+ &gpu_clk.common,
+ &ats_clk.common,
+};
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x",
+ "pll-periph", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x",
+ "pll-video", 1, 2, 0);
+
+static struct clk_hw_onecell_data sun8i_a23_hw_clks = {
+ .hws = {
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO] = &pll_video_clk.common.hw,
+ [CLK_PLL_VIDEO_2X] = &pll_video_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw,
+ [CLK_PLL_PERIPH_2X] = &pll_periph_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI] = &bus_ehci_clk.common.hw,
+ [CLK_BUS_OHCI] = &bus_ohci_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_LCD] = &bus_lcd_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_DE_BE] = &bus_de_be_clk.common.hw,
+ [CLK_BUS_DE_FE] = &bus_de_fe_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_DRC] = &bus_drc_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12M_clk.common.hw,
+ [CLK_USB_OHCI] = &usb_ohci_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DRC] = &dram_drc_clk.common.hw,
+ [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw,
+ [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw,
+ [CLK_DE_BE] = &de_be_clk.common.hw,
+ [CLK_DE_FE] = &de_fe_clk.common.hw,
+ [CLK_LCD_CH0] = &lcd_ch0_clk.common.hw,
+ [CLK_LCD_CH1] = &lcd_ch1_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AC_DIG] = &ac_dig_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DSI_SCLK] = &dsi_sclk_clk.common.hw,
+ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
+ [CLK_DRC] = &drc_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_ATS] = &ats_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_a23_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_HSIC] = { 0x0cc, BIT(2) },
+
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI] = { 0x2c0, BIT(26) },
+ [RST_BUS_OHCI] = { 0x2c0, BIT(29) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_LCD] = { 0x2c4, BIT(4) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_DE_BE] = { 0x2c4, BIT(12) },
+ [RST_BUS_DE_FE] = { 0x2c4, BIT(14) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_DRC] = { 0x2c4, BIT(25) },
+
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun8i_a23_ccu_desc = {
+ .ccu_clks = sun8i_a23_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a23_ccu_clks),
+
+ .hw_clks = &sun8i_a23_hw_clks,
+
+ .resets = sun8i_a23_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a23_ccu_resets),
+};
+
+static void __init sun8i_a23_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
+
+ /* Force PLL-MIPI to MIPI mode */
+ val = readl(reg + SUN8I_A23_PLL_MIPI_REG);
+ val &= ~BIT(16);
+ writel(val, reg + SUN8I_A23_PLL_MIPI_REG);
+
+ sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
+}
+CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu",
+ sun8i_a23_ccu_setup);
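
A rough worked example of the hard-coded audio clock tree above (illustrative only; the 98.304 MHz base rate is hypothetical, picked as a plausible audio PLL setting): with the post-divider forced to 4 in sun8i_a23_ccu_setup(), the CLK_FIXED_FACTOR ratios map pll-audio-base onto the four exported audio clocks.

#include <stdio.h>

int main(void)
{
	unsigned long base = 98304000UL;	/* hypothetical pll-audio-base rate, in Hz */

	printf("pll-audio    = %lu Hz\n", base / 4);	/* CLK_FIXED_FACTOR(..., 4, 1, ...) */
	printf("pll-audio-2x = %lu Hz\n", base / 2);	/* CLK_FIXED_FACTOR(..., 2, 1, ...) */
	printf("pll-audio-4x = %lu Hz\n", base);	/* CLK_FIXED_FACTOR(..., 1, 1, ...) */
	printf("pll-audio-8x = %lu Hz\n", base * 2);	/* CLK_FIXED_FACTOR(..., 1, 2, ...) */
	return 0;
}

This prints 24.576, 49.152, 98.304 and 196.608 MHz respectively, i.e. the 1x/2x/4x/8x relationship the clock names promise.
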
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
new file mode 100644
index 000000000000..96b40ca57697
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun8i-a23-a33.h"
+
+static struct ccu_nkmp pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_nkmp_ops,
+ 0),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names
+ */
+#define SUN8I_A33_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph_clk, "pll-periph",
+ "osc24M", 0x028,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 2, /* post-div */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The MIPI PLL has 2 modes: "MIPI" and "HDMI".
+ *
+ * The MIPI mode is a standard NKM-style clock. The HDMI mode is an
+ * integer / fractional clock with switchable multipliers and dividers.
+ * This is not supported here. We hardcode the PLL to MIPI mode.
+ */
+#define SUN8I_A33_PLL_MIPI_REG 0x040
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
+ "pll-video", 0x040,
+ 8, 4, /* N */
+ 4, 2, /* K */
+ 0, 4, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic",
+ "osc24M", 0x044,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: Fix N */
+static SUNXI_CCU_N_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
+ "osc24M", 0x04c,
+ 8, 6, /* N */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+ "pll-cpux" , "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x050, 16, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi" , "pll-periph" };
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph" , "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ss_clk, "bus-ss", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci_clk, "bus-ehci", "ahb1",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_ohci_clk, "bus-ohci", "ahb1",
+ 0x060, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_lcd_clk, "bus-lcd", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_de_be_clk, "bus-de-be", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_de_fe_clk, "bus-de-fe", "ahb1",
+ 0x064, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+static SUNXI_CCU_GATE(bus_drc_clk, "bus-drc", "ahb1",
+ 0x064, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_sat_clk, "bus-sat", "ahb1",
+ 0x064, BIT(26), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2",
+ 0x090, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2",
+ 0x090, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), 0);
+
+/* TODO: the parent for most of the USB clocks is not known */
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_hsic_12M_clk, "usb-hsic-12M", "osc24M",
+ 0x0cc, BIT(11), 0);
+static SUNXI_CCU_GATE(usb_ohci_clk, "usb-ohci", "osc24M",
+ 0x0cc, BIT(16), 0);
+
+static SUNXI_CCU_M(dram_clk, "dram", "pll-ddr",
+ 0x0f4, 0, 4, CLK_IS_CRITICAL);
+
+static const char * const pll_ddr_parents[] = { "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_MUX(pll_ddr_clk, "pll-ddr", pll_ddr_parents,
+ 0x0f8, 16, 1, 0);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_drc_clk, "dram-drc", "dram",
+ 0x100, BIT(16), 0);
+static SUNXI_CCU_GATE(dram_de_fe_clk, "dram-de-fe", "dram",
+ 0x100, BIT(24), 0);
+static SUNXI_CCU_GATE(dram_de_be_clk, "dram-de-be", "dram",
+ 0x100, BIT(26), 0);
+
+static const char * const de_parents[] = { "pll-video", "pll-periph-2x",
+ "pll-gpu", "pll-de" };
+static const u8 de_table[] = { 0, 2, 3, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_be_clk, "de-be",
+ de_parents, de_table,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_fe_clk, "de-fe",
+ de_parents, de_table,
+ 0x10c, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const lcd_ch0_parents[] = { "pll-video", "pll-video-2x",
+ "pll-mipi" };
+static const u8 lcd_ch0_table[] = { 0, 2, 4 };
+static SUNXI_CCU_MUX_TABLE_WITH_GATE(lcd_ch0_clk, "lcd-ch0",
+ lcd_ch0_parents, lcd_ch0_table,
+ 0x118, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const lcd_ch1_parents[] = { "pll-video", "pll-video-2x" };
+static const u8 lcd_ch1_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(lcd_ch1_clk, "lcd-ch1",
+ lcd_ch1_parents, lcd_ch1_table,
+ 0x12c, 0, 4, 24, 2, BIT(31), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-video", "pll-de",
+ "pll-mipi", "pll-ve" };
+static const u8 csi_sclk_table[] = { 0, 3, 4, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_sclk_clk, "csi-sclk",
+ csi_sclk_parents, csi_sclk_table,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "pll-video", "pll-de",
+ "osc24M" };
+static const u8 csi_mclk_table[] = { 0, 3, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
+ csi_mclk_parents, csi_mclk_table,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), 0);
+static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
+ 0x140, BIT(30), 0);
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph-2x",
+ "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
+
+static const char * const dsi_sclk_parents[] = { "pll-video", "pll-video-2x" };
+static const u8 dsi_sclk_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_sclk_clk, "dsi-sclk",
+ dsi_sclk_parents, dsi_sclk_table,
+ 0x168, 16, 4, 24, 2, BIT(31), 0);
+
+static const char * const dsi_dphy_parents[] = { "pll-video", "pll-periph" };
+static const u8 dsi_dphy_table[] = { 0, 2 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy",
+ dsi_dphy_parents, dsi_dphy_table,
+ 0x168, 0, 4, 8, 2, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(drc_clk, "drc",
+ de_parents, de_table,
+ 0x180, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), 0);
+
+static const char * const ats_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ats_clk, "ats", ats_parents,
+ 0x1b0, 0, 3, 24, 2, BIT(31), 0);
+
+static struct ccu_common *sun8i_a33_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll_hsic_clk.common,
+ &pll_de_clk.common,
+ &pll_ddr1_clk.common,
+ &pll_ddr_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_ss_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci_clk.common,
+ &bus_ohci_clk.common,
+ &bus_ve_clk.common,
+ &bus_lcd_clk.common,
+ &bus_csi_clk.common,
+ &bus_de_fe_clk.common,
+ &bus_de_be_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_drc_clk.common,
+ &bus_sat_clk.common,
+ &bus_codec_clk.common,
+ &bus_pio_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &mmc2_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc2_output_clk.common,
+ &ss_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_hsic_clk.common,
+ &usb_hsic_12M_clk.common,
+ &usb_ohci_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_drc_clk.common,
+ &dram_de_fe_clk.common,
+ &dram_de_be_clk.common,
+ &de_be_clk.common,
+ &de_fe_clk.common,
+ &lcd_ch0_clk.common,
+ &lcd_ch1_clk.common,
+ &csi_sclk_clk.common,
+ &csi_mclk_clk.common,
+ &ve_clk.common,
+ &ac_dig_clk.common,
+ &ac_dig_4x_clk.common,
+ &avs_clk.common,
+ &mbus_clk.common,
+ &dsi_sclk_clk.common,
+ &dsi_dphy_clk.common,
+ &drc_clk.common,
+ &gpu_clk.common,
+ &ats_clk.common,
+};
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x",
+ "pll-periph", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x",
+ "pll-video", 1, 2, 0);
+
+static struct clk_hw_onecell_data sun8i_a33_hw_clks = {
+ .hws = {
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO] = &pll_video_clk.common.hw,
+ [CLK_PLL_VIDEO_2X] = &pll_video_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw,
+ [CLK_PLL_PERIPH_2X] = &pll_periph_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_SS] = &bus_ss_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI] = &bus_ehci_clk.common.hw,
+ [CLK_BUS_OHCI] = &bus_ohci_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_LCD] = &bus_lcd_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_DE_BE] = &bus_de_be_clk.common.hw,
+ [CLK_BUS_DE_FE] = &bus_de_fe_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_DRC] = &bus_drc_clk.common.hw,
+ [CLK_BUS_SAT] = &bus_sat_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_SS] = &ss_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12M_clk.common.hw,
+ [CLK_USB_OHCI] = &usb_ohci_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DRC] = &dram_drc_clk.common.hw,
+ [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw,
+ [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw,
+ [CLK_DE_BE] = &de_be_clk.common.hw,
+ [CLK_DE_FE] = &de_fe_clk.common.hw,
+ [CLK_LCD_CH0] = &lcd_ch0_clk.common.hw,
+ [CLK_LCD_CH1] = &lcd_ch1_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AC_DIG] = &ac_dig_clk.common.hw,
+ [CLK_AC_DIG_4X] = &ac_dig_4x_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DSI_SCLK] = &dsi_sclk_clk.common.hw,
+ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
+ [CLK_DRC] = &drc_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_ATS] = &ats_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_a33_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_HSIC] = { 0x0cc, BIT(2) },
+
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_SS] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI] = { 0x2c0, BIT(26) },
+ [RST_BUS_OHCI] = { 0x2c0, BIT(29) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_LCD] = { 0x2c4, BIT(4) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_DE_BE] = { 0x2c4, BIT(12) },
+ [RST_BUS_DE_FE] = { 0x2c4, BIT(14) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_DRC] = { 0x2c4, BIT(25) },
+ [RST_BUS_SAT] = { 0x2c4, BIT(26) },
+
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
+ .ccu_clks = sun8i_a33_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a33_ccu_clks),
+
+ .hw_clks = &sun8i_a33_hw_clks,
+
+ .resets = sun8i_a33_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets),
+};
+
+static void __init sun8i_a33_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN8I_A33_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN8I_A33_PLL_AUDIO_REG);
+
+ /* Force PLL-MIPI to MIPI mode */
+ val = readl(reg + SUN8I_A33_PLL_MIPI_REG);
+ val &= ~BIT(16);
+ writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
+
+ sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+}
+CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
+ sun8i_a33_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 267f99523fbe..4d70590f05e3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -184,15 +184,15 @@ static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
0);
static const char * const ahb2_parents[] = { "ahb1" , "pll-periph0" };
+static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = {
+ { .index = 1, .div = 2 },
+};
static struct ccu_mux ahb2_clk = {
.mux = {
.shift = 0,
.width = 1,
-
- .fixed_prediv = {
- .index = 1,
- .div = 2,
- },
+ .fixed_predivs = ahb2_fixed_predivs,
+ .n_predivs = ARRAY_SIZE(ahb2_fixed_predivs),
},
.common = {
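
For readers of the hunk above: the change replaces a single hard-coded prediv with an array plus a count. A minimal sketch (an assumption about how the shared mux code consumes the table, not taken from this patch) of adjusting a parent rate with it:

static unsigned long adjust_parent_rate(const struct ccu_mux_fixed_prediv *predivs,
					int n_predivs, int parent_index,
					unsigned long parent_rate)
{
	int i;

	/* Scan for a fixed predivider attached to the selected parent */
	for (i = 0; i < n_predivs; i++)
		if (predivs[i].index == parent_index)
			return parent_rate / predivs[i].div;

	return parent_rate;	/* no fixed prediv for this parent */
}

For ahb2 above, parent index 1 (pll-periph0) would be divided by 2, while index 0 (ahb1) passes through unchanged.
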
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 653ade5769b3..34c338832c0d 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -19,10 +19,29 @@
#include "ccu_common.h"
#include "ccu_mux.h"
+/**
+ * struct _ccu_div - Internal divider description
+ * @shift: Bit offset of the divider in its register
+ * @width: Width of the divider field in its register
+ * @max: Maximum value allowed for that divider. This is the
+ * arithmetic value, not the maximum value to be set in the
+ * register.
+ * @flags: clk_divider flags to apply on this divider
+ * @table: Divider table pointer (if applicable)
+ *
+ * This structure represents a single divider, and is meant to be
+ * embedded in other structures representing the various clock
+ * classes.
+ *
+ * It is basically a wrapper around the arguments to the clk_divider
+ * functions.
+ */
struct _ccu_div {
u8 shift;
u8 width;
+ u32 max;
+
u32 flags;
struct clk_div_table *table;
@@ -36,14 +55,25 @@ struct _ccu_div {
.table = _table, \
}
-#define _SUNXI_CCU_DIV_FLAGS(_shift, _width, _flags) \
- _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, _flags)
-
#define _SUNXI_CCU_DIV_TABLE(_shift, _width, _table) \
_SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, 0)
+#define _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, _max, _flags) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ .flags = _flags, \
+ .max = _max, \
+ }
+
+#define _SUNXI_CCU_DIV_FLAGS(_shift, _width, _flags) \
+ _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, 0, _flags)
+
+#define _SUNXI_CCU_DIV_MAX(_shift, _width, _max) \
+ _SUNXI_CCU_DIV_MAX_FLAGS(_shift, _width, _max, 0)
+
#define _SUNXI_CCU_DIV(_shift, _width) \
- _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, 0)
+ _SUNXI_CCU_DIV_FLAGS(_shift, _width, 0)
struct ccu_div {
u32 enable;
@@ -77,13 +107,16 @@ struct ccu_div {
_shift, _width, _table, 0, \
_flags)
-#define SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
- _mshift, _mwidth, _muxshift, _muxwidth, \
- _gate, _flags) \
+#define SUNXI_CCU_M_WITH_MUX_TABLE_GATE(_struct, _name, \
+ _parents, _table, \
+ _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
struct ccu_div _struct = { \
.enable = _gate, \
.div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
- .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \
+ .mux = _SUNXI_CCU_MUX_TABLE(_muxshift, _muxwidth, _table), \
.common = { \
.reg = _reg, \
.hw.init = CLK_HW_INIT_PARENTS(_name, \
@@ -93,12 +126,23 @@ struct ccu_div {
}, \
}
+#define SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ _gate, _flags) \
+ SUNXI_CCU_M_WITH_MUX_TABLE_GATE(_struct, _name, \
+ _parents, NULL, \
+ _reg, _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags)
+
#define SUNXI_CCU_M_WITH_MUX(_struct, _name, _parents, _reg, \
_mshift, _mwidth, _muxshift, _muxwidth, \
_flags) \
- SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
- _mshift, _mwidth, _muxshift, _muxwidth, \
- 0, _flags)
+ SUNXI_CCU_M_WITH_MUX_TABLE_GATE(_struct, _name, \
+ _parents, NULL, \
+ _reg, _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
#define SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index cbf33ef5faa9..ebb1b31568a5 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -21,9 +21,9 @@ static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
unsigned int best_m = 0, best_p = 0;
unsigned int _m, _p;
- for (_p = 0; _p <= max_p; _p++) {
+ for (_p = 1; _p <= max_p; _p <<= 1) {
for (_m = 1; _m <= max_m; _m++) {
- unsigned long tmp_rate = (parent >> _p) / _m;
+ unsigned long tmp_rate = parent / _p / _m;
if (tmp_rate > rate)
continue;
@@ -46,13 +46,15 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
void *data)
{
struct ccu_mp *cmp = data;
+ unsigned int max_m, max_p;
unsigned int m, p;
- ccu_mp_find_best(parent_rate, rate,
- 1 << cmp->m.width, (1 << cmp->p.width) - 1,
- &m, &p);
+ max_m = cmp->m.max ?: 1 << cmp->m.width;
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- return (parent_rate >> p) / m;
+ ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
+
+ return parent_rate / p / m;
}
static void ccu_mp_disable(struct clk_hw *hw)
@@ -108,13 +110,14 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct ccu_mp *cmp = hw_to_ccu_mp(hw);
unsigned long flags;
+ unsigned int max_m, max_p;
unsigned int m, p;
u32 reg;
- ccu_mp_find_best(parent_rate, rate,
- 1 << cmp->m.width, (1 << cmp->p.width) - 1,
- &m, &p);
+ max_m = cmp->m.max ?: 1 << cmp->m.width;
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
spin_lock_irqsave(cmp->common.lock, flags);
@@ -122,7 +125,7 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
- writel(reg | (p << cmp->p.shift) | ((m - 1) << cmp->m.shift),
+ writel(reg | (ilog2(p) << cmp->p.shift) | ((m - 1) << cmp->m.shift),
cmp->common.base + cmp->common.reg);
spin_unlock_irqrestore(cmp->common.lock, flags);
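
The hunks above change how the P factor is handled: P is now an arithmetic power-of-two divider, its effective maximum comes from the new max field (m.max ?: 1 << m.width), and only ilog2(p) is written back to the register. A minimal standalone sketch of that search, illustrative only and not part of the series:

/*
 * Standalone sketch of the reworked M/P search: P steps over powers of
 * two and only log2(p) would be stored in the register. Illustrative
 * only; the in-kernel version is ccu_mp_find_best() above.
 */
#include <stdio.h>

static void mp_find_best(unsigned long parent, unsigned long rate,
			 unsigned int max_m, unsigned int max_p,
			 unsigned int *m, unsigned int *p)
{
	unsigned long best_rate = 0;
	unsigned int best_m = 1, best_p = 1, _m, _p;

	for (_p = 1; _p <= max_p; _p <<= 1) {
		for (_m = 1; _m <= max_m; _m++) {
			unsigned long tmp = parent / _p / _m;

			if (tmp > rate)
				continue;
			if (tmp > best_rate) {
				best_rate = tmp;
				best_m = _m;
				best_p = _p;
			}
		}
	}

	*m = best_m;
	*p = best_p;
}

static unsigned int ilog2u(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int m, p;

	/* 600 MHz parent, 33 MHz target, m up to 32, p up to 8 */
	mp_find_best(600000000UL, 33000000UL, 32, 8, &m, &p);
	printf("m=%u p=%u -> register fields m-1=%u, log2(p)=%u\n",
	       m, p, m - 1, ilog2u(p));
	return 0;
}
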
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 3cf12bf95962..edf9215ea8cc 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -44,7 +44,7 @@ struct ccu_mp {
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
.p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
- .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
.common = { \
.reg = _reg, \
.hw.init = CLK_HW_INIT_PARENTS(_name, \
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
new file mode 100644
index 000000000000..010e9424691d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_gate.h"
+#include "ccu_mult.h"
+
+static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_n, unsigned int *n)
+{
+ *n = rate / parent;
+}
+
+static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
+ unsigned long parent_rate,
+ unsigned long rate,
+ void *data)
+{
+ struct ccu_mult *cm = data;
+ unsigned int n;
+
+ ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+
+ return parent_rate * n;
+}
+
+static void ccu_mult_disable(struct clk_hw *hw)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_gate_helper_disable(&cm->common, cm->enable);
+}
+
+static int ccu_mult_enable(struct clk_hw *hw)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_gate_helper_enable(&cm->common, cm->enable);
+}
+
+static int ccu_mult_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
+}
+
+static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+ unsigned long val;
+ u32 reg;
+
+ reg = readl(cm->common.base + cm->common.reg);
+ val = reg >> cm->mult.shift;
+ val &= (1 << cm->mult.width) - 1;
+
+ ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ &parent_rate);
+
+ return parent_rate * (val + 1);
+}
+
+static int ccu_mult_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
+ req, ccu_mult_round_rate, cm);
+}
+
+static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+ unsigned long flags;
+ unsigned int n;
+ u32 reg;
+
+ ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ &parent_rate);
+
+ ccu_mult_find_best(parent_rate, rate, 1 << cm->mult.width, &n);
+
+ spin_lock_irqsave(cm->common.lock, flags);
+
+ reg = readl(cm->common.base + cm->common.reg);
+ reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
+
+ writel(reg | ((n - 1) << cm->mult.shift),
+ cm->common.base + cm->common.reg);
+
+ spin_unlock_irqrestore(cm->common.lock, flags);
+
+ return 0;
+}
+
+static u8 ccu_mult_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
+}
+
+static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mult *cm = hw_to_ccu_mult(hw);
+
+ return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
+}
+
+const struct clk_ops ccu_mult_ops = {
+ .disable = ccu_mult_disable,
+ .enable = ccu_mult_enable,
+ .is_enabled = ccu_mult_is_enabled,
+
+ .get_parent = ccu_mult_get_parent,
+ .set_parent = ccu_mult_set_parent,
+
+ .determine_rate = ccu_mult_determine_rate,
+ .recalc_rate = ccu_mult_recalc_rate,
+ .set_rate = ccu_mult_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h
index 609db6610880..5d2c8dc14073 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.h
+++ b/drivers/clk/sunxi-ng/ccu_mult.h
@@ -1,6 +1,9 @@
#ifndef _CCU_MULT_H_
#define _CCU_MULT_H_
+#include "ccu_common.h"
+#include "ccu_mux.h"
+
struct _ccu_mult {
u8 shift;
u8 width;
@@ -12,4 +15,36 @@ struct _ccu_mult {
.width = _width, \
}
+struct ccu_mult {
+ u32 enable;
+
+ struct _ccu_mult mult;
+ struct ccu_mux_internal mux;
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_N_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, _gate, _lock, \
+ _flags) \
+ struct ccu_mult _struct = { \
+ .enable = _gate, \
+ .mult = _SUNXI_CCU_MULT(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_mult_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct ccu_mult *hw_to_ccu_mult(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mult, common);
+}
+
+extern const struct clk_ops ccu_mult_ops;
+
#endif /* _CCU_MULT_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 58fc36e7dcce..a43ad52a957d 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -8,7 +8,9 @@
* the License, or (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include "ccu_gate.h"
#include "ccu_mux.h"
@@ -18,8 +20,9 @@ void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
int parent_index,
unsigned long *parent_rate)
{
- u8 prediv = 1;
+ u16 prediv = 1;
u32 reg;
+ int i;
if (!((common->features & CCU_FEATURE_FIXED_PREDIV) ||
(common->features & CCU_FEATURE_VARIABLE_PREDIV)))
@@ -32,8 +35,9 @@ void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
}
if (common->features & CCU_FEATURE_FIXED_PREDIV)
- if (parent_index == cm->fixed_prediv.index)
- prediv = cm->fixed_prediv.div;
+ for (i = 0; i < cm->n_predivs; i++)
+ if (parent_index == cm->fixed_predivs[i].index)
+ prediv = cm->fixed_predivs[i].div;
if (common->features & CCU_FEATURE_VARIABLE_PREDIV)
if (parent_index == cm->variable_prediv.index) {
@@ -107,6 +111,15 @@ u8 ccu_mux_helper_get_parent(struct ccu_common *common,
parent = reg >> cm->shift;
parent &= (1 << cm->width) - 1;
+ if (cm->table) {
+ int num_parents = clk_hw_get_num_parents(&common->hw);
+ int i;
+
+ for (i = 0; i < num_parents; i++)
+ if (cm->table[i] == parent)
+ return i;
+ }
+
return parent;
}
@@ -117,6 +130,9 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
unsigned long flags;
u32 reg;
+ if (cm->table)
+ index = cm->table[index];
+
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
@@ -185,3 +201,37 @@ const struct clk_ops ccu_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.recalc_rate = ccu_mux_recalc_rate,
};
+
+/*
+ * This clock notifier is called when the frequency of the parent
+ * PLL clock is to be changed. The idea is to switch the parent to a
+ * stable clock, such as the main oscillator, while the PLL frequency
+ * stabilizes.
+ */
+static int ccu_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct ccu_mux_nb *mux = to_ccu_mux_nb(nb);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->original_index = ccu_mux_helper_get_parent(mux->common,
+ mux->cm);
+ ret = ccu_mux_helper_set_parent(mux->common, mux->cm,
+ mux->bypass_index);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ccu_mux_helper_set_parent(mux->common, mux->cm,
+ mux->original_index);
+ }
+
+ udelay(mux->delay_us);
+
+ return notifier_from_errno(ret);
+}
+
+int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb)
+{
+ mux_nb->clk_nb.notifier_call = ccu_mux_notifier_cb;
+
+ return clk_notifier_register(clk, &mux_nb->clk_nb);
+}
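
For context, a hedged sketch of how a CCU driver might use the notifier added above: park a CPU mux on a stable oscillator while its PLL parent is reprogrammed. The cpux_clk/pll_cpux_clk symbols, the indices, and the delay are placeholders, not taken from these hunks.

/*
 * Hedged usage sketch (placeholder symbols, kernel/sunxi-ng context):
 * reparent a CPU mux to the 24 MHz oscillator around PLL rate changes.
 */
static struct ccu_mux_nb example_cpu_nb = {
	.common		= &cpux_clk.common,	/* placeholder mux clock */
	.cm		= &cpux_clk.mux,
	.delay_us	= 1,	/* settle time after switching back */
	.bypass_index	= 1,	/* mux index of the stable oscillator */
};

static void __init example_ccu_setup(struct device_node *node)
{
	/* ... register the CCU clocks first ... */

	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
				  &example_cpu_nb);
}
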
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index 945082631e7d..47aba3a48245 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -5,14 +5,18 @@
#include "ccu_common.h"
+struct ccu_mux_fixed_prediv {
+ u8 index;
+ u16 div;
+};
+
struct ccu_mux_internal {
- u8 shift;
- u8 width;
+ u8 shift;
+ u8 width;
+ const u8 *table;
- struct {
- u8 index;
- u8 div;
- } fixed_prediv;
+ const struct ccu_mux_fixed_prediv *fixed_predivs;
+ u8 n_predivs;
struct {
u8 index;
@@ -21,12 +25,16 @@ struct ccu_mux_internal {
} variable_prediv;
};
-#define SUNXI_CLK_MUX(_shift, _width) \
- { \
- .shift = _shift, \
- .width = _width, \
+#define _SUNXI_CCU_MUX_TABLE(_shift, _width, _table) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ .table = _table, \
}
+#define _SUNXI_CCU_MUX(_shift, _width) \
+ _SUNXI_CCU_MUX_TABLE(_shift, _width, NULL)
+
struct ccu_mux {
u16 reg;
u32 enable;
@@ -35,9 +43,12 @@ struct ccu_mux {
struct ccu_common common;
};
-#define SUNXI_CCU_MUX(_struct, _name, _parents, _reg, _shift, _width, _flags) \
+#define SUNXI_CCU_MUX_TABLE_WITH_GATE(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _gate, \
+ _flags) \
struct ccu_mux _struct = { \
- .mux = SUNXI_CLK_MUX(_shift, _width), \
+ .enable = _gate, \
+ .mux = _SUNXI_CCU_MUX_TABLE(_shift, _width, _table), \
.common = { \
.reg = _reg, \
.hw.init = CLK_HW_INIT_PARENTS(_name, \
@@ -49,17 +60,14 @@ struct ccu_mux {
#define SUNXI_CCU_MUX_WITH_GATE(_struct, _name, _parents, _reg, \
_shift, _width, _gate, _flags) \
- struct ccu_mux _struct = { \
- .enable = _gate, \
- .mux = SUNXI_CLK_MUX(_shift, _width), \
- .common = { \
- .reg = _reg, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parents, \
- &ccu_mux_ops, \
- _flags), \
- } \
- }
+ SUNXI_CCU_MUX_TABLE_WITH_GATE(_struct, _name, _parents, NULL, \
+ _reg, _shift, _width, _gate, \
+ _flags)
+
+#define SUNXI_CCU_MUX(_struct, _name, _parents, _reg, _shift, _width, \
+ _flags) \
+ SUNXI_CCU_MUX_TABLE_WITH_GATE(_struct, _name, _parents, NULL, \
+ _reg, _shift, _width, 0, _flags)
static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
{
@@ -88,4 +96,18 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
struct ccu_mux_internal *cm,
u8 index);
+struct ccu_mux_nb {
+ struct notifier_block clk_nb;
+ struct ccu_common *common;
+ struct ccu_mux_internal *cm;
+
+ u32 delay_us; /* How many us to wait after reparenting */
+ u8 bypass_index; /* Which parent to temporarily use */
+ u8 original_index; /* This is set by the notifier callback */
+};
+
+#define to_ccu_mux_nb(_nb) container_of(_nb, struct ccu_mux_nb, clk_nb)
+
+int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb);
+
#endif /* _CCU_MUX_H_ */
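
The mux description now carries an array of fixed pre-dividers rather than a single one; on the H3 above, ahb2 divides its pll-periph0 parent (index 1) by two. A standalone sketch of the lookup performed by ccu_mux_helper_adjust_parent_for_prediv(), for illustration only:

/*
 * Standalone sketch of the fixed pre-divider lookup: if the selected
 * parent index has a fixed pre-divider, the parent rate seen by the
 * rest of the clock is divided accordingly. Mirrors the loop added to
 * ccu_mux_helper_adjust_parent_for_prediv(); illustrative only.
 */
#include <stdio.h>

struct fixed_prediv {
	unsigned char index;
	unsigned short div;
};

static unsigned long adjust_for_prediv(unsigned long parent_rate,
				       int parent_index,
				       const struct fixed_prediv *predivs,
				       unsigned int n_predivs)
{
	unsigned int i;

	for (i = 0; i < n_predivs; i++)
		if (parent_index == predivs[i].index)
			parent_rate /= predivs[i].div;

	return parent_rate;
}

int main(void)
{
	/* H3 ahb2: parent 1 (pll-periph0) is pre-divided by 2 */
	const struct fixed_prediv ahb2_predivs[] = { { .index = 1, .div = 2 } };

	printf("%lu\n", adjust_for_prediv(600000000UL, 1, ahb2_predivs, 1));
	return 0;
}
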
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 2071822b1e9c..059fdc3b4f96 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -93,19 +93,30 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
return parent_rate * (n + 1) * (k + 1) / (m + 1);
}
-static long ccu_nkm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
+ unsigned long parent_rate,
+ unsigned long rate,
+ void *data)
{
- struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+ struct ccu_nkm *nkm = data;
struct _ccu_nkm _nkm;
_nkm.max_n = 1 << nkm->n.width;
_nkm.max_k = 1 << nkm->k.width;
- _nkm.max_m = 1 << nkm->m.width;
+ _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
- ccu_nkm_find_best(*parent_rate, rate, &_nkm);
+ ccu_nkm_find_best(parent_rate, rate, &_nkm);
- return *parent_rate * _nkm.n * _nkm.k / _nkm.m;
+ return parent_rate * _nkm.n * _nkm.k / _nkm.m;
+}
+
+static int ccu_nkm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_mux_helper_determine_rate(&nkm->common, &nkm->mux,
+ req, ccu_nkm_round_rate, nkm);
}
static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -118,7 +129,7 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
_nkm.max_n = 1 << nkm->n.width;
_nkm.max_k = 1 << nkm->k.width;
- _nkm.max_m = 1 << nkm->m.width;
+ _nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
ccu_nkm_find_best(parent_rate, rate, &_nkm);
@@ -142,12 +153,29 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static u8 ccu_nkm_get_parent(struct clk_hw *hw)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_mux_helper_get_parent(&nkm->common, &nkm->mux);
+}
+
+static int ccu_nkm_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_mux_helper_set_parent(&nkm->common, &nkm->mux, index);
+}
+
const struct clk_ops ccu_nkm_ops = {
.disable = ccu_nkm_disable,
.enable = ccu_nkm_enable,
.is_enabled = ccu_nkm_is_enabled,
+ .get_parent = ccu_nkm_get_parent,
+ .set_parent = ccu_nkm_set_parent,
+
+ .determine_rate = ccu_nkm_determine_rate,
.recalc_rate = ccu_nkm_recalc_rate,
- .round_rate = ccu_nkm_round_rate,
.set_rate = ccu_nkm_set_rate,
};
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
index 1936ac1c6b37..35493fddd8ab 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.h
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -32,10 +32,33 @@ struct ccu_nkm {
struct _ccu_mult n;
struct _ccu_mult k;
struct _ccu_div m;
+ struct ccu_mux_internal mux;
struct ccu_common common;
};
+#define SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(_struct, _name, _parents, _reg, \
+ _nshift, _nwidth, \
+ _kshift, _kwidth, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _lock, _flags) \
+ struct ccu_nkm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_nkm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NKM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_kshift, _kwidth, \
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9f2b98e19dc9..9769dee99511 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -29,14 +29,14 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
unsigned long _n, _k, _m, _p;
for (_k = 1; _k <= nkmp->max_k; _k++) {
- for (_p = 0; _p <= nkmp->max_p; _p++) {
+ for (_p = 1; _p <= nkmp->max_p; _p <<= 1) {
unsigned long tmp_rate;
- rational_best_approximation(rate / _k, parent >> _p,
+ rational_best_approximation(rate / _k, parent / _p,
nkmp->max_n, nkmp->max_m,
&_n, &_m);
- tmp_rate = (parent * _n * _k >> _p) / _m;
+ tmp_rate = parent * _n * _k / (_m * _p);
if (tmp_rate > rate)
continue;
@@ -110,13 +110,12 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.max_n = 1 << nkmp->n.width;
_nkmp.max_k = 1 << nkmp->k.width;
- _nkmp.max_m = 1 << nkmp->m.width;
- _nkmp.max_p = (1 << nkmp->p.width) - 1;
+ _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
- ccu_nkmp_find_best(*parent_rate, rate,
- &_nkmp);
+ ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
- return (*parent_rate * _nkmp.n * _nkmp.k >> _nkmp.p) / _nkmp.m;
+ return *parent_rate * _nkmp.n * _nkmp.k / (_nkmp.m * _nkmp.p);
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -129,8 +128,8 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.max_n = 1 << nkmp->n.width;
_nkmp.max_k = 1 << nkmp->k.width;
- _nkmp.max_m = 1 << nkmp->m.width;
- _nkmp.max_p = (1 << nkmp->p.width) - 1;
+ _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
+ _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
@@ -145,7 +144,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
reg |= (_nkmp.n - 1) << nkmp->n.shift;
reg |= (_nkmp.k - 1) << nkmp->k.shift;
reg |= (_nkmp.m - 1) << nkmp->m.shift;
- reg |= _nkmp.p << nkmp->p.shift;
+ reg |= ilog2(_nkmp.p) << nkmp->p.shift;
writel(reg, nkmp->common.base + nkmp->common.reg);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index e35ddd8eec8b..b61bdd8c7a7f 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -61,11 +61,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ unsigned long max_n, max_m;
unsigned long n, m;
- rational_best_approximation(rate, *parent_rate,
- 1 << nm->n.width, 1 << nm->m.width,
- &n, &m);
+ max_n = 1 << nm->n.width;
+ max_m = nm->m.max ?: 1 << nm->m.width;
+
+ rational_best_approximation(rate, *parent_rate, max_n, max_m, &n, &m);
return *parent_rate * n / m;
}
@@ -75,6 +77,7 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
unsigned long flags;
+ unsigned long max_n, max_m;
unsigned long n, m;
u32 reg;
@@ -83,9 +86,10 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
else
ccu_frac_helper_disable(&nm->common, &nm->frac);
- rational_best_approximation(rate, parent_rate,
- 1 << nm->n.width, 1 << nm->m.width,
- &n, &m);
+ max_n = 1 << nm->n.width;
+ max_m = nm->m.max ?: 1 << nm->m.width;
+
+ rational_best_approximation(rate, parent_rate, max_n, max_m, &n, &m);
spin_lock_irqsave(nm->common.lock, flags);
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index b38d71cec74c..e54266cc1c51 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -91,7 +91,8 @@ static void __init sun4i_a10_mod0_setup(struct device_node *node)
sunxi_factors_register(node, &sun4i_a10_mod0_data,
&sun4i_a10_mod0_lock, reg);
}
-CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
+CLK_OF_DECLARE_DRIVER(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk",
+ sun4i_a10_mod0_setup);
static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index a5666e1d0ce7..ea1eed24778c 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -82,8 +82,8 @@ err_unmap:
of_address_to_resource(node, 0, &res);
release_mem_region(res.start, resource_size(&res));
}
-CLK_OF_DECLARE(sun8i_a23_apb0, "allwinner,sun8i-a23-apb0-clk",
- sun8i_a23_apb0_setup);
+CLK_OF_DECLARE_DRIVER(sun8i_a23_apb0, "allwinner,sun8i-a23-apb0-clk",
+ sun8i_a23_apb0_setup);
static int sun8i_a23_apb0_clk_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/uniphier/Kconfig b/drivers/clk/uniphier/Kconfig
new file mode 100644
index 000000000000..5512377bd62b
--- /dev/null
+++ b/drivers/clk/uniphier/Kconfig
@@ -0,0 +1,9 @@
+config CLK_UNIPHIER
+ bool "Clock driver for UniPhier SoCs"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for clock controllers on UniPhier SoCs.
+ Say Y if you want to control the clocks provided by the System
+ Control block, Media I/O block, and Peripheral block.
diff --git a/drivers/clk/uniphier/Makefile b/drivers/clk/uniphier/Makefile
new file mode 100644
index 000000000000..f27b360329ca
--- /dev/null
+++ b/drivers/clk/uniphier/Makefile
@@ -0,0 +1,8 @@
+obj-y += clk-uniphier-core.o
+obj-y += clk-uniphier-fixed-factor.o
+obj-y += clk-uniphier-fixed-rate.o
+obj-y += clk-uniphier-gate.o
+obj-y += clk-uniphier-mux.o
+obj-y += clk-uniphier-sys.o
+obj-y += clk-uniphier-mio.o
+obj-y += clk-uniphier-peri.o
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
new file mode 100644
index 000000000000..5ffb898d0839
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-uniphier.h"
+
+static struct clk_hw *uniphier_clk_register(struct device *dev,
+ struct regmap *regmap,
+ const struct uniphier_clk_data *data)
+{
+ switch (data->type) {
+ case UNIPHIER_CLK_TYPE_FIXED_FACTOR:
+ return uniphier_clk_register_fixed_factor(dev, data->name,
+ &data->data.factor);
+ case UNIPHIER_CLK_TYPE_FIXED_RATE:
+ return uniphier_clk_register_fixed_rate(dev, data->name,
+ &data->data.rate);
+ case UNIPHIER_CLK_TYPE_GATE:
+ return uniphier_clk_register_gate(dev, regmap, data->name,
+ &data->data.gate);
+ case UNIPHIER_CLK_TYPE_MUX:
+ return uniphier_clk_register_mux(dev, regmap, data->name,
+ &data->data.mux);
+ default:
+ dev_err(dev, "unsupported clock type\n");
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int uniphier_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *hw_data;
+ const struct uniphier_clk_data *p, *data;
+ struct regmap *regmap;
+ struct device_node *parent;
+ int clk_num = 0;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap (error %ld)\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ for (p = data; p->name; p++)
+ clk_num = max(clk_num, p->idx + 1);
+
+ hw_data = devm_kzalloc(dev,
+ sizeof(*hw_data) + clk_num * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ hw_data->num = clk_num;
+
+ /* avoid returning NULL for unused idx */
+ for (clk_num--; clk_num >= 0; clk_num--)
+ hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
+
+ for (p = data; p->name; p++) {
+ struct clk_hw *hw;
+
+ dev_dbg(dev, "register %s (index=%d)\n", p->name, p->idx);
+ hw = uniphier_clk_register(dev, regmap, p);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to register %s (error %ld)\n",
+ p->name, PTR_ERR(hw));
+ return PTR_ERR(hw);
+ }
+
+ if (p->idx >= 0)
+ hw_data->hws[p->idx] = hw;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ hw_data);
+}
+
+static int uniphier_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_clk_match[] = {
+ /* System clock */
+ {
+ .compatible = "socionext,uniphier-ld4-clock",
+ .data = uniphier_ld4_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-clock",
+ .data = uniphier_pro4_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-clock",
+ .data = uniphier_sld8_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-clock",
+ .data = uniphier_pro5_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-clock",
+ .data = uniphier_pxs2_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-clock",
+ .data = uniphier_ld11_sys_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-clock",
+ .data = uniphier_ld20_sys_clk_data,
+ },
+ /* Media I/O clock */
+ {
+ .compatible = "socionext,uniphier-sld3-mio-clock",
+ .data = uniphier_sld3_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-mio-clock",
+ .data = uniphier_sld3_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-mio-clock",
+ .data = uniphier_sld3_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-mio-clock",
+ .data = uniphier_sld3_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-mio-clock",
+ .data = uniphier_pro5_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-mio-clock",
+ .data = uniphier_pro5_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-mio-clock",
+ .data = uniphier_sld3_mio_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-mio-clock",
+ .data = uniphier_pro5_mio_clk_data,
+ },
+ /* Peripheral clock */
+ {
+ .compatible = "socionext,uniphier-ld4-peri-clock",
+ .data = uniphier_ld4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-peri-clock",
+ .data = uniphier_ld4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver uniphier_clk_driver = {
+ .probe = uniphier_clk_probe,
+ .remove = uniphier_clk_remove,
+ .driver = {
+ .name = "uniphier-clk",
+ .of_match_table = uniphier_clk_match,
+ },
+};
+builtin_platform_driver(uniphier_clk_driver);
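
The probe sizes the clk_hw_onecell table from the largest provider index plus one and pre-fills every slot with ERR_PTR(-EINVAL), so a consumer asking for an unused index gets an error rather than NULL. A standalone sketch of that sizing step, with a made-up clock table:

/*
 * Standalone sketch of the onecell sizing in uniphier_clk_probe():
 * size the table from the largest index plus one; unused slots are
 * meant to hold an error marker. The table below is made up.
 */
#include <stdio.h>

struct clk_entry {
	const char *name;
	int idx;	/* -1 means "internal, not exported" */
};

int main(void)
{
	const struct clk_entry data[] = {
		{ "uart", 0 }, { "i2c", 1 }, { "stdmac", 8 },
		{ NULL, 0 },	/* sentinel */
	};
	const struct clk_entry *p;
	int clk_num = 0;

	for (p = data; p->name; p++)
		if (p->idx + 1 > clk_num)
			clk_num = p->idx + 1;

	/* valid slots are 0 .. clk_num - 1; indexes 2..7 stay error markers */
	printf("onecell table needs %d slots\n", clk_num);
	return 0;
}
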
diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-factor.c b/drivers/clk/uniphier/clk-uniphier-fixed-factor.c
new file mode 100644
index 000000000000..da2d9f47ef9f
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-fixed-factor.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+
+#include "clk-uniphier.h"
+
+struct clk_hw *uniphier_clk_register_fixed_factor(struct device *dev,
+ const char *name,
+ const struct uniphier_clk_fixed_factor_data *data)
+{
+ struct clk_fixed_factor *fix;
+ struct clk_init_data init;
+ int ret;
+
+ fix = devm_kzalloc(dev, sizeof(*fix), GFP_KERNEL);
+ if (!fix)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = data->parent_name ? CLK_SET_RATE_PARENT : 0;
+ init.parent_names = data->parent_name ? &data->parent_name : NULL;
+ init.num_parents = data->parent_name ? 1 : 0;
+
+ fix->mult = data->mult;
+ fix->div = data->div;
+ fix->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &fix->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &fix->hw;
+}
diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
new file mode 100644
index 000000000000..0ad0d46173c0
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+
+#include "clk-uniphier.h"
+
+struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev,
+ const char *name,
+ const struct uniphier_clk_fixed_rate_data *data)
+{
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate fixed-rate clock */
+ fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ fixed->fixed_rate = data->fixed_rate;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &fixed->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &fixed->hw;
+}
diff --git a/drivers/clk/uniphier/clk-uniphier-gate.c b/drivers/clk/uniphier/clk-uniphier-gate.c
new file mode 100644
index 000000000000..49142d44446d
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-gate.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "clk-uniphier.h"
+
+struct uniphier_clk_gate {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int bit;
+};
+
+#define to_uniphier_clk_gate(_hw) \
+ container_of(_hw, struct uniphier_clk_gate, hw)
+
+static int uniphier_clk_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct uniphier_clk_gate *gate = to_uniphier_clk_gate(hw);
+
+ return regmap_write_bits(gate->regmap, gate->reg, BIT(gate->bit),
+ enable ? BIT(gate->bit) : 0);
+}
+
+static int uniphier_clk_gate_enable(struct clk_hw *hw)
+{
+ return uniphier_clk_gate_endisable(hw, 1);
+}
+
+static void uniphier_clk_gate_disable(struct clk_hw *hw)
+{
+ if (uniphier_clk_gate_endisable(hw, 0) < 0)
+ pr_warn("failed to disable clk\n");
+}
+
+static int uniphier_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct uniphier_clk_gate *gate = to_uniphier_clk_gate(hw);
+ unsigned int val;
+
+ if (regmap_read(gate->regmap, gate->reg, &val) < 0)
+ pr_warn("is_enabled() may return wrong result\n");
+
+ return !!(val & BIT(gate->bit));
+}
+
+static const struct clk_ops uniphier_clk_gate_ops = {
+ .enable = uniphier_clk_gate_enable,
+ .disable = uniphier_clk_gate_disable,
+ .is_enabled = uniphier_clk_gate_is_enabled,
+};
+
+struct clk_hw *uniphier_clk_register_gate(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_gate_data *data)
+{
+ struct uniphier_clk_gate *gate;
+ struct clk_init_data init;
+ int ret;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &uniphier_clk_gate_ops;
+ init.flags = data->parent_name ? CLK_SET_RATE_PARENT : 0;
+ init.parent_names = data->parent_name ? &data->parent_name : NULL;
+ init.num_parents = data->parent_name ? 1 : 0;
+
+ gate->regmap = regmap;
+ gate->reg = data->reg;
+ gate->bit = data->bit;
+ gate->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &gate->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &gate->hw;
+}
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
new file mode 100644
index 000000000000..6aa7ec768d0b
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-uniphier.h"
+
+#define UNIPHIER_MIO_CLK_SD_FIXED \
+ UNIPHIER_CLK_FACTOR("sd-44m", -1, "sd-133m", 1, 3), \
+ UNIPHIER_CLK_FACTOR("sd-33m", -1, "sd-200m", 1, 6), \
+ UNIPHIER_CLK_FACTOR("sd-50m", -1, "sd-200m", 1, 4), \
+ UNIPHIER_CLK_FACTOR("sd-67m", -1, "sd-200m", 1, 3), \
+ UNIPHIER_CLK_FACTOR("sd-100m", -1, "sd-200m", 1, 2), \
+ UNIPHIER_CLK_FACTOR("sd-40m", -1, "sd-200m", 1, 5), \
+ UNIPHIER_CLK_FACTOR("sd-25m", -1, "sd-200m", 1, 8), \
+ UNIPHIER_CLK_FACTOR("sd-22m", -1, "sd-133m", 1, 6)
+
+#define UNIPHIER_MIO_CLK_SD(_idx, ch) \
+ { \
+ .name = "sd" #ch "-sel", \
+ .type = UNIPHIER_CLK_TYPE_MUX, \
+ .idx = -1, \
+ .data.mux = { \
+ .parent_names = { \
+ "sd-44m", \
+ "sd-33m", \
+ "sd-50m", \
+ "sd-67m", \
+ "sd-100m", \
+ "sd-40m", \
+ "sd-25m", \
+ "sd-22m", \
+ }, \
+ .num_parents = 8, \
+ .reg = 0x30 + 0x200 * (ch), \
+ .masks = { \
+ 0x00031000, \
+ 0x00031000, \
+ 0x00031000, \
+ 0x00031000, \
+ 0x00001300, \
+ 0x00001300, \
+ 0x00001300, \
+ 0x00001300, \
+ }, \
+ .vals = { \
+ 0x00000000, \
+ 0x00010000, \
+ 0x00020000, \
+ 0x00030000, \
+ 0x00001000, \
+ 0x00001100, \
+ 0x00001200, \
+ 0x00001300, \
+ }, \
+ }, \
+ }, \
+ UNIPHIER_CLK_GATE("sd" #ch, (_idx), "sd" #ch "-sel", 0x20 + 0x200 * (ch), 8)
+
+#define UNIPHIER_MIO_CLK_USB2(idx, ch) \
+ UNIPHIER_CLK_GATE("usb2" #ch, (idx), "usb2", 0x20 + 0x200 * (ch), 28)
+
+#define UNIPHIER_MIO_CLK_USB2_PHY(idx, ch) \
+ UNIPHIER_CLK_GATE("usb2" #ch "-phy", (idx), "usb2", 0x20 + 0x200 * (ch), 29)
+
+#define UNIPHIER_MIO_CLK_DMAC(idx) \
+ UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25)
+
+const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
+ UNIPHIER_MIO_CLK_SD_FIXED,
+ UNIPHIER_MIO_CLK_SD(0, 0),
+ UNIPHIER_MIO_CLK_SD(1, 1),
+ UNIPHIER_MIO_CLK_SD(2, 2),
+ UNIPHIER_MIO_CLK_DMAC(7),
+ UNIPHIER_MIO_CLK_USB2(8, 0),
+ UNIPHIER_MIO_CLK_USB2(9, 1),
+ UNIPHIER_MIO_CLK_USB2(10, 2),
+ UNIPHIER_MIO_CLK_USB2(11, 3),
+ UNIPHIER_MIO_CLK_USB2_PHY(12, 0),
+ UNIPHIER_MIO_CLK_USB2_PHY(13, 1),
+ UNIPHIER_MIO_CLK_USB2_PHY(14, 2),
+ UNIPHIER_MIO_CLK_USB2_PHY(15, 3),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+ UNIPHIER_MIO_CLK_SD_FIXED,
+ UNIPHIER_MIO_CLK_SD(0, 0),
+ UNIPHIER_MIO_CLK_SD(1, 1),
+ { /* sentinel */ }
+};
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
new file mode 100644
index 000000000000..15a2f2cbe0d9
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "clk-uniphier.h"
+
+struct uniphier_clk_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ const unsigned int *masks;
+ const unsigned int *vals;
+};
+
+#define to_uniphier_clk_mux(_hw) container_of(_hw, struct uniphier_clk_mux, hw)
+
+static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
+
+ return regmap_write_bits(mux->regmap, mux->reg, mux->masks[index],
+ mux->vals[index]);
+}
+
+static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ int ret;
+ u32 val;
+ u8 i;
+
+ ret = regmap_read(mux->regmap, mux->reg, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_parents; i++)
+ if ((mux->masks[i] & val) == mux->vals[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static const struct clk_ops uniphier_clk_mux_ops = {
+ .determine_rate = __clk_mux_determine_rate,
+ .set_parent = uniphier_clk_mux_set_parent,
+ .get_parent = uniphier_clk_mux_get_parent,
+};
+
+struct clk_hw *uniphier_clk_register_mux(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_mux_data *data)
+{
+ struct uniphier_clk_mux *mux;
+ struct clk_init_data init;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &uniphier_clk_mux_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = data->parent_names;
+ init.num_parents = data->num_parents;
+
+ mux->regmap = regmap;
+ mux->reg = data->reg;
+ mux->masks = data->masks;
+ mux->vals = data->vals;
+ mux->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &mux->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &mux->hw;
+}
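
Unlike a plain bit-field mux, each parent here owns a mask/value pair (see the SD mux table in clk-uniphier-mio.c above), so get_parent scans the pairs for the first match. A standalone sketch of that lookup using the same table values:

/*
 * Standalone sketch of the mask/value parent lookup used by
 * uniphier_clk_mux_get_parent(): each parent has its own mask and
 * expected value rather than a contiguous bit field. The table below
 * reuses the SD mux entries from clk-uniphier-mio.c.
 */
#include <stdio.h>

static const unsigned int masks[] = {
	0x00031000, 0x00031000, 0x00031000, 0x00031000,
	0x00001300, 0x00001300, 0x00001300, 0x00001300,
};
static const unsigned int vals[] = {
	0x00000000, 0x00010000, 0x00020000, 0x00030000,
	0x00001000, 0x00001100, 0x00001200, 0x00001300,
};

static int mux_get_parent(unsigned int reg_val)
{
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		if ((masks[i] & reg_val) == vals[i])
			return i;

	return -1;	/* no parent matches the register contents */
}

int main(void)
{
	/* register reads back 0x00020000 -> parent 2 ("sd-50m") */
	printf("parent index = %d\n", mux_get_parent(0x00020000));
	return 0;
}
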
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
new file mode 100644
index 000000000000..521c80e9a06f
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "clk-uniphier.h"
+
+#define UNIPHIER_PERI_CLK_UART(idx, ch) \
+ UNIPHIER_CLK_GATE("uart" #ch, (idx), "uart", 0x24, 19 + (ch))
+
+#define UNIPHIER_PERI_CLK_I2C_COMMON \
+ UNIPHIER_CLK_GATE("i2c-common", -1, "i2c", 0x20, 1)
+
+#define UNIPHIER_PERI_CLK_I2C(idx, ch) \
+ UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c-common", 0x24, 5 + (ch))
+
+#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
+ UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
+
+const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
+ UNIPHIER_PERI_CLK_UART(0, 0),
+ UNIPHIER_PERI_CLK_UART(1, 1),
+ UNIPHIER_PERI_CLK_UART(2, 2),
+ UNIPHIER_PERI_CLK_UART(3, 3),
+ UNIPHIER_PERI_CLK_I2C_COMMON,
+ UNIPHIER_PERI_CLK_I2C(4, 0),
+ UNIPHIER_PERI_CLK_I2C(5, 1),
+ UNIPHIER_PERI_CLK_I2C(6, 2),
+ UNIPHIER_PERI_CLK_I2C(7, 3),
+ UNIPHIER_PERI_CLK_I2C(8, 4),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
+ UNIPHIER_PERI_CLK_UART(0, 0),
+ UNIPHIER_PERI_CLK_UART(1, 1),
+ UNIPHIER_PERI_CLK_UART(2, 2),
+ UNIPHIER_PERI_CLK_UART(3, 3),
+ UNIPHIER_PERI_CLK_FI2C(4, 0),
+ UNIPHIER_PERI_CLK_FI2C(5, 1),
+ UNIPHIER_PERI_CLK_FI2C(6, 2),
+ UNIPHIER_PERI_CLK_FI2C(7, 3),
+ UNIPHIER_PERI_CLK_FI2C(8, 4),
+ UNIPHIER_PERI_CLK_FI2C(9, 5),
+ UNIPHIER_PERI_CLK_FI2C(10, 6),
+ { /* sentinel */ }
+};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
new file mode 100644
index 000000000000..5d029991047d
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stddef.h>
+
+#include "clk-uniphier.h"
+
+#define UNIPHIER_SLD3_SYS_CLK_SD \
+ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 8), \
+ UNIPHIER_CLK_FACTOR("sd-133m", -1, "vpll27a", 1, 2)
+
+#define UNIPHIER_PRO5_SYS_CLK_SD \
+ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 12), \
+ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 18)
+
+#define UNIPHIER_LD20_SYS_CLK_SD \
+ UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
+ UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
+
+#define UNIPHIER_SLD3_SYS_CLK_STDMAC(idx) \
+ UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x2104, 10)
+
+#define UNIPHIER_LD11_SYS_CLK_STDMAC(idx) \
+ UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x210c, 8)
+
+#define UNIPHIER_PRO4_SYS_CLK_GIO(idx) \
+ UNIPHIER_CLK_GATE("gio", (idx), NULL, 0x2104, 6)
+
+#define UNIPHIER_PRO4_SYS_CLK_USB3(idx, ch) \
+ UNIPHIER_CLK_GATE("usb3" #ch, (idx), NULL, 0x2104, 16 + (ch))
+
+const struct uniphier_clk_data uniphier_sld3_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */
+ UNIPHIER_CLK_FACTOR("upll", -1, "ref", 6000, 512), /* 288 MHz */
+ UNIPHIER_CLK_FACTOR("a2pll", -1, "ref", 24, 1), /* 589.824 MHz */
+ UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */
+ UNIPHIER_CLK_FACTOR("upll", -1, "ref", 6000, 512), /* 288 MHz */
+ UNIPHIER_CLK_FACTOR("a2pll", -1, "ref", 24, 1), /* 589.824 MHz */
+ UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 64, 1), /* 1600 MHz */
+ UNIPHIER_CLK_FACTOR("upll", -1, "ref", 288, 25), /* 288 MHz */
+ UNIPHIER_CLK_FACTOR("a2pll", -1, "upll", 256, 125), /* 589.824 MHz */
+ UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
+ UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
+ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 64, 1), /* 1600 MHz */
+ UNIPHIER_CLK_FACTOR("upll", -1, "ref", 288, 25), /* 288 MHz */
+ UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
+ UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
+ UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_PRO5_SYS_CLK_SD,
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC */
+ UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_PRO5_SYS_CLK_SD,
+ UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, RLE */
+ /* GIO is always clock-enabled: no function for 0x2104 bit6 */
+ UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ /* The document mentions 0x2104 bit 18, but not functional */
+ UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
+ UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */
+ UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25),
+ { /* sentinel */ }
+};
+
+const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_LD20_SYS_CLK_SD,
+ UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC */
+ /* GIO is always clock-enabled: no function for 0x210c bit5 */
+ /*
+ * clock for USB Link is enabled by the logic "OR" of bit 14 and bit 15.
+ * We do not use bit 15 here.
+ */
+ UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
+ UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
+ UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ { /* sentinel */ }
+};
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
new file mode 100644
index 000000000000..3ae184062388
--- /dev/null
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CLK_UNIPHIER_H__
+#define __CLK_UNIPHIER_H__
+
+struct clk_hw;
+struct device;
+struct regmap;
+
+#define UNIPHIER_CLK_MUX_MAX_PARENTS 8
+
+enum uniphier_clk_type {
+ UNIPHIER_CLK_TYPE_FIXED_FACTOR,
+ UNIPHIER_CLK_TYPE_FIXED_RATE,
+ UNIPHIER_CLK_TYPE_GATE,
+ UNIPHIER_CLK_TYPE_MUX,
+};
+
+struct uniphier_clk_fixed_factor_data {
+ const char *parent_name;
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct uniphier_clk_fixed_rate_data {
+ unsigned long fixed_rate;
+};
+
+struct uniphier_clk_gate_data {
+ const char *parent_name;
+ unsigned int reg;
+ unsigned int bit;
+};
+
+struct uniphier_clk_mux_data {
+ const char *parent_names[UNIPHIER_CLK_MUX_MAX_PARENTS];
+ unsigned int num_parents;
+ unsigned int reg;
+ unsigned int masks[UNIPHIER_CLK_MUX_MAX_PARENTS];
+ unsigned int vals[UNIPHIER_CLK_MUX_MAX_PARENTS];
+};
+
+struct uniphier_clk_data {
+ const char *name;
+ enum uniphier_clk_type type;
+ int idx;
+ union {
+ struct uniphier_clk_fixed_factor_data factor;
+ struct uniphier_clk_fixed_rate_data rate;
+ struct uniphier_clk_gate_data gate;
+ struct uniphier_clk_mux_data mux;
+ } data;
+};
+
+#define UNIPHIER_CLK_FACTOR(_name, _idx, _parent, _mult, _div) \
+ { \
+ .name = (_name), \
+ .type = UNIPHIER_CLK_TYPE_FIXED_FACTOR, \
+ .idx = (_idx), \
+ .data.factor = { \
+ .parent_name = (_parent), \
+ .mult = (_mult), \
+ .div = (_div), \
+ }, \
+ }
+
+
+#define UNIPHIER_CLK_GATE(_name, _idx, _parent, _reg, _bit) \
+ { \
+ .name = (_name), \
+ .type = UNIPHIER_CLK_TYPE_GATE, \
+ .idx = (_idx), \
+ .data.gate = { \
+ .parent_name = (_parent), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ }, \
+ }
+
+
+struct clk_hw *uniphier_clk_register_fixed_factor(struct device *dev,
+ const char *name,
+ const struct uniphier_clk_fixed_factor_data *data);
+struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev,
+ const char *name,
+ const struct uniphier_clk_fixed_rate_data *data);
+struct clk_hw *uniphier_clk_register_gate(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_gate_data *data);
+struct clk_hw *uniphier_clk_register_mux(struct device *dev,
+ struct regmap *regmap,
+ const char *name,
+ const struct uniphier_clk_mux_data *data);
+
+extern const struct uniphier_clk_data uniphier_sld3_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_ld4_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro4_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_sld8_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
+
+#endif /* __CLK_UNIPHIER_H__ */
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 5e9b65278e4c..4faa94440779 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -27,6 +27,26 @@
/* Magic unlocking token used on all Versatile boards */
#define VERSATILE_LOCK_VAL 0xA05F
+#define VERSATILE_AUX_OSC_BITS 0x7FFFF
+#define INTEGRATOR_AP_CM_BITS 0xFF
+#define INTEGRATOR_AP_SYS_BITS 0xFF
+#define INTEGRATOR_CP_CM_CORE_BITS 0x7FF
+#define INTEGRATOR_CP_CM_MEM_BITS 0x7FF000
+
+#define INTEGRATOR_AP_PCI_25_33_MHZ BIT(8)
+
+/**
+ * enum icst_control_type - the type of ICST control register
+ */
+enum icst_control_type {
+ ICST_VERSATILE, /* The standard type, all control bits available */
+ ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */
+ ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */
+ ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */
+};
+
/**
* struct clk_icst - ICST VCO clock wrapper
* @hw: corresponding clock hardware entry
@@ -34,6 +54,7 @@
* @lockreg: VCO lock register address
* @params: parameters for this ICST instance
* @rate: current rate
+ * @ctype: the type of control register for the ICST
*/
struct clk_icst {
struct clk_hw hw;
@@ -42,6 +63,7 @@ struct clk_icst {
u32 lockreg_off;
struct icst_params *params;
unsigned long rate;
+ enum icst_control_type ctype;
};
#define to_icst(_hw) container_of(_hw, struct clk_icst, hw)
@@ -59,6 +81,76 @@ static int vco_get(struct clk_icst *icst, struct icst_vco *vco)
ret = regmap_read(icst->map, icst->vcoreg_off, &val);
if (ret)
return ret;
+
+ /*
+ * The Integrator/AP core clock can only access the low eight
+ * bits of the v PLL divider. Bit 8 is tied low and always zero,
+ * r is hardwired to 22 and output divider s is hardwired to 1
+ * (divide by 2) according to the document
+ * "Integrator CM926EJ-S, CM946E-S, CM966E-S, CM1026EJ-S and
+ * CM1136JF-S User Guide" ARM DUI 0138E, page 3-13 thru 3-14.
+ */
+ if (icst->ctype == ICST_INTEGRATOR_AP_CM) {
+ vco->v = val & INTEGRATOR_AP_CM_BITS;
+ vco->r = 22;
+ vco->s = 1;
+ return 0;
+ }
+
+ /*
+ * The Integrator/AP system clock on the base board can only
+ * access the low eight bits of the v PLL divider. Bit 8 is tied low
+ * and always zero, r is hardwired to 46, and the output divider is
+ * hardwired to 3 (divide by 4) according to the document
+ * "Integrator AP ASIC Development Motherboard" ARM DUI 0098B,
+ * page 3-16.
+ */
+ if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
+ vco->v = val & INTEGRATOR_AP_SYS_BITS;
+ vco->r = 46;
+ vco->s = 3;
+ return 0;
+ }
+
+ /*
+ * The Integrator/AP PCI clock is using an odd pattern to create
+ * the child clock, basically a single bit called DIVX/Y is used
+ * to select between two different hardwired values: setting the
+ * bit to 0 yields v = 17, r = 22 and OD = 1, whereas setting the
+ * bit to 1 yields v = 14, r = 14 and OD = 1 giving the frequencies
+ * 33 or 25 MHz respectively.
+ */
+ if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
+ bool divxy = !!(val & INTEGRATOR_AP_PCI_25_33_MHZ);
+
+ vco->v = divxy ? 17 : 14;
+ vco->r = divxy ? 22 : 14;
+ vco->s = 1;
+ return 0;
+ }
+
+ /*
+ * The Integrator/CP core clock can access the low eight bits
+ * of the v PLL divider. Bit 8 is tied low and always zero,
+ * r is hardwired to 22 and the output divider s is accessible
+ * in bits 8 thru 10 according to the document
+ * "Integrator/CM940T, CM920T, CM740T, and CM720T User Guide"
+ * ARM DUI 0157A, page 3-20 thru 3-23 and 4-10.
+ */
+ if (icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
+ vco->v = val & 0xFF;
+ vco->r = 22;
+ vco->s = (val >> 8) & 7;
+ return 0;
+ }
+
+ if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
+ vco->v = (val >> 12) & 0xFF;
+ vco->r = 22;
+ vco->s = (val >> 20) & 7;
+ return 0;
+ }
+
vco->v = val & 0x1ff;
vco->r = (val >> 9) & 0x7f;
vco->s = (val >> 16) & 03;
@@ -72,22 +164,62 @@ static int vco_get(struct clk_icst *icst, struct icst_vco *vco)
*/
static int vco_set(struct clk_icst *icst, struct icst_vco vco)
{
+ u32 mask;
u32 val;
int ret;
- ret = regmap_read(icst->map, icst->vcoreg_off, &val);
- if (ret)
- return ret;
+ /* Mask the bits used by the VCO */
+ switch (icst->ctype) {
+ case ICST_INTEGRATOR_AP_CM:
+ mask = INTEGRATOR_AP_CM_BITS;
+ val = vco.v & 0xFF;
+ if (vco.v & 0x100)
+ pr_err("ICST error: tried to set bit 8 of VDW\n");
+ if (vco.s != 1)
+ pr_err("ICST error: tried to use VOD != 1\n");
+ if (vco.r != 22)
+ pr_err("ICST error: tried to use RDW != 22\n");
+ break;
+ case ICST_INTEGRATOR_AP_SYS:
+ mask = INTEGRATOR_AP_SYS_BITS;
+ val = vco.v & 0xFF;
+ if (vco.v & 0x100)
+ pr_err("ICST error: tried to set bit 8 of VDW\n");
+ if (vco.s != 3)
+ pr_err("ICST error: tried to use VOD != 3\n");
+ if (vco.r != 46)
+ pr_err("ICST error: tried to use RDW != 46\n");
+ break;
+ case ICST_INTEGRATOR_CP_CM_CORE:
+ mask = INTEGRATOR_CP_CM_CORE_BITS; /* Uses 12 bits */
+ val = (vco.v & 0xFF) | vco.s << 8;
+ if (vco.v & 0x100)
+ pr_err("ICST error: tried to set bit 8 of VDW\n");
+ if (vco.r != 22)
+ pr_err("ICST error: tried to use RDW != 22\n");
+ break;
+ case ICST_INTEGRATOR_CP_CM_MEM:
+ mask = INTEGRATOR_CP_CM_MEM_BITS; /* Uses 12 bits */
+ val = ((vco.v & 0xFF) << 12) | (vco.s << 20);
+ if (vco.v & 0x100)
+ pr_err("ICST error: tried to set bit 8 of VDW\n");
+ if (vco.r != 22)
+ pr_err("ICST error: tried to use RDW != 22\n");
+ break;
+ default:
+ /* Regular auxiliary oscillator */
+ mask = VERSATILE_AUX_OSC_BITS;
+ val = vco.v | (vco.r << 9) | (vco.s << 16);
+ break;
+ }
- /* Mask the 18 bits used by the VCO */
- val &= ~0x7ffff;
- val |= vco.v | (vco.r << 9) | (vco.s << 16);
+ pr_debug("ICST: new val = 0x%08x\n", val);
/* This magic unlocks the VCO so it can be controlled */
ret = regmap_write(icst->map, icst->lockreg_off, VERSATILE_LOCK_VAL);
if (ret)
return ret;
- ret = regmap_write(icst->map, icst->vcoreg_off, val);
+ ret = regmap_update_bits(icst->map, icst->vcoreg_off, mask, val);
if (ret)
return ret;
/* This locks the VCO again */
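The switch from regmap_write() to regmap_update_bits() is what makes the per-variant masks above safe: only the bits covered by the mask are rewritten, so neighbouring fields in a shared VCO register are left alone. Roughly, and ignoring regmap's locking and its skip-if-unchanged optimisation, the call behaves like the sketch below (update_bits_sketch is an illustrative name only):

	/* Illustration only: approximate effect of regmap_update_bits(map, reg, mask, val) */
	static int update_bits_sketch(struct regmap *map, unsigned int reg,
				      unsigned int mask, unsigned int val)
	{
		unsigned int tmp;
		int ret;

		ret = regmap_read(map, reg, &tmp);
		if (ret)
			return ret;
		tmp &= ~mask;		/* clear only the masked field */
		tmp |= val & mask;	/* insert the new value */
		return regmap_write(map, reg, tmp);
	}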
@@ -121,6 +253,46 @@ static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
+ if (icst->ctype == ICST_INTEGRATOR_AP_CM ||
+ icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
+ if (rate <= 12000000)
+ return 12000000;
+ if (rate >= 160000000)
+ return 160000000;
+ /* Slam to closest megahertz */
+ return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000;
+ }
+
+ if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
+ if (rate <= 6000000)
+ return 6000000;
+ if (rate >= 66000000)
+ return 66000000;
+ /* Slam to closest 0.5 megahertz */
+ return DIV_ROUND_CLOSEST(rate, 500000) * 500000;
+ }
+
+ if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
+ /* Divides between 3 and 50 MHz in steps of 0.25 MHz */
+ if (rate <= 3000000)
+ return 3000000;
+ if (rate >= 50000000)
+ return 50000000;
+ /* Slam to closest 0.25 MHz */
+ return DIV_ROUND_CLOSEST(rate, 250000) * 250000;
+ }
+
+ if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
+ /*
+ * If we're below or less than halfway from 25 to 33 MHz
+ * select 25 MHz
+ */
+ if (rate < 29000000)
+ return 25000000;
+ /* Else just return the default frequency */
+ return 33000000;
+ }
+
vco = icst_hz_to_vco(icst->params, rate);
return icst_hz(icst->params, vco);
}
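Seen from a clock consumer, the clamping and quantization above is exactly what clk_round_rate() reports before any register is touched. A hypothetical consumer fragment (device, rates and the demo_probe name are purely illustrative), using the standard linux/clk.h API:

	static int demo_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
		long rounded;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* On an Integrator/AP core module clock, 170 MHz rounds down to 160 MHz */
		rounded = clk_round_rate(clk, 170000000);
		if (rounded <= 0)
			return -EINVAL;

		return clk_set_rate(clk, rounded);
	}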
@@ -131,6 +303,36 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
+ if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
+ /* This clock is especially primitive */
+ unsigned int val;
+ int ret;
+
+ if (rate == 25000000) {
+ val = 0;
+ } else if (rate == 33000000) {
+ val = INTEGRATOR_AP_PCI_25_33_MHZ;
+ } else {
+ pr_err("ICST: cannot set PCI frequency %lu\n",
+ rate);
+ return -EINVAL;
+ }
+ ret = regmap_write(icst->map, icst->lockreg_off,
+ VERSATILE_LOCK_VAL);
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(icst->map, icst->vcoreg_off,
+ INTEGRATOR_AP_PCI_25_33_MHZ,
+ val);
+ if (ret)
+ return ret;
+ /* This locks the VCO again */
+ ret = regmap_write(icst->map, icst->lockreg_off, 0);
+ if (ret)
+ return ret;
+ return 0;
+ }
+
if (parent_rate)
icst->params->ref = parent_rate;
vco = icst_hz_to_vco(icst->params, rate);
@@ -148,7 +350,8 @@ static struct clk *icst_clk_setup(struct device *dev,
const struct clk_icst_desc *desc,
const char *name,
const char *parent_name,
- struct regmap *map)
+ struct regmap *map,
+ enum icst_control_type ctype)
{
struct clk *clk;
struct clk_icst *icst;
@@ -178,6 +381,7 @@ static struct clk *icst_clk_setup(struct device *dev,
icst->params = pclone;
icst->vcoreg_off = desc->vco_offset;
icst->lockreg_off = desc->lock_offset;
+ icst->ctype = ctype;
clk = clk_register(dev, &icst->hw);
if (IS_ERR(clk)) {
@@ -206,7 +410,8 @@ struct clk *icst_clk_register(struct device *dev,
pr_err("could not initialize ICST regmap\n");
return ERR_CAST(map);
}
- return icst_clk_setup(dev, desc, name, parent_name, map);
+ return icst_clk_setup(dev, desc, name, parent_name, map,
+ ICST_VERSATILE);
}
EXPORT_SYMBOL_GPL(icst_clk_register);
@@ -239,6 +444,56 @@ static const struct icst_params icst307_params = {
.idx2s = icst307_idx2s,
};
+/*
+ * The core modules on the Integrator/AP and Integrator/CP have
+ * especially crippled ICST525 control.
+ */
+static const struct icst_params icst525_apcp_cm_params = {
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ /* Minimum 12 MHz, VDW = 4 */
+ .vd_min = 12,
+ /*
+ * Maximum 160 MHz, VDW = 152 for all core modules, but
+ * CM926EJ-S, CM1026EJ-S and CM1136JF-S can actually
+ * go to 200 MHz (max VDW = 192).
+ */
+ .vd_max = 192,
+ /* r is hardcoded to 22 and this is the actual divisor, +2 */
+ .rd_min = 24,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct icst_params icst525_ap_sys_params = {
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ /* Minimum 3 MHz, VDW = 4 */
+ .vd_min = 3,
+ /* Maximum 50 MHz, VDW = 192 */
+ .vd_max = 50,
+ /* r is hardcoded to 46 and this is the actual divisor, +2 */
+ .rd_min = 48,
+ .rd_max = 48,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct icst_params icst525_ap_pci_params = {
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ /* Minimum 25 MHz */
+ .vd_min = 25,
+ /* Maximum 33 MHz */
+ .vd_max = 33,
+ /* r is hardcoded to 14 or 22 and these are the actual divisors, +2 */
+ .rd_min = 16,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
static void __init of_syscon_icst_setup(struct device_node *np)
{
struct device_node *parent;
@@ -247,6 +502,7 @@ static void __init of_syscon_icst_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct clk *regclk;
+ enum icst_control_type ctype;
/* We do not release this reference, we are using it perpetually */
parent = of_get_parent(np);
@@ -269,11 +525,28 @@ static void __init of_syscon_icst_setup(struct device_node *np)
return;
}
- if (of_device_is_compatible(np, "arm,syscon-icst525"))
+ if (of_device_is_compatible(np, "arm,syscon-icst525")) {
icst_desc.params = &icst525_params;
- else if (of_device_is_compatible(np, "arm,syscon-icst307"))
+ ctype = ICST_VERSATILE;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst307")) {
icst_desc.params = &icst307_params;
- else {
+ ctype = ICST_VERSATILE;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst525-integratorap-cm")) {
+ icst_desc.params = &icst525_apcp_cm_params;
+ ctype = ICST_INTEGRATOR_AP_CM;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst525-integratorap-sys")) {
+ icst_desc.params = &icst525_ap_sys_params;
+ ctype = ICST_INTEGRATOR_AP_SYS;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst525-integratorap-pci")) {
+ icst_desc.params = &icst525_ap_pci_params;
+ ctype = ICST_INTEGRATOR_AP_PCI;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst525-integratorcp-cm-core")) {
+ icst_desc.params = &icst525_apcp_cm_params;
+ ctype = ICST_INTEGRATOR_CP_CM_CORE;
+ } else if (of_device_is_compatible(np, "arm,syscon-icst525-integratorcp-cm-mem")) {
+ icst_desc.params = &icst525_apcp_cm_params;
+ ctype = ICST_INTEGRATOR_CP_CM_MEM;
+ } else {
pr_err("unknown ICST clock %s\n", name);
return;
}
@@ -281,7 +554,7 @@ static void __init of_syscon_icst_setup(struct device_node *np)
/* Parent clock name is not the same as node parent */
parent_name = of_clk_get_parent_name(np, 0);
- regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map);
+ regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
if (IS_ERR(regclk)) {
pr_err("error setting up syscon ICST clock %s\n", name);
return;
@@ -294,5 +567,14 @@ CLK_OF_DECLARE(arm_syscon_icst525_clk,
"arm,syscon-icst525", of_syscon_icst_setup);
CLK_OF_DECLARE(arm_syscon_icst307_clk,
"arm,syscon-icst307", of_syscon_icst_setup);
-
+CLK_OF_DECLARE(arm_syscon_integratorap_cm_clk,
+ "arm,syscon-icst525-integratorap-cm", of_syscon_icst_setup);
+CLK_OF_DECLARE(arm_syscon_integratorap_sys_clk,
+ "arm,syscon-icst525-integratorap-sys", of_syscon_icst_setup);
+CLK_OF_DECLARE(arm_syscon_integratorap_pci_clk,
+ "arm,syscon-icst525-integratorap-pci", of_syscon_icst_setup);
+CLK_OF_DECLARE(arm_syscon_integratorcp_cm_core_clk,
+ "arm,syscon-icst525-integratorcp-cm-core", of_syscon_icst_setup);
+CLK_OF_DECLARE(arm_syscon_integratorcp_cm_mem_clk,
+ "arm,syscon-icst525-integratorcp-cm-mem", of_syscon_icst_setup);
#endif
diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile
index 74005aa322a2..83374bfc4c07 100644
--- a/drivers/clk/zte/Makefile
+++ b/drivers/clk/zte/Makefile
@@ -1,2 +1,3 @@
obj-y := clk.o
obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o
+obj-$(CONFIG_ARCH_ZX) += clk-zx296718.o
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
new file mode 100644
index 000000000000..707d62956e9b
--- /dev/null
+++ b/drivers/clk/zte/clk-zx296718.c
@@ -0,0 +1,924 @@
+/*
+ * Copyright (C) 2015 - 2016 ZTE Corporation.
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/zx296718-clock.h>
+#include "clk.h"
+
+/* TOP CRM */
+#define TOP_CLK_MUX0 0x04
+#define TOP_CLK_MUX1 0x08
+#define TOP_CLK_MUX2 0x0c
+#define TOP_CLK_MUX3 0x10
+#define TOP_CLK_MUX4 0x14
+#define TOP_CLK_MUX5 0x18
+#define TOP_CLK_MUX6 0x1c
+#define TOP_CLK_MUX7 0x20
+#define TOP_CLK_MUX9 0x28
+
+#define TOP_CLK_GATE0 0x34
+#define TOP_CLK_GATE1 0x38
+#define TOP_CLK_GATE2 0x3c
+#define TOP_CLK_GATE3 0x40
+#define TOP_CLK_GATE4 0x44
+#define TOP_CLK_GATE5 0x48
+#define TOP_CLK_GATE6 0x4c
+
+#define TOP_CLK_DIV0 0x58
+
+#define PLL_CPU_REG 0x80
+#define PLL_VGA_REG 0xb0
+#define PLL_DDR_REG 0xa0
+
+/* LSP0 CRM */
+#define LSP0_TIMER3_CLK 0x4
+#define LSP0_TIMER4_CLK 0x8
+#define LSP0_TIMER5_CLK 0xc
+#define LSP0_UART3_CLK 0x10
+#define LSP0_UART1_CLK 0x14
+#define LSP0_UART2_CLK 0x18
+#define LSP0_SPIFC0_CLK 0x1c
+#define LSP0_I2C4_CLK 0x20
+#define LSP0_I2C5_CLK 0x24
+#define LSP0_SSP0_CLK 0x28
+#define LSP0_SSP1_CLK 0x2c
+#define LSP0_USIM0_CLK 0x30
+#define LSP0_GPIO_CLK 0x34
+#define LSP0_I2C3_CLK 0x38
+
+/* LSP1 CRM */
+#define LSP1_UART4_CLK 0x08
+#define LSP1_UART5_CLK 0x0c
+#define LSP1_PWM_CLK 0x10
+#define LSP1_I2C2_CLK 0x14
+#define LSP1_SSP2_CLK 0x1c
+#define LSP1_SSP3_CLK 0x20
+#define LSP1_SSP4_CLK 0x24
+#define LSP1_USIM1_CLK 0x28
+
+/* audio lsp */
+#define AUDIO_I2S0_DIV_CFG1 0x10
+#define AUDIO_I2S0_DIV_CFG2 0x14
+#define AUDIO_I2S0_CLK 0x18
+#define AUDIO_I2S1_DIV_CFG1 0x20
+#define AUDIO_I2S1_DIV_CFG2 0x24
+#define AUDIO_I2S1_CLK 0x28
+#define AUDIO_I2S2_DIV_CFG1 0x30
+#define AUDIO_I2S2_DIV_CFG2 0x34
+#define AUDIO_I2S2_CLK 0x38
+#define AUDIO_I2S3_DIV_CFG1 0x40
+#define AUDIO_I2S3_DIV_CFG2 0x44
+#define AUDIO_I2S3_CLK 0x48
+#define AUDIO_I2C0_CLK 0x50
+#define AUDIO_SPDIF0_DIV_CFG1 0x60
+#define AUDIO_SPDIF0_DIV_CFG2 0x64
+#define AUDIO_SPDIF0_CLK 0x68
+#define AUDIO_SPDIF1_DIV_CFG1 0x70
+#define AUDIO_SPDIF1_DIV_CFG2 0x74
+#define AUDIO_SPDIF1_CLK 0x78
+#define AUDIO_TIMER_CLK 0x80
+#define AUDIO_TDM_CLK 0x90
+#define AUDIO_TS_CLK 0xa0
+
+static DEFINE_SPINLOCK(clk_lock);
+
+static struct zx_pll_config pll_cpu_table[] = {
+ PLL_RATE(1312000000, 0x00103621, 0x04aaaaaa),
+ PLL_RATE(1407000000, 0x00103a21, 0x04aaaaaa),
+ PLL_RATE(1503000000, 0x00103e21, 0x04aaaaaa),
+ PLL_RATE(1600000000, 0x00104221, 0x04aaaaaa),
+};
+
+PNAME(osc) = {
+ "osc24m",
+ "osc32k",
+};
+
+PNAME(dbg_wclk_p) = {
+ "clk334m",
+ "clk466m",
+ "clk396m",
+ "clk250m",
+};
+
+PNAME(a72_coreclk_p) = {
+ "osc24m",
+ "pll_mm0_1188m",
+ "pll_mm1_1296m",
+ "clk1000m",
+ "clk648m",
+ "clk1600m",
+ "pll_audio_1800m",
+ "pll_vga_1800m",
+};
+
+PNAME(cpu_periclk_p) = {
+ "osc24m",
+ "clk500m",
+ "clk594m",
+ "clk466m",
+ "clk294m",
+ "clk334m",
+ "clk250m",
+ "clk125m",
+};
+
+PNAME(a53_coreclk_p) = {
+ "osc24m",
+ "clk1000m",
+ "pll_mm0_1188m",
+ "clk648m",
+ "clk500m",
+ "clk800m",
+ "clk1600m",
+ "pll_audio_1800m",
+};
+
+PNAME(sec_wclk_p) = {
+ "osc24m",
+ "clk396m",
+ "clk334m",
+ "clk297m",
+ "clk250m",
+ "clk198m",
+ "clk148m5",
+ "clk99m",
+};
+
+PNAME(sd_nand_wclk_p) = {
+ "osc24m",
+ "clk49m5",
+ "clk99m",
+ "clk198m",
+ "clk167m",
+ "clk148m5",
+ "clk125m",
+ "clk216m",
+};
+
+PNAME(emmc_wclk_p) = {
+ "osc24m",
+ "clk198m",
+ "clk99m",
+ "clk396m",
+ "clk334m",
+ "clk297m",
+ "clk250m",
+ "clk148m5",
+};
+
+PNAME(clk32_p) = {
+ "osc32k",
+ "clk32k768",
+};
+
+PNAME(usb_ref24m_p) = {
+ "osc32k",
+ "clk32k768",
+};
+
+PNAME(sys_noc_alck_p) = {
+ "osc24m",
+ "clk250m",
+ "clk198m",
+ "clk148m5",
+ "clk108m",
+ "clk54m",
+ "clk216m",
+ "clk240m",
+};
+
+PNAME(vde_aclk_p) = {
+ "clk334m",
+ "clk594m",
+ "clk500m",
+ "clk432m",
+ "clk480m",
+ "clk297m",
+ "clk_vga", /*600MHz*/
+ "clk294m",
+};
+
+PNAME(vce_aclk_p) = {
+ "clk334m",
+ "clk594m",
+ "clk500m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk_vga", /*600MHz*/
+ "clk294m",
+};
+
+PNAME(hde_aclk_p) = {
+ "clk334m",
+ "clk594m",
+ "clk500m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk_vga", /*600MHz*/
+ "clk294m",
+};
+
+PNAME(gpu_aclk_p) = {
+ "clk334m",
+ "clk648m",
+ "clk594m",
+ "clk500m",
+ "clk396m",
+ "clk297m",
+ "clk_vga", /*600MHz*/
+ "clk294m",
+};
+
+PNAME(sappu_aclk_p) = {
+ "clk396m",
+ "clk500m",
+ "clk250m",
+ "clk148m5",
+};
+
+PNAME(sappu_wclk_p) = {
+ "clk198m",
+ "clk396m",
+ "clk334m",
+ "clk297m",
+ "clk250m",
+ "clk148m5",
+ "clk125m",
+ "clk99m",
+};
+
+PNAME(vou_aclk_p) = {
+ "clk334m",
+ "clk594m",
+ "clk500m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk_vga", /*600MHz*/
+ "clk294m",
+};
+
+PNAME(vou_main_wclk_p) = {
+ "clk108m",
+ "clk594m",
+ "clk297m",
+ "clk148m5",
+ "clk74m25",
+ "clk54m",
+ "clk27m",
+ "clk_vga",
+};
+
+PNAME(vou_aux_wclk_p) = {
+ "clk108m",
+ "clk148m5",
+ "clk74m25",
+ "clk54m",
+ "clk27m",
+ "clk_vga",
+ "clk54m_mm0",
+ "clk"
+};
+
+PNAME(vou_ppu_wclk_p) = {
+ "clk334m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk250m",
+ "clk125m",
+ "clk198m",
+ "clk99m",
+};
+
+PNAME(vga_i2c_wclk_p) = {
+ "osc24m",
+ "clk99m",
+};
+
+PNAME(viu_m0_aclk_p) = {
+ "clk334m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk250m",
+ "clk125m",
+ "clk198m",
+ "osc24m",
+};
+
+PNAME(viu_m1_aclk_p) = {
+ "clk198m",
+ "clk250m",
+ "clk297m",
+ "clk125m",
+ "clk396m",
+ "clk334m",
+ "clk148m5",
+ "osc24m",
+};
+
+PNAME(viu_clk_p) = {
+ "clk198m",
+ "clk334m",
+ "clk297m",
+ "clk250m",
+ "clk396m",
+ "clk125m",
+ "clk99m",
+ "clk148m5",
+};
+
+PNAME(viu_jpeg_clk_p) = {
+ "clk334m",
+ "clk480m",
+ "clk432m",
+ "clk396m",
+ "clk297m",
+ "clk250m",
+ "clk125m",
+ "clk198m",
+};
+
+PNAME(ts_sys_clk_p) = {
+ "clk192m",
+ "clk167m",
+ "clk125m",
+ "clk99m",
+};
+
+PNAME(wdt_ares_p) = {
+ "osc24m",
+ "clk32k"
+};
+
+static struct clk_zx_pll zx296718_pll_clk[] = {
+ ZX296718_PLL("pll_cpu", "osc24m", PLL_CPU_REG, pll_cpu_table),
+};
+
+static struct zx_clk_fixed_factor top_ffactor_clk[] = {
+ FFACTOR(0, "clk4m", "osc24m", 1, 6, 0),
+ FFACTOR(0, "clk2m", "osc24m", 1, 12, 0),
+ /* pll cpu */
+ FFACTOR(0, "clk1600m", "pll_cpu", 1, 1, CLK_SET_RATE_PARENT),
+ FFACTOR(0, "clk800m", "pll_cpu", 1, 2, CLK_SET_RATE_PARENT),
+ /* pll mac */
+ FFACTOR(0, "clk25m", "pll_mac", 1, 40, 0),
+ FFACTOR(0, "clk125m", "pll_mac", 1, 8, 0),
+ FFACTOR(0, "clk250m", "pll_mac", 1, 4, 0),
+ FFACTOR(0, "clk50m", "pll_mac", 1, 20, 0),
+ FFACTOR(0, "clk500m", "pll_mac", 1, 2, 0),
+ FFACTOR(0, "clk1000m", "pll_mac", 1, 1, 0),
+ FFACTOR(0, "clk334m", "pll_mac", 1, 3, 0),
+ FFACTOR(0, "clk167m", "pll_mac", 1, 6, 0),
+ /* pll mm */
+ FFACTOR(0, "clk54m_mm0", "pll_mm0", 1, 22, 0),
+ FFACTOR(0, "clk74m25", "pll_mm0", 1, 16, 0),
+ FFACTOR(0, "clk148m5", "pll_mm0", 1, 8, 0),
+ FFACTOR(0, "clk297m", "pll_mm0", 1, 4, 0),
+ FFACTOR(0, "clk594m", "pll_mm0", 1, 2, 0),
+ FFACTOR(0, "pll_mm0_1188m", "pll_mm0", 1, 1, 0),
+ FFACTOR(0, "clk396m", "pll_mm0", 1, 3, 0),
+ FFACTOR(0, "clk198m", "pll_mm0", 1, 6, 0),
+ FFACTOR(0, "clk99m", "pll_mm0", 1, 12, 0),
+ FFACTOR(0, "clk49m5", "pll_mm0", 1, 24, 0),
+ /* pll mm */
+ FFACTOR(0, "clk324m", "pll_mm1", 1, 4, 0),
+ FFACTOR(0, "clk648m", "pll_mm1", 1, 2, 0),
+ FFACTOR(0, "pll_mm1_1296m", "pll_mm1", 1, 1, 0),
+ FFACTOR(0, "clk216m", "pll_mm1", 1, 6, 0),
+ FFACTOR(0, "clk432m", "pll_mm1", 1, 3, 0),
+ FFACTOR(0, "clk108m", "pll_mm1", 1, 12, 0),
+ FFACTOR(0, "clk72m", "pll_mm1", 1, 18, 0),
+ FFACTOR(0, "clk27m", "pll_mm1", 1, 48, 0),
+ FFACTOR(0, "clk54m", "pll_mm1", 1, 24, 0),
+ /* vga */
+ FFACTOR(0, "pll_vga_1800m", "pll_vga", 1, 1, 0),
+ FFACTOR(0, "clk_vga", "pll_vga", 1, 2, 0),
+ /* pll ddr */
+ FFACTOR(0, "clk466m", "pll_ddr", 1, 2, 0),
+
+ /* pll audio */
+ FFACTOR(0, "pll_audio_1800m", "pll_audio", 1, 1, 0),
+ FFACTOR(0, "clk32k768", "pll_audio", 1, 27000, 0),
+ FFACTOR(0, "clk16m384", "pll_audio", 1, 54, 0),
+ FFACTOR(0, "clk294m", "pll_audio", 1, 3, 0),
+
+ /* pll hsic*/
+ FFACTOR(0, "clk240m", "pll_hsic", 1, 4, 0),
+ FFACTOR(0, "clk480m", "pll_hsic", 1, 2, 0),
+ FFACTOR(0, "clk192m", "pll_hsic", 1, 5, 0),
+ FFACTOR(0, "clk_pll_24m", "pll_hsic", 1, 40, 0),
+ FFACTOR(0, "emmc_mux_div2", "emmc_mux", 1, 2, CLK_SET_RATE_PARENT),
+};
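The fixed-factor names encode the resulting rates. Assuming pll_mm0 runs at 1188 MHz (as the pll_mm0_1188m alias suggests), a few of the entries above work out as:

	clk594m    = 1188 MHz / 2  = 594 MHz
	clk297m    = 1188 MHz / 4  = 297 MHz
	clk148m5   = 1188 MHz / 8  = 148.5 MHz
	clk74m25   = 1188 MHz / 16 = 74.25 MHz
	clk54m_mm0 = 1188 MHz / 22 = 54 MHz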
+
+static struct clk_div_table noc_div_table[] = {
+ { .val = 1, .div = 2, },
+ { .val = 3, .div = 4, },
+};
+static struct zx_clk_div top_div_clk[] = {
+ DIV_T(0, "sys_noc_hclk", "sys_noc_aclk", TOP_CLK_DIV0, 0, 2, 0, noc_div_table),
+ DIV_T(0, "sys_noc_pclk", "sys_noc_aclk", TOP_CLK_DIV0, 4, 2, 0, noc_div_table),
+};
+
+static struct zx_clk_mux top_mux_clk[] = {
+ MUX(0, "dbg_mux", dbg_wclk_p, TOP_CLK_MUX0, 12, 2),
+ MUX(0, "a72_mux", a72_coreclk_p, TOP_CLK_MUX0, 8, 3),
+ MUX(0, "cpu_peri_mux", cpu_periclk_p, TOP_CLK_MUX0, 4, 3),
+ MUX_F(0, "a53_mux", a53_coreclk_p, TOP_CLK_MUX0, 0, 3, CLK_SET_RATE_PARENT, 0),
+ MUX(0, "sys_noc_aclk", sys_noc_alck_p, TOP_CLK_MUX1, 0, 3),
+ MUX(0, "sec_mux", sec_wclk_p, TOP_CLK_MUX2, 16, 3),
+ MUX(0, "sd1_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 12, 3),
+ MUX(0, "sd0_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 8, 3),
+ MUX(0, "emmc_mux", emmc_wclk_p, TOP_CLK_MUX2, 4, 3),
+ MUX(0, "nand_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 0, 3),
+ MUX(0, "usb_ref24m_mux", usb_ref24m_p, TOP_CLK_MUX9, 16, 1),
+ MUX(0, "clk32k", clk32_p, TOP_CLK_MUX9, 12, 1),
+ MUX_F(0, "wdt_mux", wdt_ares_p, TOP_CLK_MUX9, 8, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(0, "timer_mux", osc, TOP_CLK_MUX9, 4, 1),
+ MUX(0, "vde_mux", vde_aclk_p, TOP_CLK_MUX4, 0, 3),
+ MUX(0, "vce_mux", vce_aclk_p, TOP_CLK_MUX4, 4, 3),
+ MUX(0, "hde_mux", hde_aclk_p, TOP_CLK_MUX4, 8, 3),
+ MUX(0, "gpu_mux", gpu_aclk_p, TOP_CLK_MUX5, 0, 3),
+ MUX(0, "sappu_a_mux", sappu_aclk_p, TOP_CLK_MUX5, 4, 2),
+ MUX(0, "sappu_w_mux", sappu_wclk_p, TOP_CLK_MUX5, 8, 3),
+ MUX(0, "vou_a_mux", vou_aclk_p, TOP_CLK_MUX7, 0, 3),
+ MUX(0, "vou_main_w_mux", vou_main_wclk_p, TOP_CLK_MUX7, 4, 3),
+ MUX(0, "vou_aux_w_mux", vou_aux_wclk_p, TOP_CLK_MUX7, 8, 3),
+ MUX(0, "vou_ppu_w_mux", vou_ppu_wclk_p, TOP_CLK_MUX7, 12, 3),
+ MUX(0, "vga_i2c_mux", vga_i2c_wclk_p, TOP_CLK_MUX7, 16, 1),
+ MUX(0, "viu_m0_a_mux", viu_m0_aclk_p, TOP_CLK_MUX6, 0, 3),
+ MUX(0, "viu_m1_a_mux", viu_m1_aclk_p, TOP_CLK_MUX6, 4, 3),
+ MUX(0, "viu_w_mux", viu_clk_p, TOP_CLK_MUX6, 8, 3),
+ MUX(0, "viu_jpeg_w_mux", viu_jpeg_clk_p, TOP_CLK_MUX6, 12, 3),
+ MUX(0, "ts_sys_mux", ts_sys_clk_p, TOP_CLK_MUX6, 16, 2),
+};
+
+static struct zx_clk_gate top_gate_clk[] = {
+ GATE(CPU_DBG_GATE, "dbg_wclk", "dbg_mux", TOP_CLK_GATE0, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(A72_GATE, "a72_coreclk", "a72_mux", TOP_CLK_GATE0, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CPU_PERI_GATE, "cpu_peri", "cpu_peri_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(A53_GATE, "a53_coreclk", "a53_mux", TOP_CLK_GATE0, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(SD1_WCLK, "sd1_wclk", "sd1_mux", TOP_CLK_GATE1, 13, CLK_SET_RATE_PARENT, 0),
+ GATE(SD0_WCLK, "sd0_wclk", "sd0_mux", TOP_CLK_GATE1, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(EMMC_WCLK, "emmc_wclk", "emmc_mux_div2", TOP_CLK_GATE0, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(EMMC_NAND_AXI, "emmc_nand_aclk", "sys_noc_aclk", TOP_CLK_GATE1, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(NAND_WCLK, "nand_wclk", "nand_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(EMMC_NAND_AHB, "emmc_nand_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(0, "lsp1_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 31, 0, 0),
+ GATE(LSP1_148M5, "lsp1_148m5", "clk148m5", TOP_CLK_GATE2, 30, 0, 0),
+ GATE(LSP1_99M, "lsp1_99m", "clk99m", TOP_CLK_GATE2, 29, 0, 0),
+ GATE(LSP1_24M, "lsp1_24m", "osc24m", TOP_CLK_GATE2, 28, 0, 0),
+ GATE(LSP0_74M25, "lsp0_74m25", "clk74m25", TOP_CLK_GATE2, 25, 0, 0),
+ GATE(0, "lsp0_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 24, 0, 0),
+ GATE(LSP0_32K, "lsp0_32k", "osc32k", TOP_CLK_GATE2, 23, 0, 0),
+ GATE(LSP0_148M5, "lsp0_148m5", "clk148m5", TOP_CLK_GATE2, 22, 0, 0),
+ GATE(LSP0_99M, "lsp0_99m", "clk99m", TOP_CLK_GATE2, 21, 0, 0),
+ GATE(LSP0_24M, "lsp0_24m", "osc24m", TOP_CLK_GATE2, 20, 0, 0),
+ GATE(AUDIO_99M, "audio_99m", "clk99m", TOP_CLK_GATE5, 27, 0, 0),
+ GATE(AUDIO_24M, "audio_24m", "osc24m", TOP_CLK_GATE5, 28, 0, 0),
+ GATE(AUDIO_16M384, "audio_16m384", "clk16m384", TOP_CLK_GATE5, 29, 0, 0),
+ GATE(AUDIO_32K, "audio_32k", "clk32k", TOP_CLK_GATE5, 30, 0, 0),
+ GATE(WDT_WCLK, "wdt_wclk", "wdt_mux", TOP_CLK_GATE6, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(TIMER_WCLK, "timer_wclk", "timer_mux", TOP_CLK_GATE6, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(VDE_ACLK, "vde_aclk", "vde_mux", TOP_CLK_GATE3, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(VCE_ACLK, "vce_aclk", "vce_mux", TOP_CLK_GATE3, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(HDE_ACLK, "hde_aclk", "hde_mux", TOP_CLK_GATE3, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(GPU_ACLK, "gpu_aclk", "gpu_mux", TOP_CLK_GATE3, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(SAPPU_ACLK, "sappu_aclk", "sappu_a_mux", TOP_CLK_GATE3, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(SAPPU_WCLK, "sappu_wclk", "sappu_w_mux", TOP_CLK_GATE3, 22, CLK_SET_RATE_PARENT, 0),
+ GATE(VOU_ACLK, "vou_aclk", "vou_a_mux", TOP_CLK_GATE4, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(VOU_MAIN_WCLK, "vou_main_wclk", "vou_main_w_mux", TOP_CLK_GATE4, 18, CLK_SET_RATE_PARENT, 0),
+ GATE(VOU_AUX_WCLK, "vou_aux_wclk", "vou_aux_w_mux", TOP_CLK_GATE4, 19, CLK_SET_RATE_PARENT, 0),
+ GATE(VOU_PPU_WCLK, "vou_ppu_wclk", "vou_ppu_w_mux", TOP_CLK_GATE4, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(MIPI_CFG_CLK, "mipi_cfg_clk", "osc24m", TOP_CLK_GATE4, 21, 0, 0),
+ GATE(VGA_I2C_WCLK, "vga_i2c_wclk", "vga_i2c_mux", TOP_CLK_GATE4, 23, CLK_SET_RATE_PARENT, 0),
+ GATE(MIPI_REF_CLK, "mipi_ref_clk", "clk27m", TOP_CLK_GATE4, 24, 0, 0),
+ GATE(HDMI_OSC_CEC, "hdmi_osc_cec", "clk2m", TOP_CLK_GATE4, 22, 0, 0),
+ GATE(HDMI_OSC_CLK, "hdmi_osc_clk", "clk240m", TOP_CLK_GATE4, 25, 0, 0),
+ GATE(HDMI_XCLK, "hdmi_xclk", "osc24m", TOP_CLK_GATE4, 26, 0, 0),
+ GATE(VIU_M0_ACLK, "viu_m0_aclk", "viu_m0_a_mux", TOP_CLK_GATE4, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(VIU_M1_ACLK, "viu_m1_aclk", "viu_m1_a_mux", TOP_CLK_GATE4, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(VIU_WCLK, "viu_wclk", "viu_w_mux", TOP_CLK_GATE4, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(VIU_JPEG_WCLK, "viu_jpeg_wclk", "viu_jpeg_w_mux", TOP_CLK_GATE4, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(VIU_CFG_CLK, "viu_cfg_clk", "osc24m", TOP_CLK_GATE4, 6, 0, 0),
+ GATE(TS_SYS_WCLK, "ts_sys_wclk", "ts_sys_mux", TOP_CLK_GATE5, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(TS_SYS_108M, "ts_sys_108m", "clk108m", TOP_CLK_GATE5, 3, 0, 0),
+ GATE(USB20_HCLK, "usb20_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 12, 0, 0),
+ GATE(USB20_PHY_CLK, "usb20_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 13, 0, 0),
+ GATE(USB21_HCLK, "usb21_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 14, 0, 0),
+ GATE(USB21_PHY_CLK, "usb21_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 15, 0, 0),
+ GATE(GMAC_RMIICLK, "gmac_rmii_clk", "clk50m", TOP_CLK_GATE2, 3, 0, 0),
+ GATE(GMAC_PCLK, "gmac_pclk", "clk198m", TOP_CLK_GATE2, 1, 0, 0),
+ GATE(GMAC_ACLK, "gmac_aclk", "clk49m5", TOP_CLK_GATE2, 0, 0, 0),
+ GATE(GMAC_RFCLK, "gmac_refclk", "clk25m", TOP_CLK_GATE2, 4, 0, 0),
+ GATE(SD1_AHB, "sd1_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 12, 0, 0),
+ GATE(SD0_AHB, "sd0_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 8, 0, 0),
+ GATE(TEMPSENSOR_GATE, "tempsensor_gate", "clk4m", TOP_CLK_GATE5, 31, 0, 0),
+};
+
+static struct clk_hw_onecell_data top_hw_onecell_data = {
+ .num = TOP_NR_CLKS,
+ .hws = {
+ [TOP_NR_CLKS - 1] = NULL,
+ },
+};
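The [TOP_NR_CLKS - 1] = NULL designated initializer is doing real work here: clk_hw_onecell_data ends in a flexible hws[] array, and statically initializing its last slot (a GNU C extension) makes the compiler reserve TOP_NR_CLKS zero-initialized entries, which of_clk_hw_onecell_get() later indexes by the clock specifier. A stripped-down sketch of the same trick, with hypothetical names:

	/* Sketch: statically sizing a flexible array member (GNU C extension) */
	struct onecell_sketch {
		unsigned int num;
		void *slots[];
	};

	static struct onecell_sketch demo = {
		.num = 8,
		.slots = { [7] = NULL },	/* reserves 8 zeroed slots */
	};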
+
+static int __init top_clocks_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+ int i, ret;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
+ zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
+ if (ret) {
+ pr_warn("top clk %s init error!\n",
+ zx296718_pll_clk[i].hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
+ if (top_ffactor_clk[i].id)
+ top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
+ &top_ffactor_clk[i].factor.hw;
+
+ ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
+ if (ret) {
+ pr_warn("top clk %s init error!\n",
+ top_ffactor_clk[i].factor.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
+ if (top_mux_clk[i].id)
+ top_hw_onecell_data.hws[top_mux_clk[i].id] =
+ &top_mux_clk[i].mux.hw;
+
+ top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
+ if (ret) {
+ pr_warn("top clk %s init error!\n",
+ top_mux_clk[i].mux.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
+ if (top_gate_clk[i].id)
+ top_hw_onecell_data.hws[top_gate_clk[i].id] =
+ &top_gate_clk[i].gate.hw;
+
+ top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
+ if (ret) {
+ pr_warn("top clk %s init error!\n",
+ top_gate_clk[i].gate.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
+ if (top_div_clk[i].id)
+ top_hw_onecell_data.hws[top_div_clk[i].id] =
+ &top_div_clk[i].div.hw;
+
+ top_div_clk[i].div.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
+ if (ret) {
+ pr_warn("top clk %s init error!\n",
+ top_div_clk[i].div.hw.init->name);
+ }
+ }
+
+ if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &top_hw_onecell_data))
+ panic("could not register clk provider\n");
+ pr_info("top clk init over, nr:%d\n", TOP_NR_CLKS);
+
+ return 0;
+}
+
+static struct clk_div_table common_even_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 3, .div = 4, },
+ { .val = 5, .div = 6, },
+ { .val = 7, .div = 8, },
+ { .val = 9, .div = 10, },
+ { .val = 11, .div = 12, },
+ { .val = 13, .div = 14, },
+ { .val = 15, .div = 16, },
+};
+
+static struct clk_div_table common_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 3, },
+ { .val = 3, .div = 4, },
+ { .val = 4, .div = 5, },
+ { .val = 5, .div = 6, },
+ { .val = 6, .div = 7, },
+ { .val = 7, .div = 8, },
+ { .val = 8, .div = 9, },
+ { .val = 9, .div = 10, },
+ { .val = 10, .div = 11, },
+ { .val = 11, .div = 12, },
+ { .val = 12, .div = 13, },
+ { .val = 13, .div = 14, },
+ { .val = 14, .div = 15, },
+ { .val = 15, .div = 16, },
+};
+
+PNAME(lsp0_wclk_common_p) = {
+ "lsp0_24m",
+ "lsp0_99m",
+};
+
+PNAME(lsp0_wclk_timer3_p) = {
+ "timer3_div",
+ "lsp0_32k"
+};
+
+PNAME(lsp0_wclk_timer4_p) = {
+ "timer4_div",
+ "lsp0_32k"
+};
+
+PNAME(lsp0_wclk_timer5_p) = {
+ "timer5_div",
+ "lsp0_32k"
+};
+
+PNAME(lsp0_wclk_spifc0_p) = {
+ "lsp0_148m5",
+ "lsp0_24m",
+ "lsp0_99m",
+ "lsp0_74m25"
+};
+
+PNAME(lsp0_wclk_ssp_p) = {
+ "lsp0_148m5",
+ "lsp0_99m",
+ "lsp0_24m",
+};
+
+static struct zx_clk_mux lsp0_mux_clk[] = {
+ MUX(0, "timer3_wclk_mux", lsp0_wclk_timer3_p, LSP0_TIMER3_CLK, 4, 1),
+ MUX(0, "timer4_wclk_mux", lsp0_wclk_timer4_p, LSP0_TIMER4_CLK, 4, 1),
+ MUX(0, "timer5_wclk_mux", lsp0_wclk_timer5_p, LSP0_TIMER5_CLK, 4, 1),
+ MUX(0, "uart3_wclk_mux", lsp0_wclk_common_p, LSP0_UART3_CLK, 4, 1),
+ MUX(0, "uart1_wclk_mux", lsp0_wclk_common_p, LSP0_UART1_CLK, 4, 1),
+ MUX(0, "uart2_wclk_mux", lsp0_wclk_common_p, LSP0_UART2_CLK, 4, 1),
+ MUX(0, "spifc0_wclk_mux", lsp0_wclk_spifc0_p, LSP0_SPIFC0_CLK, 4, 2),
+ MUX(0, "i2c4_wclk_mux", lsp0_wclk_common_p, LSP0_I2C4_CLK, 4, 1),
+ MUX(0, "i2c5_wclk_mux", lsp0_wclk_common_p, LSP0_I2C5_CLK, 4, 1),
+ MUX(0, "ssp0_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP0_CLK, 4, 1),
+ MUX(0, "ssp1_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP1_CLK, 4, 1),
+ MUX(0, "i2c3_wclk_mux", lsp0_wclk_common_p, LSP0_I2C3_CLK, 4, 1),
+};
+
+static struct zx_clk_gate lsp0_gate_clk[] = {
+ GATE(LSP0_TIMER3_WCLK, "timer3_wclk", "timer3_wclk_mux", LSP0_TIMER3_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_TIMER4_WCLK, "timer4_wclk", "timer4_wclk_mux", LSP0_TIMER4_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_TIMER5_WCLK, "timer5_wclk", "timer5_wclk_mux", LSP0_TIMER5_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_UART3_WCLK, "uart3_wclk", "uart3_wclk_mux", LSP0_UART3_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_UART1_WCLK, "uart1_wclk", "uart1_wclk_mux", LSP0_UART1_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_UART2_WCLK, "uart2_wclk", "uart2_wclk_mux", LSP0_UART2_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_SPIFC0_WCLK, "spifc0_wclk", "spifc0_wclk_mux", LSP0_SPIFC0_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_I2C4_WCLK, "i2c4_wclk", "i2c4_wclk_mux", LSP0_I2C4_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_I2C5_WCLK, "i2c5_wclk", "i2c5_wclk_mux", LSP0_I2C5_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_SSP0_WCLK, "ssp0_wclk", "ssp0_div", LSP0_SSP0_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_SSP1_WCLK, "ssp1_wclk", "ssp1_div", LSP0_SSP1_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP0_I2C3_WCLK, "i2c3_wclk", "i2c3_wclk_mux", LSP0_I2C3_CLK, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct zx_clk_div lsp0_div_clk[] = {
+ DIV_T(0, "timer3_div", "lsp0_24m", LSP0_TIMER3_CLK, 12, 4, 0, common_even_div_table),
+ DIV_T(0, "timer4_div", "lsp0_24m", LSP0_TIMER4_CLK, 12, 4, 0, common_even_div_table),
+ DIV_T(0, "timer5_div", "lsp0_24m", LSP0_TIMER5_CLK, 12, 4, 0, common_even_div_table),
+ DIV_T(0, "ssp0_div", "ssp0_wclk_mux", LSP0_SSP0_CLK, 12, 4, 0, common_even_div_table),
+ DIV_T(0, "ssp1_div", "ssp1_wclk_mux", LSP0_SSP1_CLK, 12, 4, 0, common_even_div_table),
+};
+
+static struct clk_hw_onecell_data lsp0_hw_onecell_data = {
+ .num = LSP0_NR_CLKS,
+ .hws = {
+ [LSP0_NR_CLKS - 1] = NULL,
+ },
+};
+
+static int __init lsp0_clocks_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+ int i, ret;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_mux_clk); i++) {
+ if (lsp0_mux_clk[i].id)
+ lsp0_hw_onecell_data.hws[lsp0_mux_clk[i].id] =
+ &lsp0_mux_clk[i].mux.hw;
+
+ lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
+ if (ret) {
+ pr_warn("lsp0 clk %s init error!\n",
+ lsp0_mux_clk[i].mux.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
+ if (lsp0_gate_clk[i].id)
+ lsp0_hw_onecell_data.hws[lsp0_gate_clk[i].id] =
+ &lsp0_gate_clk[i].gate.hw;
+
+ lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
+ if (ret) {
+ pr_warn("lsp0 clk %s init error!\n",
+ lsp0_gate_clk[i].gate.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
+ if (lsp0_div_clk[i].id)
+ lsp0_hw_onecell_data.hws[lsp0_div_clk[i].id] =
+ &lsp0_div_clk[i].div.hw;
+
+ lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
+ if (ret) {
+ pr_warn("lsp0 clk %s init error!\n",
+ lsp0_div_clk[i].div.hw.init->name);
+ }
+ }
+
+ if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp0_hw_onecell_data))
+ panic("could not register clk provider\n");
+ pr_info("lsp0-clk init over:%d\n", LSP0_NR_CLKS);
+
+ return 0;
+}
+
+PNAME(lsp1_wclk_common_p) = {
+ "lsp1_24m",
+ "lsp1_99m",
+};
+
+PNAME(lsp1_wclk_ssp_p) = {
+ "lsp1_148m5",
+ "lsp1_99m",
+ "lsp1_24m",
+};
+
+static struct zx_clk_mux lsp1_mux_clk[] = {
+ MUX(0, "uart4_wclk_mux", lsp1_wclk_common_p, LSP1_UART4_CLK, 4, 1),
+ MUX(0, "uart5_wclk_mux", lsp1_wclk_common_p, LSP1_UART5_CLK, 4, 1),
+ MUX(0, "pwm_wclk_mux", lsp1_wclk_common_p, LSP1_PWM_CLK, 4, 1),
+ MUX(0, "i2c2_wclk_mux", lsp1_wclk_common_p, LSP1_I2C2_CLK, 4, 1),
+ MUX(0, "ssp2_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP2_CLK, 4, 2),
+ MUX(0, "ssp3_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP3_CLK, 4, 2),
+ MUX(0, "ssp4_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP4_CLK, 4, 2),
+ MUX(0, "usim1_wclk_mux", lsp1_wclk_common_p, LSP1_USIM1_CLK, 4, 1),
+};
+
+static struct zx_clk_div lsp1_div_clk[] = {
+ DIV_T(0, "pwm_div", "pwm_wclk_mux", LSP1_PWM_CLK, 12, 4, CLK_SET_RATE_PARENT, common_div_table),
+ DIV_T(0, "ssp2_div", "ssp2_wclk_mux", LSP1_SSP2_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
+ DIV_T(0, "ssp3_div", "ssp3_wclk_mux", LSP1_SSP3_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
+ DIV_T(0, "ssp4_div", "ssp4_wclk_mux", LSP1_SSP4_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
+};
+
+static struct zx_clk_gate lsp1_gate_clk[] = {
+ GATE(LSP1_UART4_WCLK, "lsp1_uart4_wclk", "uart4_wclk_mux", LSP1_UART4_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_UART5_WCLK, "lsp1_uart5_wclk", "uart5_wclk_mux", LSP1_UART5_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_PWM_WCLK, "lsp1_pwm_wclk", "pwm_div", LSP1_PWM_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_PWM_PCLK, "lsp1_pwm_pclk", "lsp1_pclk", LSP1_PWM_CLK, 0, 0, 0),
+ GATE(LSP1_I2C2_WCLK, "lsp1_i2c2_wclk", "i2c2_wclk_mux", LSP1_I2C2_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_SSP2_WCLK, "lsp1_ssp2_wclk", "ssp2_div", LSP1_SSP2_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_SSP3_WCLK, "lsp1_ssp3_wclk", "ssp3_div", LSP1_SSP3_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_SSP4_WCLK, "lsp1_ssp4_wclk", "ssp4_div", LSP1_SSP4_CLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(LSP1_USIM1_WCLK, "lsp1_usim1_wclk", "usim1_wclk_mux", LSP1_USIM1_CLK, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct clk_hw_onecell_data lsp1_hw_onecell_data = {
+ .num = LSP1_NR_CLKS,
+ .hws = {
+ [LSP1_NR_CLKS - 1] = NULL,
+ },
+};
+
+static int __init lsp1_clocks_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+ int i, ret;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_mux_clk); i++) {
+ if (lsp1_mux_clk[i].id)
+ lsp1_hw_onecell_data.hws[lsp1_mux_clk[i].id] =
+ &lsp1_mux_clk[i].mux.hw;
+
+ lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
+ if (ret) {
+ pr_warn("lsp1 clk %s init error!\n",
+ lsp1_mux_clk[i].mux.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
+ if (lsp1_gate_clk[i].id)
+ lsp1_hw_onecell_data.hws[lsp1_gate_clk[i].id] =
+ &lsp1_gate_clk[i].gate.hw;
+
+ lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
+ if (ret) {
+ pr_warn("lsp1 clk %s init error!\n",
+ lsp1_gate_clk[i].gate.hw.init->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
+ if (lsp1_div_clk[i].id)
+ lsp1_hw_onecell_data.hws[lsp1_div_clk[i].id] =
+ &lsp1_div_clk[i].div.hw;
+
+ lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
+ ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
+ if (ret) {
+ pr_warn("lsp1 clk %s init error!\n",
+ lsp1_div_clk[i].div.hw.init->name);
+ }
+ }
+
+ if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp1_hw_onecell_data))
+ panic("could not register clk provider\n");
+ pr_info("lsp1-clk init over, nr:%d\n", LSP1_NR_CLKS);
+
+ return 0;
+}
+
+static const struct of_device_id zx_clkc_match_table[] = {
+ { .compatible = "zte,zx296718-topcrm", .data = &top_clocks_init },
+ { .compatible = "zte,zx296718-lsp0crm", .data = &lsp0_clocks_init },
+ { .compatible = "zte,zx296718-lsp1crm", .data = &lsp1_clocks_init },
+ { }
+};
+
+static int zx_clkc_probe(struct platform_device *pdev)
+{
+ int (*init_fn)(struct device_node *np);
+ struct device_node *np = pdev->dev.of_node;
+
+ init_fn = of_device_get_match_data(&pdev->dev);
+ if (!init_fn) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ return init_fn(np);
+}
+
+static struct platform_driver zx_clk_driver = {
+ .probe = zx_clkc_probe,
+ .driver = {
+ .name = "zx296718-clkc",
+ .of_match_table = zx_clkc_match_table,
+ },
+};
+
+static int __init zx_clk_init(void)
+{
+ return platform_driver_register(&zx_clk_driver);
+}
+core_initcall(zx_clk_init);
diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c
index 7c73c538c43d..c4c1251bc1e7 100644
--- a/drivers/clk/zte/clk.c
+++ b/drivers/clk/zte/clk.c
@@ -21,8 +21,8 @@
#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw)
#define CFG0_CFG1_OFFSET 4
-#define LOCK_FLAG BIT(30)
-#define POWER_DOWN BIT(31)
+#define LOCK_FLAG 30
+#define POWER_DOWN 31
static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate)
{
@@ -50,8 +50,8 @@ static int hw_to_idx(struct clk_zx_pll *zx_pll)
hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET);
/* For matching the value in lookup table */
- hw_cfg0 &= ~LOCK_FLAG;
- hw_cfg0 |= POWER_DOWN;
+ hw_cfg0 &= ~BIT(zx_pll->lock_bit);
+ hw_cfg0 |= BIT(zx_pll->pd_bit);
for (i = 0; i < zx_pll->count; i++) {
if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1)
@@ -108,10 +108,10 @@ static int zx_pll_enable(struct clk_hw *hw)
u32 reg;
reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg & ~POWER_DOWN, zx_pll->reg_base);
+ writel_relaxed(reg & ~BIT(zx_pll->pd_bit), zx_pll->reg_base);
return readl_relaxed_poll_timeout(zx_pll->reg_base, reg,
- reg & LOCK_FLAG, 0, 100);
+ reg & BIT(zx_pll->lock_bit), 0, 100);
}
static void zx_pll_disable(struct clk_hw *hw)
@@ -120,7 +120,7 @@ static void zx_pll_disable(struct clk_hw *hw)
u32 reg;
reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg | POWER_DOWN, zx_pll->reg_base);
+ writel_relaxed(reg | BIT(zx_pll->pd_bit), zx_pll->reg_base);
}
static int zx_pll_is_enabled(struct clk_hw *hw)
@@ -130,10 +130,10 @@ static int zx_pll_is_enabled(struct clk_hw *hw)
reg = readl_relaxed(zx_pll->reg_base);
- return !(reg & POWER_DOWN);
+ return !(reg & BIT(zx_pll->pd_bit));
}
-static const struct clk_ops zx_pll_ops = {
+const struct clk_ops zx_pll_ops = {
.recalc_rate = zx_pll_recalc_rate,
.round_rate = zx_pll_round_rate,
.set_rate = zx_pll_set_rate,
@@ -141,6 +141,7 @@ static const struct clk_ops zx_pll_ops = {
.disable = zx_pll_disable,
.is_enabled = zx_pll_is_enabled,
};
+EXPORT_SYMBOL(zx_pll_ops);
struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg_base,
@@ -164,6 +165,8 @@ struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
zx_pll->reg_base = reg_base;
zx_pll->lookup_table = lookup_table;
zx_pll->count = count;
+ zx_pll->lock_bit = LOCK_FLAG;
+ zx_pll->pd_bit = POWER_DOWN;
zx_pll->lock = lock;
zx_pll->hw.init = &init;
diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h
index 65ae08b818d3..0df3474b2cf3 100644
--- a/drivers/clk/zte/clk.h
+++ b/drivers/clk/zte/clk.h
@@ -12,6 +12,26 @@
#include <linux/clk-provider.h>
#include <linux/spinlock.h>
+#define PNAME(x) static const char *x[]
+
+#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
+ &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = (const char *[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ }
+
+#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
+ &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ }
+
struct zx_pll_config {
unsigned long rate;
u32 cfg0;
@@ -24,8 +44,115 @@ struct clk_zx_pll {
const struct zx_pll_config *lookup_table; /* order by rate asc */
int count;
spinlock_t *lock;
+ u8 pd_bit; /* power down bit */
+ u8 lock_bit; /* pll lock flag bit */
+};
+
+#define PLL_RATE(_rate, _cfg0, _cfg1) \
+{ \
+ .rate = _rate, \
+ .cfg0 = _cfg0, \
+ .cfg1 = _cfg1, \
+}
+
+#define ZX_PLL(_name, _parent, _reg, _table, _pd, _lock) \
+{ \
+ .reg_base = (void __iomem *) _reg, \
+ .lookup_table = _table, \
+ .count = ARRAY_SIZE(_table), \
+ .pd_bit = _pd, \
+ .lock_bit = _lock, \
+ .hw.init = CLK_HW_INIT(_name, _parent, &zx_pll_ops, \
+ CLK_GET_RATE_NOCACHE), \
+}
+
+#define ZX296718_PLL(_name, _parent, _reg, _table) \
+ZX_PLL(_name, _parent, _reg, _table, 0, 30)
+
+struct zx_clk_gate {
+ struct clk_gate gate;
+ u16 id;
+};
+
+#define GATE(_id, _name, _parent, _reg, _bit, _flag, _gflags) \
+{ \
+ .gate = { \
+ .reg = (void __iomem *) _reg, \
+ .bit_idx = (_bit), \
+ .flags = _gflags, \
+ .lock = &clk_lock, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_gate_ops, \
+ _flag | CLK_IGNORE_UNUSED), \
+ }, \
+ .id = _id, \
+}
+
+struct zx_clk_fixed_factor {
+ struct clk_fixed_factor factor;
+ u16 id;
+};
+
+#define FFACTOR(_id, _name, _parent, _mult, _div, _flag) \
+{ \
+ .factor = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flag), \
+ }, \
+ .id = _id, \
+}
+
+struct zx_clk_mux {
+ struct clk_mux mux;
+ u16 id;
+};
+
+#define MUX_F(_id, _name, _parent, _reg, _shift, _width, _flag, _mflag) \
+{ \
+ .mux = { \
+ .reg = (void __iomem *) _reg, \
+ .mask = BIT(_width) - 1, \
+ .shift = _shift, \
+ .flags = _mflag, \
+ .lock = &clk_lock, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parent, \
+ &clk_mux_ops, \
+ _flag), \
+ }, \
+ .id = _id, \
+}
+
+#define MUX(_id, _name, _parent, _reg, _shift, _width) \
+MUX_F(_id, _name, _parent, _reg, _shift, _width, 0, 0)
+
+struct zx_clk_div {
+ struct clk_divider div;
+ u16 id;
};
+#define DIV_T(_id, _name, _parent, _reg, _shift, _width, _flag, _table) \
+{ \
+ .div = { \
+ .reg = (void __iomem *) _reg, \
+ .shift = _shift, \
+ .width = _width, \
+ .flags = 0, \
+ .table = _table, \
+ .lock = &clk_lock, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_divider_ops, \
+ _flag), \
+ }, \
+ .id = _id, \
+}
+
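To make the tables in clk-zx296718.c above concrete: with these macros, an entry such as MUX(0, "timer_mux", osc, TOP_CLK_MUX9, 4, 1) is roughly equivalent to writing out the initializer below (the .reg value is still a register offset at this point; the init functions rebase it by adding reg_base):

	{
		.mux = {
			.reg = (void __iomem *)TOP_CLK_MUX9,	/* offset, rebased at init */
			.mask = BIT(1) - 1,			/* one select bit */
			.shift = 4,
			.flags = 0,
			.lock = &clk_lock,
			.hw.init = CLK_HW_INIT_PARENTS("timer_mux", osc,
						       &clk_mux_ops, 0),
		},
		.id = 0,
	}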
struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg_base,
const struct zx_pll_config *lookup_table, int count, spinlock_t *lock);
@@ -38,4 +165,6 @@ struct clk_zx_audio {
struct clk *clk_register_zx_audio(const char *name,
const char * const parent_name,
unsigned long flags, void __iomem *reg_base);
+
+extern const struct clk_ops zx_pll_ops;
#endif
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 8a753fd5b79d..245190839359 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -361,7 +361,7 @@ config CLKSRC_METAG_GENERIC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
- depends on ARM
+ depends on ARM || ARM64
help
Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 41840d02c331..8f3488b80896 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -223,6 +223,7 @@ static u64 notrace exynos4_read_sched_clock(void)
return exynos4_read_count_32();
}
+#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;
static cycles_t exynos4_read_current_timer(void)
@@ -231,14 +232,17 @@ static cycles_t exynos4_read_current_timer(void)
"cycles_t needs to move to 32-bit for ARM64 usage");
return exynos4_read_count_32();
}
+#endif
static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
+#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
exynos4_delay_timer.freq = clk_rate;
register_current_timer_delay(&exynos4_delay_timer);
+#endif
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index b5befc211172..2bac9b6cfeea 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -159,7 +159,7 @@ sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
* half speed or use delayed read latching (errata 13).
*/
if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
- (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
+ (read_cpuid_revision() < ARM_CPU_REV_SA1110_B2 && sd_khz < 62000))
sd_khz /= 2;
sd->mdcnfg = MDCNFG & 0x007f007f;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 389ade4572be..ab264d393233 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,6 +14,7 @@
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include "cpuidle.h"
@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
}
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
local_irq_enable();
if (!current_set_polling_and_test()) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1af94e2d1a25..9b035b7d7f4f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -550,4 +550,6 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+source "drivers/crypto/chelsio/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 3c6432dd09d9..ad7250fa1348 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
new file mode 100644
index 000000000000..4ce67fb9a880
--- /dev/null
+++ b/drivers/crypto/chelsio/Kconfig
@@ -0,0 +1,19 @@
+config CRYPTO_DEV_CHELSIO
+ tristate "Chelsio Crypto Co-processor Driver"
+ depends on CHELSIO_T4
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ ---help---
+ The Chelsio Crypto Co-processor driver for T6 adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chcr.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
new file mode 100644
index 000000000000..bebdf06687ad
--- /dev/null
+++ b/drivers/crypto/chelsio/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
+chcr-objs := chcr_core.o chcr_algo.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
new file mode 100644
index 000000000000..e4ddb921d7b3
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -0,0 +1,1525 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Manoj Malviya (manojmalviya@chelsio.com)
+ * Atul Gupta (atul.gupta@chelsio.com)
+ * Jitendra Lulla (jlulla@chelsio.com)
+ * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ * Harsh Jain (harsh@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->ablkctx;
+}
+
+static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->hmacctx;
+}
+
+static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
+{
+ return ctx->dev->u_ctx;
+}
+
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+ return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+}
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
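As a quick check of the flit arithmetic (a flit is 8 bytes in Chelsio terminology), the formula gives for example:

	/*
	 * Worked examples of sgl_len():
	 *   n = 1:  after n--, (3 * 0) / 2 + (0 & 1) + 2 = 2 flits
	 *   n = 4:  after n--, (3 * 3) / 2 + (3 & 1) + 2 = 7 flits
	 *   n = 8:  after n--, (3 * 7) / 2 + (7 & 1) + 2 = 13 flits
	 */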
+
+/*
+ * chcr_handle_resp - Unmap the DMA buffers associated with the request
+ * @req: crypto request
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int error_status)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_req_ctx ctx_req;
+ struct cpl_fw6_pld *fw6_pld;
+ unsigned int digestsize, updated_digestsize;
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
+ ctx_req.ctx.ablk_ctx =
+ ablkcipher_request_ctx(ctx_req.req.ablk_req);
+ if (!error_status) {
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
+ AES_BLOCK_SIZE);
+ }
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
+ ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.ablk_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ablk_ctx->skb);
+ ctx_req.ctx.ablk_ctx->skb = NULL;
+ }
+ break;
+
+ case CRYPTO_ALG_TYPE_AHASH:
+ ctx_req.req.ahash_req = (struct ahash_request *)req;
+ ctx_req.ctx.ahash_ctx =
+ ahash_request_ctx(ctx_req.req.ahash_req);
+ digestsize =
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(
+ ctx_req.req.ahash_req));
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ if (ctx_req.ctx.ahash_ctx->skb)
+ ctx_req.ctx.ahash_ctx->skb = NULL;
+ if (ctx_req.ctx.ahash_ctx->result == 1) {
+ ctx_req.ctx.ahash_ctx->result = 0;
+ memcpy(ctx_req.req.ahash_req->result, input +
+ sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
+ sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+ }
+ kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
+ ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + sgl_len(cnt);
+}
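Combining the two helpers, and assuming a non-immediate request with 64 bytes of headers, 4 page fragments and a linear tail (so cnt = 5), the flit count works out as:

	/*
	 *   calc_tx_flits_ofld() = 64 / 8 + sgl_len(5)
	 *                        = 8 + ((3 * 4) / 2 + (4 & 1) + 2)
	 *                        = 8 + 8 = 16 flits
	 */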
+
+static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+{
+ struct crypto_shash *base_hash = NULL;
+ struct shash_desc *desc;
+
+ switch (ds) {
+ case SHA1_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ break;
+ case SHA224_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ break;
+ case SHA256_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ break;
+ case SHA384_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ break;
+ case SHA512_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ break;
+ }
+ if (IS_ERR(base_hash)) {
+ pr_err("Can not allocate sha-generic algo.\n");
+ return (void *)base_hash;
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+ desc->tfm = base_hash;
+ desc->flags = crypto_shash_get_flags(base_hash);
+ return desc;
+}
+
+static int chcr_compute_partial_hash(struct shash_desc *desc,
+ char *iopad, char *result_hash,
+ int digest_size)
+{
+ struct sha1_state sha1_st;
+ struct sha256_state sha256_st;
+ struct sha512_state sha512_st;
+ int error;
+
+ if (digest_size == SHA1_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha1_st);
+ memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
+ } else if (digest_size == SHA224_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha256_st);
+ memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+ } else if (digest_size == SHA256_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha256_st);
+ memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+ } else if (digest_size == SHA384_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha512_st);
+ memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+
+ } else if (digest_size == SHA512_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha512_st);
+ memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+ } else {
+ error = -EINVAL;
+ pr_err("Unknown digest size %d\n", digest_size);
+ }
+ return error;
+}
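chcr_compute_partial_hash() is the usual HMAC precomputation step: since HMAC(K, m) = H((K XOR opad) || H((K XOR ipad) || m)) and both padded keys are exactly one hash block long, the compression-function state after that single block can be computed once per key (via crypto_shash_export() above) and handed to the hardware, which then only has to hash the message and, later, the inner digest.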
+
+static void chcr_change_order(char *buf, int ds)
+{
+ int i;
+
+ if (ds == SHA512_DIGEST_SIZE) {
+ for (i = 0; i < (ds / sizeof(u64)); i++)
+ *((__be64 *)buf + i) =
+ cpu_to_be64(*((u64 *)buf + i));
+ } else {
+ for (i = 0; i < (ds / sizeof(u32)); i++)
+ *((__be32 *)buf + i) =
+ cpu_to_be32(*((u32 *)buf + i));
+ }
+}
+
+static inline int is_hmac(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
+ alg.hash);
+ if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
+ CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ return 1;
+ return 0;
+}
+
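+/* Count the scatterlist entries and the total number of bytes they cover. */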
+static inline unsigned int ch_nents(struct scatterlist *sg,
+ unsigned int *total_size)
+{
+ unsigned int nents;
+
+ for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
+ nents++;
+ *total_size += sg->length;
+ }
+ return nents;
+}
+
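+/*
+ * Build the CPL_RX_PHYS_DSGL that tells the hardware where to DMA the
+ * result: addresses and lengths are packed eight per phys_sge_pairs block,
+ * and the last entry is clamped to the output buffer size (obsize).
+ */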
+static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
+{
+ struct phys_sge_pairs *to;
+ unsigned int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+
+ phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
+ | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
+ phys_cpl->pcirlxorder_to_noofsgentr =
+ htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
+ CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
+ CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
+ CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
+ CPL_RX_PHYS_DSGL_DCAID_V(0) |
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
+ phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
+ phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+ phys_cpl->rss_hdr_int.hash_val = 0;
+ to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
+ sizeof(struct cpl_rx_phys_dsgl));
+
+ for (i = 0; nents; to++) {
+ for (j = i; (nents && (j < (8 + i))); j++, nents--) {
+ to->len[j] = htons(sg->length);
+ to->addr[j] = cpu_to_be64(sg_dma_address(sg));
+ if (out_buf_size) {
+ if (tot_len + sg_dma_len(sg) >= out_buf_size) {
+ to->len[j] = htons(out_buf_size -
+ tot_len);
+ return;
+ }
+ tot_len += sg_dma_len(sg);
+ }
+ sg = sg_next(sg);
+ }
+ }
+}
+
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
+{
+ if (!sg || !sg_param->nents)
+ return 0;
+
+ sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
+ if (sg_param->nents == 0) {
+ pr_err("CHCR : DMA mapping failed\n");
+ return -EINVAL;
+ }
+ write_phys_cpl(phys_cpl, sg, sg_param);
+ return 0;
+}
+
+static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.crypto);
+
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
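+/*
+ * Attach the scatterlist pages to the skb as page fragments (taking a
+ * reference on each page) so the payload is carried behind the work
+ * request header without copying.
+ */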
+static inline void
+write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+ struct scatterlist *sg, unsigned int count)
+{
+ struct page *spage;
+ unsigned int page_len;
+
+ skb->len += count;
+ skb->data_len += count;
+ skb->truesize += count;
+ while (count > 0) {
+ if (sg && (!(sg->length)))
+ break;
+ spage = sg_page(sg);
+ get_page(spage);
+ page_len = min(sg->length, count);
+ skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
+ (*frags)++;
+ count -= page_len;
+ sg = sg_next(sg);
+ }
+}
+
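+/*
+ * Build the decryption key material: for AES-CBC this is the reverse
+ * round key of the cipher key; otherwise (AES-XTS) the second half of the
+ * key is copied as-is and the reverse round key of the first half is
+ * placed after it.
+ */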
+static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
+ struct _key_ctx *key_ctx)
+{
+ if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
+ get_aes_decrypt_key(key_ctx->key, ablkctx->key,
+ ablkctx->enckey_len << 3);
+ memset(key_ctx->key + ablkctx->enckey_len, 0,
+ CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ } else {
+ memcpy(key_ctx->key,
+ ablkctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->enckey_len >> 1);
+ get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->key, ablkctx->enckey_len << 2);
+ }
+ return 0;
+}
+
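+/*
+ * Fill the FW_CRYPTO_LOOKASIDE_WR header and the ULP_TX packet/immediate
+ * sub-commands that precede the CPLs. Small requests are sent as immediate
+ * data with the IV inlined; larger ones reference the skb fragments
+ * through a gather list.
+ */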
+static inline void create_wreq(struct chcr_context *ctx,
+ struct fw_crypto_lookaside_wr *wreq,
+ void *req, struct sk_buff *skb,
+ int kctx_len, int hash_sz,
+ unsigned int phys_dsgl)
+{
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
+ struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
+ int iv_loc = IV_DSGL;
+ int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
+ unsigned int immdatalen = 0, nr_frags = 0;
+
+ if (is_ofld_imm(skb)) {
+ immdatalen = skb->data_len;
+ iv_loc = IV_IMMEDIATE;
+ } else {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ }
+
+ wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ (kctx_len >> 4));
+ wreq->pld_size_hash_size =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
+ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+ wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ (calc_tx_flits_ofld(skb) * 8), 16)));
+ wreq->cookie = cpu_to_be64((uintptr_t)req);
+ wreq->rx_chid_to_rx_q_id =
+ FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
+ (hash_sz) ? IV_NOP : iv_loc);
+
+ ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(*wreq)) >> 4)));
+
+ sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
+ sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
+ ((hash_sz) ? DUMMY_BYTES :
+ (sizeof(struct cpl_rx_phys_dsgl) +
+ phys_dsgl)) + immdatalen);
+}
+
+/**
+ * create_cipher_wr - form the WR for cipher operations
+ * @req_base: cipher request (as a crypto_async_request)
+ * @ctx: crypto driver context of the request.
+ * @qid: ingress qid where response of this WR should be received.
+ * @op_type: encryption or decryption
+ */
+static struct sk_buff
+*create_cipher_wr(struct crypto_async_request *req_base,
+ struct chcr_context *ctx, unsigned short qid,
+ unsigned short op_type)
+{
+ struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct sk_buff *skb = NULL;
+ struct _key_ctx *key_ctx;
+ struct fw_crypto_lookaside_wr *wreq;
+ struct cpl_tx_sec_pdu *sec_cpl;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct phys_sge_parm sg_param;
+ unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+
+ if (!req->info)
+ return ERR_PTR(-EINVAL);
+ ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
+ ablkctx->enc = op_type;
+
+ if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ return ERR_PTR(-EINVAL);
+
+ phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+
+ kctx_len = sizeof(*key_ctx) +
+ (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
+ GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+ wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
+
+ sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
+ sec_cpl->op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
+
+ sec_cpl->pldlen = htonl(ivsize + req->nbytes);
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
+ ivsize + 1, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
+ 0, 0);
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ ablkctx->ciph_mode,
+ 0, 0, ivsize >> 1, 1);
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 1, phys_dsgl);
+
+ key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
+ key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (generate_copy_rrkey(ablkctx, key_ctx))
+ goto map_fail1;
+ } else {
+ if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
+ memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ } else {
+ memcpy(key_ctx->key, ablkctx->key +
+ (ablkctx->enckey_len >> 1),
+ ablkctx->enckey_len >> 1);
+ memcpy(key_ctx->key +
+ (ablkctx->enckey_len >> 1),
+ ablkctx->key,
+ ablkctx->enckey_len >> 1);
+ }
+ }
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
+
+ memcpy(ablkctx->iv, req->info, ivsize);
+ sg_init_table(&ablkctx->iv_sg, 1);
+ sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
+ sg_param.nents = ablkctx->dst_nents;
+ sg_param.obsize = dst_bufsize;
+ sg_param.qid = qid;
+ sg_param.align = 1;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
+ &sg_param))
+ goto map_fail1;
+
+ skb_set_transport_header(skb, transhdr_len);
+ write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
+ write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
+ req_ctx->skb = skb;
+ skb_get(skb);
+ return skb;
+map_fail1:
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
+ unsigned int ck_size, context_size;
+ u16 alignment = 0;
+
+ if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
+ goto badkey_err;
+
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ alignment = 8;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ goto badkey_err;
+ }
+
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+ keylen + alignment) >> 4;
+
+ ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+ 0, 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
+ return 0;
+badkey_err:
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ ablkctx->enckey_len = 0;
+ return -EINVAL;
+}
+
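+/*
+ * Peek at the offload TX queue's "full" flag under its lock so callers
+ * can back off with -EBUSY instead of queueing more work.
+ */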
+static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+{
+ int ret = 0;
+ struct sge_ofld_txq *q;
+ struct adapter *adap = netdev2adap(dev);
+
+ local_bh_disable();
+ q = &adap->sge.ofldtxq[idx];
+ spin_lock(&q->sendq.lock);
+ if (q->full)
+ ret = -1;
+ spin_unlock(&q->sendq.lock);
+ local_bh_enable();
+ return ret;
+}
+
+static int chcr_aes_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_async_request *req_base = &req->base;
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ skb = create_cipher_wr(req_base, ctx,
+ u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ CHCR_ENCRYPT_OP);
+ if (IS_ERR(skb)) {
+ pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_aes_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+ struct crypto_async_request *req_base = &req->base;
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ CHCR_DECRYPT_OP);
+ if (IS_ERR(skb)) {
+ pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_device_init(struct chcr_context *ctx)
+{
+ struct uld_ctx *u_ctx;
+ unsigned int id;
+ int err = 0, rxq_perchan, rxq_idx;
+
+ id = smp_processor_id();
+ if (!ctx->dev) {
+ err = assign_chcr_device(&ctx->dev);
+ if (err) {
+ pr_err("chcr device assignment fails\n");
+ goto out;
+ }
+ u_ctx = ULD_CTX(ctx);
+ rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+ ctx->dev->tx_channel_id = 0;
+ rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+ spin_lock(&ctx->dev->lock_chcr_dev);
+ ctx->tx_channel_id = rxq_idx;
+ spin_unlock(&ctx->dev->lock_chcr_dev);
+ }
+out:
+ return err;
+}
+
+static int chcr_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
+ return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static int get_alg_config(struct algo_param *params,
+ unsigned int auth_size)
+{
+ switch (auth_size) {
+ case SHA1_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
+ params->result_size = SHA1_DIGEST_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
+ params->result_size = SHA256_DIGEST_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
+ params->result_size = SHA256_DIGEST_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
+ params->result_size = SHA512_DIGEST_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
+ params->result_size = SHA512_DIGEST_SIZE;
+ break;
+ default:
+ pr_err("chcr : ERROR, unsupported digest size\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
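+/*
+ * Copy the buffered partial block into a freshly allocated DMA-able page
+ * and attach it to the skb as a fragment. The pointer is stashed in the
+ * request context (dummy_payload_ptr), presumably so it can be released
+ * once the request completes.
+ */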
+static inline int
+write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
+ struct sk_buff *skb, unsigned int *frags, char *bfr,
+ u8 bfr_len)
+{
+ void *page_ptr = NULL;
+
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
+ if (!page_ptr)
+ return -ENOMEM;
+ get_page(virt_to_page(page_ptr));
+ req_ctx->dummy_payload_ptr = page_ptr;
+ memcpy(page_ptr, bfr, bfr_len);
+ skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
+ offset_in_page(page_ptr), bfr_len);
+ (*frags)++;
+ return 0;
+}
+
+/**
+ * create_final_hash_wr - Create hash work request
+ * @req: ahash request
+ * @param: hash work request parameters
+ */
+static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct sk_buff *skb = NULL;
+ struct _key_ctx *key_ctx;
+ struct fw_crypto_lookaside_wr *wreq;
+ struct cpl_tx_sec_pdu *sec_cpl;
+ unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ unsigned int kctx_len = sizeof(*key_ctx);
+ u8 hash_size_in_response = 0;
+
+ iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
+ kctx_len += param->alg_prm.result_size + iopad_alignment;
+ if (param->opad_needed)
+ kctx_len += param->alg_prm.result_size + iopad_alignment;
+
+ if (req_ctx->result)
+ hash_size_in_response = digestsize;
+ else
+ hash_size_in_response = param->alg_prm.result_size;
+ transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
+ GFP_ATOMIC);
+ if (!skb)
+ return skb;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+ wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
+ memset(wreq, 0, transhdr_len);
+
+ sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
+ sec_cpl->op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
+ sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+
+ sec_cpl->aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
+ sec_cpl->cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
+ sec_cpl->seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
+ param->opad_needed, 0, 0);
+
+ sec_cpl->ivgen_hdrlen =
+ FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
+
+ key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
+ memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+
+ if (param->opad_needed)
+ memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
+ hmacctx->opad, param->alg_prm.result_size);
+
+ key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ param->alg_prm.mk_size, 0,
+ param->opad_needed,
+ (kctx_len >> 4));
+ sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+
+ skb_set_transport_header(skb, transhdr_len);
+ if (param->bfr_len != 0)
+ write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
+ param->bfr_len);
+ if (param->sg_len != 0)
+ write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+
+ create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
+ 0);
+ req_ctx->skb = skb;
+ skb_get(skb);
+ return skb;
+}
+
+static int chcr_ahash_update(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ u8 remainder = 0, bs;
+ unsigned int nbytes = req->nbytes;
+ struct hash_wr_param params;
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+ u_ctx = ULD_CTX(ctx);
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ if (nbytes + req_ctx->bfr_len >= bs) {
+ remainder = (nbytes + req_ctx->bfr_len) % bs;
+ nbytes = nbytes + req_ctx->bfr_len - remainder;
+ } else {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
+ req_ctx->bfr_len, nbytes, 0);
+ req_ctx->bfr_len += nbytes;
+ return 0;
+ }
+
+ params.opad_needed = 0;
+ params.more = 1;
+ params.last = 0;
+ params.sg_len = nbytes - req_ctx->bfr_len;
+ params.bfr_len = req_ctx->bfr_len;
+ params.scmd1 = 0;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ req_ctx->result = 0;
+ req_ctx->data_len += params.sg_len + params.bfr_len;
+ skb = create_final_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
+ req_ctx->bfr_len = remainder;
+ if (remainder)
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ req_ctx->bfr, remainder, req->nbytes -
+ remainder);
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+
+ return -EINPROGRESS;
+}
+
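+/*
+ * Build a final padding block: 0x80 followed by zeroes and the 64-bit
+ * message length (in bits) in the last eight bytes of the block, so the
+ * hardware can close the hash when there is no residual data to send.
+ */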
+static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
+{
+ memset(bfr_ptr, 0, bs);
+ *bfr_ptr = 0x80;
+ if (bs == 64)
+ *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
+ else
+ *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
+}
+
+static int chcr_ahash_final(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
+ struct hash_wr_param params;
+ struct sk_buff *skb;
+ struct uld_ctx *u_ctx = NULL;
+ u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+ u_ctx = ULD_CTX(ctx);
+ if (is_hmac(crypto_ahash_tfm(rtfm)))
+ params.opad_needed = 1;
+ else
+ params.opad_needed = 0;
+ params.sg_len = 0;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ req_ctx->result = 1;
+ params.bfr_len = req_ctx->bfr_len;
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+ if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
+ create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.bfr_len = bs;
+
+ } else {
+ params.scmd1 = req_ctx->data_len;
+ params.last = 1;
+ params.more = 0;
+ }
+ skb = create_final_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_ahash_finup(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ u_ctx = ULD_CTX(ctx);
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ if (is_hmac(crypto_ahash_tfm(rtfm)))
+ params.opad_needed = 1;
+ else
+ params.opad_needed = 0;
+
+ params.sg_len = req->nbytes;
+ params.bfr_len = req_ctx->bfr_len;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+ req_ctx->result = 1;
+ if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.bfr_len = bs;
+ } else {
+ params.scmd1 = req_ctx->data_len;
+ params.last = 1;
+ params.more = 0;
+ }
+
+ skb = create_final_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+
+ return -EINPROGRESS;
+}
+
+static int chcr_ahash_digest(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+
+ rtfm->init(req);
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+ u_ctx = ULD_CTX(ctx);
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ if (is_hmac(crypto_ahash_tfm(rtfm)))
+ params.opad_needed = 1;
+ else
+ params.opad_needed = 0;
+
+ params.last = 0;
+ params.more = 0;
+ params.sg_len = req->nbytes;
+ params.bfr_len = 0;
+ params.scmd1 = 0;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ req_ctx->result = 1;
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+
+ if (req_ctx->bfr && req->nbytes == 0) {
+ create_last_hash_block(req_ctx->bfr, bs, 0);
+ params.more = 1;
+ params.bfr_len = bs;
+ }
+
+ skb = create_final_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_ahash_export(struct ahash_request *areq, void *out)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct chcr_ahash_req_ctx *state = out;
+
+ state->bfr_len = req_ctx->bfr_len;
+ state->data_len = req_ctx->data_len;
+ memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->partial_hash, req_ctx->partial_hash,
+ CHCR_HASH_MAX_DIGEST_SIZE);
+ return 0;
+}
+
+static int chcr_ahash_import(struct ahash_request *areq, const void *in)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
+
+ req_ctx->bfr_len = state->bfr_len;
+ req_ctx->data_len = state->data_len;
+ req_ctx->dummy_payload_ptr = NULL;
+ memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(req_ctx->partial_hash, state->partial_hash,
+ CHCR_HASH_MAX_DIGEST_SIZE);
+ return 0;
+}
+
+static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int i, err = 0, updated_digestsize;
+
+ /*
+ * Use the key to calculate the ipad and opad. ipad will be sent with
+ * the first request's data and opad with the final hash result. The
+ * precomputed values are kept in hmacctx->ipad and hmacctx->opad.
+ */
+ if (!hmacctx->desc)
+ return -EINVAL;
+ if (keylen > bs) {
+ err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ hmacctx->ipad);
+ if (err)
+ goto out;
+ keylen = digestsize;
+ } else {
+ memcpy(hmacctx->ipad, key, keylen);
+ }
+ memset(hmacctx->ipad + keylen, 0, bs - keylen);
+ memcpy(hmacctx->opad, hmacctx->ipad, bs);
+
+ for (i = 0; i < bs / sizeof(int); i++) {
+ *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
+ *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
+ }
+
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ hmacctx->ipad, digestsize);
+ if (err)
+ goto out;
+ chcr_change_order(hmacctx->ipad, updated_digestsize);
+
+ err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ hmacctx->opad, digestsize);
+ if (err)
+ goto out;
+ chcr_change_order(hmacctx->opad, updated_digestsize);
+out:
+ return err;
+}
+
+static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ int status = 0;
+ unsigned short context_size = 0;
+
+ if ((key_len == (AES_KEYSIZE_128 << 1)) ||
+ (key_len == (AES_KEYSIZE_256 << 1))) {
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
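+ /*
+ * key_len here is twice the AES key size (XTS carries two keys),
+ * so a 32-byte key means AES-128 and a 64-byte key means AES-256.
+ */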
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ ablkctx->enckey_len = 0;
+ status = -EINVAL;
+ }
+ return status;
+}
+
+static int chcr_sha_init(struct ahash_request *areq)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ int digestsize = crypto_ahash_digestsize(tfm);
+
+ req_ctx->data_len = 0;
+ req_ctx->dummy_payload_ptr = NULL;
+ req_ctx->bfr_len = 0;
+ req_ctx->skb = NULL;
+ req_ctx->result = 0;
+ copy_hash_init_values(req_ctx->partial_hash, digestsize);
+ return 0;
+}
+
+static int chcr_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct chcr_ahash_req_ctx));
+ return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static int chcr_hmac_init(struct ahash_request *areq)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
+ struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ unsigned int digestsize = crypto_ahash_digestsize(rtfm);
+ unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+ chcr_sha_init(areq);
+ req_ctx->data_len = bs;
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ if (digestsize == SHA224_DIGEST_SIZE)
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ SHA256_DIGEST_SIZE);
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ SHA512_DIGEST_SIZE);
+ else
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ digestsize);
+ }
+ return 0;
+}
+
+static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ unsigned int digestsize =
+ crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct chcr_ahash_req_ctx));
+ hmacctx->desc = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->desc))
+ return PTR_ERR(hmacctx->desc);
+ return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static void chcr_free_shash(struct shash_desc *desc)
+{
+ crypto_free_shash(desc->tfm);
+ kfree(desc);
+}
+
+static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+
+ if (hmacctx->desc) {
+ chcr_free_shash(hmacctx->desc);
+ hmacctx->desc = NULL;
+ }
+}
+
+static struct chcr_alg_template driver_algs[] = {
+ /* AES-CBC */
+ {
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .is_registered = 0,
+ .alg.crypto = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc(aes-chcr)",
+ .cra_priority = CHCR_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context)
+ + sizeof(struct ablk_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = chcr_cra_init,
+ .cra_exit = NULL,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_cbc_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .is_registered = 0,
+ .alg.crypto = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts(aes-chcr)",
+ .cra_priority = CHCR_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct ablk_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = chcr_cra_init,
+ .cra_exit = NULL,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_xts_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ }
+ }
+ },
+ /* SHA */
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-chcr",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-chcr",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-chcr",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-chcr",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-chcr",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ }
+ }
+ },
+ /* HMAC */
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ }
+ }
+ },
+};
+
+/*
+ * chcr_unregister_alg - Deregister crypto algorithms from the kernel
+ * crypto framework.
+ */
+static int chcr_unregister_alg(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_alg(
+ &driver_algs[i].alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_ahash(
+ &driver_algs[i].alg.hash);
+ break;
+ }
+ driver_algs[i].is_registered = 0;
+ }
+ return 0;
+}
+
+#define SZ_AHASH_CTX sizeof(struct chcr_context)
+#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
+#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
+#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
+
+/*
+ * chcr_register_alg - Register crypto algorithms with the kernel crypto
+ * framework.
+ */
+static int chcr_register_alg(void)
+{
+ struct crypto_alg ai;
+ struct ahash_alg *a_hash;
+ int err = 0, i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ if (driver_algs[i].is_registered)
+ continue;
+ switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ err = crypto_register_alg(&driver_algs[i].alg.crypto);
+ name = driver_algs[i].alg.crypto.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ a_hash = &driver_algs[i].alg.hash;
+ a_hash->update = chcr_ahash_update;
+ a_hash->final = chcr_ahash_final;
+ a_hash->finup = chcr_ahash_finup;
+ a_hash->digest = chcr_ahash_digest;
+ a_hash->export = chcr_ahash_export;
+ a_hash->import = chcr_ahash_import;
+ a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
+ a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
+ a_hash->halg.base.cra_module = THIS_MODULE;
+ a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+ a_hash->halg.base.cra_alignmask = 0;
+ a_hash->halg.base.cra_exit = NULL;
+ a_hash->halg.base.cra_type = &crypto_ahash_type;
+
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
+ a_hash->halg.base.cra_init = chcr_hmac_cra_init;
+ a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
+ a_hash->init = chcr_hmac_init;
+ a_hash->setkey = chcr_ahash_setkey;
+ a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
+ } else {
+ a_hash->init = chcr_sha_init;
+ a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
+ a_hash->halg.base.cra_init = chcr_sha_cra_init;
+ }
+ err = crypto_register_ahash(&driver_algs[i].alg.hash);
+ ai = driver_algs[i].alg.hash.halg.base;
+ name = ai.cra_driver_name;
+ break;
+ }
+ if (err) {
+ pr_err("chcr : %s : Algorithm registration failed\n",
+ name);
+ goto register_err;
+ } else {
+ driver_algs[i].is_registered = 1;
+ }
+ }
+ return 0;
+
+register_err:
+ chcr_unregister_alg();
+ return err;
+}
+
+/*
+ * start_crypto - Register the crypto algorithms.
+ * This should be called once when the first device comes up. After this
+ * the kernel will start calling driver APIs for crypto operations.
+ */
+int start_crypto(void)
+{
+ return chcr_register_alg();
+}
+
+/*
+ * stop_crypto - Deregister all the crypto algorithms from the kernel.
+ * This should be called once when the last device goes down. After this
+ * the kernel will not call the driver APIs for crypto operations.
+ */
+int stop_crypto(void)
+{
+ chcr_unregister_alg();
+ return 0;
+}
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
new file mode 100644
index 000000000000..ec64fbcdeb49
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -0,0 +1,471 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_ALGO_H__
+#define __CHCR_ALGO_H__
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S 24
+#define KEY_CONTEXT_CTX_LEN_M 0xff
+#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
+#define KEY_CONTEXT_CTX_LEN_G(x) \
+ (((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)
+
+#define KEY_CONTEXT_DUAL_CK_S 12
+#define KEY_CONTEXT_DUAL_CK_M 0x1
+#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S)
+#define KEY_CONTEXT_DUAL_CK_G(x) \
+(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M)
+#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U)
+
+#define KEY_CONTEXT_SALT_PRESENT_S 10
+#define KEY_CONTEXT_SALT_PRESENT_M 0x1
+#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_G(x) \
+ (((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \
+ KEY_CONTEXT_SALT_PRESENT_M)
+#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S 0
+#define KEY_CONTEXT_VALID_M 0x1
+#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_G(x) \
+ (((x) >> KEY_CONTEXT_VALID_S) & \
+ KEY_CONTEXT_VALID_M)
+#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S 6
+#define KEY_CONTEXT_CK_SIZE_M 0xf
+#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
+#define KEY_CONTEXT_CK_SIZE_G(x) \
+ (((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)
+
+#define KEY_CONTEXT_MK_SIZE_S 2
+#define KEY_CONTEXT_MK_SIZE_M 0xf
+#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
+#define KEY_CONTEXT_MK_SIZE_G(x) \
+ (((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S 11
+#define KEY_CONTEXT_OPAD_PRESENT_M 0x1
+#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_G(x) \
+ (((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \
+ KEY_CONTEXT_OPAD_PRESENT_M)
+#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define CHCR_HASH_MAX_DIGEST_SIZE 64
+#define CHCR_MAX_SHA_DIGEST_SIZE 64
+
+#define IPSEC_TRUNCATED_ICV_SIZE 12
+#define TLS_TRUNCATED_HMAC_SIZE 10
+#define CBCMAC_DIGEST_SIZE 16
+#define MAX_HASH_NAME 20
+
+#define SHA1_INIT_STATE_5X4B 5
+#define SHA256_INIT_STATE_8X4B 8
+#define SHA512_INIT_STATE_8X8B 8
+#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B
+#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B
+#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B
+#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B
+#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B
+
+#define DUMMY_BYTES 16
+
+#define IPAD_DATA 0x36363636
+#define OPAD_DATA 0x5c5c5c5c
+
+#define TRANSHDR_SIZE(alignedkctx_len)\
+ (sizeof(struct ulptx_idata) +\
+ sizeof(struct ulp_txpkt) +\
+ sizeof(struct fw_crypto_lookaside_wr) +\
+ sizeof(struct cpl_tx_sec_pdu) +\
+ (alignedkctx_len))
+#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
+ (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+ sizeof(struct cpl_rx_phys_dsgl))
+#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
+ (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+
+#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
+ sizeof(struct ulp_txpkt) + \
+ sizeof(struct ulptx_idata))
+
+#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+ htonl( \
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
+ CPL_TX_SEC_PDU_RXCHID_V((id)) | \
+ CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
+ CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
+ CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
+
+#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
+ htonl( \
+ CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \
+ CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \
+ CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \
+ CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi)))
+
+#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \
+ htonl( \
+ CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \
+ CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \
+ CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
+ CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
+
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+ htonl( \
+ SCMD_SEQ_NO_CTRL_V(0) | \
+ SCMD_STATUS_PRESENT_V(0) | \
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \
+ SCMD_ENC_DEC_CTRL_V((ctrl)) | \
+ SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \
+ SCMD_CIPH_MODE_V((cmode)) | \
+ SCMD_AUTH_MODE_V((amode)) | \
+ SCMD_HMAC_CTRL_V((opad)) | \
+ SCMD_IV_SIZE_V((size)) | \
+ SCMD_NUM_IVS_V((nivs)))
+
+#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
+ SCMD_ENB_DBGID_V(0) | \
+ SCMD_IV_GEN_CTRL_V(0) | \
+ SCMD_LAST_FRAG_V((last)) | \
+ SCMD_MORE_FRAGS_V((more)) | \
+ SCMD_TLS_COMPPDU_V(0) | \
+ SCMD_KEY_CTX_INLINE_V((ctx_in)) | \
+ SCMD_TLS_FRAG_ENABLE_V(0) | \
+ SCMD_MAC_ONLY_V((mac)) | \
+ SCMD_AADIVDROP_V((ivdrop)) | \
+ SCMD_HDR_LEN_V((len)))
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
+ htonl(KEY_CONTEXT_VALID_V(1) | \
+ KEY_CONTEXT_CK_SIZE_V((ck_size)) | \
+ KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+ KEY_CONTEXT_DUAL_CK_V((d_ck)) | \
+ KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \
+ KEY_CONTEXT_SALT_PRESENT_V(1) | \
+ KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
+ htonl( \
+ FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
+ FW_CRYPTO_LOOKASIDE_WR) | \
+ FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
+
+#define FILL_WR_RX_Q_ID(cid, qid, wr_iv) \
+ htonl( \
+ FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
+ FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
+ FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)))
+
+#define FILL_ULPTX_CMD_DEST(cid) \
+ htonl(ULPTX_CMD_V(ULP_TX_PKT) | \
+ ULP_TXPKT_DEST_V(0) | \
+ ULP_TXPKT_DATAMODIFY_V(0) | \
+ ULP_TXPKT_CHANNELID_V((cid)) | \
+ ULP_TXPKT_RO_V(1) | \
+ ULP_TXPKT_FID_V(0))
+
+#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\
+ _bs == SHA1_DIGEST_SIZE ? 12 : 0; })
+
+#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \
+ htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \
+ sgl_lengths[total_frags] : 0) |\
+ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0))
+
+#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\
+ calc_tx_flits_ofld(skb) * 8), 16)))
+
+#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
+ ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
+
+#define MAX_NK 8
+#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
+
+struct algo_param {
+ unsigned int auth_mode;
+ unsigned int mk_size;
+ unsigned int result_size;
+};
+
+struct hash_wr_param {
+ unsigned int opad_needed;
+ unsigned int more;
+ unsigned int last;
+ struct algo_param alg_prm;
+ unsigned int sg_len;
+ unsigned int bfr_len;
+ u64 scmd1;
+};
+
+enum {
+ AES_KEYLENGTH_128BIT = 128,
+ AES_KEYLENGTH_192BIT = 192,
+ AES_KEYLENGTH_256BIT = 256
+};
+
+enum {
+ KEYLENGTH_3BYTES = 3,
+ KEYLENGTH_4BYTES = 4,
+ KEYLENGTH_6BYTES = 6,
+ KEYLENGTH_8BYTES = 8
+};
+
+enum {
+ NUMBER_OF_ROUNDS_10 = 10,
+ NUMBER_OF_ROUNDS_12 = 12,
+ NUMBER_OF_ROUNDS_14 = 14,
+};
+
+/*
+ * CCM defines values of 4, 6, 8, 10, 12, 14, and 16 octets,
+ * where they indicate the size of the integrity check value (ICV)
+ */
+enum {
+ AES_CCM_ICV_4 = 4,
+ AES_CCM_ICV_6 = 6,
+ AES_CCM_ICV_8 = 8,
+ AES_CCM_ICV_10 = 10,
+ AES_CCM_ICV_12 = 12,
+ AES_CCM_ICV_14 = 14,
+ AES_CCM_ICV_16 = 16
+};
+
+struct hash_op_params {
+ unsigned char mk_size;
+ unsigned char pad_align;
+ unsigned char auth_mode;
+ char hash_name[MAX_HASH_NAME];
+ unsigned short block_size;
+ unsigned short word_size;
+ unsigned short ipad_size;
+};
+
+struct phys_sge_pairs {
+ __be16 len[8];
+ __be64 addr[8];
+};
+
+struct phys_sge_parm {
+ unsigned int nents;
+ unsigned int obsize;
+ unsigned short qid;
+ unsigned char align;
+};
+
+struct crypto_result {
+ struct completion completion;
+ int err;
+};
+
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+
+static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = {
+ SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
+ SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
+};
+
+static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = {
+ SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
+ SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
+};
+
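+/*
+ * Load the standard initial hash values for the requested digest size;
+ * they seed the partial hash that is later programmed into the key
+ * context.
+ */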
+static inline void copy_hash_init_values(char *key, int digestsize)
+{
+ u8 i;
+ __be32 *dkey = (__be32 *)key;
+ u64 *ldkey = (u64 *)key;
+ __be64 *sha384 = (__be64 *)sha384_init;
+ __be64 *sha512 = (__be64 *)sha512_init;
+
+ switch (digestsize) {
+ case SHA1_DIGEST_SIZE:
+ for (i = 0; i < SHA1_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(sha1_init[i]);
+ break;
+ case SHA224_DIGEST_SIZE:
+ for (i = 0; i < SHA224_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(sha224_init[i]);
+ break;
+ case SHA256_DIGEST_SIZE:
+ for (i = 0; i < SHA256_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(sha256_init[i]);
+ break;
+ case SHA384_DIGEST_SIZE:
+ for (i = 0; i < SHA384_INIT_STATE; i++)
+ ldkey[i] = be64_to_cpu(sha384[i]);
+ break;
+ case SHA512_DIGEST_SIZE:
+ for (i = 0; i < SHA512_INIT_STATE; i++)
+ ldkey[i] = be64_to_cpu(sha512[i]);
+ break;
+ }
+}
+
+static const u8 sgl_lengths[20] = {
+ 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
+};
+
+/* Size of the len[] array of one phys_sge_pairs block: 8 fields * 2 bytes */
+#define PHYSDSGL_MAX_LEN_SIZE 16
+
+static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr)
+{
+ /*
+ * One 16-byte block of len[] fields per 8 entries, plus an 8-byte
+ * address per entry, with 8 bytes of padding when the entry count is
+ * odd (apparently to keep the total 16-byte aligned).
+ */
+ return ((sgl_entr >> 3) + ((sgl_entr % 8) ?
+ 1 : 0)) * PHYSDSGL_MAX_LEN_SIZE +
+ (sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3);
+}
+
+/* The AES s-transform matrix (s-box). */
+static const u8 aes_sbox[256] = {
+ 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
+ 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
+ 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
+ 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7,
+ 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90,
+ 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32,
+ 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170,
+ 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81,
+ 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243,
+ 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100,
+ 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184,
+ 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194,
+ 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78,
+ 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166,
+ 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102,
+ 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248,
+ 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
+ 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84,
+ 187, 22
+};
+
+static u32 aes_ks_subword(const u32 w)
+{
+ u8 bytes[4];
+
+ *(u32 *)(&bytes[0]) = w;
+ bytes[0] = aes_sbox[bytes[0]];
+ bytes[1] = aes_sbox[bytes[1]];
+ bytes[2] = aes_sbox[bytes[2]];
+ bytes[3] = aes_sbox[bytes[3]];
+ return *(u32 *)(&bytes[0]);
+}
+
+static const u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+/* get_aes_decrypt_key - derive the reverse round key used for decryption:
+ * run the standard AES key expansion and copy out the last Nk round-key
+ * words in reverse order.
+ *
+ * dec_key - OUTPUT - Reverse round key
+ * key - INPUT - cipher key
+ * keylength - INPUT - length of the key in number of bits
+ */
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k = 0;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
+#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
new file mode 100644
index 000000000000..fb5f9bbfa09c
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -0,0 +1,238 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written and Maintained by:
+ * Manoj Malviya (manojmalviya@chelsio.com)
+ * Atul Gupta (atul.gupta@chelsio.com)
+ * Jitendra Lulla (jlulla@chelsio.com)
+ * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ * Harsh Jain (harsh@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "cxgb4_uld.h"
+
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+static atomic_t dev_count;
+
+typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
+static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+
+static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_FW6_PLD] = cpl_fw6_pld_handler,
+};
+
+static struct cxgb4_uld_info chcr_uld_info = {
+ .name = DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .add = chcr_uld_add,
+ .state_change = chcr_uld_state_change,
+ .rx_handler = chcr_uld_rx_handler,
+};
+
+int assign_chcr_device(struct chcr_dev **dev)
+{
+ struct uld_ctx *u_ctx;
+
+ /*
+ * TODO: If multiple devices are available, select one, perhaps in a
+ * round-robin fashion. All requests of one session must go to the same
+ * device to maintain ordering.
+ */
+ mutex_lock(&dev_mutex); /* TODO ? */
+ u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
+ if (!u_ctx) {
+ mutex_unlock(&dev_mutex);
+ return -ENXIO;
+ }
+
+ *dev = u_ctx->dev;
+ mutex_unlock(&dev_mutex);
+ return 0;
+}
+
+static int chcr_dev_add(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENXIO;
+
+ spin_lock_init(&dev->lock_chcr_dev);
+ u_ctx->dev = dev;
+ dev->u_ctx = u_ctx;
+ atomic_inc(&dev_count);
+ return 0;
+}
+
+static int chcr_dev_remove(struct uld_ctx *u_ctx)
+{
+ kfree(u_ctx->dev);
+ u_ctx->dev = NULL;
+ atomic_dec(&dev_count);
+ return 0;
+}
+
+static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+ unsigned char *input)
+{
+ struct crypto_async_request *req;
+ struct cpl_fw6_pld *fw6_pld;
+ u32 ack_err_status = 0;
+ int error_status = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
+ fw6_pld->data[1]);
+
+ ack_err_status =
+ ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
+ if (ack_err_status) {
+ if (CHK_MAC_ERR_BIT(ack_err_status) ||
+ CHK_PAD_ERR_BIT(ack_err_status))
+ error_status = -EINVAL;
+ }
+ /* call completion callback with failure status */
+ if (req) {
+ if (!chcr_handle_resp(req, input, error_status))
+ req->complete(req, error_status);
+ else
+ return -EINVAL;
+ } else {
+ pr_err("Incorrect request address from the firmware\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int chcr_send_wr(struct sk_buff *skb)
+{
+ return cxgb4_ofld_send(skb->dev, skb);
+}
+
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
+{
+ struct uld_ctx *u_ctx;
+
+ /* Create the device and add it in the device list */
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *lld;
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+out:
+ return u_ctx;
+}
+
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl)
+{
+ struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
+ struct chcr_dev *dev = u_ctx->dev;
+ const struct cpl_act_establish *rpl = (struct cpl_act_establish *)rsp;
+
+ if (rpl->ot.opcode != CPL_FW6_PLD) {
+ pr_err("Unsupported opcode\n");
+ return 0;
+ }
+
+ if (!pgl)
+ work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
+ else
+ work_handlers[rpl->ot.opcode](dev, pgl->va);
+ return 0;
+}
+
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct uld_ctx *u_ctx = handle;
+ int ret = 0;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ if (!u_ctx->dev) {
+ ret = chcr_dev_add(u_ctx);
+ if (ret != 0)
+ return ret;
+ }
+ if (atomic_read(&dev_count) == 1)
+ ret = start_crypto();
+ break;
+
+ case CXGB4_STATE_DETACH:
+ if (u_ctx->dev) {
+ mutex_lock(&dev_mutex);
+ chcr_dev_remove(u_ctx);
+ mutex_unlock(&dev_mutex);
+ }
+ if (!atomic_read(&dev_count))
+ stop_crypto();
+ break;
+
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __init chcr_crypto_init(void)
+{
+ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+ pr_err("ULD register fail: No chcr crypto support in cxgb4");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __exit chcr_crypto_exit(void)
+{
+ struct uld_ctx *u_ctx, *tmp;
+
+ if (atomic_read(&dev_count))
+ stop_crypto();
+
+ /* Remove all devices from list */
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ if (u_ctx->dev)
+ chcr_dev_remove(u_ctx);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+}
+
+module_init(chcr_crypto_init);
+module_exit(chcr_crypto_exit);
+
+MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
new file mode 100644
index 000000000000..2a5c671a4232
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -0,0 +1,80 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CORE_H__
+#define __CHCR_CORE_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+
+#define DRV_MODULE_NAME "chcr"
+#define DRV_VERSION "1.0.0.0"
+
+#define MAX_PENDING_REQ_TO_HW 20
+#define CHCR_TEST_RESPONSE_TIMEOUT 1000
+
+#define PAD_ERROR_BIT 1
+#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
+
+#define MAC_ERROR_BIT 0
+#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+
+struct uld_ctx;
+
+struct chcr_dev {
+ /* Requests submitted to hardware and awaiting a response. */
+ spinlock_t lock_chcr_dev;
+ struct crypto_queue pending_queue;
+ struct uld_ctx *u_ctx;
+ unsigned char tx_channel_id;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct chcr_dev *dev;
+};
+
+int assign_chcr_device(struct chcr_dev **dev);
+int chcr_send_wr(struct sk_buff *skb);
+int start_crypto(void);
+int stop_crypto(void);
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl);
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int err);
+#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
new file mode 100644
index 000000000000..d7d75605da8b
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -0,0 +1,203 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CRYPTO_H__
+#define __CHCR_CRYPTO_H__
+
+/* Define the following if the hardware does not drop the AAD and IV data
+ * before handing back the processed data.
+ */
+
+#define CHCR_CRA_PRIORITY 300
+
+#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
+#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
+
+#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
+#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
+
+#define CHCR_GIVENCRYPT_OP 2
+/* CPL/SCMD parameters */
+
+#define CHCR_ENCRYPT_OP 0
+#define CHCR_DECRYPT_OP 1
+
+#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
+#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+
+#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
+#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
+
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+
+#define CHCR_SCMD_AUTH_MODE_NOP 0
+#define CHCR_SCMD_AUTH_MODE_SHA1 1
+#define CHCR_SCMD_AUTH_MODE_SHA224 2
+#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
+#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
+#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
+#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+
+#define CHCR_SCMD_HMAC_CTRL_NOP 0
+#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+
+#define CHCR_SCMD_IVGEN_CTRL_HW 0
+#define CHCR_SCMD_IVGEN_CTRL_SW 1
+/* These are not really MAC key sizes. They are the sizes of the
+ * intermediate state of the SHA engine.
+ */
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
+#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
+#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
+#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
+#define CHCR_KEYCTX_NO_KEY 15
+
+#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
+#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
+#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. flt #4 to #7 */
+
+#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
+#define flits_to_bytes(x) ((x) * 8)
+
+#define IV_NOP 0
+#define IV_IMMEDIATE 1
+#define IV_DSGL 2
+
+#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
+#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
+ CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+
+#define MAX_SALT 4
+#define MAX_SCRATCH_PAD_SIZE 32
+
+#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
+#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
+
+/* Aligned to 128 bit boundary */
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 reserverd;
+ unsigned char key[0];
+};
+
+struct ablk_ctx {
+ u8 enc;
+ unsigned int processed_len;
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ unsigned int dst_nents;
+ struct scatterlist iv_sg;
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char ciph_mode;
+};
+
+struct hmac_ctx {
+ struct shash_desc *desc;
+ u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct __crypto_ctx {
+ struct hmac_ctx hmacctx[0];
+ struct ablk_ctx ablkctx[0];
+};
+
+struct chcr_context {
+ struct chcr_dev *dev;
+ unsigned char tx_channel_id;
+ struct __crypto_ctx crypto_ctx[0];
+};
+
+struct chcr_ahash_req_ctx {
+ u32 result;
+ char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr_len;
+ /* DMA the partial hash in it */
+ u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
+ u64 data_len; /* Data length processed so far */
+ void *dummy_payload_ptr;
+ /* SKB which is being sent to the hardware for processing */
+ struct sk_buff *skb;
+};
+
+struct chcr_blkcipher_req_ctx {
+ struct sk_buff *skb;
+};
+
+struct chcr_alg_template {
+ u32 type;
+ u32 is_registered;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+};
+
+struct chcr_req_ctx {
+ union {
+ struct ahash_request *ahash_req;
+ struct ablkcipher_request *ablk_req;
+ } req;
+ union {
+ struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_blkcipher_req_ctx *ablk_ctx;
+ } ctx;
+};
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
+ struct chcr_context *ctx,
+ unsigned short qid,
+ unsigned short op_type);
+
+#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 25bcfa0b474f..2585821b24ab 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -17,4 +17,17 @@ config SYNC_FILE
Files fds, to the DRM driver for example. More details at
Documentation/sync_file.txt.
+config SW_SYNC
+ bool "Sync File Validation Framework"
+ default n
+ depends on SYNC_FILE
+ depends on DEBUG_FS
+ ---help---
+ A sync object driver that uses a 32bit counter to coordinate
+ synchronization. Useful when there is no hardware primitive backing
+ the synchronization.
+
+ WARNING: improper use of this can result in deadlocking kernel
+ drivers from userspace. Intended for test and debug only.
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index f353db213a81..210a10bfad2b 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,3 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/staging/android/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 115c9174705f..62e8e6dc7953 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -1,5 +1,5 @@
/*
- * drivers/dma-buf/sw_sync.c
+ * Sync File validation framework
*
* Copyright (C) 2012 Google, Inc.
*
@@ -23,8 +23,38 @@
#include "sync_debug.h"
#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
+#include "sync_trace.h"
+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization. Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline; all fences created under this timeline
+ * file descriptor will belong to that timeline.
+ *
+ * The 'sw_sync' file can be opened many times to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_ioctl_create_fence as parameter.
+ *
+ * To increment the timeline counter, the SW_SYNC_IOC_INC ioctl should be
+ * used with the increment as a u32. This will update the last signaled
+ * value of the timeline and signal any fence that has a seqno smaller than
+ * or equal to it.
+ *
+ * struct sw_sync_ioctl_create_fence
+ * @value: the seqno to initialise the fence with
+ * @name: the name of the new sync point
+ * @fence: return the fd of the new sync_file with the created fence
+ */
struct sw_sync_create_fence_data {
__u32 value;
char name[32];
@@ -35,6 +65,7 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
+
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
static const struct fence_ops timeline_fence_ops;
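For illustration only, a minimal userspace sketch of the flow documented in the comment above might look like the following. It is not part of this patch; the debugfs mount point, the 'W' ioctl magic and the fence-fd member of the create structure are assumptions here, since their definitions are not visible in this hunk.

/* Hypothetical userspace sketch of the sw_sync flow described above. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

struct sw_sync_create_fence_data {	/* mirrors the driver structure */
	uint32_t value;
	char name[32];
	int32_t fence;			/* assumed: returned sync_file fd */
};

#define SW_SYNC_IOC_MAGIC	'W'	/* assumed to match the driver */
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, \
					struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC		_IOW(SW_SYNC_IOC_MAGIC, 1, uint32_t)

int main(void)
{
	/* Each open of the sw_sync file creates an independent timeline. */
	int timeline = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data;
	uint32_t inc = 5;

	if (timeline < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.value = 5;		/* fence signals once the counter reaches 5 */
	strncpy(data.name, "example_fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* Advance the counter; this signals the fence created above. */
	if (ioctl(timeline, SW_SYNC_IOC_INC, &inc) < 0)
		return 1;

	close(data.fence);
	close(timeline);
	return 0;
}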
@@ -176,7 +207,7 @@ static void timeline_fence_release(struct fence *fence)
spin_lock_irqsave(fence->lock, flags);
list_del(&pt->child_list);
- if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+ if (!list_empty(&pt->active_list))
list_del(&pt->active_list);
spin_unlock_irqrestore(fence->lock, flags);
diff --git a/drivers/staging/android/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 4c5a85595a85..fab95204cf74 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/sync.c
+ * Sync File validation framework and debug information
*
* Copyright (C) 2012 Google, Inc.
*
diff --git a/drivers/staging/android/sync_debug.h b/drivers/dma-buf/sync_debug.h
index fab66396d421..d269aa6783aa 100644
--- a/drivers/staging/android/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -1,5 +1,5 @@
/*
- * include/linux/sync.h
+ * Sync File validation framework and debug information
*
* Copyright (C) 2012 Google, Inc.
*
diff --git a/drivers/staging/android/trace/sync.h b/drivers/dma-buf/sync_trace.h
index 6b5ce9640ddd..d13d59ff1b85 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/dma-buf/sync_trace.h
@@ -1,11 +1,11 @@
#undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace
#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYNC_H
-#include "../sync_debug.h"
+#include "sync_debug.h"
#include <linux/tracepoint.h>
TRACE_EVENT(sync_timeline,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 739f797b40d9..af63a6bcf564 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -102,7 +102,7 @@ config AXI_DMAC
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
- depends on ARCH_U300
+ depends on ARCH_U300 || COMPILE_TEST
help
Enable support for ST-Ericsson COH 901 318 DMA.
@@ -114,13 +114,13 @@ config DMA_BCM2835
config DMA_JZ4740
tristate "JZ4740 DMA support"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780
+ depends on MACH_JZ4780 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -130,14 +130,14 @@ config DMA_JZ4780
config DMA_OMAP
tristate "OMAP DMA support"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if SOC_DRA7XX
+ select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
config DMA_SA11X0
tristate "SA-11x0 DMA support"
- depends on ARCH_SA1100
+ depends on ARCH_SA1100 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -150,7 +150,6 @@ config DMA_SUN4I
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the DMA controller present in the sun4i,
@@ -167,7 +166,7 @@ config DMA_SUN6I
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
@@ -279,7 +278,7 @@ config INTEL_MIC_X100_DMA
config K3_DMA
tristate "Hisilicon K3 DMA support"
- depends on ARCH_HI3xxx
+ depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -297,16 +296,16 @@ config LPC18XX_DMAMUX
config MMP_PDMA
bool "MMP PDMA support"
- depends on (ARCH_MMP || ARCH_PXA)
+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
config MMP_TDMA
bool "MMP Two-Channel DMA support"
- depends on ARCH_MMP
+ depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
- select MMP_SRAM
+ select MMP_SRAM if ARCH_MMP
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
@@ -316,7 +315,6 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
@@ -439,9 +437,8 @@ config STE_DMA40
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || COMPILE_TEST
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the on-chip DMA controller on STMicroelectronics
@@ -451,7 +448,7 @@ config STM32_DMA
config S3C24XX_DMAC
bool "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -483,10 +480,9 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
bool "NVIDIA Tegra210 ADMA support"
- depends on ARCH_TEGRA_210_SOC
+ depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select PM_CLK
help
Support for the NVIDIA Tegra210 ADMA controller driver. The
DMA controller has multiple DMA channels and is used to service
@@ -497,7 +493,7 @@ config TEGRA210_ADMA
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
- depends on MFD_TIMBERDALE
+ depends on MFD_TIMBERDALE || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Timberdale FPGA DMA engine.
@@ -515,10 +511,10 @@ config TI_DMA_CROSSBAR
config TI_EDMA
bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if ARCH_OMAP
+ select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
default n
help
Enable support for the TI EDMA controller. This DMA
@@ -561,7 +557,7 @@ config XILINX_ZYNQMP_DMA
config ZX_DMA
tristate "ZTE ZX296702 DMA support"
- depends on ARCH_ZX
+ depends on ARCH_ZX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb73b56..a4c8f80db29d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
-
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
@@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
struct at_desc *first = atc_first_active(atchan);
struct dma_async_tx_descriptor *txd = &first->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
dev_vdbg(chan2dev(&atchan->chan_common),
"new cyclic period llp 0x%08x\n",
channel_readl(atchan, DSCR));
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
/*-- IRQ & Tasklet ---------------------------------------------------*/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 832cbd647145..b7d7f2d443a1 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
static void at_xdmac_tasklet(unsigned long data)
@@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 7ce843723003..7a67b8345092 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -82,7 +82,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
/* Get IRQ of that task */
tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
- if (tsk->irq == NO_IRQ)
+ if (!tsk->irq)
goto error;
/* Init the BDs, if needed */
@@ -104,7 +104,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
error:
if (tsk) {
- if (tsk->irq != NO_IRQ)
+ if (tsk->irq)
irq_dispose_mapping(tsk->irq);
bcom_sram_free(tsk->bd);
kfree(tsk->cookie);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e4acd63e42aa..74794c9859f6 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1319,10 +1319,10 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
- i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, l->virt_link_addr);
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
+ ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
+ i, l, l->control, &l->src_addr, &l->dst_addr,
+ &l->link_addr, l->virt_link_addr);
i++;
l = l->virt_link_addr;
}
@@ -1335,7 +1335,7 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;
-static int coh901318_debugfs_read(struct file *file, char __user *buf,
+static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *f_pos)
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
@@ -1352,9 +1352,10 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < U300_DMA_CHANNELS; i++)
- if (started_channels & (1 << i))
+ for (i = 0; i < U300_DMA_CHANNELS; i++) {
+ if (started_channels & (1ULL << i))
tmp += sprintf(tmp, "channel %d\n", i);
+ }
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
@@ -1553,15 +1554,8 @@ coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->active))
- return NULL;
-
- d = list_first_entry(&cohc->active,
- struct coh901318_desc,
- node);
- return d;
+ return list_first_entry_or_null(&cohc->active, struct coh901318_desc,
+ node);
}
static void
@@ -1579,15 +1573,8 @@ coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->queue))
- return NULL;
-
- d = list_first_entry(&cohc->queue,
- struct coh901318_desc,
- node);
- return d;
+ return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
+ node);
}
static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
@@ -1766,7 +1753,7 @@ static int coh901318_resume(struct dma_chan *chan)
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
- unsigned int ch_nr = (unsigned int) chan_id;
+ unsigned long ch_nr = (unsigned long) chan_id;
if (ch_nr == to_coh901318_chan(chan)->id)
return true;
@@ -1888,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin;
unsigned long flags;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
" nbr_active_done %ld\n", __func__,
@@ -1904,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
goto err;
/* locate callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
+ dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
/* sign this job as completed on the channel */
dma_cookie_complete(&cohd_fin->desc);
@@ -1920,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&cohc->lock, flags);
/* Call the callback when we're done */
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&cohc->lock, flags);
@@ -2247,8 +2231,8 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%x dest 0x%x size %d\n",
- __func__, cohc->id, src, dest, size);
+ "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
+ __func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last lli */
@@ -2744,8 +2728,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_of_dma;
platform_set_drvdata(pdev, base);
- dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
- (u32) base->virtbase);
+ dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n",
+ base->virtbase);
return err;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 702112d547c8..d612b2e5abc4 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -75,7 +75,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
lli->link_addr = 0x00000000;
- lli->virt_link_addr = 0x00000000U;
+ lli->virt_link_addr = NULL;
for (i = 1; i < len; i++) {
lli_prev = lli;
@@ -88,7 +88,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
lli->link_addr = 0x00000000;
- lli->virt_link_addr = 0x00000000U;
+ lli->virt_link_addr = NULL;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 4b2317426c8e..bac5f023013b 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -108,6 +108,8 @@ struct cppi41_channel {
unsigned td_queued:1;
unsigned td_seen:1;
unsigned td_desc_seen:1;
+
+ struct list_head node; /* Node for pending list */
};
struct cppi41_desc {
@@ -146,6 +148,9 @@ struct cppi41_dd {
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+ struct list_head pending; /* Pending queued transfers */
+ spinlock_t lock; /* Lock for pending list */
+
/* context for suspend/resume */
unsigned int dma_tdfdq;
};
@@ -331,7 +336,11 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
- c->txd.callback(c->txd.callback_param);
+ dmaengine_desc_get_callback_invoke(&c->txd, NULL);
+
+ /* Paired with cppi41_dma_issue_pending */
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@@ -349,6 +358,12 @@ static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ error = pm_runtime_get_sync(cdd->ddev.dev);
+ if (error < 0)
+ return error;
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@@ -357,11 +372,26 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
if (!c->is_tx)
cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+
return 0;
}
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ error = pm_runtime_get_sync(cdd->ddev.dev);
+ if (error < 0)
+ return;
+
+ WARN_ON(!list_empty(&cdd->pending));
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
@@ -386,21 +416,6 @@ static void push_desc_queue(struct cppi41_channel *c)
u32 desc_phys;
u32 reg;
- desc_phys = lower_32_bits(c->desc_phys);
- desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
- WARN_ON(cdd->chan_busy[desc_num]);
- cdd->chan_busy[desc_num] = c;
-
- reg = (sizeof(struct cppi41_desc) - 24) / 4;
- reg |= desc_phys;
- cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
-}
-
-static void cppi41_dma_issue_pending(struct dma_chan *chan)
-{
- struct cppi41_channel *c = to_cpp41_chan(chan);
- u32 reg;
-
c->residue = 0;
reg = GCR_CHAN_ENABLE;
@@ -418,7 +433,46 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
* before starting the dma engine.
*/
__iowmb();
- push_desc_queue(c);
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = c;
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+}
+
+static void pending_desc(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_add_tail(&c->node, &cdd->pending);
+ spin_unlock_irqrestore(&cdd->lock, flags);
+}
+
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ /* PM runtime paired with dmaengine_desc_get_callback_invoke */
+ error = pm_runtime_get(cdd->ddev.dev);
+ if ((error != -EINPROGRESS) && error < 0) {
+ dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
+ error);
+
+ return;
+ }
+
+ if (likely(pm_runtime_active(cdd->ddev.dev)))
+ push_desc_queue(c);
+ else
+ pending_desc(c);
}
static u32 get_host_pd0(u32 length)
@@ -940,12 +994,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ctrl_mem = of_iomap(dev->of_node, 1);
cdd->sched_mem = of_iomap(dev->of_node, 2);
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
+ spin_lock_init(&cdd->lock);
+ INIT_LIST_HEAD(&cdd->pending);
+
+ platform_set_drvdata(pdev, cdd);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem)
return -ENXIO;
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_get_sync;
@@ -985,7 +1045,9 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_of;
- platform_set_drvdata(pdev, cdd);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_of:
dma_async_device_unregister(&cdd->ddev);
@@ -996,7 +1058,8 @@ err_irq:
err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
- pm_runtime_put(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
@@ -1021,13 +1084,13 @@ static int cppi41_dma_remove(struct platform_device *pdev)
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cppi41_suspend(struct device *dev)
+static int __maybe_unused cppi41_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
@@ -1038,7 +1101,7 @@ static int cppi41_suspend(struct device *dev)
return 0;
}
-static int cppi41_resume(struct device *dev)
+static int __maybe_unused cppi41_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c;
@@ -1062,9 +1125,38 @@ static int cppi41_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+ WARN_ON(!list_empty(&cdd->pending));
+
+ return 0;
+}
+
+static int __maybe_unused cppi41_runtime_resume(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ struct cppi41_channel *c, *_c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+ push_desc_queue(c);
+ list_del(&c->node);
+ }
+ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cppi41_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
+ SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
+ cppi41_runtime_resume,
+ NULL)
+};
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 9689b36c005a..d50273fed715 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -21,8 +21,6 @@
#include <linux/irq.h>
#include <linux/clk.h>
-#include <asm/mach-jz4740/dma.h>
-
#include "virt-dma.h"
#define JZ_DMA_NR_CHANS 6
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index dade7c47ff18..7373b7a555ec 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -324,8 +324,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
sg_dma_address(&sgl[i]),
sg_dma_len(&sgl[i]),
direction);
- if (err < 0)
+ if (err < 0) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
+ }
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
@@ -368,8 +370,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
for (i = 0; i < periods; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
period_len, direction);
- if (err < 0)
+ if (err < 0) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
+ }
buf_addr += period_len;
@@ -396,7 +400,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
-struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8c9f45fd55fc..6b535262ac5d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -997,6 +997,13 @@ int dma_async_device_register(struct dma_device *device)
}
chan->client_count = 0;
}
+
+ if (!chancnt) {
+ dev_err(device->dev, "%s: device has no channels!\n", __func__);
+ rc = -ENODEV;
+ goto err_out;
+ }
+
device->chancnt = chancnt;
mutex_lock(&dma_list_mutex);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 17f983a4e9ba..882ff9448c3b 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed-in cb struct with what's available in the passed-in
+ * tx descriptor struct.
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool indicating whether the callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
#endif
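The drivers converted later in this series (coh901318, ep93xx, dw and others) all follow the same pattern these helpers enable: snapshot the callback under the channel lock, then invoke it after the lock is dropped. A condensed sketch of that pattern follows; my_chan, my_desc and my_dma_complete are hypothetical placeholders, not symbols from this series.

/*
 * Hypothetical driver completion path using the new callback helpers;
 * my_chan, my_desc and my_dma_complete are placeholders only.
 */
#include <linux/dmaengine.h>
#include <linux/spinlock.h>

#include "dmaengine.h"	/* dmaengine_desc_callback helpers */

struct my_desc {
	struct dma_async_tx_descriptor txd;
};

struct my_chan {
	spinlock_t lock;
};

static void my_dma_complete(struct my_chan *chan, struct my_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	dma_cookie_complete(txd);
	/* Snapshot the callback while the descriptor is still protected. */
	dmaengine_desc_get_callback(txd, &cb);
	spin_unlock_irqrestore(&chan->lock, flags);

	/*
	 * Invoke outside the lock; passing NULL makes the helper substitute
	 * a DMA_TRANS_NOERROR dummy result for callback_result users.
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
}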
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 1245db5438e1..cf76fc6149e5 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -56,10 +56,10 @@ module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
"Number of scatter gather buffers (default: 1)");
-static unsigned int dmatest = 1;
+static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-slave_sg (default: 1)");
+ "dmatest 0-memcpy 1-slave_sg (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -426,7 +426,9 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
- ktime_t ktime;
+ ktime_t ktime, start, diff;
+ ktime_t filltime = ktime_set(0, 0);
+ ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
@@ -503,7 +505,7 @@ static int dmatest_func(void *data)
total_tests++;
/* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
align = dev->copy_align;
else if (thread->type == DMA_XOR)
align = dev->xor_align;
@@ -531,6 +533,7 @@ static int dmatest_func(void *data)
src_off = 0;
dst_off = 0;
} else {
+ start = ktime_get();
src_off = dmatest_random() % (params->buf_size - len + 1);
dst_off = dmatest_random() % (params->buf_size - len + 1);
@@ -541,6 +544,9 @@ static int dmatest_func(void *data)
params->buf_size);
dmatest_init_dsts(thread->dsts, dst_off, len,
params->buf_size);
+
+ diff = ktime_sub(ktime_get(), start);
+ filltime = ktime_add(filltime, diff);
}
um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
@@ -683,6 +689,7 @@ static int dmatest_func(void *data)
continue;
}
+ start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
@@ -703,6 +710,9 @@ static int dmatest_func(void *data)
params->buf_size, dst_off + len,
PATTERN_DST, false);
+ diff = ktime_sub(ktime_get(), start);
+ comparetime = ktime_add(comparetime, diff);
+
if (error_count) {
result("data error", total_tests, src_off, dst_off,
len, error_count);
@@ -712,7 +722,10 @@ static int dmatest_func(void *data)
dst_off, len, 0);
}
}
- runtime = ktime_us_delta(ktime_get(), ktime);
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+ ktime = ktime_sub(ktime, filltime);
+ runtime = ktime_to_us(ktime);
ret = 0;
err_dstbuf:
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index edf053f73a49..c2c0a613cb7a 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -46,9 +46,9 @@
u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
- _dwc->p_master : _dwc->m_master; \
+ _dwc->dws.p_master : _dwc->dws.m_master; \
u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
- _dwc->p_master : _dwc->m_master; \
+ _dwc->dws.p_master : _dwc->dws.m_master; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
| DWC_CTLL_SRC_MSIZE(_smsize) \
@@ -143,12 +143,16 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ bool hs_polarity = dwc->dws.hs_polarity;
if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
return;
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
+
+ /* Set polarity of handshake interface */
+ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -209,7 +213,7 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u8 lms = DWC_LLP_LMS(dwc->m_master);
+ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
unsigned long was_soft_llp;
/* ASSERT: channel is idle */
@@ -270,20 +274,19 @@ static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
bool callback_required)
{
- dma_async_tx_callback callback = NULL;
- void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags);
dma_cookie_complete(txd);
- if (callback_required) {
- callback = txd->callback;
- param = txd->callback_param;
- }
+ if (callback_required)
+ dmaengine_desc_get_callback(txd, &cb);
+ else
+ memset(&cb, 0, sizeof(cb));
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
@@ -292,8 +295,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -662,7 +664,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct dw_desc *prev;
size_t xfer_count;
size_t offset;
- u8 m_master = dwc->m_master;
+ u8 m_master = dwc->dws.m_master;
unsigned int src_width;
unsigned int dst_width;
unsigned int data_width = dw->pdata->data_width[m_master];
@@ -740,7 +742,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo;
- u8 m_master = dwc->m_master;
+ u8 m_master = dwc->dws.m_master;
u8 lms = DWC_LLP_LMS(m_master);
dma_addr_t reg;
unsigned int reg_width;
@@ -895,12 +897,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
return false;
/* We have to copy data since dws can be temporary storage */
-
- dwc->src_id = dws->src_id;
- dwc->dst_id = dws->dst_id;
-
- dwc->m_master = dws->m_master;
- dwc->p_master = dws->p_master;
+ memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
return true;
}
@@ -1167,11 +1164,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
/* Clear custom channel configuration */
- dwc->src_id = 0;
- dwc->dst_id = 0;
-
- dwc->m_master = 0;
- dwc->p_master = 0;
+ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
@@ -1264,7 +1257,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
struct dw_cyclic_desc *retval = NULL;
struct dw_desc *desc;
struct dw_desc *last = NULL;
- u8 lms = DWC_LLP_LMS(dwc->m_master);
+ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
unsigned long was_cyclic;
unsigned int reg_width;
unsigned int periods;
@@ -1576,11 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
-
- /* Check if channel supports multi block transfer */
- channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
- dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
- channel_writel(dwc, LLP, 0);
+ dwc->nollp = pdata->is_nollp;
}
}
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 4b7bd7834046..f65dd104479f 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -245,10 +245,7 @@ struct dw_dma_chan {
bool nollp;
/* custom slave configuration */
- u8 src_id;
- u8 dst_id;
- u8 m_master;
- u8 p_master;
+ struct dw_dma_slave dws;
/* configuration passed via .device_config */
struct dma_slave_config dma_sconfig;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3d277fa76c1a..e18a58068bca 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -263,22 +263,29 @@ static const struct edmacc_param dummy_paramset = {
#define EDMA_BINDING_LEGACY 0
#define EDMA_BINDING_TPCC 1
+static const u32 edma_binding_type[] = {
+ [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
+ [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
+};
+
static const struct of_device_id edma_of_ids[] = {
{
.compatible = "ti,edma3",
- .data = (void *)EDMA_BINDING_LEGACY,
+ .data = &edma_binding_type[EDMA_BINDING_LEGACY],
},
{
.compatible = "ti,edma3-tpcc",
- .data = (void *)EDMA_BINDING_TPCC,
+ .data = &edma_binding_type[EDMA_BINDING_TPCC],
},
{}
};
+MODULE_DEVICE_TABLE(of, edma_of_ids);
static const struct of_device_id edma_tptc_of_ids[] = {
{ .compatible = "ti,edma3-tptc", },
{}
};
+MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
@@ -405,18 +412,12 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
-static inline void set_bits(int offset, int len, unsigned long *p)
+static inline void edma_set_bits(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
set_bit(offset + (len - 1), p);
}
-static inline void clear_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- clear_bit(offset + (len - 1), p);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -464,13 +465,15 @@ static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
-static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
+static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
struct edmacc_param *param)
{
slot = EDMA_CHAN_SLOT(slot);
if (slot >= ecc->num_slots)
- return;
+ return -EINVAL;
memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
+
+ return 0;
}
/**
@@ -1476,13 +1479,15 @@ static void edma_error_handler(struct edma_chan *echan)
struct edma_cc *ecc = echan->ecc;
struct device *dev = echan->vchan.chan.device->dev;
struct edmacc_param p;
+ int err;
if (!echan->edesc)
return;
spin_lock(&echan->vchan.lock);
- edma_read_slot(ecc, echan->slot[0], &p);
+ err = edma_read_slot(ecc, echan->slot[0], &p);
+
/*
* Issue later based on missed flag which will be sure
* to happen as:
@@ -1495,7 +1500,7 @@ static void edma_error_handler(struct edma_chan *echan)
* lead to some nasty recursion when we are in a NULL
* slot. So we avoid doing so and set the missed flag.
*/
- if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
dev_dbg(dev, "Error on null slot, setting miss\n");
echan->missed = 1;
} else {
@@ -2019,8 +2024,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
{
struct edma_soc_info *info;
struct property *prop;
- size_t sz;
- int ret;
+ int sz, ret;
info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
if (!info)
@@ -2182,7 +2186,7 @@ static int edma_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(edma_of_ids, node);
- if (match && (u32)match->data == EDMA_BINDING_TPCC)
+ if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
legacy_mode = false;
info = edma_setup_info_from_dt(dev, legacy_mode);
@@ -2260,7 +2264,7 @@ static int edma_probe(struct platform_device *pdev)
for (i = 0; rsv_slots[i][0] != -1; i++) {
off = rsv_slots[i][0];
ln = rsv_slots[i][1];
- set_bits(off, ln, ecc->slot_inuse);
+ edma_set_bits(off, ln, ecc->slot_inuse);
}
}
}
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 21f08cc3352b..d37e8dda8079 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -262,10 +262,8 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
- if (list_empty(&edmac->active))
- return NULL;
-
- return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+ return list_first_entry_or_null(&edmac->active,
+ struct ep93xx_dma_desc, node);
}
/**
@@ -739,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irq(&edmac->lock);
/*
* If dma_terminate_all() was called before we get to run, the active
@@ -757,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
}
spin_unlock_irq(&edmac->lock);
@@ -771,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
ep93xx_dma_desc_put(edmac, desc);
}
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
@@ -1047,11 +1043,11 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
- size_t sg_len = sg_dma_len(sg);
+ size_t len = sg_dma_len(sg);
- if (sg_len > DMA_MAX_CHAN_BYTES) {
- dev_warn(chan2dev(edmac), "too big transfer size %d\n",
- sg_len);
+ if (len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+ len);
goto fail;
}
@@ -1068,7 +1064,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->src_addr = edmac->runtime_addr;
desc->dst_addr = sg_dma_address(sg);
}
- desc->size = sg_len;
+ desc->size = len;
if (!first)
first = desc;
@@ -1125,7 +1121,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
}
if (period_len > DMA_MAX_CHAN_BYTES) {
- dev_warn(chan2dev(edmac), "too big period length %d\n",
+ dev_warn(chan2dev(edmac), "too big period length %zu\n",
period_len);
return NULL;
}
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index de2a2a2b1d75..db2f9e1653a2 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -134,17 +134,9 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
- dma_async_tx_callback callback;
- void *callback_param;
-
dma_cookie_complete(&desc->async_tx);
-
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback)
- callback(callback_param);
-
dma_descriptor_unmap(&desc->async_tx);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
@@ -670,7 +662,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev,
/* read irq property from dts */
chan->irq = irq_of_parse_and_map(np, 0);
- if (chan->irq == NO_IRQ) {
+ if (!chan->irq) {
dev_err(dev, "No IRQ defined for JR %d\n", q);
ret = -ENODEV;
goto err_free;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 911b7177eb50..51c75bf2b9b6 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -516,13 +516,9 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
if (txd->cookie > 0) {
ret = txd->cookie;
- /* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
- }
-
dma_descriptor_unmap(txd);
+ /* Run the link descriptor callback function */
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
/* Run any dependencies */
@@ -1153,7 +1149,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
struct fsldma_chan *chan;
int i;
- if (fdev->irq != NO_IRQ) {
+ if (fdev->irq) {
dev_dbg(fdev->dev, "free per-controller IRQ\n");
free_irq(fdev->irq, fdev);
return;
@@ -1161,7 +1157,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
- if (chan && chan->irq != NO_IRQ) {
+ if (chan && chan->irq) {
chan_dbg(chan, "free per-channel IRQ\n");
free_irq(chan->irq, chan);
}
@@ -1175,7 +1171,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
int i;
/* if we have a per-controller IRQ, use that */
- if (fdev->irq != NO_IRQ) {
+ if (fdev->irq) {
dev_dbg(fdev->dev, "request per-controller IRQ\n");
ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
"fsldma-controller", fdev);
@@ -1188,7 +1184,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
if (!chan)
continue;
- if (chan->irq == NO_IRQ) {
+ if (!chan->irq) {
chan_err(chan, "interrupts property missing in device tree\n");
ret = -ENODEV;
goto out_unwind;
@@ -1211,7 +1207,7 @@ out_unwind:
if (!chan)
continue;
- if (chan->irq == NO_IRQ)
+ if (!chan->irq)
continue;
free_irq(chan->irq, chan);
@@ -1311,7 +1307,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
list_add_tail(&chan->common.device_node, &fdev->common.channels);
dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
- chan->irq != NO_IRQ ? chan->irq : fdev->irq);
+ chan->irq ? chan->irq : fdev->irq);
return 0;
@@ -1351,7 +1347,7 @@ static int fsldma_of_probe(struct platform_device *op)
if (!fdev->regs) {
dev_err(&op->dev, "unable to ioremap registers\n");
err = -ENOMEM;
- goto out_free_fdev;
+ goto out_free;
}
/* map the channel IRQ if it exists, but don't hookup the handler yet */
@@ -1416,6 +1412,8 @@ static int fsldma_of_probe(struct platform_device *op)
out_free_fdev:
irq_dispose_mapping(fdev->irq);
+ iounmap(fdev->regs);
+out_free:
kfree(fdev);
out_return:
return err;
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index c5f21efd6090..29d04ca71d52 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -200,10 +200,9 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status);
* is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
*
* Return:
- * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ * 0 for invalid channel number, 1 otherwise.
*/
-irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
- u32 status)
+int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
struct hsu_dma_chan *hsuc;
struct hsu_dma_desc *desc;
@@ -211,7 +210,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
/* Sanity check */
if (nr >= chip->hsu->nr_channels)
- return IRQ_NONE;
+ return 0;
hsuc = &chip->hsu->chan[nr];
@@ -230,7 +229,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return IRQ_HANDLED;
+ return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 9916058531d9..b51639f045ed 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -29,7 +29,7 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
u32 dmaisr;
u32 status;
unsigned short i;
- irqreturn_t ret = IRQ_NONE;
+ int ret = 0;
int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
@@ -37,14 +37,14 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
if (dmaisr & 0x1) {
err = hsu_dma_get_status(chip, i, &status);
if (err > 0)
- ret |= IRQ_HANDLED;
+ ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(chip, i, status);
}
dmaisr >>= 1;
}
- return ret;
+ return IRQ_RETVAL(ret);
}
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a960608c0a4d..ab0fb804fb1e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
out:
spin_unlock_irqrestore(&imxdma->lock, flags);
- if (desc->desc.callback)
- desc->desc.callback(desc->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
static int imxdma_terminate_all(struct dma_chan *chan)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 03ec76fc22ff..b9629b2bfc05 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -184,7 +184,7 @@
struct sdma_mode_count {
u32 count : 16; /* size of the buffer pointed by this BD */
u32 status : 8; /* E,R,I,C,W,D status bits stored here */
- u32 command : 8; /* command mostlky used for channel 0 */
+ u32 command : 8; /* command mostly used for channel 0 */
};
/*
@@ -479,6 +479,24 @@ static struct sdma_driver_data sdma_imx6q = {
.script_addrs = &sdma_script_imx6q,
};
+static struct sdma_script_start_addrs sdma_script_imx7d = {
+ .ap_2_ap_addr = 644,
+ .uart_2_mcu_addr = 819,
+ .mcu_2_app_addr = 749,
+ .uartsh_2_mcu_addr = 1034,
+ .mcu_2_shp_addr = 962,
+ .app_2_mcu_addr = 685,
+ .shp_2_mcu_addr = 893,
+ .spdif_2_mcu_addr = 1102,
+ .mcu_2_spdif_addr = 1136,
+};
+
+static struct sdma_driver_data sdma_imx7d = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx7d,
+};
+
static const struct platform_device_id sdma_devtypes[] = {
{
.name = "imx25-sdma",
@@ -499,6 +517,9 @@ static const struct platform_device_id sdma_devtypes[] = {
.name = "imx6q-sdma",
.driver_data = (unsigned long)&sdma_imx6q,
}, {
+ .name = "imx7d-sdma",
+ .driver_data = (unsigned long)&sdma_imx7d,
+ }, {
/* sentinel */
}
};
@@ -511,6 +532,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
+ { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -648,15 +670,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
writel_relaxed(val, sdma->regs + chnenbl);
}
-static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
-{
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
-}
-
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
struct sdma_buffer_descriptor *bd;
+ int error = 0;
+ enum dma_status old_status = sdmac->status;
/*
* loop mode. Iterate over descriptors, re-setup them and
@@ -668,17 +686,41 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
if (bd->mode.status & BD_DONE)
break;
- if (bd->mode.status & BD_RROR)
+ if (bd->mode.status & BD_RROR) {
+ bd->mode.status &= ~BD_RROR;
sdmac->status = DMA_ERROR;
+ error = -EIO;
+ }
+ /*
+ * We use bd->mode.count to calculate the residue, since it contains
+ * the number of bytes present in the current buffer descriptor.
+ */
+
+ sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
+ bd->mode.count = sdmac->period_len;
+
+ /*
+ * The callback is called from the interrupt context in order
+ * to reduce latency and to avoid the risk of altering the
+ * SDMA transaction status by the time the client tasklet is
+ * executed.
+ */
+
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+
sdmac->buf_tail++;
sdmac->buf_tail %= sdmac->num_bd;
+
+ if (error)
+ sdmac->status = old_status;
}
}
-static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+static void mxc_sdma_handle_channel_normal(unsigned long data)
{
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
struct sdma_buffer_descriptor *bd;
int i, error = 0;
@@ -701,18 +743,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
-}
-
-static void sdma_tasklet(unsigned long data)
-{
- struct sdma_channel *sdmac = (struct sdma_channel *) data;
- if (sdmac->flags & IMX_DMA_SG_LOOP)
- sdma_handle_channel_loop(sdmac);
- else
- mxc_sdma_handle_channel_normal(sdmac);
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -731,8 +763,8 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_update_channel_loop(sdmac);
-
- tasklet_schedule(&sdmac->tasklet);
+ else
+ tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
}
@@ -1353,7 +1385,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
u32 residue;
if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+ residue = (sdmac->num_bd - sdmac->buf_tail) *
+ sdmac->period_len - sdmac->chn_real_count;
else
residue = sdmac->chn_count - sdmac->chn_real_count;
@@ -1375,6 +1408,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1424,6 +1458,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
case 3:
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
break;
+ case 4:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
+ break;
default:
dev_err(sdma->dev, "unknown firmware version\n");
goto err_firmware;
@@ -1732,7 +1769,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
- tasklet_init(&sdmac->tasklet, sdma_tasklet,
+ tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
(unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
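
For cyclic SDMA transfers the reported residue now also subtracts the bytes already landed in the current buffer descriptor (chn_real_count, taken from bd->mode.count), instead of counting only whole outstanding periods. A tiny numeric sketch of the before/after formula (values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int num_bd = 4;		/* buffer descriptors in the ring */
	unsigned int buf_tail = 1;		/* descriptors already consumed */
	unsigned int period_len = 1024;		/* bytes per descriptor/period */
	unsigned int chn_real_count = 256;	/* bytes landed in the current BD */
	unsigned int old_residue, new_residue;

	/* Old formula: whole outstanding periods only. */
	old_residue = (num_bd - buf_tail) * period_len;
	/* New formula: also credit the partially filled current descriptor. */
	new_residue = (num_bd - buf_tail) * period_len - chn_real_count;

	printf("residue: old=%u new=%u\n", old_residue, new_residue);	/* 3072 vs 2816 */
	return 0;
}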
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index bd09961443b1..49386ce04bf5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -38,8 +38,54 @@
#include "../dmaengine.h"
+static char *chanerr_str[] = {
+ "DMA Transfer Destination Address Error",
+ "Next Descriptor Address Error",
+ "Descriptor Error",
+ "Chan Address Value Error",
+ "CHANCMD Error",
+ "Chipset Uncorrectable Data Integrity Error",
+ "DMA Uncorrectable Data Integrity Error",
+ "Read Data Error",
+ "Write Data Error",
+ "Descriptor Control Error",
+ "Descriptor Transfer Size Error",
+ "Completion Address Error",
+ "Interrupt Configuration Error",
+ "Super extended descriptor Address Error",
+ "Unaffiliated Error",
+ "CRC or XOR P Error",
+ "XOR Q Error",
+ "Descriptor Count Error",
+ "DIF All F detect Error",
+ "Guard Tag verification Error",
+ "Application Tag verification Error",
+ "Reference Tag verification Error",
+ "Bundle Bit Error",
+ "Result DIF All F detect Error",
+ "Result Guard Tag verification Error",
+ "Result Application Tag verification Error",
+ "Result Reference Tag verification Error",
+ NULL
+};
+
static void ioat_eh(struct ioatdma_chan *ioat_chan);
+static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if ((chanerr >> i) & 1) {
+ if (chanerr_str[i]) {
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
+ } else
+ break;
+ }
+ }
+}
+
/**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id
@@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
+ struct dmaengine_result res;
+
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ res.result = DMA_TRANS_NOERROR;
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
}
if (tx->phys == phys_complete)
@@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ if (chanerr &
+ (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
@@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
__ioat_restart_chan(ioat_chan);
}
+
+static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
+{
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ u16 active;
+ int idx = ioat_chan->tail, i;
+
+ /*
+ * We assume that the failed descriptor has been processed.
+ * Now we just complete all the remaining submitted
+ * descriptors with an aborted status.
+ */
+ active = ioat_ring_active(ioat_chan);
+
+ /* we skip the failed descriptor that tail points to */
+ for (i = 1; i < active; i++) {
+ struct dma_async_tx_descriptor *tx;
+
+ smp_read_barrier_depends();
+ prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+
+ tx = &desc->txd;
+ if (tx->cookie) {
+ struct dmaengine_result res;
+
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ res.result = DMA_TRANS_ABORTED;
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+
+ /* skip extended descriptors */
+ if (desc_has_ext(desc)) {
+ WARN_ON(i + 1 >= active);
+ i++;
+ }
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat_free_sed(ioat_dma, desc->sed);
+ desc->sed = NULL;
+ }
+ }
+
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat_chan->tail = idx + active;
+
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+ ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
+}
+
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
struct pci_dev *pdev = to_pdev(ioat_chan);
@@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
u32 err_handled = 0;
u32 chanerr_int;
u32 chanerr;
+ bool abort = false;
+ struct dmaengine_result res;
/* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
@@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
break;
}
+ if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
+ if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
+ res.result = DMA_TRANS_READ_FAILED;
+ err_handled |= IOAT_CHANERR_READ_DATA_ERR;
+ } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
+ res.result = DMA_TRANS_WRITE_FAILED;
+ err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
+ }
+
+ abort = true;
+ } else
+ res.result = DMA_TRANS_NOERROR;
+
/* fault on unhandled error or spurious halt */
if (chanerr ^ err_handled || chanerr == 0) {
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
__func__, chanerr, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors handled:\n");
+ ioat_print_chanerrs(ioat_chan, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors not handled:\n");
+ ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
+
BUG();
- } else { /* cleanup the faulty descriptor */
- tx = &desc->txd;
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
}
- writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+ /* cleanup the faulty descriptor since we are continuing */
+ tx = &desc->txd;
+ if (tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
/* mark faulting descriptor as complete */
*ioat_chan->completion = desc->txd.phys;
spin_lock_bh(&ioat_chan->prep_lock);
+ /* we need to abort all descriptors */
+ if (abort) {
+ ioat_abort_descs(ioat_chan);
+ /* clean up the channel, we could be in a weird state */
+ ioat_reset_hw(ioat_chan);
+ }
+
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
}
@@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
- if (test_bit(IOAT_RUN, &ioat_chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ if (test_bit(IOAT_RUN, &ioat_chan->state)) {
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ }
+
+ return;
}
spin_lock_bh(&ioat_chan->cleanup_lock);
@@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
- status, chanerr);
- dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
- ioat_ring_active(ioat_chan));
+ dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+ status, chanerr);
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
+ ioat_ring_active(ioat_chan));
spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
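
ioat_print_chanerrs() walks the 32 bits of the CHANERR value and prints a name for every set bit, using the NULL entry in chanerr_str[] as a sentinel. The same decode idea in standalone form (shortened, made-up table, and bounds-checked here rather than relying on a sentinel):

#include <stdio.h>

static const char *chanerr_str_sketch[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
};

static void print_chanerrs_sketch(unsigned int chanerr)
{
	unsigned int i;
	unsigned int known = sizeof(chanerr_str_sketch) / sizeof(chanerr_str_sketch[0]);

	for (i = 0; i < 32; i++) {
		if (!((chanerr >> i) & 1))
			continue;
		if (i < known)
			printf("Err(%u): %s\n", i, chanerr_str_sketch[i]);
		else
			printf("Err(%u): unknown\n", i);
	}
}

int main(void)
{
	print_chanerrs_sketch(0x5);	/* bits 0 and 2 set: prints two named errors */
	return 0;
}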
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 7145f7716a92..015f7110b96d 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -828,7 +828,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_dma))
- goto dma_unmap;
+ goto free_resources;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 70534981a49b..48fa4cf9f64a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -240,6 +240,8 @@
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
+#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
+ IOAT_CHANERR_WRITE_DATA_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f039cfadf17b..a410657f7bcd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
if (desc->group_head)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index b54f62de9232..ed76044ce4b9 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */
struct idmac_tx_desc *desc, *descnew;
- dma_async_tx_callback callback;
- void *callback_param;
bool done = false;
u32 ready0, ready1, curbuf, err;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
@@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- callback = descnew->txd.callback;
- callback_param = descnew->txd.callback_param;
+ dmaengine_desc_get_callback(&descnew->txd, &cb);
+
list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
- if (callback)
- callback(callback_param);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock(&ichan->lock);
}
@@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (done)
dma_cookie_complete(&desc->txd);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
spin_unlock(&ichan->lock);
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
- callback(callback_param);
+ if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
+ dmaengine_desc_callback_invoke(&cb, NULL);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2bf37e68ad0f..dd184b50e5b4 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc)
raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
- unsigned int irq = NO_IRQ;
+ unsigned int irq;
line--;
status &= ~(1UL << line);
raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
-
if (!map) {
+ raw_spin_unlock(&bank_lock);
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
line, i);
continue;
}
+ irq = map->irq;
+ raw_spin_unlock(&bank_lock);
generic_handle_irq(irq);
}
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 39de8980128c..aabcb7934b05 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 - 2015 Linaro Ltd.
* Copyright (c) 2013 Hisilicon Limited.
*
* This program is free software; you can redistribute it and/or modify
@@ -8,6 +8,8 @@
*/
#include <linux/sched.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -25,22 +27,28 @@
#define DRIVER_NAME "k3-dma"
#define DMA_MAX_SIZE 0x1ffc
+#define DMA_CYCLIC_MAX_PERIOD 0x1000
+#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
#define INT_STAT 0x00
#define INT_TC1 0x04
+#define INT_TC2 0x08
#define INT_ERR1 0x0c
#define INT_ERR2 0x10
#define INT_TC1_MASK 0x18
+#define INT_TC2_MASK 0x1c
#define INT_ERR1_MASK 0x20
#define INT_ERR2_MASK 0x24
#define INT_TC1_RAW 0x600
-#define INT_ERR1_RAW 0x608
-#define INT_ERR2_RAW 0x610
+#define INT_TC2_RAW 0x608
+#define INT_ERR1_RAW 0x610
+#define INT_ERR2_RAW 0x618
#define CH_PRI 0x688
#define CH_STAT 0x690
#define CX_CUR_CNT 0x704
#define CX_LLI 0x800
-#define CX_CNT 0x810
+#define CX_CNT1 0x80c
+#define CX_CNT0 0x810
#define CX_SRC 0x814
#define CX_DST 0x818
#define CX_CFG 0x81c
@@ -49,6 +57,7 @@
#define CX_LLI_CHAIN_EN 0x2
#define CX_CFG_EN 0x1
+#define CX_CFG_NODEIRQ BIT(1)
#define CX_CFG_MEM2PER (0x1 << 2)
#define CX_CFG_PER2MEM (0x2 << 2)
#define CX_CFG_SRCINCR (0x1 << 31)
@@ -68,7 +77,7 @@ struct k3_dma_desc_sw {
dma_addr_t desc_hw_lli;
size_t desc_num;
size_t size;
- struct k3_desc_hw desc_hw[0];
+ struct k3_desc_hw *desc_hw;
};
struct k3_dma_phy;
@@ -81,6 +90,7 @@ struct k3_dma_chan {
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
enum dma_status status;
+ bool cyclic;
};
struct k3_dma_phy {
@@ -100,6 +110,7 @@ struct k3_dma_dev {
struct k3_dma_phy *phy;
struct k3_dma_chan *chans;
struct clk *clk;
+ struct dma_pool *pool;
u32 dma_channels;
u32 dma_requests;
unsigned int irq;
@@ -135,6 +146,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
val = 0x1 << phy->idx;
writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_TC2_RAW);
writel_relaxed(val, d->base + INT_ERR1_RAW);
writel_relaxed(val, d->base + INT_ERR2_RAW);
}
@@ -142,7 +154,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
writel_relaxed(hw->lli, phy->base + CX_LLI);
- writel_relaxed(hw->count, phy->base + CX_CNT);
+ writel_relaxed(hw->count, phy->base + CX_CNT0);
writel_relaxed(hw->saddr, phy->base + CX_SRC);
writel_relaxed(hw->daddr, phy->base + CX_DST);
writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
@@ -176,11 +188,13 @@ static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
/* unmask irq */
writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_TC2_MASK);
writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
} else {
/* mask irq */
writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_TC2_MASK);
writel_relaxed(0x0, d->base + INT_ERR1_MASK);
writel_relaxed(0x0, d->base + INT_ERR2_MASK);
}
@@ -193,22 +207,31 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
struct k3_dma_chan *c;
u32 stat = readl_relaxed(d->base + INT_STAT);
u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 tc2 = readl_relaxed(d->base + INT_TC2);
u32 err1 = readl_relaxed(d->base + INT_ERR1);
u32 err2 = readl_relaxed(d->base + INT_ERR2);
u32 i, irq_chan = 0;
while (stat) {
i = __ffs(stat);
- stat &= (stat - 1);
- if (likely(tc1 & BIT(i))) {
+ stat &= ~BIT(i);
+ if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
+ unsigned long flags;
+
p = &d->phy[i];
c = p->vchan;
- if (c) {
- unsigned long flags;
-
+ if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
vchan_cookie_complete(&p->ds_run->vd);
+ WARN_ON_ONCE(p->ds_done);
p->ds_done = p->ds_run;
+ p->ds_run = NULL;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ if (c && (tc2 & BIT(i))) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (p->ds_run != NULL)
+ vchan_cyclic_callback(&p->ds_run->vd);
spin_unlock_irqrestore(&c->vc.lock, flags);
}
irq_chan |= BIT(i);
@@ -218,14 +241,17 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
}
writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
writel_relaxed(err1, d->base + INT_ERR1_RAW);
writel_relaxed(err2, d->base + INT_ERR2_RAW);
- if (irq_chan) {
+ if (irq_chan)
tasklet_schedule(&d->task);
+
+ if (irq_chan || err1 || err2)
return IRQ_HANDLED;
- } else
- return IRQ_NONE;
+
+ return IRQ_NONE;
}
static int k3_dma_start_txd(struct k3_dma_chan *c)
@@ -247,14 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
* so vc->desc_issued only contains desc pending
*/
list_del(&ds->vd.node);
+
+ WARN_ON_ONCE(c->phy->ds_run);
+ WARN_ON_ONCE(c->phy->ds_done);
c->phy->ds_run = ds;
- c->phy->ds_done = NULL;
/* start dma */
k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
return 0;
}
- c->phy->ds_done = NULL;
- c->phy->ds_run = NULL;
return -EAGAIN;
}
@@ -351,7 +377,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
* its total size.
*/
vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
+ if (vd && !c->cyclic) {
bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
} else if ((!p) || (!p->ds_run)) {
bytes = 0;
@@ -361,7 +387,8 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
bytes = k3_dma_get_curr_cnt(d, p);
clli = k3_dma_get_curr_lli(p);
- index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ index = ((clli - ds->desc_hw_lli) /
+ sizeof(struct k3_desc_hw)) + 1;
for (; index < ds->desc_num; index++) {
bytes += ds->desc_hw[index].count;
/* end of lli */
@@ -402,9 +429,10 @@ static void k3_dma_issue_pending(struct dma_chan *chan)
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
- if ((num + 1) < ds->desc_num)
+ if (num != ds->desc_num - 1)
ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
sizeof(struct k3_desc_hw);
+
ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
ds->desc_hw[num].count = len;
ds->desc_hw[num].saddr = src;
@@ -412,6 +440,35 @@ static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
ds->desc_hw[num].config = ccfg;
}
+static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
+ struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
+
+ if (num > lli_limit) {
+ dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
+ &c->vc, num, lli_limit);
+ return NULL;
+ }
+
+ ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+ if (!ds)
+ return NULL;
+
+ ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ if (!ds->desc_hw) {
+ dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+ kfree(ds);
+ return NULL;
+ }
+ memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
+ ds->desc_num = num;
+ return ds;
+}
+
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
@@ -425,13 +482,13 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
return NULL;
num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
- ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+
+ ds = k3_dma_alloc_desc_resource(num, chan);
if (!ds)
return NULL;
- ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ c->cyclic = 0;
ds->size = len;
- ds->desc_num = num;
num = 0;
if (!c->ccfg) {
@@ -474,18 +531,17 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (sgl == NULL)
return NULL;
+ c->cyclic = 0;
+
for_each_sg(sgl, sg, sglen, i) {
avail = sg_dma_len(sg);
if (avail > DMA_MAX_SIZE)
num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
}
- ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ ds = k3_dma_alloc_desc_resource(num, chan);
if (!ds)
return NULL;
-
- ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
- ds->desc_num = num;
num = 0;
for_each_sg(sgl, sg, sglen, i) {
@@ -516,6 +572,73 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
+static struct dma_async_tx_descriptor *
+k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = 1, since = 0;
+ size_t modulo = DMA_CYCLIC_MAX_PERIOD;
+ u32 en_tc2 = 0;
+
+ dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
+ __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
+ buf_len, period_len, (int)dir);
+
+ avail = buf_len;
+ if (avail > modulo)
+ num += DIV_ROUND_UP(avail, modulo) - 1;
+
+ ds = k3_dma_alloc_desc_resource(num, chan);
+ if (!ds)
+ return NULL;
+
+ c->cyclic = 1;
+ addr = buf_addr;
+ avail = buf_len;
+ total = avail;
+ num = 0;
+
+ if (period_len < modulo)
+ modulo = period_len;
+
+ do {
+ len = min_t(size_t, avail, modulo);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+ since += len;
+ if (since >= period_len) {
+ /* descriptor asks for TC2 interrupt on completion */
+ en_tc2 = CX_CFG_NODEIRQ;
+ since -= period_len;
+ } else
+ en_tc2 = 0;
+
+ k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+
+ /* "Cyclic" == end of link points back to start of link */
+ ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
+
+ ds->size = total;
+
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -551,7 +674,7 @@ static int k3_dma_config(struct dma_chan *chan,
c->ccfg |= (val << 12) | (val << 16);
if ((maxburst == 0) || (maxburst > 16))
- val = 16;
+ val = 15;
else
val = maxburst - 1;
c->ccfg |= (val << 20) | (val << 24);
@@ -563,6 +686,16 @@ static int k3_dma_config(struct dma_chan *chan,
return 0;
}
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+ struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
+
+ dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+ kfree(ds);
+}
+
static int k3_dma_terminate_all(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -586,7 +719,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
k3_dma_terminate_chan(p, d);
c->phy = NULL;
p->vchan = NULL;
- p->ds_run = p->ds_done = NULL;
+ if (p->ds_run) {
+ k3_dma_free_desc(&p->ds_run->vd);
+ p->ds_run = NULL;
+ }
+ if (p->ds_done) {
+ k3_dma_free_desc(&p->ds_done->vd);
+ p->ds_done = NULL;
+ }
+
}
spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
@@ -639,14 +780,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
return 0;
}
-static void k3_dma_free_desc(struct virt_dma_desc *vd)
-{
- struct k3_dma_desc_sw *ds =
- container_of(vd, struct k3_dma_desc_sw, vd);
-
- kfree(ds);
-}
-
static const struct of_device_id k3_pdma_dt_ids[] = {
{ .compatible = "hisilicon,k3-dma-1.0", },
{}
@@ -706,6 +839,12 @@ static int k3_dma_probe(struct platform_device *op)
d->irq = irq;
+ /* A DMA memory pool for LLIs, align on 32-byte boundary */
+ d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+ LLI_BLOCK_SIZE, 32, 0);
+ if (!d->pool)
+ return -ENOMEM;
+
/* init phy channel */
d->phy = devm_kzalloc(&op->dev,
d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -722,11 +861,13 @@ static int k3_dma_probe(struct platform_device *op)
INIT_LIST_HEAD(&d->slave.channels);
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.dev = &op->dev;
d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
d->slave.device_tx_status = k3_dma_tx_status;
d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
d->slave.device_issue_pending = k3_dma_issue_pending;
d->slave.device_config = k3_dma_config;
d->slave.device_pause = k3_dma_transfer_pause;
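
k3_dma_prep_dma_cyclic() splits the buffer into chunks of at most DMA_CYCLIC_MAX_PERIOD bytes and marks only the chunk that completes a requested period with the TC2 interrupt flag (en_tc2), then points the last LLI back at the first to close the ring. A standalone model of the chunking and period-flagging loop (numbers are illustrative):

#include <stdio.h>

#define CYCLIC_MAX_PERIOD 0x1000

int main(void)
{
	size_t buf_len = 0x6000;	/* whole cyclic buffer */
	size_t period_len = 0x3000;	/* callback wanted every 0x3000 bytes */
	size_t modulo = CYCLIC_MAX_PERIOD;
	size_t avail = buf_len, len, since = 0;
	int num = 0, flag;

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = avail < modulo ? avail : modulo;
		since += len;
		if (since >= period_len) {	/* this chunk completes a period */
			flag = 1;
			since -= period_len;
		} else {
			flag = 0;
		}
		printf("chunk %d: %zu bytes, period-irq=%d\n", num, len, flag);
		num++;
		avail -= len;
	} while (avail);

	return 0;
}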
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 1502b24b7c7d..818255844a3c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
tx = &ch->tx_array[last_tail];
if (tx->cookie) {
dma_cookie_complete(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ tx->callback = NULL;
}
last_tail = mic_dma_hw_ring_inc(last_tail);
}
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index f4b25fb0d040..eb3a1f42ab06 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
struct mmp_pdma_desc_sw *desc, *_desc;
LIST_HEAD(chain_cleanup);
unsigned long flags;
+ struct dmaengine_desc_callback cb;
if (chan->cyclic_first) {
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
-
spin_lock_irqsave(&chan->desc_lock, flags);
desc = chan->cyclic_first;
- cb = desc->async_tx.callback;
- cb_data = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
spin_unlock_irqrestore(&chan->desc_lock, flags);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
}
@@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
/* Remove from the list of transactions */
list_del(&desc->node);
/* Run the link descriptor callback function */
- if (txd->callback)
- txd->callback(txd->callback_param);
+ dmaengine_desc_get_callback(txd, &cb);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_pool_free(chan->desc_pool, desc, txd->phys);
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b3441f57a364..13c68b6434ce 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
{
struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
- if (tdmac->desc.callback)
- tdmac->desc.callback(tdmac->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
@@ -433,7 +431,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (period_len > TDMA_MAX_XFER_BYTES) {
dev_err(tdmac->dev,
- "maximum period size exceeded: %d > %d\n",
+ "maximum period size exceeded: %zu > %d\n",
period_len, TDMA_MAX_XFER_BYTES);
goto err_out;
}
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index a6e642792e5a..e1a5c2242f6f 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -579,7 +579,7 @@ static int moxart_probe(struct platform_device *pdev)
return -ENOMEM;
irq = irq_of_parse_and_map(node, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
dev_err(dev, "no IRQ resource\n");
return -EINVAL;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index fa86592c7ae1..dde713461a95 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
@@ -926,7 +925,7 @@ static int mpc_dma_probe(struct platform_device *op)
}
mdma->irq = irq_of_parse_and_map(dn, 0);
- if (mdma->irq == NO_IRQ) {
+ if (!mdma->irq) {
dev_err(dev, "Error mapping IRQ!\n");
retval = -EINVAL;
goto err;
@@ -935,7 +934,7 @@ static int mpc_dma_probe(struct platform_device *op)
if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
mdma->is_mpc8308 = 1;
mdma->irq2 = irq_of_parse_and_map(dn, 1);
- if (mdma->irq2 == NO_IRQ) {
+ if (!mdma->irq2) {
dev_err(dev, "Error mapping IRQ!\n");
retval = -EINVAL;
goto err_dispose1;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f4c9f98ec35e..23f75285a4d9 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -206,14 +206,11 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
+ dma_descriptor_unmap(&desc->async_tx);
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
- dma_descriptor_unmap(&desc->async_tx);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
/* run dependent operations */
@@ -470,12 +467,90 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
return mv_chan->slots_allocated ? : -ENOMEM;
}
+/*
+ * Check if the source or destination is a PCIe/IO address (non-SDRAM) and add
+ * a new MBus window if necessary. Use a cache for these checks so that
+ * the MMIO-mapped registers don't have to be accessed every time, which
+ * speeds up this process.
+ */
+static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
+{
+ struct mv_xor_device *xordev = mv_chan->xordev;
+ void __iomem *base = mv_chan->mmr_high_base;
+ u32 win_enable;
+ u32 size;
+ u8 target, attr;
+ int ret;
+ int i;
+
+ /* Nothing needs to get done for the Armada 3700 */
+ if (xordev->xor_type == XOR_ARMADA_37XX)
+ return 0;
+
+ /*
+ * Loop over the cached windows to check if the requested area
+ * is already mapped. If this is the case, nothing needs to be done
+ * and we can return.
+ */
+ for (i = 0; i < WINDOW_COUNT; i++) {
+ if (addr >= xordev->win_start[i] &&
+ addr <= xordev->win_end[i]) {
+ /* Window is already mapped */
+ return 0;
+ }
+ }
+
+ /*
+ * The window is not mapped, so we need to create the new mapping
+ */
+
+ /* If no IO window is found, the addr has to be located in SDRAM */
+ ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
+ if (ret < 0)
+ return 0;
+
+ /*
+ * Mask the base addr 'addr' according to 'size' read back from the
+ * MBus window. Otherwise we might end up with an address located
+ * somewhere in the middle of this area here.
+ */
+ size -= 1;
+ addr &= ~size;
+
+ /*
+ * Reading either of the two enable registers is enough, as they are
+ * always programmed to identical values
+ */
+ win_enable = readl(base + WINDOW_BAR_ENABLE(0));
+
+ /* Set 'i' to the first free window to write the new values to */
+ i = ffs(~win_enable) - 1;
+ if (i >= WINDOW_COUNT)
+ return -ENOMEM;
+
+ writel((addr & 0xffff0000) | (attr << 8) | target,
+ base + WINDOW_BASE(i));
+ writel(size & 0xffff0000, base + WINDOW_SIZE(i));
+
+ /* Fill the caching variables for later use */
+ xordev->win_start[i] = addr;
+ xordev->win_end[i] = addr + size;
+
+ win_enable |= (1 << i);
+ win_enable |= 3 << (16 + (2 * i));
+ writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+ writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+
+ return 0;
+}
+
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *sw_desc;
+ int ret;
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -486,6 +561,11 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
__func__, src_cnt, len, &dest, flags);
+ /* Check if a new window needs to get added for 'dest' */
+ ret = mv_xor_add_io_win(mv_chan, dest);
+ if (ret)
+ return NULL;
+
sw_desc = mv_chan_alloc_slot(mv_chan);
if (sw_desc) {
sw_desc->type = DMA_XOR;
@@ -493,8 +573,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
mv_desc_init(sw_desc, dest, len, flags);
if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
mv_desc_set_mode(sw_desc);
- while (src_cnt--)
+ while (src_cnt--) {
+ /* Check if a new window needs to get added for 'src' */
+ ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
+ if (ret)
+ return NULL;
mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
+ }
}
dev_dbg(mv_chan_to_devp(mv_chan),
@@ -959,6 +1044,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
+ mv_chan->xordev = xordev;
/*
* These source and destination dummy buffers are used to implement
@@ -1086,6 +1172,10 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
dram->mbus_dram_target_id, base + WINDOW_BASE(i));
writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+ /* Fill the caching variables for later use */
+ xordev->win_start[i] = cs->base;
+ xordev->win_end[i] = cs->base + cs->size - 1;
+
win_enable |= (1 << i);
win_enable |= 3 << (16 + (2 * i));
}
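
mv_xor_add_io_win() first consults a small per-device cache of window ranges and only programs a new MBus window when the address is not already covered; the window size reported back is then used to mask the address down to the window base. A standalone model of the cache lookup and the masking arithmetic (window values are made up; fields are u32 in the driver):

#include <stdio.h>

#define WINDOW_COUNT 8

static unsigned int win_start[WINDOW_COUNT];
static unsigned int win_end[WINDOW_COUNT];

/* Returns 1 when addr is already covered by a cached window. */
static int window_cached(unsigned int addr)
{
	int i;

	for (i = 0; i < WINDOW_COUNT; i++)
		if (addr >= win_start[i] && addr <= win_end[i])
			return 1;
	return 0;
}

int main(void)
{
	unsigned int addr = 0xf1012345;
	unsigned int size = 0x00100000;	/* 1 MiB window, power of two */
	unsigned int base;

	/* Seed one cached window and show a hit and a miss. */
	win_start[0] = 0xe0000000;
	win_end[0] = 0xe00fffff;
	printf("0xe0001000 cached: %d\n", window_cached(0xe0001000));
	printf("0x%08x cached: %d\n", addr, window_cached(addr));

	/* Align the missed address down to its window base, as the patch does. */
	size -= 1;
	base = addr & ~size;
	printf("new window base 0x%08x, end 0x%08x\n", base, base + size);
	return 0;
}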
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index bf56e082e7cd..88eeab222a23 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -80,12 +80,17 @@
#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
+#define WINDOW_COUNT 8
+
struct mv_xor_device {
void __iomem *xor_base;
void __iomem *xor_high_base;
struct clk *clk;
struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
int xor_type;
+
+ u32 win_start[WINDOW_COUNT];
+ u32 win_end[WINDOW_COUNT];
};
/**
@@ -127,6 +132,8 @@ struct mv_xor_chan {
char dummy_dst[MV_XOR_MIN_BYTE_COUNT];
dma_addr_t dummy_src_addr, dummy_dst_addr;
u32 saved_config_reg, saved_int_mask_reg;
+
+ struct mv_xor_device *xordev;
};
/**
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 60de35251da5..e217268c7098 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
{
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
- if (mxs_chan->desc.callback)
- mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+ dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
@@ -429,12 +428,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
goto err_alloc;
}
- if (mxs_chan->chan_irq != NO_IRQ) {
- ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
- 0, "mxs-dma", mxs_dma);
- if (ret)
- goto err_irq;
- }
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 08c45c185549..09de71519d37 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
{
struct nbpf_channel *chan = (struct nbpf_channel *)data;
struct nbpf_desc *desc, *tmp;
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
while (!list_empty(&chan->done)) {
bool found = false, must_put, recycling = false;
@@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
must_put = false;
}
- callback = desc->async_tx.callback;
- param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
/* ack and callback completed descriptor */
spin_unlock_irq(&chan->lock);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
if (must_put)
nbpf_desc_put(desc);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index d99ca2b511c4..7ca27d4b1c54 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -32,10 +33,12 @@ struct omap_dmadev {
const struct omap_dma_reg *reg_map;
struct omap_system_dma_plat_info *plat;
bool legacy;
+ bool ll123_supported;
+ struct dma_pool *desc_pool;
unsigned dma_requests;
spinlock_t irq_lock;
uint32_t irq_enable_mask;
- struct omap_chan *lch_map[OMAP_SDMA_CHANNELS];
+ struct omap_chan **lch_map;
};
struct omap_chan {
@@ -55,16 +58,40 @@ struct omap_chan {
unsigned sgidx;
};
+#define DESC_NXT_SV_REFRESH (0x1 << 24)
+#define DESC_NXT_SV_REUSE (0x2 << 24)
+#define DESC_NXT_DV_REFRESH (0x1 << 26)
+#define DESC_NXT_DV_REUSE (0x2 << 26)
+#define DESC_NTYPE_TYPE2 (0x2 << 29)
+
+/* Type 2 descriptor with Source or Destination address update */
+struct omap_type2_desc {
+ uint32_t next_desc;
+ uint32_t en;
+ uint32_t addr; /* src or dst */
+ uint16_t fn;
+ uint16_t cicr;
+ int16_t cdei;
+ int16_t csei;
+ int32_t cdfi;
+ int32_t csfi;
+} __packed;
+
struct omap_sg {
dma_addr_t addr;
uint32_t en; /* number of elements (24-bit) */
uint32_t fn; /* number of frames (16-bit) */
int32_t fi; /* for double indexing */
int16_t ei; /* for double indexing */
+
+ /* Linked list */
+ struct omap_type2_desc *t2_desc;
+ dma_addr_t t2_desc_paddr;
};
struct omap_desc {
struct virt_dma_desc vd;
+ bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
@@ -81,6 +108,9 @@ struct omap_desc {
};
enum {
+ CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */
+ CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */
+
CCR_FS = BIT(5),
CCR_READ_PRIORITY = BIT(6),
CCR_ENABLE = BIT(7),
@@ -151,6 +181,19 @@ enum {
CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */
CLNK_CTRL_ENABLE_LNK = BIT(15),
+
+ CDP_DST_VALID_INC = 0 << 0,
+ CDP_DST_VALID_RELOAD = 1 << 0,
+ CDP_DST_VALID_REUSE = 2 << 0,
+ CDP_SRC_VALID_INC = 0 << 2,
+ CDP_SRC_VALID_RELOAD = 1 << 2,
+ CDP_SRC_VALID_REUSE = 2 << 2,
+ CDP_NTYPE_TYPE1 = 1 << 4,
+ CDP_NTYPE_TYPE2 = 2 << 4,
+ CDP_NTYPE_TYPE3 = 3 << 4,
+ CDP_TMODE_NORMAL = 0 << 8,
+ CDP_TMODE_LLIST = 1 << 8,
+ CDP_FAST = BIT(10),
};
static const unsigned es_bytes[] = {
@@ -180,7 +223,64 @@ static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
- kfree(container_of(vd, struct omap_desc, vd));
+ struct omap_desc *d = to_omap_dma_desc(&vd->tx);
+
+ if (d->using_ll) {
+ struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
+ int i;
+
+ for (i = 0; i < d->sglen; i++) {
+ if (d->sg[i].t2_desc)
+ dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
+ d->sg[i].t2_desc_paddr);
+ }
+ }
+
+ kfree(d);
+}
+
+static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
+ enum dma_transfer_direction dir, bool last)
+{
+ struct omap_sg *sg = &d->sg[idx];
+ struct omap_type2_desc *t2_desc = sg->t2_desc;
+
+ if (idx)
+ d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
+ if (last)
+ t2_desc->next_desc = 0xfffffffc;
+
+ t2_desc->en = sg->en;
+ t2_desc->addr = sg->addr;
+ t2_desc->fn = sg->fn & 0xffff;
+ t2_desc->cicr = d->cicr;
+ if (!last)
+ t2_desc->cicr &= ~CICR_BLOCK_IE;
+
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ t2_desc->cdei = sg->ei;
+ t2_desc->csei = d->ei;
+ t2_desc->cdfi = sg->fi;
+ t2_desc->csfi = d->fi;
+
+ t2_desc->en |= DESC_NXT_DV_REFRESH;
+ t2_desc->en |= DESC_NXT_SV_REUSE;
+ break;
+ case DMA_MEM_TO_DEV:
+ t2_desc->cdei = d->ei;
+ t2_desc->csei = sg->ei;
+ t2_desc->cdfi = d->fi;
+ t2_desc->csfi = sg->fi;
+
+ t2_desc->en |= DESC_NXT_SV_REFRESH;
+ t2_desc->en |= DESC_NXT_DV_REUSE;
+ break;
+ default:
+ return;
+ }
+
+ t2_desc->en |= DESC_NTYPE_TYPE2;
}
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
@@ -285,6 +385,7 @@ static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint16_t cicr = d->cicr;
if (__dma_omap15xx(od->plat->dma_attr))
omap_dma_chan_write(c, CPC, 0);
@@ -293,8 +394,27 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
omap_dma_clear_csr(c);
+ if (d->using_ll) {
+ uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
+ else
+ cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
+ omap_dma_chan_write(c, CDP, cdp);
+
+ omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
+ omap_dma_chan_write(c, CCDN, 0);
+ omap_dma_chan_write(c, CCFN, 0xffff);
+ omap_dma_chan_write(c, CCEN, 0xffffff);
+
+ cicr &= ~CICR_BLOCK_IE;
+ } else if (od->ll123_supported) {
+ omap_dma_chan_write(c, CDP, 0);
+ }
+
/* Enable interrupts */
- omap_dma_chan_write(c, CICR, d->cicr);
+ omap_dma_chan_write(c, CICR, cicr);
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
@@ -365,10 +485,9 @@ static void omap_dma_stop(struct omap_chan *c)
c->running = false;
}
-static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
- unsigned idx)
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
- struct omap_sg *sg = d->sg + idx;
+ struct omap_sg *sg = d->sg + c->sgidx;
unsigned cxsa, cxei, cxfi;
if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
@@ -388,6 +507,7 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
omap_dma_chan_write(c, CFN, sg->fn);
omap_dma_start(c, d);
+ c->sgidx++;
}
static void omap_dma_start_desc(struct omap_chan *c)
@@ -433,7 +553,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
omap_dma_chan_write(c, CSDP, d->csdp);
omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
- omap_dma_start_sg(c, d, 0);
+ omap_dma_start_sg(c, d);
}
static void omap_dma_callback(int ch, u16 status, void *data)
@@ -445,15 +565,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
d = c->desc;
if (d) {
- if (!c->cyclic) {
- if (++c->sgidx < d->sglen) {
- omap_dma_start_sg(c, d, c->sgidx);
- } else {
- omap_dma_start_desc(c);
- vchan_cookie_complete(&d->vd);
- }
- } else {
+ if (c->cyclic) {
vchan_cyclic_callback(&d->vd);
+ } else if (d->using_ll || c->sgidx == d->sglen) {
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ } else {
+ omap_dma_start_sg(c, d);
}
}
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -503,6 +621,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct device *dev = od->ddev.dev;
int ret;
if (od->legacy) {
@@ -513,8 +632,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
&c->dma_ch);
}
- dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
- c->dma_ch, c->dma_sig);
+ dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
if (ret >= 0) {
omap_dma_assign(od, c, c->dma_ch);
@@ -570,7 +688,8 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
+ c->dma_sig);
c->dma_sig = 0;
}
@@ -744,6 +863,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
struct omap_desc *d;
dma_addr_t dev_addr;
unsigned i, es, en, frame_bytes;
+ bool ll_failed = false;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
@@ -784,13 +904,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->es = es;
d->ccr = c->ccr | CCR_SYNC_FRAME;
- if (dir == DMA_DEV_TO_MEM)
+ if (dir == DMA_DEV_TO_MEM) {
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
- else
+ d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ } else {
d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+ d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ }
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
- d->csdp = es;
+ d->csdp |= es;
if (dma_omap1()) {
d->cicr |= CICR_TOUT_IE;
@@ -819,14 +942,47 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
*/
en = burst;
frame_bytes = es_bytes[es] * en;
+
+ if (sglen >= 2)
+ d->using_ll = od->ll123_supported;
+
for_each_sg(sgl, sgent, sglen, i) {
- d->sg[i].addr = sg_dma_address(sgent);
- d->sg[i].en = en;
- d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
+ struct omap_sg *osg = &d->sg[i];
+
+ osg->addr = sg_dma_address(sgent);
+ osg->en = en;
+ osg->fn = sg_dma_len(sgent) / frame_bytes;
+
+ if (d->using_ll) {
+ osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
+ &osg->t2_desc_paddr);
+ if (!osg->t2_desc) {
+ dev_err(chan->device->dev,
+ "t2_desc[%d] allocation failed\n", i);
+ ll_failed = true;
+ d->using_ll = false;
+ continue;
+ }
+
+ omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
+ }
}
d->sglen = sglen;
+ /* Release the dma_pool entries if one allocation failed */
+ if (ll_failed) {
+ for (i = 0; i < d->sglen; i++) {
+ struct omap_sg *osg = &d->sg[i];
+
+ if (osg->t2_desc) {
+ dma_pool_free(od->desc_pool, osg->t2_desc,
+ osg->t2_desc_paddr);
+ osg->t2_desc = NULL;
+ }
+ }
+ }
+
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
@@ -1225,16 +1381,24 @@ static int omap_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- od->dma_requests = OMAP_SDMA_REQUESTS;
- if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
- "dma-requests",
- &od->dma_requests)) {
+ if (!pdev->dev.of_node) {
+ od->dma_requests = od->plat->dma_attr->lch_count;
+ if (unlikely(!od->dma_requests))
+ od->dma_requests = OMAP_SDMA_REQUESTS;
+ } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
+ &od->dma_requests)) {
dev_info(&pdev->dev,
"Missing dma-requests property, using %u.\n",
OMAP_SDMA_REQUESTS);
+ od->dma_requests = OMAP_SDMA_REQUESTS;
}
- for (i = 0; i < OMAP_SDMA_CHANNELS; i++) {
+ od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
+ sizeof(*od->lch_map), GFP_KERNEL);
+ if (!od->lch_map)
+ return -ENOMEM;
+
+ for (i = 0; i < od->dma_requests; i++) {
rc = omap_dma_chan_init(od);
if (rc) {
omap_dma_free(od);
@@ -1257,10 +1421,25 @@ static int omap_dma_probe(struct platform_device *pdev)
return rc;
}
+ if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
+ od->ll123_supported = true;
+
od->ddev.filter.map = od->plat->slave_map;
od->ddev.filter.mapcnt = od->plat->slavecnt;
od->ddev.filter.fn = omap_dma_filter_fn;
+ if (od->ll123_supported) {
+ od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
+ &pdev->dev,
+ sizeof(struct omap_type2_desc),
+ 4, 0);
+ if (!od->desc_pool) {
+ dev_err(&pdev->dev,
+ "unable to allocate descriptor pool\n");
+ od->ll123_supported = false;
+ }
+ }
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -1284,7 +1463,8 @@ static int omap_dma_probe(struct platform_device *pdev)
}
}
- dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+ dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
+ od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
return rc;
}
@@ -1307,6 +1487,9 @@ static int omap_dma_remove(struct platform_device *pdev)
omap_dma_glbl_write(od, IRQENABLE_L0, 0);
}
+ if (od->ll123_supported)
+ dma_pool_destroy(od->desc_pool);
+
omap_dma_free(od);
return 0;
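
With linked-list support each scatterlist entry gets a type 2 descriptor from a dma_pool; every descriptor's next_desc holds the DMA address of the following one, the last entry is terminated with 0xfffffffc, and only that last entry keeps the block-complete interrupt bit. A minimal model of the chaining over a plain array (fake "DMA addresses", fields reduced for illustration):

#include <stdio.h>

struct type2_desc_sketch {
	unsigned int next_desc;		/* DMA address of the next descriptor */
	unsigned int addr;		/* src or dst address for this segment */
	unsigned int irq_on_block;	/* only the last segment interrupts */
};

int main(void)
{
	struct type2_desc_sketch desc[3];
	/* Pretend DMA addresses of each descriptor inside the pool. */
	unsigned int paddr[3] = { 0x9e000000, 0x9e000010, 0x9e000020 };
	int i, n = 3;

	for (i = 0; i < n; i++) {
		desc[i].addr = 0x80000000 + 0x1000 * i;
		desc[i].irq_on_block = (i == n - 1);
		if (i)
			desc[i - 1].next_desc = paddr[i];
	}
	desc[n - 1].next_desc = 0xfffffffc;	/* list terminator */

	for (i = 0; i < n; i++)
		printf("desc %d: addr=0x%08x next=0x%08x irq=%u\n",
		       i, desc[i].addr, desc[i].next_desc, desc[i].irq_on_block);
	return 0;
}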
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 113605f6fe20..df95727dc2fb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
+ struct dmaengine_desc_callback cb;
+ dmaengine_desc_get_callback(txd, &cb);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_move(&desc->desc_node, &pd_chan->free_list);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4fc3ffbd5ca0..030fe05ed43b 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
}
while (!list_empty(&pch->completed_list)) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
desc = list_first_entry(&pch->completed_list,
struct dma_pl330_desc, node);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
if (pch->cyclic) {
desc->status = PREP;
@@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
dma_descriptor_unmap(&desc->txd);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&pch->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&pch->lock, flags);
}
}
@@ -2274,7 +2272,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
enum dma_status ret;
unsigned long flags;
- struct dma_pl330_desc *desc, *running = NULL;
+ struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
unsigned int transferred, residual = 0;
@@ -2287,10 +2285,13 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
goto out;
spin_lock_irqsave(&pch->lock, flags);
+ spin_lock(&pch->thread->dmac->lock);
if (pch->thread->req_running != -1)
running = pch->thread->req[pch->thread->req_running].desc;
+ last_enq = pch->thread->req[pch->thread->lstenq].desc;
+
/* Check in pending list */
list_for_each_entry(desc, &pch->work_list, node) {
if (desc->status == DONE)
@@ -2298,6 +2299,15 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
+ else if (desc->status == BUSY)
+ /*
+ * Busy but not running means either just enqueued,
+ * or finished and not yet marked done
+ */
+ if (desc == last_enq)
+ transferred = 0;
+ else
+ transferred = desc->bytes_requested;
else
transferred = 0;
residual += desc->bytes_requested - transferred;
@@ -2318,6 +2328,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if (desc->last)
residual = 0;
}
+ spin_unlock(&pch->thread->dmac->lock);
spin_unlock_irqrestore(&pch->lock, flags);
out:
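
The pl330 residue walk now handles three cases per descriptor: DONE contributes its full length, the running descriptor contributes the hardware's current transferred count, and a BUSY-but-not-running descriptor is either the most recently enqueued one (nothing moved yet) or one that finished before being marked DONE (everything moved). A compact model of that accumulation (descriptor list and byte counts are made up):

#include <stdio.h>

enum state { DONE, BUSY, PREP };

struct desc_sketch {
	enum state status;
	unsigned int bytes_requested;
	int is_running;		/* currently programmed into the thread */
	int is_last_enqueued;	/* pch->thread->lstenq in the driver */
};

int main(void)
{
	struct desc_sketch list[] = {
		{ DONE, 4096, 0, 0 },	/* finished and already marked done */
		{ BUSY, 4096, 0, 0 },	/* finished, not yet marked done */
		{ BUSY, 4096, 1, 0 },	/* running right now */
		{ BUSY, 4096, 0, 1 },	/* just enqueued, nothing moved */
	};
	unsigned int hw_count = 1024;	/* what the running channel reports */
	unsigned int residual = 0, transferred;
	unsigned int i, n = sizeof(list) / sizeof(list[0]);

	for (i = 0; i < n; i++) {
		if (list[i].status == DONE)
			transferred = list[i].bytes_requested;
		else if (list[i].is_running)
			transferred = hw_count;
		else if (list[i].status == BUSY)
			transferred = list[i].is_last_enqueued ?
				      0 : list[i].bytes_requested;
		else
			transferred = 0;
		residual += list[i].bytes_requested - transferred;
	}
	printf("residual = %u\n", residual);	/* 0 + 0 + 3072 + 4096 = 7168 */
	return 0;
}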
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index da3688b94bdc..b1535b1fe95c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1482,14 +1482,11 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
cookie = desc->async_tx.cookie;
desc->async_tx.cookie = 0;
+ dma_descriptor_unmap(&desc->async_tx);
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
- dma_descriptor_unmap(&desc->async_tx);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
/* run dependent operations */
@@ -3891,7 +3888,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
np = ofdev->dev.of_node;
if (adev->id != PPC440SPE_XOR_ID) {
adev->err_irq = irq_of_parse_and_map(np, 1);
- if (adev->err_irq == NO_IRQ) {
+ if (!adev->err_irq) {
dev_warn(adev->dev, "no err irq resource?\n");
*initcode = PPC_ADMA_INIT_IRQ2;
adev->err_irq = -ENXIO;
@@ -3902,7 +3899,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
}
adev->irq = irq_of_parse_and_map(np, 0);
- if (adev->irq == NO_IRQ) {
+ if (!adev->irq) {
dev_err(adev->dev, "no irq resource\n");
*initcode = PPC_ADMA_INIT_IRQ1;
ret = -ENXIO;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index b2374cd91e45..e244e10a94b5 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
struct dma_async_tx_descriptor *desc;
dma_cookie_t last_cookie;
struct hidma_desc *mdesc;
+ struct hidma_desc *next;
unsigned long irqflags;
struct list_head list;
@@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* Execute callbacks and run dependencies */
- list_for_each_entry(mdesc, &list, node) {
+ list_for_each_entry_safe(mdesc, next, &list, node) {
enum dma_status llstat;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
desc = &mdesc->desc;
+ last_cookie = desc->cookie;
spin_lock_irqsave(&mchan->lock, irqflags);
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
- if (desc->callback && (llstat == DMA_COMPLETE))
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback(desc, &cb);
- last_cookie = desc->cookie;
dma_run_dependencies(desc);
- }
- /* Free descriptors */
- spin_lock_irqsave(&mchan->lock, irqflags);
- list_splice_tail_init(&list, &mchan->free);
- spin_unlock_irqrestore(&mchan->lock, irqflags);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_move(&mdesc->node, &mchan->free);
+
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else
+ result.result = DMA_TRANS_ABORTED;
+
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ dmaengine_desc_callback_invoke(&cb, &result);
+ }
}
/*
@@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
hidma_ll_start(dmadev->lldev);
}
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+ dma_cookie_t last_success, dma_cookie_t last_used)
+{
+ if (last_success <= last_used) {
+ if ((cookie <= last_success) || (cookie > last_used))
+ return true;
+ } else {
+ if ((cookie <= last_success) && (cookie > last_used))
+ return true;
+ }
+ return false;
+}
+
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
enum dma_status ret;
ret = dma_cookie_status(dmach, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
+ if (ret == DMA_COMPLETE) {
+ bool is_success;
+
+ is_success = hidma_txn_is_success(cookie, mchan->last_success,
+ dmach->cookie);
+ return is_success ? ret : DMA_ERROR;
+ }
if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
unsigned long flags;
@@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
hidma_process_completed(mchan);
spin_lock_irqsave(&mchan->lock, irqflags);
+ mchan->last_success = 0;
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
@@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
/* return all user requests */
list_for_each_entry_safe(mdesc, tmp, &list, node) {
struct dma_async_tx_descriptor *txd = &mdesc->desc;
- dma_async_tx_callback callback = mdesc->desc.callback;
- void *param = mdesc->desc.callback_param;
dma_descriptor_unmap(txd);
-
- if (callback)
- callback(param);
-
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
/* move myself to free_list */
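hidma now passes DMA_TRANS_NOERROR or DMA_TRANS_ABORTED through the second argument of dmaengine_desc_callback_invoke(). A client only sees that if it registers the result-aware callback; the sketch below shows one plausible way to do so (the handler name and the use of a completion are illustrative, not taken from the driver):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

/* Hypothetical client completion handler using the result-aware callback. */
static void example_dma_done(void *param, const struct dmaengine_result *res)
{
	struct completion *done = param;

	if (res && res->result != DMA_TRANS_NOERROR)
		pr_warn("DMA transfer did not complete cleanly (%d)\n",
			res->result);

	complete(done);
}

/*
 * Registration, with desc obtained from a dmaengine_prep_*() call:
 *
 *	desc->callback_result = example_dma_done;
 *	desc->callback_param  = &done;
 *	dmaengine_submit(desc);
 */
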
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index db413a5efc4e..e52e20716303 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -72,7 +72,6 @@ struct hidma_lldev {
u32 tre_write_offset; /* TRE write location */
struct tasklet_struct task; /* task delivering notifications */
- struct tasklet_struct rst_task; /* task to reset HW */
DECLARE_KFIFO_PTR(handoff_fifo,
struct hidma_tre *); /* pending TREs FIFO */
};
@@ -89,6 +88,7 @@ struct hidma_chan {
bool allocated;
char dbg_name[16];
u32 dma_sig;
+ dma_cookie_t last_success;
/*
* active descriptor on this channel
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb64c71..3224f24c577b 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -381,27 +381,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
}
/*
- * Abort all transactions and perform a reset.
- */
-static void hidma_ll_abort(unsigned long arg)
-{
- struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
- u8 err_code = HIDMA_EVRE_STATUS_ERROR;
- u8 err_info = 0xFF;
- int rc;
-
- hidma_cleanup_pending_tre(lldev, err_info, err_code);
-
- /* reset the channel for recovery */
- rc = hidma_ll_setup(lldev);
- if (rc) {
- dev_err(lldev->dev, "channel reinitialize failed after error\n");
- return;
- }
- writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-}
-
-/*
* The interrupt handler for HIDMA will try to consume as many pending
* EVRE from the event queue as possible. Each EVRE has an associated
* TRE that holds the user interface parameters. EVRE reports the
@@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
while (cause) {
if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, resetting...\n",
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
/* Clear out pending interrupts */
writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
- tasklet_schedule(&lldev->rst_task);
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* Driver completes the txn and intimates the client.*/
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
goto out;
}
@@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
return NULL;
spin_lock_init(&lldev->lock);
- tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
lldev->initialized = 1;
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task);
- tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
lldev->pending_tre_count = 0;
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index ce67075589f5..3c579abbabb7 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -823,11 +823,11 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
struct s3c24xx_sg *dsg;
int src_mod, dest_mod;
- dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %d bytes from %s\n",
+ dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
len, s3cchan->name);
if ((len & S3C24XX_DCON_TC_MASK) != len) {
- dev_err(&s3cdma->pdev->dev, "memcpy size %d to large\n", len);
+ dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len);
return NULL;
}
@@ -1301,6 +1301,9 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.filter.map = pdata->slave_map;
+ s3cdma->slave.filter.mapcnt = pdata->slavecnt;
+ s3cdma->slave.filter.fn = s3c24xx_dma_filter;
/* Register as many memcpy channels as there are physical channels */
ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
@@ -1418,7 +1421,7 @@ bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
s3cchan = to_s3c24xx_dma_chan(chan);
- return s3cchan->id == (int)param;
+ return s3cchan->id == (uintptr_t)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);
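Filling in the dma_device filter map lets clients drop board-specific filter functions and request channels by name. A sketch of how a platform might wire this up for s3c24xx-style channel ids (the device name, request names and channel numbers below are invented for illustration):

#include <linux/dmaengine.h>

/* Hypothetical routing table: devname / request name / filter parameter. */
static const struct dma_slave_map example_slave_map[] = {
	{ "example-uart.0", "rx", (void *)(uintptr_t)3 },
	{ "example-uart.0", "tx", (void *)(uintptr_t)4 },
};

/*
 * The DMA driver publishes it, as s3c24xx-dma now does:
 *	dmadev->filter.map    = example_slave_map;
 *	dmadev->filter.mapcnt = ARRAY_SIZE(example_slave_map);
 *	dmadev->filter.fn     = s3c24xx_dma_filter;
 * and a client simply asks for its channel by name:
 */
static struct dma_chan *example_request_rx(struct device *dev)
{
	return dma_request_chan(dev, "rx");
}
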
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 43db255050d2..1adeb3265085 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -463,7 +463,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
dma_addr_t addr = sa11x0_dma_pos(p);
unsigned i;
- dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+ dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);
for (i = 0; i < txd->sglen; i++) {
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
@@ -491,7 +491,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
}
spin_unlock_irqrestore(&c->vc.lock, flags);
- dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
return ret;
}
@@ -551,8 +551,8 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
if (len > DMA_MAX_SIZE)
j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
if (addr & DMA_ALIGN) {
- dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
- &c->vc, addr);
+ dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
+ &c->vc, &addr);
return NULL;
}
}
@@ -599,7 +599,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
txd->size = size;
txd->sglen = j;
- dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
+ dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
&c->vc, &txd->vd, txd->size, txd->sglen);
return vchan_tx_prep(&c->vc, &txd->vd, flags);
@@ -693,8 +693,8 @@ static int sa11x0_dma_device_config(struct dma_chan *chan,
if (maxburst == 8)
ddar |= DDAR_BS;
- dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
- &c->vc, addr, width, maxburst);
+ dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
+ &c->vc, &addr, width, maxburst);
c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
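The printk changes in sa11x0-dma rely on the %pad and %pap extensions, which exist because dma_addr_t and phys_addr_t may be wider than unsigned long. A small stand-alone example of the convention (note that both specifiers take a pointer to the variable):

#include <linux/device.h>
#include <linux/types.h>

static void example_log_addr(struct device *dev, dma_addr_t dma,
			     phys_addr_t phys)
{
	/* %pad prints a dma_addr_t, %pap a phys_addr_t, both by reference. */
	dev_dbg(dev, "dma addr: %pad, phys addr: %pap\n", &dma, &phys);
}
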
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0dd953884d1d..2e441d0ccd79 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -118,14 +118,34 @@ struct rcar_dmac_desc_page {
sizeof(struct rcar_dmac_xfer_chunk))
/*
+ * struct rcar_dmac_chan_slave - Slave configuration
+ * @slave_addr: slave memory address
+ * @xfer_size: size (in bytes) of hardware transfers
+ */
+struct rcar_dmac_chan_slave {
+ phys_addr_t slave_addr;
+ unsigned int xfer_size;
+};
+
+/*
+ * struct rcar_dmac_chan_map - Map of slave device phys to dma address
+ * @addr: slave dma address
+ * @dir: direction of mapping
+ * @slave: slave configuration that is mapped
+ */
+struct rcar_dmac_chan_map {
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ struct rcar_dmac_chan_slave slave;
+};
+
+/*
* struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
* @chan: base DMA channel object
* @iomem: channel I/O memory base
* @index: index of this channel in the controller
- * @src_xfer_size: size (in bytes) of hardware transfers on the source side
- * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
- * @src_slave_addr: slave source memory address
- * @dst_slave_addr: slave destination memory address
+ * @src: slave memory address and size on the source side
+ * @dst: slave memory address and size on the destination side
* @mid_rid: hardware MID/RID for the DMA client using this channel
* @lock: protects the channel CHCR register and the desc members
* @desc.free: list of free descriptors
@@ -142,10 +162,9 @@ struct rcar_dmac_chan {
void __iomem *iomem;
unsigned int index;
- unsigned int src_xfer_size;
- unsigned int dst_xfer_size;
- dma_addr_t src_slave_addr;
- dma_addr_t dst_slave_addr;
+ struct rcar_dmac_chan_slave src;
+ struct rcar_dmac_chan_slave dst;
+ struct rcar_dmac_chan_map map;
int mid_rid;
spinlock_t lock;
@@ -793,13 +812,13 @@ static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
case DMA_DEV_TO_MEM:
chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
| RCAR_DMACHCR_RS_DMARS;
- xfer_size = chan->src_xfer_size;
+ xfer_size = chan->src.xfer_size;
break;
case DMA_MEM_TO_DEV:
chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
| RCAR_DMACHCR_RS_DMARS;
- xfer_size = chan->dst_xfer_size;
+ xfer_size = chan->dst.xfer_size;
break;
case DMA_MEM_TO_MEM:
@@ -1023,13 +1042,65 @@ rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
DMA_MEM_TO_MEM, flags, false);
}
+static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct rcar_dmac_chan_map *map = &rchan->map;
+ phys_addr_t dev_addr;
+ size_t dev_size;
+ enum dma_data_direction dev_dir;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = rchan->src.slave_addr;
+ dev_size = rchan->src.xfer_size;
+ dev_dir = DMA_TO_DEVICE;
+ } else {
+ dev_addr = rchan->dst.slave_addr;
+ dev_size = rchan->dst.xfer_size;
+ dev_dir = DMA_FROM_DEVICE;
+ }
+
+ /* Reuse current map if possible. */
+ if (dev_addr == map->slave.slave_addr &&
+ dev_size == map->slave.xfer_size &&
+ dev_dir == map->dir)
+ return 0;
+
+ /* Remove old mapping if present. */
+ if (map->slave.xfer_size)
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+
+ /* Create new slave address map. */
+ map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
+ dev_dir, 0);
+
+ if (dma_mapping_error(chan->device->dev, map->addr)) {
+ dev_err(chan->device->dev,
+ "chan%u: failed to map %zx@%pap", rchan->index,
+ dev_size, &dev_addr);
+ return -EIO;
+ }
+
+ dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
+ rchan->index, dev_size, &dev_addr, &map->addr,
+ dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
+
+ map->slave.slave_addr = dev_addr;
+ map->slave.xfer_size = dev_size;
+ map->dir = dev_dir;
+
+ return 0;
+}
+
static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
- dma_addr_t dev_addr;
/* Someone calling slave DMA on a generic channel? */
if (rchan->mid_rid < 0 || !sg_len) {
@@ -1039,9 +1110,10 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- dev_addr = dir == DMA_DEV_TO_MEM
- ? rchan->src_slave_addr : rchan->dst_slave_addr;
- return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
+ return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
dir, flags, false);
}
@@ -1055,7 +1127,6 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl;
- dma_addr_t dev_addr;
unsigned int sg_len;
unsigned int i;
@@ -1067,6 +1138,9 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
return NULL;
}
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
sg_len = buf_len / period_len;
if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
dev_err(chan->device->dev,
@@ -1094,9 +1168,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
sg_dma_len(&sgl[i]) = period_len;
}
- dev_addr = dir == DMA_DEV_TO_MEM
- ? rchan->src_slave_addr : rchan->dst_slave_addr;
- desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+ desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
dir, flags, true);
kfree(sgl);
@@ -1112,10 +1184,10 @@ static int rcar_dmac_device_config(struct dma_chan *chan,
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
- rchan->src_slave_addr = cfg->src_addr;
- rchan->dst_slave_addr = cfg->dst_addr;
- rchan->src_xfer_size = cfg->src_addr_width;
- rchan->dst_xfer_size = cfg->dst_addr_width;
+ rchan->src.slave_addr = cfg->src_addr;
+ rchan->dst.slave_addr = cfg->dst_addr;
+ rchan->src.xfer_size = cfg->src_addr_width;
+ rchan->dst.xfer_size = cfg->dst_addr_width;
return 0;
}
@@ -1389,21 +1461,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
struct rcar_dmac_chan *chan = dev;
struct rcar_dmac_desc *desc;
+ struct dmaengine_desc_callback cb;
spin_lock_irq(&chan->lock);
/* For cyclic transfers notify the user after every chunk. */
if (chan->desc.running && chan->desc.running->cyclic) {
- dma_async_tx_callback callback;
- void *callback_param;
-
desc = chan->desc.running;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
}
@@ -1418,14 +1487,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
dma_cookie_complete(&desc->async_tx);
list_del(&desc->node);
- if (desc->async_tx.callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
/*
* We own the only reference to this descriptor, we can
* safely dereference it without holding the channel
* lock.
*/
- desc->async_tx.callback(desc->async_tx.callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
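rcar-dmac now maps its slave MMIO addresses with dma_map_resource() so transfers still reach the device when the DMAC sits behind an IOMMU. A minimal sketch of that API, assuming a hypothetical FIFO register region (error handling reduced to the mapping check):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical: make a device FIFO (a physical MMIO address) reachable
 * by a DMA master, as rcar-dmac now does for its slave addresses. */
static int example_map_fifo(struct device *dmac_dev, phys_addr_t fifo_phys,
			    size_t len, dma_addr_t *out)
{
	*out = dma_map_resource(dmac_dev, fifo_phys, len,
				DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dmac_dev, *out))
		return -EIO;

	return 0;
}

/* ... and on teardown:
 *	dma_unmap_resource(dmac_dev, addr, len, DMA_BIDIRECTIONAL, 0);
 */
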
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 10fcabad80f3..12fa48e380cf 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
bool head_acked = false;
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
- void *param = NULL;
+ struct dmaengine_desc_callback cb;
unsigned long flags;
LIST_HEAD(cyclic_list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
/* Call callback on the last chunk */
if (desc->mark == DESC_COMPLETED && tx->callback) {
desc->mark = DESC_WAITING;
+ dmaengine_desc_get_callback(tx, &cb);
callback = tx->callback;
- param = tx->callback_param;
dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
tx->cookie, tx, schan->id);
BUG_ON(desc->chunks != 1);
@@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
spin_unlock_irqrestore(&schan->chan_lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return callback;
}
@@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
/* Complete all */
list_for_each_entry(sdesc, &dl, node) {
struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
sdesc->mark = DESC_IDLE;
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
}
spin_lock(&schan->chan_lock);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d8bc3f2a71db..8f62edad51be 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
list_for_each_entry(sdesc, &list, node) {
desc = &sdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
-
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
}
@@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
desc = &sdesc->desc;
while (happened_cyclic != schan->completed_cyclic) {
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
schan->completed_cyclic++;
}
}
@@ -869,7 +866,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
}
sdma->irq = irq_of_parse_and_map(dn, 0);
- if (sdma->irq == NO_IRQ) {
+ if (!sdma->irq) {
dev_err(dev, "Error mapping IRQ!\n");
return -EINVAL;
}
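The NO_IRQ removals above rest on the fact that irq_of_parse_and_map() returns 0 when no mapping can be created, so "!irq" is the portable failure check. A short illustrative helper (names invented):

#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int example_get_irq(struct device_node *np, irq_handler_t handler,
			   void *data)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)		/* 0 means "no mapping", not a valid IRQ */
		return -EINVAL;

	return request_irq(irq, handler, 0, "example", data);
}
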
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8b18e44a02d5..8684d11b29bb 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -874,7 +874,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
}
if (curr_lcla < 0)
- goto out;
+ goto set_current;
for (; lli_current < lli_len; lli_current++) {
unsigned int lcla_offset = chan->phy_chan->num * 1024 +
@@ -925,8 +925,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
break;
}
}
-
-out:
+ set_current:
desc->lli_current = lli_current;
}
@@ -941,15 +940,7 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->active))
- return NULL;
-
- d = list_first_entry(&d40c->active,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
}
/* remove desc from current queue and add it to the pending_queue */
@@ -962,36 +953,18 @@ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->pending_queue))
- return NULL;
-
- d = list_first_entry(&d40c->pending_queue,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
+ node);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->queue))
- return NULL;
-
- d = list_first_entry(&d40c->queue,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
}
static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
- if (list_empty(&d40c->done))
- return NULL;
-
- return list_first_entry(&d40c->done, struct d40_desc, node);
+ return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
}
static int d40_psize_2_burst_size(bool is_log, int psize)
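The d40_first_*() rewrites above are all instances of list_first_entry_or_null(), which folds the list_empty() test and the list_first_entry() call into one expression. A generic sketch with a made-up descriptor type:

#include <linux/list.h>

struct example_desc {
	struct list_head node;
	/* ... payload ... */
};

static struct example_desc *example_first(struct list_head *head)
{
	/* NULL when the list is empty, otherwise the first entry. */
	return list_first_entry_or_null(head, struct example_desc, node);
}
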
@@ -1083,7 +1056,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c,
D40_CHAN_POS(d40c->phy_chan->num);
if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
- goto done;
+ goto unlock;
}
wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
@@ -1119,7 +1092,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c,
}
}
-done:
+ unlock:
spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
return ret;
}
@@ -1596,8 +1569,7 @@ static void dma_tasklet(unsigned long data)
struct d40_desc *d40d;
unsigned long flags;
bool callback_active;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
spin_lock_irqsave(&d40c->lock, flags);
@@ -1607,7 +1579,7 @@ static void dma_tasklet(unsigned long data)
/* Check if we have reached here for cyclic job */
d40d = d40_first_active_get(d40c);
if (d40d == NULL || !d40d->cyclic)
- goto err;
+ goto check_pending_tx;
}
if (!d40d->cyclic)
@@ -1624,8 +1596,7 @@ static void dma_tasklet(unsigned long data)
/* Callback to client */
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
- callback = d40d->txd.callback;
- callback_param = d40d->txd.callback_param;
+ dmaengine_desc_get_callback(&d40d->txd, &cb);
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
@@ -1646,12 +1617,11 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback_active && callback)
- callback(callback_param);
+ if (callback_active)
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
-
-err:
+ check_pending_tx:
/* Rescue manouver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
@@ -1780,42 +1750,40 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
phy->allocated_dst == D40_ALLOC_FREE) {
phy->allocated_dst = D40_ALLOC_PHY;
phy->allocated_src = D40_ALLOC_PHY;
- goto found;
+ goto found_unlock;
} else
- goto not_found;
+ goto not_found_unlock;
}
/* Logical channel */
if (is_src) {
if (phy->allocated_src == D40_ALLOC_PHY)
- goto not_found;
+ goto not_found_unlock;
if (phy->allocated_src == D40_ALLOC_FREE)
phy->allocated_src = D40_ALLOC_LOG_FREE;
if (!(phy->allocated_src & BIT(log_event_line))) {
phy->allocated_src |= BIT(log_event_line);
- goto found;
+ goto found_unlock;
} else
- goto not_found;
+ goto not_found_unlock;
} else {
if (phy->allocated_dst == D40_ALLOC_PHY)
- goto not_found;
+ goto not_found_unlock;
if (phy->allocated_dst == D40_ALLOC_FREE)
phy->allocated_dst = D40_ALLOC_LOG_FREE;
if (!(phy->allocated_dst & BIT(log_event_line))) {
phy->allocated_dst |= BIT(log_event_line);
- goto found;
- } else
- goto not_found;
+ goto found_unlock;
+ }
}
-
-not_found:
+ not_found_unlock:
spin_unlock_irqrestore(&phy->lock, flags);
return false;
-found:
+ found_unlock:
spin_unlock_irqrestore(&phy->lock, flags);
return true;
}
@@ -1831,7 +1799,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
phy->allocated_dst = D40_ALLOC_FREE;
phy->allocated_src = D40_ALLOC_FREE;
is_free = true;
- goto out;
+ goto unlock;
}
/* Logical channel */
@@ -1847,8 +1815,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
is_free = ((phy->allocated_src | phy->allocated_dst) ==
D40_ALLOC_FREE);
-
-out:
+ unlock:
spin_unlock_irqrestore(&phy->lock, flags);
return is_free;
@@ -2047,7 +2014,7 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "stop failed\n");
- goto out;
+ goto mark_last_busy;
}
d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
@@ -2065,8 +2032,7 @@ static int d40_free_dma(struct d40_chan *d40c)
d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
-out:
-
+ mark_last_busy:
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
return res;
@@ -2094,8 +2060,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
D40_CHAN_POS(d40c->phy_chan->num);
if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
is_paused = true;
-
- goto _exit;
+ goto unlock;
}
if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
@@ -2105,7 +2070,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
chan_err(d40c, "Unknown direction\n");
- goto _exit;
+ goto unlock;
}
status = (status & D40_EVENTLINE_MASK(event)) >>
@@ -2113,7 +2078,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
if (status != D40_DMA_RUN)
is_paused = true;
-_exit:
+ unlock:
spin_unlock_irqrestore(&d40c->lock, flags);
return is_paused;
@@ -2198,7 +2163,7 @@ static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
unsigned int sg_len, unsigned long dma_flags)
{
- struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+ struct stedma40_chan_cfg *cfg;
struct d40_desc *desc;
int ret;
@@ -2206,17 +2171,18 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
if (!desc)
return NULL;
+ cfg = &chan->dma_cfg;
desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
cfg->dst_info.data_width);
if (desc->lli_len < 0) {
chan_err(chan, "Unaligned size\n");
- goto err;
+ goto free_desc;
}
ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
if (ret < 0) {
chan_err(chan, "Could not allocate lli\n");
- goto err;
+ goto free_desc;
}
desc->lli_current = 0;
@@ -2226,8 +2192,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
return desc;
-
-err:
+ free_desc:
d40_desc_free(chan, desc);
return NULL;
}
@@ -2238,8 +2203,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
enum dma_transfer_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
- dma_addr_t src_dev_addr = 0;
- dma_addr_t dst_dev_addr = 0;
+ dma_addr_t src_dev_addr;
+ dma_addr_t dst_dev_addr;
struct d40_desc *desc;
unsigned long flags;
int ret;
@@ -2253,11 +2218,13 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
if (desc == NULL)
- goto err;
+ goto unlock;
if (sg_next(&sg_src[sg_len - 1]) == sg_src)
desc->cyclic = true;
+ src_dev_addr = 0;
+ dst_dev_addr = 0;
if (direction == DMA_DEV_TO_MEM)
src_dev_addr = chan->runtime_addr;
else if (direction == DMA_MEM_TO_DEV)
@@ -2273,7 +2240,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (ret) {
chan_err(chan, "Failed to prepare %s sg job: %d\n",
chan_is_logical(chan) ? "log" : "phy", ret);
- goto err;
+ goto free_desc;
}
/*
@@ -2285,10 +2252,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
spin_unlock_irqrestore(&chan->lock, flags);
return &desc->txd;
-
-err:
- if (desc)
- d40_desc_free(chan, desc);
+ free_desc:
+ d40_desc_free(chan, desc);
+ unlock:
spin_unlock_irqrestore(&chan->lock, flags);
return NULL;
}
@@ -2426,7 +2392,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
err = d40_config_memcpy(d40c);
if (err) {
chan_err(d40c, "Failed to configure memcpy channel\n");
- goto fail;
+ goto mark_last_busy;
}
}
@@ -2434,7 +2400,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
d40c->configured = false;
- goto fail;
+ goto mark_last_busy;
}
pm_runtime_get_sync(d40c->base->dev);
@@ -2468,7 +2434,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
*/
if (is_free_phy)
d40_config_write(d40c);
-fail:
+ mark_last_busy:
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -2891,7 +2857,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
if (err) {
d40_err(base->dev, "Failed to register slave channels\n");
- goto failure1;
+ goto exit;
}
d40_chan_init(base, &base->dma_memcpy, base->log_chans,
@@ -2908,7 +2874,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
if (err) {
d40_err(base->dev,
"Failed to register memcpy only channels\n");
- goto failure2;
+ goto unregister_slave;
}
d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2926,14 +2892,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
if (err) {
d40_err(base->dev,
"Failed to register logical and physical capable channels\n");
- goto failure3;
+ goto unregister_memcpy;
}
return 0;
-failure3:
+ unregister_memcpy:
dma_async_device_unregister(&base->dma_memcpy);
-failure2:
+ unregister_slave:
dma_async_device_unregister(&base->dma_slave);
-failure1:
+ exit:
return err;
}
@@ -3144,11 +3110,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
- struct clk *clk = NULL;
- void __iomem *virtbase = NULL;
- struct resource *res = NULL;
- struct d40_base *base = NULL;
- int num_log_chans = 0;
+ struct clk *clk;
+ void __iomem *virtbase;
+ struct resource *res;
+ struct d40_base *base;
+ int num_log_chans;
int num_phy_chans;
int num_memcpy_chans;
int clk_ret = -EINVAL;
@@ -3160,27 +3126,27 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
d40_err(&pdev->dev, "No matching clock found\n");
- goto failure;
+ goto check_prepare_enabled;
}
clk_ret = clk_prepare_enable(clk);
if (clk_ret) {
d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
- goto failure;
+ goto disable_unprepare;
}
/* Get IO for DMAC base address */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
if (!res)
- goto failure;
+ goto disable_unprepare;
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O base") == NULL)
- goto failure;
+ goto release_region;
virtbase = ioremap(res->start, resource_size(res));
if (!virtbase)
- goto failure;
+ goto release_region;
/* This is just a regular AMBA PrimeCell ID actually */
for (pid = 0, i = 0; i < 4; i++)
@@ -3192,13 +3158,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (cid != AMBA_CID) {
d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
- goto failure;
+ goto unmap_io;
}
if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
AMBA_MANF_BITS(pid),
AMBA_VENDOR_ST);
- goto failure;
+ goto unmap_io;
}
/*
* HW revision:
@@ -3212,7 +3178,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
rev = AMBA_REV_BITS(pid);
if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
- goto failure;
+ goto unmap_io;
}
/* The number of physical channels on this HW */
@@ -3238,7 +3204,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL)
- goto failure;
+ goto unmap_io;
base->rev = rev;
base->clk = clk;
@@ -3283,65 +3249,66 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
}
- base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+ base->phy_res = kcalloc(num_phy_chans,
+ sizeof(*base->phy_res),
GFP_KERNEL);
if (!base->phy_res)
- goto failure;
+ goto free_base;
- base->lookup_phy_chans = kzalloc(num_phy_chans *
- sizeof(struct d40_chan *),
+ base->lookup_phy_chans = kcalloc(num_phy_chans,
+ sizeof(*base->lookup_phy_chans),
GFP_KERNEL);
if (!base->lookup_phy_chans)
- goto failure;
+ goto free_phy_res;
- base->lookup_log_chans = kzalloc(num_log_chans *
- sizeof(struct d40_chan *),
+ base->lookup_log_chans = kcalloc(num_log_chans,
+ sizeof(*base->lookup_log_chans),
GFP_KERNEL);
if (!base->lookup_log_chans)
- goto failure;
+ goto free_phy_chans;
- base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
- sizeof(d40_backup_regs_chan),
- GFP_KERNEL);
+ base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
+ sizeof(d40_backup_regs_chan),
+ GFP_KERNEL);
if (!base->reg_val_backup_chan)
- goto failure;
+ goto free_log_chans;
- base->lcla_pool.alloc_map =
- kzalloc(num_phy_chans * sizeof(struct d40_desc *)
- * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
+ base->lcla_pool.alloc_map = kcalloc(num_phy_chans
+ * D40_LCLA_LINK_PER_EVENT_GRP,
+ sizeof(*base->lcla_pool.alloc_map),
+ GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
- goto failure;
+ goto free_backup_chan;
base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (base->desc_slab == NULL)
- goto failure;
+ goto free_map;
return base;
-
-failure:
+ free_map:
+ kfree(base->lcla_pool.alloc_map);
+ free_backup_chan:
+ kfree(base->reg_val_backup_chan);
+ free_log_chans:
+ kfree(base->lookup_log_chans);
+ free_phy_chans:
+ kfree(base->lookup_phy_chans);
+ free_phy_res:
+ kfree(base->phy_res);
+ free_base:
+ kfree(base);
+ unmap_io:
+ iounmap(virtbase);
+ release_region:
+ release_mem_region(res->start, resource_size(res));
+ check_prepare_enabled:
if (!clk_ret)
+ disable_unprepare:
clk_disable_unprepare(clk);
if (!IS_ERR(clk))
clk_put(clk);
- if (virtbase)
- iounmap(virtbase);
- if (res)
- release_mem_region(res->start,
- resource_size(res));
- if (virtbase)
- iounmap(virtbase);
-
- if (base) {
- kfree(base->lcla_pool.alloc_map);
- kfree(base->reg_val_backup_chan);
- kfree(base->lookup_log_chans);
- kfree(base->lookup_phy_chans);
- kfree(base->phy_res);
- kfree(base);
- }
-
return NULL;
}
@@ -3404,20 +3371,18 @@ static int __init d40_lcla_allocate(struct d40_base *base)
struct d40_lcla_pool *pool = &base->lcla_pool;
unsigned long *page_list;
int i, j;
- int ret = 0;
+ int ret;
/*
* This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
* To full fill this hardware requirement without wasting 256 kb
* we allocate pages until we get an aligned one.
*/
- page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
- GFP_KERNEL);
-
- if (!page_list) {
- ret = -ENOMEM;
- goto failure;
- }
+ page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
+ sizeof(*page_list),
+ GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
/* Calculating how many pages that are required */
base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
@@ -3433,7 +3398,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
- goto failure;
+ goto free_page_list;
}
if ((virt_to_phys((void *)page_list[i]) &
@@ -3460,7 +3425,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
GFP_KERNEL);
if (!base->lcla_pool.base_unaligned) {
ret = -ENOMEM;
- goto failure;
+ goto free_page_list;
}
base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
@@ -3473,12 +3438,13 @@ static int __init d40_lcla_allocate(struct d40_base *base)
if (dma_mapping_error(base->dev, pool->dma_addr)) {
pool->dma_addr = 0;
ret = -ENOMEM;
- goto failure;
+ goto free_page_list;
}
writel(virt_to_phys(base->lcla_pool.base),
base->virtbase + D40_DREG_LCLA);
-failure:
+ ret = 0;
+ free_page_list:
kfree(page_list);
return ret;
}
@@ -3490,9 +3456,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
const __be32 *list;
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct stedma40_platform_data),
- GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -3574,7 +3538,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (!res) {
ret = -ENOENT;
d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
- goto failure;
+ goto destroy_cache;
}
base->lcpa_size = resource_size(res);
base->phy_lcpa = res->start;
@@ -3583,7 +3547,7 @@ static int __init d40_probe(struct platform_device *pdev)
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
- goto failure;
+ goto destroy_cache;
}
/* We make use of ESRAM memory for this. */
@@ -3599,7 +3563,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (!base->lcpa_base) {
ret = -ENOMEM;
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
- goto failure;
+ goto destroy_cache;
}
/* If lcla has to be located in ESRAM we don't need to allocate */
if (base->plat_data->use_esram_lcla) {
@@ -3609,14 +3573,14 @@ static int __init d40_probe(struct platform_device *pdev)
ret = -ENOENT;
d40_err(&pdev->dev,
"No \"lcla_esram\" memory resource\n");
- goto failure;
+ goto destroy_cache;
}
base->lcla_pool.base = ioremap(res->start,
resource_size(res));
if (!base->lcla_pool.base) {
ret = -ENOMEM;
d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
- goto failure;
+ goto destroy_cache;
}
writel(res->start, base->virtbase + D40_DREG_LCLA);
@@ -3624,7 +3588,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = d40_lcla_allocate(base);
if (ret) {
d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ goto destroy_cache;
}
}
@@ -3635,7 +3599,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
d40_err(&pdev->dev, "No IRQ defined\n");
- goto failure;
+ goto destroy_cache;
}
if (base->plat_data->use_esram_lcla) {
@@ -3645,7 +3609,7 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
ret = PTR_ERR(base->lcpa_regulator);
base->lcpa_regulator = NULL;
- goto failure;
+ goto destroy_cache;
}
ret = regulator_enable(base->lcpa_regulator);
@@ -3654,7 +3618,7 @@ static int __init d40_probe(struct platform_device *pdev)
"Failed to enable lcpa_regulator\n");
regulator_put(base->lcpa_regulator);
base->lcpa_regulator = NULL;
- goto failure;
+ goto destroy_cache;
}
}
@@ -3669,13 +3633,13 @@ static int __init d40_probe(struct platform_device *pdev)
ret = d40_dmaengine_init(base, num_reserved_chans);
if (ret)
- goto failure;
+ goto destroy_cache;
base->dev->dma_parms = &base->dma_parms;
ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
if (ret) {
d40_err(&pdev->dev, "Failed to set dma max seg size\n");
- goto failure;
+ goto destroy_cache;
}
d40_hw_init(base);
@@ -3689,8 +3653,7 @@ static int __init d40_probe(struct platform_device *pdev)
dev_info(base->dev, "initialized\n");
return 0;
-
-failure:
+ destroy_cache:
kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
@@ -3732,7 +3695,7 @@ failure:
kfree(base->lookup_phy_chans);
kfree(base->phy_res);
kfree(base);
-report_failure:
+ report_failure:
d40_err(&pdev->dev, "probe failed\n");
return ret;
}
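The allocation changes in d40_hw_detect_init() swap open-coded kzalloc(n * size) for kcalloc()/kmalloc_array(), which check the multiplication for overflow. A minimal before/after sketch with an invented element type:

#include <linux/slab.h>

struct example_item { int a, b; };

static struct example_item *example_alloc(unsigned int n)
{
	/* old style:  kzalloc(n * sizeof(struct example_item), GFP_KERNEL)
	 * overflows silently for large n; kcalloc() returns NULL instead. */
	return kcalloc(n, sizeof(struct example_item), GFP_KERNEL);
}
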
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 047476a1383d..307547f4848d 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -954,7 +954,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}
-void stm32_dma_set_config(struct stm32_dma_chan *chan,
+static void stm32_dma_set_config(struct stm32_dma_chan *chan,
struct stm32_dma_cfg *cfg)
{
stm32_dma_clear_reg(&chan->chan_reg);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 3835fcde3545..83461994e418 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1011,6 +1011,12 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_vchans = 37,
};
+static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
+ .nr_max_channels = 8,
+ .nr_max_requests = 28,
+ .nr_max_vchans = 39,
+};
+
/*
* The H3 has 12 physical channels, a maximum DRQ port id of 27,
* and a total of 34 usable source and destination endpoints.
@@ -1025,6 +1031,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+ { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ /* sentinel */ }
};
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 6ab9eb98588a..3722b9d8d9fe 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
static void tegra_dma_tasklet(unsigned long data)
{
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
unsigned long flags;
int cb_count;
@@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
dma_desc = list_first_entry(&tdc->cb_desc,
typeof(*dma_desc), cb_node);
list_del(&dma_desc->cb_node);
- callback = dma_desc->txd.callback;
- callback_param = dma_desc->txd.callback_param;
+ dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
spin_unlock_irqrestore(&tdc->lock, flags);
- while (cb_count-- && callback)
- callback(callback_param);
+ while (cb_count--)
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&tdc->lock, flags);
}
spin_unlock_irqrestore(&tdc->lock, flags);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index c4b121c4559d..b10cbaa82ff5 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -670,7 +670,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
struct resource *res;
- struct clk *clk;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -697,18 +696,9 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (ret)
return ret;
- clk = clk_get(&pdev->dev, "d_audio");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "ADMA clock not found\n");
- ret = PTR_ERR(clk);
- goto clk_destroy;
- }
-
- ret = pm_clk_add_clk(&pdev->dev, clk);
- if (ret) {
- clk_put(clk);
+ ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
+ if (ret)
goto clk_destroy;
- }
pm_runtime_enable(&pdev->dev);
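of_pm_clk_add_clk() looks the clock up by its clock-names entry and registers it with the PM clock framework in one step, which is what lets tegra210-adma drop the clk_get()/pm_clk_add_clk()/clk_put() dance. A sketch of the surrounding probe-time usage, assuming pm_clk_create() has not been called elsewhere:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int example_setup_pm_clk(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* Resolves the "d_audio" entry of clock-names and adds it. */
	ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
	if (ret) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	return 0;
}
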
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 5ae294b256a7..3f24aeb48c0e 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -18,15 +18,19 @@
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
+static const u32 ti_xbar_type[] = {
+ [TI_XBAR_DRA7] = TI_XBAR_DRA7,
+ [TI_XBAR_AM335X] = TI_XBAR_AM335X,
+};
static const struct of_device_id ti_dma_xbar_match[] = {
{
.compatible = "ti,dra7-dma-crossbar",
- .data = (void *)TI_XBAR_DRA7,
+ .data = &ti_xbar_type[TI_XBAR_DRA7],
},
{
.compatible = "ti,am335x-edma-crossbar",
- .data = (void *)TI_XBAR_AM335X,
+ .data = &ti_xbar_type[TI_XBAR_AM335X],
},
{},
};
@@ -190,9 +194,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
#define TI_DRA7_XBAR_OUTPUTS 127
#define TI_DRA7_XBAR_INPUTS 256
-#define TI_XBAR_EDMA_OFFSET 0
-#define TI_XBAR_SDMA_OFFSET 1
-
struct ti_dra7_xbar_data {
void __iomem *iomem;
@@ -280,18 +281,25 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
+#define TI_XBAR_EDMA_OFFSET 0
+#define TI_XBAR_SDMA_OFFSET 1
+static const u32 ti_dma_offset[] = {
+ [TI_XBAR_EDMA_OFFSET] = 0,
+ [TI_XBAR_SDMA_OFFSET] = 1,
+};
+
static const struct of_device_id ti_dra7_master_match[] = {
{
.compatible = "ti,omap4430-sdma",
- .data = (void *)TI_XBAR_SDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
},
{
.compatible = "ti,edma3",
- .data = (void *)TI_XBAR_EDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
},
{
.compatible = "ti,edma3-tpcc",
- .data = (void *)TI_XBAR_EDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
},
{},
};
@@ -311,7 +319,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
struct property *prop;
struct resource *res;
u32 safe_val;
- size_t sz;
+ int sz;
void __iomem *iomem;
int i, ret;
@@ -395,7 +403,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
xbar->dmarouter.dev = &pdev->dev;
xbar->dmarouter.route_free = ti_dra7_xbar_free;
- xbar->dma_offset = (u32)match->data;
+ xbar->dma_offset = *(u32 *)match->data;
mutex_init(&xbar->mutex);
platform_set_drvdata(pdev, xbar);
@@ -428,7 +436,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
if (unlikely(!match))
return -EINVAL;
- switch ((u32)match->data) {
+ switch (*(u32 *)match->data) {
case TI_XBAR_DRA7:
ret = ti_dra7_xbar_probe(pdev);
break;
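Pointing .data at an entry of a small const table, as the crossbar driver now does, avoids casting integers to void * and back (which warns on 64-bit builds). A generic sketch of the pattern with invented compatible strings, retrieving the value via of_device_get_match_data():

#include <linux/of.h>
#include <linux/of_device.h>

static const u32 example_type[] = { 0, 1 };

/* Registered through the driver's .of_match_table. */
static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,ip-v1", .data = &example_type[0] },
	{ .compatible = "vendor,ip-v2", .data = &example_type[1] },
	{ /* sentinel */ }
};

static u32 example_get_type(struct device *dev)
{
	const u32 *type = of_device_get_match_data(dev);

	return type ? *type : 0;
}
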
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index e82745aa42a8..896bafb7a532 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
static void __td_finish(struct timb_dma_chan *td_chan)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc;
@@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
dma_cookie_complete(txd);
td_chan->ongoing = false;
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
list_move(&td_desc->desc_node, &td_chan->free_list);
@@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static u32 __td_ier_mask(struct timb_dma *td)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 7632290e7c14..4d8c7b9078fd 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -403,16 +403,14 @@ static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
struct txx9dmac_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
dma_cookie_complete(txd);
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
txx9dmac_sync_desc_for_cpu(dc, desc);
list_splice_init(&desc->tx_list, &dc->free_list);
@@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_run_dependencies(txd);
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a35c211857dd..e47fc9b0944f 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
{
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
struct virt_dma_desc *vd;
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(head);
spin_lock_irq(&vc->lock);
@@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
vd = vc->cyclic;
if (vd) {
vc->cyclic = NULL;
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
}
spin_unlock_irq(&vc->lock);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
while (!list_empty(&head)) {
vd = list_first_entry(&head, struct virt_dma_desc, node);
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
if (dmaengine_desc_test_reuse(&vd->tx))
@@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
else
vc->desc_free(vd);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index d9731ca5e262..3f776a46a29c 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -45,6 +45,8 @@ static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
/**
* vchan_tx_prep - prepare a descriptor
@@ -55,8 +57,6 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
struct virt_dma_desc *vd, unsigned long tx_flags)
{
- extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
- extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
@@ -123,10 +123,8 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
*/
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
- if (list_empty(&vc->desc_issued))
- return NULL;
-
- return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
}
/**
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9cb93c5b655d..8b693b712d0f 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -606,12 +606,10 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
return;
dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
/* Run the link descriptor callback function */
- if (tx->callback)
- tx->callback(tx->callback_param);
-
- dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
/* Run any dependencies */
dma_run_dependencies(tx);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4e223d094433..8288fe4d17c3 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
list_del(&desc->node);
/* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&chan->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&chan->lock, flags);
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index dff1a4a6dc1b..82d85cce81f8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -266,6 +266,13 @@ config EDAC_MPC85XX
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, T4240
+config EDAC_LAYERSCAPE
+ tristate "Freescale Layerscape DDR"
+ depends on EDAC_MM_EDAC && ARCH_LAYERSCAPE
+ help
+ Support for error detection and correction on Freescale memory
+ controllers on Layerscape SoCs.
+
config EDAC_MV64X60
tristate "Marvell MV64x60"
depends on EDAC_MM_EDAC && MV64X60
@@ -406,6 +413,41 @@ config EDAC_ALTERA_ETHERNET
Support for error detection and correction on the
Altera Ethernet FIFO Memory for Altera SoCs.
+config EDAC_ALTERA_NAND
+ bool "Altera NAND FIFO ECC"
+ depends on EDAC_ALTERA=y && MTD_NAND_DENALI
+ help
+ Support for error detection and correction on the
+ Altera NAND FIFO Memory for Altera SoCs.
+
+config EDAC_ALTERA_DMA
+ bool "Altera DMA FIFO ECC"
+ depends on EDAC_ALTERA=y && PL330_DMA=y
+ help
+ Support for error detection and correction on the
+ Altera DMA FIFO Memory for Altera SoCs.
+
+config EDAC_ALTERA_USB
+ bool "Altera USB FIFO ECC"
+ depends on EDAC_ALTERA=y && USB_DWC2
+ help
+ Support for error detection and correction on the
+ Altera USB FIFO Memory for Altera SoCs.
+
+config EDAC_ALTERA_QSPI
+ bool "Altera QSPI FIFO ECC"
+ depends on EDAC_ALTERA=y && SPI_CADENCE_QUADSPI
+ help
+ Support for error detection and correction on the
+ Altera QSPI FIFO Memory for Altera SoCs.
+
+config EDAC_ALTERA_SDMMC
+ bool "Altera SDMMC FIFO ECC"
+ depends on EDAC_ALTERA=y && MMC_DW
+ help
+ Support for error detection and correction on the
+ Altera SDMMC FIFO Memory for Altera SoCs.
+
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
depends on EDAC_MM_EDAC && ARCH_ZYNQ
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 986049925b08..88e472e8b9a9 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -51,7 +51,13 @@ amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
-obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
+
+mpc85xx_edac_mod-y := fsl_ddr_edac.o mpc85xx_edac.o
+obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o
+
+layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o
+obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o
+
obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 2398d0701f5b..58d3e2b39b5b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -203,7 +203,7 @@ static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
if (!mci->debugfs)
return;
- edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+ edac_debugfs_create_file("altr_trigger", S_IWUSR, mci->debugfs, mci,
&altr_sdr_mc_debug_inject_fops);
}
@@ -680,7 +680,7 @@ static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
if (!drvdata->debugfs_dir)
return;
- if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
+ if (!edac_debugfs_create_file("altr_trigger", S_IWUSR,
drvdata->debugfs_dir, edac_dci,
priv->inject_fops))
debugfs_remove_recursive(drvdata->debugfs_dir);
@@ -1108,7 +1108,6 @@ static const struct edac_device_prv_data ocramecc_data = {
.setup = altr_check_ecc_deps,
.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
- .dbgfs_name = "altr_ocram_trigger",
.alloc_mem = ocram_alloc_mem,
.free_mem = ocram_free_mem,
.ecc_enable_mask = ALTR_OCR_ECC_EN,
@@ -1125,7 +1124,6 @@ static const struct edac_device_prv_data a10_ocramecc_data = {
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
- .dbgfs_name = "altr_ocram_trigger",
.ecc_enable_mask = ALTR_A10_OCRAM_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
@@ -1228,7 +1226,6 @@ static const struct edac_device_prv_data l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = 0,
.ue_clear_mask = 0,
- .dbgfs_name = "altr_l2_trigger",
.alloc_mem = l2_alloc_mem,
.free_mem = l2_free_mem,
.ecc_enable_mask = ALTR_L2_ECC_EN,
@@ -1244,7 +1241,6 @@ static const struct edac_device_prv_data a10_l2ecc_data = {
.ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
.ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_L2,
- .dbgfs_name = "altr_l2_trigger",
.alloc_mem = l2_alloc_mem,
.free_mem = l2_free_mem,
.ecc_enable_mask = ALTR_A10_L2_ECC_EN_CTL,
@@ -1266,7 +1262,6 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.setup = altr_check_ecc_deps,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
- .dbgfs_name = "altr_trigger",
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
.ce_set_mask = ALTR_A10_ECC_TSERRA,
@@ -1285,6 +1280,292 @@ early_initcall(socfpga_init_ethernet_ecc);
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
+/********************** NAND Device Functions **********************/
+
+#ifdef CONFIG_EDAC_ALTERA_NAND
+
+static const struct edac_device_prv_data a10_nandecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_nand_ecc(void)
+{
+ return altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
+}
+
+early_initcall(socfpga_init_nand_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_NAND */
+
+/********************** DMA Device Functions **********************/
+
+#ifdef CONFIG_EDAC_ALTERA_DMA
+
+static const struct edac_device_prv_data a10_dmaecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_dma_ecc(void)
+{
+ return altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
+}
+
+early_initcall(socfpga_init_dma_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_DMA */
+
+/********************** USB Device Functions **********************/
+
+#ifdef CONFIG_EDAC_ALTERA_USB
+
+static const struct edac_device_prv_data a10_usbecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_usb_ecc(void)
+{
+ return altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
+}
+
+early_initcall(socfpga_init_usb_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_USB */
+
+/********************** QSPI Device Functions **********************/
+
+#ifdef CONFIG_EDAC_ALTERA_QSPI
+
+static const struct edac_device_prv_data a10_qspiecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_qspi_ecc(void)
+{
+ return altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
+}
+
+early_initcall(socfpga_init_qspi_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_QSPI */
+
+/********************* SDMMC Device Functions **********************/
+
+#ifdef CONFIG_EDAC_ALTERA_SDMMC
+
+static const struct edac_device_prv_data a10_sdmmceccb_data;
+static int altr_portb_setup(struct altr_edac_device_dev *device)
+{
+ struct edac_device_ctl_info *dci;
+ struct altr_edac_device_dev *altdev;
+ char *ecc_name = "sdmmcb-ecc";
+ int edac_idx, rc;
+ struct device_node *np;
+ const struct edac_device_prv_data *prv = &a10_sdmmceccb_data;
+
+ rc = altr_check_ecc_deps(device);
+ if (rc)
+ return rc;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
+ if (!np) {
+ edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
+ return -ENODEV;
+ }
+
+ /* Create the PortB EDAC device */
+ edac_idx = edac_device_alloc_index();
+ dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, 1,
+ ecc_name, 1, 0, NULL, 0, edac_idx);
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: Unable to allocate PortB EDAC device\n",
+ ecc_name);
+ return -ENOMEM;
+ }
+
+ /* Initialize the PortB EDAC device structure from PortA structure */
+ altdev = dci->pvt_info;
+ *altdev = *device;
+
+ if (!devres_open_group(&altdev->ddev, altr_portb_setup, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Update PortB specific values */
+ altdev->edac_dev_name = ecc_name;
+ altdev->edac_idx = edac_idx;
+ altdev->edac_dev = dci;
+ altdev->data = prv;
+ dci->dev = &altdev->ddev;
+ dci->ctl_name = "Altera ECC Manager";
+ dci->mod_name = ecc_name;
+ dci->dev_name = ecc_name;
+
+ /* Update the IRQs for PortB */
+ altdev->sb_irq = irq_of_parse_and_map(np, 2);
+ if (!altdev->sb_irq) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB SBIRQ alloc\n");
+ rc = -ENODEV;
+ goto err_release_group_1;
+ }
+ rc = devm_request_irq(&altdev->ddev, altdev->sb_irq,
+ prv->ecc_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ ecc_name, altdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "PortB SBERR IRQ error\n");
+ goto err_release_group_1;
+ }
+
+ altdev->db_irq = irq_of_parse_and_map(np, 3);
+ if (!altdev->db_irq) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB DBIRQ alloc\n");
+ rc = -ENODEV;
+ goto err_release_group_1;
+ }
+ rc = devm_request_irq(&altdev->ddev, altdev->db_irq,
+ prv->ecc_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ ecc_name, altdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "PortB DBERR IRQ error\n");
+ goto err_release_group_1;
+ }
+
+ rc = edac_device_add_device(dci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "edac_device_add_device portB failed\n");
+ rc = -ENOMEM;
+ goto err_release_group_1;
+ }
+ altr_create_edacdev_dbgfs(dci, prv);
+
+ list_add(&altdev->next, &altdev->edac->a10_ecc_devices);
+
+ devres_remove_group(&altdev->ddev, altr_portb_setup);
+
+ return 0;
+
+err_release_group_1:
+ edac_device_free_ctl_info(dci);
+ devres_release_group(&altdev->ddev, altr_portb_setup);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error setting up EDAC device: %d\n", ecc_name, rc);
+ return rc;
+}
+
+static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
+{
+ struct altr_edac_device_dev *ad = dev_id;
+ void __iomem *base = ad->base;
+ const struct edac_device_prv_data *priv = ad->data;
+
+ if (irq == ad->sb_irq) {
+ writel(priv->ce_clear_mask,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ce(ad->edac_dev, 0, 0, ad->edac_dev_name);
+ return IRQ_HANDLED;
+ } else if (irq == ad->db_irq) {
+ writel(priv->ue_clear_mask,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ue(ad->edac_dev, 0, 0, ad->edac_dev_name);
+ return IRQ_HANDLED;
+ }
+
+ WARN_ONCE(1, "Unhandled IRQ%d on Port B.", irq);
+
+ return IRQ_NONE;
+}
+
+static const struct edac_device_prv_data a10_sdmmcecca_data = {
+ .setup = altr_portb_setup,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_set_mask = ALTR_A10_ECC_DERRPENA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static const struct edac_device_prv_data a10_sdmmceccb_data = {
+ .setup = altr_portb_setup,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENB,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENB,
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRB,
+ .ue_set_mask = ALTR_A10_ECC_TDERRB,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq_portb,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_sdmmc_ecc(void)
+{
+ int rc = -ENODEV;
+ struct device_node *child = of_find_compatible_node(NULL, NULL,
+ "altr,socfpga-sdmmc-ecc");
+ if (!child) {
+ edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(child))
+ goto exit;
+
+ if (validate_parent_available(child))
+ goto exit;
+
+ rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
+ a10_sdmmcecca_data.ecc_enable_mask, 1);
+exit:
+ of_node_put(child);
+ return rc;
+}
+
+early_initcall(socfpga_init_sdmmc_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_SDMMC */
+
/********************* Arria10 EDAC Device Functions *************************/
static const struct of_device_id altr_edac_a10_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
@@ -1298,6 +1579,21 @@ static const struct of_device_id altr_edac_a10_device_of_match[] = {
{ .compatible = "altr,socfpga-eth-mac-ecc",
.data = &a10_enetecc_data },
#endif
+#ifdef CONFIG_EDAC_ALTERA_NAND
+ { .compatible = "altr,socfpga-nand-ecc", .data = &a10_nandecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_DMA
+ { .compatible = "altr,socfpga-dma-ecc", .data = &a10_dmaecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_USB
+ { .compatible = "altr,socfpga-usb-ecc", .data = &a10_usbecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_QSPI
+ { .compatible = "altr,socfpga-qspi-ecc", .data = &a10_qspiecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_SDMMC
+ { .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
+#endif
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
@@ -1451,11 +1747,11 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
rc = -ENODEV;
goto err_release_group1;
}
- rc = devm_request_irq(edac->dev, altdev->sb_irq,
- prv->ecc_irq_handler,
- IRQF_SHARED, ecc_name, altdev);
+ rc = devm_request_irq(edac->dev, altdev->sb_irq, prv->ecc_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ ecc_name, altdev);
if (rc) {
- edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
goto err_release_group1;
}
@@ -1465,9 +1761,9 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
rc = -ENODEV;
goto err_release_group1;
}
- rc = devm_request_irq(edac->dev, altdev->db_irq,
- prv->ecc_irq_handler,
- IRQF_SHARED, ecc_name, altdev);
+ rc = devm_request_irq(edac->dev, altdev->db_irq, prv->ecc_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ ecc_name, altdev);
if (rc) {
edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
goto err_release_group1;
@@ -1526,7 +1822,7 @@ static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-struct irq_domain_ops a10_eccmgr_ic_ops = {
+static struct irq_domain_ops a10_eccmgr_ic_ops = {
.map = a10_eccmgr_irqdomain_map,
.xlate = irq_domain_xlate_twocell,
};
@@ -1584,15 +1880,19 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
- if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc"))
- altr_edac_a10_device_add(edac, child);
- else if ((of_device_is_compatible(child,
- "altr,socfpga-a10-ocram-ecc")) ||
- (of_device_is_compatible(child,
- "altr,socfpga-eth-mac-ecc")))
+
+ if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-a10-ocram-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-eth-mac-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-nand-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
+ of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
+
altr_edac_a10_device_add(edac, child);
- else if (of_device_is_compatible(child,
- "altr,sdram-edac-a10"))
+
+ else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 687d8e754d36..cbc96290f743 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -250,6 +250,8 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INTTEST_OFST 0x24
#define ALTR_A10_ECC_TSERRA BIT(0)
#define ALTR_A10_ECC_TDERRA BIT(8)
+#define ALTR_A10_ECC_TSERRB BIT(16)
+#define ALTR_A10_ECC_TDERRB BIT(24)
/* ECC Manager Defines */
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
@@ -288,6 +290,9 @@ struct altr_sdram_mc_data {
/* Arria 10 Ethernet ECC Management Group Defines */
#define ALTR_A10_COMMON_ECC_EN_CTL BIT(0)
+/* Arria 10 SDMMC ECC Management Group Defines */
+#define ALTR_A10_SDMMC_IRQ_MASK (BIT(16) | BIT(15))
+
/* A10 ECC Controller memory initialization timeout */
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
@@ -298,7 +303,6 @@ struct edac_device_prv_data {
int ce_clear_mask;
int ue_clear_mask;
int irq_status_mask;
- char dbgfs_name[20];
void * (*alloc_mem)(size_t size, void **other);
void (*free_mem)(void *p, size_t size, void *other);
int ecc_enable_mask;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8c0ec2128907..ee181c53626f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1425,11 +1425,17 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
if (intlv_addr & 0x2) {
u8 shift = intlv_addr & 0x1 ? 9 : 6;
- u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
return ((sys_addr >> shift) & 1) ^ temp;
}
+ if (intlv_addr & 0x4) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 8;
+
+ return (sys_addr >> shift) & 1;
+ }
+
return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
}
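Replacing hweight_long(...) % 2 with ... & 1 works because the low bit of a population count is its parity; a minimal userspace sketch of that identity on the same 5-bit address field, using a made-up sys_addr:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sys_addr = 0x00ba5e0000ULL;		/* hypothetical address */
	unsigned int field = (sys_addr >> 16) & 0x1F;	/* field hashed above */
	unsigned int via_mod = __builtin_popcount(field) % 2;
	unsigned int via_and = __builtin_popcount(field) & 1;

	printf("field=0x%x parity%%2=%u parity&1=%u\n", field, via_mod, via_and);
	return via_mod == via_and ? 0 : 1;
}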
@@ -1726,8 +1732,11 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
return -EINVAL;
- channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
- num_dcts_intlv, dct_sel);
+ if (pvt->model >= 0x60)
+ channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
+ else
+ channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
+ num_dcts_intlv, dct_sel);
/* Verify we stay within the MAX number of channels allowed */
if (channel > 3)
@@ -2961,6 +2970,15 @@ static void setup_pci_device(void)
}
}
+static const struct x86_cpu_id amd64_cpuids[] = {
+ { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
+
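The x86cpu table above is what generates the modalias used for module autoloading; a driver that also wants to refuse to load on other CPUs can match against the same table early in its init, as in this minimal sketch (the placement is an assumption, not part of this hunk):

#include <asm/cpu_device_id.h>

/* sketch: report whether a supported AMD family is present */
static bool amd64_cpu_supported(void)
{
	return x86_match_cpu(amd64_cpuids) != NULL;
}

amd64_edac_init() could then return -ENODEV when amd64_cpu_supported() is false.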
static int __init amd64_edac_init(void)
{
int err = -ENODEV;
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
new file mode 100644
index 000000000000..9774f52f0c3e
--- /dev/null
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -0,0 +1,633 @@
+/*
+ * Freescale Memory Controller kernel module
+ *
+ * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
+ * ARM-based Layerscape SoCs including LS2xxx. Originally split
+ * out from mpc85xx_edac EDAC driver.
+ *
+ * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/edac.h>
+#include <linux/smp.h>
+#include <linux/gfp.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include "edac_module.h"
+#include "edac_core.h"
+#include "fsl_ddr_edac.h"
+
+#define EDAC_MOD_STR "fsl_ddr_edac"
+
+static int edac_mc_idx;
+
+static u32 orig_ddr_err_disable;
+static u32 orig_ddr_err_sbe;
+static bool little_endian;
+
+static inline u32 ddr_in32(void __iomem *addr)
+{
+ return little_endian ? ioread32(addr) : ioread32be(addr);
+}
+
+static inline void ddr_out32(void __iomem *addr, u32 value)
+{
+ if (little_endian)
+ iowrite32(value, addr);
+ else
+ iowrite32be(value, addr);
+}
+
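These accessors exist because the same DDR controller block is mapped big-endian on the Power SoCs and little-endian on Layerscape, selected by the "little-endian" device-tree property; a small userspace sketch of how the two views of one register differ (the byte values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* the same four bytes, as they would sit in the register */
	uint8_t raw[4] = { 0x12, 0x34, 0x56, 0x78 };

	uint32_t as_be = ((uint32_t)raw[0] << 24) | (raw[1] << 16) |
			 (raw[2] << 8) | raw[3];	/* ioread32be() view */
	uint32_t as_le = ((uint32_t)raw[3] << 24) | (raw[2] << 16) |
			 (raw[1] << 8) | raw[0];	/* ioread32() view */

	printf("big-endian read: 0x%08x, little-endian read: 0x%08x\n",
	       as_be, as_le);
	return 0;
}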
+/************************ MC SYSFS parts ***********************************/
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
+}
+
+static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
+}
+
+static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
+}
+
+static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ unsigned long val;
+ int rc;
+
+ if (isdigit(*data)) {
+ rc = kstrtoul(data, 0, &val);
+ if (rc)
+ return rc;
+
+ ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ unsigned long val;
+ int rc;
+
+ if (isdigit(*data)) {
+ rc = kstrtoul(data, 0, &val);
+ if (rc)
+ return rc;
+
+ ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ unsigned long val;
+ int rc;
+
+ if (isdigit(*data)) {
+ rc = kstrtoul(data, 0, &val);
+ if (rc)
+ return rc;
+
+ ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
+ return count;
+ }
+ return 0;
+}
+
+DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
+DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
+DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
+ fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
+
+static struct attribute *fsl_ddr_dev_attrs[] = {
+ &dev_attr_inject_data_hi.attr,
+ &dev_attr_inject_data_lo.attr,
+ &dev_attr_inject_ctrl.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(fsl_ddr_dev);
+
+/**************************** MC Err device ***************************/
+
+/*
+ * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
+ * MPC8572 User's Manual. Each line represents a syndrome bit column as a
+ * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
+ * below correspond to Freescale's manuals.
+ */
+static unsigned int ecc_table[16] = {
+ /* MSB LSB */
+ /* [0:31] [32:63] */
+ 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
+ 0x00ff00ff, 0x00fff0ff,
+ 0x0f0f0f0f, 0x0f0fff00,
+ 0x11113333, 0x7777000f,
+ 0x22224444, 0x8888222f,
+ 0x44448888, 0xffff4441,
+ 0x8888ffff, 0x11118882,
+ 0xffff1111, 0x22221114, /* Syndrome bit 0 */
+};
+
+/*
+ * Calculate the correct ECC value for a 64-bit value specified by high:low
+ */
+static u8 calculate_ecc(u32 high, u32 low)
+{
+ u32 mask_low;
+ u32 mask_high;
+ int bit_cnt;
+ u8 ecc = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < 8; i++) {
+ mask_high = ecc_table[i * 2];
+ mask_low = ecc_table[i * 2 + 1];
+ bit_cnt = 0;
+
+ for (j = 0; j < 32; j++) {
+ if ((mask_high >> j) & 1)
+ bit_cnt ^= (high >> j) & 1;
+ if ((mask_low >> j) & 1)
+ bit_cnt ^= (low >> j) & 1;
+ }
+
+ ecc |= bit_cnt << i;
+ }
+
+ return ecc;
+}
+
+/*
+ * Create the syndrome code which is generated if the data line specified by
+ * 'bit' failed. E.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
+ * User's Manual and 9-61 in the MPC8572 User's Manual.
+ */
+static u8 syndrome_from_bit(unsigned int bit) {
+ int i;
+ u8 syndrome = 0;
+
+ /*
+ * Cycle through the upper or lower 32-bit portion of each value in
+ * ecc_table depending on whether 'bit' is in the upper or lower half of
+ * 64-bit data.
+ */
+ for (i = bit < 32; i < 16; i += 2)
+ syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
+
+ return syndrome;
+}
+
+/*
+ * Decode data and ecc syndrome to determine what went wrong
+ * Note: This can only decode single-bit errors
+ */
+static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
+ int *bad_data_bit, int *bad_ecc_bit)
+{
+ int i;
+ u8 syndrome;
+
+ *bad_data_bit = -1;
+ *bad_ecc_bit = -1;
+
+ /*
+ * Calculate the ECC of the captured data and XOR it with the captured
+ * ECC to find an ECC syndrome value we can search for
+ */
+ syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
+
+ /* Check if a data line is stuck... */
+ for (i = 0; i < 64; i++) {
+ if (syndrome == syndrome_from_bit(i)) {
+ *bad_data_bit = i;
+ return;
+ }
+ }
+
+ /* If data is correct, check ECC bits for errors... */
+ for (i = 0; i < 8; i++) {
+ if ((syndrome >> i) & 0x1) {
+ *bad_ecc_bit = i;
+ return;
+ }
+ }
+}
+
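Because each ECC column in ecc_table is linear, flipping one data bit changes the computed check bits by exactly that bit's syndrome column, which is what sbe_ecc_decode() searches for; a self-contained userspace sketch of the same table walk on arbitrary test data:

#include <stdio.h>
#include <stdint.h>

/* same column masks as ecc_table[] above */
static const uint32_t tbl[16] = {
	0xf00fe11e, 0xc33c0ff7, 0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00, 0x11113333, 0x7777000f,
	0x22224444, 0x8888222f, 0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882, 0xffff1111, 0x22221114,
};

static uint8_t ecc_of(uint32_t high, uint32_t low)
{
	uint8_t ecc = 0;

	for (int i = 0; i < 8; i++) {
		uint32_t v = (high & tbl[2 * i]) ^ (low & tbl[2 * i + 1]);

		ecc |= (uint8_t)(__builtin_popcount(v) & 1) << i;
	}
	return ecc;
}

static uint8_t syndrome_of_bit(unsigned int bit)
{
	uint8_t s = 0;

	for (int i = bit < 32; i < 16; i += 2)
		s |= ((tbl[i] >> (bit % 32)) & 1) << (i / 2);
	return s;
}

int main(void)
{
	uint32_t high = 0xdeadbeef, low = 0x01234567; /* arbitrary test word */
	unsigned int bad = 37;		/* flip data bit 37 (high-word bit 5) */
	uint32_t bad_high = high ^ (1u << (bad - 32));
	uint8_t syndrome = ecc_of(high, low) ^ ecc_of(bad_high, low);

	for (unsigned int i = 0; i < 64; i++)
		if (syndrome == syndrome_of_bit(i))
			printf("decoded faulty data bit: %u (expected %u)\n",
			       i, bad);
	return 0;
}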
+#define make64(high, low) (((u64)(high) << 32) | (low))
+
+static void fsl_mc_check(struct mem_ctl_info *mci)
+{
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 bus_width;
+ u32 err_detect;
+ u32 syndrome;
+ u64 err_addr;
+ u32 pfn;
+ int row_index;
+ u32 cap_high;
+ u32 cap_low;
+ int bad_data_bit;
+ int bad_ecc_bit;
+
+ err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ if (!err_detect)
+ return;
+
+ fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
+ err_detect);
+
+ /* no more processing if not ECC bit errors */
+ if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+ return;
+ }
+
+ syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
+
+ /* Mask off appropriate bits of syndrome based on bus width */
+ bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
+ DSC_DBW_MASK) ? 32 : 64;
+ if (bus_width == 64)
+ syndrome &= 0xff;
+ else
+ syndrome &= 0xffff;
+
+ err_addr = make64(
+ ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
+ ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
+ pfn = err_addr >> PAGE_SHIFT;
+
+ for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
+ csrow = mci->csrows[row_index];
+ if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
+ break;
+ }
+
+ cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
+ cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
+
+ /*
+ * Analyze single-bit errors on 64-bit wide buses
+ * TODO: Add support for 32-bit wide buses
+ */
+ if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
+ sbe_ecc_decode(cap_high, cap_low, syndrome,
+ &bad_data_bit, &bad_ecc_bit);
+
+ if (bad_data_bit != -1)
+ fsl_mc_printk(mci, KERN_ERR,
+ "Faulty Data bit: %d\n", bad_data_bit);
+ if (bad_ecc_bit != -1)
+ fsl_mc_printk(mci, KERN_ERR,
+ "Faulty ECC bit: %d\n", bad_ecc_bit);
+
+ fsl_mc_printk(mci, KERN_ERR,
+ "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
+ cap_high ^ (1 << (bad_data_bit - 32)),
+ cap_low ^ (1 << bad_data_bit),
+ syndrome ^ (1 << bad_ecc_bit));
+ }
+
+ fsl_mc_printk(mci, KERN_ERR,
+ "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
+ cap_high, cap_low, syndrome);
+ fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
+ fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
+
+ /* we are out of range */
+ if (row_index == mci->nr_csrows)
+ fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
+
+ if (err_detect & DDR_EDE_SBE)
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "");
+
+ if (err_detect & DDR_EDE_MBE)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "");
+
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+}
+
+static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ u32 err_detect;
+
+ err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ if (!err_detect)
+ return IRQ_NONE;
+
+ fsl_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
+{
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ u32 sdram_ctl;
+ u32 sdtype;
+ enum mem_type mtype;
+ u32 cs_bnds;
+ int index;
+
+ sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
+
+ sdtype = sdram_ctl & DSC_SDTYPE_MASK;
+ if (sdram_ctl & DSC_RD_EN) {
+ switch (sdtype) {
+ case 0x02000000:
+ mtype = MEM_RDDR;
+ break;
+ case 0x03000000:
+ mtype = MEM_RDDR2;
+ break;
+ case 0x07000000:
+ mtype = MEM_RDDR3;
+ break;
+ case 0x05000000:
+ mtype = MEM_RDDR4;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (sdtype) {
+ case 0x02000000:
+ mtype = MEM_DDR;
+ break;
+ case 0x03000000:
+ mtype = MEM_DDR2;
+ break;
+ case 0x07000000:
+ mtype = MEM_DDR3;
+ break;
+ case 0x05000000:
+ mtype = MEM_DDR4;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ }
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ u32 start;
+ u32 end;
+
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
+
+ cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
+ (index * FSL_MC_CS_BNDS_OFS));
+
+ start = (cs_bnds & 0xffff0000) >> 16;
+ end = (cs_bnds & 0x0000ffff);
+
+ if (start == end)
+ continue; /* not populated */
+
+ start <<= (24 - PAGE_SHIFT);
+ end <<= (24 - PAGE_SHIFT);
+ end |= (1 << (24 - PAGE_SHIFT)) - 1;
+
+ csrow->first_page = start;
+ csrow->last_page = end;
+
+ dimm->nr_pages = end + 1 - start;
+ dimm->grain = 8;
+ dimm->mtype = mtype;
+ dimm->dtype = DEV_UNKNOWN;
+ if (sdram_ctl & DSC_X32_EN)
+ dimm->dtype = DEV_X32;
+ dimm->edac_mode = EDAC_SECDED;
+ }
+}
+
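The CS_BNDS start/end fields are in 16 MiB units, so shifting by (24 - PAGE_SHIFT) converts them to page frame numbers; a minimal sketch of the same arithmetic for a hypothetical bounds value describing a 1 GiB chip select, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	uint32_t cs_bnds = 0x0000003f;	/* hypothetical: SA=0x0000, EA=0x003f */
	uint32_t start = (cs_bnds & 0xffff0000) >> 16;
	uint32_t end = cs_bnds & 0x0000ffff;

	start <<= (24 - PAGE_SHIFT);		/* 16 MiB units -> pages */
	end <<= (24 - PAGE_SHIFT);
	end |= (1 << (24 - PAGE_SHIFT)) - 1;	/* last page of the last unit */

	printf("first_page=%u last_page=%u nr_pages=%u (%u MiB)\n",
	       start, end, end + 1 - start,
	       (end + 1 - start) >> (20 - PAGE_SHIFT));
	return 0;
}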
+int fsl_mc_err_probe(struct platform_device *op)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct fsl_mc_pdata *pdata;
+ struct resource r;
+ u32 sdram_ctl;
+ int res;
+
+ if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(*pdata));
+ if (!mci) {
+ devres_release_group(&op->dev, fsl_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ pdata->name = "fsl_mc_err";
+ mci->pdev = &op->dev;
+ pdata->edac_idx = edac_mc_idx++;
+ dev_set_drvdata(mci->pdev, mci);
+ mci->ctl_name = pdata->name;
+ mci->dev_name = pdata->name;
+
+ /*
+ * Get the endianness of DDR controller registers.
+ * Default is big endian.
+ */
+ little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
+
+ res = of_address_to_resource(op->dev.of_node, 0, &r);
+ if (res) {
+ pr_err("%s: Unable to get resource for MC err regs\n",
+ __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
+ pdata->name)) {
+ pr_err("%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
+ if (!pdata->mc_vbase) {
+ pr_err("%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
+ if (!(sdram_ctl & DSC_ECC_EN)) {
+ /* no ECC */
+ pr_warn("%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ edac_dbg(3, "init mci\n");
+ mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
+ MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
+ MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
+ MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = fsl_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ fsl_ddr_init_csrows(mci);
+
+ /* store the original error disable bits */
+ orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
+
+ /* clear all error bits */
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
+
+ res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
+ if (res) {
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
+ DDR_EIE_MBEE | DDR_EIE_SBEE);
+
+ /* store the original error management threshold */
+ orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
+ FSL_MC_ERR_SBE) & 0xff0000;
+
+ /* set threshold to 1 error per interrupt */
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
+
+ /* register interrupts */
+ pdata->irq = platform_get_irq(op, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ fsl_mc_isr,
+ IRQF_SHARED,
+ "[EDAC] MC err", mci);
+ if (res < 0) {
+ pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
+ __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&op->dev, fsl_mc_err_probe);
+ edac_dbg(3, "success\n");
+ pr_info(EDAC_MOD_STR " MC err registered\n");
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&op->dev);
+err:
+ devres_release_group(&op->dev, fsl_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+int fsl_mc_err_remove(struct platform_device *op)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+ struct fsl_mc_pdata *pdata = mci->pvt_info;
+
+ edac_dbg(0, "\n");
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
+ }
+
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
+ orig_ddr_err_disable);
+ ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
+
+ edac_mc_del_mc(&op->dev);
+ edac_mc_free(mci);
+ return 0;
+}
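The inject_data_hi/lo and inject_ctrl attributes registered above only accept strings that begin with a digit and are parsed with base autodetection, so 0x-prefixed hex works; a minimal userspace sketch of driving them, where both the sysfs path and the inject_ctrl value are assumptions taken from convention rather than from this patch:

#include <stdio.h>

/* assumed sysfs location of the mc0 injection attributes */
#define INJECT_DIR "/sys/devices/system/edac/mc/mc0/"

static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), INJECT_DIR "%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	/* flip one data bit on a subsequent write; values are examples only */
	write_attr("inject_data_lo", "0x00000001");
	write_attr("inject_data_hi", "0x00000000");
	write_attr("inject_ctrl", "0x100");	/* placeholder enable value;
						   consult the SoC manual */
	return 0;
}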
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
new file mode 100644
index 000000000000..4ccee292eff1
--- /dev/null
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -0,0 +1,79 @@
+/*
+ * Freescale Memory Controller kernel module
+ *
+ * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
+ * ARM-based Layerscape SoCs including LS2xxx. Originally split
+ * out from mpc85xx_edac EDAC driver.
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _FSL_DDR_EDAC_H_
+#define _FSL_DDR_EDAC_H_
+
+#define fsl_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "FSL_DDR", fmt, ##arg)
+
+/*
+ * DRAM error defines
+ */
+
+/* DDR_SDRAM_CFG */
+#define FSL_MC_DDR_SDRAM_CFG 0x0110
+#define FSL_MC_CS_BNDS_0 0x0000
+#define FSL_MC_CS_BNDS_OFS 0x0008
+
+#define FSL_MC_DATA_ERR_INJECT_HI 0x0e00
+#define FSL_MC_DATA_ERR_INJECT_LO 0x0e04
+#define FSL_MC_ECC_ERR_INJECT 0x0e08
+#define FSL_MC_CAPTURE_DATA_HI 0x0e20
+#define FSL_MC_CAPTURE_DATA_LO 0x0e24
+#define FSL_MC_CAPTURE_ECC 0x0e28
+#define FSL_MC_ERR_DETECT 0x0e40
+#define FSL_MC_ERR_DISABLE 0x0e44
+#define FSL_MC_ERR_INT_EN 0x0e48
+#define FSL_MC_CAPTURE_ATRIBUTES 0x0e4c
+#define FSL_MC_CAPTURE_ADDRESS 0x0e50
+#define FSL_MC_CAPTURE_EXT_ADDRESS 0x0e54
+#define FSL_MC_ERR_SBE 0x0e58
+
+#define DSC_MEM_EN 0x80000000
+#define DSC_ECC_EN 0x20000000
+#define DSC_RD_EN 0x10000000
+#define DSC_DBW_MASK 0x00180000
+#define DSC_DBW_32 0x00080000
+#define DSC_DBW_64 0x00000000
+
+#define DSC_SDTYPE_MASK 0x07000000
+#define DSC_X32_EN 0x00000020
+
+/* Err_Int_En */
+#define DDR_EIE_MSEE 0x1 /* memory select */
+#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
+#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
+
+/* Err_Detect */
+#define DDR_EDE_MSE 0x1 /* memory select */
+#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
+#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
+#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
+
+/* Err_Disable */
+#define DDR_EDI_MSED 0x1 /* memory select disable */
+#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
+#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+
+struct fsl_mc_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *mc_vbase;
+ int irq;
+};
+int fsl_mc_err_probe(struct platform_device *op);
+int fsl_mc_err_remove(struct platform_device *op);
+#endif
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
new file mode 100644
index 000000000000..6c59d897ad12
--- /dev/null
+++ b/drivers/edac/layerscape_edac.c
@@ -0,0 +1,73 @@
+/*
+ * Freescale Memory Controller kernel module
+ *
+ * Author: York Sun <york.sun@nxp.com>
+ *
+ * Copyright 2016 NXP Semiconductor
+ *
+ * Derived from mpc85xx_edac.c
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "edac_core.h"
+#include "fsl_ddr_edac.h"
+
+static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
+ { .compatible = "fsl,qoriq-memory-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
+
+static struct platform_driver fsl_ddr_mc_err_driver = {
+ .probe = fsl_mc_err_probe,
+ .remove = fsl_mc_err_remove,
+ .driver = {
+ .name = "fsl_ddr_mc_err",
+ .of_match_table = fsl_ddr_mc_err_of_match,
+ },
+};
+
+static int __init fsl_ddr_mc_init(void)
+{
+ int res;
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ res = platform_driver_register(&fsl_ddr_mc_err_driver);
+ if (res) {
+ pr_err("MC failed to register\n");
+ return res;
+ }
+
+ return 0;
+}
+
+module_init(fsl_ddr_mc_init);
+
+static void __exit fsl_ddr_mc_exit(void)
+{
+ platform_driver_unregister(&fsl_ddr_mc_err_driver);
+}
+
+module_exit(fsl_ddr_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NXP Semiconductor");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index ca63d0da8889..ff0567526ee3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -27,15 +27,12 @@
#include "edac_module.h"
#include "edac_core.h"
#include "mpc85xx_edac.h"
+#include "fsl_ddr_edac.h"
static int edac_dev_idx;
#ifdef CONFIG_PCI
static int edac_pci_idx;
#endif
-static int edac_mc_idx;
-
-static u32 orig_ddr_err_disable;
-static u32 orig_ddr_err_sbe;
/*
* PCI Err defines
@@ -46,103 +43,6 @@ static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
-#ifdef CONFIG_FSL_SOC_BOOKE
-static u32 orig_hid1[2];
-#endif
-
-/************************ MC SYSFS parts ***********************************/
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
-
-static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase +
- MPC85XX_MC_DATA_ERR_INJECT_HI));
-}
-
-static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase +
- MPC85XX_MC_DATA_ERR_INJECT_LO));
-}
-
-static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- return sprintf(data, "0x%08x",
- in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
-}
-
-static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- if (isdigit(*data)) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
- simple_strtoul(data, NULL, 0));
- return count;
- }
- return 0;
-}
-
-DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
- mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
-DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
- mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
-DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
- mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
-
-static struct attribute *mpc85xx_dev_attrs[] = {
- &dev_attr_inject_data_hi.attr,
- &dev_attr_inject_data_lo.attr,
- &dev_attr_inject_ctrl.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(mpc85xx_dev);
/**************************** PCI Err device ***************************/
#ifdef CONFIG_PCI
@@ -160,18 +60,18 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
return;
}
- printk(KERN_ERR "PCI error(s) detected\n");
- printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
+ pr_err("PCI error(s) detected\n");
+ pr_err("PCI/X ERR_DR register: %#08x\n", err_detect);
- printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
+ pr_err("PCI/X ERR_ATTRIB register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
- printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
+ pr_err("PCI/X ERR_ADDR register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
- printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
+ pr_err("PCI/X ERR_EXT_ADDR register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
- printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
+ pr_err("PCI/X ERR_DL register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
- printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
+ pr_err("PCI/X ERR_DH register: %#08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
/* clear error bits */
@@ -187,14 +87,14 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
{
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
- u32 err_detect;
+ u32 err_detect, err_cap_stat;
err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+ err_cap_stat = in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR);
pr_err("PCIe error(s) detected\n");
pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
- pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
- in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
+ pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n", err_cap_stat);
pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
@@ -206,6 +106,9 @@ static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+
+ /* reset error capture */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR, err_cap_stat | 0x1);
}
static int mpc85xx_pcie_find_capability(struct device_node *np)
@@ -267,7 +170,6 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
pdata = pci->pvt_info;
pdata->name = "mpc85xx_pci_err";
- pdata->irq = NO_IRQ;
plat_data = op->dev.platform_data;
if (!plat_data) {
@@ -297,8 +199,7 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
res = of_address_to_resource(of_node, 0, &r);
if (res) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "PCI err regs\n", __func__);
+ pr_err("%s: Unable to get resource for PCI err regs\n", __func__);
goto err;
}
@@ -307,15 +208,14 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
+ pr_err("%s: Error while requesting mem region\n", __func__);
res = -EBUSY;
goto err;
}
pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->pci_vbase) {
- printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ pr_err("%s: Unable to setup PCI err regs\n", __func__);
res = -ENOMEM;
goto err;
}
@@ -344,6 +244,9 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
/* clear error bits */
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
+ /* reset error capture */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR, 0x1);
+
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
@@ -356,15 +259,14 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
- printk(KERN_ERR
- "%s: Unable to request irq %d for "
- "MPC85xx PCI err\n", __func__, pdata->irq);
+ pr_err("%s: Unable to request irq %d for MPC85xx PCI err\n",
+ __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
goto err2;
}
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pr_info(EDAC_MOD_STR " acquired irq %d for PCI Err\n",
pdata->irq);
}
@@ -386,7 +288,7 @@ static int mpc85xx_pci_err_probe(struct platform_device *op)
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
edac_dbg(3, "success\n");
- printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
+ pr_info(EDAC_MOD_STR " PCI err registered\n");
return 0;
@@ -529,17 +431,17 @@ static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
if (!(err_detect & L2_EDE_MASK))
return;
- printk(KERN_ERR "ECC Error in CPU L2 cache\n");
- printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
- printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
+ pr_err("ECC Error in CPU L2 cache\n");
+ pr_err("L2 Error Detect Register: 0x%08x\n", err_detect);
+ pr_err("L2 Error Capture Data High Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
- printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
+ pr_err("L2 Error Capture Data Lo Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
- printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
+ pr_err("L2 Error Syndrome Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
- printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
+ pr_err("L2 Error Attributes Capture Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
- printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
+ pr_err("L2 Error Address Capture Register: 0x%08x\n",
in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
/* clear error detect register */
@@ -588,7 +490,6 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
pdata = edac_dev->pvt_info;
pdata->name = "mpc85xx_l2_err";
- pdata->irq = NO_IRQ;
edac_dev->dev = &op->dev;
dev_set_drvdata(edac_dev->dev, edac_dev);
edac_dev->ctl_name = pdata->name;
@@ -596,8 +497,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
- printk(KERN_ERR "%s: Unable to get resource for "
- "L2 err regs\n", __func__);
+ pr_err("%s: Unable to get resource for L2 err regs\n", __func__);
goto err;
}
@@ -606,15 +506,14 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
+ pr_err("%s: Error while requesting mem region\n", __func__);
res = -EBUSY;
goto err;
}
pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->l2_vbase) {
- printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
+ pr_err("%s: Unable to setup L2 err regs\n", __func__);
res = -ENOMEM;
goto err;
}
@@ -646,16 +545,14 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
mpc85xx_l2_isr, IRQF_SHARED,
"[EDAC] L2 err", edac_dev);
if (res < 0) {
- printk(KERN_ERR
- "%s: Unable to request irq %d for "
- "MPC85xx L2 err\n", __func__, pdata->irq);
+ pr_err("%s: Unable to request irq %d for MPC85xx L2 err\n",
+ __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
goto err2;
}
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
- pdata->irq);
+ pr_info(EDAC_MOD_STR " acquired irq %d for L2 Err\n", pdata->irq);
edac_dev->op_state = OP_RUNNING_INTERRUPT;
@@ -665,7 +562,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
edac_dbg(3, "success\n");
- printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
+ pr_info(EDAC_MOD_STR " L2 err registered\n");
return 0;
@@ -729,466 +626,6 @@ static struct platform_driver mpc85xx_l2_err_driver = {
},
};
-/**************************** MC Err device ***************************/
-
-/*
- * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
- * MPC8572 User's Manual. Each line represents a syndrome bit column as a
- * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
- * below correspond to Freescale's manuals.
- */
-static unsigned int ecc_table[16] = {
- /* MSB LSB */
- /* [0:31] [32:63] */
- 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
- 0x00ff00ff, 0x00fff0ff,
- 0x0f0f0f0f, 0x0f0fff00,
- 0x11113333, 0x7777000f,
- 0x22224444, 0x8888222f,
- 0x44448888, 0xffff4441,
- 0x8888ffff, 0x11118882,
- 0xffff1111, 0x22221114, /* Syndrome bit 0 */
-};
-
-/*
- * Calculate the correct ECC value for a 64-bit value specified by high:low
- */
-static u8 calculate_ecc(u32 high, u32 low)
-{
- u32 mask_low;
- u32 mask_high;
- int bit_cnt;
- u8 ecc = 0;
- int i;
- int j;
-
- for (i = 0; i < 8; i++) {
- mask_high = ecc_table[i * 2];
- mask_low = ecc_table[i * 2 + 1];
- bit_cnt = 0;
-
- for (j = 0; j < 32; j++) {
- if ((mask_high >> j) & 1)
- bit_cnt ^= (high >> j) & 1;
- if ((mask_low >> j) & 1)
- bit_cnt ^= (low >> j) & 1;
- }
-
- ecc |= bit_cnt << i;
- }
-
- return ecc;
-}
-
-/*
- * Create the syndrome code which is generated if the data line specified by
- * 'bit' failed. Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
- * User's Manual and 9-61 in the MPC8572 User's Manual.
- */
-static u8 syndrome_from_bit(unsigned int bit) {
- int i;
- u8 syndrome = 0;
-
- /*
- * Cycle through the upper or lower 32-bit portion of each value in
- * ecc_table depending on if 'bit' is in the upper or lower half of
- * 64-bit data.
- */
- for (i = bit < 32; i < 16; i += 2)
- syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
-
- return syndrome;
-}
-
-/*
- * Decode data and ecc syndrome to determine what went wrong
- * Note: This can only decode single-bit errors
- */
-static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
- int *bad_data_bit, int *bad_ecc_bit)
-{
- int i;
- u8 syndrome;
-
- *bad_data_bit = -1;
- *bad_ecc_bit = -1;
-
- /*
- * Calculate the ECC of the captured data and XOR it with the captured
- * ECC to find an ECC syndrome value we can search for
- */
- syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
-
- /* Check if a data line is stuck... */
- for (i = 0; i < 64; i++) {
- if (syndrome == syndrome_from_bit(i)) {
- *bad_data_bit = i;
- return;
- }
- }
-
- /* If data is correct, check ECC bits for errors... */
- for (i = 0; i < 8; i++) {
- if ((syndrome >> i) & 0x1) {
- *bad_ecc_bit = i;
- return;
- }
- }
-}
-
-#define make64(high, low) (((u64)(high) << 32) | (low))
-
-static void mpc85xx_mc_check(struct mem_ctl_info *mci)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- struct csrow_info *csrow;
- u32 bus_width;
- u32 err_detect;
- u32 syndrome;
- u64 err_addr;
- u32 pfn;
- int row_index;
- u32 cap_high;
- u32 cap_low;
- int bad_data_bit;
- int bad_ecc_bit;
-
- err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
- if (!err_detect)
- return;
-
- mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
- err_detect);
-
- /* no more processing if not ECC bit errors */
- if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
- return;
- }
-
- syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
-
- /* Mask off appropriate bits of syndrome based on bus width */
- bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
- DSC_DBW_MASK) ? 32 : 64;
- if (bus_width == 64)
- syndrome &= 0xff;
- else
- syndrome &= 0xffff;
-
- err_addr = make64(
- in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_EXT_ADDRESS),
- in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS));
- pfn = err_addr >> PAGE_SHIFT;
-
- for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
- csrow = mci->csrows[row_index];
- if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
- break;
- }
-
- cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
- cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
-
- /*
- * Analyze single-bit errors on 64-bit wide buses
- * TODO: Add support for 32-bit wide buses
- */
- if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
- sbe_ecc_decode(cap_high, cap_low, syndrome,
- &bad_data_bit, &bad_ecc_bit);
-
- if (bad_data_bit != -1)
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Faulty Data bit: %d\n", bad_data_bit);
- if (bad_ecc_bit != -1)
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Faulty ECC bit: %d\n", bad_ecc_bit);
-
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high ^ (1 << (bad_data_bit - 32)),
- cap_low ^ (1 << bad_data_bit),
- syndrome ^ (1 << bad_ecc_bit));
- }
-
- mpc85xx_mc_printk(mci, KERN_ERR,
- "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high, cap_low, syndrome);
- mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
- mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
-
- /* we are out of range */
- if (row_index == mci->nr_csrows)
- mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
-
- if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- pfn, err_addr & ~PAGE_MASK, syndrome,
- row_index, 0, -1,
- mci->ctl_name, "");
-
- if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- pfn, err_addr & ~PAGE_MASK, syndrome,
- row_index, 0, -1,
- mci->ctl_name, "");
-
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
-}
-
-static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
-{
- struct mem_ctl_info *mci = dev_id;
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- u32 err_detect;
-
- err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
- if (!err_detect)
- return IRQ_NONE;
-
- mpc85xx_mc_check(mci);
-
- return IRQ_HANDLED;
-}
-
-static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
-{
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- struct csrow_info *csrow;
- struct dimm_info *dimm;
- u32 sdram_ctl;
- u32 sdtype;
- enum mem_type mtype;
- u32 cs_bnds;
- int index;
-
- sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
-
- sdtype = sdram_ctl & DSC_SDTYPE_MASK;
- if (sdram_ctl & DSC_RD_EN) {
- switch (sdtype) {
- case DSC_SDTYPE_DDR:
- mtype = MEM_RDDR;
- break;
- case DSC_SDTYPE_DDR2:
- mtype = MEM_RDDR2;
- break;
- case DSC_SDTYPE_DDR3:
- mtype = MEM_RDDR3;
- break;
- default:
- mtype = MEM_UNKNOWN;
- break;
- }
- } else {
- switch (sdtype) {
- case DSC_SDTYPE_DDR:
- mtype = MEM_DDR;
- break;
- case DSC_SDTYPE_DDR2:
- mtype = MEM_DDR2;
- break;
- case DSC_SDTYPE_DDR3:
- mtype = MEM_DDR3;
- break;
- default:
- mtype = MEM_UNKNOWN;
- break;
- }
- }
-
- for (index = 0; index < mci->nr_csrows; index++) {
- u32 start;
- u32 end;
-
- csrow = mci->csrows[index];
- dimm = csrow->channels[0]->dimm;
-
- cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
- (index * MPC85XX_MC_CS_BNDS_OFS));
-
- start = (cs_bnds & 0xffff0000) >> 16;
- end = (cs_bnds & 0x0000ffff);
-
- if (start == end)
- continue; /* not populated */
-
- start <<= (24 - PAGE_SHIFT);
- end <<= (24 - PAGE_SHIFT);
- end |= (1 << (24 - PAGE_SHIFT)) - 1;
-
- csrow->first_page = start;
- csrow->last_page = end;
-
- dimm->nr_pages = end + 1 - start;
- dimm->grain = 8;
- dimm->mtype = mtype;
- dimm->dtype = DEV_UNKNOWN;
- if (sdram_ctl & DSC_X32_EN)
- dimm->dtype = DEV_X32;
- dimm->edac_mode = EDAC_SECDED;
- }
-}
-
-static int mpc85xx_mc_err_probe(struct platform_device *op)
-{
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct mpc85xx_mc_pdata *pdata;
- struct resource r;
- u32 sdram_ctl;
- int res;
-
- if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
- return -ENOMEM;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 4;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = 1;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
- sizeof(*pdata));
- if (!mci) {
- devres_release_group(&op->dev, mpc85xx_mc_err_probe);
- return -ENOMEM;
- }
-
- pdata = mci->pvt_info;
- pdata->name = "mpc85xx_mc_err";
- pdata->irq = NO_IRQ;
- mci->pdev = &op->dev;
- pdata->edac_idx = edac_mc_idx++;
- dev_set_drvdata(mci->pdev, mci);
- mci->ctl_name = pdata->name;
- mci->dev_name = pdata->name;
-
- res = of_address_to_resource(op->dev.of_node, 0, &r);
- if (res) {
- printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
- __func__);
- goto err;
- }
-
- if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
- pdata->name)) {
- printk(KERN_ERR "%s: Error while requesting mem region\n",
- __func__);
- res = -EBUSY;
- goto err;
- }
-
- pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
- if (!pdata->mc_vbase) {
- printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
- res = -ENOMEM;
- goto err;
- }
-
- sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
- if (!(sdram_ctl & DSC_ECC_EN)) {
- /* no ECC */
- printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
- res = -ENODEV;
- goto err;
- }
-
- edac_dbg(3, "init mci\n");
- mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
- MEM_FLAG_DDR | MEM_FLAG_DDR2;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = MPC85XX_REVISION;
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- mci->edac_check = mpc85xx_mc_check;
-
- mci->ctl_page_to_phys = NULL;
-
- mci->scrub_mode = SCRUB_SW_SRC;
-
- mpc85xx_init_csrows(mci);
-
- /* store the original error disable bits */
- orig_ddr_err_disable =
- in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
-
- /* clear all error bits */
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
-
- if (edac_mc_add_mc_with_groups(mci, mpc85xx_dev_groups)) {
- edac_dbg(3, "failed edac_mc_add_mc()\n");
- goto err;
- }
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
- DDR_EIE_MBEE | DDR_EIE_SBEE);
-
- /* store the original error management threshold */
- orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
- MPC85XX_MC_ERR_SBE) & 0xff0000;
-
- /* set threshold to 1 error per interrupt */
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
-
- /* register interrupts */
- pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
- res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_mc_isr,
- IRQF_SHARED,
- "[EDAC] MC err", mci);
- if (res < 0) {
- printk(KERN_ERR "%s: Unable to request irq %d for "
- "MPC85xx DRAM ERR\n", __func__, pdata->irq);
- irq_dispose_mapping(pdata->irq);
- res = -ENODEV;
- goto err2;
- }
-
- printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
- pdata->irq);
- }
-
- devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
- edac_dbg(3, "success\n");
- printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
-
- return 0;
-
-err2:
- edac_mc_del_mc(&op->dev);
-err:
- devres_release_group(&op->dev, mpc85xx_mc_err_probe);
- edac_mc_free(mci);
- return res;
-}
-
-static int mpc85xx_mc_err_remove(struct platform_device *op)
-{
- struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
- struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
-
- edac_dbg(0, "\n");
-
- if (edac_op_state == EDAC_OPSTATE_INT) {
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
- irq_dispose_mapping(pdata->irq);
- }
-
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
- orig_ddr_err_disable);
- out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
-
- edac_mc_del_mc(&op->dev);
- edac_mc_free(mci);
- return 0;
-}
-
static const struct of_device_id mpc85xx_mc_err_of_match[] = {
/* deprecate the fsl,85.. forms in the future, 2.6.30? */
{ .compatible = "fsl,8540-memory-controller", },
@@ -1217,22 +654,14 @@ static const struct of_device_id mpc85xx_mc_err_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
- .probe = mpc85xx_mc_err_probe,
- .remove = mpc85xx_mc_err_remove,
+ .probe = fsl_mc_err_probe,
+ .remove = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
},
};
-#ifdef CONFIG_FSL_SOC_BOOKE
-static void __init mpc85xx_mc_clear_rfxe(void *data)
-{
- orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
- mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
-}
-#endif
-
static struct platform_driver * const drivers[] = {
&mpc85xx_mc_err_driver,
&mpc85xx_l2_err_driver,
@@ -1246,8 +675,7 @@ static int __init mpc85xx_mc_init(void)
int res = 0;
u32 __maybe_unused pvr = 0;
- printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
- "(C) 2006 Montavista Software\n");
+ pr_info("Freescale(R) MPC85xx EDAC driver, (C) 2006 Montavista Software\n");
/* make sure error reporting method is sane */
switch (edac_op_state) {
@@ -1261,44 +689,15 @@ static int __init mpc85xx_mc_init(void)
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
- printk(KERN_WARNING EDAC_MOD_STR "drivers fail to register\n");
-
-#ifdef CONFIG_FSL_SOC_BOOKE
- pvr = mfspr(SPRN_PVR);
-
- if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
- (PVR_VER(pvr) == PVR_VER_E500V2)) {
- /*
- * need to clear HID1[RFXE] to disable machine check int
- * so we can catch it
- */
- if (edac_op_state == EDAC_OPSTATE_INT)
- on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
- }
-#endif
+ pr_warn(EDAC_MOD_STR "drivers fail to register\n");
return 0;
}
module_init(mpc85xx_mc_init);
-#ifdef CONFIG_FSL_SOC_BOOKE
-static void __exit mpc85xx_mc_restore_hid1(void *data)
-{
- mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
-}
-#endif
-
static void __exit mpc85xx_mc_exit(void)
{
-#ifdef CONFIG_FSL_SOC_BOOKE
- u32 pvr = mfspr(SPRN_PVR);
-
- if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
- (PVR_VER(pvr) == PVR_VER_E500V2)) {
- on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
- }
-#endif
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 9352e88d53e5..3f6fb16ad34f 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -17,65 +17,6 @@
#define mpc85xx_printk(level, fmt, arg...) \
edac_printk(level, "MPC85xx", fmt, ##arg)
-#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
- edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
-
-/*
- * DRAM error defines
- */
-
-/* DDR_SDRAM_CFG */
-#define MPC85XX_MC_DDR_SDRAM_CFG 0x0110
-#define MPC85XX_MC_CS_BNDS_0 0x0000
-#define MPC85XX_MC_CS_BNDS_1 0x0008
-#define MPC85XX_MC_CS_BNDS_2 0x0010
-#define MPC85XX_MC_CS_BNDS_3 0x0018
-#define MPC85XX_MC_CS_BNDS_OFS 0x0008
-
-#define MPC85XX_MC_DATA_ERR_INJECT_HI 0x0e00
-#define MPC85XX_MC_DATA_ERR_INJECT_LO 0x0e04
-#define MPC85XX_MC_ECC_ERR_INJECT 0x0e08
-#define MPC85XX_MC_CAPTURE_DATA_HI 0x0e20
-#define MPC85XX_MC_CAPTURE_DATA_LO 0x0e24
-#define MPC85XX_MC_CAPTURE_ECC 0x0e28
-#define MPC85XX_MC_ERR_DETECT 0x0e40
-#define MPC85XX_MC_ERR_DISABLE 0x0e44
-#define MPC85XX_MC_ERR_INT_EN 0x0e48
-#define MPC85XX_MC_CAPTURE_ATRIBUTES 0x0e4c
-#define MPC85XX_MC_CAPTURE_ADDRESS 0x0e50
-#define MPC85XX_MC_CAPTURE_EXT_ADDRESS 0x0e54
-#define MPC85XX_MC_ERR_SBE 0x0e58
-
-#define DSC_MEM_EN 0x80000000
-#define DSC_ECC_EN 0x20000000
-#define DSC_RD_EN 0x10000000
-#define DSC_DBW_MASK 0x00180000
-#define DSC_DBW_32 0x00080000
-#define DSC_DBW_64 0x00000000
-
-#define DSC_SDTYPE_MASK 0x07000000
-
-#define DSC_SDTYPE_DDR 0x02000000
-#define DSC_SDTYPE_DDR2 0x03000000
-#define DSC_SDTYPE_DDR3 0x07000000
-#define DSC_X32_EN 0x00000020
-
-/* Err_Int_En */
-#define DDR_EIE_MSEE 0x1 /* memory select */
-#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
-#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
-
-/* Err_Detect */
-#define DDR_EDE_MSE 0x1 /* memory select */
-#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
-#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
-#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
-
-/* Err_Disable */
-#define DDR_EDI_MSED 0x1 /* memory select disable */
-#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
-#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
-
/*
* L2 Err defines
*/
@@ -149,13 +90,6 @@
#define MPC85XX_PCIE_ERR_CAP_R2 0x0030
#define MPC85XX_PCIE_ERR_CAP_R3 0x0034
-struct mpc85xx_mc_pdata {
- char *name;
- int edac_idx;
- void __iomem *mc_vbase;
- int irq;
-};
-
struct mpc85xx_l2_pdata {
char *name;
int edac_idx;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 6c54127e6eae..cb9b8577acbc 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -118,7 +118,6 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
pdata->pci_hose = pdev->id;
pdata->name = "mpc85xx_pci_err";
- pdata->irq = NO_IRQ;
platform_set_drvdata(pdev, pci);
pci->dev = &pdev->dev;
pci->dev_name = dev_name(&pdev->dev);
@@ -291,7 +290,6 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev)
pdata = edac_dev->pvt_info;
pdata->name = "mv64x60_sram_err";
- pdata->irq = NO_IRQ;
edac_dev->dev = &pdev->dev;
platform_set_drvdata(pdev, edac_dev);
edac_dev->dev_name = dev_name(&pdev->dev);
@@ -459,7 +457,6 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata = edac_dev->pvt_info;
pdata->name = "mv64x60_cpu_err";
- pdata->irq = NO_IRQ;
edac_dev->dev = &pdev->dev;
platform_set_drvdata(pdev, edac_dev);
edac_dev->dev_name = dev_name(&pdev->dev);
@@ -727,7 +724,6 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
pdata->name = "mv64x60_mc_err";
- pdata->irq = NO_IRQ;
mci->dev_name = dev_name(&pdev->dev);
pdata->edac_idx = edac_mc_idx++;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d3a64ba61fa3..691ce25e9010 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1029,8 +1029,6 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
pdata = mci->pvt_info;
pdata->dcr_host = *dcr_host;
- pdata->irqs.sec = NO_IRQ;
- pdata->irqs.ded = NO_IRQ;
/* Initialize controller capabilities and configuration */
@@ -1111,7 +1109,7 @@ static int ppc4xx_edac_register_irq(struct platform_device *op,
ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
- if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) {
+ if (!ded_irq || !sec_irq) {
ppc4xx_edac_mc_printk(KERN_ERR, mci,
"Unable to map interrupts.\n");
status = -ENODEV;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ce0067b7a2f6..54775221a01f 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2474,7 +2474,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
/* Check if everything were registered */
if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
- !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta)
+ !pvt->pci_ras || !pvt->pci_ta)
goto enodev;
if (saw_chan_mask != 0x0f)
@@ -2563,8 +2563,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
/* Check if everything were registered */
if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
- !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
- !pvt->pci_ta)
+ !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
goto enodev;
if (saw_chan_mask != 0x0f && /* -EN */
diff --git a/drivers/edac/wq.c b/drivers/edac/wq.c
index 1b8c07e44fd8..2a9a11ae2461 100644
--- a/drivers/edac/wq.c
+++ b/drivers/edac/wq.c
@@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(edac_stop_work);
int edac_workqueue_setup(void)
{
- wq = create_singlethread_workqueue("edac-poller");
+ wq = alloc_ordered_workqueue("edac-poller", WQ_MEM_RECLAIM);
if (!wq)
return -ENODEV;
else
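
The edac-poller change above replaces create_singlethread_workqueue() with alloc_ordered_workqueue() plus WQ_MEM_RECLAIM, keeping strictly ordered one-at-a-time execution while also providing a rescuer thread under memory pressure. A minimal sketch of the same pattern in a hypothetical poller (the poll_wq/poll_fn names are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_work;		/* hypothetical poll item */

static void poll_fn(struct work_struct *work)
{
	/* ... poll the hardware, then re-arm one second later ... */
	queue_delayed_work(poll_wq, &poll_work, HZ);
}

static int __init poll_init(void)
{
	/* Ordered queue with a rescuer, usable on the reclaim path. */
	poll_wq = alloc_ordered_workqueue("example-poller", WQ_MEM_RECLAIM);
	if (!poll_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&poll_work, poll_fn);
	queue_delayed_work(poll_wq, &poll_work, 0);
	return 0;
}
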
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 3d89e60a3e71..04788d92ea52 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -96,6 +96,12 @@ config EXTCON_PALMAS
Say Y here to enable support for USB peripheral and USB host
detection by palmas usb.
+config EXTCON_QCOM_SPMI_MISC
+ tristate "Qualcomm USB extcon support"
+ help
+ Say Y here to enable SPMI PMIC based USB cable detection
+ support on Qualcomm PMICs such as PM8941.
+
config EXTCON_RT8973A
tristate "Richtek RT8973A EXTCON support"
depends on I2C
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 972c813c375b..31a0a999c4fb 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
+obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 44e48aa78a84..bc538708c753 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -3,6 +3,9 @@
*
* Analog Jack extcon driver with ADC-based detection capability.
*
+ * Copyright (C) 2016 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
* Copyright (C) 2012 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
@@ -58,7 +61,7 @@ static void adc_jack_handler(struct work_struct *work)
struct adc_jack_data *data = container_of(to_delayed_work(work),
struct adc_jack_data,
handler);
- u32 state = 0;
+ struct adc_jack_cond *def;
int ret, adc_val;
int i;
@@ -70,17 +73,18 @@ static void adc_jack_handler(struct work_struct *work)
/* Get state from adc value with adc_conditions */
for (i = 0; i < data->num_conditions; i++) {
- struct adc_jack_cond *def = &data->adc_conditions[i];
- if (!def->state)
- break;
+ def = &data->adc_conditions[i];
if (def->min_adc <= adc_val && def->max_adc >= adc_val) {
- state = def->state;
- break;
+ extcon_set_state_sync(data->edev, def->id, true);
+ return;
}
}
- /* if no def has met, it means state = 0 (no cables attached) */
- extcon_set_state(data->edev, state);
+ /* Set the detached state if adc value is not included in the range */
+ for (i = 0; i < data->num_conditions; i++) {
+ def = &data->adc_conditions[i];
+ extcon_set_state_sync(data->edev, def->id, false);
+ }
}
static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
@@ -114,16 +118,14 @@ static int adc_jack_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (!pdata->adc_conditions ||
- !pdata->adc_conditions[0].state) {
+ if (!pdata->adc_conditions) {
dev_err(&pdev->dev, "error: adc_conditions not defined.\n");
return -EINVAL;
}
data->adc_conditions = pdata->adc_conditions;
/* Check the length of array and set num_conditions */
- for (i = 0; data->adc_conditions[i].state; i++)
- ;
+ for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
@@ -158,6 +160,7 @@ static int adc_jack_probe(struct platform_device *pdev)
if (data->wakeup_source)
device_init_wakeup(&pdev->dev, 1);
+ adc_jack_handler(&data->handler.work);
return 0;
}
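
The adc-jack conversion above shows the pattern repeated throughout this series: rather than writing a 32-bit bitmask with the old extcon_set_state(edev, state), each connector is reported by its EXTCON_* id through extcon_set_state_sync(), and a probe-time call to the handler publishes the initial state. A minimal, hypothetical detection helper using the new per-id calls (the headset ids and mic_present flag are illustrative only):

#include <linux/extcon.h>

/* Hypothetical: report a headset as individual connector states. */
static void report_headset(struct extcon_dev *edev, bool mic_present)
{
	/* Each id raises its own attach/detach notification and uevent. */
	extcon_set_state_sync(edev, EXTCON_JACK_HEADPHONE, true);
	extcon_set_state_sync(edev, EXTCON_JACK_MICROPHONE, mic_present);
}
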
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 1d8e0a57bd51..56e6c4c7c60d 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -183,7 +183,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
if (clamp)
val = ARIZONA_RMV_SHRT_HP1L;
break;
- };
+ }
snd_soc_dapm_mutex_lock(arizona->dapm);
@@ -614,7 +614,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
+ ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -649,7 +649,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
else
report = EXTCON_JACK_HEADPHONE;
- ret = extcon_set_cable_state_(info->edev, report, true);
+ ret = extcon_set_state_sync(info->edev, report, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report HP/line: %d\n",
ret);
@@ -732,7 +732,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_set_cable_state_(info->edev, EXTCON_JACK_HEADPHONE, true);
+ ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
@@ -789,7 +789,7 @@ err:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* Just report headphone */
- ret = extcon_set_cable_state_(info->edev, EXTCON_JACK_HEADPHONE, true);
+ ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
@@ -829,7 +829,7 @@ static void arizona_micd_detect(struct work_struct *work)
mutex_lock(&info->lock);
/* If the cable was removed while measuring ignore the result */
- ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
+ ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
@@ -914,7 +914,7 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_identify_headphone(info);
- ret = extcon_set_cable_state_(info->edev,
+ ret = extcon_set_state_sync(info->edev,
EXTCON_JACK_MICROPHONE, true);
if (ret != 0)
dev_err(arizona->dev, "Headset report failed: %d\n",
@@ -1108,7 +1108,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (info->last_jackdet == present) {
dev_dbg(arizona->dev, "Detected jack\n");
- ret = extcon_set_cable_state_(info->edev,
+ ret = extcon_set_state_sync(info->edev,
EXTCON_MECHANICAL, true);
if (ret != 0)
@@ -1149,10 +1149,13 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
info->micd_ranges[i].key, 0);
input_sync(info->input);
- ret = extcon_update_state(info->edev, 0xffffffff, 0);
- if (ret != 0)
- dev_err(arizona->dev, "Removal report failed: %d\n",
- ret);
+ for (i = 0; i < ARRAY_SIZE(arizona_cable) - 1; i++) {
+ ret = extcon_set_state_sync(info->edev,
+ arizona_cable[i], false);
+ if (ret != 0)
+ dev_err(arizona->dev,
+ "Removal report failed: %d\n", ret);
+ }
regmap_update_bits(arizona->regmap,
ARIZONA_JACK_DETECT_DEBOUNCE,
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index fd55c2f2080a..42f41e808292 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -189,19 +189,19 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
switch (chrg_type) {
case DET_STAT_SDP:
- dev_dbg(info->dev, "sdp cable is connecetd\n");
+ dev_dbg(info->dev, "sdp cable is connected\n");
notify_otg = true;
notify_charger = true;
cable = EXTCON_CHG_USB_SDP;
break;
case DET_STAT_CDP:
- dev_dbg(info->dev, "cdp cable is connecetd\n");
+ dev_dbg(info->dev, "cdp cable is connected\n");
notify_otg = true;
notify_charger = true;
cable = EXTCON_CHG_USB_CDP;
break;
case DET_STAT_DCP:
- dev_dbg(info->dev, "dcp cable is connecetd\n");
+ dev_dbg(info->dev, "dcp cable is connected\n");
notify_charger = true;
cable = EXTCON_CHG_USB_DCP;
break;
@@ -226,7 +226,7 @@ notify_otg:
}
if (notify_charger)
- extcon_set_cable_state_(info->edev, cable, vbus_attach);
+ extcon_set_state_sync(info->edev, cable, vbus_attach);
/* Clear the flags on disconnect event */
if (!vbus_attach)
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index d023789f0fda..ebed22f22d75 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -49,7 +49,8 @@ static void gpio_extcon_work(struct work_struct *work)
state = gpiod_get_value_cansleep(data->id_gpiod);
if (data->pdata->gpio_active_low)
state = !state;
- extcon_set_state(data->edev, state);
+
+ extcon_set_state_sync(data->edev, data->pdata->extcon_id, state);
}
static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 852a7112f451..12e26c4e7763 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2013,2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -357,7 +357,7 @@ static int max14577_muic_jig_handler(struct max14577_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
+ extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -454,24 +454,24 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_USB, attached);
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX14577_CHARGER_TYPE_SPECIAL_1A:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX14577_CHARGER_TYPE_NONE:
@@ -791,6 +791,6 @@ static struct platform_driver max14577_muic_driver = {
module_platform_driver(max14577_muic_driver);
MODULE_DESCRIPTION("Maxim 14577/77836 Extcon driver");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:extcon-max14577");
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index c24abec5d06c..533e16a952b8 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -39,16 +39,16 @@ static irqreturn_t max3355_id_irq(int irq, void *dev_id)
* As we don't have event for USB peripheral cable attached,
* we simulate USB peripheral attach here.
*/
- extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, false);
- extcon_set_cable_state_(data->edev, EXTCON_USB, true);
+ extcon_set_state_sync(data->edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(data->edev, EXTCON_USB, true);
} else {
/*
* ID = 0 means USB HOST cable attached.
* As we don't have event for USB peripheral cable detached,
* we simulate USB peripheral detach here.
*/
- extcon_set_cable_state_(data->edev, EXTCON_USB, false);
- extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(data->edev, EXTCON_USB, false);
+ extcon_set_state_sync(data->edev, EXTCON_USB_HOST, true);
}
return IRQ_HANDLED;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index f17cb76b567c..68dbcb814b2f 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -505,8 +505,8 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
- extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
goto out;
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
dock_id = EXTCON_DOCK;
@@ -514,8 +514,8 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
dock_id = EXTCON_DOCK;
if (!attached) {
- extcon_set_cable_state_(info->edev, EXTCON_USB, false);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_USB, false);
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
false);
}
break;
@@ -530,7 +530,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
attached);
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, dock_id, attached);
+ extcon_set_state_sync(info->edev, dock_id, attached);
out:
return 0;
@@ -596,7 +596,7 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
attached);
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
/* Audio Video Cable with load, PATH:AUDIO */
@@ -604,14 +604,14 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
attached);
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_USB, attached);
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
/* MHL or MHL with USB/TA cable */
- extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s cable of gnd type\n",
@@ -653,7 +653,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
+ extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -807,10 +807,10 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
* - Support charging through micro-usb port without
* data connection
*/
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
if (!cable_attached)
- extcon_set_cable_state_(info->edev,
+ extcon_set_state_sync(info->edev,
EXTCON_DISP_MHL, cable_attached);
break;
}
@@ -834,13 +834,13 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
* - Support charging through micro-usb port without
* data connection.
*/
- extcon_set_cable_state_(info->edev, EXTCON_USB,
+ extcon_set_state_sync(info->edev, EXTCON_USB,
attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
if (!cable_attached)
- extcon_set_cable_state_(info->edev, EXTCON_DOCK,
+ extcon_set_state_sync(info->edev, EXTCON_DOCK,
cable_attached);
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
@@ -869,9 +869,9 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_DOCK,
+ extcon_set_state_sync(info->edev, EXTCON_DOCK,
attached);
- extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL,
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
attached);
break;
}
@@ -905,28 +905,28 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB,
+ extcon_set_state_sync(info->edev, EXTCON_USB,
attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
/* Only TA cable */
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
}
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index b188bd650efa..5d11fdf36e94 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -346,7 +346,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
@@ -356,7 +356,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
default:
dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
@@ -392,7 +392,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
+ extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -486,8 +486,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_USB, attached);
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
@@ -497,7 +497,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX77843_MUIC_CHG_DEDICATED:
@@ -507,7 +507,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_500MA:
@@ -517,7 +517,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX77843_MUIC_CHG_SPECIAL_1A:
@@ -527,7 +527,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
if (ret < 0)
return ret;
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
case MAX77843_MUIC_CHG_GND:
@@ -536,10 +536,10 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
/* Charger cable on MHL accessory is attach or detach */
if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
true);
else if (gnd_type == MAX77843_MUIC_GND_MHL)
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
false);
break;
case MAX77843_MUIC_CHG_NONE:
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 9a89320d09a8..4a0612fb9c07 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -331,11 +331,11 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
switch (usb_type) {
case MAX8997_USB_HOST:
- extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
break;
case MAX8997_USB_DEVICE:
- extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_USB, attached);
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
break;
default:
@@ -361,7 +361,7 @@ static int max8997_muic_handle_dock(struct max8997_muic_info *info,
switch (cable_type) {
case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
- extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
@@ -384,7 +384,7 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
return ret;
}
- extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
+ extcon_set_state_sync(info->edev, EXTCON_JIG, attached);
return 0;
}
@@ -406,7 +406,7 @@ static int max8997_muic_adc_handler(struct max8997_muic_info *info)
return ret;
break;
case MAX8997_MUIC_ADC_MHL:
- extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
break;
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
@@ -489,19 +489,19 @@ static int max8997_muic_chg_handler(struct max8997_muic_info *info)
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_CDP,
attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SLOW,
attached);
break;
case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_FAST,
attached);
break;
default:
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index caff46c0e214..634ba70782de 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -61,7 +61,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) {
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
- extcon_set_cable_state_(edev, EXTCON_USB, true);
+ extcon_set_state_sync(edev, EXTCON_USB, true);
dev_info(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -70,7 +70,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
} else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) {
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state_(edev, EXTCON_USB, false);
+ extcon_set_state_sync(edev, EXTCON_USB, false);
dev_info(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
@@ -98,7 +98,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
@@ -106,17 +106,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
(!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
(id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
}
@@ -137,10 +137,10 @@ static void palmas_gpio_id_detect(struct work_struct *work)
id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
if (id) {
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
} else {
- extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
}
}
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
new file mode 100644
index 000000000000..ca957a5f4291
--- /dev/null
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -0,0 +1,170 @@
+/**
+ * extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
+ * detection based on extcon-usb-gpio.c.
+ *
+ * Copyright (C) 2016 Linaro, Ltd.
+ * Stephen Boyd <stephen.boyd@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define USB_ID_DEBOUNCE_MS 5 /* ms */
+
+struct qcom_usb_extcon_info {
+ struct extcon_dev *edev;
+ int irq;
+ struct delayed_work wq_detcable;
+ unsigned long debounce_jiffies;
+};
+
+static const unsigned int qcom_usb_extcon_cable[] = {
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static void qcom_usb_extcon_detect_cable(struct work_struct *work)
+{
+ bool id;
+ int ret;
+ struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work),
+ struct qcom_usb_extcon_info,
+ wq_detcable);
+
+ /* check ID and update cable state */
+ ret = irq_get_irqchip_state(info->irq, IRQCHIP_STATE_LINE_LEVEL, &id);
+ if (ret)
+ return;
+
+ extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+}
+
+static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
+{
+ struct qcom_usb_extcon_info *info = dev_id;
+
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ info->debounce_jiffies);
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_usb_extcon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_usb_extcon_info *info;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->edev = devm_extcon_dev_allocate(dev, qcom_usb_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, info->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ info->debounce_jiffies = msecs_to_jiffies(USB_ID_DEBOUNCE_MS);
+ INIT_DELAYED_WORK(&info->wq_detcable, qcom_usb_extcon_detect_cable);
+
+ info->irq = platform_get_irq_byname(pdev, "usb_id");
+ if (info->irq < 0)
+ return info->irq;
+
+ ret = devm_request_threaded_irq(dev, info->irq, NULL,
+ qcom_usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, info);
+ device_init_wakeup(dev, 1);
+
+ /* Perform initial detection */
+ qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
+
+ return 0;
+}
+
+static int qcom_usb_extcon_remove(struct platform_device *pdev)
+{
+ struct qcom_usb_extcon_info *info = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&info->wq_detcable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qcom_usb_extcon_suspend(struct device *dev)
+{
+ struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = enable_irq_wake(info->irq);
+
+ return ret;
+}
+
+static int qcom_usb_extcon_resume(struct device *dev)
+{
+ struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = disable_irq_wake(info->irq);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qcom_usb_extcon_pm_ops,
+ qcom_usb_extcon_suspend, qcom_usb_extcon_resume);
+
+static const struct of_device_id qcom_usb_extcon_dt_match[] = {
+ { .compatible = "qcom,pm8941-misc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_usb_extcon_dt_match);
+
+static struct platform_driver qcom_usb_extcon_driver = {
+ .probe = qcom_usb_extcon_probe,
+ .remove = qcom_usb_extcon_remove,
+ .driver = {
+ .name = "extcon-pm8941-misc",
+ .pm = &qcom_usb_extcon_pm_ops,
+ .of_match_table = qcom_usb_extcon_dt_match,
+ },
+};
+module_platform_driver(qcom_usb_extcon_driver);
+
+MODULE_DESCRIPTION("QCOM USB ID extcon driver");
+MODULE_AUTHOR("Stephen Boyd <stephen.boyd@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 97e074d70eca..174c388739ea 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -398,9 +398,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
return ret;
/* Change the state of external accessory */
- extcon_set_cable_state_(info->edev, id, attached);
+ extcon_set_state_sync(info->edev, id, attached);
if (id == EXTCON_USB)
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
return 0;
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index df769a17e736..b22325688503 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -411,9 +411,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
return ret;
/* Change the state of external accessory */
- extcon_set_cable_state_(info->edev, id, attached);
+ extcon_set_state_sync(info->edev, id, attached);
if (id == EXTCON_USB)
- extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_SDP,
attached);
return 0;
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2512660dc4b9..a27d350f69e3 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -63,16 +63,16 @@ static void usb_extcon_detect_cable(struct work_struct *work)
* As we don't have event for USB peripheral cable attached,
* we simulate USB peripheral attach here.
*/
- extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, false);
- extcon_set_cable_state_(info->edev, EXTCON_USB, true);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
} else {
/*
* ID = 0 means USB HOST cable attached.
* As we don't have event for USB peripheral cable detached,
* we simulate USB peripheral detach here.
*/
- extcon_set_cable_state_(info->edev, EXTCON_USB, false);
- extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, true);
+ extcon_set_state_sync(info->edev, EXTCON_USB, false);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
}
}
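
The core extcon rework below splits an update in two: extcon_set_state() only records the new attach state of one connector id, while extcon_set_state_sync() also publishes it, and extcon_sync() sends the notifier-chain/uevent notification for an id on its own. A provider that flips several related ids can therefore batch the raw writes and notify afterwards; a minimal sketch, assuming both charger ids are registered on the device:

#include <linux/extcon.h>

/* Hypothetical: switch the reported charger type from SDP to DCP. */
static void report_dcp(struct extcon_dev *edev)
{
	extcon_set_state(edev, EXTCON_CHG_USB_SDP, false);
	extcon_set_state(edev, EXTCON_CHG_USB_DCP, true);

	/* Notify consumers once both states are consistent. */
	extcon_sync(edev, EXTCON_CHG_USB_SDP);
	extcon_sync(edev, EXTCON_CHG_USB_DCP);
}
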
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 8682efc0f57b..78298460d168 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -38,43 +38,159 @@
#define SUPPORTED_CABLE_MAX 32
#define CABLE_NAME_MAX 30
-static const char *extcon_name[] = {
- [EXTCON_NONE] = "NONE",
+struct __extcon_info {
+ unsigned int type;
+ unsigned int id;
+ const char *name;
+
+} extcon_info[] = {
+ [EXTCON_NONE] = {
+ .type = EXTCON_TYPE_MISC,
+ .id = EXTCON_NONE,
+ .name = "NONE",
+ },
/* USB external connector */
- [EXTCON_USB] = "USB",
- [EXTCON_USB_HOST] = "USB-HOST",
+ [EXTCON_USB] = {
+ .type = EXTCON_TYPE_USB,
+ .id = EXTCON_USB,
+ .name = "USB",
+ },
+ [EXTCON_USB_HOST] = {
+ .type = EXTCON_TYPE_USB,
+ .id = EXTCON_USB_HOST,
+ .name = "USB_HOST",
+ },
/* Charging external connector */
- [EXTCON_CHG_USB_SDP] = "SDP",
- [EXTCON_CHG_USB_DCP] = "DCP",
- [EXTCON_CHG_USB_CDP] = "CDP",
- [EXTCON_CHG_USB_ACA] = "ACA",
- [EXTCON_CHG_USB_FAST] = "FAST-CHARGER",
- [EXTCON_CHG_USB_SLOW] = "SLOW-CHARGER",
+ [EXTCON_CHG_USB_SDP] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_SDP,
+ .name = "SDP",
+ },
+ [EXTCON_CHG_USB_DCP] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_DCP,
+ .name = "DCP",
+ },
+ [EXTCON_CHG_USB_CDP] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_CDP,
+ .name = "CDP",
+ },
+ [EXTCON_CHG_USB_ACA] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_ACA,
+ .name = "ACA",
+ },
+ [EXTCON_CHG_USB_FAST] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_FAST,
+ .name = "FAST-CHARGER",
+ },
+ [EXTCON_CHG_USB_SLOW] = {
+ .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+ .id = EXTCON_CHG_USB_SLOW,
+ .name = "SLOW-CHARGER",
+ },
+ [EXTCON_CHG_WPT] = {
+ .type = EXTCON_TYPE_CHG,
+ .id = EXTCON_CHG_WPT,
+ .name = "WPT",
+ },
/* Jack external connector */
- [EXTCON_JACK_MICROPHONE] = "MICROPHONE",
- [EXTCON_JACK_HEADPHONE] = "HEADPHONE",
- [EXTCON_JACK_LINE_IN] = "LINE-IN",
- [EXTCON_JACK_LINE_OUT] = "LINE-OUT",
- [EXTCON_JACK_VIDEO_IN] = "VIDEO-IN",
- [EXTCON_JACK_VIDEO_OUT] = "VIDEO-OUT",
- [EXTCON_JACK_SPDIF_IN] = "SPDIF-IN",
- [EXTCON_JACK_SPDIF_OUT] = "SPDIF-OUT",
+ [EXTCON_JACK_MICROPHONE] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_MICROPHONE,
+ .name = "MICROPHONE",
+ },
+ [EXTCON_JACK_HEADPHONE] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_HEADPHONE,
+ .name = "HEADPHONE",
+ },
+ [EXTCON_JACK_LINE_IN] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_LINE_IN,
+ .name = "LINE-IN",
+ },
+ [EXTCON_JACK_LINE_OUT] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_LINE_OUT,
+ .name = "LINE-OUT",
+ },
+ [EXTCON_JACK_VIDEO_IN] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_VIDEO_IN,
+ .name = "VIDEO-IN",
+ },
+ [EXTCON_JACK_VIDEO_OUT] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_VIDEO_OUT,
+ .name = "VIDEO-OUT",
+ },
+ [EXTCON_JACK_SPDIF_IN] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_SPDIF_IN,
+ .name = "SPDIF-IN",
+ },
+ [EXTCON_JACK_SPDIF_OUT] = {
+ .type = EXTCON_TYPE_JACK,
+ .id = EXTCON_JACK_SPDIF_OUT,
+ .name = "SPDIF-OUT",
+ },
/* Display external connector */
- [EXTCON_DISP_HDMI] = "HDMI",
- [EXTCON_DISP_MHL] = "MHL",
- [EXTCON_DISP_DVI] = "DVI",
- [EXTCON_DISP_VGA] = "VGA",
+ [EXTCON_DISP_HDMI] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_HDMI,
+ .name = "HDMI",
+ },
+ [EXTCON_DISP_MHL] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_MHL,
+ .name = "MHL",
+ },
+ [EXTCON_DISP_DVI] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_DVI,
+ .name = "DVI",
+ },
+ [EXTCON_DISP_VGA] = {
+ .type = EXTCON_TYPE_DISP,
+ .id = EXTCON_DISP_VGA,
+ .name = "VGA",
+ },
+ [EXTCON_DISP_DP] = {
+ .type = EXTCON_TYPE_DISP | EXTCON_TYPE_USB,
+ .id = EXTCON_DISP_DP,
+ .name = "DP",
+ },
+ [EXTCON_DISP_HMD] = {
+ .type = EXTCON_TYPE_DISP | EXTCON_TYPE_USB,
+ .id = EXTCON_DISP_HMD,
+ .name = "HMD",
+ },
/* Miscellaneous external connector */
- [EXTCON_DOCK] = "DOCK",
- [EXTCON_JIG] = "JIG",
- [EXTCON_MECHANICAL] = "MECHANICAL",
-
- NULL,
+ [EXTCON_DOCK] = {
+ .type = EXTCON_TYPE_MISC,
+ .id = EXTCON_DOCK,
+ .name = "DOCK",
+ },
+ [EXTCON_JIG] = {
+ .type = EXTCON_TYPE_MISC,
+ .id = EXTCON_JIG,
+ .name = "JIG",
+ },
+ [EXTCON_MECHANICAL] = {
+ .type = EXTCON_TYPE_MISC,
+ .id = EXTCON_MECHANICAL,
+ .name = "MECHANICAL",
+ },
+
+ { /* sentinel */ }
};
/**
@@ -95,6 +211,16 @@ struct extcon_cable {
struct device_attribute attr_state;
struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
+
+ union extcon_property_value usb_propval[EXTCON_PROP_USB_CNT];
+ union extcon_property_value chg_propval[EXTCON_PROP_CHG_CNT];
+ union extcon_property_value jack_propval[EXTCON_PROP_JACK_CNT];
+ union extcon_property_value disp_propval[EXTCON_PROP_DISP_CNT];
+
+ unsigned long usb_bits[BITS_TO_LONGS(EXTCON_PROP_USB_CNT)];
+ unsigned long chg_bits[BITS_TO_LONGS(EXTCON_PROP_CHG_CNT)];
+ unsigned long jack_bits[BITS_TO_LONGS(EXTCON_PROP_JACK_CNT)];
+ unsigned long disp_bits[BITS_TO_LONGS(EXTCON_PROP_DISP_CNT)];
};
static struct class *extcon_class;
@@ -147,14 +273,93 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
+static int get_extcon_type(unsigned int prop)
{
- if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
- *attached = ((new >> idx) & 0x1) ? true : false;
- return true;
+ switch (prop) {
+ case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
+ return EXTCON_TYPE_USB;
+ case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
+ return EXTCON_TYPE_CHG;
+ case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
+ return EXTCON_TYPE_JACK;
+ case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
+ return EXTCON_TYPE_DISP;
+ default:
+ return -EINVAL;
}
+}
+
+static bool is_extcon_attached(struct extcon_dev *edev, unsigned int index)
+{
+ return !!(edev->state & BIT(index));
+}
+
+static bool is_extcon_changed(struct extcon_dev *edev, int index,
+ bool new_state)
+{
+ int state = !!(edev->state & BIT(index));
+ return (state != new_state);
+}
+
+static bool is_extcon_property_supported(unsigned int id, unsigned int prop)
+{
+ int type;
+
+ /* Check whether the property is supported or not. */
+ type = get_extcon_type(prop);
+ if (type < 0)
+ return false;
- return false;
+ /* Check whether a specific extcon id supports the property or not. */
+ return !!(extcon_info[id].type & type);
+}
+
+static int is_extcon_property_capability(struct extcon_dev *edev,
+ unsigned int id, int index, unsigned int prop)
+{
+ struct extcon_cable *cable;
+ int type, ret;
+
+ /* Check whether the property is supported or not. */
+ type = get_extcon_type(prop);
+ if (type < 0)
+ return type;
+
+ cable = &edev->cables[index];
+
+ switch (type) {
+ case EXTCON_TYPE_USB:
+ ret = test_bit(prop - EXTCON_PROP_USB_MIN, cable->usb_bits);
+ break;
+ case EXTCON_TYPE_CHG:
+ ret = test_bit(prop - EXTCON_PROP_CHG_MIN, cable->chg_bits);
+ break;
+ case EXTCON_TYPE_JACK:
+ ret = test_bit(prop - EXTCON_PROP_JACK_MIN, cable->jack_bits);
+ break;
+ case EXTCON_TYPE_DISP:
+ ret = test_bit(prop - EXTCON_PROP_DISP_MIN, cable->disp_bits);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void init_property(struct extcon_dev *edev, unsigned int id, int index)
+{
+ unsigned int type = extcon_info[id].type;
+ struct extcon_cable *cable = &edev->cables[index];
+
+ if (EXTCON_TYPE_USB & type)
+ memset(cable->usb_propval, 0, sizeof(cable->usb_propval));
+ if (EXTCON_TYPE_CHG & type)
+ memset(cable->chg_propval, 0, sizeof(cable->chg_propval));
+ if (EXTCON_TYPE_JACK & type)
+ memset(cable->jack_propval, 0, sizeof(cable->jack_propval));
+ if (EXTCON_TYPE_DISP & type)
+ memset(cable->disp_propval, 0, sizeof(cable->disp_propval));
}
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
@@ -168,32 +373,13 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < edev->max_supported; i++) {
count += sprintf(buf + count, "%s=%d\n",
- extcon_name[edev->supported_cable[i]],
+ extcon_info[edev->supported_cable[i]].name,
!!(edev->state & (1 << i)));
}
return count;
}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u32 state;
- ssize_t ret = 0;
- struct extcon_dev *edev = dev_get_drvdata(dev);
-
- ret = sscanf(buf, "0x%x", &state);
- if (ret == 0)
- ret = -EINVAL;
- else
- ret = extcon_set_state(edev, state);
-
- if (ret < 0)
- return ret;
-
- return count;
-}
-static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RO(state);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -212,7 +398,7 @@ static ssize_t cable_name_show(struct device *dev,
int i = cable->cable_index;
return sprintf(buf, "%s\n",
- extcon_name[cable->edev->supported_cable[i]]);
+ extcon_info[cable->edev->supported_cable[i]].name);
}
static ssize_t cable_state_show(struct device *dev,
@@ -224,26 +410,17 @@ static ssize_t cable_state_show(struct device *dev,
int i = cable->cable_index;
return sprintf(buf, "%d\n",
- extcon_get_cable_state_(cable->edev,
- cable->edev->supported_cable[i]));
+ extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
}
/**
- * extcon_update_state() - Update the cable attach states of the extcon device
- * only for the masked bits.
- * @edev: the extcon device
- * @mask: the bit mask to designate updated bits.
- * @state: new cable attach status for @edev
- *
- * Changing the state sends uevent with environment variable containing
- * the name of extcon device (envp[0]) and the state output (envp[1]).
- * Tizen uses this format for extcon device to get events from ports.
- * Android uses this format as well.
+ * extcon_sync() - Synchronize the state of an external connector
+ * @edev: the extcon device that has the cable.
*
- * Note that the notifier provides which bits are changed in the state
- * variable with the val parameter (second) to the callback.
+ * This function sends a notification to synchronize all the states of a
+ * specific external connector.
*/
-int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
+int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
char name_buf[120];
char state_buf[120];
@@ -252,100 +429,102 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
int env_offset = 0;
int length;
int index;
+ int state;
unsigned long flags;
- bool attached;
if (!edev)
return -EINVAL;
- spin_lock_irqsave(&edev->lock, flags);
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
- if (edev->state != ((edev->state & ~mask) | (state & mask))) {
- u32 old_state;
+ spin_lock_irqsave(&edev->lock, flags);
- if (check_mutually_exclusive(edev, (edev->state & ~mask) |
- (state & mask))) {
- spin_unlock_irqrestore(&edev->lock, flags);
- return -EPERM;
- }
+ state = !!(edev->state & BIT(index));
+ raw_notifier_call_chain(&edev->nh[index], state, edev);
- old_state = edev->state;
- edev->state &= ~mask;
- edev->state |= state & mask;
+ /* This could be in interrupt handler */
+ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!prop_buf) {
+ /* Unlock early before uevent */
+ spin_unlock_irqrestore(&edev->lock, flags);
- for (index = 0; index < edev->max_supported; index++) {
- if (is_extcon_changed(old_state, edev->state, index,
- &attached))
- raw_notifier_call_chain(&edev->nh[index],
- attached, edev);
- }
+ dev_err(&edev->dev, "out of memory in extcon_set_state\n");
+ kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
- /* This could be in interrupt handler */
- prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
- if (prop_buf) {
- length = name_show(&edev->dev, NULL, prop_buf);
- if (length > 0) {
- if (prop_buf[length - 1] == '\n')
- prop_buf[length - 1] = 0;
- snprintf(name_buf, sizeof(name_buf),
- "NAME=%s", prop_buf);
- envp[env_offset++] = name_buf;
- }
- length = state_show(&edev->dev, NULL, prop_buf);
- if (length > 0) {
- if (prop_buf[length - 1] == '\n')
- prop_buf[length - 1] = 0;
- snprintf(state_buf, sizeof(state_buf),
- "STATE=%s", prop_buf);
- envp[env_offset++] = state_buf;
- }
- envp[env_offset] = NULL;
- /* Unlock early before uevent */
- spin_unlock_irqrestore(&edev->lock, flags);
+ return 0;
+ }
- kobject_uevent_env(&edev->dev.kobj, KOBJ_CHANGE, envp);
- free_page((unsigned long)prop_buf);
- } else {
- /* Unlock early before uevent */
- spin_unlock_irqrestore(&edev->lock, flags);
+ length = name_show(&edev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf), "NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
- dev_err(&edev->dev, "out of memory in extcon_set_state\n");
- kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
- }
- } else {
- /* No changes */
- spin_unlock_irqrestore(&edev->lock, flags);
+ length = state_show(&edev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf), "STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
}
+ envp[env_offset] = NULL;
+
+ /* Unlock early before uevent */
+ spin_unlock_irqrestore(&edev->lock, flags);
+ kobject_uevent_env(&edev->dev.kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
return 0;
}
-EXPORT_SYMBOL_GPL(extcon_update_state);
+EXPORT_SYMBOL_GPL(extcon_sync);
/**
- * extcon_set_state() - Set the cable attach states of the extcon device.
- * @edev: the extcon device
- * @state: new cable attach status for @edev
- *
- * Note that notifier provides which bits are changed in the state
- * variable with the val parameter (second) to the callback.
+ * extcon_get_state() - Get the state of an external connector.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector in extcon enumeration.
*/
-int extcon_set_state(struct extcon_dev *edev, u32 state)
+int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
{
+ int index, state;
+ unsigned long flags;
+
if (!edev)
return -EINVAL;
- return extcon_update_state(edev, 0xffffffff, state);
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&edev->lock, flags);
+ state = is_extcon_attached(edev, index);
+ spin_unlock_irqrestore(&edev->lock, flags);
+
+ return state;
}
-EXPORT_SYMBOL_GPL(extcon_set_state);
+EXPORT_SYMBOL_GPL(extcon_get_state);
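
Illustrative note (not part of this patch): a minimal sketch of how a consumer
might use the renamed extcon_get_state() API above, assuming <linux/extcon.h>
is included and EXTCON_USB is the connector of interest; the helper name is
hypothetical.

	static void my_check_usb(struct extcon_dev *edev)
	{
		int state = extcon_get_state(edev, EXTCON_USB);

		if (state < 0)
			pr_warn("reading EXTCON_USB state failed: %d\n", state);
		else
			pr_info("USB cable %s\n", state ? "attached" : "detached");
	}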
/**
- * extcon_get_cable_state_() - Get the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector in extcon enumeration.
+ * extcon_set_state() - Set the state of an external connector
+ * without a notification.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @state: the new cable status. The default semantics is
+ * true: attached / false: detached.
+ *
+ * This function only sets the state of an external connector without
+ * a notification. To synchronize the data of an external connector,
+ * use extcon_set_state_sync() and extcon_sync().
*/
-int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
+int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
{
- int index;
+ unsigned long flags;
+ int index, ret = 0;
if (!edev)
return -EINVAL;
@@ -354,41 +533,338 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
if (index < 0)
return index;
- if (edev->max_supported && edev->max_supported <= index)
- return -EINVAL;
+ spin_lock_irqsave(&edev->lock, flags);
+
+ /* Check whether the external connector's state is changed. */
+ if (!is_extcon_changed(edev, index, cable_state))
+ goto out;
+
+ if (check_mutually_exclusive(edev,
+ (edev->state & ~BIT(index)) | (cable_state & BIT(index)))) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /*
+ * Initialize the value of extcon property before setting
+ * the detached state for an external connector.
+ */
+ if (!cable_state)
+ init_property(edev, id, index);
+
+	/* Update the state for an external connector. */
+ if (cable_state)
+ edev->state |= BIT(index);
+ else
+ edev->state &= ~(BIT(index));
+out:
+ spin_unlock_irqrestore(&edev->lock, flags);
- return !!(edev->state & (1 << index));
+ return ret;
}
-EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
+EXPORT_SYMBOL_GPL(extcon_set_state);
/**
- * extcon_set_cable_state_() - Set the status of a specific cable.
+ * extcon_set_state_sync() - Set the state of an external connector
+ * with a notification.
* @edev: the extcon device that has the cable.
* @id: the unique id of each external connector
* in extcon enumeration.
* @state: the new cable status. The default semantics is
* true: attached / false: detached.
+ *
+ * This function sets the state of an external connector and synchronizes
+ * the data by using a notification.
*/
-int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
bool cable_state)
{
- u32 state;
+ int ret, index;
+ unsigned long flags;
+
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ /* Check whether the external connector's state is changed. */
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = is_extcon_changed(edev, index, cable_state);
+ spin_unlock_irqrestore(&edev->lock, flags);
+ if (!ret)
+ return 0;
+
+ ret = extcon_set_state(edev, id, cable_state);
+ if (ret < 0)
+ return ret;
+
+ return extcon_sync(edev, id);
+}
+EXPORT_SYMBOL_GPL(extcon_set_state_sync);
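
Illustrative note (not part of this patch): a hedged sketch of the provider
side, showing a threaded interrupt handler reporting a charger attach through
extcon_set_state_sync(). The handler name and the EXTCON_CHG_USB_SDP choice
are assumptions for illustration only.

	static irqreturn_t my_chg_irq_thread(int irq, void *data)
	{
		struct extcon_dev *edev = data;
		int ret;

		/* Update the state and notify registered consumers in one call. */
		ret = extcon_set_state_sync(edev, EXTCON_CHG_USB_SDP, true);
		if (ret)
			pr_err("failed to report charger attach: %d\n", ret);

		return IRQ_HANDLED;
	}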
+
+/**
+ * extcon_get_property() - Get the property value of a specific cable.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @prop: the property id among enum extcon_property.
+ * @prop_val: the pointer in which the property value is stored.
+ *
+ * When getting the property value of an external connector, the connector
+ * should be attached. If it is detached, this function just returns 0
+ * without a property value. Also, each property should be included in the
+ * list of supported properties according to the type of external connector.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val)
+{
+ struct extcon_cable *cable;
+ unsigned long flags;
+ int index, ret = 0;
+
+ *prop_val = (union extcon_property_value)(0);
+
+ if (!edev)
+ return -EINVAL;
+
+ /* Check whether the property is supported or not */
+ if (!is_extcon_property_supported(id, prop))
+ return -EINVAL;
+
+ /* Find the cable index of external connector by using id */
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&edev->lock, flags);
+
+ /* Check whether the property is available or not. */
+ if (!is_extcon_property_capability(edev, id, index, prop)) {
+ spin_unlock_irqrestore(&edev->lock, flags);
+ return -EPERM;
+ }
+
+ /*
+ * Check whether the external connector is attached.
+ * If external connector is detached, the user can not
+ * get the property value.
+ */
+ if (!is_extcon_attached(edev, index)) {
+ spin_unlock_irqrestore(&edev->lock, flags);
+ return 0;
+ }
+
+ cable = &edev->cables[index];
+
+ /* Get the property value according to extcon type */
+ switch (prop) {
+ case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
+ *prop_val = cable->usb_propval[prop - EXTCON_PROP_USB_MIN];
+ break;
+ case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
+ *prop_val = cable->chg_propval[prop - EXTCON_PROP_CHG_MIN];
+ break;
+ case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
+ *prop_val = cable->jack_propval[prop - EXTCON_PROP_JACK_MIN];
+ break;
+ case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
+ *prop_val = cable->disp_propval[prop - EXTCON_PROP_DISP_MIN];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&edev->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(extcon_get_property);
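
Illustrative note (not part of this patch): a sketch of a consumer reading a
property with the new API, assuming the provider reports EXTCON_PROP_USB_VBUS
for EXTCON_USB; the helper name is hypothetical.

	static int my_read_vbus(struct extcon_dev *edev)
	{
		union extcon_property_value val;
		int ret;

		ret = extcon_get_property(edev, EXTCON_USB,
					  EXTCON_PROP_USB_VBUS, &val);
		if (ret)
			return ret;

		/* val is left at zero when the connector is detached (see above). */
		return val.intval;
	}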
+
+/**
+ * extcon_set_property() - Set the property value of a specific cable.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @prop: the property id among enum extcon_property.
+ * @prop_val: the new value of the property.
+ *
+ * Each property should be included in the list of supported properties
+ * according to the type of external connector.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ struct extcon_cable *cable;
+ unsigned long flags;
+ int index, ret = 0;
+
+ if (!edev)
+ return -EINVAL;
+
+ /* Check whether the property is supported or not */
+ if (!is_extcon_property_supported(id, prop))
+ return -EINVAL;
+
+ /* Find the cable index of external connector by using id */
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&edev->lock, flags);
+
+ /* Check whether the property is available or not. */
+ if (!is_extcon_property_capability(edev, id, index, prop)) {
+ spin_unlock_irqrestore(&edev->lock, flags);
+ return -EPERM;
+ }
+
+ cable = &edev->cables[index];
+
+ /* Set the property value according to extcon type */
+ switch (prop) {
+ case EXTCON_PROP_USB_MIN ... EXTCON_PROP_USB_MAX:
+ cable->usb_propval[prop - EXTCON_PROP_USB_MIN] = prop_val;
+ break;
+ case EXTCON_PROP_CHG_MIN ... EXTCON_PROP_CHG_MAX:
+ cable->chg_propval[prop - EXTCON_PROP_CHG_MIN] = prop_val;
+ break;
+ case EXTCON_PROP_JACK_MIN ... EXTCON_PROP_JACK_MAX:
+ cable->jack_propval[prop - EXTCON_PROP_JACK_MIN] = prop_val;
+ break;
+ case EXTCON_PROP_DISP_MIN ... EXTCON_PROP_DISP_MAX:
+ cable->disp_propval[prop - EXTCON_PROP_DISP_MIN] = prop_val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_unlock_irqrestore(&edev->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(extcon_set_property);
+
+/**
+ * extcon_set_property_sync() - Set the property value of a specific cable
+ * with a notification.
+ * @prop_val: the new value of the property.
+ *
+ * When setting the property value of an external connector, the connector
+ * should be attached. Each property should be included in the list of
+ * supported properties according to the type of external connector.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ int ret;
+
+ ret = extcon_set_property(edev, id, prop, prop_val);
+ if (ret < 0)
+ return ret;
+
+ return extcon_sync(edev, id);
+}
+EXPORT_SYMBOL_GPL(extcon_set_property_sync);
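
Illustrative note (not part of this patch): a sketch of how a provider can
batch several property updates with extcon_set_property() and push a single
notification with extcon_sync(), rather than calling the one-shot
extcon_set_property_sync() once per property. The property choices and
helper name are assumptions.

	static int my_report_typec(struct extcon_dev *edev, bool polarity, bool ss)
	{
		union extcon_property_value v;
		int ret;

		v.intval = polarity;
		ret = extcon_set_property(edev, EXTCON_USB,
					  EXTCON_PROP_USB_TYPEC_POLARITY, v);
		if (ret)
			return ret;

		v.intval = ss;
		ret = extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_SS, v);
		if (ret)
			return ret;

		/* One uevent/notifier pass covers both updates. */
		return extcon_sync(edev, EXTCON_USB);
	}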
+
+/**
+ * extcon_get_property_capability() - Get the capability of a property
+ * of an external connector.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @prop: the property id among enum extcon_property.
+ *
+ * Returns 1 if the property is available or 0 if not available.
+ */
+int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop)
+{
int index;
if (!edev)
return -EINVAL;
+ /* Check whether the property is supported or not */
+ if (!is_extcon_property_supported(id, prop))
+ return -EINVAL;
+
+ /* Find the cable index of external connector by using id */
index = find_cable_index_by_id(edev, id);
if (index < 0)
return index;
- if (edev->max_supported && edev->max_supported <= index)
+ return is_extcon_property_capability(edev, id, index, prop);
+}
+EXPORT_SYMBOL_GPL(extcon_get_property_capability);
+
+/**
+ * extcon_set_property_capability() - Set the capability of a property
+ * of an external connector.
+ * @edev: the extcon device that has the cable.
+ * @id: the unique id of each external connector
+ * in extcon enumeration.
+ * @prop: the property id among enum extcon_property.
+ *
+ * This function sets the capability of a property for an external connector
+ * by marking the bit in the capability bitmap that indicates the property
+ * is available.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop)
+{
+ struct extcon_cable *cable;
+ int index, type, ret = 0;
+
+ if (!edev)
return -EINVAL;
- state = cable_state ? (1 << index) : 0;
- return extcon_update_state(edev, 1 << index, state);
+ /* Check whether the property is supported or not. */
+ if (!is_extcon_property_supported(id, prop))
+ return -EINVAL;
+
+ /* Find the cable index of external connector by using id. */
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ type = get_extcon_type(prop);
+ if (type < 0)
+ return type;
+
+ cable = &edev->cables[index];
+
+ switch (type) {
+ case EXTCON_TYPE_USB:
+ __set_bit(prop - EXTCON_PROP_USB_MIN, cable->usb_bits);
+ break;
+ case EXTCON_TYPE_CHG:
+ __set_bit(prop - EXTCON_PROP_CHG_MIN, cable->chg_bits);
+ break;
+ case EXTCON_TYPE_JACK:
+ __set_bit(prop - EXTCON_PROP_JACK_MIN, cable->jack_bits);
+ break;
+ case EXTCON_TYPE_DISP:
+ __set_bit(prop - EXTCON_PROP_DISP_MIN, cable->disp_bits);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
+EXPORT_SYMBOL_GPL(extcon_set_property_capability);
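
Illustrative note (not part of this patch): a sketch of a provider declaring,
at probe time, which properties it can report so that later
extcon_set_property() calls pass the capability check; the property choices
and helper name are assumptions.

	static void my_declare_capabilities(struct extcon_dev *edev)
	{
		extcon_set_property_capability(edev, EXTCON_USB,
					       EXTCON_PROP_USB_VBUS);
		extcon_set_property_capability(edev, EXTCON_USB,
					       EXTCON_PROP_USB_TYPEC_POLARITY);

		/* A consumer can check availability before reading a property. */
		if (extcon_get_property_capability(edev, EXTCON_USB,
						   EXTCON_PROP_USB_SS) > 0)
			pr_debug("EXTCON_PROP_USB_SS is available\n");
	}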
/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name
@@ -428,7 +904,7 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
- int ret, idx;
+ int ret, idx = -EINVAL;
if (!nb)
return -EINVAL;
@@ -846,13 +1322,13 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
return ERR_PTR(-EINVAL);
if (!dev->of_node) {
- dev_err(dev, "device does not have a device node entry\n");
+ dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
node = of_parse_phandle(dev->of_node, "extcon", index);
if (!node) {
- dev_err(dev, "failed to get phandle in %s node\n",
+ dev_dbg(dev, "failed to get phandle in %s node\n",
dev->of_node->full_name);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 0e22f241403b..bca172d42c74 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -209,5 +209,6 @@ config HAVE_ARM_SMCCC
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/meson/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 44a59dcfc398..898ac41fa8b3 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-y += broadcom/
+obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 1c33d7469e4a..f402ba2eed46 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -380,7 +380,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
- .flags = PSTORE_FLAGS_FRAGILE,
+ .flags = PSTORE_FLAGS_DMESG,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 000000000000..170d7e8bcdfb
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,9 @@
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+ bool
+ default ARCH_MESON
+ depends on ARM64_4K_PAGES
+ help
+ Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 000000000000..9ab3884f96bc
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MESON_SM) += meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 000000000000..b0d254930ed3
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,248 @@
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+ unsigned int index;
+ u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+ unsigned int shmem_size;
+ u32 cmd_shmem_in_base;
+ u32 cmd_shmem_out_base;
+ struct meson_sm_cmd cmd[];
+};
+
+struct meson_sm_chip gxbb_chip = {
+ .shmem_size = SZ_4K,
+ .cmd_shmem_in_base = 0x82000020,
+ .cmd_shmem_out_base = 0x82000021,
+ .cmd = {
+ CMD(SM_EFUSE_READ, 0x82000030),
+ CMD(SM_EFUSE_WRITE, 0x82000031),
+ CMD(SM_EFUSE_USER_MAX, 0x82000033),
+ { /* sentinel */ },
+ },
+};
+
+struct meson_sm_firmware {
+ const struct meson_sm_chip *chip;
+ void __iomem *sm_shmem_in_base;
+ void __iomem *sm_shmem_out_base;
+};
+
+static struct meson_sm_firmware fw;
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+ unsigned int cmd_index)
+{
+ const struct meson_sm_cmd *cmd = chip->cmd;
+
+ while (cmd->smc_id && cmd->index != cmd_index)
+ cmd++;
+
+ return cmd->smc_id;
+}
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+ return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+ u32 sm_phy_base;
+
+ sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+ if (!sm_phy_base)
+ return 0;
+
+ return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @cmd_index: Index of the SMC32 function ID
+ * @ret: Returned value
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 cmd, lret;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ cmd = meson_sm_get_cmd(fw.chip, cmd_index);
+ if (!cmd)
+ return -EINVAL;
+
+ lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+ if (ret)
+ *ret = lret;
+
+ return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @buffer: Buffer to store the retrieved data
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of read data on success, a negative value on error
+ */
+int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 size;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (!fw.chip->cmd_shmem_out_base)
+ return -EINVAL;
+
+ if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!size || size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (buffer)
+ memcpy(buffer, fw.sm_shmem_out_base, size);
+
+ return size;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @buffer: Buffer containing data to send
+ * @size: Size of the data to send
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 written;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (!fw.chip->cmd_shmem_in_base)
+ return -EINVAL;
+
+ memcpy(fw.sm_shmem_in_base, buffer, size);
+
+ if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!written)
+ return -EINVAL;
+
+ return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
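
Illustrative note (not part of this patch): a sketch of a caller reading eFuse
data through the shared output buffer, assuming the (arg0 = offset,
arg1 = size) convention used by the Amlogic eFuse consumers of SM_EFUSE_READ;
the helper name is hypothetical.

	static int my_read_efuse(void *buf, u32 offset, u32 size)
	{
		/* Returns the number of bytes copied out, or a negative error. */
		return meson_sm_call_read(buf, SM_EFUSE_READ, offset, size, 0, 0, 0);
	}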
+
+static const struct of_device_id meson_sm_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+ { /* sentinel */ },
+};
+
+int __init meson_sm_init(void)
+{
+ const struct meson_sm_chip *chip;
+ const struct of_device_id *matched_np;
+ struct device_node *np;
+
+ np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np);
+ if (!np)
+ return -ENODEV;
+
+ chip = matched_np->data;
+ if (!chip) {
+ pr_err("unable to setup secure-monitor data\n");
+ goto out;
+ }
+
+ if (chip->cmd_shmem_in_base) {
+ fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_in_base))
+ goto out;
+ }
+
+ if (chip->cmd_shmem_out_base) {
+ fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_out_base))
+ goto out_in_base;
+ }
+
+ fw.chip = chip;
+ pr_info("secure-monitor enabled\n");
+
+ return 0;
+
+out_in_base:
+ iounmap(fw.sm_shmem_in_base);
+out:
+ return -EINVAL;
+}
+device_initcall(meson_sm_init);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e64a501adbf4..d95c70227c05 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,4 +1,7 @@
-/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/*
+ * Qualcomm SCM driver
+ *
+ * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -12,7 +15,7 @@
*
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
@@ -376,8 +379,6 @@ static const struct of_device_id qcom_scm_dt_match[] = {
{}
};
-MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
-
static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
@@ -414,14 +415,4 @@ static int __init qcom_scm_init(void)
return platform_driver_register(&qcom_scm_driver);
}
-
subsys_initcall(qcom_scm_init);
-
-static void __exit qcom_scm_exit(void)
-{
- platform_driver_unregister(&qcom_scm_driver);
-}
-module_exit(qcom_scm_exit);
-
-MODULE_DESCRIPTION("Qualcomm SCM driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index d61410299ec0..cd84934774cc 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -21,6 +21,7 @@ config FPGA_MGR_SOCFPGA
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
+ depends on ARCH_ZYNQ || COMPILE_TEST
depends on HAS_DMA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 24caedb00a7a..26ee00f6bd58 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -10,27 +10,6 @@ config ARCH_HAVE_CUSTOM_GPIO_H
overriding the default implementations. New uses of this are
strongly discouraged.
-config ARCH_WANT_OPTIONAL_GPIOLIB
- bool
- help
- Select this config option from the architecture Kconfig, if
- it is possible to use gpiolib on the architecture, but let the
- user decide whether to actually build it or not.
- Select this instead of ARCH_REQUIRE_GPIOLIB, if your architecture does
- not depend on GPIOs being available, but rather let the user
- decide whether he needs it or not.
-
-config ARCH_REQUIRE_GPIOLIB
- bool
- select GPIOLIB
- help
- Platforms select gpiolib if they use this infrastructure
- for all their GPIOs, usually starting with ones integrated
- into SOC processors.
- Selecting this from the architecture code will cause the gpiolib
- code to always get built in.
-
-
menuconfig GPIOLIB
bool "GPIO Support"
select ANON_INODES
@@ -87,6 +66,7 @@ config GPIO_SYSFS
exported to userspace; this can be useful when debugging.
config GPIO_GENERIC
+ depends on HAS_IOMEM # Only for IOMEM drivers
tristate
# put drivers in the right section, in alphabetical order
@@ -96,6 +76,7 @@ config GPIO_MAX730X
tristate
menu "Memory mapped GPIO drivers"
+ depends on HAS_IOMEM
config GPIO_74XX_MMIO
tristate "GPIO driver for 74xx-ICs with MMIO access"
@@ -128,6 +109,13 @@ config GPIO_AMDPT
driver for GPIO functionality on Promontory IOHub
Require ACPI ASL code to enumerate as a platform device.
+config GPIO_ASPEED
+ tristate "Aspeed GPIO support"
+ depends on (ARCH_ASPEED || COMPILE_TEST) && OF_GPIO
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support Aspeed AST2400 and AST2500 GPIO controllers.
+
config GPIO_ATH79
tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
default y if ATH79
@@ -138,6 +126,12 @@ config GPIO_ATH79
Select this option to enable GPIO driver for
Atheros AR71XX/AR724X/AR913X SoC devices.
+config GPIO_AXP209
+ tristate "X-Powers AXP209 PMIC GPIO Support"
+ depends on MFD_AXP20X
+ help
+	  Say yes to enable GPIO support for the AXP209 PMIC.
+
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -237,7 +231,8 @@ config GPIO_ICH
config GPIO_IOP
tristate "Intel IOP GPIO"
- depends on ARM && (ARCH_IOP32X || ARCH_IOP33X)
+ depends on ARCH_IOP32X || ARCH_IOP33X || COMPILE_TEST
+ select GPIO_GENERIC
help
Say yes here to support the GPIO functionality of a number of Intel
IOP32X or IOP33X.
@@ -287,6 +282,18 @@ config GPIO_MM_LANTIQ
(EBU) found on Lantiq SoCs. The gpios are output only as they are
created by attaching a 16bit latch to the bus.
+config GPIO_MOCKUP
+ tristate "GPIO Testing Driver"
+ depends on GPIOLIB
+ select GPIO_SYSFS
+ help
+	  This enables the GPIO testing driver, which provides a way to test the
+	  GPIO subsystem through sysfs (or a char device) and debugfs. GPIO_SYSFS
+	  must be selected for this test.
+	  Users can exercise it through the script in
+	  tools/testing/selftests/gpio/gpio-mockup.sh; see that script for
+	  usage details.
+
config GPIO_MOXART
bool "MOXART GPIO support"
depends on ARCH_MOXART || COMPILE_TEST
@@ -574,6 +581,19 @@ config GPIO_F7188X
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
+config GPIO_GPIO_MM
+ tristate "Diamond Systems GPIO-MM GPIO support"
+ depends on ISA_BUS_API
+ help
+ Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
+
+ The Diamond Systems GPIO-MM device features 48 lines of digital I/O
+ via the emulation of dual 82C55A PPI chips. This driver provides GPIO
+ support for these 48 channels of digital I/O.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
config GPIO_IT87
tristate "IT87xx GPIO support"
help
@@ -780,6 +800,13 @@ config GPIO_TPIC2810
To compile this driver as a module, choose M here: the module will
be called gpio-tpic2810.
+config GPIO_TS4900
+ tristate "Technologic Systems FPGA I2C GPIO"
+ select REGMAP_I2C
+ help
+	  Say yes here to enable the GPIO driver for Technologic's FPGA core.
+ Series supported include TS-4100, TS-4900, TS-7970 and TS-7990.
+
endmenu
menu "MFD GPIO expanders"
@@ -849,6 +876,14 @@ config GPIO_DLN2
This driver can also be built as a module. If so, the module
will be called gpio-dln2.
+config HTC_EGPIO
+ bool "HTC EGPIO support"
+ depends on GPIOLIB && ARM
+ help
+ This driver supports the CPLD egpio chip present on
+ several HTC phones. It provides basic support for input
+ pins, output pins, and irqs.
+
config GPIO_JANZ_TTL
tristate "Janz VMOD-TTL Digital IO Module"
depends on MFD_JANZ_CMODIO
@@ -875,6 +910,16 @@ config GPIO_LP3943
LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
Open drain outputs are required for this usage.
+config GPIO_LP873X
+ tristate "TI LP873X GPO"
+ depends on MFD_TI_LP873X
+ help
+	  This driver supports the GPOs on TI LP873X PMICs. Two GPOs are
+	  present on LP873X PMICs.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-lp873x.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -985,6 +1030,19 @@ config GPIO_UCB1400
This enables support for the Philips UCB1400 GPIO pins.
The UCB1400 is an AC97 audio codec.
+config GPIO_WHISKEY_COVE
+ tristate "GPIO support for Whiskey Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ select GPIOLIB_IRQCHIP
+ help
+ Support for GPIO pins on Whiskey Cove PMIC.
+
+	  Say yes if you have an Intel SoC-based tablet with a Whiskey Cove
+	  PMIC inside.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-wcove.
+
config GPIO_WM831X
tristate "WM831x GPIOs"
depends on MFD_WM831X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 2a035ed8f168..ab28a2daeacc 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
+obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
+obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
@@ -44,7 +46,9 @@ obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_ETRAXFS) += gpio-etraxfs.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
+obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
@@ -56,6 +60,7 @@ obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -70,6 +75,7 @@ obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
+obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXART) += gpio-moxart.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
@@ -110,6 +116,7 @@ obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
+obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
@@ -120,6 +127,7 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 3f87a03abc22..5bddbd507ca9 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -17,6 +17,7 @@
*/
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 991370494922..482462889c8f 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -79,7 +79,7 @@ static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ARIZONA_GPN_LVL, value);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "arizona",
.owner = THIS_MODULE,
.direction_input = arizona_gpio_direction_in,
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
new file mode 100644
index 000000000000..03a5925a423c
--- /dev/null
+++ b/drivers/gpio/gpio-aspeed.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+#include <linux/pinctrl/consumer.h>
+
+struct aspeed_gpio {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ void __iomem *base;
+ int irq;
+};
+
+struct aspeed_gpio_bank {
+ uint16_t val_regs;
+ uint16_t irq_regs;
+ const char names[4];
+};
+
+static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
+ {
+ .val_regs = 0x0000,
+ .irq_regs = 0x0008,
+ .names = { 'A', 'B', 'C', 'D' },
+ },
+ {
+ .val_regs = 0x0020,
+ .irq_regs = 0x0028,
+ .names = { 'E', 'F', 'G', 'H' },
+ },
+ {
+ .val_regs = 0x0070,
+ .irq_regs = 0x0098,
+ .names = { 'I', 'J', 'K', 'L' },
+ },
+ {
+ .val_regs = 0x0078,
+ .irq_regs = 0x00e8,
+ .names = { 'M', 'N', 'O', 'P' },
+ },
+ {
+ .val_regs = 0x0080,
+ .irq_regs = 0x0118,
+ .names = { 'Q', 'R', 'S', 'T' },
+ },
+ {
+ .val_regs = 0x0088,
+ .irq_regs = 0x0148,
+ .names = { 'U', 'V', 'W', 'X' },
+ },
+ /*
+ * A bank exists for { 'Y', 'Z', "AA", "AB" }, but is not implemented.
+ * Only half of GPIOs Y support interrupt configuration, and none of Z,
+ * AA or AB do as they are output only.
+ */
+};
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+
+#define GPIO_DATA 0x00
+#define GPIO_DIR 0x04
+
+#define GPIO_IRQ_ENABLE 0x00
+#define GPIO_IRQ_TYPE0 0x04
+#define GPIO_IRQ_TYPE1 0x08
+#define GPIO_IRQ_TYPE2 0x0c
+#define GPIO_IRQ_STATUS 0x10
+
+static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
+{
+ unsigned int bank = GPIO_BANK(offset);
+
+ WARN_ON(bank > ARRAY_SIZE(aspeed_gpio_banks));
+ return &aspeed_gpio_banks[bank];
+}
+
+static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ unsigned int reg)
+{
+ return gpio->base + bank->val_regs + reg;
+}
+
+static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ unsigned int reg)
+{
+ return gpio->base + bank->irq_regs + reg;
+}
+
+static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA))
+ & GPIO_BIT(offset));
+}
+
+static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr;
+ u32 reg;
+
+ addr = bank_val_reg(gpio, bank, GPIO_DATA);
+ reg = ioread32(addr);
+
+ if (val)
+ reg |= GPIO_BIT(offset);
+ else
+ reg &= ~GPIO_BIT(offset);
+
+ iowrite32(reg, addr);
+}
+
+static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ __aspeed_gpio_set(gc, offset, val);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
+ iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int aspeed_gpio_dir_out(struct gpio_chip *gc,
+ unsigned int offset, int val)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
+ iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+
+ __aspeed_gpio_set(gc, offset, val);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return !val;
+
+}
+
+static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
+ struct aspeed_gpio **gpio,
+ const struct aspeed_gpio_bank **bank,
+ u32 *bit)
+{
+ int offset;
+
+ offset = irqd_to_hwirq(d);
+
+ *gpio = irq_data_get_irq_chip_data(d);
+ *bank = to_bank(offset);
+ *bit = GPIO_BIT(offset);
+
+ return 0;
+}
+
+static void aspeed_gpio_irq_ack(struct irq_data *d)
+{
+ const struct aspeed_gpio_bank *bank;
+ struct aspeed_gpio *gpio;
+ unsigned long flags;
+ void __iomem *status_addr;
+ u32 bit;
+ int rc;
+
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ if (rc)
+ return;
+
+ status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ iowrite32(bit, status_addr);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
+{
+ const struct aspeed_gpio_bank *bank;
+ struct aspeed_gpio *gpio;
+ unsigned long flags;
+ u32 reg, bit;
+ void __iomem *addr;
+ int rc;
+
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ if (rc)
+ return;
+
+ addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ reg = ioread32(addr);
+ if (set)
+ reg |= bit;
+ else
+ reg &= bit;
+ iowrite32(reg, addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_gpio_irq_mask(struct irq_data *d)
+{
+ aspeed_gpio_irq_set_mask(d, false);
+}
+
+static void aspeed_gpio_irq_unmask(struct irq_data *d)
+{
+ aspeed_gpio_irq_set_mask(d, true);
+}
+
+static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 type0 = 0;
+ u32 type1 = 0;
+ u32 type2 = 0;
+ u32 bit, reg;
+ const struct aspeed_gpio_bank *bank;
+ irq_flow_handler_t handler;
+ struct aspeed_gpio *gpio;
+ unsigned long flags;
+ void __iomem *addr;
+ int rc;
+
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ if (rc)
+ return -EINVAL;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ type2 |= bit;
+ case IRQ_TYPE_EDGE_RISING:
+ type0 |= bit;
+ case IRQ_TYPE_EDGE_FALLING:
+ handler = handle_edge_irq;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ type0 |= bit;
+ case IRQ_TYPE_LEVEL_LOW:
+ type1 |= bit;
+ handler = handle_level_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type0;
+ iowrite32(reg, addr);
+
+ addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type1;
+ iowrite32(reg, addr);
+
+ addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type2;
+ iowrite32(reg, addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ irq_set_handler_locked(d, handler);
+
+ return 0;
+}
+
+static void aspeed_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ struct aspeed_gpio *data = gpiochip_get_data(gc);
+ unsigned int i, p, girq;
+ unsigned long reg;
+
+ chained_irq_enter(ic, desc);
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
+ const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
+
+ reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
+
+ for_each_set_bit(p, &reg, 32) {
+ girq = irq_find_mapping(gc->irqdomain, i * 32 + p);
+ generic_handle_irq(girq);
+ }
+
+ }
+
+ chained_irq_exit(ic, desc);
+}
+
+static struct irq_chip aspeed_gpio_irqchip = {
+ .name = "aspeed-gpio",
+ .irq_ack = aspeed_gpio_irq_ack,
+ .irq_mask = aspeed_gpio_irq_mask,
+ .irq_unmask = aspeed_gpio_irq_unmask,
+ .irq_set_type = aspeed_gpio_set_type,
+};
+
+static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
+ struct platform_device *pdev)
+{
+ int rc;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ return rc;
+
+ gpio->irq = rc;
+
+ rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
+ 0, handle_bad_irq, IRQ_TYPE_NONE);
+ if (rc) {
+ dev_info(&pdev->dev, "Could not add irqchip\n");
+ return rc;
+ }
+
+ gpiochip_set_chained_irqchip(&gpio->chip, &aspeed_gpio_irqchip,
+ gpio->irq, aspeed_gpio_irq_handler);
+
+ return 0;
+}
+
+static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int __init aspeed_gpio_probe(struct platform_device *pdev)
+{
+ struct aspeed_gpio *gpio;
+ struct resource *res;
+ int rc;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ spin_lock_init(&gpio->lock);
+
+ gpio->chip.ngpio = ARRAY_SIZE(aspeed_gpio_banks) * 32;
+
+ gpio->chip.parent = &pdev->dev;
+ gpio->chip.direction_input = aspeed_gpio_dir_in;
+ gpio->chip.direction_output = aspeed_gpio_dir_out;
+ gpio->chip.get_direction = aspeed_gpio_get_direction;
+ gpio->chip.request = aspeed_gpio_request;
+ gpio->chip.free = aspeed_gpio_free;
+ gpio->chip.get = aspeed_gpio_get;
+ gpio->chip.set = aspeed_gpio_set;
+ gpio->chip.label = dev_name(&pdev->dev);
+ gpio->chip.base = -1;
+
+ rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_gpio_setup_irqs(gpio, pdev);
+}
+
+static const struct of_device_id aspeed_gpio_of_table[] = {
+ { .compatible = "aspeed,ast2400-gpio" },
+ { .compatible = "aspeed,ast2500-gpio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
+
+static struct platform_driver aspeed_gpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_gpio_of_table,
+ },
+};
+
+module_platform_driver_probe(aspeed_gpio_driver, aspeed_gpio_probe);
+
+MODULE_DESCRIPTION("Aspeed GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index c4f4cddc7c1a..9457e2022bf6 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -15,6 +15,7 @@
#include <linux/platform_data/gpio-ath79.h>
#include <linux/of_device.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/irq.h>
#define AR71XX_GPIO_REG_OE 0x00
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
new file mode 100644
index 000000000000..d9c2a517c6df
--- /dev/null
+++ b/drivers/gpio/gpio-axp209.c
@@ -0,0 +1,192 @@
+/*
+ * AXP20x GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS 0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
+#define AXP20X_GPIO_FUNCTION_INPUT 2
+
+struct axp20x_gpio {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+};
+
+static int axp20x_gpio_get_reg(unsigned offset)
+{
+ switch (offset) {
+ case 0:
+ return AXP20X_GPIO0_CTRL;
+ case 1:
+ return AXP20X_GPIO1_CTRL;
+ case 2:
+ return AXP20X_GPIO2_CTRL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(gpio->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ AXP20X_GPIO_FUNCTION_INPUT);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset + 4));
+}
+
+static int axp20x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ /*
+	 * This shouldn't really happen if the pin is already in use;
+	 * and if it's not in use yet, it doesn't matter since we're
+	 * going to change the value soon anyway. Default to output.
+ */
+ if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
+ return 0;
+
+ /*
+ * The GPIO directions are the three lowest values.
+ * 2 is input, 0 and 1 are output
+ */
+ return val & 2;
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(gpio->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
+ : AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ axp20x_gpio_output(chip, offset, value);
+}
+
+static int axp20x_gpio_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp20x_gpio *gpio;
+ int ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->chip.base = -1;
+ gpio->chip.can_sleep = true;
+ gpio->chip.parent = &pdev->dev;
+ gpio->chip.label = dev_name(&pdev->dev);
+ gpio->chip.owner = THIS_MODULE;
+ gpio->chip.get = axp20x_gpio_get;
+ gpio->chip.get_direction = axp20x_gpio_get_direction;
+ gpio->chip.set = axp20x_gpio_set;
+ gpio->chip.direction_input = axp20x_gpio_input;
+ gpio->chip.direction_output = axp20x_gpio_output;
+ gpio->chip.ngpio = 3;
+
+ gpio->regmap = axp20x->regmap;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
+
+ return 0;
+}
+
+static const struct of_device_id axp20x_gpio_match[] = {
+ { .compatible = "x-powers,axp209-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
+
+static struct platform_driver axp20x_gpio_driver = {
+ .probe = axp20x_gpio_probe,
+ .driver = {
+ .name = "axp20x-gpio",
+ .of_match_table = axp20x_gpio_match,
+ },
+};
+
+module_platform_driver(axp20x_gpio_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 953e4b829e32..3d1cf018e8e7 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -308,7 +308,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
return 0;
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "bcm-kona-gpio",
.owner = THIS_MODULE,
.request = bcm_kona_gpio_request,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index e29553b7ccdb..dd8977cf3e85 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -184,7 +184,7 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
return irq;
}
-static struct gpio_chip reference_gp = {
+static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 2c2c18dc6c4f..82053b52cba0 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -121,7 +121,7 @@ static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
DA9055_IRQ_GPI0 + offset);
}
-static struct gpio_chip reference_gp = {
+static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 600be8418707..e8accde62aa7 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -214,8 +214,7 @@ static struct f7188x_gpio_bank f81866_gpio_bank[] = {
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
int err;
- struct f7188x_gpio_bank *bank =
- container_of(chip, struct f7188x_gpio_bank, chip);
+ struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
struct f7188x_sio *sio = bank->data->sio;
u8 dir;
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
new file mode 100644
index 000000000000..1e7def9449ce
--- /dev/null
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -0,0 +1,267 @@
+/*
+ * GPIO driver for the Diamond Systems GPIO-MM
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following Diamond Systems devices: GPIO-MM and
+ * GPIO-MM-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+
+#define GPIOMM_EXTENT 8
+#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
+
+static unsigned int base[MAX_NUM_GPIOMM];
+static unsigned int num_gpiomm;
+module_param_array(base, uint, &num_gpiomm, 0);
+MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
+
+/**
+ * struct gpiomm_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @io_state: bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @control: Control registers state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @base: base port address of the GPIO device
+ */
+struct gpiomm_gpio {
+ struct gpio_chip chip;
+ unsigned char io_state[6];
+ unsigned char out_state[6];
+ unsigned char control[2];
+ spinlock_t lock;
+ unsigned int base;
+};
+
+static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+
+ return !!(gpiommgpio->io_state[port] & mask);
+}
+
+static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+ const unsigned int io_port = offset / 8;
+ const unsigned int control_port = io_port / 3;
+ const unsigned int control_addr = gpiommgpio->base + 3 + control_port*4;
+ unsigned long flags;
+ unsigned int control;
+
+ spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ gpiommgpio->io_state[io_port] |= 0xF0;
+ gpiommgpio->control[control_port] |= BIT(3);
+ } else {
+ gpiommgpio->io_state[io_port] |= 0x0F;
+ gpiommgpio->control[control_port] |= BIT(0);
+ }
+ } else {
+ gpiommgpio->io_state[io_port] |= 0xFF;
+ if (io_port == 0 || io_port == 3)
+ gpiommgpio->control[control_port] |= BIT(4);
+ else
+ gpiommgpio->control[control_port] |= BIT(1);
+ }
+
+ control = BIT(7) | gpiommgpio->control[control_port];
+ outb(control, control_addr);
+
+ spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+ return 0;
+}
+
+static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+ const unsigned int io_port = offset / 8;
+ const unsigned int control_port = io_port / 3;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int control_addr = gpiommgpio->base + 3 + control_port*4;
+ const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
+ unsigned long flags;
+ unsigned int control;
+
+ spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ gpiommgpio->io_state[io_port] &= 0x0F;
+ gpiommgpio->control[control_port] &= ~BIT(3);
+ } else {
+ gpiommgpio->io_state[io_port] &= 0xF0;
+ gpiommgpio->control[control_port] &= ~BIT(0);
+ }
+ } else {
+ gpiommgpio->io_state[io_port] &= 0x00;
+ if (io_port == 0 || io_port == 3)
+ gpiommgpio->control[control_port] &= ~BIT(4);
+ else
+ gpiommgpio->control[control_port] &= ~BIT(1);
+ }
+
+ if (value)
+ gpiommgpio->out_state[io_port] |= mask;
+ else
+ gpiommgpio->out_state[io_port] &= ~mask;
+
+ control = BIT(7) | gpiommgpio->control[control_port];
+ outb(control, control_addr);
+
+ outb(gpiommgpio->out_state[io_port], gpiommgpio->base + out_port);
+
+ spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+ return 0;
+}
+
+static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int in_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+ unsigned int port_state;
+
+ spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+ /* ensure that GPIO is set for input */
+ if (!(gpiommgpio->io_state[port] & mask)) {
+ spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ return -EINVAL;
+ }
+
+ port_state = inb(gpiommgpio->base + in_port);
+
+ spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+ return !!(port_state & mask);
+}
+
+static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+ const unsigned int port = offset / 8;
+ const unsigned int mask = BIT(offset % 8);
+ const unsigned int out_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+ if (value)
+ gpiommgpio->out_state[port] |= mask;
+ else
+ gpiommgpio->out_state[port] &= ~mask;
+
+ outb(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
+
+ spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+}
+
+static int gpiomm_probe(struct device *dev, unsigned int id)
+{
+ struct gpiomm_gpio *gpiommgpio;
+ const char *const name = dev_name(dev);
+ int err;
+
+ gpiommgpio = devm_kzalloc(dev, sizeof(*gpiommgpio), GFP_KERNEL);
+ if (!gpiommgpio)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], GPIOMM_EXTENT, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + GPIOMM_EXTENT);
+ return -EBUSY;
+ }
+
+ gpiommgpio->chip.label = name;
+ gpiommgpio->chip.parent = dev;
+ gpiommgpio->chip.owner = THIS_MODULE;
+ gpiommgpio->chip.base = -1;
+ gpiommgpio->chip.ngpio = 48;
+ gpiommgpio->chip.get_direction = gpiomm_gpio_get_direction;
+ gpiommgpio->chip.direction_input = gpiomm_gpio_direction_input;
+ gpiommgpio->chip.direction_output = gpiomm_gpio_direction_output;
+ gpiommgpio->chip.get = gpiomm_gpio_get;
+ gpiommgpio->chip.set = gpiomm_gpio_set;
+ gpiommgpio->base = base[id];
+
+ spin_lock_init(&gpiommgpio->lock);
+
+ dev_set_drvdata(dev, gpiommgpio);
+
+ err = gpiochip_add_data(&gpiommgpio->chip, gpiommgpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ /* initialize all GPIO as output */
+ outb(0x80, base[id] + 3);
+ outb(0x00, base[id]);
+ outb(0x00, base[id] + 1);
+ outb(0x00, base[id] + 2);
+ outb(0x80, base[id] + 7);
+ outb(0x00, base[id] + 4);
+ outb(0x00, base[id] + 5);
+ outb(0x00, base[id] + 6);
+
+ return 0;
+}
+
+static int gpiomm_remove(struct device *dev, unsigned int id)
+{
+ struct gpiomm_gpio *const gpiommgpio = dev_get_drvdata(dev);
+
+ gpiochip_remove(&gpiommgpio->chip);
+
+ return 0;
+}
+
+static struct isa_driver gpiomm_driver = {
+ .probe = gpiomm_probe,
+ .driver = {
+ .name = "gpio-mm"
+ },
+ .remove = gpiomm_remove
+};
+
+module_isa_driver(gpiomm_driver, num_gpiomm);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Diamond Systems GPIO-MM GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 513cfc5c8fb6..0b4df6051097 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -14,10 +14,10 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/mfd/htc-egpio.h>
struct egpio_chip {
int reg_start;
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 860c535922fd..98c7ff2a76e7 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -10,111 +10,40 @@
* your option) any later version.
*/
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
-#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#define IOP3XX_N_GPIOS 8
-
-#define GPIO_IN 0
-#define GPIO_OUT 1
-#define GPIO_LOW 0
-#define GPIO_HIGH 1
-
-/* Memory base offset */
-static void __iomem *base;
-
-#define IOP3XX_GPIO_REG(reg) (base + (reg))
-#define IOP3XX_GPOE IOP3XX_GPIO_REG(0x0000)
-#define IOP3XX_GPID IOP3XX_GPIO_REG(0x0004)
-#define IOP3XX_GPOD IOP3XX_GPIO_REG(0x0008)
-
-static void gpio_line_config(int line, int direction)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = readl(IOP3XX_GPOE);
- if (direction == GPIO_IN) {
- val |= BIT(line);
- } else if (direction == GPIO_OUT) {
- val &= ~BIT(line);
- }
- writel(val, IOP3XX_GPOE);
- local_irq_restore(flags);
-}
-
-static int gpio_line_get(int line)
-{
- return !!(readl(IOP3XX_GPID) & BIT(line));
-}
-
-static void gpio_line_set(int line, int value)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = readl(IOP3XX_GPOD);
- if (value == GPIO_LOW) {
- val &= ~BIT(line);
- } else if (value == GPIO_HIGH) {
- val |= BIT(line);
- }
- writel(val, IOP3XX_GPOD);
- local_irq_restore(flags);
-}
-
-static int iop3xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- gpio_line_config(gpio, GPIO_IN);
- return 0;
-}
-
-static int iop3xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
-{
- gpio_line_set(gpio, level);
- gpio_line_config(gpio, GPIO_OUT);
- return 0;
-}
-
-static int iop3xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
- return gpio_line_get(gpio);
-}
-
-static void iop3xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
-{
- gpio_line_set(gpio, value);
-}
-
-static struct gpio_chip iop3xx_chip = {
- .label = "iop3xx",
- .direction_input = iop3xx_gpio_direction_input,
- .get = iop3xx_gpio_get_value,
- .direction_output = iop3xx_gpio_direction_output,
- .set = iop3xx_gpio_set_value,
- .base = 0,
- .ngpio = IOP3XX_N_GPIOS,
-};
+#define IOP3XX_GPOE 0x0000
+#define IOP3XX_GPID 0x0004
+#define IOP3XX_GPOD 0x0008
static int iop3xx_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
+ struct gpio_chip *gc;
+ void __iomem *base;
+ int err;
+
+ gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- return devm_gpiochip_add_data(&pdev->dev, &iop3xx_chip, NULL);
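+ /*
+ * GPID is the data-in register, GPOD the data-out register and
+ * GPOE the direction register (a set bit means input), so GPOE
+ * is passed to bgpio_init() as the "dirin" register.
+ */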
+ err = bgpio_init(gc, &pdev->dev, 1, base + IOP3XX_GPID,
+ base + IOP3XX_GPOD, NULL, NULL, base + IOP3XX_GPOE, 0);
+ if (err)
+ return err;
+
+ gc->base = 0;
+ gc->owner = THIS_MODULE;
+
+ return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
static struct platform_driver iop3xx_gpio_driver = {
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 63a962d18cd6..45d29e488dbb 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -273,7 +273,7 @@ exit:
return rc;
}
-static struct gpio_chip it87_template_chip = {
+static const struct gpio_chip it87_template_chip = {
.label = KBUILD_MODNAME,
.owner = THIS_MODULE,
.request = it87_gpio_request,
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 10c09bdd8514..72b64039241a 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
@@ -55,11 +56,6 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- return -EINVAL;
- }
-
gpio_reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
new file mode 100644
index 000000000000..218c706359aa
--- /dev/null
+++ b/drivers/gpio/gpio-lp873x.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65218 driver
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+#define BITS_PER_GPO 0x4
+#define LP873X_GPO_CTRL_OD 0x2
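+/* each GPO uses a 4-bit field in GPO_CTRL: bit 0 of the field sets the level, bit 2 selects open-drain */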
+
+struct lp873x_gpio {
+ struct gpio_chip chip;
+ struct lp873x *lp873;
+};
+
+static int lp873x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ /* This device is output only */
+ return 0;
+}
+
+static int lp873x_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ /* This device is output only */
+ return -EINVAL;
+}
+
+static int lp873x_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Set the initial value */
+ return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
+}
+
+static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->lp873->regmap, LP873X_REG_GPO_CTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(offset * BITS_PER_GPO);
+}
+
+static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
+}
+
+static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct lp873x_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
+ switch (offset) {
+ case 0:
+ /* No MUX Set up Needed for GPO */
+ break;
+ case 1:
+ /* Setup the CLKIN_PIN_SEL MUX to GPO2 */
+ ret = regmap_update_bits(gpio->lp873->regmap, LP873X_REG_CONFIG,
+ LP873X_CONFIG_CLKIN_PIN_SEL, 0);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lp873x_gpio_set_single_ended(struct gpio_chip *gc,
+ unsigned int offset,
+ enum single_ended_mode mode)
+{
+ struct lp873x_gpio *gpio = gpiochip_get_data(gc);
+
+ switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ return regmap_update_bits(gpio->lp873->regmap,
+ LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO +
+ LP873X_GPO_CTRL_OD),
+ BIT(offset * BITS_PER_GPO +
+ LP873X_GPO_CTRL_OD));
+ case LINE_MODE_PUSH_PULL:
+ return regmap_update_bits(gpio->lp873->regmap,
+ LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO +
+ LP873X_GPO_CTRL_OD), 0);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static const struct gpio_chip template_chip = {
+ .label = "lp873x-gpio",
+ .owner = THIS_MODULE,
+ .request = lp873x_gpio_request,
+ .get_direction = lp873x_gpio_get_direction,
+ .direction_input = lp873x_gpio_direction_input,
+ .direction_output = lp873x_gpio_direction_output,
+ .get = lp873x_gpio_get,
+ .set = lp873x_gpio_set,
+ .set_single_ended = lp873x_gpio_set_single_ended,
+ .base = -1,
+ .ngpio = 2,
+ .can_sleep = true,
+};
+
+static int lp873x_gpio_probe(struct platform_device *pdev)
+{
+ struct lp873x_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpio->lp873 = dev_get_drvdata(pdev->dev.parent);
+ gpio->chip = template_chip;
+ gpio->chip.parent = gpio->lp873->dev;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id lp873x_gpio_id_table[] = {
+ { "lp873x-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp873x_gpio_id_table);
+
+static struct platform_driver lp873x_gpio_driver = {
+ .driver = {
+ .name = "lp873x-gpio",
+ },
+ .probe = lp873x_gpio_probe,
+ .id_table = lp873x_gpio_id_table,
+};
+module_platform_driver(lp873x_gpio_driver);
+
+MODULE_AUTHOR("Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 98832c9f614a..f12e02e1016d 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -78,7 +78,7 @@ static int lpc18xx_gpio_direction_output(struct gpio_chip *chip,
return lpc18xx_gpio_direction(chip, offset, true);
}
-static struct gpio_chip lpc18xx_chip = {
+static const struct gpio_chip lpc18xx_chip = {
.label = "lpc18xx/43xx-gpio",
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index fc5f197906ac..92b3ae2a6735 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -25,7 +25,6 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/platform_data/gpio-lpc32xx.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -68,6 +67,20 @@
#define GPI3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
#define GPO3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
+#define LPC32XX_GPIO_P0_MAX 8
+#define LPC32XX_GPIO_P1_MAX 24
+#define LPC32XX_GPIO_P2_MAX 13
+#define LPC32XX_GPIO_P3_MAX 6
+#define LPC32XX_GPI_P3_MAX 29
+#define LPC32XX_GPO_P3_MAX 24
+
+#define LPC32XX_GPIO_P0_GRP 0
+#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
+#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
+#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
+#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
+#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
+
struct gpio_regs {
void __iomem *inp_state;
void __iomem *outp_state;
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 6ec144baeb11..d7d03ad052d0 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -573,6 +573,7 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
#ifdef CONFIG_OF
static const struct of_device_id bgpio_of_match[] = {
+ { .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
{ }
};
@@ -593,6 +594,9 @@ static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
pdata->base = -1;
+ if (of_device_is_big_endian(pdev->dev.of_node))
+ *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+
if (of_property_read_bool(pdev->dev.of_node, "no-output"))
*flags |= BGPIOF_NO_OUTPUT;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
new file mode 100644
index 000000000000..1ef85b0c2b1f
--- /dev/null
+++ b/drivers/gpio/gpio-mockup.c
@@ -0,0 +1,214 @@
+/*
+ * GPIO Testing Device Driver
+ *
+ * Copyright (C) 2014 Kamlakant Patel <kamlakant.patel@broadcom.com>
+ * Copyright (C) 2015-2016 Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+
+#define GPIO_NAME "gpio-mockup"
+#define MAX_GC 10
+
+enum direction {
+ OUT,
+ IN
+};
+
+/*
+ * struct gpio_pin_status - structure describing a GPIO status
+ * @dir: Configures direction of gpio as "out" (0) or "in" (1)
+ * @value: Configures status of the gpio as 0 (low) or 1 (high)
+ */
+struct gpio_pin_status {
+ enum direction dir;
+ bool value;
+};
+
+struct mockup_gpio_controller {
+ struct gpio_chip gc;
+ struct gpio_pin_status *stats;
+};
+
+static int gpio_mockup_ranges[MAX_GC << 1];
+static int gpio_mockup_params_nr;
+module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
+
+const char pins_name_start = 'A';
+
+static int mockup_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mockup_gpio_controller *cntr = gpiochip_get_data(gc);
+
+ return cntr->stats[offset].value;
+}
+
+static void mockup_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct mockup_gpio_controller *cntr = gpiochip_get_data(gc);
+
+ cntr->stats[offset].value = !!value;
+}
+
+static int mockup_gpio_dirout(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct mockup_gpio_controller *cntr = gpiochip_get_data(gc);
+
+ mockup_gpio_set(gc, offset, value);
+ cntr->stats[offset].dir = OUT;
+ return 0;
+}
+
+static int mockup_gpio_dirin(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mockup_gpio_controller *cntr = gpiochip_get_data(gc);
+
+ cntr->stats[offset].dir = IN;
+ return 0;
+}
+
+static int mockup_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mockup_gpio_controller *cntr = gpiochip_get_data(gc);
+
+ return cntr->stats[offset].dir;
+}
+
+static int mockup_gpio_add(struct device *dev,
+ struct mockup_gpio_controller *cntr,
+ const char *name, int base, int ngpio)
+{
+ int ret;
+
+ cntr->gc.base = base;
+ cntr->gc.ngpio = ngpio;
+ cntr->gc.label = name;
+ cntr->gc.owner = THIS_MODULE;
+ cntr->gc.parent = dev;
+ cntr->gc.get = mockup_gpio_get;
+ cntr->gc.set = mockup_gpio_set;
+ cntr->gc.direction_output = mockup_gpio_dirout;
+ cntr->gc.direction_input = mockup_gpio_dirin;
+ cntr->gc.get_direction = mockup_gpio_get_direction;
+ cntr->stats = devm_kzalloc(dev, sizeof(*cntr->stats) * cntr->gc.ngpio,
+ GFP_KERNEL);
+ if (!cntr->stats) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ret = devm_gpiochip_add_data(dev, &cntr->gc, cntr);
+ if (ret)
+ goto err;
+
+ dev_info(dev, "gpio<%d..%d> add successful!", base, base + ngpio);
+ return 0;
+err:
+ dev_err(dev, "gpio<%d..%d> add failed!", base, base + ngpio);
+ return ret;
+}
+
+static int mockup_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mockup_gpio_controller *cntr;
+ int ret;
+ int i;
+ int base;
+ int ngpio;
+ char chip_name[sizeof(GPIO_NAME) + 3];
+
+ if (gpio_mockup_params_nr < 2)
+ return -EINVAL;
+
+ cntr = devm_kzalloc(dev, sizeof(*cntr) * (gpio_mockup_params_nr >> 1),
+ GFP_KERNEL);
+ if (!cntr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cntr);
+
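+ /*
+ * Parameters come in <base, end> pairs; a base of -1 means the
+ * second value is a line count and gpiolib assigns the base.
+ */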
+ for (i = 0; i < gpio_mockup_params_nr >> 1; i++) {
+ base = gpio_mockup_ranges[i * 2];
+ if (base == -1)
+ ngpio = gpio_mockup_ranges[i * 2 + 1];
+ else
+ ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
+
+ if (ngpio >= 0) {
+ sprintf(chip_name, "%s-%c", GPIO_NAME,
+ pins_name_start + i);
+ ret = mockup_gpio_add(dev, &cntr[i],
+ chip_name, base, ngpio);
+ } else {
+ ret = -1;
+ }
+ if (ret) {
+ if (base < 0)
+ dev_err(dev, "gpio<%d..%d> add failed\n",
+ base, ngpio);
+ else
+ dev_err(dev, "gpio<%d..%d> add failed\n",
+ base, base + ngpio);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver mockup_gpio_driver = {
+ .driver = {
+ .name = GPIO_NAME,
+ },
+ .probe = mockup_gpio_probe,
+};
+
+static struct platform_device *pdev;
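+/* no bus enumerates this virtual chip, so create its platform device by hand */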
+static int __init mock_device_init(void)
+{
+ int err;
+
+ pdev = platform_device_alloc(GPIO_NAME, -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ err = platform_device_add(pdev);
+ if (err) {
+ platform_device_put(pdev);
+ return err;
+ }
+
+ err = platform_driver_register(&mockup_gpio_driver);
+ if (err) {
+ platform_device_unregister(pdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit mock_device_exit(void)
+{
+ platform_driver_unregister(&mockup_gpio_driver);
+ platform_device_unregister(pdev);
+}
+
+module_init(mock_device_init);
+module_exit(mock_device_exit);
+
+MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
+MODULE_AUTHOR("Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>");
+MODULE_DESCRIPTION("GPIO Testing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index d75649787e6c..1b7ce7f85886 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -20,7 +20,6 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -328,9 +327,4 @@ static int __init platform_msic_gpio_init(void)
{
return platform_driver_register(&platform_msic_gpio_driver);
}
-
subsys_initcall(platform_msic_gpio_init);
-
-MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Medfield MSIC GPIO driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 1b342a3842c8..c1a1e00b8cb0 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -2,7 +2,8 @@
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*
- * Based on code from Freescale,
+ * Based on code from Freescale Semiconductor,
+ * Authors: Daniel Mack, Juergen Beisert.
* Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -33,7 +34,6 @@
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/module.h>
#include <linux/bug.h>
enum mxc_gpio_hwtype {
@@ -458,6 +458,11 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgio;
+ if (of_property_read_bool(np, "gpio-ranges")) {
+ port->gc.request = gpiochip_generic_request;
+ port->gc.free = gpiochip_generic_free;
+ }
+
port->gc.to_irq = mxc_gpio_to_irq;
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
@@ -510,10 +515,4 @@ static int __init gpio_mxc_init(void)
{
return platform_driver_register(&mxc_gpio_driver);
}
-postcore_initcall(gpio_mxc_init);
-
-MODULE_AUTHOR("Freescale Semiconductor, "
- "Daniel Mack <danielncaiaq.de>, "
- "Juergen Beisert <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale MXC GPIO");
-MODULE_LICENSE("GPL");
+subsys_initcall(gpio_mxc_init);
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 839474430229..3d818195e351 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -152,7 +152,6 @@ static const struct of_device_id of_palmas_gpio_match[] = {
{ .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
{ },
};
-MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
static int palmas_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 02f2a5621bb0..45c8817d068c 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -21,6 +21,7 @@
#include <asm/unaligned.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
#define PCA953X_INPUT 0
#define PCA953X_OUTPUT 1
@@ -94,6 +95,24 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+struct pca953x_reg_config {
+ int direction;
+ int output;
+ int input;
+};
+
+static const struct pca953x_reg_config pca953x_regs = {
+ .direction = PCA953X_DIRECTION,
+ .output = PCA953X_OUTPUT,
+ .input = PCA953X_INPUT,
+};
+
+static const struct pca953x_reg_config pca957x_regs = {
+ .direction = PCA957X_CFG,
+ .output = PCA957X_OUT,
+ .input = PCA957X_IN,
+};
+
struct pca953x_chip {
unsigned gpio_start;
u8 reg_output[MAX_BANK];
@@ -111,8 +130,13 @@ struct pca953x_chip {
struct i2c_client *client;
struct gpio_chip gpio_chip;
const char *const *names;
- int chip_type;
unsigned long driver_data;
+ struct regulator *regulator;
+
+ const struct pca953x_reg_config *regs;
+
+ int (*write_regs)(struct pca953x_chip *, int, u8 *);
+ int (*read_regs)(struct pca953x_chip *, int, u8 *);
};
static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
@@ -152,38 +176,44 @@ static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val,
return 0;
}
-static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_write_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
{
- int ret = 0;
+ return i2c_smbus_write_byte_data(chip->client, reg, *val);
+}
- if (chip->gpio_chip.ngpio <= 8)
- ret = i2c_smbus_write_byte_data(chip->client, reg, *val);
- else if (chip->gpio_chip.ngpio >= 24) {
- int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
- ret = i2c_smbus_write_i2c_block_data(chip->client,
- (reg << bank_shift) | REG_ADDR_AI,
- NBANK(chip), val);
- } else {
- switch (chip->chip_type) {
- case PCA953X_TYPE: {
- __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
+static int pca953x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
- ret = i2c_smbus_write_word_data(chip->client, reg << 1,
- (__force u16)word);
- break;
- }
- case PCA957X_TYPE:
- ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
- val[0]);
- if (ret < 0)
- break;
- ret = i2c_smbus_write_byte_data(chip->client,
- (reg << 1) + 1,
- val[1]);
- break;
- }
- }
+ return i2c_smbus_write_word_data(chip->client,
+ reg << 1, (__force u16)word);
+}
+
+static int pca957x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client, reg << 1, val[0]);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(chip->client, (reg << 1) + 1, val[1]);
+}
+
+static int pca953x_write_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
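+ /*
+ * On 24+ pin parts each register repeats once per bank; the shift
+ * spreads the register groups apart and REG_ADDR_AI enables address
+ * auto-increment so one block write covers every bank.
+ */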
+ return i2c_smbus_write_i2c_block_data(chip->client,
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
+}
+static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int ret = 0;
+
+ ret = chip->write_regs(chip, reg, val);
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
return ret;
@@ -192,24 +222,41 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
return 0;
}
-static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_read_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
{
int ret;
- if (chip->gpio_chip.ngpio <= 8) {
- ret = i2c_smbus_read_byte_data(chip->client, reg);
- *val = ret;
- } else if (chip->gpio_chip.ngpio >= 24) {
- int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ *val = ret;
- ret = i2c_smbus_read_i2c_block_data(chip->client,
- (reg << bank_shift) | REG_ADDR_AI,
- NBANK(chip), val);
- } else {
- ret = i2c_smbus_read_word_data(chip->client, reg << 1);
- val[0] = (u16)ret & 0xFF;
- val[1] = (u16)ret >> 8;
- }
+ return ret;
+}
+
+static int pca953x_read_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(chip->client, reg << 1);
+ val[0] = (u16)ret & 0xFF;
+ val[1] = (u16)ret >> 8;
+
+ return ret;
+}
+
+static int pca953x_read_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
+ return i2c_smbus_read_i2c_block_data(chip->client,
+ (reg << bank_shift) | REG_ADDR_AI,
+ NBANK(chip), val);
+}
+
+static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
+{
+ int ret;
+
+ ret = chip->read_regs(chip, reg, val);
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
@@ -222,20 +269,12 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 reg_val;
- int ret, offset = 0;
+ int ret;
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_DIRECTION;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_CFG;
- break;
- }
- ret = pca953x_write_single(chip, offset, reg_val, off);
+ ret = pca953x_write_single(chip, chip->regs->direction, reg_val, off);
if (ret)
goto exit;
@@ -250,7 +289,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 reg_val;
- int ret, offset = 0;
+ int ret;
mutex_lock(&chip->i2c_lock);
/* set output level */
@@ -261,15 +300,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
reg_val = chip->reg_output[off / BANK_SZ]
& ~(1u << (off % BANK_SZ));
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_OUTPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_OUT;
- break;
- }
- ret = pca953x_write_single(chip, offset, reg_val, off);
+ ret = pca953x_write_single(chip, chip->regs->output, reg_val, off);
if (ret)
goto exit;
@@ -277,15 +308,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
/* then direction */
reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_DIRECTION;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_CFG;
- break;
- }
- ret = pca953x_write_single(chip, offset, reg_val, off);
+ ret = pca953x_write_single(chip, chip->regs->direction, reg_val, off);
if (ret)
goto exit;
@@ -299,18 +322,10 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u32 reg_val;
- int ret, offset = 0;
+ int ret;
mutex_lock(&chip->i2c_lock);
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_INPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_IN;
- break;
- }
- ret = pca953x_read_single(chip, offset, &reg_val, off);
+ ret = pca953x_read_single(chip, chip->regs->input, &reg_val, off);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -327,7 +342,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 reg_val;
- int ret, offset = 0;
+ int ret;
mutex_lock(&chip->i2c_lock);
if (val)
@@ -337,15 +352,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
reg_val = chip->reg_output[off / BANK_SZ]
& ~(1u << (off % BANK_SZ));
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_OUTPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_OUT;
- break;
- }
- ret = pca953x_write_single(chip, offset, reg_val, off);
+ ret = pca953x_write_single(chip, chip->regs->output, reg_val, off);
if (ret)
goto exit;
@@ -355,35 +362,31 @@ exit:
}
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+ unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ unsigned int bank_mask, bank_val;
+ int bank_shift, bank;
u8 reg_val[MAX_BANK];
- int ret, offset = 0;
- int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
- int bank;
-
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_OUTPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_OUT;
- break;
- }
+ int ret;
+
+ bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
- for(bank=0; bank<NBANK(chip); bank++) {
- unsigned bankmask = mask[bank / sizeof(*mask)] >>
- ((bank % sizeof(*mask)) * 8);
- if(bankmask) {
- unsigned bankval = bits[bank / sizeof(*bits)] >>
- ((bank % sizeof(*bits)) * 8);
- reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+ for (bank = 0; bank < NBANK(chip); bank++) {
+ bank_mask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
+ if (bank_mask) {
+ bank_val = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
+ reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
- ret = i2c_smbus_write_i2c_block_data(chip->client, offset << bank_shift, NBANK(chip), reg_val);
+
+ ret = i2c_smbus_write_i2c_block_data(chip->client,
+ chip->regs->output << bank_shift,
+ NBANK(chip), reg_val);
if (ret)
goto exit;
@@ -515,7 +518,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
- int ret, i, offset = 0;
+ int ret, i;
if (chip->driver_data & PCA_PCAL) {
/* Read the current interrupt status from the device */
@@ -540,15 +543,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return pending_seen;
}
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_INPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_IN;
- break;
- }
- ret = pca953x_read_regs(chip, offset, cur_stat);
+ ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
@@ -608,20 +603,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
int irq_base)
{
struct i2c_client *client = chip->client;
- int ret, i, offset = 0;
+ int ret, i;
if (client->irq && irq_base != -1
&& (chip->driver_data & PCA_INT)) {
- switch (chip->chip_type) {
- case PCA953X_TYPE:
- offset = PCA953X_INPUT;
- break;
- case PCA957X_TYPE:
- offset = PCA957X_IN;
- break;
- }
- ret = pca953x_read_regs(chip, offset, chip->irq_stat);
+ ret = pca953x_read_regs(chip,
+ chip->regs->input, chip->irq_stat);
if (ret)
return ret;
@@ -684,12 +672,14 @@ static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
int ret;
u8 val[MAX_BANK];
- ret = pca953x_read_regs(chip, PCA953X_OUTPUT, chip->reg_output);
+ chip->regs = &pca953x_regs;
+
+ ret = pca953x_read_regs(chip, chip->regs->output, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_regs(chip, PCA953X_DIRECTION,
- chip->reg_direction);
+ ret = pca953x_read_regs(chip, chip->regs->direction,
+ chip->reg_direction);
if (ret)
goto out;
@@ -709,10 +699,13 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
int ret;
u8 val[MAX_BANK];
- ret = pca953x_read_regs(chip, PCA957X_OUT, chip->reg_output);
+ chip->regs = &pca957x_regs;
+
+ ret = pca953x_read_regs(chip, chip->regs->output, chip->reg_output);
if (ret)
goto out;
- ret = pca953x_read_regs(chip, PCA957X_CFG, chip->reg_direction);
+ ret = pca953x_read_regs(chip, chip->regs->direction,
+ chip->reg_direction);
if (ret)
goto out;
@@ -739,13 +732,14 @@ out:
static const struct of_device_id pca953x_dt_ids[];
static int pca953x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *i2c_id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
int irq_base = 0;
int ret;
u32 invert = 0;
+ struct regulator *reg;
chip = devm_kzalloc(&client->dev,
sizeof(struct pca953x_chip), GFP_KERNEL);
@@ -765,47 +759,77 @@ static int pca953x_probe(struct i2c_client *client,
chip->client = client;
- if (id) {
- chip->driver_data = id->driver_data;
+ reg = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "reg get err: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(reg);
+ if (ret) {
+ dev_err(&client->dev, "reg en err: %d\n", ret);
+ return ret;
+ }
+ chip->regulator = reg;
+
+ if (i2c_id) {
+ chip->driver_data = i2c_id->driver_data;
} else {
- const struct acpi_device_id *id;
+ const struct acpi_device_id *acpi_id;
const struct of_device_id *match;
match = of_match_device(pca953x_dt_ids, &client->dev);
if (match) {
chip->driver_data = (int)(uintptr_t)match->data;
} else {
- id = acpi_match_device(pca953x_acpi_ids, &client->dev);
- if (!id)
- return -ENODEV;
+ acpi_id = acpi_match_device(pca953x_acpi_ids, &client->dev);
+ if (!acpi_id) {
+ ret = -ENODEV;
+ goto err_exit;
+ }
- chip->driver_data = id->driver_data;
+ chip->driver_data = acpi_id->driver_data;
}
}
- chip->chip_type = PCA_CHIP_TYPE(chip->driver_data);
-
mutex_init(&chip->i2c_lock);
+ lockdep_set_subclass(&chip->i2c_lock,
+ i2c_adapter_depth(client->adapter));
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
- if (chip->chip_type == PCA953X_TYPE)
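+ /*
+ * Pick register accessors by width: byte access for 8-bit parts,
+ * auto-increment block access for 24-bit-and-wider parts, and
+ * word access for 16-bit parts (PCA953x and PCA957x lay these
+ * registers out differently).
+ */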
+ if (chip->gpio_chip.ngpio <= 8) {
+ chip->write_regs = pca953x_write_regs_8;
+ chip->read_regs = pca953x_read_regs_8;
+ } else if (chip->gpio_chip.ngpio >= 24) {
+ chip->write_regs = pca953x_write_regs_24;
+ chip->read_regs = pca953x_read_regs_24;
+ } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
+ chip->write_regs = pca953x_write_regs_16;
+ else
+ chip->write_regs = pca957x_write_regs_16;
+ chip->read_regs = pca953x_read_regs_16;
+ }
+
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
ret = device_pca953x_init(chip, invert);
else
ret = device_pca957x_init(chip, invert);
if (ret)
- return ret;
+ goto err_exit;
ret = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (ret)
- return ret;
+ goto err_exit;
ret = pca953x_irq_setup(chip, irq_base);
if (ret)
- return ret;
+ goto err_exit;
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -816,6 +840,10 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
+
+err_exit:
+ regulator_disable(chip->regulator);
+ return ret;
}
static int pca953x_remove(struct i2c_client *client)
@@ -827,14 +855,16 @@ static int pca953x_remove(struct i2c_client *client)
if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&client->dev, "%s failed, %d\n",
"teardown", ret);
- return ret;
- }
+ } else {
+ ret = 0;
}
- return 0;
+ regulator_disable(chip->regulator);
+
+ return ret;
}
/* convenience to stop overlong match-table lines */
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index cb14b8d1d512..f5545049c187 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -90,7 +90,7 @@ static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "pisosr-gpio",
.owner = THIS_MODULE,
.get_direction = pisosr_gpio_get_direction,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index b96e0b466f74..2be48f5eba36 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -348,6 +348,10 @@ static const struct of_device_id gpio_rcar_of_table[] = {
/* Gen3 GPIO is identical to Gen2. */
.data = &gpio_rcar_info_gen2,
}, {
+ .compatible = "renesas,gpio-r8a7796",
+ /* Gen3 GPIO is identical to Gen2. */
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
}, {
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index eb43ae4835c1..545004445846 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -138,7 +138,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
return 0;
}
-static struct gpio_chip sch_gpio_chip = {
+static const struct gpio_chip sch_gpio_chip = {
.label = "sch_gpio",
.owner = THIS_MODULE,
.direction_input = sch_gpio_direction_in,
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 7ffd16495286..22267479ba68 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -183,7 +183,6 @@ static const struct of_device_id spics_gpio_of_match[] = {
{ .compatible = "st,spear-spics-gpio" },
{}
};
-MODULE_DEVICE_TABLE(of, spics_gpio_of_match);
static struct platform_driver spics_gpio_driver = {
.probe = spics_gpio_probe,
@@ -198,7 +197,3 @@ static int __init spics_gpio_init(void)
return platform_driver_register(&spics_gpio_driver);
}
subsys_initcall(spics_gpio_init);
-
-MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
-MODULE_DESCRIPTION("STMicroelectronics SPEAr SPI Chip Select Abstraction");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index f675132de10e..e7d422a6b90b 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/mfd/stmpe.h>
#include <linux/seq_file.h>
+#include <linux/bitops.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -20,6 +21,8 @@
*/
enum { REG_RE, REG_FE, REG_IE };
+enum { LSB, CSB, MSB };
+
#define CACHE_NR_REGS 3
/* No variant has more than 24 GPIOs */
#define CACHE_NR_BANKS (24 / 8)
@@ -39,8 +42,8 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- u8 reg = stmpe->regs[STMPE_IDX_GPMR_LSB] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 reg = stmpe->regs[STMPE_IDX_GPMR_LSB + (offset / 8)];
+ u8 mask = BIT(offset % 8);
int ret;
ret = stmpe_reg_read(stmpe, reg);
@@ -55,8 +58,8 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
int which = val ? STMPE_IDX_GPSR_LSB : STMPE_IDX_GPCR_LSB;
- u8 reg = stmpe->regs[which] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 reg = stmpe->regs[which + (offset / 8)];
+ u8 mask = BIT(offset % 8);
/*
* Some variants have single register for gpio set/clear functionality.
@@ -74,7 +77,7 @@ static int stmpe_gpio_get_direction(struct gpio_chip *chip,
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 mask = BIT(offset % 8);
int ret;
ret = stmpe_reg_read(stmpe, reg);
@@ -89,8 +92,8 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip,
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
+ u8 mask = BIT(offset % 8);
stmpe_gpio_set(chip, offset, val);
@@ -102,8 +105,8 @@ static int stmpe_gpio_direction_input(struct gpio_chip *chip,
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
+ u8 mask = BIT(offset % 8);
return stmpe_set_bits(stmpe, reg, mask, 0);
}
@@ -113,13 +116,13 @@ static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- if (stmpe_gpio->norequest_mask & (1 << offset))
+ if (stmpe_gpio->norequest_mask & BIT(offset))
return -EINVAL;
- return stmpe_set_altfunc(stmpe, 1 << offset, STMPE_BLOCK_GPIO);
+ return stmpe_set_altfunc(stmpe, BIT(offset), STMPE_BLOCK_GPIO);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "stmpe",
.owner = THIS_MODULE,
.get_direction = stmpe_gpio_get_direction,
@@ -137,13 +140,14 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
if (type & IRQ_TYPE_LEVEL_LOW || type & IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
- /* STMPE801 doesn't have RE and FE registers */
- if (stmpe_gpio->stmpe->partnum == STMPE801)
+ /* STMPE801 and STMPE 1600 don't have RE and FE registers */
+ if (stmpe_gpio->stmpe->partnum == STMPE801 ||
+ stmpe_gpio->stmpe->partnum == STMPE1600)
return 0;
if (type & IRQ_TYPE_EDGE_RISING)
@@ -173,17 +177,24 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
- static const u8 regmap[] = {
- [REG_RE] = STMPE_IDX_GPRER_LSB,
- [REG_FE] = STMPE_IDX_GPFER_LSB,
- [REG_IE] = STMPE_IDX_IEGPIOR_LSB,
+ static const u8 regmap[CACHE_NR_REGS][CACHE_NR_BANKS] = {
+ [REG_RE][LSB] = STMPE_IDX_GPRER_LSB,
+ [REG_RE][CSB] = STMPE_IDX_GPRER_CSB,
+ [REG_RE][MSB] = STMPE_IDX_GPRER_MSB,
+ [REG_FE][LSB] = STMPE_IDX_GPFER_LSB,
+ [REG_FE][CSB] = STMPE_IDX_GPFER_CSB,
+ [REG_FE][MSB] = STMPE_IDX_GPFER_MSB,
+ [REG_IE][LSB] = STMPE_IDX_IEGPIOR_LSB,
+ [REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
+ [REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
int i, j;
for (i = 0; i < CACHE_NR_REGS; i++) {
- /* STMPE801 doesn't have RE and FE registers */
- if ((stmpe->partnum == STMPE801) &&
- (i != REG_IE))
+ /* STMPE801 and STMPE1600 don't have RE and FE registers */
+ if ((stmpe->partnum == STMPE801 ||
+ stmpe->partnum == STMPE1600) &&
+ (i != REG_IE))
continue;
for (j = 0; j < num_banks; j++) {
@@ -194,7 +205,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
continue;
stmpe_gpio->oldregs[i][j] = new;
- stmpe_reg_write(stmpe, stmpe->regs[regmap[i]] - j, new);
+ stmpe_reg_write(stmpe, stmpe->regs[regmap[i][j]], new);
}
}
@@ -207,7 +218,7 @@ static void stmpe_gpio_irq_mask(struct irq_data *d)
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
}
@@ -216,11 +227,21 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
+
+ /*
+ * STMPE1600 workaround: to get IRQs from the pins, the GPMR
+ * register must be read, or the GPSR or GPCR registers written.
+ */
+ if (stmpe->partnum == STMPE1600)
+ stmpe_reg_read(stmpe,
+ stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
}
static void stmpe_dbg_show_one(struct seq_file *s,
@@ -230,10 +251,10 @@ static void stmpe_dbg_show_one(struct seq_file *s,
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
const char *label = gpiochip_is_requested(gc, offset);
- int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
bool val = !!stmpe_gpio_get(gc, offset);
- u8 dir_reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
- u8 mask = 1 << (offset % 8);
+ u8 bank = offset / 8;
+ u8 dir_reg = stmpe->regs[STMPE_IDX_GPDR_LSB + bank];
+ u8 mask = BIT(offset % 8);
int ret;
u8 dir;
@@ -247,39 +268,72 @@ static void stmpe_dbg_show_one(struct seq_file *s,
gpio, label ?: "(none)",
val ? "hi" : "lo");
} else {
- u8 edge_det_reg = stmpe->regs[STMPE_IDX_GPEDR_MSB] + num_banks - 1 - (offset / 8);
- u8 rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB] - (offset / 8);
- u8 fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB] - (offset / 8);
- u8 irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB] - (offset / 8);
- bool edge_det;
- bool rise;
- bool fall;
+ u8 edge_det_reg;
+ u8 rise_reg;
+ u8 fall_reg;
+ u8 irqen_reg;
+
+ char *edge_det_values[] = {"edge-inactive",
+ "edge-asserted",
+ "not-supported"};
+ char *rise_values[] = {"no-rising-edge-detection",
+ "rising-edge-detection",
+ "not-supported"};
+ char *fall_values[] = {"no-falling-edge-detection",
+ "falling-edge-detection",
+ "not-supported"};
+ #define NOT_SUPPORTED_IDX 2
+ u8 edge_det = NOT_SUPPORTED_IDX;
+ u8 rise = NOT_SUPPORTED_IDX;
+ u8 fall = NOT_SUPPORTED_IDX;
bool irqen;
- ret = stmpe_reg_read(stmpe, edge_det_reg);
- if (ret < 0)
+ switch (stmpe->partnum) {
+ case STMPE610:
+ case STMPE811:
+ case STMPE1601:
+ case STMPE2401:
+ case STMPE2403:
+ edge_det_reg = stmpe->regs[STMPE_IDX_GPEDR_LSB + bank];
+ ret = stmpe_reg_read(stmpe, edge_det_reg);
+ if (ret < 0)
+ return;
+ edge_det = !!(ret & mask);
+
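+ /* fall through - these variants also have rise/fall edge registers */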
+ case STMPE1801:
+ rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
+ fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
+
+ ret = stmpe_reg_read(stmpe, rise_reg);
+ if (ret < 0)
+ return;
+ rise = !!(ret & mask);
+ ret = stmpe_reg_read(stmpe, fall_reg);
+ if (ret < 0)
+ return;
+ fall = !!(ret & mask);
+
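+ /* fall through - every variant reaching here has the IRQ enable register */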
+ case STMPE801:
+ case STMPE1600:
+ irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
+ break;
+
+ default:
return;
- edge_det = !!(ret & mask);
- ret = stmpe_reg_read(stmpe, rise_reg);
- if (ret < 0)
- return;
- rise = !!(ret & mask);
- ret = stmpe_reg_read(stmpe, fall_reg);
- if (ret < 0)
- return;
- fall = !!(ret & mask);
+ }
+
ret = stmpe_reg_read(stmpe, irqen_reg);
if (ret < 0)
return;
irqen = !!(ret & mask);
- seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s %s%s%s",
+ seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
gpio, label ?: "(none)",
val ? "hi" : "lo",
- edge_det ? "edge-asserted" : "edge-inactive",
- irqen ? "IRQ-enabled" : "",
- rise ? " rising-edge-detection" : "",
- fall ? " falling-edge-detection" : "");
+ edge_det_values[edge_det],
+ irqen ? "IRQ-enabled" : "IRQ-disabled",
+ rise_values[rise],
+ fall_values[fall]);
}
}
@@ -307,18 +361,32 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
{
struct stmpe_gpio *stmpe_gpio = dev;
struct stmpe *stmpe = stmpe_gpio->stmpe;
- u8 statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_MSB];
+ u8 statmsbreg;
int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
u8 status[num_banks];
int ret;
int i;
+ /*
+ * The stmpe_block_read() call below requires statmsbreg to point at
+ * the register with the lowest address. The STMPE1600 is the only
+ * variant whose LSB registers sit at lower addresses than the MSB
+ * ones; all other variants lay out the MSB registers before the
+ * LSB ones.
+ */
+ if (stmpe->partnum == STMPE1600)
+ statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_LSB];
+ else
+ statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_MSB];
+
ret = stmpe_block_read(stmpe, statmsbreg, num_banks, status);
if (ret < 0)
return IRQ_NONE;
for (i = 0; i < num_banks; i++) {
- int bank = num_banks - i - 1;
+ int bank = (stmpe_gpio->stmpe->partnum == STMPE1600) ? i :
+ num_banks - i - 1;
unsigned int enabled = stmpe_gpio->regs[REG_IE][bank];
unsigned int stat = status[i];
@@ -333,15 +401,21 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
line);
handle_nested_irq(child_irq);
- stat &= ~(1 << bit);
+ stat &= ~BIT(bit);
}
- stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
-
- /* Edge detect register is not present on 801 */
- if (stmpe->partnum != STMPE801)
- stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB]
- + i, status[i]);
+ /*
+ * Writing the interrupt status register has no effect on the
+ * 801/1600/1801 (their bits are cleared on read), and the edge
+ * detect register is not present on those variants either, so
+ * skip both writes for them.
+ */
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
+ stmpe->partnum != STMPE1801) {
+ stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
+ stmpe_reg_write(stmpe,
+ stmpe->regs[STMPE_IDX_GPEDR_LSB + i],
+ status[i]);
+ }
}
return IRQ_HANDLED;
@@ -376,6 +450,8 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
of_property_read_u32(np, "st,norequest-mask",
&stmpe_gpio->norequest_mask);
+ if (stmpe_gpio->norequest_mask)
+ stmpe_gpio->chip.irq_need_valid_mask = true;
if (irq < 0)
dev_info(&pdev->dev,
@@ -400,6 +476,14 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
goto out_disable;
}
+ if (stmpe_gpio->norequest_mask) {
+ int i;
+
+ /* Forbid unused lines to be mapped as IRQs */
+ for (i = 0; i < 8 * sizeof(u32); i++)
+ if (stmpe_gpio->norequest_mask & BIT(i))
+ clear_bit(i, stmpe_gpio->chip.irq_valid_mask);
+ }
ret = gpiochip_irqchip_add(&stmpe_gpio->chip,
&stmpe_gpio_irq_chip,
0,
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index a177ebd921d5..af95de89db01 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -236,7 +236,6 @@ static const struct i2c_device_id sx150x_id[] = {
{"sx1502q", 3},
{}
};
-MODULE_DEVICE_TABLE(i2c, sx150x_id);
static const struct of_device_id sx150x_of_match[] = {
{ .compatible = "semtech,sx1508q" },
@@ -245,7 +244,6 @@ static const struct of_device_id sx150x_of_match[] = {
{ .compatible = "semtech,sx1502q" },
{},
};
-MODULE_DEVICE_TABLE(of, sx150x_of_match);
static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
{
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 8b3659352e49..5a5a6cb00eea 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -34,7 +34,7 @@ struct tc3589x_gpio {
u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
};
-static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -49,24 +49,24 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(ret & mask);
}
-static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
- unsigned pos = offset % 8;
+ unsigned int pos = offset % 8;
u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}
static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int val)
+ unsigned int offset, int val)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODIR0 + offset / 8;
- unsigned pos = offset % 8;
+ unsigned int pos = offset % 8;
tc3589x_gpio_set(chip, offset, val);
@@ -74,19 +74,35 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
}
static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
+ unsigned int offset)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODIR0 + offset / 8;
- unsigned pos = offset % 8;
+ unsigned int pos = offset % 8;
return tc3589x_set_bits(tc3589x, reg, BIT(pos), 0);
}
-static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
- unsigned offset,
- enum single_ended_mode mode)
+static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
+ struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
+ u8 reg = TC3589x_GPIODIR0 + offset / 8;
+ unsigned int pos = offset % 8;
+ int ret;
+
+ ret = tc3589x_reg_read(tc3589x, reg);
+ if (ret < 0)
+ return ret;
+
+ return !!(ret & BIT(pos));
+}
+
+static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
+ unsigned int offset,
+ enum single_ended_mode mode)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -97,7 +113,7 @@ static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
*/
u8 odmreg = TC3589x_GPIOODM0 + (offset / 8) * 2;
u8 odereg = TC3589x_GPIOODE0 + (offset / 8) * 2;
- unsigned pos = offset % 8;
+ unsigned int pos = offset % 8;
int ret;
switch(mode) {
@@ -124,14 +140,15 @@ static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
return -ENOTSUPP;
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
- .direction_input = tc3589x_gpio_direction_input,
.get = tc3589x_gpio_get,
- .direction_output = tc3589x_gpio_direction_output,
.set = tc3589x_gpio_set,
- .set_single_ended = tc3589x_gpio_single_ended,
+ .direction_output = tc3589x_gpio_direction_output,
+ .direction_input = tc3589x_gpio_direction_input,
+ .get_direction = tc3589x_gpio_get_direction,
+ .set_single_ended = tc3589x_gpio_set_single_ended,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index cace79c1b70a..c8b34d787eed 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -87,7 +87,7 @@ static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
tpic2810_set_mask_bits(chip, *mask, *bits);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "tpic2810",
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 8e25f01ac314..b23c4d2429be 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -72,7 +72,7 @@ static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "tps65086-gpio",
.owner = THIS_MODULE,
.get_direction = tps65086_gpio_get_direction,
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 1c09a19ae10c..d779307a9685 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -172,7 +172,7 @@ static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
return -ENOTSUPP;
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "gpio-tps65218",
.owner = THIS_MODULE,
.request = tps65218_gpio_request,
@@ -204,7 +204,8 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
#endif
- ret = gpiochip_add_data(&tps65218_gpio->gpio_chip, tps65218_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps65218_gpio->gpio_chip,
+ tps65218_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
return ret;
@@ -215,15 +216,6 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int tps65218_gpio_remove(struct platform_device *pdev)
-{
- struct tps65218_gpio *tps65218_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps65218_gpio->gpio_chip);
-
- return 0;
-}
-
static const struct of_device_id tps65218_dt_match[] = {
{ .compatible = "ti,tps65218-gpio" },
{ }
@@ -242,7 +234,6 @@ static struct platform_driver tps65218_gpio_driver = {
.of_match_table = of_match_ptr(tps65218_dt_match)
},
.probe = tps65218_gpio_probe,
- .remove = tps65218_gpio_remove,
.id_table = tps65218_gpio_id_table,
};
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index acfd30a13a56..abc0798ef843 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -90,7 +90,7 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "tps65912-gpio",
.owner = THIS_MODULE,
.get_direction = tps65912_gpio_get_direction,
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 0c144a72f9af..99256115bea5 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -9,6 +9,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
new file mode 100644
index 000000000000..5bd21725e604
--- /dev/null
+++ b/drivers/gpio/gpio-ts4900.c
@@ -0,0 +1,190 @@
+/*
+ * Digital I/O driver for Technologic Systems I2C FPGA Core
+ *
+ * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2016 Savoir-Faire Linux
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define DEFAULT_PIN_NUMBER 32
+/*
+ * Register bits used by the GPIO device.
+ * Some boards, such as the TS-7970, do not have a separate input bit.
+ */
+#define TS4900_GPIO_OE 0x01
+#define TS4900_GPIO_OUT 0x02
+#define TS4900_GPIO_IN 0x04
+#define TS7970_GPIO_IN 0x02
+
+struct ts4900_gpio_priv {
+ struct regmap *regmap;
+ struct gpio_chip gpio_chip;
+ unsigned int input_bit;
+};
+
+static int ts4900_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int reg;
+
+ regmap_read(priv->regmap, offset, &reg);
+
+ return !(reg & TS4900_GPIO_OE);
+}
+
+static int ts4900_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+
+	/*
+	 * This clears the output-enable bit; the other bits are
+	 * don't-care once it is cleared.
+	 */
+ return regmap_write(priv->regmap, offset, 0);
+}
+
+static int ts4900_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+ int ret;
+
+ if (value)
+ ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
+ TS4900_GPIO_OUT);
+ else
+ ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE);
+
+ return ret;
+}
+
+static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int reg;
+
+ regmap_read(priv->regmap, offset, &reg);
+
+ return !!(reg & priv->input_bit);
+}
+
+static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+
+ if (value)
+ regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
+ TS4900_GPIO_OUT);
+ else
+ regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
+}
+
+static const struct regmap_config ts4900_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
+static const struct gpio_chip template_chip = {
+ .label = "ts4900-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = ts4900_gpio_get_direction,
+ .direction_input = ts4900_gpio_direction_input,
+ .direction_output = ts4900_gpio_direction_output,
+ .get = ts4900_gpio_get,
+ .set = ts4900_gpio_set,
+ .base = -1,
+ .can_sleep = true,
+};
+
+static const struct of_device_id ts4900_gpio_of_match_table[] = {
+ {
+ .compatible = "technologic,ts4900-gpio",
+ .data = (void *)TS4900_GPIO_IN,
+ }, {
+ .compatible = "technologic,ts7970-gpio",
+ .data = (void *)TS7970_GPIO_IN,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ts4900_gpio_of_match_table);
+
+static int ts4900_gpio_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct of_device_id *match;
+ struct ts4900_gpio_priv *priv;
+ u32 ngpio;
+ int ret;
+
+ match = of_match_device(ts4900_gpio_of_match_table, &client->dev);
+ if (!match)
+ return -EINVAL;
+
+ if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
+ ngpio = DEFAULT_PIN_NUMBER;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gpio_chip = template_chip;
+ priv->gpio_chip.label = "ts4900-gpio";
+ priv->gpio_chip.ngpio = ngpio;
+ priv->gpio_chip.parent = &client->dev;
+ priv->input_bit = (uintptr_t)match->data;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_gpiochip_add_data(&client->dev, &priv->gpio_chip, priv);
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id ts4900_gpio_id_table[] = {
+ { "ts4900-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ts4900_gpio_id_table);
+
+static struct i2c_driver ts4900_gpio_driver = {
+ .driver = {
+ .name = "ts4900-gpio",
+ .of_match_table = ts4900_gpio_of_match_table,
+ },
+ .probe = ts4900_gpio_probe,
+ .id_table = ts4900_gpio_id_table,
+};
+module_i2c_driver(ts4900_gpio_driver);
+
+MODULE_AUTHOR("Technologic Systems");
+MODULE_DESCRIPTION("GPIO interface for Technologic Systems I2C-FPGA core");
+MODULE_LICENSE("GPL");
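
In gpio-ts4900.c the per-variant input bit travels through the of_device_id .data pointer and is recovered in probe via a uintptr_t cast. A standalone sketch of that round trip, with the constants copied from the patch but a hypothetical match table and lookup helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TS4900_GPIO_IN 0x04
#define TS7970_GPIO_IN 0x02

struct of_match {
	const char *compatible;
	const void *data;	/* small integer smuggled as a pointer */
};

static const struct of_match match_table[] = {
	{ "technologic,ts4900-gpio", (void *)(uintptr_t)TS4900_GPIO_IN },
	{ "technologic,ts7970-gpio", (void *)(uintptr_t)TS7970_GPIO_IN },
	{ NULL, NULL },
};

static const struct of_match *match_device(const char *compatible)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m;
	return NULL;
}

int main(void)
{
	const struct of_match *m = match_device("technologic,ts7970-gpio");

	if (m) {
		/* recover the flag exactly as the probe routine does */
		unsigned int input_bit = (uintptr_t)m->data;

		printf("input bit: 0x%02x\n", input_bit);
	}
	return 0;
}
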
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 4b807b0e0c8e..dfcfbba74416 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -381,7 +381,7 @@ static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
: -EINVAL;
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "twl4030",
.owner = THIS_MODULE,
.request = twl_request,
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 6284bdbe1e0c..3edb09cb9ee0 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -1,5 +1,5 @@
/*
- * vf610 GPIO support through PORT and GPIO module
+ * Freescale vf610 GPIO support through PORT and GPIO
*
* Copyright (c) 2014 Toradex AG.
*
@@ -23,7 +23,6 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -289,7 +288,3 @@ static int __init gpio_vf610_init(void)
return platform_driver_register(&vf610_gpio_driver);
}
device_initcall(gpio_vf610_init);
-
-MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
-MODULE_DESCRIPTION("Freescale VF610 GPIO");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
new file mode 100644
index 000000000000..d0ddba7a9d08
--- /dev/null
+++ b/drivers/gpio/gpio-wcove.c
@@ -0,0 +1,470 @@
+/*
+ * Intel Whiskey Cove PMIC GPIO Driver
+ *
+ * This driver is based on gpio-crystalcove.c
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+/*
+ * Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks:
+ * Bank 0: Pin 0 - 6
+ * Bank 1: Pin 7 - 10
+ * Bank 2: Pin 11 - 12
+ * Each pin has one output control register and one input control register.
+ */
+#define BANK0_NR_PINS 7
+#define BANK1_NR_PINS 4
+#define BANK2_NR_PINS 2
+#define WCOVE_GPIO_NUM (BANK0_NR_PINS + BANK1_NR_PINS + BANK2_NR_PINS)
+#define WCOVE_VGPIO_NUM 94
+/* GPIO output control registers (one per pin): 0x4e44 - 0x4e50 */
+#define GPIO_OUT_CTRL_BASE 0x4e44
+/* GPIO input control registers (one per pin): 0x4e51 - 0x4e5d */
+#define GPIO_IN_CTRL_BASE 0x4e51
+
+/*
+ * GPIO interrupts are organized in two groups:
+ * Group 0: Bank 0 pins (Pin 0 - 6)
+ * Group 1: Bank 1 and Bank 2 pins (Pin 7 - 12)
+ * Each group has two registers (one bit per pin): status and mask.
+ */
+#define GROUP0_NR_IRQS 7
+#define GROUP1_NR_IRQS 6
+#define IRQ_MASK_BASE 0x4e19
+#define IRQ_STATUS_BASE 0x4e0b
+#define UPDATE_IRQ_TYPE BIT(0)
+#define UPDATE_IRQ_MASK BIT(1)
+
+#define CTLI_INTCNT_DIS (0 << 1)
+#define CTLI_INTCNT_NE (1 << 1)
+#define CTLI_INTCNT_PE (2 << 1)
+#define CTLI_INTCNT_BE (3 << 1)
+
+#define CTLO_DIR_IN (0 << 5)
+#define CTLO_DIR_OUT (1 << 5)
+
+#define CTLO_DRV_MASK (1 << 4)
+#define CTLO_DRV_OD (0 << 4)
+#define CTLO_DRV_CMOS (1 << 4)
+
+#define CTLO_DRV_REN (1 << 3)
+
+#define CTLO_RVAL_2KDOWN (0 << 1)
+#define CTLO_RVAL_2KUP (1 << 1)
+#define CTLO_RVAL_50KDOWN (2 << 1)
+#define CTLO_RVAL_50KUP (3 << 1)
+
+#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP)
+#define CTLO_OUTPUT_SET (CTLO_DIR_OUT | CTLO_INPUT_SET)
+
+enum ctrl_register {
+ CTRL_IN,
+ CTRL_OUT,
+};
+
+/*
+ * struct wcove_gpio - Whiskey Cove GPIO controller
+ * @buslock: for bus lock/sync and unlock.
+ * @chip: the abstract gpio_chip structure.
+ * @dev: the gpio device
+ * @regmap: the regmap from the parent device.
+ * @regmap_irq_chip: the regmap of the gpio irq chip.
+ * @update: pending IRQ setting update, to be written to the chip upon unlock.
+ * @intcnt: the Interrupt Detect value to be written.
+ * @set_irq_mask: true if the IRQ mask needs to be set, false to clear.
+ */
+struct wcove_gpio {
+ struct mutex buslock;
+ struct gpio_chip chip;
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ int update;
+ int intcnt;
+ bool set_irq_mask;
+};
+
+static inline unsigned int to_reg(int gpio, enum ctrl_register reg_type)
+{
+ unsigned int reg;
+ int bank;
+
+ if (gpio < BANK0_NR_PINS)
+ bank = 0;
+ else if (gpio < BANK0_NR_PINS + BANK1_NR_PINS)
+ bank = 1;
+ else
+ bank = 2;
+
+ if (reg_type == CTRL_IN)
+ reg = GPIO_IN_CTRL_BASE + bank;
+ else
+ reg = GPIO_OUT_CTRL_BASE + bank;
+
+ return reg;
+}
+
+static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+{
+ unsigned int reg, mask;
+
+ if (gpio < GROUP0_NR_IRQS) {
+ reg = IRQ_MASK_BASE;
+ mask = BIT(gpio % GROUP0_NR_IRQS);
+ } else {
+ reg = IRQ_MASK_BASE + 1;
+ mask = BIT((gpio - GROUP0_NR_IRQS) % GROUP1_NR_IRQS);
+ }
+
+ if (wg->set_irq_mask)
+ regmap_update_bits(wg->regmap, reg, mask, mask);
+ else
+ regmap_update_bits(wg->regmap, reg, mask, 0);
+}
+
+static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
+{
+ unsigned int reg = to_reg(gpio, CTRL_IN);
+
+ regmap_update_bits(wg->regmap, reg, CTLI_INTCNT_BE, wg->intcnt);
+}
+
+static int wcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_INPUT_SET);
+}
+
+static int wcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
+ int value)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_OUTPUT_SET | value);
+}
+
+static int wcove_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &val);
+ if (ret)
+ return ret;
+
+ return !(val & CTLO_DIR_OUT);
+}
+
+static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &val);
+ if (ret)
+ return ret;
+
+ return val & 0x1;
+}
+
+static void wcove_gpio_set(struct gpio_chip *chip,
+ unsigned int gpio, int value)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ if (value)
+ regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+ else
+ regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+}
+
+static int wcove_gpio_set_single_ended(struct gpio_chip *chip,
+ unsigned int gpio,
+ enum single_ended_mode mode)
+{
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_DRV_MASK, CTLO_DRV_OD);
+ case LINE_MODE_PUSH_PULL:
+ return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_DRV_MASK, CTLO_DRV_CMOS);
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int wcove_irq_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ wg->intcnt = CTLI_INTCNT_DIS;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ wg->intcnt = CTLI_INTCNT_BE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ wg->intcnt = CTLI_INTCNT_PE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ wg->intcnt = CTLI_INTCNT_NE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wg->update |= UPDATE_IRQ_TYPE;
+
+ return 0;
+}
+
+static void wcove_bus_lock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ mutex_lock(&wg->buslock);
+}
+
+static void wcove_bus_sync_unlock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int gpio = data->hwirq;
+
+ if (wg->update & UPDATE_IRQ_TYPE)
+ wcove_update_irq_ctrl(wg, gpio);
+ if (wg->update & UPDATE_IRQ_MASK)
+ wcove_update_irq_mask(wg, gpio);
+ wg->update = 0;
+
+ mutex_unlock(&wg->buslock);
+}
+
+static void wcove_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ wg->set_irq_mask = false;
+ wg->update |= UPDATE_IRQ_MASK;
+}
+
+static void wcove_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+ wg->set_irq_mask = true;
+ wg->update |= UPDATE_IRQ_MASK;
+}
+
+static struct irq_chip wcove_irqchip = {
+ .name = "Whiskey Cove",
+ .irq_mask = wcove_irq_mask,
+ .irq_unmask = wcove_irq_unmask,
+ .irq_set_type = wcove_irq_type,
+ .irq_bus_lock = wcove_bus_lock,
+ .irq_bus_sync_unlock = wcove_bus_sync_unlock,
+};
+
+static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
+{
+ struct wcove_gpio *wg = (struct wcove_gpio *)data;
+ unsigned int pending, virq, gpio, mask, offset;
+ u8 p[2];
+
+ if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
+ dev_err(wg->dev, "Failed to read irq status register\n");
+ return IRQ_NONE;
+ }
+
+ pending = p[0] | (p[1] << 8);
+ if (!pending)
+ return IRQ_NONE;
+
+ /* Iterate until no interrupt is pending */
+ while (pending) {
+ /* One iteration is for all pending bits */
+ for_each_set_bit(gpio, (const unsigned long *)&pending,
+ GROUP0_NR_IRQS) {
+ offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
+ mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
+ BIT(gpio);
+ virq = irq_find_mapping(wg->chip.irqdomain, gpio);
+ handle_nested_irq(virq);
+ regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
+ mask, mask);
+ }
+
+ /* Next iteration */
+ if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
+ dev_err(wg->dev, "Failed to read irq status\n");
+ break;
+ }
+
+ pending = p[0] | (p[1] << 8);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void wcove_gpio_dbg_show(struct seq_file *s,
+ struct gpio_chip *chip)
+{
+ unsigned int ctlo, ctli, irq_mask, irq_status;
+ struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int gpio, offset, group, ret = 0;
+
+ for (gpio = 0; gpio < WCOVE_GPIO_NUM; gpio++) {
+ group = gpio < GROUP0_NR_IRQS ? 0 : 1;
+ ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
+ ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &ctli);
+ ret += regmap_read(wg->regmap, IRQ_MASK_BASE + group,
+ &irq_mask);
+ ret += regmap_read(wg->regmap, IRQ_STATUS_BASE + group,
+ &irq_status);
+ if (ret) {
+ pr_err("Failed to read registers: ctrl out/in or irq status/mask\n");
+ break;
+ }
+
+ offset = gpio % 8;
+ seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
+ gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
+ ctli & 0x1 ? "hi" : "lo",
+ ctli & CTLI_INTCNT_NE ? "fall" : " ",
+ ctli & CTLI_INTCNT_PE ? "rise" : " ",
+ ctlo,
+ irq_mask & BIT(offset) ? "mask " : "unmask",
+ irq_status & BIT(offset) ? "pending" : " ");
+ }
+}
+
+static int wcove_gpio_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic;
+ struct wcove_gpio *wg;
+ int virq, ret, irq;
+ struct device *dev;
+
+	/*
+	 * This GPIO platform device is created by an MFD device (see
+	 * drivers/mfd/intel_soc_pmic_bxtwc.c for details). Information
+	 * shared by all sub-devices created by the MFD device, such as
+	 * the regmap pointer, is stored as driver data of the MFD device
+	 * driver.
+	 */
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ dev = &pdev->dev;
+
+ wg = devm_kzalloc(dev, sizeof(*wg), GFP_KERNEL);
+ if (!wg)
+ return -ENOMEM;
+
+ wg->regmap_irq_chip = pmic->irq_chip_data_level2;
+
+ platform_set_drvdata(pdev, wg);
+
+ mutex_init(&wg->buslock);
+ wg->chip.label = KBUILD_MODNAME;
+ wg->chip.direction_input = wcove_gpio_dir_in;
+ wg->chip.direction_output = wcove_gpio_dir_out;
+ wg->chip.get_direction = wcove_gpio_get_direction;
+ wg->chip.get = wcove_gpio_get;
+ wg->chip.set = wcove_gpio_set;
+ wg->chip.set_single_ended = wcove_gpio_set_single_ended,
+ wg->chip.base = -1;
+ wg->chip.ngpio = WCOVE_VGPIO_NUM;
+ wg->chip.can_sleep = true;
+ wg->chip.parent = pdev->dev.parent;
+ wg->chip.dbg_show = wcove_gpio_dbg_show;
+ wg->dev = dev;
+ wg->regmap = pmic->regmap;
+
+ ret = devm_gpiochip_add_data(dev, &wg->chip, wg);
+ if (ret) {
+ dev_err(dev, "Failed to add gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&wg->chip, &wcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "Failed to add irqchip: %d\n", ret);
+ return ret;
+ }
+
+ virq = regmap_irq_get_virq(wg->regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(dev, "Failed to get virq by irq %d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(dev, virq, NULL,
+ wcove_gpio_irq_handler, IRQF_ONESHOT, pdev->name, wg);
+ if (ret) {
+ dev_err(dev, "Failed to request irq %d\n", virq);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The Whiskey Cove PMIC itself is an analog device (with a digital control
+ * interface) that provides power management for other devices in the
+ * accompanying SoC, so this GPIO driver has no .pm operations.
+ */
+static struct platform_driver wcove_gpio_driver = {
+ .driver = {
+ .name = "bxt_wcove_gpio",
+ },
+ .probe = wcove_gpio_probe,
+};
+
+module_platform_driver(wcove_gpio_driver);
+
+MODULE_AUTHOR("Ajay Thomas <ajay.thomas.david.rajamanickam@intel.com>");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_DESCRIPTION("Intel Whiskey Cove GPIO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bxt_wcove_gpio");
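
The Whiskey Cove driver's to_reg() picks a bank for a flat GPIO number and indexes the input/output control register blocks by that bank. A standalone recreation of just that arithmetic, with the constants copied from the patch and the printout purely for illustration:

#include <stdio.h>

#define BANK0_NR_PINS		7
#define BANK1_NR_PINS		4
#define GPIO_OUT_CTRL_BASE	0x4e44
#define GPIO_IN_CTRL_BASE	0x4e51

enum ctrl_register { CTRL_IN, CTRL_OUT };

static unsigned int to_reg(int gpio, enum ctrl_register reg_type)
{
	int bank;

	if (gpio < BANK0_NR_PINS)
		bank = 0;
	else if (gpio < BANK0_NR_PINS + BANK1_NR_PINS)
		bank = 1;
	else
		bank = 2;

	return (reg_type == CTRL_IN ? GPIO_IN_CTRL_BASE
				    : GPIO_OUT_CTRL_BASE) + bank;
}

int main(void)
{
	int gpio;

	/* pins 0-6 -> bank 0, pins 7-10 -> bank 1, pins 11-12 -> bank 2 */
	for (gpio = 0; gpio < 13; gpio++)
		printf("gpio %2d: out 0x%04x, in 0x%04x\n",
		       gpio, to_reg(gpio, CTRL_OUT), to_reg(gpio, CTRL_IN));
	return 0;
}
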
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 21f97bcd0062..533707f943f4 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -247,7 +247,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define wm831x_gpio_dbg_show NULL
#endif
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "wm831x",
.owner = THIS_MODULE,
.direction_input = wm831x_gpio_direction_in,
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index e9765707d5c1..e46752e73dd9 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -93,7 +93,7 @@ static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return wm8350->irq_base + WM8350_IRQ_GPIO(offset);
}
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "wm8350",
.owner = THIS_MODULE,
.direction_input = wm8350_gpio_direction_in,
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 2457aac8592e..68410fda6138 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -249,7 +249,7 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define wm8994_gpio_dbg_show NULL
#endif
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
.label = "wm8994",
.owner = THIS_MODULE,
.request = wm8994_gpio_request,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index e72794e463aa..6b4d10d6e10f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -96,6 +96,9 @@
/* GPIO upper 16 bit mask */
#define ZYNQ_GPIO_UPPER_MASK 0xFFFF0000
+/* For GPIO quirks */
+#define ZYNQ_GPIO_QUIRK_FOO BIT(0)
+
/**
* struct zynq_gpio - gpio device private data structure
* @chip: instance of the gpio_chip
@@ -122,6 +125,7 @@ struct zynq_gpio {
*/
struct zynq_platform_data {
const char *label;
+ u32 quirks;
u16 ngpio;
int max_bank;
int bank_min[ZYNQMP_GPIO_MAX_BANK];
@@ -238,13 +242,19 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
{
u32 reg;
+ bool is_zynq_gpio;
unsigned int bank_num, bank_pin_num;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
+ is_zynq_gpio = gpio->p_data->quirks & ZYNQ_GPIO_QUIRK_FOO;
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
- /* bank 0 pins 7 and 8 are special and cannot be used as inputs */
- if (bank_num == 0 && (bank_pin_num == 7 || bank_pin_num == 8))
+ /*
+ * On zynq bank 0 pins 7 and 8 are special and cannot be used
+ * as inputs.
+ */
+ if (is_zynq_gpio && bank_num == 0 &&
+ (bank_pin_num == 7 || bank_pin_num == 8))
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
@@ -627,6 +637,7 @@ static const struct zynq_platform_data zynqmp_gpio_def = {
static const struct zynq_platform_data zynq_gpio_def = {
.label = "zynq_gpio",
+ .quirks = ZYNQ_GPIO_QUIRK_FOO,
.ngpio = ZYNQ_GPIO_NR_GPIOS,
.max_bank = ZYNQ_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(),
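
The Zynq change keys SoC-specific behaviour off a quirks bitmask in the per-variant platform data instead of hard-coding it. A minimal standalone model of that check (struct and constant names mirror the patch, the rest is a sketch):

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define ZYNQ_GPIO_QUIRK_FOO	BIT(0)

struct zynq_platform_data {
	const char *label;
	unsigned int quirks;
};

static const struct zynq_platform_data zynq_gpio_def = {
	.label  = "zynq_gpio",
	.quirks = ZYNQ_GPIO_QUIRK_FOO,
};

static const struct zynq_platform_data zynqmp_gpio_def = {
	.label  = "zynqmp_gpio",
	.quirks = 0,
};

/* Mirrors zynq_gpio_dir_in(): only the quirky SoC rejects bank 0 pins 7/8. */
static int dir_in_allowed(const struct zynq_platform_data *p,
			  unsigned int bank, unsigned int pin)
{
	int is_zynq_gpio = p->quirks & ZYNQ_GPIO_QUIRK_FOO;

	if (is_zynq_gpio && bank == 0 && (pin == 7 || pin == 8))
		return 0;
	return 1;
}

int main(void)
{
	printf("%s bank0 pin7 input: %s\n", zynq_gpio_def.label,
	       dir_in_allowed(&zynq_gpio_def, 0, 7) ? "ok" : "rejected");
	printf("%s bank0 pin7 input: %s\n", zynqmp_gpio_def.label,
	       dir_in_allowed(&zynqmp_gpio_def, 0, 7) ? "ok" : "rejected");
	return 0;
}
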
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index af514618d7fb..58ece201b8e6 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -14,6 +14,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
@@ -395,7 +396,7 @@ struct acpi_gpio_lookup {
int n;
};
-static int acpi_find_gpio(struct acpi_resource *ares, void *data)
+static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
{
struct acpi_gpio_lookup *lookup = data;
@@ -440,7 +441,8 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
INIT_LIST_HEAD(&res_list);
- ret = acpi_dev_get_resources(lookup->adev, &res_list, acpi_find_gpio,
+ ret = acpi_dev_get_resources(lookup->adev, &res_list,
+ acpi_populate_gpio_lookup,
lookup);
if (ret < 0)
return ret;
@@ -513,7 +515,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* Note: if the GPIO resource has multiple entries in the pin list, this
* function only returns the first.
*/
-struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
+static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
const char *propname, int index,
struct acpi_gpio_info *info)
{
@@ -546,6 +548,55 @@ struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ret ? ERR_PTR(ret) : lookup.desc;
}
+struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_gpio_info info;
+ struct gpio_desc *desc;
+ char propname[32];
+ int i;
+
+ /* Try first from _DSD */
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id && strcmp(con_id, "gpios")) {
+ snprintf(propname, sizeof(propname), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ } else {
+ snprintf(propname, sizeof(propname), "%s",
+ gpio_suffixes[i]);
+ }
+
+ desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
+ if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ break;
+ }
+
+ /* Then from plain _CRS GPIOs */
+ if (IS_ERR(desc)) {
+ if (!acpi_can_fallback_to_crs(adev, con_id))
+ return ERR_PTR(-ENOENT);
+
+ desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
+ if (IS_ERR(desc))
+ return desc;
+
+ if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+ info.gpioint) {
+ dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
+ }
+ }
+
+ if (info.polarity == GPIO_ACTIVE_LOW)
+ *lookupflags |= GPIO_ACTIVE_LOW;
+
+ return desc;
+}
+
/**
* acpi_node_get_gpiod() - get a GPIO descriptor from ACPI resources
* @fwnode: pointer to an ACPI firmware node to get the GPIO information from
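
acpi_find_gpio() above builds candidate _DSD property names from the connection ID plus the shared "gpios"/"gpio" suffixes before falling back to plain _CRS resources. The name construction in isolation, as a standalone sketch with a hypothetical print helper:

#include <stdio.h>
#include <string.h>

static const char * const gpio_suffixes[] = { "gpios", "gpio" };

static void print_candidates(const char *con_id)
{
	char propname[32];
	size_t i;

	for (i = 0; i < sizeof(gpio_suffixes) / sizeof(gpio_suffixes[0]); i++) {
		/* "<con_id>-gpios" first; bare "gpios" when no usable con_id */
		if (con_id && strcmp(con_id, "gpios"))
			snprintf(propname, sizeof(propname), "%s-%s",
				 con_id, gpio_suffixes[i]);
		else
			snprintf(propname, sizeof(propname), "%s",
				 gpio_suffixes[i]);
		printf("  try \"%s\"\n", propname);
	}
}

int main(void)
{
	printf("con_id = \"reset\":\n");
	print_candidates("reset");
	printf("con_id = NULL:\n");
	print_candidates(NULL);
	return 0;
}
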
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a28feb3edf33..ecad3f0e3b77 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -113,6 +113,45 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
+struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ enum of_gpio_flags of_flags;
+ struct gpio_desc *desc;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id)
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
+ gpio_suffixes[i]);
+ else
+ snprintf(prop_name, sizeof(prop_name), "%s",
+ gpio_suffixes[i]);
+
+ desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
+ &of_flags);
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ break;
+ }
+
+ if (IS_ERR(desc))
+ return desc;
+
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ *flags |= GPIO_ACTIVE_LOW;
+
+ if (of_flags & OF_GPIO_SINGLE_ENDED) {
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ *flags |= GPIO_OPEN_DRAIN;
+ else
+ *flags |= GPIO_OPEN_SOURCE;
+ }
+
+ return desc;
+}
+
/**
* of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API
* @np: device node to get GPIO from
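
of_find_gpio(), moved here from gpiolib.c, also translates the OF flags it gets back into lookup flags: active-low always maps to GPIO_ACTIVE_LOW, and a single-ended line becomes open drain when active low, open source otherwise. A compact standalone version of just that mapping (the bit values below are illustrative, not the kernel's definitions):

#include <stdio.h>

#define OF_GPIO_ACTIVE_LOW	0x1
#define OF_GPIO_SINGLE_ENDED	0x2

#define GPIO_ACTIVE_LOW		0x1
#define GPIO_OPEN_DRAIN		0x2
#define GPIO_OPEN_SOURCE	0x4

static unsigned int translate_flags(unsigned int of_flags)
{
	unsigned int flags = 0;

	if (of_flags & OF_GPIO_ACTIVE_LOW)
		flags |= GPIO_ACTIVE_LOW;

	if (of_flags & OF_GPIO_SINGLE_ENDED) {
		if (of_flags & OF_GPIO_ACTIVE_LOW)
			flags |= GPIO_OPEN_DRAIN;
		else
			flags |= GPIO_OPEN_SOURCE;
	}
	return flags;
}

int main(void)
{
	printf("single-ended + active-low  -> 0x%x (open drain)\n",
	       translate_flags(OF_GPIO_SINGLE_ENDED | OF_GPIO_ACTIVE_LOW));
	printf("single-ended, active-high -> 0x%x (open source)\n",
	       translate_flags(OF_GPIO_SINGLE_ENDED));
	return 0;
}
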
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 932e510aec50..4b44dd97c07f 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -670,10 +670,10 @@ int gpiod_export_link(struct device *dev, const char *name,
EXPORT_SYMBOL_GPL(gpiod_export_link);
/**
- * gpiod_unexport - reverse effect of gpio_export()
+ * gpiod_unexport - reverse effect of gpiod_export()
* @gpio: gpio to make unavailable
*
- * This is implicit on gpio_free().
+ * This is implicit on gpiod_free().
*/
void gpiod_unexport(struct gpio_desc *desc)
{
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 53ff25ac66d8..f0fc3a0d37c8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -71,6 +71,8 @@ LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
static bool gpiolib_initialized;
@@ -1167,6 +1169,10 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (status)
goto err_remove_from_list;
+ status = gpiochip_irqchip_init_valid_mask(chip);
+ if (status)
+ goto err_remove_from_list;
+
status = of_gpiochip_add(chip);
if (status)
goto err_remove_chip;
@@ -1192,6 +1198,7 @@ err_remove_chip:
acpi_gpiochip_remove(chip);
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
+ gpiochip_irqchip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
@@ -1363,19 +1370,15 @@ struct gpio_chip *gpiochip_find(void *data,
void *data))
{
struct gpio_device *gdev;
- struct gpio_chip *chip;
+ struct gpio_chip *chip = NULL;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list)
- if (gdev->chip && match(gdev->chip, data))
+ if (gdev->chip && match(gdev->chip, data)) {
+ chip = gdev->chip;
break;
-
- /* No match? */
- if (&gdev->list == &gpio_devices)
- chip = NULL;
- else
- chip = gdev->chip;
+ }
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1401,6 +1404,40 @@ static struct gpio_chip *find_chip_by_name(const char *name)
* The following is irqchip helper code for gpiochips.
*/
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+{
+ int i;
+
+ if (!gpiochip->irq_need_valid_mask)
+ return 0;
+
+ gpiochip->irq_valid_mask = kcalloc(BITS_TO_LONGS(gpiochip->ngpio),
+ sizeof(long), GFP_KERNEL);
+ if (!gpiochip->irq_valid_mask)
+ return -ENOMEM;
+
+ /* Assume by default all GPIOs are valid */
+ for (i = 0; i < gpiochip->ngpio; i++)
+ set_bit(i, gpiochip->irq_valid_mask);
+
+ return 0;
+}
+
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+{
+ kfree(gpiochip->irq_valid_mask);
+ gpiochip->irq_valid_mask = NULL;
+}
+
+static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset)
+{
+ /* No mask means all valid */
+ if (likely(!gpiochip->irq_valid_mask))
+ return true;
+ return test_bit(offset, gpiochip->irq_valid_mask);
+}
+
/**
* gpiochip_set_chained_irqchip() - sets a chained irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip chain to
@@ -1442,9 +1479,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
}
/* Set the parent IRQ for all affected IRQs */
- for (offset = 0; offset < gpiochip->ngpio; offset++)
+ for (offset = 0; offset < gpiochip->ngpio; offset++) {
+ if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
+ continue;
irq_set_parent(irq_find_mapping(gpiochip->irqdomain, offset),
parent_irq);
+ }
}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
@@ -1551,9 +1591,12 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
/* Remove all IRQ mappings and delete the domain */
if (gpiochip->irqdomain) {
- for (offset = 0; offset < gpiochip->ngpio; offset++)
+ for (offset = 0; offset < gpiochip->ngpio; offset++) {
+ if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
+ continue;
irq_dispose_mapping(
irq_find_mapping(gpiochip->irqdomain, offset));
+ }
irq_domain_remove(gpiochip->irqdomain);
}
@@ -1562,6 +1605,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
gpiochip->irqchip->irq_release_resources = NULL;
gpiochip->irqchip = NULL;
}
+
+ gpiochip_irqchip_free_valid_mask(gpiochip);
}
/**
@@ -1597,6 +1642,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
+ bool irq_base_set = false;
unsigned int offset;
unsigned irq_base = 0;
@@ -1617,6 +1663,20 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
if (gpiochip->of_node)
of_node = gpiochip->of_node;
#endif
+ /*
+ * Specifying a default trigger is a terrible idea if DT or ACPI is
+	 * used to configure the interrupts, as you may end up with
+ * conflicting triggers. Tell the user, and reset to NONE.
+ */
+ if (WARN(of_node && type != IRQ_TYPE_NONE,
+ "%s: Ignoring %d default trigger\n", of_node->full_name, type))
+ type = IRQ_TYPE_NONE;
+ if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ "Ignoring %d default trigger\n", type);
+ type = IRQ_TYPE_NONE;
+ }
+
gpiochip->irqchip = irqchip;
gpiochip->irq_handler = handler;
gpiochip->irq_default_type = type;
@@ -1646,13 +1706,17 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
* necessary to allocate descriptors for all IRQs.
*/
for (offset = 0; offset < gpiochip->ngpio; offset++) {
+ if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
+ continue;
irq_base = irq_create_mapping(gpiochip->irqdomain, offset);
- if (offset == 0)
+ if (!irq_base_set) {
/*
* Store the base into the gpiochip to be used when
* unmapping the irqs.
*/
gpiochip->irq_base = irq_base;
+ irq_base_set = true;
+ }
}
acpi_gpiochip_request_interrupts(gpiochip);
@@ -1664,6 +1728,12 @@ EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
#else /* CONFIG_GPIOLIB_IRQCHIP */
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+{
+ return 0;
+}
+static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+{ }
#endif /* CONFIG_GPIOLIB_IRQCHIP */
@@ -2813,94 +2883,6 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
mutex_unlock(&gpio_lookup_lock);
}
-static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
-{
- char prop_name[32]; /* 32 is max size of property name */
- enum of_gpio_flags of_flags;
- struct gpio_desc *desc;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id)
- snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
- gpio_suffixes[i]);
- else
- snprintf(prop_name, sizeof(prop_name), "%s",
- gpio_suffixes[i]);
-
- desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
- &of_flags);
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
- break;
- }
-
- if (IS_ERR(desc))
- return desc;
-
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
-
- if (of_flags & OF_GPIO_SINGLE_ENDED) {
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- *flags |= GPIO_OPEN_DRAIN;
- else
- *flags |= GPIO_OPEN_SOURCE;
- }
-
- return desc;
-}
-
-static struct gpio_desc *acpi_find_gpio(struct device *dev,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags,
- enum gpio_lookup_flags *lookupflags)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpi_gpio_info info;
- struct gpio_desc *desc;
- char propname[32];
- int i;
-
- /* Try first from _DSD */
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id && strcmp(con_id, "gpios")) {
- snprintf(propname, sizeof(propname), "%s-%s",
- con_id, gpio_suffixes[i]);
- } else {
- snprintf(propname, sizeof(propname), "%s",
- gpio_suffixes[i]);
- }
-
- desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
- break;
- }
-
- /* Then from plain _CRS GPIOs */
- if (IS_ERR(desc)) {
- if (!acpi_can_fallback_to_crs(adev, con_id))
- return ERR_PTR(-ENOENT);
-
- desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
- if (IS_ERR(desc))
- return desc;
-
- if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
- info.gpioint) {
- dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
- return ERR_PTR(-ENOENT);
- }
- }
-
- if (info.polarity == GPIO_ACTIVE_LOW)
- *lookupflags |= GPIO_ACTIVE_LOW;
-
- return desc;
-}
-
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
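
The gpiolib core change adds an optional irq_valid_mask bitmap: allocated only when a chip asks for it, defaulting to "all offsets valid", and consulted before IRQ mappings are created or disposed. The bitmap bookkeeping in isolation, as a standalone sketch using plain C bit helpers rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* No mask allocated means every offset is IRQ-capable. */
static int irq_valid(const unsigned long *mask, unsigned int offset)
{
	return !mask || test_bit(offset, mask);
}

int main(void)
{
	unsigned int ngpio = 40, i;
	unsigned long *mask = calloc(BITS_TO_LONGS(ngpio), sizeof(long));

	if (!mask)
		return 1;

	for (i = 0; i < ngpio; i++)	/* default: assume all GPIOs are valid */
		set_bit(i, mask);
	clear_bit(12, mask);		/* a driver opts one line out */

	printf("offset 11 valid: %d\n", irq_valid(mask, 11));
	printf("offset 12 valid: %d\n", irq_valid(mask, 12));
	free(mask);
	return 0;
}
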
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 2d9ea5e0cab3..346fbda39220 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -20,6 +20,7 @@
enum of_gpio_flags;
enum gpiod_flags;
+enum gpio_lookup_flags;
struct acpi_device;
/**
@@ -86,6 +87,32 @@ struct acpi_gpio_info {
/* gpio suffixes used for ACPI and device tree lookup */
static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+#ifdef CONFIG_OF_GPIO
+struct gpio_desc *of_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpio_lookup_flags *flags);
+struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags);
+int of_gpiochip_add(struct gpio_chip *gc);
+void of_gpiochip_remove(struct gpio_chip *gc);
+#else
+static inline struct gpio_desc *of_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpio_lookup_flags *flags)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
+static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+#endif /* CONFIG_OF_GPIO */
+
#ifdef CONFIG_ACPI
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
@@ -93,9 +120,11 @@ void acpi_gpiochip_remove(struct gpio_chip *chip);
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
-struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
- const char *propname, int index,
- struct acpi_gpio_info *info);
+struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags);
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_info *info);
@@ -114,10 +143,11 @@ static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
static inline struct gpio_desc *
-acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
- int index, struct acpi_gpio_info *info)
+acpi_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx, enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOENT);
}
static inline struct gpio_desc *
acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname,
@@ -137,9 +167,6 @@ static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
}
#endif
-struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
-
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
void gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index eae5ef963cb7..2bd064493ae7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -790,6 +790,12 @@ static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
}
+static const struct i2c_lock_operations drm_dp_i2c_lock_ops = {
+ .lock_bus = lock_bus,
+ .trylock_bus = trylock_bus,
+ .unlock_bus = unlock_bus,
+};
+
/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
@@ -807,9 +813,7 @@ void drm_dp_aux_init(struct drm_dp_aux *aux)
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
- aux->ddc.lock_bus = lock_bus;
- aux->ddc.trylock_bus = trylock_bus;
- aux->ddc.unlock_bus = unlock_bus;
+ aux->ddc.lock_ops = &drm_dp_i2c_lock_ops;
}
EXPORT_SYMBOL(drm_dp_aux_init);
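
The drm_dp_helper hunk replaces three per-adapter function pointers with a single pointer to a shared const ops structure, so every AUX channel references one read-only vtable instead of carrying its own copies. A standalone sketch of that refactor with hypothetical type names:

#include <stdio.h>

struct bus;

struct lock_operations {
	void (*lock_bus)(struct bus *b);
	void (*unlock_bus)(struct bus *b);
};

struct bus {
	const char *name;
	const struct lock_operations *lock_ops;	/* one pointer, shared table */
};

static void aux_lock(struct bus *b)   { printf("%s: lock\n", b->name); }
static void aux_unlock(struct bus *b) { printf("%s: unlock\n", b->name); }

/* Single const vtable shared by every adapter instance. */
static const struct lock_operations aux_lock_ops = {
	.lock_bus   = aux_lock,
	.unlock_bus = aux_unlock,
};

int main(void)
{
	struct bus ddc = { .name = "dp-aux", .lock_ops = &aux_lock_ops };

	ddc.lock_ops->lock_bus(&ddc);
	ddc.lock_ops->unlock_bus(&ddc);
	return 0;
}
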
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 78ac4811bd3c..cd4599c0523b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -457,8 +457,6 @@ config LOGITECH_FF
- Logitech WingMan Cordless RumblePad
- Logitech WingMan Cordless RumblePad 2
- Logitech WingMan Force 3D
- - Logitech Formula Force EX
- - Logitech WingMan Formula Force GP
and if you want to enable force feedback for them.
Note: if you say N here, this device will still be supported, but without
@@ -488,15 +486,22 @@ config LOGIWHEELS_FF
select INPUT_FF_MEMLESS
default LOGITECH_FF
help
- Say Y here if you want to enable force feedback and range setting
+ Say Y here if you want to enable force feedback and range setting(*)
	  support for the following Logitech wheels:
+ - Logitech G25 (*)
+ - Logitech G27 (*)
+ - Logitech G29 (*)
- Logitech Driving Force
- - Logitech Driving Force Pro
- - Logitech Driving Force GT
- - Logitech G25
- - Logitech G27
- - Logitech MOMO/MOMO 2
- - Logitech Formula Force EX
+ - Logitech Driving Force Pro (*)
+ - Logitech Driving Force GT (*)
+ - Logitech Driving Force EX/RX
+ - Logitech Driving Force Wireless
+ - Logitech Speed Force Wireless
+ - Logitech MOMO Force
+ - Logitech MOMO Racing Force
+ - Logitech Formula Force GP
+ - Logitech Formula Force EX/RX
+ - Logitech Wingman Formula Force GP
config HID_MAGICMOUSE
tristate "Apple Magic Mouse/Trackpad multi-touch support"
@@ -862,6 +867,7 @@ config HID_WACOM
select POWER_SUPPLY
select NEW_LEDS
select LEDS_CLASS
+ select LEDS_TRIGGERS
help
Say Y here if you want to use the USB or BT version of the Wacom Intuos
or Graphire tablet.
@@ -967,4 +973,6 @@ source "drivers/hid/usbhid/Kconfig"
source "drivers/hid/i2c-hid/Kconfig"
+source "drivers/hid/intel-ish-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index fc4b2aa47f2e..86b2b5785fd2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -113,3 +113,5 @@ obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID) += i2c-hid/
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 048befde295a..ed9c0ea5b026 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -139,8 +139,8 @@ static int u1_read_write_register(struct hid_device *hdev, u32 address,
if (read_flag) {
readbuf = kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL);
if (!readbuf) {
- kfree(input);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit;
}
ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, readbuf,
@@ -149,6 +149,7 @@ static int u1_read_write_register(struct hid_device *hdev, u32 address,
if (ret < 0) {
dev_err(&hdev->dev, "failed read register (%d)\n", ret);
+ kfree(readbuf);
goto exit;
}
@@ -190,16 +191,16 @@ static int alps_raw_event(struct hid_device *hdev,
if (z != 0) {
input_mt_report_slot_state(hdata->input,
MT_TOOL_FINGER, 1);
+ input_report_abs(hdata->input,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(hdata->input,
+ ABS_MT_POSITION_Y, y);
+ input_report_abs(hdata->input,
+ ABS_MT_PRESSURE, z);
} else {
input_mt_report_slot_state(hdata->input,
MT_TOOL_FINGER, 0);
- break;
}
-
- input_report_abs(hdata->input, ABS_MT_POSITION_X, x);
- input_report_abs(hdata->input, ABS_MT_POSITION_Y, y);
- input_report_abs(hdata->input, ABS_MT_PRESSURE, z);
-
}
input_mt_sync_frame(hdata->input);
@@ -244,13 +245,13 @@ static int alps_raw_event(struct hid_device *hdev,
static int alps_post_reset(struct hid_device *hdev)
{
return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE, false);
+ NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
}
static int alps_post_resume(struct hid_device *hdev)
{
return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE, false);
+ NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
}
#endif /* CONFIG_PM */
@@ -383,7 +384,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input2 = input_allocate_device();
if (!input2) {
- input_free_device(input2);
+ ret = -ENOMEM;
goto exit;
}
@@ -425,7 +426,8 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
__set_bit(INPUT_PROP_POINTER, input2->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, input2->propbit);
- if (input_register_device(data->input2)) {
+ ret = input_register_device(data->input2);
+ if (ret) {
input_free_device(input2);
goto exit;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 08f53c7fd513..2b89c701076f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,6 +727,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
@@ -1916,7 +1917,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1982,6 +1983,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
@@ -2037,6 +2039,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
@@ -2083,6 +2086,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
@@ -2480,7 +2488,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-#if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE)
+#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4ed9a4fdfea7..cd59c79eebdd 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -268,6 +268,7 @@
#define USB_DEVICE_ID_CORSAIR_K95RGB 0x1b11
#define USB_DEVICE_ID_CORSAIR_M65RGB 0x1b12
#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
+#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15
#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
#define USB_VENDOR_ID_CREATIVELABS 0x041e
@@ -565,7 +566,7 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
-#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_DEVICE_ID_KYE_PENSKETCH_M912 0x5015
@@ -713,6 +714,7 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
@@ -859,6 +861,7 @@
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
+#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
#define USB_VENDOR_ID_SAMSUNG 0x0419
@@ -996,6 +999,10 @@
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064
#define USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850 0x0522
#define USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60 0x0781
+#define USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3 0x3031
+#define USB_DEVICE_ID_UGEE_TABLET_81 0x0081
+#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
+#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
#define USB_VENDOR_ID_UNITEC 0x227d
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
@@ -1085,4 +1092,7 @@
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003
+#define USB_VENDOR_ID_UGTIZER 0x2179
+#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index bcfaf32d9e5e..fb9ace1cef8b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -604,6 +604,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ /*
+ * Some lazy vendors declare 255 usages for System Control,
+	 * leading to the creation of ABS_X|Y axes and many others.
+	 * This would not be a problem if joydev did not then treat
+	 * the device as a joystick.
+ */
+ if (field->application == HID_GD_SYSTEM_CONTROL)
+ goto ignore;
+
if ((usage->hid & 0xf0) == 0x90) { /* D-pad */
switch (usage->hid) {
case HID_GD_UP: usage->hat_dir = 1; break;
@@ -953,6 +962,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_UP_HPVENDOR2:
set_bit(EV_REP, input->evbit);
switch (usage->hid & HID_USAGE) {
+ case 0x001: map_key_clear(KEY_MICMUTE); break;
case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break;
default: goto ignore;
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 32e6d8d9ded0..0dd1167b2c9b 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -19,11 +19,6 @@
#include "hid-ids.h"
-/*
- * See EasyPen i405X description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=KYE_EasyPen_i405X
- */
-
/* Original EasyPen i405X report descriptor size */
#define EASYPEN_I405X_RDESC_ORIG_SIZE 476
@@ -82,11 +77,6 @@ static __u8 easypen_i405x_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See MousePen i608X description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=KYE_MousePen_i608X
- */
-
/* Original MousePen i608X report descriptor size */
#define MOUSEPEN_I608X_RDESC_ORIG_SIZE 476
@@ -186,10 +176,104 @@ static __u8 mousepen_i608x_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See EasyPen M610X description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=KYE_EasyPen_M610X
- */
+/* Original MousePen i608X v2 report descriptor size */
+#define MOUSEPEN_I608X_V2_RDESC_ORIG_SIZE 482
+
+/* Fixed MousePen i608X v2 report descriptor */
+static __u8 mousepen_i608x_v2_rdesc_fixed[] = {
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0x15, 0x80, /* Logical Minimum (-128), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x02, /* Usage (Pen), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x10, /* Report ID (16), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xA0, /* Collection (Physical), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xA4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x55, 0xFD, /* Unit Exponent (-3), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x34, /* Physical Minimum (0), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
+ 0x27, 0x00, 0xA0, 0x00, 0x00, /* Logical Maximum (40960), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */
+ 0x26, 0x00, 0x78, /* Logical Maximum (30720), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xB4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x26, 0xFF, 0x07, /* Logical Maximum (2047), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x11, /* Report ID (17), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA0, /* Collection (Physical), */
+ 0x14, /* Logical Minimum (0), */
+ 0xA4, /* Push, */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x03, /* Usage Maximum (03h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xB4, /* Pop, */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xA4, /* Push, */
+ 0x55, 0xFD, /* Unit Exponent (-3), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x34, /* Physical Minimum (0), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
+ 0x27, 0x00, 0xA0, 0x00, 0x00, /* Logical Maximum (40960), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */
+ 0x26, 0x00, 0x78, /* Logical Maximum (30720), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xB4, /* Pop, */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x09, 0x38, /* Usage (Wheel), */
+ 0x15, 0xFF, /* Logical Minimum (-1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
/* Original EasyPen M610X report descriptor size */
#define EASYPEN_M610X_RDESC_ORIG_SIZE 476
@@ -454,12 +538,17 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
- case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
}
break;
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
+ if (*rsize == MOUSEPEN_I608X_V2_RDESC_ORIG_SIZE) {
+ rdesc = mousepen_i608x_v2_rdesc_fixed;
+ *rsize = sizeof(mousepen_i608x_v2_rdesc_fixed);
+ }
+ break;
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) {
rdesc = easypen_m610x_rdesc_fixed;
@@ -553,7 +642,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
switch (id->product) {
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
- case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
case USB_DEVICE_ID_KYE_PENSKETCH_M912:
ret = kye_tablet_enable(hdev);
@@ -586,7 +675,7 @@ static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index feb2be71f77c..76f644deb0a7 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -49,6 +49,7 @@
#define FV_RDESC_ORIG_SIZE 130
#define MOMO_RDESC_ORIG_SIZE 87
#define MOMO2_RDESC_ORIG_SIZE 87
+#define FFG_RDESC_ORIG_SIZE 85
/* Fixed report descriptors for Logitech Driving Force (and Pro)
* wheel controllers
@@ -334,6 +335,52 @@ static __u8 momo2_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 ffg_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystick), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x06, /* Report Count (6), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x06, /* Usage Maximum (06h), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x81, 0x01, /* Input (Constant), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
/*
* Certain Logitech keyboards send in report #3 keys which are far
* above the logical maximum described in descriptor. This extends
@@ -343,8 +390,6 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
- struct usb_device_descriptor *udesc;
- __u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
@@ -363,20 +408,18 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
switch (hdev->product) {
- /* Several wheels report as this id when operating in emulation mode. */
- case USB_DEVICE_ID_LOGITECH_WHEEL:
- udesc = &(hid_to_usb_dev(hdev)->descriptor);
- if (!udesc) {
- hid_err(hdev, "NULL USB device descriptor\n");
- break;
+ case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG:
+ if (*rsize == FFG_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Wingman Formula Force GP report descriptor\n");
+ rdesc = ffg_rdesc_fixed;
+ *rsize = sizeof(ffg_rdesc_fixed);
}
- bcdDevice = le16_to_cpu(udesc->bcdDevice);
- rev_maj = bcdDevice >> 8;
- rev_min = bcdDevice & 0xff;
+ break;
- /* Update the report descriptor for only the Driving Force wheel */
- if (rev_maj == 1 && rev_min == 2 &&
- *rsize == DF_RDESC_ORIG_SIZE) {
+ /* Several wheels report as this id when operating in emulation mode. */
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ if (*rsize == DF_RDESC_ORIG_SIZE) {
hid_info(hdev,
"fixing up Logitech Driving Force report descriptor\n");
rdesc = df_rdesc_fixed;
@@ -621,6 +664,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
usage->code == ABS_RZ)) {
switch (hdev->product) {
case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG:
case USB_DEVICE_ID_LOGITECH_WHEEL:
case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
@@ -657,6 +701,17 @@ static int lg_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *rd, int size)
+{
+ struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->quirks & LG_FF4)
+ return lg4ff_raw_event(hdev, report, rd, size, drv_data);
+
+ return 0;
+}
+
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
@@ -809,7 +864,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
- .driver_data = LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
.driver_data = LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
@@ -830,6 +885,7 @@ static struct hid_driver lg_driver = {
.input_mapping = lg_input_mapping,
.input_mapped = lg_input_mapped,
.event = lg_event,
+ .raw_event = lg_raw_event,
.probe = lg_probe,
.remove = lg_remove,
};
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index af3a8ec8a746..1fc12e357035 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -75,6 +75,7 @@ static void lg4ff_set_range_g25(struct hid_device *hid, u16 range);
struct lg4ff_wheel_data {
const u32 product_id;
+ u16 combine;
u16 range;
const u16 min_range;
const u16 max_range;
@@ -136,6 +137,7 @@ struct lg4ff_alternate_mode {
};
static const struct lg4ff_wheel lg4ff_devices[] = {
+ {USB_DEVICE_ID_LOGITECH_WINGMAN_FFG, lg4ff_wheel_effects, 40, 180, NULL},
{USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_dfp},
@@ -328,6 +330,56 @@ int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
}
}
+int lg4ff_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *rd, int size, struct lg_drv_data *drv_data)
+{
+ int offset;
+ struct lg4ff_device_entry *entry = drv_data->device_props;
+
+ if (!entry)
+ return 0;
+
+ /* adjust the HID report to present combined pedals data */
+ if (entry->wdata.combine) {
+ switch (entry->wdata.product_id) {
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ rd[5] = rd[3];
+ rd[6] = 0x7F;
+ return 1;
+ case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ rd[4] = rd[3];
+ rd[5] = 0x7F;
+ return 1;
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ rd[5] = rd[4];
+ rd[6] = 0x7F;
+ return 1;
+ case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
+ offset = 5;
+ break;
+ case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
+ offset = 6;
+ break;
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ offset = 3;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Compute a combined axis when wheel does not supply it */
+ rd[offset] = (0xFF + rd[offset] - rd[offset+1]) >> 1;
+ rd[offset+1] = 0x7F;
+ return 1;
+ }
+
+ return 0;
+}
+
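The combined-axis arithmetic in lg4ff_raw_event() folds the two pedal bytes into a single value centred at 0x7F: equal pedal values give the centre, and the difference pushes the axis towards either end. A small standalone check of the formula, illustrative only, with no assumption about which byte is throttle and which is brake:

#include <stdio.h>
#include <stdint.h>

/* Same expression as rd[offset] = (0xFF + rd[offset] - rd[offset+1]) >> 1 */
static uint8_t combine_pedals(uint8_t a, uint8_t b)
{
	return (0xFF + a - b) >> 1;
}

int main(void)
{
	printf("0x%02x\n", combine_pedals(0x80, 0x80));	/* equal pedals  -> 0x7f */
	printf("0x%02x\n", combine_pedals(0xFF, 0x00));	/* a fully ahead -> 0xff */
	printf("0x%02x\n", combine_pedals(0x00, 0xFF));	/* b fully ahead -> 0x00 */
	return 0;
}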
static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata, const struct lg4ff_wheel *wheel,
const struct lg4ff_multimode_wheel *mmode_wheel,
const u16 real_product_id)
@@ -345,6 +397,7 @@ static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata, const s
{
struct lg4ff_wheel_data t_wdata = { .product_id = wheel->product_id,
.real_product_id = real_product_id,
+ .combine = 0,
.min_range = wheel->min_range,
.max_range = wheel->max_range,
.set_range = wheel->set_range,
@@ -885,6 +938,58 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
}
static DEVICE_ATTR(alternate_modes, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_alternate_modes_show, lg4ff_alternate_modes_store);
+static ssize_t lg4ff_combine_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ size_t count;
+
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return 0;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return 0;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.combine);
+ return count;
+}
+
+static ssize_t lg4ff_combine_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct lg4ff_device_entry *entry;
+ struct lg_drv_data *drv_data;
+ u16 combine = simple_strtoul(buf, NULL, 10);
+
+ drv_data = hid_get_drvdata(hid);
+ if (!drv_data) {
+ hid_err(hid, "Private driver data not found!\n");
+ return -EINVAL;
+ }
+
+ entry = drv_data->device_props;
+ if (!entry) {
+ hid_err(hid, "Device properties not found!\n");
+ return -EINVAL;
+ }
+
+ if (combine > 1)
+ combine = 1;
+
+ entry->wdata.combine = combine;
+ return count;
+}
+static DEVICE_ATTR(combine_pedals, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_combine_show, lg4ff_combine_store);
+
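From userspace the new attribute behaves like any other sysfs file; the store handler above clamps anything greater than 1 down to 1. A minimal sketch, assuming a hypothetical device directory (the real path depends on the wheel's HID device name under /sys/bus/hid/devices/):

#include <stdio.h>

int main(void)
{
	/* Placeholder path; substitute the actual HID device directory. */
	const char *attr =
		"/sys/bus/hid/devices/0003:046D:C294.0001/combine_pedals";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("combine_pedals");
		return 1;
	}
	fputs("1\n", f);	/* enable combined pedals; "0" restores separate axes */
	return fclose(f) ? 1 : 0;
}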
/* Export the currently set range of the wheel */
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1259,6 +1364,9 @@ int lg4ff_init(struct hid_device *hid)
}
/* Create sysfs interface */
+ error = device_create_file(&hid->dev, &dev_attr_combine_pedals);
+ if (error)
+ hid_warn(hid, "Unable to create sysfs interface for \"combine\", errno %d\n", error);
error = device_create_file(&hid->dev, &dev_attr_range);
if (error)
hid_warn(hid, "Unable to create sysfs interface for \"range\", errno %d\n", error);
@@ -1358,6 +1466,7 @@ int lg4ff_deinit(struct hid_device *hid)
device_remove_file(&hid->dev, &dev_attr_alternate_modes);
}
+ device_remove_file(&hid->dev, &dev_attr_combine_pedals);
device_remove_file(&hid->dev, &dev_attr_range);
#ifdef CONFIG_LEDS_CLASS
{
diff --git a/drivers/hid/hid-lg4ff.h b/drivers/hid/hid-lg4ff.h
index 66201af44da3..de1f350e0bd3 100644
--- a/drivers/hid/hid-lg4ff.h
+++ b/drivers/hid/hid-lg4ff.h
@@ -6,11 +6,15 @@ extern int lg4ff_no_autoswitch; /* From hid-lg.c */
int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data);
+int lg4ff_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *rd, int size, struct lg_drv_data *drv_data);
int lg4ff_init(struct hid_device *hdev);
int lg4ff_deinit(struct hid_device *hdev);
#else
static inline int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data) { return 0; }
+static inline int lg4ff_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *rd, int size, struct lg_drv_data *drv_data) { return 0; }
static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
#endif
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index e924d555536c..c6cd392e9f99 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -28,7 +28,6 @@
#define MS_RDESC 0x08
#define MS_NOGET 0x10
#define MS_DUPLICATE_USAGES 0x20
-#define MS_RDESC_3K 0x40
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
@@ -45,13 +44,6 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[557] = 0x35;
rdesc[559] = 0x45;
}
- /* the same as above (s/usage/physical/) */
- if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
- rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
- rdesc[97] == 0xff) {
- rdesc[94] = 0x35;
- rdesc[96] = 0x45;
- }
return rdesc;
}
@@ -271,7 +263,7 @@ static const struct hid_device_id ms_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
- .driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
.driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
@@ -288,6 +280,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ .driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 2f84b26f1167..39e642686ff0 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 3d5ba5b51af3..658a607dc6d9 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -16,6 +16,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
+
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -798,6 +799,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
USB_DEVICE_ID_ITE_LENOVO_YOGA900),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0,
+ 0x22D8),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 310436a54a3f..b0bb99a821bd 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -8,7 +8,7 @@
* Copyright (c) 2012 David Dillow <dave@thedillows.org>
* Copyright (c) 2006-2013 Jiri Kosina
* Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
- * Copyright (c) 2014 Frank Praznik <frank.praznik@gmail.com>
+ * Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
*/
/*
@@ -51,6 +51,7 @@
#define NAVIGATION_CONTROLLER_USB BIT(9)
#define NAVIGATION_CONTROLLER_BT BIT(10)
#define SINO_LITE_CONTROLLER BIT(11)
+#define FUTUREMAX_DANCE_MAT BIT(12)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -65,6 +66,8 @@
MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER)
#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER |\
MOTION_CONTROLLER)
+#define SONY_BT_DEVICE (SIXAXIS_CONTROLLER_BT | DUALSHOCK4_CONTROLLER_BT |\
+ MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER_BT)
#define MAX_LEDS 4
@@ -1048,6 +1051,7 @@ struct sony_sc {
u8 mac_address[6];
u8 worker_initialized;
+ u8 defer_initialization;
u8 cable_state;
u8 battery_charging;
u8 battery_capacity;
@@ -1058,6 +1062,12 @@ struct sony_sc {
u8 led_count;
};
+static inline void sony_schedule_work(struct sony_sc *sc)
+{
+ if (!sc->defer_initialization)
+ schedule_work(&sc->state_worker);
+}
+
static u8 *sixaxis_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1125,7 +1135,7 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if (sc->quirks & SINO_LITE_CONTROLLER)
+ if (sc->quirks & (SINO_LITE_CONTROLLER | FUTUREMAX_DANCE_MAT))
return rdesc;
/*
@@ -1317,6 +1327,11 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
dualshock4_parse_report(sc, rd, size);
}
+ if (sc->defer_initialization) {
+ sc->defer_initialization = 0;
+ sony_schedule_work(sc);
+ }
+
return 0;
}
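The defer_initialization flag added in this hunk, together with the sony_schedule_work() helper above, postpones the state worker until the controller has produced its first input report (the USB Sixaxis case in sony_probe() below sets the flag). A toy model of that ordering, illustrative only and not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the defer_initialization handshake. */
struct toy_sc {
	bool defer_initialization;
	int work_runs;
};

static void toy_schedule_work(struct toy_sc *sc)
{
	if (!sc->defer_initialization)
		sc->work_runs++;	/* stands in for schedule_work() */
}

static void toy_first_input_report(struct toy_sc *sc)
{
	if (sc->defer_initialization) {
		sc->defer_initialization = false;
		toy_schedule_work(sc);
	}
}

int main(void)
{
	struct toy_sc sc = { .defer_initialization = true };

	toy_schedule_work(&sc);		/* probe-time request is held back */
	toy_first_input_report(&sc);	/* first report releases the worker */
	printf("state worker ran %d time(s)\n", sc.work_runs);	/* prints 1 */
	return 0;
}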
@@ -1554,7 +1569,7 @@ static void buzz_set_leds(struct sony_sc *sc)
static void sony_set_leds(struct sony_sc *sc)
{
if (!(sc->quirks & BUZZ_CONTROLLER))
- schedule_work(&sc->state_worker);
+ sony_schedule_work(sc);
else
buzz_set_leds(sc);
}
@@ -1665,7 +1680,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
new_off != drv_data->led_delay_off[n]) {
drv_data->led_delay_on[n] = new_on;
drv_data->led_delay_off[n] = new_off;
- schedule_work(&drv_data->state_worker);
+ sony_schedule_work(drv_data);
}
return 0;
@@ -1865,6 +1880,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
u8 *buf = sc->output_report_dmabuf;
int offset;
+ /*
+ * NOTE: The buf[1] field of the Bluetooth report controls
+ * the Dualshock 4 reporting rate.
+ *
+ * Known values include:
+ *
+ * 0x80 - 1000Hz (full speed)
+ * 0xA0 - 31Hz
+ * 0xB0 - 20Hz
+ * 0xD0 - 66Hz
+ */
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
memset(buf, 0, DS4_REPORT_0x05_SIZE);
buf[0] = 0x05;
@@ -1976,7 +2002,7 @@ static int sony_play_effect(struct input_dev *dev, void *data,
sc->left = effect->u.rumble.strong_magnitude / 256;
sc->right = effect->u.rumble.weak_magnitude / 256;
- schedule_work(&sc->state_worker);
+ sony_schedule_work(sc);
return 0;
}
@@ -2039,8 +2065,11 @@ static int sony_battery_get_property(struct power_supply *psy,
return ret;
}
-static int sony_battery_probe(struct sony_sc *sc)
+static int sony_battery_probe(struct sony_sc *sc, int append_dev_id)
{
+ const char *battery_str_fmt = append_dev_id ?
+ "sony_controller_battery_%pMR_%i" :
+ "sony_controller_battery_%pMR";
struct power_supply_config psy_cfg = { .drv_data = sc, };
struct hid_device *hdev = sc->hdev;
int ret;
@@ -2056,9 +2085,8 @@ static int sony_battery_probe(struct sony_sc *sc)
sc->battery_desc.get_property = sony_battery_get_property;
sc->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
sc->battery_desc.use_for_apm = 0;
- sc->battery_desc.name = kasprintf(GFP_KERNEL,
- "sony_controller_battery_%pMR",
- sc->mac_address);
+ sc->battery_desc.name = kasprintf(GFP_KERNEL, battery_str_fmt,
+ sc->mac_address, sc->device_id);
if (!sc->battery_desc.name)
return -ENOMEM;
@@ -2094,7 +2122,21 @@ static void sony_battery_remove(struct sony_sc *sc)
* it will show up as two devices. A global list of connected controllers and
* their MAC addresses is maintained to ensure that a device is only connected
* once.
+ *
+ * Some USB-only devices masquerade as Sixaxis controllers and all have the
+ * same dummy Bluetooth address, so a comparison of the connection type is
+ * required. Devices are only rejected in the case where two devices have
+ * matching Bluetooth addresses on different bus types.
*/
+static inline int sony_compare_connection_type(struct sony_sc *sc0,
+ struct sony_sc *sc1)
+{
+ const int sc0_not_bt = !(sc0->quirks & SONY_BT_DEVICE);
+ const int sc1_not_bt = !(sc1->quirks & SONY_BT_DEVICE);
+
+ return sc0_not_bt == sc1_not_bt;
+}
+
static int sony_check_add_dev_list(struct sony_sc *sc)
{
struct sony_sc *entry;
@@ -2107,9 +2149,14 @@ static int sony_check_add_dev_list(struct sony_sc *sc)
ret = memcmp(sc->mac_address, entry->mac_address,
sizeof(sc->mac_address));
if (!ret) {
- ret = -EEXIST;
- hid_info(sc->hdev, "controller with MAC address %pMR already connected\n",
+ if (sony_compare_connection_type(sc, entry)) {
+ ret = 1;
+ } else {
+ ret = -EEXIST;
+ hid_info(sc->hdev,
+ "controller with MAC address %pMR already connected\n",
sc->mac_address);
+ }
goto unlock;
}
}
@@ -2285,10 +2332,14 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
+ int append_dev_id;
unsigned long quirks = id->driver_data;
struct sony_sc *sc;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
if (sc == NULL) {
hid_err(hdev, "can't alloc sony descriptor\n");
@@ -2341,9 +2392,16 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
* the Sixaxis does not want the report_id as part of the data
* packet, so we have to discard buf[0] when sending the actual
* control message, even for numbered reports, humpf!
+ *
+ * Additionally, the Sixaxis on USB isn't properly initialized
+ * until the PS logo button is pressed and as such won't retain
+ * any state set by an output report, so the initial
+ * configuration report is deferred until the first input
+ * report arrives.
*/
hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID;
+ sc->defer_initialization = 1;
ret = sixaxis_set_operational_usb(hdev);
sony_init_output_report(sc, sixaxis_send_output_report);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_BT) ||
@@ -2379,7 +2437,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0)
goto err_stop;
- ret = sony_check_add(sc);
+ ret = append_dev_id = sony_check_add(sc);
if (ret < 0)
goto err_stop;
@@ -2390,7 +2448,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (sc->quirks & SONY_BATTERY_SUPPORT) {
- ret = sony_battery_probe(sc);
+ ret = sony_battery_probe(sc, append_dev_id);
if (ret < 0)
goto err_stop;
@@ -2486,8 +2544,10 @@ static int sony_resume(struct hid_device *hdev)
* reinitialized on resume or they won't behave properly.
*/
if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
- (sc->quirks & NAVIGATION_CONTROLLER_USB))
+ (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
sixaxis_set_operational_usb(sc->hdev);
+ sc->defer_initialization = 1;
+ }
sony_set_leds(sc);
}
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 85ac43517e3f..1509d7287ff3 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -21,13 +21,6 @@
#include "hid-ids.h"
-/*
- * See WPXXXXU model descriptions, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP4030U
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP5540U
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP8060U
- */
-
/* Size of the original descriptor of WPXXXXU tablets */
#define WPXXXXU_RDESC_ORIG_SIZE 212
@@ -221,11 +214,6 @@ static __u8 wp8060u_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See WP1062 description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_WP1062
- */
-
/* Size of the original descriptor of WP1062 tablet */
#define WP1062_RDESC_ORIG_SIZE 254
@@ -274,11 +262,6 @@ static __u8 wp1062_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See PF1209 description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_PF1209
- */
-
/* Size of the original descriptor of PF1209 tablet */
#define PF1209_RDESC_ORIG_SIZE 234
@@ -356,11 +339,6 @@ static __u8 pf1209_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See TWHL850 description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Wireless_Tablet_TWHL850
- */
-
/* Size of the original descriptors of TWHL850 tablet */
#define TWHL850_RDESC_ORIG_SIZE0 182
#define TWHL850_RDESC_ORIG_SIZE1 161
@@ -469,11 +447,6 @@ static __u8 twhl850_rdesc_fixed2[] = {
0xC0 /* End Collection */
};
-/*
- * See TWHA60 description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_TWHA60
- */
-
/* Size of the original descriptors of TWHA60 tablet */
#define TWHA60_RDESC_ORIG_SIZE0 254
#define TWHA60_RDESC_ORIG_SIZE1 139
@@ -613,6 +586,27 @@ static const __u8 uclogic_tablet_rdesc_template[] = {
0xC0 /* End Collection */
};
+/* Fixed virtual pad report descriptor */
+static const __u8 uclogic_buttonpad_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0xF7, /* Report ID (247), */
+ 0x05, 0x0D, /* Usage Page (Digitizers), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x18, /* Report Count (24), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x08, /* Usage Maximum (08h), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection */
+ 0xC0 /* End Collection */
+};
+
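Decoded, the virtual pad descriptor above describes a five-byte report: the 0xF7 report ID, three bytes of constant padding (24 bits) and one byte carrying the eight button bits; uclogic_raw_event() further down reroutes pen reports with bit 0x20 set in data[1] to this report ID. An illustrative layout, not part of the patch:

#include <stdint.h>

/* Illustrative view of the report described by uclogic_buttonpad_rdesc. */
struct uclogic_buttonpad_report {
	uint8_t report_id;	/* always 0xF7 */
	uint8_t padding[3];	/* 24 constant bits */
	uint8_t buttons;	/* buttons 1-8, one bit each */
} __attribute__((packed));

_Static_assert(sizeof(struct uclogic_buttonpad_report) == 5,
	       "five-byte report as described by the descriptor");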
/* Parameter indices */
enum uclogic_prm {
UCLOGIC_PRM_X_LM = 1,
@@ -628,6 +622,7 @@ struct uclogic_drvdata {
unsigned int rsize;
bool invert_pen_inrange;
bool ignore_pen_usage;
+ bool has_virtual_pad_interface;
};
static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -637,6 +632,12 @@ static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
__u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
+ if (drvdata->rdesc != NULL) {
+ rdesc = drvdata->rdesc;
+ *rsize = drvdata->rsize;
+ return rdesc;
+ }
+
switch (hdev->product) {
case USB_DEVICE_ID_UCLOGIC_TABLET_PF1209:
if (*rsize == PF1209_RDESC_ORIG_SIZE) {
@@ -706,11 +707,6 @@ static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
}
break;
- default:
- if (drvdata->rdesc != NULL) {
- rdesc = drvdata->rdesc;
- *rsize = drvdata->rsize;
- }
}
return rdesc;
@@ -804,7 +800,6 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
len = UCLOGIC_PRM_NUM * sizeof(*buf);
buf = kmalloc(len, GFP_KERNEL);
if (buf == NULL) {
- hid_err(hdev, "failed to allocate parameter buffer\n");
rc = -ENOMEM;
goto cleanup;
}
@@ -848,7 +843,6 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
sizeof(uclogic_tablet_rdesc_template),
GFP_KERNEL);
if (drvdata->rdesc == NULL) {
- hid_err(hdev, "failed to allocate fixed rdesc\n");
rc = -ENOMEM;
goto cleanup;
}
@@ -876,11 +870,75 @@ cleanup:
return rc;
}
+/**
+ * Enable actual button mode.
+ *
+ * @hdev: HID device
+ */
+static int uclogic_button_enable(struct hid_device *hdev)
+{
+ int rc;
+ struct usb_device *usb_dev = hid_to_usb_dev(hdev);
+ struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
+ char *str_buf;
+ size_t str_len = 16;
+ unsigned char *rdesc;
+ size_t rdesc_len;
+
+ str_buf = kzalloc(str_len, GFP_KERNEL);
+ if (str_buf == NULL) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Enable abstract keyboard mode */
+ rc = usb_string(usb_dev, 0x7b, str_buf, str_len);
+ if (rc == -EPIPE) {
+ hid_info(hdev, "button mode setting not found\n");
+ rc = 0;
+ goto cleanup;
+ } else if (rc < 0) {
+ hid_err(hdev, "failed to enable abstract keyboard\n");
+ goto cleanup;
+ } else if (strncmp(str_buf, "HK On", rc)) {
+ hid_info(hdev, "invalid answer when requesting buttons: '%s'\n",
+ str_buf);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Re-allocate fixed report descriptor */
+ rdesc_len = drvdata->rsize + sizeof(uclogic_buttonpad_rdesc);
+ rdesc = devm_kzalloc(&hdev->dev, rdesc_len, GFP_KERNEL);
+ if (!rdesc) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ memcpy(rdesc, drvdata->rdesc, drvdata->rsize);
+
+ /* Append the buttonpad descriptor */
+ memcpy(rdesc + drvdata->rsize, uclogic_buttonpad_rdesc,
+ sizeof(uclogic_buttonpad_rdesc));
+
+ /* clean up old rdesc and use the new one */
+ drvdata->rsize = rdesc_len;
+ devm_kfree(&hdev->dev, drvdata->rdesc);
+ drvdata->rdesc = rdesc;
+
+ rc = 0;
+
+cleanup:
+ kfree(str_buf);
+ return rc;
+}
+
static int uclogic_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int rc;
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *udev = hid_to_usb_dev(hdev);
struct uclogic_drvdata *drvdata;
/*
@@ -899,6 +957,10 @@ static int uclogic_probe(struct hid_device *hdev,
switch (id->product) {
case USB_DEVICE_ID_HUION_TABLET:
+ case USB_DEVICE_ID_YIYNOVA_TABLET:
+ case USB_DEVICE_ID_UGEE_TABLET_81:
+ case USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3:
+ case USB_DEVICE_ID_UGEE_TABLET_45:
/* If this is the pen interface */
if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
rc = uclogic_tablet_enable(hdev);
@@ -907,10 +969,48 @@ static int uclogic_probe(struct hid_device *hdev,
return rc;
}
drvdata->invert_pen_inrange = true;
+
+ rc = uclogic_button_enable(hdev);
+ drvdata->has_virtual_pad_interface = !rc;
} else {
drvdata->ignore_pen_usage = true;
}
break;
+ case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+ /* If this is the pen interface */
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ rc = uclogic_tablet_enable(hdev);
+ if (rc) {
+ hid_err(hdev, "tablet enabling failed\n");
+ return rc;
+ }
+ drvdata->invert_pen_inrange = true;
+ } else {
+ drvdata->ignore_pen_usage = true;
+ }
+ break;
+ case USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60:
+ /*
+ * If it is the three-interface version, which is known to
+ * respond to initialization.
+ */
+ if (udev->config->desc.bNumInterfaces == 3) {
+ /* If it is the pen interface */
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
+ rc = uclogic_tablet_enable(hdev);
+ if (rc) {
+ hid_err(hdev, "tablet enabling failed\n");
+ return rc;
+ }
+ drvdata->invert_pen_inrange = true;
+
+ rc = uclogic_button_enable(hdev);
+ drvdata->has_virtual_pad_interface = !rc;
+ } else {
+ drvdata->ignore_pen_usage = true;
+ }
+ }
+ break;
}
rc = hid_parse(hdev);
@@ -933,12 +1033,16 @@ static int uclogic_raw_event(struct hid_device *hdev, struct hid_report *report,
{
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
- if ((drvdata->invert_pen_inrange) &&
- (report->type == HID_INPUT_REPORT) &&
+ if ((report->type == HID_INPUT_REPORT) &&
(report->id == UCLOGIC_PEN_REPORT_ID) &&
- (size >= 2))
- /* Invert the in-range bit */
- data[1] ^= 0x40;
+ (size >= 2)) {
+ if (drvdata->has_virtual_pad_interface && (data[1] & 0x20))
+ /* Change to virtual frame button report ID */
+ data[0] = 0xf7;
+ else if (drvdata->invert_pen_inrange)
+ /* Invert the in-range bit */
+ data[1] ^= 0x40;
+ }
return 0;
}
@@ -960,6 +1064,11 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index 059931d7b392..a91aabe4a70a 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -42,11 +42,6 @@
* 02 16 02 ink
*/
-/*
- * See Slim Tablet 5.8 inch description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Slim_Tablet_5.8%22
- */
-
/* Size of the original report descriptor of Slim Tablet 5.8 inch */
#define SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE 222
@@ -98,11 +93,6 @@ static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See Slim Tablet 12.1 inch description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Slim_Tablet_12.1%22
- */
-
/* Size of the original report descriptor of Slim Tablet 12.1 inch */
#define SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE 269
@@ -154,11 +144,6 @@ static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See Q Pad description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Q_Pad
- */
-
/* Size of the original report descriptor of Q Pad */
#define Q_PAD_RDESC_ORIG_SIZE 241
@@ -210,11 +195,6 @@ static __u8 q_pad_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See description, device and HID report descriptors of tablet with PID 0038 at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_PID_0038
- */
-
/* Size of the original report descriptor of tablet with PID 0038 */
#define PID_0038_RDESC_ORIG_SIZE 241
@@ -268,11 +248,6 @@ static __u8 pid_0038_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See Media Tablet 10.6 inch description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Media_Tablet_10.6%22
- */
-
/* Size of the original report descriptor of Media Tablet 10.6 inch */
#define MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE 300
@@ -386,11 +361,6 @@ static __u8 media_tablet_10_6_inch_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See Media Tablet 14.1 inch description, device and HID report descriptors at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Media_Tablet_14.1%22
- */
-
/* Size of the original report descriptor of Media Tablet 14.1 inch */
#define MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE 309
@@ -502,12 +472,6 @@ static __u8 media_tablet_14_1_inch_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-/*
- * See Sirius Battery Free Tablet description, device and HID report descriptors
- * at
- * http://sf.net/apps/mediawiki/digimend/?title=Waltop_Sirius_Battery_Free_Tablet
- */
-
/* Size of the original report descriptor of Sirius Battery Free Tablet */
#define SIRIUS_BATTERY_FREE_TABLET_RDESC_ORIG_SIZE 335
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
new file mode 100644
index 000000000000..ea065b3684a2
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -0,0 +1,17 @@
+menu "Intel ISH HID support"
+ depends on X86_64 && PCI
+
+config INTEL_ISH_HID
+ tristate "Intel Integrated Sensor Hub"
+ default n
+ select HID
+ help
+ The Integrated Sensor Hub (ISH) enables offloading
+ sensor polling and algorithm processing to a dedicated low power
+ processor in the chipset. This allows the core processor to go into
+ low power modes more often, resulting in increased battery life.
+ The current processors that support ISH are: Cherrytrail, Skylake,
+ Broxton and Kaby Lake.
+
+ Say Y here if you want to support Intel ISH. If unsure, say N.
+endmenu
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
new file mode 100644
index 000000000000..8c08b0b358b1
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile - Intel ISH HID drivers
+# Copyright (c) 2014-2016, Intel Corporation.
+#
+#
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp.o
+intel-ishtp-objs := ishtp/init.o
+intel-ishtp-objs += ishtp/hbm.o
+intel-ishtp-objs += ishtp/client.o
+intel-ishtp-objs += ishtp/bus.o
+intel-ishtp-objs += ishtp/dma-if.o
+intel-ishtp-objs += ishtp/client-buffers.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-ipc.o
+intel-ish-ipc-objs := ipc/ipc.o
+intel-ish-ipc-objs += ipc/pci-ish.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
+intel-ishtp-hid-objs := ishtp-hid.o
+intel-ishtp-hid-objs += ishtp-hid-client.o
+
+ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
new file mode 100644
index 000000000000..ab68afcba2a2
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
@@ -0,0 +1,220 @@
+/*
+ * ISH registers definitions
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_ISH_REGS_H_
+#define _ISHTP_ISH_REGS_H_
+
+
+/*** IPC PCI Offsets and sizes ***/
+/* ISH IPC Base Address */
+#define IPC_REG_BASE 0x0000
+/* Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_CHV_AB (IPC_REG_BASE + 0x00)
+/* Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_CHV_AB (IPC_REG_BASE + 0x04)
+/*BXT, CHV_K0*/
+/*Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_BXT (IPC_REG_BASE + 0x0C)
+/*Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_BXT (IPC_REG_BASE + 0x08)
+/***********************************/
+/* ISH Host Firmware status Register */
+#define IPC_REG_ISH_HOST_FWSTS (IPC_REG_BASE + 0x34)
+/* Host Communication Register */
+#define IPC_REG_HOST_COMM (IPC_REG_BASE + 0x38)
+/* Reset register */
+#define IPC_REG_ISH_RST (IPC_REG_BASE + 0x44)
+
+/* Inbound doorbell register Host to ISH */
+#define IPC_REG_HOST2ISH_DRBL (IPC_REG_BASE + 0x48)
+/* Outbound doorbell register ISH to Host */
+#define IPC_REG_ISH2HOST_DRBL (IPC_REG_BASE + 0x54)
+/* ISH to HOST message registers */
+#define IPC_REG_ISH2HOST_MSG (IPC_REG_BASE + 0x60)
+/* HOST to ISH message registers */
+#define IPC_REG_HOST2ISH_MSG (IPC_REG_BASE + 0xE0)
+/* REMAP2 to enable DMA (D3 RCR) */
+#define IPC_REG_ISH_RMP2 (IPC_REG_BASE + 0x368)
+
+#define IPC_REG_MAX (IPC_REG_BASE + 0x400)
+
+/*** register bits - HISR ***/
+/* bit corresponds to the HOST2ISH interrupt in PISR and PIMR registers */
+#define IPC_INT_HOST2ISH_BIT (1<<0)
+/***********************************/
+/*CHV_A0, CHV_B0*/
+/* bit corresponds to the ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_CHV_AB (1<<3)
+/*BXT, CHV_K0*/
+/* bit corresponds to the ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_BXT (1<<0)
+/***********************************/
+
+/* bit corresponds to the ISH2HOST busy clear interrupt in PIMR register */
+#define IPC_INT_ISH2HOST_CLR_MASK_BIT (1<<11)
+
+/* offset of ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_OFFS (0)
+
+/* bit corresponds to the ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_BIT (1<<IPC_INT_ISH2HOST_CLR_OFFS)
+
+/* bit corresponds to the busy bit in doorbell registers */
+#define IPC_DRBL_BUSY_OFFS (31)
+#define IPC_DRBL_BUSY_BIT (1<<IPC_DRBL_BUSY_OFFS)
+
+#define IPC_HOST_OWNS_MSG_OFFS (30)
+
+/*
+ * A0: bit means that host owns MSGnn registers and is reading them.
+ * ISH FW may not write to them
+ */
+#define IPC_HOST_OWNS_MSG_BIT (1<<IPC_HOST_OWNS_MSG_OFFS)
+
+/*
+ * Host status bits (HOSTCOMM)
+ */
+/* bit corresponds to the host ready bit in Host Status Register (HOST_COMM) */
+#define IPC_HOSTCOMM_READY_OFFS (7)
+#define IPC_HOSTCOMM_READY_BIT (1<<IPC_HOSTCOMM_READY_OFFS)
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB (31)
+#define IPC_HOSTCOMM_INT_EN_BIT_CHV_AB \
+ (1<<IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB)
+/*BXT, CHV_K0*/
+#define IPC_PIMR_INT_EN_OFFS_BXT (0)
+#define IPC_PIMR_INT_EN_BIT_BXT (1<<IPC_PIMR_INT_EN_OFFS_BXT)
+
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT (8)
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_BIT \
+ (1<<IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT)
+/***********************************/
+/*
+ * both Host and ISH have ILUP at bit 0
+ * bit corresponds to the host ready bit in both status registers
+ */
+#define IPC_ILUP_OFFS (0)
+#define IPC_ILUP_BIT (1<<IPC_ILUP_OFFS)
+
+/*
+ * FW status bits (relevant)
+ */
+#define IPC_FWSTS_ILUP 0x1
+#define IPC_FWSTS_ISHTP_UP (1<<1)
+#define IPC_FWSTS_DMA0 (1<<16)
+#define IPC_FWSTS_DMA1 (1<<17)
+#define IPC_FWSTS_DMA2 (1<<18)
+#define IPC_FWSTS_DMA3 (1<<19)
+
+#define IPC_ISH_IN_DMA \
+ (IPC_FWSTS_DMA0 | IPC_FWSTS_DMA1 | IPC_FWSTS_DMA2 | IPC_FWSTS_DMA3)
+
+/* bit corresponds to the host ready bit in ISH FW Status Register */
+#define IPC_ISH_ISHTP_READY_OFFS (1)
+#define IPC_ISH_ISHTP_READY_BIT (1<<IPC_ISH_ISHTP_READY_OFFS)
+
+#define IPC_RMP2_DMA_ENABLED 0x1 /* Value to enable DMA, per D3 RCR */
+
+#define IPC_MSG_MAX_SIZE 0x80
+
+
+#define IPC_HEADER_LENGTH_MASK 0x03FF
+#define IPC_HEADER_PROTOCOL_MASK 0x0F
+#define IPC_HEADER_MNG_CMD_MASK 0x0F
+
+#define IPC_HEADER_LENGTH_OFFSET 0
+#define IPC_HEADER_PROTOCOL_OFFSET 10
+#define IPC_HEADER_MNG_CMD_OFFSET 16
+
+#define IPC_HEADER_GET_LENGTH(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_LENGTH_OFFSET)&IPC_HEADER_LENGTH_MASK)
+#define IPC_HEADER_GET_PROTOCOL(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_PROTOCOL_OFFSET)&IPC_HEADER_PROTOCOL_MASK)
+#define IPC_HEADER_GET_MNG_CMD(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_MNG_CMD_OFFSET)&IPC_HEADER_MNG_CMD_MASK)
+
+#define IPC_IS_BUSY(drbl_reg) \
+ (((drbl_reg)&IPC_DRBL_BUSY_BIT) == ((uint32_t)IPC_DRBL_BUSY_BIT))
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_INT_FROM_ISH_TO_HOST_CHV_AB(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_CHV_AB) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_CHV_AB))
+/*BXT, CHV_K0*/
+#define IPC_INT_FROM_ISH_TO_HOST_BXT(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_BXT) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_BXT))
+/***********************************/
+
+#define IPC_BUILD_HEADER(length, protocol, busy) \
+ (((busy)<<IPC_DRBL_BUSY_OFFS) | \
+ ((protocol) << IPC_HEADER_PROTOCOL_OFFSET) | \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
+
+#define IPC_BUILD_MNG_MSG(cmd, length) \
+ (((1)<<IPC_DRBL_BUSY_OFFS)| \
+ ((IPC_PROTOCOL_MNG)<<IPC_HEADER_PROTOCOL_OFFSET)| \
+ ((cmd)<<IPC_HEADER_MNG_CMD_OFFSET)| \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
+
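A worked example of the doorbell word built by IPC_BUILD_MNG_MSG() and unpacked by the GET_* helpers above; illustrative only, using a 20-byte payload (the packed size of struct ipc_time_update_msg defined in hw-ish.h below), chosen here purely for concreteness:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* MNG_SYNC_FW_CLOCK (5), IPC_PROTOCOL_MNG (3), 20-byte payload. */
	uint32_t drbl = (1u << 31) | (5u << 16) | (3u << 10) | 20;

	printf("doorbell = %#010x\n", drbl);		/* 0x80050c14 */
	printf("length   = %u\n", drbl & 0x03FF);	/* 20 */
	printf("protocol = %u\n", (drbl >> 10) & 0x0F);	/* 3  */
	printf("mng cmd  = %u\n", (drbl >> 16) & 0x0F);	/* 5  */
	printf("busy     = %u\n", drbl >> 31);		/* 1  */
	return 0;
}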
+
+#define IPC_SET_HOST_READY(host_status) \
+ ((host_status) |= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_SET_HOST_ILUP(host_status) \
+ ((host_status) |= (IPC_ILUP_BIT))
+
+#define IPC_CLEAR_HOST_READY(host_status) \
+ ((host_status) ^= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_CLEAR_HOST_ILUP(host_status) \
+ ((host_status) ^= (IPC_ILUP_BIT))
+
+/* todo - temp until PIMR HW ready */
+#define IPC_HOST_BUSY_READING_OFFS 6
+
+/* bit corresponds to the host ready bit in Host Status Register (HOST_COMM) */
+#define IPC_HOST_BUSY_READING_BIT (1<<IPC_HOST_BUSY_READING_OFFS)
+
+#define IPC_SET_HOST_BUSY_READING(host_status) \
+ ((host_status) |= (IPC_HOST_BUSY_READING_BIT))
+
+#define IPC_CLEAR_HOST_BUSY_READING(host_status)\
+ ((host_status) ^= (IPC_HOST_BUSY_READING_BIT))
+
+
+#define IPC_IS_ISH_ISHTP_READY(ish_status) \
+ (((ish_status) & IPC_ISH_ISHTP_READY_BIT) == \
+ ((uint32_t)IPC_ISH_ISHTP_READY_BIT))
+
+#define IPC_IS_ISH_ILUP(ish_status) \
+ (((ish_status) & IPC_ILUP_BIT) == ((uint32_t)IPC_ILUP_BIT))
+
+
+#define IPC_PROTOCOL_ISHTP 1
+#define IPC_PROTOCOL_MNG 3
+
+#define MNG_RX_CMPL_ENABLE 0
+#define MNG_RX_CMPL_DISABLE 1
+#define MNG_RX_CMPL_INDICATION 2
+#define MNG_RESET_NOTIFY 3
+#define MNG_RESET_NOTIFY_ACK 4
+#define MNG_SYNC_FW_CLOCK 5
+#define MNG_ILLEGAL_CMD 0xFF
+
+#endif /* _ISHTP_ISH_REGS_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
new file mode 100644
index 000000000000..46615a03e78f
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -0,0 +1,71 @@
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_HW_ISH_H_
+#define _ISHTP_HW_ISH_H_
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "hw-ish-regs.h"
+#include "ishtp-dev.h"
+
+#define CHV_DEVICE_ID 0x22D8
+#define BXT_Ax_DEVICE_ID 0x0AA2
+#define BXT_Bx_DEVICE_ID 0x1AA2
+#define APL_Ax_DEVICE_ID 0x5AA2
+#define SPT_Ax_DEVICE_ID 0x9D35
+
+#define REVISION_ID_CHT_A0 0x6
+#define REVISION_ID_CHT_Ax_SI 0x0
+#define REVISION_ID_CHT_Bx_SI 0x10
+#define REVISION_ID_CHT_Kx_SI 0x20
+#define REVISION_ID_CHT_Dx_SI 0x30
+#define REVISION_ID_CHT_B0 0xB0
+#define REVISION_ID_SI_MASK 0x70
+
+struct ipc_rst_payload_type {
+ uint16_t reset_id;
+ uint16_t reserved;
+};
+
+struct time_sync_format {
+ uint8_t ts1_source;
+ uint8_t ts2_source;
+ uint16_t reserved;
+} __packed;
+
+struct ipc_time_update_msg {
+ uint64_t primary_host_time;
+ struct time_sync_format sync_info;
+ uint64_t secondary_host_time;
+} __packed;
+
+enum {
+ HOST_UTC_TIME_USEC = 0,
+ HOST_SYSTEM_TIME_USEC = 1
+};
+
+struct ish_hw {
+ void __iomem *mem_addr;
+};
+
+#define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
+
+irqreturn_t ish_irq_handler(int irq, void *dev_id);
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
+int ish_hw_start(struct ishtp_device *dev);
+void ish_device_disable(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
new file mode 100644
index 000000000000..e2517c11e0ee
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -0,0 +1,881 @@
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include "client.h"
+#include "hw-ish.h"
+#include "utils.h"
+#include "hbm.h"
+
+/* For FW reset flow */
+static struct work_struct fw_reset_work;
+static struct ishtp_device *ishtp_dev;
+
+/**
+ * ish_reg_read() - Read register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ *
+ * Read 32 bit register at a given offset
+ *
+ * Return: Read register value
+ */
+static inline uint32_t ish_reg_read(const struct ishtp_device *dev,
+ unsigned long offset)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ return readl(hw->mem_addr + offset);
+}
+
+/**
+ * ish_reg_write() - Write register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ * @value: Value to write
+ *
+ * Writes 32 bit register at a given offset
+ */
+static inline void ish_reg_write(struct ishtp_device *dev,
+ unsigned long offset,
+ uint32_t value)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ writel(value, hw->mem_addr + offset);
+}
+
+/**
+ * _ish_read_fw_sts_reg() - Read FW status register
+ * @dev: ISHTP device pointer
+ *
+ * Read FW status register
+ *
+ * Return: Read register value
+ */
+static inline uint32_t _ish_read_fw_sts_reg(struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
+ * check_generated_interrupt() - Check for ISH interrupt
+ * @dev: ISHTP device pointer
+ *
+ * Check if an interrupt was generated for ISH
+ *
+ * Return: true or false
+ */
+static bool check_generated_interrupt(struct ishtp_device *dev)
+{
+ bool interrupt_generated = true;
+ uint32_t pisr_val = 0;
+
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_CHV_AB);
+ interrupt_generated =
+ IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
+ } else {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
+ interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
+ }
+
+ return interrupt_generated;
+}
+
+/**
+ * ish_is_input_ready() - Check if FW ready for RX
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready for receiving data
+ *
+ * Return: true or false
+ */
+static bool ish_is_input_ready(struct ishtp_device *dev)
+{
+ uint32_t doorbell_val;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_HOST2ISH_DRBL);
+ return !IPC_IS_BUSY(doorbell_val);
+}
+
+/**
+ * set_host_ready() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void set_host_ready(struct ishtp_device *dev)
+{
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ if (dev->pdev->revision == REVISION_ID_CHT_A0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Ax_SI)
+ ish_reg_write(dev, IPC_REG_HOST_COMM, 0x81);
+ else if (dev->pdev->revision == REVISION_ID_CHT_B0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Bx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Kx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Dx_SI) {
+ uint32_t host_comm_val;
+
+ host_comm_val = ish_reg_read(dev, IPC_REG_HOST_COMM);
+ host_comm_val |= IPC_HOSTCOMM_INT_EN_BIT_CHV_AB | 0x81;
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_comm_val);
+ }
+ } else {
+ uint32_t host_pimr_val;
+
+ host_pimr_val = ish_reg_read(dev, IPC_REG_PIMR_BXT);
+ host_pimr_val |= IPC_PIMR_INT_EN_BIT_BXT;
+ /*
+ * disable interrupt generated instead of
+ * RX_complete_msg
+ */
+ host_pimr_val &= ~IPC_HOST2ISH_BUSYCLEAR_MASK_BIT;
+
+ ish_reg_write(dev, IPC_REG_PIMR_BXT, host_pimr_val);
+ }
+}
+
+/**
+ * ishtp_fw_is_ready() - Check if FW ready
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready
+ *
+ * Return: true or false
+ */
+static bool ishtp_fw_is_ready(struct ishtp_device *dev)
+{
+ uint32_t ish_status = _ish_read_fw_sts_reg(dev);
+
+ return IPC_IS_ISH_ILUP(ish_status) &&
+ IPC_IS_ISH_ISHTP_READY(ish_status);
+}
+
+/**
+ * ish_set_host_rdy() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void ish_set_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_SET_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+/**
+ * ish_clr_host_rdy() - Indicate host not ready
+ * @dev: ISHTP device pointer
+ *
+ * Send host not ready indication to FW
+ */
+static void ish_clr_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_CLEAR_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+/**
+ * _ishtp_read_hdr() - Read message header
+ * @dev: ISHTP device pointer
+ *
+ * Read the 32-bit message header
+ *
+ * Return: Read register value
+ */
+static uint32_t _ishtp_read_hdr(const struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH2HOST_MSG);
+}
+
+/**
+ * _ishtp_read - Read message
+ * @dev: ISHTP device pointer
+ * @buffer: message buffer
+ * @buffer_length: length of message buffer
+ *
+ * Read message from FW
+ *
+ * Return: Always 0
+ */
+static int _ishtp_read(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ uint32_t i;
+ uint32_t *r_buf = (uint32_t *)buffer;
+ uint32_t msg_offs;
+
+ msg_offs = IPC_REG_ISH2HOST_MSG + sizeof(struct ishtp_msg_hdr);
+ for (i = 0; i < buffer_length; i += sizeof(uint32_t))
+ *r_buf++ = ish_reg_read(dev, msg_offs + i);
+
+ return 0;
+}
+
+/**
+ * write_ipc_from_queue() - try to write ipc msg from Tx queue to device
+ * @dev: ishtp device pointer
+ *
+ * Check if DRBL is cleared. If it is, write the first IPC msg, then call
+ * the callback function (unless it's NULL)
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_from_queue(struct ishtp_device *dev)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long length;
+ unsigned long rem;
+ unsigned long flags;
+ uint32_t doorbell_val;
+ uint32_t *r_buf;
+ uint32_t reg_addr;
+ int i;
+ void (*ipc_send_compl)(void *);
+ void *ipc_send_compl_prm;
+ static int out_ipc_locked;
+ unsigned long out_ipc_flags;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->out_ipc_spinlock, out_ipc_flags);
+ if (out_ipc_locked) {
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+ return -EBUSY;
+ }
+ out_ipc_locked = 1;
+ if (!ish_is_input_ready(dev)) {
+ out_ipc_locked = 0;
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ /*
+ * If the tx send list is empty, return 0; this may happen,
+ * as the RX_COMPLETE handler doesn't check list emptiness.
+ */
+ if (list_empty(&dev->wr_processing_list_head.link)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ out_ipc_locked = 0;
+ return 0;
+ }
+
+ ipc_link = list_entry(dev->wr_processing_list_head.link.next,
+ struct wr_msg_ctl_info, link);
+ /* first 4 bytes of the data is the doorbell value (IPC header) */
+ length = ipc_link->length - sizeof(uint32_t);
+ doorbell_val = *(uint32_t *)ipc_link->inline_data;
+ r_buf = (uint32_t *)(ipc_link->inline_data + sizeof(uint32_t));
+
+ /* If sending MNG_SYNC_FW_CLOCK, update clock again */
+ if (IPC_HEADER_GET_PROTOCOL(doorbell_val) == IPC_PROTOCOL_MNG &&
+ IPC_HEADER_GET_MNG_CMD(doorbell_val) == MNG_SYNC_FW_CLOCK) {
+ struct timespec ts_system;
+ struct timeval tv_utc;
+ uint64_t usec_system, usec_utc;
+ struct ipc_time_update_msg time_update;
+ struct time_sync_format ts_format;
+
+ get_monotonic_boottime(&ts_system);
+ do_gettimeofday(&tv_utc);
+ usec_system = (timespec_to_ns(&ts_system)) / NSEC_PER_USEC;
+ usec_utc = (uint64_t)tv_utc.tv_sec * 1000000 +
+ ((uint32_t)tv_utc.tv_usec);
+ ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
+ ts_format.ts2_source = HOST_UTC_TIME_USEC;
+
+ time_update.primary_host_time = usec_system;
+ time_update.secondary_host_time = usec_utc;
+ time_update.sync_info = ts_format;
+
+ memcpy(r_buf, &time_update,
+ sizeof(struct ipc_time_update_msg));
+ }
+
+ for (i = 0, reg_addr = IPC_REG_HOST2ISH_MSG; i < length >> 2; i++,
+ reg_addr += 4)
+ ish_reg_write(dev, reg_addr, r_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ uint32_t reg = 0;
+
+ memcpy(&reg, &r_buf[length >> 2], rem);
+ ish_reg_write(dev, reg_addr, reg);
+ }
+ /* Flush writes to msg registers and doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ /* Update IPC counters */
+ ++dev->ipc_tx_cnt;
+ dev->ipc_tx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, doorbell_val);
+ out_ipc_locked = 0;
+
+ ipc_send_compl = ipc_link->ipc_send_compl;
+ ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
+ list_del_init(&ipc_link->link);
+ list_add_tail(&ipc_link->link, &dev->wr_free_list_head.link);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /*
+ * callback will be called out of spinlock,
+ * after ipc_link returned to free list
+ */
+ if (ipc_send_compl)
+ ipc_send_compl(ipc_send_compl_prm);
+
+ return 0;
+}
+
+/**
+ * write_ipc_to_queue() - write ipc msg to Tx queue
+ * @dev: ishtp device instance
+ * @ipc_send_compl: Send complete callback
+ * @ipc_send_compl_prm: Parameter to send in complete callback
+ * @msg: Pointer to message
+ * @length: Length of message
+ *
+ * Receive a msg with IPC (and upper protocol) header and add it to the device
+ * Tx-to-write list, then try to send the first waiting IPC msg
+ * (if DRBL is cleared).
+ * This function returns a negative value on failure (the free list is
+ * empty or the msg is too long) and 0 on success.
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_to_queue(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long flags;
+
+ if (length > IPC_FULL_MSG_SIZE)
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ if (list_empty(&dev->wr_free_list_head.link)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ return -ENOMEM;
+ }
+ ipc_link = list_entry(dev->wr_free_list_head.link.next,
+ struct wr_msg_ctl_info, link);
+ list_del_init(&ipc_link->link);
+
+ ipc_link->ipc_send_compl = ipc_send_compl;
+ ipc_link->ipc_send_compl_prm = ipc_send_compl_prm;
+ ipc_link->length = length;
+ memcpy(ipc_link->inline_data, msg, length);
+
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list_head.link);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ write_ipc_from_queue(dev);
+
+ return 0;
+}
+
+/**
+ * ipc_send_mng_msg() - Send management message
+ * @dev: ishtp device instance
+ * @msg_code: Message code
+ * @msg: Pointer to message
+ * @size: Length of message
+ *
+ * Send management message to FW
+ *
+ * Return: 0 for success else failure code
+ */
+static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
+ void *msg, size_t size)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val = IPC_BUILD_MNG_MSG(msg_code, size);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), msg, size);
+ return write_ipc_to_queue(dev, NULL, NULL, ipc_msg,
+ sizeof(uint32_t) + size);
+}
+
+/**
+ * ish_fw_reset_handler() - FW reset handler
+ * @dev: ishtp device pointer
+ *
+ * Handle FW reset
+ *
+ * Return: 0 for success else failure code
+ */
+static int ish_fw_reset_handler(struct ishtp_device *dev)
+{
+ uint32_t reset_id;
+ unsigned long flags;
+ struct wr_msg_ctl_info *processing, *next;
+
+ /* Read reset ID */
+ reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
+
+ /* Clear IPC output queue */
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ list_for_each_entry_safe(processing, next,
+ &dev->wr_processing_list_head.link, link) {
+ list_move_tail(&processing->link, &dev->wr_free_list_head.link);
+ }
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /* ISHTP notification in IPC_RESET */
+ ishtp_reset_handler(dev);
+
+ if (!ish_is_input_ready(dev))
+ timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
+ ish_is_input_ready(dev), (2 * HZ));
+
+ /* ISH FW is dead */
+ if (!ish_is_input_ready(dev))
+ return -EPIPE;
+ /*
+ * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
+ * RESET_NOTIFY_ACK - FW will be checking for it
+ */
+ ish_set_host_rdy(dev);
+ /* Send RESET_NOTIFY_ACK (with reset_id) */
+ ipc_send_mng_msg(dev, MNG_RESET_NOTIFY_ACK, &reset_id,
+ sizeof(uint32_t));
+
+ /* Wait for ISH FW's ILUP and ISHTP_READY */
+ timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
+ (2 * HZ));
+ if (!ishtp_fw_is_ready(dev)) {
+ /* ISH FW is dead */
+ uint32_t ish_status;
+
+ ish_status = _ish_read_fw_sts_reg(dev);
+ dev_err(dev->devc,
+ "[ishtp-ish]: completed reset, ISH is dead (FWSTS = %08X)\n",
+ ish_status);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * fw_reset_work_fn() - FW reset worker function
+ * @unused: not used
+ *
+ * Call ish_fw_reset_handler to complete FW reset
+ */
+static void fw_reset_work_fn(struct work_struct *unused)
+{
+ int rv;
+
+ rv = ish_fw_reset_handler(ishtp_dev);
+ if (!rv) {
+ /* ISH is ILUP & ISHTP-ready. Restart ISHTP */
+ schedule_timeout(HZ / 3);
+ ishtp_dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&ishtp_dev->wait_hw_ready);
+
+ /* ISHTP notification in IPC_RESET sequence completion */
+ ishtp_reset_compl_handler(ishtp_dev);
+ } else
+ dev_err(ishtp_dev->devc, "[ishtp-ish]: FW reset failed (%d)\n",
+ rv);
+}
+
+/**
+ * _ish_sync_fw_clock() - Sync FW clock with the OS clock
+ * @dev: ishtp device pointer
+ *
+ * Sync FW and OS time
+ */
+static void _ish_sync_fw_clock(struct ishtp_device *dev)
+{
+ static unsigned long prev_sync;
+ struct timespec ts;
+ uint64_t usec;
+
+ if (prev_sync && jiffies - prev_sync < 20 * HZ)
+ return;
+
+ prev_sync = jiffies;
+ get_monotonic_boottime(&ts);
+ usec = (timespec_to_ns(&ts)) / NSEC_PER_USEC;
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+}
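
_ish_sync_fw_clock() rate-limits itself: it records the jiffies value of the last sync and returns early if fewer than 20 seconds have passed. Below is a minimal, illustrative user-space sketch of that rate-limiting pattern; it is not part of the patch, and all names in it are hypothetical.

#include <stdio.h>
#include <time.h>

#define SYNC_INTERVAL_SEC 20			/* mirrors the 20 * HZ window above */

static void maybe_sync_clock(void)
{
	static time_t prev_sync;		/* 0 until the first sync */
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (prev_sync && now.tv_sec - prev_sync < SYNC_INTERVAL_SEC)
		return;				/* too soon: skip this sync */

	prev_sync = now.tv_sec;
	/* here the driver would send MNG_SYNC_FW_CLOCK with the usec value */
	printf("sync: %lld usec since boot\n",
	       (long long)now.tv_sec * 1000000 + now.tv_nsec / 1000);
}

int main(void)
{
	maybe_sync_clock();			/* first call syncs */
	maybe_sync_clock();			/* second call is rate-limited */
	return 0;
}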
+
+/**
+ * recv_ipc() - Receive and process IPC management messages
+ * @dev: ishtp device instance
+ * @doorbell_val: doorbell value
+ *
+ * This function runs in ISR context.
+ * NOTE: Any mng command other than reset_notify and reset_notify_ack
+ * won't wake the BH handler
+ */
+static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
+{
+ uint32_t mng_cmd;
+
+ mng_cmd = IPC_HEADER_GET_MNG_CMD(doorbell_val);
+
+ switch (mng_cmd) {
+ default:
+ break;
+
+ case MNG_RX_CMPL_INDICATION:
+ if (dev->suspend_flag) {
+ dev->suspend_flag = 0;
+ wake_up_interruptible(&dev->suspend_wait);
+ }
+ if (dev->resume_flag) {
+ dev->resume_flag = 0;
+ wake_up_interruptible(&dev->resume_wait);
+ }
+
+ write_ipc_from_queue(dev);
+ break;
+
+ case MNG_RESET_NOTIFY:
+ if (!ishtp_dev) {
+ ishtp_dev = dev;
+ INIT_WORK(&fw_reset_work, fw_reset_work_fn);
+ }
+ schedule_work(&fw_reset_work);
+ break;
+
+ case MNG_RESET_NOTIFY_ACK:
+ dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&dev->wait_hw_ready);
+ break;
+ }
+}
+
+/**
+ * ish_irq_handler() - ISH IRQ handler
+ * @irq: irq number
+ * @dev_id: ishtp device pointer
+ *
+ * ISH IRQ handler. If the interrupt was generated by ISH, it is
+ * processed here.
+ */
+irqreturn_t ish_irq_handler(int irq, void *dev_id)
+{
+ struct ishtp_device *dev = dev_id;
+ uint32_t doorbell_val;
+ bool interrupt_generated;
+
+ /* Check that it's interrupt from ISH (may be shared) */
+ interrupt_generated = check_generated_interrupt(dev);
+
+ if (!interrupt_generated)
+ return IRQ_NONE;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_ISH2HOST_DRBL);
+ if (!IPC_IS_BUSY(doorbell_val))
+ return IRQ_HANDLED;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Sanity check: IPC dgram length in header */
+ if (IPC_HEADER_GET_LENGTH(doorbell_val) > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "IPC hdr - bad length: %u; dropped\n",
+ (unsigned int)IPC_HEADER_GET_LENGTH(doorbell_val));
+ goto eoi;
+ }
+
+ switch (IPC_HEADER_GET_PROTOCOL(doorbell_val)) {
+ default:
+ break;
+ case IPC_PROTOCOL_MNG:
+ recv_ipc(dev, doorbell_val);
+ break;
+ case IPC_PROTOCOL_ISHTP:
+ ishtp_recv(dev);
+ break;
+ }
+
+eoi:
+ /* Update IPC counters */
+ ++dev->ipc_rx_cnt;
+ dev->ipc_rx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * _ish_hw_reset() - HW reset
+ * @dev: ishtp device pointer
+ *
+ * Reset ISH HW to recover from any error
+ *
+ * Return: 0 for success else error code
+ */
+static int _ish_hw_reset(struct ishtp_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int rv;
+ unsigned int dma_delay;
+ uint16_t csr;
+
+ if (!pdev)
+ return -ENODEV;
+
+ rv = pci_reset_function(pdev);
+ if (!rv)
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, "Can't reset - no PM caps\n");
+ return -EINVAL;
+ }
+
+ /* Now trigger reset to FW */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return -EBUSY;
+ }
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &csr);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ mdelay(pdev->d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ return 0;
+}
+
+/**
+ * _ish_ipc_reset() - IPC reset
+ * @dev: ishtp device pointer
+ *
+ * Resets host and fw IPC and upper layers
+ *
+ * Return: 0 for success else error code
+ */
+static int _ish_ipc_reset(struct ishtp_device *dev)
+{
+ struct ipc_rst_payload_type ipc_mng_msg;
+ int rv = 0;
+
+ ipc_mng_msg.reset_id = 1;
+ ipc_mng_msg.reserved = 0;
+
+ set_host_ready(dev);
+
+ /* Clear the incoming doorbell */
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ dev->recvd_hw_ready = 0;
+
+ /* send message */
+ rv = ipc_send_mng_msg(dev, MNG_RESET_NOTIFY, &ipc_mng_msg,
+ sizeof(struct ipc_rst_payload_type));
+ if (rv) {
+ dev_err(dev->devc, "Failed to send IPC MNG_RESET_NOTIFY\n");
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready, 2 * HZ);
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc, "Timed out waiting for HW ready\n");
+ rv = -ENODEV;
+ }
+
+ return rv;
+}
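
The reset flow above uses a common handshake: clear recvd_hw_ready, send MNG_RESET_NOTIFY, then sleep on wait_hw_ready until the interrupt path (the MNG_RESET_NOTIFY_ACK case in recv_ipc()) sets the flag, bounded by a 2-second timeout. The following user-space sketch, which is illustrative only and not part of the patch, shows the same flag-plus-timed-wait pattern with POSIX threads standing in for the kernel wait queue; all names are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t hw_ready_wait = PTHREAD_COND_INITIALIZER;
static bool recvd_hw_ready;

/* Simulates the IRQ path receiving a RESET_NOTIFY_ACK from the firmware */
static void *irq_path(void *arg)
{
	pthread_mutex_lock(&lock);
	recvd_hw_ready = true;
	pthread_cond_signal(&hw_ready_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;
	struct timespec deadline;
	int rv = 0;

	recvd_hw_ready = false;			/* like dev->recvd_hw_ready = 0 */
	pthread_create(&irq, NULL, irq_path, NULL);	/* "send" the notify */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;			/* 2 s, like the 2 * HZ timeout */

	pthread_mutex_lock(&lock);
	while (!recvd_hw_ready && rv == 0)
		rv = pthread_cond_timedwait(&hw_ready_wait, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	puts(recvd_hw_ready ? "HW ready" : "timed out");
	pthread_join(irq, NULL);
	return 0;
}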
+
+/**
+ * ish_hw_start() - Start ISH HW
+ * @dev: ishtp device pointer
+ *
+ * Set host to ready state and wait for FW reset
+ *
+ * Return: 0 for success else error code
+ */
+int ish_hw_start(struct ishtp_device *dev)
+{
+ ish_set_host_rdy(dev);
+ /* After that we can enable ISH DMA operation */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ set_host_ready(dev);
+
+ /* wait for FW-initiated reset flow */
+ if (!dev->recvd_hw_ready)
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready,
+ 10 * HZ);
+
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc,
+ "[ishtp-ish]: Timed out waiting for FW-initiated reset\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_ipc_get_header() - Get doorbell value
+ * @dev: ishtp device pointer
+ * @length: length of message
+ * @busy: busy status
+ *
+ * Get doorbell value from message header
+ *
+ * Return: doorbell value
+ */
+static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
+ int busy)
+{
+ uint32_t drbl_val;
+
+ drbl_val = IPC_BUILD_HEADER(length, IPC_PROTOCOL_ISHTP, busy);
+
+ return drbl_val;
+}
+
+static const struct ishtp_hw_ops ish_hw_ops = {
+ .hw_reset = _ish_hw_reset,
+ .ipc_reset = _ish_ipc_reset,
+ .ipc_get_header = ish_ipc_get_header,
+ .ishtp_read = _ishtp_read,
+ .write = write_ipc_to_queue,
+ .get_fw_status = _ish_read_fw_sts_reg,
+ .sync_fw_clock = _ish_sync_fw_clock,
+ .ishtp_read_hdr = _ishtp_read_hdr
+};
+
+/**
+ * ish_dev_init() - Initialize ISH device
+ * @pdev: PCI device
+ *
+ * Allocate ISHTP device and initialize IPC processing
+ *
+ * Return: ISHTP device instance on success else NULL
+ */
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+{
+ struct ishtp_device *dev;
+ int i;
+
+ dev = kzalloc(sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+ GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ishtp_device_init(dev);
+
+ init_waitqueue_head(&dev->wait_hw_ready);
+
+ spin_lock_init(&dev->wr_processing_spinlock);
+ spin_lock_init(&dev->out_ipc_spinlock);
+
+ /* Init IPC processing and free lists */
+ INIT_LIST_HEAD(&dev->wr_processing_list_head.link);
+ INIT_LIST_HEAD(&dev->wr_free_list_head.link);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
+ struct wr_msg_ctl_info *tx_buf;
+
+ tx_buf = kzalloc(sizeof(struct wr_msg_ctl_info), GFP_KERNEL);
+ if (!tx_buf) {
+ /*
+ * IPC buffers may be limited or not available
+ * at all - although this shouldn't happen
+ */
+ dev_err(dev->devc,
+ "[ishtp-ish]: failure in Tx FIFO allocations (%d)\n",
+ i);
+ break;
+ }
+ list_add_tail(&tx_buf->link, &dev->wr_free_list_head.link);
+ }
+
+ dev->ops = &ish_hw_ops;
+ dev->devc = &pdev->dev;
+ dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+ return dev;
+}
+
+/**
+ * ish_device_disable() - Disable ISH device
+ * @dev: ISHTP device pointer
+ *
+ * Disable ISH by clearing host ready to inform firmware.
+ */
+void ish_device_disable(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ ish_clr_host_rdy(dev);
+}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
new file mode 100644
index 000000000000..42f0beeb09fd
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -0,0 +1,322 @@
+/*
+ * PCI glue for ISHTP provider device (ISH) driver
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/miscdevice.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_ish.h>
+#include "ishtp-dev.h"
+#include "hw-ish.h"
+
+static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CHV_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Bx_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, APL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+
+/**
+ * ish_event_tracer() - Callback function to dump trace messages
+ * @dev: ishtp device
+ * @format: printf style format
+ *
+ * Callback to direct log messages to Linux trace buffers
+ */
+static void ish_event_tracer(struct ishtp_device *dev, char *format, ...)
+{
+ if (trace_ishtp_dump_enabled()) {
+ va_list args;
+ char tmp_buf[100];
+
+ va_start(args, format);
+ vsnprintf(tmp_buf, sizeof(tmp_buf), format, args);
+ va_end(args);
+
+ trace_ishtp_dump(tmp_buf);
+ }
+}
+
+/**
+ * ish_init() - Init function
+ * @dev: ishtp device
+ *
+ * This function initializes the wait queues for suspend/resume and
+ * calls the hardware initialization function. This will initiate the
+ * startup sequence.
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_init(struct ishtp_device *dev)
+{
+ int ret;
+
+ /* Set the state of ISH HW to start */
+ ret = ish_hw_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISH: hw start failed.\n");
+ return ret;
+ }
+
+ /* Start the inter process communication to ISH processor */
+ ret = ishtp_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISHTP: Protocol init failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_probe() - PCI driver probe callback
+ * @pdev: pci device
+ * @ent: pci device id
+ *
+ * Initialize PCI function, setup interrupt and call for ISH initialization
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct ishtp_device *dev;
+ struct ish_hw *hw;
+ int ret;
+
+ /* enable pci dev */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: Failed to enable PCI device\n");
+ return ret;
+ }
+
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+
+ /* pci request regions for ISH driver */
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: Failed to get PCI regions\n");
+ goto disable_device;
+ }
+
+ /* allocates and initializes the ISH dev structure */
+ dev = ish_dev_init(pdev);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_ish_hw(dev);
+ dev->print_log = ish_event_tracer;
+
+ /* mapping IO device memory */
+ hw->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!hw->mem_addr) {
+ dev_err(&pdev->dev, "ISH: mapping I/O range failure\n");
+ ret = -ENOMEM;
+ goto free_device;
+ }
+
+ dev->pdev = pdev;
+
+ pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+
+ /* request and enable interrupt */
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND,
+ KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
+ pdev->irq);
+ goto free_device;
+ }
+
+ dev_set_drvdata(dev->devc, dev);
+
+ init_waitqueue_head(&dev->suspend_wait);
+ init_waitqueue_head(&dev->resume_wait);
+
+ ret = ish_init(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ free_irq(pdev->irq, dev);
+free_device:
+ pci_iounmap(pdev, hw->mem_addr);
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "ISH: PCI driver initialization failed.\n");
+
+ return ret;
+}
+
+/**
+ * ish_remove() - PCI driver remove callback
+ * @pdev: pci device
+ *
+ * This function does cleanup of ISH on pci remove callback
+ */
+static void ish_remove(struct pci_dev *pdev)
+{
+ struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
+ struct ish_hw *hw = to_ish_hw(ishtp_dev);
+
+ ishtp_bus_remove_all_clients(ishtp_dev, false);
+ ish_device_disable(ishtp_dev);
+
+ free_irq(pdev->irq, ishtp_dev);
+ pci_iounmap(pdev, hw->mem_addr);
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ kfree(ishtp_dev);
+}
+
+static struct device *ish_resume_device;
+
+/**
+ * ish_resume_handler() - Work function to complete resume
+ * @work: work struct
+ *
+ * The resume work function completes the resume operation asynchronously.
+ * There are two types of platforms: on one ISH is not powered off, and
+ * a simple resume message is enough; on the others a full reset
+ * sequence is needed.
+ */
+static void ish_resume_handler(struct work_struct *work)
+{
+ struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+ int ret;
+
+ ishtp_send_resume(dev);
+
+ /* 50 ms to get resume response */
+ if (dev->resume_flag)
+ ret = wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(50));
+
+ /*
+ * If there is no resume response, this platform is not S0ix compatible,
+ * so on resume a full reboot of the ISH processor will happen and we
+ * need to go through the init sequence again
+ */
+ if (dev->resume_flag)
+ ish_init(dev);
+}
+
+/**
+ * ish_suspend() - ISH suspend callback
+ * @device: device pointer
+ *
+ * ISH suspend callback
+ *
+ * Return: 0 to the pm core
+ */
+static int ish_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ enable_irq_wake(pdev->irq);
+ /*
+ * If the previous suspend hasn't been answered then ISH is likely dead,
+ * don't attempt nested notification
+ */
+ if (dev->suspend_flag)
+ return 0;
+
+ dev->resume_flag = 0;
+ dev->suspend_flag = 1;
+ ishtp_send_suspend(dev);
+
+ /* 25 ms should be enough for live ISH to flush all IPC buf */
+ if (dev->suspend_flag)
+ wait_event_interruptible_timeout(dev->suspend_wait,
+ !dev->suspend_flag,
+ msecs_to_jiffies(25));
+
+ return 0;
+}
+
+static DECLARE_WORK(resume_work, ish_resume_handler);
+/**
+ * ish_resume() - ISH resume callback
+ * @device: device pointer
+ *
+ * ISH resume callback
+ *
+ * Return: 0 to the pm core
+ */
+static int ish_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ ish_resume_device = device;
+ dev->resume_flag = 1;
+
+ disable_irq_wake(pdev->irq);
+ schedule_work(&resume_work);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops ish_pm_ops = {
+ .suspend = ish_suspend,
+ .resume = ish_resume,
+};
+#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
+#else
+#define ISHTP_ISH_PM_OPS NULL
+#endif
+
+static struct pci_driver ish_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ish_pci_tbl,
+ .probe = ish_probe,
+ .remove = ish_remove,
+ .driver.pm = ISHTP_ISH_PM_OPS,
+};
+
+module_pci_driver(ish_driver);
+
+/* Original author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/* Adoption to upstream Linux kernel */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
new file mode 100644
index 000000000000..5a82123dc7b4
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/utils.h
@@ -0,0 +1,64 @@
+/*
+ * Utility macros of ISH
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef UTILS__H
+#define UTILS__H
+
+#define WAIT_FOR_SEND_SLICE (HZ / 10)
+#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
+
+/*
+ * Waits for the specified event when the thread that triggers it can't
+ * signal. Also waits *at least* `timeinc` after the condition is satisfied.
+ */
+#define timed_wait_for(timeinc, condition) \
+ do { \
+ int completed = 0; \
+ do { \
+ unsigned long j; \
+ int done = 0; \
+ \
+ completed = (condition); \
+ for (j = jiffies, done = 0; !done; ) { \
+ schedule_timeout(timeinc); \
+ if (time_is_before_eq_jiffies(j + timeinc)) \
+ done = 1; \
+ } \
+ } while (!(completed)); \
+ } while (0)
+
+
+/*
+ * Waits for the specified event when the thread that triggers it can't
+ * signal, with a timeout (use whenever we may hang)
+ */
+#define timed_wait_for_timeout(timeinc, condition, timeout) \
+ do { \
+ int t = timeout; \
+ do { \
+ unsigned long j; \
+ int done = 0; \
+ \
+ for (j = jiffies, done = 0; !done; ) { \
+ schedule_timeout(timeinc); \
+ if (time_is_before_eq_jiffies(j + timeinc)) \
+ done = 1; \
+ } \
+ t -= timeinc; \
+ if (t <= 0) \
+ break; \
+ } while (!(condition)); \
+ } while (0)
+
+#endif /* UTILS__H */
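
Both macros implement a polled wait: sleep in fixed slices and re-check the condition until it holds or the overall timeout is spent. The sketch below is an illustrative user-space approximation of timed_wait_for_timeout()'s logic, with nanosleep() standing in for schedule_timeout(); it is not part of the patch and all names are hypothetical.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll cond() every slice_ms until it holds or timeout_ms elapses. */
static bool poll_until(bool (*cond)(void), long slice_ms, long timeout_ms)
{
	struct timespec slice = {
		.tv_sec = slice_ms / 1000,
		.tv_nsec = (slice_ms % 1000) * 1000000L,
	};

	do {
		nanosleep(&slice, NULL);	/* like schedule_timeout(timeinc) */
		timeout_ms -= slice_ms;
		if (timeout_ms <= 0)
			break;
	} while (!cond());

	return cond();				/* report the final state */
}

static bool fw_ready(void)
{
	return false;				/* stand-in for ishtp_fw_is_ready() */
}

int main(void)
{
	if (!poll_until(fw_ready, 100, 2000))
		fprintf(stderr, "timed out waiting for firmware\n");
	return 0;
}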
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
new file mode 100644
index 000000000000..5c643d7a07b2
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -0,0 +1,978 @@
+/*
+ * ISHTP client driver for HID (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/sched.h>
+#include "ishtp/ishtp-dev.h"
+#include "ishtp/client.h"
+#include "ishtp-hid.h"
+
+/* Rx ring buffer pool size */
+#define HID_CL_RX_RING_SIZE 32
+#define HID_CL_TX_RING_SIZE 16
+
+/**
+ * report_bad_packet() - Report bad packets
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @cur_pos: Current position index in payload
+ * @payload_len: Length of payload expected
+ *
+ * Dumps an error when a bad packet is received
+ */
+static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t cur_pos, size_t payload_len)
+{
+ struct hostif_msg *recv_msg = recv_buf;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ dev_err(&client_data->cl_device->dev, "[hid-ish]: BAD packet %02X\n"
+ "total_bad=%u cur_pos=%u\n"
+ "[%02X %02X %02X %02X]\n"
+ "payload_len=%u\n"
+ "multi_packet_cnt=%u\n"
+ "is_response=%02X\n",
+ recv_msg->hdr.command, client_data->bad_recv_cnt,
+ (unsigned int)cur_pos,
+ ((unsigned char *)recv_msg)[0], ((unsigned char *)recv_msg)[1],
+ ((unsigned char *)recv_msg)[2], ((unsigned char *)recv_msg)[3],
+ (unsigned int)payload_len, client_data->multi_packet_cnt,
+ recv_msg->hdr.command & ~CMD_MASK);
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @data_len: length of the message
+ *
+ * Parse the incoming packet. If it is a response packet then it will update
+ * per-instance flags and wake up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t data_len)
+{
+ struct hostif_msg *recv_msg;
+ unsigned char *payload;
+ struct device_info *dev_info;
+ int i, j;
+ size_t payload_len, total_len, cur_pos;
+ int report_type;
+ struct report_list *reports_list;
+ char *reports;
+ size_t report_len;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int curr_hid_dev = client_data->cur_hid_dev;
+
+ if (data_len < sizeof(struct hostif_msg_hdr)) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: error, received %u which is less than data header %u\n",
+ (unsigned int)data_len,
+ (unsigned int)sizeof(struct hostif_msg_hdr));
+ ++client_data->bad_recv_cnt;
+ ish_hw_reset(hid_ishtp_cl->dev);
+ return;
+ }
+
+ payload = recv_buf + sizeof(struct hostif_msg_hdr);
+ total_len = data_len;
+ cur_pos = 0;
+
+ do {
+ recv_msg = (struct hostif_msg *)(recv_buf + cur_pos);
+ payload_len = recv_msg->hdr.size;
+
+ /* Sanity checks */
+ if (cur_pos + payload_len + sizeof(struct hostif_msg) >
+ total_len) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+
+ hid_ishtp_trace(client_data, "%s %d\n",
+ __func__, recv_msg->hdr.command & CMD_MASK);
+
+ switch (recv_msg->hdr.command & CMD_MASK) {
+ case HOSTIF_DM_ENUM_DEVICES:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ client_data->hid_dev_count = (unsigned int)*payload;
+ if (!client_data->hid_devices)
+ client_data->hid_devices = devm_kzalloc(
+ &client_data->cl_device->dev,
+ client_data->hid_dev_count *
+ sizeof(struct device_info),
+ GFP_KERNEL);
+ if (!client_data->hid_devices) {
+ dev_err(&client_data->cl_device->dev,
+ "Mem alloc failed for hid device info\n");
+ wake_up_interruptible(&client_data->init_wait);
+ break;
+ }
+ for (i = 0; i < client_data->hid_dev_count; ++i) {
+ if (1 + sizeof(struct device_info) * i >=
+ payload_len) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: [ENUM_DEVICES]: content size %lu is bigger than payload_len %u\n",
+ 1 + sizeof(struct device_info)
+ * i,
+ (unsigned int)payload_len);
+ }
+
+ if (1 + sizeof(struct device_info) * i >=
+ data_len)
+ break;
+
+ dev_info = (struct device_info *)(payload + 1 +
+ sizeof(struct device_info) * i);
+ if (client_data->hid_devices)
+ memcpy(client_data->hid_devices + i,
+ dev_info,
+ sizeof(struct device_info));
+ }
+
+ client_data->enum_devices_done = true;
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_HID_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ if (!client_data->hid_descr[curr_hid_dev])
+ client_data->hid_descr[curr_hid_dev] =
+ devm_kmalloc(&client_data->cl_device->dev,
+ payload_len, GFP_KERNEL);
+ if (client_data->hid_descr[curr_hid_dev]) {
+ memcpy(client_data->hid_descr[curr_hid_dev],
+ payload, payload_len);
+ client_data->hid_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->hid_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_REPORT_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ if (!client_data->report_descr[curr_hid_dev])
+ client_data->report_descr[curr_hid_dev] =
+ devm_kmalloc(&client_data->cl_device->dev,
+ payload_len, GFP_KERNEL);
+ if (client_data->report_descr[curr_hid_dev]) {
+ memcpy(client_data->report_descr[curr_hid_dev],
+ payload,
+ payload_len);
+ client_data->report_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->report_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_FEATURE_REPORT:
+ report_type = HID_FEATURE_REPORT;
+ goto do_get_report;
+
+ case HOSTIF_GET_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+do_get_report:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i]) {
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type, payload,
+ payload_len, 0);
+ ishtp_hid_wakeup(
+ client_data->hid_sensor_hubs[
+ i]);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_SET_FEATURE_REPORT:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i]) {
+ ishtp_hid_wakeup(
+ client_data->hid_sensor_hubs[
+ i]);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i])
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type, payload,
+ payload_len, 0);
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT_LIST:
+ report_type = HID_INPUT_REPORT;
+ reports_list = (struct report_list *)payload;
+ reports = (char *)reports_list->reports;
+
+ for (j = 0; j < reports_list->num_of_reports; j++) {
+ recv_msg = (struct hostif_msg *)(reports +
+ sizeof(uint16_t));
+ report_len = *(uint16_t *)reports;
+ payload = reports + sizeof(uint16_t) +
+ sizeof(struct hostif_msg_hdr);
+ payload_len = report_len -
+ sizeof(struct hostif_msg_hdr);
+
+ for (i = 0; i < client_data->num_hid_devices;
+ ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id &&
+ client_data->hid_sensor_hubs[i]) {
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type,
+ payload, payload_len,
+ 0);
+ }
+
+ reports += sizeof(uint16_t) + report_len;
+ }
+ break;
+ default:
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+
+ }
+
+ if (!cur_pos && cur_pos + payload_len +
+ sizeof(struct hostif_msg) < total_len)
+ ++client_data->multi_packet_cnt;
+
+ cur_pos += payload_len + sizeof(struct hostif_msg);
+ payload += payload_len + sizeof(struct hostif_msg);
+
+ } while (cur_pos < total_len);
+}
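
process_recv() treats the receive buffer as a sequence of fragments: each is a hostif_msg header whose size field gives the payload length that follows, cur_pos advances by header-plus-payload, and any fragment that would overrun total_len is rejected. The self-contained sketch below illustrates only that walk; the 3-byte demo header and sample bytes are made up for illustration, assume a little-endian host, and are not the driver's real hostif_msg layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hdr {
	uint8_t command;
	uint16_t size;			/* payload bytes that follow this header */
} __attribute__((packed));

static void walk_fragments(const uint8_t *buf, size_t total_len)
{
	size_t cur_pos = 0;

	while (cur_pos + sizeof(struct demo_hdr) <= total_len) {
		struct demo_hdr hdr;

		memcpy(&hdr, buf + cur_pos, sizeof(hdr));	/* avoid unaligned access */
		if (cur_pos + sizeof(hdr) + hdr.size > total_len) {
			fprintf(stderr, "bad packet at %zu, dropping rest\n", cur_pos);
			return;		/* mirrors the bad-packet bail-out */
		}
		printf("cmd %u, payload %u bytes\n",
		       (unsigned)hdr.command, (unsigned)hdr.size);
		cur_pos += sizeof(hdr) + hdr.size;
	}
}

int main(void)
{
	/* two fragments: cmd 4 with 2 payload bytes, cmd 5 with 1 payload byte */
	uint8_t buf[] = { 4, 2, 0, 0xAA, 0xBB, 5, 1, 0, 0xCC };

	walk_fragments(buf, sizeof(buf));
	return 0;
}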
+
+/**
+ * ish_cl_event_cb() - bus driver callback for incoming message/packet
+ * @device: Pointer to the ishtp client device for which this message
+ * is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv
+ */
+static void ish_cl_event_cb(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl *hid_ishtp_cl = device->driver_data;
+ struct ishtp_cl_rb *rb_in_proc;
+ size_t r_length;
+ unsigned long flags;
+
+ if (!hid_ishtp_cl)
+ return;
+
+ spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
+ while (!list_empty(&hid_ishtp_cl->in_process_list.list)) {
+ rb_in_proc = list_entry(
+ hid_ishtp_cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&rb_in_proc->list);
+ spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock,
+ flags);
+
+ if (!rb_in_proc->buffer.data)
+ return;
+
+ r_length = rb_in_proc->buf_idx;
+
+ /* decide what to do with received data */
+ process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
+
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
+ }
+ spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags);
+}
+
+/**
+ * hid_ishtp_set_feature() - send request to ISH FW to set a feature report
+ * @hid: hid device instance for this request
+ * @buf: feature buffer
+ * @len: Length of feature buffer
+ * @report_id: Report id for the feature set request
+ *
+ * This is called from the hid core .request() callback. This function doesn't
+ * wait for a response.
+ */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ struct hostif_msg *msg = (struct hostif_msg *)buf;
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ memset(msg, 0, sizeof(struct hostif_msg));
+ msg->hdr.command = HOSTIF_SET_FEATURE_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg->hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * hid_ishtp_get_report() - request to get feature/input report
+ * @hid: hid device instance for this request
+ * @report_id: Report id for the get request
+ * @report_type: Report type for this request
+ *
+ * This is called from the hid core .request() callback. This function will
+ * send the request to FW and return without waiting for a response.
+ */
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ static unsigned char buf[10];
+ unsigned int len;
+ struct hostif_msg_to_sensor *msg = (struct hostif_msg_to_sensor *)buf;
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ len = sizeof(struct hostif_msg_to_sensor);
+
+ memset(msg, 0, sizeof(struct hostif_msg_to_sensor));
+ msg->hdr.command = (report_type == HID_FEATURE_REPORT) ?
+ HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg->hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ msg->report_id = report_id;
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * ishtp_hid_link_ready_wait() - Wait for link ready
+ * @client_data: client data instance
+ *
+ * If the transport link has started the suspend process, then wait until it
+ * is either resumed or the wait times out
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
+{
+ int rc;
+
+ if (client_data->suspended) {
+ hid_ishtp_trace(client_data, "wait for link ready\n");
+ rc = wait_event_interruptible_timeout(
+ client_data->ishtp_resume_wait,
+ !client_data->suspended,
+ 5 * HZ);
+
+ if (rc == 0) {
+ hid_ishtp_trace(client_data, "link not ready\n");
+ return -EIO;
+ }
+ hid_ishtp_trace(client_data, "link ready\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_enum_enum_devices() - Enumerate hid devices
+ * @hid_ishtp_cl: client instance
+ *
+ * Helper function to send request to firmware to enumerate HID devices
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int retry_count;
+ int rv;
+
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_DM_ENUM_DEVICES;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *)&msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ retry_count = 0;
+ while (!client_data->enum_devices_done &&
+ retry_count < 10) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->enum_devices_done,
+ 3 * HZ);
+ ++retry_count;
+ if (!client_data->enum_devices_done)
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ rv = ishtp_cl_send(hid_ishtp_cl,
+ (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ }
+ if (!client_data->enum_devices_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out waiting for enum_devices\n");
+ return -ETIMEDOUT;
+ }
+ if (!client_data->hid_devices) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: failed to allocate HID dev structures\n");
+ return -ENOMEM;
+ }
+
+ client_data->num_hid_devices = client_data->hid_dev_count;
+ dev_info(&hid_ishtp_cl->device->dev,
+ "[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
+ client_data->num_hid_devices);
+
+ return 0;
+}
+
+/**
+ * ishtp_get_hid_descriptor() - Get hid descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to the firmware to get the HID
+ * descriptor of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int rv;
+
+ /* Get HID descriptor */
+ client_data->hid_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_HID_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->hid_descr_done) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->hid_descr_done,
+ 3 * HZ);
+ if (!client_data->hid_descr_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out for hid_descr_done\n");
+ return -EIO;
+ }
+
+ if (!client_data->hid_descr[index]) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: allocation HID desc fail\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_get_report_descriptor() - Get report descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to the firmware to get the HID report
+ * descriptor of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
+ int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int rv;
+
+ /* Get report descriptor */
+ client_data->report_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_REPORT_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->report_descr_done)
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->report_descr_done,
+ 3 * HZ);
+ if (!client_data->report_descr_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out for report descr\n");
+ return -EIO;
+ }
+ if (!client_data->report_descr[index]) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: failed to alloc report descr\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_init() - Init function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ * @reset: true if called for init after reset
+ *
+ * This function completes the initialization of the client. The summary of
+ * processing:
+ * - Send a request to enumerate the hid clients
+ * - Get the HID descriptor for each enumerated device
+ * - Get the report descriptor of each device
+ * - Register each device with hid core by calling ishtp_hid_probe
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int i;
+ int rv;
+
+ dev_dbg(&client_data->cl_device->dev, "%s\n", __func__);
+ hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
+
+ rv = ishtp_cl_link(hid_ishtp_cl, ISHTP_HOST_CLIENT_ID_ANY);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "ishtp_cl_link failed\n");
+ return -ENOMEM;
+ }
+
+ client_data->init_done = 0;
+
+ dev = hid_ishtp_cl->dev;
+
+ /* Connect to FW client */
+ hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
+ hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid);
+ if (i < 0) {
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ dev_err(&client_data->cl_device->dev,
+ "ish client uuid not found\n");
+ return i;
+ }
+ hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id;
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
+
+ rv = ishtp_cl_connect(hid_ishtp_cl);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ hid_ishtp_trace(client_data, "%s client connected\n", __func__);
+
+ /* Register read callback */
+ ishtp_register_event_cb(hid_ishtp_cl->device, ish_cl_event_cb);
+
+ rv = ishtp_enum_enum_devices(hid_ishtp_cl);
+ if (rv)
+ goto err_cl_disconnect;
+
+ hid_ishtp_trace(client_data, "%s enumerated device count %d\n",
+ __func__, client_data->num_hid_devices);
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ client_data->cur_hid_dev = i;
+
+ rv = ishtp_get_hid_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ rv = ishtp_get_report_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ if (!reset) {
+ rv = ishtp_hid_probe(i, client_data);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: HID probe for #%u failed: %d\n",
+ i, rv);
+ goto err_cl_disconnect;
+ }
+ }
+ } /* for() on all hid devices */
+
+ client_data->init_done = 1;
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ hid_ishtp_trace(client_data, "%s successful init\n", __func__);
+ return 0;
+
+err_cl_disconnect:
+ hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_cl_disconnect(hid_ishtp_cl);
+err_cl_unlink:
+ ishtp_cl_unlink(hid_ishtp_cl);
+ return rv;
+}
+
+/**
+ * hid_ishtp_cl_deinit() - Deinit function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ *
+ * Unlink and free hid client
+ */
+static void hid_ishtp_cl_deinit(struct ishtp_cl *hid_ishtp_cl)
+{
+ ishtp_cl_unlink(hid_ishtp_cl);
+ ishtp_cl_flush_queues(hid_ishtp_cl);
+
+ /* disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(hid_ishtp_cl);
+}
+
+static void hid_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ int retry;
+ int rv;
+
+ client_data = container_of(work, struct ishtp_cl_data, work);
+
+ hid_ishtp_cl = client_data->hid_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ dev_dbg(&cl_device->dev, "%s\n", __func__);
+
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ if (!hid_ishtp_cl)
+ return;
+
+ cl_device->driver_data = hid_ishtp_cl;
+ hid_ishtp_cl->client_data = client_data;
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+
+ client_data->num_hid_devices = 0;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
+ if (!rv)
+ break;
+ dev_err(&client_data->cl_device->dev, "Retry reset init\n");
+ }
+ if (rv) {
+ dev_err(&client_data->cl_device->dev, "Reset Failed\n");
+ hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
+ __func__, hid_ishtp_cl);
+ }
+}
+
+/**
+ * hid_ishtp_cl_probe() - ISHTP client driver probe
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device create on ISHTP bus
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ if (!cl_device)
+ return -ENODEV;
+
+ if (uuid_le_cmp(hid_ishtp_guid,
+ cl_device->fw_client->props.protocol_name) != 0)
+ return -ENODEV;
+
+ client_data = devm_kzalloc(&cl_device->dev, sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ if (!hid_ishtp_cl)
+ return -ENOMEM;
+
+ cl_device->driver_data = hid_ishtp_cl;
+ hid_ishtp_cl->client_data = client_data;
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->init_wait);
+ init_waitqueue_head(&client_data->ishtp_resume_wait);
+
+ INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
+ if (rv) {
+ ishtp_cl_free(hid_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_remove() - ISHTP client driver remove
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device remove on ISHTP bus
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ dev_dbg(&cl_device->dev, "%s\n", __func__);
+ hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_cl_disconnect(hid_ishtp_cl);
+ ishtp_put_device(cl_device);
+ ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = NULL;
+
+ client_data->num_hid_devices = 0;
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_reset() - ISHTP client driver reset
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device reset on ISHTP bus
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ schedule_work(&client_data->work);
+
+ return 0;
+}
+
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+
+/**
+ * hid_ishtp_cl_suspend() - ISHTP client driver suspend
+ * @device: device instance
+ *
+ * This function gets called on system suspend
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ client_data->suspended = true;
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_resume() - ISHTP client driver resume
+ * @device: device instance
+ *
+ * This function gets called on system resume
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_resume(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ client_data->suspended = false;
+ return 0;
+}
+
+static const struct dev_pm_ops hid_ishtp_pm_ops = {
+ .suspend = hid_ishtp_cl_suspend,
+ .resume = hid_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver hid_ishtp_cl_driver = {
+ .name = "ish-hid",
+ .probe = hid_ishtp_cl_probe,
+ .remove = hid_ishtp_cl_remove,
+ .reset = hid_ishtp_cl_reset,
+ .driver.pm = &hid_ishtp_pm_ops,
+};
+
+static int __init ish_hid_init(void)
+{
+ int rv;
+
+ /* Register ISHTP client device driver with ISHTP Bus */
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver);
+
+ return rv;
+
+}
+
+static void __exit ish_hid_exit(void)
+{
+ ishtp_cl_driver_unregister(&hid_ishtp_cl_driver);
+}
+
+late_initcall(ish_hid_init);
+module_exit(ish_hid_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP HID client driver");
+/* Primary author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/*
+ * Several modification for multi instance support
+ * suspend/resume and clean up
+ */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
new file mode 100644
index 000000000000..277983aa1d90
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -0,0 +1,246 @@
+/*
+ * ISHTP-HID glue driver.
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/hid.h>
+#include <uapi/linux/input.h>
+#include "ishtp/client.h"
+#include "ishtp-hid.h"
+
+/**
+ * ishtp_hid_parse() - hid-core .parse() callback
+ * @hid: hid device instance
+ *
+ * This function gets called during call to hid_add_device
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_hid_parse(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ int rv;
+
+ rv = hid_parse_report(hid, client_data->report_descr[hid_data->index],
+ client_data->report_descr_size[hid_data->index]);
+ if (rv)
+ return rv;
+
+ return 0;
+}
+
+/* Empty callbacks with success return code */
+static int ishtp_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_stop(struct hid_device *hid)
+{
+}
+
+static int ishtp_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_close(struct hid_device *hid)
+{
+}
+
+static int ishtp_raw_request(struct hid_device *hdev, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+ return 0;
+}
+
+/**
+ * ishtp_hid_request() - hid-core .request() callback
+ * @hid: hid device instance
+ * @rep: pointer to hid_report
+ * @reqtype: type of req. [GET|SET]_REPORT
+ *
+ * This function is used to set/get a feature/input report.
+ */
+static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
+ int reqtype)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ /* the specific report length, just HID part of it */
+ unsigned int len = ((rep->size - 1) >> 3) + 1 + (rep->id > 0);
+ char *buf;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ len += header_size;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_ishtp_get_report(hid, rep->id, rep->type);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ buf = kzalloc(len + 7, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hid_output_report(rep, buf + header_size);
+ hid_ishtp_set_feature(hid, buf, len, rep->id);
+ kfree(buf);
+ break;
+ }
+}
+
+/**
+ * ishtp_wait_for_response() - hid-core .wait() callback
+ * @hid: hid device instance
+ *
+ * This function is used to wait after a get feature/input report request.
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_wait_for_response(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ int rv;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(hid_data->client_data);
+ if (rv)
+ return rv;
+
+ if (!hid_data->request_done)
+ wait_event_interruptible_timeout(hid_data->hid_wait,
+ hid_data->request_done, 3 * HZ);
+
+ if (!hid_data->request_done) {
+ hid_err(hid,
+ "timeout waiting for response from ISHTP device\n");
+ return -ETIMEDOUT;
+ }
+ hid_ishtp_trace(client_data, "%s hid %p done\n", __func__, hid);
+
+ hid_data->request_done = false;
+
+ return 0;
+}
+
+/**
+ * ishtp_hid_wakeup() - Wakeup caller
+ * @hid: hid device instance
+ *
+ * This function will wake up the caller waiting for a Get/Set feature report
+ */
+void ishtp_hid_wakeup(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+
+ hid_data->request_done = true;
+ wake_up_interruptible(&hid_data->hid_wait);
+}
+
+static struct hid_ll_driver ishtp_hid_ll_driver = {
+ .parse = ishtp_hid_parse,
+ .start = ishtp_hid_start,
+ .stop = ishtp_hid_stop,
+ .open = ishtp_hid_open,
+ .close = ishtp_hid_close,
+ .request = ishtp_hid_request,
+ .wait = ishtp_wait_for_response,
+ .raw_request = ishtp_raw_request
+};
+
+/**
+ * ishtp_hid_probe() - Register a HID device with the hid core
+ * @cur_hid_dev: Index of the hid device to register
+ * @client_data: Client data pointer
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data)
+{
+ int rv;
+ struct hid_device *hid;
+ struct ishtp_hid_data *hid_data;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ rv = PTR_ERR(hid);
+ return -ENOMEM;
+ }
+
+ hid_data = kzalloc(sizeof(*hid_data), GFP_KERNEL);
+ if (!hid_data) {
+ rv = -ENOMEM;
+ goto err_hid_data;
+ }
+
+ hid_data->index = cur_hid_dev;
+ hid_data->client_data = client_data;
+ init_waitqueue_head(&hid_data->hid_wait);
+
+ hid->driver_data = hid_data;
+
+ client_data->hid_sensor_hubs[cur_hid_dev] = hid;
+
+ hid->ll_driver = &ishtp_hid_ll_driver;
+ hid->bus = BUS_INTEL_ISHTP;
+ hid->dev.parent = &client_data->cl_device->dev;
+ hid->version = le16_to_cpu(ISH_HID_VERSION);
+ hid->vendor = le16_to_cpu(ISH_HID_VENDOR);
+ hid->product = le16_to_cpu(ISH_HID_PRODUCT);
+ snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", "hid-ishtp",
+ hid->vendor, hid->product);
+
+ rv = hid_add_device(hid);
+ if (rv)
+ goto err_hid_device;
+
+ hid_ishtp_trace(client_data, "%s allocated hid %p\n", __func__, hid);
+
+ return 0;
+
+err_hid_device:
+ kfree(hid_data);
+err_hid_data:
+ kfree(hid);
+ return rv;
+}
+
+/**
+ * ishtp_hid_remove() - Remove registered hid devices
+ * @client_data: client data pointer
+ *
+ * This function is used to destroy the allocated HID devices.
+ */
+void ishtp_hid_remove(struct ishtp_cl_data *client_data)
+{
+ int i;
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (client_data->hid_sensor_hubs[i]) {
+ kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ client_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
new file mode 100644
index 000000000000..f5c7eb79b7b5
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -0,0 +1,182 @@
+/*
+ * ISHTP-HID glue driver's definitions.
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef ISHTP_HID__H
+#define ISHTP_HID__H
+
+/* The fixed ISH product and vendor id */
+#define ISH_HID_VENDOR 0x8086
+#define ISH_HID_PRODUCT 0x22D8
+#define ISH_HID_VERSION 0x0200
+
+#define CMD_MASK 0x7F
+#define IS_RESPONSE 0x80
+
+/* Used to dump to Linux trace buffer, if enabled */
+#define hid_ishtp_trace(client, ...) \
+ client->cl_device->ishtp_dev->print_log(\
+ client->cl_device->ishtp_dev, __VA_ARGS__)
+
+/* ISH Transport protocol (ISHTP in short) GUID */
+static const uuid_le hid_ishtp_guid = UUID_LE(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34,
+ 0xF0, 0xC2, 0x26);
+
+/* ISH HID message structure */
+struct hostif_msg_hdr {
+ uint8_t command; /* Bit 7: is_response */
+ uint8_t device_id;
+ uint8_t status;
+ uint8_t flags;
+ uint16_t size;
+} __packed;
+
+struct hostif_msg {
+ struct hostif_msg_hdr hdr;
+} __packed;
+
+struct hostif_msg_to_sensor {
+ struct hostif_msg_hdr hdr;
+ uint8_t report_id;
+} __packed;
+
+struct device_info {
+ uint32_t dev_id;
+ uint8_t dev_class;
+ uint16_t pid;
+ uint16_t vid;
+} __packed;
+
+struct ishtp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t hotfix;
+ uint16_t build;
+} __packed;
+
+/* struct for ISHTP aggregated input data */
+struct report_list {
+ uint16_t total_size;
+ uint8_t num_of_reports;
+ uint8_t flags;
+ struct {
+ uint16_t size_of_report;
+ uint8_t report[1];
+ } __packed reports[1];
+} __packed;
+
+/* HOSTIF commands */
+#define HOSTIF_HID_COMMAND_BASE 0
+#define HOSTIF_GET_HID_DESCRIPTOR 0
+#define HOSTIF_GET_REPORT_DESCRIPTOR 1
+#define HOSTIF_GET_FEATURE_REPORT 2
+#define HOSTIF_SET_FEATURE_REPORT 3
+#define HOSTIF_GET_INPUT_REPORT 4
+#define HOSTIF_PUBLISH_INPUT_REPORT 5
+#define HOSTIF_PUBLISH_INPUT_REPORT_LIST 6
+#define HOSTIF_DM_COMMAND_BASE 32
+#define HOSTIF_DM_ENUM_DEVICES 33
+#define HOSTIF_DM_ADD_DEVICE 34
+
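+/*
+ * Illustrative sketch (not used by the driver as-is): building a Get
+ * Feature report request header for a hypothetical device index 0 and
+ * report id 1, using the structures and command codes defined above.
+ * A response is recognised by testing hdr.command & CMD_MASK together
+ * with the IS_RESPONSE bit.
+ *
+ *	struct hostif_msg_to_sensor msg = { 0 };
+ *
+ *	msg.hdr.command = HOSTIF_GET_FEATURE_REPORT;
+ *	msg.hdr.device_id = 0;
+ *	msg.report_id = 1;
+ */
+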
+#define MAX_HID_DEVICES 32
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH TP HID Client
+ * @enum_devices_done: Enum devices response complete flag
+ * @hid_descr_done: HID descriptor complete flag
+ * @report_descr_done: Get report descriptor complete flag
+ * @init_done: Init process completed successfully
+ * @suspended: System is in suspend state or suspend is in progress
+ * @num_hid_devices: Number of HID devices enumerated in this client
+ * @cur_hid_dev: Device index for which initialization and registration
+ *			with the HID core is in progress
+ * @hid_devices: Store vid/pid/devid for each enumerated HID device
+ * @report_descr: Stores the raw report descriptors for each HID device
+ * @report_descr_size: Size of each raw report descriptor in report_descr[]
+ * @hid_sensor_hubs: Pointer to hid_device for each HID device, so that
+ *			they can be freed when the clients are removed
+ * @hid_descr: Pointer to the hid descriptor for each enumerated hid
+ *			device
+ * @hid_descr_size: Size of each hid descriptor in hid_descr[]
+ * @init_wait: Wait queue used during initialization, where the
+ *			client sends a message to the ISH FW and waits for response
+ * @ishtp_resume_wait: Wait queue used to wait for resume completion before
+ *			serving requests from the hid core
+ * @bad_recv_cnt: Running count of packets received with error
+ * @multi_packet_cnt: Count of fragmented (multi-packet) messages
+ *
+ * This structure is used to store completion flags and per client data,
+ * such as report descriptors and the number of HID devices.
+ */
+struct ishtp_cl_data {
+ /* completion flags */
+ bool enum_devices_done;
+ bool hid_descr_done;
+ bool report_descr_done;
+ bool init_done;
+ bool suspended;
+
+ unsigned int num_hid_devices;
+ unsigned int cur_hid_dev;
+ unsigned int hid_dev_count;
+
+ struct device_info *hid_devices;
+ unsigned char *report_descr[MAX_HID_DEVICES];
+ int report_descr_size[MAX_HID_DEVICES];
+ struct hid_device *hid_sensor_hubs[MAX_HID_DEVICES];
+ unsigned char *hid_descr[MAX_HID_DEVICES];
+ int hid_descr_size[MAX_HID_DEVICES];
+
+ wait_queue_head_t init_wait;
+ wait_queue_head_t ishtp_resume_wait;
+ struct ishtp_cl *hid_ishtp_cl;
+
+ /* Statistics */
+ unsigned int bad_recv_cnt;
+ int multi_packet_cnt;
+
+ struct work_struct work;
+ struct ishtp_cl_device *cl_device;
+};
+
+/**
+ * struct ishtp_hid_data - Per instance HID data
+ * @index: Device index in the order of enumeration
+ * @request_done: Get Feature/Input report complete flag
+ * used during get/set request from hid core
+ * @client_data: Link to the client instance
+ * @hid_wait: Completion waitq
+ *
+ * Used to tie hid->driver_data to the driver's client instance
+ */
+struct ishtp_hid_data {
+ int index;
+ bool request_done;
+ struct ishtp_cl_data *client_data;
+ wait_queue_head_t hid_wait;
+};
+
+/* Interface functions between HID LL driver and ISH TP client */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id);
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type);
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data);
+void ishtp_hid_remove(struct ishtp_cl_data *client_data);
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data);
+void ishtp_hid_wakeup(struct hid_device *hid);
+
+#endif /* ISHTP_HID__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
new file mode 100644
index 000000000000..256521509d20
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -0,0 +1,788 @@
+/*
+ * ISHTP bus driver
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "bus.h"
+#include "ishtp-dev.h"
+#include "client.h"
+#include "hbm.h"
+
+static int ishtp_use_dma;
+module_param_named(ishtp_use_dma, ishtp_use_dma, int, 0600);
+MODULE_PARM_DESC(ishtp_use_dma, "Use DMA to send messages");
+
+#define to_ishtp_cl_driver(d) container_of(d, struct ishtp_cl_driver, driver)
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+static bool ishtp_device_ready;
+
+/**
+ * ishtp_recv() - process ishtp message
+ * @dev: ishtp device
+ *
+ * If a message with a valid header and size is received, this
+ * function calls the appropriate handler. If both the host and
+ * firmware addresses are zero, it is a host bus management message;
+ * if only the host address is zero, it is a fixed-client message;
+ * otherwise it is a client message.
+ */
+void ishtp_recv(struct ishtp_device *dev)
+{
+ uint32_t msg_hdr;
+ struct ishtp_msg_hdr *ishtp_hdr;
+
+ /* Read ISHTP header dword */
+ msg_hdr = dev->ops->ishtp_read_hdr(dev);
+ if (!msg_hdr)
+ return;
+
+ dev->ops->sync_fw_clock(dev);
+
+ ishtp_hdr = (struct ishtp_msg_hdr *)&msg_hdr;
+ dev->ishtp_msg_hdr = msg_hdr;
+
+ /* Sanity check: ISHTP frag. length in header */
+ if (ishtp_hdr->length > dev->mtu) {
+ dev_err(dev->devc,
+ "ISHTP hdr - bad length: %u; dropped [%08X]\n",
+ (unsigned int)ishtp_hdr->length, msg_hdr);
+ return;
+ }
+
+ /* ISHTP bus message */
+ if (!ishtp_hdr->host_addr && !ishtp_hdr->fw_addr)
+ recv_hbm(dev, ishtp_hdr);
+ /* ISHTP fixed-client message */
+ else if (!ishtp_hdr->host_addr)
+ recv_fixed_cl_msg(dev, ishtp_hdr);
+ else
+ /* ISHTP client message */
+ recv_ishtp_cl_msg(dev, ishtp_hdr);
+}
+EXPORT_SYMBOL(ishtp_recv);
+
+/**
+ * ishtp_send_msg() - Send ishtp message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ * @ipc_send_compl: completion callback
+ * @ipc_send_compl_prm: completion callback parameter
+ *
+ * Send a multi fragment message via IPC. After sending the first fragment
+ * the completion callback is called to schedule transmit of next fragment.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_send_msg(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *msg, void(*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val;
+
+ drbl_val = dev->ops->ipc_get_header(dev, hdr->length +
+ sizeof(struct ishtp_msg_hdr),
+ 1);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), hdr, sizeof(uint32_t));
+ memcpy(ipc_msg + 2 * sizeof(uint32_t), msg, hdr->length);
+ return dev->ops->write(dev, ipc_send_compl, ipc_send_compl_prm,
+ ipc_msg, 2 * sizeof(uint32_t) + hdr->length);
+}
+
+/**
+ * ishtp_write_message() - Send ishtp single fragment message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @buf: message data
+ *
+ * Send a single fragment message via IPC. This returns IPC send message
+ * status.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_write_message(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ unsigned char *buf)
+{
+ return ishtp_send_msg(dev, hdr, buf, NULL, NULL);
+}
+
+/**
+ * ishtp_fw_cl_by_uuid() - locate index of fw client
+ * @dev: ishtp device
+ * @uuid: uuid of the client to search
+ *
+ * Search firmware client using UUID.
+ *
+ * Return: fw client index or -ENOENT if not found
+ */
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
+{
+ int i, res = -ENOENT;
+
+ for (i = 0; i < dev->fw_clients_num; ++i) {
+ if (uuid_le_cmp(*uuid, dev->fw_clients[i].props.protocol_name)
+ == 0) {
+ res = i;
+ break;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
+
+/**
+ * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
+ * @dev: the ishtp device structure
+ * @client_id: fw client id to search
+ *
+ * Search firmware client using client id.
+ *
+ * Return: index on success, -ENOENT on failure.
+ */
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id)
+{
+ int i, res = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ for (i = 0; i < dev->fw_clients_num; i++) {
+ if (dev->fw_clients[i].client_id == client_id) {
+ res = i;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ return res;
+}
+
+/**
+ * ishtp_cl_device_probe() - Bus probe() callback
+ * @dev: the device structure
+ *
+ * This is a bus probe callback and calls the driver's probe function.
+ *
+ * Return: Return value from driver probe() call.
+ */
+static int ishtp_cl_device_probe(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver || !driver->probe)
+ return -ENODEV;
+
+ return driver->probe(device);
+}
+
+/**
+ * ishtp_cl_device_remove() - Bus remove() callback
+ * @dev: the device structure
+ *
+ * This is a bus remove callback and calls the driver's remove function.
+ * Since the ISH driver model supports only built-in drivers, this is
+ * primarily called during pci driver init failure.
+ *
+ * Return: Return value from driver remove() call.
+ */
+static int ishtp_cl_device_remove(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device || !dev->driver)
+ return 0;
+
+ if (device->event_cb) {
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+ }
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver->remove) {
+ dev->driver = NULL;
+
+ return 0;
+ }
+
+ return driver->remove(device);
+}
+
+/**
+ * ishtp_cl_device_suspend() - Bus suspend callback
+ * @dev: device
+ *
+ * Called during device suspend process.
+ *
+ * Return: Return value from driver suspend() call.
+ */
+static int ishtp_cl_device_suspend(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->suspend)
+ ret = driver->driver.pm->suspend(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_resume() - Bus resume callback
+ * @dev: device
+ *
+ * Called during device resume process.
+ *
+ * Return: Return value from driver resume() call.
+ */
+static int ishtp_cl_device_resume(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ /*
+ * When ISH needs a hard reset, it is done asynchronously, hence bus
+ * resume will be called before full ISH resume
+ */
+ if (device->ishtp_dev->resume_flag)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->resume)
+ ret = driver->driver.pm->resume(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_reset() - Reset callback
+ * @device: ishtp client device instance
+ *
+ * This callback is invoked when a HW reset is done and the device
+ * needs to be reinitialized.
+ *
+ * Return: Return value from driver reset() call.
+ */
+static int ishtp_cl_device_reset(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+
+ driver = to_ishtp_cl_driver(device->dev.driver);
+ if (driver && driver->reset)
+ ret = driver->reset(device);
+
+ return ret;
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+
+static struct device_attribute ishtp_cl_dev_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL,
+};
+
+static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+ return -ENOMEM;
+ return 0;
+}
+
+static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
+ /* Suspend callbacks */
+ .suspend = ishtp_cl_device_suspend,
+ .resume = ishtp_cl_device_resume,
+ /* Hibernate callbacks */
+ .freeze = ishtp_cl_device_suspend,
+ .thaw = ishtp_cl_device_resume,
+ .restore = ishtp_cl_device_resume,
+};
+
+static struct bus_type ishtp_cl_bus_type = {
+ .name = "ishtp",
+ .dev_attrs = ishtp_cl_dev_attrs,
+ .probe = ishtp_cl_device_probe,
+ .remove = ishtp_cl_device_remove,
+ .pm = &ishtp_cl_bus_dev_pm_ops,
+ .uevent = ishtp_cl_uevent,
+};
+
+static void ishtp_cl_dev_release(struct device *dev)
+{
+ kfree(to_ishtp_cl_device(dev));
+}
+
+static struct device_type ishtp_cl_device_type = {
+ .release = ishtp_cl_dev_release,
+};
+
+/**
+ * ishtp_bus_add_device() - Function to create device on bus
+ * @dev: ishtp device
+ * @uuid: uuid of the client
+ * @name: Name of the client
+ *
+ * Allocate ISHTP bus client device, attach it to uuid
+ * and register with ISHTP bus.
+ *
+ * Return: ishtp_cl_device pointer or NULL on failure
+ */
+static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+ uuid_le uuid, char *name)
+{
+ struct ishtp_cl_device *device;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_for_each_entry(device, &dev->device_list, device_link) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ device->fw_client = &dev->fw_clients[
+ dev->fw_client_presentation_num - 1];
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ ishtp_cl_device_reset(device);
+ return device;
+ }
+ }
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ device = kzalloc(sizeof(struct ishtp_cl_device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.parent = dev->devc;
+ device->dev.bus = &ishtp_cl_bus_type;
+ device->dev.type = &ishtp_cl_device_type;
+ device->ishtp_dev = dev;
+
+ device->fw_client =
+ &dev->fw_clients[dev->fw_client_presentation_num - 1];
+
+ dev_set_name(&device->dev, "%s", name);
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_add_tail(&device->device_link, &dev->device_list);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ status = device_register(&device->dev);
+ if (status) {
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_del(&device->device_link);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ dev_err(dev->devc, "Failed to register ISHTP client device\n");
+ kfree(device);
+ return NULL;
+ }
+
+ ishtp_device_ready = true;
+
+ return device;
+}
+
+/**
+ * ishtp_bus_remove_device() - Function to release a device on the bus
+ * @device: client device instance
+ *
+ * This is the counterpart of ishtp_bus_add_device().
+ * The device is unregistered; the device structure is freed in the
+ * 'ishtp_cl_dev_release' function.
+ * Called only on error in the pci driver init path.
+ */
+static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
+{
+ device_unregister(&device->dev);
+}
+
+/**
+ * __ishtp_cl_driver_register() - Client driver register
+ * @driver: the client driver instance
+ * @owner: Owner of this driver module
+ *
+ * Once a client driver is probed, it creates a client
+ * instance and registers it with the bus.
+ *
+ * Return: Return value of driver_register or -ENODEV if not ready
+ */
+int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
+{
+ int err;
+
+ if (!ishtp_device_ready)
+ return -ENODEV;
+
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.bus = &ishtp_cl_bus_type;
+
+ err = driver_register(&driver->driver);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(__ishtp_cl_driver_register);
+
+/**
+ * ishtp_cl_driver_unregister() - Client driver unregister
+ * @driver: the client driver instance
+ *
+ * Unregister client during device removal process.
+ */
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_unregister);
+
+/**
+ * ishtp_bus_event_work() - event work function
+ * @work: work struct pointer
+ *
+ * Once an event is received for a client this work
+ * function is called. If the device has registered a
+ * callback then the callback is called.
+ */
+static void ishtp_bus_event_work(struct work_struct *work)
+{
+ struct ishtp_cl_device *device;
+
+ device = container_of(work, struct ishtp_cl_device, event_work);
+
+ if (device->event_cb)
+ device->event_cb(device);
+}
+
+/**
+ * ishtp_cl_bus_rx_event() - schedule event work
+ * @device: client device instance
+ *
+ * Once an event is received for a client, this schedules
+ * a work function to process it.
+ */
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
+{
+ if (!device || !device->event_cb)
+ return;
+
+ if (device->event_cb)
+ schedule_work(&device->event_work);
+}
+
+/**
+ * ishtp_register_event_cb() - Register callback
+ * @device: client device instance
+ * @event_cb: Event processor for a client
+ *
+ * Register a callback for events, called from client driver
+ *
+ * Return: Return 0 or -EALREADY if already registered
+ */
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*event_cb)(struct ishtp_cl_device *))
+{
+ if (device->event_cb)
+ return -EALREADY;
+
+ device->event_cb = event_cb;
+ INIT_WORK(&device->event_work, ishtp_bus_event_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_register_event_cb);
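+
+/*
+ * Illustrative sketch: a client driver typically registers its RX callback
+ * right after a successful connect ("my_event_cb" is a hypothetical function
+ * that would drain the client's in_process_list and recycle the buffers):
+ *
+ *	rv = ishtp_register_event_cb(cl_device, my_event_cb);
+ *	if (rv)
+ *		return rv;
+ */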
+
+/**
+ * ishtp_get_device() - update usage count for the device
+ * @cl_device: client device instance
+ *
+ * Increment the usage count. The device can't be deleted while the count is non-zero.
+ */
+void ishtp_get_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count++;
+}
+EXPORT_SYMBOL(ishtp_get_device);
+
+/**
+ * ishtp_put_device() - decrement usage count for the device
+ * @cl_device: client device instance
+ *
+ * Decrement the usage count. The device can be deleted when the count reaches 0.
+ */
+void ishtp_put_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count--;
+}
+EXPORT_SYMBOL(ishtp_put_device);
+
+/**
+ * ishtp_bus_new_client() - Create a new client
+ * @dev: ISHTP device instance
+ *
+ * Once bus protocol enumerates a client, this is called
+ * to add a device for the client.
+ *
+ * Return: 0 on success or error code on failure
+ */
+int ishtp_bus_new_client(struct ishtp_device *dev)
+{
+ int i;
+ char *dev_name;
+ struct ishtp_cl_device *cl_device;
+ uuid_le device_uuid;
+
+ /*
+ * For all reported clients, create an unconnected client and add its
+ * device to ISHTP bus.
+ * If appropriate driver has loaded, this will trigger its probe().
+ * Otherwise, probe() will be called when driver is loaded
+ */
+ i = dev->fw_client_presentation_num - 1;
+ device_uuid = dev->fw_clients[i].props.protocol_name;
+ dev_name = kasprintf(GFP_KERNEL,
+ "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
+ device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
+ device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
+ device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
+ device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
+ device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
+ device_uuid.b[15]);
+ if (!dev_name)
+ return -ENOMEM;
+
+ cl_device = ishtp_bus_add_device(dev, device_uuid, dev_name);
+ if (!cl_device) {
+ kfree(dev_name);
+ return -ENOENT;
+ }
+
+ kfree(dev_name);
+
+ return 0;
+}
+
+/**
+ * ishtp_cl_device_bind() - bind a device
+ * @cl: ishtp client device
+ *
+ * Binds connected ishtp_cl to ISHTP bus device
+ *
+ * Return: 0 on success or fault code
+ */
+int ishtp_cl_device_bind(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_device *cl_device;
+ unsigned long flags;
+ int rv;
+
+ if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
+ return -EFAULT;
+
+ rv = -ENOENT;
+ spin_lock_irqsave(&cl->dev->device_list_lock, flags);
+ list_for_each_entry(cl_device, &cl->dev->device_list,
+ device_link) {
+ if (cl_device->fw_client->client_id == cl->fw_client_id) {
+ cl->device = cl_device;
+ rv = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_bus_remove_all_clients() - Remove all clients
+ * @ishtp_dev: ishtp device
+ * @warm_reset: Reset due to a FW-initiated reset on error or S3 suspend
+ *
+ * This is part of the reset/remove flow. Its main purpose is error
+ * processing: if the FW has been forcibly reset or hit an error, remove
+ * the connected clients. On a warm reset the client devices themselves
+ * are not removed.
+ */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset)
+{
+ struct ishtp_cl_device *cl_device, *n;
+ struct ishtp_cl *cl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /*
+ * Wake any pending process. The waiter would check dev->state
+ * and determine that it's not enabled already,
+ * and will return error to its caller
+ */
+ wake_up_interruptible(&cl->wait_ctrl_res);
+
+ /* Disband any pending read/write requests and free rb */
+ ishtp_cl_flush_queues(cl);
+
+ /* Remove all free and in_process rings, both Rx and Tx */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+
+ /*
+ * Free client and ISHTP bus client device structures;
+ * don't free the host client because it is part of the OS fd
+ * structure
+ */
+ }
+ spin_unlock_irqrestore(&ishtp_dev->cl_list_lock, flags);
+
+ /* Release DMA buffers for client messages */
+ ishtp_cl_free_dma_buf(ishtp_dev);
+
+ /* remove bus clients */
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
+ device_link) {
+ if (warm_reset && cl_device->reference_count)
+ continue;
+
+ list_del(&cl_device->device_link);
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+ ishtp_bus_remove_device(cl_device);
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+
+ /* Free all client structures */
+ spin_lock_irqsave(&ishtp_dev->fw_clients_lock, flags);
+ kfree(ishtp_dev->fw_clients);
+ ishtp_dev->fw_clients = NULL;
+ ishtp_dev->fw_clients_num = 0;
+ ishtp_dev->fw_client_presentation_num = 0;
+ ishtp_dev->fw_client_index = 0;
+ bitmap_zero(ishtp_dev->fw_clients_map, ISHTP_CLIENTS_MAX);
+ spin_unlock_irqrestore(&ishtp_dev->fw_clients_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_bus_remove_all_clients);
+
+/**
+ * ishtp_reset_handler() - IPC reset handler
+ * @dev: ishtp device
+ *
+ * ISHTP Handler for IPC_RESET notification
+ */
+void ishtp_reset_handler(struct ishtp_device *dev)
+{
+ unsigned long flags;
+
+ /* Handle FW-initiated reset */
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ /* Clear BH processing queue - no further HBMs */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ dev->rd_msg_fifo_head = dev->rd_msg_fifo_tail = 0;
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+
+ /* Handle ISH FW reset against upper layers */
+ ishtp_bus_remove_all_clients(dev, true);
+}
+EXPORT_SYMBOL(ishtp_reset_handler);
+
+/**
+ * ishtp_reset_compl_handler() - Reset completion handler
+ * @dev: ishtp device
+ *
+ * ISHTP handler for IPC_RESET sequence completion to start
+ * host message bus start protocol sequence.
+ */
+void ishtp_reset_compl_handler(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INIT_CLIENTS;
+ dev->hbm_state = ISHTP_HBM_START;
+ ishtp_hbm_start_req(dev);
+}
+EXPORT_SYMBOL(ishtp_reset_compl_handler);
+
+/**
+ * ishtp_use_dma_transfer() - Whether DMA should be used
+ *
+ * This interface is used to check whether DMA transfers are enabled
+ * (via the ishtp_use_dma module parameter).
+ *
+ * Return: non-zero if DMA can be used
+ */
+int ishtp_use_dma_transfer(void)
+{
+ return ishtp_use_dma;
+}
+
+/**
+ * ishtp_bus_register() - Function to register the bus
+ *
+ * This registers the ishtp bus.
+ *
+ * Return: Return value of bus_register()
+ */
+static int __init ishtp_bus_register(void)
+{
+ return bus_register(&ishtp_cl_bus_type);
+}
+
+/**
+ * ishtp_bus_unregister() - Function to unregister the bus
+ *
+ * This unregisters the ishtp bus.
+ */
+static void __exit ishtp_bus_unregister(void)
+{
+ bus_unregister(&ishtp_cl_bus_type);
+}
+
+module_init(ishtp_bus_register);
+module_exit(ishtp_bus_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
new file mode 100644
index 000000000000..a1ffae7f26ad
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -0,0 +1,114 @@
+/*
+ * ISHTP bus definitions
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_ISHTP_CL_BUS_H
+#define _LINUX_ISHTP_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct ishtp_cl;
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_msg_hdr;
+
+/**
+ * struct ishtp_cl_device - ISHTP device handle
+ * @dev: device pointer
+ * @ishtp_dev: pointer to the ishtp device structure, primarily to access
+ * hw device operation callbacks and properties
+ * @fw_client: fw_client pointer to get fw information like protocol name,
+ * max message length, etc.
+ * @device_link: Link to next client in the list on a bus
+ * @event_work: Used to schedule rx event for client
+ * @driver_data: Storage for driver private data
+ * @reference_count: Used for get/put device
+ * @event_cb: Callback to driver to send events
+ *
+ * An ishtp_cl_device pointer is returned from ishtp_bus_add_device()
+ * and links ISHTP bus clients to their actual host client pointer.
+ * Drivers for ISHTP devices will get an ishtp_cl_device pointer
+ * when being probed and shall use it for doing bus I/O.
+ */
+struct ishtp_cl_device {
+ struct device dev;
+ struct ishtp_device *ishtp_dev;
+ struct ishtp_fw_client *fw_client;
+ struct list_head device_link;
+ struct work_struct event_work;
+ void *driver_data;
+ int reference_count;
+ void (*event_cb)(struct ishtp_cl_device *device);
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver
+ * @driver: driver instance on a bus
+ * @name: Name of the device for probe
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ * @reset: driver callback on device reset, after a FW reset
+ * @pm: device power management callbacks
+ *
+ * Client drivers define this to get probed/removed for an ISHTP client
+ * device.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ int (*probe)(struct ishtp_cl_device *dev);
+ int (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
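+
+/*
+ * Minimal registration sketch (illustrative only; "my_probe", "my_remove"
+ * and "my_ishtp_driver" are hypothetical names). A client driver fills in
+ * this structure and registers it with ishtp_cl_driver_register(), declared
+ * below; probe() is then called once a client device is enumerated on the
+ * ISHTP bus:
+ *
+ *	static struct ishtp_cl_driver my_ishtp_driver = {
+ *		.name = "my-ishtp-client",
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	ishtp_cl_driver_register(&my_ishtp_driver);
+ */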
+
+
+int ishtp_bus_new_client(struct ishtp_device *dev);
+void ishtp_remove_all_clients(struct ishtp_device *dev);
+int ishtp_cl_device_bind(struct ishtp_cl *cl);
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
+
+/* Write a multi-fragment message */
+int ishtp_send_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr, void *msg,
+ void (*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm);
+
+/* Write a single-fragment message */
+int ishtp_write_message(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr,
+ unsigned char *buf);
+
+/* Use DMA to send/receive messages */
+int ishtp_use_dma_transfer(void);
+
+/* Exported functions */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset);
+
+void ishtp_recv(struct ishtp_device *dev);
+void ishtp_reset_handler(struct ishtp_device *dev);
+void ishtp_reset_compl_handler(struct ishtp_device *dev);
+
+void ishtp_put_device(struct ishtp_cl_device *);
+void ishtp_get_device(struct ishtp_cl_device *);
+
+int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+#define ishtp_cl_driver_register(driver) \
+ __ishtp_cl_driver_register(driver, THIS_MODULE)
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+
+#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
new file mode 100644
index 000000000000..b9b917d2d50d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -0,0 +1,257 @@
+/*
+ * ISHTP Ring Buffers
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize RX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ struct ishtp_cl_rb *rb;
+ int ret = 0;
+ unsigned long flags;
+
+ for (j = 0; j < cl->rx_ring_size; ++j) {
+ rb = ishtp_io_rb_init(cl);
+ if (!rb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = ishtp_io_rb_alloc_buf(rb, len);
+ if (ret)
+ goto out;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+
+ return 0;
+
+out:
+ dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
+ ishtp_cl_free_rx_ring(cl);
+ return ret;
+}
+
+/**
+ * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize TX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ unsigned long flags;
+
+ /* Allocate pool to free Tx bufs */
+ for (j = 0; j < cl->tx_ring_size; ++j) {
+ struct ishtp_cl_tx_ring *tx_buf;
+
+ tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
+ if (!tx_buf)
+ goto out;
+
+ tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf->send_buf.data) {
+ kfree(tx_buf);
+ goto out;
+ }
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+ }
+ return 0;
+out:
+ dev_err(&cl->device->dev, "error in allocating Tx pool\n");
+ ishtp_cl_free_tx_ring(cl);
+ return -ENOMEM;
+}
+
+/**
+ * ishtp_cl_free_rx_ring() - Free RX ring buffers
+ * @cl: client device instance
+ *
+ * Free RX ring buffers
+ */
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ unsigned long flags;
+
+ /* release allocated memory - pass over free_rb_list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ while (!list_empty(&cl->free_rb_list.list)) {
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
+ list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ /* release allocated memory - pass over in_process_list */
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ while (!list_empty(&cl->in_process_list.list)) {
+ rb = list_entry(cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_free_tx_ring() - Free TX ring buffers
+ * @cl: client device instance
+ *
+ * Free TX ring buffers
+ */
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_tx_ring *tx_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ /* release allocated memory - pass over tx_free_list */
+ while (!list_empty(&cl->tx_free_list.list)) {
+ tx_buf = list_entry(cl->tx_free_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, flags);
+ /* release allocated memory - pass over tx_list */
+ while (!list_empty(&cl->tx_list.list)) {
+ tx_buf = list_entry(cl->tx_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
+}
+
+/**
+ * ishtp_io_rb_free() - Free IO request block
+ * @rb: IO request block
+ *
+ * Free io request block memory
+ */
+void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
+{
+ if (rb == NULL)
+ return;
+
+ kfree(rb->buffer.data);
+ kfree(rb);
+}
+
+/**
+ * ishtp_io_rb_init() - Allocate and init IO request block
+ * @cl: client device instance
+ *
+ * Allocate and initialize request block
+ *
+ * Return: Allocated IO request block pointer
+ */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+
+ rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
+ if (!rb)
+ return NULL;
+
+ INIT_LIST_HEAD(&rb->list);
+ rb->cl = cl;
+ rb->buf_idx = 0;
+ return rb;
+}
+
+/**
+ * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
+ * @rb: IO request block
+ * @length: length of response buffer
+ *
+ * Allocate response buffer
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
+{
+ if (!rb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ rb->buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!rb->buffer.data)
+ return -ENOMEM;
+
+ rb->buffer.size = length;
+ return 0;
+}
+
+/**
+ * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
+ * @rb: IO request block
+ *
+ * Re-append rb to its client's free list and send flow control if needed
+ *
+ * Return: 0 on success else -EFAULT
+ */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
+{
+ struct ishtp_cl *cl;
+ int rets = 0;
+ unsigned long flags;
+
+ if (!rb || !rb->cl)
+ return -EFAULT;
+
+ cl = rb->cl;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ /*
+ * If we returned the first buffer to empty 'free' list,
+ * send flow control
+ */
+ if (!cl->out_flow_ctrl_creds)
+ rets = ishtp_cl_read_start(cl);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
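+
+/*
+ * Illustrative sketch of a consumer (process_payload() is a hypothetical
+ * helper): once a received buffer taken from the client's in_process_list
+ * has been consumed, recycling it returns it to the free list and, when
+ * needed, re-arms flow control towards the firmware.
+ *
+ *	list_del_init(&rb->list);
+ *	process_payload(rb->buffer.data, rb->buf_idx);
+ *	ishtp_cl_io_rb_recycle(rb);
+ */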
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
new file mode 100644
index 000000000000..aad61328f282
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -0,0 +1,1054 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_read_list_flush() - Flush read queue
+ * @cl: ishtp client instance
+ *
+ * Used to remove all entries from read queue for a client
+ */
+static void ishtp_read_list_flush(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
+ list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
+ if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ }
+ spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_flush_queues() - Flush all queues for a client
+ * @cl: ishtp client instance
+ *
+ * Used to remove all queues for a client. This is called when a client device
+ * needs reset due to error, S3 resume or during module removal
+ *
+ * Return: 0 on success else -EINVAL if device is NULL
+ */
+int ishtp_cl_flush_queues(struct ishtp_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ ishtp_read_list_flush(cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_flush_queues);
+
+/**
+ * ishtp_cl_init() - Initialize all fields of a client device
+ * @cl: ishtp client instance
+ * @dev: ishtp device
+ *
+ * Initializes a client device fields: Init spinlocks, init queues etc.
+ * This function is called during new client creation
+ */
+static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
+{
+ memset(cl, 0, sizeof(struct ishtp_cl));
+ init_waitqueue_head(&cl->wait_ctrl_res);
+ spin_lock_init(&cl->free_list_spinlock);
+ spin_lock_init(&cl->in_process_spinlock);
+ spin_lock_init(&cl->tx_list_spinlock);
+ spin_lock_init(&cl->tx_free_list_spinlock);
+ spin_lock_init(&cl->fc_spinlock);
+ INIT_LIST_HEAD(&cl->link);
+ cl->dev = dev;
+
+ INIT_LIST_HEAD(&cl->free_rb_list.list);
+ INIT_LIST_HEAD(&cl->tx_list.list);
+ INIT_LIST_HEAD(&cl->tx_free_list.list);
+ INIT_LIST_HEAD(&cl->in_process_list.list);
+
+ cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
+ cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+
+ /* dma */
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->last_dma_acked = 1;
+ cl->last_dma_addr = NULL;
+ cl->last_ipc_acked = 1;
+}
+
+/**
+ * ishtp_cl_allocate() - allocates client structure and sets it up.
+ * @dev: ishtp device
+ *
+ * Allocate memory for a new client device and initialize each field.
+ *
+ * Return: The allocated client instance or NULL on failure
+ */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_cl *cl;
+
+ cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ ishtp_cl_init(cl, dev);
+ return cl;
+}
+EXPORT_SYMBOL(ishtp_cl_allocate);
+
+/**
+ * ishtp_cl_free() - Frees a client device
+ * @cl: client device instance
+ *
+ * Frees a client device
+ */
+void ishtp_cl_free(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+
+ if (!cl)
+ return;
+
+ dev = cl->dev;
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+ kfree(cl);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_free);
+
+/**
+ * ishtp_cl_link() - Reserve a host id and link the client instance
+ * @cl: client device instance
+ * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
+ * available id can be used
+ *
+ * This allocates a single bit in the hostmap. This function will make sure
+ * that not too many client sessions are opened at the same time. Once allocated
+ * the client device instance is added to the ishtp device in the current
+ * client list
+ *
+ * Return: 0 or error code on failure
+ */
+int ishtp_cl_link(struct ishtp_cl *cl, int id)
+{
+ struct ishtp_device *dev;
+ unsigned long flags, flags_cl;
+ int ret = 0;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+
+ if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
+ ret = -EMFILE;
+ goto unlock_dev;
+ }
+
+ /* If Id is not assigned get one*/
+ if (id == ISHTP_HOST_CLIENT_ID_ANY)
+ id = find_first_zero_bit(dev->host_clients_map,
+ ISHTP_CLIENTS_MAX);
+
+ if (id >= ISHTP_CLIENTS_MAX) {
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+ cl->host_client_id = id;
+ spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ret = -ENODEV;
+ goto unlock_cl;
+ }
+ list_add_tail(&cl->link, &dev->cl_list);
+ set_bit(id, dev->host_clients_map);
+ cl->state = ISHTP_CL_INITIALIZING;
+
+unlock_cl:
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
+unlock_dev:
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ishtp_cl_link);
+
+/**
+ * ishtp_cl_unlink() - remove fw_cl from the client device list
+ * @cl: client device instance
+ *
+ * Remove a previously linked device to a ishtp device
+ */
+void ishtp_cl_unlink(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ /* don't shout on error exit path */
+ if (!cl || !cl->dev)
+ return;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+ if (dev->open_handle_count > 0) {
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ dev->open_handle_count--;
+ }
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+
+ /*
+ * This checks that 'cl' is actually linked into device's structure,
+ * before attempting 'list_del'
+ */
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link)
+ if (cl->host_client_id == pos->host_client_id) {
+ list_del_init(&pos->link);
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_unlink);
+
+/**
+ * ishtp_cl_disconnect() - Send disconnect request to firmware
+ * @cl: client device instance
+ *
+ * Send a disconnect request for a client to firmware.
+ *
+ * Return: 0 if successful disconnect response from the firmware or error
+ * code on failure
+ */
+int ishtp_cl_disconnect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
+
+ if (cl->state != ISHTP_CL_DISCONNECTING) {
+ dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
+ return 0;
+ }
+
+ if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
+ dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
+ dev_err(&cl->device->dev, "failed to disconnect.\n");
+ return -ENODEV;
+ }
+
+ err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state != ISHTP_DEV_ENABLED ||
+ cl->state == ISHTP_CL_DISCONNECTED),
+ ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
+
+ /*
+ * If FW reset arrived, this will happen. Don't check cl->,
+ * as 'cl' may be freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (cl->state == ISHTP_CL_DISCONNECTED) {
+ dev->print_log(dev, "%s() successful\n", __func__);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_cl_disconnect);
+
+/**
+ * ishtp_cl_is_other_connecting() - Check if another client is connecting
+ * @cl: client device instance
+ *
+ * Checks if another client with the same fw client id is connecting
+ *
+ * Return: true if another client is connecting, else false
+ */
+static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link) {
+ if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
+ cl->fw_client_id == pos->fw_client_id) {
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+ return false;
+}
+
+/**
+ * ishtp_cl_connect() - Send connect request to firmware
+ * @cl: client device instance
+ *
+ * Send a connect request for a client to firmware. If successful, it will
+ * also allocate RX and TX ring buffers.
+ *
+ * Return: 0 if successful connect response from the firmware and able
+ * to bind and allocate ring buffers or error code on failure
+ */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
+
+ if (ishtp_cl_is_other_connecting(cl)) {
+ dev->print_log(dev, "%s() Busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (ishtp_hbm_cl_connect_req(dev, cl)) {
+ dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
+ return -ENODEV;
+ }
+
+ rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state == ISHTP_DEV_ENABLED &&
+ (cl->state == ISHTP_CL_CONNECTED ||
+ cl->state == ISHTP_CL_DISCONNECTED)),
+ ishtp_secs_to_jiffies(
+ ISHTP_CL_CONNECT_TIMEOUT));
+ /*
+ * If FW reset arrived, this will happen. Don't check cl->,
+ * as 'cl' may be freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rets = cl->status;
+ if (rets) {
+ dev->print_log(dev, "%s() Invalid status\n", __func__);
+ return rets;
+ }
+
+ rets = ishtp_cl_device_bind(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Bind error\n", __func__);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_rx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_tx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ /* Upon successful connection and allocation, emit flow-control */
+ rets = ishtp_cl_read_start(cl);
+
+ dev->print_log(dev, "%s() successful\n", __func__);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_connect);
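+
+/*
+ * Typical connect sequence from a client driver's probe() (illustrative
+ * sketch; error handling is trimmed and "guid" is a hypothetical uuid of
+ * the firmware client to talk to):
+ *
+ *	cl = ishtp_cl_allocate(dev);
+ *	rv = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
+ *	i = ishtp_fw_cl_by_uuid(dev, &guid);
+ *	cl->fw_client_id = dev->fw_clients[i].client_id;
+ *	cl->state = ISHTP_CL_CONNECTING;
+ *	rv = ishtp_cl_connect(cl);
+ */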
+
+/**
+ * ishtp_cl_read_start() - Prepare to read client message
+ * @cl: client device instance
+ *
+ * Get a free buffer from the pool of free read buffers and add it to the
+ * device read list. Send a flow control request to the firmware so that it
+ * is able to send the next message.
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_read_start(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl_rb *rb;
+ int rets;
+ int i;
+ unsigned long flags;
+ unsigned long dev_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return -ENODEV;
+
+ i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (i < 0) {
+ dev_err(&cl->device->dev, "no such fw client %d\n",
+ cl->fw_client_id);
+ return -ENODEV;
+ }
+
+ /* The current rb is the head of the free rb list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ if (list_empty(&cl->free_rb_list.list)) {
+ dev_warn(&cl->device->dev,
+ "[ishtp-ish] Rx buffers pool is empty\n");
+ rets = -ENOMEM;
+ rb = NULL;
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ goto out;
+ }
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ rb->cl = cl;
+ rb->buf_idx = 0;
+
+ INIT_LIST_HEAD(&rb->list);
+ rets = 0;
+
+ /*
+ * This must be BEFORE sending flow control -
+ * response in ISR may come too fast...
+ */
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_add_tail(&rb->list, &dev->read_list.list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+out:
+ /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
+ if (rets && rb) {
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_del(&rb->list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+ return rets;
+}
+
+/**
+ * ishtp_cl_send() - Send a message to firmware
+ * @cl: client device instance
+ * @buf: message buffer
+ * @length: length of message
+ *
+ * If the client is in the correct state to send a message, this function
+ * gets a buffer from the tx ring, copies the message data and sends it
+ * using ishtp_cl_send_msg()
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
+{
+ struct ishtp_device *dev;
+ int id;
+ struct ishtp_cl_tx_ring *cl_msg;
+ int have_msg_to_send = 0;
+ unsigned long tx_flags, tx_free_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ ++cl->err_send_msg;
+ return -EPIPE;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ++cl->err_send_msg;
+ return -ENODEV;
+ }
+
+ /* Check if we have fw client device */
+ id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (id < 0) {
+ ++cl->err_send_msg;
+ return -ENOENT;
+ }
+
+ if (length > dev->fw_clients[id].props.max_msg_length) {
+ ++cl->err_send_msg;
+ return -EMSGSIZE;
+ }
+
+ /* No free bufs */
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ if (list_empty(&cl->tx_free_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ ++cl->err_send_msg;
+ return -ENOMEM;
+ }
+
+ cl_msg = list_first_entry(&cl->tx_free_list.list,
+ struct ishtp_cl_tx_ring, list);
+ if (!cl_msg->send_buf.data) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ return -EIO;
+ /* Should not happen, as free list is pre-allocated */
+ }
+ /*
+ * This is safe, as 'length' is already checked for not exceeding
+ * max ISHTP message size per client
+ */
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ memcpy(cl_msg->send_buf.data, buf, length);
+ cl_msg->send_buf.size = length;
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ have_msg_to_send = !list_empty(&cl->tx_list.list);
+ list_add_tail(&cl_msg->list, &cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
+ ishtp_cl_send_msg(dev, cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_send);
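+
+/*
+ * Illustrative sketch: sending a fixed-size request from a client driver
+ * ("msg" stands for a request structure such as the ones in ishtp-hid.h):
+ *
+ *	rv = ishtp_cl_send(cl, (uint8_t *)&msg, sizeof(msg));
+ *	if (rv)
+ *		dev_err(&cl->device->dev, "send failed: %d\n", rv);
+ */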
+
+/**
+ * ishtp_cl_read_complete() - read complete
+ * @rb: Pointer to client request block
+ *
+ * If the message is completely received, call ishtp_cl_bus_rx_event()
+ * to process it
+ */
+static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+{
+ unsigned long flags;
+ int schedule_work_flag = 0;
+ struct ishtp_cl *cl = rb->cl;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ /*
+ * if in-process list is empty, then need to schedule
+ * the processing thread
+ */
+ schedule_work_flag = list_empty(&cl->in_process_list.list);
+ list_add_tail(&rb->list, &cl->in_process_list.list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+
+ if (schedule_work_flag)
+ ishtp_cl_bus_rx_event(cl->device);
+}
+
+/**
+ * ipc_tx_callback() - IPC tx callback function
+ * @prm: Pointer to client device instance
+ *
+ * Send a message over IPC, either for the first time or from the completion
+ * callback of the previous fragment
+ */
+static void ipc_tx_callback(void *prm)
+{
+ struct ishtp_cl *cl = prm;
+ struct ishtp_cl_tx_ring *cl_msg;
+ size_t rem;
+ struct ishtp_device *dev = (cl ? cl->dev : NULL);
+ struct ishtp_msg_hdr ishtp_hdr;
+ unsigned long tx_flags, tx_free_flags;
+ unsigned char *pmsg;
+
+ if (!dev)
+ return;
+
+ /*
+ * Check other conditions in case some critical error has
+ * occurred before this callback is called
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (!cl->sending) {
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_ipc_acked = 0;
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->sending = 1;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ cl->sending = 0;
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ } else {
+ /* Send IPC fragment */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ cl->tx_offs += dev->mtu;
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ }
+}
+
+/**
+ * ishtp_cl_send_msg_ipc() -Send message using IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message over IPC not using DMA
+ */
+static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ /* If last DMA message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
+ return;
+
+ cl->tx_offs = 0;
+ ipc_tx_callback(cl);
+ ++cl->send_msg_cnt_ipc;
+}
+
+/**
+ * ishtp_cl_send_msg_dma() -Send message using DMA
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA
+ */
+static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct dma_xfer_hbm dma_xfer;
+ unsigned char *msg_addr;
+ int off;
+ struct ishtp_cl_tx_ring *cl_msg;
+ unsigned long tx_flags, tx_free_flags;
+
+ /* If last IPC message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+
+ msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
+ if (!msg_addr) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ if (dev->transfer_path == CL_TX_PATH_DEFAULT)
+ ishtp_cl_send_msg_ipc(dev, cl);
+ return;
+ }
+
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_dma_acked = 0;
+ cl->last_dma_addr = msg_addr;
+ cl->last_tx_path = CL_TX_PATH_DMA;
+
+ /* write msg to dma buf */
+ memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+
+ /* send dma_xfer hbm msg */
+ off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
+ dma_xfer.hbm = DMA_XFER;
+ dma_xfer.fw_client_id = cl->fw_client_id;
+ dma_xfer.host_client_id = cl->host_client_id;
+ dma_xfer.reserved = 0;
+ dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
+ dma_xfer.msg_length = cl_msg->send_buf.size;
+ dma_xfer.reserved2 = 0;
+ ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ ++cl->send_msg_cnt_dma;
+}
+
+/**
+ * ishtp_cl_send_msg() - Send message using DMA or IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA or IPC based on transfer_path
+ */
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ if (dev->transfer_path == CL_TX_PATH_DMA)
+ ishtp_cl_send_msg_dma(dev, cl);
+ else
+ ishtp_cl_send_msg_ipc(dev, cl);
+}
+
+/**
+ * recv_ishtp_cl_msg() - Receive client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: Pointer to message header
+ *
+ * Receive and dispatch ISHTP client messages. This function executes in ISR
+ * context
+ */
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long dev_flags;
+ unsigned long flags;
+ int rb_count;
+
+ if (ishtp_hdr->reserved) {
+ dev_err(dev->devc, "corrupted message header.\n");
+ goto eoi;
+ }
+
+ if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "ISHTP message length in hdr exceeds IPC MTU\n");
+ goto eoi;
+ }
+
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ rb_count = -1;
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ ++rb_count;
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
+ cl->fw_client_id == ishtp_hdr->fw_addr) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /* If no Rx buffer is allocated, disband the rb */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock,
+ dev_flags);
+ dev_err(&cl->device->dev,
+ "Rx buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message overflows the buffer (exceeds max. client msg
+ * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock,
+ dev_flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, ishtp_hdr->length,
+ rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data + rb->buf_idx;
+ dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
+
+ rb->buf_idx += ishtp_hdr->length;
+ if (ishtp_hdr->msg_complete) {
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock,
+ flags);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock_irqrestore(&cl->free_list_spinlock,
+ flags);
+ }
+ }
+ /* One more fragment in message (even if this was last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev_err(dev->devc, "Dropped Rx msg - no request\n");
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ getnstimeofday(&cl->ts_rx);
+ ++cl->recv_msg_cnt_ipc;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
+
+/**
+ * recv_ishtp_cl_msg_dma() - Receive client message over DMA
+ * @dev: ISHTP device instance
+ * @msg: message pointer
+ * @hbm: hbm buffer
+ *
+ * Receive and dispatch ISHTP client messages using DMA. This function executes
+ * in ISR context
+ */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long dev_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == hbm->host_client_id &&
+ cl->fw_client_id == hbm->fw_client_id) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /*
+ * If no Rx buffer is allocated, disband the rb
+ */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock,
+ dev_flags);
+ dev_err(&cl->device->dev,
+ "response buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message overflows the buffer (exceeds max. client msg
+ * size), drop the message and recycle the buffer.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < hbm->msg_length) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock,
+ dev_flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, hbm->msg_length, rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data;
+ memcpy(buffer, msg, hbm->msg_length);
+ rb->buf_idx = hbm->msg_length;
+
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock,
+ flags);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock_irqrestore(&cl->free_list_spinlock,
+ flags);
+ }
+
+ /* One more fragment in message (this is always last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ getnstimeofday(&cl->ts_rx);
+ ++cl->recv_msg_cnt_dma;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
new file mode 100644
index 000000000000..444d069c2ed4
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -0,0 +1,182 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_CLIENT_H_
+#define _ISHTP_CLIENT_H_
+
+#include <linux/types.h>
+#include "ishtp-dev.h"
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
+
+/* Tx and Rx ring size */
+#define CL_DEF_RX_RING_SIZE 2
+#define CL_DEF_TX_RING_SIZE 2
+#define CL_MAX_RX_RING_SIZE 32
+#define CL_MAX_TX_RING_SIZE 32
+
+#define DMA_SLOT_SIZE 4096
+/* Number of IPC fragments after which it's worth sending via DMA */
+#define DMA_WORTH_THRESHOLD 3
+
+/* DMA/IPC Tx paths. Any value other than the default enforces that path */
+#define CL_TX_PATH_DEFAULT 0
+#define CL_TX_PATH_IPC 1
+#define CL_TX_PATH_DMA 2
+
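DMA_WORTH_THRESHOLD marks the point at which a message is large enough that one DMA transfer is preferable to several IPC fragments. The enforcement itself happens in the client Tx path outside this hunk; the sketch below only illustrates the assumed shape of such a check (helper name and policy are assumptions, not taken from this patch):

/* Assumed policy sketch: prefer DMA once the message would need more
 * than DMA_WORTH_THRESHOLD IPC fragments.
 */
static inline bool tx_prefer_dma(size_t msg_size, size_t ipc_mtu)
{
	return msg_size > DMA_WORTH_THRESHOLD * ipc_mtu;
}
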
+/* Client Tx buffer list entry */
+struct ishtp_cl_tx_ring {
+ struct list_head list;
+ struct ishtp_msg_data send_buf;
+};
+
+/* ISHTP client instance */
+struct ishtp_cl {
+ struct list_head link;
+ struct ishtp_device *dev;
+ enum cl_state state;
+ int status;
+
+ /* Link to ISHTP bus device */
+ struct ishtp_cl_device *device;
+
+ /* ID of client connected */
+ uint8_t host_client_id;
+ uint8_t fw_client_id;
+ uint8_t ishtp_flow_ctrl_creds;
+ uint8_t out_flow_ctrl_creds;
+
+ /* dma */
+ int last_tx_path;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_dma_acked;
+ unsigned char *last_dma_addr;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_ipc_acked;
+
+ /* Rx ring buffer pool */
+ unsigned int rx_ring_size;
+ struct ishtp_cl_rb free_rb_list;
+ spinlock_t free_list_spinlock;
+ /* Rx in-process list */
+ struct ishtp_cl_rb in_process_list;
+ spinlock_t in_process_spinlock;
+
+ /* Client Tx buffers list */
+ unsigned int tx_ring_size;
+ struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ spinlock_t tx_list_spinlock;
+ spinlock_t tx_free_list_spinlock;
+ size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
+
+ /*
+ * If we get an FC and the list is not empty, we must know whether we
+ * are in the middle of sending: if so, increase the FC counter;
+ * otherwise, start sending the first msg in the list.
+ * (!) This applies to the counting-FC implementation only. With
+ * single-FC the other party may NOT send an FC until it receives the
+ * complete message
+ */
+ int sending;
+
+ /* Send FC spinlock */
+ spinlock_t fc_spinlock;
+
+ /* wait queue for connect and disconnect response from FW */
+ wait_queue_head_t wait_ctrl_res;
+
+ /* Error stats */
+ unsigned int err_send_msg;
+ unsigned int err_send_fc;
+
+ /* Send/recv stats */
+ unsigned int send_msg_cnt_ipc;
+ unsigned int send_msg_cnt_dma;
+ unsigned int recv_msg_cnt_ipc;
+ unsigned int recv_msg_cnt_dma;
+ unsigned int recv_msg_num_frags;
+ unsigned int ishtp_flow_ctrl_cnt;
+ unsigned int out_flow_ctrl_cnt;
+
+ /* Rx msg ... out FC timing */
+ struct timespec ts_rx;
+ struct timespec ts_out_fc;
+ struct timespec ts_max_fc_delay;
+ void *client_data;
+};
+
+/* Client connection management internal functions */
+int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, uuid_le *uuid);
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+int ishtp_cl_read_start(struct ishtp_cl *cl);
+
+/* Ring Buffer I/F */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+
+/* DMA I/F functions */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm);
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev);
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size);
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size);
+
+/* Request blocks alloc/free I/F */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
+void ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
+
+/**
+ * ishtp_cl_cmp_id - tells if two clients have the same id
+ * Return: true if the ids are the same and both pointers are not NULL
+ */
+static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
+ const struct ishtp_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->fw_client_id == cl2->fw_client_id);
+}
+
+/* exported functions from ISHTP under client management scope */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl, int id);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+
+/* exported functions from ISHTP client buffer management scope */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+
+#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
new file mode 100644
index 000000000000..2783f3666114
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -0,0 +1,175 @@
+/*
+ * ISHTP DMA I/F functions
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "ishtp-dev.h"
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Allocate the RX and TX DMA buffers once during bus setup.
+ * It allocates a 1 MB buffer each for RX and TX, which are
+ * divided into slots.
+ */
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf)
+ return;
+
+ dev->ishtp_host_dma_tx_buf_size = 1024*1024;
+ dev->ishtp_host_dma_rx_buf_size = 1024*1024;
+
+ /* Allocate Tx buffer and init usage bitmap */
+ dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_tx_buf_size,
+ &h, GFP_KERNEL);
+ if (dev->ishtp_host_dma_tx_buf)
+ dev->ishtp_host_dma_tx_buf_phys = h;
+
+ dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
+ DMA_SLOT_SIZE;
+
+ dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
+ sizeof(uint8_t),
+ GFP_KERNEL);
+ spin_lock_init(&dev->ishtp_dma_tx_lock);
+
+ /* Allocate Rx buffer */
+ dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_rx_buf_size,
+ &h, GFP_KERNEL);
+
+ if (dev->ishtp_host_dma_rx_buf)
+ dev->ishtp_host_dma_rx_buf_phys = h;
+}
+
+/**
+ * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Free the DMA buffers when all clients are released. This only
+ * happens during the error path in the ISH built-in driver
+ * model
+ */
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf) {
+ h = dev->ishtp_host_dma_tx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
+ dev->ishtp_host_dma_tx_buf, h);
+ }
+
+ if (dev->ishtp_host_dma_rx_buf) {
+ h = dev->ishtp_host_dma_rx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
+ dev->ishtp_host_dma_rx_buf, h);
+ }
+
+ kfree(dev->ishtp_dma_tx_map);
+ dev->ishtp_host_dma_tx_buf = NULL;
+ dev->ishtp_host_dma_rx_buf = NULL;
+ dev->ishtp_dma_tx_map = NULL;
+}
+
+/**
+ * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
+ * @dev: ishtp device
+ * @size: Size of memory to get
+ *
+ * Find and return a free address of "size" bytes in the dma tx buffer.
+ * The function marks this address as "in use".
+ *
+ * Return: NULL when no free buffer is available, else the address to copy to
+ */
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size)
+{
+ unsigned long flags;
+ int i, j, free;
+ /* an additional slot is needed if there is a remainder */
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+ for (j = 0; j < required_slots; j++)
+ if (dev->ishtp_dma_tx_map[i+j]) {
+ free = 0;
+ i += j;
+ break;
+ }
+ if (free) {
+ /* mark memory as "caught" */
+ for (j = 0; j < required_slots; j++)
+ dev->ishtp_dma_tx_map[i+j] = 1;
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ return (i * DMA_SLOT_SIZE) +
+ (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ }
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "No free DMA buffer to send msg\n");
+ return NULL;
+}
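
The required_slots expression above is just ceiling division; with DMA_SLOT_SIZE of 4096 and the 1 MB Tx buffer allocated in ishtp_cl_alloc_dma_buf(), ishtp_dma_num_slots comes to 256. A sketch of the same computation using the kernel's DIV_ROUND_UP() helper (illustrative only):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Equivalent to (size / DMA_SLOT_SIZE) + 1 * (size % DMA_SLOT_SIZE != 0) */
static inline int dma_slots_for(uint32_t size)
{
	return DIV_ROUND_UP(size, DMA_SLOT_SIZE);
}

/* e.g. a 10000-byte message occupies DIV_ROUND_UP(10000, 4096) == 3 slots */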
+
+/**
+ * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
+ * @dev: ishtp device
+ * @msg_addr: message address of slot
+ * @size: size of the acked memory
+ *
+ * Returns the acked memory (size bytes starting at msg_addr) to the
+ * free list
+ */
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size)
+{
+ unsigned long flags;
+ int acked_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+ int i, j;
+
+ if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+ if ((i + j) >= dev->ishtp_dma_num_slots ||
+ !dev->ishtp_dma_tx_map[i+j]) {
+ /* no such slot, or memory is already free */
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+ dev->ishtp_dma_tx_map[i+j] = 0;
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
new file mode 100644
index 000000000000..74bffee60774
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -0,0 +1,1032 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
+ * @dev: ISHTP device instance
+ *
+ * Allocates storage for fw clients
+ */
+static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_fw_client *clients;
+ int b;
+
+ /* count how many ISH clients we have */
+ for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
+ dev->fw_clients_num++;
+
+ if (dev->fw_clients_num <= 0)
+ return;
+
+ /* allocate storage for fw clients representation */
+ clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
+ GFP_KERNEL);
+ if (!clients) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ ish_hw_reset(dev);
+ return;
+ }
+ dev->fw_clients = clients;
+}
+
+/**
+ * ishtp_hbm_cl_hdr() - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ *
+ * Initialize HBM buffer
+ */
+static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
+ void *buf, size_t len)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->fw_addr = cl->fw_client_id;
+}
+
+/**
+ * ishtp_hbm_cl_addr_equal() - Compare client address
+ * @cl: client
+ * @buf: Client command buffer
+ *
+ * Compare client address with the address in command buffer
+ *
+ * Return: True if they have the same address
+ */
+static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ return cl->host_client_id == cmd->host_addr &&
+ cl->fw_client_id == cmd->fw_addr;
+}
+
+/**
+ * ishtp_hbm_start_wait() - Wait for HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Wait for HBM start message from firmware
+ *
+ * Return: 0 if HBM start is/was received else timeout error
+ */
+int ishtp_hbm_start_wait(struct ishtp_device *dev)
+{
+ int ret;
+
+ if (dev->hbm_state > ISHTP_HBM_START)
+ return 0;
+
+ dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+ ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
+ dev->hbm_state >= ISHTP_HBM_STARTED,
+ (ISHTP_INTEROP_TIMEOUT * HZ));
+
+ dev_dbg(dev->devc,
+ "Woke up from waiting for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+
+ if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ dev_err(dev->devc,
+ "waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
+ ret, dev->hbm_state);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * ishtp_hbm_start_req() - Send HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Send HBM start message to firmware
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_start_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+
+ /* host start message */
+ start_req = (struct hbm_host_version_request *)data;
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+ /*
+ * (!) Response to HBM start may be so quick that this thread would get
+ * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
+ * So set it at first, change back to ISHTP_HBM_IDLE upon failure
+ */
+ dev->hbm_state = ISHTP_HBM_START;
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev_err(dev->devc, "version message send failed\n");
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ ish_hw_reset(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_enum_clients_req() - Send client enum req
+ * @dev: ISHTP device instance
+ *
+ * Send client enumeration request message
+ */
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
+
+ /* enumerate clients */
+ ishtp_hbm_hdr(ishtp_hdr, len);
+
+ enum_req = (struct hbm_host_enum_request *)data;
+ memset(enum_req, 0, len);
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "enumeration request send failed\n");
+ ish_hw_reset(dev);
+ }
+ dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
+}
+
+/**
+ * ishtp_hbm_prop_req() - Request property
+ * @dev: ISHTP device instance
+ *
+ * Request property for a single client
+ *
+ * Return: 0 if success else error code
+ */
+static int ishtp_hbm_prop_req(struct ishtp_device *dev)
+{
+
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ uint8_t client_num;
+
+ client_num = dev->fw_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->fw_clients_map,
+ ISHTP_CLIENTS_MAX, dev->fw_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == ISHTP_CLIENTS_MAX) {
+ dev->hbm_state = ISHTP_HBM_WORKING;
+ dev->dev_state = ISHTP_DEV_ENABLED;
+
+ for (dev->fw_client_presentation_num = 1;
+ dev->fw_client_presentation_num < client_num + 1;
+ ++dev->fw_client_presentation_num)
+ /* Add new client device */
+ ishtp_bus_new_client(dev);
+ return 0;
+ }
+
+ dev->fw_clients[client_num].client_id = next_client_index;
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ prop_req = (struct hbm_props_request *)data;
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "properties request send failed\n");
+ ish_hw_reset(dev);
+ return -EIO;
+ }
+
+ dev->fw_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_stop_req() - Send HBM stop
+ * @dev: ISHTP device instance
+ *
+ * Send stop request message
+ */
+static void ishtp_hbm_stop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_stop_request *req;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ req = (struct hbm_host_stop_request *)data;
+
+ memset(req, 0, sizeof(struct hbm_host_stop_request));
+ req->hbm_cmd = HOST_STOP_REQ_CMD;
+ req->reason = DRIVER_STOP_REQUEST;
+
+ ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_flow_control_req() - Send flow control request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send flow control request
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_flow_control);
+ int rv;
+ unsigned int num_frags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->fc_spinlock, flags);
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, data, len);
+
+ /*
+ * Sync possible race when RB recycle and packet receive paths
+ * both try to send an out FC
+ */
+ if (cl->out_flow_ctrl_creds) {
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return 0;
+ }
+
+ num_frags = cl->recv_msg_num_frags;
+ cl->recv_msg_num_frags = 0;
+
+ rv = ishtp_write_message(dev, ishtp_hdr, data);
+ if (!rv) {
+ ++cl->out_flow_ctrl_creds;
+ ++cl->out_flow_ctrl_cnt;
+ getnstimeofday(&cl->ts_out_fc);
+ if (cl->ts_rx.tv_sec && cl->ts_rx.tv_nsec) {
+ struct timespec ts_diff;
+
+ ts_diff = timespec_sub(cl->ts_out_fc, cl->ts_rx);
+ if (timespec_compare(&ts_diff, &cl->ts_max_fc_delay)
+ > 0)
+ cl->ts_max_fc_delay = ts_diff;
+ }
+ } else {
+ ++cl->err_send_fc;
+ }
+
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_req() - Send disconnect request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send disconnect message to fw
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, data, len);
+
+ return ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_res() - Get disconnect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received disconnect response from fw
+ */
+static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ break;
+ }
+ }
+ if (cl)
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_cl_connect_req() - Send connect request
+ * @dev: ISHTP device instance
+ * @cl: client device instance
+ *
+ * Send connection request to specific fw client
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, data, len);
+
+ return ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_connect_res() - Get connect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received connect response from fw
+ */
+static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = ISHTP_CL_CONNECTED;
+ cl->status = 0;
+ } else {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ break;
+ }
+ }
+ if (cl)
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
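
ishtp_hbm_cl_connect_req() only queues the request; the response handler above flips cl->state and wakes the waiter on cl->wait_ctrl_res. A sketch of the assumed caller pattern, bounded by ISHTP_CL_CONNECT_TIMEOUT (the real caller is ishtp_cl_connect(), declared in client.h but not shown in this hunk):

/* Illustrative sketch only; error handling and locking are simplified */
static int example_cl_connect(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	int rv;

	cl->state = ISHTP_CL_CONNECTING;
	rv = ishtp_hbm_cl_connect_req(dev, cl);
	if (rv)
		return rv;

	wait_event_interruptible_timeout(cl->wait_ctrl_res,
			cl->state != ISHTP_CL_CONNECTING,
			ISHTP_CL_CONNECT_TIMEOUT * HZ);

	return cl->state == ISHTP_CL_CONNECTED ? 0 : -ENODEV;
}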
+
+/**
+ * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
+ * @dev: ISHTP device instance
+ * @disconnect_req: disconnect request structure
+ *
+ * Disconnect request bus message from the fw. Send a disconnect response.
+ */
+static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct ishtp_cl *cl;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+ unsigned long flags;
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[4]; /* All HBM messages are 4 bytes */
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /* send disconnect response */
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
+ len);
+ ishtp_write_message(dev, &hdr, data);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ack for ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
+ unsigned int msg_offs;
+ struct ishtp_cl *cl;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
+ if (offs > dev->ishtp_host_dma_tx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Tx ack message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_tx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Tx ack message size\n");
+ return;
+ }
+
+ /* logical address of the acked mem */
+ msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
+ ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
+
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->fw_client_id == dma_xfer->fw_client_id &&
+ cl->host_client_id == dma_xfer->host_client_id)
+ /*
+ * A single ack may cover several dma
+ * transfers; match if the last msg addr
+ * lies inside the acked memory, even if
+ * not at its start
+ */
+ if (cl->last_dma_addr >=
+ (unsigned char *)msg &&
+ cl->last_dma_addr <
+ (unsigned char *)msg +
+ dma_xfer->msg_length) {
+ cl->last_dma_acked = 1;
+
+ if (!list_empty(&cl->tx_list.list) &&
+ cl->ishtp_flow_ctrl_creds) {
+ /*
+ * start sending the first msg
+ */
+ ishtp_cl_send_msg(dev, cl);
+ }
+ }
+ }
+ ++dma_xfer;
+ }
+}
+
+/**
+ * ishtp_hbm_dma_xfer() - Receive DMA transfer message
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr hdr;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
+ struct dma_xfer_hbm *prm = dma_xfer;
+ unsigned int msg_offs;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
+ if (offs > dev->ishtp_host_dma_rx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Rx message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_rx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Rx message size\n");
+ return;
+ }
+ msg = dev->ishtp_host_dma_rx_buf + offs;
+ recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
+ dma_xfer->hbm = DMA_XFER_ACK; /* Prepare for response */
+ ++dma_xfer;
+ }
+
+ /* Send DMA_XFER_ACK [...] */
+ ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
+ ishtp_write_message(dev, &hdr, (unsigned char *)prm);
+}
+
+/**
+ * ishtp_hbm_dispatch() - HBM dispatch function
+ * @dev: ISHTP device instance
+ * @hdr: bus message
+ *
+ * Bottom half read routine after ISR to handle the read bus message cmd
+ * processing
+ */
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr)
+{
+ struct ishtp_bus_message *ishtp_msg;
+ struct ishtp_fw_client *fw_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct dma_alloc_notify dma_alloc_notify;
+ struct dma_xfer_hbm *dma_xfer;
+
+ ishtp_msg = hdr;
+
+ switch (ishtp_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)ishtp_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->fw_max_version;
+
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ ishtp_hbm_stop_req(dev);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_START) {
+ dev->hbm_state = ISHTP_HBM_STARTED;
+ ishtp_hbm_enum_clients_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: wrong host start response\n");
+ /* BUG: why do we arrive here? */
+ ish_hw_reset(dev);
+ return;
+ }
+
+ wake_up_interruptible(&dev->wait_hbm_recvd_msg);
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_connect_res(dev, connect_res);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res =
+ (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)ishtp_msg;
+ fw_client = &dev->fw_clients[dev->fw_client_presentation_num];
+
+ if (props_res->status || !dev->fw_clients) {
+ dev_err(dev->devc,
+ "reset: properties response hbm wrong status\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (fw_client->client_id != props_res->address) {
+ dev_err(dev->devc,
+ "reset: host properties response address mismatch [%02X %02X]\n",
+ fw_client->client_id, props_res->address);
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
+ dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
+ dev_err(dev->devc,
+ "reset: unexpected properties response\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ fw_client->props = props_res->client_properties;
+ dev->fw_client_index++;
+ dev->fw_client_presentation_num++;
+
+ /* request property for the next client */
+ ishtp_hbm_prop_req(dev);
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ break;
+
+ if (!ishtp_use_dma_transfer())
+ break;
+
+ dev_dbg(dev->devc, "Requesting to use DMA\n");
+ ishtp_cl_alloc_dma_buf(dev);
+ if (dev->ishtp_host_dma_rx_buf) {
+ const size_t len = sizeof(dma_alloc_notify);
+
+ memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
+ dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
+ dma_alloc_notify.buf_size =
+ dev->ishtp_host_dma_rx_buf_size;
+ dma_alloc_notify.buf_address =
+ dev->ishtp_host_dma_rx_buf_phys;
+ ishtp_hbm_hdr(&ishtp_hdr, len);
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&dma_alloc_notify);
+ }
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) ishtp_msg;
+ memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
+ dev->fw_client_presentation_num = 0;
+ dev->fw_client_index = 0;
+
+ ishtp_hbm_fw_cl_allocate(dev);
+ dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ ishtp_hbm_prop_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: unexpected enumeration response hbm\n");
+ ish_hw_reset(dev);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ if (dev->hbm_state != ISHTP_HBM_STOPPED)
+ dev_err(dev->devc, "unexpected stop response\n");
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ dev_info(dev->devc, "reset: FW stop response\n");
+ ish_hw_reset(dev);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req =
+ (struct hbm_client_connect_request *)ishtp_msg;
+ ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case FW_STOP_REQ_CMD:
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ break;
+
+ case DMA_BUFFER_ALLOC_RESPONSE:
+ dev->ishtp_host_dma_enabled = 1;
+ break;
+
+ case DMA_XFER:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled) {
+ dev_err(dev->devc,
+ "DMA XFER requested but DMA is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer(dev, dma_xfer);
+ break;
+
+ case DMA_XFER_ACK:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled ||
+ !dev->ishtp_host_dma_tx_buf) {
+ dev_err(dev->devc,
+ "DMA XFER acked but DMA Tx is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
+ break;
+
+ default:
+ dev_err(dev->devc, "unknown HBM: %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+
+ break;
+ }
+}
+
+/**
+ * bh_hbm_work_fn() - HBM work function
+ * @work: work struct
+ *
+ * Bottom half work function (used instead of a threaded handler)
+ * for processing hbm messages
+ */
+void bh_hbm_work_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ishtp_device *dev;
+ unsigned char hbm[IPC_PAYLOAD_SIZE];
+
+ dev = container_of(work, struct ishtp_device, bh_hbm_work);
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
+ memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
+ IPC_PAYLOAD_SIZE);
+ dev->rd_msg_fifo_head =
+ (dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
+ } else {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ }
+}
+
+/**
+ * recv_hbm() - Receive HBM message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP bus messages in ISR context. This will schedule
+ * work function to process message
+ */
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+ struct ishtp_bus_message *ishtp_msg =
+ (struct ishtp_bus_message *)rd_msg_buf;
+ unsigned long flags;
+
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+
+ /* Flow control - handle in place */
+ if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
+ struct hbm_flow_control *flow_control =
+ (struct hbm_flow_control *)ishtp_msg;
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags, tx_flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->host_client_id == flow_control->host_addr &&
+ cl->fw_client_id ==
+ flow_control->fw_addr) {
+ /*
+ * NOTE: Receiving an FC in the middle of
+ * sending is valid only for a counting
+ * flow-control implementation, which is not
+ * currently supported
+ */
+ if (cl->ishtp_flow_ctrl_creds)
+ dev_err(dev->devc,
+ "recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
+ (unsigned int)cl->fw_client_id,
+ (unsigned int)cl->host_client_id,
+ cl->ishtp_flow_ctrl_creds);
+ else {
+ ++cl->ishtp_flow_ctrl_creds;
+ ++cl->ishtp_flow_ctrl_cnt;
+ cl->last_ipc_acked = 1;
+ spin_lock_irqsave(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ if (!list_empty(&cl->tx_list.list)) {
+ /*
+ * start sending the first msg
+ * = the callback function
+ */
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ ishtp_cl_send_msg(dev, cl);
+ } else {
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ }
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ goto eoi;
+ }
+
+ /*
+ * Messages that are safe for ISR processing and important
+ * to be handled quickly and in order go here
+ */
+ if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
+ ishtp_msg->hbm_cmd == DMA_XFER) {
+ ishtp_hbm_dispatch(dev, ishtp_msg);
+ goto eoi;
+ }
+
+ /*
+ * All other HBMs go here.
+ * We schedule HBMs for serial processing via the system wq;
+ * multiple HBMs may be scheduled at the same time.
+ */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
+ dev->rd_msg_fifo_head) {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+ goto eoi;
+ }
+ memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
+ ishtp_hdr->length);
+ dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ schedule_work(&dev->bh_hbm_work);
+eoi:
+ return;
+}
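
The rd_msg FIFO used above is a byte ring of RD_INT_FIFO_SIZE fixed-size cells, each IPC_PAYLOAD_SIZE bytes, advanced and wrapped modulo the total size; one cell is always left unused so that "full" and "empty" stay distinguishable (empty is head == tail, as tested in bh_hbm_work_fn()). A sketch of the helpers implied by the code (hypothetical names):

static bool rd_fifo_full(unsigned int head, unsigned int tail)
{
	return (tail + IPC_PAYLOAD_SIZE) %
	       (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) == head;
}

static unsigned int rd_fifo_advance(unsigned int pos)
{
	return (pos + IPC_PAYLOAD_SIZE) %
	       (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
}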
+
+/**
+ * recv_fixed_cl_msg() - Receive fixed client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP fixed client messages (address == 0)
+ * in ISR context
+ */
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev->print_log(dev,
+ "%s() got fixed client msg from client #%d\n",
+ __func__, ishtp_hdr->fw_addr);
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
+ struct ish_system_states_header *msg_hdr =
+ (struct ish_system_states_header *)rd_msg_buf;
+ if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
+ ishtp_send_resume(dev);
+ /* if FW request arrived here, the system is not suspended */
+ else
+ dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
+ msg_hdr->cmd);
+ }
+}
+
+/**
+ * fix_cl_hdr() - Initialize fixed client header
+ * @hdr: message header
+ * @length: length of message
+ * @cl_addr: Client address
+ *
+ * Initialize message header for fixed client
+ */
+static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
+ uint8_t cl_addr)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = cl_addr;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+/*** Suspend and resume notification ***/
+
+static uint32_t current_state;
+static uint32_t supported_states = 0 | SUSPEND_STATE_BIT;
+
+/**
+ * ishtp_send_suspend() - Send suspend message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send suspend message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_suspend(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state |= SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_suspend);
+
+/**
+ * ishtp_send_resume() - Send resume message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send resume message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_resume(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state &= ~SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_resume);
+
+/**
+ * ishtp_query_subscribers() - Send query subscribers message
+ * @dev: ISHTP device instance
+ *
+ * Send message to query subscribers
+ */
+void ishtp_query_subscribers(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_query_subscribers query_subscribers_msg;
+ const size_t len = sizeof(struct ish_system_states_query_subscribers);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&query_subscribers_msg, 0, len);
+ query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&query_subscribers_msg);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
new file mode 100644
index 000000000000..d96111cef7f8
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -0,0 +1,321 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_HBM_H_
+#define _ISHTP_HBM_H_
+
+#include <linux/uuid.h>
+
+struct ishtp_device;
+struct ishtp_msg_hdr;
+struct ishtp_cl;
+
+/*
+ * Timeouts in Seconds
+ */
+#define ISHTP_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+
+#define ISHTP_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+
+/*
+ * ISHTP Version
+ */
+#define HBM_MINOR_VERSION 0
+#define HBM_MAJOR_VERSION 1
+
+/* Host bus message command opcode */
+#define ISHTP_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define ISHTP_HBM_CMD_RES_MSK 0x80
+
+/*
+ * ISHTP Bus Message Command IDs
+ */
+#define HOST_START_REQ_CMD 0x01
+#define HOST_START_RES_CMD 0x81
+
+#define HOST_STOP_REQ_CMD 0x02
+#define HOST_STOP_RES_CMD 0x82
+
+#define FW_STOP_REQ_CMD 0x03
+
+#define HOST_ENUM_REQ_CMD 0x04
+#define HOST_ENUM_RES_CMD 0x84
+
+#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05
+#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85
+
+#define CLIENT_CONNECT_REQ_CMD 0x06
+#define CLIENT_CONNECT_RES_CMD 0x86
+
+#define CLIENT_DISCONNECT_REQ_CMD 0x07
+#define CLIENT_DISCONNECT_RES_CMD 0x87
+
+#define ISHTP_FLOW_CONTROL_CMD 0x08
+
+#define DMA_BUFFER_ALLOC_NOTIFY 0x11
+#define DMA_BUFFER_ALLOC_RESPONSE 0x91
+
+#define DMA_XFER 0x12
+#define DMA_XFER_ACK 0x92
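
Note that each response ID is its request ID with the response bit set, e.g. HOST_START_RES_CMD == (HOST_START_REQ_CMD | ISHTP_HBM_CMD_RES_MSK). A small sketch of how that relation could be used (hypothetical helper, not defined in this patch):

static inline bool ishtp_hbm_cmd_is_response(uint8_t hbm_cmd)
{
	return (hbm_cmd & ISHTP_HBM_CMD_RES_MSK) != 0;
}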
+
+/*
+ * ISHTP Stop Reason
+ * used by hbm_host_stop_request.reason
+ */
+#define DRIVER_STOP_REQUEST 0x00
+
+/*
+ * ISHTP BUS Interface Section
+ */
+struct ishtp_msg_hdr {
+ uint32_t fw_addr:8;
+ uint32_t host_addr:8;
+ uint32_t length:9;
+ uint32_t reserved:6;
+ uint32_t msg_complete:1;
+} __packed;
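
The bitfields above sum to 8 + 8 + 9 + 6 + 1 = 32 bits, so the header packs into a single 32-bit word, and the 9-bit length field bounds a single fragment at 511 bytes. An illustrative compile-time check of that layout assumption (not part of this patch):

_Static_assert(sizeof(struct ishtp_msg_hdr) == sizeof(uint32_t),
	       "ishtp_msg_hdr must pack into a single 32-bit word");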
+
+struct ishtp_bus_message {
+ uint8_t hbm_cmd;
+ uint8_t data[0];
+} __packed;
+
+/**
+ * struct ishtp_hbm_cl_cmd - client specific host bus command
+ * CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @data - command specific data
+ */
+struct ishtp_hbm_cl_cmd {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t data;
+};
+
+struct hbm_version {
+ uint8_t minor_version;
+ uint8_t major_version;
+} __packed;
+
+struct hbm_host_version_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved;
+ struct hbm_version host_version;
+} __packed;
+
+struct hbm_host_version_response {
+ uint8_t hbm_cmd;
+ uint8_t host_version_supported;
+ struct hbm_version fw_max_version;
+} __packed;
+
+struct hbm_host_stop_request {
+ uint8_t hbm_cmd;
+ uint8_t reason;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_host_stop_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+ uint8_t valid_addresses[32];
+} __packed;
+
+struct ishtp_client_properties {
+ uuid_le protocol_name;
+ uint8_t protocol_version;
+ uint8_t max_number_of_connections;
+ uint8_t fixed_address;
+ uint8_t single_recv_buf;
+ uint32_t max_msg_length;
+ uint8_t dma_hdr_len;
+#define ISHTP_CLIENT_DMA_ENABLED 0x80
+ uint8_t reserved4;
+ uint8_t reserved5;
+ uint8_t reserved6;
+} __packed;
+
+struct hbm_props_request {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_props_response {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t status;
+ uint8_t reserved[1];
+ struct ishtp_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @reserved
+ */
+struct hbm_client_connect_request {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved;
+} __packed;
+
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd - bus message command header
+ * @fw_addr - address of the fw client
+ * @host_addr - address of the client in the driver
+ * @status - status of the request
+ */
+struct hbm_client_connect_response {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t status;
+} __packed;
+
+
+#define ISHTP_FC_MESSAGE_RESERVED_LENGTH 5
+
+struct hbm_flow_control {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved[ISHTP_FC_MESSAGE_RESERVED_LENGTH];
+} __packed;
+
+struct dma_alloc_notify {
+ uint8_t hbm;
+ uint8_t status;
+ uint8_t reserved[2];
+ uint32_t buf_size;
+ uint64_t buf_address;
+ /* [...] more size/address pairs may follow */
+} __packed;
+
+struct dma_xfer_hbm {
+ uint8_t hbm;
+ uint8_t fw_client_id;
+ uint8_t host_client_id;
+ uint8_t reserved;
+ uint64_t msg_addr;
+ uint32_t msg_length;
+ uint32_t reserved2;
+} __packed;
+
+/* System state */
+#define ISHTP_SYSTEM_STATE_CLIENT_ADDR 13
+
+#define SYSTEM_STATE_SUBSCRIBE 0x1
+#define SYSTEM_STATE_STATUS 0x2
+#define SYSTEM_STATE_QUERY_SUBSCRIBERS 0x3
+#define SYSTEM_STATE_STATE_CHANGE_REQ 0x4
+/* indicates suspend and resume states */
+#define SUSPEND_STATE_BIT (1<<1)
+
+struct ish_system_states_header {
+ uint32_t cmd;
+ uint32_t cmd_status; /* responses will have this set */
+} __packed;
+
+struct ish_system_states_subscribe {
+ struct ish_system_states_header hdr;
+ uint32_t states;
+} __packed;
+
+struct ish_system_states_status {
+ struct ish_system_states_header hdr;
+ uint32_t supported_states;
+ uint32_t states_status;
+} __packed;
+
+struct ish_system_states_query_subscribers {
+ struct ish_system_states_header hdr;
+} __packed;
+
+struct ish_system_states_state_change_req {
+ struct ish_system_states_header hdr;
+ uint32_t requested_states;
+ uint32_t states_status;
+} __packed;
+
+/**
+ * enum ishtp_hbm_state - host bus message protocol state
+ *
+ * @ISHTP_HBM_IDLE : protocol not started
+ * @ISHTP_HBM_START : start request message was sent
+ * @ISHTP_HBM_STARTED : start response message was received
+ * @ISHTP_HBM_ENUM_CLIENTS : enumeration request was sent
+ * @ISHTP_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @ISHTP_HBM_WORKING : all client properties received, normal operation
+ * @ISHTP_HBM_STOPPED : stop request was sent or received
+ */
+enum ishtp_hbm_state {
+ ISHTP_HBM_IDLE = 0,
+ ISHTP_HBM_START,
+ ISHTP_HBM_STARTED,
+ ISHTP_HBM_ENUM_CLIENTS,
+ ISHTP_HBM_CLIENT_PROPERTIES,
+ ISHTP_HBM_WORKING,
+ ISHTP_HBM_STOPPED,
+};
+
+static inline void ishtp_hbm_hdr(struct ishtp_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+int ishtp_hbm_start_req(struct ishtp_device *dev);
+int ishtp_hbm_start_wait(struct ishtp_device *dev);
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl);
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev);
+void bh_hbm_work_fn(struct work_struct *work);
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr);
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr);
+
+void ishtp_query_subscribers(struct ishtp_device *dev);
+
+/* Exported I/F */
+void ishtp_send_suspend(struct ishtp_device *dev);
+void ishtp_send_resume(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HBM_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
new file mode 100644
index 000000000000..ac364418e17c
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -0,0 +1,115 @@
+/*
+ * Initialization protocol for ISHTP driver
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_dev_state_str() - Convert to string format
+ * @state: state to convert
+ *
+ * Convert state to string for prints
+ *
+ * Return: character pointer to converted string
+ */
+const char *ishtp_dev_state_str(int state)
+{
+ switch (state) {
+ case ISHTP_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case ISHTP_DEV_INIT_CLIENTS:
+ return "INIT_CLIENTS";
+ case ISHTP_DEV_ENABLED:
+ return "ENABLED";
+ case ISHTP_DEV_RESETTING:
+ return "RESETTING";
+ case ISHTP_DEV_DISABLED:
+ return "DISABLED";
+ case ISHTP_DEV_POWER_DOWN:
+ return "POWER_DOWN";
+ case ISHTP_DEV_POWER_UP:
+ return "POWER_UP";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * ishtp_device_init() - ishtp device init
+ * @dev: ISHTP device instance
+ *
+ * After the ISHTP device is allocated, this function is used to initialize
+ * each field, including spin locks, work structs and lists
+ */
+void ishtp_device_init(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INITIALIZING;
+ INIT_LIST_HEAD(&dev->cl_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ dev->rd_msg_fifo_head = 0;
+ dev->rd_msg_fifo_tail = 0;
+ spin_lock_init(&dev->rd_msg_spinlock);
+
+ init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+ spin_lock_init(&dev->read_list_spinlock);
+ spin_lock_init(&dev->device_lock);
+ spin_lock_init(&dev->device_list_lock);
+ spin_lock_init(&dev->cl_list_lock);
+ spin_lock_init(&dev->fw_clients_lock);
+ INIT_WORK(&dev->bh_hbm_work, bh_hbm_work_fn);
+
+ bitmap_zero(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving client ID 0 for ISHTP Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+
+ INIT_LIST_HEAD(&dev->read_list.list);
+
+}
+EXPORT_SYMBOL(ishtp_device_init);
+
+/**
+ * ishtp_start() - Start ISH processing
+ * @dev: ISHTP device instance
+ *
+ * Start ISHTP processing by sending query subscriber message
+ *
+ * Return: 0 on success else -ENODEV
+ */
+int ishtp_start(struct ishtp_device *dev)
+{
+ if (ishtp_hbm_start_wait(dev)) {
+ dev_err(dev->devc, "HBM hasn't started\n");
+ goto err;
+ }
+
+ /* suspend & resume notification - send QUERY_SUBSCRIBERS msg */
+ ishtp_query_subscribers(dev);
+
+ return 0;
+err:
+ dev_err(dev->devc, "link layer initialization failed.\n");
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_start);
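A minimal usage sketch for the two exported entry points, assuming a hardware glue driver that has already allocated its struct ishtp_device (the probe function below is illustrative, not part of this patch):

static int example_glue_probe(struct ishtp_device *dev)
{
	ishtp_device_init(dev);		/* locks, lists, HBM work item */

	/* ... hardware bring-up: reset, enable interrupts, start HBM ... */

	return ishtp_start(dev);	/* waits for HBM, queries subscribers */
}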
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
new file mode 100644
index 000000000000..a94f9a8a96a0
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -0,0 +1,277 @@
+/*
+ * Most ISHTP provider device and ISHTP logic declarations
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_DEV_H_
+#define _ISHTP_DEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include "bus.h"
+#include "hbm.h"
+
+#define IPC_PAYLOAD_SIZE 128
+#define ISHTP_RD_MSG_BUF_SIZE IPC_PAYLOAD_SIZE
+#define IPC_FULL_MSG_SIZE 132
+
+/* Number of messages to be held in ISR->BH FIFO */
+#define RD_INT_FIFO_SIZE 64
+
+/*
+ * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
+ * Tx complete interrupt or RX_COMPLETE handler
+ */
+#define IPC_TX_FIFO_SIZE 512
+
+/*
+ * Number of Maximum ISHTP Clients
+ */
+#define ISHTP_CLIENTS_MAX 256
+
+/*
+ * Number of File descriptors/handles
+ * that can be opened to the driver.
+ *
+ * Limit to 255: 256 Total Clients
+ * minus internal client for ISHTP Bus Messages
+ */
+#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)
+
+/* Internal Clients Number */
+#define ISHTP_HOST_CLIENT_ID_ANY (-1)
+#define ISHTP_HBM_HOST_CLIENT_ID 0
+
+#define MAX_DMA_DELAY 20
+
+/* ISHTP device states */
+enum ishtp_dev_state {
+ ISHTP_DEV_INITIALIZING = 0,
+ ISHTP_DEV_INIT_CLIENTS,
+ ISHTP_DEV_ENABLED,
+ ISHTP_DEV_RESETTING,
+ ISHTP_DEV_DISABLED,
+ ISHTP_DEV_POWER_DOWN,
+ ISHTP_DEV_POWER_UP
+};
+const char *ishtp_dev_state_str(int state);
+
+struct ishtp_cl;
+
+/**
+ * struct ishtp_fw_client - representation of fw client
+ *
+ * @props: client properties
+ * @client_id: fw client id
+ */
+struct ishtp_fw_client {
+ struct ishtp_client_properties props;
+ uint8_t client_id;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of the data pointed to by @data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/*
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message data buffer
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+/*
+ * Control info for the ISHTP/IPC sending FIFO of IPC messages -
+ * a list entry with an inline data buffer.
+ * This structure is filled with parameters submitted by the caller
+ * (glue layer).
+ * 'buf' may point either to an external buffer or to 'inline_data';
+ * 'offset' is initialized to 0 on submission.
+ *
+ * 'ipc_send_compl' is intended for clients that send fragmented
+ * messages. It is called whenever a fragment has been written to the
+ * IPC message registers.
+ * If more fragments remain, it sends the next one; with the last
+ * fragment it sets the ISHTP "message-complete" flag and removes the
+ * outstanding message (marks the outstanding buffer as available).
+ * If counting flow control is in effect and more flow-control credits
+ * are available, it may queue the next client message from the cl
+ * structure for IPC processing.
+ */
+struct wr_msg_ctl_info {
+ /* Will be called with 'ipc_send_compl_prm' as parameter */
+ void (*ipc_send_compl)(void *);
+
+ void *ipc_send_compl_prm;
+ size_t length;
+ struct list_head link;
+ unsigned char inline_data[IPC_FULL_MSG_SIZE];
+};
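A sketch of the completion-callback contract described above; every name below is illustrative and not part of this patch:

static void example_ipc_send_compl(void *prm)
{
	struct example_tx_state *tx = prm;	/* was passed as ipc_send_compl_prm */

	if (tx->bytes_left)
		example_send_next_fragment(tx);	/* last fragment sets msg_complete */
	else
		example_mark_tx_buffer_free(tx);	/* outstanding buffer available again */
}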
+
+/*
+ * The ISHTP layer talks to hardware IPC message using the following
+ * callbacks
+ */
+struct ishtp_hw_ops {
+ int (*hw_reset)(struct ishtp_device *dev);
+ int (*ipc_reset)(struct ishtp_device *dev);
+ uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
+ int busy);
+ int (*write)(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length);
+ uint32_t (*ishtp_read_hdr)(const struct ishtp_device *dev);
+ int (*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length);
+ uint32_t (*get_fw_status)(struct ishtp_device *dev);
+ void (*sync_fw_clock)(struct ishtp_device *dev);
+};
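A hardware glue layer is expected to fill this table and point dev->ops at it. A purely illustrative stub, with made-up "foo" names and most callbacks omitted:

static int foo_hw_reset(struct ishtp_device *dev)
{
	/* toggle the reset bits of the hypothetical "foo" IPC transport */
	return 0;
}

static const struct ishtp_hw_ops foo_hw_ops = {
	.hw_reset	= foo_hw_reset,
	/* a real implementation must provide the remaining callbacks too */
};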
+
+/**
+ * struct ishtp_device - ISHTP private device struct
+ */
+struct ishtp_device {
+ struct device *devc; /* pointer to lowest device */
+ struct pci_dev *pdev; /* PCI device to get device ids */
+
+ /* waitq for waiting for suspend response */
+ wait_queue_head_t suspend_wait;
+ bool suspend_flag; /* Suspend is active */
+
+ /* waitq for waiting for resume response */
+ wait_queue_head_t resume_wait;
+ bool resume_flag; /* Resume is active */
+
+ /*
+ * lock for the device, for everything that doesn't have
+ * a dedicated spinlock
+ */
+ spinlock_t device_lock;
+
+ bool recvd_hw_ready;
+ struct hbm_version version;
+ int transfer_path; /* Choice of transfer path: IPC or DMA */
+
+ /* ishtp device states */
+ enum ishtp_dev_state dev_state;
+ enum ishtp_hbm_state hbm_state;
+
+ /* driver read queue */
+ struct ishtp_cl_rb read_list;
+ spinlock_t read_list_spinlock;
+
+ /* list of ishtp_cl's */
+ struct list_head cl_list;
+ spinlock_t cl_list_lock;
+ long open_handle_count;
+
+ /* List of bus devices */
+ struct list_head device_list;
+ spinlock_t device_list_lock;
+
+ /* waitqueues for receiving messages from FW */
+ wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_hbm_recvd_msg;
+
+ /* FIFO for input messages for BH processing */
+ unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
+ unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
+ spinlock_t rd_msg_spinlock;
+ struct work_struct bh_hbm_work;
+
+ /* IPC write queue */
+ struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+ /* For both processing list and free list */
+ spinlock_t wr_processing_spinlock;
+
+ spinlock_t out_ipc_spinlock;
+
+ struct ishtp_fw_client *fw_clients; /* Note: memory has to be allocated */
+ DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
+ DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
+ uint8_t fw_clients_num;
+ uint8_t fw_client_presentation_num;
+ uint8_t fw_client_index;
+ spinlock_t fw_clients_lock;
+
+ /* TX DMA buffers and slots */
+ int ishtp_host_dma_enabled;
+ void *ishtp_host_dma_tx_buf;
+ unsigned int ishtp_host_dma_tx_buf_size;
+ uint64_t ishtp_host_dma_tx_buf_phys;
+ int ishtp_dma_num_slots;
+
+ /* map of 4k blocks in Tx dma buf: 0-free, 1-used */
+ uint8_t *ishtp_dma_tx_map;
+ spinlock_t ishtp_dma_tx_lock;
+
+ /* RX DMA buffers and slots */
+ void *ishtp_host_dma_rx_buf;
+ unsigned int ishtp_host_dma_rx_buf_size;
+ uint64_t ishtp_host_dma_rx_buf_phys;
+
+ /* Dump to trace buffers if enabled */
+ void (*print_log)(struct ishtp_device *dev, char *format, ...);
+
+ /* Debug stats */
+ unsigned int ipc_rx_cnt;
+ unsigned long long ipc_rx_bytes_cnt;
+ unsigned int ipc_tx_cnt;
+ unsigned long long ipc_tx_bytes_cnt;
+
+ const struct ishtp_hw_ops *ops;
+ size_t mtu;
+ uint32_t ishtp_msg_hdr;
+ char hw[0] __aligned(sizeof(void *));
+};
+
+static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
+{
+ return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+/*
+ * Register Access Function
+ */
+static inline int ish_ipc_reset(struct ishtp_device *dev)
+{
+ return dev->ops->ipc_reset(dev);
+}
+
+static inline int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+
+/* Exported function */
+void ishtp_device_init(struct ishtp_device *dev);
+int ishtp_start(struct ishtp_device *dev);
+
+#endif /*_ISHTP_DEV_H_*/
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 99ec3ff7563b..7f8ff39ed44b 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -779,19 +779,8 @@ static struct miscdevice uhid_misc = {
.minor = UHID_MINOR,
.name = UHID_NAME,
};
+module_misc_device(uhid_misc);
-static int __init uhid_init(void)
-{
- return misc_register(&uhid_misc);
-}
-
-static void __exit uhid_exit(void)
-{
- misc_deregister(&uhid_misc);
-}
-
-module_init(uhid_init);
-module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
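For context (not part of the patch): module_misc_device() is believed to be defined in include/linux/miscdevice.h roughly as follows, so the single line added above generates the same register/deregister pair that the removed init/exit functions spelled out by hand:

#define module_misc_device(__misc_device) \
	module_driver(__misc_device, misc_register, misc_deregister)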
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b4b8c6abb03e..0a0eca5da47d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
@@ -98,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
@@ -143,7 +145,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 4681a65a4579..b4800ea891cb 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -90,6 +90,8 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/hid.h>
+#include <linux/kfifo.h>
+#include <linux/leds.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
#include <asm/unaligned.h>
@@ -105,32 +107,95 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_VENDOR_ID_LENOVO 0x17ef
+enum wacom_worker {
+ WACOM_WORKER_WIRELESS,
+ WACOM_WORKER_BATTERY,
+ WACOM_WORKER_REMOTE,
+};
+
+struct wacom;
+
+struct wacom_led {
+ struct led_classdev cdev;
+ struct led_trigger trigger;
+ struct wacom *wacom;
+ unsigned int group;
+ unsigned int id;
+ u8 llv;
+ u8 hlv;
+ bool held;
+};
+
+struct wacom_group_leds {
+ u8 select; /* status led selector (0..3) */
+ struct wacom_led *leds;
+ unsigned int count;
+ struct device *dev;
+};
+
+struct wacom_battery {
+ struct wacom *wacom;
+ struct power_supply_desc bat_desc;
+ struct power_supply *battery;
+ char bat_name[WACOM_NAME_MAX];
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
+};
+
+struct wacom_remote {
+ spinlock_t remote_lock;
+ struct kfifo remote_fifo;
+ struct kobject *remote_dir;
+ struct {
+ struct attribute_group group;
+ u32 serial;
+ struct input_dev *input;
+ bool registered;
+ struct wacom_battery battery;
+ } remotes[WACOM_MAX_REMOTES];
+};
+
struct wacom {
struct usb_device *usbdev;
struct usb_interface *intf;
struct wacom_wac wacom_wac;
struct hid_device *hdev;
struct mutex lock;
- struct work_struct work;
- struct wacom_led {
- u8 select[5]; /* status led selector (0..3) */
+ struct work_struct wireless_work;
+ struct work_struct battery_work;
+ struct work_struct remote_work;
+ struct wacom_remote *remote;
+ struct wacom_leds {
+ struct wacom_group_leds *groups;
+ unsigned int count;
u8 llv; /* status led brightness no button (1..127) */
u8 hlv; /* status led brightness button pressed (1..127) */
u8 img_lum; /* OLED matrix display brightness */
+ u8 max_llv; /* maximum brightness of LED (llv) */
+ u8 max_hlv; /* maximum brightness of LED (hlv) */
} led;
- bool led_initialized;
- struct power_supply *battery;
- struct power_supply *ac;
- struct power_supply_desc battery_desc;
- struct power_supply_desc ac_desc;
- struct kobject *remote_dir;
- struct attribute_group remote_group[5];
+ struct wacom_battery battery;
+ bool resources;
};
-static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
+static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
+ enum wacom_worker which)
{
struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
- schedule_work(&wacom->work);
+
+ switch (which) {
+ case WACOM_WORKER_WIRELESS:
+ schedule_work(&wacom->wireless_work);
+ break;
+ case WACOM_WORKER_BATTERY:
+ schedule_work(&wacom->battery_work);
+ break;
+ case WACOM_WORKER_REMOTE:
+ schedule_work(&wacom->remote_work);
+ break;
+ }
}
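Call sites in the report-processing path then name the worker explicitly, e.g. (illustrative):

	wacom_schedule_work(wacom_wac, WACOM_WORKER_BATTERY);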
extern const struct hid_device_id wacom_ids[];
@@ -149,7 +214,8 @@ int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
-int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial,
- int index);
-void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial);
+enum led_brightness wacom_leds_brightness_get(struct wacom_led *led);
+struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
+ unsigned int id);
+struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 499cc8213cfe..5e7a5648e708 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -25,7 +25,6 @@
#define WAC_CMD_RETRIES 10
#define WAC_CMD_DELETE_PAIRING 0x20
#define WAC_CMD_UNPAIR_ALL 0xFF
-#define WAC_REMOTE_SERIAL_MAX_STRLEN 9
#define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
#define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
@@ -91,7 +90,12 @@ static void wacom_close(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
- hid_hw_close(wacom->hdev);
+ /*
+ * wacom->hdev should never be null, but surprisingly, I had the case
+ * once while unplugging the Wacom Wireless Receiver.
+ */
+ if (wacom->hdev)
+ hid_hw_close(wacom->hdev);
}
/*
@@ -523,36 +527,95 @@ struct wacom_hdev_data {
static LIST_HEAD(wacom_udev_list);
static DEFINE_MUTEX(wacom_udev_list_lock);
+static bool compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator)
+{
+ int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
+ int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
+
+ if (n1 != n2 || n1 <= 0 || n2 <= 0)
+ return false;
+
+ return !strncmp(hdev_a->phys, hdev_b->phys, n1);
+}
+
static bool wacom_are_sibling(struct hid_device *hdev,
struct hid_device *sibling)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
- int vid = features->oVid;
- int pid = features->oPid;
- int n1,n2;
+ struct wacom *sibling_wacom = hid_get_drvdata(sibling);
+ struct wacom_features *sibling_features = &sibling_wacom->wacom_wac.features;
+ __u32 oVid = features->oVid ? features->oVid : hdev->vendor;
+ __u32 oPid = features->oPid ? features->oPid : hdev->product;
+
+ /* The defined oVid/oPid must match that of the sibling */
+ if (features->oVid != HID_ANY_ID && sibling->vendor != oVid)
+ return false;
+ if (features->oPid != HID_ANY_ID && sibling->product != oPid)
+ return false;
- if (vid == 0 && pid == 0) {
- vid = hdev->vendor;
- pid = hdev->product;
+ /*
+ * Devices with the same VID/PID must share the same physical
+ * device path, while those with different VID/PID must share
+ * the same physical parent device path.
+ */
+ if (hdev->vendor == sibling->vendor && hdev->product == sibling->product) {
+ if (!compare_device_paths(hdev, sibling, '/'))
+ return false;
+ } else {
+ if (!compare_device_paths(hdev, sibling, '.'))
+ return false;
}
- if (vid != sibling->vendor || pid != sibling->product)
+ /* Skip the remaining heuristics unless you are a HID_GENERIC device */
+ if (features->type != HID_GENERIC)
+ return true;
+
+ /*
+ * Direct-input devices may not be siblings of indirect-input
+ * devices.
+ */
+ if ((features->device_type & WACOM_DEVICETYPE_DIRECT) &&
+ !(sibling_features->device_type & WACOM_DEVICETYPE_DIRECT))
return false;
- /* Compare the physical path. */
- n1 = strrchr(hdev->phys, '.') - hdev->phys;
- n2 = strrchr(sibling->phys, '.') - sibling->phys;
- if (n1 != n2 || n1 <= 0 || n2 <= 0)
+ /*
+ * Indirect-input devices may not be siblings of direct-input
+ * devices.
+ */
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT) &&
+ (sibling_features->device_type & WACOM_DEVICETYPE_DIRECT))
+ return false;
+
+ /* Pen devices may only be siblings of touch devices */
+ if ((features->device_type & WACOM_DEVICETYPE_PEN) &&
+ !(sibling_features->device_type & WACOM_DEVICETYPE_TOUCH))
return false;
- return !strncmp(hdev->phys, sibling->phys, n1);
+ /* Touch devices may only be siblings of pen devices */
+ if ((features->device_type & WACOM_DEVICETYPE_TOUCH) &&
+ !(sibling_features->device_type & WACOM_DEVICETYPE_PEN))
+ return false;
+
+ /*
+ * No reason could be found for these two devices to NOT be
+ * siblings, so there's a good chance they ARE siblings
+ */
+ return true;
}
static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
{
struct wacom_hdev_data *data;
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(data, &wacom_udev_list, list) {
+ if (compare_device_paths(hdev, data->dev, '/'))
+ return data;
+ }
+
+ /* Fallback to finding devices that appear to be "siblings" */
list_for_each_entry(data, &wacom_udev_list, list) {
if (wacom_are_sibling(hdev, data->dev)) {
kref_get(&data->kref);
@@ -563,6 +626,38 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
return NULL;
}
+static void wacom_release_shared_data(struct kref *kref)
+{
+ struct wacom_hdev_data *data =
+ container_of(kref, struct wacom_hdev_data, kref);
+
+ mutex_lock(&wacom_udev_list_lock);
+ list_del(&data->list);
+ mutex_unlock(&wacom_udev_list_lock);
+
+ kfree(data);
+}
+
+static void wacom_remove_shared_data(void *res)
+{
+ struct wacom *wacom = res;
+ struct wacom_hdev_data *data;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ if (wacom_wac->shared) {
+ data = container_of(wacom_wac->shared, struct wacom_hdev_data,
+ shared);
+
+ if (wacom_wac->shared->touch == wacom->hdev)
+ wacom_wac->shared->touch = NULL;
+ else if (wacom_wac->shared->pen == wacom->hdev)
+ wacom_wac->shared->pen = NULL;
+
+ kref_put(&data->kref, wacom_release_shared_data);
+ wacom_wac->shared = NULL;
+ }
+}
+
static int wacom_add_shared_data(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
@@ -587,6 +682,13 @@ static int wacom_add_shared_data(struct hid_device *hdev)
wacom_wac->shared = &data->shared;
+ retval = devm_add_action(&hdev->dev, wacom_remove_shared_data, wacom);
+ if (retval) {
+ mutex_unlock(&wacom_udev_list_lock);
+ wacom_remove_shared_data(wacom);
+ return retval;
+ }
+
if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
wacom_wac->shared->touch = hdev;
else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
@@ -597,37 +699,6 @@ out:
return retval;
}
-static void wacom_release_shared_data(struct kref *kref)
-{
- struct wacom_hdev_data *data =
- container_of(kref, struct wacom_hdev_data, kref);
-
- mutex_lock(&wacom_udev_list_lock);
- list_del(&data->list);
- mutex_unlock(&wacom_udev_list_lock);
-
- kfree(data);
-}
-
-static void wacom_remove_shared_data(struct wacom *wacom)
-{
- struct wacom_hdev_data *data;
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-
- if (wacom_wac->shared) {
- data = container_of(wacom_wac->shared, struct wacom_hdev_data,
- shared);
-
- if (wacom_wac->shared->touch == wacom->hdev)
- wacom_wac->shared->touch = NULL;
- else if (wacom_wac->shared->pen == wacom->hdev)
- wacom_wac->shared->pen = NULL;
-
- kref_put(&data->kref, wacom_release_shared_data);
- wacom_wac->shared = NULL;
- }
-}
-
static int wacom_led_control(struct wacom *wacom)
{
unsigned char *buf;
@@ -635,6 +706,12 @@ static int wacom_led_control(struct wacom *wacom)
unsigned char report_id = WAC_CMD_LED_CONTROL;
int buf_size = 9;
+ if (!hid_get_drvdata(wacom->hdev))
+ return -ENODEV;
+
+ if (!wacom->led.groups)
+ return -ENOTSUPP;
+
if (wacom->wacom_wac.pid) { /* wireless connected */
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
@@ -650,7 +727,7 @@ static int wacom_led_control(struct wacom *wacom)
* one of four values:
* 0 = Low; 1 = Medium; 2 = High; 3 = Off
*/
- int ring_led = wacom->led.select[0] & 0x03;
+ int ring_led = wacom->led.groups[0].select & 0x03;
int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03;
int crop_lum = 0;
unsigned char led_bits = (crop_lum << 4) | (ring_lum << 2) | (ring_led);
@@ -665,11 +742,11 @@ static int wacom_led_control(struct wacom *wacom)
buf[1] = led_bits;
}
else {
- int led = wacom->led.select[0] | 0x4;
+ int led = wacom->led.groups[0].select | 0x4;
if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
wacom->wacom_wac.features.type == WACOM_24HD)
- led |= (wacom->led.select[1] << 4) | 0x40;
+ led |= (wacom->led.groups[1].select << 4) | 0x40;
buf[0] = report_id;
buf[1] = led;
@@ -741,7 +818,7 @@ static ssize_t wacom_led_select_store(struct device *dev, int set_id,
mutex_lock(&wacom->lock);
- wacom->led.select[set_id] = id & 0x3;
+ wacom->led.groups[set_id].select = id & 0x3;
err = wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -761,7 +838,7 @@ static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \
struct hid_device *hdev = to_hid_device(dev);\
struct wacom *wacom = hid_get_drvdata(hdev); \
return scnprintf(buf, PAGE_SIZE, "%d\n", \
- wacom->led.select[SET_ID]); \
+ wacom->led.groups[SET_ID].select); \
} \
static DEVICE_ATTR(status_led##SET_ID##_select, DEV_ATTR_RW_PERM, \
wacom_led##SET_ID##_select_show, \
@@ -904,6 +981,327 @@ static struct attribute_group intuos5_led_attr_group = {
.attrs = intuos5_led_attrs,
};
+struct wacom_sysfs_group_devres {
+ struct attribute_group *group;
+ struct kobject *root;
+};
+
+static void wacom_devm_sysfs_group_release(struct device *dev, void *res)
+{
+ struct wacom_sysfs_group_devres *devres = res;
+ struct kobject *kobj = devres->root;
+
+ dev_dbg(dev, "%s: dropping reference to %s\n",
+ __func__, devres->group->name);
+ sysfs_remove_group(kobj, devres->group);
+}
+
+static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
+ struct kobject *root,
+ struct attribute_group *group)
+{
+ struct wacom_sysfs_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(wacom_devm_sysfs_group_release,
+ sizeof(struct wacom_sysfs_group_devres),
+ GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->group = group;
+ devres->root = root;
+
+ error = sysfs_create_group(devres->root, group);
+ if (error)
+ return error;
+
+ devres_add(&wacom->hdev->dev, devres);
+
+ return 0;
+}
+
+static int wacom_devm_sysfs_create_group(struct wacom *wacom,
+ struct attribute_group *group)
+{
+ return __wacom_devm_sysfs_create_group(wacom, &wacom->hdev->dev.kobj,
+ group);
+}
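Callers later in this patch then register attribute groups without a matching sysfs_remove_group(); devres drops the group at device teardown. For example (mirroring a call that appears further down):

	error = wacom_devm_sysfs_create_group(wacom, &intuos4_led_attr_group);
	if (error)
		return error;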
+
+enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
+{
+ struct wacom *wacom = led->wacom;
+
+ if (wacom->led.max_hlv)
+ return led->hlv * LED_FULL / wacom->led.max_hlv;
+
+ if (wacom->led.max_llv)
+ return led->llv * LED_FULL / wacom->led.max_llv;
+
+ /* device doesn't support brightness tuning */
+ return LED_FULL;
+}
+
+static enum led_brightness __wacom_led_brightness_get(struct led_classdev *cdev)
+{
+ struct wacom_led *led = container_of(cdev, struct wacom_led, cdev);
+ struct wacom *wacom = led->wacom;
+
+ if (wacom->led.groups[led->group].select != led->id)
+ return LED_OFF;
+
+ return wacom_leds_brightness_get(led);
+}
+
+static int wacom_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct wacom_led *led = container_of(cdev, struct wacom_led, cdev);
+ struct wacom *wacom = led->wacom;
+ int error;
+
+ mutex_lock(&wacom->lock);
+
+ if (!wacom->led.groups || (brightness == LED_OFF &&
+ wacom->led.groups[led->group].select != led->id)) {
+ error = 0;
+ goto out;
+ }
+
+ led->llv = wacom->led.llv = wacom->led.max_llv * brightness / LED_FULL;
+ led->hlv = wacom->led.hlv = wacom->led.max_hlv * brightness / LED_FULL;
+
+ wacom->led.groups[led->group].select = led->id;
+
+ error = wacom_led_control(wacom);
+
+out:
+ mutex_unlock(&wacom->lock);
+
+ return error;
+}
+
+static void wacom_led_readonly_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+}
+
+static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
+ struct wacom_led *led, unsigned int group,
+ unsigned int id, bool read_only)
+{
+ int error;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s::wacom-%d.%d",
+ dev_name(dev),
+ group,
+ id);
+ if (!name)
+ return -ENOMEM;
+
+ if (!read_only) {
+ led->trigger.name = name;
+ error = devm_led_trigger_register(dev, &led->trigger);
+ if (error) {
+ hid_err(wacom->hdev,
+ "failed to register LED trigger %s: %d\n",
+ led->cdev.name, error);
+ return error;
+ }
+ }
+
+ led->group = group;
+ led->id = id;
+ led->wacom = wacom;
+ led->llv = wacom->led.llv;
+ led->hlv = wacom->led.hlv;
+ led->cdev.name = name;
+ led->cdev.max_brightness = LED_FULL;
+ led->cdev.flags = LED_HW_PLUGGABLE;
+ led->cdev.brightness_get = __wacom_led_brightness_get;
+ if (!read_only) {
+ led->cdev.brightness_set_blocking = wacom_led_brightness_set;
+ led->cdev.default_trigger = led->cdev.name;
+ } else {
+ led->cdev.brightness_set = wacom_led_readonly_brightness_set;
+ }
+
+ error = devm_led_classdev_register(dev, &led->cdev);
+ if (error) {
+ hid_err(wacom->hdev,
+ "failed to register LED %s: %d\n",
+ led->cdev.name, error);
+ led->cdev.name = NULL;
+ return error;
+ }
+
+ return 0;
+}
+
+static void wacom_led_groups_release_one(void *data)
+{
+ struct wacom_group_leds *group = data;
+
+ devres_release_group(group->dev, group);
+}
+
+static int wacom_led_groups_alloc_and_register_one(struct device *dev,
+ struct wacom *wacom,
+ int group_id, int count,
+ bool read_only)
+{
+ struct wacom_led *leds;
+ int i, error;
+
+ if (group_id >= wacom->led.count || count <= 0)
+ return -EINVAL;
+
+ if (!devres_open_group(dev, &wacom->led.groups[group_id], GFP_KERNEL))
+ return -ENOMEM;
+
+ leds = devm_kzalloc(dev, sizeof(struct wacom_led) * count, GFP_KERNEL);
+ if (!leds) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ wacom->led.groups[group_id].leds = leds;
+ wacom->led.groups[group_id].count = count;
+
+ for (i = 0; i < count; i++) {
+ error = wacom_led_register_one(dev, wacom, &leds[i],
+ group_id, i, read_only);
+ if (error)
+ goto err;
+ }
+
+ wacom->led.groups[group_id].dev = dev;
+
+ devres_close_group(dev, &wacom->led.groups[group_id]);
+
+ /*
+ * There is a bug (?) in devm_led_classdev_register() in which it
+ * increments the refcount of the parent. If the parent is an input
+ * device, that means the ref count never reaches 0 when
+ * devm_input_device_release() gets called.
+ * This means that the LEDs are still there after disconnect.
+ * Manually force the release of the group so that the LEDs are
+ * released once we are done using them.
+ */
+ error = devm_add_action_or_reset(&wacom->hdev->dev,
+ wacom_led_groups_release_one,
+ &wacom->led.groups[group_id]);
+ if (error)
+ return error;
+
+ return 0;
+
+err:
+ devres_release_group(dev, &wacom->led.groups[group_id]);
+ return error;
+}
+
+struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group_id,
+ unsigned int id)
+{
+ struct wacom_group_leds *group;
+
+ if (group_id >= wacom->led.count)
+ return NULL;
+
+ group = &wacom->led.groups[group_id];
+
+ if (!group->leds)
+ return NULL;
+
+ id %= group->count;
+
+ return &group->leds[id];
+}
+
+/**
+ * wacom_led_next() - gives the next available LED with a wacom trigger
+ * @wacom: the wacom device
+ * @cur: the current LED
+ *
+ * Returns the next available struct wacom_led which still has its default
+ * (wacom) trigger, or the current one if none is available.
+ */
+struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur)
+{
+ struct wacom_led *next_led;
+ int group, next;
+
+ if (!wacom || !cur)
+ return NULL;
+
+ group = cur->group;
+ next = cur->id;
+
+ do {
+ next_led = wacom_led_find(wacom, group, ++next);
+ if (!next_led || next_led == cur)
+ return next_led;
+ } while (next_led->cdev.trigger != &next_led->trigger);
+
+ return next_led;
+}
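A hedged sketch of a caller advancing the currently selected LED with these helpers; the led_trigger_event() call is illustrative, the real consumer lives in wacom_wac.c:

	struct wacom_led *led, *next;

	led = wacom_led_find(wacom, 0, wacom->led.groups[0].select);
	next = wacom_led_next(wacom, led);
	if (next && next != led)
		led_trigger_event(&next->trigger, LED_FULL);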
+
+static void wacom_led_groups_release(void *data)
+{
+ struct wacom *wacom = data;
+
+ wacom->led.groups = NULL;
+ wacom->led.count = 0;
+}
+
+static int wacom_led_groups_allocate(struct wacom *wacom, int count)
+{
+ struct device *dev = &wacom->hdev->dev;
+ struct wacom_group_leds *groups;
+ int error;
+
+ groups = devm_kzalloc(dev, sizeof(struct wacom_group_leds) * count,
+ GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ error = devm_add_action_or_reset(dev, wacom_led_groups_release, wacom);
+ if (error)
+ return error;
+
+ wacom->led.groups = groups;
+ wacom->led.count = count;
+
+ return 0;
+}
+
+static int wacom_leds_alloc_and_register(struct wacom *wacom, int group_count,
+ int led_per_group, bool read_only)
+{
+ struct device *dev;
+ int i, error;
+
+ if (!wacom->wacom_wac.pad_input)
+ return -EINVAL;
+
+ dev = &wacom->wacom_wac.pad_input->dev;
+
+ error = wacom_led_groups_allocate(wacom, group_count);
+ if (error)
+ return error;
+
+ for (i = 0; i < group_count; i++) {
+ error = wacom_led_groups_alloc_and_register_one(dev, wacom, i,
+ led_per_group,
+ read_only);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
static int wacom_initialize_leds(struct wacom *wacom)
{
int error;
@@ -917,25 +1315,38 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOS4:
case INTUOS4WL:
case INTUOS4L:
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
wacom->led.llv = 10;
wacom->led.hlv = 20;
+ wacom->led.max_llv = 127;
+ wacom->led.max_hlv = 127;
wacom->led.img_lum = 10;
- error = sysfs_create_group(&wacom->hdev->dev.kobj,
- &intuos4_led_attr_group);
+
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+
+ error = wacom_devm_sysfs_create_group(wacom,
+ &intuos4_led_attr_group);
break;
case WACOM_24HD:
case WACOM_21UX2:
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
wacom->led.llv = 0;
wacom->led.hlv = 0;
wacom->led.img_lum = 0;
- error = sysfs_create_group(&wacom->hdev->dev.kobj,
- &cintiq_led_attr_group);
+ error = wacom_leds_alloc_and_register(wacom, 2, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+
+ error = wacom_devm_sysfs_create_group(wacom,
+ &cintiq_led_attr_group);
break;
case INTUOS5S:
@@ -944,16 +1355,31 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOSPS:
case INTUOSPM:
case INTUOSPL:
- wacom->led.select[0] = 0;
- wacom->led.select[1] = 0;
wacom->led.llv = 32;
- wacom->led.hlv = 0;
- wacom->led.img_lum = 0;
+ wacom->led.max_llv = 96;
+
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
- error = sysfs_create_group(&wacom->hdev->dev.kobj,
- &intuos5_led_attr_group);
+ error = wacom_devm_sysfs_create_group(wacom,
+ &intuos5_led_attr_group);
break;
+ case REMOTE:
+ wacom->led.llv = 255;
+ wacom->led.max_llv = 255;
+ error = wacom_led_groups_allocate(wacom, 5);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+ return 0;
+
default:
return 0;
}
@@ -964,86 +1390,45 @@ static int wacom_initialize_leds(struct wacom *wacom)
return error;
}
wacom_led_control(wacom);
- wacom->led_initialized = true;
return 0;
}
-static void wacom_destroy_leds(struct wacom *wacom)
-{
- if (!wacom->led_initialized)
- return;
-
- if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD))
- return;
-
- wacom->led_initialized = false;
-
- switch (wacom->wacom_wac.features.type) {
- case INTUOS4S:
- case INTUOS4:
- case INTUOS4WL:
- case INTUOS4L:
- sysfs_remove_group(&wacom->hdev->dev.kobj,
- &intuos4_led_attr_group);
- break;
-
- case WACOM_24HD:
- case WACOM_21UX2:
- sysfs_remove_group(&wacom->hdev->dev.kobj,
- &cintiq_led_attr_group);
- break;
-
- case INTUOS5S:
- case INTUOS5:
- case INTUOS5L:
- case INTUOSPS:
- case INTUOSPM:
- case INTUOSPL:
- sysfs_remove_group(&wacom->hdev->dev.kobj,
- &intuos5_led_attr_group);
- break;
- }
-}
-
static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CAPACITY
};
-static enum power_supply_property wacom_ac_props[] = {
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_SCOPE,
-};
-
static int wacom_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct wacom *wacom = power_supply_get_drvdata(psy);
+ struct wacom_battery *battery = power_supply_get_drvdata(psy);
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = battery->wacom->wacom_wac.name;
+ break;
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = wacom->wacom_wac.bat_connected;
+ val->intval = battery->bat_connected;
break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval =
- wacom->wacom_wac.battery_capacity;
+ val->intval = battery->battery_capacity;
break;
case POWER_SUPPLY_PROP_STATUS:
- if (wacom->wacom_wac.bat_charging)
+ if (battery->bat_charging)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else if (wacom->wacom_wac.battery_capacity == 100 &&
- wacom->wacom_wac.ps_connected)
+ else if (battery->battery_capacity == 100 &&
+ battery->ps_connected)
val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (wacom->wacom_wac.ps_connected)
+ else if (battery->ps_connected)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
@@ -1056,84 +1441,64 @@ static int wacom_battery_get_property(struct power_supply *psy,
return ret;
}
-static int wacom_ac_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int __wacom_initialize_battery(struct wacom *wacom,
+ struct wacom_battery *battery)
{
- struct wacom *wacom = power_supply_get_drvdata(psy);
- int ret = 0;
+ static atomic_t battery_no = ATOMIC_INIT(0);
+ struct device *dev = &wacom->hdev->dev;
+ struct power_supply_config psy_cfg = { .drv_data = battery, };
+ struct power_supply *ps_bat;
+ struct power_supply_desc *bat_desc = &battery->bat_desc;
+ unsigned long n;
+ int error;
- switch (psp) {
- case POWER_SUPPLY_PROP_PRESENT:
- /* fall through */
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = wacom->wacom_wac.ps_connected;
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- val->intval = POWER_SUPPLY_SCOPE_DEVICE;
- break;
- default:
- ret = -EINVAL;
- break;
+ if (!devres_open_group(dev, bat_desc, GFP_KERNEL))
+ return -ENOMEM;
+
+ battery->wacom = wacom;
+
+ n = atomic_inc_return(&battery_no) - 1;
+
+ bat_desc->properties = wacom_battery_props;
+ bat_desc->num_properties = ARRAY_SIZE(wacom_battery_props);
+ bat_desc->get_property = wacom_battery_get_property;
+ sprintf(battery->bat_name, "wacom_battery_%ld", n);
+ bat_desc->name = battery->bat_name;
+ bat_desc->type = POWER_SUPPLY_TYPE_USB;
+ bat_desc->use_for_apm = 0;
+
+ ps_bat = devm_power_supply_register(dev, bat_desc, &psy_cfg);
+ if (IS_ERR(ps_bat)) {
+ error = PTR_ERR(ps_bat);
+ goto err;
}
- return ret;
+
+ power_supply_powers(ps_bat, &wacom->hdev->dev);
+
+ battery->battery = ps_bat;
+
+ devres_close_group(dev, bat_desc);
+ return 0;
+
+err:
+ devres_release_group(dev, bat_desc);
+ return error;
}
static int wacom_initialize_battery(struct wacom *wacom)
{
- static atomic_t battery_no = ATOMIC_INIT(0);
- struct power_supply_config psy_cfg = { .drv_data = wacom, };
- unsigned long n;
-
- if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) {
- struct power_supply_desc *bat_desc = &wacom->battery_desc;
- struct power_supply_desc *ac_desc = &wacom->ac_desc;
- n = atomic_inc_return(&battery_no) - 1;
-
- bat_desc->properties = wacom_battery_props;
- bat_desc->num_properties = ARRAY_SIZE(wacom_battery_props);
- bat_desc->get_property = wacom_battery_get_property;
- sprintf(wacom->wacom_wac.bat_name, "wacom_battery_%ld", n);
- bat_desc->name = wacom->wacom_wac.bat_name;
- bat_desc->type = POWER_SUPPLY_TYPE_BATTERY;
- bat_desc->use_for_apm = 0;
-
- ac_desc->properties = wacom_ac_props;
- ac_desc->num_properties = ARRAY_SIZE(wacom_ac_props);
- ac_desc->get_property = wacom_ac_get_property;
- sprintf(wacom->wacom_wac.ac_name, "wacom_ac_%ld", n);
- ac_desc->name = wacom->wacom_wac.ac_name;
- ac_desc->type = POWER_SUPPLY_TYPE_MAINS;
- ac_desc->use_for_apm = 0;
-
- wacom->battery = power_supply_register(&wacom->hdev->dev,
- &wacom->battery_desc, &psy_cfg);
- if (IS_ERR(wacom->battery))
- return PTR_ERR(wacom->battery);
-
- power_supply_powers(wacom->battery, &wacom->hdev->dev);
-
- wacom->ac = power_supply_register(&wacom->hdev->dev,
- &wacom->ac_desc,
- &psy_cfg);
- if (IS_ERR(wacom->ac)) {
- power_supply_unregister(wacom->battery);
- return PTR_ERR(wacom->ac);
- }
-
- power_supply_powers(wacom->ac, &wacom->hdev->dev);
- }
+ if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY)
+ return __wacom_initialize_battery(wacom, &wacom->battery);
return 0;
}
static void wacom_destroy_battery(struct wacom *wacom)
{
- if (wacom->battery) {
- power_supply_unregister(wacom->battery);
- wacom->battery = NULL;
- power_supply_unregister(wacom->ac);
- wacom->ac = NULL;
+ if (wacom->battery.battery) {
+ devres_release_group(&wacom->hdev->dev,
+ &wacom->battery.bat_desc);
+ wacom->battery.battery = NULL;
}
}
@@ -1179,7 +1544,7 @@ static ssize_t wacom_show_remote_mode(struct kobject *kobj,
struct wacom *wacom = hid_get_drvdata(hdev);
u8 mode;
- mode = wacom->led.select[index];
+ mode = wacom->led.groups[index].select;
if (mode >= 0 && mode < 3)
return snprintf(buf, PAGE_SIZE, "%d\n", mode);
else
@@ -1212,54 +1577,30 @@ DEVICE_EKR_ATTR_GROUP(2);
DEVICE_EKR_ATTR_GROUP(3);
DEVICE_EKR_ATTR_GROUP(4);
-int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial, int index)
+static int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial,
+ int index)
{
int error = 0;
- char *buf;
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_remote *remote = wacom->remote;
- wacom_wac->serial[index] = serial;
-
- buf = kzalloc(WAC_REMOTE_SERIAL_MAX_STRLEN, GFP_KERNEL);
- if (!buf)
+ remote->remotes[index].group.name = devm_kasprintf(&wacom->hdev->dev,
+ GFP_KERNEL,
+ "%d", serial);
+ if (!remote->remotes[index].group.name)
return -ENOMEM;
- snprintf(buf, WAC_REMOTE_SERIAL_MAX_STRLEN, "%d", serial);
- wacom->remote_group[index].name = buf;
- error = sysfs_create_group(wacom->remote_dir,
- &wacom->remote_group[index]);
+ error = __wacom_devm_sysfs_create_group(wacom, remote->remote_dir,
+ &remote->remotes[index].group);
if (error) {
+ remote->remotes[index].group.name = NULL;
hid_err(wacom->hdev,
"cannot create sysfs group err: %d\n", error);
- kobject_put(wacom->remote_dir);
return error;
}
return 0;
}
-void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial)
-{
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- int i;
-
- if (!serial)
- return;
-
- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (wacom_wac->serial[i] == serial) {
- wacom_wac->serial[i] = 0;
- wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
- if (wacom->remote_group[i].name) {
- sysfs_remove_group(wacom->remote_dir,
- &wacom->remote_group[i]);
- kfree(wacom->remote_group[i].name);
- wacom->remote_group[i].name = NULL;
- }
- }
- }
-}
-
static int wacom_cmd_unpair_remote(struct wacom *wacom, unsigned char selector)
{
const size_t buf_size = 2;
@@ -1316,27 +1657,57 @@ static const struct attribute *remote_unpair_attrs[] = {
NULL
};
-static int wacom_initialize_remote(struct wacom *wacom)
+static void wacom_remotes_destroy(void *data)
+{
+ struct wacom *wacom = data;
+ struct wacom_remote *remote = wacom->remote;
+
+ if (!remote)
+ return;
+
+ kobject_put(remote->remote_dir);
+ kfifo_free(&remote->remote_fifo);
+ wacom->remote = NULL;
+}
+
+static int wacom_initialize_remotes(struct wacom *wacom)
{
int error = 0;
- struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ struct wacom_remote *remote;
int i;
if (wacom->wacom_wac.features.type != REMOTE)
return 0;
- wacom->remote_group[0] = remote0_serial_group;
- wacom->remote_group[1] = remote1_serial_group;
- wacom->remote_group[2] = remote2_serial_group;
- wacom->remote_group[3] = remote3_serial_group;
- wacom->remote_group[4] = remote4_serial_group;
+ remote = devm_kzalloc(&wacom->hdev->dev, sizeof(*wacom->remote),
+ GFP_KERNEL);
+ if (!remote)
+ return -ENOMEM;
+
+ wacom->remote = remote;
- wacom->remote_dir = kobject_create_and_add("wacom_remote",
- &wacom->hdev->dev.kobj);
- if (!wacom->remote_dir)
+ spin_lock_init(&remote->remote_lock);
+
+ error = kfifo_alloc(&remote->remote_fifo,
+ 5 * sizeof(struct wacom_remote_data),
+ GFP_KERNEL);
+ if (error) {
+ hid_err(wacom->hdev, "failed allocating remote_fifo\n");
return -ENOMEM;
+ }
- error = sysfs_create_files(wacom->remote_dir, remote_unpair_attrs);
+ remote->remotes[0].group = remote0_serial_group;
+ remote->remotes[1].group = remote1_serial_group;
+ remote->remotes[2].group = remote2_serial_group;
+ remote->remotes[3].group = remote3_serial_group;
+ remote->remotes[4].group = remote4_serial_group;
+
+ remote->remote_dir = kobject_create_and_add("wacom_remote",
+ &wacom->hdev->dev.kobj);
+ if (!remote->remote_dir)
+ return -ENOMEM;
+
+ error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs);
if (error) {
hid_err(wacom->hdev,
@@ -1345,10 +1716,15 @@ static int wacom_initialize_remote(struct wacom *wacom)
}
for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
- wacom_wac->serial[i] = 0;
+ wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ remote->remotes[i].serial = 0;
}
+ error = devm_add_action_or_reset(&wacom->hdev->dev,
+ wacom_remotes_destroy, wacom);
+ if (error)
+ return error;
+
return 0;
}
@@ -1358,7 +1734,7 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
struct hid_device *hdev = wacom->hdev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- input_dev = input_allocate_device();
+ input_dev = devm_input_allocate_device(&hdev->dev);
if (!input_dev)
return NULL;
@@ -1377,36 +1753,6 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom)
return input_dev;
}
-static void wacom_clean_inputs(struct wacom *wacom)
-{
- if (wacom->wacom_wac.pen_input) {
- if (wacom->wacom_wac.pen_registered)
- input_unregister_device(wacom->wacom_wac.pen_input);
- else
- input_free_device(wacom->wacom_wac.pen_input);
- }
- if (wacom->wacom_wac.touch_input) {
- if (wacom->wacom_wac.touch_registered)
- input_unregister_device(wacom->wacom_wac.touch_input);
- else
- input_free_device(wacom->wacom_wac.touch_input);
- }
- if (wacom->wacom_wac.pad_input) {
- if (wacom->wacom_wac.pad_registered)
- input_unregister_device(wacom->wacom_wac.pad_input);
- else
- input_free_device(wacom->wacom_wac.pad_input);
- }
- kobject_put(wacom->remote_dir);
- wacom->wacom_wac.pen_input = NULL;
- wacom->wacom_wac.touch_input = NULL;
- wacom->wacom_wac.pad_input = NULL;
- wacom->wacom_wac.pen_registered = false;
- wacom->wacom_wac.touch_registered = false;
- wacom->wacom_wac.pad_registered = false;
- wacom_destroy_leds(wacom);
-}
-
static int wacom_allocate_inputs(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -1414,10 +1760,10 @@ static int wacom_allocate_inputs(struct wacom *wacom)
wacom_wac->pen_input = wacom_allocate_input(wacom);
wacom_wac->touch_input = wacom_allocate_input(wacom);
wacom_wac->pad_input = wacom_allocate_input(wacom);
- if (!wacom_wac->pen_input || !wacom_wac->touch_input || !wacom_wac->pad_input) {
- wacom_clean_inputs(wacom);
+ if (!wacom_wac->pen_input ||
+ !wacom_wac->touch_input ||
+ !wacom_wac->pad_input)
return -ENOMEM;
- }
wacom_wac->pen_input->name = wacom_wac->pen_name;
wacom_wac->touch_input->name = wacom_wac->touch_name;
@@ -1448,8 +1794,7 @@ static int wacom_register_inputs(struct wacom *wacom)
} else {
error = input_register_device(pen_input_dev);
if (error)
- goto fail_register_pen_input;
- wacom_wac->pen_registered = true;
+ goto fail;
}
error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -1461,8 +1806,7 @@ static int wacom_register_inputs(struct wacom *wacom)
} else {
error = input_register_device(touch_input_dev);
if (error)
- goto fail_register_touch_input;
- wacom_wac->touch_registered = true;
+ goto fail;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -1474,37 +1818,15 @@ static int wacom_register_inputs(struct wacom *wacom)
} else {
error = input_register_device(pad_input_dev);
if (error)
- goto fail_register_pad_input;
- wacom_wac->pad_registered = true;
-
- error = wacom_initialize_leds(wacom);
- if (error)
- goto fail_leds;
-
- error = wacom_initialize_remote(wacom);
- if (error)
- goto fail_remote;
+ goto fail;
}
return 0;
-fail_remote:
- wacom_destroy_leds(wacom);
-fail_leds:
- input_unregister_device(pad_input_dev);
- pad_input_dev = NULL;
- wacom_wac->pad_registered = false;
-fail_register_pad_input:
- if (touch_input_dev)
- input_unregister_device(touch_input_dev);
+fail:
+ wacom_wac->pad_input = NULL;
wacom_wac->touch_input = NULL;
- wacom_wac->touch_registered = false;
-fail_register_touch_input:
- if (pen_input_dev)
- input_unregister_device(pen_input_dev);
wacom_wac->pen_input = NULL;
- wacom_wac->pen_registered = false;
-fail_register_pen_input:
return error;
}
@@ -1543,14 +1865,14 @@ static void wacom_calculate_res(struct wacom_features *features)
void wacom_battery_work(struct work_struct *work)
{
- struct wacom *wacom = container_of(work, struct wacom, work);
+ struct wacom *wacom = container_of(work, struct wacom, battery_work);
if ((wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) &&
- !wacom->battery) {
+ !wacom->battery.battery) {
wacom_initialize_battery(wacom);
}
else if (!(wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) &&
- wacom->battery) {
+ wacom->battery.battery) {
wacom_destroy_battery(wacom);
}
}
@@ -1606,6 +1928,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
strlcpy(name, features->name, sizeof(name));
}
+ snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s",
+ name, suffix);
+
/* Append the device type to the name */
snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name),
"%s%s Pen", name, suffix);
@@ -1615,6 +1940,22 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
"%s%s Pad", name, suffix);
}
+static void wacom_release_resources(struct wacom *wacom)
+{
+ struct hid_device *hdev = wacom->hdev;
+
+ if (!wacom->resources)
+ return;
+
+ devres_release_group(&hdev->dev, wacom);
+
+ wacom->resources = false;
+
+ wacom->wacom_wac.pen_input = NULL;
+ wacom->wacom_wac.touch_input = NULL;
+ wacom->wacom_wac.pad_input = NULL;
+}
+
static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
@@ -1627,9 +1968,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (features->pktlen > WACOM_PKGLEN_MAX)
return -EINVAL;
+ if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
+ return -ENOMEM;
+
+ wacom->resources = true;
+
error = wacom_allocate_inputs(wacom);
if (error)
- return error;
+ goto fail;
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
@@ -1642,7 +1988,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
} else if ((features->pktlen != WACOM_PKGLEN_BPAD_TOUCH) &&
(features->pktlen != WACOM_PKGLEN_BPAD_TOUCH_USB)) {
error = -ENODEV;
- goto fail_allocate_inputs;
+ goto fail;
}
}
@@ -1662,7 +2008,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
error ? "Ignoring" : "Assuming pen");
if (error)
- goto fail_parsed;
+ goto fail;
features->device_type |= WACOM_DEVICETYPE_PEN;
}
@@ -1673,18 +2019,28 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
error = wacom_add_shared_data(hdev);
if (error)
- goto fail_shared_data;
+ goto fail;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
error = wacom_initialize_battery(wacom);
if (error)
- goto fail_battery;
+ goto fail;
}
error = wacom_register_inputs(wacom);
if (error)
- goto fail_register_inputs;
+ goto fail;
+
+ if (wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD) {
+ error = wacom_initialize_leds(wacom);
+ if (error)
+ goto fail;
+
+ error = wacom_initialize_remotes(wacom);
+ if (error)
+ goto fail;
+ }
if (features->type == HID_GENERIC)
connect_mask |= HID_CONNECT_DRIVER;
@@ -1693,7 +2049,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
error = hid_hw_start(hdev, connect_mask);
if (error) {
hid_err(hdev, "hw start failed\n");
- goto fail_hw_start;
+ goto fail;
}
if (!wireless) {
@@ -1705,7 +2061,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if ((features->type == BAMBOO_TOUCH) &&
(features->device_type & WACOM_DEVICETYPE_PEN)) {
error = -ENODEV;
- goto fail_hw_start;
+ goto fail_quirks;
}
/* pen only Bamboo neither support touch nor pad */
@@ -1713,37 +2069,33 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
(features->device_type & WACOM_DEVICETYPE_PAD))) {
error = -ENODEV;
- goto fail_hw_start;
+ goto fail_quirks;
}
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
error = hid_hw_open(hdev);
if ((wacom_wac->features.type == INTUOSHT ||
- wacom_wac->features.type == INTUOSHT2) &&
+ wacom_wac->features.type == INTUOSHT2) &&
(wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)) {
- wacom_wac->shared->touch_input = wacom_wac->touch_input;
+ wacom_wac->shared->type = wacom_wac->features.type;
+ wacom_wac->shared->touch_input = wacom_wac->touch_input;
}
+ devres_close_group(&hdev->dev, wacom);
+
return 0;
-fail_hw_start:
+fail_quirks:
hid_hw_stop(hdev);
-fail_register_inputs:
- wacom_clean_inputs(wacom);
- wacom_destroy_battery(wacom);
-fail_battery:
- wacom_remove_shared_data(wacom);
-fail_shared_data:
-fail_parsed:
-fail_allocate_inputs:
- wacom_clean_inputs(wacom);
+fail:
+ wacom_release_resources(wacom);
return error;
}
static void wacom_wireless_work(struct work_struct *work)
{
- struct wacom *wacom = container_of(work, struct wacom, work);
+ struct wacom *wacom = container_of(work, struct wacom, wireless_work);
struct usb_device *usbdev = wacom->usbdev;
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct hid_device *hdev1, *hdev2;
@@ -1762,17 +2114,16 @@ static void wacom_wireless_work(struct work_struct *work)
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
wacom1 = hid_get_drvdata(hdev1);
wacom_wac1 = &(wacom1->wacom_wac);
- wacom_clean_inputs(wacom1);
+ wacom_release_resources(wacom1);
/* Touch interface */
hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
wacom2 = hid_get_drvdata(hdev2);
wacom_wac2 = &(wacom2->wacom_wac);
- wacom_clean_inputs(wacom2);
+ wacom_release_resources(wacom2);
if (wacom_wac->pid == 0) {
hid_info(wacom->hdev, "wireless tablet disconnected\n");
- wacom_wac1->shared->type = 0;
} else {
const struct hid_device_id *id = wacom_ids;
@@ -1814,6 +2165,8 @@ static void wacom_wireless_work(struct work_struct *work)
goto fail;
}
+ strlcpy(wacom_wac->name, wacom_wac1->name,
+ sizeof(wacom_wac->name));
error = wacom_initialize_battery(wacom);
if (error)
goto fail;
@@ -1822,11 +2175,177 @@ static void wacom_wireless_work(struct work_struct *work)
return;
fail:
- wacom_clean_inputs(wacom1);
- wacom_clean_inputs(wacom2);
+ wacom_release_resources(wacom1);
+ wacom_release_resources(wacom2);
return;
}
+static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+{
+ struct wacom_remote *remote = wacom->remote;
+ u32 serial = remote->remotes[index].serial;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&remote->remote_lock, flags);
+ remote->remotes[index].registered = false;
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
+
+ if (remote->remotes[index].battery.battery)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[index].battery.bat_desc);
+
+ if (remote->remotes[index].group.name)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[index]);
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
+ remote->remotes[i].serial = 0;
+ remote->remotes[i].group.name = NULL;
+ remote->remotes[i].registered = false;
+ remote->remotes[i].battery.battery = NULL;
+ wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ }
+ }
+}
+
+static int wacom_remote_create_one(struct wacom *wacom, u32 serial,
+ unsigned int index)
+{
+ struct wacom_remote *remote = wacom->remote;
+ struct device *dev = &wacom->hdev->dev;
+ int error, k;
+
+ /* A remote can pair more than once with an EKR;
+ * check to make sure this serial isn't already paired.
+ */
+ for (k = 0; k < WACOM_MAX_REMOTES; k++) {
+ if (remote->remotes[k].serial == serial)
+ break;
+ }
+
+ if (k < WACOM_MAX_REMOTES) {
+ remote->remotes[index].serial = serial;
+ return 0;
+ }
+
+ if (!devres_open_group(dev, &remote->remotes[index], GFP_KERNEL))
+ return -ENOMEM;
+
+ error = wacom_remote_create_attr_group(wacom, serial, index);
+ if (error)
+ goto fail;
+
+ remote->remotes[index].input = wacom_allocate_input(wacom);
+ if (!remote->remotes[index].input) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ remote->remotes[index].input->uniq = remote->remotes[index].group.name;
+ remote->remotes[index].input->name = wacom->wacom_wac.pad_name;
+
+ if (!remote->remotes[index].input->name) {
+ error = -EINVAL;
+ goto fail;
+ }
+
+ error = wacom_setup_pad_input_capabilities(remote->remotes[index].input,
+ &wacom->wacom_wac);
+ if (error)
+ goto fail;
+
+ remote->remotes[index].serial = serial;
+
+ error = input_register_device(remote->remotes[index].input);
+ if (error)
+ goto fail;
+
+ error = wacom_led_groups_alloc_and_register_one(
+ &remote->remotes[index].input->dev,
+ wacom, index, 3, true);
+ if (error)
+ goto fail;
+
+ remote->remotes[index].registered = true;
+
+ devres_close_group(dev, &remote->remotes[index]);
+ return 0;
+
+fail:
+ devres_release_group(dev, &remote->remotes[index]);
+ remote->remotes[index].serial = 0;
+ return error;
+}
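
The EKR handling above leans on devres groups: everything allocated between devres_open_group() and devres_close_group() can later be dropped as one unit with devres_release_group(), which is how wacom_remote_destroy_one() tears a remote down. A minimal sketch of the pattern, assuming a hypothetical sub-device setup (my_setup_subdev and my_priv are illustrative names, not part of the driver):

#include <linux/device.h>
#include <linux/slab.h>

struct my_priv {
	void *buf;
};

static int my_setup_subdev(struct device *dev, struct my_priv *priv)
{
	int error;

	/* Open a group keyed on 'priv'; every devm allocation made from
	 * here on is tagged with that group. */
	if (!devres_open_group(dev, priv, GFP_KERNEL))
		return -ENOMEM;

	priv->buf = devm_kzalloc(dev, 64, GFP_KERNEL);
	if (!priv->buf) {
		error = -ENOMEM;
		goto fail;
	}

	/* ... register inputs, sysfs attributes, LEDs, ... */

	/* Success: keep the resources but close the group so later
	 * allocations are not tied to this sub-device. */
	devres_close_group(dev, priv);
	return 0;

fail:
	/* Failure: drop everything acquired since devres_open_group(). */
	devres_release_group(dev, priv);
	return error;
}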
+
+static int wacom_remote_attach_battery(struct wacom *wacom, int index)
+{
+ struct wacom_remote *remote = wacom->remote;
+ int error;
+
+ if (!remote->remotes[index].registered)
+ return 0;
+
+ if (remote->remotes[index].battery.battery)
+ return 0;
+
+ if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN)
+ return 0;
+
+ error = __wacom_initialize_battery(wacom,
+ &wacom->remote->remotes[index].battery);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void wacom_remote_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, remote_work);
+ struct wacom_remote *remote = wacom->remote;
+ struct wacom_remote_data data;
+ unsigned long flags;
+ unsigned int count;
+ u32 serial;
+ int i;
+
+ spin_lock_irqsave(&remote->remote_lock, flags);
+
+ count = kfifo_out(&remote->remote_fifo, &data, sizeof(data));
+
+ if (count != sizeof(data)) {
+ hid_err(wacom->hdev,
+ "workitem triggered without status available\n");
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
+ return;
+ }
+
+ if (!kfifo_is_empty(&remote->remote_fifo))
+ wacom_schedule_work(&wacom->wacom_wac, WACOM_WORKER_REMOTE);
+
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ serial = data.remote[i].serial;
+ if (data.remote[i].connected) {
+
+ if (remote->remotes[i].serial == serial) {
+ wacom_remote_attach_battery(wacom, i);
+ continue;
+ }
+
+ if (remote->remotes[i].serial)
+ wacom_remote_destroy_one(wacom, i);
+
+ wacom_remote_create_one(wacom, serial, i);
+
+ } else if (remote->remotes[i].serial) {
+ wacom_remote_destroy_one(wacom, i);
+ }
+ }
+}
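
wacom_remote_work() is the consumer half of an IRQ-to-workqueue handoff: the status IRQ (rewritten further down in this patch) only snapshots the pairing table into a kfifo under the spinlock, and this worker drains it in process context where it may sleep. A compressed sketch of that handoff with illustrative names (my_event, my_irq, my_worker); note the driver itself moves whole wacom_remote_data records with kfifo_in()/kfifo_out(), while the sketch uses the element-wise kfifo_put()/kfifo_get() variant for brevity:

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_event {
	u32 serial;
	bool connected;
};

static void my_worker(struct work_struct *work);

static DEFINE_SPINLOCK(ev_lock);
static DEFINE_KFIFO(ev_fifo, struct my_event, 16);	/* size must be a power of two */
static DECLARE_WORK(ev_work, my_worker);

/* Producer: IRQ context, must stay short and must not sleep. */
static void my_irq(u32 serial, bool connected)
{
	struct my_event ev = { .serial = serial, .connected = connected };
	unsigned long flags;

	spin_lock_irqsave(&ev_lock, flags);
	if (!kfifo_put(&ev_fifo, ev))
		pr_err("event fifo full, dropping status event\n");
	spin_unlock_irqrestore(&ev_lock, flags);

	schedule_work(&ev_work);
}

/* Consumer: process context, free to allocate and register devices. */
static void my_worker(struct work_struct *work)
{
	struct my_event ev;
	unsigned long flags;
	bool got;

	for (;;) {
		spin_lock_irqsave(&ev_lock, flags);
		got = kfifo_get(&ev_fifo, &ev);
		spin_unlock_irqrestore(&ev_lock, flags);
		if (!got)
			break;
		/* ... pair or unpair the remote described by ev ... */
	}
}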
+
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -1845,7 +2364,7 @@ static int wacom_probe(struct hid_device *hdev,
/* hid-core sets this quirk for the boot interface */
hdev->quirks &= ~HID_QUIRK_NOGET;
- wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
+ wacom = devm_kzalloc(&hdev->dev, sizeof(struct wacom), GFP_KERNEL);
if (!wacom)
return -ENOMEM;
@@ -1858,7 +2377,7 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type) {
error = -ENODEV;
- goto fail_type;
+ goto fail;
}
wacom_wac->hid_data.inputmode = -1;
@@ -1867,18 +2386,20 @@ static int wacom_probe(struct hid_device *hdev,
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
- INIT_WORK(&wacom->work, wacom_wireless_work);
+ INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
+ INIT_WORK(&wacom->battery_work, wacom_battery_work);
+ INIT_WORK(&wacom->remote_work, wacom_remote_work);
/* ask for the report descriptor to be loaded by HID */
error = hid_parse(hdev);
if (error) {
hid_err(hdev, "parse failed\n");
- goto fail_parse;
+ goto fail;
}
error = wacom_parse_and_register(wacom, false);
if (error)
- goto fail_parse;
+ goto fail;
if (hdev->bus == BUS_BLUETOOTH) {
error = device_create_file(&hdev->dev, &dev_attr_speed);
@@ -1890,9 +2411,7 @@ static int wacom_probe(struct hid_device *hdev,
return 0;
-fail_type:
-fail_parse:
- kfree(wacom);
+fail:
hid_set_drvdata(hdev, NULL);
return error;
}
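
With the driver data now coming from devm_kzalloc(), the old fail_type/fail_parse/kfree() exits collapse into the single fail label above; the allocation is released automatically when probe fails or the device goes away. A minimal sketch of that probe shape, with hypothetical names (my_drvdata, my_parse_and_register) standing in for the real ones:

#include <linux/hid.h>
#include <linux/slab.h>

struct my_drvdata {
	int opened;
};

static int my_parse_and_register(struct hid_device *hdev,
				 struct my_drvdata *data)
{
	return 0;	/* stand-in for the real registration steps */
}

static int my_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	struct my_drvdata *data;
	int error;

	/* Device-managed: freed automatically on probe failure or removal. */
	data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	hid_set_drvdata(hdev, data);

	error = hid_parse(hdev);
	if (error)
		goto fail;

	error = my_parse_and_register(hdev, data);
	if (error)
		goto fail;

	return 0;

fail:
	hid_set_drvdata(hdev, NULL);
	return error;		/* no kfree(): devm owns the allocation */
}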
@@ -1908,15 +2427,13 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
- cancel_work_sync(&wacom->work);
- wacom_clean_inputs(wacom);
+ cancel_work_sync(&wacom->wireless_work);
+ cancel_work_sync(&wacom->battery_work);
+ cancel_work_sync(&wacom->remote_work);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
- wacom_destroy_battery(wacom);
- wacom_remove_shared_data(wacom);
hid_set_drvdata(hdev, NULL);
- kfree(wacom);
}
#ifdef CONFIG_PM
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1eae13cdc502..1cb79925730d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -34,6 +34,10 @@
*/
#define WACOM_CONTACT_AREA_SCALE 2607
+static bool touch_arbitration = 1;
+module_param(touch_arbitration, bool, 0644);
+MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
+
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
@@ -48,25 +52,34 @@ static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 };
*/
static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 };
+static void __wacom_notify_battery(struct wacom_battery *battery,
+ int bat_capacity, bool bat_charging,
+ bool bat_connected, bool ps_connected)
+{
+ bool changed = battery->battery_capacity != bat_capacity ||
+ battery->bat_charging != bat_charging ||
+ battery->bat_connected != bat_connected ||
+ battery->ps_connected != ps_connected;
+
+ if (changed) {
+ battery->battery_capacity = bat_capacity;
+ battery->bat_charging = bat_charging;
+ battery->bat_connected = bat_connected;
+ battery->ps_connected = ps_connected;
+
+ if (battery->battery)
+ power_supply_changed(battery->battery);
+ }
+}
+
static void wacom_notify_battery(struct wacom_wac *wacom_wac,
int bat_capacity, bool bat_charging, bool bat_connected,
bool ps_connected)
{
struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
- bool changed = wacom_wac->battery_capacity != bat_capacity ||
- wacom_wac->bat_charging != bat_charging ||
- wacom_wac->bat_connected != bat_connected ||
- wacom_wac->ps_connected != ps_connected;
- if (changed) {
- wacom_wac->battery_capacity = bat_capacity;
- wacom_wac->bat_charging = bat_charging;
- wacom_wac->bat_connected = bat_connected;
- wacom_wac->ps_connected = ps_connected;
-
- if (wacom->battery)
- power_supply_changed(wacom->battery);
- }
+ __wacom_notify_battery(&wacom->battery, bat_capacity, bat_charging,
+ bat_connected, ps_connected);
}
static int wacom_penpartner_irq(struct wacom_wac *wacom)
@@ -751,22 +764,37 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
{
unsigned char *data = wacom_wac->data;
- struct input_dev *input = wacom_wac->pad_input;
+ struct input_dev *input;
struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
- struct wacom_features *features = &wacom_wac->features;
+ struct wacom_remote *remote = wacom->remote;
int bat_charging, bat_percent, touch_ring_mode;
__u32 serial;
- int i;
+ int i, index = -1;
+ unsigned long flags;
if (data[0] != WACOM_REPORT_REMOTE) {
- dev_dbg(input->dev.parent,
- "%s: received unknown report #%d", __func__, data[0]);
+ hid_dbg(wacom->hdev, "%s: received unknown report #%d",
+ __func__, data[0]);
return 0;
}
serial = data[3] + (data[4] << 8) + (data[5] << 16);
wacom_wac->id[0] = PAD_DEVICE_ID;
+ spin_lock_irqsave(&remote->remote_lock, flags);
+
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index < 0 || !remote->remotes[index].registered)
+ goto out;
+
+ input = remote->remotes[index].input;
+
input_report_key(input, BTN_0, (data[9] & 0x01));
input_report_key(input, BTN_1, (data[9] & 0x02));
input_report_key(input, BTN_2, (data[9] & 0x04));
@@ -803,73 +831,69 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
input_event(input, EV_MSC, MSC_SERIAL, serial);
+ input_sync(input);
+
/* Which mode select (LED light) is currently on? */
touch_ring_mode = (data[11] & 0xC0) >> 6;
for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (wacom_wac->serial[i] == serial)
- wacom->led.select[i] = touch_ring_mode;
- }
-
- if (!wacom->battery &&
- !(features->quirks & WACOM_QUIRK_BATTERY)) {
- features->quirks |= WACOM_QUIRK_BATTERY;
- INIT_WORK(&wacom->work, wacom_battery_work);
- wacom_schedule_work(wacom_wac);
+ if (remote->remotes[i].serial == serial)
+ wacom->led.groups[i].select = touch_ring_mode;
}
- wacom_notify_battery(wacom_wac, bat_percent, bat_charging, 1,
- bat_charging);
+ __wacom_notify_battery(&remote->remotes[index].battery, bat_percent,
+ bat_charging, 1, bat_charging);
- return 1;
+out:
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
+ return 0;
}
-static int wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
+static void wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
{
struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
unsigned char *data = wacom_wac->data;
- int i;
+ struct wacom_remote *remote = wacom->remote;
+ struct wacom_remote_data remote_data;
+ unsigned long flags;
+ int i, ret;
if (data[0] != WACOM_REPORT_DEVICE_LIST)
- return 0;
+ return;
+
+ memset(&remote_data, 0, sizeof(struct wacom_remote_data));
for (i = 0; i < WACOM_MAX_REMOTES; i++) {
int j = i * 6;
int serial = (data[j+6] << 16) + (data[j+5] << 8) + data[j+4];
bool connected = data[j+2];
- if (connected) {
- int k;
+ remote_data.remote[i].serial = serial;
+ remote_data.remote[i].connected = connected;
+ }
- if (wacom_wac->serial[i] == serial)
- continue;
+ spin_lock_irqsave(&remote->remote_lock, flags);
- if (wacom_wac->serial[i]) {
- wacom_remote_destroy_attr_group(wacom,
- wacom_wac->serial[i]);
- }
+ ret = kfifo_in(&remote->remote_fifo, &remote_data, sizeof(remote_data));
+ if (ret != sizeof(remote_data)) {
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
+ hid_err(wacom->hdev, "Can't queue Remote status event.\n");
+ return;
+ }
- /* A remote can pair more than once with an EKR,
- * check to make sure this serial isn't already paired.
- */
- for (k = 0; k < WACOM_MAX_REMOTES; k++) {
- if (wacom_wac->serial[k] == serial)
- break;
- }
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (k < WACOM_MAX_REMOTES) {
- wacom_wac->serial[i] = serial;
- continue;
- }
- wacom_remote_create_attr_group(wacom, serial, i);
+ wacom_schedule_work(wacom_wac, WACOM_WORKER_REMOTE);
+}
- } else if (wacom_wac->serial[i]) {
- wacom_remote_destroy_attr_group(wacom,
- wacom_wac->serial[i]);
- }
- }
+static inline bool report_touch_events(struct wacom_wac *wacom)
+{
+ return (touch_arbitration ? !wacom->shared->stylus_in_proximity : 1);
+}
- return 0;
+static inline bool delay_pen_events(struct wacom_wac *wacom)
+{
+ return (wacom->shared->touch_down && touch_arbitration);
}
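
report_touch_events() and delay_pen_events() now carry the whole arbitration policy: with touch_arbitration enabled, touch is suppressed while the stylus is in proximity and pen packets are held back while a finger is down; with it disabled, both streams flow unconditionally. A small user-space model of that truth table (a sketch that mirrors the kernel predicates above, not a replacement for them):

#include <stdbool.h>
#include <stdio.h>

static bool touch_arbitration = true;

struct shared_state {
	bool stylus_in_proximity;
	bool touch_down;
};

static bool report_touch_events(const struct shared_state *s)
{
	return touch_arbitration ? !s->stylus_in_proximity : true;
}

static bool delay_pen_events(const struct shared_state *s)
{
	return touch_arbitration && s->touch_down;
}

int main(void)
{
	struct shared_state s;
	int pen, finger;

	for (pen = 0; pen <= 1; pen++) {
		for (finger = 0; finger <= 1; finger++) {
			s.stylus_in_proximity = pen;
			s.touch_down = finger;
			printf("pen=%d finger=%d -> touch %s, pen %s\n",
			       pen, finger,
			       report_touch_events(&s) ? "reported" : "dropped",
			       delay_pen_events(&s) ? "delayed" : "reported");
		}
	}
	return 0;
}

At run time the kernel knob is the touch_arbitration parameter declared above; with 0644 permissions it should be writable through the module's parameters directory in sysfs once the module is loaded.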
static int wacom_intuos_general(struct wacom_wac *wacom)
@@ -885,7 +909,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
data[0] != WACOM_REPORT_INTUOS_PEN)
return 0;
- if (wacom->shared->touch_down)
+ if (delay_pen_events(wacom))
return 1;
/* don't report events if we don't know the tool ID */
@@ -1145,7 +1169,7 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
if (touch_max == 1)
return test_bit(BTN_TOUCH, input->key) &&
- !wacom->shared->stylus_in_proximity;
+ report_touch_events(wacom);
for (i = 0; i < input->mt->num_slots; i++) {
struct input_mt_slot *ps = &input->mt->slots[i];
@@ -1186,7 +1210,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (byte_per_packet * i) + 1;
- bool touch = (data[offset] & 0x1) && !wacom->shared->stylus_in_proximity;
+ bool touch = (data[offset] & 0x1) && report_touch_events(wacom);
int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
if (slot < 0)
@@ -1250,7 +1274,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
- bool touch = (data[offset] & 0x1) && !wacom->shared->stylus_in_proximity;
+ bool touch = (data[offset] & 0x1) && report_touch_events(wacom);
int id = get_unaligned_le16(&data[offset + 1]);
int slot = input_mt_get_slot_by_key(input, id);
@@ -1284,7 +1308,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
for (i = 0; i < 2; i++) {
int p = data[1] & (1 << i);
- bool touch = p && !wacom->shared->stylus_in_proximity;
+ bool touch = p && report_touch_events(wacom);
input_mt_slot(input, i);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
@@ -1308,7 +1332,7 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
struct input_dev *input = wacom->touch_input;
- bool prox = !wacom->shared->stylus_in_proximity;
+ bool prox = report_touch_events(wacom);
int x = 0, y = 0;
if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
@@ -1353,8 +1377,10 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
/* keep pen state for touch events */
wacom->shared->stylus_in_proximity = prox;
- /* send pen events only when touch is up or forced out */
- if (!wacom->shared->touch_down) {
+ /* send pen events only when touch is up or forced out
+ * or touch arbitration is off
+ */
+ if (!delay_pen_events(wacom)) {
input_report_key(input, BTN_STYLUS, data[1] & 0x02);
input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
@@ -1496,8 +1522,10 @@ static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
- /* send pen events only when touch is up or forced out */
- if (!usage->type || wacom_wac->shared->touch_down)
+ /* send pen events only when touch is up or forced out
+ * or touch arbitration is off
+ */
+ if (!usage->type || delay_pen_events(wacom_wac))
return 0;
input_event(input, usage->type, usage->code, value);
@@ -1527,8 +1555,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
/* keep pen state for touch events */
wacom_wac->shared->stylus_in_proximity = prox;
- /* send pen events only when touch is up or forced out */
- if (!wacom_wac->shared->touch_down) {
+ if (!delay_pen_events(wacom_wac)) {
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
@@ -1544,13 +1571,11 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
switch (usage->hid) {
case HID_GD_X:
- features->last_slot_field = usage->hid;
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
else
@@ -1558,7 +1583,6 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
ABS_MT_POSITION_X, 4);
break;
case HID_GD_Y:
- features->last_slot_field = usage->hid;
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
else
@@ -1567,22 +1591,11 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
break;
case HID_DG_WIDTH:
case HID_DG_HEIGHT:
- features->last_slot_field = usage->hid;
wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MAJOR, 0);
wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MINOR, 0);
input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
break;
- case HID_DG_CONTACTID:
- features->last_slot_field = usage->hid;
- break;
- case HID_DG_INRANGE:
- features->last_slot_field = usage->hid;
- break;
- case HID_DG_INVERT:
- features->last_slot_field = usage->hid;
- break;
case HID_DG_TIPSWITCH:
- features->last_slot_field = usage->hid;
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
case HID_DG_CONTACTCOUNT:
@@ -1599,7 +1612,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
struct hid_data *hid_data = &wacom_wac->hid_data;
bool mt = wacom_wac->features.touch_max > 1;
bool prox = hid_data->tipswitch &&
- !wacom_wac->shared->stylus_in_proximity;
+ report_touch_events(wacom_wac);
wacom_wac->hid_data.num_received++;
if (wacom_wac->hid_data.num_received > wacom_wac->hid_data.num_expected)
@@ -1660,7 +1673,7 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->features.last_slot_field)
+ if (usage->hid == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
@@ -1673,31 +1686,35 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct hid_data* hid_data = &wacom_wac->hid_data;
+ int i;
- if (hid_data->cc_report != 0 &&
- hid_data->cc_report != report->id) {
- int i;
-
- hid_data->cc_report = report->id;
- hid_data->cc_index = -1;
- hid_data->cc_value_index = -1;
-
- for (i = 0; i < report->maxfield; i++) {
- struct hid_field *field = report->field[i];
- int j;
-
- for (j = 0; j < field->maxusage; j++) {
- if (field->usage[j].hid == HID_DG_CONTACTCOUNT) {
- hid_data->cc_index = i;
- hid_data->cc_value_index = j;
-
- /* break */
- i = report->maxfield;
- j = field->maxusage;
- }
+ for (i = 0; i < report->maxfield; i++) {
+ struct hid_field *field = report->field[i];
+ int j;
+
+ for (j = 0; j < field->maxusage; j++) {
+ struct hid_usage *usage = &field->usage[j];
+
+ switch (usage->hid) {
+ case HID_GD_X:
+ case HID_GD_Y:
+ case HID_DG_WIDTH:
+ case HID_DG_HEIGHT:
+ case HID_DG_CONTACTID:
+ case HID_DG_INRANGE:
+ case HID_DG_INVERT:
+ case HID_DG_TIPSWITCH:
+ hid_data->last_slot_field = usage->hid;
+ break;
+ case HID_DG_CONTACTCOUNT:
+ hid_data->cc_report = report->id;
+ hid_data->cc_index = i;
+ hid_data->cc_value_index = j;
+ break;
}
}
}
+
if (hid_data->cc_report != 0 &&
hid_data->cc_index >= 0) {
struct hid_field *field = report->field[hid_data->cc_index];
@@ -1740,10 +1757,10 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
/* currently, only direct devices have proper hid report descriptors */
- __set_bit(INPUT_PROP_DIRECT, wacom_wac->pen_input->propbit);
- __set_bit(INPUT_PROP_DIRECT, wacom_wac->touch_input->propbit);
+ features->device_type |= WACOM_DEVICETYPE_DIRECT;
if (WACOM_PEN_FIELD(field))
return wacom_wac_pen_usage_mapping(hdev, field, usage);
@@ -1825,15 +1842,8 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
for (i = 0; i < 2; i++) {
int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
- bool touch = data[offset + 3] & 0x80;
-
- /*
- * Touch events need to be disabled while stylus is
- * in proximity because user's hand is resting on touchpad
- * and sending unwanted events. User expects tablet buttons
- * to continue working though.
- */
- touch = touch && !wacom->shared->stylus_in_proximity;
+ bool touch = report_touch_events(wacom)
+ && (data[offset + 3] & 0x80);
input_mt_slot(input, i);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
@@ -1870,7 +1880,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
if (slot < 0)
return;
- touch = touch && !wacom->shared->stylus_in_proximity;
+ touch = touch && report_touch_events(wacom);
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
@@ -1942,7 +1952,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
}
/* only update touch if we actually have a touchpad and touch data changed */
- if (wacom->touch_registered && touch_changed) {
+ if (wacom->touch_input && touch_changed) {
input_mt_sync_frame(wacom->touch_input);
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
}
@@ -1983,7 +1993,7 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
}
wacom->shared->stylus_in_proximity = prox;
- if (wacom->shared->touch_down)
+ if (delay_pen_events(wacom))
return 0;
if (prox) {
@@ -2077,7 +2087,7 @@ static int wacom_bamboo_pad_touch_event(struct wacom_wac *wacom,
for (id = 0; id < wacom->features.touch_max; id++) {
valid = !!(prefix & BIT(id)) &&
- !wacom->shared->stylus_in_proximity;
+ report_touch_events(wacom);
input_mt_slot(input, id);
input_mt_report_slot_state(input, MT_TOOL_FINGER, valid);
@@ -2099,8 +2109,7 @@ static int wacom_bamboo_pad_touch_event(struct wacom_wac *wacom,
input_report_key(input, BTN_RIGHT, prefix & 0x80);
/* keep touch state for pen event */
- wacom->shared->touch_down = !!prefix &&
- !wacom->shared->stylus_in_proximity;
+ wacom->shared->touch_down = !!prefix && report_touch_events(wacom);
return 1;
}
@@ -2149,16 +2158,15 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
charging = !!(data[5] & 0x80);
if (wacom->pid != pid) {
wacom->pid = pid;
- wacom_schedule_work(wacom);
+ wacom_schedule_work(wacom, WACOM_WORKER_WIRELESS);
}
- if (wacom->shared->type)
- wacom_notify_battery(wacom, battery, charging, 1, 0);
+ wacom_notify_battery(wacom, battery, charging, 1, 0);
} else if (wacom->pid != 0) {
/* disconnected while previously connected */
wacom->pid = 0;
- wacom_schedule_work(wacom);
+ wacom_schedule_work(wacom, WACOM_WORKER_WIRELESS);
wacom_notify_battery(wacom, 0, 0, 0, 0);
}
@@ -2190,18 +2198,16 @@ static int wacom_status_irq(struct wacom_wac *wacom_wac, size_t len)
wacom_notify_battery(wacom_wac, battery, charging,
battery || charging, 1);
- if (!wacom->battery &&
+ if (!wacom->battery.battery &&
!(features->quirks & WACOM_QUIRK_BATTERY)) {
features->quirks |= WACOM_QUIRK_BATTERY;
- INIT_WORK(&wacom->work, wacom_battery_work);
- wacom_schedule_work(wacom_wac);
+ wacom_schedule_work(wacom_wac, WACOM_WORKER_BATTERY);
}
}
else if ((features->quirks & WACOM_QUIRK_BATTERY) &&
- wacom->battery) {
+ wacom->battery.battery) {
features->quirks &= ~WACOM_QUIRK_BATTERY;
- INIT_WORK(&wacom->work, wacom_battery_work);
- wacom_schedule_work(wacom_wac);
+ wacom_schedule_work(wacom_wac, WACOM_WORKER_BATTERY);
wacom_notify_battery(wacom_wac, 0, 0, 0, 0);
}
return 0;
@@ -2312,8 +2318,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
case REMOTE:
+ sync = false;
if (wacom_wac->data[0] == WACOM_REPORT_DEVICE_LIST)
- sync = wacom_remote_status_irq(wacom_wac, len);
+ wacom_remote_status_irq(wacom_wac, len);
else
sync = wacom_remote_irq(wacom_wac, len);
break;
@@ -2451,6 +2458,33 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type = WACOM_DEVICETYPE_PAD;
+ switch (features->type) {
+ case PL:
+ case DTU:
+ case DTUS:
+ case DTUSX:
+ case WACOM_21UX2:
+ case WACOM_22HD:
+ case DTK:
+ case WACOM_24HD:
+ case WACOM_27QHD:
+ case CINTIQ_HYBRID:
+ case CINTIQ_COMPANION_2:
+ case CINTIQ:
+ case WACOM_BEE:
+ case WACOM_13HD:
+ case WACOM_24HDT:
+ case WACOM_27QHDT:
+ case TABLETPC:
+ case TABLETPCE:
+ case TABLETPC2FG:
+ case MTSCREEN:
+ case MTTPC:
+ case MTTPC_B:
+ features->device_type |= WACOM_DEVICETYPE_DIRECT;
+ break;
+ }
+
if (wacom->hdev->bus == BUS_BLUETOOTH)
features->quirks |= WACOM_QUIRK_BATTERY;
@@ -2469,6 +2503,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
features->quirks |= WACOM_QUIRK_BATTERY;
}
}
+
+ if (features->type == REMOTE)
+ features->device_type |= WACOM_DEVICETYPE_WL_MONITOR;
}
int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
@@ -2481,6 +2518,11 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
+ if (features->device_type & WACOM_DEVICETYPE_DIRECT)
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ else
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
if (features->type == HID_GENERIC)
/* setup has already been done */
return 0;
@@ -2499,7 +2541,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
-
switch (features->type) {
case GRAPHIRE_BT:
__clear_bit(ABS_MISC, input_dev->absbit);
@@ -2523,8 +2564,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case WACOM_27QHD:
@@ -2539,7 +2578,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case CINTIQ_COMPANION_2:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
break;
@@ -2555,8 +2593,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS:
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
wacom_setup_intuos(wacom_wac);
break;
@@ -2566,8 +2602,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
features->distance_fuzz, 0);
@@ -2597,8 +2631,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
-
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
break;
case PTU:
@@ -2609,16 +2641,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case INTUOSHT:
case BAMBOO_PT:
case BAMBOO_PEN:
case INTUOSHT2:
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
if (features->type == INTUOSHT2) {
wacom_setup_basic_pro_pen(wacom_wac);
} else {
@@ -2649,6 +2677,11 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
+ if (features->device_type & WACOM_DEVICETYPE_DIRECT)
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ else
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
if (features->type == HID_GENERIC)
/* setup has already been done */
return 0;
@@ -2683,8 +2716,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0);
input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
@@ -2707,7 +2738,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
case TABLETPC:
case TABLETPCE:
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
break;
case INTUOSHT:
@@ -2752,11 +2782,105 @@ static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
__set_bit(BTN_BASE + (i-16), input_dev->keybit);
}
+static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
+{
+ struct wacom_led *led;
+ int i;
+ bool updated = false;
+
+ /*
+ * 24HD has LED group 1 to the left and LED group 0 to the right.
+ * So group 0 matches the second half of the buttons and thus the mask
+ * needs to be shifted.
+ */
+ if (group == 0)
+ mask >>= 8;
+
+ for (i = 0; i < 3; i++) {
+ led = wacom_led_find(wacom, group, i);
+ if (!led) {
+ hid_err(wacom->hdev, "can't find LED %d in group %d\n",
+ i, group);
+ continue;
+ }
+ if (!updated && mask & BIT(i)) {
+ led->held = true;
+ led_trigger_event(&led->trigger, LED_FULL);
+ } else {
+ led->held = false;
+ }
+ }
+}
+
+static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
+ int mask, int group)
+{
+ int button_per_group;
+
+ /*
+ * 21UX2 has LED group 1 to the left and LED group 0
+ * to the right. We need to reverse the group to match this
+ * historical behavior.
+ */
+ if (wacom->wacom_wac.features.type == WACOM_21UX2)
+ group = 1 - group;
+
+ button_per_group = button_count/wacom->led.count;
+
+ return mask & (1 << (group * button_per_group));
+}
+
+static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
+ int group)
+{
+ struct wacom_led *led, *next_led;
+ int cur;
+ bool pressed;
+
+ if (wacom->wacom_wac.features.type == WACOM_24HD)
+ return wacom_24hd_update_leds(wacom, mask, group);
+
+ pressed = wacom_is_led_toggled(wacom, button_count, mask, group);
+ cur = wacom->led.groups[group].select;
+
+ led = wacom_led_find(wacom, group, cur);
+ if (!led) {
+ hid_err(wacom->hdev, "can't find current LED %d in group %d\n",
+ cur, group);
+ return;
+ }
+
+ if (!pressed) {
+ led->held = false;
+ return;
+ }
+
+ if (led->held && pressed)
+ return;
+
+ next_led = wacom_led_next(wacom, led);
+ if (!next_led) {
+ hid_err(wacom->hdev, "can't find next LED in group %d\n",
+ group);
+ return;
+ }
+ if (next_led == led)
+ return;
+
+ next_led->held = true;
+ led_trigger_event(&next_led->trigger,
+ wacom_leds_brightness_get(next_led));
+}
+
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask)
{
+ struct wacom *wacom = input_get_drvdata(input_dev);
int i;
+ for (i = 0; i < wacom->led.count; i++)
+ wacom_update_led(wacom, button_count, mask, i);
+
for (i = 0; i < button_count && i < 10; i++)
input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
for (i = 10; i < button_count && i < 16; i++)
@@ -2773,6 +2897,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
if (!(features->device_type & WACOM_DEVICETYPE_PAD))
return -ENODEV;
+ if (features->type == REMOTE && input_dev == wacom_wac->pad_input)
+ return -ENODEV;
+
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
/* kept for making legacy xf86-input-wacom working with the wheels */
@@ -3403,7 +3530,7 @@ static const struct wacom_features wacom_features_0x343 =
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
- { "Wacom HID", .type = HID_GENERIC };
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
#define USB_DEVICE_WACOM(prod) \
HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 53d16537fd2a..324c40b0c119 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -82,6 +82,7 @@
#define WACOM_DEVICETYPE_TOUCH 0x0002
#define WACOM_DEVICETYPE_PAD 0x0004
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
+#define WACOM_DEVICETYPE_DIRECT 0x0010
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
#define WACOM_G9_PAGE 0xff090000
@@ -185,7 +186,6 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
- int last_slot_field;
};
struct wacom_shared {
@@ -214,35 +214,35 @@ struct hid_data {
int cc_report;
int cc_index;
int cc_value_index;
+ int last_slot_field;
int num_expected;
int num_received;
};
+struct wacom_remote_data {
+ struct {
+ u32 serial;
+ bool connected;
+ } remote[WACOM_MAX_REMOTES];
+};
+
struct wacom_wac {
+ char name[WACOM_NAME_MAX];
char pen_name[WACOM_NAME_MAX];
char touch_name[WACOM_NAME_MAX];
char pad_name[WACOM_NAME_MAX];
- char bat_name[WACOM_NAME_MAX];
- char ac_name[WACOM_NAME_MAX];
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[5];
+ __u32 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- bool pen_registered;
- bool touch_registered;
- bool pad_registered;
int pid;
- int battery_capacity;
int num_contacts_left;
- int bat_charging;
- int bat_connected;
- int ps_connected;
u8 bt_features;
u8 bt_high_speed;
int mode_report;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56dd261f7142..16f91c8490fe 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -43,7 +43,12 @@ static void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
- if (channel->offermsg.monitor_allocated) {
+ /*
+ * For channels marked as being in "low latency" mode,
+ * bypass the monitor page mechanism.
+ */
+ if ((channel->offermsg.monitor_allocated) &&
+ (!channel->low_latency)) {
/* Each u32 represents 32 channels */
sync_set_bit(channel->offermsg.child_relid & 31,
(unsigned long *) vmbus_connection.send_int_page +
@@ -70,12 +75,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
{
struct vmbus_channel_open_channel *open_msg;
struct vmbus_channel_msginfo *open_info = NULL;
- void *in, *out;
unsigned long flags;
int ret, err = 0;
- unsigned long t;
struct page *page;
+ if (send_ringbuffer_size % PAGE_SIZE ||
+ recv_ringbuffer_size % PAGE_SIZE)
+ return -EINVAL;
+
spin_lock_irqsave(&newchannel->lock, flags);
if (newchannel->state == CHANNEL_OPEN_STATE) {
newchannel->state = CHANNEL_OPENING_STATE;
@@ -95,36 +102,33 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
recv_ringbuffer_size));
if (!page)
- out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
- else
- out = (void *)page_address(page);
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(send_ringbuffer_size +
+ recv_ringbuffer_size));
- if (!out) {
+ if (!page) {
err = -ENOMEM;
- goto error0;
+ goto error_set_chnstate;
}
- in = (void *)((unsigned long)out + send_ringbuffer_size);
-
- newchannel->ringbuffer_pages = out;
+ newchannel->ringbuffer_pages = page_address(page);
newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
recv_ringbuffer_size) >> PAGE_SHIFT;
- ret = hv_ringbuffer_init(
- &newchannel->outbound, out, send_ringbuffer_size);
+ ret = hv_ringbuffer_init(&newchannel->outbound, page,
+ send_ringbuffer_size >> PAGE_SHIFT);
if (ret != 0) {
err = ret;
- goto error0;
+ goto error_free_pages;
}
- ret = hv_ringbuffer_init(
- &newchannel->inbound, in, recv_ringbuffer_size);
+ ret = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_ringbuffer_size >> PAGE_SHIFT],
+ recv_ringbuffer_size >> PAGE_SHIFT);
if (ret != 0) {
err = ret;
- goto error0;
+ goto error_free_pages;
}
@@ -132,14 +136,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
newchannel->ringbuffer_gpadlhandle = 0;
ret = vmbus_establish_gpadl(newchannel,
- newchannel->outbound.ring_buffer,
- send_ringbuffer_size +
- recv_ringbuffer_size,
- &newchannel->ringbuffer_gpadlhandle);
+ page_address(page),
+ send_ringbuffer_size +
+ recv_ringbuffer_size,
+ &newchannel->ringbuffer_gpadlhandle);
if (ret != 0) {
err = ret;
- goto error0;
+ goto error_free_pages;
}
/* Create and init the channel open message */
@@ -148,7 +152,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
- goto error_gpadl;
+ goto error_free_gpadl;
}
init_completion(&open_info->waitevent);
@@ -164,7 +168,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
- goto error_gpadl;
+ goto error_free_gpadl;
}
if (userdatalen)
@@ -180,14 +184,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (ret != 0) {
err = ret;
- goto error1;
+ goto error_clean_msglist;
}
- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto error1;
- }
+ wait_for_completion(&open_info->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
@@ -195,25 +195,27 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (open_info->response.open_result.status) {
err = -EAGAIN;
- goto error_gpadl;
+ goto error_free_gpadl;
}
newchannel->state = CHANNEL_OPENED_STATE;
kfree(open_info);
return 0;
-error1:
+error_clean_msglist:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-error_gpadl:
+error_free_gpadl:
vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-
-error0:
- free_pages((unsigned long)out,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
kfree(open_info);
+error_free_pages:
+ hv_ringbuffer_cleanup(&newchannel->outbound);
+ hv_ringbuffer_cleanup(&newchannel->inbound);
+ __free_pages(page,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+error_set_chnstate:
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
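
The reworked vmbus_open() follows the descending-label cleanup idiom: every failure jumps to the first label that undoes only what has been acquired so far, and the labels unwind in reverse order of acquisition. A generic sketch of that shape (my_open and my_post_message are placeholders, not Hyper-V APIs):

#include <linux/mm.h>
#include <linux/slab.h>

static int my_post_message(void *info)
{
	return 0;	/* stand-in for the message exchange with the host */
}

static int my_open(unsigned int order)
{
	struct page *page;
	void *info;
	int err;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	info = kzalloc(128, GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_free_pages;
	}

	err = my_post_message(info);
	if (err)
		goto err_free_info;

	return 0;

err_free_info:
	kfree(info);
err_free_pages:
	__free_pages(page, order);
	return err;
}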
@@ -238,8 +240,7 @@ EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
static int create_gpadl_header(void *kbuffer, u32 size,
- struct vmbus_channel_msginfo **msginfo,
- u32 *messagecount)
+ struct vmbus_channel_msginfo **msginfo)
{
int i;
int pagecount;
@@ -283,7 +284,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
- *messagecount = 1;
pfnsum = pfncount;
pfnleft = pagecount - pfncount;
@@ -323,7 +323,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
}
msgbody->msgsize = msgsize;
- (*messagecount)++;
gpadl_body =
(struct vmbus_channel_gpadl_body *)msgbody->msg;
@@ -352,6 +351,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
msgheader = kzalloc(msgsize, GFP_KERNEL);
if (msgheader == NULL)
goto nomem;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
msgheader->msgsize = msgsize;
gpadl_header = (struct vmbus_channel_gpadl_header *)
@@ -366,7 +367,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
- *messagecount = 1;
}
return 0;
@@ -390,8 +390,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
- u32 msgcount;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
struct list_head *curr;
u32 next_gpadl_handle;
unsigned long flags;
@@ -400,7 +399,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
next_gpadl_handle =
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
- ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+ ret = create_gpadl_header(kbuffer, size, &msginfo);
if (ret)
return ret;
@@ -423,24 +422,21 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
if (ret != 0)
goto cleanup;
- if (msgcount > 1) {
- list_for_each(curr, &msginfo->submsglist) {
-
- submsginfo = (struct vmbus_channel_msginfo *)curr;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)submsginfo->msg;
+ list_for_each(curr, &msginfo->submsglist) {
+ submsginfo = (struct vmbus_channel_msginfo *)curr;
+ gpadl_body =
+ (struct vmbus_channel_gpadl_body *)submsginfo->msg;
- gpadl_body->header.msgtype =
- CHANNELMSG_GPADL_BODY;
- gpadl_body->gpadl = next_gpadl_handle;
+ gpadl_body->header.msgtype =
+ CHANNELMSG_GPADL_BODY;
+ gpadl_body->gpadl = next_gpadl_handle;
- ret = vmbus_post_msg(gpadl_body,
- submsginfo->msgsize -
- sizeof(*submsginfo));
- if (ret != 0)
- goto cleanup;
+ ret = vmbus_post_msg(gpadl_body,
+ submsginfo->msgsize -
+ sizeof(*submsginfo));
+ if (ret != 0)
+ goto cleanup;
- }
}
wait_for_completion(&msginfo->waitevent);
@@ -451,6 +447,10 @@ cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
kfree(msginfo);
return ret;
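
With messagecount gone, the submsglist itself is the single source of truth, and the cleanup path added above walks it with list_for_each_entry_safe() so each node can be freed during the traversal. A minimal sketch (struct my_msg is illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct my_msg {
	struct list_head entry;
	/* payload ... */
};

static void free_submessages(struct list_head *head)
{
	struct my_msg *msg, *tmp;

	/* The _safe variant caches the next pointer before the body runs,
	 * so kfree(msg) cannot break the walk. */
	list_for_each_entry_safe(msg, tmp, head, entry) {
		list_del(&msg->entry);
		kfree(msg);
	}
}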
@@ -512,7 +512,6 @@ static void reset_channel_cb(void *arg)
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
- struct tasklet_struct *tasklet;
int ret;
/*
@@ -524,8 +523,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* To resolve the race, we can serialize them by disabling the
* tasklet when the latter is running here.
*/
- tasklet = hv_context.event_dpc[channel->target_cpu];
- tasklet_disable(tasklet);
+ hv_event_tasklet_disable(channel);
/*
* In case a device driver's probe() fails (e.g.,
@@ -591,7 +589,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- tasklet_enable(tasklet);
+ hv_event_tasklet_enable(channel);
return ret;
}
@@ -659,7 +657,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock);
+ &signal, lock, channel->signal_policy);
/*
* Signalling the host is conditional on many factors:
@@ -680,11 +678,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
* mechanism which can hurt the performance otherwise.
*/
- if (channel->signal_policy)
- signal = true;
- else
- kick_q = true;
-
if (((ret == 0) && kick_q && signal) ||
(ret && !is_hvsock_channel(channel)))
vmbus_setevent(channel);
@@ -777,7 +770,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock);
+ &signal, lock, channel->signal_policy);
/*
* Signalling the host is conditional on many factors:
@@ -795,11 +788,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
* enough condition that it should not matter.
*/
- if (channel->signal_policy)
- signal = true;
- else
- kick_q = true;
-
if (((ret == 0) && kick_q && signal) || (ret))
vmbus_setevent(channel);
@@ -861,7 +849,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock);
+ &signal, lock, channel->signal_policy);
if (ret == 0 && signal)
vmbus_setevent(channel);
@@ -926,7 +914,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock);
+ &signal, lock, channel->signal_policy);
if (ret == 0 && signal)
vmbus_setevent(channel);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index b6c1211b4df7..96a85cd39580 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
@@ -138,10 +139,32 @@ static const struct vmbus_device vmbus_devs[] = {
},
};
-static u16 hv_get_dev_type(const uuid_le *guid)
+static const struct {
+ uuid_le guid;
+} vmbus_unsupported_devs[] = {
+ { HV_AVMA1_GUID },
+ { HV_AVMA2_GUID },
+ { HV_RDV_GUID },
+};
+
+static bool is_unsupported_vmbus_devs(const uuid_le *guid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
+ if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
+ return true;
+ return false;
+}
+
+static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
+ const uuid_le *guid = &channel->offermsg.offer.if_type;
u16 i;
+ if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
+ return HV_UNKOWN;
+
for (i = HV_IDE; i < HV_UNKOWN; i++) {
if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
return i;
@@ -251,14 +274,12 @@ EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
*/
static struct vmbus_channel *alloc_channel(void)
{
- static atomic_t chan_num = ATOMIC_INIT(0);
struct vmbus_channel *channel;
channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
if (!channel)
return NULL;
- channel->id = atomic_inc_return(&chan_num);
channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
@@ -303,16 +324,32 @@ static void vmbus_release_relid(u32 relid)
vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
}
+void hv_event_tasklet_disable(struct vmbus_channel *channel)
+{
+ struct tasklet_struct *tasklet;
+ tasklet = hv_context.event_dpc[channel->target_cpu];
+ tasklet_disable(tasklet);
+}
+
+void hv_event_tasklet_enable(struct vmbus_channel *channel)
+{
+ struct tasklet_struct *tasklet;
+ tasklet = hv_context.event_dpc[channel->target_cpu];
+ tasklet_enable(tasklet);
+
+ /* In case there is any pending event */
+ tasklet_schedule(tasklet);
+}
+
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
unsigned long flags;
struct vmbus_channel *primary_channel;
- vmbus_release_relid(relid);
-
BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ hv_event_tasklet_disable(channel);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -321,6 +358,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
percpu_channel_deq(channel);
put_cpu();
}
+ hv_event_tasklet_enable(channel);
if (channel->primary_channel == NULL) {
list_del(&channel->listentry);
@@ -338,8 +376,11 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
* We need to free the bit for init_vp_index() to work in the case
* of sub-channel, when we reload drivers like hv_netvsc.
*/
- cpumask_clear_cpu(channel->target_cpu,
- &primary_channel->alloced_cpus_in_node);
+ if (channel->affinity_policy == HV_LOCALIZED)
+ cpumask_clear_cpu(channel->target_cpu,
+ &primary_channel->alloced_cpus_in_node);
+
+ vmbus_release_relid(relid);
free_channel(channel);
}
@@ -405,10 +446,13 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_free_chan;
}
- dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+ dev_type = hv_get_dev_type(newchannel);
+ if (dev_type == HV_NIC)
+ set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
+ hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -418,6 +462,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
percpu_channel_enq(newchannel);
put_cpu();
}
+ hv_event_tasklet_enable(newchannel);
/*
* This state is used to indicate a successful open
@@ -463,12 +508,11 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
return;
err_deq_chan:
- vmbus_release_relid(newchannel->offermsg.child_relid);
-
mutex_lock(&vmbus_connection.channel_mutex);
list_del(&newchannel->listentry);
mutex_unlock(&vmbus_connection.channel_mutex);
+ hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -477,6 +521,9 @@ err_deq_chan:
percpu_channel_deq(newchannel);
put_cpu();
}
+ hv_event_tasklet_enable(newchannel);
+
+ vmbus_release_relid(newchannel->offermsg.child_relid);
err_free_chan:
free_channel(newchannel);
@@ -522,17 +569,17 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
}
/*
- * We distribute primary channels evenly across all the available
- * NUMA nodes and within the assigned NUMA node we will assign the
- * first available CPU to the primary channel.
- * The sub-channels will be assigned to the CPUs available in the
- * NUMA node evenly.
+ * Pick a NUMA node for the channel based on its affinity policy.
*/
- if (!primary) {
+
+ if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
while (true) {
next_node = next_numa_node_id++;
- if (next_node == nr_node_ids)
+ if (next_node == nr_node_ids) {
next_node = next_numa_node_id = 0;
+ continue;
+ }
if (cpumask_empty(cpumask_of_node(next_node)))
continue;
break;
@@ -556,15 +603,17 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
cur_cpu = -1;
- /*
- * Normally Hyper-V host doesn't create more subchannels than there
- * are VCPUs on the node but it is possible when not all present VCPUs
- * on the node are initialized by guest. Clear the alloced_cpus_in_node
- * to start over.
- */
- if (cpumask_equal(&primary->alloced_cpus_in_node,
- cpumask_of_node(primary->numa_node)))
- cpumask_clear(&primary->alloced_cpus_in_node);
+ if (primary->affinity_policy == HV_LOCALIZED) {
+ /*
+ * Normally Hyper-V host doesn't create more subchannels
+ * than there are VCPUs on the node but it is possible when not
+ * all present VCPUs on the node are initialized by guest.
+ * Clear the alloced_cpus_in_node to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+ }
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
@@ -575,17 +624,24 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
continue;
}
- /*
- * NOTE: in the case of sub-channel, we clear the sub-channel
- * related bit(s) in primary->alloced_cpus_in_node in
- * hv_process_channel_removal(), so when we reload drivers
- * like hv_netvsc in SMP guest, here we're able to re-allocate
- * bit from primary->alloced_cpus_in_node.
- */
- if (!cpumask_test_cpu(cur_cpu,
- &primary->alloced_cpus_in_node)) {
- cpumask_set_cpu(cur_cpu,
- &primary->alloced_cpus_in_node);
+ if (primary->affinity_policy == HV_LOCALIZED) {
+ /*
+ * NOTE: in the case of sub-channel, we clear the
+ * sub-channel related bit(s) in
+ * primary->alloced_cpus_in_node in
+ * hv_process_channel_removal(), so when we
+ * reload drivers like hv_netvsc in SMP guest, here
+ * we're able to re-allocate
+ * bit from primary->alloced_cpus_in_node.
+ */
+ if (!cpumask_test_cpu(cur_cpu,
+ &primary->alloced_cpus_in_node)) {
+ cpumask_set_cpu(cur_cpu,
+ &primary->alloced_cpus_in_node);
+ cpumask_set_cpu(cur_cpu, alloced_mask);
+ break;
+ }
+ } else {
cpumask_set_cpu(cur_cpu, alloced_mask);
break;
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index fcf8a02dc0ea..78e6368a4423 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -439,7 +439,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
- u32 msec = 1;
+ u32 usec = 1;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -472,9 +472,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
}
retries++;
- msleep(msec);
- if (msec < 2048)
- msec *= 2;
+ udelay(usec);
+ if (usec < 2048)
+ usec *= 2;
}
return ret;
}
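
vmbus_post_msg() now retries with udelay() and doubles the wait after each failed attempt, capping the step at 2048 microseconds instead of the old millisecond-scale msleep(). The backoff reduces to a few lines; in this sketch my_try_post() is a hypothetical stand-in for the actual hypercall:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int my_try_post(void *buf, size_t len)
{
	return 0;	/* stand-in for the real post operation */
}

static int post_with_backoff(void *buf, size_t len)
{
	u32 usec = 1;
	int retries, ret = -EAGAIN;

	for (retries = 0; retries < 20; retries++) {
		ret = my_try_post(buf, len);
		if (ret != -EAGAIN)
			return ret;

		udelay(usec);		/* 1, 2, 4, ... microseconds, capped */
		if (usec < 2048)
			usec *= 2;
	}
	return ret;
}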
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a1c086ba3b9a..60dbd6cb4640 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -278,7 +278,7 @@ cleanup:
*
* This routine is called normally during driver unloading or exiting.
*/
-void hv_cleanup(void)
+void hv_cleanup(bool crash)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -288,7 +288,8 @@ void hv_cleanup(void)
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- vfree(hv_context.hypercall_page);
+ if (!crash)
+ vfree(hv_context.hypercall_page);
hv_context.hypercall_page = NULL;
}
@@ -308,7 +309,8 @@ void hv_cleanup(void)
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
- vfree(hv_context.tsc_page);
+ if (!crash)
+ vfree(hv_context.tsc_page);
hv_context.tsc_page = NULL;
}
#endif
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index df35fb7ed5df..fdf8da929cbe 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -430,16 +430,27 @@ struct dm_info_msg {
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
- * covered_end_pfn defines the pages that can
+ * covered_start_pfn:covered_end_pfn defines the pages that can
* be brough online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
+ unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
+ /*
+ * A list of gaps.
+ */
+ struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long end_pfn;
};
struct balloon_state {
@@ -536,7 +547,11 @@ struct hv_dynmem_device {
*/
struct task_struct *thread;
- struct mutex ha_region_mutex;
+ /*
+ * Protects ha_region_list, num_pages_onlined counter and individual
+ * regions from ha_region_list.
+ */
+ spinlock_t ha_lock;
/*
* A list of hot-add regions.
@@ -560,18 +575,14 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
struct memory_notify *mem = (struct memory_notify *)v;
+ unsigned long flags;
switch (val) {
- case MEM_GOING_ONLINE:
- mutex_lock(&dm_device.ha_region_mutex);
- break;
-
case MEM_ONLINE:
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device.num_pages_onlined += mem->nr_pages;
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
case MEM_CANCEL_ONLINE:
- if (val == MEM_ONLINE ||
- mutex_is_locked(&dm_device.ha_region_mutex))
- mutex_unlock(&dm_device.ha_region_mutex);
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
complete(&dm_device.ol_waitevent);
@@ -579,10 +590,11 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
break;
case MEM_OFFLINE:
- mutex_lock(&dm_device.ha_region_mutex);
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device.num_pages_onlined -= mem->nr_pages;
- mutex_unlock(&dm_device.ha_region_mutex);
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
break;
+ case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
break;
@@ -595,18 +607,46 @@ static struct notifier_block hv_memory_nb = {
.priority = 0
};
+/* Check whether the page is backed and may be onlined; if so, online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+ unsigned long cur_start_pgp;
+ unsigned long cur_end_pgp;
+ struct hv_hotadd_gap *gap;
+
+ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+ /* The page is not backed. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ return;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(gap->start_pfn);
+ cur_end_pgp = (unsigned long)
+ pfn_to_page(gap->end_pfn);
+ if (((unsigned long)pg >= cur_start_pgp) &&
+ ((unsigned long)pg < cur_end_pgp)) {
+ return;
+ }
+ }
+
+ /* This frame is currently backed; online the page. */
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+}
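
hv_page_online_one() only onlines a frame when it lies inside the covered portion of the hot-add region and is not inside any recorded gap, i.e. a sub-range the host never backed. The test is a range check plus a walk of the gap list; a compact sketch, with types that loosely follow the driver's hv_hotadd_state and hv_hotadd_gap:

#include <linux/list.h>
#include <linux/types.h>

struct ha_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;		/* exclusive */
};

struct ha_region {
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;	/* exclusive */
	struct list_head gap_list;
};

/* True when the pfn is backed by the host and may be onlined. */
static bool pfn_is_backed(struct ha_region *has, unsigned long pfn)
{
	struct ha_gap *gap;

	if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
		return false;

	list_for_each_entry(gap, &has->gap_list, list)
		if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
			return false;

	return true;
}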
-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+ unsigned long start_pfn, unsigned long size)
{
int i;
- for (i = 0; i < size; i++) {
- struct page *pg;
- pg = pfn_to_page(start_pfn + i);
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ for (i = 0; i < size; i++)
+ hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
@@ -618,9 +658,12 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
+ unsigned long flags;
for (i = 0; i < (size/HA_CHUNK); i++) {
start_pfn = start + (i * HA_CHUNK);
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
has->ha_end_pfn += HA_CHUNK;
if (total_pfn > HA_CHUNK) {
@@ -632,11 +675,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
has->covered_end_pfn += processed_pfn;
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
init_completion(&dm_device.ol_waitevent);
- dm_device.ha_waiting = true;
+ dm_device.ha_waiting = !memhp_auto_online;
- mutex_unlock(&dm_device.ha_region_mutex);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
(HA_CHUNK << PAGE_SHIFT));
@@ -653,20 +696,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
*/
do_hot_add = false;
}
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
has->ha_end_pfn -= HA_CHUNK;
has->covered_end_pfn -= processed_pfn;
- mutex_lock(&dm_device.ha_region_mutex);
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
break;
}
/*
- * Wait for the memory block to be onlined.
- * Since the hot add has succeeded, it is ok to
- * proceed even if the pages in the hot added region
- * have not been "onlined" within the allowed time.
+ * Wait for the memory block to be onlined when memory onlining
+ * is done outside of the kernel (memhp_auto_online). Since the hot
+ * add has succeeded, it is ok to proceed even if the pages in
+ * the hot added region have not been "onlined" within the
+ * allowed time.
*/
- wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
- mutex_lock(&dm_device.ha_region_mutex);
+ if (dm_device.ha_waiting)
+ wait_for_completion_timeout(&dm_device.ol_waitevent,
+ 5*HZ);
post_status(&dm_device);
}
@@ -675,47 +721,64 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
static void hv_online_page(struct page *pg)
{
- struct list_head *cur;
struct hv_hotadd_state *has;
unsigned long cur_start_pgp;
unsigned long cur_end_pgp;
+ unsigned long flags;
- list_for_each(cur, &dm_device.ha_region_list) {
- has = list_entry(cur, struct hv_hotadd_state, list);
- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(has->start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
- if (((unsigned long)pg >= cur_start_pgp) &&
- ((unsigned long)pg < cur_end_pgp)) {
- /*
- * This frame is currently backed; online the
- * page.
- */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ /* The page belongs to a different HAS. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ continue;
+
+ hv_page_online_one(has, pg);
+ break;
}
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
- struct list_head *cur;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
+ int ret = 0;
+ unsigned long flags;
- if (list_empty(&dm_device.ha_region_list))
- return false;
-
- list_for_each(cur, &dm_device.ha_region_list) {
- has = list_entry(cur, struct hv_hotadd_state, list);
-
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, create a gap and update covered_end_pfn.
+ */
+ if (has->covered_end_pfn != start_pfn) {
+ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+ if (!gap) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ INIT_LIST_HEAD(&gap->list);
+ gap->start_pfn = has->covered_end_pfn;
+ gap->end_pfn = start_pfn;
+ list_add_tail(&gap->list, &has->gap_list);
+
+ has->covered_end_pfn = start_pfn;
+ }
+
/*
* If the current hot add-request extends beyond
* our current limit; extend it.
@@ -732,19 +795,12 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
has->end_pfn += new_inc;
}
- /*
- * If the current start pfn is not where the covered_end
- * is, update it.
- */
-
- if (has->covered_end_pfn != start_pfn)
- has->covered_end_pfn = start_pfn;
-
- return true;
-
+ ret = 1;
+ break;
}
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- return false;
+ return ret;
}
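A minimal user-space sketch of the gap bookkeeping that pfn_covered() and hv_page_online_one() now share: a region covers [covered_start_pfn, covered_end_pfn), any range the host skipped is remembered as a gap, and a pfn is only onlined if it is covered and not inside a gap. The names and the simplified cover() helper below are illustrative only; the real code advances covered_end_pfn gradually, as memory is actually hot-added.

#include <stdbool.h>
#include <stdio.h>

struct gap { unsigned long start_pfn, end_pfn; };

struct region {
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	struct gap gaps[8];
	int nr_gaps;
};

/* Mirrors pfn_covered(): record a gap when the new request is not contiguous. */
static void cover(struct region *r, unsigned long start_pfn, unsigned long cnt)
{
	if (r->covered_end_pfn != start_pfn) {
		r->gaps[r->nr_gaps].start_pfn = r->covered_end_pfn;
		r->gaps[r->nr_gaps].end_pfn = start_pfn;
		r->nr_gaps++;
	}
	r->covered_end_pfn = start_pfn + cnt;	/* simplification, see above */
}

/* Mirrors the check in hv_page_online_one(): covered and not inside a gap. */
static bool pfn_backed(const struct region *r, unsigned long pfn)
{
	int i;

	if (pfn < r->covered_start_pfn || pfn >= r->covered_end_pfn)
		return false;
	for (i = 0; i < r->nr_gaps; i++)
		if (pfn >= r->gaps[i].start_pfn && pfn < r->gaps[i].end_pfn)
			return false;
	return true;
}

int main(void)
{
	struct region r = { .covered_start_pfn = 100, .covered_end_pfn = 100 };

	cover(&r, 100, 50);	/* pfns 100..149 backed */
	cover(&r, 180, 20);	/* gap 150..179, pfns 180..199 backed */

	printf("pfn 120 backed: %d\n", pfn_backed(&r, 120));	/* 1 */
	printf("pfn 160 backed: %d\n", pfn_backed(&r, 160));	/* 0 */
	printf("pfn 190 backed: %d\n", pfn_backed(&r, 190));	/* 1 */
	return 0;
}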
static unsigned long handle_pg_range(unsigned long pg_start,
@@ -753,17 +809,13 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long start_pfn = pg_start;
unsigned long pfn_cnt = pg_count;
unsigned long size;
- struct list_head *cur;
struct hv_hotadd_state *has;
unsigned long pgs_ol = 0;
unsigned long old_covered_state;
+ unsigned long res = 0, flags;
- if (list_empty(&dm_device.ha_region_list))
- return 0;
-
- list_for_each(cur, &dm_device.ha_region_list) {
- has = list_entry(cur, struct hv_hotadd_state, list);
-
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
@@ -783,6 +835,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
+ has->covered_end_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online by checking its last previously backed page.
@@ -791,10 +845,8 @@ static unsigned long handle_pg_range(unsigned long pg_start,
*/
if (start_pfn > has->start_pfn &&
!PageReserved(pfn_to_page(start_pfn - 1)))
- hv_bring_pgs_online(start_pfn, pgs_ol);
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
- has->covered_end_pfn += pgs_ol;
- pfn_cnt -= pgs_ol;
}
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
@@ -813,17 +865,20 @@ static unsigned long handle_pg_range(unsigned long pg_start,
} else {
pfn_cnt = size;
}
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
}
/*
* If we managed to online any pages that were given to us,
* we declare success.
*/
- return has->covered_end_pfn - old_covered_state;
-
+ res = has->covered_end_pfn - old_covered_state;
+ break;
}
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- return 0;
+ return res;
}
static unsigned long process_hot_add(unsigned long pg_start,
@@ -832,13 +887,20 @@ static unsigned long process_hot_add(unsigned long pg_start,
unsigned long rg_size)
{
struct hv_hotadd_state *ha_region = NULL;
+ int covered;
+ unsigned long flags;
if (pfn_cnt == 0)
return 0;
- if (!dm_device.host_specified_ha_region)
- if (pfn_covered(pg_start, pfn_cnt))
+ if (!dm_device.host_specified_ha_region) {
+ covered = pfn_covered(pg_start, pfn_cnt);
+ if (covered < 0)
+ return 0;
+
+ if (covered)
goto do_pg_range;
+ }
/*
* If the host has specified a hot-add range; deal with it first.
@@ -850,12 +912,17 @@ static unsigned long process_hot_add(unsigned long pg_start,
return 0;
INIT_LIST_HEAD(&ha_region->list);
+ INIT_LIST_HEAD(&ha_region->gap_list);
- list_add_tail(&ha_region->list, &dm_device.ha_region_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
+
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
do_pg_range:
@@ -882,7 +949,6 @@ static void hot_add_req(struct work_struct *dummy)
resp.hdr.size = sizeof(struct dm_hot_add_response);
#ifdef CONFIG_MEMORY_HOTPLUG
- mutex_lock(&dm_device.ha_region_mutex);
pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
@@ -916,7 +982,6 @@ static void hot_add_req(struct work_struct *dummy)
rg_start, rg_sz);
dm->num_pages_added += resp.page_count;
- mutex_unlock(&dm_device.ha_region_mutex);
#endif
/*
* The result field of the response structure has the
@@ -1010,7 +1075,6 @@ static unsigned long compute_balloon_floor(void)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
- struct sysinfo val;
unsigned long now = jiffies;
unsigned long last_post = last_post_time;
@@ -1022,7 +1086,6 @@ static void post_status(struct hv_dynmem_device *dm)
if (!time_after(now, (last_post_time + HZ)))
return;
- si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
@@ -1038,7 +1101,7 @@ static void post_status(struct hv_dynmem_device *dm)
* num_pages_onlined) as committed to the host, otherwise it can try
* asking us to balloon them out.
*/
- status.num_avail = val.freeram;
+ status.num_avail = si_mem_available();
status.num_committed = vm_memory_committed() +
dm->num_pages_ballooned +
(dm->num_pages_added > dm->num_pages_onlined ?
@@ -1144,7 +1207,7 @@ static void balloon_up(struct work_struct *dummy)
int ret;
bool done = false;
int i;
- struct sysinfo val;
+ long avail_pages;
unsigned long floor;
/* The host balloons pages in 2M granularity. */
@@ -1156,12 +1219,12 @@ static void balloon_up(struct work_struct *dummy)
*/
alloc_unit = 512;
- si_meminfo(&val);
+ avail_pages = si_mem_available();
floor = compute_balloon_floor();
/* Refuse to balloon below the floor, keep the 2M granularity. */
- if (val.freeram < num_pages || val.freeram - num_pages < floor) {
- num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
+ if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
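The clamp above is simple arithmetic; a standalone sketch with made-up numbers (PAGES_IN_2M is assumed here to be 512, i.e. 2 MiB of 4 KiB pages):

#include <stdio.h>

#define PAGES_IN_2M 512	/* assumption: 2 MiB of 4 KiB pages */

int main(void)
{
	long avail_pages = 300000;	/* made-up si_mem_available() result */
	long floor = 262144;		/* made-up compute_balloon_floor() */
	long num_pages = 100000;	/* pages requested by the host */

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}
	printf("will balloon %ld pages\n", num_pages);	/* 37376 */
	return 0;
}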
@@ -1172,7 +1235,6 @@ static void balloon_up(struct work_struct *dummy)
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
bl_resp->more_pages = 1;
-
num_pages -= num_ballooned;
num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
bl_resp, alloc_unit);
@@ -1461,7 +1523,7 @@ static int balloon_probe(struct hv_device *dev,
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
INIT_LIST_HEAD(&dm_device.ha_region_list);
- mutex_init(&dm_device.ha_region_mutex);
+ spin_lock_init(&dm_device.ha_lock);
INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
dm_device.host_specified_ha_region = false;
@@ -1580,8 +1642,9 @@ probe_error0:
static int balloon_remove(struct hv_device *dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
- struct list_head *cur, *tmp;
- struct hv_hotadd_state *has;
+ struct hv_hotadd_state *has, *tmp;
+ struct hv_hotadd_gap *gap, *tmp_gap;
+ unsigned long flags;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -1596,11 +1659,16 @@ static int balloon_remove(struct hv_device *dev)
restore_online_page_callback(&hv_online_page);
unregister_memory_notifier(&hv_memory_nb);
#endif
- list_for_each_safe(cur, tmp, &dm->ha_region_list) {
- has = list_entry(cur, struct hv_hotadd_state, list);
+ spin_lock_irqsave(&dm_device.ha_lock, flags);
+ list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
+ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+ list_del(&gap->list);
+ kfree(gap);
+ }
list_del(&has->list);
kfree(has);
}
+ spin_unlock_irqrestore(&dm_device.ha_lock, flags);
return 0;
}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 23c70799ad8a..8b2ba98831ec 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -83,6 +83,12 @@ static void fcopy_timeout_func(struct work_struct *dummy)
hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
+static void fcopy_register_done(void)
+{
+ pr_debug("FCP: userspace daemon registered\n");
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+}
+
static int fcopy_handle_handshake(u32 version)
{
u32 our_ver = FCOPY_CURRENT_VERSION;
@@ -94,7 +100,8 @@ static int fcopy_handle_handshake(u32 version)
break;
case FCOPY_VERSION_1:
/* Daemon expects us to reply with our own version */
- if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ fcopy_register_done))
return -EFAULT;
dm_reg_value = version;
break;
@@ -107,8 +114,7 @@ static int fcopy_handle_handshake(u32 version)
*/
return -EINVAL;
}
- pr_debug("FCP: userspace daemon ver. %d registered\n", version);
- hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+ pr_debug("FCP: userspace daemon ver. %d connected\n", version);
return 0;
}
@@ -161,7 +167,7 @@ static void fcopy_send_data(struct work_struct *dummy)
}
fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, out_src, out_len);
+ rc = hvutil_transport_send(hvt, out_src, out_len, NULL);
if (rc) {
pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index cb1a9160aab1..5e1fdc8d32ab 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -102,6 +102,17 @@ static void kvp_poll_wrapper(void *channel)
hv_kvp_onchannelcallback(channel);
}
+static void kvp_register_done(void)
+{
+ /*
+ * If we're still negotiating with the host, cancel the timeout
+ * work so we don't poll the channel twice.
+ */
+ pr_debug("KVP: userspace daemon registered\n");
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+}
+
static void
kvp_register(int reg_value)
{
@@ -116,7 +127,8 @@ kvp_register(int reg_value)
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
- hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg));
+ hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg),
+ kvp_register_done);
kfree(kvp_msg);
}
}
@@ -158,17 +170,10 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
/*
* We have a compatible daemon; complete the handshake.
*/
- pr_debug("KVP: userspace daemon ver. %d registered\n",
- KVP_OP_REGISTER);
+ pr_debug("KVP: userspace daemon ver. %d connected\n",
+ msg->kvp_hdr.operation);
kvp_register(dm_reg_value);
- /*
- * If we're still negotiating with the host cancel the timeout
- * work to not poll the channel twice.
- */
- cancel_delayed_work_sync(&kvp_host_handshake_work);
- hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
-
return 0;
}
@@ -455,7 +460,7 @@ kvp_send_key(struct work_struct *dummy)
}
kvp_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, message, sizeof(*message));
+ rc = hvutil_transport_send(hvt, message, sizeof(*message), NULL);
if (rc) {
pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 3fba14e88f03..a6707133c297 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -67,11 +67,11 @@ static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
-static void vss_send_op(struct work_struct *dummy);
static void vss_timeout_func(struct work_struct *dummy);
+static void vss_handle_request(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
-static DECLARE_WORK(vss_send_op_work, vss_send_op);
+static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
static void vss_poll_wrapper(void *channel)
{
@@ -95,6 +95,12 @@ static void vss_timeout_func(struct work_struct *dummy)
hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
+static void vss_register_done(void)
+{
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+ pr_debug("VSS: userspace daemon registered\n");
+}
+
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
u32 our_ver = VSS_OP_REGISTER1;
@@ -105,16 +111,16 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
dm_reg_value = VSS_OP_REGISTER;
break;
case VSS_OP_REGISTER1:
- /* Daemon expects us to reply with our own version*/
- if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ vss_register_done))
return -EFAULT;
dm_reg_value = VSS_OP_REGISTER1;
break;
default:
return -EINVAL;
}
- hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
- pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
+ pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
@@ -136,6 +142,11 @@ static int vss_on_msg(void *msg, int len)
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
+
+ if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
+ vss_transaction.msg->vss_cf.flags =
+ VSS_HBU_NO_AUTO_RECOVERY;
+
if (cancel_delayed_work_sync(&vss_timeout_work)) {
vss_respond_to_host(vss_msg->error);
/* Transaction is finished, reset the state. */
@@ -150,8 +161,7 @@ static int vss_on_msg(void *msg, int len)
return 0;
}
-
-static void vss_send_op(struct work_struct *dummy)
+static void vss_send_op(void)
{
int op = vss_transaction.msg->vss_hdr.operation;
int rc;
@@ -168,7 +178,10 @@ static void vss_send_op(struct work_struct *dummy)
vss_msg->vss_hdr.operation = op;
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg));
+
+ schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+
+ rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&vss_timeout_work)) {
@@ -182,6 +195,38 @@ static void vss_send_op(struct work_struct *dummy)
return;
}
+static void vss_handle_request(struct work_struct *dummy)
+{
+ switch (vss_transaction.msg->vss_hdr.operation) {
+ /*
+ * Initiate a "freeze/thaw" operation in the guest.
+ * We respond to the host once the operation is complete.
+ *
+ * We send the message to the user space daemon and the operation is
+ * performed in the daemon.
+ */
+ case VSS_OP_THAW:
+ case VSS_OP_FREEZE:
+ case VSS_OP_HOT_BACKUP:
+ if (vss_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ vss_respond_to_host(HV_E_FAIL);
+ return;
+ }
+ vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+ vss_send_op();
+ return;
+ case VSS_OP_GET_DM_INFO:
+ vss_transaction.msg->dm_info.flags = 0;
+ break;
+ default:
+ break;
+ }
+
+ vss_respond_to_host(0);
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+}
+
/*
* Send a response back to the host.
*/
@@ -266,48 +311,8 @@ void hv_vss_onchannelcallback(void *context)
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
- switch (vss_msg->vss_hdr.operation) {
- /*
- * Initiate a "freeze/thaw"
- * operation in the guest.
- * We respond to the host once
- * the operation is complete.
- *
- * We send the message to the
- * user space daemon and the
- * operation is performed in
- * the daemon.
- */
- case VSS_OP_FREEZE:
- case VSS_OP_THAW:
- if (vss_transaction.state < HVUTIL_READY) {
- /* Userspace is not registered yet */
- vss_respond_to_host(HV_E_FAIL);
- return;
- }
- vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
- schedule_work(&vss_send_op_work);
- schedule_delayed_work(&vss_timeout_work,
- VSS_USERSPACE_TIMEOUT);
- return;
-
- case VSS_OP_HOT_BACKUP:
- vss_msg->vss_cf.flags =
- VSS_HBU_NO_AUTO_RECOVERY;
- vss_respond_to_host(0);
- return;
-
- case VSS_OP_GET_DM_INFO:
- vss_msg->dm_info.flags = 0;
- vss_respond_to_host(0);
- return;
-
- default:
- vss_respond_to_host(0);
- return;
-
- }
-
+ schedule_work(&vss_handle_request_work);
+ return;
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
@@ -358,6 +363,6 @@ void hv_vss_deinit(void)
{
vss_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&vss_timeout_work);
- cancel_work_sync(&vss_send_op_work);
+ cancel_work_sync(&vss_handle_request_work);
hvutil_transport_destroy(hvt);
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index d5acaa2d8e61..4aa3cb63fd41 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -34,22 +34,25 @@
#define SD_MINOR 0
#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
-#define SD_WS2008_MAJOR 1
-#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
+#define SD_MAJOR_1 1
+#define SD_VERSION_1 (SD_MAJOR_1 << 16 | SD_MINOR)
-#define TS_MAJOR 3
+#define TS_MAJOR 4
#define TS_MINOR 0
#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
-#define TS_WS2008_MAJOR 1
-#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
+#define TS_MAJOR_1 1
+#define TS_VERSION_1 (TS_MAJOR_1 << 16 | TS_MINOR)
+
+#define TS_MAJOR_3 3
+#define TS_VERSION_3 (TS_MAJOR_3 << 16 | TS_MINOR)
#define HB_MAJOR 3
-#define HB_MINOR 0
+#define HB_MINOR 0
#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
-#define HB_WS2008_MAJOR 1
-#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
+#define HB_MAJOR_1 1
+#define HB_VERSION_1 (HB_MAJOR_1 << 16 | HB_MINOR)
static int sd_srv_version;
static int ts_srv_version;
@@ -61,9 +64,14 @@ static struct hv_util_service util_shutdown = {
.util_cb = shutdown_onchannelcallback,
};
+static int hv_timesync_init(struct hv_util_service *srv);
+static void hv_timesync_deinit(void);
+
static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
.util_cb = timesync_onchannelcallback,
+ .util_init = hv_timesync_init,
+ .util_deinit = hv_timesync_deinit,
};
static void heartbeat_onchannelcallback(void *context);
@@ -161,35 +169,43 @@ static void shutdown_onchannelcallback(void *context)
}
/*
- * Set guest time to host UTC time.
- */
-static inline void do_adj_guesttime(u64 hosttime)
-{
- s64 host_tns;
- struct timespec host_ts;
-
- host_tns = (hosttime - WLTIMEDELTA) * 100;
- host_ts = ns_to_timespec(host_tns);
-
- do_settimeofday(&host_ts);
-}
-
-/*
* Set the host time in a process context.
*/
struct adj_time_work {
struct work_struct work;
u64 host_time;
+ u64 ref_time;
+ u8 flags;
};
static void hv_set_host_time(struct work_struct *work)
{
struct adj_time_work *wrk;
+ s64 host_tns;
+ u64 newtime;
+ struct timespec host_ts;
wrk = container_of(work, struct adj_time_work, work);
- do_adj_guesttime(wrk->host_time);
- kfree(wrk);
+
+ newtime = wrk->host_time;
+ if (ts_srv_version > TS_VERSION_3) {
+ /*
+ * Some latency has been introduced since Hyper-V generated
+ * its time sample. Take that latency into account before
+ * using the TSC reference time sample from Hyper-V.
+ *
+ * This sample is provided by TimeSync v4 and later hosts.
+ */
+ u64 current_tick;
+
+ rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+ newtime += (current_tick - wrk->ref_time);
+ }
+ host_tns = (newtime - WLTIMEDELTA) * 100;
+ host_ts = ns_to_timespec(host_tns);
+
+ do_settimeofday(&host_ts);
}
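The conversion in hv_set_host_time() is fixed-point arithmetic in 100 ns units; a user-space sketch of just that math (the WLTIMEDELTA value used below is an assumption, the macro itself is defined outside this hunk):

#include <inttypes.h>
#include <stdio.h>

/* Assumed Windows-to-Unix epoch offset in 100 ns units: 11644473600 s * 10^7 */
#define WLTIMEDELTA 116444736000000000ULL

static uint64_t host_sample_to_unix_ns(uint64_t host_time, uint64_t ref_time,
				       uint64_t current_tick, int ts_v4_plus)
{
	uint64_t newtime = host_time;

	/* TimeSync v4+ hosts also send the reference counter value they
	 * sampled, so the delivery latency can be compensated for. */
	if (ts_v4_plus)
		newtime += current_tick - ref_time;

	return (newtime - WLTIMEDELTA) * 100;	/* 100 ns units -> ns */
}

int main(void)
{
	/* Made-up sample: 5000 ticks (500 us) of delivery latency. */
	uint64_t host_time = WLTIMEDELTA + 10000000ULL;	/* 1 s after the Unix epoch */
	uint64_t ns = host_sample_to_unix_ns(host_time, 100000, 105000, 1);

	printf("guest time: %" PRIu64 " ns since the Unix epoch\n", ns);
	return 0;
}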
/*
@@ -198,33 +214,31 @@ static void hv_set_host_time(struct work_struct *work)
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
* After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
* message after the timesync channel is opened. Since the hv_utils module is
- * loaded after hv_vmbus, the first message is usually missed. The other
- * thing is, systime is automatically set to emulated hardware clock which may
- * not be UTC time or in the same time zone. So, to override these effects, we
- * use the first 50 time samples for initial system time setting.
+ * loaded after hv_vmbus, the first message is usually missed. This bit is
+ * considered a hard request to discipline the clock.
+ *
+ * The ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from the host. This is
+ * typically used as a hint to the guest. The guest is under no obligation
+ * to discipline the clock.
*/
-static inline void adj_guesttime(u64 hosttime, u8 flags)
+static struct adj_time_work wrk;
+static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags)
{
- struct adj_time_work *wrk;
- static s32 scnt = 50;
- wrk = kmalloc(sizeof(struct adj_time_work), GFP_ATOMIC);
- if (wrk == NULL)
+ /*
+ * This check is safe since we are executing in the
+ * interrupt context and time sync messages are always
+ * delivered on the same CPU.
+ */
+ if (work_pending(&wrk.work))
return;
- wrk->host_time = hosttime;
- if ((flags & ICTIMESYNCFLAG_SYNC) != 0) {
- INIT_WORK(&wrk->work, hv_set_host_time);
- schedule_work(&wrk->work);
- return;
+ wrk.host_time = hosttime;
+ wrk.ref_time = reftime;
+ wrk.flags = flags;
+ if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) {
+ schedule_work(&wrk.work);
}
-
- if ((flags & ICTIMESYNCFLAG_SAMPLE) != 0 && scnt > 0) {
- scnt--;
- INIT_WORK(&wrk->work, hv_set_host_time);
- schedule_work(&wrk->work);
- } else
- kfree(wrk);
}
/*
@@ -237,6 +251,7 @@ static void timesync_onchannelcallback(void *context)
u64 requestid;
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
+ struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
struct icmsg_negotiate *negop = NULL;
@@ -252,11 +267,27 @@ static void timesync_onchannelcallback(void *context)
time_txf_buf,
util_fw_version,
ts_srv_version);
+ pr_info("Using TimeSync version %d.%d\n",
+ ts_srv_version >> 16, ts_srv_version & 0xFFFF);
} else {
- timedatap = (struct ictimesync_data *)&time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
- adj_guesttime(timedatap->parenttime, timedatap->flags);
+ if (ts_srv_version > TS_VERSION_3) {
+ refdata = (struct ictimesync_ref_data *)
+ &time_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ adj_guesttime(refdata->parenttime,
+ refdata->vmreferencetime,
+ refdata->flags);
+ } else {
+ timedatap = (struct ictimesync_data *)
+ &time_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+ adj_guesttime(timedatap->parenttime,
+ 0,
+ timedatap->flags);
+ }
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
@@ -350,16 +381,21 @@ static int util_probe(struct hv_device *dev,
switch (vmbus_proto_version) {
case (VERSION_WS2008):
util_fw_version = UTIL_WS2K8_FW_VERSION;
- sd_srv_version = SD_WS2008_VERSION;
- ts_srv_version = TS_WS2008_VERSION;
- hb_srv_version = HB_WS2008_VERSION;
+ sd_srv_version = SD_VERSION_1;
+ ts_srv_version = TS_VERSION_1;
+ hb_srv_version = HB_VERSION_1;
break;
-
- default:
+ case(VERSION_WIN10):
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
ts_srv_version = TS_VERSION;
hb_srv_version = HB_VERSION;
+ break;
+ default:
+ util_fw_version = UTIL_FW_VERSION;
+ sd_srv_version = SD_VERSION;
+ ts_srv_version = TS_VERSION_3;
+ hb_srv_version = HB_VERSION;
}
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
@@ -427,6 +463,17 @@ static struct hv_driver util_drv = {
.remove = util_remove,
};
+static int hv_timesync_init(struct hv_util_service *srv)
+{
+ INIT_WORK(&wrk.work, hv_set_host_time);
+ return 0;
+}
+
+static void hv_timesync_deinit(void)
+{
+ cancel_work_sync(&wrk.work);
+}
+
static int __init init_hyperv_utils(void)
{
pr_info("Registering HyperV Utility Driver\n");
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 9a9983fa4531..c235a9515267 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -72,6 +72,10 @@ static ssize_t hvt_op_read(struct file *file, char __user *buf,
hvt->outmsg = NULL;
hvt->outmsg_len = 0;
+ if (hvt->on_read)
+ hvt->on_read();
+ hvt->on_read = NULL;
+
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
@@ -219,7 +223,8 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
mutex_unlock(&hvt->lock);
}
-int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void))
{
struct cn_msg *cn_msg;
int ret = 0;
@@ -237,6 +242,13 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
memcpy(cn_msg->data, msg, len);
ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
kfree(cn_msg);
+ /*
+ * We don't know when netlink messages are delivered but unlike
+ * in CHARDEV mode we're not blocked and we can send the next
+ * message right away.
+ */
+ if (on_read_cb)
+ on_read_cb();
return ret;
}
/* HVUTIL_TRANSPORT_CHARDEV */
@@ -255,6 +267,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
if (hvt->outmsg) {
memcpy(hvt->outmsg, msg, len);
hvt->outmsg_len = len;
+ hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
} else
ret = -ENOMEM;
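A user-space sketch of the on_read hook this hunk adds (illustrative names): in netlink mode the callback fires right after the send, while in char-device mode it is stored alongside outmsg and fired from the read path, which is what hvt_op_read() does above.

#include <stdio.h>

struct transport {
	const char *outmsg;
	void (*on_read)(void);
};

static void send_msg(struct transport *t, const char *msg,
		     void (*on_read_cb)(void), int netlink_mode)
{
	if (netlink_mode) {
		printf("sent (netlink): %s\n", msg);
		if (on_read_cb)
			on_read_cb();	/* fire immediately */
		return;
	}
	t->outmsg = msg;		/* char-device mode: defer */
	t->on_read = on_read_cb;
}

static void daemon_read(struct transport *t)
{
	printf("daemon read: %s\n", t->outmsg);
	if (t->on_read)
		t->on_read();
	t->on_read = NULL;
}

static void register_done(void) { printf("register_done callback\n"); }

int main(void)
{
	struct transport t = { 0 };

	send_msg(&t, "version reply", register_done, 0);
	daemon_read(&t);	/* callback fires here, as in hvt_op_read() */
	return 0;
}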
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index 06254a165a18..d98f5225c3e6 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -36,6 +36,7 @@ struct hvutil_transport {
struct list_head list; /* hvt_list */
int (*on_msg)(void *, int); /* callback on new user message */
void (*on_reset)(void); /* callback when userspace drops */
+ void (*on_read)(void); /* callback on message read */
u8 *outmsg; /* message to the userspace */
int outmsg_len; /* its length */
wait_queue_head_t outmsg_q; /* poll/read wait queue */
@@ -46,7 +47,8 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
u32 cn_idx, u32 cn_val,
int (*on_msg)(void *, int),
void (*on_reset)(void));
-int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len);
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void));
void hvutil_transport_destroy(struct hvutil_transport *hvt);
#endif /* _HV_UTILS_TRANSPORT_H */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 718b5c72f0c8..a5b4442433c8 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -495,7 +495,7 @@ struct hv_ring_buffer_debug_info {
extern int hv_init(void);
-extern void hv_cleanup(void);
+extern void hv_cleanup(bool crash);
extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
@@ -522,14 +522,15 @@ extern unsigned int host_info_edx;
/* Interface */
-int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
- u32 buflen);
+int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ struct page *pages, u32 pagecnt);
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock);
+ u32 kv_count, bool *signal, bool lock,
+ enum hv_signal_policy policy);
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen, u32 *buffer_actual_len,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index fe586bf74e17..08043da1a61c 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -27,6 +27,8 @@
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include "hyperv_vmbus.h"
@@ -66,12 +68,20 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* arrived.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
+ enum hv_signal_policy policy)
{
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
return false;
+ /*
+ * When the client wants to control signaling,
+ * we only honour the host interrupt mask.
+ */
+ if (policy == HV_SIGNAL_POLICY_EXPLICIT)
+ return true;
+
/* check interrupt_mask before read_index */
virt_rmb();
/*
@@ -162,18 +172,7 @@ static u32 hv_copyfrom_ringbuffer(
void *ring_buffer = hv_get_ring_buffer(ring_info);
u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
- u32 frag_len;
-
- /* wrap-around detected at the src */
- if (destlen > ring_buffer_size - start_read_offset) {
- frag_len = ring_buffer_size - start_read_offset;
-
- memcpy(dest, ring_buffer + start_read_offset, frag_len);
- memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
- } else
-
- memcpy(dest, ring_buffer + start_read_offset, destlen);
-
+ memcpy(dest, ring_buffer + start_read_offset, destlen);
start_read_offset += destlen;
start_read_offset %= ring_buffer_size;
@@ -194,15 +193,8 @@ static u32 hv_copyto_ringbuffer(
{
void *ring_buffer = hv_get_ring_buffer(ring_info);
u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
- u32 frag_len;
- /* wrap-around detected! */
- if (srclen > ring_buffer_size - start_write_offset) {
- frag_len = ring_buffer_size - start_write_offset;
- memcpy(ring_buffer + start_write_offset, src, frag_len);
- memcpy(ring_buffer, src + frag_len, srclen - frag_len);
- } else
- memcpy(ring_buffer + start_write_offset, src, srclen);
+ memcpy(ring_buffer + start_write_offset, src, srclen);
start_write_offset += srclen;
start_write_offset %= ring_buffer_size;
@@ -235,22 +227,46 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- void *buffer, u32 buflen)
+ struct page *pages, u32 page_cnt)
{
- if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
- return -EINVAL;
+ int i;
+ struct page **pages_wraparound;
+
+ BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
- ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
+ /*
+ * The first page holds struct hv_ring_buffer; do a wraparound
+ * mapping for the rest.
+ */
+ pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
+ GFP_KERNEL);
+ if (!pages_wraparound)
+ return -ENOMEM;
+
+ pages_wraparound[0] = pages;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+
+ kfree(pages_wraparound);
+
+
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
/* Set the feature bit for enabling flow control. */
ring_info->ring_buffer->feature_bits.value = 1;
- ring_info->ring_size = buflen;
- ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
+ ring_info->ring_size = page_cnt << PAGE_SHIFT;
+ ring_info->ring_datasize = ring_info->ring_size -
+ sizeof(struct hv_ring_buffer);
spin_lock_init(&ring_info->ring_lock);
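The pages_wraparound index arithmetic is the core of this change; a tiny user-space sketch that just prints the mapping for a 4-page ring:

#include <stdio.h>

int main(void)
{
	unsigned int page_cnt = 4;	/* 1 header page + 3 data pages */
	unsigned int i;

	printf("virtual slot 0 -> physical page 0 (header)\n");
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		printf("virtual slot %u -> physical page %u\n",
		       i + 1, i % (page_cnt - 1) + 1);
	/*
	 * Output: slots 1..3 map pages 1..3, slots 4..6 map pages 1..3 again,
	 * so a copy that runs past the end of the data area continues at its
	 * start. That is why hv_copyto/from_ringbuffer() no longer need the
	 * wrap-around memcpy split removed earlier in this patch.
	 */
	return 0;
}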
@@ -260,11 +276,13 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
+ vunmap(ring_info->ring_buffer);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
+ struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
+ enum hv_signal_policy policy)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -326,7 +344,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info);
+ *signal = hv_need_to_signal(old_write, outring_info, policy);
return 0;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index e82f7e1c217c..a259e18d22d5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -105,8 +105,8 @@ static struct notifier_block hyperv_panic_block = {
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
-struct resource *hyperv_mmio;
-DEFINE_SEMAPHORE(hyperv_mmio_lock);
+static struct resource *hyperv_mmio;
+static DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -874,7 +874,7 @@ err_alloc:
bus_unregister(&hv_bus);
err_cleanup:
- hv_cleanup();
+ hv_cleanup(false);
return ret;
}
@@ -961,8 +961,8 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- dev_set_name(&child_device_obj->device, "vmbus_%d",
- child_device_obj->channel->id);
+ dev_set_name(&child_device_obj->device, "vmbus-%pUl",
+ child_device_obj->channel->offermsg.offer.if_instance.b);
child_device_obj->device.bus = &hv_bus;
child_device_obj->device.parent = &hv_acpi_dev->dev;
@@ -1326,7 +1326,7 @@ static void hv_kexec_handler(void)
vmbus_initiate_unload(false);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
- hv_cleanup();
+ hv_cleanup(false);
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -1338,7 +1338,7 @@ static void hv_crash_handler(struct pt_regs *regs)
* for kdump.
*/
hv_synic_cleanup(NULL);
- hv_cleanup();
+ hv_cleanup(true);
};
static int __init hv_acpi_init(void)
@@ -1398,7 +1398,7 @@ static void __exit vmbus_exit(void)
&hyperv_panic_block);
}
bus_unregister(&hv_bus);
- hv_cleanup();
+ hv_cleanup(false);
for_each_online_cpu(cpu) {
tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index eaf2f916d48c..45cef3d2c75c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -969,7 +969,6 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
- depends on THERMAL || !THERMAL_OF
select REGMAP_I2C
help
If you say yes here you get support for one common type of
@@ -1119,6 +1118,7 @@ config SENSORS_LM95241
config SENSORS_LM95245
tristate "National Semiconductor LM95245 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for LM95235 and LM95245
temperature sensor chips.
@@ -1572,7 +1572,6 @@ config SENSORS_THMC50
config SENSORS_TMP102
tristate "Texas Instruments TMP102"
depends on I2C
- depends on THERMAL || !THERMAL_OF
select REGMAP_I2C
help
If you say yes here you get support for Texas Instruments TMP102
@@ -1823,6 +1822,13 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
+config SENSORS_XGENE
+ tristate "APM X-Gene SoC hardware monitoring driver"
+ depends on XGENE_SLIMPRO_MBOX || PCC
+ help
+ If you say yes here you get support for the temperature
+ and power sensors for APM X-Gene SoC.
+
if ACPI
comment "ACPI drivers"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index fe87d2895a97..aecf4ba17460 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -165,6 +165,7 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l785ts.o
obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o
obj-$(CONFIG_PMBUS) += pmbus/
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index fc1e65a263a4..812fbc00f693 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -7,8 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * TODO: SPI, support for external temperature sensor
- * use power-down mode for suspend?, interrupt handling?
+ * TODO: SPI, use power-down mode for suspend?, interrupt handling?
*/
#include <linux/kernel.h>
@@ -31,6 +30,7 @@
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
#define ADT7411_CFG1_RESERVED_BIT1 (1 << 1)
+#define ADT7411_CFG1_EXT_TDM (1 << 2)
#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
#define ADT7411_REG_CFG2 0x19
@@ -57,6 +57,7 @@ struct adt7411_data {
unsigned long next_update;
int vref_cached;
struct i2c_client *client;
+ bool use_ext_temp;
};
/*
@@ -127,11 +128,20 @@ static ssize_t adt7411_show_vdd(struct device *dev,
static ssize_t adt7411_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int nr = to_sensor_dev_attr(attr)->index;
struct adt7411_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- int val = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
- ADT7411_REG_INT_TEMP_MSB, 0);
-
+ int val;
+ struct {
+ u8 low;
+ u8 high;
+ } reg[2] = {
+ { ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB },
+ { ADT7411_REG_EXT_TEMP_AIN14_LSB,
+ ADT7411_REG_EXT_TEMP_AIN1_MSB },
+ };
+
+ val = adt7411_read_10_bit(client, reg[nr].low, reg[nr].high, 0);
if (val < 0)
return val;
@@ -218,11 +228,13 @@ static ssize_t adt7411_set_bit(struct device *dev,
return ret < 0 ? ret : count;
}
+
#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
adt7411_set_bit, __bit, __reg)
-static DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, adt7411_show_temp, NULL, 1);
static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
@@ -237,7 +249,8 @@ static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_22
static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
static struct attribute *adt7411_attrs[] = {
- &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
&dev_attr_in0_input.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
@@ -253,7 +266,27 @@ static struct attribute *adt7411_attrs[] = {
NULL
};
-ATTRIBUTE_GROUPS(adt7411);
+static umode_t adt7411_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct adt7411_data *data = dev_get_drvdata(dev);
+ bool visible = true;
+
+ if (attr == &sensor_dev_attr_temp2_input.dev_attr.attr)
+ visible = data->use_ext_temp;
+ else if (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_input.dev_attr.attr)
+ visible = !data->use_ext_temp;
+
+ return visible ? attr->mode : 0;
+}
+
+static const struct attribute_group adt7411_group = {
+ .attrs = adt7411_attrs,
+ .is_visible = adt7411_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(adt7411);
static int adt7411_detect(struct i2c_client *client,
struct i2c_board_info *info)
@@ -309,6 +342,8 @@ static int adt7411_init_device(struct adt7411_data *data)
if (ret < 0)
return ret;
+ data->use_ext_temp = ret & ADT7411_CFG1_EXT_TDM;
+
/*
* We must only write zero to bit 1 and only one to bit 3 according to
* the datasheet.
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index f5da39a68929..6e60ca53406e 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -32,6 +32,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/util_macros.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
@@ -83,6 +84,7 @@ static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
#define ADT7470_REG_PWM_MIN_MAX_ADDR 0x6D
#define ADT7470_REG_PWM_TEMP_MIN_BASE_ADDR 0x6E
#define ADT7470_REG_PWM_TEMP_MIN_MAX_ADDR 0x71
+#define ADT7470_REG_CFG_2 0x74
#define ADT7470_REG_ACOUSTICS12 0x75
#define ADT7470_REG_ACOUSTICS34 0x76
#define ADT7470_REG_DEVICE 0x3D
@@ -142,6 +144,11 @@ static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
#define FAN_PERIOD_INVALID 65535
#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
+/* Config registers 1 and 2 include fields for selecting the PWM frequency */
+#define ADT7470_CFG_LF 0x40
+#define ADT7470_FREQ_MASK 0x70
+#define ADT7470_FREQ_SHIFT 4
+
struct adt7470_data {
struct i2c_client *client;
struct mutex lock;
@@ -170,7 +177,6 @@ struct adt7470_data {
u8 pwm_auto_temp[ADT7470_PWM_COUNT];
struct task_struct *auto_update;
- struct completion auto_update_stop;
unsigned int auto_update_interval;
};
@@ -266,12 +272,14 @@ static int adt7470_update_thread(void *p)
mutex_lock(&data->lock);
adt7470_read_temperatures(client, data);
mutex_unlock(&data->lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
- msleep_interruptible(data->auto_update_interval);
+
+ schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
}
- complete_all(&data->auto_update_stop);
return 0;
}
@@ -538,6 +546,28 @@ static ssize_t show_alarm_mask(struct device *dev,
return sprintf(buf, "%x\n", data->alarms_mask);
}
+static ssize_t set_alarm_mask(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf,
+ size_t count)
+{
+ struct adt7470_data *data = dev_get_drvdata(dev);
+ long mask;
+
+ if (kstrtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ if (mask & ~0xffff)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ data->alarms_mask = mask;
+ adt7470_write_word_data(data->client, ADT7470_REG_ALARM1_MASK, mask);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
static ssize_t show_fan_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -688,6 +718,70 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
return count;
}
+/* These are the valid PWM frequencies to the nearest Hz */
+static const int adt7470_freq_map[] = {
+ 11, 15, 22, 29, 35, 44, 59, 88, 1400, 22500
+};
+
+static ssize_t show_pwm_freq(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct adt7470_data *data = adt7470_update_device(dev);
+ unsigned char cfg_reg_1;
+ unsigned char cfg_reg_2;
+ int index;
+
+ mutex_lock(&data->lock);
+ cfg_reg_1 = i2c_smbus_read_byte_data(data->client, ADT7470_REG_CFG);
+ cfg_reg_2 = i2c_smbus_read_byte_data(data->client, ADT7470_REG_CFG_2);
+ mutex_unlock(&data->lock);
+
+ index = (cfg_reg_2 & ADT7470_FREQ_MASK) >> ADT7470_FREQ_SHIFT;
+ if (!(cfg_reg_1 & ADT7470_CFG_LF))
+ index += 8;
+ if (index >= ARRAY_SIZE(adt7470_freq_map))
+ index = ARRAY_SIZE(adt7470_freq_map) - 1;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", adt7470_freq_map[index]);
+}
+
+static ssize_t set_pwm_freq(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct adt7470_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ long freq;
+ int index;
+ int low_freq = ADT7470_CFG_LF;
+ unsigned char val;
+
+ if (kstrtol(buf, 10, &freq))
+ return -EINVAL;
+
+ /* Round the user value given to the closest available frequency */
+ index = find_closest(freq, adt7470_freq_map,
+ ARRAY_SIZE(adt7470_freq_map));
+
+ if (index >= 8) {
+ index -= 8;
+ low_freq = 0;
+ }
+
+ mutex_lock(&data->lock);
+ /* Configuration Register 1 */
+ val = i2c_smbus_read_byte_data(client, ADT7470_REG_CFG);
+ i2c_smbus_write_byte_data(client, ADT7470_REG_CFG,
+ (val & ~ADT7470_CFG_LF) | low_freq);
+ /* Configuration Register 2 */
+ val = i2c_smbus_read_byte_data(client, ADT7470_REG_CFG_2);
+ i2c_smbus_write_byte_data(client, ADT7470_REG_CFG_2,
+ (val & ~ADT7470_FREQ_MASK) | (index << ADT7470_FREQ_SHIFT));
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
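A user-space sketch of the frequency encoding used by set_pwm_freq(); find_nearest() below stands in for the kernel's find_closest() helper:

#include <stdio.h>
#include <stdlib.h>

static const int adt7470_freq_map[] = {
	11, 15, 22, 29, 35, 44, 59, 88, 1400, 22500
};

static int find_nearest(long val, const int *arr, int len)
{
	int i, best = 0;

	for (i = 1; i < len; i++)
		if (labs(val - arr[i]) < labs(val - arr[best]))
			best = i;
	return best;
}

int main(void)
{
	long requested = 100;	/* e.g. "echo 100 > pwm1_freq" */
	int index = find_nearest(requested, adt7470_freq_map, 10);
	int low_freq = (index < 8);	/* entries 0-7 use the CFG_LF range */

	if (!low_freq)
		index -= 8;	/* entries 8-9 use the high-frequency range */

	printf("closest = %d Hz, CFG_LF=%d, freq field=%d\n",
	       low_freq ? adt7470_freq_map[index] : adt7470_freq_map[index + 8],
	       low_freq, index);	/* closest = 88 Hz, CFG_LF=1, freq field=7 */
	return 0;
}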
static ssize_t show_pwm_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -918,7 +1012,8 @@ static ssize_t show_alarm(struct device *dev,
return sprintf(buf, "0\n");
}
-static DEVICE_ATTR(alarm_mask, S_IRUGO, show_alarm_mask, NULL);
+static DEVICE_ATTR(alarm_mask, S_IWUSR | S_IRUGO, show_alarm_mask,
+ set_alarm_mask);
static DEVICE_ATTR(num_temp_sensors, S_IWUSR | S_IRUGO, show_num_temp_sensors,
set_num_temp_sensors);
static DEVICE_ATTR(auto_update_interval, S_IWUSR | S_IRUGO,
@@ -1038,6 +1133,8 @@ static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
+static DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO, show_pwm_freq, set_pwm_freq);
+
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
@@ -1154,6 +1251,7 @@ static struct attribute *adt7470_attrs[] = {
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
&sensor_dev_attr_force_pwm_max.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &dev_attr_pwm1_freq.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
&sensor_dev_attr_pwm4.dev_attr.attr,
@@ -1256,7 +1354,6 @@ static int adt7470_probe(struct i2c_client *client,
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- init_completion(&data->auto_update_stop);
data->auto_update = kthread_run(adt7470_update_thread, client, "%s",
dev_name(hwmon_dev));
if (IS_ERR(data->auto_update)) {
@@ -1271,7 +1368,6 @@ static int adt7470_remove(struct i2c_client *client)
struct adt7470_data *data = i2c_get_clientdata(client);
kthread_stop(data->auto_update);
- wait_for_completion(&data->auto_update_stop);
return 0;
}
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index 48633e541dc3..0f0277e7aae5 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -36,6 +36,10 @@
#define FTS_EVENT_STATUS_REG 0x0006
#define FTS_GLOBAL_CONTROL_REG 0x0007
+#define FTS_DEVICE_DETECT_REG_1 0x0C
+#define FTS_DEVICE_DETECT_REG_2 0x0D
+#define FTS_DEVICE_DETECT_REG_3 0x0E
+
#define FTS_SENSOR_EVENT_REG 0x0010
#define FTS_FAN_EVENT_REG 0x0014
@@ -54,6 +58,8 @@
#define FTS_NO_TEMP_SENSORS 0x10
#define FTS_NO_VOLT_SENSORS 0x04
+static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
+
static struct i2c_device_id fts_id[] = {
{ "ftsteutates", 0 },
{ }
@@ -734,6 +740,42 @@ static const struct attribute_group *fts_attr_groups[] = {
/*****************************************************************************/
/* Module initialization / remove functions */
/*****************************************************************************/
+static int fts_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int val;
+
+ /* detection works with revision 0x2b or greater */
+ val = i2c_smbus_read_byte_data(client, FTS_DEVICE_REVISION_REG);
+ if (val < 0x2b)
+ return -ENODEV;
+
+ /* Device Detect Regs must contain 0x17, 0x34 and 0x54 */
+ val = i2c_smbus_read_byte_data(client, FTS_DEVICE_DETECT_REG_1);
+ if (val != 0x17)
+ return -ENODEV;
+
+ val = i2c_smbus_read_byte_data(client, FTS_DEVICE_DETECT_REG_2);
+ if (val != 0x34)
+ return -ENODEV;
+
+ val = i2c_smbus_read_byte_data(client, FTS_DEVICE_DETECT_REG_3);
+ if (val != 0x54)
+ return -ENODEV;
+
+ /*
+ * 0x10 == Baseboard Management Controller, 0x01 == Teutates
+ * Device ID Reg needs to be 0x11
+ */
+ val = i2c_smbus_read_byte_data(client, FTS_DEVICE_ID_REG);
+ if (val != 0x11)
+ return -ENODEV;
+
+ strlcpy(info->type, fts_id[0].name, I2C_NAME_SIZE);
+ info->flags = 0;
+ return 0;
+}
+
static int fts_remove(struct i2c_client *client)
{
struct fts_data *data = dev_get_drvdata(&client->dev);
@@ -804,12 +846,15 @@ static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Module Details */
/*****************************************************************************/
static struct i2c_driver fts_driver = {
+ .class = I2C_CLASS_HWMON,
.driver = {
.name = "ftsteutates",
},
.id_table = fts_id,
.probe = fts_probe,
.remove = fts_remove,
+ .detect = fts_detect,
+ .address_list = normal_i2c,
};
module_i2c_driver(fts_driver);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a26c385a435b..adae6848ffb2 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -12,17 +12,17 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
-#include <linux/idr.h>
-#include <linux/hwmon.h>
#include <linux/gfp.h>
-#include <linux/spinlock.h>
+#include <linux/hwmon.h>
+#include <linux/idr.h>
+#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/thermal.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -30,9 +30,35 @@
struct hwmon_device {
const char *name;
struct device dev;
+ const struct hwmon_chip_info *chip;
+
+ struct attribute_group group;
+ const struct attribute_group **groups;
};
+
#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+struct hwmon_device_attribute {
+ struct device_attribute dev_attr;
+ const struct hwmon_ops *ops;
+ enum hwmon_sensor_types type;
+ u32 attr;
+ int index;
+};
+
+#define to_hwmon_attr(d) \
+ container_of(d, struct hwmon_device_attribute, dev_attr)
+
+/*
+ * Thermal zone information.
+ * In addition to the reference to the hwmon device,
+ * it also provides the sensor index.
+ */
+struct hwmon_thermal_data {
+ struct hwmon_device *hwdev; /* Reference to hwmon device */
+ int index; /* sensor index */
+};
+
static ssize_t
show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -80,25 +106,409 @@ static struct class hwmon_class = {
static DEFINE_IDA(hwmon_ida);
-/**
- * hwmon_device_register_with_groups - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @groups: List of attribute groups to create
- *
- * hwmon_device_unregister() must be called when the device is no
- * longer needed.
- *
- * Returns the pointer to the new device.
+/* Thermal zone handling */
+
+/*
+ * The complex conditional is necessary to avoid a cyclic dependency
+ * between hwmon and thermal_sys modules.
*/
-struct device *
-hwmon_device_register_with_groups(struct device *dev, const char *name,
- void *drvdata,
- const struct attribute_group **groups)
+#if IS_REACHABLE(CONFIG_THERMAL) && defined(CONFIG_THERMAL_OF) && \
+ (!defined(CONFIG_THERMAL_HWMON) || \
+ !(defined(MODULE) && IS_MODULE(CONFIG_THERMAL)))
+static int hwmon_thermal_get_temp(void *data, int *temp)
+{
+ struct hwmon_thermal_data *tdata = data;
+ struct hwmon_device *hwdev = tdata->hwdev;
+ int ret;
+ long t;
+
+ ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ tdata->index, &t);
+ if (ret < 0)
+ return ret;
+
+ *temp = t;
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+ .get_temp = hwmon_thermal_get_temp,
+};
+
+static int hwmon_thermal_add_sensor(struct device *dev,
+ struct hwmon_device *hwdev, int index)
+{
+ struct hwmon_thermal_data *tdata;
+
+ tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
+ if (!tdata)
+ return -ENOMEM;
+
+ tdata->hwdev = hwdev;
+ tdata->index = index;
+
+ devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ &hwmon_thermal_ops);
+
+ return 0;
+}
+#else
+static int hwmon_thermal_add_sensor(struct device *dev,
+ struct hwmon_device *hwdev, int index)
+{
+ return 0;
+}
+#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
+
+/* sysfs attribute management */
+
+static ssize_t hwmon_attr_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ long val;
+ int ret;
+
+ ret = hattr->ops->read(dev, hattr->type, hattr->attr, hattr->index,
+ &val);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%ld\n", val);
+}
+
+static ssize_t hwmon_attr_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = hattr->ops->write(dev, hattr->type, hattr->attr, hattr->index,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static int hwmon_attr_base(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_in)
+ return 0;
+ return 1;
+}
+
+static struct attribute *hwmon_genattr(struct device *dev,
+ const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr,
+ int index,
+ const char *template,
+ const struct hwmon_ops *ops)
+{
+ struct hwmon_device_attribute *hattr;
+ struct device_attribute *dattr;
+ struct attribute *a;
+ umode_t mode;
+ char *name;
+
+ /* The attribute is invisible if there is no template string */
+ if (!template)
+ return ERR_PTR(-ENOENT);
+
+ mode = ops->is_visible(drvdata, type, attr, index);
+ if (!mode)
+ return ERR_PTR(-ENOENT);
+
+ if ((mode & S_IRUGO) && !ops->read)
+ return ERR_PTR(-EINVAL);
+ if ((mode & S_IWUGO) && !ops->write)
+ return ERR_PTR(-EINVAL);
+
+ if (type == hwmon_chip) {
+ name = (char *)template;
+ } else {
+ name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+ scnprintf(name, strlen(template) + 16, template,
+ index + hwmon_attr_base(type));
+ }
+
+ hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
+ hattr->type = type;
+ hattr->attr = attr;
+ hattr->index = index;
+ hattr->ops = ops;
+
+ dattr = &hattr->dev_attr;
+ dattr->show = hwmon_attr_show;
+ dattr->store = hwmon_attr_store;
+
+ a = &dattr->attr;
+ sysfs_attr_init(a);
+ a->name = name;
+ a->mode = mode;
+
+ return a;
+}
+
+static const char * const hwmon_chip_attr_templates[] = {
+ [hwmon_chip_temp_reset_history] = "temp_reset_history",
+ [hwmon_chip_in_reset_history] = "in_reset_history",
+ [hwmon_chip_curr_reset_history] = "curr_reset_history",
+ [hwmon_chip_power_reset_history] = "power_reset_history",
+ [hwmon_chip_update_interval] = "update_interval",
+ [hwmon_chip_alarms] = "alarms",
+};
+
+static const char * const hwmon_temp_attr_templates[] = {
+ [hwmon_temp_input] = "temp%d_input",
+ [hwmon_temp_type] = "temp%d_type",
+ [hwmon_temp_lcrit] = "temp%d_lcrit",
+ [hwmon_temp_lcrit_hyst] = "temp%d_lcrit_hyst",
+ [hwmon_temp_min] = "temp%d_min",
+ [hwmon_temp_min_hyst] = "temp%d_min_hyst",
+ [hwmon_temp_max] = "temp%d_max",
+ [hwmon_temp_max_hyst] = "temp%d_max_hyst",
+ [hwmon_temp_crit] = "temp%d_crit",
+ [hwmon_temp_crit_hyst] = "temp%d_crit_hyst",
+ [hwmon_temp_emergency] = "temp%d_emergency",
+ [hwmon_temp_emergency_hyst] = "temp%d_emergency_hyst",
+ [hwmon_temp_alarm] = "temp%d_alarm",
+ [hwmon_temp_lcrit_alarm] = "temp%d_lcrit_alarm",
+ [hwmon_temp_min_alarm] = "temp%d_min_alarm",
+ [hwmon_temp_max_alarm] = "temp%d_max_alarm",
+ [hwmon_temp_crit_alarm] = "temp%d_crit_alarm",
+ [hwmon_temp_emergency_alarm] = "temp%d_emergency_alarm",
+ [hwmon_temp_fault] = "temp%d_fault",
+ [hwmon_temp_offset] = "temp%d_offset",
+ [hwmon_temp_label] = "temp%d_label",
+ [hwmon_temp_lowest] = "temp%d_lowest",
+ [hwmon_temp_highest] = "temp%d_highest",
+ [hwmon_temp_reset_history] = "temp%d_reset_history",
+};
+
+static const char * const hwmon_in_attr_templates[] = {
+ [hwmon_in_input] = "in%d_input",
+ [hwmon_in_min] = "in%d_min",
+ [hwmon_in_max] = "in%d_max",
+ [hwmon_in_lcrit] = "in%d_lcrit",
+ [hwmon_in_crit] = "in%d_crit",
+ [hwmon_in_average] = "in%d_average",
+ [hwmon_in_lowest] = "in%d_lowest",
+ [hwmon_in_highest] = "in%d_highest",
+ [hwmon_in_reset_history] = "in%d_reset_history",
+ [hwmon_in_label] = "in%d_label",
+ [hwmon_in_alarm] = "in%d_alarm",
+ [hwmon_in_min_alarm] = "in%d_min_alarm",
+ [hwmon_in_max_alarm] = "in%d_max_alarm",
+ [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
+ [hwmon_in_crit_alarm] = "in%d_crit_alarm",
+};
+
+static const char * const hwmon_curr_attr_templates[] = {
+ [hwmon_curr_input] = "curr%d_input",
+ [hwmon_curr_min] = "curr%d_min",
+ [hwmon_curr_max] = "curr%d_max",
+ [hwmon_curr_lcrit] = "curr%d_lcrit",
+ [hwmon_curr_crit] = "curr%d_crit",
+ [hwmon_curr_average] = "curr%d_average",
+ [hwmon_curr_lowest] = "curr%d_lowest",
+ [hwmon_curr_highest] = "curr%d_highest",
+ [hwmon_curr_reset_history] = "curr%d_reset_history",
+ [hwmon_curr_label] = "curr%d_label",
+ [hwmon_curr_alarm] = "curr%d_alarm",
+ [hwmon_curr_min_alarm] = "curr%d_min_alarm",
+ [hwmon_curr_max_alarm] = "curr%d_max_alarm",
+ [hwmon_curr_lcrit_alarm] = "curr%d_lcrit_alarm",
+ [hwmon_curr_crit_alarm] = "curr%d_crit_alarm",
+};
+
+static const char * const hwmon_power_attr_templates[] = {
+ [hwmon_power_average] = "power%d_average",
+ [hwmon_power_average_interval] = "power%d_average_interval",
+ [hwmon_power_average_interval_max] = "power%d_interval_max",
+ [hwmon_power_average_interval_min] = "power%d_interval_min",
+ [hwmon_power_average_highest] = "power%d_average_highest",
+ [hwmon_power_average_lowest] = "power%d_average_lowest",
+ [hwmon_power_average_max] = "power%d_average_max",
+ [hwmon_power_average_min] = "power%d_average_min",
+ [hwmon_power_input] = "power%d_input",
+ [hwmon_power_input_highest] = "power%d_input_highest",
+ [hwmon_power_input_lowest] = "power%d_input_lowest",
+ [hwmon_power_reset_history] = "power%d_reset_history",
+ [hwmon_power_accuracy] = "power%d_accuracy",
+ [hwmon_power_cap] = "power%d_cap",
+ [hwmon_power_cap_hyst] = "power%d_cap_hyst",
+ [hwmon_power_cap_max] = "power%d_cap_max",
+ [hwmon_power_cap_min] = "power%d_cap_min",
+ [hwmon_power_max] = "power%d_max",
+ [hwmon_power_crit] = "power%d_crit",
+ [hwmon_power_label] = "power%d_label",
+ [hwmon_power_alarm] = "power%d_alarm",
+ [hwmon_power_cap_alarm] = "power%d_cap_alarm",
+ [hwmon_power_max_alarm] = "power%d_max_alarm",
+ [hwmon_power_crit_alarm] = "power%d_crit_alarm",
+};
+
+static const char * const hwmon_energy_attr_templates[] = {
+ [hwmon_energy_input] = "energy%d_input",
+ [hwmon_energy_label] = "energy%d_label",
+};
+
+static const char * const hwmon_humidity_attr_templates[] = {
+ [hwmon_humidity_input] = "humidity%d_input",
+ [hwmon_humidity_label] = "humidity%d_label",
+ [hwmon_humidity_min] = "humidity%d_min",
+ [hwmon_humidity_min_hyst] = "humidity%d_min_hyst",
+ [hwmon_humidity_max] = "humidity%d_max",
+ [hwmon_humidity_max_hyst] = "humidity%d_max_hyst",
+ [hwmon_humidity_alarm] = "humidity%d_alarm",
+ [hwmon_humidity_fault] = "humidity%d_fault",
+};
+
+static const char * const hwmon_fan_attr_templates[] = {
+ [hwmon_fan_input] = "fan%d_input",
+ [hwmon_fan_label] = "fan%d_label",
+ [hwmon_fan_min] = "fan%d_min",
+ [hwmon_fan_max] = "fan%d_max",
+ [hwmon_fan_div] = "fan%d_div",
+ [hwmon_fan_pulses] = "fan%d_pulses",
+ [hwmon_fan_target] = "fan%d_target",
+ [hwmon_fan_alarm] = "fan%d_alarm",
+ [hwmon_fan_min_alarm] = "fan%d_min_alarm",
+ [hwmon_fan_max_alarm] = "fan%d_max_alarm",
+ [hwmon_fan_fault] = "fan%d_fault",
+};
+
+static const char * const hwmon_pwm_attr_templates[] = {
+ [hwmon_pwm_input] = "pwm%d",
+ [hwmon_pwm_enable] = "pwm%d_enable",
+ [hwmon_pwm_mode] = "pwm%d_mode",
+ [hwmon_pwm_freq] = "pwm%d_freq",
+};
+
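+/* Name templates, indexed first by sensor type, then by attribute. */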
+static const char * const *__templates[] = {
+ [hwmon_chip] = hwmon_chip_attr_templates,
+ [hwmon_temp] = hwmon_temp_attr_templates,
+ [hwmon_in] = hwmon_in_attr_templates,
+ [hwmon_curr] = hwmon_curr_attr_templates,
+ [hwmon_power] = hwmon_power_attr_templates,
+ [hwmon_energy] = hwmon_energy_attr_templates,
+ [hwmon_humidity] = hwmon_humidity_attr_templates,
+ [hwmon_fan] = hwmon_fan_attr_templates,
+ [hwmon_pwm] = hwmon_pwm_attr_templates,
+};
+
+static const int __templates_size[] = {
+ [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attr_templates),
+ [hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates),
+ [hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates),
+ [hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
+ [hwmon_power] = ARRAY_SIZE(hwmon_power_attr_templates),
+ [hwmon_energy] = ARRAY_SIZE(hwmon_energy_attr_templates),
+ [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
+ [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
+ [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+};
+
+static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
+{
+ int i, n;
+
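+ /* Each set bit in a config word corresponds to one sysfs attribute. */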
+ for (i = n = 0; info->config[i]; i++)
+ n += hweight32(info->config[i]);
+
+ return n;
+}
+
+static int hwmon_genattrs(struct device *dev,
+ const void *drvdata,
+ struct attribute **attrs,
+ const struct hwmon_ops *ops,
+ const struct hwmon_channel_info *info)
+{
+ const char * const *templates;
+ int template_size;
+ int i, aindex = 0;
+
+ if (info->type >= ARRAY_SIZE(__templates))
+ return -EINVAL;
+
+ templates = __templates[info->type];
+ template_size = __templates_size[info->type];
+
+ for (i = 0; info->config[i]; i++) {
+ u32 attr_mask = info->config[i];
+ u32 attr;
+
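+ /* Create one attribute for every bit set in this channel's config mask. */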
+ while (attr_mask) {
+ struct attribute *a;
+
+ attr = __ffs(attr_mask);
+ attr_mask &= ~BIT(attr);
+ if (attr >= template_size)
+ return -EINVAL;
+ a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ templates[attr], ops);
+ if (IS_ERR(a)) {
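+ /* -ENOENT only means the attribute is hidden; skip it quietly. */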
+ if (PTR_ERR(a) != -ENOENT)
+ return PTR_ERR(a);
+ continue;
+ }
+ attrs[aindex++] = a;
+ }
+ }
+ return aindex;
+}
+
+static struct attribute **
+__hwmon_create_attrs(struct device *dev, const void *drvdata,
+ const struct hwmon_chip_info *chip)
+{
+ int ret, i, aindex = 0, nattrs = 0;
+ struct attribute **attrs;
+
+ for (i = 0; chip->info[i]; i++)
+ nattrs += hwmon_num_channel_attrs(chip->info[i]);
+
+ if (nattrs == 0)
+ return ERR_PTR(-EINVAL);
+
+ attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; chip->info[i]; i++) {
+ ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ chip->info[i]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ aindex += ret;
+ }
+
+ return attrs;
+}
+
+static struct device *
+__hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ const struct hwmon_chip_info *chip,
+ const struct attribute_group **groups)
{
struct hwmon_device *hwdev;
- int err, id;
+ struct device *hdev;
+ int i, j, err, id;
/* Do not accept invalid characters in hwmon name attribute */
if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
@@ -114,28 +524,128 @@ hwmon_device_register_with_groups(struct device *dev, const char *name,
goto ida_remove;
}
+ hdev = &hwdev->dev;
+
+ if (chip && chip->ops->is_visible) {
+ struct attribute **attrs;
+ int ngroups = 2;
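+ /* Start at 2: one slot for the generated group, one for the NULL terminator. */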
+
+ if (groups)
+ for (i = 0; groups[i]; i++)
+ ngroups++;
+
+ hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
+ GFP_KERNEL);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+
+ attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ if (IS_ERR(attrs)) {
+ err = PTR_ERR(attrs);
+ goto free_hwmon;
+ }
+
+ hwdev->group.attrs = attrs;
+ ngroups = 0;
+ hwdev->groups[ngroups++] = &hwdev->group;
+
+ if (groups) {
+ for (i = 0; groups[i]; i++)
+ hwdev->groups[ngroups++] = groups[i];
+ }
+
+ hdev->groups = hwdev->groups;
+ } else {
+ hdev->groups = groups;
+ }
+
hwdev->name = name;
- hwdev->dev.class = &hwmon_class;
- hwdev->dev.parent = dev;
- hwdev->dev.groups = groups;
- hwdev->dev.of_node = dev ? dev->of_node : NULL;
- dev_set_drvdata(&hwdev->dev, drvdata);
- dev_set_name(&hwdev->dev, HWMON_ID_FORMAT, id);
- err = device_register(&hwdev->dev);
+ hdev->class = &hwmon_class;
+ hdev->parent = dev;
+ hdev->of_node = dev ? dev->of_node : NULL;
+ hwdev->chip = chip;
+ dev_set_drvdata(hdev, drvdata);
+ dev_set_name(hdev, HWMON_ID_FORMAT, id);
+ err = device_register(hdev);
if (err)
- goto free;
+ goto free_hwmon;
+
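+ /*
+ * If the chip asked for it (HWMON_C_REGISTER_TZ), register a thermal
+ * zone sensor for every visible temperature input channel.
+ */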
+ if (chip && chip->ops->is_visible && chip->ops->read &&
+ chip->info[0]->type == hwmon_chip &&
+ (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
+ const struct hwmon_channel_info **info = chip->info;
+
+ for (i = 1; info[i]; i++) {
+ if (info[i]->type != hwmon_temp)
+ continue;
+
+ for (j = 0; info[i]->config[j]; j++) {
+ if (!chip->ops->is_visible(drvdata, hwmon_temp,
+ hwmon_temp_input, j))
+ continue;
+ if (info[i]->config[j] & HWMON_T_INPUT)
+ hwmon_thermal_add_sensor(dev, hwdev, j);
+ }
+ }
+ }
- return &hwdev->dev;
+ return hdev;
-free:
+free_hwmon:
kfree(hwdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
}
+
+/**
+ * hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups)
+{
+ return __hwmon_device_register(dev, name, drvdata, NULL, groups);
+}
EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
/**
+ * hwmon_device_register_with_info - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: Pointer to hwmon chip information
+ * @groups: Pointer to list of driver-specific attribute groups
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *
+hwmon_device_register_with_info(struct device *dev, const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *chip,
+ const struct attribute_group **groups)
+{
+ if (chip && (!chip->ops || !chip->info))
+ return ERR_PTR(-EINVAL);
+
+ return __hwmon_device_register(dev, name, drvdata, chip, groups);
+}
+EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
+
+/**
* hwmon_device_register - register w/ hwmon
* @dev: the device to register
*
@@ -213,6 +723,48 @@ error:
}
EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+/**
+ * devm_hwmon_device_register_with_info - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: Pointer to hwmon chip information
+ * @groups: Pointer to list of driver-specific attribute groups
+ *
+ * Returns the pointer to the new device. The new device is automatically
+ * unregistered with the parent device.
+ */
+struct device *
+devm_hwmon_device_register_with_info(struct device *dev, const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *chip,
+ const struct attribute_group **groups)
+{
+ struct device **ptr, *hwdev;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hwdev = hwmon_device_register_with_info(dev, name, drvdata, chip,
+ groups);
+ if (IS_ERR(hwdev))
+ goto error;
+
+ *ptr = hwdev;
+ devres_add(dev, ptr);
+
+ return hwdev;
+
+error:
+ devres_free(ptr);
+ return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_info);
+
static int devm_hwmon_match(struct device *dev, void *res, void *data)
{
struct device **hwdev = res;
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 55b5a8ff1cfe..6d2e6605751c 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -143,13 +143,11 @@ static void __init make_sensor_label(struct device_node *np,
if (cpuid >= 0)
/*
* The digital thermal sensors are associated
- * with a core. Let's print out the range of
- * cpu ids corresponding to the hardware
- * threads of the core.
+ * with a core.
*/
n += snprintf(sdata->label + n,
- sizeof(sdata->label) - n, " %d-%d",
- cpuid, cpuid + threads_per_core - 1);
+ sizeof(sdata->label) - n, " %d",
+ cpuid);
else
n += snprintf(sdata->label + n,
sizeof(sdata->label) - n, " phy%d", id);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 89449871bca7..f6a76679c650 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -73,8 +73,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
name = dev->of_node->name;
channels = iio_channel_get_all(dev);
- if (IS_ERR(channels))
+ if (IS_ERR(channels)) {
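+ /* No channels yet usually means the provider has not probed; try again later. */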
+ if (PTR_ERR(channels) == -ENODEV)
+ return -EPROBE_DEFER;
return PTR_ERR(channels);
+ }
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
if (st == NULL) {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 4667012b46b7..ad82cb28d87a 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2011,10 +2011,10 @@ static struct attribute *it87_attributes_in[] = {
&sensor_dev_attr_in7_beep.dev_attr.attr, /* 39 */
&sensor_dev_attr_in8_input.dev_attr.attr, /* 40 */
- &sensor_dev_attr_in9_input.dev_attr.attr, /* 41 */
- &sensor_dev_attr_in10_input.dev_attr.attr, /* 41 */
- &sensor_dev_attr_in11_input.dev_attr.attr, /* 41 */
- &sensor_dev_attr_in12_input.dev_attr.attr, /* 41 */
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_in10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ &sensor_dev_attr_in12_input.dev_attr.attr,
NULL
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 9d5f85f3384f..1bf22eff0b08 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -28,7 +28,6 @@
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -254,170 +253,148 @@ abort:
return ret;
}
-/* sysfs functions */
-
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct jc42_data *data = jc42_update_device(dev);
- if (IS_ERR(data))
- return PTR_ERR(data);
- return sprintf(buf, "%d\n",
- jc42_temp_from_reg(data->temp[attr->index]));
-}
-
-static ssize_t show_temp_hyst(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int jc42_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct jc42_data *data = jc42_update_device(dev);
int temp, hyst;
if (IS_ERR(data))
return PTR_ERR(data);
- temp = jc42_temp_from_reg(data->temp[attr->index]);
- hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
- >> JC42_CFG_HYST_SHIFT];
- return sprintf(buf, "%d\n", temp - hyst);
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = jc42_temp_from_reg(data->temp[t_input]);
+ return 0;
+ case hwmon_temp_min:
+ *val = jc42_temp_from_reg(data->temp[t_min]);
+ return 0;
+ case hwmon_temp_max:
+ *val = jc42_temp_from_reg(data->temp[t_max]);
+ return 0;
+ case hwmon_temp_crit:
+ *val = jc42_temp_from_reg(data->temp[t_crit]);
+ return 0;
+ case hwmon_temp_max_hyst:
+ temp = jc42_temp_from_reg(data->temp[t_max]);
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
+ *val = temp - hyst;
+ return 0;
+ case hwmon_temp_crit_hyst:
+ temp = jc42_temp_from_reg(data->temp[t_crit]);
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
+ *val = temp - hyst;
+ return 0;
+ case hwmon_temp_min_alarm:
+ *val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1;
+ return 0;
+ case hwmon_temp_max_alarm:
+ *val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1;
+ return 0;
+ case hwmon_temp_crit_alarm:
+ *val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct jc42_data *data = dev_get_drvdata(dev);
- int err, ret = count;
- int nr = attr->index;
- long val;
+ struct i2c_client *client = data->client;
+ int diff, hyst;
+ int ret;
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
mutex_lock(&data->update_lock);
- data->temp[nr] = jc42_temp_to_reg(val, data->extended);
- err = i2c_smbus_write_word_swapped(data->client, temp_regs[nr],
- data->temp[nr]);
- if (err < 0)
- ret = err;
- mutex_unlock(&data->update_lock);
- return ret;
-}
-/*
- * JC42.4 compliant chips only support four hysteresis values.
- * Pick best choice and go from there.
- */
-static ssize_t set_temp_crit_hyst(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct jc42_data *data = dev_get_drvdata(dev);
- long val;
- int diff, hyst;
- int err;
- int ret = count;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
- JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
- diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
-
- hyst = 0;
- if (diff > 0) {
- if (diff < 2250)
- hyst = 1; /* 1.5 degrees C */
- else if (diff < 4500)
- hyst = 2; /* 3.0 degrees C */
- else
- hyst = 3; /* 6.0 degrees C */
+ switch (attr) {
+ case hwmon_temp_min:
+ data->temp[t_min] = jc42_temp_to_reg(val, data->extended);
+ ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min],
+ data->temp[t_min]);
+ break;
+ case hwmon_temp_max:
+ data->temp[t_max] = jc42_temp_to_reg(val, data->extended);
+ ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max],
+ data->temp[t_max]);
+ break;
+ case hwmon_temp_crit:
+ data->temp[t_crit] = jc42_temp_to_reg(val, data->extended);
+ ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit],
+ data->temp[t_crit]);
+ break;
+ case hwmon_temp_crit_hyst:
+ /*
+ * JC42.4 compliant chips only support four hysteresis values.
+ * Pick best choice and go from there.
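+ * For example, a value 2000 millidegrees below t_crit falls in the
+ * "diff < 2250" bucket and selects the 1.5 degree step.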
+ */
+ val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED
+ : JC42_TEMP_MIN) - 6000,
+ JC42_TEMP_MAX);
+ diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
+ hyst = 0;
+ if (diff > 0) {
+ if (diff < 2250)
+ hyst = 1; /* 1.5 degrees C */
+ else if (diff < 4500)
+ hyst = 2; /* 3.0 degrees C */
+ else
+ hyst = 3; /* 6.0 degrees C */
+ }
+ data->config = (data->config & ~JC42_CFG_HYST_MASK) |
+ (hyst << JC42_CFG_HYST_SHIFT);
+ ret = i2c_smbus_write_word_swapped(data->client,
+ JC42_REG_CONFIG,
+ data->config);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
}
- mutex_lock(&data->update_lock);
- data->config = (data->config & ~JC42_CFG_HYST_MASK)
- | (hyst << JC42_CFG_HYST_SHIFT);
- err = i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
- data->config);
- if (err < 0)
- ret = err;
mutex_unlock(&data->update_lock);
- return ret;
-}
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u16 bit = to_sensor_dev_attr(attr)->index;
- struct jc42_data *data = jc42_update_device(dev);
- u16 val;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- val = data->temp[t_input];
- if (bit != JC42_ALARM_CRIT_BIT && (data->config & JC42_CFG_CRIT_ONLY))
- val = 0;
- return sprintf(buf, "%u\n", (val >> bit) & 1);
+ return ret;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, set_temp, t_crit);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp, set_temp, t_min);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, set_temp, t_max);
-
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_hyst,
- set_temp_crit_hyst, t_crit);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max);
-
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
- JC42_ALARM_CRIT_BIT);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
- JC42_ALARM_MIN_BIT);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
- JC42_ALARM_MAX_BIT);
-
-static struct attribute *jc42_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- NULL
-};
-
-static umode_t jc42_attribute_mode(struct kobject *kobj,
- struct attribute *attr, int index)
+static umode_t jc42_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct jc42_data *data = dev_get_drvdata(dev);
+ const struct jc42_data *data = _data;
unsigned int config = data->config;
- bool readonly;
-
- if (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr)
- readonly = config & JC42_CFG_TCRIT_LOCK;
- else if (attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_max.dev_attr.attr)
- readonly = config & JC42_CFG_EVENT_LOCK;
- else if (attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)
- readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
- else
- readonly = true;
-
- return S_IRUGO | (readonly ? 0 : S_IWUSR);
+ umode_t mode = S_IRUGO;
+
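+ /* Limits become read-only when the matching lock bit is set. */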
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ if (!(config & JC42_CFG_EVENT_LOCK))
+ mode |= S_IWUSR;
+ break;
+ case hwmon_temp_crit:
+ if (!(config & JC42_CFG_TCRIT_LOCK))
+ mode |= S_IWUSR;
+ break;
+ case hwmon_temp_crit_hyst:
+ if (!(config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK)))
+ mode |= S_IWUSR;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ break;
+ default:
+ mode = 0;
+ break;
+ }
+ return mode;
}
-static const struct attribute_group jc42_group = {
- .attrs = jc42_attributes,
- .is_visible = jc42_attribute_mode,
-};
-__ATTRIBUTE_GROUPS(jc42);
-
/* Return 0 if detection is successful, -ENODEV otherwise */
static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
{
@@ -450,6 +427,34 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
+static const u32 jc42_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_HYST | HWMON_T_CRIT_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info jc42_temp = {
+ .type = hwmon_temp,
+ .config = jc42_temp_config,
+};
+
+static const struct hwmon_channel_info *jc42_info[] = {
+ &jc42_temp,
+ NULL
+};
+
+static const struct hwmon_ops jc42_hwmon_ops = {
+ .is_visible = jc42_is_visible,
+ .read = jc42_read,
+ .write = jc42_write,
+};
+
+static const struct hwmon_chip_info jc42_chip_info = {
+ .ops = &jc42_hwmon_ops,
+ .info = jc42_info,
+};
+
static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -482,9 +487,9 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- jc42_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &jc42_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 92f9d4bbf597..eff3b24d8473 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -28,7 +28,6 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/thermal.h>
#include "lm75.h"
@@ -88,56 +87,75 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
-/* sysfs attributes for hwmon */
-
-static int lm75_read_temp(void *dev, int *temp)
+static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
struct lm75_data *data = dev_get_drvdata(dev);
- unsigned int _temp;
- int err;
-
- err = regmap_read(data->regmap, LM75_REG_TEMP, &_temp);
- if (err < 0)
- return err;
-
- *temp = lm75_reg_to_mc(_temp, data->resolution);
-
+ unsigned int regval;
+ int err, reg;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->sample_time;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = LM75_REG_TEMP;
+ break;
+ case hwmon_temp_max:
+ reg = LM75_REG_MAX;
+ break;
+ case hwmon_temp_max_hyst:
+ reg = LM75_REG_HYST;
+ break;
+ default:
+ return -EINVAL;
+ }
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ *val = lm75_reg_to_mc(regval, data->resolution);
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
- char *buf)
+static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = dev_get_drvdata(dev);
- unsigned int temp = 0;
- int err;
-
- err = regmap_read(data->regmap, attr->index, &temp);
- if (err < 0)
- return err;
-
- return sprintf(buf, "%ld\n", lm75_reg_to_mc(temp, data->resolution));
-}
-
-static ssize_t set_temp(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct lm75_data *data = dev_get_drvdata(dev);
- long temp;
- int error;
u8 resolution;
+ int reg;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
- error = kstrtol(buf, 10, &temp);
- if (error)
- return error;
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = LM75_REG_MAX;
+ break;
+ case hwmon_temp_max_hyst:
+ reg = LM75_REG_HYST;
+ break;
+ default:
+ return -EINVAL;
+ }
/*
* Resolution of limit registers is assumed to be the same as the
* temperature input register resolution unless given explicitly.
*/
- if (attr->index && data->resolution_limits)
+ if (data->resolution_limits)
resolution = data->resolution_limits;
else
resolution = data->resolution;
@@ -145,45 +163,77 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
1000) << (16 - resolution);
- error = regmap_write(data->regmap, attr->index, temp);
- if (error < 0)
- return error;
- return count;
+ return regmap_write(data->regmap, reg, temp);
}
-static ssize_t show_update_interval(struct device *dev,
- struct device_attribute *da, char *buf)
+static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct lm75_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", data->sample_time);
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return S_IRUGO;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return S_IRUGO;
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ return S_IRUGO | S_IWUSR;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_temp, set_temp, LM75_REG_MAX);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_temp, set_temp, LM75_REG_HYST);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, LM75_REG_TEMP);
-static DEVICE_ATTR(update_interval, S_IRUGO, show_update_interval, NULL);
+/*-----------------------------------------------------------------------*/
-static struct attribute *lm75_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &dev_attr_update_interval.attr,
+/* device probe and removal */
- NULL
+/* chip configuration */
+
+static const u32 lm75_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
};
-ATTRIBUTE_GROUPS(lm75);
-static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
- .get_temp = lm75_read_temp,
+static const struct hwmon_channel_info lm75_chip = {
+ .type = hwmon_chip,
+ .config = lm75_chip_config,
};
-/*-----------------------------------------------------------------------*/
+static const u32 lm75_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
+ 0
+};
-/* device probe and removal */
+static const struct hwmon_channel_info lm75_temp = {
+ .type = hwmon_temp,
+ .config = lm75_temp_config,
+};
+
+static const struct hwmon_channel_info *lm75_info[] = {
+ &lm75_chip,
+ &lm75_temp,
+ NULL
+};
+
+static const struct hwmon_ops lm75_hwmon_ops = {
+ .is_visible = lm75_is_visible,
+ .read = lm75_read,
+ .write = lm75_write,
+};
+
+static const struct hwmon_chip_info lm75_chip_info = {
+ .ops = &lm75_hwmon_ops,
+ .info = lm75_info,
+};
static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
{
@@ -337,15 +387,12 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_dbg(dev, "Config %02x\n", new);
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, lm75_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &lm75_chip_info,
+ NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- devm_thermal_zone_of_sensor_register(hwmon_dev, 0,
- hwmon_dev,
- &lm75_of_thermal_ops);
-
dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
return 0;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 496e771b363f..322ed9272811 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -89,7 +89,6 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
@@ -326,7 +325,7 @@ static const struct lm90_params lm90_params[] = {
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
- }
+ },
};
/*
@@ -365,7 +364,10 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
- const struct attribute_group *groups[6];
+ u32 channel_config[4];
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
struct mutex update_lock;
bool valid; /* true if register values are valid */
unsigned long last_updated; /* in jiffies */
@@ -489,11 +491,11 @@ static inline int lm90_select_remote_channel(struct i2c_client *client,
* client->update_lock must be held when calling this function (unless we are
* in detection or initialization steps).
*/
-static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
- unsigned int interval)
+static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
+ unsigned int interval)
{
- int i;
unsigned int update_interval;
+ int i, err;
/* Shift calculations to avoid rounding errors */
interval <<= 6;
@@ -504,8 +506,9 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
if (interval >= update_interval * 3 / 4)
break;
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i);
+ err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i);
data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
+ return err;
}
static int lm90_update_limits(struct device *dev)
@@ -604,19 +607,17 @@ static int lm90_update_limits(struct device *dev)
return 0;
}
-static struct lm90_data *lm90_update_device(struct device *dev)
+static int lm90_update_device(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long next_update;
- int val = 0;
-
- mutex_lock(&data->update_lock);
+ int val;
if (!data->valid) {
val = lm90_update_limits(dev);
if (val < 0)
- goto error;
+ return val;
}
next_update = data->last_updated +
@@ -628,53 +629,55 @@ static struct lm90_data *lm90_update_device(struct device *dev)
val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW);
if (val < 0)
- goto error;
+ return val;
data->temp8[LOCAL_LOW] = val;
val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH);
if (val < 0)
- goto error;
+ return val;
data->temp8[LOCAL_HIGH] = val;
if (data->reg_local_ext) {
val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
data->reg_local_ext);
if (val < 0)
- goto error;
+ return val;
data->temp11[LOCAL_TEMP] = val;
} else {
val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP);
if (val < 0)
- goto error;
+ return val;
data->temp11[LOCAL_TEMP] = val << 8;
}
val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
LM90_REG_R_REMOTE_TEMPL);
if (val < 0)
- goto error;
+ return val;
data->temp11[REMOTE_TEMP] = val;
val = lm90_read_reg(client, LM90_REG_R_STATUS);
if (val < 0)
- goto error;
+ return val;
data->alarms = val; /* lower 8 bit of alarms */
if (data->kind == max6696) {
val = lm90_select_remote_channel(client, data, 1);
if (val < 0)
- goto error;
+ return val;
val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
LM90_REG_R_REMOTE_TEMPL);
- if (val < 0)
- goto error;
+ if (val < 0) {
+ lm90_select_remote_channel(client, data, 0);
+ return val;
+ }
data->temp11[REMOTE2_TEMP] = val;
lm90_select_remote_channel(client, data, 0);
val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
if (val < 0)
- goto error;
+ return val;
data->alarms |= val << 8;
}
@@ -686,7 +689,7 @@ static struct lm90_data *lm90_update_device(struct device *dev)
!(data->alarms & data->alert_alarms)) {
val = lm90_read_reg(client, LM90_REG_R_CONFIG1);
if (val < 0)
- goto error;
+ return val;
if (val & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
@@ -700,13 +703,7 @@ static struct lm90_data *lm90_update_device(struct device *dev)
data->valid = true;
}
-error:
- mutex_unlock(&data->update_lock);
-
- if (val < 0)
- return ERR_PTR(val);
-
- return data;
+ return 0;
}
/*
@@ -832,52 +829,19 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
return (val + 125) / 250 * 64;
}
-/*
- * Sysfs stuff
- */
-
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
- char *buf)
+/* pec used for ADM1032 only */
+static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
+ char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm90_data *data = lm90_update_device(dev);
- int temp;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- if (data->kind == adt7461 || data->kind == tmp451)
- temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
- else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[attr->index]);
- else
- temp = temp_from_s8(data->temp8[attr->index]);
-
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index == 3)
- temp += 16000;
+ struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", temp);
+ return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
}
-static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
{
- static const u8 reg[TEMP8_REG_NUM] = {
- LM90_REG_W_LOCAL_LOW,
- LM90_REG_W_LOCAL_HIGH,
- LM90_REG_W_LOCAL_CRIT,
- LM90_REG_W_REMOTE_CRIT,
- MAX6659_REG_W_LOCAL_EMERG,
- MAX6659_REG_W_REMOTE_EMERG,
- LM90_REG_W_REMOTE_CRIT,
- MAX6659_REG_W_REMOTE_EMERG,
- };
-
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm90_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int nr = attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
long val;
int err;
@@ -885,82 +849,61 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
if (err < 0)
return err;
- /* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index == 3)
- val -= 16000;
-
- mutex_lock(&data->update_lock);
- if (data->kind == adt7461 || data->kind == tmp451)
- data->temp8[nr] = temp_to_u8_adt7461(data, val);
- else if (data->kind == max6646)
- data->temp8[nr] = temp_to_u8(val);
- else
- data->temp8[nr] = temp_to_s8(val);
-
- lm90_select_remote_channel(client, data, nr >= 6);
- i2c_smbus_write_byte_data(client, reg[nr], data->temp8[nr]);
- lm90_select_remote_channel(client, data, 0);
+ switch (val) {
+ case 0:
+ client->flags &= ~I2C_CLIENT_PEC;
+ break;
+ case 1:
+ client->flags |= I2C_CLIENT_PEC;
+ break;
+ default:
+ return -EINVAL;
+ }
- mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
+
+static int lm90_get_temp11(struct lm90_data *data, int index)
{
- struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
- struct lm90_data *data = lm90_update_device(dev);
+ s16 temp11 = data->temp11[index];
int temp;
- if (IS_ERR(data))
- return PTR_ERR(data);
-
if (data->kind == adt7461 || data->kind == tmp451)
- temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
+ temp = temp_from_u16_adt7461(data, temp11);
else if (data->kind == max6646)
- temp = temp_from_u16(data->temp11[attr->index]);
+ temp = temp_from_u16(temp11);
else
- temp = temp_from_s16(data->temp11[attr->index]);
+ temp = temp_from_s16(temp11);
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index <= 2)
+ if (data->kind == lm99 && index <= 2)
temp += 16000;
- return sprintf(buf, "%d\n", temp);
+ return temp;
}
-static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int lm90_set_temp11(struct lm90_data *data, int index, long val)
{
- struct {
+ static struct reg {
u8 high;
u8 low;
- int channel;
- } reg[5] = {
- { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL, 0 },
- { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL, 0 },
- { LM90_REG_W_REMOTE_OFFSH, LM90_REG_W_REMOTE_OFFSL, 0 },
- { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL, 1 },
- { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL, 1 }
+ } reg[] = {
+ [REMOTE_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL },
+ [REMOTE_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL },
+ [REMOTE_OFFSET] = { LM90_REG_W_REMOTE_OFFSH, LM90_REG_W_REMOTE_OFFSL },
+ [REMOTE2_LOW] = { LM90_REG_W_REMOTE_LOWH, LM90_REG_W_REMOTE_LOWL },
+ [REMOTE2_HIGH] = { LM90_REG_W_REMOTE_HIGHH, LM90_REG_W_REMOTE_HIGHL }
};
-
- struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
- struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- int nr = attr->nr;
- int index = attr->index;
- long val;
+ struct reg *regp = &reg[index];
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
/* +16 degrees offset for temp2 for the LM99 */
if (data->kind == lm99 && index <= 2)
val -= 16000;
- mutex_lock(&data->update_lock);
if (data->kind == adt7461 || data->kind == tmp451)
data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
@@ -970,317 +913,383 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
else
data->temp11[index] = temp_to_s8(val) << 8;
- lm90_select_remote_channel(client, data, reg[nr].channel);
- i2c_smbus_write_byte_data(client, reg[nr].high,
+ lm90_select_remote_channel(client, data, index >= 3);
+ err = i2c_smbus_write_byte_data(client, regp->high,
data->temp11[index] >> 8);
+ if (err < 0)
+ return err;
if (data->flags & LM90_HAVE_REM_LIMIT_EXT)
- i2c_smbus_write_byte_data(client, reg[nr].low,
- data->temp11[index] & 0xff);
- lm90_select_remote_channel(client, data, 0);
+ err = i2c_smbus_write_byte_data(client, regp->low,
+ data->temp11[index] & 0xff);
- mutex_unlock(&data->update_lock);
- return count;
+ lm90_select_remote_channel(client, data, 0);
+ return err;
}
-static ssize_t show_temphyst(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
+static int lm90_get_temp8(struct lm90_data *data, int index)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm90_data *data = lm90_update_device(dev);
+ s8 temp8 = data->temp8[index];
int temp;
- if (IS_ERR(data))
- return PTR_ERR(data);
-
if (data->kind == adt7461 || data->kind == tmp451)
- temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
+ temp = temp_from_u8_adt7461(data, temp8);
else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[attr->index]);
+ temp = temp_from_u8(temp8);
else
- temp = temp_from_s8(data->temp8[attr->index]);
+ temp = temp_from_s8(temp8);
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && attr->index == 3)
+ if (data->kind == lm99 && index == 3)
temp += 16000;
- return sprintf(buf, "%d\n", temp - temp_from_s8(data->temp_hyst));
+ return temp;
}
-static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
+static int lm90_set_temp8(struct lm90_data *data, int index, long val)
{
- struct lm90_data *data = dev_get_drvdata(dev);
+ static const u8 reg[TEMP8_REG_NUM] = {
+ LM90_REG_W_LOCAL_LOW,
+ LM90_REG_W_LOCAL_HIGH,
+ LM90_REG_W_LOCAL_CRIT,
+ LM90_REG_W_REMOTE_CRIT,
+ MAX6659_REG_W_LOCAL_EMERG,
+ MAX6659_REG_W_REMOTE_EMERG,
+ LM90_REG_W_REMOTE_CRIT,
+ MAX6659_REG_W_REMOTE_EMERG,
+ };
struct i2c_client *client = data->client;
- long val;
int err;
- int temp;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
+ /* +16 degrees offset for temp2 for the LM99 */
+ if (data->kind == lm99 && index == 3)
+ val -= 16000;
- mutex_lock(&data->update_lock);
if (data->kind == adt7461 || data->kind == tmp451)
- temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
+ data->temp8[index] = temp_to_u8_adt7461(data, val);
else if (data->kind == max6646)
- temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
+ data->temp8[index] = temp_to_u8(val);
else
- temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
+ data->temp8[index] = temp_to_s8(val);
- data->temp_hyst = hyst_to_reg(temp - val);
- i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
- data->temp_hyst);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
- char *buf)
-{
- struct lm90_data *data = lm90_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
+ lm90_select_remote_channel(client, data, index >= 6);
+ err = i2c_smbus_write_byte_data(client, reg[index], data->temp8[index]);
+ lm90_select_remote_channel(client, data, 0);
- return sprintf(buf, "%d\n", data->alarms);
+ return err;
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static int lm90_get_temphyst(struct lm90_data *data, int index)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm90_data *data = lm90_update_device(dev);
- int bitnr = attr->index;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int temp;
- return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
-}
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u8_adt7461(data, data->temp8[index]);
+ else if (data->kind == max6646)
+ temp = temp_from_u8(data->temp8[index]);
+ else
+ temp = temp_from_s8(data->temp8[index]);
-static ssize_t show_update_interval(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct lm90_data *data = dev_get_drvdata(dev);
+ /* +16 degrees offset for temp2 for the LM99 */
+ if (data->kind == lm99 && index == 3)
+ temp += 16000;
- return sprintf(buf, "%u\n", data->update_interval);
+ return temp - temp_from_s8(data->temp_hyst);
}
-static ssize_t set_update_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm90_set_temphyst(struct lm90_data *data, long val)
{
- struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- unsigned long val;
+ int temp;
int err;
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- lm90_set_convrate(client, data, clamp_val(val, 0, 100000));
- mutex_unlock(&data->update_lock);
+ if (data->kind == adt7461 || data->kind == tmp451)
+ temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
+ else if (data->kind == max6646)
+ temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
+ else
+ temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
- return count;
+ data->temp_hyst = hyst_to_reg(temp - val);
+ err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
+ data->temp_hyst);
+ return err;
}
-static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp11, NULL,
- 0, LOCAL_TEMP);
-static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp11, NULL,
- 0, REMOTE_TEMP);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, LOCAL_LOW);
-static SENSOR_DEVICE_ATTR_2(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 0, REMOTE_LOW);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, LOCAL_HIGH);
-static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 1, REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, LOCAL_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, REMOTE_CRIT);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temphyst,
- set_temphyst, LOCAL_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL,
- REMOTE_CRIT);
-static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 2, REMOTE_OFFSET);
-
-/* Individual alarm files */
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-/* Raw alarm file for compatibility */
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-
-static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
- set_update_interval);
-
-static struct attribute *lm90_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
-
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &dev_attr_alarms.attr,
- &dev_attr_update_interval.attr,
- NULL
+static const u8 lm90_temp_index[3] = {
+ LOCAL_TEMP, REMOTE_TEMP, REMOTE2_TEMP
};
-static const struct attribute_group lm90_group = {
- .attrs = lm90_attributes,
+static const u8 lm90_temp_min_index[3] = {
+ LOCAL_LOW, REMOTE_LOW, REMOTE2_LOW
};
-static struct attribute *lm90_temp2_offset_attributes[] = {
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- NULL
+static const u8 lm90_temp_max_index[3] = {
+ LOCAL_HIGH, REMOTE_HIGH, REMOTE2_HIGH
};
-static const struct attribute_group lm90_temp2_offset_group = {
- .attrs = lm90_temp2_offset_attributes,
+static const u8 lm90_temp_crit_index[3] = {
+ LOCAL_CRIT, REMOTE_CRIT, REMOTE2_CRIT
};
-/*
- * Additional attributes for devices with emergency sensors
- */
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, LOCAL_EMERG);
-static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, REMOTE_EMERG);
-static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, LOCAL_EMERG);
-static SENSOR_DEVICE_ATTR(temp2_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, REMOTE_EMERG);
-
-static struct attribute *lm90_emergency_attributes[] = {
- &sensor_dev_attr_temp1_emergency.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency.dev_attr.attr,
- &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency_hyst.dev_attr.attr,
- NULL
+static const u8 lm90_temp_emerg_index[3] = {
+ LOCAL_EMERG, REMOTE_EMERG, REMOTE2_EMERG
};
-static const struct attribute_group lm90_emergency_group = {
- .attrs = lm90_emergency_attributes,
-};
+static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
+static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
+static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
+static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
+static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
-static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 13);
+static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ int err;
-static struct attribute *lm90_emergency_alarm_attributes[] = {
- &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
- NULL
-};
+ mutex_lock(&data->update_lock);
+ err = lm90_update_device(dev);
+ mutex_unlock(&data->update_lock);
+ if (err)
+ return err;
-static const struct attribute_group lm90_emergency_alarm_group = {
- .attrs = lm90_emergency_alarm_attributes,
-};
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = lm90_get_temp11(data, lm90_temp_index[channel]);
+ break;
+ case hwmon_temp_min_alarm:
+ *val = (data->alarms >> lm90_min_alarm_bits[channel]) & 1;
+ break;
+ case hwmon_temp_max_alarm:
+ *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
+ break;
+ case hwmon_temp_crit_alarm:
+ *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
+ break;
+ case hwmon_temp_emergency_alarm:
+ *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
+ break;
+ case hwmon_temp_fault:
+ *val = (data->alarms >> lm90_fault_bits[channel]) & 1;
+ break;
+ case hwmon_temp_min:
+ if (channel == 0)
+ *val = lm90_get_temp8(data,
+ lm90_temp_min_index[channel]);
+ else
+ *val = lm90_get_temp11(data,
+ lm90_temp_min_index[channel]);
+ break;
+ case hwmon_temp_max:
+ if (channel == 0)
+ *val = lm90_get_temp8(data,
+ lm90_temp_max_index[channel]);
+ else
+ *val = lm90_get_temp11(data,
+ lm90_temp_max_index[channel]);
+ break;
+ case hwmon_temp_crit:
+ *val = lm90_get_temp8(data, lm90_temp_crit_index[channel]);
+ break;
+ case hwmon_temp_crit_hyst:
+ *val = lm90_get_temphyst(data, lm90_temp_crit_index[channel]);
+ break;
+ case hwmon_temp_emergency:
+ *val = lm90_get_temp8(data, lm90_temp_emerg_index[channel]);
+ break;
+ case hwmon_temp_emergency_hyst:
+ *val = lm90_get_temphyst(data, lm90_temp_emerg_index[channel]);
+ break;
+ case hwmon_temp_offset:
+ *val = lm90_get_temp11(data, REMOTE_OFFSET);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
-/*
- * Additional attributes for devices with 3 temperature sensors
- */
-static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp11, NULL,
- 0, REMOTE2_TEMP);
-static SENSOR_DEVICE_ATTR_2(temp3_min, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 3, REMOTE2_LOW);
-static SENSOR_DEVICE_ATTR_2(temp3_max, S_IWUSR | S_IRUGO, show_temp11,
- set_temp11, 4, REMOTE2_HIGH);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, REMOTE2_CRIT);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, show_temphyst, NULL,
- REMOTE2_CRIT);
-static SENSOR_DEVICE_ATTR(temp3_emergency, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, REMOTE2_EMERG);
-static SENSOR_DEVICE_ATTR(temp3_emergency_hyst, S_IRUGO, show_temphyst,
- NULL, REMOTE2_EMERG);
-
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp3_emergency_alarm, S_IRUGO, show_alarm, NULL, 14);
-
-static struct attribute *lm90_temp3_attributes[] = {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_emergency.dev_attr.attr,
- &sensor_dev_attr_temp3_emergency_hyst.dev_attr.attr,
-
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_emergency_alarm.dev_attr.attr,
- NULL
-};
+static int lm90_temp_write(struct device *dev, u32 attr, int channel, long val)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ int err;
-static const struct attribute_group lm90_temp3_group = {
- .attrs = lm90_temp3_attributes,
-};
+ mutex_lock(&data->update_lock);
-/* pec used for ADM1032 only */
-static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
- char *buf)
+ err = lm90_update_device(dev);
+ if (err)
+ goto error;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ if (channel == 0)
+ err = lm90_set_temp8(data,
+ lm90_temp_min_index[channel],
+ val);
+ else
+ err = lm90_set_temp11(data,
+ lm90_temp_min_index[channel],
+ val);
+ break;
+ case hwmon_temp_max:
+ if (channel == 0)
+ err = lm90_set_temp8(data,
+ lm90_temp_max_index[channel],
+ val);
+ else
+ err = lm90_set_temp11(data,
+ lm90_temp_max_index[channel],
+ val);
+ break;
+ case hwmon_temp_crit:
+ err = lm90_set_temp8(data, lm90_temp_crit_index[channel], val);
+ break;
+ case hwmon_temp_crit_hyst:
+ err = lm90_set_temphyst(data, val);
+ break;
+ case hwmon_temp_emergency:
+ err = lm90_set_temp8(data, lm90_temp_emerg_index[channel], val);
+ break;
+ case hwmon_temp_offset:
+ err = lm90_set_temp11(data, REMOTE_OFFSET, val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+error:
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static umode_t lm90_temp_is_visible(const void *data, u32 attr, int channel)
{
- struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ case hwmon_temp_emergency_hyst:
+ case hwmon_temp_fault:
+ return S_IRUGO;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_emergency:
+ case hwmon_temp_offset:
+ return S_IRUGO | S_IWUSR;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return S_IRUGO | S_IWUSR;
+ return S_IRUGO;
+ default:
+ return 0;
+ }
}
-static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
+static int lm90_chip_read(struct device *dev, u32 attr, int channel, long *val)
{
- struct i2c_client *client = to_i2c_client(dev);
- long val;
+ struct lm90_data *data = dev_get_drvdata(dev);
int err;
- err = kstrtol(buf, 10, &val);
- if (err < 0)
+ mutex_lock(&data->update_lock);
+ err = lm90_update_device(dev);
+ mutex_unlock(&data->update_lock);
+ if (err)
return err;
- switch (val) {
- case 0:
- client->flags &= ~I2C_CLIENT_PEC;
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->update_interval;
break;
- case 1:
- client->flags |= I2C_CLIENT_PEC;
+ case hwmon_chip_alarms:
+ *val = data->alarms;
break;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
- return count;
+ return 0;
}
-static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
+static int lm90_chip_write(struct device *dev, u32 attr, int channel, long val)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int err;
-/*
- * Real code
- */
+ mutex_lock(&data->update_lock);
+
+ err = lm90_update_device(dev);
+ if (err)
+ goto error;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ err = lm90_set_convrate(client, data,
+ clamp_val(val, 0, 100000));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+error:
+ mutex_unlock(&data->update_lock);
+
+ return err;
+}
+
+static umode_t lm90_chip_is_visible(const void *data, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return S_IRUGO | S_IWUSR;
+ case hwmon_chip_alarms:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+}
+
+static int lm90_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return lm90_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm90_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lm90_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return lm90_chip_write(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm90_temp_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t lm90_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ return lm90_chip_is_visible(data, attr, channel);
+ case hwmon_temp:
+ return lm90_temp_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
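
With the three dispatch callbacks above in place, the hwmon core no longer needs one show/store pair per sysfs file: it calls lm90_read(), lm90_write() and lm90_is_visible() with a (type, attr, channel) triple and derives the attribute names itself. A rough illustration of that mapping, not part of the patch:

/*
 * How the core translates (type, attr, channel) into sysfs names for this
 * driver (illustration only):
 *
 *	(hwmon_temp, hwmon_temp_input,     0)       -> temp1_input
 *	(hwmon_temp, hwmon_temp_crit,      1)       -> temp2_crit
 *	(hwmon_temp, hwmon_temp_emergency, 2)       -> temp3_emergency
 *	(hwmon_chip, hwmon_chip_update_interval, 0) -> update_interval
 */
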
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm90_detect(struct i2c_client *client,
@@ -1617,15 +1626,32 @@ static void lm90_regulator_disable(void *regulator)
regulator_disable(regulator);
}
+static const u32 lm90_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS,
+ 0
+};
+
+static const struct hwmon_channel_info lm90_chip_info = {
+ .type = hwmon_chip,
+ .config = lm90_chip_config,
+};
+
+
+static const struct hwmon_ops lm90_ops = {
+ .is_visible = lm90_is_visible,
+ .read = lm90_read,
+ .write = lm90_write,
+};
+
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
- struct lm90_data *data;
+ struct hwmon_channel_info *info;
struct regulator *regulator;
struct device *hwmon_dev;
- int groups = 0;
+ struct lm90_data *data;
int err;
regulator = devm_regulator_get(dev, "vcc");
@@ -1665,6 +1691,49 @@ static int lm90_probe(struct i2c_client *client,
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
+
+ data->chip.ops = &lm90_ops;
+ data->chip.info = data->info;
+
+ data->info[0] = &lm90_chip_info;
+ data->info[1] = &data->temp_info;
+
+ info = &data->temp_info;
+ info->type = hwmon_temp;
+ info->config = data->channel_config;
+
+ data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+ data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+
+ if (data->flags & LM90_HAVE_OFFSET)
+ data->channel_config[1] |= HWMON_T_OFFSET;
+
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ data->channel_config[0] |= HWMON_T_EMERGENCY |
+ HWMON_T_EMERGENCY_HYST;
+ data->channel_config[1] |= HWMON_T_EMERGENCY |
+ HWMON_T_EMERGENCY_HYST;
+ }
+
+ if (data->flags & LM90_HAVE_EMERGENCY_ALARM) {
+ data->channel_config[0] |= HWMON_T_EMERGENCY_ALARM;
+ data->channel_config[1] |= HWMON_T_EMERGENCY_ALARM;
+ }
+
+ if (data->flags & LM90_HAVE_TEMP3) {
+ data->channel_config[2] = HWMON_T_INPUT |
+ HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM |
+ HWMON_T_FAULT;
+ }
+
data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
/* Set maximum conversion rate */
@@ -1677,21 +1746,10 @@ static int lm90_probe(struct i2c_client *client,
return err;
}
- /* Register sysfs hooks */
- data->groups[groups++] = &lm90_group;
-
- if (data->flags & LM90_HAVE_OFFSET)
- data->groups[groups++] = &lm90_temp2_offset_group;
-
- if (data->flags & LM90_HAVE_EMERGENCY)
- data->groups[groups++] = &lm90_emergency_group;
-
- if (data->flags & LM90_HAVE_EMERGENCY_ALARM)
- data->groups[groups++] = &lm90_emergency_alarm_group;
-
- if (data->flags & LM90_HAVE_TEMP3)
- data->groups[groups++] = &lm90_temp3_group;
-
+ /*
+ * The 'pec' attribute is attached to the i2c device and thus created
+ * separately.
+ */
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err)
@@ -1701,8 +1759,9 @@ static int lm90_probe(struct i2c_client *client,
return err;
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &data->chip,
+ NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
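
Taken together, the lm90 hunks above show the pattern every conversion in this series follows: describe the channels with hwmon_channel_info tables, bundle them with a hwmon_ops (is_visible/read/write) into a hwmon_chip_info, and register once through devm_hwmon_device_register_with_info(). A minimal, self-contained sketch of that pattern, with invented foo_* names and a single read-only temperature channel:

#include <linux/device.h>
#include <linux/hwmon.h>

/* Minimal sketch of the info-based API; all foo_* names are hypothetical. */

static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	if (type == hwmon_temp && attr == hwmon_temp_input)
		return S_IRUGO;		/* expose temp1_input read-only */
	return 0;			/* hide everything else */
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;		/* temperature in millidegrees C */
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};

static const u32 foo_temp_config[] = {
	HWMON_T_INPUT,			/* one channel -> temp1_input */
	0
};

static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp,
	.config = foo_temp_config,
};

static const struct hwmon_channel_info *foo_info[] = {
	&foo_temp,
	NULL
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

/* In probe:
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", drvdata,
 *							  &foo_chip_info, NULL);
 */
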
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index cdf19adaec79..8c573e6e9726 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -15,22 +15,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
+#include <linux/slab.h>
#define DEVNAME "lm95241"
@@ -54,26 +49,25 @@ static const unsigned short normal_i2c[] = {
#define LM95241_REG_RW_REMOTE_MODEL 0x30
/* LM95241 specific bitfields */
-#define CFG_STOP 0x40
-#define CFG_CR0076 0x00
-#define CFG_CR0182 0x10
-#define CFG_CR1000 0x20
-#define CFG_CR2700 0x30
-#define R1MS_SHIFT 0
-#define R2MS_SHIFT 2
-#define R1MS_MASK (0x01 << (R1MS_SHIFT))
-#define R2MS_MASK (0x01 << (R2MS_SHIFT))
-#define R1DF_SHIFT 1
-#define R2DF_SHIFT 2
-#define R1DF_MASK (0x01 << (R1DF_SHIFT))
-#define R2DF_MASK (0x01 << (R2DF_SHIFT))
-#define R1FE_MASK 0x01
-#define R2FE_MASK 0x05
-#define TT1_SHIFT 0
-#define TT2_SHIFT 4
-#define TT_OFF 0
-#define TT_ON 1
-#define TT_MASK 7
+#define CFG_STOP BIT(6)
+#define CFG_CR0076 0x00
+#define CFG_CR0182 BIT(4)
+#define CFG_CR1000 BIT(5)
+#define CFG_CR2700 (BIT(4) | BIT(5))
+#define CFG_CRMASK (BIT(4) | BIT(5))
+#define R1MS_MASK BIT(0)
+#define R2MS_MASK BIT(2)
+#define R1DF_MASK BIT(1)
+#define R2DF_MASK BIT(2)
+#define R1FE_MASK BIT(0)
+#define R2FE_MASK BIT(2)
+#define R1DM BIT(0)
+#define R2DM BIT(1)
+#define TT1_SHIFT 0
+#define TT2_SHIFT 4
+#define TT_OFF 0
+#define TT_ON 1
+#define TT_MASK 7
#define NATSEMI_MAN_ID 0x01
#define LM95231_CHIP_ID 0xA1
#define LM95241_CHIP_ID 0xA4
@@ -91,11 +85,12 @@ static const u8 lm95241_reg_address[] = {
struct lm95241_data {
struct i2c_client *client;
struct mutex update_lock;
- unsigned long last_updated, interval; /* in jiffies */
+ unsigned long last_updated; /* in jiffies */
+ unsigned long interval; /* in milli-seconds */
char valid; /* zero until following fields are valid */
/* registers values */
u8 temp[ARRAY_SIZE(lm95241_reg_address)];
- u8 config, model, trutherm;
+ u8 status, config, model, trutherm;
};
/* Conversions */
@@ -118,7 +113,8 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + data->interval) ||
+ if (time_after(jiffies, data->last_updated
+ + msecs_to_jiffies(data->interval)) ||
!data->valid) {
int i;
@@ -127,6 +123,9 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
data->temp[i]
= i2c_smbus_read_byte_data(client,
lm95241_reg_address[i]);
+
+ data->status = i2c_smbus_read_byte_data(client,
+ LM95241_REG_R_STATUS);
data->last_updated = jiffies;
data->valid = 1;
}
@@ -136,197 +135,241 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
return data;
}
-/* Sysfs stuff */
-static ssize_t show_input(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lm95241_data *data = lm95241_update_device(dev);
- int index = to_sensor_dev_attr(attr)->index;
-
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- index == 0 || (data->config & (1 << (index / 2))) ?
- temp_from_reg_signed(data->temp[index], data->temp[index + 1]) :
- temp_from_reg_unsigned(data->temp[index],
- data->temp[index + 1]));
-}
-
-static ssize_t show_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95241_read_chip(struct device *dev, u32 attr, int channel,
+ long *val)
{
struct lm95241_data *data = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1,
- data->model & to_sensor_dev_attr(attr)->index ? "1\n" : "2\n");
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->interval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95241_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct lm95241_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long val;
- int shift;
- u8 mask = to_sensor_dev_attr(attr)->index;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
- if (val != 1 && val != 2)
- return -EINVAL;
-
- shift = mask == R1MS_MASK ? TT1_SHIFT : TT2_SHIFT;
-
- mutex_lock(&data->update_lock);
+ struct lm95241_data *data = lm95241_update_device(dev);
- data->trutherm &= ~(TT_MASK << shift);
- if (val == 1) {
- data->model |= mask;
- data->trutherm |= (TT_ON << shift);
- } else {
- data->model &= ~mask;
- data->trutherm |= (TT_OFF << shift);
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel || (data->config & BIT(channel - 1)))
+ *val = temp_from_reg_signed(data->temp[channel * 2],
+ data->temp[channel * 2 + 1]);
+ else
+ *val = temp_from_reg_unsigned(data->temp[channel * 2],
+ data->temp[channel * 2 + 1]);
+ return 0;
+ case hwmon_temp_min:
+ if (channel == 1)
+ *val = (data->config & R1DF_MASK) ? -128000 : 0;
+ else
+ *val = (data->config & R2DF_MASK) ? -128000 : 0;
+ return 0;
+ case hwmon_temp_max:
+ if (channel == 1)
+ *val = (data->config & R1DF_MASK) ? 127875 : 255875;
+ else
+ *val = (data->config & R2DF_MASK) ? 127875 : 255875;
+ return 0;
+ case hwmon_temp_type:
+ if (channel == 1)
+ *val = (data->model & R1MS_MASK) ? 1 : 2;
+ else
+ *val = (data->model & R2MS_MASK) ? 1 : 2;
+ return 0;
+ case hwmon_temp_fault:
+ if (channel == 1)
+ *val = !!(data->status & R1DM);
+ else
+ *val = !!(data->status & R2DM);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
- data->valid = 0;
-
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
- data->model);
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
- data->trutherm);
-
- mutex_unlock(&data->update_lock);
-
- return count;
}
-static ssize_t show_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95241_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct lm95241_data *data = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE - 1,
- data->config & to_sensor_dev_attr(attr)->index ?
- "-127000\n" : "0\n");
+ switch (type) {
+ case hwmon_chip:
+ return lm95241_read_chip(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm95241_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95241_write_chip(struct device *dev, u32 attr, int channel,
+ long val)
{
struct lm95241_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
- if (val < -128000)
- return -EINVAL;
+ int convrate;
+ u8 config;
+ int ret;
mutex_lock(&data->update_lock);
- if (val < 0)
- data->config |= to_sensor_dev_attr(attr)->index;
- else
- data->config &= ~to_sensor_dev_attr(attr)->index;
- data->valid = 0;
-
- i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
- data->config);
-
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ config = data->config & ~CFG_CRMASK;
+ if (val < 130) {
+ convrate = 76;
+ config |= CFG_CR0076;
+ } else if (val < 590) {
+ convrate = 182;
+ config |= CFG_CR0182;
+ } else if (val < 1850) {
+ convrate = 1000;
+ config |= CFG_CR1000;
+ } else {
+ convrate = 2700;
+ config |= CFG_CR2700;
+ }
+ data->interval = convrate;
+ data->config = config;
+ ret = i2c_smbus_write_byte_data(data->client,
+ LM95241_REG_RW_CONFIG, config);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
mutex_unlock(&data->update_lock);
-
- return count;
+ return ret;
}
-static ssize_t show_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
{
struct lm95241_data *data = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE - 1,
- data->config & to_sensor_dev_attr(attr)->index ?
- "127000\n" : "255000\n");
-}
-
-static ssize_t set_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct lm95241_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
- if (val >= 256000)
- return -EINVAL;
+ struct i2c_client *client = data->client;
+ int ret;
mutex_lock(&data->update_lock);
- if (val <= 127000)
- data->config |= to_sensor_dev_attr(attr)->index;
- else
- data->config &= ~to_sensor_dev_attr(attr)->index;
- data->valid = 0;
-
- i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
- data->config);
+ switch (attr) {
+ case hwmon_temp_min:
+ if (channel == 1) {
+ if (val < 0)
+ data->config |= R1DF_MASK;
+ else
+ data->config &= ~R1DF_MASK;
+ } else {
+ if (val < 0)
+ data->config |= R2DF_MASK;
+ else
+ data->config &= ~R2DF_MASK;
+ }
+ data->valid = 0;
+ ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
+ data->config);
+ break;
+ case hwmon_temp_max:
+ if (channel == 1) {
+ if (val <= 127875)
+ data->config |= R1DF_MASK;
+ else
+ data->config &= ~R1DF_MASK;
+ } else {
+ if (val <= 127875)
+ data->config |= R2DF_MASK;
+ else
+ data->config &= ~R2DF_MASK;
+ }
+ data->valid = 0;
+ ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
+ data->config);
+ break;
+ case hwmon_temp_type:
+ if (val != 1 && val != 2) {
+ ret = -EINVAL;
+ break;
+ }
+ if (channel == 1) {
+ data->trutherm &= ~(TT_MASK << TT1_SHIFT);
+ if (val == 1) {
+ data->model |= R1MS_MASK;
+ data->trutherm |= (TT_ON << TT1_SHIFT);
+ } else {
+ data->model &= ~R1MS_MASK;
+ data->trutherm |= (TT_OFF << TT1_SHIFT);
+ }
+ } else {
+ data->trutherm &= ~(TT_MASK << TT2_SHIFT);
+ if (val == 1) {
+ data->model |= R2MS_MASK;
+ data->trutherm |= (TT_ON << TT2_SHIFT);
+ } else {
+ data->model &= ~R2MS_MASK;
+ data->trutherm |= (TT_OFF << TT2_SHIFT);
+ }
+ }
+ ret = i2c_smbus_write_byte_data(client,
+ LM95241_REG_RW_REMOTE_MODEL,
+ data->model);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
+ data->trutherm);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
mutex_unlock(&data->update_lock);
- return count;
+ return ret;
}
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95241_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct lm95241_data *data = lm95241_update_device(dev);
-
- return snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval
- / HZ);
+ switch (type) {
+ case hwmon_chip:
+ return lm95241_write_chip(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm95241_write_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static umode_t lm95241_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct lm95241_data *data = dev_get_drvdata(dev);
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- data->interval = val * HZ / 1000;
-
- return count;
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return S_IRUGO | S_IWUSR;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return S_IRUGO;
+ case hwmon_temp_fault:
+ return S_IRUGO;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_type:
+ return S_IRUGO | S_IWUSR;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type,
- R1MS_MASK);
-static SENSOR_DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type, set_type,
- R2MS_MASK);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min,
- R1DF_MASK);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min,
- R2DF_MASK);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max,
- R1DF_MASK);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max,
- R2DF_MASK);
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
- set_interval);
-
-static struct attribute *lm95241_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp2_type.dev_attr.attr,
- &sensor_dev_attr_temp3_type.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &dev_attr_update_interval.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(lm95241);
-
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95241_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
@@ -362,8 +405,8 @@ static int lm95241_detect(struct i2c_client *new_client,
static void lm95241_init_client(struct i2c_client *client,
struct lm95241_data *data)
{
- data->interval = HZ; /* 1 sec default */
- data->config = CFG_CR0076;
+ data->interval = 1000;
+ data->config = CFG_CR1000;
data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
@@ -375,6 +418,47 @@ static void lm95241_init_client(struct i2c_client *client,
data->model);
}
+static const u32 lm95241_chip_config[] = {
+ HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info lm95241_chip = {
+ .type = hwmon_chip,
+ .config = lm95241_chip_config,
+};
+
+static const u32 lm95241_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
+ HWMON_T_FAULT,
+ 0
+};
+
+static const struct hwmon_channel_info lm95241_temp = {
+ .type = hwmon_temp,
+ .config = lm95241_temp_config,
+};
+
+static const struct hwmon_channel_info *lm95241_info[] = {
+ &lm95241_chip,
+ &lm95241_temp,
+ NULL
+};
+
+static const struct hwmon_ops lm95241_hwmon_ops = {
+ .is_visible = lm95241_is_visible,
+ .read = lm95241_read,
+ .write = lm95241_write,
+};
+
+static const struct hwmon_chip_info lm95241_chip_info = {
+ .ops = &lm95241_hwmon_ops,
+ .info = lm95241_info,
+};
+
static int lm95241_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -392,9 +476,10 @@ static int lm95241_probe(struct i2c_client *client,
/* Initialize the LM95241 chip */
lm95241_init_client(client, data);
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data,
- lm95241_groups);
+ &lm95241_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -420,5 +505,5 @@ static struct i2c_driver lm95241_driver = {
module_i2c_driver(lm95241_driver);
MODULE_AUTHOR("Davide Rizzo <elpa.rizzo@gmail.com>");
-MODULE_DESCRIPTION("LM95241 sensor driver");
+MODULE_DESCRIPTION("LM95231/LM95241 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index e7aef4561c83..a3bfd88752ca 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -15,22 +15,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
+#include <linux/err.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
@@ -89,6 +83,7 @@ static const unsigned short normal_i2c[] = {
#define RATE_CR1000 0x02
#define RATE_CR2500 0x03
+#define STATUS1_ROS 0x10
#define STATUS1_DIODE_FAULT 0x04
#define STATUS1_RTCRIT 0x02
#define STATUS1_LOC 0x01
@@ -112,14 +107,9 @@ static const u8 lm95245_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95245_data {
- struct i2c_client *client;
+ struct regmap *regmap;
struct mutex update_lock;
- unsigned long last_updated; /* in jiffies */
- unsigned long interval; /* in msecs */
- bool valid; /* zero until following fields are valid */
- /* registers values */
- u8 regs[ARRAY_SIZE(lm95245_reg_address)];
- u8 config1, config2;
+ int interval; /* in msecs */
};
/* Conversions */
@@ -135,60 +125,36 @@ static int temp_from_reg_signed(u8 val_h, u8 val_l)
return temp_from_reg_unsigned(val_h, val_l);
}
-static struct lm95245_data *lm95245_update_device(struct device *dev)
+static int lm95245_read_conversion_rate(struct lm95245_data *data)
{
- struct lm95245_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
+ unsigned int rate;
+ int ret;
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated
- + msecs_to_jiffies(data->interval)) || !data->valid) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
- data->regs[i]
- = i2c_smbus_read_byte_data(client,
- lm95245_reg_address[i]);
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-static unsigned long lm95245_read_conversion_rate(struct i2c_client *client)
-{
- int rate;
- unsigned long interval;
-
- rate = i2c_smbus_read_byte_data(client, LM95245_REG_RW_CONVERS_RATE);
+ ret = regmap_read(data->regmap, LM95245_REG_RW_CONVERS_RATE, &rate);
+ if (ret < 0)
+ return ret;
switch (rate) {
case RATE_CR0063:
- interval = 63;
+ data->interval = 63;
break;
case RATE_CR0364:
- interval = 364;
+ data->interval = 364;
break;
case RATE_CR1000:
- interval = 1000;
+ data->interval = 1000;
break;
case RATE_CR2500:
default:
- interval = 2500;
+ data->interval = 2500;
break;
}
-
- return interval;
+ return 0;
}
-static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
- unsigned long interval)
+static int lm95245_set_conversion_rate(struct lm95245_data *data, long interval)
{
- int rate;
+ int ret, rate;
if (interval <= 63) {
interval = 63;
@@ -204,221 +170,289 @@ static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
rate = RATE_CR2500;
}
- i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONVERS_RATE, rate);
+ ret = regmap_write(data->regmap, LM95245_REG_RW_CONVERS_RATE, rate);
+ if (ret < 0)
+ return ret;
- return interval;
-}
-
-/* Sysfs stuff */
-static ssize_t show_input(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lm95245_data *data = lm95245_update_device(dev);
- int temp;
- int index = to_sensor_dev_attr(attr)->index;
-
- /*
- * Index 0 (Local temp) is always signed
- * Index 2 (Remote temp) has both signed and unsigned data
- * use signed calculation for remote if signed bit is set
- */
- if (index == 0 || data->regs[index] & 0x80)
- temp = temp_from_reg_signed(data->regs[index],
- data->regs[index + 1]);
- else
- temp = temp_from_reg_unsigned(data->regs[index + 2],
- data->regs[index + 3]);
-
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", temp);
-}
-
-static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lm95245_data *data = lm95245_update_device(dev);
- int index = to_sensor_dev_attr(attr)->index;
-
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- data->regs[index] * 1000);
+ data->interval = interval;
+ return 0;
}
-static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95245_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
struct lm95245_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- struct i2c_client *client = data->client;
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- val /= 1000;
-
- val = clamp_val(val, 0, (index == 6 ? 127 : 255));
-
- mutex_lock(&data->update_lock);
-
- data->valid = 0;
-
- i2c_smbus_write_byte_data(client, lm95245_reg_address[index], val);
-
- mutex_unlock(&data->update_lock);
-
- return count;
+ struct regmap *regmap = data->regmap;
+ int ret, regl, regh, regvall, regvalh;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ regl = channel ? LM95245_REG_R_REMOTE_TEMPL_S :
+ LM95245_REG_R_LOCAL_TEMPL_S;
+ regh = channel ? LM95245_REG_R_REMOTE_TEMPH_S :
+ LM95245_REG_R_LOCAL_TEMPH_S;
+ ret = regmap_read(regmap, regl, &regvall);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, regh, &regvalh);
+ if (ret < 0)
+ return ret;
+ /*
+ * Local temp is always signed.
+ * Remote temp has both signed and unsigned data.
+ * Use signed calculation for remote if signed bit is set
+ * or if reported temperature is below signed limit.
+ */
+ if (!channel || (regvalh & 0x80) || regvalh < 0x7f) {
+ *val = temp_from_reg_signed(regvalh, regvall);
+ return 0;
+ }
+ ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPL_U,
+ &regvall);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPH_U,
+ &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = temp_from_reg_unsigned(regvalh, regvall);
+ return 0;
+ case hwmon_temp_max:
+ ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT,
+ &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = regvalh * 1000;
+ return 0;
+ case hwmon_temp_crit:
+ regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
+ ret = regmap_read(regmap, regh, &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = regvalh * 1000;
+ return 0;
+ case hwmon_temp_max_hyst:
+ ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT,
+ &regvalh);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
+ &regvall);
+ if (ret < 0)
+ return ret;
+ *val = (regvalh - regvall) * 1000;
+ return 0;
+ case hwmon_temp_crit_hyst:
+ regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
+ ret = regmap_read(regmap, regh, &regvalh);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
+ &regvall);
+ if (ret < 0)
+ return ret;
+ *val = (regvalh - regvall) * 1000;
+ return 0;
+ case hwmon_temp_type:
+ ret = regmap_read(regmap, LM95245_REG_RW_CONFIG2, &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = (regvalh & CFG2_REMOTE_TT) ? 1 : 2;
+ return 0;
+ case hwmon_temp_offset:
+ ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFL,
+ &regvall);
+ if (ret < 0)
+ return ret;
+ ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFH,
+ &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = temp_from_reg_signed(regvalh, regvall);
+ return 0;
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = !!(regvalh & STATUS1_ROS);
+ return 0;
+ case hwmon_temp_crit_alarm:
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = !!(regvalh & (channel ? STATUS1_RTCRIT : STATUS1_LOC));
+ return 0;
+ case hwmon_temp_fault:
+ ret = regmap_read(regmap, LM95245_REG_R_STATUS1, &regvalh);
+ if (ret < 0)
+ return ret;
+ *val = !!(regvalh & STATUS1_DIODE_FAULT);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_crit_hyst(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95245_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
{
- struct lm95245_data *data = lm95245_update_device(dev);
- int index = to_sensor_dev_attr(attr)->index;
- int hyst = data->regs[index] - data->regs[8];
-
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", hyst * 1000);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret, reg;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ val = clamp_val(val / 1000, 0, 255);
+ ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT, val);
+ return ret;
+ case hwmon_temp_crit:
+ reg = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT :
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT;
+ val = clamp_val(val / 1000, 0, channel ? 255 : 127);
+ ret = regmap_write(regmap, reg, val);
+ return ret;
+ case hwmon_temp_crit_hyst:
+ mutex_lock(&data->update_lock);
+ ret = regmap_read(regmap, LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
+ &regval);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+ /* Clamp to reasonable range to prevent overflow */
+ val = clamp_val(val, -1000000, 1000000);
+ val = regval - val / 1000;
+ val = clamp_val(val, 0, 31);
+ ret = regmap_write(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
+ val);
+ mutex_unlock(&data->update_lock);
+ return ret;
+ case hwmon_temp_offset:
+ val = clamp_val(val, -128000, 127875);
+ val = val * 256 / 1000;
+ mutex_lock(&data->update_lock);
+ ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFL,
+ val & 0xe0);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+ ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFH,
+ (val >> 8) & 0xff);
+ mutex_unlock(&data->update_lock);
+ return ret;
+ case hwmon_temp_type:
+ if (val != 1 && val != 2)
+ return -EINVAL;
+ ret = regmap_update_bits(regmap, LM95245_REG_RW_CONFIG2,
+ CFG2_REMOTE_TT,
+ val == 1 ? CFG2_REMOTE_TT : 0);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95245_read_chip(struct device *dev, u32 attr, int channel,
+ long *val)
{
struct lm95245_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(attr)->index;
- struct i2c_client *client = data->client;
- unsigned long val;
- int hyst, limit;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
-
- limit = i2c_smbus_read_byte_data(client, lm95245_reg_address[index]);
- hyst = limit - val / 1000;
- hyst = clamp_val(hyst, 0, 31);
- data->regs[8] = hyst;
- /* shared crit hysteresis */
- i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
- hyst);
-
- mutex_unlock(&data->update_lock);
-
- return count;
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = data->interval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95245_write_chip(struct device *dev, u32 attr, int channel,
+ long val)
{
struct lm95245_data *data = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE - 1,
- data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
+ int ret;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ mutex_lock(&data->update_lock);
+ ret = lm95245_set_conversion_rate(data, val);
+ mutex_unlock(&data->update_lock);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int lm95245_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct lm95245_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
- if (val != 1 && val != 2)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
-
- if (val == 1)
- data->config2 |= CFG2_REMOTE_TT;
- else
- data->config2 &= ~CFG2_REMOTE_TT;
-
- data->valid = 0;
-
- i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG2,
- data->config2);
-
- mutex_unlock(&data->update_lock);
-
- return count;
+ switch (type) {
+ case hwmon_chip:
+ return lm95245_read_chip(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm95245_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int lm95245_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct lm95245_data *data = lm95245_update_device(dev);
- int index = to_sensor_dev_attr(attr)->index;
-
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- !!(data->regs[9] & index));
+ switch (type) {
+ case hwmon_chip:
+ return lm95245_write_chip(dev, attr, channel, val);
+ case hwmon_temp:
+ return lm95245_write_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
- char *buf)
+static umode_t lm95245_temp_is_visible(const void *data, u32 attr, int channel)
{
- struct lm95245_data *data = lm95245_update_device(dev);
-
- return snprintf(buf, PAGE_SIZE - 1, "%lu\n", data->interval);
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ return S_IRUGO;
+ case hwmon_temp_type:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_offset:
+ return S_IRUGO | S_IWUSR;
+ case hwmon_temp_crit_hyst:
+ return (channel == 0) ? S_IRUGO | S_IWUSR : S_IRUGO;
+ default:
+ return 0;
+ }
}
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static umode_t lm95245_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct lm95245_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
-
- data->interval = lm95245_set_conversion_rate(client, val);
-
- mutex_unlock(&data->update_lock);
-
- return count;
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return S_IRUGO | S_IWUSR;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ return lm95245_temp_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
- set_limit, 6);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_crit_hyst,
- set_crit_hyst, 6);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
- STATUS1_LOC);
-
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
- set_limit, 7);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_crit_hyst, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
- STATUS1_RTCRIT);
-static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
- set_type, 0);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
- STATUS1_DIODE_FAULT);
-
-static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
- set_interval);
-
-static struct attribute *lm95245_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_type.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &dev_attr_update_interval.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(lm95245);
-
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95245_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
@@ -453,44 +487,130 @@ static int lm95245_detect(struct i2c_client *new_client,
return 0;
}
-static void lm95245_init_client(struct i2c_client *client,
- struct lm95245_data *data)
+static int lm95245_init_client(struct lm95245_data *data)
{
- data->interval = lm95245_read_conversion_rate(client);
-
- data->config1 = i2c_smbus_read_byte_data(client,
- LM95245_REG_RW_CONFIG1);
- data->config2 = i2c_smbus_read_byte_data(client,
- LM95245_REG_RW_CONFIG2);
-
- if (data->config1 & CFG_STOP) {
- /* Clear the standby bit */
- data->config1 &= ~CFG_STOP;
- i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG1,
- data->config1);
+ int ret;
+
+ ret = lm95245_read_conversion_rate(data);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, LM95245_REG_RW_CONFIG1,
+ CFG_STOP, 0);
+}
+
+static bool lm95245_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LM95245_REG_RW_CONFIG1:
+ case LM95245_REG_RW_CONVERS_RATE:
+ case LM95245_REG_W_ONE_SHOT:
+ case LM95245_REG_RW_CONFIG2:
+ case LM95245_REG_RW_REMOTE_OFFH:
+ case LM95245_REG_RW_REMOTE_OFFL:
+ case LM95245_REG_RW_REMOTE_OS_LIMIT:
+ case LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT:
+ case LM95245_REG_RW_REMOTE_TCRIT_LIMIT:
+ case LM95245_REG_RW_COMMON_HYSTERESIS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm95245_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LM95245_REG_R_STATUS1:
+ case LM95245_REG_R_STATUS2:
+ case LM95245_REG_R_LOCAL_TEMPH_S:
+ case LM95245_REG_R_LOCAL_TEMPL_S:
+ case LM95245_REG_R_REMOTE_TEMPH_S:
+ case LM95245_REG_R_REMOTE_TEMPL_S:
+ case LM95245_REG_R_REMOTE_TEMPH_U:
+ case LM95245_REG_R_REMOTE_TEMPL_U:
+ return true;
+ default:
+ return false;
}
}
+static const struct regmap_config lm95245_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = lm95245_is_writeable_reg,
+ .volatile_reg = lm95245_is_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static const u32 lm95245_chip_config[] = {
+ HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info lm95245_chip = {
+ .type = hwmon_chip,
+ .config = lm95245_chip_config,
+};
+
+static const u32 lm95245_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST | HWMON_T_FAULT | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_TYPE | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info lm95245_temp = {
+ .type = hwmon_temp,
+ .config = lm95245_temp_config,
+};
+
+static const struct hwmon_channel_info *lm95245_info[] = {
+ &lm95245_chip,
+ &lm95245_temp,
+ NULL
+};
+
+static const struct hwmon_ops lm95245_hwmon_ops = {
+ .is_visible = lm95245_is_visible,
+ .read = lm95245_read,
+ .write = lm95245_write,
+};
+
+static const struct hwmon_chip_info lm95245_chip_info = {
+ .ops = &lm95245_hwmon_ops,
+ .info = lm95245_info,
+};
+
static int lm95245_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct lm95245_data *data;
struct device *hwmon_dev;
+ int ret;
data = devm_kzalloc(dev, sizeof(struct lm95245_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
+ data->regmap = devm_regmap_init_i2c(client, &lm95245_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
mutex_init(&data->update_lock);
/* Initialize the LM95245 chip */
- lm95245_init_client(client, data);
-
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data,
- lm95245_groups);
+ ret = lm95245_init_client(data);
+ if (ret < 0)
+ return ret;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &lm95245_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
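
The lm95245 conversion above goes one step further and drops the driver's hand-rolled register cache in favour of regmap with an rbtree cache: registers named in lm95245_is_volatile_reg() are always fetched from the chip, everything else is served from the cache once it has been read or written. A rough illustration of the resulting read behaviour (not part of the patch):

/* Illustration only: two reads through the regmap set up in probe. */
static int lm95245_example_reads(struct lm95245_data *data)
{
	unsigned int limit, status;
	int ret;

	/* non-volatile limit register: cached after the first transfer */
	ret = regmap_read(data->regmap, LM95245_REG_RW_REMOTE_OS_LIMIT, &limit);
	if (ret < 0)
		return ret;

	/* volatile status register: read from the hardware every time */
	return regmap_read(data->regmap, LM95245_REG_R_STATUS1, &status);
}
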
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index c86a18402496..8445c9fd946b 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -52,6 +53,7 @@ struct ltc4151_data {
struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
+ unsigned int shunt; /* in micro ohms */
/* Registers */
u8 regs[6];
@@ -111,9 +113,9 @@ static int ltc4151_get_value(struct ltc4151_data *data, u8 reg)
case LTC4151_SENSE_H:
/*
* 20uV resolution. Convert to current as measured with
- * an 1 mOhm sense resistor, in mA.
+ * a given sense resistor, in mA.
*/
- val = val * 20;
+ val = val * 20 * 1000 / data->shunt;
break;
case LTC4151_VIN_H:
/* 25 mV per increment */
@@ -176,6 +178,7 @@ static int ltc4151_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ltc4151_data *data;
struct device *hwmon_dev;
+ u32 shunt;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -184,6 +187,15 @@ static int ltc4151_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
+ if (of_property_read_u32(client->dev.of_node,
+ "shunt-resistor-micro-ohms", &shunt))
+ shunt = 1000; /* 1 mOhm if not set via DT */
+
+ if (shunt == 0)
+ return -EINVAL;
+
+ data->shunt = shunt;
+
data->client = client;
mutex_init(&data->update_lock);
@@ -199,10 +211,16 @@ static const struct i2c_device_id ltc4151_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc4151_id);
+static const struct of_device_id ltc4151_match[] = {
+ { .compatible = "lltc,ltc4151" },
+ {},
+};
+
/* This is the driver that will be inserted */
static struct i2c_driver ltc4151_driver = {
.driver = {
.name = "ltc4151",
+ .of_match_table = of_match_ptr(ltc4151_match),
},
.probe = ltc4151_probe,
.id_table = ltc4151_id,
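
The ltc4151 change above replaces the hard-wired 1 mOhm assumption with a shunt value taken from the optional shunt-resistor-micro-ohms device-tree property, defaulting to 1000 micro-ohms and rejecting 0. Each SENSE LSB is 20 uV across that shunt, so the reading converts to mA as reg * 20 * 1000 / shunt. A worked illustration:

/* Illustration only: mirrors the scaling now used in ltc4151_get_value() */
static unsigned int example_sense_to_ma(unsigned int reg, unsigned int shunt_uohm)
{
	/* I = V / R: (reg * 20 uV) / (shunt uOhm) = reg * 20 * 1000 / shunt mA */
	return reg * 20 * 1000 / shunt_uohm;
}

/*
 * example_sense_to_ma(100, 1000) == 2000	default 1 mOhm shunt -> 2 A
 * example_sense_to_ma(100, 2000) == 1000	2 mOhm shunt from DT -> 1 A
 */
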
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 681b5b7b3c3b..4680d89556ce 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -53,8 +54,6 @@ enum ltc4245_cmd {
struct ltc4245_data {
struct i2c_client *client;
- const struct attribute_group *groups[3];
-
struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
@@ -162,7 +161,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
ltc4245_update_gpios(dev);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -256,213 +255,204 @@ static unsigned int ltc4245_get_current(struct device *dev, u8 reg)
return curr;
}
-static ssize_t ltc4245_show_voltage(struct device *dev,
- struct device_attribute *da,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- const int voltage = ltc4245_get_voltage(dev, attr->index);
+/* Map from voltage channel index to voltage register */
- return snprintf(buf, PAGE_SIZE, "%d\n", voltage);
-}
+static const s8 ltc4245_in_regs[] = {
+ LTC4245_12VIN, LTC4245_5VIN, LTC4245_3VIN, LTC4245_VEEIN,
+ LTC4245_12VOUT, LTC4245_5VOUT, LTC4245_3VOUT, LTC4245_VEEOUT,
+};
+
+/* Map from current channel index to current register */
-static ssize_t ltc4245_show_current(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static const s8 ltc4245_curr_regs[] = {
+ LTC4245_12VSENSE, LTC4245_5VSENSE, LTC4245_3VSENSE, LTC4245_VEESENSE,
+};
+
+static int ltc4245_read_curr(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- const unsigned int curr = ltc4245_get_current(dev, attr->index);
+ struct ltc4245_data *data = ltc4245_update_device(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", curr);
+ switch (attr) {
+ case hwmon_curr_input:
+ *val = ltc4245_get_current(dev, ltc4245_curr_regs[channel]);
+ return 0;
+ case hwmon_curr_max_alarm:
+ *val = !!(data->cregs[LTC4245_FAULT1] & BIT(channel + 4));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t ltc4245_show_power(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static int ltc4245_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- const unsigned int curr = ltc4245_get_current(dev, attr->index);
- const int output_voltage = ltc4245_get_voltage(dev, attr->index+1);
+ struct ltc4245_data *data = ltc4245_update_device(dev);
- /* current in mA * voltage in mV == power in uW */
- const unsigned int power = abs(output_voltage * curr);
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel < 8) {
+ *val = ltc4245_get_voltage(dev,
+ ltc4245_in_regs[channel]);
+ } else {
+ int regval = data->gpios[channel - 8];
+
+ if (regval < 0)
+ return regval;
+ *val = regval * 10;
+ }
+ return 0;
+ case hwmon_in_min_alarm:
+ if (channel < 4)
+ *val = !!(data->cregs[LTC4245_FAULT1] & BIT(channel));
+ else
+ *val = !!(data->cregs[LTC4245_FAULT2] &
+ BIT(channel - 4));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return snprintf(buf, PAGE_SIZE, "%u\n", power);
+static int ltc4245_read_power(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ unsigned long curr;
+ long voltage;
+
+ switch (attr) {
+ case hwmon_power_input:
+ (void)ltc4245_update_device(dev);
+ curr = ltc4245_get_current(dev, ltc4245_curr_regs[channel]);
+ voltage = ltc4245_get_voltage(dev, ltc4245_in_regs[channel]);
+ *val = abs(curr * voltage);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t ltc4245_show_alarm(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static int ltc4245_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
- struct ltc4245_data *data = ltc4245_update_device(dev);
- const u8 reg = data->cregs[attr->index];
- const u32 mask = attr->nr;
- return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0);
+ switch (type) {
+ case hwmon_curr:
+ return ltc4245_read_curr(dev, attr, channel, val);
+ case hwmon_power:
+ return ltc4245_read_power(dev, attr, channel, val);
+ case hwmon_in:
+ return ltc4245_read_in(dev, attr, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t ltc4245_show_gpio(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static umode_t ltc4245_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct ltc4245_data *data = ltc4245_update_device(dev);
- int val = data->gpios[attr->index];
+ const struct ltc4245_data *data = _data;
+
+ switch (type) {
+ case hwmon_in:
+ if (channel == 0)
+ return 0;
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel > 9 && !data->use_extra_gpios)
+ return 0;
+ return S_IRUGO;
+ case hwmon_in_min_alarm:
+ if (channel > 8)
+ return 0;
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_max_alarm:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
- /* handle stale GPIO's */
- if (val < 0)
- return val;
+static const u32 ltc4245_in_config[] = {
+ HWMON_I_INPUT, /* dummy, skipped in is_visible */
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
- /* Convert to millivolts and print */
- return snprintf(buf, PAGE_SIZE, "%u\n", val * 10);
-}
+static const struct hwmon_channel_info ltc4245_in = {
+ .type = hwmon_in,
+ .config = ltc4245_in_config,
+};
-/* Construct a sensor_device_attribute structure for each register */
-
-/* Input voltages */
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_12VIN);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_5VIN);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_3VIN);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_VEEIN);
-
-/* Input undervoltage alarms */
-static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 0, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(in2_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 1, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(in3_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 2, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(in4_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 3, LTC4245_FAULT1);
-
-/* Currents (via sense resistor) */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4245_show_current, NULL,
- LTC4245_12VSENSE);
-static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc4245_show_current, NULL,
- LTC4245_5VSENSE);
-static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO, ltc4245_show_current, NULL,
- LTC4245_3VSENSE);
-static SENSOR_DEVICE_ATTR(curr4_input, S_IRUGO, ltc4245_show_current, NULL,
- LTC4245_VEESENSE);
-
-/* Overcurrent alarms */
-static SENSOR_DEVICE_ATTR_2(curr1_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 4, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(curr2_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 5, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(curr3_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 6, LTC4245_FAULT1);
-static SENSOR_DEVICE_ATTR_2(curr4_max_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 7, LTC4245_FAULT1);
-
-/* Output voltages */
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_12VOUT);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_5VOUT);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_3VOUT);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, ltc4245_show_voltage, NULL,
- LTC4245_VEEOUT);
-
-/* Power Bad alarms */
-static SENSOR_DEVICE_ATTR_2(in5_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 0, LTC4245_FAULT2);
-static SENSOR_DEVICE_ATTR_2(in6_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 1, LTC4245_FAULT2);
-static SENSOR_DEVICE_ATTR_2(in7_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 2, LTC4245_FAULT2);
-static SENSOR_DEVICE_ATTR_2(in8_min_alarm, S_IRUGO, ltc4245_show_alarm, NULL,
- 1 << 3, LTC4245_FAULT2);
-
-/* GPIO voltages */
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, ltc4245_show_gpio, NULL, 0);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, ltc4245_show_gpio, NULL, 1);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, ltc4245_show_gpio, NULL, 2);
-
-/* Power Consumption (virtual) */
-static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ltc4245_show_power, NULL,
- LTC4245_12VSENSE);
-static SENSOR_DEVICE_ATTR(power2_input, S_IRUGO, ltc4245_show_power, NULL,
- LTC4245_5VSENSE);
-static SENSOR_DEVICE_ATTR(power3_input, S_IRUGO, ltc4245_show_power, NULL,
- LTC4245_3VSENSE);
-static SENSOR_DEVICE_ATTR(power4_input, S_IRUGO, ltc4245_show_power, NULL,
- LTC4245_VEESENSE);
+static const u32 ltc4245_curr_config[] = {
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ 0
+};
-/*
- * Finally, construct an array of pointers to members of the above objects,
- * as required for sysfs_create_group()
- */
-static struct attribute *ltc4245_std_attributes[] = {
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
-
- &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in4_min_alarm.dev_attr.attr,
-
- &sensor_dev_attr_curr1_input.dev_attr.attr,
- &sensor_dev_attr_curr2_input.dev_attr.attr,
- &sensor_dev_attr_curr3_input.dev_attr.attr,
- &sensor_dev_attr_curr4_input.dev_attr.attr,
-
- &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_curr4_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
-
- &sensor_dev_attr_in5_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in6_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in7_min_alarm.dev_attr.attr,
- &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in9_input.dev_attr.attr,
-
- &sensor_dev_attr_power1_input.dev_attr.attr,
- &sensor_dev_attr_power2_input.dev_attr.attr,
- &sensor_dev_attr_power3_input.dev_attr.attr,
- &sensor_dev_attr_power4_input.dev_attr.attr,
-
- NULL,
+static const struct hwmon_channel_info ltc4245_curr = {
+ .type = hwmon_curr,
+ .config = ltc4245_curr_config,
};
-static struct attribute *ltc4245_gpio_attributes[] = {
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
- NULL,
+static const u32 ltc4245_power_config[] = {
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ 0
};
-static const struct attribute_group ltc4245_std_group = {
- .attrs = ltc4245_std_attributes,
+static const struct hwmon_channel_info ltc4245_power = {
+ .type = hwmon_power,
+ .config = ltc4245_power_config,
};
-static const struct attribute_group ltc4245_gpio_group = {
- .attrs = ltc4245_gpio_attributes,
+static const struct hwmon_channel_info *ltc4245_info[] = {
+ &ltc4245_in,
+ &ltc4245_curr,
+ &ltc4245_power,
+ NULL
};
-static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
-{
- /* standard sysfs attributes */
- data->groups[0] = &ltc4245_std_group;
+static const struct hwmon_ops ltc4245_hwmon_ops = {
+ .is_visible = ltc4245_is_visible,
+ .read = ltc4245_read,
+};
- /* if we're using the extra gpio support, register it's attributes */
- if (data->use_extra_gpios)
- data->groups[1] = &ltc4245_gpio_group;
-}
+static const struct hwmon_chip_info ltc4245_chip_info = {
+ .ops = &ltc4245_hwmon_ops,
+ .info = ltc4245_info,
+};
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
{
@@ -502,12 +492,10 @@ static int ltc4245_probe(struct i2c_client *client,
i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
- /* Add sysfs hooks */
- ltc4245_sysfs_add_groups(data);
-
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
+ client->name, data,
+ &ltc4245_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
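For orientation, the conversions above and below all follow the same registration shape. The fragment here is a minimal illustrative sketch of the new hwmon_chip_info API — the "demo" names and the trivial callback bodies are placeholders for this sketch, not code taken from the patch.

#include <linux/hwmon.h>
#include <linux/err.h>

/* Trivial read callback: every supported attribute reports 0. */
static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	*val = 0;
	return 0;
}

/* Trivial visibility callback: expose everything read-only. */
static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return S_IRUGO;
}

/* One voltage channel with an input value and a minimum alarm. */
static const u32 demo_in_config[] = {
	HWMON_I_INPUT | HWMON_I_MIN_ALARM,
	0
};

static const struct hwmon_channel_info demo_in = {
	.type = hwmon_in,
	.config = demo_in_config,
};

static const struct hwmon_channel_info *demo_info[] = {
	&demo_in,
	NULL
};

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};

/*
 * In probe():
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev, "demo", drvdata,
 *							  &demo_chip_info, NULL);
 *	return PTR_ERR_OR_ZERO(hwmon_dev);
 */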
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 69c0ac80a946..bef84e085973 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -17,7 +17,6 @@
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/jiffies.h>
@@ -169,362 +168,288 @@ static u8 bits_for_tach_period(int rpm)
return bits;
}
-static ssize_t get_fan(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int max31790_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max31790_data *data = max31790_update_device(dev);
int sr, rpm;
if (IS_ERR(data))
return PTR_ERR(data);
- sr = get_tach_period(data->fan_dynamics[attr->index]);
- rpm = RPM_FROM_REG(data->tach[attr->index], sr);
-
- return sprintf(buf, "%d\n", rpm);
-}
-
-static ssize_t get_fan_target(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max31790_data *data = max31790_update_device(dev);
- int sr, rpm;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- sr = get_tach_period(data->fan_dynamics[attr->index]);
- rpm = RPM_FROM_REG(data->target_count[attr->index], sr);
-
- return sprintf(buf, "%d\n", rpm);
+ switch (attr) {
+ case hwmon_fan_input:
+ sr = get_tach_period(data->fan_dynamics[channel]);
+ rpm = RPM_FROM_REG(data->tach[channel], sr);
+ *val = rpm;
+ return 0;
+ case hwmon_fan_target:
+ sr = get_tach_period(data->fan_dynamics[channel]);
+ rpm = RPM_FROM_REG(data->target_count[channel], sr);
+ *val = rpm;
+ return 0;
+ case hwmon_fan_fault:
+ *val = !!(data->fault_status & (1 << channel));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_fan_target(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int max31790_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max31790_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ int target_count;
+ int err = 0;
u8 bits;
int sr;
- int target_count;
- unsigned long rpm;
- int err;
-
- err = kstrtoul(buf, 10, &rpm);
- if (err)
- return err;
mutex_lock(&data->update_lock);
- rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
- bits = bits_for_tach_period(rpm);
- data->fan_dynamics[attr->index] =
- ((data->fan_dynamics[attr->index]
- & ~MAX31790_FAN_DYN_SR_MASK)
- | (bits << MAX31790_FAN_DYN_SR_SHIFT));
- err = i2c_smbus_write_byte_data(client,
- MAX31790_REG_FAN_DYNAMICS(attr->index),
- data->fan_dynamics[attr->index]);
-
- if (err < 0) {
- mutex_unlock(&data->update_lock);
- return err;
+ switch (attr) {
+ case hwmon_fan_target:
+ val = clamp_val(val, FAN_RPM_MIN, FAN_RPM_MAX);
+ bits = bits_for_tach_period(val);
+ data->fan_dynamics[channel] =
+ ((data->fan_dynamics[channel] &
+ ~MAX31790_FAN_DYN_SR_MASK) |
+ (bits << MAX31790_FAN_DYN_SR_SHIFT));
+ err = i2c_smbus_write_byte_data(client,
+ MAX31790_REG_FAN_DYNAMICS(channel),
+ data->fan_dynamics[channel]);
+ if (err < 0)
+ break;
+
+ sr = get_tach_period(data->fan_dynamics[channel]);
+ target_count = RPM_TO_REG(val, sr);
+ target_count = clamp_val(target_count, 0x1, 0x7FF);
+
+ data->target_count[channel] = target_count << 5;
+
+ err = i2c_smbus_write_word_swapped(client,
+ MAX31790_REG_TARGET_COUNT(channel),
+ data->target_count[channel]);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
}
- sr = get_tach_period(data->fan_dynamics[attr->index]);
- target_count = RPM_TO_REG(rpm, sr);
- target_count = clamp_val(target_count, 0x1, 0x7FF);
-
- data->target_count[attr->index] = target_count << 5;
-
- err = i2c_smbus_write_word_swapped(client,
- MAX31790_REG_TARGET_COUNT(attr->index),
- data->target_count[attr->index]);
-
mutex_unlock(&data->update_lock);
- if (err < 0)
- return err;
+ return err;
+}
- return count;
+static umode_t max31790_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct max31790_data *data = _data;
+ u8 fan_config = data->fan_config[channel % NR_CHANNEL];
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ if (channel < NR_CHANNEL ||
+ (fan_config & MAX31790_FAN_CFG_TACH_INPUT))
+ return S_IRUGO;
+ return 0;
+ case hwmon_fan_target:
+ if (channel < NR_CHANNEL &&
+ !(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
+ return S_IRUGO | S_IWUSR;
+ return 0;
+ default:
+ return 0;
+ }
}
-static ssize_t get_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max31790_data *data = max31790_update_device(dev);
- int pwm;
+ u8 fan_config;
 if (IS_ERR(data))
 return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
- pwm = data->pwm[attr->index] >> 8;
-
- return sprintf(buf, "%d\n", pwm);
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = data->pwm[channel] >> 8;
+ return 0;
+ case hwmon_pwm_enable:
+ if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
+ *val = 2;
+ else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
+ *val = 1;
+ else
+ *val = 0;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t set_pwm(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max31790_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- unsigned long pwm;
- int err;
-
- err = kstrtoul(buf, 10, &pwm);
- if (err)
- return err;
-
- if (pwm > 255)
- return -EINVAL;
+ u8 fan_config;
+ int err = 0;
mutex_lock(&data->update_lock);
- data->pwm[attr->index] = pwm << 8;
- err = i2c_smbus_write_word_swapped(client,
- MAX31790_REG_PWMOUT(attr->index),
- data->pwm[attr->index]);
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255) {
+ err = -EINVAL;
+ break;
+ }
+ data->pwm[channel] = val << 8;
+ err = i2c_smbus_write_word_swapped(client,
+ MAX31790_REG_PWMOUT(channel),
+ data->pwm[channel]);
+ break;
+ case hwmon_pwm_enable:
+ fan_config = data->fan_config[channel];
+ if (val == 0) {
+ fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
+ MAX31790_FAN_CFG_RPM_MODE);
+ } else if (val == 1) {
+ fan_config = (fan_config |
+ MAX31790_FAN_CFG_TACH_INPUT_EN) &
+ ~MAX31790_FAN_CFG_RPM_MODE;
+ } else if (val == 2) {
+ fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
+ MAX31790_FAN_CFG_RPM_MODE;
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ data->fan_config[channel] = fan_config;
+ err = i2c_smbus_write_byte_data(client,
+ MAX31790_REG_FAN_CONFIG(channel),
+ fan_config);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
mutex_unlock(&data->update_lock);
- if (err < 0)
- return err;
-
- return count;
+ return err;
}
-static ssize_t get_pwm_enable(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static umode_t max31790_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max31790_data *data = max31790_update_device(dev);
- int mode;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- if (data->fan_config[attr->index] & MAX31790_FAN_CFG_RPM_MODE)
- mode = 2;
- else if (data->fan_config[attr->index] & MAX31790_FAN_CFG_TACH_INPUT_EN)
- mode = 1;
- else
- mode = 0;
-
- return sprintf(buf, "%d\n", mode);
+ const struct max31790_data *data = _data;
+ u8 fan_config = data->fan_config[channel];
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ if (!(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
+ return S_IRUGO | S_IWUSR;
+ return 0;
+ default:
+ return 0;
+ }
}
-static ssize_t set_pwm_enable(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int max31790_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max31790_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long mode;
- int err;
-
- err = kstrtoul(buf, 10, &mode);
- if (err)
- return err;
-
- switch (mode) {
- case 0:
- data->fan_config[attr->index] =
- data->fan_config[attr->index]
- & ~(MAX31790_FAN_CFG_TACH_INPUT_EN
- | MAX31790_FAN_CFG_RPM_MODE);
- break;
- case 1:
- data->fan_config[attr->index] =
- (data->fan_config[attr->index]
- | MAX31790_FAN_CFG_TACH_INPUT_EN)
- & ~MAX31790_FAN_CFG_RPM_MODE;
- break;
- case 2:
- data->fan_config[attr->index] =
- data->fan_config[attr->index]
- | MAX31790_FAN_CFG_TACH_INPUT_EN
- | MAX31790_FAN_CFG_RPM_MODE;
- break;
+ switch (type) {
+ case hwmon_fan:
+ return max31790_read_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return max31790_read_pwm(dev, attr, channel, val);
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
-
- mutex_lock(&data->update_lock);
-
- err = i2c_smbus_write_byte_data(client,
- MAX31790_REG_FAN_CONFIG(attr->index),
- data->fan_config[attr->index]);
-
- mutex_unlock(&data->update_lock);
-
- if (err < 0)
- return err;
-
- return count;
}
-static ssize_t get_fan_fault(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int max31790_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max31790_data *data = max31790_update_device(dev);
- int fault;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- fault = !!(data->fault_status & (1 << attr->index));
+ switch (type) {
+ case hwmon_fan:
+ return max31790_write_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return max31790_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- return sprintf(buf, "%d\n", fault);
+static umode_t max31790_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return max31790_fan_is_visible(data, attr, channel);
+ case hwmon_pwm:
+ return max31790_pwm_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, get_fan, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, get_fan, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, get_fan_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, get_fan_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, get_fan_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, get_fan_fault, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, get_fan_fault, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, get_fan, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, get_fan, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan9_input, S_IRUGO, get_fan, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan10_input, S_IRUGO, get_fan, NULL, 9);
-static SENSOR_DEVICE_ATTR(fan11_input, S_IRUGO, get_fan, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan12_input, S_IRUGO, get_fan, NULL, 11);
-
-static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, get_fan_fault, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, get_fan_fault, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan9_fault, S_IRUGO, get_fan_fault, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan10_fault, S_IRUGO, get_fan_fault, NULL, 9);
-static SENSOR_DEVICE_ATTR(fan11_fault, S_IRUGO, get_fan_fault, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan12_fault, S_IRUGO, get_fan_fault, NULL, 11);
-
-static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 0);
-static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 1);
-static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 2);
-static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 3);
-static SENSOR_DEVICE_ATTR(fan5_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 4);
-static SENSOR_DEVICE_ATTR(fan6_target, S_IWUSR | S_IRUGO,
- get_fan_target, set_fan_target, 5);
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2);
-static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3);
-static SENSOR_DEVICE_ATTR(pwm5, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 4);
-static SENSOR_DEVICE_ATTR(pwm6, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 5);
-
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 2);
-static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 3);
-static SENSOR_DEVICE_ATTR(pwm5_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 4);
-static SENSOR_DEVICE_ATTR(pwm6_enable, S_IWUSR | S_IRUGO,
- get_pwm_enable, set_pwm_enable, 5);
-
-static struct attribute *max31790_attrs[] = {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan4_input.dev_attr.attr,
- &sensor_dev_attr_fan5_input.dev_attr.attr,
- &sensor_dev_attr_fan6_input.dev_attr.attr,
-
- &sensor_dev_attr_fan1_fault.dev_attr.attr,
- &sensor_dev_attr_fan2_fault.dev_attr.attr,
- &sensor_dev_attr_fan3_fault.dev_attr.attr,
- &sensor_dev_attr_fan4_fault.dev_attr.attr,
- &sensor_dev_attr_fan5_fault.dev_attr.attr,
- &sensor_dev_attr_fan6_fault.dev_attr.attr,
-
- &sensor_dev_attr_fan7_input.dev_attr.attr,
- &sensor_dev_attr_fan8_input.dev_attr.attr,
- &sensor_dev_attr_fan9_input.dev_attr.attr,
- &sensor_dev_attr_fan10_input.dev_attr.attr,
- &sensor_dev_attr_fan11_input.dev_attr.attr,
- &sensor_dev_attr_fan12_input.dev_attr.attr,
-
- &sensor_dev_attr_fan7_fault.dev_attr.attr,
- &sensor_dev_attr_fan8_fault.dev_attr.attr,
- &sensor_dev_attr_fan9_fault.dev_attr.attr,
- &sensor_dev_attr_fan10_fault.dev_attr.attr,
- &sensor_dev_attr_fan11_fault.dev_attr.attr,
- &sensor_dev_attr_fan12_fault.dev_attr.attr,
-
- &sensor_dev_attr_fan1_target.dev_attr.attr,
- &sensor_dev_attr_fan2_target.dev_attr.attr,
- &sensor_dev_attr_fan3_target.dev_attr.attr,
- &sensor_dev_attr_fan4_target.dev_attr.attr,
- &sensor_dev_attr_fan5_target.dev_attr.attr,
- &sensor_dev_attr_fan6_target.dev_attr.attr,
-
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_pwm3.dev_attr.attr,
- &sensor_dev_attr_pwm4.dev_attr.attr,
- &sensor_dev_attr_pwm5.dev_attr.attr,
- &sensor_dev_attr_pwm6.dev_attr.attr,
-
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm2_enable.dev_attr.attr,
- &sensor_dev_attr_pwm3_enable.dev_attr.attr,
- &sensor_dev_attr_pwm4_enable.dev_attr.attr,
- &sensor_dev_attr_pwm5_enable.dev_attr.attr,
- &sensor_dev_attr_pwm6_enable.dev_attr.attr,
- NULL
+static const u32 max31790_fan_config[] = {
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ 0
};
-static umode_t max31790_attrs_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct max31790_data *data = dev_get_drvdata(dev);
- struct device_attribute *devattr =
- container_of(a, struct device_attribute, attr);
- int index = to_sensor_dev_attr(devattr)->index % NR_CHANNEL;
- u8 fan_config;
+static const struct hwmon_channel_info max31790_fan = {
+ .type = hwmon_fan,
+ .config = max31790_fan_config,
+};
- fan_config = data->fan_config[index];
+static const u32 max31790_pwm_config[] = {
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
+};
- if (n >= NR_CHANNEL * 2 && n < NR_CHANNEL * 4 &&
- !(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
- return 0;
- if (n >= NR_CHANNEL * 4 && (fan_config & MAX31790_FAN_CFG_TACH_INPUT))
- return 0;
+static const struct hwmon_channel_info max31790_pwm = {
+ .type = hwmon_pwm,
+ .config = max31790_pwm_config,
+};
- return a->mode;
-}
+static const struct hwmon_channel_info *max31790_info[] = {
+ &max31790_fan,
+ &max31790_pwm,
+ NULL
+};
+
+static const struct hwmon_ops max31790_hwmon_ops = {
+ .is_visible = max31790_is_visible,
+ .read = max31790_read,
+ .write = max31790_write,
+};
-static const struct attribute_group max31790_group = {
- .attrs = max31790_attrs,
- .is_visible = max31790_attrs_visible,
+static const struct hwmon_chip_info max31790_chip_info = {
+ .ops = &max31790_hwmon_ops,
+ .info = max31790_info,
};
-__ATTRIBUTE_GROUPS(max31790);
static int max31790_init_client(struct i2c_client *client,
struct max31790_data *data)
@@ -575,8 +500,10 @@ static int max31790_probe(struct i2c_client *client,
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- client->name, data, max31790_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max31790_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 162a520f4bd6..a993b44ed538 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -39,6 +39,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
+#include <linux/of_device.h>
/*
* Insmod parameters
@@ -48,7 +49,7 @@
static int fan_voltage;
/* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */
static int prescaler;
-/* clock: The clock frequency of the chip the driver should assume */
+/* clock: The clock frequency of the chip (max6651 can be clocked externally) */
static int clock = 254000;
module_param(fan_voltage, int, S_IRUGO);
@@ -133,6 +134,19 @@ static const u8 tach_reg[] = {
MAX6650_REG_TACH3,
};
+static const struct of_device_id max6650_dt_match[] = {
+ {
+ .compatible = "maxim,max6650",
+ .data = (void *)1
+ },
+ {
+ .compatible = "maxim,max6651",
+ .data = (void *)4
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max6650_dt_match);
+
static struct max6650_data *max6650_update_device(struct device *dev)
{
struct max6650_data *data = dev_get_drvdata(dev);
@@ -171,6 +185,30 @@ static struct max6650_data *max6650_update_device(struct device *dev)
return data;
}
+/*
+ * Change the operating mode of the chip (if needed).
+ * mode is one of the MAX6650_CFG_MODE_* values.
+ */
+static int max6650_set_operating_mode(struct max6650_data *data, u8 mode)
+{
+ int result;
+ u8 config = data->config;
+
+ if (mode == (config & MAX6650_CFG_MODE_MASK))
+ return 0;
+
+ config = (config & ~MAX6650_CFG_MODE_MASK) | mode;
+
+ result = i2c_smbus_write_byte_data(data->client, MAX6650_REG_CONFIG,
+ config);
+ if (result < 0)
+ return result;
+
+ data->config = config;
+
+ return 0;
+}
+
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -252,18 +290,12 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int max6650_set_target(struct max6650_data *data, unsigned long rpm)
{
- struct max6650_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
int kscale, ktach;
- unsigned long rpm;
- int err;
- err = kstrtoul(buf, 10, &rpm);
- if (err)
- return err;
+ if (rpm == 0)
+ return max6650_set_operating_mode(data, MAX6650_CFG_MODE_OFF);
rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
@@ -274,8 +306,6 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
* KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
*/
- mutex_lock(&data->update_lock);
-
kscale = DIV_FROM_REG(data->config);
ktach = ((clock * kscale) / (256 * rpm / 60)) - 1;
if (ktach < 0)
@@ -284,10 +314,30 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
ktach = 255;
data->speed = ktach;
- i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed);
+ return i2c_smbus_write_byte_data(data->client, MAX6650_REG_SPEED,
+ data->speed);
+}
+
+static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct max6650_data *data = dev_get_drvdata(dev);
+ unsigned long rpm;
+ int err;
+
+ err = kstrtoul(buf, 10, &rpm);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ err = max6650_set_target(data, rpm);
mutex_unlock(&data->update_lock);
+ if (err < 0)
+ return err;
+
return count;
}
@@ -341,12 +391,11 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
data->dac = 180 - (180 * pwm)/255;
else
data->dac = 76 - (76 * pwm)/255;
-
- i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
+ err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
/*
@@ -355,14 +404,14 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
* 0 = Fan always on
* 1 = Open loop, Voltage is set according to speed, not regulated.
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
+ * 3 = Fan off
*/
-
static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
- int sysfs_modes[4] = {0, 1, 2, 1};
+ int sysfs_modes[4] = {0, 3, 2, 1};
return sprintf(buf, "%d\n", sysfs_modes[mode]);
}
@@ -371,25 +420,25 @@ static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct max6650_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int max6650_modes[3] = {0, 3, 2};
unsigned long mode;
int err;
+ const u8 max6650_modes[] = {
+ MAX6650_CFG_MODE_ON,
+ MAX6650_CFG_MODE_OPEN_LOOP,
+ MAX6650_CFG_MODE_CLOSED_LOOP,
+ MAX6650_CFG_MODE_OFF,
+ };
err = kstrtoul(buf, 10, &mode);
if (err)
return err;
- if (mode > 2)
+ if (mode >= ARRAY_SIZE(max6650_modes))
return -EINVAL;
mutex_lock(&data->update_lock);
- data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
- data->config = (data->config & ~MAX6650_CFG_MODE_MASK)
- | (max6650_modes[mode] << 4);
-
- i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config);
+ max6650_set_operating_mode(data, max6650_modes[mode]);
mutex_unlock(&data->update_lock);
@@ -566,6 +615,18 @@ static int max6650_init_client(struct max6650_data *data,
struct device *dev = &client->dev;
int config;
int err = -EIO;
+ u32 voltage;
+ u32 prescale;
+ u32 target_rpm;
+
+ if (of_property_read_u32(dev->of_node, "maxim,fan-microvolt",
+ &voltage))
+ voltage = fan_voltage;
+ else
+ voltage /= 1000000; /* Microvolts to volts */
+ if (of_property_read_u32(dev->of_node, "maxim,fan-prescale",
+ &prescale))
+ prescale = prescaler;
config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
@@ -574,7 +635,7 @@ static int max6650_init_client(struct max6650_data *data,
return err;
}
- switch (fan_voltage) {
+ switch (voltage) {
case 0:
break;
case 5:
@@ -584,14 +645,10 @@ static int max6650_init_client(struct max6650_data *data,
config |= MAX6650_CFG_V12;
break;
default:
- dev_err(dev, "illegal value for fan_voltage (%d)\n",
- fan_voltage);
+ dev_err(dev, "illegal value for fan_voltage (%d)\n", voltage);
}
- dev_info(dev, "Fan voltage is set to %dV.\n",
- (config & MAX6650_CFG_V12) ? 12 : 5);
-
- switch (prescaler) {
+ switch (prescale) {
case 0:
break;
case 1:
@@ -614,28 +671,13 @@ static int max6650_init_client(struct max6650_data *data,
| MAX6650_CFG_PRESCALER_16;
break;
default:
- dev_err(dev, "illegal value for prescaler (%d)\n", prescaler);
+ dev_err(dev, "illegal value for prescaler (%d)\n", prescale);
}
- dev_info(dev, "Prescaler is set to %d.\n",
+ dev_info(dev, "Fan voltage: %dV, prescaler: %d.\n",
+ (config & MAX6650_CFG_V12) ? 12 : 5,
1 << (config & MAX6650_CFG_PRESCALER_MASK));
- /*
- * If mode is set to "full off", we change it to "open loop" and
- * set DAC to 255, which has the same effect. We do this because
- * there's no "full off" mode defined in hwmon specifications.
- */
-
- if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
- dev_dbg(dev, "Change mode to open loop, full off.\n");
- config = (config & ~MAX6650_CFG_MODE_MASK)
- | MAX6650_CFG_MODE_OPEN_LOOP;
- if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
- dev_err(dev, "DAC write error, aborting.\n");
- return err;
- }
- }
-
if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
dev_err(dev, "Config write error, aborting.\n");
return err;
@@ -644,6 +686,12 @@ static int max6650_init_client(struct max6650_data *data,
data->config = config;
data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);
+ if (!of_property_read_u32(client->dev.of_node, "maxim,fan-target-rpm",
+ &target_rpm)) {
+ max6650_set_target(data, target_rpm);
+ max6650_set_operating_mode(data, MAX6650_CFG_MODE_CLOSED_LOOP);
+ }
+
return 0;
}
@@ -651,6 +699,8 @@ static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ const struct of_device_id *of_id =
+ of_match_device(of_match_ptr(max6650_dt_match), dev);
struct max6650_data *data;
struct device *hwmon_dev;
int err;
@@ -661,7 +711,7 @@ static int max6650_probe(struct i2c_client *client,
data->client = client;
mutex_init(&data->update_lock);
- data->nr_fans = id->driver_data;
+ data->nr_fans = of_id ? (int)(uintptr_t)of_id->data : id->driver_data;
/*
* Initialize the max6650 chip
@@ -691,6 +741,7 @@ MODULE_DEVICE_TABLE(i2c, max6650_id);
static struct i2c_driver max6650_driver = {
.driver = {
.name = "max6650",
+ .of_match_table = of_match_ptr(max6650_dt_match),
},
.probe = max6650_probe,
.id_table = max6650_id,
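A quick worked example of the KTACH formula used by max6650_set_target() above (a sketch only; the numbers assume the driver's default 254 kHz clock and a prescaler of 1):

#include <linux/kernel.h>

/* KTACH = ((fCLK * KSCALE) / (256 * FanSpeed)) - 1, with FanSpeed in Hz (rpm / 60) */
static int demo_ktach(unsigned long clock_hz, int kscale, unsigned long rpm)
{
	int ktach = ((clock_hz * kscale) / (256 * rpm / 60)) - 1;

	/* e.g. clock_hz = 254000, kscale = 1, rpm = 3000  ->  ktach = 18 */
	return clamp_val(ktach, 0, 255);
}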
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index d087a8e00cf5..ce75dd4db7eb 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -195,6 +195,8 @@ superio_exit(int ioreg)
#define NUM_FAN 6
+#define TEMP_SOURCE_VIRTUAL 0x1f
+
/* Common and NCT6775 specific data */
/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -3940,7 +3942,7 @@ static int nct6775_probe(struct platform_device *pdev)
continue;
src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
- if (!src || (mask & (1 << src)))
+ if (!src)
continue;
if (src >= data->temp_label_num ||
@@ -3952,7 +3954,16 @@ static int nct6775_probe(struct platform_device *pdev)
continue;
}
- mask |= 1 << src;
+ /*
+ * For virtual temperature sources, the 'virtual' temperature
+ * for each fan reflects a different temperature, and there
+ * are no duplicates.
+ */
+ if (src != TEMP_SOURCE_VIRTUAL) {
+ if (mask & (1 << src))
+ continue;
+ mask |= 1 << src;
+ }
/* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
if (src <= data->temp_fixed_num) {
@@ -4232,11 +4243,11 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
if (err)
return err;
- if (force_id)
+ val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8) |
+ superio_inb(sioaddr, SIO_REG_DEVID + 1);
+ if (force_id && val != 0xffff)
val = force_id;
- else
- val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8)
- | superio_inb(sioaddr, SIO_REG_DEVID + 1);
+
switch (val & SIO_ID_MASK) {
case SIO_NCT6106_ID:
sio_data->kind = nct6106;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 08ff89d222e5..95a68ab175c7 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -21,7 +21,6 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#define VENDOR_ID_REG 0x7A /* Any bank */
#define NUVOTON_ID 0x50
@@ -153,341 +152,230 @@ static int nct7904_write_reg(struct nct7904_data *data,
return ret;
}
-/* FANIN ATTR */
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
+ unsigned int cnt, rpm;
int ret;
- unsigned cnt, rpm;
- ret = nct7904_read_reg16(data, BANK_0, FANIN1_HV_REG + index * 2);
- if (ret < 0)
- return ret;
- cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
- if (cnt == 0x1fff)
- rpm = 0;
- else
- rpm = 1350000 / cnt;
- return sprintf(buf, "%u\n", rpm);
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = nct7904_read_reg16(data, BANK_0,
+ FANIN1_HV_REG + channel * 2);
+ if (ret < 0)
+ return ret;
+ cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+ if (cnt == 0x1fff)
+ rpm = 0;
+ else
+ rpm = 1350000 / cnt;
+ *val = rpm;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static umode_t nct7904_fanin_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
+static umode_t nct7904_fan_is_visible(const void *_data, u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nct7904_data *data = dev_get_drvdata(dev);
+ const struct nct7904_data *data = _data;
- if (data->fanin_mask & (1 << n))
- return a->mode;
+ if (attr == hwmon_fan_input && data->fanin_mask & (1 << channel))
+ return S_IRUGO;
return 0;
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan9_input, S_IRUGO, show_fan, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan10_input, S_IRUGO, show_fan, NULL, 9);
-static SENSOR_DEVICE_ATTR(fan11_input, S_IRUGO, show_fan, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan12_input, S_IRUGO, show_fan, NULL, 11);
-
-static struct attribute *nct7904_fanin_attrs[] = {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan4_input.dev_attr.attr,
- &sensor_dev_attr_fan5_input.dev_attr.attr,
- &sensor_dev_attr_fan6_input.dev_attr.attr,
- &sensor_dev_attr_fan7_input.dev_attr.attr,
- &sensor_dev_attr_fan8_input.dev_attr.attr,
- &sensor_dev_attr_fan9_input.dev_attr.attr,
- &sensor_dev_attr_fan10_input.dev_attr.attr,
- &sensor_dev_attr_fan11_input.dev_attr.attr,
- &sensor_dev_attr_fan12_input.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group nct7904_fanin_group = {
- .attrs = nct7904_fanin_attrs,
- .is_visible = nct7904_fanin_is_visible,
+static u8 nct7904_chan_to_index[] = {
+ 0, /* Not used */
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 18, 19, 20, 16
};
-/* VSEN ATTR */
-static ssize_t show_voltage(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int nct7904_read_in(struct device *dev, u32 attr, int channel,
+ long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
- int ret;
- int volt;
+ int ret, volt, index;
- ret = nct7904_read_reg16(data, BANK_0, VSEN1_HV_REG + index * 2);
- if (ret < 0)
- return ret;
- volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
- if (index < 14)
- volt *= 2; /* 0.002V scale */
- else
- volt *= 6; /* 0.006V scale */
+ index = nct7904_chan_to_index[channel];
- return sprintf(buf, "%d\n", volt);
+ switch (attr) {
+ case hwmon_in_input:
+ ret = nct7904_read_reg16(data, BANK_0,
+ VSEN1_HV_REG + index * 2);
+ if (ret < 0)
+ return ret;
+ volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ if (index < 14)
+ volt *= 2; /* 0.002V scale */
+ else
+ volt *= 6; /* 0.006V scale */
+ *val = volt;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_ltemp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static umode_t nct7904_in_is_visible(const void *_data, u32 attr, int channel)
{
- struct nct7904_data *data = dev_get_drvdata(dev);
- int ret;
- int temp;
+ const struct nct7904_data *data = _data;
+ int index = nct7904_chan_to_index[channel];
- ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
- if (ret < 0)
- return ret;
- temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
- temp = sign_extend32(temp, 10) * 125;
-
- return sprintf(buf, "%d\n", temp);
-}
-
-static umode_t nct7904_vsen_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nct7904_data *data = dev_get_drvdata(dev);
+ if (channel > 0 && attr == hwmon_in_input &&
+ (data->vsen_mask & BIT(index)))
+ return S_IRUGO;
- if (data->vsen_mask & (1 << n))
- return a->mode;
return 0;
}
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_voltage, NULL, 7);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_voltage, NULL, 8);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_voltage, NULL, 9);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_voltage, NULL, 10);
-static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_voltage, NULL, 11);
-static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_voltage, NULL, 12);
-static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_voltage, NULL, 13);
-/*
- * Next 3 voltage sensors have specific names in the Nuvoton doc
- * (3VDD, VBAT, 3VSB) but we use vacant numbers for them.
- */
-static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_voltage, NULL, 14);
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_voltage, NULL, 15);
-static SENSOR_DEVICE_ATTR(in20_input, S_IRUGO, show_voltage, NULL, 16);
-/* This is not a voltage, but a local temperature sensor. */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_ltemp, NULL, 0);
-static SENSOR_DEVICE_ATTR(in17_input, S_IRUGO, show_voltage, NULL, 18);
-static SENSOR_DEVICE_ATTR(in18_input, S_IRUGO, show_voltage, NULL, 19);
-static SENSOR_DEVICE_ATTR(in19_input, S_IRUGO, show_voltage, NULL, 20);
-
-static struct attribute *nct7904_vsen_attrs[] = {
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
- &sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
- &sensor_dev_attr_in12_input.dev_attr.attr,
- &sensor_dev_attr_in13_input.dev_attr.attr,
- &sensor_dev_attr_in14_input.dev_attr.attr,
- &sensor_dev_attr_in15_input.dev_attr.attr,
- &sensor_dev_attr_in16_input.dev_attr.attr,
- &sensor_dev_attr_in20_input.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_in17_input.dev_attr.attr,
- &sensor_dev_attr_in18_input.dev_attr.attr,
- &sensor_dev_attr_in19_input.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group nct7904_vsen_group = {
- .attrs = nct7904_vsen_attrs,
- .is_visible = nct7904_vsen_is_visible,
-};
-
-/* CPU_TEMP ATTR */
-static ssize_t show_tcpu(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
- int ret;
- int temp;
-
- ret = nct7904_read_reg16(data, BANK_0, T_CPU1_HV_REG + index * 2);
- if (ret < 0)
- return ret;
-
- temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
- temp = sign_extend32(temp, 10) * 125;
- return sprintf(buf, "%d\n", temp);
+ int ret, temp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
+ else
+ ret = nct7904_read_reg16(data, BANK_0,
+ T_CPU1_HV_REG + (channel - 1) * 2);
+ if (ret < 0)
+ return ret;
+ temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ *val = sign_extend32(temp, 10) * 125;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static umode_t nct7904_tcpu_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
+static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nct7904_data *data = dev_get_drvdata(dev);
+ const struct nct7904_data *data = _data;
+
+ if (attr == hwmon_temp_input) {
+ if (channel == 0) {
+ if (data->vsen_mask & BIT(17))
+ return S_IRUGO;
+ } else {
+ if (data->tcpu_mask & BIT(channel - 1))
+ return S_IRUGO;
+ }
+ }
- if (data->tcpu_mask & (1 << n))
- return a->mode;
return 0;
}
-/* "temp1_input" reserved for local temp */
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_tcpu, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_tcpu, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_tcpu, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_tcpu, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_tcpu, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_tcpu, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_tcpu, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO, show_tcpu, NULL, 7);
-
-static struct attribute *nct7904_tcpu_attrs[] = {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp7_input.dev_attr.attr,
- &sensor_dev_attr_temp8_input.dev_attr.attr,
- &sensor_dev_attr_temp9_input.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group nct7904_tcpu_group = {
- .attrs = nct7904_tcpu_attrs,
- .is_visible = nct7904_tcpu_is_visible,
-};
-
-/* PWM ATTR */
-static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
- unsigned long val;
int ret;
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
- if (val > 255)
- return -EINVAL;
-
- ret = nct7904_write_reg(data, BANK_3, FANCTL1_OUT_REG + index, val);
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ case hwmon_pwm_enable:
+ ret = nct7904_read_reg(data, BANK_3, FANCTL1_FMR_REG + channel);
+ if (ret < 0)
+ return ret;
- return ret ? ret : count;
+ *val = ret ? 2 : 1;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
{
- int index = to_sensor_dev_attr(devattr)->index;
struct nct7904_data *data = dev_get_drvdata(dev);
- int val;
-
- val = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + index);
- if (val < 0)
- return val;
+ int ret;
- return sprintf(buf, "%d\n", val);
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ ret = nct7904_write_reg(data, BANK_3, FANCTL1_OUT_REG + channel,
+ val);
+ return ret;
+ case hwmon_pwm_enable:
+ if (val < 1 || val > 2 ||
+ (val == 2 && !data->fan_mode[channel]))
+ return -EINVAL;
+ ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + channel,
+ val == 2 ? data->fan_mode[channel] : 0);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static ssize_t store_enable(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- int index = to_sensor_dev_attr(devattr)->index;
- struct nct7904_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int ret;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
- if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
- return -EINVAL;
-
- ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
- val == 2 ? data->fan_mode[index] : 0);
-
- return ret ? ret : count;
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ return S_IRUGO | S_IWUSR;
+ default:
+ return 0;
+ }
}
-/* Return 1 for manual mode or 2 for SmartFan mode */
-static ssize_t show_enable(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int nct7904_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
- struct nct7904_data *data = dev_get_drvdata(dev);
- int val;
-
- val = nct7904_read_reg(data, BANK_3, FANCTL1_FMR_REG + index);
- if (val < 0)
- return val;
-
- return sprintf(buf, "%d\n", val ? 2 : 1);
+ switch (type) {
+ case hwmon_in:
+ return nct7904_read_in(dev, attr, channel, val);
+ case hwmon_fan:
+ return nct7904_read_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return nct7904_read_pwm(dev, attr, channel, val);
+ case hwmon_temp:
+ return nct7904_read_temp(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
}
-/* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
- show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
- show_enable, store_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
- show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
- show_enable, store_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
- show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
- show_enable, store_enable, 2);
-static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
- show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
- show_enable, store_enable, 3);
-
-static struct attribute *nct7904_fanctl_attrs[] = {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_pwm2_enable.dev_attr.attr,
- &sensor_dev_attr_pwm3.dev_attr.attr,
- &sensor_dev_attr_pwm3_enable.dev_attr.attr,
- &sensor_dev_attr_pwm4.dev_attr.attr,
- &sensor_dev_attr_pwm4_enable.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group nct7904_fanctl_group = {
- .attrs = nct7904_fanctl_attrs,
-};
+static int nct7904_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return nct7904_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static const struct attribute_group *nct7904_groups[] = {
- &nct7904_fanin_group,
- &nct7904_vsen_group,
- &nct7904_tcpu_group,
- &nct7904_fanctl_group,
- NULL
-};
+static umode_t nct7904_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return nct7904_in_is_visible(data, attr, channel);
+ case hwmon_fan:
+ return nct7904_fan_is_visible(data, attr, channel);
+ case hwmon_pwm:
+ return nct7904_pwm_is_visible(data, attr, channel);
+ case hwmon_temp:
+ return nct7904_temp_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int nct7904_detect(struct i2c_client *client,
@@ -512,6 +400,103 @@ static int nct7904_detect(struct i2c_client *client,
return 0;
}
+static const u32 nct7904_in_config[] = {
+ HWMON_I_INPUT, /* dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info nct7904_in = {
+ .type = hwmon_in,
+ .config = nct7904_in_config,
+};
+
+static const u32 nct7904_fan_config[] = {
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info nct7904_fan = {
+ .type = hwmon_fan,
+ .config = nct7904_fan_config,
+};
+
+static const u32 nct7904_pwm_config[] = {
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
+};
+
+static const struct hwmon_channel_info nct7904_pwm = {
+ .type = hwmon_pwm,
+ .config = nct7904_pwm_config,
+};
+
+static const u32 nct7904_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info nct7904_temp = {
+ .type = hwmon_temp,
+ .config = nct7904_temp_config,
+};
+
+static const struct hwmon_channel_info *nct7904_info[] = {
+ &nct7904_in,
+ &nct7904_fan,
+ &nct7904_pwm,
+ &nct7904_temp,
+ NULL
+};
+
+static const struct hwmon_ops nct7904_hwmon_ops = {
+ .is_visible = nct7904_is_visible,
+ .read = nct7904_read,
+ .write = nct7904_write,
+};
+
+static const struct hwmon_chip_info nct7904_chip_info = {
+ .ops = &nct7904_hwmon_ops,
+ .info = nct7904_info,
+};
+
static int nct7904_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -566,8 +551,8 @@ static int nct7904_probe(struct i2c_client *client,
}
hwmon_dev =
- devm_hwmon_device_register_with_groups(dev, client->name, data,
- nct7904_groups);
+ devm_hwmon_device_register_with_info(dev, client->name, data,
+ &nct7904_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8ef7b713cb1a..c52d07c6b49f 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -253,12 +253,9 @@ static const struct ntc_compensation b57330v2103[] = {
};
struct ntc_data {
- struct device *hwmon_dev;
struct ntc_thermistor_platform_data *pdata;
const struct ntc_compensation *comp;
- struct device *dev;
int n_comp;
- char name[PLATFORM_NAME_SIZE];
};
#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
@@ -316,22 +313,22 @@ static const struct of_device_id ntc_match[] = {
MODULE_DEVICE_TABLE(of, ntc_match);
static struct ntc_thermistor_platform_data *
-ntc_thermistor_parse_dt(struct platform_device *pdev)
+ntc_thermistor_parse_dt(struct device *dev)
{
struct iio_channel *chan;
enum iio_chan_type type;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct ntc_thermistor_platform_data *pdata;
int ret;
if (!np)
return NULL;
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- chan = iio_channel_get(&pdev->dev, NULL);
+ chan = devm_iio_channel_get(dev, NULL);
if (IS_ERR(chan))
return ERR_CAST(chan);
@@ -359,22 +356,15 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
return pdata;
}
-static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
-{
- if (pdata->chan)
- iio_channel_release(pdata->chan);
-}
#else
static struct ntc_thermistor_platform_data *
-ntc_thermistor_parse_dt(struct platform_device *pdev)
+ntc_thermistor_parse_dt(struct device *dev)
{
return NULL;
}
#define ntc_match NULL
-static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
-{ }
#endif
static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
@@ -516,9 +506,8 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
return -EINVAL;
}
-static int ntc_read_temp(void *dev, int *temp)
+static int ntc_read_temp(void *data, int *temp)
{
- struct ntc_data *data = dev_get_drvdata(dev);
int ohm;
ohm = ntc_thermistor_get_ohm(data);
@@ -530,14 +519,6 @@ static int ntc_read_temp(void *dev, int *temp)
return 0;
}
-static ssize_t ntc_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ntc_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-
static ssize_t ntc_show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -559,18 +540,13 @@ static ssize_t ntc_show_temp(struct device *dev,
static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0);
-static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL);
-static struct attribute *ntc_attributes[] = {
- &dev_attr_name.attr,
+static struct attribute *ntc_attrs[] = {
&sensor_dev_attr_temp1_type.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
-
-static const struct attribute_group ntc_attr_group = {
- .attrs = ntc_attributes,
-};
+ATTRIBUTE_GROUPS(ntc);
static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
.get_temp = ntc_read_temp,
@@ -579,33 +555,34 @@ static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
static int ntc_thermistor_probe(struct platform_device *pdev)
{
struct thermal_zone_device *tz;
+ struct device *dev = &pdev->dev;
const struct of_device_id *of_id =
- of_match_device(of_match_ptr(ntc_match), &pdev->dev);
+ of_match_device(of_match_ptr(ntc_match), dev);
const struct platform_device_id *pdev_id;
struct ntc_thermistor_platform_data *pdata;
+ struct device *hwmon_dev;
struct ntc_data *data;
- int ret;
- pdata = ntc_thermistor_parse_dt(pdev);
+ pdata = ntc_thermistor_parse_dt(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
else if (pdata == NULL)
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "No platform init data supplied.\n");
+ dev_err(dev, "No platform init data supplied.\n");
return -ENODEV;
}
/* Either one of the two is required. */
if (!pdata->read_uv && !pdata->read_ohm) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"Both read_uv and read_ohm missing. Need either one of the two.\n");
return -EINVAL;
}
if (pdata->read_uv && pdata->read_ohm) {
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"Only one of read_uv and read_ohm is needed; ignoring read_uv.\n");
pdata->read_uv = NULL;
}
@@ -617,20 +594,17 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
NTC_CONNECTED_POSITIVE) ||
(pdata->connect != NTC_CONNECTED_POSITIVE &&
pdata->connect != NTC_CONNECTED_GROUND))) {
- dev_err(&pdev->dev,
- "Required data to use read_uv not supplied.\n");
+ dev_err(dev, "Required data to use read_uv not supplied.\n");
return -EINVAL;
}
- data = devm_kzalloc(&pdev->dev, sizeof(struct ntc_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct ntc_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- data->dev = &pdev->dev;
data->pdata = pdata;
- strlcpy(data->name, pdev_id->name, sizeof(data->name));
switch (pdev_id->driver_data) {
case TYPE_NCPXXWB473:
@@ -654,49 +628,25 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
data->n_comp = ARRAY_SIZE(ncpXXxh103);
break;
default:
- dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
+ dev_err(dev, "Unknown device type: %lu(%s)\n",
pdev_id->driver_data, pdev_id->name);
return -EINVAL;
}
- platform_set_drvdata(pdev, data);
-
- ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
- if (ret) {
- dev_err(data->dev, "unable to create sysfs files\n");
- return ret;
- }
-
- data->hwmon_dev = hwmon_device_register(data->dev);
- if (IS_ERR(data->hwmon_dev)) {
- dev_err(data->dev, "unable to register as hwmon device.\n");
- ret = PTR_ERR(data->hwmon_dev);
- goto err_after_sysfs;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, pdev_id->name,
+ data, ntc_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "unable to register as hwmon device.\n");
+ return PTR_ERR(hwmon_dev);
}
- dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
- pdev_id->name);
+ dev_info(dev, "Thermistor type: %s successfully probed.\n",
+ pdev_id->name);
- tz = devm_thermal_zone_of_sensor_register(data->dev, 0, data->dev,
+ tz = devm_thermal_zone_of_sensor_register(dev, 0, data,
&ntc_of_thermal_ops);
if (IS_ERR(tz))
- dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
-
- return 0;
-err_after_sysfs:
- sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
- ntc_iio_channel_release(pdata);
- return ret;
-}
-
-static int ntc_thermistor_remove(struct platform_device *pdev)
-{
- struct ntc_data *data = platform_get_drvdata(pdev);
- struct ntc_thermistor_platform_data *pdata = data->pdata;
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
- ntc_iio_channel_release(pdata);
+ dev_dbg(dev, "Failed to register to thermal fw.\n");
return 0;
}
@@ -707,7 +657,6 @@ static struct platform_driver ntc_thermistor_driver = {
.of_match_table = of_match_ptr(ntc_match),
},
.probe = ntc_thermistor_probe,
- .remove = ntc_thermistor_remove,
.id_table = ntc_thermistor_id,
};
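For context on the ntc_thermistor change above: once devm_thermal_zone_of_sensor_register() is passed the driver data as the sensor argument, the .get_temp callback receives that pointer directly instead of going through dev_get_drvdata(). A minimal illustrative hookup follows; the "demo" names and the fixed 25 °C reading are placeholders, not the driver's logic.

#include <linux/thermal.h>

static int demo_get_temp(void *data, int *temp)
{
	*temp = 25000;		/* millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_of_device_ops demo_thermal_ops = {
	.get_temp = demo_get_temp,
};

/*
 * In probe():
 *	tz = devm_thermal_zone_of_sensor_register(dev, 0, drvdata,
 *						   &demo_thermal_ops);
 *	if (IS_ERR(tz))
 *		dev_dbg(dev, "no thermal zone registered\n");
 */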
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 054d3d863802..cad1229b7e17 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -126,12 +126,12 @@ config SENSORS_TPS40422
be called tps40422.
config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
+ tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
default n
help
If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
- Controllers.
+ UCD90120, UCD90124, UCD90160, UCD9090, UCD90910 Sequencer and System
+ Health Controllers.
This driver can also be built as a module. If so, the module will
be called ucd9000.
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 0a74991a60f0..44ca8a94873d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
+#include <linux/i2c/pmbus.h>
#include "pmbus.h"
/*
@@ -167,14 +168,26 @@ static int pmbus_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pmbus_driver_info *info;
+ struct pmbus_platform_data *pdata = NULL;
+ struct device *dev = &client->dev;
- info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
- GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ if (!strcmp(id->name, "dps460") || !strcmp(id->name, "dps800") ||
+ !strcmp(id->name, "sgd009")) {
+ pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->flags = PMBUS_SKIP_STATUS_CHECK;
+ }
+
info->pages = id->driver_data;
info->identify = pmbus_identify;
+ dev->platform_data = pdata;
return pmbus_do_probe(client, id, info);
}
@@ -186,6 +199,8 @@ static const struct i2c_device_id pmbus_id[] = {
{"adp4000", 1},
{"bmr453", 1},
{"bmr454", 1},
+ {"dps460", 1},
+ {"dps800", 1},
{"mdt040", 1},
{"ncp4200", 1},
{"ncp4208", 1},
@@ -193,6 +208,7 @@ static const struct i2c_device_id pmbus_id[] = {
{"pdt006", 1},
{"pdt012", 1},
{"pmbus", 0},
+ {"sgd009", 1},
{"tps40400", 1},
{"tps544b20", 1},
{"tps544b25", 1},
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index fbb1479d3ad4..3e3aa950277f 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -28,7 +28,7 @@
#include <linux/i2c/pmbus.h>
#include "pmbus.h"
-enum chips { ucd9000, ucd90120, ucd90124, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
@@ -112,6 +112,7 @@ static const struct i2c_device_id ucd9000_id[] = {
{"ucd9000", ucd9000},
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
+ {"ucd90160", ucd90160},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 25b44e68926d..559a3dcd64d8 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -255,7 +255,6 @@ static const struct of_device_id scpi_of_match[] = {
static struct platform_driver scpi_hwmon_platdrv = {
.driver = {
.name = "scpi-hwmon",
- .owner = THIS_MODULE,
.of_match_table = scpi_of_match,
},
.probe = scpi_hwmon_probe,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8479ac5eb853..36bba2a816a4 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -25,7 +25,6 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
-#include <linux/thermal.h>
#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -79,84 +78,113 @@ static inline u16 tmp102_mC_to_reg(int val)
return (val * 128) / 1000;
}
-static int tmp102_read_temp(void *dev, int *temp)
+static int tmp102_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
{
struct tmp102 *tmp102 = dev_get_drvdata(dev);
- unsigned int reg;
- int ret;
-
- if (time_before(jiffies, tmp102->ready_time)) {
- dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
- return -EAGAIN;
+ unsigned int regval;
+ int err, reg;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ /* Is it too early to return a conversion? */
+ if (time_before(jiffies, tmp102->ready_time)) {
+ dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
+ return -EAGAIN;
+ }
+ reg = TMP102_TEMP_REG;
+ break;
+ case hwmon_temp_max_hyst:
+ reg = TMP102_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP102_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- ret = regmap_read(tmp102->regmap, TMP102_TEMP_REG, &reg);
- if (ret < 0)
- return ret;
-
- *temp = tmp102_reg_to_mC(reg);
+ err = regmap_read(tmp102->regmap, reg, &regval);
+ if (err < 0)
+ return err;
+ *temp = tmp102_reg_to_mC(regval);
return 0;
}
-static ssize_t tmp102_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int tmp102_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct tmp102 *tmp102 = dev_get_drvdata(dev);
- int regaddr = sda->index;
- unsigned int reg;
- int err;
-
- if (regaddr == TMP102_TEMP_REG &&
- time_before(jiffies, tmp102->ready_time))
- return -EAGAIN;
-
- err = regmap_read(tmp102->regmap, regaddr, &reg);
- if (err < 0)
- return err;
+ int reg;
+
+ switch (attr) {
+ case hwmon_temp_max_hyst:
+ reg = TMP102_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP102_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return sprintf(buf, "%d\n", tmp102_reg_to_mC(reg));
+ temp = clamp_val(temp, -256000, 255000);
+ return regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(temp));
}
-static ssize_t tmp102_set_temp(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct tmp102 *tmp102 = dev_get_drvdata(dev);
- int reg = sda->index;
- long val;
- int err;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
- val = clamp_val(val, -256000, 255000);
-
- err = regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(val));
- return err ? : count;
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return S_IRUGO;
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_max:
+ return S_IRUGO | S_IWUSR;
+ default:
+ return 0;
+ }
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL,
- TMP102_TEMP_REG);
+static u32 tmp102_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info tmp102_chip = {
+ .type = hwmon_chip,
+ .config = tmp102_chip_config,
+};
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, TMP102_TLOW_REG);
+static u32 tmp102_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
+ 0
+};
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, TMP102_THIGH_REG);
+static const struct hwmon_channel_info tmp102_temp = {
+ .type = hwmon_temp,
+ .config = tmp102_temp_config,
+};
-static struct attribute *tmp102_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
+static const struct hwmon_channel_info *tmp102_info[] = {
+ &tmp102_chip,
+ &tmp102_temp,
NULL
};
-ATTRIBUTE_GROUPS(tmp102);
-static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
- .get_temp = tmp102_read_temp,
+static const struct hwmon_ops tmp102_hwmon_ops = {
+ .is_visible = tmp102_is_visible,
+ .read = tmp102_read,
+ .write = tmp102_write,
+};
+
+static const struct hwmon_chip_info tmp102_chip_info = {
+ .ops = &tmp102_hwmon_ops,
+ .info = tmp102_info,
};
static void tmp102_restore_config(void *data)
@@ -188,7 +216,7 @@ static const struct regmap_config tmp102_regmap_config = {
};
static int tmp102_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -249,16 +277,14 @@ static int tmp102_probe(struct i2c_client *client,
tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- tmp102,
- tmp102_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ tmp102,
+ &tmp102_chip_info,
+ NULL);
if (IS_ERR(hwmon_dev)) {
dev_dbg(dev, "unable to register hwmon device\n");
return PTR_ERR(hwmon_dev);
}
- devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- &tmp102_of_thermal_ops);
-
dev_info(dev, "initialized\n");
return 0;
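The new tmp102_write() clamps the requested limit and scales it with tmp102_mC_to_reg(); a stand-alone check of that arithmetic in plain userspace C (sample values invented):

/* Not driver code: limits are in millidegrees C, scaling is (val * 128) / 1000. */
#include <stdio.h>

static int clamp_val(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned short mC_to_reg(int mC)
{
	return (unsigned short)((mC * 128) / 1000);
}

int main(void)
{
	/* hypothetical user-requested limits; 300000 exceeds the clamp */
	int samples[] = { 25000, 83125, -40000, 300000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int mC = clamp_val(samples[i], -256000, 255000);

		printf("%7d mC -> reg 0x%04x\n", mC, (unsigned)mC_to_reg(mC));
	}
	return 0;
}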
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 85d48d80822a..bfb98b96c781 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -72,6 +72,10 @@ MODULE_DEVICE_TABLE(i2c, tmp421_id);
struct tmp421_data {
struct i2c_client *client;
struct mutex update_lock;
+ u32 temp_config[5];
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
char valid;
unsigned long last_updated;
int channels;
@@ -125,85 +129,46 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
return data;
}
-static ssize_t show_temp_value(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
- int index = to_sensor_dev_attr(devattr)->index;
- struct tmp421_data *data = tmp421_update_device(dev);
- int temp;
-
- mutex_lock(&data->update_lock);
- if (data->config & TMP421_CONFIG_RANGE)
- temp = temp_from_u16(data->temp[index]);
- else
- temp = temp_from_s16(data->temp[index]);
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", temp);
-}
+ struct tmp421_data *tmp421 = tmp421_update_device(dev);
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (tmp421->config & TMP421_CONFIG_RANGE)
+ *val = temp_from_u16(tmp421->temp[channel]);
+ else
+ *val = temp_from_s16(tmp421->temp[channel]);
+ return 0;
+ case hwmon_temp_fault:
+ /*
+ * The OPEN bit signals a fault. This is bit 0 of the temperature
+ * register (low byte).
+ */
+ *val = tmp421->temp[channel] & 0x01;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
-static ssize_t show_fault(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int index = to_sensor_dev_attr(devattr)->index;
- struct tmp421_data *data = tmp421_update_device(dev);
-
- /*
- * The OPEN bit signals a fault. This is bit 0 of the temperature
- * register (low byte).
- */
- if (data->temp[index] & 0x01)
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
}
-static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
- int n)
+static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct tmp421_data *data = dev_get_drvdata(dev);
- struct device_attribute *devattr;
- unsigned int index;
-
- devattr = container_of(a, struct device_attribute, attr);
- index = to_sensor_dev_attr(devattr)->index;
-
- if (index < data->channels)
- return a->mode;
-
- return 0;
+ switch (attr) {
+ case hwmon_temp_fault:
+ if (channel == 0)
+ return 0;
+ return S_IRUGO;
+ case hwmon_temp_input:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
-
-static struct attribute *tmp421_attr[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group tmp421_group = {
- .attrs = tmp421_attr,
- .is_visible = tmp421_is_visible,
-};
-
-static const struct attribute_group *tmp421_groups[] = {
- &tmp421_group,
- NULL
-};
-
static int tmp421_init_client(struct i2c_client *client)
{
int config, config_orig;
@@ -289,13 +254,18 @@ static int tmp421_detect(struct i2c_client *client,
return 0;
}
+static const struct hwmon_ops tmp421_ops = {
+ .is_visible = tmp421_is_visible,
+ .read = tmp421_read,
+};
+
static int tmp421_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct tmp421_data *data;
- int err;
+ int i, err;
data = devm_kzalloc(dev, sizeof(struct tmp421_data), GFP_KERNEL);
if (!data)
@@ -309,8 +279,21 @@ static int tmp421_probe(struct i2c_client *client,
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, tmp421_groups);
+ for (i = 0; i < data->channels; i++)
+ data->temp_config[i] = HWMON_T_INPUT | HWMON_T_FAULT;
+
+ data->chip.ops = &tmp421_ops;
+ data->chip.info = data->info;
+
+ data->info[0] = &data->temp_info;
+
+ data->temp_info.type = hwmon_temp;
+ data->temp_info.config = data->temp_config;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &data->chip,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
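Both the tmp102 and tmp421 conversions move to the hwmon_chip_info/hwmon_ops registration model used by devm_hwmon_device_register_with_info(). A hedged, self-contained sketch of that style with a static single-channel description and invented "demo" names:

/* Illustrative only: constant reading, read-only temperature channel. */
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;			/* constant 42 degC */
		return 0;
	}
	return -EOPNOTSUPP;
}

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return (type == hwmon_temp && attr == hwmon_temp_input) ? S_IRUGO : 0;
}

static const u32 demo_temp_config[] = {
	HWMON_T_INPUT,
	0
};

static const struct hwmon_channel_info demo_temp = {
	.type = hwmon_temp,
	.config = demo_temp_config,
};

static const struct hwmon_channel_info *demo_info[] = {
	&demo_temp,
	NULL
};

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
							 client->name, NULL,
							 &demo_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct i2c_device_id demo_id[] = {
	{ "demo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
	.driver = { .name = "demo" },
	.probe = demo_probe,
	.id_table = demo_id,
};
module_i2c_driver(demo_driver);
MODULE_LICENSE("GPL");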
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
new file mode 100644
index 000000000000..9c0dbb8191ad
--- /dev/null
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -0,0 +1,787 @@
+/*
+ * APM X-Gene SoC Hardware Monitoring Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ * Hoan Tran <hotran@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver provides the following features:
+ * - Retrieve CPU total power (uW)
+ * - Retrieve IO total power (uW)
+ * - Retrieve SoC temperature (milli-degree C) and alarm
+ */
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <acpi/pcc.h>
+
+/* SLIMpro message defines */
+#define MSG_TYPE_DBG 0
+#define MSG_TYPE_ERR 7
+#define MSG_TYPE_PWRMGMT 9
+
+#define MSG_TYPE(v) (((v) & 0xF0000000) >> 28)
+#define MSG_TYPE_SET(v) (((v) << 28) & 0xF0000000)
+#define MSG_SUBTYPE(v) (((v) & 0x0F000000) >> 24)
+#define MSG_SUBTYPE_SET(v) (((v) << 24) & 0x0F000000)
+
+#define DBG_SUBTYPE_SENSOR_READ 4
+#define SENSOR_RD_MSG 0x04FFE902
+#define SENSOR_RD_EN_ADDR(a) ((a) & 0x000FFFFF)
+#define PMD_PWR_REG 0x20
+#define PMD_PWR_MW_REG 0x26
+#define SOC_PWR_REG 0x21
+#define SOC_PWR_MW_REG 0x27
+#define SOC_TEMP_REG 0x10
+
+#define TEMP_NEGATIVE_BIT 8
+#define SENSOR_INVALID_DATA BIT(15)
+
+#define PWRMGMT_SUBTYPE_TPC 1
+#define TPC_ALARM 2
+#define TPC_GET_ALARM 3
+#define TPC_CMD(v) (((v) & 0x00FF0000) >> 16)
+#define TPC_CMD_SET(v) (((v) << 16) & 0x00FF0000)
+#define TPC_EN_MSG(hndl, cmd, type) \
+ (MSG_TYPE_SET(MSG_TYPE_PWRMGMT) | \
+ MSG_SUBTYPE_SET(hndl) | TPC_CMD_SET(cmd) | type)
+
+/* PCC defines */
+#define PCC_SIGNATURE_MASK 0x50424300
+#define PCCC_GENERATE_DB_INT BIT(15)
+#define PCCS_CMD_COMPLETE BIT(0)
+#define PCCS_SCI_DOORBEL BIT(1)
+#define PCCS_PLATFORM_NOTIFICATION BIT(3)
+/*
+ * Arbitrary retries in case the remote processor is slow to respond
+ * to PCC commands
+ */
+#define PCC_NUM_RETRIES 500
+
+#define ASYNC_MSG_FIFO_SIZE 16
+#define MBOX_OP_TIMEOUTMS 1000
+
+#define WATT_TO_mWATT(x) ((x) * 1000)
+#define mWATT_TO_uWATT(x) ((x) * 1000)
+#define CELSIUS_TO_mCELSIUS(x) ((x) * 1000)
+
+#define to_xgene_hwmon_dev(cl) \
+ container_of(cl, struct xgene_hwmon_dev, mbox_client)
+
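A stand-alone check, in plain C, that the SLIMpro message bit fields above encode and decode as expected; the constants mirror the #defines and gain a U suffix only to keep the shifts unsigned:

/* Not driver code: userspace sanity check of the bit-field macros. */
#include <stdio.h>
#include <stdint.h>

#define MSG_TYPE(v)		(((v) & 0xF0000000) >> 28)
#define MSG_TYPE_SET(v)		(((v) << 28) & 0xF0000000)
#define MSG_SUBTYPE(v)		(((v) & 0x0F000000) >> 24)
#define MSG_SUBTYPE_SET(v)	(((v) << 24) & 0x0F000000)
#define TPC_CMD(v)		(((v) & 0x00FF0000) >> 16)
#define TPC_CMD_SET(v)		(((v) << 16) & 0x00FF0000)

#define MSG_TYPE_PWRMGMT	9U
#define PWRMGMT_SUBTYPE_TPC	1U
#define TPC_GET_ALARM		3U

int main(void)
{
	/* same composition as TPC_EN_MSG(PWRMGMT_SUBTYPE_TPC, TPC_GET_ALARM, 0) */
	uint32_t msg = MSG_TYPE_SET(MSG_TYPE_PWRMGMT) |
		       MSG_SUBTYPE_SET(PWRMGMT_SUBTYPE_TPC) |
		       TPC_CMD_SET(TPC_GET_ALARM);

	/* expect msg=0x91030000 type=9 subtype=1 cmd=3 */
	printf("msg=0x%08x type=%u subtype=%u cmd=%u\n",
	       (unsigned)msg, (unsigned)MSG_TYPE(msg),
	       (unsigned)MSG_SUBTYPE(msg), (unsigned)TPC_CMD(msg));
	return 0;
}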
+struct slimpro_resp_msg {
+ u32 msg;
+ u32 param1;
+ u32 param2;
+} __packed;
+
+struct xgene_hwmon_dev {
+ struct device *dev;
+ struct mbox_chan *mbox_chan;
+ struct mbox_client mbox_client;
+ int mbox_idx;
+
+ spinlock_t kfifo_lock;
+ struct mutex rd_mutex;
+ struct completion rd_complete;
+ int resp_pending;
+ struct slimpro_resp_msg sync_msg;
+
+ struct work_struct workq;
+ struct kfifo_rec_ptr_1 async_msg_fifo;
+
+ struct device *hwmon_dev;
+ bool temp_critical_alarm;
+
+ phys_addr_t comm_base_addr;
+ void *pcc_comm_addr;
+ u64 usecs_lat;
+};
+
+/*
+ * This function tests and clears a bitmask then returns its old value
+ */
+static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
+{
+ u16 ret, val;
+
+ val = le16_to_cpu(READ_ONCE(*addr));
+ ret = val & mask;
+ val &= ~mask;
+ WRITE_ONCE(*addr, cpu_to_le16(val));
+
+ return ret;
+}
+
+static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
+{
+ struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ u32 *ptr = (void *)(generic_comm_base + 1);
+ int rc, i;
+ u16 val;
+
+ mutex_lock(&ctx->rd_mutex);
+ init_completion(&ctx->rd_complete);
+ ctx->resp_pending = true;
+
+ /* Write signature for subspace */
+ WRITE_ONCE(generic_comm_base->signature,
+ cpu_to_le32(PCC_SIGNATURE_MASK | ctx->mbox_idx));
+
+ /* Write to the shared command region */
+ WRITE_ONCE(generic_comm_base->command,
+ cpu_to_le16(MSG_TYPE(msg[0]) | PCCC_GENERATE_DB_INT));
+
+ /* Flip CMD COMPLETE bit */
+ val = le16_to_cpu(READ_ONCE(generic_comm_base->status));
+ val &= ~PCCS_CMD_COMPLETE;
+ WRITE_ONCE(generic_comm_base->status, cpu_to_le16(val));
+
+ /* Copy the message to the PCC comm space */
+ for (i = 0; i < sizeof(struct slimpro_resp_msg) / 4; i++)
+ WRITE_ONCE(ptr[i], cpu_to_le32(msg[i]));
+
+ /* Ring the doorbell */
+ rc = mbox_send_message(ctx->mbox_chan, msg);
+ if (rc < 0) {
+ dev_err(ctx->dev, "Mailbox send error %d\n", rc);
+ goto err;
+ }
+ if (!wait_for_completion_timeout(&ctx->rd_complete,
+ usecs_to_jiffies(ctx->usecs_lat))) {
+ dev_err(ctx->dev, "Mailbox operation timed out\n");
+ rc = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* Check for error message */
+ if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ msg[0] = ctx->sync_msg.msg;
+ msg[1] = ctx->sync_msg.param1;
+ msg[2] = ctx->sync_msg.param2;
+
+err:
+ mbox_chan_txdone(ctx->mbox_chan, 0);
+ ctx->resp_pending = false;
+ mutex_unlock(&ctx->rd_mutex);
+ return rc;
+}
+
+static int xgene_hwmon_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
+{
+ int rc;
+
+ mutex_lock(&ctx->rd_mutex);
+ init_completion(&ctx->rd_complete);
+ ctx->resp_pending = true;
+
+ rc = mbox_send_message(ctx->mbox_chan, msg);
+ if (rc < 0) {
+ dev_err(ctx->dev, "Mailbox send error %d\n", rc);
+ goto err;
+ }
+
+ if (!wait_for_completion_timeout(&ctx->rd_complete,
+ msecs_to_jiffies(MBOX_OP_TIMEOUTMS))) {
+ dev_err(ctx->dev, "Mailbox operation timed out\n");
+ rc = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* Check for error message */
+ if (MSG_TYPE(ctx->sync_msg.msg) == MSG_TYPE_ERR) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ msg[0] = ctx->sync_msg.msg;
+ msg[1] = ctx->sync_msg.param1;
+ msg[2] = ctx->sync_msg.param2;
+
+err:
+ ctx->resp_pending = false;
+ mutex_unlock(&ctx->rd_mutex);
+ return rc;
+}
+
+static int xgene_hwmon_reg_map_rd(struct xgene_hwmon_dev *ctx, u32 addr,
+ u32 *data)
+{
+ u32 msg[3];
+ int rc;
+
+ msg[0] = SENSOR_RD_MSG;
+ msg[1] = SENSOR_RD_EN_ADDR(addr);
+ msg[2] = 0;
+
+ if (acpi_disabled)
+ rc = xgene_hwmon_rd(ctx, msg);
+ else
+ rc = xgene_hwmon_pcc_rd(ctx, msg);
+
+ if (rc < 0)
+ return rc;
+
+ /*
+ * Check if sensor data is valid.
+ */
+ if (msg[1] & SENSOR_INVALID_DATA)
+ return -ENODATA;
+
+ *data = msg[1];
+
+ return rc;
+}
+
+static int xgene_hwmon_get_notification_msg(struct xgene_hwmon_dev *ctx,
+ u32 *amsg)
+{
+ u32 msg[3];
+ int rc;
+
+ msg[0] = TPC_EN_MSG(PWRMGMT_SUBTYPE_TPC, TPC_GET_ALARM, 0);
+ msg[1] = 0;
+ msg[2] = 0;
+
+ rc = xgene_hwmon_pcc_rd(ctx, msg);
+ if (rc < 0)
+ return rc;
+
+ amsg[0] = msg[0];
+ amsg[1] = msg[1];
+ amsg[2] = msg[2];
+
+ return rc;
+}
+
+static int xgene_hwmon_get_cpu_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
+{
+ u32 watt, mwatt;
+ int rc;
+
+ rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_REG, &watt);
+ if (rc < 0)
+ return rc;
+
+ rc = xgene_hwmon_reg_map_rd(ctx, PMD_PWR_MW_REG, &mwatt);
+ if (rc < 0)
+ return rc;
+
+ *val = WATT_TO_mWATT(watt) + mwatt;
+ return 0;
+}
+
+static int xgene_hwmon_get_io_pwr(struct xgene_hwmon_dev *ctx, u32 *val)
+{
+ u32 watt, mwatt;
+ int rc;
+
+ rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_REG, &watt);
+ if (rc < 0)
+ return rc;
+
+ rc = xgene_hwmon_reg_map_rd(ctx, SOC_PWR_MW_REG, &mwatt);
+ if (rc < 0)
+ return rc;
+
+ *val = WATT_TO_mWATT(watt) + mwatt;
+ return 0;
+}
+
+static int xgene_hwmon_get_temp(struct xgene_hwmon_dev *ctx, u32 *val)
+{
+ return xgene_hwmon_reg_map_rd(ctx, SOC_TEMP_REG, val);
+}
+
+/*
+ * Sensor temperature/power functions
+ */
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
+ int rc, temp;
+ u32 val;
+
+ rc = xgene_hwmon_get_temp(ctx, &val);
+ if (rc < 0)
+ return rc;
+
+ temp = sign_extend32(val, TEMP_NEGATIVE_BIT);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", CELSIUS_TO_mCELSIUS(temp));
+}
+
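temp1_input_show() above widens the raw SoC temperature with sign_extend32() before scaling it to millidegrees; a userspace equivalent with hypothetical raw readings:

/* Not driver code: the helper below performs the same operation as the
 * kernel's sign_extend32(), with TEMP_NEGATIVE_BIT (bit 8) as sign bit. */
#include <stdio.h>
#include <stdint.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t regs[] = { 0x05a, 0x1f6 };	/* +90 C and -10 C samples */

	for (unsigned i = 0; i < 2; i++) {
		int temp = sign_extend32(regs[i], 8);

		printf("raw 0x%03x -> %d mC\n", (unsigned)regs[i], temp * 1000);
	}
	return 0;
}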
+static ssize_t temp1_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "SoC Temperature\n");
+}
+
+static ssize_t temp1_critical_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ctx->temp_critical_alarm);
+}
+
+static ssize_t power1_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "CPU power\n");
+}
+
+static ssize_t power2_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "IO power\n");
+}
+
+static ssize_t power1_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
+ u32 val;
+ int rc;
+
+ rc = xgene_hwmon_get_cpu_pwr(ctx, &val);
+ if (rc < 0)
+ return rc;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mWATT_TO_uWATT(val));
+}
+
+static ssize_t power2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct xgene_hwmon_dev *ctx = dev_get_drvdata(dev);
+ u32 val;
+ int rc;
+
+ rc = xgene_hwmon_get_io_pwr(ctx, &val);
+ if (rc < 0)
+ return rc;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mWATT_TO_uWATT(val));
+}
+
+static DEVICE_ATTR_RO(temp1_label);
+static DEVICE_ATTR_RO(temp1_input);
+static DEVICE_ATTR_RO(temp1_critical_alarm);
+static DEVICE_ATTR_RO(power1_label);
+static DEVICE_ATTR_RO(power1_input);
+static DEVICE_ATTR_RO(power2_label);
+static DEVICE_ATTR_RO(power2_input);
+
+static struct attribute *xgene_hwmon_attrs[] = {
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp1_critical_alarm.attr,
+ &dev_attr_power1_label.attr,
+ &dev_attr_power1_input.attr,
+ &dev_attr_power2_label.attr,
+ &dev_attr_power2_input.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(xgene_hwmon);
+
+static int xgene_hwmon_tpc_alarm(struct xgene_hwmon_dev *ctx,
+ struct slimpro_resp_msg *amsg)
+{
+ ctx->temp_critical_alarm = !!amsg->param2;
+ sysfs_notify(&ctx->dev->kobj, NULL, "temp1_critical_alarm");
+
+ return 0;
+}
+
+static void xgene_hwmon_process_pwrmsg(struct xgene_hwmon_dev *ctx,
+ struct slimpro_resp_msg *amsg)
+{
+ if ((MSG_SUBTYPE(amsg->msg) == PWRMGMT_SUBTYPE_TPC) &&
+ (TPC_CMD(amsg->msg) == TPC_ALARM))
+ xgene_hwmon_tpc_alarm(ctx, amsg);
+}
+
+/*
+ * This function is called to process async work queue
+ */
+static void xgene_hwmon_evt_work(struct work_struct *work)
+{
+ struct slimpro_resp_msg amsg;
+ struct xgene_hwmon_dev *ctx;
+ int ret;
+
+ ctx = container_of(work, struct xgene_hwmon_dev, workq);
+ while (kfifo_out_spinlocked(&ctx->async_msg_fifo, &amsg,
+ sizeof(struct slimpro_resp_msg),
+ &ctx->kfifo_lock)) {
+ /*
+ * If PCC, send a consumer command to Platform to get info
+ * If Slimpro Mailbox, get message from specific FIFO
+ */
+ if (!acpi_disabled) {
+ ret = xgene_hwmon_get_notification_msg(ctx,
+ (u32 *)&amsg);
+ if (ret < 0)
+ continue;
+ }
+
+ if (MSG_TYPE(amsg.msg) == MSG_TYPE_PWRMGMT)
+ xgene_hwmon_process_pwrmsg(ctx, &amsg);
+ }
+}
+
+static int xgene_hwmon_rx_ready(struct xgene_hwmon_dev *ctx, void *msg)
+{
+ if (IS_ERR_OR_NULL(ctx->hwmon_dev) && !ctx->resp_pending) {
+ /* Enqueue to the FIFO */
+ kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
+ sizeof(struct slimpro_resp_msg),
+ &ctx->kfifo_lock);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * This function is called when the SLIMpro Mailbox received a message
+ */
+static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
+{
+ struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
+
+ /*
+ * While the driver registers with the mailbox framework, an interrupt
+ * can be pending before the probe function completes its
+ * initialization. If such a condition occurs, just queue up the message
+ * as the driver is not ready for servicing the callback.
+ */
+ if (xgene_hwmon_rx_ready(ctx, msg) < 0)
+ return;
+
+ /*
+ * Response message format:
+ * msg[0] is the return code of the operation
+ * msg[1] is the first parameter word
+ * msg[2] is the second parameter word
+ *
+ * As message only supports dword size, just assign it.
+ */
+
+ /* Check for sync query */
+ if (ctx->resp_pending &&
+ ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
+ (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
+ MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
+ (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
+ MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
+ TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
+ ctx->sync_msg.msg = ((u32 *)msg)[0];
+ ctx->sync_msg.param1 = ((u32 *)msg)[1];
+ ctx->sync_msg.param2 = ((u32 *)msg)[2];
+
+ /* Operation waiting for response */
+ complete(&ctx->rd_complete);
+
+ return;
+ }
+
+ /* Enqueue to the FIFO */
+ kfifo_in_spinlocked(&ctx->async_msg_fifo, msg,
+ sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
+ /* Schedule the bottom handler */
+ schedule_work(&ctx->workq);
+}
+
+/*
+ * This function is called when the PCC Mailbox received a message
+ */
+static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
+{
+ struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
+ struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct slimpro_resp_msg amsg;
+
+ /*
+ * While the driver registers with the mailbox framework, an interrupt
+ * can be pending before the probe function completes its
+ * initialization. If such a condition occurs, just queue up the message
+ * as the driver is not ready for servicing the callback.
+ */
+ if (xgene_hwmon_rx_ready(ctx, &amsg) < 0)
+ return;
+
+ msg = generic_comm_base + 1;
+ /* Check if platform sends interrupt */
+ if (!xgene_word_tst_and_clr(&generic_comm_base->status,
+ PCCS_SCI_DOORBEL))
+ return;
+
+ /*
+ * Response message format:
+ * msg[0] is the return code of the operation
+ * msg[1] is the first parameter word
+ * msg[2] is the second parameter word
+ *
+ * As message only supports dword size, just assign it.
+ */
+
+ /* Check for sync query */
+ if (ctx->resp_pending &&
+ ((MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_ERR) ||
+ (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_DBG &&
+ MSG_SUBTYPE(((u32 *)msg)[0]) == DBG_SUBTYPE_SENSOR_READ) ||
+ (MSG_TYPE(((u32 *)msg)[0]) == MSG_TYPE_PWRMGMT &&
+ MSG_SUBTYPE(((u32 *)msg)[0]) == PWRMGMT_SUBTYPE_TPC &&
+ TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
+ /* Check if platform completes command */
+ if (xgene_word_tst_and_clr(&generic_comm_base->status,
+ PCCS_CMD_COMPLETE)) {
+ ctx->sync_msg.msg = ((u32 *)msg)[0];
+ ctx->sync_msg.param1 = ((u32 *)msg)[1];
+ ctx->sync_msg.param2 = ((u32 *)msg)[2];
+
+ /* Operation waiting for response */
+ complete(&ctx->rd_complete);
+
+ return;
+ }
+ }
+
+ /*
+ * Platform notifies interrupt to OSPM.
+ * OSPM schedules a consumer command to get this information
+ * in a workqueue. Platform must wait until OSPM has issued
+ * a consumer command that serves this notification.
+ */
+
+ /* Enqueue to the FIFO */
+ kfifo_in_spinlocked(&ctx->async_msg_fifo, &amsg,
+ sizeof(struct slimpro_resp_msg), &ctx->kfifo_lock);
+ /* Schedule the bottom handler */
+ schedule_work(&ctx->workq);
+}
+
+static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
+{
+ if (ret) {
+ dev_dbg(cl->dev, "TX did not complete: CMD sent:%x, ret:%d\n",
+ *(u16 *)msg, ret);
+ } else {
+ dev_dbg(cl->dev, "TX completed. CMD sent:%x, ret:%d\n",
+ *(u16 *)msg, ret);
+ }
+}
+
+static int xgene_hwmon_probe(struct platform_device *pdev)
+{
+ struct xgene_hwmon_dev *ctx;
+ struct mbox_client *cl;
+ int rc;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+ cl = &ctx->mbox_client;
+
+ spin_lock_init(&ctx->kfifo_lock);
+ mutex_init(&ctx->rd_mutex);
+
+ rc = kfifo_alloc(&ctx->async_msg_fifo,
+ sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
+ GFP_KERNEL);
+ if (rc)
+ goto out_mbox_free;
+
+ INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
+
+ /* Request mailbox channel */
+ cl->dev = &pdev->dev;
+ cl->tx_done = xgene_hwmon_tx_done;
+ cl->tx_block = false;
+ cl->tx_tout = MBOX_OP_TIMEOUTMS;
+ cl->knows_txdone = false;
+ if (acpi_disabled) {
+ cl->rx_callback = xgene_hwmon_rx_cb;
+ ctx->mbox_chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(ctx->mbox_chan)) {
+ dev_err(&pdev->dev,
+ "SLIMpro mailbox channel request failed\n");
+ return -ENODEV;
+ }
+ } else {
+ struct acpi_pcct_hw_reduced *cppc_ss;
+
+ if (device_property_read_u32(&pdev->dev, "pcc-channel",
+ &ctx->mbox_idx)) {
+ dev_err(&pdev->dev, "no pcc-channel property\n");
+ return -ENODEV;
+ }
+
+ cl->rx_callback = xgene_hwmon_pcc_rx_cb;
+ ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(ctx->mbox_chan)) {
+ dev_err(&pdev->dev,
+ "PCC channel request failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * The PCC mailbox controller driver should
+ * have parsed the PCCT (global table of all
+ * PCC channels) and stored pointers to the
+ * subspace communication region in con_priv.
+ */
+ cppc_ss = ctx->mbox_chan->con_priv;
+ if (!cppc_ss) {
+ dev_err(&pdev->dev, "PCC subspace not found\n");
+ rc = -ENODEV;
+ goto out_mbox_free;
+ }
+
+ if (!ctx->mbox_chan->mbox->txdone_irq) {
+ dev_err(&pdev->dev, "PCC IRQ not supported\n");
+ rc = -ENODEV;
+ goto out_mbox_free;
+ }
+
+ /*
+ * This is the shared communication region
+ * for the OS and Platform to communicate over.
+ */
+ ctx->comm_base_addr = cppc_ss->base_address;
+ if (ctx->comm_base_addr) {
+ ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
+ cppc_ss->length,
+ MEMREMAP_WB);
+ } else {
+ dev_err(&pdev->dev, "Failed to get PCC comm region\n");
+ rc = -ENODEV;
+ goto out_mbox_free;
+ }
+
+ if (!ctx->pcc_comm_addr) {
+ dev_err(&pdev->dev,
+ "Failed to ioremap PCC comm region\n");
+ rc = -ENOMEM;
+ goto out_mbox_free;
+ }
+
+ /*
+ * cppc_ss->latency is just a Nominal value. In reality
+ * the remote processor could be much slower to reply.
+ * So add an arbitrary amount of wait on top of Nominal.
+ */
+ ctx->usecs_lat = PCC_NUM_RETRIES * cppc_ss->latency;
+ }
+
+ ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
+ "apm_xgene",
+ ctx,
+ xgene_hwmon_groups);
+ if (IS_ERR(ctx->hwmon_dev)) {
+ dev_err(&pdev->dev, "Failed to register HW monitor device\n");
+ rc = PTR_ERR(ctx->hwmon_dev);
+ goto out;
+ }
+
+ /*
+ * Schedule the bottom handler if there is a pending message.
+ */
+ schedule_work(&ctx->workq);
+
+ dev_info(&pdev->dev, "APM X-Gene SoC HW monitor driver registered\n");
+
+ return 0;
+
+out:
+ if (acpi_disabled)
+ mbox_free_channel(ctx->mbox_chan);
+ else
+ pcc_mbox_free_channel(ctx->mbox_chan);
+out_mbox_free:
+ kfifo_free(&ctx->async_msg_fifo);
+
+ return rc;
+}
+
+static int xgene_hwmon_remove(struct platform_device *pdev)
+{
+ struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(ctx->hwmon_dev);
+ kfifo_free(&ctx->async_msg_fifo);
+ if (acpi_disabled)
+ mbox_free_channel(ctx->mbox_chan);
+ else
+ pcc_mbox_free_channel(ctx->mbox_chan);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
+ {"APMC0D29", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
+#endif
+
+static const struct of_device_id xgene_hwmon_of_match[] = {
+ {.compatible = "apm,xgene-slimpro-hwmon"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, xgene_hwmon_of_match);
+
+static struct platform_driver xgene_hwmon_driver __refdata = {
+ .probe = xgene_hwmon_probe,
+ .remove = xgene_hwmon_remove,
+ .driver = {
+ .name = "xgene-slimpro-hwmon",
+ .of_match_table = xgene_hwmon_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_hwmon_acpi_match),
+ },
+};
+module_platform_driver(xgene_hwmon_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC hardware monitor");
+MODULE_LICENSE("GPL");
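A condensed, hedged fragment (not a complete module; "demo" names invented) of the defer-to-workqueue pattern this driver uses for messages that can arrive before the hwmon device exists: mailbox callbacks push a fixed-size record into a kfifo under a spinlock, and a work item drains it later from process context.

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_msg {
	u32 msg;
	u32 param1;
	u32 param2;
};

struct demo_ctx {
	spinlock_t lock;
	struct kfifo_rec_ptr_1 fifo;
	struct work_struct work;
};

/* producer side: safe to call from the mailbox rx callback */
static void demo_push(struct demo_ctx *ctx, struct demo_msg *m)
{
	kfifo_in_spinlocked(&ctx->fifo, m, sizeof(*m), &ctx->lock);
	schedule_work(&ctx->work);
}

/* consumer side: bottom half running in process context */
static void demo_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);
	struct demo_msg m;

	while (kfifo_out_spinlocked(&ctx->fifo, &m, sizeof(m), &ctx->lock)) {
		/* act on 'm' here */
	}
}

static int demo_init(struct demo_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	INIT_WORK(&ctx->work, demo_work);
	return kfifo_alloc(&ctx->fifo, 16 * sizeof(struct demo_msg),
			   GFP_KERNEL);
}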
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 4d20b0be0c0b..d7325c6534ad 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -184,8 +184,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- ETB_FFCR);
+ "timeout while waiting for completion of Manual Flush\n");
}
/* disable trace capture */
@@ -193,8 +192,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- ETB_FFCR);
+ "timeout while waiting for Formatter to Stop\n");
}
CS_LOCK(drvdata->base);
@@ -561,7 +559,7 @@ static const struct file_operations etb_fops = {
};
#define coresight_etb10_simple_func(name, offset) \
- coresight_simple_func(struct etb_drvdata, name, offset)
+ coresight_simple_func(struct etb_drvdata, NULL, name, offset)
coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_simple_func(sts, ETB_STATUS_REG);
@@ -638,7 +636,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct etb_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
if (np) {
@@ -684,17 +682,13 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- desc->type = CORESIGHT_DEV_TYPE_SINK;
- desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
- desc->ops = &etb_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_etb_groups;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.ops = &etb_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_etb_groups;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 755125f7917f..2cd7c718198a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static struct pmu etm_pmu;
@@ -71,14 +72,48 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
static void etm_event_read(struct perf_event *event) {}
-static int etm_event_init(struct perf_event *event)
+static int etm_addr_filters_alloc(struct perf_event *event)
{
- if (event->attr.type != etm_pmu.type)
- return -ENOENT;
+ struct etm_filters *filters;
+ int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
+
+ filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
+ if (!filters)
+ return -ENOMEM;
+
+ if (event->parent)
+ memcpy(filters, event->parent->hw.addr_filters,
+ sizeof(*filters));
+
+ event->hw.addr_filters = filters;
return 0;
}
+static void etm_event_destroy(struct perf_event *event)
+{
+ kfree(event->hw.addr_filters);
+ event->hw.addr_filters = NULL;
+}
+
+static int etm_event_init(struct perf_event *event)
+{
+ int ret = 0;
+
+ if (event->attr.type != etm_pmu.type) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = etm_addr_filters_alloc(event);
+ if (ret)
+ goto out;
+
+ event->destroy = etm_event_destroy;
+out:
+ return ret;
+}
+
static void free_event_data(struct work_struct *work)
{
int cpu;
@@ -100,7 +135,7 @@ static void free_event_data(struct work_struct *work)
}
for_each_cpu(cpu, mask) {
- if (event_data->path[cpu])
+ if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
coresight_release_path(event_data->path[cpu]);
}
@@ -185,7 +220,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* referenced later when the path is actually needed.
*/
event_data->path[cpu] = coresight_build_path(csdev);
- if (!event_data->path[cpu])
+ if (IS_ERR(event_data->path[cpu]))
goto err;
}
@@ -258,7 +293,7 @@ static void etm_event_start(struct perf_event *event, int flags)
event->hw.state = 0;
/* Finally enable the tracer */
- if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_end_stop;
out:
@@ -291,7 +326,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- source_ops(csdev)->disable(csdev);
+ source_ops(csdev)->disable(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
@@ -342,6 +377,87 @@ static void etm_event_del(struct perf_event *event, int mode)
etm_event_stop(event, PERF_EF_UPDATE);
}
+static int etm_addr_filters_validate(struct list_head *filters)
+{
+ bool range = false, address = false;
+ int index = 0;
+ struct perf_addr_filter *filter;
+
+ list_for_each_entry(filter, filters, entry) {
+ /*
+ * No need to go further if there's no more
+ * room for filters.
+ */
+ if (++index > ETM_ADDR_CMP_MAX)
+ return -EOPNOTSUPP;
+
+ /*
+ * As taken from the struct perf_addr_filter documentation:
+ * @range: 1: range, 0: address
+ *
+ * At this time we don't allow range and start/stop filtering
+ * to coexist; they have to be mutually exclusive.
+ */
+ if ((filter->range == 1) && address)
+ return -EOPNOTSUPP;
+
+ if ((filter->range == 0) && range)
+ return -EOPNOTSUPP;
+
+ /*
+ * For range filtering, the second address in the address
+ * range comparator needs to be higher than the first.
+ * Invalid otherwise.
+ */
+ if (filter->range && filter->size == 0)
+ return -EINVAL;
+
+ /*
+ * Everything checks out with this filter, record what we've
+ * received before moving on to the next one.
+ */
+ if (filter->range)
+ range = true;
+ else
+ address = true;
+ }
+
+ return 0;
+}
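A stand-alone restatement in plain C, with hypothetical types, of the rules etm_addr_filters_validate() enforces, exercised on two sample filter sets:

/* Not kernel code: at most ETM_ADDR_CMP_MAX entries, non-zero size for
 * ranges, and no mixing of range filters with start/stop filters. */
#include <stdbool.h>
#include <stdio.h>

#define ETM_ADDR_CMP_MAX 8

struct fake_filter {
	bool range;			/* 1: range, 0: start/stop address */
	unsigned long size;
};

static int validate(const struct fake_filter *f, int n)
{
	bool range = false, address = false;

	if (n > ETM_ADDR_CMP_MAX)
		return -1;

	for (int i = 0; i < n; i++) {
		if (f[i].range && address)
			return -1;		/* range after start/stop */
		if (!f[i].range && range)
			return -1;		/* start/stop after range */
		if (f[i].range && f[i].size == 0)
			return -1;		/* empty range */
		if (f[i].range)
			range = true;
		else
			address = true;
	}
	return 0;
}

int main(void)
{
	struct fake_filter ok[] = { { true, 0x1000 }, { true, 0x200 } };
	struct fake_filter mixed[] = { { true, 0x1000 }, { false, 0 } };

	/* expect 0 (accepted) and -1 (rejected) */
	printf("two ranges: %d, range + start/stop: %d\n",
	       validate(ok, 2), validate(mixed, 2));
	return 0;
}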
+
+static void etm_addr_filters_sync(struct perf_event *event)
+{
+ struct perf_addr_filters_head *head = perf_event_addr_filters(event);
+ unsigned long start, stop, *offs = event->addr_filters_offs;
+ struct etm_filters *filters = event->hw.addr_filters;
+ struct etm_filter *etm_filter;
+ struct perf_addr_filter *filter;
+ int i = 0;
+
+ list_for_each_entry(filter, &head->list, entry) {
+ start = filter->offset + offs[i];
+ stop = start + filter->size;
+ etm_filter = &filters->etm_filter[i];
+
+ if (filter->range == 1) {
+ etm_filter->start_addr = start;
+ etm_filter->stop_addr = stop;
+ etm_filter->type = ETM_ADDR_TYPE_RANGE;
+ } else {
+ if (filter->filter == 1) {
+ etm_filter->start_addr = start;
+ etm_filter->type = ETM_ADDR_TYPE_START;
+ } else {
+ etm_filter->stop_addr = stop;
+ etm_filter->type = ETM_ADDR_TYPE_STOP;
+ }
+ }
+ i++;
+ }
+
+ filters->nr_filters = i;
+}
+
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
char entry[sizeof("cpu9999999")];
@@ -371,18 +487,21 @@ static int __init etm_perf_init(void)
{
int ret;
- etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
-
- etm_pmu.attr_groups = etm_pmu_attr_groups;
- etm_pmu.task_ctx_nr = perf_sw_context;
- etm_pmu.read = etm_event_read;
- etm_pmu.event_init = etm_event_init;
- etm_pmu.setup_aux = etm_setup_aux;
- etm_pmu.free_aux = etm_free_aux;
- etm_pmu.start = etm_event_start;
- etm_pmu.stop = etm_event_stop;
- etm_pmu.add = etm_event_add;
- etm_pmu.del = etm_event_del;
+ etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
+
+ etm_pmu.attr_groups = etm_pmu_attr_groups;
+ etm_pmu.task_ctx_nr = perf_sw_context;
+ etm_pmu.read = etm_event_read;
+ etm_pmu.event_init = etm_event_init;
+ etm_pmu.setup_aux = etm_setup_aux;
+ etm_pmu.free_aux = etm_free_aux;
+ etm_pmu.start = etm_event_start;
+ etm_pmu.stop = etm_event_stop;
+ etm_pmu.add = etm_event_add;
+ etm_pmu.del = etm_event_del;
+ etm_pmu.addr_filters_sync = etm_addr_filters_sync;
+ etm_pmu.addr_filters_validate = etm_addr_filters_validate;
+ etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
if (ret == 0)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 87f5a134eb6f..3ffc9feb2d64 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -18,8 +18,42 @@
#ifndef _CORESIGHT_ETM_PERF_H
#define _CORESIGHT_ETM_PERF_H
+#include "coresight-priv.h"
+
struct coresight_device;
+/*
+ * In both ETMv3 and v4 the maximum number of address comparators implementable
+ * is 8. The actual number is implementation specific and will be checked
+ * when filters are applied.
+ */
+#define ETM_ADDR_CMP_MAX 8
+
+/**
+ * struct etm_filter - single instruction range or start/stop configuration.
+ * @start_addr: The address to start tracing on.
+ * @stop_addr: The address to stop tracing on.
+ * @type: Is this a range or start/stop filter.
+ */
+struct etm_filter {
+ unsigned long start_addr;
+ unsigned long stop_addr;
+ enum etm_addr_type type;
+};
+
+/**
+ * struct etm_filters - set of filters for a session
+ * @etm_filter: All the filters for this session.
+ * @nr_filters: Number of filters
+ * @ssstatus: Status of the start/stop logic.
+ */
+struct etm_filters {
+ struct etm_filter etm_filter[ETM_ADDR_CMP_MAX];
+ unsigned int nr_filters;
+ bool ssstatus;
+};
+
+
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 51597cb2c08a..4a18ee499965 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -259,14 +259,6 @@ struct etm_drvdata {
struct etm_config config;
};
-enum etm_addr_type {
- ETM_ADDR_TYPE_NONE,
- ETM_ADDR_TYPE_SINGLE,
- ETM_ADDR_TYPE_RANGE,
- ETM_ADDR_TYPE_START,
- ETM_ADDR_TYPE_STOP,
-};
-
static inline void etm_writel(struct etm_drvdata *drvdata,
u32 val, u32 off)
{
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 02d4b629891f..e9b071953f80 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
+#include "coresight-priv.h"
static ssize_t nr_addr_cmp_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1222,7 +1223,7 @@ static struct attribute *coresight_etm_attrs[] = {
};
#define coresight_etm3x_simple_func(name, offset) \
- coresight_simple_func(struct etm_drvdata, name, offset)
+ coresight_simple_func(struct etm_drvdata, NULL, name, offset)
coresight_etm3x_simple_func(etmccr, ETMCCR);
coresight_etm3x_simple_func(etmccer, ETMCCER);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 2de4cad9c5ed..3fe368b23d15 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -311,9 +311,10 @@ void etm_config_trace_mode(struct etm_config *config)
#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
static int etm_parse_event_config(struct etm_drvdata *drvdata,
- struct perf_event_attr *attr)
+ struct perf_event *event)
{
struct etm_config *config = &drvdata->config;
+ struct perf_event_attr *attr = &event->attr;
if (!attr)
return -EINVAL;
@@ -459,7 +460,7 @@ static int etm_trace_id(struct coresight_device *csdev)
}
static int etm_enable_perf(struct coresight_device *csdev,
- struct perf_event_attr *attr)
+ struct perf_event *event)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -467,7 +468,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
return -EINVAL;
/* Configure the tracer based on the session's specifics */
- etm_parse_event_config(drvdata, attr);
+ etm_parse_event_config(drvdata, event);
/* And enable it */
etm_enable_hw(drvdata);
@@ -504,7 +505,7 @@ err:
}
static int etm_enable(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode)
+ struct perf_event *event, u32 mode)
{
int ret;
u32 val;
@@ -521,7 +522,7 @@ static int etm_enable(struct coresight_device *csdev,
ret = etm_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm_enable_perf(csdev, attr);
+ ret = etm_enable_perf(csdev, event);
break;
default:
ret = -EINVAL;
@@ -601,7 +602,8 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-static void etm_disable(struct coresight_device *csdev)
+static void etm_disable(struct coresight_device *csdev,
+ struct perf_event *event)
{
u32 mode;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -756,13 +758,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct etm_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -825,13 +823,13 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
etm_init_trace_id(drvdata);
etm_set_default(&drvdata->config);
- desc->type = CORESIGHT_DEV_TYPE_SOURCE;
- desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
- desc->ops = &etm_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_etm_groups;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ desc.ops = &etm_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_etm_groups;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto err_arch_supported;
@@ -893,6 +891,11 @@ static struct amba_id etm_ids[] = {
.mask = 0x0003ffff,
.data = "ETM 3.3",
},
+ { /* ETM 3.5 - Cortex-A5 */
+ .id = 0x0003b955,
+ .mask = 0x0003ffff,
+ .data = "ETM 3.5",
+ },
{ /* ETM 3.5 */
.id = 0x0003b956,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 7c84308c5564..b9b1e9c8f4c4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
+#include "coresight-priv.h"
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
@@ -2039,15 +2040,42 @@ static struct attribute *coresight_etmv4_attrs[] = {
NULL,
};
+struct etmv4_reg {
+ void __iomem *addr;
+ u32 data;
+};
+
+static void do_smp_cross_read(void *data)
+{
+ struct etmv4_reg *reg = data;
+
+ reg->data = readl_relaxed(reg->addr);
+}
+
+static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ struct etmv4_reg reg;
+
+ reg.addr = drvdata->base + offset;
+ /*
+ * smp cross call ensures the CPU will be powered up before
+ * accessing the ETMv4 trace core registers
+ */
+ smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
+ return reg.data;
+}
+
#define coresight_etm4x_simple_func(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, name, offset)
+ coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)
+
+#define coresight_etm4x_cross_read(name, offset) \
+ coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
+ name, offset)
-coresight_etm4x_simple_func(trcoslsr, TRCOSLSR);
coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
coresight_etm4x_simple_func(trclsr, TRCLSR);
-coresight_etm4x_simple_func(trcconfig, TRCCONFIGR);
-coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR);
coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_simple_func(trcdevid, TRCDEVID);
coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
@@ -2055,6 +2083,9 @@ coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
+coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
+coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
&dev_attr_trcoslsr.attr,
@@ -2073,19 +2104,19 @@ static struct attribute *coresight_etmv4_mgmt_attrs[] = {
NULL,
};
-coresight_etm4x_simple_func(trcidr0, TRCIDR0);
-coresight_etm4x_simple_func(trcidr1, TRCIDR1);
-coresight_etm4x_simple_func(trcidr2, TRCIDR2);
-coresight_etm4x_simple_func(trcidr3, TRCIDR3);
-coresight_etm4x_simple_func(trcidr4, TRCIDR4);
-coresight_etm4x_simple_func(trcidr5, TRCIDR5);
+coresight_etm4x_cross_read(trcidr0, TRCIDR0);
+coresight_etm4x_cross_read(trcidr1, TRCIDR1);
+coresight_etm4x_cross_read(trcidr2, TRCIDR2);
+coresight_etm4x_cross_read(trcidr3, TRCIDR3);
+coresight_etm4x_cross_read(trcidr4, TRCIDR4);
+coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
-coresight_etm4x_simple_func(trcidr8, TRCIDR8);
-coresight_etm4x_simple_func(trcidr9, TRCIDR9);
-coresight_etm4x_simple_func(trcidr10, TRCIDR10);
-coresight_etm4x_simple_func(trcidr11, TRCIDR11);
-coresight_etm4x_simple_func(trcidr12, TRCIDR12);
-coresight_etm4x_simple_func(trcidr13, TRCIDR13);
+coresight_etm4x_cross_read(trcidr8, TRCIDR8);
+coresight_etm4x_cross_read(trcidr9, TRCIDR9);
+coresight_etm4x_cross_read(trcidr10, TRCIDR10);
+coresight_etm4x_cross_read(trcidr11, TRCIDR11);
+coresight_etm4x_cross_read(trcidr12, TRCIDR12);
+coresight_etm4x_cross_read(trcidr13, TRCIDR13);
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
&dev_attr_trcidr0.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1a5e0d14c1dd..4db8d6a4d0cb 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -33,7 +33,6 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
-#include <linux/perf_event.h>
#include <asm/sections.h>
#include <asm/local.h>
@@ -46,7 +45,9 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
-static void etm4_set_default(struct etmv4_config *config);
+static void etm4_set_default_config(struct etmv4_config *config);
+static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
+ struct perf_event *event);
static enum cpuhp_state hp_online;
@@ -79,22 +80,8 @@ static int etm4_cpu_id(struct coresight_device *csdev)
static int etm4_trace_id(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- unsigned long flags;
- int trace_id = -1;
-
- if (!local_read(&drvdata->mode))
- return drvdata->trcid;
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
-
- CS_UNLOCK(drvdata->base);
- trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
- trace_id &= ETM_TRACEID_MASK;
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- return trace_id;
+ return drvdata->trcid;
}
static void etm4_enable_hw(void *info)
@@ -113,8 +100,7 @@ static void etm4_enable_hw(void *info)
/* wait for TRCSTATR.IDLE to go up */
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- TRCSTATR);
+ "timeout while waiting for Idle Trace Status\n");
writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
@@ -180,14 +166,20 @@ static void etm4_enable_hw(void *info)
writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+ /*
+ * Request to keep the trace unit powered and also
+ * emulation of powerdown
+ */
+ writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
+ drvdata->base + TRCPDCR);
+
/* Enable the trace unit */
writel_relaxed(1, drvdata->base + TRCPRGCTLR);
/* wait for TRCSTATR.IDLE to go back down to '0' */
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- TRCSTATR);
+ "timeout while waiting for Idle Trace Status\n");
CS_LOCK(drvdata->base);
@@ -195,12 +187,16 @@ static void etm4_enable_hw(void *info)
}
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
- struct perf_event_attr *attr)
+ struct perf_event *event)
{
+ int ret = 0;
struct etmv4_config *config = &drvdata->config;
+ struct perf_event_attr *attr = &event->attr;
- if (!attr)
- return -EINVAL;
+ if (!attr) {
+ ret = -EINVAL;
+ goto out;
+ }
/* Clear configuration from previous run */
memset(config, 0, sizeof(struct etmv4_config));
@@ -212,14 +208,12 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
config->mode = ETM_MODE_EXCL_USER;
/* Always start from the default config */
- etm4_set_default(config);
+ etm4_set_default_config(config);
- /*
- * By default the tracers are configured to trace the whole address
- * range. Narrow the field only if requested by user space.
- */
- if (config->mode)
- etm4_config_trace_mode(config);
+ /* Configure filters specified on the perf cmd line, if any. */
+ ret = etm4_set_event_filters(drvdata, event);
+ if (ret)
+ goto out;
/* Go from generic option to ETMv4 specifics */
if (attr->config & BIT(ETM_OPT_CYCACC))
@@ -227,23 +221,30 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
if (attr->config & BIT(ETM_OPT_TS))
config->cfg |= ETMv4_MODE_TIMESTAMP;
- return 0;
+out:
+ return ret;
}
static int etm4_enable_perf(struct coresight_device *csdev,
- struct perf_event_attr *attr)
+ struct perf_event *event)
{
+ int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
- return -EINVAL;
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
+ ret = -EINVAL;
+ goto out;
+ }
/* Configure the tracer based on the session's specifics */
- etm4_parse_event_config(drvdata, attr);
+ ret = etm4_parse_event_config(drvdata, event);
+ if (ret)
+ goto out;
/* And enable it */
etm4_enable_hw(drvdata);
- return 0;
+out:
+ return ret;
}
static int etm4_enable_sysfs(struct coresight_device *csdev)
@@ -274,7 +275,7 @@ err:
}
static int etm4_enable(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode)
+ struct perf_event *event, u32 mode)
{
int ret;
u32 val;
@@ -291,7 +292,7 @@ static int etm4_enable(struct coresight_device *csdev,
ret = etm4_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm4_enable_perf(csdev, attr);
+ ret = etm4_enable_perf(csdev, event);
break;
default:
ret = -EINVAL;
@@ -311,6 +312,11 @@ static void etm4_disable_hw(void *info)
CS_UNLOCK(drvdata->base);
+ /* power can be removed from the trace unit now */
+ control = readl_relaxed(drvdata->base + TRCPDCR);
+ control &= ~TRCPDCR_PU;
+ writel_relaxed(control, drvdata->base + TRCPDCR);
+
control = readl_relaxed(drvdata->base + TRCPRGCTLR);
/* EN, bit[0] Trace unit enable bit */
@@ -326,14 +332,28 @@ static void etm4_disable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
-static int etm4_disable_perf(struct coresight_device *csdev)
+static int etm4_disable_perf(struct coresight_device *csdev,
+ struct perf_event *event)
{
+ u32 control;
+ struct etm_filters *filters = event->hw.addr_filters;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
etm4_disable_hw(drvdata);
+
+ /*
+ * Check if the start/stop logic was active when the unit was stopped.
+ * That way we can re-enable the start/stop logic when the process is
+ * scheduled again. Configuration of the start/stop logic happens in
+ * function etm4_set_event_filters().
+ */
+ control = readl_relaxed(drvdata->base + TRCVICTLR);
+ /* TRCVICTLR::SSSTATUS, bit[9] */
+ filters->ssstatus = (control & BIT(9));
+
return 0;
}
@@ -362,7 +382,8 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-static void etm4_disable(struct coresight_device *csdev)
+static void etm4_disable(struct coresight_device *csdev,
+ struct perf_event *event)
{
u32 mode;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -381,7 +402,7 @@ static void etm4_disable(struct coresight_device *csdev)
etm4_disable_sysfs(csdev);
break;
case CS_MODE_PERF:
- etm4_disable_perf(csdev);
+ etm4_disable_perf(csdev, event);
break;
}
@@ -564,21 +585,8 @@ static void etm4_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
-static void etm4_set_default(struct etmv4_config *config)
+static void etm4_set_default_config(struct etmv4_config *config)
{
- if (WARN_ON_ONCE(!config))
- return;
-
- /*
- * Make default initialisation trace everything
- *
- * Select the "always true" resource selector on the
- * "Enablign Event" line and configure address range comparator
- * '0' to trace all the possible address range. From there
- * configure the "include/exclude" engine to include address
- * range comparator '0'.
- */
-
/* disable all events tracing */
config->eventctrl0 = 0x0;
config->eventctrl1 = 0x0;
@@ -594,6 +602,108 @@ static void etm4_set_default(struct etmv4_config *config)
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
config->vinst_ctrl |= BIT(0);
+}
+
+static u64 etm4_get_access_type(struct etmv4_config *config)
+{
+ u64 access_type = 0;
+
+ /*
+ * EXLEVEL_NS, bits[15:12]
+ * The Exception levels are:
+ * Bit[12] Exception level 0 - Application
+ * Bit[13] Exception level 1 - OS
+ * Bit[14] Exception level 2 - Hypervisor
+ * Bit[15] Never implemented
+ *
+ * Always stay away from hypervisor mode.
+ */
+ access_type = ETM_EXLEVEL_NS_HYP;
+
+ if (config->mode & ETM_MODE_EXCL_KERN)
+ access_type |= ETM_EXLEVEL_NS_OS;
+
+ if (config->mode & ETM_MODE_EXCL_USER)
+ access_type |= ETM_EXLEVEL_NS_APP;
+
+ /*
+ * EXLEVEL_S, bits[11:8], don't trace anything happening
+ * in secure state.
+ */
+ access_type |= (ETM_EXLEVEL_S_APP |
+ ETM_EXLEVEL_S_OS |
+ ETM_EXLEVEL_S_HYP);
+
+ return access_type;
+}
+
+static void etm4_set_comparator_filter(struct etmv4_config *config,
+ u64 start, u64 stop, int comparator)
+{
+ u64 access_type = etm4_get_access_type(config);
+
+ /* First half of default address comparator */
+ config->addr_val[comparator] = start;
+ config->addr_acc[comparator] = access_type;
+ config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;
+
+ /* Second half of default address comparator */
+ config->addr_val[comparator + 1] = stop;
+ config->addr_acc[comparator + 1] = access_type;
+ config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;
+
+ /*
+ * Configure the ViewInst function to include this address range
+ * comparator.
+ *
+ * @comparator is divided by two since it is the index in the
+ * etmv4_config::addr_val array but register TRCVIIECTLR deals with
+ * address range comparator _pairs_.
+ *
+ * Therefore:
+ * index 0 -> comparator pair 0
+ * index 2 -> comparator pair 1
+ * index 4 -> comparator pair 2
+ * ...
+ * index 14 -> comparator pair 7
+ */
+ config->viiectlr |= BIT(comparator / 2);
+}
+
+static void etm4_set_start_stop_filter(struct etmv4_config *config,
+ u64 address, int comparator,
+ enum etm_addr_type type)
+{
+ int shift;
+ u64 access_type = etm4_get_access_type(config);
+
+ /* Configure the comparator */
+ config->addr_val[comparator] = address;
+ config->addr_acc[comparator] = access_type;
+ config->addr_type[comparator] = type;
+
+ /*
+ * Configure ViewInst Start-Stop control register.
+ * Addresses configured to start tracing go from bit 0 to n-1,
+ * while those configured to stop tracing go from bit 16 to 16 + n-1.
+ */
+ shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
+ config->vissctlr |= BIT(shift + comparator);
+}
+
+static void etm4_set_default_filter(struct etmv4_config *config)
+{
+ u64 start, stop;
+
+ /*
+ * Configure address range comparator '0' to encompass all
+ * possible addresses.
+ */
+ start = 0x0;
+ stop = ~0x0;
+
+ etm4_set_comparator_filter(config, start, stop,
+ ETM_DEFAULT_ADDR_COMP);
/*
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
@@ -601,43 +711,156 @@ static void etm4_set_default(struct etmv4_config *config)
*/
config->vinst_ctrl |= BIT(9);
+ /* No start-stop filtering for ViewInst */
+ config->vissctlr = 0x0;
+}
+
+static void etm4_set_default(struct etmv4_config *config)
+{
+ if (WARN_ON_ONCE(!config))
+ return;
+
/*
- * Configure address range comparator '0' to encompass all
- * possible addresses.
+ * Make default initialisation trace everything
+ *
+ * Select the "always true" resource selector on the
+ * "Enablign Event" line and configure address range comparator
+ * '0' to trace all the possible address range. From there
+ * configure the "include/exclude" engine to include address
+ * range comparator '0'.
*/
+ etm4_set_default_config(config);
+ etm4_set_default_filter(config);
+}
- /* First half of default address comparator: start at address 0 */
- config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0;
- /* trace instruction addresses */
- config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1));
- /* EXLEVEL_NS, bits[12:15], only trace application and kernel space */
- config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP;
- /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */
- config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP |
- ETM_EXLEVEL_S_OS |
- ETM_EXLEVEL_S_HYP);
- config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE;
+static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
+{
+ int nr_comparator, index = 0;
+ struct etmv4_config *config = &drvdata->config;
/*
- * Second half of default address comparator: go all
- * the way to the top.
- */
- config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0;
- /* trace instruction addresses */
- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1));
- /* Address comparator type must be equal for both halves */
- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] =
- config->addr_acc[ETM_DEFAULT_ADDR_COMP];
- config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE;
+ * nr_addr_cmp holds the number of comparator _pairs_, so multiply by 2
+ * for the total number of comparators.
+ */
+ nr_comparator = drvdata->nr_addr_cmp * 2;
+
+ /* Go through the tally of comparators looking for a free one. */
+ while (index < nr_comparator) {
+ switch (type) {
+ case ETM_ADDR_TYPE_RANGE:
+ if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
+ return index;
+
+ /* Address range comparators go in pairs */
+ index += 2;
+ break;
+ case ETM_ADDR_TYPE_START:
+ case ETM_ADDR_TYPE_STOP:
+ if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
+ return index;
+
+ /* Start/stop address can have odd indexes */
+ index += 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* If we are here all the comparators have been used. */
+ return -ENOSPC;
+}
+
+static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
+ struct perf_event *event)
+{
+ int i, comparator, ret = 0;
+ u64 address;
+ struct etmv4_config *config = &drvdata->config;
+ struct etm_filters *filters = event->hw.addr_filters;
+
+ if (!filters)
+ goto default_filter;
+
+ /* Sync events with what Perf got */
+ perf_event_addr_filters_sync(event);
/*
- * Configure the ViewInst function to filter on address range
- * comparator '0'.
+ * If there are no filters to deal with, simply go ahead with
+ * the default filter, i.e. the entire address range.
*/
- config->viiectlr = BIT(0);
+ if (!filters->nr_filters)
+ goto default_filter;
+
+ for (i = 0; i < filters->nr_filters; i++) {
+ struct etm_filter *filter = &filters->etm_filter[i];
+ enum etm_addr_type type = filter->type;
+
+ /* See if a comparator is free. */
+ comparator = etm4_get_next_comparator(drvdata, type);
+ if (comparator < 0) {
+ ret = comparator;
+ goto out;
+ }
+
+ switch (type) {
+ case ETM_ADDR_TYPE_RANGE:
+ etm4_set_comparator_filter(config,
+ filter->start_addr,
+ filter->stop_addr,
+ comparator);
+ /*
+ * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
+ * in the started state
+ */
+ config->vinst_ctrl |= BIT(9);
+
+ /* No start-stop filtering for ViewInst */
+ config->vissctlr = 0x0;
+ break;
+ case ETM_ADDR_TYPE_START:
+ case ETM_ADDR_TYPE_STOP:
+ /* Get the right start or stop address */
+ address = (type == ETM_ADDR_TYPE_START ?
+ filter->start_addr :
+ filter->stop_addr);
+
+ /* Configure comparator */
+ etm4_set_start_stop_filter(config, address,
+ comparator, type);
+
+ /*
+ * If filters::ssstatus == 1, trace acquisition was
+ * started but the process was yanked away before
+ * the stop address was hit. As such the start/stop
+ * logic needs to be re-started so that tracing can
+ * resume where it left off.
+ *
+ * The start/stop logic status when a process is
+ * scheduled out is checked in function
+ * etm4_disable_perf().
+ */
+ if (filters->ssstatus)
+ config->vinst_ctrl |= BIT(9);
+
+ /* No include/exclude filtering for ViewInst */
+ config->viiectlr = 0x0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
- /* no start-stop filtering for ViewInst */
- config->vissctlr = 0x0;
+ goto out;
+
+default_filter:
+ etm4_set_default_filter(config);
+
+out:
+ return ret;
}
void etm4_config_trace_mode(struct etmv4_config *config)
@@ -727,13 +950,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct etmv4_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -788,13 +1007,13 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
- desc->type = CORESIGHT_DEV_TYPE_SOURCE;
- desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
- desc->ops = &etm4_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_etmv4_groups;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ desc.ops = &etm4_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_etmv4_groups;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto err_arch_supported;
@@ -826,12 +1045,12 @@ err_arch_supported:
}
static struct amba_id etm4_ids[] = {
- { /* ETM 4.0 - Qualcomm */
- .id = 0x0003b95d,
- .mask = 0x0003ffff,
+ { /* ETM 4.0 - Cortex-A53 */
+ .id = 0x000bb95d,
+ .mask = 0x000fffff,
.data = "ETM 4.0",
},
- { /* ETM 4.0 - Juno board */
+ { /* ETM 4.0 - Cortex-A57 */
.id = 0x000bb95e,
.mask = 0x000fffff,
.data = "ETM 4.0",
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 5359c5197c1d..ba8d3f86de21 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -183,6 +183,9 @@
#define TRCSTATR_IDLE_BIT 0
#define ETM_DEFAULT_ADDR_COMP 0
+/* PowerDown Control Register bits */
+#define TRCPDCR_PU BIT(3)
+
/* secure state access levels */
#define ETM_EXLEVEL_S_APP BIT(8)
#define ETM_EXLEVEL_S_OS BIT(9)
@@ -407,14 +410,6 @@ enum etm_addr_ctxtype {
ETM_CTX_CTXID_VMID,
};
-enum etm_addr_type {
- ETM_ADDR_TYPE_NONE,
- ETM_ADDR_TYPE_SINGLE,
- ETM_ADDR_TYPE_RANGE,
- ETM_ADDR_TYPE_START,
- ETM_ADDR_TYPE_STOP,
-};
-
extern const struct attribute_group *coresight_etmv4_groups[];
void etm4_config_trace_mode(struct etmv4_config *config);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 05df789056cc..860fe6ef5632 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -176,7 +176,7 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
if (np) {
@@ -207,17 +207,13 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
pm_runtime_put(&adev->dev);
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- desc->type = CORESIGHT_DEV_TYPE_LINK;
- desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
- desc->ops = &funnel_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_funnel_groups;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+ desc.ops = &funnel_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_funnel_groups;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index ad975c58080d..196a14be4b3d 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/coresight.h>
+#include <linux/pm_runtime.h>
/*
* Coresight management registers (0xf00-0xfcc)
@@ -37,16 +38,32 @@
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
-#define coresight_simple_func(type, name, offset) \
+typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
+#define coresight_simple_func(type, func, name, offset) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
{ \
type *drvdata = dev_get_drvdata(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
- readl_relaxed(drvdata->base + offset)); \
+ coresight_read_fn fn = func; \
+ u32 val; \
+ pm_runtime_get_sync(_dev->parent); \
+ if (fn) \
+ val = fn(_dev->parent, offset); \
+ else \
+ val = readl_relaxed(drvdata->base + offset); \
+ pm_runtime_put_sync(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); \
} \
static DEVICE_ATTR_RO(name)
+enum etm_addr_type {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+
enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
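
Aside, not part of the patch: with the extra coresight_read_fn parameter, coresight_simple_func() wraps the register read in runtime PM and lets a driver substitute its own accessor. The fragment below is a hedged usage sketch meant to sit in a CoreSight driver next to the macro above; struct my_drvdata, my_drv_read_reg and MY_DRV_STATUS are hypothetical names, and the only requirement taken from the macro body is that the drvdata type has a 'base' field when the default readl_relaxed() path is used.

struct my_drvdata {
	void __iomem *base;
	/* ... */
};

static u32 my_drv_read_reg(const struct device *dev, u32 offset)
{
	struct my_drvdata *drvdata = dev_get_drvdata(dev);

	/* any custom access path (e.g. behind a hardware lock) goes here */
	return readl_relaxed(drvdata->base + offset);
}

#define MY_DRV_STATUS	0x0	/* hypothetical register offset */

/* default path: NULL callback, plain readl_relaxed() under runtime PM */
coresight_simple_func(struct my_drvdata, NULL, status, MY_DRV_STATUS);
/* custom path: reads go through my_drv_read_reg() instead */
coresight_simple_func(struct my_drvdata, my_drv_read_reg, status2, MY_DRV_STATUS);
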
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 700f710e4bfa..0a3d15f0b009 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -102,7 +102,7 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res = &adev->res;
struct coresight_platform_data *pdata = NULL;
struct replicator_state *drvdata;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
void __iomem *base;
@@ -134,16 +134,12 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
pm_runtime_put(&adev->dev);
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- desc->type = CORESIGHT_DEV_TYPE_LINK;
- desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
- desc->ops = &replicator_cs_ops;
- desc->pdata = adev->dev.platform_data;
- desc->dev = &adev->dev;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc.ops = &replicator_cs_ops;
+ desc.pdata = adev->dev.platform_data;
+ desc.dev = &adev->dev;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index c6982e312e15..3756e71cb8f5 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -69,7 +69,7 @@ static int replicator_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = pdev->dev.of_node;
if (np) {
@@ -95,18 +95,12 @@ static int replicator_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
platform_set_drvdata(pdev, drvdata);
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out_disable_pm;
- }
-
- desc->type = CORESIGHT_DEV_TYPE_LINK;
- desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
- desc->ops = &replicator_cs_ops;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc.ops = &replicator_cs_ops;
+ desc.pdata = pdev->dev.platform_data;
+ desc.dev = &pdev->dev;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto out_disable_pm;
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 73be58a11e4f..49e0f1b925a5 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -105,10 +105,12 @@ module_param_named(
/**
* struct channel_space - central management entity for extended ports
* @base: memory mapped base address where channels start.
+ * @phys: physical base address of channel region.
* @guaranteed: is the channel delivery guaranteed.
*/
struct channel_space {
void __iomem *base;
+ phys_addr_t phys;
unsigned long *guaranteed;
};
@@ -196,7 +198,7 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
}
static int stm_enable(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode)
+ struct perf_event *event, u32 mode)
{
u32 val;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -258,7 +260,8 @@ static void stm_disable_hw(struct stm_drvdata *drvdata)
stm_hwevent_disable_hw(drvdata);
}
-static void stm_disable(struct coresight_device *csdev)
+static void stm_disable(struct coresight_device *csdev,
+ struct perf_event *event)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -353,7 +356,24 @@ static void stm_generic_unlink(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return;
- stm_disable(drvdata->csdev);
+ stm_disable(drvdata->csdev, NULL);
+}
+
+static phys_addr_t
+stm_mmio_addr(struct stm_data *stm_data, unsigned int master,
+ unsigned int channel, unsigned int nr_chans)
+{
+ struct stm_drvdata *drvdata = container_of(stm_data,
+ struct stm_drvdata, stm);
+ phys_addr_t addr;
+
+ addr = drvdata->chs.phys + channel * BYTES_PER_CHANNEL;
+
+ if (offset_in_page(addr) ||
+ offset_in_page(nr_chans * BYTES_PER_CHANNEL))
+ return 0;
+
+ return addr;
}
static long stm_generic_set_options(struct stm_data *stm_data,
@@ -616,7 +636,7 @@ static ssize_t traceid_store(struct device *dev,
static DEVICE_ATTR_RW(traceid);
#define coresight_stm_simple_func(name, offset) \
- coresight_simple_func(struct stm_drvdata, name, offset)
+ coresight_simple_func(struct stm_drvdata, NULL, name, offset)
coresight_stm_simple_func(tcsr, STMTCSR);
coresight_stm_simple_func(tsfreqr, STMTSFREQR);
@@ -761,7 +781,9 @@ static void stm_init_generic_data(struct stm_drvdata *drvdata)
drvdata->stm.sw_end = 1;
drvdata->stm.hw_override = true;
drvdata->stm.sw_nchannels = drvdata->numsp;
+ drvdata->stm.sw_mmiosz = BYTES_PER_CHANNEL;
drvdata->stm.packet = stm_generic_packet;
+ drvdata->stm.mmio_addr = stm_mmio_addr;
drvdata->stm.link = stm_generic_link;
drvdata->stm.unlink = stm_generic_unlink;
drvdata->stm.set_options = stm_generic_set_options;
@@ -778,7 +800,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res = &adev->res;
struct resource ch_res;
size_t res_size, bitmap_size;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
if (np) {
@@ -808,6 +830,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
if (ret)
return ret;
+ drvdata->chs.phys = ch_res.start;
base = devm_ioremap_resource(dev, &ch_res);
if (IS_ERR(base))
@@ -843,19 +866,13 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
return -EPROBE_DEFER;
}
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto stm_unregister;
- }
-
- desc->type = CORESIGHT_DEV_TYPE_SOURCE;
- desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
- desc->ops = &stm_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- desc->groups = coresight_stm_groups;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
+ desc.ops = &stm_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_stm_groups;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto stm_unregister;
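
Aside, not part of the patch: a worked example of the page-alignment check in stm_mmio_addr() above. The value of BYTES_PER_CHANNEL is not shown in this hunk, so it is an assumption here (256 is used for the illustration), as are the physical base and channel numbers; with 4 KiB pages a window is only handed out when both the start channel offset and the window length land on page boundaries.

#include <stdio.h>

#define BYTES_PER_CHANNEL	256	/* assumed value, see lead-in */
#define PAGE_SIZE		4096UL
#define offset_in_page(x)	((unsigned long)(x) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys = 0x20000000UL;	/* hypothetical chs.phys */
	unsigned int channel = 16, nr_chans = 16;
	unsigned long addr = phys + channel * BYTES_PER_CHANNEL;

	if (offset_in_page(addr) ||
	    offset_in_page(nr_chans * BYTES_PER_CHANNEL))
		printf("rejected: window is not page aligned\n");
	else
		printf("ok: map %#lx, %lu bytes\n", addr,
		       (unsigned long)nr_chans * BYTES_PER_CHANNEL);
	return 0;
}
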
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 466af86fd76f..d6941ea24d8d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -22,7 +22,7 @@
#include "coresight-priv.h"
#include "coresight-tmc.h"
-void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -48,6 +48,7 @@ static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
int i;
bufp = drvdata->buf;
+ drvdata->len = 0;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
@@ -55,6 +56,7 @@ static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
return;
memcpy(bufp, &read_data, 4);
bufp += 4;
+ drvdata->len += 4;
}
}
}
@@ -166,7 +168,7 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
- if (!used && buf)
+ if (!used)
kfree(buf);
if (!ret)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 688be9e060fc..886ea83c68e0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -20,7 +20,7 @@
#include "coresight-priv.h"
#include "coresight-tmc.h"
-void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl;
@@ -64,11 +64,17 @@ static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
rwp = readl_relaxed(drvdata->base + TMC_RWP);
val = readl_relaxed(drvdata->base + TMC_STS);
- /* How much memory do we still have */
- if (val & BIT(0))
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ if (val & TMC_STS_FULL) {
drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- else
+ drvdata->len = drvdata->size;
+ } else {
drvdata->buf = drvdata->vaddr;
+ drvdata->len = rwp - drvdata->paddr;
+ }
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
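
Aside, not part of the patch: a numerical illustration of the tmc_etr_dump_hw() adjustment above, using made-up values. When TMC_STS_FULL is set the RAM write pointer has wrapped, so the oldest data starts at RWP and the whole buffer is valid; otherwise only the region from paddr up to RWP holds trace data.

#include <stdio.h>

int main(void)
{
	/* made-up values: 1 MiB buffer, write pointer part-way in */
	unsigned long paddr = 0x80000000UL, size = 0x100000UL;
	unsigned long rwp = 0x80040000UL;
	int full = 1;			/* pretend TMC_STS_FULL is set */
	unsigned long buf_offset, len;

	if (full) {
		/* wrapped: oldest data starts at RWP, whole buffer is valid */
		buf_offset = rwp - paddr;
		len = size;
	} else {
		/* not wrapped: valid data runs from start of buffer to RWP */
		buf_offset = 0;
		len = rwp - paddr;
	}
	printf("buf = vaddr + %#lx, len = %#lx\n", buf_offset, len);
	return 0;
}
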
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 9e02ac963cd0..d8517d2a968c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -38,8 +38,7 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
if (coresight_timeout(drvdata->base,
TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- TMC_STS);
+ "timeout while waiting for TMC to be Ready\n");
}
}
@@ -56,8 +55,7 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
if (coresight_timeout(drvdata->base,
TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
dev_err(drvdata->dev,
- "timeout observed when probing at offset %#x\n",
- TMC_FFCR);
+ "timeout while waiting for completion of Manual Flush\n");
}
tmc_wait_for_tmcready(drvdata);
@@ -140,8 +138,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
struct tmc_drvdata, miscdev);
char *bufp = drvdata->buf + *ppos;
- if (*ppos + len > drvdata->size)
- len = drvdata->size - *ppos;
+ if (*ppos + len > drvdata->len)
+ len = drvdata->len - *ppos;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
if (bufp == (char *)(drvdata->vaddr + drvdata->size))
@@ -160,7 +158,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
*ppos += len;
dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
- __func__, len, (int)(drvdata->size - *ppos));
+ __func__, len, (int)(drvdata->len - *ppos));
return len;
}
@@ -220,7 +218,7 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
}
#define coresight_tmc_simple_func(name, offset) \
- coresight_simple_func(struct tmc_drvdata, name, offset)
+ coresight_simple_func(struct tmc_drvdata, NULL, name, offset)
coresight_tmc_simple_func(rsz, TMC_RSZ);
coresight_tmc_simple_func(sts, TMC_STS);
@@ -249,8 +247,8 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
NULL,
};
-ssize_t trigger_cntr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t trigger_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->trigger_cntr;
@@ -304,27 +302,32 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct tmc_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
if (np) {
pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
adev->dev.platform_data = pdata;
}
+ ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
- return -ENOMEM;
+ goto out;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out;
+ }
drvdata->base = base;
@@ -347,33 +350,28 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto err_devm_kzalloc;
- }
-
- desc->pdata = pdata;
- desc->dev = dev;
- desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
- desc->groups = coresight_tmc_groups;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.groups = coresight_tmc_groups;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
- desc->type = CORESIGHT_DEV_TYPE_SINK;
- desc->ops = &tmc_etb_cs_ops;
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.ops = &tmc_etb_cs_ops;
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- desc->type = CORESIGHT_DEV_TYPE_SINK;
- desc->ops = &tmc_etr_cs_ops;
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.ops = &tmc_etr_cs_ops;
} else {
- desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
- desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
- desc->ops = &tmc_etf_cs_ops;
+ desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
+ desc.ops = &tmc_etf_cs_ops;
}
- drvdata->csdev = coresight_register(desc);
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err_devm_kzalloc;
+ goto out;
}
drvdata->miscdev.name = pdata->name;
@@ -381,16 +379,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
if (ret)
- goto err_misc_register;
-
- return 0;
-
-err_misc_register:
- coresight_unregister(drvdata->csdev);
-err_devm_kzalloc:
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(dev, drvdata->size,
- drvdata->vaddr, drvdata->paddr);
+ coresight_unregister(drvdata->csdev);
+out:
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 5c5fe2ad2ca7..44b3ae346118 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -98,7 +98,8 @@ enum tmc_mem_intf_width {
* @buf: area of memory where trace data get sent.
* @paddr: DMA start location in RAM.
* @vaddr: virtual representation of @paddr.
- * @size: @buf size.
+ * @size: trace buffer size.
+ * @len: size of the available trace.
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -115,6 +116,7 @@ struct tmc_drvdata {
dma_addr_t paddr;
void __iomem *vaddr;
u32 size;
+ u32 len;
local_t mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 4e471e2e9d89..0673baf0f2f5 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -119,7 +119,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata = NULL;
struct tpiu_drvdata *drvdata;
struct resource *res = &adev->res;
- struct coresight_desc *desc;
+ struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
if (np) {
@@ -154,16 +154,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- desc->type = CORESIGHT_DEV_TYPE_SINK;
- desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
- desc->ops = &tpiu_cs_ops;
- desc->pdata = pdata;
- desc->dev = dev;
- drvdata->csdev = coresight_register(desc);
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
+ desc.ops = &tpiu_cs_ops;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index d08d1ab9bba5..7bf00a0beb6f 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -257,7 +257,7 @@ static void coresight_disable_source(struct coresight_device *csdev)
{
if (atomic_dec_return(csdev->refcnt) == 0) {
if (source_ops(csdev)->disable) {
- source_ops(csdev)->disable(csdev);
+ source_ops(csdev)->disable(csdev, NULL);
csdev->enable = false;
}
}
@@ -429,7 +429,7 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(path);
@@ -725,7 +725,8 @@ static int coresight_orphan_match(struct device *dev, void *data)
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ if (conn->child_name &&
+ !strcmp(dev_name(&csdev->dev), conn->child_name)) {
conn->child_dev = csdev;
} else {
/* This component still has an orphan */
@@ -893,7 +894,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
- struct coresight_connection *conns;
+ struct coresight_connection *conns = NULL;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
@@ -921,16 +922,20 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->nr_inport = desc->pdata->nr_inport;
csdev->nr_outport = desc->pdata->nr_outport;
- conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
- if (!conns) {
- ret = -ENOMEM;
- goto err_kzalloc_conns;
- }
- for (i = 0; i < csdev->nr_outport; i++) {
- conns[i].outport = desc->pdata->outports[i];
- conns[i].child_name = desc->pdata->child_names[i];
- conns[i].child_port = desc->pdata->child_ports[i];
+ /* Initialise connections if there is at least one outport */
+ if (csdev->nr_outport) {
+ conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
+ if (!conns) {
+ ret = -ENOMEM;
+ goto err_kzalloc_conns;
+ }
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conns[i].outport = desc->pdata->outports[i];
+ conns[i].child_name = desc->pdata->child_names[i];
+ conns[i].child_port = desc->pdata->child_ports[i];
+ }
}
csdev->conns = conns;
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b68da1888fd5..629e031b7456 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -166,7 +166,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
rdev = of_coresight_get_endpoint_device(rparent);
if (!rdev)
- continue;
+ return ERR_PTR(-EPROBE_DEFER);
pdata->child_names[i] = dev_name(rdev);
pdata->child_ports[i] = rendpoint.id;
@@ -184,6 +184,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
break;
}
}
+ of_node_put(dn);
return pdata;
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5c3993b26129..6d94e2ec5b4f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -836,7 +836,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
depends on HAS_DMA
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -956,6 +956,17 @@ config I2C_OCTEON
This driver can also be built as a module. If so, the module
will be called i2c-octeon.
+config I2C_THUNDERX
+ tristate "Cavium ThunderX I2C bus support"
+ depends on 64BIT && PCI && (ARM64 || COMPILE_TEST)
+ select I2C_SMBUS
+ help
+ Say yes if you want to support the I2C serial bus on Cavium
+ ThunderX SOC.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-thunderx.
+
config I2C_XILINX
tristate "Xilinx I2C Controller"
depends on HAS_IOMEM
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 37f2819b4560..29764cc20a44 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -91,7 +91,10 @@ obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
+i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
+i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
+obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 6c7113d990f8..274908cd1fde 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -378,11 +378,8 @@ static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id)
amd756_ioport);
error = i2c_add_adapter(&amd756_smbus);
- if (error) {
- dev_err(&pdev->dev,
- "Adapter registration failed, module not inserted\n");
+ if (error)
goto out_err;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 1bb97f658b47..0b86c6173e07 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1122,8 +1122,6 @@ static int at91_twi_probe(struct platform_device *pdev)
rc = i2c_add_numbered_adapter(&dev->adapter);
if (rc) {
- dev_err(dev->dev, "Adapter %s registration failed\n",
- dev->adapter.name);
clk_disable_unprepare(dev->clk);
pm_runtime_disable(dev->dev);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index c335cc7852f9..4351a9343058 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -545,7 +545,11 @@ static int axxia_i2c_probe(struct platform_device *pdev)
return ret;
}
- clk_prepare_enable(idev->i2c_clk);
+ ret = clk_prepare_enable(idev->i2c_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return ret;
+ }
i2c_set_adapdata(&idev->adapter, idev);
strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
@@ -560,7 +564,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&idev->adapter);
if (ret) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ clk_disable_unprepare(idev->i2c_clk);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 95f7cac76f89..326b3db02c48 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -488,13 +488,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(iproc_i2c->device, "failed to add adapter\n");
- return ret;
- }
-
- return 0;
+ return i2c_add_adapter(adap);
}
static int bcm_iproc_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 258cb9a40ab3..4e489a9d16fb 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -858,10 +858,8 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
- if (rc) {
- dev_err(dev->device, "failed to add adapter\n");
+ if (rc)
return rc;
- }
dev_info(dev->device, "device registered successfully\n");
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 025686d41640..29d00c4f7824 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -685,10 +685,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
rc = i2c_add_numbered_adapter(p_adap);
- if (rc < 0) {
- dev_err(&pdev->dev, "Can't add i2c adapter!\n");
+ if (rc < 0)
goto out_error;
- }
platform_set_drvdata(pdev, iface);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 385b57bfcb38..0652281662a8 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -648,10 +648,8 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
- if (rc) {
- dev_err(dev->device, "failed to add adapter\n");
+ if (rc)
goto probe_errorout;
- }
dev_info(dev->device, "%s@%dhz registered in %s mode\n",
int_name ? int_name : " ", dev->clk_freq_hz,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 3c16a2f7c673..686971263bef 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -963,10 +963,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&id->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto err_clk_dis;
- }
/*
* Cadence I2C controller has a bug wherein it generates
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index ee57c1e865e2..d89bde2c5da2 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -665,10 +665,8 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1;
result = i2c_add_numbered_adapter(&cpm->adap);
- if (result < 0) {
- dev_err(&ofdev->dev, "Unable to register with I2C\n");
+ if (result < 0)
goto out_shut;
- }
dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
cpm->adap.name);
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 2d5ff86398d0..9b36a7b3befd 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -281,10 +281,8 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->adap.retries = I2C_MAX_RETRIES;
err = i2c_add_adapter(&bus->adap);
- if (err) {
- dev_err(dev, "cannot register i2c adapter\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, bus);
return err;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a8bdcb5292f5..9e7ef5cf5d49 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -846,10 +846,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
+ if (r)
goto err_unuse_clocks;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index fcd973d5131e..1fe93c43215c 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -42,6 +42,8 @@
#define DW_IC_SS_SCL_LCNT 0x18
#define DW_IC_FS_SCL_HCNT 0x1c
#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_HS_SCL_HCNT 0x24
+#define DW_IC_HS_SCL_LCNT 0x28
#define DW_IC_INTR_STAT 0x2c
#define DW_IC_INTR_MASK 0x30
#define DW_IC_RAW_INTR_STAT 0x34
@@ -89,12 +91,17 @@
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
-#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_TFE BIT(2)
+#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3))
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2)
+
/*
* status codes
*/
@@ -252,10 +259,15 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
{
+ dw_writel(dev, enable, DW_IC_ENABLE);
+}
+
+static void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable)
+{
int timeout = 100;
do {
- dw_writel(dev, enable, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, enable);
if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
return;
@@ -282,6 +294,28 @@ static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
return dev->get_clk_rate_khz(dev);
}
+static int i2c_dw_acquire_lock(struct dw_i2c_dev *dev)
+{
+ int ret;
+
+ if (!dev->acquire_lock)
+ return 0;
+
+ ret = dev->acquire_lock(dev);
+ if (!ret)
+ return 0;
+
+ dev_err(dev->dev, "couldn't acquire bus ownership\n");
+
+ return ret;
+}
+
+static void i2c_dw_release_lock(struct dw_i2c_dev *dev)
+{
+ if (dev->release_lock)
+ dev->release_lock(dev);
+}
+
/**
* i2c_dw_init() - initialize the designware i2c master hardware
* @dev: device private data
@@ -293,17 +327,13 @@ static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
int i2c_dw_init(struct dw_i2c_dev *dev)
{
u32 hcnt, lcnt;
- u32 reg;
+ u32 reg, comp_param1;
u32 sda_falling_time, scl_falling_time;
int ret;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- return ret;
- }
- }
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
@@ -315,13 +345,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
} else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
return -ENODEV;
}
+ comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
+
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* set standard and fast speed dividers for high/low periods */
@@ -347,8 +378,11 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
- /* Set SCL timing parameters for fast-mode */
- if (dev->fs_hcnt && dev->fs_lcnt) {
+ /* Set SCL timing parameters for fast-mode or fast-mode plus */
+ if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) {
+ hcnt = dev->fp_hcnt;
+ lcnt = dev->fp_lcnt;
+ } else if (dev->fs_hcnt && dev->fs_lcnt) {
hcnt = dev->fs_hcnt;
lcnt = dev->fs_lcnt;
} else {
@@ -366,6 +400,23 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
+ DW_IC_CON_SPEED_HIGH) {
+ if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
+ != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
+ dev_err(dev->dev, "High Speed not supported!\n");
+ dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ } else if (dev->hs_hcnt && dev->hs_lcnt) {
+ hcnt = dev->hs_hcnt;
+ lcnt = dev->hs_lcnt;
+ dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT);
+ dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n",
+ hcnt, lcnt);
+ }
+ }
+
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
@@ -387,8 +438,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* configure the i2c master */
dw_writel(dev, dev->master_cfg , DW_IC_CON);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -415,27 +466,45 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_con, ic_tar = 0;
+ u32 ic_tar = 0;
+ bool enabled;
- /* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1;
+
+ if (enabled) {
+ u32 ic_status;
+
+ /*
+ * Only disable adapter if ic_tar and ic_con can't be
+ * dynamically updated
+ */
+ ic_status = dw_readl(dev, DW_IC_STATUS);
+ if (!dev->dynamic_tar_update_enabled ||
+ (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
+ !(ic_status & DW_IC_STATUS_TFE)) {
+ __i2c_dw_enable_and_wait(dev, false);
+ enabled = false;
+ }
+ }
/* if the slave address is ten bit address, enable 10BITADDR */
- ic_con = dw_readl(dev, DW_IC_CON);
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ if (dev->dynamic_tar_update_enabled) {
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register.
- * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
- * detected from registers.
+ * mode has to be enabled via bit 12 of IC_TAR register,
+ * otherwise bit 4 of IC_CON is used.
*/
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- }
+ u32 ic_con = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, ic_con, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ dw_writel(dev, ic_con, DW_IC_CON);
+ }
/*
* Set the slave (target) address and enable 10-bit addressing mode
@@ -446,8 +515,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
- /* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ if (!enabled)
+ __i2c_dw_enable(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -628,7 +697,8 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
}
/*
- * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ * Prepare controller for a transaction and start transfer by calling
+ * i2c_dw_xfer_init()
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -651,13 +721,9 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev->abort_source = 0;
dev->rx_outstanding = 0;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- goto done_nolock;
- }
- }
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ goto done_nolock;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
@@ -675,16 +741,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
goto done;
}
- /*
- * We must disable the adapter before returning and signaling the end
- * of the current transfer. Otherwise the hardware might continue
- * generating interrupts which in turn causes a race condition with
- * the following transfer. Needs some more investigation if the
- * additional interrupts are a hardware bug or this driver doesn't
- * handle them correctly yet.
- */
- __i2c_dw_enable(dev, false);
-
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
@@ -704,8 +760,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = -EIO;
done:
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
@@ -822,9 +877,19 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ || dev->msg_err) {
+ /*
+ * We must disable interrupts before returning and signaling
+ * the end of the current transfer. Otherwise the hardware
+ * might continue generating interrupts for non-existent
+ * transfers.
+ */
+ i2c_dw_disable_int(dev);
+ dw_readl(dev, DW_IC_CLR_INTR);
+
complete(&dev->cmd_complete);
- else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
@@ -837,7 +902,7 @@ tx_aborted:
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
/* Disable controller */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
@@ -861,6 +926,7 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int r;
+ u32 reg;
init_completion(&dev->cmd_complete);
@@ -868,6 +934,26 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (r)
return r;
+ r = i2c_dw_acquire_lock(dev);
+ if (r)
+ return r;
+
+ /*
+ * Test if dynamic TAR update is enabled in this controller by writing
+ * to the IC_10BITADDR_MASTER field in IC_CON: when dynamic TAR update
+ * is enabled this field is read-only, so the write should not succeed.
+ */
+ reg = dw_readl(dev, DW_IC_CON);
+ dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
+
+ if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
+ (reg & DW_IC_CON_10BITADDR_MASTER)) {
+ dev->dynamic_tar_update_enabled = true;
+ dev_dbg(dev->dev, "Dynamic TAR update enabled");
+ }
+
+ i2c_dw_release_lock(dev);
+
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
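
Aside, not part of the patch: a small standalone model of the dynamic-TAR-update detection added to i2c_dw_probe() above, with the register access stubbed out. When the controller was synthesised with IC_DYNAMIC_TAR_UPDATE, bit 4 of IC_CON (10BITADDR_MASTER) is read-only, so flipping it and reading it back returns the original value; the fake register and its contents below are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define DW_IC_CON_10BITADDR_MASTER	0x10

static unsigned int ic_con = 0x63;		/* pretend IC_CON register */
static const bool hw_dynamic_tar = true;	/* property of the pretend HW */

static unsigned int read_ic_con(void) { return ic_con; }
static void write_ic_con(unsigned int v)
{
	if (hw_dynamic_tar)			/* bit 4 cannot be changed */
		v = (v & ~DW_IC_CON_10BITADDR_MASTER) |
		    (ic_con & DW_IC_CON_10BITADDR_MASTER);
	ic_con = v;
}

int main(void)
{
	unsigned int reg = read_ic_con();

	write_ic_con(reg ^ DW_IC_CON_10BITADDR_MASTER);
	if ((read_ic_con() & DW_IC_CON_10BITADDR_MASTER) ==
	    (reg & DW_IC_CON_10BITADDR_MASTER))
		printf("dynamic TAR update enabled\n");
	else
		printf("dynamic TAR update not available\n");
	return 0;
}
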
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 38493a7142ad..0d44d2ae7d4c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -26,6 +26,7 @@
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_SPEED_HIGH 0x6
#define DW_IC_CON_SPEED_MASK 0x6
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
@@ -57,10 +58,15 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
+ * @clk_freq: bus clock frequency
* @ss_hcnt: standard speed HCNT value
* @ss_lcnt: standard speed LCNT value
* @fs_hcnt: fast speed HCNT value
* @fs_lcnt: fast speed LCNT value
+ * @fp_hcnt: fast plus HCNT value
+ * @fp_lcnt: fast plus LCNT value
+ * @hs_hcnt: high speed HCNT value
+ * @hs_lcnt: high speed LCNT value
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @pm_runtime_disabled: true if pm runtime is disabled
@@ -96,6 +102,7 @@ struct dw_i2c_dev {
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
int rx_outstanding;
+ u32 clk_freq;
u32 sda_hold_time;
u32 sda_falling_time;
u32 scl_falling_time;
@@ -103,9 +110,14 @@ struct dw_i2c_dev {
u16 ss_lcnt;
u16 fs_hcnt;
u16 fs_lcnt;
+ u16 fp_hcnt;
+ u16 fp_lcnt;
+ u16 hs_hcnt;
+ u16 hs_lcnt;
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
+ bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d656657b805c..0b42a12171f3 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -107,6 +107,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, NULL);
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
&dev->sda_hold_time);
+ dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, NULL);
+ dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, NULL);
id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
if (id && id->driver_data)
@@ -155,7 +157,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem;
int irq, r;
- u32 clk_freq, ht = 0;
+ u32 acpi_speed, ht = 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -175,10 +177,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
/* fast mode by default because of legacy reasons */
- clk_freq = 400000;
+ dev->clk_freq = 400000;
if (pdata) {
- clk_freq = pdata->i2c_scl_freq;
+ dev->clk_freq = pdata->i2c_scl_freq;
} else {
device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns",
&ht);
@@ -187,17 +189,24 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns",
&dev->scl_falling_time);
device_property_read_u32(&pdev->dev, "clock-frequency",
- &clk_freq);
+ &dev->clk_freq);
}
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ if (acpi_speed)
+ dev->clk_freq = acpi_speed;
+
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
/*
- * Only standard mode at 100kHz and fast mode at 400kHz are supported.
+ * Only standard mode at 100kHz, fast mode at 400kHz,
+ * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (clk_freq != 100000 && clk_freq != 400000) {
- dev_err(&pdev->dev, "Only 100kHz and 400kHz supported");
+ if (dev->clk_freq != 100000 && dev->clk_freq != 400000
+ && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
+ dev_err(&pdev->dev,
+ "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
return -EINVAL;
}
@@ -212,12 +221,20 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK;
- if (clk_freq == 100000)
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_STD;
- else
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+
+ dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN;
+
+ switch (dev->clk_freq) {
+ case 100000:
+ dev->master_cfg |= DW_IC_CON_SPEED_STD;
+ break;
+ case 3400000:
+ dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
+ break;
+ default:
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ }
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_plat_prepare_clk(dev, true)) {
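The probe changes above boil down to a precedence rule for the bus frequency: default to 400 kHz, let legacy platform data or the "clock-frequency" property override it, let an ACPI-described connection speed override both, then reject anything that is not one of the four supported rates. A minimal standalone sketch of that rule follows; pick_bus_freq() and passing the three candidate values as plain integers are illustrative assumptions, with a nonzero pdata_freq standing in for "platform data was supplied".

#include <stdint.h>
#include <stdio.h>

static int pick_bus_freq(uint32_t pdata_freq, uint32_t dt_freq,
			 uint32_t acpi_freq, uint32_t *out)
{
	uint32_t freq = 400000;			/* fast mode by default */

	if (pdata_freq)
		freq = pdata_freq;		/* legacy platform data wins over DT */
	else if (dt_freq)
		freq = dt_freq;			/* "clock-frequency" property */

	if (acpi_freq)
		freq = acpi_freq;		/* ACPI bus speed overrides both */

	if (freq != 100000 && freq != 400000 &&
	    freq != 1000000 && freq != 3400000)
		return -1;			/* -EINVAL in the driver */

	*out = freq;
	return 0;
}

int main(void)
{
	uint32_t f;

	if (!pick_bus_freq(0, 1000000, 0, &f))
		printf("using %u Hz\n", f);	/* prints 1000000 */
	return 0;
}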
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index b19a310bf9b3..f718ee4e3332 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -487,10 +487,8 @@ static int diolan_u2c_probe(struct usb_interface *interface,
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dev->adapter);
- if (ret < 0) {
- dev_err(&interface->dev, "failed to add I2C adapter\n");
+ if (ret < 0)
goto error_free;
- }
dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index f2eb4f76591f..8acda2aa1558 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -228,10 +228,8 @@ static int dln2_i2c_probe(struct platform_device *pdev)
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dln2->adapter);
- if (ret < 0) {
- dev_err(dev, "failed to add I2C adapter: %d\n", ret);
+ if (ret < 0)
goto out_disable;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index e253598d764c..aa336ba89aa3 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -438,7 +438,6 @@ static int efm32_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&ddata->adapter);
if (ret) {
- dev_err(&pdev->dev, "failed to add i2c adapter (%d)\n", ret);
free_irq(ddata->irq, ddata);
err_disable_clk:
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index c0e3ada02876..bea607149972 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -796,10 +796,8 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
exynos5_i2c_reset(i2c);
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ if (ret < 0)
goto err_clk;
- }
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 7c6966434ee7..ae7f3180f7e8 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -478,10 +478,8 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(priv->dev);
ret = i2c_add_adapter(&priv->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ if (ret < 0)
goto err_runtime;
- }
return ret;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 26298af73232..08847e8b8998 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -64,6 +64,7 @@
* Broxton (SOC) 0x5ad4 32 hard yes yes yes
* Lewisburg (PCH) 0xa1a3 32 hard yes yes yes
* Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes
+ * Kaby Lake PCH-H (PCH) 0xa2a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -226,6 +227,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
+#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
struct i801_mux_config {
char *gpio_chip;
@@ -1006,6 +1008,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
{ 0, }
};
@@ -1482,6 +1485,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
+ case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
@@ -1615,7 +1619,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
- dev_err(&dev->dev, "Failed to add SMBus adapter\n");
i801_acpi_remove(priv);
return err;
}
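The Kaby Lake PCH-H addition is only a few lines because the i801 driver keys everything off the PCI device ID: a comment line, an ID definition, an entry in the ID table, and a switch case that grants the same feature bits as the other recent PCHs. The toy lookup below shows that shape with simplified stand-in structures and flag names; none of it is the driver's actual code.

#include <stdint.h>
#include <stdio.h>

#define FEATURE_I2C_BLOCK_READ	(1u << 0)
#define FEATURE_IRQ		(1u << 1)
#define FEATURE_SMBUS_PEC	(1u << 2)

struct smbus_id {
	uint16_t device;
	uint32_t features;
};

static const struct smbus_id ids[] = {
	{ 0xa1a3, FEATURE_I2C_BLOCK_READ | FEATURE_IRQ | FEATURE_SMBUS_PEC }, /* Lewisburg */
	{ 0xa223, FEATURE_I2C_BLOCK_READ | FEATURE_IRQ | FEATURE_SMBUS_PEC }, /* Lewisburg SSKU */
	{ 0xa2a3, FEATURE_I2C_BLOCK_READ | FEATURE_IRQ | FEATURE_SMBUS_PEC }, /* Kaby Lake PCH-H */
	{ 0 }
};

static uint32_t features_for(uint16_t device)
{
	for (const struct smbus_id *id = ids; id->device; id++)
		if (id->device == device)
			return id->features;
	return 0;
}

int main(void)
{
	printf("features 0x%x\n", (unsigned)features_for(0xa2a3));
	return 0;
}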
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index cdaa7be2cd1b..412b91d255ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -751,10 +751,8 @@ static int iic_probe(struct platform_device *ofdev)
adap->timeout = HZ;
ret = i2c_add_adapter(adap);
- if (ret < 0) {
- dev_err(&ofdev->dev, "failed to register i2c adapter\n");
+ if (ret < 0)
goto error_cleanup;
- }
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index ea20425b6972..db8e8b40569d 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1394,10 +1394,8 @@ static int img_i2c_probe(struct platform_device *pdev)
goto disable_clk;
ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (ret < 0)
goto disable_clk;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1844bc9f7cd5..592a8f26a708 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -984,11 +984,24 @@ static void i2c_imx_unprepare_recovery(struct i2c_adapter *adap)
pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_default);
}
-static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
+/*
+ * We switch SCL and SDA to their GPIO function and do some bitbanging
+ * for bus recovery. These alternative pinmux settings can be
+ * described in the device tree by a separate pinctrl state "gpio". If
+ * this is missing this is not a big problem, the only implication is
+ * that we can't do bus recovery.
+ */
+static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
struct platform_device *pdev)
{
struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo;
+ i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!i2c_imx->pinctrl || IS_ERR(i2c_imx->pinctrl)) {
+ dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(i2c_imx->pinctrl);
+ }
+
i2c_imx->pinctrl_pins_default = pinctrl_lookup_state(i2c_imx->pinctrl,
PINCTRL_STATE_DEFAULT);
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
@@ -1001,7 +1014,7 @@ static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
IS_ERR(i2c_imx->pinctrl_pins_default) ||
IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
- return;
+ return 0;
}
dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
@@ -1011,6 +1024,8 @@ static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
rinfo->recover_bus = i2c_generic_gpio_recovery;
i2c_imx->adapter.bus_recovery_info = rinfo;
+
+ return 0;
}
static u32 i2c_imx_func(struct i2c_adapter *adapter)
@@ -1081,12 +1096,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
return ret;
}
- i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(i2c_imx->pinctrl)) {
- ret = PTR_ERR(i2c_imx->pinctrl);
- goto clk_disable;
- }
-
/* Request IRQ */
ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
pdev->name, i2c_imx);
@@ -1125,14 +1134,16 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx, IMX_I2C_I2CR);
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
- i2c_imx_init_recovery_info(i2c_imx, pdev);
+ /* Init optional bus recovery function */
+ ret = i2c_imx_init_recovery_info(i2c_imx, pdev);
+ /* Give it another chance if pinctrl used is not ready yet */
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
- if (ret < 0) {
- dev_err(&pdev->dev, "registration failed\n");
+ if (ret < 0)
goto rpm_disable;
- }
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
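The comment added to i2c-imx describes the usual GPIO bus-recovery trick: reroute SCL/SDA to GPIO, clock SCL until a stuck slave releases SDA, then hand the pins back. The self-contained simulation below illustrates only that clocking loop; sim_bus, sda_read() and scl_write() are invented stand-ins for real pin accessors, and the nine-pulse limit is the conventional I2C figure rather than anything taken from this driver.

#include <stdbool.h>
#include <stdio.h>

/* Toy bus model: a "slave" that keeps SDA low until it has seen
 * release_after SCL pulses. Purely for illustrating the recovery loop. */
struct sim_bus {
	int pulses_seen;
	int release_after;
	bool scl;
};

static bool sda_read(struct sim_bus *b)
{
	return b->pulses_seen >= b->release_after;
}

static void scl_write(struct sim_bus *b, bool level)
{
	if (!b->scl && level)		/* count rising edges */
		b->pulses_seen++;
	b->scl = level;
}

static int recover_bus(struct sim_bus *b)
{
	for (int i = 0; i < 9 && !sda_read(b); i++) {
		scl_write(b, false);
		scl_write(b, true);
	}
	return sda_read(b) ? 0 : -1;	/* 0: bus released, -1: still stuck */
}

int main(void)
{
	struct sim_bus b = { .pulses_seen = 0, .release_after = 3, .scl = true };

	printf("recovery %s\n", recover_bus(&b) ? "failed" : "succeeded");
	return 0;
}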
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index c2f25f19d76f..0cf1379f4e80 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -288,10 +288,8 @@ static int smbus_sch_probe(struct platform_device *dev)
"SMBus SCH adapter at %04x", sch_smba);
retval = i2c_add_adapter(&sch_adapter);
- if (retval) {
- dev_err(&dev->dev, "Couldn't register adapter!\n");
+ if (retval)
sch_smba = 0;
- }
return retval;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1c8707710098..f573448d2132 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -922,10 +922,8 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
err = i2c_add_adapter(&priv->adapter);
- if (err) {
- dev_err(&pdev->dev, "Failed to add SMBus iSMT adapter\n");
+ if (err)
return -ENODEV;
- }
return 0;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index cd9872594fe2..b8ea62105f42 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -798,10 +798,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
goto err;
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add bus\n");
+ if (ret < 0)
goto err;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 586a15205e61..9b1fef455a89 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -432,10 +432,8 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c->adap.dev.of_node = pdev->dev.of_node;
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add adapter!\n");
+ if (ret < 0)
goto fail_clk;
- }
dev_info(&pdev->dev, "LPC2K I2C adapter\n");
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 76e28980904f..2aa61bbbd307 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -453,7 +453,6 @@ static int meson_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
- dev_err(&pdev->dev, "can't register adapter\n");
clk_unprepare(i2c->clk);
return ret;
}
@@ -473,6 +472,7 @@ static int meson_i2c_remove(struct platform_device *pdev)
static const struct of_device_id meson_i2c_match[] = {
{ .compatible = "amlogic,meson6-i2c" },
+ { .compatible = "amlogic,meson-gxbb-i2c" },
{ },
};
MODULE_DEVICE_TABLE(of, meson_i2c_match);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 48ecffecc0ed..565a49a0c564 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -737,10 +737,8 @@ static int fsl_i2c_probe(struct platform_device *op)
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
result = i2c_add_adapter(&i2c->adap);
- if (result < 0) {
- dev_err(i2c->dev, "failed to add adapter\n");
+ if (result < 0)
goto fail_add;
- }
return result;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index d9373e60be8a..4a7d9bc2142b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -786,10 +786,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
ret = i2c_add_adapter(&i2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add i2c bus to i2c core\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 033846cdf266..5738556b6aac 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -868,7 +868,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, i2c);
err = i2c_add_numbered_adapter(adap);
if (err) {
- dev_err(dev, "Failed to add adapter (%d)\n", err);
writel(MXS_I2C_CTRL0_SFTRST,
i2c->regs + MXS_I2C_CTRL0_SET);
return err;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 42fcc9458432..374b35e7e450 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -366,7 +366,6 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
error = i2c_add_adapter(&smbus->adapter);
if (error) {
- dev_err(&smbus->adapter.dev, "Failed to register adapter.\n");
release_region(smbus->base, smbus->size);
return error;
}
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index bcd17e8cbcb4..da6609d62848 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1046,10 +1046,8 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
adap->name, dev->virtbase);
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&adev->dev, "failed to add adapter\n");
+ if (ret)
goto err_no_adap;
- }
pm_runtime_put(&adev->dev);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ac88a524143e..34f1889a4073 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -494,10 +494,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
+ if (ret)
goto err_clk;
- }
/* add in known devices to the bus */
if (pdata) {
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon-core.c
index 30ae35146723..419b54bfc7c7 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -4,280 +4,127 @@
*
* Portions Copyright (C) 2010 - 2016 Cavium, Inc.
*
- * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
+ * This file contains the shared part of the driver for the i2c adapter in
+ * Cavium Networks' OCTEON processors and ThunderX SOCs.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/atomic.h>
-#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/of.h>
-
-#include <asm/octeon/octeon.h>
-
-#define DRV_NAME "i2c-octeon"
-
-/* Register offsets */
-#define SW_TWSI 0x00
-#define TWSI_INT 0x10
-#define SW_TWSI_EXT 0x18
-
-/* Controller command patterns */
-#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
-#define SW_TWSI_EIA BIT_ULL(61) /* Extended internal address */
-#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
-#define SW_TWSI_SOVR BIT_ULL(55) /* Size override */
-#define SW_TWSI_SIZE_SHIFT 52
-#define SW_TWSI_ADDR_SHIFT 40
-#define SW_TWSI_IA_SHIFT 32 /* Internal address */
-
-/* Controller opcode word (bits 60:57) */
-#define SW_TWSI_OP_SHIFT 57
-#define SW_TWSI_OP_7 (0ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_7_IA (1ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_10 (2ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_10_IA (3ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
-
-/* Controller extended opcode word (bits 34:32) */
-#define SW_TWSI_EOP_SHIFT 32
-#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
-
-/* Controller command and status bits */
-#define TWSI_CTL_CE 0x80 /* High level controller enable */
-#define TWSI_CTL_ENAB 0x40 /* Bus enable */
-#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
-#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
-#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
-#define TWSI_CTL_AAK 0x04 /* Assert ACK */
-
-/* Status values */
-#define STAT_ERROR 0x00
-#define STAT_START 0x08
-#define STAT_REP_START 0x10
-#define STAT_TXADDR_ACK 0x18
-#define STAT_TXADDR_NAK 0x20
-#define STAT_TXDATA_ACK 0x28
-#define STAT_TXDATA_NAK 0x30
-#define STAT_LOST_ARB_38 0x38
-#define STAT_RXADDR_ACK 0x40
-#define STAT_RXADDR_NAK 0x48
-#define STAT_RXDATA_ACK 0x50
-#define STAT_RXDATA_NAK 0x58
-#define STAT_SLAVE_60 0x60
-#define STAT_LOST_ARB_68 0x68
-#define STAT_SLAVE_70 0x70
-#define STAT_LOST_ARB_78 0x78
-#define STAT_SLAVE_80 0x80
-#define STAT_SLAVE_88 0x88
-#define STAT_GENDATA_ACK 0x90
-#define STAT_GENDATA_NAK 0x98
-#define STAT_SLAVE_A0 0xA0
-#define STAT_SLAVE_A8 0xA8
-#define STAT_LOST_ARB_B0 0xB0
-#define STAT_SLAVE_LOST 0xB8
-#define STAT_SLAVE_NAK 0xC0
-#define STAT_SLAVE_ACK 0xC8
-#define STAT_AD2W_ACK 0xD0
-#define STAT_AD2W_NAK 0xD8
-#define STAT_IDLE 0xF8
-
-/* TWSI_INT values */
-#define TWSI_INT_ST_INT BIT_ULL(0)
-#define TWSI_INT_TS_INT BIT_ULL(1)
-#define TWSI_INT_CORE_INT BIT_ULL(2)
-#define TWSI_INT_ST_EN BIT_ULL(4)
-#define TWSI_INT_TS_EN BIT_ULL(5)
-#define TWSI_INT_CORE_EN BIT_ULL(6)
-#define TWSI_INT_SDA_OVR BIT_ULL(8)
-#define TWSI_INT_SCL_OVR BIT_ULL(9)
-#define TWSI_INT_SDA BIT_ULL(10)
-#define TWSI_INT_SCL BIT_ULL(11)
-
-#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
-
-struct octeon_i2c {
- wait_queue_head_t queue;
- struct i2c_adapter adap;
- int irq;
- int hlc_irq; /* For cn7890 only */
- u32 twsi_freq;
- int sys_freq;
- void __iomem *twsi_base;
- struct device *dev;
- bool hlc_enabled;
- bool broken_irq_mode;
- bool broken_irq_check;
- void (*int_enable)(struct octeon_i2c *);
- void (*int_disable)(struct octeon_i2c *);
- void (*hlc_int_enable)(struct octeon_i2c *);
- void (*hlc_int_disable)(struct octeon_i2c *);
- atomic_t int_enable_cnt;
- atomic_t hlc_int_enable_cnt;
-};
-
-static void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
-{
- __raw_writeq(val, addr);
- __raw_readq(addr); /* wait for write to land */
-}
-
-/**
- * octeon_i2c_reg_write - write an I2C core register
- * @i2c: The struct octeon_i2c
- * @eop_reg: Register selector
- * @data: Value to be written
- *
- * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
- */
-static void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
-{
- u64 tmp;
- __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI);
- do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
- } while ((tmp & SW_TWSI_V) != 0);
-}
-
-#define octeon_i2c_ctl_write(i2c, val) \
- octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
-#define octeon_i2c_data_write(i2c, val) \
- octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+#include "i2c-octeon-core.h"
-/**
- * octeon_i2c_reg_read - read lower bits of an I2C core register
- * @i2c: The struct octeon_i2c
- * @eop_reg: Register selector
- *
- * Returns the data.
- *
- * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
- */
-static u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+/* interrupt service routine */
+irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
- u64 tmp;
+ struct octeon_i2c *i2c = dev_id;
- __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI);
- do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
- } while ((tmp & SW_TWSI_V) != 0);
+ i2c->int_disable(i2c);
+ wake_up(&i2c->queue);
- return tmp & 0xFF;
+ return IRQ_HANDLED;
}
-#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
-#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
-
-/**
- * octeon_i2c_read_int - read the TWSI_INT register
- * @i2c: The struct octeon_i2c
- *
- * Returns the value of the register.
- */
-static u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
{
- return __raw_readq(i2c->twsi_base + TWSI_INT);
+ return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
}
-/**
- * octeon_i2c_write_int - write the TWSI_INT register
- * @i2c: The struct octeon_i2c
- * @data: Value to be written
- */
-static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
+static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
{
- octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT);
-}
+ if (octeon_i2c_test_iflg(i2c))
+ return true;
-/**
- * octeon_i2c_int_enable - enable the CORE interrupt
- * @i2c: The struct octeon_i2c
- *
- * The interrupt will be asserted when there is non-STAT_IDLE state in
- * the SW_TWSI_EOP_TWSI_STAT register.
- */
-static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
-{
- octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
-}
+ if (*first) {
+ *first = false;
+ return false;
+ }
-/* disable the CORE interrupt */
-static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
-{
- /* clear TS/ST/IFLG events */
- octeon_i2c_write_int(i2c, 0);
+ /*
+ * IRQ has signaled an event but IFLG hasn't changed.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_test_iflg(i2c);
}
/**
- * octeon_i2c_int_enable78 - enable the CORE interrupt
+ * octeon_i2c_wait - wait for the IFLG to be set
* @i2c: The struct octeon_i2c
*
- * The interrupt will be asserted when there is non-STAT_IDLE state in the
- * SW_TWSI_EOP_TWSI_STAT register.
+ * Returns 0 on success, otherwise a negative errno.
*/
-static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
-{
- atomic_inc_return(&i2c->int_enable_cnt);
- enable_irq(i2c->irq);
-}
-
-static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
- int count;
+ long time_left;
+ bool first = true;
/*
- * The interrupt can be disabled in two places, but we only
- * want to make the disable_irq_nosync() call once, so keep
- * track with the atomic variable.
+ * Some chip revisions don't assert the irq in the interrupt
+ * controller. So we must poll for the IFLG change.
*/
- count = atomic_dec_if_positive(cnt);
- if (count >= 0)
- disable_irq_nosync(irq);
+ if (i2c->broken_irq_mode) {
+ u64 end = get_jiffies_64() + i2c->adap.timeout;
+
+ while (!octeon_i2c_test_iflg(i2c) &&
+ time_before64(get_jiffies_64(), end))
+ usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+
+ return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
+ }
+
+ i2c->int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ i2c->adap.timeout);
+ i2c->int_disable(i2c);
+
+ if (i2c->broken_irq_check && !time_left &&
+ octeon_i2c_test_iflg(i2c)) {
+ dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+ i2c->broken_irq_mode = true;
+ return 0;
+ }
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
}
-/* disable the CORE interrupt */
-static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
{
- __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+ return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
}
-/**
- * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
- * @i2c: The struct octeon_i2c
- *
- * The interrupt will be asserted when there is non-STAT_IDLE state in
- * the SW_TWSI_EOP_TWSI_STAT register.
- */
-static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
{
- atomic_inc_return(&i2c->hlc_int_enable_cnt);
- enable_irq(i2c->hlc_irq);
+ /* check if valid bit is cleared */
+ if (octeon_i2c_hlc_test_valid(i2c))
+ return true;
+
+ if (*first) {
+ *first = false;
+ return false;
+ }
+
+ /*
+ * IRQ has signaled an event but valid bit isn't cleared.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_hlc_test_valid(i2c);
}
-/* disable the ST interrupt */
-static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
{
- __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+ /* clear ST/TS events, listen for neither */
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
}
/*
@@ -321,83 +168,41 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
-/* interrupt service routine */
-static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
-{
- struct octeon_i2c *i2c = dev_id;
-
- i2c->int_disable(i2c);
- wake_up(&i2c->queue);
-
- return IRQ_HANDLED;
-}
-
-/* HLC interrupt service routine */
-static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
-{
- struct octeon_i2c *i2c = dev_id;
-
- i2c->hlc_int_disable(i2c);
- wake_up(&i2c->queue);
-
- return IRQ_HANDLED;
-}
-
-static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
-{
- return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
-}
-
-static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- if (octeon_i2c_test_iflg(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but IFLG hasn't changed.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c);
-}
-
/**
- * octeon_i2c_wait - wait for the IFLG to be set
+ * octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns 0 on success, otherwise -ETIMEDOUT.
*/
-static int octeon_i2c_wait(struct octeon_i2c *i2c)
+static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
- long time_left;
- bool first = 1;
+ bool first = true;
+ int time_left;
/*
- * Some chip revisions don't assert the irq in the interrupt
- * controller. So we must poll for the IFLG change.
+ * Some cn38xx boards don't assert the irq in the interrupt
+ * controller. So we must poll for the valid bit change.
*/
if (i2c->broken_irq_mode) {
u64 end = get_jiffies_64() + i2c->adap.timeout;
- while (!octeon_i2c_test_iflg(i2c) &&
+ while (!octeon_i2c_hlc_test_valid(i2c) &&
time_before64(get_jiffies_64(), end))
usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
+ return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
}
- i2c->int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ i2c->hlc_int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue,
+ octeon_i2c_hlc_test_ready(i2c, &first),
i2c->adap.timeout);
- i2c->int_disable(i2c);
+ i2c->hlc_int_disable(i2c);
+ if (!time_left)
+ octeon_i2c_hlc_int_clear(i2c);
if (i2c->broken_irq_check && !time_left &&
- octeon_i2c_test_iflg(i2c)) {
+ octeon_i2c_hlc_test_valid(i2c)) {
dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
i2c->broken_irq_mode = true;
return 0;
@@ -405,13 +210,21 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
if (!time_left)
return -ETIMEDOUT;
-
return 0;
}
static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
{
- u8 stat = octeon_i2c_stat_read(i2c);
+ u8 stat;
+
+ /*
+ * This is ugly... in HLC mode the status is not in the status register
+ * but in the lower 8 bits of SW_TWSI.
+ */
+ if (i2c->hlc_enabled)
+ stat = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ else
+ stat = octeon_i2c_stat_read(i2c);
switch (stat) {
/* Everything is fine */
@@ -470,83 +283,157 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
}
}
-static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
+static int octeon_i2c_recovery(struct octeon_i2c *i2c)
{
- return (__raw_readq(i2c->twsi_base + SW_TWSI) & SW_TWSI_V) == 0;
+ int ret;
+
+ ret = i2c_recover_bus(&i2c->adap);
+ if (ret)
+ /* recover failed, try hardware re-init */
+ ret = octeon_i2c_init_lowlevel(i2c);
+ return ret;
}
-static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
+/**
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_start(struct octeon_i2c *i2c)
{
- /* check if valid bit is cleared */
- if (octeon_i2c_hlc_test_valid(i2c))
- return true;
+ int ret;
+ u8 stat;
- if (*first) {
- *first = false;
- return false;
- }
+ octeon_i2c_hlc_disable(i2c);
- /*
- * IRQ has signaled an event but valid bit isn't cleared.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_hlc_test_valid(i2c);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
+ ret = octeon_i2c_wait(i2c);
+ if (ret)
+ goto error;
+
+ stat = octeon_i2c_stat_read(i2c);
+ if (stat == STAT_START || stat == STAT_REP_START)
+ /* START successful, bail out */
+ return 0;
+
+error:
+ /* START failed, try to recover */
+ ret = octeon_i2c_recovery(i2c);
+ return (ret) ? ret : -EAGAIN;
}
-static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+/* send STOP to the bus */
+static void octeon_i2c_stop(struct octeon_i2c *i2c)
{
- octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
}
-static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
+/**
+ * octeon_i2c_read - receive data from the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the location to store the data
+ * @rlength: Length of the data
+ * @recv_len: flag for length byte
+ *
+ * The address is sent over the bus, then the data is read.
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
+ u8 *data, u16 *rlength, bool recv_len)
{
- /* clear ST/TS events, listen for neither */
- octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
+ int i, result, length = *rlength;
+ bool final_read = false;
+
+ octeon_i2c_data_write(i2c, (target << 1) | 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
+
+ /* address OK ? */
+ result = octeon_i2c_check_status(i2c, false);
+ if (result)
+ return result;
+
+ for (i = 0; i < length; i++) {
+ /*
+ * For the last byte to receive TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
+ final_read = true;
+
+ /* clear iflg to allow next event */
+ if (final_read)
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+ else
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
+
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
+
+ data[i] = octeon_i2c_data_read(i2c, &result);
+ if (result)
+ return result;
+ if (recv_len && i == 0) {
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ return -EPROTO;
+ length += data[i];
+ }
+
+ result = octeon_i2c_check_status(i2c, final_read);
+ if (result)
+ return result;
+ }
+ *rlength = length;
+ return 0;
}
/**
- * octeon_i2c_hlc_wait - wait for an HLC operation to complete
+ * octeon_i2c_write - send data to the bus via low-level controller
* @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the data to be sent
+ * @length: Length of the data
*
- * Returns 0 on success, otherwise -ETIMEDOUT.
+ * The address is sent over the bus, then the data.
+ *
+ * Returns 0 on success, otherwise a negative errno.
*/
-static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
+static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
+ const u8 *data, int length)
{
- bool first = 1;
- int time_left;
+ int i, result;
- /*
- * Some cn38xx boards don't assert the irq in the interrupt
- * controller. So we must poll for the valid bit change.
- */
- if (i2c->broken_irq_mode) {
- u64 end = get_jiffies_64() + i2c->adap.timeout;
+ octeon_i2c_data_write(i2c, target << 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- while (!octeon_i2c_hlc_test_valid(i2c) &&
- time_before64(get_jiffies_64(), end))
- usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
- return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
- }
+ for (i = 0; i < length; i++) {
+ result = octeon_i2c_check_status(i2c, false);
+ if (result)
+ return result;
- i2c->hlc_int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue,
- octeon_i2c_hlc_test_ready(i2c, &first),
- i2c->adap.timeout);
- i2c->hlc_int_disable(i2c);
- if (!time_left)
- octeon_i2c_hlc_int_clear(i2c);
+ octeon_i2c_data_write(i2c, data[i]);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- if (i2c->broken_irq_check && !time_left &&
- octeon_i2c_hlc_test_valid(i2c)) {
- dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
- i2c->broken_irq_mode = true;
- return 0;
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
}
- if (!time_left)
- return -ETIMEDOUT;
return 0;
}
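The read loop moved above keeps its subtle ACK rule: every byte but the last is ACKed (TWSI_CTL_AAK), except that with I2C_M_RECV_LEN the first byte is always ACKed because it carries the SMBus block count and the true length is unknown until then. The host-side sketch below replays that decision against a fake byte stream; ack_this_byte(), fake_slave and BLOCK_MAX are illustrative stand-ins (BLOCK_MAX mirrors I2C_SMBUS_BLOCK_MAX = 32).

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_MAX 32

static bool ack_this_byte(int i, int length, bool recv_len)
{
	/* the final byte must be NAKed so the slave releases the bus */
	if (i + 1 == length && !(recv_len && i == 0))
		return false;
	return true;
}

int main(void)
{
	const unsigned char fake_slave[] = { 3, 0xde, 0xad, 0xbe };
	bool recv_len = true;		/* SMBus block-read style transfer */
	int length = 1;			/* only the count byte is known up front */

	for (int i = 0; i < length; i++) {
		bool ack = ack_this_byte(i, length, recv_len);
		unsigned char byte = fake_slave[i];

		printf("byte %d = 0x%02x, %s\n", i, byte, ack ? "ACK" : "NAK");

		if (recv_len && i == 0) {
			if (byte > BLOCK_MAX + 1)
				return 1;	/* -EPROTO in the driver */
			length += byte;		/* now the real length is known */
		}
	}
	return 0;
}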
@@ -570,20 +457,20 @@ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
else
cmd |= SW_TWSI_OP_7;
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
+ return octeon_i2c_check_status(i2c, false);
for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[0].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -620,19 +507,17 @@ static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
ext |= (u64)msgs[0].buf[j] << (8 * i);
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
}
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
-
- ret = octeon_i2c_check_status(i2c, false);
+ return octeon_i2c_check_status(i2c, false);
err:
return ret;
@@ -663,27 +548,27 @@ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs
cmd |= SW_TWSI_EIA;
ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
} else {
cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
}
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
+ return octeon_i2c_check_status(i2c, false);
for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--)
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[1].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -730,27 +615,85 @@ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msg
set_ext = true;
}
if (set_ext)
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
-
- ret = octeon_i2c_check_status(i2c, false);
+ return octeon_i2c_check_status(i2c, false);
err:
return ret;
}
+/**
+ * octeon_i2c_xfer - The driver's master_xfer function
+ * @adap: Pointer to the i2c_adapter structure
+ * @msgs: Pointer to the messages to be processed
+ * @num: Length of the MSGS array
+ *
+ * Returns the number of messages processed, or a negative errno on failure.
+ */
+int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ int i, ret = 0;
+
+ if (num == 1) {
+ if (msgs[0].len > 0 && msgs[0].len <= 8) {
+ if (msgs[0].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_write(i2c, msgs);
+ goto out;
+ }
+ } else if (num == 2) {
+ if ((msgs[0].flags & I2C_M_RD) == 0 &&
+ (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
+ msgs[0].len > 0 && msgs[0].len <= 2 &&
+ msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[0].addr == msgs[1].addr) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ }
+ }
+
+ for (i = 0; ret == 0 && i < num; i++) {
+ struct i2c_msg *pmsg = &msgs[i];
+
+ /* zero-length messages are not supported */
+ if (!pmsg->len) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = octeon_i2c_start(i2c);
+ if (ret)
+ return ret;
+
+ if (pmsg->flags & I2C_M_RD)
+ ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
+ &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
+ else
+ ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
+ pmsg->len);
+ }
+ octeon_i2c_stop(i2c);
+out:
+ return (ret != 0) ? ret : num;
+}
+
/* calculate and set clock divisors */
-static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
+void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -791,7 +734,7 @@ static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
}
-static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
+int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
{
u8 status = 0;
int tries;
@@ -818,219 +761,6 @@ static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
return 0;
}
-static int octeon_i2c_recovery(struct octeon_i2c *i2c)
-{
- int ret;
-
- ret = i2c_recover_bus(&i2c->adap);
- if (ret)
- /* recover failed, try hardware re-init */
- ret = octeon_i2c_init_lowlevel(i2c);
- return ret;
-}
-
-/**
- * octeon_i2c_start - send START to the bus
- * @i2c: The struct octeon_i2c
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_start(struct octeon_i2c *i2c)
-{
- int ret;
- u8 stat;
-
- octeon_i2c_hlc_disable(i2c);
-
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
- ret = octeon_i2c_wait(i2c);
- if (ret)
- goto error;
-
- stat = octeon_i2c_stat_read(i2c);
- if (stat == STAT_START || stat == STAT_REP_START)
- /* START successful, bail out */
- return 0;
-
-error:
- /* START failed, try to recover */
- ret = octeon_i2c_recovery(i2c);
- return (ret) ? ret : -EAGAIN;
-}
-
-/* send STOP to the bus */
-static void octeon_i2c_stop(struct octeon_i2c *i2c)
-{
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
-}
-
-/**
- * octeon_i2c_write - send data to the bus via low-level controller
- * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the data to be sent
- * @length: Length of the data
- *
- * The address is sent over the bus, then the data.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
- const u8 *data, int length)
-{
- int i, result;
-
- octeon_i2c_data_write(i2c, target << 1);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- for (i = 0; i < length; i++) {
- result = octeon_i2c_check_status(i2c, false);
- if (result)
- return result;
-
- octeon_i2c_data_write(i2c, data[i]);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
- }
-
- return 0;
-}
-
-/**
- * octeon_i2c_read - receive data from the bus via low-level controller
- * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the location to store the data
- * @rlength: Length of the data
- * @recv_len: flag for length byte
- *
- * The address is sent over the bus, then the data is read.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
- u8 *data, u16 *rlength, bool recv_len)
-{
- int i, result, length = *rlength;
- bool final_read = false;
-
- octeon_i2c_data_write(i2c, (target << 1) | 1);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- /* address OK ? */
- result = octeon_i2c_check_status(i2c, false);
- if (result)
- return result;
-
- for (i = 0; i < length; i++) {
- /*
- * For the last byte to receive TWSI_CTL_AAK must not be set.
- *
- * A special case is I2C_M_RECV_LEN where we don't know the
- * additional length yet. If recv_len is set we assume we're
- * not reading the final byte and therefore need to set
- * TWSI_CTL_AAK.
- */
- if ((i + 1 == length) && !(recv_len && i == 0))
- final_read = true;
-
- /* clear iflg to allow next event */
- if (final_read)
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- else
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- data[i] = octeon_i2c_data_read(i2c);
- if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
- return -EPROTO;
- length += data[i];
- }
-
- result = octeon_i2c_check_status(i2c, final_read);
- if (result)
- return result;
- }
- *rlength = length;
- return 0;
-}
-
-/**
- * octeon_i2c_xfer - The driver's master_xfer function
- * @adap: Pointer to the i2c_adapter structure
- * @msgs: Pointer to the messages to be processed
- * @num: Length of the MSGS array
- *
- * Returns the number of messages processed, or a negative errno on failure.
- */
-static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- int i, ret = 0;
-
- if (num == 1) {
- if (msgs[0].len > 0 && msgs[0].len <= 8) {
- if (msgs[0].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_write(i2c, msgs);
- goto out;
- }
- } else if (num == 2) {
- if ((msgs[0].flags & I2C_M_RD) == 0 &&
- (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
- msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
- msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
- }
- }
-
- for (i = 0; ret == 0 && i < num; i++) {
- struct i2c_msg *pmsg = &msgs[i];
-
- /* zero-length messages are not supported */
- if (!pmsg->len) {
- ret = -EOPNOTSUPP;
- break;
- }
-
- ret = octeon_i2c_start(i2c);
- if (ret)
- return ret;
-
- if (pmsg->flags & I2C_M_RD)
- ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
- &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
- else
- ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
- }
- octeon_i2c_stop(i2c);
-out:
- return (ret != 0) ? ret : num;
-}
-
static int octeon_i2c_get_scl(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
@@ -1044,7 +774,7 @@ static void octeon_i2c_set_scl(struct i2c_adapter *adap, int val)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
+ octeon_i2c_write_int(i2c, val ? 0 : TWSI_INT_SCL_OVR);
}
static int octeon_i2c_get_sda(struct i2c_adapter *adap)
@@ -1060,13 +790,14 @@ static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ octeon_i2c_hlc_disable(i2c);
+
/*
- * The stop resets the state machine, does not _transmit_ STOP unless
- * engine was active.
+ * Bring control register to a good state regardless
+ * of HLC state.
*/
- octeon_i2c_stop(i2c);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- octeon_i2c_hlc_disable(i2c);
octeon_i2c_write_int(i2c, 0);
}
@@ -1074,10 +805,19 @@ static void octeon_i2c_unprepare_recovery(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ /*
+ * Generate STOP to finish the unfinished transaction.
+ * Can't generate STOP via the TWSI CTL register
+ * since it could bring the TWSI controller into an inoperable state.
+ */
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
+ udelay(5);
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
+ udelay(5);
octeon_i2c_write_int(i2c, 0);
}
-static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
+struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
.recover_bus = i2c_generic_scl_recovery,
.get_scl = octeon_i2c_get_scl,
.set_scl = octeon_i2c_set_scl,
@@ -1085,171 +825,3 @@ static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
.prepare_recovery = octeon_i2c_prepare_recovery,
.unprepare_recovery = octeon_i2c_unprepare_recovery,
};
-
-static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
-}
-
-static const struct i2c_algorithm octeon_i2c_algo = {
- .master_xfer = octeon_i2c_xfer,
- .functionality = octeon_i2c_functionality,
-};
-
-static struct i2c_adapter octeon_i2c_ops = {
- .owner = THIS_MODULE,
- .name = "OCTEON adapter",
- .algo = &octeon_i2c_algo,
-};
-
-static int octeon_i2c_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- int irq, result = 0, hlc_irq = 0;
- struct resource *res_mem;
- struct octeon_i2c *i2c;
- bool cn78xx_style;
-
- cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
- if (cn78xx_style) {
- hlc_irq = platform_get_irq(pdev, 0);
- if (hlc_irq < 0)
- return hlc_irq;
-
- irq = platform_get_irq(pdev, 2);
- if (irq < 0)
- return irq;
- } else {
- /* All adaptors have an irq. */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- }
-
- i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
- if (!i2c) {
- result = -ENOMEM;
- goto out;
- }
- i2c->dev = &pdev->dev;
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
- if (IS_ERR(i2c->twsi_base)) {
- result = PTR_ERR(i2c->twsi_base);
- goto out;
- }
-
- /*
- * "clock-rate" is a legacy binding, the official binding is
- * "clock-frequency". Try the official one first and then
- * fall back if it doesn't exist.
- */
- if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
- of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
- dev_err(i2c->dev,
- "no I2C 'clock-rate' or 'clock-frequency' property\n");
- result = -ENXIO;
- goto out;
- }
-
- i2c->sys_freq = octeon_get_io_clock_rate();
-
- init_waitqueue_head(&i2c->queue);
-
- i2c->irq = irq;
-
- if (cn78xx_style) {
- i2c->hlc_irq = hlc_irq;
-
- i2c->int_enable = octeon_i2c_int_enable78;
- i2c->int_disable = octeon_i2c_int_disable78;
- i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
- i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
-
- irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
- irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
-
- result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
- octeon_i2c_hlc_isr78, 0,
- DRV_NAME, i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto out;
- }
- } else {
- i2c->int_enable = octeon_i2c_int_enable;
- i2c->int_disable = octeon_i2c_int_disable;
- i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
- i2c->hlc_int_disable = octeon_i2c_int_disable;
- }
-
- result = devm_request_irq(&pdev->dev, i2c->irq,
- octeon_i2c_isr, 0, DRV_NAME, i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto out;
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN38XX))
- i2c->broken_irq_check = true;
-
- result = octeon_i2c_init_lowlevel(i2c);
- if (result) {
- dev_err(i2c->dev, "init low level failed\n");
- goto out;
- }
-
- octeon_i2c_set_clock(i2c);
-
- i2c->adap = octeon_i2c_ops;
- i2c->adap.timeout = msecs_to_jiffies(2);
- i2c->adap.retries = 5;
- i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
- i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = node;
- i2c_set_adapdata(&i2c->adap, i2c);
- platform_set_drvdata(pdev, i2c);
-
- result = i2c_add_adapter(&i2c->adap);
- if (result < 0) {
- dev_err(i2c->dev, "failed to add adapter\n");
- goto out;
- }
- dev_info(i2c->dev, "probed\n");
- return 0;
-
-out:
- return result;
-};
-
-static int octeon_i2c_remove(struct platform_device *pdev)
-{
- struct octeon_i2c *i2c = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&i2c->adap);
- return 0;
-};
-
-static const struct of_device_id octeon_i2c_match[] = {
- { .compatible = "cavium,octeon-3860-twsi", },
- { .compatible = "cavium,octeon-7890-twsi", },
- {},
-};
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
-
-static struct platform_driver octeon_i2c_driver = {
- .probe = octeon_i2c_probe,
- .remove = octeon_i2c_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = octeon_i2c_match,
- },
-};
-
-module_platform_driver(octeon_i2c_driver);
-
-MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
-MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
-MODULE_LICENSE("GPL");
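The consolidated octeon_i2c_xfer keeps the high-level-controller fast path for the two shapes the hardware can handle directly: a single message of up to eight bytes, or a one/two-byte write followed by a message of up to eight bytes to the same address (the classic register-access pattern), with I2C_M_RECV_LEN excluded. A compact predicate capturing that test, with a trimmed-down msg struct and stand-in flag values, might look like the sketch below; hlc_usable() is not a function in the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define M_RD		0x0001	/* stand-ins for I2C_M_RD / I2C_M_RECV_LEN */
#define M_RECV_LEN	0x0400

struct msg {
	uint16_t addr;
	uint16_t flags;
	uint16_t len;
};

static bool hlc_usable(const struct msg *msgs, int num)
{
	if (num == 1)
		return msgs[0].len > 0 && msgs[0].len <= 8;

	if (num == 2)
		return !(msgs[0].flags & M_RD) &&
		       !(msgs[1].flags & M_RECV_LEN) &&
		       msgs[0].len > 0 && msgs[0].len <= 2 &&
		       msgs[1].len > 0 && msgs[1].len <= 8 &&
		       msgs[0].addr == msgs[1].addr;

	return false;
}

int main(void)
{
	struct msg reg_read[2] = {
		{ .addr = 0x50, .flags = 0,    .len = 1 },	/* register index */
		{ .addr = 0x50, .flags = M_RD, .len = 4 },	/* read back 4 bytes */
	};

	printf("HLC fast path: %s\n", hlc_usable(reg_read, 2) ? "yes" : "no");
	return 0;
}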
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
new file mode 100644
index 000000000000..1db7c835a454
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -0,0 +1,216 @@
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/* Controller command patterns */
+#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
+#define SW_TWSI_EIA BIT_ULL(61) /* Extended internal address */
+#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
+#define SW_TWSI_SOVR BIT_ULL(55) /* Size override */
+#define SW_TWSI_SIZE_SHIFT 52
+#define SW_TWSI_ADDR_SHIFT 40
+#define SW_TWSI_IA_SHIFT 32 /* Internal address */
+
+/* Controller opcode word (bits 60:57) */
+#define SW_TWSI_OP_SHIFT 57
+#define SW_TWSI_OP_7 (0ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_7_IA (1ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10 (2ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10_IA (3ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
+
+/* Controller extended opcode word (bits 34:32) */
+#define SW_TWSI_EOP_SHIFT 32
+#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
+
+/* Controller command and status bits */
+#define TWSI_CTL_CE 0x80 /* High level controller enable */
+#define TWSI_CTL_ENAB 0x40 /* Bus enable */
+#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
+#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
+#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
+#define TWSI_CTL_AAK 0x04 /* Assert ACK */
+
+/* Status values */
+#define STAT_ERROR 0x00
+#define STAT_START 0x08
+#define STAT_REP_START 0x10
+#define STAT_TXADDR_ACK 0x18
+#define STAT_TXADDR_NAK 0x20
+#define STAT_TXDATA_ACK 0x28
+#define STAT_TXDATA_NAK 0x30
+#define STAT_LOST_ARB_38 0x38
+#define STAT_RXADDR_ACK 0x40
+#define STAT_RXADDR_NAK 0x48
+#define STAT_RXDATA_ACK 0x50
+#define STAT_RXDATA_NAK 0x58
+#define STAT_SLAVE_60 0x60
+#define STAT_LOST_ARB_68 0x68
+#define STAT_SLAVE_70 0x70
+#define STAT_LOST_ARB_78 0x78
+#define STAT_SLAVE_80 0x80
+#define STAT_SLAVE_88 0x88
+#define STAT_GENDATA_ACK 0x90
+#define STAT_GENDATA_NAK 0x98
+#define STAT_SLAVE_A0 0xA0
+#define STAT_SLAVE_A8 0xA8
+#define STAT_LOST_ARB_B0 0xB0
+#define STAT_SLAVE_LOST 0xB8
+#define STAT_SLAVE_NAK 0xC0
+#define STAT_SLAVE_ACK 0xC8
+#define STAT_AD2W_ACK 0xD0
+#define STAT_AD2W_NAK 0xD8
+#define STAT_IDLE 0xF8
+
+/* TWSI_INT values */
+#define TWSI_INT_ST_INT BIT_ULL(0)
+#define TWSI_INT_TS_INT BIT_ULL(1)
+#define TWSI_INT_CORE_INT BIT_ULL(2)
+#define TWSI_INT_ST_EN BIT_ULL(4)
+#define TWSI_INT_TS_EN BIT_ULL(5)
+#define TWSI_INT_CORE_EN BIT_ULL(6)
+#define TWSI_INT_SDA_OVR BIT_ULL(8)
+#define TWSI_INT_SCL_OVR BIT_ULL(9)
+#define TWSI_INT_SDA BIT_ULL(10)
+#define TWSI_INT_SCL BIT_ULL(11)
+
+#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
+
+/* Register offsets */
+struct octeon_i2c_reg_offset {
+ unsigned int sw_twsi;
+ unsigned int twsi_int;
+ unsigned int sw_twsi_ext;
+};
+
+#define SW_TWSI(x) (x->roff.sw_twsi)
+#define TWSI_INT(x) (x->roff.twsi_int)
+#define SW_TWSI_EXT(x) (x->roff.sw_twsi_ext)
+
+struct octeon_i2c {
+ wait_queue_head_t queue;
+ struct i2c_adapter adap;
+ struct octeon_i2c_reg_offset roff;
+ struct clk *clk;
+ int irq;
+ int hlc_irq; /* For cn7890 only */
+ u32 twsi_freq;
+ int sys_freq;
+ void __iomem *twsi_base;
+ struct device *dev;
+ bool hlc_enabled;
+ bool broken_irq_mode;
+ bool broken_irq_check;
+ void (*int_enable)(struct octeon_i2c *);
+ void (*int_disable)(struct octeon_i2c *);
+ void (*hlc_int_enable)(struct octeon_i2c *);
+ void (*hlc_int_disable)(struct octeon_i2c *);
+ atomic_t int_enable_cnt;
+ atomic_t hlc_int_enable_cnt;
+#if IS_ENABLED(CONFIG_I2C_THUNDERX)
+ struct msix_entry i2c_msix;
+#endif
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
+};
+
+static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
+{
+ __raw_writeq(val, addr);
+ __raw_readq(addr); /* wait for write to land */
+}
+
+/**
+ * octeon_i2c_reg_write - write an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ * @data: Value to be written
+ *
+ * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
+ */
+static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
+{
+ u64 tmp;
+
+ __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
+
+ readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V,
+ I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout);
+}
+
+#define octeon_i2c_ctl_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
+#define octeon_i2c_data_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+
+/**
+ * octeon_i2c_reg_read - read lower bits of an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ *
+ * Returns the data.
+ *
+ * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
+ */
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+ int *error)
+{
+ u64 tmp;
+ int ret;
+
+ __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+
+ ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
+ tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT,
+ i2c->adap.timeout);
+ if (error)
+ *error = ret;
+ return tmp & 0xFF;
+}
+
+#define octeon_i2c_ctl_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
+#define octeon_i2c_stat_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
+
+/**
+ * octeon_i2c_read_int - read the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns the value of the register.
+ */
+static inline u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+{
+ return __raw_readq(i2c->twsi_base + TWSI_INT(i2c));
+}
+
+/**
+ * octeon_i2c_write_int - write the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ * @data: Value to be written
+ */
+static inline void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
+{
+ octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT(i2c));
+}
+
+/* Prototypes */
+irqreturn_t octeon_i2c_isr(int irq, void *dev_id);
+int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c);
+void octeon_i2c_set_clock(struct octeon_i2c *i2c);
+extern struct i2c_bus_recovery_info octeon_i2c_recovery_info;
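The accessors above all follow the same indirect scheme: a command word with SW_TWSI_V set is written to the SW_TWSI CSR and the register is then polled on the valid bit. As a hedged illustration only (not part of this patch), a caller could use the status accessor to wait for the bus to go idle; the helper name below is hypothetical:

	/*
	 * Illustrative sketch: busy-wait until the TWSI state machine reports
	 * STAT_IDLE, using octeon_i2c_stat_read() declared above.  The adapter
	 * timeout is in jiffies, the poll interval in microseconds.
	 */
	static int octeon_i2c_wait_idle_example(struct octeon_i2c *i2c)
	{
		unsigned long timeout = jiffies + i2c->adap.timeout;

		while (octeon_i2c_stat_read(i2c) != STAT_IDLE) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			udelay(I2C_OCTEON_EVENT_WAIT);
		}
		return 0;
	}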
diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c
new file mode 100644
index 000000000000..917524ce6890
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon-platdrv.c
@@ -0,0 +1,286 @@
+/*
+ * (C) Copyright 2009-2010
+ * Nokia Siemens Networks, michael.lawnick.ext@nsn.com
+ *
+ * Portions Copyright (C) 2010 - 2016 Cavium, Inc.
+ *
+ * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/octeon/octeon.h>
+#include "i2c-octeon-core.h"
+
+#define DRV_NAME "i2c-octeon"
+
+/**
+ * octeon_i2c_int_enable - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in
+ * the SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
+}
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
+{
+ /* clear TS/ST/IFLG events */
+ octeon_i2c_write_int(i2c, 0);
+}
+
+/**
+ * octeon_i2c_int_enable78 - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in the
+ * SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->int_enable_cnt);
+ enable_irq(i2c->irq);
+}
+
+static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+{
+ int count;
+
+ /*
+ * The interrupt can be disabled in two places, but we only
+ * want to make the disable_irq_nosync() call once, so keep
+ * track with the atomic variable.
+ */
+ count = atomic_dec_if_positive(cnt);
+ if (count >= 0)
+ disable_irq_nosync(irq);
+}
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+}
+
+/**
+ * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in
+ * the SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->hlc_int_enable_cnt);
+ enable_irq(i2c->hlc_irq);
+}
+
+/* disable the ST interrupt */
+static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+}
+
+/* HLC interrupt service routine */
+static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
+{
+ struct octeon_i2c *i2c = dev_id;
+
+ i2c->hlc_int_disable(i2c);
+ wake_up(&i2c->queue);
+
+ return IRQ_HANDLED;
+}
+
+static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+}
+
+static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm octeon_i2c_algo = {
+ .master_xfer = octeon_i2c_xfer,
+ .functionality = octeon_i2c_functionality,
+};
+
+static struct i2c_adapter octeon_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "OCTEON adapter",
+ .algo = &octeon_i2c_algo,
+};
+
+static int octeon_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int irq, result = 0, hlc_irq = 0;
+ struct resource *res_mem;
+ struct octeon_i2c *i2c;
+ bool cn78xx_style;
+
+ cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
+ if (cn78xx_style) {
+ hlc_irq = platform_get_irq(pdev, 0);
+ if (hlc_irq < 0)
+ return hlc_irq;
+
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ } else {
+		/* All adapters have an irq. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ }
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c) {
+ result = -ENOMEM;
+ goto out;
+ }
+ i2c->dev = &pdev->dev;
+
+ i2c->roff.sw_twsi = 0x00;
+ i2c->roff.twsi_int = 0x10;
+ i2c->roff.sw_twsi_ext = 0x18;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(i2c->twsi_base)) {
+ result = PTR_ERR(i2c->twsi_base);
+ goto out;
+ }
+
+ /*
+	 * "clock-rate" is a legacy binding; the official binding is
+	 * "clock-frequency". Try the official one first and then
+	 * fall back to the legacy one if it is absent.
+ */
+ if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
+ of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
+ dev_err(i2c->dev,
+ "no I2C 'clock-rate' or 'clock-frequency' property\n");
+ result = -ENXIO;
+ goto out;
+ }
+
+ i2c->sys_freq = octeon_get_io_clock_rate();
+
+ init_waitqueue_head(&i2c->queue);
+
+ i2c->irq = irq;
+
+ if (cn78xx_style) {
+ i2c->hlc_irq = hlc_irq;
+
+ i2c->int_enable = octeon_i2c_int_enable78;
+ i2c->int_disable = octeon_i2c_int_disable78;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
+ i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
+
+ irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
+ irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
+
+ result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
+ octeon_i2c_hlc_isr78, 0,
+ DRV_NAME, i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ goto out;
+ }
+ } else {
+ i2c->int_enable = octeon_i2c_int_enable;
+ i2c->int_disable = octeon_i2c_int_disable;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
+ i2c->hlc_int_disable = octeon_i2c_int_disable;
+ }
+
+ result = devm_request_irq(&pdev->dev, i2c->irq,
+ octeon_i2c_isr, 0, DRV_NAME, i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ goto out;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ i2c->broken_irq_check = true;
+
+ result = octeon_i2c_init_lowlevel(i2c);
+ if (result) {
+ dev_err(i2c->dev, "init low level failed\n");
+ goto out;
+ }
+
+ octeon_i2c_set_clock(i2c);
+
+ i2c->adap = octeon_i2c_ops;
+ i2c->adap.timeout = msecs_to_jiffies(2);
+ i2c->adap.retries = 5;
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = node;
+ i2c_set_adapdata(&i2c->adap, i2c);
+ platform_set_drvdata(pdev, i2c);
+
+ result = i2c_add_adapter(&i2c->adap);
+ if (result < 0)
+ goto out;
+ dev_info(i2c->dev, "probed\n");
+ return 0;
+
+out:
+ return result;
+}
+
+static int octeon_i2c_remove(struct platform_device *pdev)
+{
+ struct octeon_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ return 0;
+}
+
+static const struct of_device_id octeon_i2c_match[] = {
+ { .compatible = "cavium,octeon-3860-twsi", },
+ { .compatible = "cavium,octeon-7890-twsi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+
+static struct platform_driver octeon_i2c_driver = {
+ .probe = octeon_i2c_probe,
+ .remove = octeon_i2c_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = octeon_i2c_match,
+ },
+};
+
+module_platform_driver(octeon_i2c_driver);
+
+MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
+MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ab1279b8e240..c7da0c42baee 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1425,10 +1425,8 @@ omap_i2c_probe(struct platform_device *pdev)
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(omap->dev, "failure adding adapter\n");
+ if (r)
goto err_unuse_clocks;
- }
dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
major, minor, omap->speed);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 23d1c167b5d7..c2268cdf38e8 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -694,7 +694,6 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
retval = i2c_add_adapter(adap);
if (retval) {
- dev_err(&dev->dev, "Couldn't register adapter!\n");
kfree(adapdata);
kfree(adap);
release_region(smba, SMBIOSIZE);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 2c40edbf6224..217c78711d65 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -329,10 +329,8 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
i2c_set_adapdata(&pmcmsptwi_adapter, &pmcmsptwi_data);
rc = i2c_add_adapter(&pmcmsptwi_adapter);
- if (rc) {
- dev_err(&pldev->dev, "Unable to register I2C adapter\n");
+ if (rc)
goto ret_unmap;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 7ea67aa46fb7..fd5f9d2bf6d9 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -714,10 +714,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
/* Register this adapter with the I2C subsystem */
ret = i2c_add_numbered_adapter(&alg_data->adapter);
- if (ret < 0) {
- dev_err(&pdev->dev, "I2C: Failed to add bus\n");
+ if (ret < 0)
goto out_clock;
- }
dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
alg_data->adapter.name, res->start, alg_data->irq);
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index 82b6f02544da..0c8b1571886d 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -212,11 +212,8 @@ static int puv3_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
rc = i2c_add_numbered_adapter(adapter);
- if (rc) {
- dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
- adapter->name);
+ if (rc)
goto fail_add_adapter;
- }
dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
return 0;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 0d351954db02..e28b825b0433 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1292,10 +1292,8 @@ static int i2c_pxa_probe(struct platform_device *dev)
#endif
ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&dev->dev, "failed to add bus: %d\n", ret);
+ if (ret < 0)
goto ereqirq;
- }
platform_set_drvdata(dev, i2c);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9bd849dacee8..726615e54f2a 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -802,6 +802,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7793", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
+ { .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -875,10 +876,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
- if (ret < 0) {
- dev_err(dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto out_pm_disable;
- }
dev_info(dev, "probed\n");
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7e3af671543..6263ea82d6ac 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -383,10 +383,8 @@ static int riic_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, riic);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5c5b7cada8be..50702c7bb244 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -58,7 +58,7 @@ enum {
#define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */
#define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */
-#define REG_CON_TUNING_MASK GENMASK(15, 8)
+#define REG_CON_TUNING_MASK GENMASK_ULL(15, 8)
#define REG_CON_SDA_CFG(cfg) ((cfg) << 8)
#define REG_CON_STA_CFG(cfg) ((cfg) << 12)
@@ -742,7 +742,7 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
struct i2c_timings *t,
struct rk3x_i2c_calced_timings *t_calc)
{
- unsigned long min_low_ns, min_high_ns, min_total_ns;
+ unsigned long min_low_ns, min_high_ns;
unsigned long min_setup_start_ns, min_setup_data_ns;
unsigned long min_setup_stop_ns, max_hold_data_ns;
@@ -793,7 +793,6 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
/* These are the min dividers needed for min hold times. */
min_div_for_hold = (min_low_div + min_high_div);
- min_total_ns = min_low_ns + min_high_ns;
/*
* This is the maximum divider so we don't go over the maximum.
@@ -1312,10 +1311,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
rk3x_i2c_adapt_div(i2c, clk_rate);
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register adapter\n");
+ if (ret < 0)
goto err_clk_notifier;
- }
dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 38dc1cacfd8b..499af26e736e 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1215,7 +1215,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
pm_runtime_disable(&pdev->dev);
s3c24xx_i2c_deregister_cpufreq(i2c);
clk_unprepare(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 24968384b401..c2005c789d2b 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -510,10 +510,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_numbered_adapter(&id->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto out4;
- }
platform_set_drvdata(pdev, id);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 05b1eeab9cf5..192f36f00e4d 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -981,7 +981,6 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret < 0) {
sh_mobile_i2c_release_dma(pd);
- dev_err(&dev->dev, "cannot add numbered adapter\n");
return ret;
}
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 792a42bdd335..95e81d0f72b4 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -387,10 +387,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
writel(regval, siic->base + SIRFSOC_I2C_SDA_DELAY);
err = i2c_add_numbered_adapter(adap);
- if (err < 0) {
- dev_err(&pdev->dev, "Can't add new i2c adapter\n");
+ if (err < 0)
goto out;
- }
clk_disable(clk);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 944ec4205084..1371547ce1a3 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -874,10 +874,8 @@ static int st_i2c_probe(struct platform_device *pdev)
init_completion(&i2c_dev->complete);
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, i2c_dev);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 460c134832ac..dc63236b45b2 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -920,11 +920,8 @@ static int stu300_probe(struct platform_device *pdev)
/* i2c device drivers may be active on return from add_adapter() */
ret = i2c_add_numbered_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "failure adding ST Micro DDC "
- "I2C adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, dev);
dev_info(&pdev->dev, "ST DDC I2C @ %p, irq %d\n",
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b126dbaa47e3..4af9bbae20df 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -28,6 +28,9 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
#include <asm/unaligned.h>
@@ -36,21 +39,21 @@
#define I2C_CNFG 0x000
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
-#define I2C_CNFG_PACKET_MODE_EN (1<<10)
-#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
-#define I2C_CNFG_MULTI_MASTER_MODE (1<<17)
+#define I2C_CNFG_PACKET_MODE_EN BIT(10)
+#define I2C_CNFG_NEW_MASTER_FSM BIT(11)
+#define I2C_CNFG_MULTI_MASTER_MODE BIT(17)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
-#define I2C_SL_CNFG_NACK (1<<1)
-#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_CNFG_NACK BIT(1)
+#define I2C_SL_CNFG_NEWSL BIT(2)
#define I2C_SL_ADDR1 0x02c
#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
#define I2C_FIFO_CONTROL 0x05c
-#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
-#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_FLUSH BIT(1)
+#define I2C_FIFO_CONTROL_RX_FLUSH BIT(0)
#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
#define I2C_FIFO_STATUS 0x060
@@ -60,26 +63,26 @@
#define I2C_FIFO_STATUS_RX_SHIFT 0
#define I2C_INT_MASK 0x064
#define I2C_INT_STATUS 0x068
-#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
-#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
-#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
-#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
-#define I2C_INT_NO_ACK (1<<3)
-#define I2C_INT_ARBITRATION_LOST (1<<2)
-#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
-#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_INT_PACKET_XFER_COMPLETE BIT(7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE BIT(6)
+#define I2C_INT_TX_FIFO_OVERFLOW BIT(5)
+#define I2C_INT_RX_FIFO_UNDERFLOW BIT(4)
+#define I2C_INT_NO_ACK BIT(3)
+#define I2C_INT_ARBITRATION_LOST BIT(2)
+#define I2C_INT_TX_FIFO_DATA_REQ BIT(1)
+#define I2C_INT_RX_FIFO_DATA_REQ BIT(0)
#define I2C_CLK_DIVISOR 0x06c
#define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16
#define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8
#define DVC_CTRL_REG1 0x000
-#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG1_INTR_EN BIT(10)
#define DVC_CTRL_REG2 0x004
#define DVC_CTRL_REG3 0x008
-#define DVC_CTRL_REG3_SW_PROG (1<<26)
-#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_CTRL_REG3_SW_PROG BIT(26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN BIT(30)
#define DVC_STATUS 0x00c
-#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+#define DVC_STATUS_I2C_DONE_INTR BIT(30)
#define I2C_ERR_NONE 0x00
#define I2C_ERR_NO_ACK 0x01
@@ -89,26 +92,28 @@
#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
#define PACKET_HEADER0_PACKET_ID_SHIFT 16
#define PACKET_HEADER0_CONT_ID_SHIFT 12
-#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
-
-#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
-#define I2C_HEADER_CONT_ON_NAK (1<<21)
-#define I2C_HEADER_SEND_START_BYTE (1<<20)
-#define I2C_HEADER_READ (1<<19)
-#define I2C_HEADER_10BIT_ADDR (1<<18)
-#define I2C_HEADER_IE_ENABLE (1<<17)
-#define I2C_HEADER_REPEAT_START (1<<16)
-#define I2C_HEADER_CONTINUE_XFER (1<<15)
+#define PACKET_HEADER0_PROTOCOL_I2C BIT(4)
+
+#define I2C_HEADER_HIGHSPEED_MODE BIT(22)
+#define I2C_HEADER_CONT_ON_NAK BIT(21)
+#define I2C_HEADER_SEND_START_BYTE BIT(20)
+#define I2C_HEADER_READ BIT(19)
+#define I2C_HEADER_10BIT_ADDR BIT(18)
+#define I2C_HEADER_IE_ENABLE BIT(17)
+#define I2C_HEADER_REPEAT_START BIT(16)
+#define I2C_HEADER_CONTINUE_XFER BIT(15)
#define I2C_HEADER_MASTER_ADDR_SHIFT 12
#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
#define I2C_CONFIG_LOAD 0x08C
-#define I2C_MSTR_CONFIG_LOAD (1 << 0)
-#define I2C_SLV_CONFIG_LOAD (1 << 1)
-#define I2C_TIMEOUT_CONFIG_LOAD (1 << 2)
+#define I2C_MSTR_CONFIG_LOAD BIT(0)
+#define I2C_SLV_CONFIG_LOAD BIT(1)
+#define I2C_TIMEOUT_CONFIG_LOAD BIT(2)
#define I2C_CLKEN_OVERRIDE 0x090
-#define I2C_MST_CORE_CLKEN_OVR (1 << 0)
+#define I2C_MST_CORE_CLKEN_OVR BIT(0)
+
+#define I2C_CONFIG_LOAD_TIMEOUT 1000000
/*
 * msg_end_type: The bus control that needs to be sent at the end of a transfer.
@@ -191,9 +196,11 @@ struct tegra_i2c_dev {
u16 clk_divisor_non_hs_mode;
bool is_suspended;
bool is_multimaster_mode;
+ spinlock_t xfer_lock;
};
-static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
{
writel(val, i2c_dev->base + reg);
}
@@ -244,15 +251,17 @@ static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
{
- u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
- int_mask &= ~mask;
+ u32 int_mask;
+
+ int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) & ~mask;
i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
}
static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
{
- u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
- int_mask |= mask;
+ u32 int_mask;
+
+ int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) | mask;
i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
}
@@ -260,6 +269,7 @@ static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
{
unsigned long timeout = jiffies + HZ;
u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+
val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
@@ -385,7 +395,8 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
*/
static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
{
- u32 val = 0;
+ u32 val;
+
val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
val |= DVC_CTRL_REG3_SW_PROG;
val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
@@ -396,9 +407,15 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
-static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_runtime_resume(struct device *dev)
{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
+
+ ret = pinctrl_pm_select_default_state(i2c_dev->dev);
+ if (ret)
+ return ret;
+
if (!i2c_dev->hw->has_single_clk_source) {
ret = clk_enable(i2c_dev->fast_clk);
if (ret < 0) {
@@ -407,32 +424,66 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
return ret;
}
}
+
ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev,
"Enabling div clk failed, err %d\n", ret);
clk_disable(i2c_dev->fast_clk);
+ return ret;
}
- return ret;
+
+ return 0;
}
-static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_runtime_suspend(struct device *dev)
{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+
clk_disable(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
clk_disable(i2c_dev->fast_clk);
+
+ return pinctrl_pm_select_idle_state(i2c_dev->dev);
+}
+
+static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long reg_offset;
+ void __iomem *addr;
+ u32 val;
+ int err;
+
+ if (i2c_dev->hw->has_config_load_reg) {
+ reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
+ addr = i2c_dev->base + reg_offset;
+ i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
+ if (in_interrupt())
+ err = readl_poll_timeout_atomic(addr, val, val == 0,
+ 1000, I2C_CONFIG_LOAD_TIMEOUT);
+ else
+ err = readl_poll_timeout(addr, val, val == 0,
+ 1000, I2C_CONFIG_LOAD_TIMEOUT);
+
+ if (err) {
+ dev_warn(i2c_dev->dev,
+ "timeout waiting for config load\n");
+ return err;
+ }
+ }
+
+ return 0;
}
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
- int err = 0;
+ int err;
u32 clk_divisor;
- unsigned long timeout = jiffies + HZ;
- err = tegra_i2c_clock_enable(i2c_dev);
+ err = pm_runtime_get_sync(i2c_dev->dev);
if (err < 0) {
- dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
+ dev_err(i2c_dev->dev, "runtime resume failed %d\n", err);
return err;
}
@@ -460,54 +511,59 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
+
sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
-
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
- if (tegra_i2c_flush_fifos(i2c_dev))
- err = -ETIMEDOUT;
+ err = tegra_i2c_flush_fifos(i2c_dev);
+ if (err)
+ goto err;
if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
- if (i2c_dev->hw->has_config_load_reg) {
- i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
- while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
- if (time_after(jiffies, timeout)) {
- dev_warn(i2c_dev->dev,
- "timeout waiting for config load\n");
- err = -ETIMEDOUT;
- goto err;
- }
- msleep(1);
- }
- }
+ err = tegra_i2c_wait_for_config_load(i2c_dev);
+ if (err)
+ goto err;
if (i2c_dev->irq_disabled) {
- i2c_dev->irq_disabled = 0;
+ i2c_dev->irq_disabled = false;
enable_irq(i2c_dev->irq);
}
err:
- tegra_i2c_clock_disable(i2c_dev);
+ pm_runtime_put(i2c_dev->dev);
return err;
}
+static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 cnfg;
+
+ cnfg = i2c_readl(i2c_dev, I2C_CNFG);
+ if (cnfg & I2C_CNFG_PACKET_MODE_EN)
+ i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
+
+ return tegra_i2c_wait_for_config_load(i2c_dev);
+}
+
static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
{
u32 status;
const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
struct tegra_i2c_dev *i2c_dev = dev_id;
+ unsigned long flags;
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+ spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
if (status == 0) {
dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
@@ -517,12 +573,13 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
if (!i2c_dev->irq_disabled) {
disable_irq_nosync(i2c_dev->irq);
- i2c_dev->irq_disabled = 1;
+ i2c_dev->irq_disabled = true;
}
goto err;
}
if (unlikely(status & status_err)) {
+ tegra_i2c_disable_packet_mode(i2c_dev);
if (status & I2C_INT_NO_ACK)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
if (status & I2C_INT_ARBITRATION_LOST)
@@ -552,7 +609,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
- return IRQ_HANDLED;
+ goto done;
err:
/* An error occurred, mask all interrupts */
tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
@@ -563,6 +620,8 @@ err:
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
complete(&i2c_dev->msg_complete);
+done:
+ spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
return IRQ_HANDLED;
}
@@ -572,6 +631,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
u32 packet_header;
u32 int_mask;
unsigned long time_left;
+ unsigned long flags;
tegra_i2c_flush_fifos(i2c_dev);
@@ -584,6 +644,11 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_dev->msg_read = (msg->flags & I2C_M_RD);
reinit_completion(&i2c_dev->msg_complete);
+ spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+
packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
PACKET_HEADER0_PROTOCOL_I2C |
(i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
@@ -613,14 +678,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (!(msg->flags & I2C_M_RD))
tegra_i2c_fill_tx_fifo(i2c_dev);
- int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
if (i2c_dev->hw->has_per_pkt_xfer_complete_irq)
int_mask |= I2C_INT_PACKET_XFER_COMPLETE;
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+
tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
@@ -643,9 +709,10 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
return 0;
/*
- * NACK interrupt is generated before the I2C controller generates the
- * STOP condition on the bus. So wait for 2 clock periods before resetting
- * the controller so that STOP condition has been delivered properly.
+ * NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus. So wait for 2 clock periods
+ * before resetting the controller so that the STOP condition has
+ * been delivered properly.
*/
if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
@@ -670,14 +737,15 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (i2c_dev->is_suspended)
return -EBUSY;
- ret = tegra_i2c_clock_enable(i2c_dev);
+ ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0) {
- dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
+ dev_err(i2c_dev->dev, "runtime resume failed %d\n", ret);
return ret;
}
for (i = 0; i < num; i++) {
enum msg_end_type end_type = MSG_END_STOP;
+
if (i < (num - 1)) {
if (msgs[i + 1].flags & I2C_M_NOSTART)
end_type = MSG_END_CONTINUE;
@@ -688,7 +756,9 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (ret)
break;
}
- tegra_i2c_clock_disable(i2c_dev);
+
+ pm_runtime_put(i2c_dev->dev);
+
return ret ?: i;
}
@@ -825,7 +895,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
div_clk = devm_clk_get(&pdev->dev, "div-clk");
if (IS_ERR(div_clk)) {
- dev_err(&pdev->dev, "missing controller clock");
+ dev_err(&pdev->dev, "missing controller clock\n");
return PTR_ERR(div_clk);
}
@@ -843,27 +913,22 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->rst = devm_reset_control_get(&pdev->dev, "i2c");
if (IS_ERR(i2c_dev->rst)) {
- dev_err(&pdev->dev, "missing controller reset");
+ dev_err(&pdev->dev, "missing controller reset\n");
return PTR_ERR(i2c_dev->rst);
}
tegra_i2c_parse_dt(i2c_dev);
- i2c_dev->hw = &tegra20_i2c_hw;
-
- if (pdev->dev.of_node) {
- i2c_dev->hw = of_device_get_match_data(&pdev->dev);
- i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
- "nvidia,tegra20-i2c-dvc");
- } else if (pdev->id == 3) {
- i2c_dev->is_dvc = 1;
- }
+ i2c_dev->hw = of_device_get_match_data(&pdev->dev);
+ i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
+ "nvidia,tegra20-i2c-dvc");
init_completion(&i2c_dev->msg_complete);
+ spin_lock_init(&i2c_dev->xfer_lock);
if (!i2c_dev->hw->has_single_clk_source) {
fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
if (IS_ERR(fast_clk)) {
- dev_err(&pdev->dev, "missing fast clock");
+ dev_err(&pdev->dev, "missing fast clock\n");
return PTR_ERR(fast_clk);
}
i2c_dev->fast_clk = fast_clk;
@@ -900,18 +965,27 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto unprepare_fast_clk;
}
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_i2c_runtime_resume(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ }
+
if (i2c_dev->is_multimaster_mode) {
ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto unprepare_div_clk;
+ goto disable_rpm;
}
}
ret = tegra_i2c_init(i2c_dev);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ dev_err(&pdev->dev, "Failed to initialize i2c controller\n");
goto disable_div_clk;
}
@@ -925,17 +999,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
i2c_dev->adapter.class = I2C_CLASS_DEPRECATED;
- strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ strlcpy(i2c_dev->adapter.name, dev_name(&pdev->dev),
sizeof(i2c_dev->adapter.name));
i2c_dev->adapter.dev.parent = &pdev->dev;
i2c_dev->adapter.nr = pdev->id;
i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ if (ret)
goto disable_div_clk;
- }
return 0;
@@ -943,6 +1015,11 @@ disable_div_clk:
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
+disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_i2c_runtime_suspend(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -956,11 +1033,16 @@ unprepare_fast_clk:
static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
i2c_del_adapter(&i2c_dev->adapter);
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_i2c_runtime_suspend(&pdev->dev);
+
clk_unprepare(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
clk_unprepare(i2c_dev->fast_clk);
@@ -988,20 +1070,19 @@ static int tegra_i2c_resume(struct device *dev)
i2c_lock_adapter(&i2c_dev->adapter);
ret = tegra_i2c_init(i2c_dev);
-
- if (ret) {
- i2c_unlock_adapter(&i2c_dev->adapter);
- return ret;
- }
-
- i2c_dev->is_suspended = false;
+ if (!ret)
+ i2c_dev->is_suspended = false;
i2c_unlock_adapter(&i2c_dev->adapter);
- return 0;
+ return ret;
}
-static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
+static const struct dev_pm_ops tegra_i2c_pm = {
+ SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
+};
#define TEGRA_I2C_PM (&tegra_i2c_pm)
#else
#define TEGRA_I2C_PM NULL
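The Tegra changes above replace the open-coded clock gating (tegra_i2c_clock_enable/disable) with runtime PM callbacks, so every path that touches the controller now brackets the access with pm_runtime_get_sync() and pm_runtime_put(). A minimal hedged sketch of that bracketing pattern, with a hypothetical do_access() callback standing in for the register work:

	#include <linux/pm_runtime.h>

	static int runtime_pm_bracket_example(struct device *dev,
					      int (*do_access)(struct device *dev))
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0)
			return ret;	/* controller could not be resumed */

		ret = do_access(dev);

		pm_runtime_put(dev);	/* drop the usage count again */
		return ret;
	}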
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
new file mode 100644
index 000000000000..bba5b429f69c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -0,0 +1,259 @@
+/*
+ * Cavium ThunderX i2c driver.
+ *
+ * Copyright (C) 2015,2016 Cavium Inc.
+ * Authors: Fred Martin <fmartin@caviumnetworks.com>
+ * Jan Glauber <jglauber@cavium.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+
+#include "i2c-octeon-core.h"
+
+#define DRV_NAME "i2c-thunderx"
+
+#define PCI_DEVICE_ID_THUNDER_TWSI 0xa012
+
+#define SYS_FREQ_DEFAULT 700000000
+
+#define TWSI_INT_ENA_W1C 0x1028
+#define TWSI_INT_ENA_W1S 0x1030
+
+/*
+ * Enable the CORE interrupt.
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in the
+ * SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void thunder_i2c_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_CORE_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1S);
+}
+
+/*
+ * Disable the CORE interrupt.
+ */
+static void thunder_i2c_int_disable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_CORE_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1C);
+}
+
+static void thunder_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1S);
+}
+
+static void thunder_i2c_hlc_int_disable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1C);
+}
+
+static u32 thunderx_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm thunderx_i2c_algo = {
+ .master_xfer = octeon_i2c_xfer,
+ .functionality = thunderx_i2c_functionality,
+};
+
+static struct i2c_adapter thunderx_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "ThunderX adapter",
+ .algo = &thunderx_i2c_algo,
+};
+
+static void thunder_i2c_clock_enable(struct device *dev, struct octeon_i2c *i2c)
+{
+ int ret;
+
+ i2c->clk = clk_get(dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ i2c->clk = NULL;
+ goto skip;
+ }
+
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto skip;
+ i2c->sys_freq = clk_get_rate(i2c->clk);
+
+skip:
+ if (!i2c->sys_freq)
+ i2c->sys_freq = SYS_FREQ_DEFAULT;
+}
+
+static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
+{
+ if (!clk)
+ return;
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+}
+
+static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
+ struct device_node *node)
+{
+ u32 type;
+
+ if (!node)
+ return -EINVAL;
+
+ i2c->alert_data.irq = irq_of_parse_and_map(node, 0);
+ if (!i2c->alert_data.irq)
+ return -EINVAL;
+
+ type = irqd_get_trigger_type(irq_get_irq_data(i2c->alert_data.irq));
+ i2c->alert_data.alert_edge_triggered =
+ (type & IRQ_TYPE_LEVEL_MASK) ? 1 : 0;
+
+ i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
+ if (!i2c->ara)
+ return -ENODEV;
+ return 0;
+}
+
+static int thunder_i2c_smbus_setup(struct octeon_i2c *i2c,
+ struct device_node *node)
+{
+ /* TODO: ACPI support */
+ if (!acpi_disabled)
+ return -EOPNOTSUPP;
+
+ return thunder_i2c_smbus_setup_of(i2c, node);
+}
+
+static void thunder_i2c_smbus_remove(struct octeon_i2c *i2c)
+{
+ if (i2c->ara)
+ i2c_unregister_device(i2c->ara);
+}
+
+static int thunder_i2c_probe_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct octeon_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->roff.sw_twsi = 0x1000;
+ i2c->roff.twsi_int = 0x1010;
+ i2c->roff.sw_twsi_ext = 0x1018;
+
+ i2c->dev = dev;
+ pci_set_drvdata(pdev, i2c);
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ return ret;
+
+ i2c->twsi_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!i2c->twsi_base)
+ return -EINVAL;
+
+ thunder_i2c_clock_enable(dev, i2c);
+ ret = device_property_read_u32(dev, "clock-frequency", &i2c->twsi_freq);
+ if (ret)
+ i2c->twsi_freq = 100000;
+
+ init_waitqueue_head(&i2c->queue);
+
+ i2c->int_enable = thunder_i2c_int_enable;
+ i2c->int_disable = thunder_i2c_int_disable;
+ i2c->hlc_int_enable = thunder_i2c_hlc_int_enable;
+ i2c->hlc_int_disable = thunder_i2c_hlc_int_disable;
+
+ ret = pci_enable_msix(pdev, &i2c->i2c_msix, 1);
+ if (ret)
+ goto error;
+
+ ret = devm_request_irq(dev, i2c->i2c_msix.vector, octeon_i2c_isr, 0,
+ DRV_NAME, i2c);
+ if (ret)
+ goto error;
+
+ ret = octeon_i2c_init_lowlevel(i2c);
+ if (ret)
+ goto error;
+
+ octeon_i2c_set_clock(i2c);
+
+ i2c->adap = thunderx_i2c_ops;
+ i2c->adap.retries = 5;
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "Cavium ThunderX i2c adapter at %s", dev_name(dev));
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret)
+ goto error;
+
+ dev_info(i2c->dev, "Probed. Set system clock to %u\n", i2c->sys_freq);
+
+ ret = thunder_i2c_smbus_setup(i2c, pdev->dev.of_node);
+ if (ret)
+ dev_info(dev, "SMBUS alert not active on this bus\n");
+
+ return 0;
+
+error:
+ thunder_i2c_clock_disable(dev, i2c->clk);
+ return ret;
+}
+
+static void thunder_i2c_remove_pci(struct pci_dev *pdev)
+{
+ struct octeon_i2c *i2c = pci_get_drvdata(pdev);
+
+ thunder_i2c_smbus_remove(i2c);
+ thunder_i2c_clock_disable(&pdev->dev, i2c->clk);
+ i2c_del_adapter(&i2c->adap);
+}
+
+static const struct pci_device_id thunder_i2c_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_TWSI) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, thunder_i2c_pci_id_table);
+
+static struct pci_driver thunder_i2c_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = thunder_i2c_pci_id_table,
+ .probe = thunder_i2c_probe_pci,
+ .remove = thunder_i2c_remove_pci,
+};
+
+module_pci_driver(thunder_i2c_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fred Martin <fmartin@caviumnetworks.com>");
+MODULE_DESCRIPTION("I2C-Bus adapter for Cavium ThunderX SOC");
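As with the OCTEON platform driver, the functionality mask advertised here determines what client drivers may attempt. A hedged, illustration-only sketch of the check a client on this adapter would normally perform before relying on SMBus block reads (the function name is hypothetical):

	#include <linux/i2c.h>

	static int thunderx_client_check_example(struct i2c_client *client)
	{
		if (!i2c_check_functionality(client->adapter,
					     I2C_FUNC_SMBUS_READ_BLOCK_DATA))
			return -EOPNOTSUPP;

		return 0;
	}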
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index aeead0d27d10..db9105e52c79 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -348,14 +349,19 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
dev_dbg(&adap->dev, "complete\n");
if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) {
- u32 status = readl(priv->membase + UNIPHIER_FI2C_SR);
-
- if (!(status & UNIPHIER_FI2C_SR_STS) ||
- status & UNIPHIER_FI2C_SR_BB) {
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(priv->membase + UNIPHIER_FI2C_SR,
+ status,
+ (status & UNIPHIER_FI2C_SR_STS) &&
+ !(status & UNIPHIER_FI2C_SR_BB),
+ 1, 20);
+ if (ret) {
dev_err(&adap->dev,
"stop condition was not completed.\n");
uniphier_fi2c_recover(priv);
- return -EBUSY;
+ return ret;
}
}
@@ -455,54 +461,25 @@ static struct i2c_bus_recovery_info uniphier_fi2c_bus_recovery_info = {
.unprepare_recovery = uniphier_fi2c_unprepare_recovery,
};
-static int uniphier_fi2c_clk_init(struct device *dev,
- struct uniphier_fi2c_priv *priv)
+static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv,
+ u32 bus_speed, unsigned long clk_rate)
{
- struct device_node *np = dev->of_node;
- unsigned long clk_rate;
- u32 bus_speed, clk_count;
- int ret;
-
- if (of_property_read_u32(np, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
-
- if (!bus_speed) {
- dev_err(dev, "clock-frequency should not be zero\n");
- return -EINVAL;
- }
-
- if (bus_speed > UNIPHIER_FI2C_MAX_SPEED)
- bus_speed = UNIPHIER_FI2C_MAX_SPEED;
-
- /* Get input clk rate through clk driver */
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
+ u32 tmp;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ tmp = readl(priv->membase + UNIPHIER_FI2C_CR);
+ tmp |= UNIPHIER_FI2C_CR_MST;
+ writel(tmp, priv->membase + UNIPHIER_FI2C_CR);
uniphier_fi2c_reset(priv);
- clk_count = clk_rate / bus_speed;
+ tmp = clk_rate / bus_speed;
- writel(clk_count, priv->membase + UNIPHIER_FI2C_CYC);
- writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_LCTL);
- writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_SSUT);
- writel(clk_count / 16, priv->membase + UNIPHIER_FI2C_DSUT);
+ writel(tmp, priv->membase + UNIPHIER_FI2C_CYC);
+ writel(tmp / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+ writel(tmp / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+ writel(tmp / 16, priv->membase + UNIPHIER_FI2C_DSUT);
uniphier_fi2c_prepare_operation(priv);
-
- return 0;
}
static int uniphier_fi2c_probe(struct platform_device *pdev)
@@ -510,8 +487,9 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_fi2c_priv *priv;
struct resource *regs;
- int irq;
- int ret;
+ u32 bus_speed;
+ unsigned long clk_rate;
+ int irq, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -528,6 +506,31 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
return irq;
}
+ if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
+ bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
+
+ if (!bus_speed || bus_speed > UNIPHIER_FI2C_MAX_SPEED) {
+ dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(priv->clk);
+ if (!clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
init_completion(&priv->comp);
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &uniphier_fi2c_algo;
@@ -538,9 +541,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
- ret = uniphier_fi2c_clk_init(dev, priv);
- if (ret)
- goto err;
+ uniphier_fi2c_hw_init(priv, bus_speed, clk_rate);
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
@@ -550,11 +551,6 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&priv->adap);
- if (ret) {
- dev_err(dev, "failed to add I2C adapter\n");
- goto err;
- }
-
err:
if (ret)
clk_disable_unprepare(priv->clk);
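For reference, a worked example of the divider programming now done in uniphier_fi2c_hw_init(), using hypothetical clock values (a 50 MHz input clock and a 400 kHz bus, neither taken from the patch):

	CYC  = clk_rate / bus_speed = 50000000 / 400000 = 125
	LCTL = CYC / 2              = 62
	SSUT = CYC / 2              = 62
	DSUT = CYC / 16             = 7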
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 475a5eb514e2..56e92af46ddc 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -316,50 +316,15 @@ static struct i2c_bus_recovery_info uniphier_i2c_bus_recovery_info = {
.unprepare_recovery = uniphier_i2c_unprepare_recovery,
};
-static int uniphier_i2c_clk_init(struct device *dev,
- struct uniphier_i2c_priv *priv)
+static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv,
+ u32 bus_speed, unsigned long clk_rate)
{
- struct device_node *np = dev->of_node;
- unsigned long clk_rate;
- u32 bus_speed;
- int ret;
-
- if (of_property_read_u32(np, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
-
- if (!bus_speed) {
- dev_err(dev, "clock-frequency should not be zero\n");
- return -EINVAL;
- }
-
- if (bus_speed > UNIPHIER_I2C_MAX_SPEED)
- bus_speed = UNIPHIER_I2C_MAX_SPEED;
-
- /* Get input clk rate through clk driver */
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
-
uniphier_i2c_reset(priv, true);
writel((clk_rate / bus_speed / 2 << 16) | (clk_rate / bus_speed),
priv->membase + UNIPHIER_I2C_CLK);
uniphier_i2c_reset(priv, false);
-
- return 0;
}
static int uniphier_i2c_probe(struct platform_device *pdev)
@@ -367,8 +332,9 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_i2c_priv *priv;
struct resource *regs;
- int irq;
- int ret;
+ u32 bus_speed;
+ unsigned long clk_rate;
+ int irq, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -385,6 +351,31 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
return irq;
}
+ if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
+ bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
+
+ if (!bus_speed || bus_speed > UNIPHIER_I2C_MAX_SPEED) {
+ dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(priv->clk);
+ if (!clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
init_completion(&priv->comp);
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &uniphier_i2c_algo;
@@ -395,9 +386,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
- ret = uniphier_i2c_clk_init(dev, priv);
- if (ret)
- goto err;
+ uniphier_i2c_hw_init(priv, bus_speed, clk_rate);
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
@@ -407,11 +396,6 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&priv->adap);
- if (ret) {
- dev_err(dev, "failed to add I2C adapter\n");
- goto err;
- }
-
err:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index e1e3a85596c5..fbd0fd59f312 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -432,10 +432,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
}
err = i2c_add_adapter(adap);
- if (err) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, i2c_dev);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 4233f5695352..263685c7a512 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -418,7 +418,6 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {
- dev_err(&pdev->dev, "Adapter registeration failed\n");
mbox_free_channel(ctx->mbox_chan);
return rc;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 74f54f2f471f..66bce3b311a1 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -804,7 +804,6 @@ static int xiic_i2c_probe(struct platform_device *pdev)
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
xiic_deinit(i2c);
goto err_clk_dis;
}
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 55a7bef1b2e1..2a972ed7aa0d 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -400,10 +400,8 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adapter, priv);
err = i2c_add_adapter(&priv->adapter);
- if (err) {
- dev_err(&pdev->dev, "failed to add I2C adapter!\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, priv);
dev_dbg(&pdev->dev, "I2C bus:%d added\n", priv->adapter.nr);
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 613c3a4f2c51..0968f59b6df5 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -432,10 +432,8 @@ static int xlr_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
ret = i2c_add_numbered_adapter(&priv->adap);
- if (ret < 0) {
- dev_err(&priv->adap.dev, "Failed to add i2c bus.\n");
+ if (ret < 0)
return ret;
- }
platform_set_drvdata(pdev, priv);
dev_info(&priv->adap.dev, "Added I2C Bus.\n");
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index da3a02ef4a31..98fffa3a09f7 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -88,7 +88,7 @@ void i2c_transfer_trace_unreg(void)
}
#if defined(CONFIG_ACPI)
-struct acpi_i2c_handler_data {
+struct i2c_acpi_handler_data {
struct acpi_connection_info info;
struct i2c_adapter *adapter;
};
@@ -103,15 +103,18 @@ struct gsb_buffer {
};
} __packed;
-struct acpi_i2c_lookup {
+struct i2c_acpi_lookup {
struct i2c_board_info *info;
acpi_handle adapter_handle;
acpi_handle device_handle;
+ acpi_handle search_handle;
+ u32 speed;
+ u32 min_speed;
};
-static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
+static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
{
- struct acpi_i2c_lookup *lookup = data;
+ struct i2c_acpi_lookup *lookup = data;
struct i2c_board_info *info = lookup->info;
struct acpi_resource_i2c_serialbus *sb;
acpi_status status;
@@ -130,19 +133,18 @@ static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
return 1;
info->addr = sb->slave_address;
+ lookup->speed = sb->connection_speed;
if (sb->access_mode == ACPI_I2C_10BIT_MODE)
info->flags |= I2C_CLIENT_TEN;
return 1;
}
-static int acpi_i2c_get_info(struct acpi_device *adev,
- struct i2c_board_info *info,
- acpi_handle *adapter_handle)
+static int i2c_acpi_do_lookup(struct acpi_device *adev,
+ struct i2c_acpi_lookup *lookup)
{
+ struct i2c_board_info *info = lookup->info;
struct list_head resource_list;
- struct resource_entry *entry;
- struct acpi_i2c_lookup lookup;
int ret;
if (acpi_bus_get_status(adev) || !adev->status.present ||
@@ -150,24 +152,58 @@ static int acpi_i2c_get_info(struct acpi_device *adev,
return -EINVAL;
memset(info, 0, sizeof(*info));
- info->fwnode = acpi_fwnode_handle(adev);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.device_handle = acpi_device_handle(adev);
- lookup.info = info;
+ lookup->device_handle = acpi_device_handle(adev);
/* Look up for I2cSerialBus resource */
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_fill_info, &lookup);
+ i2c_acpi_fill_info, lookup);
acpi_dev_free_resource_list(&resource_list);
if (ret < 0 || !info->addr)
return -EINVAL;
- *adapter_handle = lookup.adapter_handle;
+ return 0;
+}
+
+static int i2c_acpi_get_info(struct acpi_device *adev,
+ struct i2c_board_info *info,
+ struct i2c_adapter *adapter,
+ acpi_handle *adapter_handle)
+{
+ struct list_head resource_list;
+ struct resource_entry *entry;
+ struct i2c_acpi_lookup lookup;
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.info = info;
+
+ ret = i2c_acpi_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ if (adapter) {
+ /* The adapter must match the one in I2cSerialBus() connector */
+ if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle)
+ return -ENODEV;
+ } else {
+ struct acpi_device *adapter_adev;
+
+ /* The adapter must be present */
+ if (acpi_bus_get_device(lookup.adapter_handle, &adapter_adev))
+ return -ENODEV;
+ if (acpi_bus_get_status(adapter_adev) ||
+ !adapter_adev->status.present)
+ return -ENODEV;
+ }
+
+ info->fwnode = acpi_fwnode_handle(adev);
+ if (adapter_handle)
+ *adapter_handle = lookup.adapter_handle;
/* Then fill IRQ number if any */
+ INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
return -EINVAL;
@@ -186,7 +222,7 @@ static int acpi_i2c_get_info(struct acpi_device *adev,
return 0;
}
-static void acpi_i2c_register_device(struct i2c_adapter *adapter,
+static void i2c_acpi_register_device(struct i2c_adapter *adapter,
struct acpi_device *adev,
struct i2c_board_info *info)
{
@@ -201,39 +237,35 @@ static void acpi_i2c_register_device(struct i2c_adapter *adapter,
}
}
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct i2c_adapter *adapter = data;
struct acpi_device *adev;
- acpi_handle adapter_handle;
struct i2c_board_info info;
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
- if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ if (i2c_acpi_get_info(adev, &info, adapter, NULL))
return AE_OK;
- if (adapter_handle != ACPI_HANDLE(&adapter->dev))
- return AE_OK;
-
- acpi_i2c_register_device(adapter, adev, &info);
+ i2c_acpi_register_device(adapter, adev, &info);
return AE_OK;
}
-#define ACPI_I2C_MAX_SCAN_DEPTH 32
+#define I2C_ACPI_MAX_SCAN_DEPTH 32
/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter
* @adap: pointer to adapter
*
* Enumerate all I2C slave devices behind this adapter by walking the ACPI
* namespace. When a device is found it will be added to the Linux device
* model and bound to the corresponding ACPI handle.
*/
-static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+static void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
acpi_status status;
@@ -241,14 +273,72 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
return;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_I2C_MAX_SCAN_DEPTH,
- acpi_i2c_add_device, NULL,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_add_device, NULL,
adap, NULL);
if (ACPI_FAILURE(status))
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
-static int acpi_i2c_match_adapter(struct device *dev, void *data)
+static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_acpi_lookup *lookup = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (i2c_acpi_do_lookup(adev, lookup))
+ return AE_OK;
+
+ if (lookup->search_handle != lookup->adapter_handle)
+ return AE_OK;
+
+ if (lookup->speed <= lookup->min_speed)
+ lookup->min_speed = lookup->speed;
+
+ return AE_OK;
+}
+
+/**
+ * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI
+ * @dev: The device owning the bus
+ *
+ * Find the I2C bus speed by walking the ACPI namespace for all I2C slave
+ * devices connected to this bus and using the speed of the slowest device.
+ *
+ * Returns the speed in Hz or zero
+ */
+u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ struct i2c_acpi_lookup lookup;
+ struct i2c_board_info dummy;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return 0;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.search_handle = ACPI_HANDLE(dev);
+ lookup.min_speed = UINT_MAX;
+ lookup.info = &dummy;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_lookup_speed, NULL,
+ &lookup, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "unable to find I2C bus speed from ACPI\n");
+ return 0;
+ }
+
+ return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
+
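A minimal usage sketch for the new export, assuming a hypothetical adapter driver (foo_i2c) and that the declaration lands in <linux/i2c.h> with this series; it is not part of the patch:

#include <linux/i2c.h>
#include <linux/platform_device.h>

/* Hypothetical helper: pick the bus clock from ACPI, else fall back. */
static u32 foo_i2c_pick_bus_speed(struct platform_device *pdev)
{
	/* Slowest ACPI-described slave wins; zero means nothing was found */
	u32 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);

	return acpi_speed ? acpi_speed : 400000; /* default to Fast-mode */
}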
+static int i2c_acpi_match_adapter(struct device *dev, void *data)
{
struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -258,29 +348,29 @@ static int acpi_i2c_match_adapter(struct device *dev, void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int acpi_i2c_match_device(struct device *dev, void *data)
+static int i2c_acpi_match_device(struct device *dev, void *data)
{
return ACPI_COMPANION(dev) == data;
}
-static struct i2c_adapter *acpi_i2c_find_adapter_by_handle(acpi_handle handle)
+static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
- acpi_i2c_match_adapter);
+ i2c_acpi_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
-static struct i2c_client *acpi_i2c_find_client_by_adev(struct acpi_device *adev)
+static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev, acpi_i2c_match_device);
+ dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
return dev ? i2c_verify_client(dev) : NULL;
}
-static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
+static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
struct acpi_device *adev = arg;
@@ -291,20 +381,20 @@ static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle))
break;
- adapter = acpi_i2c_find_adapter_by_handle(adapter_handle);
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
if (!adapter)
break;
- acpi_i2c_register_device(adapter, adev, &info);
+ i2c_acpi_register_device(adapter, adev, &info);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
break;
- client = acpi_i2c_find_client_by_adev(adev);
+ client = i2c_acpi_find_client_by_adev(adev);
if (!client)
break;
@@ -317,10 +407,10 @@ static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
}
static struct notifier_block i2c_acpi_notifier = {
- .notifier_call = acpi_i2c_notify,
+ .notifier_call = i2c_acpi_notify,
};
#else /* CONFIG_ACPI */
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
extern struct notifier_block i2c_acpi_notifier;
#endif /* CONFIG_ACPI */
@@ -386,12 +476,12 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
}
static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+i2c_acpi_space_handler(u32 function, acpi_physical_address command,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
- struct acpi_i2c_handler_data *data = handler_context;
+ struct i2c_acpi_handler_data *data = handler_context;
struct acpi_connection_info *info = &data->info;
struct acpi_resource_i2c_serialbus *sb;
struct i2c_adapter *adapter = data->adapter;
@@ -510,10 +600,10 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command,
}
-static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -524,7 +614,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
if (!handle)
return -ENODEV;
- data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+ data = kzalloc(sizeof(struct i2c_acpi_handler_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -538,7 +628,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler,
+ &i2c_acpi_space_handler,
NULL,
data);
if (ACPI_FAILURE(status)) {
@@ -552,10 +642,10 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
return 0;
}
-static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -568,7 +658,7 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler);
+ &i2c_acpi_space_handler);
status = acpi_bus_get_private_data(handle, (void **)&data);
if (ACPI_SUCCESS(status))
@@ -577,10 +667,10 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_bus_detach_private_data(handle);
}
#else /* CONFIG_ACPI_I2C_OPREGION */
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{ return 0; }
#endif /* CONFIG_ACPI_I2C_OPREGION */
@@ -853,7 +943,7 @@ static int i2c_device_probe(struct device *dev)
status = 0;
if (status)
- dev_warn(&client->dev, "failed to set up wakeup irq");
+ dev_warn(&client->dev, "failed to set up wakeup irq\n");
}
dev_dbg(dev, "probe\n");
@@ -1208,8 +1298,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
return client;
out_err:
- dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
- "(%d)\n", client->name, client->addr, status);
+ dev_err(&adap->dev,
+ "Failed to register i2c client %s at 0x%02x (%d)\n",
+ client->name, client->addr, status);
out_err_silent:
kfree(client);
return NULL;
@@ -1335,21 +1426,19 @@ static void i2c_adapter_dev_release(struct device *dev)
complete(&adap->dev_released);
}
-/*
- * This function is only needed for mutex_lock_nested, so it is never
- * called unless locking correctness checking is enabled. Thus we
- * make it inline to avoid a compiler warning. That's what gcc ends up
- * doing anyway.
- */
-static inline unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
{
unsigned int depth = 0;
while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
depth++;
+ WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES,
+ "adapter depth exceeds lockdep subclass limit\n");
+
return depth;
}
+EXPORT_SYMBOL_GPL(i2c_adapter_depth);
/*
* Let users instantiate I2C devices through sysfs. This can be used when
@@ -1678,8 +1767,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
if (driver->attach_adapter) {
dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
driver->driver.name);
- dev_warn(&adap->dev, "Please use another way to instantiate "
- "your i2c_client\n");
+ dev_warn(&adap->dev,
+ "Please use another way to instantiate your i2c_client\n");
/* We ignore the return code; if it fails, too bad */
driver->attach_adapter(adap);
}
@@ -1691,6 +1780,12 @@ static int __process_new_adapter(struct device_driver *d, void *data)
return i2c_do_add_adapter(to_i2c_driver(d), data);
}
+static const struct i2c_lock_operations i2c_adapter_lock_ops = {
+ .lock_bus = i2c_adapter_lock_bus,
+ .trylock_bus = i2c_adapter_trylock_bus,
+ .unlock_bus = i2c_adapter_unlock_bus,
+};
+
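A hedged sketch of the driver-side conversion this implies for adapters with non-default locking (hypothetical foo driver; adapters that kept the defaults need no changes):

#include <linux/i2c.h>
#include <linux/rtmutex.h>

static void foo_lock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	rt_mutex_lock(&adap->bus_lock);
}

static int foo_trylock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	return rt_mutex_trylock(&adap->bus_lock);
}

static void foo_unlock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	rt_mutex_unlock(&adap->bus_lock);
}

static const struct i2c_lock_operations foo_lock_ops = {
	.lock_bus = foo_lock_bus,
	.trylock_bus = foo_trylock_bus,
	.unlock_bus = foo_unlock_bus,
};

/* In probe, before i2c_add_adapter(): adap->lock_ops = &foo_lock_ops; */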
static int i2c_register_adapter(struct i2c_adapter *adap)
{
int res = -EINVAL;
@@ -1710,11 +1805,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
- if (!adap->lock_bus) {
- adap->lock_bus = i2c_adapter_lock_bus;
- adap->trylock_bus = i2c_adapter_trylock_bus;
- adap->unlock_bus = i2c_adapter_unlock_bus;
- }
+ if (!adap->lock_ops)
+ adap->lock_ops = &i2c_adapter_lock_ops;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
@@ -1752,8 +1844,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
- acpi_i2c_register_devices(adap);
- acpi_i2c_install_space_handler(adap);
+ i2c_acpi_register_devices(adap);
+ i2c_acpi_install_space_handler(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -1925,7 +2017,7 @@ void i2c_del_adapter(struct i2c_adapter *adap)
return;
}
- acpi_i2c_remove_space_handler(adap);
+ i2c_acpi_remove_space_handler(adap);
/* Tell drivers about this removal */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap,
@@ -2451,15 +2543,16 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (adap->algo->master_xfer) {
#ifdef DEBUG
for (ret = 0; ret < num; ret++) {
- dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, "
- "len=%d%s\n", ret, (msgs[ret].flags & I2C_M_RD)
- ? 'R' : 'W', msgs[ret].addr, msgs[ret].len,
+ dev_dbg(&adap->dev,
+ "master_xfer[%d] %c, addr=0x%02x, len=%d%s\n",
+ ret, (msgs[ret].flags & I2C_M_RD) ? 'R' : 'W',
+ msgs[ret].addr, msgs[ret].len,
(msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : "");
}
#endif
if (in_atomic() || irqs_disabled()) {
- ret = adap->trylock_bus(adap, I2C_LOCK_SEGMENT);
+ ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT);
if (!ret)
/* I2C activity is ongoing. */
return -EAGAIN;
@@ -2619,9 +2712,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Consistency check */
if (info.type[0] == '\0') {
- dev_err(&adapter->dev, "%s detection function provided "
- "no name for 0x%x\n", driver->driver.name,
- addr);
+ dev_err(&adapter->dev,
+ "%s detection function provided no name for 0x%x\n",
+ driver->driver.name, addr);
} else {
struct i2c_client *client;
@@ -2659,9 +2752,8 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
/* Warn that the adapter lost class based instantiation */
if (adapter->class == I2C_CLASS_DEPRECATED) {
dev_dbg(&adapter->dev,
- "This adapter dropped support for I2C classes and "
- "won't auto-detect %s devices anymore. If you need it, check "
- "'Documentation/i2c/instantiating-devices' for alternatives.\n",
+ "This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. "
+ "If you need it, check 'Documentation/i2c/instantiating-devices' for alternatives.\n",
driver->driver.name);
return 0;
}
@@ -2677,8 +2769,9 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
temp_client->adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
- dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
- "addr 0x%02x\n", adap_id, address_list[i]);
+ dev_dbg(&adapter->dev,
+ "found normal entry for adapter %d, addr 0x%02x\n",
+ adap_id, address_list[i]);
temp_client->addr = address_list[i];
err = i2c_detect_address(temp_client, driver);
if (unlikely(err))
@@ -2710,15 +2803,16 @@ i2c_new_probed_device(struct i2c_adapter *adap,
for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
/* Check address validity */
if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) {
- dev_warn(&adap->dev, "Invalid 7-bit address "
- "0x%02x\n", addr_list[i]);
+ dev_warn(&adap->dev, "Invalid 7-bit address 0x%02x\n",
+ addr_list[i]);
continue;
}
/* Check address availability (7 bit, no need to encode flags) */
if (i2c_check_addr_busy(adap, addr_list[i])) {
- dev_dbg(&adap->dev, "Address 0x%02x already in "
- "use, not probing\n", addr_list[i]);
+ dev_dbg(&adap->dev,
+ "Address 0x%02x already in use, not probing\n",
+ addr_list[i]);
continue;
}
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 8eee98634cda..83768e85a919 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -159,7 +159,7 @@ static int i2c_mux_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
return 0; /* mux_lock not locked, failure */
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return 1; /* we only want mux_lock, success */
- if (parent->trylock_bus(parent, flags))
+ if (i2c_trylock_bus(parent, flags))
return 1; /* parent locked too, success */
rt_mutex_unlock(&parent->mux_lock);
return 0; /* parent not locked, failure */
@@ -193,7 +193,7 @@ static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
if (!rt_mutex_trylock(&parent->mux_lock))
return 0; /* mux_lock not locked, failure */
- if (parent->trylock_bus(parent, flags))
+ if (i2c_trylock_bus(parent, flags))
return 1; /* parent locked too, success */
rt_mutex_unlock(&parent->mux_lock);
return 0; /* parent not locked, failure */
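Outside the mux code, the same accessor is what callers are expected to use when bracketing raw transfers themselves; a minimal sketch, assuming a hypothetical caller and the i2c_trylock_bus()/i2c_unlock_bus() helpers added by this series:

/* Sketch only: try to take the segment lock, transfer, then release it. */
static int foo_xfer_locked(struct i2c_adapter *adap,
			   struct i2c_msg *msgs, int num)
{
	int ret;

	if (!i2c_trylock_bus(adap, I2C_LOCK_SEGMENT))
		return -EAGAIN;	/* bus busy, let the caller retry */

	ret = __i2c_transfer(adap, msgs, num);
	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);

	return ret;
}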
@@ -255,6 +255,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->dev = dev;
if (flags & I2C_MUX_LOCKED)
muxc->mux_locked = true;
+ if (flags & I2C_MUX_ARBITRATOR)
+ muxc->arbitrator = true;
+ if (flags & I2C_MUX_GATE)
+ muxc->gate = true;
muxc->select = select;
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
@@ -263,6 +267,18 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
}
EXPORT_SYMBOL_GPL(i2c_mux_alloc);
+static const struct i2c_lock_operations i2c_mux_lock_ops = {
+ .lock_bus = i2c_mux_lock_bus,
+ .trylock_bus = i2c_mux_trylock_bus,
+ .unlock_bus = i2c_mux_unlock_bus,
+};
+
+static const struct i2c_lock_operations i2c_parent_lock_ops = {
+ .lock_bus = i2c_parent_lock_bus,
+ .trylock_bus = i2c_parent_trylock_bus,
+ .unlock_bus = i2c_parent_unlock_bus,
+};
+
int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
u32 force_nr, u32 chan_id,
unsigned int class)
@@ -312,15 +328,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
priv->adap.retries = parent->retries;
priv->adap.timeout = parent->timeout;
priv->adap.quirks = parent->quirks;
- if (muxc->mux_locked) {
- priv->adap.lock_bus = i2c_mux_lock_bus;
- priv->adap.trylock_bus = i2c_mux_trylock_bus;
- priv->adap.unlock_bus = i2c_mux_unlock_bus;
- } else {
- priv->adap.lock_bus = i2c_parent_lock_bus;
- priv->adap.trylock_bus = i2c_parent_trylock_bus;
- priv->adap.unlock_bus = i2c_parent_unlock_bus;
- }
+ if (muxc->mux_locked)
+ priv->adap.lock_ops = &i2c_mux_lock_ops;
+ else
+ priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class)
@@ -335,18 +346,42 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
* nothing if !CONFIG_OF.
*/
if (muxc->dev->of_node) {
- struct device_node *child;
+ struct device_node *dev_node = muxc->dev->of_node;
+ struct device_node *mux_node, *child = NULL;
u32 reg;
- for_each_child_of_node(muxc->dev->of_node, child) {
- ret = of_property_read_u32(child, "reg", &reg);
- if (ret)
- continue;
- if (chan_id == reg) {
- priv->adap.dev.of_node = child;
- break;
+ if (muxc->arbitrator)
+ mux_node = of_get_child_by_name(dev_node, "i2c-arb");
+ else if (muxc->gate)
+ mux_node = of_get_child_by_name(dev_node, "i2c-gate");
+ else
+ mux_node = of_get_child_by_name(dev_node, "i2c-mux");
+
+ if (mux_node) {
+ /* A "reg" property indicates an old-style DT entry */
+ if (!of_property_read_u32(mux_node, "reg", &reg)) {
+ of_node_put(mux_node);
+ mux_node = NULL;
+ }
+ }
+
+ if (!mux_node)
+ mux_node = of_node_get(dev_node);
+ else if (muxc->arbitrator || muxc->gate)
+ child = of_node_get(mux_node);
+
+ if (!child) {
+ for_each_child_of_node(mux_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg)
+ break;
}
}
+
+ priv->adap.dev.of_node = child;
+ of_node_put(mux_node);
}
/*
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index a90bbc4037dd..86fc2d4c081b 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -130,7 +130,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
return -EINVAL;
}
- muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), 0,
+ muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), I2C_MUX_ARBITRATOR,
i2c_arbitrator_select, i2c_arbitrator_deselect);
if (!muxc)
return -ENOMEM;
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 3cb8af635db5..4ea7e691afc7 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -85,6 +85,13 @@ static const struct i2c_device_id pca9541_id[] = {
MODULE_DEVICE_TABLE(i2c, pca9541_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pca9541_of_match[] = {
+ { .compatible = "nxp,pca9541" },
+ {}
+};
+#endif
+
/*
* Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
* as they will try to lock the adapter a second time.
@@ -349,7 +356,8 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), 0,
+ muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data),
+ I2C_MUX_ARBITRATOR,
pca9541_select_chan, pca9541_release_chan);
if (!muxc)
return -ENOMEM;
@@ -382,6 +390,7 @@ static int pca9541_remove(struct i2c_client *client)
static struct i2c_driver pca9541_driver = {
.driver = {
.name = "pca9541",
+ .of_match_table = of_match_ptr(pca9541_of_match),
},
.probe = pca9541_probe,
.remove = pca9541_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 3278ebf1cc5c..1091346f2480 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -42,6 +42,7 @@
#include <linux/i2c/pca954x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -58,14 +59,6 @@ enum pca_type {
pca_9548,
};
-struct pca954x {
- enum pca_type type;
-
- u8 last_chan; /* last register value */
- u8 deselect;
- struct i2c_client *client;
-};
-
struct chip_desc {
u8 nchans;
u8 enable; /* used for muxes only */
@@ -75,6 +68,14 @@ struct chip_desc {
} muxtype;
};
+struct pca954x {
+ const struct chip_desc *chip;
+
+ u8 last_chan; /* last register value */
+ u8 deselect;
+ struct i2c_client *client;
+};
+
/* Provide specs for the PCA954x types we know about */
static const struct chip_desc chips[] = {
[pca_9540] = {
@@ -119,6 +120,20 @@ static const struct i2c_device_id pca954x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca954x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pca954x_of_match[] = {
+ { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
+ { .compatible = "nxp,pca9542", .data = &chips[pca_9542] },
+ { .compatible = "nxp,pca9543", .data = &chips[pca_9543] },
+ { .compatible = "nxp,pca9544", .data = &chips[pca_9544] },
+ { .compatible = "nxp,pca9545", .data = &chips[pca_9545] },
+ { .compatible = "nxp,pca9546", .data = &chips[pca_9546] },
+ { .compatible = "nxp,pca9547", .data = &chips[pca_9547] },
+ { .compatible = "nxp,pca9548", .data = &chips[pca_9548] },
+ {}
+};
+#endif
+
/* Write to mux register. Don't use i2c_transfer()/i2c_smbus_xfer()
 for this as they will try to lock the adapter a second time */
static int pca954x_reg_write(struct i2c_adapter *adap,
@@ -151,7 +166,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- const struct chip_desc *chip = &chips[data->type];
+ const struct chip_desc *chip = data->chip;
u8 regval;
int ret = 0;
@@ -197,6 +212,7 @@ static int pca954x_probe(struct i2c_client *client,
int num, force, class;
struct i2c_mux_core *muxc;
struct pca954x *data;
+ const struct of_device_id *match;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
@@ -226,14 +242,19 @@ static int pca954x_probe(struct i2c_client *client,
return -ENODEV;
}
- data->type = id->driver_data;
+ match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
+ if (match)
+ data->chip = of_device_get_match_data(&client->dev);
+ else
+ data->chip = &chips[id->driver_data];
+
data->last_chan = 0; /* force the first selection */
idle_disconnect_dt = of_node &&
of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
/* Now create an adapter for each channel */
- for (num = 0; num < chips[data->type].nchans; num++) {
+ for (num = 0; num < data->chip->nchans; num++) {
bool idle_disconnect_pd = false;
force = 0; /* dynamic adap number */
@@ -263,7 +284,7 @@ static int pca954x_probe(struct i2c_client *client,
dev_info(&client->dev,
"registered %d multiplexed busses for I2C %s %s\n",
- num, chips[data->type].muxtype == pca954x_ismux
+ num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
return 0;
@@ -299,6 +320,7 @@ static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
.pm = &pca954x_pm,
+ .of_match_table = of_match_ptr(pca954x_of_match),
},
.probe = pca954x_probe,
.remove = pca954x_remove,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 67ec58f9ef99..4466a2f969d7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
*
* Must be called under local_irq_disable().
*/
-static int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 78f148ea9d9f..2b791fe1e2bc 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,27 @@ config BMC150_ACCEL_SPI
tristate
select REGMAP_SPI
+config DMARD06
+ tristate "Domintech DMARD06 Digital Accelerometer Driver"
+ depends on OF || COMPILE_TEST
+ depends on I2C
+ help
+ Say yes here to build support for the Domintech low-g tri-axial
+ digital accelerometers: DMARD05, DMARD06, DMARD07.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dmard06.
+
+config DMARD09
+ tristate "Domintech DMARD09 3-axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Domintech DMARD09 3-axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called dmard09.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -98,14 +119,35 @@ config IIO_ST_ACCEL_SPI_3AXIS
config KXSD9
tristate "Kionix KXSD9 Accelerometer Driver"
- depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Kionix KXSD9 accelerometer.
- Currently this only supports the device via an SPI interface.
+ It can be accessed using an (optional) SPI or I2C interface.
To compile this driver as a module, choose M here: the module
will be called kxsd9.
+config KXSD9_SPI
+ tristate "Kionix KXSD9 SPI transport"
+ depends on KXSD9
+ depends on SPI
+ default KXSD9
+ select REGMAP_SPI
+ help
+ Say yes here to enable the Kionix KXSD9 accelerometer
+ SPI transport channel.
+
+config KXSD9_I2C
+ tristate "Kionix KXSD9 I2C transport"
+ depends on KXSD9
+ depends on I2C
+ default KXSD9
+ select REGMAP_I2C
+ help
+ Say yes here to enable the Kionix KXSD9 accelerometer
+ I2C transport channel.
+
config KXCJK1013
tristate "Kionix 3-Axis Accelerometer Driver"
depends on I2C
@@ -119,6 +161,16 @@ config KXCJK1013
To compile this driver as a module, choose M here: the module will
be called kxcjk-1013.
+config MC3230
+ tristate "mCube MC3230 Digital Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to build support for the mCube MC3230 low-g tri-axial
+ digital accelerometer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mc3230.
+
config MMA7455
tristate
select IIO_BUFFER
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 6cedbecca2ee..f5d3ddee619e 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,9 +8,14 @@ obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
+obj-$(CONFIG_DMARD06) += dmard06.o
+obj-$(CONFIG_DMARD09) += dmard09.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
obj-$(CONFIG_KXSD9) += kxsd9.o
+obj-$(CONFIG_KXSD9_SPI) += kxsd9-spi.o
+obj-$(CONFIG_KXSD9_I2C) += kxsd9-i2c.o
+obj-$(CONFIG_MC3230) += mc3230.o
obj-$(CONFIG_MMA7455) += mma7455_core.o
obj-$(CONFIG_MMA7455_I2C) += mma7455_i2c.o
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index e3f88ba5faf3..0890934ef66f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -469,13 +469,14 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->mutex);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&data->mutex);
- return -EBUSY;
- }
ret = bma180_get_data_reg(data, chan->scan_index);
mutex_unlock(&data->mutex);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = sign_extend32(ret >> chan->scan_type.shift,
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
new file mode 100644
index 000000000000..656ca8e1927f
--- /dev/null
+++ b/drivers/iio/accel/dmard06.c
@@ -0,0 +1,241 @@
+/*
+ * IIO driver for Domintech DMARD06 accelerometer
+ *
+ * Copyright (C) 2016 Aleksei Mamlin <mamlinav@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#define DMARD06_DRV_NAME "dmard06"
+
+/* Device data registers */
+#define DMARD06_CHIP_ID_REG 0x0f
+#define DMARD06_TOUT_REG 0x40
+#define DMARD06_XOUT_REG 0x41
+#define DMARD06_YOUT_REG 0x42
+#define DMARD06_ZOUT_REG 0x43
+#define DMARD06_CTRL1_REG 0x44
+
+/* Device ID value */
+#define DMARD05_CHIP_ID 0x05
+#define DMARD06_CHIP_ID 0x06
+#define DMARD07_CHIP_ID 0x07
+
+/* Device values */
+#define DMARD05_AXIS_SCALE_VAL 15625
+#define DMARD06_AXIS_SCALE_VAL 31250
+#define DMARD06_TEMP_CENTER_VAL 25
+#define DMARD06_SIGN_BIT 7
+
+/* Device power modes */
+#define DMARD06_MODE_NORMAL 0x27
+#define DMARD06_MODE_POWERDOWN 0x00
+
+/* Device channels */
+#define DMARD06_ACCEL_CHANNEL(_axis, _reg) { \
+ .type = IIO_ACCEL, \
+ .address = _reg, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .modified = 1, \
+}
+
+#define DMARD06_TEMP_CHANNEL(_reg) { \
+ .type = IIO_TEMP, \
+ .address = _reg, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+}
+
+struct dmard06_data {
+ struct i2c_client *client;
+ u8 chip_id;
+};
+
+static const struct iio_chan_spec dmard06_channels[] = {
+ DMARD06_ACCEL_CHANNEL(X, DMARD06_XOUT_REG),
+ DMARD06_ACCEL_CHANNEL(Y, DMARD06_YOUT_REG),
+ DMARD06_ACCEL_CHANNEL(Z, DMARD06_ZOUT_REG),
+ DMARD06_TEMP_CHANNEL(DMARD06_TOUT_REG),
+};
+
+static int dmard06_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dmard06_data *dmard06 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_byte_data(dmard06->client,
+ chan->address);
+ if (ret < 0) {
+ dev_err(&dmard06->client->dev,
+ "Error reading data: %d\n", ret);
+ return ret;
+ }
+
+ *val = sign_extend32(ret, DMARD06_SIGN_BIT);
+
+ if (dmard06->chip_id == DMARD06_CHIP_ID)
+ *val = *val >> 1;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ if (dmard06->chip_id != DMARD06_CHIP_ID)
+ *val = *val / 2;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = DMARD06_TEMP_CENTER_VAL;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ *val = 0;
+ if (dmard06->chip_id == DMARD06_CHIP_ID)
+ *val2 = DMARD06_AXIS_SCALE_VAL;
+ else
+ *val2 = DMARD05_AXIS_SCALE_VAL;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info dmard06_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dmard06_read_raw,
+};
+
+static int dmard06_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct dmard06_data *dmard06;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "I2C check functionality failed\n");
+ return -ENXIO;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dmard06));
+ if (!indio_dev) {
+ dev_err(&client->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ dmard06 = iio_priv(indio_dev);
+ dmard06->client = client;
+
+ ret = i2c_smbus_read_byte_data(dmard06->client, DMARD06_CHIP_ID_REG);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (ret != DMARD05_CHIP_ID && ret != DMARD06_CHIP_ID &&
+ ret != DMARD07_CHIP_ID) {
+ dev_err(&client->dev, "Invalid chip id: %02d\n", ret);
+ return -ENODEV;
+ }
+
+ dmard06->chip_id = ret;
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = DMARD06_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dmard06_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dmard06_channels);
+ indio_dev->info = &dmard06_info;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dmard06_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct dmard06_data *dmard06 = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(dmard06->client, DMARD06_CTRL1_REG,
+ DMARD06_MODE_POWERDOWN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dmard06_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct dmard06_data *dmard06 = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(dmard06->client, DMARD06_CTRL1_REG,
+ DMARD06_MODE_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(dmard06_pm_ops, dmard06_suspend, dmard06_resume);
+#define DMARD06_PM_OPS (&dmard06_pm_ops)
+#else
+#define DMARD06_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id dmard06_id[] = {
+ { "dmard05", 0 },
+ { "dmard06", 0 },
+ { "dmard07", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, dmard06_id);
+
+static const struct of_device_id dmard06_of_match[] = {
+ { .compatible = "domintech,dmard05" },
+ { .compatible = "domintech,dmard06" },
+ { .compatible = "domintech,dmard07" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dmard06_of_match);
+
+static struct i2c_driver dmard06_driver = {
+ .probe = dmard06_probe,
+ .id_table = dmard06_id,
+ .driver = {
+ .name = DMARD06_DRV_NAME,
+ .of_match_table = of_match_ptr(dmard06_of_match),
+ .pm = DMARD06_PM_OPS,
+ },
+};
+module_i2c_driver(dmard06_driver);
+
+MODULE_AUTHOR("Aleksei Mamlin <mamlinav@gmail.com>");
+MODULE_DESCRIPTION("Domintech DMARD06 accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
new file mode 100644
index 000000000000..d3a28f96565c
--- /dev/null
+++ b/drivers/iio/accel/dmard09.c
@@ -0,0 +1,157 @@
+/*
+ * IIO driver for the 3-axis accelerometer Domintech DMARD09.
+ *
+ * Copyright (c) 2016, Jelle van der Waa <jelle@vdwaa.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#define DMARD09_DRV_NAME "dmard09"
+
+#define DMARD09_REG_CHIPID 0x18
+#define DMARD09_REG_STAT 0x0A
+#define DMARD09_REG_X 0x0C
+#define DMARD09_REG_Y 0x0E
+#define DMARD09_REG_Z 0x10
+#define DMARD09_CHIPID 0x95
+
+#define DMARD09_BUF_LEN 8
+#define DMARD09_AXIS_X 0
+#define DMARD09_AXIS_Y 1
+#define DMARD09_AXIS_Z 2
+#define DMARD09_AXIS_X_OFFSET ((DMARD09_AXIS_X + 1) * 2)
+#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1) * 2)
+#define DMARD09_AXIS_Z_OFFSET ((DMARD09_AXIS_Z + 1) * 2)
+
+struct dmard09_data {
+ struct i2c_client *client;
+};
+
+#define DMARD09_CHANNEL(_axis, offset) { \
+ .type = IIO_ACCEL, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .modified = 1, \
+ .address = offset, \
+ .channel2 = IIO_MOD_##_axis, \
+}
+
+static const struct iio_chan_spec dmard09_channels[] = {
+ DMARD09_CHANNEL(X, DMARD09_AXIS_X_OFFSET),
+ DMARD09_CHANNEL(Y, DMARD09_AXIS_Y_OFFSET),
+ DMARD09_CHANNEL(Z, DMARD09_AXIS_Z_OFFSET),
+};
+
+static int dmard09_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dmard09_data *data = iio_priv(indio_dev);
+ u8 buf[DMARD09_BUF_LEN];
+ int ret;
+ s16 accel;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Read from the DMARD09_REG_STAT register, since the chip
+ * caches reads from the individual X, Y, Z registers.
+ */
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ DMARD09_REG_STAT,
+ DMARD09_BUF_LEN, buf);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg %d\n",
+ DMARD09_REG_STAT);
+ return ret;
+ }
+
+ accel = get_unaligned_le16(&buf[chan->address]);
+
+ /* Remove lower 3 bits and sign extend */
+ accel <<= 4;
+ accel >>= 7;
+
+ *val = accel;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info dmard09_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dmard09_read_raw,
+};
+
+static int dmard09_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct dmard09_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ ret = i2c_smbus_read_byte_data(data->client, DMARD09_REG_CHIPID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading chip id %d\n", ret);
+ return ret;
+ }
+
+ if (ret != DMARD09_CHIPID) {
+ dev_err(&client->dev, "Invalid chip id %d\n", ret);
+ return -ENODEV;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = DMARD09_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dmard09_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dmard09_channels);
+ indio_dev->info = &dmard09_info;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id dmard09_id[] = {
+ { "dmard09", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, dmard09_id);
+
+static struct i2c_driver dmard09_driver = {
+ .driver = {
+ .name = DMARD09_DRV_NAME
+ },
+ .probe = dmard09_probe,
+ .id_table = dmard09_id,
+};
+
+module_i2c_driver(dmard09_driver);
+
+MODULE_AUTHOR("Jelle van der Waa <jelle@vdwaa.nl>");
+MODULE_DESCRIPTION("DMARD09 3-axis accelerometer driver");
+MODULE_LICENSE("GPL");
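The two-shift idiom in dmard09_read_raw() above keeps bits 11..3 of the raw sample and sign-extends from bit 11 in one step; a stand-alone user-space sketch of what it computes (assumes an arithmetic right shift on signed types, as the kernel code does):

#include <stdint.h>
#include <stdio.h>

/* Keep bits 11..3 of a raw little-endian sample, sign-extending from bit 11. */
static int16_t dmard09_extract(uint16_t raw)
{
	int16_t accel = (int16_t)raw;

	accel <<= 4;	/* discard the top 4 (unused) bits */
	accel >>= 7;	/* discard the low 3 bits; arithmetic shift sign-extends */
	return accel;
}

int main(void)
{
	printf("%d\n", dmard09_extract(0x0400));	/* -> 128 */
	printf("%d\n", dmard09_extract(0x0F88));	/* -> -15 */
	return 0;
}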
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 765a72362dc6..3f968c46e667 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1392,6 +1392,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1013", KXCJK1013},
{"KXCJ1008", KXCJ91008},
{"KXCJ9000", KXCJ91008},
+ {"KIOX000A", KXCJ91008},
{"KXTJ1009", KXTJ21009},
{"SMO8500", KXCJ91008},
{ },
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
new file mode 100644
index 000000000000..95e20855d2ef
--- /dev/null
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -0,0 +1,64 @@
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include "kxsd9.h"
+
+static int kxsd9_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0e,
+ };
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return kxsd9_common_probe(&i2c->dev,
+ regmap,
+ i2c->name);
+}
+
+static int kxsd9_i2c_remove(struct i2c_client *client)
+{
+ return kxsd9_common_remove(&client->dev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id kxsd9_of_match[] = {
+ { .compatible = "kionix,kxsd9", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, kxsd9_of_match);
+#else
+#define kxsd9_of_match NULL
+#endif
+
+static const struct i2c_device_id kxsd9_i2c_id[] = {
+ {"kxsd9", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, kxsd9_i2c_id);
+
+static struct i2c_driver kxsd9_i2c_driver = {
+ .driver = {
+ .name = "kxsd9",
+ .of_match_table = of_match_ptr(kxsd9_of_match),
+ .pm = &kxsd9_dev_pm_ops,
+ },
+ .probe = kxsd9_i2c_probe,
+ .remove = kxsd9_i2c_remove,
+ .id_table = kxsd9_i2c_id,
+};
+module_i2c_driver(kxsd9_i2c_driver);
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
new file mode 100644
index 000000000000..b7d0078fd00e
--- /dev/null
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -0,0 +1,56 @@
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#include "kxsd9.h"
+
+static int kxsd9_spi_probe(struct spi_device *spi)
+{
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0e,
+ };
+ struct regmap *regmap;
+
+ spi->mode = SPI_MODE_0;
+ regmap = devm_regmap_init_spi(spi, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return kxsd9_common_probe(&spi->dev,
+ regmap,
+ spi_get_device_id(spi)->name);
+}
+
+static int kxsd9_spi_remove(struct spi_device *spi)
+{
+ return kxsd9_common_remove(&spi->dev);
+}
+
+static const struct spi_device_id kxsd9_spi_id[] = {
+ {"kxsd9", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(spi, kxsd9_spi_id);
+
+static struct spi_driver kxsd9_spi_driver = {
+ .driver = {
+ .name = "kxsd9",
+ .pm = &kxsd9_dev_pm_ops,
+ },
+ .probe = kxsd9_spi_probe,
+ .remove = kxsd9_spi_remove,
+ .id_table = kxsd9_spi_id,
+};
+module_spi_driver(kxsd9_spi_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 9d72d4bcf5e9..9af60ac70738 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -12,19 +12,25 @@
* I have a suitable wire made up.
*
* TODO: Support the motion detector
- * Uses register address incrementing so could have a
- * heavily optimized ring buffer access function.
*/
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/module.h>
-
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "kxsd9.h"
#define KXSD9_REG_X 0x00
#define KXSD9_REG_Y 0x02
@@ -33,28 +39,45 @@
#define KXSD9_REG_RESET 0x0a
#define KXSD9_REG_CTRL_C 0x0c
-#define KXSD9_FS_MASK 0x03
+#define KXSD9_CTRL_C_FS_MASK 0x03
+#define KXSD9_CTRL_C_FS_8G 0x00
+#define KXSD9_CTRL_C_FS_6G 0x01
+#define KXSD9_CTRL_C_FS_4G 0x02
+#define KXSD9_CTRL_C_FS_2G 0x03
+#define KXSD9_CTRL_C_MOT_LAT BIT(3)
+#define KXSD9_CTRL_C_MOT_LEV BIT(4)
+#define KXSD9_CTRL_C_LP_MASK 0xe0
+#define KXSD9_CTRL_C_LP_NONE 0x00
+#define KXSD9_CTRL_C_LP_2000HZC BIT(5)
+#define KXSD9_CTRL_C_LP_2000HZB BIT(6)
+#define KXSD9_CTRL_C_LP_2000HZA (BIT(5)|BIT(6))
+#define KXSD9_CTRL_C_LP_1000HZ BIT(7)
+#define KXSD9_CTRL_C_LP_500HZ (BIT(7)|BIT(5))
+#define KXSD9_CTRL_C_LP_100HZ (BIT(7)|BIT(6))
+#define KXSD9_CTRL_C_LP_50HZ (BIT(7)|BIT(6)|BIT(5))
#define KXSD9_REG_CTRL_B 0x0d
-#define KXSD9_REG_CTRL_A 0x0e
-#define KXSD9_READ(a) (0x80 | (a))
-#define KXSD9_WRITE(a) (a)
+#define KXSD9_CTRL_B_CLK_HLD BIT(7)
+#define KXSD9_CTRL_B_ENABLE BIT(6)
+#define KXSD9_CTRL_B_ST BIT(5) /* Self-test */
+
+#define KXSD9_REG_CTRL_A 0x0e
-#define KXSD9_STATE_RX_SIZE 2
-#define KXSD9_STATE_TX_SIZE 2
/**
* struct kxsd9_state - device related storage
- * @buf_lock: protect the rx and tx buffers.
- * @us: spi device
- * @rx: single rx buffer storage
- * @tx: single tx buffer storage
- **/
+ * @dev: pointer to the parent device
+ * @map: regmap to the device
+ * @orientation: mounting matrix, flipped axis etc
+ * @regs: regulators for this device, VDD and IOVDD
+ * @scale: the current scaling setting
+ */
struct kxsd9_state {
- struct mutex buf_lock;
- struct spi_device *us;
- u8 rx[KXSD9_STATE_RX_SIZE] ____cacheline_aligned;
- u8 tx[KXSD9_STATE_TX_SIZE];
+ struct device *dev;
+ struct regmap *map;
+ struct iio_mount_matrix orientation;
+ struct regulator_bulk_data regs[2];
+ u8 scale;
};
#define KXSD9_SCALE_2G "0.011978"
@@ -65,6 +88,14 @@ struct kxsd9_state {
/* reverse order */
static const int kxsd9_micro_scales[4] = { 47853, 35934, 23927, 11978 };
+#define KXSD9_ZERO_G_OFFSET -2048
+
+/*
+ * Regulator names
+ */
+static const char kxsd9_reg_vdd[] = "vdd";
+static const char kxsd9_reg_iovdd[] = "iovdd";
+
static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
{
int ret, i;
@@ -79,42 +110,17 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
if (!foundit)
return -EINVAL;
- mutex_lock(&st->buf_lock);
- ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ ret = regmap_update_bits(st->map,
+ KXSD9_REG_CTRL_C,
+ KXSD9_CTRL_C_FS_MASK,
+ i);
if (ret < 0)
goto error_ret;
- st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
- st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
- ret = spi_write(st->us, st->tx, 2);
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
+ /* Cached scale when the sensor is powered down */
+ st->scale = i;
-static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
-{
- int ret;
- struct kxsd9_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .bits_per_word = 8,
- .len = 1,
- .delay_usecs = 200,
- .tx_buf = st->tx,
- }, {
- .bits_per_word = 8,
- .len = 2,
- .rx_buf = st->rx,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = KXSD9_READ(address);
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (!ret)
- ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
- mutex_unlock(&st->buf_lock);
+error_ret:
return ret;
}
@@ -136,6 +142,9 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
long mask)
{
int ret = -EINVAL;
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(st->dev);
if (mask == IIO_CHAN_INFO_SCALE) {
/* Check no integer component */
@@ -144,6 +153,9 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
ret = kxsd9_write_scale(indio_dev, val2);
}
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return ret;
}
@@ -153,46 +165,154 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
{
int ret = -EINVAL;
struct kxsd9_state *st = iio_priv(indio_dev);
+ unsigned int regval;
+ __be16 raw_val;
+ u16 nval;
+
+ pm_runtime_get_sync(st->dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = kxsd9_read(indio_dev, chan->address);
- if (ret < 0)
+ ret = regmap_bulk_read(st->map, chan->address, &raw_val,
+ sizeof(raw_val));
+ if (ret)
goto error_ret;
- *val = ret;
+ nval = be16_to_cpu(raw_val);
+ /* Only 12 bits are valid */
+ nval >>= 4;
+ *val = nval;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ /* This has a bias of -2048 */
+ *val = KXSD9_ZERO_G_OFFSET;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ ret = regmap_read(st->map,
+ KXSD9_REG_CTRL_C,
+ &regval);
if (ret < 0)
goto error_ret;
*val = 0;
- *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ *val2 = kxsd9_micro_scales[regval & KXSD9_CTRL_C_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
break;
}
error_ret:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return ret;
};
-#define KXSD9_ACCEL_CHAN(axis) \
+
+static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct kxsd9_state *st = iio_priv(indio_dev);
+ int ret;
+ /* 4 * 16-bit values AND timestamp */
+ __be16 hw_values[8];
+
+ ret = regmap_bulk_read(st->map,
+ KXSD9_REG_X,
+ &hw_values,
+ 8);
+ if (ret) {
+ dev_err(st->dev,
+ "error reading data\n");
+ goto out;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ hw_values,
+ iio_get_time_ns(indio_dev));
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int kxsd9_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(st->dev);
+
+ return 0;
+}
+
+static int kxsd9_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops kxsd9_buffer_setup_ops = {
+ .preenable = kxsd9_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = kxsd9_buffer_postdisable,
+};
+
+static const struct iio_mount_matrix *
+kxsd9_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ return &st->orientation;
+}
+
+static const struct iio_chan_spec_ext_info kxsd9_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxsd9_get_mount_matrix),
+ { },
+};
+
+#define KXSD9_ACCEL_CHAN(axis, index) \
{ \
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .ext_info = kxsd9_ext_info, \
.address = KXSD9_REG_##axis, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec kxsd9_channels[] = {
- KXSD9_ACCEL_CHAN(X), KXSD9_ACCEL_CHAN(Y), KXSD9_ACCEL_CHAN(Z),
+ KXSD9_ACCEL_CHAN(X, 0),
+ KXSD9_ACCEL_CHAN(Y, 1),
+ KXSD9_ACCEL_CHAN(Z, 2),
{
.type = IIO_VOLTAGE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.indexed = 1,
.address = KXSD9_REG_AUX,
- }
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct attribute_group kxsd9_attribute_group = {
@@ -203,17 +323,69 @@ static int kxsd9_power_up(struct kxsd9_state *st)
{
int ret;
- st->tx[0] = 0x0d;
- st->tx[1] = 0x40;
- ret = spi_write(st->us, st->tx, 2);
+ /* Enable the regulators */
+ ret = regulator_bulk_enable(ARRAY_SIZE(st->regs), st->regs);
+ if (ret) {
+ dev_err(st->dev, "Cannot enable regulators\n");
+ return ret;
+ }
+
+ /* Power up */
+ ret = regmap_write(st->map,
+ KXSD9_REG_CTRL_B,
+ KXSD9_CTRL_B_ENABLE);
if (ret)
return ret;
- st->tx[0] = 0x0c;
- st->tx[1] = 0x9b;
- return spi_write(st->us, st->tx, 2);
+ /*
+ * Set 1000Hz LPF, 2g fullscale, motion wakeup threshold 1g,
+ * latched wakeup
+ */
+ ret = regmap_write(st->map,
+ KXSD9_REG_CTRL_C,
+ KXSD9_CTRL_C_LP_1000HZ |
+ KXSD9_CTRL_C_MOT_LEV |
+ KXSD9_CTRL_C_MOT_LAT |
+ st->scale);
+ if (ret)
+ return ret;
+
+ /*
+ * Power-up time depends on the LPF setting, but is typically 15.9 ms; let's
+ * set 20 ms to allow for some slack.
+ */
+ msleep(20);
+
+ return 0;
};
+static int kxsd9_power_down(struct kxsd9_state *st)
+{
+ int ret;
+
+ /*
+ * Set into low power mode - since there may be more users of the
+ * regulators this is the first step of the power saving: it will
+ * make sure we conserve power even if there are other users on the
+ * regulators.
+ */
+ ret = regmap_update_bits(st->map,
+ KXSD9_REG_CTRL_B,
+ KXSD9_CTRL_B_ENABLE,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Disable the regulators */
+ ret = regulator_bulk_disable(ARRAY_SIZE(st->regs), st->regs);
+ if (ret) {
+ dev_err(st->dev, "Cannot disable regulators\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct iio_info kxsd9_info = {
.read_raw = &kxsd9_read_raw,
.write_raw = &kxsd9_write_raw,
@@ -221,57 +393,136 @@ static const struct iio_info kxsd9_info = {
.driver_module = THIS_MODULE,
};
-static int kxsd9_probe(struct spi_device *spi)
+/* Four channels apart from timestamp, scan mask = 0x0f */
+static const unsigned long kxsd9_scan_masks[] = { 0xf, 0 };
+
+int kxsd9_common_probe(struct device *dev,
+ struct regmap *map,
+ const char *name)
{
struct iio_dev *indio_dev;
struct kxsd9_state *st;
+ int ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
- spi_set_drvdata(spi, indio_dev);
+ st->dev = dev;
+ st->map = map;
- st->us = spi;
- mutex_init(&st->buf_lock);
indio_dev->channels = kxsd9_channels;
indio_dev->num_channels = ARRAY_SIZE(kxsd9_channels);
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = name;
+ indio_dev->dev.parent = dev;
indio_dev->info = &kxsd9_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = kxsd9_scan_masks;
+
+ /* Read the mounting matrix, if present */
+ ret = of_iio_read_mount_matrix(dev,
+ "mount-matrix",
+ &st->orientation);
+ if (ret)
+ return ret;
+
+ /* Fetch and turn on regulators */
+ st->regs[0].supply = kxsd9_reg_vdd;
+ st->regs[1].supply = kxsd9_reg_iovdd;
+ ret = devm_regulator_bulk_get(dev,
+ ARRAY_SIZE(st->regs),
+ st->regs);
+ if (ret) {
+ dev_err(dev, "Cannot get regulators\n");
+ return ret;
+ }
+ /* Default scaling */
+ st->scale = KXSD9_CTRL_C_FS_2G;
- spi->mode = SPI_MODE_0;
- spi_setup(spi);
kxsd9_power_up(st);
- return iio_device_register(indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev,
+ iio_pollfunc_store_time,
+ kxsd9_trigger_handler,
+ &kxsd9_buffer_setup_ops);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto err_power_down;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_cleanup_buffer;
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time. 20ms start-up time means 2000ms autosuspend,
+ * i.e. 2 seconds.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_power_down:
+ kxsd9_power_down(st);
+
+ return ret;
}
+EXPORT_SYMBOL(kxsd9_common_probe);
-static int kxsd9_remove(struct spi_device *spi)
+int kxsd9_common_remove(struct device *dev)
{
- iio_device_unregister(spi_get_drvdata(spi));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_device_unregister(indio_dev);
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ kxsd9_power_down(st);
return 0;
}
+EXPORT_SYMBOL(kxsd9_common_remove);
-static const struct spi_device_id kxsd9_id[] = {
- {"kxsd9", 0},
- { },
-};
-MODULE_DEVICE_TABLE(spi, kxsd9_id);
+#ifdef CONFIG_PM
+static int kxsd9_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = iio_priv(indio_dev);
-static struct spi_driver kxsd9_driver = {
- .driver = {
- .name = "kxsd9",
- },
- .probe = kxsd9_probe,
- .remove = kxsd9_remove,
- .id_table = kxsd9_id,
+ return kxsd9_power_down(st);
+}
+
+static int kxsd9_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct kxsd9_state *st = iio_priv(indio_dev);
+
+ return kxsd9_power_up(st);
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops kxsd9_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(kxsd9_runtime_suspend,
+ kxsd9_runtime_resume, NULL)
};
-module_spi_driver(kxsd9_driver);
+EXPORT_SYMBOL(kxsd9_dev_pm_ops);
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
+MODULE_DESCRIPTION("Kionix KXSD9 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/kxsd9.h b/drivers/iio/accel/kxsd9.h
new file mode 100644
index 000000000000..7e8a28168310
--- /dev/null
+++ b/drivers/iio/accel/kxsd9.h
@@ -0,0 +1,12 @@
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#define KXSD9_STATE_RX_SIZE 2
+#define KXSD9_STATE_TX_SIZE 2
+
+int kxsd9_common_probe(struct device *dev,
+ struct regmap *map,
+ const char *name);
+int kxsd9_common_remove(struct device *dev);
+
+extern const struct dev_pm_ops kxsd9_dev_pm_ops;
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
new file mode 100644
index 000000000000..4ea2ff623a6d
--- /dev/null
+++ b/drivers/iio/accel/mc3230.c
@@ -0,0 +1,211 @@
+/*
+ * mCube MC3230 3-Axis Accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * IIO driver for mCube MC3230; 7-bit I2C address: 0x4c.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MC3230_REG_XOUT 0x00
+#define MC3230_REG_YOUT 0x01
+#define MC3230_REG_ZOUT 0x02
+
+#define MC3230_REG_MODE 0x07
+#define MC3230_MODE_OPCON_MASK 0x03
+#define MC3230_MODE_OPCON_WAKE 0x01
+#define MC3230_MODE_OPCON_STANDBY 0x03
+
+#define MC3230_REG_CHIP_ID 0x18
+#define MC3230_CHIP_ID 0x01
+
+#define MC3230_REG_PRODUCT_CODE 0x3b
+#define MC3230_PRODUCT_CODE 0x19
+
+/*
+ * The accelerometer has one measurement range:
+ *
+ * -1.5g - +1.5g (8-bit, signed)
+ *
+ * scale = (1.5 + 1.5) * 9.81 / (2^8 - 1) = 0.115411765
+ */
+
+static const int mc3230_nscale = 115411765;
+
+#define MC3230_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec mc3230_channels[] = {
+ MC3230_CHANNEL(MC3230_REG_XOUT, X),
+ MC3230_CHANNEL(MC3230_REG_YOUT, Y),
+ MC3230_CHANNEL(MC3230_REG_ZOUT, Z),
+};
+
+struct mc3230_data {
+ struct i2c_client *client;
+};
+
+static int mc3230_set_opcon(struct mc3230_data *data, int opcon)
+{
+ int ret;
+ struct i2c_client *client = data->client;
+
+ ret = i2c_smbus_read_byte_data(client, MC3230_REG_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read mode reg: %d\n", ret);
+ return ret;
+ }
+
+ ret &= ~MC3230_MODE_OPCON_MASK;
+ ret |= opcon;
+
+ ret = i2c_smbus_write_byte_data(client, MC3230_REG_MODE, ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write mode reg: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mc3230_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mc3230_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_byte_data(data->client, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 7);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = mc3230_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mc3230_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mc3230_read_raw,
+};
+
+static int mc3230_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct mc3230_data *data;
+
+ /* First check chip-id and product-id */
+ ret = i2c_smbus_read_byte_data(client, MC3230_REG_CHIP_ID);
+ if (ret != MC3230_CHIP_ID)
+ return (ret < 0) ? ret : -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, MC3230_REG_PRODUCT_CODE);
+ if (ret != MC3230_PRODUCT_CODE)
+ return (ret < 0) ? ret : -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &mc3230_info;
+ indio_dev->name = "mc3230";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mc3230_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mc3230_channels);
+
+ ret = mc3230_set_opcon(data, MC3230_MODE_OPCON_WAKE);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ mc3230_set_opcon(data, MC3230_MODE_OPCON_STANDBY);
+ }
+
+ return ret;
+}
+
+static int mc3230_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return mc3230_set_opcon(iio_priv(indio_dev), MC3230_MODE_OPCON_STANDBY);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mc3230_suspend(struct device *dev)
+{
+ struct mc3230_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mc3230_set_opcon(data, MC3230_MODE_OPCON_STANDBY);
+}
+
+static int mc3230_resume(struct device *dev)
+{
+ struct mc3230_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mc3230_set_opcon(data, MC3230_MODE_OPCON_WAKE);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mc3230_pm_ops, mc3230_suspend, mc3230_resume);
+
+static const struct i2c_device_id mc3230_i2c_id[] = {
+ {"mc3230", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mc3230_i2c_id);
+
+static struct i2c_driver mc3230_driver = {
+ .driver = {
+ .name = "mc3230",
+ .pm = &mc3230_pm_ops,
+ },
+ .probe = mc3230_probe,
+ .remove = mc3230_remove,
+ .id_table = mc3230_i2c_id,
+};
+
+module_i2c_driver(mc3230_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("mCube MC3230 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
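As a quick check on the scale constant above: (1.5 + 1.5) * 9.81 / 255 ~= 0.115411765 m/s^2 per LSB, which the driver reports as 0/115411765 via IIO_VAL_INT_PLUS_NANO. A tiny stand-alone conversion sketch, in plain userspace C and with an invented raw reading, purely for illustration:

#include <stdio.h>

int main(void)
{
	int raw = 42;			/* example value from in_accel_x_raw */
	double scale = 115411765e-9;	/* 0.115411765 m/s^2 per LSB */

	printf("%d raw -> %.6f m/s^2\n", raw, raw * scale);
	return 0;
}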
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 0acdee516973..03beadf14ad3 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -251,6 +251,7 @@ static const struct i2c_device_id mma7660_i2c_id[] = {
{"mma7660", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
static const struct acpi_device_id mma7660_acpi_id[] = {
{"MMA7660", 0},
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index 97ccde722e7b..0abad6948201 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -154,7 +154,7 @@ static int mxc6255_probe(struct i2c_client *client,
return ret;
}
- if (chip_id != MXC6255_CHIP_ID) {
+ if ((chip_id & 0x1f) != MXC6255_CHIP_ID) {
dev_err(&client->dev, "Invalid chip id %x\n", chip_id);
return -ENODEV;
}
@@ -171,12 +171,14 @@ static int mxc6255_probe(struct i2c_client *client,
}
static const struct acpi_device_id mxc6255_acpi_match[] = {
+ {"MXC6225", 0},
{"MXC6255", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, mxc6255_acpi_match);
static const struct i2c_device_id mxc6255_id[] = {
+ {"mxc6225", 0},
{"mxc6255", 0},
{ }
};
diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c
index 4ae05fce9f24..31db00970fa0 100644
--- a/drivers/iio/accel/ssp_accel_sensor.c
+++ b/drivers/iio/accel/ssp_accel_sensor.c
@@ -74,7 +74,7 @@ static int ssp_accel_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static struct iio_info ssp_accel_iio_info = {
+static const struct iio_info ssp_accel_iio_info = {
.read_raw = &ssp_accel_read_raw,
.write_raw = &ssp_accel_write_raw,
};
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 767577298ee3..7edcf3238620 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -264,6 +264,15 @@ config LPC18XX_ADC
To compile this driver as a module, choose M here: the module will be
called lpc18xx_adc.
+config LTC2485
+ tristate "Linear Technology LTC2485 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Linear Technology LTC2485 ADC.

+
+ To compile this driver as a module, choose M here: the module will be
+ called ltc2485.
+
config MAX1027
tristate "Maxim max1027 ADC driver"
depends on SPI
@@ -317,6 +326,19 @@ config MCP3422
This driver can also be built as a module. If so, the module will be
called mcp3422.
+config MEDIATEK_MT6577_AUXADC
+ tristate "MediaTek AUXADC driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to enable support for the MediaTek mt65xx AUXADC.
+
+ The driver supports immediate mode operation to read from one of sixteen
+ channels (external or internal).
+
+ This driver can also be built as a module. If so, the module will be
+ called mt6577_auxadc.
+
config MEN_Z188_ADC
tristate "MEN 16z188 ADC IP Core support"
depends on MCB
@@ -397,6 +419,21 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config STX104
+ tristate "Apex Embedded Systems STX104 driver"
+ depends on X86 && ISA_BUS_API
+ select GPIOLIB
+ help
+ Say yes here to build support for the Apex Embedded Systems STX104
+ integrated analog PC/104 card.
+
+ This driver supports the 16 channels of single-ended (8 channels of
+ differential) analog inputs, 2 channels of analog output, 4 digital
+ inputs, and 4 digital outputs provided by the STX104.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
@@ -417,6 +454,18 @@ config TI_ADC0832
This driver can also be built as a module. If so, the module will be
called ti-adc0832.
+config TI_ADC12138
+ tristate "Texas Instruments ADC12130/ADC12132/ADC12138"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADC12130,
+ ADC12132 and ADC12138 chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc12138.
+
config TI_ADC128S052
tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
depends on SPI
@@ -427,6 +476,18 @@ config TI_ADC128S052
This driver can also be built as a module. If so, the module will be
called ti-adc128s052.
+config TI_ADC161S626
+ tristate "Texas Instruments ADC161S626 1-channel differential ADC"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Texas Instruments ADC141S626
+ and ADC161S626 chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc161s626.
+
config TI_ADS1015
tristate "Texas Instruments ADS1015 ADC"
depends on I2C && !SENSORS_ADS1015
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0ba0d500eedb..7a40c04c311f 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -27,10 +27,12 @@ obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
+obj-$(CONFIG_LTC2485) += ltc2485.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_NAU7802) += nau7802.o
@@ -38,9 +40,12 @@ obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
+obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
+obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c0f6a98fd9bd..b8d5cfd57ec4 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -481,7 +481,7 @@ error_free_gpios:
if (!st->fixed_addr)
gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
error_disable_reg:
- if (!IS_ERR_OR_NULL(st->reg))
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
return ret;
@@ -496,7 +496,7 @@ static int ad7266_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
if (!st->fixed_addr)
gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
- if (!IS_ERR_OR_NULL(st->reg))
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
return 0;
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 10ec8fce395f..e399bf04c73a 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -239,16 +239,16 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
- ret = -EBUSY;
- } else {
- if (chan->address == AD7298_CH_TEMP)
- ret = ad7298_scan_temp(st, val);
- else
- ret = ad7298_scan_direct(st, chan->address);
- }
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ if (chan->address == AD7298_CH_TEMP)
+ ret = ad7298_scan_temp(st, val);
+ else
+ ret = ad7298_scan_direct(st, chan->address);
+
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
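The ad7298 hunk above (and the ad7793 one below) replaces open-coded checks on indio_dev->mlock and currentmode with the claim/release helpers. The resulting shape of a read_raw() that needs exclusive direct-mode access looks like this sketch, with the device-specific conversion stubbed out as a placeholder:

#include <linux/iio/iio.h>

/* Placeholder for the device-specific single conversion. */
static int example_do_conversion(struct iio_dev *indio_dev, int *val)
{
	*val = 0;
	return 0;
}

static int example_read_single(struct iio_dev *indio_dev, int *val)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);	/* -EBUSY while buffering */
	if (ret)
		return ret;

	ret = example_do_conversion(indio_dev, val);

	iio_device_release_direct_mode(indio_dev);

	return ret;
}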
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 847789bae821..e6706a09e100 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -519,11 +519,9 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
int ret, i;
unsigned int tmp;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -548,7 +546,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 0438c68015e8..bbdac07f4aaa 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -113,6 +113,7 @@
#define AT91_ADC_TSMR_TSAV (3 << 4) /* Averages samples */
#define AT91_ADC_TSMR_TSAV_(x) ((x) << 4)
#define AT91_ADC_TSMR_SCTIM (0x0f << 16) /* Switch closure time */
+#define AT91_ADC_TSMR_SCTIM_(x) ((x) << 16)
#define AT91_ADC_TSMR_PENDBC (0x0f << 28) /* Pen Debounce time */
#define AT91_ADC_TSMR_PENDBC_(x) ((x) << 28)
#define AT91_ADC_TSMR_NOTSDMA (1 << 22) /* No Touchscreen DMA */
@@ -150,6 +151,7 @@
#define MAX_RLPOS_BITS 10
#define TOUCH_SAMPLE_PERIOD_US_RL 10000 /* 10ms, the SoC can't keep up with 2ms */
#define TOUCH_SHTIM 0xa
+#define TOUCH_SCTIM_US 10 /* 10us for the Touchscreen Switches Closure Time */
/**
* struct at91_adc_reg_desc - Various information related to the registers
@@ -1001,7 +1003,9 @@ static void atmel_ts_close(struct input_dev *dev)
static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
{
+ struct iio_dev *idev = iio_priv_to_dev(st);
u32 reg = 0;
+ u32 tssctim = 0;
int i = 0;
/* a Pen Detect Debounce Time is necessary for the ADC Touch to avoid
@@ -1034,11 +1038,20 @@ static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
return 0;
}
+ /* Touchscreen Switches Closure time needed to allow the value to
+ * stabilize.
+ * Switch Closure Time = (TSSCTIM * 4) ADC clock periods
+ */
+ tssctim = DIV_ROUND_UP(TOUCH_SCTIM_US * adc_clk_khz / 1000, 4);
+ dev_dbg(&idev->dev, "adc_clk at: %d kHz, tssctim at: %d\n",
+ adc_clk_khz, tssctim);
+
if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS;
else
reg = AT91_ADC_TSMR_TSMODE_5WIRE;
+ reg |= AT91_ADC_TSMR_SCTIM_(tssctim) & AT91_ADC_TSMR_SCTIM;
reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average)
& AT91_ADC_TSMR_TSAV;
reg |= AT91_ADC_TSMR_PENDBC_(st->ts_pendbc) & AT91_ADC_TSMR_PENDBC;
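To make the new switch-closure computation concrete, assume a 1 MHz ADC clock (the clock rate is only an example): 10 us * 1000 kHz / 1000 = 10 ADC clock periods are needed, and since the field counts in units of 4 periods, TSSCTIM = DIV_ROUND_UP(10, 4) = 3, i.e. 12 periods programmed. The same arithmetic as a tiny helper (helper name invented):

#include <linux/kernel.h>

#define TOUCH_SCTIM_US 10	/* same value as in the hunk above */

/* Example: an assumed 1 MHz ADC clock needs 10 periods -> TSSCTIM = 3. */
static u32 example_tssctim(u32 adc_clk_khz)
{
	return DIV_ROUND_UP(TOUCH_SCTIM_US * adc_clk_khz / 1000, 4);
}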
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 955f3fdaf519..59b7d76e1ad2 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -114,7 +114,6 @@ struct ina2xx_chip_info {
struct mutex state_lock;
unsigned int shunt_resistor;
int avg;
- s64 prev_ns; /* track buffer capture time, check for underruns */
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
bool allow_async_readout;
@@ -509,8 +508,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
iio_push_to_buffers_with_timestamp(indio_dev,
(unsigned int *)data, time_a);
- chip->prev_ns = time_a;
-
return (unsigned long)(time_b - time_a) / 1000;
};
@@ -554,8 +551,6 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- chip->prev_ns = iio_get_time_ns(indio_dev);
-
chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
"%s:%d-%uus", indio_dev->name, indio_dev->id,
sampling_us);
diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c
new file mode 100644
index 000000000000..eab91f12454a
--- /dev/null
+++ b/drivers/iio/adc/ltc2485.c
@@ -0,0 +1,148 @@
+/*
+ * ltc2485.c - Driver for Linear Technology LTC2485 ADC
+ *
+ * Copyright (C) 2016 Alison Schofield <amsfield22@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheet: http://cds.linear.com/docs/en/datasheet/2485fd.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Power-on configuration: rejects both 50/60Hz, operates at 1x speed */
+#define LTC2485_CONFIG_DEFAULT 0
+
+struct ltc2485_data {
+ struct i2c_client *client;
+ ktime_t time_prev; /* last conversion */
+};
+
+static void ltc2485_wait_conv(struct ltc2485_data *data)
+{
+ const unsigned int conv_time = 147; /* conversion time ms */
+ unsigned int time_elapsed;
+
+ /* delay if the conversion time has not elapsed since the last read or write */
+ time_elapsed = ktime_ms_delta(ktime_get(), data->time_prev);
+
+ if (time_elapsed < conv_time)
+ msleep(conv_time - time_elapsed);
+}
+
+static int ltc2485_read(struct ltc2485_data *data, int *val)
+{
+ struct i2c_client *client = data->client;
+ __be32 buf = 0;
+ int ret;
+
+ ltc2485_wait_conv(data);
+
+ ret = i2c_master_recv(client, (char *)&buf, 4);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c_master_recv failed\n");
+ return ret;
+ }
+ data->time_prev = ktime_get();
+ *val = sign_extend32(be32_to_cpu(buf) >> 6, 24);
+
+ return ret;
+}
+
+static int ltc2485_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2485_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_RAW) {
+ ret = ltc2485_read(data, val);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ } else if (mask == IIO_CHAN_INFO_SCALE) {
+ *val = 5000; /* on-board vref in millivolts */
+ *val2 = 25; /* 25 (24 + sign) data bits */
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec ltc2485_channel[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE)
+ },
+};
+
+static const struct iio_info ltc2485_info = {
+ .read_raw = ltc2485_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltc2485_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2485_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ltc2485_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ltc2485_channel;
+ indio_dev->num_channels = ARRAY_SIZE(ltc2485_channel);
+
+ ret = i2c_smbus_write_byte(data->client, LTC2485_CONFIG_DEFAULT);
+ if (ret < 0)
+ return ret;
+
+ data->time_prev = ktime_get();
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id ltc2485_id[] = {
+ { "ltc2485", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc2485_id);
+
+static struct i2c_driver ltc2485_driver = {
+ .driver = {
+ .name = "ltc2485",
+ },
+ .probe = ltc2485_probe,
+ .id_table = ltc2485_id,
+};
+module_i2c_driver(ltc2485_driver);
+
+MODULE_AUTHOR("Alison Schofield <amsfield22@gmail.com>");
+MODULE_DESCRIPTION("Linear Technology LTC2485 ADC driver");
+MODULE_LICENSE("GPL v2");
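The LTC2485 returns its conversion left-justified in a 32-bit word: the driver drops the low six bits, sign-extends at bit 24, and reports a scale of 5000 mV over 2^25. A hedged helper showing that unpacking end to end; the function is illustrative, not part of the driver:

#include <linux/bitops.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Sketch: convert one raw 32-bit LTC2485 word (host byte order) to microvolts. */
static s64 example_ltc2485_to_uV(u32 word)
{
	int raw = sign_extend32(word >> 6, 24);		/* 24 data bits plus sign */

	/* scale = 5000 mV / 2^25, so uV = raw * 5000000 / 2^25 */
	return div_s64((s64)raw * 5000000LL, 1 << 25);
}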
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index d095efe1ba14..8f3606de4eaf 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -78,7 +78,7 @@ static int z188_iio_read_raw(struct iio_dev *iio_dev,
return ret;
}
-static struct iio_info z188_adc_info = {
+static const struct iio_info z188_adc_info = {
.read_raw = &z188_iio_read_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
new file mode 100644
index 000000000000..2d104c828041
--- /dev/null
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/iopoll.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+/* Register definitions */
+#define MT6577_AUXADC_CON0 0x00
+#define MT6577_AUXADC_CON1 0x04
+#define MT6577_AUXADC_CON2 0x10
+#define MT6577_AUXADC_STA BIT(0)
+
+#define MT6577_AUXADC_DAT0 0x14
+#define MT6577_AUXADC_RDY0 BIT(12)
+
+#define MT6577_AUXADC_MISC 0x94
+#define MT6577_AUXADC_PDN_EN BIT(14)
+
+#define MT6577_AUXADC_DAT_MASK 0xfff
+#define MT6577_AUXADC_SLEEP_US 1000
+#define MT6577_AUXADC_TIMEOUT_US 10000
+#define MT6577_AUXADC_POWER_READY_MS 1
+#define MT6577_AUXADC_SAMPLE_READY_US 25
+
+struct mt6577_auxadc_device {
+ void __iomem *reg_base;
+ struct clk *adc_clk;
+ struct mutex lock;
+};
+
+#define MT6577_AUXADC_CHANNEL(idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+}
+
+static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
+ MT6577_AUXADC_CHANNEL(0),
+ MT6577_AUXADC_CHANNEL(1),
+ MT6577_AUXADC_CHANNEL(2),
+ MT6577_AUXADC_CHANNEL(3),
+ MT6577_AUXADC_CHANNEL(4),
+ MT6577_AUXADC_CHANNEL(5),
+ MT6577_AUXADC_CHANNEL(6),
+ MT6577_AUXADC_CHANNEL(7),
+ MT6577_AUXADC_CHANNEL(8),
+ MT6577_AUXADC_CHANNEL(9),
+ MT6577_AUXADC_CHANNEL(10),
+ MT6577_AUXADC_CHANNEL(11),
+ MT6577_AUXADC_CHANNEL(12),
+ MT6577_AUXADC_CHANNEL(13),
+ MT6577_AUXADC_CHANNEL(14),
+ MT6577_AUXADC_CHANNEL(15),
+};
+
+static inline void mt6577_auxadc_mod_reg(void __iomem *reg,
+ u32 or_mask, u32 and_mask)
+{
+ u32 val;
+
+ val = readl(reg);
+ val |= or_mask;
+ val &= ~and_mask;
+ writel(val, reg);
+}
+
+static int mt6577_auxadc_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ u32 val;
+ void __iomem *reg_channel;
+ int ret;
+ struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
+
+ reg_channel = adc_dev->reg_base + MT6577_AUXADC_DAT0 +
+ chan->channel * 0x04;
+
+ mutex_lock(&adc_dev->lock);
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_CON1,
+ 0, 1 << chan->channel);
+
+ /* read channel and make sure old ready bit == 0 */
+ ret = readl_poll_timeout(reg_channel, val,
+ ((val & MT6577_AUXADC_RDY0) == 0),
+ MT6577_AUXADC_SLEEP_US,
+ MT6577_AUXADC_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(indio_dev->dev.parent,
+ "wait for channel[%d] ready bit clear time out\n",
+ chan->channel);
+ goto err_timeout;
+ }
+
+ /* set bit to trigger sample */
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_CON1,
+ 1 << chan->channel, 0);
+
+ /* delay to let the hardware sample the channel data */
+ udelay(MT6577_AUXADC_SAMPLE_READY_US);
+
+ /* check MT6577_AUXADC_CON2 to see whether the AUXADC is idle */
+ ret = readl_poll_timeout(adc_dev->reg_base + MT6577_AUXADC_CON2, val,
+ ((val & MT6577_AUXADC_STA) == 0),
+ MT6577_AUXADC_SLEEP_US,
+ MT6577_AUXADC_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(indio_dev->dev.parent,
+ "wait for auxadc idle time out\n");
+ goto err_timeout;
+ }
+
+ /* read channel and make sure ready bit == 1 */
+ ret = readl_poll_timeout(reg_channel, val,
+ ((val & MT6577_AUXADC_RDY0) != 0),
+ MT6577_AUXADC_SLEEP_US,
+ MT6577_AUXADC_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(indio_dev->dev.parent,
+ "wait for channel[%d] data ready time out\n",
+ chan->channel);
+ goto err_timeout;
+ }
+
+ /* read data */
+ val = readl(reg_channel) & MT6577_AUXADC_DAT_MASK;
+
+ mutex_unlock(&adc_dev->lock);
+
+ return val;
+
+err_timeout:
+
+ mutex_unlock(&adc_dev->lock);
+
+ return -ETIMEDOUT;
+}
+
+static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_PROCESSED:
+ *val = mt6577_auxadc_read(indio_dev, chan);
+ if (*val < 0) {
+ dev_err(indio_dev->dev.parent,
+ "failed to sample data on channel[%d]\n",
+ chan->channel);
+ return *val;
+ }
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mt6577_auxadc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &mt6577_auxadc_read_raw,
+};
+
+static int mt6577_auxadc_probe(struct platform_device *pdev)
+{
+ struct mt6577_auxadc_device *adc_dev;
+ unsigned long adc_clk_rate;
+ struct resource *res;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc_dev = iio_priv(indio_dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->info = &mt6577_auxadc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mt6577_auxadc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mt6577_auxadc_iio_channels);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adc_dev->reg_base)) {
+ dev_err(&pdev->dev, "failed to get auxadc base address\n");
+ return PTR_ERR(adc_dev->reg_base);
+ }
+
+ adc_dev->adc_clk = devm_clk_get(&pdev->dev, "main");
+ if (IS_ERR(adc_dev->adc_clk)) {
+ dev_err(&pdev->dev, "failed to get auxadc clock\n");
+ return PTR_ERR(adc_dev->adc_clk);
+ }
+
+ ret = clk_prepare_enable(adc_dev->adc_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable auxadc clock\n");
+ return ret;
+ }
+
+ adc_clk_rate = clk_get_rate(adc_dev->adc_clk);
+ if (!adc_clk_rate) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "null clock rate\n");
+ goto err_disable_clk;
+ }
+
+ mutex_init(&adc_dev->lock);
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ MT6577_AUXADC_PDN_EN, 0);
+ mdelay(MT6577_AUXADC_POWER_READY_MS);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register iio device\n");
+ goto err_power_off;
+ }
+
+ return 0;
+
+err_power_off:
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+err_disable_clk:
+ clk_disable_unprepare(adc_dev->adc_clk);
+ return ret;
+}
+
+static int mt6577_auxadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+
+ clk_disable_unprepare(adc_dev->adc_clk);
+
+ return 0;
+}
+
+static const struct of_device_id mt6577_auxadc_of_match[] = {
+ { .compatible = "mediatek,mt2701-auxadc", },
+ { .compatible = "mediatek,mt8173-auxadc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mt6577_auxadc_of_match);
+
+static struct platform_driver mt6577_auxadc_driver = {
+ .driver = {
+ .name = "mt6577-auxadc",
+ .of_match_table = mt6577_auxadc_of_match,
+ },
+ .probe = mt6577_auxadc_probe,
+ .remove = mt6577_auxadc_remove,
+};
+module_platform_driver(mt6577_auxadc_driver);
+
+MODULE_AUTHOR("Zhiyong Tao <zhiyong.tao@mediatek.com>");
+MODULE_DESCRIPTION("MTK AUXADC Device Driver");
+MODULE_LICENSE("GPL v2");
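Because the channels expose IIO_CHAN_INFO_PROCESSED, other kernel code can pull readings through the IIO consumer interface rather than touching the registers. A minimal hypothetical consumer sketch; the channel mapping name "thermal" is an assumption, not something defined by this driver:

#include <linux/err.h>
#include <linux/iio/consumer.h>

static int example_read_auxadc_channel(struct device *dev)
{
	struct iio_channel *chan;
	int val, ret;

	chan = iio_channel_get(dev, "thermal");		/* hypothetical mapping */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_processed(chan, &val);
	iio_channel_put(chan);

	return ret < 0 ? ret : val;
}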
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index db9b829ccf0d..08f446695f97 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -197,7 +197,7 @@ static irqreturn_t nau7802_eoc_trigger(int irq, void *private)
if (st->conversion_count < NAU7802_MIN_CONVERSIONS)
st->conversion_count++;
if (st->conversion_count >= NAU7802_MIN_CONVERSIONS)
- complete_all(&st->value_ok);
+ complete(&st->value_ok);
return IRQ_HANDLED;
}
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/adc/stx104.c
index bebbd00304ce..7e3645749eaf 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -1,5 +1,5 @@
/*
- * DAC driver for the Apex Embedded Systems STX104
+ * IIO driver for the Apex Embedded Systems STX104
* Copyright (C) 2016 William Breathitt Gray
*
* This program is free software; you can redistribute it and/or modify
@@ -20,19 +20,30 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
-#define STX104_NUM_CHAN 2
-
-#define STX104_CHAN(chan) { \
+#define STX104_OUT_CHAN(chan) { \
.type = IIO_VOLTAGE, \
.channel = chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.indexed = 1, \
.output = 1 \
}
+#define STX104_IN_CHAN(chan, diff) { \
+ .type = IIO_VOLTAGE, \
+ .channel = chan, \
+ .channel2 = chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .differential = diff \
+}
+
+#define STX104_NUM_OUT_CHAN 2
#define STX104_EXTENT 16
@@ -47,8 +58,8 @@ MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
* @base: base port address of the IIO device
*/
struct stx104_iio {
- unsigned chan_out_states[STX104_NUM_CHAN];
- unsigned base;
+ unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
+ unsigned int base;
};
/**
@@ -79,28 +90,95 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
struct stx104_iio *const priv = iio_priv(indio_dev);
+ unsigned int adc_config;
+ int adbu;
+ int gain;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ /* get gain configuration */
+ adc_config = inb(priv->base + 11);
+ gain = adc_config & 0x3;
+
+ *val = 1 << gain;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->output) {
+ *val = priv->chan_out_states[chan->channel];
+ return IIO_VAL_INT;
+ }
+
+ /* select ADC channel */
+ outb(chan->channel | (chan->channel << 4), priv->base + 2);
+
+ /* trigger ADC sample capture and wait for completion */
+ outb(0, priv->base);
+ while (inb(priv->base + 8) & BIT(7));
+
+ *val = inw(priv->base);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ /* get ADC bipolar/unipolar configuration */
+ adc_config = inb(priv->base + 11);
+ adbu = !(adc_config & BIT(2));
+
+ *val = -32768 * adbu;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* get ADC bipolar/unipolar and gain configuration */
+ adc_config = inb(priv->base + 11);
+ adbu = !(adc_config & BIT(2));
+ gain = adc_config & 0x3;
+
+ *val = 5;
+ *val2 = 15 - adbu + gain;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
- if (mask != IIO_CHAN_INFO_RAW)
- return -EINVAL;
-
- *val = priv->chan_out_states[chan->channel];
-
- return IIO_VAL_INT;
+ return -EINVAL;
}
static int stx104_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct stx104_iio *const priv = iio_priv(indio_dev);
- const unsigned chan_addr_offset = 2 * chan->channel;
- if (mask != IIO_CHAN_INFO_RAW)
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ /* Only four gain states (x1, x2, x4, x8) */
+ switch (val) {
+ case 1:
+ outb(0, priv->base + 11);
+ break;
+ case 2:
+ outb(1, priv->base + 11);
+ break;
+ case 4:
+ outb(2, priv->base + 11);
+ break;
+ case 8:
+ outb(3, priv->base + 11);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->output) {
+ /* DAC can only accept up to a 16-bit value */
+ if ((unsigned int)val > 65535)
+ return -EINVAL;
+
+ priv->chan_out_states[chan->channel] = val;
+ outw(val, priv->base + 4 + 2 * chan->channel);
+
+ return 0;
+ }
return -EINVAL;
+ }
- priv->chan_out_states[chan->channel] = val;
- outw(val, priv->base + 4 + chan_addr_offset);
-
- return 0;
+ return -EINVAL;
}
static const struct iio_info stx104_info = {
@@ -109,9 +187,22 @@ static const struct iio_info stx104_info = {
.write_raw = stx104_write_raw
};
-static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
- STX104_CHAN(0),
- STX104_CHAN(1)
+/* single-ended input channels configuration */
+static const struct iio_chan_spec stx104_channels_sing[] = {
+ STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
+ STX104_IN_CHAN(0, 0), STX104_IN_CHAN(1, 0), STX104_IN_CHAN(2, 0),
+ STX104_IN_CHAN(3, 0), STX104_IN_CHAN(4, 0), STX104_IN_CHAN(5, 0),
+ STX104_IN_CHAN(6, 0), STX104_IN_CHAN(7, 0), STX104_IN_CHAN(8, 0),
+ STX104_IN_CHAN(9, 0), STX104_IN_CHAN(10, 0), STX104_IN_CHAN(11, 0),
+ STX104_IN_CHAN(12, 0), STX104_IN_CHAN(13, 0), STX104_IN_CHAN(14, 0),
+ STX104_IN_CHAN(15, 0)
+};
+/* differential input channels configuration */
+static const struct iio_chan_spec stx104_channels_diff[] = {
+ STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
+ STX104_IN_CHAN(0, 1), STX104_IN_CHAN(1, 1), STX104_IN_CHAN(2, 1),
+ STX104_IN_CHAN(3, 1), STX104_IN_CHAN(4, 1), STX104_IN_CHAN(5, 1),
+ STX104_IN_CHAN(6, 1), STX104_IN_CHAN(7, 1)
};
static int stx104_gpio_get_direction(struct gpio_chip *chip,
@@ -204,13 +295,27 @@ static int stx104_probe(struct device *dev, unsigned int id)
indio_dev->info = &stx104_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = stx104_channels;
- indio_dev->num_channels = STX104_NUM_CHAN;
+
+ /* determine if differential inputs */
+ if (inb(base[id] + 8) & BIT(5)) {
+ indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
+ indio_dev->channels = stx104_channels_diff;
+ } else {
+ indio_dev->num_channels = ARRAY_SIZE(stx104_channels_sing);
+ indio_dev->channels = stx104_channels_sing;
+ }
+
indio_dev->name = dev_name(dev);
priv = iio_priv(indio_dev);
priv->base = base[id];
+ /* configure device for software trigger operation */
+ outb(0, base[id] + 9);
+
+ /* initialize gain setting to x1 */
+ outb(0, base[id] + 11);
+
/* initialize DAC output to 0V */
outw(0, base[id] + 4);
outw(0, base[id] + 6);
@@ -271,5 +376,5 @@ static struct isa_driver stx104_driver = {
module_isa_driver(stx104_driver, num_stx104);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
-MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver");
+MODULE_DESCRIPTION("Apex Embedded Systems STX104 IIO driver");
MODULE_LICENSE("GPL v2");
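Putting the new offset and FRACTIONAL_LOG2 scale together: the raw value is shifted by -32768 counts when adbu is set and scaled by 5 V / 2^(15 - adbu + gain). A hedged helper that mirrors that math; adbu and gain are the same values read_raw() decodes from base + 11, and the helper itself is illustrative only:

/*
 * raw:  16-bit ADC word read from the card
 * adbu: 1 or 0, as computed in stx104_read_raw() from bit 2 of base + 11
 * gain: log2 gain code 0..3, from the low two bits of base + 11
 */
static long long example_stx104_uV(unsigned int raw, int adbu, int gain)
{
	long long counts = (long long)raw - 32768LL * adbu;

	return counts * 5000000LL / (1LL << (15 - adbu + gain));
}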
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
new file mode 100644
index 000000000000..072f03bfe6a0
--- /dev/null
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -0,0 +1,552 @@
+/*
+ * ADC12130/ADC12132/ADC12138 12-bit plus sign ADC driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.ti.com/lit/ds/symlink/adc12138.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define ADC12138_MODE_AUTO_CAL 0x08
+#define ADC12138_MODE_READ_STATUS 0x0c
+#define ADC12138_MODE_ACQUISITION_TIME_6 0x0e
+#define ADC12138_MODE_ACQUISITION_TIME_10 0x4e
+#define ADC12138_MODE_ACQUISITION_TIME_18 0x8e
+#define ADC12138_MODE_ACQUISITION_TIME_34 0xce
+
+#define ADC12138_STATUS_CAL BIT(6)
+
+enum {
+ adc12130,
+ adc12132,
+ adc12138,
+};
+
+struct adc12138 {
+ struct spi_device *spi;
+ unsigned int id;
+ /* conversion clock */
+ struct clk *cclk;
+ /* positive analog voltage reference */
+ struct regulator *vref_p;
+ /* negative analog voltage reference */
+ struct regulator *vref_n;
+ struct mutex lock;
+ struct completion complete;
+ /* The number of cclk periods for the S/H's acquisition time */
+ unsigned int acquisition_time;
+
+ u8 tx_buf[2] ____cacheline_aligned;
+ u8 rx_buf[2];
+};
+
+#define ADC12138_VOLTAGE_CHANNEL(chan) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = chan, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .shift = 3, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADC12138_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .differential = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 13, \
+ .storagebits = 16, \
+ .shift = 3, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adc12132_channels[] = {
+ ADC12138_VOLTAGE_CHANNEL(0),
+ ADC12138_VOLTAGE_CHANNEL(1),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec adc12138_channels[] = {
+ ADC12138_VOLTAGE_CHANNEL(0),
+ ADC12138_VOLTAGE_CHANNEL(1),
+ ADC12138_VOLTAGE_CHANNEL(2),
+ ADC12138_VOLTAGE_CHANNEL(3),
+ ADC12138_VOLTAGE_CHANNEL(4),
+ ADC12138_VOLTAGE_CHANNEL(5),
+ ADC12138_VOLTAGE_CHANNEL(6),
+ ADC12138_VOLTAGE_CHANNEL(7),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
+ ADC12138_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+};
+
+static int adc12138_mode_programming(struct adc12138 *adc, u8 mode,
+ void *rx_buf, int len)
+{
+ struct spi_transfer xfer = {
+ .tx_buf = adc->tx_buf,
+ .rx_buf = adc->rx_buf,
+ .len = len,
+ };
+ int ret;
+
+ /* Skip unused bits for ADC12130 and ADC12132 */
+ if (adc->id != adc12138)
+ mode = (mode & 0xc0) | ((mode & 0x0f) << 2);
+
+ adc->tx_buf[0] = mode;
+
+ ret = spi_sync_transfer(adc->spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ memcpy(rx_buf, adc->rx_buf, len);
+
+ return 0;
+}
+
+static int adc12138_read_status(struct adc12138 *adc)
+{
+ u8 rx_buf[2];
+ int ret;
+
+ ret = adc12138_mode_programming(adc, ADC12138_MODE_READ_STATUS,
+ rx_buf, 2);
+ if (ret)
+ return ret;
+
+ return (rx_buf[0] << 1) | (rx_buf[1] >> 7);
+}
+
+static int __adc12138_start_conv(struct adc12138 *adc,
+ struct iio_chan_spec const *channel,
+ void *data, int len)
+
+{
+ const u8 ch_to_mux[] = { 0, 4, 1, 5, 2, 6, 3, 7 };
+ u8 mode = (ch_to_mux[channel->channel] << 4) |
+ (channel->differential ? 0 : 0x80);
+
+ return adc12138_mode_programming(adc, mode, data, len);
+}
+
+static int adc12138_start_conv(struct adc12138 *adc,
+ struct iio_chan_spec const *channel)
+{
+ u8 trash;
+
+ return __adc12138_start_conv(adc, channel, &trash, 1);
+}
+
+static int adc12138_start_and_read_conv(struct adc12138 *adc,
+ struct iio_chan_spec const *channel,
+ __be16 *data)
+{
+ return __adc12138_start_conv(adc, channel, data, 2);
+}
+
+static int adc12138_read_conv_data(struct adc12138 *adc, __be16 *value)
+{
+ /* Issue a read status instruction and read previous conversion data */
+ return adc12138_mode_programming(adc, ADC12138_MODE_READ_STATUS,
+ value, sizeof(*value));
+}
+
+static int adc12138_wait_eoc(struct adc12138 *adc, unsigned long timeout)
+{
+ if (!wait_for_completion_timeout(&adc->complete, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int adc12138_adc_conversion(struct adc12138 *adc,
+ struct iio_chan_spec const *channel,
+ __be16 *value)
+{
+ int ret;
+
+ reinit_completion(&adc->complete);
+
+ ret = adc12138_start_conv(adc, channel);
+ if (ret)
+ return ret;
+
+ ret = adc12138_wait_eoc(adc, msecs_to_jiffies(100));
+ if (ret)
+ return ret;
+
+ return adc12138_read_conv_data(adc, value);
+}
+
+static int adc12138_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct adc12138 *adc = iio_priv(iio);
+ int ret;
+ __be16 data;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ ret = adc12138_adc_conversion(adc, channel, &data);
+ mutex_unlock(&adc->lock);
+ if (ret)
+ return ret;
+
+ *value = sign_extend32(be16_to_cpu(data) >> 3, 12);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(adc->vref_p);
+ if (ret < 0)
+ return ret;
+ *value = ret;
+
+ if (!IS_ERR(adc->vref_n)) {
+ ret = regulator_get_voltage(adc->vref_n);
+ if (ret < 0)
+ return ret;
+ *value -= ret;
+ }
+
+ /* convert regulator output voltage to mV */
+ *value /= 1000;
+ *shift = channel->scan_type.realbits - 1;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ if (!IS_ERR(adc->vref_n)) {
+ *value = regulator_get_voltage(adc->vref_n);
+ if (*value < 0)
+ return *value;
+ } else {
+ *value = 0;
+ }
+
+ /* convert regulator output voltage to mV */
+ *value /= 1000;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info adc12138_info = {
+ .read_raw = adc12138_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int adc12138_init(struct adc12138 *adc)
+{
+ int ret;
+ int status;
+ u8 mode;
+ u8 trash;
+
+ reinit_completion(&adc->complete);
+
+ ret = adc12138_mode_programming(adc, ADC12138_MODE_AUTO_CAL, &trash, 1);
+ if (ret)
+ return ret;
+
+ /* data output at this time has no significance */
+ status = adc12138_read_status(adc);
+ if (status < 0)
+ return status;
+
+ adc12138_wait_eoc(adc, msecs_to_jiffies(100));
+
+ status = adc12138_read_status(adc);
+ if (status & ADC12138_STATUS_CAL) {
+ dev_warn(&adc->spi->dev,
+ "Auto Cal sequence is still in progress: %#x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (adc->acquisition_time) {
+ case 6:
+ mode = ADC12138_MODE_ACQUISITION_TIME_6;
+ break;
+ case 10:
+ mode = ADC12138_MODE_ACQUISITION_TIME_10;
+ break;
+ case 18:
+ mode = ADC12138_MODE_ACQUISITION_TIME_18;
+ break;
+ case 34:
+ mode = ADC12138_MODE_ACQUISITION_TIME_34;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adc12138_mode_programming(adc, mode, &trash, 1);
+}
+
+static irqreturn_t adc12138_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc12138 *adc = iio_priv(indio_dev);
+ __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
+ __be16 trash;
+ int ret;
+ int scan_index;
+ int i = 0;
+
+ mutex_lock(&adc->lock);
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+
+ reinit_completion(&adc->complete);
+
+ ret = adc12138_start_and_read_conv(adc, scan_chan,
+ i ? &data[i - 1] : &trash);
+ if (ret) {
+ dev_warn(&adc->spi->dev,
+ "failed to start conversion\n");
+ goto out;
+ }
+
+ ret = adc12138_wait_eoc(adc, msecs_to_jiffies(100));
+ if (ret) {
+ dev_warn(&adc->spi->dev, "wait eoc timeout\n");
+ goto out;
+ }
+
+ i++;
+ }
+
+ if (i) {
+ ret = adc12138_read_conv_data(adc, &data[i - 1]);
+ if (ret) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adc12138_eoc_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct adc12138 *adc = iio_priv(indio_dev);
+
+ complete(&adc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int adc12138_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adc12138 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ adc->id = spi_get_device_id(spi)->driver_data;
+ mutex_init(&adc->lock);
+ init_completion(&adc->complete);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adc12138_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ switch (adc->id) {
+ case adc12130:
+ case adc12132:
+ indio_dev->channels = adc12132_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc12132_channels);
+ break;
+ case adc12138:
+ indio_dev->channels = adc12138_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc12138_channels);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(spi->dev.of_node, "ti,acquisition-time",
+ &adc->acquisition_time);
+ if (ret)
+ adc->acquisition_time = 10;
+
+ adc->cclk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(adc->cclk))
+ return PTR_ERR(adc->cclk);
+
+ adc->vref_p = devm_regulator_get(&spi->dev, "vref-p");
+ if (IS_ERR(adc->vref_p))
+ return PTR_ERR(adc->vref_p);
+
+ adc->vref_n = devm_regulator_get_optional(&spi->dev, "vref-n");
+ if (IS_ERR(adc->vref_n)) {
+ /*
+ * Assume vref_n is 0V if an optional regulator is not
+ * specified, otherwise return the error code.
+ */
+ ret = PTR_ERR(adc->vref_n);
+ if (ret != -ENODEV)
+ return ret;
+ }
+
+ ret = devm_request_irq(&spi->dev, spi->irq, adc12138_eoc_handler,
+ IRQF_TRIGGER_RISING, indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adc->cclk);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(adc->vref_p);
+ if (ret)
+ goto err_clk_disable;
+
+ if (!IS_ERR(adc->vref_n)) {
+ ret = regulator_enable(adc->vref_n);
+ if (ret)
+ goto err_vref_p_disable;
+ }
+
+ ret = adc12138_init(adc);
+ if (ret)
+ goto err_vref_n_disable;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ adc12138_trigger_handler, NULL);
+ if (ret)
+ goto err_vref_n_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_buffer_cleanup;
+
+ return 0;
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_vref_n_disable:
+ if (!IS_ERR(adc->vref_n))
+ regulator_disable(adc->vref_n);
+err_vref_p_disable:
+ regulator_disable(adc->vref_p);
+err_clk_disable:
+ clk_disable_unprepare(adc->cclk);
+
+ return ret;
+}
+
+static int adc12138_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adc12138 *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (!IS_ERR(adc->vref_n))
+ regulator_disable(adc->vref_n);
+ regulator_disable(adc->vref_p);
+ clk_disable_unprepare(adc->cclk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id adc12138_dt_ids[] = {
+ { .compatible = "ti,adc12130", },
+ { .compatible = "ti,adc12132", },
+ { .compatible = "ti,adc12138", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adc12138_dt_ids);
+
+#endif
+
+static const struct spi_device_id adc12138_id[] = {
+ { "adc12130", adc12130 },
+ { "adc12132", adc12132 },
+ { "adc12138", adc12138 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adc12138_id);
+
+static struct spi_driver adc12138_driver = {
+ .driver = {
+ .name = "adc12138",
+ .of_match_table = of_match_ptr(adc12138_dt_ids),
+ },
+ .probe = adc12138_probe,
+ .remove = adc12138_remove,
+ .id_table = adc12138_id,
+};
+module_spi_driver(adc12138_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("ADC12130/ADC12132/ADC12138 driver");
+MODULE_LICENSE("GPL v2");
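Buffered samples from this driver follow the declared scan_type (big-endian, 13 significant bits stored in 16 with a shift of 3), which is also exactly how read_raw() unpacks a single conversion. A one-line decode sketch for reference (helper name invented):

#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch: decode one buffered sample described by "be:s13/16>>3". */
static int example_adc12138_decode(__be16 sample)
{
	return sign_extend32(be16_to_cpu(sample) >> 3, 12);
}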
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
new file mode 100644
index 000000000000..f94b69f9c288
--- /dev/null
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -0,0 +1,248 @@
+/*
+ * ti-adc161s626.c - Texas Instruments ADC161S626 1-channel differential ADC
+ *
+ * ADC Devices Supported:
+ * adc141s626 - 14-bit ADC
+ * adc161s626 - 16-bit ADC
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define TI_ADC_DRV_NAME "ti-adc161s626"
+
+enum {
+ TI_ADC141S626,
+ TI_ADC161S626,
+};
+
+static const struct iio_chan_spec ti_adc141s626_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 16,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec ti_adc161s626_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+struct ti_adc_data {
+ struct iio_dev *indio_dev;
+ struct spi_device *spi;
+ u8 read_size;
+ u8 shift;
+
+ u8 buffer[16] ____cacheline_aligned;
+};
+
+static int ti_adc_read_measurement(struct ti_adc_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret;
+
+ switch (data->read_size) {
+ case 2: {
+ __be16 buf;
+
+ ret = spi_read(data->spi, (void *) &buf, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(buf);
+ break;
+ }
+ case 3: {
+ __be32 buf;
+
+ ret = spi_read(data->spi, (void *) &buf, 3);
+ if (ret)
+ return ret;
+
+ *val = be32_to_cpu(buf) >> 8;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ *val = sign_extend32(*val >> data->shift, chan->scan_type.realbits - 1);
+
+ return 0;
+}
+
+static irqreturn_t ti_adc_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ti_adc_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = ti_adc_read_measurement(data, &indio_dev->channels[0],
+ (int *) &data->buffer);
+ if (!ret)
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ data->buffer,
+ iio_get_time_ns(indio_dev));
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ti_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ti_adc_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info ti_adc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ti_adc_read_raw,
+};
+
+static int ti_adc_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ti_adc_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &ti_adc_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->name = TI_ADC_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ spi_set_drvdata(spi, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->spi = spi;
+
+ switch (spi_get_device_id(spi)->driver_data) {
+ case TI_ADC141S626:
+ indio_dev->channels = ti_adc141s626_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ti_adc141s626_channels);
+ data->shift = 0;
+ data->read_size = 2;
+ break;
+ case TI_ADC161S626:
+ indio_dev->channels = ti_adc161s626_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ti_adc161s626_channels);
+ data->shift = 6;
+ data->read_size = 3;
+ break;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ti_adc_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int ti_adc_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_adc_dt_ids[] = {
+ { .compatible = "ti,adc141s626", },
+ { .compatible = "ti,adc161s626", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
+
+static const struct spi_device_id ti_adc_id[] = {
+ {"adc141s626", TI_ADC141S626},
+ {"adc161s626", TI_ADC161S626},
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ti_adc_id);
+
+static struct spi_driver ti_adc_driver = {
+ .driver = {
+ .name = TI_ADC_DRV_NAME,
+ .of_match_table = of_match_ptr(ti_adc_dt_ids),
+ },
+ .probe = ti_adc_probe,
+ .remove = ti_adc_remove,
+ .id_table = ti_adc_id,
+};
+module_spi_driver(ti_adc_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("Texas Instruments ADC1x1S 1-channel differential ADC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 066abaf80201..cde6f130a99a 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -522,6 +522,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
if (pga > 6) {
dev_err(&client->dev, "invalid gain on %s\n",
node->full_name);
+ of_node_put(node);
return -EINVAL;
}
}
@@ -532,6 +533,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
dev_err(&client->dev,
"invalid data_rate on %s\n",
node->full_name);
+ of_node_put(node);
return -EINVAL;
}
}
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index c400439900af..4a163496d9e4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -438,7 +438,7 @@ static int ads8688_probe(struct spi_device *spi)
return 0;
error_out:
- if (!IS_ERR_OR_NULL(st->reg))
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
return ret;
@@ -451,7 +451,7 @@ static int ads8688_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (!IS_ERR_OR_NULL(st->reg))
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
return 0;
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 323079c3ccce..b8f550e47d3d 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -18,6 +18,7 @@ struct iio_cb_buffer {
int (*cb)(const void *data, void *private);
void *private;
struct iio_channel *channels;
+ struct iio_dev *indio_dev;
};
static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
@@ -52,7 +53,6 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
{
int ret;
struct iio_cb_buffer *cb_buff;
- struct iio_dev *indio_dev;
struct iio_channel *chan;
cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
@@ -72,17 +72,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
goto error_free_cb_buff;
}
- indio_dev = cb_buff->channels[0].indio_dev;
+ cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
cb_buff->buffer.scan_mask
- = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
- GFP_KERNEL);
+ = kcalloc(BITS_TO_LONGS(cb_buff->indio_dev->masklength),
+ sizeof(long), GFP_KERNEL);
if (cb_buff->buffer.scan_mask == NULL) {
ret = -ENOMEM;
goto error_release_channels;
}
chan = &cb_buff->channels[0];
while (chan->indio_dev) {
- if (chan->indio_dev != indio_dev) {
+ if (chan->indio_dev != cb_buff->indio_dev) {
ret = -EINVAL;
goto error_free_scan_mask;
}
@@ -105,17 +105,14 @@ EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
- return iio_update_buffers(cb_buff->channels[0].indio_dev,
- &cb_buff->buffer,
+ return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
- iio_update_buffers(cb_buff->channels[0].indio_dev,
- NULL,
- &cb_buff->buffer);
+ iio_update_buffers(cb_buff->indio_dev, NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
@@ -133,6 +130,13 @@ struct iio_channel
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
+struct iio_dev
+*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer)
+{
+ return cb_buffer->indio_dev;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_get_iio_dev);
+
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O callback buffer");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index 4b2858ba1fd6..d3db1fce54d2 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -98,6 +98,48 @@ void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(iio_triggered_buffer_cleanup);
+static void devm_iio_triggered_buffer_clean(struct device *dev, void *res)
+{
+ iio_triggered_buffer_cleanup(*(struct iio_dev **)res);
+}
+
+int devm_iio_triggered_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ const struct iio_buffer_setup_ops *ops)
+{
+ struct iio_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_iio_triggered_buffer_clean, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = indio_dev;
+
+ ret = iio_triggered_buffer_setup(indio_dev, h, thread, ops);
+ if (!ret)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);
+
+void devm_iio_triggered_buffer_cleanup(struct device *dev,
+ struct iio_dev *indio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_triggered_buffer_clean,
+ devm_iio_device_match, indio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 4bcc025e8c8a..cea7f9857a1f 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -16,6 +16,7 @@ config ATLAS_PH_SENSOR
Atlas Scientific OEM SM sensors:
* pH SM sensor
* EC SM sensor
+ * ORP SM sensor
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 407f141a1eee..bd321b305a0a 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -66,12 +66,17 @@
#define ATLAS_REG_TDS_DATA 0x1c
#define ATLAS_REG_PSS_DATA 0x20
+#define ATLAS_REG_ORP_CALIB_STATUS 0x0d
+#define ATLAS_REG_ORP_DATA 0x0e
+
#define ATLAS_PH_INT_TIME_IN_US 450000
#define ATLAS_EC_INT_TIME_IN_US 650000
+#define ATLAS_ORP_INT_TIME_IN_US 450000
enum {
ATLAS_PH_SM,
ATLAS_EC_SM,
+ ATLAS_ORP_SM,
};
struct atlas_data {
@@ -84,26 +89,10 @@ struct atlas_data {
__be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
};
-static const struct regmap_range atlas_volatile_ranges[] = {
- regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
- regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
- regmap_reg_range(ATLAS_REG_EC_DATA, ATLAS_REG_PSS_DATA + 4),
-};
-
-static const struct regmap_access_table atlas_volatile_table = {
- .yes_ranges = atlas_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(atlas_volatile_ranges),
-};
-
static const struct regmap_config atlas_regmap_config = {
.name = ATLAS_REGMAP_NAME,
-
.reg_bits = 8,
.val_bits = 8,
-
- .volatile_table = &atlas_volatile_table,
- .max_register = ATLAS_REG_PSS_DATA + 4,
- .cache_type = REGCACHE_RBTREE,
};
static const struct iio_chan_spec atlas_ph_channels[] = {
@@ -175,6 +164,23 @@ static const struct iio_chan_spec atlas_ec_channels[] = {
},
};
+static const struct iio_chan_spec atlas_orp_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .address = ATLAS_REG_ORP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static int atlas_check_ph_calibration(struct atlas_data *data)
{
struct device *dev = &data->client->dev;
@@ -240,6 +246,22 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
return 0;
}
+static int atlas_check_orp_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_ORP_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ dev_warn(dev, "device has not been calibrated\n");
+
+ return 0;
+};
+
struct atlas_device {
const struct iio_chan_spec *channels;
int num_channels;
@@ -264,7 +286,13 @@ static struct atlas_device atlas_devices[] = {
.calibration = &atlas_check_ec_calibration,
.delay = ATLAS_EC_INT_TIME_IN_US,
},
-
+ [ATLAS_ORP_SM] = {
+ .channels = atlas_orp_channels,
+ .num_channels = 2,
+ .data_reg = ATLAS_REG_ORP_DATA,
+ .calibration = &atlas_check_orp_calibration,
+ .delay = ATLAS_ORP_INT_TIME_IN_US,
+ },
};
static int atlas_set_powermode(struct atlas_data *data, int on)
@@ -402,15 +430,14 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
case IIO_PH:
case IIO_CONCENTRATION:
case IIO_ELECTRICALCONDUCTIVITY:
- mutex_lock(&indio_dev->mlock);
+ case IIO_VOLTAGE:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = atlas_read_measurement(data,
- chan->address, &reg);
+ ret = atlas_read_measurement(data, chan->address, &reg);
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
break;
default:
ret = -EINVAL;
@@ -440,6 +467,10 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
*val = 0; /* 0.000000001 */
*val2 = 1000;
return IIO_VAL_INT_PLUS_NANO;
+ case IIO_VOLTAGE:
+ *val = 1; /* 0.1 */
+ *val2 = 10;
+ break;
default:
return -EINVAL;
}
@@ -475,6 +506,7 @@ static const struct iio_info atlas_info = {
static const struct i2c_device_id atlas_id[] = {
{ "atlas-ph-sm", ATLAS_PH_SM},
{ "atlas-ec-sm", ATLAS_EC_SM},
+ { "atlas-orp-sm", ATLAS_ORP_SM},
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
@@ -482,6 +514,7 @@ MODULE_DEVICE_TABLE(i2c, atlas_id);
static const struct of_device_id atlas_dt_ids[] = {
{ .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
+ { .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
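
The read_raw hunk above replaces the open-coded mlock/iio_buffer_enabled() dance with iio_device_claim_direct_mode(), which fails (typically with -EBUSY) while a buffered capture is running and otherwise holds the device in direct mode until released. A condensed sketch of the pattern; the function name and the placeholder read are hypothetical:

#include <linux/iio/iio.h>

static int my_read_raw_sample(struct iio_dev *indio_dev, int *val)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)	/* buffered capture in progress */
		return ret;

	*val = 0;	/* placeholder for the actual one-shot conversion */

	iio_device_release_direct_mode(indio_dev);
	return IIO_VAL_INT;
}
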
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 652649da500f..8e0e4415c161 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -19,25 +19,55 @@
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define VZ89X_REG_MEASUREMENT 0x09
-#define VZ89X_REG_MEASUREMENT_SIZE 6
+#define VZ89X_REG_MEASUREMENT_RD_SIZE 6
+#define VZ89X_REG_MEASUREMENT_WR_SIZE 3
#define VZ89X_VOC_CO2_IDX 0
#define VZ89X_VOC_SHORT_IDX 1
#define VZ89X_VOC_TVOC_IDX 2
#define VZ89X_VOC_RESISTANCE_IDX 3
+#define VZ89TE_REG_MEASUREMENT 0x0c
+#define VZ89TE_REG_MEASUREMENT_RD_SIZE 7
+#define VZ89TE_REG_MEASUREMENT_WR_SIZE 6
+
+#define VZ89TE_VOC_TVOC_IDX 0
+#define VZ89TE_VOC_CO2_IDX 1
+#define VZ89TE_VOC_RESISTANCE_IDX 2
+
+enum {
+ VZ89X,
+ VZ89TE,
+};
+
+struct vz89x_chip_data;
+
struct vz89x_data {
struct i2c_client *client;
+ const struct vz89x_chip_data *chip;
struct mutex lock;
int (*xfer)(struct vz89x_data *data, u8 cmd);
+ bool is_valid;
unsigned long last_update;
- u8 buffer[VZ89X_REG_MEASUREMENT_SIZE];
+ u8 buffer[VZ89TE_REG_MEASUREMENT_RD_SIZE];
+};
+
+struct vz89x_chip_data {
+ bool (*valid)(struct vz89x_data *data);
+ const struct iio_chan_spec *channels;
+ u8 num_channels;
+
+ u8 cmd;
+ u8 read_size;
+ u8 write_size;
};
static const struct iio_chan_spec vz89x_channels[] = {
@@ -70,6 +100,40 @@ static const struct iio_chan_spec vz89x_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.address = VZ89X_VOC_RESISTANCE_IDX,
+ .scan_index = -1,
+ .scan_type = {
+ .endianness = IIO_LE,
+ },
+ },
+};
+
+static const struct iio_chan_spec vz89te_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .modified = 1,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_RAW),
+ .address = VZ89TE_VOC_TVOC_IDX,
+ },
+
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_RAW),
+ .address = VZ89TE_VOC_CO2_IDX,
+ },
+ {
+ .type = IIO_RESISTANCE,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .address = VZ89TE_VOC_RESISTANCE_IDX,
+ .scan_index = -1,
+ .scan_type = {
+ .endianness = IIO_BE,
+ },
},
};
@@ -93,29 +157,45 @@ static const struct attribute_group vz89x_attrs_group = {
* always zero, and by also confirming the VOC_short isn't zero.
*/
-static int vz89x_measurement_is_valid(struct vz89x_data *data)
+static bool vz89x_measurement_is_valid(struct vz89x_data *data)
{
if (data->buffer[VZ89X_VOC_SHORT_IDX] == 0)
- return 1;
+ return true;
- return !!(data->buffer[VZ89X_REG_MEASUREMENT_SIZE - 1] > 0);
+ return !!(data->buffer[data->chip->read_size - 1] > 0);
+}
+
+/* VZ89TE device has a modified CRC-8 two's complement check */
+static bool vz89te_measurement_is_valid(struct vz89x_data *data)
+{
+ u8 crc = 0;
+ int i, sum = 0;
+
+ for (i = 0; i < (data->chip->read_size - 1); i++) {
+ sum = crc + data->buffer[i];
+ crc = sum;
+ crc += sum / 256;
+ }
+
+ return !((0xff - crc) == data->buffer[data->chip->read_size - 1]);
}
static int vz89x_i2c_xfer(struct vz89x_data *data, u8 cmd)
{
+ const struct vz89x_chip_data *chip = data->chip;
struct i2c_client *client = data->client;
struct i2c_msg msg[2];
int ret;
- u8 buf[3] = { cmd, 0, 0};
+ u8 buf[6] = { cmd, 0, 0, 0, 0, 0xf3 };
msg[0].addr = client->addr;
msg[0].flags = client->flags;
- msg[0].len = 3;
+ msg[0].len = chip->write_size;
msg[0].buf = (char *) &buf;
msg[1].addr = client->addr;
msg[1].flags = client->flags | I2C_M_RD;
- msg[1].len = VZ89X_REG_MEASUREMENT_SIZE;
+ msg[1].len = chip->read_size;
msg[1].buf = (char *) &data->buffer;
ret = i2c_transfer(client->adapter, msg, 2);
@@ -133,7 +213,7 @@ static int vz89x_smbus_xfer(struct vz89x_data *data, u8 cmd)
if (ret < 0)
return ret;
- for (i = 0; i < VZ89X_REG_MEASUREMENT_SIZE; i++) {
+ for (i = 0; i < data->chip->read_size; i++) {
ret = i2c_smbus_read_byte(client);
if (ret < 0)
return ret;
@@ -145,30 +225,47 @@ static int vz89x_smbus_xfer(struct vz89x_data *data, u8 cmd)
static int vz89x_get_measurement(struct vz89x_data *data)
{
+ const struct vz89x_chip_data *chip = data->chip;
int ret;
/* sensor can only be polled once a second max per datasheet */
if (!time_after(jiffies, data->last_update + HZ))
- return 0;
+ return data->is_valid ? 0 : -EAGAIN;
+
+ data->is_valid = false;
+ data->last_update = jiffies;
- ret = data->xfer(data, VZ89X_REG_MEASUREMENT);
+ ret = data->xfer(data, chip->cmd);
if (ret < 0)
return ret;
- ret = vz89x_measurement_is_valid(data);
+ ret = chip->valid(data);
if (ret)
return -EAGAIN;
- data->last_update = jiffies;
+ data->is_valid = true;
return 0;
}
-static int vz89x_get_resistance_reading(struct vz89x_data *data)
+static int vz89x_get_resistance_reading(struct vz89x_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
{
- u8 *buf = &data->buffer[VZ89X_VOC_RESISTANCE_IDX];
+ u8 *tmp = (u8 *) &data->buffer[chan->address];
- return buf[0] | (buf[1] << 8);
+ switch (chan->scan_type.endianness) {
+ case IIO_LE:
+ *val = le32_to_cpup((__le32 *) tmp) & GENMASK(23, 0);
+ break;
+ case IIO_BE:
+ *val = be32_to_cpup((__be32 *) tmp) >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
static int vz89x_read_raw(struct iio_dev *indio_dev,
@@ -187,15 +284,15 @@ static int vz89x_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- switch (chan->address) {
- case VZ89X_VOC_CO2_IDX:
- case VZ89X_VOC_SHORT_IDX:
- case VZ89X_VOC_TVOC_IDX:
+ switch (chan->type) {
+ case IIO_CONCENTRATION:
*val = data->buffer[chan->address];
return IIO_VAL_INT;
- case VZ89X_VOC_RESISTANCE_IDX:
- *val = vz89x_get_resistance_reading(data);
- return IIO_VAL_INT;
+ case IIO_RESISTANCE:
+ ret = vz89x_get_resistance_reading(data, chan, val);
+ if (!ret)
+ return IIO_VAL_INT;
+ break;
default:
return -EINVAL;
}
@@ -210,12 +307,12 @@ static int vz89x_read_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_OFFSET:
- switch (chan->address) {
- case VZ89X_VOC_CO2_IDX:
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
*val = 44;
*val2 = 250000;
return IIO_VAL_INT_PLUS_MICRO;
- case VZ89X_VOC_TVOC_IDX:
+ case IIO_MOD_VOC:
*val = -13;
return IIO_VAL_INT;
default:
@@ -232,11 +329,43 @@ static const struct iio_info vz89x_info = {
.driver_module = THIS_MODULE,
};
+static const struct vz89x_chip_data vz89x_chips[] = {
+ {
+ .valid = vz89x_measurement_is_valid,
+
+ .cmd = VZ89X_REG_MEASUREMENT,
+ .read_size = VZ89X_REG_MEASUREMENT_RD_SIZE,
+ .write_size = VZ89X_REG_MEASUREMENT_WR_SIZE,
+
+ .channels = vz89x_channels,
+ .num_channels = ARRAY_SIZE(vz89x_channels),
+ },
+ {
+ .valid = vz89te_measurement_is_valid,
+
+ .cmd = VZ89TE_REG_MEASUREMENT,
+ .read_size = VZ89TE_REG_MEASUREMENT_RD_SIZE,
+ .write_size = VZ89TE_REG_MEASUREMENT_WR_SIZE,
+
+ .channels = vz89te_channels,
+ .num_channels = ARRAY_SIZE(vz89te_channels),
+ },
+};
+
+static const struct of_device_id vz89x_dt_ids[] = {
+ { .compatible = "sgx,vz89x", .data = (void *) VZ89X },
+ { .compatible = "sgx,vz89te", .data = (void *) VZ89TE },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vz89x_dt_ids);
+
static int vz89x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
struct vz89x_data *data;
+ const struct of_device_id *of_id;
+ int chip_id;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -251,8 +380,15 @@ static int vz89x_probe(struct i2c_client *client,
else
return -EOPNOTSUPP;
+ of_id = of_match_device(vz89x_dt_ids, &client->dev);
+ if (!of_id)
+ chip_id = id->driver_data;
+ else
+ chip_id = (unsigned long)of_id->data;
+
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->chip = &vz89x_chips[chip_id];
data->last_update = jiffies - HZ;
mutex_init(&data->lock);
@@ -261,24 +397,19 @@ static int vz89x_probe(struct i2c_client *client,
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = vz89x_channels;
- indio_dev->num_channels = ARRAY_SIZE(vz89x_channels);
+ indio_dev->channels = data->chip->channels;
+ indio_dev->num_channels = data->chip->num_channels;
return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id vz89x_id[] = {
- { "vz89x", 0 },
+ { "vz89x", VZ89X },
+ { "vz89te", VZ89TE },
{ }
};
MODULE_DEVICE_TABLE(i2c, vz89x_id);
-static const struct of_device_id vz89x_dt_ids[] = {
- { .compatible = "sgx,vz89x" },
- { }
-};
-MODULE_DEVICE_TABLE(of, vz89x_dt_ids);
-
static struct i2c_driver vz89x_driver = {
.driver = {
.name = "vz89x",
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 5b41f9d0d4f3..a3cce3a38300 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -122,6 +122,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
#endif
}
+static void hid_sensor_set_power_work(struct work_struct *work)
+{
+ struct hid_sensor_common *attrb = container_of(work,
+ struct hid_sensor_common,
+ work);
+ _hid_sensor_power_state(attrb, true);
+}
+
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
@@ -130,6 +138,7 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
{
+ cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
}
@@ -170,6 +179,9 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
goto error_unreg_trigger;
iio_device_set_drvdata(indio_dev, attrb);
+
+ INIT_WORK(&attrb->work, hid_sensor_set_power_work);
+
pm_suspend_ignore_children(&attrb->pdev->dev, true);
pm_runtime_enable(&attrb->pdev->dev);
/* Default to 3 seconds, but can be changed from sysfs */
@@ -187,8 +199,7 @@ error_ret:
}
EXPORT_SYMBOL(hid_sensor_setup_trigger);
-#ifdef CONFIG_PM
-static int hid_sensor_suspend(struct device *dev)
+static int __maybe_unused hid_sensor_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
@@ -197,21 +208,27 @@ static int hid_sensor_suspend(struct device *dev)
return _hid_sensor_power_state(attrb, false);
}
-static int hid_sensor_resume(struct device *dev)
+static int __maybe_unused hid_sensor_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+ schedule_work(&attrb->work);
+ return 0;
+}
+static int __maybe_unused hid_sensor_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
return _hid_sensor_power_state(attrb, true);
}
-#endif
-
const struct dev_pm_ops hid_sensor_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(hid_sensor_suspend, hid_sensor_resume)
SET_RUNTIME_PM_OPS(hid_sensor_suspend,
- hid_sensor_resume, NULL)
+ hid_sensor_runtime_resume, NULL)
};
EXPORT_SYMBOL(hid_sensor_pm_ops);
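
The hid-sensor-trigger change above defers the power-up I/O of system resume to a work item (the synchronous path is kept for runtime PM) and cancels that work in hid_sensor_remove_trigger(). A generic sketch of the defer-to-workqueue resume pattern with hypothetical my_* names:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/workqueue.h>

struct my_sensor {
	struct work_struct work;	/* INIT_WORK(&s->work, my_power_up_work) at probe */
};

static void my_power_up_work(struct work_struct *work)
{
	struct my_sensor *s = container_of(work, struct my_sensor, work);

	/* slow bus traffic to power the sensor back up happens here */
	(void)s;
}

static int __maybe_unused my_resume(struct device *dev)
{
	struct my_sensor *s = dev_get_drvdata(dev);

	schedule_work(&s->work);	/* do not block system resume on I/O */
	return 0;
}
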
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index d06e728cea37..fe7775bb3740 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -63,7 +63,7 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
* the hardware trigger) and the hw_timestamp may get updated.
* By storing it in a local variable first, we are safe.
*/
- if (sdata->hw_irq_trigger)
+ if (iio_trigger_using_own(indio_dev))
timestamp = sdata->hw_timestamp;
else
timestamp = iio_get_time_ns(indio_dev);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 2d5282e05482..285a64a589d7 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -234,39 +234,35 @@ int st_sensors_power_enable(struct iio_dev *indio_dev)
int err;
/* Regulators not mandatory, but if requested we should enable them. */
- pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
- if (!IS_ERR(pdata->vdd)) {
- err = regulator_enable(pdata->vdd);
- if (err != 0) {
- dev_warn(&indio_dev->dev,
- "Failed to enable specified Vdd supply\n");
- return err;
- }
- } else {
- err = PTR_ERR(pdata->vdd);
- if (err != -ENODEV)
- return err;
+ pdata->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
+ if (IS_ERR(pdata->vdd)) {
+ dev_err(&indio_dev->dev, "unable to get Vdd supply\n");
+ return PTR_ERR(pdata->vdd);
+ }
+ err = regulator_enable(pdata->vdd);
+ if (err != 0) {
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd supply\n");
+ return err;
}
- pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
- if (!IS_ERR(pdata->vdd_io)) {
- err = regulator_enable(pdata->vdd_io);
- if (err != 0) {
- dev_warn(&indio_dev->dev,
- "Failed to enable specified Vdd_IO supply\n");
- goto st_sensors_disable_vdd;
- }
- } else {
+ pdata->vdd_io = devm_regulator_get(indio_dev->dev.parent, "vddio");
+ if (IS_ERR(pdata->vdd_io)) {
+ dev_err(&indio_dev->dev, "unable to get Vdd_IO supply\n");
err = PTR_ERR(pdata->vdd_io);
- if (err != -ENODEV)
- goto st_sensors_disable_vdd;
+ goto st_sensors_disable_vdd;
+ }
+ err = regulator_enable(pdata->vdd_io);
+ if (err != 0) {
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd_IO supply\n");
+ goto st_sensors_disable_vdd;
}
return 0;
st_sensors_disable_vdd:
- if (!IS_ERR_OR_NULL(pdata->vdd))
- regulator_disable(pdata->vdd);
+ regulator_disable(pdata->vdd);
return err;
}
EXPORT_SYMBOL(st_sensors_power_enable);
@@ -275,11 +271,8 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
- if (!IS_ERR_OR_NULL(pdata->vdd))
- regulator_disable(pdata->vdd);
-
- if (!IS_ERR_OR_NULL(pdata->vdd_io))
- regulator_disable(pdata->vdd_io);
+ regulator_disable(pdata->vdd);
+ regulator_disable(pdata->vdd_io);
}
EXPORT_SYMBOL(st_sensors_power_disable);
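
The st_sensors_core hunks drop devm_regulator_get_optional() in favour of devm_regulator_get(): when no supply is described, the regulator core typically hands back a dummy regulator, so enable/disable always operate on a valid handle and the IS_ERR_OR_NULL checks disappear. A compressed sketch of the resulting pattern (my_enable_supply is a hypothetical helper):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int my_enable_supply(struct device *dev, struct regulator **vdd)
{
	int err;

	*vdd = devm_regulator_get(dev, "vdd");	/* dummy regulator if absent */
	if (IS_ERR(*vdd))
		return PTR_ERR(*vdd);

	err = regulator_enable(*vdd);
	if (err)
		dev_warn(dev, "failed to enable vdd supply\n");

	return err;
}
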
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index e66f12ee8a55..fa73e6795359 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -66,7 +66,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
* @irq: irq number
* @p: private handler data
*/
-irqreturn_t st_sensors_irq_handler(int irq, void *p)
+static irqreturn_t st_sensors_irq_handler(int irq, void *p)
{
struct iio_trigger *trig = p;
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
@@ -82,7 +82,7 @@ irqreturn_t st_sensors_irq_handler(int irq, void *p)
* @irq: irq number
* @p: private handler data
*/
-irqreturn_t st_sensors_irq_thread(int irq, void *p)
+static irqreturn_t st_sensors_irq_thread(int irq, void *p)
{
struct iio_trigger *trig = p;
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index ca814479fadf..120b24478469 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -181,6 +181,25 @@ config AD7303
To compile this driver as module choose M here: the module will be called
ad7303.
+config CIO_DAC
+ tristate "Measurement Computing CIO-DAC IIO driver"
+ depends on X86 && ISA_BUS_API
+ help
+ Say yes here to build support for the Measurement Computing CIO-DAC
+ analog output device family (CIO-DAC16, CIO-DAC08, PC104-DAC06). The
+ base port addresses for the devices may be configured via the base
+ array module parameter.
+
+config AD8801
+ tristate "Analog Devices AD8801/AD8803 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD8801, AD8803 Digital to
+ Analog Converters (DAC).
+
+ To compile this driver as a module choose M here: the module will be called
+ ad8801.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -245,16 +264,6 @@ config MCP4922
To compile this driver as a module, choose M here: the module
will be called mcp4922.
-config STX104
- tristate "Apex Embedded Systems STX104 DAC driver"
- depends on X86 && ISA_BUS_API
- select GPIOLIB
- help
- Say yes here to build support for the 2-channel DAC and GPIO on the
- Apex Embedded Systems STX104 integrated analog PC/104 card. The base
- port addresses for the devices may be configured via the base array
- module parameter.
-
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 8b78d5ca9b11..27642bbf75f2 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -20,11 +20,12 @@ obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
+obj-$(CONFIG_AD8801) += ad8801.o
+obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
-obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 0fde593ec0d9..5f7968232564 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -655,7 +655,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
devnr = 0;
for_each_child_of_node(np, pp) {
- if (devnr > AD5755_NUM_CHANNELS) {
+ if (devnr >= AD5755_NUM_CHANNELS) {
dev_err(dev,
"There is to many channels defined in DT\n");
goto error_out;
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
new file mode 100644
index 000000000000..f06faa1aec09
--- /dev/null
+++ b/drivers/iio/dac/ad8801.c
@@ -0,0 +1,239 @@
+/*
+ * IIO DAC driver for Analog Devices AD8801 DAC
+ *
+ * Copyright (C) 2016 Gwenhael Goavec-Merou
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+
+#define AD8801_CFG_ADDR_OFFSET 8
+
+enum ad8801_device_ids {
+ ID_AD8801,
+ ID_AD8803,
+};
+
+struct ad8801_state {
+ struct spi_device *spi;
+ unsigned char dac_cache[8]; /* Last value written to each channel */
+ unsigned int vrefh_mv;
+ unsigned int vrefl_mv;
+ struct regulator *vrefh_reg;
+ struct regulator *vrefl_reg;
+
+ __be16 data ____cacheline_aligned;
+};
+
+static int ad8801_spi_write(struct ad8801_state *state,
+ u8 channel, unsigned char value)
+{
+ state->data = cpu_to_be16((channel << AD8801_CFG_ADDR_OFFSET) | value);
+ return spi_write(state->spi, &state->data, sizeof(state->data));
+}
+
+static int ad8801_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct ad8801_state *state = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= 256 || val < 0)
+ return -EINVAL;
+
+ ret = ad8801_spi_write(state, chan->channel, val);
+ if (ret == 0)
+ state->dac_cache[chan->channel] = val;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ad8801_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad8801_state *state = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ *val = state->dac_cache[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = state->vrefh_mv - state->vrefl_mv;
+ *val2 = 8;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = state->vrefl_mv;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad8801_info = {
+ .read_raw = ad8801_read_raw,
+ .write_raw = ad8801_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD8801_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+}
+
+static const struct iio_chan_spec ad8801_channels[] = {
+ AD8801_CHANNEL(0),
+ AD8801_CHANNEL(1),
+ AD8801_CHANNEL(2),
+ AD8801_CHANNEL(3),
+ AD8801_CHANNEL(4),
+ AD8801_CHANNEL(5),
+ AD8801_CHANNEL(6),
+ AD8801_CHANNEL(7),
+};
+
+static int ad8801_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad8801_state *state;
+ const struct spi_device_id *id;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ state->spi = spi;
+ id = spi_get_device_id(spi);
+
+ state->vrefh_reg = devm_regulator_get(&spi->dev, "vrefh");
+ if (IS_ERR(state->vrefh_reg)) {
+ dev_err(&spi->dev, "Vrefh regulator not specified\n");
+ return PTR_ERR(state->vrefh_reg);
+ }
+
+ ret = regulator_enable(state->vrefh_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vrefh regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regulator_get_voltage(state->vrefh_reg);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to read vrefh regulator: %d\n",
+ ret);
+ goto error_disable_vrefh_reg;
+ }
+ state->vrefh_mv = ret / 1000;
+
+ if (id->driver_data == ID_AD8803) {
+ state->vrefl_reg = devm_regulator_get(&spi->dev, "vrefl");
+ if (IS_ERR(state->vrefl_reg)) {
+ dev_err(&spi->dev, "Vrefl regulator not specified\n");
+ ret = PTR_ERR(state->vrefl_reg);
+ goto error_disable_vrefh_reg;
+ }
+
+ ret = regulator_enable(state->vrefl_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vrefl regulator: %d\n",
+ ret);
+ goto error_disable_vrefh_reg;
+ }
+
+ ret = regulator_get_voltage(state->vrefl_reg);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to read vrefl regulator: %d\n",
+ ret);
+ goto error_disable_vrefl_reg;
+ }
+ state->vrefl_mv = ret / 1000;
+ } else {
+ state->vrefl_mv = 0;
+ state->vrefl_reg = NULL;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad8801_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad8801_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad8801_channels);
+ indio_dev->name = id->name;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n",
+ ret);
+ goto error_disable_vrefl_reg;
+ }
+
+ return 0;
+
+error_disable_vrefl_reg:
+ if (state->vrefl_reg)
+ regulator_disable(state->vrefl_reg);
+error_disable_vrefh_reg:
+ regulator_disable(state->vrefh_reg);
+ return ret;
+}
+
+static int ad8801_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad8801_state *state = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (state->vrefl_reg)
+ regulator_disable(state->vrefl_reg);
+ regulator_disable(state->vrefh_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad8801_ids[] = {
+ {"ad8801", ID_AD8801},
+ {"ad8803", ID_AD8803},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad8801_ids);
+
+static struct spi_driver ad8801_driver = {
+ .driver = {
+ .name = "ad8801",
+ },
+ .probe = ad8801_probe,
+ .remove = ad8801_remove,
+ .id_table = ad8801_ids,
+};
+module_spi_driver(ad8801_driver);
+
+MODULE_AUTHOR("Gwenhael Goavec-Merou <gwenhael.goavec-merou@trabucayre.com>");
+MODULE_DESCRIPTION("Analog Devices AD8801/AD8803 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
new file mode 100644
index 000000000000..5a743e2a779d
--- /dev/null
+++ b/drivers/iio/dac/cio-dac.c
@@ -0,0 +1,144 @@
+/*
+ * IIO driver for the Measurement Computing CIO-DAC
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following Measurement Computing devices: CIO-DAC16,
+ * CIO-DAC08, and PC104-DAC06.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define CIO_DAC_NUM_CHAN 16
+
+#define CIO_DAC_CHAN(chan) { \
+ .type = IIO_VOLTAGE, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .output = 1 \
+}
+
+#define CIO_DAC_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(CIO_DAC_EXTENT)];
+static unsigned int num_cio_dac;
+module_param_array(base, uint, &num_cio_dac, 0);
+MODULE_PARM_DESC(base, "Measurement Computing CIO-DAC base addresses");
+
+/**
+ * struct cio_dac_iio - IIO device private data structure
+ * @chan_out_states: channels' output states
+ * @base: base port address of the IIO device
+ */
+struct cio_dac_iio {
+ int chan_out_states[CIO_DAC_NUM_CHAN];
+ unsigned int base;
+};
+
+static int cio_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct cio_dac_iio *const priv = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ *val = priv->chan_out_states[chan->channel];
+
+ return IIO_VAL_INT;
+}
+
+static int cio_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct cio_dac_iio *const priv = iio_priv(indio_dev);
+ const unsigned int chan_addr_offset = 2 * chan->channel;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ /* DAC can only accept up to a 16-bit value */
+ if ((unsigned int)val > 65535)
+ return -EINVAL;
+
+ priv->chan_out_states[chan->channel] = val;
+ outw(val, priv->base + chan_addr_offset);
+
+ return 0;
+}
+
+static const struct iio_info cio_dac_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = cio_dac_read_raw,
+ .write_raw = cio_dac_write_raw
+};
+
+static const struct iio_chan_spec cio_dac_channels[CIO_DAC_NUM_CHAN] = {
+ CIO_DAC_CHAN(0), CIO_DAC_CHAN(1), CIO_DAC_CHAN(2), CIO_DAC_CHAN(3),
+ CIO_DAC_CHAN(4), CIO_DAC_CHAN(5), CIO_DAC_CHAN(6), CIO_DAC_CHAN(7),
+ CIO_DAC_CHAN(8), CIO_DAC_CHAN(9), CIO_DAC_CHAN(10), CIO_DAC_CHAN(11),
+ CIO_DAC_CHAN(12), CIO_DAC_CHAN(13), CIO_DAC_CHAN(14), CIO_DAC_CHAN(15)
+};
+
+static int cio_dac_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct cio_dac_iio *priv;
+ unsigned int i;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], CIO_DAC_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to request port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + CIO_DAC_EXTENT);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &cio_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = cio_dac_channels;
+ indio_dev->num_channels = CIO_DAC_NUM_CHAN;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* initialize DAC outputs to 0V */
+ for (i = 0; i < 32; i += 2)
+ outw(0, base[id] + i);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver cio_dac_driver = {
+ .probe = cio_dac_probe,
+ .driver = {
+ .name = "cio-dac"
+ }
+};
+
+module_isa_driver(cio_dac_driver, num_cio_dac);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Measurement Computing CIO-DAC IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c
index 0a8afdd21728..1f25f406c545 100644
--- a/drivers/iio/gyro/ssp_gyro_sensor.c
+++ b/drivers/iio/gyro/ssp_gyro_sensor.c
@@ -74,7 +74,7 @@ static int ssp_gyro_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static struct iio_info ssp_gyro_iio_info = {
+static const struct iio_info ssp_gyro_iio_info = {
.read_raw = &ssp_gyro_read_raw,
.write_raw = &ssp_gyro_write_raw,
};
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index d04124345992..b17e2e2bd4f5 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -28,11 +28,11 @@ config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
help
- Say yes here to build support for the TI HDC100x series of
- relative humidity and temperature sensors.
+ Say yes here to build support for the Texas Instruments
+ HDC1000 and HDC1008 relative humidity and temperature sensors.
- To compile this driver as a module, choose M here: the module
- will be called hdc100x.
+ To compile this driver as a module, choose M here: the module
+ will be called hdc100x.
config HTU21
tristate "Measurement Specialties HTU21 humidity & temperature sensor"
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d2b889918c3e..fc340ed3dca1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1308,7 +1308,7 @@ static void devm_iio_device_release(struct device *dev, void *res)
iio_device_free(*(struct iio_dev **)res);
}
-static int devm_iio_device_match(struct device *dev, void *res, void *data)
+int devm_iio_device_match(struct device *dev, void *res, void *data)
{
struct iio_dev **r = res;
if (!r || !*r) {
@@ -1317,6 +1317,7 @@ static int devm_iio_device_match(struct device *dev, void *res, void *data)
}
return *r == data;
}
+EXPORT_SYMBOL_GPL(devm_iio_device_match);
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 0ebfc923a997..90fac8ec63c9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -57,6 +57,11 @@ bool iio_event_enabled(const struct iio_event_interface *ev_int)
*
* Note: The caller must make sure that this function is not running
* concurrently for the same indio_dev more than once.
+ *
+ * This function may be safely used as soon as a valid reference to iio_dev has
+ * been obtained via iio_device_alloc(), but any events that are submitted
+ * before iio_device_register() has successfully completed will be silently
+ * discarded.
**/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
@@ -64,6 +69,9 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
struct iio_event_data ev;
int copied;
+ if (!ev_int)
+ return 0;
+
/* Does anyone care? */
if (iio_event_enabled(ev_int)) {
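
The industrialio-event change makes iio_push_event() a safe no-op (returning 0) before the event interface exists, which the new kernel-doc paragraph spells out: events raised between iio_device_alloc() and a completed iio_device_register() are simply dropped. A hedged sketch of a threaded IRQ handler relying on that guarantee; the handler name and channel choice are hypothetical:

#include <linux/interrupt.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>

static irqreturn_t my_threshold_irq(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	/* Safe even if this fires before iio_device_register() has finished:
	 * in that window the event is silently discarded.
	 */
	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns(indio_dev));
	return IRQ_HANDLED;
}
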
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 7ad82fdd3e5b..e1e104845e38 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -119,6 +119,22 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
}
EXPORT_SYMBOL(iio_trigger_unregister);
+int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
+{
+ if (!indio_dev || !trig)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ WARN_ON(indio_dev->trig_readonly);
+
+ indio_dev->trig = iio_trigger_get(trig);
+ indio_dev->trig_readonly = true;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_trigger_set_immutable);
+
/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
@@ -255,6 +271,14 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
goto out_free_irq;
}
+ /*
+ * Check if we just registered to our own trigger: we determine that
+ * this is the case if the IIO device and the trigger device share the
+ * same parent device.
+ */
+ if (pf->indio_dev->dev.parent == trig->dev.parent)
+ trig->attached_own_device = true;
+
return ret;
out_free_irq:
@@ -279,6 +303,8 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
if (ret)
return ret;
}
+ if (pf->indio_dev->dev.parent == trig->dev.parent)
+ trig->attached_own_device = false;
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(pf->indio_dev->info->driver_module);
@@ -384,6 +410,10 @@ static ssize_t iio_trigger_write_current(struct device *dev,
mutex_unlock(&indio_dev->mlock);
return -EBUSY;
}
+ if (indio_dev->trig_readonly) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EPERM;
+ }
mutex_unlock(&indio_dev->mlock);
trig = iio_trigger_find_by_name(buf, len);
@@ -622,6 +652,71 @@ void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
+static void devm_iio_trigger_unreg(struct device *dev, void *res)
+{
+ iio_trigger_unregister(*(struct iio_trigger **)res);
+}
+
+/**
+ * devm_iio_trigger_register - Resource-managed iio_trigger_register()
+ * @dev: device this trigger was allocated for
+ * @trig_info: trigger to register
+ *
+ * Managed iio_trigger_register(). The IIO trigger registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_trigger_register() internally. Refer to that function for more
+ * information.
+ *
+ * If an iio_trigger registered with this function needs to be unregistered
+ * separately, devm_iio_trigger_unregister() must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
+{
+ struct iio_trigger **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_iio_trigger_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = trig_info;
+ ret = iio_trigger_register(trig_info);
+ if (!ret)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
+
+/**
+ * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
+ * @dev: device this iio_trigger belongs to
+ * @trig_info: the trigger associated with the device
+ *
+ * Unregister trigger registered with devm_iio_trigger_register().
+ */
+void devm_iio_trigger_unregister(struct device *dev,
+ struct iio_trigger *trig_info)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
+ trig_info);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
+
+bool iio_trigger_using_own(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig->attached_own_device;
+}
+EXPORT_SYMBOL(iio_trigger_using_own);
+
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
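
This industrialio-trigger hunk set adds three consumer-facing pieces: devm_iio_trigger_register(), iio_trigger_set_immutable() (which, together with the new -EPERM check, makes the sysfs current_trigger effectively read-only), and iio_trigger_using_own() backed by the attached_own_device bookkeeping. A hedged sketch of a driver wiring up its own hardware trigger with them; my_setup_hw_trigger and its probe context are illustrative only:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static int my_setup_hw_trigger(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;
	int ret;

	trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
				      indio_dev->name, indio_dev->id);
	if (!trig)
		return -ENOMEM;

	trig->dev.parent = dev;		/* same parent as indio_dev */
	iio_trigger_set_drvdata(trig, indio_dev);
	/* a real driver also fills trig->ops before registering */

	ret = devm_iio_trigger_register(dev, trig);	/* auto-unregister */
	if (ret)
		return ret;

	/* pin the device to its own trigger; sysfs writes now get -EPERM */
	return iio_trigger_set_immutable(indio_dev, trig);
}
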
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 3574945183fe..ba2e64d7ee58 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -267,6 +267,19 @@ config PA12203001
This driver can also be built as a module. If so, the module
will be called pa12203001.
+config SI1145
+ tristate "SI1132 and SI1141/2/3/5/6/7 combined ALS, UV index and proximity sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for the Silicon Labs SI1132 or
+ SI1141/2/3/5/6/7 combined ambient light, UV index and proximity sensor
+ chips.
+
+ To compile this driver as a module, choose M here: the module will be
+ called si1145.
+
config STK3310
tristate "STK3310 ALS and proximity sensor"
depends on I2C
@@ -334,11 +347,11 @@ config US5182D
will be called us5182d.
config VCNL4000
- tristate "VCNL4000 combined ALS and proximity sensor"
+ tristate "VCNL4000/4010/4020 combined ALS and proximity sensor"
depends on I2C
help
- Say Y here if you want to build a driver for the Vishay VCNL4000
- combined ambient light and proximity sensor.
+ Say Y here if you want to build a driver for the Vishay VCNL4000,
+ VCNL4010, VCNL4020 combined ambient light and proximity sensor.
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 6f2a3c62de27..c5768df87a17 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
new file mode 100644
index 000000000000..096034c126a4
--- /dev/null
+++ b/drivers/iio/light/si1145.c
@@ -0,0 +1,1404 @@
+/*
+ * si1145.c - Support for Silabs SI1132 and SI1141/2/3/5/6/7 combined ambient
+ * light, UV index and proximity sensors
+ *
+ * Copyright 2014-16 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ * Copyright 2016 Crestez Dan Leonard <leonard.crestez@intel.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * SI1132 (7-bit I2C slave address 0x60)
+ * SI1141/2/3 (7-bit I2C slave address 0x5a)
+ * SI1145/6/7 (7-bit I2C slave address 0x60)
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+#include <linux/util_macros.h>
+
+#define SI1145_REG_PART_ID 0x00
+#define SI1145_REG_REV_ID 0x01
+#define SI1145_REG_SEQ_ID 0x02
+#define SI1145_REG_INT_CFG 0x03
+#define SI1145_REG_IRQ_ENABLE 0x04
+#define SI1145_REG_IRQ_MODE 0x05
+#define SI1145_REG_HW_KEY 0x07
+#define SI1145_REG_MEAS_RATE 0x08
+#define SI1145_REG_PS_LED21 0x0f
+#define SI1145_REG_PS_LED3 0x10
+#define SI1145_REG_UCOEF1 0x13
+#define SI1145_REG_UCOEF2 0x14
+#define SI1145_REG_UCOEF3 0x15
+#define SI1145_REG_UCOEF4 0x16
+#define SI1145_REG_PARAM_WR 0x17
+#define SI1145_REG_COMMAND 0x18
+#define SI1145_REG_RESPONSE 0x20
+#define SI1145_REG_IRQ_STATUS 0x21
+#define SI1145_REG_ALSVIS_DATA 0x22
+#define SI1145_REG_ALSIR_DATA 0x24
+#define SI1145_REG_PS1_DATA 0x26
+#define SI1145_REG_PS2_DATA 0x28
+#define SI1145_REG_PS3_DATA 0x2a
+#define SI1145_REG_AUX_DATA 0x2c
+#define SI1145_REG_PARAM_RD 0x2e
+#define SI1145_REG_CHIP_STAT 0x30
+
+#define SI1145_UCOEF1_DEFAULT 0x7b
+#define SI1145_UCOEF2_DEFAULT 0x6b
+#define SI1145_UCOEF3_DEFAULT 0x01
+#define SI1145_UCOEF4_DEFAULT 0x00
+
+/* Helper to figure out PS_LED register / shift per channel */
+#define SI1145_PS_LED_REG(ch) \
+ (((ch) == 2) ? SI1145_REG_PS_LED3 : SI1145_REG_PS_LED21)
+#define SI1145_PS_LED_SHIFT(ch) \
+ (((ch) == 1) ? 4 : 0)
+
+/* Parameter offsets */
+#define SI1145_PARAM_CHLIST 0x01
+#define SI1145_PARAM_PSLED12_SELECT 0x02
+#define SI1145_PARAM_PSLED3_SELECT 0x03
+#define SI1145_PARAM_PS_ENCODING 0x05
+#define SI1145_PARAM_ALS_ENCODING 0x06
+#define SI1145_PARAM_PS1_ADC_MUX 0x07
+#define SI1145_PARAM_PS2_ADC_MUX 0x08
+#define SI1145_PARAM_PS3_ADC_MUX 0x09
+#define SI1145_PARAM_PS_ADC_COUNTER 0x0a
+#define SI1145_PARAM_PS_ADC_GAIN 0x0b
+#define SI1145_PARAM_PS_ADC_MISC 0x0c
+#define SI1145_PARAM_ALS_ADC_MUX 0x0d
+#define SI1145_PARAM_ALSIR_ADC_MUX 0x0e
+#define SI1145_PARAM_AUX_ADC_MUX 0x0f
+#define SI1145_PARAM_ALSVIS_ADC_COUNTER 0x10
+#define SI1145_PARAM_ALSVIS_ADC_GAIN 0x11
+#define SI1145_PARAM_ALSVIS_ADC_MISC 0x12
+#define SI1145_PARAM_LED_RECOVERY 0x1c
+#define SI1145_PARAM_ALSIR_ADC_COUNTER 0x1d
+#define SI1145_PARAM_ALSIR_ADC_GAIN 0x1e
+#define SI1145_PARAM_ALSIR_ADC_MISC 0x1f
+#define SI1145_PARAM_ADC_OFFSET 0x1a
+
+/* Channel enable masks for CHLIST parameter */
+#define SI1145_CHLIST_EN_PS1 BIT(0)
+#define SI1145_CHLIST_EN_PS2 BIT(1)
+#define SI1145_CHLIST_EN_PS3 BIT(2)
+#define SI1145_CHLIST_EN_ALSVIS BIT(4)
+#define SI1145_CHLIST_EN_ALSIR BIT(5)
+#define SI1145_CHLIST_EN_AUX BIT(6)
+#define SI1145_CHLIST_EN_UV BIT(7)
+
+/* Proximity measurement mode for ADC_MISC parameter */
+#define SI1145_PS_ADC_MODE_NORMAL BIT(2)
+/* Signal range mask for ADC_MISC parameter */
+#define SI1145_ADC_MISC_RANGE BIT(5)
+
+/* Commands for REG_COMMAND */
+#define SI1145_CMD_NOP 0x00
+#define SI1145_CMD_RESET 0x01
+#define SI1145_CMD_PS_FORCE 0x05
+#define SI1145_CMD_ALS_FORCE 0x06
+#define SI1145_CMD_PSALS_FORCE 0x07
+#define SI1145_CMD_PS_PAUSE 0x09
+#define SI1145_CMD_ALS_PAUSE 0x0a
+#define SI1145_CMD_PSALS_PAUSE 0x0b
+#define SI1145_CMD_PS_AUTO 0x0d
+#define SI1145_CMD_ALS_AUTO 0x0e
+#define SI1145_CMD_PSALS_AUTO 0x0f
+#define SI1145_CMD_PARAM_QUERY 0x80
+#define SI1145_CMD_PARAM_SET 0xa0
+
+#define SI1145_RSP_INVALID_SETTING 0x80
+#define SI1145_RSP_COUNTER_MASK 0x0F
+
+/* Minimum sleep after each command to ensure it's received */
+#define SI1145_COMMAND_MINSLEEP_MS 5
+/* Return -ETIMEDOUT after this long */
+#define SI1145_COMMAND_TIMEOUT_MS 25
+
+/* Interrupt configuration masks for INT_CFG register */
+#define SI1145_INT_CFG_OE BIT(0) /* enable interrupt */
+#define SI1145_INT_CFG_MODE BIT(1) /* auto reset interrupt pin */
+
+/* Interrupt enable masks for IRQ_ENABLE register */
+#define SI1145_MASK_ALL_IE (BIT(4) | BIT(3) | BIT(2) | BIT(0))
+
+#define SI1145_MUX_TEMP 0x65
+#define SI1145_MUX_VDD 0x75
+
+/* Proximity LED current; see Table 2 in datasheet */
+#define SI1145_LED_CURRENT_45mA 0x04
+
+enum {
+ SI1132,
+ SI1141,
+ SI1142,
+ SI1143,
+ SI1145,
+ SI1146,
+ SI1147,
+};
+
+struct si1145_part_info {
+ u8 part;
+ const struct iio_info *iio_info;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int num_leds;
+ bool uncompressed_meas_rate;
+};
+
+/**
+ * struct si1145_data - si1145 chip state data
+ * @client: I2C client
+ * @lock: mutex to protect shared state.
+ * @cmdlock: Low-level mutex to protect command execution only
+ * @rsp_seq: Next expected response number or -1 if counter reset required
+ * @scan_mask: Saved scan mask to avoid duplicate set_chlist
+ * @autonomous: If automatic measurements are active (for buffer support)
+ * @part_info: Part information
+ * @trig: Pointer to iio trigger
+ * @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode
+ */
+struct si1145_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct mutex cmdlock;
+ int rsp_seq;
+ const struct si1145_part_info *part_info;
+ unsigned long scan_mask;
+ bool autonomous;
+ struct iio_trigger *trig;
+ int meas_rate;
+};
+
+/**
+ * __si1145_command_reset() - Send CMD_NOP and wait for response 0
+ *
+ * Does not modify data->rsp_seq
+ *
+ * Return: 0 on success and -errno on error.
+ */
+static int __si1145_command_reset(struct si1145_data *data)
+{
+ struct device *dev = &data->client->dev;
+ unsigned long stop_jiffies;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, SI1145_REG_COMMAND,
+ SI1145_CMD_NOP);
+ if (ret < 0)
+ return ret;
+ msleep(SI1145_COMMAND_MINSLEEP_MS);
+
+ stop_jiffies = jiffies + SI1145_COMMAND_TIMEOUT_MS * HZ / 1000;
+ while (true) {
+ ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_RESPONSE);
+ if (ret <= 0)
+ return ret;
+ if (time_after(jiffies, stop_jiffies)) {
+ dev_warn(dev, "timeout on reset\n");
+ return -ETIMEDOUT;
+ }
+ msleep(SI1145_COMMAND_MINSLEEP_MS);
+ continue;
+ }
+}
+
+/**
+ * si1145_command() - Execute a command and poll the response register
+ *
+ * All conversion overflows are reported as -EOVERFLOW
+ * INVALID_SETTING is reported as -EINVAL
+ * Timeouts are reported as -ETIMEDOUT
+ *
+ * Return: 0 on success or -errno on failure
+ */
+static int si1145_command(struct si1145_data *data, u8 cmd)
+{
+ struct device *dev = &data->client->dev;
+ unsigned long stop_jiffies;
+ int ret;
+
+ mutex_lock(&data->cmdlock);
+
+ if (data->rsp_seq < 0) {
+ ret = __si1145_command_reset(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset command counter, ret=%d\n",
+ ret);
+ goto out;
+ }
+ data->rsp_seq = 0;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, SI1145_REG_COMMAND, cmd);
+ if (ret) {
+ dev_warn(dev, "failed to write command, ret=%d\n", ret);
+ goto out;
+ }
+ /* Sleep a little to ensure the command is received */
+ msleep(SI1145_COMMAND_MINSLEEP_MS);
+
+ stop_jiffies = jiffies + SI1145_COMMAND_TIMEOUT_MS * HZ / 1000;
+ while (true) {
+ ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_RESPONSE);
+ if (ret < 0) {
+ dev_warn(dev, "failed to read response, ret=%d\n", ret);
+ break;
+ }
+
+ if ((ret & ~SI1145_RSP_COUNTER_MASK) == 0) {
+ if (ret == data->rsp_seq) {
+ if (time_after(jiffies, stop_jiffies)) {
+ dev_warn(dev, "timeout on command %#02hhx\n",
+ cmd);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ msleep(SI1145_COMMAND_MINSLEEP_MS);
+ continue;
+ }
+ if (ret == ((data->rsp_seq + 1) &
+ SI1145_RSP_COUNTER_MASK)) {
+ data->rsp_seq = ret;
+ ret = 0;
+ break;
+ }
+ dev_warn(dev, "unexpected response counter %d instead of %d\n",
+ ret, (data->rsp_seq + 1) &
+ SI1145_RSP_COUNTER_MASK);
+ ret = -EIO;
+ } else {
+ if (ret == SI1145_RSP_INVALID_SETTING) {
+ dev_warn(dev, "INVALID_SETTING error on command %#02hhx\n",
+ cmd);
+ ret = -EINVAL;
+ } else {
+ /* All overflows are treated identically */
+ dev_dbg(dev, "overflow, ret=%d, cmd=%#02hhx\n",
+ ret, cmd);
+ ret = -EOVERFLOW;
+ }
+ }
+
+ /* Force a counter reset next time */
+ data->rsp_seq = -1;
+ break;
+ }
+
+out:
+ mutex_unlock(&data->cmdlock);
+
+ return ret;
+}
+
+static int si1145_param_update(struct si1145_data *data, u8 op, u8 param,
+ u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_PARAM_WR, value);
+ if (ret < 0)
+ return ret;
+
+ return si1145_command(data, op | (param & 0x1F));
+}
+
+static int si1145_param_set(struct si1145_data *data, u8 param, u8 value)
+{
+ return si1145_param_update(data, SI1145_CMD_PARAM_SET, param, value);
+}
+
+/* Query param. Returns negative errno or current value */
+static int si1145_param_query(struct si1145_data *data, u8 param)
+{
+ int ret;
+
+ ret = si1145_command(data, SI1145_CMD_PARAM_QUERY | (param & 0x1F));
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_read_byte_data(data->client, SI1145_REG_PARAM_RD);
+}
+
+/* Expand 8 bit compressed value to 16 bit, see Silabs AN498 */
+static u16 si1145_uncompress(u8 x)
+{
+ u16 result = 0;
+ u8 exponent = 0;
+
+ if (x < 8)
+ return 0;
+
+ exponent = (x & 0xf0) >> 4;
+ result = 0x10 | (x & 0x0f);
+
+ if (exponent >= 4)
+ return result << (exponent - 4);
+ return result >> (4 - exponent);
+}
+
+/* Compress 16 bit value to 8 bit, see Silabs AN498 */
+static u8 si1145_compress(u16 x)
+{
+ u32 exponent = 0;
+ u32 significand = 0;
+ u32 tmp = x;
+
+ if (x == 0x0000)
+ return 0x00;
+ if (x == 0x0001)
+ return 0x08;
+
+ while (1) {
+ tmp >>= 1;
+ exponent += 1;
+ if (tmp == 1)
+ break;
+ }
+
+ if (exponent < 5) {
+ significand = x << (4 - exponent);
+ return (exponent << 4) | (significand & 0xF);
+ }
+
+ significand = x >> (exponent - 5);
+ if (significand & 1) {
+ significand += 2;
+ if (significand & 0x0040) {
+ exponent += 1;
+ significand >>= 1;
+ }
+ }
+
+ return (exponent << 4) | ((significand >> 1) & 0xF);
+}
+
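si1145_compress()/si1145_uncompress() above implement the 8-bit MEAS_RATE encoding from Silabs AN498: a 4-bit exponent and a 4-bit significand with an implied leading one, rounded on compression. A stand-alone user-space toy that copies the expansion logic and prints a few sample codes (the chosen codes are arbitrary; the 32 kHz base matches the 32000 used by si1145_read_samp_freq() below):

#include <stdint.h>
#include <stdio.h>

static uint16_t si1145_uncompress(uint8_t x)
{
	uint16_t result;
	uint8_t exponent;

	if (x < 8)			/* too small to carry a significand */
		return 0;

	exponent = (x & 0xf0) >> 4;
	result = 0x10 | (x & 0x0f);	/* implied leading one */

	if (exponent >= 4)
		return result << (exponent - 4);
	return result >> (4 - exponent);
}

int main(void)
{
	const uint8_t codes[] = { 0x08, 0x84, 0xa0, 0xff };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
		unsigned int ticks = si1145_uncompress(codes[i]);

		printf("0x%02x -> %u ticks (%.2f Hz at the 32 kHz base rate)\n",
		       codes[i], ticks, 32000.0 / ticks);
	}
	return 0;
}
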
+/* Write meas_rate in hardware */
+static int si1145_set_meas_rate(struct si1145_data *data, int interval)
+{
+ if (data->part_info->uncompressed_meas_rate)
+ return i2c_smbus_write_word_data(data->client,
+ SI1145_REG_MEAS_RATE, interval);
+ else
+ return i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_MEAS_RATE, interval);
+}
+
+static int si1145_read_samp_freq(struct si1145_data *data, int *val, int *val2)
+{
+ *val = 32000;
+ if (data->part_info->uncompressed_meas_rate)
+ *val2 = data->meas_rate;
+ else
+ *val2 = si1145_uncompress(data->meas_rate);
+ return IIO_VAL_FRACTIONAL;
+}
+
+/* Set the samp freq in driver private data */
+static int si1145_store_samp_freq(struct si1145_data *data, int val)
+{
+ int ret = 0;
+ int meas_rate;
+
+ if (val <= 0 || val > 32000)
+ return -ERANGE;
+ meas_rate = 32000 / val;
+
+ mutex_lock(&data->lock);
+ if (data->autonomous) {
+ ret = si1145_set_meas_rate(data, meas_rate);
+ if (ret)
+ goto out;
+ }
+ if (data->part_info->uncompressed_meas_rate)
+ data->meas_rate = meas_rate;
+ else
+ data->meas_rate = si1145_compress(meas_rate);
+
+out:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t si1145_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct si1145_data *data = iio_priv(indio_dev);
+ /*
+ * Maximum buffer size:
+ * 6*2 bytes channels data + 4 bytes alignment +
+ * 8 bytes timestamp
+ */
+ u8 buffer[24];
+ int i, j = 0;
+ int ret;
+ u8 irq_status = 0;
+
+ if (!data->autonomous) {
+ ret = si1145_command(data, SI1145_CMD_PSALS_FORCE);
+ if (ret < 0 && ret != -EOVERFLOW)
+ goto done;
+ } else {
+ irq_status = ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_IRQ_STATUS);
+ if (ret < 0)
+ goto done;
+ if (!(irq_status & SI1145_MASK_ALL_IE))
+ goto done;
+ }
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ int run = 1;
+
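+ /*
+ * Extend the run while the next enabled channel uses the
+ * adjacent 16-bit data register, so one block read covers it
+ */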
+ while (i + run < indio_dev->masklength) {
+ if (!test_bit(i + run, indio_dev->active_scan_mask))
+ break;
+ if (indio_dev->channels[i + run].address !=
+ indio_dev->channels[i].address + 2 * run)
+ break;
+ run++;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ data->client, indio_dev->channels[i].address,
+ sizeof(u16) * run, &buffer[j]);
+ if (ret < 0)
+ goto done;
+ j += run * sizeof(u16);
+ i += run - 1;
+ }
+
+ if (data->autonomous) {
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_IRQ_STATUS,
+ irq_status & SI1145_MASK_ALL_IE);
+ if (ret < 0)
+ goto done;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns(indio_dev));
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int si1145_set_chlist(struct iio_dev *indio_dev, unsigned long scan_mask)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ u8 reg = 0, mux;
+ int ret;
+ int i;
+
+ /* channel list already set, no need to reprogram */
+ if (data->scan_mask == scan_mask)
+ return 0;
+
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ switch (indio_dev->channels[i].address) {
+ case SI1145_REG_ALSVIS_DATA:
+ reg |= SI1145_CHLIST_EN_ALSVIS;
+ break;
+ case SI1145_REG_ALSIR_DATA:
+ reg |= SI1145_CHLIST_EN_ALSIR;
+ break;
+ case SI1145_REG_PS1_DATA:
+ reg |= SI1145_CHLIST_EN_PS1;
+ break;
+ case SI1145_REG_PS2_DATA:
+ reg |= SI1145_CHLIST_EN_PS2;
+ break;
+ case SI1145_REG_PS3_DATA:
+ reg |= SI1145_CHLIST_EN_PS3;
+ break;
+ case SI1145_REG_AUX_DATA:
+ switch (indio_dev->channels[i].type) {
+ case IIO_UVINDEX:
+ reg |= SI1145_CHLIST_EN_UV;
+ break;
+ default:
+ reg |= SI1145_CHLIST_EN_AUX;
+ if (indio_dev->channels[i].type == IIO_TEMP)
+ mux = SI1145_MUX_TEMP;
+ else
+ mux = SI1145_MUX_VDD;
+ ret = si1145_param_set(data,
+ SI1145_PARAM_AUX_ADC_MUX, mux);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ }
+ }
+
+ data->scan_mask = scan_mask;
+ ret = si1145_param_set(data, SI1145_PARAM_CHLIST, reg);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int si1145_measure(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ u8 cmd;
+ int ret;
+
+ ret = si1145_set_chlist(indio_dev, BIT(chan->scan_index));
+ if (ret < 0)
+ return ret;
+
+ cmd = (chan->type == IIO_PROXIMITY) ? SI1145_CMD_PS_FORCE :
+ SI1145_CMD_ALS_FORCE;
+ ret = si1145_command(data, cmd);
+ if (ret < 0 && ret != -EOVERFLOW)
+ return ret;
+
+ return i2c_smbus_read_word_data(data->client, chan->address);
+}
+
+/*
+ * Conversion between iio scale and ADC_GAIN values
+ * These could be further adjusted but proximity/intensity are dimensionless
+ */
+static const int si1145_proximity_scale_available[] = {
+ 128, 64, 32, 16, 8, 4};
+static const int si1145_intensity_scale_available[] = {
+ 128, 64, 32, 16, 8, 4, 2, 1};
+static IIO_CONST_ATTR(in_proximity_scale_available,
+ "128 64 32 16 8 4");
+static IIO_CONST_ATTR(in_intensity_scale_available,
+ "128 64 32 16 8 4 2 1");
+static IIO_CONST_ATTR(in_intensity_ir_scale_available,
+ "128 64 32 16 8 4 2 1");
+
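+/* scale = 128 >> ADC_GAIN, so register values 0..7 map to 128, 64, ..., 1 */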
+static int si1145_scale_from_adcgain(int regval)
+{
+ return 128 >> regval;
+}
+
+static int si1145_proximity_adcgain_from_scale(int val, int val2)
+{
+ val = find_closest_descending(val, si1145_proximity_scale_available,
+ ARRAY_SIZE(si1145_proximity_scale_available));
+ if (val < 0 || val > 5 || val2 != 0)
+ return -EINVAL;
+
+ return val;
+}
+
+static int si1145_intensity_adcgain_from_scale(int val, int val2)
+{
+ val = find_closest_descending(val, si1145_intensity_scale_available,
+ ARRAY_SIZE(si1145_intensity_scale_available));
+ if (val < 0 || val > 7 || val2 != 0)
+ return -EINVAL;
+
+ return val;
+}
+
+static int si1145_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ case IIO_PROXIMITY:
+ case IIO_VOLTAGE:
+ case IIO_TEMP:
+ case IIO_UVINDEX:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = si1145_measure(indio_dev, chan);
+ iio_device_release_direct_mode(indio_dev);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_PS_LED_REG(chan->channel));
+ if (ret < 0)
+ return ret;
+
+ *val = (ret >> SI1145_PS_LED_SHIFT(chan->channel))
+ & 0x0f;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ reg = SI1145_PARAM_PS_ADC_GAIN;
+ break;
+ case IIO_INTENSITY:
+ if (chan->channel2 == IIO_MOD_LIGHT_IR)
+ reg = SI1145_PARAM_ALSIR_ADC_GAIN;
+ else
+ reg = SI1145_PARAM_ALSVIS_ADC_GAIN;
+ break;
+ case IIO_TEMP:
+ *val = 28;
+ *val2 = 571429;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_UVINDEX:
+ *val = 0;
+ *val2 = 10000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+ ret = si1145_param_query(data, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = si1145_scale_from_adcgain(ret & 0x07);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * -ADC offset - ADC counts @ 25°C -
+ * 35 * ADC counts / °C
+ */
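+ /* i.e. -10517; temperature in °C = (raw - 10517) / 35 */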
+ *val = -256 - 11136 + 25 * 35;
+ return IIO_VAL_INT;
+ default:
+ /*
+ * All ADC measurements are offset by -256 by default
+ * See AN498 5.6.3
+ */
+ ret = si1145_param_query(data, SI1145_PARAM_ADC_OFFSET);
+ if (ret < 0)
+ return ret;
+ *val = -si1145_uncompress(ret);
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return si1145_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si1145_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ u8 reg1, reg2, shift;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ val = si1145_proximity_adcgain_from_scale(val, val2);
+ if (val < 0)
+ return val;
+ reg1 = SI1145_PARAM_PS_ADC_GAIN;
+ reg2 = SI1145_PARAM_PS_ADC_COUNTER;
+ break;
+ case IIO_INTENSITY:
+ val = si1145_intensity_adcgain_from_scale(val, val2);
+ if (val < 0)
+ return val;
+ if (chan->channel2 == IIO_MOD_LIGHT_IR) {
+ reg1 = SI1145_PARAM_ALSIR_ADC_GAIN;
+ reg2 = SI1145_PARAM_ALSIR_ADC_COUNTER;
+ } else {
+ reg1 = SI1145_PARAM_ALSVIS_ADC_GAIN;
+ reg2 = SI1145_PARAM_ALSVIS_ADC_COUNTER;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = si1145_param_set(data, reg1, val);
+ if (ret < 0) {
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ }
+ /* Set recovery period to one's complement of gain */
+ ret = si1145_param_set(data, reg2, (~val & 0x07) << 4);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ if (val < 0 || val > 15 || val2 != 0)
+ return -EINVAL;
+
+ reg1 = SI1145_PS_LED_REG(chan->channel);
+ shift = SI1145_PS_LED_SHIFT(chan->channel);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, reg1);
+ if (ret < 0) {
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ }
+ ret = i2c_smbus_write_byte_data(data->client, reg1,
+ (ret & ~(0x0f << shift)) |
+ ((val & 0x0f) << shift));
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return si1145_store_samp_freq(data, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define SI1145_ST { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+}
+
+#define SI1145_INTENSITY_CHANNEL(_si) { \
+ .type = IIO_INTENSITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_ALSVIS_DATA, \
+}
+
+#define SI1145_INTENSITY_IR_CHANNEL(_si) { \
+ .type = IIO_INTENSITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_IR, \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_ALSIR_DATA, \
+}
+
+#define SI1145_TEMP_CHANNEL(_si) { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_AUX_DATA, \
+}
+
+#define SI1145_UV_CHANNEL(_si) { \
+ .type = IIO_UVINDEX, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_AUX_DATA, \
+}
+
+#define SI1145_PROXIMITY_CHANNEL(_si, _ch) { \
+ .type = IIO_PROXIMITY, \
+ .indexed = 1, \
+ .channel = _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_PS1_DATA + _ch * 2, \
+}
+
+#define SI1145_VOLTAGE_CHANNEL(_si) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = SI1145_ST, \
+ .scan_index = _si, \
+ .address = SI1145_REG_AUX_DATA, \
+}
+
+#define SI1145_CURRENT_CHANNEL(_ch) { \
+ .type = IIO_CURRENT, \
+ .indexed = 1, \
+ .channel = _ch, \
+ .output = 1, \
+ .scan_index = -1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec si1132_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_TEMP_CHANNEL(2),
+ SI1145_VOLTAGE_CHANNEL(3),
+ SI1145_UV_CHANNEL(4),
+ IIO_CHAN_SOFT_TIMESTAMP(6),
+};
+
+static const struct iio_chan_spec si1141_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_PROXIMITY_CHANNEL(2, 0),
+ SI1145_TEMP_CHANNEL(3),
+ SI1145_VOLTAGE_CHANNEL(4),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+ SI1145_CURRENT_CHANNEL(0),
+};
+
+static const struct iio_chan_spec si1142_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_PROXIMITY_CHANNEL(2, 0),
+ SI1145_PROXIMITY_CHANNEL(3, 1),
+ SI1145_TEMP_CHANNEL(4),
+ SI1145_VOLTAGE_CHANNEL(5),
+ IIO_CHAN_SOFT_TIMESTAMP(6),
+ SI1145_CURRENT_CHANNEL(0),
+ SI1145_CURRENT_CHANNEL(1),
+};
+
+static const struct iio_chan_spec si1143_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_PROXIMITY_CHANNEL(2, 0),
+ SI1145_PROXIMITY_CHANNEL(3, 1),
+ SI1145_PROXIMITY_CHANNEL(4, 2),
+ SI1145_TEMP_CHANNEL(5),
+ SI1145_VOLTAGE_CHANNEL(6),
+ IIO_CHAN_SOFT_TIMESTAMP(7),
+ SI1145_CURRENT_CHANNEL(0),
+ SI1145_CURRENT_CHANNEL(1),
+ SI1145_CURRENT_CHANNEL(2),
+};
+
+static const struct iio_chan_spec si1145_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_PROXIMITY_CHANNEL(2, 0),
+ SI1145_TEMP_CHANNEL(3),
+ SI1145_VOLTAGE_CHANNEL(4),
+ SI1145_UV_CHANNEL(5),
+ IIO_CHAN_SOFT_TIMESTAMP(6),
+ SI1145_CURRENT_CHANNEL(0),
+};
+
+static const struct iio_chan_spec si1146_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_TEMP_CHANNEL(2),
+ SI1145_VOLTAGE_CHANNEL(3),
+ SI1145_UV_CHANNEL(4),
+ SI1145_PROXIMITY_CHANNEL(5, 0),
+ SI1145_PROXIMITY_CHANNEL(6, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(7),
+ SI1145_CURRENT_CHANNEL(0),
+ SI1145_CURRENT_CHANNEL(1),
+};
+
+static const struct iio_chan_spec si1147_channels[] = {
+ SI1145_INTENSITY_CHANNEL(0),
+ SI1145_INTENSITY_IR_CHANNEL(1),
+ SI1145_PROXIMITY_CHANNEL(2, 0),
+ SI1145_PROXIMITY_CHANNEL(3, 1),
+ SI1145_PROXIMITY_CHANNEL(4, 2),
+ SI1145_TEMP_CHANNEL(5),
+ SI1145_VOLTAGE_CHANNEL(6),
+ SI1145_UV_CHANNEL(7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+ SI1145_CURRENT_CHANNEL(0),
+ SI1145_CURRENT_CHANNEL(1),
+ SI1145_CURRENT_CHANNEL(2),
+};
+
+static struct attribute *si1132_attributes[] = {
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_intensity_ir_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *si114x_attributes[] = {
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_intensity_ir_scale_available.dev_attr.attr,
+ &iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group si1132_attribute_group = {
+ .attrs = si1132_attributes,
+};
+
+static const struct attribute_group si114x_attribute_group = {
+ .attrs = si114x_attributes,
+};
+
+
+static const struct iio_info si1132_info = {
+ .read_raw = si1145_read_raw,
+ .write_raw = si1145_write_raw,
+ .driver_module = THIS_MODULE,
+ .attrs = &si1132_attribute_group,
+};
+
+static const struct iio_info si114x_info = {
+ .read_raw = si1145_read_raw,
+ .write_raw = si1145_write_raw,
+ .driver_module = THIS_MODULE,
+ .attrs = &si114x_attribute_group,
+};
+
+#define SI1145_PART(id, iio_info, chans, leds, uncompressed_meas_rate) \
+ {id, iio_info, chans, ARRAY_SIZE(chans), leds, uncompressed_meas_rate}
+
+static const struct si1145_part_info si1145_part_info[] = {
+ [SI1132] = SI1145_PART(0x32, &si1132_info, si1132_channels, 0, true),
+ [SI1141] = SI1145_PART(0x41, &si114x_info, si1141_channels, 1, false),
+ [SI1142] = SI1145_PART(0x42, &si114x_info, si1142_channels, 2, false),
+ [SI1143] = SI1145_PART(0x43, &si114x_info, si1143_channels, 3, false),
+ [SI1145] = SI1145_PART(0x45, &si114x_info, si1145_channels, 1, true),
+ [SI1146] = SI1145_PART(0x46, &si114x_info, si1146_channels, 2, true),
+ [SI1147] = SI1145_PART(0x47, &si114x_info, si1147_channels, 3, true),
+};
+
+static int si1145_initialize(struct si1145_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, SI1145_REG_COMMAND,
+ SI1145_CMD_RESET);
+ if (ret < 0)
+ return ret;
+ msleep(SI1145_COMMAND_TIMEOUT_MS);
+
+ /* Hardware key, magic value */
+ ret = i2c_smbus_write_byte_data(client, SI1145_REG_HW_KEY, 0x17);
+ if (ret < 0)
+ return ret;
+ msleep(SI1145_COMMAND_TIMEOUT_MS);
+
+ /* Turn off autonomous mode */
+ ret = si1145_set_meas_rate(data, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize sampling freq to 10 Hz */
+ ret = si1145_store_samp_freq(data, 10);
+ if (ret < 0)
+ return ret;
+
+ /* Set LED currents to 45 mA; the fields are 4 bits, see datasheet Table 2 */
+ switch (data->part_info->num_leds) {
+ case 3:
+ ret = i2c_smbus_write_byte_data(client,
+ SI1145_REG_PS_LED3,
+ SI1145_LED_CURRENT_45mA);
+ if (ret < 0)
+ return ret;
+ /* fallthrough */
+ case 2:
+ ret = i2c_smbus_write_byte_data(client,
+ SI1145_REG_PS_LED21,
+ (SI1145_LED_CURRENT_45mA << 4) |
+ SI1145_LED_CURRENT_45mA);
+ break;
+ case 1:
+ ret = i2c_smbus_write_byte_data(client,
+ SI1145_REG_PS_LED21,
+ SI1145_LED_CURRENT_45mA);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
+ return ret;
+
+ /* Set normal proximity measurement mode */
+ ret = si1145_param_set(data, SI1145_PARAM_PS_ADC_MISC,
+ SI1145_PS_ADC_MODE_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ ret = si1145_param_set(data, SI1145_PARAM_PS_ADC_GAIN, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* ADC_COUNTER should be the one's complement of ADC_GAIN */
+ ret = si1145_param_set(data, SI1145_PARAM_PS_ADC_COUNTER, 0x06 << 4);
+ if (ret < 0)
+ return ret;
+
+ /* Set ALS visible measurement mode */
+ ret = si1145_param_set(data, SI1145_PARAM_ALSVIS_ADC_MISC,
+ SI1145_ADC_MISC_RANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = si1145_param_set(data, SI1145_PARAM_ALSVIS_ADC_GAIN, 0x03);
+ if (ret < 0)
+ return ret;
+
+ ret = si1145_param_set(data, SI1145_PARAM_ALSVIS_ADC_COUNTER,
+ 0x04 << 4);
+ if (ret < 0)
+ return ret;
+
+ /* Set ALS IR measurement mode */
+ ret = si1145_param_set(data, SI1145_PARAM_ALSIR_ADC_MISC,
+ SI1145_ADC_MISC_RANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = si1145_param_set(data, SI1145_PARAM_ALSIR_ADC_GAIN, 0x01);
+ if (ret < 0)
+ return ret;
+
+ ret = si1145_param_set(data, SI1145_PARAM_ALSIR_ADC_COUNTER,
+ 0x06 << 4);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Initialize UCOEF to default values in datasheet
+ * These registers are normally zero on reset
+ */
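+ /* Only the parts with a UV index channel need these coefficients */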
+ if (data->part_info == &si1145_part_info[SI1132] ||
+ data->part_info == &si1145_part_info[SI1145] ||
+ data->part_info == &si1145_part_info[SI1146] ||
+ data->part_info == &si1145_part_info[SI1147]) {
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_UCOEF1,
+ SI1145_UCOEF1_DEFAULT);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_UCOEF2, SI1145_UCOEF2_DEFAULT);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_UCOEF3, SI1145_UCOEF3_DEFAULT);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_UCOEF4, SI1145_UCOEF4_DEFAULT);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Program the channels we want to measure with CMD_PSALS_AUTO. No need for
+ * _postdisable as we stop with CMD_PSALS_PAUSE; single measurement (direct)
+ * mode reprograms the channels list anyway...
+ */
+static int si1145_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = si1145_set_chlist(indio_dev, *indio_dev->active_scan_mask);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static bool si1145_validate_scan_mask(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ unsigned int count = 0;
+ int i;
+
+ /* Check that at most one AUX channel is enabled */
+ for_each_set_bit(i, scan_mask, data->part_info->num_channels) {
+ if (indio_dev->channels[i].address == SI1145_REG_AUX_DATA)
+ count++;
+ }
+
+ return count <= 1;
+}
+
+static const struct iio_buffer_setup_ops si1145_buffer_setup_ops = {
+ .preenable = si1145_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .validate_scan_mask = si1145_validate_scan_mask,
+};
+
+/**
+ * si1145_trigger_set_state() - Set trigger state
+ *
+ * When triggers are not used, interrupts are disabled and the measurement
+ * rate is set to zero in order to minimize power consumption.
+ */
+static int si1145_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct si1145_data *data = iio_priv(indio_dev);
+ int err = 0, ret;
+
+ mutex_lock(&data->lock);
+
+ if (state) {
+ data->autonomous = true;
+ err = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_INT_CFG, SI1145_INT_CFG_OE);
+ if (err < 0)
+ goto disable;
+ err = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_IRQ_ENABLE, SI1145_MASK_ALL_IE);
+ if (err < 0)
+ goto disable;
+ err = si1145_set_meas_rate(data, data->meas_rate);
+ if (err < 0)
+ goto disable;
+ err = si1145_command(data, SI1145_CMD_PSALS_AUTO);
+ if (err < 0)
+ goto disable;
+ } else {
+disable:
+ /* Disable as much as possible, skipping errors */
+ ret = si1145_command(data, SI1145_CMD_PSALS_PAUSE);
+ if (ret < 0 && !err)
+ err = ret;
+ ret = si1145_set_meas_rate(data, 0);
+ if (ret < 0 && !err)
+ err = ret;
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_IRQ_ENABLE, 0);
+ if (ret < 0 && !err)
+ err = ret;
+ ret = i2c_smbus_write_byte_data(data->client,
+ SI1145_REG_INT_CFG, 0);
+ if (ret < 0 && !err)
+ err = ret;
+ data->autonomous = false;
+ }
+
+ mutex_unlock(&data->lock);
+ return err;
+}
+
+static const struct iio_trigger_ops si1145_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = si1145_trigger_set_state,
+};
+
+static int si1145_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!trig)
+ return -ENOMEM;
+
+ trig->dev.parent = &client->dev;
+ trig->ops = &si1145_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+
+ ret = devm_request_irq(&client->dev, client->irq,
+ iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_FALLING,
+ "si1145_irq",
+ trig);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq request failed\n");
+ return ret;
+ }
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ return ret;
+
+ data->trig = trig;
+ indio_dev->trig = iio_trigger_get(data->trig);
+
+ return 0;
+}
+
+static void si1145_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct si1145_data *data = iio_priv(indio_dev);
+
+ if (data->trig) {
+ iio_trigger_unregister(data->trig);
+ data->trig = NULL;
+ }
+}
+
+static int si1145_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si1145_data *data;
+ struct iio_dev *indio_dev;
+ u8 part_id, rev_id, seq_id;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->part_info = &si1145_part_info[id->driver_data];
+
+ part_id = ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_PART_ID);
+ if (ret < 0)
+ return ret;
+ rev_id = ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_REV_ID);
+ if (ret < 0)
+ return ret;
+ seq_id = ret = i2c_smbus_read_byte_data(data->client,
+ SI1145_REG_SEQ_ID);
+ if (ret < 0)
+ return ret;
+ dev_info(&client->dev, "device ID part %#02hhx rev %#02hhx seq %#02hhx\n",
+ part_id, rev_id, seq_id);
+ if (part_id != data->part_info->part) {
+ dev_err(&client->dev, "part ID mismatch got %#02hhx, expected %#02x\n",
+ part_id, data->part_info->part);
+ return -ENODEV;
+ }
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = data->part_info->channels;
+ indio_dev->num_channels = data->part_info->num_channels;
+ indio_dev->info = data->part_info->iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ mutex_init(&data->lock);
+ mutex_init(&data->cmdlock);
+
+ ret = si1145_initialize(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ si1145_trigger_handler, &si1145_buffer_setup_ops);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq) {
+ ret = si1145_probe_trigger(indio_dev);
+ if (ret < 0)
+ goto error_free_buffer;
+ } else {
+ dev_info(&client->dev, "no irq, using polling\n");
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_free_trigger;
+
+ return 0;
+
+error_free_trigger:
+ si1145_remove_trigger(indio_dev);
+error_free_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static const struct i2c_device_id si1145_ids[] = {
+ { "si1132", SI1132 },
+ { "si1141", SI1141 },
+ { "si1142", SI1142 },
+ { "si1143", SI1143 },
+ { "si1145", SI1145 },
+ { "si1146", SI1146 },
+ { "si1147", SI1147 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si1145_ids);
+
+static int si1145_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ si1145_remove_trigger(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static struct i2c_driver si1145_driver = {
+ .driver = {
+ .name = "si1145",
+ },
+ .probe = si1145_probe,
+ .remove = si1145_remove,
+ .id_table = si1145_ids,
+};
+
+module_i2c_driver(si1145_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Silabs SI1132 and SI1141/2/3/5/6/7 proximity, ambient light and UV index sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 20c40f780964..18cf2e29e4d5 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -894,7 +894,7 @@ static int us5182d_probe(struct i2c_client *client,
goto out_err;
if (data->default_continuous) {
- pm_runtime_set_active(&client->dev);
+ ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
goto out_err;
}
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index c9d85bbc9230..360b6e98137a 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1,6 +1,6 @@
/*
- * vcnl4000.c - Support for Vishay VCNL4000 combined ambient light and
- * proximity sensor
+ * vcnl4000.c - Support for Vishay VCNL4000/4010/4020 combined ambient
+ * light and proximity sensor
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -13,6 +13,8 @@
* TODO:
* allow to adjust IR current
* proximity threshold and event handling
+ * periodic ALS/proximity measurement (VCNL4010/20)
+ * interrupts (VCNL4010/20)
*/
#include <linux/module.h>
@@ -24,6 +26,8 @@
#include <linux/iio/sysfs.h>
#define VCNL4000_DRV_NAME "vcnl4000"
+#define VCNL4000_ID 0x01
+#define VCNL4010_ID 0x02 /* for VCNL4020, VCNL4010 */
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
@@ -37,13 +41,14 @@
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
/* Bit masks for COMMAND register */
-#define VCNL4000_AL_RDY 0x40 /* ALS data ready? */
-#define VCNL4000_PS_RDY 0x20 /* proximity data ready? */
-#define VCNL4000_AL_OD 0x10 /* start on-demand ALS measurement */
-#define VCNL4000_PS_OD 0x08 /* start on-demand proximity measurement */
+#define VCNL4000_AL_RDY BIT(6) /* ALS data ready? */
+#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
+#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
+#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
struct vcnl4000_data {
struct i2c_client *client;
+ struct mutex lock;
};
static const struct i2c_device_id vcnl4000_id[] = {
@@ -59,16 +64,18 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
__be16 buf;
int ret;
+ mutex_lock(&data->lock);
+
ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
req_mask);
if (ret < 0)
- return ret;
+ goto fail;
/* wait for data to become ready */
while (tries--) {
ret = i2c_smbus_read_byte_data(data->client, VCNL4000_COMMAND);
if (ret < 0)
- return ret;
+ goto fail;
if (ret & rdy_mask)
break;
msleep(20); /* measurement takes up to 100 ms */
@@ -77,17 +84,23 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
if (tries < 0) {
dev_err(&data->client->dev,
"vcnl4000_measure() failed, data not ready\n");
- return -EIO;
+ ret = -EIO;
+ goto fail;
}
ret = i2c_smbus_read_i2c_block_data(data->client,
data_reg, sizeof(buf), (u8 *) &buf);
if (ret < 0)
- return ret;
+ goto fail;
+ mutex_unlock(&data->lock);
*val = be16_to_cpu(buf);
return 0;
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
}
static const struct iio_chan_spec vcnl4000_channels[] = {
@@ -105,7 +118,7 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret = -EINVAL;
+ int ret;
struct vcnl4000_data *data = iio_priv(indio_dev);
switch (mask) {
@@ -117,32 +130,27 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
VCNL4000_AL_RESULT_HI, val);
if (ret < 0)
return ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_PROXIMITY:
ret = vcnl4000_measure(data,
VCNL4000_PS_OD, VCNL4000_PS_RDY,
VCNL4000_PS_RESULT_HI, val);
if (ret < 0)
return ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_SCALE:
- if (chan->type == IIO_LIGHT) {
- *val = 0;
- *val2 = 250000;
- ret = IIO_VAL_INT_PLUS_MICRO;
- }
- break;
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ *val = 0;
+ *val2 = 250000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
- break;
+ return -EINVAL;
}
-
- return ret;
}
static const struct iio_info vcnl4000_info = {
@@ -155,7 +163,7 @@ static int vcnl4000_probe(struct i2c_client *client,
{
struct vcnl4000_data *data;
struct iio_dev *indio_dev;
- int ret;
+ int ret, prod_id;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -164,13 +172,19 @@ static int vcnl4000_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
if (ret < 0)
return ret;
- dev_info(&client->dev, "VCNL4000 Ambient light/proximity sensor, Prod %02x, Rev: %02x\n",
- ret >> 4, ret & 0xf);
+ prod_id = ret >> 4;
+ if (prod_id != VCNL4010_ID && prod_id != VCNL4000_ID)
+ return -ENODEV;
+
+ dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
+ (prod_id == VCNL4010_ID) ? "VCNL4010/4020" : "VCNL4000",
+ ret & 0xf);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &vcnl4000_info;
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 1f842abcb4a4..421ad90a5fbe 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -5,8 +5,22 @@
menu "Magnetometer sensors"
+config AK8974
+ tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Asahi Kasei AK8974 or
+ AMI305 I2C-based 3-axis magnetometer chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ak8974.
+
config AK8975
- tristate "Asahi Kasei AK 3-Axis Magnetometer"
+ tristate "Asahi Kasei AK8975 3-Axis Magnetometer"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
select IIO_BUFFER
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 92a745c9a6e8..b86d6cb7f285 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AK8974) += ak8974.o
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
obj-$(CONFIG_BMC150_MAGN_I2C) += bmc150_magn_i2c.o
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
new file mode 100644
index 000000000000..217353145676
--- /dev/null
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -0,0 +1,860 @@
+/*
+ * Driver for the Asahi Kasei EMD Corporation AK8974
+ * and Aichi Steel AMI305 magnetometer chips.
+ * Based on a patch from Samu Onkalo and the AK8975 IIO driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ * Copyright (c) 2010 NVIDIA Corporation.
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Samu Onkalo <samu.p.onkalo@nokia.com>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h> /* For irq_get_irq_data() */
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/*
+ * 16-bit registers are little-endian. LSB is at the address defined below
+ * and MSB is at the next higher address.
+ */
+
+/* These registers are common for AK8974 and AMI305 */
+#define AK8974_SELFTEST 0x0C
+#define AK8974_SELFTEST_IDLE 0x55
+#define AK8974_SELFTEST_OK 0xAA
+
+#define AK8974_INFO 0x0D
+
+#define AK8974_WHOAMI 0x0F
+#define AK8974_WHOAMI_VALUE_AMI305 0x47
+#define AK8974_WHOAMI_VALUE_AK8974 0x48
+
+#define AK8974_DATA_X 0x10
+#define AK8974_DATA_Y 0x12
+#define AK8974_DATA_Z 0x14
+#define AK8974_INT_SRC 0x16
+#define AK8974_STATUS 0x18
+#define AK8974_INT_CLEAR 0x1A
+#define AK8974_CTRL1 0x1B
+#define AK8974_CTRL2 0x1C
+#define AK8974_CTRL3 0x1D
+#define AK8974_INT_CTRL 0x1E
+#define AK8974_INT_THRES 0x26 /* Absolute threshold for any axis value */
+#define AK8974_PRESET 0x30
+
+/* AK8974-specific offsets */
+#define AK8974_OFFSET_X 0x20
+#define AK8974_OFFSET_Y 0x22
+#define AK8974_OFFSET_Z 0x24
+/* AMI305-specific offsets */
+#define AMI305_OFFSET_X 0x6C
+#define AMI305_OFFSET_Y 0x72
+#define AMI305_OFFSET_Z 0x78
+
+/* Different temperature registers */
+#define AK8974_TEMP 0x31
+#define AMI305_TEMP 0x60
+
+#define AK8974_INT_X_HIGH BIT(7) /* Axis over +threshold */
+#define AK8974_INT_Y_HIGH BIT(6)
+#define AK8974_INT_Z_HIGH BIT(5)
+#define AK8974_INT_X_LOW BIT(4) /* Axis below -threshold */
+#define AK8974_INT_Y_LOW BIT(3)
+#define AK8974_INT_Z_LOW BIT(2)
+#define AK8974_INT_RANGE BIT(1) /* Range overflow (any axis) */
+
+#define AK8974_STATUS_DRDY BIT(6) /* Data ready */
+#define AK8974_STATUS_OVERRUN BIT(5) /* Data overrun */
+#define AK8974_STATUS_INT BIT(4) /* Interrupt occurred */
+
+#define AK8974_CTRL1_POWER BIT(7) /* 0 = standby; 1 = active */
+#define AK8974_CTRL1_RATE BIT(4) /* 0 = 10 Hz; 1 = 20 Hz */
+#define AK8974_CTRL1_FORCE_EN BIT(1) /* 0 = normal; 1 = force */
+#define AK8974_CTRL1_MODE2 BIT(0) /* 0 */
+
+#define AK8974_CTRL2_INT_EN BIT(4) /* 1 = enable interrupts */
+#define AK8974_CTRL2_DRDY_EN BIT(3) /* 1 = enable data ready signal */
+#define AK8974_CTRL2_DRDY_POL BIT(2) /* 1 = data ready active high */
+#define AK8974_CTRL2_RESDEF (AK8974_CTRL2_DRDY_POL)
+
+#define AK8974_CTRL3_RESET BIT(7) /* Software reset */
+#define AK8974_CTRL3_FORCE BIT(6) /* Start forced measurement */
+#define AK8974_CTRL3_SELFTEST BIT(4) /* Set selftest register */
+#define AK8974_CTRL3_RESDEF 0x00
+
+#define AK8974_INT_CTRL_XEN BIT(7) /* Enable interrupt for this axis */
+#define AK8974_INT_CTRL_YEN BIT(6)
+#define AK8974_INT_CTRL_ZEN BIT(5)
+#define AK8974_INT_CTRL_XYZEN (BIT(7)|BIT(6)|BIT(5))
+#define AK8974_INT_CTRL_POL BIT(3) /* 0 = active low; 1 = active high */
+#define AK8974_INT_CTRL_PULSE BIT(1) /* 0 = latched; 1 = pulse (50 usec) */
+#define AK8974_INT_CTRL_RESDEF (AK8974_INT_CTRL_XYZEN | AK8974_INT_CTRL_POL)
+
+/* The AMI305 has elaborate FW version and serial number registers */
+#define AMI305_VER 0xE8
+#define AMI305_SN 0xEA
+
+#define AK8974_MAX_RANGE 2048
+
+#define AK8974_POWERON_DELAY 50
+#define AK8974_ACTIVATE_DELAY 1
+#define AK8974_SELFTEST_DELAY 1
+/*
+ * Set the autosuspend delay to two orders of magnitude larger than the
+ * power-on delay to get a reasonable power tradeoff (50 ms * 100 =
+ * 5 seconds in this case).
+ */
+#define AK8974_AUTOSUSPEND_DELAY 5000
+
+#define AK8974_MEASTIME 3
+
+#define AK8974_PWR_ON 1
+#define AK8974_PWR_OFF 0
+
+/**
+ * struct ak8974 - state container for the AK8974 driver
+ * @i2c: parent I2C client
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to access the AK8974 registers over I2C
+ * @regs: the avdd and dvdd power regulators
+ * @name: the name of the part
+ * @variant: the whoami ID value (for selecting code paths)
+ * @lock: locks the magnetometer for exclusive use during a measurement
+ * @drdy_irq: uses the DRDY IRQ line
+ * @drdy_complete: completion for DRDY
+ * @drdy_active_low: the DRDY IRQ is active low
+ */
+struct ak8974 {
+ struct i2c_client *i2c;
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct regulator_bulk_data regs[2];
+ const char *name;
+ u8 variant;
+ struct mutex lock;
+ bool drdy_irq;
+ struct completion drdy_complete;
+ bool drdy_active_low;
+};
+
+static const char ak8974_reg_avdd[] = "avdd";
+static const char ak8974_reg_dvdd[] = "dvdd";
+
+static int ak8974_set_power(struct ak8974 *ak8974, bool mode)
+{
+ int ret;
+ u8 val;
+
+ val = mode ? AK8974_CTRL1_POWER : 0;
+ val |= AK8974_CTRL1_FORCE_EN;
+ ret = regmap_write(ak8974->map, AK8974_CTRL1, val);
+ if (ret < 0)
+ return ret;
+
+ if (mode)
+ msleep(AK8974_ACTIVATE_DELAY);
+
+ return 0;
+}
+
+static int ak8974_reset(struct ak8974 *ak8974)
+{
+ int ret;
+
+ /* Power on to get register access. Sets CTRL1 reg to reset state */
+ ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
+ if (ret)
+ return ret;
+ ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_RESDEF);
+ if (ret)
+ return ret;
+ ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF);
+ if (ret)
+ return ret;
+ ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
+ AK8974_INT_CTRL_RESDEF);
+ if (ret)
+ return ret;
+
+ /* After reset, power off is default state */
+ return ak8974_set_power(ak8974, AK8974_PWR_OFF);
+}
+
+static int ak8974_configure(struct ak8974 *ak8974)
+{
+ int ret;
+
+ ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_DRDY_EN |
+ AK8974_CTRL2_INT_EN);
+ if (ret)
+ return ret;
+ ret = regmap_write(ak8974->map, AK8974_CTRL3, 0);
+ if (ret)
+ return ret;
+ ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
+ if (ret)
+ return ret;
+
+ return regmap_write(ak8974->map, AK8974_PRESET, 0);
+}
+
+static int ak8974_trigmeas(struct ak8974 *ak8974)
+{
+ unsigned int clear;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ /* Clear any previous measurement overflow status */
+ ret = regmap_read(ak8974->map, AK8974_INT_CLEAR, &clear);
+ if (ret)
+ return ret;
+
+ /* If we have a DRDY IRQ line, use it */
+ if (ak8974->drdy_irq) {
+ mask = AK8974_CTRL2_INT_EN |
+ AK8974_CTRL2_DRDY_EN |
+ AK8974_CTRL2_DRDY_POL;
+ val = AK8974_CTRL2_DRDY_EN;
+
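+ /* DRDY is active high unless the IRQ line is configured falling edge */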
+ if (!ak8974->drdy_active_low)
+ val |= AK8974_CTRL2_DRDY_POL;
+
+ init_completion(&ak8974->drdy_complete);
+ ret = regmap_update_bits(ak8974->map, AK8974_CTRL2,
+ mask, val);
+ if (ret)
+ return ret;
+ }
+
+ /* Force a measurement */
+ return regmap_update_bits(ak8974->map,
+ AK8974_CTRL3,
+ AK8974_CTRL3_FORCE,
+ AK8974_CTRL3_FORCE);
+}
+
+static int ak8974_await_drdy(struct ak8974 *ak8974)
+{
+ int timeout = 2;
+ unsigned int val;
+ int ret;
+
+ if (ak8974->drdy_irq) {
+ ret = wait_for_completion_timeout(&ak8974->drdy_complete,
+ 1 + msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(&ak8974->i2c->dev,
+ "timeout waiting for DRDY IRQ\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+ }
+
+ /* Default delay-based poll loop */
+ do {
+ msleep(AK8974_MEASTIME);
+ ret = regmap_read(ak8974->map, AK8974_STATUS, &val);
+ if (ret < 0)
+ return ret;
+ if (val & AK8974_STATUS_DRDY)
+ return 0;
+ } while (--timeout);
+ if (!timeout) {
+ dev_err(&ak8974->i2c->dev,
+ "timeout waiting for DRDY\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ak8974_getresult(struct ak8974 *ak8974, s16 *result)
+{
+ unsigned int src;
+ int ret;
+
+ ret = ak8974_await_drdy(ak8974);
+ if (ret)
+ return ret;
+ ret = regmap_read(ak8974->map, AK8974_INT_SRC, &src);
+ if (ret < 0)
+ return ret;
+
+ /* Out of range overflow! Strong magnet close? */
+ if (src & AK8974_INT_RANGE) {
+ dev_err(&ak8974->i2c->dev,
+ "range overflow in sensor\n");
+ return -ERANGE;
+ }
+
+ ret = regmap_bulk_read(ak8974->map, AK8974_DATA_X, result, 6);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static irqreturn_t ak8974_drdy_irq(int irq, void *d)
+{
+ struct ak8974 *ak8974 = d;
+
+ if (!ak8974->drdy_irq)
+ return IRQ_NONE;
+
+ /* TODO: timestamp here to get good measurement stamps */
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t ak8974_drdy_irq_thread(int irq, void *d)
+{
+ struct ak8974 *ak8974 = d;
+ unsigned int val;
+ int ret;
+
+ /* Check if this was a DRDY from us */
+ ret = regmap_read(ak8974->map, AK8974_STATUS, &val);
+ if (ret < 0) {
+ dev_err(&ak8974->i2c->dev, "error reading DRDY status\n");
+ return IRQ_HANDLED;
+ }
+ if (val & AK8974_STATUS_DRDY) {
+ /* Yes this was our IRQ */
+ complete(&ak8974->drdy_complete);
+ return IRQ_HANDLED;
+ }
+
+ /* We may be on a shared IRQ, let the next client check */
+ return IRQ_NONE;
+}
+
+static int ak8974_selftest(struct ak8974 *ak8974)
+{
+ struct device *dev = &ak8974->i2c->dev;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
+ if (ret)
+ return ret;
+ if (val != AK8974_SELFTEST_IDLE) {
+ dev_err(dev, "selftest not idle before test\n");
+ return -EIO;
+ }
+
+ /* Trigger self-test */
+ ret = regmap_update_bits(ak8974->map,
+ AK8974_CTRL3,
+ AK8974_CTRL3_SELFTEST,
+ AK8974_CTRL3_SELFTEST);
+ if (ret) {
+ dev_err(dev, "could not write CTRL3\n");
+ return ret;
+ }
+
+ msleep(AK8974_SELFTEST_DELAY);
+
+ ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
+ if (ret)
+ return ret;
+ if (val != AK8974_SELFTEST_OK) {
+ dev_err(dev, "selftest result NOT OK (%02x)\n", val);
+ return -EIO;
+ }
+
+ ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val);
+ if (ret)
+ return ret;
+ if (val != AK8974_SELFTEST_IDLE) {
+ dev_err(dev, "selftest not idle after test (%02x)\n", val);
+ return -EIO;
+ }
+ dev_dbg(dev, "passed self-test\n");
+
+ return 0;
+}
+
+static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
+{
+ int ret;
+ u16 bulk;
+
+ ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(bulk);
+
+ return 0;
+}
+
+static int ak8974_detect(struct ak8974 *ak8974)
+{
+ unsigned int whoami;
+ const char *name;
+ int ret;
+ unsigned int fw;
+ u16 sn;
+
+ ret = regmap_read(ak8974->map, AK8974_WHOAMI, &whoami);
+ if (ret)
+ return ret;
+
+ switch (whoami) {
+ case AK8974_WHOAMI_VALUE_AMI305:
+ name = "ami305";
+ ret = regmap_read(ak8974->map, AMI305_VER, &fw);
+ if (ret)
+ return ret;
+ fw &= 0x7f; /* only bits 0 thru 6 valid */
+ ret = ak8974_get_u16_val(ak8974, AMI305_SN, &sn);
+ if (ret)
+ return ret;
+ dev_info(&ak8974->i2c->dev,
+ "detected %s, FW ver %02x, S/N: %04x\n",
+ name, fw, sn);
+ break;
+ case AK8974_WHOAMI_VALUE_AK8974:
+ name = "ak8974";
+ dev_info(&ak8974->i2c->dev, "detected AK8974\n");
+ break;
+ default:
+ dev_err(&ak8974->i2c->dev, "unsupported device (%02x) ",
+ whoami);
+ return -ENODEV;
+ }
+
+ ak8974->name = name;
+ ak8974->variant = whoami;
+
+ return 0;
+}
+
+static int ak8974_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ak8974 *ak8974 = iio_priv(indio_dev);
+ s16 hw_values[3];
+ int ret = -EINVAL;
+
+ pm_runtime_get_sync(&ak8974->i2c->dev);
+ mutex_lock(&ak8974->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->address > 2) {
+ dev_err(&ak8974->i2c->dev, "faulty channel address\n");
+ ret = -EIO;
+ goto out_unlock;
+ }
+ ret = ak8974_trigmeas(ak8974);
+ if (ret)
+ goto out_unlock;
+ ret = ak8974_getresult(ak8974, hw_values);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * We read all axes and discard all but one; for optimized
+ * reading, use the triggered buffer.
+ */
+ *val = le16_to_cpu(hw_values[chan->address]);
+
+ ret = IIO_VAL_INT;
+ }
+
+ out_unlock:
+ mutex_unlock(&ak8974->lock);
+ pm_runtime_mark_last_busy(&ak8974->i2c->dev);
+ pm_runtime_put_autosuspend(&ak8974->i2c->dev);
+
+ return ret;
+}
+
+static void ak8974_fill_buffer(struct iio_dev *indio_dev)
+{
+ struct ak8974 *ak8974 = iio_priv(indio_dev);
+ int ret;
+ s16 hw_values[8]; /* Three axes + 64bit padding */
+
+ pm_runtime_get_sync(&ak8974->i2c->dev);
+ mutex_lock(&ak8974->lock);
+
+ ret = ak8974_trigmeas(ak8974);
+ if (ret) {
+ dev_err(&ak8974->i2c->dev, "error triggering measure\n");
+ goto out_unlock;
+ }
+ ret = ak8974_getresult(ak8974, hw_values);
+ if (ret) {
+ dev_err(&ak8974->i2c->dev, "error getting measures\n");
+ goto out_unlock;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, hw_values,
+ iio_get_time_ns(indio_dev));
+
+ out_unlock:
+ mutex_unlock(&ak8974->lock);
+ pm_runtime_mark_last_busy(&ak8974->i2c->dev);
+ pm_runtime_put_autosuspend(&ak8974->i2c->dev);
+}
+
+static irqreturn_t ak8974_handle_trigger(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ ak8974_fill_buffer(indio_dev);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_mount_matrix *
+ak8974_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ak8974 *ak8974 = iio_priv(indio_dev);
+
+ return &ak8974->orientation;
+}
+
+static const struct iio_chan_spec_ext_info ak8974_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8974_get_mount_matrix),
+ { },
+};
+
+#define AK8974_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = ak8974_ext_info, \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE \
+ }, \
+ }
+
+static const struct iio_chan_spec ak8974_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0),
+ AK8974_AXIS_CHANNEL(Y, 1),
+ AK8974_AXIS_CHANNEL(Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
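+/* Only scanning all three axes at once is supported; 0 terminates the list */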
+static const unsigned long ak8974_scan_masks[] = { 0x7, 0 };
+
+static const struct iio_info ak8974_info = {
+ .read_raw = &ak8974_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct ak8974 *ak8974 = iio_priv(indio_dev);
+
+ switch (reg) {
+ case AK8974_CTRL1:
+ case AK8974_CTRL2:
+ case AK8974_CTRL3:
+ case AK8974_INT_CTRL:
+ case AK8974_INT_THRES:
+ case AK8974_INT_THRES + 1:
+ case AK8974_PRESET:
+ case AK8974_PRESET + 1:
+ return true;
+ case AK8974_OFFSET_X:
+ case AK8974_OFFSET_X + 1:
+ case AK8974_OFFSET_Y:
+ case AK8974_OFFSET_Y + 1:
+ case AK8974_OFFSET_Z:
+ case AK8974_OFFSET_Z + 1:
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_AK8974)
+ return true;
+ return false;
+ case AMI305_OFFSET_X:
+ case AMI305_OFFSET_X + 1:
+ case AMI305_OFFSET_Y:
+ case AMI305_OFFSET_Y + 1:
+ case AMI305_OFFSET_Z:
+ case AMI305_OFFSET_Z + 1:
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_AMI305)
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config ak8974_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .writeable_reg = ak8974_writeable_reg,
+};
+
+static int ak8974_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ak8974 *ak8974;
+ unsigned long irq_trig;
+ int irq = i2c->irq;
+ int ret;
+
+ /* Register with IIO */
+ indio_dev = devm_iio_device_alloc(&i2c->dev, sizeof(*ak8974));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ ak8974 = iio_priv(indio_dev);
+ i2c_set_clientdata(i2c, indio_dev);
+ ak8974->i2c = i2c;
+ mutex_init(&ak8974->lock);
+
+ ret = of_iio_read_mount_matrix(&i2c->dev,
+ "mount-matrix",
+ &ak8974->orientation);
+ if (ret)
+ return ret;
+
+ ak8974->regs[0].supply = ak8974_reg_avdd;
+ ak8974->regs[1].supply = ak8974_reg_dvdd;
+
+ ret = devm_regulator_bulk_get(&i2c->dev,
+ ARRAY_SIZE(ak8974->regs),
+ ak8974->regs);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "cannot get regulators\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /* Take runtime PM online */
+ pm_runtime_get_noresume(&i2c->dev);
+ pm_runtime_set_active(&i2c->dev);
+ pm_runtime_enable(&i2c->dev);
+
+ ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config);
+ if (IS_ERR(ak8974->map)) {
+ dev_err(&i2c->dev, "failed to allocate register map\n");
+ return PTR_ERR(ak8974->map);
+ }
+
+ ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
+ if (ret) {
+ dev_err(&i2c->dev, "could not power on\n");
+ goto power_off;
+ }
+
+ ret = ak8974_detect(ak8974);
+ if (ret) {
+ dev_err(&i2c->dev, "neither AK8974 nor AMI305 found\n");
+ goto power_off;
+ }
+
+ ret = ak8974_selftest(ak8974);
+ if (ret)
+ dev_err(&i2c->dev, "selftest failed (continuing anyway)\n");
+
+ ret = ak8974_reset(ak8974);
+ if (ret) {
+ dev_err(&i2c->dev, "AK8974 reset failed\n");
+ goto power_off;
+ }
+
+ pm_runtime_set_autosuspend_delay(&i2c->dev,
+ AK8974_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&i2c->dev);
+ pm_runtime_put(&i2c->dev);
+
+ indio_dev->dev.parent = &i2c->dev;
+ indio_dev->channels = ak8974_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_channels);
+ indio_dev->info = &ak8974_info;
+ indio_dev->available_scan_masks = ak8974_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = ak8974->name;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ak8974_handle_trigger,
+ NULL);
+ if (ret) {
+ dev_err(&i2c->dev, "triggered buffer setup failed\n");
+ goto disable_pm;
+ }
+
+ /* If we have a valid DRDY IRQ, make use of it */
+ if (irq > 0) {
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if (irq_trig == IRQF_TRIGGER_RISING) {
+ dev_info(&i2c->dev, "enable rising edge DRDY IRQ\n");
+ } else if (irq_trig == IRQF_TRIGGER_FALLING) {
+ ak8974->drdy_active_low = true;
+ dev_info(&i2c->dev, "enable falling edge DRDY IRQ\n");
+ } else {
+ irq_trig = IRQF_TRIGGER_RISING;
+ }
+ irq_trig |= IRQF_ONESHOT;
+ irq_trig |= IRQF_SHARED;
+
+ ret = devm_request_threaded_irq(&i2c->dev,
+ irq,
+ ak8974_drdy_irq,
+ ak8974_drdy_irq_thread,
+ irq_trig,
+ ak8974->name,
+ ak8974);
+ if (ret) {
+ dev_err(&i2c->dev, "unable to request DRDY IRQ "
+ "- proceeding without IRQ\n");
+ goto no_irq;
+ }
+ ak8974->drdy_irq = true;
+ }
+
+no_irq:
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&i2c->dev, "device register failed\n");
+ goto cleanup_buffer;
+ }
+
+ return 0;
+
+cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+disable_pm:
+ pm_runtime_put_noidle(&i2c->dev);
+ pm_runtime_disable(&i2c->dev);
+ ak8974_set_power(ak8974, AK8974_PWR_OFF);
+power_off:
+ regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+
+ return ret;
+}
+
+static int __exit ak8974_remove(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct ak8974 *ak8974 = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ pm_runtime_get_sync(&i2c->dev);
+ pm_runtime_put_noidle(&i2c->dev);
+ pm_runtime_disable(&i2c->dev);
+ ak8974_set_power(ak8974, AK8974_PWR_OFF);
+ regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+
+ return 0;
+}
+
+static int __maybe_unused ak8974_runtime_suspend(struct device *dev)
+{
+ struct ak8974 *ak8974 =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ ak8974_set_power(ak8974, AK8974_PWR_OFF);
+ regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+
+ return 0;
+}
+
+static int __maybe_unused ak8974_runtime_resume(struct device *dev)
+{
+ struct ak8974 *ak8974 =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+ if (ret)
+ return ret;
+ msleep(AK8974_POWERON_DELAY);
+ ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
+ if (ret)
+ goto out_regulator_disable;
+
+ ret = ak8974_configure(ak8974);
+ if (ret)
+ goto out_disable_power;
+
+ return 0;
+
+out_disable_power:
+ ak8974_set_power(ak8974, AK8974_PWR_OFF);
+out_regulator_disable:
+ regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ak8974_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ak8974_runtime_suspend,
+ ak8974_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id ak8974_id[] = {
+ {"ami305", 0 },
+ {"ak8974", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ak8974_id);
+
+static const struct of_device_id ak8974_of_match[] = {
+ { .compatible = "asahi-kasei,ak8974", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ak8974_of_match);
+
+static struct i2c_driver ak8974_driver = {
+ .driver = {
+ .name = "ak8974",
+ .pm = &ak8974_dev_pm_ops,
+ .of_match_table = of_match_ptr(ak8974_of_match),
+ },
+ .probe = ak8974_probe,
+ .remove = __exit_p(ak8974_remove),
+ .id_table = ak8974_id,
+};
+module_i2c_driver(ak8974_driver);
+
+MODULE_DESCRIPTION("AK8974 and AMI305 3-axis magnetometer driver");
+MODULE_AUTHOR("Samu Onkalo");
+MODULE_AUTHOR("Linus Walleij");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f2be4a049056..f2b3bd7bf862 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -154,34 +154,41 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
if (ret < 0)
- return ret;
+ goto release;
*val = sign_extend32(
be16_to_cpu(buffer[chan->scan_index]), 15);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
case IIO_TEMP: /* in 1 C / LSB */
mutex_lock(&data->lock);
ret = mag3110_request(data);
if (ret < 0) {
mutex_unlock(&data->lock);
- return ret;
+ goto release;
}
ret = i2c_smbus_read_byte_data(data->client,
MAG3110_DIE_TEMP);
mutex_unlock(&data->lock);
if (ret < 0)
- return ret;
+ goto release;
*val = sign_extend32(ret, 7);
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+release:
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_MAGN:
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index d130cdc78f43..15cd416365c1 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -8,8 +8,6 @@ menu "Pressure sensors"
config BMP280
tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
- depends on !(BMP085_I2C=y || BMP085_I2C=m)
- depends on !(BMP085_SPI=y || BMP085_SPI=m)
select REGMAP
select BMP280_I2C if (I2C)
select BMP280_SPI if (SPI_MASTER)
@@ -187,4 +185,26 @@ config HP206C
This driver can also be built as a module. If so, the module will
be called hp206c.
+config ZPA2326
+ tristate "Murata ZPA2326 pressure sensor driver"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP
+ select ZPA2326_I2C if I2C
+ select ZPA2326_SPI if SPI_MASTER
+ help
+ Say Y here to build support for the Murata ZPA2326 pressure and
+ temperature sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zpa2326.
+
+config ZPA2326_I2C
+ tristate
+ select REGMAP_I2C
+
+config ZPA2326_SPI
+ tristate
+ select REGMAP_SPI
+
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 7f395bed5e88..fff77185a5cc 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -22,6 +22,9 @@ st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
obj-$(CONFIG_T5403) += t5403.o
obj-$(CONFIG_HP206C) += hp206c.o
+obj-$(CONFIG_ZPA2326) += zpa2326.o
+obj-$(CONFIG_ZPA2326_I2C) += zpa2326_i2c.o
+obj-$(CONFIG_ZPA2326_SPI) += zpa2326_spi.o
obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index feb41f82c64a..a74ed1f0c880 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -416,8 +416,7 @@ static int ms5611_init(struct iio_dev *indio_dev)
return 0;
err_regulator_disable:
- if (!IS_ERR_OR_NULL(st->vdd))
- regulator_disable(st->vdd);
+ regulator_disable(st->vdd);
return ret;
}
@@ -425,8 +424,7 @@ static void ms5611_fini(const struct iio_dev *indio_dev)
{
const struct ms5611_state *st = iio_priv(indio_dev);
- if (!IS_ERR_OR_NULL(st->vdd))
- regulator_disable(st->vdd);
+ regulator_disable(st->vdd);
}
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
new file mode 100644
index 000000000000..19d2eb46fda6
--- /dev/null
+++ b/drivers/iio/pressure/zpa2326.c
@@ -0,0 +1,1735 @@
+/*
+ * Murata ZPA2326 pressure and temperature sensor IIO driver
+ *
+ * Copyright (c) 2016 Parrot S.A.
+ *
+ * Author: Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * DOC: ZPA2326 theory of operations
+ *
+ * This driver supports %INDIO_DIRECT_MODE and %INDIO_BUFFER_TRIGGERED IIO
+ * modes.
+ * An internal hardware trigger is also implemented to dispatch registered IIO
+ * trigger consumers upon "sample ready" interrupts.
+ *
+ * ZPA2326 hardware supports two sampling modes: one shot and continuous.
+ *
+ * A complete one shot sampling cycle gets the device out of low power mode,
+ * performs pressure and temperature measurements, then automatically switches
+ * back to low power mode. It is meant for on demand sampling with optimal power
+ * saving at the cost of lower sampling rate and higher software overhead.
+ * This is a natural candidate for IIO read_raw hook implementation
+ * (%INDIO_DIRECT_MODE). It is also used for triggered buffering support to
+ * ensure explicit synchronization with external trigger events
+ * (%INDIO_BUFFER_TRIGGERED).
+ *
+ * The continuous mode works according to a periodic hardware measurement
+ * process continuously pushing samples into an internal hardware FIFO (for
+ * pressure samples only). Measurement cycle completion may be signaled by a
+ * "sample ready" interrupt.
+ * Typical software sequence of operations:
+ * - get the device out of low power mode,
+ * - setup the hardware sampling period,
+ * - at end of period, upon data ready interrupt: pop pressure samples out of
+ * the hardware FIFO and fetch the temperature sample,
+ * - when no longer needed, stop the sampling process by putting the device
+ * into low power mode.
+ * This mode is used to implement %INDIO_BUFFER_TRIGGERED mode if device tree
+ * declares a valid interrupt line. In this case, the internal hardware trigger
+ * drives acquisition.
+ *
+ * Note that the configured hardware sampling frequency is taken into account
+ * only when the internal hardware trigger is attached; one shot mode always
+ * uses the highest rate since it seems to be the most energy efficient.
+ *
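+ * As an illustration only (device index and attribute availability depend on
+ * the system), on demand sampling is typically exercised from userspace
+ * through the standard IIO sysfs interface, e.g.:
+ *   cat /sys/bus/iio/devices/iio:deviceX/in_pressure_raw
+ *   cat /sys/bus/iio/devices/iio:deviceX/in_temp_raw
+ *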
+ * TODO:
+ * preset pressure threshold crossing / IIO events ;
+ * differential pressure sampling ;
+ * hardware samples averaging.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "zpa2326.h"
+
+/* 200 ms should be enough for the longest conversion time in one-shot mode. */
+#define ZPA2326_CONVERSION_JIFFIES (HZ / 5)
+
+/* There should be a 1 ms delay (Tpup) after getting out of reset. */
+#define ZPA2326_TPUP_USEC_MIN (1000)
+#define ZPA2326_TPUP_USEC_MAX (2000)
+
+/**
+ * struct zpa2326_frequency - Hardware sampling frequency descriptor
+ * @hz : Frequency in Hertz.
+ * @odr: Output Data Rate word as expected by %ZPA2326_CTRL_REG3_REG.
+ */
+struct zpa2326_frequency {
+ int hz;
+ u16 odr;
+};
+
+/*
+ * Keep these in strict ascending order: last array entry is expected to
+ * correspond to the highest sampling frequency.
+ */
+static const struct zpa2326_frequency zpa2326_sampling_frequencies[] = {
+ { .hz = 1, .odr = 1 << ZPA2326_CTRL_REG3_ODR_SHIFT },
+ { .hz = 5, .odr = 5 << ZPA2326_CTRL_REG3_ODR_SHIFT },
+ { .hz = 11, .odr = 6 << ZPA2326_CTRL_REG3_ODR_SHIFT },
+ { .hz = 23, .odr = 7 << ZPA2326_CTRL_REG3_ODR_SHIFT },
+};
+
+/* Return the highest hardware sampling frequency available. */
+static const struct zpa2326_frequency *zpa2326_highest_frequency(void)
+{
+ return &zpa2326_sampling_frequencies[
+ ARRAY_SIZE(zpa2326_sampling_frequencies) - 1];
+}
+
+/**
+ * struct zpa2326_private - Per-device internal private state
+ * @timestamp: Buffered samples ready datum.
+ * @regmap: Underlying I2C / SPI bus adapter used to abstract slave register
+ * accesses.
+ * @result: Allows sampling logic to get completion status of operations
+ * that interrupt handlers perform asynchronously.
+ * @data_ready: Interrupt handler uses this to wake user context up at sampling
+ * operation completion.
+ * @trigger: Optional hardware / interrupt driven trigger used to notify
+ * external devices a new sample is ready.
+ * @waken: Flag indicating whether or not device has just been powered on.
+ * @irq: Optional interrupt line: negative or zero if not declared in
+ * DT, in which case sampling logic keeps polling the status
+ * register to detect completion.
+ * @frequency: Current hardware sampling frequency.
+ * @vref: Power / voltage reference.
+ * @vdd: Power supply.
+ */
+struct zpa2326_private {
+ s64 timestamp;
+ struct regmap *regmap;
+ int result;
+ struct completion data_ready;
+ struct iio_trigger *trigger;
+ bool waken;
+ int irq;
+ const struct zpa2326_frequency *frequency;
+ struct regulator *vref;
+ struct regulator *vdd;
+};
+
+#define zpa2326_err(_idev, _format, _arg...) \
+ dev_err(_idev->dev.parent, _format, ##_arg)
+
+#define zpa2326_warn(_idev, _format, _arg...) \
+ dev_warn(_idev->dev.parent, _format, ##_arg)
+
+#ifdef DEBUG
+#define zpa2326_dbg(_idev, _format, _arg...) \
+ dev_dbg(_idev->dev.parent, _format, ##_arg)
+#else
+#define zpa2326_dbg(_idev, _format, _arg...)
+#endif
+
+bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ZPA2326_REF_P_XL_REG:
+ case ZPA2326_REF_P_L_REG:
+ case ZPA2326_REF_P_H_REG:
+ case ZPA2326_RES_CONF_REG:
+ case ZPA2326_CTRL_REG0_REG:
+ case ZPA2326_CTRL_REG1_REG:
+ case ZPA2326_CTRL_REG2_REG:
+ case ZPA2326_CTRL_REG3_REG:
+ case ZPA2326_THS_P_LOW_REG:
+ case ZPA2326_THS_P_HIGH_REG:
+ return true;
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(zpa2326_isreg_writeable);
+
+bool zpa2326_isreg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ZPA2326_REF_P_XL_REG:
+ case ZPA2326_REF_P_L_REG:
+ case ZPA2326_REF_P_H_REG:
+ case ZPA2326_DEVICE_ID_REG:
+ case ZPA2326_RES_CONF_REG:
+ case ZPA2326_CTRL_REG0_REG:
+ case ZPA2326_CTRL_REG1_REG:
+ case ZPA2326_CTRL_REG2_REG:
+ case ZPA2326_CTRL_REG3_REG:
+ case ZPA2326_INT_SOURCE_REG:
+ case ZPA2326_THS_P_LOW_REG:
+ case ZPA2326_THS_P_HIGH_REG:
+ case ZPA2326_STATUS_REG:
+ case ZPA2326_PRESS_OUT_XL_REG:
+ case ZPA2326_PRESS_OUT_L_REG:
+ case ZPA2326_PRESS_OUT_H_REG:
+ case ZPA2326_TEMP_OUT_L_REG:
+ case ZPA2326_TEMP_OUT_H_REG:
+ return true;
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(zpa2326_isreg_readable);
+
+bool zpa2326_isreg_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ZPA2326_INT_SOURCE_REG:
+ case ZPA2326_PRESS_OUT_H_REG:
+ return true;
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(zpa2326_isreg_precious);
+
+/**
+ * zpa2326_enable_device() - Enable device, i.e. get out of low power mode.
+ * @indio_dev: The IIO device associated with the hardware to enable.
+ *
+ * Required to access complete register space and to perform any sampling
+ * or control operations.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_enable_device(const struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = regmap_write(((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap,
+ ZPA2326_CTRL_REG0_REG, ZPA2326_CTRL_REG0_ENABLE);
+ if (err) {
+ zpa2326_err(indio_dev, "failed to enable device (%d)", err);
+ return err;
+ }
+
+ zpa2326_dbg(indio_dev, "enabled");
+
+ return 0;
+}
+
+/**
+ * zpa2326_sleep() - Disable device, i.e. switch to low power mode.
+ * @indio_dev: The IIO device associated with the hardware to disable.
+ *
+ * Only %ZPA2326_DEVICE_ID_REG and %ZPA2326_CTRL_REG0_REG registers may be
+ * accessed once device is in the disabled state.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_sleep(const struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = regmap_write(((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap,
+ ZPA2326_CTRL_REG0_REG, 0);
+ if (err) {
+ zpa2326_err(indio_dev, "failed to sleep (%d)", err);
+ return err;
+ }
+
+ zpa2326_dbg(indio_dev, "sleeping");
+
+ return 0;
+}
+
+/**
+ * zpa2326_reset_device() - Reset device to default hardware state.
+ * @indio_dev: The IIO device associated with the hardware to reset.
+ *
+ * Disable sampling and empty hardware FIFO.
+ * Device must be enabled before reset, i.e. not in low power mode.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_reset_device(const struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = regmap_write(((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap,
+ ZPA2326_CTRL_REG2_REG, ZPA2326_CTRL_REG2_SWRESET);
+ if (err) {
+ zpa2326_err(indio_dev, "failed to reset device (%d)", err);
+ return err;
+ }
+
+ usleep_range(ZPA2326_TPUP_USEC_MIN, ZPA2326_TPUP_USEC_MAX);
+
+ zpa2326_dbg(indio_dev, "reset");
+
+ return 0;
+}
+
+/**
+ * zpa2326_start_oneshot() - Start a single sampling cycle, i.e. in one shot
+ * mode.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Device must have been previously enabled and configured for one shot mode.
+ * Device will be switched back to low power mode at end of cycle.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_start_oneshot(const struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = regmap_write(((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap,
+ ZPA2326_CTRL_REG0_REG,
+ ZPA2326_CTRL_REG0_ENABLE |
+ ZPA2326_CTRL_REG0_ONE_SHOT);
+ if (err) {
+ zpa2326_err(indio_dev, "failed to start one shot cycle (%d)",
+ err);
+ return err;
+ }
+
+ zpa2326_dbg(indio_dev, "one shot cycle started");
+
+ return 0;
+}
+
+/**
+ * zpa2326_power_on() - Power on device to allow subsequent configuration.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @private: Internal private state related to @indio_dev.
+ *
+ * Sampling will be disabled, preventing strange things from happening behind
+ * our back. Hardware FIFO content will be cleared.
+ * When successful, device will be left in the enabled state to allow further
+ * configuration.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_power_on(const struct iio_dev *indio_dev,
+ const struct zpa2326_private *private)
+{
+ int err;
+
+ err = regulator_enable(private->vref);
+ if (err)
+ return err;
+
+ err = regulator_enable(private->vdd);
+ if (err)
+ goto vref;
+
+ zpa2326_dbg(indio_dev, "powered on");
+
+ err = zpa2326_enable_device(indio_dev);
+ if (err)
+ goto vdd;
+
+ err = zpa2326_reset_device(indio_dev);
+ if (err)
+ goto sleep;
+
+ return 0;
+
+sleep:
+ zpa2326_sleep(indio_dev);
+vdd:
+ regulator_disable(private->vdd);
+vref:
+ regulator_disable(private->vref);
+
+ zpa2326_dbg(indio_dev, "powered off");
+
+ return err;
+}
+
+/**
+ * zpa2326_power_off() - Power off device, i.e. disable attached power
+ * regulators.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @private: Internal private state related to @indio_dev.
+ */
+static void zpa2326_power_off(const struct iio_dev *indio_dev,
+ const struct zpa2326_private *private)
+{
+ regulator_disable(private->vdd);
+ regulator_disable(private->vref);
+
+ zpa2326_dbg(indio_dev, "powered off");
+}
+
+/**
+ * zpa2326_config_oneshot() - Setup device for one shot / on demand mode.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @irq: Optional interrupt line the hardware uses to notify new data
+ * samples are ready. Negative or zero values indicate no interrupts
+ * are available, meaning polling is required.
+ *
+ * Output Data Rate is configured for the highest possible rate so that
+ * conversion time and power consumption are reduced to a minimum.
+ * Note that hardware internal averaging machinery (not implemented in this
+ * driver) is not applicable in this mode.
+ *
+ * Device must have been previously enabled before calling
+ * zpa2326_config_oneshot().
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_config_oneshot(const struct iio_dev *indio_dev,
+ int irq)
+{
+ struct regmap *regs = ((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap;
+ const struct zpa2326_frequency *freq = zpa2326_highest_frequency();
+ int err;
+
+ /* Setup highest available Output Data Rate for one shot mode. */
+ err = regmap_write(regs, ZPA2326_CTRL_REG3_REG, freq->odr);
+ if (err)
+ return err;
+
+ if (irq > 0) {
+ /* Request interrupt when new sample is available. */
+ err = regmap_write(regs, ZPA2326_CTRL_REG1_REG,
+ (u8)~ZPA2326_CTRL_REG1_MASK_DATA_READY);
+
+ if (err) {
+ dev_err(indio_dev->dev.parent,
+ "failed to setup one shot mode (%d)", err);
+ return err;
+ }
+ }
+
+ zpa2326_dbg(indio_dev, "one shot mode setup @%dHz", freq->hz);
+
+ return 0;
+}
+
+/**
+ * zpa2326_clear_fifo() - Clear remaining entries in hardware FIFO.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @min_count: Number of samples present within hardware FIFO.
+ *
+ * The @min_count argument is a hint corresponding to the known minimum number
+ * of samples currently living in the FIFO. This reduces the number of bus
+ * accesses by skipping the status register read operation as long as we know
+ * for sure there are still entries left.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_clear_fifo(const struct iio_dev *indio_dev,
+ unsigned int min_count)
+{
+ struct regmap *regs = ((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap;
+ int err;
+ unsigned int val;
+
+ if (!min_count) {
+ /*
+ * No hint: read status register to determine whether FIFO is
+ * empty or not.
+ */
+ err = regmap_read(regs, ZPA2326_STATUS_REG, &val);
+
+ if (err < 0)
+ goto err;
+
+ if (val & ZPA2326_STATUS_FIFO_E)
+ /* Fifo is empty: nothing to trash. */
+ return 0;
+ }
+
+ /* Clear FIFO. */
+ do {
+ /*
+ * A single fetch from pressure MSB register is enough to pop
+ * values out of FIFO.
+ */
+ err = regmap_read(regs, ZPA2326_PRESS_OUT_H_REG, &val);
+ if (err < 0)
+ goto err;
+
+ if (min_count) {
+ /*
+ * We know for sure there are at least min_count entries
+ * left in FIFO. Skip status register read.
+ */
+ min_count--;
+ continue;
+ }
+
+ err = regmap_read(regs, ZPA2326_STATUS_REG, &val);
+ if (err < 0)
+ goto err;
+
+ } while (!(val & ZPA2326_STATUS_FIFO_E));
+
+ zpa2326_dbg(indio_dev, "FIFO cleared");
+
+ return 0;
+
+err:
+ zpa2326_err(indio_dev, "failed to clear FIFO (%d)", err);
+
+ return err;
+}
+
+/**
+ * zpa2326_dequeue_pressure() - Retrieve the most recent pressure sample from
+ * hardware FIFO.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @pressure: Sampled pressure output.
+ *
+ * Note that ZPA2326 hardware FIFO stores pressure samples only.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_dequeue_pressure(const struct iio_dev *indio_dev,
+ u32 *pressure)
+{
+ struct regmap *regs = ((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap;
+ unsigned int val;
+ int err;
+ int cleared = -1;
+
+ err = regmap_read(regs, ZPA2326_STATUS_REG, &val);
+ if (err < 0)
+ return err;
+
+ *pressure = 0;
+
+ if (val & ZPA2326_STATUS_P_OR) {
+ /*
+ * FIFO overrun: the first sample dequeued from the FIFO is the
+ * newest.
+ */
+ zpa2326_warn(indio_dev, "FIFO overflow");
+
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, pressure,
+ 3);
+ if (err)
+ return err;
+
+#define ZPA2326_FIFO_DEPTH (16U)
+ /* Hardware FIFO may hold no more than 16 pressure samples. */
+ return zpa2326_clear_fifo(indio_dev, ZPA2326_FIFO_DEPTH - 1);
+ }
+
+ /*
+ * FIFO has not overflowed: retrieve the newest sample. We need to pop
+ * values out until the FIFO is empty: the last fetched pressure is the
+ * newest. In nominal cases, we should find a single queued sample only.
+ */
+ do {
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, pressure,
+ 3);
+ if (err)
+ return err;
+
+ err = regmap_read(regs, ZPA2326_STATUS_REG, &val);
+ if (err < 0)
+ return err;
+
+ cleared++;
+ } while (!(val & ZPA2326_STATUS_FIFO_E));
+
+ if (cleared)
+ /*
+ * Samples were pushed by hardware during previous rounds but we
+ * didn't consume them fast enough: inform user.
+ */
+ zpa2326_dbg(indio_dev, "cleared %d FIFO entries", cleared);
+
+ return 0;
+}
+
+/**
+ * zpa2326_fill_sample_buffer() - Enqueue new channel samples to IIO buffer.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @private: Internal private state related to @indio_dev.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
+ const struct zpa2326_private *private)
+{
+ struct {
+ u32 pressure;
+ u16 temperature;
+ u64 timestamp;
+ } sample;
+ int err;
+
+ if (test_bit(0, indio_dev->active_scan_mask)) {
+ /* Get current pressure from hardware FIFO. */
+ err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure);
+ if (err) {
+ zpa2326_warn(indio_dev, "failed to fetch pressure (%d)",
+ err);
+ return err;
+ }
+ }
+
+ if (test_bit(1, indio_dev->active_scan_mask)) {
+ /* Get current temperature. */
+ err = regmap_bulk_read(private->regmap, ZPA2326_TEMP_OUT_L_REG,
+ &sample.temperature, 2);
+ if (err) {
+ zpa2326_warn(indio_dev,
+ "failed to fetch temperature (%d)", err);
+ return err;
+ }
+ }
+
+ /*
+ * Now push samples using the timestamp stored either:
+ * - by the hardware interrupt handler if an interrupt is available: see
+ * zpa2326_handle_irq(),
+ * - or by the oneshot completion polling machinery: see
+ * zpa2326_trigger_handler().
+ */
+ zpa2326_dbg(indio_dev, "filling raw samples buffer");
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &sample,
+ private->timestamp);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int zpa2326_runtime_suspend(struct device *parent)
+{
+ const struct iio_dev *indio_dev = dev_get_drvdata(parent);
+
+ if (pm_runtime_autosuspend_expiration(parent))
+ /* Userspace changed autosuspend delay. */
+ return -EAGAIN;
+
+ zpa2326_power_off(indio_dev, iio_priv(indio_dev));
+
+ return 0;
+}
+
+static int zpa2326_runtime_resume(struct device *parent)
+{
+ const struct iio_dev *indio_dev = dev_get_drvdata(parent);
+
+ return zpa2326_power_on(indio_dev, iio_priv(indio_dev));
+}
+
+const struct dev_pm_ops zpa2326_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(zpa2326_runtime_suspend, zpa2326_runtime_resume,
+ NULL)
+};
+EXPORT_SYMBOL_GPL(zpa2326_pm_ops);
+
+/**
+ * zpa2326_resume() - Request the PM layer to supply power to the device.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Return:
+ * < 0 - a negative error code meaning failure ;
+ * 0 - success, device has just been powered up ;
+ * 1 - success, device was already powered.
+ */
+static int zpa2326_resume(const struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = pm_runtime_get_sync(indio_dev->dev.parent);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+ /*
+ * Device was already powered: get it out of low power
+ * mode and inform caller.
+ */
+ zpa2326_enable_device(indio_dev);
+ return 1;
+ }
+
+ /* Inform caller device has just been brought back to life. */
+ return 0;
+}
+
+/**
+ * zpa2326_suspend() - Schedule a power down using autosuspend feature of PM
+ * layer.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Device is switched to low power mode first to save power even when the
+ * attached regulator is a "dummy" one.
+ */
+static void zpa2326_suspend(struct iio_dev *indio_dev)
+{
+ struct device *parent = indio_dev->dev.parent;
+
+ zpa2326_sleep(indio_dev);
+
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+}
+
+static void zpa2326_init_runtime(struct device *parent)
+{
+ pm_runtime_get_noresume(parent);
+ pm_runtime_set_active(parent);
+ pm_runtime_enable(parent);
+ pm_runtime_set_autosuspend_delay(parent, 1000);
+ pm_runtime_use_autosuspend(parent);
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+}
+
+static void zpa2326_fini_runtime(struct device *parent)
+{
+ pm_runtime_disable(parent);
+ pm_runtime_set_suspended(parent);
+}
+#else /* !CONFIG_PM */
+static int zpa2326_resume(const struct iio_dev *indio_dev)
+{
+ zpa2326_enable_device(indio_dev);
+
+ return 0;
+}
+
+static void zpa2326_suspend(struct iio_dev *indio_dev)
+{
+ zpa2326_sleep(indio_dev);
+}
+
+#define zpa2326_init_runtime(_parent)
+#define zpa2326_fini_runtime(_parent)
+#endif /* !CONFIG_PM */
+
+/**
+ * zpa2326_handle_irq() - Process hardware interrupts.
+ * @irq: Interrupt line the hardware uses to notify new data has arrived.
+ * @data: The IIO device associated with the sampling hardware.
+ *
+ * Timestamp buffered samples as soon as possible then schedule threaded bottom
+ * half.
+ *
+ * Return: Always successful.
+ */
+static irqreturn_t zpa2326_handle_irq(int irq, void *data)
+{
+ struct iio_dev *indio_dev = (struct iio_dev *)data;
+
+ if (iio_buffer_enabled(indio_dev)) {
+ /* Timestamping needed for buffered sampling only. */
+ ((struct zpa2326_private *)
+ iio_priv(indio_dev))->timestamp = iio_get_time_ns(indio_dev);
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * zpa2326_handle_threaded_irq() - Interrupt bottom-half handler.
+ * @irq: Interrupt line the hardware uses to notify new data has arrived.
+ * @data: The IIO device associated with the sampling hardware.
+ *
+ * Mainly ensures interrupt is caused by a real "new sample available"
+ * condition. This relies upon the ability to perform blocking / sleeping bus
+ * accesses to slave's registers. This is why zpa2326_handle_threaded_irq() is
+ * called from within a thread, i.e. not called from hard interrupt context.
+ *
+ * When the device is using its own internal hardware trigger in continuous
+ * sampling mode, data are available in the hardware FIFO once the interrupt
+ * has occurred. All we have to do is to dispatch the trigger, which in turn
+ * will fetch data and fill the IIO buffer.
+ *
+ * When not using its own internal hardware trigger, the device has been
+ * configured in one-shot mode either by an external trigger or the IIO read_raw
+ * hook. This means one of the latter is currently waiting for sampling
+ * completion, in which case we must simply wake it up.
+ *
+ * See zpa2326_trigger_handler().
+ *
+ * Return:
+ * %IRQ_NONE - no consistent interrupt happened ;
+ * %IRQ_HANDLED - new samples were available.
+ */
+static irqreturn_t zpa2326_handle_threaded_irq(int irq, void *data)
+{
+ struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct zpa2326_private *priv = iio_priv(indio_dev);
+ unsigned int val;
+ bool cont;
+ irqreturn_t ret = IRQ_NONE;
+
+ /*
+ * Are we using our own internal trigger in triggered buffer mode, i.e.,
+ * currently working in continuous sampling mode ?
+ */
+ cont = (iio_buffer_enabled(indio_dev) &&
+ iio_trigger_using_own(indio_dev));
+
+ /*
+ * Device works according to a level interrupt scheme: reading interrupt
+ * status de-asserts interrupt line.
+ */
+ priv->result = regmap_read(priv->regmap, ZPA2326_INT_SOURCE_REG, &val);
+ if (priv->result < 0) {
+ if (cont)
+ return IRQ_NONE;
+
+ goto complete;
+ }
+
+ /* Data ready is the only interrupt source we requested. */
+ if (!(val & ZPA2326_INT_SOURCE_DATA_READY)) {
+ /*
+ * Interrupt happened but no new sample available: likely caused
+ * by spurious interrupts, in which case returning IRQ_NONE
+ * allows us to benefit from the generic spurious interrupt
+ * handling.
+ */
+ zpa2326_warn(indio_dev, "unexpected interrupt status %02x",
+ val);
+
+ if (cont)
+ return IRQ_NONE;
+
+ priv->result = -ENODATA;
+ goto complete;
+ }
+
+ /* New sample available: dispatch internal trigger consumers. */
+ iio_trigger_poll_chained(priv->trigger);
+
+ if (cont)
+ /*
+ * Internal hardware trigger has been scheduled above : it will
+ * fetch data on its own.
+ */
+ return IRQ_HANDLED;
+
+ ret = IRQ_HANDLED;
+
+complete:
+ /*
+ * Wake up direct or externally triggered buffer mode waiters: see
+ * zpa2326_sample_oneshot() and zpa2326_trigger_handler().
+ */
+ complete(&priv->data_ready);
+
+ return ret;
+}
+
+/**
+ * zpa2326_wait_oneshot_completion() - Wait for oneshot data ready interrupt.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @private: Internal private state related to @indio_dev.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
+ struct zpa2326_private *private)
+{
+ int ret;
+ unsigned int val;
+
+ zpa2326_dbg(indio_dev, "waiting for one shot completion interrupt");
+
+ ret = wait_for_completion_interruptible_timeout(
+ &private->data_ready, ZPA2326_CONVERSION_JIFFIES);
+ if (ret > 0)
+ /*
+ * Interrupt handler completed before timeout: return operation
+ * status.
+ */
+ return private->result;
+
+ /* Clear all interrupts just to be sure. */
+ regmap_read(private->regmap, ZPA2326_INT_SOURCE_REG, &val);
+
+ if (!ret)
+ /* Timed out. */
+ ret = -ETIME;
+
+ if (ret != -ERESTARTSYS)
+ zpa2326_warn(indio_dev, "no one shot interrupt occurred (%d)",
+ ret);
+
+ return ret;
+}
+
+static int zpa2326_init_managed_irq(struct device *parent,
+ struct iio_dev *indio_dev,
+ struct zpa2326_private *private,
+ int irq)
+{
+ int err;
+
+ private->irq = irq;
+
+ if (irq <= 0) {
+ /*
+ * Platform declared no interrupt line: device will be polled
+ * for data availability.
+ */
+ dev_info(parent, "no interrupt found, running in polling mode");
+ return 0;
+ }
+
+ init_completion(&private->data_ready);
+
+ /* Request handler to be scheduled into threaded interrupt context. */
+ err = devm_request_threaded_irq(parent, irq, zpa2326_handle_irq,
+ zpa2326_handle_threaded_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(parent), indio_dev);
+ if (err) {
+ dev_err(parent, "failed to request interrupt %d (%d)", irq,
+ err);
+ return err;
+ }
+
+ dev_info(parent, "using interrupt %d", irq);
+
+ return 0;
+}
+
+/**
+ * zpa2326_poll_oneshot_completion() - Actively poll for one shot data ready.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Loop over register content to detect the end of a sampling cycle. Used
+ * when DT declared no valid interrupt lines.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_poll_oneshot_completion(const struct iio_dev *indio_dev)
+{
+ unsigned long tmout = jiffies + ZPA2326_CONVERSION_JIFFIES;
+ struct regmap *regs = ((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap;
+ unsigned int val;
+ int err;
+
+ zpa2326_dbg(indio_dev, "polling for one shot completion");
+
+ /*
+ * At least 100 ms is needed for the device to complete its one-shot
+ * cycle.
+ */
+ if (msleep_interruptible(100))
+ return -ERESTARTSYS;
+
+ /* Poll for conversion completion in hardware. */
+ while (true) {
+ err = regmap_read(regs, ZPA2326_CTRL_REG0_REG, &val);
+ if (err < 0)
+ goto err;
+
+ if (!(val & ZPA2326_CTRL_REG0_ONE_SHOT))
+ /* One-shot bit self clears at conversion end. */
+ break;
+
+ if (time_after(jiffies, tmout)) {
+ /* Prevent us from waiting forever: let's time out. */
+ err = -ETIME;
+ goto err;
+ }
+
+ usleep_range(10000, 20000);
+ }
+
+ /*
+ * In oneshot mode, pressure sample availability guarantees that
+ * temperature conversion has also completed: just check the pressure
+ * status bit to keep things simple.
+ */
+ err = regmap_read(regs, ZPA2326_STATUS_REG, &val);
+ if (err < 0)
+ goto err;
+
+ if (!(val & ZPA2326_STATUS_P_DA)) {
+ /* No sample available. */
+ err = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ zpa2326_warn(indio_dev, "failed to poll one shot completion (%d)", err);
+
+ return err;
+}
+
+/**
+ * zpa2326_fetch_raw_sample() - Retrieve a raw sample and convert it to CPU
+ * endianness.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @type: Type of measurement / channel to fetch from.
+ * @value: Sample output.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_fetch_raw_sample(const struct iio_dev *indio_dev,
+ enum iio_chan_type type,
+ int *value)
+{
+ struct regmap *regs = ((struct zpa2326_private *)
+ iio_priv(indio_dev))->regmap;
+ int err;
+
+ switch (type) {
+ case IIO_PRESSURE:
+ zpa2326_dbg(indio_dev, "fetching raw pressure sample");
+
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, value,
+ 3);
+ if (err) {
+ zpa2326_warn(indio_dev, "failed to fetch pressure (%d)",
+ err);
+ return err;
+ }
+
+ /* Pressure is a 24 bits wide little-endian unsigned int. */
+ *value = (((u8 *)value)[2] << 16) | (((u8 *)value)[1] << 8) |
+ ((u8 *)value)[0];
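+ /*
+ * Illustration only: bytes {0x12, 0x34, 0x56} fetched from the XL, L
+ * and H output registers assemble into the value 0x563412.
+ */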
+
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ zpa2326_dbg(indio_dev, "fetching raw temperature sample");
+
+ err = regmap_bulk_read(regs, ZPA2326_TEMP_OUT_L_REG, value, 2);
+ if (err) {
+ zpa2326_warn(indio_dev,
+ "failed to fetch temperature (%d)", err);
+ return err;
+ }
+
+ /* Temperature is a 16 bits wide little-endian signed int. */
+ *value = (int)le16_to_cpup((__le16 *)value);
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * zpa2326_sample_oneshot() - Perform a complete one shot sampling cycle.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @type: Type of measurement / channel to fetch from.
+ * @value: Sample output.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_sample_oneshot(struct iio_dev *indio_dev,
+ enum iio_chan_type type,
+ int *value)
+{
+ int ret;
+ struct zpa2326_private *priv;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = zpa2326_resume(indio_dev);
+ if (ret < 0)
+ goto release;
+
+ priv = iio_priv(indio_dev);
+
+ if (ret > 0) {
+ /*
+ * We were already powered. Just clear the hardware FIFO to
+ * get rid of samples acquired during previous rounds (if any).
+ * A sampling operation always generates both temperature and
+ * pressure samples. The latter are always enqueued into the
+ * hardware FIFO. This may lead to situations where pressure
+ * samples still sit in the FIFO when previous cycle(s) fetched
+ * temperature data only.
+ * Hence, we need to clear the hardware FIFO content to prevent
+ * getting outdated values at the end of the current cycle.
+ */
+ if (type == IIO_PRESSURE) {
+ ret = zpa2326_clear_fifo(indio_dev, 0);
+ if (ret)
+ goto suspend;
+ }
+ } else {
+ /*
+ * We have just been powered up, i.e. the device is in its default
+ * "out of reset" state, meaning we need to reconfigure it
+ * entirely.
+ */
+ ret = zpa2326_config_oneshot(indio_dev, priv->irq);
+ if (ret)
+ goto suspend;
+ }
+
+ /* Start a sampling cycle in oneshot mode. */
+ ret = zpa2326_start_oneshot(indio_dev);
+ if (ret)
+ goto suspend;
+
+ /* Wait for sampling cycle to complete. */
+ if (priv->irq > 0)
+ ret = zpa2326_wait_oneshot_completion(indio_dev, priv);
+ else
+ ret = zpa2326_poll_oneshot_completion(indio_dev);
+
+ if (ret)
+ goto suspend;
+
+ /* Retrieve raw sample value and convert it to CPU endianness. */
+ ret = zpa2326_fetch_raw_sample(indio_dev, type, value);
+
+suspend:
+ zpa2326_suspend(indio_dev);
+release:
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+/**
+ * zpa2326_trigger_handler() - Perform an IIO buffered sampling round in one
+ * shot mode.
+ * @irq: The software interrupt assigned to @data
+ * @data: The IIO poll function dispatched by external trigger our device is
+ * attached to.
+ *
+ * Bottom-half handler called by the IIO trigger to which our device is
+ * currently attached. Allows us to synchronize this device's buffered sampling
+ * either with external events (such as timer expiration, external device sample
+ * ready, etc.) or with its own interrupt (internal hardware trigger).
+ *
+ * When using an external trigger, basically run the same sequence of operations
+ * as for zpa2326_sample_oneshot(), with one exception: the hardware FIFO is not
+ * cleared since that was already done at buffering enable time, and sample
+ * dequeueing always retrieves the most recent value.
+ *
+ * Otherwise, when the internal hardware trigger has dispatched us, just fetch
+ * data from the hardware FIFO.
+ *
+ * Fetched data will be pushed unprocessed to the IIO buffer since sample
+ * conversion is delegated to userspace in buffered mode (endianness, etc.).
+ *
+ * Return:
+ * %IRQ_NONE - no consistent interrupt happened ;
+ * %IRQ_HANDLED - new samples were available.
+ */
+static irqreturn_t zpa2326_trigger_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = ((struct iio_poll_func *)
+ data)->indio_dev;
+ struct zpa2326_private *priv = iio_priv(indio_dev);
+ bool cont;
+
+ /*
+ * We have been dispatched, meaning we are in triggered buffer mode.
+ * Using our own internal trigger implies we are currently in continuous
+ * hardware sampling mode.
+ */
+ cont = iio_trigger_using_own(indio_dev);
+
+ if (!cont) {
+ /* On demand sampling : start a one shot cycle. */
+ if (zpa2326_start_oneshot(indio_dev))
+ goto out;
+
+ /* Wait for sampling cycle to complete. */
+ if (priv->irq <= 0) {
+ /* No interrupt available: poll for completion. */
+ if (zpa2326_poll_oneshot_completion(indio_dev))
+ goto out;
+
+ /* Only timestamp sample once it is ready. */
+ priv->timestamp = iio_get_time_ns(indio_dev);
+ } else {
+ /* Interrupt handlers will timestamp for us. */
+ if (zpa2326_wait_oneshot_completion(indio_dev, priv))
+ goto out;
+ }
+ }
+
+ /* Enqueue to IIO buffer / userspace. */
+ zpa2326_fill_sample_buffer(indio_dev, priv);
+
+out:
+ if (!cont)
+ /* Don't switch to low power if sampling continuously. */
+ zpa2326_sleep(indio_dev);
+
+ /* Inform attached trigger we are done. */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * zpa2326_preenable_buffer() - Prepare device for configuring triggered
+ * sampling modes.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Basically power up device.
+ * Called with IIO device's lock held.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_preenable_buffer(struct iio_dev *indio_dev)
+{
+ int ret = zpa2326_resume(indio_dev);
+
+ if (ret < 0)
+ return ret;
+
+ /* Tell zpa2326_postenable_buffer() if we have just been powered on. */
+ ((struct zpa2326_private *)
+ iio_priv(indio_dev))->waken = !ret;
+
+ return 0;
+}
+
+/**
+ * zpa2326_postenable_buffer() - Configure device for triggered sampling.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ *
+ * Basically setup one-shot mode if plugging an external trigger.
+ * Otherwise, let the internal trigger configure continuous sampling:
+ * see zpa2326_set_trigger_state().
+ *
+ * If an error is returned, the IIO layer will call our postdisable hook for
+ * us, i.e. there is no need to explicitly power the device off here.
+ *
+ * Called with IIO device's lock held.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
+{
+ const struct zpa2326_private *priv = iio_priv(indio_dev);
+ int err;
+
+ if (!priv->waken) {
+ /*
+ * We were already powered. Just clear the hardware FIFO to
+ * get rid of samples acquired during previous rounds (if any).
+ */
+ err = zpa2326_clear_fifo(indio_dev, 0);
+ if (err)
+ goto err;
+ }
+
+ if (!iio_trigger_using_own(indio_dev) && priv->waken) {
+ /*
+ * We are using an external trigger and we have just been
+ * powered up: reconfigure one-shot mode.
+ */
+ err = zpa2326_config_oneshot(indio_dev, priv->irq);
+ if (err)
+ goto err;
+ }
+
+ /* Plug our own trigger event handler. */
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ zpa2326_err(indio_dev, "failed to enable buffering (%d)", err);
+
+ return err;
+}
+
+static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev)
+{
+ zpa2326_suspend(indio_dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops zpa2326_buffer_setup_ops = {
+ .preenable = zpa2326_preenable_buffer,
+ .postenable = zpa2326_postenable_buffer,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = zpa2326_postdisable_buffer
+};
+
+/**
+ * zpa2326_set_trigger_state() - Start / stop continuous sampling.
+ * @trig: The trigger being attached to IIO device associated with the sampling
+ * hardware.
+ * @state: Tell whether to start (true) or stop (false)
+ *
+ * Basically enable / disable hardware continuous sampling mode.
+ *
+ * Called with IIO device's lock held at postenable() or predisable() time.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ const struct iio_dev *indio_dev = dev_get_drvdata(
+ trig->dev.parent);
+ const struct zpa2326_private *priv = iio_priv(indio_dev);
+ int err;
+
+ if (!state) {
+ /*
+ * Switch trigger off : in case of failure, interrupt is left
+ * disabled in order to prevent handler from accessing released
+ * resources.
+ */
+ unsigned int val;
+
+ /*
+ * As the device is working in continuous mode, handlers may be
+ * accessing resources we are currently freeing...
+ * Prevent this by disabling interrupt handlers and ensuring
+ * the device will generate no more interrupts unless explicitly
+ * required to, i.e. by restoring the default one shot mode.
+ */
+ disable_irq(priv->irq);
+
+ /*
+ * Disable continuous sampling mode to restore settings for
+ * one shot / direct sampling operations.
+ */
+ err = regmap_write(priv->regmap, ZPA2326_CTRL_REG3_REG,
+ zpa2326_highest_frequency()->odr);
+ if (err)
+ return err;
+
+ /*
+ * Now that device won't generate interrupts on its own,
+ * acknowledge any currently active interrupts (may happen on
+ * rare occasions while stopping continuous mode).
+ */
+ err = regmap_read(priv->regmap, ZPA2326_INT_SOURCE_REG, &val);
+ if (err < 0)
+ return err;
+
+ /*
+ * Re-enable interrupts only if we can guarantee the device will
+ * generate no more interrupts to prevent handlers from
+ * accessing released resources.
+ */
+ enable_irq(priv->irq);
+
+ zpa2326_dbg(indio_dev, "continuous mode stopped");
+ } else {
+ /*
+ * Switch trigger on : start continuous sampling at required
+ * frequency.
+ */
+
+ if (priv->waken) {
+ /* Enable interrupt if getting out of reset. */
+ err = regmap_write(priv->regmap, ZPA2326_CTRL_REG1_REG,
+ (u8)
+ ~ZPA2326_CTRL_REG1_MASK_DATA_READY);
+ if (err)
+ return err;
+ }
+
+ /* Enable continuous sampling at specified frequency. */
+ err = regmap_write(priv->regmap, ZPA2326_CTRL_REG3_REG,
+ ZPA2326_CTRL_REG3_ENABLE_MEAS |
+ priv->frequency->odr);
+ if (err)
+ return err;
+
+ zpa2326_dbg(indio_dev, "continuous mode setup @%dHz",
+ priv->frequency->hz);
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops zpa2326_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = zpa2326_set_trigger_state,
+};
+
+/**
+ * zpa2326_init_managed_trigger() - Create an interrupt driven / hardware trigger
+ * allowing to notify external devices a new sample is
+ * ready.
+ * @parent: Hardware sampling device @indio_dev is a child of.
+ * @indio_dev: The IIO device associated with the sampling hardware.
+ * @private: Internal private state related to @indio_dev.
+ * @irq: Optional interrupt line the hardware uses to notify new data
+ * samples are ready. Negative or zero values indicate no interrupts
+ * are available, meaning polling is required.
+ *
+ * Only relevant when DT declares a valid interrupt line.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+static int zpa2326_init_managed_trigger(struct device *parent,
+ struct iio_dev *indio_dev,
+ struct zpa2326_private *private,
+ int irq)
+{
+ struct iio_trigger *trigger;
+ int ret;
+
+ if (irq <= 0)
+ return 0;
+
+ trigger = devm_iio_trigger_alloc(parent, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!trigger)
+ return -ENOMEM;
+
+ /* Basic setup. */
+ trigger->dev.parent = parent;
+ trigger->ops = &zpa2326_trigger_ops;
+
+ private->trigger = trigger;
+
+ /* Register to triggers space. */
+ ret = devm_iio_trigger_register(parent, trigger);
+ if (ret)
+ dev_err(parent, "failed to register hardware trigger (%d)",
+ ret);
+
+ return ret;
+}
+
+static int zpa2326_get_frequency(const struct iio_dev *indio_dev)
+{
+ return ((struct zpa2326_private *)iio_priv(indio_dev))->frequency->hz;
+}
+
+static int zpa2326_set_frequency(struct iio_dev *indio_dev, int hz)
+{
+ struct zpa2326_private *priv = iio_priv(indio_dev);
+ int freq;
+ int err;
+
+ /* Check if requested frequency is supported. */
+ for (freq = 0; freq < ARRAY_SIZE(zpa2326_sampling_frequencies); freq++)
+ if (zpa2326_sampling_frequencies[freq].hz == hz)
+ break;
+ if (freq == ARRAY_SIZE(zpa2326_sampling_frequencies))
+ return -EINVAL;
+
+ /* Don't allow changing frequency if buffered sampling is ongoing. */
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ priv->frequency = &zpa2326_sampling_frequencies[freq];
+
+ iio_device_release_direct_mode(indio_dev);
+
+ return 0;
+}
+
+/* Expose supported hardware sampling frequencies (Hz) through sysfs. */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1 5 11 23");
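+
+/*
+ * Illustration only (the device index depends on the system): selecting the
+ * fastest continuous rate from userspace would look like
+ *   echo 23 > /sys/bus/iio/devices/iio:deviceX/sampling_frequency
+ */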
+
+static struct attribute *zpa2326_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group zpa2326_attribute_group = {
+ .attrs = zpa2326_attributes,
+};
+
+static int zpa2326_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return zpa2326_sample_oneshot(indio_dev, chan->type, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ /*
+ * Pressure resolution is 1/64 Pascal. Scale to kPascal
+ * as required by IIO ABI.
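+ * For illustration, a raw count of 65536 therefore reads back as
+ * 65536 / 64000 kPa, i.e. 1.024 kPa (1024 Pa).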
+ */
+ *val = 1;
+ *val2 = 64000;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_TEMP:
+ /*
+ * Temperature follows the equation:
+ * Temp[degC] = Tempcode * 0.00649 - 176.83
+ * where:
+ * Tempcode is composed of the raw sampled 16 bits.
+ *
+ * Hence, to produce a temperature in milli-degrees
+ * Celsius according to IIO ABI, we need to apply the
+ * following equation to raw samples:
+ * Temp[milli degC] = (Tempcode + Offset) * Scale
+ * where:
+ * Offset = -176.83 / 0.00649
+ * Scale = 0.00649 * 1000
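+ *
+ * Purely as a worked illustration: a raw Tempcode of 30000 maps to
+ * 30000 * 0.00649 - 176.83 ~= 17.87 degC, i.e. about 17870 milli
+ * degrees Celsius once the offset and scale above are applied.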
+ */
+ *val = 6;
+ *val2 = 490000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = -17683000;
+ *val2 = 649;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = zpa2326_get_frequency(indio_dev);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int zpa2326_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ if ((mask != IIO_CHAN_INFO_SAMP_FREQ) || val2)
+ return -EINVAL;
+
+ return zpa2326_set_frequency(indio_dev, val);
+}
+
+static const struct iio_chan_spec zpa2326_channels[] = {
+ [0] = {
+ .type = IIO_PRESSURE,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ [1] = {
+ .type = IIO_TEMP,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ [2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_info zpa2326_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &zpa2326_attribute_group,
+ .read_raw = zpa2326_read_raw,
+ .write_raw = zpa2326_write_raw,
+};
+
+static struct iio_dev *zpa2326_create_managed_iiodev(struct device *device,
+ const char *name,
+ struct regmap *regmap)
+{
+ struct iio_dev *indio_dev;
+
+ /* Allocate space to hold IIO device internal state. */
+ indio_dev = devm_iio_device_alloc(device,
+ sizeof(struct zpa2326_private));
+ if (!indio_dev)
+ return NULL;
+
+ /* Setup for userspace synchronous on demand sampling. */
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = device;
+ indio_dev->channels = zpa2326_channels;
+ indio_dev->num_channels = ARRAY_SIZE(zpa2326_channels);
+ indio_dev->name = name;
+ indio_dev->info = &zpa2326_info;
+
+ return indio_dev;
+}
+
+int zpa2326_probe(struct device *parent,
+ const char *name,
+ int irq,
+ unsigned int hwid,
+ struct regmap *regmap)
+{
+ struct iio_dev *indio_dev;
+ struct zpa2326_private *priv;
+ int err;
+ unsigned int id;
+
+ indio_dev = zpa2326_create_managed_iiodev(parent, name, regmap);
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ priv->vref = devm_regulator_get(parent, "vref");
+ if (IS_ERR(priv->vref))
+ return PTR_ERR(priv->vref);
+
+ priv->vdd = devm_regulator_get(parent, "vdd");
+ if (IS_ERR(priv->vdd))
+ return PTR_ERR(priv->vdd);
+
+ /* Set default hardware sampling frequency to highest rate supported. */
+ priv->frequency = zpa2326_highest_frequency();
+
+ /*
+ * Plug device's underlying bus abstraction : this MUST be set before
+ * registering interrupt handlers since an interrupt might happen if
+ * power up sequence is not properly applied.
+ */
+ priv->regmap = regmap;
+
+ err = devm_iio_triggered_buffer_setup(parent, indio_dev, NULL,
+ zpa2326_trigger_handler,
+ &zpa2326_buffer_setup_ops);
+ if (err)
+ return err;
+
+ err = zpa2326_init_managed_trigger(parent, indio_dev, priv, irq);
+ if (err)
+ return err;
+
+ err = zpa2326_init_managed_irq(parent, indio_dev, priv, irq);
+ if (err)
+ return err;
+
+ /* Power up to check device ID and perform initial hardware setup. */
+ err = zpa2326_power_on(indio_dev, priv);
+ if (err)
+ return err;
+
+ /* Read id register to check we are talking to the right slave. */
+ err = regmap_read(regmap, ZPA2326_DEVICE_ID_REG, &id);
+ if (err)
+ goto sleep;
+
+ if (id != hwid) {
+ dev_err(parent, "found device with unexpected id %02x", id);
+ err = -ENODEV;
+ goto sleep;
+ }
+
+ err = zpa2326_config_oneshot(indio_dev, irq);
+ if (err)
+ goto sleep;
+
+ /* Setup done: go sleeping. Device will be woken up upon user request. */
+ err = zpa2326_sleep(indio_dev);
+ if (err)
+ goto poweroff;
+
+ dev_set_drvdata(parent, indio_dev);
+
+ zpa2326_init_runtime(parent);
+
+ err = iio_device_register(indio_dev);
+ if (err) {
+ zpa2326_fini_runtime(parent);
+ goto poweroff;
+ }
+
+ return 0;
+
+sleep:
+ /* Put to sleep just in case power regulators are "dummy" ones. */
+ zpa2326_sleep(indio_dev);
+poweroff:
+ zpa2326_power_off(indio_dev, priv);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(zpa2326_probe);
+
+void zpa2326_remove(const struct device *parent)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(parent);
+
+ iio_device_unregister(indio_dev);
+ zpa2326_fini_runtime(indio_dev->dev.parent);
+ zpa2326_sleep(indio_dev);
+ zpa2326_power_off(indio_dev, iio_priv(indio_dev));
+}
+EXPORT_SYMBOL_GPL(zpa2326_remove);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("Core driver for Murata ZPA2326 pressure sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/zpa2326.h b/drivers/iio/pressure/zpa2326.h
new file mode 100644
index 000000000000..05d3e1e3a449
--- /dev/null
+++ b/drivers/iio/pressure/zpa2326.h
@@ -0,0 +1,89 @@
+/*
+ * Murata ZPA2326 pressure and temperature sensor IIO driver
+ *
+ * Copyright (c) 2016 Parrot S.A.
+ *
+ * Author: Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ZPA2326_H
+#define _ZPA2326_H
+
+/* Register map. */
+#define ZPA2326_REF_P_XL_REG (0x8)
+#define ZPA2326_REF_P_L_REG (0x9)
+#define ZPA2326_REF_P_H_REG (0xa)
+#define ZPA2326_DEVICE_ID_REG (0xf)
+#define ZPA2326_DEVICE_ID (0xb9)
+#define ZPA2326_RES_CONF_REG (0x10)
+#define ZPA2326_CTRL_REG0_REG (0x20)
+#define ZPA2326_CTRL_REG0_ONE_SHOT BIT(0)
+#define ZPA2326_CTRL_REG0_ENABLE BIT(1)
+#define ZPA2326_CTRL_REG1_REG (0x21)
+#define ZPA2326_CTRL_REG1_MASK_DATA_READY BIT(2)
+#define ZPA2326_CTRL_REG2_REG (0x22)
+#define ZPA2326_CTRL_REG2_SWRESET BIT(2)
+#define ZPA2326_CTRL_REG3_REG (0x23)
+#define ZPA2326_CTRL_REG3_ODR_SHIFT (4)
+#define ZPA2326_CTRL_REG3_ENABLE_MEAS BIT(7)
+#define ZPA2326_INT_SOURCE_REG (0x24)
+#define ZPA2326_INT_SOURCE_DATA_READY BIT(2)
+#define ZPA2326_THS_P_LOW_REG (0x25)
+#define ZPA2326_THS_P_HIGH_REG (0x26)
+#define ZPA2326_STATUS_REG (0x27)
+#define ZPA2326_STATUS_P_DA BIT(1)
+#define ZPA2326_STATUS_FIFO_E BIT(2)
+#define ZPA2326_STATUS_P_OR BIT(5)
+#define ZPA2326_PRESS_OUT_XL_REG (0x28)
+#define ZPA2326_PRESS_OUT_L_REG (0x29)
+#define ZPA2326_PRESS_OUT_H_REG (0x2a)
+#define ZPA2326_TEMP_OUT_L_REG (0x2b)
+#define ZPA2326_TEMP_OUT_H_REG (0x2c)
+
+struct device;
+struct regmap;
+
+bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg);
+bool zpa2326_isreg_readable(struct device *dev, unsigned int reg);
+bool zpa2326_isreg_precious(struct device *dev, unsigned int reg);
+
+/**
+ * zpa2326_probe() - Instantiate and register core ZPA2326 IIO device
+ * @parent: Hardware sampling device the created IIO device will be a child of.
+ * @name: Arbitrary name to identify the device.
+ * @irq: Interrupt line, negative if none.
+ * @hwid: Expected device hardware id.
+ * @regmap: Registers map used to abstract underlying bus accesses.
+ *
+ * Return: Zero when successful, a negative error code otherwise.
+ */
+int zpa2326_probe(struct device *parent,
+ const char *name,
+ int irq,
+ unsigned int hwid,
+ struct regmap *regmap);
+
+/**
+ * zpa2326_remove() - Unregister and destroy core ZPA2326 IIO device.
+ * @parent: Hardware sampling device the IIO device to remove is a child of.
+ */
+void zpa2326_remove(const struct device *parent);
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+extern const struct dev_pm_ops zpa2326_pm_ops;
+#define ZPA2326_PM_OPS (&zpa2326_pm_ops)
+#else
+#define ZPA2326_PM_OPS (NULL)
+#endif
+
+#endif
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
new file mode 100644
index 000000000000..e4d27dd4493a
--- /dev/null
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -0,0 +1,99 @@
+/*
+ * Murata ZPA2326 I2C pressure and temperature sensor driver
+ *
+ * Copyright (c) 2016 Parrot S.A.
+ *
+ * Author: Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include "zpa2326.h"
+
+/*
+ * read_flag_mask:
+ * - address bit 7 must be set to request a register read operation
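+ *
+ * For illustration, reading ZPA2326_PRESS_OUT_XL_REG (0x28) therefore puts
+ * register address 0xa8 (0x28 | BIT(7)) on the bus.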
+ */
+static const struct regmap_config zpa2326_regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = zpa2326_isreg_writeable,
+ .readable_reg = zpa2326_isreg_readable,
+ .precious_reg = zpa2326_isreg_precious,
+ .max_register = ZPA2326_TEMP_OUT_H_REG,
+ .read_flag_mask = BIT(7),
+ .cache_type = REGCACHE_NONE,
+};
+
+static unsigned int zpa2326_i2c_hwid(const struct i2c_client *client)
+{
+#define ZPA2326_SA0(_addr) (_addr & BIT(0))
+#define ZPA2326_DEVICE_ID_SA0_SHIFT (1)
+
+ /* Identification register bit 1 mirrors device address bit 0. */
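+ /*
+ * For illustration: an address with SA0 cleared yields the expected
+ * ID 0xb9, while an address with SA0 set yields 0xb9 | BIT(1) = 0xbb.
+ */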
+ return (ZPA2326_DEVICE_ID |
+ (ZPA2326_SA0(client->addr) << ZPA2326_DEVICE_ID_SA0_SHIFT));
+}
+
+static int zpa2326_probe_i2c(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &zpa2326_regmap_i2c_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "failed to init registers map");
+ return PTR_ERR(regmap);
+ }
+
+ return zpa2326_probe(&client->dev, i2c_id->name, client->irq,
+ zpa2326_i2c_hwid(client), regmap);
+}
+
+static int zpa2326_remove_i2c(struct i2c_client *client)
+{
+ zpa2326_remove(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id zpa2326_i2c_ids[] = {
+ { "zpa2326", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, zpa2326_i2c_ids);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id zpa2326_i2c_matches[] = {
+ { .compatible = "murata,zpa2326" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, zpa2326_i2c_matches);
+#endif
+
+static struct i2c_driver zpa2326_i2c_driver = {
+ .driver = {
+ .name = "zpa2326-i2c",
+ .of_match_table = of_match_ptr(zpa2326_i2c_matches),
+ .pm = ZPA2326_PM_OPS,
+ },
+ .probe = zpa2326_probe_i2c,
+ .remove = zpa2326_remove_i2c,
+ .id_table = zpa2326_i2c_ids,
+};
+module_i2c_driver(zpa2326_i2c_driver);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("I2C driver for Murata ZPA2326 pressure sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
new file mode 100644
index 000000000000..bd2c1c319fca
--- /dev/null
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -0,0 +1,103 @@
+/*
+ * Murata ZPA2326 SPI pressure and temperature sensor driver
+ *
+ * Copyright (c) 2016 Parrot S.A.
+ *
+ * Author: Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/of_device.h>
+#include "zpa2326.h"
+
+/*
+ * read_flag_mask:
+ * - address bit 7 must be set to request a register read operation
+ * - address bit 6 must be set to request register address auto increment
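+ *
+ * As an example, a bulk read of the pressure output starting at
+ * ZPA2326_PRESS_OUT_XL_REG (0x28) is issued with address byte 0xe8
+ * (0x28 | BIT(7) | BIT(6)) so the three output registers are read back
+ * to back.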
+ */
+static const struct regmap_config zpa2326_regmap_spi_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = zpa2326_isreg_writeable,
+ .readable_reg = zpa2326_isreg_readable,
+ .precious_reg = zpa2326_isreg_precious,
+ .max_register = ZPA2326_TEMP_OUT_H_REG,
+ .read_flag_mask = BIT(7) | BIT(6),
+ .cache_type = REGCACHE_NONE,
+};
+
+static int zpa2326_probe_spi(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ int err;
+
+ regmap = devm_regmap_init_spi(spi, &zpa2326_regmap_spi_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "failed to init registers map");
+ return PTR_ERR(regmap);
+ }
+
+ /*
+ * Enforce SPI slave settings to guard against DT misconfiguration.
+ *
+ * Clock is idle high. Sampling happens on trailing edge, i.e., rising
+ * edge. Maximum bus frequency is 1 MHz. Registers are 8 bits wide.
+ */
+ spi->mode = SPI_MODE_3;
+ spi->max_speed_hz = min(spi->max_speed_hz, 1000000U);
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err < 0)
+ return err;
+
+ return zpa2326_probe(&spi->dev, spi_get_device_id(spi)->name,
+ spi->irq, ZPA2326_DEVICE_ID, regmap);
+}
+
+static int zpa2326_remove_spi(struct spi_device *spi)
+{
+ zpa2326_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id zpa2326_spi_ids[] = {
+ { "zpa2326", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, zpa2326_spi_ids);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id zpa2326_spi_matches[] = {
+ { .compatible = "murata,zpa2326" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, zpa2326_spi_matches);
+#endif
+
+static struct spi_driver zpa2326_spi_driver = {
+ .driver = {
+ .name = "zpa2326-spi",
+ .of_match_table = of_match_ptr(zpa2326_spi_matches),
+ .pm = ZPA2326_PM_OPS,
+ },
+ .probe = zpa2326_probe_spi,
+ .remove = zpa2326_remove_spi,
+ .id_table = zpa2326_spi_ids,
+};
+module_spi_driver(zpa2326_spi_driver);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("SPI driver for Murata ZPA2326 pressure sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 1d74b3aafeed..1f06282ec793 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -516,7 +516,7 @@ static irqreturn_t sx9500_irq_thread_handler(int irq, void *private)
sx9500_push_events(indio_dev);
if (val & SX9500_CONVDONE_IRQ)
- complete_all(&data->completion);
+ complete(&data->completion);
out:
mutex_unlock(&data->mutex);
@@ -1025,6 +1025,12 @@ static const struct acpi_device_id sx9500_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);
+static const struct of_device_id sx9500_of_match[] = {
+ { .compatible = "semtech,sx9500", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sx9500_of_match);
+
static const struct i2c_device_id sx9500_id[] = {
{"sx9500", 0},
{ },
@@ -1035,6 +1041,7 @@ static struct i2c_driver sx9500_driver = {
.driver = {
.name = SX9500_DRIVER_NAME,
.acpi_match_table = ACPI_PTR(sx9500_acpi_match),
+ .of_match_table = of_match_ptr(sx9500_of_match),
.pm = &sx9500_pm_ops,
},
.probe = sx9500_probe,
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index c4664e5de791..5ea77a7e261d 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -3,6 +3,22 @@
#
menu "Temperature sensors"
+config MAXIM_THERMOCOUPLE
+ tristate "Maxim thermocouple sensors"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Maxim series of
+ thermocouple sensors connected via SPI.
+
+ Supported sensors:
+ * MAX6675
+ * MAX31855
+
+ This driver can also be built as a module. If so, the module will
+ be called maxim_thermocouple.
+
config MLX90614
tristate "MLX90614 contact-less infrared sensor"
depends on I2C
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index 02bc79d49b24..78c3de0dc3f0 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -2,6 +2,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_TMP006) += tmp006.o
obj-$(CONFIG_TSYS01) += tsys01.o
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
new file mode 100644
index 000000000000..39dd2026ccc9
--- /dev/null
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -0,0 +1,281 @@
+/*
+ * maxim_thermocouple.c - Support for Maxim thermocouple chips
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define MAXIM_THERMOCOUPLE_DRV_NAME "maxim_thermocouple"
+
+enum {
+ MAX6675,
+ MAX31855,
+};
+
+static const struct iio_chan_spec max6675_channels[] = {
+ { /* thermocouple temperature */
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 13,
+ .storagebits = 16,
+ .shift = 3,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec max31855_channels[] = {
+ { /* thermocouple temperature */
+ .type = IIO_TEMP,
+ .address = 2,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 16,
+ .shift = 2,
+ .endianness = IIO_BE,
+ },
+ },
+ { /* cold junction temperature */
+ .type = IIO_TEMP,
+ .address = 0,
+ .channel2 = IIO_MOD_TEMP_AMBIENT,
+ .modified = 1,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const unsigned long max31855_scan_masks[] = {0x3, 0};
+
+struct maxim_thermocouple_chip {
+ const struct iio_chan_spec *channels;
+ const unsigned long *scan_masks;
+ u8 num_channels;
+ u8 read_size;
+
+ /* bit-check for valid input */
+ u32 status_bit;
+};
+
+static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
+ [MAX6675] = {
+ .channels = max6675_channels,
+ .num_channels = ARRAY_SIZE(max6675_channels),
+ .read_size = 2,
+ .status_bit = BIT(2),
+ },
+ [MAX31855] = {
+ .channels = max31855_channels,
+ .num_channels = ARRAY_SIZE(max31855_channels),
+ .read_size = 4,
+ .scan_masks = max31855_scan_masks,
+ .status_bit = BIT(16),
+ },
+};
+
+struct maxim_thermocouple_data {
+ struct spi_device *spi;
+ const struct maxim_thermocouple_chip *chip;
+
+ u8 buffer[16] ____cacheline_aligned;
+};
+
+static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ unsigned int storage_bytes = data->chip->read_size;
+ unsigned int shift = chan->scan_type.shift + (chan->address * 8);
+ unsigned int buf;
+ int ret;
+
+ ret = spi_read(data->spi, (void *) &buf, storage_bytes);
+ if (ret)
+ return ret;
+
+ switch (storage_bytes) {
+ case 2:
+ *val = be16_to_cpu(buf);
+ break;
+ case 4:
+ *val = be32_to_cpu(buf);
+ break;
+ }
+
+ /* check to be sure this is a valid reading */
+ if (*val & data->chip->status_bit)
+ return -EINVAL;
+
+ *val = sign_extend32(*val >> shift, chan->scan_type.realbits - 1);
+
+ return 0;
+}
+
+static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct maxim_thermocouple_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_read(data->spi, data->buffer, data->chip->read_size);
+ if (!ret) {
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns(indio_dev));
+ }
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct maxim_thermocouple_data *data = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = maxim_thermocouple_read(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+
+ if (!ret)
+ return IIO_VAL_INT;
+
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ *val = 62;
+ *val2 = 500000; /* 1000 * 0.0625 */
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ *val = 250; /* 1000 * 0.25 */
+ ret = IIO_VAL_INT;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_info maxim_thermocouple_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = maxim_thermocouple_read_raw,
+};
+
+static int maxim_thermocouple_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct maxim_thermocouple_data *data;
+ const struct maxim_thermocouple_chip *chip =
+ &maxim_thermocouple_chips[id->driver_data];
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &maxim_thermocouple_info;
+ indio_dev->name = MAXIM_THERMOCOUPLE_DRV_NAME;
+ indio_dev->channels = chip->channels;
+ indio_dev->available_scan_masks = chip->scan_masks;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data = iio_priv(indio_dev);
+ data->spi = spi;
+ data->chip = chip;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ maxim_thermocouple_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int maxim_thermocouple_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id maxim_thermocouple_id[] = {
+ {"max6675", MAX6675},
+ {"max31855", MAX31855},
+ {},
+};
+MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
+
+static struct spi_driver maxim_thermocouple_driver = {
+ .driver = {
+ .name = MAXIM_THERMOCOUPLE_DRV_NAME,
+ },
+ .probe = maxim_thermocouple_probe,
+ .remove = maxim_thermocouple_remove,
+ .id_table = maxim_thermocouple_id,
+};
+module_spi_driver(maxim_thermocouple_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("Maxim thermocouple sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index e9b7dc037ff8..19a418a1b631 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -74,6 +74,7 @@ source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1374541a4528..b136d3acc5bd 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -800,7 +800,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = create_singlethread_workqueue("ib_addr");
+ addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
if (!addr_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5f65a78b27c9..36bf50ebb187 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4369,7 +4369,7 @@ static int __init cma_init(void)
{
int ret;
- cma_wq = create_singlethread_workqueue("rdma_cm");
+ cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 357624f8b9d3..5495e22839a7 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -1160,7 +1160,7 @@ static int __init iw_cm_init(void)
if (ret)
pr_err("iw_cm: couldn't register netlink callbacks\n");
- iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2d49228f28b2..40cbd6bdb73b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3160,7 +3160,7 @@ static int ib_mad_port_open(struct ib_device *device,
goto error3;
}
- port_priv->pd = ib_alloc_pd(device);
+ port_priv->pd = ib_alloc_pd(device, 0);
if (IS_ERR(port_priv->pd)) {
dev_err(&device->dev, "Couldn't create ib_mad PD\n");
ret = PTR_ERR(port_priv->pd);
@@ -3177,7 +3177,7 @@ static int ib_mad_port_open(struct ib_device *device,
goto error7;
snprintf(name, sizeof name, "ib_mad%d", port_num);
- port_priv->wq = create_singlethread_workqueue(name);
+ port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!port_priv->wq) {
ret = -ENOMEM;
goto error8;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 51c79b2fb0b8..e51b739f6ea3 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -873,7 +873,7 @@ int mcast_init(void)
{
int ret;
- mcast_wq = create_singlethread_workqueue("ib_mcast");
+ mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
if (!mcast_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index b9bf7aa055e7..81b742ca1639 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -2015,7 +2015,7 @@ int ib_sa_init(void)
goto err2;
}
- ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+ ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
if (!ib_nl_wq) {
ret = -ENOMEM;
goto err3;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15defefecb4f..c1fb545e8d78 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1193,7 +1193,7 @@ static ssize_t set_node_desc(struct device *device,
if (!dev->modify_device)
return -EIO;
- memcpy(desc.node_desc, buf, min_t(int, count, 64));
+ memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2825ece91d3c..9520154f1d7c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1638,7 +1638,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
if (!file)
return -ENOMEM;
- file->close_wq = create_singlethread_workqueue("ucma_close_id");
+ file->close_wq = alloc_ordered_workqueue("ucma_close_id",
+ WQ_MEM_RECLAIM);
if (!file->close_wq) {
kfree(file);
return -ENOMEM;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f6647318138d..cb3f515a2285 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -571,7 +571,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
pd->device = ib_dev;
pd->uobject = uobj;
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0);
uobj->object = pd;
@@ -3078,51 +3078,102 @@ out_put:
return ret ? ret : in_len;
}
+static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
+{
+ /* Returns the user space filter size, including padding */
+ return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+}
+
+static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
+ u16 ib_real_filter_sz)
+{
+ /*
+ * User space filter structures must be 64 bit aligned; otherwise this
+ * check may pass, but additional new attributes will not be handled.
+ */
+
+ if (kern_filter_size > ib_real_filter_sz) {
+ if (memchr_inv(kern_spec_filter +
+ ib_real_filter_sz, 0,
+ kern_filter_size - ib_real_filter_sz))
+ return -EINVAL;
+ return ib_real_filter_sz;
+ }
+ return kern_filter_size;
+}
+
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ ssize_t actual_filter_sz;
+ ssize_t kern_filter_sz;
+ ssize_t ib_filter_sz;
+ void *kern_spec_mask;
+ void *kern_spec_val;
+
if (kern_spec->reserved)
return -EINVAL;
ib_spec->type = kern_spec->type;
+ kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+ /* User flow spec size must be aligned to 4 bytes */
+ if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
+ return -EINVAL;
+
+ kern_spec_val = (void *)kern_spec +
+ sizeof(struct ib_uverbs_flow_spec_hdr);
+ kern_spec_mask = kern_spec_val + kern_filter_sz;
+
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
- ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
- if (ib_spec->eth.size != kern_spec->eth.size)
+ ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
- sizeof(struct ib_flow_eth_filter));
- memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
- sizeof(struct ib_flow_eth_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_eth);
+ memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
- ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
- if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+ ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
- sizeof(struct ib_flow_ipv4_filter));
- memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
- sizeof(struct ib_flow_ipv4_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
+ memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV6:
- ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
- if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+ ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
+ memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
+
+ if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
+ (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
return -EINVAL;
- memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
- sizeof(struct ib_flow_ipv6_filter));
- memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
- sizeof(struct ib_flow_ipv6_filter));
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
- ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
- if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+ ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
- sizeof(struct ib_flow_tcp_udp_filter));
- memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
- sizeof(struct ib_flow_tcp_udp_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
+ memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
default:
return -EINVAL;
@@ -3654,7 +3705,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
goto err_uobj;
}
- flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
+ flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
+ sizeof(union ib_flow_spec), GFP_KERNEL);
if (!flow_attr) {
err = -ENOMEM;
goto err_put;
@@ -4173,6 +4225,23 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.device_cap_flags_ex = attr.device_cap_flags;
resp.response_length += sizeof(resp.device_cap_flags_ex);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
+ goto end;
+
+ resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
+ resp.rss_caps.max_rwq_indirection_tables =
+ attr.rss_caps.max_rwq_indirection_tables;
+ resp.rss_caps.max_rwq_indirection_table_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+
+ resp.response_length += sizeof(resp.rss_caps);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
+ goto end;
+
+ resp.max_wq_type_rq = attr.max_wq_type_rq;
+ resp.response_length += sizeof(resp.max_wq_type_rq);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
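spec_filter_size() above allows newer user space to pass a filter that is larger than the kernel's known layout, but only if every byte past the real filter structure is zero (checked with memchr_inv()); anything else rejects the flow spec. A stand-alone restatement of that rule, with hypothetical names and a plain error return instead of -EINVAL:

#include <stddef.h>
#include <sys/types.h>

static ssize_t example_filter_size(const unsigned char *ufilter,
                                   size_t user_sz, size_t kern_sz)
{
        size_t i;

        if (user_sz <= kern_sz)
                return user_sz;

        for (i = kern_sz; i < user_sz; i++)     /* open-coded memchr_inv() */
                if (ufilter[i] != 0)
                        return -1;              /* unknown attribute bits set */

        return kern_sz;
}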
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f2b776efab3a..83687646da68 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -227,9 +227,11 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
* Every PD has a local_dma_lkey which can be used as the lkey value for local
* memory operations.
*/
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+ const char *caller)
{
struct ib_pd *pd;
+ int mr_access_flags = 0;
pd = device->alloc_pd(device, NULL, NULL);
if (IS_ERR(pd))
@@ -237,26 +239,46 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
pd->device = device;
pd->uobject = NULL;
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0);
+ pd->flags = flags;
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey;
- else {
+ else
+ mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ pr_warn("%s: enabling unsafe global rkey\n", caller);
+ mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+ }
+
+ if (mr_access_flags) {
struct ib_mr *mr;
- mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+ mr = pd->device->get_dma_mr(pd, mr_access_flags);
if (IS_ERR(mr)) {
ib_dealloc_pd(pd);
- return (struct ib_pd *)mr;
+ return ERR_CAST(mr);
}
- pd->local_mr = mr;
- pd->local_dma_lkey = pd->local_mr->lkey;
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->uobject = NULL;
+ mr->need_inval = false;
+
+ pd->__internal_mr = mr;
+
+ if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+ pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ pd->unsafe_global_rkey = pd->__internal_mr->rkey;
}
+
return pd;
}
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
/**
* ib_dealloc_pd - Deallocates a protection domain.
@@ -270,10 +292,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
{
int ret;
- if (pd->local_mr) {
- ret = ib_dereg_mr(pd->local_mr);
+ if (pd->__internal_mr) {
+ ret = pd->device->dereg_mr(pd->__internal_mr);
WARN_ON(ret);
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
}
/* uverbs manipulates usecnt with proper locking, while the kabi
@@ -821,7 +843,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (ret) {
pr_err("failed to init MR pool ret= %d\n", ret);
ib_destroy_qp(qp);
- qp = ERR_PTR(ret);
+ return ERR_PTR(ret);
}
}
@@ -1391,29 +1413,6 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
- struct ib_mr *mr;
- int err;
-
- err = ib_check_mr_access(mr_access_flags);
- if (err)
- return ERR_PTR(err);
-
- mr = pd->device->get_dma_mr(pd, mr_access_flags);
-
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- mr->need_inval = false;
- }
-
- return mr;
-}
-EXPORT_SYMBOL(ib_get_dma_mr);
-
int ib_dereg_mr(struct ib_mr *mr)
{
struct ib_pd *pd = mr->pd;
@@ -1812,13 +1811,13 @@ EXPORT_SYMBOL(ib_set_vf_guid);
*
* Constraints:
* - The first sg element is allowed to have an offset.
- * - Each sg element must be aligned to page_size (or physically
- * contiguous to the previous element). In case an sg element has a
- * non contiguous offset, the mapping prefix will not include it.
+ * - Each sg element must either be aligned to page_size or virtually
+ * contiguous to the previous element. In case an sg element has a
+ * non-contiguous offset, the mapping prefix will not include it.
* - The last sg element is allowed to have length less than page_size.
* - If sg_nents total byte length exceeds the mr max_num_sge * page_size
* then only max_num_sg entries will be mapped.
- * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, non of these
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
* constraints holds and the page_size argument is ignored.
*
* Returns the number of sg elements that were mapped to the memory region.
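With this rework a PD only gets a backing DMA MR when the device lacks a local DMA lkey or the caller explicitly requests IB_PD_UNSAFE_GLOBAL_RKEY, which __ib_alloc_pd() warns about. A minimal caller-side sketch, mirroring how ib_mad_port_open() in this same series now passes flags = 0 (the wrapper function is hypothetical):

#include <rdma/ib_verbs.h>

static struct ib_pd *example_alloc_pd(struct ib_device *device)
{
        /* flags = 0: no unsafe global rkey; local_dma_lkey is used if the
         * device advertises IB_DEVICE_LOCAL_DMA_LKEY.
         */
        struct ib_pd *pd = ib_alloc_pd(device, 0);

        if (IS_ERR(pd))
                return pd;

        /* Legacy ULPs would instead pass IB_PD_UNSAFE_GLOBAL_RKEY and read
         * pd->unsafe_global_rkey, triggering the pr_warn() in __ib_alloc_pd().
         */
        return pd;
}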
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c0c7cf8af3f4..21fe401ff178 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
+obj-$(CONFIG_INFINIBAND_HNS) += hns/
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 8e77dc543dd1..b3e11329801d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -36,7 +36,7 @@
#include "cxgb3_offload.h"
#include "iwch_provider.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
#include "iwch.h"
#include "iwch_cm.h"
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 04bbf172abde..65ee64400deb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -2258,7 +2258,7 @@ int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
- workq = create_singlethread_workqueue("iw_cxgb3");
+ workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
if (!workq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 3edb80644b53..cba57bb53dba 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -58,7 +58,7 @@
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
#include "common.h"
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
@@ -1396,6 +1396,7 @@ int iwch_register_device(struct iwch_dev *dev)
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
+ BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
dev->ibdev.num_comp_vectors = 1;
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 23f38cf2c5cd..afe8b28e0878 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+ select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T4 and T5
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index e11cf7299945..fa40b685831b 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 80f988984f44..f1510cc76d2d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
+#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"
@@ -239,15 +240,13 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
- struct cpl_tid_release *req;
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
- skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ skb = get_skb(skb, len, GFP_KERNEL);
if (!skb)
return;
- req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+
+ cxgb_mk_tid_release(skb, len, hwtid, 0);
c4iw_ofld_send(rdev, skb);
return;
}
@@ -466,72 +465,6 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
-static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
-{
- int i;
-
- egress_dev = get_real_dev(egress_dev);
- for (i = 0; i < dev->rdev.lldi.nports; i++)
- if (dev->rdev.lldi.ports[i] == egress_dev)
- return 1;
- return 0;
-}
-
-static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
- __u8 *peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
-{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
-
-out:
- return dst;
-}
-
-static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!our_interface(dev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
-}
-
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
pr_err(MOD "ARP failure\n");
@@ -706,56 +639,32 @@ static int send_flowc(struct c4iw_ep *ep)
static int send_halfclose(struct c4iw_ep *ep)
{
- struct cpl_close_con_req *req;
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- ep->hwtid));
+ cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+ NULL, arp_failure_discard);
+
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep)
{
- struct cpl_abort_req *req;
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
- set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
- req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
-}
+ cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+ ep, abort_arp_failure);
-static void best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
-{
- unsigned short hdr_size = (ipv6 ?
- sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ?
- round_up(TCPOLEN_TIMESTAMP, 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+ return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static int send_connect(struct c4iw_ep *ep)
@@ -770,7 +679,7 @@ static int send_connect(struct c4iw_ep *ep)
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
- int wscale;
+ u32 wscale;
int win, sizev4, sizev6, wrlen;
struct sockaddr_in *la = (struct sockaddr_in *)
&ep->com.local_addr;
@@ -817,10 +726,10 @@ static int send_connect(struct c4iw_ep *ep)
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -1447,9 +1356,9 @@ static void established_upcall(struct c4iw_ep *ep)
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
- struct cpl_rx_data_ack *req;
struct sk_buff *skb;
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -1466,15 +1375,12 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
- req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
- RX_DACK_CHANGE_F |
- RX_DACK_MODE_V(dack_mode));
- set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
+ credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
+ RX_DACK_MODE_V(dack_mode);
+
+ cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
+ credit_dack);
+
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
@@ -1972,7 +1878,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
struct sk_buff *skb;
struct fw_ofld_connection_wr *req;
unsigned int mtu_idx;
- int wscale;
+ u32 wscale;
struct sockaddr_in *sin;
int win;
@@ -1997,10 +1903,10 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -2054,15 +1960,6 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_CONN_EXIST);
}
-/* Returns whether a CPL status conveys negative advice.
- */
-static int is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
static char *neg_adv_str(unsigned int status)
{
switch (status) {
@@ -2218,16 +2115,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
/* find a route */
if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
- ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, ep->com.cm_id->tos);
+ ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, ep->com.cm_id->tos);
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
} else {
- ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
+ get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
}
@@ -2299,7 +2201,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
- if (is_neg_adv(status)) {
+ if (cxgb_is_neg_adv(status)) {
PDBG("%s Connection problems for atid %u status %u (%s)\n",
__func__, atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
@@ -2426,7 +2328,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
unsigned int mtu_idx;
u64 opt0;
u32 opt2;
- int wscale;
+ u32 wscale;
struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
@@ -2447,10 +2349,10 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
ep->hwtid));
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps && req->tcpopt.tstamp,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps && req->tcpopt.tstamp,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -2522,42 +2424,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
return;
}
-static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
- int *iptype, __u8 *local_ip, __u8 *peer_ip,
- __be16 *local_port, __be16 *peer_port)
-{
- int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
- ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
- T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
- IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
- T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
- ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-
- return;
-}
-
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -2586,8 +2452,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
- local_ip, peer_ip, &local_port, &peer_port);
+ cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
+ &iptype, local_ip, peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
@@ -2595,18 +2461,19 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
- local_port, peer_port,
- tos);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ *(__be32 *)local_ip, *(__be32 *)peer_ip,
+ local_port, peer_port, tos);
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &parent_ep->com.local_addr)->sin6_scope_id);
+ dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ local_ip, peer_ip, local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_scope_id);
}
if (!dst) {
printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
@@ -2839,18 +2706,18 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
struct c4iw_ep *ep;
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
unsigned int tid = GET_TID(req);
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- if (is_neg_adv(req->status)) {
+ if (cxgb_is_neg_adv(req->status)) {
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
neg_adv_str(req->status));
@@ -2943,11 +2810,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
release = 1;
goto out;
}
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
+
+ cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
+
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
if (release)
@@ -3379,9 +3244,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
__func__, &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port));
- ep->dst = find_route(dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, cm_id->tos);
+ ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, cm_id->tos);
} else {
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3400,10 +3267,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
- ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
}
if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
@@ -4045,8 +3914,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
- dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
- iph->tos);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ iph->daddr, iph->saddr, tcph->dest,
+ tcph->source, iph->tos);
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
__func__);
@@ -4321,7 +4191,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- if (is_neg_adv(req->status)) {
+ if (cxgb_is_neg_adv(req->status)) {
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
neg_adv_str(req->status));
@@ -4365,7 +4235,7 @@ int __init c4iw_cm_init(void)
spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
- workq = create_singlethread_workqueue("iw_cxgb4");
+ workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
if (!workq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ac926c942fee..867b8cf82be8 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -666,6 +666,18 @@ skip_cqe:
return ret;
}
+static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+ struct c4iw_mr *mhp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rhp->lock, flags);
+ mhp = get_mhp(rhp, rkey >> 8);
+ if (mhp)
+ mhp->attr.state = 0;
+ spin_unlock_irqrestore(&rhp->lock, flags);
+}
+
/*
* Get one cq entry from c4iw and map it to openib.
*
@@ -721,6 +733,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
}
} else {
switch (CQE_OPCODE(&cqe)) {
@@ -746,6 +759,10 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
break;
case FW_RI_FAST_REGISTER:
wc->opcode = IB_WC_REG_MR;
+
+ /* Invalidate the MR if the fastreg failed */
+ if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
+ invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 3c4b2126e0d1..93e3d270a98a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1480,6 +1480,10 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 511,
+ .ciq = true,
+ .lro = false,
.add = c4iw_uld_add,
.rx_handler = c4iw_uld_rx_handler,
.state_change = c4iw_uld_state_change,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4b83b84f7ddf..7e7f79e55006 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -58,7 +58,7 @@
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
-#include "user.h"
+#include <rdma/cxgb4-abi.h>
#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"
@@ -882,15 +882,6 @@ static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
return cm_id->provider_data;
}
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
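The removed compute_wscale() is replaced by cxgb_compute_wscale() from the shared libcxgb helpers this series switches cm.c to; both pick the smallest TCP window-scale shift, capped at 14, such that 65535 << wscale covers the receive window. A stand-alone restatement with a worked value:

/* Same logic as the removed compute_wscale() / shared cxgb_compute_wscale(). */
static unsigned int example_compute_wscale(unsigned int win)
{
        unsigned int wscale = 0;

        while (wscale < 14 && (65535U << wscale) < win)
                wscale++;

        return wscale;  /* e.g. win = 1 MiB (1048576) gives wscale = 5 */
}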
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0b91b0f4df71..80e27749420a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -695,7 +695,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
- mhp->attr.state = 1;
+ mhp->attr.state = 0;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index df127ce6b6ec..645e606a17c5 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -563,6 +563,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
+ BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 690435229be7..f57deba6717c 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -609,10 +609,42 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
+static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
+ struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16)
+{
+ __be64 *p = (__be64 *)fr->pbl;
+
+ fr->r2 = cpu_to_be32(0);
+ fr->stag = cpu_to_be32(mhp->ibmr.rkey);
+
+ fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(1) |
+ FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
+ FW_RI_TPTE_PDID_V(mhp->attr.pdid));
+ fr->tpte.locread_to_qpid = cpu_to_be32(
+ FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
+ FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
+ FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
+ fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
+ PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
+ fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
+ fr->tpte.len_hi = cpu_to_be32(0);
+ fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
+ fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+ fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
+
+ p[0] = cpu_to_be64((u64)mhp->mpl[0]);
+ p[1] = cpu_to_be64((u64)mhp->mpl[1]);
+
+ *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
+}
+
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
+ struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
+ bool dsgl_supported)
{
- struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
struct fw_ri_immd *imdp;
__be64 *p;
int i;
@@ -674,9 +706,12 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
- u8 *len16)
+static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
+ struct ib_send_wr *wr, u8 *len16)
{
+ struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
+
+ mhp->attr.state = 0;
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -816,18 +851,32 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!qhp->wq.sq.oldest_read)
qhp->wq.sq.oldest_read = swsqe;
break;
- case IB_WR_REG_MR:
- fw_opcode = FW_RI_FR_NSMR_WR;
+ case IB_WR_REG_MR: {
+ struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
+
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
- qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+ !mhp->attr.state && mhp->mpl_len <= 2) {
+ fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
+ build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
+ mhp, &len16);
+ } else {
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
+ mhp, &len16,
+ qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ if (err)
+ break;
+ }
+ mhp->attr.state = 1;
break;
+ }
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
- err = build_inv_stag(wqe, wr, &len16);
+ err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
break;
default:
PDBG("%s post of type=%d TBD!\n", __func__,
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 02173f4315fa..862381aa83c8 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -95,6 +95,7 @@ union t4_wr {
struct fw_ri_rdma_read_wr read;
struct fw_ri_bind_mw_wr bind;
struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
@@ -170,7 +171,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 nada1;
+ u32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -232,6 +233,7 @@ struct t4_cqe {
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 1e26669793c3..010c709ba3bb 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -669,6 +669,18 @@ struct fw_ri_fr_nsmr_wr {
#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
+struct fw_ri_fr_nsmr_tpte_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u32 r2;
+ __u32 stag;
+ struct fw_ri_tpte tpte;
+ __u64 pbl[2];
+};
+
struct fw_ri_inv_lstag_wr {
__u8 opcode;
__u8 flags;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 0566393e5aba..a26a9a0bfc41 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -47,6 +47,7 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include "hfi.h"
#include "affinity.h"
@@ -55,7 +56,7 @@
struct hfi1_affinity_node_list node_affinity = {
.list = LIST_HEAD_INIT(node_affinity.list),
- .lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
+ .lock = __MUTEX_INITIALIZER(node_affinity.lock)
};
/* Name of IRQ types, indexed by enum irq_type */
@@ -159,14 +160,14 @@ void node_affinity_destroy(void)
struct list_head *pos, *q;
struct hfi1_affinity_node *entry;
- spin_lock(&node_affinity.lock);
+ mutex_lock(&node_affinity.lock);
list_for_each_safe(pos, q, &node_affinity.list) {
entry = list_entry(pos, struct hfi1_affinity_node,
list);
list_del(pos);
kfree(entry);
}
- spin_unlock(&node_affinity.lock);
+ mutex_unlock(&node_affinity.lock);
kfree(hfi1_per_node_cntr);
}
@@ -233,9 +234,8 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
- spin_lock(&node_affinity.lock);
+ mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
- spin_unlock(&node_affinity.lock);
/*
* If this is the first time this NUMA node's affinity is used,
@@ -246,6 +246,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
+ mutex_unlock(&node_affinity.lock);
return -ENOMEM;
}
init_cpu_mask_set(&entry->def_intr);
@@ -302,15 +303,113 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
&entry->general_intr_mask);
}
- spin_lock(&node_affinity.lock);
node_affinity_add_tail(entry);
- spin_unlock(&node_affinity.lock);
}
-
+ mutex_unlock(&node_affinity.lock);
return 0;
}
-int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
+/*
+ * Function updates the irq affinity hint for msix after it has been changed
+ * by the user using the /proc/irq interface. This function only accepts
+ * one cpu in the mask.
+ */
+static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
+{
+ struct sdma_engine *sde = msix->arg;
+ struct hfi1_devdata *dd = sde->dd;
+ struct hfi1_affinity_node *entry;
+ struct cpu_mask_set *set;
+ int i, old_cpu;
+
+ if (cpu > num_online_cpus() || cpu == sde->cpu)
+ return;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry)
+ goto unlock;
+
+ old_cpu = sde->cpu;
+ sde->cpu = cpu;
+ cpumask_clear(&msix->mask);
+ cpumask_set_cpu(cpu, &msix->mask);
+ dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
+ msix->msix.vector, irq_type_names[msix->type],
+ sde->this_idx, cpu);
+ irq_set_affinity_hint(msix->msix.vector, &msix->mask);
+
+ /*
+ * Set the new cpu in the hfi1_affinity_node and clean
+ * the old cpu if it is not used by any other IRQ
+ */
+ set = &entry->def_intr;
+ cpumask_set_cpu(cpu, &set->mask);
+ cpumask_set_cpu(cpu, &set->used);
+ for (i = 0; i < dd->num_msix_entries; i++) {
+ struct hfi1_msix_entry *other_msix;
+
+ other_msix = &dd->msix_entries[i];
+ if (other_msix->type != IRQ_SDMA || other_msix == msix)
+ continue;
+
+ if (cpumask_test_cpu(old_cpu, &other_msix->mask))
+ goto unlock;
+ }
+ cpumask_clear_cpu(old_cpu, &set->mask);
+ cpumask_clear_cpu(old_cpu, &set->used);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+}
+
+static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ int cpu = cpumask_first(mask);
+ struct hfi1_msix_entry *msix = container_of(notify,
+ struct hfi1_msix_entry,
+ notify);
+
+ /* Only one CPU configuration supported currently */
+ hfi1_update_sdma_affinity(msix, cpu);
+}
+
+static void hfi1_irq_notifier_release(struct kref *ref)
+{
+ /*
+ * This is required by the affinity notifier. We don't have anything to
+ * free here.
+ */
+}
+
+static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
+{
+ struct irq_affinity_notify *notify = &msix->notify;
+
+ notify->irq = msix->msix.vector;
+ notify->notify = hfi1_irq_notifier_notify;
+ notify->release = hfi1_irq_notifier_release;
+
+ if (irq_set_affinity_notifier(notify->irq, notify))
+ pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
+ notify->irq);
+}
+
+static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
+{
+ struct irq_affinity_notify *notify = &msix->notify;
+
+ if (irq_set_affinity_notifier(notify->irq, NULL))
+ pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
+ notify->irq);
+}
+
+/*
+ * Function sets the irq affinity for msix.
+ * It *must* be called with node_affinity.lock held.
+ */
+static int get_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix)
{
int ret;
cpumask_var_t diff;
@@ -328,9 +427,7 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
if (!ret)
return -ENOMEM;
- spin_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
- spin_unlock(&node_affinity.lock);
switch (msix->type) {
case IRQ_SDMA:
@@ -360,7 +457,6 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
* finds its CPU here.
*/
if (cpu == -1 && set) {
- spin_lock(&node_affinity.lock);
if (cpumask_equal(&set->mask, &set->used)) {
/*
* We've used up all the CPUs, bump up the generation
@@ -372,17 +468,6 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
cpumask_andnot(diff, &set->mask, &set->used);
cpu = cpumask_first(diff);
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&node_affinity.lock);
- }
-
- switch (msix->type) {
- case IRQ_SDMA:
- sde->cpu = cpu;
- break;
- case IRQ_GENERAL:
- case IRQ_RCVCTXT:
- case IRQ_OTHER:
- break;
}
cpumask_set_cpu(cpu, &msix->mask);
@@ -391,10 +476,25 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
extra, cpu);
irq_set_affinity_hint(msix->msix.vector, &msix->mask);
+ if (msix->type == IRQ_SDMA) {
+ sde->cpu = cpu;
+ hfi1_setup_sdma_notifier(msix);
+ }
+
free_cpumask_var(diff);
return 0;
}
+int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
+{
+ int ret;
+
+ mutex_lock(&node_affinity.lock);
+ ret = get_irq_affinity(dd, msix);
+ mutex_unlock(&node_affinity.lock);
+ return ret;
+}
+
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
@@ -402,13 +502,13 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_ctxtdata *rcd;
struct hfi1_affinity_node *entry;
- spin_lock(&node_affinity.lock);
+ mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
- spin_unlock(&node_affinity.lock);
switch (msix->type) {
case IRQ_SDMA:
set = &entry->def_intr;
+ hfi1_cleanup_sdma_notifier(msix);
break;
case IRQ_GENERAL:
/* Don't do accounting for general contexts */
@@ -420,21 +520,21 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
set = &entry->rcv_intr;
break;
default:
+ mutex_unlock(&node_affinity.lock);
return;
}
if (set) {
- spin_lock(&node_affinity.lock);
cpumask_andnot(&set->used, &set->used, &msix->mask);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&node_affinity.lock);
}
irq_set_affinity_hint(msix->msix.vector, NULL);
cpumask_clear(&msix->mask);
+ mutex_unlock(&node_affinity.lock);
}
/* This should be called with node_affinity.lock held */
@@ -535,7 +635,7 @@ int hfi1_get_proc_affinity(int node)
if (!ret)
goto free_available_mask;
- spin_lock(&affinity->lock);
+ mutex_lock(&affinity->lock);
/*
* If we've used all available HW threads, clear the mask and start
* overloading.
@@ -643,7 +743,8 @@ int hfi1_get_proc_affinity(int node)
cpu = -1;
else
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&affinity->lock);
+
+ mutex_unlock(&affinity->lock);
hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
free_cpumask_var(intrs_mask);
@@ -664,19 +765,17 @@ void hfi1_put_proc_affinity(int cpu)
if (cpu < 0)
return;
- spin_lock(&affinity->lock);
+
+ mutex_lock(&affinity->lock);
cpumask_clear_cpu(cpu, &set->used);
hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&affinity->lock);
+ mutex_unlock(&affinity->lock);
}
-/* Prevents concurrent reads and writes of the sdma_affinity attrib */
-static DEFINE_MUTEX(sdma_affinity_mutex);
-
int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
size_t count)
{
@@ -684,16 +783,19 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
cpumask_var_t mask;
int ret, i;
- spin_lock(&node_affinity.lock);
+ mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
- spin_unlock(&node_affinity.lock);
- if (!entry)
- return -EINVAL;
+ if (!entry) {
+ ret = -EINVAL;
+ goto unlock;
+ }
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
+ if (!ret) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
ret = cpulist_parse(buf, mask);
if (ret)
@@ -705,13 +807,11 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
goto out;
}
- mutex_lock(&sdma_affinity_mutex);
/* reset the SDMA interrupt affinity details */
init_cpu_mask_set(&entry->def_intr);
cpumask_copy(&entry->def_intr.mask, mask);
- /*
- * Reassign the affinity for each SDMA interrupt.
- */
+
+ /* Reassign the affinity for each SDMA interrupt. */
for (i = 0; i < dd->num_msix_entries; i++) {
struct hfi1_msix_entry *msix;
@@ -719,14 +819,15 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
if (msix->type != IRQ_SDMA)
continue;
- ret = hfi1_get_irq_affinity(dd, msix);
+ ret = get_irq_affinity(dd, msix);
if (ret)
break;
}
- mutex_unlock(&sdma_affinity_mutex);
out:
free_cpumask_var(mask);
+unlock:
+ mutex_unlock(&node_affinity.lock);
return ret ? ret : strnlen(buf, PAGE_SIZE);
}
@@ -734,15 +835,15 @@ int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
{
struct hfi1_affinity_node *entry;
- spin_lock(&node_affinity.lock);
+ mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
- spin_unlock(&node_affinity.lock);
- if (!entry)
+ if (!entry) {
+ mutex_unlock(&node_affinity.lock);
return -EINVAL;
+ }
- mutex_lock(&sdma_affinity_mutex);
cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
- mutex_unlock(&sdma_affinity_mutex);
+ mutex_unlock(&node_affinity.lock);
return strnlen(buf, PAGE_SIZE);
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 8879cf7a8cac..b89ea3c0ee1a 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -121,8 +121,7 @@ struct hfi1_affinity_node_list {
int num_core_siblings;
int num_online_nodes;
int num_online_cpus;
- /* protect affinity node list */
- spinlock_t lock;
+ struct mutex lock; /* protects affinity nodes */
};
int node_affinity_init(void);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index cc38004cea42..9bf5f23544d4 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -971,7 +971,9 @@ static struct flag_table dc8051_info_err_flags[] = {
FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
- FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
+ FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
+ FLAG_ENTRY0("External Device Request Timeout",
+ EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
@@ -6825,7 +6827,6 @@ void handle_link_up(struct work_struct *work)
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
OPA_LINKDOWN_REASON_SPEED_POLICY);
set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
start_link(ppd);
}
}
@@ -6998,12 +6999,10 @@ void handle_link_down(struct work_struct *work)
* If there is no cable attached, turn the DC off. Otherwise,
* start the link bring up.
*/
- if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
+ if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
dc_shutdown(ppd->dd);
- } else {
- tune_serdes(ppd);
+ else
start_link(ppd);
- }
}
void handle_link_bounce(struct work_struct *work)
@@ -7016,7 +7015,6 @@ void handle_link_bounce(struct work_struct *work)
*/
if (ppd->host_link_state & HLS_UP) {
set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
start_link(ppd);
} else {
dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
@@ -7531,7 +7529,6 @@ done:
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
OPA_LINKDOWN_REASON_WIDTH_POLICY);
set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
start_link(ppd);
}
}
@@ -9161,6 +9158,12 @@ set_local_link_attributes_fail:
*/
int start_link(struct hfi1_pportdata *ppd)
{
+ /*
+ * Tune the SerDes to a ballpark setting for optimal signal and bit
+ * error rate. Needs to be done before starting the link.
+ */
+ tune_serdes(ppd);
+
if (!ppd->link_enabled) {
dd_dev_info(ppd->dd,
"%s: stopping link start because link is disabled\n",
@@ -9401,8 +9404,6 @@ void qsfp_event(struct work_struct *work)
*/
set_qsfp_int_n(ppd, 1);
- tune_serdes(ppd);
-
start_link(ppd);
}
@@ -9544,11 +9545,6 @@ static void try_start_link(struct hfi1_pportdata *ppd)
}
ppd->qsfp_retry_count = 0;
- /*
- * Tune the SerDes to a ballpark setting for optimal signal and bit
- * error rate. Needs to be done before starting the link.
- */
- tune_serdes(ppd);
start_link(ppd);
}
@@ -9718,12 +9714,12 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
-struct hfi1_message_header *hfi1_get_msgheader(
- struct hfi1_devdata *dd, __le32 *rhf_addr)
+struct ib_header *hfi1_get_msgheader(
+ struct hfi1_devdata *dd, __le32 *rhf_addr)
{
u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
- return (struct hfi1_message_header *)
+ return (struct ib_header *)
(rhf_addr - dd->rhf_offset + offset);
}
@@ -11559,10 +11555,10 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
!(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
- rcd->rcvhdrq_phys);
+ rcd->rcvhdrq_dma);
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
/* reset the cached receive header queue head value */
@@ -11627,9 +11623,9 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
* update with a dummy tail address and then disable
* receive context.
*/
- if (dd->rcvhdrtail_dummy_physaddr) {
+ if (dd->rcvhdrtail_dummy_dma) {
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
}
@@ -11640,7 +11636,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
+ if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
@@ -11712,7 +11708,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
* so it doesn't contain an address that is invalid.
*/
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
}
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
@@ -13389,9 +13385,9 @@ static void init_rbufs(struct hfi1_devdata *dd)
/*
* Give up after 1ms - maximum wait time.
*
- * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
+ * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
* 250MB/s bandwidth. Lower rate to 66% for overhead to get:
- * 148 KB / (66% * 250MB/s) = 920us
+ * 136 KB / (66% * 250MB/s) = 844us
*/
if (count++ > 500) {
dd_dev_err(dd,
@@ -14570,6 +14566,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ /* call before get_platform_config(), after init_chip_resources() */
+ ret = eprom_init(dd);
+ if (ret)
+ goto bail_free_rcverr;
+
/* Needs to be called before hfi1_firmware_init */
get_platform_config(dd);
@@ -14690,10 +14691,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_free_cntrs;
- ret = eprom_init(dd);
- if (ret)
- goto bail_free_rcverr;
-
goto bail;
bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index e29573769efc..92345259a8f4 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -82,7 +82,7 @@
*/
#define CM_VAU 3
/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
-#define CM_GLOBAL_CREDITS 0x940
+#define CM_GLOBAL_CREDITS 0x880
/* Number of PKey entries in the HW */
#define MAX_PKEY_VALUES 16
@@ -254,12 +254,14 @@
#define FAILED_LNI_VERIFY_CAP2 BIT(10)
#define FAILED_LNI_CONFIGLT BIT(11)
#define HOST_HANDSHAKE_TIMEOUT BIT(12)
+#define EXTERNAL_DEVICE_REQ_TIMEOUT BIT(13)
#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
| FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
| FAILED_LNI_VERIFY_CAP1 \
| FAILED_LNI_VERIFY_CAP2 \
- | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT)
+ | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT \
+ | EXTERNAL_DEVICE_REQ_TIMEOUT)
/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
#define HOST_REQ_DONE BIT(0)
@@ -1336,7 +1338,7 @@ enum {
u64 get_all_cpu_total(u64 __percpu *cntr);
void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
-struct hfi1_message_header *hfi1_get_msgheader(
+struct ib_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index fcc9c217a97a..da7be21bedb4 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -320,14 +320,6 @@ struct diag_pkt {
/* RHF receive type error - bypass packet errors */
#define RHF_RTE_BYPASS_NO_ERR 0x0
-/*
- * This structure contains the first field common to all protocols
- * that employ this chip.
- */
-struct hfi1_message_header {
- __be16 lrh[4];
-};
-
/* IB - LRH header constants */
#define HFI1_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
#define HFI1_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 5e9be16f6cd3..632ba21759ab 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -933,6 +933,43 @@ static const struct counter_info port_cntr_ops[] = {
DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
};
+static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos >= num_online_cpus())
+ return NULL;
+
+ return pos;
+}
+
+static void *_sdma_cpu_list_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= num_online_cpus())
+ return NULL;
+
+ return pos;
+}
+
+static void _sdma_cpu_list_seq_stop(struct seq_file *s, void *v)
+{
+ /* nothing allocated */
+}
+
+static int _sdma_cpu_list_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ sdma_seqfile_dump_cpu_list(s, dd, (unsigned long)i);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(sdma_cpu_list);
+DEBUGFS_SEQ_FILE_OPEN(sdma_cpu_list)
+DEBUGFS_FILE_OPS(sdma_cpu_list);
+
void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
{
char name[sizeof("port0counters") + 1];
@@ -961,6 +998,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
/* dev counter files */
for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
DEBUGFS_FILE_CREATE(cntr_ops[i].name,
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 303f10555729..6563e4d38b80 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -276,7 +276,7 @@ inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
struct hfi1_packet *packet)
{
- struct hfi1_message_header *rhdr = packet->hdr;
+ struct ib_header *rhdr = packet->hdr;
u32 rte = rhf_rcv_type_err(packet->rhf);
int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
struct hfi1_ibport *ibp = &ppd->ibport_data;
@@ -288,10 +288,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if (packet->rhf & RHF_TID_ERR) {
/* For TIDERR and RC QPs preemptively schedule a NAK */
- struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
- struct hfi1_other_headers *ohdr = NULL;
+ struct ib_other_headers *ohdr = NULL;
u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
- u16 lid = be16_to_cpu(hdr->lrh[1]);
+ u16 lid = be16_to_cpu(rhdr->lrh[1]);
u32 qp_num;
u32 rcv_flags = 0;
@@ -301,14 +300,14 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
/* Check for GRH */
if (lnh == HFI1_LRH_BTH) {
- ohdr = &hdr->u.oth;
+ ohdr = &rhdr->u.oth;
} else if (lnh == HFI1_LRH_GRH) {
u32 vtf;
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ ohdr = &rhdr->u.l.oth;
+ if (rhdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ vtf = be32_to_cpu(rhdr->u.l.grh.version_tclass_flow);
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
rcv_flags |= HFI1_HAS_GRH;
@@ -344,7 +343,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
case IB_QPT_RC:
hfi1_rc_hdrerr(
rcd,
- hdr,
+ rhdr,
rcv_flags,
qp);
break;
@@ -452,8 +451,8 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_ib_header *hdr = pkt->hdr;
- struct hfi1_other_headers *ohdr = pkt->ohdr;
+ struct ib_header *hdr = pkt->hdr;
+ struct ib_other_headers *ohdr = pkt->ohdr;
struct ib_grh *grh = NULL;
u32 rqpn = 0, bth1;
u16 rlid, dlid = be16_to_cpu(hdr->lrh[1]);
@@ -487,7 +486,7 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
return;
}
- sc = hdr2sc((struct hfi1_message_header *)hdr, pkt->rhf);
+ sc = hdr2sc(hdr, pkt->rhf);
bth1 = be32_to_cpu(ohdr->bth[1]);
if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
@@ -599,8 +598,8 @@ static void __prescan_rxq(struct hfi1_packet *packet)
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
dd->rhf_offset;
struct rvt_qp *qp;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr;
+ struct ib_header *hdr;
+ struct ib_other_headers *ohdr;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -616,8 +615,8 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- hdr = (struct hfi1_ib_header *)
- hfi1_get_msgheader(dd, rhf_addr);
+ hdr = hfi1_get_msgheader(dd, rhf_addr);
+
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
if (lnh == HFI1_LRH_BTH)
@@ -892,8 +891,8 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
- packet->rhf_addr);
+ struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
u8 etype = rhf_rcv_type(packet->rhf);
if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index 36b77943cbfd..e70c223801b4 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -49,7 +49,26 @@
#include "common.h"
#include "eprom.h"
+/*
+ * The EPROM is logically divided into three partitions:
+ * partition 0: the first 128K, visible from PCI ROM BAR
+ * partition 1: 4K config file (sector size)
+ * partition 2: the rest
+ */
+#define P0_SIZE (128 * 1024)
+#define P1_SIZE (4 * 1024)
+#define P1_START P0_SIZE
+#define P2_START (P0_SIZE + P1_SIZE)
+
+/* controller page size, in bytes */
+#define EP_PAGE_SIZE 256
+#define EP_PAGE_MASK (EP_PAGE_SIZE - 1)
+#define EP_PAGE_DWORDS (EP_PAGE_SIZE / sizeof(u32))
+
+/* controller commands */
#define CMD_SHIFT 24
+#define CMD_NOP (0)
+#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
/* controller interface speeds */
@@ -61,6 +80,90 @@
* Double it for safety.
*/
#define EPROM_TIMEOUT 80000 /* ms */
+
+/*
+ * Read a 256 byte (64 dword) EPROM page.
+ * All callers have verified the offset is at a page boundary.
+ */
+static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
+{
+ int i;
+
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
+ for (i = 0; i < EP_PAGE_DWORDS; i++)
+ result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
+}
+
+/*
+ * Read 'len' bytes starting at byte offset 'start' from the beginning of
+ * the EPROM.
+ */
+static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, void *dest)
+{
+ u32 buffer[EP_PAGE_DWORDS];
+ u32 end;
+ u32 start_offset;
+ u32 read_start;
+ u32 bytes;
+
+ if (len == 0)
+ return 0;
+
+ end = start + len;
+
+ /*
+ * Make sure the read range is not outside of the controller read
+ * command address range. Note that '>' is correct below - the end
+ * of the range is OK if it stops at the limit, but no higher.
+ */
+ if (end > (1 << CMD_SHIFT))
+ return -EINVAL;
+
+ /* read the first partial page */
+ start_offset = start & EP_PAGE_MASK;
+ if (start_offset) {
+ /* partial starting page */
+
+ /* align and read the page that contains the start */
+ read_start = start & ~EP_PAGE_MASK;
+ read_page(dd, read_start, buffer);
+
+ /* the rest of the page is available data */
+ bytes = EP_PAGE_SIZE - start_offset;
+
+ if (len <= bytes) {
+ /* end is within this page */
+ memcpy(dest, (u8 *)buffer + start_offset, len);
+ return 0;
+ }
+
+ memcpy(dest, (u8 *)buffer + start_offset, bytes);
+
+ start += bytes;
+ len -= bytes;
+ dest += bytes;
+ }
+ /* start is now page aligned */
+
+ /* read whole pages */
+ while (len >= EP_PAGE_SIZE) {
+ read_page(dd, start, buffer);
+ memcpy(dest, buffer, EP_PAGE_SIZE);
+
+ start += EP_PAGE_SIZE;
+ len -= EP_PAGE_SIZE;
+ dest += EP_PAGE_SIZE;
+ }
+
+ /* read the last partial page */
+ if (len) {
+ read_page(dd, start, buffer);
+ memcpy(dest, buffer, len);
+ }
+
+ return 0;
+}
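
For reference, here is a stand-alone sketch of the page-splitting arithmetic used by read_length() above: a request is decomposed into an unaligned head, whole 256-byte pages, and a trailing partial page. The constant mirrors EP_PAGE_SIZE; the sample offsets and the helper name split_read are illustrative only.

/*
 * Stand-alone sketch of read_length()'s decomposition of a read into a
 * partial head page, full pages, and a partial tail page.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ   256u			/* mirrors EP_PAGE_SIZE */
#define PAGE_MSK  (PAGE_SZ - 1)

static void split_read(uint32_t start, uint32_t len)
{
	uint32_t head = 0, whole, tail;

	if (start & PAGE_MSK) {
		/* bytes from 'start' to the end of its page, capped at len */
		head = PAGE_SZ - (start & PAGE_MSK);
		if (head > len)
			head = len;
	}
	whole = (len - head) / PAGE_SZ;		/* full page reads */
	tail  = (len - head) % PAGE_SZ;		/* trailing partial page */

	printf("start=%u len=%u -> head=%u, %u full page(s), tail=%u\n",
	       start, len, head, whole, tail);
}

int main(void)
{
	split_read(0x1F0A0, 700);	/* unaligned start */
	split_read(0x20000, 4096);	/* page aligned, whole pages only */
	return 0;
}
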
+
/*
* Initialize the EPROM handler.
*/
@@ -100,3 +203,85 @@ int eprom_init(struct hfi1_devdata *dd)
done_asic:
return ret;
}
+
+/* magic character sequence that trails an image */
+#define IMAGE_TRAIL_MAGIC "egamiAPO"
+
+/*
+ * Read all of partition 1. The actual file is at the front. Adjust
+ * the returned size if a trailing image magic is found.
+ */
+static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
+ u32 *size)
+{
+ void *buffer;
+ void *p;
+ u32 length;
+ int ret;
+
+ buffer = kmalloc(P1_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = read_length(dd, P1_START, P1_SIZE, buffer);
+ if (ret) {
+ kfree(buffer);
+ return ret;
+ }
+
+ /* scan for image magic that may trail the actual data */
+ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+ if (p)
+ length = p - buffer;
+ else
+ length = P1_SIZE;
+
+ *data = buffer;
+ *size = length;
+ return 0;
+}
+
+/*
+ * Read the platform configuration file from the EPROM.
+ *
+ * On success, an allocated buffer containing the data and its size are
+ * returned. It is up to the caller to free this buffer.
+ *
+ * Return value:
+ * 0 - success
+ * -ENXIO - no EPROM is available
+ * -EBUSY - not able to acquire access to the EPROM
+ * -ENOENT - no recognizable file written
+ * -ENOMEM - buffer could not be allocated
+ */
+int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
+{
+ u32 directory[EP_PAGE_DWORDS]; /* aligned buffer */
+ int ret;
+
+ if (!dd->eprom_available)
+ return -ENXIO;
+
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+ if (ret)
+ return -EBUSY;
+
+ /* read the last page of P0 for the EPROM format magic */
+ ret = read_length(dd, P1_START - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
+ if (ret)
+ goto done;
+
+ /* last dword of P0 contains a magic indicator */
+ if (directory[EP_PAGE_DWORDS - 1] == 0) {
+ /* partition format */
+ ret = read_partition_platform_config(dd, data, size);
+ goto done;
+ }
+
+ /* nothing recognized */
+ ret = -ENOENT;
+
+done:
+ release_chip_resource(dd, CR_EPROM);
+ return ret;
+}
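
A user-space sketch of the trailing-magic trimming performed by read_partition_platform_config(): the usable length is the offset of the first "egamiAPO" marker, or the full partition size when no marker is present. trimmed_length() is an illustrative stand-in for the driver's strnstr() scan.

/*
 * Sketch of the IMAGE_TRAIL_MAGIC trimming: data ends where the marker
 * starts; without a marker the whole partition is treated as data.
 */
#include <stdio.h>
#include <string.h>

#define TRAIL_MAGIC "egamiAPO"

static size_t trimmed_length(const char *buf, size_t size)
{
	size_t mlen = strlen(TRAIL_MAGIC);
	size_t i;

	for (i = 0; i + mlen <= size; i++)	/* bounded scan, like strnstr() */
		if (!memcmp(buf + i, TRAIL_MAGIC, mlen))
			return i;		/* data ends at the marker */
	return size;				/* no marker found */
}

int main(void)
{
	char part1[64] = "config-file-payload";	/* rest is zero filled */

	memcpy(part1 + 32, TRAIL_MAGIC, strlen(TRAIL_MAGIC));
	printf("usable bytes: %zu\n", trimmed_length(part1, sizeof(part1)));
	return 0;
}
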
diff --git a/drivers/infiniband/hw/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
index d41f0b1afb15..e774184f1643 100644
--- a/drivers/infiniband/hw/hfi1/eprom.h
+++ b/drivers/infiniband/hw/hfi1/eprom.h
@@ -45,8 +45,8 @@
*
*/
-struct hfi1_cmd;
struct hfi1_devdata;
int eprom_init(struct hfi1_devdata *dd);
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd);
+int eprom_read_platform_config(struct hfi1_devdata *dd, void **buf_ret,
+ u32 *size_ret);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7e03ccd2554d..677efa0e8cd6 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -58,7 +58,6 @@
#include "trace.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
-#include "eprom.h"
#include "aspm.h"
#include "mmu_rb.h"
@@ -440,9 +439,10 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd;
- unsigned long flags, pfn;
+ unsigned long flags;
u64 token = vma->vm_pgoff << PAGE_SHIFT,
memaddr = 0;
+ void *memvirt = NULL;
u8 subctxt, mapio = 0, vmf = 0, type;
ssize_t memlen = 0;
int ret = 0;
@@ -493,7 +493,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* second or third page allocated for credit returns (if number
* of enabled contexts > 64 and 128 respectively).
*/
- memaddr = dd->cr_base[uctxt->numa_id].pa +
+ memvirt = dd->cr_base[uctxt->numa_id].va;
+ memaddr = virt_to_phys(memvirt) +
(((u64)uctxt->sc->hw_free -
(u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
memlen = PAGE_SIZE;
@@ -508,8 +509,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
mapio = 1;
break;
case RCV_HDRQ:
- memaddr = uctxt->rcvhdrq_phys;
memlen = uctxt->rcvhdrq_size;
+ memvirt = uctxt->rcvhdrq;
break;
case RCV_EGRBUF: {
unsigned long addr;
@@ -533,14 +534,21 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_MAYWRITE;
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
+ memlen = uctxt->egrbufs.buffers[i].len;
+ memvirt = uctxt->egrbufs.buffers[i].addr;
ret = remap_pfn_range(
vma, addr,
- uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
- uctxt->egrbufs.buffers[i].len,
+ /*
+ * virt_to_pfn() does the same, but
+ * it's not available on x86_64
+ * when CONFIG_MMU is enabled.
+ */
+ PFN_DOWN(__pa(memvirt)),
+ memlen,
vma->vm_page_prot);
if (ret < 0)
goto done;
- addr += uctxt->egrbufs.buffers[i].len;
+ addr += memlen;
}
ret = 0;
goto done;
@@ -596,8 +604,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EPERM;
goto done;
}
- memaddr = uctxt->rcvhdrqtailaddr_phys;
memlen = PAGE_SIZE;
+ memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
flags &= ~VM_MAYWRITE;
break;
case SUBCTXT_UREGS:
@@ -650,16 +658,24 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
vma->vm_end - vma->vm_start, vma->vm_flags);
- pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
if (vmf) {
- vma->vm_pgoff = pfn;
+ vma->vm_pgoff = PFN_DOWN(memaddr);
vma->vm_ops = &vm_ops;
ret = 0;
} else if (mapio) {
- ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
vma->vm_page_prot);
+ } else if (memvirt) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(__pa(memvirt)),
+ memlen,
+ vma->vm_page_prot);
} else {
- ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
vma->vm_page_prot);
}
done:
@@ -961,14 +977,16 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
*/
uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
uctxt->dd->node);
- if (!uctxt->sc)
- return -ENOMEM;
-
+ if (!uctxt->sc) {
+ ret = -ENOMEM;
+ goto ctxdata_free;
+ }
hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
uctxt->sc->hw_context);
ret = sc_enable(uctxt->sc);
if (ret)
- return ret;
+ goto ctxdata_free;
+
/*
* Setup shared context resources if the user-level has requested
* shared contexts and this is the 'master' process.
@@ -982,7 +1000,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
* send context because it will be done during file close
*/
if (ret)
- return ret;
+ goto ctxdata_free;
}
uctxt->userversion = uinfo->userversion;
uctxt->flags = hfi1_cap_mask; /* save current flag state */
@@ -1002,6 +1020,11 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
fd->uctxt = uctxt;
return 0;
+
+ctxdata_free:
+ dd->rcd[ctxt] = NULL;
+ hfi1_free_ctxtdata(dd, uctxt);
+ return ret;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
@@ -1260,7 +1283,7 @@ static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
uctxt->rcvhdrq);
binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
fd->subctxt,
- uctxt->egrbufs.rcvtids[0].phys);
+ uctxt->egrbufs.rcvtids[0].dma);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
fd->subctxt, 0);
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 325ec211370f..7eef11b316ff 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -64,6 +64,8 @@
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <rdma/ib_hdrs.h>
+#include <linux/rhashtable.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -171,12 +173,12 @@ struct ctxt_eager_bufs {
u32 threshold; /* head update threshold */
struct eager_buffer {
void *addr;
- dma_addr_t phys;
+ dma_addr_t dma;
ssize_t len;
} *buffers;
struct {
void *addr;
- dma_addr_t phys;
+ dma_addr_t dma;
} *rcvtids;
};
@@ -207,8 +209,8 @@ struct hfi1_ctxtdata {
/* size of each of the rcvhdrq entries */
u16 rcvhdrqentsize;
/* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
+ dma_addr_t rcvhdrq_dma;
+ dma_addr_t rcvhdrqtailaddr_dma;
struct ctxt_eager_bufs egrbufs;
/* this receive context's assigned PIO ACK send context */
struct send_context *sc;
@@ -350,7 +352,7 @@ struct hfi1_packet {
struct hfi1_ctxtdata *rcd;
__le32 *rhf_addr;
struct rvt_qp *qp;
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
u64 rhf;
u32 maxcnt;
u32 rhqoff;
@@ -529,6 +531,7 @@ struct hfi1_msix_entry {
void *arg;
char name[MAX_NAME_SIZE];
cpumask_t mask;
+ struct irq_affinity_notify notify;
};
/* per-SL CCA information */
@@ -1060,8 +1063,6 @@ struct hfi1_devdata {
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
- /* high volume overflow errors deferred to tasklet */
- struct tasklet_struct error_tasklet;
/* MSI-X information */
struct hfi1_msix_entry *msix_entries;
@@ -1164,7 +1165,7 @@ struct hfi1_devdata {
/* receive context tail dummy address */
__le64 *rcvhdrtail_dummy_kvaddr;
- dma_addr_t rcvhdrtail_dummy_physaddr;
+ dma_addr_t rcvhdrtail_dummy_dma;
bool eprom_available; /* true if EPROM is available for this device */
bool aspm_supported; /* Does HW support ASPM */
@@ -1175,6 +1176,7 @@ struct hfi1_devdata {
atomic_t aspm_disabled_cnt;
struct hfi1_affinity *affinity;
+ struct rhashtable sdma_rht;
struct kobject kobj;
};
@@ -1268,7 +1270,7 @@ static inline u32 driver_lstate(struct hfi1_pportdata *ppd)
void receive_interrupt_work(struct work_struct *work);
/* extract service channel from header and rhf */
-static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
+static inline int hdr2sc(struct ib_header *hdr, u64 rhf)
{
return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
((!!(rhf_dc_info(rhf))) << 4);
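
A worked example of the service-channel extraction above, with arbitrary sample values: SC[3:0] come from bits 15:12 of the first LRH word and SC4 from the RHF DC-info bit, forming a 5-bit SC.

/*
 * Stand-alone illustration of hdr2sc(): lrh0 = 0x5002 gives SC[3:0] = 0x5,
 * and a set DC-info bit contributes SC4, so the result is 0x15.
 */
#include <stdio.h>
#include <stdint.h>

static int hdr2sc_example(uint16_t lrh0_cpu, int dc_info_bit)
{
	return ((lrh0_cpu >> 12) & 0xf) | ((!!dc_info_bit) << 4);
}

int main(void)
{
	printf("sc = 0x%x\n", hdr2sc_example(0x5002, 1));	/* prints 0x15 */
	return 0;
}
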
@@ -1603,7 +1605,7 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
- struct hfi1_other_headers *ohdr = pkt->ohdr;
+ struct ib_other_headers *ohdr = pkt->ohdr;
u32 bth1;
bth1 = be32_to_cpu(ohdr->bth[1]);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 384b43d2fd49..60db61536fed 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -336,6 +336,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
}
return rcd;
bail:
+ dd->rcd[ctxt] = NULL;
kfree(rcd->egrbufs.rcvtids);
kfree(rcd->egrbufs.buffers);
kfree(rcd);
@@ -709,7 +710,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
/* allocate dummy tail memory for all receive contexts */
dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
&dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_physaddr,
+ &dd->rcvhdrtail_dummy_dma,
GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
@@ -942,12 +943,12 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (rcd->rcvhdrq) {
dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq, rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
if (rcd->rcvhdrtail_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
(void *)rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrqtailaddr_dma);
rcd->rcvhdrtail_kvaddr = NULL;
}
}
@@ -956,11 +957,11 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
kfree(rcd->egrbufs.rcvtids);
for (e = 0; e < rcd->egrbufs.alloced; e++) {
- if (rcd->egrbufs.buffers[e].phys)
+ if (rcd->egrbufs.buffers[e].dma)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,
- rcd->egrbufs.buffers[e].phys);
+ rcd->egrbufs.buffers[e].dma);
}
kfree(rcd->egrbufs.buffers);
@@ -1354,7 +1355,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
if (dd->rcvhdrtail_dummy_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
dd->rcvhdrtail_dummy_kvaddr = NULL;
}
@@ -1577,7 +1578,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
u64 reg;
if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
+ dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
@@ -1590,7 +1591,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
GFP_USER : GFP_KERNEL;
rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
@@ -1602,11 +1603,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;
@@ -1634,7 +1635,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
* before enabling any receive context
*/
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
return 0;
@@ -1645,7 +1646,7 @@ bail_free:
vfree(rcd->user_event_mask);
rcd->user_event_mask = NULL;
dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
+ rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
bail:
return -ENOMEM;
@@ -1706,15 +1707,15 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.buffers[idx].addr =
dma_zalloc_coherent(&dd->pcidev->dev,
rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].phys,
+ &rcd->egrbufs.buffers[idx].dma,
gfp_flags);
if (rcd->egrbufs.buffers[idx].addr) {
rcd->egrbufs.buffers[idx].len =
rcd->egrbufs.rcvtid_size;
rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
rcd->egrbufs.buffers[idx].addr;
- rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
- rcd->egrbufs.buffers[idx].phys;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
+ rcd->egrbufs.buffers[idx].dma;
rcd->egrbufs.alloced++;
alloced_bytes += rcd->egrbufs.rcvtid_size;
idx++;
@@ -1755,14 +1756,14 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
for (i = 0, j = 0, offset = 0; j < idx; i++) {
if (i >= rcd->egrbufs.count)
break;
- rcd->egrbufs.rcvtids[i].phys =
- rcd->egrbufs.buffers[j].phys + offset;
+ rcd->egrbufs.rcvtids[i].dma =
+ rcd->egrbufs.buffers[j].dma + offset;
rcd->egrbufs.rcvtids[i].addr =
rcd->egrbufs.buffers[j].addr + offset;
rcd->egrbufs.alloced++;
- if ((rcd->egrbufs.buffers[j].phys + offset +
+ if ((rcd->egrbufs.buffers[j].dma + offset +
new_size) ==
- (rcd->egrbufs.buffers[j].phys +
+ (rcd->egrbufs.buffers[j].dma +
rcd->egrbufs.buffers[j].len)) {
j++;
offset = 0;
@@ -1814,7 +1815,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
- rcd->egrbufs.rcvtids[idx].phys, order);
+ rcd->egrbufs.rcvtids[idx].dma, order);
cond_resched();
}
goto bail;
@@ -1826,9 +1827,9 @@ bail_rcvegrbuf_phys:
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[idx].len,
rcd->egrbufs.buffers[idx].addr,
- rcd->egrbufs.buffers[idx].phys);
+ rcd->egrbufs.buffers[idx].dma);
rcd->egrbufs.buffers[idx].addr = NULL;
- rcd->egrbufs.buffers[idx].phys = 0;
+ rcd->egrbufs.buffers[idx].dma = 0;
rcd->egrbufs.buffers[idx].len = 0;
}
bail:
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 7ffc14f21523..9487c9bb8920 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1013,7 +1013,6 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
* offline.
*/
set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
start_link(ppd);
} else {
set_link_state(ppd, link_state);
@@ -1407,12 +1406,6 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
if (key == okey)
continue;
/*
- * Don't update pkeys[2], if an HFI port without MgmtAllowed
- * by neighbor is a switch.
- */
- if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
- continue;
- /*
* The SM gives us the complete PKey table. We have
* to ensure that we put the PKeys in the matching
* slots.
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index ac1bf4a73571..50a3a36d9363 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -551,11 +551,11 @@ static inline u32 group_size(u32 group)
}
/*
- * Obtain the credit return addresses, kernel virtual and physical, for the
+ * Obtain the credit return addresses, kernel virtual and bus, for the
* given sc.
*
* To understand this routine:
- * o va and pa are arrays of struct credit_return. One for each physical
+ * o va and dma are arrays of struct credit_return. One for each physical
* send context, per NUMA.
* o Each send context always looks in its relative location in a struct
* credit_return for its credit return.
@@ -563,14 +563,14 @@ static inline u32 group_size(u32 group)
* with the same value. Use the address of the first send context in the
* group.
*/
-static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
+static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
u32 gc = group_context(sc->hw_context, sc->group);
u32 index = sc->hw_context & 0x7;
sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
- *pa = (unsigned long)
- &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
+ *dma = (unsigned long)
+ &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}
/*
@@ -710,7 +710,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
{
struct send_context_info *sci;
struct send_context *sc = NULL;
- dma_addr_t pa;
+ dma_addr_t dma;
unsigned long flags;
u64 reg;
u32 thresh;
@@ -763,7 +763,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
sc->sw_index = sw_index;
sc->hw_context = hw_context;
- cr_group_addresses(sc, &pa);
+ cr_group_addresses(sc, &dma);
sc->credits = sci->credits;
/* PIO Send Memory Address details */
@@ -805,7 +805,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
/* set up credit return */
- reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
+ reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
/*
@@ -2064,7 +2064,7 @@ int init_credit_return(struct hfi1_devdata *dd)
dd->cr_base[i].va = dma_zalloc_coherent(
&dd->pcidev->dev,
bytes,
- &dd->cr_base[i].pa,
+ &dd->cr_base[i].dma,
GFP_KERNEL);
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2097,7 +2097,7 @@ void free_credit_return(struct hfi1_devdata *dd)
TXE_NUM_CONTEXTS *
sizeof(struct credit_return),
dd->cr_base[i].va,
- dd->cr_base[i].pa);
+ dd->cr_base[i].dma);
}
}
kfree(dd->cr_base);
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 464cbd27b975..e709eaf743b5 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -154,7 +154,7 @@ struct credit_return {
/* NUMA indexed credit return array */
struct credit_return_base {
struct credit_return *va;
- dma_addr_t pa;
+ dma_addr_t dma;
};
/* send context configuration sizes (one per type) */
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 3a1ef3056282..aa7773643107 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -165,9 +165,6 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
preempt_enable();
}
-/* USE_SHIFTS is faster in user-space tests on a Xeon X5570 @ 2.93GHz */
-#define USE_SHIFTS 1
-#ifdef USE_SHIFTS
/*
* Handle carry bytes using shifts and masks.
*
@@ -187,150 +184,6 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
#define mshift(x) (8 * (x))
/*
- * Read nbytes bytes from "from" and return them in the LSB bytes
- * of pbuf->carry. Other bytes are zeroed. Any previous value
- * pbuf->carry is lost.
- *
- * NOTES:
- * o do not read from from if nbytes is zero
- * o from may _not_ be u64 aligned
- * o nbytes must not span a QW boundary
- */
-static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
- unsigned int nbytes)
-{
- unsigned long off;
-
- if (nbytes == 0) {
- pbuf->carry.val64 = 0;
- } else {
- /* align our pointer */
- off = (unsigned long)from & 0x7;
- from = (void *)((unsigned long)from & ~0x7l);
- pbuf->carry.val64 = ((*(u64 *)from)
- << zshift(nbytes + off))/* zero upper bytes */
- >> zshift(nbytes); /* place at bottom */
- }
- pbuf->carry_bytes = nbytes;
-}
-
-/*
- * Read nbytes bytes from "from" and put them at the next significant bytes
- * of pbuf->carry. Unused bytes are zeroed. It is expected that the extra
- * read does not overfill carry.
- *
- * NOTES:
- * o from may _not_ be u64 aligned
- * o nbytes may span a QW boundary
- */
-static inline void read_extra_bytes(struct pio_buf *pbuf,
- const void *from, unsigned int nbytes)
-{
- unsigned long off = (unsigned long)from & 0x7;
- unsigned int room, xbytes;
-
- /* align our pointer */
- from = (void *)((unsigned long)from & ~0x7l);
-
- /* check count first - don't read anything if count is zero */
- while (nbytes) {
- /* find the number of bytes in this u64 */
- room = 8 - off; /* this u64 has room for this many bytes */
- xbytes = min(room, nbytes);
-
- /*
- * shift down to zero lower bytes, shift up to zero upper
- * bytes, shift back down to move into place
- */
- pbuf->carry.val64 |= (((*(u64 *)from)
- >> mshift(off))
- << zshift(xbytes))
- >> zshift(xbytes + pbuf->carry_bytes);
- off = 0;
- pbuf->carry_bytes += xbytes;
- nbytes -= xbytes;
- from += sizeof(u64);
- }
-}
-
-/*
- * Zero extra bytes from the end of pbuf->carry.
- *
- * NOTES:
- * o zbytes <= old_bytes
- */
-static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
-{
- unsigned int remaining;
-
- if (zbytes == 0) /* nothing to do */
- return;
-
- remaining = pbuf->carry_bytes - zbytes; /* remaining bytes */
-
- /* NOTE: zshift only guaranteed to work if remaining != 0 */
- if (remaining)
- pbuf->carry.val64 = (pbuf->carry.val64 << zshift(remaining))
- >> zshift(remaining);
- else
- pbuf->carry.val64 = 0;
- pbuf->carry_bytes = remaining;
-}
-
-/*
- * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
- * Put the unused part of the next 8 bytes of src into the LSB bytes of
- * pbuf->carry with the upper bytes zeroed..
- *
- * NOTES:
- * o result must keep unused bytes zeroed
- * o src must be u64 aligned
- */
-static inline void merge_write8(
- struct pio_buf *pbuf,
- void __iomem *dest,
- const void *src)
-{
- u64 new, temp;
-
- new = *(u64 *)src;
- temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
- writeq(temp, dest);
- pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
-}
-
-/*
- * Write a quad word using all bytes of carry.
- */
-static inline void carry8_write8(union mix carry, void __iomem *dest)
-{
- writeq(carry.val64, dest);
-}
-
-/*
- * Write a quad word using all the valid bytes of carry. If carry
- * has zero valid bytes, nothing is written.
- * Returns 0 on nothing written, non-zero on quad word written.
- */
-static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
-{
- if (pbuf->carry_bytes) {
- /* unused bytes are always kept zeroed, so just write */
- writeq(pbuf->carry.val64, dest);
- return 1;
- }
-
- return 0;
-}
-
-#else /* USE_SHIFTS */
-/*
- * Handle carry bytes using byte copies.
- *
- * NOTE: the value the unused portion of carry is left uninitialized.
- */
-
-/*
* Jump copy - no-loop copy for < 8 bytes.
*/
static inline void jcopy(u8 *dest, const u8 *src, u32 n)
@@ -338,18 +191,25 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
switch (n) {
case 7:
*dest++ = *src++;
+ /* fall through */
case 6:
*dest++ = *src++;
+ /* fall through */
case 5:
*dest++ = *src++;
+ /* fall through */
case 4:
*dest++ = *src++;
+ /* fall through */
case 3:
*dest++ = *src++;
+ /* fall through */
case 2:
*dest++ = *src++;
+ /* fall through */
case 1:
*dest++ = *src++;
+ /* fall through */
}
}
@@ -365,6 +225,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
unsigned int nbytes)
{
+ pbuf->carry.val64 = 0;
jcopy(&pbuf->carry.val8[0], from, nbytes);
pbuf->carry_bytes = nbytes;
}
@@ -385,40 +246,31 @@ static inline void read_extra_bytes(struct pio_buf *pbuf,
}
/*
- * Zero extra bytes from the end of pbuf->carry.
- *
- * We do not care about the value of unused bytes in carry, so just
- * reduce the byte count.
+ * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
+ * Put the unused part of the next 8 bytes of src into the LSB bytes of
+ * pbuf->carry with the upper bytes zeroed.
*
* NOTES:
- * o zbytes <= old_bytes
- */
-static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
-{
- pbuf->carry_bytes -= zbytes;
-}
-
-/*
- * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
- * Put the unused part of the next 8 bytes of src into the low bytes of
- * pbuf->carry.
+ * o result must keep unused bytes zeroed
+ * o src must be u64 aligned
*/
static inline void merge_write8(
struct pio_buf *pbuf,
- void *dest,
+ void __iomem *dest,
const void *src)
{
- u32 remainder = 8 - pbuf->carry_bytes;
+ u64 new, temp;
- jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder);
- writeq(pbuf->carry.val64, dest);
- jcopy(&pbuf->carry.val8[0], src + remainder, pbuf->carry_bytes);
+ new = *(u64 *)src;
+ temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
+ writeq(temp, dest);
+ pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
}
/*
* Write a quad word using all bytes of carry.
*/
-static inline void carry8_write8(union mix carry, void *dest)
+static inline void carry8_write8(union mix carry, void __iomem *dest)
{
writeq(carry.val64, dest);
}
@@ -428,20 +280,16 @@ static inline void carry8_write8(union mix carry, void *dest)
* has zero valid bytes, nothing is written.
* Returns 0 on nothing written, non-zero on quad word written.
*/
-static inline int carry_write8(struct pio_buf *pbuf, void *dest)
+static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
{
if (pbuf->carry_bytes) {
- u64 zero = 0;
-
- jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
- 8 - pbuf->carry_bytes);
+ /* unused bytes are always kept zeroed, so just write */
writeq(pbuf->carry.val64, dest);
return 1;
}
return 0;
}
-#endif /* USE_SHIFTS */
/*
* Segmented PIO Copy - start
@@ -550,8 +398,8 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
{
void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
void __iomem *dend; /* 8-byte data end */
- unsigned long qw_to_write = (pbuf->carry_bytes + nbytes) >> 3;
- unsigned long bytes_left = (pbuf->carry_bytes + nbytes) & 0x7;
+ unsigned long qw_to_write = nbytes >> 3;
+ unsigned long bytes_left = nbytes & 0x7;
/* calculate 8-byte data end */
dend = dest + (qw_to_write * sizeof(u64));
@@ -621,16 +469,46 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
dest += sizeof(u64);
}
- /* adjust carry */
- if (pbuf->carry_bytes < bytes_left) {
- /* need to read more */
- read_extra_bytes(pbuf, from, bytes_left - pbuf->carry_bytes);
+ pbuf->qw_written += qw_to_write;
+
+ /* handle carry and left-over bytes */
+ if (pbuf->carry_bytes + bytes_left >= 8) {
+ unsigned long nread;
+
+ /* there is enough to fill another qw - fill carry */
+ nread = 8 - pbuf->carry_bytes;
+ read_extra_bytes(pbuf, from, nread);
+
+ /*
+ * One more write - but need to make sure dest is correct.
+ * Check for wrap and the possibility the write
+ * should be in SOP space.
+ *
+ * The two checks immediately below cannot both be true, hence
+ * the else. If we have wrapped, we cannot still be within the
+ * first block. Conversely, if we are still in the first block,
+ * we cannot have wrapped. We do the wrap check first as that
+ * is more likely.
+ */
+ /* adjust if we have wrapped */
+ if (dest >= pbuf->end)
+ dest -= pbuf->size;
+ /* jump to the SOP range if within the first block */
+ else if (pbuf->qw_written < PIO_BLOCK_QWS)
+ dest += SOP_DISTANCE;
+
+ /* flush out full carry */
+ carry8_write8(pbuf->carry, dest);
+ pbuf->qw_written++;
+
+ /* now adjust and read the rest of the bytes into carry */
+ bytes_left -= nread;
+ from += nread; /* from is now not aligned */
+ read_low_bytes(pbuf, from, bytes_left);
} else {
- /* remove invalid bytes */
- zero_extra_bytes(pbuf, pbuf->carry_bytes - bytes_left);
+ /* not enough to fill another qw, append the rest to carry */
+ read_extra_bytes(pbuf, from, bytes_left);
}
-
- pbuf->qw_written += qw_to_write;
}
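
To make the carry bookkeeping above concrete, here is a little-endian sketch of the merge performed by merge_write8(). It assumes zshift(x) is 8 * (8 - x), matching the mshift() definition retained above; the data values are arbitrary test patterns.

/*
 * With carry_bytes = 3, the low 3 bytes of the written qword come from the
 * carry (aa bb cc) and the upper 5 from src (88 77 66 55 44); the 3 src
 * bytes that did not fit (33 22 11) become the new carry.
 */
#include <stdio.h>
#include <stdint.h>

#define mshift(x) (8 * (x))
#define zshift(x) (8 * (8 - (x)))	/* assumed to match the driver */

int main(void)
{
	uint64_t carry = 0x0000000000ccbbaaULL;	/* 3 valid low bytes */
	unsigned int carry_bytes = 3;
	uint64_t src = 0x1122334455667788ULL;	/* next aligned qword */

	uint64_t written   = carry | (src << mshift(carry_bytes));
	uint64_t new_carry = src >> zshift(carry_bytes);

	printf("written   = %016llx\n", (unsigned long long)written);
	printf("new carry = %016llx (3 valid bytes)\n",
	       (unsigned long long)new_carry);
	return 0;
}
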
/*
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 965c8aef0c60..202433178864 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -47,29 +47,39 @@
#include "hfi.h"
#include "efivar.h"
+#include "eprom.h"
void get_platform_config(struct hfi1_devdata *dd)
{
int ret = 0;
unsigned long size = 0;
u8 *temp_platform_config = NULL;
+ u32 esize;
+
+ ret = eprom_read_platform_config(dd, (void **)&temp_platform_config,
+ &esize);
+ if (!ret) {
+ /* success */
+ size = esize;
+ goto success;
+ }
+ /* fail, try EFI variable */
ret = read_hfi1_efi_var(dd, "configuration", &size,
(void **)&temp_platform_config);
- if (ret) {
- dd_dev_info(dd,
- "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
- __func__);
- /* fall back to request firmware */
- platform_config_load = 1;
- goto bail;
- }
+ if (!ret)
+ goto success;
+
+ dd_dev_info(dd,
+ "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
+ __func__);
+ /* fall back to request firmware */
+ platform_config_load = 1;
+ return;
+success:
dd->platform_config.data = temp_platform_config;
dd->platform_config.size = size;
-
-bail:
- /* exit */;
}
void free_platform_config(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4e4d8317c281..9fc75e7e8781 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -202,8 +202,7 @@ static void flush_iowait(struct rvt_qp *qp)
write_seqlock_irqsave(&dev->iowait_lock, flags);
if (!list_empty(&priv->s_iowait.list)) {
list_del_init(&priv->s_iowait.list);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}
@@ -450,13 +449,14 @@ static void qp_pio_drain(struct rvt_qp *qp)
*/
void hfi1_schedule_send(struct rvt_qp *qp)
{
+ lockdep_assert_held(&qp->s_lock);
if (hfi1_send_ok(qp))
_hfi1_schedule_send(qp);
}
/**
- * hfi1_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
+ * hfi1_get_credit - handle credit in aeth
+ * @qp: the qp
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
@@ -465,6 +465,7 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
+ lockdep_assert_held(&qp->s_lock);
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
@@ -503,8 +504,7 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
}
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify hfi1_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
static int iowait_sleep(
@@ -544,7 +544,7 @@ static int iowait_sleep(
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
list_add_tail(&priv->s_iowait.list, &sde->dmawait);
trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
qp->s_flags &= ~RVT_S_BUSY;
@@ -808,6 +808,13 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
kfree(priv);
return ERR_PTR(-ENOMEM);
}
+ iowait_init(
+ &priv->s_iowait,
+ 1,
+ _hfi1_do_send,
+ iowait_sleep,
+ iowait_wakeup,
+ iowait_sdma_drained);
setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
qp->s_timer.function = hfi1_rc_timeout;
return priv;
@@ -848,6 +855,7 @@ unsigned free_all_qps(struct rvt_dev_info *rdi)
void flush_qp_waiters(struct rvt_qp *qp)
{
+ lockdep_assert_held(&qp->s_lock);
flush_iowait(qp);
hfi1_stop_rc_timers(qp);
}
@@ -873,13 +881,6 @@ void notify_qp_reset(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- iowait_init(
- &priv->s_iowait,
- 1,
- _hfi1_do_send,
- iowait_sleep,
- iowait_wakeup,
- iowait_sdma_drained);
priv->r_adefered = 0;
clear_ahg(qp);
}
@@ -963,8 +964,7 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 4e95ad810847..1869f639c3ae 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -161,7 +161,7 @@ static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
bus->algo.getsda = hfi1_getsda;
bus->algo.getscl = hfi1_getscl;
bus->algo.udelay = 5;
- bus->algo.timeout = usecs_to_jiffies(50);
+ bus->algo.timeout = usecs_to_jiffies(100000);
bus->algo.data = bus;
bus->adapter.owner = THIS_MODULE;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 5da190e6011b..8bc5013f39a1 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -55,7 +55,7 @@
#include "trace.h"
/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
+#define OP(x) RC_OP(x)
/**
* hfi1_add_retry_timer - add/start a retry timer
@@ -68,6 +68,7 @@ static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
qp->s_timer.expires = jiffies + qp->timeout_jiffies +
@@ -86,6 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
struct hfi1_qp_priv *priv = qp->priv;
+ lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
add_timer(&priv->s_rnr_timer);
@@ -103,6 +105,7 @@ static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
@@ -120,6 +123,7 @@ static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
int rval = 0;
+ lockdep_assert_held(&qp->s_lock);
/* Remove QP from retry */
if (qp->s_flags & RVT_S_TIMER) {
qp->s_flags &= ~RVT_S_TIMER;
@@ -138,6 +142,7 @@ void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
+ lockdep_assert_held(&qp->s_lock);
/* Remove QP from all timers */
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
@@ -158,6 +163,7 @@ static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
int rval = 0;
struct hfi1_qp_priv *priv = qp->priv;
+ lockdep_assert_held(&qp->s_lock);
/* Remove QP from rnr timer */
if (qp->s_flags & RVT_S_WAIT_RNR) {
qp->s_flags &= ~RVT_S_WAIT_RNR;
@@ -178,18 +184,6 @@ void hfi1_del_timers_sync(struct rvt_qp *qp)
del_timer_sync(&priv->s_rnr_timer);
}
-/* only opcode mask for adaptive pio */
-const u32 rc_only_opcode =
- BIT(OP(SEND_ONLY) & 0x1f) |
- BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) |
- BIT(OP(RDMA_WRITE_ONLY & 0x1f)) |
- BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f)) |
- BIT(OP(RDMA_READ_REQUEST & 0x1f)) |
- BIT(OP(ACKNOWLEDGE & 0x1f)) |
- BIT(OP(ATOMIC_ACKNOWLEDGE & 0x1f)) |
- BIT(OP(COMPARE_SWAP & 0x1f)) |
- BIT(OP(FETCH_ADD & 0x1f));
-
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
@@ -216,7 +210,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
* Note the QP s_lock must be held.
*/
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
- struct hfi1_other_headers *ohdr,
+ struct ib_other_headers *ohdr,
struct hfi1_pkt_state *ps)
{
struct rvt_ack_entry *e;
@@ -228,6 +222,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
u32 pmtu = qp->pmtu;
struct hfi1_qp_priv *priv = qp->priv;
+ lockdep_assert_held(&qp->s_lock);
/* Don't send an ACK if we aren't supposed to. */
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
@@ -299,10 +294,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
ohdr->u.at.aeth = hfi1_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth[0] =
- cpu_to_be32(e->atomic_data >> 32);
- ohdr->u.at.atomic_ack_eth[1] =
- cpu_to_be32(e->atomic_data);
+ ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = mask_psn(e->psn);
e->sent = 1;
@@ -390,7 +382,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_sge_state *ss;
struct rvt_swqe *wqe;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -403,6 +395,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
int middle = 0;
int delta;
+ lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp);
if (IS_ERR(ps->s_txreq))
goto bail_no_tx;
@@ -566,8 +559,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
@@ -608,8 +602,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
}
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
@@ -640,20 +635,18 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.swap);
- ohdr->u.atomic_eth.compare_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
+ put_ib_ateth_swap(wqe->atomic_wr.swap,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- ohdr->u.atomic_eth.compare_data = 0;
+ put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
}
- ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
- wqe->atomic_wr.remote_addr >> 32);
- ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
- wqe->atomic_wr.remote_addr);
+ put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
+ &ohdr->u.atomic_eth);
ohdr->u.atomic_eth.rkey = cpu_to_be32(
wqe->atomic_wr.rkey);
hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
@@ -779,8 +772,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
* See restart_rc().
*/
len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr + len);
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr + len,
+ &ohdr->u.rc.reth);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
@@ -841,7 +835,7 @@ bail_no_tx:
*
* This is called from hfi1_rc_rcv() and handle_receive_interrupt().
* Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
+ * send side QP state and send engine.
*/
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
int is_fecn)
@@ -856,8 +850,8 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
u32 vl, plen;
struct send_context *sc;
struct pio_buf *pbuf;
- struct hfi1_ib_header hdr;
- struct hfi1_other_headers *ohdr;
+ struct ib_header hdr;
+ struct ib_other_headers *ohdr;
unsigned long flags;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
@@ -917,7 +911,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
if (!pbuf) {
/*
* We have no room to send at the moment. Pass
- * responsibility for sending the ACK to the send tasklet
+ * responsibility for sending the ACK to the send engine
* so that when enough buffer space becomes available,
* the ACK is sent ahead of other outgoing packets.
*/
@@ -932,16 +926,19 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
return;
queue_ack:
- this_cpu_inc(*ibp->rvp.rc_qacks);
spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto unlock;
+ this_cpu_inc(*ibp->rvp.rc_qacks);
qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn;
if (is_fecn)
qp->s_flags |= RVT_S_ECN;
- /* Schedule the send tasklet. */
+ /* Schedule the send engine. */
hfi1_schedule_send(qp);
+unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
}
@@ -960,6 +957,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
u32 opcode;
+ lockdep_assert_held(&qp->s_lock);
qp->s_cur = n;
/*
@@ -1027,7 +1025,7 @@ done:
qp->s_psn = psn;
/*
* Set RVT_S_WAIT_PSN as rc_complete() may start the timer
- * asynchronously before the send tasklet can get scheduled.
+ * asynchronously before the send engine can get scheduled.
* Doing it in hfi1_make_rc_req() is too late.
*/
if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
@@ -1045,6 +1043,8 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct hfi1_ibport *ibp;
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_lock);
if (qp->s_retry == 0) {
if (qp->s_mig_state == IB_MIG_ARMED) {
hfi1_migrate_qp(qp);
@@ -1121,6 +1121,7 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
struct rvt_swqe *wqe;
u32 n = qp->s_last;
+ lockdep_assert_held(&qp->s_lock);
/* Find the work request corresponding to the given PSN. */
for (;;) {
wqe = rvt_get_swqe_ptr(qp, n);
@@ -1141,15 +1142,16 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
struct ib_wc wc;
unsigned i;
u32 opcode;
u32 psn;
+ lockdep_assert_held(&qp->s_lock);
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
@@ -1241,6 +1243,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
struct ib_wc wc;
unsigned i;
+ lockdep_assert_held(&qp->s_lock);
/*
* Don't decrement refcount and don't generate a
* completion if the SWQE is being resent until the send
@@ -1340,6 +1343,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
int diff;
unsigned long to;
+ lockdep_assert_held(&qp->s_lock);
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
@@ -1389,7 +1393,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
}
@@ -1555,6 +1559,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
{
struct rvt_swqe *wqe;
+ lockdep_assert_held(&qp->s_lock);
/* Remove QP from retry timer */
hfi1_stop_rc_timers(qp);
@@ -1573,7 +1578,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
@@ -1595,7 +1600,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
* Called at interrupt level.
*/
static void rc_rcv_resp(struct hfi1_ibport *ibp,
- struct hfi1_other_headers *ohdr,
+ struct ib_other_headers *ohdr,
void *data, u32 tlen, struct rvt_qp *qp,
u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
struct hfi1_ctxtdata *rcd)
@@ -1649,14 +1654,10 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
case OP(ATOMIC_ACKNOWLEDGE):
case OP(RDMA_READ_RESPONSE_FIRST):
aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- __be32 *p = ohdr->u.at.atomic_ack_eth;
-
- val = ((u64)be32_to_cpu(p[0]) << 32) |
- be32_to_cpu(p[1]);
- } else {
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE))
+ val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
+ else
val = 0;
- }
if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
opcode != OP(RDMA_READ_RESPONSE_FIRST))
goto ack_done;
@@ -1782,7 +1783,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
{
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
@@ -1796,8 +1797,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
return;
list_del_init(&qp->rspwait);
qp->r_flags &= ~RVT_R_RSP_NAK;
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
/**
@@ -1815,7 +1815,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
* Return 1 if no more processing is needed; otherwise return 0 to
* schedule a response to be sent.
*/
-static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
+static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
struct rvt_qp *qp, u32 opcode, u32 psn,
int diff, struct hfi1_ctxtdata *rcd)
{
@@ -1923,7 +1923,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
}
if (len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
+ u64 vaddr = get_ib_reth_vaddr(reth);
int ok;
ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
@@ -1946,7 +1946,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
case OP(FETCH_ADD): {
/*
* If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
+ * or the send engine is already backed up to send an
* earlier entry, we can ignore this request.
*/
if (!e || e->opcode != (u8)opcode || old_req)
@@ -2123,13 +2123,13 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
- struct hfi1_ib_header *hdr = packet->hdr;
+ struct ib_header *hdr = packet->hdr;
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_other_headers *ohdr = packet->ohdr;
+ struct ib_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
u32 psn;
@@ -2143,6 +2143,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
int copy_last = 0;
u32 rkey;
+ lockdep_assert_held(&qp->r_lock);
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
return;
@@ -2342,7 +2343,7 @@ send_last:
qp->r_sge.sg_list = NULL;
if (qp->r_len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
+ u64 vaddr = get_ib_reth_vaddr(reth);
int ok;
/* Check rkey & NAK */
@@ -2397,7 +2398,7 @@ send_last:
len = be32_to_cpu(reth->length);
if (len) {
u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
+ u64 vaddr = get_ib_reth_vaddr(reth);
int ok;
/* Check rkey & NAK */
@@ -2432,7 +2433,7 @@ send_last:
qp->r_nak_state = 0;
qp->r_head_ack_queue = next;
- /* Schedule the send tasklet. */
+ /* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
@@ -2469,8 +2470,7 @@ send_last:
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
- vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) |
- be32_to_cpu(ateth->vaddr[1]);
+ vaddr = get_ib_ateth_vaddr(ateth);
if (unlikely(vaddr & (sizeof(u64) - 1)))
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
@@ -2481,11 +2481,11 @@ send_last:
goto nack_acc_unlck;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
+ sdata = get_ib_ateth_swap(ateth);
e->atomic_data = (opcode == OP(FETCH_ADD)) ?
(u64)atomic64_add_return(sdata, maddr) - sdata :
(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
+ get_ib_ateth_compare(ateth),
sdata);
rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
@@ -2499,7 +2499,7 @@ send_last:
qp->r_nak_state = 0;
qp->r_head_ack_queue = next;
- /* Schedule the send tasklet. */
+ /* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
@@ -2575,12 +2575,12 @@ send_ack:
void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
- struct hfi1_ib_header *hdr,
+ struct ib_header *hdr,
u32 rcv_flags,
struct rvt_qp *qp)
{
int has_grh = rcv_flags & HFI1_HAS_GRH;
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
int diff;
u32 opcode;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 48d5094f98e2..a1576aea4756 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -262,7 +262,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
*
* The s_lock will be acquired around the hfi1_migrate_qp() call.
*/
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
@@ -352,7 +352,7 @@ err:
*
* This is called from hfi1_do_send() to
* forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the tasklet, we still
+ * Note that although we are single threaded due to the send engine, we still
* have to protect against post_send(). We don't have to worry about
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
@@ -765,7 +765,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
}
}
-void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2, int middle,
struct hfi1_pkt_state *ps)
{
@@ -846,7 +846,7 @@ void _hfi1_do_send(struct work_struct *work)
* @work: contains a pointer to the QP
*
* Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * exhausted. Only allow one CPU to send a packet per QP.
* Otherwise, two threads could send packets out of order.
*/
void hfi1_do_send(struct rvt_qp *qp)
@@ -909,7 +909,7 @@ void hfi1_do_send(struct rvt_qp *qp)
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
/*
* If the packet cannot be sent now, return and
- * the send tasklet will be woken up later.
+ * the send engine will be woken up later.
*/
if (hfi1_verbs_send(qp, &ps))
return;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index f9befc05b349..fd39bcaa062d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -726,6 +726,34 @@ u16 sdma_get_descq_cnt(void)
}
/**
+ * sdma_engine_get_vl() - return vl for a given sdma engine
+ * @sde: sdma engine
+ *
+ * This function returns the vl mapped to a given engine, or an error if
+ * the mapping can't be found. The mapping fields are protected by RCU.
+ */
+int sdma_engine_get_vl(struct sdma_engine *sde)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ struct sdma_vl_map *m;
+ u8 vl;
+
+ if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
+ return -EINVAL;
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ vl = m->engine_to_vl[sde->this_idx];
+ rcu_read_unlock();
+
+ return vl;
+}
+
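A short caller sketch, mirroring sde_show_vl() and sdma_set_cpu_to_sde_map() added later in this patch; the negative return value is an errno and can be propagated directly:

    int vl = sdma_engine_get_vl(sde);

    if (vl < 0)        /* engine index out of range, or no sdma_map installed yet */
            return vl;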
+/**
* sdma_select_engine_vl() - select sdma engine
* @dd: devdata
* @selector: a spreading factor
@@ -788,6 +816,326 @@ struct sdma_engine *sdma_select_engine_sc(
return sdma_select_engine_vl(dd, selector, vl);
}
+struct sdma_rht_map_elem {
+ u32 mask;
+ u8 ctr;
+ struct sdma_engine *sde[0];
+};
+
+struct sdma_rht_node {
+ unsigned long cpu_id;
+ struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
+ struct rhash_head node;
+};
+
+#define NR_CPUS_HINT 192
+
+static const struct rhashtable_params sdma_rht_params = {
+ .nelem_hint = NR_CPUS_HINT,
+ .head_offset = offsetof(struct sdma_rht_node, node),
+ .key_offset = offsetof(struct sdma_rht_node, cpu_id),
+ .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
+ .max_size = NR_CPUS,
+ .min_size = 8,
+ .automatic_shrinking = true,
+};
+
+/*
+ * sdma_select_user_engine() - select sdma engine based on user setup
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ * This function returns an sdma engine for a user sdma request.
+ * A user-defined sdma engine affinity setting is honored when applicable;
+ * otherwise the system default sdma engine mapping is used. To ensure correct
+ * ordering, the mapping from <selector, vl> to sde must remain unchanged.
+ */
+struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
+ u32 selector, u8 vl)
+{
+ struct sdma_rht_node *rht_node;
+ struct sdma_engine *sde = NULL;
+ const struct cpumask *current_mask = tsk_cpus_allowed(current);
+ unsigned long cpu_id;
+
+ /*
+ * To ensure that the same sdma engine(s) are always selected,
+ * make sure the process is pinned to this CPU only.
+ */
+ if (cpumask_weight(current_mask) != 1)
+ goto out;
+
+ cpu_id = smp_processor_id();
+ rcu_read_lock();
+ rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu_id,
+ sdma_rht_params);
+
+ if (rht_node && rht_node->map[vl]) {
+ struct sdma_rht_map_elem *map = rht_node->map[vl];
+
+ sde = map->sde[selector & map->mask];
+ }
+ rcu_read_unlock();
+
+ if (sde)
+ return sde;
+
+out:
+ return sdma_select_engine_vl(dd, selector, vl);
+}
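For context, the user SDMA request path added later in this diff (user_sdma.c) drives this function roughly as follows; the snippet is a condensed sketch of that caller, with its local variables (dlid, uctxt, fd, req, vl) assumed from that function:

    u32 selector = dlid_to_selector(dlid);

    selector += uctxt->ctxt + fd->subctxt;
    req->sde = sdma_select_user_engine(dd, selector, vl);
    if (!req->sde || !sdma_running(req->sde))
            return -ECOMM;    /* the real caller does "ret = -ECOMM; goto free_req;" */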
+
+static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
+{
+ int i;
+
+ for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
+ map->sde[map->ctr + i] = map->sde[i];
+}
+
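A small worked example of the power-of-two padding this helper performs (values illustrative): with ctr = 3 engines on a VL, roundup_pow_of_two(3) = 4, the caller sets mask = 3, and the loop above copies sde[0] into sde[3], so engine selection can use a plain "selector & mask" index instead of a modulo:

    /*
     * map->ctr = 3:  sde[] = { e0, e1, e2 }
     * pow = roundup_pow_of_two(3) = 4, map->mask = pow - 1 = 3
     * after padding: sde[] = { e0, e1, e2, e0 }
     * lookup (see sdma_select_user_engine): sde = map->sde[selector & map->mask];
     */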
+static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
+ struct sdma_engine *sde)
+{
+ unsigned int i, pow;
+
+ /* only need to check the first ctr entries for a match */
+ for (i = 0; i < map->ctr; i++) {
+ if (map->sde[i] == sde) {
+ memmove(&map->sde[i], &map->sde[i + 1],
+ (map->ctr - i - 1) * sizeof(map->sde[0]));
+ map->ctr--;
+ pow = roundup_pow_of_two(map->ctr ? : 1);
+ map->mask = pow - 1;
+ sdma_populate_sde_map(map);
+ break;
+ }
+ }
+}
+
+/*
+ * Prevents concurrent reads and writes of the sdma engine cpu_mask
+ */
+static DEFINE_MUTEX(process_to_sde_mutex);
+
+ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
+ size_t count)
+{
+ struct hfi1_devdata *dd = sde->dd;
+ cpumask_var_t mask, new_mask;
+ unsigned long cpu;
+ int ret, vl, sz;
+
+ vl = sdma_engine_get_vl(sde);
+ if (unlikely(vl < 0))
+ return -EINVAL;
+
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
+ if (!ret) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+ ret = cpulist_parse(buf, mask);
+ if (ret)
+ goto out_free;
+
+ if (!cpumask_subset(mask, cpu_online_mask)) {
+ dd_dev_warn(sde->dd, "Invalid CPU mask\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ sz = sizeof(struct sdma_rht_map_elem) +
+ (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
+
+ mutex_lock(&process_to_sde_mutex);
+
+ for_each_cpu(cpu, mask) {
+ struct sdma_rht_node *rht_node;
+
+ /* Check if we have this already mapped */
+ if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
+ cpumask_set_cpu(cpu, new_mask);
+ continue;
+ }
+
+ rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu,
+ sdma_rht_params);
+ if (!rht_node) {
+ rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
+ if (!rht_node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
+ if (!rht_node->map[vl]) {
+ kfree(rht_node);
+ ret = -ENOMEM;
+ goto out;
+ }
+ rht_node->cpu_id = cpu;
+ rht_node->map[vl]->mask = 0;
+ rht_node->map[vl]->ctr = 1;
+ rht_node->map[vl]->sde[0] = sde;
+
+ ret = rhashtable_insert_fast(&dd->sdma_rht,
+ &rht_node->node,
+ sdma_rht_params);
+ if (ret) {
+ kfree(rht_node->map[vl]);
+ kfree(rht_node);
+ dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
+ cpu);
+ goto out;
+ }
+
+ } else {
+ int ctr, pow;
+
+ /* Add new user mappings */
+ if (!rht_node->map[vl])
+ rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
+
+ if (!rht_node->map[vl]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rht_node->map[vl]->ctr++;
+ ctr = rht_node->map[vl]->ctr;
+ rht_node->map[vl]->sde[ctr - 1] = sde;
+ pow = roundup_pow_of_two(ctr);
+ rht_node->map[vl]->mask = pow - 1;
+
+ /* Populate the sde map table */
+ sdma_populate_sde_map(rht_node->map[vl]);
+ }
+ cpumask_set_cpu(cpu, new_mask);
+ }
+
+ /* Clean up old mappings */
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct sdma_rht_node *rht_node;
+
+ /* Don't cleanup sdes that are set in the new mask */
+ if (cpumask_test_cpu(cpu, mask))
+ continue;
+
+ rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpu,
+ sdma_rht_params);
+ if (rht_node) {
+ bool empty = true;
+ int i;
+
+ /* Remove mappings for old sde */
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ if (rht_node->map[i])
+ sdma_cleanup_sde_map(rht_node->map[i],
+ sde);
+
+ /* Free empty hash table entries */
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
+ if (!rht_node->map[i])
+ continue;
+
+ if (rht_node->map[i]->ctr) {
+ empty = false;
+ break;
+ }
+ }
+
+ if (empty) {
+ ret = rhashtable_remove_fast(&dd->sdma_rht,
+ &rht_node->node,
+ sdma_rht_params);
+ WARN_ON(ret);
+
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ kfree(rht_node->map[i]);
+
+ kfree(rht_node);
+ }
+ }
+ }
+
+ cpumask_copy(&sde->cpu_mask, new_mask);
+out:
+ mutex_unlock(&process_to_sde_mutex);
+out_free:
+ free_cpumask_var(mask);
+ free_cpumask_var(new_mask);
+ return ret ? : strnlen(buf, PAGE_SIZE);
+}
+
+ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
+{
+ mutex_lock(&process_to_sde_mutex);
+ if (cpumask_empty(&sde->cpu_mask))
+ snprintf(buf, PAGE_SIZE, "%s\n", "empty");
+ else
+ cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
+ mutex_unlock(&process_to_sde_mutex);
+ return strnlen(buf, PAGE_SIZE);
+}
+
+static void sdma_rht_free(void *ptr, void *arg)
+{
+ struct sdma_rht_node *rht_node = ptr;
+ int i;
+
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
+ kfree(rht_node->map[i]);
+
+ kfree(rht_node);
+}
+
+/**
+ * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
+ * @s: seq file
+ * @dd: hfi1_devdata
+ * @cpuid: cpu id
+ *
+ * This routine dumps the process-to-sde mappings for the given cpu
+ */
+void sdma_seqfile_dump_cpu_list(struct seq_file *s,
+ struct hfi1_devdata *dd,
+ unsigned long cpuid)
+{
+ struct sdma_rht_node *rht_node;
+ int i, j;
+
+ rht_node = rhashtable_lookup_fast(&dd->sdma_rht, &cpuid,
+ sdma_rht_params);
+ if (!rht_node)
+ return;
+
+ seq_printf(s, "cpu%3lu: ", cpuid);
+ for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
+ if (!rht_node->map[i] || !rht_node->map[i]->ctr)
+ continue;
+
+ seq_printf(s, " vl%d: [", i);
+
+ for (j = 0; j < rht_node->map[i]->ctr; j++) {
+ if (!rht_node->map[i]->sde[j])
+ continue;
+
+ if (j > 0)
+ seq_puts(s, ",");
+
+ seq_printf(s, " sdma%2d",
+ rht_node->map[i]->sde[j]->this_idx);
+ }
+ seq_puts(s, " ]");
+ }
+
+ seq_puts(s, "\n");
+}
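Given the format strings above, one line of the debugfs dump looks roughly like this (CPU and engine numbers illustrative):

    cpu  4:  vl0: [ sdma 0, sdma 1 ] vl1: [ sdma 2 ]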
+
/*
* Free the indicated map struct
*/
@@ -1161,6 +1509,10 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
dd->num_sdma = num_engines;
if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
goto bail;
+
+ if (rhashtable_init(&dd->sdma_rht, &sdma_rht_params))
+ goto bail;
+
dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
return 0;
@@ -1252,6 +1604,7 @@ void sdma_exit(struct hfi1_devdata *dd)
sdma_finalput(&sde->state);
}
sdma_clean(dd, dd->num_sdma);
+ rhashtable_free_and_destroy(&dd->sdma_rht, sdma_rht_free, NULL);
}
/*
@@ -2086,6 +2439,11 @@ nodesc:
* @sde: sdma engine to use
* @wait: wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
* @count: pointer to a u32 which, after return, will contain the total number of
* sdma_txreqs removed from the tx_list. This includes the sdma_txreqs
* whose SDMA descriptors are submitted to the ring and the sdma_txreqs
* which are added to the SDMA engine flush list when the SDMA engine is
* not running.
*
* The call submits the list into the ring.
*
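With this change the submitted and flushed totals come back through the new count argument rather than the return value. A condensed sketch of the updated calling convention, based on the user_sdma.c hunk further down (request and packet-queue variables assumed from that caller):

    u32 count = 0;
    int ret;

    ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
    /* count covers both ring-submitted and flush-listed txreqs */
    req->seqsubmitted += count;
    if (ret)    /* 0 on success; -EINVAL, -EBUSY, -EIOCBQUEUED or -ECOMM otherwise */
            return ret;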
@@ -2100,18 +2458,18 @@ nodesc:
* side locking.
*
* Return:
- * > 0 - Success (value is number of sdma_txreq's submitted),
+ * 0 - Success,
* -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
- struct list_head *tx_list)
+ struct list_head *tx_list, u32 *count_out)
{
struct sdma_txreq *tx, *tx_next;
int ret = 0;
unsigned long flags;
u16 tail = INVALID_TAIL;
- int count = 0;
+ u32 submit_count = 0, flush_count = 0, total_count;
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
@@ -2127,33 +2485,34 @@ retry:
}
list_del_init(&tx->list);
tail = submit_tx(sde, tx);
- count++;
+ submit_count++;
if (tail != INVALID_TAIL &&
- (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
+ (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
sdma_update_tail(sde, tail);
tail = INVALID_TAIL;
}
}
update_tail:
+ total_count = submit_count + flush_count;
if (wait)
- iowait_sdma_add(wait, count);
+ iowait_sdma_add(wait, total_count);
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
spin_unlock_irqrestore(&sde->tail_lock, flags);
- return ret == 0 ? count : ret;
+ *count_out = total_count;
+ return ret;
unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
tx->wait = wait;
list_del_init(&tx->list);
- if (wait)
- iowait_sdma_inc(wait);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
list_add_tail(&tx->list, &sde->flushlist);
+ flush_count++;
if (wait) {
wait->tx_count++;
wait->count += tx->num_desc;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 8f50c99fe711..56257ea3598f 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -413,6 +413,8 @@ struct sdma_engine {
spinlock_t flushlist_lock;
/* private: */
struct list_head flushlist;
+ struct cpumask cpu_mask;
+ struct kobject kobj;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
@@ -847,7 +849,8 @@ int sdma_send_txreq(struct sdma_engine *sde,
struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
struct iowait *wait,
- struct list_head *tx_list);
+ struct list_head *tx_list,
+ u32 *count);
int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
@@ -1058,7 +1061,15 @@ struct sdma_engine *sdma_select_engine_vl(
u32 selector,
u8 vl);
+struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
+ u32 selector, u8 vl);
+ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
+ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
+ size_t count);
+int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
+void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
+ unsigned long cpuid);
#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 74c84c655f7e..edba22461a9c 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -766,13 +766,95 @@ bail:
return ret;
}
+struct sde_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct sdma_engine *sde, char *buf);
+ ssize_t (*store)(struct sdma_engine *sde, const char *buf, size_t cnt);
+};
+
+static ssize_t sde_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct sde_attribute *sde_attr =
+ container_of(attr, struct sde_attribute, attr);
+ struct sdma_engine *sde =
+ container_of(kobj, struct sdma_engine, kobj);
+
+ if (!sde_attr->show)
+ return -EINVAL;
+
+ return sde_attr->show(sde, buf);
+}
+
+static ssize_t sde_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sde_attribute *sde_attr =
+ container_of(attr, struct sde_attribute, attr);
+ struct sdma_engine *sde =
+ container_of(kobj, struct sdma_engine, kobj);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!sde_attr->store)
+ return -EINVAL;
+
+ return sde_attr->store(sde, buf, count);
+}
+
+static const struct sysfs_ops sde_sysfs_ops = {
+ .show = sde_show,
+ .store = sde_store,
+};
+
+static struct kobj_type sde_ktype = {
+ .sysfs_ops = &sde_sysfs_ops,
+};
+
+#define SDE_ATTR(_name, _mode, _show, _store) \
+ struct sde_attribute sde_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static ssize_t sde_show_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
+{
+ return sdma_get_cpu_to_sde_map(sde, buf);
+}
+
+static ssize_t sde_store_cpu_to_sde_map(struct sdma_engine *sde,
+ const char *buf, size_t count)
+{
+ return sdma_set_cpu_to_sde_map(sde, buf, count);
+}
+
+static ssize_t sde_show_vl(struct sdma_engine *sde, char *buf)
+{
+ int vl;
+
+ vl = sdma_engine_get_vl(sde);
+ if (vl < 0)
+ return vl;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vl);
+}
+
+static SDE_ATTR(cpu_list, S_IWUSR | S_IRUGO,
+ sde_show_cpu_to_sde_map,
+ sde_store_cpu_to_sde_map);
+static SDE_ATTR(vl, S_IRUGO, sde_show_vl, NULL);
+
+static struct sde_attribute *sde_attribs[] = {
+ &sde_attr_cpu_list,
+ &sde_attr_vl
+};
+
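With the kobjects registered below, every SDMA engine gains two sysfs files: cpu_list, writable only with CAP_SYS_ADMIN and parsed with cpulist_parse(), so it accepts the usual "0-3,8"-style CPU list and must be a subset of the online CPUs, and a read-only vl. They appear per engine under the IB device directory, e.g. /sys/class/infiniband/hfi1_0/sdma0/cpu_list, where the device name here is illustrative.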
/*
* Register and create our files in /sys/class/infiniband.
*/
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
{
struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
- int i, ret;
+ struct device *class_dev = &dev->dev;
+ int i, j, ret;
for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
ret = device_create_file(&dev->dev, hfi1_attributes[i]);
@@ -780,10 +862,29 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
goto bail;
}
+ for (i = 0; i < dd->num_sdma; i++) {
+ ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
+ &sde_ktype, &class_dev->kobj,
+ "sdma%d", i);
+ if (ret)
+ goto bail;
+
+ for (j = 0; j < ARRAY_SIZE(sde_attribs); j++) {
+ ret = sysfs_create_file(&dd->per_sdma[i].kobj,
+ &sde_attribs[j]->attr);
+ if (ret)
+ goto bail;
+ }
+ }
+
return 0;
bail:
for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
device_remove_file(&dev->dev, hfi1_attributes[i]);
+
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_del(&dd->per_sdma[i].kobj);
+
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 4cfb13771897..01f525cd985a 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -47,9 +47,9 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
+u8 ibhdr_exhdr_len(struct ib_header *hdr)
{
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
u8 opcode;
u8 lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
@@ -67,16 +67,11 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
#define IETH_PRN "ieth rkey 0x%.8x"
-#define ATOMICACKETH_PRN "origdata %lld"
-#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
+#define ATOMICACKETH_PRN "origdata %llx"
+#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %llx cdata %llx"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
-static u64 ib_u64_get(__be32 *p)
-{
- return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
-}
-
static const char *parse_syndrome(u8 syndrome)
{
switch (syndrome >> 5) {
@@ -113,8 +108,7 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, RETH_PRN " " IMM_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
+ get_ib_reth_vaddr(&eh->rc.reth),
be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length),
be32_to_cpu(eh->rc.imm_data));
@@ -126,8 +120,7 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_ONLY):
case OP(UC, RDMA_WRITE_ONLY):
trace_seq_printf(p, RETH_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
+ get_ib_reth_vaddr(&eh->rc.reth),
be32_to_cpu(eh->rc.reth.rkey),
be32_to_cpu(eh->rc.reth.length));
break;
@@ -145,20 +138,16 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->at.aeth) >> 24,
parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
- (unsigned long long)
- ib_u64_get(eh->at.atomic_ack_eth));
+ ib_u64_get(&eh->at.atomic_ack_eth));
break;
/* atomiceth */
case OP(RC, COMPARE_SWAP):
case OP(RC, FETCH_ADD):
trace_seq_printf(p, ATOMICETH_PRN,
- (unsigned long long)ib_u64_get(
- eh->atomic_eth.vaddr),
+ get_ib_ateth_vaddr(&eh->atomic_eth),
eh->atomic_eth.rkey,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->atomic_eth.swap_data),
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->atomic_eth.compare_data));
+ get_ib_ateth_swap(&eh->atomic_eth),
+ get_ib_ateth_compare(&eh->atomic_eth));
break;
/* deth */
case OP(UD, SEND_ONLY):
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index 31654bbac1cf..26ae789e47cf 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -67,9 +67,9 @@ TRACE_EVENT(hfi1_uctxtdata,
__field(u64, hw_free)
__field(void __iomem *, piobase)
__field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
+ __field(u64, rcvhdrq_dma)
__field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
+ __field(u64, rcvegr_dma)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
@@ -77,10 +77,9 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
__entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
__entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys =
- uctxt->egrbufs.rcvtids[0].phys;
+ __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
),
TP_printk("[%s] ctxt %u " UCTXT_FMT,
__get_str(dev),
@@ -89,9 +88,9 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->hw_free,
__entry->piobase,
__entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
+ __entry->rcvhdrq_dma,
__entry->eager_cnt,
- __entry->rcvegr_phys
+ __entry->rcvegr_dma
)
);
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index c3e41aed0034..382fcda3a5f6 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -55,7 +55,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ibhdrs
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
+u8 ibhdr_exhdr_len(struct ib_header *hdr);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
@@ -74,7 +74,7 @@ __print_symbolic(lrh, \
DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
TP_PROTO(struct hfi1_devdata *dd,
- struct hfi1_ib_header *hdr),
+ struct ib_header *hdr),
TP_ARGS(dd, hdr),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
__dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
),
TP_fast_assign(
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
DD_DEV_ASSIGN(dd);
/* LRH */
@@ -185,19 +185,19 @@ DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
);
DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
TP_ARGS(dd, hdr));
DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
TP_ARGS(dd, hdr));
#endif /* __HFI1_TRACE_IBHDRS_H */
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 9ba1f615ec95..11e02b228922 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -260,7 +260,7 @@ TRACE_EVENT(hfi1_mmu_invalidate,
TRACE_EVENT(snoop_capture,
TP_PROTO(struct hfi1_devdata *dd,
int hdr_len,
- struct hfi1_ib_header *hdr,
+ struct ib_header *hdr,
int data_len,
void *data),
TP_ARGS(dd, hdr_len, hdr, data_len, data),
@@ -279,7 +279,7 @@ TRACE_EVENT(snoop_capture,
__dynamic_array(u8, raw_pkt, data_len)
),
TP_fast_assign(
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
__entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
if (__entry->lnh == HFI1_LRH_BTH)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index a726d96d185f..5e6d1bac4914 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -50,14 +50,7 @@
#include "qp.h"
/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/* only opcode mask for adaptive pio */
-const u32 uc_only_opcode =
- BIT(OP(SEND_ONLY) & 0x1f) |
- BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) |
- BIT(OP(RDMA_WRITE_ONLY & 0x1f)) |
- BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f));
+#define OP(x) UC_OP(x)
/**
* hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
@@ -70,7 +63,7 @@ const u32 uc_only_opcode =
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
u32 hwords = 5;
u32 bth0 = 0;
@@ -304,12 +297,12 @@ bail_no_tx:
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
- struct hfi1_ib_header *hdr = packet->hdr;
+ struct ib_header *hdr = packet->hdr;
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
- struct hfi1_other_headers *ohdr = packet->ohdr;
+ struct ib_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
u32 psn;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index f01e8e1d62d3..97ae24b6314c 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -271,7 +271,7 @@ drop:
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
@@ -510,8 +510,8 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 bth0, plen, vl, hwords = 5;
u16 lrh0;
u8 sl = ibp->sc_to_sl[sc5];
- struct hfi1_ib_header hdr;
- struct hfi1_other_headers *ohdr;
+ struct ib_header hdr;
+ struct ib_other_headers *ohdr;
struct pio_buf *pbuf;
struct send_context *ctxt = qp_to_send_context(qp, sc5);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -559,8 +559,8 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
/*
* opa_smp_check() - Do the regular pkey checking, and the additional
- * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
- * ("SMA Packet Checks").
+ * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
+ * 9.10.25 ("SMA Packet Checks").
*
* Note that:
* - Checks are done using the pkey directly from the packet's BTH,
@@ -603,23 +603,28 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
/*
* SMPs fall into one of four (disjoint) categories:
- * SMA request, SMA response, trap, or trap repress.
- * Our response depends, in part, on which type of
- * SMP we're processing.
+ * SMA request, SMA response, SMA trap, or SMA trap repress.
+ * Our response depends, in part, on which type of SMP we're
+ * processing.
*
- * If this is not an SMA request, or trap repress:
- * - accept MAD if the port is running an SM
- * - pkey == FULL_MGMT_P_KEY =>
- * reply with unsupported method (i.e., just mark
- * the smp's status field here, and let it be
- * processed normally)
- * - pkey != LIM_MGMT_P_KEY =>
- * increment port recv constraint errors, drop MAD
- * If this is an SMA request or trap repress:
+ * If this is an SMA response, skip the check here.
+ *
+ * If this is an SMA request or SMA trap repress:
* - pkey != FULL_MGMT_P_KEY =>
* increment port recv constraint errors, drop MAD
+ *
+ * Otherwise:
+ * - accept if the port is running an SM
+ * - drop MAD if it's an SMA trap
+ * - pkey == FULL_MGMT_P_KEY =>
+ * reply with unsupported method
+ * - pkey != FULL_MGMT_P_KEY =>
+ * increment port recv constraint errors, drop MAD
*/
switch (smp->method) {
+ case IB_MGMT_METHOD_GET_RESP:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ break;
case IB_MGMT_METHOD_GET:
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_REPORT:
@@ -629,23 +634,17 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
return 1;
}
break;
- case IB_MGMT_METHOD_SEND:
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- case IB_MGMT_METHOD_REPORT_RESP:
+ default:
if (ibp->rvp.port_cap_flags & IB_PORT_SM)
return 0;
+ if (smp->method == IB_MGMT_METHOD_TRAP)
+ return 1;
if (pkey == FULL_MGMT_P_KEY) {
smp->status |= IB_SMP_UNSUP_METHOD;
return 0;
}
- if (pkey != LIM_MGMT_P_KEY) {
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
- }
- break;
- default:
- break;
+ ingress_pkey_table_fail(ppd, pkey, slid);
+ return 1;
}
return 0;
}
@@ -665,7 +664,7 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
*/
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
- struct hfi1_other_headers *ohdr = packet->ohdr;
+ struct ib_other_headers *ohdr = packet->ohdr;
int opcode;
u32 hdrsize = packet->hlen;
struct ib_wc wc;
@@ -675,13 +674,13 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_ib_header *hdr = packet->hdr;
+ struct ib_header *hdr = packet->hdr;
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
- u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
+ u8 sc5 = hdr2sc(hdr, packet->rhf);
u32 bth1;
u8 sl_from_sc, sl;
u16 slid;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 1694037d1eee..a761f804111e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -548,7 +548,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
u8 opcode, sc, vl;
int req_queued = 0;
u16 dlid;
- u8 selector;
+ u32 selector;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@@ -753,12 +753,9 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
dlid = be16_to_cpu(req->hdr.lrh[1]);
selector = dlid_to_selector(dlid);
+ selector += uctxt->ctxt + fd->subctxt;
+ req->sde = sdma_select_user_engine(dd, selector, vl);
- /* Have to select the engine */
- req->sde = sdma_select_engine_vl(dd,
- (u32)(uctxt->ctxt + fd->subctxt +
- selector),
- vl);
if (!req->sde || !sdma_running(req->sde)) {
ret = -ECOMM;
goto free_req;
@@ -894,7 +891,7 @@ static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
- int ret = 0;
+ int ret = 0, count;
unsigned npkts = 0;
struct user_sdma_txreq *tx = NULL;
struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -1090,23 +1087,18 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
npkts++;
}
dosend:
- ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
- if (list_empty(&req->txps)) {
- req->seqsubmitted = req->seqnum;
- if (req->seqnum == req->info.npkts) {
- set_bit(SDMA_REQ_SEND_DONE, &req->flags);
- /*
- * The txreq has already been submitted to the HW queue
- * so we can free the AHG entry now. Corruption will not
- * happen due to the sequential manner in which
- * descriptors are processed.
- */
- if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
- sdma_ahg_free(req->sde, req->ahg_idx);
- }
- } else if (ret > 0) {
- req->seqsubmitted += ret;
- ret = 0;
+ ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ req->seqsubmitted += count;
+ if (req->seqsubmitted == req->info.npkts) {
+ set_bit(SDMA_REQ_SEND_DONE, &req->flags);
+ /*
+ * The txreq has already been submitted to the HW queue
+ * so we can free the AHG entry now. Corruption will not
+ * happen due to the sequential manner in which
+ * descriptors are processed.
+ */
+ if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
+ sdma_ahg_free(req->sde, req->ahg_idx);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 2b359540901d..4b7a16ceb362 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -76,7 +76,7 @@ static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-unsigned int hfi1_max_cqes = 0x2FFFF;
+unsigned int hfi1_max_cqes = 0x2FFFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
"Maximum number of completion queue entries to support");
@@ -89,7 +89,7 @@ unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-unsigned int hfi1_max_qps = 16384;
+unsigned int hfi1_max_qps = 32768;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
@@ -335,7 +335,7 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
[IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
- [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8,
[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
@@ -403,6 +403,28 @@ static const opcode_handler opcode_handler_tbl[256] = {
[IB_OPCODE_CNP] = &hfi1_cnp_rcv
};
+#define OPMASK 0x1f
+
+static const u32 pio_opmask[BIT(3)] = {
+ /* RC */
+ [IB_OPCODE_RC >> 5] =
+ BIT(RC_OP(SEND_ONLY) & OPMASK) |
+ BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
+ BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
+ BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
+ BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
+ BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
+ BIT(RC_OP(FETCH_ADD) & OPMASK),
+ /* UC */
+ [IB_OPCODE_UC >> 5] =
+ BIT(UC_OP(SEND_ONLY) & OPMASK) |
+ BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
+ BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
+ BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
+};
+
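For reference on how get_send_routine() (further down) indexes this table: the top three bits of the BTH opcode select the transport and the low five bits the operation, so one table lookup replaces the old per-transport rc_only_opcode/uc_only_opcode masks. A worked example using IBTA opcode values, shown for illustration only:

    /*
     * op = 0x04 (IB_OPCODE_RC_SEND_ONLY):  op >> 5 = 0 -> RC mask, BIT(0x04) is set
     * op = 0x24 (IB_OPCODE_UC_SEND_ONLY):  op >> 5 = 1 -> UC mask, BIT(0x04) is set
     * op = 0x00 (IB_OPCODE_RC_SEND_FIRST): op >> 5 = 0 -> RC mask, BIT(0x00) is clear,
     *                                      so the packet is not eligible for adaptive pio
     */
    if (BIT(op & OPMASK) & pio_opmask[op >> 5])
            /* opcode is a candidate for adaptive pio */;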
/*
* System image GUID.
*/
@@ -567,7 +589,7 @@ static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
- struct hfi1_ib_header *hdr = packet->hdr;
+ struct ib_header *hdr = packet->hdr;
u32 tlen = packet->tlen;
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
@@ -719,7 +741,7 @@ static void verbs_sdma_complete(
if (tx->wqe) {
hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct hfi1_ib_header *hdr;
+ struct ib_header *hdr;
hdr = &tx->phdr.hdr;
hfi1_rc_send_complete(qp, hdr);
@@ -748,7 +770,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
qp->s_flags &= ~RVT_S_BUSY;
@@ -959,7 +981,7 @@ static int pio_wait(struct rvt_qp *qp,
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
/* counting: only call wantpiobuf_intr if first user */
if (was_empty)
hfi1_sc_wantpiobuf_intr(sc, 1);
@@ -1200,7 +1222,7 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ib_header *h = &tx->phdr.hdr;
+ struct ib_header *h = &tx->phdr.hdr;
if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
return dd->process_pio_send;
@@ -1210,22 +1232,18 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
case IB_QPT_GSI:
case IB_QPT_UD:
break;
- case IB_QPT_RC:
- if (piothreshold &&
- qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
- (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
- iowait_sdma_pending(&priv->s_iowait) == 0 &&
- !sdma_txreq_built(&tx->txreq))
- return dd->process_pio_send;
- break;
case IB_QPT_UC:
+ case IB_QPT_RC: {
+ u8 op = get_opcode(h);
+
if (piothreshold &&
qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
- (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
+ (BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq))
return dd->process_pio_send;
break;
+ }
default:
break;
}
@@ -1244,8 +1262,8 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
- struct hfi1_ib_header *hdr;
+ struct ib_other_headers *ohdr;
+ struct ib_header *hdr;
send_routine sr;
int ret;
u8 lnh;
@@ -1423,7 +1441,8 @@ static int modify_device(struct ib_device *device,
}
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
+ memcpy(device->node_desc, device_modify->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
for (i = 0; i < dd->num_pports; i++) {
struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
@@ -1754,7 +1773,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_ib_header *hdr = packet->hdr;
+ struct ib_header *hdr = packet->hdr;
struct rvt_qp *qp = packet->qp;
u32 lqpn, rqpn = 0;
u16 rlid = 0;
@@ -1781,7 +1800,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
return;
}
- sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
+ sc5 = hdr2sc(hdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = qp->ibqp.qp_num;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index d1b101c54828..1c3815d89eb7 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -60,6 +60,7 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>
+#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/rdmavt_cq.h>
@@ -80,16 +81,6 @@ struct hfi1_packet;
*/
#define HFI1_UVERBS_ABI_VERSION 2
-#define IB_SEQ_NAK (3 << 29)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK 0x20
-#define IB_NAK_PSN_ERROR 0x60
-#define IB_NAK_INVALID_REQUEST 0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST 0x64
-
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
@@ -104,80 +95,16 @@ struct hfi1_packet;
#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
-#define IB_BTH_REQ_ACK BIT(31)
-#define IB_BTH_SOLICITED BIT(23)
-#define IB_BTH_MIG_REQ BIT(22)
-
-#define IB_GRH_VERSION 6
-#define IB_GRH_VERSION_MASK 0xF
-#define IB_GRH_VERSION_SHIFT 28
-#define IB_GRH_TCLASS_MASK 0xFF
-#define IB_GRH_TCLASS_SHIFT 20
-#define IB_GRH_FLOW_MASK 0xFFFFF
-#define IB_GRH_FLOW_SHIFT 0
-#define IB_GRH_NEXT_HDR 0x1B
-
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+#define RC_OP(x) IB_OPCODE_RC_##x
+#define UC_OP(x) IB_OPCODE_UC_##x
+
/* flags passed by hfi1_ib_rcv() */
enum {
HFI1_HAS_GRH = (1 << 0),
};
-struct ib_reth {
- __be64 vaddr;
- __be32 rkey;
- __be32 length;
-} __packed;
-
-struct ib_atomic_eth {
- __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
- __be32 rkey;
- __be64 swap_data;
- __be64 compare_data;
-} __packed;
-
-union ib_ehdrs {
- struct {
- __be32 deth[2];
- __be32 imm_data;
- } ud;
- struct {
- struct ib_reth reth;
- __be32 imm_data;
- } rc;
- struct {
- __be32 aeth;
- __be32 atomic_ack_eth[2];
- } at;
- __be32 imm_data;
- __be32 aeth;
- __be32 ieth;
- struct ib_atomic_eth atomic_eth;
-} __packed;
-
-struct hfi1_other_headers {
- __be32 bth[3];
- union ib_ehdrs u;
-} __packed;
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data). Only the first 56 bytes of the IB header
- * will be in the eager header buffer. The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct hfi1_ib_header {
- __be16 lrh[4];
- union {
- struct {
- struct ib_grh grh;
- struct hfi1_other_headers oth;
- } l;
- struct hfi1_other_headers oth;
- } u;
-} __packed;
-
struct hfi1_ahg_info {
u32 ahgdesc[2];
u16 tx_flags;
@@ -187,7 +114,7 @@ struct hfi1_ahg_info {
struct hfi1_sdma_header {
__le64 pbc;
- struct hfi1_ib_header hdr;
+ struct ib_header hdr;
} __packed;
/*
@@ -386,7 +313,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet);
void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
- struct hfi1_ib_header *hdr,
+ struct ib_header *hdr,
u32 rcv_flags,
struct rvt_qp *qp);
@@ -400,7 +327,7 @@ void hfi1_rc_timeout(unsigned long arg);
void hfi1_del_timers_sync(struct rvt_qp *qp);
void hfi1_stop_rc_timers(struct rvt_qp *qp);
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr);
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
@@ -423,7 +350,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;
-static inline u8 get_opcode(struct hfi1_ib_header *h)
+static inline u8 get_opcode(struct ib_header *h)
{
u16 lnh = be16_to_cpu(h->lrh[0]) & 3;
@@ -433,13 +360,13 @@ static inline u8 get_opcode(struct hfi1_ib_header *h)
return be32_to_cpu(h->u.l.oth.bth[0]) >> 24;
}
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
-void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2, int middle,
struct hfi1_pkt_state *ps);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index d8fb056526f8..094ab829ec42 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -109,7 +109,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
qp->s_flags |= RVT_S_WAIT_TX;
list_add_tail(&priv->s_iowait.list, &dev->txwait);
trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
}
qp->s_flags &= ~RVT_S_BUSY;
}
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
new file mode 100644
index 000000000000..e1a6e055cd60
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -0,0 +1,10 @@
+config INFINIBAND_HNS
+ tristate "HNS RoCE Driver"
+ depends on NET_VENDOR_HISILICON
+ depends on ARM64 && HNS && HNS_DSAF && HNS_ENET
+ ---help---
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
+ is used in the Hisilicon Hi1610 and later ICT SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
new file mode 100644
index 000000000000..7e8ebd24dcae
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Hisilicon RoCE drivers.
+#
+
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
new file mode 100644
index 000000000000..24f79ee39fdf
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include "hns_roce_device.h"
+
+#define HNS_ROCE_PORT_NUM_SHIFT 24
+#define HNS_ROCE_VLAN_SL_BIT_MASK 7
+#define HNS_ROCE_VLAN_SL_SHIFT 13
+
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_gid_attr gid_attr;
+ struct hns_roce_ah *ah;
+ u16 vlan_tag = 0xffff;
+ struct in6_addr in6;
+ union ib_gid sgid;
+ int ret;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ /* Get mac address */
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(ah_attr->grh.dgid.raw));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, ah->av.mac);
+ else
+ memcpy(ah->av.mac, ah_attr->dmac, sizeof(ah_attr->dmac));
+
+ /* Get source gid */
+ ret = ib_get_cached_gid(ibpd->device, ah_attr->port_num,
+ ah_attr->grh.sgid_index, &sgid, &gid_attr);
+ if (ret) {
+ dev_err(dev, "get sgid failed! ret = %d\n", ret);
+ kfree(ah);
+ return ERR_PTR(ret);
+ }
+
+ if (gid_attr.ndev) {
+ if (is_vlan_dev(gid_attr.ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
+ dev_put(gid_attr.ndev);
+ }
+
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+ ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | (ah_attr->port_num <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.vlan = cpu_to_le16(vlan_tag);
+ dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan);
+
+ if (ah_attr->static_rate)
+ ah->av.stat_rate = IB_RATE_10_GBPS;
+
+ memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, HNS_ROCE_GID_SIZE);
+ ah->av.sl_tclass_flowlabel = cpu_to_le32(ah_attr->sl <<
+ HNS_ROCE_SL_SHIFT);
+
+ return &ah->ibah;
+}
+
+int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+
+ ah_attr->sl = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_SL_SHIFT;
+ ah_attr->port_num = le32_to_cpu(ah->av.port_pd) >>
+ HNS_ROCE_PORT_NUM_SHIFT;
+ ah_attr->static_rate = ah->av.stat_rate;
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.traffic_class = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_TCLASS_SHIFT;
+ ah_attr->grh.flow_label = le32_to_cpu(ah->av.sl_tclass_flowlabel) &
+ HNS_ROCE_FLOW_LABLE_MASK;
+ ah_attr->grh.hop_limit = ah->av.hop_limit;
+ ah_attr->grh.sgid_index = ah->av.gid_index;
+ memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, HNS_ROCE_GID_SIZE);
+
+ return 0;
+}
+
+int hns_roce_destroy_ah(struct ib_ah *ah)
+{
+ kfree(to_hr_ah(ah));
+
+ return 0;
+}
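hns_roce_create_ah() above packs the service level into av.sl_tclass_flowlabel using HNS_ROCE_SL_SHIFT, and hns_roce_query_ah() recovers sl, traffic class and flow label from that same 32-bit word (the shift and mask values come from hns_roce_device.h, added later in this patch). A minimal sketch of that packing, with hypothetical helper names and values:

	/* Illustrative sketch only: how the service level and flow label
	 * share one 32-bit AV word; the helper names are hypothetical. */
	static inline u32 example_pack_av_word(u8 sl, u32 flow_label)
	{
		return ((u32)sl << HNS_ROCE_SL_SHIFT) |
		       (flow_label & HNS_ROCE_FLOW_LABLE_MASK);
	}

	static inline u8 example_unpack_sl(u32 word)
	{
		return word >> HNS_ROCE_SL_SHIFT; /* as hns_roce_query_ah() does */
	}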
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
new file mode 100644
index 000000000000..863a17a2de40
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+
+int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
+{
+ int ret = 0;
+
+ spin_lock(&bitmap->lock);
+ *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
+ if (*obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ *obj = find_first_zero_bit(bitmap->table, bitmap->max);
+ }
+
+ if (*obj < bitmap->max) {
+ set_bit(*obj, bitmap->table);
+ bitmap->last = (*obj + 1);
+ if (bitmap->last == bitmap->max)
+ bitmap->last = 0;
+ *obj |= bitmap->top;
+ } else {
+ ret = -1;
+ }
+
+ spin_unlock(&bitmap->lock);
+
+ return ret;
+}
+
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+{
+ hns_roce_bitmap_free_range(bitmap, obj, 1);
+}
+
+int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
+ int align, unsigned long *obj)
+{
+ int ret = 0;
+ int i;
+
+ if (likely(cnt == 1 && align == 1))
+ return hns_roce_bitmap_alloc(bitmap, obj);
+
+ spin_lock(&bitmap->lock);
+
+ *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
+ if (*obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
+ cnt, align - 1);
+ }
+
+ if (*obj < bitmap->max) {
+ for (i = 0; i < cnt; i++)
+ set_bit(*obj + i, bitmap->table);
+
+ if (*obj == bitmap->last) {
+ bitmap->last = (*obj + cnt);
+ if (bitmap->last >= bitmap->max)
+ bitmap->last = 0;
+ }
+ *obj |= bitmap->top;
+ } else {
+ ret = -1;
+ }
+
+ spin_unlock(&bitmap->lock);
+
+ return ret;
+}
+
+void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
+ unsigned long obj, int cnt)
+{
+ int i;
+
+ obj &= bitmap->max + bitmap->reserved_top - 1;
+
+ spin_lock(&bitmap->lock);
+ for (i = 0; i < cnt; i++)
+ clear_bit(obj + i, bitmap->table);
+
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ spin_unlock(&bitmap->lock);
+}
+
+int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top)
+{
+ u32 i;
+
+ if (num != roundup_pow_of_two(num))
+ return -EINVAL;
+
+ bitmap->last = 0;
+ bitmap->top = 0;
+ bitmap->max = num - reserved_top;
+ bitmap->mask = mask;
+ bitmap->reserved_top = reserved_top;
+ spin_lock_init(&bitmap->lock);
+ bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
+ GFP_KERNEL);
+ if (!bitmap->table)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved_bot; ++i)
+ set_bit(i, bitmap->table);
+
+ return 0;
+}
+
+void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
+{
+ kfree(bitmap->table);
+}
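hns_roce_bitmap_alloc() searches from bitmap->last, wraps to the start of the table when it reaches bitmap->max, and ORs bitmap->top into the returned object id. A minimal usage sketch of the helpers above, assuming a hypothetical table of 256 objects with the bottom 8 reserved:

	/* Minimal sketch only: exercising the bitmap helpers above.  The
	 * table size, mask and reserved counts are hypothetical. */
	static int example_bitmap_usage(void)
	{
		struct hns_roce_bitmap bitmap;
		unsigned long obj;
		int ret;

		ret = hns_roce_bitmap_init(&bitmap, 256, 256 - 1, 8, 0);
		if (ret)
			return ret;

		ret = hns_roce_bitmap_alloc(&bitmap, &obj); /* -1 when exhausted */
		if (!ret)
			hns_roce_bitmap_free(&bitmap, obj);

		hns_roce_bitmap_cleanup(&bitmap);
		return ret;
	}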
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
+ struct hns_roce_buf *buf)
+{
+ int i;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 bits_per_long = BITS_PER_LONG;
+
+ if (buf->nbufs == 1) {
+ dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
+ } else {
+ if (bits_per_long == 64)
+ vunmap(buf->direct.buf);
+
+ for (i = 0; i < buf->nbufs; ++i)
+ if (buf->page_list[i].buf)
+ dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+ buf->page_list[i].buf,
+ buf->page_list[i].map);
+ kfree(buf->page_list);
+ }
+}
+
+int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
+ struct hns_roce_buf *buf)
+{
+ int i = 0;
+ dma_addr_t t;
+ struct page **pages;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 bits_per_long = BITS_PER_LONG;
+
+ /* SQ/RQ buf is less than one page; SQ + RQ = 8K */
+ if (size <= max_direct) {
+ buf->nbufs = 1;
+ /* Npages calculated by page_size */
+ buf->npages = 1 << get_order(size);
+ buf->page_shift = PAGE_SHIFT;
+ /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
+ buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ buf->direct.map = t;
+
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
+
+ memset(buf->direct.buf, 0, size);
+ } else {
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
+ buf->page_shift = PAGE_SHIFT;
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
+ GFP_KERNEL);
+
+ if (!buf->page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i) {
+ buf->page_list[i].buf = dma_alloc_coherent(dev,
+ PAGE_SIZE, &t,
+ GFP_KERNEL);
+
+ if (!buf->page_list[i].buf)
+ goto err_free;
+
+ buf->page_list[i].map = t;
+ memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ }
+ if (bits_per_long == 64) {
+ pages = kmalloc_array(buf->nbufs, sizeof(*pages),
+ GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
+ PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ hns_roce_buf_free(hr_dev, size, buf);
+ return -ENOMEM;
+}
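hns_roce_buf_alloc() above uses a single coherent allocation when size fits within max_direct and otherwise falls back to a page list (vmapped into one virtual range on 64-bit). A hedged sketch of the calling pattern, using a hypothetical 8 KB queue and the two-page direct threshold that the CQ code later in this patch passes:

	/* Illustrative sketch only: allocating and freeing a queue buffer
	 * with the helpers above; the 8 KB size is hypothetical. */
	static int example_queue_buf(struct hns_roce_dev *hr_dev)
	{
		struct hns_roce_buf buf;
		int ret;

		ret = hns_roce_buf_alloc(hr_dev, 8192, PAGE_SIZE * 2, &buf);
		if (ret)
			return ret;

		/* ... fill entries via buf.direct.buf or buf.page_list[i].buf ... */

		hns_roce_buf_free(hr_dev, 8192, &buf);
		return 0;
	}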
+
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_qp_table(hr_dev);
+ hns_roce_cleanup_cq_table(hr_dev);
+ hns_roce_cleanup_mr_table(hr_dev);
+ hns_roce_cleanup_pd_table(hr_dev);
+ hns_roce_cleanup_uar_table(hr_dev);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
new file mode 100644
index 000000000000..2a0b6c05da5f
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+
+#define CMD_POLL_TOKEN 0xffff
+#define CMD_MAX_NUM 32
+#define STATUS_MASK 0xff
+#define CMD_TOKEN_MASK 0x1f
+#define GO_BIT_TIMEOUT_MSECS 10000
+
+enum {
+ HCR_TOKEN_OFFSET = 0x14,
+ HCR_STATUS_OFFSET = 0x18,
+ HCR_GO_BIT = 15,
+};
+
+static int cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
+
+ return (!!(status & (1 << HCR_GO_BIT)));
+}
+
+/* this function should be serialized with "hcr_mutex" */
+static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
+ u64 in_param, u64 out_param,
+ u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 __iomem *hcr = (u32 *)cmd->hcr;
+ int ret = -EAGAIN;
+ unsigned long end;
+ u32 val = 0;
+
+ end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
+ (int)end);
+ goto out;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ op);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+ __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+ /* Memory barrier */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val), hcr + 5);
+
+ mmiowb();
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier,
+ u8 op_modifier, u16 op, u16 token,
+ int event)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ int ret = -EAGAIN;
+
+ mutex_lock(&cmd->hcr_mutex);
+ ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op, token,
+ event);
+ mutex_unlock(&cmd->hcr_mutex);
+
+ return ret;
+}
+
+/* this should be called with "poll_sem" */
+static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 __iomem *hcr = hr_dev->cmd.hcr;
+ unsigned long end = 0;
+ u32 status = 0;
+ int ret;
+
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ CMD_POLL_TOKEN, 0);
+ if (ret) {
+ dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
+ goto out;
+ }
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (cmd_pending(hr_dev)) {
+ dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = le32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET));
+ if ((status & STATUS_MASK) != 0x1) {
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op, unsigned long timeout)
+{
+ int ret;
+
+ down(&hr_dev->cmd.poll_sem);
+ ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, timeout);
+ up(&hr_dev->cmd.poll_sem);
+
+ return ret;
+}
+
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param)
+{
+ struct hns_roce_cmd_context
+ *context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
+
+ if (token != context->token)
+ return;
+
+ context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
+ context->out_param = out_param;
+ complete(&context->done);
+}
+
+/* this should be called with "use_events" */
+static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_context *context;
+ int ret = 0;
+
+ spin_lock(&cmd->context_lock);
+ WARN_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ context->token, 1);
+ if (ret)
+ goto out;
+
+ /*
+ * wait_for_completion_timeout() returns 0 on timeout; a non-zero
+ * return value is the number of jiffies remaining of the timeout
+ * that was passed in.
+ */
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = context->result;
+ if (ret) {
+ dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op, unsigned long timeout)
+{
+ int ret = 0;
+
+ down(&hr_dev->cmd.event_sem);
+ ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op, timeout);
+ up(&hr_dev->cmd.event_sem);
+
+ return ret;
+}
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ if (hr_dev->cmd.use_events)
+ return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
+ else
+ return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
+}
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ mutex_init(&hr_dev->cmd.hcr_mutex);
+ sema_init(&hr_dev->cmd.poll_sem, 1);
+ hr_dev->cmd.use_events = 0;
+ hr_dev->cmd.toggle = 1;
+ hr_dev->cmd.max_cmds = CMD_MAX_NUM;
+ hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
+ hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
+ HNS_ROCE_MAILBOX_SIZE,
+ HNS_ROCE_MAILBOX_SIZE, 0);
+ if (!hr_dev->cmd.pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
+{
+ dma_pool_destroy(hr_dev->cmd.pool);
+}
+
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+ int i;
+
+ hr_cmd->context = kmalloc(hr_cmd->max_cmds *
+ sizeof(struct hns_roce_cmd_context),
+ GFP_KERNEL);
+ if (!hr_cmd->context)
+ return -ENOMEM;
+
+ for (i = 0; i < hr_cmd->max_cmds; ++i) {
+ hr_cmd->context[i].token = i;
+ hr_cmd->context[i].next = i + 1;
+ }
+
+ hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
+ hr_cmd->free_head = 0;
+
+ sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
+ spin_lock_init(&hr_cmd->context_lock);
+
+ hr_cmd->token_mask = CMD_TOKEN_MASK;
+ hr_cmd->use_events = 1;
+
+ down(&hr_cmd->poll_sem);
+
+ return 0;
+}
+
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+ int i;
+
+ hr_cmd->use_events = 0;
+
+ for (i = 0; i < hr_cmd->max_cmds; ++i)
+ down(&hr_cmd->event_sem);
+
+ kfree(hr_cmd->context);
+ up(&hr_cmd->poll_sem);
+}
+
+struct hns_roce_cmd_mailbox
+ *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
new file mode 100644
index 000000000000..e3997d312c55
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_CMD_H
+#define _HNS_ROCE_CMD_H
+
+#define HNS_ROCE_MAILBOX_SIZE 4096
+
+enum {
+ /* TPT commands */
+ HNS_ROCE_CMD_SW2HW_MPT = 0xd,
+ HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+
+ /* CQ commands */
+ HNS_ROCE_CMD_SW2HW_CQ = 0x16,
+ HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+
+ /* QP/EE commands */
+ HNS_ROCE_CMD_RST2INIT_QP = 0x19,
+ HNS_ROCE_CMD_INIT2RTR_QP = 0x1a,
+ HNS_ROCE_CMD_RTR2RTS_QP = 0x1b,
+ HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
+ HNS_ROCE_CMD_2ERR_QP = 0x1e,
+ HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
+ HNS_ROCE_CMD_2RST_QP = 0x21,
+ HNS_ROCE_CMD_QUERY_QP = 0x22,
+};
+
+enum {
+ HNS_ROCE_CMD_TIME_CLASS_A = 10000,
+ HNS_ROCE_CMD_TIME_CLASS_B = 10000,
+ HNS_ROCE_CMD_TIME_CLASS_C = 10000,
+};
+
+struct hns_roce_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout);
+
+struct hns_roce_cmd_mailbox
+ *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox);
+
+#endif /* _HNS_ROCE_CMD_H */
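The header above declares the mailbox command interface; the usual pattern (as in hns_roce_sw2hw_cq() later in this patch) is to allocate a mailbox, fill its DMA-coherent buffer, post the command, then free the mailbox. A sketch with a hypothetical caller:

	/* Illustrative sketch only, modelled on the SW2HW_CQ path elsewhere
	 * in this patch; 'cqn' and the context fill are placeholders. */
	static int example_sw2hw_cq(struct hns_roce_dev *hr_dev, unsigned long cqn)
	{
		struct hns_roce_cmd_mailbox *mailbox;
		int ret;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		/* ... write the CQ context into mailbox->buf ... */

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, cqn, 0,
					HNS_ROCE_CMD_SW2HW_CQ,
					HNS_ROCE_CMD_TIME_CLASS_A);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		return ret;
	}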
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
new file mode 100644
index 000000000000..297016103aa7
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_COMMON_H
+#define _HNS_ROCE_COMMON_H
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
+#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
+#define roce_raw_write(value, addr) \
+ __raw_writel((__force u32)cpu_to_le32(value), (addr))
+
+#define roce_get_field(origin, mask, shift) \
+ (((origin) & (mask)) >> (shift))
+
+#define roce_get_bit(origin, shift) \
+ roce_get_field((origin), (1ul << (shift)), (shift))
+
+#define roce_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((u32)(val) << (shift)) & (mask)); \
+ } while (0)
+
+#define roce_set_bit(origin, shift, val) \
+ roce_set_field((origin), (1ul << (shift)), (shift), (val))
+
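roce_set_field()/roce_get_field() above are plain mask-and-shift helpers. A short worked example with a hypothetical field occupying bits 23:16:

	/* Worked example only: EXAMPLE_FIELD_* are hypothetical, not real
	 * register fields. */
	#define EXAMPLE_FIELD_S 16
	#define EXAMPLE_FIELD_M (((1UL << 8) - 1) << EXAMPLE_FIELD_S)

	static inline u32 example_pack(u32 word, u8 v)
	{
		roce_set_field(word, EXAMPLE_FIELD_M, EXAMPLE_FIELD_S, v);
		return word;	/* bits 23:16 now hold v, other bits untouched */
	}

	static inline u8 example_unpack(u32 word)
	{
		return roce_get_field(word, EXAMPLE_FIELD_M, EXAMPLE_FIELD_S);
	}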
+#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
+#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
+
+#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
+
+#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
+
+#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
+#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
+ (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
+
+#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
+
+#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
+#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
+ (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
+#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
+ (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
+#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
+ (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
+#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
+ (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
+
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
+ (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
+
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
+ (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
+
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
+ (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
+
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
+ (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
+
+#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
+#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
+ (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
+ (((1UL << 15) - 1) << \
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
+ (((1UL << 4) - 1) << \
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
+
+#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
+#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
+
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
+
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
+ (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
+ (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
+
+#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
+#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
+ (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
+
+#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
+#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
+ (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
+
+#define ROCEE_MB6_ROCEE_MB_CMD_S 0
+#define ROCEE_MB6_ROCEE_MB_CMD_M \
+ (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
+
+#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
+#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
+ (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
+
+#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
+
+#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
+
+#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
+#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
+ (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
+ (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
+ (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
+ (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
+
+#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
+#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
+ (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
+
+#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
+#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
+ (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
+ (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
+ (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
+
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
+ (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
+
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
+ (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
+
+#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
+#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
+ (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
+
+#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
+#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
+#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
+
+#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
+#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
+
+#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
+
+#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
+#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
+ (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
+
+#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
+#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
+ (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
+
+#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
+#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
+ (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
+
+/*************ROCEE_REG DEFINITION****************/
+#define ROCEE_VENDOR_ID_REG 0x0
+#define ROCEE_VENDOR_PART_ID_REG 0x4
+
+#define ROCEE_HW_VERSION_REG 0x8
+
+#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
+#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
+
+#define ROCEE_PORT_GID_L_0_REG 0x50
+#define ROCEE_PORT_GID_ML_0_REG 0x54
+#define ROCEE_PORT_GID_MH_0_REG 0x58
+#define ROCEE_PORT_GID_H_0_REG 0x5C
+
+#define ROCEE_BT_CMD_H_REG 0x204
+
+#define ROCEE_SMAC_L_0_REG 0x240
+#define ROCEE_SMAC_H_0_REG 0x244
+
+#define ROCEE_QP1C_CFG3_0_REG 0x27C
+
+#define ROCEE_CAEP_AEQE_CONS_IDX_REG 0x3AC
+#define ROCEE_CAEP_CEQC_CONS_IDX_0_REG 0x3BC
+
+#define ROCEE_ECC_UCERR_ALM1_REG 0xB38
+#define ROCEE_ECC_UCERR_ALM2_REG 0xB3C
+#define ROCEE_ECC_CERR_ALM1_REG 0xB44
+#define ROCEE_ECC_CERR_ALM2_REG 0xB48
+
+#define ROCEE_ACK_DELAY_REG 0x14
+#define ROCEE_GLB_CFG_REG 0x18
+
+#define ROCEE_DMAE_USER_CFG1_REG 0x40
+#define ROCEE_DMAE_USER_CFG2_REG 0x44
+
+#define ROCEE_DB_SQ_WL_REG 0x154
+#define ROCEE_DB_OTHERS_WL_REG 0x158
+#define ROCEE_RAQ_WL_REG 0x15C
+#define ROCEE_WRMS_POL_TIME_INTERVAL_REG 0x160
+#define ROCEE_EXT_DB_SQ_REG 0x164
+#define ROCEE_EXT_DB_SQ_H_REG 0x168
+#define ROCEE_EXT_DB_OTH_REG 0x16C
+
+#define ROCEE_EXT_DB_OTH_H_REG 0x170
+#define ROCEE_EXT_DB_SQ_WL_EMPTY_REG 0x174
+#define ROCEE_EXT_DB_SQ_WL_REG 0x178
+#define ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG 0x17C
+#define ROCEE_EXT_DB_OTHERS_WL_REG 0x180
+#define ROCEE_EXT_RAQ_REG 0x184
+#define ROCEE_EXT_RAQ_H_REG 0x188
+
+#define ROCEE_CAEP_CE_INTERVAL_CFG_REG 0x190
+#define ROCEE_CAEP_CE_BURST_NUM_CFG_REG 0x194
+#define ROCEE_BT_CMD_L_REG 0x200
+
+#define ROCEE_MB1_REG 0x210
+#define ROCEE_DB_SQ_L_0_REG 0x230
+#define ROCEE_DB_OTHERS_L_0_REG 0x238
+#define ROCEE_QP1C_CFG0_0_REG 0x270
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_REG 0x3A0
+#define ROCEE_CAEP_CEQC_SHIFT_0_REG 0x3B0
+#define ROCEE_CAEP_CE_IRQ_MASK_0_REG 0x3C0
+#define ROCEE_CAEP_CEQ_ALM_OVF_0_REG 0x3C4
+#define ROCEE_CAEP_AE_MASK_REG 0x6C8
+#define ROCEE_CAEP_AE_ST_REG 0x6CC
+
+#define ROCEE_SDB_ISSUE_PTR_REG 0x758
+#define ROCEE_SDB_SEND_PTR_REG 0x75C
+#define ROCEE_SDB_INV_CNT_REG 0x9A4
+#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
+#define ROCEE_ECC_CERR_ALM0_REG 0xB40
+
+#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
new file mode 100644
index 000000000000..875597b0e69c
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_user.h"
+#include "hns_roce_common.h"
+
+static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
+{
+ struct ib_cq *ibcq = &hr_cq->ib_cq;
+
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
+ enum hns_roce_event event_type)
+{
+ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+
+ ibcq = &hr_cq->ib_cq;
+ hr_dev = to_hr_dev(ibcq->device);
+
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(&hr_dev->pdev->dev,
+ "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
+ event_type, hr_cq->cqn);
+ return;
+ }
+
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.event = IB_EVENT_CQ_ERR;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+}
+
+static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long cq_num)
+{
+ return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
+ HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+}
+
+static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
+ struct hns_roce_mtt *hr_mtt,
+ struct hns_roce_uar *hr_uar,
+ struct hns_roce_cq *hr_cq, int vector,
+ int collapsed)
+{
+ struct hns_roce_cmd_mailbox *mailbox = NULL;
+ struct hns_roce_cq_table *cq_table = NULL;
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t dma_handle;
+ u64 *mtts = NULL;
+ int ret = 0;
+
+ cq_table = &hr_dev->cq_table;
+
+ /* Get the physical address of cq buf */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_mtt->first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
+ return -EINVAL;
+ }
+
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "CQ alloc.Invalid vector.\n");
+ return -EINVAL;
+ }
+ hr_cq->vector = vector;
+
+ ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
+ if (ret == -1) {
+ dev_err(dev, "CQ alloc.Failed to alloc index.\n");
+ return -ENOMEM;
+ }
+
+ /* Get CQC memory HEM (Hardware Entry Memory) table */
+ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ goto err_out;
+ }
+
+ /* Insert the CQ into the radix tree, keyed by its CQ number */
+ spin_lock_irq(&cq_table->lock);
+ ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
+ spin_unlock_irq(&cq_table->lock);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
+ goto err_put;
+ }
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
+ nent, vector);
+
+ /* Send mailbox to hw */
+ ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ goto err_radix;
+ }
+
+ hr_cq->cons_index = 0;
+ hr_cq->uar = hr_uar;
+
+ return 0;
+
+err_radix:
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, hr_cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+err_put:
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+
+err_out:
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ return ret;
+}
+
+static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long cq_num)
+{
+ return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+}
+
+static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret;
+
+ ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ if (ret)
+ dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ hr_cq->cqn);
+
+ /* Wait for any in-flight interrupt handling on this CQ's vector */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, hr_cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+}
+
+static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
+ struct ib_ucontext *context,
+ struct hns_roce_cq_buf *buf,
+ struct ib_umem **umem, u64 buf_addr, int cqe)
+{
+ int ret;
+
+ *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(*umem))
+ return PTR_ERR(*umem);
+
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
+ ilog2((unsigned int)(*umem)->page_size),
+ &buf->hr_mtt);
+ if (ret)
+ goto err_buf;
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ if (ret)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+
+err_buf:
+ ib_umem_release(*umem);
+ return ret;
+}
+
+static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq_buf *buf, u32 nent)
+{
+ int ret;
+
+ ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
+ PAGE_SIZE * 2, &buf->hr_buf);
+ if (ret)
+ goto out;
+
+ ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
+ buf->hr_buf.page_shift, &buf->hr_mtt);
+ if (ret)
+ goto err_buf;
+
+ ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ if (ret)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+
+err_buf:
+ hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
+ &buf->hr_buf);
+out:
+ return ret;
+}
+
+static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq_buf *buf, int cqe)
+{
+ hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
+ &buf->hr_buf);
+}
+
+struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_ib_create_cq ucmd;
+ struct hns_roce_cq *hr_cq = NULL;
+ struct hns_roce_uar *uar = NULL;
+ int vector = attr->comp_vector;
+ int cq_entries = attr->cqe;
+ int ret = 0;
+
+ if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
+ dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ cq_entries, hr_dev->caps.max_cqes);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+ if (!hr_cq)
+ return ERR_PTR(-ENOMEM);
+
+ /* In v1 engine, parameter verification */
+ if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
+ cq_entries = HNS_ROCE_MIN_CQE_NUM;
+
+ cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1;
+ mutex_init(&hr_cq->resize_mutex);
+ spin_lock_init(&hr_cq->lock);
+ hr_cq->hr_resize_buf = NULL;
+ hr_cq->resize_umem = NULL;
+
+ if (context) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ dev_err(dev, "Failed to copy_from_udata.\n");
+ ret = -EFAULT;
+ goto err_cq;
+ }
+
+ /* Get user space address, write it into mtt table */
+ ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
+ &hr_cq->umem, ucmd.buf_addr,
+ cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to get_cq_umem.\n");
+ goto err_cq;
+ }
+
+ /* Get user space parameters */
+ uar = &to_hr_ucontext(context)->uar;
+ } else {
+ /* Init mtt table and write the buffer address into it */
+ ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
+ cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to alloc_cq_buf.\n");
+ goto err_cq;
+ }
+
+ uar = &hr_dev->priv_uar;
+ hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
+ 0x1000 * uar->index;
+ }
+
+ /* Allocate cq index, fill cq_context */
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
+ uar, hr_cq, vector, 0);
+ if (ret) {
+ dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ goto err_mtt;
+ }
+
+ /* Set the completion and async event handlers for the new CQ */
+ hr_cq->comp = hns_roce_ib_cq_comp;
+ hr_cq->event = hns_roce_ib_cq_event;
+ hr_cq->cq_depth = cq_entries;
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
+ ret = -EFAULT;
+ goto err_mtt;
+ }
+ }
+
+ return &hr_cq->ib_cq;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ if (context)
+ ib_umem_release(hr_cq->umem);
+ else
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+ hr_cq->ib_cq.cqe);
+
+err_cq:
+ kfree(hr_cq);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+
+ hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+ if (ib_cq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else
+ /* Free the buffer backing the CQ */
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+
+ kfree(hr_cq);
+
+ return 0;
+}
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cq *cq;
+
+ cq = radix_tree_lookup(&hr_dev->cq_table.tree,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!cq) {
+ dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ return;
+ }
+
+ cq->comp(cq);
+}
+
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cq *cq;
+
+ cq = radix_tree_lookup(&cq_table->tree,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (cq)
+ atomic_inc(&cq->refcount);
+
+ if (!cq) {
+ dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, (enum hns_roce_event)event_type);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
+ return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
+ hr_dev->caps.num_cqs - 1,
+ hr_dev->caps.reserved_cqs, 0);
+}
+
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
new file mode 100644
index 000000000000..ea735800eb18
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_DEVICE_H
+#define _HNS_ROCE_DEVICE_H
+
+#include <rdma/ib_verbs.h>
+
+#define DRV_NAME "hns_roce"
+
+#define MAC_ADDR_OCTET_NUM 6
+#define HNS_ROCE_MAX_MSG_LEN 0x80000000
+
+#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
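HNS_ROCE_ALOGN_UP(a, b) rounds a up to the next multiple of b; a one-line worked example (values hypothetical):

	/* Worked example only: rounding 4000 bytes up to a 4096-byte
	 * boundary yields 4096. */
	size_t aligned = HNS_ROCE_ALOGN_UP(4000, 4096);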
+
+#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
+
+#define HNS_ROCE_BA_SIZE (32 * 4096)
+
+/* Hardware specification only for v1 engine */
+#define HNS_ROCE_MIN_CQE_NUM 0x40
+#define HNS_ROCE_MIN_WQE_NUM 0x20
+
+/* Hardware specification only for v1 engine */
+#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
+#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
+
+#define HNS_ROCE_MAX_IRQ_NUM 34
+
+#define HNS_ROCE_COMP_VEC_NUM 32
+
+#define HNS_ROCE_AEQE_VEC_NUM 1
+#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+
+/* 4G/4K = 1M */
+#define HNS_ROCE_SL_SHIFT 29
+#define HNS_ROCE_TCLASS_SHIFT 20
+#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
+
+#define HNS_ROCE_MAX_PORTS 6
+#define HNS_ROCE_MAX_GID_NUM 16
+#define HNS_ROCE_GID_SIZE 16
+
+#define MR_TYPE_MR 0x00
+#define MR_TYPE_DMA 0x03
+
+#define PKEY_ID 0xffff
+#define NODE_DESC_SIZE 64
+
+#define SERV_TYPE_RC 0
+#define SERV_TYPE_RD 1
+#define SERV_TYPE_UC 2
+#define SERV_TYPE_UD 3
+
+#define PAGES_SHIFT_8 8
+#define PAGES_SHIFT_16 16
+#define PAGES_SHIFT_24 24
+#define PAGES_SHIFT_32 32
+
+enum hns_roce_qp_state {
+ HNS_ROCE_QP_STATE_RST,
+ HNS_ROCE_QP_STATE_INIT,
+ HNS_ROCE_QP_STATE_RTR,
+ HNS_ROCE_QP_STATE_RTS,
+ HNS_ROCE_QP_STATE_SQD,
+ HNS_ROCE_QP_STATE_ERR,
+ HNS_ROCE_QP_NUM_STATE,
+};
+
+enum hns_roce_event {
+ HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
+ HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
+ HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
+ HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
+ HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
+ HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
+ HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
+ HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
+ HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
+ HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
+ HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
+ HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
+ HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
+ /* 0x10 and 0x11 are unused in the current application case */
+ HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
+ HNS_ROCE_EVENT_TYPE_MB = 0x13,
+ HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+};
+
+/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
+enum {
+ HNS_ROCE_LWQCE_QPC_ERROR = 1,
+ HNS_ROCE_LWQCE_MTU_ERROR = 2,
+ HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
+ HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
+ HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
+ HNS_ROCE_LWQCE_SL_ERROR = 6,
+ HNS_ROCE_LWQCE_PORT_ERROR = 7,
+};
+
+/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
+enum {
+ HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
+ HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
+ HNS_ROCE_LAVWQE_VA_ERROR = 3,
+ HNS_ROCE_LAVWQE_PD_ERROR = 4,
+ HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
+ HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
+ HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
+};
+
+/* DOORBELL overflow subtype */
+enum {
+ HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
+ HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
+};
+
+enum {
+ /* RQ&SRQ related operations */
+ HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
+ HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
+};
+
+#define HNS_ROCE_CMD_SUCCESS 1
+
+#define HNS_ROCE_PORT_DOWN 0
+#define HNS_ROCE_PORT_UP 1
+
+#define HNS_ROCE_MTT_ENTRY_PER_SEG 8
+
+#define PAGE_ADDR_SHIFT 12
+
+struct hns_roce_uar {
+ u64 pfn;
+ unsigned long index;
+};
+
+struct hns_roce_ucontext {
+ struct ib_ucontext ibucontext;
+ struct hns_roce_uar uar;
+};
+
+struct hns_roce_pd {
+ struct ib_pd ibpd;
+ unsigned long pdn;
+};
+
+struct hns_roce_bitmap {
+ /* Position where the last bitmap search stopped */
+ unsigned long last;
+ unsigned long top;
+ unsigned long max;
+ unsigned long reserved_top;
+ unsigned long mask;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+/*
+ * Buddy allocator bitmaps: the bitmap at a given order holds
+ * 1 << (max_order - order) bits, so order 0 is the largest bitmap and
+ * order == max_order has a single bit. Each bit describes one buddy
+ * block: 1 means free and available, 0 means unavailable. Initially
+ * only the single bit at max_order is set.
+ */
+struct hns_roce_buddy {
+ /* One bitmap per order level */
+ unsigned long **bits;
+ /* Number of free bits at each order level */
+ u32 *num_free;
+ int max_order;
+ spinlock_t lock;
+};
+
+/* For Hardware Entry Memory */
+struct hns_roce_hem_table {
+ /* HEM type: 0 = qpc, 1 = mtpt, 2 = cqc, 3 = srqc; see the HEM_TYPE_* values */
+ u32 type;
+ /* Number of elements in the HEM array */
+ unsigned long num_hem;
+ /* Total number of objects recorded by the HEM entries */
+ unsigned long num_obj;
+ /* Size of a single object */
+ unsigned long obj_size;
+ int lowmem;
+ struct mutex mutex;
+ struct hns_roce_hem **hem;
+};
+
+struct hns_roce_mtt {
+ unsigned long first_seg;
+ int order;
+ int page_shift;
+};
+
+/* Only 4K page size is supported for MR registration */
+#define MR_SIZE_4K 0
+
+struct hns_roce_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ u64 iova; /* MR's virtual original addr */
+ u64 size; /* Address range of MR */
+ u32 key; /* Key of MR */
+ u32 pd; /* PD num of MR */
+ u32 access; /* Access permission of MR */
+ int enabled; /* MR's active status */
+ int type; /* MR's register type */
+ u64 *pbl_buf; /* MR's PBL space */
+ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
+};
+
+struct hns_roce_mr_table {
+ struct hns_roce_bitmap mtpt_bitmap;
+ struct hns_roce_buddy mtt_buddy;
+ struct hns_roce_hem_table mtt_table;
+ struct hns_roce_hem_table mtpt_table;
+};
+
+struct hns_roce_wq {
+ u64 *wrid; /* Work request ID */
+ spinlock_t lock;
+ int wqe_cnt; /* WQE num */
+ u32 max_post;
+ int max_gs;
+ int offset;
+ int wqe_shift; /* WQE size */
+ u32 head;
+ u32 tail;
+ void __iomem *db_reg_l;
+};
+
+struct hns_roce_buf_list {
+ void *buf;
+ dma_addr_t map;
+};
+
+struct hns_roce_buf {
+ struct hns_roce_buf_list direct;
+ struct hns_roce_buf_list *page_list;
+ int nbufs;
+ u32 npages;
+ int page_shift;
+};
+
+struct hns_roce_cq_buf {
+ struct hns_roce_buf hr_buf;
+ struct hns_roce_mtt hr_mtt;
+};
+
+struct hns_roce_cq_resize {
+ struct hns_roce_cq_buf hr_buf;
+ int cqe;
+};
+
+struct hns_roce_cq {
+ struct ib_cq ib_cq;
+ struct hns_roce_cq_buf hr_buf;
+ /* pointer to the information stored after a CQ resize */
+ struct hns_roce_cq_resize *hr_resize_buf;
+ spinlock_t lock;
+ struct mutex resize_mutex;
+ struct ib_umem *umem;
+ struct ib_umem *resize_umem;
+ void (*comp)(struct hns_roce_cq *);
+ void (*event)(struct hns_roce_cq *, enum hns_roce_event);
+
+ struct hns_roce_uar *uar;
+ u32 cq_depth;
+ u32 cons_index;
+ void __iomem *cq_db_l;
+ void __iomem *tptr_addr;
+ unsigned long cqn;
+ u32 vector;
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct hns_roce_srq {
+ struct ib_srq ibsrq;
+ int srqn;
+};
+
+struct hns_roce_uar_table {
+ struct hns_roce_bitmap bitmap;
+};
+
+struct hns_roce_qp_table {
+ struct hns_roce_bitmap bitmap;
+ spinlock_t lock;
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+};
+
+struct hns_roce_cq_table {
+ struct hns_roce_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct hns_roce_hem_table table;
+};
+
+struct hns_roce_raq_table {
+ struct hns_roce_buf_list *e_raq_buf;
+};
+
+struct hns_roce_av {
+ __le32 port_pd;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ __le32 sl_tclass_flowlabel;
+ u8 dgid[HNS_ROCE_GID_SIZE];
+ u8 mac[6];
+ __le16 vlan;
+};
+
+struct hns_roce_ah {
+ struct ib_ah ibah;
+ struct hns_roce_av av;
+};
+
+struct hns_roce_cmd_context {
+ struct completion done;
+ int result;
+ int next;
+ u64 out_param;
+ u16 token;
+};
+
+struct hns_roce_cmdq {
+ struct dma_pool *pool;
+ u8 __iomem *hcr;
+ struct mutex hcr_mutex;
+ struct semaphore poll_sem;
+ /*
+ * Event mode: limits the number of outstanding commands so that
+ * max_cmds is never exceeded and the context region is not overrun.
+ */
+ struct semaphore event_sem;
+ int max_cmds;
+ spinlock_t context_lock;
+ int free_head;
+ struct hns_roce_cmd_context *context;
+ /*
+ * Mask derived from max_cmds rounded up to a power of two, used to
+ * map a completion token back to its command context.
+ */
+ u16 token_mask;
+ /*
+ * Whether the command path runs in event mode. Once the command
+ * event queue is ready the driver can switch to event mode; when
+ * the device is closed it switches back to poll (non-event) mode.
+ */
+ u8 use_events;
+ u8 toggle;
+};
+
+struct hns_roce_dev;
+
+struct hns_roce_qp {
+ struct ib_qp ibqp;
+ struct hns_roce_buf hr_buf;
+ struct hns_roce_wq rq;
+ __le64 doorbell_qpn;
+ __le32 sq_signal_bits;
+ u32 sq_next_wqe;
+ int sq_max_wqes_per_wr;
+ int sq_spare_wqes;
+ struct hns_roce_wq sq;
+
+ struct ib_umem *umem;
+ struct hns_roce_mtt mtt;
+ u32 buff_size;
+ struct mutex mutex;
+ u8 port;
+ u8 sl;
+ u8 resp_depth;
+ u8 state;
+ u32 access_flags;
+ u32 pkey_index;
+ void (*event)(struct hns_roce_qp *,
+ enum hns_roce_event);
+ unsigned long qpn;
+
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct hns_roce_sqp {
+ struct hns_roce_qp hr_qp;
+};
+
+struct hns_roce_ib_iboe {
+ spinlock_t lock;
+ struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
+ struct notifier_block nb;
+ struct notifier_block nb_inet;
+ /* 16 GIDs are shared by the 6 ports in the v1 engine. */
+ union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
+ u8 phy_port[HNS_ROCE_MAX_PORTS];
+};
+
+struct hns_roce_eq {
+ struct hns_roce_dev *hr_dev;
+ void __iomem *doorbell;
+
+ int type_flag; /* HNS_ROCE_AEQ or HNS_ROCE_CEQ */
+ int eqn;
+ u32 entries;
+ int log_entries;
+ int eqe_size;
+ int irq;
+ int log_page_size;
+ int cons_index;
+ struct hns_roce_buf_list *buf_list;
+};
+
+struct hns_roce_eq_table {
+ struct hns_roce_eq *eq;
+ void __iomem **eqc_base;
+};
+
+struct hns_roce_caps {
+ u8 num_ports;
+ int gid_table_len[HNS_ROCE_MAX_PORTS];
+ int pkey_table_len[HNS_ROCE_MAX_PORTS];
+ int local_ca_ack_delay;
+ int num_uars;
+ u32 phy_num_uars;
+ u32 max_sq_sg; /* 2 */
+ u32 max_sq_inline; /* 32 */
+ u32 max_rq_sg; /* 2 */
+ int num_qps; /* 256k */
+ u32 max_wqes; /* 16k */
+ u32 max_sq_desc_sz; /* 64 */
+ u32 max_rq_desc_sz; /* 64 */
+ int max_qp_init_rdma;
+ int max_qp_dest_rdma;
+ int sqp_start;
+ int num_cqs;
+ int max_cqes;
+ int reserved_cqs;
+ int num_aeq_vectors; /* 1 */
+ int num_comp_vectors; /* 32 ceq */
+ int num_other_vectors;
+ int num_mtpts;
+ u32 num_mtt_segs;
+ int reserved_mrws;
+ int reserved_uars;
+ int num_pds;
+ int reserved_pds;
+ u32 mtt_entry_sz;
+ u32 cq_entry_sz;
+ u32 page_size_cap;
+ u32 reserved_lkey;
+ int mtpt_entry_sz;
+ int qpc_entry_sz;
+ int irrl_entry_sz;
+ int cqc_entry_sz;
+ int aeqe_depth;
+ int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ enum ib_mtu max_mtu;
+};
+
+struct hns_roce_hw {
+ int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
+ void (*hw_profile)(struct hns_roce_dev *hr_dev);
+ int (*hw_init)(struct hns_roce_dev *hr_dev);
+ void (*hw_exit)(struct hns_roce_dev *hr_dev);
+ void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid);
+ void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu);
+ int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx);
+ void (*write_cqc)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+ dma_addr_t dma_handle, int nent, u32 vector);
+ int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+ int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+ int (*destroy_qp)(struct ib_qp *ibqp);
+ int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+ int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ void *priv;
+};
+
+struct hns_roce_dev {
+ struct ib_device ib_dev;
+ struct platform_device *pdev;
+ struct hns_roce_uar priv_uar;
+ const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
+ spinlock_t sm_lock;
+ spinlock_t cq_db_lock;
+ spinlock_t bt_cmd_lock;
+ struct hns_roce_ib_iboe iboe;
+
+ int irq[HNS_ROCE_MAX_IRQ_NUM];
+ u8 __iomem *reg_base;
+ struct hns_roce_caps caps;
+ struct radix_tree_root qp_table_tree;
+
+ unsigned char dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
+ u64 sys_image_guid;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_rev;
+ void __iomem *priv_addr;
+
+ struct hns_roce_cmdq cmd;
+ struct hns_roce_bitmap pd_bitmap;
+ struct hns_roce_uar_table uar_table;
+ struct hns_roce_mr_table mr_table;
+ struct hns_roce_cq_table cq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
+
+ int cmd_mod;
+ int loop_idc;
+ struct hns_roce_hw *hw;
+};
+
+static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
+{
+ return container_of(ib_dev, struct hns_roce_dev, ib_dev);
+}
+
+static inline struct hns_roce_ucontext
+ *to_hr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
+}
+
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
+static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct hns_roce_ah, ibah);
+}
+
+static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct hns_roce_mr, ibmr);
+}
+
+static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct hns_roce_qp, ibqp);
+}
+
+static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
+{
+ return container_of(ib_cq, struct hns_roce_cq, ib_cq);
+}
+
+static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct hns_roce_srq, ibsrq);
+}
+
+static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
+{
+ return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
+}
+
+static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
+{
+ __raw_writeq(*(u64 *) val, dest);
+}
+
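+/* Look up a QP by its QPN (masked to the QP table size) in the device's radix tree */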
+static inline struct hns_roce_qp
+ *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ return radix_tree_lookup(&hr_dev->qp_table_tree,
+ qpn & (hr_dev->caps.num_qps - 1));
+}
+
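+/*
+ * Return a pointer 'offset' bytes into a hns_roce buffer, whether the
+ * buffer was allocated as a single direct block or as a page list.
+ */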
+static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
+{
+ u32 bits_per_long_val = BITS_PER_LONG;
+
+ if (bits_per_long_val == 64 || buf->nbufs == 1)
+ return (char *)(buf->direct.buf) + offset;
+ else
+ return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
+ (offset & (PAGE_SIZE - 1));
+}
+
+int hns_roce_init_uar_table(struct hns_roce_dev *dev);
+int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
+void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
+void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param);
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
+
+int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
+ struct hns_roce_mtt *mtt);
+void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt);
+int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
+
+int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+
+void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
+
+int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top);
+void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
+int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
+ int align, unsigned long *obj);
+void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
+ unsigned long obj, int cnt);
+
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int hns_roce_destroy_ah(struct ib_ah *ah);
+
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int hns_roce_dealloc_pd(struct ib_pd *pd);
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int hns_roce_dereg_mr(struct ib_mr *ibmr);
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
+ struct hns_roce_buf *buf);
+int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
+ struct hns_roce_buf *buf);
+
+int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct ib_umem *umem);
+
+struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+ struct ib_cq *ib_cq);
+enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
+ int cnt);
+__be32 send_ieth(struct ib_send_wr *wr);
+int to_hr_qp_type(int qp_type);
+
+struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+
+extern struct hns_roce_hw hns_roce_hw_v1;
+
+#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
new file mode 100644
index 000000000000..98af7fecf2f1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -0,0 +1,762 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_eq.h"
+
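+/*
+ * Update the EQ consumer index doorbell; the req_not bit placed above
+ * the index bits asks the hardware for further event notification.
+ */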
+static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
+ (req_not << eq->log_entries), eq->doorbell);
+ /* Memory barrier */
+ mb();
+}
+
+static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
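+/*
+ * Return the next AEQE if it is owned by software, otherwise NULL.
+ * The ownership bit toggles each time the consumer index wraps.
+ */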
+static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+}
+
+static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+}
+
+static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
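+/*
+ * Walk the asynchronous event queue, dispatching each AEQE to the QP,
+ * CQ or mailbox handler, then ring the consumer index doorbell.
+ * Returns non-zero if any AEQEs were processed.
+ */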
+static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int qpn = 0;
+
+ while ((aeqe = next_aeqe_sw(eq))) {
+ dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ /* Make sure the AEQE is read only after the ownership check */
+ rmb();
+
+ switch (roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "qpn = 0x%lx\n",
+ roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
+ hns_roce_qp_event(hr_dev,
+ roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ not support!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%lx access err.\n",
+ roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+ hns_roce_cq_event(hr_dev,
+ le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%lx overflow\n",
+ roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+ hns_roce_cq_event(hr_dev,
+ le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ ID invalid.\n");
+ hns_roce_cq_event(hr_dev,
+ le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
+ eq->eqn, eq->cons_index);
+ break;
+ }
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to zero\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ eq_set_cons_index(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
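+/*
+ * Return the next CEQE if it is owned by software, otherwise NULL.
+ * The ownership bit toggles each time the consumer index wraps.
+ */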
+static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->ceqe.comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
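+/*
+ * Walk the completion event queue and forward each reported CQN to
+ * hns_roce_cq_completion(). Returns non-zero if any CEQEs were processed.
+ */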
+static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw(eq))) {
+ /* Make sure the CEQE is read only after the ownership check */
+ rmb();
+ cqn = roce_get_field(ceqe->ceqe.comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to zero\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ eq_set_cons_index(eq, 0);
+
+ return ceqes_found;
+}
+
+static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &eq->hr_dev->pdev->dev;
+ int eqovf_found = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i = 0;
+
+ /*
+ * AEQ overflow, ECC multi-bit error and CEQ almost-overflow alarms
+ * are handled by masking the IRQ, clearing the interrupt state and
+ * then unmasking the IRQ again.
+ */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ eqovf_found++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return eqovf_found;
+}
+
+static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ int eqes_found = 0;
+
+ if (likely(eq->type_flag == HNS_ROCE_CEQ))
+ /* CEQ IRQ routine; the CEQ is a pulse IRQ and needs no clearing */
+ eqes_found = hns_roce_ceq_int(hr_dev, eq);
+ else if (likely(eq->type_flag == HNS_ROCE_AEQ))
+ /* AEQ IRQ routine; the AEQ is a pulse IRQ and needs no clearing */
+ eqes_found = hns_roce_aeq_int(hr_dev, eq);
+ else
+ /* AEQ queue overflow irq */
+ eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
+
+ return eqes_found;
+}
+
+static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
+{
+ int int_work = 0;
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+
+ int_work = hns_roce_eq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
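+/*
+ * Allocate the DMA buffers for one EQ and program its context: state,
+ * entry shift, base address and initial consumer index.
+ */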
+static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas = 0;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+ dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+ /* Configure eq extended address bits 12~44 */
+ writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
+
+ /*
+ * Configure eq extended address bits 45~49.
+ * 44 = 32 + 12: the address is shifted right by 12 because the
+ * hardware works in 4K pages, and by a further 32 to obtain the
+ * high 32-bit value written to the hardware.
+ */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, (u8 *)eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, (u8 *)eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i = i - 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
+
+static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int i = 0;
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
+{
+ int i = 0;
+ u32 aemask_val;
+ int masken = 0;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
+static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
+{
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+}
+
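+/*
+ * Build the EQ table: the first num_comp_vectors entries are CEQs and
+ * the remaining ones AEQs. Interrupts are masked while the EQs are
+ * created, their IRQs requested, and then the EQs are enabled.
+ */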
+int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq = NULL;
+ int eq_num = 0;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ HNS_ROCE_CEQC_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ HNS_ROCE_CEQC_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth[i];
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = sizeof(struct hns_roce_ceqe);
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = sizeof(struct hns_roce_aeqe);
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_int_mask_en(hr_dev);
+
+ /* Configure CE irq interval and burst num */
+ hns_roce_ce_int_default_cfg(hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < eq_num; j++) {
+ ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
+ 0, hr_dev->irq_names[j], eq_table->eq + j);
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j = j - 1; j >= 0; j--)
+ free_irq(eq_table->eq[j].irq, eq_table->eq + j);
+
+err_create_eq_fail:
+ for (i = i - 1; i >= 0; i--)
+ hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+ int eq_num;
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+
+ hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
new file mode 100644
index 000000000000..fe4388191a3c
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_EQ_H
+#define _HNS_ROCE_EQ_H
+
+#define HNS_ROCE_CEQ 1
+#define HNS_ROCE_AEQ 2
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
+#define HNS_ROCE_CEQC_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+#define CONS_INDEX_MASK 0xffff
+
+#define CEQ_REG_OFFSET 0x18
+
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 port;
+ u32 rsv0;
+ u32 rsv1;
+ } port_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
+ (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
+ (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
+ (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
+ (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
+ (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
+
+struct hns_roce_ceqe {
+ union {
+ int comp;
+ } ceqe;
+};
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
+ (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
+
+#endif /* _HNS_ROCE_EQ_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
new file mode 100644
index 000000000000..d53d64362389
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_common.h"
+
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+
+#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
+#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
+
+#define DMA_ADDR_T_SHIFT 12
+#define BT_CMD_SYNC_SHIFT 31
+#define BT_BA_SHIFT 32
+
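+/*
+ * Allocate a HEM large enough to hold npages pages as a list of
+ * DMA-coherent chunks linked on hem->chunk_list.
+ */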
+struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
+ gfp_t gfp_mask)
+{
+ struct hns_roce_hem_chunk *chunk = NULL;
+ struct hns_roce_hem *hem;
+ struct scatterlist *mem;
+ int order;
+ void *buf;
+
+ WARN_ON(gfp_mask & __GFP_HIGHMEM);
+
+ hem = kmalloc(sizeof(*hem),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!hem)
+ return NULL;
+
+ hem->refcount = 0;
+ INIT_LIST_HEAD(&hem->chunk_list);
+
+ order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
+
+ while (npages > 0) {
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+
+ sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
+ chunk->npages = 0;
+ chunk->nsg = 0;
+ list_add_tail(&chunk->list, &hem->chunk_list);
+ }
+
+ while (1 << order > npages)
+ --order;
+
+ /*
+ * Allocate the memory in one go. If that fails, do not fall
+ * back to smaller allocations; just return failure.
+ */
+ mem = &chunk->mem[chunk->npages];
+ buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+ &sg_dma_address(mem), gfp_mask);
+ if (!buf)
+ goto fail;
+
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
+ WARN_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+
+ ++chunk->npages;
+ ++chunk->nsg;
+ npages -= 1 << order;
+ }
+
+ return hem;
+
+fail:
+ hns_roce_free_hem(hr_dev, hem);
+ return NULL;
+}
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
+{
+ struct hns_roce_hem_chunk *chunk, *tmp;
+ int i;
+
+ if (!hem)
+ return;
+
+ list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev,
+ chunk->mem[i].length,
+ lowmem_page_address(sg_page(&chunk->mem[i])),
+ sg_dma_address(&chunk->mem[i]));
+ kfree(chunk);
+ }
+
+ kfree(hem);
+}
+
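+/*
+ * Program the base address of the HEM chunk backing 'obj' into the
+ * hardware base address table via the ROCEE_BT_CMD_L/H registers,
+ * waiting for the hardware sync flag to clear before each write.
+ */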
+static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ spinlock_t *lock = &hr_dev->bt_cmd_lock;
+ unsigned long end = 0;
+ unsigned long flags;
+ struct hns_roce_hem_iter iter;
+ void __iomem *bt_cmd;
+ u32 bt_cmd_h_val = 0;
+ u32 bt_cmd_val[2];
+ u32 bt_cmd_l = 0;
+ u64 bt_ba = 0;
+ int ret = 0;
+
+ /* Find the HEM (Hardware Entry Memory) entry */
+ unsigned long i = (obj & (table->num_obj - 1)) /
+ (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_MTPT);
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ break;
+ case HEM_TYPE_SRQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_SRQC);
+ break;
+ default:
+ return ret;
+ }
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ /* Currently iterate over only one chunk */
+ for (hns_roce_hem_first(table->hem[i], &iter);
+ !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+ bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
+
+ spin_lock_irqsave(lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(lock, flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_l = (u32)bt_ba;
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
+ bt_ba >> BT_BA_SHIFT);
+
+ bt_cmd_val[0] = bt_cmd_l;
+ bt_cmd_val[1] = bt_cmd_h_val;
+ hns_roce_write64_k(bt_cmd_val,
+ hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+ spin_unlock_irqrestore(lock, flags);
+ }
+
+ return ret;
+}
+
+static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long end = 0;
+ unsigned long flags;
+ void __iomem *bt_cmd;
+ uint32_t bt_cmd_val[2];
+ u32 bt_cmd_h_val = 0;
+ int ret = 0;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_MTPT);
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ break;
+ case HEM_TYPE_SRQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_SRQC);
+ break;
+ default:
+ return ret;
+ }
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
+
+ spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_val[0] = 0;
+ bt_cmd_val[1] = bt_cmd_h_val;
+ hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+ return ret;
+}
+
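+/*
+ * Pin the HEM chunk backing object 'obj': allocate it and hand its base
+ * address to the hardware on first use, otherwise just take a reference.
+ */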
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = 0;
+ unsigned long i;
+
+ i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
+ table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[i]) {
+ ++table->hem[i]->refcount;
+ goto out;
+ }
+
+ table->hem[i] = hns_roce_alloc_hem(hr_dev,
+ HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ (table->lowmem ? GFP_KERNEL :
+ GFP_HIGHUSER) | __GFP_NOWARN);
+ if (!table->hem[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Set the HEM base address (128KB chunk, physical address) in hardware */
+ if (hns_roce_set_hem(hr_dev, table, obj)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed.\n");
+ goto out;
+ }
+
+ ++table->hem[i]->refcount;
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long i;
+
+ i = (obj & (table->num_obj - 1)) /
+ (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (--table->hem[i]->refcount == 0) {
+ /* Clear HEM base address */
+ if (hns_roce_clear_hem(hr_dev, table, obj))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
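+/*
+ * Return the kernel virtual address of object 'obj' in a lowmem HEM
+ * table, optionally reporting its DMA address through dma_handle.
+ */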
+void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+ dma_addr_t *dma_handle)
+{
+ struct hns_roce_hem_chunk *chunk;
+ unsigned long idx;
+ int i;
+ int offset, dma_offset;
+ struct hns_roce_hem *hem;
+ struct page *page = NULL;
+
+ if (!table->lowmem)
+ return NULL;
+
+ mutex_lock(&table->mutex);
+ idx = (obj & (table->num_obj - 1)) * table->obj_size;
+ hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
+ dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+
+ if (!hem)
+ goto out;
+
+ list_for_each_entry(chunk, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i) {
+ if (dma_handle && dma_offset >= 0) {
+ if (sg_dma_len(&chunk->mem[i]) >
+ (u32)dma_offset)
+ *dma_handle = sg_dma_address(
+ &chunk->mem[i]) + dma_offset;
+ dma_offset -= sg_dma_len(&chunk->mem[i]);
+ }
+
+ if (chunk->mem[i].length > (u32)offset) {
+ page = sg_page(&chunk->mem[i]);
+ goto out;
+ }
+ offset -= chunk->mem[i].length;
+ }
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return page ? lowmem_page_address(page) + offset : NULL;
+}
+
+int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end)
+{
+ unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
+ unsigned long i = 0;
+ int ret = 0;
+
+ /* Allocate MTT entry memory in 128KB chunks */
+ for (i = start; i <= end; i += inc) {
+ ret = hns_roce_table_get(hr_dev, table, i);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i > start) {
+ i -= inc;
+ hns_roce_table_put(hr_dev, table, i);
+ }
+ return ret;
+}
+
+void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end)
+{
+ unsigned long i;
+
+ for (i = start; i <= end;
+ i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
+ hns_roce_table_put(hr_dev, table, i);
+}
+
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj,
+ int use_lowmem)
+{
+ unsigned long obj_per_chunk;
+ unsigned long num_hem;
+
+ obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+
+ table->type = type;
+ table->num_hem = num_hem;
+ table->num_obj = nobj;
+ table->obj_size = obj_size;
+ table->lowmem = use_lowmem;
+ mutex_init(&table->mutex);
+
+ return 0;
+}
+
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long i;
+
+ for (i = 0; i < table->num_hem; ++i)
+ if (table->hem[i]) {
+ if (hns_roce_clear_hem(hr_dev, table,
+ i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
+ dev_err(dev, "Clear HEM base address failed.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ }
+
+ kfree(table->hem);
+}
+
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
new file mode 100644
index 000000000000..ad6617588fba
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HEM_H
+#define _HNS_ROCE_HEM_H
+
+enum {
+ /* MAP HEM(Hardware Entry Memory) */
+ HEM_TYPE_QPC = 0,
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+ HEM_TYPE_IRRL,
+};
+
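+/* Number of scatterlist entries that keep a HEM chunk descriptor within 256 bytes */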
+#define HNS_ROCE_HEM_CHUNK_LEN \
+ ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
+ (sizeof(struct scatterlist)))
+
+enum {
+ HNS_ROCE_HEM_PAGE_SHIFT = 12,
+ HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
+};
+
+struct hns_roce_hem_chunk {
+ struct list_head list;
+ int npages;
+ int nsg;
+ struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+};
+
+struct hns_roce_hem {
+ struct list_head chunk_list;
+ int refcount;
+};
+
+struct hns_roce_hem_iter {
+ struct hns_roce_hem *hem;
+ struct hns_roce_hem_chunk *chunk;
+ int page_idx;
+};
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+ dma_addr_t *dma_handle);
+int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end);
+void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end);
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj,
+ int use_lowmem);
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table);
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+
+static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
+ struct hns_roce_hem_iter *iter)
+{
+ iter->hem = hem;
+ iter->chunk = list_empty(&hem->chunk_list) ? NULL :
+ list_entry(hem->chunk_list.next,
+ struct hns_roce_hem_chunk, list);
+ iter->page_idx = 0;
+}
+
+static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
+{
+ return !iter->chunk;
+}
+
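+/* Advance the iterator to the next page, moving on to the next chunk when the current one is exhausted */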
+static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
+{
+ if (++iter->page_idx >= iter->chunk->nsg) {
+ if (iter->chunk->list.next == &iter->hem->chunk_list) {
+ iter->chunk = NULL;
+ return;
+ }
+
+ iter->chunk = list_entry(iter->chunk->list.next,
+ struct hns_roce_hem_chunk, list);
+ iter->page_idx = 0;
+ }
+}
+
+static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
+{
+ return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
+
+#endif /*_HNS_ROCE_HEM_H*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
new file mode 100644
index 000000000000..399f5dedaf2d
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -0,0 +1,2800 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v1.h"
+
+static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->lkey = cpu_to_le32(sg->lkey);
+ dseg->addr = cpu_to_le64(sg->addr);
+ dseg->len = cpu_to_le32(sg->length);
+}
+
+static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
+ u32 rkey)
+{
+ rseg->raddr = cpu_to_le64(remote_addr);
+ rseg->rkey = cpu_to_le32(rkey);
+ rseg->len = 0;
+}
+
+int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
+ struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
+ struct hns_roce_wqe_data_seg *dseg = NULL;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_sq_db sq_db;
+ int ps_opcode = 0, i = 0;
+ unsigned long flags = 0;
+ void *wqe = NULL;
+ u32 doorbell[2];
+ int nreq = 0;
+ u32 ind = 0;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ ind = qp->sq_next_wqe;
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+ qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
+ wr->wr_id;
+
+ /* The UD (GSI) and RC WQE formats are built separately below */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_0_M,
+ UD_SEND_WQE_U32_4_DMAC_0_S,
+ ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_1_M,
+ UD_SEND_WQE_U32_4_DMAC_1_S,
+ ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_2_M,
+ UD_SEND_WQE_U32_4_DMAC_2_S,
+ ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_3_M,
+ UD_SEND_WQE_U32_4_DMAC_3_S,
+ ah->av.mac[3]);
+
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_DMAC_4_M,
+ UD_SEND_WQE_U32_8_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_DMAC_5_M,
+ UD_SEND_WQE_U32_8_DMAC_5_S,
+ ah->av.mac[5]);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
+ UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
+ HNS_ROCE_WQE_OPCODE_SEND);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
+ UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
+ 2);
+ roce_set_bit(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
+ 1);
+
+ ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
+ cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
+ ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
+ cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
+
+ roce_set_field(ud_sq_wqe->u32_16,
+ UD_SEND_WQE_U32_16_DEST_QP_M,
+ UD_SEND_WQE_U32_16_DEST_QP_S,
+ ud_wr(wr)->remote_qpn);
+ roce_set_field(ud_sq_wqe->u32_16,
+ UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
+ UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
+ ah->av.stat_rate);
+
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_FLOW_LABEL_M,
+ UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_PRIORITY_M,
+ UD_SEND_WQE_U32_36_PRIORITY_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_SGID_INDEX_M,
+ UD_SEND_WQE_U32_36_SGID_INDEX_S,
+ hns_get_gid_index(hr_dev, qp->port,
+ ah->av.gid_index));
+
+ roce_set_field(ud_sq_wqe->u32_40,
+ UD_SEND_WQE_U32_40_HOP_LIMIT_M,
+ UD_SEND_WQE_U32_40_HOP_LIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->u32_40,
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
+
+ ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
+ ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
+ ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;
+
+ ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
+ ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
+ ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ ctrl = wqe;
+ memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
+ for (i = 0; i < wr->num_sge; i++)
+ ctrl->msg_length += wr->sg_list[i].length;
+
+ ctrl->sgl_pa_h = 0;
+ ctrl->flag = 0;
+ ctrl->imm_data = send_ieth(wr);
+
+ /* Ctrl field: set signalled, solicited, immediate and fence flags */
+ /* SO handling is left for conforming application scenarios */
+ ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
+ cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
+ ((wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
+ cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
+ (wr->send_flags & IB_SEND_FENCE ?
+ (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
+
+ wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
+ break;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND_WITH_IMM:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
+ break;
+ case IB_WR_LOCAL_INV:
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_LSO:
+ default:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
+ break;
+ }
+ ctrl->flag |= cpu_to_le32(ps_opcode);
+ wqe += sizeof(struct hns_roce_wqe_raddr_seg);
+
+ dseg = wqe;
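+ /* Inline sends copy the SGE payloads directly into the WQE */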
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (ctrl->msg_length >
+ hr_dev->caps.max_sq_inline) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_err(dev, "inline len(1-%d)=%d, illegal",
+ ctrl->msg_length,
+ hr_dev->caps.max_sq_inline);
+ goto out;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *) (uintptr_t)
+ wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+ ctrl->flag |= HNS_ROCE_WQE_INLINE;
+ } else {
+ /* Build one data segment per SGE */
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg(dseg + i, wr->sg_list + i);
+
+ ctrl->flag |= cpu_to_le32(wr->num_sge <<
+ HNS_ROCE_WQE_SGE_NUM_BIT);
+ }
+ ind++;
+ } else {
+ dev_dbg(dev, "unSupported QP type\n");
+ break;
+ }
+ }
+
+out:
+ /* Set DB return */
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
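+ /* Assemble the SQ doorbell (head, port, QPN) and ring it via the 64-bit doorbell register */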
+ sq_db.u32_4 = 0;
+ sq_db.u32_8 = 0;
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
+ SQ_DOORBELL_U32_4_SQ_HEAD_S,
+ (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
+ SQ_DOORBELL_U32_4_PORT_S, qp->port);
+ roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
+ SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
+ roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
+
+ doorbell[0] = sq_db.u32_4;
+ doorbell[1] = sq_db.u32_8;
+
+ hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ qp->sq_next_wqe = ind;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return ret;
+}
+
+int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int ret = 0;
+ int nreq = 0;
+ int ind = 0;
+ int i = 0;
+ u32 reg_val = 0;
+ unsigned long flags = 0;
+ struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
+ struct hns_roce_wqe_data_seg *scat = NULL;
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_rq_db rq_db;
+ uint32_t doorbell[2] = {0};
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags);
+ ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ ctrl = get_recv_wqe(hr_qp, ind);
+
+ roce_set_field(ctrl->rwqe_byte_12,
+ RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
+ RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
+ wr->num_sge);
+
+ scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg(scat + i, wr->sg_list + i);
+
+ hr_qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ hr_qp->rq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ /* SW updates the GSI RQ head pointer in the QP1C register */
+ reg_val = roce_read(to_hr_dev(ibqp->device),
+ ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->port);
+ roce_set_field(reg_val,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_write(to_hr_dev(ibqp->device),
+ ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+ } else {
+ rq_db.u32_4 = 0;
+ rq_db.u32_8 = 0;
+
+ roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
+ 1);
+
+ doorbell[0] = rq_db.u32_4;
+ doorbell[1] = rq_db.u32_8;
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ }
+ }
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+ return ret;
+}
+
+static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
+ int sdb_mode, int odb_mode)
+{
+ u32 val;
+
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
+ roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+}
+
+static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
+ u32 odb_mode)
+{
+ u32 val;
+
+ /* Configure SDB/ODB extend mode */
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
+ roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+}
+
+static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
+ u32 sdb_alful)
+{
+ u32 val;
+
+ /* Configure SDB */
+ val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
+ roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
+ ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
+ roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
+ ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
+ roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
+}
+
+static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
+ u32 odb_alful)
+{
+ u32 val;
+
+ /* Configure ODB */
+ val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
+ roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
+ ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
+ roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
+ ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
+ roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
+}
+
+static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
+ u32 ext_sdb_alful)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t sdb_dma_addr;
+ u32 val;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ /* Configure extend SDB threshold */
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
+
+ /* Configure extend SDB base addr */
+ sdb_dma_addr = db->ext_db->sdb_buf_list->map;
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
+
+ /* Configure extend SDB depth */
+ val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
+ db->ext_db->esdb_dep);
+ /*
+ * 44 = 32 + 12. The address written to hardware is shifted right by
+ * 12 because 4K pages are used, and by a further 32 to extract the
+ * high 32 bits of the value programmed into the register.
+ */
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
+
+ dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
+ dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
+ ext_sdb_alept, ext_sdb_alful);
+}
+
+static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
+ u32 ext_odb_alful)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t odb_dma_addr;
+ u32 val;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ /* Configure extend ODB threshold */
+ roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
+ roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
+
+ /* Configure extend ODB base addr */
+ odb_dma_addr = db->ext_db->odb_buf_list->map;
+ roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
+
+ /* Configure extend ODB depth */
+ val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
+ roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
+ ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
+ db->ext_db->eodb_dep);
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
+ odb_dma_addr >> 44);
+ roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
+
+ dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
+ dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
+ ext_odb_alept, ext_odb_alful);
+}
+
+static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
+ u32 odb_ext_mod)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t sdb_dma_addr;
+ dma_addr_t odb_dma_addr;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
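+ /* In extended mode, doorbells are recorded in host DMA buffers whose base address and depth are programmed into the hardware */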
+ db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
+ if (!db->ext_db)
+ return -ENOMEM;
+
+ if (sdb_ext_mod) {
+ db->ext_db->sdb_buf_list = kmalloc(
+ sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
+ if (!db->ext_db->sdb_buf_list) {
+ ret = -ENOMEM;
+ goto ext_sdb_buf_fail_out;
+ }
+
+ db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
+ HNS_ROCE_V1_EXT_SDB_SIZE,
+ &sdb_dma_addr, GFP_KERNEL);
+ if (!db->ext_db->sdb_buf_list->buf) {
+ ret = -ENOMEM;
+ goto alloc_sq_db_buf_fail;
+ }
+ db->ext_db->sdb_buf_list->map = sdb_dma_addr;
+
+ db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
+ hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
+ HNS_ROCE_V1_EXT_SDB_ALFUL);
+ } else {
+ hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
+ HNS_ROCE_V1_SDB_ALFUL);
+ }
+
+ if (odb_ext_mod) {
+ db->ext_db->odb_buf_list = kmalloc(
+ sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
+ if (!db->ext_db->odb_buf_list) {
+ ret = -ENOMEM;
+ goto ext_odb_buf_fail_out;
+ }
+
+ db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
+ HNS_ROCE_V1_EXT_ODB_SIZE,
+ &odb_dma_addr, GFP_KERNEL);
+ if (!db->ext_db->odb_buf_list->buf) {
+ ret = -ENOMEM;
+ goto alloc_otr_db_buf_fail;
+ }
+ db->ext_db->odb_buf_list->map = odb_dma_addr;
+
+ db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
+ hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
+ HNS_ROCE_V1_EXT_ODB_ALFUL);
+ } else {
+ hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
+ HNS_ROCE_V1_ODB_ALFUL);
+ }
+
+ hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
+
+ return 0;
+
+alloc_otr_db_buf_fail:
+ kfree(db->ext_db->odb_buf_list);
+
+ext_odb_buf_fail_out:
+ if (sdb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
+ db->ext_db->sdb_buf_list->buf,
+ db->ext_db->sdb_buf_list->map);
+ }
+
+alloc_sq_db_buf_fail:
+ if (sdb_ext_mod)
+ kfree(db->ext_db->sdb_buf_list);
+
+ext_sdb_buf_fail_out:
+ kfree(db->ext_db);
+ return ret;
+}
+
+static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ u32 sdb_ext_mod;
+ u32 odb_ext_mod;
+ u32 sdb_evt_mod;
+ u32 odb_evt_mod;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ memset(db, 0, sizeof(*db));
+
+ /* Default DB mode */
+ sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
+ odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
+ sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
+ odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
+
+ db->sdb_ext_mod = sdb_ext_mod;
+ db->odb_ext_mod = odb_ext_mod;
+
+ /* Init extend DB */
+ ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
+ if (ret) {
+ dev_err(dev, "Failed in extend DB configuration.\n");
+ return ret;
+ }
+
+ hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
+
+ return 0;
+}
+
+static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ if (db->sdb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
+ db->ext_db->sdb_buf_list->buf,
+ db->ext_db->sdb_buf_list->map);
+ kfree(db->ext_db->sdb_buf_list);
+ }
+
+ if (db->odb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
+ db->ext_db->odb_buf_list->buf,
+ db->ext_db->odb_buf_list->map);
+ kfree(db->ext_db->odb_buf_list);
+ }
+
+ kfree(db->ext_db);
+}
+
+static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ int raq_shift = 0;
+ dma_addr_t addr;
+ u32 val;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_raq_table *raq;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ raq = &priv->raq_table;
+
+ raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
+ if (!raq->e_raq_buf)
+ return -ENOMEM;
+
+ raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
+ &addr, GFP_KERNEL);
+ if (!raq->e_raq_buf->buf) {
+ ret = -ENOMEM;
+ goto err_dma_alloc_raq;
+ }
+ raq->e_raq_buf->map = addr;
+
+ /* Configure raq extended address. 48 bit, 4K aligned */
+ roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
+
+ /* Configure raq_shift */
+ raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
+ val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
+ roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
+ ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
+ /*
+ * 44 = 32 + 12. The address written to hardware is shifted right by
+ * 12 because 4K pages are used, and by a further 32 to extract the
+ * high 32 bits of the value programmed into the register.
+ */
+ roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
+ ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
+ raq->e_raq_buf->map >> 44);
+ roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
+ dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
+
+ /* Configure raq threshold */
+ val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
+ roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
+ ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
+ HNS_ROCE_V1_EXT_RAQ_WF);
+ roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
+ dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
+
+ /* Enable extend raq */
+ val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
+ roce_set_field(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
+ POL_TIME_INTERVAL_VAL);
+ roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
+ roce_set_field(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
+ 2);
+ roce_set_bit(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
+ roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
+ dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
+
+ /* Enable raq drop */
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
+
+ return 0;
+
+err_dma_alloc_raq:
+ kfree(raq->e_raq_buf);
+ return ret;
+}
+
+static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_raq_table *raq;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ raq = &priv->raq_table;
+
+ dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
+ raq->e_raq_buf->map);
+ kfree(raq->e_raq_buf);
+}
+
+static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
+{
+ u32 val;
+
+ if (enable_flag) {
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ /* Open all ports */
+ roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
+ ALL_PORT_VAL_OPEN);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ } else {
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ /* Close all ports */
+ roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ }
+}
+
+/**
+ * hns_roce_v1_reset - reset RoCE
+ * @hr_dev: RoCE device struct pointer
+ * @dereset: true -- release from reset, false -- hold in reset
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
+{
+ struct device_node *dsaf_node;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ /* check if this is DT/ACPI case */
+ if (dev_of_node(dev)) {
+ dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
+ if (!dsaf_node) {
+ dev_err(dev, "could not find dsaf-handle\n");
+ return -EINVAL;
+ }
+ fwnode = &dsaf_node->fwnode;
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "dsaf-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "could not find dsaf-handle\n");
+ return ret;
+ }
+ fwnode = acpi_fwnode_handle(args.adev);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ ret = hns_dsaf_roce_reset(fwnode, false);
+ if (ret)
+ return ret;
+
+ if (dereset) {
+ msleep(SLEEP_TIME_INTERVAL);
+ ret = hns_dsaf_roce_reset(fwnode, true);
+ }
+
+ return ret;
+}
+
+void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
+{
+ int i = 0;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
+ hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_VENDOR_PART_ID_REG));
+ hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
+
+ hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_L_REG)) |
+ ((u64)le32_to_cpu(roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+
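+ /* Fixed capability limits reported by the v1 engine */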
+ caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
+ caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
+ caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
+ caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
+ caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
+ caps->num_uars = HNS_ROCE_V1_UAR_NUM;
+ caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
+ caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
+ caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
+ caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
+ caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
+ caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
+ caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
+ caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
+ caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
+ caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
+ caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
+ caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
+ caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
+ caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
+ caps->sqp_start = 0;
+ caps->reserved_lkey = 0;
+ caps->reserved_pds = 0;
+ caps->reserved_mrws = 1;
+ caps->reserved_uars = 0;
+ caps->reserved_cqs = 0;
+
+ for (i = 0; i < caps->num_ports; i++)
+ caps->pkey_table_len[i] = 1;
+
+ for (i = 0; i < caps->num_ports; i++) {
+ /* Six ports share 16 GIDs in the v1 engine */
+ if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
+ caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
+ caps->num_ports;
+ else
+ caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
+ caps->num_ports + 1;
+ }
+
+ for (i = 0; i < caps->num_comp_vectors; i++)
+ caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
+
+ caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_ACK_DELAY_REG));
+ caps->max_mtu = IB_MTU_2048;
+}
+
+int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ u32 val;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* DMAE user config */
+ val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
+ ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
+ ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
+ 1 << PAGES_SHIFT_16);
+ roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
+
+ val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
+ ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
+ ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
+ 1 << PAGES_SHIFT_16);
+ roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
+
+ ret = hns_roce_db_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "doorbell init failed!\n");
+ return ret;
+ }
+
+ ret = hns_roce_raq_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "raq init failed!\n");
+ goto error_failed_raq_init;
+ }
+
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+
+ return 0;
+
+error_failed_raq_init:
+ hns_roce_db_free(hr_dev);
+ return ret;
+}
+
+void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+ hns_roce_db_free(hr_dev);
+}
+
+void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid)
+{
+ u32 *p = NULL;
+ u8 gid_idx = 0;
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
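+ /* Write the 128-bit GID as four 32-bit words into the L/ML/MH/H registers */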
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[4];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[8];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[0xc];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+}
+
+void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
+{
+ u32 reg_smac_l;
+ u16 reg_smac_h;
+ u16 *p_h;
+ u32 *p;
+ u32 val;
+
+ p = (u32 *)(&addr[0]);
+ reg_smac_l = *p;
+ roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
+ PHY_PORT_OFFSET * phy_port);
+
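+ /* The upper 16 bits of the MAC address go into the SMAC_H field */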
+ val = roce_read(hr_dev,
+ ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ p_h = (u16 *)(&addr[4]);
+ reg_smac_h = *p_h;
+ roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
+ ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
+ roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
+ val);
+}
+
+void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu)
+{
+ u32 val;
+
+ val = roce_read(hr_dev,
+ ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
+ ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
+ roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
+ val);
+}
+
+int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
+{
+ struct hns_roce_v1_mpt_entry *mpt_entry;
+ struct scatterlist *sg;
+ u64 *pages;
+ int entry;
+ int i;
+
+ /* MPT filled into mailbox buf */
+ mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
+ MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
+ MPT_BYTE_4_KEY_S, mr->key);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
+ MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
+ MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
+ (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
+ (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
+ (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
+ 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
+
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
+ MPT_BYTE_12_PBL_ADDR_H_S, 0);
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
+ MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
+
+ mpt_entry->virt_addr_l = (u32)mr->iova;
+ mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
+ mpt_entry->length = (u32)mr->size;
+
+ roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
+ MPT_BYTE_28_PD_S, mr->pd);
+ roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
+ MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
+ roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
+ MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
+
+ /* DMA memory register */
+ if (mr->type == MR_TYPE_DMA)
+ return 0;
+
+ pages = (u64 *) __get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ pages[i] = ((u64)sg_dma_address(sg)) >> 12;
+
+ /* The first 7 page addresses are recorded directly in the MTPT entry */
+ if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
+ break;
+ i++;
+ }
+
+ /* Register user mr */
+ for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
+ switch (i) {
+ case 0:
+ mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
+ roce_set_field(mpt_entry->mpt_byte_36,
+ MPT_BYTE_36_PA0_H_M,
+ MPT_BYTE_36_PA0_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ break;
+ case 1:
+ roce_set_field(mpt_entry->mpt_byte_36,
+ MPT_BYTE_36_PA1_L_M,
+ MPT_BYTE_36_PA1_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_40,
+ MPT_BYTE_40_PA1_H_M,
+ MPT_BYTE_40_PA1_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ break;
+ case 2:
+ roce_set_field(mpt_entry->mpt_byte_40,
+ MPT_BYTE_40_PA2_L_M,
+ MPT_BYTE_40_PA2_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_44,
+ MPT_BYTE_44_PA2_H_M,
+ MPT_BYTE_44_PA2_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ break;
+ case 3:
+ roce_set_field(mpt_entry->mpt_byte_44,
+ MPT_BYTE_44_PA3_L_M,
+ MPT_BYTE_44_PA3_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_48,
+ MPT_BYTE_48_PA3_H_M,
+ MPT_BYTE_48_PA3_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
+ break;
+ case 4:
+ mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
+ roce_set_field(mpt_entry->mpt_byte_56,
+ MPT_BYTE_56_PA4_H_M,
+ MPT_BYTE_56_PA4_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ break;
+ case 5:
+ roce_set_field(mpt_entry->mpt_byte_56,
+ MPT_BYTE_56_PA5_L_M,
+ MPT_BYTE_56_PA5_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_60,
+ MPT_BYTE_60_PA5_H_M,
+ MPT_BYTE_60_PA5_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ break;
+ case 6:
+ roce_set_field(mpt_entry->mpt_byte_60,
+ MPT_BYTE_60_PA6_L_M,
+ MPT_BYTE_60_PA6_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_64,
+ MPT_BYTE_64_PA6_H_M,
+ MPT_BYTE_64_PA6_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ break;
+ default:
+ break;
+ }
+ }
+
+ free_page((unsigned long) pages);
+
+ mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
+
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
+ MPT_BYTE_12_PBL_ADDR_H_S,
+ ((u32)(mr->pbl_dma_addr >> 32)));
+
+ return 0;
+}
+
+static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
+{
+ return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
+ n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+}
+
+static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
+{
+ struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
+
+ /* The CQE is owned by SW when its owner bit is the inverse of the cons_index wrap bit */
+ return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
+ !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+}
+
+static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
+{
+ return get_sw_cqe(hr_cq, hr_cq->cons_index);
+}
+
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
+ spinlock_t *doorbell_lock)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
+ roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+}
+
+static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ struct hns_roce_cqe *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
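+ /* First walk forward to find the index just past the last software-owned CQE */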
+ for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
+ ++prod_index) {
+ if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ break;
+ }
+
+ /*
+ * Now backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with next entries.
+ */
+ while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+ cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+ if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) &
+ HNS_ROCE_CQE_QPN_MASK) == qpn) {
+ /* The v1 engine does not support SRQ */
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe(hr_cq, (prod_index + nfreed) &
+ hr_cq->ib_cq.cqe);
+ owner_bit = roce_get_bit(dest->cqe_byte_4,
+ CQE_BYTE_4_OWNER_S);
+ memcpy(dest, cqe, sizeof(*cqe));
+ roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
+ owner_bit);
+ }
+ }
+
+ if (nfreed) {
+ hr_cq->cons_index += nfreed;
+ /*
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
+ &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+ }
+}
+
+static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ spin_lock_irq(&hr_cq->lock);
+ __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
+ spin_unlock_irq(&hr_cq->lock);
+}
+
+void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+ dma_addr_t dma_handle, int nent, u32 vector)
+{
+ struct hns_roce_cq_context *cq_context = NULL;
+ void __iomem *tptr_addr;
+
+ cq_context = mb_buf;
+ memset(cq_context, 0, sizeof(*cq_context));
+
+ tptr_addr = 0;
+ hr_dev->priv_addr = tptr_addr;
+ hr_cq->tptr_addr = tptr_addr;
+
+ /* Register cq_context members */
+ roce_set_field(cq_context->cqc_byte_4,
+ CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
+ CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
+ roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
+ CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
+ cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
+
+ cq_context->cq_bt_l = (u32)dma_handle;
+ cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
+
+ roce_set_field(cq_context->cqc_byte_12,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
+ ((u64)dma_handle >> 32));
+ roce_set_field(cq_context->cqc_byte_12,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
+ ilog2((unsigned int)nent));
+ roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
+
+ cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
+ cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
+
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
+ cpu_to_le32((mtts[0]) >> 32));
+ /* Dedicated hardware, directly set 0 */
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
+ CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
+ /*
+ * 44 = 32 + 12. The address written to hardware is shifted right by
+ * 12 because 4K pages are used, and by a further 32 to extract the
+ * high 32 bits of the value programmed into the register.
+ */
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
+ CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
+ (u64)tptr_addr >> 44);
+ cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
+
+ cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
+
+ roce_set_field(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
+ CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
+ 0);
+ /* The initial value of the CQ's consumer index is 0 */
+ roce_set_field(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
+ CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
+ cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
+}
+
+int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+ int ret = 0;
+
+ notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
+ IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
+ /*
+ * flags = 0: notification flag = 1, report the next completion
+ * flags = 1: notification flag = 0, report solicited completions only
+ */
+ doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
+ roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
+ hr_cq->cqn | notification_flag);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+
+ return ret;
+}
+
+static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
+ struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+ int qpn;
+ int is_send;
+ u16 wqe_ctr;
+ u32 status;
+ u32 opcode;
+ struct hns_roce_cqe *cqe;
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_wq *wq;
+ struct hns_roce_wqe_ctrl_seg *sq_wqe;
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Find the CQE at the current consumer index */
+ cqe = next_cqe_sw(hr_cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++hr_cq->cons_index;
+ /* Memory barrier */
+ rmb();
+ /* 0->SQ, 1->RQ */
+ is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
+
+ /* The local QPN in a UD CQE is always 0 or 1, so the real QPN must be computed */
+ if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
+ qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
+ CQE_BYTE_20_PORT_NUM_S) +
+ roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) *
+ HNS_ROCE_MAX_PORTS;
+ } else {
+ qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S);
+ }
+
+ if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
+ return -EINVAL;
+ }
+
+ *cur_qp = hr_qp;
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->vendor_err = 0;
+
+ status = roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
+ CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
+ HNS_ROCE_CQE_STATUS_MASK;
+ switch (status) {
+ case HNS_ROCE_CQE_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* CQE status error, directly return */
+ if (wc->status != IB_WC_SUCCESS)
+ return 0;
+
+ if (is_send) {
+ /* The CQE corresponds to an SQ WQE */
+ sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_WQE_INDEX_M,
+ CQE_BYTE_4_WQE_INDEX_S));
+ switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
+ case HNS_ROCE_WQE_OPCODE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_WQE_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case HNS_ROCE_WQE_OPCODE_UD_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+ wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
+ IB_WC_WITH_IMM : 0);
+
+ wq = &(*cur_qp)->sq;
+ if ((*cur_qp)->sq_signal_bits) {
+ /*
+ * If sq_signal_bits is set, first move the tail pointer to
+ * the WQE that this CQE corresponds to.
+ */
+ wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_WQE_INDEX_M,
+ CQE_BYTE_4_WQE_INDEX_S);
+ wq->tail += (wqe_ctr - (u16)wq->tail) &
+ (wq->wqe_cnt - 1);
+ }
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ } else {
+ /* The CQE corresponds to an RQ WQE */
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ opcode = roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_OPERATION_TYPE_M,
+ CQE_BYTE_4_OPERATION_TYPE_S) &
+ HNS_ROCE_CQE_OPCODE_MASK;
+ switch (opcode) {
+ case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ break;
+ case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
+ if (roce_get_bit(cqe->cqe_byte_4,
+ CQE_BYTE_4_IMM_INDICATOR_S)) {
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(
+ cqe->immediate_data);
+ } else {
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ }
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* Update tail pointer, record wr_id */
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
+ CQE_BYTE_20_SL_S);
+ wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
+ CQE_BYTE_20_REMOTE_QPN_M,
+ CQE_BYTE_20_REMOTE_QPN_S);
+ wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
+ CQE_BYTE_20_GRH_PRESENT_S) ?
+ IB_WC_GRH : 0);
+ wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
+ CQE_BYTE_28_P_KEY_IDX_M,
+ CQE_BYTE_28_P_KEY_IDX_S);
+ }
+
+ return 0;
+}
+
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+ int ret = 0;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
+ if (ret)
+ break;
+ }
+
+ if (npolled) {
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
+ &to_hr_dev(ibcq->device)->cq_db_lock);
+ }
+
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+ if (ret == 0 || ret == -EAGAIN)
+ return npolled;
+ else
+ return ret;
+}
+
+static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt,
+ enum hns_roce_qp_state cur_state,
+ enum hns_roce_qp_state new_state,
+ struct hns_roce_qp_context *context,
+ struct hns_roce_qp *hr_qp)
+{
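+ /* Mailbox command for each supported QP state transition; a zero entry marks an unsupported transition */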
+ static const u16
+ op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
+ [HNS_ROCE_QP_STATE_RST] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
+ },
+ [HNS_ROCE_QP_STATE_INIT] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ /* Note: In the v1 engine, HW doesn't support INIT2INIT,
+ * so the RST2INIT command is used instead.
+ */
+ [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
+ [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
+ },
+ [HNS_ROCE_QP_STATE_RTR] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
+ },
+ [HNS_ROCE_QP_STATE_RTS] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
+ [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
+ },
+ [HNS_ROCE_QP_STATE_SQD] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
+ [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
+ },
+ [HNS_ROCE_QP_STATE_ERR] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ }
+ };
+
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = 0;
+
+ if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
+ new_state >= HNS_ROCE_QP_NUM_STATE ||
+ !op[cur_state][new_state]) {
+ dev_err(dev, "[modify_qp]not support state %d to %d\n",
+ cur_state, new_state);
+ return -EINVAL;
+ }
+
+ if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
+ return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
+ HNS_ROCE_CMD_2RST_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+
+ if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
+ return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
+ HNS_ROCE_CMD_2ERR_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, context, sizeof(*context));
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
+ op[cur_state][new_state],
+ HNS_ROCE_CMD_TIME_CLASS_C);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_sqp_context *context;
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t dma_handle = 0;
+ int rq_pa_start;
+ u32 reg_val;
+ u64 *mtts;
+ u32 *addr;
+
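+ /* The QP1 (GSI) context is programmed directly into the QP1C registers rather than through a mailbox command */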
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ /* Search QP buf's MTTs */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "qp buf pa find failed\n");
+ goto out;
+ }
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qp1c_bytes_4,
+ QP1C_BYTES_4_SQ_WQE_SHIFT_M,
+ QP1C_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qp1c_bytes_4,
+ QP1C_BYTES_4_RQ_WQE_SHIFT_M,
+ QP1C_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
+ QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
+
+ context->sq_rq_bt_l = (u32)(dma_handle);
+ roce_set_field(context->qp1c_bytes_12,
+ QP1C_BYTES_12_SQ_RQ_BT_H_M,
+ QP1C_BYTES_12_SQ_RQ_BT_H_S,
+ ((u32)(dma_handle >> 32)));
+
+ roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
+ QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
+ QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+ roce_set_bit(context->qp1c_bytes_16,
+ QP1C_BYTES_16_SIGNALING_TYPE_S,
+ hr_qp->sq_signal_bits);
+ roce_set_bit(context->qp1c_bytes_16,
+ QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
+ hr_qp->sq_signal_bits);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
+ 1);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
+ 1);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
+ 0);
+
+ roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
+ QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
+ roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
+ QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
+
+ rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
+ context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+
+ roce_set_field(context->qp1c_bytes_28,
+ QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
+ QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
+ (mtts[rq_pa_start]) >> 32);
+ roce_set_field(context->qp1c_bytes_28,
+ QP1C_BYTES_28_RQ_CUR_IDX_M,
+ QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
+
+ roce_set_field(context->qp1c_bytes_32,
+ QP1C_BYTES_32_RX_CQ_NUM_M,
+ QP1C_BYTES_32_RX_CQ_NUM_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(context->qp1c_bytes_32,
+ QP1C_BYTES_32_TX_CQ_NUM_M,
+ QP1C_BYTES_32_TX_CQ_NUM_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+
+ context->cur_sq_wqe_ba_l = (u32)mtts[0];
+
+ roce_set_field(context->qp1c_bytes_40,
+ QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
+ QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+ roce_set_field(context->qp1c_bytes_40,
+ QP1C_BYTES_40_SQ_CUR_IDX_M,
+ QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
+
+ /* Copy context to QP1C register */
+ addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->port * sizeof(*context));
+
+ writel(context->qp1c_bytes_4, addr);
+ writel(context->sq_rq_bt_l, addr + 1);
+ writel(context->qp1c_bytes_12, addr + 2);
+ writel(context->qp1c_bytes_16, addr + 3);
+ writel(context->qp1c_bytes_20, addr + 4);
+ writel(context->cur_rq_wqe_ba_l, addr + 5);
+ writel(context->qp1c_bytes_28, addr + 6);
+ writel(context->qp1c_bytes_32, addr + 7);
+ writel(context->cur_sq_wqe_ba_l, addr + 8);
+ }
+
+ /* Modify QP1C status */
+ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->port * sizeof(*context));
+ roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
+ ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
+ roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->port * sizeof(*context), reg_val);
+
+ hr_qp->state = new_state;
+ if (new_state == IB_QPS_RESET) {
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ }
+
+ kfree(context);
+ return 0;
+
+out:
+ kfree(context);
+ return -EINVAL;
+}
+
+static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_context *context;
+ struct hns_roce_rq_db rq_db;
+ dma_addr_t dma_handle_2 = 0;
+ dma_addr_t dma_handle = 0;
+ u32 doorbell[2] = {0};
+ int rq_pa_start = 0;
+ u32 reg_val = 0;
+ u64 *mtts_2 = NULL;
+ int ret = -EINVAL;
+ u64 *mtts = NULL;
+ int port;
+ u8 *dmac;
+ u8 *smac;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (mtts == NULL) {
+ dev_err(dev, "qp buf pa find failed\n");
+ goto out;
+ }
+
+ /* Search IRRL's mtts */
+ mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
+ &dma_handle_2);
+ if (mtts_2 == NULL) {
+ dev_err(dev, "qp irrl_table find failed\n");
+ goto out;
+ }
+
+ /*
+ * Reset to init
+ * Mandatory param:
+ * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
+ * Optional param: NA
+ */
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
+ to_hr_qp_type(hr_qp->ibqp.qp_type));
+
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ );
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ );
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_PD_M,
+ QP_CONTEXT_QPC_BYTES_4_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+ hr_qp->access_flags = attr->qp_access_flags;
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+
+ if (ibqp->srq)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
+ to_hr_srq(ibqp->srq)->srqn);
+
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ attr->pkey_index);
+ hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->qpc_bytes_16,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
+ to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ } else {
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(hr_qp->access_flags &
+ IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(hr_qp->access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ }
+
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_PD_M,
+ QP_CONTEXT_QPC_BYTES_4_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+
+ if (ibqp->srq)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ attr->pkey_index);
+ else
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ hr_qp->pkey_index);
+
+ roce_set_field(context->qpc_bytes_16,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ if ((attr_mask & IB_QP_ALT_PATH) ||
+ (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_PKEY_INDEX) ||
+ (attr_mask & IB_QP_QKEY)) {
+ dev_err(dev, "INIT2RTR attr_mask error\n");
+ goto out;
+ }
+
+ dmac = (u8 *)attr->ah_attr.dmac;
+
+ context->sq_rq_bt_l = (u32)(dma_handle);
+ roce_set_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
+ QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
+ ((u32)(dma_handle >> 32)));
+ roce_set_bit(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
+ 1);
+ roce_set_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
+ attr->min_rnr_timer);
+ context->irrl_ba_l = (u32)(dma_handle_2);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
+ ((u32)(dma_handle_2 >> 32)) &
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
+ QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
+ 1);
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
+ hr_qp->sq_signal_bits);
+
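+ /* Enable loopback if the destination MAC matches a local port MAC */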
+ for (port = 0; port < hr_dev->caps.num_ports; port++) {
+ smac = (u8 *)hr_dev->dev_addr[port];
+ dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
+ smac[0], smac[1], smac[2], smac[3], smac[4],
+ smac[5]);
+ if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
+ (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
+ (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
+ 1);
+ break;
+ }
+ }
+
+ if (hr_dev->loop_idc == 0x1)
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
+
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
+ attr->ah_attr.ah_flags);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
+ ilog2((unsigned int)attr->max_dest_rd_atomic));
+
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
+ attr->dest_qp_num);
+
+ /* Configure GID index */
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
+ hns_get_gid_index(hr_dev,
+ attr->ah_attr.port_num - 1,
+ attr->ah_attr.grh.sgid_index));
+
+ memcpy(&(context->dmac_l), dmac, 4);
+
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
+ *((u16 *)(&dmac[4])));
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
+ QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
+ attr->ah_attr.static_rate);
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
+ attr->ah_attr.grh.hop_limit);
+
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
+ attr->ah_attr.grh.flow_label);
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
+ attr->ah_attr.grh.traffic_class);
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_MTU_M,
+ QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
+
+ memcpy(context->dgid, attr->ah_attr.grh.dgid.raw,
+ sizeof(attr->ah_attr.grh.dgid.raw));
+
+ dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
+ roce_get_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
+
+ roce_set_field(context->qpc_bytes_68,
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+ roce_set_field(context->qpc_bytes_68,
+ QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
+
+ rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
+ context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+
+ roce_set_field(context->qpc_bytes_76,
+ QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
+ mtts[rq_pa_start] >> 32);
+ roce_set_field(context->qpc_bytes_76,
+ QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
+ QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
+
+ context->rx_rnr_time = 0;
+
+ roce_set_field(context->qpc_bytes_84,
+ QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
+ QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
+ attr->rq_psn - 1);
+ roce_set_field(context->qpc_bytes_84,
+ QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
+
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
+ attr->rq_psn);
+ roce_set_bit(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
+ roce_set_bit(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
+ 0);
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
+ 0);
+
+ context->dma_length = 0;
+ context->r_key = 0;
+ context->va_l = 0;
+ context->va_h = 0;
+
+ roce_set_field(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
+ roce_set_bit(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
+ roce_set_bit(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_112,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
+ roce_set_field(context->qpc_bytes_112,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
+
+ /* Port number and SL used by the chip for response/ACK packets */
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
+ hr_qp->port);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
+ hr_qp->sl = attr->ah_attr.sl;
+ } else if (cur_state == IB_QPS_RTR &&
+ new_state == IB_QPS_RTS) {
+ /* If any optional parameter is present, return an error */
+ if ((attr_mask & IB_QP_ALT_PATH) ||
+ (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_QKEY) ||
+ (attr_mask & IB_QP_PATH_MIG_STATE) ||
+ (attr_mask & IB_QP_CUR_STATE) ||
+ (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+ dev_err(dev, "RTR2RTS attr_mask error\n");
+ goto out;
+ }
+
+ context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+
+ roce_set_field(context->qpc_bytes_120,
+ QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+
+ roce_set_field(context->qpc_bytes_124,
+ QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
+ QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
+ roce_set_field(context->qpc_bytes_124,
+ QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
+ QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
+
+ roce_set_field(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
+ attr->sq_psn);
+ roce_set_bit(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
+ 0);
+ roce_set_bit(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_132,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
+ roce_set_field(context->qpc_bytes_132,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
+
+ roce_set_field(context->qpc_bytes_136,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
+ attr->sq_psn);
+ roce_set_field(context->qpc_bytes_136,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
+ attr->sq_psn);
+
+ roce_set_field(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
+ (attr->sq_psn >> SQ_PSN_SHIFT));
+ roce_set_field(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
+ roce_set_bit(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
+ attr->qp_state);
+
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
+ QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_LSN_M,
+ QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
+
+ context->rnr_retry = 0;
+
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
+ QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
+ attr->retry_cnt);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ attr->timeout);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
+ QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
+ attr->rnr_retry);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
+ hr_qp->port);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
+ hr_qp->sl = attr->ah_attr.sl;
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
+ ilog2((unsigned int)attr->max_rd_atomic));
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
+ context->pkt_use_len = 0;
+
+ roce_set_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
+ roce_set_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
+
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
+ attr->sq_psn);
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
+ QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
+ context->sge_use_len = 0;
+
+ roce_set_field(context->qpc_bytes_176,
+ QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
+ roce_set_field(context->qpc_bytes_176,
+ QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
+ 0);
+ roce_set_field(context->qpc_bytes_180,
+ QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
+ roce_set_field(context->qpc_bytes_180,
+ QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
+
+ context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+
+ roce_set_field(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+ roce_set_bit(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
+ 0);
+ } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
+ roce_set_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
+ attr->qp_state);
+
+ } else {
+ dev_err(dev, "not support this modify\n");
+ goto out;
+ }
+
+ /* Every state transition must update the QP state field */
+ roce_set_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
+
+ /* SW passes the context to HW */
+ ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
+ to_hns_roce_state(cur_state),
+ to_hns_roce_state(new_state), context,
+ hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_modify failed\n");
+ goto out;
+ }
+
+ /*
+ * The driver uses rst2init instead of init2init, so the
+ * hardware needs to flush the RQ HEAD by doorbell again.
+ */
+ if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ /* Memory barrier */
+ wmb();
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ /* SW updates the GSI RQ head */
+ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->port);
+ roce_set_field(reg_val,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+ } else {
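+ /* Ring the RQ doorbell to publish the current RQ head to hardware */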
+ rq_db.u32_4 = 0;
+ rq_db.u32_8 = 0;
+
+ roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
+ 1);
+
+ doorbell[0] = rq_db.u32_4;
+ doorbell[1] = rq_db.u32_8;
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ }
+ }
+
+ hr_qp->state = new_state;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT)
+ hr_qp->port = (attr->port_num - 1);
+
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ }
+out:
+ kfree(context);
+ return ret;
+}
+
+int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
+ return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+ else
+ return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+}
+
+static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
+{
+ switch (state) {
+ case HNS_ROCE_QP_STATE_RST:
+ return IB_QPS_RESET;
+ case HNS_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case HNS_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case HNS_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case HNS_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case HNS_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
+static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_qp_context *hr_context)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_QUERY_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+ if (!ret)
+ memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+ else
+ dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_context *context;
+ int tmp_qp_state = 0;
+ int ret = 0;
+ int state;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ mutex_lock(&hr_qp->mutex);
+
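+ /* A QP in RESET has no valid hardware context to query */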
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
+ ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
+ if (ret) {
+ dev_err(dev, "query qpc error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state = roce_get_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
+ tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
+ if (tmp_qp_state == -1) {
+ dev_err(dev, "to_ib_qp_state error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ hr_qp->state = (u8)tmp_qp_state;
+ qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+ qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_MTU_M,
+ QP_CONTEXT_QPC_BYTES_48_MTU_S);
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+ qp_attr->qkey = QKEY_VAL;
+
+ qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
+ qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
+ qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
+ qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
+ ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
+ ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ qp_attr->ah_attr.sl = roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S);
+ qp_attr->ah_attr.grh.flow_label = roce_get_field(
+ context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
+ qp_attr->ah_attr.grh.sgid_index = roce_get_field(
+ context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
+ qp_attr->ah_attr.grh.hop_limit = roce_get_field(
+ context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
+ qp_attr->ah_attr.grh.traffic_class = roce_get_field(
+ context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
+
+ memcpy(qp_attr->ah_attr.grh.dgid.raw, context->dgid,
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+ }
+
+ qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
+ qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
+ qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
+ qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
+ qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
+ qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
+ qp_attr->rnr_retry = context->rnr_retry;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ kfree(context);
+ return ret;
+}
+
+static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ int is_user)
+{
+ u32 sdbinvcnt;
+ unsigned long end = 0;
+ u32 sdbinvcnt_val;
+ u32 sdbsendptr_val;
+ u32 sdbisusepr_val;
+ struct hns_roce_cq *send_cq, *recv_cq;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
+ if (hr_qp->state != IB_QPS_RESET) {
+ /*
+ * Set the QP to ERR and wait for the hardware
+ * to finish processing all outstanding doorbells.
+ */
+ if (hns_roce_v1_qp_modify(hr_dev, NULL,
+ to_hns_roce_state(
+ (enum ib_qp_state)hr_qp->state),
+ HNS_ROCE_QP_STATE_ERR, NULL,
+ hr_qp))
+ dev_err(dev, "modify QP %06lx to ERR failed.\n",
+ hr_qp->qpn);
+
+ /* Record issued doorbell */
+ sdbisusepr_val = roce_read(hr_dev,
+ ROCEE_SDB_ISSUE_PTR_REG);
+ /*
+ * Poll the doorbell processing status until the
+ * hardware has processed all issued doorbells.
+ */
+ end = msecs_to_jiffies(
+ HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
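+ /* The (short) cast makes the 16-bit pointer comparison wraparound-safe */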
+ do {
+ sdbsendptr_val = roce_read(hr_dev,
+ ROCEE_SDB_SEND_PTR_REG);
+ if (!time_before(jiffies, end)) {
+ dev_err(dev, "destroy qp(0x%lx) timeout!!!",
+ hr_qp->qpn);
+ break;
+ }
+ } while ((short)(roce_get_field(sdbsendptr_val,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
+ roce_get_field(sdbisusepr_val,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
+ ) < 0);
+
+ /* Record the current send doorbell invalidate count */
+ sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+
+ /* Poll the invalidate count until the hardware has caught up */
+ do {
+ sdbinvcnt_val = roce_read(hr_dev,
+ ROCEE_SDB_INV_CNT_REG);
+ if (!time_before(jiffies, end)) {
+ dev_err(dev, "destroy qp(0x%lx) timeout!!!",
+ hr_qp->qpn);
+ dev_err(dev, "SdbInvCnt = 0x%x\n",
+ sdbinvcnt_val);
+ break;
+ }
+ } while ((short)(roce_get_field(sdbinvcnt_val,
+ ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
+ ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
+ (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
+
+ /* Modify qp to reset before destroying qp */
+ if (hns_roce_v1_qp_modify(hr_dev, NULL,
+ to_hns_roce_state(
+ (enum ib_qp_state)hr_qp->state),
+ HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
+ dev_err(dev, "modify QP %06lx to RESET failed.\n",
+ hr_qp->qpn);
+ }
+ }
+
+ send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+ recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
+ hns_roce_lock_cqs(send_cq, recv_cq);
+
+ if (!is_user) {
+ __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+
+ hns_roce_unlock_cqs(send_cq, recv_cq);
+
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+ /* Not a special QP (GSI/SMI), so free its QPN */
+ if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UD))
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+ if (is_user) {
+ ib_umem_release(hr_qp->umem);
+ } else {
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ }
+}
+
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ kfree(hr_to_hr_sqp(hr_qp));
+ else
+ kfree(hr_qp);
+
+ return 0;
+}
+
+struct hns_roce_v1_priv hr_v1_priv;
+
+struct hns_roce_hw hns_roce_hw_v1 = {
+ .reset = hns_roce_v1_reset,
+ .hw_profile = hns_roce_v1_profile,
+ .hw_init = hns_roce_v1_init,
+ .hw_exit = hns_roce_v1_exit,
+ .set_gid = hns_roce_v1_set_gid,
+ .set_mac = hns_roce_v1_set_mac,
+ .set_mtu = hns_roce_v1_set_mtu,
+ .write_mtpt = hns_roce_v1_write_mtpt,
+ .write_cqc = hns_roce_v1_write_cqc,
+ .modify_qp = hns_roce_v1_modify_qp,
+ .query_qp = hns_roce_v1_query_qp,
+ .destroy_qp = hns_roce_v1_destroy_qp,
+ .post_send = hns_roce_v1_post_send,
+ .post_recv = hns_roce_v1_post_recv,
+ .req_notify_cq = hns_roce_v1_req_notify_cq,
+ .poll_cq = hns_roce_v1_poll_cq,
+ .priv = &hr_v1_priv,
+};
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
new file mode 100644
index 000000000000..316b592b1636
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V1_H
+#define _HNS_ROCE_HW_V1_H
+
+#define CQ_STATE_VALID 2
+
+#define HNS_ROCE_V1_MAX_PD_NUM 0x8000
+#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000
+#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000
+
+#define HNS_ROCE_V1_MAX_QP_NUM 0x40000
+#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000
+
+#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000
+
+#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000
+
+#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128
+#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128
+
+#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64
+#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64
+#define HNS_ROCE_V1_SG_NUM 2
+#define HNS_ROCE_V1_INLINE_SIZE 32
+
+#define HNS_ROCE_V1_UAR_NUM 256
+#define HNS_ROCE_V1_PHY_UAR_NUM 8
+
+#define HNS_ROCE_V1_GID_NUM 16
+
+#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
+#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+
+#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
+#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
+#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
+#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
+#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
+
+#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
+
+#define HNS_ROCE_V1_EXT_RAQ_WF 8
+#define HNS_ROCE_V1_RAQ_ENTRY 64
+#define HNS_ROCE_V1_RAQ_DEPTH 32768
+#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)
+
+#define HNS_ROCE_V1_SDB_DEPTH 0x400
+#define HNS_ROCE_V1_ODB_DEPTH 0x400
+
+#define HNS_ROCE_V1_DB_RSVD 0x80
+
+#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+
+#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000
+#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000
+#define HNS_ROCE_V1_EXT_SDB_ENTRY 16
+#define HNS_ROCE_V1_EXT_ODB_ENTRY 16
+#define HNS_ROCE_V1_EXT_SDB_SIZE \
+ (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
+#define HNS_ROCE_V1_EXT_ODB_SIZE \
+ (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)
+
+#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_EXT_SDB_ALFUL \
+ (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_EXT_ODB_ALFUL \
+ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+
+#define HNS_ROCE_ODB_POLL_MODE 0
+
+#define HNS_ROCE_SDB_NORMAL_MODE 0
+#define HNS_ROCE_SDB_EXTEND_MODE 1
+
+#define HNS_ROCE_ODB_EXTEND_MODE 1
+
+#define KEY_VALID 0x02
+
+#define HNS_ROCE_CQE_QPN_MASK 0x3ffff
+#define HNS_ROCE_CQE_STATUS_MASK 0x1f
+#define HNS_ROCE_CQE_OPCODE_MASK 0xf
+
+#define HNS_ROCE_CQE_SUCCESS 0x00
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03
+#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04
+#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05
+#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a
+#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b
+#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c
+
+#define QP1C_CFGN_OFFSET 0x28
+#define PHY_PORT_OFFSET 0x8
+#define MTPT_IDX_SHIFT 16
+#define ALL_PORT_VAL_OPEN 0x3f
+#define POL_TIME_INTERVAL_VAL 0x80
+#define SLEEP_TIME_INTERVAL 20
+#define SQ_PSN_SHIFT 8
+#define QKEY_VAL 0x80010000
+#define SDB_INV_CNT_OFFSET 8
+
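+/* CQ context layout used by the hardware */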
+struct hns_roce_cq_context {
+ u32 cqc_byte_4;
+ u32 cq_bt_l;
+ u32 cqc_byte_12;
+ u32 cur_cqe_ba0_l;
+ u32 cqc_byte_20;
+ u32 cqe_tptr_addr_l;
+ u32 cur_cqe_ba1_l;
+ u32 cqc_byte_32;
+};
+
+#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
+#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
+ (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)
+
+#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
+#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
+ (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
+ (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
+#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
+#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
+#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9
+
+#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
+#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
+#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15
+
+#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
+#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
+
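+/* Completion queue entry layout (HNS_ROCE_V1_CQE_ENTRY_SIZE bytes) */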
+struct hns_roce_cqe {
+ u32 cqe_byte_4;
+ union {
+ u32 r_key;
+ u32 immediate_data;
+ };
+ u32 byte_cnt;
+ u32 cqe_byte_16;
+ u32 cqe_byte_20;
+ u32 s_mac_l;
+ u32 cqe_byte_28;
+ u32 reserved;
+};
+
+#define CQE_BYTE_4_OWNER_S 7
+#define CQE_BYTE_4_SQ_RQ_FLAG_S 14
+
+#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
+#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
+ (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)
+
+#define CQE_BYTE_4_WQE_INDEX_S 16
+#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)
+
+#define CQE_BYTE_4_OPERATION_TYPE_S 0
+#define CQE_BYTE_4_OPERATION_TYPE_M \
+ (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)
+
+#define CQE_BYTE_4_IMM_INDICATOR_S 15
+
+#define CQE_BYTE_16_LOCAL_QPN_S 0
+#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)
+
+#define CQE_BYTE_20_PORT_NUM_S 26
+#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)
+
+#define CQE_BYTE_20_SL_S 24
+#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)
+
+#define CQE_BYTE_20_REMOTE_QPN_S 0
+#define CQE_BYTE_20_REMOTE_QPN_M \
+ (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)
+
+#define CQE_BYTE_20_GRH_PRESENT_S 29
+
+#define CQE_BYTE_28_P_KEY_IDX_S 16
+#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)
+
+#define CQ_DB_REQ_NOT_SOL 0
+#define CQ_DB_REQ_NOT (1 << 16)
+
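+/* Memory protection table entry layout (HNS_ROCE_V1_MTPT_ENTRY_SIZE bytes) */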
+struct hns_roce_v1_mpt_entry {
+ u32 mpt_byte_4;
+ u32 pbl_addr_l;
+ u32 mpt_byte_12;
+ u32 virt_addr_l;
+ u32 virt_addr_h;
+ u32 length;
+ u32 mpt_byte_28;
+ u32 pa0_l;
+ u32 mpt_byte_36;
+ u32 mpt_byte_40;
+ u32 mpt_byte_44;
+ u32 mpt_byte_48;
+ u32 pa4_l;
+ u32 mpt_byte_56;
+ u32 mpt_byte_60;
+ u32 mpt_byte_64;
+};
+
+#define MPT_BYTE_4_KEY_STATE_S 0
+#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)
+
+#define MPT_BYTE_4_KEY_S 8
+#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)
+
+#define MPT_BYTE_4_PAGE_SIZE_S 16
+#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)
+
+#define MPT_BYTE_4_MW_TYPE_S 20
+
+#define MPT_BYTE_4_MW_BIND_ENABLE_S 21
+
+#define MPT_BYTE_4_OWN_S 22
+
+#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
+#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
+ (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)
+
+#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
+#define MPT_BYTE_4_LOCAL_WRITE_S 27
+#define MPT_BYTE_4_REMOTE_WRITE_S 28
+#define MPT_BYTE_4_REMOTE_READ_S 29
+#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
+#define MPT_BYTE_4_ADDRESS_TYPE_S 31
+
+#define MPT_BYTE_12_PBL_ADDR_H_S 0
+#define MPT_BYTE_12_PBL_ADDR_H_M \
+ (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)
+
+#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
+#define MPT_BYTE_12_MW_BIND_COUNTER_M \
+ (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)
+
+#define MPT_BYTE_28_PD_S 0
+#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)
+
+#define MPT_BYTE_28_L_KEY_IDX_L_S 16
+#define MPT_BYTE_28_L_KEY_IDX_L_M \
+ (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)
+
+#define MPT_BYTE_36_PA0_H_S 0
+#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)
+
+#define MPT_BYTE_36_PA1_L_S 8
+#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)
+
+#define MPT_BYTE_40_PA1_H_S 0
+#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)
+
+#define MPT_BYTE_40_PA2_L_S 16
+#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)
+
+#define MPT_BYTE_44_PA2_H_S 0
+#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)
+
+#define MPT_BYTE_44_PA3_L_S 24
+#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)
+
+#define MPT_BYTE_48_PA3_H_S 0
+#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)
+
+#define MPT_BYTE_56_PA4_H_S 0
+#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)
+
+#define MPT_BYTE_56_PA5_L_S 8
+#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)
+
+#define MPT_BYTE_60_PA5_H_S 0
+#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)
+
+#define MPT_BYTE_60_PA6_L_S 16
+#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)
+
+#define MPT_BYTE_64_PA6_H_S 0
+#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)
+
+#define MPT_BYTE_64_L_KEY_IDX_H_S 24
+#define MPT_BYTE_64_L_KEY_IDX_H_M \
+ (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
+
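+/* Send WQE control segment; fields are big endian */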
+struct hns_roce_wqe_ctrl_seg {
+ __be32 sgl_pa_h;
+ __be32 flag;
+ __be32 imm_data;
+ __be32 msg_length;
+};
+
+struct hns_roce_wqe_data_seg {
+ __be64 addr;
+ __be32 lkey;
+ __be32 len;
+};
+
+struct hns_roce_wqe_raddr_seg {
+ __be32 rkey;
+ __be32 len; /* reserved */
+ __be64 raddr;
+};
+
+struct hns_roce_rq_wqe_ctrl {
+ u32 rwqe_byte_4;
+ u32 rocee_sgl_ba_l;
+ u32 rwqe_byte_12;
+ u32 reserved[5];
+};
+
+#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
+#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
+ (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)
+
+#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000
+
+#define GID_LEN 16
+
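+/* UD send WQE layout: destination MAC, address vector and two va/l_key buffer descriptors */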
+struct hns_roce_ud_send_wqe {
+ u32 dmac_h;
+ u32 u32_8;
+ u32 immediate_data;
+
+ u32 u32_16;
+ union {
+ unsigned char dgid[GID_LEN];
+ struct {
+ u32 u32_20;
+ u32 u32_24;
+ u32 u32_28;
+ u32 u32_32;
+ };
+ };
+
+ u32 u32_36;
+ u32 u32_40;
+
+ u32 va0_l;
+ u32 va0_h;
+ u32 l_key0;
+
+ u32 va1_l;
+ u32 va1_h;
+ u32 l_key1;
+};
+
+#define UD_SEND_WQE_U32_4_DMAC_0_S 0
+#define UD_SEND_WQE_U32_4_DMAC_0_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_1_S 8
+#define UD_SEND_WQE_U32_4_DMAC_1_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_2_S 16
+#define UD_SEND_WQE_U32_4_DMAC_2_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_3_S 24
+#define UD_SEND_WQE_U32_4_DMAC_3_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)
+
+#define UD_SEND_WQE_U32_8_DMAC_4_S 0
+#define UD_SEND_WQE_U32_8_DMAC_4_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)
+
+#define UD_SEND_WQE_U32_8_DMAC_5_S 8
+#define UD_SEND_WQE_U32_8_DMAC_5_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
+
+#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
+#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
+ (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
+
+#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
+#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
+ (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)
+
+#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31
+
+#define UD_SEND_WQE_U32_16_DEST_QP_S 0
+#define UD_SEND_WQE_U32_16_DEST_QP_M \
+ (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)
+
+#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
+#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)
+
+#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
+#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
+ (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)
+
+#define UD_SEND_WQE_U32_36_PRIORITY_S 20
+#define UD_SEND_WQE_U32_36_PRIORITY_M \
+ (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)
+
+#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
+#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)
+
+#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
+#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)
+
+#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
+#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
+
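+/* QP1 (GSI/SMI) context, written directly to the per-port QP1C registers */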
+struct hns_roce_sqp_context {
+ u32 qp1c_bytes_4;
+ u32 sq_rq_bt_l;
+ u32 qp1c_bytes_12;
+ u32 qp1c_bytes_16;
+ u32 qp1c_bytes_20;
+ u32 qp1c_bytes_28;
+ u32 cur_rq_wqe_ba_l;
+ u32 qp1c_bytes_32;
+ u32 cur_sq_wqe_ba_l;
+ u32 qp1c_bytes_40;
+};
+
+#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
+#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
+
+#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
+#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)
+
+#define QP1C_BYTES_4_PD_S 16
+#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)
+
+#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
+#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
+ (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)
+
+#define QP1C_BYTES_16_RQ_HEAD_S 0
+#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)
+
+#define QP1C_BYTES_16_PORT_NUM_S 16
+#define QP1C_BYTES_16_PORT_NUM_M \
+ (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)
+
+#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
+#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
+#define QP1C_BYTES_16_RQ_BA_FLG_S 29
+#define QP1C_BYTES_16_SQ_BA_FLG_S 30
+#define QP1C_BYTES_16_QP1_ERR_S 31
+
+#define QP1C_BYTES_20_SQ_HEAD_S 0
+#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)
+
+#define QP1C_BYTES_20_PKEY_IDX_S 16
+#define QP1C_BYTES_20_PKEY_IDX_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)
+
+#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
+#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)
+
+#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
+#define QP1C_BYTES_28_RQ_CUR_IDX_M \
+ (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)
+
+#define QP1C_BYTES_32_TX_CQ_NUM_S 0
+#define QP1C_BYTES_32_TX_CQ_NUM_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)
+
+#define QP1C_BYTES_32_RX_CQ_NUM_S 16
+#define QP1C_BYTES_32_RX_CQ_NUM_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)
+
+#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
+#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)
+
+#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
+#define QP1C_BYTES_40_SQ_CUR_IDX_M \
+ (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)
+
+#define HNS_ROCE_WQE_INLINE (1UL<<31)
+#define HNS_ROCE_WQE_SE (1UL<<30)
+
+#define HNS_ROCE_WQE_SGE_NUM_BIT 24
+#define HNS_ROCE_WQE_IMM (1UL<<23)
+#define HNS_ROCE_WQE_FENCE (1UL<<21)
+#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)
+
+#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
+#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
+#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
+#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
+#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
+#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
+
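+/* QP context passed to the hardware via the modify/query QP mailbox commands */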
+struct hns_roce_qp_context {
+ u32 qpc_bytes_4;
+ u32 qpc_bytes_8;
+ u32 qpc_bytes_12;
+ u32 qpc_bytes_16;
+ u32 sq_rq_bt_l;
+ u32 qpc_bytes_24;
+ u32 irrl_ba_l;
+ u32 qpc_bytes_32;
+ u32 qpc_bytes_36;
+ u32 dmac_l;
+ u32 qpc_bytes_44;
+ u32 qpc_bytes_48;
+ u8 dgid[16];
+ u32 qpc_bytes_68;
+ u32 cur_rq_wqe_ba_l;
+ u32 qpc_bytes_76;
+ u32 rx_rnr_time;
+ u32 qpc_bytes_84;
+ u32 qpc_bytes_88;
+ union {
+ u32 rx_sge_len;
+ u32 dma_length;
+ };
+ union {
+ u32 rx_sge_num;
+ u32 rx_send_pktn;
+ u32 r_key;
+ };
+ u32 va_l;
+ u32 va_h;
+ u32 qpc_bytes_108;
+ u32 qpc_bytes_112;
+ u32 rx_cur_sq_wqe_ba_l;
+ u32 qpc_bytes_120;
+ u32 qpc_bytes_124;
+ u32 qpc_bytes_128;
+ u32 qpc_bytes_132;
+ u32 qpc_bytes_136;
+ u32 qpc_bytes_140;
+ u32 qpc_bytes_144;
+ u32 qpc_bytes_148;
+ union {
+ u32 rnr_retry;
+ u32 ack_time;
+ };
+ u32 qpc_bytes_156;
+ u32 pkt_use_len;
+ u32 qpc_bytes_164;
+ u32 qpc_bytes_168;
+ union {
+ u32 sge_use_len;
+ u32 pa_use_len;
+ };
+ u32 qpc_bytes_176;
+ u32 qpc_bytes_180;
+ u32 tx_cur_sq_wqe_ba_l;
+ u32 qpc_bytes_188;
+ u32 rvd21;
+};
+
+#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
+#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
+#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
+#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
+#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
+#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7
+
+#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
+#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)
+
+#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
+#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)
+
+#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
+#define QP_CONTEXT_QPC_BYTES_4_PD_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)
+
+#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
+#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)
+
+#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
+#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)
+
+#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
+#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)
+
+#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
+#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)
+
+#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
+#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
+ (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
+#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)
+
+#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23
+
+#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
+ (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
+#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)
+
+#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
+#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
+#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
+#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23
+
+#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
+#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)
+
+#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
+#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)
+
+#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
+#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
+#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
+#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
+#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
+#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
+ (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
+#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
+#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)
+
+#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
+#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
+#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
+#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
+#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25
+
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
+ (((1UL << 2) - 1) << \
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)
+
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25
+
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)
+
+#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
+#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
+#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24
+
+#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
+#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27
+
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)
+
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31
+
+#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
+#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
+#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
+#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
+#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
+#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
+#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
+#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
+#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
+#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
+#define QP_CONTEXT_QPC_BYTES_156_SL_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
+#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
+#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)
+
+#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
+#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
+#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
+#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
+#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
+#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30
+
+#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
+#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
+#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8
+
+#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
+
+struct hns_roce_rq_db {
+ u32 u32_4;
+ u32 u32_8;
+};
+
+#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
+#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
+ (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)
+
+#define RQ_DOORBELL_U32_8_QPN_S 0
+#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)
+
+#define RQ_DOORBELL_U32_8_CMD_S 28
+#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)
+
+#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
+
+struct hns_roce_sq_db {
+ u32 u32_4;
+ u32 u32_8;
+};
+
+#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
+#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
+ (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
+
+#define SQ_DOORBELL_U32_4_PORT_S 18
+#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
+
+#define SQ_DOORBELL_U32_8_QPN_S 0
+#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)
+
+#define SQ_DOORBELL_HW_SYNC_S 31
+
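+/*
+ * Illustrative only: the _S/_M pairs above are intended for use with the
+ * roce_set_field()/roce_set_bit() helpers (defined in hns_roce_common.h
+ * elsewhere in this series). A sketch of building an SQ doorbell, where
+ * sq_head and qpn stand for the producer index and QP number:
+ *
+ *	struct hns_roce_sq_db sq_db;
+ *
+ *	sq_db.u32_4 = 0;
+ *	sq_db.u32_8 = 0;
+ *	roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
+ *		       SQ_DOORBELL_U32_4_SQ_HEAD_S, sq_head);
+ *	roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
+ *		       SQ_DOORBELL_U32_8_QPN_S, qpn);
+ *	roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
+ */
+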
+struct hns_roce_ext_db {
+ int esdb_dep;
+ int eodb_dep;
+ struct hns_roce_buf_list *sdb_buf_list;
+ struct hns_roce_buf_list *odb_buf_list;
+};
+
+struct hns_roce_db_table {
+ int sdb_ext_mod;
+ int odb_ext_mod;
+ struct hns_roce_ext_db *ext_db;
+};
+
+struct hns_roce_v1_priv {
+ struct hns_roce_db_table db_table;
+ struct hns_roce_raq_table raq_table;
+};
+
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
new file mode 100644
index 000000000000..f64f0dde9a88
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -0,0 +1,1142 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/acpi.h>
+#include <linux/of_platform.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_user.h"
+#include "hns_roce_hem.h"
+
+/**
+ * hns_roce_addrconf_ifid_eui48 - Derive the interface ID for the default GID.
+ * @eui: buffer receiving the 8-byte interface ID (GID bytes 8..15).
+ * @vlan_id: VLAN ID, or a value >= 0x1000 when no VLAN is configured.
+ * @dev: net device whose MAC address is used.
+ * Description:
+ * Convert the MAC address (and VLAN ID, if any) to a GID:
+ * gid[0..7] = fe80 0000 0000 0000
+ * gid[8] = mac[0] ^ 2
+ * gid[9] = mac[1]
+ * gid[10] = mac[2]
+ * gid[11] = ff (or the VLAN ID high byte when a VLAN is set)
+ * gid[12] = fe (or the VLAN ID low byte)
+ * gid[13] = mac[3]
+ * gid[14] = mac[4]
+ * gid[15] = mac[5]
+ */
+static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
+static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ memset(gid, 0, sizeof(*gid));
+ gid->raw[0] = 0xFE;
+ gid->raw[1] = 0x80;
+ hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
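+
+/*
+ * Worked example (arbitrary MAC chosen for illustration): for a port with
+ * MAC 00:1b:44:11:3a:b7 and no VLAN, the default GID built above is
+ * fe80:0000:0000:0000:021b:44ff:fe11:3ab7, i.e. the usual link-local
+ * EUI-64 derivation with the universal/local bit flipped.
+ */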
+
+/**
+ * hns_get_gid_index - Get the GID table index for (port, gid_index).
+ * @hr_dev: pointer to structure hns_roce_dev.
+ * @port: port number, in the range 0 .. num_ports - 1.
+ * @gid_index: per-port GID index, in the range 0 .. gid_table_len - 1.
+ * Description:
+ * The N ports share one GID table, laid out row by row:
+ * GID[0][0], GID[1][0], ..... GID[N - 1][0],
+ * GID[0][1], GID[1][1], ..... GID[N - 1][1],
+ * and so on.
+ */
+int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+{
+ return gid_index * hr_dev->caps.num_ports + port;
+}
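+
+/*
+ * For example, with two ports the shared table is laid out as
+ * slot 0 = GID[port 0][0], slot 1 = GID[port 1][0], slot 2 = GID[port 0][1],
+ * slot 3 = GID[port 1][1], ... so (port = 1, gid_index = 2) maps to slot 5.
+ */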
+
+static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 gid_idx = 0;
+
+ if (gid_index >= hr_dev->caps.gid_table_len[port]) {
+ dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
+ gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
+ return -EINVAL;
+ }
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
+ if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
+ return -EINVAL;
+
+ memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
+
+ hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
+
+ return 0;
+}
+
+static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+{
+ u8 phy_port;
+ u32 i = 0;
+
+ if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
+ return;
+
+ for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
+ hr_dev->dev_addr[port][i] = addr[i];
+
+ phy_port = hr_dev->iboe.phy_port[port];
+ hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+}
+
+static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+{
+ u8 phy_port = hr_dev->iboe.phy_port[port];
+ enum ib_mtu tmp;
+
+ tmp = iboe_get_mtu(mtu);
+ if (!tmp)
+ tmp = IB_MTU_256;
+
+ hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+}
+
+static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+{
+ struct ib_event event;
+
+ /* Refresh gid in ib_cache */
+ event.device = &hr_dev->ib_dev;
+ event.element.port_num = port + 1;
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+}
+
+static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
+ unsigned long event)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct net_device *netdev;
+ unsigned long flags;
+ union ib_gid gid;
+ int ret = 0;
+
+ netdev = hr_dev->iboe.netdevs[port];
+ if (!netdev) {
+ dev_err(dev, "port(%d) can't find netdev\n", port);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGEADDR:
+ hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ hns_roce_make_default_gid(netdev, &gid);
+ ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
+ if (!ret)
+ hns_roce_update_gids(hr_dev, port);
+ break;
+ case NETDEV_DOWN:
+ /*
+ * The v1 engine only supports bringing all ports down together,
+ * so a per-port NETDEV_DOWN event needs no handling here.
+ */
+ break;
+ default:
+ dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
+ break;
+ }
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ return ret;
+}
+
+static int hns_roce_netdev_event(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct hns_roce_dev *hr_dev = NULL;
+ u8 port = 0;
+ int ret = 0;
+
+ hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
+ iboe = &hr_dev->iboe;
+
+ for (port = 0; port < hr_dev->caps.num_ports; port++) {
+ if (dev == iboe->netdevs[port]) {
+ ret = handle_en_event(hr_dev, port, event);
+ if (ret)
+ return NOTIFY_DONE;
+ break;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void hns_roce_addr_event(int event, struct net_device *event_netdev,
+ struct hns_roce_dev *hr_dev, union ib_gid *gid)
+{
+ struct hns_roce_ib_iboe *iboe = NULL;
+ int gid_table_len = 0;
+ unsigned long flags;
+ union ib_gid zgid;
+ u8 gid_idx = 0;
+ u8 port = 0;
+ int i = 0;
+ int free;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+ rdma_vlan_dev_real_dev(event_netdev) :
+ event_netdev;
+
+ if (event != NETDEV_UP && event != NETDEV_DOWN)
+ return;
+
+ iboe = &hr_dev->iboe;
+ while (port < hr_dev->caps.num_ports) {
+ if (real_dev == iboe->netdevs[port])
+ break;
+ port++;
+ }
+
+ if (port >= hr_dev->caps.num_ports) {
+ dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
+ return;
+ }
+
+ memset(zgid.raw, 0, sizeof(zgid.raw));
+ free = -1;
+ gid_table_len = hr_dev->caps.gid_table_len[port];
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ for (i = 0; i < gid_table_len; i++) {
+ gid_idx = hns_get_gid_index(hr_dev, port, i);
+ if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
+ sizeof(gid->raw)))
+ break;
+ if (free < 0 && !memcmp(zgid.raw,
+ iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
+ free = i;
+ }
+
+ if (i >= gid_table_len) {
+ if (free < 0) {
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ dev_dbg(&hr_dev->pdev->dev,
+ "gid_index overflow, port(%d)\n", port);
+ return;
+ }
+ if (!hns_roce_set_gid(hr_dev, port, free, gid))
+ hns_roce_update_gids(hr_dev, port);
+ } else if (event == NETDEV_DOWN) {
+ if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
+ hns_roce_update_gids(hr_dev, port);
+ }
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+}
+
+static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct hns_roce_dev *hr_dev;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ union ib_gid gid;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+ hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
+
+ hns_roce_addr_event(event, dev, hr_dev, &gid);
+
+ return NOTIFY_DONE;
+}
+
+static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
+{
+ struct in_ifaddr *ifa_list = NULL;
+ union ib_gid gid = {{0} };
+ u32 ipaddr = 0;
+ int index = 0;
+ int ret = 0;
+ u8 i = 0;
+
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ hns_roce_set_mtu(hr_dev, i,
+ ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+ hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
+
+ if (hr_dev->iboe.netdevs[i]->ip_ptr) {
+ ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
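+ /*
+ * GID index 0 holds the default (MAC-derived) GID, so
+ * IP-derived GIDs start at index 1.
+ */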
+ index = 1;
+ while (ifa_list) {
+ ipaddr = ifa_list->ifa_address;
+ ipv6_addr_set_v4mapped(ipaddr,
+ (struct in6_addr *)&gid);
+ ret = hns_roce_set_gid(hr_dev, i, index, &gid);
+ if (ret)
+ break;
+ index++;
+ ifa_list = ifa_list->ifa_next;
+ }
+ hns_roce_update_gids(hr_dev, i);
+ }
+ }
+
+ return ret;
+}
+
+static int hns_roce_query_device(struct ib_device *ib_dev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->sys_image_guid = hr_dev->sys_image_guid;
+ props->max_mr_size = (u64)(~(0ULL));
+ props->page_size_cap = hr_dev->caps.page_size_cap;
+ props->vendor_id = hr_dev->vendor_id;
+ props->vendor_part_id = hr_dev->vendor_part_id;
+ props->hw_ver = hr_dev->hw_rev;
+ props->max_qp = hr_dev->caps.num_qps;
+ props->max_qp_wr = hr_dev->caps.max_wqes;
+ props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_LOCAL_DMA_LKEY;
+ props->max_sge = hr_dev->caps.max_sq_sg;
+ props->max_sge_rd = 1;
+ props->max_cq = hr_dev->caps.num_cqs;
+ props->max_cqe = hr_dev->caps.max_cqes;
+ props->max_mr = hr_dev->caps.num_mtpts;
+ props->max_pd = hr_dev->caps.num_pds;
+ props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
+ props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_pkeys = 1;
+ props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+
+ return 0;
+}
+
+static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+ struct ib_port_attr *props)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct net_device *net_dev;
+ unsigned long flags;
+ enum ib_mtu mtu;
+ u8 port;
+
+ assert(port_num > 0);
+ port = port_num - 1;
+
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = hr_dev->caps.max_mtu;
+ props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP |
+ IB_PORT_BOOT_MGMT_SUP;
+ props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+ props->pkey_tbl_len = 1;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ net_dev = hr_dev->iboe.netdevs[port];
+ if (!net_dev) {
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ dev_err(dev, "find netdev %d failed!\r\n", port);
+ return -EINVAL;
+ }
+
+ mtu = iboe_get_mtu(net_dev->mtu);
+ props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
+ props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
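+ /* IB physical port state: 5 is LinkUp, 3 is Disabled */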
+ props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
+}
+
+static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
+ union ib_gid *gid)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 gid_idx = 0;
+ u8 port;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
+ index >= hr_dev->caps.gid_table_len[port_num - 1]) {
+ dev_err(dev,
+ "port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
+ port_num, index, hr_dev->caps.num_ports,
+ hr_dev->caps.gid_table_len[port_num - 1] - 1);
+ return -EINVAL;
+ }
+
+ port = port_num - 1;
+ gid_idx = hns_get_gid_index(hr_dev, port, index);
+ if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
+ dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
+ port_num, index, HNS_ROCE_MAX_GID_NUM);
+ return -EINVAL;
+ }
+
+ memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
+ HNS_ROCE_GID_SIZE);
+
+ return 0;
+}
+
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+ u16 *pkey)
+{
+ *pkey = PKEY_ID;
+
+ return 0;
+}
+
+static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
+ struct ib_device_modify *props)
+{
+ unsigned long flags;
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
+ memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
+ spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
+ }
+
+ return 0;
+}
+
+static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
+ struct ib_port_modify *props)
+{
+ return 0;
+}
+
+static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
+ struct ib_udata *udata)
+{
+ int ret = 0;
+ struct hns_roce_ucontext *context;
+ struct hns_roce_ib_alloc_ucontext_resp resp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ resp.qp_tab_size = hr_dev->caps.num_qps;
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+ if (ret)
+ goto error_fail_uar_alloc;
+
+ ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (ret)
+ goto error_fail_copy_to_udata;
+
+ return &context->ibucontext;
+
+error_fail_copy_to_udata:
+ hns_roce_uar_free(hr_dev, &context->uar);
+
+error_fail_uar_alloc:
+ kfree(context);
+
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+
+ hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+ kfree(context);
+
+ return 0;
+}
+
+static int hns_roce_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma)
+{
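+ /* Only offset 0 may be mapped: the single UAR (doorbell) page */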
+ if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
+ return -EINVAL;
+
+ if (vma->vm_pgoff == 0) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ to_hr_ucontext(context)->uar.pfn,
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ ret = hns_roce_query_port(ib_dev, port_num, &attr);
+ if (ret)
+ return ret;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+
+ unregister_inetaddr_notifier(&iboe->nb_inet);
+ unregister_netdevice_notifier(&iboe->nb);
+ ib_unregister_device(&hr_dev->ib_dev);
+}
+
+static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct ib_device *ib_dev = NULL;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ iboe = &hr_dev->iboe;
+
+ ib_dev = &hr_dev->ib_dev;
+ strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+
+ ib_dev->owner = THIS_MODULE;
+ ib_dev->node_type = RDMA_NODE_IB_CA;
+ ib_dev->dma_device = dev;
+
+ ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+ ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+ ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+ ib_dev->uverbs_abi_ver = 1;
+ ib_dev->uverbs_cmd_mask =
+ (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ULL << IB_USER_VERBS_CMD_REG_MR) |
+ (1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
+
+ /* HCA, device and port operations */
+ ib_dev->modify_device = hns_roce_modify_device;
+ ib_dev->query_device = hns_roce_query_device;
+ ib_dev->query_port = hns_roce_query_port;
+ ib_dev->modify_port = hns_roce_modify_port;
+ ib_dev->get_link_layer = hns_roce_get_link_layer;
+ ib_dev->query_gid = hns_roce_query_gid;
+ ib_dev->query_pkey = hns_roce_query_pkey;
+ ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
+ ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
+ ib_dev->mmap = hns_roce_mmap;
+
+ /* PD */
+ ib_dev->alloc_pd = hns_roce_alloc_pd;
+ ib_dev->dealloc_pd = hns_roce_dealloc_pd;
+
+ /* AH */
+ ib_dev->create_ah = hns_roce_create_ah;
+ ib_dev->query_ah = hns_roce_query_ah;
+ ib_dev->destroy_ah = hns_roce_destroy_ah;
+
+ /* QP */
+ ib_dev->create_qp = hns_roce_create_qp;
+ ib_dev->modify_qp = hns_roce_modify_qp;
+ ib_dev->query_qp = hr_dev->hw->query_qp;
+ ib_dev->destroy_qp = hr_dev->hw->destroy_qp;
+ ib_dev->post_send = hr_dev->hw->post_send;
+ ib_dev->post_recv = hr_dev->hw->post_recv;
+
+ /* CQ */
+ ib_dev->create_cq = hns_roce_ib_create_cq;
+ ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
+ ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
+ ib_dev->poll_cq = hr_dev->hw->poll_cq;
+
+ /* MR */
+ ib_dev->get_dma_mr = hns_roce_get_dma_mr;
+ ib_dev->reg_user_mr = hns_roce_reg_user_mr;
+ ib_dev->dereg_mr = hns_roce_dereg_mr;
+
+ /* OTHERS */
+ ib_dev->get_port_immutable = hns_roce_port_immutable;
+
+ ret = ib_register_device(ib_dev, NULL);
+ if (ret) {
+ dev_err(dev, "ib_register_device failed!\n");
+ return ret;
+ }
+
+ ret = hns_roce_setup_mtu_gids(hr_dev);
+ if (ret) {
+ dev_err(dev, "roce_setup_mtu_gids failed!\n");
+ goto error_failed_setup_mtu_gids;
+ }
+
+ spin_lock_init(&iboe->lock);
+
+ iboe->nb.notifier_call = hns_roce_netdev_event;
+ ret = register_netdevice_notifier(&iboe->nb);
+ if (ret) {
+ dev_err(dev, "register_netdevice_notifier failed!\n");
+ goto error_failed_setup_mtu_gids;
+ }
+
+ iboe->nb_inet.notifier_call = hns_roce_inet_event;
+ ret = register_inetaddr_notifier(&iboe->nb_inet);
+ if (ret) {
+ dev_err(dev, "register inet addr notifier failed!\n");
+ goto error_failed_register_inetaddr_notifier;
+ }
+
+ return 0;
+
+error_failed_register_inetaddr_notifier:
+ unregister_netdevice_notifier(&iboe->nb);
+
+error_failed_setup_mtu_gids:
+ ib_unregister_device(ib_dev);
+
+ return ret;
+}
+
+static const struct of_device_id hns_roce_of_match[] = {
+ { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hns_roce_of_match);
+
+static const struct acpi_device_id hns_roce_acpi_match[] = {
+ { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
+
+static int hns_roce_node_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ /* get the 'device' corresponding to the matching 'fwnode' */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_roce_node_match);
+ /* get the platform device */
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
+{
+ int i;
+ int ret;
+ u8 phy_port;
+ int port_cnt = 0;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct device_node *net_node;
+ struct net_device *netdev = NULL;
+ struct platform_device *pdev = NULL;
+ struct resource *res;
+
+ /* check if we are compatible with the underlying SoC */
+ if (dev_of_node(dev)) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(hns_roce_of_match, dev->of_node);
+ if (!of_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (struct hns_roce_hw *)of_id->data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific DT data!\n");
+ return -ENXIO;
+ }
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
+ if (!acpi_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific ACPI data!\n");
+ return -ENXIO;
+ }
+ } else {
+ dev_err(dev, "can't read compatibility data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ /* get the mapped register base address */
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ hr_dev->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hr_dev->reg_base))
+ return PTR_ERR(hr_dev->reg_base);
+
+ /* get the RoCE associated ethernet ports or netdevices */
+ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
+ if (dev_of_node(dev)) {
+ net_node = of_parse_phandle(dev->of_node, "eth-handle",
+ i);
+ if (!net_node)
+ continue;
+ pdev = of_find_device_by_node(net_node);
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+ struct fwnode_handle *fwnode;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "eth-handle",
+ i, &args);
+ if (ret)
+ continue;
+ fwnode = acpi_fwnode_handle(args.adev);
+ pdev = hns_roce_find_pdev(fwnode);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ if (pdev) {
+ netdev = platform_get_drvdata(pdev);
+ phy_port = (u8)i;
+ if (netdev) {
+ hr_dev->iboe.netdevs[port_cnt] = netdev;
+ hr_dev->iboe.phy_port[port_cnt] = phy_port;
+ } else {
+ dev_err(dev, "no netdev found with pdev %s\n",
+ pdev->name);
+ return -ENODEV;
+ }
+ port_cnt++;
+ }
+ }
+
+ if (port_cnt == 0) {
+ dev_err(dev, "unable to get eth-handle for available ports!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->caps.num_ports = port_cnt;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
+ /* read the interrupt names from the DT or ACPI */
+ ret = device_property_read_string_array(dev, "interrupt-names",
+ hr_dev->irq_names,
+ HNS_ROCE_MAX_IRQ_NUM);
+ if (ret < 0) {
+ dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* fetch the interrupt numbers */
+ for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
+ if (hr_dev->irq[i] <= 0) {
+ dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
+ HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
+ hr_dev->caps.num_mtt_segs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTT context memory, aborting.\n");
+ return ret;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
+ HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
+ hr_dev->caps.num_mtpts, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+ goto err_unmap_mtt;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
+ HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init QP context memory, aborting.\n");
+ goto err_unmap_dmpt;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
+ HEM_TYPE_IRRL,
+ hr_dev->caps.irrl_entry_sz *
+ hr_dev->caps.max_qp_init_rdma,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+ goto err_unmap_qp;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
+ HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
+ hr_dev->caps.num_cqs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init CQ context memory, aborting.\n");
+ goto err_unmap_irrl;
+ }
+
+ return 0;
+
+err_unmap_irrl:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+
+err_unmap_qp:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+
+err_unmap_dmpt:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+
+err_unmap_mtt:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+
+ return ret;
+}
+
+/**
+ * hns_roce_setup_hca - setup the host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ spin_lock_init(&hr_dev->sm_lock);
+ spin_lock_init(&hr_dev->cq_db_lock);
+ spin_lock_init(&hr_dev->bt_cmd_lock);
+
+ ret = hns_roce_init_uar_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to initialize uar table. aborting\n");
+ return ret;
+ }
+
+ ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
+ if (ret) {
+ dev_err(dev, "Failed to allocate priv_uar.\n");
+ goto err_uar_table_free;
+ }
+
+ ret = hns_roce_init_pd_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init protected domain table.\n");
+ goto err_uar_alloc_free;
+ }
+
+ ret = hns_roce_init_mr_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init memory region table.\n");
+ goto err_pd_table_free;
+ }
+
+ ret = hns_roce_init_cq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init completion queue table.\n");
+ goto err_mr_table_free;
+ }
+
+ ret = hns_roce_init_qp_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init queue pair table.\n");
+ goto err_cq_table_free;
+ }
+
+ return 0;
+
+err_cq_table_free:
+ hns_roce_cleanup_cq_table(hr_dev);
+
+err_mr_table_free:
+ hns_roce_cleanup_mr_table(hr_dev);
+
+err_pd_table_free:
+ hns_roce_cleanup_pd_table(hr_dev);
+
+err_uar_alloc_free:
+ hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
+
+err_uar_table_free:
+ hns_roce_cleanup_uar_table(hr_dev);
+ return ret;
+}
+
+/**
+ * hns_roce_probe - RoCE driver entry point
+ * @pdev: pointer to platform device
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int hns_roce_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hns_roce_dev *hr_dev;
+ struct device *dev = &pdev->dev;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
+ sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
+
+ hr_dev->pdev = pdev;
+ platform_set_drvdata(pdev, hr_dev);
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
+ dev_err(dev, "Not usable DMA addressing mode\n");
+ ret = -EIO;
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_get_cfg(hr_dev);
+ if (ret) {
+ dev_err(dev, "Get Configuration failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hr_dev->hw->reset(hr_dev, true);
+ if (ret) {
+ dev_err(dev, "Reset RoCE engine failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ hr_dev->hw->hw_profile(hr_dev);
+
+ ret = hns_roce_cmd_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "cmd init failed!\n");
+ goto error_failed_cmd_init;
+ }
+
+ ret = hns_roce_init_eq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
+ }
+
+ if (hr_dev->cmd_mod) {
+ ret = hns_roce_cmd_use_events(hr_dev);
+ if (ret) {
+ dev_err(dev, "Switch to event-driven cmd failed!\n");
+ goto error_failed_use_event;
+ }
+ }
+
+ ret = hns_roce_init_hem(hr_dev);
+ if (ret) {
+ dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
+ goto error_failed_init_hem;
+ }
+
+ ret = hns_roce_setup_hca(hr_dev);
+ if (ret) {
+ dev_err(dev, "setup hca failed!\n");
+ goto error_failed_setup_hca;
+ }
+
+ ret = hr_dev->hw->hw_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "hw_init failed!\n");
+ goto error_failed_engine_init;
+ }
+
+ ret = hns_roce_register_device(hr_dev);
+ if (ret)
+ goto error_failed_register_device;
+
+ return 0;
+
+error_failed_register_device:
+ hr_dev->hw->hw_exit(hr_dev);
+
+error_failed_engine_init:
+ hns_roce_cleanup_bitmap(hr_dev);
+
+error_failed_setup_hca:
+ hns_roce_cleanup_hem(hr_dev);
+
+error_failed_init_hem:
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+
+error_failed_use_event:
+ hns_roce_cleanup_eq_table(hr_dev);
+
+error_failed_eq_table:
+ hns_roce_cmd_cleanup(hr_dev);
+
+error_failed_cmd_init:
+ ret = hr_dev->hw->reset(hr_dev, false);
+ if (ret)
+ dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+
+error_failed_get_cfg:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+/**
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
+static int hns_roce_remove(struct platform_device *pdev)
+{
+ struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
+
+ hns_roce_unregister_device(hr_dev);
+ hr_dev->hw->hw_exit(hr_dev);
+ hns_roce_cleanup_bitmap(hr_dev);
+ hns_roce_cleanup_hem(hr_dev);
+
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+
+ hns_roce_cleanup_eq_table(hr_dev);
+ hns_roce_cmd_cleanup(hr_dev);
+ hr_dev->hw->reset(hr_dev, false);
+
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return 0;
+}
+
+static struct platform_driver hns_roce_driver = {
+ .probe = hns_roce_probe,
+ .remove = hns_roce_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hns_roce_of_match,
+ .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
+ },
+};
+
+module_platform_driver(hns_roce_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("HNS RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
new file mode 100644
index 000000000000..59f5e2be046b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+
+static u32 hw_index_to_key(unsigned long ind)
+{
+ return (u32)(ind >> 24) | (ind << 8);
+}
+
+static unsigned long key_to_hw_index(u32 key)
+{
+ return (key << 24) | (key >> 8);
+}
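+
+/*
+ * The two helpers above are inverse 8-bit rotations of a 32-bit value,
+ * e.g. hw index 0x00000001 becomes key 0x00000100 and back again.
+ */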
+
+static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
+{
+ return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
+ HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_TIME_CLASS_B);
+}
+
+static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
+{
+ return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
+ mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ HNS_ROCE_CMD_TIME_CLASS_B);
+}
+
+static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
+ unsigned long *seg)
+{
+ int o;
+ u32 m;
+
+ spin_lock(&buddy->lock);
+
+ for (o = order; o <= buddy->max_order; ++o) {
+ if (buddy->num_free[o]) {
+ m = 1 << (buddy->max_order - o);
+ *seg = find_first_bit(buddy->bits[o], m);
+ if (*seg < m)
+ goto found;
+ }
+ }
+ spin_unlock(&buddy->lock);
+ return -1;
+
+ found:
+ clear_bit(*seg, buddy->bits[o]);
+ --buddy->num_free[o];
+
+ while (o > order) {
+ --o;
+ *seg <<= 1;
+ set_bit(*seg ^ 1, buddy->bits[o]);
+ ++buddy->num_free[o];
+ }
+
+ spin_unlock(&buddy->lock);
+
+ *seg <<= order;
+ return 0;
+}
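+
+/*
+ * Example: with max_order = 3 the buddy manages 8 contiguous MTT segments;
+ * hns_roce_buddy_alloc(&buddy, 1, &seg) splits a larger free block if
+ * needed and returns an even *seg covering segments seg and seg + 1.
+ */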
+
+static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
+ int order)
+{
+ seg >>= order;
+
+ spin_lock(&buddy->lock);
+
+ while (test_bit(seg ^ 1, buddy->bits[order])) {
+ clear_bit(seg ^ 1, buddy->bits[order]);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ set_bit(seg, buddy->bits[order]);
+ ++buddy->num_free[order];
+
+ spin_unlock(&buddy->lock);
+}
+
+static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
+{
+ int i, s;
+
+ buddy->max_order = max_order;
+ spin_lock_init(&buddy->lock);
+
+ buddy->bits = kzalloc((buddy->max_order + 1) * sizeof(long *),
+ GFP_KERNEL);
+ buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof(int *),
+ GFP_KERNEL);
+ if (!buddy->bits || !buddy->num_free)
+ goto err_out;
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+ if (!buddy->bits[i])
+ goto err_out_free;
+
+ bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ }
+
+ set_bit(0, buddy->bits[buddy->max_order]);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free:
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+err_out:
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+ return -ENOMEM;
+}
+
+static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
+{
+ int i;
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+}
+
+static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
+ unsigned long *seg)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ int ret = 0;
+
+ ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+ if (ret == -1)
+ return -1;
+
+ if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+ *seg + (1 << order) - 1)) {
+ hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+ return -1;
+ }
+
+ return 0;
+}
+
+int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
+ struct hns_roce_mtt *mtt)
+{
+ int ret = 0;
+ int i;
+
+ /* A page count of zero corresponds to a DMA memory registration */
+ if (!npages) {
+ mtt->order = -1;
+ mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
+ return 0;
+ }
+
+ /* Note: if page_shift is zero, it is a FAST memory register */
+ mtt->page_shift = page_shift;
+
+ /* Compute the MTT order needed to cover npages */
+ for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
+ i <<= 1)
+ ++mtt->order;
+
+ /* Allocate MTT entry */
+ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+ if (ret == -1)
+ return -ENOMEM;
+
+ return 0;
+}
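+
+/*
+ * The order computed above is the smallest value for which
+ * HNS_ROCE_MTT_ENTRY_PER_SEG << order covers npages; e.g. assuming
+ * 8 entries per segment, npages = 1000 gives order = 7 (8 << 7 = 1024).
+ */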
+
+void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ if (mtt->order < 0)
+ return;
+
+ hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+}
+
+static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
+ u64 size, u32 access, int npages,
+ struct hns_roce_mr *mr)
+{
+ unsigned long index = 0;
+ int ret = 0;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Allocate a key for mr from mr_table */
+ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
+ if (ret == -1)
+ return -ENOMEM;
+
+ mr->iova = iova; /* MR virtual start address */
+ mr->size = size; /* length of the MR address range */
+ mr->pd = pd; /* PD number the MR belongs to */
+ mr->access = access; /* MR access permissions */
+ mr->enabled = 0; /* MR active status */
+ mr->key = hw_index_to_key(index); /* MR key (lkey/rkey) */
+
+ if (size == ~0ull) {
+ mr->type = MR_TYPE_DMA;
+ mr->pbl_buf = NULL;
+ mr->pbl_dma_addr = 0;
+ } else {
+ mr->type = MR_TYPE_MR;
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf) {
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ index);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int npages = 0;
+ int ret;
+
+ if (mr->enabled) {
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+ & (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ }
+
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+ dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mr->key));
+}
+
+static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ int ret;
+ unsigned long mtpt_idx = key_to_hw_index(mr->key);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ /* Prepare HEM entry memory */
+ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ if (ret)
+ return ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_table;
+ }
+
+ ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ if (ret) {
+ dev_err(dev, "Write mtpt fail!\n");
+ goto err_page;
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ goto err_page;
+ }
+
+ mr->enabled = 1;
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_table:
+ hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ return ret;
+}
+
+static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, u32 start_index,
+ u32 npages, u64 *page_list)
+{
+ u32 i = 0;
+ __le64 *mtts = NULL;
+ dma_addr_t dma_handle;
+ u32 s = start_index * sizeof(u64);
+
+ /* All MTTs must fit in the same page */
+ if (start_index / (PAGE_SIZE / sizeof(u64)) !=
+ (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
+ return -EINVAL;
+
+ if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
+ return -EINVAL;
+
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ &dma_handle);
+ if (!mtts)
+ return -ENOMEM;
+
+ /* Save the page addresses; the low 12 bits are always zero */
+ for (i = 0; i < npages; ++i)
+ mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+
+ return 0;
+}
+
+static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, u32 start_index,
+ u32 npages, u64 *page_list)
+{
+ int chunk;
+ int ret;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ while (npages > 0) {
+ chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+
+ ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
+ page_list);
+ if (ret)
+ return ret;
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+
+ return 0;
+}
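+
+/*
+ * Writes are issued in chunks of at most PAGE_SIZE / sizeof(u64) entries
+ * (512 with 4K pages) so that each chunk stays within a single MTT page,
+ * as required by hns_roce_write_mtt_chunk().
+ */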
+
+int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
+{
+ u32 i = 0;
+ int ret = 0;
+ u64 *page_list = NULL;
+
+ page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->npages; ++i) {
+ if (buf->nbufs == 1)
+ page_list[i] = buf->direct.map + (i << buf->page_shift);
+ else
+ page_list[i] = buf->page_list[i].map;
+ }
+ ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
+
+ kfree(page_list);
+
+ return ret;
+}
+
+int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ int ret = 0;
+
+ ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
+ hr_dev->caps.num_mtpts,
+ hr_dev->caps.num_mtpts - 1,
+ hr_dev->caps.reserved_mrws, 0);
+ if (ret)
+ return ret;
+
+ ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
+ ilog2(hr_dev->caps.num_mtt_segs));
+ if (ret)
+ goto err_buddy;
+
+ return 0;
+
+err_buddy:
+ hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
+ return ret;
+}
+
+void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+ hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
+}
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ int ret = 0;
+ struct hns_roce_mr *mr = NULL;
+
+ mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ if (mr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate memory region key */
+ ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
+ ~0ULL, acc, 0, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(to_hr_dev(pd->device), mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct ib_umem *umem)
+{
+ struct scatterlist *sg;
+ int i, k, entry;
+ int ret = 0;
+ u64 *pages;
+ u32 n;
+ int len;
+
+ pages = (u64 *) __get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = n = 0;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> mtt->page_shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+ if (i == PAGE_SIZE / sizeof(u64)) {
+ ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
+ pages);
+ if (ret)
+ goto out;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
+
+out:
+ free_page((unsigned long) pages);
+ return ret;
+}
+
+static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+ struct ib_umem *umem)
+{
+ int i = 0;
+ int entry;
+ struct scatterlist *sg;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
+ i++;
+ }
+
+ /* Memory barrier */
+ mb();
+
+ return 0;
+}
+
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_mr *mr = NULL;
+ int ret = 0;
+ int n = 0;
+
+ mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ ret = PTR_ERR(mr->umem);
+ goto err_free;
+ }
+
+ n = ib_umem_page_count(mr->umem);
+ if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
+ dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
+ mr->umem->page_size);
+ }
+
+ if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+ dev_err(dev, "MR len %lld error: an MR is limited to 4G at most!\n",
+ length);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
+ access_flags, n, mr);
+ if (ret)
+ goto err_umem;
+
+ ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+ if (ret)
+ goto err_mr;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(hr_dev, mr);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_dereg_mr(struct ib_mr *ibmr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
new file mode 100644
index 000000000000..16271b5bd170
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+
+static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long pd_number;
+ int ret = 0;
+
+ ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
+ if (ret == -1) {
+ dev_err(dev, "alloc pdn from pdbitmap failed\n");
+ return -ENOMEM;
+ }
+
+ *pdn = pd_number;
+
+ return 0;
+}
+
+static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+{
+ hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+}
+
+int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
+ hr_dev->caps.num_pds - 1,
+ hr_dev->caps.reserved_pds, 0);
+}
+
+void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
+}
+
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_pd *pd;
+ int ret;
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+ if (ret) {
+ kfree(pd);
+ dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+ return ERR_PTR(ret);
+ }
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+ hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+ dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+ kfree(pd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return &pd->ibpd;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd)
+{
+ hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+ kfree(to_hr_pd(pd));
+
+ return 0;
+}
+
+int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+{
+ struct resource *res;
+ int ret = 0;
+
+ /* Use a bitmap to manage UAR indexes */
+ ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
+ if (ret == -1)
+ return -ENOMEM;
+
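+ /*
+ * Fold the logical index onto the physical UARs; the "- 1 ... + 1"
+ * keeps the result in 1..phy_num_uars, presumably so that physical
+ * UAR 0 is never handed out here.
+ */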
+ uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+
+void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+{
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+}
+
+int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
+ hr_dev->caps.num_uars,
+ hr_dev->caps.num_uars - 1,
+ hr_dev->caps.reserved_uars, 0);
+}
+
+void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
new file mode 100644
index 000000000000..645c18d809a5
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_user.h"
+
+#define DB_REG_OFFSET 0x1000
+#define SQP_NUM 12
+
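+/*
+ * Asynchronous QP event dispatch: look up the QP by QPN under the QP
+ * table lock, take a reference, invoke the per-QP event callback and
+ * drop the reference, completing ->free on the last put.
+ */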
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock(&qp_table->lock);
+
+ if (!qp) {
+ dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ return;
+ }
+
+ qp->event(qp, (enum hns_roce_event)event_type);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+
+static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
+ enum hns_roce_event type)
+{
+ struct ib_event event;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
+ type, hr_qp->qpn);
+ return;
+ }
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
+ int align, unsigned long *base)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ int ret = 0;
+ unsigned long qpn;
+
+ ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
+ if (ret == -1)
+ return -ENOMEM;
+
+ *base = qpn;
+
+ return 0;
+}
+
+enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return HNS_ROCE_QP_STATE_RST;
+ case IB_QPS_INIT:
+ return HNS_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return HNS_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return HNS_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return HNS_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return HNS_ROCE_QP_STATE_ERR;
+ default:
+ return HNS_ROCE_QP_NUM_STATE;
+ }
+}
+
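+/*
+ * GSI QPs take no QPC/IRRL table entries; only insert the QP into the
+ * QPN radix tree and set up its reference counting.
+ */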
+static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ int ret;
+
+ if (!qpn)
+ return -EINVAL;
+
+ hr_qp->qpn = qpn;
+
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+ goto err_put_irrl;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
+
+ return 0;
+
+err_put_irrl:
+
+ return ret;
+}
+
+static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret;
+
+ if (!qpn)
+ return -EINVAL;
+
+ hr_qp->qpn = qpn;
+
+ /* Alloc memory for QPC */
+ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "QPC table get failed\n");
+ goto err_out;
+ }
+
+ /* Alloc memory for IRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "IRRL table get failed\n");
+ goto err_put_qp;
+ }
+
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(dev, "QPC radix_tree_insert failed\n");
+ goto err_put_irrl;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
+
+ return 0;
+
+err_put_irrl:
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+
+err_put_qp:
+ hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+
+err_out:
+ return ret;
+}
+
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp_table->lock, flags);
+ radix_tree_delete(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1));
+ spin_unlock_irqrestore(&qp_table->lock, flags);
+}
+
+void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ }
+}
+
+void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
+ int cnt)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+ return;
+
+ hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+
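+/*
+ * Validate the requested RQ size against the device capabilities and
+ * round the WQE and SGE counts up to powers of two.
+ */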
+static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, int is_user, int has_srq,
+ struct hns_roce_qp *hr_qp)
+{
+ u32 max_cnt;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Check that the request fits within the device's QP capabilities */
+ if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
+ cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+ dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
+
+ /* If an SRQ is attached, the RQ parameters must be zero */
+ if (has_srq) {
+ if (cap->max_recv_wr) {
+ dev_dbg(dev, "srq no need config max_recv_wr\n");
+ return -EINVAL;
+ }
+
+ hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
+ } else {
+ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
+ dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
+ return -EINVAL;
+ }
+
+ /* In the v1 engine, enforce the minimum WQE count and round up to a power of two */
+ max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
+ cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+ hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+
+ if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+ return -EINVAL;
+ }
+
+ max_cnt = max(1U, cap->max_recv_sge);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+ /* The RQ WQE size is fixed at 64 bytes */
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ }
+
+ cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs;
+
+ return 0;
+}
+
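+/* Validate the user-supplied SQ size and lay out the SQ/RQ buffer. */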
+static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
+ u8 max_sq_stride = ilog2(roundup_sq_stride);
+
+ /* Sanity check SQ size before proceeding */
+ if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
+ ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+ return -EINVAL;
+ }
+
+ hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+ /* Compute the buffer size; the SQ and RQ are each aligned to the page size */
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), PAGE_SIZE) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ return 0;
+}
+
+static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
+ enum ib_qp_type type,
+ struct hns_roce_qp *hr_qp)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 max_cnt;
+ (void)type;
+
+ if (cap->max_send_wr > hr_dev->caps.max_wqes ||
+ cap->max_send_sge > hr_dev->caps.max_sq_sg ||
+ cap->max_inline_data > hr_dev->caps.max_sq_inline) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+ return -EINVAL;
+ }
+
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq_max_wqes_per_wr = 1;
+ hr_qp->sq_spare_wqes = 0;
+
+ /* In the v1 engine, enforce the minimum WQE count and round up to a power of two */
+ max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
+ cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+ hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+ return -EINVAL;
+ }
+
+ /* Get the number of data segments */
+ max_cnt = max(1U, cap->max_send_sge);
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+
+ /* Compute the buffer size; the SQ and RQ are each aligned to the page size */
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), PAGE_SIZE) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ /* Report back the send WR and SGE counts actually provided */
+ cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_sge = hr_qp->sq.max_gs;
+
+ /* We don't support inline sends for kernel QPs (yet) */
+ cap->max_inline_data = 0;
+
+ return 0;
+}
+
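+/*
+ * Common QP creation path for kernel and user space QPs: size the work
+ * queues, set up the WQE buffer and MTT, reserve a QPN (unless a fixed
+ * special QPN is supplied) and install the QP in the QPN radix tree.
+ */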
+static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, unsigned long sqpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_ib_create_qp ucmd;
+ unsigned long qpn = 0;
+ int ret = 0;
+
+ mutex_init(&hr_qp->mutex);
+ spin_lock_init(&hr_qp->sq.lock);
+ spin_lock_init(&hr_qp->rq.lock);
+
+ hr_qp->state = IB_QPS_RESET;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
+ !!init_attr->srq, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_rq_size failed\n");
+ goto err_out;
+ }
+
+ if (ib_pd->uobject) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ dev_err(dev, "ib_copy_from_udata error for create qp\n");
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
+ goto err_out;
+ }
+
+ hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
+ ucmd.buf_addr, hr_qp->buff_size, 0,
+ 0);
+ if (IS_ERR(hr_qp->umem)) {
+ dev_err(dev, "ib_umem_get error for create qp\n");
+ ret = PTR_ERR(hr_qp->umem);
+ goto err_out;
+ }
+
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
+ ilog2((unsigned int)hr_qp->umem->page_size),
+ &hr_qp->mtt);
+ if (ret) {
+ dev_err(dev, "hns_roce_mtt_init error for create qp\n");
+ goto err_buf;
+ }
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
+ hr_qp->umem);
+ if (ret) {
+ dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
+ goto err_mtt;
+ }
+ } else {
+ if (init_attr->create_flags &
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+ dev_err(dev, "init_attr->create_flags error!\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ dev_err(dev, "init_attr->create_flags error!\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Set SQ size */
+ ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
+ init_attr->qp_type, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
+ goto err_out;
+ }
+
+ /* QP doorbell register address */
+ hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ /* Allocate QP buf */
+ if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
+ &hr_qp->hr_buf)) {
+ dev_err(dev, "hns_roce_buf_alloc error!\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Write MTT */
+ ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
+ hr_qp->hr_buf.page_shift, &hr_qp->mtt);
+ if (ret) {
+ dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
+ goto err_buf;
+ }
+
+ ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
+ &hr_qp->hr_buf);
+ if (ret) {
+ dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
+ goto err_mtt;
+ }
+
+ hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ ret = -ENOMEM;
+ goto err_wrid;
+ }
+ }
+
+ if (sqpn) {
+ qpn = sqpn;
+ } else {
+ /* Get QPN */
+ ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
+ if (ret) {
+ dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
+ goto err_wrid;
+ }
+ }
+
+ if ((init_attr->qp_type) == IB_QPT_GSI) {
+ ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_alloc failed!\n");
+ goto err_qpn;
+ }
+ } else {
+ ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_alloc failed!\n");
+ goto err_qpn;
+ }
+ }
+
+ if (sqpn)
+ hr_qp->doorbell_qpn = 1;
+ else
+ hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+
+ hr_qp->event = hns_roce_ib_qp_event;
+
+ return 0;
+
+err_qpn:
+ if (!sqpn)
+ hns_roce_release_range_qp(hr_dev, qpn, 1);
+
+err_wrid:
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+err_buf:
+ if (ib_pd->uobject)
+ ib_umem_release(hr_qp->umem);
+ else
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+
+err_out:
+ return ret;
+}
+
+struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_sqp *hr_sqp;
+ struct hns_roce_qp *hr_qp;
+ int ret;
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC: {
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
+ hr_qp);
+ if (ret) {
+ dev_err(dev, "Create RC QP failed\n");
+ kfree(hr_qp);
+ return ERR_PTR(ret);
+ }
+
+ hr_qp->ibqp.qp_num = hr_qp->qpn;
+
+ break;
+ }
+ case IB_QPT_GSI: {
+ /* Userspace is not allowed to create special QPs: */
+ if (pd->uobject) {
+ dev_err(dev, "not support usr space GSI\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
+ if (!hr_sqp)
+ return ERR_PTR(-ENOMEM);
+
+ hr_qp = &hr_sqp->hr_qp;
+
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
+ hr_dev->caps.sqp_start +
+ hr_dev->caps.num_ports +
+ init_attr->port_num - 1, hr_qp);
+ if (ret) {
+ dev_err(dev, "Create GSI QP failed!\n");
+ kfree(hr_sqp);
+ return ERR_PTR(ret);
+ }
+
+ hr_qp->port = (init_attr->port_num - 1);
+ hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
+ hr_dev->caps.num_ports +
+ init_attr->port_num - 1;
+ break;
+ }
+ default:{
+ dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return &hr_qp->ibqp;
+}
+
+int to_hr_qp_type(int qp_type)
+{
+ int transport_type;
+
+ if (qp_type == IB_QPT_RC)
+ transport_type = SERV_TYPE_RC;
+ else if (qp_type == IB_QPT_UC)
+ transport_type = SERV_TYPE_UC;
+ else if (qp_type == IB_QPT_UD)
+ transport_type = SERV_TYPE_UD;
+ else if (qp_type == IB_QPT_GSI)
+ transport_type = SERV_TYPE_UD;
+ else
+ transport_type = -1;
+
+ return transport_type;
+}
+
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = -EINVAL;
+ int p;
+
+ mutex_lock(&hr_qp->mutex);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
+ new_state = attr_mask & IB_QP_STATE ?
+ attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ dev_err(dev, "ib_modify_qp_is_ok failed\n");
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+ dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
+ attr->port_num);
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+ dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
+ attr->pkey_index);
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+ dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+ attr->max_rd_atomic);
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+ dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+ attr->max_dest_rd_atomic);
+ goto out;
+ }
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ ret = -EPERM;
+ dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+ new_state);
+ goto out;
+ }
+
+ ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+
+ return ret;
+}
+
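+/* Lock the send and receive CQs in CQN order to avoid ABBA deadlock. */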
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ spin_lock_irq(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_lock_irq(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irq(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
+ __releases(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock_irq(&recv_cq->lock);
+ }
+}
+
+__be32 send_ieth(struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return cpu_to_le32(wr->ex.imm_data);
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_le32(wr->ex.invalidate_rkey);
+ default:
+ return 0;
+ }
+}
+
+static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+{
+ return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
+}
+
+void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
+ if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
+ dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
+ n, hr_qp->rq.wqe_cnt);
+ return NULL;
+ }
+
+ return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
+}
+
+void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+
+ if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
+ dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
+ n, hr_qp->sq.wqe_cnt);
+ return NULL;
+ }
+
+ return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
+}
+
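+/*
+ * Return true if posting nreq more work requests would overflow the
+ * queue; re-check under the CQ lock to synchronize with completions.
+ */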
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+ struct ib_cq *ib_cq)
+{
+ struct hns_roce_cq *hr_cq;
+ u32 cur;
+
+ cur = hr_wq->head - hr_wq->tail;
+ if (likely(cur + nreq < hr_wq->max_post))
+ return 0;
+
+ hr_cq = to_hr_cq(ib_cq);
+ spin_lock(&hr_cq->lock);
+ cur = hr_wq->head - hr_wq->tail;
+ spin_unlock(&hr_cq->lock);
+
+ return cur + nreq >= hr_wq->max_post;
+}
+
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ int reserved_from_top = 0;
+ int ret;
+
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+
+ /* Each port includes two SQPs; six ports give 12 in total */
+ ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
+ hr_dev->caps.num_qps - 1,
+ hr_dev->caps.sqp_start + SQP_NUM,
+ reserved_from_top);
+ if (ret) {
+ dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
new file mode 100644
index 000000000000..a28f761a9f65
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_user.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_USER_H
+#define _HNS_ROCE_USER_H
+
+struct hns_roce_ib_create_cq {
+ __u64 buf_addr;
+};
+
+struct hns_roce_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u8 log_sq_bb_count;
+ __u8 log_sq_stride;
+ __u8 sq_no_prefetch;
+ __u8 reserved[5];
+};
+
+struct hns_roce_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+};
+
+#endif /*_HNS_ROCE_USER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 7ca0638579c0..85637696f6e9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3166,8 +3166,11 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
- cm_core->event_wq = create_singlethread_workqueue("iwewq");
- cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
+ cm_core->event_wq = alloc_ordered_workqueue("iwewq",
+ WQ_MEM_RECLAIM);
+
+ cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
+ WQ_MEM_RECLAIM);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 445e230d5ff8..ac2f3cd9478c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1615,7 +1615,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
break;
- iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
+ iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
status = i40iw_add_mac_ip(iwdev);
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index c74ef2620b85..5e9939045852 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -881,7 +881,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
- create_singlethread_workqueue(alias_wq_name);
+ alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
if (!dev->sriov.alias_guid.ports_guid[i].wq) {
ret = -ENOMEM;
goto err_thread;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5df63dacaaa3..1ea686b9e0f9 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -37,7 +37,7 @@
#include <linux/slab.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0f21c3a25552..1672907ff219 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -230,6 +230,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
mad->mad_hdr.method == IB_MGMT_METHOD_SET)
switch (mad->mad_hdr.attr_id) {
case IB_SMP_ATTR_PORT_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
lid = be16_to_cpu(pinfo->lid);
@@ -245,6 +247,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_PKEY_TABLE:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
if (!mlx4_is_mfunc(dev->dev)) {
mlx4_ib_dispatch_event(dev, port_num,
IB_EVENT_PKEY_CHANGE);
@@ -281,6 +285,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_GUID_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
/* paravirtualized master's guid is guid 0 -- does not change */
if (!mlx4_is_master(dev->dev))
mlx4_ib_dispatch_event(dev, port_num,
@@ -296,6 +302,26 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
}
break;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
+ dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
+ return;
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port_num, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
+ }
+ break;
+
default:
break;
}
@@ -345,7 +371,8 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
}
}
@@ -805,8 +832,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
if (!out_mad->mad_hdr.status) {
- if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
+ smp_snoop(ibdev, port_num, in_mad, prev_lid);
/* slaves get node desc from FW */
if (!mlx4_is_slave(to_mdev(ibdev)->dev))
node_desc_override(ibdev, out_mad);
@@ -1037,6 +1063,23 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
}
}
+
+ /* Update the sl to vl table from inside client rereg
+ * only if in secure-host mode (snooping is not possible)
+ * and the sl-to-vl change event is not generated by FW.
+ */
+ if (!mlx4_is_slave(dev->dev) &&
+ dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
+ if (mlx4_is_master(dev->dev))
+ /* already in work queue from mlx4_ib_event queueing
+ * mlx4_handle_port_mgmt_change_event, which calls
+ * this procedure. Therefore, call sl2vl_update directly.
+ */
+ mlx4_ib_sl2vl_update(dev, port_num);
+ else
+ mlx4_sched_ib_sl2vl_update_work(dev, port_num);
+ }
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}
@@ -1176,6 +1219,24 @@ void handle_port_mgmt_change_event(struct work_struct *work)
handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
}
break;
+
+ case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] =
+ eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
+ }
+ break;
default:
pr_warn("Unsupported subtype 0x%x for "
"Port Management Change event\n", eqe->subtype);
@@ -1918,7 +1979,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_buf;
}
- ctx->pd = ib_alloc_pd(ctx->ib_dev);
+ ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
pr_err("Couldn't create tunnel PD (%d)\n", ret);
@@ -2091,7 +2152,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
}
snprintf(name, sizeof name, "mlx4_ibt%d", port);
- ctx->wq = create_singlethread_workqueue(name);
+ ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
ret = -ENOMEM;
@@ -2099,7 +2160,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
}
snprintf(name, sizeof name, "mlx4_ibud%d", port);
- ctx->ud_wq = create_singlethread_workqueue(name);
+ ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 87ba9bca4181..b597e8227591 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -55,7 +55,7 @@
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
#define DRV_NAME MLX4_IB_DRV_NAME
#define DRV_VERSION "2.2-1"
@@ -832,6 +832,66 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return ret;
}
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
+{
+ union sl2vl_tbl_to_u64 sl2vl64;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
+ int err = -ENOMEM;
+ int jj;
+
+ if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
+ *sl2vl_tbl = 0;
+ return 0;
+ }
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
+ in_mad->attr_mod = 0;
+
+ if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
+ mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
+ in_mad, out_mad);
+ if (err)
+ goto out;
+
+ for (jj = 0; jj < 8; jj++)
+ sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
+ *sl2vl_tbl = sl2vl64.sl64;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
+{
+ u64 sl2vl;
+ int i;
+ int err;
+
+ for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
+ if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ continue;
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
+ if (err) {
+ pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ i, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
+ }
+}
+
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey, int netw_view)
{
@@ -886,7 +946,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return -EOPNOTSUPP;
spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
/*
@@ -897,7 +957,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (IS_ERR(mailbox))
return 0;
- memcpy(mailbox->buf, props->node_desc, 64);
+ memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -1259,7 +1319,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
if (err)
goto err1;
- xrcd->pd = ib_alloc_pd(ibdev);
+ xrcd->pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(xrcd->pd)) {
err = PTR_ERR(xrcd->pd);
goto err2;
@@ -1361,6 +1421,19 @@ struct mlx4_ib_steering {
union ib_gid gid;
};
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD dst_ip
+#define LAST_TCP_UDP_FIELD src_port
+
+/* Field is the last supported field */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+ memchr_inv((void *)&filter.field +\
+ sizeof(filter.field), 0,\
+ sizeof(filter) -\
+ offsetof(typeof(filter), field) -\
+ sizeof(filter.field))
+
static int parse_flow_attr(struct mlx4_dev *dev,
u32 qp_num,
union ib_flow_spec *ib_spec,
@@ -1370,6 +1443,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
ETH_ALEN);
@@ -1379,6 +1455,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
case IB_FLOW_SPEC_IB:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IB;
mlx4_spec->ib.l3_qpn =
cpu_to_be32(qp_num);
@@ -1388,6 +1467,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_IPV4:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IPV4;
mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
@@ -1397,6 +1479,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
+
type = ib_spec->type == IB_FLOW_SPEC_TCP ?
MLX4_NET_TRANS_RULE_ID_TCP :
MLX4_NET_TRANS_RULE_ID_UDP;
@@ -2000,7 +2085,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
@@ -2653,6 +2738,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
+ mlx4_init_sl2vl_tbl(ibdev);
for (i = 0; i < ibdev->num_ports; ++i) {
mutex_init(&ibdev->counters_table[i].mutex);
@@ -3101,6 +3187,47 @@ static void handle_bonded_port_state_event(struct work_struct *work)
ib_dispatch_event(&ibev);
}
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
+{
+ u64 sl2vl;
+ int err;
+
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
+ if (err) {
+ pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ port, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
+}
+
+static void ib_sl2vl_update_work(struct work_struct *work)
+{
+ struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *mdev = ew->ib_dev;
+ int port = ew->port;
+
+ mlx4_ib_sl2vl_update(mdev, port);
+
+ kfree(ew);
+}
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port)
+{
+ struct ib_event_work *ew;
+
+ ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+ if (ew) {
+ INIT_WORK(&ew->work, ib_sl2vl_update_work);
+ ew->port = port;
+ ew->ib_dev = ibdev;
+ queue_work(wq, &ew->work);
+ } else {
+ pr_err("failed to allocate memory for sl2vl update work\n");
+ }
+}
+
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
@@ -3131,10 +3258,14 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_UP:
if (p > ibdev->num_ports)
return;
- if (mlx4_is_master(dev) &&
+ if (!mlx4_is_slave(dev) &&
rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
IB_LINK_LAYER_INFINIBAND) {
- mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (mlx4_is_master(dev))
+ mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
+ mlx4_sched_ib_sl2vl_update_work(ibdev, p);
}
ibev.event = IB_EVENT_PORT_ACTIVE;
break;
@@ -3222,7 +3353,7 @@ static int __init mlx4_ib_init(void)
{
int err;
- wq = create_singlethread_workqueue("mlx4_ib");
+ wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 097bfcc4ee99..a21d37f02f35 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1045,7 +1045,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
atomic_set(&ctx->tid, 0);
sprintf(name, "mlx4_ib_mcg%d", ctx->port);
- ctx->mcg_wq = create_singlethread_workqueue(name);
+ ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->mcg_wq)
return -ENOMEM;
@@ -1246,7 +1246,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
int mlx4_ib_mcg_init(void)
{
- clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
+ clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
if (!clean_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 686ab48ff644..35141f451e5c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -570,6 +570,7 @@ struct mlx4_ib_dev {
struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
struct ib_ah *sm_ah[MLX4_MAX_PORTS];
spinlock_t sm_lock;
+ atomic64_t sl2vl[MLX4_MAX_PORTS];
struct mlx4_ib_sriov sriov;
struct mutex cap_mask_mutex;
@@ -600,6 +601,7 @@ struct ib_event_work {
struct work_struct work;
struct mlx4_ib_dev *ib_dev;
struct mlx4_eqe ib_eqe;
+ int port;
};
struct mlx4_ib_qp_tunnel_init_attr {
@@ -883,4 +885,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
u8 port_num, int index);
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port);
+
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 7fb9629bd12b..570bc866b1d6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -47,7 +47,7 @@
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
@@ -2405,6 +2405,22 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0;
}
+static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
+{
+ union sl2vl_tbl_to_u64 tmp_vltab;
+ u8 vl;
+
+ if (sl > 15)
+ return 0xf;
+ tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
+ vl = tmp_vltab.sl8[sl >> 1];
+ if (sl & 1)
+ vl &= 0x0f;
+ else
+ vl >>= 4;
+ return vl;
+}
+
#define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
@@ -2590,7 +2606,12 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
+ sl_to_vl(to_mdev(ib_dev),
+ sqp->ud_header.lrh.service_level,
+ sqp->qp.port);
+ if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+ return -EINVAL;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 0597f3eef5d0..7dd3f267f06b 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -37,7 +37,7 @@
#include <linux/vmalloc.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index e4fac9292e4a..79d017baf6f4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -35,7 +35,6 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
-#include "user.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
@@ -729,14 +728,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
- int entries, struct mlx5_create_cq_mbox_in **cqb,
+ int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
size_t ucmdlen;
int page_shift;
+ __be64 *pas;
int npages;
int ncont;
+ void *cqc;
int err;
ucmdlen =
@@ -774,14 +775,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
- mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
- (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index;
@@ -816,9 +823,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
- struct mlx5_create_cq_mbox_in **cqb,
- int *index, int *inlen)
+ u32 **cqb, int *index, int *inlen)
{
+ __be64 *pas;
+ void *cqc;
int err;
err = mlx5_db_alloc(dev->mdev, &cq->db);
@@ -835,15 +843,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
init_cq_buf(cq, &cq->buf);
- *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
- (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_fill_page_array(&cq->buf.buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
*index = dev->mdev->priv.uuari.uars[0].index;
return 0;
@@ -877,11 +891,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
{
int entries = attr->cqe;
int vector = attr->comp_vector;
- struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
+ u32 *cqb = NULL;
+ void *cqc;
int cqe_size;
unsigned int irqn;
int eqn;
@@ -927,19 +942,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- cq->cqe_size = cqe_size;
- cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
-
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
- cqb->ctx.cqe_sz_flags |= (1 << 1);
-
- cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
- cqb->ctx.c_eqn = cpu_to_be16(eqn);
- cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+ cq->cqe_size = cqe_size;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+ MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+ MLX5_SET(cqc, cqc, uar_page, index);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
@@ -1070,27 +1086,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
int err;
- u32 fsel;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- in->cqn = cpu_to_be32(mcq->mcq.cqn);
- fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
- in->ctx.cq_period = cpu_to_be16(cq_period);
- in->ctx.cq_max_count = cpu_to_be16(cq_count);
- in->field_select = cpu_to_be32(fsel);
- err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
- kfree(in);
-
+ err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+ cq_period, cq_count);
if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
@@ -1223,9 +1227,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- struct mlx5_modify_cq_mbox_in *in;
+ void *cqc;
+ u32 *in;
int err;
int npas;
+ __be64 *pas;
int page_shift;
int inlen;
int uninitialized_var(cqe_size);
@@ -1267,28 +1273,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
if (err)
goto ex;
- inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+ inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+ MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto ex_resize;
}
+ pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
- in->pas, 0);
+ pas, 0);
else
- mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
-
- in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
- MLX5_MODIFY_CQ_MASK_PG_OFFSET |
- MLX5_MODIFY_CQ_MASK_PG_SIZE);
- in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
- in->ctx.page_offset = 0;
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
- in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
- in->cqn = cpu_to_be32(cq->mcq.cqn);
+ mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+ MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+ MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 364aab9f3c9e..39e58489dcc2 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -394,7 +394,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (err)
goto out;
- memcpy(node_desc, out_mad->data, 64);
+ memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
kfree(in_mad);
kfree(out_mad);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e19537cf44ab..22174774dbb8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,7 +53,6 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
-#include "user.h"
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
@@ -106,13 +105,42 @@ static int mlx5_netdev_event(struct notifier_block *this,
struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
roce.nb);
- if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
- return NOTIFY_DONE;
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ write_lock(&ibdev->roce.netdev_lock);
+ if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
+ ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&ibdev->roce.netdev_lock);
+ break;
+
+ case NETDEV_UP:
+ case NETDEV_DOWN: {
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *upper = NULL;
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ }
+
+ if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ && ibdev->ib_active) {
+ struct ib_event ibev = {0};
+
+ ibev.device = &ibdev->ib_dev;
+ ibev.event = (event == NETDEV_UP) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ibev.element.port_num = 1;
+ ib_dispatch_event(&ibev);
+ }
+ break;
+ }
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ default:
+ break;
+ }
return NOTIFY_DONE;
}
@@ -123,6 +151,10 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ if (ndev)
+ return ndev;
+
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
read_lock(&ibdev->roce.netdev_lock);
@@ -138,7 +170,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct net_device *ndev;
+ struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
u16 qkey_viol_cntr;
@@ -162,6 +194,17 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
if (!ndev)
return 0;
+ if (mlx5_lag_is_active(dev->mdev)) {
+ rcu_read_lock();
+ upper = netdev_master_upper_dev_get_rcu(ndev);
+ if (upper) {
+ dev_put(ndev);
+ ndev = upper;
+ dev_hold(ndev);
+ }
+ rcu_read_unlock();
+ }
+
if (netif_running(ndev) && netif_carrier_ok(ndev)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = 5;
@@ -232,23 +275,19 @@ static int set_roce_addr(struct ib_device *device, u8 port_num,
const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- struct mlx5_ib_dev *dev = to_mdev(device);
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
if (ll != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
- memset(in, 0, sizeof(in));
-
ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-
- memset(out, 0, sizeof(out));
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
@@ -433,7 +472,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
}
struct mlx5_reg_node_desc {
- u8 desc[64];
+ u8 desc[IB_DEVICE_NODE_DESC_MAX];
};
static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
@@ -536,6 +575,26 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.tso_caps);
}
}
+
+ if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ resp.rss_caps.rx_hash_function =
+ MLX5_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX5_RX_HASH_SRC_IPV4 |
+ MLX5_RX_HASH_DST_IPV4 |
+ MLX5_RX_HASH_SRC_IPV6 |
+ MLX5_RX_HASH_DST_IPV6 |
+ MLX5_RX_HASH_SRC_PORT_TCP |
+ MLX5_RX_HASH_DST_PORT_TCP |
+ MLX5_RX_HASH_SRC_PORT_UDP |
+ MLX5_RX_HASH_DST_PORT_UDP;
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+ } else {
+ if (field_avail(typeof(resp), tso_caps, uhw->outlen))
+ resp.response_length += sizeof(resp.tso_caps);
+ if (field_avail(typeof(resp), rss_caps, uhw->outlen))
+ resp.response_length += sizeof(resp.rss_caps);
}
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
@@ -599,6 +658,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (!mlx5_core_is_pf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ props->rss_caps.max_rwq_indirection_tables =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
+ props->rss_caps.max_rwq_indirection_table_size =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -753,8 +823,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
&props->active_width);
if (err)
goto out;
- err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
- port);
+ err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
if (err)
goto out;
@@ -851,13 +920,13 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
* If possible, pass node desc to FW, so it can generate
* a 144 trap. If cmd fails, just ignore.
*/
- memcpy(&in, props->node_desc, 64);
+ memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
if (err)
return err;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
return err;
}
@@ -1400,28 +1469,77 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
-static bool outer_header_zero(u32 *match_criteria)
+enum {
+ MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MATCH_CRITERIA_ENABLE_INNER_BIT
+};
+
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
+
+static u8 get_match_criteria_enable(u32 *match_criteria)
+{
+ u8 match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MATCH_CRITERIA_ENABLE_INNER_BIT;
+
+ return match_criteria_enable;
+}
+
+static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
- int size = MLX5_ST_SZ_BYTES(fte_match_param);
- char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+}
- return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
- outer_headers_c + 1,
- size - 1);
+static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD tos
+#define LAST_IPV6_FIELD traffic_class
+#define LAST_TCP_UDP_FIELD src_port
+
+/* Field is the last supported field */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+ memchr_inv((void *)&filter.field +\
+ sizeof(filter.field), 0,\
+ sizeof(filter) -\
+ offsetof(typeof(filter), field) -\
+ sizeof(filter.field))
+
static int parse_flow_attr(u32 *match_c, u32 *match_v,
- union ib_flow_spec *ib_spec)
+ const union ib_flow_spec *ib_spec)
{
void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters);
+
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
- if (ib_spec->size != sizeof(ib_spec->eth))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -ENOTSUPP;
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
dmac_47_16),
@@ -1468,8 +1586,8 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
ethertype, ntohs(ib_spec->eth.val.ether_type));
break;
case IB_FLOW_SPEC_IPV4:
- if (ib_spec->size != sizeof(ib_spec->ipv4))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
ethertype, 0xffff);
@@ -1492,10 +1610,16 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
+
+ set_tos(outer_headers_c, outer_headers_v,
+ ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+ set_proto(outer_headers_c, outer_headers_v,
+ ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
break;
case IB_FLOW_SPEC_IPV6:
- if (ib_spec->size != sizeof(ib_spec->ipv6))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
ethertype, 0xffff);
@@ -1518,10 +1642,26 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.dst_ip,
sizeof(ib_spec->ipv6.val.dst_ip));
+
+ set_tos(outer_headers_c, outer_headers_v,
+ ib_spec->ipv6.mask.traffic_class,
+ ib_spec->ipv6.val.traffic_class);
+
+ set_proto(outer_headers_c, outer_headers_v,
+ ib_spec->ipv6.mask.next_hdr,
+ ib_spec->ipv6.val.next_hdr);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c,
+ outer_ipv6_flow_label,
+ ntohl(ib_spec->ipv6.mask.flow_label));
+ MLX5_SET(fte_match_set_misc, misc_params_v,
+ outer_ipv6_flow_label,
+ ntohl(ib_spec->ipv6.val.flow_label));
break;
case IB_FLOW_SPEC_TCP:
- if (ib_spec->size != sizeof(ib_spec->tcp_udp))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xff);
@@ -1539,8 +1679,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_UDP:
- if (ib_spec->size != sizeof(ib_spec->tcp_udp))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xff);
@@ -1587,7 +1728,7 @@ static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
is_multicast_ether_addr(eth_spec->val.dst_mac);
}
-static bool is_valid_attr(struct ib_flow_attr *flow_attr)
+static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
bool has_ipv4_spec = false;
@@ -1631,12 +1772,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rule(iter->rule);
+ put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
mlx5_del_flow_rule(handler->rule);
- put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
+ put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
kfree(handler);
@@ -1652,10 +1794,16 @@ static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
return priority;
}
+enum flow_table_type {
+ MLX5_IB_FT_RX,
+ MLX5_IB_FT_TX
+};
+
#define MLX5_FS_MAX_TYPES 10
#define MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
- struct ib_flow_attr *flow_attr)
+ struct ib_flow_attr *flow_attr,
+ enum flow_table_type ft_type)
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_namespace *ns = NULL;
@@ -1686,6 +1834,19 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
&num_entries,
&num_groups);
prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ allow_sniffer_and_nic_rx_shared_tir))
+ return ERR_PTR(-ENOTSUPP);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+ prio = &dev->flow_db.sniffer[ft_type];
+ priority = 0;
+ num_entries = 1;
+ num_groups = 1;
}
if (!ns)
@@ -1711,13 +1872,13 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
- struct ib_flow_attr *flow_attr,
+ const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
struct mlx5_flow_spec *spec;
- void *ib_flow = flow_attr + 1;
+ const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
u32 action;
int err = 0;
@@ -1743,9 +1904,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
- /* Outer header support only */
- spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
- << 0;
+ spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, spec,
@@ -1758,7 +1917,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
- handler->prio = ft_prio - dev->flow_db.prios;
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
ft_prio->flow_table = ft;
free:
@@ -1782,6 +1942,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
flow_attr, dst);
if (IS_ERR(handler_dst)) {
mlx5_del_flow_rule(handler->rule);
+ ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
} else {
@@ -1843,6 +2004,8 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
+ mlx5_del_flow_rule(handler->rule);
+ ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
} else {
@@ -1853,6 +2016,43 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
return handler;
}
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_rx,
+ struct mlx5_ib_flow_prio *ft_tx,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_rx;
+ struct mlx5_ib_flow_handler *handler_tx;
+ int err;
+ static const struct ib_flow_attr flow_attr = {
+ .num_of_specs = 0,
+ .size = sizeof(flow_attr)
+ };
+
+ handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+ if (IS_ERR(handler_rx)) {
+ err = PTR_ERR(handler_rx);
+ goto err;
+ }
+
+ handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+ if (IS_ERR(handler_tx)) {
+ err = PTR_ERR(handler_tx);
+ goto err_tx;
+ }
+
+ list_add(&handler_tx->list, &handler_rx->list);
+
+ return handler_rx;
+
+err_tx:
+ mlx5_del_flow_rule(handler_rx->rule);
+ ft_rx->refcount--;
+ kfree(handler_rx);
+err:
+ return ERR_PTR(err);
+}
+
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1861,6 +2061,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_ib_flow_handler *handler = NULL;
struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
int err;
@@ -1878,11 +2079,19 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
mutex_lock(&dev->flow_db.lock);
- ft_prio = get_flow_table(dev, flow_attr);
+ ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
}
+ if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+ if (IS_ERR(ft_prio_tx)) {
+ err = PTR_ERR(ft_prio_tx);
+ ft_prio_tx = NULL;
+ goto destroy_ft;
+ }
+ }
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
if (mqp->flags & MLX5_IB_QP_RSS)
@@ -1902,6 +2111,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
dst);
+ } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
} else {
err = -EINVAL;
goto destroy_ft;
@@ -1913,7 +2124,6 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto destroy_ft;
}
- ft_prio->refcount++;
mutex_unlock(&dev->flow_db.lock);
kfree(dst);
@@ -1921,6 +2131,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
destroy_ft:
put_flow_table(dev, ft_prio, false);
+ if (ft_prio_tx)
+ put_flow_table(dev, ft_prio_tx, false);
unlock:
mutex_unlock(&dev->flow_db.lock);
kfree(dst);
@@ -2110,14 +2322,19 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_UP:
- ibev.event = IB_EVENT_PORT_ACTIVE;
- port = (u8)param;
- break;
-
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
+
+ /* In RoCE, port up/down events are handled in
+ * mlx5_netdev_event().
+ */
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return;
+
+ ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
@@ -2240,7 +2457,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_0;
}
- pd = ib_alloc_pd(&dev->ib_dev);
+ pd = ib_alloc_pd(&dev->ib_dev, 0);
if (IS_ERR(pd)) {
mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
ret = PTR_ERR(pd);
@@ -2522,30 +2739,88 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
+static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+ MLX5_FLOW_NAMESPACE_LAG);
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!ns || !mlx5_lag_is_active(mdev))
+ return 0;
+
+ err = mlx5_cmd_create_vport_lag(mdev);
+ if (err)
+ return err;
+
+ ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_destroy_vport_lag;
+ }
+
+ dev->flow_db.lag_demux_ft = ft;
+ return 0;
+
+err_destroy_vport_lag:
+ mlx5_cmd_destroy_vport_lag(mdev);
+ return err;
+}
+
+static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (dev->flow_db.lag_demux_ft) {
+ mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
+ dev->flow_db.lag_demux_ft = NULL;
+
+ mlx5_cmd_destroy_vport_lag(mdev);
+ }
+}
+
+static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+{
+ if (dev->roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce.nb);
+ dev->roce.nb.notifier_call = NULL;
+ }
+}
+
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
int err;
dev->roce.nb.notifier_call = mlx5_netdev_event;
err = register_netdevice_notifier(&dev->roce.nb);
- if (err)
+ if (err) {
+ dev->roce.nb.notifier_call = NULL;
return err;
+ }
err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err)
goto err_unregister_netdevice_notifier;
+ err = mlx5_roce_lag_init(dev);
+ if (err)
+ goto err_disable_roce;
+
return 0;
+err_disable_roce:
+ mlx5_nic_vport_disable_roce(dev->mdev);
+
err_unregister_netdevice_notifier:
- unregister_netdevice_notifier(&dev->roce.nb);
+ mlx5_remove_roce_notifier(dev);
return err;
}
static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
+ mlx5_roce_lag_cleanup(dev);
mlx5_nic_vport_disable_roce(dev->mdev);
- unregister_netdevice_notifier(&dev->roce.nb);
}
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2660,6 +2935,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
struct mlx5_ib_dev *dev;
enum rdma_link_layer ll;
int port_type_cap;
+ const char *name;
int err;
int i;
@@ -2692,7 +2968,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
- strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
+ if (!mlx5_lag_is_active(mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+
+ strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -2894,8 +3175,10 @@ err_rsrc:
destroy_dev_resources(&dev->devr);
err_disable_roce:
- if (ll == IB_LINK_LAYER_ETHERNET)
+ if (ll == IB_LINK_LAYER_ETHERNET) {
mlx5_disable_roce(dev);
+ mlx5_remove_roce_notifier(dev);
+ }
err_free_port:
kfree(dev->port);
@@ -2911,6 +3194,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ mlx5_remove_roce_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 95146f4aa3e3..dcdcd195fe53 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -44,6 +44,7 @@
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -142,6 +143,7 @@ struct mlx5_ib_pd {
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS 2
struct mlx5_ib_flow_prio {
struct mlx5_flow_table *flow_table;
unsigned int refcount;
@@ -150,12 +152,14 @@ struct mlx5_ib_flow_prio {
struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
- unsigned int prio;
+ struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_rule *rule;
};
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+ struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
* only single add/removal of flow steering rule could be done
@@ -225,7 +229,7 @@ struct mlx5_ib_wq {
struct mlx5_ib_rwq {
struct ib_wq ibwq;
- u32 rqn;
+ struct mlx5_core_qp core_qp;
u32 rq_num_pas;
u32 log_rq_stride;
u32 log_rq_size;
@@ -505,7 +509,7 @@ struct mlx5_ib_mr {
int umred;
int npages;
struct mlx5_ib_dev *dev;
- struct mlx5_create_mkey_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
int live;
void *descs_alloc;
@@ -603,6 +607,7 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
+ atomic_t next_port;
};
struct mlx5_ib_dev {
@@ -663,6 +668,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
+{
+ return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
+}
+
static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
return container_of(mmkey, struct mlx5_ib_mr, mmkey);
@@ -947,4 +957,40 @@ static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
return 0;
}
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_qp *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
+ !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
+ !!cqe_version))
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_srq *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
+ !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
+ !!cqe_version))
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4b021305c321..d4ad672b905b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -40,7 +40,6 @@
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -135,20 +134,10 @@ static void reg_mr_callback(int status, void *context)
return;
}
- if (mr->out.hdr.status) {
- mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
- mr->out.hdr.status,
- be32_to_cpu(mr->out.hdr.syndrome));
- kfree(mr);
- dev->fill_delay = 1;
- mod_timer(&dev->delay_timer, jiffies + HZ);
- return;
- }
-
spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
key = dev->mdev->priv.mkey_key++;
spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
cache->last_add = jiffies;
@@ -170,16 +159,19 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
int npages = 1 << ent->order;
+ void *mkc;
+ u32 *in;
int err = 0;
int i;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
if (ent->pending >= MAX_PENDING_REG_MR) {
err = -EAGAIN;
@@ -194,18 +186,22 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mr->order = ent->order;
mr->umred = 1;
mr->dev = dev;
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
- in->seg.log2_page_size = 12;
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
+ MLX5_SET(mkc, mkc, log_page_size, 12);
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
- sizeof(*in), reg_mr_callback,
- mr, &mr->out);
+ err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
+ in, inlen,
+ mr->out, sizeof(mr->out),
+ reg_mr_callback, mr);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@@ -614,7 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int err;
int i;
- cache->wq = create_singlethread_workqueue("mkey_cache");
+ cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
return -ENOMEM;
@@ -670,30 +666,38 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- seg = &in->seg;
- seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, 0);
+
+ err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -1063,9 +1067,11 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
int page_shift, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
+ __be64 *pas;
+ void *mkc;
int inlen;
+ u32 *in;
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
@@ -1073,31 +1079,41 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
if (!mr)
return ERR_PTR(-ENOMEM);
- inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*pas) * ((npages + 1) / 2) * 2;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_1;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
- /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ /* The pg_access bit allows setting the access flags
* in the page list submitted with the command. */
- in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
- in->seg.flags = convert_access(access_flags) |
- MLX5_ACCESS_MODE_MTT;
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->seg.start_addr = cpu_to_be64(virt_addr);
- in->seg.len = cpu_to_be64(length);
- in->seg.bsfs_octo_size = 0;
- in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
- in->seg.log2_page_size = page_shift;
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
- 1 << page_shift));
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
- NULL, NULL);
+ MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+ MLX5_SET64(mkc, mkc, len, length);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(virt_addr, length, 1 << page_shift));
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(virt_addr, length, 1 << page_shift));
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -1523,30 +1539,32 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
u32 max_num_sg)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_ib_mr *mr;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
int ndescs = ALIGN(max_num_sg, 4);
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32(ndescs);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
if (mr_type == IB_MR_TYPE_MEM_REG) {
- mr->access_mode = MLX5_ACCESS_MODE_MTT;
- in->seg.log2_page_size = PAGE_SHIFT;
-
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
err = mlx5_alloc_priv_descs(pd->device, mr,
ndescs, sizeof(u64));
if (err)
@@ -1555,7 +1573,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
mr->desc_size = sizeof(u64);
mr->max_descs = ndescs;
} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
- mr->access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
err = mlx5_alloc_priv_descs(pd->device, mr,
ndescs, sizeof(struct mlx5_klm));
@@ -1566,9 +1584,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2];
- in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
- MLX5_MKEY_BSF_EN);
- in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+ MLX5_SET(mkc, mkc, bsf_en, 1);
+ MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
if (!mr->sig) {
err = -ENOMEM;
@@ -1581,7 +1598,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_free_sig;
- mr->access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
mr->sig->psv_memory.psv_idx = psv_index[0];
mr->sig->psv_wire.psv_idx = psv_index[1];
@@ -1595,9 +1612,10 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
goto err_free_in;
}
- in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
- NULL, NULL, NULL);
+ MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
@@ -1633,8 +1651,10 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_create_mkey_mbox_in *in = NULL;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = NULL;
+ u32 *in = NULL;
+ void *mkc;
int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
@@ -1658,23 +1678,24 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
- in = kzalloc(sizeof(*in), GFP_KERNEL);
+ in = kzalloc(inlen, GFP_KERNEL);
if (!mw || !in) {
err = -ENOMEM;
goto free;
}
- in->seg.status = MLX5_MKEY_STATUS_FREE;
- in->seg.xlt_oct_size = cpu_to_be32(ndescs);
- in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
- MLX5_PERM_LOCAL_READ;
- if (type == IB_MW_TYPE_2)
- in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-
- err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
- NULL, NULL, NULL);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
if (err)
goto free;
@@ -1811,7 +1832,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
mr->desc_size * mr->max_descs,
DMA_TO_DEVICE);
- if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
else
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 34e79e709c67..cacb631a7b0a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -782,8 +782,8 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
int __init mlx5_ib_odp_init(void)
{
- mlx5_ib_page_fault_wq =
- create_singlethread_workqueue("mlx5_ib_page_faults");
+ mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
+ WQ_MEM_RECLAIM);
if (!mlx5_ib_page_fault_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index affc3f6598ca..41f4c2afbcdd 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -35,7 +35,6 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
/* not supported currently */
static int wq_signature;
@@ -77,6 +76,17 @@ struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};
+enum raw_qp_set_mask_map {
+ MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+};
+
+struct mlx5_modify_raw_qp_param {
+ u16 operation;
+
+ u32 set_mask; /* raw_qp_set_mask_map */
+ u8 rq_q_ctr_id;
+};
+
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -726,7 +736,7 @@ err_umem:
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
- struct mlx5_create_qp_mbox_in **in,
+ u32 **in,
struct mlx5_ib_create_qp_resp *resp, int *inlen,
struct mlx5_ib_qp_base *base)
{
@@ -739,6 +749,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 offset = 0;
int uuarn;
int ncont = 0;
+ __be64 *pas;
+ void *qpc;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -795,20 +807,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ubuffer->umem = NULL;
}
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
err = -ENOMEM;
goto err_umem;
}
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
- mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
- (*in)->pas, 0);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
- (*in)->ctx.params2 = cpu_to_be32(offset << 6);
+ mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
+ MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, page_offset, offset);
+
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
resp->uuar_index = uuarn;
qp->uuarn = uuarn;
@@ -857,12 +873,13 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
static int create_kernel_qp(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_qp *qp,
- struct mlx5_create_qp_mbox_in **in, int *inlen,
+ u32 **in, int *inlen,
struct mlx5_ib_qp_base *base)
{
enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
struct mlx5_uuar_info *uuari;
int uar_index;
+ void *qpc;
int uuarn;
int err;
@@ -902,25 +919,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
}
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
err = -ENOMEM;
goto err_buf;
}
- (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
- (*in)->ctx.log_pg_sz_remote_qpn =
- cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
/* Set "fast registration enabled" for all kernel QPs */
- (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
- (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
- (*in)->ctx.deth_sqpn = cpu_to_be32(1);
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
- mlx5_fill_page_array(&qp->buf, (*in)->pas);
+ mlx5_fill_page_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));
err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) {
@@ -974,15 +995,15 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
-static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
(attr->qp_type == IB_QPT_XRC_INI))
- return cpu_to_be32(MLX5_SRQ_RQ);
+ return MLX5_SRQ_RQ;
else if (!qp->has_rq)
- return cpu_to_be32(MLX5_ZERO_LEN_RQ);
+ return MLX5_ZERO_LEN_RQ;
else
- return cpu_to_be32(MLX5_NON_ZERO_RQ);
+ return MLX5_NON_ZERO_RQ;
}
static int is_connected(enum ib_qp_type qp_type)
@@ -996,13 +1017,10 @@ static int is_connected(enum ib_qp_type qp_type)
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- memset(in, 0, sizeof(in));
-
MLX5_SET(tisc, tisc, transport_domain, tdn);
-
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}
@@ -1191,7 +1209,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
struct ib_pd *pd)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1462,18 +1480,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_qp_base *base;
struct mlx5_ib_create_qp_resp resp;
- struct mlx5_create_qp_mbox_in *in;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
- int inlen = sizeof(*in);
- int err;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ struct mlx5_ib_create_qp ucmd;
+ struct mlx5_ib_qp_base *base;
void *qpc;
+ u32 *in;
+ int err;
base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
&qp->raw_packet_qp.rq.base :
@@ -1601,7 +1619,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
return err;
} else {
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
@@ -1611,26 +1629,29 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
- in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
- MLX5_QP_PM_MIGRATED << 11);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
else
- in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
if (qp->wq_sig)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
+ MLX5_SET(qpc, qpc, wq_signature, 1);
if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
+ MLX5_SET(qpc, qpc, cd_master, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
- in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
int rcqe_sz;
@@ -1640,71 +1661,68 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
if (rcqe_sz == 128)
- in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
else
- in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
if (scqe_sz == 128)
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
else
- in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
}
}
if (qp->rq.wqe_cnt) {
- in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
- in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
if (qp->sq.wqe_cnt)
- in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
else
- in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+ MLX5_SET(qpc, qpc, no_sq, 1);
/* Set default resources */
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
break;
case IB_QPT_XRC_INI:
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
break;
default:
if (init_attr->srq) {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
} else {
- in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |=
- cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
}
}
if (init_attr->send_cq)
- in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
if (init_attr->recv_cq)
- in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
- in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
- qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- /* 0xffffff means we ask to work with cqe version 0 */
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
- }
+
/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
if (init_attr->qp_type == IB_QPT_UD &&
(init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
- qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
qp->flags |= MLX5_IB_QP_LSO;
}
@@ -1855,13 +1873,13 @@ static void get_cqs(enum ib_qp_type qp_type,
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u16 operation);
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 lag_tx_affinity);
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
- struct mlx5_modify_qp_mbox_in *in;
unsigned long flags;
int err;
@@ -1874,19 +1892,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return;
-
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
mlx5_ib_qp_disable_pagefaults(qp);
err = mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_2RST_QP, in, 0,
- &base->mqp);
+ MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp);
} else {
- err = modify_raw_packet_qp(dev, qp,
- MLX5_CMD_OP_2RST_QP);
+ struct mlx5_modify_raw_qp_param raw_qp_param = {
+ .operation = MLX5_CMD_OP_2RST_QP
+ };
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
}
if (err)
mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
@@ -1925,8 +1942,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base->mqp.qpn);
}
- kfree(in);
-
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
@@ -2152,6 +2167,31 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
return err;
}
+static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 tx_affinity)
+{
+ void *in;
+ void *tisc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
@@ -2362,8 +2402,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
-static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
- struct mlx5_ib_rq *rq, int new_state)
+static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
void *in;
void *rqc;
@@ -2380,7 +2421,17 @@ static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
- err = mlx5_core_modify_rq(dev, rq->base.mqp.qpn, in, inlen);
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
+ } else
+ pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
+ dev->ib_dev.name);
+ }
+
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
if (err)
goto out;
@@ -2421,7 +2472,8 @@ out:
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u16 operation)
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 tx_affinity)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -2430,7 +2482,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
int sq_state;
int err;
- switch (operation) {
+ switch (raw_qp_param->operation) {
case MLX5_CMD_OP_RST2INIT_QP:
rq_state = MLX5_RQC_STATE_RDY;
sq_state = MLX5_SQC_STATE_RDY;
@@ -2447,21 +2499,31 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
- /* Nothing to do here... */
- return 0;
+ if (raw_qp_param->set_mask)
+ return -EINVAL;
+ else
+ return 0;
default:
WARN_ON(1);
return -EINVAL;
}
if (qp->rq.wqe_cnt) {
- err = modify_raw_packet_qp_rq(dev->mdev, rq, rq_state);
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
- if (qp->sq.wqe_cnt)
+ if (qp->sq.wqe_cnt) {
+ if (tx_affinity) {
+ err = modify_raw_packet_tx_affinity(dev->mdev, sq,
+ tx_affinity);
+ if (err)
+ return err;
+ }
+
return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+ }
return 0;
}
@@ -2512,20 +2574,20 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
- struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
+ struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
int sqd_event;
int mlx5_st;
int err;
u16 op;
+ u8 tx_affinity = 0;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- context = &in->ctx;
err = to_mlx5_st(ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
@@ -2550,6 +2612,23 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
+ if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+ if ((ibqp->qp_type == IB_QPT_RC) ||
+ (ibqp->qp_type == IB_QPT_UD &&
+ !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
+ (ibqp->qp_type == IB_QPT_UC) ||
+ (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+ (ibqp->qp_type == IB_QPT_XRC_INI) ||
+ (ibqp->qp_type == IB_QPT_XRC_TGT)) {
+ if (mlx5_lag_is_active(dev->mdev)) {
+ tx_affinity = (unsigned int)atomic_add_return(1,
+ &dev->roce.next_port) %
+ MLX5_MAX_PORTS + 1;
+ context->flags |= cpu_to_be32(tx_affinity << 24);
+ }
+ }
+ }
+
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if (ibqp->qp_type == IB_QPT_UD ||
@@ -2655,8 +2734,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
- struct mlx5_ib_port *mibport = &dev->port[port_num];
-
+ mibport = &dev->port[port_num];
context->qp_counter_set_usr_page |=
cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
}
@@ -2690,13 +2768,21 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
op = optab[mlx5_cur][mlx5_new];
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- in->optparam = cpu_to_be32(optpar);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
- err = modify_raw_packet_qp(dev, qp, op);
- else
- err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {};
+
+ raw_qp_param.operation = op;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
+ }
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
+ } else {
+ err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
&base->mqp);
+ }
+
if (err)
goto out;
@@ -2736,7 +2822,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(in);
+ kfree(context);
return err;
}
@@ -2969,7 +3055,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
memset(umr, 0, sizeof(*umr));
- if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
/* KLMs take twice the size of MTTs */
ndescs *= 2;
@@ -3112,9 +3198,9 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
memset(seg, 0, sizeof(*seg));
- if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
seg->log2_page_size = ilog2(mr->ibmr.page_size);
- else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
/* KLMs take twice the size of MTTs */
ndescs *= 2;
@@ -3455,7 +3541,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
memset(seg, 0, sizeof(*seg));
seg->flags = get_umr_flags(wr->access_flags) |
- MLX5_ACCESS_MODE_KLM;
+ MLX5_MKC_ACCESS_MODE_KLMS;
seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
MLX5_MKEY_BSF_EN | pdn);
@@ -4317,21 +4403,24 @@ static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_attr *qp_attr)
{
- struct mlx5_query_qp_mbox_out *outb;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *context;
int mlx5_state;
+ u32 *outb;
int err = 0;
- outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+ outb = kzalloc(outlen, GFP_KERNEL);
if (!outb)
return -ENOMEM;
- context = &outb->ctx;
err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
- sizeof(*outb));
+ outlen);
if (err)
goto out;
+ /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+ context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
mlx5_state = be32_to_cpu(context->flags) >> 28;
qp->state = to_ib_qp_state(mlx5_state);
@@ -4496,6 +4585,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return 0;
}
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+{
+ struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+ struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+ struct ib_event event;
+
+ if (rwq->ibwq.event_handler) {
+ event.device = rwq->ibwq.device;
+ event.element.wq = &rwq->ibwq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_WQ_FATAL;
+ break;
+ default:
+ mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+ return;
+ }
+
+ rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
+ }
+}
+
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
struct ib_wq_init_attr *init_attr)
{
@@ -4533,7 +4644,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
- err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+ err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
kvfree(in);
return err;
}
@@ -4649,7 +4760,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- rwq->ibwq.wq_num = rwq->rqn;
+ rwq->ibwq.wq_num = rwq->core_qp.qpn;
rwq->ibwq.state = IB_WQS_RESET;
if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) +
@@ -4659,10 +4770,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
goto err_copy;
}
+ rwq->core_qp.event = mlx5_ib_wq_event;
+ rwq->ibwq.event_handler = init_attr->event_handler;
return &rwq->ibwq;
err_copy:
- mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(pd, rwq);
err:
@@ -4675,7 +4788,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
- mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(wq->pd, rwq);
kfree(rwq);
@@ -4807,7 +4920,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
MLX5_SET(rqc, rqc, state, wq_state);
- err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
kvfree(in);
if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index ed6ac52355f1..3857dbd9c956 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -38,7 +38,6 @@
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
/* not supported currently */
static int srq_signature;
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 712d2a30fbe5..f6474c24f193 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -187,7 +187,7 @@ int __init mthca_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
- catas_wq = create_singlethread_workqueue("mthca_catas");
+ catas_wq = alloc_ordered_workqueue("mthca_catas", WQ_MEM_RECLAIM);
if (!catas_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7c3f2fb44ba5..9139405c4810 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -153,7 +153,8 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
mutex_lock(&to_mdev(dev)->cap_mask_mutex);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index da2335f7f7c3..358930a41e36 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -46,7 +46,7 @@
#include "mthca_dev.h"
#include "mthca_cmd.h"
-#include "mthca_user.h"
+#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
static void init_query_mad(struct ib_smp *mad)
@@ -193,7 +193,8 @@ static int mthca_modify_device(struct ib_device *ibdev,
if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
}
@@ -1138,7 +1139,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bd9d132f11c7..e7430c9254d3 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -165,7 +165,7 @@ do { \
#include "nes_hw.h"
#include "nes_verbs.h"
#include "nes_context.h"
-#include "nes_user.h"
+#include <rdma/nes-abi.h>
#include "nes_cm.h"
#include "nes_mgt.h"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 7f0aa23aef9d..57db9b332f44 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2692,12 +2692,12 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
- cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ cm_core->event_wq = alloc_ordered_workqueue("nesewq", 0);
if (!cm_core->event_wq)
goto out_free_cmcore;
cm_core->post_event = nes_cm_post_event;
nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
- cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+ cm_core->disconn_wq = alloc_ordered_workqueue("nesdwq", 0);
if (!cm_core->disconn_wq)
goto out_free_wq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 07d0c6c5b046..896071502739 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -56,7 +56,7 @@
#include "be_roce.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
@@ -119,6 +119,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
{
strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 0aa854737e74..6af44f8db3d5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -51,7 +51,7 @@
#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index bbf0a163aeab..a3e21a25cea5 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -52,6 +52,7 @@
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include "qib_common.h"
@@ -1131,7 +1132,6 @@ extern spinlock_t qib_devs_lock;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-extern u16 qpt_mask;
extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 67ee6438cf59..728e0a030d2e 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -319,8 +319,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
ret = 1;
else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
/* For TIDERR and RC QPs preemptively schedule a NAK */
- struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
- struct qib_other_headers *ohdr = NULL;
+ struct ib_header *hdr = (struct ib_header *)rhdr;
+ struct ib_other_headers *ohdr = NULL;
struct qib_ibport *ibp = &ppd->ibport_data;
struct qib_devdata *dd = ppd->dd;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
@@ -588,8 +588,7 @@ move_along:
qib_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
bail:
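
The refcount churn removed in this hunk moves behind rdmavt helpers. Their exact definitions are not part of this diff, but judging from the code they replace they should amount to the usual get/put pair, roughly:

    /* Sketch of the expected helper shape, not copied from rdmavt headers. */
    static inline void rvt_get_qp(struct rvt_qp *qp)
    {
            atomic_inc(&qp->refcount);
    }

    static inline void rvt_put_qp(struct rvt_qp *qp)
    {
            /* last reference gone: wake anyone waiting for the QP to quiesce */
            if (atomic_dec_and_test(&qp->refcount))
                    wake_up(&qp->wait);
    }
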
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ce4034071f9c..ded27172320e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1415,7 +1415,7 @@ static void flush_fifo(struct qib_pportdata *ppd)
u32 *hdr;
u64 pbc;
const unsigned hdrwords = 7;
- static struct qib_ib_header ibhdr = {
+ static struct ib_header ibhdr = {
.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
.lrh[1] = IB_LID_PERMISSIVE,
.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f253111e682e..1730aa839a47 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -614,8 +614,8 @@ static int qib_create_workqueues(struct qib_devdata *dd)
snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
dd->unit, pidx);
- ppd->qib_wq =
- create_singlethread_workqueue(wq_name);
+ ppd->qib_wq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
if (!ppd->qib_wq)
goto wq_error;
}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index f9b8cd2354d1..99d31efe4c2f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -41,14 +41,6 @@
#include "qib.h"
-/*
- * mask field which was present in now deleted qib_qpn_table
- * is not present in rvt_qpn_table. Defining the same field
- * as qpt_mask here instead of adding the mask field to
- * rvt_qpn_table.
- */
-u16 qpt_mask;
-
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map, unsigned off)
{
@@ -57,7 +49,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map, unsigned off,
- unsigned n)
+ unsigned n, u16 qpt_mask)
{
if (qpt_mask) {
off++;
@@ -179,6 +171,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
verbs_dev);
+ u16 qpt_mask = dd->qpn_mask;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -215,7 +208,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
goto bail;
}
offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues);
+ dd->n_krcv_queues, qpt_mask);
qpn = mk_qpn(qpt, map, offset);
/*
* This test differs from alloc_pidmap().
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 444028a3582a..2097512e75aa 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -75,7 +75,7 @@ static void start_timer(struct rvt_qp *qp)
* Note the QP s_lock must be held.
*/
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
- struct qib_other_headers *ohdr, u32 pmtu)
+ struct ib_other_headers *ohdr, u32 pmtu)
{
struct rvt_ack_entry *e;
u32 hwords;
@@ -154,10 +154,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
ohdr->u.at.aeth = qib_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth[0] =
- cpu_to_be32(e->atomic_data >> 32);
- ohdr->u.at.atomic_ack_eth[1] =
- cpu_to_be32(e->atomic_data);
+ ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = e->psn & QIB_PSN_MASK;
e->sent = 1;
@@ -234,7 +231,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_sge_state *ss;
struct rvt_swqe *wqe;
u32 hwords;
@@ -444,20 +441,18 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
}
if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.swap);
- ohdr->u.atomic_eth.compare_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
+ put_ib_ateth_swap(wqe->atomic_wr.swap,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- ohdr->u.atomic_eth.compare_data = 0;
+ put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
+ put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
}
- ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
- wqe->atomic_wr.remote_addr >> 32);
- ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
- wqe->atomic_wr.remote_addr);
+ put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
+ &ohdr->u.atomic_eth);
ohdr->u.atomic_eth.rkey = cpu_to_be32(
wqe->atomic_wr.rkey);
hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
@@ -632,8 +627,8 @@ void qib_send_rc_ack(struct rvt_qp *qp)
u32 hwords;
u32 pbufn;
u32 __iomem *piobuf;
- struct qib_ib_header hdr;
- struct qib_other_headers *ohdr;
+ struct ib_header hdr;
+ struct ib_other_headers *ohdr;
u32 control;
unsigned long flags;
@@ -942,9 +937,9 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
+void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
struct ib_wc wc;
unsigned i;
@@ -1177,7 +1172,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
}
@@ -1361,7 +1356,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
@@ -1383,7 +1378,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
* Called at interrupt level.
*/
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
- struct qib_other_headers *ohdr,
+ struct ib_other_headers *ohdr,
void *data, u32 tlen,
struct rvt_qp *qp,
u32 opcode,
@@ -1463,12 +1458,9 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
case OP(ATOMIC_ACKNOWLEDGE):
case OP(RDMA_READ_RESPONSE_FIRST):
aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- __be32 *p = ohdr->u.at.atomic_ack_eth;
-
- val = ((u64) be32_to_cpu(p[0]) << 32) |
- be32_to_cpu(p[1]);
- } else
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE))
+ val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
+ else
val = 0;
if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
opcode != OP(RDMA_READ_RESPONSE_FIRST))
@@ -1608,7 +1600,7 @@ bail:
* Return 1 if no more processing is needed; otherwise return 0 to
* schedule a response to be sent.
*/
-static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
void *data,
struct rvt_qp *qp,
u32 opcode,
@@ -1640,7 +1632,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
@@ -1848,11 +1840,11 @@ static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
* for the given QP.
* Called at interrupt level.
*/
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
u32 opcode;
u32 hdrsize;
u32 psn;
@@ -2177,8 +2169,7 @@ send_last:
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
- vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
- be32_to_cpu(ateth->vaddr[1]);
+ vaddr = get_ib_ateth_vaddr(ateth);
if (unlikely(vaddr & (sizeof(u64) - 1)))
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
@@ -2189,11 +2180,11 @@ send_last:
goto nack_acc_unlck;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
+ sdata = get_ib_ateth_swap(ateth);
e->atomic_data = (opcode == OP(FETCH_ADD)) ?
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
+ get_ib_ateth_compare(ateth),
sdata);
rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
@@ -2233,7 +2224,7 @@ rnr_nak:
/* Queue RNR NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return;
@@ -2245,7 +2236,7 @@ nack_op_err:
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return;
@@ -2259,7 +2250,7 @@ nack_inv:
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
return;
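
The ib_u64_get()/put_ib_ateth_*() calls introduced in qib_rc.c come from the new <rdma/ib_hdrs.h>, whose definitions are not shown in this diff. The idea they capture can still be illustrated with primitives that certainly exist (be32_to_cpu/cpu_to_be32): a 64-bit quantity carried as an unaligned pair of __be32 words gets one shared pack/unpack helper instead of being open-coded at every call site. Hypothetical sketch (demo_* names only):

    /* Illustrative only: pack/unpack a u64 stored as two big-endian words. */
    static inline u64 demo_u64_get(const __be32 *p)
    {
            return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
    }

    static inline void demo_u64_put(u64 val, __be32 *p)
    {
            p[0] = cpu_to_be32(val >> 32);
            p[1] = cpu_to_be32(val);
    }
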
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index b67779256297..de1bde5950f5 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -265,7 +265,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
*
* The s_lock will be acquired around the qib_migrate_qp() call.
*/
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
@@ -680,7 +680,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
-void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2)
{
struct qib_qp_priv *priv = qp->priv;
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 1d61bd04f449..5b2d483451ad 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -48,7 +48,7 @@
int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
u32 hwords;
u32 bth0;
@@ -236,10 +236,10 @@ bail:
* for the given QP.
* Called at interrupt level.
*/
-void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
u32 opcode;
u32 hdrsize;
u32 psn;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 10d062561bd9..f45cad1198b0 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -245,7 +245,7 @@ drop:
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct qib_pportdata *ppd;
struct qib_ibport *ibp;
@@ -435,10 +435,10 @@ static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
* for the given QP.
* Called at interrupt level.
*/
-void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
int opcode;
u32 hdrsize;
u32 pad;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index fd1dfbce5539..954f15064514 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -313,7 +313,7 @@ static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
* for the given QP.
* Called at interrupt level.
*/
-static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
@@ -366,10 +366,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
struct qib_pportdata *ppd = rcd->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_ib_header *hdr = rhdr;
+ struct ib_header *hdr = rhdr;
struct qib_devdata *dd = ppd->dd;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct qib_other_headers *ohdr;
+ struct ib_other_headers *ohdr;
struct rvt_qp *qp;
u32 qp_num;
int lnh;
@@ -841,7 +841,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
if (tx->wqe)
qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct qib_ib_header *hdr;
+ struct ib_header *hdr;
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
hdr = &tx->align_buf->hdr;
@@ -889,7 +889,7 @@ static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
return ret;
}
-static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
+static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
@@ -1025,7 +1025,7 @@ static int no_bufs_available(struct rvt_qp *qp)
return ret;
}
-static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
+static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
@@ -1133,7 +1133,7 @@ done:
* Return zero if packet is sent or queued OK.
* Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/
-int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
@@ -1370,7 +1370,8 @@ static int qib_modify_device(struct ib_device *device,
}
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
+ memcpy(device->node_desc, device_modify->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
for (i = 0; i < dd->num_pports; i++) {
struct qib_ibport *ibp = &dd->pport[i].ibport_data;
@@ -1606,8 +1607,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
/* Only need to initialize non-zero fields. */
setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
- qpt_mask = dd->qpn_mask;
-
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
INIT_LIST_HEAD(&dev->txwait);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 736ced684842..94fd30fdedac 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -45,6 +45,7 @@
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_cq.h>
@@ -63,16 +64,6 @@ struct qib_verbs_txreq;
*/
#define QIB_UVERBS_ABI_VERSION 2
-#define IB_SEQ_NAK (3 << 29)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK 0x20
-#define IB_NAK_PSN_ERROR 0x60
-#define IB_NAK_INVALID_REQUEST 0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST 0x64
-
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
@@ -87,22 +78,9 @@ struct qib_verbs_txreq;
#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-#define IB_BTH_REQ_ACK (1 << 31)
-#define IB_BTH_SOLICITED (1 << 23)
-#define IB_BTH_MIG_REQ (1 << 22)
-
/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
-#define IB_GRH_VERSION 6
-#define IB_GRH_VERSION_MASK 0xF
-#define IB_GRH_VERSION_SHIFT 28
-#define IB_GRH_TCLASS_MASK 0xFF
-#define IB_GRH_TCLASS_SHIFT 20
-#define IB_GRH_FLOW_MASK 0xFFFFF
-#define IB_GRH_FLOW_SHIFT 0
-#define IB_GRH_NEXT_HDR 0x1B
-
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
/* Values for set/get portinfo VLCap OperationalVLs */
@@ -129,61 +107,9 @@ static inline int qib_num_vls(int vls)
}
}
-struct ib_reth {
- __be64 vaddr;
- __be32 rkey;
- __be32 length;
-} __packed;
-
-struct ib_atomic_eth {
- __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
- __be32 rkey;
- __be64 swap_data;
- __be64 compare_data;
-} __packed;
-
-struct qib_other_headers {
- __be32 bth[3];
- union {
- struct {
- __be32 deth[2];
- __be32 imm_data;
- } ud;
- struct {
- struct ib_reth reth;
- __be32 imm_data;
- } rc;
- struct {
- __be32 aeth;
- __be32 atomic_ack_eth[2];
- } at;
- __be32 imm_data;
- __be32 aeth;
- __be32 ieth;
- struct ib_atomic_eth atomic_eth;
- } u;
-} __packed;
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data). Only the first 56 bytes of the IB header
- * will be in the eager header buffer. The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct qib_ib_header {
- __be16 lrh[4];
- union {
- struct {
- struct ib_grh grh;
- struct qib_other_headers oth;
- } l;
- struct qib_other_headers oth;
- } u;
-} __packed;
-
struct qib_pio_header {
__le32 pbc[2];
- struct qib_ib_header hdr;
+ struct ib_header hdr;
} __packed;
/*
@@ -191,7 +117,7 @@ struct qib_pio_header {
* is made common.
*/
struct qib_qp_priv {
- struct qib_ib_header *s_hdr; /* next packet header to send */
+ struct ib_header *s_hdr; /* next packet header to send */
struct list_head iowait; /* link for wait PIO buf */
atomic_t s_dma_busy;
struct qib_verbs_txreq *s_tx;
@@ -376,7 +302,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
void qib_put_txreq(struct qib_verbs_txreq *tx);
-int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len);
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
@@ -384,10 +310,10 @@ void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
-void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
@@ -398,13 +324,13 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
void qib_rc_rnr_retry(unsigned long arg);
-void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr);
+void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
-void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
void mr_rcu_callback(struct rcu_head *list);
@@ -413,13 +339,13 @@ int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
void qib_migrate_qp(struct rvt_qp *qp);
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0);
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
-void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2);
void _qib_do_send(struct work_struct *work);
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 33076a5eee2f..01f71caa3ac4 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -138,6 +138,21 @@ static void rvt_unmap_sg(struct ib_device *dev,
/* This is a stub, nothing to be done here */
}
+static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ return rvt_map_sg(dev, sgl, nents, direction);
+}
+
+static void rvt_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ return rvt_unmap_sg(dev, sg, nents, direction);
+}
+
static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size, enum dma_data_direction dir)
{
@@ -177,6 +192,8 @@ struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
.unmap_page = rvt_dma_unmap_page,
.map_sg = rvt_map_sg,
.unmap_sg = rvt_unmap_sg,
+ .map_sg_attrs = rvt_map_sg_attrs,
+ .unmap_sg_attrs = rvt_unmap_sg_attrs,
.sync_single_for_cpu = rvt_sync_single_for_cpu,
.sync_single_for_device = rvt_sync_single_for_device,
.alloc_coherent = rvt_dma_alloc_coherent,
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 870b4f212fbc..6500c3b5a89c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -488,60 +488,23 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
if (removed) {
synchronize_rcu();
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
/**
- * reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
+ * rvt_init_qp - initialize the QP state to the reset state
+ * @qp: the QP to init or reinit
* @type: the QP type
- * r and s lock are required to be held by the caller
+ *
+ * This function is called from both rvt_create_qp() and
+ * rvt_reset_qp(). The difference is that the reset
+ * path takes the necessary locks to protect against concurrent
+ * access.
*/
-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- enum ib_qp_type type)
- __releases(&qp->s_lock)
- __releases(&qp->s_hlock)
- __releases(&qp->r_lock)
- __acquires(&qp->r_lock)
- __acquires(&qp->s_hlock)
- __acquires(&qp->s_lock)
+static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
{
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
-
- /* Let drivers flush their waitlist */
- rdi->driver_f.flush_qp_waiters(qp);
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
-
- /* Stop the send queue and the retry timer */
- rdi->driver_f.stop_send_queue(qp);
-
- /* Wait for things to stop */
- rdi->driver_f.quiesce_qp(qp);
-
- /* take qp out the hash and wait for it to be unused */
- rvt_remove_qp(rdi, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
-
- /* grab the lock b/c it was locked at call time */
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
-
- rvt_clear_mr_refs(qp, 1);
- }
-
- /*
- * Let the driver do any tear down it needs to for a qp
- * that has been reset
- */
- rdi->driver_f.notify_qp_reset(qp);
-
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
@@ -587,6 +550,60 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
}
/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * r_lock, s_hlock, and s_lock are required to be held by the caller
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+ __must_hold(&qp->s_lock)
+ __must_hold(&qp->s_hlock)
+ __must_hold(&qp->r_lock)
+{
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_hlock);
+ lockdep_assert_held(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+
+ /* Let drivers flush their waitlist */
+ rdi->driver_f.flush_qp_waiters(qp);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* Stop the send queue and the retry timer */
+ rdi->driver_f.stop_send_queue(qp);
+
+ /* Wait for things to stop */
+ rdi->driver_f.quiesce_qp(qp);
+
+ /* take qp out the hash and wait for it to be unused */
+ rvt_remove_qp(rdi, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+
+ /* grab the lock b/c it was locked at call time */
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ rvt_clear_mr_refs(qp, 1);
+ /*
+ * Let the driver do any tear down or re-init it needs to for
+ * a qp that has been reset
+ */
+ rdi->driver_f.notify_qp_reset(qp);
+ }
+ rvt_init_qp(rdi, qp, type);
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_hlock);
+ lockdep_assert_held(&qp->s_lock);
+}
+
+/**
* rvt_create_qp - create a queue pair for a device
* @ibpd: the protection domain whose device we create the queue pair for
* @init_attr: the attributes of the queue pair
@@ -766,7 +783,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
}
qp->ibqp.qp_num = err;
qp->port_num = init_attr->port_num;
- rvt_reset_qp(rdi, qp, init_attr->qp_type);
+ rvt_init_qp(rdi, qp, init_attr->qp_type);
break;
default:
@@ -906,6 +923,8 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
int ret = 0;
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ lockdep_assert_held(&qp->r_lock);
+ lockdep_assert_held(&qp->s_lock);
if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
goto bail;
@@ -980,7 +999,7 @@ static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
unsigned long flags;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
if (qp->ibqp.qp_num <= 1) {
@@ -997,7 +1016,7 @@ static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
}
/**
- * qib_modify_qp - modify the attributes of a queue pair
+ * rvt_modify_qp - modify the attributes of a queue pair
* @ibqp: the queue pair whose attributes we're modifying
* @attr: the new attributes
* @attr_mask: the mask of attributes to modify
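
The lockdep_assert_held() calls added to rvt_reset_qp() and rvt_error_qp() turn the locking comments into something checkable: with lockdep enabled, a caller that arrives without the expected lock triggers a warning, and with lockdep disabled the calls compile away. A small sketch of the idiom (demo_requires_lock is an illustrative name):

    static void demo_requires_lock(struct rvt_qp *qp)
    {
            lockdep_assert_held(&qp->s_lock);   /* no-op unless lockdep is enabled */

            /* ... work that may only run under s_lock ... */
    }
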
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ddd59270ff6d..ab6c3c25d7ff 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -358,38 +358,16 @@ static int __init rxe_module_init(void)
/* initialize slab caches for managed objects */
err = rxe_cache_init();
if (err) {
- pr_err("rxe: unable to init object pools\n");
+ pr_err("unable to init object pools\n");
return err;
}
- err = rxe_net_ipv4_init();
- if (err) {
- pr_err("rxe: unable to init ipv4 tunnel\n");
- rxe_cache_exit();
- goto exit;
- }
-
- err = rxe_net_ipv6_init();
- if (err) {
- pr_err("rxe: unable to init ipv6 tunnel\n");
- rxe_cache_exit();
- goto exit;
- }
-
- err = register_netdevice_notifier(&rxe_net_notifier);
- if (err) {
- pr_err("rxe: Failed to rigister netdev notifier\n");
- goto exit;
- }
-
- pr_info("rxe: loaded\n");
+ err = rxe_net_init();
+ if (err)
+ return err;
+ pr_info("loaded\n");
return 0;
-
-exit:
- rxe_release_udp_tunnel(recv_sockets.sk4);
- rxe_release_udp_tunnel(recv_sockets.sk6);
- return err;
}
static void __exit rxe_module_exit(void)
@@ -398,8 +376,8 @@ static void __exit rxe_module_exit(void)
rxe_net_exit();
rxe_cache_exit();
- pr_info("rxe: unloaded\n");
+ pr_info("unloaded\n");
}
-module_init(rxe_module_init);
+late_initcall(rxe_module_init);
module_exit(rxe_module_exit);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 12c71c549f97..a696af81e4a5 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -34,6 +34,11 @@
#ifndef RXE_H
#define RXE_H
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
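
Dropping the hand-written "rxe: " prefixes throughout the rxe files works because of the pr_fmt() definition added above: any pr_*() call compiled in a unit that includes rxe.h now picks up the module name automatically. A minimal standalone sketch of the mechanism:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt  /* must come before the includes */

    #include <linux/module.h>
    #include <linux/printk.h>

    static int __init demo_init(void)
    {
            pr_info("loaded\n");    /* printed as "<module name>: loaded" */
            return 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");
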
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 5c9474212d4e..604f6fee96bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -39,7 +39,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
struct rxe_port *port;
if (attr->port_num != 1) {
- pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+ pr_info("invalid port_num = %d\n", attr->port_num);
return -EINVAL;
}
@@ -47,7 +47,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
if (attr->ah_flags & IB_AH_GRH) {
if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
- pr_info("rxe: invalid sgid index = %d\n",
+ pr_info("invalid sgid index = %d\n",
attr->grh.sgid_index);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 1c59ef2c67aa..6c5e29db88e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -567,7 +567,8 @@ int rxe_completer(void *arg)
state = COMPST_GET_ACK;
while (1) {
- pr_debug("state = %s\n", comp_state_name[state]);
+ pr_debug("qp#%d state = %s\n", qp_num(qp),
+ comp_state_name[state]);
switch (state) {
case COMPST_GET_ACK:
skb = skb_dequeue(&qp->resp_pkts);
@@ -709,7 +710,8 @@ int rxe_completer(void *arg)
qp->comp.rnr_retry--;
qp->req.need_retry = 1;
- pr_debug("set rnr nak timer\n");
+ pr_debug("qp#%d set rnr nak timer\n",
+ qp_num(qp));
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
index 7634c1a81b2b..a0f8af5851ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_dma.c
+++ b/drivers/infiniband/sw/rxe/rxe_dma.c
@@ -117,6 +117,21 @@ static void rxe_unmap_sg(struct ib_device *dev,
WARN_ON(!valid_dma_direction(direction));
}
+static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ return rxe_map_sg(dev, sgl, nents, direction);
+}
+
+static void rxe_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ rxe_unmap_sg(dev, sg, nents, direction);
+}
+
static void rxe_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size, enum dma_data_direction dir)
@@ -159,6 +174,8 @@ struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
.unmap_page = rxe_dma_unmap_page,
.map_sg = rxe_map_sg,
.unmap_sg = rxe_unmap_sg,
+ .map_sg_attrs = rxe_map_sg_attrs,
+ .unmap_sg_attrs = rxe_unmap_sg_attrs,
.sync_single_for_cpu = rxe_sync_single_for_cpu,
.sync_single_for_device = rxe_sync_single_for_device,
.alloc_coherent = rxe_dma_alloc_coherent,
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4a5484ef604f..73849a5a91b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -198,7 +198,7 @@ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
qp->resp.res_head++;
- if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+ if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
qp->resp.res_head = 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 54b3c7c99eff..c572a4c09359 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -126,7 +126,7 @@ found_it:
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret) {
- pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+ pr_err("err %d from remap_vmalloc_range\n", ret);
goto done;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f3dab6574504..1869152f1d23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -39,7 +39,7 @@
*/
static u8 rxe_get_key(void)
{
- static unsigned key = 1;
+ static u32 key = 1;
key = key << 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index eedf2f1cafdf..b8258e4f0aea 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -65,7 +65,7 @@ struct rxe_dev *net_to_rxe(struct net_device *ndev)
return found;
}
-struct rxe_dev *get_rxe_by_name(const char* name)
+struct rxe_dev *get_rxe_by_name(const char *name)
{
struct rxe_dev *rxe;
struct rxe_dev *found = NULL;
@@ -350,14 +350,14 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, struct rxe_av *av)
{
struct dst_entry *dst;
bool xnet = false;
__be16 df = htons(IP_DF);
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
dst = rxe_find_route4(rxe->ndev, saddr, daddr);
if (!dst) {
@@ -376,12 +376,12 @@ static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
return 0;
}
-static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, struct rxe_av *av)
{
struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
dst = rxe_find_route6(rxe->ndev, saddr, daddr);
if (!dst) {
@@ -408,9 +408,9 @@ static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_av *av = rxe_get_av(pkt);
if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(rxe, skb, av);
+ err = prepare4(rxe, pkt, skb, av);
else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(rxe, skb, av);
+ err = prepare6(rxe, pkt, skb, av);
*crc = rxe_icrc_hdr(pkt, skb);
@@ -601,8 +601,7 @@ void rxe_port_up(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
- pr_info("rxe: set %s active\n", rxe->ib_dev.name);
- return;
+ pr_info("set %s active\n", rxe->ib_dev.name);
}
/* Caller must hold net_info_lock */
@@ -615,8 +614,7 @@ void rxe_port_down(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
- pr_info("rxe: set %s down\n", rxe->ib_dev.name);
- return;
+ pr_info("set %s down\n", rxe->ib_dev.name);
}
static int rxe_notify(struct notifier_block *not_blk,
@@ -641,7 +639,7 @@ static int rxe_notify(struct notifier_block *not_blk,
rxe_port_down(rxe);
break;
case NETDEV_CHANGEMTU:
- pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+ pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
case NETDEV_REBOOT:
@@ -651,7 +649,7 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
default:
- pr_info("rxe: ignoring netdev event = %ld for %s\n",
+ pr_info("ignoring netdev event = %ld for %s\n",
event, ndev->name);
break;
}
@@ -671,7 +669,7 @@ int rxe_net_ipv4_init(void)
htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
recv_sockets.sk4 = NULL;
- pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+ pr_err("Failed to create IPv4 UDP tunnel\n");
return -1;
}
@@ -688,7 +686,7 @@ int rxe_net_ipv6_init(void)
htons(ROCE_V2_UDP_DPORT), true);
if (IS_ERR(recv_sockets.sk6)) {
recv_sockets.sk6 = NULL;
- pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ pr_err("Failed to create IPv6 UDP tunnel\n");
return -1;
}
#endif
@@ -701,3 +699,26 @@ void rxe_net_exit(void)
rxe_release_udp_tunnel(recv_sockets.sk4);
unregister_netdevice_notifier(&rxe_net_notifier);
}
+
+int rxe_net_init(void)
+{
+ int err;
+
+ recv_sockets.sk6 = NULL;
+
+ err = rxe_net_ipv4_init();
+ if (err)
+ return err;
+ err = rxe_net_ipv6_init();
+ if (err)
+ goto err_out;
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("Failed to register netdev notifier\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ rxe_net_exit();
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 0daf7f09e5b5..1c06b3bfe1b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -49,8 +49,7 @@ void rxe_release_udp_tunnel(struct socket *sk);
struct rxe_dev *rxe_net_add(struct net_device *ndev);
-int rxe_net_ipv4_init(void);
-int rxe_net_ipv6_init(void);
+int rxe_net_init(void);
void rxe_net_exit(void);
#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22ba24f2a2c1..b8036cfbce04 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -146,7 +146,7 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
if (qp->resp.resources) {
int i;
- for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
free_rd_atomic_resource(qp, res);
@@ -174,7 +174,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
struct resp_res *res;
if (qp->resp.resources) {
- for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
res = &qp->resp.resources[i];
free_rd_atomic_resource(qp, res);
}
@@ -298,8 +298,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
wqe_size = rcv_wqe_size(qp->rq.max_sge);
- pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
- qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+ pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
+ qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
qp->rq.queue = rxe_queue_init(rxe,
&qp->rq.max_wr,
@@ -596,14 +596,21 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ int max_dest_rd_atomic =
+ __roundup_pow_of_two(attr->max_dest_rd_atomic);
+
+ qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
free_rd_atomic_resources(qp);
- err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+ err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
if (err)
return err;
-
- qp->attr.max_rd_atomic = max_rd_atomic;
- atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
if (mask & IB_QP_CUR_STATE)
@@ -673,24 +680,27 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
if (mask & IB_QP_RETRY_CNT) {
qp->attr.retry_cnt = attr->retry_cnt;
qp->comp.retry_cnt = attr->retry_cnt;
- pr_debug("set retry count = %d\n", attr->retry_cnt);
+ pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
+ attr->retry_cnt);
}
if (mask & IB_QP_RNR_RETRY) {
qp->attr.rnr_retry = attr->rnr_retry;
qp->comp.rnr_retry = attr->rnr_retry;
- pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+ pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
+ attr->rnr_retry);
}
if (mask & IB_QP_RQ_PSN) {
qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
qp->resp.psn = qp->attr.rq_psn;
- pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+ pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
+ qp->resp.psn);
}
if (mask & IB_QP_MIN_RNR_TIMER) {
qp->attr.min_rnr_timer = attr->min_rnr_timer;
- pr_debug("set min rnr timer = 0x%x\n",
+ pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
attr->min_rnr_timer);
}
@@ -698,12 +708,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
qp->req.psn = qp->attr.sq_psn;
qp->comp.psn = qp->attr.sq_psn;
- pr_debug("set req psn = 0x%x\n", qp->req.psn);
- }
-
- if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- qp->attr.max_dest_rd_atomic =
- __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
}
if (mask & IB_QP_PATH_MIG_STATE)
@@ -717,38 +722,38 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
switch (attr->qp_state) {
case IB_QPS_RESET:
- pr_debug("qp state -> RESET\n");
+ pr_debug("qp#%d state -> RESET\n", qp_num(qp));
rxe_qp_reset(qp);
break;
case IB_QPS_INIT:
- pr_debug("qp state -> INIT\n");
+ pr_debug("qp#%d state -> INIT\n", qp_num(qp));
qp->req.state = QP_STATE_INIT;
qp->resp.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
- pr_debug("qp state -> RTR\n");
+ pr_debug("qp#%d state -> RTR\n", qp_num(qp));
qp->resp.state = QP_STATE_READY;
break;
case IB_QPS_RTS:
- pr_debug("qp state -> RTS\n");
+ pr_debug("qp#%d state -> RTS\n", qp_num(qp));
qp->req.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
- pr_debug("qp state -> SQD\n");
+ pr_debug("qp#%d state -> SQD\n", qp_num(qp));
rxe_qp_drain(qp);
break;
case IB_QPS_SQE:
- pr_warn("qp state -> SQE !!?\n");
+ pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
/* Not possible from modify_qp. */
break;
case IB_QPS_ERR:
- pr_debug("qp state -> ERR\n");
+ pr_debug("qp#%d state -> ERR\n", qp_num(qp));
rxe_qp_error(qp);
break;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 144d2f129fcd..46f062842a9a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -387,7 +387,8 @@ int rxe_rcv(struct sk_buff *skb)
pack_icrc = be32_to_cpu(*icrcp);
calc_icrc = rxe_icrc_hdr(pkt, skb);
- calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+ calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
char saddr[sizeof(struct in6_addr)];
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 13a848a518e8..832846b73ea0 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -38,7 +38,7 @@
#include "rxe_queue.h"
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- unsigned opcode);
+ u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
@@ -121,7 +121,7 @@ void rnr_nak_timer(unsigned long data)
{
struct rxe_qp *qp = (struct rxe_qp *)data;
- pr_debug("rnr nak timer fired\n");
+ pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
rxe_run_task(&qp->req.task, 1);
}
@@ -187,7 +187,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
return wqe;
}
-static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -259,7 +259,7 @@ static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
return -EINVAL;
}
-static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -311,7 +311,7 @@ static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
}
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- unsigned opcode)
+ u32 opcode)
{
int fits = (wqe->dma.resid <= qp->mtu);
@@ -588,7 +588,7 @@ int rxe_requester(void *arg)
struct rxe_pkt_info pkt;
struct sk_buff *skb;
struct rxe_send_wqe *wqe;
- unsigned mask;
+ enum rxe_hdr_mask mask;
int payload;
int mtu;
int opcode;
@@ -626,7 +626,8 @@ next_wqe:
rmr = rxe_pool_get_index(&rxe->mr_pool,
wqe->wr.ex.invalidate_rkey >> 8);
if (!rmr) {
- pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+ pr_err("No mr for key %#x\n",
+ wqe->wr.ex.invalidate_rkey);
wqe->state = wqe_state_error;
wqe->status = IB_WC_MW_BIND_ERR;
goto exit;
@@ -702,12 +703,12 @@ next_wqe:
skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
- pr_err("Failed allocating skb\n");
+ pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
goto err;
}
if (fill_packet(qp, wqe, &pkt, skb, payload)) {
- pr_debug("Error during fill packet\n");
+ pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
goto err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3e0f0f2baace..dd3d88adc003 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -383,7 +383,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
* too many read/atomic ops, we just
* recycle the responder resource queue
*/
- if (likely(qp->attr.max_rd_atomic > 0))
+ if (likely(qp->attr.max_dest_rd_atomic > 0))
return RESPST_CHK_LENGTH;
else
return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
@@ -749,6 +749,18 @@ static enum resp_states read_reply(struct rxe_qp *qp,
return state;
}
+static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
+ struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ memset(hdr, 0, sizeof(*hdr));
+ if (skb->protocol == htons(ETH_P_IP))
+ memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+}
+
/* Executes a new request. A retried request never reaches this function (send
* and writes are discarded, and reads and atomics are retried elsewhere).
*/
@@ -761,13 +773,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
union rdma_network_hdr hdr;
- struct sk_buff *skb = PKT_TO_SKB(pkt);
- memset(&hdr, 0, sizeof(hdr));
- if (skb->protocol == htons(ETH_P_IP))
- memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
- else if (skb->protocol == htons(ETH_P_IPV6))
- memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+ build_rdma_network_hdr(&hdr, pkt);
err = send_data_in(qp, &hdr, sizeof(hdr));
if (err)
@@ -881,7 +888,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
rmr = rxe_pool_get_index(&rxe->mr_pool,
wc->ex.invalidate_rkey >> 8);
if (unlikely(!rmr)) {
- pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+ pr_err("Bad rkey %#x invalidation\n",
+ wc->ex.invalidate_rkey);
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
@@ -1208,7 +1216,8 @@ int rxe_responder(void *arg)
}
while (1) {
- pr_debug("state = %s\n", resp_state_name[state]);
+ pr_debug("qp#%d state = %s\n", qp_num(qp),
+ resp_state_name[state]);
switch (state) {
case RESPST_GET_REQ:
state = get_req(qp, &pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index cf8e77800046..d5ed7571128f 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -79,7 +79,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
- pr_err("rxe: add: invalid interface name\n");
+ pr_err("add: invalid interface name\n");
err = -EINVAL;
goto err;
}
@@ -92,20 +92,20 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
}
if (net_to_rxe(ndev)) {
- pr_err("rxe: already configured on %s\n", intf);
+ pr_err("already configured on %s\n", intf);
err = -EINVAL;
goto err;
}
rxe = rxe_net_add(ndev);
if (!rxe) {
- pr_err("rxe: failed to add %s\n", intf);
+ pr_err("failed to add %s\n", intf);
err = -EINVAL;
goto err;
}
rxe_set_port_state(ndev);
- pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+ pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
err:
if (ndev)
dev_put(ndev);
@@ -120,7 +120,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
- pr_err("rxe: add: invalid interface name\n");
+ pr_err("add: invalid interface name\n");
return -EINVAL;
}
@@ -133,7 +133,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
rxe = get_rxe_by_name(intf);
if (!rxe) {
- pr_err("rxe: not configured on %s\n", intf);
+ pr_err("not configured on %s\n", intf);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4552be960c6a..19841c863daf 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -100,10 +100,12 @@ static int rxe_query_port(struct ib_device *dev,
rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
speed = cmd.speed;
} else {
- pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+ pr_warn("%s speed is unknown, defaulting to 1000\n",
+ rxe->ndev->name);
speed = 1000;
}
- rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+ rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
+ &attr->active_width);
mutex_unlock(&rxe->usdev_lock);
return 0;
@@ -761,7 +763,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
}
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
- unsigned mask, u32 length)
+ unsigned int mask, u32 length)
{
int err;
struct rxe_sq *sq = &qp->sq;
@@ -801,26 +803,15 @@ err1:
return err;
}
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
{
int err = 0;
- struct rxe_qp *qp = to_rqp(ibqp);
unsigned int mask;
unsigned int length = 0;
int i;
int must_sched;
- if (unlikely(!qp->valid)) {
- *bad_wr = wr;
- return -EINVAL;
- }
-
- if (unlikely(qp->req.state < QP_STATE_READY)) {
- *bad_wr = wr;
- return -EINVAL;
- }
-
while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
if (unlikely(!mask)) {
@@ -861,6 +852,29 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return err;
}
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ if (unlikely(!qp->valid)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (unlikely(qp->req.state < QP_STATE_READY)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (qp->is_user) {
+ /* Utilize process context to do protocol processing */
+ rxe_run_task(&qp->req.task, 0);
+ return 0;
+ } else
+ return rxe_post_send_kernel(qp, wr, bad_wr);
+}
+
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
@@ -1133,8 +1147,8 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
{
struct rxe_mem *mr = to_rmr(ibmr);
int n;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 9dbfcc0ab577..7b8d2d9e2263 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -772,7 +772,13 @@ static inline void ipoib_unregister_debugfs(void) { }
#define ipoib_printk(level, priv, format, arg...) \
printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
#define ipoib_warn(priv, format, arg...) \
- ipoib_printk(KERN_WARNING, priv, format , ## arg)
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ 10 * HZ /*10 seconds */, \
+ 100); \
+ if (__ratelimit(&_rs)) \
+ ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
extern int ipoib_sendq_size;
extern int ipoib_recvq_size;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cc1c1b062ea5..5636fc3da6b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2196,7 +2196,8 @@ static int __init ipoib_init_module(void)
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
+ WQ_MEM_RECLAIM);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c55ecb2c3736..189dcd1709d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
- priv->pd = ib_alloc_pd(priv->ca);
+ priv->pd = ib_alloc_pd(priv->ca, 0);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
return -ENODEV;
@@ -157,7 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
* the various IPoIB tasks assume they will never race against
* themselves, so always use a single thread workqueue
*/
- priv->wq = create_singlethread_workqueue("ipoib_wq");
+ priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
if (!priv->wq) {
printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
goto out_free_pd;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0351059783b1..0be6a7c5ddb5 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -374,7 +374,6 @@ struct iser_reg_ops {
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_mr *mr;
struct ib_event_handler event_handler;
struct list_head ig_list;
int refcount;
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 90be56893414..9c3e9ab53a41 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -199,7 +199,11 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
* FIXME: rework the registration code path to differentiate
* rkey/lkey use cases
*/
- reg->rkey = device->mr ? device->mr->rkey : 0;
+
+ if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ reg->rkey = device->pd->unsafe_global_rkey;
+ else
+ reg->rkey = 0;
reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1b4945367e4f..a4b791dfaa1d 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,7 +88,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
device->comps_used, ib_dev->name,
ib_dev->num_comp_vectors, max_cqe);
- device->pd = ib_alloc_pd(ib_dev);
+ device->pd = ib_alloc_pd(ib_dev,
+ iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(device->pd))
goto pd_err;
@@ -103,26 +104,13 @@ static int iser_create_device_ib_res(struct iser_device *device)
}
}
- if (!iser_always_reg) {
- int access = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ;
-
- device->mr = ib_get_dma_mr(device->pd, access);
- if (IS_ERR(device->mr))
- goto cq_err;
- }
-
INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
iser_event_handler);
if (ib_register_event_handler(&device->event_handler))
- goto handler_err;
+ goto cq_err;
return 0;
-handler_err:
- if (device->mr)
- ib_dereg_mr(device->mr);
cq_err:
for (i = 0; i < device->comps_used; i++) {
struct iser_comp *comp = &device->comps[i];
@@ -154,14 +142,10 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
(void)ib_unregister_event_handler(&device->event_handler);
- if (device->mr)
- (void)ib_dereg_mr(device->mr);
ib_dealloc_pd(device->pd);
kfree(device->comps);
device->comps = NULL;
-
- device->mr = NULL;
device->pd = NULL;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index cae9bbcc27e7..6dd43f63238e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -309,7 +309,7 @@ isert_create_device_ib_res(struct isert_device *device)
if (ret)
goto out;
- device->pd = ib_alloc_pd(ib_dev);
+ device->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR(device->pd)) {
ret = PTR_ERR(device->pd);
isert_err("failed to allocate pd, device %p, ret=%d\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3322ed750172..d980fb458ad4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1262,6 +1262,7 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_pd *pd = target->pd;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
@@ -1273,9 +1274,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
if (state->npages == 0)
return 0;
- if (state->npages == 1 && target->global_mr) {
+ if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_mr->rkey);
+ pd->unsafe_global_rkey);
goto reset_state;
}
@@ -1315,6 +1316,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_pd *pd = target->pd;
struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
@@ -1326,12 +1328,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && target->global_mr) {
+ if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
- target->global_mr->rkey);
+ pd->unsafe_global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
@@ -1386,7 +1388,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
static int srp_map_sg_entry(struct srp_map_state *state,
struct srp_rdma_ch *ch,
- struct scatterlist *sg, int sg_index)
+ struct scatterlist *sg)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
@@ -1400,7 +1402,9 @@ static int srp_map_sg_entry(struct srp_map_state *state,
while (dma_len) {
unsigned offset = dma_addr & ~dev->mr_page_mask;
- if (state->npages == dev->max_pages_per_mr || offset != 0) {
+
+ if (state->npages == dev->max_pages_per_mr ||
+ (state->npages > 0 && offset != 0)) {
ret = srp_map_finish_fmr(state, ch);
if (ret)
return ret;
@@ -1417,12 +1421,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
}
/*
- * If the last entry of the MR wasn't a full page, then we need to
+ * If the end of the MR is not on a page boundary then we need to
* close it out and start a new one -- we can only merge at page
* boundaries.
*/
ret = 0;
- if (len != dev->mr_page_size)
+ if ((dma_addr & ~dev->mr_page_mask) != 0)
ret = srp_map_finish_fmr(state, ch);
return ret;
}
@@ -1439,7 +1443,7 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg, i);
+ ret = srp_map_sg_entry(state, ch, sg);
if (ret)
return ret;
}
@@ -1491,7 +1495,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
- target->global_mr->rkey);
+ target->pd->unsafe_global_rkey);
}
return 0;
@@ -1591,6 +1595,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
+ struct ib_pd *pd = target->pd;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count, ret;
@@ -1626,7 +1631,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && target->global_mr) {
+ if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1636,7 +1641,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(target->global_mr->rkey);
+ buf->key = cpu_to_be32(pd->unsafe_global_rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1709,14 +1714,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
- if (!target->global_mr) {
+ if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
- idb_rkey = cpu_to_be32(target->global_mr->rkey);
+ idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -3268,8 +3273,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
+ target->pd = host->srp_dev->pd;
target->lkey = host->srp_dev->pd->local_dma_lkey;
- target->global_mr = host->srp_dev->global_mr;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3524,6 +3529,7 @@ static void srp_add_one(struct ib_device *device)
struct srp_host *host;
int mr_page_shift, p;
u64 max_pages_per_mr;
+ unsigned int flags = 0;
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
@@ -3558,6 +3564,10 @@ static void srp_add_one(struct ib_device *device)
srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
}
+ if (never_register || !register_always ||
+ (!srp_dev->has_fmr && !srp_dev->has_fr))
+ flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
+
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
@@ -3573,19 +3583,10 @@ static void srp_add_one(struct ib_device *device)
INIT_LIST_HEAD(&srp_dev->dev_list);
srp_dev->dev = device;
- srp_dev->pd = ib_alloc_pd(device);
+ srp_dev->pd = ib_alloc_pd(device, flags);
if (IS_ERR(srp_dev->pd))
goto free_dev;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr)) {
- srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- if (IS_ERR(srp_dev->global_mr))
- goto err_pd;
- }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
@@ -3596,9 +3597,6 @@ static void srp_add_one(struct ib_device *device)
ib_set_client_data(device, &srp_client, srp_dev);
return;
-err_pd:
- ib_dealloc_pd(srp_dev->pd);
-
free_dev:
kfree(srp_dev);
}
@@ -3638,8 +3636,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
kfree(host);
}
- if (srp_dev->global_mr)
- ib_dereg_mr(srp_dev->global_mr);
ib_dealloc_pd(srp_dev->pd);
kfree(srp_dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 26bb9b0a7a63..21c69695f9d4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,7 +90,6 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *global_mr;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
@@ -179,7 +178,7 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
- struct ib_mr *global_mr;
+ struct ib_pd *pd;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 883bbfe08e0e..0b1f69ed2e92 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2480,7 +2480,7 @@ static void srpt_add_one(struct ib_device *device)
init_waitqueue_head(&sdev->ch_releaseQ);
mutex_init(&sdev->mutex);
- sdev->pd = ib_alloc_pd(device);
+ sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
goto free_dev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 509608c95994..cbd75cf44739 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,6 +12,21 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
+config KEYBOARD_ADC
+ tristate "ADC Ladder Buttons"
+ depends on IIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for buttons connected
+ to an ADC using a resistor ladder.
+
+ Say Y here if your device has such buttons connected to an ADC. Your
+ board-specific setup logic must also provide configuration data
+ for mapping voltages to buttons.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adc_keys.
+
config KEYBOARD_ADP5520
tristate "Keypad Support for ADP5520 PMIC"
depends on PMIC_ADP5520
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 1d416ddf84e4..d9f4cfcf3410 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,6 +4,7 @@
# Each configuration option enables a list of files.
+obj-$(CONFIG_KEYBOARD_ADC) += adc-keys.o
obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
new file mode 100644
index 000000000000..f8cf2ccacefd
--- /dev/null
+++ b/drivers/input/keyboard/adc-keys.c
@@ -0,0 +1,210 @@
+/*
+ * Input driver for resistor ladder connected on ADC
+ *
+ * Copyright (c) 2016 Alexandre Belloni
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+struct adc_keys_button {
+ u32 voltage;
+ u32 keycode;
+};
+
+struct adc_keys_state {
+ struct iio_channel *channel;
+ u32 num_keys;
+ u32 last_key;
+ u32 keyup_voltage;
+ const struct adc_keys_button *map;
+};
+
+static void adc_keys_poll(struct input_polled_dev *dev)
+{
+ struct adc_keys_state *st = dev->private;
+ int i, value, ret;
+ u32 diff, closest = 0xffffffff;
+ int keycode = 0;
+
+ ret = iio_read_channel_processed(st->channel, &value);
+ if (unlikely(ret < 0)) {
+ /* Forcibly release key if any was pressed */
+ value = st->keyup_voltage;
+ } else {
+ for (i = 0; i < st->num_keys; i++) {
+ diff = abs(st->map[i].voltage - value);
+ if (diff < closest) {
+ closest = diff;
+ keycode = st->map[i].keycode;
+ }
+ }
+ }
+
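+ /*
+  * If the open-circuit (keyup) voltage is closer to the sample than any
+  * mapped button voltage, treat the reading as "no key pressed".
+  */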
+ if (abs(st->keyup_voltage - value) < closest)
+ keycode = 0;
+
+ if (st->last_key && st->last_key != keycode)
+ input_report_key(dev->input, st->last_key, 0);
+
+ if (keycode)
+ input_report_key(dev->input, keycode, 1);
+
+ input_sync(dev->input);
+ st->last_key = keycode;
+}
+
+static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
+{
+ struct adc_keys_button *map;
+ struct fwnode_handle *child;
+ int i;
+
+ st->num_keys = device_get_child_node_count(dev);
+ if (st->num_keys == 0) {
+ dev_err(dev, "keymap is missing\n");
+ return -EINVAL;
+ }
+
+ map = devm_kmalloc_array(dev, st->num_keys, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ i = 0;
+ device_for_each_child_node(dev, child) {
+ if (fwnode_property_read_u32(child, "press-threshold-microvolt",
+ &map[i].voltage)) {
+ dev_err(dev, "Key with invalid or missing voltage\n");
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
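+ /* Property is in microvolts; scale to millivolts, the unit of the processed IIO reading */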
+ map[i].voltage /= 1000;
+
+ if (fwnode_property_read_u32(child, "linux,code",
+ &map[i].keycode)) {
+ dev_err(dev, "Key with invalid or missing linux,code\n");
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ st->map = map;
+ return 0;
+}
+
+static int adc_keys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adc_keys_state *st;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ enum iio_chan_type type;
+ int i, value;
+ int error;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->channel = devm_iio_channel_get(dev, "buttons");
+ if (IS_ERR(st->channel))
+ return PTR_ERR(st->channel);
+
+ if (!st->channel->indio_dev)
+ return -ENXIO;
+
+ error = iio_get_channel_type(st->channel, &type);
+ if (error < 0)
+ return error;
+
+ if (type != IIO_VOLTAGE) {
+ dev_err(dev, "Incompatible channel type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(dev, "keyup-threshold-microvolt",
+ &st->keyup_voltage)) {
+ dev_err(dev, "Invalid or missing keyup voltage\n");
+ return -EINVAL;
+ }
+ st->keyup_voltage /= 1000;
+
+ error = adc_keys_load_keymap(dev, st);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, st);
+
+ poll_dev = devm_input_allocate_polled_device(dev);
+ if (!poll_dev) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ if (!device_property_read_u32(dev, "poll-interval", &value))
+ poll_dev->poll_interval = value;
+
+ poll_dev->poll = adc_keys_poll;
+ poll_dev->private = st;
+
+ input = poll_dev->input;
+
+ input->name = pdev->name;
+ input->phys = "adc-keys/input0";
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ __set_bit(EV_KEY, input->evbit);
+ for (i = 0; i < st->num_keys; i++)
+ __set_bit(st->map[i].keycode, input->keybit);
+
+ if (device_property_read_bool(dev, "autorepeat"))
+ __set_bit(EV_REP, input->evbit);
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "Unable to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id adc_keys_of_match[] = {
+ { .compatible = "adc-keys", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adc_keys_of_match);
+#endif
+
+static struct platform_driver __refdata adc_keys_driver = {
+ .driver = {
+ .name = "adc_keys",
+ .of_match_table = of_match_ptr(adc_keys_of_match),
+ },
+ .probe = adc_keys_probe,
+};
+module_platform_driver(adc_keys_driver);
+
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Input driver for resistor ladder connected on ADC");
+MODULE_LICENSE("GPL v2");
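+
+/*
+ * Illustrative device tree sketch: the property names are the ones parsed
+ * by adc_keys_probe() and adc_keys_load_keymap() above, while the ADC
+ * phandle, node names, key codes and voltage values are example
+ * assumptions.  The io-channels / io-channel-names pair follows the usual
+ * IIO consumer binding and must name the channel "buttons".
+ *
+ *	adc-keys {
+ *		compatible = "adc-keys";
+ *		io-channels = <&adc 0>;
+ *		io-channel-names = "buttons";
+ *		keyup-threshold-microvolt = <1800000>;
+ *		poll-interval = <100>;		// in ms
+ *
+ *		button-up {
+ *			linux,code = <115>;	// KEY_VOLUMEUP
+ *			press-threshold-microvolt = <1500000>;
+ *		};
+ *
+ *		button-down {
+ *			linux,code = <114>;	// KEY_VOLUMEDOWN
+ *			press-threshold-microvolt = <700000>;
+ *		};
+ *	};
+ */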
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 4b0878f35471..25943e9bc8bf 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -27,6 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
@@ -44,6 +45,7 @@
* @dev: Device pointer
* @idev: Input device
* @ec: Top level ChromeOS device to use to talk to EC
+ * @notifier: interrupt event notifier for transport devices
*/
struct cros_ec_keyb {
unsigned int rows;
@@ -57,6 +59,7 @@ struct cros_ec_keyb {
struct device *dev;
struct input_dev *idev;
struct cros_ec_device *ec;
+ struct notifier_block notifier;
};
@@ -146,67 +149,44 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
input_sync(ckdev->idev);
}
-static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state)
-{
- int ret = 0;
- struct cros_ec_command *msg;
-
- msg = kmalloc(sizeof(*msg) + ckdev->cols, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->version = 0;
- msg->command = EC_CMD_MKBP_STATE;
- msg->insize = ckdev->cols;
- msg->outsize = 0;
-
- ret = cros_ec_cmd_xfer(ckdev->ec, msg);
- if (ret < 0) {
- dev_err(ckdev->dev, "Error transferring EC message %d\n", ret);
- goto exit;
- }
-
- memcpy(kb_state, msg->data, ckdev->cols);
-exit:
- kfree(msg);
- return ret;
-}
-
-static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
+static int cros_ec_keyb_open(struct input_dev *dev)
{
- struct cros_ec_keyb *ckdev = data;
- struct cros_ec_device *ec = ckdev->ec;
- int ret;
- uint8_t kb_state[ckdev->cols];
-
- if (device_may_wakeup(ec->dev))
- pm_wakeup_event(ec->dev, 0);
-
- ret = cros_ec_keyb_get_state(ckdev, kb_state);
- if (ret >= 0)
- cros_ec_keyb_process(ckdev, kb_state, ret);
- else
- dev_err(ckdev->dev, "failed to get keyboard state: %d\n", ret);
+ struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
- return IRQ_HANDLED;
+ return blocking_notifier_chain_register(&ckdev->ec->event_notifier,
+ &ckdev->notifier);
}
-static int cros_ec_keyb_open(struct input_dev *dev)
+static void cros_ec_keyb_close(struct input_dev *dev)
{
struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
- struct cros_ec_device *ec = ckdev->ec;
- return request_threaded_irq(ec->irq, NULL, cros_ec_keyb_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "cros_ec_keyb", ckdev);
+ blocking_notifier_chain_unregister(&ckdev->ec->event_notifier,
+ &ckdev->notifier);
}
-static void cros_ec_keyb_close(struct input_dev *dev)
+static int cros_ec_keyb_work(struct notifier_block *nb,
+ unsigned long queued_during_suspend, void *_notify)
{
- struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
- struct cros_ec_device *ec = ckdev->ec;
+ struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
+ notifier);
- free_irq(ec->irq, ckdev);
+ if (ckdev->ec->event_data.event_type != EC_MKBP_EVENT_KEY_MATRIX)
+ return NOTIFY_DONE;
+ /*
+ * If EC is not the wake source, discard key state changes during
+ * suspend.
+ */
+ if (queued_during_suspend)
+ return NOTIFY_OK;
+ if (ckdev->ec->event_size != ckdev->cols) {
+ dev_err(ckdev->dev,
+ "Discarded incomplete key matrix event.\n");
+ return NOTIFY_OK;
+ }
+ cros_ec_keyb_process(ckdev, ckdev->ec->event_data.data.key_matrix,
+ ckdev->ec->event_size);
+ return NOTIFY_OK;
}
/*
@@ -265,12 +245,8 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
if (!idev)
return -ENOMEM;
- if (!ec->irq) {
- dev_err(dev, "no EC IRQ specified\n");
- return -EINVAL;
- }
-
ckdev->ec = ec;
+ ckdev->notifier.notifier_call = cros_ec_keyb_work;
ckdev->dev = dev;
dev_set_drvdata(dev, ckdev);
@@ -311,54 +287,6 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-/* Clear any keys in the buffer */
-static void cros_ec_keyb_clear_keyboard(struct cros_ec_keyb *ckdev)
-{
- uint8_t old_state[ckdev->cols];
- uint8_t new_state[ckdev->cols];
- unsigned long duration;
- int i, ret;
-
- /*
- * Keep reading until we see that the scan state does not change.
- * That indicates that we are done.
- *
- * Assume that the EC keyscan buffer is at most 32 deep.
- */
- duration = jiffies;
- ret = cros_ec_keyb_get_state(ckdev, new_state);
- for (i = 1; !ret && i < 32; i++) {
- memcpy(old_state, new_state, sizeof(old_state));
- ret = cros_ec_keyb_get_state(ckdev, new_state);
- if (0 == memcmp(old_state, new_state, sizeof(old_state)))
- break;
- }
- duration = jiffies - duration;
- dev_info(ckdev->dev, "Discarded %d keyscan(s) in %dus\n", i,
- jiffies_to_usecs(duration));
-}
-
-static int cros_ec_keyb_resume(struct device *dev)
-{
- struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
-
- /*
- * When the EC is not a wake source, then it could not have caused the
- * resume, so we clear the EC's key scan buffer. If the EC was a
- * wake source (e.g. the lid is open and the user might press a key to
- * wake) then the key scan buffer should be preserved.
- */
- if (!ckdev->ec->was_wake_device)
- cros_ec_keyb_clear_keyboard(ckdev);
-
- return 0;
-}
-
-#endif
-
-static SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume);
-
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
@@ -372,7 +300,6 @@ static struct platform_driver cros_ec_keyb_driver = {
.driver = {
.name = "cros-ec-keyb",
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
- .pm = &cros_ec_keyb_pm_ops,
},
};
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 421d9c55b0e8..1277c39f9482 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -25,8 +25,6 @@
#include <linux/slab.h>
#include <mach/jornada720.h>
-#include <mach/hardware.h>
-#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
@@ -66,10 +64,8 @@ static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
jornada_ssp_start();
if (jornada_ssp_inout(GETSCANKEYCODE) != TXDUMMY) {
- printk(KERN_DEBUG
- "jornada720_kbd: "
- "GetKeycode command failed with ETIMEDOUT, "
- "flushed bus\n");
+ dev_dbg(&pdev->dev,
+ "GetKeycode command failed with ETIMEDOUT, flushed bus\n");
} else {
/* How many keycodes are waiting for us? */
count = jornada_ssp_byte(TXDUMMY);
@@ -97,14 +93,16 @@ static int jornada720_kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_dev *input_dev;
- int i, err;
+ int i, err, irq;
- jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!jornadakbd || !input_dev) {
- err = -ENOMEM;
- goto fail1;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq < 0 ? irq : -EINVAL;
+
+ jornadakbd = devm_kzalloc(&pdev->dev, sizeof(*jornadakbd), GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!jornadakbd || !input_dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, jornadakbd);
@@ -127,40 +125,16 @@ static int jornada720_kbd_probe(struct platform_device *pdev)
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- err = request_irq(IRQ_GPIO0,
- jornada720_kbd_interrupt,
- IRQF_TRIGGER_FALLING,
- "jornadakbd", pdev);
+ err = devm_request_irq(&pdev->dev, irq, jornada720_kbd_interrupt,
+ IRQF_TRIGGER_FALLING, "jornadakbd", pdev);
if (err) {
- printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n");
- goto fail1;
+ dev_err(&pdev->dev, "unable to grab IRQ%d: %d\n", irq, err);
+ return err;
}
- err = input_register_device(jornadakbd->input);
- if (err)
- goto fail2;
-
- return 0;
-
- fail2: /* IRQ, DEVICE, MEMORY */
- free_irq(IRQ_GPIO0, pdev);
- fail1: /* DEVICE, MEMORY */
- input_free_device(input_dev);
- kfree(jornadakbd);
- return err;
+ return input_register_device(jornadakbd->input);
};
-static int jornada720_kbd_remove(struct platform_device *pdev)
-{
- struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
-
- free_irq(IRQ_GPIO0, pdev);
- input_unregister_device(jornadakbd->input);
- kfree(jornadakbd);
-
- return 0;
-}
-
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:jornada720_kbd");
@@ -169,6 +143,5 @@ static struct platform_driver jornada720_kbd_driver = {
.name = "jornada720_kbd",
},
.probe = jornada720_kbd_probe,
- .remove = jornada720_kbd_remove,
};
module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 24a9f599082f..7544888c4749 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -168,7 +168,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
error = input_register_device(input);
if (error < 0) {
dev_err(&pdev->dev, "failed to register input device\n");
- input_free_device(input);
return error;
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index efb0ca871327..7ffb614ce566 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -292,6 +292,18 @@ config INPUT_GPIO_TILT_POLLED
To compile this driver as a module, choose M here: the
module will be called gpio_tilt_polled.
+config INPUT_GPIO_DECODER
+ tristate "Polled GPIO Decoder Input driver"
+ depends on GPIOLIB || COMPILE_TEST
+ select INPUT_POLLDEV
+ help
+ Say Y here if you want the driver to read the status of multiple
+ GPIO lines and report the encoded value as an absolute integer to
+ the input subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called gpio_decoder.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
@@ -454,10 +466,10 @@ config INPUT_RETU_PWRBUTTON
config INPUT_TPS65218_PWRBUTTON
tristate "TPS65218 Power button driver"
- depends on MFD_TPS65218
+ depends on (MFD_TPS65217 || MFD_TPS65218)
help
Say Y here if you want to enable power button reporting for
- the TPS65218 Power Management IC device.
+ TPS65217 and TPS65218 Power Management IC devices.
To compile this driver as a module, choose M here. The module will
be called tps65218-pwrbutton.
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 6a1e5e20fc1c..0b6d025f0487 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
diff --git a/drivers/input/misc/gpio_decoder.c b/drivers/input/misc/gpio_decoder.c
new file mode 100644
index 000000000000..ca7e0bacb2d8
--- /dev/null
+++ b/drivers/input/misc/gpio_decoder.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * A generic driver to read multiple gpio lines and translate the
+ * encoded numeric value into an input event.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct gpio_decoder {
+ struct input_polled_dev *poll_dev;
+ struct gpio_descs *input_gpios;
+ struct device *dev;
+ u32 axis;
+ u32 last_stable;
+};
+
+static int gpio_decoder_get_gpios_state(struct gpio_decoder *decoder)
+{
+ struct gpio_descs *gpios = decoder->input_gpios;
+ unsigned int ret = 0;
+ int i, val;
+
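+ /*
+  * Accumulate MSB-first: the first GPIO in the array becomes the highest
+  * bit, so e.g. three lines reading 1, 0, 1 yield the value 5.
+  */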
+ for (i = 0; i < gpios->ndescs; i++) {
+ val = gpiod_get_value_cansleep(gpios->desc[i]);
+ if (val < 0) {
+ dev_err(decoder->dev,
+ "Error reading gpio %d: %d\n",
+ desc_to_gpio(gpios->desc[i]), val);
+ return val;
+ }
+
+ val = !!val;
+ ret = (ret << 1) | val;
+ }
+
+ return ret;
+}
+
+static void gpio_decoder_poll_gpios(struct input_polled_dev *poll_dev)
+{
+ struct gpio_decoder *decoder = poll_dev->private;
+ int state;
+
+ state = gpio_decoder_get_gpios_state(decoder);
+ if (state >= 0 && state != decoder->last_stable) {
+ input_report_abs(poll_dev->input, decoder->axis, state);
+ input_sync(poll_dev->input);
+ decoder->last_stable = state;
+ }
+}
+
+static int gpio_decoder_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_decoder *decoder;
+ struct input_polled_dev *poll_dev;
+ u32 max;
+ int err;
+
+ decoder = devm_kzalloc(dev, sizeof(struct gpio_decoder), GFP_KERNEL);
+ if (!decoder)
+ return -ENOMEM;
+
+ device_property_read_u32(dev, "linux,axis", &decoder->axis);
+ decoder->input_gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
+ if (IS_ERR(decoder->input_gpios)) {
+ dev_err(dev, "unable to acquire input gpios\n");
+ return PTR_ERR(decoder->input_gpios);
+ }
+ if (decoder->input_gpios->ndescs < 2) {
+ dev_err(dev, "not enough gpios found\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(dev, "decoder-max-value", &max))
+ max = (1U << decoder->input_gpios->ndescs) - 1;
+
+ decoder->dev = dev;
+ poll_dev = devm_input_allocate_polled_device(decoder->dev);
+ if (!poll_dev)
+ return -ENOMEM;
+
+ poll_dev->private = decoder;
+ poll_dev->poll = gpio_decoder_poll_gpios;
+ decoder->poll_dev = poll_dev;
+
+ poll_dev->input->name = pdev->name;
+ poll_dev->input->id.bustype = BUS_HOST;
+ input_set_abs_params(poll_dev->input, decoder->axis, 0, max, 0, 0);
+
+ err = input_register_polled_device(poll_dev);
+ if (err) {
+ dev_err(dev, "failed to register polled device\n");
+ return err;
+ }
+ platform_set_drvdata(pdev, decoder);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_decoder_of_match[] = {
+ { .compatible = "gpio-decoder", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_decoder_of_match);
+#endif
+
+static struct platform_driver gpio_decoder_driver = {
+ .probe = gpio_decoder_probe,
+ .driver = {
+ .name = "gpio-decoder",
+ .of_match_table = of_match_ptr(gpio_decoder_of_match),
+ }
+};
+module_platform_driver(gpio_decoder_driver);
+
+MODULE_DESCRIPTION("GPIO decoder input driver");
+MODULE_AUTHOR("Vignesh R <vigneshr@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 29ddeb7be84b..46b0f48fbf49 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014,2015 Samsung Electronics
* Jaewon Kim <jaewon02.kim@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is not provided / owned by Maxim Integrated Products.
*
@@ -426,7 +426,7 @@ static struct platform_driver max77693_haptic_driver = {
module_platform_driver(max77693_haptic_driver);
MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
MODULE_ALIAS("platform:max77693-haptic");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index a39b62651a4b..3273217ce80c 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -1,8 +1,9 @@
/*
- * Texas Instruments' TPS65218 Power Button Input Driver
+ * Texas Instruments' TPS65217 and TPS65218 Power Button Input Driver
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Felipe Balbi <balbi@ti.com>
+ * Author: Marcin Niestroj <m.niestroj@grinn-global.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,31 +19,61 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mfd/tps65217.h>
#include <linux/mfd/tps65218.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-struct tps65218_pwrbutton {
+struct tps6521x_data {
+ unsigned int reg_status;
+ unsigned int pb_mask;
+ const char *name;
+};
+
+static const struct tps6521x_data tps65217_data = {
+ .reg_status = TPS65217_REG_STATUS,
+ .pb_mask = TPS65217_STATUS_PB,
+ .name = "tps65217_pwrbutton",
+};
+
+static const struct tps6521x_data tps65218_data = {
+ .reg_status = TPS65218_REG_STATUS,
+ .pb_mask = TPS65218_STATUS_PB_STATE,
+ .name = "tps65218_pwrbutton",
+};
+
+struct tps6521x_pwrbutton {
struct device *dev;
- struct tps65218 *tps;
+ struct regmap *regmap;
struct input_dev *idev;
+ const struct tps6521x_data *data;
+ char phys[32];
+};
+
+static const struct of_device_id of_tps6521x_pb_match[] = {
+ { .compatible = "ti,tps65217-pwrbutton", .data = &tps65217_data },
+ { .compatible = "ti,tps65218-pwrbutton", .data = &tps65218_data },
+ { },
};
+MODULE_DEVICE_TABLE(of, of_tps6521x_pb_match);
-static irqreturn_t tps65218_pwr_irq(int irq, void *_pwr)
+static irqreturn_t tps6521x_pb_irq(int irq, void *_pwr)
{
- struct tps65218_pwrbutton *pwr = _pwr;
+ struct tps6521x_pwrbutton *pwr = _pwr;
+ const struct tps6521x_data *tps_data = pwr->data;
unsigned int reg;
int error;
- error = tps65218_reg_read(pwr->tps, TPS65218_REG_STATUS, &reg);
+ error = regmap_read(pwr->regmap, tps_data->reg_status, &reg);
if (error) {
dev_err(pwr->dev, "can't read register: %d\n", error);
goto out;
}
- if (reg & TPS65218_STATUS_PB_STATE) {
+ if (reg & tps_data->pb_mask) {
input_report_key(pwr->idev, KEY_POWER, 1);
pm_wakeup_event(pwr->dev, 0);
} else {
@@ -55,42 +86,55 @@ out:
return IRQ_HANDLED;
}
-static int tps65218_pwron_probe(struct platform_device *pdev)
+static int tps6521x_pb_probe(struct platform_device *pdev)
{
- struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- struct tps65218_pwrbutton *pwr;
+ struct tps6521x_pwrbutton *pwr;
struct input_dev *idev;
+ const struct of_device_id *match;
int error;
int irq;
+ match = of_match_node(of_tps6521x_pb_match, pdev->dev.of_node);
+ if (!match)
+ return -ENXIO;
+
pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
if (!pwr)
return -ENOMEM;
+ pwr->data = match->data;
+
idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
- idev->name = "tps65218_pwrbutton";
- idev->phys = "tps65218_pwrbutton/input0";
+ idev->name = pwr->data->name;
+ snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
+ pwr->data->name);
+ idev->phys = pwr->phys;
idev->dev.parent = dev;
idev->id.bustype = BUS_I2C;
input_set_capability(idev, EV_KEY, KEY_POWER);
- pwr->tps = tps;
+ pwr->regmap = dev_get_regmap(pdev->dev.parent, NULL);
pwr->dev = dev;
pwr->idev = idev;
platform_set_drvdata(pdev, pwr);
device_init_wakeup(dev, true);
irq = platform_get_irq(pdev, 0);
- error = devm_request_threaded_irq(dev, irq, NULL, tps65218_pwr_irq,
+ if (irq < 0) {
+ dev_err(dev, "No IRQ resource!\n");
+ return -EINVAL;
+ }
+
+ error = devm_request_threaded_irq(dev, irq, NULL, tps6521x_pb_irq,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
- "tps65218-pwrbutton", pwr);
+ pwr->data->name, pwr);
if (error) {
dev_err(dev, "failed to request IRQ #%d: %d\n",
irq, error);
@@ -106,21 +150,15 @@ static int tps65218_pwron_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id of_tps65218_pwr_match[] = {
- { .compatible = "ti,tps65218-pwrbutton" },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_tps65218_pwr_match);
-
-static struct platform_driver tps65218_pwron_driver = {
- .probe = tps65218_pwron_probe,
+static struct platform_driver tps6521x_pb_driver = {
+ .probe = tps6521x_pb_probe,
.driver = {
- .name = "tps65218_pwrbutton",
- .of_match_table = of_tps65218_pwr_match,
+ .name = "tps6521x_pwrbutton",
+ .of_match_table = of_tps6521x_pb_match,
},
};
-module_platform_driver(tps65218_pwron_driver);
+module_platform_driver(tps6521x_pb_driver);
-MODULE_DESCRIPTION("TPS65218 Power Button");
+MODULE_DESCRIPTION("TPS6521X Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 65ebbd111702..92595b98e7ed 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -1013,23 +1013,12 @@ static struct miscdevice uinput_misc = {
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
+module_misc_device(uinput_misc);
+
MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
MODULE_ALIAS("devname:" UINPUT_NAME);
-static int __init uinput_init(void)
-{
- return misc_register(&uinput_misc);
-}
-
-static void __exit uinput_exit(void)
-{
- misc_deregister(&uinput_misc);
-}
-
MODULE_AUTHOR("Aristeu Sergio Rozanski Filho");
MODULE_DESCRIPTION("User level driver support for input subsystem");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3");
-
-module_init(uinput_init);
-module_exit(uinput_exit);
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index cb6aecbc1dc2..e23b2495d52e 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -222,11 +222,13 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
static int elan_smbus_get_max(struct i2c_client *client,
unsigned int *max_x, unsigned int *max_y)
{
+ int ret;
int error;
u8 val[3];
- error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
- if (error) {
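+ /* Block read returns the number of bytes received; expect the 3-byte range report */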
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
dev_err(&client->dev, "failed to get dimensions: %d\n", error);
return error;
}
@@ -240,12 +242,13 @@ static int elan_smbus_get_max(struct i2c_client *client,
static int elan_smbus_get_resolution(struct i2c_client *client,
u8 *hw_res_x, u8 *hw_res_y)
{
+ int ret;
int error;
u8 val[3];
- error = i2c_smbus_read_block_data(client,
- ETP_SMBUS_RESOLUTION_CMD, val);
- if (error) {
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
dev_err(&client->dev, "failed to get resolution: %d\n", error);
return error;
}
@@ -260,12 +263,13 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
unsigned int *x_traces,
unsigned int *y_traces)
{
+ int ret;
int error;
u8 val[3];
- error = i2c_smbus_read_block_data(client,
- ETP_SMBUS_XY_TRACENUM_CMD, val);
- if (error) {
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
dev_err(&client->dev, "failed to get trace info: %d\n", error);
return error;
}
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index c8c6a8cc329d..54eceb30ede5 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -390,7 +390,8 @@ static int focaltech_read_size(struct psmouse *psmouse)
return 0;
}
-void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+static void focaltech_set_resolution(struct psmouse *psmouse,
+ unsigned int resolution)
{
/* not supported yet */
}
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 5784e20542a4..fb4b185dea96 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1916,7 +1916,7 @@ static int __init psmouse_init(void)
synaptics_module_init();
hgpk_module_init();
- kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
+ kpsmoused_wq = alloc_ordered_workqueue("kpsmoused", 0);
if (!kpsmoused_wq) {
pr_err("failed to create kpsmoused workqueue\n");
return -ENOMEM;
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 9c927d35c1f5..d189843f3727 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -71,10 +71,7 @@ static void serport_serio_close(struct serio *serio)
spin_lock_irqsave(&serport->lock, flags);
clear_bit(SERPORT_ACTIVE, &serport->flags);
- set_bit(SERPORT_DEAD, &serport->flags);
spin_unlock_irqrestore(&serport->lock, flags);
-
- wake_up_interruptible(&serport->wait);
}
/*
@@ -248,6 +245,19 @@ static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
}
#endif
+static int serport_ldisc_hangup(struct tty_struct *tty)
+{
+ struct serport *serport = (struct serport *) tty->disc_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&serport->lock, flags);
+ set_bit(SERPORT_DEAD, &serport->flags);
+ spin_unlock_irqrestore(&serport->lock, flags);
+
+ wake_up_interruptible(&serport->wait);
+ return 0;
+}
+
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
{
struct serport *serport = (struct serport *) tty->disc_data;
@@ -274,6 +284,7 @@ static struct tty_ldisc_ops serport_ldisc = {
.compat_ioctl = serport_ldisc_compat_ioctl,
#endif
.receive_buf = serport_ldisc_receive,
+ .hangup = serport_ldisc_hangup,
.write_wakeup = serport_ldisc_write_wakeup
};
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 949dacc78664..47de5a81172f 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -40,6 +40,7 @@
#include <linux/input.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
/* USB HID defines */
#define USB_REQ_GET_REPORT 0x01
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 2fb1f430a431..507981356921 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -305,19 +305,6 @@ config TOUCHSCREEN_EGALAX_SERIAL
To compile this driver as a module, choose M here: the
module will be called egalax_ts_serial.
-config TOUCHSCREEN_FT6236
- tristate "FT6236 I2C touchscreen"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here to enable support for the I2C connected FT6x06 and
- FT6x36 family of capacitive touchscreen drivers.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called ft6236.
-
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -397,6 +384,18 @@ config TOUCHSCREEN_GUNZE
To compile this driver as a module, choose M here: the
module will be called gunze.
+config TOUCHSCREEN_EKTF2127
+ tristate "Elan eKTF2127 I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have an Elan eKTF2127 touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ektf2127.
+
config TOUCHSCREEN_ELAN
tristate "Elan eKTH I2C touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index b4373d6be402..81b86451782d 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -32,11 +32,11 @@ obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
+obj-$(CONFIG_TOUCHSCREEN_EKTF2127) += ektf2127.o
obj-$(CONFIG_TOUCHSCREEN_ELAN) += elants_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o
-obj-$(CONFIG_TOUCHSCREEN_FT6236) += ft6236.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 703e295a37ed..28466e358fee 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1063,9 +1063,15 @@ static const struct edt_i2c_chip_data edt_ft5506_data = {
.max_support_points = 10,
};
+static const struct edt_i2c_chip_data edt_ft6236_data = {
+ .max_support_points = 2,
+};
+
static const struct i2c_device_id edt_ft5x06_ts_id[] = {
{ .name = "edt-ft5x06", .driver_data = (long)&edt_ft5x06_data },
{ .name = "edt-ft5506", .driver_data = (long)&edt_ft5506_data },
+ /* Note no edt- prefix for compatibility with the ft6236.c driver */
+ { .name = "ft6236", .driver_data = (long)&edt_ft6236_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id);
@@ -1076,6 +1082,8 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
{ .compatible = "edt,edt-ft5306", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5406", .data = &edt_ft5x06_data },
{ .compatible = "edt,edt-ft5506", .data = &edt_ft5506_data },
+ /* Note focaltech vendor prefix for compatibility with ft6236.c */
+ { .compatible = "focaltech,ft6236", .data = &edt_ft6236_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
diff --git a/drivers/input/touchscreen/ektf2127.c b/drivers/input/touchscreen/ektf2127.c
new file mode 100644
index 000000000000..0ed34ff787ce
--- /dev/null
+++ b/drivers/input/touchscreen/ektf2127.c
@@ -0,0 +1,336 @@
+/*
+ * Driver for ELAN eKTF2127 i2c touchscreen controller
+ *
+ * For this driver the layout of the Chipone icn8318 i2c
+ * touchscreen controller is used.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author:
+ * Michel Verlaan <michel.verl@gmail.com>
+ * Siebren Vroegindeweij <siebren.vroegindeweij@hotmail.com>
+ *
+ * Original chipone_icn8318 driver:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+
+/* Packet header defines (first byte of data sent / received) */
+#define EKTF2127_NOISE 0x40
+#define EKTF2127_RESPONSE 0x52
+#define EKTF2127_REQUEST 0x53
+#define EKTF2127_HELLO 0x55
+#define EKTF2127_REPORT 0x5d
+#define EKTF2127_CALIB_DONE 0x66
+
+/* Register defines (second byte of data sent / received) */
+#define EKTF2127_ENV_NOISY 0x41
+#define EKTF2127_HEIGHT 0x60
+#define EKTF2127_WIDTH 0x63
+
+/* 2 bytes header + 5 * 3 bytes coordinates + 3 bytes pressure info + footer */
+#define EKTF2127_TOUCH_REPORT_SIZE 21
+#define EKTF2127_MAX_TOUCHES 5
+
+struct ektf2127_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *power_gpios;
+ struct touchscreen_properties prop;
+};
+
+static void ektf2127_parse_coordinates(const u8* buf, unsigned int touch_count,
+ struct input_mt_pos *touches)
+{
+ int index = 0;
+ int i;
+
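+ /*
+  * Each touch is packed into 3 bytes: the low nibble of the first byte
+  * holds x[11:8] and the high nibble y[11:8]; the second byte is y[7:0]
+  * and the third byte is x[7:0].
+  */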
+ for (i = 0; i < touch_count; i++) {
+ index = 2 + i * 3;
+
+ touches[i].x = (buf[index] & 0x0f);
+ touches[i].x <<= 8;
+ touches[i].x |= buf[index + 2];
+
+ touches[i].y = (buf[index] & 0xf0);
+ touches[i].y <<= 4;
+ touches[i].y |= buf[index + 1];
+ }
+}
+
+static void ektf2127_report_event(struct ektf2127_ts *ts, const u8 *buf)
+{
+ struct input_mt_pos touches[EKTF2127_MAX_TOUCHES];
+ int slots[EKTF2127_MAX_TOUCHES];
+ unsigned int touch_count, i;
+
+ touch_count = buf[1] & 0x07;
+ if (touch_count > EKTF2127_MAX_TOUCHES) {
+ dev_err(&ts->client->dev,
+ "Too many touches %d > %d\n",
+ touch_count, EKTF2127_MAX_TOUCHES);
+ touch_count = EKTF2127_MAX_TOUCHES;
+ }
+
+ ektf2127_parse_coordinates(buf, touch_count, touches);
+ input_mt_assign_slots(ts->input, slots, touches,
+ touch_count, 0);
+
+ for (i = 0; i < touch_count; i++) {
+ input_mt_slot(ts->input, slots[i]);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input, &ts->prop,
+ touches[i].x, touches[i].y, true);
+ }
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t ektf2127_irq(int irq, void *dev_id)
+{
+ struct ektf2127_ts *ts = dev_id;
+ struct device *dev = &ts->client->dev;
+ char buf[EKTF2127_TOUCH_REPORT_SIZE];
+ int ret;
+
+ ret = i2c_master_recv(ts->client, buf, EKTF2127_TOUCH_REPORT_SIZE);
+ if (ret != EKTF2127_TOUCH_REPORT_SIZE) {
+ dev_err(dev, "Error reading touch data: %d\n", ret);
+ goto out;
+ }
+
+ switch (buf[0]) {
+ case EKTF2127_REPORT:
+ ektf2127_report_event(ts, buf);
+ break;
+
+ case EKTF2127_NOISE:
+ if (buf[1] == EKTF2127_ENV_NOISY)
+ dev_dbg(dev, "Environment is electrically noisy\n");
+ break;
+
+ case EKTF2127_HELLO:
+ case EKTF2127_CALIB_DONE:
+ break;
+
+ default:
+ dev_err(dev, "Unexpected packet header byte %#02x\n", buf[0]);
+ break;
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int ektf2127_start(struct input_dev *dev)
+{
+ struct ektf2127_ts *ts = input_get_drvdata(dev);
+
+ enable_irq(ts->client->irq);
+ gpiod_set_value_cansleep(ts->power_gpios, 1);
+
+ return 0;
+}
+
+static void ektf2127_stop(struct input_dev *dev)
+{
+ struct ektf2127_ts *ts = input_get_drvdata(dev);
+
+ disable_irq(ts->client->irq);
+ gpiod_set_value_cansleep(ts->power_gpios, 0);
+}
+
+static int __maybe_unused ektf2127_suspend(struct device *dev)
+{
+ struct ektf2127_ts *ts = i2c_get_clientdata(to_i2c_client(dev));
+
+ mutex_lock(&ts->input->mutex);
+ if (ts->input->users)
+ ektf2127_stop(ts->input);
+ mutex_unlock(&ts->input->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ektf2127_resume(struct device *dev)
+{
+ struct ektf2127_ts *ts = i2c_get_clientdata(to_i2c_client(dev));
+
+ mutex_lock(&ts->input->mutex);
+ if (ts->input->users)
+ ektf2127_start(ts->input);
+ mutex_unlock(&ts->input->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ektf2127_pm_ops, ektf2127_suspend,
+ ektf2127_resume);
+
+static int ektf2127_query_dimension(struct i2c_client *client, bool width)
+{
+ struct device *dev = &client->dev;
+ const char *what = width ? "width" : "height";
+ u8 what_code = width ? EKTF2127_WIDTH : EKTF2127_HEIGHT;
+ u8 buf[4];
+ int ret;
+ int error;
+
+ /* Request dimension */
+ buf[0] = EKTF2127_REQUEST;
+ buf[1] = width ? EKTF2127_WIDTH : EKTF2127_HEIGHT;
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to request %s: %d\n", what, error);
+ return error;
+ }
+
+ msleep(20);
+
+ /* Read response */
+ ret = i2c_master_recv(client, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to receive %s data: %d\n", what, error);
+ return error;
+ }
+
+ if (buf[0] != EKTF2127_RESPONSE || buf[1] != what_code) {
+ dev_err(dev, "Unexpected %s data: %#02x %#02x\n",
+ what, buf[0], buf[1]);
+ return -EIO;
+ }
+
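+ /*
+  * The reply carries a 12-bit size: buf[2] is the low byte and the high
+  * nibble of buf[3] supplies bits 8-11; subtracting one gives the
+  * maximum coordinate used for the axis range.
+  */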
+ return (((buf[3] & 0xf0) << 4) | buf[2]) - 1;
+}
+
+static int ektf2127_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ektf2127_ts *ts;
+ struct input_dev *input;
+ u8 buf[4];
+ int max_x, max_y;
+ int error;
+
+ if (!client->irq) {
+ dev_err(dev, "Error no irq specified\n");
+ return -EINVAL;
+ }
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ /* This requests the gpio *and* turns on the touchscreen controller */
+ ts->power_gpios = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->power_gpios)) {
+ error = PTR_ERR(ts->power_gpios);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Error getting power gpio: %d\n", error);
+ return error;
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->open = ektf2127_start;
+ input->close = ektf2127_stop;
+
+ ts->client = client;
+
+ /* Read hello (ignore result, depends on initial power state) */
+ msleep(20);
+ i2c_master_recv(ts->client, buf, sizeof(buf));
+
+ /* Read resolution from chip */
+ max_x = ektf2127_query_dimension(client, true);
+ if (max_x < 0)
+ return max_x;
+
+ max_y = ektf2127_query_dimension(client, false);
+ if (max_y < 0)
+ return max_y;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ touchscreen_parse_properties(input, true, &ts->prop);
+
+ error = input_mt_init_slots(input, EKTF2127_MAX_TOUCHES,
+ INPUT_MT_DIRECT |
+ INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+ if (error)
+ return error;
+
+ ts->input = input;
+ input_set_drvdata(input, ts);
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, ektf2127_irq,
+ IRQF_ONESHOT, client->name, ts);
+ if (error) {
+ dev_err(dev, "Error requesting irq: %d\n", error);
+ return error;
+ }
+
+ /* Stop device till opened */
+ ektf2127_stop(ts->input);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ektf2127_of_match[] = {
+ { .compatible = "elan,ektf2127" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ektf2127_of_match);
+#endif
+
+static const struct i2c_device_id ektf2127_i2c_id[] = {
+ { "ektf2127", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ektf2127_i2c_id);
+
+static struct i2c_driver ektf2127_driver = {
+ .driver = {
+ .name = "elan_ektf2127",
+ .pm = &ektf2127_pm_ops,
+ .of_match_table = of_match_ptr(ektf2127_of_match),
+ },
+ .probe = ektf2127_probe,
+ .id_table = ektf2127_i2c_id,
+};
+module_i2c_driver(ektf2127_driver);
+
+MODULE_DESCRIPTION("ELAN eKTF2127 I2C Touchscreen Driver");
+MODULE_AUTHOR("Michel Verlaan, Siebren Vroegindeweij");
+MODULE_LICENSE("GPL");
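
For readers following the ektf2127 protocol above: the controller answers a width/height request with a four-byte response, and ektf2127_query_dimension() packs the result from bytes 2 and 3. A minimal decode sketch follows (same bit layout and includes as the driver above; the helper name is illustrative only, not part of the patch):

	/*
	 * Illustrative sketch: buf[] is the 4-byte response read in
	 * ektf2127_query_dimension(). buf[2] carries the low 8 bits, the
	 * high nibble of buf[3] carries bits 8..11, and the controller
	 * reports a count, so 1 is subtracted to get the maximum
	 * ABS_MT_POSITION_* coordinate.
	 */
	static int ektf2127_decode_dimension(const u8 *buf)
	{
		return (((buf[3] & 0xf0) << 4) | buf[2]) - 1;
	}
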
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index ac09855fa435..02aec284deca 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -298,7 +298,7 @@ static u16 elants_i2c_parse_version(u8 *buf)
return get_unaligned_be32(buf) >> 4;
}
-static int elants_i2c_query_fw_id(struct elants_data *ts)
+static int elants_i2c_query_hw_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
int error, retry_cnt;
@@ -318,8 +318,13 @@ static int elants_i2c_query_fw_id(struct elants_data *ts)
error, (int)sizeof(resp), resp);
}
- dev_err(&client->dev,
- "Failed to read fw id or fw id is invalid\n");
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read fw id: %d\n", error);
+ return error;
+ }
+
+ dev_err(&client->dev, "Invalid fw id: %#04x\n", ts->hw_version);
return -EINVAL;
}
@@ -508,7 +513,7 @@ static int elants_i2c_fastboot(struct i2c_client *client)
static int elants_i2c_initialize(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int error, error2, retry_cnt;
const u8 hello_packet[] = { 0x55, 0x55, 0x55, 0x55 };
const u8 recov_packet[] = { 0x55, 0x55, 0x80, 0x80 };
u8 buf[HEADER_SIZE];
@@ -553,18 +558,22 @@ static int elants_i2c_initialize(struct elants_data *ts)
}
}
+ /* hw version is available even if the device is in recovery state */
+ error2 = elants_i2c_query_hw_version(ts);
if (!error)
- error = elants_i2c_query_fw_id(ts);
+ error = error2;
+
if (!error)
error = elants_i2c_query_fw_version(ts);
+ if (!error)
+ error = elants_i2c_query_test_version(ts);
+ if (!error)
+ error = elants_i2c_query_bc_version(ts);
+ if (!error)
+ error = elants_i2c_query_ts_info(ts);
- if (error) {
+ if (error)
ts->iap_mode = ELAN_IAP_RECOVERY;
- } else {
- elants_i2c_query_test_version(ts);
- elants_i2c_query_bc_version(ts);
- elants_i2c_query_ts_info(ts);
- }
return 0;
}
diff --git a/drivers/input/touchscreen/ft6236.c b/drivers/input/touchscreen/ft6236.c
deleted file mode 100644
index d240d2e212bd..000000000000
--- a/drivers/input/touchscreen/ft6236.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * FocalTech FT6236 TouchScreen driver.
- *
- * Copyright (c) 2010 Focal tech Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/property.h>
-
-#define FT6236_MAX_TOUCH_POINTS 2
-
-#define FT6236_REG_TH_GROUP 0x80
-#define FT6236_REG_PERIODACTIVE 0x88
-#define FT6236_REG_LIB_VER_H 0xa1
-#define FT6236_REG_LIB_VER_L 0xa2
-#define FT6236_REG_CIPHER 0xa3
-#define FT6236_REG_FIRMID 0xa6
-#define FT6236_REG_FOCALTECH_ID 0xa8
-#define FT6236_REG_RELEASE_CODE_ID 0xaf
-
-#define FT6236_EVENT_PRESS_DOWN 0
-#define FT6236_EVENT_LIFT_UP 1
-#define FT6236_EVENT_CONTACT 2
-#define FT6236_EVENT_NO_EVENT 3
-
-struct ft6236_data {
- struct i2c_client *client;
- struct input_dev *input;
- struct gpio_desc *reset_gpio;
- u32 max_x;
- u32 max_y;
- bool invert_x;
- bool invert_y;
- bool swap_xy;
-};
-
-/*
- * This struct is a touchpoint as stored in hardware. Note that the id,
- * as well as the event, are stored in the upper nybble of the hi byte.
- */
-struct ft6236_touchpoint {
- union {
- u8 xhi;
- u8 event;
- };
- u8 xlo;
- union {
- u8 yhi;
- u8 id;
- };
- u8 ylo;
- u8 weight;
- u8 misc;
-} __packed;
-
-/* This packet represents the register map as read from offset 0 */
-struct ft6236_packet {
- u8 dev_mode;
- u8 gest_id;
- u8 touches;
- struct ft6236_touchpoint points[FT6236_MAX_TOUCH_POINTS];
-} __packed;
-
-static int ft6236_read(struct i2c_client *client, u8 reg, u8 len, void *data)
-{
- int error;
-
- error = i2c_smbus_read_i2c_block_data(client, reg, len, data);
- if (error < 0)
- return error;
-
- if (error != len)
- return -EIO;
-
- return 0;
-}
-
-static irqreturn_t ft6236_interrupt(int irq, void *dev_id)
-{
- struct ft6236_data *ft6236 = dev_id;
- struct device *dev = &ft6236->client->dev;
- struct input_dev *input = ft6236->input;
- struct ft6236_packet buf;
- u8 touches;
- int i, error;
-
- error = ft6236_read(ft6236->client, 0, sizeof(buf), &buf);
- if (error) {
- dev_err(dev, "read touchdata failed %d\n", error);
- return IRQ_HANDLED;
- }
-
- touches = buf.touches & 0xf;
- if (touches > FT6236_MAX_TOUCH_POINTS) {
- dev_dbg(dev,
- "%d touch points reported, only %d are supported\n",
- touches, FT6236_MAX_TOUCH_POINTS);
- touches = FT6236_MAX_TOUCH_POINTS;
- }
-
- for (i = 0; i < touches; i++) {
- struct ft6236_touchpoint *point = &buf.points[i];
- u16 x = ((point->xhi & 0xf) << 8) | buf.points[i].xlo;
- u16 y = ((point->yhi & 0xf) << 8) | buf.points[i].ylo;
- u8 event = point->event >> 6;
- u8 id = point->id >> 4;
- bool act = (event == FT6236_EVENT_PRESS_DOWN ||
- event == FT6236_EVENT_CONTACT);
-
- input_mt_slot(input, id);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, act);
- if (!act)
- continue;
-
- if (ft6236->invert_x)
- x = ft6236->max_x - x;
-
- if (ft6236->invert_y)
- y = ft6236->max_y - y;
-
- if (ft6236->swap_xy) {
- input_report_abs(input, ABS_MT_POSITION_X, y);
- input_report_abs(input, ABS_MT_POSITION_Y, x);
- } else {
- input_report_abs(input, ABS_MT_POSITION_X, x);
- input_report_abs(input, ABS_MT_POSITION_Y, y);
- }
- }
-
- input_mt_sync_frame(input);
- input_sync(input);
-
- return IRQ_HANDLED;
-}
-
-static u8 ft6236_debug_read_byte(struct ft6236_data *ft6236, u8 reg)
-{
- struct i2c_client *client = ft6236->client;
- u8 val = 0;
- int error;
-
- error = ft6236_read(client, reg, 1, &val);
- if (error)
- dev_dbg(&client->dev,
- "error reading register 0x%02x: %d\n", reg, error);
-
- return val;
-}
-
-static void ft6236_debug_info(struct ft6236_data *ft6236)
-{
- struct device *dev = &ft6236->client->dev;
-
- dev_dbg(dev, "Touch threshold is %d\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_TH_GROUP) * 4);
- dev_dbg(dev, "Report rate is %dHz\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_PERIODACTIVE) * 10);
- dev_dbg(dev, "Firmware library version 0x%02x%02x\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_LIB_VER_H),
- ft6236_debug_read_byte(ft6236, FT6236_REG_LIB_VER_L));
- dev_dbg(dev, "Firmware version 0x%02x\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_FIRMID));
- dev_dbg(dev, "Chip vendor ID 0x%02x\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_CIPHER));
- dev_dbg(dev, "CTPM vendor ID 0x%02x\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_FOCALTECH_ID));
- dev_dbg(dev, "Release code version 0x%02x\n",
- ft6236_debug_read_byte(ft6236, FT6236_REG_RELEASE_CODE_ID));
-}
-
-static void ft6236_reset(struct ft6236_data *ft6236)
-{
- if (!ft6236->reset_gpio)
- return;
-
- gpiod_set_value_cansleep(ft6236->reset_gpio, 1);
- usleep_range(5000, 20000);
- gpiod_set_value_cansleep(ft6236->reset_gpio, 0);
- msleep(300);
-}
-
-static int ft6236_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct ft6236_data *ft6236;
- struct input_dev *input;
- u32 fuzz_x = 0, fuzz_y = 0;
- u8 val;
- int error;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENXIO;
-
- if (!client->irq) {
- dev_err(dev, "irq is missing\n");
- return -EINVAL;
- }
-
- ft6236 = devm_kzalloc(dev, sizeof(*ft6236), GFP_KERNEL);
- if (!ft6236)
- return -ENOMEM;
-
- ft6236->client = client;
- ft6236->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(ft6236->reset_gpio)) {
- error = PTR_ERR(ft6236->reset_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "error getting reset gpio: %d\n", error);
- return error;
- }
-
- ft6236_reset(ft6236);
-
- /* verify that the controller is present */
- error = ft6236_read(client, 0x00, 1, &val);
- if (error) {
- dev_err(dev, "failed to read from controller: %d\n", error);
- return error;
- }
-
- ft6236_debug_info(ft6236);
-
- input = devm_input_allocate_device(dev);
- if (!input)
- return -ENOMEM;
-
- ft6236->input = input;
- input->name = client->name;
- input->id.bustype = BUS_I2C;
-
- if (device_property_read_u32(dev, "touchscreen-size-x",
- &ft6236->max_x) ||
- device_property_read_u32(dev, "touchscreen-size-y",
- &ft6236->max_y)) {
- dev_err(dev, "touchscreen-size-x and/or -y missing\n");
- return -EINVAL;
- }
-
- device_property_read_u32(dev, "touchscreen-fuzz-x", &fuzz_x);
- device_property_read_u32(dev, "touchscreen-fuzz-y", &fuzz_y);
- ft6236->invert_x = device_property_read_bool(dev,
- "touchscreen-inverted-x");
- ft6236->invert_y = device_property_read_bool(dev,
- "touchscreen-inverted-y");
- ft6236->swap_xy = device_property_read_bool(dev,
- "touchscreen-swapped-x-y");
-
- if (ft6236->swap_xy) {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- ft6236->max_y, fuzz_y, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- ft6236->max_x, fuzz_x, 0);
- } else {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- ft6236->max_x, fuzz_x, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- ft6236->max_y, fuzz_y, 0);
- }
-
- error = input_mt_init_slots(input, FT6236_MAX_TOUCH_POINTS,
- INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
- if (error)
- return error;
-
- error = devm_request_threaded_irq(dev, client->irq, NULL,
- ft6236_interrupt, IRQF_ONESHOT,
- client->name, ft6236);
- if (error) {
- dev_err(dev, "request irq %d failed: %d\n", client->irq, error);
- return error;
- }
-
- error = input_register_device(input);
- if (error) {
- dev_err(dev, "failed to register input device: %d\n", error);
- return error;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id ft6236_of_match[] = {
- { .compatible = "focaltech,ft6236", },
- { }
-};
-MODULE_DEVICE_TABLE(of, ft6236_of_match);
-#endif
-
-static const struct i2c_device_id ft6236_id[] = {
- { "ft6236", },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ft6236_id);
-
-static struct i2c_driver ft6236_driver = {
- .driver = {
- .name = "ft6236",
- .of_match_table = of_match_ptr(ft6236_of_match),
- },
- .probe = ft6236_probe,
- .id_table = ft6236_id,
-};
-module_i2c_driver(ft6236_driver);
-
-MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
-MODULE_AUTHOR("Noralf Trønnes <noralf@tronnes.org>");
-MODULE_DESCRIPTION("FocalTech FT6236 TouchScreen driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index ea3b6a5b83e6..729b3c89324c 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -13,6 +13,7 @@
* HP Jornada 710/720/729 Touchscreen Driver
*/
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -20,9 +21,7 @@
#include <linux/slab.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <mach/jornada720.h>
-#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
@@ -30,6 +29,7 @@ MODULE_LICENSE("GPL v2");
struct jornada_ts {
struct input_dev *dev;
+ struct gpio_desc *gpio;
int x_data[4]; /* X sample values */
int y_data[4]; /* Y sample values */
};
@@ -71,8 +71,8 @@ static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
struct input_dev *input = jornada_ts->dev;
int x, y;
- /* If GPIO_GPIO9 is set to high then report pen up */
- if (GPLR & GPIO_GPIO(9)) {
+ /* If gpio is high then report pen up */
+ if (gpiod_get_value(jornada_ts->gpio)) {
input_report_key(input, BTN_TOUCH, 0);
input_sync(input);
} else {
@@ -101,7 +101,7 @@ static int jornada720_ts_probe(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts;
struct input_dev *input_dev;
- int error;
+ int error, irq;
jornada_ts = devm_kzalloc(&pdev->dev, sizeof(*jornada_ts), GFP_KERNEL);
if (!jornada_ts)
@@ -113,6 +113,14 @@ static int jornada720_ts_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, jornada_ts);
+ jornada_ts->gpio = devm_gpiod_get(&pdev->dev, "penup", GPIOD_IN);
+ if (IS_ERR(jornada_ts->gpio))
+ return PTR_ERR(jornada_ts->gpio);
+
+ irq = gpiod_to_irq(jornada_ts->gpio);
+ if (irq <= 0)
+ return irq < 0 ? irq : -EINVAL;
+
jornada_ts->dev = input_dev;
input_dev->name = "HP Jornada 7xx Touchscreen";
@@ -125,8 +133,7 @@ static int jornada720_ts_probe(struct platform_device *pdev)
input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);
- error = devm_request_irq(&pdev->dev, IRQ_GPIO9,
- jornada720_ts_interrupt,
+ error = devm_request_irq(&pdev->dev, irq, jornada720_ts_interrupt,
IRQF_TRIGGER_RISING,
"HP7XX Touchscreen driver", pdev);
if (error) {
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 913e25a994b4..ef64f36c5ffc 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -37,7 +37,6 @@ struct mc13783_ts_priv {
struct input_dev *idev;
struct mc13xxx *mc13xxx;
struct delayed_work work;
- struct workqueue_struct *workq;
unsigned int sample[4];
struct mc13xxx_ts_platform_data *touch;
};
@@ -54,7 +53,7 @@ static irqreturn_t mc13783_ts_handler(int irq, void *data)
* be rescheduled for immediate execution here. However the rearm
* delay is HZ / 50 which is acceptable.
*/
- queue_delayed_work(priv->workq, &priv->work, 0);
+ schedule_delayed_work(&priv->work, 0);
return IRQ_HANDLED;
}
@@ -106,16 +105,18 @@ static void mc13783_ts_report_sample(struct mc13783_ts_priv *priv)
dev_dbg(&idev->dev, "report (%d, %d, %d)\n",
x1, y1, 0x1000 - cr0);
- queue_delayed_work(priv->workq, &priv->work, HZ / 50);
- } else
+ schedule_delayed_work(&priv->work, HZ / 50);
+ } else {
dev_dbg(&idev->dev, "report release\n");
+ }
input_report_abs(idev, ABS_PRESSURE,
cr0 ? 0x1000 - cr0 : cr0);
input_report_key(idev, BTN_TOUCH, cr0);
input_sync(idev);
- } else
+ } else {
dev_dbg(&idev->dev, "discard event\n");
+ }
}
static void mc13783_ts_work(struct work_struct *work)
@@ -189,14 +190,6 @@ static int __init mc13783_ts_probe(struct platform_device *pdev)
goto err_free_mem;
}
- /*
- * We need separate workqueue because mc13783_adc_do_conversion
- * uses keventd and thus would deadlock.
- */
- priv->workq = create_singlethread_workqueue("mc13783_ts");
- if (!priv->workq)
- goto err_free_mem;
-
idev->name = MC13783_TS_NAME;
idev->dev.parent = &pdev->dev;
@@ -215,14 +208,12 @@ static int __init mc13783_ts_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"register input device failed with %d\n", ret);
- goto err_destroy_wq;
+ goto err_free_mem;
}
platform_set_drvdata(pdev, priv);
return 0;
-err_destroy_wq:
- destroy_workqueue(priv->workq);
err_free_mem:
input_free_device(idev);
kfree(priv);
@@ -233,7 +224,6 @@ static int mc13783_ts_remove(struct platform_device *pdev)
{
struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
- destroy_workqueue(priv->workq);
input_unregister_device(priv->idev);
kfree(priv);
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index d159e14f4d20..3bb0637d832e 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
@@ -404,7 +400,6 @@ static int __maybe_unused pixcir_i2c_ts_resume(struct device *dev)
mutex_lock(&input->mutex);
if (device_may_wakeup(&client->dev)) {
-
if (!input->users) {
ret = pixcir_stop(ts);
if (ret) {
@@ -431,13 +426,7 @@ static const struct of_device_id pixcir_of_match[];
static int pixcir_parse_dt(struct device *dev,
struct pixcir_i2c_ts_data *tsdata)
{
- const struct of_device_id *match;
-
- match = of_match_device(of_match_ptr(pixcir_of_match), dev);
- if (!match)
- return -EINVAL;
-
- tsdata->chip = (const struct pixcir_i2c_chip_data *)match->data;
+ tsdata->chip = of_device_get_match_data(dev);
if (!tsdata->chip)
return -EINVAL;
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 73861ad22df4..a9132603ab34 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -23,7 +23,7 @@
#include <asm/unaligned.h>
#define WDT87XX_NAME "wdt87xx_i2c"
-#define WDT87XX_DRV_VER "0.9.7"
+#define WDT87XX_DRV_VER "0.9.8"
#define WDT87XX_FW_NAME "wdt87xx_fw.bin"
#define WDT87XX_CFG_NAME "wdt87xx_cfg.bin"
@@ -157,6 +157,7 @@
/* Controller requires minimum 300us between commands */
#define WDT_COMMAND_DELAY_MS 2
#define WDT_FLASH_WRITE_DELAY_MS 4
+#define WDT_FLASH_ERASE_DELAY_MS 200
#define WDT_FW_RESET_TIME 2500
struct wdt87xx_sys_param {
@@ -726,7 +727,7 @@ static int wdt87xx_write_firmware(struct i2c_client *client, const void *chunk)
break;
}
- msleep(50);
+ msleep(WDT_FLASH_ERASE_DELAY_MS);
error = wdt87xx_write_data(client, data, start_addr,
page_size);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 1534e9b0788c..90d6be3c26cc 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -500,7 +500,7 @@ static int wm97xx_ts_input_open(struct input_dev *idev)
{
struct wm97xx *wm = input_get_drvdata(idev);
- wm->ts_workq = create_singlethread_workqueue("kwm97xx");
+ wm->ts_workq = alloc_ordered_workqueue("kwm97xx", 0);
if (wm->ts_workq == NULL) {
dev_err(wm->dev,
"Failed to create workqueue\n");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4025291ea0ae..58fa8cc0262b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -137,6 +137,7 @@ struct iommu_dev_data {
bool pri_tlp; /* PASID TLB required for
PPR completions */
u32 errata; /* Bitmap for errata to apply */
+ bool use_vapic; /* Enable device to use vapic mode */
};
/*
@@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
}
}
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(u32);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+ iommu_ga_log_notifier = notifier;
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+ u32 head, tail, cnt = 0;
+
+ if (iommu->ga_log == NULL)
+ return;
+
+ head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+ while (head != tail) {
+ volatile u64 *raw;
+ u64 log_entry;
+
+ raw = (u64 *)(iommu->ga_log + head);
+ cnt++;
+
+ /* Avoid memcpy function-call overhead */
+ log_entry = *raw;
+
+ /* Update head pointer of hardware ring-buffer */
+ head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+ /* Handle GA entry */
+ switch (GA_REQ_TYPE(log_entry)) {
+ case GA_GUEST_NR:
+ if (!iommu_ga_log_notifier)
+ break;
+
+ pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
+ __func__, GA_DEVID(log_entry),
+ GA_TAG(log_entry));
+
+ if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
+ pr_err("AMD-Vi: GA log notifier failed.\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define AMD_IOMMU_INT_MASK \
+ (MMIO_STATUS_EVT_INT_MASK | \
+ MMIO_STATUS_PPR_INT_MASK | \
+ MMIO_STATUS_GALOG_INT_MASK)
+
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
struct amd_iommu *iommu = (struct amd_iommu *) data;
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
- /* Enable EVT and PPR interrupts again */
- writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+ while (status & AMD_IOMMU_INT_MASK) {
+ /* Enable EVT, PPR and GA interrupts again */
+ writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & MMIO_STATUS_EVT_INT_MASK) {
@@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
iommu_poll_ppr_log(iommu);
}
+#ifdef CONFIG_IRQ_REMAP
+ if (status & MMIO_STATUS_GALOG_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
+ iommu_poll_ga_log(iommu);
+ }
+#endif
+
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
@@ -2967,6 +3035,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (!iommu)
return;
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ (dom->type == IOMMU_DOMAIN_UNMANAGED))
+ dev_data->use_vapic = 0;
+#endif
+
iommu_completion_wait(iommu);
}
@@ -2992,6 +3066,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
ret = attach_device(dev, domain);
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+ if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+ dev_data->use_vapic = 1;
+ else
+ dev_data->use_vapic = 0;
+ }
+#endif
+
iommu_completion_wait(iommu);
return ret;
@@ -3530,34 +3613,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
*
*****************************************************************************/
-union irte {
- u32 val;
- struct {
- u32 valid : 1,
- no_fault : 1,
- int_type : 3,
- rq_eoi : 1,
- dm : 1,
- rsvd_1 : 1,
- destination : 8,
- vector : 8,
- rsvd_2 : 8;
- } fields;
-};
-
-struct irq_2_irte {
- u16 devid; /* Device ID for IRTE table */
- u16 index; /* Index into IRTE table*/
-};
-
-struct amd_ir_data {
- struct irq_2_irte irq_2_irte;
- union irte irte_entry;
- union {
- struct msi_msg msi_entry;
- };
-};
-
static struct irq_chip amd_ir_chip;
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -3579,8 +3634,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
amd_iommu_dev_table[devid].data[2] = dte;
}
-#define IRTE_ALLOCATED (~1U)
-
static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
struct irq_remap_table *table = NULL;
@@ -3626,13 +3679,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
goto out;
}
- memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ memset(table->table, 0,
+ MAX_IRQS_PER_TABLE * sizeof(u32));
+ else
+ memset(table->table, 0,
+ (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
if (ioapic) {
int i;
for (i = 0; i < 32; ++i)
- table->table[i] = IRTE_ALLOCATED;
+ iommu->irte_ops->set_allocated(table, i);
}
irq_lookup_table[devid] = table;
@@ -3658,6 +3716,10 @@ static int alloc_irq_index(u16 devid, int count)
struct irq_remap_table *table;
unsigned long flags;
int index, c;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ if (!iommu)
+ return -ENODEV;
table = get_irq_table(devid, false);
if (!table)
@@ -3669,14 +3731,14 @@ static int alloc_irq_index(u16 devid, int count)
for (c = 0, index = table->min_index;
index < MAX_IRQS_PER_TABLE;
++index) {
- if (table->table[index] == 0)
+ if (!iommu->irte_ops->is_allocated(table, index))
c += 1;
else
c = 0;
if (c == count) {
for (; c != 0; --c)
- table->table[index - c + 1] = IRTE_ALLOCATED;
+ iommu->irte_ops->set_allocated(table, index - c + 1);
index -= count - 1;
goto out;
@@ -3691,7 +3753,42 @@ out:
return index;
}
-static int modify_irte(u16 devid, int index, union irte irte)
+static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+ struct amd_ir_data *data)
+{
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+ struct irte_ga *entry;
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (iommu == NULL)
+ return -EINVAL;
+
+ table = get_irq_table(devid, false);
+ if (!table)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&table->lock, flags);
+
+ entry = (struct irte_ga *)table->table;
+ entry = &entry[index];
+ entry->lo.fields_remap.valid = 0;
+ entry->hi.val = irte->hi.val;
+ entry->lo.val = irte->lo.val;
+ entry->lo.fields_remap.valid = 1;
+ if (data)
+ data->ref = entry;
+
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt(iommu, devid);
+ iommu_completion_wait(iommu);
+
+ return 0;
+}
+
+static int modify_irte(u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
struct amd_iommu *iommu;
@@ -3706,7 +3803,7 @@ static int modify_irte(u16 devid, int index, union irte irte)
return -ENOMEM;
spin_lock_irqsave(&table->lock, flags);
- table->table[index] = irte.val;
+ table->table[index] = irte->val;
spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
@@ -3730,13 +3827,146 @@ static void free_irte(u16 devid, int index)
return;
spin_lock_irqsave(&table->lock, flags);
- table->table[index] = 0;
+ iommu->irte_ops->clear_allocated(table, index);
spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
}
+static void irte_prepare(void *entry,
+ u32 delivery_mode, u32 dest_mode,
+ u8 vector, u32 dest_apicid, int devid)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->val = 0;
+ irte->fields.vector = vector;
+ irte->fields.int_type = delivery_mode;
+ irte->fields.destination = dest_apicid;
+ irte->fields.dm = dest_mode;
+ irte->fields.valid = 1;
+}
+
+static void irte_ga_prepare(void *entry,
+ u32 delivery_mode, u32 dest_mode,
+ u8 vector, u32 dest_apicid, int devid)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+ struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+ irte->lo.val = 0;
+ irte->hi.val = 0;
+ irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
+ irte->lo.fields_remap.int_type = delivery_mode;
+ irte->lo.fields_remap.dm = dest_mode;
+ irte->hi.fields.vector = vector;
+ irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.valid = 1;
+}
+
+static void irte_activate(void *entry, u16 devid, u16 index)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.valid = 1;
+ modify_irte(devid, index, irte);
+}
+
+static void irte_ga_activate(void *entry, u16 devid, u16 index)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ irte->lo.fields_remap.valid = 1;
+ modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_deactivate(void *entry, u16 devid, u16 index)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.valid = 0;
+ modify_irte(devid, index, irte);
+}
+
+static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+
+ irte->lo.fields_remap.valid = 0;
+ modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_set_affinity(void *entry, u16 devid, u16 index,
+ u8 vector, u32 dest_apicid)
+{
+ union irte *irte = (union irte *) entry;
+
+ irte->fields.vector = vector;
+ irte->fields.destination = dest_apicid;
+ modify_irte(devid, index, irte);
+}
+
+static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+ u8 vector, u32 dest_apicid)
+{
+ struct irte_ga *irte = (struct irte_ga *) entry;
+ struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+ if (!dev_data || !dev_data->use_vapic) {
+ irte->hi.fields.vector = vector;
+ irte->lo.fields_remap.destination = dest_apicid;
+ irte->lo.fields_remap.guest_mode = 0;
+ modify_irte_ga(devid, index, irte, NULL);
+ }
+}
+
+#define IRTE_ALLOCATED (~1U)
+static void irte_set_allocated(struct irq_remap_table *table, int index)
+{
+ table->table[index] = IRTE_ALLOCATED;
+}
+
+static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ memset(&irte->lo.val, 0, sizeof(u64));
+ memset(&irte->hi.val, 0, sizeof(u64));
+ irte->hi.fields.vector = 0xff;
+}
+
+static bool irte_is_allocated(struct irq_remap_table *table, int index)
+{
+ union irte *ptr = (union irte *)table->table;
+ union irte *irte = &ptr[index];
+
+ return irte->val != 0;
+}
+
+static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ return irte->hi.fields.vector != 0;
+}
+
+static void irte_clear_allocated(struct irq_remap_table *table, int index)
+{
+ table->table[index] = 0;
+}
+
+static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
+{
+ struct irte_ga *ptr = (struct irte_ga *)table->table;
+ struct irte_ga *irte = &ptr[index];
+
+ memset(&irte->lo.val, 0, sizeof(u64));
+ memset(&irte->hi.val, 0, sizeof(u64));
+}
+
static int get_devid(struct irq_alloc_info *info)
{
int devid = -1;
@@ -3821,19 +4051,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct msi_msg *msg = &data->msi_entry;
- union irte *irte = &data->irte_entry;
struct IO_APIC_route_entry *entry;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ if (!iommu)
+ return;
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
-
- /* Setup IRTE for IOMMU */
- irte->val = 0;
- irte->fields.vector = irq_cfg->vector;
- irte->fields.int_type = apic->irq_delivery_mode;
- irte->fields.destination = irq_cfg->dest_apicid;
- irte->fields.dm = apic->irq_dest_mode;
- irte->fields.valid = 1;
+ iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+ apic->irq_dest_mode, irq_cfg->vector,
+ irq_cfg->dest_apicid, devid);
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -3864,12 +4092,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
}
}
+struct amd_irte_ops irte_32_ops = {
+ .prepare = irte_prepare,
+ .activate = irte_activate,
+ .deactivate = irte_deactivate,
+ .set_affinity = irte_set_affinity,
+ .set_allocated = irte_set_allocated,
+ .is_allocated = irte_is_allocated,
+ .clear_allocated = irte_clear_allocated,
+};
+
+struct amd_irte_ops irte_128_ops = {
+ .prepare = irte_ga_prepare,
+ .activate = irte_ga_activate,
+ .deactivate = irte_ga_deactivate,
+ .set_affinity = irte_ga_set_affinity,
+ .set_allocated = irte_ga_set_allocated,
+ .is_allocated = irte_ga_is_allocated,
+ .clear_allocated = irte_ga_clear_allocated,
+};
+
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct amd_ir_data *data;
+ struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
int index = -1;
@@ -3921,6 +4169,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!data)
goto out_free_data;
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+ else
+ data->entry = kzalloc(sizeof(struct irte_ga),
+ GFP_KERNEL);
+ if (!data->entry) {
+ kfree(data);
+ goto out_free_data;
+ }
+
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3957,6 +4215,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
free_irte(irte_info->devid, irte_info->index);
+ kfree(data->entry);
kfree(data);
}
}
@@ -3968,8 +4227,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
- modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+ if (iommu)
+ iommu->irte_ops->activate(data->entry, irte_info->devid,
+ irte_info->index);
}
static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -3977,10 +4239,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- union irte entry;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
- entry.val = 0;
- modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+ if (iommu)
+ iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+ irte_info->index);
}
static struct irq_domain_ops amd_ir_domain_ops = {
@@ -3990,6 +4253,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+ struct amd_iommu *iommu;
+ struct amd_iommu_pi_data *pi_data = vcpu_info;
+ struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+ struct amd_ir_data *ir_data = data->chip_data;
+ struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
+ struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+ struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+
+ /* Note:
+ * This device has never been set up for guest mode.
+ * We should not modify the IRTE.
+ */
+ if (!dev_data || !dev_data->use_vapic)
+ return 0;
+
+ pi_data->ir_data = ir_data;
+
+ /* Note:
+ * SVM tries to set up for VAPIC mode, but we are in
+ * legacy mode. So, we force legacy mode instead.
+ */
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+ pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
+ __func__);
+ pi_data->is_guest_mode = false;
+ }
+
+ iommu = amd_iommu_rlookup_table[irte_info->devid];
+ if (iommu == NULL)
+ return -EINVAL;
+
+ pi_data->prev_ga_tag = ir_data->cached_ga_tag;
+ if (pi_data->is_guest_mode) {
+ /* Setting */
+ irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+ irte->hi.fields.vector = vcpu_pi_info->vector;
+ irte->lo.fields_vapic.guest_mode = 1;
+ irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+
+ ir_data->cached_ga_tag = pi_data->ga_tag;
+ } else {
+ /* Un-Setting */
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ irte->hi.val = 0;
+ irte->lo.val = 0;
+ irte->hi.fields.vector = cfg->vector;
+ irte->lo.fields_remap.guest_mode = 0;
+ irte->lo.fields_remap.destination = cfg->dest_apicid;
+ irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
+ irte->lo.fields_remap.dm = apic->irq_dest_mode;
+
+ /*
+ * This communicates the ga_tag back to the caller
+ * so that it can do all the necessary clean up.
+ */
+ ir_data->cached_ga_tag = 0;
+ }
+
+ return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+}
+
static int amd_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -3997,8 +4324,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
int ret;
+ if (!iommu)
+ return -ENODEV;
+
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
@@ -4007,9 +4338,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
- ir_data->irte_entry.fields.vector = cfg->vector;
- ir_data->irte_entry.fields.destination = cfg->dest_apicid;
- modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
+ iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ irte_info->index, cfg->vector, cfg->dest_apicid);
/*
* After this point, all the interrupts will start arriving
@@ -4031,6 +4361,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
static struct irq_chip amd_ir_chip = {
.irq_ack = ir_ack_apic_edge,
.irq_set_affinity = amd_ir_set_affinity,
+ .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
.irq_compose_msi_msg = ir_compose_msi_msg,
};
@@ -4045,4 +4376,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
return 0;
}
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct irq_remap_table *irt;
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ int devid = ir_data->irq_2_irte.devid;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ return -ENODEV;
+
+ irt = get_irq_table(devid, false);
+ if (!irt)
+ return -ENODEV;
+
+ spin_lock_irqsave(&irt->lock, flags);
+
+ if (ref->lo.fields_vapic.guest_mode) {
+ if (cpu >= 0)
+ ref->lo.fields_vapic.destination = cpu;
+ ref->lo.fields_vapic.is_run = is_run;
+ barrier();
+ }
+
+ spin_unlock_irqrestore(&irt->lock, flags);
+
+ iommu_flush_irt(iommu, devid);
+ iommu_completion_wait(iommu);
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
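
The hunks above export two new entry points for the vAPIC path, amd_iommu_register_ga_log_notifier() and amd_iommu_update_ga(). A rough consumer sketch is shown below; the my_avic_ga_log_handler name and its body are purely illustrative, and only the exported functions and the u32 ga_tag argument are taken from the patch (the corresponding header declarations are assumed to be in place):

	/*
	 * Illustrative sketch: a hypervisor-side consumer registers a GA log
	 * notifier so that iommu_poll_ga_log() above can pass it the guest
	 * tag of a posted interrupt that arrived while the target vCPU was
	 * not running.
	 */
	static int my_avic_ga_log_handler(u32 ga_tag)
	{
		/* Look up the vCPU encoded in ga_tag and wake it (illustrative). */
		return 0;
	}

	static int __init my_consumer_init(void)
	{
		return amd_iommu_register_ga_log_notifier(my_avic_ga_log_handler);
	}

When the guest vCPU is later scheduled in or out, the same consumer would call amd_iommu_update_ga(cpu, is_run, ir_data) with the amd_ir_data pointer handed back through amd_iommu_pi_data->ir_data in amd_ir_set_vcpu_affinity(), so that the destination and is_run bits of the live IRTE are refreshed.
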
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 59741ead7e15..cd1713631a4a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -84,6 +84,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
+#define LOOP_TIMEOUT 100000
/*
* ACPI table definitions
*
@@ -145,6 +146,8 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
+int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;
@@ -386,6 +389,10 @@ static void iommu_disable(struct amd_iommu *iommu)
iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+ /* Disable IOMMU GA_LOG */
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
/* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
@@ -671,6 +678,99 @@ static void __init free_ppr_log(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
+static void free_ga_log(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ if (iommu->ga_log)
+ free_pages((unsigned long)iommu->ga_log,
+ get_order(GA_LOG_SIZE));
+ if (iommu->ga_log_tail)
+ free_pages((unsigned long)iommu->ga_log_tail,
+ get_order(8));
+#endif
+}
+
+static int iommu_ga_log_enable(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+
+ if (!iommu->ga_log)
+ return -EINVAL;
+
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ /* Check if already running */
+ if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+ return 0;
+
+ iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+ iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ }
+
+ if (i >= LOOP_TIMEOUT)
+ return -EINVAL;
+#endif /* CONFIG_IRQ_REMAP */
+ return 0;
+}
+
+#ifdef CONFIG_IRQ_REMAP
+static int iommu_init_ga_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ return 0;
+
+ iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(GA_LOG_SIZE));
+ if (!iommu->ga_log)
+ goto err_out;
+
+ iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(8));
+ if (!iommu->ga_log_tail)
+ goto err_out;
+
+ entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+ &entry, sizeof(entry));
+ entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+ &entry, sizeof(entry));
+ writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+ return 0;
+err_out:
+ free_ga_log(iommu);
+ return -EINVAL;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+static int iommu_init_ga(struct amd_iommu *iommu)
+{
+ int ret = 0;
+
+#ifdef CONFIG_IRQ_REMAP
+ /* Note: We have already checked GASup from IVRS table.
+ * Now, we need to make sure that GAMSup is set.
+ */
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !iommu_feature(iommu, FEATURE_GAM_VAPIC))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
+ ret = iommu_init_ga_log(iommu);
+#endif /* CONFIG_IRQ_REMAP */
+
+ return ret;
+}
+
static void iommu_enable_gt(struct amd_iommu *iommu)
{
if (!iommu_feature(iommu, FEATURE_GT))
@@ -1144,6 +1244,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
free_command_buffer(iommu);
free_event_buffer(iommu);
free_ppr_log(iommu);
+ free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
}
@@ -1258,6 +1359,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
case 0x40:
@@ -1265,6 +1368,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
default:
return -EINVAL;
@@ -1432,6 +1537,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
+ int ret;
iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
@@ -1488,6 +1594,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
+ ret = iommu_init_ga(iommu);
+ if (ret)
+ return ret;
+
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1545,16 +1655,24 @@ static void print_iommu_info(void)
dev_name(&iommu->dev->dev), iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("AMD-Vi: Extended features: ");
+ pr_info("AMD-Vi: Extended features (%#llx):\n",
+ iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
+
+ if (iommu->features & FEATURE_GAM_VAPIC)
+ pr_cont(" GA_vAPIC");
+
pr_cont("\n");
}
}
- if (irq_remapping_enabled)
+ if (irq_remapping_enabled) {
pr_info("AMD-Vi: Interrupt remapping enabled\n");
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ pr_info("AMD-Vi: virtual APIC enabled\n");
+ }
}
static int __init amd_iommu_init_pci(void)
@@ -1645,6 +1763,8 @@ enable_faults:
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+ iommu_ga_log_enable(iommu);
+
return 0;
}
@@ -1862,6 +1982,24 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
iommu->stored_addr_lo | 1);
}
+static void iommu_enable_ga(struct amd_iommu *iommu)
+{
+#ifdef CONFIG_IRQ_REMAP
+ switch (amd_iommu_guest_ir) {
+ case AMD_IOMMU_GUEST_IR_VAPIC:
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ /* Fall through */
+ case AMD_IOMMU_GUEST_IR_LEGACY_GA:
+ iommu_feature_enable(iommu, CONTROL_GA_EN);
+ iommu->irte_ops = &irte_128_ops;
+ break;
+ default:
+ iommu->irte_ops = &irte_32_ops;
+ break;
+ }
+#endif
+}
+
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1877,9 +2015,15 @@ static void early_enable_iommus(void)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_ga(iommu);
iommu_enable(iommu);
iommu_flush_all_caches(iommu);
}
+
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+#endif
}
static void enable_iommus_v2(void)
@@ -1905,6 +2049,11 @@ static void disable_iommus(void)
for_each_iommu(iommu)
iommu_disable(iommu);
+
+#ifdef CONFIG_IRQ_REMAP
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
+ amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
+#endif
}
/*
@@ -2059,7 +2208,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
acpi_size ivrs_size;
acpi_status status;
- int i, ret = 0;
+ int i, remap_cache_sz, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
@@ -2157,10 +2306,14 @@ static int __init early_amd_iommu_init(void)
* remapping tables.
*/
ret = -ENOMEM;
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
+ else
+ remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
- MAX_IRQS_PER_TABLE * sizeof(u32),
- IRQ_TABLE_ALIGNMENT,
- 0, NULL);
+ remap_cache_sz,
+ IRQ_TABLE_ALIGNMENT,
+ 0, NULL);
if (!amd_iommu_irq_cache)
goto out;
@@ -2413,6 +2566,21 @@ static int __init parse_amd_iommu_dump(char *str)
return 1;
}
+static int __init parse_amd_iommu_intr(char *str)
+{
+ for (; *str; ++str) {
+ if (strncmp(str, "legacy", 6) == 0) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+ if (strncmp(str, "vapic", 5) == 0) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
+ break;
+ }
+ }
+ return 1;
+}
+
static int __init parse_amd_iommu_options(char *str)
{
for (; *str; ++str) {
@@ -2521,6 +2689,7 @@ static int __init parse_ivrs_acpihid(char *str)
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 0bd9eb374462..faa3b4895cf0 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -38,6 +38,7 @@ extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
/* IOMMUv2 specific functions */
struct iommu_domain;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 9652848e3155..0d91785ebdc3 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
@@ -69,6 +70,8 @@
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
#define MMIO_PPR_LOG_OFFSET 0x0038
+#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
+#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -76,6 +79,8 @@
#define MMIO_STATUS_OFFSET 0x2020
#define MMIO_PPR_HEAD_OFFSET 0x2030
#define MMIO_PPR_TAIL_OFFSET 0x2038
+#define MMIO_GA_HEAD_OFFSET 0x2040
+#define MMIO_GA_TAIL_OFFSET 0x2048
#define MMIO_CNTR_CONF_OFFSET 0x4000
#define MMIO_CNTR_REG_OFFSET 0x40000
#define MMIO_REG_END_OFFSET 0x80000
@@ -92,6 +97,7 @@
#define FEATURE_GA (1ULL<<7)
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
+#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
@@ -110,6 +116,9 @@
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
+#define MMIO_STATUS_GALOG_RUN_MASK (1 << 8)
+#define MMIO_STATUS_GALOG_OVERFLOW_MASK (1 << 9)
+#define MMIO_STATUS_GALOG_INT_MASK (1 << 10)
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
@@ -146,6 +155,10 @@
#define CONTROL_PPFINT_EN 0x0eULL
#define CONTROL_PPR_EN 0x0fULL
#define CONTROL_GT_EN 0x10ULL
+#define CONTROL_GA_EN 0x11ULL
+#define CONTROL_GAM_EN 0x19ULL
+#define CONTROL_GALOG_EN 0x1CULL
+#define CONTROL_GAINT_EN 0x1DULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -224,6 +237,19 @@
#define PPR_REQ_FAULT 0x01
+/* Constants for GA Log handling */
+#define GA_LOG_ENTRIES 512
+#define GA_LOG_SIZE_SHIFT 56
+#define GA_LOG_SIZE_512 (0x8ULL << GA_LOG_SIZE_SHIFT)
+#define GA_ENTRY_SIZE 8
+#define GA_LOG_SIZE (GA_ENTRY_SIZE * GA_LOG_ENTRIES)
+
+#define GA_TAG(x) (u32)(x & 0xffffffffULL)
+#define GA_DEVID(x) (u16)(((x) >> 32) & 0xffffULL)
+#define GA_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+
+#define GA_GUEST_NR 0x1
+
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -329,6 +355,12 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU Feature Reporting Field (for IVHD type 10h) */
+#define IOMMU_FEAT_GASUP_SHIFT 6
+
+/* IOMMU Extended Feature Register (EFR) */
+#define IOMMU_EFR_GASUP_SHIFT 7
+
#define MAX_DOMAIN_ID 65536
/* Protection domain flags */
@@ -400,6 +432,7 @@ struct amd_iommu_fault {
struct iommu_domain;
struct irq_domain;
+struct amd_irte_ops;
/*
* This structure contains generic data for IOMMU protection domains
@@ -490,6 +523,12 @@ struct amd_iommu {
/* Base of the PPR log, if present */
u8 *ppr_log;
+ /* Base of the GA log, if present */
+ u8 *ga_log;
+
+ /* Tail of the GA log, if present */
+ u8 *ga_log_tail;
+
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
@@ -523,6 +562,8 @@ struct amd_iommu {
#ifdef CONFIG_IRQ_REMAP
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
+
+ struct amd_irte_ops *irte_ops;
#endif
volatile u64 __aligned(8) cmd_sem;
@@ -683,4 +724,112 @@ static inline int get_hpet_devid(int id)
return -EINVAL;
}
+enum amd_iommu_intr_mode_type {
+ AMD_IOMMU_GUEST_IR_LEGACY,
+
+ /* This mode is not visible to users. It is used when
+ * we cannot fully enable vAPIC and fall back to only support
+ * legacy interrupt remapping via 128-bit IRTE.
+ */
+ AMD_IOMMU_GUEST_IR_LEGACY_GA,
+ AMD_IOMMU_GUEST_IR_VAPIC,
+};
+
+#define AMD_IOMMU_GUEST_IR_GA(x) (x == AMD_IOMMU_GUEST_IR_VAPIC || \
+ x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
+
+#define AMD_IOMMU_GUEST_IR_VAPIC(x) (x == AMD_IOMMU_GUEST_IR_VAPIC)
+
+union irte {
+ u32 val;
+ struct {
+ u32 valid : 1,
+ no_fault : 1,
+ int_type : 3,
+ rq_eoi : 1,
+ dm : 1,
+ rsvd_1 : 1,
+ destination : 8,
+ vector : 8,
+ rsvd_2 : 8;
+ } fields;
+};
+
+union irte_ga_lo {
+ u64 val;
+
+ /* For int remapping */
+ struct {
+ u64 valid : 1,
+ no_fault : 1,
+ /* ------ */
+ int_type : 3,
+ rq_eoi : 1,
+ dm : 1,
+ /* ------ */
+ guest_mode : 1,
+ destination : 8,
+ rsvd : 48;
+ } fields_remap;
+
+ /* For guest vAPIC */
+ struct {
+ u64 valid : 1,
+ no_fault : 1,
+ /* ------ */
+ ga_log_intr : 1,
+ rsvd1 : 3,
+ is_run : 1,
+ /* ------ */
+ guest_mode : 1,
+ destination : 8,
+ rsvd2 : 16,
+ ga_tag : 32;
+ } fields_vapic;
+};
+
+union irte_ga_hi {
+ u64 val;
+ struct {
+ u64 vector : 8,
+ rsvd_1 : 4,
+ ga_root_ptr : 40,
+ rsvd_2 : 12;
+ } fields;
+};
+
+struct irte_ga {
+ union irte_ga_lo lo;
+ union irte_ga_hi hi;
+};
+
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table */
+};
+
+struct amd_ir_data {
+ u32 cached_ga_tag;
+ struct irq_2_irte irq_2_irte;
+ struct msi_msg msi_entry;
+ void *entry; /* Pointer to union irte or struct irte_ga */
+ void *ref; /* Pointer to the actual irte */
+};
+
+struct amd_irte_ops {
+ void (*prepare)(void *, u32, u32, u8, u32, int);
+ void (*activate)(void *, u16, u16);
+ void (*deactivate)(void *, u16, u16);
+ void (*set_affinity)(void *, u16, u16, u8, u32);
+ void *(*get)(struct irq_remap_table *, int);
+ void (*set_allocated)(struct irq_remap_table *, int);
+ bool (*is_allocated)(struct irq_remap_table *, int);
+ void (*clear_allocated)(struct irq_remap_table *, int);
+};
+
+#ifdef CONFIG_IRQ_REMAP
+extern struct amd_irte_ops irte_32_ops;
+extern struct amd_irte_ops irte_128_ops;
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
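
The amd_irte_ops table added above lets the remapping code drive both IRTE formats through one interface; the selection mirrors iommu_enable_ga() in amd_iommu_init.c. A condensed sketch of that selection, using only names from the hunks above, is:

	/*
	 * Illustrative sketch of the format selection: GA-capable modes use
	 * 128-bit struct irte_ga entries via irte_128_ops, while plain
	 * legacy mode keeps 32-bit union irte entries via irte_32_ops.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		iommu->irte_ops = &irte_128_ops;  /* LEGACY_GA or VAPIC */
	else
		iommu->irte_ops = &irte_32_ops;   /* AMD_IOMMU_GUEST_IR_LEGACY */
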
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index a04d491cf431..3b44b1d82f3b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -101,8 +101,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
zpci_dma_exit_device(zdev);
zdev->dma_table = s390_domain->dma_table;
- rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
- zdev->start_dma + zdev->iommu_size - 1,
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
goto out_restore;
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b372e792adc2..e4dbfc85abdb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
-obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
+obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 8c38b3d92e1c..0cdd923d1535 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -436,7 +436,6 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
/**
* meta_intc_irq_demux() - external irq de-multiplexer
- * @irq: the virtual interrupt number
* @desc: the interrupt description structure for this irq
*
* The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f811a7de5857..74192f62dd67 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -498,7 +498,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
* vic_init_cascaded() - initialise a cascaded vectored interrupt controller
* @base: iomem base address
* @parent_irq: the parent IRQ we're cascaded off
- * @irq_start: starting interrupt number, must be muliple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 292991c90c02..e3fa1cd64470 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -284,7 +284,7 @@ __write_ctrl_pciv2(struct fritzcard *fc, struct hdlc_hw *hdlc, u32 channel) {
AVM_HDLC_STATUS_1));
}
-void
+static void
write_ctrl(struct bchannel *bch, int which) {
struct fritzcard *fc = bch->hw;
struct hdlc_hw *hdlc;
@@ -741,7 +741,7 @@ inithdlc(struct fritzcard *fc)
modehdlc(&fc->bch[1], -1);
}
-void
+static void
clear_pending_hdlc_ints(struct fritzcard *fc)
{
u32 val;
@@ -962,7 +962,7 @@ avm_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
return err;
}
-int
+static int
setup_fritz(struct fritzcard *fc)
{
u32 val, ver;
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 28543d795188..480c2d7794eb 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -564,19 +564,19 @@ disable_hwirq(struct hfc_multi *hc)
#define MAX_TDM_CHAN 32
-inline void
+static inline void
enablepcibridge(struct hfc_multi *c)
{
HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); /* was _io before */
}
-inline void
+static inline void
disablepcibridge(struct hfc_multi *c)
{
HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x2); /* was _io before */
}
-inline unsigned char
+static inline unsigned char
readpcibridge(struct hfc_multi *hc, unsigned char address)
{
unsigned short cipv;
@@ -604,7 +604,7 @@ readpcibridge(struct hfc_multi *hc, unsigned char address)
return data;
}
-inline void
+static inline void
writepcibridge(struct hfc_multi *hc, unsigned char address, unsigned char data)
{
unsigned short cipv;
@@ -634,14 +634,14 @@ writepcibridge(struct hfc_multi *hc, unsigned char address, unsigned char data)
outl(datav, hc->pci_iobase);
}
-inline void
+static inline void
cpld_set_reg(struct hfc_multi *hc, unsigned char reg)
{
/* Do data pin read low byte */
HFC_outb(hc, R_GPIO_OUT1, reg);
}
-inline void
+static inline void
cpld_write_reg(struct hfc_multi *hc, unsigned char reg, unsigned char val)
{
cpld_set_reg(hc, reg);
@@ -653,7 +653,7 @@ cpld_write_reg(struct hfc_multi *hc, unsigned char reg, unsigned char val)
return;
}
-inline unsigned char
+static inline unsigned char
cpld_read_reg(struct hfc_multi *hc, unsigned char reg)
{
unsigned char bytein;
@@ -670,14 +670,14 @@ cpld_read_reg(struct hfc_multi *hc, unsigned char reg)
return bytein;
}
-inline void
+static inline void
vpm_write_address(struct hfc_multi *hc, unsigned short addr)
{
cpld_write_reg(hc, 0, 0xff & addr);
cpld_write_reg(hc, 1, 0x01 & (addr >> 8));
}
-inline unsigned short
+static inline unsigned short
vpm_read_address(struct hfc_multi *c)
{
unsigned short addr;
@@ -691,7 +691,7 @@ vpm_read_address(struct hfc_multi *c)
return addr & 0x1ff;
}
-inline unsigned char
+static inline unsigned char
vpm_in(struct hfc_multi *c, int which, unsigned short addr)
{
unsigned char res;
@@ -712,7 +712,7 @@ vpm_in(struct hfc_multi *c, int which, unsigned short addr)
return res;
}
-inline void
+static inline void
vpm_out(struct hfc_multi *c, int which, unsigned short addr,
unsigned char data)
{
@@ -1024,7 +1024,7 @@ hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
}
/* This must be called AND hc must be locked irqsave!!! */
-inline void
+static inline void
plxsd_checksync(struct hfc_multi *hc, int rm)
{
if (hc->syncronized) {
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index aa9b6c3cadc1..8d338ba366d0 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -113,7 +113,7 @@ isac_ph_state_bh(struct dchannel *dch)
pr_debug("%s: TE newstate %x\n", isac->name, dch->state);
}
-void
+static void
isac_empty_fifo(struct isac_hw *isac, int count)
{
u8 *ptr;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 741675525b53..3b067ea656bd 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -848,7 +848,7 @@ dbusy_timer_handler(struct dchannel *dch)
}
}
-void initW6692(struct w6692_hw *card)
+static void initW6692(struct w6692_hw *card)
{
u8 val;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9dcc9b13d495..7a628c6516f6 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -584,6 +584,18 @@ config LEDS_SEAD3
This driver can also be built as a module. If so the module
will be called leds-sead3.
+config LEDS_IS31FL319X
+ tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ This option enables support for LEDs connected to ISSI IS31FL319x
+ fancy LED driver chips accessed via the I2C bus.
+ Driver supports individual PWM brightness control for each channel.
+
+ This driver can also be built as a module. If so the module will be
+ called leds-is31fl319x.
+
config LEDS_IS31FL32XX
tristate "LED support for ISSI IS31FL32XX I2C LED controller family"
depends on LEDS_CLASS && I2C && OF
@@ -631,6 +643,22 @@ config LEDS_VERSATILE
This option enabled support for the LEDs on the ARM Versatile
and RealView boards. Say Y to enabled these.
+config LEDS_PM8058
+ tristate "LED Support for the Qualcomm PM8058 PMIC"
+ depends on MFD_PM8921_CORE
+ depends on LEDS_CLASS
+ help
+ Choose this option if you want to use the LED drivers in
+ the Qualcomm PM8058 PMIC.
+
+config LEDS_MLXCPLD
+ tristate "LED support for the Mellanox boards"
+ depends on X86_64 && DMI
+ depends on LEDS_CLASS
+ help
+ This option enables support for the LEDs on the Mellanox
+ boards. Say Y to enable these.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 0684c865a1c0..3965070190f5 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -67,7 +67,10 @@ obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o
+obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
+obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
+obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index c92702a684ce..431123b048a2 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -11,7 +11,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -81,21 +81,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
down_read(&led_cdev->trigger_lock);
if (!led_cdev->trigger)
- len += sprintf(buf+len, "[none] ");
+ len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
else
- len += sprintf(buf+len, "none ");
+ len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
list_for_each_entry(trig, &trigger_list, next_trig) {
if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
trig->name))
- len += sprintf(buf+len, "[%s] ", trig->name);
+ len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
+ trig->name);
else
- len += sprintf(buf+len, "%s ", trig->name);
+ len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
+ trig->name);
}
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- len += sprintf(len+buf, "\n");
+ len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
return len;
}
EXPORT_SYMBOL_GPL(led_trigger_show);
@@ -108,6 +110,9 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
char *envp[2];
const char *name;
+ if (!led_cdev->trigger && !trig)
+ return;
+
name = trig ? trig->name : "none";
event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
@@ -136,7 +141,9 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
if (event) {
envp[0] = event;
envp[1] = NULL;
- kobject_uevent_env(&led_cdev->dev->kobj, KOBJ_CHANGE, envp);
+ if (kobject_uevent_env(&led_cdev->dev->kobj, KOBJ_CHANGE, envp))
+ dev_err(led_cdev->dev,
+ "%s: Error sending uevent\n", __func__);
kfree(event);
}
}
@@ -357,7 +364,3 @@ void led_trigger_unregister_simple(struct led_trigger *trig)
kfree(trig);
}
EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
-
-MODULE_AUTHOR("Richard Purdie");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LED Triggers Core");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 9b991d46ed84..d400dcaf4d29 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -26,15 +26,19 @@ struct gpio_led_data {
struct gpio_desc *gpiod;
u8 can_sleep;
u8 blinking;
- int (*platform_gpio_blink_set)(struct gpio_desc *desc, int state,
- unsigned long *delay_on, unsigned long *delay_off);
+ gpio_blink_set_t platform_gpio_blink_set;
};
+static inline struct gpio_led_data *
+ cdev_to_gpio_led_data(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct gpio_led_data, cdev);
+}
+
static void gpio_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- struct gpio_led_data *led_dat =
- container_of(led_cdev, struct gpio_led_data, cdev);
+ struct gpio_led_data *led_dat = cdev_to_gpio_led_data(led_cdev);
int level;
if (value == LED_OFF)
@@ -64,8 +68,7 @@ static int gpio_led_set_blocking(struct led_classdev *led_cdev,
static int gpio_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
- struct gpio_led_data *led_dat =
- container_of(led_cdev, struct gpio_led_data, cdev);
+ struct gpio_led_data *led_dat = cdev_to_gpio_led_data(led_cdev);
led_dat->blinking = 1;
return led_dat->platform_gpio_blink_set(led_dat->gpiod, GPIO_LED_BLINK,
@@ -74,8 +77,7 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
static int create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
- int (*blink_set)(struct gpio_desc *, int, unsigned long *,
- unsigned long *))
+ gpio_blink_set_t blink_set)
{
int ret, state;
@@ -120,10 +122,13 @@ static int create_gpio_led(const struct gpio_led *template,
led_dat->platform_gpio_blink_set = blink_set;
led_dat->cdev.blink_set = gpio_blink_set;
}
- if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
- state = !!gpiod_get_value_cansleep(led_dat->gpiod);
- else
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) {
+ state = gpiod_get_value_cansleep(led_dat->gpiod);
+ if (state < 0)
+ return state;
+ } else {
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ }
led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
@@ -134,7 +139,7 @@ static int create_gpio_led(const struct gpio_led *template,
if (ret < 0)
return ret;
- return led_classdev_register(parent, &led_dat->cdev);
+ return devm_led_classdev_register(parent, &led_dat->cdev);
}
struct gpio_leds_priv {
@@ -154,7 +159,6 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct fwnode_handle *child;
struct gpio_leds_priv *priv;
int count, ret;
- struct device_node *np;
count = device_get_child_node_count(dev);
if (!count)
@@ -168,26 +172,22 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led_data *led_dat = &priv->leds[priv->num_leds];
struct gpio_led led = {};
const char *state = NULL;
+ struct device_node *np = to_of_node(child);
led.gpiod = devm_get_gpiod_from_child(dev, NULL, child);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
- ret = PTR_ERR(led.gpiod);
- goto err;
+ return ERR_CAST(led.gpiod);
}
- np = to_of_node(child);
-
- if (fwnode_property_present(child, "label")) {
- fwnode_property_read_string(child, "label", &led.name);
- } else {
- if (IS_ENABLED(CONFIG_OF) && !led.name && np)
- led.name = np->name;
- if (!led.name) {
- ret = -EINVAL;
- goto err;
- }
+ ret = fwnode_property_read_string(child, "label", &led.name);
+ if (ret && IS_ENABLED(CONFIG_OF) && np)
+ led.name = np->name;
+ if (!led.name) {
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
}
+
fwnode_property_read_string(child, "linux,default-trigger",
&led.default_trigger);
@@ -209,18 +209,13 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
ret = create_gpio_led(&led, led_dat, dev, NULL);
if (ret < 0) {
fwnode_handle_put(child);
- goto err;
+ return ERR_PTR(ret);
}
led_dat->cdev.dev->of_node = np;
priv->num_leds++;
}
return priv;
-
-err:
- for (count = priv->num_leds - 1; count >= 0; count--)
- led_classdev_unregister(&priv->leds[count].cdev);
- return ERR_PTR(ret);
}
static const struct of_device_id of_gpio_leds_match[] = {
@@ -248,13 +243,8 @@ static int gpio_led_probe(struct platform_device *pdev)
ret = create_gpio_led(&pdata->leds[i],
&priv->leds[i],
&pdev->dev, pdata->gpio_blink_set);
- if (ret < 0) {
- /* On failure: unwind the led creations */
- for (i = i - 1; i >= 0; i--)
- led_classdev_unregister(
- &priv->leds[i].cdev);
+ if (ret < 0)
return ret;
- }
}
} else {
priv = gpio_leds_create(pdev);
@@ -267,17 +257,6 @@ static int gpio_led_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_led_remove(struct platform_device *pdev)
-{
- struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < priv->num_leds; i++)
- led_classdev_unregister(&priv->leds[i].cdev);
-
- return 0;
-}
-
static void gpio_led_shutdown(struct platform_device *pdev)
{
struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
@@ -292,7 +271,6 @@ static void gpio_led_shutdown(struct platform_device *pdev)
static struct platform_driver gpio_led_driver = {
.probe = gpio_led_probe,
- .remove = gpio_led_remove,
.shutdown = gpio_led_shutdown,
.driver = {
.name = "leds-gpio",
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
new file mode 100644
index 000000000000..f123309597e4
--- /dev/null
+++ b/drivers/leds/leds-is31fl319x.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2015-16 Golden Delicious Computers
+ *
+ * Author: Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * LED driver for the IS31FL319{0,1,3,6,9} to drive 1, 3, 6 or 9 light
+ * effect LEDs.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* register numbers */
+#define IS31FL319X_SHUTDOWN 0x00
+#define IS31FL319X_CTRL1 0x01
+#define IS31FL319X_CTRL2 0x02
+#define IS31FL319X_CONFIG1 0x03
+#define IS31FL319X_CONFIG2 0x04
+#define IS31FL319X_RAMP_MODE 0x05
+#define IS31FL319X_BREATH_MASK 0x06
+#define IS31FL319X_PWM(channel) (0x07 + channel)
+#define IS31FL319X_DATA_UPDATE 0x10
+#define IS31FL319X_T0(channel) (0x11 + channel)
+#define IS31FL319X_T123_1 0x1a
+#define IS31FL319X_T123_2 0x1b
+#define IS31FL319X_T123_3 0x1c
+#define IS31FL319X_T4(channel) (0x1d + channel)
+#define IS31FL319X_TIME_UPDATE 0x26
+#define IS31FL319X_RESET 0xff
+
+#define IS31FL319X_REG_CNT (IS31FL319X_RESET + 1)
+
+#define IS31FL319X_MAX_LEDS 9
+
+/* CS (Current Setting) in CONFIG2 register */
+#define IS31FL319X_CONFIG2_CS_SHIFT 4
+#define IS31FL319X_CONFIG2_CS_MASK 0x7
+#define IS31FL319X_CONFIG2_CS_STEP_REF 12
+
+#define IS31FL319X_CURRENT_MIN ((u32)5000)
+#define IS31FL319X_CURRENT_MAX ((u32)40000)
+#define IS31FL319X_CURRENT_STEP ((u32)5000)
+#define IS31FL319X_CURRENT_DEFAULT ((u32)20000)
+
+/* Audio gain in CONFIG2 register */
+#define IS31FL319X_AUDIO_GAIN_DB_MAX ((u32)21)
+#define IS31FL319X_AUDIO_GAIN_DB_STEP ((u32)3)
+
+/*
+ * regmap is used as a cache of the chip's register space,
+ * to avoid reading back brightness values from the chip,
+ * which is known to hang.
+ */
+struct is31fl319x_chip {
+ const struct is31fl319x_chipdef *cdef;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct mutex lock;
+ u32 audio_gain_db;
+
+ struct is31fl319x_led {
+ struct is31fl319x_chip *chip;
+ struct led_classdev cdev;
+ u32 max_microamp;
+ bool configured;
+ } leds[IS31FL319X_MAX_LEDS];
+};
+
+struct is31fl319x_chipdef {
+ int num_leds;
+};
+
+static const struct is31fl319x_chipdef is31fl3190_cdef = {
+ .num_leds = 1,
+};
+
+static const struct is31fl319x_chipdef is31fl3193_cdef = {
+ .num_leds = 3,
+};
+
+static const struct is31fl319x_chipdef is31fl3196_cdef = {
+ .num_leds = 6,
+};
+
+static const struct is31fl319x_chipdef is31fl3199_cdef = {
+ .num_leds = 9,
+};
+
+static const struct of_device_id of_is31fl319x_match[] = {
+ { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
+ { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
+ { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
+ { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
+
+static int is31fl319x_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led,
+ cdev);
+ struct is31fl319x_chip *is31 = led->chip;
+ int chan = led - is31->leds;
+ int ret;
+ int i;
+ u8 ctrl1 = 0, ctrl2 = 0;
+
+ dev_dbg(&is31->client->dev, "%s %d: %d\n", __func__, chan, brightness);
+
+ mutex_lock(&is31->lock);
+
+ /* update PWM register */
+ ret = regmap_write(is31->regmap, IS31FL319X_PWM(chan), brightness);
+ if (ret < 0)
+ goto out;
+
+ /* read current brightness of all PWM channels */
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ unsigned int pwm_value;
+ bool on;
+
+ /*
+ * since neither cdev nor the chip can provide
+ * the current setting, we read from the regmap cache
+ */
+
+ ret = regmap_read(is31->regmap, IS31FL319X_PWM(i), &pwm_value);
+ dev_dbg(&is31->client->dev, "%s read %d: ret=%d: %d\n",
+ __func__, i, ret, pwm_value);
+ on = ret >= 0 && pwm_value > LED_OFF;
+
+ if (i < 3)
+ ctrl1 |= on << i; /* 0..2 => bit 0..2 */
+ else if (i < 6)
+ ctrl1 |= on << (i + 1); /* 3..5 => bit 4..6 */
+ else
+ ctrl2 |= on << (i - 6); /* 6..8 => bit 0..2 */
+ }
+
+ if (ctrl1 > 0 || ctrl2 > 0) {
+ dev_dbg(&is31->client->dev, "power up %02x %02x\n",
+ ctrl1, ctrl2);
+ regmap_write(is31->regmap, IS31FL319X_CTRL1, ctrl1);
+ regmap_write(is31->regmap, IS31FL319X_CTRL2, ctrl2);
+ /* update PWMs */
+ regmap_write(is31->regmap, IS31FL319X_DATA_UPDATE, 0x00);
+ /* enable chip from shut down */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
+ } else {
+ dev_dbg(&is31->client->dev, "power down\n");
+ /* shut down (no need to clear CTRL1/2) */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x00);
+ }
+
+out:
+ mutex_unlock(&is31->lock);
+
+ return ret;
+}
+
+static int is31fl319x_parse_child_dt(const struct device *dev,
+ const struct device_node *child,
+ struct is31fl319x_led *led)
+{
+ struct led_classdev *cdev = &led->cdev;
+ int ret;
+
+ if (of_property_read_string(child, "label", &cdev->name))
+ cdev->name = child->name;
+
+ ret = of_property_read_string(child, "linux,default-trigger",
+ &cdev->default_trigger);
+ if (ret < 0 && ret != -EINVAL) /* is optional */
+ return ret;
+
+ led->max_microamp = IS31FL319X_CURRENT_DEFAULT;
+ ret = of_property_read_u32(child, "led-max-microamp",
+ &led->max_microamp);
+ if (!ret) {
+ if (led->max_microamp < IS31FL319X_CURRENT_MIN)
+ return -EINVAL; /* not supported */
+ led->max_microamp = min(led->max_microamp,
+ IS31FL319X_CURRENT_MAX);
+ }
+
+ return 0;
+}
+
+static int is31fl319x_parse_dt(struct device *dev,
+ struct is31fl319x_chip *is31)
+{
+ struct device_node *np = dev->of_node, *child;
+ const struct of_device_id *of_dev_id;
+ int count;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ of_dev_id = of_match_device(of_is31fl319x_match, dev);
+ if (!of_dev_id) {
+ dev_err(dev, "Failed to match device with supported chips\n");
+ return -EINVAL;
+ }
+
+ is31->cdef = of_dev_id->data;
+
+ count = of_get_child_count(np);
+
+ dev_dbg(dev, "probe %s with %d leds defined in DT\n",
+ of_dev_id->compatible, count);
+
+ if (!count || count > is31->cdef->num_leds) {
+ dev_err(dev, "Number of leds defined must be between 1 and %u\n",
+ is31->cdef->num_leds);
+ return -ENODEV;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct is31fl319x_led *led;
+ u32 reg;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "Failed to read led 'reg' property\n");
+ goto put_child_node;
+ }
+
+ if (reg < 1 || reg > is31->cdef->num_leds) {
+ dev_err(dev, "invalid led reg %u\n", reg);
+ ret = -EINVAL;
+ goto put_child_node;
+ }
+
+ led = &is31->leds[reg - 1];
+
+ if (led->configured) {
+ dev_err(dev, "led %u is already configured\n", reg);
+ ret = -EINVAL;
+ goto put_child_node;
+ }
+
+ ret = is31fl319x_parse_child_dt(dev, child, led);
+ if (ret) {
+ dev_err(dev, "led %u DT parsing failed\n", reg);
+ goto put_child_node;
+ }
+
+ led->configured = true;
+ }
+
+ is31->audio_gain_db = 0;
+ ret = of_property_read_u32(np, "audio-gain-db", &is31->audio_gain_db);
+ if (!ret)
+ is31->audio_gain_db = min(is31->audio_gain_db,
+ IS31FL319X_AUDIO_GAIN_DB_MAX);
+
+ return 0;
+
+put_child_node:
+ of_node_put(child);
+ return ret;
+}
+
+static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
+{ /* we have no readable registers */
+ return false;
+}
+
+static bool is31fl319x_volatile_reg(struct device *dev, unsigned int reg)
+{ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL319X_DATA_UPDATE:
+ case IS31FL319X_TIME_UPDATE:
+ case IS31FL319X_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl319x_reg_defaults[] = {
+ { IS31FL319X_CONFIG1, 0x00},
+ { IS31FL319X_CONFIG2, 0x00},
+ { IS31FL319X_PWM(0), 0x00},
+ { IS31FL319X_PWM(1), 0x00},
+ { IS31FL319X_PWM(2), 0x00},
+ { IS31FL319X_PWM(3), 0x00},
+ { IS31FL319X_PWM(4), 0x00},
+ { IS31FL319X_PWM(5), 0x00},
+ { IS31FL319X_PWM(6), 0x00},
+ { IS31FL319X_PWM(7), 0x00},
+ { IS31FL319X_PWM(8), 0x00},
+};
+
+static struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL319X_REG_CNT,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl319x_volatile_reg,
+ .reg_defaults = is31fl319x_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl319x_reg_defaults),
+};
+
+static inline int is31fl319x_microamp_to_cs(struct device *dev, u32 microamp)
+{ /* round down to nearest supported value (range check done by caller) */
+ u32 step = microamp / IS31FL319X_CURRENT_STEP;
+
+ return ((IS31FL319X_CONFIG2_CS_STEP_REF - step) &
+ IS31FL319X_CONFIG2_CS_MASK) <<
+ IS31FL319X_CONFIG2_CS_SHIFT; /* CS encoding */
+}
+
+static inline int is31fl319x_db_to_gain(u32 dezibel)
+{ /* round down to nearest supported value (range check done by caller) */
+ return dezibel / IS31FL319X_AUDIO_GAIN_DB_STEP;
+}
+
+static int is31fl319x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct is31fl319x_chip *is31;
+ struct device *dev = &client->dev;
+ struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
+ int err;
+ int i = 0;
+ u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -EIO;
+
+ is31 = devm_kzalloc(&client->dev, sizeof(*is31), GFP_KERNEL);
+ if (!is31)
+ return -ENOMEM;
+
+ mutex_init(&is31->lock);
+
+ err = is31fl319x_parse_dt(&client->dev, is31);
+ if (err)
+ goto free_mutex;
+
+ is31->client = client;
+ is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(is31->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ err = PTR_ERR(is31->regmap);
+ goto free_mutex;
+ }
+
+ i2c_set_clientdata(client, is31);
+
+ /* check for write-reply from chip (we can't read any registers) */
+ err = regmap_write(is31->regmap, IS31FL319X_RESET, 0x00);
+ if (err < 0) {
+ dev_err(&client->dev, "no response from chip write: err = %d\n",
+ err);
+ err = -EIO; /* does not answer */
+ goto free_mutex;
+ }
+
+ /*
+ * Kernel conventions require a per-LED led-max-microamp property,
+ * but the chip does not allow limiting individual LEDs.
+ * So we take the minimum across all subnodes to be safe for the hardware.
+ */
+ for (i = 0; i < is31->cdef->num_leds; i++)
+ if (is31->leds[i].configured &&
+ is31->leds[i].max_microamp < aggregated_led_microamp)
+ aggregated_led_microamp = is31->leds[i].max_microamp;
+
+ regmap_write(is31->regmap, IS31FL319X_CONFIG2,
+ is31fl319x_microamp_to_cs(dev, aggregated_led_microamp) |
+ is31fl319x_db_to_gain(is31->audio_gain_db));
+
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ struct is31fl319x_led *led = &is31->leds[i];
+
+ if (!led->configured)
+ continue;
+
+ led->chip = is31;
+ led->cdev.brightness_set_blocking = is31fl319x_brightness_set;
+
+ err = devm_led_classdev_register(&client->dev, &led->cdev);
+ if (err < 0)
+ goto free_mutex;
+ }
+
+ return 0;
+
+free_mutex:
+ mutex_destroy(&is31->lock);
+ return err;
+}
+
+static int is31fl319x_remove(struct i2c_client *client)
+{
+ struct is31fl319x_chip *is31 = i2c_get_clientdata(client);
+
+ mutex_destroy(&is31->lock);
+ return 0;
+}
+
+/*
+ * i2c-core (and modalias) requires that id_table be properly filled,
+ * even though it is not used for DeviceTree based instantiation.
+ */
+static const struct i2c_device_id is31fl319x_id[] = {
+ { "is31fl3190" },
+ { "is31fl3191" },
+ { "is31fl3193" },
+ { "is31fl3196" },
+ { "is31fl3199" },
+ { "sn3199" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, is31fl319x_id);
+
+static struct i2c_driver is31fl319x_driver = {
+ .driver = {
+ .name = "leds-is31fl319x",
+ .of_match_table = of_match_ptr(of_is31fl319x_match),
+ },
+ .probe = is31fl319x_probe,
+ .remove = is31fl319x_remove,
+ .id_table = is31fl319x_id,
+};
+
+module_i2c_driver(is31fl319x_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_AUTHOR("Andrey Utkin <andrey_utkin@fastmail.com>");
+MODULE_DESCRIPTION("IS31FL319X LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
new file mode 100644
index 000000000000..197ab9b29a9c
--- /dev/null
+++ b/drivers/leds/leds-mlxcpld.c
@@ -0,0 +1,430 @@
+/*
+ * drivers/leds/leds-mlxcpld.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500 /* LPC bus access */
+
+/* Color codes for LEDs */
+#define MLXCPLD_LED_OFFSET_HALF 0x01 /* Offset from solid: 3Hz blink */
+#define MLXCPLD_LED_OFFSET_FULL 0x02 /* Offset from solid: 6Hz blink */
+#define MLXCPLD_LED_IS_OFF 0x00 /* Off */
+#define MLXCPLD_LED_RED_STATIC_ON 0x05 /* Solid red */
+#define MLXCPLD_LED_RED_BLINK_HALF (MLXCPLD_LED_RED_STATIC_ON + \
+ MLXCPLD_LED_OFFSET_HALF)
+#define MLXCPLD_LED_RED_BLINK_FULL (MLXCPLD_LED_RED_STATIC_ON + \
+ MLXCPLD_LED_OFFSET_FULL)
+#define MLXCPLD_LED_GREEN_STATIC_ON 0x0D /* Solid green */
+#define MLXCPLD_LED_GREEN_BLINK_HALF (MLXCPLD_LED_GREEN_STATIC_ON + \
+ MLXCPLD_LED_OFFSET_HALF)
+#define MLXCPLD_LED_GREEN_BLINK_FULL (MLXCPLD_LED_GREEN_STATIC_ON + \
+ MLXCPLD_LED_OFFSET_FULL)
+#define MLXCPLD_LED_BLINK_3HZ 167 /* ~167 msec off/on */
+#define MLXCPLD_LED_BLINK_6HZ 83 /* ~83 msec off/on */
+
+/**
+ * struct mlxcpld_param - LED access parameters:
+ * @offset: offset for LED access in CPLD device
+ * @mask: mask for LED access in CPLD device
+ * @base_color: base color code for LED
+ */
+struct mlxcpld_param {
+ u8 offset;
+ u8 mask;
+ u8 base_color;
+};
+
+/**
+ * struct mlxcpld_led_priv - LED private data:
+ * @cdev: LED class device instance
+ * @param: LED CPLD access parameters
+ */
+struct mlxcpld_led_priv {
+ struct led_classdev cdev;
+ struct mlxcpld_param param;
+};
+
+#define cdev_to_priv(c) container_of(c, struct mlxcpld_led_priv, cdev)
+
+/**
+ * struct mlxcpld_led_profile - system LED profile (defined per system class):
+ * @offset: offset for LED access in CPLD device
+ * @mask: mask for LED access in CPLD device
+ * @base_color: base color code
+ * @brightness: default brightness setting (on/off)
+ * @name: LED name
+ */
+struct mlxcpld_led_profile {
+ u8 offset;
+ u8 mask;
+ u8 base_color;
+ enum led_brightness brightness;
+ const char *name;
+};
+
+/**
+ * struct mlxcpld_led_pdata - system LED private data
+ * @pdev: platform device pointer
+ * @pled: LED class device instance
+ * @profile: system configuration profile
+ * @num_led_instances: number of LED instances
+ * @lock: device access lock
+ */
+struct mlxcpld_led_pdata {
+ struct platform_device *pdev;
+ struct mlxcpld_led_priv *pled;
+ struct mlxcpld_led_profile *profile;
+ int num_led_instances;
+ spinlock_t lock;
+};
+
+static struct mlxcpld_led_pdata *mlxcpld_led;
+
+/* Default profile fits the following Mellanox systems:
+ * "msx6710", "msx6720", "msb7700", "msn2700", "msx1410",
+ * "msn2410", "msb7800", "msn2740"
+ */
+static struct mlxcpld_led_profile mlxcpld_led_default_profile[] = {
+ {
+ 0x21, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:fan1:green",
+ },
+ {
+ 0x21, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:fan1:red",
+ },
+ {
+ 0x21, 0x0f, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:fan2:green",
+ },
+ {
+ 0x21, 0x0f, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:fan2:red",
+ },
+ {
+ 0x22, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:fan3:green",
+ },
+ {
+ 0x22, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:fan3:red",
+ },
+ {
+ 0x22, 0x0f, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:fan4:green",
+ },
+ {
+ 0x22, 0x0f, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:fan4:red",
+ },
+ {
+ 0x20, 0x0f, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:psu:green",
+ },
+ {
+ 0x20, 0x0f, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:psu:red",
+ },
+ {
+ 0x20, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:status:green",
+ },
+ {
+ 0x20, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:status:red",
+ },
+};
+
+/* Profile for the Mellanox systems based on "msn2100" */
+static struct mlxcpld_led_profile mlxcpld_led_msn2100_profile[] = {
+ {
+ 0x21, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:fan:green",
+ },
+ {
+ 0x21, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:fan:red",
+ },
+ {
+ 0x23, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:psu1:green",
+ },
+ {
+ 0x23, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:psu1:red",
+ },
+ {
+ 0x23, 0x0f, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:psu2:green",
+ },
+ {
+ 0x23, 0x0f, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:psu2:red",
+ },
+ {
+ 0x20, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, 1,
+ "mlxcpld:status:green",
+ },
+ {
+ 0x20, 0xf0, MLXCPLD_LED_RED_STATIC_ON, LED_OFF,
+ "mlxcpld:status:red",
+ },
+ {
+ 0x24, 0xf0, MLXCPLD_LED_GREEN_STATIC_ON, LED_OFF,
+ "mlxcpld:uid:blue",
+ },
+};
+
+enum mlxcpld_led_platform_types {
+ MLXCPLD_LED_PLATFORM_DEFAULT,
+ MLXCPLD_LED_PLATFORM_MSN2100,
+};
+
+static const char *mlx_product_names[] = {
+ "DEFAULT",
+ "MSN2100",
+};
+
+static enum
+mlxcpld_led_platform_types mlxcpld_led_platform_check_sys_type(void)
+{
+ const char *mlx_product_name;
+ int i;
+
+ mlx_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!mlx_product_name)
+ return MLXCPLD_LED_PLATFORM_DEFAULT;
+
+ for (i = 1; i < ARRAY_SIZE(mlx_product_names); i++) {
+ if (strstr(mlx_product_name, mlx_product_names[i]))
+ return i;
+ }
+
+ return MLXCPLD_LED_PLATFORM_DEFAULT;
+}
+
+static void mlxcpld_led_bus_access_func(u16 base, u8 offset, u8 rw_flag,
+ u8 *data)
+{
+ u32 addr = base + offset;
+
+ if (rw_flag == 0)
+ outb(*data, addr);
+ else
+ *data = inb(addr);
+}
+
+static void mlxcpld_led_store_hw(u8 mask, u8 off, u8 vset)
+{
+ u8 nib, val;
+
+ /*
+ * Each LED is controlled through low or high nibble of the relevant
+ * CPLD register. Register offset is specified by off parameter.
+ * Parameter vset provides color code: 0x0 for off, 0x5 for solid red,
+ * 0x6 for 3Hz blink red, 0xd for solid green, 0xe for 3Hz blink
+ * green.
+ * Parameter mask specifies which nibble is used for specific LED: mask
+ * 0xf0 - lower nibble is to be used (bits from 0 to 3), mask 0x0f -
+ * higher nibble (bits from 4 to 7).
+ */
+ spin_lock(&mlxcpld_led->lock);
+ mlxcpld_led_bus_access_func(MLXPLAT_CPLD_LPC_REG_BASE_ADRR, off, 1,
+ &val);
+ nib = (mask == 0xf0) ? vset : (vset << 4);
+ val = (val & mask) | nib;
+ mlxcpld_led_bus_access_func(MLXPLAT_CPLD_LPC_REG_BASE_ADRR, off, 0,
+ &val);
+ spin_unlock(&mlxcpld_led->lock);
+}
+
+static void mlxcpld_led_brightness_set(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct mlxcpld_led_priv *pled = cdev_to_priv(led);
+
+ if (value) {
+ mlxcpld_led_store_hw(pled->param.mask, pled->param.offset,
+ pled->param.base_color);
+ return;
+ }
+
+ mlxcpld_led_store_hw(pled->param.mask, pled->param.offset,
+ MLXCPLD_LED_IS_OFF);
+}
+
+static int mlxcpld_led_blink_set(struct led_classdev *led,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mlxcpld_led_priv *pled = cdev_to_priv(led);
+
+ /*
+ * HW supports two types of blinking: full (6Hz) and half (3Hz).
+ * If both delay_on and delay_off are zero, the default 3Hz setting is used.
+ */
+ if (!(*delay_on == 0 && *delay_off == 0) &&
+ !(*delay_on == MLXCPLD_LED_BLINK_3HZ &&
+ *delay_off == MLXCPLD_LED_BLINK_3HZ) &&
+ !(*delay_on == MLXCPLD_LED_BLINK_6HZ &&
+ *delay_off == MLXCPLD_LED_BLINK_6HZ))
+ return -EINVAL;
+
+ if (*delay_on == MLXCPLD_LED_BLINK_6HZ)
+ mlxcpld_led_store_hw(pled->param.mask, pled->param.offset,
+ pled->param.base_color +
+ MLXCPLD_LED_OFFSET_FULL);
+ else
+ mlxcpld_led_store_hw(pled->param.mask, pled->param.offset,
+ pled->param.base_color +
+ MLXCPLD_LED_OFFSET_HALF);
+
+ return 0;
+}
+
+static int mlxcpld_led_config(struct device *dev,
+ struct mlxcpld_led_pdata *cpld)
+{
+ int i;
+ int err;
+
+ cpld->pled = devm_kzalloc(dev, sizeof(struct mlxcpld_led_priv) *
+ cpld->num_led_instances, GFP_KERNEL);
+ if (!cpld->pled)
+ return -ENOMEM;
+
+ for (i = 0; i < cpld->num_led_instances; i++) {
+ cpld->pled[i].cdev.name = cpld->profile[i].name;
+ cpld->pled[i].cdev.brightness = cpld->profile[i].brightness;
+ cpld->pled[i].cdev.max_brightness = 1;
+ cpld->pled[i].cdev.brightness_set = mlxcpld_led_brightness_set;
+ cpld->pled[i].cdev.blink_set = mlxcpld_led_blink_set;
+ cpld->pled[i].cdev.flags = LED_CORE_SUSPENDRESUME;
+ err = devm_led_classdev_register(dev, &cpld->pled[i].cdev);
+ if (err)
+ return err;
+
+ cpld->pled[i].param.offset = mlxcpld_led->profile[i].offset;
+ cpld->pled[i].param.mask = mlxcpld_led->profile[i].mask;
+ cpld->pled[i].param.base_color =
+ mlxcpld_led->profile[i].base_color;
+
+ if (mlxcpld_led->profile[i].brightness)
+ mlxcpld_led_brightness_set(&cpld->pled[i].cdev,
+ mlxcpld_led->profile[i].brightness);
+ }
+
+ return 0;
+}
+
+static int __init mlxcpld_led_probe(struct platform_device *pdev)
+{
+ enum mlxcpld_led_platform_types mlxcpld_led_plat =
+ mlxcpld_led_platform_check_sys_type();
+
+ mlxcpld_led = devm_kzalloc(&pdev->dev, sizeof(*mlxcpld_led),
+ GFP_KERNEL);
+ if (!mlxcpld_led)
+ return -ENOMEM;
+
+ mlxcpld_led->pdev = pdev;
+
+ switch (mlxcpld_led_plat) {
+ case MLXCPLD_LED_PLATFORM_MSN2100:
+ mlxcpld_led->profile = mlxcpld_led_msn2100_profile;
+ mlxcpld_led->num_led_instances =
+ ARRAY_SIZE(mlxcpld_led_msn2100_profile);
+ break;
+
+ default:
+ mlxcpld_led->profile = mlxcpld_led_default_profile;
+ mlxcpld_led->num_led_instances =
+ ARRAY_SIZE(mlxcpld_led_default_profile);
+ break;
+ }
+
+ spin_lock_init(&mlxcpld_led->lock);
+
+ return mlxcpld_led_config(&pdev->dev, mlxcpld_led);
+}
+
+static struct platform_driver mlxcpld_led_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int __init mlxcpld_led_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("Device allocation failed\n");
+ return PTR_ERR(pdev);
+ }
+
+ err = platform_driver_probe(&mlxcpld_led_driver, mlxcpld_led_probe);
+ if (err) {
+ pr_err("Probe platform driver failed\n");
+ platform_device_unregister(pdev);
+ }
+
+ return err;
+}
+
+static void __exit mlxcpld_led_exit(void)
+{
+ platform_device_unregister(mlxcpld_led->pdev);
+ platform_driver_unregister(&mlxcpld_led_driver);
+}
+
+module_init(mlxcpld_led_init);
+module_exit(mlxcpld_led_exit);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox board LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds_mlxcpld");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
new file mode 100644
index 000000000000..a52674327857
--- /dev/null
+++ b/drivers/leds/leds-pm8058.c
@@ -0,0 +1,191 @@
+/* Copyright (c) 2010, 2011, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+#define PM8058_LED_TYPE_COMMON 0x00
+#define PM8058_LED_TYPE_KEYPAD 0x01
+#define PM8058_LED_TYPE_FLASH 0x02
+
+#define PM8058_LED_TYPE_COMMON_MASK 0xf8
+#define PM8058_LED_TYPE_KEYPAD_MASK 0xf0
+#define PM8058_LED_TYPE_COMMON_SHIFT 3
+#define PM8058_LED_TYPE_KEYPAD_SHIFT 4
+
+struct pm8058_led {
+ struct regmap *map;
+ u32 reg;
+ u32 ledtype;
+ struct led_classdev cdev;
+};
+
+static void pm8058_led_set(struct led_classdev *cled,
+ enum led_brightness value)
+{
+ struct pm8058_led *led;
+ int ret = 0;
+ unsigned int mask = 0;
+ unsigned int val = 0;
+
+ led = container_of(cled, struct pm8058_led, cdev);
+ switch (led->ledtype) {
+ case PM8058_LED_TYPE_COMMON:
+ mask = PM8058_LED_TYPE_COMMON_MASK;
+ val = value << PM8058_LED_TYPE_COMMON_SHIFT;
+ break;
+ case PM8058_LED_TYPE_KEYPAD:
+ case PM8058_LED_TYPE_FLASH:
+ mask = PM8058_LED_TYPE_KEYPAD_MASK;
+ val = value << PM8058_LED_TYPE_KEYPAD_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ ret = regmap_update_bits(led->map, led->reg, mask, val);
+ if (ret)
+ pr_err("Failed to set LED brightness\n");
+}
+
+static enum led_brightness pm8058_led_get(struct led_classdev *cled)
+{
+ struct pm8058_led *led;
+ int ret;
+ unsigned int val;
+
+ led = container_of(cled, struct pm8058_led, cdev);
+
+ ret = regmap_read(led->map, led->reg, &val);
+ if (ret) {
+ pr_err("Failed to get LED brightness\n");
+ return LED_OFF;
+ }
+
+ switch (led->ledtype) {
+ case PM8058_LED_TYPE_COMMON:
+ val &= PM8058_LED_TYPE_COMMON_MASK;
+ val >>= PM8058_LED_TYPE_COMMON_SHIFT;
+ break;
+ case PM8058_LED_TYPE_KEYPAD:
+ case PM8058_LED_TYPE_FLASH:
+ val &= PM8058_LED_TYPE_KEYPAD_MASK;
+ val >>= PM8058_LED_TYPE_KEYPAD_SHIFT;
+ break;
+ default:
+ val = LED_OFF;
+ break;
+ }
+
+ return val;
+}
+
+static int pm8058_led_probe(struct platform_device *pdev)
+{
+ struct pm8058_led *led;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ struct regmap *map;
+ const char *state;
+ enum led_brightness maxbright;
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+
+ map = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!map) {
+ dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+ return -ENXIO;
+ }
+ led->map = map;
+
+ ret = of_property_read_u32(np, "reg", &led->reg);
+ if (ret) {
+ dev_err(&pdev->dev, "no register offset specified\n");
+ return -EINVAL;
+ }
+
+ /* Use the label property if present, otherwise the node name */
+ led->cdev.name = of_get_property(np, "label", NULL) ? : np->name;
+ led->cdev.default_trigger =
+ of_get_property(np, "linux,default-trigger", NULL);
+ led->cdev.brightness_set = pm8058_led_set;
+ led->cdev.brightness_get = pm8058_led_get;
+ if (led->ledtype == PM8058_LED_TYPE_COMMON)
+ maxbright = 31; /* 5 bits */
+ else
+ maxbright = 15; /* 4 bits */
+ led->cdev.max_brightness = maxbright;
+
+ state = of_get_property(np, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep")) {
+ led->cdev.brightness = pm8058_led_get(&led->cdev);
+ } else if (!strcmp(state, "on")) {
+ led->cdev.brightness = maxbright;
+ pm8058_led_set(&led->cdev, maxbright);
+ } else {
+ led->cdev.brightness = LED_OFF;
+ pm8058_led_set(&led->cdev, LED_OFF);
+ }
+ }
+
+ if (led->ledtype == PM8058_LED_TYPE_KEYPAD ||
+ led->ledtype == PM8058_LED_TYPE_FLASH)
+ led->cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register led \"%s\"\n",
+ led->cdev.name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id pm8058_leds_id_table[] = {
+ {
+ .compatible = "qcom,pm8058-led",
+ .data = (void *)PM8058_LED_TYPE_COMMON
+ },
+ {
+ .compatible = "qcom,pm8058-keypad-led",
+ .data = (void *)PM8058_LED_TYPE_KEYPAD
+ },
+ {
+ .compatible = "qcom,pm8058-flash-led",
+ .data = (void *)PM8058_LED_TYPE_FLASH
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pm8058_leds_id_table);
+
+static struct platform_driver pm8058_led_driver = {
+ .probe = pm8058_led_probe,
+ .driver = {
+ .name = "pm8058-leds",
+ .of_match_table = pm8058_leds_id_table,
+ },
+};
+module_platform_driver(pm8058_led_driver);
+
+MODULE_DESCRIPTION("PM8058 LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pm8058-leds");
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 61c68a1f054a..2f5d5f4a4c75 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK
+ depends on BLOCK && HAS_DMA
help
Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22cf1a5..1f6b6521016a 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o
+obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 9ebd2cfbd849..1cac0f8bc0dc 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,8 @@
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
+#include "lightnvm.h"
+
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
@@ -581,6 +583,8 @@ static int nvm_core_init(struct nvm_dev *dev)
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
+ blk_queue_logical_block_size(dev->q, dev->sec_size);
+
return 0;
err_fmtype:
kfree(dev->lun_map);
@@ -596,15 +600,19 @@ static void nvm_free_mgr(struct nvm_dev *dev)
dev->mt = NULL;
}
-static void nvm_free(struct nvm_dev *dev)
+void nvm_free(struct nvm_dev *dev)
{
if (!dev)
return;
nvm_free_mgr(dev);
+ if (dev->dma_pool)
+ dev->ops->destroy_dma_pool(dev->dma_pool);
+
kfree(dev->lptbl);
kfree(dev->lun_map);
+ kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
@@ -651,30 +659,19 @@ err:
static void nvm_exit(struct nvm_dev *dev)
{
- if (dev->dma_pool)
- dev->ops->destroy_dma_pool(dev->dma_pool);
- nvm_free(dev);
+ nvm_sysfs_unregister_dev(dev);
+}
- pr_info("nvm: successfully unloaded\n");
+struct nvm_dev *nvm_alloc_dev(int node)
+{
+ return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
+EXPORT_SYMBOL(nvm_alloc_dev);
-int nvm_register(struct request_queue *q, char *disk_name,
- struct nvm_dev_ops *ops)
+int nvm_register(struct nvm_dev *dev)
{
- struct nvm_dev *dev;
int ret;
- if (!ops->identity)
- return -EINVAL;
-
- dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->q = q;
- dev->ops = ops;
- strncpy(dev->name, disk_name, DISK_NAME_LEN);
-
ret = nvm_init(dev);
if (ret)
goto err_init;
@@ -694,6 +691,10 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
}
+ ret = nvm_sysfs_register_dev(dev);
+ if (ret)
+ goto err_ppalist;
+
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
ret = nvm_get_sysblock(dev, &dev->sb);
if (!ret)
@@ -710,31 +711,21 @@ int nvm_register(struct request_queue *q, char *disk_name,
up_write(&nvm_lock);
return 0;
+err_ppalist:
+ dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
kfree(dev->lun_map);
- kfree(dev);
return ret;
}
EXPORT_SYMBOL(nvm_register);
-void nvm_unregister(char *disk_name)
+void nvm_unregister(struct nvm_dev *dev)
{
- struct nvm_dev *dev;
-
down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(disk_name);
- if (!dev) {
- pr_err("nvm: could not find device %s to unregister\n",
- disk_name);
- up_write(&nvm_lock);
- return;
- }
-
list_del(&dev->devices);
up_write(&nvm_lock);
nvm_exit(dev);
- kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -1171,27 +1162,10 @@ static struct miscdevice _nvm_misc = {
.nodename = "lightnvm/control",
.fops = &_ctl_fops,
};
+module_misc_device(_nvm_misc);
MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
-static int __init nvm_mod_init(void)
-{
- int ret;
-
- ret = misc_register(&_nvm_misc);
- if (ret)
- pr_err("nvm: misc_register failed for control device");
-
- return ret;
-}
-
-static void __exit nvm_mod_exit(void)
-{
- misc_deregister(&_nvm_misc);
-}
-
MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
-module_init(nvm_mod_init);
-module_exit(nvm_mod_exit);
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
new file mode 100644
index 000000000000..305c181509a6
--- /dev/null
+++ b/drivers/lightnvm/lightnvm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 CNEX Labs. All rights reserved.
+ * Initial release: Matias Bjorling <matias@cnexlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#ifndef LIGHTNVM_H
+#define LIGHTNVM_H
+
+#include <linux/lightnvm.h>
+
+/* core -> sysfs.c */
+int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
+void nvm_sysfs_unregister_dev(struct nvm_dev *);
+int nvm_sysfs_register(void);
+void nvm_sysfs_unregister(void);
+
+/* sysfs -> core */
+void nvm_free(struct nvm_dev *);
+
+#endif
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
new file mode 100644
index 000000000000..0338c27ab95a
--- /dev/null
+++ b/drivers/lightnvm/sysfs.c
@@ -0,0 +1,198 @@
+#include <linux/kernel.h>
+#include <linux/lightnvm.h>
+#include <linux/miscdevice.h>
+#include <linux/kobject.h>
+#include <linux/blk-mq.h>
+
+#include "lightnvm.h"
+
+static ssize_t nvm_dev_attr_show(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
+ struct nvm_id *id = &ndev->identity;
+ struct nvm_id_group *grp = &id->groups[0];
+ struct attribute *attr = &dattr->attr;
+
+ if (strcmp(attr->name, "version") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+ } else if (strcmp(attr->name, "vendor_opcode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+ } else if (strcmp(attr->name, "capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+ } else if (strcmp(attr->name, "device_mode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+ } else if (strcmp(attr->name, "media_manager") == 0) {
+ if (!ndev->mt)
+ return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+ return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+ } else if (strcmp(attr->name, "ppa_format") == 0) {
+ return scnprintf(page, PAGE_SIZE,
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ id->ppaf.ch_offset, id->ppaf.ch_len,
+ id->ppaf.lun_offset, id->ppaf.lun_len,
+ id->ppaf.pln_offset, id->ppaf.pln_len,
+ id->ppaf.blk_offset, id->ppaf.blk_len,
+ id->ppaf.pg_offset, id->ppaf.pg_len,
+ id->ppaf.sect_offset, id->ppaf.sect_len);
+ } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+ } else if (strcmp(attr->name, "flash_media_type") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+ } else if (strcmp(attr->name, "num_channels") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+ } else if (strcmp(attr->name, "num_luns") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+ } else if (strcmp(attr->name, "num_planes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+ } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ } else if (strcmp(attr->name, "num_pages") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+ } else if (strcmp(attr->name, "page_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+ } else if (strcmp(attr->name, "hw_sector_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+ } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+ } else if (strcmp(attr->name, "read_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+ } else if (strcmp(attr->name, "read_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+ } else if (strcmp(attr->name, "prog_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+ } else if (strcmp(attr->name, "prog_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+ } else if (strcmp(attr->name, "erase_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+ } else if (strcmp(attr->name, "erase_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+ } else if (strcmp(attr->name, "multiplane_modes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+ } else if (strcmp(attr->name, "media_capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+ } else if (strcmp(attr->name, "max_phys_secs") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ ndev->ops->max_phys_sect);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ attr->name);
+ }
+}
+
+#define NVM_DEV_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+static NVM_DEV_ATTR_RO(version);
+static NVM_DEV_ATTR_RO(vendor_opcode);
+static NVM_DEV_ATTR_RO(capabilities);
+static NVM_DEV_ATTR_RO(device_mode);
+static NVM_DEV_ATTR_RO(ppa_format);
+static NVM_DEV_ATTR_RO(media_manager);
+
+static NVM_DEV_ATTR_RO(media_type);
+static NVM_DEV_ATTR_RO(flash_media_type);
+static NVM_DEV_ATTR_RO(num_channels);
+static NVM_DEV_ATTR_RO(num_luns);
+static NVM_DEV_ATTR_RO(num_planes);
+static NVM_DEV_ATTR_RO(num_blocks);
+static NVM_DEV_ATTR_RO(num_pages);
+static NVM_DEV_ATTR_RO(page_size);
+static NVM_DEV_ATTR_RO(hw_sector_size);
+static NVM_DEV_ATTR_RO(oob_sector_size);
+static NVM_DEV_ATTR_RO(read_typ);
+static NVM_DEV_ATTR_RO(read_max);
+static NVM_DEV_ATTR_RO(prog_typ);
+static NVM_DEV_ATTR_RO(prog_max);
+static NVM_DEV_ATTR_RO(erase_typ);
+static NVM_DEV_ATTR_RO(erase_max);
+static NVM_DEV_ATTR_RO(multiplane_modes);
+static NVM_DEV_ATTR_RO(media_capabilities);
+static NVM_DEV_ATTR_RO(max_phys_secs);
+
+#define NVM_DEV_ATTR(_name) (dev_attr_##_name##)
+
+static struct attribute *nvm_dev_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_vendor_opcode.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_device_mode.attr,
+ &dev_attr_media_manager.attr,
+
+ &dev_attr_ppa_format.attr,
+ &dev_attr_media_type.attr,
+ &dev_attr_flash_media_type.attr,
+ &dev_attr_num_channels.attr,
+ &dev_attr_num_luns.attr,
+ &dev_attr_num_planes.attr,
+ &dev_attr_num_blocks.attr,
+ &dev_attr_num_pages.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_hw_sector_size.attr,
+ &dev_attr_oob_sector_size.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ &dev_attr_prog_typ.attr,
+ &dev_attr_prog_max.attr,
+ &dev_attr_erase_typ.attr,
+ &dev_attr_erase_max.attr,
+ &dev_attr_multiplane_modes.attr,
+ &dev_attr_media_capabilities.attr,
+ &dev_attr_max_phys_secs.attr,
+ NULL,
+};
+
+static struct attribute_group nvm_dev_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+};
+
+static const struct attribute_group *nvm_dev_attr_groups[] = {
+ &nvm_dev_attr_group,
+ NULL,
+};
+
+static void nvm_dev_release(struct device *device)
+{
+ struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
+ struct request_queue *q = dev->q;
+
+ pr_debug("nvm/sysfs: `nvm_dev_release`\n");
+
+ blk_mq_unregister_dev(device, q);
+
+ nvm_free(dev);
+}
+
+static struct device_type nvm_type = {
+ .name = "lightnvm",
+ .groups = nvm_dev_attr_groups,
+ .release = nvm_dev_release,
+};
+
+int nvm_sysfs_register_dev(struct nvm_dev *dev)
+{
+ int ret;
+
+ if (!dev->parent_dev)
+ return 0;
+
+ dev->dev.parent = dev->parent_dev;
+ dev_set_name(&dev->dev, "%s", dev->name);
+ dev->dev.type = &nvm_type;
+ device_initialize(&dev->dev);
+ ret = device_add(&dev->dev);
+
+ if (!ret)
+ blk_mq_register_dev(&dev->dev, dev->q);
+
+ return ret;
+}
+
+void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
+{
+ if (dev && dev->parent_dev)
+ kobject_put(&dev->dev.kobj);
+}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6819f0fc608..3f041b187033 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -236,7 +236,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
unsigned int irq;
irq = irq_create_mapping(NULL, line);
- if (irq != NO_IRQ) {
+ if (!irq) {
dev->interrupt[index].start = irq;
dev->interrupt[index].flags = IORESOURCE_IRQ;
dev->interrupt[index].name = dev_name(&dev->ofdev.dev);
@@ -299,7 +299,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
break;
res = &dev->interrupt[j];
irq = irq_of_parse_and_map(np, i++);
- if (irq == NO_IRQ)
+ if (!irq)
break;
res->start = irq;
res->flags = IORESOURCE_IRQ;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 465c52219639..775527135b93 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -427,7 +427,7 @@ static int rackmeter_probe(struct macio_dev* mdev,
rm->irq = macio_irq(mdev, 1);
#else
rm->irq = irq_of_parse_and_map(i2s, 1);
- if (rm->irq == NO_IRQ ||
+ if (!rm->irq ||
of_address_to_resource(i2s, 0, &ri2s) ||
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index d6f72c826c1c..08edb2c25b60 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -279,7 +279,7 @@ int smu_queue_cmd(struct smu_cmd *cmd)
spin_unlock_irqrestore(&smu->lock, flags);
/* Workaround for early calls when irq isn't available */
- if (!smu_irq_inited || smu->db_irq == NO_IRQ)
+ if (!smu_irq_inited || !smu->db_irq)
smu_spinwait_cmd(cmd);
return 0;
@@ -498,8 +498,8 @@ int __init smu_init (void)
INIT_LIST_HEAD(&smu->cmd_list);
INIT_LIST_HEAD(&smu->cmd_i2c_list);
smu->of_node = np;
- smu->db_irq = NO_IRQ;
- smu->msg_irq = NO_IRQ;
+ smu->db_irq = 0;
+ smu->msg_irq = 0;
/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
* 32 bits value safely
@@ -587,13 +587,13 @@ static int smu_late_init(void)
if (smu->db_node) {
smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
- if (smu->db_irq == NO_IRQ)
+ if (!smu->db_irq)
printk(KERN_ERR "smu: failed to map irq for node %s\n",
smu->db_node->full_name);
}
if (smu->msg_node) {
smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
- if (smu->msg_irq == NO_IRQ)
+ if (!smu->msg_irq)
printk(KERN_ERR "smu: failed to map irq for node %s\n",
smu->msg_node->full_name);
}
@@ -602,23 +602,23 @@ static int smu_late_init(void)
* Try to request the interrupts
*/
- if (smu->db_irq != NO_IRQ) {
+ if (smu->db_irq) {
if (request_irq(smu->db_irq, smu_db_intr,
IRQF_SHARED, "SMU doorbell", smu) < 0) {
printk(KERN_WARNING "SMU: can't "
"request interrupt %d\n",
smu->db_irq);
- smu->db_irq = NO_IRQ;
+ smu->db_irq = 0;
}
}
- if (smu->msg_irq != NO_IRQ) {
+ if (smu->msg_irq) {
if (request_irq(smu->msg_irq, smu_msg_intr,
IRQF_SHARED, "SMU message", smu) < 0) {
printk(KERN_WARNING "SMU: can't "
"request interrupt %d\n",
smu->msg_irq);
- smu->msg_irq = NO_IRQ;
+ smu->msg_irq = 0;
}
}
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bad18130f125..2088e23a8002 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -209,7 +209,7 @@ static int __init via_cuda_start(void)
cuda_irq = IRQ_MAC_ADB;
#else
cuda_irq = irq_of_parse_and_map(vias, 0);
- if (cuda_irq == NO_IRQ) {
+ if (!cuda_irq) {
printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
vias->full_name);
return -ENODEV;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index f8b6d1403c16..91081dcdc272 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -145,7 +145,7 @@ static int pmu_fully_inited;
static int pmu_has_adb;
static struct device_node *gpio_node;
static unsigned char __iomem *gpio_reg;
-static int gpio_irq = NO_IRQ;
+static int gpio_irq = 0;
static int gpio_irq_enabled = -1;
static volatile int pmu_suspended;
static spinlock_t pmu_lock;
@@ -402,7 +402,7 @@ static int __init via_pmu_start(void)
batt_req.complete = 1;
irq = irq_of_parse_and_map(vias, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "via-pmu: can't map interrupt\n");
return -ENODEV;
}
@@ -424,7 +424,7 @@ static int __init via_pmu_start(void)
if (gpio_node)
gpio_irq = irq_of_parse_and_map(gpio_node, 0);
- if (gpio_irq != NO_IRQ) {
+ if (gpio_irq) {
if (request_irq(gpio_irq, gpio1_interrupt,
IRQF_NO_SUSPEND, "GPIO1 ADB",
(void *)0))
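
All of the macintosh conversions above rely on the same convention: virtual IRQ number 0 means "no interrupt", and 0 is what irq_of_parse_and_map() and irq_create_mapping() return on failure, so `irq == NO_IRQ` becomes `!irq` and `irq != NO_IRQ` becomes `irq`. A minimal sketch of the resulting idiom; demo_setup_irq, demo_isr and the "demo" name are hypothetical:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/of_irq.h>

    static int demo_setup_irq(struct device_node *np, irq_handler_t demo_isr, void *data)
    {
            unsigned int irq = irq_of_parse_and_map(np, 0);

            if (!irq)                       /* was: irq == NO_IRQ */
                    return -ENODEV;

            return request_irq(irq, demo_isr, 0, "demo", data);
    }
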
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 7817d40d81e7..11eebfe8a4cb 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -15,6 +15,16 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config PLATFORM_MHU
+ tristate "Platform MHU Mailbox"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Say Y here if you want to build a platform-specific variant of
+ the MHU controller driver.
+ The controller has a maximum of 3 mailbox channels, the last of
+ which can be used in Secure mode only.
+
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 66c38e300dfc..ace6fed8fea9 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
+
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
new file mode 100644
index 000000000000..e13201a5cec6
--- /dev/null
+++ b/drivers/mailbox/platform_mhu.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2016 BayLibre SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Synchronised with arm_mhu.c from:
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_SET_OFS 0x0
+#define INTR_STAT_OFS 0x4
+#define INTR_CLR_OFS 0x8
+
+#define MHU_SEC_OFFSET 0x0
+#define MHU_LP_OFFSET 0xc
+#define MHU_HP_OFFSET 0x18
+#define TX_REG_OFFSET 0x24
+
+#define MHU_CHANS 3
+
+struct platform_mhu_link {
+ int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct platform_mhu {
+ void __iomem *base;
+ struct platform_mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t platform_mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool platform_mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int platform_mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int platform_mhu_startup(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, platform_mhu_rx_interrupt,
+ IRQF_SHARED, "platform_mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to acquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void platform_mhu_shutdown(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static const struct mbox_chan_ops platform_mhu_ops = {
+ .send_data = platform_mhu_send_data,
+ .startup = platform_mhu_startup,
+ .shutdown = platform_mhu_shutdown,
+ .last_tx_done = platform_mhu_last_tx_done,
+};
+
+static int platform_mhu_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct platform_mhu *mhu;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int platform_mhu_reg[MHU_CHANS] = {
+ MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
+ };
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mhu->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = platform_get_irq(pdev, i);
+ if (mhu->mlink[i].irq < 0) {
+ dev_err(dev, "failed to get irq%d\n", i);
+ return mhu->mlink[i].irq;
+ }
+ mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &platform_mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ platform_set_drvdata(pdev, mhu);
+
+ err = mbox_controller_register(&mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "Platform MHU Mailbox registered\n");
+ return 0;
+}
+
+static int platform_mhu_remove(struct platform_device *pdev)
+{
+ struct platform_mhu *mhu = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mhu->mbox);
+
+ return 0;
+}
+
+static const struct of_device_id platform_mhu_dt_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-mhu", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, platform_mhu_dt_ids);
+
+static struct platform_driver platform_mhu_driver = {
+ .probe = platform_mhu_probe,
+ .remove = platform_mhu_remove,
+ .driver = {
+ .name = "platform-mhu",
+ .of_match_table = platform_mhu_dt_ids,
+ },
+};
+
+module_platform_driver(platform_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:platform-mhu");
+MODULE_DESCRIPTION("Platform MHU Driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
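
On the consumer side, a driver would talk to this controller through the generic mailbox client API rather than touching the MHU registers directly. A minimal, hedged sketch of such a client; demo_cl, demo_rx and demo_bind are hypothetical and error handling is trimmed:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mailbox_client.h>

    static void demo_rx(struct mbox_client *cl, void *msg)
    {
            dev_info(cl->dev, "mhu rx: 0x%x\n", *(u32 *)msg);
    }

    static int demo_bind(struct device *dev)
    {
            static struct mbox_client demo_cl;
            struct mbox_chan *chan;
            u32 cmd = 0x1;
            int ret;

            demo_cl.dev = dev;
            demo_cl.rx_callback = demo_rx;
            demo_cl.tx_block = true;
            demo_cl.tx_tout = 500;                          /* ms */

            chan = mbox_request_channel(&demo_cl, 0);       /* index into the "mboxes" DT property */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = mbox_send_message(chan, &cmd);            /* ends up in platform_mhu_send_data() */
            mbox_free_channel(chan);
            return ret < 0 ? ret : 0;
    }
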
diff --git a/drivers/mcb/Kconfig b/drivers/mcb/Kconfig
index e9a6976e1010..76d9c51de6c9 100644
--- a/drivers/mcb/Kconfig
+++ b/drivers/mcb/Kconfig
@@ -28,4 +28,13 @@ config MCB_PCI
If build as a module, the module is called mcb-pci.ko
+config MCB_LPC
+ tristate "LPC (non PCI) based MCB carrier"
+ default n
+ help
+
+ This is an MCB carrier on an LPC or other non-PCI device.
+
+ If built as a module, the module is called mcb-lpc.ko
+
endif # MCB
diff --git a/drivers/mcb/Makefile b/drivers/mcb/Makefile
index 1ae141311def..bcc7745774ab 100644
--- a/drivers/mcb/Makefile
+++ b/drivers/mcb/Makefile
@@ -5,3 +5,4 @@ mcb-y += mcb-core.o
mcb-y += mcb-parse.o
obj-$(CONFIG_MCB_PCI) += mcb-pci.o
+obj-$(CONFIG_MCB_LPC) += mcb-lpc.o
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 6f2c8522e14a..921a5d2a802b 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -233,6 +233,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
dev->dev.bus = &mcb_bus_type;
dev->dev.parent = bus->dev.parent;
dev->dev.release = mcb_release_dev;
+ dev->dma_dev = bus->carrier;
device_id = dev->id;
dev_set_name(&dev->dev, "mcb%d-16z%03d-%d:%d:%d",
@@ -369,7 +370,6 @@ struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
if (!dev)
return NULL;
- INIT_LIST_HEAD(&dev->bus_list);
dev->bus = bus;
return dev;
@@ -405,20 +405,6 @@ static int __mcb_bus_add_devices(struct device *dev, void *data)
return 0;
}
-static int __mcb_bus_add_child(struct device *dev, void *data)
-{
- struct mcb_device *mdev = to_mcb_device(dev);
- struct mcb_bus *child;
-
- BUG_ON(!mdev->is_added);
- child = mdev->subordinate;
-
- if (child)
- mcb_bus_add_devices(child);
-
- return 0;
-}
-
/**
* mcb_bus_add_devices() - Add devices in the bus' internal device list
* @bus: The @mcb_bus we add the devices
@@ -428,8 +414,6 @@ static int __mcb_bus_add_child(struct device *dev, void *data)
void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
- bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_child);
-
}
EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index 5254e0285725..d6e6933b19f1 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -112,6 +112,15 @@ struct chameleon_bdd {
u32 size;
} __packed;
+struct chameleon_bar {
+ u32 addr;
+ u32 size;
+};
+
+#define BAR_CNT(x) ((x) & 0x07)
+#define CHAMELEON_BAR_MAX 6
+#define BAR_DESC_SIZE(x) ((x) * sizeof(struct chameleon_bar) + sizeof(__le32))
+
int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
void __iomem *base);
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
new file mode 100644
index 000000000000..d072c088ce73
--- /dev/null
+++ b/drivers/mcb/mcb-lpc.c
@@ -0,0 +1,158 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Andreas Werner <andreas.werner@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/mcb.h>
+#include <linux/io.h>
+#include "mcb-internal.h"
+
+struct priv {
+ struct mcb_bus *bus;
+ struct resource *mem;
+ void __iomem *base;
+};
+
+static int mcb_lpc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct priv *priv;
+ int ret = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!priv->mem) {
+ dev_err(&pdev->dev, "No Memory resource\n");
+ return -ENODEV;
+ }
+
+ res = devm_request_mem_region(&pdev->dev, priv->mem->start,
+ resource_size(priv->mem),
+ KBUILD_MODNAME);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to request IO memory\n");
+ return -EBUSY;
+ }
+
+ priv->base = devm_ioremap(&pdev->dev, priv->mem->start,
+ resource_size(priv->mem));
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->bus = mcb_alloc_bus(&pdev->dev);
+ if (IS_ERR(priv->bus))
+ return PTR_ERR(priv->bus);
+
+ ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base);
+ if (ret < 0) {
+ mcb_release_bus(priv->bus);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Found %d cells\n", ret);
+
+ mcb_bus_add_devices(priv->bus);
+
+ return 0;
+
+}
+
+static int mcb_lpc_remove(struct platform_device *pdev)
+{
+ struct priv *priv = platform_get_drvdata(pdev);
+
+ mcb_release_bus(priv->bus);
+
+ return 0;
+}
+
+static struct platform_device *mcb_lpc_pdev;
+
+static int mcb_lpc_create_platform_device(const struct dmi_system_id *id)
+{
+ struct resource *res = id->driver_data;
+ int ret;
+
+ mcb_lpc_pdev = platform_device_alloc("mcb-lpc", -1);
+ if (!mcb_lpc_pdev)
+ return -ENOMEM;
+
+ ret = platform_device_add_resources(mcb_lpc_pdev, res, 1);
+ if (ret)
+ goto out_put;
+
+ ret = platform_device_add(mcb_lpc_pdev);
+ if (ret)
+ goto out_put;
+
+ return 0;
+
+out_put:
+ platform_device_put(mcb_lpc_pdev);
+ return ret;
+}
+
+static struct resource sc24_fpga_resource = {
+ .start = 0xe000e000,
+ .end = 0xe000e000 + CHAM_HEADER_SIZE,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_driver mcb_lpc_driver = {
+ .driver = {
+ .name = "mcb-lpc",
+ },
+ .probe = mcb_lpc_probe,
+ .remove = mcb_lpc_remove,
+};
+
+static const struct dmi_system_id mcb_lpc_dmi_table[] = {
+ {
+ .ident = "SC24",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "14SC24"),
+ },
+ .driver_data = (void *)&sc24_fpga_resource,
+ .callback = mcb_lpc_create_platform_device,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
+
+static int __init mcb_lpc_init(void)
+{
+ if (!dmi_check_system(mcb_lpc_dmi_table))
+ return -ENODEV;
+
+ return platform_driver_register(&mcb_lpc_driver);
+}
+
+static void __exit mcb_lpc_exit(void)
+{
+ platform_device_unregister(mcb_lpc_pdev);
+ platform_driver_unregister(&mcb_lpc_driver);
+}
+
+module_init(mcb_lpc_init);
+module_exit(mcb_lpc_exit);
+
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MCB over LPC support");
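
The module only loads on hardware it knows about: dmi_check_system() walks mcb_lpc_dmi_table, and for every matching entry it runs the .callback, which here instantiates the "mcb-lpc" platform device with the board's fixed FPGA window. Supporting another board in this scheme would just mean another table entry; a hedged sketch (the SC31 board, its DMI product string and its address are invented for illustration):

    /* Hypothetical second board entry; address and DMI strings are examples only. */
    static struct resource demo_fpga_resource = {
            .start  = 0xd000e000,
            .end    = 0xd000e000 + CHAM_HEADER_SIZE,
            .flags  = IORESOURCE_MEM,
    };

    static const struct dmi_system_id demo_mcb_lpc_dmi_table[] = {
            {
                    .ident          = "SC31",
                    .matches        = {
                            DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
                            DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
                    },
                    .driver_data    = (void *)&demo_fpga_resource,
                    .callback       = mcb_lpc_create_platform_device,
            },
            {}
    };
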
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index dbecbed0d258..4ca2739b4fad 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -26,19 +26,20 @@ static inline uint32_t get_next_dtype(void __iomem *p)
}
static int chameleon_parse_bdd(struct mcb_bus *bus,
- phys_addr_t mapbase,
+ struct chameleon_bar *cb,
void __iomem *base)
{
return 0;
}
static int chameleon_parse_gdd(struct mcb_bus *bus,
- phys_addr_t mapbase,
- void __iomem *base)
+ struct chameleon_bar *cb,
+ void __iomem *base, int bar_count)
{
struct chameleon_gdd __iomem *gdd =
(struct chameleon_gdd __iomem *) base;
struct mcb_device *mdev;
+ u32 dev_mapbase;
u32 offset;
u32 size;
int ret;
@@ -61,13 +62,39 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
mdev->group = GDD_GRP(reg2);
mdev->inst = GDD_INS(reg2);
+ /*
+ * If the BAR is missing, dev_mapbase is zero, or the device is
+ * I/O mapped, just print a warning and go on with the next
+ * device instead of stopping the GDD parser completely.
+ */
+ if (mdev->bar > bar_count - 1) {
+ pr_info("No BAR for 16z%03d\n", mdev->id);
+ ret = 0;
+ goto err;
+ }
+
+ dev_mapbase = cb[mdev->bar].addr;
+ if (!dev_mapbase) {
+ pr_info("BAR not assigned for 16z%03d\n", mdev->id);
+ ret = 0;
+ goto err;
+ }
+
+ if (dev_mapbase & 0x01) {
+ pr_info("IO mapped Device (16z%03d) not yet supported\n",
+ mdev->id);
+ ret = 0;
+ goto err;
+ }
+
pr_debug("Found a 16z%03d\n", mdev->id);
mdev->irq.start = GDD_IRQ(reg1);
mdev->irq.end = GDD_IRQ(reg1);
mdev->irq.flags = IORESOURCE_IRQ;
- mdev->mem.start = mapbase + offset;
+ mdev->mem.start = dev_mapbase + offset;
+
mdev->mem.end = mdev->mem.start + size - 1;
mdev->mem.flags = IORESOURCE_MEM;
@@ -85,13 +112,76 @@ err:
return ret;
}
+static void chameleon_parse_bar(void __iomem *base,
+ struct chameleon_bar *cb, int bar_count)
+{
+ char __iomem *p = base;
+ int i;
+
+ /* skip reg1 */
+ p += sizeof(__le32);
+
+ for (i = 0; i < bar_count; i++) {
+ cb[i].addr = readl(p);
+ cb[i].size = readl(p + 4);
+
+ p += sizeof(struct chameleon_bar);
+ }
+}
+
+static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
+ struct chameleon_bar **cb)
+{
+ struct chameleon_bar *c;
+ int bar_count;
+ __le32 reg;
+ u32 dtype;
+
+ /*
+ * For those devices which are not connected
+ * to the PCI bus (e.g. LPC) there is a BAR
+ * descriptor located directly after the
+ * chameleon header. This descriptor is
+ * comparable to a PCI header.
+ */
+ dtype = get_next_dtype(*base);
+ if (dtype == CHAMELEON_DTYPE_BAR) {
+ reg = readl(*base);
+
+ bar_count = BAR_CNT(reg);
+ if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
+ return -ENODEV;
+
+ c = kcalloc(bar_count, sizeof(struct chameleon_bar),
+ GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ chameleon_parse_bar(*base, c, bar_count);
+ *base += BAR_DESC_SIZE(bar_count);
+ } else {
+ c = kzalloc(sizeof(struct chameleon_bar), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ bar_count = 1;
+ c->addr = mapbase;
+ }
+
+ *cb = c;
+
+ return bar_count;
+}
+
int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
void __iomem *base)
{
- char __iomem *p = base;
struct chameleon_fpga_header *header;
- uint32_t dtype;
+ struct chameleon_bar *cb;
+ char __iomem *p = base;
int num_cells = 0;
+ uint32_t dtype;
+ int bar_count;
int ret = 0;
u32 hsize;
@@ -108,8 +198,8 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
if (header->magic != CHAMELEONV2_MAGIC) {
pr_err("Unsupported chameleon version 0x%x\n",
header->magic);
- kfree(header);
- return -ENODEV;
+ ret = -ENODEV;
+ goto free_header;
}
p += hsize;
@@ -119,16 +209,20 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
snprintf(bus->name, CHAMELEON_FILENAME_LEN + 1, "%s",
header->filename);
+ bar_count = chameleon_get_bar(&p, mapbase, &cb);
+ if (bar_count < 0)
+ goto free_header;
+
for_each_chameleon_cell(dtype, p) {
switch (dtype) {
case CHAMELEON_DTYPE_GENERAL:
- ret = chameleon_parse_gdd(bus, mapbase, p);
+ ret = chameleon_parse_gdd(bus, cb, p, bar_count);
if (ret < 0)
- goto out;
+ goto free_bar;
p += sizeof(struct chameleon_gdd);
break;
case CHAMELEON_DTYPE_BRIDGE:
- chameleon_parse_bdd(bus, mapbase, p);
+ chameleon_parse_bdd(bus, cb, p);
p += sizeof(struct chameleon_bdd);
break;
case CHAMELEON_DTYPE_END:
@@ -136,8 +230,8 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
default:
pr_err("Invalid chameleon descriptor type 0x%x\n",
dtype);
- kfree(header);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_bar;
}
num_cells++;
}
@@ -145,11 +239,15 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
if (num_cells == 0)
num_cells = -EINVAL;
+ kfree(cb);
kfree(header);
return num_cells;
-out:
+free_bar:
+ kfree(cb);
+free_header:
kfree(header);
+
return ret;
}
EXPORT_SYMBOL_GPL(chameleon_parse_cells);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index b15a0349cd97..af4d2f26f1c6 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -46,6 +46,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "Failed to enable PCI device\n");
return -ENODEV;
}
+ pci_set_master(pdev);
priv->mapbase = pci_resource_start(pdev, 0);
if (!priv->mapbase) {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 76f7534d1dd1..81d3db40cd7b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -361,12 +361,8 @@ static void __btree_node_write_done(struct closure *cl)
static void btree_node_write_done(struct closure *cl)
{
struct btree *b = container_of(cl, struct btree, io);
- struct bio_vec *bv;
- int n;
-
- bio_for_each_segment_all(bv, b->bio, n)
- __free_page(bv->bv_page);
+ bio_free_pages(b->bio);
__btree_node_write_done(cl);
}
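
This hunk, and the bcache, dm-log-writes and request.c hunks that follow, all collapse the same open-coded page-freeing loop into the block layer's bio_free_pages() helper. Roughly, the helper is just the loop it replaces (a sketch, not the exact upstream source):

    /* Roughly what bio_free_pages() does: release every page attached to the bio. */
    void bio_free_pages(struct bio *bio)
    {
            struct bio_vec *bvec;
            int i;

            bio_for_each_segment_all(bvec, bio, i)
                    __free_page(bvec->bv_page);
    }
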
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c28df164701e..333a1e5f6ae6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -107,9 +107,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec bv, *bv2;
+ struct bio_vec bv;
struct bvec_iter iter;
- int i;
check = bio_clone(bio, GFP_NOIO);
if (!check)
@@ -136,8 +135,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
kunmap_atomic(p1);
}
- bio_for_each_segment_all(bv2, check, i)
- __free_page(bv2->bv_page);
+ bio_free_pages(check);
out_put:
bio_put(check);
}
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1881319f2298..5c4bddecfaf0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -44,11 +44,8 @@ static void write_moving_finish(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
- struct bio_vec *bv;
- int i;
- bio_for_each_segment_all(bv, bio, i)
- __free_page(bv->bv_page);
+ bio_free_pages(bio);
if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4b177fe11ebb..40ffe5e424b3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -694,13 +694,8 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);
- if (s->iop.bio) {
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, s->iop.bio, i)
- __free_page(bv->bv_page);
- }
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d9fd2a62e5f6..e51644e503a5 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -128,11 +128,8 @@ static void write_dirty_finish(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
- struct bio_vec *bv;
- int i;
- bio_for_each_segment_all(bv, &io->bio, i)
- __free_page(bv->bv_page);
+ bio_free_pages(&io->bio);
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 13041ee37ad6..2d826927a3bf 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1903,10 +1903,8 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
struct bitmap_counts *counts;
struct bitmap *bitmap = bitmap_create(mddev, slot);
- if (IS_ERR(bitmap)) {
- bitmap_free(bitmap);
+ if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- }
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 8625040bae92..125aedc3875f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -191,19 +191,6 @@ static void dm_bufio_unlock(struct dm_bufio_client *c)
mutex_unlock(&c->lock);
}
-/*
- * FIXME Move to sched.h?
- */
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-# define dm_bufio_cond_resched() \
-do { \
- if (unlikely(need_resched())) \
- _cond_resched(); \
-} while (0)
-#else
-# define dm_bufio_cond_resched() do { } while (0)
-#endif
-
/*----------------------------------------------------------------*/
/*
@@ -741,7 +728,7 @@ static void __flush_write_list(struct list_head *write_list)
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
submit_io(b, WRITE, b->block, write_endio);
- dm_bufio_cond_resched();
+ cond_resched();
}
blk_finish_plug(&plug);
}
@@ -780,7 +767,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
__unlink_buffer(b);
return b;
}
- dm_bufio_cond_resched();
+ cond_resched();
}
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
@@ -791,7 +778,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
__unlink_buffer(b);
return b;
}
- dm_bufio_cond_resched();
+ cond_resched();
}
return NULL;
@@ -923,7 +910,7 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
return;
__write_dirty_buffer(b, write_list);
- dm_bufio_cond_resched();
+ cond_resched();
}
}
@@ -973,7 +960,7 @@ static void __check_watermark(struct dm_bufio_client *c,
return;
__free_buffer_wake(b);
- dm_bufio_cond_resched();
+ cond_resched();
}
if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
@@ -1170,7 +1157,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
submit_io(b, READ, b->block, read_endio);
dm_bufio_release(b);
- dm_bufio_cond_resched();
+ cond_resched();
if (!n_blocks)
goto flush_plug;
@@ -1291,7 +1278,7 @@ again:
!test_bit(B_WRITING, &b->state))
__relink_lru(b, LIST_CLEAN);
- dm_bufio_cond_resched();
+ cond_resched();
/*
* If we dropped the lock, the list is no longer consistent,
@@ -1574,7 +1561,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
freed++;
if (!--nr_to_scan || ((count - freed) <= retain_target))
return freed;
- dm_bufio_cond_resched();
+ cond_resched();
}
}
return freed;
@@ -1808,7 +1795,7 @@ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
if (__try_evict_buffer(b, 0))
count--;
- dm_bufio_cond_resched();
+ cond_resched();
}
dm_bufio_unlock(c);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 3970cda10080..695577812cf6 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -140,6 +140,13 @@ struct dm_cache_metadata {
* the device.
*/
bool fail_io:1;
+
+ /*
+ * These structures are used when loading metadata. They're too
+ * big to put on the stack.
+ */
+ struct dm_array_cursor mapping_cursor;
+ struct dm_array_cursor hint_cursor;
};
/*-------------------------------------------------------------------
@@ -1171,31 +1178,37 @@ static bool hints_array_available(struct dm_cache_metadata *cmd,
hints_array_initialized(cmd);
}
-static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+static int __load_mapping(struct dm_cache_metadata *cmd,
+ uint64_t cb, bool hints_valid,
+ struct dm_array_cursor *mapping_cursor,
+ struct dm_array_cursor *hint_cursor,
+ load_mapping_fn fn, void *context)
{
int r = 0;
- bool dirty;
- __le64 value;
- __le32 hint_value = 0;
+
+ __le64 mapping;
+ __le32 hint = 0;
+
+ __le64 *mapping_value_le;
+ __le32 *hint_value_le;
+
dm_oblock_t oblock;
unsigned flags;
- struct thunk *thunk = context;
- struct dm_cache_metadata *cmd = thunk->cmd;
- memcpy(&value, leaf, sizeof(value));
- unpack_value(value, &oblock, &flags);
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+ unpack_value(mapping, &oblock, &flags);
if (flags & M_VALID) {
- if (thunk->hints_valid) {
- r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
- cblock, &hint_value);
- if (r && r != -ENODATA)
- return r;
+ if (hints_valid) {
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
}
- dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
- r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
- dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
+ le32_to_cpu(hint), hints_valid);
+ if (r)
+ DMERR("policy couldn't load cblock");
}
return r;
@@ -1205,16 +1218,60 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
- struct thunk thunk;
+ int r;
+ uint64_t cb;
+
+ bool hints_valid = hints_array_available(cmd, policy);
+
+ if (from_cblock(cmd->cache_blocks) == 0)
+ /* Nothing to do */
+ return 0;
+
+ r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
+ if (r)
+ return r;
- thunk.fn = fn;
- thunk.context = context;
+ if (hints_valid) {
+ r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
+ if (r) {
+ dm_array_cursor_end(&cmd->mapping_cursor);
+ return r;
+ }
+ }
+
+ for (cb = 0; ; cb++) {
+ r = __load_mapping(cmd, cb, hints_valid,
+ &cmd->mapping_cursor, &cmd->hint_cursor,
+ fn, context);
+ if (r)
+ goto out;
+
+ /*
+ * We need to break out before we move the cursors.
+ */
+ if (cb >= (from_cblock(cmd->cache_blocks) - 1))
+ break;
- thunk.cmd = cmd;
- thunk.respect_dirty_flags = cmd->clean_when_opened;
- thunk.hints_valid = hints_array_available(cmd, policy);
+ r = dm_array_cursor_next(&cmd->mapping_cursor);
+ if (r) {
+ DMERR("dm_array_cursor_next for mapping failed");
+ goto out;
+ }
- return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+ if (hints_valid) {
+ r = dm_array_cursor_next(&cmd->hint_cursor);
+ if (r) {
+ DMERR("dm_array_cursor_next for hint failed");
+ goto out;
+ }
+ }
+ }
+out:
+ dm_array_cursor_end(&cmd->mapping_cursor);
+ if (hints_valid)
+ dm_array_cursor_end(&cmd->hint_cursor);
+
+ return r;
}
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
@@ -1368,10 +1425,24 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
/*----------------------------------------------------------------*/
-static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+static int get_hint(uint32_t index, void *value_le, void *context)
+{
+ uint32_t value;
+ struct dm_cache_policy *policy = context;
+
+ value = policy_get_hint(policy, to_cblock(index));
+ *((__le32 *) value_le) = cpu_to_le32(value);
+
+ return 0;
+}
+
+/*
+ * It's quicker to always delete the hint array, and recreate with
+ * dm_array_new().
+ */
+static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
- __le32 value;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
@@ -1380,63 +1451,23 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
return -EINVAL;
- if (!policy_unchanged(cmd, policy)) {
- strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
- memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
-
- hint_size = dm_cache_policy_get_hint_size(policy);
- if (!hint_size)
- return 0; /* short-circuit hints initialization */
- cmd->policy_hint_size = hint_size;
+ strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+ memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
- if (cmd->hint_root) {
- r = dm_array_del(&cmd->hint_info, cmd->hint_root);
- if (r)
- return r;
- }
+ hint_size = dm_cache_policy_get_hint_size(policy);
+ if (!hint_size)
+ return 0; /* short-circuit hints initialization */
+ cmd->policy_hint_size = hint_size;
- r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+ if (cmd->hint_root) {
+ r = dm_array_del(&cmd->hint_info, cmd->hint_root);
if (r)
return r;
-
- value = cpu_to_le32(0);
- __dm_bless_for_disk(&value);
- r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
- from_cblock(cmd->cache_blocks),
- &value, &cmd->hint_root);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
-{
- struct dm_cache_metadata *cmd = context;
- __le32 value = cpu_to_le32(hint);
- int r;
-
- __dm_bless_for_disk(&value);
-
- r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
- from_cblock(cblock), &value, &cmd->hint_root);
- cmd->changed = true;
-
- return r;
-}
-
-static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
-{
- int r;
-
- r = begin_hints(cmd, policy);
- if (r) {
- DMERR("begin_hints failed");
- return r;
}
- return policy_walk_mappings(policy, save_hint, cmd);
+ return dm_array_new(&cmd->hint_info, &cmd->hint_root,
+ from_cblock(cmd->cache_blocks),
+ get_hint, policy);
}
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
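
The rewritten loader walks the on-disk mapping and hint arrays with the dm-array cursor API instead of issuing a per-cblock lookup, so each btree is traversed once per array rather than once per block. The general shape of such a cursor walk, mirroring the calls used above; demo_walk and the visit callback are placeholders, and the include path is assumed to match dm-cache-metadata.c:

    #include "persistent-data/dm-array.h"   /* dm_array_info, dm_array_cursor */

    /* Sketch of a cursor walk over nr_entries little-endian values. */
    static int demo_walk(struct dm_array_info *info, dm_block_t root,
                         uint32_t nr_entries, int (*visit)(__le64 *value_le))
    {
            struct dm_array_cursor c;
            uint32_t i;
            int r;

            r = dm_array_cursor_begin(info, root, &c);
            if (r)
                    return r;

            for (i = 0; i < nr_entries; i++) {
                    __le64 *value_le;

                    dm_array_cursor_get_value(&c, (void **)&value_le);
                    r = visit(value_le);
                    if (r)
                            break;

                    if (i + 1 < nr_entries) {       /* don't step past the last entry */
                            r = dm_array_cursor_next(&c);
                            if (r)
                                    break;
                    }
            }

            dm_array_cursor_end(&c);
            return r;
    }
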
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 14aaaf059f06..2e8a8f1d8358 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -395,7 +395,7 @@ static void init_policy_functions(struct policy *p)
p->policy.set_dirty = wb_set_dirty;
p->policy.clear_dirty = wb_clear_dirty;
p->policy.load_mapping = wb_load_mapping;
- p->policy.walk_mappings = NULL;
+ p->policy.get_hint = NULL;
p->policy.remove_mapping = wb_remove_mapping;
p->policy.writeback_work = wb_writeback_work;
p->policy.force_mapping = wb_force_mapping;
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 2816018faa7f..808ee0e2b2c4 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -48,10 +48,10 @@ static inline int policy_load_mapping(struct dm_cache_policy *p,
return p->load_mapping(p, oblock, cblock, hint, hint_valid);
}
-static inline int policy_walk_mappings(struct dm_cache_policy *p,
- policy_walk_fn fn, void *context)
+static inline uint32_t policy_get_hint(struct dm_cache_policy *p,
+ dm_cblock_t cblock)
{
- return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+ return p->get_hint ? p->get_hint(p, cblock) : 0;
}
static inline int policy_writeback_work(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index cf48a617a3a4..c33f4a6e1d7d 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1359,6 +1359,11 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
spin_unlock_irqrestore(&mq->lock, flags);
}
+static unsigned random_level(dm_cblock_t cblock)
+{
+ return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+}
+
static int smq_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
uint32_t hint, bool hint_valid)
@@ -1369,47 +1374,21 @@ static int smq_load_mapping(struct dm_cache_policy *p,
e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
e->oblock = oblock;
e->dirty = false; /* this gets corrected in a minute */
- e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
+ e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
push(mq, e);
return 0;
}
-static int smq_save_hints(struct smq_policy *mq, struct queue *q,
- policy_walk_fn fn, void *context)
-{
- int r;
- unsigned level;
- struct entry *e;
-
- for (level = 0; level < q->nr_levels; level++)
- for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
- if (!e->sentinel) {
- r = fn(context, infer_cblock(mq, e),
- e->oblock, e->level);
- if (r)
- return r;
- }
- }
-
- return 0;
-}
-
-static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context)
+static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
struct smq_policy *mq = to_smq_policy(p);
- int r = 0;
+ struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
- /*
- * We don't need to lock here since this method is only called once
- * the IO has stopped.
- */
- r = smq_save_hints(mq, &mq->clean, fn, context);
- if (!r)
- r = smq_save_hints(mq, &mq->dirty, fn, context);
+ if (!e->allocated)
+ return 0;
- return r;
+ return e->level;
}
static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
@@ -1616,7 +1595,7 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
mq->policy.set_dirty = smq_set_dirty;
mq->policy.clear_dirty = smq_clear_dirty;
mq->policy.load_mapping = smq_load_mapping;
- mq->policy.walk_mappings = smq_walk_mappings;
+ mq->policy.get_hint = smq_get_hint;
mq->policy.remove_mapping = smq_remove_mapping;
mq->policy.remove_cblock = smq_remove_cblock;
mq->policy.writeback_work = smq_writeback_work;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 05db56eedb6a..aa10b1493f34 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -90,9 +90,6 @@ struct policy_result {
dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};
-typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
- dm_oblock_t oblock, uint32_t hint);
-
/*
* The cache policy object. Just a bunch of methods. It is envisaged that
* this structure will be embedded in a bigger, policy specific structure
@@ -158,8 +155,11 @@ struct dm_cache_policy {
int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
dm_cblock_t cblock, uint32_t hint, bool hint_valid);
- int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context);
+ /*
+ * Gets the hint for a given cblock. Called in a single-threaded
+ * context, so no locking is required.
+ */
+ uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);
/*
* Override functions used on the error paths of the core target.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 874295757caa..a2768835d394 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -113,8 +113,7 @@ struct iv_tcw_private {
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
- DM_CRYPT_EXIT_THREAD};
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
/*
* The fields in here must be read only after initialization.
@@ -1136,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
+ bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1207,18 +1206,20 @@ continue_locked:
if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
- if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
- spin_unlock_irq(&cc->write_thread_wait.lock);
- break;
- }
-
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
__add_wait_queue(&cc->write_thread_wait, &wait);
spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (unlikely(kthread_should_stop())) {
+ set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&cc->write_thread_wait, &wait);
+ break;
+ }
+
schedule();
+ set_task_state(current, TASK_RUNNING);
spin_lock_irq(&cc->write_thread_wait.lock);
__remove_wait_queue(&cc->write_thread_wait, &wait);
goto continue_locked;
@@ -1533,13 +1534,8 @@ static void crypt_dtr(struct dm_target *ti)
if (!cc)
return;
- if (cc->write_thread) {
- spin_lock_irq(&cc->write_thread_wait.lock);
- set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
- wake_up_locked(&cc->write_thread_wait);
- spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (cc->write_thread)
kthread_stop(cc->write_thread);
- }
if (cc->io_queue)
destroy_workqueue(cc->io_queue);
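
The dm-crypt write thread now relies on the standard kthread stop protocol instead of the private DM_CRYPT_EXIT_THREAD flag: kthread_stop() marks the thread as should-stop and wakes it, so the loop only needs to re-check kthread_should_stop() after setting TASK_INTERRUPTIBLE and before calling schedule(). The bare idiom, without the waitqueue-lock juggling the real code does; demo_thread_fn and demo_start are hypothetical:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int demo_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (kthread_should_stop()) {    /* kthread_stop() may race with us here */
                            __set_current_state(TASK_RUNNING);
                            break;
                    }
                    schedule();                     /* sleeps until wake_up_process() or kthread_stop() */
            }
            return 0;
    }

    static struct task_struct *demo_start(void *data)
    {
            return kthread_run(demo_thread_fn, data, "demo_write_thread");
    }
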
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 49e4d8d4558f..4dfe38655a49 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -149,8 +149,6 @@ static void put_io_block(struct log_writes_c *lc)
static void log_end_io(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
- struct bio_vec *bvec;
- int i;
if (bio->bi_error) {
unsigned long flags;
@@ -161,9 +159,7 @@ static void log_end_io(struct bio *bio)
spin_unlock_irqrestore(&lc->blocks_lock, flags);
}
- bio_for_each_segment_all(bvec, bio, i)
- __free_page(bvec->bv_page);
-
+ bio_free_pages(bio);
put_io_block(lc);
bio_put(bio);
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac734e5bbe48..e477af8596e2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -550,9 +550,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!must_push_back_rq(m))
- r = -EIO; /* Failed */
- return r;
+ if (must_push_back_rq(m))
+ return DM_MAPIO_DELAY_REQUEUE;
+ return -EIO; /* Failed */
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
pg_init_all_paths(m);
@@ -680,9 +680,11 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
return __multipath_map_bio(m, bio, mpio);
}
-static void process_queued_bios_list(struct multipath *m)
+static void process_queued_io_list(struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
+ else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -752,7 +754,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
if (!queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
return 0;
@@ -1193,21 +1195,17 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&m->pg_init_wait, &wait);
+ DEFINE_WAIT(wait);
while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&m->pg_init_in_progress))
break;
io_schedule();
}
- set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&m->pg_init_wait, &wait);
+ finish_wait(&m->pg_init_wait, &wait);
}
static void flush_multipath_work(struct multipath *m)
@@ -1308,7 +1306,7 @@ out:
spin_unlock_irqrestore(&m->lock, flags);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
return r;
@@ -1506,7 +1504,7 @@ static void pg_init_done(void *data, int errors)
}
clear_bit(MPATHF_QUEUE_IO, &m->flags);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
/*
* Wake up any thread waiting to suspend.
@@ -1521,10 +1519,10 @@ static void activate_path(struct work_struct *work)
{
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
- if (pgpath->is_active)
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
+ if (pgpath->is_active && !blk_queue_dying(q))
+ scsi_dh_activate(q, pg_init_done, pgpath);
else
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
@@ -1532,6 +1530,14 @@ static void activate_path(struct work_struct *work)
static int noretry_error(int error)
{
switch (error) {
+ case -EBADE:
+ /*
+ * EBADE signals a reservation conflict.
+ * We shouldn't fail the path here as we can communicate with
+ * the target. We should failover to the next path, but in
+ * doing so we might be causing a ping-pong between paths.
+ * So just return the reservation conflict error.
+ */
case -EOPNOTSUPP:
case -EREMOTEIO:
case -EILSEQ:
@@ -1576,9 +1582,6 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
if (!must_push_back_rq(m))
r = -EIO;
- } else {
- if (error == -EBADE)
- r = error;
}
}
@@ -1627,9 +1630,6 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
if (!must_push_back_bio(m))
return -EIO;
return DM_ENDIO_REQUEUE;
- } else {
- if (error == -EBADE)
- return error;
}
}
@@ -1941,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
/*
@@ -1994,11 +1994,14 @@ static int multipath_busy(struct dm_target *ti)
struct priority_group *pg, *next_pg;
struct pgpath *pgpath;
- /* pg_init in progress or no paths available */
- if (atomic_read(&m->pg_init_in_progress) ||
- (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+ /* pg_init in progress */
+ if (atomic_read(&m->pg_init_in_progress))
return true;
+ /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
+ if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+
/* Guess which priority_group will be used at next mapping time */
pg = lockless_dereference(m->current_pg);
next_pg = lockless_dereference(m->next_pg);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2..5eacce1ef88b 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void dm_mq_start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+}
+
void dm_start_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_start_queue(q);
- else {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
- blk_mq_start_stopped_hw_queues(q, true);
- blk_mq_kick_requeue_list(q);
- }
+ else
+ dm_mq_start_queue(q);
}
static void dm_old_stop_queue(struct request_queue *q)
@@ -89,27 +98,35 @@ static void dm_old_stop_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q)) {
spin_unlock_irqrestore(q->queue_lock, flags);
return;
}
- blk_stop_queue(q);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* Prevent requeuing from restarting the queue. */
+ blk_mq_cancel_requeue_work(q);
+ blk_mq_stop_hw_queues(q);
}
void dm_stop_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_stop_queue(q);
- else {
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_mq_cancel_requeue_work(q);
- blk_mq_stop_hw_queues(q);
- }
+ else
+ dm_mq_stop_queue(q);
}
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -319,21 +336,32 @@ static void dm_old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_mq_requeue_request(struct request *rq)
+static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
- struct request_queue *q = rq->q;
unsigned long flags;
- blk_mq_requeue_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
if (!blk_queue_stopped(q))
- blk_mq_kick_requeue_list(q);
+ blk_mq_delay_kick_requeue_list(q, msecs);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_requeue_original_request(struct mapped_device *md,
- struct request *rq)
+void dm_mq_kick_requeue_list(struct mapped_device *md)
+{
+ __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+}
+EXPORT_SYMBOL(dm_mq_kick_requeue_list);
+
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+{
+ blk_mq_requeue_request(rq);
+ __dm_mq_kick_requeue_list(rq->q, msecs);
+}
+
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
int rw = rq_data_dir(rq);
rq_end_stats(md, rq);
@@ -342,7 +370,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
if (!rq->q->mq_ops)
dm_old_requeue_request(rq);
else
- dm_mq_requeue_request(rq);
+ dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
rq_completed(md, rw, false);
}
@@ -372,7 +400,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
- dm_requeue_original_request(tio->md, tio->orig);
+ dm_requeue_original_request(tio, false);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -612,20 +640,23 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
/*
* Returns:
- * 0 : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_* : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
* < 0 : the request was completed due to failure
*/
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
{
int r;
struct dm_target *ti = tio->ti;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
struct request *clone = NULL;
if (tio->clone) {
clone = tio->clone;
r = ti->type->map_rq(ti, clone, &tio->info);
+ if (r == DM_MAPIO_DELAY_REQUEUE)
+ return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
} else {
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
if (r < 0) {
@@ -633,9 +664,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
dm_kill_unmapped_request(rq, r);
return r;
}
- if (r != DM_MAPIO_REMAPPED)
- return r;
- if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ if (r == DM_MAPIO_REMAPPED &&
+ setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
@@ -654,7 +684,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
- dm_requeue_original_request(md, tio->orig);
+ break;
+ case DM_MAPIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
break;
default:
if (r > 0) {
@@ -664,10 +697,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, r);
- return r;
}
- return 0;
+ return r;
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -706,11 +738,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
static void map_tio_request(struct kthread_work *work)
{
struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
- struct request *rq = tio->orig;
- struct mapped_device *md = tio->md;
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(md, rq);
+ if (map_request(tio) == DM_MAPIO_REQUEUE)
+ dm_requeue_original_request(tio, false);
}
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -896,7 +926,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
tio->ti = ti;
/* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ if (map_request(tio) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
@@ -908,7 +938,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
static struct blk_mq_ops dm_mq_ops = {
.queue_rq = dm_mq_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = dm_softirq_done,
.init_request = dm_mq_init_request,
};
@@ -955,7 +984,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
dm_init_md_queue(md);
/* backfill 'mq' sysfs registration normally done in blk_register_queue */
- blk_mq_register_disk(md->disk);
+ blk_mq_register_dev(disk_to_dev(md->disk), q);
return 0;
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 9e6f0a3773d4..4da06cae7bad 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -55,6 +55,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md);
void dm_start_queue(struct request_queue *q);
void dm_stop_queue(struct request_queue *q);
+void dm_mq_kick_requeue_list(struct mapped_device *md);
+
unsigned dm_get_reserved_rq_based_ios(void);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa9b1cb4438a..be35258324c1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1648,6 +1648,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
+ lockdep_assert_held(&md->suspend_lock);
+
size = dm_table_get_size(t);
/*
@@ -1873,6 +1875,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
+ struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -1883,6 +1886,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
+
if (dm_request_based(md) && md->kworker_task)
flush_kthread_worker(&md->kworker);
@@ -1934,30 +1941,25 @@ void dm_put(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_put);
-static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
int r = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&md->wait, &wait);
+ DEFINE_WAIT(wait);
while (1) {
- set_current_state(interruptible);
+ prepare_to_wait(&md->wait, &wait, task_state);
if (!md_in_flight(md))
break;
- if (interruptible == TASK_INTERRUPTIBLE &&
- signal_pending(current)) {
+ if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
io_schedule();
}
- set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&md->wait, &wait);
+ finish_wait(&md->wait, &wait);
return r;
}
@@ -2075,6 +2077,10 @@ static void unlock_fs(struct mapped_device *md)
}
/*
+ * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
+ * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
+ * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
+ *
* If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
@@ -2082,13 +2088,15 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible,
+ unsigned suspend_flags, long task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
int r;
+ lockdep_assert_held(&md->suspend_lock);
+
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
@@ -2149,7 +2157,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, interruptible);
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2249,10 +2257,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
int dm_resume(struct mapped_device *md)
{
- int r = -EINVAL;
+ int r;
struct dm_table *map = NULL;
retry:
+ r = -EINVAL;
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (!dm_suspended_md(md))
@@ -2276,8 +2285,6 @@ retry:
goto out;
clear_bit(DMF_SUSPENDED, &md->flags);
-
- r = 0;
out:
mutex_unlock(&md->suspend_lock);
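
Both multipath_wait_for_pg_init_completion() and dm_wait_for_completion() in the hunks above switch from a hand-rolled DECLARE_WAITQUEUE()/add_wait_queue() pair to prepare_to_wait()/finish_wait(), which requeue the waiter and set the task state on every loop iteration and restore TASK_RUNNING on exit. The bare idiom, with hypothetical names (demo_wait_for, demo_pending):

    #include <linux/atomic.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Sleep until the counter the caller cares about drops to zero. */
    static void demo_wait_for(wait_queue_head_t *wq, atomic_t *demo_pending)
    {
            DEFINE_WAIT(wait);

            while (1) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (!atomic_read(demo_pending)) /* the condition being waited on */
                            break;
                    io_schedule();
            }
            finish_wait(wq, &wait);
    }
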
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 34a840d9df76..2b13117fb918 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
+#include <linux/kthread.h>
#include <linux/dlm.h>
#include <linux/sched.h>
#include <linux/raid/md_p.h>
@@ -25,7 +26,8 @@ struct dlm_lock_resource {
struct dlm_lksb lksb;
char *name; /* lock name. */
uint32_t flags; /* flags to pass to dlm_lock() */
- struct completion completion; /* completion for synchronized locking */
+ wait_queue_head_t sync_locking; /* wait queue for synchronized locking */
+ bool sync_locking_done;
void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
struct mddev *mddev; /* pointing back to mddev. */
int mode;
@@ -118,7 +120,8 @@ static void sync_ast(void *arg)
struct dlm_lock_resource *res;
res = arg;
- complete(&res->completion);
+ res->sync_locking_done = true;
+ wake_up(&res->sync_locking);
}
static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
@@ -130,7 +133,8 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
0, sync_ast, res, res->bast);
if (ret)
return ret;
- wait_for_completion(&res->completion);
+ wait_event(res->sync_locking, res->sync_locking_done);
+ res->sync_locking_done = false;
if (res->lksb.sb_status == 0)
res->mode = mode;
return res->lksb.sb_status;
@@ -141,6 +145,44 @@ static int dlm_unlock_sync(struct dlm_lock_resource *res)
return dlm_lock_sync(res, DLM_LOCK_NL);
}
+/*
+ * A variation of dlm_lock_sync() that allows the lock request to
+ * be interrupted
+ */
+static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode,
+ struct mddev *mddev)
+{
+ int ret = 0;
+
+ ret = dlm_lock(res->ls, mode, &res->lksb,
+ res->flags, res->name, strlen(res->name),
+ 0, sync_ast, res, res->bast);
+ if (ret)
+ return ret;
+
+ wait_event(res->sync_locking, res->sync_locking_done
+ || kthread_should_stop()
+ || test_bit(MD_CLOSING, &mddev->flags));
+ if (!res->sync_locking_done) {
+ /*
+ * When the request is interrupted it is still sitting on the
+ * convert queue and sync_ast could still run, so cancel the
+ * request and reset the completion flag.
+ */
+ ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL,
+ &res->lksb, res);
+ res->sync_locking_done = false;
+ if (unlikely(ret != 0))
+ pr_info("failed to cancel previous lock request "
+ "%s return %d\n", res->name, ret);
+ return -EPERM;
+ } else
+ res->sync_locking_done = false;
+ if (res->lksb.sb_status == 0)
+ res->mode = mode;
+ return res->lksb.sb_status;
+}
+
static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
{
@@ -151,7 +193,8 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
if (!res)
return NULL;
- init_completion(&res->completion);
+ init_waitqueue_head(&res->sync_locking);
+ res->sync_locking_done = false;
res->ls = cinfo->lockspace;
res->mddev = mddev;
res->mode = DLM_LOCK_IV;
@@ -194,25 +237,21 @@ out_err:
static void lockres_free(struct dlm_lock_resource *res)
{
- int ret;
+ int ret = 0;
if (!res)
return;
- /* cancel a lock request or a conversion request that is blocked */
- res->flags |= DLM_LKF_CANCEL;
-retry:
- ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
- if (unlikely(ret != 0)) {
- pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);
-
- /* if a lock conversion is cancelled, then the lock is put
- * back to grant queue, need to ensure it is unlocked */
- if (ret == -DLM_ECANCEL)
- goto retry;
- }
- res->flags &= ~DLM_LKF_CANCEL;
- wait_for_completion(&res->completion);
+ /*
+ * use the FORCEUNLOCK flag, so we can unlock even if the lock is
+ * on the waiting or convert queue
+ */
+ ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK,
+ &res->lksb, res);
+ if (unlikely(ret != 0))
+ pr_err("failed to unlock %s return %d\n", res->name, ret);
+ else
+ wait_event(res->sync_locking, res->sync_locking_done);
kfree(res->name);
kfree(res->lksb.sb_lvbptr);
@@ -279,7 +318,7 @@ static void recover_bitmaps(struct md_thread *thread)
goto clear_bit;
}
- ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ ret = dlm_lock_sync_interruptible(bm_lockres, DLM_LOCK_PW, mddev);
if (ret) {
pr_err("md-cluster: Could not DLM lock %s: %d\n",
str, ret);
@@ -288,7 +327,7 @@ static void recover_bitmaps(struct md_thread *thread)
ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
if (ret) {
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
- goto dlm_unlock;
+ goto clear_bit;
}
if (hi > 0) {
if (lo < mddev->recovery_cp)
@@ -300,8 +339,6 @@ static void recover_bitmaps(struct md_thread *thread)
md_wakeup_thread(mddev->thread);
}
}
-dlm_unlock:
- dlm_unlock_sync(bm_lockres);
clear_bit:
lockres_free(bm_lockres);
clear_bit(slot, &cinfo->recovery_map);
@@ -495,9 +532,10 @@ static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg
static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
{
- struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
- le32_to_cpu(msg->raid_slot));
+ struct md_rdev *rdev;
+ rcu_read_lock();
+ rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
if (rdev) {
set_bit(ClusterRemove, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -506,18 +544,21 @@ static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
else
pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
__func__, __LINE__, le32_to_cpu(msg->raid_slot));
+ rcu_read_unlock();
}
static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
{
- struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
- le32_to_cpu(msg->raid_slot));
+ struct md_rdev *rdev;
+ rcu_read_lock();
+ rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
if (rdev && test_bit(Faulty, &rdev->flags))
clear_bit(Faulty, &rdev->flags);
else
pr_warn("%s: %d Could not find disk(%d) which is faulty",
__func__, __LINE__, le32_to_cpu(msg->raid_slot));
+ rcu_read_unlock();
}
static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
@@ -770,7 +811,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
md_check_recovery(mddev);
}
- dlm_unlock_sync(bm_lockres);
lockres_free(bm_lockres);
}
out:
@@ -1006,7 +1046,7 @@ static void metadata_update_cancel(struct mddev *mddev)
static int resync_start(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
+ return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
}
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
@@ -1186,7 +1226,6 @@ static void unlock_all_bitmaps(struct mddev *mddev)
if (cinfo->other_bitmap_lockres) {
for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
if (cinfo->other_bitmap_lockres[i]) {
- dlm_unlock_sync(cinfo->other_bitmap_lockres[i]);
lockres_free(cinfo->other_bitmap_lockres[i]);
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 915e84d631a2..eac84d8ff724 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5297,6 +5297,21 @@ int md_run(struct mddev *mddev)
return err;
}
if (mddev->queue) {
+ bool nonrot = true;
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk >= 0 &&
+ !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+ nonrot = false;
+ break;
+ }
+ }
+ if (mddev->degraded)
+ nonrot = false;
+ if (nonrot)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
}
@@ -5454,12 +5469,14 @@ static void md_clean(struct mddev *mddev)
mddev->degraded = 0;
mddev->safemode = 0;
mddev->private = NULL;
+ mddev->cluster_info = NULL;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
+ mddev->bitmap_info.nodes = 0;
}
static void __md_stop_writes(struct mddev *mddev)
@@ -5573,8 +5590,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5636,8 +5652,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sysfs_active ||
mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
@@ -6101,9 +6116,14 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
export_rdev(rdev);
if (mddev_is_clustered(mddev)) {
- if (info->state & (1 << MD_DISK_CANDIDATE))
- md_cluster_ops->new_disk_ack(mddev, (err == 0));
- else {
+ if (info->state & (1 << MD_DISK_CANDIDATE)) {
+ if (!err) {
+ err = md_cluster_ops->new_disk_ack(mddev,
+ err == 0);
+ if (err)
+ md_kick_rdev_from_array(rdev);
+ }
+ } else {
if (err)
md_cluster_ops->add_new_disk_cancel(mddev);
else
@@ -6821,7 +6841,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = -EBUSY;
goto out;
}
- set_bit(MD_STILL_CLOSED, &mddev->flags);
+ set_bit(MD_CLOSING, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
}
@@ -7070,9 +7090,13 @@ static int md_open(struct block_device *bdev, fmode_t mode)
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
+ if (test_bit(MD_CLOSING, &mddev->flags)) {
+ mutex_unlock(&mddev->open_mutex);
+ return -ENODEV;
+ }
+
err = 0;
atomic_inc(&mddev->openers);
- clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
@@ -8873,7 +8897,9 @@ static void autostart_arrays(int part)
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
+ mutex_unlock(&detected_devices_mutex);
rdev = md_import_device(dev,0, 90);
+ mutex_lock(&detected_devices_mutex);
if (IS_ERR(rdev))
continue;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 20c667579ede..2b2041773e79 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -201,9 +201,8 @@ struct mddev {
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
-#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since
- * md_ioctl checked on it.
- */
+#define MD_CLOSING 4 /* If set, we are closing the array; do not
+ * allow it to be opened */
#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
#define MD_RELOAD_SB 7 /* Reload the superblock because another node
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 431a03067d64..e83047cbb2da 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -277,6 +277,48 @@ static int insert_ablock(struct dm_array_info *info, uint64_t index,
return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
}
+/*----------------------------------------------------------------*/
+
+static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
+ struct dm_block **block, struct array_block **ab)
+{
+ int inc;
+ int r = dm_tm_shadow_block(info->btree_info.tm, b,
+ &array_validator, block, &inc);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ if (inc)
+ inc_ablock_entries(info, *ab);
+
+ return 0;
+}
+
+/*
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+ struct dm_block *block, dm_block_t b,
+ dm_block_t *root)
+{
+ int r = 0;
+
+ if (dm_block_location(block) != b) {
+ /*
+ * dm_tm_shadow_block will have already decremented the old
+ * block, but it is still referenced by the btree. We
+ * increment to stop the insert decrementing it below zero
+ * when overwriting the old value.
+ */
+ dm_tm_inc(info->btree_info.tm, b);
+ r = insert_ablock(info, index, block, root);
+ }
+
+ return r;
+}
+
/*
* Looks up an array block in the btree. Then shadows it, and updates the
* btree to point to this new shadow. 'root' is an input/output parameter
@@ -286,49 +328,21 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
unsigned index, struct dm_block **block,
struct array_block **ab)
{
- int r, inc;
+ int r;
uint64_t key = index;
dm_block_t b;
__le64 block_le;
- /*
- * lookup
- */
r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
if (r)
return r;
b = le64_to_cpu(block_le);
- /*
- * shadow
- */
- r = dm_tm_shadow_block(info->btree_info.tm, b,
- &array_validator, block, &inc);
+ r = __shadow_ablock(info, b, block, ab);
if (r)
return r;
- *ab = dm_block_data(*block);
- if (inc)
- inc_ablock_entries(info, *ab);
-
- /*
- * Reinsert.
- *
- * The shadow op will often be a noop. Only insert if it really
- * copied data.
- */
- if (dm_block_location(*block) != b) {
- /*
- * dm_tm_shadow_block will have already decremented the old
- * block, but it is still referenced by the btree. We
- * increment to stop the insert decrementing it below zero
- * when overwriting the old value.
- */
- dm_tm_inc(info->btree_info.tm, b);
- r = insert_ablock(info, index, *block, root);
- }
-
- return r;
+ return __reinsert_ablock(info, index, *block, b, root);
}
/*
@@ -681,6 +695,72 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
}
EXPORT_SYMBOL_GPL(dm_array_resize);
+static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
+ value_fn fn, void *context, unsigned base, unsigned new_nr)
+{
+ int r;
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(le32_to_cpu(ab->nr_entries));
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = 0; i < new_nr; i++) {
+ r = fn(base + i, element_at(info, ab, i), context);
+ if (r)
+ return r;
+
+ if (vt->inc)
+ vt->inc(vt->context, element_at(info, ab, i));
+ }
+
+ ab->nr_entries = cpu_to_le32(new_nr);
+ return 0;
+}
+
+int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ uint32_t size, value_fn fn, void *context)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ unsigned block_index, end_block, size_of_block, max_entries;
+
+ r = dm_array_empty(info, root);
+ if (r)
+ return r;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+ end_block = dm_div_up(size, max_entries);
+
+ for (block_index = 0; block_index != end_block; block_index++) {
+ r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+ if (r)
+ break;
+
+ r = populate_ablock_with_values(info, ab, fn, context,
+ block_index * max_entries,
+ min(max_entries, size));
+ if (r) {
+ unlock_ablock(info, block);
+ break;
+ }
+
+ r = insert_ablock(info, block_index, block, root);
+ unlock_ablock(info, block);
+ if (r)
+ break;
+
+ size -= max_entries;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_new);
+
int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
return dm_btree_del(&info->btree_info, root);
@@ -819,3 +899,89 @@ int dm_array_walk(struct dm_array_info *info, dm_block_t root,
EXPORT_SYMBOL_GPL(dm_array_walk);
/*----------------------------------------------------------------*/
+
+static int load_ablock(struct dm_array_cursor *c)
+{
+ int r;
+ __le64 value_le;
+ uint64_t key;
+
+ if (c->block)
+ unlock_ablock(c->info, c->block);
+
+ c->block = NULL;
+ c->ab = NULL;
+ c->index = 0;
+
+ r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
+ if (r) {
+ DMERR("dm_btree_cursor_get_value failed");
+ dm_btree_cursor_end(&c->cursor);
+
+ } else {
+ r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
+ if (r) {
+ DMERR("get_ablock failed");
+ dm_btree_cursor_end(&c->cursor);
+ }
+ }
+
+ return r;
+}
+
+int dm_array_cursor_begin(struct dm_array_info *info, dm_block_t root,
+ struct dm_array_cursor *c)
+{
+ int r;
+
+ memset(c, 0, sizeof(*c));
+ c->info = info;
+ r = dm_btree_cursor_begin(&info->btree_info, root, true, &c->cursor);
+ if (r) {
+ DMERR("couldn't create btree cursor");
+ return r;
+ }
+
+ return load_ablock(c);
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
+
+void dm_array_cursor_end(struct dm_array_cursor *c)
+{
+ if (c->block) {
+ unlock_ablock(c->info, c->block);
+ dm_btree_cursor_end(&c->cursor);
+ }
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_end);
+
+int dm_array_cursor_next(struct dm_array_cursor *c)
+{
+ int r;
+
+ if (!c->block)
+ return -ENODATA;
+
+ c->index++;
+
+ if (c->index >= le32_to_cpu(c->ab->nr_entries)) {
+ r = dm_btree_cursor_next(&c->cursor);
+ if (r)
+ return r;
+
+ r = load_ablock(c);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_next);
+
+void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le)
+{
+ *value_le = element_at(c->info, c->ab, c->index);
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_get_value);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
index ea177d6fa58f..27ee49a55473 100644
--- a/drivers/md/persistent-data/dm-array.h
+++ b/drivers/md/persistent-data/dm-array.h
@@ -112,6 +112,25 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
__dm_written_to_disk(value);
/*
+ * Creates a new array populated with values provided by a callback
+ * function. This is more efficient than creating an empty array,
+ * resizing, and then setting values since that process incurs a lot of
+ * copying.
+ *
+ * Assumes 32bit values for now since it's only used by the cache hint
+ * array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * size - the number of entries in the array
+ * fn - the callback
+ * context - passed to the callback
+ */
+typedef int (*value_fn)(uint32_t index, void *value_le, void *context);
+int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ uint32_t size, value_fn fn, void *context);
+
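As a rough usage sketch (not taken from this patch), a caller might populate a freshly created array through the callback like this; init_hint() and nr_cache_blocks are hypothetical names, and a struct dm_array_info already set up with a 32-bit little-endian value type is assumed:

  /* Hypothetical callback: write a default little-endian 32-bit hint. */
  static int init_hint(uint32_t index, void *value_le, void *context)
  {
          __le32 *hint = value_le;

          *hint = cpu_to_le32(*(uint32_t *)context);
          return 0;
  }

  /* somewhere in the caller, with 'info' a configured struct dm_array_info: */
          uint32_t default_hint = 0;
          dm_block_t root;
          int r;

          r = dm_array_new(&info, &root, nr_cache_blocks, init_hint, &default_hint);
          if (r)
                  return r;
          /* 'root' now refers to an array of nr_cache_blocks initialised entries. */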
+/*
* Frees a whole array. The value_type's decrement operation will be called
* for all values in the array
*/
@@ -163,4 +182,37 @@ int dm_array_walk(struct dm_array_info *info, dm_block_t root,
/*----------------------------------------------------------------*/
+/*
+ * Cursor API.
+ *
+ * This lets you iterate through all the entries in an array efficiently
+ * (it will preload metadata).
+ *
+ * I'm using a cursor, rather than a walk function with a callback because
+ * the cache target needs to iterate both the mapping and hint arrays in
+ * unison.
+ */
+struct dm_array_cursor {
+ struct dm_array_info *info;
+ struct dm_btree_cursor cursor;
+
+ struct dm_block *block;
+ struct array_block *ab;
+ unsigned index;
+};
+
+int dm_array_cursor_begin(struct dm_array_info *info,
+ dm_block_t root, struct dm_array_cursor *c);
+void dm_array_cursor_end(struct dm_array_cursor *c);
+
+uint32_t dm_array_cursor_index(struct dm_array_cursor *c);
+int dm_array_cursor_next(struct dm_array_cursor *c);
+
+/*
+ * value_le is only valid while the cursor points at the current value.
+ */
+void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le);
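For illustration only, walking every entry with the cursor could look like the sketch below; process_hint() is a hypothetical consumer, and the array is again assumed to hold 32-bit little-endian values:

          struct dm_array_cursor c; /* large: in real code embed it in a longer-lived struct */
          __le32 *value_le;
          int r;

          r = dm_array_cursor_begin(&info, root, &c);
          if (r)
                  return r;

          do {
                  dm_array_cursor_get_value(&c, (void **) &value_le);
                  process_hint(le32_to_cpu(*value_le));

                  r = dm_array_cursor_next(&c);
          } while (!r);

          dm_array_cursor_end(&c);
          return r == -ENODATA ? 0 : r; /* -ENODATA just means the end was reached */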
+
+/*----------------------------------------------------------------*/
+
#endif /* _LINUX_DM_ARRAY_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 2cc1877804c2..20a40329d84a 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -994,3 +994,165 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
+
+/*----------------------------------------------------------------*/
+
+static void prefetch_values(struct dm_btree_cursor *c)
+{
+ unsigned i, nr;
+ __le64 value_le;
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+ struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);
+
+ BUG_ON(c->info->value_type.size != sizeof(value_le));
+
+ nr = le32_to_cpu(bn->header.nr_entries);
+ for (i = 0; i < nr; i++) {
+ memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
+ dm_bm_prefetch(bm, le64_to_cpu(value_le));
+ }
+}
+
+static bool leaf_node(struct dm_btree_cursor *c)
+{
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+
+ return le32_to_cpu(bn->header.flags) & LEAF_NODE;
+}
+
+static int push_node(struct dm_btree_cursor *c, dm_block_t b)
+{
+ int r;
+ struct cursor_node *n = c->nodes + c->depth;
+
+ if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
+ DMERR("couldn't push cursor node, stack depth too high");
+ return -EINVAL;
+ }
+
+ r = bn_read_lock(c->info, b, &n->b);
+ if (r)
+ return r;
+
+ n->index = 0;
+ c->depth++;
+
+ if (c->prefetch_leaves || !leaf_node(c))
+ prefetch_values(c);
+
+ return 0;
+}
+
+static void pop_node(struct dm_btree_cursor *c)
+{
+ c->depth--;
+ unlock_block(c->info, c->nodes[c->depth].b);
+}
+
+static int inc_or_backtrack(struct dm_btree_cursor *c)
+{
+ struct cursor_node *n;
+ struct btree_node *bn;
+
+ for (;;) {
+ if (!c->depth)
+ return -ENODATA;
+
+ n = c->nodes + c->depth - 1;
+ bn = dm_block_data(n->b);
+
+ n->index++;
+ if (n->index < le32_to_cpu(bn->header.nr_entries))
+ break;
+
+ pop_node(c);
+ }
+
+ return 0;
+}
+
+static int find_leaf(struct dm_btree_cursor *c)
+{
+ int r = 0;
+ struct cursor_node *n;
+ struct btree_node *bn;
+ __le64 value_le;
+
+ for (;;) {
+ n = c->nodes + c->depth - 1;
+ bn = dm_block_data(n->b);
+
+ if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
+ break;
+
+ memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
+ r = push_node(c, le64_to_cpu(value_le));
+ if (r) {
+ DMERR("push_node failed");
+ break;
+ }
+ }
+
+ if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
+ return -ENODATA;
+
+ return r;
+}
+
+int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
+ bool prefetch_leaves, struct dm_btree_cursor *c)
+{
+ int r;
+
+ c->info = info;
+ c->root = root;
+ c->depth = 0;
+ c->prefetch_leaves = prefetch_leaves;
+
+ r = push_node(c, root);
+ if (r)
+ return r;
+
+ return find_leaf(c);
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);
+
+void dm_btree_cursor_end(struct dm_btree_cursor *c)
+{
+ while (c->depth)
+ pop_node(c);
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
+
+int dm_btree_cursor_next(struct dm_btree_cursor *c)
+{
+ int r = inc_or_backtrack(c);
+ if (!r) {
+ r = find_leaf(c);
+ if (r)
+ DMERR("find_leaf failed");
+ }
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
+
+int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
+{
+ if (c->depth) {
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+
+ if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
+ return -EINVAL;
+
+ *key = le64_to_cpu(*key_ptr(bn, n->index));
+ memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
+ return 0;
+
+ } else
+ return -ENODATA;
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index c74301fa5a37..db9bd26adf31 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -176,4 +176,39 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
int (*fn)(void *context, uint64_t *keys, void *leaf),
void *context);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Cursor API. This does not follow the rolling lock convention. Since we
+ * know the order in which values are required we can issue prefetches to speed
+ * up iteration. Use on a single level btree only.
+ */
+#define DM_BTREE_CURSOR_MAX_DEPTH 16
+
+struct cursor_node {
+ struct dm_block *b;
+ unsigned index;
+};
+
+struct dm_btree_cursor {
+ struct dm_btree_info *info;
+ dm_block_t root;
+
+ bool prefetch_leaves;
+ unsigned depth;
+ struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
+};
+
+/*
+ * Creates a fresh cursor. If prefetch_leaves is set then it is assumed
+ * the btree contains block indexes that will be prefetched. The cursor is
+ * quite large, so you probably don't want to put it on the stack.
+ */
+int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
+ bool prefetch_leaves, struct dm_btree_cursor *c);
+void dm_btree_cursor_end(struct dm_btree_cursor *c);
+int dm_btree_cursor_next(struct dm_btree_cursor *c);
+int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le);
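A hedged sketch of iterating a single-level btree whose 64-bit values are block indexes (visit_block() is a hypothetical helper and error handling is abbreviated):

          struct dm_btree_cursor c; /* quite large: embed it rather than using the stack */
          uint64_t key;
          __le64 value_le;
          int r;

          r = dm_btree_cursor_begin(&info, root, true /* prefetch_leaves */, &c);
          if (r)
                  return r;

          do {
                  r = dm_btree_cursor_get_value(&c, &key, &value_le);
                  if (r)
                          break;
                  visit_block(key, le64_to_cpu(value_le));

                  r = dm_btree_cursor_next(&c);
          } while (!r);

          dm_btree_cursor_end(&c);
          return r == -ENODATA ? 0 : r;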
+
#endif /* _LINUX_DM_BTREE_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 21dc00eb1989..1961d827dbd1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -145,12 +145,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
out_free_pages:
- while (--j >= 0) {
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, r1_bio->bios[j], i)
- __free_page(bv->bv_page);
- }
+ while (--j >= 0)
+ bio_free_pages(r1_bio->bios[j]);
out_free_bio:
while (++j < pi->raid_disks)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5287e79e0b78..92ac251e91e6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6370,7 +6370,7 @@ static void free_conf(struct r5conf *conf)
{
if (conf->log)
r5l_exit_log(conf->log);
- if (conf->shrinker.seeks)
+ if (conf->shrinker.nr_deferred)
unregister_shrinker(&conf->shrinker);
free_thread_groups(conf);
@@ -6632,7 +6632,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- register_shrinker(&conf->shrinker);
+ if (register_shrinker(&conf->shrinker)) {
+ printk(KERN_ERR
+ "md/raid:%s: couldn't register shrinker.\n",
+ mdname(mddev));
+ goto abort;
+ }
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
@@ -7028,6 +7033,8 @@ static int raid5_run(struct mddev *mddev)
else
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
+
+ blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
}
if (journal_dev) {
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 6e22af36b637..e038e886731b 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -392,7 +392,6 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!dev->urb_list[i]) {
- dev_dbg(&pdev->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
return -ENOMEM;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 091d793f6583..4b132c29f290 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -627,7 +627,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!radio->int_in_urb) {
- dev_info(&intf->dev, "could not allocate int_in_urb");
retval = -ENOMEM;
goto err_intbuffer;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 65f80b8b9f7a..86cc70fe2534 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2211,16 +2211,11 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
goto exit;
}
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for IR urb", __func__);
+ if (!rx_urb)
goto rx_urb_alloc_failed;
- }
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for display urb",
- __func__);
+ if (!tx_urb)
goto tx_urb_alloc_failed;
- }
mutex_init(&ictx->lock);
spin_lock_init(&ictx->kc_lock);
@@ -2305,10 +2300,8 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
int ret = -ENOMEM;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- pr_err("usb_alloc_urb failed for IR urb\n");
+ if (!rx_urb)
goto rx_urb_alloc_failed;
- }
mutex_lock(&ictx->lock);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index fcc3b82d1454..003fff07ade2 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -24,6 +24,7 @@
#define DRIVER_NAME "meson-ir"
+/* valid on all Meson platforms */
#define IR_DEC_LDR_ACTIVE 0x00
#define IR_DEC_LDR_IDLE 0x04
#define IR_DEC_LDR_REPEAT 0x08
@@ -32,12 +33,21 @@
#define IR_DEC_FRAME 0x14
#define IR_DEC_STATUS 0x18
#define IR_DEC_REG1 0x1c
+/* only available on Meson 8b and newer */
+#define IR_DEC_REG2 0x20
#define REG0_RATE_MASK (BIT(11) - 1)
-#define REG1_MODE_MASK (BIT(7) | BIT(8))
-#define REG1_MODE_NEC (0 << 7)
-#define REG1_MODE_GENERAL (2 << 7)
+#define DECODE_MODE_NEC 0x0
+#define DECODE_MODE_RAW 0x2
+
+/* Meson 6b uses REG1 to configure the mode */
+#define REG1_MODE_MASK GENMASK(8, 7)
+#define REG1_MODE_SHIFT 7
+
+/* Meson 8b / GXBB use REG2 to configure the mode */
+#define REG2_MODE_MASK GENMASK(3, 0)
+#define REG2_MODE_SHIFT 0
#define REG1_TIME_IV_SHIFT 16
#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
@@ -158,8 +168,15 @@ static int meson_ir_probe(struct platform_device *pdev)
/* Reset the decoder */
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
- /* Set general operation mode */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL);
+
+ /* Set general operation mode (= raw/software decoding) */
+ if (of_device_is_compatible(node, "amlogic,meson6-ir"))
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
+ DECODE_MODE_RAW << REG1_MODE_SHIFT);
+ else
+ meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
+ DECODE_MODE_RAW << REG2_MODE_SHIFT);
+
/* Set rate */
meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
/* IRQ on rising and falling edges */
@@ -197,6 +214,8 @@ static int meson_ir_remove(struct platform_device *pdev)
static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson6-ir" },
+ { .compatible = "amlogic,meson8b-ir" },
+ { .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 399f44d89a29..ec8016d9b009 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -970,10 +970,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
/* set up bulk-in endpoint */
rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rr3->read_urb) {
- dev_err(dev, "Read urb allocation failure\n");
+ if (!rr3->read_urb)
goto error;
- }
rr3->ep_in = ep_in;
rr3->bulk_in_buf = usb_alloc_coherent(udev,
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index fe031b06935f..3c556ee306cd 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -426,7 +426,6 @@ static int airspy_alloc_urbs(struct airspy *s)
dev_dbg(s->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- dev_dbg(s->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(s->urb_list[j]);
return -ENOMEM;
diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c
index 0e8030c071b8..68c3a80ce349 100644
--- a/drivers/media/usb/as102/as102_usb_drv.c
+++ b/drivers/media/usb/as102/as102_usb_drv.c
@@ -270,8 +270,6 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb == NULL) {
- dev_dbg(&dev->bus_adap.usb_dev->dev,
- "%s: usb_alloc_urb failed\n", __func__);
as102_free_usb_stream_buffer(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 82b026985868..13b8387082f2 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -245,7 +245,6 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
if (!urb) {
- au0828_isocdbg("cannot alloc isoc_ctl.urb %i\n", i);
au0828_uninit_isoc(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index c1aa1ab2ece9..13620cdf0599 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -662,7 +662,6 @@ static int submit_urbs(struct camera_data *cam)
}
urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
if (!urb) {
- ERR("%s: usb_alloc_urb error!\n", __func__);
for (j = 0; j < i; j++)
usb_free_urb(cam->sbuf[j].urb);
return -ENOMEM;
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index a6a9508418f8..4cd5fa91612f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -293,7 +293,6 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
- dev_err(dev->dev, "usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
@@ -355,7 +354,6 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
- dev_err(dev->dev, "usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 630f4fc5155f..8ec05cb306d8 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -1035,8 +1035,6 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
if (!urb) {
- dev_err(dev->dev,
- "cannot alloc isoc_ctl.urb %i\n", i);
cx231xx_uninit_isoc(dev);
return -ENOMEM;
}
@@ -1172,8 +1170,6 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_err(dev->dev,
- "cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_bulk(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index 15bb573b78ac..76e901920f6f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -442,8 +442,6 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_err(dev->dev,
- "cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_vbi_isoc(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index bf890c3d9cda..26797979ebce 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -783,10 +783,8 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
purb = usb_alloc_urb(0, GFP_KERNEL);
- if (purb == NULL) {
- err("rc usb alloc urb failed");
+ if (purb == NULL)
return -ENOMEM;
- }
purb->transfer_buffer = kzalloc(RC_MSG_SIZE_V1_20, GFP_KERNEL);
if (purb->transfer_buffer == NULL) {
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 49a5f9532bd8..78f3687772bf 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -850,7 +850,6 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
urb = usb_alloc_urb(npackets, GFP_ATOMIC);
if (!urb) {
- em28xx_errdev("usb_alloc_urb failed!\n");
em28xx_audio_free_urb(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 37456079f490..eebd5d7088d0 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -934,7 +934,6 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
for (i = 0; i < usb_bufs->num_bufs; i++) {
urb = usb_alloc_urb(usb_bufs->num_packets, GFP_KERNEL);
if (!urb) {
- em28xx_err("cannot alloc usb_ctl.urb %i\n", i);
em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
diff --git a/drivers/media/usb/gspca/benq.c b/drivers/media/usb/gspca/benq.c
index 790baed33963..5fa67b78ad49 100644
--- a/drivers/media/usb/gspca/benq.c
+++ b/drivers/media/usb/gspca/benq.c
@@ -95,10 +95,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
#define SD_NPKT 32
for (n = 0; n < 4; n++) {
urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
- if (!urb) {
- pr_err("usb_alloc_urb failed\n");
+ if (!urb)
return -ENOMEM;
- }
gspca_dev->urb[n] = urb;
urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
SD_PKT_SZ * SD_NPKT,
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index b17bd7ebcb47..af2395a76d8b 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -795,10 +795,8 @@ static int create_urbs(struct gspca_dev *gspca_dev,
for (n = 0; n < nurbs; n++) {
urb = usb_alloc_urb(npkt, GFP_KERNEL);
- if (!urb) {
- pr_err("usb_alloc_urb failed\n");
+ if (!urb)
return -ENOMEM;
- }
gspca_dev->urb[n] = urb;
urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
bsize,
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 0712b1bc90b4..40aaaa9c5f30 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -208,10 +208,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
packet_size =
le16_to_cpu(alt->endpoint[i].desc.wMaxPacketSize);
urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
- if (!urb) {
- pr_err("usb_alloc_urb failed\n");
+ if (!urb)
return -ENOMEM;
- }
gspca_dev->urb[n] = urb;
urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
packet_size * SD_NPKT,
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index b1e229a44192..c2c8d12e9498 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -691,7 +691,6 @@ static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv)
dev_dbg(dev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!dev->urb_list[i]) {
- dev_dbg(dev->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
return -ENOMEM;
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 2a3a8b470555..6d43d75493ea 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -155,10 +155,8 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
buf->dev = dev;
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- v4l2_err(&dev->v4l2_dev, "cannot allocate urb\n");
+ if (!urb)
goto exit_urb;
- }
buf->urb = urb;
mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size, GFP_KERNEL,
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index e7f167d44c61..367eb7e2a31d 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -509,7 +509,6 @@ static int msi2500_isoc_init(struct msi2500_dev *dev)
for (i = 0; i < MAX_ISO_BUFS; i++) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
- dev_err(dev->dev, "Failed to allocate urb %d\n", i);
msi2500_isoc_cleanup(dev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index b51b27a3fd61..c4454c928776 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -410,7 +410,6 @@ retry:
for (i = 0; i < MAX_ISO_BUFS; i++) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
- PWC_ERROR("Failed to allocate urb %d\n", i);
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 43ba71a7d02b..9458eb0ef66f 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -2113,11 +2113,8 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pipe_info->stream_urb) {
- dev_err(&dev->udev->dev,
- "ReadStream: Unable to alloc URB\n");
+ if (!pipe_info->stream_urb)
return -ENOMEM;
- }
/* transfer buffer allocated in board_init */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
@@ -2290,10 +2287,8 @@ static int s2255_probe(struct usb_interface *interface,
}
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->fw_data->fw_urb) {
- dev_err(&interface->dev, "out of memory!\n");
+ if (!dev->fw_data->fw_urb)
goto errorFWURB;
- }
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 6ecb0b48423f..ce8ebbe395a6 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -457,10 +457,8 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
for (i = 0; i < num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
- if (!urb) {
- stk1160_err("cannot alloc urb[%d]\n", i);
+ if (!urb)
goto free_i_bufs;
- }
dev->isoc_ctl.urb[i] = urb;
#ifndef CONFIG_DMA_NONCOHERENT
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c21c4c004f97..db200c9d796d 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -452,10 +452,8 @@ static int stk_prepare_iso(struct stk_camera *dev)
STK_ERROR("isobuf data already allocated\n");
if (dev->isobufs[i].urb == NULL) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
- if (urb == NULL) {
- STK_ERROR("Failed to allocate URB %d\n", i);
+ if (urb == NULL)
goto isobufs_out;
- }
dev->isobufs[i].urb = urb;
} else {
STK_ERROR("Killing URB\n");
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 095f5db1a790..0426b210383b 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -129,10 +129,8 @@ static int tm6000_start_stream(struct tm6000_core *dev)
}
dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dvb->bulk_urb == NULL) {
- printk(KERN_ERR "tm6000: couldn't allocate urb\n");
+ if (dvb->bulk_urb == NULL)
return -ENOMEM;
- }
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index fa5e8bda2ae4..dee7e7d3d47d 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -635,7 +635,6 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
if (!urb) {
- tm6000_err("cannot alloc isoc_ctl.urb %i\n", i);
tm6000_uninit_isoc(dev);
usb_free_urb(urb);
return -ENOMEM;
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 52ac4391582c..c23bf73a68ea 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -2303,11 +2303,8 @@ int usbvision_init_isoc(struct usb_usbvision *usbvision)
struct urb *urb;
urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
- if (urb == NULL) {
- dev_err(&usbvision->dev->dev,
- "%s: usb_alloc_urb() failed\n", __func__);
+ if (urb == NULL)
return -ENOMEM;
- }
usbvision->sbuf[buf_idx].urb = urb;
usbvision->sbuf[buf_idx].data =
usb_alloc_coherent(usbvision->dev,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 7433ba5c4bad..cc128db85723 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1045,10 +1045,8 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pipe_info->stream_urb) {
- dev_err(&cam->udev->dev, "ReadStream: Unable to alloc URB\n");
+ if (!pipe_info->stream_urb)
return -ENOMEM;
- }
/* transfer buffer allocated in board_init */
usb_fill_bulk_urb(pipe_info->stream_urb, cam->udev,
pipe,
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index f87ad6f5d2dc..b5ed3bd082b5 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -410,10 +410,7 @@ static int at91sam9_ebi_init(struct at91_ebi *ebi)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int sama5d3_ebi_init(struct at91_ebi *ebi)
@@ -441,10 +438,7 @@ static int sama5d3_ebi_init(struct at91_ebi *ebi)
field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np,
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 53a341f3b305..12080b05e3e6 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -53,12 +53,10 @@ static const struct of_device_id atmel_ramc_of_match[] = {
static int atmel_ramc_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct at91_ramc_caps *caps;
struct clk *clk;
- match = of_match_device(atmel_ramc_of_match, &pdev->dev);
- caps = match->data;
+ caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
clk = devm_clk_get(&pdev->dev, "ddrck");
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 9daf94bb8f27..568f05ed961a 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -16,6 +16,7 @@
#include <linux/gfp.h>
#include <memory/jedec_ddr.h>
#include <linux/export.h>
+#include "of_memory.h"
/**
* of_get_min_tck() - extract min timing values for ddr
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index f00f3e742265..5457c361ad58 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -350,8 +350,8 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
- enum gpmc_clk_domain cd)
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
{
return ticks * gpmc_get_clk_period(cs, cd) / 1000;
}
@@ -2143,9 +2143,7 @@ err_child_fail:
ret = -ENODEV;
err_cs:
- if (waitpin_desc)
- gpiochip_free_own_desc(waitpin_desc);
-
+ gpiochip_free_own_desc(waitpin_desc);
err:
gpmc_cs_free(cs);
@@ -2265,7 +2263,7 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
gpmc->gpio_chip.get = gpmc_gpio_get;
gpmc->gpio_chip.base = -1;
- ret = gpiochip_add(&gpmc->gpio_chip);
+ ret = devm_gpiochip_add_data(gpmc->dev, &gpmc->gpio_chip, NULL);
if (ret < 0) {
dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
return ret;
@@ -2274,11 +2272,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
return 0;
}
-static void gpmc_gpio_exit(struct gpmc_device *gpmc)
-{
- gpiochip_remove(&gpmc->gpio_chip);
-}
-
static int gpmc_probe(struct platform_device *pdev)
{
int rc;
@@ -2365,15 +2358,13 @@ static int gpmc_probe(struct platform_device *pdev)
rc = gpmc_setup_irq(gpmc);
if (rc) {
dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
- goto setup_irq_failed;
+ goto gpio_init_failed;
}
gpmc_probe_dt_children(pdev);
return 0;
-setup_irq_failed:
- gpmc_gpio_exit(gpmc);
gpio_init_failed:
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
@@ -2387,7 +2378,6 @@ static int gpmc_remove(struct platform_device *pdev)
struct gpmc_device *gpmc = platform_get_drvdata(pdev);
gpmc_free_irq(gpmc);
- gpmc_gpio_exit(gpmc);
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5537f8df8512..89c7ed16b4df 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1865,8 +1865,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
"mpt_poll_%d", ioc->id);
- ioc->reset_work_q =
- create_singlethread_workqueue(ioc->reset_work_q_name);
+ ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
+ WQ_MEM_RECLAIM, 0);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
@@ -1992,7 +1992,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
- ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+ ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
+ WQ_MEM_RECLAIM, 0);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d8bf84aef602..add6a3a6ef0d 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1324,9 +1324,12 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
"mptfc_wq_%d", sh->host_no);
ioc->fc_rescan_work_q =
- create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
- if (!ioc->fc_rescan_work_q)
+ alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
+ WQ_MEM_RECLAIM);
+ if (!ioc->fc_rescan_work_q) {
+ error = -ENOMEM;
goto out_mptfc_probe;
+ }
/*
* Pre-fetch FC port WWN and stuff...
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2d1fb6420592..c6df6442ba2b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -50,7 +50,7 @@ config MFD_AS3711
Support for the AS3711 PMIC from AMS
config MFD_AS3722
- bool "ams AS3722 Power Management IC"
+ tristate "ams AS3722 Power Management IC"
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -112,6 +112,16 @@ config MFD_BCM590XX
help
Support for the BCM590xx PMUs from Broadcom
+config MFD_AC100
+ tristate "X-Powers AC100"
+ select MFD_CORE
+ depends on SUNXI_RSB
+ help
+ If you say Y here you get support for the X-Powers AC100 audio codec
+ IC.
+ This driver includes only the core APIs. You have to select individual
+ components like codecs or RTC under the corresponding menus.
+
config MFD_AXP20X
tristate
select MFD_CORE
@@ -281,6 +291,14 @@ config MFD_DLN2
etc. must be enabled in order to use the functionality of
the device.
+config MFD_EXYNOS_LPASS
+ tristate "Samsung Exynos SoC Low Power Audio Subsystem"
+ select MFD_CORE
+ select REGMAP_MMIO
+ help
+ Select this option to enable support for Samsung Exynos Low Power
+ Audio Subsystem.
+
config MFD_MC13XXX
tristate
depends on (SPI_MASTER || I2C)
@@ -340,14 +358,6 @@ config MFD_HI655X_PMIC
help
Select this option to enable Hisilicon hi655x series pmic driver.
-config HTC_EGPIO
- bool "HTC EGPIO support"
- depends on GPIOLIB && ARM
- help
- This driver supports the CPLD egpio chip present on
- several HTC phones. It provides basic support for input
- pins, output pins, and irqs.
-
config HTC_PASIC3
tristate "HTC PASIC3 LED/DS1WM chip support"
select MFD_CORE
@@ -852,13 +862,13 @@ config MFD_RC5T583
different functionality of the device.
config MFD_RK808
- tristate "Rockchip RK808 Power Management chip"
+ tristate "Rockchip RK808/RK818 Power Management Chip"
depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
- If you say yes here you get support for the RK808
+ If you say yes here you get support for the RK808 and RK818
Power Management chips.
This driver provides common support for accessing the device
through I2C interface. The device supports multiple sub-devices
@@ -1214,6 +1224,7 @@ config MFD_TPS65217
depends on I2C
select MFD_CORE
select REGMAP_I2C
+ select IRQ_DOMAIN
help
If you say yes here you get support for the TPS65217 series of
Power Management / White LED chips.
@@ -1224,6 +1235,20 @@ config MFD_TPS65217
This driver can also be built as a module. If so, the module
will be called tps65217.
+config MFD_TI_LP873X
+ tristate "TI LP873X Power Management IC"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ If you say yes here then you get support for the LP873X series of
+ Power Management Integrated Circuits (PMIC).
+ These include voltage regulators, thermal protection, and configurable
+ General Purpose Outputs (GPO) that are used in portable devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called lp873x.
+
config MFD_TPS65218
tristate "TI TPS65218 Power Management chips"
depends on I2C
@@ -1549,6 +1574,7 @@ config MFD_WM8350
config MFD_WM8350_I2C
bool "Wolfson Microelectronics WM8350 with I2C"
select MFD_WM8350
+ select REGMAP_I2C
depends on I2C=y
help
The WM8350 is an integrated audio and power management
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 2ba3ba35f745..9834e669d985 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -13,15 +13,17 @@ obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
+obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o
+
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
@@ -113,6 +115,8 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-irq.o
obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+
+obj-$(CONFIG_MFD_AC100) += ac100.o
obj-$(CONFIG_MFD_AXP20X) += axp20x.o
obj-$(CONFIG_MFD_AXP20X_I2C) += axp20x-i2c.o
obj-$(CONFIG_MFD_AXP20X_RSB) += axp20x-rsb.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 0aecd7bd35f8..acf6c00b14b9 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -153,14 +153,14 @@ static struct hwreg_cfg hwreg_cfg = {
#define AB8500_NAME_STRING "ab8500"
#define AB8500_ADC_NAME_STRING "gpadc"
-#define AB8500_NUM_BANKS 24
+#define AB8500_NUM_BANKS AB8500_DEBUG_FIELD_LAST
#define AB8500_REV_REG 0x80
static struct ab8500_prcmu_ranges *debug_ranges;
static struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
- [0x0] = {
+ [AB8500_M_FSM_RANK] = {
.num_ranges = 0,
.range = NULL,
},
@@ -315,7 +315,7 @@ static struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
- [0x9] = {
+ [AB8500_RESERVED] = {
.num_ranges = 0,
.range = NULL,
},
@@ -386,24 +386,6 @@ static struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
- [AB8500_DEVELOPMENT] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x00,
- },
- },
- },
- [AB8500_DEBUG] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x05,
- .last = 0x07,
- },
- },
- },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -463,19 +445,29 @@ static struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
- [0x11] = {
- .num_ranges = 0,
- .range = NULL,
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
},
- [0x12] = {
- .num_ranges = 0,
- .range = NULL,
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
},
- [0x13] = {
+ [AB8500_PROD_TEST] = {
.num_ranges = 0,
.range = NULL,
},
- [0x14] = {
+ [AB8500_STE_TEST] = {
.num_ranges = 0,
.range = NULL,
},
@@ -1382,60 +1374,6 @@ void ab8500_dump_all_banks(struct device *dev)
}
}
-/* Space for 500 registers. */
-#define DUMP_MAX_REGS 700
-static struct ab8500_register_dump
-{
- u8 bank;
- u8 reg;
- u8 value;
-} ab8500_complete_register_dump[DUMP_MAX_REGS];
-
-/* This shall only be called upon kernel panic! */
-void ab8500_dump_all_banks_to_mem(void)
-{
- int i, r = 0;
- u8 bank;
- int err = 0;
-
- pr_info("Saving all ABB registers for crash analysis.\n");
-
- for (bank = 0; bank < AB8500_NUM_BANKS; bank++) {
- for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
- u8 reg;
-
- for (reg = debug_ranges[bank].range[i].first;
- reg <= debug_ranges[bank].range[i].last;
- reg++) {
- u8 value;
-
- err = prcmu_abb_read(bank, reg, &value, 1);
-
- if (err < 0)
- goto out;
-
- ab8500_complete_register_dump[r].bank = bank;
- ab8500_complete_register_dump[r].reg = reg;
- ab8500_complete_register_dump[r].value = value;
-
- r++;
-
- if (r >= DUMP_MAX_REGS) {
- pr_err("%s: too many register to dump!\n",
- __func__);
- err = -EINVAL;
- goto out;
- }
- }
- }
- }
-out:
- if (err >= 0)
- pr_info("Saved all ABB registers.\n");
- else
- pr_info("Failed to save all ABB registers.\n");
-}
-
static int ab8500_all_banks_open(struct inode *inode, struct file *file)
{
struct seq_file *s;
@@ -1584,18 +1522,10 @@ static u32 num_interrupts[AB8500_MAX_NR_IRQS];
static u32 num_wake_interrupts[AB8500_MAX_NR_IRQS];
static int num_interrupt_lines;
-bool __attribute__((weak)) suspend_test_wake_cause_interrupt_is_mine(u32 my_int)
-{
- return false;
-}
-
void ab8500_debug_register_interrupt(int line)
{
- if (line < num_interrupt_lines) {
+ if (line < num_interrupt_lines)
num_interrupts[line]++;
- if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500))
- num_wake_interrupts[line]++;
- }
}
static int ab8500_interrupts_print(struct seq_file *s, void *p)
diff --git a/drivers/mfd/ac100.c b/drivers/mfd/ac100.c
new file mode 100644
index 000000000000..9bc69cd7807d
--- /dev/null
+++ b/drivers/mfd/ac100.c
@@ -0,0 +1,137 @@
+/*
+ * MFD core driver for X-Powers' AC100 Audio Codec IC
+ *
+ * The AC100 is a highly integrated audio codec and RTC subsystem designed
+ * for mobile applications. It has 3 I2S/PCM interfaces, a 2-channel DAC,
+ * a 2-channel ADC with 5 inputs and a built-in mixer. The RTC subsystem
+ * has 3 clock outputs.
+ *
+ * The audio codec and RTC parts are completely separate, sharing only the
+ * host interface used to access their registers.
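+ * Both functions are exposed as MFD cells below; the shared regmap lives
+ * in the parent device's driver data, where the sub-drivers can find it.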
+ *
+ * Copyright (2016) Chen-Yu Tsai
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ac100.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/sunxi-rsb.h>
+
+static const struct regmap_range ac100_writeable_ranges[] = {
+ regmap_reg_range(AC100_CHIP_AUDIO_RST, AC100_I2S_SR_CTRL),
+ regmap_reg_range(AC100_I2S1_CLK_CTRL, AC100_I2S1_MXR_GAIN),
+ regmap_reg_range(AC100_I2S2_CLK_CTRL, AC100_I2S2_MXR_GAIN),
+ regmap_reg_range(AC100_I2S3_CLK_CTRL, AC100_I2S3_SIG_PATH_CTRL),
+ regmap_reg_range(AC100_ADC_DIG_CTRL, AC100_ADC_VOL_CTRL),
+ regmap_reg_range(AC100_HMIC_CTRL1, AC100_HMIC_STATUS),
+ regmap_reg_range(AC100_DAC_DIG_CTRL, AC100_DAC_MXR_GAIN),
+ regmap_reg_range(AC100_ADC_APC_CTRL, AC100_LINEOUT_CTRL),
+ regmap_reg_range(AC100_ADC_DAP_L_CTRL, AC100_ADC_DAP_OPT),
+ regmap_reg_range(AC100_DAC_DAP_CTRL, AC100_DAC_DAP_OPT),
+ regmap_reg_range(AC100_ADC_DAP_ENA, AC100_DAC_DAP_ENA),
+ regmap_reg_range(AC100_SRC1_CTRL1, AC100_SRC1_CTRL2),
+ regmap_reg_range(AC100_SRC2_CTRL1, AC100_SRC2_CTRL2),
+ regmap_reg_range(AC100_CLK32K_ANALOG_CTRL, AC100_CLKOUT_CTRL3),
+ regmap_reg_range(AC100_RTC_RST, AC100_RTC_UPD),
+ regmap_reg_range(AC100_ALM_INT_ENA, AC100_ALM_INT_STA),
+ regmap_reg_range(AC100_ALM_SEC, AC100_RTC_GP(15)),
+};
+
+static const struct regmap_range ac100_volatile_ranges[] = {
+ regmap_reg_range(AC100_CHIP_AUDIO_RST, AC100_PLL_CTRL2),
+ regmap_reg_range(AC100_HMIC_STATUS, AC100_HMIC_STATUS),
+ regmap_reg_range(AC100_ADC_DAP_L_STA, AC100_ADC_DAP_L_STA),
+ regmap_reg_range(AC100_SRC1_CTRL1, AC100_SRC1_CTRL1),
+ regmap_reg_range(AC100_SRC1_CTRL3, AC100_SRC2_CTRL1),
+ regmap_reg_range(AC100_SRC2_CTRL3, AC100_SRC2_CTRL4),
+ regmap_reg_range(AC100_RTC_RST, AC100_RTC_RST),
+ regmap_reg_range(AC100_RTC_SEC, AC100_ALM_INT_STA),
+ regmap_reg_range(AC100_ALM_SEC, AC100_ALM_UPD),
+};
+
+static const struct regmap_access_table ac100_writeable_table = {
+ .yes_ranges = ac100_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ac100_writeable_ranges),
+};
+
+static const struct regmap_access_table ac100_volatile_table = {
+ .yes_ranges = ac100_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ac100_volatile_ranges),
+};
+
+static const struct regmap_config ac100_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .wr_table = &ac100_writeable_table,
+ .volatile_table = &ac100_volatile_table,
+ .max_register = AC100_RTC_GP(15),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct mfd_cell ac100_cells[] = {
+ {
+ .name = "ac100-codec",
+ .of_compatible = "x-powers,ac100-codec",
+ }, {
+ .name = "ac100-rtc",
+ .of_compatible = "x-powers,ac100-rtc",
+ },
+};
+
+static int ac100_rsb_probe(struct sunxi_rsb_device *rdev)
+{
+ struct ac100_dev *ac100;
+ int ret;
+
+ ac100 = devm_kzalloc(&rdev->dev, sizeof(*ac100), GFP_KERNEL);
+ if (!ac100)
+ return -ENOMEM;
+
+ ac100->dev = &rdev->dev;
+ sunxi_rsb_device_set_drvdata(rdev, ac100);
+
+ ac100->regmap = devm_regmap_init_sunxi_rsb(rdev, &ac100_regmap_config);
+ if (IS_ERR(ac100->regmap)) {
+ ret = PTR_ERR(ac100->regmap);
+ dev_err(ac100->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(ac100->dev, PLATFORM_DEVID_NONE, ac100_cells,
+ ARRAY_SIZE(ac100_cells), NULL, 0, NULL);
+ if (ret) {
+ dev_err(ac100->dev, "failed to add MFD devices: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ac100_of_match[] = {
+ { .compatible = "x-powers,ac100" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ac100_of_match);
+
+static struct sunxi_rsb_driver ac100_rsb_driver = {
+ .driver = {
+ .name = "ac100",
+ .of_match_table = of_match_ptr(ac100_of_match),
+ },
+ .probe = ac100_rsb_probe,
+};
+module_sunxi_rsb_driver(ac100_rsb_driver);
+
+MODULE_DESCRIPTION("Audio codec MFD core driver for AC100");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index 10c6d2da8822..a4024d91da01 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -23,6 +23,7 @@ static const struct mfd_cell act8945a_devs[] = {
},
{
.name = "act8945a-charger",
+ .of_compatible = "active-semi,act8945a-charger",
},
};
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
index c05aa4ff57fd..06e1f7fc5605 100644
--- a/drivers/mfd/altera-a10sr.c
+++ b/drivers/mfd/altera-a10sr.c
@@ -1,4 +1,8 @@
/*
+ * Altera Arria10 DevKit System Resource MFD Driver
+ *
+ * Author: Thor Thayer <tthayer@opensource.altera.com>
+ *
* Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify it
@@ -20,7 +24,7 @@
#include <linux/mfd/altera-a10sr.h>
#include <linux/mfd/core.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
@@ -94,7 +98,7 @@ static bool altr_a10sr_reg_volatile(struct device *dev, unsigned int reg)
}
}
-const struct regmap_config altr_a10sr_regmap_config = {
+static const struct regmap_config altr_a10sr_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -152,7 +156,6 @@ static const struct of_device_id altr_a10sr_spi_of_match[] = {
{ .compatible = "altr,a10sr" },
{ },
};
-MODULE_DEVICE_TABLE(of, altr_a10sr_spi_of_match);
static struct spi_driver altr_a10sr_spi_driver = {
.probe = altr_a10sr_spi_probe,
@@ -161,9 +164,4 @@ static struct spi_driver altr_a10sr_spi_driver = {
.of_match_table = of_match_ptr(altr_a10sr_spi_of_match),
},
};
-
-module_spi_driver(altr_a10sr_spi_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>");
-MODULE_DESCRIPTION("Altera Arria10 DevKit System Resource MFD Driver");
+builtin_driver(altr_a10sr_spi_driver, spi_register_driver)
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index e4f97b3c824b..41767f7239bb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
@@ -49,7 +50,15 @@ int arizona_clk32k_enable(struct arizona *arizona)
case ARIZONA_32KZ_MCLK1:
ret = pm_runtime_get_sync(arizona->dev);
if (ret != 0)
- goto out;
+ goto err_ref;
+ ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
+ if (ret != 0)
+ goto err_pm;
+ break;
+ case ARIZONA_32KZ_MCLK2:
+ ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
+ if (ret != 0)
+ goto err_ref;
break;
}
@@ -58,7 +67,9 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
-out:
+err_pm:
+ pm_runtime_put_sync(arizona->dev);
+err_ref:
if (ret != 0)
arizona->clk32k_ref--;
@@ -83,6 +94,10 @@ int arizona_clk32k_disable(struct arizona *arizona)
switch (arizona->pdata.clk32k_src) {
case ARIZONA_32KZ_MCLK1:
pm_runtime_put_sync(arizona->dev);
+ clk_disable_unprepare(arizona->mclk[ARIZONA_MCLK1]);
+ break;
+ case ARIZONA_32KZ_MCLK2:
+ clk_disable_unprepare(arizona->mclk[ARIZONA_MCLK2]);
break;
}
}
@@ -735,7 +750,7 @@ static int arizona_suspend(struct device *dev)
return 0;
}
-static int arizona_suspend_late(struct device *dev)
+static int arizona_suspend_noirq(struct device *dev)
{
struct arizona *arizona = dev_get_drvdata(dev);
@@ -759,7 +774,7 @@ static int arizona_resume(struct device *dev)
{
struct arizona *arizona = dev_get_drvdata(dev);
- dev_dbg(arizona->dev, "Late resume, reenabling IRQ\n");
+ dev_dbg(arizona->dev, "Resume, reenabling IRQ\n");
enable_irq(arizona->irq);
return 0;
@@ -771,10 +786,8 @@ const struct dev_pm_ops arizona_pm_ops = {
arizona_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(arizona_suspend, arizona_resume)
-#ifdef CONFIG_PM_SLEEP
- .suspend_late = arizona_suspend_late,
- .resume_noirq = arizona_resume_noirq,
-#endif
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(arizona_suspend_noirq,
+ arizona_resume_noirq)
};
EXPORT_SYMBOL_GPL(arizona_pm_ops);
@@ -790,35 +803,25 @@ unsigned long arizona_of_get_type(struct device *dev)
}
EXPORT_SYMBOL_GPL(arizona_of_get_type);
-int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
- bool mandatory)
-{
- int gpio;
-
- gpio = of_get_named_gpio(arizona->dev->of_node, prop, 0);
- if (gpio < 0) {
- if (mandatory)
- dev_err(arizona->dev,
- "Mandatory DT gpio %s missing/malformed: %d\n",
- prop, gpio);
-
- gpio = 0;
- }
-
- return gpio;
-}
-EXPORT_SYMBOL_GPL(arizona_of_get_named_gpio);
-
static int arizona_of_get_core_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
struct property *prop;
const __be32 *cur;
u32 val;
+ u32 pdm_val[ARIZONA_MAX_PDM_SPK];
int ret, i;
int count = 0;
- pdata->reset = arizona_of_get_named_gpio(arizona, "wlf,reset", true);
+ pdata->reset = of_get_named_gpio(arizona->dev->of_node, "wlf,reset", 0);
+ if (pdata->reset == -EPROBE_DEFER) {
+ return pdata->reset;
+ } else if (pdata->reset < 0) {
+ dev_err(arizona->dev, "Reset GPIO missing/malformed: %d\n",
+ pdata->reset);
+
+ pdata->reset = 0;
+ }
ret = of_property_read_u32_array(arizona->dev->of_node,
"wlf,gpio-defaults",
@@ -871,6 +874,35 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
count++;
}
+ count = 0;
+ of_property_for_each_u32(arizona->dev->of_node,
+ "wlf,max-channels-clocked",
+ prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->max_channels_clocked))
+ break;
+
+ pdata->max_channels_clocked[count] = val;
+ count++;
+ }
+
+ ret = of_property_read_u32_array(arizona->dev->of_node,
+ "wlf,spk-fmt",
+ pdm_val,
+ ARRAY_SIZE(pdm_val));
+
+ if (ret >= 0)
+ for (count = 0; count < ARRAY_SIZE(pdata->spk_fmt); ++count)
+ pdata->spk_fmt[count] = pdm_val[count];
+
+ ret = of_property_read_u32_array(arizona->dev->of_node,
+ "wlf,spk-mute",
+ pdm_val,
+ ARRAY_SIZE(pdm_val));
+
+ if (ret >= 0)
+ for (count = 0; count < ARRAY_SIZE(pdata->spk_mute); ++count)
+ pdata->spk_mute[count] = pdm_val[count];
+
return 0;
}
@@ -1000,6 +1032,7 @@ static const struct mfd_cell wm8998_devs[] = {
int arizona_dev_init(struct arizona *arizona)
{
+ const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
unsigned int reg, val, mask;
@@ -1010,11 +1043,24 @@ int arizona_dev_init(struct arizona *arizona)
dev_set_drvdata(arizona->dev, arizona);
mutex_init(&arizona->clk_lock);
- if (dev_get_platdata(arizona->dev))
+ if (dev_get_platdata(arizona->dev)) {
memcpy(&arizona->pdata, dev_get_platdata(arizona->dev),
sizeof(arizona->pdata));
- else
- arizona_of_get_core_pdata(arizona);
+ } else {
+ ret = arizona_of_get_core_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(arizona->mclk) != ARRAY_SIZE(mclk_name));
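+ /* The MCLKs are optional; leave the handle NULL so the clk calls become no-ops */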
+ for (i = 0; i < ARRAY_SIZE(arizona->mclk); i++) {
+ arizona->mclk[i] = devm_clk_get(arizona->dev, mclk_name[i]);
+ if (IS_ERR(arizona->mclk[i])) {
+ dev_info(arizona->dev, "Failed to get %s: %ld\n",
+ mclk_name[i], PTR_ERR(arizona->mclk[i]));
+ arizona->mclk[i] = NULL;
+ }
+ }
regcache_cache_only(arizona->regmap, true);
@@ -1035,7 +1081,7 @@ int arizona_dev_init(struct arizona *arizona)
default:
dev_err(arizona->dev, "Unknown device type %d\n",
arizona->type);
- return -EINVAL;
+ return -ENODEV;
}
/* Mark DCVDD as external, LDO1 driver will clear if internal */
@@ -1121,6 +1167,7 @@ int arizona_dev_init(struct arizona *arizona)
break;
default:
dev_err(arizona->dev, "Unknown device ID: %x\n", reg);
+ ret = -ENODEV;
goto err_reset;
}
@@ -1280,12 +1327,14 @@ int arizona_dev_init(struct arizona *arizona)
break;
default:
dev_err(arizona->dev, "Unknown device ID %x\n", reg);
+ ret = -ENODEV;
goto err_reset;
}
if (!subdevs) {
dev_err(arizona->dev,
"No kernel support for device ID %x\n", reg);
+ ret = -ENODEV;
goto err_reset;
}
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index eca7ea69b81c..4b15b0840f16 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -50,8 +50,9 @@ static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
if (reg <= ATMEL_HLCDC_DIS) {
u32 status;
- readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
- !(status & ATMEL_HLCDC_SIP), 1, 100);
+ readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
+ status, !(status & ATMEL_HLCDC_SIP),
+ 1, 100);
}
writel(val, hregmap->regs + reg);
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
index a407527bcd09..a732cb50bcff 100644
--- a/drivers/mfd/axp20x-rsb.c
+++ b/drivers/mfd/axp20x-rsb.c
@@ -61,6 +61,7 @@ static int axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
static const struct of_device_id axp20x_rsb_of_match[] = {
{ .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
{ .compatible = "x-powers,axp809", .data = (void *)AXP809_ID },
{ },
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index fd80b0981f0f..ba130be32e61 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -38,6 +38,7 @@ static const char * const axp20x_model_names[] = {
"AXP221",
"AXP223",
"AXP288",
+ "AXP806",
"AXP809",
};
@@ -129,6 +130,27 @@ static const struct regmap_access_table axp288_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(axp288_volatile_ranges),
};
+static const struct regmap_range axp806_writeable_ranges[] = {
+ regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_DATACACHE(3)),
+ regmap_reg_range(AXP806_PWR_OUT_CTRL1, AXP806_CLDO3_V_CTRL),
+ regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ2_EN),
+ regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
+};
+
+static const struct regmap_range axp806_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
+};
+
+static const struct regmap_access_table axp806_writeable_table = {
+ .yes_ranges = axp806_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp806_writeable_ranges),
+};
+
+static const struct regmap_access_table axp806_volatile_table = {
+ .yes_ranges = axp806_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(axp806_volatile_ranges),
+};
+
static struct resource axp152_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
@@ -278,6 +300,15 @@ static const struct regmap_config axp288_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_config axp806_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .wr_table = &axp806_writeable_table,
+ .volatile_table = &axp806_volatile_table,
+ .max_register = AXP806_VREF_TEMP_WARN_L,
+ .cache_type = REGCACHE_RBTREE,
+};
+
#define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \
[_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
@@ -409,6 +440,21 @@ static const struct regmap_irq axp288_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1),
};
+static const struct regmap_irq axp806_regmap_irqs[] = {
+ INIT_REGMAP_IRQ(AXP806, DIE_TEMP_HIGH_LV1, 0, 0),
+ INIT_REGMAP_IRQ(AXP806, DIE_TEMP_HIGH_LV2, 0, 1),
+ INIT_REGMAP_IRQ(AXP806, DCDCA_V_LOW, 0, 3),
+ INIT_REGMAP_IRQ(AXP806, DCDCB_V_LOW, 0, 4),
+ INIT_REGMAP_IRQ(AXP806, DCDCC_V_LOW, 0, 5),
+ INIT_REGMAP_IRQ(AXP806, DCDCD_V_LOW, 0, 6),
+ INIT_REGMAP_IRQ(AXP806, DCDCE_V_LOW, 0, 7),
+ INIT_REGMAP_IRQ(AXP806, PWROK_LONG, 1, 0),
+ INIT_REGMAP_IRQ(AXP806, PWROK_SHORT, 1, 1),
+ INIT_REGMAP_IRQ(AXP806, WAKEUP, 1, 4),
+ INIT_REGMAP_IRQ(AXP806, PWROK_FALL, 1, 5),
+ INIT_REGMAP_IRQ(AXP806, PWROK_RISE, 1, 6),
+};
+
static const struct regmap_irq axp809_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP809, ACIN_OVER_V, 0, 7),
INIT_REGMAP_IRQ(AXP809, ACIN_PLUGIN, 0, 6),
@@ -494,6 +540,18 @@ static const struct regmap_irq_chip axp288_regmap_irq_chip = {
};
+static const struct regmap_irq_chip axp806_regmap_irq_chip = {
+ .name = "axp806",
+ .status_base = AXP20X_IRQ1_STATE,
+ .ack_base = AXP20X_IRQ1_STATE,
+ .mask_base = AXP20X_IRQ1_EN,
+ .mask_invert = true,
+ .init_ack_masked = true,
+ .irqs = axp806_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(axp806_regmap_irqs),
+ .num_regs = 2,
+};
+
static const struct regmap_irq_chip axp809_regmap_irq_chip = {
.name = "axp809",
.status_base = AXP20X_IRQ1_STATE,
@@ -508,6 +566,9 @@ static const struct regmap_irq_chip axp809_regmap_irq_chip = {
static struct mfd_cell axp20x_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp209-gpio",
+ }, {
.name = "axp20x-pek",
.num_resources = ARRAY_SIZE(axp20x_pek_resources),
.resources = axp20x_pek_resources,
@@ -660,12 +721,20 @@ static struct mfd_cell axp288_cells[] = {
},
};
+static struct mfd_cell axp806_cells[] = {
+ {
+ .id = 2,
+ .name = "axp20x-regulator",
+ },
+};
+
static struct mfd_cell axp809_cells[] = {
{
.name = "axp20x-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
}, {
+ .id = 1,
.name = "axp20x-regulator",
},
};
@@ -732,6 +801,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_cfg = &axp288_regmap_config;
axp20x->regmap_irq_chip = &axp288_regmap_irq_chip;
break;
+ case AXP806_ID:
+ axp20x->nr_cells = ARRAY_SIZE(axp806_cells);
+ axp20x->cells = axp806_cells;
+ axp20x->regmap_cfg = &axp806_regmap_config;
+ axp20x->regmap_irq_chip = &axp806_regmap_irq_chip;
+ break;
case AXP809_ID:
axp20x->nr_cells = ARRAY_SIZE(axp809_cells);
axp20x->cells = axp809_cells;
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 0eee63542038..abd83424b498 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/cros_ec.h>
+#include <asm/unaligned.h>
#define CROS_EC_DEV_EC_INDEX 0
#define CROS_EC_DEV_PD_INDEX 1
@@ -49,11 +50,28 @@ static const struct mfd_cell ec_pd_cell = {
.pdata_size = sizeof(pd_p),
};
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ int ret;
+
+ if (device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+
+ ret = cros_ec_get_next_event(ec_dev);
+ if (ret > 0)
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 0, ec_dev);
+ return IRQ_HANDLED;
+}
+
int cros_ec_register(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
int err = 0;
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
+
ec_dev->max_request = sizeof(struct ec_params_hello);
ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
ec_dev->max_passthru = 0;
@@ -70,13 +88,24 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
cros_ec_query_all(ec_dev);
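+ /* If an IRQ is provided, use it to deliver EC events through the notifier chain */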
+ if (ec_dev->irq) {
+ err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d",
+ ec_dev->irq, err);
+ return err;
+ }
+ }
+
err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1,
NULL, ec_dev->irq, NULL);
if (err) {
dev_err(dev,
"Failed to register Embedded Controller subdevice %d\n",
err);
- return err;
+ goto fail_mfd;
}
if (ec_dev->max_passthru) {
@@ -94,7 +123,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
dev_err(dev,
"Failed to register Power Delivery subdevice %d\n",
err);
- return err;
+ goto fail_mfd;
}
}
@@ -103,13 +132,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (err) {
mfd_remove_devices(dev);
dev_err(dev, "Failed to register sub-devices\n");
- return err;
+ goto fail_mfd;
}
}
dev_info(dev, "Chrome EC device registered\n");
return 0;
+
+fail_mfd:
+ if (ec_dev->irq)
+ free_irq(ec_dev->irq, ec_dev);
+ return err;
}
EXPORT_SYMBOL(cros_ec_register);
@@ -136,13 +170,31 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
}
EXPORT_SYMBOL(cros_ec_suspend);
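+/* Flush events that queued up while the EC host was suspended, flagging them as such */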
+static void cros_ec_drain_events(struct cros_ec_device *ec_dev)
+{
+ while (cros_ec_get_next_event(ec_dev) > 0)
+ blocking_notifier_call_chain(&ec_dev->event_notifier,
+ 1, ec_dev);
+}
+
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
enable_irq(ec_dev->irq);
+ /*
+ * In some cases we need to identify events that occurred during suspend
+ * when the EC is not a wake source. For example, keypresses during
+ * suspend should be discarded if they did not wake the system.
+ *
+ * If the EC is not a wake source, drain the event queue and mark the
+ * events as "queued during suspend".
+ */
if (ec_dev->wake_enabled) {
disable_irq_wake(ec_dev->irq);
ec_dev->wake_enabled = 0;
+ } else {
+ cros_ec_drain_events(ec_dev);
}
return 0;
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index ebe9b9477cb2..a518832ed5f5 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -366,7 +366,6 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
struct cros_ec_command *ec_msg)
{
- struct ec_host_request *request;
struct ec_host_response *response;
struct cros_ec_spi *ec_spi = ec_dev->priv;
struct spi_transfer trans, trans_delay;
@@ -378,7 +377,6 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
int ret = 0, final_ret;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
- request = (struct ec_host_request *)ec_dev->dout;
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index c0bf68a3e614..a88c2065d8ab 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -167,6 +167,7 @@ static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
case DA9052_EVENT_B_REG:
case DA9052_EVENT_C_REG:
case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
case DA9052_IRQ_MASK_A_REG:
case DA9052_IRQ_MASK_B_REG:
case DA9052_IRQ_MASK_C_REG:
@@ -541,6 +542,52 @@ const struct regmap_config da9052_regmap_config = {
};
EXPORT_SYMBOL_GPL(da9052_regmap_config);
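+/*
+ * Report any fault-log flags left over from the previous power cycle and
+ * clear them by writing the FAULT_LOG register back.
+ */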
+static int da9052_clear_fault_log(struct da9052 *da9052)
+{
+ int ret = 0;
+ int fault_log = 0;
+
+ fault_log = da9052_reg_read(da9052, DA9052_FAULTLOG_REG);
+ if (fault_log < 0) {
+ dev_err(da9052->dev,
+ "Cannot read FAULT_LOG %d\n", fault_log);
+ return fault_log;
+ }
+
+ if (fault_log) {
+ if (fault_log & DA9052_FAULTLOG_TWDERROR)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: TWD_ERROR\n");
+ if (fault_log & DA9052_FAULTLOG_VDDFAULT)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: VDD_FAULT\n");
+ if (fault_log & DA9052_FAULTLOG_VDDSTART)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: VDD_START\n");
+ if (fault_log & DA9052_FAULTLOG_TEMPOVER)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: TEMP_OVER\n");
+ if (fault_log & DA9052_FAULTLOG_KEYSHUT)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: KEY_SHUT\n");
+ if (fault_log & DA9052_FAULTLOG_NSDSET)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: nSD_SHUT\n");
+ if (fault_log & DA9052_FAULTLOG_WAITSET)
+ dev_dbg(da9052->dev,
+ "Fault log entry detected: WAIT_SHUT\n");
+
+ ret = da9052_reg_write(da9052,
+ DA9052_FAULTLOG_REG,
+ 0xFF);
+ if (ret < 0)
+ dev_err(da9052->dev,
+ "Cannot reset FAULT_LOG values %d\n", ret);
+ }
+
+ return ret;
+}
+
int da9052_device_init(struct da9052 *da9052, u8 chip_id)
{
struct da9052_pdata *pdata = dev_get_platdata(da9052->dev);
@@ -549,6 +596,10 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
mutex_init(&da9052->auxadc_lock);
init_completion(&da9052->done);
+ ret = da9052_clear_fault_log(da9052);
+ if (ret < 0)
+ dev_warn(da9052->dev, "Cannot clear FAULT_LOG\n");
+
if (pdata && pdata->init != NULL)
pdata->init(da9052);
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index af841c165787..6c2870d4e754 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -4,8 +4,8 @@
* Copyright 2012 Dialog Semiconductors Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>,
- * Michal Hajduk <michal.hajduk@diasemi.com>
+ * Author: Krystian Garbaciak, Dialog Semiconductor
+ * Author: Michal Hajduk, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -242,5 +242,6 @@ void da9063_device_exit(struct da9063 *da9063)
}
MODULE_DESCRIPTION("PMIC driver for Dialog DA9063");
-MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>, Michal Hajduk <michal.hajduk@diasemi.com>");
+MODULE_AUTHOR("Krystian Garbaciak");
+MODULE_AUTHOR("Michal Hajduk");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 73901084945f..981805a2c521 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -3,7 +3,7 @@
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index 7e903fcb8813..207bbfe55449 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -3,7 +3,7 @@
* Copyright 2012 Dialog Semiconductor Ltd.
* Copyright 2013 Philipp Zabel, Pengutronix
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 388e268b9bcf..ca38a6a14110 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -938,25 +938,6 @@ int db8500_prcmu_get_ddr_opp(void)
return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
-/**
- * db8500_set_ddr_opp - set the appropriate DDR OPP
- * @opp: The new DDR operating point to which transition is to be made
- * Returns: 0 on success, non-zero on failure
- *
- * This function sets the operating point of the DDR.
- */
-static bool enable_set_ddr_opp;
-int db8500_prcmu_set_ddr_opp(u8 opp)
-{
- if (opp < DDR_100_OPP || opp > DDR_25_OPP)
- return -EINVAL;
- /* Changing the DDR OPP can hang the hardware pre-v21 */
- if (enable_set_ddr_opp)
- writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
-
- return 0;
-}
-
/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
static void request_even_slower_clocks(bool enable)
{
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 270e19c0bba1..86eca614507b 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -209,7 +209,7 @@ static struct device *add_child(struct i2c_client *client, const char *name,
status = platform_device_add_data(pdev, pdata, pdata_len);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add platform_data\n");
- goto err;
+ goto put_device;
}
}
@@ -222,19 +222,20 @@ static struct device *add_child(struct i2c_client *client, const char *name,
status = platform_device_add_resources(pdev, &r, 1);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add irq\n");
- goto err;
+ goto put_device;
}
}
status = platform_device_add(pdev);
+ if (status)
+ goto put_device;
-err:
- if (status < 0) {
- platform_device_put(pdev);
- dev_err(&client->dev, "can't add %s dev\n", name);
- return ERR_PTR(status);
- }
return &pdev->dev;
+
+put_device:
+ platform_device_put(pdev);
+ dev_err(&client->dev, "failed to add device %s\n", name);
+ return ERR_PTR(status);
}
static int add_children(struct i2c_client *client)
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
new file mode 100644
index 000000000000..2e064fb8826f
--- /dev/null
+++ b/drivers/mfd/exynos-lpass.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2015 - 2016 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Inha Song <ideal.song@samsung.com>
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC series Low Power Audio Subsystem driver.
+ *
+ * This module provides a regmap for the Top SFR region and instantiates
+ * devices for IP blocks such as the DMAC, I2S and UART.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/exynos5-pmu.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+/* LPASS Top register definitions */
+#define SFR_LPASS_CORE_SW_RESET 0x08
+#define LPASS_SB_SW_RESET BIT(11)
+#define LPASS_UART_SW_RESET BIT(10)
+#define LPASS_PCM_SW_RESET BIT(9)
+#define LPASS_I2S_SW_RESET BIT(8)
+#define LPASS_WDT1_SW_RESET BIT(4)
+#define LPASS_WDT0_SW_RESET BIT(3)
+#define LPASS_TIMER_SW_RESET BIT(2)
+#define LPASS_MEM_SW_RESET BIT(1)
+#define LPASS_DMA_SW_RESET BIT(0)
+
+#define SFR_LPASS_INTR_CA5_MASK 0x48
+#define SFR_LPASS_INTR_CPU_MASK 0x58
+#define LPASS_INTR_APM BIT(9)
+#define LPASS_INTR_MIF BIT(8)
+#define LPASS_INTR_TIMER BIT(7)
+#define LPASS_INTR_DMA BIT(6)
+#define LPASS_INTR_GPIO BIT(5)
+#define LPASS_INTR_I2S BIT(4)
+#define LPASS_INTR_PCM BIT(3)
+#define LPASS_INTR_SLIMBUS BIT(2)
+#define LPASS_INTR_UART BIT(1)
+#define LPASS_INTR_SFR BIT(0)
+
+struct exynos_lpass {
+ /* pointer to the Power Management Unit regmap */
+ struct regmap *pmu;
+ /* pointer to the LPASS TOP regmap */
+ struct regmap *top;
+};
+
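+/*
+ * Toggle the given reset bit(s) low and then high again, with a short
+ * delay in between, to reset the selected LPASS block.
+ */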
+static void exynos_lpass_core_sw_reset(struct exynos_lpass *lpass, int mask)
+{
+ unsigned int val = 0;
+
+ regmap_read(lpass->top, SFR_LPASS_CORE_SW_RESET, &val);
+
+ val &= ~mask;
+ regmap_write(lpass->top, SFR_LPASS_CORE_SW_RESET, val);
+
+ usleep_range(100, 150);
+
+ val |= mask;
+ regmap_write(lpass->top, SFR_LPASS_CORE_SW_RESET, val);
+}
+
+static void exynos_lpass_enable(struct exynos_lpass *lpass)
+{
+ /* Unmask the SFR, DMA and I2S interrupts */
+ regmap_write(lpass->top, SFR_LPASS_INTR_CA5_MASK,
+ LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
+
+ regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
+ LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
+
+ /* Activate related PADs from retention state */
+ regmap_write(lpass->pmu, EXYNOS5433_PAD_RETENTION_AUD_OPTION,
+ EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR);
+
+ exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
+ exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
+ exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
+}
+
+static void exynos_lpass_disable(struct exynos_lpass *lpass)
+{
+ /* Mask any unmasked IP interrupt sources */
+ regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, 0);
+ regmap_write(lpass->top, SFR_LPASS_INTR_CA5_MASK, 0);
+
+ /* Deactivate related PADs from retention state */
+ regmap_write(lpass->pmu, EXYNOS5433_PAD_RETENTION_AUD_OPTION, 0);
+}
+
+static const struct regmap_config exynos_lpass_reg_conf = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xfc,
+ .fast_io = true,
+};
+
+static int exynos_lpass_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_lpass *lpass;
+ void __iomem *base_top;
+ struct resource *res;
+
+ lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
+ if (!lpass)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base_top = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ lpass->top = regmap_init_mmio(dev, base_top,
+ &exynos_lpass_reg_conf);
+ if (IS_ERR(lpass->top)) {
+ dev_err(dev, "LPASS top regmap initialization failed\n");
+ return PTR_ERR(lpass->top);
+ }
+
+ lpass->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,pmu-syscon");
+ if (IS_ERR(lpass->pmu)) {
+ dev_err(dev, "Failed to lookup PMU regmap\n");
+ return PTR_ERR(lpass->pmu);
+ }
+
+ platform_set_drvdata(pdev, lpass);
+ exynos_lpass_enable(lpass);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int __maybe_unused exynos_lpass_suspend(struct device *dev)
+{
+ struct exynos_lpass *lpass = dev_get_drvdata(dev);
+
+ exynos_lpass_disable(lpass);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_lpass_resume(struct device *dev)
+{
+ struct exynos_lpass *lpass = dev_get_drvdata(dev);
+
+ exynos_lpass_enable(lpass);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(lpass_pm_ops, exynos_lpass_suspend,
+ exynos_lpass_resume);
+
+static const struct of_device_id exynos_lpass_of_match[] = {
+ { .compatible = "samsung,exynos5433-lpass" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_lpass_of_match);
+
+static struct platform_driver exynos_lpass_driver = {
+ .driver = {
+ .name = "exynos-lpass",
+ .pm = &lpass_pm_ops,
+ .of_match_table = exynos_lpass_of_match,
+ },
+ .probe = exynos_lpass_probe,
+};
+module_platform_driver(exynos_lpass_driver);
+
+MODULE_DESCRIPTION("Samsung Low Power Audio Subsystem driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 7ddc4a9563ea..6bf8d643d942 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -52,6 +52,18 @@ static const struct intel_lpss_platform_info bxt_i2c_info = {
.properties = bxt_i2c_properties,
};
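+/* Apollo Lake I2C timing properties: SDA hold time and signal fall times, in ns */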
+static struct property_entry apl_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 207),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static const struct intel_lpss_platform_info apl_i2c_info = {
+ .clk_rate = 133000000,
+ .properties = apl_i2c_properties,
+};
+
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
/* SPT */
{ "INT3446", (kernel_ulong_t)&spt_i2c_info },
@@ -61,7 +73,7 @@ static const struct acpi_device_id intel_lpss_acpi_ids[] = {
{ "80860ABC", (kernel_ulong_t)&bxt_info },
{ "80860AC2", (kernel_ulong_t)&bxt_info },
/* APL */
- { "80865AAC", (kernel_ulong_t)&bxt_i2c_info },
+ { "80865AAC", (kernel_ulong_t)&apl_i2c_info },
{ "80865ABC", (kernel_ulong_t)&bxt_info },
{ "80865AC2", (kernel_ulong_t)&bxt_info },
{ }
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 1d79a3c9370f..3228fd182a99 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -111,6 +111,31 @@ static const struct intel_lpss_platform_info bxt_i2c_info = {
.properties = bxt_i2c_properties,
};
+static struct property_entry apl_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 207),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static const struct intel_lpss_platform_info apl_i2c_info = {
+ .clk_rate = 133000000,
+ .properties = apl_i2c_properties,
+};
+
+static const struct intel_lpss_platform_info kbl_info = {
+ .clk_rate = 120000000,
+};
+
+static const struct intel_lpss_platform_info kbl_uart_info = {
+ .clk_rate = 120000000,
+ .clk_con_id = "baudclk",
+};
+
+static const struct intel_lpss_platform_info kbl_i2c_info = {
+ .clk_rate = 133000000,
+};
+
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -146,14 +171,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
/* APL */
- { PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5ab0), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5ab2), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5ab4), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5ab6), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5ab8), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x5aba), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5ab0), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5ab2), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5ab4), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5ab6), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5ab8), (kernel_ulong_t)&apl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x5aba), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info },
@@ -181,6 +206,16 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
+ /* KBL-H */
+ { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info },
+ { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info },
+ { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 25d486c543cb..2017446c5b4b 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_msic.h>
#include <linux/platform_device.h>
@@ -449,9 +449,4 @@ static struct platform_driver intel_msic_driver = {
.name = "intel_msic",
},
};
-
-module_platform_driver(intel_msic_driver);
-
-MODULE_DESCRIPTION("Driver for Intel MSIC");
-MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_msic_driver);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index b9428767e615..43e54b7e908f 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -47,6 +47,8 @@
#define BXTWC_MIRQLVL1 0x4E0E
#define BXTWC_MPWRTNIRQ 0x4E0F
+#define BXTWC_MIRQLVL1_MCHGR BIT(5)
+
#define BXTWC_MTHRM0IRQ 0x4E12
#define BXTWC_MTHRM1IRQ 0x4E13
#define BXTWC_MTHRM2IRQ 0x4E14
@@ -109,7 +111,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
@@ -143,6 +145,10 @@ static struct resource adc_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_ADC_IRQ, "ADC"),
};
+static struct resource usbc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"),
+};
+
static struct resource charger_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "CHARGER"),
DEFINE_RES_IRQ_NAMED(BXTWC_CHGR1_IRQ, "CHARGER1"),
@@ -170,6 +176,11 @@ static struct mfd_cell bxt_wc_dev[] = {
.resources = thermal_resources,
},
{
+ .name = "bxt_wcove_usbc",
+ .num_resources = ARRAY_SIZE(usbc_resources),
+ .resources = usbc_resources,
+ },
+ {
.name = "bxt_wcove_ext_charger",
.num_resources = ARRAY_SIZE(charger_resources),
.resources = charger_resources,
@@ -403,6 +414,16 @@ static int bxtwc_probe(struct platform_device *pdev)
goto err_sysfs;
}
+ /*
+ * There is a known HW bug: upon reset, BIT 5 of register
+ * BXTWC_CHGR_LVL1_IRQ is 0, which is the expected value. However,
+ * hardware later sets it to 1 (masked) automatically. Work around
+ * this in software by unmasking the bit here so that the charger
+ * interrupt works.
+ */
+ regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1,
+ BXTWC_MIRQLVL1_MCHGR, 0);
+
return 0;
err_sysfs:
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
new file mode 100644
index 000000000000..873c608e6a5d
--- /dev/null
+++ b/drivers/mfd/lp873x.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author: Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+static const struct regmap_config lp873x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LP873X_REG_MAX,
+};
+
+static const struct mfd_cell lp873x_cells[] = {
+ { .name = "lp873x-regulator", },
+ { .name = "lp873x-gpio", },
+};
+
+static int lp873x_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct lp873x *lp873;
+ int ret;
+ unsigned int otpid;
+
+ lp873 = devm_kzalloc(&client->dev, sizeof(*lp873), GFP_KERNEL);
+ if (!lp873)
+ return -ENOMEM;
+
+ lp873->dev = &client->dev;
+
+ lp873->regmap = devm_regmap_init_i2c(client, &lp873x_regmap_config);
+ if (IS_ERR(lp873->regmap)) {
+ ret = PTR_ERR(lp873->regmap);
+ dev_err(lp873->dev,
+ "Failed to initialize register map: %d\n", ret);
+ return ret;
+ }
+
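+ /* Read and record the chip's OTP revision */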
+ ret = regmap_read(lp873->regmap, LP873X_REG_OTP_REV, &otpid);
+ if (ret) {
+ dev_err(lp873->dev, "Failed to read OTP ID\n");
+ return ret;
+ }
+
+ lp873->rev = otpid & LP873X_OTP_REV_OTP_ID;
+
+ i2c_set_clientdata(client, lp873);
+
+ ret = mfd_add_devices(lp873->dev, PLATFORM_DEVID_AUTO, lp873x_cells,
+ ARRAY_SIZE(lp873x_cells), NULL, 0, NULL);
+
+ return ret;
+}
+
+static const struct of_device_id of_lp873x_match_table[] = {
+ { .compatible = "ti,lp8733", },
+ { .compatible = "ti,lp8732", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_lp873x_match_table);
+
+static const struct i2c_device_id lp873x_id_table[] = {
+ { "lp873x", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, lp873x_id_table);
+
+static struct i2c_driver lp873x_driver = {
+ .driver = {
+ .name = "lp873x",
+ .of_match_table = of_lp873x_match_table,
+ },
+ .probe = lp873x_probe,
+ .id_table = lp873x_id_table,
+};
+module_i2c_driver(lp873x_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X chip family Multi-Function Device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6c245128ab2e..6cbe96b28f42 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -569,6 +569,6 @@ static void __exit max14577_i2c_exit(void)
}
module_exit(max14577_i2c_exit);
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 multi-function core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index b95a46d79b9d..326f17b632a7 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -139,7 +139,7 @@ static void max8997_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&max8997->irqlock);
}
-static const inline struct max8997_irq_data *
+inline static const struct max8997_irq_data *
irq_to_max8997_irq(struct max8997_dev *max8997, struct irq_data *data)
{
return &max8997_irqs[data->hwirq];
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1d924d1533c0..7aab376ecb84 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -162,7 +162,7 @@ static const char * const port_modes[] = {
* provided port mode string as per the port_modes table.
* If no match is found it returns -ENODEV
*/
-static const int omap_usbhs_get_dt_port_mode(const char *mode)
+static int omap_usbhs_get_dt_port_mode(const char *mode)
{
int i;
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 1b7ec0870c2a..0e3a2ea25942 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -309,6 +309,7 @@ static const struct regmap_config ssbi_regmap_config = {
};
static const struct of_device_id pm8921_id_table[] = {
+ { .compatible = "qcom,pm8018", },
{ .compatible = "qcom,pm8058", },
{ .compatible = "qcom,pm8921", },
{ }
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 2e44323455dd..52fafea06067 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -21,6 +21,7 @@
#include <linux/mfd/qcom_rpm.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/clk.h>
#include <dt-bindings/mfd/qcom-rpm.h>
@@ -48,6 +49,7 @@ struct qcom_rpm {
struct regmap *ipc_regmap;
unsigned ipc_offset;
unsigned ipc_bit;
+ struct clk *ramclk;
struct completion ack;
struct mutex lock;
@@ -388,11 +390,62 @@ static const struct qcom_rpm_data ipq806x_template = {
.ack_sel_size = 7,
};
+static const struct qcom_rpm_resource mdm9615_rpm_resource_table[] = {
+ [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
+ [QCOM_RPM_SYS_FABRIC_CLK] = { 26, 10, 9, 1 },
+ [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 27, 11, 11, 1 },
+ [QCOM_RPM_SFPB_CLK] = { 28, 12, 12, 1 },
+ [QCOM_RPM_CFPB_CLK] = { 29, 13, 13, 1 },
+ [QCOM_RPM_EBI1_CLK] = { 30, 14, 16, 1 },
+ [QCOM_RPM_APPS_FABRIC_HALT] = { 31, 15, 22, 2 },
+ [QCOM_RPM_APPS_FABRIC_MODE] = { 33, 16, 23, 3 },
+ [QCOM_RPM_APPS_FABRIC_IOCTL] = { 36, 17, 24, 1 },
+ [QCOM_RPM_APPS_FABRIC_ARB] = { 37, 18, 25, 27 },
+ [QCOM_RPM_PM8018_SMPS1] = { 64, 19, 30, 2 },
+ [QCOM_RPM_PM8018_SMPS2] = { 66, 21, 31, 2 },
+ [QCOM_RPM_PM8018_SMPS3] = { 68, 23, 32, 2 },
+ [QCOM_RPM_PM8018_SMPS4] = { 70, 25, 33, 2 },
+ [QCOM_RPM_PM8018_SMPS5] = { 72, 27, 34, 2 },
+ [QCOM_RPM_PM8018_LDO1] = { 74, 29, 35, 2 },
+ [QCOM_RPM_PM8018_LDO2] = { 76, 31, 36, 2 },
+ [QCOM_RPM_PM8018_LDO3] = { 78, 33, 37, 2 },
+ [QCOM_RPM_PM8018_LDO4] = { 80, 35, 38, 2 },
+ [QCOM_RPM_PM8018_LDO5] = { 82, 37, 39, 2 },
+ [QCOM_RPM_PM8018_LDO6] = { 84, 39, 40, 2 },
+ [QCOM_RPM_PM8018_LDO7] = { 86, 41, 41, 2 },
+ [QCOM_RPM_PM8018_LDO8] = { 88, 43, 42, 2 },
+ [QCOM_RPM_PM8018_LDO9] = { 90, 45, 43, 2 },
+ [QCOM_RPM_PM8018_LDO10] = { 92, 47, 44, 2 },
+ [QCOM_RPM_PM8018_LDO11] = { 94, 49, 45, 2 },
+ [QCOM_RPM_PM8018_LDO12] = { 96, 51, 46, 2 },
+ [QCOM_RPM_PM8018_LDO13] = { 98, 53, 47, 2 },
+ [QCOM_RPM_PM8018_LDO14] = { 100, 55, 48, 2 },
+ [QCOM_RPM_PM8018_LVS1] = { 102, 57, 49, 1 },
+ [QCOM_RPM_PM8018_NCP] = { 103, 58, 80, 2 },
+ [QCOM_RPM_CXO_BUFFERS] = { 105, 60, 81, 1 },
+ [QCOM_RPM_USB_OTG_SWITCH] = { 106, 61, 82, 1 },
+ [QCOM_RPM_HDMI_SWITCH] = { 107, 62, 83, 1 },
+ [QCOM_RPM_VOLTAGE_CORNER] = { 109, 64, 87, 1 },
+};
+
+static const struct qcom_rpm_data mdm9615_template = {
+ .version = 3,
+ .resource_table = mdm9615_rpm_resource_table,
+ .n_resources = ARRAY_SIZE(mdm9615_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
+};
+
static const struct of_device_id qcom_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8064", .data = &apq8064_template },
{ .compatible = "qcom,rpm-msm8660", .data = &msm8660_template },
{ .compatible = "qcom,rpm-msm8960", .data = &msm8960_template },
{ .compatible = "qcom,rpm-ipq8064", .data = &ipq806x_template },
+ { .compatible = "qcom,rpm-mdm9615", .data = &mdm9615_template },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
@@ -501,6 +554,20 @@ static int qcom_rpm_probe(struct platform_device *pdev)
mutex_init(&rpm->lock);
init_completion(&rpm->ack);
+ /* Enable message RAM clock */
+ rpm->ramclk = devm_clk_get(&pdev->dev, "ram");
+ if (IS_ERR(rpm->ramclk)) {
+ ret = PTR_ERR(rpm->ramclk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ /*
+ * Fall through in all other cases, as the clock is
+ * optional. (Does not exist on all platforms.)
+ */
+ rpm->ramclk = NULL;
+ }
+ clk_prepare_enable(rpm->ramclk); /* Accepts NULL */
+
irq_ack = platform_get_irq_byname(pdev, "ack");
if (irq_ack < 0) {
dev_err(&pdev->dev, "required ack interrupt missing\n");
@@ -538,6 +605,7 @@ static int qcom_rpm_probe(struct platform_device *pdev)
}
rpm->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(rpm->ipc_regmap))
return PTR_ERR(rpm->ipc_regmap);
@@ -620,7 +688,11 @@ static int qcom_rpm_probe(struct platform_device *pdev)
static int qcom_rpm_remove(struct platform_device *pdev)
{
+ struct qcom_rpm *rpm = dev_get_drvdata(&pdev->dev);
+
of_platform_depopulate(&pdev->dev);
+ clk_disable_unprepare(rpm->ramclk);
+
return 0;
}
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 49d7f624fc94..0f8acc5882a4 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -1,11 +1,15 @@
/*
- * MFD core driver for Rockchip RK808
+ * MFD core driver for Rockchip RK808/RK818
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <zyw@rock-chips.com>
* Author: Zhang Qing <zhangqing@rock-chips.com>
*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ *
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -21,6 +25,7 @@
#include <linux/mfd/rk808.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
struct rk808_reg_data {
@@ -57,6 +62,14 @@ static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
+static const struct regmap_config rk818_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RK818_USB_CTRL_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = rk808_is_volatile_reg,
+};
+
static const struct regmap_config rk808_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -79,11 +92,21 @@ static const struct mfd_cell rk808s[] = {
{
.name = "rk808-rtc",
.num_resources = ARRAY_SIZE(rtc_resources),
- .resources = &rtc_resources[0],
+ .resources = rtc_resources,
},
};
-static const struct rk808_reg_data pre_init_reg[] = {
+static const struct mfd_cell rk818s[] = {
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resources = rtc_resources,
+ },
+};
+
+static const struct rk808_reg_data rk808_pre_init_reg[] = {
{ RK808_BUCK3_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_150MA },
{ RK808_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_200MA },
{ RK808_BOOST_CONFIG_REG, BOOST_ILMIN_MASK, BOOST_ILMIN_100MA },
@@ -94,6 +117,24 @@ static const struct rk808_reg_data pre_init_reg[] = {
VB_LO_SEL_3500MV },
};
+static const struct rk808_reg_data rk818_pre_init_reg[] = {
+ /* improve efficiency */
+ { RK818_BUCK2_CONFIG_REG, BUCK2_RATE_MASK, BUCK_ILMIN_250MA },
+ { RK818_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_250MA },
+ { RK818_BOOST_CONFIG_REG, BOOST_ILMIN_MASK, BOOST_ILMIN_100MA },
+ { RK818_USB_CTRL_REG, RK818_USB_ILIM_SEL_MASK,
+ RK818_USB_ILMIN_2000MA },
+ /* disable the charger when the USB voltage is lower than 3.4V */
+ { RK818_USB_CTRL_REG, RK818_USB_CHG_SD_VSEL_MASK,
+ (0x7 << 4) },
+ /* no action when vref */
+ { RK818_H5V_EN_REG, BIT(1), RK818_REF_RDY_CTRL },
+ /* enable HDMI 5V */
+ { RK818_H5V_EN_REG, BIT(0), RK818_H5V_EN },
+ { RK808_VB_MON_REG, MASK_ALL, VB_LO_ACT |
+ VB_LO_SEL_3500MV },
+};
+
static const struct regmap_irq rk808_irqs[] = {
/* INT_STS */
[RK808_IRQ_VOUT_LO] = {
@@ -136,6 +177,76 @@ static const struct regmap_irq rk808_irqs[] = {
},
};
+static const struct regmap_irq rk818_irqs[] = {
+ /* INT_STS */
+ [RK818_IRQ_VOUT_LO] = {
+ .mask = RK818_IRQ_VOUT_LO_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_VB_LO] = {
+ .mask = RK818_IRQ_VB_LO_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_PWRON] = {
+ .mask = RK818_IRQ_PWRON_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_PWRON_LP] = {
+ .mask = RK818_IRQ_PWRON_LP_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_HOTDIE] = {
+ .mask = RK818_IRQ_HOTDIE_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_RTC_ALARM] = {
+ .mask = RK818_IRQ_RTC_ALARM_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_RTC_PERIOD] = {
+ .mask = RK818_IRQ_RTC_PERIOD_MSK,
+ .reg_offset = 0,
+ },
+ [RK818_IRQ_USB_OV] = {
+ .mask = RK818_IRQ_USB_OV_MSK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [RK818_IRQ_PLUG_IN] = {
+ .mask = RK818_IRQ_PLUG_IN_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_PLUG_OUT] = {
+ .mask = RK818_IRQ_PLUG_OUT_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_CHG_OK] = {
+ .mask = RK818_IRQ_CHG_OK_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_CHG_TE] = {
+ .mask = RK818_IRQ_CHG_TE_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_CHG_TS1] = {
+ .mask = RK818_IRQ_CHG_TS1_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_TS2] = {
+ .mask = RK818_IRQ_TS2_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_CHG_CVTLIM] = {
+ .mask = RK818_IRQ_CHG_CVTLIM_MSK,
+ .reg_offset = 1,
+ },
+ [RK818_IRQ_DISCHG_ILIM] = {
+ .mask = RK818_IRQ_DISCHG_ILIM_MSK,
+ .reg_offset = 1,
+ },
+};
+
static struct regmap_irq_chip rk808_irq_chip = {
.name = "rk808",
.irqs = rk808_irqs,
@@ -148,6 +259,18 @@ static struct regmap_irq_chip rk808_irq_chip = {
.init_ack_masked = true,
};
+static struct regmap_irq_chip rk818_irq_chip = {
+ .name = "rk818",
+ .irqs = rk818_irqs,
+ .num_irqs = ARRAY_SIZE(rk818_irqs),
+ .num_regs = 2,
+ .irq_reg_stride = 2,
+ .status_base = RK818_INT_STS_REG1,
+ .mask_base = RK818_INT_STS_MSK_REG1,
+ .ack_base = RK818_INT_STS_REG1,
+ .init_ack_masked = true,
+};
+
static struct i2c_client *rk808_i2c_client;
static void rk808_device_shutdown(void)
{
@@ -167,55 +290,100 @@ static void rk808_device_shutdown(void)
dev_err(&rk808_i2c_client->dev, "power off error!\n");
}
+static const struct of_device_id rk808_of_match[] = {
+ { .compatible = "rockchip,rk808" },
+ { .compatible = "rockchip,rk818" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rk808_of_match);
+
static int rk808_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device_node *np = client->dev.of_node;
struct rk808 *rk808;
+ const struct rk808_reg_data *pre_init_reg;
+ const struct mfd_cell *cells;
+ int nr_pre_init_regs;
+ int nr_cells;
int pm_off = 0;
int ret;
int i;
- if (!client->irq) {
- dev_err(&client->dev, "No interrupt support, no core IRQ\n");
- return -EINVAL;
- }
-
rk808 = devm_kzalloc(&client->dev, sizeof(*rk808), GFP_KERNEL);
if (!rk808)
return -ENOMEM;
- rk808->regmap = devm_regmap_init_i2c(client, &rk808_regmap_config);
+ rk808->variant = i2c_smbus_read_word_data(client, RK808_ID_MSB);
+ if (rk808->variant < 0) {
+ dev_err(&client->dev, "Failed to read the chip id at 0x%02x\n",
+ RK808_ID_MSB);
+ return rk808->variant;
+ }
+
+ dev_dbg(&client->dev, "Chip id: 0x%x\n", (unsigned int)rk808->variant);
+
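+ /* select the regmap config, IRQ chip, pre-init table and MFD cells for the detected variant */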
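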
+ switch (rk808->variant) {
+ case RK808_ID:
+ rk808->regmap_cfg = &rk808_regmap_config;
+ rk808->regmap_irq_chip = &rk808_irq_chip;
+ pre_init_reg = rk808_pre_init_reg;
+ nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
+ cells = rk808s;
+ nr_cells = ARRAY_SIZE(rk808s);
+ break;
+ case RK818_ID:
+ rk808->regmap_cfg = &rk818_regmap_config;
+ rk808->regmap_irq_chip = &rk818_irq_chip;
+ pre_init_reg = rk818_pre_init_reg;
+ nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
+ cells = rk818s;
+ nr_cells = ARRAY_SIZE(rk818s);
+ break;
+ default:
+ dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
+ rk808->variant);
+ return -EINVAL;
+ }
+
+ rk808->i2c = client;
+ i2c_set_clientdata(client, rk808);
+
+ rk808->regmap = devm_regmap_init_i2c(client, rk808->regmap_cfg);
if (IS_ERR(rk808->regmap)) {
dev_err(&client->dev, "regmap initialization failed\n");
return PTR_ERR(rk808->regmap);
}
- for (i = 0; i < ARRAY_SIZE(pre_init_reg); i++) {
- ret = regmap_update_bits(rk808->regmap, pre_init_reg[i].addr,
- pre_init_reg[i].mask,
- pre_init_reg[i].value);
- if (ret) {
- dev_err(&client->dev,
- "0x%x write err\n", pre_init_reg[i].addr);
- return ret;
- }
+ if (!client->irq) {
+ dev_err(&client->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
}
ret = regmap_add_irq_chip(rk808->regmap, client->irq,
IRQF_ONESHOT, -1,
- &rk808_irq_chip, &rk808->irq_data);
+ rk808->regmap_irq_chip, &rk808->irq_data);
if (ret) {
dev_err(&client->dev, "Failed to add irq_chip %d\n", ret);
return ret;
}
- rk808->i2c = client;
- i2c_set_clientdata(client, rk808);
+ for (i = 0; i < nr_pre_init_regs; i++) {
+ ret = regmap_update_bits(rk808->regmap,
+ pre_init_reg[i].addr,
+ pre_init_reg[i].mask,
+ pre_init_reg[i].value);
+ if (ret) {
+ dev_err(&client->dev,
+ "0x%x write err\n",
+ pre_init_reg[i].addr);
+ return ret;
+ }
+ }
- ret = devm_mfd_add_devices(&client->dev, -1,
- rk808s, ARRAY_SIZE(rk808s), NULL, 0,
- regmap_irq_get_domain(rk808->irq_data));
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ cells, nr_cells, NULL, 0,
+ regmap_irq_get_domain(rk808->irq_data));
if (ret) {
dev_err(&client->dev, "failed to add MFD devices %d\n", ret);
goto err_irq;
@@ -245,14 +413,9 @@ static int rk808_remove(struct i2c_client *client)
return 0;
}
-static const struct of_device_id rk808_of_match[] = {
- { .compatible = "rockchip,rk808" },
- { },
-};
-MODULE_DEVICE_TABLE(of, rk808_of_match);
-
static const struct i2c_device_id rk808_ids[] = {
{ "rk808" },
+ { "rk818" },
{ },
};
MODULE_DEVICE_TABLE(i2c, rk808_ids);
@@ -272,4 +435,5 @@ module_i2c_driver(rk808_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>");
-MODULE_DESCRIPTION("RK808 PMIC driver");
+MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>");
+MODULE_DESCRIPTION("RK808/RK818 PMIC driver");
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbd907d7170e..691dab791f7a 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -46,9 +46,6 @@ static void rtsx_usb_sg_timed_out(unsigned long data)
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
-
- /* we know the cancellation is caused by time-out */
- ucr->current_sg.status = -ETIMEDOUT;
}
static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
@@ -67,12 +64,15 @@ static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
add_timer(&ucr->sg_timer);
usb_sg_wait(&ucr->current_sg);
- del_timer_sync(&ucr->sg_timer);
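+ /* del_timer_sync() returns 0 if the timer already fired, i.e. the
+ * transfer was cancelled by the timeout handler above */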
+ if (!del_timer_sync(&ucr->sg_timer))
+ ret = -ETIMEDOUT;
+ else
+ ret = ucr->current_sg.status;
if (act_len)
*act_len = ucr->current_sg.bytes;
- return ucr->current_sg.status;
+ return ret;
}
int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 65cd0d2a822a..40534352e574 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1001,7 +1001,7 @@ static int sm501_gpio_output(struct gpio_chip *chip,
return 0;
}
-static struct gpio_chip gpio_chip_template = {
+static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index cd18c09827ef..1f40baf1234e 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -11,8 +11,7 @@
*
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -81,7 +80,6 @@ static const struct i2c_device_id smsc_i2c_id[] = {
{ "smscece1099", 0},
{},
};
-MODULE_DEVICE_TABLE(i2c, smsc_i2c_id);
static struct i2c_driver smsc_i2c_driver = {
.driver = {
@@ -90,9 +88,4 @@ static struct i2c_driver smsc_i2c_driver = {
.probe = smsc_i2c_probe,
.id_table = smsc_i2c_id,
};
-
-module_i2c_driver(smsc_i2c_driver);
-
-MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
-MODULE_DESCRIPTION("SMSC chip multi-function driver");
-MODULE_LICENSE("GPL v2");
+builtin_i2c_driver(smsc_i2c_driver);
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index c3f4aab53b07..863c39a3353c 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -57,6 +57,7 @@ static const struct of_device_id stmpe_of_match[] = {
{ .compatible = "st,stmpe610", .data = (void *)STMPE610, },
{ .compatible = "st,stmpe801", .data = (void *)STMPE801, },
{ .compatible = "st,stmpe811", .data = (void *)STMPE811, },
+ { .compatible = "st,stmpe1600", .data = (void *)STMPE1600, },
{ .compatible = "st,stmpe1601", .data = (void *)STMPE1601, },
{ .compatible = "st,stmpe1801", .data = (void *)STMPE1801, },
{ .compatible = "st,stmpe2401", .data = (void *)STMPE2401, },
@@ -101,6 +102,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
{ "stmpe610", STMPE610 },
{ "stmpe801", STMPE801 },
{ "stmpe811", STMPE811 },
+ { "stmpe1600", STMPE1600 },
{ "stmpe1601", STMPE1601 },
{ "stmpe1801", STMPE1801 },
{ "stmpe2401", STMPE2401 },
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 94c7cc02fdab..cfdae8a3d779 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -469,6 +469,8 @@ static const struct mfd_cell stmpe_ts_cell = {
static const u8 stmpe811_regs[] = {
[STMPE_IDX_CHIP_ID] = STMPE811_REG_CHIP_ID,
+ [STMPE_IDX_SYS_CTRL] = STMPE811_REG_SYS_CTRL,
+ [STMPE_IDX_SYS_CTRL2] = STMPE811_REG_SYS_CTRL2,
[STMPE_IDX_ICR_LSB] = STMPE811_REG_INT_CTRL,
[STMPE_IDX_IER_LSB] = STMPE811_REG_INT_EN,
[STMPE_IDX_ISR_MSB] = STMPE811_REG_INT_STA,
@@ -481,7 +483,7 @@ static const u8 stmpe811_regs[] = {
[STMPE_IDX_GPAFR_U_MSB] = STMPE811_REG_GPIO_AF,
[STMPE_IDX_IEGPIOR_LSB] = STMPE811_REG_GPIO_INT_EN,
[STMPE_IDX_ISGPIOR_MSB] = STMPE811_REG_GPIO_INT_STA,
- [STMPE_IDX_GPEDR_MSB] = STMPE811_REG_GPIO_ED,
+ [STMPE_IDX_GPEDR_LSB] = STMPE811_REG_GPIO_ED,
};
static struct stmpe_variant_block stmpe811_blocks[] = {
@@ -511,7 +513,7 @@ static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks,
if (blocks & STMPE_BLOCK_TOUCHSCREEN)
mask |= STMPE811_SYS_CTRL2_TSC_OFF;
- return __stmpe_set_bits(stmpe, STMPE811_REG_SYS_CTRL2, mask,
+ return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2], mask,
enable ? 0 : mask);
}
@@ -551,25 +553,89 @@ static struct stmpe_variant_info stmpe610 = {
};
/*
+ * STMPE1600
+ * Compared to all other STMPE variants, the LSB and MSB registers are
+ * located in this order: LSB addr
+ *                        MSB addr + 1
+ * As there are only 2 * 8-bit registers for GPMR/GPSR/IEGPIOR, the CSB
+ * index maps to the MSB register.
+ */
+
+static const u8 stmpe1600_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE1600_REG_CHIP_ID,
+ [STMPE_IDX_SYS_CTRL] = STMPE1600_REG_SYS_CTRL,
+ [STMPE_IDX_ICR_LSB] = STMPE1600_REG_SYS_CTRL,
+ [STMPE_IDX_GPMR_LSB] = STMPE1600_REG_GPMR_LSB,
+ [STMPE_IDX_GPMR_CSB] = STMPE1600_REG_GPMR_MSB,
+ [STMPE_IDX_GPSR_LSB] = STMPE1600_REG_GPSR_LSB,
+ [STMPE_IDX_GPSR_CSB] = STMPE1600_REG_GPSR_MSB,
+ [STMPE_IDX_GPDR_LSB] = STMPE1600_REG_GPDR_LSB,
+ [STMPE_IDX_GPDR_CSB] = STMPE1600_REG_GPDR_MSB,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE1600_REG_IEGPIOR_LSB,
+ [STMPE_IDX_IEGPIOR_CSB] = STMPE1600_REG_IEGPIOR_MSB,
+ [STMPE_IDX_ISGPIOR_LSB] = STMPE1600_REG_ISGPIOR_LSB,
+};
+
+static struct stmpe_variant_block stmpe1600_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = 0,
+ .block = STMPE_BLOCK_GPIO,
+ },
+};
+
+static int stmpe1600_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ if (blocks & STMPE_BLOCK_GPIO)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static struct stmpe_variant_info stmpe1600 = {
+ .name = "stmpe1600",
+ .id_val = STMPE1600_ID,
+ .id_mask = 0xffff,
+ .num_gpios = 16,
+ .af_bits = 0,
+ .regs = stmpe1600_regs,
+ .blocks = stmpe1600_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe1600_blocks),
+ .num_irqs = STMPE1600_NR_INTERNAL_IRQS,
+ .enable = stmpe1600_enable,
+};
+
+/*
* STMPE1601
*/
static const u8 stmpe1601_regs[] = {
[STMPE_IDX_CHIP_ID] = STMPE1601_REG_CHIP_ID,
+ [STMPE_IDX_SYS_CTRL] = STMPE1601_REG_SYS_CTRL,
+ [STMPE_IDX_SYS_CTRL2] = STMPE1601_REG_SYS_CTRL2,
[STMPE_IDX_ICR_LSB] = STMPE1601_REG_ICR_LSB,
+ [STMPE_IDX_IER_MSB] = STMPE1601_REG_IER_MSB,
[STMPE_IDX_IER_LSB] = STMPE1601_REG_IER_LSB,
[STMPE_IDX_ISR_MSB] = STMPE1601_REG_ISR_MSB,
[STMPE_IDX_GPMR_LSB] = STMPE1601_REG_GPIO_MP_LSB,
+ [STMPE_IDX_GPMR_CSB] = STMPE1601_REG_GPIO_MP_MSB,
[STMPE_IDX_GPSR_LSB] = STMPE1601_REG_GPIO_SET_LSB,
+ [STMPE_IDX_GPSR_CSB] = STMPE1601_REG_GPIO_SET_MSB,
[STMPE_IDX_GPCR_LSB] = STMPE1601_REG_GPIO_CLR_LSB,
+ [STMPE_IDX_GPCR_CSB] = STMPE1601_REG_GPIO_CLR_MSB,
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
+ [STMPE_IDX_GPDR_CSB] = STMPE1601_REG_GPIO_SET_DIR_MSB,
+ [STMPE_IDX_GPEDR_LSB] = STMPE1601_REG_GPIO_ED_LSB,
+ [STMPE_IDX_GPEDR_CSB] = STMPE1601_REG_GPIO_ED_MSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
+ [STMPE_IDX_GPRER_CSB] = STMPE1601_REG_GPIO_RE_MSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPFER_CSB] = STMPE1601_REG_GPIO_FE_MSB,
[STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
+ [STMPE_IDX_IEGPIOR_CSB] = STMPE1601_REG_INT_EN_GPIO_MASK_MSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
- [STMPE_IDX_GPEDR_MSB] = STMPE1601_REG_GPIO_ED_MSB,
};
static struct stmpe_variant_block stmpe1601_blocks[] = {
@@ -640,13 +706,13 @@ static int stmpe1601_autosleep(struct stmpe *stmpe,
return timeout;
}
- ret = __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+ ret = __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2],
STMPE1601_AUTOSLEEP_TIMEOUT_MASK,
timeout);
if (ret < 0)
return ret;
- return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+ return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2],
STPME1601_AUTOSLEEP_ENABLE,
STPME1601_AUTOSLEEP_ENABLE);
}
@@ -671,7 +737,7 @@ static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks,
else
mask &= ~STMPE1601_SYS_CTRL_ENABLE_SPWM;
- return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL, mask,
+ return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL], mask,
enable ? mask : 0);
}
@@ -710,18 +776,33 @@ static struct stmpe_variant_info stmpe1601 = {
*/
static const u8 stmpe1801_regs[] = {
[STMPE_IDX_CHIP_ID] = STMPE1801_REG_CHIP_ID,
+ [STMPE_IDX_SYS_CTRL] = STMPE1801_REG_SYS_CTRL,
[STMPE_IDX_ICR_LSB] = STMPE1801_REG_INT_CTRL_LOW,
[STMPE_IDX_IER_LSB] = STMPE1801_REG_INT_EN_MASK_LOW,
[STMPE_IDX_ISR_LSB] = STMPE1801_REG_INT_STA_LOW,
[STMPE_IDX_GPMR_LSB] = STMPE1801_REG_GPIO_MP_LOW,
+ [STMPE_IDX_GPMR_CSB] = STMPE1801_REG_GPIO_MP_MID,
+ [STMPE_IDX_GPMR_MSB] = STMPE1801_REG_GPIO_MP_HIGH,
[STMPE_IDX_GPSR_LSB] = STMPE1801_REG_GPIO_SET_LOW,
+ [STMPE_IDX_GPSR_CSB] = STMPE1801_REG_GPIO_SET_MID,
+ [STMPE_IDX_GPSR_MSB] = STMPE1801_REG_GPIO_SET_HIGH,
[STMPE_IDX_GPCR_LSB] = STMPE1801_REG_GPIO_CLR_LOW,
+ [STMPE_IDX_GPCR_CSB] = STMPE1801_REG_GPIO_CLR_MID,
+ [STMPE_IDX_GPCR_MSB] = STMPE1801_REG_GPIO_CLR_HIGH,
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
+ [STMPE_IDX_GPDR_CSB] = STMPE1801_REG_GPIO_SET_DIR_MID,
+ [STMPE_IDX_GPDR_MSB] = STMPE1801_REG_GPIO_SET_DIR_HIGH,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
+ [STMPE_IDX_GPRER_CSB] = STMPE1801_REG_GPIO_RE_MID,
+ [STMPE_IDX_GPRER_MSB] = STMPE1801_REG_GPIO_RE_HIGH,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_GPFER_CSB] = STMPE1801_REG_GPIO_FE_MID,
+ [STMPE_IDX_GPFER_MSB] = STMPE1801_REG_GPIO_FE_HIGH,
[STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
- [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
+ [STMPE_IDX_IEGPIOR_CSB] = STMPE1801_REG_INT_EN_GPIO_MASK_MID,
+ [STMPE_IDX_IEGPIOR_MSB] = STMPE1801_REG_INT_EN_GPIO_MASK_HIGH,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE1801_REG_INT_STA_GPIO_HIGH,
};
static struct stmpe_variant_block stmpe1801_blocks[] = {
@@ -751,22 +832,31 @@ static int stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks,
enable ? mask : 0);
}
-static int stmpe1801_reset(struct stmpe *stmpe)
+static int stmpe_reset(struct stmpe *stmpe)
{
+ u16 id_val = stmpe->variant->id_val;
unsigned long timeout;
int ret = 0;
+ u8 reset_bit;
+
+ if (id_val == STMPE811_ID)
+ /* STMPE811 and STMPE610 use bit 1 of the SYS_CTRL register */
+ reset_bit = STMPE811_SYS_CTRL_RESET;
+ else
+ /* all other STMPE variants use bit 7 of the SYS_CTRL register */
+ reset_bit = STMPE_SYS_CTRL_RESET;
- ret = __stmpe_set_bits(stmpe, STMPE1801_REG_SYS_CTRL,
- STMPE1801_MSK_SYS_CTRL_RESET, STMPE1801_MSK_SYS_CTRL_RESET);
+ ret = __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL],
+ reset_bit, reset_bit);
if (ret < 0)
return ret;
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
- ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
+ ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
if (ret < 0)
return ret;
- if (!(ret & STMPE1801_MSK_SYS_CTRL_RESET))
+ if (!(ret & reset_bit))
return 0;
usleep_range(100, 200);
}
@@ -794,20 +884,39 @@ static struct stmpe_variant_info stmpe1801 = {
static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_CHIP_ID] = STMPE24XX_REG_CHIP_ID,
+ [STMPE_IDX_SYS_CTRL] = STMPE24XX_REG_SYS_CTRL,
+ [STMPE_IDX_SYS_CTRL2] = STMPE24XX_REG_SYS_CTRL2,
[STMPE_IDX_ICR_LSB] = STMPE24XX_REG_ICR_LSB,
+ [STMPE_IDX_IER_MSB] = STMPE24XX_REG_IER_MSB,
[STMPE_IDX_IER_LSB] = STMPE24XX_REG_IER_LSB,
[STMPE_IDX_ISR_MSB] = STMPE24XX_REG_ISR_MSB,
[STMPE_IDX_GPMR_LSB] = STMPE24XX_REG_GPMR_LSB,
+ [STMPE_IDX_GPMR_CSB] = STMPE24XX_REG_GPMR_CSB,
+ [STMPE_IDX_GPMR_MSB] = STMPE24XX_REG_GPMR_MSB,
[STMPE_IDX_GPSR_LSB] = STMPE24XX_REG_GPSR_LSB,
+ [STMPE_IDX_GPSR_CSB] = STMPE24XX_REG_GPSR_CSB,
+ [STMPE_IDX_GPSR_MSB] = STMPE24XX_REG_GPSR_MSB,
[STMPE_IDX_GPCR_LSB] = STMPE24XX_REG_GPCR_LSB,
+ [STMPE_IDX_GPCR_CSB] = STMPE24XX_REG_GPCR_CSB,
+ [STMPE_IDX_GPCR_MSB] = STMPE24XX_REG_GPCR_MSB,
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
+ [STMPE_IDX_GPDR_CSB] = STMPE24XX_REG_GPDR_CSB,
+ [STMPE_IDX_GPDR_MSB] = STMPE24XX_REG_GPDR_MSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
+ [STMPE_IDX_GPRER_CSB] = STMPE24XX_REG_GPRER_CSB,
+ [STMPE_IDX_GPRER_MSB] = STMPE24XX_REG_GPRER_MSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPFER_CSB] = STMPE24XX_REG_GPFER_CSB,
+ [STMPE_IDX_GPFER_MSB] = STMPE24XX_REG_GPFER_MSB,
[STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
[STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
+ [STMPE_IDX_IEGPIOR_CSB] = STMPE24XX_REG_IEGPIOR_CSB,
+ [STMPE_IDX_IEGPIOR_MSB] = STMPE24XX_REG_IEGPIOR_MSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
+ [STMPE_IDX_GPEDR_LSB] = STMPE24XX_REG_GPEDR_LSB,
+ [STMPE_IDX_GPEDR_CSB] = STMPE24XX_REG_GPEDR_CSB,
[STMPE_IDX_GPEDR_MSB] = STMPE24XX_REG_GPEDR_MSB,
};
@@ -840,7 +949,7 @@ static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
if (blocks & STMPE_BLOCK_KEYPAD)
mask |= STMPE24XX_SYS_CTRL_ENABLE_KPC;
- return __stmpe_set_bits(stmpe, STMPE24XX_REG_SYS_CTRL, mask,
+ return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL], mask,
enable ? mask : 0);
}
@@ -893,6 +1002,7 @@ static struct stmpe_variant_info *stmpe_variant_info[STMPE_NBR_PARTS] = {
[STMPE610] = &stmpe610,
[STMPE801] = &stmpe801,
[STMPE811] = &stmpe811,
+ [STMPE1600] = &stmpe1600,
[STMPE1601] = &stmpe1601,
[STMPE1801] = &stmpe1801,
[STMPE2401] = &stmpe2401,
@@ -919,7 +1029,8 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int ret;
int i;
- if (variant->id_val == STMPE801_ID) {
+ if (variant->id_val == STMPE801_ID ||
+ variant->id_val == STMPE1600_ID) {
int base = irq_create_mapping(stmpe->domain, 0);
handle_nested_irq(base);
@@ -982,7 +1093,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
continue;
stmpe->oldier[i] = new;
- stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB] - i, new);
+ stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB + i], new);
}
mutex_unlock(&stmpe->irq_lock);
@@ -1088,20 +1199,18 @@ static int stmpe_chip_init(struct stmpe *stmpe)
if (ret)
return ret;
- if (id == STMPE1801_ID) {
- ret = stmpe1801_reset(stmpe);
- if (ret < 0)
- return ret;
- }
+ ret = stmpe_reset(stmpe);
+ if (ret < 0)
+ return ret;
if (stmpe->irq >= 0) {
- if (id == STMPE801_ID)
- icr = STMPE801_REG_SYS_CTRL_INT_EN;
+ if (id == STMPE801_ID || id == STMPE1600_ID)
+ icr = STMPE_SYS_CTRL_INT_EN;
else
icr = STMPE_ICR_LSB_GIM;
- /* STMPE801 doesn't support Edge interrupts */
- if (id != STMPE801_ID) {
+ /* STMPE801 and STMPE1600 don't support Edge interrupts */
+ if (id != STMPE801_ID && id != STMPE1600_ID) {
if (irq_trigger == IRQF_TRIGGER_FALLING ||
irq_trigger == IRQF_TRIGGER_RISING)
icr |= STMPE_ICR_LSB_EDGE;
@@ -1109,8 +1218,8 @@ static int stmpe_chip_init(struct stmpe *stmpe)
if (irq_trigger == IRQF_TRIGGER_RISING ||
irq_trigger == IRQF_TRIGGER_HIGH) {
- if (id == STMPE801_ID)
- icr |= STMPE801_REG_SYS_CTRL_INT_HI;
+ if (id == STMPE801_ID || id == STMPE1600_ID)
+ icr |= STMPE_SYS_CTRL_INT_HI;
else
icr |= STMPE_ICR_LSB_HIGH;
}
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 84adb46b3e2f..f7efdd8a5fc6 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -104,6 +104,10 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE_ICR_LSB_EDGE (1 << 1)
#define STMPE_ICR_LSB_GIM (1 << 0)
+#define STMPE_SYS_CTRL_RESET (1 << 7)
+#define STMPE_SYS_CTRL_INT_EN (1 << 2)
+#define STMPE_SYS_CTRL_INT_HI (1 << 0)
+
/*
* STMPE801
*/
@@ -119,13 +123,10 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE801_REG_GPIO_SET_PIN 0x11
#define STMPE801_REG_GPIO_DIR 0x12
-#define STMPE801_REG_SYS_CTRL_RESET (1 << 7)
-#define STMPE801_REG_SYS_CTRL_INT_EN (1 << 2)
-#define STMPE801_REG_SYS_CTRL_INT_HI (1 << 0)
-
/*
* STMPE811
*/
+#define STMPE811_ID 0x0811
#define STMPE811_IRQ_TOUCH_DET 0
#define STMPE811_IRQ_FIFO_TH 1
@@ -138,6 +139,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE811_NR_INTERNAL_IRQS 8
#define STMPE811_REG_CHIP_ID 0x00
+#define STMPE811_REG_SYS_CTRL 0x03
#define STMPE811_REG_SYS_CTRL2 0x04
#define STMPE811_REG_SPI_CFG 0x08
#define STMPE811_REG_INT_CTRL 0x09
@@ -154,12 +156,35 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE811_REG_GPIO_FE 0x16
#define STMPE811_REG_GPIO_AF 0x17
+#define STMPE811_SYS_CTRL_RESET (1 << 1)
+
#define STMPE811_SYS_CTRL2_ADC_OFF (1 << 0)
#define STMPE811_SYS_CTRL2_TSC_OFF (1 << 1)
#define STMPE811_SYS_CTRL2_GPIO_OFF (1 << 2)
#define STMPE811_SYS_CTRL2_TS_OFF (1 << 3)
/*
+ * STMPE1600
+ */
+#define STMPE1600_ID 0x0016
+#define STMPE1600_NR_INTERNAL_IRQS 16
+
+#define STMPE1600_REG_CHIP_ID 0x00
+#define STMPE1600_REG_SYS_CTRL 0x03
+#define STMPE1600_REG_IEGPIOR_LSB 0x08
+#define STMPE1600_REG_IEGPIOR_MSB 0x09
+#define STMPE1600_REG_ISGPIOR_LSB 0x0A
+#define STMPE1600_REG_ISGPIOR_MSB 0x0B
+#define STMPE1600_REG_GPMR_LSB 0x10
+#define STMPE1600_REG_GPMR_MSB 0x11
+#define STMPE1600_REG_GPSR_LSB 0x12
+#define STMPE1600_REG_GPSR_MSB 0x13
+#define STMPE1600_REG_GPDR_LSB 0x14
+#define STMPE1600_REG_GPDR_MSB 0x15
+#define STMPE1600_REG_GPPIR_LSB 0x16
+#define STMPE1600_REG_GPPIR_MSB 0x17
+
+/*
* STMPE1601
*/
@@ -175,19 +200,32 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1601_REG_SYS_CTRL 0x02
#define STMPE1601_REG_SYS_CTRL2 0x03
+#define STMPE1601_REG_ICR_MSB 0x10
#define STMPE1601_REG_ICR_LSB 0x11
+#define STMPE1601_REG_IER_MSB 0x12
#define STMPE1601_REG_IER_LSB 0x13
#define STMPE1601_REG_ISR_MSB 0x14
-#define STMPE1601_REG_CHIP_ID 0x80
+#define STMPE1601_REG_ISR_LSB 0x15
+#define STMPE1601_REG_INT_EN_GPIO_MASK_MSB 0x16
#define STMPE1601_REG_INT_EN_GPIO_MASK_LSB 0x17
#define STMPE1601_REG_INT_STA_GPIO_MSB 0x18
-#define STMPE1601_REG_GPIO_MP_LSB 0x87
+#define STMPE1601_REG_INT_STA_GPIO_LSB 0x19
+#define STMPE1601_REG_CHIP_ID 0x80
+#define STMPE1601_REG_GPIO_SET_MSB 0x82
#define STMPE1601_REG_GPIO_SET_LSB 0x83
+#define STMPE1601_REG_GPIO_CLR_MSB 0x84
#define STMPE1601_REG_GPIO_CLR_LSB 0x85
+#define STMPE1601_REG_GPIO_MP_MSB 0x86
+#define STMPE1601_REG_GPIO_MP_LSB 0x87
+#define STMPE1601_REG_GPIO_SET_DIR_MSB 0x88
#define STMPE1601_REG_GPIO_SET_DIR_LSB 0x89
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
+#define STMPE1601_REG_GPIO_ED_LSB 0x8B
+#define STMPE1601_REG_GPIO_RE_MSB 0x8C
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
+#define STMPE1601_REG_GPIO_FE_MSB 0x8E
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_PU_MSB 0x90
#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
@@ -243,8 +281,6 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1801_REG_GPIO_PULL_UP_MID 0x23
#define STMPE1801_REG_GPIO_PULL_UP_HIGH 0x24
-#define STMPE1801_MSK_SYS_CTRL_RESET (1 << 7)
-
#define STMPE1801_MSK_INT_EN_KPC (1 << 1)
#define STMPE1801_MSK_INT_EN_GPIO (1 << 3)
@@ -264,23 +300,48 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_NR_INTERNAL_IRQS 9
#define STMPE24XX_REG_SYS_CTRL 0x02
+#define STMPE24XX_REG_SYS_CTRL2 0x03
+#define STMPE24XX_REG_ICR_MSB 0x10
#define STMPE24XX_REG_ICR_LSB 0x11
+#define STMPE24XX_REG_IER_MSB 0x12
#define STMPE24XX_REG_IER_LSB 0x13
#define STMPE24XX_REG_ISR_MSB 0x14
-#define STMPE24XX_REG_CHIP_ID 0x80
+#define STMPE24XX_REG_ISR_LSB 0x15
+#define STMPE24XX_REG_IEGPIOR_MSB 0x16
+#define STMPE24XX_REG_IEGPIOR_CSB 0x17
#define STMPE24XX_REG_IEGPIOR_LSB 0x18
#define STMPE24XX_REG_ISGPIOR_MSB 0x19
-#define STMPE24XX_REG_GPMR_LSB 0xA4
+#define STMPE24XX_REG_ISGPIOR_CSB 0x1A
+#define STMPE24XX_REG_ISGPIOR_LSB 0x1B
+#define STMPE24XX_REG_CHIP_ID 0x80
+#define STMPE24XX_REG_GPSR_MSB 0x83
+#define STMPE24XX_REG_GPSR_CSB 0x84
#define STMPE24XX_REG_GPSR_LSB 0x85
+#define STMPE24XX_REG_GPCR_MSB 0x86
+#define STMPE24XX_REG_GPCR_CSB 0x87
#define STMPE24XX_REG_GPCR_LSB 0x88
+#define STMPE24XX_REG_GPDR_MSB 0x89
+#define STMPE24XX_REG_GPDR_CSB 0x8A
#define STMPE24XX_REG_GPDR_LSB 0x8B
#define STMPE24XX_REG_GPEDR_MSB 0x8C
+#define STMPE24XX_REG_GPEDR_CSB 0x8D
+#define STMPE24XX_REG_GPEDR_LSB 0x8E
+#define STMPE24XX_REG_GPRER_MSB 0x8F
+#define STMPE24XX_REG_GPRER_CSB 0x90
#define STMPE24XX_REG_GPRER_LSB 0x91
+#define STMPE24XX_REG_GPFER_MSB 0x92
+#define STMPE24XX_REG_GPFER_CSB 0x93
#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPPUR_MSB 0x95
+#define STMPE24XX_REG_GPPUR_CSB 0x96
#define STMPE24XX_REG_GPPUR_LSB 0x97
-#define STMPE24XX_REG_GPPDR_LSB 0x9a
+#define STMPE24XX_REG_GPPDR_MSB 0x98
+#define STMPE24XX_REG_GPPDR_CSB 0x99
+#define STMPE24XX_REG_GPPDR_LSB 0x9A
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
-
+#define STMPE24XX_REG_GPMR_MSB 0xA2
+#define STMPE24XX_REG_GPMR_CSB 0xA3
+#define STMPE24XX_REG_GPMR_LSB 0xA4
#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
#define STMPE24XX_SYSCON_ENABLE_PWM (1 << 2)
#define STMPE24XX_SYS_CTRL_ENABLE_KPC (1 << 1)
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 191173166d65..011fcc555945 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -9,7 +9,7 @@
*/
#include <linux/mfd/core.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
struct prcm_data {
@@ -170,8 +170,4 @@ static struct platform_driver sun6i_prcm_driver = {
},
.probe = sun6i_prcm_probe,
};
-module_platform_driver(sun6i_prcm_driver);
-
-MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
-MODULE_DESCRIPTION("Allwinner sun6i PRCM driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun6i_prcm_driver);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 049a6fcac651..9a4d8684dd32 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -15,22 +15,103 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
-static const struct mfd_cell tps65217s[] = {
+static struct resource charger_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_AC, "AC"),
+ DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_USB, "USB"),
+};
+
+static struct resource pb_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_PB, "PB"),
+};
+
+struct tps65217_irq {
+ int mask;
+ int interrupt;
+};
+
+static const struct tps65217_irq tps65217_irqs[] = {
+ [TPS65217_IRQ_PB] = {
+ .mask = TPS65217_INT_PBM,
+ .interrupt = TPS65217_INT_PBI,
+ },
+ [TPS65217_IRQ_AC] = {
+ .mask = TPS65217_INT_ACM,
+ .interrupt = TPS65217_INT_ACI,
+ },
+ [TPS65217_IRQ_USB] = {
+ .mask = TPS65217_INT_USBM,
+ .interrupt = TPS65217_INT_USBI,
+ },
+};
+
+static void tps65217_irq_lock(struct irq_data *data)
+{
+ struct tps65217 *tps = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps->irq_lock);
+}
+
+static void tps65217_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65217 *tps = irq_data_get_irq_chip_data(data);
+ int ret;
+
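+ /* flush the mask bits accumulated by irq_enable/irq_disable in a single register write */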
+ ret = tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
+ TPS65217_PROTECT_NONE);
+ if (ret != 0)
+ dev_err(tps->dev, "Failed to sync IRQ masks\n");
+
+ mutex_unlock(&tps->irq_lock);
+}
+
+static inline const struct tps65217_irq *
+irq_to_tps65217_irq(struct tps65217 *tps, struct irq_data *data)
+{
+ return &tps65217_irqs[data->hwirq];
+}
+
+static void tps65217_irq_enable(struct irq_data *data)
+{
+ struct tps65217 *tps = irq_data_get_irq_chip_data(data);
+ const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+
+ tps->irq_mask &= ~irq_data->mask;
+}
+
+static void tps65217_irq_disable(struct irq_data *data)
+{
+ struct tps65217 *tps = irq_data_get_irq_chip_data(data);
+ const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+
+ tps->irq_mask |= irq_data->mask;
+}
+
+static struct irq_chip tps65217_irq_chip = {
+ .irq_bus_lock = tps65217_irq_lock,
+ .irq_bus_sync_unlock = tps65217_irq_sync_unlock,
+ .irq_enable = tps65217_irq_enable,
+ .irq_disable = tps65217_irq_disable,
+};
+
+static struct mfd_cell tps65217s[] = {
{
.name = "tps65217-pmic",
.of_compatible = "ti,tps65217-pmic",
@@ -41,10 +122,96 @@ static const struct mfd_cell tps65217s[] = {
},
{
.name = "tps65217-charger",
+ .num_resources = ARRAY_SIZE(charger_resources),
+ .resources = charger_resources,
.of_compatible = "ti,tps65217-charger",
},
+ {
+ .name = "tps65217-pwrbutton",
+ .num_resources = ARRAY_SIZE(pb_resources),
+ .resources = pb_resources,
+ .of_compatible = "ti,tps65217-pwrbutton",
+ },
+};
+
+static irqreturn_t tps65217_irq_thread(int irq, void *data)
+{
+ struct tps65217 *tps = data;
+ unsigned int status;
+ bool handled = false;
+ int i;
+ int ret;
+
+ ret = tps65217_reg_read(tps, TPS65217_REG_INT, &status);
+ if (ret < 0) {
+ dev_err(tps->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tps65217_irqs); i++) {
+ if (status & tps65217_irqs[i].interrupt) {
+ handle_nested_irq(irq_find_mapping(tps->irq_domain, i));
+ handled = true;
+ }
+ }
+
+ if (handled)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static int tps65217_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps65217 *tps = h->host_data;
+
+ irq_set_chip_data(virq, tps);
+ irq_set_chip_and_handler(virq, &tps65217_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+ irq_set_parent(virq, tps->irq);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops tps65217_irq_domain_ops = {
+ .map = tps65217_irq_map,
};
+static int tps65217_irq_init(struct tps65217 *tps, int irq)
+{
+ int ret;
+
+ mutex_init(&tps->irq_lock);
+ tps->irq = irq;
+
+ /* Mask all interrupt sources */
+ tps->irq_mask = (TPS65217_INT_RESERVEDM | TPS65217_INT_PBM
+ | TPS65217_INT_ACM | TPS65217_INT_USBM);
+ tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
+ TPS65217_PROTECT_NONE);
+
+ tps->irq_domain = irq_domain_add_linear(tps->dev->of_node,
+ TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
+ if (!tps->irq_domain) {
+ dev_err(tps->dev, "Could not create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_request_threaded_irq(tps->dev, irq, NULL,
+ tps65217_irq_thread, IRQF_ONESHOT,
+ "tps65217-irq", tps);
+ if (ret) {
+ dev_err(tps->dev, "Failed to request IRQ %d: %d\n",
+ irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* tps65217_reg_read: Read a single tps65217 register.
*
@@ -149,11 +316,22 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65217_clear_bits);
+static bool tps65217_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS65217_REG_INT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config tps65217_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TPS65217_REG_MAX,
+ .volatile_reg = tps65217_volatile_reg,
};
static const struct of_device_id tps65217_of_match[] = {
@@ -205,8 +383,19 @@ static int tps65217_probe(struct i2c_client *client,
return ret;
}
+ if (client->irq) {
+ tps65217_irq_init(tps, client->irq);
+ } else {
+ int i;
+
+ /* Don't tell children about IRQ resources which won't fire */
+ for (i = 0; i < ARRAY_SIZE(tps65217s); i++)
+ tps65217s[i].num_resources = 0;
+ }
+
ret = devm_mfd_add_devices(tps->dev, -1, tps65217s,
- ARRAY_SIZE(tps65217s), NULL, 0, NULL);
+ ARRAY_SIZE(tps65217s), NULL, 0,
+ tps->irq_domain);
if (ret < 0) {
dev_err(tps->dev, "mfd_add_devices failed: %d\n", ret);
return ret;
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 80b9dc363cd8..ba610adbdbff 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -219,6 +219,7 @@ static int tps65218_probe(struct i2c_client *client,
struct tps65218 *tps;
const struct of_device_id *match;
int ret;
+ unsigned int chipid;
match = of_match_device(of_tps65218_match_table, &client->dev);
if (!match) {
@@ -250,6 +251,14 @@ static int tps65218_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = tps65218_reg_read(tps, TPS65218_REG_CHIPID, &chipid);
+ if (ret) {
+ dev_err(tps->dev, "Failed to read chipid: %d\n", ret);
+ return ret;
+ }
+
+ tps->rev = chipid & TPS65218_CHIPID_REV_MASK;
+
ret = of_platform_populate(client->dev.of_node, NULL, NULL,
&client->dev);
if (ret < 0)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a49d3db6d936..c64615dca2bd 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -1258,7 +1257,6 @@ static const struct i2c_device_id twl_ids[] = {
{ "twl6032", TWL6030_CLASS | TWL6032_SUBCLASS }, /* "Phoenix lite" */
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(i2c, twl_ids);
/* One Client Driver , 4 Clients */
static struct i2c_driver twl_driver = {
@@ -1267,9 +1265,4 @@ static struct i2c_driver twl_driver = {
.probe = twl_probe,
.remove = twl_remove,
};
-
-module_i2c_driver(twl_driver);
-
-MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("I2C Core interface for TWL");
-MODULE_LICENSE("GPL");
+builtin_i2c_driver(twl_driver);
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index ab328ec49353..d66502d36ba0 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -609,6 +609,7 @@ static const struct regmap_config twl6040_regmap_config = {
.writeable_reg = twl6040_writeable_reg,
.cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
};
static const struct regmap_irq twl6040_irqs[] = {
@@ -782,6 +783,11 @@ static int twl6040_probe(struct i2c_client *client,
cell->name = "twl6040-gpo";
children++;
+ /* PDM clock support */
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-pdmclk";
+ children++;
+
/* The chip is powered down so mark regmap to cache only and dirty */
regcache_cache_only(twl6040->regmap, true);
regcache_mark_dirty(twl6040->regmap);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 9ab9ec47ea75..d6fb2e1a759a 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -446,10 +446,6 @@ static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
unsigned long mask;
mask = probe_irq_on();
- if (!mask) {
- probe_irq_off(mask);
- return NO_IRQ;
- }
/*
* Enable the ADC interrupt.
@@ -541,7 +537,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb1x00_enable(ucb);
ucb->irq = ucb1x00_detect_irq(ucb);
ucb1x00_disable(ucb);
- if (ucb->irq == NO_IRQ) {
+ if (!ucb->irq) {
dev_err(&ucb->dev, "IRQ probe failed\n");
ret = -ENODEV;
goto err_no_irq;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d00252828966..64971baf11fa 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -429,34 +429,6 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
-config BMP085
- tristate
- depends on SYSFS
-
-config BMP085_I2C
- tristate "BMP085 digital pressure sensor on I2C"
- select BMP085
- select REGMAP_I2C
- depends on I2C && SYSFS
- help
- Say Y here if you want to support Bosch Sensortec's digital pressure
- sensor hooked to an I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called bmp085-i2c.
-
-config BMP085_SPI
- tristate "BMP085 digital pressure sensor on SPI"
- select BMP085
- select REGMAP_SPI
- depends on SPI_MASTER && SYSFS
- help
- Say Y here if you want to support Bosch Sensortec's digital pressure
- sensor hooked to an SPI bus.
-
- To compile this driver as a module, choose M here: the
- module will be called bmp085-spi.
-
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index fb32516ddfe2..31983366090a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -9,9 +9,6 @@ obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
-obj-$(CONFIG_BMP085) += bmp085.o
-obj-$(CONFIG_BMP085_I2C) += bmp085-i2c.o
-obj-$(CONFIG_BMP085_SPI) += bmp085-spi.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm.o
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
deleted file mode 100644
index f35c218aaa1a..000000000000
--- a/drivers/misc/bmp085-i2c.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2012 Bosch Sensortec GmbH
- * Copyright (c) 2012 Unixphere AB
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
-#include "bmp085.h"
-
-#define BMP085_I2C_ADDRESS 0x77
-
-static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS,
- I2C_CLIENT_END };
-
-static int bmp085_i2c_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- if (client->addr != BMP085_I2C_ADDRESS)
- return -ENODEV;
-
- return bmp085_detect(&client->dev);
-}
-
-static int bmp085_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int err;
- struct regmap *regmap = devm_regmap_init_i2c(client,
- &bmp085_regmap_config);
-
- if (IS_ERR(regmap)) {
- err = PTR_ERR(regmap);
- dev_err(&client->dev, "Failed to init regmap: %d\n", err);
- return err;
- }
-
- return bmp085_probe(&client->dev, regmap, client->irq);
-}
-
-static int bmp085_i2c_remove(struct i2c_client *client)
-{
- return bmp085_remove(&client->dev);
-}
-
-static const struct i2c_device_id bmp085_id[] = {
- { BMP085_NAME, 0 },
- { "bmp180", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, bmp085_id);
-
-static struct i2c_driver bmp085_i2c_driver = {
- .driver = {
- .name = BMP085_NAME,
- },
- .id_table = bmp085_id,
- .probe = bmp085_i2c_probe,
- .remove = bmp085_i2c_remove,
-
- .detect = bmp085_i2c_detect,
- .address_list = normal_i2c
-};
-
-module_i2c_driver(bmp085_i2c_driver);
-
-MODULE_AUTHOR("Eric Andersson <eric.andersson@unixphere.com>");
-MODULE_DESCRIPTION("BMP085 I2C bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
deleted file mode 100644
index 17ecbf95ff15..000000000000
--- a/drivers/misc/bmp085-spi.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2012 Bosch Sensortec GmbH
- * Copyright (c) 2012 Unixphere AB
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include "bmp085.h"
-
-static int bmp085_spi_probe(struct spi_device *client)
-{
- int err;
- struct regmap *regmap;
-
- client->bits_per_word = 8;
- err = spi_setup(client);
- if (err < 0) {
- dev_err(&client->dev, "spi_setup failed!\n");
- return err;
- }
-
- regmap = devm_regmap_init_spi(client, &bmp085_regmap_config);
- if (IS_ERR(regmap)) {
- err = PTR_ERR(regmap);
- dev_err(&client->dev, "Failed to init regmap: %d\n", err);
- return err;
- }
-
- return bmp085_probe(&client->dev, regmap, client->irq);
-}
-
-static int bmp085_spi_remove(struct spi_device *client)
-{
- return bmp085_remove(&client->dev);
-}
-
-static const struct of_device_id bmp085_of_match[] = {
- { .compatible = "bosch,bmp085", },
- { },
-};
-MODULE_DEVICE_TABLE(of, bmp085_of_match);
-
-static const struct spi_device_id bmp085_id[] = {
- { "bmp180", 0 },
- { "bmp181", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(spi, bmp085_id);
-
-static struct spi_driver bmp085_spi_driver = {
- .driver = {
- .name = BMP085_NAME,
- .of_match_table = bmp085_of_match
- },
- .id_table = bmp085_id,
- .probe = bmp085_spi_probe,
- .remove = bmp085_spi_remove
-};
-
-module_spi_driver(bmp085_spi_driver);
-
-MODULE_AUTHOR("Eric Andersson <eric.andersson@unixphere.com>");
-MODULE_DESCRIPTION("BMP085 SPI bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
deleted file mode 100644
index 9b313f7810f5..000000000000
--- a/drivers/misc/bmp085.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
- * Copyright (c) 2012 Bosch Sensortec GmbH
- * Copyright (c) 2012 Unixphere AB
- *
- * This driver supports the bmp085 and bmp18x digital barometric pressure
- * and temperature sensors from Bosch Sensortec. The datasheets
- * are available from their website:
- * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf
- * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP180-DS000-07.pdf
- *
- * A pressure measurement is issued by reading from pressure0_input.
- * The return value ranges from 30000 to 110000 pascal with a resulution
- * of 1 pascal (0.01 millibar) which enables measurements from 9000m above
- * to 500m below sea level.
- *
- * The temperature can be read from temp0_input. Values range from
- * -400 to 850 representing the ambient temperature in degree celsius
- * multiplied by 10.The resolution is 0.1 celsius.
- *
- * Because ambient pressure is temperature dependent, a temperature
- * measurement will be executed automatically even if the user is reading
- * from pressure0_input. This happens if the last temperature measurement
- * has been executed more then one second ago.
- *
- * To decrease RMS noise from pressure measurements, the bmp085 can
- * autonomously calculate the average of up to eight samples. This is
- * set up by writing to the oversampling sysfs file. Accepted values
- * are 0, 1, 2 and 3. 2^x when x is the value written to this file
- * specifies the number of samples used to calculate the ambient pressure.
- * RMS noise is specified with six pascal (without averaging) and decreases
- * down to 3 pascal when using an oversampling setting of 3.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include "bmp085.h"
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/gpio.h>
-
-#define BMP085_CHIP_ID 0x55
-#define BMP085_CALIBRATION_DATA_START 0xAA
-#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */
-#define BMP085_CHIP_ID_REG 0xD0
-#define BMP085_CTRL_REG 0xF4
-#define BMP085_TEMP_MEASUREMENT 0x2E
-#define BMP085_PRESSURE_MEASUREMENT 0x34
-#define BMP085_CONVERSION_REGISTER_MSB 0xF6
-#define BMP085_CONVERSION_REGISTER_LSB 0xF7
-#define BMP085_CONVERSION_REGISTER_XLSB 0xF8
-#define BMP085_TEMP_CONVERSION_TIME 5
-
-struct bmp085_calibration_data {
- s16 AC1, AC2, AC3;
- u16 AC4, AC5, AC6;
- s16 B1, B2;
- s16 MB, MC, MD;
-};
-
-struct bmp085_data {
- struct device *dev;
- struct regmap *regmap;
- struct mutex lock;
- struct bmp085_calibration_data calibration;
- u8 oversampling_setting;
- u32 raw_temperature;
- u32 raw_pressure;
- u32 temp_measurement_period;
- unsigned long last_temp_measurement;
- u8 chip_id;
- s32 b6; /* calculated temperature correction coefficient */
- int irq;
- struct completion done;
-};
-
-static irqreturn_t bmp085_eoc_isr(int irq, void *devid)
-{
- struct bmp085_data *data = devid;
-
- complete(&data->done);
-
- return IRQ_HANDLED;
-}
-
-static s32 bmp085_read_calibration_data(struct bmp085_data *data)
-{
- u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
- struct bmp085_calibration_data *cali = &(data->calibration);
- s32 status = regmap_bulk_read(data->regmap,
- BMP085_CALIBRATION_DATA_START, (u8 *)tmp,
- (BMP085_CALIBRATION_DATA_LENGTH << 1));
- if (status < 0)
- return status;
-
- cali->AC1 = be16_to_cpu(tmp[0]);
- cali->AC2 = be16_to_cpu(tmp[1]);
- cali->AC3 = be16_to_cpu(tmp[2]);
- cali->AC4 = be16_to_cpu(tmp[3]);
- cali->AC5 = be16_to_cpu(tmp[4]);
- cali->AC6 = be16_to_cpu(tmp[5]);
- cali->B1 = be16_to_cpu(tmp[6]);
- cali->B2 = be16_to_cpu(tmp[7]);
- cali->MB = be16_to_cpu(tmp[8]);
- cali->MC = be16_to_cpu(tmp[9]);
- cali->MD = be16_to_cpu(tmp[10]);
- return 0;
-}
-
-static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
-{
- u16 tmp;
- s32 status;
-
- mutex_lock(&data->lock);
-
- init_completion(&data->done);
-
- status = regmap_write(data->regmap, BMP085_CTRL_REG,
- BMP085_TEMP_MEASUREMENT);
- if (status < 0) {
- dev_err(data->dev,
- "Error while requesting temperature measurement.\n");
- goto exit;
- }
- wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
- BMP085_TEMP_CONVERSION_TIME));
-
- status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
- &tmp, sizeof(tmp));
- if (status < 0) {
- dev_err(data->dev,
- "Error while reading temperature measurement result\n");
- goto exit;
- }
- data->raw_temperature = be16_to_cpu(tmp);
- data->last_temp_measurement = jiffies;
- status = 0; /* everything ok, return 0 */
-
-exit:
- mutex_unlock(&data->lock);
- return status;
-}
-
-static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
-{
- u32 tmp = 0;
- s32 status;
-
- mutex_lock(&data->lock);
-
- init_completion(&data->done);
-
- status = regmap_write(data->regmap, BMP085_CTRL_REG,
- BMP085_PRESSURE_MEASUREMENT +
- (data->oversampling_setting << 6));
- if (status < 0) {
- dev_err(data->dev,
- "Error while requesting pressure measurement.\n");
- goto exit;
- }
-
- /* wait for the end of conversion */
- wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
- 2+(3 << data->oversampling_setting)));
- /* copy data into a u32 (4 bytes), but skip the first byte. */
- status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
- ((u8 *)&tmp)+1, 3);
- if (status < 0) {
- dev_err(data->dev,
- "Error while reading pressure measurement results\n");
- goto exit;
- }
- data->raw_pressure = be32_to_cpu((tmp));
- data->raw_pressure >>= (8-data->oversampling_setting);
- status = 0; /* everything ok, return 0 */
-
-exit:
- mutex_unlock(&data->lock);
- return status;
-}
-
-/*
- * This function starts the temperature measurement and returns the value
- * in tenth of a degree celsius.
- */
-static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
-{
- struct bmp085_calibration_data *cali = &data->calibration;
- long x1, x2;
- int status;
-
- status = bmp085_update_raw_temperature(data);
- if (status < 0)
- goto exit;
-
- x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15;
- x2 = (cali->MC << 11) / (x1 + cali->MD);
- data->b6 = x1 + x2 - 4000;
- /* if NULL just update b6. Used for pressure only measurements */
- if (temperature != NULL)
- *temperature = (x1+x2+8) >> 4;
-
-exit:
- return status;
-}
-
-/*
- * This function starts the pressure measurement and returns the value
- * in millibar. Since the pressure depends on the ambient temperature,
- * a temperature measurement is executed according to the given temperature
- * measurement period (default is 1 sec boundary). This period could vary
- * and needs to be adjusted according to the sensor environment, i.e. if big
- * temperature variations then the temperature needs to be read out often.
- */
-static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure)
-{
- struct bmp085_calibration_data *cali = &data->calibration;
- s32 x1, x2, x3, b3;
- u32 b4, b7;
- s32 p;
- int status;
-
- /* alt least every second force an update of the ambient temperature */
- if ((data->last_temp_measurement == 0) ||
- time_is_before_jiffies(data->last_temp_measurement + 1*HZ)) {
- status = bmp085_get_temperature(data, NULL);
- if (status < 0)
- return status;
- }
-
- status = bmp085_update_raw_pressure(data);
- if (status < 0)
- return status;
-
- x1 = (data->b6 * data->b6) >> 12;
- x1 *= cali->B2;
- x1 >>= 11;
-
- x2 = cali->AC2 * data->b6;
- x2 >>= 11;
-
- x3 = x1 + x2;
-
- b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2);
- b3 >>= 2;
-
- x1 = (cali->AC3 * data->b6) >> 13;
- x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16;
- x3 = (x1 + x2 + 2) >> 2;
- b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15;
-
- b7 = ((u32)data->raw_pressure - b3) *
- (50000 >> data->oversampling_setting);
- p = ((b7 < 0x80000000) ? ((b7 << 1) / b4) : ((b7 / b4) * 2));
-
- x1 = p >> 8;
- x1 *= x1;
- x1 = (x1 * 3038) >> 16;
- x2 = (-7357 * p) >> 16;
- p += (x1 + x2 + 3791) >> 4;
-
- *pressure = p;
-
- return 0;
-}
-
-/*
- * This function sets the chip-internal oversampling. Valid values are 0..3.
- * The chip will use 2^oversampling samples for internal averaging.
- * This influences the measurement time and the accuracy; larger values
- * increase both. The datasheet gives an overview on how measurement time,
- * accuracy and noise correlate.
- */
-static void bmp085_set_oversampling(struct bmp085_data *data,
- unsigned char oversampling)
-{
- if (oversampling > 3)
- oversampling = 3;
- data->oversampling_setting = oversampling;
-}
-
-/*
- * Returns the currently selected oversampling. Range: 0..3
- */
-static unsigned char bmp085_get_oversampling(struct bmp085_data *data)
-{
- return data->oversampling_setting;
-}
-
-/* sysfs callbacks */
-static ssize_t set_oversampling(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bmp085_data *data = dev_get_drvdata(dev);
- unsigned long oversampling;
- int err = kstrtoul(buf, 10, &oversampling);
-
- if (err == 0) {
- mutex_lock(&data->lock);
- bmp085_set_oversampling(data, oversampling);
- mutex_unlock(&data->lock);
- return count;
- }
-
- return err;
-}
-
-static ssize_t show_oversampling(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp085_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", bmp085_get_oversampling(data));
-}
-static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO,
- show_oversampling, set_oversampling);
-
-
-static ssize_t show_temperature(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int temperature;
- int status;
- struct bmp085_data *data = dev_get_drvdata(dev);
-
- status = bmp085_get_temperature(data, &temperature);
- if (status < 0)
- return status;
- else
- return sprintf(buf, "%d\n", temperature);
-}
-static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL);
-
-
-static ssize_t show_pressure(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int pressure;
- int status;
- struct bmp085_data *data = dev_get_drvdata(dev);
-
- status = bmp085_get_pressure(data, &pressure);
- if (status < 0)
- return status;
- else
- return sprintf(buf, "%d\n", pressure);
-}
-static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL);
-
-
-static struct attribute *bmp085_attributes[] = {
- &dev_attr_temp0_input.attr,
- &dev_attr_pressure0_input.attr,
- &dev_attr_oversampling.attr,
- NULL
-};
-
-static const struct attribute_group bmp085_attr_group = {
- .attrs = bmp085_attributes,
-};
-
-int bmp085_detect(struct device *dev)
-{
- struct bmp085_data *data = dev_get_drvdata(dev);
- unsigned int id;
- int ret;
-
- ret = regmap_read(data->regmap, BMP085_CHIP_ID_REG, &id);
- if (ret < 0)
- return ret;
-
- if (id != data->chip_id)
- return -ENODEV;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bmp085_detect);
-
-static void bmp085_get_of_properties(struct bmp085_data *data)
-{
-#ifdef CONFIG_OF
- struct device_node *np = data->dev->of_node;
- u32 prop;
-
- if (!np)
- return;
-
- if (!of_property_read_u32(np, "chip-id", &prop))
- data->chip_id = prop & 0xff;
-
- if (!of_property_read_u32(np, "temp-measurement-period", &prop))
- data->temp_measurement_period = (prop/100)*HZ;
-
- if (!of_property_read_u32(np, "default-oversampling", &prop))
- data->oversampling_setting = prop & 0xff;
-#endif
-}
-
-static int bmp085_init_client(struct bmp085_data *data)
-{
- int status = bmp085_read_calibration_data(data);
-
- if (status < 0)
- return status;
-
- /* default settings */
- data->chip_id = BMP085_CHIP_ID;
- data->last_temp_measurement = 0;
- data->temp_measurement_period = 1*HZ;
- data->oversampling_setting = 3;
-
- bmp085_get_of_properties(data);
-
- mutex_init(&data->lock);
-
- return 0;
-}
-
-struct regmap_config bmp085_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8
-};
-EXPORT_SYMBOL_GPL(bmp085_regmap_config);
-
-int bmp085_probe(struct device *dev, struct regmap *regmap, int irq)
-{
- struct bmp085_data *data;
- int err = 0;
-
- data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- dev_set_drvdata(dev, data);
- data->dev = dev;
- data->regmap = regmap;
- data->irq = irq;
-
- if (data->irq > 0) {
- err = devm_request_irq(dev, data->irq, bmp085_eoc_isr,
- IRQF_TRIGGER_RISING, "bmp085",
- data);
- if (err < 0)
- goto exit_free;
- }
-
- /* Initialize the BMP085 chip */
- err = bmp085_init_client(data);
- if (err < 0)
- goto exit_free;
-
- err = bmp085_detect(dev);
- if (err < 0) {
- dev_err(dev, "%s: chip_id failed!\n", BMP085_NAME);
- goto exit_free;
- }
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &bmp085_attr_group);
- if (err)
- goto exit_free;
-
- dev_info(dev, "Successfully initialized %s!\n", BMP085_NAME);
-
- return 0;
-
-exit_free:
- kfree(data);
-exit:
- return err;
-}
-EXPORT_SYMBOL_GPL(bmp085_probe);
-
-int bmp085_remove(struct device *dev)
-{
- struct bmp085_data *data = dev_get_drvdata(dev);
-
- sysfs_remove_group(&data->dev->kobj, &bmp085_attr_group);
- kfree(data);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bmp085_remove);
-
-MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com>");
-MODULE_DESCRIPTION("BMP085 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h
deleted file mode 100644
index 8b8e3b1f5ca5..000000000000
--- a/drivers/misc/bmp085.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2012 Bosch Sensortec GmbH
- * Copyright (c) 2012 Unixphere AB
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _BMP085_H
-#define _BMP085_H
-
-#include <linux/regmap.h>
-
-#define BMP085_NAME "bmp085"
-
-extern struct regmap_config bmp085_regmap_config;
-
-int bmp085_probe(struct device *dev, struct regmap *regmap, int irq);
-int bmp085_remove(struct device *dev);
-int bmp085_detect(struct device *dev);
-
-#endif
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 344a0ff8f8c7..01d372aba131 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -162,7 +162,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SPAP_V 0x0000000000000001ULL
/****** CXL_PSL_Control ****************************************************/
-#define CXL_PSL_Control_tb 0x0000000000000001ULL
+#define CXL_PSL_Control_tb (0x1ull << (63-63))
+#define CXL_PSL_Control_Fr (0x1ull << (63-31))
+#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29))
+#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29))
/****** CXL_PSL_DLCNTL *****************************************************/
#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
@@ -854,6 +857,7 @@ int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
int cxl_check_error(struct cxl_afu *afu);
int cxl_afu_slbia(struct cxl_afu *afu);
int cxl_tlb_slb_invalidate(struct cxl *adapter);
+int cxl_data_cache_flush(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index e606fdc4bc9c..a217a74ccc98 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -290,6 +290,37 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
return 0;
}
+int cxl_data_cache_flush(struct cxl *adapter)
+{
+ u64 reg;
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("Flushing data cache\n");
+
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ reg |= CXL_PSL_Control_Fr;
+ cxl_p1_write(adapter, CXL_PSL_Control, reg);
+
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
+ return -EBUSY;
+ }
+
+ if (!cxl_ops->link_ok(adapter, NULL)) {
+ dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
+ return -EIO;
+ }
+ cpu_relax();
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ }
+
+ reg &= ~CXL_PSL_Control_Fr;
+ cxl_p1_write(adapter, CXL_PSL_Control, reg);
+ return 0;
+}
+
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
int rc;
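
The cxl_data_cache_flush() addition above is the usual request-bit / poll-status / clear-bit idiom. A minimal sketch of that idiom follows; reg_read()/reg_write() and the FLUSH_* names are placeholders, not real CXL symbols.

static int flush_and_wait(void __iomem *base)
{
	unsigned long timeout = jiffies + 5 * HZ;	/* arbitrary time budget */
	u64 reg = reg_read(base) | FLUSH_REQUEST;

	reg_write(base, reg);				/* start the flush */
	while ((reg_read(base) & FLUSH_STATUS_MASK) != FLUSH_COMPLETE) {
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;			/* hardware never finished */
		cpu_relax();				/* polite busy-wait */
	}
	reg_write(base, reg & ~FLUSH_REQUEST);		/* drop the request bit */
	return 0;
}
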
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index edc458395f68..ec175ea5dfba 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -460,7 +460,7 @@ int cxl_of_probe(struct platform_device *pdev)
struct device_node *afu_np = NULL;
struct cxl *adapter = NULL;
int ret;
- int slice, slice_ok;
+ int slice = 0, slice_ok = 0;
pr_devel("in %s\n", __func__);
@@ -476,13 +476,13 @@ int cxl_of_probe(struct platform_device *pdev)
}
/* init afu */
- slice_ok = 0;
- for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) {
+ for_each_child_of_node(np, afu_np) {
if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
slice, ret);
else
slice_ok++;
+ slice++;
}
if (slice_ok == 0) {
@@ -490,8 +490,6 @@ int cxl_of_probe(struct platform_device *pdev)
adapter->slices = 0;
}
- if (afu_np)
- of_node_put(afu_np);
return 0;
}
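
The of.c hunk swaps a hand-rolled of_get_next_child() loop for for_each_child_of_node(), which is why the trailing of_node_put() disappears: the helper is roughly the macro below, and of_get_next_child() already releases the reference taken on the previous child on each pass.

#define for_each_child_of_node(parent, child) \
	for (child = of_get_next_child(parent, NULL); child != NULL; \
	     child = of_get_next_child(parent, child))
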
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 6f0c4ac4b649..7afad8477ad5 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1239,6 +1239,9 @@ int cxl_pci_reset(struct cxl *adapter)
dev_info(&dev->dev, "CXL reset\n");
+ /* the adapter is about to be reset, so ignore errors */
+ cxl_data_cache_flush(adapter);
+
/* pcie_warm_reset requests a fundamental pci reset which includes a
* PERST assert/deassert. PERST triggers a loading of the image
* if "user" or "factory" is selected in sysfs */
@@ -1530,11 +1533,11 @@ static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
/* Mellanox CX-4 */
- dev_info(&adapter->dev, "Device uses an XSL\n");
+ dev_info(&dev->dev, "Device uses an XSL\n");
adapter->native->sl_ops = &xsl_ops;
adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
} else {
- dev_info(&adapter->dev, "Device uses a PSL\n");
+ dev_info(&dev->dev, "Device uses a PSL\n");
adapter->native->sl_ops = &psl_ops;
}
}
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 3cdf8e1ca0ad..051b14766ef9 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -593,6 +593,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
+ u8 test_byte;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -743,6 +744,18 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ i2c_set_clientdata(client, at24);
+
+ /*
+ * Perform a one-byte test read to verify that the
+ * chip is functional.
+ */
+ err = at24_read(at24, 0, &test_byte, 1);
+ if (err) {
+ err = -ENODEV;
+ goto err_clients;
+ }
+
at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable;
@@ -764,8 +777,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto err_clients;
}
- i2c_set_clientdata(client, at24);
-
dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
chip.byte_len, client->name,
writable ? "writable" : "read-only", at24->write_max);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 2c6c7c8e3ead..5afe4cd16569 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -121,9 +121,8 @@ static int at25_ee_read(void *priv, unsigned int offset,
* this chip is clocked very slowly
*/
status = spi_sync(at25->spi, &m);
- dev_dbg(&at25->spi->dev,
- "read %Zd bytes at %d --> %d\n",
- count, offset, (int) status);
+ dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n",
+ count, offset, status);
mutex_unlock(&at25->lock);
return status;
@@ -167,8 +166,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
*cp = AT25_WREN;
status = spi_write(at25->spi, cp, 1);
if (status < 0) {
- dev_dbg(&at25->spi->dev, "WREN --> %d\n",
- (int) status);
+ dev_dbg(&at25->spi->dev, "WREN --> %d\n", status);
break;
}
@@ -196,9 +194,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
memcpy(cp, buf, segment);
status = spi_write(at25->spi, bounce,
segment + at25->addrlen + 1);
- dev_dbg(&at25->spi->dev,
- "write %u bytes at %u --> %d\n",
- segment, offset, (int) status);
+ dev_dbg(&at25->spi->dev, "write %u bytes at %u --> %d\n",
+ segment, offset, status);
if (status < 0)
break;
@@ -225,8 +222,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
if ((sr < 0) || (sr & AT25_SR_nRDY)) {
dev_err(&at25->spi->dev,
- "write %d bytes offset %d, "
- "timeout after %u msecs\n",
+ "write %u bytes offset %u, timeout after %u msecs\n",
segment, offset,
jiffies_to_msecs(jiffies -
(timeout - EE_TIMEOUT)));
@@ -368,9 +364,7 @@ static int at25_probe(struct spi_device *spi)
return PTR_ERR(at25->nvmem);
dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
- (chip.byte_len < 1024)
- ? chip.byte_len
- : (chip.byte_len / 1024),
+ (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
(chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name,
(chip.flags & EE_READONLY) ? " (readonly)" : "",
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index a70b853fa2c9..6c1f49a85023 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1351,6 +1351,19 @@ static struct pci_driver genwqe_driver = {
};
/**
+ * genwqe_devnode() - Set default access mode for genwqe devices.
+ *
+ * Default mode should be rw for everybody. Do not change default
+ * device name.
+ */
+static char *genwqe_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return NULL;
+}
+
+/**
* genwqe_init_module() - Driver registration and initialization
*/
static int __init genwqe_init_module(void)
@@ -1363,6 +1376,8 @@ static int __init genwqe_init_module(void)
return -ENOMEM;
}
+ class_genwqe->devnode = genwqe_devnode;
+
debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
if (!debugfs_genwqe) {
rc = -ENOMEM;
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 353ee0cc733d..ddfeefe39540 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1048,8 +1048,6 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
"[%s] **err: could not allocate DDCB **\n", __func__);
return -ENOMEM;
}
- memset(queue->ddcb_vaddr, 0, queue_size);
-
queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) *
queue->ddcb_max, GFP_KERNEL);
if (!queue->ddcb_req) {
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 222367cc8c81..8a679ecc8fd1 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -220,8 +220,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
if (get_order(size) > MAX_ORDER)
return NULL;
- return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
- GFP_KERNEL);
+ return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+ GFP_KERNEL);
}
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index d6a901cd4222..fea8ff40440f 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -688,7 +688,8 @@ static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
- int error = -ENOMEM;
+ int bar;
+ unsigned long off;
/* map the memory mapped i/o registers */
hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
@@ -698,7 +699,15 @@ static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
}
/* map the adapter shared memory region */
- hw->ram_vaddr = pci_iomap(pdev, 2, max_ccb * ILOHW_CCB_SZ);
+ if (pdev->subsystem_device == 0x00E4) {
+ bar = 5;
+ /* Last 8k is reserved for CCBs */
+ off = pci_resource_len(pdev, bar) - 0x2000;
+ } else {
+ bar = 2;
+ off = 0;
+ }
+ hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
if (hw->ram_vaddr == NULL) {
dev_err(&pdev->dev, "Error mapping shared mem\n");
goto mmio_free;
@@ -717,7 +726,7 @@ ram_free:
mmio_free:
pci_iounmap(pdev, hw->mmio_vaddr);
out:
- return error;
+ return -ENOMEM;
}
static void ilo_remove(struct pci_dev *pdev)
@@ -899,7 +908,7 @@ static void __exit ilo_exit(void)
class_destroy(ilo_class);
}
-MODULE_VERSION("1.4.1");
+MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index a039a5df6f21..7ae89b4a21d5 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -47,7 +47,6 @@ const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
void mei_amthif_reset_params(struct mei_device *dev)
{
/* reset iamthif parameters. */
- dev->iamthif_current_cb = NULL;
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_stall_timer = 0;
@@ -67,8 +66,12 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
struct mei_cl *cl = &dev->iamthif_cl;
int ret;
- if (mei_cl_is_connected(cl))
- return 0;
+ mutex_lock(&dev->device_lock);
+
+ if (mei_cl_is_connected(cl)) {
+ ret = 0;
+ goto out;
+ }
dev->iamthif_state = MEI_IAMTHIF_IDLE;
@@ -77,179 +80,37 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
ret = mei_cl_link(cl);
if (ret < 0) {
dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
- return ret;
+ goto out;
}
ret = mei_cl_connect(cl, me_cl, NULL);
- return ret;
-}
-
-/**
- * mei_amthif_read - read data from AMTHIF client
- *
- * @dev: the device structure
- * @file: pointer to file object
- * @ubuf: pointer to user data in user space
- * @length: data length to read
- * @offset: data read offset
- *
- * Locking: called under "dev->device_lock" lock
- *
- * Return:
- * returned data length on success,
- * zero if no data to read,
- * negative on failure.
- */
-int mei_amthif_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset)
-{
- struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *cb;
- int rets;
- int wait_ret;
-
- dev_dbg(dev->dev, "checking amthif data\n");
- cb = mei_cl_read_cb(cl, file);
-
- /* Check for if we can block or not*/
- if (cb == NULL && file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
-
- dev_dbg(dev->dev, "waiting for amthif data\n");
- while (cb == NULL) {
- /* unlock the Mutex */
- mutex_unlock(&dev->device_lock);
-
- wait_ret = wait_event_interruptible(cl->rx_wait,
- !list_empty(&cl->rd_completed) ||
- !mei_cl_is_connected(cl));
-
- /* Locking again the Mutex */
- mutex_lock(&dev->device_lock);
-
- if (wait_ret)
- return -ERESTARTSYS;
-
- if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
- goto out;
- }
-
- cb = mei_cl_read_cb(cl, file);
- }
-
- if (cb->status) {
- rets = cb->status;
- dev_dbg(dev->dev, "read operation failed %d\n", rets);
- goto free;
- }
-
- dev_dbg(dev->dev, "Got amthif data\n");
- /* if the whole message will fit remove it from the list */
- if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
- list_del_init(&cb->list);
- else if (cb->buf_idx <= *offset) {
- /* end of the message has been reached */
- list_del_init(&cb->list);
- rets = 0;
- goto free;
- }
- /* else means that not full buffer will be read and do not
- * remove message from deletion list
- */
-
- dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
- cb->buf.size, cb->buf_idx);
-
- /* length is being truncated to PAGE_SIZE, however,
- * the buf_idx may point beyond */
- length = min_t(size_t, length, (cb->buf_idx - *offset));
-
- if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
- dev_dbg(dev->dev, "failed to copy data to userland\n");
- rets = -EFAULT;
- } else {
- rets = length;
- if ((*offset + length) < cb->buf_idx) {
- *offset += length;
- goto out;
- }
- }
-free:
- dev_dbg(dev->dev, "free amthif cb memory.\n");
- *offset = 0;
- mei_io_cb_free(cb);
out:
- return rets;
+ mutex_unlock(&dev->device_lock);
+ return ret;
}
/**
* mei_amthif_read_start - queue message for sending read credential
*
* @cl: host client
- * @file: file pointer of message recipient
+ * @fp: file pointer of message recipient
*
* Return: 0 on success, <0 on failure.
*/
-static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *fp)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
- int rets;
-
- cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
- if (!cb) {
- rets = -ENOMEM;
- goto err;
- }
- rets = mei_io_cb_alloc_buf(cb, mei_cl_mtu(cl));
- if (rets)
- goto err;
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, fp);
+ if (!cb)
+ return -ENOMEM;
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ cl->rx_flow_ctrl_creds++;
dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_fp = cb->fp;
- dev->iamthif_current_cb = cb;
-
- return 0;
-err:
- mei_io_cb_free(cb);
- return rets;
-}
-
-/**
- * mei_amthif_send_cmd - send amthif command to the ME
- *
- * @cl: the host client
- * @cb: mei call back struct
- *
- * Return: 0 on success, <0 on failure.
- */
-static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
-{
- struct mei_device *dev;
- int ret;
-
- if (!cl->dev || !cb)
- return -ENODEV;
-
- dev = cl->dev;
-
- dev->iamthif_state = MEI_IAMTHIF_WRITING;
- dev->iamthif_current_cb = cb;
- dev->iamthif_fp = cb->fp;
- dev->iamthif_canceled = false;
-
- ret = mei_cl_write(cl, cb, false);
- if (ret < 0)
- return ret;
-
- if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->fp);
+ cl->fp = cb->fp;
return 0;
}
@@ -265,20 +126,32 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
{
struct mei_cl *cl = &dev->iamthif_cl;
struct mei_cl_cb *cb;
+ int ret;
dev->iamthif_canceled = false;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_fp = NULL;
dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
typeof(*cb), list);
- if (!cb)
+ if (!cb) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ cl->fp = NULL;
return 0;
+ }
list_del_init(&cb->list);
- return mei_amthif_send_cmd(cl, cb);
+ dev->iamthif_state = MEI_IAMTHIF_WRITING;
+ cl->fp = cb->fp;
+
+ ret = mei_cl_write(cl, cb, false);
+ if (ret < 0)
+ return ret;
+
+ if (cb->completed)
+ cb->status = mei_amthif_read_start(cl, cb->fp);
+
+ return 0;
}
/**
@@ -299,8 +172,7 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
/*
* The previous request is still in processing, queue this one.
*/
- if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
- dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+ if (dev->iamthif_state != MEI_IAMTHIF_IDLE)
return 0;
return mei_amthif_run_next_cmd(dev);
@@ -309,7 +181,6 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
/**
* mei_amthif_poll - the amthif poll function
*
- * @dev: the device structure
* @file: pointer to file structure
* @wait: pointer to poll_table structure
*
@@ -317,26 +188,19 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
*
* Locking: called under "dev->device_lock" lock
*/
-
-unsigned int mei_amthif_poll(struct mei_device *dev,
- struct file *file, poll_table *wait)
+unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
{
+ struct mei_cl *cl = file->private_data;
+ struct mei_cl_cb *cb = mei_cl_read_cb(cl, file);
unsigned int mask = 0;
- poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
-
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_fp == file) {
-
+ poll_wait(file, &cl->rx_wait, wait);
+ if (cb)
mask |= POLLIN | POLLRDNORM;
- mei_amthif_run_next_cmd(dev);
- }
return mask;
}
-
-
/**
* mei_amthif_irq_write - write iamthif command in irq thread context.
*
@@ -393,7 +257,6 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
return 0;
dev_dbg(dev->dev, "completed amthif read.\n ");
- dev->iamthif_current_cb = NULL;
dev->iamthif_stall_timer = 0;
return 0;
@@ -409,115 +272,63 @@ void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev = cl->dev;
- if (cb->fop_type == MEI_FOP_WRITE) {
+ dev_dbg(dev->dev, "completing amthif call back.\n");
+ switch (cb->fop_type) {
+ case MEI_FOP_WRITE:
if (!cb->status) {
dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
+ mei_schedule_stall_timer(dev);
mei_io_cb_free(cb);
return;
}
- /*
- * in case of error enqueue the write cb to complete read list
- * so it can be propagated to the reader
- */
- list_add_tail(&cb->list, &cl->rd_completed);
- wake_up_interruptible(&cl->rx_wait);
- return;
- }
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ cl->fp = NULL;
+ if (!dev->iamthif_canceled) {
+ /*
+ * in case of error enqueue the write cb to complete
+ * read list so it can be propagated to the reader
+ */
+ list_add_tail(&cb->list, &cl->rd_completed);
+ wake_up_interruptible(&cl->rx_wait);
+ } else {
+ mei_io_cb_free(cb);
+ }
+ break;
+ case MEI_FOP_READ:
+ if (!dev->iamthif_canceled) {
+ list_add_tail(&cb->list, &cl->rd_completed);
+ dev_dbg(dev->dev, "amthif read completed\n");
+ wake_up_interruptible(&cl->rx_wait);
+ } else {
+ mei_io_cb_free(cb);
+ }
- if (!dev->iamthif_canceled) {
- dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
dev->iamthif_stall_timer = 0;
- list_add_tail(&cb->list, &cl->rd_completed);
- dev_dbg(dev->dev, "amthif read completed\n");
- } else {
mei_amthif_run_next_cmd(dev);
+ break;
+ default:
+ WARN_ON(1);
}
-
- dev_dbg(dev->dev, "completing amthif call back.\n");
- wake_up_interruptible(&cl->rx_wait);
}
/**
* mei_clear_list - removes all callbacks associated with file
* from mei_cb_list
*
- * @dev: device structure.
* @file: file structure
* @mei_cb_list: callbacks list
*
* mei_clear_list is called to clear resources associated with file
* when application calls close function or Ctrl-C was pressed
- *
- * Return: true if callback removed from the list, false otherwise
*/
-static bool mei_clear_list(struct mei_device *dev,
- const struct file *file, struct list_head *mei_cb_list)
+static void mei_clear_list(const struct file *file,
+ struct list_head *mei_cb_list)
{
- struct mei_cl *cl = &dev->iamthif_cl;
struct mei_cl_cb *cb, *next;
- bool removed = false;
-
- /* list all list member */
- list_for_each_entry_safe(cb, next, mei_cb_list, list) {
- /* check if list member associated with a file */
- if (file == cb->fp) {
- /* check if cb equal to current iamthif cb */
- if (dev->iamthif_current_cb == cb) {
- dev->iamthif_current_cb = NULL;
- /* send flow control to iamthif client */
- mei_hbm_cl_flow_control_req(dev, cl);
- }
- /* free all allocated buffers */
- mei_io_cb_free(cb);
- removed = true;
- }
- }
- return removed;
-}
-/**
- * mei_clear_lists - removes all callbacks associated with file
- *
- * @dev: device structure
- * @file: file structure
- *
- * mei_clear_lists is called to clear resources associated with file
- * when application calls close function or Ctrl-C was pressed
- *
- * Return: true if callback removed from the list, false otherwise
- */
-static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
-{
- bool removed = false;
- struct mei_cl *cl = &dev->iamthif_cl;
-
- /* remove callbacks associated with a file */
- mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
- if (mei_clear_list(dev, file, &cl->rd_completed))
- removed = true;
-
- mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
-
- if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list))
- removed = true;
-
- if (mei_clear_list(dev, file, &dev->write_waiting_list.list))
- removed = true;
-
- if (mei_clear_list(dev, file, &dev->write_list.list))
- removed = true;
-
- /* check if iamthif_current_cb not NULL */
- if (dev->iamthif_current_cb && !removed) {
- /* check file and iamthif current cb association */
- if (dev->iamthif_current_cb->fp == file) {
- /* remove cb */
- mei_io_cb_free(dev->iamthif_current_cb);
- dev->iamthif_current_cb = NULL;
- removed = true;
- }
- }
- return removed;
+ list_for_each_entry_safe(cb, next, mei_cb_list, list)
+ if (file == cb->fp)
+ mei_io_cb_free(cb);
}
/**
@@ -530,23 +341,21 @@ static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
*/
int mei_amthif_release(struct mei_device *dev, struct file *file)
{
+ struct mei_cl *cl = file->private_data;
+
if (dev->iamthif_open_count > 0)
dev->iamthif_open_count--;
- if (dev->iamthif_fp == file &&
- dev->iamthif_state != MEI_IAMTHIF_IDLE) {
+ if (cl->fp == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
dev->iamthif_state);
dev->iamthif_canceled = true;
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
- dev_dbg(dev->dev, "run next amthif iamthif cb\n");
- mei_amthif_run_next_cmd(dev);
- }
}
- if (mei_clear_lists(dev, file))
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ mei_clear_list(file, &dev->amthif_cmd_list.list);
+ mei_clear_list(file, &cl->rd_completed);
+ mei_clear_list(file, &dev->ctrl_rd_list.list);
return 0;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 1f33fea9299f..8cac7ef9ad0d 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -126,7 +126,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
goto out;
/* wait on event only if there is no other waiter */
- if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
+ /* synchronized under device mutex */
+ if (!waitqueue_active(&cl->rx_wait)) {
mutex_unlock(&bus->device_lock);
@@ -142,7 +143,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
+ rets = -ENODEV;
goto out;
}
}
@@ -234,7 +235,7 @@ static void mei_cl_bus_event_work(struct work_struct *work)
/* Prepare for the next read */
if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
mutex_lock(&bus->device_lock);
- mei_cl_read_start(cldev->cl, 0, NULL);
+ mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
mutex_unlock(&bus->device_lock);
}
}
@@ -324,7 +325,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
mutex_lock(&bus->device_lock);
- ret = mei_cl_read_start(cldev->cl, 0, NULL);
+ ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
mutex_unlock(&bus->device_lock);
if (ret && ret != -EBUSY)
return ret;
@@ -983,12 +984,10 @@ void mei_cl_bus_rescan_work(struct work_struct *work)
container_of(work, struct mei_device, bus_rescan_work);
struct mei_me_client *me_cl;
- mutex_lock(&bus->device_lock);
me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
if (me_cl)
mei_amthif_host_init(bus, me_cl);
mei_me_cl_put(me_cl);
- mutex_unlock(&bus->device_lock);
mei_cl_bus_rescan(bus);
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 641c1a566687..6fe02350578d 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -358,8 +358,9 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
*
* Return: mei_cl_cb pointer or NULL;
*/
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- const struct file *fp)
+static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
+ enum mei_cb_file_ops type,
+ const struct file *fp)
{
struct mei_cl_cb *cb;
@@ -420,32 +421,41 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
}
/**
- * mei_io_cb_alloc_buf - allocate callback buffer
+ * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
- * @cb: io callback structure
+ * @cl: host client
* @length: size of the buffer
+ * @type: operation type
+ * @fp: associated file pointer (might be NULL)
*
- * Return: 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
+ * Return: cb on success and NULL on failure
*/
-int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
+struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
+ enum mei_cb_file_ops fop_type,
+ const struct file *fp)
{
+ struct mei_cl_cb *cb;
+
+ cb = mei_io_cb_init(cl, fop_type, fp);
if (!cb)
- return -EINVAL;
+ return NULL;
if (length == 0)
- return 0;
+ return cb;
cb->buf.data = kmalloc(length, GFP_KERNEL);
- if (!cb->buf.data)
- return -ENOMEM;
+ if (!cb->buf.data) {
+ mei_io_cb_free(cb);
+ return NULL;
+ }
cb->buf.size = length;
- return 0;
+
+ return cb;
}
/**
- * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
+ * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
+ * and enqueuing of the control commands cb
*
* @cl: host client
* @length: size of the buffer
@@ -453,22 +463,23 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
* @fp: associated file pointer (might be NULL)
*
* Return: cb on success and NULL on failure
+ * Locking: called under "dev->device_lock" lock
*/
-struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
- enum mei_cb_file_ops type,
- const struct file *fp)
+struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
+ enum mei_cb_file_ops fop_type,
+ const struct file *fp)
{
struct mei_cl_cb *cb;
- cb = mei_io_cb_init(cl, type, fp);
- if (!cb)
- return NULL;
+ /* for RX always allocate at least client's mtu */
+ if (length)
+ length = max_t(size_t, length, mei_cl_mtu(cl));
- if (mei_io_cb_alloc_buf(cb, length)) {
- mei_io_cb_free(cb);
+ cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
+ if (!cb)
return NULL;
- }
+ list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
return cb;
}
@@ -754,7 +765,8 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
mei_cl_wake_all(cl);
- cl->mei_flow_ctrl_creds = 0;
+ cl->rx_flow_ctrl_creds = 0;
+ cl->tx_flow_ctrl_creds = 0;
cl->timer_count = 0;
if (!cl->me_cl)
@@ -764,7 +776,7 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
cl->me_cl->connect_count--;
if (cl->me_cl->connect_count == 0)
- cl->me_cl->mei_flow_ctrl_creds = 0;
+ cl->me_cl->tx_flow_ctrl_creds = 0;
mei_me_cl_put(cl->me_cl);
cl->me_cl = NULL;
@@ -814,6 +826,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
return 0;
}
@@ -867,13 +880,11 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
cl->state = MEI_FILE_DISCONNECTING;
- cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
- rets = cb ? 0 : -ENOMEM;
- if (rets)
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
+ if (!cb) {
+ rets = -ENOMEM;
goto out;
-
- cl_dbg(dev, cl, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
if (mei_hbuf_acquire(dev)) {
rets = mei_cl_send_disconnect(cl, cb);
@@ -1001,6 +1012,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
return 0;
}
@@ -1042,14 +1054,14 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* @cl: host client
* @me_cl: me client
- * @file: pointer to file structure
+ * @fp: pointer to file structure
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
- const struct file *file)
+ const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1076,12 +1088,11 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
goto nortpm;
}
- cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
- rets = cb ? 0 : -ENOMEM;
- if (rets)
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
+ if (!cb) {
+ rets = -ENOMEM;
goto out;
-
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
@@ -1159,50 +1170,42 @@ err:
return ERR_PTR(ret);
}
-
-
/**
- * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
*
* @cl: host client
- * @fp: the file pointer associated with the pointer
*
- * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
+ * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
*/
-static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
+static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
- int rets;
-
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
- if (cl->mei_flow_ctrl_creds > 0)
+ if (cl->tx_flow_ctrl_creds > 0)
return 1;
- if (mei_cl_is_fixed_address(cl)) {
- rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
- if (rets && rets != -EBUSY)
- return rets;
+ if (mei_cl_is_fixed_address(cl))
return 1;
- }
if (mei_cl_is_single_recv_buf(cl)) {
- if (cl->me_cl->mei_flow_ctrl_creds > 0)
+ if (cl->me_cl->tx_flow_ctrl_creds > 0)
return 1;
}
return 0;
}
/**
- * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
+ * for a client
*
- * @cl: private data of the file object
+ * @cl: host client
*
* Return:
* 0 on success
* -EINVAL when ctrl credits are <= 0
*/
-static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
@@ -1211,13 +1214,13 @@ static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
return 0;
if (mei_cl_is_single_recv_buf(cl)) {
- if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
+ if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
- cl->me_cl->mei_flow_ctrl_creds--;
+ cl->me_cl->tx_flow_ctrl_creds--;
} else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
return -EINVAL;
- cl->mei_flow_ctrl_creds--;
+ cl->tx_flow_ctrl_creds--;
}
return 0;
}
@@ -1292,7 +1295,7 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
* mei_cl_notify_request - send notification stop/start request
*
* @cl: host client
- * @file: associate request with file
+ * @fp: associate request with file
* @request: 1 for start or 0 for stop
*
* Locking: called under "dev->device_lock" lock
@@ -1300,7 +1303,7 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
* Return: 0 on such and error otherwise.
*/
int mei_cl_notify_request(struct mei_cl *cl,
- const struct file *file, u8 request)
+ const struct file *fp, u8 request)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1325,7 +1328,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
}
fop_type = mei_cl_notify_req2fop(request);
- cb = mei_io_cb_init(cl, fop_type, file);
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
if (!cb) {
rets = -ENOMEM;
goto out;
@@ -1336,9 +1339,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
rets = -ENODEV;
goto out;
}
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
}
mutex_unlock(&dev->device_lock);
@@ -1436,25 +1437,6 @@ out:
}
/**
- * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
- * for given host client
- *
- * @cl: host client
- *
- * Return: true, if found at least one cb.
- */
-static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
-{
- struct mei_device *dev = cl->dev;
- struct mei_cl_cb *cb;
-
- list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
- if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
- return true;
- return false;
-}
-
-/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
@@ -1477,26 +1459,22 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
if (!mei_cl_is_connected(cl))
return -ENODEV;
- /* HW currently supports only one pending read */
- if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
- return -EBUSY;
-
if (!mei_me_cl_is_active(cl->me_cl)) {
cl_err(dev, cl, "no such me client\n");
return -ENOTTY;
}
- /* always allocate at least client max message */
- length = max_t(size_t, length, mei_cl_mtu(cl));
- cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
+ if (mei_cl_is_fixed_address(cl) || cl == &dev->iamthif_cl)
+ return 0;
+
+ /* HW currently supports only one pending read */
+ if (cl->rx_flow_ctrl_creds)
+ return -EBUSY;
+
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
if (!cb)
return -ENOMEM;
- if (mei_cl_is_fixed_address(cl)) {
- list_add_tail(&cb->list, &cl->rd_pending);
- return 0;
- }
-
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
@@ -1504,16 +1482,15 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
goto nortpm;
}
+ rets = 0;
if (mei_hbuf_acquire(dev)) {
rets = mei_hbm_cl_flow_control_req(dev, cl);
if (rets < 0)
goto out;
- list_add_tail(&cb->list, &cl->rd_pending);
- } else {
- rets = 0;
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ list_move_tail(&cb->list, &cl->rd_pending);
}
+ cl->rx_flow_ctrl_creds++;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
@@ -1557,7 +1534,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
first_chunk = cb->buf_idx == 0;
- rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
+ rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
if (rets < 0)
return rets;
@@ -1605,7 +1582,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->completed = mei_hdr.msg_complete == 1;
if (first_chunk) {
- if (mei_cl_flow_ctrl_reduce(cl))
+ if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
return -EIO;
}
@@ -1663,7 +1640,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.msg_complete = 0;
mei_hdr.internal = cb->internal;
- rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
+ rets = mei_cl_tx_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
@@ -1691,7 +1668,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets)
goto err;
- rets = mei_cl_flow_ctrl_reduce(cl);
+ rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
if (rets)
goto err;
@@ -1761,6 +1738,9 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_READ:
list_add_tail(&cb->list, &cl->rd_completed);
+ if (!mei_cl_is_fixed_address(cl) &&
+ !WARN_ON(!cl->rx_flow_ctrl_creds))
+ cl->rx_flow_ctrl_creds--;
if (!mei_cl_bus_rx_event(cl))
wake_up_interruptible(&cl->rx_wait);
break;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 0d7a3a1fef78..d2bfabecd882 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -82,11 +82,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
/*
* MEI IO Functions
*/
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- const struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
-
/**
* mei_io_list_init - Sets up a queue list.
@@ -118,6 +114,9 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops type,
const struct file *fp);
+struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
+ enum mei_cb_file_ops type,
+ const struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
/*
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 085f3aafe6fa..dd7f15a65eed 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -161,6 +161,7 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
* @dev: the device structure
* @cl: client
* @hbm_cmd: host bus message command
+ * @buf: message buffer
* @len: buffer length
*
* Return: 0 on success, <0 on failure.
@@ -276,6 +277,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev->hbm_state = MEI_HBM_STARTING;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
return 0;
}
@@ -311,6 +313,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
return 0;
}
@@ -339,7 +342,7 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
me_cl->props = res->client_properties;
me_cl->client_id = res->me_addr;
- me_cl->mei_flow_ctrl_creds = 0;
+ me_cl->tx_flow_ctrl_creds = 0;
mei_me_cl_add(dev, me_cl);
@@ -561,6 +564,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
return 0;
}
@@ -636,23 +640,22 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
- * mei_hbm_add_single_flow_creds - adds single buffer credentials.
+ * mei_hbm_add_single_tx_flow_ctrl_creds - adds single buffer credentials.
*
* @dev: the device structure
- * @flow: flow control.
+ * @fctrl: flow control response bus message
*
* Return: 0 on success, < 0 otherwise
*/
-static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
- struct hbm_flow_control *flow)
+static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
+ struct hbm_flow_control *fctrl)
{
struct mei_me_client *me_cl;
int rets;
- me_cl = mei_me_cl_by_id(dev, flow->me_addr);
+ me_cl = mei_me_cl_by_id(dev, fctrl->me_addr);
if (!me_cl) {
- dev_err(dev->dev, "no such me client %d\n",
- flow->me_addr);
+ dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr);
return -ENOENT;
}
@@ -661,9 +664,9 @@ static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
goto out;
}
- me_cl->mei_flow_ctrl_creds++;
+ me_cl->tx_flow_ctrl_creds++;
dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
- flow->me_addr, me_cl->mei_flow_ctrl_creds);
+ fctrl->me_addr, me_cl->tx_flow_ctrl_creds);
rets = 0;
out:
@@ -675,24 +678,24 @@ out:
* mei_hbm_cl_flow_control_res - flow control response from me
*
* @dev: the device structure
- * @flow_control: flow control response bus message
+ * @fctrl: flow control response bus message
*/
-static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
- struct hbm_flow_control *flow_control)
+static void mei_hbm_cl_tx_flow_ctrl_creds_res(struct mei_device *dev,
+ struct hbm_flow_control *fctrl)
{
struct mei_cl *cl;
- if (!flow_control->host_addr) {
+ if (!fctrl->host_addr) {
/* single receive buffer */
- mei_hbm_add_single_flow_creds(dev, flow_control);
+ mei_hbm_add_single_tx_flow_ctrl_creds(dev, fctrl);
return;
}
- cl = mei_hbm_cl_find_by_cmd(dev, flow_control);
+ cl = mei_hbm_cl_find_by_cmd(dev, fctrl);
if (cl) {
- cl->mei_flow_ctrl_creds++;
+ cl->tx_flow_ctrl_creds++;
cl_dbg(dev, cl, "flow control creds = %d.\n",
- cl->mei_flow_ctrl_creds);
+ cl->tx_flow_ctrl_creds);
}
}
@@ -871,10 +874,10 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
- cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
+ cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
+ NULL);
if (!cb)
return -ENOMEM;
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
return 0;
}
@@ -1022,7 +1025,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
struct mei_hbm_cl_cmd *cl_cmd;
struct hbm_client_connect_request *disconnect_req;
- struct hbm_flow_control *flow_control;
+ struct hbm_flow_control *fctrl;
/* read the message to our buffer */
BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
@@ -1102,8 +1105,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
case MEI_FLOW_CONTROL_CMD:
dev_dbg(dev->dev, "hbm: client flow control response: message received.\n");
- flow_control = (struct hbm_flow_control *) mei_msg;
- mei_hbm_cl_flow_control_res(dev, flow_control);
+ fctrl = (struct hbm_flow_control *)mei_msg;
+ mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl);
break;
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 0dcb854b4bfc..7ad15d678878 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -125,6 +125,9 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
+#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index dc3a854e02d3..56c2101e80ad 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -18,6 +18,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -1063,6 +1064,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
}
}
+ pm_runtime_set_active(dev->dev);
+
hcsr = mei_hcsr_read(dev);
/* H_RST may be found lit before reset is started,
* for example if preceding reset flow hasn't completed.
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 4a6c1b85f11e..e6e5e55a12ed 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -935,6 +936,8 @@ static int mei_txe_hw_start(struct mei_device *dev)
return ret;
}
+ pm_runtime_set_active(dev->dev);
+
/* enable input ready interrupts:
* SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
*/
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f7c8dfdb6a12..9a9c2484d107 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -94,7 +94,7 @@ void mei_cancel_work(struct mei_device *dev)
cancel_work_sync(&dev->reset_work);
cancel_work_sync(&dev->bus_rescan_work);
- cancel_delayed_work(&dev->timer_work);
+ cancel_delayed_work_sync(&dev->timer_work);
}
EXPORT_SYMBOL_GPL(mei_cancel_work);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3831a7ba2531..5a4893ce9c24 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -102,26 +102,25 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
- unsigned char *buffer = NULL;
size_t buf_sz;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
if (!cb) {
- cl_err(dev, cl, "pending read cb not found\n");
- goto out;
+ if (!mei_cl_is_fixed_address(cl)) {
+ cl_err(dev, cl, "pending read cb not found\n");
+ goto discard;
+ }
+ cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
+ if (!cb)
+ goto discard;
+ list_add_tail(&cb->list, &cl->rd_pending);
}
if (!mei_cl_is_connected(cl)) {
cl_dbg(dev, cl, "not connected\n");
- cb->status = -ENODEV;
- goto out;
- }
-
- if (cb->buf.size == 0 || cb->buf.data == NULL) {
- cl_err(dev, cl, "response buffer is not allocated.\n");
list_move_tail(&cb->list, &complete_list->list);
- cb->status = -ENOMEM;
- goto out;
+ cb->status = -ENODEV;
+ goto discard;
}
buf_sz = mei_hdr->length + cb->buf_idx;
@@ -132,25 +131,19 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
list_move_tail(&cb->list, &complete_list->list);
cb->status = -EMSGSIZE;
- goto out;
+ goto discard;
}
if (cb->buf.size < buf_sz) {
cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, mei_hdr->length, cb->buf_idx);
- buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);
- if (!buffer) {
- cb->status = -ENOMEM;
- list_move_tail(&cb->list, &complete_list->list);
- goto out;
- }
- cb->buf.data = buffer;
- cb->buf.size = buf_sz;
+ list_move_tail(&cb->list, &complete_list->list);
+ cb->status = -EMSGSIZE;
+ goto discard;
}
- buffer = cb->buf.data + cb->buf_idx;
- mei_read_slots(dev, buffer, mei_hdr->length);
+ mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);
cb->buf_idx += mei_hdr->length;
@@ -162,10 +155,10 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
pm_request_autosuspend(dev->dev);
}
-out:
- if (!buffer)
- mei_irq_discard_msg(dev, mei_hdr);
+ return 0;
+discard:
+ mei_irq_discard_msg(dev, mei_hdr);
return 0;
}
@@ -216,6 +209,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int ret;
+ if (!list_empty(&cl->rd_pending))
+ return 0;
+
msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
slots = mei_hbuf_empty_slots(dev);
@@ -463,6 +459,19 @@ static void mei_connect_timeout(struct mei_cl *cl)
mei_reset(dev);
}
+#define MEI_STALL_TIMER_FREQ (2 * HZ)
+/**
+ * mei_schedule_stall_timer - re-arm stall_timer work
+ *
+ * Schedule stall timer
+ *
+ * @dev: the device structure
+ */
+void mei_schedule_stall_timer(struct mei_device *dev)
+{
+ schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
+}
+
/**
* mei_timer - timer function.
*
@@ -472,10 +481,9 @@ static void mei_connect_timeout(struct mei_cl *cl)
void mei_timer(struct work_struct *work)
{
struct mei_cl *cl;
-
struct mei_device *dev = container_of(work,
struct mei_device, timer_work.work);
-
+ bool reschedule_timer = false;
mutex_lock(&dev->device_lock);
@@ -490,6 +498,7 @@ void mei_timer(struct work_struct *work)
mei_reset(dev);
goto out;
}
+ reschedule_timer = true;
}
}
@@ -504,6 +513,7 @@ void mei_timer(struct work_struct *work)
mei_connect_timeout(cl);
goto out;
}
+ reschedule_timer = true;
}
}
@@ -514,19 +524,16 @@ void mei_timer(struct work_struct *work)
if (--dev->iamthif_stall_timer == 0) {
dev_err(dev->dev, "timer: amthif hanged.\n");
mei_reset(dev);
- dev->iamthif_canceled = false;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- mei_io_cb_free(dev->iamthif_current_cb);
- dev->iamthif_current_cb = NULL;
-
- dev->iamthif_fp = NULL;
mei_amthif_run_next_cmd(dev);
+ goto out;
}
+ reschedule_timer = true;
}
out:
- if (dev->dev_state != MEI_DEV_DISABLED)
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
+ mei_schedule_stall_timer(dev);
+
mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 52635b063873..fa50635512e8 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -71,6 +71,7 @@ static int mei_open(struct inode *inode, struct file *file)
goto err_unlock;
}
+ cl->fp = file;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
@@ -138,9 +139,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
struct mei_cl_cb *cb = NULL;
+ bool nonblock = !!(file->f_flags & O_NONBLOCK);
int rets;
- int err;
-
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -164,11 +164,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_read(dev, file, ubuf, length, offset);
- goto out;
- }
-
cb = mei_cl_read_cb(cl, file);
if (cb)
goto copy_buffer;
@@ -176,24 +171,29 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
if (*offset > 0)
*offset = 0;
- err = mei_cl_read_start(cl, length, file);
- if (err && err != -EBUSY) {
- cl_dbg(dev, cl, "mei start read failure status = %d\n", err);
- rets = err;
+ rets = mei_cl_read_start(cl, length, file);
+ if (rets && rets != -EBUSY) {
+ cl_dbg(dev, cl, "mei start read failure status = %d\n", rets);
goto out;
}
- if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
- if (file->f_flags & O_NONBLOCK) {
- rets = -EAGAIN;
- goto out;
- }
+ if (nonblock) {
+ rets = -EAGAIN;
+ goto out;
+ }
+ if (rets == -EBUSY &&
+ !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ do {
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
- (!mei_cl_is_connected(cl)))) {
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)))) {
if (signal_pending(current))
return -EINTR;
@@ -202,16 +202,12 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
+ rets = -ENODEV;
goto out;
}
- }
- cb = mei_cl_read_cb(cl, file);
- if (!cb) {
- rets = 0;
- goto out;
- }
+ cb = mei_cl_read_cb(cl, file);
+ } while (!cb);
copy_buffer:
/* now copy the data to user space */
@@ -609,24 +605,24 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
goto out;
}
- if (cl == &dev->iamthif_cl) {
- mask = mei_amthif_poll(dev, file, wait);
- goto out;
- }
-
if (notify_en) {
poll_wait(file, &cl->ev_wait, wait);
if (cl->notify_ev)
mask |= POLLPRI;
}
+ if (cl == &dev->iamthif_cl) {
+ mask |= mei_amthif_poll(file, wait);
+ goto out;
+ }
+
if (req_events & (POLLIN | POLLRDNORM)) {
poll_wait(file, &cl->rx_wait, wait);
if (!list_empty(&cl->rd_completed))
mask |= POLLIN | POLLRDNORM;
else
- mei_cl_read_start(cl, 0, file);
+ mei_cl_read_start(cl, mei_cl_mtu(cl), file);
}
out:
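
The reworked mei_poll()/mei_amthif_poll() follow the usual character-device poll shape: register on the wait queue unconditionally, then report readiness from current state. A minimal sketch of that shape, with foo, rx_wait and rd_completed as hypothetical names:

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
	struct foo *f = file->private_data;	/* hypothetical per-client state */
	unsigned int mask = 0;

	poll_wait(file, &f->rx_wait, wait);	/* does not block here */
	if (!list_empty(&f->rd_completed))
		mask |= POLLIN | POLLRDNORM;	/* data ready to read */
	return mask;
}
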
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index e5e32503d4bc..1169fd9e7d02 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -80,18 +80,13 @@ const char *mei_dev_state_str(int state);
enum iamthif_states {
MEI_IAMTHIF_IDLE,
MEI_IAMTHIF_WRITING,
- MEI_IAMTHIF_FLOW_CONTROL,
MEI_IAMTHIF_READING,
- MEI_IAMTHIF_READ_COMPLETE
};
enum mei_file_transaction_states {
MEI_IDLE,
MEI_WRITING,
MEI_WRITE_COMPLETE,
- MEI_FLOW_CONTROL,
- MEI_READING,
- MEI_READ_COMPLETE
};
/**
@@ -146,7 +141,7 @@ struct mei_fw_status {
* @refcnt: struct reference count
* @props: client properties
* @client_id: me client id
- * @mei_flow_ctrl_creds: flow control credits
+ * @tx_flow_ctrl_creds: flow control credits
* @connect_count: number connections to this client
* @bus_added: added to bus
*/
@@ -155,7 +150,7 @@ struct mei_me_client {
struct kref refcnt;
struct mei_client_properties props;
u8 client_id;
- u8 mei_flow_ctrl_creds;
+ u8 tx_flow_ctrl_creds;
u8 connect_count;
u8 bus_added;
};
@@ -202,10 +197,11 @@ struct mei_cl_cb {
* @ev_async: event async notification
* @status: connection status
* @me_cl: fw client connected
+ * @fp: file associated with client
* @host_client_id: host id
- * @mei_flow_ctrl_creds: transmit flow credentials
+ * @tx_flow_ctrl_creds: transmit flow credentials
+ * @rx_flow_ctrl_creds: receive flow credentials
* @timer_count: watchdog timer for operation completion
- * @reserved: reserved for alignment
* @notify_en: notification - enabled/disabled
* @notify_ev: pending notification event
* @writing_state: state of the tx
@@ -225,10 +221,11 @@ struct mei_cl {
struct fasync_struct *ev_async;
int status;
struct mei_me_client *me_cl;
+ const struct file *fp;
u8 host_client_id;
- u8 mei_flow_ctrl_creds;
+ u8 tx_flow_ctrl_creds;
+ u8 rx_flow_ctrl_creds;
u8 timer_count;
- u8 reserved;
u8 notify_en;
u8 notify_ev;
enum mei_file_transaction_states writing_state;
@@ -400,9 +397,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @override_fixed_address: force allow fixed address behavior
*
* @amthif_cmd_list : amthif list for cmd waiting
- * @iamthif_fp : file for current amthif operation
* @iamthif_cl : amthif host client
- * @iamthif_current_cb : amthif current operation callback
* @iamthif_open_count : number of opened amthif connections
* @iamthif_stall_timer : timer to detect amthif hang
* @iamthif_state : amthif processor state
@@ -484,10 +479,7 @@ struct mei_device {
/* amthif list for cmd waiting */
struct mei_cl_cb amthif_cmd_list;
- /* driver managed amthif list for reading completed amthif cmd data */
- const struct file *iamthif_fp;
struct mei_cl iamthif_cl;
- struct mei_cl_cb *iamthif_current_cb;
long iamthif_open_count;
u32 iamthif_stall_timer;
enum iamthif_states iamthif_state;
@@ -556,6 +548,7 @@ void mei_cancel_work(struct mei_device *dev);
*/
void mei_timer(struct work_struct *work);
+void mei_schedule_stall_timer(struct mei_device *dev);
int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots);
@@ -569,11 +562,7 @@ void mei_amthif_reset_params(struct mei_device *dev);
int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-int mei_amthif_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset);
-
-unsigned int mei_amthif_poll(struct mei_device *dev,
- struct file *file, poll_table *wait);
+unsigned int mei_amthif_poll(struct file *file, poll_table *wait);
int mei_amthif_release(struct mei_device *dev, struct file *file);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 71cea9b296b2..f3ffd883b232 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -91,6 +91,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
@@ -217,8 +220,6 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- schedule_delayed_work(&dev->timer_work, HZ);
-
/*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
@@ -400,6 +401,9 @@ static int mei_me_pm_runtime_suspend(struct device *device)
dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+ if (ret && ret != -EAGAIN)
+ schedule_work(&dev->reset_work);
+
return ret;
}
@@ -423,6 +427,9 @@ static int mei_me_pm_runtime_resume(struct device *device)
dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+ if (ret)
+ schedule_work(&dev->reset_work);
+
return ret;
}
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 30cc30683c07..58ffd30dcc91 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -347,6 +347,10 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
mutex_unlock(&dev->device_lock);
+
+ if (ret && ret != -EAGAIN)
+ schedule_work(&dev->reset_work);
+
return ret;
}
@@ -372,6 +376,9 @@ static int mei_txe_pm_runtime_resume(struct device *device)
dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
+ if (ret)
+ schedule_work(&dev->reset_work);
+
return ret;
}
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index cd01a0efda6b..64d5760d069a 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -115,7 +115,6 @@ int scif_reserve_dma_chan(struct scif_endpt *ep)
*/
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
- struct scif_endpt *ep,
u64 start, u64 len)
{
struct list_head *item, *tmp;
@@ -128,7 +127,6 @@ void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
window = list_entry(item, struct scif_window, list);
- ep = (struct scif_endpt *)window->ep;
if (!len)
break;
start_va = window->va_for_temp;
@@ -146,7 +144,7 @@ static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
struct scif_endpt *ep = mmn->ep;
spin_lock(&ep->rma_info.tc_lock);
- __scif_rma_destroy_tcw(mmn, ep, start, len);
+ __scif_rma_destroy_tcw(mmn, start, len);
spin_unlock(&ep->rma_info.tc_lock);
}
@@ -169,7 +167,7 @@ static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
spin_lock(&ep->rma_info.tc_lock);
list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
mmn = list_entry(item, struct scif_mmu_notif, list);
- __scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX);
+ __scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
}
spin_unlock(&ep->rma_info.tc_lock);
}
diff --git a/drivers/misc/mic/scif/scif_mmap.c b/drivers/misc/mic/scif/scif_mmap.c
index 49cb8f7b4672..928211677079 100644
--- a/drivers/misc/mic/scif/scif_mmap.c
+++ b/drivers/misc/mic/scif/scif_mmap.c
@@ -552,7 +552,7 @@ static void scif_munmap(struct vm_area_struct *vma)
{
struct scif_endpt *ep;
struct vma_pvt *vmapvt = vma->vm_private_data;
- int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int nr_pages = vma_pages(vma);
s64 offset;
struct scif_rma_req req;
struct scif_window *window = NULL;
@@ -614,7 +614,7 @@ int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd)
struct scif_window *window = NULL;
struct scif_endpt *ep = (struct scif_endpt *)epd;
s64 start_offset = vma->vm_pgoff << PAGE_SHIFT;
- int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int nr_pages = vma_pages(vma);
int err;
struct vma_pvt *vmapvt;
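/*
 * Side note (illustrative, not part of the patch): the two scif_mmap.c hunks
 * above are a pure refactor -- vma_pages() is the existing helper in
 * <linux/mm.h> that computes the same value as the open-coded expression:
 *
 *     static inline unsigned long vma_pages(struct vm_area_struct *vma)
 *     {
 *             return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 *     }
 *
 * so scif_munmap()/scif_mmap() behave exactly as before.
 */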
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 4810e039bbec..e42bdc90fa27 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -28,6 +28,7 @@
#include <linux/if_ether.h>
#include <linux/ctype.h>
#include <linux/dmi.h>
+#include <linux/of.h>
#define PHUB_STATUS 0x00 /* Status Register offset */
#define PHUB_CONTROL 0x04 /* Control Register offset */
@@ -57,6 +58,7 @@
/* CM-iTC */
#define CLKCFG_UART_48MHZ (1 << 16)
+#define CLKCFG_UART_25MHZ (2 << 16)
#define CLKCFG_BAUDDIV (2 << 20)
#define CLKCFG_PLL2VCO (8 << 9)
#define CLKCFG_UARTCLKSEL (1 << 18)
@@ -711,6 +713,12 @@ static int pch_phub_probe(struct pci_dev *pdev,
if (id->driver_data == 1) { /* EG20T PCH */
const char *board_name;
+ unsigned int prefetch = 0x000affaa;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node,
+ "intel,eg20t-prefetch",
+ &prefetch);
ret = sysfs_create_file(&pdev->dev.kobj,
&dev_attr_pch_mac.attr);
@@ -736,11 +744,21 @@ static int pch_phub_probe(struct pci_dev *pdev,
CLKCFG_UART_MASK);
/* set the prefetch value */
- iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+ iowrite32(prefetch, chip->pch_phub_base_address + 0x14);
/* set the interrupt delay value */
iowrite32(0x25, chip->pch_phub_base_address + 0x44);
chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
+
+ /* quirk for MIPS Boston platform */
+ if (pdev->dev.of_node) {
+ if (of_machine_is_compatible("img,boston")) {
+ pch_phub_read_modify_write_reg(chip,
+ (unsigned int)CLKCFG_REG_OFFSET,
+ CLKCFG_UART_25MHZ,
+ CLKCFG_UART_MASK);
+ }
+ }
} else if (id->driver_data == 2) { /* ML7213 IOH */
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
if (ret)
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 9ec262a52656..ec090105eb4b 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -381,18 +381,12 @@ static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
return -EINVAL;
}
- dg = kmalloc(send_info.len, GFP_KERNEL);
- if (!dg) {
+ dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
+ send_info.len);
+ if (IS_ERR(dg)) {
vmci_ioctl_err(
"cannot allocate memory to dispatch datagram\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
- send_info.len)) {
- vmci_ioctl_err("error getting datagram\n");
- kfree(dg);
- return -EFAULT;
+ return PTR_ERR(dg);
}
if (VMCI_DG_SIZE(dg) != send_info.len) {
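/*
 * Illustrative sketch, not part of the patch: the memdup_user() idiom the
 * vmci_host.c hunk above converts to.  memdup_user() replaces the open-coded
 * kmalloc() + copy_from_user() pair and reports failure as an ERR_PTR(), so a
 * single IS_ERR()/PTR_ERR() check covers both -ENOMEM and -EFAULT.  The
 * function and parameter names below are hypothetical.
 */
static int example_handle_ioctl(void __user *uptr, size_t len)
{
	void *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... use buf ... */
	kfree(buf);
	return 0;
}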
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2206d4477dbb..c3335112e68c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -142,8 +142,6 @@ static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
struct mmc_packed *packed = mqrq->packed;
- BUG_ON(!packed);
-
mqrq->cmd_type = MMC_PACKED_NONE;
packed->nr_entries = MMC_PACKED_NR_ZERO;
packed->idx_failure = MMC_PACKED_NR_IDX;
@@ -1443,8 +1441,6 @@ static int mmc_blk_packed_err_check(struct mmc_card *card,
int err, check, status;
u8 *ext_csd;
- BUG_ON(!packed);
-
packed->retries--;
check = mmc_blk_err_check(card, areq);
err = get_card_status(card, &status, 0);
@@ -1673,6 +1669,18 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
u8 max_packed_rw = 0;
u8 reqs = 0;
+ /*
+ * No further packed handling is needed here: when mqrq->packed
+ * is NULL we set MMC_PACKED_NONE and return zero reqs.  We also
+ * clear the MMC_BLK_PACKED_CMD flag to avoid doing this again
+ * when the block request is removed.
+ */
+ if (!mqrq->packed) {
+ md->flags &= (~MMC_BLK_PACKED_CMD);
+ goto no_packed;
+ }
+
if (!(md->flags & MMC_BLK_PACKED_CMD))
goto no_packed;
@@ -1782,8 +1790,6 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
u8 hdr_blocks;
u8 i = 1;
- BUG_ON(!packed);
-
mqrq->cmd_type = MMC_PACKED_WRITE;
packed->blocks = 0;
packed->idx_failure = MMC_PACKED_NR_IDX;
@@ -1887,8 +1893,6 @@ static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
int idx = packed->idx_failure, i = 0;
int ret = 0;
- BUG_ON(!packed);
-
while (!list_empty(&packed->list)) {
prq = list_entry_rq(packed->list.next);
if (idx == i) {
@@ -1917,8 +1921,6 @@ static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
struct request *prq;
struct mmc_packed *packed = mq_rq->packed;
- BUG_ON(!packed);
-
while (!list_empty(&packed->list)) {
prq = list_entry_rq(packed->list.next);
list_del_init(&prq->queuelist);
@@ -1935,8 +1937,6 @@ static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
struct request_queue *q = mq->queue;
struct mmc_packed *packed = mq_rq->packed;
- BUG_ON(!packed);
-
while (!list_empty(&packed->list)) {
prq = list_entry_rq(packed->list.prev);
if (prq->queuelist.prev != &packed->list) {
@@ -2144,7 +2144,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 0;
}
-static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
@@ -2265,7 +2265,6 @@ again:
if (ret)
goto err_putdisk;
- md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;
md->disk->major = MMC_BLOCK_MAJOR;
@@ -2303,7 +2302,8 @@ again:
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
- if (mmc_card_mmc(card) ||
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/card/block.h
new file mode 100644
index 000000000000..cdabb2ee74be
--- /dev/null
+++ b/drivers/mmc/card/block.h
@@ -0,0 +1 @@
+int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index c032eef45762..5a8dc5a76e0d 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -184,6 +184,29 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
return mmc_set_blocklen(test->card, size);
}
+static bool mmc_test_card_cmd23(struct mmc_card *card)
+{
+ return mmc_card_mmc(card) ||
+ (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
+}
+
+static void mmc_test_prepare_sbc(struct mmc_test_card *test,
+ struct mmc_request *mrq, unsigned int blocks)
+{
+ struct mmc_card *card = test->card;
+
+ if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
+ !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
+ (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
+ mrq->sbc = NULL;
+ return;
+ }
+
+ mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
+ mrq->sbc->arg = blocks;
+ mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
+}
+
/*
* Fill in the mmc_request structure given a set of transfer parameters.
*/
@@ -221,6 +244,8 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
mrq->data->sg = sg;
mrq->data->sg_len = sg_len;
+ mmc_test_prepare_sbc(test, mrq, blocks);
+
mmc_set_data_timeout(mrq->data, test->card);
}
@@ -693,6 +718,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
ret = 0;
+ if (mrq->sbc && mrq->sbc->error)
+ ret = mrq->sbc->error;
if (!ret && mrq->cmd->error)
ret = mrq->cmd->error;
if (!ret && mrq->data->error)
@@ -2278,6 +2305,245 @@ static int mmc_test_reset(struct mmc_test_card *test)
return RESULT_FAIL;
}
+struct mmc_test_req {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_command status;
+ struct mmc_data data;
+};
+
+static struct mmc_test_req *mmc_test_req_alloc(void)
+{
+ struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+
+ if (rq) {
+ rq->mrq.cmd = &rq->cmd;
+ rq->mrq.data = &rq->data;
+ rq->mrq.stop = &rq->stop;
+ }
+
+ return rq;
+}
+
+static int mmc_test_send_status(struct mmc_test_card *test,
+ struct mmc_command *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(test->card->host))
+ cmd->arg = test->card->rca << 16;
+ cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(test->card->host, cmd, 0);
+}
+
+static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
+ unsigned int dev_addr, int use_sbc,
+ int repeat_cmd, int write, int use_areq)
+{
+ struct mmc_test_req *rq = mmc_test_req_alloc();
+ struct mmc_host *host = test->card->host;
+ struct mmc_test_area *t = &test->area;
+ struct mmc_async_req areq;
+ struct mmc_request *mrq;
+ unsigned long timeout;
+ bool expired = false;
+ int ret = 0, cmd_ret;
+ u32 status = 0;
+ int count = 0;
+
+ if (!rq)
+ return -ENOMEM;
+
+ mrq = &rq->mrq;
+ if (use_sbc)
+ mrq->sbc = &rq->sbc;
+ mrq->cap_cmd_during_tfr = true;
+
+ areq.mrq = mrq;
+ areq.err_check = mmc_test_check_result_async;
+
+ mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
+ 512, write);
+
+ if (use_sbc && t->blocks > 1 && !mrq->sbc) {
+ ret = mmc_host_cmd23(host) ?
+ RESULT_UNSUP_CARD :
+ RESULT_UNSUP_HOST;
+ goto out_free;
+ }
+
+ /* Start ongoing data request */
+ if (use_areq) {
+ mmc_start_req(host, &areq, &ret);
+ if (ret)
+ goto out_free;
+ } else {
+ mmc_wait_for_req(host, mrq);
+ }
+
+ timeout = jiffies + msecs_to_jiffies(3000);
+ do {
+ count += 1;
+
+ /* Send status command while data transfer in progress */
+ cmd_ret = mmc_test_send_status(test, &rq->status);
+ if (cmd_ret)
+ break;
+
+ status = rq->status.resp[0];
+ if (status & R1_ERROR) {
+ cmd_ret = -EIO;
+ break;
+ }
+
+ if (mmc_is_req_done(host, mrq))
+ break;
+
+ expired = time_after(jiffies, timeout);
+ if (expired) {
+ pr_info("%s: timeout waiting for Tran state status %#x\n",
+ mmc_hostname(host), status);
+ cmd_ret = -ETIMEDOUT;
+ break;
+ }
+ } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
+
+ /* Wait for data request to complete */
+ if (use_areq)
+ mmc_start_req(host, NULL, &ret);
+ else
+ mmc_wait_for_req_done(test->card->host, mrq);
+
+ /*
+ * For a cap_cmd_during_tfr request, the upper layer must send the
+ * stop command itself if one is required.
+ */
+ if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
+ if (ret)
+ mmc_wait_for_cmd(host, mrq->data->stop, 0);
+ else
+ ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
+ }
+
+ if (ret)
+ goto out_free;
+
+ if (cmd_ret) {
+ pr_info("%s: Send Status failed: status %#x, error %d\n",
+ mmc_hostname(test->card->host), status, cmd_ret);
+ }
+
+ ret = mmc_test_check_result(test, mrq);
+ if (ret)
+ goto out_free;
+
+ ret = mmc_test_wait_busy(test);
+ if (ret)
+ goto out_free;
+
+ if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
+ pr_info("%s: %d commands completed during transfer of %u blocks\n",
+ mmc_hostname(test->card->host), count, t->blocks);
+
+ if (cmd_ret)
+ ret = cmd_ret;
+out_free:
+ kfree(rq);
+
+ return ret;
+}
+
+static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
+ unsigned long sz, int use_sbc, int write,
+ int use_areq)
+{
+ struct mmc_test_area *t = &test->area;
+ int ret;
+
+ if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_area_map(test, sz, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
+ use_areq);
+ if (ret)
+ return ret;
+
+ return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
+ use_areq);
+}
+
+static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
+ int write, int use_areq)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz <= t->max_tfr; sz += 512) {
+ ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
+ use_areq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Commands during read - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 0, 0, 0);
+}
+
+/*
+ * Commands during write - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 0, 1, 0);
+}
+
+/*
+ * Commands during read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 0, 0);
+}
+
+/*
+ * Commands during write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 1, 0);
+}
+
+/*
+ * Commands during non-blocking read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 0, 1);
+}
+
+/*
+ * Commands during non-blocking write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 1, 1);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2605,6 +2871,48 @@ static const struct mmc_test_case mmc_test_cases[] = {
.name = "Reset test",
.run = mmc_test_reset,
},
+
+ {
+ .name = "Commands during read - no Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during write - no Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during read - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read_cmd23,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during write - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write_cmd23,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read_cmd23_nonblock,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write_cmd23_nonblock,
+ .cleanup = mmc_test_area_cleanup,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 708057261b38..8037f73a109a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -19,7 +19,9 @@
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+
#include "queue.h"
+#include "block.h"
#define MMC_QUEUE_BOUNCESZ 65536
@@ -68,7 +70,7 @@ static int mmc_queue_thread(void *d)
bool req_is_special = mmc_req_is_special(req);
set_current_state(TASK_RUNNING);
- mq->issue_fn(mq, req);
+ mmc_blk_issue_rq(mq, req);
cond_resched();
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index fee5e1271465..3c15a75bae86 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -57,8 +57,6 @@ struct mmc_queue {
unsigned int flags;
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_QUEUE_NEW_REQUEST (1 << 1)
-
- int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
struct mmc_queue_req mqrq[2];
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e55cde6d436d..2553d903a82b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -58,6 +58,9 @@
*/
#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
+/* The max erase timeout, used when host->max_busy_timeout isn't specified */
+#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
+
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
@@ -117,6 +120,24 @@ static inline void mmc_should_fail_request(struct mmc_host *host,
#endif /* CONFIG_FAIL_MMC_REQUEST */
+static inline void mmc_complete_cmd(struct mmc_request *mrq)
+{
+ if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
+ complete_all(&mrq->cmd_completion);
+}
+
+void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (!mrq->cap_cmd_during_tfr)
+ return;
+
+ mmc_complete_cmd(mrq);
+
+ pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
+ mmc_hostname(host), mrq->cmd->opcode);
+}
+EXPORT_SYMBOL(mmc_command_done);
+
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
@@ -143,6 +164,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
+ if (host->ongoing_mrq == mrq)
+ host->ongoing_mrq = NULL;
+
+ mmc_complete_cmd(mrq);
+
trace_mmc_request_done(host, mrq);
if (err && cmd->retries && !mmc_card_removed(host->card)) {
@@ -155,7 +181,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
} else {
mmc_should_fail_request(host, mrq);
- led_trigger_event(host->led, LED_OFF);
+ if (!host->ongoing_mrq)
+ led_trigger_event(host->led, LED_OFF);
if (mrq->sbc) {
pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
@@ -220,6 +247,15 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
+ if (mrq->cap_cmd_during_tfr) {
+ host->ongoing_mrq = mrq;
+ /*
+ * The retry path can come through here without having waited on
+ * cmd_completion, so make sure it is reinitialised.
+ */
+ reinit_completion(&mrq->cmd_completion);
+ }
+
trace_mmc_request_start(host, mrq);
host->ops->request(host, mrq);
@@ -386,6 +422,18 @@ static void mmc_wait_done(struct mmc_request *mrq)
complete(&mrq->completion);
}
+static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
+{
+ struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
+
+ /*
+ * If there is an ongoing transfer, wait for the command line to become
+ * available.
+ */
+ if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
+ wait_for_completion(&ongoing_mrq->cmd_completion);
+}
+
/*
*__mmc_start_data_req() - starts data request
* @host: MMC host to start the request
@@ -393,17 +441,24 @@ static void mmc_wait_done(struct mmc_request *mrq)
*
* Sets the done callback to be called when request is completed by the card.
* Starts data mmc request execution
+ * If an ongoing transfer is already in progress, wait for the command line
+ * to become available before sending another command.
*/
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ mmc_wait_ongoing_tfr_cmd(host);
+
mrq->done = mmc_wait_data_done;
mrq->host = host;
+ init_completion(&mrq->cmd_completion);
+
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
+ mmc_complete_cmd(mrq);
mmc_wait_data_done(mrq);
}
@@ -414,12 +469,17 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ mmc_wait_ongoing_tfr_cmd(host);
+
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
+ init_completion(&mrq->cmd_completion);
+
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
+ mmc_complete_cmd(mrq);
complete(&mrq->completion);
}
@@ -483,8 +543,7 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
return err;
}
-static void mmc_wait_for_req_done(struct mmc_host *host,
- struct mmc_request *mrq)
+void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd;
@@ -525,6 +584,28 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
mmc_retune_release(host);
}
+EXPORT_SYMBOL(mmc_wait_for_req_done);
+
+/**
+ * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
+ * @host: MMC host
+ * @mrq: MMC request
+ *
+ * mmc_is_req_done() is used with requests that have
+ * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
+ * starting a request and before waiting for it to complete. That is,
+ * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
+ * and before mmc_wait_for_req_done(). If it is called at other times the
+ * result is not meaningful.
+ */
+bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->areq)
+ return host->context_info.is_done_rcv;
+ else
+ return completion_done(&mrq->completion);
+}
+EXPORT_SYMBOL(mmc_is_req_done);
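/*
 * Illustrative sketch, not part of the patch: a minimal caller of the new
 * 'cap_cmd_during_tfr' interface, modelled on mmc_test_ongoing_transfer()
 * from the mmc_test.c hunk above.  The function name and what is done while
 * the transfer is ongoing are assumptions for the example only.
 */
static int example_cmd_during_tfr(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	mrq->cap_cmd_during_tfr = true;

	/* Returns with the data transfer still ongoing. */
	mmc_wait_for_req(host, mrq);

	while (!mmc_is_req_done(host, mrq)) {
		/* Command line is available: e.g. send CMD13 here. */
		cpu_relax();
	}

	/* The caller must still wait for the data transfer to complete. */
	mmc_wait_for_req_done(host, mrq);

	return mrq->cmd->error;
}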
/**
* mmc_pre_req - Prepare for a new request
@@ -645,13 +726,18 @@ EXPORT_SYMBOL(mmc_start_req);
* @mrq: MMC request to start
*
* Start a new MMC custom command request for a host, and wait
- * for the command to complete. Does not attempt to parse the
- * response.
+ * for the command to complete. In the case of 'cap_cmd_during_tfr'
+ * requests, the transfer is ongoing and the caller can issue further
+ * commands that do not use the data lines, and then wait by calling
+ * mmc_wait_for_req_done().
+ * Does not attempt to parse the response.
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
__mmc_start_req(host, mrq);
- mmc_wait_for_req_done(host, mrq);
+
+ if (!mrq->cap_cmd_during_tfr)
+ mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
@@ -2202,6 +2288,54 @@ out:
return err;
}
+static unsigned int mmc_align_erase_size(struct mmc_card *card,
+ unsigned int *from,
+ unsigned int *to,
+ unsigned int nr)
+{
+ unsigned int from_new = *from, nr_new = nr, rem;
+
+ /*
+ * When 'card->erase_size' is a power of 2, we can use round_up/down()
+ * to align the erase size efficiently.
+ */
+ if (is_power_of_2(card->erase_size)) {
+ unsigned int temp = from_new;
+
+ from_new = round_up(temp, card->erase_size);
+ rem = from_new - temp;
+
+ if (nr_new > rem)
+ nr_new -= rem;
+ else
+ return 0;
+
+ nr_new = round_down(nr_new, card->erase_size);
+ } else {
+ rem = from_new % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from_new += rem;
+ if (nr_new > rem)
+ nr_new -= rem;
+ else
+ return 0;
+ }
+
+ rem = nr_new % card->erase_size;
+ if (rem)
+ nr_new -= rem;
+ }
+
+ if (nr_new == 0)
+ return 0;
+
+ *to = from_new + nr_new;
+ *from = from_new;
+
+ return nr_new;
+}
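/*
 * Worked example for mmc_align_erase_size() above (illustrative only, the
 * numbers are made up): with card->erase_size = 1024 sectors, *from = 1000
 * and nr = 5000, the power-of-2 path gives
 *
 *     from_new = round_up(1000, 1024)        = 1024   (rem = 24)
 *     nr_new   = round_down(5000 - 24, 1024) = 4096
 *     *to      = 1024 + 4096                 = 5120
 *
 * i.e. the requested range is shrunk to the largest span of whole erase
 * groups it fully covers, and 0 is returned when no whole group fits.
 */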
+
/**
* mmc_erase - erase sectors.
* @card: card to erase
@@ -2240,26 +2374,12 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
return -EINVAL;
}
- if (arg == MMC_ERASE_ARG) {
- rem = from % card->erase_size;
- if (rem) {
- rem = card->erase_size - rem;
- from += rem;
- if (nr > rem)
- nr -= rem;
- else
- return 0;
- }
- rem = nr % card->erase_size;
- if (rem)
- nr -= rem;
- }
+ if (arg == MMC_ERASE_ARG)
+ nr = mmc_align_erase_size(card, &from, &to, nr);
if (nr == 0)
return 0;
- to = from + nr;
-
if (to <= from)
return -EINVAL;
@@ -2352,6 +2472,8 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
struct mmc_host *host = card->host;
unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
unsigned int last_timeout = 0;
+ unsigned int max_busy_timeout = host->max_busy_timeout ?
+ host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
if (card->erase_shift) {
max_qty = UINT_MAX >> card->erase_shift;
@@ -2374,15 +2496,15 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
* matter what size of 'host->max_busy_timeout', but if the
* 'host->max_busy_timeout' is large enough for more discard sectors,
* then we can continue to increase the max discard sectors until we
- * get a balance value.
+ * get a balance value. In cases when the 'host->max_busy_timeout'
+ * isn't specified, use the default max erase timeout.
*/
do {
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
- if (qty + x > min_qty &&
- timeout > host->max_busy_timeout)
+ if (qty + x > min_qty && timeout > max_busy_timeout)
break;
if (timeout < last_timeout)
@@ -2427,9 +2549,6 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
- if (!host->max_busy_timeout)
- return UINT_MAX;
-
/*
* Without erase_group_def set, MMC erase timeout depends on clock
* frequency which can change. In that case, the best choice is
@@ -2447,7 +2566,8 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
max_discard = 0;
}
pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
- mmc_hostname(host), max_discard, host->max_busy_timeout);
+ mmc_hostname(host), max_discard, host->max_busy_timeout ?
+ host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
@@ -2456,7 +2576,8 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
- if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
+ if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
+ mmc_card_hs400(card) || mmc_card_hs400es(card))
return 0;
cmd.opcode = MMC_SET_BLOCKLEN;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f2d185cf8a8b..3486bc7fbb64 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1029,6 +1029,10 @@ static int mmc_select_hs(struct mmc_card *card)
err = mmc_switch_status(card);
}
+ if (err)
+ pr_warn("%s: switch to high-speed failed, err:%d\n",
+ mmc_hostname(card->host), err);
+
return err;
}
@@ -1265,11 +1269,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
/* Switch card to HS mode */
err = mmc_select_hs(card);
- if (err) {
- pr_err("%s: switch to high-speed failed, err:%d\n",
- mmc_hostname(host), err);
+ if (err)
goto out_err;
- }
err = mmc_switch_status(card);
if (err)
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 450d907c6e6c..1304160de168 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -16,6 +16,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/property.h>
#include <linux/mmc/host.h>
@@ -24,6 +26,7 @@
struct mmc_pwrseq_simple {
struct mmc_pwrseq pwrseq;
bool clk_enabled;
+ u32 post_power_on_delay_ms;
struct clk *ext_clk;
struct gpio_descs *reset_gpios;
};
@@ -64,6 +67,9 @@ static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
+
+ if (pwrseq->post_power_on_delay_ms)
+ msleep(pwrseq->post_power_on_delay_ms);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
@@ -111,6 +117,9 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
return PTR_ERR(pwrseq->reset_gpios);
}
+ device_property_read_u32(dev, "post-power-on-delay-ms",
+ &pwrseq->post_power_on_delay_ms);
+
pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
pwrseq->pwrseq.owner = THIS_MODULE;
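/*
 * Illustrative only: with the two pwrseq_simple hunks above, a board
 * description could add something like
 *
 *     post-power-on-delay-ms = <10>;
 *
 * to its mmc-pwrseq-simple node (the value 10 is an arbitrary example), and
 * mmc_pwrseq_simple_post_power_on() would then msleep() for that long after
 * releasing the reset GPIOs.
 */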
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0123936241b0..73c762a28dfe 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -223,8 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i;
- u32 *ssr;
+ int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
pr_warn("%s: card lacks mandatory SD Status function\n",
@@ -232,33 +231,27 @@ static int mmc_read_ssr(struct mmc_card *card)
return 0;
}
- ssr = kmalloc(64, GFP_KERNEL);
- if (!ssr)
- return -ENOMEM;
-
- err = mmc_app_sd_status(card, ssr);
- if (err) {
+ if (mmc_app_sd_status(card, card->raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
- err = 0;
- goto out;
+ return 0;
}
for (i = 0; i < 16; i++)
- ssr[i] = be32_to_cpu(ssr[i]);
+ card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
- au = UNSTUFF_BITS(ssr, 428 - 384, 4);
+ au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
if (au) {
if (au <= 9 || card->scr.sda_spec3) {
card->ssr.au = sd_au_size[au];
- es = UNSTUFF_BITS(ssr, 408 - 384, 16);
- et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+ es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
if (es && et) {
- eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+ eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
card->ssr.erase_timeout = (et * 1000) / es;
card->ssr.erase_offset = eo * 1000;
}
@@ -267,9 +260,8 @@ static int mmc_read_ssr(struct mmc_card *card)
mmc_hostname(card->host));
}
}
-out:
- kfree(ssr);
- return err;
+
+ return 0;
}
/*
@@ -666,6 +658,14 @@ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
+MMC_DEV_ATTR(ssr,
+ "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ card->raw_ssr[0], card->raw_ssr[1], card->raw_ssr[2],
+ card->raw_ssr[3], card->raw_ssr[4], card->raw_ssr[5],
+ card->raw_ssr[6], card->raw_ssr[7], card->raw_ssr[8],
+ card->raw_ssr[9], card->raw_ssr[10], card->raw_ssr[11],
+ card->raw_ssr[12], card->raw_ssr[13], card->raw_ssr[14],
+ card->raw_ssr[15]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
@@ -698,6 +698,7 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_scr.attr,
+ &dev_attr_ssr.attr,
&dev_attr_date.attr,
&dev_attr_erase_size.attr,
&dev_attr_preferred_erase_size.attr,
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 78cb4d5d9d58..406e5f037e32 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -26,8 +26,8 @@
*/
void sdio_claim_host(struct sdio_func *func)
{
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (WARN_ON(!func))
+ return;
mmc_claim_host(func->card->host);
}
@@ -42,8 +42,8 @@ EXPORT_SYMBOL_GPL(sdio_claim_host);
*/
void sdio_release_host(struct sdio_func *func)
{
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (WARN_ON(!func))
+ return;
mmc_release_host(func->card->host);
}
@@ -62,8 +62,8 @@ int sdio_enable_func(struct sdio_func *func)
unsigned char reg;
unsigned long timeout;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func));
@@ -112,8 +112,8 @@ int sdio_disable_func(struct sdio_func *func)
int ret;
unsigned char reg;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func));
@@ -307,6 +307,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
unsigned max_blocks;
int ret;
+ if (!func || (func->num > 7))
+ return -EINVAL;
+
/* Do the bulk of the transfer using block mode (if supported). */
if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
/* Blocks per command is limited by host count, host transfer
@@ -367,7 +370,10 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
int ret;
u8 val;
- BUG_ON(!func);
+ if (!func) {
+ *err_ret = -EINVAL;
+ return 0xFF;
+ }
if (err_ret)
*err_ret = 0;
@@ -398,7 +404,10 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
{
int ret;
- BUG_ON(!func);
+ if (!func) {
+ *err_ret = -EINVAL;
+ return;
+ }
ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL);
if (err_ret)
@@ -623,7 +632,10 @@ unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
int ret;
unsigned char val;
- BUG_ON(!func);
+ if (!func) {
+ *err_ret = -EINVAL;
+ return 0xFF;
+ }
if (err_ret)
*err_ret = 0;
@@ -658,7 +670,10 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
{
int ret;
- BUG_ON(!func);
+ if (!func) {
+ *err_ret = -EINVAL;
+ return;
+ }
if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
if (err_ret)
@@ -684,8 +699,8 @@ EXPORT_SYMBOL_GPL(sdio_f0_writeb);
*/
mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
{
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return 0;
return func->card->host->pm_caps;
}
@@ -707,8 +722,8 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
{
struct mmc_host *host;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
host = func->card->host;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 34f6e8015306..90fe5545c677 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -24,8 +24,6 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
struct mmc_command cmd = {0};
int i, err = 0;
- BUG_ON(!host);
-
cmd.opcode = SD_IO_SEND_OP_COND;
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -71,8 +69,8 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
struct mmc_command cmd = {0};
int err;
- BUG_ON(!host);
- BUG_ON(fn > 7);
+ if (fn > 7)
+ return -EINVAL;
/* sanity check */
if (addr & ~0x1FFFF)
@@ -114,7 +112,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
- BUG_ON(!card);
return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
}
@@ -129,8 +126,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned int nents, left_size, i;
unsigned int seg_size = card->host->max_seg_size;
- BUG_ON(!card);
- BUG_ON(fn > 7);
WARN_ON(blksz == 0);
/* sanity check */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index a56373c75983..8fa478c3b0db 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1216,9 +1216,11 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq == NO_IRQ)
+ if (!r)
return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
mem_size = resource_size(r);
mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index da0ef1765735..7ab3d749b5ae 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -225,8 +225,12 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
* Not supported to configure register
* related to HS400
*/
- if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420)
+ if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) {
+ if (timing == MMC_TIMING_MMC_HS400)
+ dev_warn(host->dev,
+ "cannot configure HS400, unsupported chipset\n");
return;
+ }
dqs = priv->saved_dqs_en;
strobe = priv->saved_strobe_ctrl;
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 8e9d886bfcda..624789496dce 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -131,11 +131,17 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
host->bus_hz = clk_get_rate(host->biu_clk);
}
+static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ return 0;
+}
+
static const struct dw_mci_drv_data hi6220_data = {
.caps = dw_mci_hi6220_caps,
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,
+ .execute_tuning = dw_mci_hi6220_execute_tuning,
};
static const struct of_device_id dw_mci_k3_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 767af2026f8b..4fcbc4012ed0 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -61,6 +61,8 @@
SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
SDMMC_IDMAC_INT_TI)
+#define DESC_RING_BUF_SZ PAGE_SIZE
+
struct idmac_desc_64addr {
u32 des0; /* Control Descriptor */
@@ -467,136 +469,6 @@ static void dw_mci_dmac_complete_dma(void *arg)
}
}
-static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
- unsigned int sg_len)
-{
- unsigned int desc_len;
- int i;
-
- if (host->dma_64bit_address == 1) {
- struct idmac_desc_64addr *desc_first, *desc_last, *desc;
-
- desc_first = desc_last = desc = host->sg_cpu;
-
- for (i = 0; i < sg_len; i++) {
- unsigned int length = sg_dma_len(&data->sg[i]);
-
- u64 mem_addr = sg_dma_address(&data->sg[i]);
-
- for ( ; length ; desc++) {
- desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
- length : DW_MCI_DESC_DATA_LENGTH;
-
- length -= desc_len;
-
- /*
- * Set the OWN bit and disable interrupts
- * for this descriptor
- */
- desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
- IDMAC_DES0_CH;
-
- /* Buffer length */
- IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
-
- /* Physical address to DMA to/from */
- desc->des4 = mem_addr & 0xffffffff;
- desc->des5 = mem_addr >> 32;
-
- /* Update physical address for the next desc */
- mem_addr += desc_len;
-
- /* Save pointer to the last descriptor */
- desc_last = desc;
- }
- }
-
- /* Set first descriptor */
- desc_first->des0 |= IDMAC_DES0_FD;
-
- /* Set last descriptor */
- desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
- desc_last->des0 |= IDMAC_DES0_LD;
-
- } else {
- struct idmac_desc *desc_first, *desc_last, *desc;
-
- desc_first = desc_last = desc = host->sg_cpu;
-
- for (i = 0; i < sg_len; i++) {
- unsigned int length = sg_dma_len(&data->sg[i]);
-
- u32 mem_addr = sg_dma_address(&data->sg[i]);
-
- for ( ; length ; desc++) {
- desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
- length : DW_MCI_DESC_DATA_LENGTH;
-
- length -= desc_len;
-
- /*
- * Set the OWN bit and disable interrupts
- * for this descriptor
- */
- desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
- IDMAC_DES0_DIC |
- IDMAC_DES0_CH);
-
- /* Buffer length */
- IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
-
- /* Physical address to DMA to/from */
- desc->des2 = cpu_to_le32(mem_addr);
-
- /* Update physical address for the next desc */
- mem_addr += desc_len;
-
- /* Save pointer to the last descriptor */
- desc_last = desc;
- }
- }
-
- /* Set first descriptor */
- desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
-
- /* Set last descriptor */
- desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
- IDMAC_DES0_DIC));
- desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
- }
-
- wmb(); /* drain writebuffer */
-}
-
-static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
-{
- u32 temp;
-
- dw_mci_translate_sglist(host, host->data, sg_len);
-
- /* Make sure to reset DMA in case we did PIO before this */
- dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
- dw_mci_idmac_reset(host);
-
- /* Select IDMAC interface */
- temp = mci_readl(host, CTRL);
- temp |= SDMMC_CTRL_USE_IDMAC;
- mci_writel(host, CTRL, temp);
-
- /* drain writebuffer */
- wmb();
-
- /* Enable the IDMAC */
- temp = mci_readl(host, BMOD);
- temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
- mci_writel(host, BMOD, temp);
-
- /* Start it running */
- mci_writel(host, PLDMND, 1);
-
- return 0;
-}
-
static int dw_mci_idmac_init(struct dw_mci *host)
{
int i;
@@ -604,7 +476,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
if (host->dma_64bit_address == 1) {
struct idmac_desc_64addr *p;
/* Number of descriptors in the ring buffer */
- host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
+ host->ring_size =
+ DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
@@ -630,7 +503,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
} else {
struct idmac_desc *p;
/* Number of descriptors in the ring buffer */
- host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+ host->ring_size =
+ DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu;
@@ -671,6 +545,195 @@ static int dw_mci_idmac_init(struct dw_mci *host)
return 0;
}
+static inline int dw_mci_prepare_desc64(struct dw_mci *host,
+ struct mmc_data *data,
+ unsigned int sg_len)
+{
+ unsigned int desc_len;
+ struct idmac_desc_64addr *desc_first, *desc_last, *desc;
+ unsigned long timeout;
+ int i;
+
+ desc_first = desc_last = desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+
+ u64 mem_addr = sg_dma_address(&data->sg[i]);
+
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Wait until the IDMAC has cleared the OWN bit from
+ * the previous use of this descriptor, since the
+ * IDMAC's writes and the CPU's reads are
+ * asynchronous.
+ */
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (readl(&desc->des0) & IDMAC_DES0_OWN) {
+ if (time_after(jiffies, timeout))
+ goto err_own_bit;
+ udelay(10);
+ }
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+ IDMAC_DES0_CH;
+
+ /* Buffer length */
+ IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
+
+ /* Physical address to DMA to/from */
+ desc->des4 = mem_addr & 0xffffffff;
+ desc->des5 = mem_addr >> 32;
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
+ }
+
+ /* Set first descriptor */
+ desc_first->des0 |= IDMAC_DES0_FD;
+
+ /* Set last descriptor */
+ desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc_last->des0 |= IDMAC_DES0_LD;
+
+ return 0;
+err_own_bit:
+ /* restore the descriptor chain as it's polluted */
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
+ memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
+ dw_mci_idmac_init(host);
+ return -EINVAL;
+}
+
+
+static inline int dw_mci_prepare_desc32(struct dw_mci *host,
+ struct mmc_data *data,
+ unsigned int sg_len)
+{
+ unsigned int desc_len;
+ struct idmac_desc *desc_first, *desc_last, *desc;
+ unsigned long timeout;
+ int i;
+
+ desc_first = desc_last = desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+
+ u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Wait until the IDMAC has cleared the OWN bit from
+ * the previous use of this descriptor, since the
+ * IDMAC's writes and the CPU's reads are
+ * asynchronous.
+ */
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (readl(&desc->des0) &
+ cpu_to_le32(IDMAC_DES0_OWN)) {
+ if (time_after(jiffies, timeout))
+ goto err_own_bit;
+ udelay(10);
+ }
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+ IDMAC_DES0_DIC |
+ IDMAC_DES0_CH);
+
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
+
+ /* Physical address to DMA to/from */
+ desc->des2 = cpu_to_le32(mem_addr);
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
+ }
+
+ /* Set first descriptor */
+ desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+
+ /* Set last descriptor */
+ desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
+ IDMAC_DES0_DIC));
+ desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+
+ return 0;
+err_own_bit:
+ /* restore the descriptor chain as it's polluted */
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
+ memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
+ dw_mci_idmac_init(host);
+ return -EINVAL;
+}
+
+static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+ u32 temp;
+ int ret;
+
+ if (host->dma_64bit_address == 1)
+ ret = dw_mci_prepare_desc64(host, host->data, sg_len);
+ else
+ ret = dw_mci_prepare_desc32(host, host->data, sg_len);
+
+ if (ret)
+ goto out;
+
+ /* drain writebuffer */
+ wmb();
+
+ /* Make sure to reset DMA in case we did PIO before this */
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
+ dw_mci_idmac_reset(host);
+
+ /* Select IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_USE_IDMAC;
+ mci_writel(host, CTRL, temp);
+
+ /* drain writebuffer */
+ wmb();
+
+ /* Enable the IDMAC */
+ temp = mci_readl(host, BMOD);
+ temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
+ mci_writel(host, BMOD, temp);
+
+ /* Start it running */
+ mci_writel(host, PLDMND, 1);
+
+out:
+ return ret;
+}
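/*
 * Illustrative sketch, not part of the patch: the bounded-poll idiom used in
 * dw_mci_prepare_desc64/32() above -- poll a hardware-owned flag, but give up
 * after a deadline instead of spinning forever.  READ_ONCE() stands in for
 * the hunk's readl() on the coherent descriptor; the function name and the
 * 100 ms / 10 us figures simply mirror the hunk, everything else is an
 * assumption for the example.
 */
static int example_wait_flag_clear(const u32 *reg, u32 flag)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (READ_ONCE(*reg) & flag) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* hardware never released it */
		udelay(10);
	}

	return 0;
}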
+
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
@@ -876,11 +939,8 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
* MSIZE is '1',
* if blksz is not a multiple of the FIFO width
*/
- if (blksz % fifo_width) {
- msize = 0;
- rx_wmark = 1;
+ if (blksz % fifo_width)
goto done;
- }
do {
if (!((blksz_depth % mszs[idx]) ||
@@ -998,8 +1058,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (host->dma_ops->start(host, sg_len)) {
- /* We can't do DMA */
- dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
+ /* We can't do DMA, try PIO for this one */
+ dev_dbg(host->dev,
+ "%s: fall back to PIO mode for current transfer\n",
+ __func__);
return -ENODEV;
}
@@ -1695,11 +1757,11 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
data->error = -ETIMEDOUT;
} else if (host->dir_status ==
DW_MCI_RECV_STATUS) {
- data->error = -EIO;
+ data->error = -EILSEQ;
}
} else {
/* SDMMC_INT_SBE is included */
- data->error = -EIO;
+ data->error = -EILSEQ;
}
dev_dbg(host->dev, "data error, status 0x%08x\n", status);
@@ -2527,47 +2589,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
-/* given a slot, find out the device node representing that slot */
-static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
-{
- struct device *dev = slot->mmc->parent;
- struct device_node *np;
- const __be32 *addr;
- int len;
-
- if (!dev || !dev->of_node)
- return NULL;
-
- for_each_child_of_node(dev->of_node, np) {
- addr = of_get_property(np, "reg", &len);
- if (!addr || (len < sizeof(int)))
- continue;
- if (be32_to_cpup(addr) == slot->id)
- return np;
- }
- return NULL;
-}
-
-static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
-{
- struct device_node *np = dw_mci_of_find_slot_node(slot);
-
- if (!np)
- return;
-
- if (of_property_read_bool(np, "disable-wp")) {
- slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
- dev_warn(slot->mmc->parent,
- "Slot quirk 'disable-wp' is deprecated\n");
- }
-}
-#else /* CONFIG_OF */
-static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
-{
-}
-#endif /* CONFIG_OF */
-
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
@@ -2630,8 +2651,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- dw_mci_slot_of_parse(slot);
-
ret = mmc_of_parse(mmc);
if (ret)
goto err_host_allocated;
@@ -2736,7 +2755,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
}
/* Alloc memory for sg translation */
- host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
+ host->sg_cpu = dmam_alloc_coherent(host->dev,
+ DESC_RING_BUF_SZ,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
dev_err(host->dev,
@@ -2919,6 +2939,13 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (!pdata)
return ERR_PTR(-ENOMEM);
+ /* find the reset controller if one exists */
+ pdata->rstc = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(pdata->rstc)) {
+ if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
/* find out number of slots supported */
of_property_read_u32(np, "num-slots", &pdata->num_slots);
@@ -2937,11 +2964,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(ret);
}
- if (of_find_property(np, "supports-highspeed", NULL)) {
- dev_info(dev, "supports-highspeed property is deprecated.\n");
- pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
- }
-
return pdata;
}
@@ -2990,7 +3012,9 @@ int dw_mci_probe(struct dw_mci *host)
if (!host->pdata) {
host->pdata = dw_mci_parse_dt(host);
- if (IS_ERR(host->pdata)) {
+ if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(host->pdata)) {
dev_err(host->dev, "platform data not available\n");
return -EINVAL;
}
@@ -3044,6 +3068,12 @@ int dw_mci_probe(struct dw_mci *host)
}
}
+ if (!IS_ERR(host->pdata->rstc)) {
+ reset_control_assert(host->pdata->rstc);
+ usleep_range(10, 50);
+ reset_control_deassert(host->pdata->rstc);
+ }
+
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
@@ -3193,13 +3223,14 @@ err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
+ if (!IS_ERR(host->pdata->rstc))
+ reset_control_assert(host->pdata->rstc);
+
err_clk_ciu:
- if (!IS_ERR(host->ciu_clk))
- clk_disable_unprepare(host->ciu_clk);
+ clk_disable_unprepare(host->ciu_clk);
err_clk_biu:
- if (!IS_ERR(host->biu_clk))
- clk_disable_unprepare(host->biu_clk);
+ clk_disable_unprepare(host->biu_clk);
return ret;
}
@@ -3225,11 +3256,11 @@ void dw_mci_remove(struct dw_mci *host)
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
- if (!IS_ERR(host->ciu_clk))
- clk_disable_unprepare(host->ciu_clk);
+ if (!IS_ERR(host->pdata->rstc))
+ reset_control_assert(host->pdata->rstc);
- if (!IS_ERR(host->biu_clk))
- clk_disable_unprepare(host->biu_clk);
+ clk_disable_unprepare(host->ciu_clk);
+ clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 79905ce895ad..bbad309679cf 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -257,7 +257,7 @@ static void moxart_dma_complete(void *param)
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
u32 len, dir_data, dir_slave;
- unsigned long dma_time;
+ long dma_time;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *dma_chan;
@@ -397,7 +397,8 @@ static void moxart_prepare_data(struct moxart_host *host)
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct moxart_host *host = mmc_priv(mmc);
- unsigned long pio_time, flags;
+ long pio_time;
+ unsigned long flags;
u32 status;
spin_lock_irqsave(&host->lock, flags);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 396c9b7e4121..3ccaa1415f33 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -126,7 +126,7 @@ static int sd_response_type(struct mmc_command *cmd)
return SD_RSP_TYPE_R0;
case MMC_RSP_R1:
return SD_RSP_TYPE_R1;
- case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ case MMC_RSP_R1_NO_CRC:
return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
case MMC_RSP_R1B:
return SD_RSP_TYPE_R1b;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 6c71fc9f76c7..4106295527b9 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -324,7 +324,7 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
case MMC_RSP_R1:
rsp_type = SD_RSP_TYPE_R1;
break;
- case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ case MMC_RSP_R1_NO_CRC:
rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
break;
case MMC_RSP_R1B:
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe0756c8e1e..81d4dc034793 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -275,7 +275,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
- MMC_CAP_WAIT_WHILE_BUSY,
+ MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index e5c634bdfdd9..51dd2fd65000 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -253,12 +253,14 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
- if (clk_set_rate(pltfm_priv->clk, host->mmc->f_max) != 0) {
+ ret = clk_set_rate(pltfm_priv->clk, host->mmc->f_max);
+ if (ret) {
dev_err(dev, "Failed to set rate core clock\n");
goto err_pltfm_free;
}
- if (clk_prepare_enable(pltfm_priv->clk) != 0) {
+ ret = clk_prepare_enable(pltfm_priv->clk);
+ if (ret) {
dev_err(dev, "Failed to enable core clock\n");
goto err_pltfm_free;
}
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index cce10fe3e19e..159f6f64c68e 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -98,6 +98,8 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+ host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
@@ -121,6 +123,7 @@ err_clk:
static const struct of_device_id sdhci_brcm_of_match[] = {
{ .compatible = "brcm,bcm7425-sdhci" },
+ { .compatible = "brcm,bcm7445-sdhci" },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
@@ -128,7 +131,6 @@ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
- .owner = THIS_MODULE,
.pm = &sdhci_brcmstb_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 99e0b334f9df..1f54fd8755c8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -31,6 +31,7 @@
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
#define ESDHC_CTRL_D3CD 0x08
#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
/* VENDOR SPEC register */
@@ -928,7 +929,8 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27;
+ /* Doc Errata: the uSDHC's actual maximum timeout count is 1 << 29 */
+ return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
}
static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -937,7 +939,8 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
/* use maximum timeout counter */
- sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
+ esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
+ esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
SDHCI_TIMEOUT_CONTROL);
}
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index e0f193f7e3e5..da8e40af6f85 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -26,6 +26,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include "sdhci-pltfm.h"
+#include <linux/of.h>
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
@@ -35,6 +36,8 @@
#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+#define PHY_CLK_TOO_SLOW_HZ 400000
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -65,10 +68,12 @@ struct sdhci_arasan_soc_ctl_field {
* accessible via the syscon API.
*
* @baseclkfreq: Where to find corecfg_baseclkfreq
+ * @clockmultiplier: Where to find corecfg_clockmultiplier
* @hiword_update: If true, use HIWORD_UPDATE to access the syscon
*/
struct sdhci_arasan_soc_ctl_map {
struct sdhci_arasan_soc_ctl_field baseclkfreq;
+ struct sdhci_arasan_soc_ctl_field clockmultiplier;
bool hiword_update;
};
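
The clockmultiplier field added above is written through the "hiword update" syscon scheme described earlier in this hunk: the upper 16 bits of each 32-bit write act as a write-enable mask for the lower 16 bits, so other fields in the same register stay untouched without a read-modify-write. A hedged, generic sketch of such a write (helper name and regmap handle are illustrative; the field is assumed to sit in the lower 16 bits):

#include <linux/bits.h>
#include <linux/regmap.h>

static int example_hiword_write(struct regmap *map, unsigned int reg,
				unsigned int shift, unsigned int width,
				unsigned int value)
{
	u32 mask = GENMASK(shift + width - 1, shift);

	/* high half: write-enable mask; low half: new field value */
	return regmap_write(map, reg, (mask << 16) | (value << shift));
}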
@@ -77,6 +82,7 @@ struct sdhci_arasan_soc_ctl_map {
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
+ * @is_phy_on: True if the PHY is on; false if not.
* @sdcardclk_hw: Struct for the clock we might provide to a PHY.
* @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
@@ -86,16 +92,22 @@ struct sdhci_arasan_data {
struct sdhci_host *host;
struct clk *clk_ahb;
struct phy *phy;
+ bool is_phy_on;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+ unsigned int quirks; /* Arasan deviations from spec */
+
+/* Controller has no CD line wired and will not function normally without it */
+#define SDHCI_ARASAN_QUIRK_FORCE_CDTEST BIT(0)
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
.baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 },
+ .clockmultiplier = { .reg = 0xf02c, .width = 8, .shift = 0},
.hiword_update = true,
};
@@ -170,13 +182,47 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
bool ctrl_phy = false;
- if (clock > MMC_HIGH_52_MAX_DTR && (!IS_ERR(sdhci_arasan->phy)))
- ctrl_phy = true;
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ if (!sdhci_arasan->is_phy_on && clock <= PHY_CLK_TOO_SLOW_HZ) {
+ /*
+ * If the PHY is off, set the clock to max speed and power the PHY on.
+ *
+ * Although the PHY docs apparently suggest power cycling
+ * when changing the clock, the PHY doesn't like to be
+ * powered on while at low speeds like those used in ID
+ * mode. Even worse is powering the PHY on while the
+ * clock is off.
+ *
+ * To work around the PHY limitations, the best we can
+ * do is to power it on at a faster speed and then slam
+ * through low speeds without power cycling.
+ */
+ sdhci_set_clock(host, host->max_clk);
+ spin_unlock_irq(&host->lock);
+ phy_power_on(sdhci_arasan->phy);
+ spin_lock_irq(&host->lock);
+ sdhci_arasan->is_phy_on = true;
+
+ /*
+ * We'll now fall through to the below case with
+ * ctrl_phy = false (so we won't turn off/on). The
+ * sdhci_set_clock() will set the real clock.
+ */
+ } else if (clock > PHY_CLK_TOO_SLOW_HZ) {
+ /*
+ * At higher clock speeds the PHY is fine being power
+ * cycled and docs say you _should_ power cycle when
+ * changing clock speeds.
+ */
+ ctrl_phy = true;
+ }
+ }
- if (ctrl_phy) {
+ if (ctrl_phy && sdhci_arasan->is_phy_on) {
spin_unlock_irq(&host->lock);
phy_power_off(sdhci_arasan->phy);
spin_lock_irq(&host->lock);
+ sdhci_arasan->is_phy_on = false;
}
sdhci_set_clock(host, clock);
@@ -185,6 +231,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
spin_unlock_irq(&host->lock);
phy_power_on(sdhci_arasan->phy);
spin_lock_irq(&host->lock);
+ sdhci_arasan->is_phy_on = true;
}
}
@@ -203,12 +250,27 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
}
+void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+{
+ u8 ctrl;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_reset(host, mask);
+
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_arasan_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -239,13 +301,14 @@ static int sdhci_arasan_suspend(struct device *dev)
if (ret)
return ret;
- if (!IS_ERR(sdhci_arasan->phy)) {
+ if (!IS_ERR(sdhci_arasan->phy) && sdhci_arasan->is_phy_on) {
ret = phy_power_off(sdhci_arasan->phy);
if (ret) {
dev_err(dev, "Cannot power off phy.\n");
sdhci_resume_host(host);
return ret;
}
+ sdhci_arasan->is_phy_on = false;
}
clk_disable(pltfm_host->clk);
@@ -281,12 +344,13 @@ static int sdhci_arasan_resume(struct device *dev)
return ret;
}
- if (!IS_ERR(sdhci_arasan->phy)) {
+ if (!IS_ERR(sdhci_arasan->phy) && host->mmc->actual_clock) {
ret = phy_power_on(sdhci_arasan->phy);
if (ret) {
dev_err(dev, "Cannot power on phy.\n");
return ret;
}
+ sdhci_arasan->is_phy_on = true;
}
return sdhci_resume_host(host);
@@ -338,6 +402,45 @@ static const struct clk_ops arasan_sdcardclk_ops = {
};
/**
+ * sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
+ *
+ * The corecfg_clockmultiplier is supposed to contain the clock multiplier
+ * value of the programmable clock generator.
+ *
+ * NOTES:
+ * - Many existing devices don't seem to do this and work fine. To preserve
+ * compatibility with old hardware where the device tree doesn't provide a
+ * register map, this function is a no-op if no soc_ctl_map has been provided
+ * for this platform.
+ * - The value of corecfg_clockmultiplier should stay in sync with the clock
+ * multiplier value read back from the SDHCI capability register, so this
+ * function is called once at probe time and never again.
+ *
+ * @host: The sdhci_host
+ */
+static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
+ u32 value)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
+ sdhci_arasan->soc_ctl_map;
+
+ /* Having a map is optional */
+ if (!soc_ctl_map)
+ return;
+
+ /* If we have a map, we expect to have a syscon */
+ if (!sdhci_arasan->soc_ctl_base) {
+ pr_warn("%s: Have regmap, but no soc-ctl-syscon\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ sdhci_arasan_syscon_write(host, &soc_ctl_map->clockmultiplier, value);
+}
+
+/**
* sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
*
* The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
@@ -462,6 +565,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
+ struct device_node *np = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
sizeof(*sdhci_arasan));
@@ -516,8 +620,16 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
}
sdhci_get_of_property(pdev);
+
+ if (of_property_read_bool(np, "xlnx,fails-without-test-cd"))
+ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST;
+
pltfm_host->clk = clk_xin;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "rockchip,rk3399-sdhci-5.1"))
+ sdhci_arasan_update_clockmultiplier(host, 0x0);
+
sdhci_arasan_update_baseclkfreq(host);
ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
@@ -547,12 +659,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto unreg_clk;
}
- ret = phy_power_on(sdhci_arasan->phy);
- if (ret < 0) {
- dev_err(&pdev->dev, "phy_power_on err.\n");
- goto err_phy_power;
- }
-
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_arasan_hs400_enhanced_strobe;
}
@@ -565,9 +671,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
err_add_host:
if (!IS_ERR(sdhci_arasan->phy))
- phy_power_off(sdhci_arasan->phy);
-err_phy_power:
- if (!IS_ERR(sdhci_arasan->phy))
phy_exit(sdhci_arasan->phy);
unreg_clk:
sdhci_arasan_unregister_sdclk(&pdev->dev);
@@ -589,7 +692,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
struct clk *clk_ahb = sdhci_arasan->clk_ahb;
if (!IS_ERR(sdhci_arasan->phy)) {
- phy_power_off(sdhci_arasan->phy);
+ if (sdhci_arasan->is_phy_on)
+ phy_power_off(sdhci_arasan->phy);
phy_exit(sdhci_arasan->phy);
}
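
The is_phy_on flag threaded through the hunks above exists so that suspend, resume and remove never call phy_power_off() on a PHY that is not currently powered. A minimal hedged sketch of that guard (helper name is illustrative):

#include <linux/err.h>
#include <linux/phy/phy.h>

static void example_phy_power_off_if_on(struct phy *phy, bool *is_on)
{
	/* only power off a PHY we actually powered on earlier */
	if (!IS_ERR_OR_NULL(phy) && *is_on) {
		phy_power_off(phy);
		*is_on = false;
	}
}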
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 239be2fde242..fb71c866eacc 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -583,7 +583,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
- if (of_get_property(np, "little-endian", NULL))
+ if (of_property_read_bool(np, "little-endian"))
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
sizeof(struct sdhci_esdhc));
else
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 897cfd24ca2e..72a1f1f5180a 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -156,7 +156,7 @@ static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
if (!gpio_is_valid(gpio))
return;
- err = gpio_request(gpio, "sd_cd");
+ err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
if (err < 0)
goto out;
@@ -179,7 +179,7 @@ static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
return;
out_free:
- gpio_free(gpio);
+ devm_gpio_free(&slot->chip->pdev->dev, gpio);
out:
dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
}
@@ -188,8 +188,6 @@ static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
{
if (slot->cd_irq >= 0)
free_irq(slot->cd_irq, slot);
- if (gpio_is_valid(slot->cd_gpio))
- gpio_free(slot->cd_gpio);
}
#else
@@ -356,6 +354,7 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+ MMC_CAP_CMD_DURING_TFR |
MMC_CAP_WAIT_WHILE_BUSY;
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
slot->hw_reset = sdhci_pci_int_hw_reset;
@@ -421,17 +420,30 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
/* Define Host controllers for Intel Merrifield platform */
#define INTEL_MRFLD_EMMC_0 0
#define INTEL_MRFLD_EMMC_1 1
+#define INTEL_MRFLD_SD 2
+#define INTEL_MRFLD_SDIO 3
static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
- if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_0) &&
- (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_1))
- /* SD support is not ready yet */
+ unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
+
+ switch (func) {
+ case INTEL_MRFLD_EMMC_0:
+ case INTEL_MRFLD_EMMC_1:
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
+ MMC_CAP_8_BIT_DATA |
+ MMC_CAP_1_8V_DDR;
+ break;
+ case INTEL_MRFLD_SD:
+ slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ break;
+ case INTEL_MRFLD_SDIO:
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
+ MMC_CAP_POWER_OFF_CARD;
+ break;
+ default:
return -ENODEV;
-
- slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
- MMC_CAP_1_8V_DDR;
-
+ }
return 0;
}
@@ -1615,7 +1627,6 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
- slot->pci_bar = bar;
slot->rst_n_gpio = -EINVAL;
slot->cd_gpio = -EINVAL;
slot->cd_idx = -1;
@@ -1643,27 +1654,22 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->irq = pdev->irq;
- ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
+ ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
if (ret) {
dev_err(&pdev->dev, "cannot request region\n");
goto cleanup;
}
- host->ioaddr = pci_ioremap_bar(pdev, bar);
- if (!host->ioaddr) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
- goto release;
- }
+ host->ioaddr = pcim_iomap_table(pdev)[bar];
if (chip->fixes && chip->fixes->probe_slot) {
ret = chip->fixes->probe_slot(slot);
if (ret)
- goto unmap;
+ goto cleanup;
}
if (gpio_is_valid(slot->rst_n_gpio)) {
- if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) {
+ if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
gpio_direction_output(slot->rst_n_gpio, 1);
slot->host->mmc->caps |= MMC_CAP_HW_RESET;
slot->hw_reset = sdhci_pci_gpio_hw_reset;
@@ -1702,18 +1708,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
return slot;
remove:
- if (gpio_is_valid(slot->rst_n_gpio))
- gpio_free(slot->rst_n_gpio);
-
if (chip->fixes && chip->fixes->remove_slot)
chip->fixes->remove_slot(slot, 0);
-unmap:
- iounmap(host->ioaddr);
-
-release:
- pci_release_region(pdev, bar);
-
cleanup:
if (slot->data && slot->data->cleanup)
slot->data->cleanup(slot->data);
@@ -1738,17 +1735,12 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
sdhci_remove_host(slot->host, dead);
- if (gpio_is_valid(slot->rst_n_gpio))
- gpio_free(slot->rst_n_gpio);
-
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
if (slot->data && slot->data->cleanup)
slot->data->cleanup(slot->data);
- pci_release_region(slot->chip->pdev, slot->pci_bar);
-
sdhci_free_host(slot->host);
}
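
The sdhci-pci hunks above convert manual gpio_request()/pci_request_region()/pci_ioremap_bar() calls to their devm_*/pcim_* managed counterparts, which is why the matching gpio_free()/iounmap()/pci_release_region() calls disappear from the error and remove paths: devres releases them automatically when the device is unbound. A hedged sketch of the managed BAR-mapping pattern (names are illustrative):

#include <linux/bits.h>
#include <linux/pci.h>

static int example_map_bar(struct pci_dev *pdev, int bar,
			   void __iomem **ioaddr)
{
	int ret;

	/* devres-managed request + ioremap of a single BAR */
	ret = pcim_iomap_regions(pdev, BIT(bar), "example");
	if (ret)
		return ret;

	*ioaddr = pcim_iomap_table(pdev)[bar];
	return 0;
}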
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 7e0788712e1a..9c7c08b93223 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -72,7 +72,6 @@ struct sdhci_pci_slot {
struct sdhci_host *host;
struct sdhci_pci_data *data;
- int pci_bar;
int rst_n_gpio;
int cd_gpio;
int cd_irq;
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 1d17dcfc3ffb..ad49bfaf5bf8 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -156,13 +156,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
host->quirks2 = pdata->quirks2;
}
- /*
- * Some platforms need to probe the controller to be able to
- * determine which caps should be used.
- */
- if (host->ops && host->ops->platform_init)
- host->ops->platform_init(host);
-
platform_set_drvdata(pdev, host);
return host;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 1e93dc4e303e..20b6ff5b4af1 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -391,6 +391,31 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
};
+static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ /*
+ * The TRM states that the SD/MMC controller found on
+ * Tegra124 can address 34 bits (the maximum supported by
+ * the Tegra memory controller), but tests show that DMA
+ * to or from above 4 GiB doesn't work. This is possibly
+ * caused by missing programming, though it's not obvious
+ * what sequence is required. Mark 64-bit DMA broken for
+ * now to fix this for existing users (e.g. Nyan boards).
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+ .ops = &tegra114_sdhci_ops,
+};
+
+static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
+ .pdata = &sdhci_tegra124_pdata,
+};
+
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -408,7 +433,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cd65d474afa2..48055666c655 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -888,7 +888,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
{
- return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12);
+ return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
+ !mrq->cap_cmd_during_tfr;
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
@@ -1031,9 +1032,18 @@ static void sdhci_finish_data(struct sdhci_host *host)
sdhci_do_reset(host, SDHCI_RESET_DATA);
}
- /* Avoid triggering warning in sdhci_send_command() */
- host->cmd = NULL;
- sdhci_send_command(host, data->stop);
+ /*
+ * A 'cap_cmd_during_tfr' request must not use the command line
+ * after mmc_command_done() has been called. It is the upper
+ * layer's responsibility to send the stop command if required.
+ */
+ if (data->mrq->cap_cmd_during_tfr) {
+ sdhci_finish_mrq(host, data->mrq);
+ } else {
+ /* Avoid triggering warning in sdhci_send_command() */
+ host->cmd = NULL;
+ sdhci_send_command(host, data->stop);
+ }
} else {
sdhci_finish_mrq(host, data->mrq);
}
@@ -1165,6 +1175,9 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
+ if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
+ mmc_command_done(host->mmc, cmd->mrq);
+
/*
* The host can send an interrupt when the busy state has
* ended, allowing us to wait without wasting CPU cycles.
@@ -2062,7 +2075,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
- wait_event_interruptible_timeout(host->buf_ready_int,
+ wait_event_timeout(host->buf_ready_int,
(host->tuning_done == 1),
msecs_to_jiffies(50));
spin_lock_irqsave(&host->lock, flags);
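
Switching the tuning wait from wait_event_interruptible_timeout() to wait_event_timeout() means a signal pending on the calling task can no longer cut the 50 ms wait for the Buffer Read Ready interrupt short. A hedged sketch of the resulting wait (names are illustrative):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static int example_wait_buf_ready(wait_queue_head_t *wq, int *done)
{
	/* returns 0 on timeout; pending signals are ignored */
	if (!wait_event_timeout(*wq, *done == 1, msecs_to_jiffies(50)))
		return -ETIMEDOUT;

	return 0;
}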
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0411c9f36461..c722cd23205c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -84,6 +84,8 @@
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
#define SDHCI_CTRL_8BITBUS 0x20
+#define SDHCI_CTRL_CDTEST_INS 0x40
+#define SDHCI_CTRL_CDTEST_EN 0x80
#define SDHCI_POWER_CONTROL 0x29
#define SDHCI_POWER_ON 0x01
@@ -555,7 +557,6 @@ struct sdhci_ops {
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
- void (*platform_init)(struct sdhci_host *host);
void (*card_event)(struct sdhci_host *host);
void (*voltage_switch)(struct sdhci_host *host);
int (*select_drive_strength)(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index c3b651bf89cb..49edff7fee49 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -94,6 +94,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -213,6 +214,13 @@ static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
clk_disable_unprepare(priv->clk);
}
+static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
+}
+
static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -369,7 +377,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
host->clk_update = sh_mobile_sdhi_clk_update;
host->clk_disable = sh_mobile_sdhi_clk_disable;
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
- host->start_signal_voltage_switch = sh_mobile_sdhi_start_signal_voltage_switch;
+
+ /* SDR speeds are only available on Gen2+ */
+ if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
+ /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
+ host->card_busy = sh_mobile_sdhi_card_busy;
+ host->start_signal_voltage_switch =
+ sh_mobile_sdhi_start_signal_voltage_switch;
+ }
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 2ee4c21ec55e..c0a5c676d0e8 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -72,6 +72,13 @@
#define SDXC_REG_CHDA (0x90)
#define SDXC_REG_CBDA (0x94)
+/* New registers introduced in A64 */
+#define SDXC_REG_A12A 0x058 /* SMC Auto Command 12 Register */
+#define SDXC_REG_SD_NTSR 0x05C /* SMC New Timing Set Register */
+#define SDXC_REG_DRV_DL 0x140 /* Drive Delay Control Register */
+#define SDXC_REG_SAMP_DL_REG 0x144 /* SMC sample delay control */
+#define SDXC_REG_DS_DL_REG 0x148 /* SMC data strobe delay control */
+
#define mmc_readl(host, reg) \
readl((host)->reg_base + SDXC_##reg)
#define mmc_writel(host, reg, value) \
@@ -217,21 +224,41 @@
#define SDXC_CLK_50M_DDR 3
#define SDXC_CLK_50M_DDR_8BIT 4
+#define SDXC_2X_TIMING_MODE BIT(31)
+
+#define SDXC_CAL_START BIT(15)
+#define SDXC_CAL_DONE BIT(14)
+#define SDXC_CAL_DL_SHIFT 8
+#define SDXC_CAL_DL_SW_EN BIT(7)
+#define SDXC_CAL_DL_SW_SHIFT 0
+#define SDXC_CAL_DL_MASK 0x3f
+
+#define SDXC_CAL_TIMEOUT 3 /* in seconds, 3s is enough */
+
struct sunxi_mmc_clk_delay {
u32 output;
u32 sample;
};
struct sunxi_idma_des {
- u32 config;
- u32 buf_size;
- u32 buf_addr_ptr1;
- u32 buf_addr_ptr2;
+ __le32 config;
+ __le32 buf_size;
+ __le32 buf_addr_ptr1;
+ __le32 buf_addr_ptr2;
+};
+
+struct sunxi_mmc_cfg {
+ u32 idma_des_size_bits;
+ const struct sunxi_mmc_clk_delay *clk_delays;
+
+ /* does the IP block support autocalibration? */
+ bool can_calibrate;
};
struct sunxi_mmc_host {
struct mmc_host *mmc;
struct reset_control *reset;
+ const struct sunxi_mmc_cfg *cfg;
/* IO mapping base */
void __iomem *reg_base;
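
The sunxi_idma_des fields above are now declared __le32, and the descriptor-filling hunk further down wraps every store in cpu_to_le32(): the IDMA engine always reads the descriptors as little-endian, so the annotation keeps the layout correct on big-endian CPUs and lets sparse flag missing conversions. A hedged sketch of the idiom with a hypothetical descriptor (not the driver's):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_dma_desc {
	__le32 config;
	__le32 buf_addr;
};

static void example_fill_desc(struct example_dma_desc *desc,
			      u32 config, dma_addr_t buf)
{
	/* convert CPU-endian values to the little-endian in-memory layout */
	desc->config = cpu_to_le32(config);
	desc->buf_addr = cpu_to_le32(lower_32_bits(buf));
}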
@@ -241,7 +268,6 @@ struct sunxi_mmc_host {
struct clk *clk_mmc;
struct clk *clk_sample;
struct clk *clk_output;
- const struct sunxi_mmc_clk_delay *clk_delays;
/* irq */
spinlock_t lock;
@@ -250,7 +276,6 @@ struct sunxi_mmc_host {
u32 sdio_imask;
/* dma */
- u32 idma_des_size_bits;
dma_addr_t sg_dma;
void *sg_cpu;
bool wait_dma;
@@ -322,25 +347,28 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
{
struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
dma_addr_t next_desc = host->sg_dma;
- int i, max_len = (1 << host->idma_des_size_bits);
+ int i, max_len = (1 << host->cfg->idma_des_size_bits);
for (i = 0; i < data->sg_len; i++) {
- pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN |
- SDXC_IDMAC_DES0_DIC;
+ pdes[i].config = cpu_to_le32(SDXC_IDMAC_DES0_CH |
+ SDXC_IDMAC_DES0_OWN |
+ SDXC_IDMAC_DES0_DIC);
if (data->sg[i].length == max_len)
pdes[i].buf_size = 0; /* 0 == max_len */
else
- pdes[i].buf_size = data->sg[i].length;
+ pdes[i].buf_size = cpu_to_le32(data->sg[i].length);
next_desc += sizeof(struct sunxi_idma_des);
- pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]);
- pdes[i].buf_addr_ptr2 = (u32)next_desc;
+ pdes[i].buf_addr_ptr1 =
+ cpu_to_le32(sg_dma_address(&data->sg[i]));
+ pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc);
}
- pdes[0].config |= SDXC_IDMAC_DES0_FD;
- pdes[i - 1].config |= SDXC_IDMAC_DES0_LD | SDXC_IDMAC_DES0_ER;
- pdes[i - 1].config &= ~SDXC_IDMAC_DES0_DIC;
+ pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
+ pdes[i - 1].config |= cpu_to_le32(SDXC_IDMAC_DES0_LD |
+ SDXC_IDMAC_DES0_ER);
+ pdes[i - 1].config &= cpu_to_le32(~SDXC_IDMAC_DES0_DIC);
pdes[i - 1].buf_addr_ptr2 = 0;
/*
@@ -653,11 +681,84 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
return 0;
}
+static int sunxi_mmc_calibrate(struct sunxi_mmc_host *host, int reg_off)
+{
+ u32 reg = readl(host->reg_base + reg_off);
+ u32 delay;
+ unsigned long timeout;
+
+ if (!host->cfg->can_calibrate)
+ return 0;
+
+ reg &= ~(SDXC_CAL_DL_MASK << SDXC_CAL_DL_SW_SHIFT);
+ reg &= ~SDXC_CAL_DL_SW_EN;
+
+ writel(reg | SDXC_CAL_START, host->reg_base + reg_off);
+
+ dev_dbg(mmc_dev(host->mmc), "calibration started\n");
+
+ timeout = jiffies + HZ * SDXC_CAL_TIMEOUT;
+
+ while (!((reg = readl(host->reg_base + reg_off)) & SDXC_CAL_DONE)) {
+ if (time_before(jiffies, timeout))
+ cpu_relax();
+ else {
+ reg &= ~SDXC_CAL_START;
+ writel(reg, host->reg_base + reg_off);
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ delay = (reg >> SDXC_CAL_DL_SHIFT) & SDXC_CAL_DL_MASK;
+
+ reg &= ~SDXC_CAL_START;
+ reg |= (delay << SDXC_CAL_DL_SW_SHIFT) | SDXC_CAL_DL_SW_EN;
+
+ writel(reg, host->reg_base + reg_off);
+
+ dev_dbg(mmc_dev(host->mmc), "calibration ended, reg is 0x%x\n", reg);
+
+ return 0;
+}
+
+static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
+ struct mmc_ios *ios, u32 rate)
+{
+ int index;
+
+ if (!host->cfg->clk_delays)
+ return 0;
+
+ /* determine delays */
+ if (rate <= 400000) {
+ index = SDXC_CLK_400K;
+ } else if (rate <= 25000000) {
+ index = SDXC_CLK_25M;
+ } else if (rate <= 52000000) {
+ if (ios->timing != MMC_TIMING_UHS_DDR50 &&
+ ios->timing != MMC_TIMING_MMC_DDR52) {
+ index = SDXC_CLK_50M;
+ } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ index = SDXC_CLK_50M_DDR_8BIT;
+ } else {
+ index = SDXC_CLK_50M_DDR;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ clk_set_phase(host->clk_sample, host->cfg->clk_delays[index].sample);
+ clk_set_phase(host->clk_output, host->cfg->clk_delays[index].output);
+
+ return 0;
+}
+
static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
- u32 rate, oclk_dly, rval, sclk_dly;
- u32 clock = ios->clock;
+ long rate;
+ u32 rval, clock = ios->clock;
int ret;
/* 8 bit DDR requires a higher module clock */
@@ -666,13 +767,18 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
clock <<= 1;
rate = clk_round_rate(host->clk_mmc, clock);
- dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
+ if (rate < 0) {
+ dev_err(mmc_dev(host->mmc), "error rounding clk to %d: %ld\n",
+ clock, rate);
+ return rate;
+ }
+ dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %ld\n",
clock, rate);
/* setting clock rate */
ret = clk_set_rate(host->clk_mmc, rate);
if (ret) {
- dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n",
+ dev_err(mmc_dev(host->mmc), "error setting clk to %ld: %d\n",
rate, ret);
return ret;
}
@@ -692,31 +798,15 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
- /* determine delays */
- if (rate <= 400000) {
- oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
- sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
- } else if (rate <= 25000000) {
- oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
- sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
- } else if (rate <= 52000000) {
- if (ios->timing != MMC_TIMING_UHS_DDR50 &&
- ios->timing != MMC_TIMING_MMC_DDR52) {
- oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
- sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
- } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
- oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].output;
- sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].sample;
- } else {
- oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
- sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
- }
- } else {
- return -EINVAL;
- }
+ ret = sunxi_mmc_clk_set_phase(host, ios, rate);
+ if (ret)
+ return ret;
+
+ ret = sunxi_mmc_calibrate(host, SDXC_REG_SAMP_DL_REG);
+ if (ret)
+ return ret;
- clk_set_phase(host->clk_sample, sclk_dly);
- clk_set_phase(host->clk_output, oclk_dly);
+ /* TODO: enable calibration on sdc2 (SDXC_REG_DS_DL_REG) of the A64 */
return sunxi_mmc_oclk_onoff(host, 1);
}
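
The new sunxi_mmc_calibrate() above polls a DONE bit against a jiffies deadline before giving up. A hedged sketch of that poll-with-deadline idiom follows (register and bit names are illustrative); it is the same pattern that readl_poll_timeout() from <linux/iopoll.h> packages up:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <asm/processor.h>

static int example_poll_done(void __iomem *reg, u32 done_bit,
			     unsigned int timeout_secs)
{
	unsigned long deadline = jiffies + timeout_secs * HZ;

	/* busy-wait until the hardware sets the DONE bit or we time out */
	while (!(readl(reg) & done_bit)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}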
@@ -938,14 +1028,6 @@ static int sunxi_mmc_card_busy(struct mmc_host *mmc)
return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
}
-static const struct of_device_id sunxi_mmc_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-mmc", },
- { .compatible = "allwinner,sun5i-a13-mmc", },
- { .compatible = "allwinner,sun9i-a80-mmc", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
-
static struct mmc_host_ops sunxi_mmc_ops = {
.request = sunxi_mmc_request,
.set_ios = sunxi_mmc_set_ios,
@@ -974,21 +1056,54 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
[SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 },
};
+static const struct sunxi_mmc_cfg sun4i_a10_cfg = {
+ .idma_des_size_bits = 13,
+ .clk_delays = NULL,
+ .can_calibrate = false,
+};
+
+static const struct sunxi_mmc_cfg sun5i_a13_cfg = {
+ .idma_des_size_bits = 16,
+ .clk_delays = NULL,
+ .can_calibrate = false,
+};
+
+static const struct sunxi_mmc_cfg sun7i_a20_cfg = {
+ .idma_des_size_bits = 16,
+ .clk_delays = sunxi_mmc_clk_delays,
+ .can_calibrate = false,
+};
+
+static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
+ .idma_des_size_bits = 16,
+ .clk_delays = sun9i_mmc_clk_delays,
+ .can_calibrate = false,
+};
+
+static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
+ .idma_des_size_bits = 16,
+ .clk_delays = NULL,
+ .can_calibrate = true,
+};
+
+static const struct of_device_id sunxi_mmc_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg },
+ { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg },
+ { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
+ { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
+ { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
+
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
int ret;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc"))
- host->idma_des_size_bits = 13;
- else
- host->idma_des_size_bits = 16;
-
- if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
- host->clk_delays = sun9i_mmc_clk_delays;
- else
- host->clk_delays = sunxi_mmc_clk_delays;
+ host->cfg = of_device_get_match_data(&pdev->dev);
+ if (!host->cfg)
+ return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
if (ret) {
@@ -1014,16 +1129,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return PTR_ERR(host->clk_mmc);
}
- host->clk_output = devm_clk_get(&pdev->dev, "output");
- if (IS_ERR(host->clk_output)) {
- dev_err(&pdev->dev, "Could not get output clock\n");
- return PTR_ERR(host->clk_output);
- }
+ if (host->cfg->clk_delays) {
+ host->clk_output = devm_clk_get(&pdev->dev, "output");
+ if (IS_ERR(host->clk_output)) {
+ dev_err(&pdev->dev, "Could not get output clock\n");
+ return PTR_ERR(host->clk_output);
+ }
- host->clk_sample = devm_clk_get(&pdev->dev, "sample");
- if (IS_ERR(host->clk_sample)) {
- dev_err(&pdev->dev, "Could not get sample clock\n");
- return PTR_ERR(host->clk_sample);
+ host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+ if (IS_ERR(host->clk_sample)) {
+ dev_err(&pdev->dev, "Could not get sample clock\n");
+ return PTR_ERR(host->clk_sample);
+ }
}
host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
@@ -1120,15 +1237,17 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
mmc->max_blk_count = 8192;
mmc->max_blk_size = 4096;
mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
- mmc->max_seg_size = (1 << host->idma_des_size_bits);
+ mmc->max_seg_size = (1 << host->cfg->idma_des_size_bits);
mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
/* 400kHz ~ 52MHz */
mmc->f_min = 400000;
mmc->f_max = 52000000;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ if (host->cfg->clk_delays)
+ mmc->caps |= MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
@@ -1160,6 +1279,8 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
if (!IS_ERR(host->reset))
reset_control_assert(host->reset);
+ clk_disable_unprepare(host->clk_sample);
+ clk_disable_unprepare(host->clk_output);
clk_disable_unprepare(host->clk_mmc);
clk_disable_unprepare(host->clk_ahb);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 7f63ec05bdf4..8e126afd988c 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -79,6 +79,9 @@
#define CLK_CTL_DIV_MASK 0xff
#define CLK_CTL_SCLKEN BIT(8)
+#define CARD_OPT_WIDTH8 BIT(13)
+#define CARD_OPT_WIDTH BIT(15)
+
#define TMIO_BBS 512 /* Boot block size */
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
@@ -158,6 +161,7 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+ int (*card_busy)(struct mmc_host *mmc);
int (*start_signal_voltage_switch)(struct mmc_host *mmc,
struct mmc_ios *ios);
};
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 92467efc4e2c..700567603107 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -336,7 +336,9 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
- case MMC_RSP_R1: c |= RESP_R1; break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1_NO_CRC:
+ c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
case MMC_RSP_R3: c |= RESP_R3; break;
@@ -730,12 +732,13 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
- /* Some hardware cannot perform 2 byte requests in 4 bit mode */
- if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ /* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
+ host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
- pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
mmc_hostname(host->mmc), data->blksz);
return -EINVAL;
}
@@ -857,14 +860,16 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
unsigned char bus_width)
{
- switch (bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
- break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
- break;
- }
+ u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
+ & ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
+
+ /* reg now applies to MMC_BUS_WIDTH_4 */
+ if (bus_width == MMC_BUS_WIDTH_1)
+ reg |= CARD_OPT_WIDTH;
+ else if (bus_width == MMC_BUS_WIDTH_8)
+ reg |= CARD_OPT_WIDTH8;
+
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
/* Set MMC clock / power.
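
The set_bus_width() rework above replaces whole-register magic constants (0x80e0/0x00e0) with a read-modify-write over named width bits, so unrelated bits of CTL_SD_MEM_CARD_OPT are preserved and 8-bit mode becomes possible. A hedged sketch of that clear-then-set idiom with placeholder bit names, where the cleared state stands for 4-bit mode as in the hunk above:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_OPT_WIDTH8	BIT(13)	/* placeholder: select 8-bit bus */
#define EXAMPLE_OPT_WIDTH	BIT(15)	/* placeholder: select 1-bit bus */

static u16 example_set_bus_width(u16 reg, int bus_width)
{
	/* clear both width bits; with both clear the bus is 4 bits wide */
	reg &= ~(EXAMPLE_OPT_WIDTH | EXAMPLE_OPT_WIDTH8);

	if (bus_width == 1)
		reg |= EXAMPLE_OPT_WIDTH;
	else if (bus_width == 8)
		reg |= EXAMPLE_OPT_WIDTH8;

	return reg;
}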
@@ -960,20 +965,12 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static int tmio_mmc_card_busy(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
-}
-
static struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
- .card_busy = tmio_mmc_card_busy,
.multi_io_quirk = tmio_multi_io_quirk,
};
@@ -1072,6 +1069,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
goto host_free;
}
+ tmio_mmc_ops.card_busy = _host->card_busy;
tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
mmc->ops = &tmio_mmc_ops;
@@ -1089,6 +1087,15 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
!mmc_card_is_removable(mmc) ||
mmc->slot.cd_irq >= 0);
+ /*
+ * On Gen2+, eMMC with NONREMOVABLE currently fails because native
+ * hotplug gets disabled. It seems to be runtime-PM related but needs further
+ * research. Since a PM overhaul is planned anyway, for now keep the device
+ * active by always enabling native hotplug.
+ */
+ if (pdata->flags & TMIO_MMC_MIN_RCAR2)
+ _host->native_hotplug = true;
+
if (tmio_mmc_clk_enable(_host) < 0) {
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 1e819f98b94f..bb3e0d1dd355 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2116,13 +2116,11 @@ static int vub300_probe(struct usb_interface *interface,
command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_out_urb) {
retval = -ENOMEM;
- dev_err(&udev->dev, "not enough memory for command_out_urb\n");
goto error0;
}
command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_res_urb) {
retval = -ENOMEM;
- dev_err(&udev->dev, "not enough memory for command_res_urb\n");
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 392f9eff5fb7..5bcc896a48c3 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -78,7 +78,7 @@ config MTD_PHYSMAP_OF_VERSATILE
bool "Support ARM Versatile physmap OF"
depends on MTD_PHYSMAP_OF
depends on MFD_SYSCON
- default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || REALVIEW_DT)
+ default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW)
help
This provides some extra DT physmap parsing for the ARM Versatile
platforms, basically to add a VPP (write protection) callback so
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index ebf46ad2d513..d1e6931c132f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -351,7 +351,6 @@ static int ubiblock_init_request(void *data, struct request *req,
static struct blk_mq_ops ubiblock_mq_ops = {
.queue_rq = ubiblock_queue_rq,
.init_request = ubiblock_init_request,
- .map_queue = blk_mq_map_queue,
};
static DEFINE_IDR(ubiblock_minor_idr);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c5415b05ea9..95c32f2d7601 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,8 @@ config IPVLAN
tristate "IP-VLAN support"
depends on INET
depends on IPV6
+ depends on NETFILTER
+ depends on NET_L3_MASTER_DEV
---help---
This allows one to create virtual devices off of a main interface
and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9599ed6f1213..3f31ca32f52b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4628,7 +4628,7 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = create_singlethread_workqueue(bond_dev->name);
+ bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
if (!bond->wq)
return -ENOMEM;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 71f0e791355b..b3d02759c226 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -600,7 +600,6 @@ static int ems_usb_start(struct ems_usb *dev)
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
err = -ENOMEM;
break;
}
@@ -752,10 +751,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
+ if (!urb)
goto nomem;
- }
buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
@@ -1007,10 +1004,8 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->tx_contexts[i].echo_index = MAX_TX_URBS;
dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->intr_urb) {
- dev_err(&intf->dev, "Couldn't alloc intr URB\n");
+ if (!dev->intr_urb)
goto cleanup_candev;
- }
dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
if (!dev->intr_in_buffer)
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 784a9002fbb9..be928ce62d32 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -558,8 +558,6 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for URBs\n");
err = -ENOMEM;
break;
}
@@ -730,7 +728,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
dev_kfree_skb(skb);
goto nourbmem;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 6f0cbc38782e..77e3cc06a30c 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -493,10 +493,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(netdev, "No memory left for URB\n");
+ if (!urb)
goto nomem_urb;
- }
hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
&urb->transfer_dma);
@@ -600,11 +598,8 @@ static int gs_can_open(struct net_device *netdev)
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- netdev_err(netdev,
- "No memory left for URB\n");
+ if (!urb)
return -ENOMEM;
- }
/* alloc rx buffer */
buf = usb_alloc_coherent(dev->udev,
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6f1f3b675ff5..d51e0c401b48 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -787,10 +787,8 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
int err;
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
+ if (!urb)
return -ENOMEM;
- }
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
@@ -1393,8 +1391,6 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for URBs\n");
err = -ENOMEM;
break;
}
@@ -1670,7 +1666,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index bfb91d8fa460..c06382cdfdfe 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -399,7 +399,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
/* create a URB, and a buffer for it, to receive usb messages */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
err = -ENOMEM;
break;
}
@@ -454,7 +453,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
/* create a URB and a buffer for it, to transmit usb messages */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
err = -ENOMEM;
break;
}
@@ -651,10 +649,8 @@ static int peak_usb_restart(struct peak_usb_device *dev)
/* first allocate a urb to handle the asynchronous steps */
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->netdev, "no memory left for urb\n");
+ if (!urb)
return -ENOMEM;
- }
/* also allocate enough space for the commands to send */
buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC);
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index a731720f1d13..108a30e15097 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -623,10 +623,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
+ if (!urb)
goto nomem;
- }
buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC,
&urb->transfer_dma);
@@ -748,7 +746,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- netdev_err(netdev, "No memory left for URBs\n");
err = -ENOMEM;
break;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 8f4544394f44..065984670ff1 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -16,6 +16,7 @@ config NET_DSA_BCM_SF2
select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
+ select B53
---help---
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
@@ -24,4 +25,13 @@ source "drivers/net/dsa/b53/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ depends on NET_DSA
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index ca1e71b853a6..8346e4f9737a 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-y += b53/
obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index bda37d336736..7717b19dc806 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -167,6 +167,65 @@ static const struct b53_mib_desc b53_mibs[] = {
#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
+static const struct b53_mib_desc b53_mibs_58xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQPKTQ0" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPKts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredCollision" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x34, "TxFrameInDisc" },
+ { 4, 0x38, "TxPausePkts" },
+ { 4, 0x3c, "TxQPKTQ1" },
+ { 4, 0x40, "TxQPKTQ2" },
+ { 4, 0x44, "TxQPKTQ3" },
+ { 4, 0x48, "TxQPKTQ4" },
+ { 4, 0x4c, "TxQPKTQ5" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "RxPkts64Octets" },
+ { 4, 0x64, "RxPkts65to127Octets" },
+ { 4, 0x68, "RxPkts128to255Octets" },
+ { 4, 0x6c, "RxPkts256to511Octets" },
+ { 4, 0x70, "RxPkts512to1023Octets" },
+ { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkt" },
+ { 4, 0xac, "RxSymblErr" },
+ { 4, 0xb0, "InRangeErrCount" },
+ { 4, 0xb4, "OutRangeErrCount" },
+ { 4, 0xb8, "EEELpiEvent" },
+ { 4, 0xbc, "EEELpiDuration" },
+ { 4, 0xc0, "RxDiscard" },
+ { 4, 0xc8, "TxQPKTQ6" },
+ { 4, 0xcc, "TxQPKTQ7" },
+ { 4, 0xd0, "TxPkts64Octets" },
+ { 4, 0xd4, "TxPkts65to127Octets" },
+ { 4, 0xd8, "TxPkts128to255Octets" },
+ { 4, 0xdc, "TxPkts256to511Ocets" },
+ { 4, 0xe0, "TxPkts512to1023Ocets" },
+ { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
+};
+
+#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
unsigned int i;
@@ -418,7 +477,7 @@ static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
unsigned int i;
u16 pvlan;
@@ -436,7 +495,7 @@ static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
static int b53_enable_port(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
unsigned int cpu_port = dev->cpu_port;
u16 pvlan;
@@ -461,7 +520,7 @@ static int b53_enable_port(struct dsa_switch *ds, int port,
static void b53_disable_port(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
u8 reg;
/* Disable Tx/Rx for the port */
@@ -570,7 +629,7 @@ static int b53_switch_reset(struct b53_device *dev)
static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
u16 value = 0;
int ret;
@@ -585,7 +644,7 @@ static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
if (priv->ops->phy_write16)
return priv->ops->phy_write16(priv, addr, reg, val);
@@ -635,6 +694,8 @@ static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
return b53_mibs_65;
else if (is63xx(dev))
return b53_mibs_63xx;
+ else if (is58xx(dev))
+ return b53_mibs_58xx;
else
return b53_mibs;
}
@@ -645,13 +706,15 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
return B53_MIBS_65_SIZE;
else if (is63xx(dev))
return B53_MIBS_63XX_SIZE;
+ else if (is58xx(dev))
+ return B53_MIBS_58XX_SIZE;
else
return B53_MIBS_SIZE;
}
static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
unsigned int mib_size = b53_get_mib_size(dev);
unsigned int i;
@@ -664,7 +727,7 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
unsigned int mib_size = b53_get_mib_size(dev);
const struct b53_mib_desc *s;
@@ -696,19 +759,14 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
static int b53_get_sset_count(struct dsa_switch *ds)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
return b53_get_mib_size(dev);
}
-static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- return 0;
-}
-
static int b53_setup(struct dsa_switch *ds)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
unsigned int port;
int ret;
@@ -739,7 +797,7 @@ static int b53_setup(struct dsa_switch *ds)
static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
u8 rgmii_ctrl = 0, reg = 0, off;
if (!phy_is_pseudo_fixed_link(phydev))
@@ -873,7 +931,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
return -EOPNOTSUPP;
@@ -890,7 +948,7 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
unsigned int cpu_port = dev->cpu_port;
@@ -924,7 +982,7 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
static int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
unsigned int cpu_port = dev->cpu_port;
struct b53_vlan *vl;
@@ -970,7 +1028,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
int (*cb)(struct switchdev_obj *obj))
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
u16 vid, vid_start = 0, pvid;
struct b53_vlan *vl;
int err = 0;
@@ -1129,7 +1187,7 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
@@ -1144,7 +1202,7 @@ static void b53_fdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
pr_err("%s: failed to add MAC address\n", __func__);
@@ -1153,7 +1211,7 @@ static void b53_fdb_add(struct dsa_switch *ds, int port,
static int b53_fdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb)
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
@@ -1212,7 +1270,7 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
{
- struct b53_device *priv = ds_to_priv(ds);
+ struct b53_device *priv = ds->priv;
struct net_device *dev = ds->ports[port].netdev;
struct b53_arl_entry results[2];
unsigned int count = 0;
@@ -1251,10 +1309,22 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port,
static int b53_br_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
+ s8 cpu_port = ds->dst->cpu_port;
u16 pvlan, reg;
unsigned int i;
+ /* Make this port leave the "join all VLANs" group, since proper
+ * VLAN entries will be used from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
+
dev->ports[port].bridge_dev = bridge;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
@@ -1284,9 +1354,10 @@ static int b53_br_join(struct dsa_switch *ds, int port,
static void b53_br_leave(struct dsa_switch *ds, int port)
{
- struct b53_device *dev = ds_to_priv(ds);
+ struct b53_device *dev = ds->priv;
struct net_device *bridge = dev->ports[port].bridge_dev;
struct b53_vlan *vl = &dev->vlans[0];
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1316,22 +1387,27 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
else
pvid = 0;
- b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(dev->cpu_port);
- vl->untag |= BIT(port) | BIT(dev->cpu_port);
- b53_set_vlan_entry(dev, pvid, vl);
+ /* Make this port rejoin all VLANs, since it no longer has explicit VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ } else {
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(dev->cpu_port);
+ vl->untag |= BIT(port) | BIT(dev->cpu_port);
+ b53_set_vlan_entry(dev, pvid, vl);
+ }
}
-static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
- struct b53_device *dev = ds_to_priv(ds);
- u8 hw_state, cur_hw_state;
+ struct b53_device *dev = ds->priv;
+ u8 hw_state;
u8 reg;
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
- cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
-
switch (state) {
case BR_STATE_DISABLED:
hw_state = PORT_CTRL_DIS_STATE;
@@ -1353,30 +1429,28 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
return;
}
- /* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
- * state (hw_state)
- */
- if (cur_hw_state != hw_state) {
- if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
- hw_state <= PORT_CTRL_LISTEN_STATE) {
- if (b53_fast_age_port(dev, port)) {
- dev_err(ds->dev, "fast ageing failed\n");
- return;
- }
- }
- }
-
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg &= ~PORT_CTRL_STP_STATE_MASK;
reg |= hw_state;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
-static struct dsa_switch_driver b53_switch_ops = {
- .tag_protocol = DSA_TAG_PROTO_NONE,
+static void b53_br_fast_age(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (b53_fast_age_port(dev, port))
+ dev_err(ds->dev, "fast ageing failed\n");
+}
+
+static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_NONE;
+}
+
+static struct dsa_switch_ops b53_switch_ops = {
+ .get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
- .set_addr = b53_set_addr,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
@@ -1388,6 +1462,7 @@ static struct dsa_switch_driver b53_switch_ops = {
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
@@ -1593,11 +1668,22 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ {
+ .chip_id = BCM7445_DEVICE_ID,
+ .dev_name = "BCM7445",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
};
static int b53_switch_init(struct b53_device *dev)
{
- struct dsa_switch *ds = dev->ds;
unsigned int i;
int ret;
@@ -1613,7 +1699,6 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[1] = chip->vta_regs[1];
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
- ds->drv = &b53_switch_ops;
dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_entries = chip->arl_entries;
@@ -1681,7 +1766,8 @@ static int b53_switch_init(struct b53_device *dev)
return 0;
}
-struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+struct b53_device *b53_switch_alloc(struct device *base,
+ const struct b53_io_ops *ops,
void *priv)
{
struct dsa_switch *ds;
@@ -1700,6 +1786,7 @@ struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
dev->ds = ds;
dev->priv = priv;
dev->ops = ops;
+ ds->ops = &b53_switch_ops;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index aa87c3fffdac..477a16b5660a 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -267,7 +267,7 @@ static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
return mdiobus_write_nested(bus, addr, reg, value);
}
-static struct b53_io_ops b53_mdio_ops = {
+static const struct b53_io_ops b53_mdio_ops = {
.read8 = b53_mdio_read8,
.read16 = b53_mdio_read16,
.read32 = b53_mdio_read32,
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 77ffc4312808..cc9e6bd83e0e 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -208,7 +208,7 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
-static struct b53_io_ops b53_mmap_ops = {
+static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
.read32 = b53_mmap_read32,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 835a744f206e..f192a673caba 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -60,6 +60,7 @@ enum {
BCM53018_DEVICE_ID = 0x53018,
BCM53019_DEVICE_ID = 0x53019,
BCM58XX_DEVICE_ID = 0x5800,
+ BCM7445_DEVICE_ID = 0x7445,
};
#define B53_N_PORTS 9
@@ -174,6 +175,12 @@ static inline int is5301x(struct b53_device *dev)
dev->chip_id == BCM53019_DEVICE_ID;
}
+static inline int is58xx(struct b53_device *dev)
+{
+ return dev->chip_id == BCM58XX_DEVICE_ID ||
+ dev->chip_id == BCM7445_DEVICE_ID;
+}
+
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
@@ -182,7 +189,8 @@ static inline int is_cpu_port(struct b53_device *dev, int port)
return dev->cpu_port;
}
-struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+struct b53_device *b53_switch_alloc(struct device *base,
+ const struct b53_io_ops *ops,
void *priv);
int b53_switch_detect(struct b53_device *dev);
@@ -364,7 +372,6 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
#ifdef CONFIG_BCM47XX
-#include <linux/version.h>
#include <linux/bcm47xx_nvram.h>
#include <bcm47xx_board.h>
static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index a0b453ea34c9..dac0af4e2cd0 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -309,6 +309,9 @@
/* Port VLAN mask (16 bit) IMP port is always 8, also on 5325 & co */
#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
+/* Join all VLANs register (16 bit) */
+#define B53_JOIN_ALL_VLAN_EN 0x50
+
/*************************************************************************
* 802.1Q Page Registers
*************************************************************************/
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2bda0b5f1578..f89f5308a99b 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -270,7 +270,7 @@ static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
return spi_write(spi, txbuf, sizeof(txbuf));
}
-static struct b53_io_ops b53_spi_ops = {
+static const struct b53_io_ops b53_spi_ops = {
.read8 = b53_spi_read8,
.read16 = b53_spi_read16,
.read32 = b53_spi_read32,
@@ -317,8 +317,6 @@ static int b53_spi_remove(struct spi_device *spi)
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 3e2d4a5fcd5a..8a62b6a69703 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -344,7 +344,7 @@ err:
return ret;
}
-static struct b53_io_ops b53_srab_ops = {
+static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
.read32 = b53_srab_read32,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b2b838724a9b..e218887f18b7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -29,130 +29,21 @@
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <net/switchdev.h>
+#include <linux/platform_data/b53.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+#include "b53/b53_priv.h"
+#include "b53/b53_regs.h"
-/* String, offset, and register size in bytes if different from 4 bytes */
-static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
- { "TxOctets", 0x000, 8 },
- { "TxDropPkts", 0x020 },
- { "TxQPKTQ0", 0x030 },
- { "TxBroadcastPkts", 0x040 },
- { "TxMulticastPkts", 0x050 },
- { "TxUnicastPKts", 0x060 },
- { "TxCollisions", 0x070 },
- { "TxSingleCollision", 0x080 },
- { "TxMultipleCollision", 0x090 },
- { "TxDeferredCollision", 0x0a0 },
- { "TxLateCollision", 0x0b0 },
- { "TxExcessiveCollision", 0x0c0 },
- { "TxFrameInDisc", 0x0d0 },
- { "TxPausePkts", 0x0e0 },
- { "TxQPKTQ1", 0x0f0 },
- { "TxQPKTQ2", 0x100 },
- { "TxQPKTQ3", 0x110 },
- { "TxQPKTQ4", 0x120 },
- { "TxQPKTQ5", 0x130 },
- { "RxOctets", 0x140, 8 },
- { "RxUndersizePkts", 0x160 },
- { "RxPausePkts", 0x170 },
- { "RxPkts64Octets", 0x180 },
- { "RxPkts65to127Octets", 0x190 },
- { "RxPkts128to255Octets", 0x1a0 },
- { "RxPkts256to511Octets", 0x1b0 },
- { "RxPkts512to1023Octets", 0x1c0 },
- { "RxPkts1024toMaxPktsOctets", 0x1d0 },
- { "RxOversizePkts", 0x1e0 },
- { "RxJabbers", 0x1f0 },
- { "RxAlignmentErrors", 0x200 },
- { "RxFCSErrors", 0x210 },
- { "RxGoodOctets", 0x220, 8 },
- { "RxDropPkts", 0x240 },
- { "RxUnicastPkts", 0x250 },
- { "RxMulticastPkts", 0x260 },
- { "RxBroadcastPkts", 0x270 },
- { "RxSAChanges", 0x280 },
- { "RxFragments", 0x290 },
- { "RxJumboPkt", 0x2a0 },
- { "RxSymblErr", 0x2b0 },
- { "InRangeErrCount", 0x2c0 },
- { "OutRangeErrCount", 0x2d0 },
- { "EEELpiEvent", 0x2e0 },
- { "EEELpiDuration", 0x2f0 },
- { "RxDiscard", 0x300, 8 },
- { "TxQPKTQ6", 0x320 },
- { "TxQPKTQ7", 0x330 },
- { "TxPkts64Octets", 0x340 },
- { "TxPkts65to127Octets", 0x350 },
- { "TxPkts128to255Octets", 0x360 },
- { "TxPkts256to511Ocets", 0x370 },
- { "TxPkts512to1023Ocets", 0x380 },
- { "TxPkts1024toMaxPktOcets", 0x390 },
-};
-
-#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
-
-static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
- int port, uint8_t *data)
-{
- unsigned int i;
-
- for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
-}
-
-static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- const struct bcm_sf2_hw_stats *s;
- unsigned int i;
- u64 val = 0;
- u32 offset;
-
- mutex_lock(&priv->stats_mutex);
-
- /* Now fetch the per-port counters */
- for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
- s = &bcm_sf2_mib[i];
-
- /* Do a latched 64-bit read if needed */
- offset = s->reg + CORE_P_MIB_OFFSET(port);
- if (s->sizeof_stat == 8)
- val = core_readq(priv, offset);
- else
- val = core_readl(priv, offset);
-
- data[i] = (u64)val;
- }
-
- mutex_unlock(&priv->stats_mutex);
-}
-
-static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
-{
- return BCM_SF2_STATS_SIZE;
-}
-
-static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **_priv)
+static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
{
- struct bcm_sf2_priv *priv;
-
- priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return NULL;
- *_priv = priv;
-
- return "Broadcom Starfighter 2";
+ return DSA_TAG_PROTO_BRCM;
}
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
u32 reg;
@@ -172,7 +63,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, val;
/* Enable the port memories */
@@ -237,7 +128,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
reg = core_readl(priv, CORE_EEE_EN_CTRL);
@@ -250,7 +141,7 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
reg = reg_readl(priv, REG_SPHY_CNTRL);
@@ -324,7 +215,7 @@ static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->dst[ds->index].cpu_port;
u32 reg;
@@ -365,7 +256,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
reg &= ~PORT_VLAN_CTRL_MASK;
reg |= (1 << port);
- reg |= priv->port_sts[port].vlan_ctl_mask;
+ reg |= priv->dev->ports[port].vlan_ctl_mask;
core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
bcm_sf2_imp_vlan_setup(ds, cpu_port);
@@ -380,7 +271,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 off, reg;
if (priv->wol_ports_mask & (1 << port))
@@ -412,7 +303,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->port_sts[port].eee;
int ret;
@@ -430,7 +321,7 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->port_sts[port].eee;
u32 reg;
@@ -445,7 +336,7 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
struct phy_device *phydev,
struct ethtool_eee *e)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->port_sts[port].eee;
p->eee_enabled = e->eee_enabled;
@@ -461,469 +352,6 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
return 0;
}
-static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
-{
- unsigned int timeout = 1000;
- u32 reg;
-
- reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
- core_writel(priv, reg, CORE_FAST_AGE_CTRL);
-
- do {
- reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- if (!(reg & FAST_AGE_STR_DONE))
- break;
-
- cpu_relax();
- } while (timeout--);
-
- if (!timeout)
- return -ETIMEDOUT;
-
- core_writel(priv, 0, CORE_FAST_AGE_CTRL);
-
- return 0;
-}
-
-/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
- * flush for that port.
- */
-static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
-
- core_writel(priv, port, CORE_FAST_AGE_PORT);
-
- return bcm_sf2_fast_age_op(priv);
-}
-
-static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
-{
- core_writel(priv, vid, CORE_FAST_AGE_VID);
-
- return bcm_sf2_fast_age_op(priv);
-}
-
-static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
-{
- unsigned int timeout = 10;
- u32 reg;
-
- do {
- reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
- if (!(reg & ARLA_VTBL_STDN))
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
-}
-
-static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
-{
- core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
-
- return bcm_sf2_vlan_op_wait(priv);
-}
-
-static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
- struct bcm_sf2_vlan *vlan)
-{
- int ret;
-
- core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
- core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
- CORE_ARLA_VTBL_ENTRY);
-
- ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
- if (ret)
- pr_err("failed to write VLAN entry\n");
-}
-
-static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
- struct bcm_sf2_vlan *vlan)
-{
- u32 entry;
- int ret;
-
- core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
-
- ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
- if (ret)
- return ret;
-
- entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
- vlan->members = entry & FWD_MAP_MASK;
- vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
-
- return 0;
-}
-
-static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- s8 cpu_port = ds->dst->cpu_port;
- unsigned int i;
- u32 reg, p_ctl;
-
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
-
- priv->port_sts[port].bridge_dev = bridge;
- p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
-
- for (i = 0; i < priv->hw_params.num_ports; i++) {
- if (priv->port_sts[i].bridge_dev != bridge)
- continue;
-
- /* Add this local port to the remote port VLAN control
- * membership and update the remote port bitmask
- */
- reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
- reg |= 1 << port;
- core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
- priv->port_sts[i].vlan_ctl_mask = reg;
-
- p_ctl |= 1 << i;
- }
-
- /* Configure the local port VLAN control membership to include
- * remote ports and update the local port bitmask
- */
- core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
- priv->port_sts[port].vlan_ctl_mask = p_ctl;
-
- return 0;
-}
-
-static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct net_device *bridge = priv->port_sts[port].bridge_dev;
- s8 cpu_port = ds->dst->cpu_port;
- unsigned int i;
- u32 reg, p_ctl;
-
- p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
-
- for (i = 0; i < priv->hw_params.num_ports; i++) {
- /* Don't touch the remaining ports */
- if (priv->port_sts[i].bridge_dev != bridge)
- continue;
-
- reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
- reg &= ~(1 << port);
- core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
- priv->port_sts[port].vlan_ctl_mask = reg;
-
- /* Prevent self removal to preserve isolation */
- if (port != i)
- p_ctl &= ~(1 << i);
- }
-
- core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
- priv->port_sts[port].vlan_ctl_mask = p_ctl;
- priv->port_sts[port].bridge_dev = NULL;
-
- /* Make this port join all VLANs without VLAN entries */
- reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
-}
-
-static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- u8 hw_state, cur_hw_state;
- u32 reg;
-
- reg = core_readl(priv, CORE_G_PCTL_PORT(port));
- cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
-
- switch (state) {
- case BR_STATE_DISABLED:
- hw_state = G_MISTP_DIS_STATE;
- break;
- case BR_STATE_LISTENING:
- hw_state = G_MISTP_LISTEN_STATE;
- break;
- case BR_STATE_LEARNING:
- hw_state = G_MISTP_LEARN_STATE;
- break;
- case BR_STATE_FORWARDING:
- hw_state = G_MISTP_FWD_STATE;
- break;
- case BR_STATE_BLOCKING:
- hw_state = G_MISTP_BLOCK_STATE;
- break;
- default:
- pr_err("%s: invalid STP state: %d\n", __func__, state);
- return;
- }
-
- /* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
- * state (hw_state)
- */
- if (cur_hw_state != hw_state) {
- if (cur_hw_state >= G_MISTP_LEARN_STATE &&
- hw_state <= G_MISTP_LISTEN_STATE) {
- if (bcm_sf2_sw_fast_age_port(ds, port)) {
- pr_err("%s: fast-ageing failed\n", __func__);
- return;
- }
- }
- }
-
- reg = core_readl(priv, CORE_G_PCTL_PORT(port));
- reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
- reg |= hw_state;
- core_writel(priv, reg, CORE_G_PCTL_PORT(port));
-}
-
-/* Address Resolution Logic routines */
-static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
-{
- unsigned int timeout = 10;
- u32 reg;
-
- do {
- reg = core_readl(priv, CORE_ARLA_RWCTL);
- if (!(reg & ARL_STRTDN))
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
-}
-
-static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
-{
- u32 cmd;
-
- if (op > ARL_RW)
- return -EINVAL;
-
- cmd = core_readl(priv, CORE_ARLA_RWCTL);
- cmd &= ~IVL_SVL_SELECT;
- cmd |= ARL_STRTDN;
- if (op)
- cmd |= ARL_RW;
- else
- cmd &= ~ARL_RW;
- core_writel(priv, cmd, CORE_ARLA_RWCTL);
-
- return bcm_sf2_arl_op_wait(priv);
-}
-
-static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
- u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
- bool is_valid)
-{
- unsigned int i;
- int ret;
-
- ret = bcm_sf2_arl_op_wait(priv);
- if (ret)
- return ret;
-
- /* Read the 4 bins */
- for (i = 0; i < 4; i++) {
- u64 mac_vid;
- u32 fwd_entry;
-
- mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
- fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
- bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
-
- if (ent->is_valid && is_valid) {
- *idx = i;
- return 0;
- }
-
- /* This is the MAC we just deleted */
- if (!is_valid && (mac_vid & mac))
- return 0;
- }
-
- return -ENOENT;
-}
-
-static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
- const unsigned char *addr, u16 vid, bool is_valid)
-{
- struct bcm_sf2_arl_entry ent;
- u32 fwd_entry;
- u64 mac, mac_vid = 0;
- u8 idx = 0;
- int ret;
-
- /* Convert the array into a 64-bit MAC */
- mac = bcm_sf2_mac_to_u64(addr);
-
- /* Perform a read for the given MAC and VID */
- core_writeq(priv, mac, CORE_ARLA_MAC);
- core_writel(priv, vid, CORE_ARLA_VID);
-
- /* Issue a read operation for this MAC */
- ret = bcm_sf2_arl_rw_op(priv, 1);
- if (ret)
- return ret;
-
- ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
- /* If this is a read, just finish now */
- if (op)
- return ret;
-
- /* We could not find a matching MAC, so reset to a new entry */
- if (ret) {
- fwd_entry = 0;
- idx = 0;
- }
-
- memset(&ent, 0, sizeof(ent));
- ent.port = port;
- ent.is_valid = is_valid;
- ent.vid = vid;
- ent.is_static = true;
- memcpy(ent.mac, addr, ETH_ALEN);
- bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
-
- core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
- core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
-
- ret = bcm_sf2_arl_rw_op(priv, 0);
- if (ret)
- return ret;
-
- /* Re-read the entry to check */
- return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
-}
-
-static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
-{
- /* We do not need to do anything specific here yet */
- return 0;
-}
-
-static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
-
- if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
- pr_err("%s: failed to add MAC address\n", __func__);
-}
-
-static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
-
- return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
-}
-
-static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
-{
- unsigned timeout = 1000;
- u32 reg;
-
- do {
- reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
- if (!(reg & ARLA_SRCH_STDN))
- return 0;
-
- if (reg & ARLA_SRCH_VLID)
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
-}
-
-static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
- struct bcm_sf2_arl_entry *ent)
-{
- u64 mac_vid;
- u32 fwd_entry;
-
- mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
- fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
- bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
-}
-
-static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
- const struct bcm_sf2_arl_entry *ent,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
-{
- if (!ent->is_valid)
- return 0;
-
- if (port != ent->port)
- return 0;
-
- ether_addr_copy(fdb->addr, ent->mac);
- fdb->vid = ent->vid;
- fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
-
- return cb(&fdb->obj);
-}
-
-static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct net_device *dev = ds->ports[port].netdev;
- struct bcm_sf2_arl_entry results[2];
- unsigned int count = 0;
- int ret;
-
- /* Start search operation */
- core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
-
- do {
- ret = bcm_sf2_arl_search_wait(priv);
- if (ret)
- return ret;
-
- /* Read both entries, then return their values back */
- bcm_sf2_arl_search_rd(priv, 0, &results[0]);
- ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
- if (ret)
- return ret;
-
- bcm_sf2_arl_search_rd(priv, 1, &results[1]);
- ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
- if (ret)
- return ret;
-
- if (!results[0].is_valid && !results[1].is_valid)
- break;
-
- } while (count++ < CORE_ARLA_NUM_ENTRIES);
-
- return 0;
-}
-
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
int regnum, u16 val)
{
@@ -1036,12 +464,10 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_mask_set(priv, 0xffffffff);
intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_mask_set(priv, 0xffffffff);
intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
@@ -1082,7 +508,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct device_node *dn;
static int index;
int err;
@@ -1146,14 +572,9 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
of_node_put(priv->master_mii_dn);
}
-static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- return 0;
-}
-
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
/* The BCM7xxx PHY driver expects to find the integrated PHY revision
* in bits 15:8 and the patch level in bits 7:0 which is exactly what
@@ -1166,7 +587,7 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 id_mode_dis = 0, port_mode;
const char *str = NULL;
u32 reg;
@@ -1246,7 +667,7 @@ force_link:
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 duplex, pause;
u32 reg;
@@ -1298,7 +719,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
bcm_sf2_intr_disable(priv);
@@ -1318,7 +739,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
int ret;
@@ -1345,7 +766,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = ds->dst[ds->index].master_netdev;
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol;
/* Get the parent device WoL settings */
@@ -1368,7 +789,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = ds->dst[ds->index].master_netdev;
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->dst[ds->index].cpu_port;
struct ethtool_wolinfo pwol;
@@ -1393,43 +814,32 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
return p->ethtool_ops->set_wol(p, wol);
}
-static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
+static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
- u32 mgmt, vc0, vc1, vc4, vc5;
+ unsigned int timeout = 10;
+ u32 reg;
- mgmt = core_readl(priv, CORE_SWMODE);
- vc0 = core_readl(priv, CORE_VLAN_CTRL0);
- vc1 = core_readl(priv, CORE_VLAN_CTRL1);
- vc4 = core_readl(priv, CORE_VLAN_CTRL4);
- vc5 = core_readl(priv, CORE_VLAN_CTRL5);
+ do {
+ reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
+ if (!(reg & ARLA_VTBL_STDN))
+ return 0;
- mgmt &= ~SW_FWDG_MODE;
+ usleep_range(1000, 2000);
+ } while (timeout--);
- if (enable) {
- vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
- vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
- vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
- vc4 |= INGR_VID_CHK_DROP;
- vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
- } else {
- vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
- vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
- vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
- vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
- vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
- }
+ return -ETIMEDOUT;
+}
+
+static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
+{
+ core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
- core_writel(priv, vc0, CORE_VLAN_CTRL0);
- core_writel(priv, vc1, CORE_VLAN_CTRL1);
- core_writel(priv, 0, CORE_VLAN_CTRL3);
- core_writel(priv, vc4, CORE_VLAN_CTRL4);
- core_writel(priv, vc5, CORE_VLAN_CTRL5);
- core_writel(priv, mgmt, CORE_SWMODE);
+ return bcm_sf2_vlan_op_wait(priv);
}
static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
/* Clear all VLANs */
@@ -1443,162 +853,199 @@ static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
}
}
-static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port;
+
+ /* Enable all valid ports and disable those unused */
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ /* IMP port receives special treatment */
+ if ((1 << port) & ds->enabled_port_mask)
+ bcm_sf2_port_setup(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ bcm_sf2_imp_setup(ds, port);
+ else
+ bcm_sf2_port_disable(ds, port, NULL);
+ }
+
+ bcm_sf2_sw_configure_vlan(ds);
+
return 0;
}
-static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+/* The SWITCH_CORE register space is managed by b53 but operates on a page +
+ * register basis so we need to translate that into an address that the
+ * bus-glue understands.
+ */
+#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
+
+static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
+ u8 *val)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_priv *priv = dev->priv;
- bcm_sf2_enable_vlan(priv, true);
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
-static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
+ u16 *val)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- s8 cpu_port = ds->dst->cpu_port;
- struct bcm_sf2_vlan *vl;
- u16 vid;
+ struct bcm_sf2_priv *priv = dev->priv;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &priv->vlans[vid];
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
- bcm_sf2_get_vlan_entry(priv, vid, vl);
+ return 0;
+}
- vl->members |= BIT(port) | BIT(cpu_port);
- if (untagged)
- vl->untag |= BIT(port) | BIT(cpu_port);
- else
- vl->untag &= ~(BIT(port) | BIT(cpu_port));
+static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
+ u32 *val)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
- bcm_sf2_set_vlan_entry(priv, vid, vl);
- bcm_sf2_sw_fast_age_vlan(priv, vid);
- }
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
- if (pvid) {
- core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
- core_writel(priv, vlan->vid_end,
- CORE_DEFAULT_1Q_TAG_P(cpu_port));
- bcm_sf2_sw_fast_age_vlan(priv, vid);
- }
+ return 0;
}
-static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
+ u64 *val)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- s8 cpu_port = ds->dst->cpu_port;
- struct bcm_sf2_vlan *vl;
- u16 vid, pvid;
- int ret;
+ struct bcm_sf2_priv *priv = dev->priv;
- pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+ *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &priv->vlans[vid];
-
- ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
- if (ret)
- return ret;
-
- vl->members &= ~BIT(port);
- if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
- vl->members = 0;
- if (pvid == vid)
- pvid = 0;
- if (untagged) {
- vl->untag &= ~BIT(port);
- if ((vl->untag & BIT(port)) == BIT(cpu_port))
- vl->untag = 0;
- }
+ return 0;
+}
- bcm_sf2_set_vlan_entry(priv, vid, vl);
- bcm_sf2_sw_fast_age_vlan(priv, vid);
- }
+static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
+ u8 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
- core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
- core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
- bcm_sf2_sw_fast_age_vlan(priv, vid);
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
-static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct bcm_sf2_port_status *p = &priv->port_sts[port];
- struct bcm_sf2_vlan *vl;
- u16 vid, pvid;
- int err = 0;
+ struct bcm_sf2_priv *priv = dev->priv;
- pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- vl = &priv->vlans[vid];
+ return 0;
+}
- if (!(vl->members & BIT(port)))
- continue;
+static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
- vlan->vid_begin = vlan->vid_end = vid;
- vlan->flags = 0;
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
- if (vl->untag & BIT(port))
- vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
- if (p->pvid == vid)
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ return 0;
+}
- err = cb(&vlan->obj);
- if (err)
- break;
- }
+static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
- return err;
+ core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
}
-static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+static struct b53_io_ops bcm_sf2_io_ops = {
+ .read8 = bcm_sf2_core_read8,
+ .read16 = bcm_sf2_core_read16,
+ .read32 = bcm_sf2_core_read32,
+ .read48 = bcm_sf2_core_read64,
+ .read64 = bcm_sf2_core_read64,
+ .write8 = bcm_sf2_core_write8,
+ .write16 = bcm_sf2_core_write16,
+ .write32 = bcm_sf2_core_write32,
+ .write48 = bcm_sf2_core_write64,
+ .write64 = bcm_sf2_core_write64,
+};
+
+static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct device_node *dn;
+ struct device_node *dn = pdev->dev.of_node;
+ struct b53_platform_data *pdata;
+ struct bcm_sf2_priv *priv;
+ struct b53_device *dev;
+ struct dsa_switch *ds;
void __iomem **base;
- unsigned int port;
+ struct resource *r;
unsigned int i;
u32 reg, rev;
int ret;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Auto-detection using standard registers will not work, so
+ * provide an indication of what kind of device we are for
+ * b53_common to work with
+ */
+ pdata->chip_id = BCM7445_DEVICE_ID;
+ dev->pdata = pdata;
+
+ priv->dev = dev;
+ ds = dev->ds;
+
+ /* Override the parts that are non-standard wrt. normal b53 devices */
+ ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
+ ds->ops->setup = bcm_sf2_sw_setup;
+ ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
+ ds->ops->adjust_link = bcm_sf2_sw_adjust_link;
+ ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update;
+ ds->ops->suspend = bcm_sf2_sw_suspend;
+ ds->ops->resume = bcm_sf2_sw_resume;
+ ds->ops->get_wol = bcm_sf2_sw_get_wol;
+ ds->ops->set_wol = bcm_sf2_sw_set_wol;
+ ds->ops->port_enable = bcm_sf2_port_setup;
+ ds->ops->port_disable = bcm_sf2_port_disable;
+ ds->ops->get_eee = bcm_sf2_sw_get_eee;
+ ds->ops->set_eee = bcm_sf2_sw_set_eee;
+
+ /* Avoid having DSA free our slave MDIO bus (checking for
+ * ds->slave_mii_bus and ds->ops->phy_read being non-NULL)
+ */
+ ds->ops->phy_read = NULL;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
- /* All the interesting properties are at the parent device_node
- * level
- */
- dn = ds->cd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->cd->of_node);
+ bcm_sf2_identify_ports(priv, dn->child);
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- *base = of_iomap(dn, i);
- if (*base == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ *base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(*base)) {
pr_err("unable to find register: %s\n", reg_names[i]);
- ret = -ENOMEM;
- goto out_unmap;
+ return PTR_ERR(*base);
}
base++;
}
@@ -1606,30 +1053,30 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("unable to software reset switch: %d\n", ret);
- goto out_unmap;
+ return ret;
}
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
- goto out_unmap;
+ return ret;
}
/* Disable all interrupts and request them */
bcm_sf2_intr_disable(priv);
- ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
- "switch_0", priv);
+ ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
+ "switch_0", priv);
if (ret < 0) {
pr_err("failed to request switch_0 IRQ\n");
goto out_mdio;
}
- ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
- "switch_1", priv);
+ ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
+ "switch_1", priv);
if (ret < 0) {
pr_err("failed to request switch_1 IRQ\n");
- goto out_free_irq0;
+ goto out_mdio;
}
/* Reset the MIB counters */
@@ -1649,19 +1096,6 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
&priv->hw_params.num_gphy))
priv->hw_params.num_gphy = 1;
- /* Enable all valid ports and disable those unused */
- for (port = 0; port < priv->hw_params.num_ports; port++) {
- /* IMP port receives special treatment */
- if ((1 << port) & ds->enabled_port_mask)
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- else
- bcm_sf2_port_disable(ds, port, NULL);
- }
-
- bcm_sf2_sw_configure_vlan(ds);
-
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
SWITCH_TOP_REV_MASK;
@@ -1670,6 +1104,10 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
rev = reg_readl(priv, REG_PHY_REVISION);
priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+ ret = b53_switch_register(dev);
+ if (ret)
+ goto out_mdio;
+
pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
@@ -1677,66 +1115,60 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
return 0;
-out_free_irq0:
- free_irq(priv->irq0, priv);
out_mdio:
bcm_sf2_mdio_unregister(priv);
-out_unmap:
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- if (*base)
- iounmap(*base);
- base++;
- }
return ret;
}
-static struct dsa_switch_driver bcm_sf2_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_BRCM,
- .probe = bcm_sf2_sw_drv_probe,
- .setup = bcm_sf2_sw_setup,
- .set_addr = bcm_sf2_sw_set_addr,
- .get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .get_strings = bcm_sf2_sw_get_strings,
- .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
- .get_sset_count = bcm_sf2_sw_get_sset_count,
- .adjust_link = bcm_sf2_sw_adjust_link,
- .fixed_link_update = bcm_sf2_sw_fixed_link_update,
- .suspend = bcm_sf2_sw_suspend,
- .resume = bcm_sf2_sw_resume,
- .get_wol = bcm_sf2_sw_get_wol,
- .set_wol = bcm_sf2_sw_set_wol,
- .port_enable = bcm_sf2_port_setup,
- .port_disable = bcm_sf2_port_disable,
- .get_eee = bcm_sf2_sw_get_eee,
- .set_eee = bcm_sf2_sw_set_eee,
- .port_bridge_join = bcm_sf2_sw_br_join,
- .port_bridge_leave = bcm_sf2_sw_br_leave,
- .port_stp_state_set = bcm_sf2_sw_br_set_stp_state,
- .port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
- .port_fdb_add = bcm_sf2_sw_fdb_add,
- .port_fdb_del = bcm_sf2_sw_fdb_del,
- .port_fdb_dump = bcm_sf2_sw_fdb_dump,
- .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
- .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
- .port_vlan_add = bcm_sf2_sw_vlan_add,
- .port_vlan_del = bcm_sf2_sw_vlan_del,
- .port_vlan_dump = bcm_sf2_sw_vlan_dump,
-};
-
-static int __init bcm_sf2_init(void)
+static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
- register_switch_driver(&bcm_sf2_switch_driver);
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* Disable all ports and interrupts */
+ priv->wol_ports_mask = 0;
+ bcm_sf2_sw_suspend(priv->dev->ds);
+ dsa_unregister_switch(priv->dev->ds);
+ bcm_sf2_mdio_unregister(priv);
return 0;
}
-module_init(bcm_sf2_init);
-static void __exit bcm_sf2_exit(void)
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sf2_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ return dsa_switch_suspend(priv->dev->ds);
+}
+
+static int bcm_sf2_resume(struct device *dev)
{
- unregister_switch_driver(&bcm_sf2_switch_driver);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ return dsa_switch_resume(priv->dev->ds);
}
-module_exit(bcm_sf2_exit);
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
+ bcm_sf2_suspend, bcm_sf2_resume);
+
+static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm7445-switch-v4.0" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver bcm_sf2_driver = {
+ .probe = bcm_sf2_sw_probe,
+ .remove = bcm_sf2_sw_remove,
+ .driver = {
+ .name = "brcm-sf2",
+ .of_match_table = bcm_sf2_of_match,
+ .pm = &bcm_sf2_pm_ops,
+ },
+};
+module_platform_driver(bcm_sf2_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index dd446e466699..44692673e1d5 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -26,6 +26,7 @@
#include <net/dsa.h>
#include "bcm_sf2_regs.h"
+#include "b53/b53_priv.h"
struct bcm_sf2_hw_params {
u16 top_rev;
@@ -49,72 +50,8 @@ struct bcm_sf2_port_status {
unsigned int link;
struct ethtool_eee eee;
-
- u32 vlan_ctl_mask;
- u16 pvid;
-
- struct net_device *bridge_dev;
-};
-
-struct bcm_sf2_arl_entry {
- u8 port;
- u8 mac[ETH_ALEN];
- u16 vid;
- u8 is_valid:1;
- u8 is_age:1;
- u8 is_static:1;
};
-struct bcm_sf2_vlan {
- u16 members;
- u16 untag;
-};
-
-static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
-{
- unsigned int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
-}
-
-static inline u64 bcm_sf2_mac_to_u64(const u8 *src)
-{
- unsigned int i;
- u64 dst = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
-
- return dst;
-}
-
-static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent,
- u64 mac_vid, u32 fwd_entry)
-{
- memset(ent, 0, sizeof(*ent));
- ent->port = fwd_entry & PORTID_MASK;
- ent->is_valid = !!(fwd_entry & ARL_VALID);
- ent->is_age = !!(fwd_entry & ARL_AGE);
- ent->is_static = !!(fwd_entry & ARL_STATIC);
- bcm_sf2_mac_from_u64(mac_vid, ent->mac);
- ent->vid = mac_vid >> VID_SHIFT;
-}
-
-static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
- const struct bcm_sf2_arl_entry *ent)
-{
- *mac_vid = bcm_sf2_mac_to_u64(ent->mac);
- *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT;
- *fwd_entry = ent->port & PORTID_MASK;
- if (ent->is_valid)
- *fwd_entry |= ARL_VALID;
- if (ent->is_static)
- *fwd_entry |= ARL_STATIC;
- if (ent->is_age)
- *fwd_entry |= ARL_AGE;
-}
-
struct bcm_sf2_priv {
/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
void __iomem *core;
@@ -134,6 +71,9 @@ struct bcm_sf2_priv {
u32 irq1_stat;
u32 irq1_mask;
+ /* Backing b53_device */
+ struct b53_device *dev;
+
/* Mutex protecting access to the MIB counters */
struct mutex stats_mutex;
@@ -155,16 +95,14 @@ struct bcm_sf2_priv {
struct device_node *master_mii_dn;
struct mii_bus *slave_mii_bus;
struct mii_bus *master_mii_bus;
-
- /* Cache of programmed VLANs */
- struct bcm_sf2_vlan vlans[VLAN_N_VID];
};
-struct bcm_sf2_hw_stats {
- const char *string;
- u16 reg;
- u8 sizeof_stat;
-};
+static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds->priv;
+
+ return dev->priv;
+}
#define SF2_IO_MACRO(name) \
static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
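
The bcm_sf2_to_priv() helper above relies on a two-level indirection: ds->priv
now points at the backing b53_device, whose own priv pointer (the one passed to
b53_switch_alloc() in the probe path) is the bcm_sf2_priv. Below is a minimal
user-space sketch of that ownership chain, using simplified stand-in structures
rather than the real kernel ones; the assignment of ds->priv is assumed to
happen inside b53_switch_alloc(), which is not shown in this patch.

    #include <assert.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel structures, illustration only. */
    struct dsa_switch   { void *priv; };                        /* -> b53_device    */
    struct b53_device   { struct dsa_switch *ds; void *priv; }; /* -> bcm_sf2_priv  */
    struct bcm_sf2_priv { struct b53_device *dev; };

    static struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
    {
    	struct b53_device *dev = ds->priv;

    	return dev->priv;
    }

    int main(void)
    {
    	struct dsa_switch *ds = calloc(1, sizeof(*ds));
    	struct b53_device *dev = calloc(1, sizeof(*dev));
    	struct bcm_sf2_priv *priv = calloc(1, sizeof(*priv));

    	/* Mirrors b53_switch_alloc() + bcm_sf2_sw_probe(): the dsa_switch
    	 * private data is the b53_device, and the b53_device private data
    	 * is the glue driver's bcm_sf2_priv.
    	 */
    	ds->priv = dev;
    	dev->ds = ds;
    	dev->priv = priv;
    	priv->dev = dev;

    	assert(bcm_sf2_to_priv(ds) == priv);
    	free(priv); free(dev); free(ds);
    	return 0;
    }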
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 9f2a9cb42074..838fe373cd6f 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -115,14 +115,6 @@
#define RX_BCST_EN (1 << 2)
#define RX_MCST_EN (1 << 3)
#define RX_UCST_EN (1 << 4)
-#define G_MISTP_STATE_SHIFT 5
-#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT)
-#define G_MISTP_STATE_MASK 0x7
#define CORE_SWMODE 0x0002c
#define SW_FWDG_MODE (1 << 0)
@@ -205,75 +197,11 @@
#define BRCM_HDR_EN_P5 (1 << 1)
#define BRCM_HDR_EN_P7 (1 << 2)
-#define CORE_BRCM_HDR_CTRL2 0x0828
-
-#define CORE_HL_PRTC_CTRL 0x0940
-#define ARP_EN (1 << 0)
-#define RARP_EN (1 << 1)
-#define DHCP_EN (1 << 2)
-#define ICMPV4_EN (1 << 3)
-#define ICMPV6_EN (1 << 4)
-#define ICMPV6_FWD_MODE (1 << 5)
-#define IGMP_DIP_EN (1 << 8)
-#define IGMP_RPTLVE_EN (1 << 9)
-#define IGMP_RTPLVE_FWD_MODE (1 << 10)
-#define IGMP_QRY_EN (1 << 11)
-#define IGMP_QRY_FWD_MODE (1 << 12)
-#define IGMP_UKN_EN (1 << 13)
-#define IGMP_UKN_FWD_MODE (1 << 14)
-#define MLD_RPTDONE_EN (1 << 15)
-#define MLD_RPTDONE_FWD_MODE (1 << 16)
-#define MLD_QRY_EN (1 << 17)
-#define MLD_QRY_FWD_MODE (1 << 18)
-
#define CORE_RST_MIB_CNT_EN 0x0950
#define CORE_BRCM_HDR_RX_DIS 0x0980
#define CORE_BRCM_HDR_TX_DIS 0x0988
-#define CORE_ARLA_NUM_ENTRIES 1024
-
-#define CORE_ARLA_RWCTL 0x1400
-#define ARL_RW (1 << 0)
-#define IVL_SVL_SELECT (1 << 6)
-#define ARL_STRTDN (1 << 7)
-
-#define CORE_ARLA_MAC 0x1408
-#define CORE_ARLA_VID 0x1420
-#define ARLA_VIDTAB_INDX_MASK 0x1fff
-
-#define CORE_ARLA_MACVID0 0x1440
-#define MAC_MASK 0xffffffffff
-#define VID_SHIFT 48
-#define VID_MASK 0xfff
-
-#define CORE_ARLA_FWD_ENTRY0 0x1460
-#define PORTID_MASK 0x1ff
-#define ARL_CON_SHIFT 9
-#define ARL_CON_MASK 0x3
-#define ARL_PRI_SHIFT 11
-#define ARL_PRI_MASK 0x7
-#define ARL_AGE (1 << 14)
-#define ARL_STATIC (1 << 15)
-#define ARL_VALID (1 << 16)
-
-#define CORE_ARLA_MACVID_ENTRY(x) (CORE_ARLA_MACVID0 + ((x) * 0x40))
-#define CORE_ARLA_FWD_ENTRY(x) (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40))
-
-#define CORE_ARLA_SRCH_CTL 0x1540
-#define ARLA_SRCH_VLID (1 << 0)
-#define IVL_SVL_SELECT (1 << 6)
-#define ARLA_SRCH_STDN (1 << 7)
-
-#define CORE_ARLA_SRCH_ADR 0x1544
-#define ARLA_SRCH_ADR_VALID (1 << 15)
-
-#define CORE_ARLA_SRCH_RSLT_0_MACVID 0x1580
-#define CORE_ARLA_SRCH_RSLT_0 0x15a0
-
-#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
-#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
-
#define CORE_ARLA_VTBL_RWCTRL 0x1600
#define ARLA_VTBL_CMD_WRITE 0
#define ARLA_VTBL_CMD_READ 1
@@ -297,59 +225,9 @@
#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \
((x) * P_TXQ_PSM_VDD_SHIFT))
-#define CORE_P0_MIB_OFFSET 0x8000
-#define P_MIB_SIZE 0x400
-#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE)
-
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
-#define CORE_VLAN_CTRL0 0xd000
-#define CHANGE_1P_VID_INNER (1 << 0)
-#define CHANGE_1P_VID_OUTER (1 << 1)
-#define CHANGE_1Q_VID (1 << 3)
-#define VLAN_LEARN_MODE_SVL (0 << 5)
-#define VLAN_LEARN_MODE_IVL (3 << 5)
-#define VLAN_EN (1 << 7)
-
-#define CORE_VLAN_CTRL1 0xd004
-#define EN_RSV_MCAST_FWDMAP (1 << 2)
-#define EN_RSV_MCAST_UNTAG (1 << 3)
-#define EN_IPMC_BYPASS_FWDMAP (1 << 5)
-#define EN_IPMC_BYPASS_UNTAG (1 << 6)
-
-#define CORE_VLAN_CTRL2 0xd008
-#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2)
-#define EN_GMRP_GVRP_V_FWDMAP (1 << 5)
-#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6)
-
-#define CORE_VLAN_CTRL3 0xd00c
-#define EN_DROP_NON1Q_MASK 0x1ff
-
-#define CORE_VLAN_CTRL4 0xd014
-#define RESV_MCAST_FLOOD (1 << 1)
-#define EN_DOUBLE_TAG_MASK 0x3
-#define EN_DOUBLE_TAG_SHIFT 2
-#define EN_MGE_REV_GMRP (1 << 4)
-#define EN_MGE_REV_GVRP (1 << 5)
-#define INGR_VID_CHK_SHIFT 6
-#define INGR_VID_CHK_MASK 0x3
-#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT)
-#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT)
-#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT)
-#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT)
-
-#define CORE_VLAN_CTRL5 0xd018
-#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0)
-#define EN_VID_FFF_FWD (1 << 2)
-#define DROP_VTABLE_MISS (1 << 3)
-#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4)
-#define PRESV_NON1Q (1 << 6)
-
-#define CORE_VLAN_CTRL6 0xd01c
-#define STRICT_SFD_DETECT (1 << 0)
-#define DIS_ARL_BUST_LMIT (1 << 4)
-
#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
#define CFI_SHIFT 12
#define PRI_SHIFT 13
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index e36b40886bd8..7ce36dbd9b62 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -19,7 +19,7 @@
static int reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6060_priv *priv = ds_to_priv(ds);
+ struct mv88e6060_priv *priv = ds->priv;
return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
@@ -37,7 +37,7 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mv88e6060_priv *priv = ds_to_priv(ds);
+ struct mv88e6060_priv *priv = ds->priv;
return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
@@ -69,6 +69,11 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
return NULL;
}
+static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_TRAILER;
+}
+
static const char *mv88e6060_drv_probe(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **_priv)
@@ -247,8 +252,8 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(ds, addr, regnum, val);
}
-static struct dsa_switch_driver mv88e6060_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_TRAILER,
+static struct dsa_switch_ops mv88e6060_switch_ops = {
+ .get_tag_protocol = mv88e6060_get_tag_protocol,
.probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
.set_addr = mv88e6060_set_addr,
@@ -258,14 +263,14 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
static int __init mv88e6060_init(void)
{
- register_switch_driver(&mv88e6060_switch_driver);
+ register_switch_driver(&mv88e6060_switch_ops);
return 0;
}
module_init(mv88e6060_init);
static void __exit mv88e6060_cleanup(void)
{
- unregister_switch_driver(&mv88e6060_switch_driver);
+ unregister_switch_driver(&mv88e6060_switch_ops);
}
module_exit(mv88e6060_cleanup);
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 490bc06f993e..486668813e15 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,6 +2,18 @@ config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
select NET_DSA_TAG_EDSA
+ select NET_DSA_TAG_DSA
help
This driver adds support for most of the Marvell 88E6xxx models of
Ethernet switch chips, except 88E6060.
+
+config NET_DSA_MV88E6XXX_GLOBAL2
+ bool "Switch Global 2 Registers support"
+ default y
+ depends on NET_DSA_MV88E6XXX
+ help
+	  This register set at internal SMI address 0x1C provides extended
+	  features like an EEPROM interface, trunking, cross-chip setup, etc.
+
+	  It is required on most chips. If the chip you are compiling support
+	  for does not have such a register set, say N here. If in doubt, say Y.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 6e29a75ee2f7..10ce820daa48 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1 +1,4 @@
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
+mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += global1.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 710679067594..883fd9809dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -29,7 +29,10 @@
#include <linux/phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
+
#include "mv88e6xxx.h"
+#include "global1.h"
+#include "global2.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -95,7 +98,7 @@ static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = {
.read = mv88e6xxx_smi_single_chip_read,
.write = mv88e6xxx_smi_single_chip_write,
};
@@ -177,13 +180,12 @@ static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = {
.read = mv88e6xxx_smi_multi_chip_read,
.write = mv88e6xxx_smi_multi_chip_write,
};
-static int mv88e6xxx_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
+int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val)
{
int err;
@@ -199,8 +201,7 @@ static int mv88e6xxx_read(struct mv88e6xxx_chip *chip,
return 0;
}
-static int mv88e6xxx_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
+int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
{
int err;
@@ -216,89 +217,172 @@ static int mv88e6xxx_write(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Indirect write to single pointer-data register with an Update bit */
-static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 update)
+static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val)
{
- u16 val;
- int i, err;
+ int addr = chip->info->port_base_addr + port;
- /* Wait until the previous operation is completed */
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_read(chip, addr, reg, &val);
- if (err)
- return err;
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
- if (!(val & BIT(15)))
- break;
+static int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 *val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write(chip, addr, reg, val);
+}
+
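+/* Paged PHY access: select the page through register 22 (PHY_PAGE) */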
+static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
+{
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PHY_PAGE))
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
+}
+
+static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
+{
+ int err;
+
+ /* Restore PHY page Copper 0x0 for access via the registered MDIO bus */
+ err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER);
+ if (unlikely(err)) {
+ dev_err(chip->dev, "failed to restore PHY %d page Copper (%d)\n",
+ phy, err);
}
+}
+
+static int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val)
+{
+ int err;
- if (i == 16)
- return -ETIMEDOUT;
+ /* There is no paging for register 22 (the page register itself) */
+ if (reg == PHY_PAGE)
+ return -EINVAL;
- /* Set the Update bit to trigger a write operation */
- val = BIT(15) | update;
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_read(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
- return mv88e6xxx_write(chip, addr, reg, val);
+ return err;
}
-static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
+static int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val)
{
- u16 val;
int err;
- err = mv88e6xxx_read(chip, addr, reg, &val);
- if (err)
- return err;
+ /* There is no paging for register 22 (the page register itself) */
+ if (reg == PHY_PAGE)
+ return -EINVAL;
- return val;
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
}
-static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val)
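+/* SerDes registers sit behind the Fiber page of a dedicated PHY address */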
+static int mv88e6xxx_serdes_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
- return mv88e6xxx_write(chip, addr, reg, val);
+ return mv88e6xxx_phy_page_read(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
+ reg, val);
}
-static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip,
- int addr, int regnum)
+static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
- if (addr >= 0)
- return _mv88e6xxx_reg_read(chip, addr, regnum);
- return 0xffff;
+ return mv88e6xxx_phy_page_write(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
+ reg, val);
}
-static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip,
- int addr, int regnum, u16 val)
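+/* Poll a register until the masked bits clear, timing out after 16 attempts */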
+int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
{
- if (addr >= 0)
- return _mv88e6xxx_reg_write(chip, addr, regnum, val);
- return 0;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_read(chip, addr, reg, &val);
+ if (err)
+ return err;
+
+ if (!(val & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(chip->dev, "Timeout while waiting for switch\n");
+ return -ETIMEDOUT;
+}
+
+/* Indirect write to single pointer-data register with an Update bit */
+int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
+{
+ u16 val;
+ int err;
+
+ /* Wait until the previous operation is completed */
+ err = mv88e6xxx_wait(chip, addr, reg, BIT(15));
+ if (err)
+ return err;
+
+ /* Set the Update bit to trigger a write operation */
+ val = BIT(15) | update;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
}
static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
- int ret;
- unsigned long timeout;
+ u16 val;
+ int i, err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret & ~GLOBAL_CONTROL_PPU_ENABLE);
- if (ret)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) !=
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING)
return 0;
}
@@ -307,27 +391,25 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
- int ret, err;
- unsigned long timeout;
+ u16 val;
+ int i, err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret | GLOBAL_CONTROL_PPU_ENABLE);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val | GLOBAL_CONTROL_PPU_ENABLE);
if (err)
return err;
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) ==
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING)
return 0;
}
@@ -400,32 +482,37 @@ static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}
-static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr,
- int regnum)
+static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
{
- int ret;
+ del_timer_sync(&chip->ppu_timer);
+}
+
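+/* Direct PHY access performed with the PHY Polling Unit (PPU) disabled */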
+static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 *val)
+{
+ int err;
- ret = mv88e6xxx_ppu_access_get(chip);
- if (ret >= 0) {
- ret = _mv88e6xxx_reg_read(chip, addr, regnum);
+ err = mv88e6xxx_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_read(chip, addr, reg, val);
mv88e6xxx_ppu_access_put(chip);
}
- return ret;
+ return err;
}
-static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr,
- int regnum, u16 val)
+static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 val)
{
- int ret;
+ int err;
- ret = mv88e6xxx_ppu_access_get(chip);
- if (ret >= 0) {
- ret = _mv88e6xxx_reg_write(chip, addr, regnum, val);
+ err = mv88e6xxx_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_write(chip, addr, reg, val);
mv88e6xxx_ppu_access_put(chip);
}
- return ret;
+ return err;
}
static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
@@ -468,21 +555,6 @@ static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6352;
}
-static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
-{
- return chip->info->num_databases;
-}
-
-static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
-{
- /* Does the device have dedicated FID registers for ATU and VTU ops? */
- if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
- return true;
-
- return false;
-}
-
/* We expect the switch to perform auto negotiation if there is a real
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
@@ -490,24 +562,24 @@ static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- u32 reg;
- int ret;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 reg;
+ int err;
if (!phy_is_pseudo_fixed_link(phydev))
return;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
goto out;
- reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
- PORT_PCS_CTRL_FORCE_LINK |
- PORT_PCS_CTRL_DUPLEX_FULL |
- PORT_PCS_CTRL_FORCE_DUPLEX |
- PORT_PCS_CTRL_UNFORCED);
+ reg &= ~(PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX |
+ PORT_PCS_CTRL_UNFORCED);
reg |= PORT_PCS_CTRL_FORCE_LINK;
if (phydev->link)
@@ -536,7 +608,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
- (port >= chip->info->num_ports - 2)) {
+ (port >= mv88e6xxx_num_ports(chip) - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -545,7 +617,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
+ mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
out:
mutex_unlock(&chip->reg_lock);
@@ -553,12 +625,12 @@ out:
static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
{
- int ret;
- int i;
+ u16 val;
+ int i, err;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
- if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_OP, &val);
+ if (err)
+ return err;
+
+ if ((val & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -567,55 +639,52 @@ static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
- int ret;
+ int err;
if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (err)
+ return err;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
- return ret;
-
- return 0;
+ return _mv88e6xxx_stats_wait(chip);
}
static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
int stat, u32 *val)
{
- u32 _val;
- int ret;
+ u32 value;
+ u16 reg;
+ int err;
*val = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
- if (ret < 0)
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ if (err)
return;
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
+ err = _mv88e6xxx_stats_wait(chip);
+ if (err)
return;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
- if (ret < 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
+ if (err)
return;
- _val = ret << 16;
+ value = reg << 16;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
- if (ret < 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
+ if (err)
return;
- *val = _val | ret;
+ *val = value | reg;
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
@@ -705,22 +774,22 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
{
u32 low;
u32 high = 0;
- int ret;
+ int err;
+ u16 reg;
u64 value;
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
+ if (err)
return UINT64_MAX;
- low = ret;
+ low = reg;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
- s->reg + 1);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+ if (err)
return UINT64_MAX;
- high = ret;
+ high = reg;
}
break;
case BANK0:
@@ -736,7 +805,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
uint8_t *data)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_hw_stat *stat;
int i, j;
@@ -752,7 +821,7 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_hw_stat *stat;
int i, j;
@@ -767,7 +836,7 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_hw_stat *stat;
int ret;
int i, j;
@@ -798,7 +867,9 @@ static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+ u16 reg;
u16 *p = _p;
int i;
@@ -809,170 +880,106 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
for (i = 0; i < 32; i++) {
- int ret;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
- if (ret >= 0)
- p[i] = ret;
+ err = mv88e6xxx_port_read(chip, port, i, &reg);
+ if (!err)
+ p[i] = reg;
}
mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset,
- u16 mask)
-{
- unsigned long timeout = jiffies + HZ / 10;
-
- while (time_before(jiffies, timeout)) {
- int ret;
-
- ret = _mv88e6xxx_reg_read(chip, reg, offset);
- if (ret < 0)
- return ret;
- if (!(ret & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
- return -ETIMEDOUT;
-}
-
-static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip)
-{
- return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_BUSY);
-}
-
static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
- GLOBAL_ATU_OP_BUSY);
-}
-
-static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip,
- int addr, int regnum)
-{
- int ret;
-
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_READ | (addr << 5) |
- regnum);
- if (ret < 0)
- return ret;
-
- ret = mv88e6xxx_mdio_wait(chip);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA);
-
- return ret;
-}
-
-static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip,
- int addr, int regnum, u16 val)
-{
- int ret;
-
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
- regnum);
-
- return mv88e6xxx_mdio_wait(chip);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
}
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int reg;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 reg;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- reg = mv88e6xxx_mdio_read_indirect(chip, port, 16);
- if (reg < 0)
+ err = mv88e6xxx_phy_read(chip, port, 16, &reg);
+ if (err)
goto out;
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
- if (reg < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
goto out;
e->eee_active = !!(reg & PORT_STATUS_EEE);
- reg = 0;
-
out:
mutex_unlock(&chip->reg_lock);
- return reg;
+
+ return err;
}
static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
struct phy_device *phydev, struct ethtool_eee *e)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int reg;
- int ret;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 reg;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_mdio_read_indirect(chip, port, 16);
- if (ret < 0)
+ err = mv88e6xxx_phy_read(chip, port, 16, &reg);
+ if (err)
goto out;
- reg = ret & ~0x0300;
+ reg &= ~0x0300;
if (e->eee_enabled)
reg |= 0x0200;
if (e->tx_lpi_enabled)
reg |= 0x0100;
- ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg);
+ err = mv88e6xxx_phy_write(chip, port, 16, reg);
out:
mutex_unlock(&chip->reg_lock);
- return ret;
+ return err;
}
static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
{
- int ret;
+ u16 val;
+ int err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
- fid);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- (ret & 0xfff) |
- ((fid << 8) & 0xf000));
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ (val & 0xfff) | ((fid << 8) & 0xf000));
+ if (err)
+ return err;
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
cmd |= fid & 0xf;
}
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
+ if (err)
+ return err;
return _mv88e6xxx_atu_wait(chip);
}
@@ -997,7 +1004,7 @@ static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
}
static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
@@ -1073,57 +1080,45 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
u8 state)
{
struct dsa_switch *ds = chip->ds;
- int reg, ret = 0;
+ u16 reg;
+ int err;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
oldstate = reg & PORT_CONTROL_STATE_MASK;
- if (oldstate != state) {
- /* Flush forwarding database if we're moving a port
- * from Learning or Forwarding state to Disabled or
- * Blocking or Listening state.
- */
- if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
- oldstate == PORT_CONTROL_STATE_FORWARDING) &&
- (state == PORT_CONTROL_STATE_DISABLED ||
- state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
- if (ret)
- return ret;
- }
+ reg &= ~PORT_CONTROL_STATE_MASK;
+ reg |= state;
- reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
- netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
- mv88e6xxx_port_state_names[state],
- mv88e6xxx_port_state_names[oldstate]);
- }
+ netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
+ mv88e6xxx_port_state_names[state],
+ mv88e6xxx_port_state_names[oldstate]);
- return ret;
+ return 0;
}
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
struct net_device *bridge = chip->ports[port].bridge_dev;
- const u16 mask = (1 << chip->info->num_ports) - 1;
+ const u16 mask = (1 << mv88e6xxx_num_ports(chip)) - 1;
struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
- int reg;
+ u16 reg;
+ int err;
int i;
/* allow CPU port or DSA link(s) to send frames to every port */
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
if (bridge && chip->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
@@ -1137,20 +1132,20 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
}
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int stp_state;
int err;
@@ -1181,27 +1176,39 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
mv88e6xxx_port_state_names[stp_state]);
}
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = _mv88e6xxx_atu_remove(chip, 0, port, false);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
+}
+
static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
u16 *new, u16 *old)
{
struct dsa_switch *ds = chip->ds;
- u16 pvid;
- int ret;
+ u16 pvid, reg;
+ int err;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ if (err)
+ return err;
- pvid = ret & PORT_DEFAULT_VLAN_MASK;
+ pvid = reg & PORT_DEFAULT_VLAN_MASK;
if (new) {
- ret &= ~PORT_DEFAULT_VLAN_MASK;
- ret |= *new & PORT_DEFAULT_VLAN_MASK;
+ reg &= ~PORT_DEFAULT_VLAN_MASK;
+ reg |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_DEFAULT_VLAN, ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"DefaultVID %d (was %d)\n", *new, pvid);
@@ -1227,17 +1234,16 @@ static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
- GLOBAL_VTU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
}
static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
{
- int ret;
+ int err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_wait(chip);
}
@@ -1254,23 +1260,21 @@ static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
}
static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3];
- int i;
- int ret;
+ int i, err;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i);
- if (ret < 0)
- return ret;
+ u16 *reg = &regs[i];
- regs[i] = ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
@@ -1281,26 +1285,25 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
}
static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
}
static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3] = { 0 };
- int i;
- int ret;
+ int i, err;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
@@ -1308,86 +1311,85 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i, regs[i]);
- if (ret < 0)
- return ret;
+ u16 reg = regs[i];
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
return 0;
}
static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
}
static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
}
static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
{
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
- vid & GLOBAL_VTU_VID_MASK);
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
}
static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.vid = ret & GLOBAL_VTU_VID_MASK;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.vid = val & GLOBAL_VTU_VID_MASK;
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_vtu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_read(chip, &next);
+ if (err)
+ return err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_FID);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
+ if (err)
+ return err;
- next.fid = ret & GLOBAL_VTU_FID_MASK;
+ next.fid = val & GLOBAL_VTU_FID_MASK;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
*/
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_OP);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
+ if (err)
+ return err;
- next.fid = (ret & 0xf00) >> 4;
- next.fid |= ret & 0xf;
+ next.fid = (val & 0xf00) >> 4;
+ next.fid |= val & 0xf;
}
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
}
}
@@ -1399,8 +1401,8 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- struct mv88e6xxx_vtu_stu_entry next;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry next;
u16 pvid;
int err;
@@ -1451,38 +1453,36 @@ unlock:
}
static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port member tags */
- ret = mv88e6xxx_vtu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_write(chip, entry);
+ if (err)
+ return err;
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
}
- if (mv88e6xxx_has_fid_reg(chip)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, reg);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
@@ -1494,48 +1494,49 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, op);
}
static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- sid & GLOBAL_VTU_SID_MASK);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID,
+ sid & GLOBAL_VTU_SID_MASK);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_stu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_read(chip, &next);
+ if (err)
+ return err;
}
*entry = next;
@@ -1543,33 +1544,33 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
}
static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port states */
- ret = mv88e6xxx_stu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_write(chip, entry);
+ if (err)
+ return err;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
@@ -1580,7 +1581,8 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
struct dsa_switch *ds = chip->ds;
u16 upper_mask;
u16 fid;
- int ret;
+ u16 reg;
+ int err;
if (mv88e6xxx_num_databases(chip) == 4096)
upper_mask = 0xff;
@@ -1590,37 +1592,35 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
return -EOPNOTSUPP;
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
- fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+ fid = (reg & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
if (new) {
- ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
- ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+ reg &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ reg |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ if (err)
+ return err;
- fid |= (ret & upper_mask) << 4;
+ fid |= (reg & upper_mask) << 4;
if (new) {
- ret &= ~upper_mask;
- ret |= (*new >> 4) & upper_mask;
+ reg &= ~upper_mask;
+ reg |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"FID %d (was %d)\n", *new, fid);
@@ -1647,13 +1647,13 @@ static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
err = _mv88e6xxx_port_fid_get(chip, i, fid);
if (err)
return err;
@@ -1689,10 +1689,10 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.valid = true,
.vid = vid,
};
@@ -1703,14 +1703,14 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
- struct mv88e6xxx_vtu_stu_entry vstp;
+ struct mv88e6xxx_vtu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
* implemented, only one STU entry is needed to cover all VTU
@@ -1737,7 +1737,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
}
static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+ struct mv88e6xxx_vtu_entry *entry, bool creat)
{
int err;
@@ -1768,8 +1768,8 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
if (!vid_begin)
@@ -1792,7 +1792,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (vlan.vid > vid_end)
break;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -1829,29 +1829,29 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
- int ret;
+ u16 reg;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
goto unlock;
- old = ret & PORT_CONTROL_2_8021Q_MASK;
+ old = reg & PORT_CONTROL_2_8021Q_MASK;
if (new != old) {
- ret &= ~PORT_CONTROL_2_8021Q_MASK;
- ret |= new & PORT_CONTROL_2_8021Q_MASK;
+ reg &= ~PORT_CONTROL_2_8021Q_MASK;
+ reg |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
- ret);
- if (ret < 0)
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
goto unlock;
netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
@@ -1859,11 +1859,11 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
mv88e6xxx_port_8021q_mode_names[old]);
}
- ret = 0;
+ err = 0;
unlock:
mutex_unlock(&chip->reg_lock);
- return ret;
+ return err;
}
static int
@@ -1871,7 +1871,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
@@ -1894,7 +1894,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
u16 vid, bool untagged)
{
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
@@ -1912,7 +1912,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
@@ -1939,7 +1939,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
int port, u16 vid)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
@@ -1954,7 +1954,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
@@ -1974,7 +1974,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
u16 pvid, vid;
int err = 0;
@@ -2008,14 +2008,13 @@ unlock:
static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
const unsigned char *addr)
{
- int i, ret;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_write(
- chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
- (addr[i * 2] << 8) | addr[i * 2 + 1]);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
+ (addr[i * 2] << 8) | addr[i * 2 + 1]);
+ if (err)
+ return err;
}
return 0;
@@ -2024,15 +2023,16 @@ static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
unsigned char *addr)
{
- int i, ret;
+ u16 val;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_ATU_MAC_01 + i);
- if (ret < 0)
- return ret;
- addr[i * 2] = ret >> 8;
- addr[i * 2 + 1] = ret & 0xff;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ if (err)
+ return err;
+
+ addr[i * 2] = val >> 8;
+ addr[i * 2 + 1] = val & 0xff;
}
return 0;
@@ -2058,12 +2058,48 @@ static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
-static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+
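+/* Walk the FID's address database with GetNext and return the entry
+ * matching addr, or a blank entry for it if none is found.
+ */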
+static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
+ const u8 *addr, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry = { 0 };
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_atu_entry next;
+ int err;
+
+ eth_broadcast_addr(next.mac);
+
+ err = _mv88e6xxx_atu_mac_write(chip, next.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(chip, fid, &next);
+ if (err)
+ return err;
+
+ if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (ether_addr_equal(next.mac, addr)) {
+ *entry = next;
+ return 0;
+ }
+ } while (!is_broadcast_ether_addr(next.mac));
+
+ memset(entry, 0, sizeof(*entry));
+ entry->fid = fid;
+ ether_addr_copy(entry->mac, addr);
+
+ return 0;
+}
+
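+/* Add this port to the ATU entry for addr in the VLAN's database, or
+ * remove it and purge the entry once no port references it anymore.
+ */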
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_atu_entry entry;
int err;
/* Null VLAN ID corresponds to the port private database */
@@ -2074,12 +2110,18 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- entry.fid = vlan.fid;
- entry.state = state;
- ether_addr_copy(entry.mac, addr);
- if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry.trunk = false;
- entry.portv_trunkid = BIT(port);
+ err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+ if (err)
+ return err;
+
+ /* Purge the ATU entry only if no port is using it anymore */
+ if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.portv_trunkid &= ~BIT(port);
+ if (!entry.portv_trunkid)
+ entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ } else {
+ entry.portv_trunkid |= BIT(port);
+ entry.state = state;
}
return _mv88e6xxx_atu_load(chip, &entry);
@@ -2099,61 +2141,59 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- int state = is_multicast_ether_addr(fdb->addr) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC;
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
mutex_lock(&chip->reg_lock);
- if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state))
- netdev_err(ds->ports[port].netdev,
- "failed to load MAC address\n");
+ if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid,
+ GLOBAL_ATU_DATA_STATE_UC_STATIC))
+ netdev_err(ds->ports[port].netdev, "failed to load unicast MAC address\n");
mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int ret;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid,
- GLOBAL_ATU_DATA_STATE_UNUSED);
+ err = mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid,
+ GLOBAL_ATU_DATA_STATE_UNUSED);
mutex_unlock(&chip->reg_lock);
- return ret;
+ return err;
}
static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
- int ret;
+ u16 val;
+ int err;
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_mac_read(chip, next.mac);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ if (err)
+ return err;
- next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
unsigned int mask, shift;
- if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ if (val & GLOBAL_ATU_DATA_TRUNK) {
next.trunk = true;
mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
@@ -2163,17 +2203,17 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
}
- next.portv_trunkid = (ret & mask) >> shift;
+ next.portv_trunkid = (val & mask) >> shift;
}
*entry = next;
return 0;
}
-static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip,
- u16 fid, u16 vid, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
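+/* Dump one FID's ATU entries owned by this port as FDB (unicast) or
+ * MDB (multicast) switchdev objects.
+ */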
+static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
+ u16 fid, u16 vid, int port,
+ struct switchdev_obj *obj,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_atu_entry addr = {
.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
@@ -2187,72 +2227,98 @@ static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip,
do {
err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
if (err)
- break;
+ return err;
if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
break;
- if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
- bool is_static = addr.state ==
- (is_multicast_ether_addr(addr.mac) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
+ if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0)
+ continue;
+
+ if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) {
+ struct switchdev_obj_port_fdb *fdb;
+
+ if (!is_unicast_ether_addr(addr.mac))
+ continue;
+ fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
fdb->vid = vid;
ether_addr_copy(fdb->addr, addr.mac);
- fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+ if (addr.state == GLOBAL_ATU_DATA_STATE_UC_STATIC)
+ fdb->ndm_state = NUD_NOARP;
+ else
+ fdb->ndm_state = NUD_REACHABLE;
+ } else if (obj->id == SWITCHDEV_OBJ_ID_PORT_MDB) {
+ struct switchdev_obj_port_mdb *mdb;
- err = cb(&fdb->obj);
- if (err)
- break;
+ if (!is_multicast_ether_addr(addr.mac))
+ continue;
+
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ mdb->vid = vid;
+ ether_addr_copy(mdb->addr, addr.mac);
+ } else {
+ return -EOPNOTSUPP;
}
+
+ err = cb(obj);
+ if (err)
+ return err;
} while (!is_broadcast_ether_addr(addr.mac));
return err;
}
-static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
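+/* Dump the port's private database (VLAN ID 0) then each VTU entry's FID */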
+static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
+ struct switchdev_obj *obj,
+ int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
u16 fid;
int err;
- mutex_lock(&chip->reg_lock);
-
/* Dump port's default Filtering Information Database (VLAN ID 0) */
err = _mv88e6xxx_port_fid_get(chip, port, &fid);
if (err)
- goto unlock;
+ return err;
- err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb);
+ err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, obj, cb);
if (err)
- goto unlock;
+ return err;
/* Dump VLANs' Filtering Information Databases */
err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid);
if (err)
- goto unlock;
+ return err;
do {
err = _mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
- break;
+ return err;
if (!vlan.valid)
break;
- err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid,
- port, fdb, cb);
+ err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port,
+ obj, cb);
if (err)
- break;
+ return err;
} while (vlan.vid < GLOBAL_VTU_VID_MASK);
-unlock:
+ return err;
+}
+
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_db_dump(chip, port, &fdb->obj, cb);
mutex_unlock(&chip->reg_lock);
return err;
@@ -2261,7 +2327,7 @@ unlock:
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int i, err = 0;
mutex_lock(&chip->reg_lock);
@@ -2269,7 +2335,7 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
/* Assign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = bridge;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (chip->ports[i].bridge_dev == bridge) {
err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
@@ -2284,7 +2350,7 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct net_device *bridge = chip->ports[port].bridge_dev;
int i;
@@ -2293,7 +2359,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
/* Unassign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = NULL;
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (i == port || chip->ports[i].bridge_dev == bridge)
if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
@@ -2302,57 +2368,26 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip,
- int port, int page, int reg, int val)
-{
- int ret;
-
- ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
- if (ret < 0)
- goto restore_page_0;
-
- ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val);
-restore_page_0:
- mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
-
- return ret;
-}
-
-static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip,
- int port, int page, int reg)
-{
- int ret;
-
- ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
- if (ret < 0)
- goto restore_page_0;
-
- ret = mv88e6xxx_mdio_read_indirect(chip, port, reg);
-restore_page_0:
- mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
-
- return ret;
-}
-
static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
{
bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
struct gpio_desc *gpiod = chip->reset;
unsigned long timeout;
- int ret;
+ u16 reg;
+ int err;
int i;
/* Set all ports to the disabled state. */
- for (i = 0; i < chip->info->num_ports; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
- if (ret < 0)
- return ret;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ err = mv88e6xxx_port_read(chip, i, PORT_CONTROL, &reg);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
- ret & 0xfffc);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, i, PORT_CONTROL,
+ reg & 0xfffc);
+ if (err)
+ return err;
}
/* Wait for transmit queues to drain. */
@@ -2371,65 +2406,53 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
* through global registers 0x18 and 0x19.
*/
if (ppu_active)
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc000);
else
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
- if (ret)
- return ret;
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc400);
+ if (err)
+ return err;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, 0x00, &reg);
+ if (err)
+ return err;
- if ((ret & is_reset) == is_reset)
+ if ((reg & is_reset) == is_reset)
break;
usleep_range(1000, 2000);
}
if (time_after(jiffies, timeout))
- ret = -ETIMEDOUT;
+ err = -ETIMEDOUT;
else
- ret = 0;
+ err = 0;
- return ret;
+ return err;
}
-static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
{
- int ret;
+ u16 val;
+ int err;
- ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES,
- PAGE_FIBER_SERDES, MII_BMCR);
- if (ret < 0)
- return ret;
+ /* Clear Power Down bit */
+ err = mv88e6xxx_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
- if (ret & BMCR_PDOWN) {
- ret &= ~BMCR_PDOWN;
- ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES,
- PAGE_FIBER_SERDES, MII_BMCR,
- ret);
+ if (val & BMCR_PDOWN) {
+ val &= ~BMCR_PDOWN;
+ err = mv88e6xxx_serdes_write(chip, MII_BMCR, val);
}
- return ret;
-}
-
-static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port,
- int reg, u16 *val)
-{
- int addr = chip->info->port_base_addr + port;
-
- if (port >= chip->info->num_ports)
- return -EINVAL;
-
- return mv88e6xxx_read(chip, addr, reg, val);
+ return err;
}
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
- int ret;
+ int err;
u16 reg;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
@@ -2442,7 +2465,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
@@ -2457,10 +2480,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PCS_CTRL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
}
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2486,28 +2508,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
if (dsa_is_cpu_port(ds, port)) {
- if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip))
- reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(chip) ||
- mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA))
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
- PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
- }
-
- if (mv88e6xxx_6352_family(chip) ||
- mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6095_family(chip) ||
- mv88e6xxx_6065_family(chip) ||
- mv88e6xxx_6185_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
- reg |= PORT_CONTROL_EGRESS_ADD_TAG;
- }
+ else
+ reg |= PORT_CONTROL_DSA_TAG;
+ reg |= PORT_CONTROL_EGRESS_ADD_TAG |
+ PORT_CONTROL_FORWARD_UNKNOWN;
}
if (dsa_is_dsa_port(ds, port)) {
if (mv88e6xxx_6095_family(chip) ||
@@ -2526,26 +2533,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
}
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
- if (mv88e6xxx_6352_family(chip)) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
- if (ret < 0)
- return ret;
- ret &= PORT_STATUS_CMODE_MASK;
- if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
- (ret == PORT_STATUS_CMODE_1000BASE_X) ||
- (ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_power_on_serdes(chip);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) {
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
+ return err;
+ reg &= PORT_STATUS_CMODE_MASK;
+ if ((reg == PORT_STATUS_CMODE_100BASE_X) ||
+ (reg == PORT_STATUS_CMODE_1000BASE_X) ||
+ (reg == PORT_STATUS_CMODE_SGMII)) {
+ err = mv88e6xxx_serdes_power_on(chip);
+ if (err < 0)
+ return err;
}
}
@@ -2579,10 +2585,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL_2, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
+ return err;
}
/* Port Association Vector: when learning source addresses
@@ -2595,16 +2600,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_ASSOC_VECTOR, reg);
+ if (err)
+ return err;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL_2, 0x0000);
+ if (err)
+ return err;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
@@ -2613,111 +2616,108 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PAUSE_CTRL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+ if (err)
+ return err;
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ATU_CONTROL, 0x0000);
+ err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
+ 0x0000);
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PRI_OVERRIDE, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
+ 0x0000);
+ if (err)
+ return err;
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ETH_TYPE, ETH_P_EDSA);
- if (ret)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) {
+ err = mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE,
+ ETH_P_EDSA);
+ if (err)
+ return err;
+ }
+
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_0123, 0x3210);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123,
+ 0x3210);
+ if (err)
+ return err;
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_4567, 0x7654);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567,
+ 0x7654);
+ if (err)
+ return err;
}
/* Rate Control: disable ingress rate limiting. */
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
mv88e6xxx_6320_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0001);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0001);
+ if (err)
+ return err;
} else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0000);
+ if (err)
+ return err;
}
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+ if (err)
+ return err;
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(chip, port, 0);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_fid_set(chip, port, 0);
+ if (err)
+ return err;
- ret = _mv88e6xxx_port_based_vlan_map(chip, port);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_based_vlan_map(chip, port);
+ if (err)
+ return err;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
- 0x0000);
- if (ret)
- return ret;
-
- return 0;
+ return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
-static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
- (addr[0] << 8) | addr[1]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
- (addr[2] << 8) | addr[3]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
if (err)
return err;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
- (addr[4] << 8) | addr[5]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+ if (err)
+ return err;
+
+ return 0;
}
static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
@@ -2736,7 +2736,7 @@ static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
/* Round to nearest multiple of coeff */
age_time = (msecs + coeff / 2) / coeff;
- err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
if (err)
return err;
@@ -2744,13 +2744,13 @@ static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
val &= ~0xff0;
val |= age_time << 4;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
mutex_lock(&chip->reg_lock);
@@ -2775,7 +2775,7 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
reg |= GLOBAL_CONTROL_PPU_ENABLE;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
if (err)
return err;
@@ -2785,15 +2785,14 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
- reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
if (err)
return err;
/* Disable remote management, and set the switch's DSA device number. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
if (err)
return err;
@@ -2806,8 +2805,8 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
* enable address learn messages to be sent to all message
* ports.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARN2ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARN2ALL);
if (err)
return err;
@@ -2821,39 +2820,39 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
return err;
/* Configure the IP ToS mapping registers. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_1, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_2, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_3, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_4, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_5, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_6, 0xffff);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_7, 0xffff);
if (err)
return err;
/* Configure the IEEE 802.1p priority mapping register. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IEEE_PRI, 0xfa41);
if (err)
return err;
/* Clear the statistics counters for all ports */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
if (err)
return err;
@@ -2865,277 +2864,9 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
- int target, int port)
-{
- u16 val = (target << 8) | (port & 0xf);
-
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val);
-}
-
-static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
-{
- int target, port;
- int err;
-
- /* Initialize the routing port to the 32 possible target devices */
- for (target = 0; target < 32; ++target) {
- port = 0xf;
-
- if (target < DSA_MAX_SWITCHES) {
- port = chip->ds->rtable[target];
- if (port == DSA_RTABLE_NONE)
- port = 0xf;
- }
-
- err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hask, u16 mask)
-{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
- u16 val = (num << 12) | (mask & port_mask);
-
- if (hask)
- val |= GLOBAL2_TRUNK_MASK_HASK;
-
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val);
-}
-
-static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
- u16 map)
-{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
- u16 val = (id << 11) | (map & port_mask);
-
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val);
-}
-
-static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
-{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
- int i, err;
-
- /* Clear all eight possible Trunk Mask vectors */
- for (i = 0; i < 8; ++i) {
- err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask);
- if (err)
- return err;
- }
-
- /* Clear all sixteen possible Trunk ID routing vectors */
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
-{
- int port, err;
-
- /* Init all Ingress Rate Limit resources of all ports */
- for (port = 0; port < chip->info->num_ports; ++port) {
- /* XXX newer chips (like 88E6390) have different 2-bit ops */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_OP_INIT_ALL |
- (port << 8));
- if (err)
- break;
-
- /* Wait for the operation to complete */
- err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_BUSY);
- if (err)
- break;
- }
-
- return err;
-}
-
-/* Indirect write to the Switch MAC/WoL/WoF register */
-static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
- unsigned int pointer, u8 data)
-{
- u16 val = (pointer << 8) | data;
-
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val);
-}
-
-static int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
-{
- int i, err;
-
- for (i = 0; i < 6; i++) {
- err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
- u8 data)
-{
- u16 val = (pointer << 8) | (data & 0x7);
-
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val);
-}
-
-static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
-{
- int i, err;
-
- /* Clear all sixteen possible Priority Override entries */
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g2_pot_write(chip, i, 0);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
-{
- return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD,
- GLOBAL2_EEPROM_CMD_BUSY |
- GLOBAL2_EEPROM_CMD_RUNNING);
-}
-
-static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
-{
- int err;
-
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd);
- if (err)
- return err;
-
- return mv88e6xxx_g2_eeprom_wait(chip);
-}
-
-static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
- u8 addr, u16 *data)
-{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr;
- int err;
-
- err = mv88e6xxx_g2_eeprom_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
- if (err)
- return err;
-
- return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
-}
-
-static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
- u8 addr, u16 data)
-{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr;
- int err;
-
- err = mv88e6xxx_g2_eeprom_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
- if (err)
- return err;
-
- return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
-}
-
-static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
-{
- u16 reg;
- int err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
- /* Consider the frames with reserved multicast destination
- * addresses matching 01:80:c2:00:00:2x as MGMT.
- */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X,
- 0xffff);
- if (err)
- return err;
- }
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) {
- /* Consider the frames with reserved multicast destination
- * addresses matching 01:80:c2:00:00:0x as MGMT.
- */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
- 0xffff);
- if (err)
- return err;
- }
-
- /* Ignore removed tag data on doubly tagged packets, disable
- * flow control messages, force flow control priority to the
- * highest, and send all special multicast frames to the CPU
- * port at the highest priority.
- */
- reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4);
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
- mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
- reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg);
- if (err)
- return err;
-
- /* Program the DSA routing table. */
- err = mv88e6xxx_g2_set_device_mapping(chip);
- if (err)
- return err;
-
- /* Clear all trunk masks and mapping. */
- err = mv88e6xxx_g2_clear_trunk(chip);
- if (err)
- return err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) {
- /* Disable ingress rate limiting by resetting all per port
- * ingress rate limit resources to their initial state.
- */
- err = mv88e6xxx_g2_clear_irl(chip);
- if (err)
- return err;
- }
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
- /* Initialize Cross-chip Port VLAN Table to reset defaults */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR,
- GLOBAL2_PVT_ADDR_OP_INIT_ONES);
- if (err)
- return err;
- }
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
- /* Clear the priority override table. */
- err = mv88e6xxx_g2_clear_pot(chip);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
int i;
@@ -3149,7 +2880,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
/* Setup Switch Port Registers */
- for (i = 0; i < chip->info->num_ports; i++) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
@@ -3175,100 +2906,48 @@ unlock:
static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- /* Has an indirect Switch MAC/WoL/WoF register in Global 2? */
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC))
- err = mv88e6xxx_g2_set_switch_mac(chip, addr);
- else
- err = mv88e6xxx_g1_set_switch_mac(chip, addr);
-
- mutex_unlock(&chip->reg_lock);
-
- return err;
-}
-
-#ifdef CONFIG_NET_DSA_HWMON
-static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
- int reg)
-{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg);
- mutex_unlock(&chip->reg_lock);
-
- return ret;
-}
-
-static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val)
-{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int ret;
+ if (!chip->info->ops->set_switch_mac)
+ return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val);
+ err = chip->info->ops->set_switch_mac(chip, addr);
mutex_unlock(&chip->reg_lock);
- return ret;
-}
-#endif
-
-static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
-{
- if (port >= 0 && port < chip->info->num_ports)
- return port;
- return -EINVAL;
+ return err;
}
-static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum)
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_chip *chip = bus->priv;
- int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
- int ret;
+ u16 val;
+ int err;
- if (addr < 0)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum);
- else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
- ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum);
- else
- ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum);
-
+ err = mv88e6xxx_phy_read(chip, phy, reg, &val);
mutex_unlock(&chip->reg_lock);
- return ret;
+
+ return err ? err : val;
}
-static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum,
- u16 val)
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mv88e6xxx_chip *chip = bus->priv;
- int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
- int ret;
+ int err;
- if (addr < 0)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val);
- else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
- ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val);
- else
- ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val);
-
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
mutex_unlock(&chip->reg_lock);
- return ret;
+
+ return err;
}
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
@@ -3278,9 +2957,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct mii_bus *bus;
int err;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
- mv88e6xxx_ppu_state_init(chip);
-
if (np)
chip->mdio_np = of_get_child_by_name(np, "mdio");
@@ -3335,69 +3011,70 @@ static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 val;
int ret;
- int val;
*temp = 0;
mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6);
+ ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
+ ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
if (ret < 0)
goto error;
- ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5));
+ ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
- if (val < 0) {
- ret = val;
+ ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
+ if (ret < 0)
goto error;
- }
/* Disable temperature sensor */
- ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0);
+ mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0);
mutex_unlock(&chip->reg_lock);
return ret;
}
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ u16 val;
int ret;
*temp = 0;
- ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27);
+ mutex_lock(&chip->reg_lock);
+ ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val);
+ mutex_unlock(&chip->reg_lock);
if (ret < 0)
return ret;
- *temp = (ret & 0xff) - 25;
+ *temp = (val & 0xff) - 25;
return 0;
}
static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
return -EOPNOTSUPP;
@@ -3410,8 +3087,9 @@ static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ u16 val;
int ret;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
@@ -3419,36 +3097,45 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
*temp = 0;
- ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
+ mutex_lock(&chip->reg_lock);
+ ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
+ mutex_unlock(&chip->reg_lock);
if (ret < 0)
return ret;
- *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+ *temp = (((val >> 8) & 0x1f) * 5) - 25;
return 0;
}
static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- int ret;
+ u16 val;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
- ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
- if (ret < 0)
- return ret;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
+ if (err)
+ goto unlock;
temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- return mv88e6xxx_mdio_page_write(ds, phy, 6, 26,
- (ret & 0xe0ff) | (temp << 8));
+ err = mv88e6xxx_phy_page_write(chip, phy, 6, 26,
+ (val & 0xe0ff) | (temp << 8));
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
}
static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ u16 val;
int ret;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
@@ -3456,11 +3143,13 @@ static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
*alarm = false;
- ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
+ mutex_lock(&chip->reg_lock);
+ ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
+ mutex_unlock(&chip->reg_lock);
if (ret < 0)
return ret;
- *alarm = !!(ret & 0x40);
+ *alarm = !!(val & 0x40);
return 0;
}
@@ -3468,74 +3157,22 @@ static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
return chip->eeprom_len;
}
-static int mv88e6xxx_get_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- unsigned int offset = eeprom->offset;
- unsigned int len = eeprom->len;
- u16 val;
- int err;
-
- eeprom->len = 0;
-
- if (offset & 1) {
- err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
- if (err)
- return err;
-
- *data++ = (val >> 8) & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- while (len >= 2) {
- err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
- if (err)
- return err;
-
- *data++ = val & 0xff;
- *data++ = (val >> 8) & 0xff;
-
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
-
- if (len) {
- err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
- if (err)
- return err;
-
- *data++ = val & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- return 0;
-}
-
static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
struct ethtool_eeprom *eeprom, u8 *data)
{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_get_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
+ if (!chip->info->ops->get_eeprom)
+ return -EOPNOTSUPP;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->get_eeprom(chip, eeprom, data);
mutex_unlock(&chip->reg_lock);
if (err)
@@ -3546,92 +3183,138 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
return 0;
}
-static int mv88e6xxx_set_eeprom16(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- unsigned int offset = eeprom->offset;
- unsigned int len = eeprom->len;
- u16 val;
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- /* Ensure the RO WriteEn bit is set */
- err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val);
- if (err)
- return err;
+ if (!chip->info->ops->set_eeprom)
+ return -EOPNOTSUPP;
- if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN))
- return -EROFS;
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
- eeprom->len = 0;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->set_eeprom(chip, eeprom, data);
+ mutex_unlock(&chip->reg_lock);
- if (offset & 1) {
- err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
- if (err)
- return err;
+ return err;
+}
- val = (*data++ << 8) | (val & 0xff);
+static const struct mv88e6xxx_ops mv88e6085_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
- err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
- if (err)
- return err;
+static const struct mv88e6xxx_ops mv88e6095_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
- offset++;
- len--;
- eeprom->len++;
- }
+static const struct mv88e6xxx_ops mv88e6123_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
- while (len >= 2) {
- val = *data++;
- val |= *data++ << 8;
+static const struct mv88e6xxx_ops mv88e6131_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
- err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
- if (err)
- return err;
+static const struct mv88e6xxx_ops mv88e6161_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
+static const struct mv88e6xxx_ops mv88e6165_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
- if (len) {
- err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
- if (err)
- return err;
+static const struct mv88e6xxx_ops mv88e6171_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- val = (val & 0xff00) | *data++;
+static const struct mv88e6xxx_ops mv88e6172_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
- if (err)
- return err;
+static const struct mv88e6xxx_ops mv88e6175_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- offset++;
- len--;
- eeprom->len++;
- }
+static const struct mv88e6xxx_ops mv88e6176_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- return 0;
-}
+static const struct mv88e6xxx_ops mv88e6185_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
-static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
- int err;
+static const struct mv88e6xxx_ops mv88e6240_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- if (eeprom->magic != 0xc3ec4951)
- return -EINVAL;
+static const struct mv88e6xxx_ops mv88e6320_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- mutex_lock(&chip->reg_lock);
+static const struct mv88e6xxx_ops mv88e6321_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_set_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
+static const struct mv88e6xxx_ops mv88e6350_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- mutex_unlock(&chip->reg_lock);
+static const struct mv88e6xxx_ops mv88e6351_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
- return err;
-}
+static const struct mv88e6xxx_ops mv88e6352_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
@@ -3641,8 +3324,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ .ops = &mv88e6085_ops,
},
[MV88E6095] = {
@@ -3652,8 +3337,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 11,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ .ops = &mv88e6095_ops,
},
[MV88E6123] = {
@@ -3663,8 +3350,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 3,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6123_ops,
},
[MV88E6131] = {
@@ -3674,8 +3363,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 8,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6131_ops,
},
[MV88E6161] = {
@@ -3685,8 +3376,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6161_ops,
},
[MV88E6165] = {
@@ -3696,8 +3389,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6165_ops,
},
[MV88E6171] = {
@@ -3707,8 +3402,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6171_ops,
},
[MV88E6172] = {
@@ -3718,8 +3415,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6172_ops,
},
[MV88E6175] = {
@@ -3729,8 +3428,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6175_ops,
},
[MV88E6176] = {
@@ -3740,8 +3441,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6176_ops,
},
[MV88E6185] = {
@@ -3751,8 +3454,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6185_ops,
},
[MV88E6240] = {
@@ -3762,8 +3467,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6240_ops,
},
[MV88E6320] = {
@@ -3773,8 +3480,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6320_ops,
},
[MV88E6321] = {
@@ -3784,8 +3493,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6321_ops,
},
[MV88E6350] = {
@@ -3795,8 +3506,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6350_ops,
},
[MV88E6351] = {
@@ -3806,8 +3519,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6351_ops,
},
[MV88E6352] = {
@@ -3817,8 +3532,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6352_ops,
},
};
@@ -3856,6 +3573,10 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
/* Update the compatible info with the probed one */
chip->info = info;
+ err = mv88e6xxx_g2_require(chip);
+ if (err)
+ return err;
+
dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
chip->info->prod_num, chip->info->name, rev);
@@ -3877,6 +3598,18 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
return chip;
}
+static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
+{
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(chip);
+}
+
+static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
+{
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_destroy(chip);
+}
+
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
@@ -3886,7 +3619,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
if (sw_addr == 0)
chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
- else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP))
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP))
chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
else
return -EINVAL;
@@ -3897,6 +3630,16 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
+static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA))
+ return DSA_TAG_PROTO_EDSA;
+
+ return DSA_TAG_PROTO_DSA;
+}
+
static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv)
@@ -3924,6 +3667,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
+ mv88e6xxx_phy_init(chip);
+
err = mv88e6xxx_mdio_register(chip, NULL);
if (err)
goto free;
@@ -3937,9 +3682,61 @@ free:
return NULL;
}
-static struct dsa_switch_driver mv88e6xxx_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
+static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ /* We don't need any dynamic resource from the kernel (yet),
+ * so skip the prepare phase.
+ */
+
+ return 0;
+}
+
+static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ GLOBAL_ATU_DATA_STATE_MC_STATIC))
+ netdev_err(ds->ports[port].netdev, "failed to load multicast MAC address\n");
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ GLOBAL_ATU_DATA_STATE_UNUSED);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_mdb *mdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_db_dump(chip, port, &mdb->obj, cb);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static struct dsa_switch_ops mv88e6xxx_switch_ops = {
.probe = mv88e6xxx_drv_probe,
+ .get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.set_addr = mv88e6xxx_set_addr,
.adjust_link = mv88e6xxx_adjust_link,
@@ -3963,6 +3760,7 @@ static struct dsa_switch_driver mv88e6xxx_switch_driver = {
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
@@ -3972,6 +3770,10 @@ static struct dsa_switch_driver mv88e6xxx_switch_driver = {
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_mdb_dump = mv88e6xxx_port_mdb_dump,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
@@ -3986,7 +3788,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
ds->dev = dev;
ds->priv = chip;
- ds->drv = &mv88e6xxx_switch_driver;
+ ds->ops = &mv88e6xxx_switch_ops;
dev_set_drvdata(dev, ds);
@@ -4025,11 +3827,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
return err;
+ mv88e6xxx_phy_init(chip);
+
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
if (IS_ERR(chip->reset))
return PTR_ERR(chip->reset);
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) &&
+ if (chip->info->ops->get_eeprom &&
!of_property_read_u32(np, "eeprom-length", &eeprom_len))
chip->eeprom_len = eeprom_len;
@@ -4049,8 +3853,9 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_mdio_unregister(chip);
}
@@ -4076,7 +3881,7 @@ static struct mdio_driver mv88e6xxx_driver = {
static int __init mv88e6xxx_init(void)
{
- register_switch_driver(&mv88e6xxx_switch_driver);
+ register_switch_driver(&mv88e6xxx_switch_ops);
return mdio_driver_register(&mv88e6xxx_driver);
}
module_init(mv88e6xxx_init);
@@ -4084,7 +3889,7 @@ module_init(mv88e6xxx_init);
static void __exit mv88e6xxx_cleanup(void)
{
mdio_driver_unregister(&mv88e6xxx_driver);
- unregister_switch_driver(&mv88e6xxx_switch_driver);
+ unregister_switch_driver(&mv88e6xxx_switch_ops);
}
module_exit(mv88e6xxx_cleanup);
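[Editor's sketch, not part of the patch] For readers skimming the hunks above: the heart of this conversion is replacing scattered per-family capability checks with a per-chip mv88e6xxx_ops function-pointer table (set_switch_mac, get_eeprom, set_eeprom, phy_read, phy_write), which each DSA callback now dispatches through after a NULL check. Below is a reduced, self-contained C sketch of that dispatch pattern; the struct and function names are simplified stand-ins invented for illustration, not the kernel definitions, and the real callbacks additionally hold chip->reg_lock around the call.

    #include <stddef.h>
    #include <errno.h>

    /* Simplified stand-ins for the driver structures (illustration only). */
    struct chip;

    struct chip_ops {
            /* optional operation: left NULL when a chip does not support it */
            int (*set_switch_mac)(struct chip *chip, unsigned char *addr);
    };

    struct chip_info {
            const struct chip_ops *ops;
    };

    struct chip {
            const struct chip_info *info;
    };

    /* Same shape as mv88e6xxx_set_addr() in the hunk above: probe the
     * pointer first, report -EOPNOTSUPP if the chip lacks the operation,
     * otherwise dispatch through the ops table.
     */
    static int chip_set_addr(struct chip *chip, unsigned char *addr)
    {
            if (!chip->info->ops->set_switch_mac)
                    return -EOPNOTSUPP;

            return chip->info->ops->set_switch_mac(chip, addr);
    }
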
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
new file mode 100644
index 000000000000..d358720b6c2d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -0,0 +1,34 @@
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
new file mode 100644
index 000000000000..62291e6fe3a3
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -0,0 +1,23 @@
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_GLOBAL1_H
+#define _MV88E6XXX_GLOBAL1_H
+
+#include "mv88e6xxx.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+
+#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
new file mode 100644
index 000000000000..cf686e7506a9
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -0,0 +1,491 @@
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support (device address 0x1C)
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global2.h"
+
+#define ADDR_GLOBAL2 0x1c
+
+static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+{
+ return mv88e6xxx_update(chip, ADDR_GLOBAL2, reg, update);
+}
+
+static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
+}
+
+/* Offset 0x06: Device Mapping Table register */
+
+static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
+ int target, int port)
+{
+ u16 val = (target << 8) | (port & 0xf);
+
+ return mv88e6xxx_g2_update(chip, GLOBAL2_DEVICE_MAPPING, val);
+}
+
+static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
+{
+ int target, port;
+ int err;
+
+ /* Initialize the routing port to the 32 possible target devices */
+ for (target = 0; target < 32; ++target) {
+ port = 0xf;
+
+ if (target < DSA_MAX_SWITCHES) {
+ port = chip->ds->rtable[target];
+ if (port == DSA_RTABLE_NONE)
+ port = 0xf;
+ }
+
+ err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x07: Trunk Mask Table register */
+
+static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hask, u16 mask)
+{
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ u16 val = (num << 12) | (mask & port_mask);
+
+ if (hask)
+ val |= GLOBAL2_TRUNK_MASK_HASK;
+
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MASK, val);
+}
+
+/* Offset 0x08: Trunk Mapping Table register */
+
+static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
+{
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ u16 val = (id << 11) | (map & port_mask);
+
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MAPPING, val);
+}
+
+static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
+{
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ int i, err;
+
+ /* Clear all eight possible Trunk Mask vectors */
+ for (i = 0; i < 8; ++i) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask);
+ if (err)
+ return err;
+ }
+
+ /* Clear all sixteen possible Trunk ID routing vectors */
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Offset 0x09: Ingress Rate Command register
+ * Offset 0x0A: Ingress Rate Data register
+ */
+
+static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
+{
+ int port, err;
+
+ /* Init all Ingress Rate Limit resources of all ports */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
+ /* XXX newer chips (like 88E6390) have different 2-bit ops */
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_OP_INIT_ALL |
+ (port << 8));
+ if (err)
+ break;
+
+ /* Wait for the operation to complete */
+ err = mv88e6xxx_g2_wait(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_BUSY);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x0D: Switch MAC/WoL/WoF register */
+
+static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
+ unsigned int pointer, u8 data)
+{
+ u16 val = (pointer << 8) | data;
+
+ return mv88e6xxx_g2_update(chip, GLOBAL2_SWITCH_MAC, val);
+}
+
+int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ int i, err;
+
+ for (i = 0; i < 6; i++) {
+ err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x0F: Priority Override Table */
+
+static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
+ u8 data)
+{
+ u16 val = (pointer << 8) | (data & 0x7);
+
+ return mv88e6xxx_g2_update(chip, GLOBAL2_PRIO_OVERRIDE, val);
+}
+
+static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
+
+ /* Clear all sixteen possible Priority Override entries */
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g2_pot_write(chip, i, 0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x14: EEPROM Command
+ * Offset 0x15: EEPROM Data
+ */
+
+static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_EEPROM_CMD,
+ GLOBAL2_EEPROM_CMD_BUSY |
+ GLOBAL2_EEPROM_CMD_RUNNING);
+}
+
+static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_CMD, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_wait(chip);
+}
+
+static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 *data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_DATA, data);
+}
+
+static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+}
+
+int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ eeprom->len = 0;
+
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = (val >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+ *data++ = (val >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ /* Ensure the RO WriteEn bit is set */
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &val);
+ if (err)
+ return err;
+
+ if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN))
+ return -EROFS;
+
+ eeprom->len = 0;
+
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (*data++ << 8) | (val & 0xff);
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ val = *data++;
+ val |= *data++ << 8;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (val & 0xff00) | *data++;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+/* Offset 0x18: SMI PHY Command Register
+ * Offset 0x19: SMI PHY Data Register
+ */
+
+static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_SMI_PHY_CMD,
+ GLOBAL2_SMI_PHY_CMD_BUSY);
+}
+
+static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_wait(chip);
+}
+
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 *val)
+{
+ u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+}
+
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 val)
+{
+ u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+}
+
+int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+ int err;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:2x as MGMT.
+ */
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) {
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:0x as MGMT.
+ */
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
+ if (err)
+ return err;
+ }
+
+ /* Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
+ mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
+ reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SWITCH_MGMT, reg);
+ if (err)
+ return err;
+
+ /* Program the DSA routing table. */
+ err = mv88e6xxx_g2_set_device_mapping(chip);
+ if (err)
+ return err;
+
+ /* Clear all trunk masks and mapping. */
+ err = mv88e6xxx_g2_clear_trunk(chip);
+ if (err)
+ return err;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) {
+ /* Disable ingress rate limiting by resetting all per port
+ * ingress rate limit resources to their initial state.
+ */
+ err = mv88e6xxx_g2_clear_irl(chip);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
+ /* Initialize Cross-chip Port VLAN Table to reset defaults */
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR,
+ GLOBAL2_PVT_ADDR_OP_INIT_ONES);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
+ /* Clear the priority override table. */
+ err = mv88e6xxx_g2_clear_pot(chip);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
new file mode 100644
index 000000000000..c4bb9035ee3a
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -0,0 +1,88 @@
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support (device address 0x1C)
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_GLOBAL2_H
+#define _MV88E6XXX_GLOBAL2_H
+
+#include "mv88e6xxx.h"
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
+
+static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
+{
+ return 0;
+}
+
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 *val);
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 val);
+int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
+
+#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
+
+static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
+{
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) {
+ dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
+ u8 *addr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
+
+#endif /* _MV88E6XXX_GLOBAL2_H */
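
The header above follows a common kernel pattern: with the Kconfig option enabled the real functions are declared, and with it disabled static inline stubs return -EOPNOTSUPP so the core driver compiles and fails gracefully either way. A stripped-down sketch of the same pattern with hypothetical names (assumes kernel headers for -EOPNOTSUPP):

/* Kconfig-gated stub pattern sketch; CONFIG_FOO_FEATURE, struct foo_chip
 * and foo_do_thing() are hypothetical.
 */
#include <linux/errno.h>

struct foo_chip;

#ifdef CONFIG_FOO_FEATURE

int foo_do_thing(struct foo_chip *chip);	/* implemented in foo.c */

#else /* !CONFIG_FOO_FEATURE */

static inline int foo_do_thing(struct foo_chip *chip)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}

#endif /* CONFIG_FOO_FEATURE */
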
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 48d6ea77f9bd..e572121c196e 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -30,11 +30,13 @@
#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
#define SMI_DATA 0x01
-/* Fiber/SERDES Registers are located at SMI address F, page 1 */
-#define REG_FIBER_SERDES 0x0f
-#define PAGE_FIBER_SERDES 0x01
+/* PHY Registers */
+#define PHY_PAGE 0x16
+#define PHY_PAGE_COPPER 0x00
+
+#define ADDR_SERDES 0x0f
+#define SERDES_PAGE_FIBER 0x01
-#define REG_PORT(p) (0x10 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
#define PORT_STATUS_MY_PAUSE BIT(14)
@@ -157,7 +159,6 @@
#define PORT_TAG_REGMAP_0123 0x18
#define PORT_TAG_REGMAP_4567 0x19
-#define REG_GLOBAL 0x1b
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
/* Two bits for 6165, 6185 etc */
@@ -169,8 +170,8 @@
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
-#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
-#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */
+#define GLOBAL_ATU_FID 0x01
+#define GLOBAL_VTU_FID 0x02
#define GLOBAL_VTU_FID_MASK 0xfff
#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
#define GLOBAL_VTU_SID_MASK 0x3f
@@ -275,7 +276,6 @@
#define GLOBAL_STATS_COUNTER_32 0x1e
#define GLOBAL_STATS_COUNTER_01 0x1f
-#define REG_GLOBAL2 0x1c
#define GLOBAL2_INT_SOURCE 0x00
#define GLOBAL2_INT_MASK 0x01
#define GLOBAL2_MGMT_EN_2X 0x02
@@ -329,17 +329,16 @@
#define GLOBAL2_EEPROM_DATA 0x15
#define GLOBAL2_PTP_AVB_OP 0x16
#define GLOBAL2_PTP_AVB_DATA 0x17
-#define GLOBAL2_SMI_OP 0x18
-#define GLOBAL2_SMI_OP_BUSY BIT(15)
-#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12)
-#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
- GLOBAL2_SMI_OP_CLAUSE_22)
-#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
- GLOBAL2_SMI_OP_CLAUSE_22)
-#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
-#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
-#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
-#define GLOBAL2_SMI_DATA 0x19
+#define GLOBAL2_SMI_PHY_CMD 0x18
+#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15)
+#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12)
+#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \
+ GLOBAL2_SMI_PHY_CMD_MODE_22 | \
+ GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \
+ GLOBAL2_SMI_PHY_CMD_MODE_22 | \
+ GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_DATA 0x19
#define GLOBAL2_SCRATCH_MISC 0x1a
#define GLOBAL2_SCRATCH_BUSY BIT(15)
#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
@@ -384,10 +383,36 @@ enum mv88e6xxx_family {
};
enum mv88e6xxx_cap {
+ /* Two different tag protocols can be used by the driver. All
+ * switches support DSA, but only later generations support
+ * EDSA.
+ */
+ MV88E6XXX_CAP_EDSA,
+
/* Energy Efficient Ethernet.
*/
MV88E6XXX_CAP_EEE,
+ /* Multi-chip Addressing Mode.
+ * When their SMI device address is non-zero, some chips respond to only
+ * two registers at that address and access internal registers indirectly.
+ */
+ MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */
+ MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */
+
+ /* PHY Registers.
+ */
+ MV88E6XXX_CAP_PHY_PAGE, /* (0x16) Page Register */
+
+ /* Fiber/SERDES Registers (SMI address F).
+ */
+ MV88E6XXX_CAP_SERDES,
+
+ /* Switch Global (1) Registers.
+ */
+ MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
+ MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */
+
/* Switch Global 2 Registers.
* The device contains a second set of global 16-bit registers.
*/
@@ -398,16 +423,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */
MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
- MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
- MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */
- MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */
-
- /* Multi-chip Addressing Mode.
- * Some chips require an indirect SMI access when their SMI device
- * address is not zero. See SMI_CMD and SMI_DATA.
- */
- MV88E6XXX_CAP_MULTI_CHIP,
/* PHY Polling Unit.
* See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
@@ -415,12 +431,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_PPU,
MV88E6XXX_CAP_PPU_ACTIVE,
- /* SMI PHY Command and Data registers.
- * This requires an indirect access to PHY registers through
- * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done.
- */
- MV88E6XXX_CAP_SMI_PHY,
-
/* Per VLAN Spanning Tree Unit (STU).
* The Port State database, if present, is accessed through VTU
* operations and dedicated SID registers. See GLOBAL_VTU_SID.
@@ -440,130 +450,148 @@ enum mv88e6xxx_cap {
};
/* Bitmask of capabilities */
-#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
-#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X)
-#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD)
-#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR)
-#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA)
-#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC)
-#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD)
-#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA)
-#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP)
-#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
-#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
-#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY)
-#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
-#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
-
-/* EEPROM Programming via Global2 with 16-bit data */
-#define MV88E6XXX_FLAGS_EEPROM16 \
- (MV88E6XXX_FLAG_G2_EEPROM_CMD | \
- MV88E6XXX_FLAG_G2_EEPROM_DATA)
+#define MV88E6XXX_FLAG_EDSA BIT_ULL(MV88E6XXX_CAP_EDSA)
+#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
+
+#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
+#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
+
+#define MV88E6XXX_FLAG_PHY_PAGE BIT_ULL(MV88E6XXX_CAP_PHY_PAGE)
+
+#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
+
+#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
+#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
+
+#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
+#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
+#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
+#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
+#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
+#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
+
+#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
/* Ingress Rate Limit unit */
#define MV88E6XXX_FLAGS_IRL \
(MV88E6XXX_FLAG_G2_IRL_CMD | \
MV88E6XXX_FLAG_G2_IRL_DATA)
+/* Multi-chip Addressing Mode */
+#define MV88E6XXX_FLAGS_MULTI_CHIP \
+ (MV88E6XXX_FLAG_SMI_CMD | \
+ MV88E6XXX_FLAG_SMI_DATA)
+
/* Cross-chip Port VLAN Table */
#define MV88E6XXX_FLAGS_PVT \
(MV88E6XXX_FLAG_G2_PVT_ADDR | \
MV88E6XXX_FLAG_G2_PVT_DATA)
+/* Fiber/SERDES Registers at SMI address F, page 1 */
+#define MV88E6XXX_FLAGS_SERDES \
+ (MV88E6XXX_FLAG_PHY_PAGE | \
+ MV88E6XXX_FLAG_SERDES)
+
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
- MV88E6XXX_FLAG_VTU)
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6185 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU | \
MV88E6XXX_FLAG_VTU)
#define MV88E6XXX_FLAGS_FAMILY_6320 \
- (MV88E6XXX_FLAG_EEE | \
+ (MV88E6XXX_FLAG_EDSA | \
+ MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
- MV88E6XXX_FLAG_SMI_PHY | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_EDSA | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
- MV88E6XXX_FLAG_SMI_PHY | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
- (MV88E6XXX_FLAG_EEE | \
+ (MV88E6XXX_FLAG_EDSA | \
+ MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_MULTI_CHIP | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
- MV88E6XXX_FLAG_SMI_PHY | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
+ MV88E6XXX_FLAGS_PVT | \
+ MV88E6XXX_FLAGS_SERDES)
+
+struct mv88e6xxx_ops;
struct mv88e6xxx_info {
enum mv88e6xxx_family family;
@@ -572,8 +600,10 @@ struct mv88e6xxx_info {
unsigned int num_databases;
unsigned int num_ports;
unsigned int port_base_addr;
+ unsigned int global1_addr;
unsigned int age_time_coeff;
- unsigned long flags;
+ unsigned long long flags;
+ const struct mv88e6xxx_ops *ops;
};
struct mv88e6xxx_atu_entry {
@@ -584,18 +614,15 @@ struct mv88e6xxx_atu_entry {
u8 mac[ETH_ALEN];
};
-struct mv88e6xxx_vtu_stu_entry {
- /* VTU only */
+struct mv88e6xxx_vtu_entry {
u16 vid;
u16 fid;
-
- /* VTU and STU */
u8 sid;
bool valid;
u8 data[DSA_MAX_PORTS];
};
-struct mv88e6xxx_ops;
+struct mv88e6xxx_bus_ops;
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
@@ -616,13 +643,14 @@ struct mv88e6xxx_chip {
/* The MII bus and the address on the bus that is used to
* communication with the switch
*/
- const struct mv88e6xxx_ops *smi_ops;
+ const struct mv88e6xxx_bus_ops *smi_ops;
struct mii_bus *bus;
int sw_addr;
/* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
+ const struct mv88e6xxx_bus_ops *phy_ops;
struct mutex ppu_mutex;
int ppu_disabled;
struct work_struct ppu_work;
@@ -651,11 +679,25 @@ struct mv88e6xxx_chip {
struct mii_bus *mdio_bus;
};
-struct mv88e6xxx_ops {
+struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
};
+struct mv88e6xxx_ops {
+ int (*get_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
+
+ int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 val);
+};
+
enum stat_type {
BANK0,
BANK1,
@@ -675,4 +717,20 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
return (chip->info->flags & flags) == flags;
}
+static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_ports;
+}
+
+int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 update);
+int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
+
#endif
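
Note that mv88e6xxx_has() above tests that every requested flag bit is present, so composite masks such as MV88E6XXX_FLAGS_MULTI_CHIP or MV88E6XXX_FLAGS_PVT only match chips that have all of their constituent capabilities. A small illustration with made-up flag values:

/* All-bits-set capability check, mirroring mv88e6xxx_has(); the flag
 * values here are made up for the example.
 */
#define FLAG_SMI_CMD	 (1ULL << 0)
#define FLAG_SMI_DATA	 (1ULL << 1)
#define FLAGS_MULTI_CHIP (FLAG_SMI_CMD | FLAG_SMI_DATA)

static int has_flags(unsigned long long chip_flags, unsigned long long flags)
{
	return (chip_flags & flags) == flags;	/* true only if all bits are set */
}

/* has_flags(FLAG_SMI_CMD, FLAGS_MULTI_CHIP) == 0: one capability is not enough.
 * has_flags(FLAG_SMI_CMD | FLAG_SMI_DATA, FLAGS_MULTI_CHIP) == 1.
 */
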
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
new file mode 100644
index 000000000000..b3df70d07ff6
--- /dev/null
+++ b/drivers/net/dsa/qca8k.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/if_bridge.h>
+#include <linux/mdio.h>
+#include <linux/etherdevice.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+static const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+};
+
+/* The 32-bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * MDIO writes.
+ */
+static u16 qca8k_current_page = 0xffff;
+
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x3ff;
+}
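
qca8k_split_addr() derives three MDIO-level fields from a 32-bit switch register address: r1 selects the 16-bit half-word register (with r1 + 1 holding the high half), r2 is OR'd with 0x10 to form the MDIO device address, and the remaining bits become the page written to the pseudo-PHY at address 0x18. The same split as a standalone helper, with a worked example for register 0x660 (port 0 lookup control):

/* Standalone copy of the address split above, for illustration only. */
#include <stdint.h>

static void split_addr(uint32_t regaddr, uint16_t *r1, uint16_t *r2,
		       uint16_t *page)
{
	regaddr >>= 1;
	*r1 = regaddr & 0x1e;		/* low half register; +1 is the high half */

	regaddr >>= 5;
	*r2 = regaddr & 0x7;		/* OR'd with 0x10 to give the MDIO address */

	regaddr >>= 3;
	*page = regaddr & 0x3ff;	/* written to pseudo-PHY 0x18, register 0 */
}

/* split_addr(0x660, ...) yields r1 = 0x10, r2 = 0x1, page = 0x3: select page 3,
 * then access MDIO address 0x11, registers 0x10 (low) and 0x11 (high).
 */
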
+
+static u32
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
+{
+ u32 val;
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret >= 0) {
+ val = ret;
+ ret = bus->read(bus, phy_id, regnum + 1);
+ val |= ret << 16;
+ }
+
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit register\n");
+ return ret;
+ }
+
+ return val;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ u16 lo, hi;
+ int ret;
+
+ lo = val & 0xffff;
+ hi = (u16)(val >> 16);
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret >= 0)
+ ret = bus->write(bus, phy_id, regnum + 1, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit register\n");
+}
+
+static void
+qca8k_set_page(struct mii_bus *bus, u16 page)
+{
+ if (page == qca8k_current_page)
+ return;
+
+ if (bus->write(bus, 0x18, 0, page) < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to set qca8k page\n");
+ qca8k_current_page = page;
+}
+
+static u32
+qca8k_read(struct qca8k_priv *priv, u32 reg)
+{
+ u16 r1, r2, page;
+ u32 val;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return val;
+}
+
+static void
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ u16 r1, r2, page;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static u32
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val)
+{
+ u16 r1, r2, page;
+ u32 ret;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+ ret &= ~mask;
+ ret |= val;
+ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return ret;
+}
+
+static void
+qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ qca8k_rmw(priv, reg, 0, val);
+}
+
+static void
+qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ qca8k_rmw(priv, reg, val, 0);
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+ *val = qca8k_read(priv, reg);
+
+ return 0;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+ qca8k_write(priv, reg, val);
+
+ return 0;
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+
+};
+
+static struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+static struct regmap_config qca8k_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .reg_read = qca8k_regmap_read,
+ .reg_write = qca8k_regmap_write,
+ .rd_table = &qca8k_readable_table,
+};
+
+static int
+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(20);
+
+ /* loop until the busy flag has cleared */
+ do {
+ u32 val = qca8k_read(priv, reg);
+ int busy = val & mask;
+
+ if (!busy)
+ break;
+ cond_resched();
+ } while (!time_after_eq(jiffies, timeout));
+
+ return time_after_eq(jiffies, timeout);
+}
+
+static void
+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[4];
+ int i;
+
+ /* load the ARL table into an array */
+ for (i = 0; i < 4; i++)
+ reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
+
+ /* vid - 83:72 */
+ fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+ /* aging - 67:64 */
+ fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+ /* mac - 47:0 */
+ fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
+ fdb->mac[1] = reg[1] & 0xff;
+ fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
+ fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
+ fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
+ fdb->mac[5] = reg[0] & 0xff;
+}
+
+static void
+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
+ u8 aging)
+{
+ u32 reg[3] = { 0 };
+ int i;
+
+ /* vid - 83:72 */
+ reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ /* aging - 67:64 */
+ reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ /* mac - 47:0 */
+ reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
+ reg[1] |= mac[1];
+ reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
+ reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
+ reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
+ reg[0] |= mac[5];
+
+ /* load the array into the ARL table */
+ for (i = 0; i < 3; i++)
+ qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+}
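
The two helpers above map the 84-bit ARL entry onto three 32-bit ATU_DATA words: the VID sits in entry bits 83:72, the aging/status field in 67:64, the port mask in 54:48 and the MAC address in 47:0. A standalone sketch of the packing with one worked value:

/* ARL entry packing sketch (not a driver API). */
#include <stdint.h>

static void fdb_pack(uint32_t reg[3], uint16_t vid, uint8_t port_mask,
		     const uint8_t mac[6], uint8_t aging)
{
	reg[2] = ((vid & 0xfff) << 8) | (aging & 0xf);	  /* vid 83:72, aging 67:64 */
	reg[1] = ((uint32_t)(port_mask & 0x7f) << 16) |	  /* port mask 54:48 */
		 (mac[0] << 8) | mac[1];		  /* mac bytes 0-1, bits 47:32 */
	reg[0] = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
		 (mac[4] << 8) | mac[5];		  /* mac bytes 2-5, bits 31:0 */
}

/* For MAC 00:11:22:33:44:55, vid 1, port mask 0x01, aging 0xf (static):
 * reg[2] = 0x0000010f, reg[1] = 0x00010011, reg[0] = 0x22334455.
 */
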
+
+static int
+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
+{
+ u32 reg;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ }
+
+ /* Write the function register triggering the table access */
+ qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+
+ /* wait for completion */
+ if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))
+ return -1;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret >= 0)
+ qca8k_fdb_read(priv, fdb);
+
+ return ret;
+}
+
+static int
+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
+ u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static void
+qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+qca8k_mib_init(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
+{
+ u32 reg;
+
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ break;
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ break;
+ default:
+ pr_err("Can't set PAD_CTRL on port %d\n", port);
+ return -EINVAL;
+ }
+
+ /* Configure a port to be directly connected to an external
+ * PHY or MAC.
+ */
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ qca8k_write(priv, reg,
+ QCA8K_PORT_PAD_RGMII_EN |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
+
+ /* According to the datasheet, RGMII delay is enabled through
+ * PORT5_PAD_CTRL for all ports, rather than individual port
+ * registers
+ */
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+ default:
+ pr_err("xMII mode %d not supported\n", mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC;
+
+ /* Ports 0 and 6 have no internal PHY */
+ if ((port > 0) && (port < 6))
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int ret, i, phy_mode = -1;
+
+ /* Make sure that port 0 is the cpu port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ pr_err("port 0 is not the CPU port\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&priv->reg_mutex);
+
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap))
+ pr_warn("regmap initialization failed");
+
+ /* Initialize CPU port pad mode (xMII type, delays...) */
+ phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn);
+ if (phy_mode < 0) {
+ pr_err("Can't find phy-mode for master device\n");
+ return phy_mode;
+ }
+ ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
+ if (ret < 0)
+ return ret;
+
+ /* Enable CPU Port */
+ qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
+ priv->port_sts[QCA8K_CPU_PORT].enabled = 1;
+
+ /* Enable MIB counters */
+ qca8k_mib_init(priv);
+
+ /* Enable QCA header mode on the cpu port */
+ qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+
+ /* Disable forwarding by default on all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+
+ /* Disable MAC by default on all user ports */
+ for (i = 1; i < QCA8K_NUM_PORTS; i++)
+ if (ds->enabled_port_mask & BIT(i))
+ qca8k_port_set_status(priv, i, 0);
+
+ /* Forward all unknown frames to CPU port for Linux processing */
+ qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+
+ /* Setup connection between CPU port & user ports */
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ ds->enabled_port_mask);
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (ds->enabled_port_mask & BIT(i)) {
+ int shift = 16 * (i % 2);
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(QCA8K_CPU_PORT));
+
+ /* Enable ARP Auto-learning by default */
+ qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+
+ /* For port-based VLANs to work we need to set the
+ * default egress VID
+ */
+ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ 0xffff << shift, 1 << shift);
+ qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(1) |
+ QCA8K_PORT_VLAN_SVID(1));
+ }
+ }
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+ return 0;
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ return mdiobus_read(priv->bus, phy, regnum);
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ return mdiobus_write(priv->bus, phy, regnum, val);
+}
+
+static void
+qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void
+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i;
+ u64 hi;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ data[i] = qca8k_read(priv, reg);
+ if (mib->size == 2) {
+ hi = qca8k_read(priv, reg + 4);
+ data[i] |= hi << 32;
+ }
+ }
+}
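
For the two-word counters in ar8327_mib (size == 2, e.g. RxGoodByte or TxByte), the low 32 bits live at the counter's offset and the high 32 bits at offset + 4; the loop above stitches them into one 64-bit value. A minimal illustration:

/* 64-bit MIB counter assembly; the lo/hi values in the comment are made up. */
#include <stdint.h>

static uint64_t mib_combine(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

/* mib_combine(0xdeadbeef, 0x1) == 0x1deadbeefULL */
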
+
+static int
+qca8k_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(ar8327_mib);
+}
+
+static void
+qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ u32 reg;
+
+ mutex_lock(&priv->reg_mutex);
+ reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
+ if (enable)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_eee_init(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ int ret;
+
+ p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
+
+ ret = phy_init_eee(phy, 0);
+ if (ret)
+ return ret;
+
+ qca8k_eee_enable_set(ds, port, true);
+
+ return 0;
+}
+
+static int
+qca8k_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev,
+ struct ethtool_eee *e)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ int ret = 0;
+
+ p->eee_enabled = e->eee_enabled;
+
+ if (e->eee_enabled) {
+ p->eee_enabled = qca8k_eee_init(ds, port, phydev);
+ if (!p->eee_enabled)
+ ret = -EOPNOTSUPP;
+ }
+ qca8k_eee_enable_set(ds, port, p->eee_enabled);
+
+ return ret;
+}
+
+static int
+qca8k_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ struct net_device *netdev = ds->ports[port].netdev;
+ int ret;
+
+ ret = phy_ethtool_get_eee(netdev->phydev, p);
+ if (!ret)
+ e->eee_active =
+ !!(p->supported & p->advertised & p->lp_advertised);
+ else
+ e->eee_active = 0;
+
+ e->eee_enabled = p->eee_enabled;
+
+ return ret;
+}
+
+static void
+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+static int
+qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int port_mask = BIT(QCA8K_CPU_PORT);
+ int i;
+
+ priv->port_sts[port].bridge_dev = bridge;
+
+ for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ if (priv->port_sts[i].bridge_dev != bridge)
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ qca8k_reg_set(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+ /* Add all other ports to this port's portvlan mask */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return 0;
+}
+
+static void
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int i;
+
+ for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ if (priv->port_sts[i].bridge_dev !=
+ priv->port_sts[port].bridge_dev)
+ continue;
+ /* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ qca8k_reg_clear(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+ priv->port_sts[port].bridge_dev = NULL;
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+}
+
+static int
+qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_sts[port].enabled = 1;
+
+ return 0;
+}
+
+static void
+qca8k_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_sts[port].enabled = 0;
+}
+
+static int
+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = 1;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+static int
+qca8k_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ /* The FDB table for static and auto learned entries is the same. We
+ * need to reserve an entry with no port_mask set to make sure that
+ * when port_fdb_add is called an entry is still available. Otherwise
+ * the last free entry might have been used up by auto learning
+ */
+ return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid);
+}
+
+static void
+qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ /* Update the FDB entry adding the port_mask */
+ qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid);
+}
+
+static int
+qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+ u16 vid = fdb->vid;
+
+ if (!vid)
+ vid = 1;
+
+ return qca8k_fdb_del(priv, fdb->addr, port_mask, vid);
+}
+
+static int
+qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+
+ ether_addr_copy(fdb->addr, _fdb.mac);
+ fdb->vid = _fdb.vid;
+ if (_fdb.aging == QCA8K_ATU_STATUS_STATIC)
+ fdb->ndm_state = NUD_NOARP;
+ else
+ fdb->ndm_state = NUD_REACHABLE;
+
+ ret = cb(&fdb->obj);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_QCA;
+}
+
+static struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .phy_read = qca8k_phy_read,
+ .phy_write = qca8k_phy_write,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .get_eee = qca8k_get_eee,
+ .set_eee = qca8k_set_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fdb_prepare = qca8k_port_fdb_prepare,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv;
+ u32 id;
+
+ /* allocate the private data struct so that we can probe the switch's
+ * ID register
+ */
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+
+ /* read the switch's ID register */
+ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
+ id >>= QCA8K_MASK_CTRL_ID_S;
+ id &= QCA8K_MASK_CTRL_ID_M;
+ if (id != QCA8K_ID_QCA8337)
+ return -ENODEV;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->priv = priv;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->ops = &qca8k_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int i;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+ int i;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (!priv->port_sts[i].enabled)
+ continue;
+
+ qca8k_port_set_status(priv, i, enable);
+ }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+ qca8k_set_pm(priv, 0);
+
+ return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+ qca8k_set_pm(priv, 1);
+
+ return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ qca8k_suspend, qca8k_resume);
+
+static const struct of_device_id qca8k_of_match[] = {
+ { .compatible = "qca,qca8337" },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+ .probe = qca8k_sw_probe,
+ .remove = qca8k_sw_remove,
+ .mdiodrv.driver = {
+ .name = "qca8k",
+ .of_match_table = qca8k_of_match,
+ .pm = &qca8k_pm_ops,
+ },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
new file mode 100644
index 000000000000..201464719531
--- /dev/null
+++ b/drivers/net/dsa/qca8k.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#define QCA8K_NUM_PORTS 7
+
+#define PHY_ID_QCA8337 0x004dd036
+#define QCA8K_ID_QCA8337 0x13
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_CPU_PORT 0
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_ID_M 0xff
+#define QCA8K_MASK_CTRL_ID_S 8
+#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_REG_PORT5_PAD_CTRL 0x008
+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) \
+ ((0x8 + (x & 0x3)) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
+ ((0x10 + (x & 0x3)) << 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0)
+#define QCA8K_PORT_STATUS_SPEED_S 0
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_RX_S 2
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_TX_S 0
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
+#define QCA8K_PORT_VLAN_SVID(x) x
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_S 24
+#define QCA8K_ATU_ADDR3_S 16
+#define QCA8K_ATU_ADDR4_S 8
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_M 0x7f
+#define QCA8K_ATU_PORT_S 16
+#define QCA8K_ATU_ADDR0_S 8
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_M 0xfff
+#define QCA8K_ATU_VID_S 8
+#define QCA8K_ATU_STATUS_M 0xf
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_M 0xf
+#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
+#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+
+/* Pkt edit registers */
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+struct ar8xxx_port_status {
+ struct ethtool_eee eee;
+ struct net_device *bridge_dev;
+ int enabled;
+};
+
+struct qca8k_priv {
+ struct regmap *regmap;
+ struct mii_bus *bus;
+ struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
+ struct dsa_switch *ds;
+ struct mutex reg_mutex;
+};
+
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+};
+
+#endif /* __QCA8K_H */
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 25c55ab05c7d..9133e7926da5 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3089,7 +3089,7 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(new_mode, ioaddr + EL3_CMD);
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
Note that this must be done after each RxReset due to some backwards
compatibility logic in the Cyclone and Tornado ASICs */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2ffd63463299..8cc7467b6c1f 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -24,6 +24,7 @@ source "drivers/net/ethernet/agere/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
source "drivers/net/ethernet/altera/Kconfig"
+source "drivers/net/ethernet/amazon/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1d349e9aa9a6..a09423df83f2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_ALTERA_TSE) += altera/
+obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 1d1069641d81..8af2c88d5b33 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -66,7 +66,7 @@
*/
#define ZEROCOPY
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 38eaea18da23..00f9ee3fc3e5 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -192,8 +192,8 @@ static int desc_list_init(struct net_device *dev)
goto init_error;
skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invidate the data cache of skb->data range when it is write back
- * cache. It will prevent overwritting the new data from DMA
+ /* Invalidate the data cache of skb->data range when it is write back
+ * cache. It will prevent overwriting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
@@ -1205,7 +1205,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp)
}
/* reserve 2 bytes for RXDWA padding */
skb_reserve(new_skb, NET_IP_ALIGN);
- /* Invidate the data cache of skb->data range when it is write back
+ /* Invalidate the data cache of skb->data range when it is write back
* cache. It will prevent overwriting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
@@ -1599,7 +1599,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
/* probe mac */
- /*todo: how to proble? which is revision_register */
+ /*todo: how to probe? which is revision_register */
bfin_write_EMAC_ADDRLO(0x12345678);
if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index bca07c5c94bd..f8df8248035e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1105,27 +1105,6 @@ static void greth_set_msglevel(struct net_device *dev, u32 value)
struct greth_private *greth = netdev_priv(dev);
greth->msg_enable = value;
}
-static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phy = greth->phy;
-
- if (!phy)
- return -ENODEV;
-
- return phy_ethtool_gset(phy, cmd);
-}
-
-static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phy = greth->phy;
-
- if (!phy)
- return -ENODEV;
-
- return phy_ethtool_sset(phy, cmd);
-}
static int greth_get_regs_len(struct net_device *dev)
{
@@ -1157,12 +1136,12 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo
static const struct ethtool_ops greth_ethtool_ops = {
.get_msglevel = greth_get_msglevel,
.set_msglevel = greth_set_msglevel,
- .get_settings = greth_get_settings,
- .set_settings = greth_set_settings,
.get_drvinfo = greth_get_drvinfo,
.get_regs_len = greth_get_regs_len,
.get_regs = greth_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static struct net_device_ops greth_netdev_ops = {
@@ -1224,7 +1203,7 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
static void greth_link_change(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
- struct phy_device *phydev = greth->phy;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
u32 ctrl;
@@ -1307,7 +1286,6 @@ static int greth_mdio_probe(struct net_device *dev)
greth->link = 0;
greth->speed = 0;
greth->duplex = -1;
- greth->phy = phy;
return 0;
}
@@ -1325,6 +1303,7 @@ static int greth_mdio_init(struct greth_private *greth)
{
int ret;
unsigned long timeout;
+ struct net_device *ndev = greth->netdev;
greth->mdio = mdiobus_alloc();
if (!greth->mdio) {
@@ -1349,15 +1328,16 @@ static int greth_mdio_init(struct greth_private *greth)
goto unreg_mdio;
}
- phy_start(greth->phy);
+ phy_start(ndev->phydev);
/* If Ethernet debug link is used make autoneg happen right away */
if (greth->edcl && greth_edcl == 1) {
- phy_start_aneg(greth->phy);
+ phy_start_aneg(ndev->phydev);
timeout = jiffies + 6*HZ;
- while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ while (!phy_aneg_done(ndev->phydev) &&
+ time_before(jiffies, timeout)) {
}
- phy_read_status(greth->phy);
+ phy_read_status(ndev->phydev);
greth_link_change(greth->netdev);
}
@@ -1569,8 +1549,8 @@ static int greth_of_remove(struct platform_device *of_dev)
dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
- if (greth->phy)
- phy_stop(greth->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
mdiobus_unregister(greth->mdio);
unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 92dd918e4a83..9c07140a5d8d 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -123,7 +123,6 @@ struct greth_private {
struct napi_struct napi;
spinlock_t devlock;
- struct phy_device *phy;
struct mii_bus *mdio;
unsigned int link;
unsigned int speed;
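
The greth conversion above follows the usual phylib pattern: the driver drops its cached struct phy_device pointer and relies on the one phylib already keeps in dev->phydev, which is what lets the generic link_ksettings helpers replace the driver-private get_settings/set_settings callbacks. A minimal sketch of that pattern for a hypothetical driver (example_ethtool_ops is illustrative, not part of this patch), assuming the PHY has been attached so dev->phydev is valid:

#include <linux/ethtool.h>
#include <linux/phy.h>

/* The generic helpers operate on dev->phydev, so no driver-private
 * get_settings/set_settings implementations are needed any more.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
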
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index c83ebae73d91..906683851c7d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2961,7 +2961,7 @@ static void et131x_get_drvinfo(struct net_device *netdev,
sizeof(info->bus_info));
}
-static struct ethtool_ops et131x_ethtool_ops = {
+static const struct ethtool_ops et131x_ethtool_ops = {
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
new file mode 100644
index 000000000000..99b30353541a
--- /dev/null
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -0,0 +1,27 @@
+#
+# Amazon network device configuration
+#
+
+config NET_VENDOR_AMAZON
+ bool "Amazon Devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Amazon devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_AMAZON
+
+config ENA_ETHERNET
+ tristate "Elastic Network Adapter (ENA) support"
+ depends on (PCI_MSI && X86)
+ ---help---
+	  This driver supports the Elastic Network Adapter (ENA).
+
+ To compile this driver as a module, choose M here.
+ The module will be called ena.
+
+endif #NET_VENDOR_AMAZON
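
As an illustrative aside (not part of the patch), a .config fragment that builds the new driver as a module might look like this, assuming an x86 kernel that already satisfies the PCI_MSI dependency declared above:

CONFIG_PCI_MSI=y
CONFIG_NET_VENDOR_AMAZON=y
CONFIG_ENA_ETHERNET=m
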
diff --git a/drivers/net/ethernet/amazon/Makefile b/drivers/net/ethernet/amazon/Makefile
new file mode 100644
index 000000000000..8e0b73f60d51
--- /dev/null
+++ b/drivers/net/ethernet/amazon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Amazon network device drivers.
+#
+
+obj-$(CONFIG_ENA_ETHERNET) += ena/
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
new file mode 100644
index 000000000000..eaeeae06c5d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Elastic Network Adapter (ENA) device drivers.
+#
+
+obj-$(CONFIG_ENA_ETHERNET) += ena.o
+
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
new file mode 100644
index 000000000000..a46e749bf226
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -0,0 +1,973 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_ADMIN_H_
+#define _ENA_ADMIN_H_
+
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+
+ ENA_ADMIN_DESTROY_SQ = 2,
+
+ ENA_ADMIN_CREATE_CQ = 3,
+
+ ENA_ADMIN_DESTROY_CQ = 4,
+
+ ENA_ADMIN_GET_FEATURE = 8,
+
+ ENA_ADMIN_SET_FEATURE = 9,
+
+ ENA_ADMIN_GET_STATS = 11,
+};
+
+enum ena_admin_aq_completion_status {
+ ENA_ADMIN_SUCCESS = 0,
+
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+
+ ENA_ADMIN_BAD_OPCODE = 2,
+
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
+
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+};
+
+enum ena_admin_aq_feature_id {
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+
+ ENA_ADMIN_MTU = 14,
+
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+
+ ENA_ADMIN_AENQ_CONFIG = 26,
+
+ ENA_ADMIN_LINK_CONFIG = 27,
+
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+};
+
+enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+
+ /* descriptors and headers are in device memory (a.k.a Low Latency
+ * Queue)
+ */
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+};
+
+enum ena_admin_link_types {
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
+};
+
+enum ena_admin_completion_policy_type {
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+
+ /* current queue head pointer is updated in OS memory upon sq
+ * descriptor request
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+
+ /* current queue head pointer is updated in OS memory for each sq
+ * descriptor
+ */
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+};
+
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
+enum ena_admin_get_stats_type {
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+};
+
+enum ena_admin_get_stats_scope {
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+
+ ENA_ADMIN_ETH_TRAFFIC = 1,
+};
+
+struct ena_admin_aq_common_desc {
+ /* 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command_id;
+
+ /* as appears in ena_admin_aq_opcode */
+ u8 opcode;
+
+ /* 0 : phase
+ * 1 : ctrl_data - control buffer address valid
+ * 2 : ctrl_data_indirect - control buffer address
+ * points to list of pages with addresses of control
+ * buffers
+ * 7:3 : reserved3
+ */
+ u8 flags;
+};
+
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
+ */
+struct ena_admin_ctrl_buff_info {
+ u32 length;
+
+ struct ena_common_mem_addr address;
+};
+
+struct ena_admin_sq {
+ u16 sq_idx;
+
+ /* 4:0 : reserved
+ * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx
+ */
+ u8 sq_identity;
+
+ u8 reserved1;
+};
+
+struct ena_admin_aq_entry {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ u32 inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ u32 inline_data_w4[12];
+};
+
+struct ena_admin_acq_common_desc {
+ /* command identifier to associate it with the aq descriptor
+ * 11:0 : command_id
+ * 15:12 : reserved12
+ */
+ u16 command;
+
+ u8 status;
+
+ /* 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 extended_status;
+
+	/* serves as a hint for which AQ entries can be revoked */
+ u16 sq_head_indx;
+};
+
+struct ena_admin_acq_entry {
+ struct ena_admin_acq_common_desc acq_common_descriptor;
+
+ u32 response_specific_data[14];
+};
+
+struct ena_admin_aq_create_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved0_w1
+ * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
+ */
+ u8 sq_identity;
+
+ u8 reserved8_w1;
+
+	/* 3:0 : placement_policy - Describes where the SQ
+	 * descriptor ring and the SQ packet headers reside:
+	 * 0x1 - descriptors and headers are in OS memory,
+	 * 0x3 - descriptors and headers are in device memory
+	 * (a.k.a Low Latency Queue)
+	 * 6:4 : completion_policy - Describes what policy
+	 * to use for generating a completion entry (cqe) in
+ * the CQ associated with this SQ: 0x0 - cqe for each
+ * sq descriptor, 0x1 - cqe upon request in sq
+ * descriptor, 0x2 - current queue head pointer is
+ * updated in OS memory upon sq descriptor request
+ * 0x3 - current queue head pointer is updated in OS
+ * memory for each sq descriptor
+ * 7 : reserved15_w1
+ */
+ u8 sq_caps_2;
+
+	/* 0 : is_physically_contiguous - Describes whether the
+	 * queue ring memory is allocated in physically
+	 * contiguous pages or split.
+ * 7:1 : reserved17_w1
+ */
+ u8 sq_caps_3;
+
+ /* associated completion queue id. This CQ must be created prior to
+ * SQ creation
+ */
+ u16 cq_idx;
+
+ /* submission queue depth in entries */
+ u16 sq_depth;
+
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
+ */
+ struct ena_common_mem_addr sq_ba;
+
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
+ */
+ struct ena_common_mem_addr sq_head_writeback;
+
+ u32 reserved0_w7;
+
+ u32 reserved0_w8;
+};
+
+enum ena_admin_sq_direction {
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
+};
+
+struct ena_admin_acq_create_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ u16 sq_idx;
+
+ u16 reserved;
+
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 sq_doorbell_offset;
+
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ u32 llq_descriptors_offset;
+
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
+ */
+ u32 llq_headers_offset;
+};
+
+struct ena_admin_aq_destroy_sq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_sq sq;
+};
+
+struct ena_admin_acq_destroy_sq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+struct ena_admin_aq_create_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ /* 4:0 : reserved5
+ * 5 : interrupt_mode_enabled - if set, cq operates
+ * in interrupt mode, otherwise - polling
+ * 7:6 : reserved6
+ */
+ u8 cq_caps_1;
+
+ /* 4:0 : cq_entry_size_words - size of CQ entry in
+ * 32-bit words, valid values: 4, 8.
+ * 7:5 : reserved7
+ */
+ u8 cq_caps_2;
+
+ /* completion queue depth in # of entries. must be power of 2 */
+ u16 cq_depth;
+
+ /* msix vector assigned to this cq */
+ u32 msix_vector;
+
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
+ */
+ struct ena_common_mem_addr cq_ba;
+};
+
+struct ena_admin_acq_create_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ u16 cq_idx;
+
+ /* actual cq depth in number of entries */
+ u16 cq_actual_depth;
+
+ u32 numa_node_register_offset;
+
+ u32 cq_head_db_register_offset;
+
+ u32 cq_interrupt_unmask_register_offset;
+};
+
+struct ena_admin_aq_destroy_cq_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ u16 cq_idx;
+
+ u16 reserved1;
+};
+
+struct ena_admin_acq_destroy_cq_resp_desc {
+ struct ena_admin_acq_common_desc acq_common_desc;
+};
+
+/* ENA AQ Get Statistics command. Extended statistics are placed in control
+ * buffer pointed by AQ entry
+ */
+struct ena_admin_aq_get_stats_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ union {
+ /* command specific inline data */
+ u32 inline_data_w1[3];
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+ } u;
+
+ /* stats type as defined in enum ena_admin_get_stats_type */
+ u8 type;
+
+ /* stats scope defined in enum ena_admin_get_stats_scope */
+ u8 scope;
+
+ u16 reserved3;
+
+ /* queue id. used when scope is specific_queue */
+ u16 queue_idx;
+
+ /* device id, value 0xFFFF means mine. only privileged device can get
+ * stats of other device
+ */
+ u16 device_id;
+};
+
+/* Basic Statistics Command. */
+struct ena_admin_basic_stats {
+ u32 tx_bytes_low;
+
+ u32 tx_bytes_high;
+
+ u32 tx_pkts_low;
+
+ u32 tx_pkts_high;
+
+ u32 rx_bytes_low;
+
+ u32 rx_bytes_high;
+
+ u32 rx_pkts_low;
+
+ u32 rx_pkts_high;
+
+ u32 rx_drops_low;
+
+ u32 rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ u8 flags;
+
+ /* as appears in ena_admin_aq_feature_id */
+ u8 feature_id;
+
+ u16 reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+ u32 impl_id;
+
+ u32 device_version;
+
+ /* bitmap of ena_admin_aq_feature_id */
+ u32 supported_features;
+
+ u32 reserved3;
+
+	/* Indicates how many bits are used for physical address access. */
+	u32 phys_addr_width;
+
+	/* Indicates how many bits are used for virtual address access. */
+ u32 virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ u8 mac_addr[6];
+
+ u8 reserved7[2];
+
+ u32 max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+ /* including LLQs */
+ u32 max_sq_num;
+
+ u32 max_sq_depth;
+
+ u32 max_cq_num;
+
+ u32 max_cq_depth;
+
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+ u32 max_header_size;
+
+ /* Maximum Descriptors number, including meta descriptor, allowed for
+ * a single Tx packet
+ */
+ u16 max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ u16 max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+ /* exclude L2 */
+ u32 mtu;
+};
+
+struct ena_admin_set_feature_host_attr_desc {
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* debug area size */
+ u32 debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+ /* interrupt delay granularity in usec */
+ u16 intr_delay_resolution;
+
+ u16 reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+ /* Link speed in Mb */
+ u32 speed;
+
+	/* bit field of enum ena_admin_link_types */
+ u32 supported;
+
+ /* 0 : autoneg
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ u32 supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ u32 enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
+ */
+ u32 tx;
+
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ u32 rx_supported;
+
+ u32 rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ ENA_ADMIN_CRC32 = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+ u32 keys_num;
+
+ u32 reserved;
+
+ u32 key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+ u32 supported_func;
+
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
+ */
+ u32 selected_func;
+
+ /* initial value */
+ u32 init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = 0,
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = 1,
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = 2,
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = 5,
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = 6,
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = 7,
+};
+
+struct ena_admin_proto_input {
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ u16 fields;
+
+ u16 reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+ /* supported hash input sorting
+ * 1 : L3_sort - support swap L3 addresses if DA is
+ * smaller than SA
+	 * 2 : L4_sort - support swap L4 ports if DP is
+	 * smaller than SP
+ */
+ u16 supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swap L3 addresses if
+ * DA smaller than SA
+ * 2 : enable_L4_sort - enable swap L4 ports if DP
+ * smaller than SP
+ */
+ u16 enabled_input_sort;
+};
+
+enum ena_admin_os_type {
+ ENA_ADMIN_OS_LINUX = 1,
+
+ ENA_ADMIN_OS_WIN = 2,
+
+ ENA_ADMIN_OS_DPDK = 3,
+
+ ENA_ADMIN_OS_FREEBSD = 4,
+
+ ENA_ADMIN_OS_IPXE = 5,
+};
+
+struct ena_admin_host_info {
+ /* defined in enum ena_admin_os_type */
+ u32 os_type;
+
+ /* os distribution string format */
+ u8 os_dist_str[128];
+
+ /* OS distribution numeric format */
+ u32 os_dist;
+
+ /* kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
+ */
+ u32 driver_version;
+
+ /* features bitmap */
+ u32 supported_network_features[4];
+};
+
+struct ena_admin_rss_ind_table_entry {
+ u16 cq_idx;
+
+ u16 reserved;
+};
+
+struct ena_admin_feature_rss_ind_table {
+ /* min supported table size (2^min_size) */
+ u16 min_size;
+
+ /* max supported table size (2^max_size) */
+ u16 max_size;
+
+ /* table size (2^size) */
+ u16 size;
+
+ u16 reserved;
+
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
+ u32 inline_index;
+
+	/* used for updating a single entry, ignored when setting the entire
+ * table through the control buffer.
+ */
+ struct ena_admin_rss_ind_table_entry inline_entry;
+};
+
+struct ena_admin_get_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ u32 raw[11];
+};
+
+struct ena_admin_get_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+
+ struct ena_admin_device_attr_feature_desc dev_attr;
+
+ struct ena_admin_queue_feature_desc max_queue;
+
+ struct ena_admin_feature_aenq_desc aenq;
+
+ struct ena_admin_get_feature_link_desc link;
+
+ struct ena_admin_feature_offload_desc offload;
+
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ struct ena_admin_feature_rss_ind_table ind_table;
+
+ struct ena_admin_feature_intr_moder_desc intr_moderation;
+ } u;
+};
+
+struct ena_admin_set_feat_cmd {
+ struct ena_admin_aq_common_desc aq_common_descriptor;
+
+ struct ena_admin_ctrl_buff_info control_buffer;
+
+ struct ena_admin_get_set_feature_common_desc feat_common;
+
+ union {
+ u32 raw[11];
+
+ /* mtu size */
+ struct ena_admin_set_feature_mtu_desc mtu;
+
+ /* host attributes */
+ struct ena_admin_set_feature_host_attr_desc host_attr;
+
+ /* AENQ configuration */
+ struct ena_admin_feature_aenq_desc aenq;
+
+ /* rss flow hash function */
+ struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
+
+ /* rss flow hash input */
+ struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
+
+ /* rss indirection table */
+ struct ena_admin_feature_rss_ind_table ind_table;
+ } u;
+};
+
+struct ena_admin_set_feat_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ union {
+ u32 raw[14];
+ } u;
+};
+
+struct ena_admin_aenq_common_desc {
+ u16 group;
+
+ u16 syndrom;
+
+ /* 0 : phase */
+ u8 flags;
+
+ u8 reserved1[3];
+
+ u32 timestamp_low;
+
+ u32 timestamp_high;
+};
+
+/* asynchronous event notification groups */
+enum ena_admin_aenq_group {
+ ENA_ADMIN_LINK_CHANGE = 0,
+
+ ENA_ADMIN_FATAL_ERROR = 1,
+
+ ENA_ADMIN_WARNING = 2,
+
+ ENA_ADMIN_NOTIFICATION = 3,
+
+ ENA_ADMIN_KEEP_ALIVE = 4,
+
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+};
+
+enum ena_admin_aenq_notification_syndrom {
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
+};
+
+struct ena_admin_aenq_entry {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* command specific inline data */
+ u32 inline_data_w4[12];
+};
+
+struct ena_admin_aenq_link_change_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ /* 0 : link_status */
+ u32 flags;
+};
+
+struct ena_admin_ena_mmio_req_read_less_resp {
+ u16 req_id;
+
+ u16 reg_off;
+
+ /* value is valid when poll is cleared */
+ u32 reg_val;
+};
+
+/* aq_common_desc */
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+
+/* sq */
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+
+/* acq_common_desc */
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aq_create_sq_cmd */
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
+
+/* aq_create_cq_cmd */
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
+#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+
+/* get_set_feature_common_desc */
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+
+/* get_feature_link_desc */
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+
+/* feature_offload_desc */
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+
+/* feature_rss_flow_hash_function */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
+
+/* feature_rss_flow_hash_input */
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
+
+/* host_info */
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+
+/* aenq_common_desc */
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+
+/* aenq_link_change_desc */
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+
+#endif /*_ENA_ADMIN_H_ */
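
The *_SHIFT/*_MASK pairs above encode the bit layouts documented in the struct comments earlier in this header. An illustrative helper (hypothetical, not part of this file) showing how they would be used to pack the SQ direction into ena_admin_sq.sq_identity:

static inline void example_sq_set_direction(struct ena_admin_sq *sq,
					    enum ena_admin_sq_direction dir)
{
	/* dir is ENA_ADMIN_SQ_DIRECTION_TX or _RX and occupies bits 7:5 of
	 * sq_identity; bits 4:0 (reserved) are left untouched.
	 */
	sq->sq_identity &= ~ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
	sq->sq_identity |= (dir << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
			   ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
}
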
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
new file mode 100644
index 000000000000..3066d9c99984
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -0,0 +1,2666 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ena_com.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (1000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ struct completion wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+		pr_err("dma address has more bits than the device supports\n");
+ return -EINVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high = (u64)addr >> 32;
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+ sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
+
+ if (!sq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
+
+ sq->db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
+
+ if (!cq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ cq->head = 0;
+ cq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ struct ena_com_aenq *aenq = &dev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
+
+ if (!aenq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+
+ aenq_caps = 0;
+ aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+ << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+ pr_err("aenq handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ aenq->aenq_handlers = aenq_handlers;
+
+ return 0;
+}
+
+static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ struct ena_comp_ctx *comp_ctx)
+{
+ comp_ctx->occupied = false;
+ atomic_dec(&queue->outstanding_cmds);
+}
+
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+{
+ if (unlikely(command_id >= queue->q_depth)) {
+ pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+ return NULL;
+ }
+
+ if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ pr_err("Completion context is occupied\n");
+ return NULL;
+ }
+
+ if (capture) {
+ atomic_inc(&queue->outstanding_cmds);
+ queue->comp_ctx[command_id].occupied = true;
+ }
+
+ return &queue->comp_ctx[command_id];
+}
+
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 tail_masked, cmd_id;
+ u16 queue_size_mask;
+ u16 cnt;
+
+ queue_size_mask = admin_queue->q_depth - 1;
+
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+ cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ if (cnt >= admin_queue->q_depth) {
+ pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
+ admin_queue->sq.tail, admin_queue->sq.head,
+ admin_queue->q_depth);
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(-ENOSPC);
+ }
+
+ cmd_id = admin_queue->curr_cmd_id;
+
+ cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
+ ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+
+ cmd->aq_common_descriptor.command_id |= cmd_id &
+ ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(-EINVAL);
+
+ comp_ctx->status = ENA_CMD_SUBMITTED;
+ comp_ctx->comp_size = (u32)comp_size_in_bytes;
+ comp_ctx->user_cqe = comp;
+ comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+
+ reinit_completion(&comp_ctx->wait_event);
+
+ memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
+
+ admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
+ queue_size_mask;
+
+ admin_queue->sq.tail++;
+ admin_queue->stats.submitted_cmd++;
+
+ if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
+ admin_queue->sq.phase = !admin_queue->sq.phase;
+
+ writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
+
+ return comp_ctx;
+}
+
+static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+{
+ size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(queue, i, false);
+ if (comp_ctx)
+ init_completion(&comp_ctx->wait_event);
+ }
+
+ return 0;
+}
+
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+{
+ unsigned long flags;
+ struct ena_comp_ctx *comp_ctx;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ if (unlikely(!admin_queue->running_state)) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ return ERR_PTR(-ENODEV);
+ }
+ comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
+ cmd_size_in_bytes,
+ comp,
+ comp_size_in_bytes);
+ if (unlikely(IS_ERR(comp_ctx)))
+ admin_queue->running_state = false;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ return comp_ctx;
+}
+
+static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_sq *io_sq)
+{
+ size_t size;
+ int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+ sizeof(struct ena_eth_io_rx_desc);
+
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+ } else {
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ }
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ io_sq->tail = 0;
+ io_sq->next_to_comp = 0;
+ io_sq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+ int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+
+ /* Use the basic completion descriptor for Rx */
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ prev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_cq->cdesc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ io_cq->cdesc_addr.virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+
+ if (!io_cq->cdesc_addr.virt_addr) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ io_cq->phase = 1;
+ io_cq->head = 0;
+
+ return 0;
+}
+
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
+{
+ struct ena_comp_ctx *comp_ctx;
+ u16 cmd_id;
+
+ cmd_id = cqe->acq_common_descriptor.command &
+ ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+
+ comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
+ if (unlikely(!comp_ctx)) {
+ pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ admin_queue->running_state = false;
+ return;
+ }
+
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ comp_ctx->comp_status = cqe->acq_common_descriptor.status;
+
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+
+ if (!admin_queue->polling)
+ complete(&comp_ctx->wait_event);
+}
+
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+{
+ struct ena_admin_acq_entry *cqe = NULL;
+ u16 comp_num = 0;
+ u16 head_masked;
+ u8 phase;
+
+ head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
+ phase = admin_queue->cq.phase;
+
+ cqe = &admin_queue->cq.entries[head_masked];
+
+ /* Go over all the completions */
+ while ((cqe->acq_common_descriptor.flags &
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+		/* Do not read the rest of the completion entry before the
+		 * phase bit has been validated
+ */
+ rmb();
+ ena_com_handle_single_admin_completion(admin_queue, cqe);
+
+ head_masked++;
+ comp_num++;
+ if (unlikely(head_masked == admin_queue->q_depth)) {
+ head_masked = 0;
+ phase = !phase;
+ }
+
+ cqe = &admin_queue->cq.entries[head_masked];
+ }
+
+ admin_queue->cq.head += comp_num;
+ admin_queue->cq.phase = phase;
+ admin_queue->sq.head += comp_num;
+ admin_queue->stats.completed_cmd += comp_num;
+}
+
+static int ena_com_comp_status_to_errno(u8 comp_status)
+{
+ if (unlikely(comp_status != 0))
+ pr_err("admin command failed[%u]\n", comp_status);
+
+ if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
+ return -EINVAL;
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+ return 0;
+ case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
+ return -ENOMEM;
+ case ENA_ADMIN_UNSUPPORTED_OPCODE:
+ return -EPERM;
+ case ENA_ADMIN_BAD_OPCODE:
+ case ENA_ADMIN_MALFORMED_REQUEST:
+ case ENA_ADMIN_ILLEGAL_PARAMETER:
+ case ENA_ADMIN_UNKNOWN_ERROR:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ u32 start_time;
+ int ret;
+
+ start_time = ((u32)jiffies_to_usecs(jiffies));
+
+ while (comp_ctx->status == ENA_CMD_SUBMITTED) {
+ if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
+ ADMIN_CMD_TIMEOUT_US) {
+ pr_err("Wait for completion (polling) timeout\n");
+ /* ENA didn't have any completion */
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ ret = -ETIME;
+ goto err;
+ }
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ msleep(100);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ pr_err("Command was aborted\n");
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+ comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
+
+	/* In case the command wasn't completed, find out the root cause.
+	 * There might be 2 kinds of errors:
+	 * 1) No completion (timeout reached)
+	 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
+ */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ if (comp_ctx->status == ENA_CMD_COMPLETED)
+			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
+ else
+ pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method reads a hardware device register by posting writes
+ * and waiting for the response.
+ * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret;
+ unsigned long flags;
+ int i;
+
+ might_sleep();
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return readl(ena_dev->reg_bar + offset);
+
+ spin_lock_irqsave(&mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+	/* make sure read_resp->req_id gets updated before the hw can write
+ * there
+ */
+ wmb();
+
+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ udelay(1);
+ }
+
+ if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+		pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+ pr_err("Read failure: wrong offset provided");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+ }
+err:
+ spin_unlock_irqrestore(&mmio_read->lock, flags);
+
+ return ret;
+}
+
+/* There are two types to wait for completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on wait queue until the completion is ready
+ * (or the timeout expired).
+ * It is expected that the IRQ called ena_com_handle_admin_completion
+ * to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+ pr_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
+ else
+ devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ /* The resolution of the timeout is 100ms */
+ msleep(100);
+ }
+
+ return -ETIME;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+	/* The device attributes feature is always supported */
+ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
+ !(ena_dev->supported_features & feature_mask))
+ return false;
+
+ return true;
+}
+
+static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_get_feat_cmd get_cmd;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+ pr_info("Feature %d isn't supported\n", feature_id);
+ return -EPERM;
+ }
+
+ memset(&get_cmd, 0x0, sizeof(get_cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
+
+ if (control_buff_size)
+ get_cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ else
+ get_cmd.aq_common_descriptor.flags = 0;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd.control_buffer.address,
+ control_buf_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ get_cmd.control_buffer.length = control_buff_size;
+
+ get_cmd.feat_common.feature_id = feature_id;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)
+ &get_cmd,
+ sizeof(get_cmd),
+ (struct ena_admin_acq_entry *)
+ get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
+
+ return ret;
+}
+
+static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *get_resp,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ return ena_com_get_feature_ex(ena_dev,
+ get_resp,
+ feature_id,
+ 0,
+ 0);
+}
+
+static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ rss->hash_key =
+ dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_key))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ rss->hash_key, rss->hash_key_dma_addr);
+ rss->hash_key = NULL;
+}
+
+static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ rss->hash_ctrl =
+ dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_ctrl))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ rss->hash_ctrl = NULL;
+}
+
+static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ u16 log_size)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ size_t tbl_size;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ if (unlikely(ret))
+ return ret;
+
+ if ((get_resp.u.ind_table.min_size > log_size) ||
+ (get_resp.u.ind_table.max_size < log_size)) {
+ pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ 1 << log_size, 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
+ return -EINVAL;
+ }
+
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rss->rss_ind_tbl =
+ dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
+ &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+ rss->host_rss_ind_tbl =
+ devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+ rss->tbl_log_size = log_size;
+
+ return 0;
+
+mem_err2:
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+mem_err1:
+ rss->tbl_log_size = 0;
+ return -ENOMEM;
+}
+
+static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ size_t tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ if (rss->rss_ind_tbl)
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+
+ if (rss->host_rss_ind_tbl)
+ devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ rss->host_rss_ind_tbl = NULL;
+}
+
+static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq, u16 cq_idx)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_sq_cmd create_cmd;
+ struct ena_admin_acq_create_sq_resp_desc cmd_completion;
+ u8 direction;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ create_cmd.sq_identity |= (direction <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+
+ create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
+
+ create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+
+ create_cmd.sq_caps_3 |=
+ ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+
+ create_cmd.cq_idx = cq_idx;
+ create_cmd.sq_depth = io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ pr_err("Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_sq->idx = cmd_completion.sq_idx;
+
+ io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ (uintptr_t)cmd_completion.sq_doorbell_offset);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
+ + cmd_completion.llq_headers_offset);
+
+ io_sq->desc_addr.pbuf_dev_addr =
+ (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
+ cmd_completion.llq_descriptors_offset);
+ }
+
+ pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+}
+
+static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_com_io_sq *io_sq;
+ u16 qid;
+ int i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ qid = rss->host_rss_ind_tbl[i];
+ if (qid >= ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+
+ if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
+ return -EINVAL;
+
+ rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
+ }
+
+ return 0;
+}
+
+static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
+{
+ u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
+ struct ena_rss *rss = &ena_dev->rss;
+ u8 idx;
+ u16 i;
+
+ for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
+ dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
+
+ for (i = 0; i < 1 << rss->tbl_log_size; i++) {
+ if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+ idx = (u8)rss->rss_ind_tbl[i].cq_idx;
+
+ if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
+ return -EINVAL;
+
+ rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
+ }
+
+ return 0;
+}
+
+static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ size_t size;
+
+ size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
+
+ ena_dev->intr_moder_tbl =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ if (!ena_dev->intr_moder_tbl)
+ return -ENOMEM;
+
+ ena_com_config_default_interrupt_moderation_table(ena_dev);
+
+ return 0;
+}
+
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int i;
+
+ if (!intr_delay_resolution) {
+ pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ intr_delay_resolution = 1;
+ }
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
+
+ /* update Rx */
+ for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
+ intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+
+ /* update Tx */
+ ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+}
+
+/*****************************************************************************/
+/******************************* API ******************************/
+/*****************************************************************************/
+
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size)
+{
+ struct ena_comp_ctx *comp_ctx;
+ int ret;
+
+ comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
+ comp, comp_size);
+ if (unlikely(IS_ERR(comp_ctx))) {
+ pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
+ return PTR_ERR(comp_ctx);
+ }
+
+ ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
+ if (unlikely(ret)) {
+ if (admin_queue->running_state)
+ pr_err("Failed to process command. ret = %d\n", ret);
+ else
+ pr_debug("Failed to process command. ret = %d\n", ret);
+ }
+ return ret;
+}
+
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_create_cq_cmd create_cmd;
+ struct ena_admin_acq_create_cq_resp_desc cmd_completion;
+ int ret;
+
+ memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+
+ create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
+
+ create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ create_cmd.cq_caps_1 |=
+ ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+
+ create_cmd.msix_vector = io_cq->msix_vector;
+ create_cmd.cq_depth = io_cq->q_depth;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &create_cmd.cq_ba,
+ io_cq->cdesc_addr.phys_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+ pr_err("Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+ io_cq->idx = cmd_completion.cq_idx;
+
+ io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_interrupt_unmask_register_offset);
+
+ if (cmd_completion.cq_head_db_register_offset)
+ io_cq->cq_head_db_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.cq_head_db_register_offset);
+
+ if (cmd_completion.numa_node_register_offset)
+ io_cq->numa_node_cfg_reg =
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+ pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+}
+
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq)
+{
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Invalid queue number %d but the max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+
+ *io_sq = &ena_dev->io_sq_queues[qid];
+ *io_cq = &ena_dev->io_cq_queues[qid];
+
+ return 0;
+}
+
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+ if (!admin_queue->comp_ctx)
+ return;
+
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
+ if (unlikely(!comp_ctx))
+ break;
+
+ comp_ctx->status = ENA_CMD_ABORTED;
+
+ complete(&comp_ctx->wait_event);
+ }
+}
+
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ }
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+}
+
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
+
+ destroy_cmd.cq_idx = io_cq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+ pr_err("Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+}
+
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.running_state;
+}
+
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+}
+
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
+{
+ u16 depth = ena_dev->aenq.q_depth;
+
+ WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+
+ /* Init head_db to mark that all entries in the queue
+ * are initially available
+ */
+ writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ if (ret) {
+ pr_info("Can't get aenq configuration\n");
+ return ret;
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+ pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ get_resp.u.aenq.supported_groups, groups_flag);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
+ cmd.u.aenq.enabled_groups = groups_flag;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+{
+ u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ int width;
+
+ if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
+ ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+
+ pr_debug("ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+ pr_err("DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+ ena_dev->dma_addr_bits = width;
+
+ return width;
+}
+
+int ena_com_validate_version(struct ena_com_dev *ena_dev)
+{
+ u32 ver;
+ u32 ctrl_ver;
+ u32 ctrl_ver_masked;
+
+ /* Make sure the ENA version and the controller version are at least
+ * as new as the driver expects
+ */
+ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+ (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ pr_info("ena device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+ if (ver < MIN_ENA_VER) {
+ pr_err("ENA version is lower than the minimal version the driver supports\n");
+ return -1;
+ }
+
+ pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+
+ ctrl_ver_masked =
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
+
+ /* Validate the ctrl version without the implementation ID */
+ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
+ pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
+
+ if (admin_queue->comp_ctx)
+ devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+ admin_queue->comp_ctx = NULL;
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr);
+ aenq->entries = NULL;
+}
+
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
+{
+ ena_dev->admin_queue.polling = polling;
+}
+
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+ mmio_read->read_resp =
+ dma_zalloc_coherent(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (unlikely(!mmio_read->read_resp))
+ return -ENOMEM;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ mmio_read->read_resp->req_id = 0x0;
+ mmio_read->seq_num = 0x0;
+ mmio_read->readless_supported = true;
+
+ return 0;
+}
+
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ mmio_read->readless_supported = readless_supported;
+}
+
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+
+ mmio_read->read_resp = NULL;
+}
+
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ u32 addr_low, addr_high;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+}
+
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
+ int ret;
+
+ dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+ pr_err("Device isn't ready, abort com init\n");
+ return -ENODEV;
+ }
+
+ admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
+
+ admin_queue->q_dmadev = ena_dev->dmadev;
+ admin_queue->polling = false;
+ admin_queue->curr_cmd_id = 0;
+
+ atomic_set(&admin_queue->outstanding_cmds, 0);
+
+ if (init_spinlock)
+ spin_lock_init(&admin_queue->q_lock);
+
+ ret = ena_com_init_comp_ctxt(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_sq(admin_queue);
+ if (ret)
+ goto error;
+
+ ret = ena_com_admin_init_cq(admin_queue);
+ if (ret)
+ goto error;
+
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
+
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
+
+ aq_caps = 0;
+ aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
+ aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+
+ acq_caps = 0;
+ acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
+ acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
+
+ writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
+ ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
+ if (ret)
+ goto error;
+
+ admin_queue->running_state = true;
+
+ return 0;
+error:
+ ena_com_admin_destroy(ena_dev);
+
+ return ret;
+}
+
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[ctx->qid];
+ io_cq = &ena_dev->io_cq_queues[ctx->qid];
+
+ memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
+ memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+
+ /* Init CQ */
+ io_cq->q_depth = ctx->queue_size;
+ io_cq->direction = ctx->direction;
+ io_cq->qid = ctx->qid;
+
+ io_cq->msix_vector = ctx->msix_vector;
+
+ io_sq->q_depth = ctx->queue_size;
+ io_sq->direction = ctx->direction;
+ io_sq->qid = ctx->qid;
+
+ io_sq->mem_queue_type = ctx->mem_queue_type;
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+ io_sq->tx_max_header_size =
+ min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+ goto error;
+ ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_cq(ena_dev, io_cq);
+ if (ret)
+ goto error;
+
+ ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
+ if (ret)
+ goto destroy_io_cq;
+
+ return 0;
+
+destroy_io_cq:
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+error:
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+ return ret;
+}
+
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+{
+ struct ena_com_io_sq *io_sq;
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+ pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+
+ io_sq = &ena_dev->io_sq_queues[qid];
+ io_cq = &ena_dev->io_cq_queues[qid];
+
+ ena_com_destroy_io_sq(ena_dev, io_sq);
+ ena_com_destroy_io_cq(ena_dev, io_cq);
+
+ ena_com_io_queue_free(ena_dev, io_sq, io_cq);
+}
+
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp)
+{
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+}
+
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_DEVICE_ATTRIBUTES);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
+ sizeof(get_resp.u.dev_attr));
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_AENQ_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
+ sizeof(get_resp.u.aenq));
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (rc)
+ return rc;
+
+ memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
+ sizeof(get_resp.u.offload));
+
+ return 0;
+}
+
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
+{
+ ena_com_handle_admin_completion(&ena_dev->admin_queue);
+}
+
+/* ena_com_get_specific_aenq_cb:
+ * return the handler that is relevant to the specific event group
+ */
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+ u16 group)
+{
+ struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+
+ if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
+ return aenq_handlers->handlers[group];
+
+ return aenq_handlers->unimplemented_handler;
+}
+
+/* ena_com_aenq_intr_handler:
+ * handles incoming AENQ events:
+ * pops events from the queue and applies the matching handler
+ */
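+/* Note on the phase bit (illustrative): the device writes every AENQ entry
+ * with the current phase bit set in its flags, so this handler consumes
+ * entries only while the entry phase matches aenq->phase. Whenever
+ * masked_head wraps around q_depth the expected phase is flipped, which
+ * guarantees that stale entries from the previous pass are never
+ * re-processed.
+ */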
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+{
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
+ ena_aenq_handler handler_cb;
+ u16 masked_head, processed = 0;
+ u8 phase;
+
+ masked_head = aenq->head & (aenq->q_depth - 1);
+ phase = aenq->phase;
+ aenq_e = &aenq->entries[masked_head]; /* Get first entry */
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+ while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
+ phase) {
+ pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrom,
+ (u64)aenq_common->timestamp_low +
+ ((u64)aenq_common->timestamp_high << 32));
+
+ /* Handle specific event */
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
+ aenq_common->group);
+ handler_cb(data, aenq_e); /* call the actual event handler */
+
+ /* Get next event entry */
+ masked_head++;
+ processed++;
+
+ if (unlikely(masked_head == aenq->q_depth)) {
+ masked_head = 0;
+ phase = !phase;
+ }
+ aenq_e = &aenq->entries[masked_head];
+ aenq_common = &aenq_e->aenq_common_desc;
+ }
+
+ aenq->head += processed;
+ aenq->phase = phase;
+
+ /* Don't update aenq doorbell if there weren't any processed events */
+ if (!processed)
+ return;
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+ writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+}
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+{
+ u32 stat, timeout, cap, reset_val;
+ int rc;
+
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+ (cap == ENA_MMIO_READ_TIMEOUT))) {
+ pr_err("Reg read32 timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+ pr_err("Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+ timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
+ ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ if (timeout == 0) {
+ pr_err("Invalid timeout value\n");
+ return -EINVAL;
+ }
+
+ /* start reset */
+ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+
+ /* Write again the MMIO read request address */
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+ pr_err("Reset indication didn't turn on\n");
+ return rc;
+ }
+
+ /* reset done */
+ writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+ pr_err("Reset indication didn't turn off\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
+
+ return ret;
+}
+
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+ pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags = 0;
+ cmd.feat_common.feature_id = ENA_ADMIN_MTU;
+ cmd.u.mtu.mtu = mtu;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+}
+
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload)
+{
+ int ret;
+ struct ena_admin_get_feat_resp resp;
+
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ if (unlikely(ret)) {
+ pr_err("Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+ memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
+
+ return 0;
+}
+
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return -EPERM;
+ }
+
+ /* Validate hash function is supported */
+ ret = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ if (unlikely(ret))
+ return ret;
+
+ if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
+ pr_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
+ cmd.u.flow_hash_func.init_val = rss->hash_init_val;
+ cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_key_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = sizeof(*rss->hash_key);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+ pr_err("Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ /* Make sure the key size is a multiple of DWORDs */
+ if (unlikely(key_len & 0x3))
+ return -EINVAL;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ pr_err("Flow hash function %d isn't supported\n", func);
+ return -EPERM;
+ }
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+ if (key_len > sizeof(hash_key->key)) {
+ pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+ break;
+ default:
+ pr_err("Invalid hash function (%d)\n", func);
+ return -EINVAL;
+ }
+
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
+ if (unlikely(rc))
+ ena_com_get_hash_function(ena_dev, NULL, NULL);
+
+ return rc;
+}
+
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ rss->hash_key;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ rss->hash_key_dma_addr,
+ sizeof(*rss->hash_key));
+ if (unlikely(rc))
+ return rc;
+
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ if (func)
+ *func = rss->hash_func;
+
+ if (key)
+ memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+
+ return 0;
+}
+
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_INPUT,
+ rss->hash_ctrl_dma_addr,
+ sizeof(*rss->hash_ctrl));
+ if (unlikely(rc))
+ return rc;
+
+ if (fields)
+ *fields = rss->hash_ctrl->selected_fields[proto].fields;
+
+ return 0;
+}
+
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_INPUT)) {
+ pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
+ cmd.u.flow_hash_input.enabled_input_sort =
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
+ ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->hash_ctrl_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+ pr_err("Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl =
+ rss->hash_ctrl;
+ u16 available_fields = 0;
+ int rc, i;
+
+ /* Get the supported hash input */
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
+ ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
+
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
+ ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
+
+ for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
+ available_fields = hash_ctrl->selected_fields[i].fields &
+ hash_ctrl->supported_fields[i].fields;
+ if (available_fields != hash_ctrl->selected_fields[i].fields) {
+ pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
+ return -EPERM;
+ }
+ }
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return rc;
+}
+
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
+ u16 supported_fields;
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+ pr_err("Invalid proto num (%u)\n", proto);
+ return -EINVAL;
+ }
+
+ /* Get the ctrl table */
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
+ if (unlikely(rc))
+ return rc;
+
+ /* Make sure all the fields are supported */
+ supported_fields = hash_ctrl->supported_fields[proto].fields;
+ if ((hash_fields & supported_fields) != hash_fields) {
+ pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
+ }
+
+ hash_ctrl->selected_fields[proto].fields = hash_fields;
+
+ rc = ena_com_set_hash_ctrl(ena_dev);
+
+ /* In case of failure, restore the old hash ctrl */
+ if (unlikely(rc))
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
+
+ return rc;
+}
+
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
+ return -EINVAL;
+
+ if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
+ return -EINVAL;
+
+ rss->host_rss_ind_tbl[entry_idx] = entry_value;
+
+ return 0;
+}
+
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(
+ ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return -EPERM;
+ }
+
+ ret = ena_com_ind_tbl_convert_to_device(ena_dev);
+ if (ret) {
+ pr_err("Failed to convert host indirection table to device table\n");
+ return ret;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.aq_common_descriptor.flags =
+ ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.u.ind_table.size = rss->tbl_log_size;
+ cmd.u.ind_table.inline_index = 0xFFFFFFFF;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.control_buffer.address,
+ rss->rss_ind_tbl_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+}
+
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
+{
+ struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_get_feat_resp get_resp;
+ u32 tbl_size;
+ int i, rc;
+
+ tbl_size = (1ULL << rss->tbl_log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ rss->rss_ind_tbl_dma_addr,
+ tbl_size);
+ if (unlikely(rc))
+ return rc;
+
+ if (!ind_tbl)
+ return 0;
+
+ rc = ena_com_ind_tbl_convert_from_device(ena_dev);
+ if (unlikely(rc))
+ return rc;
+
+ for (i = 0; i < (1 << rss->tbl_log_size); i++)
+ ind_tbl[i] = rss->host_rss_ind_tbl[i];
+
+ return 0;
+}
+
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+{
+ int rc;
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+
+ rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
+ if (unlikely(rc))
+ goto err_indr_tbl;
+
+ rc = ena_com_hash_key_allocate(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_key;
+
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+
+ return 0;
+
+err_hash_ctrl:
+ ena_com_hash_key_destroy(ena_dev);
+err_hash_key:
+ ena_com_indirect_table_destroy(ena_dev);
+err_indr_tbl:
+
+ return rc;
+}
+
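+/* Usage sketch: a typical RSS bring-up with the helpers in this file. The
+ * table log size, the Toeplitz key and the mapping of table entries to RX
+ * queue ids (rx_qid[] below) are caller-provided and only placeholders here;
+ * every call returns 0 on success and its error must be checked:
+ *
+ *	ena_com_rss_init(ena_dev, tbl_log_size);
+ *	ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, key, key_len,
+ *				   init_val);
+ *	ena_com_set_default_hash_ctrl(ena_dev);
+ *	for (i = 0; i < (1 << tbl_log_size); i++)
+ *		ena_com_indirect_table_fill_entry(ena_dev, i,
+ *						  rx_qid[i % num_rx_queues]);
+ *	ena_com_indirect_table_set(ena_dev);
+ *
+ * On any failure the caller can unwind with ena_com_rss_destroy().
+ */
+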
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
+{
+ ena_com_indirect_table_destroy(ena_dev);
+ ena_com_hash_key_destroy(ena_dev);
+ ena_com_hash_ctrl_destroy(ena_dev);
+
+ memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
+}
+
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ host_attr->host_info =
+ dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->host_info))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ host_attr->debug_area_virt_addr =
+ dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
+ &host_attr->debug_area_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return -ENOMEM;
+ }
+
+ host_attr->debug_area_size = debug_area_size;
+
+ return 0;
+}
+
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->host_info) {
+ dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
+ host_attr->host_info_dma_addr);
+ host_attr->host_info = NULL;
+ }
+}
+
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+ if (host_attr->debug_area_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+}
+
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+{
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_HOST_ATTR_CONFIG)) {
+ pr_warn("Set host attribute isn't supported\n");
+ return -EPERM;
+ }
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.debug_ba,
+ host_attr->debug_area_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ ret = ena_com_mem_addr_set(ena_dev,
+ &cmd.u.host_attr.os_info_ba,
+ host_attr->host_info_dma_addr);
+ if (unlikely(ret)) {
+ pr_err("memory address set failed\n");
+ return ret;
+ }
+
+ cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set host attributes: %d\n", ret);
+
+ return ret;
+}
+
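+/* Usage sketch: host info and the debug area are allocated once and then
+ * pushed to the device with a single set-feature command; debug_area_size
+ * below is a caller-chosen placeholder:
+ *
+ *	rc = ena_com_allocate_host_info(ena_dev);
+ *	if (!rc)
+ *		rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
+ *	if (!rc) {
+ *		... fill ena_dev->host_attr.host_info ...
+ *		rc = ena_com_set_host_attributes(ena_dev);
+ *	}
+ *	if (rc) {
+ *		ena_com_delete_debug_area(ena_dev);
+ *		ena_com_delete_host_info(ena_dev);
+ *	}
+ */
+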
+/* Interrupt moderation */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+}
+
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ pr_err("Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+ ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
+ ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
+{
+ if (!ena_dev->intr_delay_resolution) {
+ pr_err("Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+ /* We use the LOWEST entry of the moderation table to store the
+ * nonadaptive interrupt coalescing values
+ */
+ ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ rx_coalesce_usecs / ena_dev->intr_delay_resolution;
+
+ return 0;
+}
+
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ if (ena_dev->intr_moder_tbl)
+ devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
+ ena_dev->intr_moder_tbl = NULL;
+}
+
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_get_feat_resp get_resp;
+ u16 delay_resolution;
+ int rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_INTERRUPT_MODERATION);
+
+ if (rc) {
+ if (rc == -EPERM) {
+ pr_info("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+ ena_com_disable_adaptive_moderation(ena_dev);
+ return rc;
+ }
+
+ rc = ena_com_init_interrupt_moderation_table(ena_dev);
+ if (rc)
+ goto err;
+
+ /* if moderation is supported by device we set adaptive moderation */
+ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
+ ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ return 0;
+err:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ return rc;
+}
+
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (!intr_moder_tbl)
+ return;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
+ ENA_INTR_LOWEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
+ ENA_INTR_LOWEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
+ ENA_INTR_LOWEST_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
+ ENA_INTR_LOW_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
+ ENA_INTR_LOW_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
+ ENA_INTR_LOW_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
+ ENA_INTR_MID_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
+ ENA_INTR_MID_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
+ ENA_INTR_MID_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
+ ENA_INTR_HIGH_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
+ ENA_INTR_HIGH_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
+ ENA_INTR_HIGH_BYTES;
+
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
+ ENA_INTR_HIGHEST_USECS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
+ ENA_INTR_HIGHEST_PKTS;
+ intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
+ ENA_INTR_HIGHEST_BYTES;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->intr_moder_tx_interval;
+}
+
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (intr_moder_tbl)
+ return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
+
+ return 0;
+}
+
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ intr_moder_tbl[level].intr_moder_interval /=
+ ena_dev->intr_delay_resolution;
+ intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+}
+
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry)
+{
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+
+ if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
+ return;
+
+ entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
+ if (ena_dev->intr_delay_resolution)
+ entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
+ entry->pkts_per_interval =
+ intr_moder_tbl[level].pkts_per_interval;
+ entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
new file mode 100644
index 000000000000..509d7b8e15ab
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -0,0 +1,1038 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_COM
+#define ENA_COM
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "ena_common_defs.h"
+#include "ena_admin_defs.h"
+#include "ena_eth_io_defs.h"
+#include "ena_regs_defs.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define ENA_MAX_NUM_IO_QUEUES 128U
+/* We need two queues for each IO (one for Tx and one for Rx) */
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+
+#define ENA_MAX_HANDLERS 256
+
+#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48
+
+/* Unit in usec */
+#define ENA_REG_READ_TIMEOUT 200000
+
+#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry))
+#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
+#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENA adaptive interrupt moderation settings */
+
+#define ENA_INTR_LOWEST_USECS (0)
+#define ENA_INTR_LOWEST_PKTS (3)
+#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+
+#define ENA_INTR_LOW_USECS (32)
+#define ENA_INTR_LOW_PKTS (12)
+#define ENA_INTR_LOW_BYTES (16 * 1024)
+
+#define ENA_INTR_MID_USECS (80)
+#define ENA_INTR_MID_PKTS (48)
+#define ENA_INTR_MID_BYTES (64 * 1024)
+
+#define ENA_INTR_HIGH_USECS (128)
+#define ENA_INTR_HIGH_PKTS (96)
+#define ENA_INTR_HIGH_BYTES (128 * 1024)
+
+#define ENA_INTR_HIGHEST_USECS (192)
+#define ENA_INTR_HIGHEST_PKTS (128)
+#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
+
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
+#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
+#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 2
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+enum ena_intr_moder_level {
+ ENA_INTR_MODER_LOWEST = 0,
+ ENA_INTR_MODER_LOW,
+ ENA_INTR_MODER_MID,
+ ENA_INTR_MODER_HIGH,
+ ENA_INTR_MODER_HIGHEST,
+ ENA_INTR_MAX_NUM_OF_LEVELS,
+};
+
+struct ena_intr_moder_entry {
+ unsigned int intr_moder_interval;
+ unsigned int pkts_per_interval;
+ unsigned int bytes_per_interval;
+};
+
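+/* Usage sketch: after ena_com_init_interrupt_moderation() a driver can
+ * override a single level of the adaptive table; the numbers below are
+ * placeholders, not recommended values. Passing
+ * ENA_INTR_BYTE_COUNT_NOT_SUPPORTED keeps the table's byte threshold
+ * unchanged:
+ *
+ *	struct ena_intr_moder_entry entry = {
+ *		.intr_moder_interval = 64,
+ *		.pkts_per_interval = 32,
+ *		.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED,
+ *	};
+ *
+ *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
+ */
+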
+enum queue_direction {
+ ENA_COM_IO_QUEUE_DIRECTION_TX,
+ ENA_COM_IO_QUEUE_DIRECTION_RX
+};
+
+struct ena_com_buf {
+ dma_addr_t paddr; /**< Buffer physical address */
+ u16 len; /**< Buffer length in bytes */
+};
+
+struct ena_com_rx_buf_info {
+ u16 len;
+ u16 req_id;
+};
+
+struct ena_com_io_desc_addr {
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
+ dma_addr_t phys_addr;
+};
+
+struct ena_com_tx_meta {
+ u16 mss;
+ u16 l3_hdr_len;
+ u16 l3_hdr_offset;
+ u16 l4_hdr_len; /* In words */
+};
+
+struct ena_com_io_cq {
+ struct ena_com_io_desc_addr cdesc_addr;
+
+ /* Interrupt unmask register */
+ u32 __iomem *unmask_reg;
+
+ /* The completion queue head doorbell register */
+ u32 __iomem *cq_head_db_reg;
+
+ /* numa configuration register (for TPH) */
+ u32 __iomem *numa_node_cfg_reg;
+
+ /* The value to write to the above register to unmask
+ * the interrupt of this queue
+ */
+ u32 msix_vector;
+
+ enum queue_direction direction;
+
+ /* holds the number of cdesc of the current packet */
+ u16 cur_rx_pkt_cdesc_count;
+ /* save the first cdesc idx of the current packet */
+ u16 cur_rx_pkt_cdesc_start_idx;
+
+ u16 q_depth;
+ /* Caller qid */
+ u16 qid;
+
+ /* Device queue index */
+ u16 idx;
+ u16 head;
+ u16 last_head_update;
+ u8 phase;
+ u8 cdesc_entry_size_in_bytes;
+
+} ____cacheline_aligned;
+
+struct ena_com_io_sq {
+ struct ena_com_io_desc_addr desc_addr;
+
+ u32 __iomem *db_addr;
+ u8 __iomem *header_addr;
+
+ enum queue_direction direction;
+ enum ena_admin_placement_policy_type mem_queue_type;
+
+ u32 msix_vector;
+ struct ena_com_tx_meta cached_tx_meta;
+
+ u16 q_depth;
+ u16 qid;
+
+ u16 idx;
+ u16 tail;
+ u16 next_to_comp;
+ u32 tx_max_header_size;
+ u8 phase;
+ u8 desc_entry_size;
+ u8 dma_addr_bits;
+} ____cacheline_aligned;
+
+struct ena_com_admin_cq {
+ struct ena_admin_acq_entry *entries;
+ dma_addr_t dma_addr;
+
+ u16 head;
+ u8 phase;
+};
+
+struct ena_com_admin_sq {
+ struct ena_admin_aq_entry *entries;
+ dma_addr_t dma_addr;
+
+ u32 __iomem *db_addr;
+
+ u16 head;
+ u16 tail;
+ u8 phase;
+
+};
+
+struct ena_com_stats_admin {
+ u32 aborted_cmd;
+ u32 submitted_cmd;
+ u32 completed_cmd;
+ u32 out_of_space;
+ u32 no_completion;
+};
+
+struct ena_com_admin_queue {
+ void *q_dmadev;
+ spinlock_t q_lock; /* spinlock for the admin queue */
+ struct ena_comp_ctx *comp_ctx;
+ u16 q_depth;
+ struct ena_com_admin_cq cq;
+ struct ena_com_admin_sq sq;
+
+ /* Indicate if the admin queue should poll for completion */
+ bool polling;
+
+ u16 curr_cmd_id;
+
+ /* Indicate that the ena was initialized and can
+ * process new admin commands
+ */
+ bool running_state;
+
+ /* Count the number of outstanding admin commands */
+ atomic_t outstanding_cmds;
+
+ struct ena_com_stats_admin stats;
+};
+
+struct ena_aenq_handlers;
+
+struct ena_com_aenq {
+ u16 head;
+ u8 phase;
+ struct ena_admin_aenq_entry *entries;
+ dma_addr_t dma_addr;
+ u16 q_depth;
+ struct ena_aenq_handlers *aenq_handlers;
+};
+
+struct ena_com_mmio_read {
+ struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
+ dma_addr_t read_resp_dma_addr;
+ u16 seq_num;
+ bool readless_supported;
+ /* spin lock to ensure a single outstanding read */
+ spinlock_t lock;
+};
+
+struct ena_rss {
+ /* Indirect table */
+ u16 *host_rss_ind_tbl;
+ struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
+ dma_addr_t rss_ind_tbl_dma_addr;
+ u16 tbl_log_size;
+
+ /* Hash key */
+ enum ena_admin_hash_functions hash_func;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ dma_addr_t hash_key_dma_addr;
+ u32 hash_init_val;
+
+ /* Flow Control */
+ struct ena_admin_feature_rss_hash_control *hash_ctrl;
+ dma_addr_t hash_ctrl_dma_addr;
+
+};
+
+struct ena_host_attribute {
+ /* Debug area */
+ u8 *debug_area_virt_addr;
+ dma_addr_t debug_area_dma_addr;
+ u32 debug_area_size;
+
+ /* Host information */
+ struct ena_admin_host_info *host_info;
+ dma_addr_t host_info_dma_addr;
+};
+
+/* Each ena_dev is a PCI function. */
+struct ena_com_dev {
+ struct ena_com_admin_queue admin_queue;
+ struct ena_com_aenq aenq;
+ struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
+ struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
+ u8 __iomem *reg_bar;
+ void __iomem *mem_bar;
+ void *dmadev;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+ u32 tx_max_header_size;
+ u16 stats_func; /* Selected function for extended statistic dump */
+ u16 stats_queue; /* Selected queue for extended statistic dump */
+
+ struct ena_com_mmio_read mmio_read;
+
+ struct ena_rss rss;
+ u32 supported_features;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+ bool adaptive_coalescing;
+ u16 intr_delay_resolution;
+ u32 intr_moder_tx_interval;
+ struct ena_intr_moder_entry *intr_moder_tbl;
+};
+
+struct ena_com_dev_get_features_ctx {
+ struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_aenq_desc aenq;
+ struct ena_admin_feature_offload_desc offload;
+};
+
+struct ena_com_create_io_ctx {
+ enum ena_admin_placement_policy_type mem_queue_type;
+ enum queue_direction direction;
+ int numa_node;
+ u32 msix_vector;
+ u16 queue_size;
+ u16 qid;
+};
+
+typedef void (*ena_aenq_handler)(void *data,
+ struct ena_admin_aenq_entry *aenq_e);
+
+/* Holds aenq handlers. Indexed by AENQ event group */
+struct ena_aenq_handlers {
+ ena_aenq_handler handlers[ENA_MAX_HANDLERS];
+ ena_aenq_handler unimplemented_handler;
+};
+
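+/* Usage sketch: a driver fills one handler per AENQ group it cares about and
+ * a catch-all for everything else; the handler names here are placeholders:
+ *
+ *	static struct ena_aenq_handlers aenq_handlers = {
+ *		.handlers = {
+ *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
+ *		},
+ *		.unimplemented_handler = my_unimplemented_aenq_handler,
+ *	};
+ *
+ * The table is passed to ena_com_admin_init() below.
+ */
+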
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ *
+ * Initialize the register read mechanism.
+ *
+ * @note: This method must be the first stage in the initialization sequence.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ * @readless_supported: readless mode (enable/disable)
+ */
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
+ bool readless_supported);
+
+/* ena_com_mmio_reg_read_request_write_dev_addr - Write the physical address
+ * of the mmio reg read response buffer to the device.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
+
+/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_init - Init the admin and the async queues
+ * @ena_dev: ENA communication layer struct
+ * @aenq_handlers: The handlers to be called upon an AENQ event.
+ * @init_spinlock: Indicate whether this method should init the admin spinlock
+ * or the spinlock was already initialized before (for example, in the case
+ * of FLR).
+ *
+ * Initialize the admin submission and completion queues.
+ * Initialize the asynchronous events notification queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ struct ena_aenq_handlers *aenq_handlers,
+ bool init_spinlock);
+
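+/* Usage sketch: the typical admin-path bring-up order, with every return
+ * value check elided; aenq_handlers is a table like the one sketched above:
+ *
+ *	ena_com_mmio_reg_read_request_init(ena_dev);
+ *	ena_com_dev_reset(ena_dev);
+ *	ena_com_validate_version(ena_dev);
+ *	ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ *	ena_com_admin_aenq_enable(ena_dev);
+ */
+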
+/* ena_com_admin_destroy - Destroy the admin and the async events queues.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @note: Before calling this method, the caller must validate that the device
+ * won't send any additional admin completions/AENQ events.
+ * To achieve that, an FLR is recommended.
+ */
+void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_dev_reset - Perform an FLR (Function Level Reset) on the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_queue - Create io queue.
+ * @ena_dev: ENA communication layer struct
+ * @ctx - create context structure
+ *
+ * Create the submission and the completion queues.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ struct ena_com_create_io_ctx *ctx);
+
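+/* Usage sketch: creating one RX queue and fetching its handlers; qid, depth
+ * and the MSI-X vector are caller-chosen placeholders:
+ *
+ *	struct ena_com_create_io_ctx ctx = {
+ *		.direction = ENA_COM_IO_QUEUE_DIRECTION_RX,
+ *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ *		.msix_vector = msix_vector,
+ *		.queue_size = queue_depth,
+ *		.qid = qid,
+ *	};
+ *
+ *	rc = ena_com_create_io_queue(ena_dev, &ctx);
+ *	if (!rc)
+ *		rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
+ *
+ * The queue is torn down later with ena_com_destroy_io_queue(ena_dev, qid).
+ */
+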
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ */
+void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
+
+/* ena_com_get_io_handlers - Return the io queue handlers
+ * @ena_dev: ENA communication layer struct
+ * @qid - the caller virtual queue id.
+ * @io_sq - IO submission queue handler
+ * @io_cq - IO completion queue handler.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_sq **io_sq,
+ struct ena_com_io_cq **io_cq);
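/* Editor's note: illustrative sketch of the create/lookup flow for one IO
 * queue using the create context struct shown earlier in this header.  The
 * field values are example placeholders; a real caller derives them from its
 * own configuration.
 */
static int example_create_tx_queue(struct ena_com_dev *ena_dev, u16 qid,
				   struct ena_com_io_sq **io_sq,
				   struct ena_com_io_cq **io_cq)
{
	struct ena_com_create_io_ctx ctx = {
		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
		.qid		= qid,
		.queue_size	= 1024,	/* example depth */
		.msix_vector	= qid,	/* example vector mapping */
		.numa_node	= 0,
	};
	int rc;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc)
		return rc;

	/* Retrieve the SQ/CQ handlers for the newly created queue */
	rc = ena_com_get_io_handlers(ena_dev, qid, io_sq, io_cq);
	if (rc)
		ena_com_destroy_io_queue(ena_dev, qid);

	return rc;
}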
+
+/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
+ * @ena_dev: ENA communication layer struct
+ *
+ * After this method, AENQ events can be received via the AENQ.
+ */
+void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_running_state - Set the state of the admin queue
+ * @ena_dev: ENA communication layer struct
+ *
+ * Change the state of the admin queue (enable/disable)
+ */
+void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
+
+/* ena_com_get_admin_running_state - Get the admin queue state
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the state of the admin queue (enable/disable)
+ *
+ * @return - current admin queue running state (enable/disable)
+ */
+bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/disable polling mode
+ *
+ * Set the admin completion mode.
+ */
+void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
+
+/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode
+ * @ena_dev: ENA communication layer struct
+ *
+ * Get the admin completion mode.
+ * If polling mode is on, ena_com_execute_admin_command will poll the admin
+ * completion queue for the command's completion; otherwise it will wait on
+ * the wait event.
+ *
+ * @return state
+ */
+bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method goes over the admin completion queue and wakes up all the
+ * pending threads that wait on the command's wait event.
+ *
+ * @note: Should be called after MSI-X interrupt.
+ */
+void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
+
+/* ena_com_aenq_intr_handler - AENQ interrupt handler
+ * @ena_dev: ENA communication layer struct
+ * @data: context passed to the AENQ handlers.
+ *
+ * This method goes over the async event notification queue and calls the
+ * proper AENQ handler.
+ */
+void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+
+/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method aborts all the outstanding admin commands.
+ * The caller should then call ena_com_wait_for_abort_completion to make sure
+ * all the commands were completed.
+ */
+void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
+
+/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method waits until all the outstanding admin commands are completed.
+ */
+void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
+
+/* ena_com_validate_version - Validate the device parameters
+ * @ena_dev: ENA communication layer struct
+ *
+ * This method validates that the device parameters are the same as the
+ * parameters saved in ena_dev.
+ * This method is useful after device reset, to validate that the device MAC
+ * address and the device offloads are the same as before the reset.
+ *
+ * @return - 0 on success, negative value otherwise.
+ */
+int ena_com_validate_version(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_link_params - Retrieve physical link parameters.
+ * @ena_dev: ENA communication layer struct
+ * @resp: Link parameters
+ *
+ * Retrieve the physical link parameters,
+ * like speed, auto-negotiation and full duplex support.
+ *
+ * @return - 0 on Success, negative value otherwise.
+ */
+int ena_com_get_link_params(struct ena_com_dev *ena_dev,
+ struct ena_admin_get_feat_resp *resp);
+
+/* ena_com_get_dma_width - Retrieve physical dma address width the device
+ * supports.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Retrieve the maximum physical address bits the device can handle.
+ *
+ * @return: > 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_aenq_config - Set aenq groups configurations
+ * @ena_dev: ENA communication layer struct
+ * @groups_flag: bit mask of enum ena_admin_aenq_group flags.
+ *
+ * Configure which aenq event group the driver would like to receive.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
+
+/* ena_com_get_dev_attr_feat - Get device features
+ * @ena_dev: ENA communication layer struct
+ * @get_feat_ctx: returned context that contains the device features.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
+
+/* ena_com_get_dev_basic_stats - Get device basic statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_basic_stats *stats);
+
+/* ena_com_set_dev_mtu - Configure the device mtu.
+ * @ena_dev: ENA communication layer struct
+ * @mtu: mtu value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+
+/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+ * @ena_dev: ENA communication layer struct
+ * @offload: offload return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
+
+/* ena_com_rss_init - Init RSS
+ * @ena_dev: ENA communication layer struct
+ * @log_size: indirection log size
+ *
+ * Allocate RSS/RFS resources.
+ * The caller can then configure RSS using ena_com_set_hash_function,
+ * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+
+/* ena_com_rss_destroy - Destroy rss
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free all the RSS/RFS resources.
+ */
+void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+ * @key: Hash key (for toeplitz hash)
+ * @key_len: key length (max length 10 DW)
+ * @init_val: initial value for the hash function
+ *
+ * Fill the ena_dev resources with the desired hash function, hash key,
+ * key_len and key initial value (if needed by the hash function).
+ * To flush the key to the device, the caller should call
+ * ena_com_set_hash_function.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions func,
+ const u8 *key, u16 key_len, u32 init_val);
+
+/* ena_com_set_hash_function - Flush the hash function and its dependencies to
+ * the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash function and its dependencies (key, key length and
+ * initial value) if needed.
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_function
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
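/* Editor's note: illustrative sketch of the fill-then-flush pattern for the
 * RSS hash function.  The all-zero Toeplitz key is a placeholder (real
 * drivers typically use a random key), and ENA_ADMIN_TOEPLITZ is assumed to
 * be the Toeplitz entry of enum ena_admin_hash_functions from
 * ena_admin_defs.h.
 */
static int example_config_toeplitz(struct ena_com_dev *ena_dev)
{
	u8 key[40] = { 0 };	/* placeholder key, 10 DW max */
	int rc;

	/* Stage the function, key, key length and initial value in ena_dev */
	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					key, sizeof(key), 0xffffffff);
	if (rc)
		return rc;

	/* Flush the staged configuration to the device */
	return ena_com_set_hash_function(ena_dev);
}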
+
+/* ena_com_get_hash_function - Retrieve the hash function and the hash key
+ * from the device.
+ * @ena_dev: ENA communication layer struct
+ * @func: hash function
+ * @key: hash key
+ *
+ * Retrieve the hash function and the hash key from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ enum ena_admin_hash_functions *func,
+ u8 *key);
+
+/* ena_com_fill_hash_ctrl - Fill RSS hash control
+ * @ena_dev: ENA communication layer struct.
+ * @proto: The protocol to configure.
+ * @hash_fields: bit mask of ena_admin_flow_hash_fields
+ *
+ * Fill the ena_dev resources with the desired hash control (the Ethernet
+ * fields that take part of the hash) for a specific protocol.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 hash_fields);
+
+/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the hash control (the ethernet fields that take part of the hash)
+ *
+ * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
+ * @ena_dev: ENA communication layer struct
+ * @proto: The protocol to retrieve.
+ * @fields: bit mask of ena_admin_flow_hash_fields.
+ *
+ * Retrieve the hash control from the device.
+ *
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
+ enum ena_admin_flow_hash_proto proto,
+ u16 *fields);
+
+/* ena_com_set_default_hash_ctrl - Set the hash control to a default
+ * configuration.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Fill the ena_dev resources with the default hash control configuration.
+ * To flush the hash control to the device, the caller should call
+ * ena_com_set_hash_ctrl.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
+
+/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
+ * indirection table
+ * @ena_dev: ENA communication layer struct.
+ * @entry_idx - indirection table entry index.
+ * @entry_value - redirection value
+ *
+ * Fill a single entry of the RSS indirection table in the ena_dev resources.
+ * To flush the indirection table to the device, the caller should call
+ * ena_com_indirect_table_set.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
+ u16 entry_idx, u16 entry_value);
+
+/* ena_com_indirect_table_set - Flush the indirection table to the device.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Flush the indirection hash control to the device.
+ * Prior to this method the caller should call ena_com_indirect_table_fill_entry
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
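/* Editor's note: illustrative sketch of programming the RSS indirection
 * table -- fill each entry in host memory, then flush the whole table with
 * one call.  The round-robin spread over num_queues is an example policy
 * only; real drivers map each entry to their own queue identifiers.
 */
static int example_fill_indirection_table(struct ena_com_dev *ena_dev,
					  u16 table_size, u16 num_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < table_size; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (rc)
			return rc;
	}

	/* Push the staged table to the device */
	return ena_com_indirect_table_set(ena_dev);
}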
+
+/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
+ * @ena_dev: ENA communication layer struct
+ * @ind_tbl: indirection table
+ *
+ * Retrieve the RSS indirection table from the device.
+ *
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
+
+/* ena_com_allocate_host_info - Allocate host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_allocate_debug_area - Allocate debug area.
+ * @ena_dev: ENA communication layer struct
+ * @debug_area_size - debug area size.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
+ u32 debug_area_size);
+
+/* ena_com_delete_debug_area - Free the debug area resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated debug area.
+ */
+void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
+
+/* ena_com_delete_host_info - Free the host info resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated host info.
+ */
+void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_host_attributes - Update the device with the host
+ * attributes (debug area and host info) base address.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
+
+/* ena_com_create_io_cq - Create io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Create IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_destroy_io_cq - Destroy io completion queue.
+ * @ena_dev: ENA communication layer struct
+ * @io_cq - io completion queue handler
+ *
+ * Destroy IO completion queue.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq);
+
+/* ena_com_execute_admin_command - Execute admin command
+ * @admin_queue: admin queue.
+ * @cmd: the admin command to execute.
+ * @cmd_size: the command size.
+ * @cmd_completion: command completion return value.
+ * @cmd_comp_size: command completion size.
+ *
+ * Submit an admin command and then wait until the device returns a
+ * completion.
+ * The completion will be copied into cmd_comp.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size,
+ struct ena_admin_acq_entry *cmd_comp,
+ size_t cmd_comp_size);
+
+/* ena_com_init_interrupt_moderation - Init interrupt moderation
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
+
+/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
+ * capability is supported by the device.
+ *
+ * @return - supported or not.
+ */
+bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
+ * moderation table back to the default parameters.
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+
+/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ * @tx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
+
+/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ * @rx_coalesce_usecs: Interval in usec.
+ *
+ * @return - 0 on success, negative value on failure.
+ */
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
+
+/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
+ * non-adaptive interval in Tx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+
+/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
+ * non-adaptive interval in Rx direction.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - interval in usec
+ */
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+
+/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
+ * moderation table.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry value
+ *
+ * Update a single entry in the interrupt moderation table.
+ */
+void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
+ * @ena_dev: ENA communication layer struct
+ * @level: Interrupt moderation table level
+ * @entry: Entry to fill.
+ *
+ * Initialize the entry according to the adaptive interrupt moderation table.
+ */
+void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
+ enum ena_intr_moder_level level,
+ struct ena_intr_moder_entry *entry);
+
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->adaptive_coalescing;
+}
+
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = true;
+}
+
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+{
+ ena_dev->adaptive_coalescing = false;
+}
+
+/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
+ * @ena_dev: ENA communication layer struct
+ * @pkts: Number of packets since the last update
+ * @bytes: Number of bytes received since the last update.
+ * @smoothed_interval: Returned interval
+ * @moder_tbl_idx: Current table level as input; updated to the new level on
+ * return.
+ */
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
+{
+ enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
+ struct ena_intr_moder_entry *curr_moder_entry;
+ struct ena_intr_moder_entry *pred_moder_entry;
+ struct ena_intr_moder_entry *new_moder_entry;
+ struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
+ unsigned int interval;
+
+ /* We apply adaptive moderation on Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ if (!pkts || !bytes)
+ /* Tx interrupt, or spurious interrupt,
+ * in both cases we just use the same delay values
+ */
+ return;
+
+ curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+ if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+ pr_err("Wrong moderation index %u\n", curr_moder_idx);
+ return;
+ }
+
+ curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+ new_moder_idx = curr_moder_idx;
+
+ if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+ if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ } else {
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+ if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+ (bytes <= pred_moder_entry->bytes_per_interval))
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+ else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+ (bytes > curr_moder_entry->bytes_per_interval)) {
+ if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+ }
+ }
+ new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+ interval = new_moder_entry->intr_moder_interval;
+ *smoothed_interval = (
+ (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+ 10;
+
+ *moder_tbl_idx = new_moder_idx;
+}
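/* Editor's note: the smoothing step above is a weighted moving average with
 * rounding, (new * NEW_WEIGHT + old * OLD_WEIGHT + 5) / 10, which behaves as
 * a true weighted average when the two weights sum to 10.  Worked example,
 * assuming hypothetical weights of 4 (new) and 6 (old): a table interval of
 * 64 usec and a previous smoothed value of 32 usec give
 * (64 * 4 + 32 * 6 + 5) / 10 = 45 usec.
 */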
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+ u32 rx_delay_interval,
+ u32 tx_delay_interval,
+ bool unmask)
+{
+ intr_reg->intr_control = 0;
+ intr_reg->intr_control |= rx_delay_interval &
+ ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+ intr_reg->intr_control |=
+ (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+ if (unmask)
+ intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
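/* Editor's note: illustrative sketch of a typical caller of the helper above,
 * e.g. when re-arming interrupts at the end of a NAPI poll cycle.  The delay
 * values are placeholders, and ena_com_unmask_intr() is declared in
 * ena_eth_com.h later in this patch, so this is a usage sketch only.
 */
static void example_rearm_interrupt(struct ena_com_io_cq *io_cq,
				    u32 rx_usecs, u32 tx_usecs)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Encode both delays and the unmask bit into one register value */
	ena_com_update_intr_reg(&intr_reg, rx_usecs, tx_usecs, true);

	/* Write the value to the queue's interrupt unmask register */
	ena_com_unmask_intr(io_cq, &intr_reg);
}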
+
+#endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
new file mode 100644
index 000000000000..bb8d73676eab
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_COMMON_H_
+#define _ENA_COMMON_H_
+
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+
+/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
+struct ena_common_mem_addr {
+ u32 mem_addr_low;
+
+ u16 mem_addr_high;
+
+ /* MBZ */
+ u16 reserved16;
+};
+
+#endif /*_ENA_COMMON_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
new file mode 100644
index 000000000000..539c536464a5
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ena_eth_com.h"
+
+static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 expected_phase, head_masked;
+ u16 desc_phase;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+ desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+ return NULL;
+
+ return cdesc;
+}
+
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
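/* Editor's note (inferred from the two helpers above): the phase-bit protocol
 * lets the driver detect new completions without a head pointer from the
 * device.  The device writes each completion descriptor with the phase value
 * of the current pass through the ring and alternates that value on every
 * wrap, while the driver flips its expected phase whenever its head wraps;
 * a descriptor whose phase does not match the expected one is therefore left
 * over from a previous pass and is not reported as a new completion.
 */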
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked;
+ u32 offset;
+
+ tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+
+ offset = tail_masked * io_sq->desc_entry_size;
+
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
+}
+
+static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u32 offset = tail_masked * io_sq->desc_entry_size;
+
+ /* In case this queue isn't an LLQ */
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return;
+
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
+ io_sq->desc_entry_size);
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
+}
+
+static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
+ u8 *head_src, u16 header_len)
+{
+ u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
+ u8 __iomem *dev_head_addr =
+ io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (unlikely(!io_sq->header_addr)) {
+ pr_err("Push buffer header ptr is NULL\n");
+ return -EINVAL;
+ }
+
+ memcpy_toio(dev_head_addr, head_src, header_len);
+
+ return 0;
+}
+
+static inline struct ena_eth_io_rx_cdesc_base *
+ ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+{
+ idx &= (io_cq->q_depth - 1);
+ return (struct ena_eth_io_rx_cdesc_base *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
+}
+
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+ u16 count = 0, head_masked;
+ u32 last = 0;
+
+ do {
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (!cdesc)
+ break;
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+ last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+ if (last) {
+ *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+ count += io_cq->cur_rx_pkt_cdesc_count;
+
+ head_masked = io_cq->head & (io_cq->q_depth - 1);
+
+ io_cq->cur_rx_pkt_cdesc_count = 0;
+ io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+
+ pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
+ } else {
+ io_cq->cur_rx_pkt_cdesc_count += count;
+ count = 0;
+ }
+
+ return count;
+}
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ int rc;
+
+ if (ena_tx_ctx->meta_valid) {
+ rc = memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ if (unlikely(rc != 0))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+ meta_desc = get_sq_desc(io_sq);
+ memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
+
+ /* bits 0-9 of the mss */
+ meta_desc->word2 |= (ena_meta->mss <<
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ /* bits 10-13 of the mss */
+ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
+
+ /* Extended meta desc */
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ meta_desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->word2 |= ena_meta->l3_hdr_len &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
+ meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+
+ meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
+ /* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+}
+
+static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+ struct ena_eth_io_rx_cdesc_base *cdesc)
+{
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ ena_rx_ctx->l3_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ ena_rx_ctx->l4_csum_err =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->frag =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+
+ pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+}
+
+/*****************************************************************************/
+/***************************** API **********************************/
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc)
+{
+ struct ena_eth_io_tx_desc *desc = NULL;
+ struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+ void *push_header = ena_tx_ctx->push_header;
+ u16 header_len = ena_tx_ctx->header_len;
+ u16 num_bufs = ena_tx_ctx->num_bufs;
+ int total_desc, i, rc;
+ bool have_meta;
+ u64 addr_hi;
+
+ WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
+
+ /* num_bufs +1 for potential meta desc */
+ if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+ pr_err("Not enough space in the tx queue\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ pr_err("header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
+ return -EINVAL;
+ }
+
+ /* start with pushing the header (if needed) */
+ rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(rc))
+ return rc;
+
+ have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+ ena_tx_ctx);
+ if (have_meta)
+ ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+ /* If the caller doesn't want to send packets */
+ if (unlikely(!num_bufs && !header_len)) {
+ *nb_hw_desc = have_meta ? 0 : 1;
+ return 0;
+ }
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ /* Set first desc when we don't have a meta descriptor */
+ if (!have_meta)
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+ desc->buff_addr_hi_hdr_sz |= (header_len <<
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+ ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+ /* Bits 0-9 */
+ desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+ desc->meta_ctrl |= (ena_tx_ctx->df <<
+ ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+ ENA_ETH_IO_TX_DESC_DF_MASK;
+
+ /* Bits 10-15 */
+ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+ ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+ if (ena_tx_ctx->meta_valid) {
+ desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+ ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+ ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+ ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ /* The first desc shares the same desc as the header */
+ if (likely(i != 0)) {
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+ ena_com_sq_update_tail(io_sq);
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+ desc->len_ctrl |= (io_sq->phase <<
+ ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ }
+
+ desc->len_ctrl |= ena_bufs->len &
+ ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+ addr_hi = ((ena_bufs->paddr &
+ GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ desc->buff_addr_lo = (u32)ena_bufs->paddr;
+ desc->buff_addr_hi_hdr_sz |= addr_hi &
+ ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+ ena_bufs++;
+ }
+
+ /* set the last desc indicator */
+ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+ ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+ ena_com_sq_update_tail(io_sq);
+
+ total_desc = max_t(u16, num_bufs, 1);
+ total_desc += have_meta ? 1 : 0;
+
+ *nb_hw_desc = total_desc;
+ return 0;
+}
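/* Editor's note: illustrative sketch of a transmit-path caller -- fill a tx
 * context, let ena_com_prepare_tx() build the descriptors, then ring the
 * submission queue doorbell (ena_com_write_sq_doorbell() is declared in
 * ena_eth_com.h, included above).  The single-buffer setup is a placeholder;
 * real callers map skb fragments into an ena_bufs[] array.
 */
static int example_xmit_one(struct ena_com_io_sq *io_sq,
			    struct ena_com_buf *buf, u16 req_id)
{
	struct ena_com_tx_ctx tx_ctx = {};
	int nb_hw_desc;
	int rc;

	tx_ctx.ena_bufs = buf;
	tx_ctx.num_bufs = 1;
	tx_ctx.req_id = req_id;

	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
	if (rc)
		return rc;

	/* Make the new descriptors visible to the device */
	return ena_com_write_sq_doorbell(io_sq);
}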
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx)
+{
+ struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ u16 cdesc_idx = 0;
+ u16 nb_hw_desc;
+ u16 i;
+
+ WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+ nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ if (nb_hw_desc == 0) {
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+ }
+
+ pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+ ena_rx_ctx->max_bufs);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+ ena_buf->len = cdesc->length;
+ ena_buf->req_id = cdesc->req_id;
+ ena_buf++;
+ }
+
+ /* Update SQ head ptr */
+ io_sq->next_to_comp += nb_hw_desc;
+
+ pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+ ena_rx_ctx->descs = nb_hw_desc;
+ return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id)
+{
+ struct ena_eth_io_rx_desc *desc;
+
+ WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+ if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ return -ENOSPC;
+
+ desc = get_sq_desc(io_sq);
+ memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+ desc->length = ena_buf->len;
+
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+ desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+ desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+ desc->req_id = req_id;
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+ ena_com_sq_update_tail(io_sq);
+
+ return 0;
+}
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase isn't the same as the
+ * expected phase, it means that the device has not yet updated
+ * this completion.
+ */
+ cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ ena_com_cq_inc_head(io_cq);
+
+ *req_id = cdesc->req_id;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
new file mode 100644
index 000000000000..bb53c3a4f8e9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_ETH_COM_H_
+#define ENA_ETH_COM_H_
+
+#include "ena_com.h"
+
+/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
+#define ENA_COMP_HEAD_THRESH 4
+
+struct ena_com_tx_ctx {
+ struct ena_com_tx_meta ena_meta;
+ struct ena_com_buf *ena_bufs;
+ /* For LLQ, header buffer - pushed to the device mem space */
+ void *push_header;
+
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ u16 num_bufs;
+ u16 req_id;
+ /* For regular queue, indicate the size of the header
+ * For LLQ, indicate the size of the pushed buffer
+ */
+ u16 header_len;
+
+ u8 meta_valid;
+ u8 tso_enable;
+ u8 l3_csum_enable;
+ u8 l4_csum_enable;
+ u8 l4_csum_partial;
+ u8 df; /* Don't fragment */
+};
+
+struct ena_com_rx_ctx {
+ struct ena_com_rx_buf_info *ena_bufs;
+ enum ena_eth_io_l3_proto_index l3_proto;
+ enum ena_eth_io_l4_proto_index l4_proto;
+ bool l3_csum_err;
+ bool l4_csum_err;
+ /* fragmented packet */
+ bool frag;
+ u32 hash;
+ u16 descs;
+ int max_bufs;
+};
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ int *nb_hw_desc);
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_rx_ctx *ena_rx_ctx);
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_buf *ena_buf,
+ u16 req_id);
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+
+static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
+ struct ena_eth_io_intr_reg *intr_reg)
+{
+ writel(intr_reg->intr_control, io_cq->unmask_reg);
+}
+
+static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+{
+ u16 tail, next_to_comp, cnt;
+
+ next_to_comp = io_sq->next_to_comp;
+ tail = io_sq->tail;
+ cnt = tail - next_to_comp;
+
+ return io_sq->q_depth - 1 - cnt;
+}
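/* Editor's note: worked example for the helper above.  tail and next_to_comp
 * are u16 counters that only ever grow, so the subtraction is wrap-safe:
 * with q_depth = 1024, a tail that has wrapped around to 5 and
 * next_to_comp = 65531, cnt = (u16)(5 - 65531) = 10, so the queue reports
 * 1024 - 1 - 10 = 1013 free entries.
 */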
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail;
+
+ tail = io_sq->tail;
+
+ pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
+
+ writel(tail, io_sq->db_addr);
+
+ return 0;
+}
+
+static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
+{
+ u16 unreported_comp, head;
+ bool need_update;
+
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (io_cq->cq_head_db_reg && need_update) {
+ pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ writel(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
+
+ return 0;
+}
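/* Editor's note: illustrative arithmetic for the threshold above -- with
 * q_depth = 256 and ENA_COMP_HEAD_THRESH = 4, the completion-queue head
 * doorbell is written only once more than 256 / 4 = 64 completions have
 * accumulated since the last update, batching the MMIO writes.
 */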
+
+static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
+ u8 numa_node)
+{
+ struct ena_eth_io_numa_node_cfg_reg numa_cfg;
+
+ if (!io_cq->numa_node_cfg_reg)
+ return;
+
+ numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
+ | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+
+ writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+}
+
+static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
+{
+ io_sq->next_to_comp += elem;
+}
+
+#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
new file mode 100644
index 000000000000..f320c58793a5
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+struct ena_eth_io_tx_desc {
+ /* 15:0 : length - Buffer length in bytes, must
+ * include any packet trailers that the ENA is supposed
+ * to update, like End-to-End CRC, Authentication GMAC,
+ * etc. This length must not include the
+ * 'Push_Buffer' length. This length must not include
+ * the 4 bytes added at the end for the 802.3 Ethernet FCS
+ * 21:16 : req_id_hi - Request ID[15:10]
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBZ
+ * 24 : phase
+ * 25 : reserved1 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ u32 len_ctrl;
+
+ /* 3:0 : l3_proto_idx - L3 protocol. This field is
+ * required when l3_csum_en, l3_csum or tso_en are set.
+ * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
+ * DF flags of the IPv4 header is 0. Otherwise must
+ * be set to 1
+ * 6:5 : reserved5
+ * 7 : tso_en - Enable TSO, For TCP only.
+ * 12:8 : l4_proto_idx - L4 protocol. This field needs
+ * to be set when l4_csum_en or tso_en are set.
+ * 13 : l3_csum_en - enable IPv4 header checksum.
+ * 14 : l4_csum_en - enable TCP/UDP checksum.
+ * 15 : ethernet_fcs_dis - when set, the controller
+ * will not append the 802.3 Ethernet Frame Check
+ * Sequence to the packet
+ * 16 : reserved16
+ * 17 : l4_csum_partial - L4 partial checksum. When
+ * set to 0, the ENA calculates the L4 checksum,
+ * where the Destination Address required for the
+ * TCP/UDP pseudo-header is taken from the actual
+ * packet L3 header. When set to 1, the ENA doesn't
+ * calculate the pseudo-header checksum; the checksum
+ * field of the L4 header is used instead. When TSO is
+ * enabled, the checksum of the pseudo-header
+ * must not include the TCP length field. L4 partial
+ * checksum should be used for IPv6 packets that
+ * contain Routing Headers.
+ * 20:18 : reserved18 - MBZ
+ * 21 : reserved21 - MBZ
+ * 31:22 : req_id_lo - Request ID[9:0]
+ */
+ u32 meta_ctrl;
+
+ u32 buff_addr_lo;
+
+ /* address high and header size
+ * 15:0 : addr_hi - Buffer Pointer[47:32]
+ * 23:16 : reserved16_w2
+ * 31:24 : header_length - Header length. For Low
+ * Latency Queues, this field indicates the number
+ * of bytes written to the headers' memory. For
+ * normal queues, if the packet is TCP or UDP, and longer
+ * than max_header_size, then this field should be
+ * set to the sum of the L4 header offset and the L4 header
+ * size (without options); otherwise, this field
+ * should be set to 0. For both modes, this field
+ * must not exceed the max_header_size.
+ * max_header_size value is reported by the Max
+ * Queues Feature descriptor
+ */
+ u32 buff_addr_hi_hdr_sz;
+};
+
+struct ena_eth_io_tx_meta_desc {
+ /* 9:0 : req_id_lo - Request ID[9:0]
+ * 11:10 : reserved10 - MBZ
+ * 12 : reserved12 - MBZ
+ * 13 : reserved13 - MBZ
+ * 14 : ext_valid - if set, the offset fields in Word2
+ * are valid, as are MSS High in Word 0 and bits
+ * [31:24] in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
+ * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
+ * Extended Metadata Descriptor
+ * 21 : meta_store - Store extended metadata in queue
+ * cache
+ * 22 : reserved22 - MBZ
+ * 23 : meta_desc - MBO
+ * 24 : phase
+ * 25 : reserved25 - MBZ
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 28 : comp_req - Indicates whether completion
+ * should be posted, after packet is transmitted.
+ * Valid only for first descriptor
+ * 30:29 : reserved29 - MBZ
+ * 31 : reserved31 - MBZ
+ */
+ u32 len_ctrl;
+
+ /* 5:0 : req_id_hi
+ * 31:6 : reserved6 - MBZ
+ */
+ u32 word1;
+
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
+ * 21:16 : l4_hdr_len_in_words - counts the L4 header
+ * length in words. There is an explicit assumption
+ * that the L4 header appears right after the L3 header
+ * and the L4 offset is based on l3_hdr_off + l3_hdr_len
+ * 31:22 : mss_lo
+ */
+ u32 word2;
+
+ u32 reserved;
+};
+
+struct ena_eth_io_tx_cdesc {
+ /* Request ID[15:0] */
+ u16 req_id;
+
+ u8 status;
+
+ /* flags
+ * 0 : phase
+ * 7:1 : reserved1
+ */
+ u8 flags;
+
+ u16 sub_qid;
+
+ u16 sq_head_idx;
+};
+
+struct ena_eth_io_rx_desc {
+ /* In bytes. 0 means 64KB */
+ u16 length;
+
+ /* MBZ */
+ u8 reserved2;
+
+ /* 0 : phase
+ * 1 : reserved1 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction
+ * 3 : last - Indicates last descriptor in transaction
+ * 4 : comp_req
+ * 5 : reserved5 - MBO
+ * 7:6 : reserved6 - MBZ
+ */
+ u8 ctrl;
+
+ u16 req_id;
+
+ /* MBZ */
+ u16 reserved6;
+
+ u32 buff_addr_lo;
+
+ u16 buff_addr_hi;
+
+ /* MBZ */
+ u16 reserved16_w3;
+};
+
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
+ */
+struct ena_eth_io_rx_cdesc_base {
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
+ * 7 : reserved7 - MBZ
+ * 12:8 : l4_proto_idx
+ * 13 : l3_csum_err - when set, either an L3
+ * checksum error was detected, or the controller didn't
+ * validate the checksum. This bit is valid only when
+ * l3_proto_idx indicates an IPv4 packet
+ * 14 : l4_csum_err - when set, either an L4
+ * checksum error was detected, or the controller didn't
+ * validate the checksum. This bit is valid only when
+ * l4_proto_idx indicates a TCP/UDP packet and
+ * ipv4_frag is not set
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet
+ * 23:16 : reserved16
+ * 24 : phase
+ * 25 : l3_csum2 - second checksum engine result
+ * 26 : first - Indicates first descriptor in
+ * transaction
+ * 27 : last - Indicates last descriptor in
+ * transaction
+ * 29:28 : reserved28
+ * 30 : buffer - 0: Metadata descriptor. 1: Buffer
+ * Descriptor was used
+ * 31 : reserved31
+ */
+ u32 status;
+
+ u16 length;
+
+ u16 req_id;
+
+ /* 32-bit hash result */
+ u32 hash;
+
+ u16 sub_qid;
+
+ u16 reserved;
+};
+
+/* 8-word format */
+struct ena_eth_io_rx_cdesc_ext {
+ struct ena_eth_io_rx_cdesc_base base;
+
+ u32 buff_addr_lo;
+
+ u16 buff_addr_hi;
+
+ u16 reserved16;
+
+ u32 reserved_w6;
+
+ u32 reserved_w7;
+};
+
+struct ena_eth_io_intr_reg {
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
+ * 31 : reserved
+ */
+ u32 intr_control;
+};
+
+struct ena_eth_io_numa_node_cfg_reg {
+ /* 7:0 : numa
+ * 30:8 : reserved
+ * 31 : enabled
+ */
+ u32 numa_cfg;
+};
+
+/* tx_desc */
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+
+/* tx_meta_desc */
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+
+/* tx_cdesc */
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+
+/* rx_desc */
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+
+/* rx_cdesc_base */
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+
+/* intr_reg */
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+
+/* numa_node_cfg_reg */
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+
+#endif /* _ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
new file mode 100644
index 000000000000..67b2338f8fb3
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -0,0 +1,895 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "ena_netdev.h"
+
+struct ena_stats {
+ char name[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
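+/* Each entry holds a stat's printable name and its byte offset inside the
+ * stats structure it belongs to; the offset is applied at runtime to fetch
+ * the value from the per-ring, per-device or admin-queue stats.
+ */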
+#define ENA_STAT_ENA_COM_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+}
+
+#define ENA_STAT_ENTRY(stat, stat_type) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+}
+
+#define ENA_STAT_RX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, rx)
+
+#define ENA_STAT_TX_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, tx)
+
+#define ENA_STAT_GLOBAL_ENTRY(stat) \
+ ENA_STAT_ENTRY(stat, dev)
+
+static const struct ena_stats ena_stats_global_strings[] = {
+ ENA_STAT_GLOBAL_ENTRY(tx_timeout),
+ ENA_STAT_GLOBAL_ENTRY(io_suspend),
+ ENA_STAT_GLOBAL_ENTRY(io_resume),
+ ENA_STAT_GLOBAL_ENTRY(wd_expired),
+ ENA_STAT_GLOBAL_ENTRY(interface_up),
+ ENA_STAT_GLOBAL_ENTRY(interface_down),
+ ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+};
+
+static const struct ena_stats ena_stats_tx_strings[] = {
+ ENA_STAT_TX_ENTRY(cnt),
+ ENA_STAT_TX_ENTRY(bytes),
+ ENA_STAT_TX_ENTRY(queue_stop),
+ ENA_STAT_TX_ENTRY(queue_wakeup),
+ ENA_STAT_TX_ENTRY(dma_mapping_err),
+ ENA_STAT_TX_ENTRY(linearize),
+ ENA_STAT_TX_ENTRY(linearize_failed),
+ ENA_STAT_TX_ENTRY(napi_comp),
+ ENA_STAT_TX_ENTRY(tx_poll),
+ ENA_STAT_TX_ENTRY(doorbells),
+ ENA_STAT_TX_ENTRY(prepare_ctx_err),
+ ENA_STAT_TX_ENTRY(missing_tx_comp),
+ ENA_STAT_TX_ENTRY(bad_req_id),
+};
+
+static const struct ena_stats ena_stats_rx_strings[] = {
+ ENA_STAT_RX_ENTRY(cnt),
+ ENA_STAT_RX_ENTRY(bytes),
+ ENA_STAT_RX_ENTRY(refil_partial),
+ ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(page_alloc_fail),
+ ENA_STAT_RX_ENTRY(skb_alloc_fail),
+ ENA_STAT_RX_ENTRY(dma_mapping_err),
+ ENA_STAT_RX_ENTRY(bad_desc_num),
+ ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+};
+
+static const struct ena_stats ena_stats_ena_com_strings[] = {
+ ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
+ ENA_STAT_ENA_COM_ENTRY(completed_cmd),
+ ENA_STAT_ENA_COM_ENTRY(out_of_space),
+ ENA_STAT_ENA_COM_ENTRY(no_completion),
+};
+
+#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+
+static void ena_safe_update_stat(u64 *src, u64 *dst,
+ struct u64_stats_sync *syncp)
+{
+ unsigned int start;
+
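+ /* Read the 64-bit counter under the u64_stats seqcount so the value is
+ * consistent on 32-bit machines; retry if a writer raced with the read.
+ */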
+ do {
+ start = u64_stats_fetch_begin_irq(syncp);
+ *(dst) = *src;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
+}
+
+static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
+{
+ const struct ena_stats *ena_stats;
+ struct ena_ring *ring;
+
+ u64 *ptr;
+ int i, j;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* Tx stats */
+ ring = &adapter->tx_ring[i];
+
+ for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
+ ena_stats = &ena_stats_tx_strings[j];
+
+ ptr = (u64 *)((uintptr_t)&ring->tx_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
+
+ /* Rx stats */
+ ring = &adapter->rx_ring[i];
+
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ptr = (u64 *)((uintptr_t)&ring->rx_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
+ }
+}
+
+static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
+{
+ const struct ena_stats *ena_stats;
+ u32 *ptr;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
+ ena_stats = &ena_stats_ena_com_strings[i];
+
+ ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ *(*data)++ = *ptr;
+ }
+}
+
+static void ena_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
+ ena_stats = &ena_stats_global_strings[i];
+
+ ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
+ (uintptr_t)ena_stats->stat_offset);
+
+ ena_safe_update_stat(ptr, data++, &adapter->syncp);
+ }
+
+ ena_queue_stats(adapter, &data);
+ ena_dev_admin_queue_stats(adapter, &data);
+}
+
+int ena_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
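+ /* Tx and Rx stats for every queue, plus the global and admin-queue stats */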
+ return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
+{
+ const struct ena_stats *ena_stats;
+ int i, j;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* Tx stats */
+ for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
+ ena_stats = &ena_stats_tx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_tx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+ /* Rx stats */
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_rx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void ena_com_dev_strings(u8 **data)
+{
+ const struct ena_stats *ena_stats;
+ int i;
+
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
+ ena_stats = &ena_stats_ena_com_strings[i];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "ena_admin_q_%s", ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
+}
+
+static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
+ ena_stats = &ena_stats_global_strings[i];
+
+ memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ ena_queue_strings(adapter, &data);
+ ena_com_dev_strings(&data);
+}
+
+static int ena_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_admin_get_feature_link_desc *link;
+ struct ena_admin_get_feat_resp feat_resp;
+ int rc;
+
+ rc = ena_com_get_link_params(ena_dev, &feat_resp);
+ if (rc)
+ return rc;
+
+ link = &feat_resp.u.link;
+ link_ksettings->base.speed = link->speed;
+
+ if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+ }
+
+ link_ksettings->base.autoneg =
+ (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ link_ksettings->base.duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int ena_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ena_adapter *adapter = netdev_priv(net_dev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_intr_moder_entry intr_moder_entry;
+
+ if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+ /* the device doesn't support interrupt moderation */
+ return -EOPNOTSUPP;
+ }
+ coalesce->tx_coalesce_usecs =
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
+ ena_dev->intr_delay_resolution;
+ if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+ coalesce->rx_coalesce_usecs =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+ / ena_dev->intr_delay_resolution;
+ } else {
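+ /* Adaptive moderation is enabled: report the low/mid/high entries of
+ * the moderation table through the matching ethtool fields.
+ */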
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;
+
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;
+
+ ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
+ coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
+ coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
+ }
+ coalesce->use_adaptive_rx_coalesce =
+ ena_com_get_adaptive_moderation_enabled(ena_dev);
+
+ return 0;
+}
+
+static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+{
+ unsigned int val;
+ int i;
+
+ val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
+
+ for (i = 0; i < adapter->num_queues; i++)
+ adapter->tx_ring[i].smoothed_interval = val;
+}
+
+static int ena_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct ena_adapter *adapter = netdev_priv(net_dev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct ena_intr_moder_entry intr_moder_entry;
+ int rc;
+
+ if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+ /* the device doesn't support interrupt moderation */
+ return -EOPNOTSUPP;
+ }
+
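+ /* Reject coalescing parameters the device does not implement */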
+ if (coalesce->rx_coalesce_usecs_irq ||
+ coalesce->rx_max_coalesced_frames_irq ||
+ coalesce->tx_coalesce_usecs_irq ||
+ coalesce->tx_max_coalesced_frames ||
+ coalesce->tx_max_coalesced_frames_irq ||
+ coalesce->stats_block_coalesce_usecs ||
+ coalesce->use_adaptive_tx_coalesce ||
+ coalesce->pkt_rate_low ||
+ coalesce->tx_coalesce_usecs_low ||
+ coalesce->tx_max_coalesced_frames_low ||
+ coalesce->pkt_rate_high ||
+ coalesce->tx_coalesce_usecs_high ||
+ coalesce->tx_max_coalesced_frames_high ||
+ coalesce->rate_sample_interval)
+ return -EINVAL;
+
+ rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
+ coalesce->tx_coalesce_usecs);
+ if (rc)
+ return rc;
+
+ ena_update_tx_rings_intr_moderation(adapter);
+
+ if (ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+ if (!coalesce->use_adaptive_rx_coalesce) {
+ ena_com_disable_adaptive_moderation(ena_dev);
+ rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
+ coalesce->rx_coalesce_usecs);
+ return rc;
+ }
+ } else { /* was in non-adaptive mode */
+ if (coalesce->use_adaptive_rx_coalesce) {
+ ena_com_enable_adaptive_moderation(ena_dev);
+ } else {
+ rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
+ coalesce->rx_coalesce_usecs);
+ return rc;
+ }
+ }
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
+
+ intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high;
+ intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high;
+ intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
+ ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
+
+ return 0;
+}
+
+static u32 ena_get_msglevel(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void ena_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = value;
+}
+
+static void ena_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+}
+
+static void ena_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_ring *tx_ring = &adapter->tx_ring[0];
+ struct ena_ring *rx_ring = &adapter->rx_ring[0];
+
+ ring->rx_max_pending = rx_ring->ring_size;
+ ring->tx_max_pending = tx_ring->ring_size;
+ ring->rx_pending = rx_ring->ring_size;
+ ring->tx_pending = tx_ring->ring_size;
+}
+
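+/* Translate between the device's RSS hash-field flags and the ethtool RXH_*
+ * bits used by the ETHTOOL_GRXFH/ETHTOOL_SRXFH commands.
+ */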
+static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
+{
+ u32 data = 0;
+
+ if (hash_fields & ENA_ADMIN_RSS_L2_DA)
+ data |= RXH_L2DA;
+
+ if (hash_fields & ENA_ADMIN_RSS_L3_DA)
+ data |= RXH_IP_DST;
+
+ if (hash_fields & ENA_ADMIN_RSS_L3_SA)
+ data |= RXH_IP_SRC;
+
+ if (hash_fields & ENA_ADMIN_RSS_L4_DP)
+ data |= RXH_L4_B_2_3;
+
+ if (hash_fields & ENA_ADMIN_RSS_L4_SP)
+ data |= RXH_L4_B_0_1;
+
+ return data;
+}
+
+static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
+{
+ u16 data = 0;
+
+ if (hash_fields & RXH_L2DA)
+ data |= ENA_ADMIN_RSS_L2_DA;
+
+ if (hash_fields & RXH_IP_DST)
+ data |= ENA_ADMIN_RSS_L3_DA;
+
+ if (hash_fields & RXH_IP_SRC)
+ data |= ENA_ADMIN_RSS_L3_SA;
+
+ if (hash_fields & RXH_L4_B_2_3)
+ data |= ENA_ADMIN_RSS_L4_DP;
+
+ if (hash_fields & RXH_L4_B_0_1)
+ data |= ENA_ADMIN_RSS_L4_SP;
+
+ return data;
+}
+
+static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ enum ena_admin_flow_hash_proto proto;
+ u16 hash_fields;
+ int rc;
+
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_TCP4;
+ break;
+ case UDP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_UDP4;
+ break;
+ case TCP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_TCP6;
+ break;
+ case UDP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_UDP6;
+ break;
+ case IPV4_FLOW:
+ proto = ENA_ADMIN_RSS_IP4;
+ break;
+ case IPV6_FLOW:
+ proto = ENA_ADMIN_RSS_IP6;
+ break;
+ case ETHER_FLOW:
+ proto = ENA_ADMIN_RSS_NOT_IP;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+
+ rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
+ if (rc) {
+ /* If the device doesn't have permission, return unsupported */
+ if (rc == -EPERM)
+ rc = -EOPNOTSUPP;
+ return rc;
+ }
+
+ cmd->data = ena_flow_hash_to_flow_type(hash_fields);
+
+ return 0;
+}
+
+static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ enum ena_admin_flow_hash_proto proto;
+ u16 hash_fields;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_TCP4;
+ break;
+ case UDP_V4_FLOW:
+ proto = ENA_ADMIN_RSS_UDP4;
+ break;
+ case TCP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_TCP6;
+ break;
+ case UDP_V6_FLOW:
+ proto = ENA_ADMIN_RSS_UDP6;
+ break;
+ case IPV4_FLOW:
+ proto = ENA_ADMIN_RSS_IP4;
+ break;
+ case IPV6_FLOW:
+ proto = ENA_ADMIN_RSS_IP6;
+ break;
+ case ETHER_FLOW:
+ proto = ENA_ADMIN_RSS_NOT_IP;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+
+ hash_fields = ena_flow_data_to_flow_hash(cmd->data);
+
+ return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
+}
+
+static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ rc = ena_set_rss_hash(adapter->ena_dev, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ case ETHTOOL_SRXCLSRLINS:
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter %d is not supported\n", info->cmd);
+ rc = -EOPNOTSUPP;
+ }
+
+ return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+}
+
+static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_queues;
+ rc = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ rc = ena_get_rss_hash(adapter->ena_dev, info);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ case ETHTOOL_GRXCLSRULE:
+ case ETHTOOL_GRXCLSRLALL:
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter %d is not supported\n", info->cmd);
+ rc = -EOPNOTSUPP;
+ }
+
+ return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+}
+
+static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return ENA_RX_RSS_TABLE_SIZE;
+}
+
+static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+{
+ return ENA_HASH_KEY_SIZE;
+}
+
+static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ enum ena_admin_hash_functions ena_func;
+ u8 func;
+ int rc;
+
+ rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ if (rc)
+ return rc;
+
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+ if (rc)
+ return rc;
+
+ switch (ena_func) {
+ case ENA_ADMIN_TOEPLITZ:
+ func = ETH_RSS_HASH_TOP;
+ break;
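+ /* The device's CRC32 hash function is reported to ethtool as
+ * ETH_RSS_HASH_XOR (the reverse mapping is used in ena_set_rxfh).
+ */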
+ case ENA_ADMIN_CRC32:
+ func = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ netif_err(adapter, drv, netdev,
+ "Command parameter is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hfunc)
+ *hfunc = func;
+
+ return rc;
+}
+
+static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ enum ena_admin_hash_functions func;
+ int rc, i;
+
+ if (indir) {
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ ENA_IO_RXQ_IDX(indir[i]),
+ i);
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, netdev,
+ "Cannot fill indirect table (index is too large)\n");
+ return rc;
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (rc) {
+ netif_err(adapter, drv, netdev,
+ "Cannot set indirect table\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ }
+
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ func = ENA_ADMIN_TOEPLITZ;
+ break;
+ case ETH_RSS_HASH_XOR:
+ func = ENA_ADMIN_CRC32;
+ break;
+ default:
+ netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
+ hfunc);
+ return -EOPNOTSUPP;
+ }
+
+ if (key) {
+ rc = ena_com_fill_hash_function(ena_dev, func, key,
+ ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, netdev, "Cannot fill key\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ }
+
+ return 0;
+}
+
+static void ena_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = adapter->num_queues;
+ channels->tx_count = adapter->num_queues;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
+static int ena_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = adapter->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ena_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+ u32 len;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ len = *(u32 *)data;
+ if (len > adapter->netdev->mtu) {
+ ret = -EINVAL;
+ break;
+ }
+ adapter->rx_copybreak = len;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops ena_ethtool_ops = {
+ .get_link_ksettings = ena_get_link_ksettings,
+ .get_drvinfo = ena_get_drvinfo,
+ .get_msglevel = ena_get_msglevel,
+ .set_msglevel = ena_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = ena_get_coalesce,
+ .set_coalesce = ena_set_coalesce,
+ .get_ringparam = ena_get_ringparam,
+ .get_sset_count = ena_get_sset_count,
+ .get_strings = ena_get_strings,
+ .get_ethtool_stats = ena_get_ethtool_stats,
+ .get_rxnfc = ena_get_rxnfc,
+ .set_rxnfc = ena_set_rxnfc,
+ .get_rxfh_indir_size = ena_get_rxfh_indir_size,
+ .get_rxfh_key_size = ena_get_rxfh_key_size,
+ .get_rxfh = ena_get_rxfh,
+ .set_rxfh = ena_set_rxfh,
+ .get_channels = ena_get_channels,
+ .get_tunable = ena_get_tunable,
+ .set_tunable = ena_set_tunable,
+};
+
+void ena_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ena_ethtool_ops;
+}
+
+static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 *strings_buf;
+ u64 *data_buf;
+ int strings_num;
+ int i, rc;
+
+ strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
+ if (strings_num <= 0) {
+ netif_err(adapter, drv, netdev, "Can't get stats num\n");
+ return;
+ }
+
+ strings_buf = devm_kzalloc(&adapter->pdev->dev,
+ strings_num * ETH_GSTRING_LEN,
+ GFP_ATOMIC);
+ if (!strings_buf) {
+ netif_err(adapter, drv, netdev,
+ "failed to alloc strings_buf\n");
+ return;
+ }
+
+ data_buf = devm_kzalloc(&adapter->pdev->dev,
+ strings_num * sizeof(u64),
+ GFP_ATOMIC);
+ if (!data_buf) {
+ netif_err(adapter, drv, netdev,
+ "failed to allocate data buf\n");
+ devm_kfree(&adapter->pdev->dev, strings_buf);
+ return;
+ }
+
+ ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
+ ena_get_ethtool_stats(netdev, NULL, data_buf);
+
+ /* If there is a buffer, dump stats, otherwise print them to dmesg */
+ if (buf)
+ for (i = 0; i < strings_num; i++) {
+ rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
+ "%s %llu\n",
+ strings_buf + i * ETH_GSTRING_LEN,
+ data_buf[i]);
+ buf += rc;
+ }
+ else
+ for (i = 0; i < strings_num; i++)
+ netif_err(adapter, drv, netdev, "%s: %llu\n",
+ strings_buf + i * ETH_GSTRING_LEN,
+ data_buf[i]);
+
+ devm_kfree(&adapter->pdev->dev, strings_buf);
+ devm_kfree(&adapter->pdev->dev, data_buf);
+}
+
+void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
+{
+ if (!buf)
+ return;
+
+ ena_dump_stats_ex(adapter, buf);
+}
+
+void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
+{
+ ena_dump_stats_ex(adapter, NULL);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
new file mode 100644
index 000000000000..bfeaec5bd7b9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -0,0 +1,3272 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif /* CONFIG_RFS_ACCEL */
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/numa.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+
+#include "ena_netdev.h"
+#include "ena_pci_id_tbl.h"
+
+static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+MODULE_DESCRIPTION(DEVICE_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5 * HZ)
+
+#define ENA_NAPI_BUDGET 64
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
+ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static struct ena_aenq_handlers aenq_handlers;
+
+static struct workqueue_struct *ena_wq;
+
+MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
+
+static int ena_rss_init_default(struct ena_adapter *adapter);
+
+static void ena_tx_timeout(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.tx_timeout++;
+ u64_stats_update_end(&adapter->syncp);
+
+ netif_err(adapter, tx_err, dev, "Transmit timed out\n");
+
+ /* Change the state of the device to trigger reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
+
+static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ adapter->rx_ring[i].mtu = mtu;
+}
+
+static int ena_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
+ netif_err(adapter, drv, dev,
+ "Invalid MTU setting. new_mtu: %d\n", new_mtu);
+
+ return -EINVAL;
+ }
+
+ ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
+ if (!ret) {
+ netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+ update_rx_ring_mtu(adapter, new_mtu);
+ dev->mtu = new_mtu;
+ } else {
+ netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
+ new_mtu);
+ }
+
+ return ret;
+}
+
+static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
+{
+#ifdef CONFIG_RFS_ACCEL
+ u32 i;
+ int rc;
+
+ adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+ if (!adapter->netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < adapter->num_queues; i++) {
+ int irq_idx = ENA_IO_IRQ_IDX(i);
+
+ rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
+ adapter->msix_entries[irq_idx].vector);
+ if (rc) {
+ free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
+ adapter->netdev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif /* CONFIG_RFS_ACCEL */
+ return 0;
+}
+
+static void ena_init_io_rings_common(struct ena_adapter *adapter,
+ struct ena_ring *ring, u16 qid)
+{
+ ring->qid = qid;
+ ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->napi = &adapter->ena_napi[qid].napi;
+ ring->adapter = adapter;
+ ring->ena_dev = adapter->ena_dev;
+ ring->per_napi_packets = 0;
+ ring->per_napi_bytes = 0;
+ ring->cpu = 0;
+ u64_stats_init(&ring->syncp);
+}
+
+static void ena_init_io_rings(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *txr, *rxr;
+ int i;
+
+ ena_dev = adapter->ena_dev;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ txr = &adapter->tx_ring[i];
+ rxr = &adapter->rx_ring[i];
+
+ /* TX/RX common ring state */
+ ena_init_io_rings_common(adapter, txr, i);
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* TX specific ring state */
+ txr->ring_size = adapter->tx_ring_size;
+ txr->tx_max_header_size = ena_dev->tx_max_header_size;
+ txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+ txr->sgl_size = adapter->max_tx_sgl_size;
+ txr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->rx_ring_size;
+ rxr->rx_copybreak = adapter->rx_copybreak;
+ rxr->sgl_size = adapter->max_rx_sgl_size;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ }
+}
+
+/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+ struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
+ int size, i, node;
+
+ if (tx_ring->tx_buffer_info) {
+ netif_err(adapter, ifup,
+ adapter->netdev, "tx_buffer_info is not NULL");
+ return -EEXIST;
+ }
+
+ size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
+ node = cpu_to_node(ena_irq->cpu);
+
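+ /* Prefer the NUMA node of the queue's IRQ; fall back to any node */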
+ tx_ring->tx_buffer_info = vzalloc_node(size, node);
+ if (!tx_ring->tx_buffer_info) {
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ return -ENOMEM;
+ }
+
+ size = sizeof(u16) * tx_ring->ring_size;
+ tx_ring->free_tx_ids = vzalloc_node(size, node);
+ if (!tx_ring->free_tx_ids) {
+ tx_ring->free_tx_ids = vzalloc(size);
+ if (!tx_ring->free_tx_ids) {
+ vfree(tx_ring->tx_buffer_info);
+ return -ENOMEM;
+ }
+ }
+
+ /* Req id ring for TX out of order completions */
+ for (i = 0; i < tx_ring->ring_size; i++)
+ tx_ring->free_tx_ids[i] = i;
+
+ /* Reset tx statistics */
+ memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->cpu = ena_irq->cpu;
+ return 0;
+}
+
+/* ena_free_tx_resources - Free I/O Tx Resources per Queue
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all transmit software resources
+ */
+static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
+{
+ struct ena_ring *tx_ring = &adapter->tx_ring[qid];
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ vfree(tx_ring->free_tx_ids);
+ tx_ring->free_tx_ids = NULL;
+}
+
+/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
+ * @adapter: private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_tx_resources(adapter, i);
+ if (rc)
+ goto err_setup_tx;
+ }
+
+ return 0;
+
+err_setup_tx:
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Tx queue %d: allocation failed\n", i);
+
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_tx_resources(adapter, i);
+ return rc;
+}
+
+/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_tx_resources(adapter, i);
+}
+
+/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ena_setup_rx_resources(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+ struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
+ int size, node;
+
+ if (rx_ring->rx_buffer_info) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "rx_buffer_info is not NULL");
+ return -EEXIST;
+ }
+
+ /* allocate an extra element so that on the rx path
+ * we can always prefetch rx_info + 1
+ */
+ size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
+ node = cpu_to_node(ena_irq->cpu);
+
+ rx_ring->rx_buffer_info = vzalloc_node(size, node);
+ if (!rx_ring->rx_buffer_info) {
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ return -ENOMEM;
+ }
+
+ /* Reset rx statistics */
+ memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->cpu = ena_irq->cpu;
+
+ return 0;
+}
+
+/* ena_free_rx_resources - Free I/O Rx Resources
+ * @adapter: network interface device structure
+ * @qid: queue index
+ *
+ * Free all receive software resources
+ */
+static void ena_free_rx_resources(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+}
+
+/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_setup_rx_resources(adapter, i);
+ if (rc)
+ goto err_setup_rx;
+ }
+
+ return 0;
+
+err_setup_rx:
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Rx queue %d: allocation failed\n", i);
+
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ena_free_rx_resources(adapter, i);
+ return rc;
+}
+
+/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ */
+static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_resources(adapter, i);
+}
+
+static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info, gfp_t gfp)
+{
+ struct ena_com_buf *ena_buf;
+ struct page *page;
+ dma_addr_t dma;
+
+ /* if the previously allocated page is still unused, keep it */
+ if (unlikely(rx_info->page))
+ return 0;
+
+ page = alloc_page(gfp);
+ if (unlikely(!page)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.page_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.dma_mapping_err++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ __free_page(page);
+ return -EIO;
+ }
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "alloc page %p, rx_info %p\n", page, rx_info);
+
+ rx_info->page = page;
+ rx_info->page_offset = 0;
+ ena_buf = &rx_info->ena_buf;
+ ena_buf->paddr = dma;
+ ena_buf->len = PAGE_SIZE;
+
+ return 0;
+}
+
+static void ena_free_rx_page(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ struct page *page = rx_info->page;
+ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+ if (unlikely(!page)) {
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Trying to free unallocated buffer\n");
+ return;
+ }
+
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ __free_page(page);
+ rx_info->page = NULL;
+}
+
+static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
+{
+ u16 next_to_use;
+ u32 i;
+ int rc;
+
+ next_to_use = rx_ring->next_to_use;
+
+ for (i = 0; i < num; i++) {
+ struct ena_rx_buffer *rx_info =
+ &rx_ring->rx_buffer_info[next_to_use];
+
+ rc = ena_alloc_rx_page(rx_ring, rx_info,
+ __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+ if (unlikely(rc < 0)) {
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "failed to alloc buffer for rx queue %d\n",
+ rx_ring->qid);
+ break;
+ }
+ rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
+ &rx_info->ena_buf,
+ next_to_use);
+ if (unlikely(rc)) {
+ netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "failed to add buffer for rx queue %d\n",
+ rx_ring->qid);
+ break;
+ }
+ next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
+ rx_ring->ring_size);
+ }
+
+ if (unlikely(i < num)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.refil_partial++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netdev_warn(rx_ring->netdev,
+ "refilled rx qid %d with only %d buffers (from %d)\n",
+ rx_ring->qid, i, num);
+ }
+
+ if (likely(i)) {
+ /* Add a memory barrier to make sure the descriptors are written
+ * before issuing the doorbell
+ */
+ wmb();
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
+ }
+
+ rx_ring->next_to_use = next_to_use;
+
+ return i;
+}
+
+static void ena_free_rx_bufs(struct ena_adapter *adapter,
+ u32 qid)
+{
+ struct ena_ring *rx_ring = &adapter->rx_ring[qid];
+ u32 i;
+
+ for (i = 0; i < rx_ring->ring_size; i++) {
+ struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
+
+ if (rx_info->page)
+ ena_free_rx_page(rx_ring, rx_info);
+ }
+}
+
+/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
+ * @adapter: board private structure
+ *
+ */
+static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
+{
+ struct ena_ring *rx_ring;
+ int i, rc, bufs_num;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ bufs_num = rx_ring->ring_size - 1;
+ rc = ena_refill_rx_bufs(rx_ring, bufs_num);
+
+ if (unlikely(rc != bufs_num))
+ netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "refilling Queue %d failed. allocated %d buffers from: %d\n",
+ i, rc, bufs_num);
+ }
+}
+
+static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_free_rx_bufs(adapter, i);
+}
+
+/* ena_free_tx_bufs - Free Tx Buffers per Queue
+ * @tx_ring: TX ring whose buffers are to be freed
+ */
+static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+{
+ u32 i;
+
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+ struct ena_com_buf *ena_buf;
+ int nr_frags;
+ int j;
+
+ if (!tx_info->skb)
+ continue;
+
+ netdev_notice(tx_ring->netdev,
+ "free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
+
+ ena_buf = tx_info->bufs;
+ dma_unmap_single(tx_ring->dev,
+ ena_buf->paddr,
+ ena_buf->len,
+ DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ nr_frags = tx_info->num_of_bufs - 1;
+ for (j = 0; j < nr_frags; j++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev,
+ ena_buf->paddr,
+ ena_buf->len,
+ DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(tx_info->skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
+}
+
+static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+{
+ struct ena_ring *tx_ring;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+ ena_free_tx_bufs(tx_ring);
+ }
+}
+
+static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
+{
+ u16 ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
+{
+ u16 ena_qid;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ ena_qid = ENA_IO_RXQ_IDX(i);
+ ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
+ }
+}
+
+static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
+{
+ ena_destroy_all_tx_queues(adapter);
+ ena_destroy_all_rx_queues(adapter);
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
+ }
+
+ if (tx_info)
+ netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_info doesn't have valid skb\n");
+ else
+ netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "Invalid req_id: %hu\n", req_id);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.bad_req_id++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* Trigger device reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
+ return -EFAULT;
+}
+
+static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ struct netdev_queue *txq;
+ bool above_thresh;
+ u32 tx_bytes = 0;
+ u32 total_done = 0;
+ u16 next_to_clean;
+ u16 req_id;
+ int tx_pkts = 0;
+ int rc;
+
+ next_to_clean = tx_ring->next_to_clean;
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct sk_buff *skb;
+ struct ena_com_buf *ena_buf;
+ int i, nr_frags;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc)
+ break;
+
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ skb = tx_info->skb;
+
+ /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+ prefetch(&skb->end);
+
+ tx_info->skb = NULL;
+ tx_info->last_jiffies = 0;
+
+ if (likely(tx_info->num_of_bufs != 0)) {
+ ena_buf = tx_info->bufs;
+
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ nr_frags = tx_info->num_of_bufs - 1;
+ for (i = 0; i < nr_frags; i++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ }
+ }
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d skb %p completed\n", tx_ring->qid,
+ skb);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+
+ tx_ring->free_tx_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ /* need to make the ring's circular update visible to
+ * ena_start_xmit() before checking for netif_queue_stopped().
+ */
+ smp_mb();
+
+ above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
+ ENA_TX_WAKEUP_THRESH;
+ if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
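+ /* Re-check under the tx queue lock to close the race with
+ * ena_start_xmit() stopping the queue.
+ */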
+ above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
+ ENA_TX_WAKEUP_THRESH;
+ if (netif_tx_queue_stopped(txq) && above_thresh) {
+ netif_tx_wake_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_wakeup++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+ __netif_tx_unlock(txq);
+ }
+
+ tx_ring->per_napi_bytes += tx_bytes;
+ tx_ring->per_napi_packets += tx_pkts;
+
+ return tx_pkts;
+}
+
+static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ struct ena_com_rx_buf_info *ena_bufs,
+ u32 descs,
+ u16 *next_to_clean)
+{
+ struct sk_buff *skb;
+ struct ena_rx_buffer *rx_info =
+ &rx_ring->rx_buffer_info[*next_to_clean];
+ u32 len;
+ u32 buf = 0;
+ void *va;
+
+ len = ena_bufs[0].len;
+ if (unlikely(!rx_info->page)) {
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Page is NULL\n");
+ return NULL;
+ }
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_info %p page %p\n",
+ rx_info, rx_info->page);
+
+ /* save virt address of first buffer */
+ va = page_address(rx_info->page) + rx_info->page_offset;
+ prefetch(va + NET_IP_ALIGN);
+
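+ /* Small packets are copied into a fresh skb so the DMA page stays
+ * mapped for reuse; larger packets hand the page off as skb frags.
+ */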
+ if (len <= rx_ring->rx_copybreak) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_copybreak);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Failed to allocate skb\n");
+ return NULL;
+ }
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx allocated small packet. len %d. data_len %d\n",
+ skb->len, skb->data_len);
+
+ /* sync this buffer for CPU use */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, va, len);
+ dma_sync_single_for_device(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ len,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
+ rx_ring->ring_size);
+ return skb;
+ }
+
+ skb = napi_get_frags(rx_ring->napi);
+ if (unlikely(!skb)) {
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "Failed allocating skb\n");
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ return NULL;
+ }
+
+ do {
+ dma_unmap_page(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ rx_info->page_offset, len, PAGE_SIZE);
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx skb updated. len %d. data_len %d\n",
+ skb->len, skb->data_len);
+
+ rx_info->page = NULL;
+ *next_to_clean =
+ ENA_RX_RING_IDX_NEXT(*next_to_clean,
+ rx_ring->ring_size);
+ if (likely(--descs == 0))
+ break;
+ rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
+ len = ena_bufs[++buf].len;
+ } while (1);
+
+ return skb;
+}
+
+/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @rx_ring: the RX ring that received the packet
+ * @ena_rx_ctx: received packet context/metadata
+ * @skb: skb currently being received and modified
+ */
+static inline void ena_rx_checksum(struct ena_ring *rx_ring,
+ struct ena_com_rx_ctx *ena_rx_ctx,
+ struct sk_buff *skb)
+{
+ /* Rx csum disabled */
+ if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* For fragmented packets the checksum isn't valid */
+ if (ena_rx_ctx->frag) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ /* if IPv4 and the header checksum is bad */
+ if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
+ (ena_rx_ctx->l3_csum_err))) {
+ /* ipv4 checksum error */
+ skb->ip_summed = CHECKSUM_NONE;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_csum++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "RX IPv4 header checksum error\n");
+ return;
+ }
+
+ /* if TCP/UDP */
+ if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
+ (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
+ if (unlikely(ena_rx_ctx->l4_csum_err)) {
+ /* TCP/UDP checksum error */
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_csum++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "RX L4 checksum error\n");
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static void ena_set_rx_hash(struct ena_ring *rx_ring,
+ struct ena_com_rx_ctx *ena_rx_ctx,
+ struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type;
+
+ if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
+ if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
+ (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
+
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_NONE;
+
+ /* Override hash type if the packet is fragmented */
+ if (ena_rx_ctx->frag)
+ hash_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
+ }
+}
+
+/* ena_clean_rx_irq - Cleanup RX irq
+ * @rx_ring: RX ring to clean
+ * @napi: napi handler
+ * @budget: how many packets driver is allowed to clean
+ *
+ * Returns the number of cleaned buffers.
+ */
+static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ u32 budget)
+{
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u32 res_budget, work_done;
+
+ struct ena_com_rx_ctx ena_rx_ctx;
+ struct ena_adapter *adapter;
+ struct sk_buff *skb;
+ int refill_required;
+ int refill_threshold;
+ int rc = 0;
+ int total_len = 0;
+ int rx_copybreak_pkt = 0;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "%s qid %d\n", __func__, rx_ring->qid);
+ res_budget = budget;
+
+ do {
+ ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ ena_rx_ctx.max_bufs = rx_ring->sgl_size;
+ ena_rx_ctx.descs = 0;
+ rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
+ rx_ring->ena_com_io_sq,
+ &ena_rx_ctx);
+ if (unlikely(rc))
+ goto error;
+
+ if (unlikely(ena_rx_ctx.descs == 0))
+ break;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+ rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
+ ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+
+ /* allocate skb and fill it */
+ skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
+ &next_to_clean);
+
+ /* exit if we failed to retrieve a buffer */
+ if (unlikely(!skb)) {
+ next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
+ ena_rx_ctx.descs,
+ rx_ring->ring_size);
+ break;
+ }
+
+ ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
+
+ ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
+
+ skb_record_rx_queue(skb, rx_ring->qid);
+
+ if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
+ total_len += rx_ring->ena_bufs[0].len;
+ rx_copybreak_pkt++;
+ napi_gro_receive(napi, skb);
+ } else {
+ total_len += skb->len;
+ napi_gro_frags(napi);
+ }
+
+ res_budget--;
+ } while (likely(res_budget));
+
+ work_done = budget - res_budget;
+ rx_ring->per_napi_bytes += total_len;
+ rx_ring->per_napi_packets += work_done;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bytes += total_len;
+ rx_ring->rx_stats.cnt += work_done;
+ rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ rx_ring->next_to_clean = next_to_clean;
+
+ refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+
+ /* Optimization, try to batch new rx buffers */
+ if (refill_required > refill_threshold) {
+ ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ ena_refill_rx_bufs(rx_ring, refill_required);
+ }
+
+ return work_done;
+
+error:
+ adapter = netdev_priv(rx_ring->netdev);
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_desc_num++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ /* Too many desc from the device. Trigger reset */
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ return 0;
+}
+
+inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
+ struct ena_ring *tx_ring)
+{
+ /* We apply adaptive moderation on the Rx path only.
+ * Tx uses static interrupt moderation.
+ */
+ ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
+ rx_ring->per_napi_packets,
+ rx_ring->per_napi_bytes,
+ &rx_ring->smoothed_interval,
+ &rx_ring->moder_tbl_idx);
+
+ /* Reset per napi packets/bytes */
+ tx_ring->per_napi_packets = 0;
+ tx_ring->per_napi_bytes = 0;
+ rx_ring->per_napi_packets = 0;
+ rx_ring->per_napi_bytes = 0;
+}
+
+static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
+{
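+ /* If the NAPI poll has migrated to another CPU, update the NUMA node
+ * hint of both queues on the device.
+ */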
+ int cpu = get_cpu();
+ int numa_node;
+
+ /* Check only one ring since the 2 rings are running on the same cpu */
+ if (likely(tx_ring->cpu == cpu))
+ goto out;
+
+ numa_node = cpu_to_node(cpu);
+ put_cpu();
+
+ if (numa_node != NUMA_NO_NODE) {
+ ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
+ }
+
+ tx_ring->cpu = cpu;
+ rx_ring->cpu = cpu;
+
+ return;
+out:
+ put_cpu();
+}
+
+static int ena_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring, *rx_ring;
+ struct ena_eth_io_intr_reg intr_reg;
+
+ u32 tx_work_done;
+ u32 rx_work_done;
+ int tx_budget;
+ int napi_comp_call = 0;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+ rx_ring = ena_napi->rx_ring;
+
+ tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+
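+ /* If both Tx and Rx finished below their budgets the rings are
+ * drained: complete NAPI and unmask the shared interrupt.
+ */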
+ if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
+ napi_complete_done(napi, rx_work_done);
+
+ napi_comp_call = 1;
+ /* Tx and Rx share the same interrupt vector */
+ if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+ ena_adjust_intr_moderation(rx_ring, tx_ring);
+
+ /* Update intr register: rx intr delay, tx intr delay and
+ * interrupt unmask
+ */
+ ena_com_update_intr_reg(&intr_reg,
+ rx_ring->smoothed_interval,
+ tx_ring->smoothed_interval,
+ true);
+
+ /* It is a shared MSI-X vector: both the Tx and Rx CQs hold a pointer
+ * to it, so we use one of them to reach the intr reg.
+ */
+ ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+
+ ena_update_ring_numa_node(tx_ring, rx_ring);
+
+ ret = rx_work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.napi_comp += napi_comp_call;
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ return ret;
+}
+
+static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+
+ ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
+
+ /* Don't call the aenq handler before probe is done */
+ if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
+ ena_com_aenq_intr_handler(adapter->ena_dev, data);
+
+ return IRQ_HANDLED;
+}
+
+/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
+ * @irq: interrupt number
+ * @data: pointer to a network interface private napi device structure
+ */
+static irqreturn_t ena_intr_msix_io(int irq, void *data)
+{
+ struct ena_napi *ena_napi = data;
+
+ napi_schedule(&ena_napi->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+{
+ int i, msix_vecs, rc;
+
+ if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Error, MSI-X is already enabled\n");
+ return -EPERM;
+ }
+
+ /* Reserve the maximum number of MSI-X vectors we might need */
+ msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+
+ netif_dbg(adapter, probe, adapter->netdev,
+ "trying to enable MSI-X, vectors %d\n", msix_vecs);
+
+ adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));
+
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_vecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
+ if (rc != 0) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Failed to enable MSI-X, vectors %d rc %d\n",
+ msix_vecs, rc);
+ return -ENOSPC;
+ }
+
+ netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
+ msix_vecs);
+
+ if (msix_vecs >= 1) {
+ if (ena_init_rx_cpu_rmap(adapter))
+ netif_warn(adapter, probe, adapter->netdev,
+ "Failed to map IRQs to CPUs\n");
+ }
+
+ adapter->msix_vecs = msix_vecs;
+ set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
+
+ return 0;
+}
+
+static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
+{
+ u32 cpu;
+
+ snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
+ ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
+ pci_name(adapter->pdev));
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
+ ena_intr_msix_mgmnt;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
+ adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+ cpu = cpumask_first(cpu_online_mask);
+ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
+ cpumask_set_cpu(cpu,
+ &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
+}
+
+static void ena_setup_io_intr(struct ena_adapter *adapter)
+{
+ struct net_device *netdev;
+ int irq_idx, i, cpu;
+
+ netdev = adapter->netdev;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+ cpu = i % num_online_cpus();
+
+ snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
+ "%s-Tx-Rx-%d", netdev->name, i);
+ adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
+ adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
+ adapter->irq_tbl[irq_idx].vector =
+ adapter->msix_entries[irq_idx].vector;
+ adapter->irq_tbl[irq_idx].cpu = cpu;
+
+ cpumask_set_cpu(cpu,
+ &adapter->irq_tbl[irq_idx].affinity_hint_mask);
+ }
+}
+
+static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
+{
+ unsigned long flags = 0;
+ struct ena_irq *irq;
+ int rc;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ irq->data);
+ if (rc) {
+ netif_err(adapter, probe, adapter->netdev,
+ "failed to request admin irq\n");
+ return rc;
+ }
+
+ netif_dbg(adapter, probe, adapter->netdev,
+ "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+ irq->affinity_hint_mask.bits[0], irq->vector);
+
+ irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+
+ return rc;
+}
+
+static int ena_request_io_irq(struct ena_adapter *adapter)
+{
+ unsigned long flags = 0;
+ struct ena_irq *irq;
+ int rc = 0, i, k;
+
+ if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ: MSI-X is not enabled\n");
+ return -EINVAL;
+ }
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ irq->data);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ. index %d rc %d\n",
+ i, rc);
+ goto err;
+ }
+
+ netif_dbg(adapter, ifup, adapter->netdev,
+ "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+ i, irq->affinity_hint_mask.bits[0], irq->vector);
+
+ irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+ }
+
+ return rc;
+
+err:
+ for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
+ irq = &adapter->irq_tbl[k];
+ free_irq(irq->vector, irq->data);
+ }
+
+ return rc;
+}
+
+static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+
+ irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
+ synchronize_irq(irq->vector);
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_irq(irq->vector, irq->data);
+}
+
+static void ena_free_io_irq(struct ena_adapter *adapter)
+{
+ struct ena_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (adapter->msix_vecs >= 1) {
+ free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
+ adapter->netdev->rx_cpu_rmap = NULL;
+ }
+#endif /* CONFIG_RFS_ACCEL */
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+ irq = &adapter->irq_tbl[i];
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_irq(irq->vector, irq->data);
+ }
+}
+
+static void ena_disable_msix(struct ena_adapter *adapter)
+{
+ if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
+ pci_disable_msix(adapter->pdev);
+
+ if (adapter->msix_entries)
+ vfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
+{
+ int i;
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
+ synchronize_irq(adapter->irq_tbl[i].vector);
+}
+
+static void ena_del_napi(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ netif_napi_del(&adapter->ena_napi[i].napi);
+}
+
+static void ena_init_napi(struct ena_adapter *adapter)
+{
+ struct ena_napi *napi;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ napi = &adapter->ena_napi[i];
+
+ netif_napi_add(adapter->netdev,
+ &adapter->ena_napi[i].napi,
+ ena_io_poll,
+ ENA_NAPI_BUDGET);
+ napi->rx_ring = &adapter->rx_ring[i];
+ napi->tx_ring = &adapter->tx_ring[i];
+ napi->qid = i;
+ }
+}
+
+static void ena_napi_disable_all(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_disable(&adapter->ena_napi[i].napi);
+}
+
+static void ena_napi_enable_all(struct ena_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_enable(&adapter->ena_napi[i].napi);
+}
+
+static void ena_restore_ethtool_params(struct ena_adapter *adapter)
+{
+ adapter->tx_usecs = 0;
+ adapter->rx_usecs = 0;
+ adapter->tx_frames = 1;
+ adapter->rx_frames = 1;
+}
+
+/* Configure the Rx forwarding */
+static int ena_rss_configure(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc;
+
+ /* In case the RSS table wasn't initialized by probe */
+ if (!ena_dev->rss.tbl_log_size) {
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EPERM)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to init RSS rc: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set indirect table */
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != -EPERM))
+ return rc;
+
+ /* Configure hash function (if supported) */
+ rc = ena_com_set_hash_function(ena_dev);
+ if (unlikely(rc && (rc != -EPERM)))
+ return rc;
+
+ /* Configure hash inputs (if supported) */
+ rc = ena_com_set_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != -EPERM)))
+ return rc;
+
+ return 0;
+}
+
+static int ena_up_complete(struct ena_adapter *adapter)
+{
+ int rc, i;
+
+ rc = ena_rss_configure(adapter);
+ if (rc)
+ return rc;
+
+ ena_init_napi(adapter);
+
+ ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
+
+ ena_refill_all_rx_bufs(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+
+ ena_restore_ethtool_params(adapter);
+
+ ena_napi_enable_all(adapter);
+
+ /* schedule napi in case we had pending packets
+ * from the last time napi was disabled
+ */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
+ return 0;
+}
+
+static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
+{
+ struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *tx_ring;
+ u32 msix_vector;
+ u16 ena_qid;
+ int rc;
+
+ ena_dev = adapter->ena_dev;
+
+ tx_ring = &adapter->tx_ring[qid];
+ msix_vector = ENA_IO_IRQ_IDX(qid);
+ ena_qid = ENA_IO_TXQ_IDX(qid);
+
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+ ctx.qid = ena_qid;
+ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ ctx.msix_vector = msix_vector;
+ ctx.queue_size = adapter->tx_ring_size;
+ ctx.numa_node = cpu_to_node(tx_ring->cpu);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to create I/O TX queue num %d rc: %d\n",
+ qid, rc);
+ return rc;
+ }
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &tx_ring->ena_com_io_sq,
+ &tx_ring->ena_com_io_cq);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+ qid, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ }
+
+ ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
+ return rc;
+}
+
+static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc, i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_create_io_tx_queue(adapter, i);
+ if (rc)
+ goto create_err;
+ }
+
+ return 0;
+
+create_err:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
+
+ return rc;
+}
+
+static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
+{
+ struct ena_com_dev *ena_dev;
+ struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_ring *rx_ring;
+ u32 msix_vector;
+ u16 ena_qid;
+ int rc;
+
+ ena_dev = adapter->ena_dev;
+
+ rx_ring = &adapter->rx_ring[qid];
+ msix_vector = ENA_IO_IRQ_IDX(qid);
+ ena_qid = ENA_IO_RXQ_IDX(qid);
+
+ ctx.qid = ena_qid;
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+ ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ ctx.msix_vector = msix_vector;
+ ctx.queue_size = adapter->rx_ring_size;
+ ctx.numa_node = cpu_to_node(rx_ring->cpu);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to create I/O RX queue num %d rc: %d\n",
+ qid, rc);
+ return rc;
+ }
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &rx_ring->ena_com_io_sq,
+ &rx_ring->ena_com_io_cq);
+ if (rc) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+ qid, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ }
+
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
+
+ return rc;
+}
+
+static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc, i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ rc = ena_create_io_rx_queue(adapter, i);
+ if (rc)
+ goto create_err;
+ }
+
+ return 0;
+
+create_err:
+ while (i--)
+ ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
+
+ return rc;
+}
+
+static int ena_up(struct ena_adapter *adapter)
+{
+ int rc;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ ena_setup_io_intr(adapter);
+
+ rc = ena_request_io_irq(adapter);
+ if (rc)
+ goto err_req_irq;
+
+ /* allocate transmit descriptors */
+ rc = ena_setup_all_tx_resources(adapter);
+ if (rc)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ rc = ena_setup_all_rx_resources(adapter);
+ if (rc)
+ goto err_setup_rx;
+
+ /* Create TX queues */
+ rc = ena_create_all_io_tx_queues(adapter);
+ if (rc)
+ goto err_create_tx_queues;
+
+ /* Create RX queues */
+ rc = ena_create_all_io_rx_queues(adapter);
+ if (rc)
+ goto err_create_rx_queues;
+
+ rc = ena_up_complete(adapter);
+ if (rc)
+ goto err_up;
+
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.interface_up++;
+ u64_stats_update_end(&adapter->syncp);
+
+ set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ return rc;
+
+err_up:
+ ena_destroy_all_rx_queues(adapter);
+err_create_rx_queues:
+ ena_destroy_all_tx_queues(adapter);
+err_create_tx_queues:
+ ena_free_all_io_rx_resources(adapter);
+err_setup_rx:
+ ena_free_all_io_tx_resources(adapter);
+err_setup_tx:
+ ena_free_io_irq(adapter);
+err_req_irq:
+
+ return rc;
+}
+
+static void ena_down(struct ena_adapter *adapter)
+{
+ netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+
+ clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.interface_down++;
+ u64_stats_update_end(&adapter->syncp);
+
+ /* After this point the napi handler won't enable the tx queue */
+ ena_napi_disable_all(adapter);
+ netif_carrier_off(adapter->netdev);
+ netif_tx_disable(adapter->netdev);
+
+ /* After destroying the queues there won't be any new interrupts */
+ ena_destroy_all_io_queues(adapter);
+
+ ena_disable_io_intr_sync(adapter);
+ ena_free_io_irq(adapter);
+ ena_del_napi(adapter);
+
+ ena_free_all_tx_bufs(adapter);
+ ena_free_all_rx_bufs(adapter);
+ ena_free_all_io_tx_resources(adapter);
+ ena_free_all_io_rx_resources(adapter);
+}
+
+/* ena_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int ena_open(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ /* Notify the stack of the actual queue counts. */
+ rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+ if (rc) {
+ netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
+ return rc;
+ }
+
+ rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+ if (rc) {
+ netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
+ return rc;
+ }
+
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/* ena_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int ena_close(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
+
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
+
+ return 0;
+}
+
+static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+ u8 l4_protocol = 0;
+
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
+ ena_tx_ctx->l4_csum_enable = 1;
+ if (mss) {
+ ena_tx_ctx->tso_enable = 1;
+ ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
+ ena_tx_ctx->l4_csum_partial = 0;
+ } else {
+ ena_tx_ctx->tso_enable = 0;
+ ena_meta->l4_hdr_len = 0;
+ ena_tx_ctx->l4_csum_partial = 1;
+ }
+
+ switch (ip_hdr(skb)->version) {
+ case IPVERSION:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
+ if (ip_hdr(skb)->frag_off & htons(IP_DF))
+ ena_tx_ctx->df = 1;
+ if (mss)
+ ena_tx_ctx->l3_csum_enable = 1;
+ l4_protocol = ip_hdr(skb)->protocol;
+ break;
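+ /* An IP version field of 6 identifies an IPv6 packet */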
+ case 6:
+ ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
+ l4_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ if (l4_protocol == IPPROTO_TCP)
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
+ else
+ ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
+
+ ena_meta->mss = mss;
+ ena_meta->l3_hdr_len = skb_network_header_len(skb);
+ ena_meta->l3_hdr_offset = skb_network_offset(skb);
+ ena_tx_ctx->meta_valid = 1;
+
+ } else {
+ ena_tx_ctx->meta_valid = 0;
+ }
+}
+
+static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ int num_frags, header_len, rc;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ header_len = skb_headlen(skb);
+
+ if (num_frags < tx_ring->sgl_size)
+ return 0;
+
+ if ((num_frags == tx_ring->sgl_size) &&
+ (header_len < tx_ring->tx_max_header_size))
+ return 0;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.linearize++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ rc = skb_linearize(skb);
+ if (unlikely(rc)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.linearize_failed++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+
+ return rc;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ struct ena_com_buf *ena_buf;
+ void *push_hdr;
+ u32 len, last_frag;
+ u16 next_to_use;
+ u16 req_id;
+ u16 push_len;
+ u16 header_len;
+ dma_addr_t dma;
+ int qid, rc, nb_hw_desc;
+ int i = -1;
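+ /* i stays -1 if we bail out before the frag mapping loop below,
+ * which tells the error path that no DMA unmapping is needed
+ */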
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+ len = skb_headlen(skb);
+
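+ /* free_tx_ids holds the free request ids for tx_buffer_info,
+ * allowing tx completions to be processed out of order
+ */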
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+ ena_buf = tx_info->bufs;
+ tx_info->skb = skb;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* prepare the push buffer */
+ push_len = min_t(u32, len, tx_ring->tx_max_header_size);
+ header_len = push_len;
+ push_hdr = skb->data;
+ } else {
+ push_len = 0;
+ header_len = min_t(u32, len, tx_ring->tx_max_header_size);
+ push_hdr = NULL;
+ }
+
+ netif_dbg(adapter, tx_queued, dev,
+ "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
+ push_hdr, push_len);
+
+ if (len > push_len) {
+ dma = dma_map_single(tx_ring->dev, skb->data + push_len,
+ len - push_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto error_report_dma_error;
+
+ ena_buf->paddr = dma;
+ ena_buf->len = len - push_len;
+
+ ena_buf++;
+ tx_info->num_of_bufs++;
+ }
+
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < last_frag; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto error_report_dma_error;
+
+ ena_buf->paddr = dma;
+ ena_buf->len = len;
+ ena_buf++;
+ }
+
+ tx_info->num_of_bufs += last_frag;
+
+ memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = header_len;
+
+ /* set flags and meta data */
+ ena_tx_csum(&ena_tx_ctx, skb);
+
+ /* prepare the packet's descriptors to dma engine */
+ rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
+ &nb_hw_desc);
+
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "failed to prepare tx bufs\n");
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_stop++;
+ tx_ring->tx_stats.prepare_ctx_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netif_tx_stop_queue(txq);
+ goto error_unmap_dma;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.cnt++;
+ tx_ring->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ tx_info->tx_descs = nb_hw_desc;
+ tx_info->last_jiffies = jiffies;
+
+ tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ tx_ring->ring_size);
+
+ /* This wmb() is aimed to:
+ * 1 - perform an smp barrier before reading next_to_completion
+ * 2 - make sure the descriptors were written before triggering the doorbell
+ */
+ wmb();
+
+ /* Stop the queue when there is no more space available. A packet can
+ * need up to sgl_size + 2 descriptors: one for the meta descriptor and
+ * one for the header (if the header is larger than tx_max_header_size).
+ */
+ if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
+ (tx_ring->sgl_size + 2))) {
+ netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
+ __func__, qid);
+
+ netif_tx_stop_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_stop++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* There is a rare condition where this function decides to
+ * stop the queue but meanwhile clean_tx_irq updates
+ * next_to_completion and terminates, leaving the queue
+ * stopped forever.
+ * To solve this issue this function performs an rmb, checks
+ * the wakeup condition and wakes up the queue if needed.
+ */
+ smp_rmb();
+
+ if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
+ > ENA_TX_WAKEUP_THRESH) {
+ netif_tx_wake_queue(txq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.queue_wakeup++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+ }
+
+ if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+ /* trigger the dma engine */
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.doorbells++;
+ u64_stats_update_end(&tx_ring->syncp);
+ }
+
+ return NETDEV_TX_OK;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+error_unmap_dma:
+ if (i >= 0) {
+ /* save value of frag that failed */
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ tx_info->skb = NULL;
+ ena_buf = tx_info->bufs;
+ dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ ena_buf++;
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ }
+ }
+
+error_drop_packet:
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ena_netpoll(struct net_device *netdev)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ u16 qid;
+ /* We suspect this is useful for in-kernel network services that
+ * want to loop an incoming skb's rx queue back to its tx queue; for
+ * normal user-generated traffic we will most likely not get here
+ */
+ if (skb_rx_queue_recorded(skb))
+ qid = skb_get_rx_queue(skb);
+ else
+ qid = fallback(dev, skb);
+
+ return qid;
+}
+
+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_host_info *host_info;
+ int rc;
+
+ /* Allocate only the host info */
+ rc = ena_com_allocate_host_info(ena_dev);
+ if (rc) {
+ pr_err("Cannot allocate host info\n");
+ return;
+ }
+
+ host_info = ena_dev->host_attr.host_info;
+
+ host_info->os_type = ENA_ADMIN_OS_LINUX;
+ host_info->kernel_ver = LINUX_VERSION_CODE;
+ strncpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str) - 1);
+ host_info->os_dist = 0;
+ strncpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str) - 1);
+ host_info->driver_version =
+ (DRV_MODULE_VER_MAJOR) |
+ (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+ rc = ena_com_set_host_attributes(ena_dev);
+ if (rc) {
+ if (rc == -EPERM)
+ pr_warn("Cannot set host attributes\n");
+ else
+ pr_err("Cannot set host attributes\n");
+
+ goto err;
+ }
+
+ return;
+
+err:
+ ena_com_delete_host_info(ena_dev);
+}
+
+static void ena_config_debug_area(struct ena_adapter *adapter)
+{
+ u32 debug_area_size;
+ int rc, ss_count;
+
+ ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
+ if (ss_count <= 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SS count is negative\n");
+ return;
+ }
+
+ /* allocate 32 bytes for each stat string and 64 bits for each value */
+ debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
+
+ rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
+ if (rc) {
+ pr_err("Cannot allocate debug area\n");
+ return;
+ }
+
+ rc = ena_com_set_host_attributes(adapter->ena_dev);
+ if (rc) {
+ if (rc == -EPERM)
+ netif_warn(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+ goto err;
+ }
+
+ return;
+err:
+ ena_com_delete_debug_area(adapter->ena_dev);
+}
+
+static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_admin_basic_stats ena_stats;
+ int rc;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return NULL;
+
+ rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
+ if (rc)
+ return NULL;
+
+ stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
+ ena_stats.tx_bytes_low;
+ stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
+ ena_stats.rx_bytes_low;
+
+ stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
+ ena_stats.rx_pkts_low;
+ stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
+ ena_stats.tx_pkts_low;
+
+ stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
+ ena_stats.rx_drops_low;
+
+ stats->multicast = 0;
+ stats->collisions = 0;
+
+ stats->rx_length_errors = 0;
+ stats->rx_crc_errors = 0;
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = 0;
+ stats->rx_missed_errors = 0;
+ stats->tx_window_errors = 0;
+
+ stats->rx_errors = 0;
+ stats->tx_errors = 0;
+
+ return stats;
+}
+
+static const struct net_device_ops ena_netdev_ops = {
+ .ndo_open = ena_open,
+ .ndo_stop = ena_close,
+ .ndo_start_xmit = ena_start_xmit,
+ .ndo_select_queue = ena_select_queue,
+ .ndo_get_stats64 = ena_get_stats64,
+ .ndo_tx_timeout = ena_tx_timeout,
+ .ndo_change_mtu = ena_change_mtu,
+ .ndo_set_mac_address = NULL,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ena_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+};
+
+static void ena_device_io_suspend(struct work_struct *work)
+{
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, suspend_io_task);
+ struct net_device *netdev = adapter->netdev;
+
+ /* ena_napi_disable_all disables only the IO handling.
+ * We are still subject to AENQ keep alive watchdog.
+ */
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.io_suspend++;
+ u64_stats_update_end(&adapter->syncp);
+ ena_napi_disable_all(adapter);
+ netif_tx_lock(netdev);
+ netif_device_detach(netdev);
+ netif_tx_unlock(netdev);
+}
+
+static void ena_device_io_resume(struct work_struct *work)
+{
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, resume_io_task);
+ struct net_device *netdev = adapter->netdev;
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.io_resume++;
+ u64_stats_update_end(&adapter->syncp);
+
+ netif_device_attach(netdev);
+ ena_napi_enable_all(adapter);
+}
+
+static int ena_device_validate_params(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
+ adapter->mac_addr);
+ if (!rc) {
+ netif_err(adapter, drv, netdev,
+ "Error, mac address are different\n");
+ return -EINVAL;
+ }
+
+ if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
+ (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
+ netif_err(adapter, drv, netdev,
+ "Error, device doesn't support enough queues\n");
+ return -EINVAL;
+ }
+
+ if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
+ netif_err(adapter, drv, netdev,
+ "Error, device max mtu is smaller than netdev MTU\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
+{
+ struct device *dev = &pdev->dev;
+ bool readless_supported;
+ u32 aenq_groups;
+ int dma_width;
+ int rc;
+
+ rc = ena_com_mmio_reg_read_request_init(ena_dev);
+ if (rc) {
+ dev_err(dev, "failed to init mmio read less\n");
+ return rc;
+ }
+
+ /* The PCIe configuration space revision id indicates whether mmio
+ * register read is disabled
+ */
+ readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
+ ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ dev_err(dev, "Can not reset device\n");
+ goto err_mmio_read_less;
+ }
+
+ rc = ena_com_validate_version(ena_dev);
+ if (rc) {
+ dev_err(dev, "device version is too low\n");
+ goto err_mmio_read_less;
+ }
+
+ dma_width = ena_com_get_dma_width(ena_dev);
+ if (dma_width < 0) {
+ dev_err(dev, "Invalid dma width value %d", dma_width);
+ rc = dma_width;
+ goto err_mmio_read_less;
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (rc) {
+ dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
+ goto err_mmio_read_less;
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ if (rc) {
+ dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
+ rc);
+ goto err_mmio_read_less;
+ }
+
+ /* ENA admin level init */
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ if (rc) {
+ dev_err(dev,
+ "Can not initialize ena admin queue with device\n");
+ goto err_mmio_read_less;
+ }
+
+ /* To enable the msix interrupts the driver needs to know the number
+ * of queues. So the driver uses polling mode to retrieve this
+ * information
+ */
+ ena_com_set_admin_polling_mode(ena_dev, true);
+
+ /* Get Device Attributes */
+ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
+ if (rc) {
+ dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
+ goto err_admin_init;
+ }
+
+ /* Try to turn on all the available aenq groups */
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
+ ena_config_host_info(ena_dev);
+
+ return 0;
+
+err_admin_init:
+ ena_com_admin_destroy(ena_dev);
+err_mmio_read_less:
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ return rc;
+}
+
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
+ int io_vectors)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct device *dev = &adapter->pdev->dev;
+ int rc;
+
+ rc = ena_enable_msix(adapter, io_vectors);
+ if (rc) {
+ dev_err(dev, "Can not reserve msix vectors\n");
+ return rc;
+ }
+
+ ena_setup_mgmnt_intr(adapter);
+
+ rc = ena_request_mgmnt_irq(adapter);
+ if (rc) {
+ dev_err(dev, "Can not setup management interrupts\n");
+ goto err_disable_msix;
+ }
+
+ ena_com_set_admin_polling_mode(ena_dev, false);
+
+ ena_com_admin_aenq_enable(ena_dev);
+
+ return 0;
+
+err_disable_msix:
+ ena_disable_msix(adapter);
+
+ return rc;
+}
+
+static void ena_fw_reset_device(struct work_struct *work)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, reset_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct pci_dev *pdev = adapter->pdev;
+ bool dev_up, wd_state;
+ int rc;
+
+ del_timer_sync(&adapter->timer_service);
+
+ rtnl_lock();
+
+ dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ /* After calling ena_close the tx queues and the napi
+ * are disabled so no one can interfere or touch the
+ * data structures
+ */
+ ena_close(netdev);
+
+ rc = ena_com_dev_reset(ena_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Device reset failed\n");
+ goto err;
+ }
+
+ ena_free_mgmnt_irq(adapter);
+
+ ena_disable_msix(adapter);
+
+ ena_com_abort_admin_commands(ena_dev);
+
+ ena_com_wait_for_abort_completion(ena_dev);
+
+ ena_com_admin_destroy(ena_dev);
+
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ /* Finish with the destroy part. Start the init part */
+
+ rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not initialize device\n");
+ goto err;
+ }
+ adapter->wd_state = wd_state;
+
+ rc = ena_device_validate_params(adapter, &get_feat_ctx);
+ if (rc) {
+ dev_err(&pdev->dev, "Validation of device parameters failed\n");
+ goto err_device_destroy;
+ }
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter,
+ adapter->num_queues);
+ if (rc) {
+ dev_err(&pdev->dev, "Enable MSI-X failed\n");
+ goto err_device_destroy;
+ }
+ /* If the interface was up before the reset bring it up */
+ if (dev_up) {
+ rc = ena_up(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to create I/O queues\n");
+ goto err_disable_msix;
+ }
+ }
+
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+
+ rtnl_unlock();
+
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
+
+ return;
+err_disable_msix:
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_device_destroy:
+ ena_com_admin_destroy(ena_dev);
+err:
+ rtnl_unlock();
+
+ dev_err(&pdev->dev,
+ "Reset attempt failed. Can not reset the device\n");
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
+ struct ena_tx_buffer *tx_buf;
+ unsigned long last_jiffies;
+ struct ena_ring *tx_ring;
+ int i, j, budget;
+ u32 missed_tx;
+
+ /* Make sure the driver isn't turning the device on or off in another process */
+ smp_rmb();
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return;
+
+ budget = ENA_MONITORED_TX_QUEUES;
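+ /* Check at most ENA_MONITORED_TX_QUEUES rings per timer service run;
+ * the scan resumes from last_monitored_tx_qid on the next run
+ */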
+
+ for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+ tx_ring = &adapter->tx_ring[i];
+
+ for (j = 0; j < tx_ring->ring_size; j++) {
+ tx_buf = &tx_ring->tx_buffer_info[j];
+ last_jiffies = tx_buf->last_jiffies;
+ if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+ tx_ring->qid, j);
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ missed_tx = tx_ring->tx_stats.missing_tx_comp++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ /* Clear last jiffies so the lost buffer won't
+ * be counted twice.
+ */
+ tx_buf->last_jiffies = 0;
+
+ if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+ netif_err(adapter, tx_err, adapter->netdev,
+ "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
+ missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ }
+ }
+
+ budget--;
+ if (!budget)
+ break;
+ }
+
+ adapter->last_monitored_tx_qid = i % adapter->num_queues;
+}
+
+/* Check for keep alive expiration */
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ unsigned long keep_alive_expired;
+
+ if (!adapter->wd_state)
+ return;
+
+ keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
+ + ENA_DEVICE_KALIVE_TIMEOUT);
+ if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Keep alive watchdog timeout.\n");
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.wd_expired++;
+ u64_stats_update_end(&adapter->syncp);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+}
+
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "ENA admin queue is not in running state!\n");
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.admin_q_pause++;
+ u64_stats_update_end(&adapter->syncp);
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+}
+
+static void ena_update_host_info(struct ena_admin_host_info *host_info,
+ struct net_device *netdev)
+{
+ host_info->supported_network_features[0] =
+ netdev->features & GENMASK_ULL(31, 0);
+ host_info->supported_network_features[1] =
+ (netdev->features & GENMASK_ULL(63, 32)) >> 32;
+}
+
+static void ena_timer_service(unsigned long data)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
+ struct ena_admin_host_info *host_info =
+ adapter->ena_dev->host_attr.host_info;
+
+ check_for_missing_keep_alive(adapter);
+
+ check_for_admin_com_state(adapter);
+
+ check_for_missing_tx_completions(adapter);
+
+ if (debug_area)
+ ena_dump_stats_to_buf(adapter, debug_area);
+
+ if (host_info)
+ ena_update_host_info(host_info, adapter->netdev);
+
+ if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Trigger reset is on\n");
+ ena_dump_stats_to_dmesg(adapter);
+ queue_work(ena_wq, &adapter->reset_task);
+ return;
+ }
+
+ /* Reset the timer */
+ mod_timer(&adapter->timer_service, jiffies + HZ);
+}
+
+static int ena_calc_io_queue_num(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_queue_num;
+
+ /* In case of LLQ use the llq number in the get feature cmd */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ io_sq_num = get_feat_ctx->max_queues.max_llq_num;
+
+ if (io_sq_num == 0) {
+ dev_err(&pdev->dev,
+ "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
+
+ ena_dev->tx_mem_queue_type =
+ ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ }
+ } else {
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ }
+
+ io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ io_queue_num = min_t(int, io_queue_num, io_sq_num);
+ io_queue_num = min_t(int, io_queue_num,
+ get_feat_ctx->max_queues.max_cq_num);
+ /* 1 IRQ for mgmnt and 1 IRQ for each IO queue pair (Tx and Rx share a vector) */
+ io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
+ if (unlikely(!io_queue_num)) {
+ dev_err(&pdev->dev, "The device doesn't have io queues\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
+static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ bool has_mem_bar;
+
+ has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
+
+ /* Enable push mode if device supports LLQ */
+ if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+ else
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+}
+
+static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
+ struct net_device *netdev)
+{
+ netdev_features_t dev_features = 0;
+
+ /* Set offload features */
+ if (feat->offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+ dev_features |= NETIF_F_IP_CSUM;
+
+ if (feat->offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+ dev_features |= NETIF_F_IPV6_CSUM;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+ dev_features |= NETIF_F_TSO;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
+ dev_features |= NETIF_F_TSO6;
+
+ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
+ dev_features |= NETIF_F_TSO_ECN;
+
+ if (feat->offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+ dev_features |= NETIF_F_RXCSUM;
+
+ if (feat->offload.rx_supported &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+ dev_features |= NETIF_F_RXCSUM;
+
+ netdev->features =
+ dev_features |
+ NETIF_F_SG |
+ NETIF_F_NTUPLE |
+ NETIF_F_RXHASH |
+ NETIF_F_HIGHDMA;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+}
+
+static void ena_set_conf_feat_params(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *feat)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* Copy mac address */
+ if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+ } else {
+ ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
+ ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ }
+
+ /* Set offload features */
+ ena_set_dev_offloads(feat, netdev);
+
+ adapter->max_mtu = feat->dev_attr.max_mtu;
+}
+
+static int ena_rss_init_default(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct device *dev = &adapter->pdev->dev;
+ int rc, i;
+ u32 val;
+
+ rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+ if (unlikely(rc)) {
+ dev_err(dev, "Cannot init indirect table\n");
+ goto err_rss_init;
+ }
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+ rc = ena_com_indirect_table_fill_entry(ena_dev, i,
+ ENA_IO_RXQ_IDX(val));
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill indirect table\n");
+ goto err_fill_indir;
+ }
+ }
+
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill hash function\n");
+ goto err_fill_indir;
+ }
+
+ rc = ena_com_set_default_hash_ctrl(ena_dev);
+ if (unlikely(rc && (rc != -EPERM))) {
+ dev_err(dev, "Cannot fill hash control\n");
+ goto err_fill_indir;
+ }
+
+ return 0;
+
+err_fill_indir:
+ ena_com_rss_destroy(ena_dev);
+err_rss_init:
+
+ return rc;
+}
+
+static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
+{
+ int release_bars;
+
+ release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ pci_release_selected_regions(pdev, release_bars);
+}
+
+static int ena_calc_queue_size(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
+ u16 *max_rx_sgl_size,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ u32 queue_size = ENA_DEFAULT_RING_SIZE;
+
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_cq_depth);
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_sq_depth);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ queue_size = min_t(u32, queue_size,
+ get_feat_ctx->max_queues.max_llq_depth);
+
+ queue_size = rounddown_pow_of_two(queue_size);
+
+ if (unlikely(!queue_size)) {
+ dev_err(&pdev->dev, "Invalid queue size\n");
+ return -EFAULT;
+ }
+
+ *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+ *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_rx_descs);
+
+ return queue_size;
+}
+
+/* ena_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ena_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ena_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ static int version_printed;
+ struct net_device *netdev;
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev = NULL;
+ static int adapters_found;
+ int io_queue_num, bars, rc;
+ int queue_size;
+ u16 tx_sgl_size = 0;
+ u16 rx_sgl_size = 0;
+ bool wd_state;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ if (version_printed++ == 0)
+ dev_info(&pdev->dev, "%s", version);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ ena_dev = vzalloc(sizeof(*ena_dev));
+ if (!ena_dev) {
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
+ rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
+ rc);
+ goto err_free_ena_dev;
+ }
+
+ ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
+ pci_resource_len(pdev, ENA_REG_BAR));
+ if (!ena_dev->reg_bar) {
+ dev_err(&pdev->dev, "failed to remap regs bar\n");
+ rc = -EFAULT;
+ goto err_free_region;
+ }
+
+ ena_dev->dmadev = &pdev->dev;
+
+ rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ dev_err(&pdev->dev, "ena device init failed\n");
+ if (rc == -ETIME)
+ rc = -EPROBE_DEFER;
+ goto err_free_region;
+ }
+
+ ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+ if (!ena_dev->mem_bar) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
+ }
+
+ /* Initial Tx interrupt delay, assuming 1 usec granularity.
+ * Updated during device initialization with the real granularity
+ */
+ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
+ io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+ queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
+ &rx_sgl_size, &get_feat_ctx);
+ if ((queue_size <= 0) || (io_queue_num <= 0)) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
+
+ dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
+ io_queue_num, queue_size);
+
+ /* netdev and its private data are zeroed by alloc_etherdev_mq() */
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
+ rc = -ENOMEM;
+ goto err_device_destroy;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->ena_dev = ena_dev;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ ena_set_conf_feat_params(adapter, &get_feat_ctx);
+
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ adapter->tx_ring_size = queue_size;
+ adapter->rx_ring_size = queue_size;
+
+ adapter->max_tx_sgl_size = tx_sgl_size;
+ adapter->max_rx_sgl_size = rx_sgl_size;
+
+ adapter->num_queues = io_queue_num;
+ adapter->last_monitored_tx_qid = 0;
+
+ adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
+ adapter->wd_state = wd_state;
+
+ snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
+
+ rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to query interrupt moderation feature\n");
+ goto err_netdev_destroy;
+ }
+ ena_init_io_rings(adapter);
+
+ netdev->netdev_ops = &ena_netdev_ops;
+ netdev->watchdog_timeo = TX_TIMEOUT;
+ ena_set_ethtool_ops(netdev);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ u64_stats_init(&adapter->syncp);
+
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to enable and set the admin interrupts\n");
+ goto err_worker_destroy;
+ }
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EPERM)) {
+ dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
+ goto err_free_msix;
+ }
+
+ ena_config_debug_area(adapter);
+
+ memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
+
+ netif_carrier_off(netdev);
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto err_rss;
+ }
+
+ INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
+ INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
+ INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
+
+ adapter->last_keep_alive_jiffies = jiffies;
+
+ init_timer(&adapter->timer_service);
+ adapter->timer_service.expires = round_jiffies(jiffies + HZ);
+ adapter->timer_service.function = ena_timer_service;
+ adapter->timer_service.data = (unsigned long)adapter;
+
+ add_timer(&adapter->timer_service);
+
+ dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+ DEVICE_NAME, (long)pci_resource_start(pdev, 0),
+ netdev->dev_addr, io_queue_num);
+
+ set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ adapters_found++;
+
+ return 0;
+
+err_rss:
+ ena_com_delete_debug_area(ena_dev);
+ ena_com_rss_destroy(ena_dev);
+err_free_msix:
+ ena_com_dev_reset(ena_dev);
+ ena_free_mgmnt_irq(adapter);
+ ena_disable_msix(adapter);
+err_worker_destroy:
+ ena_com_destroy_interrupt_moderation(ena_dev);
+ del_timer(&adapter->timer_service);
+ cancel_work_sync(&adapter->suspend_io_task);
+ cancel_work_sync(&adapter->resume_io_task);
+err_netdev_destroy:
+ free_netdev(netdev);
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+err_free_region:
+ ena_release_bars(ena_dev, pdev);
+err_free_ena_dev:
+ vfree(ena_dev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+/*****************************************************************************/
+static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
+{
+ int rc;
+
+ if (numvfs > 0) {
+ rc = pci_enable_sriov(dev, numvfs);
+ if (rc != 0) {
+ dev_err(&dev->dev,
+ "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
+ numvfs, rc);
+ return rc;
+ }
+
+ return numvfs;
+ }
+
+ if (numvfs == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void ena_remove(struct pci_dev *pdev)
+{
+ struct ena_adapter *adapter = pci_get_drvdata(pdev);
+ struct ena_com_dev *ena_dev;
+ struct net_device *netdev;
+
+ if (!adapter)
+ /* This device didn't load properly and its resources were
+ * already released, nothing to do
+ */
+ return;
+
+ ena_dev = adapter->ena_dev;
+ netdev = adapter->netdev;
+
+#ifdef CONFIG_RFS_ACCEL
+ if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+ }
+#endif /* CONFIG_RFS_ACCEL */
+
+ unregister_netdev(netdev);
+ del_timer_sync(&adapter->timer_service);
+
+ cancel_work_sync(&adapter->reset_task);
+
+ cancel_work_sync(&adapter->suspend_io_task);
+
+ cancel_work_sync(&adapter->resume_io_task);
+
+ ena_com_dev_reset(ena_dev);
+
+ ena_free_mgmnt_irq(adapter);
+
+ ena_disable_msix(adapter);
+
+ free_netdev(netdev);
+
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ ena_com_abort_admin_commands(ena_dev);
+
+ ena_com_wait_for_abort_completion(ena_dev);
+
+ ena_com_admin_destroy(ena_dev);
+
+ ena_com_rss_destroy(ena_dev);
+
+ ena_com_delete_debug_area(ena_dev);
+
+ ena_com_delete_host_info(ena_dev);
+
+ ena_release_bars(ena_dev, pdev);
+
+ pci_disable_device(pdev);
+
+ ena_com_destroy_interrupt_moderation(ena_dev);
+
+ vfree(ena_dev);
+}
+
+static struct pci_driver ena_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ena_pci_tbl,
+ .probe = ena_probe,
+ .remove = ena_remove,
+ .sriov_configure = ena_sriov_configure,
+};
+
+static int __init ena_init(void)
+{
+ pr_info("%s", version);
+
+ ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
+ if (!ena_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return pci_register_driver(&ena_pci_driver);
+}
+
+static void __exit ena_cleanup(void)
+{
+ pci_unregister_driver(&ena_pci_driver);
+
+ if (ena_wq) {
+ destroy_workqueue(ena_wq);
+ ena_wq = NULL;
+ }
+}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+/* ena_update_on_link_change:
+ * Notify the network interface about the change in link status
+ */
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_link_change_desc *aenq_desc =
+ (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ int status = aenq_desc->flags &
+ ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
+
+ if (status) {
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+ set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
+ netif_carrier_on(adapter->netdev);
+ } else {
+ clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
+ netif_carrier_off(adapter->netdev);
+ }
+}
+
+static void ena_keep_alive_wd(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->last_keep_alive_jiffies = jiffies;
+}
+
+static void ena_notification(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
+ "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_SUSPEND:
+ /* Suspend just the IO queues.
+ * We deliberately don't suspend admin so that the timer and
+ * the keep_alive events keep running.
+ */
+ queue_work(ena_wq, &adapter->suspend_io_task);
+ break;
+ case ENA_ADMIN_RESUME:
+ queue_work(ena_wq, &adapter->resume_io_task);
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+/* This handler will be called for an unknown event group or unimplemented handlers */
+static void unimplemented_aenq_handler(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+
+ netif_err(adapter, drv, adapter->netdev,
+ "Unknown event was received or event with unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
+
+module_init(ena_init);
+module_exit(ena_cleanup);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
new file mode 100644
index 000000000000..69d7e9ed5bc8
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_H
+#define ENA_H
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#include "ena_com.h"
+#include "ena_eth_com.h"
+
+#define DRV_MODULE_VER_MAJOR 1
+#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_SUBMINOR 2
+
+#define DRV_MODULE_NAME "ena"
+#ifndef DRV_MODULE_VERSION
+#define DRV_MODULE_VERSION \
+ __stringify(DRV_MODULE_VER_MAJOR) "." \
+ __stringify(DRV_MODULE_VER_MINOR) "." \
+ __stringify(DRV_MODULE_VER_SUBMINOR)
+#endif
+
+#define DEVICE_NAME "Elastic Network Adapter (ENA)"
+
+/* 1 for AENQ + ADMIN */
+#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
+
+#define ENA_REG_BAR 0
+#define ENA_MEM_BAR 2
+#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))
+
+#define ENA_DEFAULT_RING_SIZE (1024)
+
+#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
+#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN)
+
+/* limit the buffer size to 600 bytes to handle MTU changes from very
+ * small to very large, in which case the number of buffers per packet
+ * could exceed ENA_PKT_MAX_BUFS
+ */
+#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600
+
+#define ENA_MIN_MTU 128
+
+#define ENA_NAME_MAX_LEN 20
+#define ENA_IRQNAME_SIZE 40
+
+#define ENA_PKT_MAX_BUFS 19
+
+#define ENA_RX_RSS_TABLE_LOG_SIZE 7
+#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
+
+#define ENA_HASH_KEY_SIZE 40
+
+/* The number of tx packet completions that will be handled each NAPI poll
+ * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
+ */
+#define ENA_TX_POLL_BUDGET_DIVIDER 4
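+/* e.g. with the default 1024-entry ring this caps each poll at
+ * 1024 / ENA_TX_POLL_BUDGET_DIVIDER = 256 TX completions.
+ */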
+
+/* Refill Rx queue when number of available descriptors is below
+ * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+ */
+#define ENA_RX_REFILL_THRESH_DIVIDER 8
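+/* e.g. a 1024-entry Rx ring is refilled once fewer than
+ * 1024 / ENA_RX_REFILL_THRESH_DIVIDER = 128 descriptors remain.
+ */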
+
+/* Number of TX queues checked for missing completions on each timer service run */
+#define ENA_MONITORED_TX_QUEUES 4
+/* Max number of timed-out TX packets before triggering a device reset */
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+
+#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+
+#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
+ (((idx) + (n)) & ((ring_size) - 1))
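+/* The mask-based wrap assumes a power-of-two ring size,
+ * e.g. ENA_RX_RING_IDX_ADD(1022, 4, 1024) == 2.
+ */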
+
+#define ENA_IO_TXQ_IDX(q) (2 * (q))
+#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
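+/* TX and RX queues of a pair are interleaved,
+ * e.g. queue 3 maps to TXQ index 6 and RXQ index 7.
+ */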
+
+#define ENA_MGMNT_IRQ_IDX 0
+#define ENA_IO_IRQ_FIRST_IDX 1
+#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+
+/* ENA device should send keep alive msg every 1 sec.
+ * We wait for 3 sec just to be on the safe side.
+ */
+#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
+
+#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+struct ena_irq {
+ irq_handler_t handler;
+ void *data;
+ int cpu;
+ u32 vector;
+ cpumask_t affinity_hint_mask;
+ char name[ENA_IRQNAME_SIZE];
+};
+
+struct ena_napi {
+ struct napi_struct napi ____cacheline_aligned;
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+ u32 qid;
+};
+
+struct ena_tx_buffer {
+ struct sk_buff *skb;
+ /* num of ena desc for this specific skb
+ * (includes data desc and metadata desc)
+ */
+ u32 tx_descs;
+ /* num of buffers used by this skb */
+ u32 num_of_bufs;
+ /* Save the last jiffies to detect missing tx packets */
+ unsigned long last_jiffies;
+ struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
+} ____cacheline_aligned;
+
+struct ena_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 page_offset;
+ struct ena_com_buf ena_buf;
+} ____cacheline_aligned;
+
+struct ena_stats_tx {
+ u64 cnt;
+ u64 bytes;
+ u64 queue_stop;
+ u64 prepare_ctx_err;
+ u64 queue_wakeup;
+ u64 dma_mapping_err;
+ u64 linearize;
+ u64 linearize_failed;
+ u64 napi_comp;
+ u64 tx_poll;
+ u64 doorbells;
+ u64 missing_tx_comp;
+ u64 bad_req_id;
+};
+
+struct ena_stats_rx {
+ u64 cnt;
+ u64 bytes;
+ u64 refil_partial;
+ u64 bad_csum;
+ u64 page_alloc_fail;
+ u64 skb_alloc_fail;
+ u64 dma_mapping_err;
+ u64 bad_desc_num;
+ u64 rx_copybreak_pkt;
+};
+
+struct ena_ring {
+ /* Holds the free TX request ids, used for out-of-order TX completions */
+ u16 *free_tx_ids;
+ union {
+ struct ena_tx_buffer *tx_buffer_info;
+ struct ena_rx_buffer *rx_buffer_info;
+ };
+
+ /* cache ptr to avoid using the adapter */
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct napi_struct *napi;
+ struct net_device *netdev;
+ struct ena_com_dev *ena_dev;
+ struct ena_adapter *adapter;
+ struct ena_com_io_cq *ena_com_io_cq;
+ struct ena_com_io_sq *ena_com_io_sq;
+
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 rx_copybreak;
+ u16 qid;
+ u16 mtu;
+ u16 sgl_size;
+
+ /* The maximum header length the device can handle */
+ u8 tx_max_header_size;
+
+ /* cpu for TPH */
+ int cpu;
+ /* number of entries in tx/rx_buffer_info */
+ int ring_size;
+
+ enum ena_admin_placement_policy_type tx_mem_queue_type;
+
+ struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
+ u32 smoothed_interval;
+ u32 per_napi_packets;
+ u32 per_napi_bytes;
+ enum ena_intr_moder_level moder_tbl_idx;
+ struct u64_stats_sync syncp;
+ union {
+ struct ena_stats_tx tx_stats;
+ struct ena_stats_rx rx_stats;
+ };
+} ____cacheline_aligned;
+
+struct ena_stats_dev {
+ u64 tx_timeout;
+ u64 io_suspend;
+ u64 io_resume;
+ u64 wd_expired;
+ u64 interface_up;
+ u64 interface_down;
+ u64 admin_q_pause;
+};
+
+enum ena_flags_t {
+ ENA_FLAG_DEVICE_RUNNING,
+ ENA_FLAG_DEV_UP,
+ ENA_FLAG_LINK_UP,
+ ENA_FLAG_MSIX_ENABLED,
+ ENA_FLAG_TRIGGER_RESET
+};
+
+/* adapter specific private data structure */
+struct ena_adapter {
+ struct ena_com_dev *ena_dev;
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* rx packets shorter than this length will be copied to the skb
+ * header
+ */
+ u32 rx_copybreak;
+ u32 max_mtu;
+
+ int num_queues;
+
+ struct msix_entry *msix_entries;
+ int msix_vecs;
+
+ u32 tx_usecs, rx_usecs; /* interrupt moderation */
+ u32 tx_frames, rx_frames; /* interrupt moderation */
+
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+
+ u32 msg_enable;
+
+ u16 max_tx_sgl_size;
+ u16 max_rx_sgl_size;
+
+ u8 mac_addr[ETH_ALEN];
+
+ char name[ENA_NAME_MAX_LEN];
+
+ unsigned long flags;
+ /* TX */
+ struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
+ ____cacheline_aligned_in_smp;
+
+ /* RX */
+ struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
+ ____cacheline_aligned_in_smp;
+
+ struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES];
+
+ struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)];
+
+ /* timer service */
+ struct work_struct reset_task;
+ struct work_struct suspend_io_task;
+ struct work_struct resume_io_task;
+ struct timer_list timer_service;
+
+ bool wd_state;
+ unsigned long last_keep_alive_jiffies;
+
+ struct u64_stats_sync syncp;
+ struct ena_stats_dev dev_stats;
+
+ /* last queue index that was checked for uncompleted tx packets */
+ u32 last_monitored_tx_qid;
+};
+
+void ena_set_ethtool_ops(struct net_device *netdev);
+
+void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
+
+void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
+
+int ena_get_sset_count(struct net_device *netdev, int sset);
+
+#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
new file mode 100644
index 000000000000..f80d2a47fa94
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ENA_PCI_ID_TBL_H_
+#define ENA_PCI_ID_TBL_H_
+
+#ifndef PCI_VENDOR_ID_AMAZON
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#endif
+
+#ifndef PCI_DEV_ID_ENA_PF
+#define PCI_DEV_ID_ENA_PF 0x0ec2
+#endif
+
+#ifndef PCI_DEV_ID_ENA_LLQ_PF
+#define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2
+#endif
+
+#ifndef PCI_DEV_ID_ENA_VF
+#define PCI_DEV_ID_ENA_VF 0xec20
+#endif
+
+#ifndef PCI_DEV_ID_ENA_LLQ_VF
+#define PCI_DEV_ID_ENA_LLQ_VF 0xec21
+#endif
+
+#define ENA_PCI_ID_TABLE_ENTRY(devid) \
+ {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)},
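+/* e.g. ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF) adds the
+ * {vendor 0x1d0f, device 0x0ec2} pair to the table.
+ */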
+
+static const struct pci_device_id ena_pci_tbl[] = {
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF)
+ ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_VF)
+ { }
+};
+
+#endif /* ENA_PCI_ID_TBL_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
new file mode 100644
index 000000000000..26097a2b6030
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_REGS_H_
+#define _ENA_REGS_H_
+
+/* ena_registers offsets */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* version register */
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
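+/* Fields are decoded with the usual shift/mask pattern, e.g.:
+ *   major = (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ *           ENA_REGS_VERSION_MAJOR_VERSION_SHIFT;
+ *   minor = ver & ENA_REGS_VERSION_MINOR_VERSION_MASK;
+ */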
+
+/* controller_version register */
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+
+/* caps register */
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+
+/* aq_caps register */
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* acq_caps register */
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* aenq_caps register */
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+
+/* dev_ctl register */
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+
+/* dev_sts register */
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+
+/* mmio_reg_read register */
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+
+/* rss_ind_entry_update register */
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+
+#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index dcf2a1f3643d..dc57f2759f44 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -45,14 +45,14 @@
#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
-#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"
#undef WRITERAP
#undef WRITERDP
#undef READRDP
-#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME147_NET)
/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x) (lp->writerap(lp, x))
@@ -86,7 +86,7 @@ static inline __u16 READRDP(struct lance_private *lp)
}
#endif
-#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+#endif /* IS_ENABLED(CONFIG_HPLANCE) */
/* debugging output macros, various flavours */
/* #define TEST_HITS */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 94960055fa1f..f92cc97151ec 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -89,7 +89,7 @@ Revision History:
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a9b2709567ec..7f9216db026f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1708,9 +1708,9 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_features = xgbe_set_features,
};
-struct net_device_ops *xgbe_get_netdev_ops(void)
+const struct net_device_ops *xgbe_get_netdev_ops(void)
{
- return (struct net_device_ops *)&xgbe_netdev_ops;
+ return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 11d9f0c5b78b..4007b429c80c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -623,7 +623,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_ts_info = xgbe_get_ts_info,
};
-struct ethtool_ops *xgbe_get_ethtool_ops(void)
+const struct ethtool_ops *xgbe_get_ethtool_ops(void)
{
- return (struct ethtool_ops *)&xgbe_ethtool_ops;
+ return &xgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 3eee3201b58f..9de078819aa6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -861,9 +861,15 @@ static int xgbe_resume(struct device *dev)
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+ /* Schedule a restart in case the link or phy state changed
+ * while we were powered down.
+ */
+ schedule_work(&pdata->restart_work);
+ }
+
DBGPR("<--xgbe_resume\n");
return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 98d9d63c4353..5dd17dcea2f8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -956,8 +956,9 @@ struct xgbe_prv_data {
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
-struct net_device_ops *xgbe_get_netdev_ops(void);
-struct ethtool_ops *xgbe_get_ethtool_ops(void);
+const struct net_device_ops *xgbe_get_netdev_ops(void);
+const struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
#ifdef CONFIG_AMD_XGBE_DCB
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 300e3b5c54e0..afccb033177b 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -4,6 +4,7 @@ config NET_XGENE
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
select MDIO_XGENE
+ select GPIOLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
APM X-Gene SoC.
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 472c0fb3f4c4..23d72af83d82 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -32,12 +32,19 @@ static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
SET_VAL(SB_HDRLEN, len);
}
-static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
+static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata,
+ u32 dstqid, u32 fpsel,
u32 nfpsel, u32 *idt_reg)
{
- *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
- SET_VAL(IDT_FPSEL, fpsel) |
- SET_VAL(IDT_NFPSEL, nfpsel);
+ if (pdata->enet_id == XGENE_ENET1) {
+ *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+ SET_VAL(IDT_FPSEL1, fpsel) |
+ SET_VAL(IDT_NFPSEL1, nfpsel);
+ } else {
+ *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+ SET_VAL(IDT_FPSEL, fpsel) |
+ SET_VAL(IDT_NFPSEL, nfpsel);
+ }
}
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
@@ -344,7 +351,7 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
nfpsel = 0;
idt_reg = 0;
- xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg);
+ xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
if (ret)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 33c5f6b25824..9ac9f8e145ec 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -196,9 +196,13 @@ enum xgene_cle_ptree_dbptrs {
#define IDT_DSTQID_POS 0
#define IDT_DSTQID_LEN 12
#define IDT_FPSEL_POS 12
-#define IDT_FPSEL_LEN 4
-#define IDT_NFPSEL_POS 16
-#define IDT_NFPSEL_LEN 4
+#define IDT_FPSEL_LEN 5
+#define IDT_NFPSEL_POS 17
+#define IDT_NFPSEL_LEN 5
+#define IDT_FPSEL1_POS 12
+#define IDT_FPSEL1_LEN 4
+#define IDT_NFPSEL1_POS 16
+#define IDT_NFPSEL1_LEN 4
struct xgene_cle_ptree_branch {
bool valid;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 22a7b26ca1d6..d372d4235c81 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -54,55 +54,68 @@ static void xgene_get_drvinfo(struct net_device *ndev,
sprintf(info->bus_info, "%s", pdev->name);
}
-static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
+ u32 supported;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (phydev == NULL)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (pdata->mdio_driver) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_MII;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
+ supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
}
return 0;
}
-static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
@@ -110,7 +123,7 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
}
@@ -152,12 +165,12 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
static const struct ethtool_ops xgene_ethtool_ops = {
.get_drvinfo = xgene_get_drvinfo,
- .get_settings = xgene_get_settings,
- .set_settings = xgene_set_settings,
.get_link = ethtool_op_get_link,
.get_strings = xgene_get_strings,
.get_sset_count = xgene_get_sset_count,
- .get_ethtool_stats = xgene_get_ethtool_stats
+ .get_ethtool_stats = xgene_get_ethtool_stats,
+ .get_link_ksettings = xgene_get_link_ksettings,
+ .set_link_ksettings = xgene_set_link_ksettings,
};
void xgene_enet_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 18bb9556dd00..c481f104a8fe 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -713,7 +713,7 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
@@ -761,31 +761,25 @@ int xgene_enet_phy_connect(struct net_device *ndev)
if (dev->of_node) {
for (i = 0 ; i < 2; i++) {
np = of_parse_phandle(dev->of_node, "phy-handle", i);
- if (np)
+ phy_dev = of_phy_connect(ndev, np,
+ &xgene_enet_adjust_link,
+ 0, pdata->phy_mode);
+ of_node_put(np);
+ if (phy_dev)
break;
}
- if (!np) {
- netdev_dbg(ndev, "No phy-handle found in DT\n");
- return -ENODEV;
- }
-
- phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
- 0, pdata->phy_mode);
- of_node_put(np);
if (!phy_dev) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
-
- pdata->phy_dev = phy_dev;
} else {
#ifdef CONFIG_ACPI
struct acpi_device *adev = acpi_phy_find_device(dev);
if (adev)
- pdata->phy_dev = adev->driver_data;
-
- phy_dev = pdata->phy_dev;
+ phy_dev = adev->driver_data;
+ else
+ phy_dev = NULL;
if (!phy_dev ||
phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
@@ -853,8 +847,6 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (!phy)
return -EIO;
- pdata->phy_dev = phy;
-
return ret;
}
@@ -894,14 +886,18 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
}
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 179a44dceb29..8456337a237d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -124,6 +124,12 @@ enum xgene_enet_rm {
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+#define PCS_ADDR_REG_OFFSET 0x00
+#define PCS_COMMAND_REG_OFFSET 0x04
+#define PCS_WRITE_REG_OFFSET 0x08
+#define PCS_READ_REG_OFFSET 0x0c
+#define PCS_COMMAND_DONE_REG_OFFSET 0x10
+
#define MII_MGMT_CONFIG_ADDR 0x20
#define MII_MGMT_COMMAND_ADDR 0x24
#define MII_MGMT_ADDRESS_ADDR 0x28
@@ -231,6 +237,8 @@ enum xgene_enet_rm {
#define TCPHDR_LEN 6
#define IPHDR_POS 6
#define IPHDR_LEN 6
+#define MSS_POS 20
+#define MSS_LEN 2
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
#define ET_POS 23 /* Enable TSO */
@@ -247,6 +255,11 @@ enum xgene_enet_rm {
#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
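+/* Each 32-bit TSIF MSS register packs two 14-bit MSS values:
+ * MSS0 in bits 13:0 and MSS1 in bits 29:16.
+ */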
+#define TSO_MSS0_POS 0
+#define TSO_MSS0_LEN 14
+#define TSO_MSS1_POS 16
+#define TSO_MSS1_LEN 14
+
struct xgene_enet_raw_desc {
__le64 m0;
__le64 m1;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d1d6b5eeb613..429f18fc5503 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
@@ -72,7 +73,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
skb = netdev_alloc_skb_ip_align(ndev, len);
if (unlikely(!skb))
return -ENOMEM;
- buf_pool->rx_skb[tail] = skb;
dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
@@ -81,6 +81,8 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
return -EINVAL;
}
+ buf_pool->rx_skb[tail] = skb;
+
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
SET_VAL(BUFDATALEN, bufdatalen) |
SET_BIT(COHERENT));
@@ -102,12 +104,21 @@ static u8 xgene_enet_hdr_len(const void *data)
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct device *dev = ndev_to_dev(buf_pool->ndev);
+ struct xgene_enet_raw_desc16 *raw_desc;
+ dma_addr_t dma_addr;
int i;
/* Free up the buffers held by hardware */
for (i = 0; i < buf_pool->slots; i++) {
- if (buf_pool->rx_skb[i])
+ if (buf_pool->rx_skb[i]) {
dev_kfree_skb_any(buf_pool->rx_skb[i]);
+
+ raw_desc = &buf_pool->raw_desc16[i];
+ dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
+ DMA_FROM_DEVICE);
+ }
}
}
@@ -126,6 +137,7 @@ static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
struct xgene_enet_raw_desc *raw_desc)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
struct sk_buff *skb;
struct device *dev;
skb_frag_t *frag;
@@ -133,6 +145,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
u16 skb_index;
u8 status;
int i, ret = 0;
+ u8 mss_index;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -149,6 +162,13 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
DMA_TO_DEVICE);
}
+ if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
+ mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
+ spin_lock(&pdata->mss_lock);
+ pdata->mss_refcnt[mss_index]--;
+ spin_unlock(&pdata->mss_lock);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -167,15 +187,53 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
return ret;
}
-static u64 xgene_enet_work_msg(struct sk_buff *skb)
+static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ bool mss_index_found = false;
+ int mss_index;
+ int i;
+
+ spin_lock(&pdata->mss_lock);
+
+ /* Reuse the slot if MSS matches */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (pdata->mss[i] == mss) {
+ pdata->mss_refcnt[i]++;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ /* Overwrite the slot with ref_count = 0 */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (!pdata->mss_refcnt[i]) {
+ pdata->mss_refcnt[i]++;
+ pdata->mac_ops->set_mss(pdata, mss, i);
+ pdata->mss[i] = mss;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ spin_unlock(&pdata->mss_lock);
+
+ /* No slots with ref_count = 0 available, return busy */
+ if (!mss_index_found)
+ return -EBUSY;
+
+ return mss_index;
+}
+
+static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
struct net_device *ndev = skb->dev;
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
- u64 hopinfo = 0;
u32 hdr_len, mss = 0;
u32 i, len, nr_frags;
+ int mss_index;
ethhdr = xgene_enet_hdr_len(skb->data);
@@ -215,7 +273,11 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
- hopinfo |= SET_BIT(ET);
+ mss_index = xgene_enet_setup_mss(ndev, mss);
+ if (unlikely(mss_index < 0))
+ return -EBUSY;
+
+ *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
}
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
@@ -223,15 +285,15 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- hopinfo |= SET_VAL(TCPHDR, l4hlen) |
- SET_VAL(IPHDR, l3hlen) |
- SET_VAL(ETHHDR, ethhdr) |
- SET_VAL(EC, csum_enable) |
- SET_VAL(IS, proto) |
- SET_BIT(IC) |
- SET_BIT(TYPE_ETH_WORK_MESSAGE);
-
- return hopinfo;
+ *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
+ SET_VAL(IPHDR, l3hlen) |
+ SET_VAL(ETHHDR, ethhdr) |
+ SET_VAL(EC, csum_enable) |
+ SET_VAL(IS, proto) |
+ SET_BIT(IC) |
+ SET_BIT(TYPE_ETH_WORK_MESSAGE);
+
+ return 0;
}
static u16 xgene_enet_encode_len(u16 len)
@@ -271,20 +333,22 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
skb_frag_t *frag;
u16 tail = tx_ring->tail;
- u64 hopinfo;
+ u64 hopinfo = 0;
u32 len, hw_len;
u8 ll = 0, nv = 0, idx = 0;
bool split = false;
u32 size, offset, ell_bytes = 0;
u32 i, fidx, nr_frags, count = 1;
+ int ret;
raw_desc = &tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- hopinfo = xgene_enet_work_msg(skb);
- if (!hopinfo)
- return -EINVAL;
+ ret = xgene_enet_work_msg(skb, &hopinfo);
+ if (ret)
+ return ret;
+
raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
hopinfo);
@@ -424,6 +488,9 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count == -EBUSY)
+ return NETDEV_TX_BUSY;
+
if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -452,7 +519,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
struct xgene_enet_raw_desc *raw_desc)
{
struct net_device *ndev;
- struct xgene_enet_pdata *pdata;
struct device *dev;
struct xgene_enet_desc_ring *buf_pool;
u32 datalen, skb_index;
@@ -461,7 +527,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
int ret = 0;
ndev = rx_ring->ndev;
- pdata = netdev_priv(ndev);
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
@@ -739,8 +804,8 @@ static int xgene_enet_open(struct net_device *ndev)
if (ret)
return ret;
- if (pdata->phy_dev) {
- phy_start(pdata->phy_dev);
+ if (ndev->phydev) {
+ phy_start(ndev->phydev);
} else {
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_carrier_off(ndev);
@@ -763,8 +828,8 @@ static int xgene_enet_close(struct net_device *ndev)
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
else
cancel_delayed_work_sync(&pdata->link_work);
@@ -1312,6 +1377,18 @@ static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
return 0;
}
+static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ return;
+
+ pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
+ if (IS_ERR(pdata->sfp_rdy))
+ pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -1401,6 +1478,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
+ xgene_enet_gpiod_get(pdata);
+
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
@@ -1425,6 +1504,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+ pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
}
pdata->rx_buff_cnt = NUM_PKT_BUF;
@@ -1454,10 +1534,8 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
buf_pool = pdata->rx_ring[i]->buf_pool;
xgene_enet_init_bufpool(buf_pool);
ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
- if (ret) {
- xgene_enet_delete_desc_rings(pdata);
- return ret;
- }
+ if (ret)
+ goto err;
}
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
@@ -1474,7 +1552,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
ret = pdata->cle_ops->cle_init(pdata);
if (ret) {
netdev_err(ndev, "Preclass Tree init error\n");
- return ret;
+ goto err;
}
} else {
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
@@ -1484,6 +1562,10 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
pdata->mac_ops->init(pdata);
return ret;
+
+err:
+ xgene_enet_delete_desc_rings(pdata);
+ return ret;
}
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
@@ -1631,8 +1713,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
}
#endif
if (!pdata->enet_id) {
- free_netdev(ndev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
ret = xgene_enet_get_resources(pdata);
@@ -1643,7 +1725,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO;
- pdata->mss = XGENE_ENET_MSS;
+ spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
@@ -1655,7 +1737,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
ret = xgene_enet_init_hw(pdata);
if (ret)
- goto err_netdev;
+ goto err;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -1665,21 +1747,32 @@ static int xgene_enet_probe(struct platform_device *pdev)
ret = xgene_enet_mdio_config(pdata);
else
INIT_DELAYED_WORK(&pdata->link_work, link_state);
+
+ if (ret)
+ goto err1;
}
- if (ret)
- goto err;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
- goto err;
+ goto err2;
}
return 0;
-err_netdev:
- unregister_netdev(ndev);
+err2:
+ /*
+ * If necessary, free_netdev() will call netif_napi_del() and undo
+ * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
+ */
+
+ if (pdata->mdio_driver)
+ xgene_enet_phy_disconnect(pdata);
+ else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
+err1:
+ xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;
@@ -1688,11 +1781,9 @@ err:
static int xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
- const struct xgene_mac_ops *mac_ops;
struct net_device *ndev;
pdata = platform_get_drvdata(pdev);
- mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
rtnl_lock();
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 217546e5714a..0cda58f5a840 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -47,7 +47,7 @@
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
#define MAX_EXP_BUFFS 256
-#define XGENE_ENET_MSS 1448
+#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
#define XGENE_MAX_ENET_IRQ 16
@@ -143,7 +143,7 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
- void (*set_mss)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
};
@@ -174,7 +174,6 @@ struct xgene_cle_ops {
struct xgene_enet_pdata {
struct net_device *ndev;
struct mii_bus *mdio_bus;
- struct phy_device *phy_dev;
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
@@ -196,6 +195,7 @@ struct xgene_enet_pdata {
void __iomem *mcx_mac_addr;
void __iomem *mcx_mac_csr_addr;
void __iomem *base_addr;
+ void __iomem *pcs_addr;
void __iomem *ring_csr_addr;
void __iomem *ring_cmd_addr;
int phy_mode;
@@ -212,10 +212,13 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
- u32 mss;
+ u32 mss[NUM_MSS_REG];
+ u32 mss_refcnt[NUM_MSS_REG];
+ spinlock_t mss_lock; /* mss lock */
u8 tx_delay;
u8 rx_delay;
bool mdio_driver;
+ struct gpio_desc *sfp_rdy;
};
struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 9c6ad0dce00f..6475f383ba83 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -18,6 +18,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
@@ -84,6 +86,21 @@ static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
wr_addr);
}
+static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
+ u32 wr_addr, u32 wr_data)
+{
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+
+ addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+ wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
+ cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+ cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+ if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+ netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
+ wr_addr);
+}
+
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
u32 offset, u32 *val)
{
@@ -122,6 +139,7 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
+
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -137,6 +155,25 @@ static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
rd_addr);
}
+static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
+ u32 rd_addr, u32 *rd_data)
+{
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ bool success;
+
+ addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
+ rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
+ cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
+ cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
+
+ success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
+ if (!success)
+ netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
+ rd_addr);
+
+ return success;
+}
+
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
@@ -171,6 +208,17 @@ static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}
+static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
+ return;
+
+ xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
+ xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
+}
+
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
u32 addr0, addr1;
@@ -184,9 +232,22 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
-static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
+ u16 mss, u8 index)
{
- xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+ u8 offset;
+ u32 data;
+
+ offset = (index < 2) ? 0 : 4;
+ xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);
+
+ if (!(index & 0x1))
+ data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
+ SET_VAL(TSO_MSS0, mss);
+ else
+ data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);
+
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
@@ -210,18 +271,17 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
xgene_xgmac_set_mac_addr(pdata);
- xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
- xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
data |= BIT(12);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+ xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+ xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
}
static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
@@ -359,14 +419,17 @@ static void xgene_enet_link_state(struct work_struct *work)
{
struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
struct xgene_enet_pdata, link_work);
+ struct gpio_desc *sfp_rdy = pdata->sfp_rdy;
struct net_device *ndev = pdata->ndev;
u32 link_status, poll_interval;
link_status = xgene_enet_link_status(pdata);
+ if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy))
+ link_status = 0;
+
if (link_status) {
if (!netif_carrier_ok(ndev)) {
netif_carrier_on(ndev);
- xgene_xgmac_init(pdata);
xgene_xgmac_rx_enable(pdata);
xgene_xgmac_tx_enable(pdata);
netdev_info(ndev, "Link is Up - 10Gbps\n");
@@ -380,6 +443,8 @@ static void xgene_enet_link_state(struct work_struct *work)
netdev_info(ndev, "Link is Down\n");
}
poll_interval = PHY_POLL_LINK_OFF;
+
+ xgene_pcs_reset(pdata);
}
schedule_delayed_work(&pdata->link_work, poll_interval);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index f1ea485f916b..360ccbd95566 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -24,6 +24,7 @@
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define BLOCK_PCS_OFFSET 0x3800
#define XGENET_CONFIG_REG_ADDR 0x20
#define XGENET_SRST_ADDR 0x00
@@ -72,6 +73,9 @@
#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
+#define PCS_CONTROL_1 0x0000
+#define PCS_CTRL_PCS_RST BIT(15)
+
extern const struct xgene_mac_ops xgene_xgmac_ops;
extern const struct xgene_port_ops xgene_xgport_ops;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 058460bdd5a6..a22403c688c9 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -104,7 +104,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
-int arc_mdio_reset(struct mii_bus *bus)
+static int arc_mdio_reset(struct mii_bus *bus)
{
struct arc_emac_priv *priv = bus->priv;
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..6cac919272ea 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -76,11 +76,19 @@ enum alx_device_quirks {
ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
};
+#define ALX_FLAG_USING_MSIX BIT(0)
+#define ALX_FLAG_USING_MSI BIT(1)
+
struct alx_priv {
struct net_device *dev;
struct alx_hw hw;
+ /* msi-x vectors */
+ int num_vec;
+ struct msix_entry *msix_entries;
+ char irq_lbl[IFNAMSIZ + 8];
+
/* all descriptor memory */
struct {
dma_addr_t dma;
@@ -105,7 +113,7 @@ struct alx_priv {
u16 msg_enable;
- bool msi;
+ int flags;
/* protects hw.stats */
spinlock_t stats_lock;
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1fe35e453d43..6ac40b0003a3 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1031,6 +1031,20 @@ void alx_configure_basic(struct alx_hw *hw)
alx_write_mem32(hw, ALX_WRR, val);
}
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask)
+{
+ u32 reg, val;
+
+ reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0;
+
+ alx_write_mem32(hw, reg, val);
+ alx_post_write(hw);
+}
+
bool alx_get_phy_info(struct alx_hw *hw)
{
u16 devs1, devs2;
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index f289c05f5cb4..0191477ace51 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -562,6 +562,7 @@ int alx_reset_mac(struct alx_hw *hw);
void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
void alx_update_hw_stats(struct alx_hw *hw);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4eb17daefc4f..c0f84b73574d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -51,6 +51,9 @@
const char alx_drv_name[] = "alx";
+static bool msix = false;
+module_param(msix, bool, 0);
+MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");
static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
@@ -292,32 +295,29 @@ static int alx_poll(struct napi_struct *napi, int budget)
napi_complete(&alx->napi);
/* enable interrupt */
- spin_lock_irqsave(&alx->irq_lock, flags);
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- spin_unlock_irqrestore(&alx->irq_lock, flags);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_mask_msix(hw, 1, false);
+ } else {
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+ }
alx_post_write(hw);
return work;
}
-static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
struct alx_hw *hw = &alx->hw;
- bool write_int_mask = false;
-
- spin_lock(&alx->irq_lock);
-
- /* ACK interrupt */
- alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
- intr &= alx->int_mask;
if (intr & ALX_ISR_FATAL) {
netif_warn(alx, hw, alx->dev,
"fatal interrupt 0x%x, resetting\n", intr);
alx_schedule_reset(alx);
- goto out;
+ return true;
}
if (intr & ALX_ISR_ALERT)
@@ -329,19 +329,32 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
* is cleared, the interrupt status could be cleared.
*/
alx->int_mask &= ~ALX_ISR_PHY;
- write_int_mask = true;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_schedule_link_check(alx);
}
+ return false;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (alx_intr_handle_misc(alx, intr))
+ goto out;
+
if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
napi_schedule(&alx->napi);
/* mask rx/tx interrupt, enable them when napi complete */
alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- write_int_mask = true;
- }
-
- if (write_int_mask)
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ }
alx_write_mem32(hw, ALX_ISR, 0);
@@ -350,6 +363,46 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
return IRQ_HANDLED;
}
+static irqreturn_t alx_intr_msix_ring(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 1, true);
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));
+
+ napi_schedule(&alx->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msix_misc(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 0, true);
+
+ /* read interrupt status */
+ intr = alx_read_mem32(hw, ALX_ISR);
+ intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
+
+ if (alx_intr_handle_misc(alx, intr))
+ return IRQ_HANDLED;
+
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, intr);
+
+ /* enable interrupt again */
+ alx_mask_msix(hw, 0, false);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t alx_intr_msi(int irq, void *data)
{
struct alx_priv *alx = data;
@@ -614,31 +667,136 @@ static void alx_free_rings(struct alx_priv *alx)
static void alx_config_vector_mapping(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ u32 tbl = 0;
- alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
+ tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
+ }
+
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
+static bool alx_enable_msix(struct alx_priv *alx)
+{
+ int i, err, num_vec = 2;
+
+ alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!alx->msix_entries) {
+ netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
+ return false;
+ }
+
+ for (i = 0; i < num_vec; i++)
+ alx->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+ if (err) {
+ kfree(alx->msix_entries);
+ netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
+ return false;
+ }
+
+ alx->num_vec = num_vec;
+ return true;
+}
+
+static int alx_request_msix(struct alx_priv *alx)
+{
+ struct net_device *netdev = alx->dev;
+ int i, err, vector = 0, free_vector = 0;
+
+ err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+ 0, netdev->name, alx);
+ if (err)
+ goto out_err;
+
+ vector++;
+ sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);
+
+ err = request_irq(alx->msix_entries[vector].vector,
+ alx_intr_msix_ring, 0, alx->irq_lbl, alx);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+ vector--;
+ for (i = 0; i < vector; i++)
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+out_err:
+ return err;
+}
+
+static void alx_init_intr(struct alx_priv *alx, bool msix)
+{
+ if (msix) {
+ if (alx_enable_msix(alx))
+ alx->flags |= ALX_FLAG_USING_MSIX;
+ }
+
+ if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
+ alx->num_vec = 1;
+
+ if (!pci_enable_msi(alx->hw.pdev))
+ alx->flags |= ALX_FLAG_USING_MSI;
+ }
+}
+
+static void alx_disable_advanced_intr(struct alx_priv *alx)
+{
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ kfree(alx->msix_entries);
+ pci_disable_msix(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSIX;
+ }
+
+ if (alx->flags & ALX_FLAG_USING_MSI) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSI;
+ }
+}
+
static void alx_irq_enable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
/* level-1 interrupt switch */
alx_write_mem32(hw, ALX_ISR, 0);
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_post_write(hw);
+
+ if (alx->flags & ALX_FLAG_USING_MSIX)
+ /* enable all msix irqs */
+ for (i = 0; i < alx->num_vec; i++)
+ alx_mask_msix(hw, i, false);
}
static void alx_irq_disable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
alx_write_mem32(hw, ALX_IMR, 0);
alx_post_write(hw);
- synchronize_irq(alx->hw.pdev->irq);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ for (i = 0; i < alx->num_vec; i++) {
+ alx_mask_msix(hw, i, true);
+ synchronize_irq(alx->msix_entries[i].vector);
+ }
+ } else {
+ synchronize_irq(alx->hw.pdev->irq);
+ }
}
static int alx_request_irq(struct alx_priv *alx)
@@ -650,9 +808,18 @@ static int alx_request_irq(struct alx_priv *alx)
msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
- if (!pci_enable_msi(alx->hw.pdev)) {
- alx->msi = true;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
+ err = alx_request_msix(alx);
+ if (!err)
+ goto out;
+
+ /* msix request failed, realloc resources */
+ alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
+ }
+ if (alx->flags & ALX_FLAG_USING_MSI) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
msi_ctrl | ALX_MSI_MASK_SEL_LINE);
err = request_irq(pdev->irq, alx_intr_msi, 0,
@@ -660,6 +827,7 @@ static int alx_request_irq(struct alx_priv *alx)
if (!err)
goto out;
/* fall back to legacy interrupt */
+ alx->flags &= ~ALX_FLAG_USING_MSI;
pci_disable_msi(alx->hw.pdev);
}
@@ -669,19 +837,25 @@ static int alx_request_irq(struct alx_priv *alx)
out:
if (!err)
alx_config_vector_mapping(alx);
+ else
+ netdev_err(alx->dev, "IRQ registration failed!\n");
return err;
}
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
+ int i;
- free_irq(pdev->irq, alx);
-
- if (alx->msi) {
- pci_disable_msi(alx->hw.pdev);
- alx->msi = false;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ /* we have only 2 vectors without multi queue support */
+ for (i = 0; i < 2; i++)
+ free_irq(alx->msix_entries[i].vector, alx);
+ } else {
+ free_irq(pdev->irq, alx);
}
+
+ alx_disable_advanced_intr(alx);
}
static int alx_identify_hw(struct alx_priv *alx)
@@ -847,12 +1021,14 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
+ alx_init_intr(alx, msix);
+
if (!resume)
netif_carrier_off(alx->dev);
err = alx_alloc_rings(alx);
if (err)
- return err;
+ goto out_disable_adv_intr;
alx_configure(alx);
@@ -873,6 +1049,8 @@ static int __alx_open(struct alx_priv *alx, bool resume)
out_free_rings:
alx_free_rings(alx);
+out_disable_adv_intr:
+ alx_disable_advanced_intr(alx);
return err;
}
@@ -993,6 +1171,18 @@ static void alx_reset(struct work_struct *work)
rtnl_unlock();
}
+static int alx_tpd_req(struct sk_buff *skb)
+{
+ int num;
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ /* we need one extra descriptor for LSOv2 */
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ num++;
+
+ return num;
+}
+
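As a quick check of the descriptor budget computed above, a worked example with purely illustrative numbers:

/* Hypothetical TSO IPv6 skb with 3 page fragments:
 *   nr_frags + 1 (linear head)   = 4 data TPDs
 *   + 1 extra LSOv2 length TPD   = 5 TPDs in total,
 * which is the value alx_start_xmit() compares against alx_tpd_avail().
 */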
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
u8 cso, css;
@@ -1012,6 +1202,45 @@ static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
return 0;
}
+static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
+{
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ first->word1 |= 1 << TPD_IPV4_SHIFT;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ /* LSOv2: the first TPD only provides the packet length */
+ first->adrl.l.pkt_len = skb->len;
+ first->word1 |= 1 << TPD_LSO_V2_SHIFT;
+ }
+
+ first->word1 |= 1 << TPD_LSO_EN_SHIFT;
+ first->word1 |= (skb_transport_offset(skb) &
+ TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
+ first->word1 |= (skb_shinfo(skb)->gso_size &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ return 1;
+}
+
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
struct alx_tx_queue *txq = &alx->txq;
@@ -1022,6 +1251,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
first_tpd = &txq->tpd[txq->write_idx];
tpd = first_tpd;
+ if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+
+ tpd = &txq->tpd[txq->write_idx];
+ tpd->len = first_tpd->len;
+ tpd->vlan_tag = first_tpd->vlan_tag;
+ tpd->word1 = first_tpd->word1;
+ }
+
maplen = skb_headlen(skb);
dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
DMA_TO_DEVICE);
@@ -1082,9 +1321,9 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
struct alx_priv *alx = netdev_priv(netdev);
struct alx_tx_queue *txq = &alx->txq;
struct alx_txd *first;
- int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+ int tso;
- if (alx_tpd_avail(alx) < tpdreq) {
+ if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
netif_stop_queue(alx->dev);
goto drop;
}
@@ -1092,7 +1331,10 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
first = &txq->tpd[txq->write_idx];
memset(first, 0, sizeof(*first));
- if (alx_tx_csum(skb, first))
+ tso = alx_tso(skb, first);
+ if (tso < 0)
+ goto drop;
+ else if (!tso && alx_tx_csum(skb, first))
goto drop;
if (alx_map_tx_skb(alx, skb) < 0)
@@ -1172,7 +1414,10 @@ static void alx_poll_controller(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
- if (alx->msi)
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_intr_msix_misc(0, alx);
+ alx_intr_msix_ring(0, alx);
+ } else if (alx->flags & ALX_FLAG_USING_MSI)
alx_intr_msi(0, alx);
else
alx_intr_legacy(0, alx);
@@ -1351,7 +1596,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
dev_warn(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74f0a37c4eb6..17aa33c5567d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1486,7 +1486,7 @@ static int b44_open(struct net_device *dev)
b44_enable_ints(bp);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_start(bp->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
out:
@@ -1651,7 +1651,7 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_stop(bp->phydev);
+ phy_stop(dev->phydev);
napi_disable(&bp->napi);
@@ -1832,90 +1832,100 @@ static int b44_nway_reset(struct net_device *dev)
return r;
}
-static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ u32 supported, advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- return phy_ethtool_gset(bp->phydev, cmd);
+ BUG_ON(!dev->phydev);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_MII);
+ supported = (SUPPORTED_Autoneg);
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
- cmd->advertising = 0;
+ advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
- SPEED_100 : SPEED_10));
- cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = 0;
- cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
- cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ cmd->base.port = 0;
+ cmd->base.phy_address = bp->phy_addr;
+ cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
- if (cmd->autoneg == AUTONEG_ENABLE)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ advertising |= ADVERTISED_Autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (!netif_running(dev)){
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+
return 0;
}
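The conversion above follows the standard ethtool migration from the legacy ethtool_cmd to ethtool_link_ksettings; the field mapping used in this patch is roughly:

/* old ethtool_cmd field            new ethtool_link_ksettings field
 * --------------------------       --------------------------------------
 * speed, duplex, port,             base.speed, base.duplex, base.port,
 *   phy_address, autoneg             base.phy_address, base.autoneg
 * supported, advertising (u32)     link_modes.supported, .advertising
 *                                    (via ethtool_convert_legacy_u32_to_link_mode)
 * transceiver, maxtxpkt, maxrxpkt  dropped -- no equivalent in the new API
 */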
-static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
u32 speed;
int ret;
+ u32 advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
+ BUG_ON(!dev->phydev);
spin_lock_irq(&bp->lock);
if (netif_running(dev))
b44_setup_phy(bp);
- ret = phy_ethtool_sset(bp->phydev, cmd);
+ ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
spin_unlock_irq(&bp->lock);
return ret;
}
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* We do not support gigabit. */
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (cmd->advertising &
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
} else if ((speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)) {
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)) {
return -EINVAL;
}
spin_lock_irq(&bp->lock);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
bp->flags &= ~(B44_FLAG_FORCE_LINK |
B44_FLAG_100_BASE_T |
B44_FLAG_FULL_DUPLEX |
@@ -1923,19 +1933,19 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
- if (cmd->advertising == 0) {
+ if (advertising == 0) {
bp->flags |= (B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
} else {
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
bp->flags |= B44_FLAG_ADV_10HALF;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->flags |= B44_FLAG_ADV_10FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
bp->flags |= B44_FLAG_ADV_100HALF;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->flags |= B44_FLAG_ADV_100FULL;
}
} else {
@@ -1943,7 +1953,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
@@ -2110,8 +2120,6 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
- .get_settings = b44_get_settings,
- .set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = b44_get_wol,
@@ -2125,6 +2133,8 @@ static const struct ethtool_ops b44_ethtool_ops = {
.get_strings = b44_get_strings,
.get_sset_count = b44_get_sset_count,
.get_ethtool_stats = b44_get_ethtool_stats,
+ .get_link_ksettings = b44_get_link_ksettings,
+ .set_link_ksettings = b44_set_link_ksettings,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2137,8 +2147,8 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_lock_irq(&bp->lock);
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ BUG_ON(!dev->phydev);
+ err = phy_mii_ioctl(dev->phydev, ifr, cmd);
} else {
err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
}
@@ -2206,7 +2216,7 @@ static const struct net_device_ops b44_netdev_ops = {
static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phydev;
+ struct phy_device *phydev = dev->phydev;
bool status_changed = 0;
BUG_ON(!phydev);
@@ -2303,7 +2313,6 @@ static int b44_register_phy_one(struct b44 *bp)
SUPPORTED_MII);
phydev->advertising = phydev->supported;
- bp->phydev = phydev;
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
@@ -2323,9 +2332,10 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
+ struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
- phy_disconnect(bp->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 65d88d7c5581..89d2cf341163 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -404,7 +404,6 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int old_link;
struct mii_if_info mii_if;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6c8bc5fadac7..ae364c74baf3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -791,7 +791,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
int status_changed;
priv = netdev_priv(dev);
- phydev = priv->phydev;
+ phydev = dev->phydev;
status_changed = 0;
if (priv->old_link != phydev->link) {
@@ -913,7 +913,6 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
- priv->phydev = phydev;
}
/* mask all interrupts and request them */
@@ -1085,7 +1084,7 @@ static int bcm_enet_open(struct net_device *dev)
ENETDMAC_IRMASK, priv->tx_chan);
if (priv->has_phy)
- phy_start(priv->phydev);
+ phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1127,7 +1126,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1190,7 +1189,7 @@ static int bcm_enet_stop(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
del_timer_sync(&priv->rx_timeout);
/* mask all interrupts */
@@ -1234,10 +1233,8 @@ static int bcm_enet_stop(struct net_device *dev)
free_irq(dev->irq, dev);
/* release phy */
- if (priv->has_phy) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
+ if (priv->has_phy)
+ phy_disconnect(dev->phydev);
return 0;
}
@@ -1437,64 +1434,68 @@ static int bcm_enet_nway_reset(struct net_device *dev)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(dev->phydev);
}
return -EOPNOTSUPP;
}
-static int bcm_enet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
+ u32 supported, advertising;
priv = netdev_priv(dev);
- cmd->maxrxpkt = 0;
- cmd->maxtxpkt = 0;
-
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
} else {
- cmd->autoneg = 0;
- ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
- ? SPEED_100 : SPEED_10));
- cmd->duplex = (priv->force_duplex_full) ?
+ cmd->base.autoneg = 0;
+ cmd->base.speed = (priv->force_speed_100) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->supported = ADVERTISED_10baseT_Half |
+ supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->advertising = 0;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
+ advertising = 0;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ cmd->base.port = PORT_MII;
}
return 0;
}
-static int bcm_enet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
} else {
- if (cmd->autoneg ||
- (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
- cmd->port != PORT_MII)
+ if (cmd->base.autoneg ||
+ (cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ cmd->base.port != PORT_MII)
return -EINVAL;
- priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
- priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+ priv->force_speed_100 =
+ (cmd->base.speed == SPEED_100) ? 1 : 0;
+ priv->force_duplex_full =
+ (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
if (netif_running(dev))
bcm_enet_adjust_link(dev);
@@ -1588,14 +1589,14 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.nway_reset = bcm_enet_nway_reset,
- .get_settings = bcm_enet_get_settings,
- .set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
+ .get_link_ksettings = bcm_enet_get_link_ksettings,
+ .set_link_ksettings = bcm_enet_set_link_ksettings,
};
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1604,9 +1605,9 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
} else {
struct mii_if_info mii;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index f55af4310085..0a1b7b2e55bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -290,7 +290,6 @@ struct bcm_enet_priv {
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b2d30863caeb..c3354b9941d1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -58,8 +58,8 @@ BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
u32 mask) \
{ \
- intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
priv->irq##which##_mask &= ~(mask); \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
u32 mask) \
@@ -1692,7 +1692,7 @@ static int bcm_sysport_stop(struct net_device *dev)
return 0;
}
-static struct ethtool_ops bcm_sysport_ethtool_ops = {
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 625235db644f..c16ec3a51876 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
@@ -157,7 +158,8 @@ static int bgmac_probe(struct bcma_device *core)
dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr,
bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
- if (!bgmac_is_bcm4707_family(core)) {
+ if (!bgmac_is_bcm4707_family(core) &&
+ !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
@@ -230,6 +232,21 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
+ case BCMA_CHIP_ID_BCM53573:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
+ }
+ break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index c4751ece76f6..6ea0e5ff1e44 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -932,7 +932,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype <<= 4;
sw_type = et_swtype;
} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
- sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
@@ -940,6 +941,27 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
BGMAC_CHIPCTL_1_SW_TYPE_MASK),
sw_type);
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
+ u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
+ BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
+ u8 et_swtype = 0;
+ char buf[4];
+
+ if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
+ if (kstrtou8(buf, 0, &et_swtype))
+ dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
+ buf);
+ sw_type = (et_swtype & 0x0f) << 12;
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
+ sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
+ }
+ bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_4_SW_TYPE_MASK),
+ sw_type);
+ } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
+ bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
+ BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
}
if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
@@ -1467,6 +1489,10 @@ int bgmac_enet_probe(struct bgmac *info)
*/
bgmac_clk_enable(bgmac, 0);
+ /* Assigning OOB #6 to the core seems to fix IRQ delivery */
+ if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+ bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+
bgmac_chip_reset(bgmac);
err = bgmac_dma_alloc(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 24a250267b88..80836b4c9f38 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -369,6 +369,21 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
+#define BGMAC_CHIPCTL_4_IF_TYPE_MASK 0x00003000
+#define BGMAC_CHIPCTL_4_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_4_IF_TYPE_MII 0x00001000
+#define BGMAC_CHIPCTL_4_IF_TYPE_RGMII 0x00002000
+#define BGMAC_CHIPCTL_4_SW_TYPE_MASK 0x0000C000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHY 0x00000000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYMII 0x00004000
+#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYRMII 0x00008000
+#define BGMAC_CHIPCTL_4_SW_TYPE_RGMII 0x0000C000
+
+#define BGMAC_CHIPCTL_7_IF_TYPE_MASK 0x000000C0
+#define BGMAC_CHIPCTL_7_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
+#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
+
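For reference, the et_swtype nvram override in bgmac_chip_reset() lands in the two CHIPCTL_4 fields defined above; a worked example with an assumed nvram value:

/* Assuming et_swtype=0x6 (illustrative value only):
 *   (0x6 & 0x0f) << 12 = 0x6000
 *   0x6000 & BGMAC_CHIPCTL_4_IF_TYPE_MASK (0x3000) = 0x2000 -> IF_TYPE_RGMII
 *   0x6000 & BGMAC_CHIPCTL_4_SW_TYPE_MASK (0xC000) = 0x4000 -> SW_TYPE_EPHYMII
 */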
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -390,6 +405,10 @@
#define BGMAC_FEAT_NO_CLR_MIB BIT(13)
#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14)
#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15)
+#define BGMAC_FEAT_IRQ_ID_OOB_6 BIT(16)
+#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
+#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
+#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
struct bgmac_slot_info {
union {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 505ceaf451e2..27f11a5d5fe2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -50,7 +50,7 @@
#include <linux/log2.h>
#include <linux/aer.h>
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
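IS_ENABLED() evaluates to true for both built-in (=y) and modular (=m) configurations, which is why the two-macro guard can be collapsed into a single test; the equivalence, as a sketch:

/* The two guards below accept the same configurations
 * (CONFIG_CNIC=y or CONFIG_CNIC=m):
 */
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)	/* old spelling */
#endif
#if IS_ENABLED(CONFIG_CNIC)				/* preferred */
#endif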
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e68fadecfdb..243cb9748d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -492,7 +492,8 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
-int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fa3386bb14f7..20fe6a8c35c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12563,41 +12563,64 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
+struct bnx2x_mcast_list_elem_group
+{
+ struct list_head mcast_group_link;
+ struct bnx2x_mcast_list_elem mcast_elems[];
+};
+
+#define MCAST_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
+ sizeof(struct bnx2x_mcast_list_elem))
+
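The page-sized groups introduced here replace one potentially large kcalloc() for the whole multicast list; roughly how many elements fit per page, under stated assumptions:

/* Assuming PAGE_SIZE = 4096 and a 64-bit build, where the group header is
 * one list_head (16 bytes) and struct bnx2x_mcast_list_elem is a list_head
 * plus a u8 *mac pointer (24 bytes):
 *   MCAST_ELEMS_PER_PG = (4096 - 16) / 24 = 170 elements per page
 */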
+static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_list_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p)
+ struct bnx2x_mcast_ramrod_params *p,
+ struct list_head *mcast_group_list)
{
- int mc_count = netdev_mc_count(bp->dev);
- struct bnx2x_mcast_list_elem *mc_mac =
- kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
+ struct bnx2x_mcast_list_elem *mc_mac;
struct netdev_hw_addr *ha;
-
- if (!mc_mac)
- return -ENOMEM;
+ struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
+ int mc_count = netdev_mc_count(bp->dev);
+ int offset = 0;
INIT_LIST_HEAD(&p->mcast_list);
-
netdev_for_each_mc_addr(ha, bp->dev) {
+ if (!offset) {
+ current_mcast_group =
+ (struct bnx2x_mcast_list_elem_group *)
+ __get_free_page(GFP_ATOMIC);
+ if (!current_mcast_group) {
+ bnx2x_free_mcast_macs_list(mcast_group_list);
+ BNX2X_ERR("Failed to allocate mc MAC list\n");
+ return -ENOMEM;
+ }
+ list_add(&current_mcast_group->mcast_group_link,
+ mcast_group_list);
+ }
+ mc_mac = &current_mcast_group->mcast_elems[offset];
mc_mac->mac = bnx2x_mc_addr(ha);
list_add_tail(&mc_mac->link, &p->mcast_list);
- mc_mac++;
+ offset++;
+ if (offset == MCAST_ELEMS_PER_PG)
+ offset = 0;
}
-
p->mcast_list_len = mc_count;
-
return 0;
}
-static void bnx2x_free_mcast_macs_list(
- struct bnx2x_mcast_ramrod_params *p)
-{
- struct bnx2x_mcast_list_elem *mc_mac =
- list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
- link);
-
- WARN_ON(!mc_mac);
- kfree(mc_mac);
-}
-
/**
* bnx2x_set_uc_list - configure a new unicast MACs list.
*
@@ -12643,8 +12666,9 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
BNX2X_UC_LIST_MAC, &ramrod_flags);
}
-static int bnx2x_set_mc_list(struct bnx2x *bp)
+static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
{
+ LIST_HEAD(mcast_group_list);
struct net_device *dev = bp->dev;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
int rc = 0;
@@ -12660,12 +12684,9 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
/* then, configure a new MACs list */
if (netdev_mc_count(dev)) {
- rc = bnx2x_init_mcast_macs_list(bp, &rparam);
- if (rc) {
- BNX2X_ERR("Failed to create multicast MACs list: %d\n",
- rc);
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
return rc;
- }
/* Now add the new MACs */
rc = bnx2x_config_mcast(bp, &rparam,
@@ -12674,7 +12695,44 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
rc);
- bnx2x_free_mcast_macs_list(&rparam);
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ }
+
+ return rc;
+}
+
+static int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ LIST_HEAD(mcast_group_list);
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ struct net_device *dev = bp->dev;
+ int rc = 0;
+
+ /* On older adapters, we need to flush and re-add filters */
+ if (CHIP_IS_E1x(bp))
+ return bnx2x_set_mc_list_e1x(bp);
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
+ return rc;
+
+ /* Override the currently configured set of mc filters */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_SET);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
+
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ } else {
+ /* If no mc addresses are required, flush the configuration */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc)
+ BNX2X_ERR("Failed to clear multicast configuration %d\n",
+ rc);
}
return rc;
@@ -13214,13 +13272,22 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_IPXIP4;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
+ dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index ff702a707a91..cea6bdcde33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2600,8 +2600,29 @@ struct bnx2x_mcast_mac_elem {
u8 pad[2]; /* For a natural alignment of the following buffer */
};
+struct bnx2x_mcast_bin_elem {
+ struct list_head link;
+ int bin;
+ int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */
+};
+
+union bnx2x_mcast_elem {
+ struct bnx2x_mcast_bin_elem bin_elem;
+ struct bnx2x_mcast_mac_elem mac_elem;
+};
+
+struct bnx2x_mcast_elem_group {
+ struct list_head mcast_group_link;
+ union bnx2x_mcast_elem mcast_elems[];
+};
+
+#define MCAST_MAC_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \
+ sizeof(union bnx2x_mcast_elem))
+
struct bnx2x_pending_mcast_cmd {
struct list_head link;
+ struct list_head group_head;
int type; /* BNX2X_MCAST_CMD_X */
union {
struct list_head macs_head;
@@ -2609,6 +2630,11 @@ struct bnx2x_pending_mcast_cmd {
int next_bin; /* Needed for RESTORE flow with aprox match */
} data;
+ bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set
+ * when macs_head has been converted to a list of
+ * bnx2x_mcast_bin_elem.
+ */
+
bool done; /* set to true, when the command has been handled,
* practically used in 57712 handling only, where one pending
* command may be handled in a few operations. As long as for
@@ -2627,53 +2653,93 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
return 0;
}
+static void bnx2x_free_groups(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
enum bnx2x_mcast_cmd cmd)
{
- int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
- struct bnx2x_mcast_mac_elem *cur_mac = NULL;
struct bnx2x_mcast_list_elem *pos;
- int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
- p->mcast_list_len : 0);
+ struct bnx2x_mcast_elem_group *elem_group;
+ struct bnx2x_mcast_mac_elem *mac_elem;
+ int total_elems = 0, macs_list_len = 0, offset = 0;
+
+ /* When adding MACs we'll need to store their values */
+ if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET)
+ macs_list_len = p->mcast_list_len;
/* If the command is empty ("handle pending commands only"), break */
if (!p->mcast_list_len)
return 0;
- total_sz = sizeof(*new_cmd) +
- macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
-
/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
- new_cmd = kzalloc(total_sz, GFP_ATOMIC);
-
+ new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC);
if (!new_cmd)
return -ENOMEM;
- DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
- cmd, macs_list_len);
-
INIT_LIST_HEAD(&new_cmd->data.macs_head);
-
+ INIT_LIST_HEAD(&new_cmd->group_head);
new_cmd->type = cmd;
new_cmd->done = false;
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
+
switch (cmd) {
case BNX2X_MCAST_CMD_ADD:
- cur_mac = (struct bnx2x_mcast_mac_elem *)
- ((u8 *)new_cmd + sizeof(*new_cmd));
-
- /* Push the MACs of the current command into the pending command
- * MACs list: FIFO
+ case BNX2X_MCAST_CMD_SET:
+ /* For a set command, we need to allocate sufficient memory for
+ * all the bins, since we can't analyze at this point how much
+ * memory would be required.
*/
+ total_elems = macs_list_len;
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ if (total_elems < BNX2X_MCAST_BINS_NUM)
+ total_elems = BNX2X_MCAST_BINS_NUM;
+ }
+ while (total_elems > 0) {
+ elem_group = (struct bnx2x_mcast_elem_group *)
+ __get_free_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!elem_group) {
+ bnx2x_free_groups(&new_cmd->group_head);
+ kfree(new_cmd);
+ return -ENOMEM;
+ }
+ total_elems -= MCAST_MAC_ELEMS_PER_PG;
+ list_add_tail(&elem_group->mcast_group_link,
+ &new_cmd->group_head);
+ }
+ elem_group = list_first_entry(&new_cmd->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
list_for_each_entry(pos, &p->mcast_list, link) {
- memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
- list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
- cur_mac++;
+ mac_elem = &elem_group->mcast_elems[offset].mac_elem;
+ memcpy(mac_elem->mac, pos->mac, ETH_ALEN);
+ /* Push the MACs of the current command into the pending
+ * command MACs list: FIFO
+ */
+ list_add_tail(&mac_elem->link,
+ &new_cmd->data.macs_head);
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
}
-
break;
case BNX2X_MCAST_CMD_DEL:
@@ -2771,7 +2837,8 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
int bin;
- if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) ||
+ (cmd == BNX2X_MCAST_CMD_SET_ADD))
rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
data->rules[idx].cmd_general_data |= rx_tx_add_flag;
@@ -2797,6 +2864,16 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
bin = cfg_data->bin;
break;
+ case BNX2X_MCAST_CMD_SET_ADD:
+ bin = cfg_data->bin;
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_SET_DEL:
+ bin = cfg_data->bin;
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return;
@@ -2932,6 +3009,110 @@ static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
cmd_pos->data.next_bin++;
}
+static void
+bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos)
+{
+ u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ struct bnx2x_mcast_bin_elem *p_item;
+ struct bnx2x_mcast_elem_group *elem_group;
+ int cnt = 0, mac_cnt = 0, offset = 0, i;
+
+ memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+ memcpy(cur, o->registry.aprox_match.vec,
+ sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+
+ /* Mark in `req' the bins required by the new set of MACs */
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+ int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac);
+
+ DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n",
+ pmac_pos->mac);
+
+ BIT_VEC64_SET_BIT(req, bin);
+ list_del(&pmac_pos->link);
+ mac_cnt++;
+ }
+
+ /* We no longer have use for the MACs; reuse the memory for a list
+ * that will be used to configure bins.
+ */
+ cmd_pos->set_convert = true;
+ INIT_LIST_HEAD(&cmd_pos->data.macs_head);
+ elem_group = list_first_entry(&cmd_pos->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
+ bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
+ bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
+
+ if (b_current == b_required)
+ continue;
+
+ p_item = &elem_group->mcast_elems[offset].bin_elem;
+ p_item->bin = i;
+ p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD
+ : BNX2X_MCAST_CMD_SET_DEL;
+ list_add_tail(&p_item->link, &cmd_pos->data.macs_head);
+ cnt++;
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
+ }
+
+ /* We now definitely know how many commands are hiding here.
+ * Also correct the over-count added earlier to guarantee this command
+ * would be enqueued.
+ */
+ o->total_pending_num -= (o->max_cmd_len + mac_cnt);
+ o->total_pending_num += cnt;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num);
+}
+
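The conversion above boils down to diffing two bin bitmaps and emitting only the transitions. A self-contained sketch of that idea in plain C (bin count and names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define NBINS 256	/* illustrative bin count */

static void diff_bins(const uint64_t *cur, const uint64_t *req)
{
	for (int i = 0; i < NBINS; i++) {
		int is_cur = !!(cur[i / 64] & (1ULL << (i % 64)));
		int is_req = !!(req[i / 64] & (1ULL << (i % 64)));

		if (is_cur == is_req)
			continue;	/* bin already in the desired state */
		printf("bin %3d: %s\n", i, is_req ? "SET_ADD" : "SET_DEL");
	}
}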
+static void
+bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *cnt)
+{
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+ struct bnx2x_mcast_bin_elem *p_item, *p_item_n;
+
+ /* This is actually a 2-part scheme - it starts by converting the MACs
+ * into a list of bins to be added/removed, and correcting the numbers
+ * on the object. This is allowed only now, as we're sure that all
+ * previously configured requests have already been applied.
+ * The second part is actually adding rules for the newly introduced
+ * entries [like all the rest of the hdl_pending functions].
+ */
+ if (!cmd_pos->set_convert)
+ bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos);
+
+ list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head,
+ link) {
+ cfg_data.bin = (u8)p_item->bin;
+ o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type);
+ (*cnt)++;
+
+ list_del(&p_item->link);
+
+ /* Break if we reached the maximum number of rules. */
+ if (*cnt >= o->max_cmd_len)
+ break;
+ }
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p)
{
@@ -2955,6 +3136,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
&cnt);
break;
+ case BNX2X_MCAST_CMD_SET:
+ bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
return -EINVAL;
@@ -2965,6 +3150,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
*/
if (cmd_pos->done) {
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
}
@@ -3095,6 +3281,19 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
o->set_registry_size(o, reg_sz + p->mcast_list_len);
break;
+ case BNX2X_MCAST_CMD_SET:
+ /* We can only learn how many commands would actually be used
+ * when this is being configured. So for now, simply guarantee
+ * the command will be enqueued [to refrain from adding logic
+ * that handles this and THEN learns it needs several ramrods].
+ * Just like for ADD/Cont, the mcast_list_len might be an
+ * overestimation; even more so here, since we don't take into
+ * account the possibility of removal of existing bins.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ o->total_pending_num += o->max_cmd_len;
+ break;
+
default:
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
@@ -3108,12 +3307,16 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins)
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
o->set_registry_size(o, old_num_bins);
o->total_pending_num -= p->mcast_list_len;
+
+ if (cmd == BNX2X_MCAST_CMD_SET)
+ o->total_pending_num -= o->max_cmd_len;
}
/**
@@ -3223,9 +3426,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
bnx2x_mcast_refresh_registry_e2(bp, o);
/* If CLEAR_ONLY was requested - don't send a ramrod and clear
- * RAMROD_PENDING status immediately.
+ * RAMROD_PENDING status immediately. Due to the SET option, it's also
+ * possible that after evaluating the differences there's no need for
+ * a ramrod. In that case, we can skip it as well.
*/
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
raw->clear_pending(raw);
return 0;
} else {
@@ -3253,6 +3458,11 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
enum bnx2x_mcast_cmd cmd)
{
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1h!\n");
+ return -EINVAL;
+ }
+
/* Mark, that there is a work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
p->mcast_list_len = 1;
@@ -3262,7 +3472,8 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins)
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
{
/* Do nothing */
}
@@ -3372,6 +3583,11 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1!\n");
+ return -EINVAL;
+ }
+
switch (cmd) {
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
@@ -3422,7 +3638,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_macs)
+ int old_num_macs,
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3572,6 +3789,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
}
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
return cnt;
@@ -3816,7 +4034,7 @@ error_exit2:
r->clear_pending(r);
error_exit1:
- o->revert(bp, p, old_reg_size);
+ o->revert(bp, p, old_reg_size, cmd);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 4048fc594cce..0bf2fd470819 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -536,6 +536,15 @@ enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
BNX2X_MCAST_CMD_RESTORE,
+
+ /* Following this, the multicast configuration should approximately
+ * equal the set of MACs provided [i.e., remove all others].
+ * The two sub-commands are used internally to decide whether a given
+ * bin is to be added or removed
+ */
+ BNX2X_MCAST_CMD_SET,
+ BNX2X_MCAST_CMD_SET_ADD,
+ BNX2X_MCAST_CMD_SET_DEL,
};
struct bnx2x_mcast_obj {
@@ -635,7 +644,8 @@ struct bnx2x_mcast_obj {
*/
void (*revert)(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int old_num_bins);
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd);
int (*get_registry_size)(struct bnx2x_mcast_obj *o);
void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 632daff117d3..3f77d0863543 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -573,17 +573,6 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
}
- /* clear existing mcasts */
- mcast.mcast_list_len = vf->mcast_list_len;
- vf->mcast_list_len = mc_num;
- rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
- if (rc) {
- BNX2X_ERR("Failed to remove multicasts\n");
- kfree(mc);
- return rc;
- }
-
- /* update mcast list on the ramrod params */
if (mc_num) {
INIT_LIST_HEAD(&mcast.mcast_list);
for (i = 0; i < mc_num; i++) {
@@ -594,12 +583,18 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* add new mcasts */
mcast.mcast_list_len = mc_num;
- rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to add multicasts\n");
- kfree(mc);
+ BNX2X_ERR("Faled to set multicasts\n");
+ } else {
+ /* clear existing mcasts */
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc)
+ BNX2X_ERR("Failed to remove multicasts\n");
}
+ kfree(mc);
+
return rc;
}
@@ -1583,7 +1578,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
- vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2527,7 +2521,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
for_each_vf(bp, vfidx) {
bulletin = BP_VF_BULLETIN(bp, vfidx);
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
- bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
+ htons(ETH_P_8021Q));
}
}
@@ -2787,7 +2782,8 @@ static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
return 0;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct pf_vf_bulletin_content *bulletin = NULL;
struct bnx2x *bp = netdev_priv(dev);
@@ -2802,6 +2798,9 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
vfidx, vlan, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 670a581ffabc..7a6d406f4c11 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -195,7 +195,6 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
- int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 228c964e709a..a9f9f3738022 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -32,6 +32,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -93,50 +94,49 @@ enum board_idx {
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
+ BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
- BCM57304_VF,
- BCM57404_VF,
- BCM57414_VF,
- BCM57314_VF,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
};
/* indexed by enum above */
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
- { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
- { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
@@ -160,13 +160,19 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
- { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
- { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
- { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
- { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
{ 0 }
};
@@ -189,8 +195,7 @@ static const u16 bnxt_async_events_arr[] = {
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF ||
- idx == BCM57314_VF || idx == BCM57414_VF);
+ return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -3419,10 +3424,10 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+ vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
req.hash_type = cpu_to_le32(vnic->hash_type);
@@ -4156,6 +4161,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4187,12 +4197,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- if (is_valid_ether_addr(vf->mac_addr))
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- else
- random_ether_addr(bp->dev->dev_addr);
vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -4204,14 +4208,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
vf->max_vnics = le16_to_cpu(resp->max_vnics);
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+ return rc;
#endif
}
- bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
- bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
-
hwrm_func_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4249,6 +4260,9 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bp->max_tc = 1;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4307,6 +4321,31 @@ hwrm_ver_get_exit:
return rc;
}
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if IS_ENABLED(CONFIG_RTC_LIB)
+ struct hwrm_fw_set_time_input req = {0};
+ struct rtc_time tm;
+ struct timeval tv;
+
+ if (bp->hwrm_spec_code < 0x10400)
+ return -EOPNOTSUPP;
+
+ do_gettimeofday(&tv);
+ rtc_time_to_tm(tv.tv_sec, &tm);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+ req.year = cpu_to_le16(1900 + tm.tm_year);
+ req.month = 1 + tm.tm_mon;
+ req.day = tm.tm_mday;
+ req.hour = tm.tm_hour;
+ req.minute = tm.tm_min;
+ req.second = tm.tm_sec;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
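For reference, the request above simply carries the wall-clock time split into calendar fields; a userspace analogue of the same packing (illustrative only, not driver code):

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm tm;

	gmtime_r(&now, &tm);	/* same split rtc_time_to_tm() performs */
	printf("year=%d month=%d day=%d %02d:%02d:%02d\n",
	       1900 + tm.tm_year, 1 + tm.tm_mon, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}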
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
int rc;
@@ -6804,6 +6843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ bnxt_hwrm_fw_set_time(bp);
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 23e04a6142fb..51b164a0e844 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.3.0"
+#define DRV_MODULE_VERSION "1.5.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 3
+#define DRV_VER_MIN 5
#define DRV_VER_UPD 0
struct tx_bd {
@@ -106,11 +106,11 @@ struct tx_cmp {
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
#define CMP_TYPE_ERROR_STATUS 48
- #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
- #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
@@ -389,11 +389,6 @@ struct rx_tpa_end_cmp_ext {
#define INVALID_HW_RING_ID ((u16)-1)
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
-
/* The hardware supports certain page sizes. Use the supported page sizes
* to allocate the rings.
*/
@@ -418,7 +413,7 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
-#define BNXT_MIN_PKT_SIZE 45
+#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -1225,6 +1220,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
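
The bnxt.c and bnxt.h hunks above add a bnxt_hwrm_fw_set_time() entry point (prototype a few lines up) that pushes the current wall-clock time to the firmware via HWRM_FW_SET_TIME, returning -EOPNOTSUPP when hwrm_spec_code is below 0x10400 or when CONFIG_RTC_LIB is not enabled. The request simply carries broken-down calendar fields, so the only subtlety is the usual struct tm offsets (tm_year counts from 1900, tm_mon from 0). A minimal user-space sketch of the same conversion, with gmtime_r() standing in for the kernel's rtc_time_to_tm(), is:

/* Sketch only: user-space equivalent of the calendar conversion that
 * bnxt_hwrm_fw_set_time() performs before filling hwrm_fw_set_time_input.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm tm;

	gmtime_r(&now, &tm);			/* kernel: rtc_time_to_tm(tv.tv_sec, &tm) */

	unsigned int year  = 1900 + tm.tm_year;	/* req.year:  tm_year counts from 1900 */
	unsigned int month = 1 + tm.tm_mon;	/* req.month: tm_mon is 0..11 */

	/* req.day/hour/minute/second are taken verbatim from the struct. */
	printf("%04u-%02u-%02d %02d:%02d:%02d UTC\n",
	       year, month, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}
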
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b83e17403d6c..a7e04ff4eaed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -21,6 +21,8 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
@@ -346,7 +348,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_rx_rings;
+ channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -404,8 +406,8 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
max_tx_rings /= tcs;
- if (sh && (channel->combined_count > max_rx_rings ||
- channel->combined_count > max_tx_rings))
+ if (sh &&
+ channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
return -ENOMEM;
if (!sh && (channel->rx_count > max_rx_rings ||
@@ -428,8 +430,10 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = channel->combined_count;
- bp->tx_nr_rings_per_tc = channel->combined_count;
+ bp->rx_nr_rings = min_t(int, channel->combined_count,
+ max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
+ max_tx_rings);
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
@@ -1028,6 +1032,10 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+
static int bnxt_flash_nvram(struct net_device *dev,
u16 dir_type,
u16 dir_ordinal,
@@ -1179,7 +1187,6 @@ static int bnxt_flash_firmware(struct net_device *dev,
(unsigned long)calculated_crc);
return -EINVAL;
}
- /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
@@ -1188,6 +1195,57 @@ static int bnxt_flash_firmware(struct net_device *dev,
return rc;
}
+static int bnxt_flash_microcode(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ struct bnxt_ucode_trailer *trailer;
+ u32 calculated_crc;
+ u32 stored_crc;
+ int rc = 0;
+
+ if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+ sizeof(*trailer)));
+ if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+ netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+ le32_to_cpu(trailer->sig));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->dir_type) != dir_type) {
+ netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+ dir_type, le16_to_cpu(trailer->dir_type));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->trailer_length) <
+ sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode trailer length: %d\n",
+ le16_to_cpu(trailer->trailer_length));
+ return -EINVAL;
+ }
+
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev,
+ "CRC32 (%08lX) does not match calculated: %08lX\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+
+ return rc;
+}
+
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
switch (dir_type) {
@@ -1206,7 +1264,7 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
return false;
}
-static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
switch (dir_type) {
case BNX_DIR_TYPE_AVS:
@@ -1227,7 +1285,7 @@ static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
return bnxt_dir_type_is_ape_bin_format(dir_type) ||
- bnxt_dir_type_is_unprotected_exec_format(dir_type);
+ bnxt_dir_type_is_other_exec_format(dir_type);
}
static int bnxt_flash_firmware_from_file(struct net_device *dev,
@@ -1237,10 +1295,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (dir_type != BNX_DIR_TYPE_UPDATE &&
- bnxt_dir_type_is_executable(dir_type) == false)
- return -EINVAL;
-
rc = request_firmware(&fw, filename, &dev->dev);
if (rc != 0) {
netdev_err(dev, "Error %d requesting firmware file: %s\n",
@@ -1249,6 +1303,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw->data, fw->size);
@@ -1257,10 +1313,83 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename)
+ char *filename, u32 install_type)
{
- netdev_err(dev, "packages are not yet supported\n");
- return -EINVAL;
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_install_update_input install = {0};
+ const struct firmware *fw;
+ u32 item_len;
+ u16 index;
+ int rc;
+
+ bnxt_hwrm_fw_set_time(bp);
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL) != 0) {
+ netdev_err(dev, "PKG update area not created in nvram\n");
+ return -ENOBUFS;
+ }
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ if (fw->size > item_len) {
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+ (unsigned long)fw->size);
+ rc = -EFBIG;
+ } else {
+ dma_addr_t dma_handle;
+ u8 *kmem;
+ struct hwrm_nvm_modify_input modify = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+ modify.dir_idx = cpu_to_le16(index);
+ modify.len = cpu_to_le32(fw->size);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+ &dma_handle, GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev,
+ "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)fw->size);
+ rc = -ENOMEM;
+ } else {
+ memcpy(kmem, fw->data, fw->size);
+ modify.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+ dma_handle);
+ }
+ }
+ release_firmware(fw);
+ if (rc)
+ return rc;
+
+ if ((install_type & 0xffff) == 0)
+ install_type >>= 16;
+ bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+ install.install_type = cpu_to_le32(install_type);
+
+ rc = hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (resp->result) {
+ netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+ (s8)resp->result, (int)resp->problem_item);
+ return -ENOPKG;
+ }
+ return 0;
}
static int bnxt_flash_device(struct net_device *dev,
@@ -1271,8 +1400,10 @@ static int bnxt_flash_device(struct net_device *dev,
return -EINVAL;
}
- if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
- return bnxt_flash_package_from_file(dev, flash->data);
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+ flash->region > 0xffff)
+ return bnxt_flash_package_from_file(dev, flash->data,
+ flash->region);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -1516,7 +1647,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
/* Create or re-write an NVM item: */
if (bnxt_dir_type_is_executable(type) == true)
- return -EINVAL;
+ return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
@@ -1718,6 +1849,25 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int bnxt_nway_reset(struct net_device *dev)
+{
+ int rc = 0;
+
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, true, false);
+
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -1750,4 +1900,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .nway_reset = bnxt_nway_reset
};
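
The bnxt_get_channels()/bnxt_set_channels() hunks above stop assuming symmetric ring budgets: the advertised max_combined becomes the larger of the RX and TX limits, and a requested combined count is clamped per direction with min_t() when shared rings are in use (an oversized request is still rejected with -ENOMEM). A stand-alone sketch of that sizing arithmetic, with hypothetical limits and local helper macros in place of the kernel's max_t()/min_t():

/* Sketch of the shared-ring sizing done by bnxt_set_channels() when
 * BNXT_FLAG_SHARED_RINGS is set; max_rx/max_tx model what
 * bnxt_get_max_rings() reports for a given device.
 */
#include <stdio.h>

#define MAX_OF(a, b)	((a) > (b) ? (a) : (b))
#define MIN_OF(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int max_rx = 8, max_tx = 4;	/* hypothetical asymmetric limits */
	int combined = 6;		/* e.g. "ethtool -L <dev> combined 6" */

	/* Advertised limit, as in bnxt_get_channels(). */
	int max_combined = MAX_OF(max_rx, max_tx);

	if (combined > max_combined)
		return 1;		/* the driver returns -ENOMEM here */

	/* Each direction is clamped independently, as in bnxt_set_channels(). */
	int rx_nr_rings = MIN_OF(combined, max_rx);
	int tx_nr_rings_per_tc = MIN_OF(combined, max_tx);

	printf("combined=%d -> rx=%d, tx per TC=%d (max_combined=%d)\n",
	       combined, rx_nr_rings, tx_nr_rings_per_tc, max_combined);
	return 0;
}
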
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 82bf44ab811b..cad30ddc6936 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -11,6 +11,7 @@
#define __BNXT_FW_HDR_H__
#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */
enum SUPPORTED_FAMILY {
DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
@@ -85,7 +86,7 @@ enum SUPPORTED_MEDIA {
struct bnxt_fw_header {
__le32 signature; /* contains the constant value of
- * BNXT_Firmware_Bin_Signatures
+ * BNXT_FIRMWARE_BIN_SIGNATURE
*/
u8 flags; /* reserved for ChiMP use */
u8 code_type; /* enum SUPPORTED_CODE */
@@ -102,4 +103,17 @@ struct bnxt_fw_header {
u8 major_ver;
};
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+ u8 rsa_sig[256];
+ __le16 flags;
+ u8 version_format;
+ u8 version_length;
+ u8 version[16];
+ __le16 dir_type;
+ __le16 trailer_length;
+ __le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */
+ __le32 chksum; /* CRC-32 */
+};
+
#endif
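
struct bnxt_ucode_trailer added above describes the block that bnxt_flash_microcode() (see the bnxt_ethtool.c hunks earlier) reads back from the tail of a microcode image: it checks the "Trlr" signature and directory type, then verifies the CRC-32 stored in the image's last four bytes against a CRC computed over everything before them. A hedged user-space sketch of the same validation follows; it assumes a little-endian host, copies the field layout from the struct above, and uses zlib's crc32() (link with -lz), which should produce the same standard CRC-32 as the driver's ~crc32(~0, ...) expression.

/* Sketch only: host-side check of the trailer validated by
 * bnxt_flash_microcode().  Not the driver code itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define UCODE_TRAILER_SIGNATURE	0x726c7254	/* "Trlr", as in bnxt_fw_hdr.h */

struct ucode_trailer {			/* mirrors struct bnxt_ucode_trailer */
	uint8_t  rsa_sig[256];
	uint16_t flags;
	uint8_t  version_format;
	uint8_t  version_length;
	uint8_t  version[16];
	uint16_t dir_type;
	uint16_t trailer_length;
	uint32_t sig;			/* UCODE_TRAILER_SIGNATURE */
	uint32_t chksum;		/* CRC-32 over the rest of the image */
} __attribute__((packed));

/* Returns 0 when the image ends in a valid trailer with a matching CRC. */
static int check_ucode_image(const uint8_t *data, size_t size)
{
	struct ucode_trailer tr;
	uint32_t stored, calc;

	if (size < sizeof(tr))
		return -1;		/* driver: "Invalid microcode file size" */
	memcpy(&tr, data + size - sizeof(tr), sizeof(tr));

	if (tr.sig != UCODE_TRAILER_SIGNATURE)
		return -1;		/* driver: bad trailer signature */

	/* The last four bytes hold the CRC; it covers everything before them. */
	memcpy(&stored, data + size - sizeof(stored), sizeof(stored));
	calc = (uint32_t)crc32(0L, data, size - sizeof(stored));

	return calc == stored ? 0 : -1;
}

int main(void)
{
	/* Placeholder image; a real caller would read() or mmap() the .bin file. */
	uint8_t dummy[sizeof(struct ucode_trailer)] = { 0 };

	return check_ucode_image(dummy, sizeof(dummy)) ? 1 : 0;
}
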
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 517567f6d651..04a96cc3498a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -39,7 +39,7 @@ struct eject_cmpl {
__le16 type;
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
__le16 len;
__le32 opaque;
__le32 v;
@@ -52,7 +52,7 @@ struct hwrm_cmpl {
__le16 type;
#define HWRM_CMPL_TYPE_MASK 0x3fUL
#define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
@@ -65,7 +65,7 @@ struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
#define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
@@ -81,7 +81,7 @@ struct hwrm_fwd_resp_cmpl {
__le16 type;
#define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
@@ -96,25 +96,26 @@ struct hwrm_async_event_cmpl {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
@@ -130,9 +131,9 @@ struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
@@ -156,9 +157,9 @@ struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
@@ -176,9 +177,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
@@ -200,8 +201,7 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -211,9 +211,9 @@ struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
@@ -231,9 +231,9 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
@@ -258,9 +258,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
@@ -278,9 +278,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
@@ -300,9 +300,9 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
@@ -320,9 +320,9 @@ struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
@@ -340,9 +340,9 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
@@ -362,9 +362,9 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
@@ -384,9 +384,9 @@ struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
@@ -404,9 +404,9 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
@@ -424,9 +424,9 @@ struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
@@ -443,9 +443,9 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
@@ -465,15 +465,15 @@ struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
@@ -485,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.3.0 */
+/* HW Resource Manager Specification 1.5.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 3
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 5
+#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_STR "1.3.0"
+#define HWRM_VERSION_STR "1.5.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it to the size of the field if needed.
@@ -556,8 +556,8 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
- #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
@@ -574,6 +574,7 @@ struct cmd_nums {
#define HWRM_VNIC_RSS_QCFG (0x47UL)
#define HWRM_VNIC_PLCMODES_CFG (0x48UL)
#define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
#define HWRM_RING_ALLOC (0x50UL)
#define HWRM_RING_FREE (0x51UL)
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
@@ -581,13 +582,15 @@ struct cmd_nums {
#define HWRM_RING_RESET (0x5eUL)
#define HWRM_RING_GRP_ALLOC (0x60UL)
#define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
#define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED3 (0x94UL)
+ #define RESERVED4 (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -607,6 +610,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
#define HWRM_FW_RESET (0xc0UL)
#define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
#define HWRM_EXEC_FWD_RESP (0xd0UL)
#define HWRM_REJECT_FWD_RESP (0xd1UL)
#define HWRM_FWD_RESP (0xd2UL)
@@ -615,11 +620,13 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_ALLOC (0xf0UL)
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
#define HWRM_NVM_MODIFY (0xfff4UL)
#define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
#define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
@@ -824,7 +831,9 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_min;
u8 netctrl_fw_bld;
u8 netctrl_fw_rsvd;
- __le32 reserved1;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -839,9 +848,9 @@ struct hwrm_ver_get_output {
u8 chip_metal;
u8 chip_bond_id;
u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
@@ -863,10 +872,10 @@ struct hwrm_func_reset_input {
#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
__le16 vf_id;
u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
u8 unused_0;
};
@@ -1028,6 +1037,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1047,9 +1060,8 @@ struct hwrm_func_qcaps_output {
__le32 max_mcast_filters;
__le32 max_flow_id;
__le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
u8 unused_0;
- u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -1077,6 +1089,7 @@ struct hwrm_func_qcfg_output {
__le16 flags;
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1089,29 +1102,46 @@ struct hwrm_func_qcfg_output {
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
u8 unused_0;
__le16 dflt_vnic_id;
u8 unused_1;
u8 unused_2;
__le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
u8 unused_3;
- __le16 unused_4;
+ __le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
+ __le16 alloc_sp_tx_rings;
+ u8 unused_4;
u8 valid;
};
@@ -1171,18 +1201,36 @@ struct hwrm_func_cfg_input {
__le16 dflt_vlan;
__be32 dflt_ip_addr[4];
__le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
__le16 async_event_cr;
u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
u8 allowed_vlan_pris;
u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
u8 unused_2;
__le16 num_mcast_filters;
};
@@ -1341,16 +1389,16 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
__le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1415,13 +1463,13 @@ struct hwrm_func_buf_rgtr_input {
__le16 vf_id;
__le16 req_buf_num_pages;
__le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
__le16 req_buf_len;
__le16 resp_buf_len;
u8 unused_0;
@@ -1473,16 +1521,16 @@ struct hwrm_func_drv_qver_output {
__le16 seq_id;
__le16 resp_len;
__le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1528,44 +1576,44 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1582,12 +1630,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1641,25 +1689,25 @@ struct hwrm_port_phy_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
u8 unused_0;
__le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
u8 duplex;
- #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
@@ -1679,39 +1727,39 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1728,46 +1776,46 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
@@ -1796,11 +1844,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
@@ -1859,7 +1907,7 @@ struct hwrm_port_mac_cfg_input {
__le64 resp_addr;
__le32 flags;
#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
@@ -1868,28 +1916,50 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
__le16 port_id;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
- u8 ivlan_pri2cos_map_pri;
- u8 lcos_map_pri;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
__le16 rx_ts_capture_ptp_msg_type;
__le16 tx_ts_capture_ptp_msg_type;
- __le32 unused_0;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
};
/* Output (16 bytes) */
@@ -1902,9 +1972,9 @@ struct hwrm_port_mac_cfg_output {
__le16 mtu;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
u8 unused_0;
u8 valid;
};
@@ -2163,8 +2233,8 @@ struct hwrm_queue_qportcfg_input {
__le64 resp_addr;
__le32 flags;
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
@@ -2179,50 +2249,51 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_queues;
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
- u8 queue_buffers_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
u8 queue_id0;
u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id1;
u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id2;
u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id3;
u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id4;
u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id5;
u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id6;
u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id7;
u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 valid;
};
@@ -2235,19 +2306,21 @@ struct hwrm_queue_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
__le32 queue_id;
__le32 dflt_len;
u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 unused_0[7];
};
@@ -2264,50 +2337,6 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
-/* hwrm_queue_buffers_cfg */
-/* Input (56 bytes) */
-struct hwrm_queue_buffers_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
- __le32 enables;
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL
- __le32 queue_id;
- __le32 reserved;
- __le32 shared;
- __le32 xoff;
- __le32 xon;
- __le32 full;
- __le32 notfull;
- __le32 max;
-};
-
-/* Output (16 bytes) */
-struct hwrm_queue_buffers_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2351,12 +2380,22 @@ struct hwrm_queue_pri2cos_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
__le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
u8 port_id;
u8 pri0_cos_queue_id;
u8 pri1_cos_queue_id;
@@ -2404,82 +2443,226 @@ struct hwrm_queue_cos2bw_cfg_input {
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
__le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
__le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
__le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
__le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
__le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
__le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
__le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
@@ -2563,6 +2746,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2615,18 +2799,18 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
__le16 vnic_id;
__le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
__le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
u8 unused_0;
u8 unused_1;
__le32 max_agg_timer;
@@ -2780,15 +2964,15 @@ struct hwrm_ring_alloc_input {
__le64 resp_addr;
__le32 enables;
#define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -2804,18 +2988,36 @@ struct hwrm_ring_alloc_input {
u8 unused_4;
u8 unused_5;
__le32 reserved1;
- __le16 reserved2;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
u8 unused_6;
u8 unused_7;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
__le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
u8 unused_8[3];
};
@@ -2842,9 +3044,9 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2942,9 +3144,9 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3068,36 +3270,36 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 t_l2_ivlan;
__le16 t_l2_ivlan_mask;
u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
u8 unused_6;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
__le16 mirror_vnic_id;
u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
u8 unused_8;
__le32 unused_9;
__le64 l2_filter_id_hint;
@@ -3246,16 +3448,16 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
__le32 dst_vnic_id;
@@ -3311,14 +3513,14 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
__le32 encap_data[16];
@@ -3397,32 +3599,32 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
u8 src_macaddr[6];
__be16 ethertype;
u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
__be32 src_ipaddr[4];
__be32 src_ipaddr_mask[4];
__be32 dst_ipaddr[4];
@@ -3511,8 +3713,8 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0[7];
};
@@ -3539,8 +3741,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__be16 tunnel_dst_port_val;
__le32 unused_1;
@@ -3570,8 +3772,8 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -3720,15 +3922,15 @@ struct hwrm_fw_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
__le16 unused_0[3];
};
@@ -3739,9 +3941,9 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3759,11 +3961,11 @@ struct hwrm_fw_qstatus_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 unused_0[7];
};
@@ -3774,9 +3976,9 @@ struct hwrm_fw_qstatus_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3785,6 +3987,42 @@ struct hwrm_fw_qstatus_output {
u8 valid;
};
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -3921,32 +4159,6 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_raw_write_blk */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_write_blk_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le32 dest_addr;
- __le32 len;
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_raw_write_blk_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -4132,9 +4344,9 @@ struct hwrm_nvm_find_dir_entry_input {
u8 opt_ordinal;
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
u8 unused_1[3];
};
@@ -4266,4 +4478,41 @@ struct hwrm_nvm_verify_update_output {
u8 valid;
};
+/* hwrm_nvm_install_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 50d2007a2640..ec6cd18842c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -19,6 +19,45 @@
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf, u16 event_id)
+{
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_async_event_cmpl *async_cmpl;
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -135,7 +174,8 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -146,6 +186,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -243,8 +286,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
rc = -EINVAL;
break;
}
- /* CHIMP TODO: send msg to VF to update new link state */
-
+ if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -525,46 +569,6 @@ err_out1:
return rc;
}
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
- struct bnxt_vf_info *vf,
- u16 event_id)
-{
- int rc = 0;
- struct hwrm_fwd_async_event_cmpl_input req = {0};
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_async_event_cmpl *async_cmpl;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
- if (vf)
- req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
- else
- /* broadcast this async event to all VFs */
- req.encap_async_event_target_id = cpu_to_le16(0xffff);
- async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
- async_cmpl->event_id = cpu_to_le16(event_id);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
- rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 0392670ab49c..1ab72e4820af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -12,7 +12,7 @@
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
-int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 541456398dfb..4464bc5db934 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,8 +450,8 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -461,11 +461,11 @@ static int bcmgenet_get_settings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(priv->phydev, cmd);
}
-static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -475,7 +475,7 @@ static int bcmgenet_set_settings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -979,12 +979,10 @@ static int bcmgenet_nway_reset(struct net_device *dev)
}
/* standard ethtool support functions. */
-static struct ethtool_ops bcmgenet_ethtool_ops = {
+static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
- .get_settings = bcmgenet_get_settings,
- .set_settings = bcmgenet_set_settings,
.get_drvinfo = bcmgenet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = bcmgenet_get_msglevel,
@@ -996,6 +994,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_link_ksettings = bcmgenet_get_link_ksettings,
+ .set_link_ksettings = bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
@@ -2669,128 +2669,6 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
-static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
- u32 f_index, u32 rx_queue)
-{
- u32 offset;
- u32 reg;
-
- offset = f_index / 8;
- reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
- reg &= ~(0xF << (4 * (f_index % 8)));
- reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
- bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
-}
-
-static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
- u32 f_index, u32 f_length)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg &= ~(0xFF << (8 * (f_index % 4)));
- reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
-{
- int f_index;
- u32 i;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
-
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- for (i = 0; i < f_length; i++)
- bcmgenet_hfb_writel(priv, f_data[i],
- (f_index * priv->hw_params->hfb_filter_size + i) *
- sizeof(u32));
-
- bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
- bcmgenet_hfb_enable_filter(priv, f_index);
- bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
-
- return 0;
-}
-
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ea967df4b202..a927a730da10 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12079,95 +12079,107 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return ret;
}
-static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
+ u32 supported, advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
+ supported = (SUPPORTED_Autoneg);
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- cmd->supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP);
- cmd->port = PORT_TP;
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP);
+ cmd->base.port = PORT_TP;
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
- cmd->advertising = tp->link_config.advertising;
+ advertising = tp->link_config.advertising;
if (tg3_flag(tp, PAUSE_AUTONEG)) {
if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
} else {
- cmd->advertising |= ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
}
} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
}
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (netif_running(dev) && tp->link_up) {
- ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
- cmd->duplex = tp->link_config.active_duplex;
- cmd->lp_advertising = tp->link_config.rmt_adv;
+ cmd->base.speed = tp->link_config.active_speed;
+ cmd->base.duplex = tp->link_config.active_duplex;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.lp_advertising,
+ tp->link_config.rmt_adv);
+
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
- }
- cmd->phy_address = tp->phy_addr;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = tp->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ }
+ cmd->base.phy_address = tp->phy_addr;
+ cmd->base.autoneg = tp->link_config.autoneg;
return 0;
}
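
The conversion above leans on ethtool_convert_legacy_u32_to_link_mode(), which drops the old 32-bit SUPPORTED_*/ADVERTISED_* mask into the low word of the new link-mode bitmap. A rough standalone sketch of that idea; the bitmap width and helper below are assumptions for illustration, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LINK_MODE_NBITS	96	/* assumed bitmap width for the sketch */
#define BITMAP_LONGS(n)	(((n) + 8 * sizeof(unsigned long) - 1) / \
			 (8 * sizeof(unsigned long)))

/* Zero the bitmap, then place the legacy bits at link modes 0..31. */
static void legacy_u32_to_link_mode(unsigned long *dst, uint32_t legacy)
{
	memset(dst, 0, BITMAP_LONGS(LINK_MODE_NBITS) * sizeof(*dst));
	dst[0] = legacy;
}

int main(void)
{
	unsigned long modes[BITMAP_LONGS(LINK_MODE_NBITS)];
	uint32_t supported = (1u << 5) | (1u << 6);	/* e.g. two legacy mode bits */

	legacy_u32_to_link_mode(modes, supported);
	printf("link_modes word 0 = 0x%lx\n", modes[0]);
	return 0;
}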
-static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- cmd->duplex != DUPLEX_FULL &&
- cmd->duplex != DUPLEX_HALF)
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_HALF)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 mask = ADVERTISED_Autoneg |
ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
@@ -12185,7 +12197,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else
mask |= ADVERTISED_FIBRE;
- if (cmd->advertising & ~mask)
+ if (advertising & ~mask)
return -EINVAL;
mask &= (ADVERTISED_1000baseT_Half |
@@ -12195,13 +12207,13 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- cmd->advertising &= mask;
+ advertising &= mask;
} else {
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
if (speed != SPEED_1000)
return -EINVAL;
- if (cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
if (speed != SPEED_100 &&
@@ -12212,16 +12224,16 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tg3_full_lock(tp, 0);
- tp->link_config.autoneg = cmd->autoneg;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- tp->link_config.advertising = (cmd->advertising |
+ tp->link_config.autoneg = cmd->base.autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = (advertising |
ADVERTISED_Autoneg);
tp->link_config.speed = SPEED_UNKNOWN;
tp->link_config.duplex = DUPLEX_UNKNOWN;
} else {
tp->link_config.advertising = 0;
tp->link_config.speed = speed;
- tp->link_config.duplex = cmd->duplex;
+ tp->link_config.duplex = cmd->base.duplex;
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -14094,8 +14106,6 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
- .get_settings = tg3_get_settings,
- .set_settings = tg3_set_settings,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -14128,6 +14138,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_ts_info = tg3_get_ts_info,
.get_eee = tg3_get_eee,
.set_eee = tg3_set_eee,
+ .get_link_ksettings = tg3_get_link_ksettings,
+ .set_link_ksettings = tg3_set_link_ksettings,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 771cc267f217..f9df4b5ae90e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -54,9 +54,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
* Global variables
*/
static u32 bnad_rxqs_per_cq = 2;
-static u32 bna_id;
-static struct mutex bnad_list_mutex;
-static LIST_HEAD(bnad_list);
+static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -76,23 +74,6 @@ do { \
(_res_info)->res_u.mem_info.len = (_size); \
} while (0)
-static void
-bnad_add_to_list(struct bnad *bnad)
-{
- mutex_lock(&bnad_list_mutex);
- list_add_tail(&bnad->list_entry, &bnad_list);
- bnad->id = bna_id++;
- mutex_unlock(&bnad_list_mutex);
-}
-
-static void
-bnad_remove_from_list(struct bnad *bnad)
-{
- mutex_lock(&bnad_list_mutex);
- list_del(&bnad->list_entry);
- mutex_unlock(&bnad_list_mutex);
-}
-
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -3573,14 +3554,12 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
- mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
- mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3653,7 +3632,7 @@ bnad_pci_probe(struct pci_dev *pdev,
}
bnad = netdev_priv(netdev);
bnad_lock_init(bnad);
- bnad_add_to_list(bnad);
+ bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
/*
@@ -3807,7 +3786,6 @@ pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
- bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3845,7 +3823,6 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
- bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
/* Remove the debugfs node for this bnad */
kfree(bnad->regdata);
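
The bnad change above replaces a mutex-protected global list and counter with a bare atomic: atomic_inc_return() hands back the post-increment value, so subtracting one yields 0, 1, 2, ... A standalone sketch of the same allocation pattern using C11 atomics, where fetch_add returns the pre-increment value and no subtraction is needed:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint bna_id;	/* zero-initialized, like a static kernel atomic_t */

static unsigned int alloc_id(void)
{
	/* Returns the value before the increment: 0, 1, 2, ... */
	return atomic_fetch_add(&bna_id, 1);
}

int main(void)
{
	unsigned int first = alloc_id();
	unsigned int second = alloc_id();

	printf("ids: %u %u\n", first, second);	/* ids: 0 1 */
	return 0;
}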
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f4ed816b93ee..46f7b842b39c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -288,7 +288,6 @@ struct bnad_rx_unmap_q {
struct bnad {
struct net_device *netdev;
u32 id;
- struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d954a97b0b0b..63144bb413d1 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -541,6 +541,14 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
}
+static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+{
+ desc->addr = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ desc->addrh = (u32)(addr >> 32);
+#endif
+}
+
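
macb_set_addr() above splits a 64-bit DMA address across the descriptor's addr/addrh words when CONFIG_ARCH_DMA_ADDR_T_64BIT is set, and gem_rx() later rebuilds it the same way. A standalone sketch of the split and reassembly; the struct mirrors macb_dma_desc but is only illustrative:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct dma_desc {
	uint32_t addr;	/* bits 31:0 of the buffer address */
	uint32_t ctrl;
	uint32_t addrh;	/* bits 63:32 of the buffer address */
	uint32_t resvd;
};

static void set_addr(struct dma_desc *desc, uint64_t addr)
{
	desc->addr  = (uint32_t)addr;
	desc->addrh = (uint32_t)(addr >> 32);
}

int main(void)
{
	struct dma_desc d = { 0 };
	uint64_t paddr = 0xF123456780ULL;	/* sample 44-bit address */

	set_addr(&d, paddr);
	printf("addr=0x%08x addrh=0x%08x rebuilt=0x%" PRIx64 "\n",
	       d.addr, d.addrh, ((uint64_t)d.addrh << 32) | d.addr);
	return 0;
}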
static void macb_tx_error_task(struct work_struct *work)
{
struct macb_queue *queue = container_of(work, struct macb_queue,
@@ -621,14 +629,17 @@ static void macb_tx_error_task(struct work_struct *work)
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
- desc->addr = 0;
+ macb_set_addr(desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- queue_writel(queue, TBQP, queue->tx_ring_dma);
+ queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -750,7 +761,7 @@ static void gem_rx_refill(struct macb *bp)
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
- bp->rx_ring[entry].addr = paddr;
+ macb_set_addr(&(bp->rx_ring[entry]), paddr);
bp->rx_ring[entry].ctrl = 0;
/* properly align Ethernet header */
@@ -798,7 +809,9 @@ static int gem_rx(struct macb *bp, int budget)
int count = 0;
while (count < budget) {
- u32 addr, ctrl;
+ u32 ctrl;
+ dma_addr_t addr;
+ bool rxused;
entry = macb_rx_ring_wrap(bp->rx_tail);
desc = &bp->rx_ring[entry];
@@ -806,10 +819,14 @@ static int gem_rx(struct macb *bp, int budget)
/* Make hw descriptor updates visible to CPU */
rmb();
- addr = desc->addr;
+ rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ addr |= ((u64)(desc->addrh) << 32);
+#endif
ctrl = desc->ctrl;
- if (!(addr & MACB_BIT(RX_USED)))
+ if (!rxused)
break;
bp->rx_tail++;
@@ -835,7 +852,6 @@ static int gem_rx(struct macb *bp, int budget)
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
skb_put(skb, len);
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
bp->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1299,7 +1315,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BIT(TX_WRAP);
/* Set TX buffer descriptor */
- desc->addr = tx_skb->mapping;
+ macb_set_addr(desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1382,7 +1398,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (macb_clear_csum(skb)) {
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
@@ -1445,6 +1461,9 @@ static void gem_free_rx_buffers(struct macb *bp)
desc = &bp->rx_ring[i];
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ addr |= ((u64)(desc->addrh) << 32);
+#endif
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1570,7 +1589,7 @@ static void gem_init_rings(struct macb *bp)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
for (i = 0; i < TX_RING_SIZE; i++) {
- queue->tx_ring[i].addr = 0;
+ macb_set_addr(&(queue->tx_ring[i]), 0);
queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
}
queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
@@ -1717,6 +1736,10 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BIT(TXCOEN);
else
dmacfg &= ~GEM_BIT(TXCOEN);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dmacfg |= GEM_BIT(ADDR64);
+#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -1762,9 +1785,15 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, TBQP, queue->tx_ring_dma);
+ queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+#endif
/* Enable interrupts */
queue_writel(queue, IER,
@@ -2326,7 +2355,8 @@ static void macb_probe_queues(void __iomem *mem,
}
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk)
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk)
{
int err;
@@ -2348,6 +2378,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (IS_ERR(*tx_clk))
*tx_clk = NULL;
+ *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
@@ -2366,8 +2400,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
goto err_disable_hclk;
}
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
return 0;
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+
err_disable_hclk:
clk_disable_unprepare(*hclk);
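
The new rx_clk handling follows the usual goto-unwind convention: each successfully enabled clock gets its own error label, and a failure releases everything already enabled in reverse order. A standalone sketch of the pattern, with enable_clk()/disable_clk() standing in for clk_prepare_enable()/clk_disable_unprepare():

#include <stdio.h>

static int enable_clk(const char *name) { printf("enable %s\n", name); return 0; }
static void disable_clk(const char *name) { printf("disable %s\n", name); }

static int clk_init(void)
{
	int err;

	err = enable_clk("pclk");
	if (err)
		return err;
	err = enable_clk("hclk");
	if (err)
		goto err_disable_pclk;
	err = enable_clk("tx_clk");
	if (err)
		goto err_disable_hclk;
	err = enable_clk("rx_clk");
	if (err)
		goto err_disable_txclk;
	return 0;

err_disable_txclk:
	disable_clk("tx_clk");
err_disable_hclk:
	disable_clk("hclk");
err_disable_pclk:
	disable_clk("pclk");
	return err;
}

int main(void)
{
	return clk_init();
}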
@@ -2402,6 +2445,9 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue->TBQPH = GEM_TBQPH(hw_q - 1);

+#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -2409,6 +2455,9 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue->TBQPH = MACB_TBQPH;
+#endif
}
/* get irq: here we use the linux queue index, not the hardware
@@ -2751,12 +2800,14 @@ static const struct net_device_ops at91ether_netdev_ops = {
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk)
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk)
{
int err;
*hclk = NULL;
*tx_clk = NULL;
+ *rx_clk = NULL;
*pclk = devm_clk_get(&pdev->dev, "ether_clk");
if (IS_ERR(*pclk))
@@ -2880,13 +2931,13 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static int macb_probe(struct platform_device *pdev)
{
int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **)
+ struct clk **, struct clk **, struct clk **)
= macb_clk_init;
int (*init)(struct platform_device *) = macb_init;
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
const struct macb_config *macb_config = NULL;
- struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
+ struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
bool native_io;
@@ -2914,7 +2965,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk);
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
if (err)
return err;
@@ -2950,6 +3001,7 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
+ bp->rx_clk = rx_clk;
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
@@ -2958,6 +3010,11 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+#endif
+
spin_lock_init(&bp->lock);
/* setup capabilities */
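
The new probe code requests a 44-bit DMA mask when the design reports a bus wider than 32 bits. For reference, DMA_BIT_MASK(n) evaluates to a mask with the low n bits set; a small standalone sketch, with the macro reproduced from memory of the kernel definition and therefore best treated as an approximation:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(44) = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(44));
	printf("DMA_BIT_MASK(32) = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(32));
	return 0;
}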
@@ -2968,7 +3025,7 @@ static int macb_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
err = dev->irq;
- goto err_disable_clocks;
+ goto err_out_free_netdev;
}
mac = of_get_mac_address(np);
@@ -3043,6 +3100,7 @@ err_disable_clocks:
clk_disable_unprepare(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
+ clk_disable_unprepare(rx_clk);
return err;
}
@@ -3069,6 +3127,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
free_netdev(dev);
}
@@ -3092,6 +3151,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
}
return 0;
@@ -3111,6 +3171,7 @@ static int __maybe_unused macb_resume(struct device *dev)
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
+ clk_prepare_enable(bp->rx_clk);
}
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index b6fcf10621b6..8bed4b52fef5 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -66,6 +66,8 @@
#define MACB_USRIO 0x00c0
#define MACB_WOL 0x00c4
#define MACB_MID 0x00fc
+#define MACB_TBQPH 0x04C8
+#define MACB_RBQPH 0x04D4
/* GEM register offsets. */
#define GEM_NCFGR 0x0004 /* Network Config */
@@ -139,6 +141,7 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
@@ -249,6 +252,8 @@
#define GEM_RXBS_SIZE 8
#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
#define GEM_DDRP_SIZE 1
+#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
+#define GEM_ADDR64_SIZE 1
/* Bitfields in NSR */
@@ -474,6 +479,10 @@
struct macb_dma_desc {
u32 addr;
u32 ctrl;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u32 addrh;
+ u32 resvd;
+#endif
};
/* DMA descriptor bitfields */
@@ -763,7 +772,8 @@ struct macb_config {
u32 caps;
unsigned int dma_burst_length;
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
- struct clk **hclk, struct clk **tx_clk);
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk);
int (*init)(struct platform_device *pdev);
int jumbo_max_len;
};
@@ -777,6 +787,7 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
+ unsigned int TBQPH;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
@@ -809,6 +820,7 @@ struct macb {
struct clk *pclk;
struct clk *hclk;
struct clk *tx_clk;
+ struct clk *rx_clk;
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 0ef232d3331e..92f411c9f0df 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -36,10 +36,20 @@ config THUNDER_NIC_BGX
depends on 64BIT
select PHYLIB
select MDIO_THUNDER
+ select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
+config THUNDER_NIC_RGX
+ tristate "Thunder MAC interface driver (RGX)"
+ depends on 64BIT
+ select PHYLIB
+ select MDIO_THUNDER
+ ---help---
+ This driver supports configuring the XCV block of the RGX
+ interface present on the CN81XX chip.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
@@ -48,7 +58,7 @@ config LIQUIDIO
select LIBCRC32C
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
- based on CN66XX and CN68XX chips.
+ based on CN66XX, CN68XX and CN23XX chips.
To compile this driver as a module, choose M here: the module
will be called liquidio. This is recommended.
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index 2f366806835d..5a27b2a44039 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -3,14 +3,16 @@
#
obj-$(CONFIG_LIQUIDIO) += liquidio.o
-liquidio-objs := lio_main.o \
- lio_ethtool.o \
- request_manager.o \
- response_manager.o \
- octeon_device.o \
- cn66xx_device.o \
- cn68xx_device.o \
- octeon_mem_ops.o \
- octeon_droq.o \
- octeon_console.o \
- octeon_nic.o
+liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
+ lio_core.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ cn23xx_pf_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_nic.o
+
+liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
new file mode 100644
index 000000000000..bddb198c0b74
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -0,0 +1,1237 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_pf_device.h"
+#include "octeon_main.h"
+
+#define RESET_NOTDONE 0
+#define RESET_DONE 1
+
+/* Change the value of SLI Packet Input Jabber Register to allow
+ * VXLAN TSO packets which can be 64424 bytes, exceeding the
+ * MAX_GSO_SIZE we supplied to the kernel
+ */
+#define CN23XX_INPUT_JABBER 64600
+
+#define LIOLUT_RING_DISTRIBUTION 9
+const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
+ 0, 8, 4, 2, 2, 2, 1, 1, 1
+};
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
+{
+ int i = 0;
+ u32 regval = 0;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ /*In cn23xx_soft_reset*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
+ "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
+
+ /*In cn23xx_set_dpi_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
+ lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_ENB", i,
+ CN23XX_DPI_DMA_ENG_ENB(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_BUF", i,
+ CN23XX_DPI_DMA_ENG_BUF(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
+ CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
+
+ /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_CONFIG_PCIE_DEVCTL",
+ CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
+
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
+ CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
+
+ /*In cn23xx_specific_regs_setup */
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
+ CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
+ (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+
+ /*In cn23xx_setup_global_mac_regs*/
+ for (i = 0; i < CN23XX_MAX_MACS; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_MAC_RINFO64", i,
+ CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64
+ (i, oct->pf_num))));
+ }
+
+ /*In cn23xx_setup_global_input_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_PKT_CONTROL64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
+ }
+
+ /*In cn23xx_setup_global_output_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_CONTROL", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
+ }
+
+ /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_enb_reg64",
+ CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_sum_reg64",
+ CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
+
+ /*In cn23xx_setup_iq_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_IQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_DOORBELL", i,
+ CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_DOORBELL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_INSTR_COUNT64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
+ }
+
+ /*In cn23xx_setup_oq_regs*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_OQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_SENT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_CREDIT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_TIME_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_CNT_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
+}
+
+static int cn23xx_pf_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
+ oct->octeon_id);
+
+ octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
+
+ /* Initiate chip-wide soft reset */
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
+ oct->octeon_id);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
+ oct->octeon_id);
+
+ /* restore the reset value*/
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+static void cn23xx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 regval;
+ u32 uncorrectable_err_mask, corrtable_err_status;
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
+ uncorrectable_err_mask = 0;
+ corrtable_err_status = 0;
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
+ &uncorrectable_err_mask);
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
+ &corrtable_err_status);
+ dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
+ "\tdev_ctl_status_reg = 0x%08x\n"
+ "\tuncorrectable_error_mask_reg = 0x%08x\n"
+ "\tcorrectable_error_status_reg = 0x%08x\n",
+ regval, uncorrectable_err_mask,
+ corrtable_err_status);
+ }
+
+ regval |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
+ oct->octeon_id);
+ pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
+
+static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref.clock MULTIPLIER
+ * for SLI.
+ */
+
+ /* TBD: get the info in Hand-shake */
+ return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
+}
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
+
+ oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next two steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
+
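
cn23xx_pf_get_oq_ticks() above converts a coalescing interval in microseconds into OQ ticks of 1024 SLI clock cycles each. A standalone sketch of the same integer arithmetic; the 800 MHz clock is a hypothetical stand-in for the value derived from RST_BOOT[PNR_MUL] * 50:

#include <stdint.h>
#include <stdio.h>

static uint32_t oq_ticks(uint32_t sli_clock_mhz, uint32_t time_intr_in_us)
{
	uint32_t ticks = sli_clock_mhz;	/* clock cycles per microsecond */

	ticks *= 1000;			/* clock cycles per millisecond */
	ticks /= 1024;			/* OQ ticks (1024 cycles each) per ms */
	ticks *= time_intr_in_us;	/* scale by the requested interval */
	ticks /= 1000;			/* back from per-ms to the us interval */
	return ticks;
}

int main(void)
{
	/* 800 MHz clock, 100 us coalescing interval -> 78 ticks */
	printf("%u\n", oq_ticks(800, 100));
	return 0;
}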
+static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+
+ /* programming SRN and TRS for each MAC(0..3) */
+
+ dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
+ __func__, mac_no);
+ /* By default, map all 64 IOQs to a single MAC */
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
+
+ if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ } else {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
+ }
+
+ /* setting TRS <23:16> */
+ reg_val = reg_val |
+ (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+ /* write these settings to MAC register */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
+ reg_val);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
+ mac_no, pf_num, (u64)octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
+}
+
+static int cn23xx_reset_io_queues(struct octeon_device *oct)
+{
+ int ret_val = 0;
+ u64 d64;
+ u32 q_no, srn, ern;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ /* As per the HRM register description, s/w can't write 0 to ENB; */
+ /* to turn the queue off, the RST bit must be set. */
+
+ /* Reset the Enable bit for all the 64 IQs. */
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and quiet bits are set */
+ for (q_no = srn; q_no < ern; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
+{
+ u32 q_no, ern, srn;
+ u64 pf_num;
+ u64 intr_threshold, reg_val;
+ struct octeon_instr_queue *iq;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ pf_num = oct->pf_num;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (cn23xx_reset_io_queues(oct))
+ return -1;
+
+ /** Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
+ * for all queues. Only the PF can set these bits.
+ * bits 29:30 indicate the MAC num.
+ * bits 32:47 indicate the PVF num.
+ */
+ for (q_no = 0; q_no < ern; q_no++) {
+ reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+ reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format #0 for
+ * PF queues
+ */
+ for (q_no = srn; q_no < ern; q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ iq = oct->instr_queue[q_no];
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ /* Set WMARK level for triggering PI_INT */
+ /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
+
+static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no, ern, srn;
+ u64 time_threshold;
+
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
+ } else {
+ /** Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
+ }
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue ScatterList
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+
+ /* These interrupts are enabled in the oct->fn_list.enable_interrupt()
+ * routine, which is called after IOQ init.
+ * Set up interrupt packet and time thresholds
+ * for all the OQs.
+ */
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ (time_threshold << 32)));
+ }
+
+ /** Set the watermark level for PKO back pressure **/
+ writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
+
+ /** Disable putting OQs into reset when the ring has no doorbells;
+ * enabling this would cause head-of-line blocking.
+ */
+ /* Do it only for pass 1.1 and pass 1.2 */
+ if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
+ (oct->rev_id == OCTEON_CN23XX_REV_1_1))
+ writeq(readq((u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_GBL_CONTROL) | 0x2,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
+
+ /** Enable channel-level backpressure */
+ if (oct->pf_num)
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
+ else
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
+}
+
+static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
+{
+ cn23xx_enable_error_reporting(oct);
+
+ /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ cn23xx_setup_global_mac_regs(oct);
+
+ if (cn23xx_pf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_pf_setup_global_output_regs(oct);
+
+ /* The default error timeout value should be 0x200000 to avoid a host hang
+ * when an invalid register is read.
+ */
+ octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
+ CN23XX_SLI_WINDOW_CTL_DEFAULT);
+
+ /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
+ return 0;
+}
+
+static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ iq_no += oct->sriov_info.pf_srn;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ } else {
+ /* Clear the count by writing back what we read, but don't
+ * enable interrupts
+ */
+ writeq(pkt_in_done, iq->inst_cnt_reg);
+ }
+
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 reg_val;
+ struct octeon_droq *droq = oct->droq[oq_no];
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 time_threshold;
+ u64 cnt_threshold;
+
+ oq_no += oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ if (!oct->msix_on) {
+ /* Enable this output queue to generate Packet Timer Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+
+ /* Enable this output queue to generate Packet Count Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+ } else {
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+ cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
+ ((time_threshold << 32 | cnt_threshold)));
+ }
+}
+
+static int cn23xx_enable_io_queues(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u32 srn, ern, q_no;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
+ /* IOQs are in reset by default in PEM2 mode,
+ * clearing reset bit
+ */
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ reg_val = octeon_read_csr64(
+ oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ }
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = srn; q_no < ern; q_no++) {
+ u32 reg_val;
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+ return 0;
+}
+
+static void cn23xx_disable_io_queues(struct octeon_device *oct)
+{
+ int q_no, loop;
+ u64 d64;
+ u32 d32;
+ u32 srn, ern;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ /*** Disable Input Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* start the Reset for a particular ring */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ WRITE_ONCE(d64, READ_ONCE(d64) &
+ (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
+ WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(d64));
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Input Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
+ while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+ }
+
+ /*** Disable Output Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset. Note that SLI_PKT_RING_RST is
+ * common to both IQs and OQs.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Output Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+ while (octeon_read_csr64(oct,
+ CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ WRITE_ONCE(d32, octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
+ READ_ONCE(d32));
+ }
+}
+
+static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 pkts_sent;
+ u64 ret = 0;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+
+ if (!droq) {
+ dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
+ oct->pf_num, ioq_vector->ioq_num);
+ return 0;
+ }
+
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* If our device has interrupted, then proceed. Also check
+ * for all f's if interrupt was triggered on an error
+ * and the PCI read fails.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write count reg in sli_pkt_cnts to clear these int.*/
+ if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+ (pkts_sent & CN23XX_INTR_PI_INT)) {
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+ }
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ /* We never need to handle the MSI-X mailbox interrupt for the PF; it
+ * arrives on the last MSI-X vector.
+ */
+ return ret;
+}
+
+static irqreturn_t cn23xx_interrupt_handler(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr64;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ intr64 = readq(cn23xx->intr_sum_reg64);
+
+ oct->int_status = 0;
+
+ if (intr64 & CN23XX_INTR_ERR)
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
+ oct->octeon_id, CVM_CAST64(intr64));
+
+ if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
+ if (intr64 & CN23XX_INTR_PKT_DATA)
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & (CN23XX_INTR_DMA0_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+ if (intr64 & (CN23XX_INTR_DMA1_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn23xx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid)
+{
+ u64 bar1;
+ u64 reg_adr;
+
+ if (!valid) {
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ return;
+ }
+
+ /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
+ * bits <41:22> of the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+
+ WRITE_ONCE(bar1, lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
+}
+
+static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
+{
+ lio_pci_writeq(oct, mask,
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+/* always call with lock held */
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 new_idx;
+ u32 last_done;
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Adding the newly completed count to the current read index,
+ * modulo the IQ size, gives the new read index. The
+ * iq->reset_instr_cnt is always zero for cn23xx, so no extra
+ * adjustments are needed.
+ */
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
+
+static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Enable interrupts: all of them with a single write, or
+ * selectively based on the flag.
+ */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Disable Interrupts */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(0, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
+ oct->pcie_port);
+}
+
+static void cn23xx_get_pf_num(struct octeon_device *oct)
+{
+ u32 fdl_bit = 0;
+
+ /* Read the Function Dependency Link reg to get the function number */
+ pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit);
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+}
+
+static void cn23xx_setup_reg_address(struct octeon_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ oct->reg_list.pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
+ oct->reg_list.pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
+ oct->reg_list.pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
+
+ oct->reg_list.pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
+ oct->reg_list.pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
+ oct->reg_list.pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
+
+ oct->reg_list.pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
+ oct->reg_list.pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
+ oct->reg_list.pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
+
+ oct->reg_list.pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
+ oct->reg_list.pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
+ oct->reg_list.pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
+
+ cn23xx_get_pcie_qlmport(oct);
+
+ cn23xx->intr_mask64 = CN23XX_INTR_MASK;
+ if (!oct->msix_on)
+ cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
+ cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
+
+ cn23xx->intr_sum_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ cn23xx->intr_enb_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+}
+
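+/* Decide how many rings the PF uses: capped by the revision-specific
+ * maximum and, unless num_pf_rings was requested, by the number of
+ * present CPUs. With no VF rings carved out yet, the PF's starting
+ * ring number (pf_srn) works out to 0.
+ */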
+static int cn23xx_sriov_config(struct octeon_device *oct)
+{
+ u32 total_rings;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ /* num_vfs is already filled for us */
+ u32 pf_srn, num_pf_rings;
+
+ cn23xx->conf =
+ (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+ switch (oct->rev_id) {
+ case OCTEON_CN23XX_REV_1_0:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ break;
+ case OCTEON_CN23XX_REV_1_1:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ break;
+ default:
+ total_rings = CN23XX_MAX_RINGS_PER_PF;
+ break;
+ }
+ if (!oct->sriov_info.num_pf_rings) {
+ if (total_rings > num_present_cpus())
+ num_pf_rings = num_present_cpus();
+ else
+ num_pf_rings = total_rings;
+ } else {
+ num_pf_rings = oct->sriov_info.num_pf_rings;
+
+ if (num_pf_rings > total_rings) {
+ dev_warn(&oct->pci_dev->dev,
+ "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n",
+ num_pf_rings, total_rings);
+ num_pf_rings = total_rings;
+ }
+ }
+
+ total_rings = num_pf_rings;
+ /* the first ring of the pf */
+ pf_srn = total_rings - num_pf_rings;
+
+ oct->sriov_info.trs = total_rings;
+ oct->sriov_info.pf_srn = pf_srn;
+ oct->sriov_info.num_pf_rings = num_pf_rings;
+ dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n",
+ oct->sriov_info.trs, oct->sriov_info.pf_srn,
+ oct->sriov_info.num_pf_rings);
+ return 0;
+}
+
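+/* Top-level CN23XX PF setup: map BAR0/BAR1, read the PF number,
+ * work out the ring split, fill in the chip-specific fn_list callbacks
+ * and register addresses, and record the coprocessor clock rate.
+ */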
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
+{
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ cn23xx_get_pf_num(oct);
+
+ if (cn23xx_sriov_config(oct)) {
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+ oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
+ oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
+
+ oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
+ oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
+
+ cn23xx_setup_reg_address(oct);
+
+ oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx)
+{
+ if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf23xx),
+ CN23XX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf23xx),
+ CN23XX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+void cn23xx_dump_iq_regs(struct octeon_device *oct)
+{
+ u32 regval, q_no;
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_DOORBELL(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_DOORBELL(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_BASE_ADDR64(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_SIZE(0),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_CTL_STATUS,
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
+
+ for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+ q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CVM_CAST64(octeon_read_csr64
+ (oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
+ }
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+ CN23XX_CONFIG_PCIE_DEVCTL, regval);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ CVM_CAST64(lio_pci_readq(
+ oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+}
+
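+/* Bit 1 of SLI_SCRATCH1 records whether the firmware has been loaded. */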
+int cn23xx_fw_loaded(struct octeon_device *oct)
+{
+ u64 val;
+
+ val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
+ return (val >> 1) & 1ULL;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
new file mode 100644
index 000000000000..21b5c9051967
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -0,0 +1,59 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+ */
+
+#ifndef __CN23XX_PF_DEVICE_H__
+#define __CN23XX_PF_DEVICE_H__
+
+#include "cn23xx_pf_regs.h"
+
+/* Register addresses and configuration for CN23XX devices.
+ * If device-specific changes need to be made, add a struct that
+ * includes the device-specific fields.
+ */
+struct octeon_cn23xx_pf {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+};
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx);
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+
+int cn23xx_fw_loaded(struct octeon_device *oct);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
new file mode 100644
index 000000000000..03d79d95ab75
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -0,0 +1,604 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX devices.
+ */
+
+#ifndef __CN23XX_PF_REGS_H__
+#define __CN23XX_PF_REGS_H__
+
+#define CN23XX_CONFIG_VENDOR_ID 0x00
+#define CN23XX_CONFIG_DEVICE_ID 0x02
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_MSIX_CAP 0x50
+#define CN23XX_CONFIG_MSIX_LMSI 0x54
+#define CN23XX_CONFIG_MSIX_UMSI 0x58
+#define CN23XX_CONFIG_MSIX_MSIMD 0x5C
+#define CN23XX_CONFIG_MSIX_MSIMM 0x60
+#define CN23XX_CONFIG_MSIX_MSIMP 0x64
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+#define CN23XX_CONFIG_PCIE_DEVCTL2 0x98
+#define CN23XX_CONFIG_PCIE_LINKCTL2 0xA0
+#define CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK 0x108
+#define CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS 0x110
+#define CN23XX_CONFIG_PCIE_DEVCTL_MASK 0x00040000
+
+#define CN23XX_PCIE_SRIOV_FDL 0x188
+#define CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN23XX_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+#define CN23XX_CONFIG_SRIOV_VFDEVID 0x190
+
+#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C
+#define CN23XX_CONFIG_SRIOV_BARX(i) \
+ (CN23XX_CONFIG_SRIOV_BAR_START + (i * 4))
+#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08
+#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04
+#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01
+
+/* ############## BAR0 Registers ################ */
+
+#define CN23XX_SLI_CTL_PORT_START 0x286E0
+#define CN23XX_PORT_OFFSET 0x10
+
+#define CN23XX_SLI_CTL_PORT(p) \
+ (CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
+
+/* 2 scratch registers (64-bit) */
+#define CN23XX_SLI_WINDOW_CTL 0x282E0
+#define CN23XX_SLI_SCRATCH1 0x283C0
+#define CN23XX_SLI_SCRATCH2 0x283D0
+#define CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL
+
+/* 1 register (64-bit) - SLI_CTL_STATUS */
+#define CN23XX_SLI_CTL_STATUS 0x28570
+
+/* SLI Packet Input Jabber Register (64-bit register)
+ * <31:0> is the byte count that limits the size of packets
+ * allowed as SLI packet inbound packets.
+ * The default value is 0xFA00 (=64000).
+ */
+#define CN23XX_SLI_PKT_IN_JABBER 0x29170
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid WQE malformation
+ * (PKO_BUG_24989_WQE_LEN).
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+#define CN23XX_WIN_WR_ADDR_LO 0x20000
+#define CN23XX_WIN_WR_ADDR_HI 0x20004
+#define CN23XX_WIN_WR_ADDR64 CN23XX_WIN_WR_ADDR_LO
+
+#define CN23XX_WIN_RD_ADDR_LO 0x20010
+#define CN23XX_WIN_RD_ADDR_HI 0x20014
+#define CN23XX_WIN_RD_ADDR64 CN23XX_WIN_RD_ADDR_LO
+
+#define CN23XX_WIN_WR_DATA_LO 0x20020
+#define CN23XX_WIN_WR_DATA_HI 0x20024
+#define CN23XX_WIN_WR_DATA64 CN23XX_WIN_WR_DATA_LO
+
+#define CN23XX_WIN_RD_DATA_LO 0x20040
+#define CN23XX_WIN_RD_DATA_HI 0x20044
+#define CN23XX_WIN_RD_DATA64 CN23XX_WIN_RD_DATA_LO
+
+#define CN23XX_WIN_WR_MASK_LO 0x20030
+#define CN23XX_WIN_WR_MASK_HI 0x20034
+#define CN23XX_WIN_WR_MASK_REG CN23XX_WIN_WR_MASK_LO
+#define CN23XX_SLI_MAC_CREDIT_CNT 0x23D70
+
+/* 4 registers (64-bit) for mapping IOQs to MACs (PEMs) -
+ * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
+ */
+#define CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030
+
+/* 1 register (64-bit) to determine whether IOQs are in reset. */
+#define CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_MAC_RINFO_OFFSET 0x20
+#define CN23XX_PF_RINFO_OFFSET 0x10
+
+#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \
+ (CN23XX_SLI_PKT_MAC_RINFO_START64 + \
+ ((mac) * CN23XX_MAC_RINFO_OFFSET) + \
+ ((pf) * CN23XX_PF_RINFO_OFFSET))
+
+/** mask for total rings, setting TRS to base */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16)
+/** mask for starting ring number: setting SRN <6:0> = 0x7F */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN (0x7F)
+
+/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16
+/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS 0
+/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS 32
+/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS 48
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/** Rings per Virtual Function */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/** These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/** These bits[43:32] select the function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define CN23XX_SLI_OQ_WMARK 0x29180
+
+/* Global pkt control register */
+#define CN23XX_SLI_GBL_CONTROL 0x29210
+
+/* Backpressure enable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1S 0x29260
+
+/* Backpressure enable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1S 0x29270
+
+/* Backpressure disable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1C 0x29280
+
+/* Backpressure disable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1C 0x29290
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately from INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET) + 4)
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_SLI_MAC_PF_MBOX_INT_START 0x27380
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf) \
+ (CN23XX_SLI_MAC_PF_MBOX_INT_START + \
+ ((mac) * CN23XX_MAC_INT_OFFSET + \
+ (pf) * CN23XX_PF_INT_OFFSET))
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN23XX_DMA_CNT_START 0x28400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */
+/* SLI_DMA_0_TIM */
+#define CN23XX_DMA_TIM_START 0x28420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN23XX_DMA_INT_LEVEL_START 0x283E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN23XX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN23XX_DMA_CNT(dq) \
+ (CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_PKT_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIME_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIM(dq) \
+ (CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET))
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+/*######################## INTERRUPTS #########################*/
+#define CN23XX_MAC_INT_OFFSET 0x20
+#define CN23XX_PF_INT_OFFSET 0x10
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN23XX_SLI_INT_SUM64 0x27000
+
+/* 4 registers (64-bit) for Interrupt Enable for each Port */
+#define CN23XX_SLI_INT_ENB64 0x27080
+
+#define CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \
+ (CN23XX_SLI_INT_SUM64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \
+ (CN23XX_SLI_INT_ENB64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
+#define CN23XX_SLI_PKT_CNT_INT 0x29130
+
+/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
+#define CN23XX_SLI_PKT_TIME_INT 0x29140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+#define CN23XX_INTR_RML_TIMEOUT_ERR (1)
+
+#define CN23XX_INTR_MIO_INT BIT(1)
+
+#define CN23XX_INTR_RESERVED1 (3 << 2)
+
+#define CN23XX_INTR_PKT_COUNT BIT(4)
+#define CN23XX_INTR_PKT_TIME BIT(5)
+
+#define CN23XX_INTR_RESERVED2 (3 << 6)
+
+#define CN23XX_INTR_M0UPB0_ERR BIT(8)
+#define CN23XX_INTR_M0UPWI_ERR BIT(9)
+#define CN23XX_INTR_M0UNB0_ERR BIT(10)
+#define CN23XX_INTR_M0UNWI_ERR BIT(11)
+
+#define CN23XX_INTR_RESERVED3 (0xFFFFFULL << 12)
+
+#define CN23XX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN23XX_INTR_DMA1_FORCE BIT_ULL(33)
+
+#define CN23XX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN23XX_INTR_DMA1_COUNT BIT_ULL(35)
+
+#define CN23XX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN23XX_INTR_DMA1_TIME BIT_ULL(37)
+
+#define CN23XX_INTR_RESERVED4 (0x7FFFFULL << 38)
+
+#define CN23XX_INTR_VF_MBOX BIT_ULL(57)
+#define CN23XX_INTR_DMAVF_ERR BIT_ULL(58)
+#define CN23XX_INTR_DMAPF_ERR BIT_ULL(59)
+
+#define CN23XX_INTR_PKTVF_ERR BIT_ULL(60)
+#define CN23XX_INTR_PKTPF_ERR BIT_ULL(61)
+#define CN23XX_INTR_PPVF_ERR BIT_ULL(62)
+#define CN23XX_INTR_PPPF_ERR BIT_ULL(63)
+
+#define CN23XX_INTR_DMA0_DATA (CN23XX_INTR_DMA0_TIME)
+#define CN23XX_INTR_DMA1_DATA (CN23XX_INTR_DMA1_TIME)
+
+#define CN23XX_INTR_DMA_DATA \
+ (CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA)
+
+/* By default, only TIME based */
+#define CN23XX_INTR_PKT_DATA (CN23XX_INTR_PKT_TIME)
+/* For both COUNT and TIME based */
+/* #define CN23XX_INTR_PKT_DATA \
+ * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME)
+ */
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN23XX_INTR_PCIE_DATA \
+ (CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DATA)
+
+/* Sum of interrupts for error events */
+#define CN23XX_INTR_ERR \
+ (CN23XX_INTR_M0UPB0_ERR | \
+ CN23XX_INTR_M0UPWI_ERR | \
+ CN23XX_INTR_M0UNB0_ERR | \
+ CN23XX_INTR_M0UNWI_ERR | \
+ CN23XX_INTR_DMAVF_ERR | \
+ CN23XX_INTR_DMAPF_ERR | \
+ CN23XX_INTR_PKTPF_ERR | \
+ CN23XX_INTR_PPPF_ERR | \
+ CN23XX_INTR_PPVF_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN23XX_INTR_MASK \
+ (CN23XX_INTR_DMA_DATA | \
+ CN23XX_INTR_DMA0_FORCE | \
+ CN23XX_INTR_DMA1_FORCE | \
+ CN23XX_INTR_MIO_INT | \
+ CN23XX_INTR_ERR)
+
+/* 4 registers (64-bit) */
+#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
+#define CN23XX_SLI_S2M_PORTX_CTL(port) \
+ (CN23XX_SLI_S2M_PORT_CTL_START + (port * 0x10))
+
+#define CN23XX_SLI_MAC_NUMBER 0x20050
+
+/** The PEM(0..3)_BAR1_INDEX(0..15) address is defined as
+ * addr = (0x00011800C0000100 | port << 24 | idx << 3)
+ * where port is PEM(0..3) and idx is INDEX(0..15).
+ */
+#define CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL
+#define CN23XX_PEM_OFFSET 24
+#define CN23XX_BAR1_INDEX_OFFSET 3
+
+#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
+ (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+ ((idx) << CN23XX_BAR1_INDEX_OFFSET))
+
+/*############################ DPI #########################*/
+
+/* 1 register (64-bit) - provides DMA Enable */
+#define CN23XX_DPI_CTL 0x0001df0000000040ULL
+
+/* 1 register (64-bit) - Controls the DMA IO Operation */
+#define CN23XX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+/* 1 register (64-bit) - Provides DMA Instr'n Queue Enable */
+#define CN23XX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RSP
+ * Indicates which Instr'n Queue received error response from the IO sub-system
+ */
+#define CN23XX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RST
+ * Indicates which Instr'n Queue dropped an Instr'n
+ */
+#define CN23XX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+/* 6 registers (64-bit) - DPI_DMA_ENG(0..5)_EN
+ * Provides DMA Engine Queue Enable
+ */
+#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + (eng * 8))
+
+/* 8 registers (64-bit) - DPI_DMA(0..7)_REQQ_CTL
+ * Provides control bits for transaction on 8 Queues
+ */
+#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL
+#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \
+ (CN23XX_DPI_DMA_REQQ0_CTL + (q_no * 8))
+
+/* 6 registers (64-bit) - DPI_ENG(0..5)_BUF
+ * Provides DMA Engine FIFO (Queue) Size
+ */
+#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+#define CN23XX_DPI_DMA_ENG_BUF(eng) \
+ (CN23XX_DPI_DMA_ENG0_BUF + (eng * 8))
+
+/* 4 Registers (64-bit) */
+#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL
+#define CN23XX_DPI_SLI_PRTX_CFG(port) \
+ (CN23XX_DPI_SLI_PRT_CFG_START + (port * 0x8))
+
+/* Masks for DPI_DMA_CONTROL Register */
+#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN23XX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN23XX_DPI_DMA_ENB (0x0FULL << 48)
+/* Set DMA Control so that the packet count, not the byte count, sent by
+ * DMA is updated when we use Interrupt Coalescing (CA mode).
+ */
+#define CN23XX_DPI_DMA_O_ADD1 BIT(19)
+/* Select 64-bit Byte Swap Mode */
+#define CN23XX_DPI_DMA_O_ES BIT(15)
+#define CN23XX_DPI_DMA_O_MODE BIT(14)
+
+#define CN23XX_DPI_DMA_CTL_MASK \
+ (CN23XX_DPI_DMA_COMMIT_MODE | \
+ CN23XX_DPI_DMA_PKT_EN | \
+ CN23XX_DPI_DMA_O_ES | \
+ CN23XX_DPI_DMA_O_MODE)
+
+/*############################ RST #########################*/
+
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+#define CN23XX_RST_SOFT_RST 0x0001180006001680ULL
+
+#define CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index c03d37016a48..e779af88621b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -338,7 +338,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
u32 mask;
@@ -353,6 +353,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
mask |= oct->io_qmask.oq;
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ return 0;
}
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
@@ -418,36 +420,6 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
-{
- int i;
-
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
- continue;
- oct->fn_list.setup_iq_regs(oct, i);
- }
-
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- oct->fn_list.setup_oq_regs(oct, i);
- }
-
- oct->fn_list.setup_device_regs(oct);
-
- oct->fn_list.enable_interrupt(oct->chip);
-
- oct->fn_list.enable_io_queues(oct);
-
- /* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
- }
-}
-
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
u64 core_addr,
@@ -507,18 +479,20 @@ lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
return new_idx;
}
-void lio_cn6xxx_enable_interrupt(void *chip)
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
/* Enable Interrupt */
writeq(mask, cn6xxx->intr_enb_reg64);
}
-void lio_cn6xxx_disable_interrupt(void *chip)
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
@@ -714,7 +688,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 28c47224221a..a40a91394079 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -80,18 +80,17 @@ void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
u32 idx, int valid);
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
-void lio_cn6xxx_enable_interrupt(void *chip);
-void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 29755bc68f12..dbf3566ead53 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -148,7 +148,6 @@ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
new file mode 100644
index 000000000000..201eddb3013a
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -0,0 +1,266 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+/* TODO: Use some other #define to indicate that IQs are not tied to
+ * netdevs and can take traffic from different netdevs; hence BQL
+ * reporting is done per packet rather than in bulk. Usage of NO_NAPI
+ * in txq completion is a little confusing.
+ */
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ mac[0], mac[1],
+ mac[2], mac[3],
+ mac[4], mac[5]);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param1);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param1);
+ netdev->mtu = nctrl->ncmd.s.param1;
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_ID_ACTIVE:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d ADDED\n",
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d DELETED\n",
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 289eb8907922..f163e0abbeb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,6 +32,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
static int octnet_get_link_stats(struct net_device *netdev);
@@ -75,6 +76,7 @@ enum {
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
#define OCT_ETHTOOL_REGSVER 1
/* statistics of PF */
@@ -188,6 +190,10 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
"buffer_alloc_failure",
};
+/* LiquidIO driver private flags */
+static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
+};
+
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
@@ -259,6 +265,13 @@ lio_ethtool_get_channels(struct net_device *dev,
max_tx = CFG_GET_IQ_MAX_Q(conf6x);
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf23);
+ max_tx = CFG_GET_IQ_MAX_Q(conf23);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
}
channel->max_rx = max_rx;
@@ -290,18 +303,16 @@ lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_board_info *board_info;
- int len;
- if (eeprom->offset != 0)
+ if (eeprom->offset)
return -EINVAL;
eeprom->magic = oct_dev->pci_dev->vendor;
board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
- len =
- sprintf((char *)bytes,
- "boardname:%s serialnum:%s maj:%lld min:%lld\n",
- board_info->name, board_info->serial_number,
- board_info->major, board_info->minor);
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
return 0;
}
@@ -333,6 +344,32 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
return 0;
}
+static int octnet_id_active(struct net_device *netdev, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
+ nctrl.ncmd.s.param1 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Callback for when mdio command response arrives
*/
static void octnet_mdio_resp_callback(struct octeon_device *oct,
@@ -406,7 +443,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
- retval = -EBUSY;
+ retval = -EBUSY;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived
@@ -476,6 +513,11 @@ static int lio_set_phys_id(struct net_device *netdev,
&value);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+
+ /* returns 0 since updates are asynchronous */
+ return 0;
} else {
return -EINVAL;
}
@@ -521,7 +563,10 @@ static int lio_set_phys_id(struct net_device *netdev,
&lio->phy_beacon_val);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ return 0;
} else {
return -EINVAL;
}
@@ -550,6 +595,13 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
}
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
@@ -610,6 +662,69 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = oct->rx_pause;
}
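+/* Send an OCTNET_CMD_SET_FLOW_CTL control packet to set RX/TX pause.
+ * Only supported on the CN23XX PF; pause on half-duplex links and
+ * autonegotiated flow control are rejected.
+ */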
+static int
+lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support auto-negotiation.
+ */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct oct_link_info *linfo = &lio->linfo;
+
+ int ret = 0;
+
+ if (oct->chip_id != OCTEON_CN23XX_PF_VID)
+ return -EINVAL;
+
+ if (linfo->link.s.duplex == 0) {
+ /*no flow control for half duplex*/
+ if (pause->rx_pause || pause->tx_pause)
+ return -EINVAL;
+ }
+
+ /*do not support autoneg of link flow control*/
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ if (pause->rx_pause) {
+ /*enable rx pause*/
+ nctrl.ncmd.s.param1 = 1;
+ } else {
+ /*disable rx pause*/
+ nctrl.ncmd.s.param1 = 0;
+ }
+
+ if (pause->tx_pause) {
+ /*enable tx pause*/
+ nctrl.ncmd.s.param2 = 1;
+ } else {
+ /*disable tx pause*/
+ nctrl.ncmd.s.param2 = 0;
+ }
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
+ return -EINVAL;
+ }
+
+ oct->rx_pause = pause->rx_pause;
+ oct->tx_pause = pause->tx_pause;
+
+ return 0;
+}
+
static void
lio_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats __attribute__((unused)),
@@ -877,6 +992,27 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
}
+static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
+ sprintf(data, "%s", oct_priv_flags_strings[i]);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ break;
+ }
+}
+
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct lio *lio = GET_LIO(netdev);
@@ -916,12 +1052,31 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
default:
netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
break;
}
}
+static int lio_get_priv_flags_ss_count(struct lio *lio)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return ARRAY_SIZE(oct_priv_flags_strings);
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ return -EOPNOTSUPP;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
struct lio *lio = GET_LIO(netdev);
@@ -932,6 +1087,8 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
return (ARRAY_SIZE(oct_stats_strings) +
ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
default:
return -EOPNOTSUPP;
}
@@ -948,6 +1105,16 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ if (!intrmod_cfg->rx_enable) {
+ intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ intrmod_cfg->rx_frames;
+ }
+ if (!intrmod_cfg->tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ intrmod_cfg->tx_frames;
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX: {
struct octeon_cn6xxx *cn6xxx =
@@ -983,7 +1150,15 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_coalesce_usecs_low =
intrmod_cfg->rx_mintmr_trigger;
intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->rx_mincnt_trigger;
+ intrmod_cfg->rx_mincnt_trigger;
+ }
+ if (OCTEON_CN23XX_PF(oct) &&
+ (intrmod_cfg->tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg->tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg->tx_mincnt_trigger;
}
return 0;
}
@@ -1060,11 +1235,11 @@ static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
u32 status, void *ptr)
{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
- sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
- sc->ctxptr;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl =
+ (struct oct_nic_stats_ctrl *)sc->ctxptr;
struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
@@ -1314,14 +1489,35 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
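+ /* SLI_PKT(q)_INT_LEVELS: the low 32 bits hold the packet-count
+ * threshold; keep the TIME field in the upper bits and update
+ * only the CNT field.
+ */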
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ rx_max_coalesced_frames);
+ /*consider setting resend bit*/
+ }
+ oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ break;
+ }
default:
return -EINVAL;
}
return 0;
}
-static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
- *intr_coal)
+static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 time_threshold, rx_coalesce_usecs;
@@ -1346,6 +1542,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ time_threshold =
+ cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (oct->intrmod.rx_frames |
+ (time_threshold << 32)));
+ /*consider writing to resend bit here*/
+ }
+ oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ break;
+ }
default:
return -EINVAL;
}
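
Both CN23XX cases above treat SLI_OQ_PKT_INT_LEVELS as two thresholds packed into one 64-bit register: the packet-count trigger in the low word and the time trigger above bit 32 (the 0x3fffff00000000 mask is what the count path preserves); under that reading, the 0x5700000040ULL written to the same register in octeon_droq_bh later in this patch is simply time 0x57 with count 0x40. A standalone sketch of the two updates, assuming exactly that layout:

```c
#include <stdint.h>
#include <stdio.h>

#define OQ_TIME_FIELD_MASK 0x3fffff00000000ULL	/* bits kept by the count path */

/* replace only the packet-count threshold, keep the time threshold */
static uint64_t set_oq_pkt_threshold(uint64_t levels, uint32_t frames)
{
	return (levels & OQ_TIME_FIELD_MASK) | frames;
}

/* rewrite the whole register: count in the low word, time above bit 32 */
static uint64_t set_oq_time_threshold(uint32_t frames, uint64_t time_threshold)
{
	return (uint64_t)frames | (time_threshold << 32);
}

int main(void)
{
	uint64_t levels = set_oq_time_threshold(0x40, 0x57);

	printf("levels    = %#018llx\n", (unsigned long long)levels);
	printf("new count = %#018llx\n",
	       (unsigned long long)set_oq_pkt_threshold(levels, 128));
	return 0;
}
```
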
@@ -1358,12 +1575,37 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
__attribute__((unused)))
{
struct octeon_device *oct = lio->oct_dev;
+ u32 iq_intr_pkt;
+ void __iomem *inst_cnt_reg;
+ u64 val;
/* Config Cnt based interrupt values */
switch (oct->chip_id) {
case OCTEON_CN68XX:
case OCTEON_CN66XX:
break;
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->tx_max_coalesced_frames)
+ iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ else
+ iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
+ val = readq(inst_cnt_reg);
+ /* clear wmark and count; don't want to write count back */
+ val = (val & 0xFFFF000000000000ULL) |
+ ((u64)iq_intr_pkt
+ << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
+ writeq(val, inst_cnt_reg);
+ /*consider setting resend bit*/
+ }
+ oct->intrmod.tx_frames = iq_intr_pkt;
+ break;
+ }
default:
return -EINVAL;
}
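
The CN23XX tx case above is a read-modify-write of each queue's inst_cnt_reg: readq, keep only the top 16 bits so the hardware packet count is not written back, insert the new watermark at CN23XX_PKT_IN_DONE_WMARK_BIT_POS, writeq. A pure-function sketch of that update; the watermark bit position is passed in as a parameter because its value is not visible in this hunk:

```c
#include <stdint.h>
#include <stdio.h>

#define INST_CNT_KEEP_MASK 0xFFFF000000000000ULL	/* only the top 16 bits survive */

/* clear the watermark and count fields, then insert the new watermark */
static uint64_t update_iq_wmark(uint64_t inst_cnt, uint64_t wmark,
				unsigned int wmark_bit_pos)
{
	return (inst_cnt & INST_CNT_KEEP_MASK) | (wmark << wmark_bit_pos);
}

int main(void)
{
	/* hypothetical values: old register contents, watermark 32 at bit 32 */
	uint64_t before = 0xabcd000000001234ULL;
	uint64_t after = update_iq_wmark(before, 32, 32);

	printf("before: %#018llx\nafter:  %#018llx\n",
	       (unsigned long long)before, (unsigned long long)after);
	return 0;
}
```
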
@@ -1399,6 +1641,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
return -EINVAL;
}
break;
+ case OCTEON_CN23XX_PF_VID:
+ break;
default:
return -EINVAL;
}
@@ -1541,9 +1785,237 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
+static int lio_get_regs_len(struct net_device *dev)
{
- return OCT_ETHTOOL_REGDUMP_LEN;
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX;
+ default:
+ return OCT_ETHTOOL_REGDUMP_LEN;
+ }
+}
+
+static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ u8 pf_num = oct->pf_num;
+ int len = 0;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ /*0x29030 or 0x29040*/
+ reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27080 or 0x27090*/
+ reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27000 or 0x27010*/
+ reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29120*/
+ reg = 0x29120;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27300*/
+ reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
+ oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27200*/
+ reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29130*/
+ reg = CN23XX_SLI_PKT_CNT_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29140*/
+ reg = CN23XX_SLI_PKT_TIME_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29160*/
+ reg = 0x29160;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29180*/
+ reg = CN23XX_SLI_OQ_WMARK;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x291E0*/
+ reg = CN23XX_SLI_PKT_IOQ_RING_RST;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29210*/
+ reg = CN23XX_SLI_GBL_CONTROL;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29220*/
+ reg = 0x29220;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*PF only*/
+ if (pf_num == 0) {
+ /*0x29260*/
+ reg = CN23XX_SLI_OUT_BP_EN_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ } else if (pf_num == 1) {
+ /*0x29270*/
+ reg = CN23XX_SLI_OUT_BP_EN2_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10080*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10090*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_SIZE(i);
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10050*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10070*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100a0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100b0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100c0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x10000*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10010*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
+ i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10020*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_DOORBELL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10030*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_SIZE(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ return len;
}
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
@@ -1688,6 +2160,10 @@ static void lio_get_regs(struct net_device *dev,
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
+ len += cn23xx_read_csr_reg(regbuf + len, oct);
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
@@ -1729,6 +2205,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_strings = lio_get_strings,
.get_ethtool_stats = lio_get_ethtool_stats,
.get_pauseparam = lio_get_pauseparam,
+ .set_pauseparam = lio_set_pauseparam,
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 20d6942edf40..afc6f9dc8119 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,11 +21,10 @@
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
+#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -37,6 +36,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
+#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
@@ -52,11 +52,6 @@ module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
"Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
-static u32 console_bitmask;
-module_param(console_bitmask, int, 0644);
-MODULE_PARM_DESC(console_bitmask,
- "Bitmask indicating which consoles have debug output redirected to syslog.");
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
@@ -102,6 +97,14 @@ struct liquidio_if_cfg_resp {
u64 status;
};
+struct liquidio_rx_ctl_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -139,7 +142,8 @@ union tx_info {
#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
-#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
/** Structure of a node in list of gather components maintained by
* NIC driver for each network device.
@@ -162,27 +166,6 @@ struct octnic_gather {
u64 sg_dma_ptr;
};
-/** This structure is used by NIC driver to store information required
- * to free the sk_buff when the packet has been fetched by Octeon.
- * Bytes offset below assume worst-case of a 64-bit system.
- */
-struct octnet_buf_free_info {
- /** Bytes 1-8. Pointer to network device private structure. */
- struct lio *lio;
-
- /** Bytes 9-16. Pointer to sk_buff. */
- struct sk_buff *skb;
-
- /** Bytes 17-24. Pointer to gather list. */
- struct octnic_gather *g;
-
- /** Bytes 25-32. Physical address of skb->data or gather list. */
- u64 dptr;
-
- /** Bytes 33-47. Piggybacked soft command, if any */
- struct octeon_soft_command *sc;
-};
-
struct handshake {
struct completion init;
struct completion started;
@@ -198,6 +181,7 @@ struct octeon_device_priv {
};
static int octeon_device_init(struct octeon_device *);
+static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -219,6 +203,20 @@ static void octeon_droq_bh(unsigned long pdev)
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
+ lio_enable_irq(oct->droq[q_no], NULL);
+
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ /* set time and cnt interrupt thresholds for this DROQ
+ * for NAPI
+ */
+ int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
+ 0x5700000040ULL);
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
+ }
}
if (reschedule)
@@ -252,76 +250,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
return pkt_cnt;
}
-void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
- unsigned int bytes_compl)
-{
- struct netdev_queue *netdev_queue = txq;
-
- netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
-}
-
-void octeon_update_tx_completion_counters(void *buf, int reqtype,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb = NULL;
- struct octeon_soft_command *sc;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- (*pkts_compl)++;
- *bytes_compl += skb->len;
-}
-
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb;
- struct octeon_soft_command *sc;
- struct netdev_queue *txq;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
- netdev_tx_sent_queue(txq, skb->len);
-}
-
-int octeon_console_debug_enabled(u32 console)
-{
- return (console_bitmask >> (console)) & 0x1;
-}
-
/**
* \brief Forces all IO queues off on a given device
* @param oct Pointer to Octeon device
@@ -441,7 +369,7 @@ static void stop_pci_io(struct octeon_device *oct)
pci_disable_device(oct->pci_dev);
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
pcierror_quiesce_device(oct);
@@ -570,6 +498,9 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
{ /* 66xx */
PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
},
+ { /* 23xx pf */
+ PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
{
0, 0, 0, 0, 0, 0, 0
}
@@ -587,7 +518,6 @@ static struct pci_driver liquidio_pci_driver = {
.suspend = liquidio_suspend,
.resume = liquidio_resume,
#endif
-
};
/**
@@ -936,6 +866,52 @@ static void print_link_info(struct net_device *netdev)
}
/**
+ * \brief Routine to notify MTU change
+ * @param work work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * \brief Sets up the mtu status change work
+ * @param netdev network device
+ */
+static inline int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static inline void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
* \brief Update link status
* @param netdev network device
* @param ls link status structure
@@ -973,8 +949,6 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
struct lio *lio;
struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
- /*octeon_update_iq_read_idx(oct, iq);*/
-
netdev = oct->props[iq->ifidx].netdev;
/* This is needed because the first IQ does not have
@@ -1002,12 +976,32 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
}
}
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by the iq doorbell check */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+ return 0;
+}
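
liquidio_schedule_msix_droq_pkt_handler() above maps the per-vector status into one of three actions: a droq in poll mode runs its NAPI handler directly, an output-queue interrupt (MSIX_PO_INT) schedules the droq tasklet, and an input-queue interrupt (MSIX_PI_INT) is left for the periodic IQ doorbell check. A tiny decision-table sketch; the two bit values here are assumptions, the real ones come from the chip headers:

```c
#include <stdint.h>
#include <stdio.h>

/* assumed bit assignments; the real values live in the chip headers */
#define MSIX_PO_INT 0x1ULL
#define MSIX_PI_INT 0x2ULL

enum action { RUN_NAPI, SCHEDULE_TASKLET, DEFER_TO_DB_CHECK, NOTHING };

static enum action droq_irq_action(int poll_mode, uint64_t ret)
{
	if (poll_mode)
		return RUN_NAPI;
	if (ret & MSIX_PO_INT)
		return SCHEDULE_TASKLET;
	if (ret & MSIX_PI_INT)
		return DEFER_TO_DB_CHECK;	/* flushed by the iq doorbell check */
	return NOTHING;
}

int main(void)
{
	printf("poll_mode, PO set -> %d\n", droq_irq_action(1, MSIX_PO_INT));
	printf("no poll,   PO set -> %d\n", droq_irq_action(0, MSIX_PO_INT));
	printf("no poll,   PI only-> %d\n", droq_irq_action(0, MSIX_PI_INT));
	return 0;
}
```
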
+
/**
* \brief Droq packet processor scheduler
* @param oct octeon device
*/
-static
-void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1032,19 +1026,36 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
}
}
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ u64 ret;
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
/**
* \brief Interrupt handler for octeon
* @param irq unused
* @param dev octeon device
*/
static
-irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+ void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
/* Disable our interrupts for the duration of ISR */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
ret = oct->fn_list.process_interrupt_regs(oct);
@@ -1053,7 +1064,7 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
/* Re-enable our interrupts */
if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
- oct->fn_list.enable_interrupt(oct->chip);
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
return ret;
}
@@ -1067,22 +1078,204 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
static int octeon_setup_interrupt(struct octeon_device *oct)
{
int irqret, err;
+ struct msix_entry *msix_entries;
+ int i;
+ int num_ioq_vectors;
+ int num_alloc_ioq_vectors;
- err = pci_enable_msi(oct->pci_dev);
- if (err)
- dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
- err);
- else
- oct->flags |= LIO_FLAG_MSI_ENABLED;
-
- irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
- IRQF_SHARED, "octeon", oct);
- if (irqret) {
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
- dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
- irqret);
- return 1;
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
+ /* one non ioq interrupt for handling sli_mac_pf_int_sum */
+ oct->num_msix_irqs += 1;
+
+ oct->msix_entries = kcalloc(
+ oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ return 1;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ /* Assumption: the PF's MSI-X vector numbers run from pf_srn up to
+ * trs rather than from 0. If that is not the case, change this code.
+ */
+ for (i = 0; i < oct->num_msix_irqs - 1; i++)
+ msix_entries[i].entry = oct->sriov_info.pf_srn + i;
+ msix_entries[oct->num_msix_irqs - 1].entry =
+ oct->sriov_info.trs;
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+
+ /** For PF, there is one non-ioq interrupt handler */
+ num_ioq_vectors -= 1;
+ irqret = request_irq(msix_entries[num_ioq_vectors].vector,
+ liquidio_legacy_intr_handler, 0, "octeon",
+ oct);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+
+ for (i = 0; i < num_ioq_vectors; i++) {
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ "octeon", &oct->ioq_vector[i]);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ /* Free the non-ioq irq vector here. */
+ free_irq(msix_entries[num_ioq_vectors].vector,
+ oct);
+
+ while (i) {
+ i--;
+ /** clearing affinity mask. */
+ irq_set_affinity_hint(
+ msix_entries[i].vector, NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ (&oct->ioq_vector[i].affinity_mask));
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+ oct->octeon_id);
+ } else {
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq,
+ liquidio_legacy_intr_handler, IRQF_SHARED,
+ "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+ }
+ return 0;
+}
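
The MSI-X branch of octeon_setup_interrupt() above allocates one vector per PF ring plus one extra for the SLI_MAC_PF_INT_SUM summary interrupt, numbering the ring vectors pf_srn + i and giving the last entry the trs index (the in-code comment flags that numbering as an assumption). A sketch of just the entry-table fill, with made-up pf_srn/trs values:

```c
#include <stdio.h>
#include <stdlib.h>

struct msix_entry_sketch {
	unsigned int entry;	/* index requested from the device */
	unsigned int vector;	/* would be filled in by the PCI core */
};

int main(void)
{
	unsigned int num_pf_rings = 4, pf_srn = 8, trs = 64;	/* hypothetical */
	unsigned int num_irqs = num_pf_rings + 1;	/* +1 non-ioq vector */
	struct msix_entry_sketch *e = calloc(num_irqs, sizeof(*e));
	unsigned int i;

	if (!e)
		return 1;
	for (i = 0; i < num_irqs - 1; i++)
		e[i].entry = pf_srn + i;	/* ioq vectors start at pf_srn */
	e[num_irqs - 1].entry = trs;		/* summary/non-ioq vector */

	for (i = 0; i < num_irqs; i++)
		printf("msix_entries[%u].entry = %u\n", i, e[i].entry);
	free(e);
	return 0;
}
```
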
+
+static int liquidio_watchdog(void *param)
+{
+ u64 wdog;
+ u16 mask_of_stuck_cores = 0;
+ u16 mask_of_crashed_cores = 0;
+ int core_num;
+ u8 core_is_stuck[LIO_MAX_CORES];
+ u8 core_crashed[LIO_MAX_CORES];
+ struct octeon_device *oct = param;
+
+ memset(core_is_stuck, 0, sizeof(core_is_stuck));
+ memset(core_crashed, 0, sizeof(core_crashed));
+
+ while (!kthread_should_stop()) {
+ mask_of_crashed_cores =
+ (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+
+ for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
+ if (!core_is_stuck[core_num]) {
+ wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
+
+ /* look at watchdog state field */
+ wdog &= CIU3_WDOG_MASK;
+ if (wdog) {
+ /* this watchdog timer has expired */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_WDOG_EXPIRE;
+ mask_of_stuck_cores |= (1 << core_num);
+ }
+ }
+
+ if (!core_crashed[core_num])
+ core_crashed[core_num] =
+ (mask_of_crashed_cores >> core_num) & 1;
+ }
+
+ if (mask_of_stuck_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_is_stuck[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d is stuck!\n",
+ core_num);
+ /* 2 means we have printk'd an error
+ * so no need to repeat the same printk
+ */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+
+ if (mask_of_crashed_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_crashed[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
+ core_num);
+ /* 2 means we have printk'd an error
+ * so no need to repeat the same printk
+ */
+ core_crashed[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+#ifdef CONFIG_MODULE_UNLOAD
+ if (mask_of_stuck_cores || mask_of_crashed_cores) {
+ /* make module refcount=0 so that rmmod will work */
+ long refcount;
+
+ refcount = module_refcount(THIS_MODULE);
+
+ while (refcount > 0) {
+ module_put(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+
+ /* compensate for and withstand an unlikely (but still
+ * possible) race condition
+ */
+ while (refcount < 0) {
+ try_module_get(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+ }
+#endif
+ /* sleep for two seconds */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
}
return 0;
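
The watchdog thread builds two 16-bit masks per pass: mask_of_crashed_cores is read straight from SLI_SCRATCH2, while mask_of_stuck_cores is accumulated from each core's CIU3 watchdog state field. A standalone sketch of that accumulation with the register reads replaced by plain values; the LIO_MAX_CORES and CIU3_WDOG_MASK values here are assumed:

```c
#include <stdint.h>
#include <stdio.h>

#define LIO_MAX_CORES 16	/* value assumed for the sketch */
#define CIU3_WDOG_MASK 0x3ULL	/* assumed width of the wdog state field */

/* derive the stuck-core mask from per-core watchdog state words */
static uint16_t stuck_core_mask(const uint64_t wdog[LIO_MAX_CORES])
{
	uint16_t mask = 0;
	int core;

	for (core = 0; core < LIO_MAX_CORES; core++)
		if (wdog[core] & CIU3_WDOG_MASK)	/* timer expired */
			mask |= (uint16_t)(1u << core);
	return mask;
}

int main(void)
{
	uint64_t wdog[LIO_MAX_CORES] = { 0 };
	uint16_t crashed = 0x0004;	/* pretend SLI_SCRATCH2 says core 2 crashed */

	wdog[5] = 0x1;			/* pretend core 5's watchdog expired */
	printf("stuck mask   = 0x%04x\n", stuck_core_mask(wdog));
	printf("crashed mask = 0x%04x\n", crashed);
	return 0;
}
```
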
@@ -1107,6 +1300,9 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (pdev->device == OCTEON_CN23XX_PF_VID)
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
dev_info(&pdev->dev, "Initializing device %x:%x.\n",
(u32)pdev->vendor, (u32)pdev->device);
@@ -1130,6 +1326,30 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ u64 scratch1;
+ u8 bus, device, function;
+
+ scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
+ if (!(scratch1 & 4ULL)) {
+ /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
+ * the lio watchdog kernel thread is running for this
+ * NIC. Each NIC gets one watchdog kernel thread.
+ */
+ scratch1 |= 4ULL;
+ octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
+ scratch1);
+
+ bus = pdev->bus->number;
+ device = PCI_SLOT(pdev->devfn);
+ function = PCI_FUNC(pdev->devfn);
+ oct_dev->watchdog_task = kthread_create(
+ liquidio_watchdog, oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
+ wake_up_process(oct_dev->watchdog_task);
+ }
+ }
+
oct_dev->rx_pause = 1;
oct_dev->tx_pause = 1;
@@ -1146,6 +1366,7 @@ liquidio_probe(struct pci_dev *pdev,
static void octeon_destroy_resources(struct octeon_device *oct)
{
int i;
+ struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1190,21 +1411,40 @@ static void octeon_destroy_resources(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs - 1; i++) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ /* non-iov vector's argument is oct struct */
+ free_irq(msix_entries[i].vector, oct);
- /* Release the interrupt line */
- free_irq(oct->pci_dev->irq, oct);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ } else {
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ }
- /* fallthrough */
+ if (OCTEON_CN23XX_PF(oct))
+ octeon_free_ioq_vector(oct);
+ /* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1226,16 +1466,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_delete_response_list(oct);
/* fallthrough */
- case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
- octeon_free_sc_buffer_pool(oct);
-
- /* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
octeon_delete_instr_queue(oct, i);
}
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
/* fallthrough */
case OCT_DEV_DISPATCH_INIT_DONE:
@@ -1244,9 +1483,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
-
/* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
+ if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
+ oct->fn_list.soft_reset(oct);
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1264,23 +1503,89 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
+ * \brief Callback for rx ctrl
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void rx_ctl_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_rx_ctl_context *ctx;
+
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status)
+ dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
* \brief Send Rx control command
* @param lio per-network private data
* @param start_stop whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
- struct octnic_ctrl_pkt nctrl;
+ struct octeon_soft_command *sc;
+ struct liquidio_rx_ctl_context *ctx;
+ union octnet_cmd *ncmd;
+ int ctx_size = sizeof(struct liquidio_rx_ctl_context);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ int retval;
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return;
- nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = start_stop;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.netpndev = (u64)lio->netdev;
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, ctx_size);
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ sc->callback = rx_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ } else {
+ /* Sleep on a wait queue until the cond flag indicates that the
+ * response arrived or the wait timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ return;
+ oct->props[lio->ifidx].rx_on = start_stop;
+ }
+
+ octeon_free_soft_command(oct, sc);
}
/**
@@ -1307,21 +1612,24 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
- send_rx_ctrl_cmd(lio, 0);
-
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
- txqs_stop(netdev);
+ liquidio_stop(netdev);
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
}
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_link_status_change_wq(netdev);
+
delete_glists(lio);
free_netdev(netdev);
@@ -1374,6 +1682,9 @@ static void liquidio_remove(struct pci_dev *pdev)
dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+ if (oct_dev->watchdog_task)
+ kthread_stop(oct_dev->watchdog_task);
+
if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
liquidio_stop_nic_module(oct_dev);
@@ -1417,6 +1728,12 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
s = "CN66XX";
break;
+ case OCTEON_CN23XX_PCIID_PF:
+ oct->chip_id = OCTEON_CN23XX_PF_VID;
+ ret = setup_cn23xx_octeon_pf_device(oct);
+ s = "CN23XX";
+ break;
+
default:
s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
@@ -1867,7 +2184,7 @@ static void if_cfg_callback(struct octeon_device *oct,
struct liquidio_if_cfg_context *ctx;
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
@@ -2060,11 +2377,14 @@ static void napi_schedule_wrapper(void *param)
*/
static void liquidio_napi_drv_callback(void *arg)
{
+ struct octeon_device *oct;
struct octeon_droq *droq = arg;
int this_cpu = smp_processor_id();
- if (droq->cpu_id == this_cpu) {
- napi_schedule(&droq->napi);
+ oct = droq->oct_dev;
+
+ if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
+ napi_schedule_irqoff(&droq->napi);
} else {
struct call_single_data *csd = &droq->csd;
@@ -2173,17 +2493,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
lio->ifidx), NULL);
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime DROQ(RxQ) creation failed.\n",
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
__func__);
return 1;
}
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- dev_dbg(&octeon_dev->pci_dev->dev,
- "netif_napi_add netdev:%llx oct:%llx\n",
- (u64)netdev,
- (u64)octeon_dev);
+ dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
+ (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
@@ -2195,6 +2513,14 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
}
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ /* 23XX PF can receive control messages (via the first PF-owned
+ * droq) from the firmware even if the ethX interface is down,
+ * so that's why poll_mode must be off for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+ }
+
/* set up IQs. */
for (q = 0; q < lio->linfo.num_txpciq; q++) {
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
@@ -2235,7 +2561,7 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
* \brief Sets up the txq poll check
* @param netdev network device
*/
-static inline void setup_tx_poll_fn(struct net_device *netdev)
+static inline int setup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -2244,21 +2570,24 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
- return;
+ return -1;
}
INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
octnet_poll_check_txq_status);
lio->txq_status_wq.wk.ctxptr = lio;
queue_delayed_work(lio->txq_status_wq.wq,
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+ return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- destroy_workqueue(lio->txq_status_wq.wq);
+ if (lio->txq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+ }
}
/**
@@ -2276,24 +2605,34 @@ static int liquidio_open(struct net_device *netdev)
napi_enable(napi);
oct->props[lio->ifidx].napi_enabled = 1;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 1;
}
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- setup_tx_poll_fn(netdev);
-
- start_txq(netdev);
+ /* Ready for link status updates */
+ lio->intf_open = 1;
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ } else {
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ }
+
+ start_txq(netdev);
+
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
- /* Ready for link status updates */
- lio->intf_open = 1;
-
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
@@ -2328,7 +2667,12 @@ static int liquidio_stop(struct net_device *netdev)
/* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cleanup_tx_poll_fn(netdev);
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+ } else {
+ cleanup_tx_poll_fn(netdev);
+ }
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
@@ -2340,143 +2684,6 @@ static int liquidio_stop(struct net_device *netdev)
return 0;
}
-void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
-{
- struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
- struct net_device *netdev = (struct net_device *)nctrl->netpndev;
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- u8 *mac;
-
- switch (nctrl->ncmd.s.cmd) {
- case OCTNET_CMD_CHANGE_DEVFLAGS:
- case OCTNET_CMD_SET_MULTI_LIST:
- break;
-
- case OCTNET_CMD_CHANGE_MACADDR:
- mac = ((u8 *)&nctrl->udd[0]) + 2;
- netif_info(lio, probe, lio->netdev,
- "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- "MACAddr changed to", mac[0], mac[1],
- mac[2], mac[3], mac[4], mac[5]);
- break;
-
- case OCTNET_CMD_CHANGE_MTU:
- /* If command is successful, change the MTU. */
- netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param1);
- dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
- netdev->name, netdev->mtu,
- nctrl->ncmd.s.param1);
- rtnl_lock();
- netdev->mtu = nctrl->ncmd.s.param1;
- call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
- rtnl_unlock();
- break;
-
- case OCTNET_CMD_GPIO_ACCESS:
- netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
-
- break;
-
- case OCTNET_CMD_LRO_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_LRO_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ADD_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_DEL_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_SET_SETTINGS:
- dev_info(&oct->pci_dev->dev, "%s settings changed\n",
- netdev->name);
-
- break;
- /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_RX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_RXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_TX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_TXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_VXLAN_PORT_CONFIG:
- if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d ADDED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- } else if (nctrl->ncmd.s.more ==
- OCTNET_CMD_VXLAN_PORT_DEL) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d DELETED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- }
- break;
-
- case OCTNET_CMD_SET_FLOW_CTL:
- netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
- break;
-
- default:
- dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
- nctrl->ncmd.s.cmd);
- }
-}
-
/**
* \brief Converts a mask based on net device flags
* @param netdev network device
@@ -2817,8 +3024,7 @@ static void handle_timestamp(struct octeon_device *oct,
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
- struct octnet_buf_free_info *finfo,
- int xmit_more)
+ struct octnet_buf_free_info *finfo)
{
int retval;
struct octeon_soft_command *sc;
@@ -2846,9 +3052,15 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
+ if (OCTEON_CN23XX_PF(oct))
+ len = (u32)((struct octeon_instr_ih3 *)
+ (&sc->cmd.cmd3.ih3))->dlengsz;
+ else
+ len = (u32)((struct octeon_instr_ih2 *)
+ (&sc->cmd.cmd2.ih2))->dlengsz;
+
+ ring_doorbell = 1;
- ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
@@ -2881,7 +3093,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
union tx_info *tx_info;
int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more, j;
+ int j;
u64 dptr = 0;
u32 tag = 0;
@@ -2980,7 +3192,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
@@ -3055,15 +3270,23 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
- irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
- tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ if (OCTEON_CN23XX_PF(oct)) {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+ } else {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ }
if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
@@ -3077,12 +3300,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
-
if (unlikely(cmdsetup.s.timestamp))
- status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo);
else
- status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ status = octnet_send_nic_data_pkt(oct, &ndata);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
@@ -3190,8 +3411,8 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
* OCTNET_CMD_RXCSUM_DISABLE
* @returns SUCCESS or FAILURE
*/
-int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
- u8 rx_cmd)
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -3249,31 +3470,6 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
return ret;
}
-int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octnic_ctrl_pkt nctrl;
- int ret = 0;
-
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
-
- nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = param1;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
- nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
- ret);
- }
- return ret;
-}
-
/** \brief Net device fix features
* @param netdev pointer to network device
* @param request features requested
@@ -3492,8 +3688,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
- u32 resp_size, ctx_size;
+ u32 resp_size, ctx_size, data_size;
u32 ifidx_or_pfnum;
+ struct lio_version *vdata;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3515,21 +3712,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct liquidio_if_cfg_resp);
ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(octeon_dev, 0,
+ octeon_alloc_soft_command(octeon_dev, data_size,
resp_size, ctx_size);
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
- num_iqueues =
- CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- num_oqueues =
- CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- base_queue =
- CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
- gmx_port_id =
- CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- ifidx_or_pfnum = i;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ num_iqueues = octeon_dev->sriov_info.num_pf_rings;
+ num_oqueues = octeon_dev->sriov_info.num_pf_rings;
+ base_queue = octeon_dev->sriov_info.pf_srn;
+
+ gmx_port_id = octeon_dev->pf_num;
+ ifidx_or_pfnum = octeon_dev->pf_num;
+ } else {
+ num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ base_queue = CFG_GET_BASE_QUE_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ gmx_port_id = CFG_GET_GMXID_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ ifidx_or_pfnum = i;
+ }
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
@@ -3566,7 +3779,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
- sleep_cond(&ctx->wc, &ctx->cond);
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
+ goto setup_nic_wait_intr;
+ }
+
retval = resp->status;
if (retval) {
dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
@@ -3633,12 +3850,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_GRO
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ if (OCTEON_CN23XX_PF(octeon_dev) ||
+ OCTEON_CN6XXX(octeon_dev)) {
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ }
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
@@ -3713,7 +3934,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
- octeon_dev->priv_flags = 0x0;
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
if (netdev->features & NETIF_F_LRO)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
@@ -3725,6 +3949,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev,
OCTNET_CMD_VERBOSE_ENABLE, 0);
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3760,6 +3987,8 @@ setup_nic_dev_fail:
octeon_free_soft_command(octeon_dev, sc);
+setup_nic_wait_intr:
+
while (i--) {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
@@ -3789,8 +4018,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* run port_config command for each port */
oct->ifcount = num_nic_ports;
- memset(oct->props, 0,
- sizeof(struct octdev_props) * num_nic_ports);
+ memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
for (i = 0; i < MAX_OCTEON_LINKS; i++)
oct->props[i].gmxport = -1;
@@ -3806,7 +4034,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
@@ -3818,6 +4046,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3880,6 +4109,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ int fw_loaded = 0;
char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
@@ -3901,9 +4131,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (!cn23xx_fw_loaded(octeon_dev)) {
+ fw_loaded = 0;
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
+ } else {
+ fw_loaded = 1;
+ }
+ } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
return 1;
+ }
/* Initialize the dispatch mechanism used to push packets arriving on
* Octeon Output queues.
@@ -3925,6 +4169,22 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_set_io_queues_off(octeon_dev);
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
+ return ret;
+ }
+ }
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
/* Setup the data structures that manage this Octeon's Input queues. */
if (octeon_setup_instr_queues(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3936,14 +4196,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
- /* Initialize soft command buffer pool
- */
- if (octeon_setup_sc_buffer_pool(octeon_dev)) {
- dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
- return 1;
- }
- atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
-
/* Initialize lists to manage the requests of different types that
* arrive from user & kernel applications for this octeon device.
*/
@@ -3963,15 +4215,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
- /* The input and output queue registers were setup earlier (the queues
- * were not enabled). Any additional registers that need to be
- * programmed should be done now.
- */
- ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "Failed to configure device registers\n");
- return ret;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (octeon_allocate_ioq_vector(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return 1;
+ }
+
+ } else {
+ /* The input and output queue registers were setup earlier (the
+ * queues were not enabled). Any additional registers
+ * that need to be programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
}
/* Initialize the tasklet that handles output queue packet processing.*/
@@ -3985,63 +4245,76 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
/* Enable Octeon device interrupts */
- octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
/* Enable the input and output queues for this Octeon device */
- octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
+ return ret;
+ }
atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
-
- if (ddr_timeout == 0)
- dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+ if (!ddr_timeout) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
- schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
- /* Wait for the octeon to initialize DDR after the soft-reset. */
- while (ddr_timeout == 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ / 10)) {
- /* user probably pressed Control-C */
+ /* Wait for the octeon to initialize DDR after the soft-reset.*/
+ while (!ddr_timeout) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
return 1;
}
- }
- ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
- ret);
- return 1;
- }
- if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
- dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
- return 1;
- }
+ if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
- /* Divert uboot to take commands from host instead. */
- ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+ /* Divert uboot to take commands from host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
- dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
- ret = octeon_init_consoles(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
- return 1;
- }
- ret = octeon_add_console(octeon_dev, 0);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
- return 1;
- }
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
- atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
- ret = load_firmware(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
- return 1;
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+ /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
+ * loaded
+ */
+ if (OCTEON_CN23XX_PF(octeon_dev))
+ octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
+ 2ULL);
}
handshake[octeon_dev->octeon_id].init_ok = 1;
@@ -4057,7 +4330,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->droq[j]->pkts_credit_reg);
/* Packets can start arriving on the output queues from this point. */
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 199a8b9c7dc5..0d990accb65e 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,24 @@
#include "octeon_config.h"
-#define LIQUIDIO_BASE_VERSION "1.4"
-#define LIQUIDIO_MICRO_VERSION ".1"
#define LIQUIDIO_PACKAGE ""
-#define LIQUIDIO_VERSION "1.4.1"
+#define LIQUIDIO_BASE_MAJOR_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MICRO_VERSION 1
+#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION)
+#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
+ __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
+ "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+
+struct lio_version {
+ u16 major;
+ u16 minor;
+ u16 micro;
+ u16 reserved;
+};
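
The version macros above rely on two-level stringification: the extra level of indirection forces LIQUIDIO_BASE_MAJOR_VERSION and friends to expand to their numeric values before being turned into string literals and pasted together. A minimal userspace sketch of the same expansion (the local __stringify stands in for the kernel's <linux/stringify.h> helper and is an assumption, not the driver's code):

    #include <stdio.h>

    /* Two-level stringification, mirroring __stringify(). */
    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define LIQUIDIO_PACKAGE ""
    #define LIQUIDIO_BASE_MAJOR_VERSION 1
    #define LIQUIDIO_BASE_MINOR_VERSION 4
    #define LIQUIDIO_BASE_MICRO_VERSION 1
    #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
                                  __stringify(LIQUIDIO_BASE_MINOR_VERSION)
    #define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
                             __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
                             __stringify(LIQUIDIO_BASE_MINOR_VERSION) "." \
                             __stringify(LIQUIDIO_BASE_MICRO_VERSION)

    int main(void)
    {
            /* Prints: base=1.4 full=1.4.1 */
            printf("base=%s full=%s\n", LIQUIDIO_BASE_VERSION, LIQUIDIO_VERSION);
            return 0;
    }
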
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -218,6 +232,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define OCTNET_CMD_ID_ACTIVE 0x1a
+
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -296,6 +313,13 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes (IH2); pki_ih3 adds 8 (IH3) */
+#define LIO_SOFTCMDRESP_IH2 40
+#define LIO_SOFTCMDRESP_IH3 (40 + 8)
+
+#define LIO_PCICMD_O2 24
+#define LIO_PCICMD_O3 (24 + 8)
+
/* Instruction Header(DPI) - for OCTEON-III models */
struct octeon_instr_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -814,6 +838,8 @@ struct oct_link_stats {
#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
#define VITESSE_PHY_GPIO_HIGH 0x2
#define VITESSE_PHY_GPIO_LOW 0x3
+#define LED_IDENTIFICATION_ON 0x1
+#define LED_IDENTIFICATION_OFF 0x0
struct oct_mdio_cmd {
u64 op;
@@ -832,7 +858,7 @@ struct oct_mdio_cmd {
/* intrmod: max. packets to trigger interrupt */
#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
/* intrmod: max. time to trigger interrupt */
#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
/* 66xx:intrmod: min. time to trigger interrupt
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3396e3a8bab..c76556809ed1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -64,6 +64,34 @@
#define DEFAULT_NUM_NIC_PORTS_68XX 4
#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
+#define CN23XX_MAX_RINGS_PER_PF 64
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DB_MIN 1
+#define CN23XX_DB_MAX 8
+#define CN23XX_DB_TIMEOUT 1
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_PKTSPER_INTR 128
+/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
+#define CN23XX_OQ_REFIL_THRESHOLD 128
+
+#define CN23XX_OQ_INTR_PKT 64
+#define CN23XX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_23XX 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+/* PEMs count */
+#define CN23XX_MAX_MACS 4
+
+#define CN23XX_DEF_IQ_INTR_THRESHOLD 32
+#define CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD (64 * 1024)
/* common OCTEON configuration macros */
#define CN6XXX_CFG_IO_QUEUES 32
#define OCTEON_32BYTE_INSTR 32
@@ -92,6 +120,9 @@
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+#define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
+#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
+
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
@@ -140,19 +171,24 @@
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
LIO_210NV, /* Two port, 68xx */
- LIO_410NV /* Four port, 68xx */
+ LIO_410NV, /* Four port, 68xx */
+ LIO_23XX /* 23xx */
};
#define LIO_210SV_NAME "210sv"
#define LIO_210NV_NAME "210nv"
#define LIO_410NV_NAME "410nv"
+#define LIO_23XX_NAME "23xx"
/** Structure to define the configuration attributes for each Input queue.
* Applicable to all Octeon processors
**/
struct octeon_iq_config {
#ifdef __BIG_ENDIAN_BITFIELD
- u64 reserved:32;
+ u64 reserved:16;
+
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
@@ -192,7 +228,10 @@ struct octeon_iq_config {
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
- u64 reserved:32;
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
+
+ u64 reserved:16;
#endif
};
@@ -416,11 +455,15 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
-/* Maximum number of Octeon Output queues */
-#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \
+ CN6XXX_MAX_INPUT_QUEUES)
-#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
-#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \
+ CN6XXX_MAX_OUTPUT_QUEUES)
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index bbb50ea66f16..01a50f3b0c8e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -25,12 +25,13 @@
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/crc32.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_main.h"
+#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -40,6 +41,10 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -177,6 +182,15 @@ struct octeon_pci_console_desc {
__cvmx_bootmem_desc_get(oct, addr, \
offsetof(struct cvmx_bootmem_named_block_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
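
console_bitmask is interpreted one bit per console: bit N redirects console N's debug output to syslog, so loading the module with console_bitmask=0x3 would cover consoles 0 and 1. A standalone sketch of the same bit test (names here are illustrative):

    #include <stdio.h>

    static int console_debug_enabled(unsigned int mask, unsigned int console)
    {
            return (mask >> console) & 0x1;
    }

    int main(void)
    {
            unsigned int mask = 0x3;   /* consoles 0 and 1 */

            /* Prints: 1 1 0 */
            printf("%d %d %d\n",
                   console_debug_enabled(mask, 0),
                   console_debug_enabled(mask, 1),
                   console_debug_enabled(mask, 2));
            return 0;
    }
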
/**
* This function is the implementation of the get macros defined
@@ -709,3 +723,104 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
return bytes_to_read;
}
+
+#define FBUF_SIZE (4 * 1024 * 1024)
+u8 fbuf[FBUF_SIZE];
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p = fbuf;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i, rem;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION,
+ h->version + strlen(LIQUIDIO_PACKAGE));
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ data += sizeof(struct octeon_firmware_file_header);
+
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
+
+ /* Write in 4MB chunks*/
+ rem = image_len;
+
+ while (rem) {
+ if (rem < FBUF_SIZE)
+ size = rem;
+ else
+ size = FBUF_SIZE;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
+ }
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+ return 0;
+}
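
The header check above computes a CRC over the whole firmware file header except its trailing crc32 field and compares it with the stored value; the image payload is then streamed to the card in 4 MB chunks through the bounce buffer. A hedged userspace sketch of the header check, assuming the kernel's crc32() is the conventional reflected CRC-32 (polynomial 0xEDB88320) with no implicit pre/post conditioning:

    #include <stddef.h>
    #include <stdint.h>

    /* Bitwise reflected CRC-32; seeding with ~0 and xoring the result
     * with ~0, as the driver does, then yields the conventional CRC-32.
     */
    static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
    {
            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320u : crc >> 1;
            }
            return crc;
    }

    /* Verify a header whose last 32-bit word holds the expected CRC of
     * everything that precedes it - the layout checked above.
     */
    static int header_crc_ok(const uint8_t *hdr, size_t hdr_len, uint32_t expected)
    {
            uint32_t crc = crc32_le_sketch(~0u, hdr, hdr_len - sizeof(uint32_t)) ^ ~0u;

            return crc == expected;
    }
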
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 0eb504a4379a..586b68899b06 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -20,7 +20,6 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
-#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
@@ -32,8 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
+#include "cn23xx_pf_device.h"
/** Default configuration
* for CN66XX OCTEON Models.
@@ -420,6 +418,108 @@ static struct octeon_config default_cn68xx_210nv_conf = {
,
};
+static struct octeon_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
+ CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN23XX_DB_MIN,
+ .db_timeout = CN23XX_DB_TIMEOUT,
+ .iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN23XX_OQ_INTR_PKT,
+ .oq_intr_time = CN23XX_OQ_INTR_TIME,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size; we need not change the buf size even for jumbo frames.
+ * Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size; we need not change the buf size even for jumbo frames.
+ * Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+};
+
enum {
OCTEON_CONFIG_TYPE_DEFAULT = 0,
NUM_OCTEON_CONFS,
@@ -487,6 +587,8 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
} else if ((oct->chip_id == OCTEON_CN68XX) &&
(card_type == LIO_410NV)) {
ret = (void *)&default_cn68xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ ret = (void *)&default_cn23xx_conf;
}
break;
default:
@@ -501,7 +603,8 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
case OCTEON_CN66XX:
case OCTEON_CN68XX:
return lio_validate_cn6xxx_config_info(oct, conf);
-
+ case OCTEON_CN23XX_PF_VID:
+ return 0;
default:
break;
}
@@ -541,107 +644,6 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
-u8 fbuf[4 * 1024 * 1024];
-
-int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
- size_t size)
-{
- int ret = 0;
- u8 *p = fbuf;
- u32 crc32_result;
- u64 load_addr;
- u32 image_len;
- struct octeon_firmware_file_header *h;
- u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
- char *base;
-
- if (size < sizeof(struct octeon_firmware_file_header)) {
- dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
- (u32)size,
- (u32)sizeof(struct octeon_firmware_file_header));
- return -EINVAL;
- }
-
- h = (struct octeon_firmware_file_header *)data;
-
- if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
- dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
- return -EINVAL;
- }
-
- crc32_result = crc32((unsigned int)~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->crc32)) {
- dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
- crc32_result, be32_to_cpu(h->crc32));
- return -EINVAL;
- }
-
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- base = h->version + strlen(LIQUIDIO_PACKAGE);
- ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
- if (ret) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
- LIQUIDIO_BASE_VERSION, base);
- return -EINVAL;
- }
-
- if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
- dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
- be32_to_cpu(h->num_images));
- return -EINVAL;
- }
-
- dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
- h->version);
-
- data += sizeof(struct octeon_firmware_file_header);
-
- dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
- be32_to_cpu(h->num_images));
- /* load all images */
- for (i = 0; i < be32_to_cpu(h->num_images); i++) {
- load_addr = be64_to_cpu(h->desc[i].addr);
- image_len = be32_to_cpu(h->desc[i].len);
-
- dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
- image_len, load_addr);
-
- /* Write in 4MB chunks*/
- rem = image_len;
-
- while (rem) {
- if (rem < (4 * 1024 * 1024))
- size = rem;
- else
- size = 4 * 1024 * 1024;
-
- memcpy(p, data, size);
-
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
-
- data += size;
- rem -= (u32)size;
- load_addr += size;
- }
- }
- dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
- h->bootcmd);
-
- /* Invoke the bootcmd */
- ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-
- return 0;
-}
-
void octeon_free_device_mem(struct octeon_device *oct)
{
int i;
@@ -676,6 +678,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
configsize = sizeof(struct octeon_cn6xxx);
break;
+ case OCTEON_CN23XX_PF_VID:
+ configsize = sizeof(struct octeon_cn23xx_pf);
+ break;
default:
pr_err("%s: Unknown PCI Device: 0x%x\n",
__func__,
@@ -741,6 +746,45 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+int
+octeon_allocate_ioq_vector(struct octeon_device *oct)
+{
+ int i, num_ioqs = 0;
+ struct octeon_ioq_vector *ioq_vector;
+ int cpu_num;
+ int size;
+
+ if (OCTEON_CN23XX_PF(oct))
+ num_ioqs = oct->sriov_info.num_pf_rings;
+ size = sizeof(struct octeon_ioq_vector) * num_ioqs;
+
+ oct->ioq_vector = vmalloc(size);
+ if (!oct->ioq_vector)
+ return 1;
+ memset(oct->ioq_vector, 0, size);
+ for (i = 0; i < num_ioqs; i++) {
+ ioq_vector = &oct->ioq_vector[i];
+ ioq_vector->oct_dev = oct;
+ ioq_vector->iq_index = i;
+ ioq_vector->droq_index = i;
+
+ cpu_num = i % num_online_cpus();
+ cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
+
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID)
+ ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
+ else
+ ioq_vector->ioq_num = i;
+ }
+ return 0;
+}
+
+void
+octeon_free_ioq_vector(struct octeon_device *oct)
+{
+ vfree(oct->ioq_vector);
+}
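
octeon_allocate_ioq_vector() sets up one bookkeeping entry per PF ring, spreading the affinity hints round-robin across online CPUs and offsetting the ring number by pf_srn on CN23XX; in the kernel the vmalloc()+memset() pair could equally be a single vzalloc(), which returns zeroed memory. A hypothetical userspace analogue (struct and names are illustrative only):

    #include <stdlib.h>

    struct ioq_vector_sketch {
            int iq_index;
            int droq_index;
            int ioq_num;
            int cpu;
    };

    static struct ioq_vector_sketch *alloc_ioq_vectors(int num_ioqs, int ncpus,
                                                       int first_ring)
    {
            struct ioq_vector_sketch *v = calloc(num_ioqs, sizeof(*v));

            if (!v)
                    return NULL;
            for (int i = 0; i < num_ioqs; i++) {
                    v[i].iq_index = i;
                    v[i].droq_index = i;
                    v[i].ioq_num = first_ring + i;  /* pf_srn offset on CN23XX */
                    v[i].cpu = i % ncpus;           /* round-robin affinity hint */
            }
            return v;
    }
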
+
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
@@ -749,10 +793,12 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
union oct_txpciq txpciq;
int numa_node = cpu_to_node(iq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
oct->num_iqs = 0;
@@ -769,6 +815,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
oct->instr_queue[0]->ifidx = 0;
txpciq.u64 = 0;
txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = oct->pfvf_hsword.pkind;
txpciq.s.use_qpg = 0;
txpciq.s.qpg = 0;
if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
@@ -788,14 +835,17 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 oq_no = 0;
int numa_node = cpu_to_node(oq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
}
-
oct->num_oqs = 0;
oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
if (!oct->droq[0])
@@ -812,10 +862,10 @@ int octeon_setup_output_queues(struct octeon_device *oct)
void octeon_set_io_queues_off(struct octeon_device *oct)
{
- /* Disable the i/p and o/p queues for this Octeon. */
-
- octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ if (OCTEON_CN6XXX(oct)) {
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
}
void octeon_set_droq_pkt_op(struct octeon_device *oct,
@@ -825,14 +875,16 @@ void octeon_set_droq_pkt_op(struct octeon_device *oct,
u32 reg_val = 0;
/* Disable the i/p and o/p queues for this Octeon. */
- reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ if (OCTEON_CN6XXX(oct)) {
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
- if (enable)
- reg_val = reg_val | (1 << q_no);
- else
- reg_val = reg_val & (~(1 << q_no));
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ }
}
int octeon_init_dispatch_list(struct octeon_device *oct)
@@ -1019,6 +1071,9 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
if (OCTEON_CN6XXX(oct))
num_nic_ports =
CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf));
if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
@@ -1046,6 +1101,12 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
}
oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
+
+ for (i = 0; i < oct->num_iqs; i++)
+ oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
atomic_set(&oct->status, OCT_DEV_CORE_OK);
@@ -1108,8 +1169,10 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct)) {
default_oct_conf =
(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_FIELD(oct, cn23xx_pf, conf));
}
-
return default_oct_conf;
}
@@ -1141,7 +1204,9 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
* So write MSB first
*/
addrhi = (addr >> 32);
- if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX) ||
+ (oct->chip_id == OCTEON_CN23XX_PF_VID))
addrhi |= 0x00060000;
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
@@ -1185,8 +1250,15 @@ int octeon_mem_access_ok(struct octeon_device *oct)
u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
- access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ if (OCTEON_CN23XX_PF(oct)) {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+ } else {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ }
return access_okay ? 0 : 1;
}
@@ -1226,3 +1298,39 @@ int lio_get_device_id(void *dev)
return octeon_dev->octeon_id;
return -1;
}
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+ u64 instr_cnt;
+ struct octeon_device *oct = NULL;
+
+ /* the whole thing needs to be atomic, ideally */
+ if (droq) {
+ spin_lock_bh(&droq->lock);
+ writel(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ spin_unlock_bh(&droq->lock);
+ oct = droq->oct_dev;
+ }
+ if (iq) {
+ spin_lock_bh(&iq->lock);
+ writel(iq->pkt_in_done, iq->inst_cnt_reg);
+ iq->pkt_in_done = 0;
+ spin_unlock_bh(&iq->lock);
+ oct = iq->oct_dev;
+ }
+ /* Write resend: writing RESEND in SLI_PKTX_CNTS should be enough
+ * to trigger tx interrupts as well, if they are pending.
+ */
+ if (oct && OCTEON_CN23XX_PF(oct)) {
+ if (droq)
+ writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
+ /* we race with firmware here; read and write the IN_DONE_CNTS */
+ else if (iq) {
+ instr_cnt = readq(iq->inst_cnt_reg);
+ writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
+ CN23XX_INTR_RESEND),
+ iq->inst_cnt_reg);
+ }
+ }
+}
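
For a CN23XX instruction queue the resend write must keep the upper 32 bits of the IN_DONE counter register intact while setting only the resend bit, which is why the driver reads the register back before writing. A small sketch of that masking (RESEND_BIT below is an illustrative stand-in; the real value is CN23XX_INTR_RESEND, not shown in this hunk):

    #include <stdint.h>

    #define RESEND_BIT (1ULL << 63)   /* stand-in for CN23XX_INTR_RESEND */

    /* Preserve the upper half of the 64-bit counter, set only the resend bit. */
    static uint64_t resend_value(uint64_t inst_cnt_reg_val)
    {
            return (inst_cnt_reg_val & 0xFFFFFFFF00000000ULL) | RESEND_BIT;
    }
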
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 01edfb404346..da15c2ae9330 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -30,13 +30,19 @@
/** PCI VendorId Device Id */
#define OCTEON_CN68XX_PCIID 0x91177d
#define OCTEON_CN66XX_PCIID 0x92177d
-
+#define OCTEON_CN23XX_PCIID_PF 0x9702177d
/** Driver identifies chips by these Ids, created by clubbing together
* DeviceId+RevisionId; Where Revision Id is not used to distinguish
* between chips, a value of 0 is used for revision id.
*/
#define OCTEON_CN68XX 0x0091
#define OCTEON_CN66XX 0x0092
+#define OCTEON_CN23XX_PF_VID 0x9702
+
+/**RevisionId for the chips */
+#define OCTEON_CN23XX_REV_1_0 0x00
+#define OCTEON_CN23XX_REV_1_1 0x01
+#define OCTEON_CN23XX_REV_2_0 0x80
/** Endian-swap modes supported by Octeon. */
enum octeon_pci_swap_mode {
@@ -46,6 +52,9 @@ enum octeon_pci_swap_mode {
OCTEON_PCI_32BIT_LW_SWAP = 3
};
+#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_ALL_INTR 0xff
+
/*--------------- PCI BAR1 index registers -------------*/
/* BAR1 Mask */
@@ -198,9 +207,9 @@ struct octeon_fn_list {
void (*setup_oq_regs)(struct octeon_device *, u32);
irqreturn_t (*process_interrupt_regs)(void *);
+ u64 (*msix_interrupt_handler)(void *);
int (*soft_reset)(struct octeon_device *);
int (*setup_device_regs)(struct octeon_device *);
- void (*reinit_regs)(struct octeon_device *);
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
@@ -209,10 +218,10 @@ struct octeon_fn_list {
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
- void (*enable_interrupt)(void *);
- void (*disable_interrupt)(void *);
+ void (*enable_interrupt)(struct octeon_device *, u8);
+ void (*disable_interrupt)(struct octeon_device *, u8);
- void (*enable_io_queues)(struct octeon_device *);
+ int (*enable_io_queues)(struct octeon_device *);
void (*disable_io_queues)(struct octeon_device *);
};
@@ -266,11 +275,72 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int rx_on;
int napi_enabled;
int gmxport;
struct net_device *netdev;
};
+#define LIO_FLAG_MSIX_ENABLED 0x1
+#define MSIX_PO_INT 0x1
+#define MSIX_PI_INT 0x2
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** app currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+#else
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+ /** app currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+#endif
+};
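
The PF/VF handshake word packs the DPI pkind, the core and coprocessor clock multipliers, and the application mode into a single 64-bit value, with the bitfield order flipped for big-endian builds. A hedged sketch of the same packing done with explicit shifts, assuming the usual GCC little-endian bitfield layout shown above (pkind in bits 0-7, core ticks in 8-23, coprocessor ticks in 24-39, app_mode in 40-47):

    #include <stdint.h>

    static uint64_t pack_hs_word(uint8_t pkind, uint16_t core_tics_per_us,
                                 uint16_t coproc_tics_per_us, uint8_t app_mode)
    {
            return (uint64_t)pkind |
                   ((uint64_t)core_tics_per_us << 8) |
                   ((uint64_t)coproc_tics_per_us << 24) |
                   ((uint64_t)app_mode << 40);
    }

    static uint8_t hs_word_pkind(uint64_t w)
    {
            return (uint8_t)(w & 0xff);
    }

    static uint8_t hs_word_app_mode(uint64_t w)
    {
            return (uint8_t)((w >> 40) & 0xff);
    }
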
+
+struct octeon_sriov_info {
+ /* Actual rings left for PF device */
+ u32 num_pf_rings;
+
+ /* SRN of PF usable IO queues */
+ u32 pf_srn;
+ /* total pf rings */
+ u32 trs;
+
+};
+
+struct octeon_ioq_vector {
+ struct octeon_device *oct_dev;
+ int iq_index;
+ int droq_index;
+ int vector;
+ struct cpumask affinity_mask;
+ u32 ioq_num;
+};
+
/** The Octeon device.
* Each Octeon device has this structure to represent all its
* components.
@@ -296,7 +366,7 @@ struct octeon_device {
/** Octeon Chip type. */
u16 chip_id;
u16 rev_id;
-
+ u16 pf_num;
/** This device's id - set by the driver. */
u32 octeon_id;
@@ -305,7 +375,6 @@ struct octeon_device {
u16 flags;
#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
/** The state of this device */
atomic_t status;
@@ -395,6 +464,19 @@ struct octeon_device {
void *priv;
+ int num_msix_irqs;
+
+ void *msix_entries;
+
+ struct octeon_sriov_info sriov_info;
+
+ struct octeon_pf_vf_hs_word pfvf_hsword;
+
+ int msix_on;
+
+ /** IOq information of its corresponding MSI-X interrupt. */
+ struct octeon_ioq_vector *ioq_vector;
+
int rx_pause;
int tx_pause;
@@ -402,12 +484,15 @@ struct octeon_device {
/* private flags to control driver-specific features through ethtool */
u32 priv_flags;
+
+ void *watchdog_task;
};
#define OCT_DRV_ONLINE 1
#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
+#define OCTEON_CN23XX_PF(oct) (oct->chip_id == OCTEON_CN23XX_PF_VID)
#define CHIP_FIELD(oct, TYPE, field) \
(((struct octeon_ ## TYPE *)(oct->chip))->field)
@@ -661,13 +746,24 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
/* LiquidIO driver private flags */
enum {
OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
};
-static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
- u32 val)
+#define OCT_PRIV_FLAG_DEFAULT 0x0
+
+static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
+{
+ return !!(octdev->priv_flags & (0x1 << flag));
+}
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev,
+ u32 flag, u32 val)
{
if (val)
octdev->priv_flags |= (0x1 << flag);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index e0afe4c1fd01..f60e5320daf4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -31,6 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -92,22 +93,25 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-/** Check for packets on Droq. This function should be called with
- * lock held.
+/** Check for packets on Droq. This function should be called with lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
+ u32 last_count;
pkt_count = readl(droq->pkts_sent_reg);
- if (pkt_count) {
- atomic_add(pkt_count, &droq->pkts_pending);
- writel(pkt_count, droq->pkts_sent_reg);
- }
- return pkt_count;
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+ /* we shall write to cnts at napi irq enable or end of droq tasklet */
+ if (last_count)
+ atomic_add(last_count, &droq->pkts_pending);
+
+ return last_count;
}
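
octeon_droq_check_hw_for_pkts() now treats the hardware register as a running count: it adds only the delta since the last read into pkts_pending and defers acknowledging the register until lio_enable_irq() or the end of the droq tasklet. A sketch of that delta tracking (struct and names are illustrative):

    #include <stdint.h>

    struct droq_state_sketch {
            uint32_t cached_count;   /* analogue of droq->pkt_count       */
            uint32_t pkts_pending;   /* atomic counter in the real driver */
    };

    static uint32_t check_hw_for_pkts(struct droq_state_sketch *d, uint32_t hw_count)
    {
            uint32_t delta = hw_count - d->cached_count;  /* unsigned wrap is safe */

            d->cached_count = hw_count;
            d->pkts_pending += delta;
            return delta;
    }
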
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
@@ -259,6 +263,11 @@ int octeon_init_droq(struct octeon_device *oct,
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
c_refill_threshold =
(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
} else {
return 1;
}
@@ -564,7 +573,7 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
(unsigned int)rh->r.opcode,
(unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
- } /* else (dispatch_fn ... */
+ }
return cnt;
}
@@ -735,16 +744,20 @@ octeon_droq_process_packets(struct octeon_device *oct,
u32 pkt_count = 0, pkts_processed = 0;
struct list_head *tmp, *tmp2;
+ /* Grab the droq lock */
+ spin_lock(&droq->lock);
+
+ octeon_droq_check_hw_for_pkts(droq);
pkt_count = atomic_read(&droq->pkts_pending);
- if (!pkt_count)
+
+ if (!pkt_count) {
+ spin_unlock(&droq->lock);
return 0;
+ }
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the droq lock */
- spin_lock(&droq->lock);
-
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
atomic_sub(pkts_processed, &droq->pkts_pending);
@@ -789,6 +802,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
spin_lock(&droq->lock);
while (total_pkts_processed < budget) {
+ octeon_droq_check_hw_for_pkts(droq);
+
pkts_available =
CVM_MIN((budget - total_pkts_processed),
(u32)(atomic_read(&droq->pkts_pending)));
@@ -803,8 +818,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
atomic_sub(pkts_processed, &droq->pkts_pending);
total_pkts_processed += pkts_processed;
-
- octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -874,8 +887,11 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
return 0;
}
break;
+ case OCTEON_CN23XX_PF_VID: {
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ }
+ break;
}
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 5a6fb9113bbd..5be002d5dba4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -261,6 +261,8 @@ struct octeon_droq {
u32 q_no;
+ u32 pkt_count;
+
struct octeon_droq_ops ops;
struct octeon_device *oct_dev;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index ff4b1d6f007b..e4d426ba18dc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -88,6 +88,8 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ u32 pkt_in_done;
+
/** A spinlock to protect access to the input ring.*/
spinlock_t iq_flush_running_lock;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index bc14e4c27332..366298f7bcb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -38,12 +38,26 @@
#define DRV_NAME "LiquidIO"
-/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
+/** This structure is used by the NIC driver to store the information needed
+ * to free the sk_buff once the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
*/
-int octeon_console_debug_enabled(u32 console);
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
@@ -167,22 +181,26 @@ cnnic_numa_alloc_aligned_dma(u32 size,
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
free_pages(orig_ptr, get_order(size))
-static inline void
+static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
+ int errno = 0;
wait_queue_t we;
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current))
+ if (signal_pending(current)) {
+ errno = -EINTR;
goto out;
+ }
schedule();
}
out:
set_current_state(TASK_RUNNING);
remove_wait_queue(wait_queue, &we);
+ return errno;
}
static inline void
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 95a4bbedf557..0dc081a99b30 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index fb820dc7fcb7..e5d1debd05ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -26,8 +26,6 @@
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
-#include <linux/version.h>
-#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
@@ -124,11 +122,21 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
+
+ /* work queue for link status */
+ struct cavium_wq link_status_wq;
+
};
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+#define CIU3_WDOG(c) (0x1010000020000ULL + (c << 3))
+#define CIU3_WDOG_MASK 12ULL
+#define LIO_MONITOR_WDOG_EXPIRE 1
+#define LIO_MONITOR_CORE_STUCK_MSGD 2
+#define LIO_MAX_CORES 12
+
/**
* \brief Enable or disable feature
* @param netdev pointer to network device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 166727be928f..40ac1fe88956 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -36,6 +35,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
u32 rdatasize)
{
struct octeon_soft_command *sc;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -52,10 +52,19 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ /* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ }
irh->rflag = 1; /* a response is required */
@@ -64,7 +73,10 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
*sc->status_word = COMPLETION_WORD_INIT;
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct))
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ else
+ sc->cmd.cmd2.rptr = sc->dmarptr;
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -73,12 +85,9 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
}
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata,
- u32 xmit_more)
+ struct octnic_data_pkt *ndata)
{
- int ring_doorbell;
-
- ring_doorbell = !xmit_more;
+ int ring_doorbell = 1;
return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
ndata->buf, ndata->datasize,
@@ -183,8 +192,8 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
- __func__, nctrl->ncmd.s.cmd, retval);
+ dev_err(&oct->pci_dev->dev, "%s pf_num:%d soft command:%d send failed status: %x\n",
+ __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval);
spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index b71a2bbe4bee..4b8da67b995f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -138,7 +138,7 @@ octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
/* assume that rflag is cleared so therefore front data will only have
* irh and ossp[0], ossp[1] for a total of 32 bytes
*/
- ih2->fsz = 24;
+ ih2->fsz = LIO_PCICMD_O2;
ih2->tagtype = ORDERED_TAG;
ih2->grp = DEFAULT_POW_GRP;
@@ -196,7 +196,7 @@ octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
*/
ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
/*PKI IH*/
- ih3->fsz = 24 + 8;
+ ih3->fsz = LIO_PCICMD_O3;
if (!setup->s.gather) {
ih3->dlengsz = setup->s.u.datasize;
@@ -278,7 +278,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata, u32 xmit_more);
+ struct octnic_data_pkt *ndata);
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index d32492f185ff..90866bb50033 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -30,6 +30,7 @@
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -71,7 +72,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
-
+ else if (OCTEON_CN23XX_PF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
if (!conf) {
dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
oct->chip_id);
@@ -88,6 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+
iq->oct_dev = oct;
set_dev_node(&oct->pci_dev->dev, numa_node);
@@ -181,6 +184,9 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
if (OCTEON_CN6XXX(oct))
desc_size =
CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));
vfree(iq->request_list);
@@ -383,7 +389,12 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (OCTEON_CN23XX_PF(oct))
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd3.irh;
+ else
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -499,6 +510,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
if (!oct)
return;
+
iq = oct->instr_queue[iq_no];
if (!iq)
return;
@@ -514,6 +526,8 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
/* Flush the instruction queue */
octeon_flush_iq(oct, iq, 1, 0);
+
+ lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -580,6 +594,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
{
struct octeon_config *oct_cfg;
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -588,36 +604,88 @@ octeon_prepare_soft_command(struct octeon_device *oct,
oct_cfg = octeon_get_conf(oct);
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- ih2->tagtype = ATOMIC_TAG;
- ih2->tag = LIO_CONTROL;
- ih2->raw = 1;
- ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
-
- if (sc->datasize) {
- ih2->dlengsz = sc->datasize;
- ih2->rs = 1;
- }
-
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- irh->opcode = opcode;
- irh->subcode = subcode;
-
- /* opcode/subcode specific parameters (ossp) */
- irh->ossp = irh_ossp;
- sc->cmd.cmd2.ossp[0] = ossp0;
- sc->cmd.cmd2.ossp[1] = ossp1;
-
- if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rdp->pcie_port = oct->pcie_port;
- rdp->rlen = sc->rdatasize;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = ATOMIC_TAG;
+ pki_ih3->qpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /*PKI IH3*/
+ /* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ irh->rflag = 0;
+ /*PKI IH3*/
+ /* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
+ ih3->fsz = LIO_PCICMD_O3;
+ }
- irh->rflag = 1;
- ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
- irh->rflag = 0;
- ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ } else {
+ irh->rflag = 0;
+ /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = LIO_PCICMD_O2;
+ }
}
}
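
The branch above selects the front-data size (fsz) from the four constants introduced earlier in this patch: 40 bytes with a response and 24 without for the IH2 path, 48 and 32 for the IH3 path, the extra 8 bytes being the PKI IH3 word. A hedged helper summarising that selection (illustrative only, not driver code):

    static unsigned int soft_cmd_fsz(int is_cn23xx_pf, int wants_response)
    {
            if (is_cn23xx_pf)
                    return wants_response ? 48 : 32;  /* LIO_SOFTCMDRESP_IH3 / LIO_PCICMD_O3 */
            return wants_response ? 40 : 24;          /* LIO_SOFTCMDRESP_IH2 / LIO_PCICMD_O2 */
    }
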
@@ -625,23 +693,39 @@ int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- if (ih2->dlengsz) {
- WARN_ON(!sc->dmadptr);
- sc->cmd.cmd2.dptr = sc->dmadptr;
- }
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- if (irh->rflag) {
- WARN_ON(!sc->dmarptr);
- WARN_ON(!sc->status_word);
- *sc->status_word = COMPLETION_WORD_INIT;
-
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+ len = (u32)ih3->dlengsz;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+ }
+ len = (u32)ih2->dlengsz;
}
- len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 709049e36627..be52178d8cb6 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -91,8 +91,13 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rptr = sc->cmd.cmd2.rptr;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rptr = sc->cmd.cmd3.rptr;
+ } else {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
+ }
status = OCTEON_REQUEST_PENDING;
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
index 5c4615ccaa14..6b4d4add7353 100644
--- a/drivers/net/ethernet/cavium/thunder/Makefile
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -2,6 +2,7 @@
# Makefile for Cavium's Thunder ethernet device
#
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e29815d9e6f4..30426109711c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -20,6 +20,17 @@
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
+
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4
@@ -41,40 +52,8 @@
/* Max pkinds */
#define NIC_MAX_PKIND 16
-/* Rx Channels */
-/* Receive channel configuration in TNS bypass mode
- * Below is configuration in TNS bypass mode
- * BGX0-LMAC0-CHAN0 - VNIC CHAN0
- * BGX0-LMAC1-CHAN0 - VNIC CHAN16
- * ...
- * BGX1-LMAC0-CHAN0 - VNIC CHAN128
- * ...
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174
- */
-#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */
-#define NIC_CHANS_PER_INF 128
-#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
-#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
-
-/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
-#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
-#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
-#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
-#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
-
-/* Tx scheduling */
-#define NIC_MAX_TL4 1024
-#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
-#define NIC_MAX_TL3 256
-#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
-#define NIC_MAX_TL2 64
-#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
-#define NIC_MAX_TL1 2
-
-/* TNS bypass mode */
-#define NIC_TL2_PER_BGX 32
-#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
-#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC 64
/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
@@ -148,7 +127,6 @@ struct nicvf_cq_poll {
struct napi_struct napi;
};
-#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
@@ -273,6 +251,7 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
u8 vf_id;
@@ -326,7 +305,7 @@ struct nicvf {
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
- char irq_name[NIC_VF_MSIX_VECTORS][20];
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
@@ -369,6 +348,7 @@ struct nicvf {
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -485,6 +465,31 @@ struct set_loopback {
bool enable;
};
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ u8 msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ u16 rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ u8 tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ u16 rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ u16 sq_stat_mask;
+};
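A minimal usage sketch for this message (illustrative only: the helper name
is hypothetical; the mailbox plumbing and bit layout are the ones added
elsewhere in this patch). It clears all of a VF's Rx counters plus only
RQ0's two queue counters:

	static void example_clear_rq0_stats(struct nicvf *nic)
	{
		union nic_mbx mbx = {};

		mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
		mbx.reset_stat.rx_stat_mask = (1 << 14) - 1;	/* RX_STAT(0..13) */
		mbx.reset_stat.rq_stat_mask = BIT(0) | BIT(1);	/* RQ0_STAT(0..1) */
		nicvf_send_msg_to_pf(nic, &mbx);
	}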
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -502,6 +507,7 @@ union nic_mbx {
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
};
#define NIC_NODE_ID_MASK 0x03
@@ -515,7 +521,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
static inline bool pass1_silicon(struct pci_dev *pdev)
{
- return pdev->revision < 8;
+ return (pdev->revision < 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+ return (pdev->revision >= 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 85cc782b9060..2bbf4cbf08b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -20,8 +20,25 @@
#define DRV_NAME "thunder-nic"
#define DRV_VERSION "1.0"
+struct hw_info {
+ u8 bgx_cnt;
+ u8 chans_per_lmac;
+ u8 chans_per_bgx; /* Rx/Tx chans */
+ u8 chans_per_rgx;
+ u8 chans_per_lbk;
+ u16 cpi_cnt;
+ u16 rssi_cnt;
+ u16 rss_ind_tbl_size;
+ u16 tl4_cnt;
+ u16 tl3_cnt;
+ u8 tl2_cnt;
+ u8 tl1_cnt;
+ bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
+};
+
struct nicpf {
struct pci_dev *pdev;
+ struct hw_info *hw;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
@@ -36,22 +53,22 @@ struct nicpf {
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
- u8 vf_lmac_map[MAX_LMAC];
+ u8 *vf_lmac_map;
struct delayed_work dwork;
struct workqueue_struct *check_link;
- u8 link[MAX_LMAC];
- u8 duplex[MAX_LMAC];
- u32 speed[MAX_LMAC];
+ u8 *link;
+ u8 *duplex;
+ u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
bool msix_enabled;
u8 num_vec;
- struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ struct msix_entry *msix_entries;
bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ char irq_name[NIC_PF_MSIX_VECTORS][20];
};
/* Supported devices */
@@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset)
/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
- /* Enable mailbox interrupt for all 128 VFs */
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+ int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
+
+#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
+
+ /* Clear it, to avoid spurious interrupts (if any) */
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
+
+ /* Enable mailbox interrupt for all VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
+ /* One mailbox intr enable reg per 64 VFs */
+ if (vf_cnt > 64) {
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ }
}
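A worked example of the INTR_MASK() macro above (VF counts assumed for
illustration): with 8 VFs only the low eight mailbox interrupt bits get
enabled; with 128 VFs, as on 88xx, both 64-VF registers end up fully set.

	INTR_MASK(8)        /* BIT_ULL(8) - 1 = 0x00000000000000ff, VFs 0..7    */
	INTR_MASK(128)      /* 128 >= 64     -> 0xffffffffffffffff, VFs 0..63   */
	INTR_MASK(128 - 64) /* 64 >= 64      -> 0xffffffffffffffff, VFs 64..127 */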
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
@@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
@@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
mbx.nic_cfg.node_id = nic->node;
- mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+ mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -248,7 +278,8 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
- int lmac;
+ int lmac, max_lmac;
+ u16 sdevid;
u64 lmac_cfg;
/* There is an issue in HW wherein while sending GSO sized
@@ -260,7 +291,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
if (size > 52)
size = 52;
- for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ /* 81xx's RGX has only one LMAC */
+ if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
+ max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+ else
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ for (lmac = 0; lmac < max_lmac; lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
lmac_cfg &= ~(0xF << 2);
lmac_cfg |= ((size / 4) << 2);
@@ -280,7 +318,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->num_vf_en = 0;
- for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
if (!(bgx_map & (1 << bgx)))
continue;
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
@@ -300,28 +338,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic_reg_write(nic,
NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
lmac_credit);
+
+ /* On CN81XX there are only 8 VFs, but the max possible number
+ * of interfaces is 9.
+ */
+ if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
+ nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
+ break;
+ }
}
}
+static void nic_free_lmacmem(struct nicpf *nic)
+{
+ kfree(nic->vf_lmac_map);
+ kfree(nic->link);
+ kfree(nic->duplex);
+ kfree(nic->speed);
+}
+
+static int nic_get_hw_info(struct nicpf *nic)
+{
+ u8 max_lmac;
+ u16 sdevid;
+ struct hw_info *hw = nic->hw;
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN88XX;
+ hw->chans_per_lmac = 16;
+ hw->chans_per_bgx = 128;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 4096;
+ hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 2;
+ hw->tl1_per_bgx = true;
+ break;
+ case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN81XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_rgx = 8;
+ hw->chans_per_lbk = 24;
+ hw->cpi_cnt = 512;
+ hw->rssi_cnt = 256;
+ hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 64;
+ hw->tl2_cnt = 16;
+ hw->tl1_cnt = 10;
+ hw->tl1_per_bgx = false;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN83XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_lbk = 64;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 1024;
+ hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 18;
+ hw->tl1_per_bgx = false;
+ break;
+ }
+ hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
+
+ /* Allocate memory for LMAC tracking elements */
+ max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
+ nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->vf_lmac_map)
+ goto error;
+ nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->link)
+ goto error;
+ nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->duplex)
+ goto error;
+ nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
+ if (!nic->speed)
+ goto error;
+ return 0;
+
+error:
+ nic_free_lmacmem(nic);
+ return -ENOMEM;
+}
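With the per-chip numbers above, the runtime-derived TL4 count lines up
with the constant this patch removes from nic.h (a sketch, assuming the
totalvfs values implied by comments in this patch: 128 VFs on 88xx and
8 VFs on 81xx):

	/* 88xx: */ hw->tl4_cnt = MAX_QUEUES_PER_QSET * 128;	/* 8 * 128 = 1024, the old NIC_MAX_TL4 */
	/* 81xx: */ hw->tl4_cnt = MAX_QUEUES_PER_QSET * 8;	/* 8 * 8 = 64 */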
+
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9
-static void nic_init_hw(struct nicpf *nic)
+static int nic_init_hw(struct nicpf *nic)
{
- int i;
+ int i, err;
u64 cqm_cfg;
+ /* Get HW capability info */
+ err = nic_get_hw_info(nic);
+ if (err)
+ return err;
+
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* Disable TNS mode on both interfaces */
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ /* TNS and TNS bypass modes are present only on 88xx */
+ if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ }
+
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
(1ULL << 63) | BGX0_BLOCK);
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
@@ -351,11 +486,14 @@ static void nic_init_hw(struct nicpf *nic)
cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+
+ return 0;
}
/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
+ struct hw_info *hw = nic->hw;
u32 vnic, bgx, lmac, chan;
u32 padd, cpi_count = 0;
u64 cpi_base, cpi, rssi_base, rssi;
@@ -365,9 +503,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
- rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+ rssi_base = vnic * hw->rss_ind_tbl_size;
/* Rx channel configuration */
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
@@ -439,7 +577,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf)
msg = (u64 *)&mbx;
mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
- mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -486,7 +624,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
/* 4 level transmit side scheduler configuration
* for TNS bypass mode
*
- * Sample configuration for SQ0
+ * Sample configuration for SQ0 on 88xx
* VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
* VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
* VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
@@ -499,6 +637,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq)
{
+ struct hw_info *hw = nic->hw;
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
@@ -517,21 +656,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- if (!sq->sqs_mode) {
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
- } else {
- for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
- if (nic->vf_sqs[pqs_vnic][svf] == vnic)
- break;
+ /* On 88xx TL4s 0-511 transmit via BGX0 and
+ * TL4s 512-1023 transmit via BGX1.
+ */
+ if (hw->tl1_per_bgx) {
+ tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+ if (!sq->sqs_mode) {
+ tl4 += (lmac * MAX_QUEUES_PER_QSET);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+ tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
+ tl4 += (svf * MAX_QUEUES_PER_QSET);
}
- tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
- tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
- tl4 += (svf * NIC_TL4_PER_LMAC);
- tl4 += (bgx * NIC_TL4_PER_BGX);
+ } else {
+ tl4 = (vnic * MAX_QUEUES_PER_QSET);
}
tl4 += sq_idx;
- tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
@@ -539,8 +685,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+
+ /* On 88xx channels 0-127 are for BGX0 and
+ * channels 128-255 are for BGX1.
+ *
+ * On 81xx/83xx the TL3_CHAN reg should be configured with the
+ * channel within the LMAC, i.e. 0-7, and not the actual channel
+ * number as on 88xx.
+ */
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ if (hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ else
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
/* Enable backpressure on the channel */
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
@@ -549,6 +706,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
/* No priorities as of now */
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+ /* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to
+ * TL1 '1', on 81xx/83xx a TL2 needs to be configured to transmit
+ * to one of the possible LMACs.
+ *
+ * This register doesn't exist on 88xx.
+ */
+ if (!hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+ lmac + (bgx * MAX_LMAC_PER_BGX));
}
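A worked example of the index math above for 88xx (assuming tl4_cnt = 1024,
bgx_cnt = 2, tl3_cnt = 256, chans_per_lmac = 16 and chans_per_bgx = 128 as
set in nic_get_hw_info()), for a non-sqs VF mapped to BGX1/LMAC2 sending on
SQ0:

	tl4  = 1 * (1024 / 2) + 2 * MAX_QUEUES_PER_QSET + 0;	/* 512 + 16 = 528 */
	tl3  = 528 / (1024 / 256);				/* 132 */
	chan = 2 * 16 + 1 * 128;				/* 160 */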
/* Send primary nicvf pointer to secondary QS's VF */
@@ -620,7 +787,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
int bgx_idx, lmac_idx;
- if (lbk->vf_id > MAX_LMAC)
+ if (lbk->vf_id >= nic->num_vf_en)
return -1;
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
@@ -631,6 +798,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+/* Reset statistics counters */
+static int nic_reset_stat_counters(struct nicpf *nic,
+ int vf, struct reset_stat_cfg *cfg)
+{
+ int i, stat, qnum;
+ u64 reg_addr;
+
+ for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
+ if (cfg->rx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
+ if (cfg->tx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i <= 15; i++) {
+ qnum = i >> 1;
+ stat = i & 1 ? 1 : 0;
+ reg_addr = (vf << NIC_QS_ID_SHIFT) |
+ (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
+ if (cfg->rq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ if (cfg->sq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+ return 0;
+}
+
+static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
+{
+ u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
+ u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
+ (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
+
+ /* Configure tunnel parsing parameters */
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
+ (1ULL << 63 | UDP_GENEVE_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
+ ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
+ ((0xfULL << 60) | vxlan_prot_def));
+}
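For reference, the packed values written above, computed from the protocol
defines this patch adds to nic_reg.h:

	prot_def       = (0x86DDULL << 32) | (0x800ULL << 16) | 0x6558ULL;	/* 0x000086dd08006558 */
	vxlan_prot_def = (0x2ULL << 32) | (0x1ULL << 16) | 0x3ULL;		/* 0x0000000200010003 */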
+
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
int bgx, lmac;
@@ -669,18 +897,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx_addr += sizeof(u64);
}
- dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
__func__, mbx.msg.msg, vf);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
nic->link[vf] = 0;
nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
- ret = 1;
- break;
+ goto unlock;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -698,6 +925,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ /* Enable CQE_RX2_S extension in the CQE_RX descriptor.
+ * It is appended by default on 81xx/83xx chips; for consistency,
+ * enable the same on 88xx pass2, where the extension was
+ * introduced.
+ */
+ if (pass2_silicon(nic->pdev))
+ nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+ if (!pass1_silicon(nic->pdev))
+ nic_enable_tunnel_parsing(nic, vf);
break;
case NIC_MBOX_MSG_RQ_BP_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
@@ -722,8 +958,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
- if (vf >= nic->num_vf_en)
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
break;
+ }
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -772,25 +1010,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
+ case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+ ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
break;
}
- if (!ret)
+ if (!ret) {
nic_mbx_send_ack(nic, vf);
- else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
+ dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
+ mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
+ }
unlock:
nic->mbx_lock[vf] = false;
}
-static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
+static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+ int mbx;
u64 intr;
u8 vf, vf_per_mbx_reg = 64;
+ if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+ mbx = 0;
+ else
+ mbx = 1;
+
intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
for (vf = 0; vf < vf_per_mbx_reg; vf++) {
@@ -802,23 +1053,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
nic_clear_mbx_intr(nic, vf, mbx);
}
}
-}
-
-static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 0);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 1);
-
return IRQ_HANDLED;
}
@@ -826,7 +1060,13 @@ static int nic_enable_msix(struct nicpf *nic)
{
int i, ret;
- nic->num_vec = NIC_PF_MSIX_VECTORS;
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+
+ nic->msix_entries = kmalloc_array(nic->num_vec,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic->msix_entries)
+ return -ENOMEM;
for (i = 0; i < nic->num_vec; i++)
nic->msix_entries[i].entry = i;
@@ -834,8 +1074,9 @@ static int nic_enable_msix(struct nicpf *nic)
ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
if (ret) {
dev_err(&nic->pdev->dev,
- "Request for #%d msix vectors failed\n",
- nic->num_vec);
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ kfree(nic->msix_entries);
return ret;
}
@@ -847,6 +1088,7 @@ static void nic_disable_msix(struct nicpf *nic)
{
if (nic->msix_enabled) {
pci_disable_msix(nic->pdev);
+ kfree(nic->msix_entries);
nic->msix_enabled = 0;
nic->num_vec = 0;
}
@@ -865,27 +1107,26 @@ static void nic_free_all_interrupts(struct nicpf *nic)
static int nic_register_interrupts(struct nicpf *nic)
{
- int ret;
+ int i, ret;
/* Enable MSI-X */
ret = nic_enable_msix(nic);
if (ret)
return ret;
- /* Register mailbox interrupt handlers */
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
- nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
- if (ret)
- goto fail;
-
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+ /* Register mailbox interrupt handler */
+ for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
+ sprintf(nic->irq_name[i],
+ "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
- nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
- if (ret)
- goto fail;
+ ret = request_irq(nic->msix_entries[i].vector,
+ nic_mbx_intr_handler, 0,
+ nic->irq_name[i], nic);
+ if (ret)
+ goto fail;
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+ nic->irq_allocated[i] = true;
+ }
/* Enable mailbox interrupt */
nic_enable_mbx_intr(nic);
@@ -894,6 +1135,7 @@ static int nic_register_interrupts(struct nicpf *nic)
fail:
dev_err(&nic->pdev->dev, "Request irq failed\n");
nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
return ret;
}
@@ -908,6 +1150,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
u16 total_vf;
+ /* Secondary Qsets are needed only if the CPU count is
+ * more than MAX_QUEUES_PER_QSET.
+ */
+ if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
+ return 0;
+
/* Check if its a multi-node environment */
if (nr_node_ids > 1)
sqs_per_vf = MAX_SQS_PER_VF;
@@ -1013,6 +1261,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic)
return -ENOMEM;
+ nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
+ if (!nic->hw) {
+ devm_kfree(dev, nic);
+ return -ENOMEM;
+ }
+
pci_set_drvdata(pdev, nic);
nic->pdev = pdev;
@@ -1052,13 +1306,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->node = nic_get_node_id(pdev);
- nic_set_lmac_vf_mapping(nic);
-
/* Initialize hardware */
- nic_init_hw(nic);
+ err = nic_init_hw(nic);
+ if (err)
+ goto err_release_regions;
- /* Set RSS TBL size for each VF */
- nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ nic_set_lmac_vf_mapping(nic);
/* Register interrupts */
err = nic_register_interrupts(nic);
@@ -1091,6 +1344,9 @@ err_unregister_interrupts:
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
+ nic_free_lmacmem(nic);
+ devm_kfree(dev, nic->hw);
+ devm_kfree(dev, nic);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
@@ -1111,6 +1367,11 @@ static void nic_remove(struct pci_dev *pdev)
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
+
+ nic_free_lmacmem(nic);
+ devm_kfree(&pdev->dev, nic->hw);
+ devm_kfree(&pdev->dev, nic);
+
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index fab35a593898..edf779f5a227 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -36,6 +36,20 @@
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_RX_GENEVE_DEF (0x0580)
+#define UDP_GENEVE_PORT_NUM 0x17C1ULL
+#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588)
+#define IPV6_PROT 0x86DDULL
+#define IPV4_PROT 0x800ULL
+#define ET_PROT 0x6558ULL
+#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598)
+#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0)
+#define UDP_VXLAN_PORT_NUM 0x12B5
+#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0)
+#define IPV6_PROT_DEF 0x2ULL
+#define IPV4_PROT_DEF 0x1ULL
+#define ET_PROT_DEF 0x3ULL
+#define NIC_PF_RX_CFG (0x05D0)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
@@ -103,6 +117,7 @@
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_LMAC (0x540000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3240349615bd..45a13f718863 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -29,10 +29,20 @@
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA134) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_NIC_VF) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_NIC_VF) },
{ 0, } /* end of table */
};
@@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
- if (nic->pf_nacked)
+ if (nic->pf_nacked) {
+ netdev_err(nic->netdev,
+ "PF NACK to mbox msg 0x%02x from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
return -EINVAL;
+ }
msleep(sleep);
if (nic->pf_acked)
break;
timeout -= sleep;
if (!timeout) {
netdev_err(nic->netdev,
- "PF didn't ack to mbox msg %d from VF%d\n",
+ "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
return -EBUSY;
}
@@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic)
rss->enable = true;
- /* Using the HW reset value for now */
- rss->key[0] = 0xFEED0BADFEED0BADULL;
- rss->key[1] = 0xFEED0BADFEED0BADULL;
- rss->key[2] = 0xFEED0BADFEED0BADULL;
- rss->key[3] = 0xFEED0BADFEED0BADULL;
- rss->key[4] = 0xFEED0BADFEED0BADULL;
-
+ netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
@@ -507,7 +515,9 @@ static int nicvf_init_resources(struct nicvf *nic)
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cmp_queue *cq,
- struct cqe_send_t *cqe_tx, int cqe_type)
+ struct cqe_send_t *cqe_tx,
+ int cqe_type, int budget,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -538,7 +548,9 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
- dev_consume_skb_any(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -653,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
loop:
@@ -691,7 +704,8 @@ loop:
break;
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
- (void *)cq_desc, CQE_TYPE_SEND);
+ (void *)cq_desc, CQE_TYPE_SEND,
+ budget, &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -720,6 +734,9 @@ done:
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
@@ -933,16 +950,19 @@ static int nicvf_register_interrupts(struct nicvf *nic)
int vector;
for_each_cq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
- nic->vf_id, irq);
+ sprintf(nic->irq_name[irq], "%s-rxtx-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq));
for_each_sq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
- nic->vf_id, irq - NICVF_INTR_ID_SQ);
+ sprintf(nic->irq_name[irq], "%s-sq-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
for_each_rbdr_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
- nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+ sprintf(nic->irq_name[irq], "%s-rbdr-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
/* Register CQ interrupts */
for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
@@ -966,8 +986,9 @@ static int nicvf_register_interrupts(struct nicvf *nic)
}
/* Register QS error interrupt */
- sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
- "NICVF%d Qset error", nic->vf_id);
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
irq = NICVF_INTR_ID_QS_ERR;
ret = request_irq(nic->msix_entries[irq].vector,
nicvf_qs_err_intr_handler,
@@ -1146,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -1196,7 +1220,7 @@ int nicvf_open(struct net_device *netdev)
}
/* Check if we got a MAC address from the PF, else generate a random MAC */
- if (is_zero_ether_addr(netdev->dev_addr)) {
+ if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
eth_hw_addr_random(netdev);
nicvf_hw_set_mac_addr(nic, netdev);
}
@@ -1533,14 +1557,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- qcount = MAX_CMP_QUEUES_PER_QS;
+ qcount = netif_get_num_default_rss_queues();
/* Restrict multiqset support only for host bound VFs */
if (pdev->is_virtfn) {
/* Set max number of queues per VF */
- qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
- qcount = min(qcount,
- (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ qcount = min_t(int, num_online_cpus(),
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
}
netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index dda3ea3f3bb6..a4fc50155881 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
+static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ /* Reset all RXQ's stats */
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
@@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
nic->qs = qs;
/* Set count of each queue */
- qs->rbdr_cnt = RBDR_CNT;
- qs->rq_cnt = RCV_QUEUE_CNT;
- qs->sq_cnt = SND_QUEUE_CNT;
- qs->cq_cnt = CMP_QUEUE_CNT;
+ qs->rbdr_cnt = DEFAULT_RBDR_CNT;
+ qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
+ qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
+ qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
/* Set queue lengths */
qs->rbdr_len = RCV_BUF_COUNT;
@@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
nicvf_free_resources(nic);
}
+ /* Reset RXQ's stats.
+ * SQ's stats will get reset automatically once SQ is reset.
+ */
+ nicvf_reset_rcv_queue_stats(nic);
+
return 0;
}
@@ -1067,6 +1082,24 @@ static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
imm->len = 1;
}
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
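For context, a minimal sketch of the byte-queue-limits pairing this patch
wires up (the wrapper function is illustrative; the three netdev_tx_*
helpers are the standard kernel APIs used in nicvf_sq_doorbell(),
nicvf_cq_intr_handler() and nicvf_stop() respectively):

	static void example_bql_pairing(struct netdev_queue *txq,
					unsigned int pkts, unsigned int bytes)
	{
		/* at xmit/doorbell time: account bytes handed to HW */
		netdev_tx_sent_queue(txq, bytes);

		/* at Tx completion time: release them in one batch */
		if (pkts)
			netdev_tx_completed_queue(txq, pkts, bytes);

		/* on interface down: forget any in-flight accounting */
		netdev_tx_reset_queue(txq);
	}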
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1126,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Save SKB in the last segment for freeing */
sq->skbuff[hdr_qentry] = (u64)skb;
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- /* Inform HW to xmit all TSO segments */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1204,12 +1233,8 @@ doorbell:
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
}
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
- /* Inform HW to xmit new packet */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, subdesc_cnt);
return 1;
append_fail:
@@ -1234,13 +1259,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int frag;
int payload_len = 0;
struct sk_buff *skb = NULL;
- struct sk_buff *skb_frag = NULL;
- struct sk_buff *prev_frag = NULL;
+ struct page *page;
+ int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
- rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ /* Except on 88xx pass1, CQE_RX2_S is added to CQE_RX at word6 on
+ * all chips, hence the buffer pointers move by one word.
+ *
+ * Use the existing 'hw_tso' flag, which is set for all chips
+ * except 88xx pass1, instead of an additional cache line
+ * access (or miss) from reading the PCI dev's revision.
+ */
+ if (!nic->hw_tso)
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ else
+ rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
@@ -1258,22 +1293,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
- payload_len);
- if (!skb_frag) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- if (!skb_shinfo(skb)->frag_list)
- skb_shinfo(skb)->frag_list = skb_frag;
- else
- prev_frag->next = skb_frag;
-
- prev_frag = skb_frag;
- skb->len += payload_len;
- skb->data_len += payload_len;
- skb_frag->len = payload_len;
+ page = virt_to_page(phys_to_virt(*rb_ptrs));
+ offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset, payload_len, RCV_FRAG_LEN);
}
/* Next buffer pointer */
rb_ptrs++;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 6673e1133523..869f3386028b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -57,10 +57,7 @@
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
/* Default queue count per QS, its lengths and threshold values */
-#define RBDR_CNT 1
-#define RCV_QUEUE_CNT 8
-#define SND_QUEUE_CNT 8
-#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+#define DEFAULT_RBDR_CNT 1
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 63a39ac97d53..8bbaedbb7b94 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -28,6 +28,9 @@ struct lmac {
struct bgx *bgx;
int dmac;
u8 mac[ETH_ALEN];
+ u8 lmac_type;
+ u8 lane_to_sds;
+ bool use_training;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -43,14 +46,13 @@ struct lmac {
struct bgx {
u8 bgx_id;
- u8 qlm_mode;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
- int lmac_type;
- int lane_to_sds;
- int use_training;
+ u8 max_lmac;
void __iomem *reg_base;
struct pci_dev *pdev;
+ bool is_dlm;
+ bool is_rgx;
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
@@ -61,6 +63,7 @@ static int bgx_xaui_check_link(struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
{ 0, } /* end of table */
};
@@ -124,8 +127,8 @@ unsigned bgx_get_map(int node)
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
map |= (1 << i);
}
@@ -138,7 +141,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac_count;
@@ -153,7 +156,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -166,7 +169,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -177,7 +180,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -188,11 +191,13 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct lmac *lmac;
u64 cfg;
if (!bgx)
return;
+ lmac = &bgx->lmac[lmacid];
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
if (enable)
@@ -200,6 +205,9 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
else
cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+ xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
@@ -266,9 +274,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
- /* renable lmac */
+ /* Re-enable lmac */
cmr_cfg |= CMR_EN;
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
+ xcv_setup_link(lmac->link_up, lmac->last_speed);
}
static void bgx_lmac_handler(struct net_device *netdev)
@@ -314,7 +325,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -328,7 +339,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -356,7 +367,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -379,8 +390,9 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
-static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
+ int lmacid = lmac->lmacid;
u64 cfg;
bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
@@ -409,18 +421,29 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
- if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
- PCS_MRX_STATUS_AN_CPT, false)) {
- dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
- return -1;
+ if (lmac->lmac_type == BGX_MODE_QSGMII) {
+ /* Disable disparity check for QSGMII */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg &= ~PCS_MISC_CTL_DISP_EN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ return 0;
+ }
+
+ if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
}
return 0;
}
-static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
u64 cfg;
+ int lmacid = lmac->lmacid;
/* Reset SPU */
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
@@ -436,12 +459,14 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
/* Set interleaved running disparity for RXAUI */
- if (bgx->lmac_type != BGX_MODE_RXAUI)
- bgx_reg_modify(bgx, lmacid,
- BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- else
+ if (lmac->lmac_type == BGX_MODE_RXAUI)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
- SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+ SPU_MISC_CTL_INTLV_RDISP);
+
+ /* Clear receive packet disable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
/* clear all interrupts */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
@@ -451,7 +476,7 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
- if (bgx->use_training) {
+ if (lmac->use_training) {
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
@@ -474,9 +499,9 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
- if (bgx->lmac_type == BGX_MODE_10G_KR)
+ if (lmac->lmac_type == BGX_MODE_10G_KR)
cfg |= (1 << 23);
- else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ else if (lmac->lmac_type == BGX_MODE_40G_KR)
cfg |= (1 << 24);
else
cfg &= ~((1 << 23) | (1 << 24));
@@ -511,11 +536,10 @@ static int bgx_xaui_check_link(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
int lmacid = lmac->lmacid;
- int lmac_type = bgx->lmac_type;
+ int lmac_type = lmac->lmac_type;
u64 cfg;
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -556,7 +580,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -584,11 +608,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Clear receive packet disable */
- cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
- cfg &= ~SPU_MISC_CTL_RX_DIS;
- bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-
/* Check for MAC RX faults */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
@@ -599,7 +618,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
/* Rx local/remote fault seen.
* Do lmac reinit to see if condition recovers
*/
- bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+ bgx_lmac_xaui_init(bgx, lmac);
return -1;
}
@@ -623,7 +642,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
- if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
else
lmac->last_speed = 10000;
@@ -649,6 +668,16 @@ static void bgx_poll_for_link(struct work_struct *work)
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
+static int phy_interface_mode(u8 lmac_type)
+{
+ if (lmac_type == BGX_MODE_QSGMII)
+ return PHY_INTERFACE_MODE_QSGMII;
+ if (lmac_type == BGX_MODE_RGMII)
+ return PHY_INTERFACE_MODE_RGMII;
+
+ return PHY_INTERFACE_MODE_SGMII;
+}
+
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
@@ -657,13 +686,15 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
- if (bgx->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) ||
+ (lmac->lmac_type == BGX_MODE_QSGMII) ||
+ (lmac->lmac_type == BGX_MODE_RGMII)) {
lmac->is_sgmii = 1;
- if (bgx_lmac_sgmii_init(bgx, lmacid))
+ if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
lmac->is_sgmii = 0;
- if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -685,10 +716,10 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
/* Restore default cfg, in case low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR)) {
if (!lmac->phydev)
return -ENODEV;
@@ -696,7 +727,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
bgx_lmac_handler,
- PHY_INTERFACE_MODE_SGMII))
+ phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
phy_start_aneg(lmac->phydev);
@@ -753,76 +784,19 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
bgx_flush_dmac_addrs(bgx, lmacid);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
lmac->phydev = NULL;
}
-static void bgx_set_num_ports(struct bgx *bgx)
-{
- u64 lmac_count;
-
- switch (bgx->qlm_mode) {
- case QLM_MODE_SGMII:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_SGMII;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_RXAUI_2X2:
- bgx->lmac_count = 2;
- bgx->lmac_type = BGX_MODE_RXAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_XFI_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_XFI;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XLAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XLAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_10G_KR_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_10G_KR;
- bgx->lane_to_sds = 0;
- bgx->use_training = 1;
- break;
- case QLM_MODE_40G_KR4_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_40G_KR;
- bgx->lane_to_sds = 0xE4;
- bgx->use_training = 1;
- break;
- default:
- bgx->lmac_count = 0;
- break;
- }
-
- /* Check if low level firmware has programmed LMAC count
- * based on board type, if yes consider that otherwise
- * the default static values
- */
- lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
- if (lmac_count != 4)
- bgx->lmac_count = lmac_count;
-}
-
static void bgx_init_hw(struct bgx *bgx)
{
int i;
-
- bgx_set_num_ports(bgx);
+ struct lmac *lmac;
bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
@@ -830,17 +804,9 @@ static void bgx_init_hw(struct bgx *bgx)
/* Set lmac type and lane2serdes mapping */
for (i = 0; i < bgx->lmac_count; i++) {
- if (bgx->lmac_type == BGX_MODE_RXAUI) {
- if (i)
- bgx->lane_to_sds = 0x0e;
- else
- bgx->lane_to_sds = 0x04;
- bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | bgx->lane_to_sds);
- continue;
- }
+ lmac = &bgx->lmac[i];
bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ (lmac->lmac_type << 8) | lmac->lane_to_sds);
bgx->lmac[i].lmacid_bd = lmac_count;
lmac_count++;
}
@@ -863,55 +829,212 @@ static void bgx_init_hw(struct bgx *bgx)
bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
-static void bgx_get_qlm_mode(struct bgx *bgx)
+static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
+{
+ return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
+}
+
+static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
struct device *dev = &bgx->pdev->dev;
- int lmac_type;
- int train_en;
+ struct lmac *lmac;
+ char str[20];
+ u8 dlm;
- /* Read LMAC0 type to figure out QLM mode
- * This is configured by low level firmware
- */
- lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
- lmac_type = (lmac_type >> 8) & 0x07;
+ if (lmacid > bgx->max_lmac)
+ return;
- train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
- SPU_PMD_CRTL_TRAIN_EN;
+ lmac = &bgx->lmac[lmacid];
+ dlm = (lmacid / 2) + (bgx->bgx_id * 2);
+ if (!bgx->is_dlm)
+ sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
+ else
+ sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
- switch (lmac_type) {
+ switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ dev_info(dev, "%s: SGMII\n", (char *)str);
break;
case BGX_MODE_XAUI:
- bgx->qlm_mode = QLM_MODE_XAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: XAUI\n", (char *)str);
break;
case BGX_MODE_RXAUI:
- bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
- dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: RXAUI\n", (char *)str);
break;
case BGX_MODE_XFI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XFI_4X1;
- dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
- dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XFI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 10G_KR\n", (char *)str);
break;
case BGX_MODE_XLAUI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
- dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XLAUI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 40G_KR4\n", (char *)str);
+ break;
+ case BGX_MODE_QSGMII:
+ if ((lmacid == 0) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
+ return;
+ if ((lmacid == 2) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
+ return;
+ dev_info(dev, "%s: QSGMII\n", (char *)str);
+ break;
+ case BGX_MODE_RGMII:
+ dev_info(dev, "%s: RGMII\n", (char *)str);
+ break;
+ case BGX_MODE_INVALID:
+ /* Nothing to do */
+ break;
+ }
+}
+
+static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
+{
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ case BGX_MODE_XFI:
+ lmac->lane_to_sds = lmac->lmacid;
+ break;
+ case BGX_MODE_XAUI:
+ case BGX_MODE_XLAUI:
+ case BGX_MODE_RGMII:
+ lmac->lane_to_sds = 0xE4;
+ break;
+ case BGX_MODE_RXAUI:
+ lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
+ break;
+ case BGX_MODE_QSGMII:
+ /* There is no way to determine whether DLM0/2 or DLM1/3 is
+ * configured as QSGMII, since the bootloader configures all
+ * LMACs, so take whatever is configured by low level firmware.
+ */
+ lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
break;
default:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ lmac->lane_to_sds = 0;
+ break;
+ }
+}
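The resulting lane_to_sds values, for reference (interpreting the field as
one 2-bit SerDes lane index per LMAC lane, which is an assumption and not
stated in this patch):

	/* XAUI/XLAUI/RGMII: 0xE4 = 11 10 01 00, identity map of lanes 0-3
	 * RXAUI:            LMAC0 0x04 (lanes 0,1), LMAC1 0x0E (lanes 2,3)
	 * SGMII/XFI:        lane_to_sds = lmacid, one lane per LMAC
	 * QSGMII:           whatever low level firmware already programmed
	 */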
+
+static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
+{
+ if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR)) {
+ lmac->use_training = 0;
+ return;
+ }
+
+ lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+}
+
+static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+{
+ struct lmac *lmac;
+ struct lmac *olmac;
+ u64 cmr_cfg;
+ u8 lmac_type;
+ u8 lane_to_sds;
+
+ lmac = &bgx->lmac[idx];
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
+ if (bgx->is_rgx)
+ lmac->lmac_type = BGX_MODE_RGMII;
+ lmac_set_training(bgx, lmac, 0);
+ lmac_set_lane2sds(bgx, lmac);
+ return;
+ }
+
+ /* On 81xx a BGX can be split across 2 DLMs;
+ * firmware programs the lmac_type of LMAC0 and LMAC2.
+ */
+ if ((idx == 0) || (idx == 2)) {
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is not reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
+ lmac_set_lane2sds(bgx, lmac);
+
+ /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
+ olmac = &bgx->lmac[idx + 1];
+ olmac->lmac_type = lmac->lmac_type;
+ lmac_set_training(bgx, olmac, olmac->lmacid);
+ lmac_set_lane2sds(bgx, olmac);
+ }
+}
+
+static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+
+ if (!bgx->is_dlm)
+ return true;
+
+ lmac = &bgx->lmac[0];
+ if (lmac->lmac_type == BGX_MODE_INVALID)
+ return false;
+
+ return true;
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ struct lmac *lmac01;
+ struct lmac *lmac23;
+ u8 idx;
+
+ /* Init all LMAC's type to invalid */
+ for (idx = 0; idx < bgx->max_lmac; idx++) {
+ lmac = &bgx->lmac[idx];
+ lmac->lmacid = idx;
+ lmac->lmac_type = BGX_MODE_INVALID;
+ lmac->use_training = false;
+ }
+
+ /* It is assumed that low level firmware sets this value */
+ bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (bgx->lmac_count > bgx->max_lmac)
+ bgx->lmac_count = bgx->max_lmac;
+
+ for (idx = 0; idx < bgx->max_lmac; idx++)
+ bgx_set_lmac_config(bgx, idx);
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ bgx_print_qlm_mode(bgx, 0);
+ return;
+ }
+
+ if (bgx->lmac_count) {
+ bgx_print_qlm_mode(bgx, 0);
+ bgx_print_qlm_mode(bgx, 2);
+ }
+
+ /* If DLM0 is not in BGX mode then LMAC0/1 have
+ * to be configured with serdes lanes of DLM1
+ */
+ if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
+ return;
+ for (idx = 0; idx < bgx->lmac_count; idx++) {
+ lmac01 = &bgx->lmac[idx];
+ lmac23 = &bgx->lmac[idx + 2];
+ lmac01->lmac_type = lmac23->lmac_type;
+ lmac01->lane_to_sds = lmac23->lane_to_sds;
}
}
@@ -1042,7 +1165,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX) {
+ if (lmac == bgx->max_lmac) {
of_node_put(node);
break;
}
@@ -1087,6 +1210,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
u8 lmac;
+ u16 sdevid;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
@@ -1115,10 +1239,30 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_release_regions;
}
- bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
- bgx_vnic[bgx->bgx_id] = bgx;
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
+ if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
+ bgx->bgx_id =
+ (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->max_lmac = MAX_LMAC_PER_BGX;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ } else {
+ bgx->is_rgx = true;
+ bgx->max_lmac = 1;
+ bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ xcv_init_hw();
+ }
+
+ /* On 81xx all BGXs are DLMs; on 83xx there are 3 BGX QLMs and one
+ * BGX, i.e. BGX2, can be split across 2 DLMs.
+ */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
+ ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
+ bgx->is_dlm = true;
+
bgx_get_qlm_mode(bgx);
err = bgx_init_phy(bgx);
@@ -1133,6 +1277,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(dev, "BGX%d failed to enable lmac%d\n",
bgx->bgx_id, lmac);
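+ /* Disable any LMACs that were already enabled before bailing out */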
+ while (lmac)
+ bgx_lmac_disable(bgx, --lmac);
goto err_enable;
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 42010d2e5ddf..d59c71e4a000 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -9,8 +9,20 @@
#ifndef THUNDER_BGX_H
#define THUNDER_BGX_H
-#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+/* PCI device ID */
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+#define PCI_DEVICE_ID_THUNDER_RGX 0xA054
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
+#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
+#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
+
+#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
#define MAX_BGX_PER_CN88XX 2
+#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
+#define MAX_BGX_PER_CN83XX 4
+#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
@@ -18,8 +30,6 @@
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
-#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
-
/* Registers */
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
@@ -136,6 +146,7 @@
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define BGX_GMP_GMI_PRTX_CFG 0x38020
@@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
@@ -213,16 +227,9 @@ enum LMAC_TYPE {
BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
-};
-
-enum qlm_mode {
- QLM_MODE_SGMII, /* SGMII, each lane independent */
- QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
- QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
- QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
- QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
- QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
- QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+ BGX_MODE_RGMII = 5,
+ BGX_MODE_QSGMII = 6,
+ BGX_MODE_INVALID = 7,
};
#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
new file mode 100644
index 000000000000..67befedef709
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-xcv"
+#define DRV_VERSION "1.0"
+
+/* Register offsets */
+#define XCV_RESET 0x00
+#define PORT_EN BIT_ULL(63)
+#define CLK_RESET BIT_ULL(15)
+#define DLL_RESET BIT_ULL(11)
+#define COMP_EN BIT_ULL(7)
+#define TX_PKT_RESET BIT_ULL(3)
+#define TX_DATA_RESET BIT_ULL(2)
+#define RX_PKT_RESET BIT_ULL(1)
+#define RX_DATA_RESET BIT_ULL(0)
+#define XCV_DLL_CTL 0x10
+#define CLKRX_BYP BIT_ULL(23)
+#define CLKTX_BYP BIT_ULL(15)
+#define XCV_COMP_CTL 0x20
+#define DRV_BYP BIT_ULL(63)
+#define XCV_CTL 0x30
+#define XCV_INT 0x40
+#define XCV_INT_W1S 0x48
+#define XCV_INT_ENA_W1C 0x50
+#define XCV_INT_ENA_W1S 0x58
+#define XCV_INBND_STATUS 0x80
+#define XCV_BATCH_CRD_RET 0x100
+
+struct xcv {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct xcv *xcv;
+
+/* Supported devices */
+static const struct pci_device_id xcv_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, xcv_id_table);
+
+void xcv_init_hw(void)
+{
+ u64 cfg;
+
+ /* Take DLL out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~DLL_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Take clock tree out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ /* Wait for DLL to lock */
+ msleep(1);
+
+ /* Configure DLL - enable or bypass
+ * TX no bypass, RX bypass
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
+ cfg &= ~0xFF03;
+ cfg |= CLKRX_BYP;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);
+
+ /* Enable compensation controller and force the
+ * write to be visible to HW by reading back.
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= COMP_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ /* Wait for compensation state machine to lock */
+ msleep(10);
+
+ /* enable the XCV block */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= PORT_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+}
+EXPORT_SYMBOL(xcv_init_hw);
+
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ u64 cfg;
+ int speed = 2;
+
+ if (!xcv) {
+ pr_err("XCV init not done, probe may have failed\n");
+ return;
+ }
+
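+ /* Map link speed to the XCV_CTL speed field:
+ * 10 Mbps -> 0, 100 Mbps -> 1, otherwise 1000 Mbps (2)
+ */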
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* set operating speed */
+ cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
+ cfg &= ~0x03;
+ cfg |= speed;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);
+
+ /* Reset datapaths */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_DATA_RESET | RX_DATA_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Enable the packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_PKT_RESET | RX_PKT_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Return credits to RGX */
+ writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
+ } else {
+ /* Disable packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ }
+}
+EXPORT_SYMBOL(xcv_setup_link);
+
+static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
+ if (!xcv)
+ return -ENOMEM;
+ xcv->pdev = pdev;
+
+ pci_set_drvdata(pdev, xcv);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_kfree;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!xcv->reg_base) {
+ dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_kfree:
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ return err;
+}
+
+static void xcv_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (xcv) {
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver xcv_driver = {
+ .name = DRV_NAME,
+ .id_table = xcv_id_table,
+ .probe = xcv_probe,
+ .remove = xcv_remove,
+};
+
+static int __init xcv_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&xcv_driver);
+}
+
+static void __exit xcv_cleanup_module(void)
+{
+ pci_unregister_driver(&xcv_driver);
+}
+
+module_init(xcv_init_module);
+module_exit(xcv_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..c6b71f656992 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index edd23386b47d..2125903043fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -53,6 +53,8 @@
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
+extern struct list_head adapter_list;
+extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
@@ -338,14 +340,17 @@ struct adapter_params {
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
+ unsigned char crypto; /* HW capability for crypto */
unsigned char bypass;
unsigned int ofldq_wr_cred;
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ unsigned int nsched_cls; /* number of traffic classes */
unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
unsigned int max_ird_adapter; /* Max read depth per adapter */
+ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -403,7 +408,6 @@ struct fw_info {
struct fw_hdr fw_hdr;
};
-
struct trace_params {
u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4];
@@ -434,11 +438,6 @@ enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
- MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
- MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
-
- /* # of streaming iSCSIT Rx queues */
- MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -455,8 +454,7 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
- MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
struct adapter;
@@ -493,6 +491,7 @@ struct port_info {
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
struct hwtstamp_config tstamp_config;
+ struct sched_table *sched_tbl;
};
struct dentry;
@@ -510,6 +509,10 @@ enum { /* adapter flags */
FW_OFLD_CONN = (1 << 9),
};
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+};
+
struct rx_sw_desc;
struct sge_fl { /* SGE free-buffer queue state */
@@ -680,17 +683,24 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
u8 full; /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
+struct sge_uld_rxq_info {
+ char name[IFNAMSIZ]; /* name of ULD driver */
+ struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */
+ u16 *msix_tbl; /* msix_tbl for uld */
+ u16 *rspq_id; /* response queue id's of rxq */
+ u16 nrxq; /* # of ingress uld queues */
+ u16 nciq; /* # of completion queues */
+ u8 uld; /* uld type */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
- struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
- struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
- struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+ struct sge_uld_rxq_info **uld_rxq_info;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
@@ -698,14 +708,8 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 iscsiqsets; /* # of active iSCSI queue sets */
- u16 niscsitq; /* # of available iSCST Rx queues */
- u16 rdmaqs; /* # of available RDMA Rx queues */
- u16 rdmaciqs; /* # of available RDMA concentrator IQs */
- u16 iscsi_rxq[MAX_OFLD_QSETS];
- u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
- u16 rdma_rxq[MAX_RDMA_QUEUES];
- u16 rdma_ciq[MAX_RDMA_CIQS];
+ u16 ofldqsets; /* # of active ofld queue sets */
+ u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -729,10 +733,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
-#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
-#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
-#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
+#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
struct l2t_data;
@@ -757,6 +758,23 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN];
};
+struct uld_msix_bmap {
+ unsigned long *msix_bmap;
+ unsigned int mapsize;
+ spinlock_t lock; /* lock for acquiring bitmap */
+};
+
+struct uld_msix_info {
+ unsigned short vec;
+ char desc[IFNAMSIZ + 10];
+ unsigned int idx;
+};
+
+struct vf_info {
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool pf_set_mac;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -767,6 +785,7 @@ struct adapter {
unsigned int mbox;
unsigned int pf;
unsigned int flags;
+ unsigned int adap_idx;
enum chip_type chip;
int msg_enable;
@@ -779,6 +798,9 @@ struct adapter {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
+ struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
+ int msi_idx;
struct doorbell_stats db_stats;
struct sge sge;
@@ -786,6 +808,9 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ struct vf_info *vfinfo;
+ u8 num_vfs;
+
u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
@@ -793,7 +818,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
+ unsigned int num_uld;
+ unsigned int num_ofld_uld;
struct list_head list_node;
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
@@ -813,6 +841,8 @@ struct adapter {
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
+ struct mutex uld_mutex;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -822,6 +852,58 @@ struct adapter {
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
+
+ /* TC u32 offload */
+ struct cxgb4_tc_u32_table *tc_u32;
+};
+
+/* Support for "sched-class" command to allow a TX Scheduling Class to be
+ * programmed with various parameters.
+ */
+struct ch_sched_params {
+ s8 type; /* packet or flow */
+ union {
+ struct {
+ s8 level; /* scheduler hierarchy level */
+ s8 mode; /* per-class or per-flow */
+ s8 rateunit; /* bit or packet rate */
+ s8 ratemode; /* %port relative or kbps absolute */
+ s8 channel; /* scheduler channel [0..N] */
+ s8 class; /* scheduler class [0..N] */
+ s32 minrate; /* minimum rate */
+ s32 maxrate; /* maximum rate */
+ s16 weight; /* percent weight */
+ s16 pktsize; /* average packet size */
+ } params;
+ } u;
+};
+
+enum {
+ SCHED_CLASS_TYPE_PACKET = 0, /* class type */
+};
+
+enum {
+ SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
+};
+
+enum {
+ SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */
+};
+
+enum {
+ SCHED_CLASS_RATEUNIT_BITS = 0, /* bit rate scheduling */
+};
+
+enum {
+ SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
+};
+
+/* Support for "sched_queue" command to allow one or more NIC TX Queues
+ * to be bound to a TX Scheduling Class.
+ */
+struct ch_sched_queue {
+ s8 queue; /* queue index */
+ s8 class; /* class index */
};
/* Defined bit width of user definable filter tuples
@@ -947,11 +1029,47 @@ enum {
VLAN_REWRITE
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter. */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct filter_ctx *ctx; /* Caller's completion hook */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct net_device *dev; /* Associated net device */
+ u32 tid; /* This will store the actual tid */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
}
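+/* HW crypto (lookaside) capability is tracked separately from TOE offload;
+ * see params.crypto and ULP_CRYPTO_LOOKASIDE.
+ */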
+static inline int is_pci_uld(const struct adapter *adap)
+{
+ return adap->params.crypto;
+}
+
+static inline int is_uld(const struct adapter *adap)
+{
+ return (adap->params.offload || adap->params.crypto);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1178,6 +1296,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
@@ -1185,8 +1305,6 @@ int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
int cxgb_busy_poll(struct napi_struct *napi);
-int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
- unsigned int cnt);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;
@@ -1289,6 +1407,18 @@ static inline int hash_mac_addr(const u8 *addr)
return a & 0x3f;
}
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
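+
+/* Fill in the common response queue parameters: owning adapter, interrupt
+ * holdoff settings, entry size and queue depth.
+ */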
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->adap = adap;
+ cxgb4_set_rspq_intr_params(q, us, cnt);
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1514,6 +1644,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+ int rateunit, int ratemode, int channel, int class,
+ int minrate, int maxrate, int weight, int pktsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
@@ -1521,4 +1654,11 @@ void t4_idma_monitor_init(struct adapter *adapter,
void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
+int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ unsigned int naddr, u8 *addr);
+void t4_uld_mem_free(struct adapter *adap);
+int t4_uld_mem_alloc(struct adapter *adap);
+void t4_uld_clean_up(struct adapter *adap);
+void t4_register_netevent_notifier(void);
+void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 91fb50850fff..20455d082cb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2432,17 +2432,11 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
- int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
- int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
- int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+ int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int iscsi_idx = r - eth_entries;
- int iscsit_idx = iscsi_idx - iscsi_entries;
- int rdma_idx = iscsit_idx - iscsit_entries;
- int ciq_idx = rdma_idx - rdma_entries;
- int ctrl_idx = ciq_idx - ciq_entries;
+ int ofld_idx = r - eth_entries;
+ int ctrl_idx = ofld_idx - ofld_entries;
int fq_idx = ctrl_idx - ctrl_entries;
if (r)
@@ -2518,119 +2512,17 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (iscsi_idx < iscsi_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsirxq[iscsi_idx * 4];
+ } else if (ofld_idx < ofld_entries) {
const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
+ &adap->sge.ofldtxq[ofld_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
- S("QType:", "iSCSI");
+ S("QType:", "OFLD-Txq");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (iscsit_idx < iscsit_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsitrxq[iscsit_idx * 4];
- int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
-
- S("QType:", "iSCSIT");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (rdma_idx < rdma_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.rdmarxq[rdma_idx * 4];
- int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
-
- S("QType:", "RDMA-CPL");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (ciq_idx < ciq_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
- int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
-
- S("QType:", "RDMA-CIQ");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- RL("RxAN:", stats.an);
- RL("RxNoMem:", stats.nomem);
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
@@ -2672,10 +2564,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
- DIV_ROUND_UP(adap->sge.niscsitq, 4) +
- DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
- DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+ DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2859,12 +2748,6 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
-static int blocked_fl_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -2908,7 +2791,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
static const struct file_operations blocked_fl_fops = {
.owner = THIS_MODULE,
- .open = blocked_fl_open,
+ .open = simple_open,
.read = blocked_fl_read,
.write = blocked_fl_write,
.llseek = generic_file_llseek,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
new file mode 100644
index 000000000000..10736738ff30
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -0,0 +1,721 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "l2t.h"
+#include "t4fw_api.h"
+#include "cxgb4_filter.h"
+
+static inline bool is_field_set(u32 val, u32 mask)
+{
+ return val || mask;
+}
+
+static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
+{
+ return !(conf & conf_mask) && is_field_set(val, mask);
+}
+
+/* Validate filter spec against configuration done on the card. */
+static int validate_filter(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 fconf, iconf;
+
+ /* Check for unconfigured fields being used. */
+ fconf = adapter->params.tp.vlan_pri_map;
+ iconf = adapter->params.tp.ingress_config;
+
+ if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
+ unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
+ unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
+ unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
+ fs->mask.ethtype) ||
+ unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
+ unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
+ fs->mask.matchtype) ||
+ unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
+ unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
+ fs->mask.pfvf_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
+ fs->mask.ovlan_vld) ||
+ unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
+ return -EOPNOTSUPP;
+
+ /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
+ * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
+ * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
+ * below. Additionally, since the T4 firmware interface also
+ * carries that overlap, we need to translate any PF/VF
+ * specification into that internal format below.
+ */
+ if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ return -EOPNOTSUPP;
+ if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ (iconf & VNIC_F)))
+ return -EOPNOTSUPP;
+ if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
+ return -ERANGE;
+ fs->mask.pf &= 0x7;
+ fs->mask.vf &= 0x7f;
+
+ /* If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* Don't allow various trivially obvious bogus out-of-range values... */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* T4 doesn't support removing VLAN Tags for loop back filters. */
+ if (is_t4(adapter->params.chip) &&
+ fs->action == FILTER_SWITCH &&
+ (fs->newvlan == VLAN_REMOVE ||
+ fs->newvlan == VLAN_REWRITE))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int get_filter_steerq(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ int iq;
+
+ /* If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ if (fs->iq)
+ return -EINVAL;
+ iq = 0;
+ } else {
+ struct port_info *pi = netdev_priv(dev);
+
+ /* If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->nqsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->ftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
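+ /* IPv4 filters take a single slot; IPv6 filters reserve an
+ * order-2 (four slot) region.
+ */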
+ if (family == PF_INET)
+ __set_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+ if (family == PF_INET)
+ __clear_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ spin_unlock_bh(&t->ftid_lock);
+}
+
+/* Delete the filter at a specified index. */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ len = sizeof(*fwr);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ htonl(FW_FILTER_WR_TID_V(f->tid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked or
+ * currently pending in another operation.
+ */
+int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+void clear_all_filters(struct adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ unsigned int max_ftid = adapter->tids.nftids +
+ adapter->tids.nsftids;
+
+ for (i = 0; i < max_ftid; i++, f++)
+ if (f->valid || f->pending)
+ clear_filter(adapter, f);
+ }
+}
+
+/* Fill up default masks for set match fields. */
+static void fill_default_mask(struct ch_filter_specification *fs)
+{
+ unsigned int lip = 0, lip_mask = 0;
+ unsigned int fip = 0, fip_mask = 0;
+ unsigned int i;
+
+ if (fs->val.iport && !fs->mask.iport)
+ fs->mask.iport |= ~0;
+ if (fs->val.fcoe && !fs->mask.fcoe)
+ fs->mask.fcoe |= ~0;
+ if (fs->val.matchtype && !fs->mask.matchtype)
+ fs->mask.matchtype |= ~0;
+ if (fs->val.macidx && !fs->mask.macidx)
+ fs->mask.macidx |= ~0;
+ if (fs->val.ethtype && !fs->mask.ethtype)
+ fs->mask.ethtype |= ~0;
+ if (fs->val.ivlan && !fs->mask.ivlan)
+ fs->mask.ivlan |= ~0;
+ if (fs->val.ovlan && !fs->mask.ovlan)
+ fs->mask.ovlan |= ~0;
+ if (fs->val.frag && !fs->mask.frag)
+ fs->mask.frag |= ~0;
+ if (fs->val.tos && !fs->mask.tos)
+ fs->mask.tos |= ~0;
+ if (fs->val.proto && !fs->mask.proto)
+ fs->mask.proto |= ~0;
+
+ for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
+ lip |= fs->val.lip[i];
+ lip_mask |= fs->mask.lip[i];
+ fip |= fs->val.fip[i];
+ fip_mask |= fs->mask.fip[i];
+ }
+
+ if (lip && !lip_mask)
+ memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
+
+ if (fip && !fip_mask)
+ memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
+
+ if (fs->val.lport && !fs->mask.lport)
+ fs->mask.lport = ~0;
+ if (fs->val.fport && !fs->mask.fport)
+ fs->mask.fport = ~0;
+}
+
+/* Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int max_fidx, fidx;
+ struct filter_entry *f;
+ u32 iconf;
+ int iq, ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ fill_default_mask(fs);
+
+ ret = validate_filter(dev, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+ if (iq < 0)
+ return iq;
+
+ /* IPv6 filters occupy four slots and must be aligned on
+ * four-slot boundaries. IPv4 filters only occupy a single
+ * slot and have no alignment requirements but writing a new
+ * IPv4 filter into the middle of an existing IPv6 filter
+ * requires clearing the old IPv6 filter and hence we prevent
+ * insertion.
+ */
+ if (fs->type == 0) { /* IPv4 */
+ /* If our IPv4 filter isn't being written to a
+ * multiple of four filter index and there's an IPv6
+ * filter at the multiple of 4 base slot, then we
+ * prevent insertion.
+ */
+ fidx = filter_id & ~0x3;
+ if (fidx != filter_id &&
+ adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
+ fidx, fidx + 3);
+ return -EINVAL;
+ }
+ }
+ } else { /* IPv6 */
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
+
+ /* Check all except the base overlapping IPv4 filter slots. */
+ for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ if (ret)
+ return ret;
+
+ /* Check to make sure the filter requested is writable ... */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ return ret;
+ }
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adapter, f);
+
+ /* Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ iconf = adapter->params.tp.ingress_config;
+ if (iconf & VNIC_F) {
+ f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+ f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ /* Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(adapter, filter_id);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ clear_filter(adapter, f);
+ }
+
+ return ret;
+}
+
+/* Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct filter_entry *f;
+ unsigned int max_fidx;
+ int ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ f->fs.type ? PF_INET6 : PF_INET);
+ return del_filter_wr(adapter, filter_id);
+ }
+
+ /* If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ complete(&ctx->completion);
+ }
+ return ret;
+}
+
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+int cxgb4_del_filter(struct net_device *dev, int filter_id)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_del_filter(dev, filter_id, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+/* Handle a filter write/deletion reply. */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ struct filter_entry *f = NULL;
+ unsigned int max_fidx;
+ int idx;
+
+ max_fidx = adap->tids.nftids + adap->tids.nsftids;
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = TCB_COOKIE_G(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /* Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = 0;
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -ENOMEM;
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
+ }
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+ if (ctx)
+ complete(&ctx->completion);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
new file mode 100644
index 000000000000..23742cb1c69f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FILTER_H
+#define __CXGB4_FILTER_H
+
+#include "t4_msg.h"
+
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct adapter *adap, struct filter_entry *f);
+
+int set_filter_wr(struct adapter *adapter, int fidx);
+int delete_filter(struct adapter *adapter, unsigned int fidx);
+
+int writable_filter(struct filter_entry *f);
+void clear_all_filters(struct adapter *adapter);
+#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3ceafb55d6da..f320497368f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
@@ -76,6 +77,8 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "sched.h"
+#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -86,30 +89,6 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
-/* Host shadow copy of ingress filter entry. This is in host native format
- * and doesn't match the ordering or bit order, etc. of the hardware of the
- * firmware command. The use of bit-field structure elements is purely to
- * remind ourselves of the field size limitations and save memory in the case
- * where the filter table is large.
- */
-struct filter_entry {
- /* Administrative fields for filter.
- */
- u32 valid:1; /* filter allocated and valid */
- u32 locked:1; /* filter is administratively locked */
-
- u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
- struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
-
- /* The filter itself. Most of this is a straight copy of information
- * provided by the extended ioctl(). Some fields are translated to
- * internal forms -- for instance the Ingress Queue ID passed in from
- * the ioctl() is translated into the Absolute Ingress Queue ID.
- */
- struct ch_filter_specification fs;
-};
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -223,13 +202,8 @@ MODULE_PARM_DESC(select_queue,
static struct dentry *cxgb4_debugfs_root;
-static LIST_HEAD(adapter_list);
-static DEFINE_MUTEX(uld_mutex);
-/* Adapter list to be accessed from atomic context */
-static LIST_HEAD(adap_rcu_list);
-static DEFINE_SPINLOCK(adap_rcu_lock);
-static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
+LIST_HEAD(adapter_list);
+DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
@@ -303,11 +277,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
txq->dcb_prio = value;
}
}
-#endif /* CONFIG_CHELSIO_T4_DCB */
-int cxgb4_dcb_enabled(const struct net_device *dev)
+static int cxgb4_dcb_enabled(const struct net_device *dev)
{
-#ifdef CONFIG_CHELSIO_T4_DCB
struct port_info *pi = netdev_priv(dev);
if (!pi->dcb.enabled)
@@ -315,11 +287,8 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
(pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
+#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
@@ -531,66 +500,6 @@ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
-/* Clear a filter and release any of its resources that we own. This also
- * clears the filter's "pending" status.
- */
-static void clear_filter(struct adapter *adap, struct filter_entry *f)
-{
- /* If the new or old filter has loopback rewriting rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
- */
- if (f->l2t)
- cxgb4_l2t_release(f->l2t);
-
- /* The zeroing of the filter rule below clears the filter valid,
- * pending, locked flags, l2t pointer, etc. so it's all we need for
- * this operation.
- */
- memset(f, 0, sizeof(*f));
-}
-
-/* Handle a filter write/deletion reply.
- */
-static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
-{
- unsigned int idx = GET_TID(rpl);
- unsigned int nidx = idx - adap->tids.ftid_base;
- unsigned int ret;
- struct filter_entry *f;
-
- if (idx >= adap->tids.ftid_base && nidx <
- (adap->tids.nftids + adap->tids.nsftids)) {
- idx = nidx;
- ret = TCB_COOKIE_G(rpl->cookie);
- f = &adap->tids.ftid_tab[idx];
-
- if (ret == FW_FILTER_WR_FLT_DELETED) {
- /* Clear the filter when we get confirmation from the
- * hardware that the filter has been deleted.
- */
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- } else {
- /* Something went wrong. Issue a warning about the
- * problem and clear everything out.
- */
- dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
- idx, ret);
- clear_filter(adap, f);
- }
- }
-}
-
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -677,56 +586,6 @@ out:
return 0;
}
-/* Flush the aggregated lro sessions */
-static void uldrx_flush_handler(struct sge_rspq *q)
-{
- if (ulds[q->uld].lro_flush)
- ulds[q->uld].lro_flush(&q->lro_mgr);
-}
-
-/**
- * uldrx_handler - response queue handler for ULD queues
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the offload message
- * @gl: the gather list of packet fragments
- *
- * Deliver an ingress offload packet to a ULD. All processing is done by
- * the ULD, we just maintain statistics.
- */
-static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
-{
- struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
- int ret;
-
- /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
- */
- if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
- rsp += 2;
-
- if (q->flush_handler)
- ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl, &q->lro_mgr,
- &q->napi);
- else
- ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl);
-
- if (ret) {
- rxq->stats.nomem++;
- return -1;
- }
-
- if (gl == NULL)
- rxq->stats.imm++;
- else if (gl == CXGB4_MSG_AN)
- rxq->stats.an++;
- else
- rxq->stats.pkts++;
- return 0;
-}
-
static void disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
@@ -778,30 +637,12 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
-
- /* offload queues */
- for_each_iscsirxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
- adap->port[0]->name, i);
-
- for_each_iscsitrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
- adap->port[0]->name, i);
-
- for_each_rdmarxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
- adap->port[0]->name, i);
-
- for_each_rdmaciq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
- adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
- int iscsitqidx = 0;
+ int err, ethqidx;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -818,57 +659,9 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_iscsirxq(s, iscsiqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsirxq[iscsiqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_iscsitrxq(s, iscsitqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsitrxq[iscsitqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmarxq[rdmaqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmaciq(s, rdmaciqqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmaciq[rdmaciqqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
return 0;
unwind:
- while (--rdmaciqqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmaciq[rdmaciqqidx].rspq);
- while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmarxq[rdmaqidx].rspq);
- while (--iscsitqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsitrxq[iscsitqidx].rspq);
- while (--iscsiqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -884,16 +677,6 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_iscsirxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsirxq[i].rspq);
- for_each_iscsitrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsitrxq[i].rspq);
- for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
- for_each_rdmaciq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1032,28 +815,30 @@ static void enable_rx(struct adapter *adap)
}
}
-static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
- unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids, bool lro)
+
+static int setup_fw_sge_queues(struct adapter *adap)
{
- int i, err;
+ struct sge *s = &adap->sge;
+ int err = 0;
+
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
- for (i = 0; i < nq; i++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler,
- lro ? uldrx_flush_handler : NULL,
- 0);
+ if (adap->flags & USING_MSIX)
+ adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+ NULL, NULL, NULL, -1);
if (err)
return err;
- memset(&q->stats, 0, sizeof(q->stats));
- if (ids)
- ids[i] = q->rspq.abs_id;
+ adap->msi_idx = -((int)s->intrq.abs_id + 1);
}
- return 0;
+
+ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+ if (err)
+ t4_free_sge_resources(adap);
+ return err;
}
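
For readers unfamiliar with the msi_idx convention carried over here: a positive value selects an MSI-X vector, while non-MSI-X setups encode the interrupt queue's absolute id as a negative number so it can be recovered later. A minimal user-space sketch of that encoding (names are illustrative, not the driver's):

#include <stdio.h>

/* Toy model of the msi_idx convention used above: >0 means "this MSI-X
 * vector", <0 carries -(intrq_abs_id + 1) so the original queue id can be
 * recovered without a separate flag.
 */
static int encode_intr(int using_msix, int intrq_abs_id)
{
	return using_msix ? 1 : -(intrq_abs_id + 1);
}

static int decode_intrq_abs_id(int msi_idx)
{
	return -msi_idx - 1;	/* only meaningful when msi_idx < 0 */
}

int main(void)
{
	int idx = encode_intr(0, 37);

	printf("msi_idx %d -> intrq abs id %d\n", idx, decode_intrq_abs_id(idx));
	return 0;
}
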
/**
@@ -1066,41 +851,10 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
*/
static int setup_sge_queues(struct adapter *adap)
{
- int err, msi_idx, i, j;
+ int err, i, j;
struct sge *s = &adap->sge;
-
- bitmap_zero(s->starving_fl, s->egr_sz);
- bitmap_zero(s->txq_maperr, s->egr_sz);
-
- if (adap->flags & USING_MSIX)
- msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
- err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL, NULL, -1);
- if (err)
- return err;
- msi_idx = -((int)s->intrq.abs_id + 1);
- }
-
- /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
- * don't forget to update the following which need to be
- * synchronized to and changes here.
- *
- * 1. The calculations of MAX_INGQ in cxgb4.h.
- *
- * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
- * to accommodate any new/deleted Ingress Queues
- * which need MSI-X Vectors.
- *
- * 3. Update sge_qinfo_show() to include information on the
- * new/deleted queues.
- */
- err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler, NULL, -1);
- if (err) {
-freeout: t4_free_sge_resources(adap);
- return err;
- }
+ struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ unsigned int cmplqid = 0;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -1109,10 +863,10 @@ freeout: t4_free_sge_resources(adap);
struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
for (j = 0; j < pi->nqsets; j++, q++) {
- if (msi_idx > 0)
- msi_idx++;
+ if (adap->msi_idx > 0)
+ adap->msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- msi_idx, &q->fl,
+ adap->msi_idx, &q->fl,
t4_ethrx_handler,
NULL,
t4_get_mps_bg_map(adap,
@@ -1131,8 +885,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_iscsirxq(s, i) {
+ j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
+ for_each_ofldtxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1140,30 +894,15 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
- if (err) \
- goto freeout; \
- if (msi_idx > 0) \
- msi_idx += nq; \
-} while (0)
-
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
- ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
- j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
-
-#undef ALLOC_OFLD_RXQS
-
for_each_port(adap, i) {
- /*
- * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ /* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
*/
+ if (rxq_info)
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+
err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
- s->fw_evtq.cntxt_id,
- s->rdmarxq[i].rspq.cntxt_id);
+ s->fw_evtq.cntxt_id, cmplqid);
if (err)
goto freeout;
}
@@ -1174,6 +913,9 @@ freeout: t4_free_sge_resources(adap);
RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
+freeout:
+ t4_free_sge_resources(adap);
+ return err;
}
/*
@@ -1197,151 +939,6 @@ void t4_free_mem(void *addr)
kvfree(addr);
}
-/* Send a Work Request to write the filter at a specified index. We construct
- * a Firmware Filter Work Request to have the work done and put the indicated
- * filter into "pending" mode which will prevent any further actions against
- * it till we get a reply from the firmware on the completion status of the
- * request.
- */
-static int set_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int ftid;
-
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- /* If the new filter requires loopback Destination MAC and/or VLAN
- * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
- * the filter.
- */
- if (f->fs.newdmac || f->fs.newvlan) {
- /* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
- f->fs.eport, f->fs.dmac);
- if (f->l2t == NULL) {
- kfree_skb(skb);
- return -ENOMEM;
- }
- }
-
- ftid = adapter->tids.ftid_base + fidx;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
-
- /* It would be nice to put most of the following in t4_hw.c but most
- * of the work is translating the cxgbtool ch_filter_specification
- * into the Work Request and the definition of that structure is
- * currently in cxgbtool.h which isn't appropriate to pull into the
- * common code. We may eventually try to come up with a more neutral
- * filter specification structure but for now it's easiest to simply
- * put this fairly direct code in line ...
- */
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
- fwr->tid_to_iq =
- htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_RQTYPE_V(f->fs.type) |
- FW_FILTER_WR_NOREPLY_V(0) |
- FW_FILTER_WR_IQ_V(f->fs.iq));
- fwr->del_filter_to_l2tix =
- htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
- FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
- FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
- FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
- FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
- FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
- FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
- FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
- FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
- FW_FILTER_WR_PRIO_V(f->fs.prio) |
- FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
- fwr->ethtype = htons(f->fs.val.ethtype);
- fwr->ethtypem = htons(f->fs.mask.ethtype);
- fwr->frag_to_ovlan_vldm =
- (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
- FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
- FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
- FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
- fwr->rx_chan_rx_rpl_iq =
- htons(FW_FILTER_WR_RX_CHAN_V(0) |
- FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
- fwr->maci_to_matchtypem =
- htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
- FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
- FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
- FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
- FW_FILTER_WR_PORT_V(f->fs.val.iport) |
- FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
- FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
- FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
- fwr->ptcl = f->fs.val.proto;
- fwr->ptclm = f->fs.mask.proto;
- fwr->ttyp = f->fs.val.tos;
- fwr->ttypm = f->fs.mask.tos;
- fwr->ivlan = htons(f->fs.val.ivlan);
- fwr->ivlanm = htons(f->fs.mask.ivlan);
- fwr->ovlan = htons(f->fs.val.ovlan);
- fwr->ovlanm = htons(f->fs.mask.ovlan);
- memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
- memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
- memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
- memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
- fwr->lp = htons(f->fs.val.lport);
- fwr->lpm = htons(f->fs.mask.lport);
- fwr->fp = htons(f->fs.val.fport);
- fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
- t4_ofld_send(adapter, skb);
- return 0;
-}
-
-/* Delete the filter at a specified index.
- */
-static int del_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int len, ftid;
-
- len = sizeof(*fwr);
- ftid = adapter->tids.ftid_base + fidx;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
- t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- t4_mgmt_tx(adapter, skb);
- return 0;
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1723,19 +1320,22 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
*/
static int tid_init(struct tid_info *t)
{
- size_t size;
- unsigned int stid_bmap_size;
- unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
+ unsigned int max_ftids = t->nftids + t->nsftids;
+ unsigned int natids = t->natids;
+ unsigned int stid_bmap_size;
+ unsigned int ftid_bmap_size;
+ size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ ftid_bmap_size = BITS_TO_LONGS(t->nftids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
- t->nftids * sizeof(*t->ftid_tab) +
- t->nsftids * sizeof(*t->ftid_tab);
+ max_ftids * sizeof(*t->ftid_tab) +
+ ftid_bmap_size * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
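
The size computation above feeds a single allocation that is then carved into the tid, atid, stid and filter tables plus their bitmaps. A minimal user-space sketch of that carve-up pattern, with illustrative stand-in types and sizes rather than the driver's own:

#include <stdlib.h>

/* Illustrative stand-ins for the driver's table element types. */
struct demo_tid_info {
	void **tid_tab;           /* ntids pointers */
	unsigned long *stid_bmap; /* bitmap over nstids */
	unsigned long *ftid_bmap; /* bitmap over nftids */
	unsigned int ntids, nstids, nftids;
};

#define DEMO_BITS_PER_LONG (8 * sizeof(long))
#define DEMO_BITS_TO_LONGS(n) (((n) + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

static int demo_tid_init(struct demo_tid_info *t)
{
	size_t stid_words = DEMO_BITS_TO_LONGS(t->nstids);
	size_t ftid_words = DEMO_BITS_TO_LONGS(t->nftids);
	size_t size = t->ntids * sizeof(void *) +
		      (stid_words + ftid_words) * sizeof(long);
	char *p = calloc(1, size);	/* one allocation for everything */

	if (!p)
		return -1;

	/* Carve the block up exactly once; pointer arithmetic does the rest. */
	t->tid_tab   = (void **)p;
	t->stid_bmap = (unsigned long *)&t->tid_tab[t->ntids];
	t->ftid_bmap = &t->stid_bmap[stid_words];
	return 0;
}

int main(void)
{
	struct demo_tid_info t = { .ntids = 1024, .nstids = 256, .nftids = 496 };

	if (!demo_tid_init(&t))
		free(t.tid_tab);	/* one free releases every table */
	return 0;
}
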
@@ -1745,8 +1345,10 @@ static int tid_init(struct tid_info *t)
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
+ spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
t->sftids_in_use = 0;
@@ -1761,12 +1363,16 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
- /* Reserve stid 0 for T4/T5 adapters */
- if (!t->stid_base &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- __set_bit(0, t->stid_bmap);
+ if (is_offload(adap)) {
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ __set_bit(0, t->stid_bmap);
+ }
+
+ bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
@@ -2316,7 +1922,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2328,7 +1934,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2336,9 +1942,10 @@ static void enable_dbs(struct adapter *adap)
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
+ enum cxgb4_uld type = CXGB4_ULD_RDMA;
+
+ if (adap->uld && adap->uld[type].handle)
+ adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
@@ -2392,13 +1999,14 @@ out:
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
+
static void recover_all_queues(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2463,94 +2071,12 @@ void t4_db_dropped(struct adapter *adap)
queue_work(adap->workq, &adap->db_drop_task);
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
-{
- void *handle;
- struct cxgb4_lld_info lli;
- unsigned short i;
-
- lli.pdev = adap->pdev;
- lli.pf = adap->pf;
- lli.l2t = adap->l2t;
- lli.tids = &adap->tids;
- lli.ports = adap->port;
- lli.vr = &adap->vres;
- lli.mtus = adap->params.mtus;
- if (uld == CXGB4_ULD_RDMA) {
- lli.rxq_ids = adap->sge.rdma_rxq;
- lli.ciq_ids = adap->sge.rdma_ciq;
- lli.nrxq = adap->sge.rdmaqs;
- lli.nciq = adap->sge.rdmaciqs;
- } else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.iscsi_rxq;
- lli.nrxq = adap->sge.iscsiqsets;
- } else if (uld == CXGB4_ULD_ISCSIT) {
- lli.rxq_ids = adap->sge.iscsit_rxq;
- lli.nrxq = adap->sge.niscsitq;
- }
- lli.ntxq = adap->sge.iscsiqsets;
- lli.nchan = adap->params.nports;
- lli.nports = adap->params.nports;
- lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
- lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
- lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
- lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
- lli.iscsi_ppm = &adap->iscsi_ppm;
- lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << adap->params.sge.eq_qpp;
- lli.ucq_density = 1 << adap->params.sge.iq_qpp;
- lli.filt_mode = adap->params.tp.vlan_pri_map;
- /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
- for (i = 0; i < NCHAN; i++)
- lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
- lli.fw_vers = adap->params.fw_vers;
- lli.dbfifo_int_thresh = dbfifo_int_thresh;
- lli.sge_ingpadboundary = adap->sge.fl_align;
- lli.sge_egrstatuspagesize = adap->sge.stat_len;
- lli.sge_pktshift = adap->sge.pktshift;
- lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
- lli.max_ordird_qp = adap->params.max_ordird_qp;
- lli.max_ird_adapter = adap->params.max_ird_adapter;
- lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
- lli.nodeid = dev_to_node(adap->pdev_dev);
-
- handle = ulds[uld].add(&lli);
- if (IS_ERR(handle)) {
- dev_warn(adap->pdev_dev,
- "could not attach to the %s driver, error %ld\n",
- uld_str[uld], PTR_ERR(handle));
- return;
- }
-
- adap->uld_handle[uld] = handle;
-
+void t4_register_netevent_notifier(void)
+{
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
-
- if (adap->flags & FULL_INIT_DONE)
- ulds[uld].state_change(handle, CXGB4_STATE_UP);
-}
-
-static void attach_ulds(struct adapter *adap)
-{
- unsigned int i;
-
- spin_lock(&adap_rcu_lock);
- list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
- spin_unlock(&adap_rcu_lock);
-
- mutex_lock(&uld_mutex);
- list_add_tail(&adap->list_node, &adapter_list);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (ulds[i].add)
- uld_attach(adap, i);
- mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
@@ -2560,20 +2086,16 @@ static void detach_ulds(struct adapter *adap)
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i]) {
- ulds[i].state_change(adap->uld_handle[i],
+ if (adap->uld && adap->uld[i].handle) {
+ adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH);
- adap->uld_handle[i] = NULL;
+ adap->uld[i].handle = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
-
- spin_lock(&adap_rcu_lock);
- list_del_rcu(&adap->rcu_node);
- spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -2582,60 +2104,11 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i])
- ulds[i].state_change(adap->uld_handle[i], new_state);
- mutex_unlock(&uld_mutex);
-}
-
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
- *
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
- */
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
-{
- int ret = 0;
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- if (ulds[type].add) {
- ret = -EBUSY;
- goto out;
- }
- ulds[type] = *p;
- list_for_each_entry(adap, &adapter_list, list_node)
- uld_attach(adap, type);
-out: mutex_unlock(&uld_mutex);
- return ret;
-}
-EXPORT_SYMBOL(cxgb4_register_uld);
-
-/**
- * cxgb4_unregister_uld - unregister an upper-layer driver
- * @type: the ULD type
- *
- * Unregisters an existing upper-layer driver.
- */
-int cxgb4_unregister_uld(enum cxgb4_uld type)
-{
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node)
- adap->uld_handle[type] = NULL;
- ulds[type].add = NULL;
+ if (adap->uld && adap->uld[i].handle)
+ adap->uld[i].state_change(adap->uld[i].handle,
+ new_state);
mutex_unlock(&uld_mutex);
- return 0;
}
-EXPORT_SYMBOL(cxgb4_unregister_uld);
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
@@ -2741,7 +2214,6 @@ static int cxgb_up(struct adapter *adap)
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
-
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
@@ -2819,40 +2291,6 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
-/* Return an error number if the indicated filter isn't writable ...
- */
-static int writable_filter(struct filter_entry *f)
-{
- if (f->locked)
- return -EPERM;
- if (f->pending)
- return -EBUSY;
-
- return 0;
-}
-
-/* Delete the filter at the specified index (if valid). The checks for all
- * the common problems with doing this like the filter being locked, currently
- * pending in another operation, etc.
- */
-static int delete_filter(struct adapter *adapter, unsigned int fidx)
-{
- struct filter_entry *f;
- int ret;
-
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
- return -EINVAL;
-
- f = &adapter->tids.ftid_tab[fidx];
- ret = writable_filter(f);
- if (ret)
- return ret;
- if (f->valid)
- return del_filter_wr(adapter, fidx);
-
- return 0;
-}
-
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
@@ -2922,7 +2360,6 @@ EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6)
{
- int ret;
struct filter_entry *f;
struct adapter *adap;
@@ -2936,11 +2373,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
/* Unlock the filter */
f->locked = 0;
- ret = delete_filter(adap, stid);
- if (ret)
- return ret;
-
- return 0;
+ return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
@@ -3078,6 +2511,85 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+#ifdef CONFIG_PCI_IOV
+static int dummy_open(struct net_device *dev)
+{
+ /* Turn carrier off since we don't have to transmit anything on this
+ * interface.
+ */
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/* Fill MAC address that will be assigned by the FW */
+static void fill_vf_station_mac_addr(struct adapter *adap)
+{
+ unsigned int i;
+ u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ int err;
+ u8 *na;
+ u16 a, b;
+
+ err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
+ if (!err) {
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (i = 0; i < adap->num_vfs; i++) {
+ macaddr[5] = adap->pf * 16 + i;
+ ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
+ }
+ }
+}
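
The a |= 0x0200 / a &= ~0x0100 step above flags the derived address as locally administered unicast by working on the first two octets packed into a 16-bit value. A tiny stand-alone illustration of the same two bits, operating directly on the first octet (hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Force a MAC prefix to be locally administered (bit 0x02 of the first
 * octet set) and unicast (bit 0x01 clear), mirroring the 16-bit
 * "a |= 0x0200; a &= ~0x0100;" manipulation above.
 */
static void make_local_unicast(uint8_t mac[6])
{
	mac[0] |= 0x02;		/* locally administered */
	mac[0] &= ~0x01;	/* not multicast */
}

int main(void)
{
	uint8_t mac[6] = { 0x01, 0x25, 0x90, 0x00, 0x00, 0x07 };

	make_local_unicast(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
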
+
+static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ /* verify MAC addr is valid */
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(pi->adapter->pdev_dev,
+ "Invalid Ethernet address %pM for VF %d\n",
+ mac, vf);
+ return -EINVAL;
+ }
+
+ dev_info(pi->adapter->pdev_dev,
+ "Setting MAC %pM on VF %d\n", mac, vf);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ if (!ret)
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
+ return ret;
+}
+
+static int cxgb_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
+ return 0;
+}
+#endif
+
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
int ret;
@@ -3114,6 +2626,116 @@ static void cxgb_netpoll(struct net_device *dev)
}
#endif
+static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ struct ch_sched_params p;
+ struct ch_sched_queue qe;
+ u32 req_rate;
+ int err = 0;
+
+ if (!can_sched(dev))
+ return -ENOTSUPP;
+
+ if (index < 0 || index > pi->nqsets - 1)
+ return -EINVAL;
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to rate limit on queue %d. Link Down?\n",
+ index);
+ return -EINVAL;
+ }
+
+ /* Convert from Mbps to Kbps */
+ req_rate = rate << 10;
+
+ /* Max rate is 10 Gbps */
+ if (req_rate >= SCHED_MAX_RATE_KBPS) {
+ dev_err(adap->pdev_dev,
+ "Invalid rate %u Mbps, Max rate is %u Gbps\n",
+ rate, SCHED_MAX_RATE_KBPS);
+ return -ERANGE;
+ }
+
+ /* First unbind the queue from any existing class */
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = index;
+ qe.class = SCHED_CLS_NONE;
+
+ err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
+ if (err) {
+ dev_err(adap->pdev_dev,
+ "Unbinding Queue %d on port %d fail. Err: %d\n",
+ index, pi->port_id, err);
+ return err;
+ }
+
+ /* Queue already unbound */
+ if (!req_rate)
+ return 0;
+
+ /* Fetch any available unused or matching scheduling class */
+ memset(&p, 0, sizeof(p));
+ p.type = SCHED_CLASS_TYPE_PACKET;
+ p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
+ p.u.params.mode = SCHED_CLASS_MODE_CLASS;
+ p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
+ p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
+ p.u.params.channel = pi->tx_chan;
+ p.u.params.class = SCHED_CLS_NONE;
+ p.u.params.minrate = 0;
+ p.u.params.maxrate = req_rate;
+ p.u.params.weight = 0;
+ p.u.params.pktsize = dev->mtu;
+
+ e = cxgb4_sched_class_alloc(dev, &p);
+ if (!e)
+ return -ENOMEM;
+
+ /* Bind the queue to a scheduling class */
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = index;
+ qe.class = e->idx;
+
+ err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
+ if (err)
+ dev_err(adap->pdev_dev,
+ "Queue rate limiting failed. Err: %d\n", err);
+ return err;
+}
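
A stand-alone sketch of the conversion and bounds check performed above; note that << 10 scales by 1024 rather than exactly 1000, and the ceiling used here is an assumed stand-in for SCHED_MAX_RATE_KBPS:

#include <stdint.h>
#include <stdio.h>

/* Assumed ceiling for illustration only; the real value comes from sched.h. */
#define DEMO_MAX_RATE_KBPS (10 * 1000 * 1000)	/* 10 Gbps expressed in Kbps */

/* Returns the requested rate in Kbps, or 0 if it is out of range.
 * Like the callback above, it scales Mbps by 1 << 10 (1024), a cheap
 * approximation of the exact x1000 conversion.
 */
static uint32_t demo_mbps_to_kbps(uint32_t mbps)
{
	uint32_t kbps = mbps << 10;

	return (kbps >= DEMO_MAX_RATE_KBPS) ? 0 : kbps;
}

int main(void)
{
	printf("1000 Mbps -> %u Kbps\n", demo_mbps_to_kbps(1000));
	printf("20000 Mbps -> %u (rejected)\n", demo_mbps_to_kbps(20000));
	return 0;
}
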
+
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+ tc->type == TC_SETUP_CLSU32) {
+ switch (tc->cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return cxgb4_config_knode(dev, proto, tc->cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return cxgb4_delete_knode(dev, proto, tc->cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
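
cxgb4_config_knode() further down derives the hardware filter slot and hash-table ids from the u32 classifier handle. A small illustration of that decomposition, written with local helpers under the assumption of the conventional 12/8/12-bit htid/hash/node split rather than the kernel's TC_U32_* macros:

#include <stdint.h>
#include <stdio.h>

/* Local helpers assuming the conventional u32 handle layout:
 * bits 31..20 = htid, 19..12 = hash bucket, 11..0 = node id.
 */
static uint32_t demo_u32_htid(uint32_t h) { return h >> 20; }
static uint32_t demo_u32_slot(uint32_t h) { return h & 0xFFFFF; } /* hash+node */

int main(void)
{
	uint32_t handle = (0x800u << 20) | 0x10;	/* root htid, node 0x10 */

	printf("htid %#x, filter slot %u\n",
	       demo_u32_htid(handle), demo_u32_slot(handle));
	return 0;
}
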
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -3136,7 +2758,31 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = cxgb_busy_poll,
#endif
+ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
+ .ndo_setup_tc = cxgb_setup_tc,
+};
+#ifdef CONFIG_PCI_IOV
+static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
+ .ndo_open = dummy_open,
+ .ndo_set_vf_mac = cxgb_set_vf_mac,
+ .ndo_get_vf_config = cxgb_get_vf_config,
+};
+#endif
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strlcpy(info->version, cxgb4_driver_version,
+ sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
+ .get_drvinfo = get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -3979,6 +3625,12 @@ static int adap_init0(struct adapter *adap)
adap->clipt_start = val[0];
adap->clipt_end = val[1];
+ /* We don't yet have a PARAMs call to retrieve the number of Traffic
+ * Classes supported by the hardware/firmware so we hard code it here
+ * for now.
+ */
+ adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
+
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -4016,6 +3668,12 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
+ /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
+ params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4067,6 +3725,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
+ adap->num_ofld_uld += 1;
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -4119,6 +3778,7 @@ static int adap_init0(struct adapter *adap)
"max_ordird_qp %d max_ird_adapter %d\n",
adap->params.max_ordird_qp,
adap->params.max_ird_adapter);
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -4129,6 +3789,13 @@ static int adap_init0(struct adapter *adap)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
+ /* LIO target and cxgb4i initiator */
+ adap->num_ofld_uld += 2;
+ }
+ if (caps_cmd.cryptocaps) {
+ /* Should query params here...TODO */
+ adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
@@ -4318,16 +3985,6 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt,
- unsigned int size, unsigned int iqe_size)
-{
- q->adap = adap;
- cxgb4_set_rspq_intr_params(q, us, cnt);
- q->iqe_len = iqe_size;
- q->size = size;
-}
-
/*
* Perform default configuration of DMA queues depending on the number and type
* of ports we found and the number of available CPUs. Most settings can be
@@ -4340,12 +3997,16 @@ static void cfg_queues(struct adapter *adap)
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- int ciq_size;
/* Reduce memory usage in kdump environment, disable all offload.
*/
- if (is_kdump_kernel())
+ if (is_kdump_kernel()) {
adap->params.offload = 0;
+ adap->params.crypto = 0;
+ } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ }
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -4389,33 +4050,18 @@ static void cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
- if (is_offload(adap)) {
+ if (is_uld(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->iscsirxq),
- num_online_cpus());
- s->iscsiqsets = roundup(i, adap->params.nports);
- } else
- s->iscsiqsets = adap->params.nports;
- /* For RDMA one Rx queue per channel suffices */
- s->rdmaqs = adap->params.nports;
- /* Try and allow at least 1 CIQ per cpu rounding down
- * to the number of ports, with a minimum of 1 per port.
- * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
- * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
- * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
- */
- s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
- s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
- adap->params.nports;
- s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = s->iscsiqsets;
+ i = num_online_cpus();
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else {
+ s->ofldqsets = adap->params.nports;
+ }
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4434,47 +4080,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsirxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSI;
- r->fl.size = 72;
- }
-
- if (!is_t4(adap->params.chip)) {
- for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsitrxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSIT;
- r->fl.size = 72;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
- struct sge_ofld_rxq *r = &s->rdmarxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 511, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- r->fl.size = 72;
- }
-
- ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
- if (ciq_size > SGE_MAX_IQ_SIZE) {
- CH_WARN(adap, "CIQ size too small for available IQs\n");
- ciq_size = SGE_MAX_IQ_SIZE;
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
- struct sge_ofld_rxq *r = &s->rdmaciq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- }
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
- init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
/*
@@ -4505,42 +4112,90 @@ static void reduce_ethqs(struct adapter *adap, int n)
}
}
+static int get_msix_info(struct adapter *adap)
+{
+ struct uld_msix_info *msix_info;
+ unsigned int max_ingq = 0;
+
+ if (is_offload(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
+ if (is_pci_uld(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_uld;
+
+ if (!max_ingq)
+ goto out;
+
+ msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
+ if (!msix_info)
+ return -ENOMEM;
+
+ adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->msix_bmap_ulds.msix_bmap) {
+ kfree(msix_info);
+ return -ENOMEM;
+ }
+ spin_lock_init(&adap->msix_bmap_ulds.lock);
+ adap->msix_info_ulds = msix_info;
+out:
+ return 0;
+}
+
+static void free_msix_info(struct adapter *adap)
+{
+ if (!(adap->num_uld && adap->num_ofld_uld))
+ return;
+
+ kfree(adap->msix_info_ulds);
+ kfree(adap->msix_bmap_ulds.msix_bmap);
+}
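
enable_msix() below sizes its request as a "want" (ideal) and a "need" (floor) vector count. A toy, self-contained version of that arithmetic with all inputs passed explicitly and example values that are purely illustrative:

#include <stdio.h>

#define DEMO_EXTRA_VECS 2	/* FW event queue + non-data interrupts */

/* Toy version of the want/need computation in enable_msix() below. */
static void demo_msix_budget(int max_ethqsets, int ofldqsets,
			     int num_ofld_uld, int nchan)
{
	int want = max_ethqsets + DEMO_EXTRA_VECS + num_ofld_uld * ofldqsets;
	int need = nchan + DEMO_EXTRA_VECS + num_ofld_uld * nchan;

	printf("want %d vectors, settle for no fewer than %d\n", want, need);
}

int main(void)
{
	/* e.g. 4-port adapter, 8 eth qsets, 8 offload qsets, 3 offload ULDs */
	demo_msix_budget(8, 8, 3, 4);
	return 0;
}
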
+
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
static int enable_msix(struct adapter *adap)
{
- int ofld_need = 0;
- int i, want, need, allocated;
+ int ofld_need = 0, uld_need = 0;
+ int i, j, want, need, allocated;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry *entries;
+ int max_ingq = MAX_INGQ;
- entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+ if (is_pci_uld(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_offload(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
+ entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < MAX_INGQ + 1; ++i)
+ /* map for msix */
+ if (get_msix_info(adap)) {
+ adap->params.offload = 0;
+ adap->params.crypto = 0;
+ }
+
+ for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
- s->niscsitq;
- /* need nchan for each possible ULD */
- if (is_t4(adap->params.chip))
- ofld_need = 3 * nchan;
- else
- ofld_need = 4 * nchan;
+ want += adap->num_ofld_uld * s->ofldqsets;
+ ofld_need = adap->num_ofld_uld * nchan;
+ }
+ if (is_pci_uld(adap)) {
+ want += adap->num_uld * s->ofldqsets;
+ uld_need = adap->num_uld * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
* each port.
*/
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+ need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
- need = adap->params.nports + EXTRA_VECS + ofld_need;
+ need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
if (allocated < 0) {
@@ -4554,33 +4209,31 @@ static int enable_msix(struct adapter *adap)
* Every group gets its minimum requirement and NIC gets top
* priority for leftovers.
*/
- i = allocated - EXTRA_VECS - ofld_need;
+ i = allocated - EXTRA_VECS - ofld_need - uld_need;
if (i < s->max_ethqsets) {
s->max_ethqsets = i;
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
- if (is_offload(adap)) {
- if (allocated < want) {
- s->rdmaqs = nchan;
- s->rdmaciqs = nchan;
+ if (is_uld(adap)) {
+ if (allocated < want)
+ s->nqs_per_uld = nchan;
+ else
+ s->nqs_per_uld = s->ofldqsets;
+ }
- if (!is_t4(adap->params.chip))
- s->niscsitq = nchan;
+ for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ if (is_uld(adap)) {
+ for (j = 0 ; i < allocated; ++i, j++) {
+ adap->msix_info_ulds[j].vec = entries[i].vector;
+ adap->msix_info_ulds[j].idx = i;
}
-
- /* leftovers go to OFLD */
- i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs - s->niscsitq;
- s->iscsiqsets = (i / nchan) * nchan; /* round down */
-
+ adap->msix_bmap_ulds.mapsize = j;
}
- for (i = 0; i < allocated; ++i)
- adap->msix_info[i].vec = entries[i].vector;
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
- allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
- s->rdmaciqs);
+ "nic %d per uld %d\n",
+ allocated, s->max_ethqsets, s->nqs_per_uld);
kfree(entries);
return 0;
@@ -4794,7 +4447,9 @@ static void free_some_resources(struct adapter *adapter)
unsigned int i;
t4_free_mem(adapter->l2t);
+ t4_cleanup_sched(adapter);
t4_free_mem(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
@@ -4845,21 +4500,59 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
+static void dummy_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_NONE;
+ dev->mtu = 0;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ /* Initialize the device structure. */
+ dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
+ dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
+ dev->destructor = free_netdev;
+}
+
+static int config_mgmt_dev(struct pci_dev *pdev)
+{
+ struct adapter *adap = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ int err;
+
+ snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
+ netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return err;
+ }
+ return 0;
+}
+
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
+ struct adapter *adap = pci_get_drvdata(pdev);
int err = 0;
int current_vfs = pci_num_vf(pdev);
u32 pcie_fw;
- void __iomem *regs;
-
- regs = pci_ioremap_bar(pdev, 0);
- if (!regs) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- return -ENOMEM;
- }
- pcie_fw = readl(regs + PCIE_FW_A);
- iounmap(regs);
+ pcie_fw = readl(adap->regs + PCIE_FW_A);
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
@@ -4886,6 +4579,14 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
*/
if (!num_vfs) {
pci_disable_sriov(pdev);
+ if (adap->port[0]) {
+ unregister_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ }
+ /* free VF resources */
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ adap->num_vfs = 0;
return num_vfs;
}
@@ -4893,7 +4594,17 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err)
return err;
+
+ adap->num_vfs = num_vfs;
+ err = config_mgmt_dev(pdev);
+ if (err)
+ return err;
}
+
+ adap->vfinfo = kcalloc(adap->num_vfs,
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (adap->vfinfo)
+ fill_vf_station_mac_addr(adap);
return num_vfs;
}
#endif
@@ -4904,9 +4615,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
+ struct net_device *netdev;
void __iomem *regs;
u32 whoami, pl_rev;
enum chip_type chip;
+ static int adap_idx = 1;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4941,7 +4654,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (func != ent->driver_data) {
+#ifndef CONFIG_PCI_IOV
iounmap(regs);
+#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
@@ -4973,6 +4688,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_unmap_bar0;
}
+ adap_idx++;
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
@@ -5059,8 +4775,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
T6_STATMODE_V(0)));
for_each_port(adapter, i) {
- struct net_device *netdev;
-
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
if (!netdev) {
@@ -5080,7 +4794,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_TC;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5154,10 +4869,26 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
#endif
- if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
+ if (!pi->sched_tbl)
+ dev_warn(&pdev->dev,
+ "could not activate scheduling on port %d\n",
+ i);
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
+ CXGB4_MAX_LINK_HANDLE);
+ if (!adapter->tc_u32)
+ dev_warn(&pdev->dev,
+ "could not offload tc u32, continuing\n");
}
if (is_offload(adapter)) {
@@ -5179,8 +4910,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
- else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ else if (msi > 0 && pci_enable_msi(pdev) == 0) {
adapter->flags |= USING_MSI;
+ if (msi > 1)
+ free_msix_info(adapter);
+ }
/* check for PCI Express bandwidth capabilities */
cxgb4_check_pcie_caps(adapter);
@@ -5224,10 +4958,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_offload(adapter))
- attach_ulds(adapter);
+ if (is_uld(adapter)) {
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adapter->list_node, &adapter_list);
+ mutex_unlock(&uld_mutex);
+ }
print_adapter_info(adapter);
+ setup_fw_sge_queues(adapter);
+ return 0;
sriov:
#ifdef CONFIG_PCI_IOV
@@ -5241,11 +4980,48 @@ sriov:
"instantiated %u virtual functions\n",
num_vf[func]);
}
-#endif
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto free_pci_region;
+ }
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->regs = regs;
+ adapter->adap_idx = adap_idx;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto free_adapter;
+ }
+ pci_set_drvdata(pdev, adapter);
+ return 0;
+
+ free_adapter:
+ kfree(adapter);
+ free_pci_region:
+ iounmap(regs);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ return err;
+#else
return 0;
+#endif
out_free_dev:
free_some_resources(adapter);
+ if (adapter->flags & USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -5269,12 +5045,12 @@ static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
-#ifdef CONFIG_PCI_IOV
- pci_disable_sriov(pdev);
-
-#endif
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
- if (adapter) {
+ if (adapter->pf == 4) {
int i;
/* Tear down per-adapter Work Queue first since it can contain
@@ -5282,7 +5058,7 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_offload(adapter))
+ if (is_uld(adapter))
detach_ulds(adapter);
disable_interrupts(adapter);
@@ -5296,17 +5072,15 @@ static void remove_one(struct pci_dev *pdev)
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
- if (adapter->tids.ftid_tab) {
- struct filter_entry *f = &adapter->tids.ftid_tab[0];
- for (i = 0; i < (adapter->tids.nftids +
- adapter->tids.nsftids); i++, f++)
- if (f->valid)
- clear_filter(adapter, f);
- }
+ clear_all_filters(adapter);
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
+ if (adapter->flags & USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
@@ -5323,8 +5097,64 @@ static void remove_one(struct pci_dev *pdev)
kfree(adapter->mbox_log);
synchronize_rcu();
kfree(adapter);
- } else
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ }
+#endif
+}
+
+/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
+ * delivery. This is essentially a stripped down version of the PCI remove()
+ * function where we do the minimal amount of work necessary to shutdown any
+ * further activity.
+ */
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ /* As with remove_one() above (see extended comment), we only want to
+ * do cleanup on PCI Devices which went all the way through init_one()
+ * ...
+ */
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
+
+ if (adapter->pf == 4) {
+ int i;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
+ disable_interrupts(adapter);
+ disable_msi(adapter);
+
+ t4_sge_stop(adapter);
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
pci_release_regions(pdev);
+ }
+#endif
}
static struct pci_driver cxgb4_driver = {
@@ -5332,7 +5162,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
- .shutdown = remove_one,
+ .shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
.sriov_configure = cxgb4_iov_configure,
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
new file mode 100644
index 000000000000..49d2debb334e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -0,0 +1,483 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "cxgb4.h"
+#include "cxgb4_tc_u32_parse.h"
+#include "cxgb4_tc_u32.h"
+
+/* Fill ch_filter_specification with parsed match value/mask pair. */
+static int fill_match_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls,
+ const struct cxgb4_match_field *entry,
+ bool next_header)
+{
+ unsigned int i, j;
+ u32 val, mask;
+ int off, err;
+ bool found;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ mask = cls->knode.sel->keys[i].mask;
+
+ if (next_header) {
+ /* For next headers, parse only keys with offmask */
+ if (!cls->knode.sel->keys[i].offmask)
+ continue;
+ } else {
+ /* For the remaining, parse only keys without offmask */
+ if (cls->knode.sel->keys[i].offmask)
+ continue;
+ }
+
+ found = false;
+
+ for (j = 0; entry[j].val; j++) {
+ if (off == entry[j].off) {
+ found = true;
+ err = entry[j].val(fs, val, mask);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+
+ return 0;
+}
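
fill_match_fields() above walks an offset-keyed table whose entries pair a header offset with a setter callback; the real struct lives in cxgb4_tc_u32_parse.h, which is not shown here, so the sketch below uses guessed, illustrative names for the same pattern:

#include <stdint.h>
#include <stdio.h>

struct demo_spec { uint32_t sip, sip_mask; };

/* Illustrative analogue of the driver's match-field table entries:
 * a header offset plus a callback that folds val/mask into the spec.
 */
struct demo_match_field {
	int off;
	int (*set)(struct demo_spec *s, uint32_t val, uint32_t mask);
};

static int demo_set_sip(struct demo_spec *s, uint32_t val, uint32_t mask)
{
	s->sip = val;
	s->sip_mask = mask;
	return 0;
}

static const struct demo_match_field demo_ipv4_fields[] = {
	{ 12, demo_set_sip },	/* IPv4 source address offset */
	{ 0, NULL },		/* table terminator */
};

/* Walk the table until an entry's offset matches, as the parser above does. */
static int demo_fill_match(struct demo_spec *s, int off,
			   uint32_t val, uint32_t mask)
{
	int i;

	for (i = 0; demo_ipv4_fields[i].set; i++)
		if (demo_ipv4_fields[i].off == off)
			return demo_ipv4_fields[i].set(s, val, mask);
	return -1;	/* unknown offset */
}

int main(void)
{
	struct demo_spec s = { 0 };

	if (!demo_fill_match(&s, 12, 0x0a000001, 0xffffffff))
		printf("sip %#x/%#x\n", s.sip, s.sip_mask);
	return 0;
}
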
+
+/* Fill ch_filter_specification with parsed action. */
+static int fill_action_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls)
+{
+ unsigned int num_actions = 0;
+ const struct tc_action *a;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Don't allow more than one action per rule. */
+ if (num_actions)
+ return -EINVAL;
+
+ /* Drop in hardware. */
+ if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ num_actions++;
+ continue;
+ }
+
+ /* Redirect to the specified port in hardware. */
+ if (is_tcf_mirred_redirect(a)) {
+ struct net_device *n_dev;
+ unsigned int i, index;
+ bool found = false;
+
+ index = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (index == n_dev->ifindex) {
+ fs->action = FILTER_SWITCH;
+ fs->eport = i;
+ found = true;
+ break;
+ }
+ }
+
+ /* Interface doesn't belong to any port of
+ * the underlying hardware.
+ */
+ if (!found)
+ return -EINVAL;
+
+ num_actions++;
+ continue;
+ }
+
+ /* Unsupported action. */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ const struct cxgb4_match_field *start, *link_start = NULL;
+ struct adapter *adapter = netdev2adap(dev);
+ struct ch_filter_specification fs;
+ struct cxgb4_tc_u32_table *t;
+ struct cxgb4_link *link;
+ unsigned int filter_id;
+ u32 uhtid, link_uhtid;
+ bool is_ipv6 = false;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to insert the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for insertion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
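+ /* For example (illustrative), a root knode handle of 0x8000000a
+ * yields filter location 10 (handle & 0xFFFFF) and uhtid 0x800,
+ * i.e. the root hash table.
+ */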
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Ensure link handle uhtid is sane, if specified. */
+ if (link_uhtid >= t->size)
+ return -EINVAL;
+
+ memset(&fs, 0, sizeof(fs));
+
+ if (protocol == htons(ETH_P_IPV6)) {
+ start = cxgb4_ipv6_fields;
+ is_ipv6 = true;
+ } else {
+ start = cxgb4_ipv4_fields;
+ is_ipv6 = false;
+ }
+
+ if (uhtid != 0x800) {
+ /* Link must exist from root node before insertion. */
+ if (!t->table[uhtid - 1].link_handle)
+ return -EINVAL;
+
+ /* Link must have a valid supported next header. */
+ link_start = t->table[uhtid - 1].match_field;
+ if (!link_start)
+ return -EINVAL;
+ }
+
+ /* Parse links and record them for subsequent jumps to valid
+ * next headers.
+ */
+ if (link_uhtid) {
+ const struct cxgb4_next_header *next;
+ bool found = false;
+ unsigned int i, j;
+ u32 val, mask;
+ int off;
+
+ if (t->table[link_uhtid - 1].link_handle) {
+ dev_err(adapter->pdev_dev,
+ "Link handle exists for: 0x%x\n",
+ link_uhtid);
+ return -EINVAL;
+ }
+
+ next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
+
+ /* Try to find matches that allow jumps to next header. */
+ for (i = 0; next[i].jump; i++) {
+ if (next[i].offoff != cls->knode.sel->offoff ||
+ next[i].shift != cls->knode.sel->offshift ||
+ next[i].mask != cls->knode.sel->offmask ||
+ next[i].offset != cls->knode.sel->off)
+ continue;
+
+ /* Found a possible candidate. Find a key that
+ * matches the corresponding offset, value, and
+ * mask to jump to next header.
+ */
+ for (j = 0; j < cls->knode.sel->nkeys; j++) {
+ off = cls->knode.sel->keys[j].off;
+ val = cls->knode.sel->keys[j].val;
+ mask = cls->knode.sel->keys[j].mask;
+
+ if (next[i].match_off == off &&
+ next[i].match_val == val &&
+ next[i].match_mask == mask) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue; /* Try next candidate. */
+
+ /* Candidate to jump to next header found.
+ * Translate all keys to internal specification
+ * and store them in jump table. This spec is copied
+ * later to set the actual filters.
+ */
+ ret = fill_match_fields(adapter, &fs, cls,
+ start, false);
+ if (ret)
+ goto out;
+
+ link = &t->table[link_uhtid - 1];
+ link->match_field = next[i].jump;
+ link->link_handle = cls->knode.handle;
+ memcpy(&link->fs, &fs, sizeof(fs));
+ break;
+ }
+
+ /* No candidate found to jump to next header. */
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* Fill ch_filter_specification match fields to be shipped to hardware.
+ * Copy the linked spec (if any) first. And then update the spec as
+ * needed.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
+ /* Copy linked ch_filter_specification */
+ memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
+ ret = fill_match_fields(adapter, &fs, cls,
+ link_start, true);
+ if (ret)
+ goto out;
+ }
+
+ ret = fill_match_fields(adapter, &fs, cls, start, false);
+ if (ret)
+ goto out;
+
+ /* Fill ch_filter_specification action fields to be shipped to
+ * hardware.
+ */
+ ret = fill_action_fields(adapter, &fs, cls);
+ if (ret)
+ goto out;
+
+ /* The filter spec has been completely built from the info
+ * provided from u32. We now set some default fields in the
+ * spec for sanity.
+ */
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs.val.iport = netdev2pinfo(dev)->port_id;
+ fs.mask.iport = ~0;
+
+ /* Enable filter hit counts. */
+ fs.hitcnts = 1;
+
+ /* Set type of filter - IPv6 or IPv4 */
+ fs.type = is_ipv6 ? 1 : 0;
+
+ /* Set the filter */
+ ret = cxgb4_set_filter(dev, filter_id, &fs);
+ if (ret)
+ goto out;
+
+ /* If this is a linked bucket, then set the corresponding
+ * entry in the bitmap to mark it as belonging to this linked
+ * bucket.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
+ set_bit(filter_id, t->table[uhtid - 1].tid_map);
+
+out:
+ return ret;
+}
+
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int filter_id, max_tids, i, j;
+ struct cxgb4_link *link = NULL;
+ struct cxgb4_tc_u32_table *t;
+ u32 handle, uhtid;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to delete the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for deletion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ handle = cls->knode.handle;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Delete the specified filter */
+ if (uhtid != 0x800) {
+ link = &t->table[uhtid - 1];
+ if (!link->link_handle)
+ return -EINVAL;
+
+ if (!test_bit(filter_id, link->tid_map))
+ return -EINVAL;
+ }
+
+ ret = cxgb4_del_filter(dev, filter_id);
+ if (ret)
+ goto out;
+
+ if (link)
+ clear_bit(filter_id, link->tid_map);
+
+ /* If a link is being deleted, then delete all filters
+ * associated with the link.
+ */
+ max_tids = adapter->tids.nftids;
+ for (i = 0; i < t->size; i++) {
+ link = &t->table[i];
+
+ if (link->link_handle == handle) {
+ for (j = 0; j < max_tids; j++) {
+ if (!test_bit(j, link->tid_map))
+ continue;
+
+ ret = __cxgb4_del_filter(dev, j, NULL);
+ if (ret)
+ goto out;
+
+ clear_bit(j, link->tid_map);
+ }
+
+ /* Clear the link state */
+ link->match_field = NULL;
+ link->link_handle = 0;
+ memset(&link->fs, 0, sizeof(link->fs));
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+void cxgb4_cleanup_tc_u32(struct adapter *adap)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!adap->tc_u32)
+ return;
+
+ /* Free up all allocated memory. */
+ t = adap->tc_u32;
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ t4_free_mem(link->tid_map);
+ }
+ t4_free_mem(adap->tc_u32);
+}
+
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!size)
+ return NULL;
+
+ t = t4_alloc_mem(sizeof(*t) +
+ (size * sizeof(struct cxgb4_link)));
+ if (!t)
+ return NULL;
+
+ t->size = size;
+
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+ unsigned int bmap_size;
+ unsigned int max_tids;
+
+ max_tids = adap->tids.nftids;
+ bmap_size = BITS_TO_LONGS(max_tids);
+ link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ if (!link->tid_map)
+ goto out_no_mem;
+ bitmap_zero(link->tid_map, max_tids);
+ }
+
+ return t;
+
+out_no_mem:
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ if (link->tid_map)
+ t4_free_mem(link->tid_map);
+ }
+
+ if (t)
+ t4_free_mem(t);
+
+ return NULL;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
new file mode 100644
index 000000000000..6bdc885eff22
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_H
+#define __CXGB4_TC_U32_H
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_MAX_LINK_HANDLE 32
+
+static inline bool can_tc_u32_offload(struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? true : false;
+}
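+
+/* Illustrative usage (assumes the standard ethtool/iproute2 tools): u32
+ * offload is attempted only when hw-tc-offload is enabled on the netdev,
+ * e.g. something like
+ *
+ * ethtool -K ethX hw-tc-offload on
+ * tc qdisc add dev ethX ingress
+ * tc filter add dev ethX parent ffff: protocol ip u32 \
+ * match ip protocol 6 0xff match ip dport 80 0xffff \
+ * action drop
+ */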
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+
+void cxgb4_cleanup_tc_u32(struct adapter *adapter);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size);
+#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
new file mode 100644
index 000000000000..a4b99edcc339
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -0,0 +1,294 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_PARSE_H
+#define __CXGB4_TC_U32_PARSE_H
+
+struct cxgb4_match_field {
+ int off; /* Offset from the beginning of the header to match */
+ /* Fill the value/mask pair in the spec if matched */
+ int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+};
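+
+/* Note: cls_u32 match keys are 32-bit value/mask pairs taken from the
+ * packet at the byte offset above (4-byte aligned as generated by tc),
+ * in network byte order. Each helper below therefore converts with
+ * ntohl() and shifts out the bytes it does not need, e.g. the IPv4 word
+ * at offset 8 is TTL|protocol|checksum, so cxgb4_fill_ipv4_proto()
+ * recovers the protocol with (ntohl(val) >> 16) & 0xff.
+ */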
+
+/* IPv4 match fields */
+static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ u32 mask_val;
+ u8 frag_val;
+
+ frag_val = (ntohl(val) >> 13) & 0x00000007;
+ mask_val = ntohl(mask) & 0x0000FFFF;
+
+ if (frag_val == 0x1 && mask_val != 0x3FFF) { /* MF set */
+ f->val.frag = 1;
+ f->mask.frag = 1;
+ } else if (frag_val == 0x2 && mask_val != 0x3FFF) { /* DF set */
+ f->val.frag = 0;
+ f->mask.frag = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv4_tos },
+ { .off = 4, .val = cxgb4_fill_ipv4_frag },
+ { .off = 8, .val = cxgb4_fill_ipv4_proto },
+ { .off = 12, .val = cxgb4_fill_ipv4_src_ip },
+ { .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
+ { .val = NULL }
+};
+
+/* IPv6 match fields */
+static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[4], &val, sizeof(u32));
+ memcpy(&f->mask.fip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[8], &val, sizeof(u32));
+ memcpy(&f->mask.fip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[12], &val, sizeof(u32));
+ memcpy(&f->mask.fip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[4], &val, sizeof(u32));
+ memcpy(&f->mask.lip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[8], &val, sizeof(u32));
+ memcpy(&f->mask.lip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[12], &val, sizeof(u32));
+ memcpy(&f->mask.lip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv6_tos },
+ { .off = 4, .val = cxgb4_fill_ipv6_proto },
+ { .off = 8, .val = cxgb4_fill_ipv6_src_ip0 },
+ { .off = 12, .val = cxgb4_fill_ipv6_src_ip1 },
+ { .off = 16, .val = cxgb4_fill_ipv6_src_ip2 },
+ { .off = 20, .val = cxgb4_fill_ipv6_src_ip3 },
+ { .off = 24, .val = cxgb4_fill_ipv6_dst_ip0 },
+ { .off = 28, .val = cxgb4_fill_ipv6_dst_ip1 },
+ { .off = 32, .val = cxgb4_fill_ipv6_dst_ip2 },
+ { .off = 36, .val = cxgb4_fill_ipv6_dst_ip3 },
+ { .val = NULL }
+};
+
+/* TCP/UDP match */
+static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.fport = ntohl(val) >> 16;
+ f->mask.fport = ntohl(mask) >> 16;
+ f->val.lport = ntohl(val) & 0x0000FFFF;
+ f->mask.lport = ntohl(mask) & 0x0000FFFF;
+
+ return 0;
+}
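+
+/* Note: the first 32-bit word of both the TCP and UDP headers carries the
+ * source port in the upper 16 bits and the destination port in the lower
+ * 16 bits, which map to the filter's foreign (fport) and local (lport)
+ * ports respectively.
+ */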
+
+static const struct cxgb4_match_field cxgb4_tcp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+static const struct cxgb4_match_field cxgb4_udp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+struct cxgb4_next_header {
+ unsigned int offset; /* Offset to next header */
+ /* The field at offoff below, masked and shifted, is added to the
+ * offset above to reach the next header. Useful when a header
+ * field's value encodes the header length, such as the IHL field
+ * in the IPv4 header.
+ */
+ unsigned int offoff;
+ u32 shift;
+ u32 mask;
+ /* match criteria to make this jump */
+ unsigned int match_off;
+ u32 match_val;
+ u32 match_mask;
+ /* location of jump to make */
+ const struct cxgb4_match_field *jump;
+};
+
+/* Accept a rule with a jump to the transport layer header, based on the
+ * IHL field in the IPv4 header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
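+
+/* Note: the match_val/match_mask pairs above and below encode IPv4
+ * protocol / IPv6 next-header values 6 (TCP) and 17 (UDP) as they appear
+ * in the raw u32 key words at offsets 8 and 4 respectively.
+ */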
+
+/* Accept a rule with a jump directly past the 40-byte IPv6 fixed header to
+ * reach the transport layer header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
+
+struct cxgb4_link {
+ const struct cxgb4_match_field *match_field; /* Next header */
+ struct ch_filter_specification fs; /* Match spec associated with link */
+ u32 link_handle; /* Knode handle associated with the link */
+ unsigned long *tid_map; /* Bitmap for filter tids */
+};
+
+struct cxgb4_tc_u32_table {
+ unsigned int size; /* number of entries in table */
+ struct cxgb4_link table[0]; /* Jump table */
+};
+#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
new file mode 100644
index 000000000000..0945fa49a5dd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -0,0 +1,697 @@
+/*
+ * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+#include "t4_msg.h"
+
+#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
+
+static int get_msix_idx_from_bmap(struct adapter *adap)
+{
+ struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
+ unsigned long flags;
+ unsigned int msix_idx;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
+ if (msix_idx < bmap->mapsize) {
+ __set_bit(msix_idx, bmap->msix_bmap);
+ } else {
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return -ENOSPC;
+ }
+
+ spin_unlock_irqrestore(&bmap->lock, flags);
+ return msix_idx;
+}
+
+static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
+{
+ struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bmap->lock, flags);
+ __clear_bit(msix_idx, bmap->msix_bmap);
+ spin_unlock_irqrestore(&bmap->lock, flags);
+}
+
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ struct adapter *adap = q->adap;
+
+ if (adap->uld[q->uld].lro_flush)
+ adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD; we just maintain statistics here.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct adapter *adap = q->adap;
+ struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ int ret;
+
+ /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
+ if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
+ rsp += 2;
+
+ if (q->flush_handler)
+ ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
+ rsp, gl, &q->lro_mgr,
+ &q->napi);
+ else
+ ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
+ rsp, gl);
+
+ if (ret) {
+ rxq->stats.nomem++;
+ return -1;
+ }
+
+ if (!gl)
+ rxq->stats.imm++;
+ else if (gl == CXGB4_MSG_AN)
+ rxq->stats.an++;
+ else
+ rxq->stats.pkts++;
+ return 0;
+}
+
+static int alloc_uld_rxqs(struct adapter *adap,
+ struct sge_uld_rxq_info *rxq_info,
+ unsigned int nq, unsigned int offset, bool lro)
+{
+ struct sge *s = &adap->sge;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
+ unsigned short *ids = rxq_info->rspq_id + offset;
+ unsigned int per_chan = nq / adap->params.nports;
+ unsigned int bmap_idx = 0;
+ int i, err, msi_idx;
+
+ if (adap->flags & USING_MSIX)
+ msi_idx = 1;
+ else
+ msi_idx = -((int)s->intrq.abs_id + 1);
+
+ for (i = 0; i < nq; i++, q++) {
+ if (msi_idx >= 0) {
+ bmap_idx = get_msix_idx_from_bmap(adap);
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+ }
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+ adap->port[i / per_chan],
+ msi_idx,
+ q->fl.size ? &q->fl : NULL,
+ uldrx_handler,
+ lro ? uldrx_flush_handler : NULL,
+ 0);
+ if (err)
+ goto freeout;
+ if (msi_idx >= 0)
+ rxq_info->msix_tbl[i + offset] = bmap_idx;
+ memset(&q->stats, 0, sizeof(q->stats));
+ if (ids)
+ ids[i] = q->rspq.abs_id;
+ }
+ return 0;
+freeout:
+ q = rxq_info->uldrxq + offset;
+ for ( ; i; i--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+
+ /* If this was the ciq allocation (offset != 0), also free the rxqs
+ * allocated by the earlier call so they are not leaked on failure.
+ */
+ if (offset) {
+ q = rxq_info->uldrxq;
+ for (i = offset; i; i--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+ }
+ return err;
+}
+
+static int
+setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
+
+ if (adap->flags & USING_MSIX) {
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
+ GFP_KERNEL);
+ if (!rxq_info->msix_tbl)
+ return -ENOMEM;
+ }
+
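+ /* ret is zero only when both the plain rxq and the ciq allocations
+ * below succeed; any failure is collapsed to a non-zero value.
+ */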
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+ !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
+ rxq_info->nrxq, lro));
+
+ /* Tell uP to route control queue completions to rdma rspq */
+ if (adap->flags & FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+ u32 param, cmdop;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+ return ret;
+}
+
+static void t4_free_uld_rxqs(struct adapter *adap, int n,
+ struct sge_ofld_rxq *q)
+{
+ for ( ; n; n--, q++) {
+ if (q->rspq.desc)
+ free_rspq_fl(adap, &q->rspq,
+ q->fl.size ? &q->fl : NULL);
+ }
+}
+
+static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+
+ if (rxq_info->nciq)
+ t4_free_uld_rxqs(adap, rxq_info->nciq,
+ rxq_info->uldrxq + rxq_info->nrxq);
+ t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
+ if (adap->flags & USING_MSIX)
+ kfree(rxq_info->msix_tbl);
+}
+
+static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
+ const struct cxgb4_uld_info *uld_info)
+{
+ struct sge *s = &adap->sge;
+ struct sge_uld_rxq_info *rxq_info;
+ int i, nrxq, ciq_size;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+ i = min_t(int, uld_info->nrxq,
+ num_online_cpus());
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ }
+ if (!uld_info->ciq) {
+ rxq_info->nciq = 0;
+ } else {
+ if (adap->flags & USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+ rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+ num_online_cpus());
+ rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+ adap->params.nports);
+ rxq_info->nciq = max_t(int, rxq_info->nciq,
+ adap->params.nports);
+ }
+
+ nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
+ rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
+ GFP_KERNEL);
+ if (!rxq_info->uldrxq) {
+ kfree(rxq_info);
+ return -ENOMEM;
+ }
+
+ rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
+ if (!rxq_info->rspq_id) {
+ kfree(rxq_info->uldrxq);
+ kfree(rxq_info);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rxq_info->nrxq; i++) {
+ struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
+ r->rspq.uld = uld_type;
+ r->fl.size = 72;
+ }
+
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
+ for (i = rxq_info->nrxq; i < nrxq; i++) {
+ struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+ r->rspq.uld = uld_type;
+ }
+
+ memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
+ adap->sge.uld_rxq_info[uld_type] = rxq_info;
+
+ return 0;
+}
+
+static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ kfree(rxq_info->rspq_id);
+ kfree(rxq_info->uldrxq);
+ kfree(rxq_info);
+}
+
+static int
+request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int err = 0;
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+ err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info_ulds[bmap_idx].desc,
+ &rxq_info->uldrxq[idx].rspq);
+ if (err)
+ goto unwind;
+ }
+ return 0;
+unwind:
+ while (idx-- > 0) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+ free_msix_idx_in_bmap(adap, bmap_idx);
+ free_irq(adap->msix_info_ulds[bmap_idx].vec,
+ &rxq_info->uldrxq[idx].rspq);
+ }
+ return err;
+}
+
+static void
+free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+
+ free_msix_idx_in_bmap(adap, bmap_idx);
+ free_irq(adap->msix_info_ulds[bmap_idx].vec,
+ &rxq_info->uldrxq[idx].rspq);
+ }
+}
+
+static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int n = sizeof(adap->msix_info_ulds[0].desc);
+ unsigned int idx, bmap_idx;
+
+ for_each_uldrxq(rxq_info, idx) {
+ bmap_idx = rxq_info->msix_tbl[idx];
+
+ snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
+ adap->port[0]->name, rxq_info->name, idx);
+ }
+}
+
+static void enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (!q)
+ return;
+
+ if (q->handler) {
+ cxgb_busy_poll_init_lock(q);
+ napi_enable(&q->napi);
+ }
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+ SEINTARM_V(q->intr_params) |
+ INGRESSQID_V(q->cntxt_id));
+}
+
+static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ if (q && q->handler) {
+ napi_disable(&q->napi);
+ local_bh_disable();
+ while (!cxgb_poll_lock_napi(q))
+ mdelay(1);
+ local_bh_enable();
+ }
+}
+
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int idx;
+
+ for_each_uldrxq(rxq_info, idx)
+ enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
+}
+
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int idx;
+
+ for_each_uldrxq(rxq_info, idx)
+ quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
+}
+
+static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
+ struct cxgb4_lld_info *lli)
+{
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+ lli->rxq_ids = rxq_info->rspq_id;
+ lli->nrxq = rxq_info->nrxq;
+ lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
+ lli->nciq = rxq_info->nciq;
+}
+
+int t4_uld_mem_alloc(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+
+ adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
+ if (!adap->uld)
+ return -ENOMEM;
+
+ s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
+ sizeof(struct sge_uld_rxq_info *),
+ GFP_KERNEL);
+ if (!s->uld_rxq_info)
+ goto err_uld;
+
+ return 0;
+err_uld:
+ kfree(adap->uld);
+ return -ENOMEM;
+}
+
+void t4_uld_mem_free(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+
+ kfree(s->uld_rxq_info);
+ kfree(adap->uld);
+}
+
+void t4_uld_clean_up(struct adapter *adap)
+{
+ struct sge_uld_rxq_info *rxq_info;
+ unsigned int i;
+
+ if (!adap->uld)
+ return;
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ if (!adap->uld[i].handle)
+ continue;
+ rxq_info = adap->sge.uld_rxq_info[i];
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, i);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, i);
+ free_sge_queues_uld(adap, i);
+ free_queues_uld(adap, i);
+ }
+}
+
+static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
+{
+ int i;
+
+ lld->pdev = adap->pdev;
+ lld->pf = adap->pf;
+ lld->l2t = adap->l2t;
+ lld->tids = &adap->tids;
+ lld->ports = adap->port;
+ lld->vr = &adap->vres;
+ lld->mtus = adap->params.mtus;
+ lld->ntxq = adap->sge.ofldqsets;
+ lld->nchan = adap->params.nports;
+ lld->nports = adap->params.nports;
+ lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lld->iscsi_ppm = &adap->iscsi_ppm;
+ lld->adapter_type = adap->params.chip;
+ lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
+ lld->udb_density = 1 << adap->params.sge.eq_qpp;
+ lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+ lld->filt_mode = adap->params.tp.vlan_pri_map;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lld->tx_modq[i] = i;
+ lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+ lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
+ lld->fw_vers = adap->params.fw_vers;
+ lld->dbfifo_int_thresh = dbfifo_int_thresh;
+ lld->sge_ingpadboundary = adap->sge.fl_align;
+ lld->sge_egrstatuspagesize = adap->sge.stat_len;
+ lld->sge_pktshift = adap->sge.pktshift;
+ lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lld->max_ordird_qp = adap->params.max_ordird_qp;
+ lld->max_ird_adapter = adap->params.max_ird_adapter;
+ lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+ lld->nodeid = dev_to_node(adap->pdev_dev);
+ lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
+}
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+ void *handle;
+ struct cxgb4_lld_info lli;
+
+ uld_init(adap, &lli);
+ uld_queue_init(adap, uld, &lli);
+
+ handle = adap->uld[uld].add(&lli);
+ if (IS_ERR(handle)) {
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ adap->uld[uld].name, PTR_ERR(handle));
+ return;
+ }
+
+ adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
+
+ if (adap->flags & FULL_INIT_DONE)
+ adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+}
+
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+ unsigned int adap_idx = 0;
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ ret = cfg_queues_uld(adap, type, p);
+ if (ret)
+ goto out;
+ ret = setup_sge_queues_uld(adap, type, p->lro);
+ if (ret)
+ goto free_queues;
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs_uld(adap, type);
+ ret = request_msix_queue_irqs_uld(adap, type);
+ if (ret)
+ goto free_rxq;
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ enable_rx_uld(adap, type);
+ if (adap->uld[type].add) {
+ ret = -EBUSY;
+ goto free_irq;
+ }
+ adap->uld[type] = *p;
+ uld_attach(adap, type);
+ adap_idx++;
+ }
+ mutex_unlock(&uld_mutex);
+ return 0;
+
+free_irq:
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+ free_sge_queues_uld(adap, type);
+free_queues:
+ free_queues_uld(adap, type);
+out:
+
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ if (!adap_idx)
+ break;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ adap_idx--;
+ }
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
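+
+/* Illustrative sketch of a caller (names and sizes are examples only,
+ * not a real ULD):
+ *
+ * static const struct cxgb4_uld_info my_uld_info = {
+ * .name = "my_uld",
+ * .nrxq = MAX_ULD_QSETS,
+ * .rxq_size = 1024,
+ * .ciq = false,
+ * .lro = false,
+ * .add = my_uld_add,
+ * .rx_handler = my_uld_rx_handler,
+ * .state_change = my_uld_state_change,
+ * };
+ *
+ * err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
+ */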
+
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ }
+ mutex_unlock(&uld_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index f3c58aaa932d..2996793b1aaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,8 +32,8 @@
* SOFTWARE.
*/
-#ifndef __CXGB4_OFLD_H
-#define __CXGB4_OFLD_H
+#ifndef __CXGB4_ULD_H
+#define __CXGB4_ULD_H
#include <linux/cache.h>
#include <linux/spinlock.h>
@@ -42,6 +42,8 @@
#include <linux/atomic.h>
#include "cxgb4.h"
+#define MAX_ULD_QSETS 16
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
@@ -104,6 +106,7 @@ struct tid_info {
unsigned int atid_base;
struct filter_entry *ftid_tab;
+ unsigned long *ftid_bmap;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -124,6 +127,8 @@ struct tid_info {
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ /* lock for setting/clearing filter bitmap */
+ spinlock_t ftid_lock;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -183,15 +188,38 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
+/* Filter operation context to allow callers of cxgb4_set_filter() and
+ * cxgb4_del_filter() to wait for an asynchronous completion.
+ */
+struct filter_ctx {
+ struct completion completion; /* completion rendezvous */
+ void *closure; /* caller's opaque information */
+ int result; /* result of operation */
+ u32 tid; /* to store tid */
+};
+
+struct ch_filter_specification;
+
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx);
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs);
+int cxgb4_del_filter(struct net_device *dev, int filter_id);
+
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
}
enum cxgb4_uld {
+ CXGB4_ULD_INIT,
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
+ CXGB4_ULD_CRYPTO,
CXGB4_ULD_MAX
};
@@ -280,10 +308,16 @@ struct cxgb4_lld_info {
unsigned int iscsi_llimit; /* chip's iscsi region llimit */
void **iscsi_ppm; /* iscsi page pod manager */
int nodeid; /* device numa node id */
+ bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */
};
struct cxgb4_uld_info {
const char *name;
+ void *handle;
+ unsigned int nrxq;
+ unsigned int rxq_size;
+ bool ciq;
+ bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
@@ -330,4 +364,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
-#endif /* !__CXGB4_OFLD_H */
+#endif /* !__CXGB4_ULD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
new file mode 100644
index 000000000000..539de764bbd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -0,0 +1,556 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "cxgb4.h"
+#include "sched.h"
+
+/* Spinlock must be held by caller */
+static int t4_sched_class_fw_cmd(struct port_info *pi,
+ struct ch_sched_params *p,
+ enum sched_fw_ops op)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ int err = 0;
+
+ e = &s->tab[p->u.params.class];
+ switch (op) {
+ case SCHED_FW_OP_ADD:
+ err = t4_sched_params(adap, p->type,
+ p->u.params.level, p->u.params.mode,
+ p->u.params.rateunit,
+ p->u.params.ratemode,
+ p->u.params.channel, e->idx,
+ p->u.params.minrate, p->u.params.maxrate,
+ p->u.params.weight, p->u.params.pktsize);
+ break;
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/* Spinlock must be held by caller */
+static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
+ enum sched_bind_type type, bool bind)
+{
+ struct adapter *adap = pi->adapter;
+ u32 fw_mnem, fw_class, fw_param;
+ unsigned int pf = adap->pf;
+ unsigned int vf = 0;
+ int err = 0;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ qe = (struct sched_queue_entry *)arg;
+
+ /* Create a template for the FW_PARAMS_CMD mnemonic and
+ * value (TX Scheduling Class in this case).
+ */
+ fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
+ fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
+ fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));
+
+ pf = adap->pf;
+ vf = 0;
+ break;
+ }
+ default:
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
+
+out:
+ return err;
+}
+
+static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
+ const unsigned int qid,
+ int *index)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e, *end;
+ struct sched_class *found = NULL;
+ int i;
+
+ /* Look for a class with matching bound queue parameters */
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ struct sched_queue_entry *qe;
+
+ i = 0;
+ if (e->state == SCHED_STATE_UNUSED)
+ continue;
+
+ list_for_each_entry(qe, &e->queue_list, list) {
+ if (qe->cntxt_id == qid) {
+ found = e;
+ if (index)
+ *index = i;
+ break;
+ }
+ i++;
+ }
+
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_class *e;
+ struct sched_queue_entry *qe = NULL;
+ struct sge_eth_txq *txq;
+ unsigned int qid;
+ int index = -1;
+ int err = 0;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return -ERANGE;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qid = txq->q.cntxt_id;
+
+ /* Find the existing class that the queue is bound to */
+ e = t4_sched_queue_lookup(pi, qid, &index);
+ if (e && index >= 0) {
+ int i = 0;
+
+ spin_lock(&e->lock);
+ list_for_each_entry(qe, &e->queue_list, list) {
+ if (i == index)
+ break;
+ i++;
+ }
+ err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
+ false);
+ if (err) {
+ spin_unlock(&e->lock);
+ goto out;
+ }
+
+ list_del(&qe->list);
+ t4_free_mem(qe);
+ if (atomic_dec_and_test(&e->refcnt)) {
+ e->state = SCHED_STATE_UNUSED;
+ memset(&e->info, 0, sizeof(e->info));
+ }
+ spin_unlock(&e->lock);
+ }
+out:
+ return err;
+}
+
+static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
+{
+ struct adapter *adap = pi->adapter;
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ struct sched_queue_entry *qe = NULL;
+ struct sge_eth_txq *txq;
+ unsigned int qid;
+ int err = 0;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return -ERANGE;
+
+ qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
+ if (!qe)
+ return -ENOMEM;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qid = txq->q.cntxt_id;
+
+ /* Unbind queue from any existing class */
+ err = t4_sched_queue_unbind(pi, p);
+ if (err)
+ goto out;
+
+ /* Bind queue to specified class */
+ memset(qe, 0, sizeof(*qe));
+ qe->cntxt_id = qid;
+ memcpy(&qe->param, p, sizeof(qe->param));
+
+ e = &s->tab[qe->param.class];
+ spin_lock(&e->lock);
+ err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
+ if (err) {
+ t4_free_mem(qe);
+ spin_unlock(&e->lock);
+ goto out;
+ }
+
+ list_add_tail(&qe->list, &e->queue_list);
+ atomic_inc(&e->refcnt);
+ spin_unlock(&e->lock);
+out:
+ return err;
+}
+
+static void t4_sched_class_unbind_all(struct port_info *pi,
+ struct sched_class *e,
+ enum sched_bind_type type)
+{
+ if (!e)
+ return;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct sched_queue_entry *qe;
+
+ list_for_each_entry(qe, &e->queue_list, list)
+ t4_sched_queue_unbind(pi, &qe->param);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
+ enum sched_bind_type type, bool bind)
+{
+ int err = 0;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ if (bind)
+ err = t4_sched_queue_bind(pi, qe);
+ else
+ err = t4_sched_queue_unbind(pi, qe);
+ break;
+ }
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * cxgb4_sched_class_bind - Bind an entity to a scheduling class
+ * @dev: net_device pointer
+ * @arg: Entity opaque data
+ * @type: Entity type (Queue)
+ *
+ * Binds an entity (queue) to a scheduling class. If the entity
+ * is bound to another class, it will be unbound from the other class
+ * and bound to the class specified in @arg.
+ */
+int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
+ enum sched_bind_type type)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s;
+ int err = 0;
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return -ENOTSUPP;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ class_id = qe->class;
+ break;
+ }
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!valid_class_id(dev, class_id))
+ return -EINVAL;
+
+ if (class_id == SCHED_CLS_NONE)
+ return -ENOTSUPP;
+
+ s = pi->sched_tbl;
+ write_lock(&s->rw_lock);
+ err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
+ write_unlock(&s->rw_lock);
+
+ return err;
+}
+
+/**
+ * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
+ * @dev: net_device pointer
+ * @arg: Entity opaque data
+ * @type: Entity type (Queue)
+ *
+ * Unbinds an entity (queue) from a scheduling class.
+ */
+int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
+ enum sched_bind_type type)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_table *s;
+ int err = 0;
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return -ENOTSUPP;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (type) {
+ case SCHED_QUEUE: {
+ struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;
+
+ class_id = qe->class;
+ break;
+ }
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!valid_class_id(dev, class_id))
+ return -EINVAL;
+
+ s = pi->sched_tbl;
+ write_lock(&s->rw_lock);
+ err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
+ write_unlock(&s->rw_lock);
+
+ return err;
+}
+
+/* If @p is NULL, fetch any available unused class */
+static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e, *end;
+ struct sched_class *found = NULL;
+
+ if (!p) {
+ /* Get any available unused class */
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ if (e->state == SCHED_STATE_UNUSED) {
+ found = e;
+ break;
+ }
+ }
+ } else {
+ /* Look for a class with matching scheduling parameters */
+ struct ch_sched_params info;
+ struct ch_sched_params tp;
+
+ memset(&info, 0, sizeof(info));
+ memset(&tp, 0, sizeof(tp));
+
+ memcpy(&tp, p, sizeof(tp));
+ /* Don't try to match class parameter */
+ tp.u.params.class = SCHED_CLS_NONE;
+
+ end = &s->tab[s->sched_size];
+ for (e = &s->tab[0]; e != end; ++e) {
+ if (e->state == SCHED_STATE_UNUSED)
+ continue;
+
+ memset(&info, 0, sizeof(info));
+ memcpy(&info, &e->info, sizeof(info));
+ /* Don't try to match class parameter */
+ info.u.params.class = SCHED_CLS_NONE;
+
+ if ((info.type == tp.type) &&
+ (!memcmp(&info.u.params, &tp.u.params,
+ sizeof(info.u.params)))) {
+ found = e;
+ break;
+ }
+ }
+ }
+
+ return found;
+}
+
+static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
+{
+ struct sched_table *s = pi->sched_tbl;
+ struct sched_class *e;
+ u8 class_id;
+ int err;
+
+ if (!p)
+ return NULL;
+
+ class_id = p->u.params.class;
+
+ /* Only accept a search for an existing class with matching params,
+ * or allocation of a new class with the specified params
+ */
+ if (class_id != SCHED_CLS_NONE)
+ return NULL;
+
+ write_lock(&s->rw_lock);
+ /* See if there's an existing class with the same
+ * requested sched params
+ */
+ e = t4_sched_class_lookup(pi, p);
+ if (!e) {
+ struct ch_sched_params np;
+
+ /* Fetch any available unused class */
+ e = t4_sched_class_lookup(pi, NULL);
+ if (!e)
+ goto out;
+
+ memset(&np, 0, sizeof(np));
+ memcpy(&np, p, sizeof(np));
+ np.u.params.class = e->idx;
+
+ spin_lock(&e->lock);
+ /* New class */
+ err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
+ if (err) {
+ spin_unlock(&e->lock);
+ e = NULL;
+ goto out;
+ }
+ memcpy(&e->info, &np, sizeof(e->info));
+ atomic_set(&e->refcnt, 0);
+ e->state = SCHED_STATE_ACTIVE;
+ spin_unlock(&e->lock);
+ }
+
+out:
+ write_unlock(&s->rw_lock);
+ return e;
+}
+
+/**
+ * cxgb4_sched_class_alloc - allocate a scheduling class
+ * @dev: net_device pointer
+ * @p: new scheduling class to create.
+ *
+ * Returns pointer to the scheduling class created. If @p is NULL, then
+ * it allocates and returns any available unused scheduling class. If a
+ * scheduling class with matching @p is found, then the matching class is
+ * returned.
+ */
+struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ u8 class_id;
+
+ if (!can_sched(dev))
+ return NULL;
+
+ class_id = p->u.params.class;
+ if (!valid_class_id(dev, class_id))
+ return NULL;
+
+ return t4_sched_class_alloc(pi, p);
+}
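+
+/* Illustrative sketch of a caller (values are examples only; the remaining
+ * ch_sched_params fields come from the cxgb4.h scheduling API):
+ *
+ * struct ch_sched_params p = { };
+ * struct ch_sched_queue qe = { };
+ * struct sched_class *e;
+ *
+ * p.u.params.class = SCHED_CLS_NONE;
+ * p.u.params.maxrate = 1000000;
+ * e = cxgb4_sched_class_alloc(dev, &p);
+ *
+ * qe.queue = 0;
+ * qe.class = e->idx;
+ * cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ *
+ * SCHED_CLS_NONE lets the driver pick a free class; maxrate is in Kbps, so
+ * 1000000 caps the bound queue at roughly 1 Gbps. Error handling omitted.
+ */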
+
+static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+{
+ t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+}
+
+struct sched_table *t4_init_sched(unsigned int sched_size)
+{
+ struct sched_table *s;
+ unsigned int i;
+
+ s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
+ if (!s)
+ return NULL;
+
+ s->sched_size = sched_size;
+ rwlock_init(&s->rw_lock);
+
+ for (i = 0; i < s->sched_size; i++) {
+ memset(&s->tab[i], 0, sizeof(struct sched_class));
+ s->tab[i].idx = i;
+ s->tab[i].state = SCHED_STATE_UNUSED;
+ INIT_LIST_HEAD(&s->tab[i].queue_list);
+ spin_lock_init(&s->tab[i].lock);
+ atomic_set(&s->tab[i].refcnt, 0);
+ }
+ return s;
+}
+
+void t4_cleanup_sched(struct adapter *adap)
+{
+ struct sched_table *s;
+ unsigned int i, j;
+
+ for_each_port(adap, j) {
+ struct port_info *pi = netdev2pinfo(adap->port[j]);
+
+ s = pi->sched_tbl;
+ for (i = 0; i < s->sched_size; i++) {
+ struct sched_class *e;
+
+ write_lock(&s->rw_lock);
+ e = &s->tab[i];
+ if (e->state == SCHED_STATE_ACTIVE)
+ t4_sched_class_free(pi, e);
+ write_unlock(&s->rw_lock);
+ }
+ t4_free_mem(s);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
new file mode 100644
index 000000000000..77b2b3fd9021
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_SCHED_H
+#define __CXGB4_SCHED_H
+
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#define SCHED_CLS_NONE 0xff
+
+#define FW_SCHED_CLS_NONE 0xffffffff
+
+/* Max rate that can be set to a scheduling class is 10 Gbps */
+#define SCHED_MAX_RATE_KBPS 10000000U
+
+enum {
+ SCHED_STATE_ACTIVE,
+ SCHED_STATE_UNUSED,
+};
+
+enum sched_fw_ops {
+ SCHED_FW_OP_ADD,
+};
+
+enum sched_bind_type {
+ SCHED_QUEUE,
+};
+
+struct sched_queue_entry {
+ struct list_head list;
+ unsigned int cntxt_id;
+ struct ch_sched_queue param;
+};
+
+struct sched_class {
+ u8 state;
+ u8 idx;
+ struct ch_sched_params info;
+ struct list_head queue_list;
+ spinlock_t lock; /* Per class lock */
+ atomic_t refcnt;
+};
+
+struct sched_table { /* per port scheduling table */
+ u8 sched_size;
+ rwlock_t rw_lock; /* Table lock */
+ struct sched_class tab[0];
+};
+
+static inline bool can_sched(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ return !pi->sched_tbl ? false : true;
+}
+
+static inline bool valid_class_id(struct net_device *dev, u8 class_id)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ if ((class_id > pi->sched_tbl->sched_size - 1) &&
+ (class_id != SCHED_CLS_NONE))
+ return false;
+
+ return true;
+}
+
+int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
+ enum sched_bind_type type);
+int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
+ enum sched_bind_type type);
+
+struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
+
+struct sched_table *t4_init_sched(unsigned int size);
+void t4_cleanup_sched(struct adapter *adap);
+#endif /* __CXGB4_SCHED_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ad3552df0545..1e74fd6085df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2860,6 +2860,18 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return 0;
}
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid)
+{
+ u32 param, val;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
+ FW_PARAMS_PARAM_YZ_V(eqid));
+ val = cmplqid;
+ return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+}
+
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid)
{
@@ -2928,8 +2940,8 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
q->desc = NULL;
}
-static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
- struct sge_fl *fl)
+void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
{
struct sge *s = &adap->sge;
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
@@ -3014,12 +3026,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
- t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
-
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 660204bff726..20dec85da63d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -2729,7 +2729,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
out:
vfree(vpd);
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -8269,3 +8269,73 @@ void t4_idma_monitor(struct adapter *adapter,
t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
}
}
+
+/**
+ * t4_set_vf_mac_acl - Set MAC address for the specified VF
+ * @adapter: The adapter
+ * @vf: one of the VFs instantiated by the specified PF
+ * @naddr: the number of MAC addresses
+ * @addr: the MAC address(es) to be set to the specified VF
+ */
+int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+ unsigned int naddr, u8 *addr)
+{
+ struct fw_acl_mac_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
+ FW_ACL_MAC_CMD_VFN_V(vf));
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ cmd.nmac = naddr;
+
+ switch (adapter->pf) {
+ case 3:
+ memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
+ break;
+ case 2:
+ memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
+ break;
+ case 1:
+ memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
+ break;
+ case 0:
+ memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
+ break;
+ }
+
+ return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
+}
+
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+ int rateunit, int ratemode, int channel, int class,
+ int minrate, int maxrate, int weight, int pktsize)
+{
+ struct fw_sched_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+ cmd.u.params.type = type;
+ cmd.u.params.level = level;
+ cmd.u.params.mode = mode;
+ cmd.u.params.ch = channel;
+ cmd.u.params.cl = class;
+ cmd.u.params.unit = rateunit;
+ cmd.u.params.rate = ratemode;
+ cmd.u.params.min = cpu_to_be32(minrate);
+ cmd.u.params.max = cpu_to_be32(maxrate);
+ cmd.u.params.weight = cpu_to_be16(weight);
+ cmd.u.params.pktsize = cpu_to_be16(pktsize);
+
+ return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+ NULL, 1);
+}
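+
+/* Illustrative only: the per-port scheduler code in sched.c above is
+ * expected to feed the fields of a ch_sched_params into this helper,
+ * roughly as
+ *
+ *	t4_sched_params(adap, p->type, p->u.params.level, p->u.params.mode,
+ *			p->u.params.rateunit, p->u.params.ratemode,
+ *			p->u.params.channel, p->u.params.class,
+ *			p->u.params.minrate, p->u.params.maxrate,
+ *			p->u.params.weight, p->u.params.pktsize);
+ *
+ * The ch_sched_params field names used here are assumptions based on this
+ * patch; the fw_sched_cmd layout filled in above is what the firmware
+ * actually consumes.
+ */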
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index e0ebe1378cb2..fba3b2ad382d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -61,6 +61,7 @@ enum {
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32,
CPL_ISCSI_HDR = 0x33,
CPL_RDMA_CQE = 0x35,
@@ -83,6 +84,10 @@ enum {
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
+ CPL_TX_TLS_PDU = 0x88,
+ CPL_TX_SEC_PDU = 0x8A,
+ CPL_TX_TLS_ACK = 0x8B,
+
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -94,6 +99,8 @@ enum {
CPL_FW4_PLD = 0xC1,
CPL_FW4_ACK = 0xC3,
+ CPL_RX_PHYS_DSGL = 0xD0,
+
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
CPL_TX_PKT_LSO = 0xED,
@@ -1362,6 +1369,15 @@ struct ulptx_idata {
__be32 len;
};
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+#define ULPTX_CMD_S 24
+#define ULPTX_CMD_M 0xFF
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
@@ -1369,6 +1385,22 @@ struct ulptx_idata {
#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
#define ULPTX_MORE_F ULPTX_MORE_V(1U)
+#define ULP_TXPKT_DEST_S 16
+#define ULP_TXPKT_DEST_M 0x3
+#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S)
+
+#define ULP_TXPKT_FID_S 4
+#define ULP_TXPKT_FID_M 0x7ff
+#define ULP_TXPKT_FID_V(x) ((x) << ULP_TXPKT_FID_S)
+
+#define ULP_TXPKT_RO_S 3
+#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
+#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+
+#define ULP_TX_SC_MORE_S 23
+#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
+#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
+
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
@@ -1406,4 +1438,409 @@ struct ulp_mem_io {
#define ULP_MEMIO_DATA_LEN_S 0
#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_M 0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M)
+
+struct ulptx_sc_memrd {
+ __be32 cmd_to_len;
+ __be32 addr;
+};
+
+#define ULP_TXPKT_DATAMODIFY_S 23
+#define ULP_TXPKT_DATAMODIFY_M 0x1
+#define ULP_TXPKT_DATAMODIFY_V(x) ((x) << ULP_TXPKT_DATAMODIFY_S)
+#define ULP_TXPKT_DATAMODIFY_G(x) \
+ (((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY_M)
+#define ULP_TXPKT_DATAMODIFY_F ULP_TXPKT_DATAMODIFY_V(1U)
+
+#define ULP_TXPKT_CHANNELID_S 22
+#define ULP_TXPKT_CHANNELID_M 0x1
+#define ULP_TXPKT_CHANNELID_V(x) ((x) << ULP_TXPKT_CHANNELID_S)
+#define ULP_TXPKT_CHANNELID_G(x) \
+ (((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M)
+#define ULP_TXPKT_CHANNELID_F ULP_TXPKT_CHANNELID_V(1U)
+
+#define SCMD_SEQ_NO_CTRL_S 29
+#define SCMD_SEQ_NO_CTRL_M 0x3
+#define SCMD_SEQ_NO_CTRL_V(x) ((x) << SCMD_SEQ_NO_CTRL_S)
+#define SCMD_SEQ_NO_CTRL_G(x) \
+ (((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M)
+
+/* StsFieldPrsnt- Status field at the end of the TLS PDU */
+#define SCMD_STATUS_PRESENT_S 28
+#define SCMD_STATUS_PRESENT_M 0x1
+#define SCMD_STATUS_PRESENT_V(x) ((x) << SCMD_STATUS_PRESENT_S)
+#define SCMD_STATUS_PRESENT_G(x) \
+ (((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M)
+#define SCMD_STATUS_PRESENT_F SCMD_STATUS_PRESENT_V(1U)
+
+/* ProtoVersion - Protocol Version 0: 1.2, 1: 1.1, 2: DTLS, 3: Generic,
+ * 4-15: Reserved.
+ */
+#define SCMD_PROTO_VERSION_S 24
+#define SCMD_PROTO_VERSION_M 0xf
+#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S)
+#define SCMD_PROTO_VERSION_G(x) \
+ (((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M)
+
+/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
+#define SCMD_ENC_DEC_CTRL_S 23
+#define SCMD_ENC_DEC_CTRL_M 0x1
+#define SCMD_ENC_DEC_CTRL_V(x) ((x) << SCMD_ENC_DEC_CTRL_S)
+#define SCMD_ENC_DEC_CTRL_G(x) \
+ (((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M)
+#define SCMD_ENC_DEC_CTRL_F SCMD_ENC_DEC_CTRL_V(1U)
+
+/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
+#define SCMD_CIPH_AUTH_SEQ_CTRL_S 22
+#define SCMD_CIPH_AUTH_SEQ_CTRL_M 0x1
+#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x) \
+ ((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x) \
+ (((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_F SCMD_CIPH_AUTH_SEQ_CTRL_V(1U)
+
+/* CiphMode - Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR,
+ * 4:Generic-AES, 5-15: Reserved.
+ */
+#define SCMD_CIPH_MODE_S 18
+#define SCMD_CIPH_MODE_M 0xf
+#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S)
+#define SCMD_CIPH_MODE_G(x) \
+ (((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M)
+
+/* AuthMode - Auth Mode. 0: NOP, 1: SHA1, 2: SHA2-224, 3: SHA2-256,
+ * 4-15: Reserved
+ */
+#define SCMD_AUTH_MODE_S 14
+#define SCMD_AUTH_MODE_M 0xf
+#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S)
+#define SCMD_AUTH_MODE_G(x) \
+ (((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M)
+
+/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation
+ * per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved
+ */
+#define SCMD_HMAC_CTRL_S 11
+#define SCMD_HMAC_CTRL_M 0x7
+#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S)
+#define SCMD_HMAC_CTRL_G(x) \
+ (((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M)
+
+/* IvSize - IV size in units of 2 bytes */
+#define SCMD_IV_SIZE_S 7
+#define SCMD_IV_SIZE_M 0xf
+#define SCMD_IV_SIZE_V(x) ((x) << SCMD_IV_SIZE_S)
+#define SCMD_IV_SIZE_G(x) \
+ (((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M)
+
+/* NumIVs - Number of IVs */
+#define SCMD_NUM_IVS_S 0
+#define SCMD_NUM_IVS_M 0x7f
+#define SCMD_NUM_IVS_V(x) ((x) << SCMD_NUM_IVS_S)
+#define SCMD_NUM_IVS_G(x) \
+ (((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M)
+
+/* EnbDbgId - If this is enabled, the upper 20 bits (63:44) of SeqNumber
+ * (below) are used as Cid (connection id for debug status); these
+ * bits are padded with zeroes when forming the 64-bit
+ * sequence number for TLS.
+ */
+#define SCMD_ENB_DBGID_S 31
+#define SCMD_ENB_DBGID_M 0x1
+#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S)
+#define SCMD_ENB_DBGID_G(x) \
+ (((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M)
+
+/* IV generation in SW. */
+#define SCMD_IV_GEN_CTRL_S 30
+#define SCMD_IV_GEN_CTRL_M 0x1
+#define SCMD_IV_GEN_CTRL_V(x) ((x) << SCMD_IV_GEN_CTRL_S)
+#define SCMD_IV_GEN_CTRL_G(x) \
+ (((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M)
+#define SCMD_IV_GEN_CTRL_F SCMD_IV_GEN_CTRL_V(1U)
+
+/* More frags */
+#define SCMD_MORE_FRAGS_S 20
+#define SCMD_MORE_FRAGS_M 0x1
+#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S)
+#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M)
+
+/* Last frag */
+#define SCMD_LAST_FRAG_S 19
+#define SCMD_LAST_FRAG_M 0x1
+#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S)
+#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M)
+
+/* TlsCompPdu */
+#define SCMD_TLS_COMPPDU_S 18
+#define SCMD_TLS_COMPPDU_M 0x1
+#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S)
+#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M)
+
+/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly */
+#define SCMD_KEY_CTX_INLINE_S 17
+#define SCMD_KEY_CTX_INLINE_M 0x1
+#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S)
+#define SCMD_KEY_CTX_INLINE_G(x) \
+ (((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M)
+#define SCMD_KEY_CTX_INLINE_F SCMD_KEY_CTX_INLINE_V(1U)
+
+/* TLSFragEnable - 0: Host-created TLS PDUs, 1: TLS fragmentation in ASIC */
+#define SCMD_TLS_FRAG_ENABLE_S 16
+#define SCMD_TLS_FRAG_ENABLE_M 0x1
+#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S)
+#define SCMD_TLS_FRAG_ENABLE_G(x) \
+ (((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M)
+#define SCMD_TLS_FRAG_ENABLE_F SCMD_TLS_FRAG_ENABLE_V(1U)
+
+/* MacOnly - Only send the MAC and discard the PDU. This is valid for
+ * hash-only modes; in this case TLS_TX will drop the PDU and only
+ * send back the MAC bytes.
+ */
+#define SCMD_MAC_ONLY_S 15
+#define SCMD_MAC_ONLY_M 0x1
+#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S)
+#define SCMD_MAC_ONLY_G(x) \
+ (((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M)
+#define SCMD_MAC_ONLY_F SCMD_MAC_ONLY_V(1U)
+
+/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols
+ * which have complex AAD and IV formations, e.g. AES-CCM
+ */
+#define SCMD_AADIVDROP_S 14
+#define SCMD_AADIVDROP_M 0x1
+#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S)
+#define SCMD_AADIVDROP_G(x) \
+ (((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M)
+#define SCMD_AADIVDROP_F SCMD_AADIVDROP_V(1U)
+
+/* HdrLength - Length of all headers excluding TLS header
+ * present before start of crypto PDU/payload.
+ */
+#define SCMD_HDR_LEN_S 0
+#define SCMD_HDR_LEN_M 0x3fff
+#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S)
+#define SCMD_HDR_LEN_G(x) \
+ (((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M)
+
+struct cpl_tx_sec_pdu {
+ __be32 op_ivinsrtofst;
+ __be32 pldlen;
+ __be32 aadstart_cipherstop_hi;
+ __be32 cipherstop_lo_authinsert;
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+ __be64 scmd1;
+};
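+
+/* Illustrative only: the SCMD_*_V() helpers above are meant to be OR-ed
+ * together to build the seqno_numivs word of struct cpl_tx_sec_pdu, e.g.
+ *
+ *	sec_pdu->seqno_numivs =
+ *		cpu_to_be32(SCMD_SEQ_NO_CTRL_V(seq_ctrl) |
+ *			    SCMD_PROTO_VERSION_V(proto) |
+ *			    SCMD_CIPH_MODE_V(ciph_mode) |
+ *			    SCMD_AUTH_MODE_V(auth_mode) |
+ *			    SCMD_HMAC_CTRL_V(hmac_ctrl) |
+ *			    SCMD_IV_SIZE_V(iv_size >> 1) |
+ *			    SCMD_NUM_IVS_V(num_ivs));
+ *
+ * All identifiers on the right-hand side are placeholders; note the IV size
+ * is given in 2-byte units per the SCMD_IV_SIZE comment above.
+ */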
+
+#define CPL_TX_SEC_PDU_OPCODE_S 24
+#define CPL_TX_SEC_PDU_OPCODE_M 0xff
+#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S)
+#define CPL_TX_SEC_PDU_OPCODE_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M)
+
+/* RX Channel Id */
+#define CPL_TX_SEC_PDU_RXCHID_S 22
+#define CPL_TX_SEC_PDU_RXCHID_M 0x1
+#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S)
+#define CPL_TX_SEC_PDU_RXCHID_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M)
+#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U)
+
+/* Ack Follows */
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S)
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M)
+#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U)
+
+/* Loopback bit in cpl_tx_sec_pdu */
+#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20
+#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1
+#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S)
+#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M)
+#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U)
+
+/* Length of cpl header encapsulated */
+#define CPL_TX_SEC_PDU_CPLLEN_S 16
+#define CPL_TX_SEC_PDU_CPLLEN_M 0xf
+#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S)
+#define CPL_TX_SEC_PDU_CPLLEN_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M)
+
+/* PlaceHolder */
+#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10
+#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1
+#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S)
+#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \
+ CPL_TX_SEC_PDU_PLACEHOLDER_M)
+
+/* IvInsrtOffset: Insertion location for IV */
+#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0
+#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff
+#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S)
+#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \
+ CPL_TX_SEC_PDU_IVINSRTOFST_M)
+
+/* AadStartOffset: Offset in bytes for AAD start from
+ * the first byte following the pkt headers (0-255 bytes)
+ */
+#define CPL_TX_SEC_PDU_AADSTART_S 24
+#define CPL_TX_SEC_PDU_AADSTART_M 0xff
+#define CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S)
+#define CPL_TX_SEC_PDU_AADSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \
+ CPL_TX_SEC_PDU_AADSTART_M)
+
+/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following
+ * the pkt headers (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_AADSTOP_S 15
+#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff
+#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S)
+#define CPL_TX_SEC_PDU_AADSTOP_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M)
+
+/* CipherStartOffset: offset in bytes for encryption/decryption start from the
+ * first byte following the pkt headers (0-1023 bytes)
+ */
+#define CPL_TX_SEC_PDU_CIPHERSTART_S 5
+#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff
+#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S)
+#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTART_M)
+
+/* CipherStopOffset: offset in bytes for encryption/decryption end
+ * from end of the payload of this command (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \
+ ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S)
+#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTOP_HI_M)
+
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \
+ ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S)
+#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \
+ CPL_TX_SEC_PDU_CIPHERSTOP_LO_M)
+
+/* AuthStartOffset: offset in bytes for authentication start from
+ * the first byte following the pkt headers (0-1023)
+ */
+#define CPL_TX_SEC_PDU_AUTHSTART_S 18
+#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff
+#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S)
+#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \
+ CPL_TX_SEC_PDU_AUTHSTART_M)
+
+/* AuthStopOffset: offset in bytes for authentication
+ * end from end of the payload of this command (0-511 Bytes)
+ */
+#define CPL_TX_SEC_PDU_AUTHSTOP_S 9
+#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff
+#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S)
+#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \
+ CPL_TX_SEC_PDU_AUTHSTOP_M)
+
+/* AuthInsrtOffset: offset in bytes for authentication insertion
+ * from end of the payload of this command (0-511 bytes)
+ */
+#define CPL_TX_SEC_PDU_AUTHINSERT_S 0
+#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff
+#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S)
+#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \
+ (((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \
+ CPL_TX_SEC_PDU_AUTHINSERT_M)
+
+struct cpl_rx_phys_dsgl {
+ __be32 op_to_tid;
+ __be32 pcirlxorder_to_noofsgentr;
+ struct rss_header rss_hdr_int;
+};
+
+#define CPL_RX_PHYS_DSGL_OPCODE_S 24
+#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff
+#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S)
+#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M)
+
+#define CPL_RX_PHYS_DSGL_ISRDMA_S 23
+#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1
+#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S)
+#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M)
+#define CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U)
+
+#define CPL_RX_PHYS_DSGL_RSVD1_S 20
+#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7
+#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S)
+#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \
+ CPL_RX_PHYS_DSGL_RSVD1_M)
+
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S)
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \
+ CPL_RX_PHYS_DSGL_PCIRLXORDER_M)
+#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S)
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \
+ CPL_RX_PHYS_DSGL_PCINOSNOOP_M)
+
+#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S)
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \
+ CPL_RX_PHYS_DSGL_PCITPHNTENB_M)
+#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U)
+
+#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27
+#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3
+#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S)
+#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \
+ CPL_RX_PHYS_DSGL_PCITPHNT_M)
+
+#define CPL_RX_PHYS_DSGL_DCAID_S 16
+#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff
+#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S)
+#define CPL_RX_PHYS_DSGL_DCAID_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \
+ CPL_RX_PHYS_DSGL_DCAID_M)
+
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \
+ ((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S)
+#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \
+ (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_M)
+
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 30507d44422c..8d9e4b7a8e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2009-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -100,8 +100,10 @@ enum fw_wr_opcodes {
FW_RI_RECV_WR = 0x17,
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
+ FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
+ FW_CRYPTO_LOOKASIDE_WR = 0x6d,
FW_LASTC2E_WR = 0x70
};
@@ -680,6 +682,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_SCHED_CMD = 0x24,
FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
@@ -1060,7 +1063,7 @@ struct fw_caps_config_cmd {
__be16 niccaps;
__be16 ofldcaps;
__be16 rdmacaps;
- __be16 r4;
+ __be16 cryptocaps;
__be16 iscsicaps;
__be16 fcoecaps;
__be32 cfcsum;
@@ -1119,6 +1122,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
+ FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
};
/*
@@ -2967,6 +2971,41 @@ struct fw_rss_vi_config_cmd {
#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S)
#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U)
+enum fw_sched_sc {
+ FW_SCHED_SC_PARAMS = 1,
+};
+
+struct fw_sched_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_sched {
+ struct fw_sched_config {
+ __u8 sc;
+ __u8 type;
+ __u8 minmaxen;
+ __u8 r3[5];
+ __u8 nclasses[4];
+ __be32 r4;
+ } config;
+ struct fw_sched_params {
+ __u8 sc;
+ __u8 type;
+ __u8 level;
+ __u8 mode;
+ __u8 unit;
+ __u8 rate;
+ __u8 ch;
+ __u8 cl;
+ __be32 min;
+ __be32 max;
+ __be16 weight;
+ __be16 pktsize;
+ __be16 burstsize;
+ __be16 r4;
+ } params;
+ } u;
+};
+
struct fw_clip_cmd {
__be32 op_to_write;
__be32 alloc_to_len16;
@@ -3255,4 +3294,127 @@ struct fw_devlog_cmd {
#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr))
+
+struct fw_crypto_lookaside_wr {
+ __be32 op_to_cctx_size;
+ __be32 len16_pkd;
+ __be32 session_id;
+ __be32 rx_chid_to_rx_q_id;
+ __be32 key_addr;
+ __be32 pld_size_hash_size;
+ __be64 cookie;
+};
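+
+/* Illustrative only: op_to_cctx_size is expected to be composed from the
+ * _V() helpers defined below, along the lines of
+ *
+ *	wr->op_to_cctx_size = cpu_to_be32(
+ *		FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(FW_CRYPTO_LOOKASIDE_WR) |
+ *		FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) |
+ *		FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(imm_len) |
+ *		FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(cctx_loc) |
+ *		FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(cctx_size));
+ *
+ * imm_len, cctx_loc and cctx_size are placeholders; the consumer of this
+ * work request (the crypto offload driver) defines the actual values.
+ */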
+
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_OPCODE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S)
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_COMPL_M)
+#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U)
+
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S)
+#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S)
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S)
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_LEN16_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S)
+#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S)
+#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S)
+#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_PHASH_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23
+#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S)
+#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S)
+#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_TX_CH_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S)
+#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M)
+
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S)
+#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M)
+
#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e116bb8d1729..100b2cc064a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2378,7 +2378,7 @@ static void size_nports_qsets(struct adapter *adapter)
*/
pmask_nports = hweight32(adapter->params.vfres.pmask);
if (pmask_nports < adapter->params.nports) {
- dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
" virtual interfaces; limited by Port Access Rights"
" mask %#x\n", pmask_nports, adapter->params.nports,
adapter->params.vfres.pmask);
@@ -2777,6 +2777,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct adapter *adapter;
struct port_info *pi;
struct net_device *netdev;
+ unsigned int pf;
/*
* Print our driver banner the first time we're called to initialize a
@@ -2903,8 +2904,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Allocate our "adapter ports" and stitch everything together.
*/
pmask = adapter->params.vfres.pmask;
+ pf = t4vf_get_pf_from_vf(adapter);
for_each_port(adapter, pidx) {
int port_id, viid;
+ u8 mac[ETH_ALEN];
+ unsigned int naddr = 1;
/*
* We simplistically allocate our virtual interfaces
@@ -2975,6 +2979,26 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
pidx);
goto err_free_dev;
}
+
+ err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to determine MAC ACL address, "
+ "continuing anyway.. (status %d)\n", err);
+ } else if (naddr && adapter->params.vfres.nvi == 1) {
+ struct sockaddr addr;
+
+ ether_addr_copy(addr.sa_data, mac);
+ err = cxgb4vf_set_mac_addr(netdev, &addr);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to set MAC address %pM\n",
+ mac);
+ goto err_free_dev;
+ }
+ dev_info(&pdev->dev,
+ "Using assigned MAC ACL: %pM\n", mac);
+ }
}
/* See what interrupts we'll be using. If we've been configured to
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index c8fd4f8fe1fa..f3ed9ce99e5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1648,14 +1648,15 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
if (csum_ok && !pkt->err_vec &&
(be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
- if (!pkt->ip_frag)
+ if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else {
+ rxq->stats.rx_cso++;
+ } else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
+ rxq->stats.rx_cso++;
}
- rxq->stats.rx_cso++;
} else
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 17a2bbcf93f0..b3903fe411aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -354,6 +354,7 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
+unsigned int t4vf_get_pf_from_vf(struct adapter *);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
@@ -388,5 +389,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
int t4vf_prep_adapter(struct adapter *);
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+ unsigned int *naddr, u8 *addr);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index b5622b1689e9..e98248f00fef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -640,6 +640,15 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter,
return 0;
}
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
+}
+
/**
* t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
* @adapter: the adapter
@@ -717,7 +726,6 @@ int t4vf_get_sge_params(struct adapter *adapter)
* read.
*/
if (!is_t4(adapter->params.chip)) {
- u32 whoami;
unsigned int pf, s_hps, s_qpp;
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
@@ -741,11 +749,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
* register we just read. Do it once here so other code in
* the driver can just use it.
*/
- whoami = t4_read_reg(adapter,
- T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
- pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
- SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
-
+ pf = t4vf_get_pf_from_vf(adapter);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
sge_params->sge_vf_hps =
@@ -1812,3 +1816,50 @@ int t4vf_prep_adapter(struct adapter *adapter)
return 0;
}
+
+/**
+ * t4vf_get_vf_mac_acl - Get the MAC address to be set to
+ * the VI of this VF.
+ * @adapter: The adapter
+ * @pf: The pf associated with vf
+ * @naddr: the number of ACL MAC addresses returned in addr
+ * @addr: Placeholder for MAC addresses
+ *
+ * Find the MAC address to be set to the VF's VI. The requested MAC address
+ * is provided by the host OS to the PF driver via a callback.
+ */
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+ unsigned int *naddr, u8 *addr)
+{
+ struct fw_acl_mac_cmd cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ if (cmd.nmac < *naddr)
+ *naddr = cmd.nmac;
+
+ switch (pf) {
+ case 3:
+ memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
+ break;
+ case 2:
+ memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
+ break;
+ case 1:
+ memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
+ break;
+ case 0:
+ memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index 2362230ef4fe..2534e30a1560 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
-libcxgb-y := libcxgb_ppm.o
+libcxgb-y := libcxgb_ppm.o libcxgb_cm.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
new file mode 100644
index 000000000000..0f0de5b63622
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include "libcxgb_cm.h"
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
+ int *iptype, __u8 *local_ip, __u8 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ if (ip->version == 4) {
+ pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+ __func__, ntohl(ip->saddr), ntohl(ip->daddr),
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+ __func__, ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+}
+EXPORT_SYMBOL(cxgb_get_4tuple);
+
+static bool
+cxgb_our_interface(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ struct net_device *egress_dev)
+{
+ int i;
+
+ egress_dev = get_real_dev(egress_dev);
+ for (i = 0; i < lldi->nports; i++)
+ if (lldi->ports[i] == egress_dev)
+ return true;
+ return false;
+}
+
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __be32 local_ip, __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct neighbour *n;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!cxgb_our_interface(lldi, get_real_dev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
+}
+EXPORT_SYMBOL(cxgb_find_route);
+
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __u8 *local_ip, __u8 *peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos, __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!cxgb_our_interface(lldi, get_real_dev,
+ ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+
+out:
+ return dst;
+}
+EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
new file mode 100644
index 000000000000..515b94ff9080
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIBCXGB_CM_H__
+#define __LIBCXGB_CM_H__
+
+#include <net/tcp.h>
+
+#include <cxgb4.h>
+#include <t4_msg.h>
+#include <l2t.h>
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
+ int *, __u8 *, __u8 *, __be16 *, __be16 *);
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __be32, __be32, __be16, __be16, u8);
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __u8 *, __u8 *, __be16, __be16, u8, __u32);
+
+/* Returns whether a CPL status conveys negative advice.
+ */
+static inline bool cxgb_is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
+static inline void
+cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts, int ipv6)
+{
+ unsigned short hdr_size = (ipv6 ?
+ sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct tcphdr) +
+ (use_ts ?
+ round_up(TCPOLEN_TIMESTAMP, 4) : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
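+/* Smallest TCP window-scale shift such that 65535 << wscale covers the
+ * requested window, capped at 14 (RFC 7323). For example, win == 65536
+ * gives 1 and win == 262144 gives 3.
+ */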
+static inline u32 cxgb_compute_wscale(u32 win)
+{
+ u32 wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+static inline void
+cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_tid_release *req;
+
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+}
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_close_con_req *req;
+
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_abort_req *req;
+
+ req = (struct cpl_abort_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_abort_rpl *rpl;
+
+ rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+ memset(rpl, 0, len);
+
+ INIT_TP_WR(rpl, tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+}
+
+static inline void
+cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ u32 credit_dack)
+{
+ struct cpl_rx_data_ack *req;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
+ req->credit_dack = cpu_to_be32(credit_dack);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
+}
+#endif
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f0e9e2ef62a0..6620fc861c47 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1966,7 +1966,7 @@ SetMulticastFilter(struct net_device *dev)
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+ hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
@@ -5043,7 +5043,7 @@ build_setup_frame(struct net_device *dev, int mode)
*(pa + i) = dev->dev_addr[i]; /* Host address */
if (i & 0x01) pa += 2;
}
- *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
+ *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
} else {
for (i=0; i<ETH_ALEN; i++) { /* Host address */
*(pa + (i&1)) = dev->dev_addr[i];
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
index ec756eba397b..1bfdc9b117f6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ b/drivers/net/ethernet/dec/tulip/de4x5.h
@@ -860,8 +860,8 @@
#define PCI 0
#define EISA 1
-#define HASH_TABLE_LEN 512 /* Bits */
-#define HASH_BITS 0x01ff /* 9 LS bits */
+#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
+#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
#define SETUP_FRAME_LEN 192 /* Bytes */
#define IMPERF_PA_OFFSET 156 /* Bytes */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 58c6338a839e..79d80090eac8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -867,7 +867,7 @@ static int netdev_open(struct net_device *dev)
/* Initialize other registers. */
__set_mac_addr(dev);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4555e041ef69..6cfa63a5e9b4 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.0.0.0"
+#define DRV_VER "11.1.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -399,13 +399,13 @@ enum vf_state {
#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_FLAGS_OS2BMC BIT(12)
+#define BE_FLAGS_TRY_RECOVERY BIT(13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define MAX_ERR_RECOVERY_RETRY_COUNT 3
#define ERR_DETECTION_DELAY 1000
-#define ERR_RECOVERY_RETRY_DELAY 30000
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
@@ -508,6 +508,70 @@ struct be_wrb_params {
u16 lso_mss; /* MSS for LSO */
};
+struct be_eth_addr {
+ unsigned char mac[ETH_ALEN];
+};
+
+#define BE_SEC 1000 /* in msec */
+#define BE_MIN (60 * BE_SEC) /* in msec */
+#define BE_HOUR (60 * BE_MIN) /* in msec */
+
+#define ERR_RECOVERY_MAX_RETRY_COUNT 3
+#define ERR_RECOVERY_DETECTION_DELAY BE_SEC
+#define ERR_RECOVERY_RETRY_DELAY (30 * BE_SEC)
+
+/* UE-detection-duration in BEx/Skyhawk:
+ * All PFs must wait for this duration after they detect UE before reading
+ * SLIPORT_SEMAPHORE register. At the end of this duration, the Firmware
+ * guarantees that the SLIPORT_SEMAPHORE register is updated to indicate
+ * if the UE is recoverable.
+ */
+#define ERR_RECOVERY_UE_DETECT_DURATION BE_SEC
+
+/* Initial idle time (in msec) to elapse after driver load,
+ * before UE recovery is allowed.
+ */
+#define ERR_IDLE_HR 24
+#define ERR_RECOVERY_IDLE_TIME (ERR_IDLE_HR * BE_HOUR)
+
+/* Time interval (in msec) after which UE recovery can be repeated */
+#define ERR_INTERVAL_HR 72
+#define ERR_RECOVERY_INTERVAL (ERR_INTERVAL_HR * BE_HOUR)
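+
+/* For reference: with BE_SEC == 1000 msec, BE_HOUR is 3,600,000 msec, so
+ * ERR_RECOVERY_IDLE_TIME works out to 24 hours after probe and
+ * ERR_RECOVERY_INTERVAL to 72 hours after the previous recovery, both
+ * expressed in msec like the other delays above.
+ */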
+
+/* BEx/SH UE recovery state machine */
+enum {
+ ERR_RECOVERY_ST_NONE = 0, /* No Recovery */
+ ERR_RECOVERY_ST_DETECT = 1, /* UE detection duration */
+ ERR_RECOVERY_ST_RESET = 2, /* Reset Phase (PF0 only) */
+ ERR_RECOVERY_ST_PRE_POLL = 3, /* Pre-Poll Phase (all PFs) */
+ ERR_RECOVERY_ST_REINIT = 4 /* Re-initialize Phase */
+};
+
+struct be_error_recovery {
+ /* Lancer error recovery variables */
+ u8 recovery_retries;
+
+ /* BEx/Skyhawk error recovery variables */
+ u8 recovery_state;
+ u16 ue_to_reset_time; /* Time after UE, to soft reset
+ * the chip - PF0 only
+ */
+ u16 ue_to_poll_time; /* Time after UE, to Restart Polling
+ * of SLIPORT_SEMAPHORE reg
+ */
+ u16 last_err_code;
+ bool recovery_supported;
+ unsigned long probe_time;
+ unsigned long last_recovery_time;
+
+ /* Common to both Lancer & BEx/SH error recovery */
+ u32 resched_delay;
+ struct delayed_work err_detection_work;
+};
+
+/* Ethtool priv_flags */
+#define BE_DISABLE_TPE_RECOVERY 0x1
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -523,7 +587,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
@@ -556,7 +620,6 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
- struct delayed_work be_err_detection_work;
u8 recovery_retries;
u8 err_flags;
bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
@@ -570,9 +633,15 @@ struct be_adapter {
int if_handle; /* Used to configure filtering */
u32 if_flags; /* Interface filtering flags */
u32 *pmac_id; /* MAC addr handle used by BE card */
+ struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */
u32 uc_macs; /* Count of secondary UC MAC programmed */
+ struct be_eth_addr *mc_list;/* list of mcast addrs programmed */
+ u32 mc_count;
unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u16 vlans_added;
+ bool update_uc_list;
+ bool update_mc_list;
+ struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */
u32 beacon_state; /* for set_phys_id */
@@ -624,6 +693,18 @@ struct be_adapter {
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
+ u8 dev_mac[ETH_ALEN];
+ u32 priv_flags; /* ethtool get/set_priv_flags() */
+ struct be_error_recovery error_recovery;
+};
+
+/* Used for deferred FW config cmds. Add fields to this struct as required */
+struct be_cmd_work {
+ struct work_struct work;
+ struct be_adapter *adapter;
+ union {
+ __be16 vxlan_port;
+ } info;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -848,6 +929,9 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
+#define be_error_recovering(adapter) \
+ (adapter->flags & BE_FLAGS_TRY_RECOVERY)
+
#define BE_ERROR_EEH 1
#define BE_ERROR_UE BIT(1)
#define BE_ERROR_FW BIT(2)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2cc11756859f..9cffe48be156 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -92,6 +92,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_COMMON,
BE_PRIV_DEVCFG | BE_PRIV_VHADM
},
+ {
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG
+ }
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -571,7 +576,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 120000 /* 12s timeout */
+#define mcc_timeout 12000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -585,7 +590,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- udelay(100);
+ usleep_range(500, 1000);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -705,7 +710,7 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
-static u16 be_POST_stage_get(struct be_adapter *adapter)
+u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
@@ -863,7 +868,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -874,7 +879,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- spin_unlock_bh(&adapter->mcc_lock);
+ return mutex_unlock(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1044,7 +1049,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1073,7 +1078,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1085,7 +1090,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1110,7 +1115,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1128,7 +1133,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1148,7 +1153,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1411,7 +1416,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1441,7 +1446,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1505,7 +1510,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1522,7 +1527,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1590,7 +1595,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1618,7 +1623,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1634,7 +1639,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1657,7 +1662,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1694,7 +1699,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1733,7 +1738,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1744,7 +1749,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1759,7 +1764,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1808,7 +1813,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60*1024);
@@ -1848,7 +1853,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
err:
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1859,7 +1864,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1882,7 +1887,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1896,7 +1901,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1919,7 +1924,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1946,7 +1951,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1968,7 +1973,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1979,7 +1984,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1996,8 +2001,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_flags = (value == ON) ? req->if_flags_mask : 0;
if (flags & BE_IF_FLAGS_MULTICAST) {
- struct netdev_hw_addr *ha;
- int i = 0;
+ int i;
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
@@ -2005,14 +2009,15 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
be_if_cap_flags(adapter));
- req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
- netdev_for_each_mc_addr(ha, adapter->netdev)
- memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
+ req->mcast_num = cpu_to_le32(adapter->mc_count);
+ for (i = 0; i < adapter->mc_count; i++)
+ ether_addr_copy(req->mcast_mac[i].byte,
+ adapter->mc_list[i].mac);
}
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2043,7 +2048,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2063,7 +2068,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2082,7 +2087,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2105,7 +2110,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2186,7 +2191,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2211,7 +2216,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2223,7 +2228,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2244,7 +2249,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2255,7 +2260,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2279,7 +2284,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2303,7 +2308,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2325,7 +2330,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data, PAGE_DATA_LEN);
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2342,7 +2347,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2384,7 +2389,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2403,7 +2408,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2457,7 +2462,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2475,7 +2480,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2488,7 +2493,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2522,7 +2527,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2534,7 +2539,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2559,7 +2564,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2570,7 +2575,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -2581,7 +2586,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2608,7 +2613,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3192,7 +3197,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3209,7 +3214,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3224,7 +3229,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3247,7 +3252,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3256,7 +3261,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3273,7 +3278,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3299,7 +3304,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3307,7 +3312,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3323,7 +3328,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3357,7 +3362,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3368,7 +3373,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3384,7 +3389,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3399,7 +3404,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3444,7 +3449,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3454,7 +3459,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3474,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3581,7 +3586,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3613,7 +3618,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3625,7 +3630,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3645,7 +3650,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3677,7 +3682,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3741,7 +3746,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3801,7 +3806,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3823,7 +3828,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3859,7 +3864,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3900,7 +3905,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3914,7 +3919,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3961,7 +3966,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4127,6 +4132,10 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_get_ext_fat_caps *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -4138,7 +4147,7 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
req->parameter_type = cpu_to_le32(1);
@@ -4156,7 +4165,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4167,12 +4176,12 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4373,7 +4382,7 @@ err:
}
/* This routine returns a list of all the NIC PF_nums in the adapter */
-u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_pcie_res_desc *pcie = NULL;
@@ -4525,7 +4534,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4650,7 +4659,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4667,7 +4676,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4701,7 +4710,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4722,7 +4731,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4816,7 +4825,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4834,7 +4843,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4898,14 +4907,15 @@ err:
return status;
}
-int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
- int link_state, int version, u8 domain)
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4931,7 +4941,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -4954,6 +4964,57 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
1, domain);
return status;
}
+
+int be_cmd_set_features(struct be_adapter *adapter)
+{
+ struct be_cmd_resp_set_features *resp;
+ struct be_cmd_req_set_features *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mcc_lock))
+ return -1;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FEATURES,
+ sizeof(*req), wrb, NULL);
+
+ req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
+ req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
+ req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
+
+ status = be_mcc_notify_wait(adapter);
+ if (status)
+ goto err;
+
+ resp = embedded_payload(wrb);
+
+ adapter->error_recovery.ue_to_poll_time =
+ le16_to_cpu(resp->parameter.resp.ue2rp);
+ adapter->error_recovery.ue_to_reset_time =
+ le16_to_cpu(resp->parameter.resp.ue2sr);
+ adapter->error_recovery.recovery_supported = true;
+err:
+ /* Checking "MCC_STATUS_INVALID_LENGTH" for SKH as FW
+ * returns this error in older firmware versions
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_INVALID_LENGTH)
+ dev_info(&adapter->pdev->dev,
+ "Adapter does not support HW error recovery\n");
+
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
@@ -4964,7 +5025,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- spin_lock_bh(&adapter->mcc_lock);
+ mutex_lock(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4987,7 +5048,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- spin_unlock_bh(&adapter->mcc_lock);
+ mutex_unlock(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0d6be224a787..1bd82bcb3be5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -58,7 +58,8 @@ enum mcc_base_status {
MCC_STATUS_INSUFFICIENT_BUFFER = 4,
MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
MCC_STATUS_NOT_SUPPORTED = 66,
- MCC_STATUS_FEATURE_NOT_SUPPORTED = 68
+ MCC_STATUS_FEATURE_NOT_SUPPORTED = 68,
+ MCC_STATUS_INVALID_LENGTH = 116
};
/* Additional status */
@@ -294,8 +295,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
-#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125
-#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126
+#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES 125
+#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES 126
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
#define OPCODE_COMMON_GET_HSW_CONFIG 152
@@ -308,6 +309,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_COMMON_DELETE_OBJECT 174
+#define OPCODE_COMMON_SET_FEATURES 191
#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
@@ -2315,6 +2317,41 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
+/************** Set Features *******************/
+#define BE_FEATURE_UE_RECOVERY 0x10
+#define BE_UE_RECOVERY_UER_MASK 0x1
+
+struct be_req_ue_recovery {
+ u32 uer;
+ u32 rsvd;
+};
+
+struct be_cmd_req_set_features {
+ struct be_cmd_req_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_req_ue_recovery req;
+ u32 rsvd[2];
+ } parameter;
+};
+
+struct be_resp_ue_recovery {
+ u32 uer;
+ u16 ue2rp;
+ u16 ue2sr;
+};
+
+struct be_cmd_resp_set_features {
+ struct be_cmd_resp_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_resp_ue_recovery resp;
+ u32 rsvd[2];
+ } parameter;
+};
+
/*************** Set logical link ********************/
#define PLINK_ENABLE BIT(0)
#define PLINK_TRACK BIT(8)
@@ -2343,6 +2380,7 @@ struct be_cmd_req_manage_iface_filters {
u32 cap_control_flags;
} __packed;
+u16 be_POST_stage_get(struct be_adapter *adapter);
int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2470,3 +2508,4 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
struct be_resources *vft_res);
+int be_cmd_set_features(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 50e7be5da50c..0a48a31225e6 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -421,6 +421,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
}
}
+static const char be_priv_flags[][ETH_GSTRING_LEN] = {
+ "disable-tpe-recovery"
+};
+
static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
@@ -454,6 +458,10 @@ static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
data += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
+ break;
}
}
@@ -468,6 +476,8 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
return ETHTOOL_STATS_NUM +
adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(be_priv_flags);
default:
return -EINVAL;
}
@@ -1360,6 +1370,34 @@ err:
return be_cmd_status(status);
}
+static u32 be_get_priv_flags(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int be_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
+ bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
+
+ if (tpe_old != tpe_new) {
+ if (tpe_new) {
+ adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is disabled\n");
+ } else {
+ adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is enabled\n");
+ }
+ }
+
+ return 0;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1373,6 +1411,8 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
+ .set_priv_flags = be_set_priv_flags,
+ .get_priv_flags = be_get_priv_flags,
.get_strings = be_get_stat_strings,
.set_phys_id = be_set_phys_id,
.set_dump = be_set_dump,
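
The new "disable-tpe-recovery" entry is a plain bit in adapter->priv_flags, toggled from userspace with ethtool's --set-priv-flags option. A hypothetical helper (not part of this patch) showing how recovery code could gate itself on that bit:

/* Hypothetical helper, for illustration only: TPE recovery is allowed unless
 * the user has set the "disable-tpe-recovery" private flag, e.g. via
 * `ethtool --set-priv-flags eth0 disable-tpe-recovery on`.
 */
static bool be_tpe_recovery_allowed(struct be_adapter *adapter)
{
	return !(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
}
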
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index c684bb32b487..92942c84d329 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,18 +32,23 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31
+#define POST_ERR_RECOVERY_CODE_MASK 0xFFF
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-
+#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
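
The new POST-stage definitions encode a recoverable-error state: the 0xE000 stage value flags it, and the low 12 bits carry the firmware's UE error code. A condensed illustration of the decode (the full version is be_err_is_recoverable() in be_main.c below); the helper name here is made up:

static bool be_post_stage_shows_recoverable_err(struct be_adapter *adapter)
{
	u32 val = be_POST_stage_get(adapter);

	/* stage must report a recoverable error... */
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;

	/* ...and FW must supply a non-zero UE error code */
	return (val & POST_ERR_RECOVERY_CODE_MASK) != 0;
}
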
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 874c7539a79d..dcb930a52613 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -41,6 +41,11 @@ static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+/* Per-module error detection/recovery workq shared across all functions.
+ * Each function schedules its own work request on this shared workq.
+ */
+static struct workqueue_struct *be_err_recovery_workq;
+
static const struct pci_device_id be_dev_ids[] = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -53,6 +58,10 @@ static const struct pci_device_id be_dev_ids[] = {
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
+
+/* Workqueue used by all functions for deferring cmd calls to the adapter */
+static struct workqueue_struct *be_wq;
+
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
"CEV",
@@ -260,6 +269,38 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
+static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+{
+ int i;
+
+ /* Check if mac has already been added as part of uc-list */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
+ mac)) {
+ /* mac already added, skip addition */
+ adapter->pmac_id[0] = adapter->pmac_id[i + 1];
+ return 0;
+ }
+ }
+
+ return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+}
+
+static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ int i;
+
+ /* Skip deletion if the programmed mac is
+ * being used in uc-list
+ */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (adapter->pmac_id[i + 1] == pmac_id)
+ return;
+ }
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -267,7 +308,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int status;
u8 mac[ETH_ALEN];
- u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
+ u32 old_pmac_id = adapter->pmac_id[0];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -275,7 +316,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Proceed further only if the user-provided MAC is different
* from the active MAC
*/
- if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
/* if device is not running, copy MAC to netdev->dev_addr */
@@ -288,23 +329,22 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
* FILTMGMT privilege. This failure is OK, only if the PF programmed
* the MAC for the VF.
*/
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ mutex_lock(&adapter->rx_filter_lock);
+ status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
if (!status) {
- curr_pmac_id = adapter->pmac_id[0];
/* Delete the old programmed MAC. This call may fail if the
* old MAC was already deleted by the PF driver.
*/
if (adapter->pmac_id[0] != old_pmac_id)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- old_pmac_id, 0);
+ be_dev_mac_del(adapter, old_pmac_id);
}
+ mutex_unlock(&adapter->rx_filter_lock);
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
adapter->if_handle, true, 0);
if (status)
goto err;
@@ -317,6 +357,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
goto err;
}
done:
+ ether_addr_copy(adapter->dev_mac, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -1420,13 +1461,18 @@ static int be_vid_config(struct be_adapter *adapter)
u16 num = 0, i = 0;
int status = 0;
- /* No need to further configure vids if in promiscuous mode */
- if (be_in_all_promisc(adapter))
+ /* No need to change the VLAN state if the I/F is in promiscuous mode */
+ if (adapter->netdev->flags & IFF_PROMISC)
return 0;
if (adapter->vlans_added > be_max_vlans(adapter))
return be_set_vlan_promisc(adapter);
+ if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ status = be_clear_vlan_promisc(adapter);
+ if (status)
+ return status;
+ }
/* Construct VLAN Table to give to HW */
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
@@ -1439,8 +1485,6 @@ static int be_vid_config(struct be_adapter *adapter)
addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
return be_set_vlan_promisc(adapter);
- } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- status = be_clear_vlan_promisc(adapter);
}
return status;
}
@@ -1450,46 +1494,45 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
+ mutex_lock(&adapter->rx_filter_lock);
+
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- return status;
+ goto done;
if (test_bit(vid, adapter->vids))
- return status;
+ goto done;
set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
- if (status) {
- adapter->vlans_added--;
- clear_bit(vid, adapter->vids);
- }
-
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
return status;
}
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ mutex_lock(&adapter->rx_filter_lock);
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- return 0;
+ goto done;
if (!test_bit(vid, adapter->vids))
- return 0;
+ goto done;
clear_bit(vid, adapter->vids);
adapter->vlans_added--;
- return be_vid_config(adapter);
-}
-
-static void be_clear_all_promisc(struct be_adapter *adapter)
-{
- be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
- adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+ status = be_vid_config(adapter);
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
+ return status;
}
static void be_set_all_promisc(struct be_adapter *adapter)
@@ -1510,75 +1553,226 @@ static void be_set_mc_promisc(struct be_adapter *adapter)
adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}
-static void be_set_mc_list(struct be_adapter *adapter)
+static void be_set_uc_promisc(struct be_adapter *adapter)
{
int status;
- status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
if (!status)
- adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
- else
+ adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
+}
+
+static void be_clear_uc_promisc(struct be_adapter *adapter)
+{
+ int status;
+
+ if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
+}
+
+/* The two functions below are the callback args for __dev_mc_sync()/__dev_uc_sync().
+ * We use a single callback for both sync and unsync. We don't really add or
+ * remove addresses through this callback; we only use it to detect changes to
+ * the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
+ */
+static int be_uc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_uc_list = true;
+ return 0;
+}
+
+static int be_mc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_mc_list = true;
+ return 0;
+}
+
+static void be_set_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netdev_hw_addr *ha;
+ bool mc_promisc = false;
+ int status;
+
+ netif_addr_lock_bh(netdev);
+ __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
+
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_mc_list = false;
+ } else if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
+ /* Enable multicast promisc if num configured exceeds
+ * what we support
+ */
+ mc_promisc = true;
+ adapter->update_mc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
+ /* Update mc-list unconditionally if the iface was previously
+ * in mc-promisc mode and now is out of that mode.
+ */
+ adapter->update_mc_list = true;
+ }
+
+ if (adapter->update_mc_list) {
+ int i = 0;
+
+ /* cache the mc-list in adapter */
+ netdev_for_each_mc_addr(ha, netdev) {
+ ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
+ i++;
+ }
+ adapter->mc_count = netdev_mc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (mc_promisc) {
be_set_mc_promisc(adapter);
+ } else if (adapter->update_mc_list) {
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
+ else
+ be_set_mc_promisc(adapter);
+
+ adapter->update_mc_list = false;
+ }
+}
+
+static void be_clear_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ __dev_mc_unsync(netdev, NULL);
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
+ adapter->mc_count = 0;
+}
+
+static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
+{
+ if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->dev_mac)) {
+ adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
+ return 0;
+ }
+
+ return be_cmd_pmac_add(adapter,
+ (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->if_handle,
+ &adapter->pmac_id[uc_idx + 1], 0);
+}
+
+static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ if (pmac_id == adapter->pmac_id[0])
+ return;
+
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
static void be_set_uc_list(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct netdev_hw_addr *ha;
- int i = 1; /* First slot is claimed by the Primary MAC */
+ bool uc_promisc = false;
+ int curr_uc_macs = 0, i;
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ netif_addr_lock_bh(netdev);
+ __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
- if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
- be_set_all_promisc(adapter);
- return;
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_uc_list = false;
+ } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
+ uc_promisc = true;
+ adapter->update_uc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
+ /* Update uc-list unconditionally if the iface was previously
+ * in uc-promisc mode and now is out of that mode.
+ */
+ adapter->update_uc_list = true;
}
- netdev_for_each_uc_addr(ha, adapter->netdev) {
- adapter->uc_macs++; /* First slot is for Primary MAC */
- be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
- &adapter->pmac_id[adapter->uc_macs], 0);
+ if (adapter->update_uc_list) {
+ i = 1; /* First slot is claimed by the Primary MAC */
+
+ /* cache the uc-list in adapter array */
+ netdev_for_each_uc_addr(ha, netdev) {
+ ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
+ i++;
+ }
+ curr_uc_macs = netdev_uc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (uc_promisc) {
+ be_set_uc_promisc(adapter);
+ } else if (adapter->update_uc_list) {
+ be_clear_uc_promisc(adapter);
+
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
+ for (i = 0; i < curr_uc_macs; i++)
+ be_uc_mac_add(adapter, i);
+ adapter->uc_macs = curr_uc_macs;
+ adapter->update_uc_list = false;
}
}
static void be_clear_uc_list(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
int i;
- for (i = 1; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ __dev_uc_unsync(netdev, NULL);
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
adapter->uc_macs = 0;
}
-static void be_set_rx_mode(struct net_device *netdev)
+static void __be_set_rx_mode(struct be_adapter *adapter)
{
- struct be_adapter *adapter = netdev_priv(netdev);
+ struct net_device *netdev = adapter->netdev;
+
+ mutex_lock(&adapter->rx_filter_lock);
if (netdev->flags & IFF_PROMISC) {
- be_set_all_promisc(adapter);
- return;
+ if (!be_in_all_promisc(adapter))
+ be_set_all_promisc(adapter);
+ } else if (be_in_all_promisc(adapter)) {
+ /* We need to re-program the vlan-list or clear
+ * vlan-promisc mode (if needed) when the interface
+ * comes out of promisc mode.
+ */
+ be_vid_config(adapter);
}
- /* Interface was previously in promiscuous mode; disable it */
- if (be_in_all_promisc(adapter)) {
- be_clear_all_promisc(adapter);
- if (adapter->vlans_added)
- be_vid_config(adapter);
- }
+ be_set_uc_list(adapter);
+ be_set_mc_list(adapter);
- /* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter)) {
- be_set_mc_promisc(adapter);
- return;
- }
+ mutex_unlock(&adapter->rx_filter_lock);
+}
- if (netdev_uc_count(netdev) != adapter->uc_macs)
- be_set_uc_list(adapter);
+static void be_work_set_rx_mode(struct work_struct *work)
+{
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
- be_set_mc_list(adapter);
+ __be_set_rx_mode(cmd_work->adapter);
+ kfree(cmd_work);
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1701,7 +1895,8 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1713,6 +1908,9 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
status = be_set_vf_tvt(adapter, vf, vlan);
@@ -3220,9 +3418,7 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- dev_err(dev,
- "Unrecoverable Error detected in the adapter");
- dev_err(dev, "Please reboot server to recover");
+ dev_err(dev, "Error detected in the adapter");
if (skyhawk_chip(adapter))
be_set_error(adapter, BE_ERROR_UE);
@@ -3425,10 +3621,9 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
-
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
be_clear_uc_list(adapter);
+ be_clear_mc_list(adapter);
/* The IFACE flags are enabled in the open path and cleared
* in the close path. When a VF gets detached from the host and
@@ -3462,6 +3657,11 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ /* Before attempting cleanup, ensure all the pending cmds queued on
+ * be_wq have finished execution
+ */
+ flush_workqueue(be_wq);
+
be_disable_if_filters(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3576,17 +3776,16 @@ static int be_enable_if_filters(struct be_adapter *adapter)
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
- status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
+ ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
if (adapter->vlans_added)
be_vid_config(adapter);
- be_set_rx_mode(adapter->netdev);
+ __be_set_rx_mode(adapter);
return 0;
}
@@ -3759,8 +3958,13 @@ static void be_cancel_worker(struct be_adapter *adapter)
static void be_cancel_err_detection(struct be_adapter *adapter)
{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
- cancel_delayed_work_sync(&adapter->be_err_detection_work);
+ cancel_delayed_work_sync(&err_rec->err_detection_work);
adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
}
@@ -3860,6 +4064,20 @@ static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
+static void be_if_destroy(struct be_adapter *adapter)
+{
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+
+ kfree(adapter->mc_list);
+ adapter->mc_list = NULL;
+
+ kfree(adapter->uc_list);
+ adapter->uc_list = NULL;
+}
+
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -3867,6 +4085,8 @@ static int be_clear(struct be_adapter *adapter)
be_cancel_worker(adapter);
+ flush_workqueue(be_wq);
+
if (sriov_enabled(adapter))
be_vf_clear(adapter);
@@ -3884,10 +4104,8 @@ static int be_clear(struct be_adapter *adapter)
}
be_disable_vxlan_offloads(adapter);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ be_if_destroy(adapter);
be_clear_queues(adapter);
@@ -4151,7 +4369,7 @@ static void be_setup_init(struct be_adapter *adapter)
* for distribution between the VFs. This self-imposed limit will determine the
* no: of VFs for which RSS can be enabled.
*/
-void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
struct be_port_resources port_res = {0};
u8 rss_tables_on_port;
@@ -4341,14 +4559,29 @@ static int be_mac_setup(struct be_adapter *adapter)
static void be_schedule_worker(struct be_adapter *adapter)
{
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
+static void be_destroy_err_recovery_workq(void)
+{
+ if (!be_err_recovery_workq)
+ return;
+
+ flush_workqueue(be_err_recovery_workq);
+ destroy_workqueue(be_err_recovery_workq);
+ be_err_recovery_workq = NULL;
+}
+
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
- schedule_delayed_work(&adapter->be_err_detection_work,
- msecs_to_jiffies(delay));
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
+ queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
+ msecs_to_jiffies(delay));
adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
@@ -4393,6 +4626,22 @@ static int be_if_create(struct be_adapter *adapter)
u32 cap_flags = be_if_cap_flags(adapter);
int status;
+ /* alloc required memory for other filtering fields */
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
+ adapter->mc_list = kcalloc(be_max_mc(adapter),
+ sizeof(*adapter->mc_list), GFP_KERNEL);
+ if (!adapter->mc_list)
+ return -ENOMEM;
+
+ adapter->uc_list = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->uc_list), GFP_KERNEL);
+ if (!adapter->uc_list)
+ return -ENOMEM;
+
if (adapter->cfg_num_rx_irqs == 1)
cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
@@ -4401,7 +4650,10 @@ static int be_if_create(struct be_adapter *adapter)
status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
&adapter->if_handle, 0);
- return status;
+ if (status)
+ return status;
+
+ return 0;
}
int be_update_queues(struct be_adapter *adapter)
@@ -4458,10 +4710,15 @@ static inline int fw_major_num(const char *fw_ver)
return fw_major;
}
-/* If any VFs are already enabled don't FLR the PF */
+/* FLR the PF if recovering from an error.
+ * Otherwise, don't FLR the PF if any VFs are already enabled.
+ */
static bool be_reset_required(struct be_adapter *adapter)
{
- return pci_num_vf(adapter->pdev) ? false : true;
+ if (be_error_recovering(adapter))
+ return true;
+ else
+ return pci_num_vf(adapter->pdev) == 0;
}
/* Wait for the FW to be ready and perform the required initialization */
@@ -4473,6 +4730,9 @@ static int be_func_init(struct be_adapter *adapter)
if (status)
return status;
+ /* FW is now ready; clear errors to allow cmds/doorbell */
+ be_clear_error(adapter, BE_CLEAR_ALL);
+
if (be_reset_required(adapter)) {
status = be_cmd_reset_function(adapter);
if (status)
@@ -4480,9 +4740,6 @@ static int be_func_init(struct be_adapter *adapter)
/* Wait for interrupts to quiesce after an FLR */
msleep(100);
-
- /* We can clear all errors when function reset succeeds */
- be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4530,11 +4787,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -4595,6 +4847,9 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
+ if (be_physfn(adapter) && !lancer_chip(adapter))
+ be_cmd_set_features(adapter);
+
be_schedule_worker(adapter);
adapter->flags |= BE_FLAGS_SETUP_DONE;
return 0;
@@ -4728,6 +4983,23 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
+static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
+ void (*func)(struct work_struct *))
+{
+ struct be_cmd_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ dev_err(&adapter->pdev->dev,
+ "be_work memory allocation failed\n");
+ return NULL;
+ }
+
+ INIT_WORK(&work->work, func);
+ work->adapter = adapter;
+ return work;
+}
+
/* VxLAN offload Notes:
*
* The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4742,23 +5014,19 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* adds more than one port, disable offloads and don't re-enable them again
* until after all the tunnels are removed.
*/
-static void be_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void be_work_add_vxlan_port(struct work_struct *work)
{
- struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
+ struct be_adapter *adapter = cmd_work->adapter;
+ struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
- __be16 port = ti->port;
+ __be16 port = cmd_work->info.vxlan_port;
int status;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
- return;
-
if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
adapter->vxlan_port_aliases++;
- return;
+ goto done;
}
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -4770,7 +5038,7 @@ static void be_add_vxlan_port(struct net_device *netdev,
}
if (adapter->vxlan_port_count++ >= 1)
- return;
+ goto done;
status = be_cmd_manage_iface(adapter, adapter->if_handle,
OP_CONVERT_NORMAL_TO_TUNNEL);
@@ -4795,29 +5063,26 @@ static void be_add_vxlan_port(struct net_device *netdev,
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
- return;
+ goto done;
err:
be_disable_vxlan_offloads(adapter);
+done:
+ kfree(cmd_work);
}
-static void be_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void be_work_del_vxlan_port(struct work_struct *work)
{
- struct be_adapter *adapter = netdev_priv(netdev);
- __be16 port = ti->port;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
- return;
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
+ struct be_adapter *adapter = cmd_work->adapter;
+ __be16 port = cmd_work->info.vxlan_port;
if (adapter->vxlan_port != port)
goto done;
if (adapter->vxlan_port_aliases) {
adapter->vxlan_port_aliases--;
- return;
+ goto out;
}
be_disable_vxlan_offloads(adapter);
@@ -4827,6 +5092,40 @@ static void be_del_vxlan_port(struct net_device *netdev,
be16_to_cpu(port));
done:
adapter->vxlan_port_count--;
+out:
+ kfree(cmd_work);
+}
+
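+/* Common handler for VxLAN port add/del: validate the request here and
+ * defer the actual FW commands to a be_wq work item.
+ */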
+static void be_cfg_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti,
+ void (*func)(struct work_struct *))
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_cmd_work *cmd_work;
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
+ return;
+
+ cmd_work = be_alloc_work(adapter, func);
+ if (cmd_work) {
+ cmd_work->info.vxlan_port = ti->port;
+ queue_work(be_wq, &cmd_work->work);
+ }
+}
+
+static void be_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
+}
+
+static void be_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
static netdev_features_t be_features_check(struct sk_buff *skb,
@@ -4891,6 +5190,16 @@ static int be_get_phys_port_id(struct net_device *dev,
return 0;
}
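+/* Defer RX-filter programming to the be_wq worker via be_work_set_rx_mode */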
+static void be_set_rx_mode(struct net_device *dev)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct be_cmd_work *work;
+
+ work = be_alloc_work(adapter, be_work_set_rx_mode);
+ if (work)
+ queue_work(be_wq, &work->work);
+}
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -4984,13 +5293,145 @@ static int be_resume(struct be_adapter *adapter)
return 0;
}
+static void be_soft_reset(struct be_adapter *adapter)
+{
+ u32 val;
+
+ dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
+ val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+ val |= SLIPORT_SOFTRESET_SR_MASK;
+ iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+}
+
+static bool be_err_is_recoverable(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ unsigned long initial_idle_time =
+ msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
+ unsigned long recovery_interval =
+ msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
+ u16 ue_err_code;
+ u32 val;
+
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
+ return false;
+ ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
+ if (ue_err_code == 0)
+ return false;
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
+ ue_err_code);
+
+ if (jiffies - err_rec->probe_time <= initial_idle_time) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from driver load\n",
+ jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (err_rec->last_recovery_time &&
+ (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from last recovery\n",
+ jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (ue_err_code == err_rec->last_err_code) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover from a consecutive TPE error\n");
+ return false;
+ }
+
+ err_rec->last_recovery_time = jiffies;
+ err_rec->last_err_code = ue_err_code;
+ return true;
+}
+
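+/* One step of the BEx/SH TPE recovery state machine: returns -EAGAIN with
+ * resched_delay set while recovery is in progress, 0 when done, or a
+ * negative error if the adapter cannot be recovered.
+ */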
+static int be_tpe_recover(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ int status = -EAGAIN;
+ u32 val;
+
+ switch (err_rec->recovery_state) {
+ case ERR_RECOVERY_ST_NONE:
+ err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
+ err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_DETECT:
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) !=
+ POST_STAGE_RECOVERABLE_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable HW error detected: 0x%x\n", val);
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
+
+ /* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
+ * milliseconds before it checks for final error status in
+ * SLIPORT_SEMAPHORE to determine if the recovery criteria are met.
+ * If they are, PF0 then initiates a Soft Reset.
+ */
+ if (adapter->pf_num == 0) {
+ err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
+ err_rec->resched_delay = err_rec->ue_to_reset_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+ }
+
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_RESET:
+ if (!be_err_is_recoverable(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to meet recovery criteria\n");
+ status = -EIO;
+ err_rec->resched_delay = 0;
+ break;
+ }
+ be_soft_reset(adapter);
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ err_rec->ue_to_reset_time;
+ break;
+
+ case ERR_RECOVERY_ST_PRE_POLL:
+ err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
+ err_rec->resched_delay = 0;
+ status = 0; /* done */
+ break;
+
+ default:
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ return status;
+}
+
static int be_err_recover(struct be_adapter *adapter)
{
int status;
- /* Error recovery is supported only Lancer as of now */
- if (!lancer_chip(adapter))
- return -EIO;
+ if (!lancer_chip(adapter)) {
+ if (!adapter->error_recovery.recovery_supported ||
+ adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
+ return -EIO;
+ status = be_tpe_recover(adapter);
+ if (status)
+ goto err;
+ }
/* Wait for adapter to reach quiescent state before
* destroying queues
@@ -4999,59 +5440,74 @@ static int be_err_recover(struct be_adapter *adapter)
if (status)
goto err;
+ adapter->flags |= BE_FLAGS_TRY_RECOVERY;
+
be_cleanup(adapter);
status = be_resume(adapter);
if (status)
goto err;
- return 0;
+ adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
+
err:
return status;
}
static void be_err_detection_task(struct work_struct *work)
{
+ struct be_error_recovery *err_rec =
+ container_of(work, struct be_error_recovery,
+ err_detection_work.work);
struct be_adapter *adapter =
- container_of(work, struct be_adapter,
- be_err_detection_work.work);
+ container_of(err_rec, struct be_adapter,
+ error_recovery);
+ u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
struct device *dev = &adapter->pdev->dev;
int recovery_status;
- int delay = ERR_DETECTION_DELAY;
be_detect_error(adapter);
-
- if (be_check_error(adapter, BE_ERROR_HW))
- recovery_status = be_err_recover(adapter);
- else
+ if (!be_check_error(adapter, BE_ERROR_HW))
goto reschedule_task;
+ recovery_status = be_err_recover(adapter);
if (!recovery_status) {
- adapter->recovery_retries = 0;
+ err_rec->recovery_retries = 0;
+ err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
dev_info(dev, "Adapter recovery successful\n");
goto reschedule_task;
- } else if (be_virtfn(adapter)) {
+ } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
+ /* BEx/SH recovery state machine */
+ if (adapter->pf_num == 0 &&
+ err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
+ dev_err(&adapter->pdev->dev,
+ "Adapter recovery in progress\n");
+ resched_delay = err_rec->resched_delay;
+ goto reschedule_task;
+ } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
/* For VFs, check if the PF has allocated resources
* every second.
*/
dev_err(dev, "Re-trying adapter recovery\n");
goto reschedule_task;
- } else if (adapter->recovery_retries++ <
- MAX_ERR_RECOVERY_RETRY_COUNT) {
+ } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
+ ERR_RECOVERY_MAX_RETRY_COUNT) {
/* In case of another error during recovery, it takes 30 sec
* for the adapter to come out of error. Retry error recovery after
* this time interval.
*/
dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
- delay = ERR_RECOVERY_RETRY_DELAY;
+ resched_delay = ERR_RECOVERY_RETRY_DELAY;
goto reschedule_task;
} else {
dev_err(dev, "Adapter recovery failed\n");
+ dev_err(dev, "Please reboot server to recover\n");
}
return;
+
reschedule_task:
- be_schedule_err_detection(adapter, delay);
+ be_schedule_err_detection(adapter, resched_delay);
}
static void be_log_sfp_info(struct be_adapter *adapter)
@@ -5116,7 +5572,7 @@ static void be_worker(struct work_struct *work)
reschedule:
adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -5256,14 +5712,18 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- spin_lock_init(&adapter->mcc_lock);
+ mutex_init(&adapter->mcc_lock);
+ mutex_init(&adapter->rx_filter_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
INIT_DELAYED_WORK(&adapter->work, be_worker);
- INIT_DELAYED_WORK(&adapter->be_err_detection_work,
+
+ adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
+ adapter->error_recovery.resched_delay = 0;
+ INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
be_err_detection_task);
adapter->rx_fc = true;
@@ -5298,6 +5758,9 @@ static void be_remove(struct pci_dev *pdev)
be_clear(adapter);
+ if (!pci_vfs_assigned(adapter->pdev))
+ be_cmd_reset_function(adapter);
+
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -5454,6 +5917,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_roce_dev_add(adapter);
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+ adapter->error_recovery.probe_time = jiffies;
/* On Die temperature not supported for VF. */
if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5699,6 +6163,8 @@ static struct pci_driver be_driver = {
static int __init be_init_module(void)
{
+ int status;
+
if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
rx_frag_size != 2048) {
printk(KERN_WARNING DRV_NAME
@@ -5712,12 +6178,33 @@ static int __init be_init_module(void)
pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
}
- return pci_register_driver(&be_driver);
+ be_wq = create_singlethread_workqueue("be_wq");
+ if (!be_wq) {
+ pr_warn(DRV_NAME "workqueue creation failed\n");
+ return -1;
+ }
+
+ be_err_recovery_workq =
+ create_singlethread_workqueue("be_err_recover");
+ if (!be_err_recovery_workq)
+ pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
+
+ status = pci_register_driver(&be_driver);
+ if (status) {
+ destroy_workqueue(be_wq);
+ be_destroy_err_recovery_workq();
+ }
+ return status;
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
pci_unregister_driver(&be_driver);
+
+ be_destroy_err_recovery_workq();
+
+ if (be_wq)
+ destroy_workqueue(be_wq);
}
module_exit(be_exit_module);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 36361f8bf894..262587240c86 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -60,6 +60,8 @@ struct ftgmac100 {
struct ftgmac100_descs *descs;
dma_addr_t descs_dma_addr;
+ struct page *rx_pages[RX_QUEUE_ENTRIES];
+
unsigned int rx_pointer;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
@@ -77,6 +79,9 @@ struct ftgmac100 {
int int_mask_all;
bool use_ncsi;
bool enabled;
+
+ u32 rxdes0_edorr_mask;
+ u32 txdes0_edotr_mask;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -257,10 +262,11 @@ static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
}
-static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
/* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
@@ -298,9 +304,10 @@ static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
}
-static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
@@ -341,18 +348,27 @@ static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
}
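+/* The RX page pointers are tracked in the driver's rx_pages[] array,
+ * indexed by the descriptor's position in the ring.
+ */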
+static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ return &priv->rx_pages[rxdes - priv->descs->rxdes];
+}
+
/*
* rxdes2 is not used by hardware. We use it to keep track of page.
* Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
*/
-static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes,
+ struct page *page)
{
- rxdes->rxdes2 = (unsigned int)page;
+ *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
}
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- return (struct page *)rxdes->rxdes2;
+ return *ftgmac100_rxdes_page_slot(priv, rxdes);
}
/******************************************************************************
@@ -382,7 +398,7 @@ ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
if (ftgmac100_rxdes_first_segment(rxdes))
return rxdes;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
}
@@ -453,7 +469,7 @@ static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
@@ -501,7 +517,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
do {
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
unsigned int size;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
@@ -545,10 +561,11 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
/******************************************************************************
* internal functions (transmit descriptor)
*****************************************************************************/
-static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
/* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
txdes->txdes1 = 0;
txdes->txdes2 = 0;
txdes->txdes3 = 0;
@@ -569,9 +586,10 @@ static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
-static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
@@ -690,7 +708,7 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
dev_kfree_skb(skb);
- ftgmac100_txdes_reset(txdes);
+ ftgmac100_txdes_reset(priv, txdes);
ftgmac100_tx_clean_pointer_advance(priv);
@@ -779,9 +797,9 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
return -ENOMEM;
}
- ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_page(priv, rxdes, page);
ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
@@ -791,7 +809,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
if (!page)
@@ -828,7 +846,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
return -ENOMEM;
/* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+ ftgmac100_rxdes_set_end_of_ring(priv,
+ &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
@@ -838,7 +857,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
}
/* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ ftgmac100_txdes_set_end_of_ring(priv,
+ &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
err:
@@ -1055,14 +1075,12 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
}
if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG)) {
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
- status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
if (status & FTGMAC100_INT_NO_RXBUF) {
/* RX buffer unavailable */
@@ -1092,6 +1110,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status;
int err;
err = ftgmac100_alloc_buffers(priv);
@@ -1117,6 +1136,11 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+
+ /* Clear stale interrupts */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
if (netdev->phydev)
phy_start(netdev->phydev);
else if (priv->use_ncsi)
@@ -1166,6 +1190,8 @@ static int ftgmac100_stop(struct net_device *netdev)
napi_disable(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
+ else if (priv->use_ncsi)
+ ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1226,12 +1252,21 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
int i, err = 0;
+ u32 reg;
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus)
return -EIO;
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ /* This driver supports the old MDIO interface */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
+ reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1355,9 +1390,18 @@ static int ftgmac100_probe(struct platform_device *pdev)
FTGMAC100_INT_XPKT_ETH |
FTGMAC100_INT_XPKT_LOST |
FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG |
FTGMAC100_INT_RPKT_BUF |
FTGMAC100_INT_NO_RXBUF);
+
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ } else {
+ priv->rxdes0_edorr_mask = BIT(15);
+ priv->txdes0_edotr_mask = BIT(15);
+ }
+
if (pdev->dev.of_node &&
of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
@@ -1367,7 +1411,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
- priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 13408d448b05..a7ce0ac8858a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -134,6 +134,11 @@
#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
/*
+ * Feature Register
+ */
+#define FTGMAC100_REVR_NEW_MDIO_INTERFACE BIT(31)
+
+/*
* Receive buffer size register
*/
#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
@@ -152,6 +157,7 @@
#define FTGMAC100_MACCR_FULLDUP (1 << 8)
#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_PHY_LINK_LEVEL (1 << 11)
#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
#define FTGMAC100_MACCR_RX_ALL (1 << 14)
@@ -189,7 +195,6 @@ struct ftgmac100_txdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
-#define FTGMAC100_TXDES0_EDOTR (1 << 15)
#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
#define FTGMAC100_TXDES0_LTS (1 << 28)
#define FTGMAC100_TXDES0_FTS (1 << 29)
@@ -215,7 +220,6 @@ struct ftgmac100_rxdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
-#define FTGMAC100_RXDES0_EDORR (1 << 15)
#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 692ee248e486..48a033e64423 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -913,13 +913,11 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so we need to reconfigure it.
*/
- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
- }
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
/* Clear any outstanding interrupt. */
writel(0xffffffff, fep->hwp + FEC_IEVENT);
@@ -2896,7 +2894,7 @@ fec_enet_close(struct net_device *ndev)
* this kind of feature?).
*/
-#define HASH_BITS 6 /* #bits in hash */
+#define FEC_HASH_BITS 6 /* #bits in hash */
#define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
@@ -2944,10 +2942,10 @@ static void set_multicast_list(struct net_device *ndev)
}
}
- /* only upper 6 bits (HASH_BITS) are used
+ /* only upper 6 bits (FEC_HASH_BITS) are used
* which point to a specific bit in the hash registers
*/
- hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+ hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
if (hash > 31) {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 8ddeedbcef9c..ddf0260176c9 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -192,7 +192,7 @@ struct fman_mac_params {
/* A handle to the FM object this port is related to */
void *fm;
/* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+ * MACs; MUST be set to 0 for MACs that don't have
* mdio-irq, or for polling
*/
void *dev_id; /* device cookie used by the exception cbs */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 61fd486c50bb..dc120c148d97 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -60,6 +60,9 @@ module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
"Freescale bitmapped debugging message enable value");
+#define RX_RING_SIZE 32
+#define TX_RING_SIZE 64
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
@@ -79,8 +82,8 @@ static void skb_align(struct sk_buff *skb, int align)
skb_reserve(skb, align - off);
}
-/* NAPI receive function */
-static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
+/* NAPI function */
+static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
struct net_device *dev = fep->ndev;
@@ -90,9 +93,102 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
int received = 0;
u16 pkt_len, sc;
int curidx;
+ int dirtyidx, do_wake, do_restart;
+ int tx_left = TX_RING_SIZE;
- if (budget <= 0)
- return received;
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+ /* clear status bits for napi */
+ (*fep->ops->napi_clear_event)(dev);
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (++fep->tx_free == MAX_SKB_FRAGS)
+ do_wake = 1;
+ tx_left--;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
/*
* First, grab all of the stats for the incoming packet.
@@ -100,10 +196,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
*/
bdp = fep->cur_rx;
- /* clear RX status bits for napi*/
- (*fep->ops->napi_clear_rx_event)(dev);
-
- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
+ received < budget) {
curidx = bdp - fep->rx_bd_base;
/*
@@ -132,21 +226,10 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
if (sc & BD_ENET_RX_OV)
fep->stats.rx_crc_errors++;
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- skbn = skb;
-
+ skbn = fep->rx_skbuff[curidx];
} else {
skb = fep->rx_skbuff[curidx];
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
/*
* Process the incoming frame.
*/
@@ -162,12 +245,30 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
swap(skb, skbn);
+ dma_sync_single_for_cpu(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skbn)
+ if (skbn) {
+ dma_addr_t dma;
+
skb_align(skbn, ENET_RX_ALIGN);
+
+ dma_unmap_single(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ dma = dma_map_single(fep->dev,
+ skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+ CBDW_BUFADDR(bdp, dma);
+ }
}
if (skbn != NULL) {
@@ -182,9 +283,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
}
fep->rx_skbuff[curidx] = skbn;
- CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
@@ -197,134 +295,19 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
bdp = fep->rx_bd_base;
(*fep->ops->rx_bd_done)(dev);
-
- if (received >= budget)
- break;
}
fep->cur_rx = bdp;
- if (received < budget) {
+ if (received < budget && tx_left) {
/* done */
napi_complete(napi);
- (*fep->ops->napi_enable_rx)(dev);
- }
- return received;
-}
-
-static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
-{
- struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
- napi_tx);
- struct net_device *dev = fep->ndev;
- cbd_t __iomem *bdp;
- struct sk_buff *skb;
- int dirtyidx, do_wake, do_restart;
- u16 sc;
- int has_tx_work = 0;
-
- spin_lock(&fep->tx_lock);
- bdp = fep->dirty_tx;
-
- /* clear TX status bits for napi*/
- (*fep->ops->napi_clear_tx_event)(dev);
-
- do_wake = do_restart = 0;
- while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
- dirtyidx = bdp - fep->tx_bd_base;
-
- if (fep->tx_free == fep->tx_ring)
- break;
-
- skb = fep->tx_skbuff[dirtyidx];
-
- /*
- * Check for errors.
- */
- if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
- BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
- if (sc & BD_ENET_TX_HB) /* No heartbeat */
- fep->stats.tx_heartbeat_errors++;
- if (sc & BD_ENET_TX_LC) /* Late collision */
- fep->stats.tx_window_errors++;
- if (sc & BD_ENET_TX_RL) /* Retrans limit */
- fep->stats.tx_aborted_errors++;
- if (sc & BD_ENET_TX_UN) /* Underrun */
- fep->stats.tx_fifo_errors++;
- if (sc & BD_ENET_TX_CSL) /* Carrier lost */
- fep->stats.tx_carrier_errors++;
-
- if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
- fep->stats.tx_errors++;
- do_restart = 1;
- }
- } else
- fep->stats.tx_packets++;
-
- if (sc & BD_ENET_TX_READY) {
- dev_warn(fep->dev,
- "HEY! Enet xmit interrupt and TX_READY.\n");
- }
-
- /*
- * Deferred means some collisions occurred during transmit,
- * but we eventually sent the packet OK.
- */
- if (sc & BD_ENET_TX_DEF)
- fep->stats.collisions++;
-
- /* unmap */
- if (fep->mapped_as_page[dirtyidx])
- dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- else
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-
- /*
- * Free the sk buffer associated with this last transmit.
- */
- if (skb) {
- dev_kfree_skb(skb);
- fep->tx_skbuff[dirtyidx] = NULL;
- }
-
- /*
- * Update pointer to next buffer descriptor to be transmitted.
- */
- if ((sc & BD_ENET_TX_WRAP) == 0)
- bdp++;
- else
- bdp = fep->tx_bd_base;
-
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
- */
- if (++fep->tx_free >= MAX_SKB_FRAGS)
- do_wake = 1;
- has_tx_work = 1;
- }
-
- fep->dirty_tx = bdp;
-
- if (do_restart)
- (*fep->ops->tx_restart)(dev);
+ (*fep->ops->napi_enable)(dev);
- if (!has_tx_work) {
- napi_complete(napi);
- (*fep->ops->napi_enable_tx)(dev);
+ return received;
}
- spin_unlock(&fep->tx_lock);
-
- if (do_wake)
- netif_wake_queue(dev);
-
- if (has_tx_work)
- return budget;
- return 0;
+ return budget;
}
/*
@@ -350,18 +333,18 @@ fs_enet_interrupt(int irq, void *dev_id)
nr++;
int_clr_events = int_events;
- int_clr_events &= ~fep->ev_napi_rx;
+ int_clr_events &= ~fep->ev_napi;
(*fep->ops->clear_int_events)(dev, int_clr_events);
if (int_events & fep->ev_err)
(*fep->ops->ev_error)(dev, int_events);
- if (int_events & fep->ev_rx) {
+ if (int_events & fep->ev) {
napi_ok = napi_schedule_prep(&fep->napi);
- (*fep->ops->napi_disable_rx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+ (*fep->ops->napi_disable)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi);
/* NOTE: it is possible for FCCs in NAPI mode */
/* to submit a spurious interrupt while in poll */
@@ -369,17 +352,6 @@ fs_enet_interrupt(int irq, void *dev_id)
__napi_schedule(&fep->napi);
}
- if (int_events & fep->ev_tx) {
- napi_ok = napi_schedule_prep(&fep->napi_tx);
-
- (*fep->ops->napi_disable_tx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
- if (napi_ok)
- __napi_schedule(&fep->napi_tx);
- }
}
handled = nr > 0;
@@ -659,7 +631,8 @@ static void fs_timeout(struct net_device *dev)
}
phy_start(dev->phydev);
- wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ wake = fep->tx_free >= MAX_SKB_FRAGS &&
+ !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
if (wake)
@@ -751,11 +724,10 @@ static int fs_enet_open(struct net_device *dev)
int err;
/* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_rx_napi */
+ /* not doing this will cause a crash in fs_enet_napi */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
- napi_enable(&fep->napi_tx);
/* Install our interrupt handler. */
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
@@ -763,7 +735,6 @@ static int fs_enet_open(struct net_device *dev)
if (r != 0) {
dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return -EINVAL;
}
@@ -771,7 +742,6 @@ static int fs_enet_open(struct net_device *dev)
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return err;
}
phy_start(dev->phydev);
@@ -789,7 +759,6 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -861,6 +830,44 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
fep->msg_enable = value;
}
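+/* ethtool tunables: only ETHTOOL_RX_COPYBREAK is currently supported */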
+static int fs_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fpi->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fs_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fpi->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -872,6 +879,8 @@ static const struct ethtool_ops fs_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_tunable = fs_get_tunable,
+ .set_tunable = fs_set_tunable,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -939,8 +948,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
- fpi->rx_ring = 32;
- fpi->tx_ring = 64;
+ fpi->rx_ring = RX_RING_SIZE;
+ fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -1024,8 +1033,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
- netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
- netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
+ netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index e29f54a35210..fee24c822fad 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -81,12 +81,9 @@ struct fs_ops {
void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
- void (*napi_clear_rx_event)(struct net_device *dev);
- void (*napi_enable_rx)(struct net_device *dev);
- void (*napi_disable_rx)(struct net_device *dev);
- void (*napi_clear_tx_event)(struct net_device *dev);
- void (*napi_enable_tx)(struct net_device *dev);
- void (*napi_disable_tx)(struct net_device *dev);
+ void (*napi_clear_event)(struct net_device *dev);
+ void (*napi_enable)(struct net_device *dev);
+ void (*napi_disable)(struct net_device *dev);
void (*rx_bd_done)(struct net_device *dev);
void (*tx_kickstart)(struct net_device *dev);
u32 (*get_int_events)(struct net_device *dev);
@@ -122,7 +119,6 @@ struct phy_info {
struct fs_enet_private {
struct napi_struct napi;
- struct napi_struct napi_tx;
struct device *dev; /* pointer back to the device (must be initialized first) */
struct net_device *ndev;
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -152,10 +148,8 @@ struct fs_enet_private {
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
- u32 ev_napi_rx; /* mask of NAPI rx events */
- u32 ev_napi_tx; /* mask of NAPI rx events */
- u32 ev_rx; /* rx event mask */
- u32 ev_tx; /* tx event mask */
+ u32 ev_napi; /* mask of NAPI events */
+ u32 ev; /* event mask */
u32 ev_err; /* error event mask */
u16 bd_rx_empty; /* mask of BD rx empty */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index d71761a34022..120c758f5d01 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
int ret = -EINVAL;
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
goto out;
fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
@@ -124,10 +124,8 @@ out:
return ret;
}
-#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
-#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
-#define FCC_RX_EVENT (FCC_ENET_RXF)
-#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
+#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
@@ -137,10 +135,8 @@ static int setup_data(struct net_device *dev)
if (do_pd_setup(fep) != 0)
return -EINVAL;
- fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FCC_RX_EVENT;
- fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_napi = FCC_NAPI_EVENT_MSK;
+ fep->ev = FCC_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
return 0;
@@ -424,52 +420,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+ W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+ S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+ C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -595,12 +567,9 @@ const struct fs_ops fs_fcc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 35a318ed3a62..777beffa1e1e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
@@ -109,10 +109,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
-#define FEC_RX_EVENT (FEC_ENET_RXF)
-#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
+#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
FEC_ENET_BABT | FEC_ENET_EBERR)
@@ -126,10 +124,8 @@ static int setup_data(struct net_device *dev)
fep->fec.hthi = 0;
fep->fec.htlo = 0;
- fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FEC_RX_EVENT;
- fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_napi = FEC_NAPI_EVENT_MSK;
+ fep->ev = FEC_EVENT;
fep->ev_err = FEC_ERR_EVENT_MSK;
return 0;
@@ -396,52 +392,28 @@ static void stop(struct net_device *dev)
}
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+ FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+ FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+ FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -513,12 +485,9 @@ const struct fs_ops fs_fec_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index e8b9c33d35b4..15abd37d70e3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
@@ -115,10 +115,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
-#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
-#define SCC_RX_EVENT (SCCE_ENET_RXF)
-#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
+#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
static int setup_data(struct net_device *dev)
@@ -130,10 +128,8 @@ static int setup_data(struct net_device *dev)
fep->scc.hthi = 0;
fep->scc.htlo = 0;
- fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = SCC_RX_EVENT;
- fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
+ fep->ev_napi = SCC_NAPI_EVENT_MSK;
+ fep->ev = SCC_EVENT | SCCE_ENET_TXE;
fep->ev_err = SCC_ERR_EVENT_MSK;
return 0;
@@ -379,52 +375,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+ W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+ S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+ C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -497,12 +469,9 @@ const struct fs_ops fs_scc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f3c63dce1e30..446c7b374ff5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -195,7 +195,7 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
return 0;
}
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
/*
* Return the TBIPA address, starting from the address
* of the mapped GFAR MDIO registers (struct gfar)
@@ -228,7 +228,7 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
}
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
/*
* Return the TBIPAR address for a QE MDIO node, starting from the address
* of the mapped MII registers (struct fsl_pq_mii)
@@ -306,7 +306,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
#endif
static const struct of_device_id fsl_pq_mdio_match[] = {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
{
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
@@ -344,7 +344,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
},
},
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
{
.compatible = "fsl,ucc-mdio",
.data = &(struct fsl_pq_mdio_data) {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5bf1ade28315..186ef8f16c80 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3756,7 +3756,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock propperty\n");
+ pr_err("invalid rx-clock property\n");
return -EINVAL;
}
ug_info->uf_info.rx_clock = *prop;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 7b8fe866f603..e03b30c60dcf 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -271,11 +271,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
goto err_ioremap;
}
- if (of_get_property(pdev->dev.of_node,
- "little-endian", NULL))
- priv->is_little_endian = true;
- else
- priv->is_little_endian = false;
+ priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
+ "little-endian");
ret = of_mdiobus_register(bus, np);
if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0c4afe95ef54..39778892b3b3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -600,7 +600,7 @@ static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
struct hip04_priv *priv;
@@ -755,13 +755,13 @@ static void hip04_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
-static struct ethtool_ops hip04_ethtool_ops = {
+static const struct ethtool_ops hip04_ethtool_ops = {
.get_coalesce = hip04_get_coalesce,
.set_coalesce = hip04_set_coalesce,
.get_drvinfo = hip04_get_drvinfo,
};
-static struct net_device_ops hip04_netdev_ops = {
+static const struct net_device_ops hip04_netdev_ops = {
.ndo_open = hip04_mac_open,
.ndo_stop = hip04_mac_stop,
.ndo_get_stats = hip04_get_stats,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index b5d7ad0252a0..ced185962ef8 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -699,7 +699,7 @@ static int hisi_femac_net_ioctl(struct net_device *dev,
return phy_mii_ioctl(dev->phydev, ifreq, cmd);
}
-static struct ethtool_ops hisi_femac_ethtools_ops = {
+static const struct ethtool_ops hisi_femac_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -940,8 +940,8 @@ static int hisi_femac_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-int hisi_femac_drv_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
@@ -957,7 +957,7 @@ int hisi_femac_drv_suspend(struct platform_device *pdev,
return 0;
}
-int hisi_femac_drv_resume(struct platform_device *pdev)
+static int hisi_femac_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 275618bb4646..e69a6bed31a9 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -750,7 +750,7 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
.ndo_set_mac_address = hix5hd2_net_set_mac_address,
};
-static struct ethtool_ops hix5hd2_ethtools_ops = {
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5c8afe1a5ccb..a68eef0ee65f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -644,21 +644,6 @@ hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
return addr;
}
-static int hns_mac_phydev_match(struct device *dev, void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_mac_phydev_match);
- return dev ? to_platform_device(dev) : NULL;
-}
-
static int
hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
u32 addr)
@@ -684,8 +669,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
if (!phy || IS_ERR(phy))
return -EIO;
- if (mdio->irq)
- phy->irq = mdio->irq[addr];
+ phy->irq = mdio->irq[addr];
/* All data is now stored in the phy struct;
* register it
@@ -725,7 +709,7 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
return;
/* dev address in adev */
- pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
+ pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
mii_bus = platform_get_drvdata(pdev);
rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
if (!rc)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index afb5daa3721d..8e5b3f51b47b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
@@ -115,10 +116,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
res);
- if (IS_ERR(dsaf_dev->sc_base)) {
- dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ if (IS_ERR(dsaf_dev->sc_base))
return PTR_ERR(dsaf_dev->sc_base);
- }
res = platform_get_resource(pdev, IORESOURCE_MEM,
res_idx++);
@@ -129,10 +128,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
res);
- if (IS_ERR(dsaf_dev->sds_base)) {
- dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ if (IS_ERR(dsaf_dev->sds_base))
return PTR_ERR(dsaf_dev->sds_base);
- }
} else {
dsaf_dev->sub_ctrl = syscon;
}
@@ -147,10 +144,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dsaf_dev->ppe_base)) {
- dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+ if (IS_ERR(dsaf_dev->ppe_base))
return PTR_ERR(dsaf_dev->ppe_base);
- }
dsaf_dev->ppe_paddr = res->start;
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
@@ -166,10 +161,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dsaf_dev->io_base)) {
- dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+ if (IS_ERR(dsaf_dev->io_base))
return PTR_ERR(dsaf_dev->io_base);
- }
}
ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
@@ -2781,6 +2774,110 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
+/**
+ * hns_dsaf_roce_reset - reset dsaf and roce
+ * @dsaf_fwnode: Pointer to the fwnode of the dsaf device
+ * @dereset: false - request reset, true - drop reset
+ * return 0 - success, negative - fail
+ */
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
+{
+ struct dsaf_device *dsaf_dev;
+ struct platform_device *pdev;
+ u32 mp;
+ u32 sl;
+ u32 credit;
+ int i;
+ const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ };
+ const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ };
+
+ /* find the platform device corresponding to fwnode */
+ if (is_of_node(dsaf_fwnode)) {
+ pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+ } else if (is_acpi_device_node(dsaf_fwnode)) {
+ pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
+ } else {
+ pr_err("fwnode is neither OF or ACPI type\n");
+ return -EINVAL;
+ }
+
+ /* check if we successfully fetched the pdev */
+ if (!pdev) {
+ pr_err("couldn't find platform device for node\n");
+ return -ENODEV;
+ }
+
+ /* retrieve the dsaf_device from the driver data */
+ dsaf_dev = dev_get_drvdata(&pdev->dev);
+ if (!dsaf_dev) {
+ dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ return -ENODEV;
+ }
+
+ /* now, make sure we are running on compatible SoC */
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
+ dsaf_dev->ae_dev.name);
+ return -ENODEV;
+ }
+
+ /* do reset or de-reset according to the flag */
+ if (!dereset) {
+ /* reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ false);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
+ } else {
+ /* configure dsaf tx roce correspond to port map and sl map */
+ mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(mp, 7 << i * 3, i * 3,
+ port_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
+
+ sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(sl, 3 << i * 2, i * 2,
+ sl_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
+
+ /* de-reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ true);
+ msleep(SRST_TIME_INTERVAL);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
+
+ /* enable dsaf channel rocee credit */
+ credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(hns_dsaf_roce_reset);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
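
The helper exported above is intended to be driven by the RoCE engine driver; a minimal sketch of such a caller, assuming it already holds the dsaf device's fwnode (the wrapper name below is illustrative, not part of this patch):

static int example_roce_reset_cycle(struct fwnode_handle *dsaf_fwnode)
{
	int ret;

	/* dereset == false: put the RoCE channels and engine into reset */
	ret = hns_dsaf_roce_reset(dsaf_fwnode, false);
	if (ret)
		return ret;

	/* ... reinitialize RoCE engine state while it is held in reset ... */

	/* dereset == true: drop reset; this also reprograms the port/SL
	 * maps and re-enables the SBM credits
	 */
	return hns_dsaf_roce_reset(dsaf_fwnode, true);
}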
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 1daf018d9071..35df187e66f1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -43,6 +43,32 @@ struct hns_mac_cb;
#define DSAF_PRIO_NR 8
#define DSAF_REG_PER_ZONE 3
+#define DSAF_ROCE_CREDIT_CHN 8
+#define DSAF_ROCE_CHAN_MODE 3
+
+enum dsaf_roce_port_mode {
+ DSAF_ROCE_6PORT_MODE,
+ DSAF_ROCE_4PORT_MODE,
+ DSAF_ROCE_2PORT_MODE,
+ DSAF_ROCE_CHAN_MODE_NUM,
+};
+
+enum dsaf_roce_port_num {
+ DSAF_ROCE_PORT_0,
+ DSAF_ROCE_PORT_1,
+ DSAF_ROCE_PORT_2,
+ DSAF_ROCE_PORT_3,
+ DSAF_ROCE_PORT_4,
+ DSAF_ROCE_PORT_5,
+};
+
+enum dsaf_roce_qos_sl {
+ DSAF_ROCE_SL_0,
+ DSAF_ROCE_SL_1,
+ DSAF_ROCE_SL_2,
+ DSAF_ROCE_SL_3,
+};
+
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -279,7 +305,7 @@ struct dsaf_misc_op {
void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
- /* reset seris function, it will be reset if the dereseet is 0 */
+ /* reset series function, it will be reset if the dereset is 0 */
void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
@@ -287,6 +313,9 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
+ bool dereset);
+ void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 611b67b6f450..67accce1d33d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -26,6 +26,8 @@ enum _dsm_rst_type {
HNS_XGE_CORE_RESET_FUNC = 0x3,
HNS_XGE_RESET_FUNC = 0x4,
HNS_GE_RESET_FUNC = 0x5,
+ HNS_DSAF_CHN_RESET_FUNC = 0x6,
+ HNS_ROCE_RESET_FUNC = 0x7,
};
const u8 hns_dsaf_acpi_dsm_uuid[] = {
@@ -231,6 +233,66 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ * @dereset: false - request reset, true - drop reset
+ */
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ u32 reg_addr;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, msk);
+}
+
+/**
+ * hns_dsaf_srst_chns_acpi - reset dsaf channels via ACPI
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ * @dereset: false - request reset, true - drop reset
+ */
+void
+hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_CHN_RESET_FUNC,
+ msk, dereset);
+}
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
+ } else {
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
+ msleep(20);
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
+ }
+}
+
+void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_ROCE_RESET_FUNC, 0, dereset);
+}
+
static void
hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
u32 port, bool dereset)
@@ -539,6 +601,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -555,6 +619,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -567,3 +633,18 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
return (void *)misc_op;
}
+
+static int hns_dsaf_dev_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_dsaf_dev_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
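
The two new hooks are reached through dsaf_dev->misc_op so the DT and ACPI variants stay interchangeable; a condensed sketch of how a caller such as hns_dsaf_roce_reset() drives them (DSAF_CHNS_MASK is 0x3f000, i.e. bits 12-17, the six RoCE crossbar channels):

static void example_cycle_roce_channels(struct dsaf_device *dsaf_dev)
{
	/* request reset of the roce0-5 xbar channels, then the RoCE engine */
	dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
	dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);

	/* drop reset in the same order, with the settle delay used above */
	dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
	msleep(SRST_TIME_INTERVAL);
	dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
}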
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index f06bb03d47a6..310e80261366 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -34,5 +34,6 @@
#define DSAF_LED_ANCHOR_B 5
struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
-
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 235f74444b1d..4b8b803822d1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -77,6 +77,12 @@
#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8
+#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
+#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
+#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328
#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
@@ -133,6 +139,8 @@
#define DSAF_ROCEE_INT_STS_0_REG 0x200
#define DSAFV2_SERDES_LBK_0_REG 0x220
#define DSAF_PAUSE_CFG_REG 0x240
+#define DSAF_ROCE_PORT_MAP_REG 0x2A0
+#define DSAF_ROCE_SL_MAP_REG 0x2A4
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -178,6 +186,7 @@
#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380
#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
@@ -796,6 +805,9 @@
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+#define DSAF_CHNS_MASK 0x3f000
+#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2
+#define SRST_TIME_INTERVAL 20
#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d7e1f8c7ae92..059aaeda46b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -994,10 +994,10 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
- if (priv->phy) {
+ if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
- state = priv->phy->link;
+ state = ndev->phydev->link;
}
state = state && h->dev->ops->get_status(h);
@@ -1022,7 +1022,6 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
- struct hns_nic_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1046,8 +1045,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
- priv->phy = phy_dev;
-
return 0;
}
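
The conversion above depends on the PHY core publishing the attached PHY through net_device itself, which makes the private priv->phy copy redundant; a small hypothetical sketch of that pattern (phy_connect_direct() stores the device in ndev->phydev on success):

static void example_adjust_link(struct net_device *ndev)
{
	/* react to ndev->phydev->speed / duplex / link changes here */
}

static int example_init_phy(struct net_device *ndev, struct phy_device *phy)
{
	int ret;

	ret = phy_connect_direct(ndev, phy, example_adjust_link,
				 PHY_INTERFACE_MODE_SGMII);
	if (ret)
		return ret;

	/* ndev->phydev is now valid until phy_disconnect() is called */
	phy_start(ndev->phydev);
	return 0;
}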
@@ -1224,8 +1221,8 @@ static int hns_nic_net_up(struct net_device *ndev)
if (ret)
goto out_start_err;
- if (priv->phy)
- phy_start(priv->phy);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
clear_bit(NIC_STATE_DOWN, &priv->state);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -1259,8 +1256,8 @@ static void hns_nic_net_down(struct net_device *ndev)
netif_tx_disable(ndev);
priv->link = 0;
- if (priv->phy)
- phy_stop(priv->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
ops = priv->ae_handle->dev->ops;
@@ -1359,8 +1356,7 @@ static void hns_nic_net_timeout(struct net_device *ndev)
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
if (!netif_running(netdev))
return -EINVAL;
@@ -2017,9 +2013,8 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
- if (priv->phy)
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
if (!IS_ERR_OR_NULL(priv->ae_handle))
hnae_put_handle(priv->ae_handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 44bb3015eed3..5b412de350aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -59,7 +59,6 @@ struct hns_nic_priv {
u32 port_id;
int phy_mode;
int phy_led_val;
- struct phy_device *phy;
struct net_device *netdev;
struct device *dev;
struct hnae_handle *ae_handle;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab33487a5321..47e59bbfd061 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -48,9 +48,9 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
- if (priv->phy) {
- if (!genphy_read_status(priv->phy))
- link_stat = priv->phy->link;
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
else
link_stat = 0;
}
@@ -64,15 +64,14 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
}
static void hns_get_mdix_mode(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
int mdix_ctrl, mdix, retval, is_resolved;
- struct hns_nic_priv *priv = netdev_priv(net_dev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = net_dev->phydev;
if (!phy_dev || !phy_dev->mdio.bus) {
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
return;
}
@@ -89,35 +88,35 @@ static void hns_get_mdix_mode(struct net_device *net_dev,
switch (mdix_ctrl) {
case 0x0:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
break;
case 0x1:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
break;
case 0x3:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
break;
default:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
break;
}
if (!is_resolved)
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
else if (mdix)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
/**
- *hns_nic_get_settings - implement ethtool get settings
+ *hns_nic_get_link_ksettings - implement ethtool get link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*retuen 0 - success , negative --fail
*/
-static int hns_nic_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -125,6 +124,7 @@ static int hns_nic_get_settings(struct net_device *net_dev,
int ret;
u8 duplex;
u16 speed;
+ u32 supported, advertising;
if (!priv || !priv->ae_handle)
return -ESRCH;
@@ -139,38 +139,43 @@ static int hns_nic_get_settings(struct net_device *net_dev,
return -EINVAL;
}
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
/* When there is no phy, autoneg is off. */
- cmd->autoneg = false;
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.autoneg = false;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
- if (priv->phy)
- (void)phy_ethtool_gset(priv->phy, cmd);
+ if (net_dev->phydev)
+ (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
- ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- if (cmd->autoneg)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
- cmd->supported |= h->if_support;
+ supported |= h->if_support;
if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
} else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
}
switch (h->media_type) {
case HNAE_MEDIA_TYPE_FIBER:
- cmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case HNAE_MEDIA_TYPE_COPPER:
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
case HNAE_MEDIA_TYPE_UNKNOWN:
default:
@@ -178,23 +183,27 @@ static int hns_nic_get_settings(struct net_device *net_dev,
}
if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
- cmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
hns_get_mdix_mode(net_dev, cmd);
return 0;
}
/**
- *hns_nic_set_settings - implement ethtool set settings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*retuen 0 - success , negative --fail
*/
-static int hns_nic_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -208,24 +217,25 @@ static int hns_nic_set_settings(struct net_device *net_dev,
return -ENODEV;
h = priv->ae_handle;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
- cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
return -EINVAL;
- if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if ((speed != SPEED_10 && speed != SPEED_100 &&
- speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL))
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
} else {
netdev_err(net_dev, "Not supported!");
@@ -233,7 +243,7 @@ static int hns_nic_set_settings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
- h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
return 0;
}
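
A condensed sketch of the conversion idiom used by the new handlers above: the ksettings API keeps supported/advertising as long bitmaps, so code that still reasons in the legacy u32 masks round-trips through two helpers (the function below is illustrative only):

static void example_add_supported_modes(struct ethtool_link_ksettings *cmd)
{
	u32 supported;

	/* bitmap -> legacy u32, OR in legacy flags, then convert back */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	supported |= SUPPORTED_TP | SUPPORTED_1000baseT_Full;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
}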
@@ -305,7 +315,7 @@ static int __lb_setup(struct net_device *ndev,
{
int ret = 0;
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = ndev->phydev;
struct hnae_handle *h = priv->ae_handle;
switch (loop) {
@@ -910,7 +920,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
ETH_GSTRING_LEN);
buff += ETH_GSTRING_LEN;
- if ((priv->phy) && (!priv->phy->is_c45))
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
ETH_GSTRING_LEN);
@@ -996,7 +1006,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
- if ((!priv->phy) || (priv->phy->is_c45))
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
cnt--;
return cnt;
@@ -1015,8 +1025,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
@@ -1039,7 +1048,7 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
int ret;
if (phy_dev)
@@ -1159,8 +1168,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
static int hns_nic_nway_reset(struct net_device *netdev)
{
int ret = 0;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
if (phy)
@@ -1264,11 +1272,9 @@ static int hns_get_rxnfc(struct net_device *netdev,
return 0;
}
-static struct ethtool_ops hns_ethtool_ops = {
+static const struct ethtool_ops hns_ethtool_ops = {
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
- .get_settings = hns_nic_get_settings,
- .set_settings = hns_nic_set_settings,
.get_ringparam = hns_get_ringparam,
.get_pauseparam = hns_get_pauseparam,
.set_pauseparam = hns_set_pauseparam,
@@ -1288,6 +1294,8 @@ static struct ethtool_ops hns_ethtool_ops = {
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
.get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
};
void hns_ethtool_set_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index befb4ac3e2b0..ce235b776793 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -89,10 +89,10 @@ static char version[] __initdata =
#define DEB(x,y) if (i596_debug & (x)) y
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME16x_NET)
#define ENABLE_MVME16x_NET
#endif
-#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#if IS_ENABLED(CONFIG_BVME6000_NET)
#define ENABLE_BVME6000_NET
#endif
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7af09cbc53f0..8f139197f1aa 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2780,7 +2780,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
dev->emac_irq = irq_of_parse_and_map(np, 0);
dev->wol_irq = irq_of_parse_and_map(np, 1);
- if (dev->emac_irq == NO_IRQ) {
+ if (!dev->emac_irq) {
printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
goto err_free;
}
@@ -2943,9 +2943,9 @@ static int emac_probe(struct platform_device *ofdev)
err_reg_unmap:
iounmap(dev->emacp);
err_irq_unmap:
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
err_free:
free_netdev(ndev);
@@ -2987,9 +2987,9 @@ static int emac_remove(struct platform_device *ofdev)
emac_dbg_unregister(dev);
iounmap(dev->emacp);
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fdb5cdb3cd15..aaf6fec566b5 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -597,9 +597,8 @@ static int mal_probe(struct platform_device *ofdev)
mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
- if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
- mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
- mal->rxde_irq == NO_IRQ) {
+ if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
+ !mal->txde_irq || !mal->rxde_irq) {
printk(KERN_ERR
"mal%d: failed to map interrupts !\n", index);
err = -ENODEV;
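
The NO_IRQ removals above rely on irq_of_parse_and_map() returning 0 when no mapping could be created, so a plain boolean test is the portable failure check; a minimal sketch:

static int example_map_irq(struct device_node *np)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)	/* 0 means "no interrupt", no arch NO_IRQ needed */
		return -ENODEV;
	return irq;
}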
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 88f3c85fb04a..bfe17d9c022d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- send_request_unmap(adapter, ltb->map_id);
+ if (!adapter->failover)
+ send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
@@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_tx_stop_all_queues(netdev);
+ if (!adapter->failover)
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1422,7 +1424,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
scrq = adapter->tx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_tx_irq_failed;
@@ -1442,7 +1444,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
scrq = adapter->rx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
@@ -2777,12 +2779,6 @@ static void handle_control_ras_rsp(union ibmvnic_crq *crq,
}
}
-static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
loff_t *ppos)
{
@@ -2834,7 +2830,7 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
static const struct file_operations trace_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_read,
};
@@ -2884,7 +2880,7 @@ static ssize_t paused_write(struct file *file, const char __user *user_buf,
static const struct file_operations paused_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = paused_read,
.write = paused_write,
};
@@ -2932,7 +2928,7 @@ static ssize_t tracing_write(struct file *file, const char __user *user_buf,
static const struct file_operations tracing_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = tracing_read,
.write = tracing_write,
};
@@ -2985,7 +2981,7 @@ static ssize_t error_level_write(struct file *file, const char __user *user_buf,
static const struct file_operations error_level_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = error_level_read,
.write = error_level_write,
};
@@ -3036,7 +3032,7 @@ static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
static const struct file_operations trace_level_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_level_read,
.write = trace_level_write,
};
@@ -3089,7 +3085,7 @@ static ssize_t trace_buff_size_write(struct file *file,
static const struct file_operations trace_size_ops = {
.owner = THIS_MODULE,
- .open = ibmvnic_fw_comp_open,
+ .open = simple_open,
.read = trace_buff_size_read,
.write = trace_buff_size_write,
};
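
simple_open() is a drop-in replacement here because all the removed ibmvnic_fw_comp_open() helper did was copy inode->i_private into file->private_data, which is exactly what simple_open() does; an illustrative sketch of creating one of these debugfs files:

static void example_create_trace_file(struct dentry *dir, void *priv)
{
	/* priv comes back to trace_read() via file->private_data,
	 * courtesy of simple_open()
	 */
	debugfs_create_file("trace", 0644, dir, priv, &trace_ops);
}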
@@ -3280,6 +3276,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
rc = ibmvnic_send_crq_init(adapter);
if (rc)
dev_err(dev, "Error sending init rc=%ld\n", rc);
+ } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
+ dev_info(dev, "Backing device failover detected\n");
+ netif_carrier_off(netdev);
+ adapter->failover = true;
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -3615,8 +3615,18 @@ static void handle_crq_init_rsp(struct work_struct *work)
struct device *dev = &adapter->vdev->dev;
struct net_device *netdev = adapter->netdev;
unsigned long timeout = msecs_to_jiffies(30000);
+ bool restart = false;
int rc;
+ if (adapter->failover) {
+ release_sub_crqs(adapter);
+ if (netif_running(netdev)) {
+ netif_tx_disable(netdev);
+ ibmvnic_close(netdev);
+ restart = true;
+ }
+ }
+
send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -3645,6 +3655,17 @@ static void handle_crq_init_rsp(struct work_struct *work)
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ if (adapter->failover) {
+ adapter->failover = false;
+ if (restart) {
+ rc = ibmvnic_open(netdev);
+ if (rc)
+ goto restart_failed;
+ }
+ netif_carrier_on(netdev);
+ return;
+ }
+
rc = register_netdev(netdev);
if (rc) {
dev_err(dev,
@@ -3655,6 +3676,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
return;
+restart_failed:
+ dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
release_sub_crqs(adapter);
task_failed:
@@ -3692,6 +3715,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
+ adapter->failover = false;
ether_addr_copy(adapter->mac_addr, mac_addr_p);
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -3721,6 +3745,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(&dev->dev, "Couldn't map stats buffer\n");
+ rc = -ENOMEM;
goto free_crq;
}
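
A condensed sketch of the failover handling added above, which is split between the CRQ interrupt path and the init-response worker (the wrapper below is illustrative only):

static void example_note_failover(struct ibmvnic_adapter *adapter)
{
	/* fast path: only record the event and drop the carrier */
	adapter->failover = true;
	netif_carrier_off(adapter->netdev);

	/* handle_crq_init_rsp() later releases the sub-CRQs, closes the
	 * device if it was running, renegotiates resources with the new
	 * backing device, reopens it and turns the carrier back on.
	 */
}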
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index e82898fd518e..bfc84c7d0e11 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -830,6 +830,7 @@ enum ibmvfc_crq_format {
IBMVNIC_CRQ_INIT = 0x01,
IBMVNIC_CRQ_INIT_COMPLETE = 0x02,
IBMVNIC_PARTITION_MIGRATED = 0x06,
+ IBMVNIC_DEVICE_FAILOVER = 0x08,
};
struct ibmvnic_crq_queue {
@@ -1047,4 +1048,5 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ bool failover;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 2e1b17ad52a3..ad03763e009a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -334,7 +334,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_err("ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
e_info("registered PHC clock\n");
}
}
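
The extra check matters because ptp_clock_register() returns an ERR_PTR() on failure but NULL when PTP clock support is compiled out, so only a non-NULL, non-error pointer means a PHC was actually registered; a sketch of the pattern with a hypothetical adapter structure:

struct example_adapter {
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_clock_info;
};

static void example_register_phc(struct example_adapter *adapter,
				 struct device *dev)
{
	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		dev_err(dev, "ptp_clock_register failed\n");
	} else if (adapter->ptp_clock) {
		/* NULL without an error means PTP support is not built in */
		dev_info(dev, "registered PHC clock\n");
	}
}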
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index c4cf08dcf5af..4d19e46f7c55 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -240,9 +240,7 @@ struct fm10k_iov_data {
struct fm10k_vf_info vf_info[0];
};
-#define fm10k_vxlan_port_for_each(vp, intfc) \
- list_for_each_entry(vp, &(intfc)->vxlan_port, list)
-struct fm10k_vxlan_port {
+struct fm10k_udp_port {
struct list_head list;
sa_family_t sa_family;
__be16 port;
@@ -335,8 +333,9 @@ struct fm10k_intfc {
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
- /* VXLAN port tracking information */
+ /* UDP encapsulation port tracking information */
struct list_head vxlan_port;
+ struct list_head geneve_port;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
@@ -458,7 +457,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
@@ -496,7 +495,6 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
-u32 fm10k_get_reta_size(struct net_device *netdev);
void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
@@ -509,7 +507,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
- int vf_idx, u16 vid, u8 qos);
+ int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index d6baaea8bc7c..dd95ac4f4c64 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -207,6 +207,9 @@ s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
/* clear tx_ready to prevent any false hits for reset */
hw->mac.tx_ready = false;
+ if (FM10K_REMOVED(hw->hw_addr))
+ return 0;
+
/* clear the enable bit for all rings */
for (i = 0; i < q_cnt; i++) {
reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 50f71e997448..d51f9c7a47ff 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -34,7 +34,7 @@ u32 fm10k_read_reg(struct fm10k_hw *hw, int reg);
/* write operations, indexed using DWORDS */
#define fm10k_write_reg(hw, reg, val) \
do { \
- u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u32 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!FM10K_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
@@ -42,7 +42,7 @@ do { \
/* Switch register write operations, index using DWORDS */
#define fm10k_write_sw_reg(hw, reg, val) \
do { \
- u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \
+ u32 __iomem *sw_addr = READ_ONCE((hw)->sw_addr); \
if (!FM10K_REMOVED(sw_addr)) \
writel((val), &sw_addr[(reg)]); \
} while (0)
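
READ_ONCE() is the preferred replacement for ACCESS_ONCE() loads and, unlike ACCESS_ONCE(), also works on non-scalar types; a trivial sketch of the snapshot pattern these macros implement:

static inline u32 __iomem *example_snapshot_hw_addr(struct fm10k_hw *hw)
{
	/* one volatile load, so the compiler cannot re-read hw->hw_addr
	 * between the FM10K_REMOVED() test and the MMIO access
	 */
	return READ_ONCE(hw->hw_addr);
}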
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c04cbe9c9f7c..5241e0873397 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -966,7 +966,7 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
return 0;
}
-u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
@@ -1182,6 +1182,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 47f0743ec03b..5f4dac0d36ef 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -51,7 +51,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
int i;
/* if there is no iov_data then there is no mailbox to process */
- if (!ACCESS_ONCE(interface->iov_data))
+ if (!READ_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
@@ -99,7 +99,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
int i;
/* if there is no iov_data then there is no mailbox to process */
- if (!ACCESS_ONCE(interface->iov_data))
+ if (!READ_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
@@ -445,7 +445,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
}
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -460,6 +460,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
if (qos || (vid > (VLAN_VID_MASK - 1)))
return -EINVAL;
+ /* only the default 802.1Q VLAN protocol is supported for VFs */
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
vf_info = &iov_data->vf_info[vf_idx];
/* exit if there is nothing to do */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e9767b6366a8..5de937852436 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -56,7 +56,8 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_driver_name);
fm10k_dbg_init();
@@ -651,11 +652,11 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
struct fm10k_intfc *interface = netdev_priv(skb->dev);
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *vxlan_port;
/* we can only offload a vxlan if we recognize it as such */
vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
+ struct fm10k_udp_port, list);
if (!vxlan_port)
return NULL;
@@ -1128,13 +1129,24 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
return ring->stats.packets;
}
-u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+/**
+ * fm10k_get_tx_pending - how many Tx descriptors not processed
+ * @ring: the ring structure
+ * @in_sw: is tx_pending being checked in SW or in HW?
+ */
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{
struct fm10k_intfc *interface = ring->q_vector->interface;
struct fm10k_hw *hw = &interface->hw;
+ u32 head, tail;
- u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
- u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+ if (likely(in_sw)) {
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
+ } else {
+ head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+ tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
+ }
return ((head <= tail) ? tail : tail + ring->count) - head;
}
@@ -1143,7 +1155,7 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
u32 tx_done = fm10k_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = fm10k_get_tx_pending(tx_ring);
+ u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
clear_check_for_tx_hang(tx_ring);
@@ -1397,7 +1409,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* that the calculation will never get below a 1. The bit shift
* accounts for changes in the ITR due to PCIe link speed.
*/
- itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
+ itr_round = READ_ONCE(ring_container->itr_scale) + 8;
avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
@@ -1473,7 +1485,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
/* re-enable the q_vector */
fm10k_qv_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
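
The new return value follows the NAPI contract: once a poll routine has called napi_complete() it must report strictly less than its budget, which the clamp guarantees even if the per-ring budgets added up to exactly the full budget; a minimal sketch:

static int example_poll(struct napi_struct *napi, int budget)
{
	bool clean_complete = true;
	int work_done = 0;

	/* ... clean Tx/Rx rings, updating work_done and clean_complete ... */

	if (!clean_complete)
		return budget;		/* keep polling */

	napi_complete(napi);
	/* never report the full budget once napi_complete() was called */
	return min(work_done, budget - 1);
}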
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 20a5bbe3f536..05629381be6b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -384,129 +384,171 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface)
}
/**
- * fm10k_del_vxlan_port_all
+ * fm10k_free_udp_port_info
* @interface: board private structure
*
- * This function frees the entire vxlan_port list
+ * This function frees both geneve_port and vxlan_port structures
**/
-static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface)
+static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
{
- struct fm10k_vxlan_port *vxlan_port;
-
- /* flush all entries from list */
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
- while (vxlan_port) {
- list_del(&vxlan_port->list);
- kfree(vxlan_port);
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port,
- list);
+ struct fm10k_udp_port *port;
+
+ /* flush all entries from vxlan list */
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port, list);
+ while (port) {
+ list_del(&port->list);
+ kfree(port);
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port,
+ list);
+ }
+
+ /* flush all entries from geneve list */
+ port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port, list);
+ while (port) {
+ list_del(&port->list);
+ kfree(port);
+ port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port,
+ list);
}
}
/**
- * fm10k_restore_vxlan_port
+ * fm10k_restore_udp_port_info
* @interface: board private structure
*
- * This function restores the value in the tunnel_cfg register after reset
+ * This function restores the value in the tunnel_cfg register(s) after reset
**/
-static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
+static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *port;
/* only the PF supports configuring tunnels */
if (hw->mac.type != fm10k_mac_pf)
return;
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_vxlan_port, list);
+ port = list_first_entry_or_null(&interface->vxlan_port,
+ struct fm10k_udp_port, list);
/* restore tunnel configuration register */
fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
- (vxlan_port ? ntohs(vxlan_port->port) : 0) |
+ (port ? ntohs(port->port) : 0) |
(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
+
+ port = list_first_entry_or_null(&interface->geneve_port,
+ struct fm10k_udp_port, list);
+
+ /* restore Geneve tunnel configuration register */
+ fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
+ (port ? ntohs(port->port) : 0));
+}
+
+static struct fm10k_udp_port *
+fm10k_remove_tunnel_port(struct list_head *ports,
+ struct udp_tunnel_info *ti)
+{
+ struct fm10k_udp_port *port;
+
+ list_for_each_entry(port, ports, list) {
+ if ((port->port == ti->port) &&
+ (port->sa_family == ti->sa_family)) {
+ list_del(&port->list);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+static void fm10k_insert_tunnel_port(struct list_head *ports,
+ struct udp_tunnel_info *ti)
+{
+ struct fm10k_udp_port *port;
+
+ /* remove existing port entry from the list so that the newest items
+ * are always at the tail of the list.
+ */
+ port = fm10k_remove_tunnel_port(ports, ti);
+ if (!port) {
+ port = kmalloc(sizeof(*port), GFP_ATOMIC);
+ if (!port)
+ return;
+ port->port = ti->port;
+ port->sa_family = ti->sa_family;
+ }
+
+ list_add_tail(&port->list, ports);
}
/**
- * fm10k_add_vxlan_port
+ * fm10k_udp_tunnel_add
* @netdev: network interface device structure
* @ti: Tunnel endpoint information
*
- * This function is called when a new VXLAN interface has added a new port
- * number to the range that is currently in use for VXLAN. The new port
- * number is always added to the tail so that the port number list should
- * match the order in which the ports were allocated. The head of the list
- * is always used as the VXLAN port number for offloads.
+ * This function is called when a new UDP tunnel port has been added.
+ * Due to hardware restrictions, only one port per type can be offloaded at
+ * once.
**/
-static void fm10k_add_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_add(struct net_device *dev,
struct udp_tunnel_info *ti)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_vxlan_port *vxlan_port;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
/* only the PF supports configuring tunnels */
if (interface->hw.mac.type != fm10k_mac_pf)
return;
- /* existing ports are pulled out so our new entry is always last */
- fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == ti->port) &&
- (vxlan_port->sa_family == ti->sa_family)) {
- list_del(&vxlan_port->list);
- goto insert_tail;
- }
- }
-
- /* allocate memory to track ports */
- vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
- if (!vxlan_port)
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ fm10k_insert_tunnel_port(&interface->vxlan_port, ti);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ fm10k_insert_tunnel_port(&interface->geneve_port, ti);
+ break;
+ default:
return;
- vxlan_port->port = ti->port;
- vxlan_port->sa_family = ti->sa_family;
-
-insert_tail:
- /* add new port value to list */
- list_add_tail(&vxlan_port->list, &interface->vxlan_port);
+ }
- fm10k_restore_vxlan_port(interface);
+ fm10k_restore_udp_port_info(interface);
}
/**
- * fm10k_del_vxlan_port
+ * fm10k_udp_tunnel_del
* @netdev: network interface device structure
* @ti: Tunnel endpoint information
*
- * This function is called when a new VXLAN interface has freed a port
- * number from the range that is currently in use for VXLAN. The freed
- * port is removed from the list and the new head is used to determine
- * the port number for offloads.
+ * This function is called when a UDP tunnel port is deleted. The freed
+ * port will be removed from the list, then we reprogram the offloaded port
+ * based on the head of the list.
**/
-static void fm10k_del_vxlan_port(struct net_device *dev,
+static void fm10k_udp_tunnel_del(struct net_device *dev,
struct udp_tunnel_info *ti)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_vxlan_port *vxlan_port;
+ struct fm10k_udp_port *port = NULL;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
if (interface->hw.mac.type != fm10k_mac_pf)
return;
- /* find the port in the list and free it */
- fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == ti->port) &&
- (vxlan_port->sa_family == ti->sa_family)) {
- list_del(&vxlan_port->list);
- kfree(vxlan_port);
- break;
- }
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ port = fm10k_remove_tunnel_port(&interface->geneve_port, ti);
+ break;
+ default:
+ return;
}
- fm10k_restore_vxlan_port(interface);
+ /* if we did remove a port we need to free its memory */
+ kfree(port);
+
+ fm10k_restore_udp_port_info(interface);
}
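
The list handling above keeps the currently offloaded port at the head of each list: re-adding an existing entry moves it to the tail, so the oldest still-active port stays programmed into the hardware. A minimal illustrative helper for reading that head entry:

static __be16 example_offloaded_port(struct list_head *ports)
{
	struct fm10k_udp_port *port;

	port = list_first_entry_or_null(ports, struct fm10k_udp_port, list);
	return port ? port->port : 0;
}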
/**
@@ -555,7 +597,6 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
- /* update VXLAN port configuration */
udp_tunnel_get_rx_info(netdev);
fm10k_up(interface);
@@ -591,7 +632,7 @@ int fm10k_close(struct net_device *netdev)
fm10k_qv_free_irq(interface);
- fm10k_del_vxlan_port_all(interface);
+ fm10k_free_udp_port_info(interface);
fm10k_free_all_tx_resources(interface);
fm10k_free_all_rx_resources(interface);
@@ -1055,7 +1096,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
interface->xcast_mode = xcast_mode;
/* Restore tunnel configuration */
- fm10k_restore_vxlan_port(interface);
+ fm10k_restore_udp_port_info(interface);
}
void fm10k_reset_rx_state(struct fm10k_intfc *interface)
@@ -1098,7 +1139,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = ACCESS_ONCE(interface->rx_ring[i]);
+ ring = READ_ONCE(interface->rx_ring[i]);
if (!ring)
continue;
@@ -1114,7 +1155,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
}
for (i = 0; i < interface->num_tx_queues; i++) {
- ring = ACCESS_ONCE(interface->tx_ring[i]);
+ ring = READ_ONCE(interface->tx_ring[i]);
if (!ring)
continue;
@@ -1299,7 +1340,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel);
+ struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
struct net_device *sdev = priv;
@@ -1375,8 +1416,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
- .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
- .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
+ .ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
+ .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 774a5654bf42..b1a2f8437d59 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -62,7 +62,7 @@ u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
- u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (FM10K_REMOVED(hw_addr))
@@ -133,7 +133,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
/* check the real address space to see if we've recovered */
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
- if ((~value)) {
+ if (~value) {
interface->hw.hw_addr = interface->uc_addr;
netif_device_attach(netdev);
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
@@ -734,15 +734,15 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
u64 rdba = ring->dma;
struct fm10k_hw *hw = &interface->hw;
u32 size = ring->count * sizeof(union fm10k_rx_desc);
- u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
- u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
+ u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
u32 rxint = FM10K_INT_MAP_DISABLE;
u8 rx_pause = interface->rx_pause;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
- fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
+ rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+ rxqctl &= ~FM10K_RXQCTL_ENABLE;
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
@@ -797,6 +797,8 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);
/* enable queue */
+ rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
+ rxqctl |= FM10K_RXQCTL_ENABLE;
fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
/* place buffers on ring for receive data */
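
Switching to a read-modify-write of RXQCTL means only the enable bit is touched while the queue is being reconfigured, so fields programmed elsewhere in the register survive; a small sketch of the idiom (helper name is illustrative):

static void example_set_rx_queue_enable(struct fm10k_hw *hw, u8 reg_idx,
					bool enable)
{
	u32 rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));

	if (enable)
		rxqctl |= FM10K_RXQCTL_ENABLE;
	else
		rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
}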
@@ -1699,7 +1701,7 @@ void fm10k_down(struct fm10k_intfc *interface)
/* start checking at the last ring to have pending Tx */
for (; i < interface->num_tx_queues; i++)
- if (fm10k_get_tx_pending(interface->tx_ring[i]))
+ if (fm10k_get_tx_pending(interface->tx_ring[i], false))
break;
/* if all the queues are drained, we can break now */
@@ -1835,8 +1837,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->tx_itr = FM10K_TX_ITR_DEFAULT;
interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
- /* initialize vxlan_port list */
+ /* initialize udp port lists */
INIT_LIST_HEAD(&interface->vxlan_port);
+ INIT_LIST_HEAD(&interface->geneve_port);
netdev_rss_key_fill(rss_key, sizeof(rss_key));
memcpy(interface->rssrk, rss_key, sizeof(rss_key));
@@ -1950,9 +1953,18 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct fm10k_intfc *interface;
int err;
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
err = pci_enable_device_mem(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev,
+ "PCI enable device failed: %d\n", err);
return err;
+ }
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err)
@@ -2275,7 +2287,7 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result;
- if (pci_enable_device_mem(pdev)) {
+ if (pci_reenable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 682299dd0ce4..23fb319fd2a0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -867,10 +867,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
qmap_idx = qmap_stride * vf_idx;
- /* MAP Tx queue back to 0 temporarily, and disable it */
- fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
- fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
-
/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
* used here to indicate to the VF that it will not have privilege to
* write VLAN_TABLE. All policy is enforced on the PF but this allows
@@ -886,9 +882,35 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
vf_info->mac, vf_vid);
- /* load onto outgoing mailbox, ignore any errors on enqueue */
- if (vf_info->mbx.ops.enqueue_tx)
- vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* try loading a message onto outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = 0;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
+ fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
/* verify ring has disabled before modifying base address registers */
txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
@@ -927,16 +949,6 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
FM10K_TDLEN_ITR_SCALE_SHIFT);
err_out:
- /* configure Queue control register */
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
- txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
- FM10K_TXQCTL_VF | vf_idx;
-
- /* assign VLAN ID */
- for (i = 0; i < queues_per_pool; i++)
- fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
-
/* restore the queue back to VF ownership */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
return err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index f4e75c498287..6bb16c13d9d6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -154,6 +154,7 @@ struct fm10k_hw;
#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
#define FM10K_TUNNEL_CFG 0x0040
#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
#define FM10K_SWPRI_MAX 16
#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2a882916b4f6..2030d7c1dc94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -65,76 +65,72 @@
#include "i40e_dcb.h"
/* Useful i40e defaults */
-#define I40E_MAX_VEB 16
-
-#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
-#define I40E_DEFAULT_NUM_DESCRIPTORS 512
-#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
-#define I40E_MIN_NUM_DESCRIPTORS 64
-#define I40E_MIN_MSIX 2
-#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
-#define I40E_DEFAULT_QUEUES_PER_VF 4
-#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define i40e_pf_get_max_q_per_tc(pf) \
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
-#define I40E_FDIR_RING 0
-#define I40E_FDIR_RING_COUNT 32
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
+#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
+#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
-#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 256
-#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
-#define I40E_MAX_USER_PRIORITY 8
-#define I40E_DEFAULT_MSG_ENABLE 4
-#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
-#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 256
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_MSG_ENABLE 4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
-#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
-#define I40E_OEM_VER_BUILD_MASK 0xffff
-#define I40E_OEM_VER_PATCH_MASK 0xff
-#define I40E_OEM_VER_BUILD_SHIFT 8
-#define I40E_OEM_VER_SHIFT 24
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
#define I40E_PHY_DEBUG_ALL \
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
-#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x40
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
-/* magic for getting defines into strings */
-#define STRINGIFY(foo) #foo
-#define XSTRINGIFY(bar) STRINGIFY(bar)
-
-#define I40E_RX_DESC(R, i) \
+#define I40E_RX_DESC(R, i) \
(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
-#define I40E_TX_DESC(R, i) \
+#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
-#define I40E_TX_CTXTDESC(R, i) \
+#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define I40E_TX_FDIRDESC(R, i) \
+#define I40E_TX_FDIRDESC(R, i) \
(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
/* default to trying for four seconds */
-#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/**
* i40e_is_mac_710 - Return true if MAC is X710/XL710
@@ -199,9 +195,9 @@ struct i40e_lump_tracking {
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
-#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
-#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -387,8 +383,8 @@ struct i40e_pf {
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
u16 lan_veb; /* initial relay, if exists */
-#define I40E_NO_VEB 0xffff
-#define I40E_NO_VSI 0xffff
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
u16 next_vsi; /* Next unallocated VSI - 0-based! */
struct i40e_vsi **vsi;
struct i40e_veb *veb[I40E_MAX_VEB];
@@ -423,8 +419,8 @@ struct i40e_pf {
*/
u16 dcbx_cap;
- u32 fcoe_hmc_filt_num;
- u32 fcoe_hmc_cntx_num;
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock;
@@ -470,10 +466,10 @@ struct i40e_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
+ u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
- u16 stats_idx; /* index of VEB parent */
+ u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
u16 flags;
@@ -534,12 +530,13 @@ struct i40e_vsi {
u32 promisc_threshold;
u16 work_limit;
- u16 int_rate_limit; /* value in usecs */
+ u16 int_rate_limit; /* value in usecs */
+
+ u16 rss_table_size; /* HW RSS table size */
+ u16 rss_size; /* Allocated RSS queues */
+ u8 *rss_hkey_user; /* User configured hash keys */
+ u8 *rss_lut_user; /* User configured lookup table entries */
- u16 rss_table_size; /* HW RSS table size */
- u16 rss_size; /* Allocated RSS queues */
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
u16 rx_buf_len;
@@ -550,14 +547,14 @@ struct i40e_vsi {
int base_vector;
bool irqs_ready;
- u16 seid; /* HW index of this VSI (absolute index) */
- u16 id; /* VSI number */
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
u16 uplink_seid;
- u16 base_queue; /* vsi's first queue in hw array */
- u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
- u16 req_queue_pairs; /* User requested queue pairs */
- u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */
@@ -576,19 +573,16 @@ struct i40e_vsi {
/* TC BW limit max quanta within VSI */
u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
- struct i40e_pf *back; /* Backreference to associated PF */
- u16 idx; /* index in pf->vsi[] */
- u16 veb_idx; /* index of VEB parent */
- struct kobject *kobj; /* sysfs object */
- bool current_isup; /* Sync 'link up' logging */
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
void *priv; /* client driver data reference. */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
-
- /* current rxnfc data */
- struct ethtool_rxnfc rxnfc; /* current rss hash opts */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -707,6 +701,8 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -714,8 +710,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 11cf1a5ebccf..67e396b2b347 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -204,6 +204,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -450,13 +453,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -471,17 +476,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -1582,6 +1589,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 618f18436618..250db0b244b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,6 +148,11 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
"Cannot locate client instance virtual channel receive routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+ continue;
+ }
cdev->client->ops->virtchnl_receive(&cdev->lan_info,
cdev->client,
vf_id, msg, len);
@@ -181,6 +186,11 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ continue;
+ }
cdev->lan_info.params = params;
cdev->client->ops->l2_param_change(&cdev->lan_info,
cdev->client,
@@ -306,6 +316,11 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
"Cannot locate client instance VF reset routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
+ continue;
+ }
cdev->client->ops->vf_reset(&cdev->lan_info,
cdev->client, vf_id);
}
@@ -336,6 +351,11 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
"Cannot locate client instance VF enable routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
+ continue;
+ }
cdev->client->ops->vf_enable(&cdev->lan_info,
cdev->client, num_vfs);
}
@@ -370,6 +390,11 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
"Cannot locate client instance VF capability routine\n");
continue;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
+ continue;
+ }
capable = cdev->client->ops->vf_capable(&cdev->lan_info,
cdev->client,
vf_id);
@@ -559,6 +584,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
pf->hw.bus.device, pf->hw.bus.func);
}
+ mutex_lock(&i40e_client_instance_mutex);
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
if (client->ops && client->ops->open)
@@ -568,10 +594,12 @@ void i40e_client_subtask(struct i40e_pf *pf)
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
} else {
/* remove client instance */
+ mutex_unlock(&i40e_client_instance_mutex);
i40e_client_del_instance(pf, client);
atomic_dec(&client->ref_cnt);
continue;
}
+ mutex_unlock(&i40e_client_instance_mutex);
}
mutex_unlock(&i40e_client_mutex);
}
@@ -654,7 +682,7 @@ int i40e_lan_del_device(struct i40e_pf *pf)
static int i40e_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cdev, *tmp;
- struct i40e_pf *pf = NULL;
+ struct i40e_pf *pf;
int ret = 0;
LIST_HEAD(cdevs_tmp);
@@ -664,12 +692,12 @@ static int i40e_client_release(struct i40e_client *client)
if (strncmp(cdev->client->name, client->name,
I40E_CLIENT_STR_LENGTH))
continue;
+ pf = (struct i40e_pf *)cdev->lan_info.pf;
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (atomic_read(&cdev->ref_cnt) > 0) {
ret = I40E_ERR_NOT_READY;
goto out;
}
- pf = (struct i40e_pf *)cdev->lan_info.pf;
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
@@ -681,8 +709,7 @@ static int i40e_client_release(struct i40e_client *client)
client->name, pf->hw.pf_id);
}
/* delete the client instance from the list */
- list_del(&cdev->list);
- list_add(&cdev->list, &cdevs_tmp);
+ list_move(&cdev->list, &cdevs_tmp);
atomic_dec(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
@@ -811,7 +838,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
wr32(hw, I40E_PFINT_AEQCTL, reg);
}
}
-
+ /* Mitigate sync problems with iwarp VF driver */
+ i40e_flush(hw);
return 0;
err:
kfree(ldev->qvlist_info);
@@ -1009,7 +1037,6 @@ int i40e_unregister_client(struct i40e_client *client)
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index a4601d97fb24..38a6c36a6a0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -36,9 +36,9 @@
#define I40E_CLIENT_VERSION_MINOR 01
#define I40E_CLIENT_VERSION_BUILD 00
#define I40E_CLIENT_VERSION_STR \
- XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
- XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
- XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
+ __stringify(I40E_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40E_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40E_CLIENT_VERSION_BUILD)
struct i40e_client_version {
u8 major;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 05cf9a719bab..0c1875b5b16d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1054,6 +1054,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
+ u32 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -1063,8 +1064,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+
ret = i40e_aq_query_port_ets_config(&pf->hw,
- pf->mac_seid,
+ switch_id,
bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1425,84 +1430,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
kfree(desc);
desc = NULL;
- } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
- (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_filter fd_data;
- u16 packet_len, i, j = 0;
- char *asc_packet;
- u8 *raw_packet;
- bool add = false;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
-
- if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
- if (!asc_packet)
- goto command_write_done;
-
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
-
- if (!raw_packet) {
- kfree(asc_packet);
- asc_packet = NULL;
- goto command_write_done;
- }
-
- cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
- &fd_data.q_index,
- &fd_data.flex_off, &fd_data.pctype,
- &fd_data.dest_vsi, &fd_data.dest_ctl,
- &fd_data.fd_status, &fd_data.cnt_index,
- &fd_data.fd_id, &packet_len, asc_packet);
- if (cnt != 10) {
- dev_info(&pf->pdev->dev,
- "program fd_filter: bad command string, cnt=%d\n",
- cnt);
- kfree(asc_packet);
- asc_packet = NULL;
- kfree(raw_packet);
- goto command_write_done;
- }
-
- /* fix packet length if user entered 0 */
- if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
-
- /* make sure to check the max as well */
- packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
-
- for (i = 0; i < packet_len; i++) {
- cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
- if (!cnt)
- break;
- j += 3;
- }
- dev_info(&pf->pdev->dev, "FD raw packet dump\n");
- print_hex_dump(KERN_INFO, "FD raw packet: ",
- DUMP_PREFIX_OFFSET, 16, 1,
- raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
- if (!ret) {
- dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
- } else {
- dev_info(&pf->pdev->dev,
- "Filter command send failed %d\n", ret);
- }
- kfree(raw_packet);
- raw_packet = NULL;
- kfree(asc_packet);
- asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
@@ -1727,8 +1654,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " globr\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
- dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c912e041d102..92bc8846f1ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1560,13 +1560,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
#endif
for (i = 0; i < vsi->num_queue_pairs; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
@@ -1581,16 +1581,16 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_packets", i);
+ "veb.tc_%d_tx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_bytes", i);
+ "veb.tc_%d_tx_bytes", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_packets", i);
+ "veb.tc_%d_rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_bytes", i);
+ "veb.tc_%d_rx_bytes", i);
p += ETH_GSTRING_LEN;
}
}
@@ -1601,23 +1601,23 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
+ "port.tx_priority_%d_xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
+ "port.tx_priority_%d_xoff", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
+ "port.rx_priority_%d_xon", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
+ "port.rx_priority_%d_xoff", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ "port.rx_priority_%d_xon_2_xoff", i);
p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
@@ -1970,11 +1970,22 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
+ * are per queue. If queue is <0 then we default to queue 0 as the
+ * representative value.
+ **/
static int __i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
@@ -1989,14 +2000,18 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2010,18 +2025,44 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if the user has
+ * modified per-queue settings, this only guarantees to represent queue 0. See
+ * __i40e_get_coalesce for more details.
+ **/
static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, queue);
}
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2060,6 +2101,14 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
i40e_flush(hw);
}
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
static int __i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
@@ -2120,12 +2169,27 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
static int i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_set_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
@@ -2141,41 +2205,72 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
**/
static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
+ struct i40e_hw *hw = &pf->hw;
+ u8 flow_pctype = 0;
+ u64 i_set = 0;
+
cmd->data = 0;
- if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
- cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
- cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
- return 0;
- }
- /* Report default options for RSS on i40e */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
case UDP_V4_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through to add IP fields */
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case TCP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through to add IP fields */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
+ /* Default is src/dest for IP, no matter the L4 hashing */
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
+ /* Read flow based hash input set register */
+ if (flow_pctype) {
+ i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+ }
+
+ /* Process bits of hash input set */
+ if (i_set) {
+ if (i_set & I40E_L4_SRC_MASK)
+ cmd->data |= RXH_L4_B_0_1;
+ if (i_set & I40E_L4_DST_MASK)
+ cmd->data |= RXH_L4_B_2_3;
+
+ if (cmd->flow_type == TCP_V4_FLOW ||
+ cmd->flow_type == UDP_V4_FLOW) {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else if (cmd->flow_type == TCP_V6_FLOW ||
+ cmd->flow_type == UDP_V6_FLOW) {
+ if (i_set & I40E_L3_V6_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_V6_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
+ }
+
return 0;
}
@@ -2318,6 +2413,51 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
/**
+ * i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @nfc: pointer to user request
+ * @i_setc: bits currently set
+ *
+ * Returns value of bits to be set per user request
+ **/
+static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+{
+ u64 i_set = i_setc;
+ u64 src_l3 = 0, dst_l3 = 0;
+
+ if (nfc->data & RXH_L4_B_0_1)
+ i_set |= I40E_L4_SRC_MASK;
+ else
+ i_set &= ~I40E_L4_SRC_MASK;
+ if (nfc->data & RXH_L4_B_2_3)
+ i_set |= I40E_L4_DST_MASK;
+ else
+ i_set &= ~I40E_L4_DST_MASK;
+
+ if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
+ src_l3 = I40E_L3_V6_SRC_MASK;
+ dst_l3 = I40E_L3_V6_DST_MASK;
+ } else if (nfc->flow_type == TCP_V4_FLOW ||
+ nfc->flow_type == UDP_V4_FLOW) {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ } else {
+ /* Any other flow types are not supported here */
+ return i_set;
+ }
+
+ if (nfc->data & RXH_IP_SRC)
+ i_set |= src_l3;
+ else
+ i_set &= ~src_l3;
+ if (nfc->data & RXH_IP_DST)
+ i_set |= dst_l3;
+ else
+ i_set &= ~dst_l3;
+
+ return i_set;
+}
+
+/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
* @cmd: ethtool rxnfc command
@@ -2329,6 +2469,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ u8 flow_pctype = 0;
+ u64 i_set, i_setc;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -2337,75 +2479,39 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
break;
case TCP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
break;
case UDP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- return -EINVAL;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2437,13 +2543,23 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
+ if (flow_pctype) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+ i_set = i40e_get_rss_hash_bits(nfc, i_setc);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_pctype);
+ }
+
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw);
- /* Save setting for future output/update */
- pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
-
return 0;
}
@@ -2744,11 +2860,15 @@ static void i40e_get_channels(struct net_device *dev,
static int i40e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
+ const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int count = ch->combined_count;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
int new_count;
+ int err = 0;
/* We do not support setting channels for any other VSI at present */
if (vsi->type != I40E_VSI_MAIN)
@@ -2766,6 +2886,26 @@ static int i40e_set_channels(struct net_device *dev,
if (count > i40e_max_channels(vsi))
return -EINVAL;
+ /* verify that the number of channels does not invalidate any current
+ * flow director rules
+ */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (rule->dest_ctl != drop && count <= rule->q_index) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d assigns flow to queue %d\n",
+ rule->fd_id, rule->q_index);
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Existing filter rules must be deleted to reduce combined channel count to %d\n",
+ count);
+ return err;
+ }
+
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
@@ -2846,15 +2986,13 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
u8 *seed = NULL;
u16 i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
- return 0;
-
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
@@ -2872,8 +3010,12 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
I40E_HLUT_ARRAY_SIZE);
@@ -2943,6 +3085,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+
+ /* flush current ATR settings */
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d0b3a1bb82ca..ac1faee2a5b8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -57,8 +57,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -527,6 +525,7 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
pf->veb[i]->stat_offsets_loaded = false;
}
}
+ pf->hw_csum_rx_error = 0;
}
/**
@@ -1316,7 +1315,7 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
@@ -1909,7 +1908,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
del_list[num_del].vlan_tag = 0;
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
} else {
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan));
@@ -4616,7 +4615,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 i, enabled_tc;
+ u8 i, enabled_tc = 1;
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
@@ -4634,8 +4633,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
else
return 1; /* Only TC0 */
- /* At least have TC0 */
- enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i))
num_tc++;
@@ -5245,7 +5242,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
@@ -5942,13 +5939,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR */
+
+ /* Wait for some more space to be available to turn on ATR. We also
+ * must check that no existing ntuple rules for TCP are in effect
+ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp_rule == 0)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -5979,9 +5980,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int fd_room;
int reg;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
@@ -6001,7 +5999,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6021,7 +6019,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6055,9 +6053,6 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
@@ -7157,9 +7152,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
pf->pending_udp_bitmap &= ~BIT_ULL(i);
port = pf->udp_ports[i].index;
if (port)
- ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- pf->udp_ports[i].type,
- NULL, NULL);
+ ret = i40e_aq_add_udp_tunnel(hw, port,
+ pf->udp_ports[i].type,
+ NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -7646,7 +7641,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
} else {
pf->num_fdsb_msix = 0;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -7666,6 +7660,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ iwarp_requested = pf->num_iwarp_msix;
+
if (!vectors_left)
pf->num_iwarp_msix = 0;
else if (vectors_left < pf->num_iwarp_msix)
@@ -7679,18 +7675,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
- /* if we're short on vectors for what's desired, we limit
- * the queues per vmdq. If this is still more than are
- * available, the user will need to change the number of
- * queues/vectors used by the PF later with the ethtool
- * channels command
- */
- if (vmdq_vecs < vmdq_vecs_wanted)
- pf->num_vmdq_qps = 1;
- pf->num_vmdq_msix = pf->num_vmdq_qps;
+ if (!vectors_left) {
+ pf->num_vmdq_msix = 0;
+ pf->num_vmdq_qps = 0;
+ } else {
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
- v_budget += vmdq_vecs;
- vectors_left -= vmdq_vecs;
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
}
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
@@ -7702,21 +7703,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->msix_entries[i].entry = i;
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (v_actual != v_budget) {
- /* If we have limited resources, we will start with no vectors
- * for the special features and then allocate vectors to some
- * of these features based on the policy and at the end disable
- * the features that did not get any vectors.
- */
- iwarp_requested = pf->num_iwarp_msix;
- pf->num_iwarp_msix = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
- pf->num_vmdq_msix = 0;
- }
-
if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -7730,9 +7716,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (v_actual != v_budget) {
+ } else if (!vectors_left) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+ * of these features based on the policy and at the end disable
+ * the features that did not get any vectors.
+ */
int vec;
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector limit reached, attempting to redistribute vectors\n");
/* reserve the misc vector */
vec = v_actual - 1;
@@ -7740,7 +7733,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+#ifdef I40E_FCOE
+ pf->num_fcoe_qps = 0;
+ pf->num_fcoe_msix = 0;
+#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7772,9 +7768,14 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->num_fdsb_msix = 1;
+ vec--;
+ }
pf->num_lan_msix = min_t(int,
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
+ pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7786,6 +7787,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->num_fdsb_msix == 0)) {
+ dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
@@ -7804,6 +7810,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
+ i40e_debug(&pf->hw, I40E_DEBUG_INIT,
+ "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
+ pf->num_lan_msix,
+ pf->num_vmdq_msix * pf->num_vmdq_vsis,
+ pf->num_fdsb_msix,
+ pf->num_iwarp_msix);
+
return v_actual;
}
@@ -7990,72 +8003,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
{
- struct i40e_aqc_get_set_rss_key_data rss_key;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- bool pf_lut = false;
- u8 *rss_lut;
- int ret, i;
-
- memcpy(&rss_key, seed, sizeof(rss_key));
-
- rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
- if (!rss_lut)
- return -ENOMEM;
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i < vsi->rss_table_size; i++)
- rss_lut[i] = i % vsi->rss_size;
+ int ret = 0;
- ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto config_rss_aq_out;
+ if (seed) {
+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)seed;
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
}
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
- if (vsi->type == I40E_VSI_MAIN)
- pf_lut = true;
-
- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
- vsi->rss_table_size);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-config_rss_aq_out:
- kfree(rss_lut);
- return ret;
-}
-
-/**
- * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
- * @vsi: VSI structure
- **/
-static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
-{
- u8 seed[I40E_HKEY_ARRAY_SIZE];
- struct i40e_pf *pf = vsi->back;
- u8 *lut;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
- return 0;
-
- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
- vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
- ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
- kfree(lut);
-
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
return ret;
}
@@ -8106,6 +8081,46 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_pf *pf = vsi->back;
+ u8 *lut;
+ int ret;
+
+ if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
+ return 0;
+
+ if (!vsi->rss_size)
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+ /* Use the user configured hash keys and lookup table if there is one,
+ * otherwise use default
+ */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+
+ return ret;
+}
+
+/**
* i40e_config_rss_reg - Configure RSS keys and lut by writing registers
* @vsi: Pointer to vsi structure
* @seed: RSS hash seed
@@ -8243,8 +8258,8 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
*/
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size)
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
{
u16 i;
@@ -8285,6 +8300,8 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -8609,7 +8626,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
@@ -8684,18 +8700,40 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
}
return need_reset;
}
/**
+ * i40e_clear_rss_lut - clear the rx hash lookup table
+ * @vsi: the VSI being configured
+ **/
+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
+ u8 i;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), 0);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -8708,6 +8746,12 @@ static int i40e_set_features(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
bool need_reset;
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ i40e_pf_config_rss(pf);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ i40e_clear_rss_lut(vsi);
+
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
@@ -11309,11 +11353,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the Admin Queue resources: %d\n",
- ret_code);
+ i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
@@ -11360,6 +11400,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+ if (!pf) {
+ dev_info(&pdev->dev,
+ "Cannot recover - error happened during device probe\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
rtnl_lock();
@@ -11582,7 +11628,8 @@ static int __init i40e_init_module(void)
* it can't be any worse than using the system workqueue which
* was already single threaded
*/
- i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
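
Note on the RSS rework above: the open-coded LUT population has been folded into the shared i40e_fill_rss_lut() helper, now exported via the prototype change earlier in this file. Reconstructed from the removed "rss_lut[i] = i % vsi->rss_size" loop, the helper amounts to a round-robin spread of table entries over the active queues; a minimal sketch, which may differ from the in-tree body in detail:

void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	/* spread LUT entries across the active queues in round-robin order */
	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
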
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ed39cbad24bd..f1feceab758a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -669,7 +669,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
pf->ptp_clock = NULL;
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
- } else {
+ } else if (pf->ptp_clock) {
struct timespec64 ts;
u32 regval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index df7ecc9578c9..6287bf63c43c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -40,6 +40,69 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
+
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
@@ -48,14 +111,13 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add)
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
{
- struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
- unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -92,56 +154,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* grab the next descriptor */
i = tx_ring->next_to_use;
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
- memset(first, 0, sizeof(struct i40e_tx_buffer));
-
- tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
-
- fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
- I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
-
- fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
- I40E_TXD_FLTR_QW0_PCTYPE_MASK;
-
- /* Use LAN VSI Id if not programmed by user */
- if (fdir_data->dest_vsi == 0)
- fpt |= (pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
- else
- fpt |= ((u32)fdir_data->dest_vsi <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
- I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
-
- dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- if (add)
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- else
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
-
- dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
- I40E_TXD_FLTR_QW1_DEST_MASK;
-
- dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
- I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
-
- if (fdir_data->cnt_index != 0) {
- dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dcc |= ((u32)fdir_data->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- }
-
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->rsvd = cpu_to_le32(0);
- fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
- fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+ i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
@@ -282,18 +296,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
@@ -532,7 +546,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -545,9 +562,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -584,8 +598,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -754,8 +767,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1864,6 +1877,15 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->rx_rings[idx]->rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->tx_rings[idx]->tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1879,6 +1901,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1887,18 +1910,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2621,9 +2647,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -2654,8 +2678,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2787,9 +2810,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2814,13 +2835,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
@@ -2840,10 +2859,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_QW1_CMD_SHIFT);
/* notify HW of packet */
- if (!tail_bump)
+ if (!tail_bump) {
prefetchw(tx_desc + 1);
-
- if (tail_bump) {
+ } else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2852,7 +2870,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
wmb();
writel(i, tx_ring->tail);
}
-
return;
dma_error:
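
The new i40e_fdir() helper above encodes the same descriptor fields as the removed inline code, but consistently masks after shifting and uses the GNU "?:" shorthand to fall back to the LAN VSI id when fdata->dest_vsi is zero. A small illustration of both idioms using the QW0 macros from the hunk (the function name here is only for the example):

/* illustration only: mask-then-shift packing plus the GNU "a ?: b"
 * operator, which yields a if it is non-zero and b otherwise
 */
static u32 example_pack_qw0(u16 q_index, u16 dest_vsi, u16 lan_vsi_id)
{
	u32 flex_ptype;

	/* bits that fall outside the field are discarded by the mask */
	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	/* default to the LAN VSI id when the user did not pick a VSI */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(dest_vsi ?: lan_vsi_id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	return flex_ptype;
}
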
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b78c810d1835..508840585645 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
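
txring_txq() is only a thin wrapper around netdev_get_tx_queue(), but it is what lets the byte-queue-limit calls in i40e_txrx.c stay on a single line each. Those calls follow the usual BQL pattern; sketched in isolation below (the function name is illustrative, the netdev_tx_* helpers are the standard kernel ones already used in the hunks above):

static void example_bql_accounting(struct i40e_ring *ring,
				   unsigned int pkts, unsigned int bytes)
{
	/* at transmit time: record bytes handed to the hardware ring */
	netdev_tx_sent_queue(txring_txq(ring), bytes);

	/* at clean/IRQ time: report what the hardware has completed */
	netdev_tx_completed_queue(txring_txq(ring), pkts, bytes);

	/* on ring teardown: drop all outstanding accounting */
	netdev_tx_reset_queue(txring_txq(ring));
}
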
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index c92a3bdee229..f861d3109d1a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,6 +163,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6fcbf764f32b..54b8ee2583f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -502,8 +502,16 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u32 qtx_ctl;
int ret = 0;
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -991,7 +999,10 @@ complete_reset:
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
- i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ /* Do not notify the client during VF init */
+ if (vf->pf->num_alloc_vfs)
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -1089,7 +1100,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
goto err_iov;
}
}
- i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
/* allocate memory */
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -1113,6 +1123,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
}
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
+
err_alloc:
if (ret)
i40e_free_vfs(pf);
@@ -1472,7 +1484,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ !vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2213,8 +2226,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2314,6 +2327,7 @@ err:
/* send the response back to the VF */
aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
return aq_ret;
}
@@ -2742,11 +2756,12 @@ error_param:
* @vf_id: VF identifier
* @vlan_id: mac address
* @qos: priority setting
+ * @vlan_proto: vlan protocol
*
* program VF vlan id and/or qos
**/
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos)
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -2769,6 +2784,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -2995,6 +3016,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
ivi->spoofchk = vf->spoofchk;
+ ivi->trusted = vf->trusted;
ret = 0;
error_param:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 875174141451..4012d069939a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -129,8 +129,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
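
The added __be16 argument follows the ndo_set_vf_vlan() hook, which in kernels of this generation passes the VLAN protocol requested from userspace; the PF handler above rejects anything other than 802.1Q. Assuming the usual netdev ops wiring (the table name below is illustrative), the prototype has to line up as:

/* expected hook shape, per struct net_device_ops:
 *   int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *                          u8 qos, __be16 proto);
 */
static const struct net_device_ops i40e_netdev_ops_example = {
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
};
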
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 3114dcfa1724..40b0eafd0c71 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -204,6 +204,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -447,13 +450,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -468,17 +473,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -1579,6 +1586,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
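
The new HMC resource profile struct is sized to exactly fill the 16-byte parameter area of a direct admin queue command (1 + 1 + 14 bytes), which is presumably what I40E_CHECK_CMD_LENGTH() asserts at build time. A sketch of such a check, with illustrative names:

#define EX_AQ_DIRECT_PARAM_LEN	16
#define EX_CHECK_CMD_LENGTH(X) \
	_Static_assert(sizeof(struct X) == EX_AQ_DIRECT_PARAM_LEN, \
		       #X " must fit a direct AQ command")

EX_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
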
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4db0c0326185..7953c13451b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -302,7 +302,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
u16 i = 0;
@@ -326,6 +325,8 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ u16 len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index a579193b2c21..75f2a2cdd738 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,7 +51,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -64,9 +67,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1312,6 +1311,19 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->rx_rings[idx].rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->tx_rings[idx].tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1326,6 +1338,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1334,18 +1348,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -1832,9 +1849,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -1865,8 +1880,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2015,9 +2029,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2042,13 +2054,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
@@ -2068,10 +2078,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_QW1_CMD_SHIFT);
/* notify HW of packet */
- if (!tail_bump)
+ if (!tail_bump) {
prefetchw(tx_desc + 1);
-
- if (tail_bump) {
+ } else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2080,7 +2089,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
wmb();
writel(i, tx_ring->tail);
}
-
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0112277e5882..abcdecabbc56 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -287,6 +287,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
@@ -445,4 +453,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
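
The comment moved into struct i40e_ring spells out the ITR storage convention: the stored value is the user's microsecond setting, the high bit flags adaptive (dynamic) moderation, and the value must be converted to the hardware's 2 us register granularity before being written. Roughly the contract the ITR_IS_DYNAMIC()/ITR_TO_REG() users above rely on, written with illustrative macro names (the in-tree definitions live in the driver headers and may differ in spelling):

#define EX_ITR_DYNAMIC		0x8000	/* high bit: adaptive moderation */
#define EX_ITR_IS_DYNAMIC(s)	(((s) & EX_ITR_DYNAMIC) == EX_ITR_DYNAMIC)
#define EX_ITR_TO_REG(s)	(((s) & ~EX_ITR_DYNAMIC) >> 1)	/* usecs -> 2 us units */
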
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index f04ce6cb70dc..bd691ad86673 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -160,6 +160,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 76ed97db28e2..c5fd724313c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -59,32 +59,25 @@ struct i40e_vsi {
unsigned long state;
int base_vector;
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define I40EVF_DEFAULT_TXD 512
-#define I40EVF_DEFAULT_RXD 512
-#define I40EVF_MAX_TXD 4096
-#define I40EVF_MIN_TXD 64
-#define I40EVF_MAX_RXD 4096
-#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048 2048
-#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
-#define I40EVF_MAX_AQ_BUF_SIZE 4096
-#define I40EVF_AQ_LEN 32
-#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -111,7 +104,7 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
- int v_idx; /* vector index in list */
+ int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
cpumask_var_t affinity_mask;
@@ -129,11 +122,11 @@ struct i40e_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define I40EVF_RX_DESC_ADV(R, i) \
+#define I40EVF_RX_DESC_ADV(R, i) \
(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
-#define I40EVF_TX_DESC_ADV(R, i) \
+#define I40EVF_TX_DESC_ADV(R, i) \
(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
-#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
#define OTHER_VECTOR 1
@@ -204,22 +197,25 @@ struct i40evf_adapter {
struct msix_entry *msix_entries;
u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
-#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
-#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
-#define I40EVF_FLAG_RESET_PENDING BIT(9)
-#define I40EVF_FLAG_RESET_NEEDED BIT(10)
+#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40EVF_FLAG_IN_NETPOLL BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
+#define I40EVF_FLAG_RESET_PENDING BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED BIT(10)
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
#define I40EVF_FLAG_PROMISC_ON BIT(15)
#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
-#define I40E_FLAG_FDIR_ATR_ENABLED 0
-#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
/* flags for admin queue service task */
@@ -233,7 +229,7 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
@@ -258,6 +254,7 @@ struct i40evf_adapter {
struct work_struct watchdog_task;
bool netdev_registered;
bool link_up;
+ enum i40e_aq_link_speed link_speed;
enum i40e_virtchnl_ops current_op;
#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index c9c202f6c521..a9940154eead 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -74,13 +74,33 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
static int i40evf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
- /* In the future the VF will be able to query the PF for
- * some information - for now use a dummy value
- */
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
ecmd->supported = 0;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = PORT_NONE;
+ /* Set speed and duplex */
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ break;
+ case I40E_LINK_SPEED_20GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
return 0;
}
@@ -276,93 +296,207 @@ static int i40evf_set_ringparam(struct net_device *netdev,
}
/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
+ * __i40evf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
*
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality.
+ * Gets the per-queue coalescing settings. Specifically, Rx and Tx usecs
+ * are per-queue values. If queue is < 0 we default to queue 0 as the
+ * representative value.
**/
-static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int __i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify a
+ * queue, return queue 0's values as the representative.
+ */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= adapter->num_active_queues)
+ return -EINVAL;
+
+ rx_ring = &adapter->rx_rings[queue];
+ tx_ring = &adapter->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
+ * i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
- * Change current coalescing settings.
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified, this
+ * only represents the settings of queue 0.
**/
-static int i40evf_set_coalesce(struct net_device *netdev,
+static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
+ return __i40evf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to read
+ *
+ * Read specific queue's coalesce settings.
+ **/
+static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
+ u16 vector;
+
+ adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
+ adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = adapter->rx_rings[queue].q_vector;
+ q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = adapter->tx_rings[queue].q_vector;
+ q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * __i40evf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-
- else
+ if (ec->rx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_rx_coalesce)
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- else if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
else
+ if (ec->tx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_tx_coalesce)
+ netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = &adapter->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify a
+ * queue, apply the settings to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < adapter->num_active_queues; i++)
+ i40evf_set_itr_per_queue(adapter, ec, i);
+ } else if (queue < adapter->num_active_queues) {
+ i40evf_set_itr_per_queue(adapter, ec, queue);
+ } else {
+ netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ adapter->num_active_queues - 1);
+ return -EINVAL;
}
return 0;
}
/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ */
+static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, queue);
+}
+
+/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -513,6 +647,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
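
The two per-queue hooks registered above are driven by ethtool's per-queue sub-command: the core walks the queue bitmap supplied by userspace and calls the driver once per set bit, which is why __i40evf_get_coalesce()/__i40evf_set_coalesce() only ever see a single queue index (or -1 from the whole-device entry points). A hedged sketch of that dispatch, not the kernel's actual implementation:

static int example_set_per_queue(struct net_device *dev,
				 unsigned long *queue_mask, u32 n_queues,
				 struct ethtool_coalesce *ec)
{
	u32 queue;
	int ret;

	/* one driver call per queue bit set by userspace */
	for_each_set_bit(queue, queue_mask, n_queues) {
		ret = dev->ethtool_ops->set_per_queue_coalesce(dev, queue, ec);
		if (ret)
			return ret;
	}
	return 0;
}
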
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 600fb9c4a7f0..14372810fc27 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -370,6 +370,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
+ struct i40e_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -377,7 +378,10 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
+ q_vector->ring_mask |= BIT(r_idx);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}
/**
@@ -391,6 +395,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
+ struct i40e_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -398,9 +403,10 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= BIT(t_idx);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}
/**
@@ -1007,7 +1013,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
**/
-static int i40evf_up_complete(struct i40evf_adapter *adapter)
+static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state);
@@ -1016,7 +1022,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- return 0;
}
/**
@@ -1037,6 +1042,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
@@ -1154,6 +1160,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1162,6 +1169,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
+ rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
return 0;
@@ -1420,7 +1428,9 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
+ rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
+ rtnl_unlock();
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to setup interrupt capabilities\n");
@@ -1729,6 +1739,7 @@ static void i40evf_reset_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
i40evf_free_traffic_irqs(adapter);
@@ -1767,6 +1778,7 @@ continue_reset:
if (netif_running(adapter->netdev)) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
}
i40evf_irq_disable(adapter);
@@ -1781,8 +1793,7 @@ continue_reset:
i40evf_free_all_tx_resources(adapter);
/* kill and reinit the admin queue */
- if (i40evf_shutdown_adminq(hw))
- dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+ i40evf_shutdown_adminq(hw);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
@@ -1802,6 +1813,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ /* Open RDMA Client again */
+ adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
@@ -1820,9 +1833,7 @@ continue_reset:
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto reset_err;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
} else {
@@ -2052,9 +2063,7 @@ static int i40evf_open(struct net_device *netdev)
i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto err_req_irq;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
@@ -2268,10 +2277,6 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2453,6 +2458,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
netif_carrier_off(netdev);
+ adapter->link_up = false;
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
@@ -2831,7 +2837,8 @@ static int __init i40evf_init_module(void)
pr_info("%s\n", i40evf_copyright);
- i40evf_wq = create_singlethread_workqueue(i40evf_driver_name);
+ i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ i40evf_driver_name);
if (!i40evf_wq) {
pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index d76c221d4c8a..ddf478d6322b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -817,6 +817,45 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_print_link_message - print link up or down
+ * @adapter: adapter structure
+ *
+ * Log a message telling the world of our wondrous link status
+ */
+static void i40evf_print_link_message(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ char *speed = "Unknown ";
+
+ if (!adapter->link_up) {
+ netdev_info(netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = "40 G";
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = "20 G";
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = "10 G";
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = "1000 M";
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = "100 M";
+ break;
+ default:
+ break;
+ }
+
+ netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
+}
+
+/**
* i40evf_request_reset
* @adapter: adapter structure
*
@@ -853,16 +892,20 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
(struct i40e_virtchnl_pf_event *)msg;
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- adapter->link_up =
- vpe->event_data.link_event.link_status;
- if (adapter->link_up && !netif_carrier_ok(netdev)) {
- dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- } else if (!adapter->link_up) {
- dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
+ adapter->link_speed =
+ vpe->event_data.link_event.link_speed;
+ if (adapter->link_up !=
+ vpe->event_data.link_event.link_status) {
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ i40evf_print_link_message(adapter);
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
@@ -937,8 +980,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
- netif_tx_start_all_queues(adapter->netdev);
- netif_carrier_on(adapter->netdev);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 199ff98209cf..acf06051e111 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -188,6 +188,11 @@ struct e1000_adv_tx_context_desc {
/* ETQF register bit definitions */
#define E1000_ETQF_FILTER_ENABLE BIT(26)
#define E1000_ETQF_1588 BIT(30)
+#define E1000_ETQF_IMM_INT BIT(29)
+#define E1000_ETQF_QUEUE_ENABLE BIT(31)
+#define E1000_ETQF_QUEUE_SHIFT 16
+#define E1000_ETQF_QUEUE_MASK 0x00070000
+#define E1000_ETQF_ETYPE_MASK 0x0000FFFF
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2997c443c5dc..2688180a7acd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1024,4 +1024,8 @@
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4))
+#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define E1000_VLAPQF_QUEUE_MASK 0x03
+
#endif
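
The E1000_VLAPQF_* additions above pack, for each VLAN priority, a valid bit and a 2-bit receive-queue index into a single 32-bit register. The following is only a self-contained userspace sketch of that bit layout (priority 5 steered to queue 2 is an arbitrary example, not a value from this patch); it mirrors the arithmetic later used by igb_rxnfc_write_vlan_prio_filter():

/* standalone sketch, not driver code */
#include <stdio.h>

#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4))
#define E1000_VLAPQF_P_VALID(_n)          (0x1 << (3 + (_n) * 4))
#define E1000_VLAPQF_QUEUE_MASK           0x03

int main(void)
{
	unsigned int vlapqf = 0;
	unsigned int prio = 5, queue = 2;	/* arbitrary example values */

	/* program: mark this priority valid and select its RX queue */
	vlapqf |= E1000_VLAPQF_P_VALID(prio);
	vlapqf |= E1000_VLAPQF_QUEUE_SEL(prio, queue);

	/* read back: extract the queue index for that priority */
	printf("VLAPQF = 0x%08x, queue for prio %u = %u\n", vlapqf, prio,
	       (vlapqf >> (prio * 4)) & E1000_VLAPQF_QUEUE_MASK);
	return 0;
}
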
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 21d9d02885cb..d84afdd83e53 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -309,6 +309,7 @@
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
+#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5387b3a96489..d11093dce1b9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -350,11 +350,49 @@ struct hwmon_buff {
};
#endif
+/* The number of L2 ether-type filter registers; index 3 is reserved
+ * for the PTP 1588 timestamp
+ */
+#define MAX_ETYPE_FILTER (4 - 1)
+/* ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters here!!
+ *
+ * Current filters: Filter 3
+ */
+#define IGB_ETQF_FILTER_1588 3
+
#define IGB_N_EXTTS 2
#define IGB_N_PEROUT 2
#define IGB_N_SDP 4
#define IGB_RETA_SIZE 128
+enum igb_filter_match_flags {
+ IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
+ IGB_FILTER_FLAG_VLAN_TCI = 0x2,
+};
+
+#define IGB_MAX_RXNFC_FILTERS 16
+
+/* RX network flow classification data structure */
+struct igb_nfc_input {
+ /* Byte layout in order, all values with MSB first:
+ * match_flags - 1 byte
+ * etype - 2 bytes
+ * vlan_tci - 2 bytes
+ */
+ u8 match_flags;
+ __be16 etype;
+ __be16 vlan_tci;
+};
+
+struct igb_nfc_filter {
+ struct hlist_node nfc_node;
+ struct igb_nfc_input filter;
+ u16 etype_reg_index;
+ u16 sw_idx;
+ u16 action;
+};
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -451,6 +489,7 @@ struct igb_adapter {
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ bool pps_sys_wrap_on;
struct ptp_pin_desc sdp_config[IGB_N_SDP];
struct {
@@ -473,6 +512,13 @@ struct igb_adapter {
int copper_tries;
struct e1000_info ei;
u16 eee_advert;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ unsigned int nfc_filter_count;
+ /* lock for RX network flow classification filter */
+ spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
};
/* flags controlling PTP/1588 function */
@@ -599,4 +645,9 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+int igb_add_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input);
+int igb_erase_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input);
+
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 64e91c575a39..737b664d004c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2431,6 +2431,63 @@ static int igb_get_ts_info(struct net_device *dev,
}
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igb_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGB_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGB_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -2484,6 +2541,16 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igb_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
case ETHTOOL_GRXFH:
ret = igb_get_rss_hash_opts(adapter, cmd);
break;
@@ -2598,6 +2665,279 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
return 0;
}
+static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(E1000_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= E1000_ETQF_FILTER_ENABLE;
+ etqf &= ~E1000_ETQF_ETYPE_MASK;
+ etqf |= (etype & E1000_ETQF_ETYPE_MASK);
+
+ etqf &= ~E1000_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT)
+ & E1000_ETQF_QUEUE_MASK);
+ etqf |= E1000_ETQF_QUEUE_ENABLE;
+
+ wr32(E1000_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(E1000_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) &&
+ (queue_index != input->action)) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(E1000_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
+{
+ int err = -EINVAL;
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
+ err = igb_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
+ err = igb_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igb_clear_etype_filter_regs(struct igb_adapter *adapter,
+ u16 reg_index)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 etqf = rd32(E1000_ETQF(reg_index));
+
+ etqf &= ~E1000_ETQF_QUEUE_ENABLE;
+ etqf &= ~E1000_ETQF_QUEUE_MASK;
+ etqf &= ~E1000_ETQF_FILTER_ENABLE;
+
+ wr32(E1000_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(E1000_VLAPQF);
+ vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority,
+ E1000_VLAPQF_QUEUE_MASK);
+
+ wr32(E1000_VLAPQF, vlapqf);
+}
+
+int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE)
+ igb_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
+ igb_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ return 0;
+}
+
+static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igb_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input)
+ err = igb_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+ /* If no input, this was a delete; err should be 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL
+ */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igb_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ /* Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) ||
+ (fsp->ring_cookie >= adapter->num_rx_queues)) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGB_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK))
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igb_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct igb_adapter *adapter = netdev_priv(dev);
@@ -2607,6 +2947,11 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXFH:
ret = igb_set_rss_hash_opt(adapter, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igb_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = igb_del_ethtool_nfc_entry(adapter, cmd);
default:
break;
}
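
The ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL handling added above is driven through the standard ethtool ioctl interface. Purely as a hypothetical userspace sketch (the interface name "eth0", the PTP ethertype 0x88F7, queue 1 and rule location 0 are placeholder choices, not values from this patch; the device needs NETIF_F_NTUPLE enabled and the caller CAP_NET_ADMIN), installing a single ether-type rule could look like this:

/* hypothetical userspace caller, not part of the driver */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88F7);	/* PTP ethertype (example) */
	nfc.fs.m_u.ether_spec.h_proto = 0xFFFF;		/* full mask, as the driver requires */
	nfc.fs.ring_cookie = 1;				/* steer to RX queue 1 (placeholder) */
	nfc.fs.location = 0;				/* software rule index (placeholder) */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}

Deleting the same rule later would reuse the structure with nfc.cmd = ETHTOOL_SRXCLSRLDEL and only nfc.fs.location filled in, matching igb_del_ethtool_nfc_entry() above.
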
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 942a89fb0090..edc9a6ac5169 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -58,7 +58,7 @@
#include "igb.h"
#define MAJ 5
-#define MIN 3
+#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
@@ -169,13 +169,15 @@ static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
+ int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
+static void igb_nfc_filter_exit(struct igb_adapter *adapter);
+static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
@@ -1611,6 +1613,7 @@ static void igb_configure(struct igb_adapter *adapter)
igb_setup_mrqc(adapter);
igb_setup_rctl(adapter);
+ igb_nfc_filter_restore(adapter);
igb_configure_tx(adapter);
igb_configure_rx(adapter);
@@ -2059,6 +2062,21 @@ static int igb_set_features(struct net_device *netdev,
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ struct hlist_node *node2;
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &adapter->nfc_filter_list, nfc_node) {
+ igb_erase_filter(adapter, rule);
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ }
+ spin_unlock(&adapter->nfc_lock);
+ adapter->nfc_filter_count = 0;
+ }
+
netdev->features = features;
if (netif_running(netdev))
@@ -3053,6 +3071,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
@@ -3240,6 +3259,8 @@ static int __igb_close(struct net_device *netdev, bool suspending)
igb_down(adapter);
igb_free_irq(adapter);
+ igb_nfc_filter_exit(adapter);
+
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
@@ -6201,14 +6222,17 @@ static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
return 0;
}
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
igb_disable_port_vlan(adapter, vf);
}
@@ -8306,4 +8330,28 @@ int igb_reinit_queues(struct igb_adapter *adapter)
return err;
}
+
+static void igb_nfc_filter_exit(struct igb_adapter *adapter)
+{
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igb_erase_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+static void igb_nfc_filter_restore(struct igb_adapter *adapter)
+{
+ struct igb_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igb_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 336c103ae374..a7895c4cbcc3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -591,6 +591,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
tsim |= TSINTR_SYS_WRAP;
else
tsim &= ~TSINTR_SYS_WRAP;
+ igb->pps_sys_wrap_on = !!on;
wr32(E1000_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
@@ -998,12 +999,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
/* define ethertype filter for timestamped packets */
if (is_l2)
- wr32(E1000_ETQF(3),
+ wr32(E1000_ETQF(IGB_ETQF_FILTER_1588),
(E1000_ETQF_FILTER_ENABLE | /* enable filter */
E1000_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
- wr32(E1000_ETQF(3), 0);
+ wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0);
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
@@ -1159,7 +1160,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
@@ -1235,7 +1236,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
wr32(E1000_TSAUXC, 0x0);
wr32(E1000_TSSDP, 0x0);
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
+ wr32(E1000_TSIM,
+ TSYNC_INTERRUPTS |
+ (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0));
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0778ba65083..12bb877df860 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.2-k"
+#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 9475ff9055aa..b06e32d0d22a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -45,10 +45,10 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
@@ -645,6 +645,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
+#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
@@ -653,13 +654,12 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
-#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
-#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
+#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
/* Tx fast path data */
@@ -673,6 +673,7 @@ struct ixgbe_adapter {
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
+ __be16 geneve_port;
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -840,6 +841,7 @@ enum ixgbe_state_t {
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
+ __IXGBE_RESET_REQUESTED,
};
struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index c47b605e8651..77d3039283f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -99,6 +99,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
supported = true;
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0d7209eb5abf..f49f80380aa5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -193,7 +193,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ SUPPORTED_1000baseKX_Full :
+ SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
SUPPORTED_1000baseKX_Full :
@@ -311,6 +313,25 @@ static int ixgbe_get_settings(struct net_device *netdev,
break;
}
+ /* Indicate pause support */
+ ecmd->supported |= SUPPORTED_Pause;
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ ecmd->advertising |= ADVERTISED_Pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ ecmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ break;
+ case ixgbe_fc_tx_pause:
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ break;
+ default:
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ }
+
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
@@ -2926,9 +2947,13 @@ static u32 ixgbe_rss_indir_size(struct net_device *netdev)
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+ u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
for (i = 0; i < reta_size; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -3039,8 +3064,8 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- /* SR-IOV currently only allows one queue on the PF */
- max_combined = 1;
+ /* Limit value based on the queue mask */
+ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index bcdc88444ceb..15ab337fd7ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -515,15 +515,16 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
+ if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
- /* 32 pool mode with 4 queues per pool */
+ /* 32 pool mode with up to 4 queues per pool */
} else {
vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
rss_m = IXGBE_RSS_4Q_MASK;
- rss_i = 4;
+ /* We can support 4, 2, or 1 queues */
+ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
}
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b4f03748adc0..a244d9a67264 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
@@ -1103,7 +1104,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
@@ -1495,7 +1496,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
bool encap_pkt = false;
skb_checksum_none_assert(skb);
@@ -1504,8 +1504,8 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
- if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
- (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
+ /* check for VXLAN and Geneve packets */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
encap_pkt = true;
skb->encapsulation = 1;
}
@@ -2777,7 +2777,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
}
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
@@ -3007,7 +3007,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
ixgbe_service_event_schedule(adapter);
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
@@ -3224,7 +3224,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
- e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+ hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
@@ -3248,7 +3248,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else if (tcs > 1)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mtqc |= IXGBE_MTQC_32VF;
else
mtqc |= IXGBE_MTQC_64VF;
@@ -3475,12 +3476,12 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- /* Program table for at least 2 queues w/ SR-IOV so that VFs can
+ /* Program table for at least 4 queues w/ SR-IOV so that VFs can
* make full use of any rings they may have. We will use the
* PSRTYPE register to control how many rings we use within the PF.
*/
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
- rss_i = 2;
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
+ rss_i = 4;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -3544,7 +3545,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
else if (tcs > 1)
mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
@@ -3922,6 +3924,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
rfctl &= ~IXGBE_RFCTL_RSC_DIS;
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
rfctl |= IXGBE_RFCTL_RSC_DIS;
+
+ /* disable NFS filtering */
+ rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* Program registers for the distribution of queues */
@@ -4102,23 +4107,20 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
- /* legacy case, we can just disable VLAN filtering */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ } else {
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
+ /* Nothing to do for 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
/* We are already in VLAN promisc, nothing to do */
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
return;
@@ -4126,10 +4128,6 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
- /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4201,19 +4199,9 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
+ hw->mac.type == ixgbe_mac_82598EB)
return;
- }
/* We are not in VLAN promisc, nothing to do */
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
@@ -4586,18 +4574,23 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
}
}
-static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
{
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vxlanctrl;
+
+ if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
+ IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
+ return;
+
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+
+ if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
adapter->vxlan_port = 0;
- break;
- default:
- break;
- }
+
+ if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
+ adapter->geneve_port = 0;
}
#ifdef CONFIG_IXGBE_DCB
@@ -5500,8 +5493,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
- adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
- IXGBE_FLAG2_RESET_REQUESTED);
+ clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
del_timer_sync(&adapter->service_timer);
@@ -5711,8 +5704,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
- case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ /* fall through */
+ case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
#endif
@@ -6144,7 +6139,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
- ixgbe_clear_vxlan_port(adapter);
+ ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
udp_tunnel_get_rx_info(netdev);
return 0;
@@ -6921,7 +6916,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
* (Do the reset outside of interrupt context).
*/
e_warn(drv, "initiating reset to clear Tx work after link loss\n");
- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
}
}
}
@@ -7187,11 +7182,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
- if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
-
/* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_REMOVING, &adapter->state) ||
@@ -7225,9 +7218,9 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
- if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
rtnl_lock();
- adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
udp_tunnel_get_rx_info(adapter->netdev);
rtnl_unlock();
}
@@ -7667,6 +7660,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
if (adapter->vxlan_port &&
udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.network = skb_inner_network_header(skb);
+
+ if (adapter->geneve_port &&
+ udp_hdr(skb)->dest == adapter->geneve_port)
+ hdr.network = skb_inner_network_header(skb);
}
/* Currently only IPv4/IPv6 with TCP is supported */
@@ -8802,10 +8799,23 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM)
- adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
- else
- ixgbe_clear_vxlan_port(adapter);
+ if (features & NETIF_F_RXCSUM) {
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
+ } else {
+ u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
+
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ }
+ }
+
+ if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
+ if (features & NETIF_F_RXCSUM) {
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
+ } else {
+ u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
+
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ }
}
if (need_reset)
@@ -8818,67 +8828,115 @@ static int ixgbe_set_features(struct net_device *netdev,
}
/**
- * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
* @dev: The port's netdev
* @ti: Tunnel endpoint information
**/
-static void ixgbe_add_vxlan_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
__be16 port = ti->port;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
+ u32 port_shift = 0;
+ u32 reg;
if (ti->sa_family != AF_INET)
return;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
- if (adapter->vxlan_port == port)
- return;
+ if (adapter->vxlan_port == port)
+ return;
+
+ if (adapter->vxlan_port) {
+ netdev_info(dev,
+ "VXLAN port %d set, not adding port %d\n",
+ ntohs(adapter->vxlan_port),
+ ntohs(port));
+ return;
+ }
- if (adapter->vxlan_port) {
- netdev_info(dev,
- "Hit Max num of VXLAN ports, not adding port %d\n",
- ntohs(port));
+ adapter->vxlan_port = port;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->geneve_port == port)
+ return;
+
+ if (adapter->geneve_port) {
+ netdev_info(dev,
+ "GENEVE port %d set, not adding port %d\n",
+ ntohs(adapter->geneve_port),
+ ntohs(port));
+ return;
+ }
+
+ port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
+ adapter->geneve_port = port;
+ break;
+ default:
return;
}
- adapter->vxlan_port = port;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
+ reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
}
/**
- * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
* @dev: The port's netdev
* @ti: Tunnel endpoint information
**/
-static void ixgbe_del_vxlan_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u32 port_mask;
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
+ ti->type != UDP_TUNNEL_TYPE_GENEVE)
return;
if (ti->sa_family != AF_INET)
return;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
- if (adapter->vxlan_port != ti->port) {
- netdev_info(dev, "Port %d was not found, not deleting\n",
- ntohs(ti->port));
+ if (adapter->vxlan_port != ti->port) {
+ netdev_info(dev, "VXLAN port %d not found\n",
+ ntohs(ti->port));
+ return;
+ }
+
+ port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->geneve_port != ti->port) {
+ netdev_info(dev, "GENEVE port %d not found\n",
+ ntohs(ti->port));
+ return;
+ }
+
+ port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
+ break;
+ default:
return;
}
- ixgbe_clear_vxlan_port(adapter);
- adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ ixgbe_clear_udp_tunnel_port(adapter, port_mask);
+ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
}
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -9192,8 +9250,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
- .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
+ .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
+ .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index db0731e05401..021ab9b89c71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -346,8 +346,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
}
}
- /* clear value if nothing found */
- hw->phy.mdio.prtad = 0;
+ /* indicate no PHY found */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
return IXGBE_ERR_PHY_ADDR_INVALID;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5431bfe3339..a92277683a64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1254,7 +1254,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
return err;
- } else
+ } else if (adapter->ptp_clock)
e_dev_info("registered PHC device on %s\n", netdev->name);
/* set default timestamp mode to disabled here. We do this in
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8618599dfd6f..7e5d9850e4b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -329,13 +329,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ ixgbe_sriov_reinit(adapter);
+
err = pci_enable_sriov(dev, num_vfs);
if (err) {
e_dev_warn("Failed to enable PCI sriov: %d\n", err);
return err;
}
ixgbe_get_vfs(adapter);
- ixgbe_sriov_reinit(adapter);
return num_vfs;
#else
@@ -1354,13 +1356,16 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
return err;
}
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
if (vlan || qos) {
/* Check if there is already a port VLAN set, if so
* we have to delete the old one first before we
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 47e65e2f886a..0c7977d27b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,7 +43,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 1248a9936f7a..31d82e3abac8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -90,6 +90,7 @@
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
@@ -487,6 +488,13 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
* Filter Table */
+/* masks for accessing VXLAN and GENEVE UDP ports */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
@@ -1823,6 +1831,9 @@ enum {
#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8)
+#define IXGBE_X557_MAX_LED_INDEX 3
+#define IXGBE_X557_LED_PROVISIONING 0xC430
/* LED modes */
#define IXGBE_LED_LINK_UP 0x0
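
The IXGBE_VXLANCTRL_* masks added above split one register between the VXLAN UDP port (bits 15:0) and the GENEVE UDP port (bits 31:16), which is what ixgbe_add_udp_tunnel_port() and ixgbe_clear_udp_tunnel_port() rely on. A minimal standalone sketch of that packing, assuming the conventional IANA port numbers 4789 (VXLAN) and 6081 (GENEVE) purely as examples:

/* standalone sketch, not driver code */
#include <stdio.h>

#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK   0x0000ffff
#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK  0xffff0000
#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16

int main(void)
{
	unsigned int vxlanctrl = 0;
	unsigned int vxlan_port = 4789, geneve_port = 6081;	/* example ports */

	/* add both tunnel ports: VXLAN uses shift 0, GENEVE uses shift 16 */
	vxlanctrl |= vxlan_port;
	vxlanctrl |= geneve_port << IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
	printf("VXLANCTRL = 0x%08x\n", vxlanctrl);

	/* clear only the GENEVE half, as ixgbe_clear_udp_tunnel_port() does */
	vxlanctrl &= ~IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
	printf("after GENEVE clear = 0x%08x\n", vxlanctrl);
	return 0;
}
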
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 4716ca499e67..7e6b9267ca9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -295,6 +295,12 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
@@ -1453,7 +1459,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
- if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
/* Get external PHY device id */
@@ -2114,6 +2120,50 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
return ixgbe_enable_lasi_ext_t_x550em(hw);
}
+/**
+ * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn on
+ **/
+static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
+ hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return 0;
+}
+
+/**
+ * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @led_idx: led number to turn off
+ **/
+static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+{
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+ return IXGBE_ERR_PARAM;
+
+ /* To turn off the LED, set mode to OFF. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
+ hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+
+ return 0;
+}
+
/** ixgbe_get_lcd_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
@@ -2344,18 +2394,12 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
* PHY address. This register field has only been used for X552.
*/
- if (!hw->phy.nw_mng_if_sel) {
- if (hw->mac.type == ixgbe_mac_x550em_a) {
- struct ixgbe_adapter *adapter = hw->back;
-
- e_warn(drv, "nw_mng_if_sel not set\n");
- }
- return;
+ if (hw->mac.type == ixgbe_mac_x550em_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
}
-
- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
}
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
@@ -2456,6 +2500,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
media_type = ixgbe_media_type_copper;
break;
default:
@@ -2514,6 +2559,9 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_SFP:
/* Config MDIO clock speed before the first MDIO PHY access */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -2853,8 +2901,6 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.write_analog_reg8 = NULL, \
.set_rxpba = &ixgbe_set_rxpba_generic, \
.check_link = &ixgbe_check_mac_link_generic, \
- .led_on = &ixgbe_led_on_generic, \
- .led_off = &ixgbe_led_off_generic, \
.blink_led_start = &ixgbe_blink_led_start_X540, \
.blink_led_stop = &ixgbe_blink_led_stop_X540, \
.set_rar = &ixgbe_set_rar_generic, \
@@ -2886,6 +2932,8 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
@@ -2904,6 +2952,8 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_t_x550em,
+ .led_off = ixgbe_led_off_t_x550em,
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2922,6 +2972,8 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
static struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
+ .led_on = ixgbe_led_on_t_x550em,
+ .led_off = ixgbe_led_off_t_x550em,
.reset_hw = ixgbe_reset_hw_X550em,
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
@@ -2997,6 +3049,8 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
.write_reg = &ixgbe_write_phy_reg_x550a,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index be52f597688b..5639fbe294d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -502,12 +502,9 @@ extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
-#ifdef DEBUG
-char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
-#define hw_dbg(hw, format, arg...) \
- printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(hw, format, arg...) do {} while (0)
-#endif
+#define ixgbevf_hw_to_netdev(hw) \
+ (((struct ixgbevf_adapter *)(hw)->back)->netdev)
+#define hw_dbg(hw, format, arg...) \
+ netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d9d6616f02a4..7eaac3234049 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1612,7 +1612,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
- pr_err("Could not enable Tx Queue %d\n", reg_idx);
+ hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
/**
@@ -1810,8 +1810,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
dev_err(&adapter->pdev->dev,
"Failed to set MTU at %d\n", netdev->mtu);
@@ -2993,6 +2995,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
**/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -3757,8 +3760,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index a52f70ec42b6..d46ba1dabcb7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -284,7 +284,8 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (addr)
ether_addr_copy(msg_addr, addr);
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -441,7 +442,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
ether_addr_copy(msg_addr, addr);
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -551,7 +553,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
msgbuf[1] = xcast_mode;
- err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (err)
return err;
@@ -588,7 +591,8 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (err)
goto mbx_err;
@@ -791,7 +795,8 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
- ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ sizeof(msgbuf) / sizeof(u32));
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -837,7 +842,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[1] = api;
msg[2] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg,
+ sizeof(msg) / sizeof(u32));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -887,7 +893,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg,
+ sizeof(msg) / sizeof(u32));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 8982c882af1b..a0d1b084ecec 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -211,8 +211,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!dev->regs) {
dev_err(&pdev->dev, "Unable to remap SMI register\n");
- ret = -ENODEV;
- goto out_mdio;
+ return -ENODEV;
}
init_waitqueue_head(&dev->smi_busy_wait);
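
The mvmdio change can drop the goto because devm_ioremap() is device-managed: when probe() fails, everything obtained through devm_* is released by the driver core, so the error path only needs to return. A hedged sketch of that probe idiom with generic names:

    #include <linux/platform_device.h>
    #include <linux/io.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *regs;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!r)
                    return -ENODEV;

            regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
            if (!regs)
                    return -ENOMEM; /* nothing to unwind: devm frees it for us */

            /* ... continue with devm-managed setup ... */
            return 0;
    }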
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b74548728fb5..5cb07c2017bf 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -400,7 +400,6 @@ struct mvneta_port {
u16 rx_ring_size;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
phy_interface_t phy_interface;
struct device_node *phy_node;
unsigned int link;
@@ -637,8 +636,9 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
}
/* Get System Network Statistics */
-struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static struct rtnl_link_stats64 *
+mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
@@ -2653,6 +2653,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx;
int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
+ struct net_device *ndev = pp->dev;
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
@@ -2670,7 +2671,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
(MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
- mvneta_fixed_link_update(pp, pp->phy_dev);
+ mvneta_fixed_link_update(pp, ndev->phydev);
}
}
@@ -2965,6 +2966,7 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
+ struct net_device *ndev = pp->dev;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2987,15 +2989,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
MVNETA_CAUSE_LINK_CHANGE |
MVNETA_CAUSE_PSC_SYNC_CHANGE);
- phy_start(pp->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
+ struct net_device *ndev = pp->dev;
- phy_stop(pp->phy_dev);
+ phy_stop(ndev->phydev);
for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
@@ -3168,7 +3171,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
static void mvneta_adjust_link(struct net_device *ndev)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = pp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
int status_change = 0;
if (phydev->link) {
@@ -3246,7 +3249,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
- pp->phy_dev = phy_dev;
pp->link = 0;
pp->duplex = 0;
pp->speed = 0;
@@ -3256,8 +3258,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
- phy_disconnect(pp->phy_dev);
- pp->phy_dev = NULL;
+ struct net_device *ndev = pp->dev;
+
+ phy_disconnect(ndev->phydev);
}
/* Electing a CPU must be done in an atomic way: it should be done
@@ -3515,42 +3518,31 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mvneta_port *pp = netdev_priv(dev);
-
- if (!pp->phy_dev)
+ if (!dev->phydev)
return -ENOTSUPP;
- return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
/* Ethtool methods */
-/* Get settings (phy address, speed) for ethtools */
-int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+/* Set link ksettings (phy address, speed) for ethtools */
+static int
+mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mvneta_port *pp = netdev_priv(dev);
-
- if (!pp->phy_dev)
- return -ENODEV;
-
- return phy_ethtool_gset(pp->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mvneta_port *pp = netdev_priv(dev);
- struct phy_device *phydev = pp->phy_dev;
+ struct mvneta_port *pp = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
if (!phydev)
return -ENODEV;
- if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
u32 val;
- mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
+ mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
- if (cmd->autoneg == AUTONEG_DISABLE) {
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
@@ -3567,17 +3559,17 @@ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
- pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
+ pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
netdev_info(pp->dev, "autoneg status set to %i\n",
pp->use_inband_status);
- if (netif_running(dev)) {
+ if (netif_running(ndev)) {
mvneta_port_down(pp);
mvneta_port_up(pp);
}
}
- return phy_ethtool_sset(pp->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
/* Set interrupt coalescing for ethtools */
@@ -3841,8 +3833,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
const struct ethtool_ops mvneta_eth_tool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = mvneta_ethtool_get_settings,
- .set_settings = mvneta_ethtool_set_settings,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
.get_drvinfo = mvneta_ethtool_get_drvinfo,
@@ -3855,6 +3845,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.get_rxnfc = mvneta_ethtool_get_rxnfc,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
};
/* Initialize hw */
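
Most of the mvneta diff drops the driver-private phy_dev pointer in favour of the phydev already attached to the net_device, which is what allows the legacy get_settings/set_settings hooks to be replaced by the link_ksettings API, with the get side served by the generic phylib helper. A minimal sketch of that shape (hypothetical 'foo' driver, not mvneta itself):

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>
    #include <linux/phy.h>

    static int foo_set_link_ksettings(struct net_device *ndev,
                                      const struct ethtool_link_ksettings *cmd)
    {
            if (!ndev->phydev)
                    return -ENODEV;

            /* driver-specific use of cmd->base.autoneg, speed, ... goes here */

            return phy_ethtool_ksettings_set(ndev->phydev, cmd);
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_link           = ethtool_op_get_link,
            /* generic helper works once the PHY sits in ndev->phydev */
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = foo_set_link_ksettings,
    };

The ksettings structures replace the fixed 32-bit link-mode masks of struct ethtool_cmd with link-mode bitmaps, which is why the conversion also switches cmd->autoneg accesses to cmd->base.autoneg.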
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e74fd44a92f7..a32de432800c 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -133,7 +133,7 @@ struct mvneta_bm_pool {
void *mvneta_frag_alloc(unsigned int frag_size);
void mvneta_frag_free(unsigned int frag_size, void *data);
-#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
+#if IS_ENABLED(CONFIG_MVNETA_BM)
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
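
The mvneta_bm.h hunk is the usual kconfig.h cleanup: IS_ENABLED(CONFIG_MVNETA_BM) is true for both built-in (=y) and modular (=m) builds, replacing the hand-written pair of defined() tests. The pattern in isolation, with a made-up CONFIG_FOO symbol:

    #include <linux/kconfig.h>

    /* old form: spell out both the built-in and the module symbol */
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    void foo_do_something(void);
    #else
    static inline void foo_do_something(void) { }
    #endif

    /* new form: IS_ENABLED() covers =y and =m in one test */
    #if IS_ENABLED(CONFIG_FOO)
    void foo_do_other_thing(void);
    #else
    static inline void foo_do_other_thing(void) { }
    #endif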
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 467138b423d3..f05ea56dcff2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3070,7 +3070,7 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
goto done;
}
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
sky2_read32(hw, B0_Y2_SP_LISR);
done:
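
The one-line sky2 change reports how much work the poll actually did via napi_complete_done(), which lets the core's heuristics (e.g. busy polling and gro_flush_timeout) see the count instead of discarding it. A sketch of the usual poll() shape, with placeholder helpers:

    #include <linux/netdevice.h>

    struct bar_priv {
            struct napi_struct napi;
            /* ... */
    };

    int bar_clean_rx(struct bar_priv *priv, int budget);    /* hypothetical */
    void bar_enable_irq(struct bar_priv *priv);             /* hypothetical */

    static int bar_poll(struct napi_struct *napi, int budget)
    {
            struct bar_priv *priv = container_of(napi, struct bar_priv, napi);
            int work_done = bar_clean_rx(priv, budget);

            if (work_done < budget) {
                    /* tell the core how much was done, then re-arm the IRQ */
                    napi_complete_done(napi, work_done);
                    bar_enable_irq(priv);
            }

            return work_done;
    }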
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3743af8f1ded..ad4ab979507b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
@@ -51,7 +52,7 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "esw", "gp1", "gp2"
+ "ethif", "esw", "gp1", "gp2", "trgpll"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -134,6 +135,33 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+{
+ u32 val;
+ int ret;
+
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -144,7 +172,10 @@ static void mtk_phy_link_adjust(struct net_device *dev)
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
MAC_MCR_BACKPR_EN;
- switch (mac->phy_dev->speed) {
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
+ switch (dev->phydev->speed) {
case SPEED_1000:
mcr |= MAC_MCR_SPEED_1000;
break;
@@ -153,20 +184,23 @@ static void mtk_phy_link_adjust(struct net_device *dev)
break;
};
- if (mac->phy_dev->link)
+ if (mac->id == 0 && !mac->trgmii)
+ mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
+
+ if (dev->phydev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex) {
+ if (dev->phydev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
+ if (dev->phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
- if (mac->phy_dev->asym_pause)
+ if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Pause)
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
@@ -183,7 +217,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (mac->phy_dev->link)
+ if (dev->phydev->link)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
@@ -192,17 +226,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
struct device_node *phy_node)
{
- const __be32 *_addr = NULL;
struct phy_device *phydev;
- int phy_mode, addr;
-
- _addr = of_get_property(phy_node, "reg", NULL);
+ int phy_mode;
- if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
- pr_err("%s: invalid phy address\n", phy_node->name);
- return -EINVAL;
- }
- addr = be32_to_cpu(*_addr);
phy_mode = of_get_phy_mode(phy_node);
if (phy_mode < 0) {
dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
@@ -221,17 +247,17 @@ static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
mac->id, phydev_name(phydev), phydev->phy_id,
phydev->drv->name);
- mac->phy_dev = phydev;
-
return 0;
}
-static int mtk_phy_connect(struct mtk_mac *mac)
+static int mtk_phy_connect(struct net_device *dev)
{
- struct mtk_eth *eth = mac->hw;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth;
struct device_node *np;
- u32 val, ge_mode;
+ u32 val;
+ eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
if (!np && of_phy_is_fixed_link(mac->of_node))
if (!of_phy_register_fixed_link(mac->of_node))
@@ -240,22 +266,24 @@ static int mtk_phy_connect(struct mtk_mac *mac)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ mac->trgmii = true;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
- ge_mode = 0;
+ mac->ge_mode = 0;
break;
case PHY_INTERFACE_MODE_MII:
- ge_mode = 1;
+ mac->ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
- ge_mode = 2;
+ mac->ge_mode = 2;
break;
case PHY_INTERFACE_MODE_RMII:
if (!mac->id)
goto err_phy;
- ge_mode = 3;
+ mac->ge_mode = 3;
break;
default:
goto err_phy;
@@ -264,23 +292,26 @@ static int mtk_phy_connect(struct mtk_mac *mac)
/* put the gmac into the right mode */
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- mtk_phy_connect_node(eth, mac, np);
- mac->phy_dev->autoneg = AUTONEG_ENABLE;
- mac->phy_dev->speed = 0;
- mac->phy_dev->duplex = 0;
+ /* couple phydev to net_device */
+ if (mtk_phy_connect_node(eth, mac, np))
+ goto err_phy;
+
+ dev->phydev->autoneg = AUTONEG_ENABLE;
+ dev->phydev->speed = 0;
+ dev->phydev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
- mac->phy_dev->supported |=
+ dev->phydev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
- mac->phy_dev->advertising = mac->phy_dev->supported |
+ dev->phydev->advertising = dev->phydev->supported |
ADVERTISED_Autoneg;
- phy_start_aneg(mac->phy_dev);
+ phy_start_aneg(dev->phydev);
of_node_put(np);
@@ -288,7 +319,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
err_phy:
of_node_put(np);
- dev_err(eth->dev, "invalid phy_mode\n");
+ dev_err(eth->dev, "%s: invalid phy\n", __func__);
return -EINVAL;
}
@@ -336,25 +367,27 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
}
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+static inline void mtk_irq_disable(struct mtk_eth *eth,
+ unsigned reg, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, reg);
+ mtk_w32(eth, val & ~mask, reg);
spin_unlock_irqrestore(&eth->irq_lock, flags);
}
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
+static inline void mtk_irq_enable(struct mtk_eth *eth,
+ unsigned reg, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, reg);
+ mtk_w32(eth, val | mask, reg);
spin_unlock_irqrestore(&eth->irq_lock, flags);
}
@@ -363,18 +396,20 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
const char *macaddr = dev->dev_addr;
- unsigned long flags;
if (ret)
return ret;
- spin_lock_irqsave(&mac->hw->page_lock, flags);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ spin_lock_bh(&mac->hw->page_lock);
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MTK_GDMA_MAC_ADRH(mac->id));
mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
(macaddr[4] << 8) | macaddr[5],
MTK_GDMA_MAC_ADRL(mac->id));
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
+ spin_unlock_bh(&mac->hw->page_lock);
return 0;
}
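
Several mtk_eth_soc hunks in this file relax page_lock from spin_lock_irqsave() to spin_lock_bh() (and to plain spin_lock() in the xmit path, which already runs with BHs disabled); the rationale is that the lock is only contended between process and softirq context, never a hard-IRQ handler, so disabling interrupts gains nothing. The idea in a reduced sketch:

    #include <linux/spinlock.h>
    #include <linux/skbuff.h>

    struct qdma_priv {
            spinlock_t ring_lock;   /* process context vs. NAPI/xmit softirq */
    };

    void qdma_write_mac(struct qdma_priv *priv, const u8 *addr);       /* hypothetical */
    void qdma_queue_skb(struct qdma_priv *priv, struct sk_buff *skb);  /* hypothetical */

    static void set_mac_process_ctx(struct qdma_priv *priv, const u8 *addr)
    {
            spin_lock_bh(&priv->ring_lock);         /* block local softirqs */
            qdma_write_mac(priv, addr);
            spin_unlock_bh(&priv->ring_lock);
    }

    static void xmit_softirq_ctx(struct qdma_priv *priv, struct sk_buff *skb)
    {
            spin_lock(&priv->ring_lock);            /* BHs are already off here */
            qdma_queue_skb(priv, skb);
            spin_unlock(&priv->ring_lock);
    }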
@@ -759,7 +794,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
- unsigned long flags;
bool gso = false;
int tx_num;
@@ -767,14 +801,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
* however we have 2 queues running on the same ring so we need to lock
* the ring access
*/
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock(&eth->page_lock);
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto drop;
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_BUSY;
}
@@ -799,22 +836,62 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
return NETDEV_TX_OK;
drop:
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock(&eth->page_lock);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return &eth->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = &eth->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
- int idx = ring->calc_idx;
+ struct mtk_rx_ring *ring;
+ int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
@@ -826,7 +903,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_addr_t dma_addr;
int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx);
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -841,6 +922,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev = eth->netdev[mac];
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+
/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (unlikely(!new_data)) {
@@ -890,17 +974,19 @@ release_desc:
rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx;
+
+ done++;
+ }
+
+rx_done:
+ if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
- mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
- done++;
+ mtk_update_rx_cpu_idx(eth);
}
- if (done < budget)
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
-
return done;
}
@@ -1009,7 +1095,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1019,30 +1105,33 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
u32 status, mask;
int rx_done = 0;
+ int remain_budget = budget;
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
- rx_done = mtk_poll_rx(napi, budget, eth);
+
+poll_again:
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, remain_budget, eth);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n",
rx_done, status, mask);
}
-
- if (rx_done == budget)
- return budget;
-
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (status & MTK_RX_DONE_INT)
+ if (rx_done == remain_budget)
return budget;
+ status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ if (status & MTK_RX_DONE_INT) {
+ remain_budget -= rx_done;
+ goto poll_again;
+ }
napi_complete(napi);
- mtk_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
- return rx_done;
+ return rx_done + budget - remain_budget;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
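
The reworked mtk_napi_rx() above acknowledges RX_DONE in the PDMA status register, polls, and loops again while the status register still shows pending completions and budget remains, so the value returned to the NAPI core is the total number of packets processed across all passes. Its control flow, reduced to a skeleton with hypothetical helpers:

    #include <linux/netdevice.h>

    struct eth_hw {
            struct napi_struct rx_napi;
    };

    void eth_ack_rx_irq(struct eth_hw *hw);                              /* hypothetical */
    int eth_poll_rx(struct napi_struct *napi, int budget, struct eth_hw *hw);
    bool eth_rx_irq_pending(struct eth_hw *hw);
    void eth_enable_rx_irq(struct eth_hw *hw);

    static int eth_napi_rx(struct napi_struct *napi, int budget)
    {
            struct eth_hw *hw = container_of(napi, struct eth_hw, rx_napi);
            int remain = budget;
            int done;

            do {
                    eth_ack_rx_irq(hw);                     /* clear RX_DONE status */
                    done = eth_poll_rx(napi, remain, hw);   /* up to 'remain' packets */
                    if (done == remain)
                            return budget;                  /* budget used up: stay scheduled */
                    remain -= done;
            } while (eth_rx_irq_pending(hw));

            napi_complete(napi);
            eth_enable_rx_irq(hw);

            return budget - remain;                         /* total processed */
    }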
@@ -1089,6 +1178,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
return 0;
@@ -1117,32 +1207,41 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
}
-static int mtk_rx_alloc(struct mtk_eth *eth)
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+ int rx_data_len, rx_dma_size;
int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
- ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (!ring->data[i])
return -ENOMEM;
}
ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ rx_dma_size * sizeof(*ring->dma),
&ring->phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->dma)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
ring->data[i] + NET_SKB_PAD,
ring->buf_size,
@@ -1153,28 +1252,30 @@ static int mtk_rx_alloc(struct mtk_eth *eth)
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
- ring->calc_idx = MTK_DMA_SIZE - 1;
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+ mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+ mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0;
}
-static void mtk_rx_clean(struct mtk_eth *eth)
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
int i;
if (ring->data && ring->dma) {
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring->dma_size; i++) {
if (!ring->data[i])
continue;
if (!ring->dma[i].rxd1)
@@ -1191,13 +1292,275 @@ static void mtk_rx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
ring->dma = NULL;
}
}
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+ u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+ /* set LRO rings to auto-learn modes */
+ ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+ /* validate LRO ring */
+ ring_ctrl_dw2 |= MTK_RING_VLD;
+
+ /* set AGE timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+ ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+ /* set max AGG timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+ /* set max LRO AGG count */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+ ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+ }
+
+ /* IPv4 checksum update enable */
+ lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+ /* switch priority comparison to packet count mode */
+ lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+ /* bandwidth threshold setting */
+ mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+ /* auto-learn score delta setting */
+ mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+ /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+ mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+ MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+ /* set HW LRO mode & the max aggregation count for rx packets */
+ lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+ /* the minimal remaining room of SDL0 in RXD for lro aggregation */
+ lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+ /* enable HW LRO */
+ lro_ctrl_dw0 |= MTK_LRO_EN;
+
+ mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+ mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+ return 0;
+}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+ /* wait for relinquishments done */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* only tcp dst ipv4 is meaningful, others are meaningless */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ return features;
+}
+
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ int err = 0;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if (!(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ return err;
+}
+
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
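
mtk_fix_features() and mtk_set_features() above tie NETIF_F_LRO to the programmed flow rules: ndo_fix_features vetoes clearing the flag while any HW LRO IP rule exists, and an actual clear is what triggers mtk_hwlro_netdev_disable(). The general veto pattern, with a hypothetical driver struct and teardown helper:

    #include <linux/netdevice.h>

    struct lro_priv {
            int lro_rule_cnt;       /* number of programmed HW flow rules */
    };

    void lro_hw_disable(struct net_device *dev);    /* hypothetical teardown */

    static netdev_features_t lro_fix_features(struct net_device *dev,
                                              netdev_features_t features)
    {
            struct lro_priv *priv = netdev_priv(dev);

            /* refuse to drop LRO while hardware rules still depend on it */
            if (!(features & NETIF_F_LRO) && priv->lro_rule_cnt) {
                    netdev_info(dev, "flow rules programmed, keeping LRO on\n");
                    features |= NETIF_F_LRO;
            }

            return features;
    }

    static int lro_set_features(struct net_device *dev, netdev_features_t features)
    {
            if (((dev->features ^ features) & NETIF_F_LRO) &&
                !(features & NETIF_F_LRO))
                    lro_hw_disable(dev);

            return 0;
    }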
@@ -1218,6 +1581,7 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
+ u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
@@ -1233,10 +1597,21 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (err)
return err;
- err = mtk_rx_alloc(eth);
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
+ if (eth->hwlro) {
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+ if (err)
+ return err;
+ }
+ err = mtk_hwlro_rx_init(eth);
+ if (err)
+ return err;
+ }
+
/* Enable random early drop and set drop threshold automatically */
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
MTK_QDMA_FC_THRES);
@@ -1261,7 +1636,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth);
+ mtk_rx_clean(eth, 0);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, i);
+ }
+
kfree(eth->scratch_head);
}
@@ -1282,7 +1664,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1294,7 +1676,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
- mtk_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1305,11 +1687,12 @@ static void mtk_poll_controller(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
- mtk_irq_disable(eth, int_mask);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_irq_enable(eth, int_mask);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif
@@ -1324,11 +1707,15 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
- MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+ MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ MTK_PDMA_GLO_CFG);
+
return 0;
}
@@ -1346,11 +1733,12 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
atomic_inc(&eth->dma_refcnt);
- phy_start(mac->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -1358,16 +1746,15 @@ static int mtk_open(struct net_device *dev)
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
- unsigned long flags;
u32 val;
int i;
/* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
+ spin_lock_bh(&eth->page_lock);
val = mtk_r32(eth, glo_cfg);
mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
+ spin_unlock_bh(&eth->page_lock);
/* wait for dma stop */
for (i = 0; i < 10; i++) {
@@ -1386,32 +1773,63 @@ static int mtk_stop(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
netif_tx_disable(dev);
- phy_stop(mac->phy_dev);
+ phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */
if (!atomic_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
return 0;
}
-static int __init mtk_hw_init(struct mtk_eth *eth)
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
- int err, i;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ reset_bits);
+
+ usleep_range(1000, 1100);
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ ~reset_bits);
+ mdelay(10);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+ int i, val;
+
+ if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
+
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
+ ethsys_reset(eth, RSTCTRL_FE);
+ ethsys_reset(eth, RSTCTRL_PPE);
- /* reset the frame engine */
- reset_control_assert(eth->rstc);
- usleep_range(10, 20);
- reset_control_deassert(eth->rstc);
- usleep_range(10, 20);
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i])
+ continue;
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
+ val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+ }
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
/* Set GE2 driving and slew rate */
regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
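
mtk_hw_init() now resets the frame engine and PPE through the ethsys syscon rather than a reset controller: ethsys_reset() asserts the reset bits with regmap_update_bits(), waits, then de-asserts them (the driver passes ~reset_bits as the value, which the mask reduces to clearing those bits). The same assert/delay/de-assert pattern in a generic form, with a placeholder register offset:

    #include <linux/regmap.h>
    #include <linux/delay.h>

    #define SYS_RSTCTRL     0x34    /* placeholder syscon register offset */

    static void syscon_block_reset(struct regmap *sys, u32 reset_bits)
    {
            /* assert: set the reset bits under the mask */
            regmap_update_bits(sys, SYS_RSTCTRL, reset_bits, reset_bits);
            usleep_range(1000, 1100);

            /* de-assert: clear the same bits again */
            regmap_update_bits(sys, SYS_RSTCTRL, reset_bits, 0);
            mdelay(10);
    }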
@@ -1431,22 +1849,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
- err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, ~0);
+ mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1460,9 +1867,8 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
for (i = 0; i < 2; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
- /* setup the forward port to send frame to QDMA */
+ /* setup the forward port to send frame to PDMA */
val &= ~0xffff;
- val |= 0x5555;
/* Enable RX checksum */
val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
@@ -1474,6 +1880,22 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
return 0;
}
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+ if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+
+ return 0;
+}
+
static int __init mtk_init(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -1492,7 +1914,7 @@ static int __init mtk_init(struct net_device *dev)
dev->addr_assign_type = NET_ADDR_RANDOM;
}
- return mtk_phy_connect(mac);
+ return mtk_phy_connect(dev);
}
static void mtk_uninit(struct net_device *dev)
@@ -1500,19 +1922,18 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(mac->phy_dev);
- mtk_irq_disable(eth, ~0);
+ phy_disconnect(dev->phydev);
+ mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
+ mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
-
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
default:
break;
}
@@ -1528,6 +1949,12 @@ static void mtk_pending_work(struct work_struct *work)
rtnl_lock();
+ dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+ while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+ cpu_relax();
+
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
@@ -1535,6 +1962,27 @@ static void mtk_pending_work(struct work_struct *work)
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+ /* restart underlying hardware such as power, clock, pin mux
+ * and the connected phy
+ */
+ mtk_hw_deinit(eth);
+
+ if (eth->dev->pins)
+ pinctrl_select_state(eth->dev->pins->p,
+ eth->dev->pins->default_state);
+ mtk_hw_init(eth);
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i] ||
+ of_phy_is_fixed_link(eth->mac[i]->of_node))
+ continue;
+ err = phy_init_hw(eth->netdev[i]->phydev);
+ if (err)
+ dev_err(eth->dev, "%s: PHY init failed.\n",
+ eth->netdev[i]->name);
+ }
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -1547,51 +1995,69 @@ static void mtk_pending_work(struct work_struct *work)
dev_close(eth->netdev[i]);
}
}
+
+ dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+ clear_bit_unlock(MTK_RESETTING, &eth->state);
+
rtnl_unlock();
}
-static int mtk_cleanup(struct mtk_eth *eth)
+static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
+ free_netdev(eth->netdev[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+ int i;
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
unregister_netdev(eth->netdev[i]);
- free_netdev(eth->netdev[i]);
}
+
+ return 0;
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+ mtk_unreg_dev(eth);
+ mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
return 0;
}
-static int mtk_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
+ struct mtk_mac *mac = netdev_priv(ndev);
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_gset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}
-static int mtk_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_mac *mac = netdev_priv(ndev);
- if (cmd->phy_address != mac->phy_dev->mdio.addr) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- }
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_sset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
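
A guard added here and in the ioctl, xmit, RX and remaining ethtool paths is the MTK_RESETTING state bit: mtk_pending_work() takes it with test_and_set_bit_lock() before re-initialising the hardware, and the fast paths return -EBUSY (or simply bail out) while it is set. The shape of that guard as a sketch:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    enum { HW_RESETTING };          /* bit index inside a driver 'state' word */

    static int fast_path_op(unsigned long *state)
    {
            if (test_bit(HW_RESETTING, state))
                    return -EBUSY;  /* hardware is being re-initialised */

            /* ... normal register access ... */
            return 0;
    }

    static void reset_worker(unsigned long *state)
    {
            /* acquire semantics: spin until we own the bit */
            while (test_and_set_bit_lock(HW_RESETTING, state))
                    cpu_relax();

            /* ... stop devices, deinit and re-init the hardware ... */

            clear_bit_unlock(HW_RESETTING, state);  /* release semantics */
    }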
@@ -1622,7 +2088,10 @@ static int mtk_nway_reset(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
- return genphy_restart_aneg(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ return genphy_restart_aneg(dev->phydev);
}
static u32 mtk_get_link(struct net_device *dev)
@@ -1630,11 +2099,14 @@ static u32 mtk_get_link(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
int err;
- err = genphy_update_link(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ err = genphy_update_link(dev->phydev);
if (err)
return ethtool_op_get_link(dev);
- return mac->phy_dev->link;
+ return dev->phydev->link;
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1670,6 +2142,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
unsigned int start;
int i;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
if (netif_running(dev) && netif_device_present(dev)) {
if (spin_trylock(&hwstats->stats_lock)) {
mtk_stats_update_mac(mac);
@@ -1677,8 +2152,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
}
}
+ data_src = (u64 *)hwstats;
+
do {
- data_src = (u64*)hwstats;
data_dst = data;
start = u64_stats_fetch_begin_irq(&hwstats->syncp);
@@ -1687,9 +2163,65 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
-static struct ethtool_ops mtk_ethtool_ops = {
- .get_settings = mtk_get_settings,
- .set_settings = mtk_set_settings,
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (dev->features & NETIF_F_LRO) {
+ cmd->data = MTK_MAX_RX_RING_NUM;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (dev->features & NETIF_F_LRO) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ cmd->rule_cnt = mac->hwlro_ip_cnt;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_all(dev, cmd,
+ rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_add_ipaddr(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_del_ipaddr(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops mtk_ethtool_ops = {
+ .get_link_ksettings = mtk_get_link_ksettings,
+ .set_link_ksettings = mtk_set_link_ksettings,
.get_drvinfo = mtk_get_drvinfo,
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
@@ -1698,6 +2230,8 @@ static struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_rxnfc = mtk_get_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -1712,6 +2246,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
+ .ndo_fix_features = mtk_fix_features,
+ .ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
@@ -1750,6 +2286,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw = eth;
mac->of_node = np;
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ mac->hwlro_ip_cnt = 0;
+
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
@@ -1766,21 +2305,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+ eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ if (eth->hwlro)
+ eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
eth->netdev[id]->features |= MTK_HW_FEATURES;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- goto free_netdev;
- }
eth->netdev[id]->irq = eth->irq[0];
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->irq[0]);
-
return 0;
free_netdev:
@@ -1827,11 +2362,7 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
- eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
- if (IS_ERR(eth->rstc)) {
- dev_err(&pdev->dev, "no eth reset found\n");
- return PTR_ERR(eth->rstc);
- }
+ eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
@@ -1850,11 +2381,6 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
- clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
- clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
- clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
-
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@@ -1872,7 +2398,35 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_add_mac(eth, mac_np);
if (err)
- goto err_free_dev;
+ goto err_deinit_hw;
+ }
+
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ err = register_netdev(eth->netdev[i]);
+ if (err) {
+ dev_err(eth->dev, "error bringing up device\n");
+ goto err_deinit_mdio;
+ } else
+ netif_info(eth, probe, eth->netdev[i],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+ eth->netdev[i]->base_addr, eth->irq[0]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
@@ -1888,8 +2442,13 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_deinit_mdio:
+ mtk_mdio_cleanup(eth);
err_free_dev:
- mtk_cleanup(eth);
+ mtk_free_dev(eth);
+err_deinit_hw:
+ mtk_hw_deinit(eth);
+
return err;
}
@@ -1905,16 +2464,12 @@ static int mtk_remove(struct platform_device *pdev)
mtk_stop(eth->netdev[i]);
}
- clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
- clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
- clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
- clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+ mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 6e1ade7a25c5..30031959d6de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -39,7 +39,21 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
@@ -50,6 +64,9 @@
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
@@ -68,10 +85,77 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* PDMA RX Base Pointer Register */
+#define MTK_PRX_BASE_PTR0 0x900
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+
+/* PDMA RX Maximum Count Register */
+#define MTK_PRX_MAX_CNT0 0x904
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+
+/* PDMA RX CPU Pointer Register */
+#define MTK_PRX_CRX_IDX0 0x908
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 0x980
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 0x984
+#define MTK_PDMA_LRO_CTRL_DW2 0x988
+#define MTK_PDMA_LRO_CTRL_DW3 0x98c
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_MULTI_EN BIT(10)
+
+/* PDMA Reset Index Register */
+#define MTK_PDMA_RST_IDX 0xa08
+#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_INT 0xa0c
+
+/* PDMA Interrupt Status Register */
+#define MTK_PDMA_INT_STATUS 0xa20
+
+/* PDMA Interrupt Mask Register */
+#define MTK_PDMA_INT_MASK 0xa28
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
+
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 0xa50
#define MTK_PDMA_INT_GRP2 0xa54
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
+#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
+#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -106,7 +190,6 @@
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_PST_DRX_IDX0 BIT(16)
/* QDMA Delay Interrupt Register */
#define MTK_QDMA_DELAY_INT 0x1A0C
@@ -119,13 +202,16 @@
/* QDMA Interrupt Status Register */
#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_INT3 BIT(19)
+#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
#define MTK_RX_DONE_INT0 BIT(16)
#define MTK_TX_DONE_INT3 BIT(3)
#define MTK_TX_DONE_INT2 BIT(2)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
-#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1)
+#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
+ MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
@@ -227,6 +313,30 @@
MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -237,6 +347,15 @@
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -298,9 +417,15 @@ enum mtk_clks_map {
MTK_CLK_ESW,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_TRGPLL,
MTK_CLK_MAX
};
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptor s
* @skb: The SKB pointer of the packet being sent
@@ -338,6 +463,12 @@ struct mtk_tx_ring {
atomic_t free_count;
};
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ MTK_RX_FLAGS_HWLRO,
+};
+
/* struct mtk_rx_ring - This struct holds info describing a RX ring
* @dma: The descriptor ring
* @data: The memory pointed at by the ring
@@ -352,7 +483,10 @@ struct mtk_rx_ring {
dma_addr_t phys;
u16 frag_size;
u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
u16 calc_idx;
+ u32 crx_idx_reg;
};
/* currently no SoC has more than 2 macs */
@@ -384,12 +518,12 @@ struct mtk_rx_ring {
* @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
+ * @state Initialization and runtime state of the device.
*/
struct mtk_eth {
struct device *dev;
void __iomem *base;
- struct reset_control *rstc;
spinlock_t page_lock;
spinlock_t irq_lock;
struct net_device dummy_dev;
@@ -400,9 +534,10 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
+ bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
@@ -412,22 +547,28 @@ struct mtk_eth {
struct mii_bus *mii_bus;
struct work_struct pending_work;
+ unsigned long state;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
* SoC
* @id: The number of the MAC
+ * @ge_mode: Interface mode kept for setup restoring
* @of_node: Our devicetree node
* @hw: Backpointer to our main datastruture
* @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
+ * @trgmii: Indicates if the MAC uses TRGMII connected to the internal
+ *          switch
*/
struct mtk_mac {
int id;
+ int ge_mode;
struct device_node *of_node;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+ bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f04a423ff79d..b1cef7a0f7ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -785,17 +785,23 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ int ret;
+
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_internal_err_ret_value(dev, op,
op_modifier);
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
@@ -1845,6 +1851,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
+ vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.link_state == vp_admin->link_state &&
vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
@@ -1903,6 +1910,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.qos_vport = vp_admin->qos_vport;
@@ -1916,6 +1924,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
+ work->vlan_proto = vp_oper->state.vlan_proto;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
@@ -1986,6 +1995,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
int min_port = find_first_bit(actv_ports.ports,
@@ -2000,12 +2011,26 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vp_oper->state = *vp_admin;
+ if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
+ slave_state->vst_qinq_supported) {
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+ }
+ vp_oper->state.link_state = vp_admin->link_state;
+ vp_oper->state.mac = vp_admin->mac;
+ vp_oper->state.spoofchk = vp_admin->spoofchk;
+ vp_oper->state.tx_rate = vp_admin->tx_rate;
+ vp_oper->state.qos_vport = vp_admin->qos_vport;
+ vp_oper->state.guid = vp_admin->guid;
+
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
+ vp_oper->state.default_vlan = MLX4_VGT;
+ vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
@@ -2086,6 +2111,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
+ slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2353,6 +2379,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
@@ -2382,6 +2409,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
admin_vport->qos_vport =
MLX4_VPP_DEFAULT_VPORT;
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+ admin_vport->vlan_proto = htons(ETH_P_8021Q);
+ oper_vport->vlan_proto = htons(ETH_P_8021Q);
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
mlx4_set_random_admin_guid(dev, i, port);
@@ -2454,6 +2483,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
int flags = 0;
if (!priv->cmd.initialized) {
+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -2583,6 +2613,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
@@ -2606,6 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
return err;
}
@@ -2618,6 +2650,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -2626,6 +2659,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
@@ -2937,10 +2971,13 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
+ __be16 proto)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *vf_admin;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_oper_state *vf_oper;
int slave;
if ((!mlx4_is_master(dev)) ||
@@ -2950,12 +2987,31 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (proto == htons(ETH_P_8021AD) &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
+ return -EPROTONOSUPPORT;
+
+ if (proto != htons(ETH_P_8021Q) &&
+ proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ if ((proto == htons(ETH_P_8021AD)) &&
+ ((vlan == 0) || (vlan == MLX4_VGT)))
+ return -EINVAL;
+
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
+ slave_state = &priv->mfunc.master.slave_state[slave];
+ if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
+ (!slave_state->vst_qinq_supported)) {
+ mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
+ return -EPROTONOSUPPORT;
+ }
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
return -EPERM;
@@ -2965,6 +3021,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
+ vf_admin->vlan_proto = proto;
/* If rate was configured prior to VST, we saved the configured rate
* in vf_admin->rate and now, if priority supported we enforce the QoS
@@ -2973,7 +3030,12 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
vf_admin->tx_rate)
vf_admin->qos_vport = slave;
- if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
+ /* Try to activate the new VF state without a restart;
+ * this option is not supported while moving to VST QinQ mode.
+ */
+ if ((proto == htons(ETH_P_8021AD) &&
+ vf_oper->state.vlan_proto != proto) ||
+ mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
vf, port);
@@ -3117,6 +3179,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
+ ivf->vlan_proto = s_info->vlan_proto;
if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
ivf->max_tx_rate = s_info->tx_rate;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..08fc5fc56d43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -298,7 +298,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
mlx4_err(mdev, "ptp_clock_register failed\n");
- } else {
+ } else if (mdev->ptp_clock) {
mlx4_info(mdev, "registered PHC clock\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fedb829276f4..7e703bed7b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2400,12 +2400,14 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}
-static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+ vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
@@ -2643,12 +2645,16 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (IS_ERR(prog))
return PTR_ERR(prog);
}
+ mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
- /* This xchg is paired with READ_ONCE in the fastpath */
- old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ old_prog = rcu_dereference_protected(
+ priv->rx_ring[i]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
+ mutex_unlock(&mdev->state_lock);
return 0;
}
@@ -2681,7 +2687,10 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
priv->xdp_ring_num);
for (i = 0; i < priv->rx_ring_num; i++) {
- old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ old_prog = rcu_dereference_protected(
+ priv->rx_ring[i]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
@@ -3217,6 +3226,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mlx4_is_slave(mdev->dev)) {
+ bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
@@ -3224,6 +3234,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
+ err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
+ &vlan_offload_disabled);
+ if (!err && vlan_offload_disabled) {
+ dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2040dad8611d..f2e8beddcf44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
frag_info->dma_dir);
- if (dma_mapping_error(priv->ddev, dma)) {
+ if (unlikely(dma_mapping_error(priv->ddev, dma))) {
put_page(page);
return -ENOMEM;
}
@@ -108,7 +108,8 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
ring_alloc[i].page_size)
continue;
- if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+ if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
+ frag_info, gfp)))
goto out;
}
@@ -537,7 +538,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring = *pring;
struct bpf_prog *old_prog;
- old_prog = READ_ONCE(ring->xdp_prog);
+ old_prog = rcu_dereference_protected(
+ ring->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
@@ -583,7 +586,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
- if (!frags[nr].page)
+ if (unlikely(!frags[nr].page))
goto fail;
dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -623,7 +626,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
dma_addr_t dma;
skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (!skb) {
+ if (unlikely(!skb)) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
@@ -734,7 +737,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
{
__wsum csum_pseudo_hdr = 0;
- if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+ ipv6h->nexthdr == IPPROTO_HOPOPTS))
return -1;
hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
@@ -767,7 +771,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
return -1;
#endif
return 0;
@@ -794,13 +798,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
u64 timestamp;
bool l2_tunnel;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return 0;
- if (budget <= 0)
+ if (unlikely(budget <= 0))
return polled;
- xdp_prog = READ_ONCE(ring->xdp_prog);
+ /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(ring->xdp_prog);
doorbell_pending = 0;
tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
@@ -858,15 +864,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
- rcu_read_lock();
hlist_for_each_entry_rcu(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
- ethh->h_source)) {
- rcu_read_unlock();
+ ethh->h_source))
goto next;
- }
}
- rcu_read_unlock();
}
}
@@ -902,16 +904,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (!mlx4_en_xmit_frame(frags, dev,
+ if (likely(!mlx4_en_xmit_frame(frags, dev,
length, tx_index,
- &doorbell_pending))
+ &doorbell_pending)))
goto consumed;
- break;
+ goto xdp_drop; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
- if (mlx4_en_rx_recycle(ring, frags))
+xdp_drop:
+ if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
@@ -1015,12 +1018,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (!skb) {
+ if (unlikely(!skb)) {
ring->dropped++;
goto next;
}
- if (unlikely(priv->validate_loopback)) {
+ if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, skb);
goto next;
}
@@ -1077,6 +1080,7 @@ consumed:
}
out:
+ rcu_read_unlock();
if (doorbell_pending)
mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d728704d0c7b..c41ab31a39f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -158,7 +158,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
- [34] = "DMFS Sniffer support (UC & MC)"
+ [34] = "DMFS Sniffer support (UC & MC)",
+ [35] = "QinQ VST mode support",
+ [36] = "sl to vl mapping table change event support"
};
int i;
@@ -248,6 +250,72 @@ out:
return err;
}
+static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+ if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &vp_oper->vlan_idx);
+ if (err) {
+ vp_oper->vlan_idx = NO_INDX;
+ mlx4_warn(&priv->dev,
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
+ (int)(vp_oper->state.default_vlan),
+ vp_oper->vlan_idx, slave, port);
+ }
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+
+ return 0;
+}
+
+static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ slave_state = &priv->mfunc.master.slave_state[slave];
+
+ if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
+ (!slave_state->active))
+ return 0;
+
+ if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
+ vp_oper->state.default_vlan == vp_admin->default_vlan &&
+ vp_oper->state.default_qos == vp_admin->default_qos)
+ return 0;
+
+ if (!slave_state->vst_qinq_supported) {
+ /* Warn and revert the request to set vst QinQ mode */
+ vp_admin->vlan_proto = vp_oper->state.vlan_proto;
+ vp_admin->default_vlan = vp_oper->state.default_vlan;
+ vp_admin->default_qos = vp_oper->state.default_qos;
+
+ mlx4_warn(&priv->dev,
+ "Slave %d does not support VST QinQ mode\n", slave);
+ return 0;
+ }
+
+ err = mlx4_activate_vst_qinq(priv, slave, port);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -311,14 +379,18 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
-#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
#define QUERY_FUNC_CAP_PHV_BIT 0x40
+#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20
+
+#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
int converted_port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier);
+ struct mlx4_vport_oper_state *vp_oper;
if (converted_port < 0)
return -EINVAL;
@@ -357,15 +429,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
- if (dev->caps.phv_bit[port]) {
- field = QUERY_FUNC_CAP_PHV_BIT;
- MLX4_PUT(outbox->buf, field,
- QUERY_FUNC_CAP_FLAGS0_OFFSET);
- }
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ err = mlx4_handle_vst_qinq(priv, slave, port);
+ if (err)
+ return err;
+
+ field = 0;
+ if (dev->caps.phv_bit[port])
+ field |= QUERY_FUNC_CAP_PHV_BIT;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
+
/* enable rdma and ethernet interfaces, new quota locations,
* and reserved lkey
*/
@@ -439,6 +520,10 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+
+ if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
+ slave_state->vst_qinq_supported = true;
+
} else
err = -EINVAL;
@@ -454,10 +539,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
u32 size, qkey;
int err = 0, quotas = 0;
u32 in_modifier;
+ u32 slave_caps;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
- in_modifier = op_modifier ? gen_or_port :
+ slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
+ in_modifier = op_modifier ? gen_or_port : slave_caps;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -612,8 +699,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
- func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+ MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
@@ -690,6 +776,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
@@ -703,6 +790,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
+#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -767,12 +855,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
dev_cap->reserved_mtts = 1 << (field >> 4);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
- dev_cap->max_mrw_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
dev_cap->reserved_mrws = 1 << (field & 0xf);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
- dev_cap->max_mtt_seg = 1 << (field & 0x3f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
@@ -822,6 +906,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
+ if (field & (1 << 5))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
if (field & 0x1)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
@@ -857,6 +944,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
+ if (field & 0x1)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
@@ -2698,7 +2788,6 @@ static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
- int secure_host_active;
int err;
/* Check if mad_demux is supported */
@@ -2721,7 +2810,8 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
goto out;
}
- secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
+ if (mlx4_check_smp_firewall_active(dev, mailbox))
+ dev->flags |= MLX4_FLAG_SECURE_HOST;
/* Config mad_demux to handle all MADs returned by the query above */
err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
@@ -2732,7 +2822,7 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
goto out;
}
- if (secure_host_active)
+ if (dev->flags & MLX4_FLAG_SECURE_HOST)
mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2914,7 +3004,7 @@ int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
- *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
return err;
}
EXPORT_SYMBOL(get_phv_bit);
@@ -2938,6 +3028,22 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
}
EXPORT_SYMBOL(set_phv_bit);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled)
+{
+ struct mlx4_func_cap func_cap;
+ int err;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *vlan_offload_disabled =
+ !!(func_cap.flags0 &
+ QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
+
void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cdbd76f10ced..5343a0599253 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -80,9 +80,7 @@ struct mlx4_dev_cap {
int max_eqs;
int num_sys_eqs;
int reserved_mtts;
- int max_mrw_sz;
int reserved_mrws;
- int max_mtt_seg;
int max_requester_per_qp;
int max_responder_per_qp;
int max_rdma_global;
@@ -152,7 +150,7 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u32 reserved_lkey;
u8 physical_port;
- u8 port_flags;
+ u8 flags0;
u8 flags1;
u64 phys_port_id;
u32 extra_flags;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c9d7fc5159f2..e4878f31e45d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,6 +46,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/devlink.h>
+#include <linux/rwsem.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -482,6 +483,7 @@ struct mlx4_slave_state {
u8 init_port_mask;
bool active;
bool old_vlan_api;
+ bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -507,6 +509,7 @@ struct mlx4_vport_state {
u64 mac;
u16 default_vlan;
u8 default_qos;
+ __be16 vlan_proto;
u32 tx_rate;
bool spoofchk;
u32 link_state;
@@ -627,6 +630,7 @@ struct mlx4_cmd {
struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -655,6 +659,7 @@ struct mlx4_vf_immed_vlan_work {
u8 qos_vport;
u16 vlan_id;
u16 orig_vlan_id;
+ __be16 vlan_proto;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9099dbd04951..a3528dd1e72e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -340,7 +340,7 @@ struct mlx4_en_rx_ring {
u8 fcs_del;
void *buf;
void *rx_info;
- struct bpf_prog *xdp_prog;
+ struct bpf_prog __rcu *xdp_prog;
struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8b81114bdc72..84d7857ccc27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -790,10 +790,22 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control |=
- MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
+ /* VST QinQ should block untagged frames on TX,
+ * but the cvlan is in the payload and phv is set, so
+ * the hw sees them as untagged. Block tagged frames instead.
+ */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* vst 802.1Q */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ }
} else { /* priority tagged */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
@@ -802,7 +814,11 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ qpc->pri_path.fl |= MLX4_FL_SV;
+ else
+ qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
@@ -5238,6 +5254,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
@@ -5266,7 +5283,12 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
- else
+ else if (work->vlan_proto == htons(ETH_P_8021AD))
+ vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
@@ -5311,7 +5333,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
- qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (work->vlan_proto == htons(ETH_P_8021AD))
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
+ else
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 67146624eb58..f44d089e2ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -45,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- spin_lock(&srq_table->lock);
-
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ rcu_read_unlock();
if (srq)
atomic_inc(&srq->refcount);
-
- spin_unlock(&srq_table->lock);
-
- if (!srq) {
+ else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
@@ -301,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- unsigned long flags;
- spin_lock_irqsave(&srq_table->lock, flags);
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
- spin_unlock_irqrestore(&srq_table->lock, flags);
+ rcu_read_unlock();
return srq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 05cc1effc13c..0343725d7f44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o
+ fs_counters.o rl.o lag.o dev.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c2ec01a22d55..1e639f886021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,11 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DETACH_FROM_MCG:
case MLX5_CMD_OP_DEALLOC_XRCD:
case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_LAG:
+ case MLX5_CMD_OP_DESTROY_VPORT_LAG:
case MLX5_CMD_OP_DESTROY_TIR:
case MLX5_CMD_OP_DESTROY_SQ:
case MLX5_CMD_OP_DESTROY_RQ:
@@ -315,6 +317,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+ case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -389,6 +392,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_LAG:
+ case MLX5_CMD_OP_MODIFY_LAG:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_CREATE_VPORT_LAG:
case MLX5_CMD_OP_CREATE_TIR:
case MLX5_CMD_OP_MODIFY_TIR:
case MLX5_CMD_OP_QUERY_TIR:
@@ -416,6 +423,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -504,7 +512,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
MLX5_COMMAND_STR_CASE(ACCESS_REG);
MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
- MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
MLX5_COMMAND_STR_CASE(MAD_IFC);
MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
@@ -526,6 +534,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(CREATE_LAG);
+ MLX5_COMMAND_STR_CASE(MODIFY_LAG);
+ MLX5_COMMAND_STR_CASE(QUERY_LAG);
+ MLX5_COMMAND_STR_CASE(DESTROY_LAG);
+ MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
+ MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
MLX5_COMMAND_STR_CASE(CREATE_TIR);
MLX5_COMMAND_STR_CASE(MODIFY_TIR);
MLX5_COMMAND_STR_CASE(DESTROY_TIR);
@@ -564,15 +578,130 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
default: return "unknown command opcode";
}
}
+static const char *cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK:
+ return "OK";
+ case MLX5_CMD_STAT_INT_ERR:
+ return "internal error";
+ case MLX5_CMD_STAT_BAD_OP_ERR:
+ return "bad operation";
+ case MLX5_CMD_STAT_BAD_PARAM_ERR:
+ return "bad parameter";
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+ return "bad system state";
+ case MLX5_CMD_STAT_BAD_RES_ERR:
+ return "bad resource";
+ case MLX5_CMD_STAT_RES_BUSY:
+ return "resource busy";
+ case MLX5_CMD_STAT_LIM_ERR:
+ return "limits exceeded";
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+ return "bad resource state";
+ case MLX5_CMD_STAT_IX_ERR:
+ return "bad index";
+ case MLX5_CMD_STAT_NO_RES_ERR:
+ return "no resources";
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+ return "bad input length";
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+ return "bad output length";
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+ return "bad QP state";
+ case MLX5_CMD_STAT_BAD_PKT_ERR:
+ return "bad packet (discarded)";
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+ return "bad size too many outstanding CQEs";
+ default:
+ return "unknown status";
+ }
+}
+
+static int cmd_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK: return 0;
+ case MLX5_CMD_STAT_INT_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
+ default: return -EIO;
+ }
+}
+
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+{
+ *status = MLX5_GET(mbox_out, out, status);
+ *syndrome = MLX5_GET(mbox_out, out, syndrome);
+}
+
+static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+{
+ u32 syndrome;
+ u8 status;
+ u16 opcode;
+ u16 op_mod;
+
+ mlx5_cmd_mbox_status(out, &status, &syndrome);
+ if (!status)
+ return 0;
+
+ opcode = MLX5_GET(mbox_in, in, opcode);
+ op_mod = MLX5_GET(mbox_in, in, op_mod);
+
+ mlx5_core_err(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+ mlx5_command_str(opcode),
+ opcode, op_mod,
+ cmd_status_str(status),
+ status,
+ syndrome);
+
+ return cmd_status_to_err(status);
+}
+
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
- u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+ u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int data_only;
u32 offset = 0;
@@ -622,9 +751,7 @@ static void dump_command(struct mlx5_core_dev *dev,
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
+ return MLX5_GET(mbox_in, in->first.data, opcode);
}
static void cb_timeout_handler(struct work_struct *work)
@@ -762,16 +889,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
-static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->syndrome;
-}
-
-static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
-{
- return &out->status;
-}
-
/* Notes:
* 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
@@ -820,7 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ op = MLX5_GET(mbox_in, in->first.data, opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
@@ -1035,7 +1152,6 @@ static ssize_t data_write(struct file *filp, const char __user *buf,
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
void *ptr;
- int err;
if (*pos != 0)
return -EINVAL;
@@ -1043,25 +1159,15 @@ static ssize_t data_write(struct file *filp, const char __user *buf,
kfree(dbg->in_msg);
dbg->in_msg = NULL;
dbg->inlen = 0;
-
- ptr = kzalloc(count, GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- if (copy_from_user(ptr, buf, count)) {
- err = -EFAULT;
- goto out;
- }
+ ptr = memdup_user(buf, count);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
dbg->in_msg = ptr;
dbg->inlen = count;
*pos = count;
return count;
-
-out:
- kfree(ptr);
- return err;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
@@ -1321,11 +1427,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
callback = ent->callback;
context = ent->context;
err = ent->ret;
- if (!err)
+ if (!err) {
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
+ err = err ? err : mlx5_cmd_check(dev,
+ ent->in->first.data,
+ ent->uout);
+ }
+
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
@@ -1377,14 +1488,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
-static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
-{
- return be16_to_cpu(in->opcode);
-}
-
-static int is_manage_pages(struct mlx5_inbox_hdr *in)
+static int is_manage_pages(void *in)
{
- return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
@@ -1401,9 +1507,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
- *get_synd_ptr(out) = cpu_to_be32(drv_synd);
- *get_status_ptr(out) = status;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, drv_synd);
return err;
}
@@ -1457,7 +1565,10 @@ out_in:
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ int err;
+
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@@ -1694,96 +1805,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
-
-static const char *cmd_status_str(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK:
- return "OK";
- case MLX5_CMD_STAT_INT_ERR:
- return "internal error";
- case MLX5_CMD_STAT_BAD_OP_ERR:
- return "bad operation";
- case MLX5_CMD_STAT_BAD_PARAM_ERR:
- return "bad parameter";
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
- return "bad system state";
- case MLX5_CMD_STAT_BAD_RES_ERR:
- return "bad resource";
- case MLX5_CMD_STAT_RES_BUSY:
- return "resource busy";
- case MLX5_CMD_STAT_LIM_ERR:
- return "limits exceeded";
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
- return "bad resource state";
- case MLX5_CMD_STAT_IX_ERR:
- return "bad index";
- case MLX5_CMD_STAT_NO_RES_ERR:
- return "no resources";
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
- return "bad input length";
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
- return "bad output length";
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
- return "bad QP state";
- case MLX5_CMD_STAT_BAD_PKT_ERR:
- return "bad packet (discarded)";
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
- return "bad size too many outstanding CQEs";
- default:
- return "unknown status";
- }
-}
-
-static int cmd_status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_STAT_OK: return 0;
- case MLX5_CMD_STAT_INT_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
- case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
- case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
- case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
- case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
- case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
- case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
- default: return -EIO;
- }
-}
-
-/* this will be available till all the commands use set/get macros */
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
-{
- if (!hdr->status)
- return 0;
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(hdr->status), hdr->status,
- be32_to_cpu(hdr->syndrome));
-
- return cmd_status_to_err(hdr->status);
-}
-
-int mlx5_cmd_status_to_err_v2(void *ptr)
-{
- u32 syndrome;
- u8 status;
-
- status = be32_to_cpu(*(__be32 *)ptr) >> 24;
- if (!status)
- return 0;
-
- syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
-
- pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
- cmd_status_str(status), status, syndrome);
-
- return cmd_status_to_err(status);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 873a631ad155..32d4af9b594d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
complete(&cq->free);
}
-
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen)
+ u32 *in, int inlen)
{
- int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_create_cq_mbox_out out;
- struct mlx5_destroy_cq_mbox_in din;
- struct mlx5_destroy_cq_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn);
struct mlx5_eq *eq;
+ int err;
eq = mlx5_eqn2eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
- memset(&out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+ cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
@@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
@@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_destroy_cq_mbox_in in;
- struct mlx5_destroy_cq_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp;
int err;
@@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
@@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out)
+ u32 *out, int outlen)
{
- struct mlx5_query_cq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
- in.cqn = cpu_to_be32(cq->cqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
- return err;
+ MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+ MLX5_SET(query_cq_in, in, cqn, cq->cqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
-
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz)
+ u32 *in, int inlen)
{
- struct mlx5_modify_cq_mbox_out out;
- int err;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
- err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return 0;
+ MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
@@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- struct mlx5_modify_cq_mbox_in in;
-
- memset(&in, 0, sizeof(in));
-
- in.cqn = cpu_to_be32(cq->cqn);
- in.ctx.cq_period = cpu_to_be16(cq_period);
- in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
- in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
- MLX5_CQ_MODIFY_COUNT);
-
- return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
+EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 5210d92e6bc7..e94a9532e218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
- struct mlx5_query_qp_mbox_out *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
struct mlx5_qp_context *ctx;
u64 param = 0;
+ u32 *out;
int err;
int no_sq;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
+ err = mlx5_core_qp_query(dev, qp, out, outlen);
if (err) {
- mlx5_core_warn(dev, "failed to query qp\n");
+ mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
goto out;
}
*is_str = 0;
- ctx = &out->ctx;
+
+ /* FIXME: use MLX5_GET rather than the manual mlx5_qp_context struct */
+ ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
+
switch (index) {
case QP_PID:
param = qp->pid;
@@ -358,32 +362,32 @@ out:
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
- struct mlx5_query_eq_mbox_out *out;
- struct mlx5_eq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
+ err = mlx5_core_eq_query(dev, eq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
switch (index) {
case EQ_NUM_EQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
break;
case EQ_INTR:
- param = ctx->intr;
+ param = MLX5_GET(eqc, ctx, intr);
break;
case EQ_LOG_PG_SZ:
- param = (ctx->log_page_size & 0x1f) + 12;
+ param = MLX5_GET(eqc, ctx, log_page_size) + 12;
break;
}
@@ -395,37 +399,37 @@ out:
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
- struct mlx5_query_cq_mbox_out *out;
- struct mlx5_cq_context *ctx;
+ int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
u64 param = 0;
+ void *ctx;
+ u32 *out;
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = mlx5_vzalloc(outlen);
if (!out)
return param;
- ctx = &out->ctx;
-
- err = mlx5_core_query_cq(dev, cq, out);
+ err = mlx5_core_query_cq(dev, cq, out, outlen);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
+ ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
- param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
+ param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break;
case CQ_LOG_PG_SZ:
- param = (ctx->log_pg_sz & 0x1f) + 12;
+ param = MLX5_GET(cqc, ctx, log_page_size);
break;
}
out:
- kfree(out);
+ kvfree(out);
return param;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
new file mode 100644
index 000000000000..a9dbc28f6b97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(mlx5_dev_list);
+/* intf dev list mutex */
+static DEFINE_MUTEX(mlx5_intf_mutex);
+
+struct mlx5_device_context {
+ struct list_head list;
+ struct mlx5_interface *intf;
+ void *context;
+ unsigned long state;
+};
+
+enum {
+ MLX5_INTERFACE_ADDED,
+ MLX5_INTERFACE_ATTACHED,
+};
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ if (!mlx5_lag_intf_add(intf, priv))
+ return;
+
+ dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ if (intf->attach)
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
+ struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf)
+ return dev_ctx;
+ return NULL;
+}
+
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ intf->remove(dev, dev_ctx->context);
+
+ kfree(dev_ctx);
+}
+
+static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->attach) {
+ if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->attach(dev, dev_ctx->context);
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_attach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_attach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->detach) {
+ if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->detach(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ intf->remove(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_detach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_detach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+bool mlx5_device_registered(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv;
+ bool found = false;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ if (priv == &dev->priv)
+ found = true;
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return found;
+}
+
+int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&priv->dev_list, &mlx5_dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+
+void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_add_device(intf, &dev->priv);
+ break;
+ }
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_remove_device(intf, &dev->priv);
+ break;
+ }
+}
+
+static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+{
+ return (u16)((dev->pdev->bus->number << 8) |
+ PCI_SLOT(dev->pdev->devfn));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+ u16 pci_id = mlx5_gen_pci_id(dev);
+ struct mlx5_core_dev *res = NULL;
+ struct mlx5_core_dev *tmp_dev;
+ struct mlx5_priv *priv;
+
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
+ tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+ if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
+ res = tmp_dev;
+ break;
+ }
+ }
+
+ return res;
+}
+
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+void mlx5_dev_list_lock(void)
+{
+ mutex_lock(&mlx5_intf_mutex);
+}
+
+void mlx5_dev_list_unlock(void)
+{
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_dev_list_trylock(void)
+{
+ return mutex_trylock(&mlx5_intf_mutex);
+}
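
The ADDED/ATTACHED pair of state bits in the new dev.c lets an interface that implements attach()/detach() be quiesced and revived (for example across a reset) without tearing down its context, while legacy interfaces fall back to a full add()/remove(). A minimal userspace model of that decision logic follows; it assumes an interface implements either both or neither of attach/detach, which is a simplification of the per-callback checks above.

#include <stdbool.h>
#include <stdio.h>

enum { IFACE_ADDED = 1 << 0, IFACE_ATTACHED = 1 << 1 };

struct iface { bool has_attach; };

static void attach_one(const struct iface *i, unsigned long *state)
{
	if (i->has_attach) {
		if (*state & IFACE_ATTACHED)
			return;                /* already attached */
		*state |= IFACE_ATTACHED;      /* would call intf->attach(dev, ctx) */
	} else {
		if (*state & IFACE_ADDED)
			return;                /* already added */
		*state |= IFACE_ADDED;         /* would call intf->add(dev) */
	}
}

static void detach_one(const struct iface *i, unsigned long *state)
{
	if (i->has_attach) {
		if (!(*state & IFACE_ATTACHED))
			return;
		*state &= ~IFACE_ATTACHED;     /* would call intf->detach(dev, ctx) */
	} else {
		if (!(*state & IFACE_ADDED))
			return;
		*state &= ~IFACE_ADDED;        /* would call intf->remove(dev, ctx) */
	}
}

int main(void)
{
	struct iface en = { .has_attach = true };
	unsigned long state = IFACE_ADDED | IFACE_ATTACHED;  /* as left by add */

	detach_one(&en, &state);  /* before a reset: ATTACHED cleared, ADDED kept */
	attach_one(&en, &state);  /* after the reset: re-attached, never re-added */
	printf("state=%#lx\n", state);
	return 0;
}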
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bf722aa88cf0..460363b66cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -62,12 +62,14 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
@@ -99,6 +101,18 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+ (MLX5E_XDP_IHS_DS_COUNT + \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
+
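
As a rough worked example of the XDP WQEBB budget defined above — taking ETH_HLEN = 14 and VLAN_HLEN = 4, a 16-byte send data segment, a 64-byte basic block, and assuming the base mlx5e_tx_wqe (control plus ethernet segment) spans two data segments; the segment and WQE sizes are assumptions, not shown in this hunk:

    MLX5E_XDP_MIN_INLINE   = 14 + 4                    = 18
    MLX5E_XDP_IHS_DS_COUNT = DIV_ROUND_UP(18 - 2, 16)  = 1
    MLX5E_XDP_TX_DS_COUNT  = 1 + 2 + 1 (SG DS)         = 4
    MLX5E_XDP_TX_WQEBBS    = DIV_ROUND_UP(4, 64 / 16)  = 1

so each XDP_TX transmission is budgeted a single WQEBB; the "- 2" presumably accounts for the inline header bytes already carried inside the ethernet segment.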
#define MLX5E_NUM_MAIN_GROUPS 9
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
@@ -287,29 +301,53 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
u8 tired;
};
+/* a single cache unit is capable to serve one napi call (for non-striding rq)
+ * or a MPWQE (for striding rq).
+ */
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
+struct mlx5e_page_cache {
+ u32 head;
+ u32 tail;
+ struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+};
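
The page cache declared here is a power-of-two ring addressed by head/tail indices; the same masked advance appears later in mlx5e_destroy_rq when the cache is drained. A small userspace model of the put/get discipline this structure implies — the actual recycle policy lives in the RX path, which is outside this hunk, so treat this as a reading of the data structure rather than the driver's exact code:

#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 128                        /* must stay a power of two */

struct page_ring {
	unsigned int head, tail;
	void *slot[CACHE_SIZE];
};

static bool ring_put(struct page_ring *r, void *page)
{
	unsigned int tail_next = (r->tail + 1) & (CACHE_SIZE - 1);

	if (tail_next == r->head)
		return false;                 /* full: caller releases the page */
	r->slot[r->tail] = page;
	r->tail = tail_next;
	return true;
}

static void *ring_get(struct page_ring *r)
{
	void *page;

	if (r->head == r->tail)
		return NULL;                  /* empty: caller allocates a fresh page */
	page = r->slot[r->head];
	r->head = (r->head + 1) & (CACHE_SIZE - 1);
	return page;
}

int main(void)
{
	struct page_ring r = { 0 };
	int page = 42;

	ring_put(&r, &page);
	printf("%d\n", *(int *)ring_get(&r));
	return 0;
}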
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
- u32 wqe_sz;
- struct sk_buff **skb;
- struct mlx5e_mpw_info *wqe_info;
+
+ union {
+ struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_mpw_info *info;
+ void *mtt_no_align;
+ u32 mtt_offset;
+ } mpwqe;
+ };
+ struct {
+ u8 page_order;
+ u32 wqe_sz; /* wqe data buffer size */
+ u8 map_dir; /* dma map direction */
+ } buff;
__be32 mkey_be;
- __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ struct mlx5e_page_cache page_cache;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
- u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct bpf_prog *xdp_prog;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -323,32 +361,15 @@ struct mlx5e_rq {
struct mlx5e_umr_dma_info {
__be64 *mtt;
- __be64 *mtt_no_align;
dma_addr_t mtt_addr;
- struct mlx5e_dma_info *dma_info;
+ struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+ struct mlx5e_umr_wqe wqe;
};
struct mlx5e_mpw_info {
- union {
- struct mlx5e_dma_info dma_info;
- struct mlx5e_umr_dma_info umr;
- };
+ struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-
- void (*dma_pre_sync)(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len);
- void (*add_skb_frag)(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset, u32 len);
- void (*copy_skb_header)(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen);
- void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
};
struct mlx5e_tx_wqe_info {
@@ -373,11 +394,17 @@ enum {
MLX5E_SQ_STATE_BF_ENABLE,
};
-struct mlx5e_ico_wqe_info {
+struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;
};
+enum mlx5e_sq_type {
+ MLX5E_SQ_TXQ,
+ MLX5E_SQ_ICO,
+ MLX5E_SQ_XDP
+};
+
struct mlx5e_sq {
/* data path */
@@ -395,10 +422,20 @@ struct mlx5e_sq {
struct mlx5e_cq cq;
- /* pointers to per packet info: write@xmit, read@completion */
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
+ /* pointers to per tx element info: write@xmit, read@completion */
+ union {
+ struct {
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } txq;
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ struct {
+ struct mlx5e_sq_wqe_info *wqe_info;
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } xdp;
+ } db;
/* read only */
struct mlx5_wq_cyc wq;
@@ -420,8 +457,8 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
- struct mlx5e_ico_wqe_info *ico_wqe_info;
u32 rate_limit;
+ u8 type;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -437,8 +474,10 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
+ struct mlx5e_sq xdp_sq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_sq icosq; /* internal control operations */
+ bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -620,6 +659,7 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct bpf_prog *xdp_prog;
/* priv data path fields - end */
unsigned long state;
@@ -654,40 +694,6 @@ struct mlx5e_priv {
void *ppriv;
};
-enum mlx5e_link_mode {
- MLX5E_1000BASE_CX_SGMII = 0,
- MLX5E_1000BASE_KX = 1,
- MLX5E_10GBASE_CX4 = 2,
- MLX5E_10GBASE_KX4 = 3,
- MLX5E_10GBASE_KR = 4,
- MLX5E_20GBASE_KR2 = 5,
- MLX5E_40GBASE_CR4 = 6,
- MLX5E_40GBASE_KR4 = 7,
- MLX5E_56GBASE_R4 = 8,
- MLX5E_10GBASE_CR = 12,
- MLX5E_10GBASE_SR = 13,
- MLX5E_10GBASE_ER = 14,
- MLX5E_40GBASE_SR4 = 15,
- MLX5E_40GBASE_LR4 = 16,
- MLX5E_50GBASE_SR2 = 18,
- MLX5E_100GBASE_CR4 = 20,
- MLX5E_100GBASE_SR4 = 21,
- MLX5E_100GBASE_KR4 = 22,
- MLX5E_100GBASE_LR4 = 23,
- MLX5E_100BASE_TX = 24,
- MLX5E_1000BASE_T = 25,
- MLX5E_10GBASE_T = 26,
- MLX5E_25GBASE_CR = 27,
- MLX5E_25GBASE_KR = 28,
- MLX5E_25GBASE_SR = 29,
- MLX5E_50GBASE_CR2 = 30,
- MLX5E_50GBASE_KR2 = 31,
- MLX5E_LINK_MODES_NUMBER,
-};
-
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-
-
void mlx5e_build_ptys2ethtool_map(void);
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
@@ -700,30 +706,19 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
-void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_rx_am(struct mlx5e_rq *rq);
@@ -810,6 +805,12 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return rq->mpwqe.mtt_offset +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return min_t(int, mdev->priv.eq_table.num_comp_vectors,
@@ -868,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
@@ -878,9 +880,12 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv);
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 847a8f3ac2b2..13dc388667b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -273,7 +273,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
- if (IS_ERR_OR_NULL(tstamp->ptp)) {
+ if (IS_ERR(tstamp->ptp)) {
mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(tstamp->ptp));
tstamp->ptp = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 9cce153e1035..029e856f72a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey)
{
- struct mlx5_create_mkey_mbox_in *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+ kvfree(in);
return err;
}
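
Throughout this diff, hand-built mailbox structs (as in the removed lines above) are replaced by the MLX5_ST_SZ_BYTES()/MLX5_ADDR_OF()/MLX5_SET() accessors. A hedged userspace model of what such an accessor does — packing a value at a fixed bit offset inside a command buffer of big-endian 32-bit words. The offsets used below are invented for illustration; the real ones are generated from mlx5_ifc.h, and the exact macro internals are not part of this hunk.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl/ntohl */

static void set_field(void *buf, unsigned int bit_off, unsigned int bit_sz,
		      uint32_t val)
{
	uint32_t *dw = (uint32_t *)buf + bit_off / 32;
	unsigned int shift = 32 - bit_sz - (bit_off & 31);
	uint32_t mask = (bit_sz == 32 ? ~0u : (1u << bit_sz) - 1) << shift;
	uint32_t cur = ntohl(*dw);

	*dw = htonl((cur & ~mask) | ((val << shift) & mask));
}

int main(void)
{
	uint32_t cmd[4] = { 0 };

	set_field(cmd, 0, 16, 0x0200);      /* an opcode-like field (invented) */
	set_field(cmd, 40, 24, 0xffffff);   /* a qpn-like field (invented) */
	printf("%08x %08x\n", ntohl(cmd[0]), ntohl(cmd[1]));
	return 0;
}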
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 7a346bb2ed00..27ff401cec20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -805,7 +805,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 eth_proto_cap;
u32 eth_proto_admin;
u32 eth_proto_lp;
@@ -815,7 +815,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
-
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1587a9fd5724..36fbc6b21a33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -294,6 +294,36 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
return 0;
}
+static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -1024,14 +1054,10 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_vlan_flow_groups;
+ mlx5e_add_vlan_rules(priv);
return 0;
-err_destroy_vlan_flow_groups:
- mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
@@ -1043,6 +1069,7 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
@@ -1100,7 +1127,6 @@ err_destroy_arfs_tables:
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2459c7f3db8d..7eaf38020a8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -50,7 +51,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
u16 max_inline;
u8 min_inline_mode;
- bool icosq;
+ enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -63,12 +64,55 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
struct mlx5e_cq_param icosq_cq;
};
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+{
+ priv->params.rq_wq_type = rq_type;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ mlx5_core_info(priv->mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+}
+
+static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+{
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
+ !priv->xdp_prog ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+ mlx5e_set_rq_type_params(priv, rq_type);
+}
+
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -136,12 +180,18 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
@@ -174,18 +224,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_core_dev *mdev = priv->mdev;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
memset(out, 0, outlen);
-
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
@@ -298,6 +345,117 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
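
As a rough worked example of the helper above, taking 4 KB pages (so MLX5_MPWRQ_PAGES_PER_WQE = 2^(18-12) = 64 with the new log WQE size) and assuming MLX5_UMR_MTT_ALIGNMENT is 64 bytes — the latter is an assumption, not visible in this hunk:

    64 entries * sizeof(__be64) = 64 * 8 = 512 bytes
    ALIGN(512, 64)              = 512    (already aligned)
    ALIGN(62 * 8 = 496, 64)     = 512    (the "little more")

rounding the allocation up to the copy granularity means the hardware's aligned-size UMR copy stays inside zeroed, owned memory rather than reading past the MTT array.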
+
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+ struct mlx5e_channel *c)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+ int i;
+
+ rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->mpwqe.info)
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (unlikely(!rq->mpwqe.mtt_no_align))
+ goto err_free_wqe_info;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
+ MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+ goto err_unmap_mtts;
+
+ mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+ }
+
+ return 0;
+
+err_unmap_mtts:
+ while (--i >= 0) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+err_free_wqe_info:
+ kfree(rq->mpwqe.info);
+
+err_out:
+ return -ENOMEM;
+}
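
The (mtt_sz + MLX5_UMR_ALIGN - 1) over-allocation per entry above is the usual allocate-then-align idiom: reserving align - 1 extra bytes guarantees an aligned window of mtt_sz bytes fits inside whatever address kzalloc happens to return, which is what PTR_ALIGN then selects. A standalone sketch of the same idiom; 2048 is an illustrative alignment, the real MLX5_UMR_ALIGN value is not shown in this hunk.

#include <stdint.h>
#include <stdlib.h>

static void *ptr_align(void *p, uintptr_t align)   /* align: power of two */
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t size = 512, align = 2048;               /* illustrative values */
	char *raw = malloc(size + align - 1);
	char *mtt = raw ? ptr_align(raw, align) : NULL;

	/* mtt .. mtt + size - 1 is guaranteed to lie inside the raw buffer */
	free(raw);
	return mtt ? 0 : 1;
}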
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+ kfree(rq->mpwqe.info);
+}
+
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+ if (rep && rep->vport != FDB_UPLINK_VPORT)
+ return true;
+
+ return false;
+}
+
static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -307,6 +465,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
+ u32 frag_sz;
+ int npages;
int wq_sz;
int err;
int i;
@@ -322,63 +482,92 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->wq_type = priv->params.rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->tstamp = &priv->tstamp;
+ rq->channel = c;
+ rq->ix = c->ix;
+ rq->priv = c->priv;
+ rq->xdp_prog = priv->xdp_prog;
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ if (rq->xdp_prog)
+ rq->buff.map_dir = DMA_BIDIRECTIONAL;
+
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->wqe_info) {
- err = -ENOMEM;
+ if (mlx5e_is_vf_vport_rep(priv)) {
+ err = -EINVAL;
goto err_rq_wq_destroy;
}
+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_mtt_offset = c->ix *
+ rq->mpwqe.mtt_offset = c->ix *
MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
- rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
- byte_count = rq->wqe_sz;
+
+ rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->buff.wqe_sz;
+ rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ if (err)
+ goto err_rq_wq_destroy;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
+ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->dma_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
+ if (mlx5e_is_vf_vport_rep(priv))
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+ else
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->wqe_sz = (priv->params.lro_en) ?
+ rq->buff.wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
- byte_count = rq->wqe_sz;
+ byte_count = rq->buff.wqe_sz;
+
+ /* calc the required page order */
+ frag_sz = MLX5_RX_HEADROOM +
+ byte_count /* packet data */ +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frag_sz = SKB_DATA_ALIGN(frag_sz);
+
+ npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->buff.page_order = order_base_2(npages);
+
byte_count |= MLX5_HW_START_PADDING;
+ rq->mkey_be = c->mkey_be;
}
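
A rough worked example of the page-order math above for the non-LRO case, assuming a 1500-byte MTU, NET_SKB_PAD = 64, 4096-byte pages, and roughly 320 bytes for the aligned skb_shared_info (the last two figures are illustrative, not taken from this diff):

    byte_count = MLX5E_SW2HW_MTU(1500) = 1500 + 14 + 4 + 4 = 1522
    frag_sz    = SKB_DATA_ALIGN(64 + 1522 + 320)           ≈ 1920
    npages     = DIV_ROUND_UP(1920, 4096)                  = 1
    page_order = order_base_2(1)                           = 0

so each receive fragment fits in a single page; with LRO enabled the larger lro_wqe_sz pushes the order up accordingly.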
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
wqe->data.byte_count = cpu_to_be32(byte_count);
+ wqe->data.lkey = rq->mkey_be;
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
rq->am.mode = priv->params.rx_cq_period_mode;
- rq->wq_type = priv->params.rq_wq_type;
- rq->pdev = c->pdev;
- rq->netdev = c->netdev;
- rq->tstamp = &priv->tstamp;
- rq->channel = c;
- rq->ix = c->ix;
- rq->priv = c->priv;
- rq->mkey_be = c->mkey_be;
- rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ rq->page_cache.head = 0;
+ rq->page_cache.tail = 0;
+
+ if (rq->xdp_prog)
+ bpf_prog_add(rq->xdp_prog, 1);
return 0;
@@ -390,14 +579,25 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
+ int i;
+
+ if (rq->xdp_prog)
+ bpf_prog_put(rq->xdp_prog);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->wqe_info);
+ mlx5e_rq_free_mpwqe_info(rq);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->skb);
+ kfree(rq->dma_info);
}
+ for (i = rq->page_cache.head; i != rq->page_cache.tail;
+ i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
+ struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
+
+ mlx5e_page_release(rq, dma_info, false);
+ }
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -488,7 +688,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
@@ -530,7 +731,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
@@ -565,8 +766,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -590,26 +791,65 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_destroy_rq(rq);
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
- kfree(sq->wqe_info);
- kfree(sq->dma_fifo);
- kfree(sq->skb);
+ kfree(sq->db.xdp.di);
+ kfree(sq->db.xdp.wqe_info);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
- sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
- numa);
- sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
- numa);
+ sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ GFP_KERNEL, numa);
+ sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+ mlx5e_free_sq_xdp_db(sq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.ico_wqe);
+}
- if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
- mlx5e_free_sq_db(sq);
+static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+{
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+ sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.ico_wqe)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.txq.wqe_info);
+ kfree(sq->db.txq.dma_fifo);
+ kfree(sq->db.txq.skb);
+}
+
+static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
+ GFP_KERNEL, numa);
+ sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
+ mlx5e_free_sq_txq_db(sq);
return -ENOMEM;
}
@@ -618,6 +858,46 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
return 0;
}
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_sq_txq_db(sq);
+ break;
+ case MLX5E_SQ_ICO:
+ mlx5e_free_sq_ico_db(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_sq_xdp_db(sq);
+ break;
+ }
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ return mlx5e_alloc_sq_txq_db(sq, numa);
+ case MLX5E_SQ_ICO:
+ return mlx5e_alloc_sq_ico_db(sq, numa);
+ case MLX5E_SQ_XDP:
+ return mlx5e_alloc_sq_xdp_db(sq, numa);
+ }
+
+ return 0;
+}
+
+static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+{
+ switch (sq_type) {
+ case MLX5E_SQ_ICO:
+ return MLX5E_ICOSQ_MAX_WQEBBS;
+ case MLX5E_SQ_XDP:
+ return MLX5E_XDP_TX_WQEBBS;
+ }
+ return MLX5_SEND_WQE_MAX_WQEBBS;
+}
+
static int mlx5e_create_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
@@ -630,6 +910,13 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
+ sq->type = param->type;
+ sq->pdev = c->pdev;
+ sq->tstamp = &priv->tstamp;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -658,18 +945,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- if (param->icosq) {
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
- sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
- wq_sz,
- GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!sq->ico_wqe_info) {
- err = -ENOMEM;
- goto err_free_sq_db;
- }
- } else {
+ if (sq->type == MLX5E_SQ_TXQ) {
int txq_ix;
txq_ix = c->ix + tc * priv->params.num_channels;
@@ -677,19 +953,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
priv->txq_to_sq_map[txq_ix] = sq;
}
- sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
return 0;
-err_free_sq_db:
- mlx5e_free_sq_db(sq);
-
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -704,7 +972,6 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
- kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -733,11 +1000,12 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
+ 0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -852,12 +1120,14 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
netif_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1))
+ if (mlx5e_sq_has_room_for(sq, 1)) {
+ sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
mlx5e_send_nop(sq, true);
+ }
}
mlx5e_disable_sq(sq);
- mlx5e_free_tx_descs(sq);
+ mlx5e_free_sq_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1218,14 +1488,31 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
+ if (priv->xdp_prog) {
+ /* XDP SQ CQ params are same as normal TXQ sq CQ params */
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation);
+ if (err)
+ goto err_close_sqs;
+
+ err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
+ if (err) {
+ mlx5e_close_cq(&c->xdp_sq.cq);
+ goto err_close_sqs;
+ }
+ }
+
+ c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
- goto err_close_sqs;
+ goto err_close_xdp_sq;
netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
+err_close_xdp_sq:
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1254,9 +1541,13 @@ err_napi_del:
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
mlx5e_close_sqs(c);
mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1330,6 +1621,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
param->max_inline = priv->params.tx_max_inline;
param->min_inline_mode = priv->params.tx_min_inline_mode;
+ param->type = MLX5E_SQ_TXQ;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1403,7 +1695,22 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- param->icosq = true;
+ param->type = MLX5E_SQ_ICO;
+}
+
+static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
+ param->max_inline = priv->params.tx_max_inline;
+ /* FOR XDP SQs will support only L2 inline mode */
+ param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+ param->type = MLX5E_SQ_XDP;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
@@ -1412,6 +1719,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_chan
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
@@ -1885,6 +2193,9 @@ int mlx5e_close(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ if (!netif_device_present(netdev))
+ return -ENODEV;
+
mutex_lock(&priv->state_lock);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -1999,14 +2310,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- memset(in, 0, sizeof(in));
-
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+ if (mlx5_lag_is_lacp_owner(mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -2605,11 +2917,15 @@ static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
-static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
vlan, qos);
}
@@ -2786,6 +3102,106 @@ static void mlx5e_tx_timeout(struct net_device *dev)
schedule_work(&priv->tx_timeout_work);
}
+static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+ bool reset, was_opened;
+ int i;
+
+ mutex_lock(&priv->state_lock);
+
+ if ((netdev->features & NETIF_F_LRO) && prog) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ /* no need for full reset when exchanging programs */
+ reset = (!priv->xdp_prog || !prog);
+
+ if (was_opened && reset)
+ mlx5e_close_locked(netdev);
+
+ /* exchange programs */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (prog)
+ bpf_prog_add(prog, 1);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (reset) /* change RQ type according to priv->xdp_prog */
+ mlx5e_set_rq_priv_params(priv);
+
+ if (was_opened && reset)
+ mlx5e_open_locked(netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+ goto unlock;
+
+ /* exchanging programs w/o reset, we update ref counts on behalf
+ * of the channels RQs here.
+ */
+ bpf_prog_add(prog, priv->params.num_channels);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ struct mlx5e_channel *c = priv->channel[i];
+
+ set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ napi_synchronize(&c->napi);
+ /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+
+ old_prog = xchg(&c->rq.xdp_prog, prog);
+
+ clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ /* napi_schedule in case we have missed anything */
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+ napi_schedule(&c->napi);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
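
mlx5e_xdp_set() above takes two paths: turning XDP on or off changes the RQ type and therefore goes through a full close/open, while replacing one program with another hot-swaps the pointer per RQ, quiescing each ring via the FLUSH bit and napi_synchronize after taking one extra reference per channel up front. A reduced model of that hot-swap bookkeeping, with generic names standing in for bpf_prog_add/bpf_prog_put and with the ring-quiesce step left as a comment:

#include <stdatomic.h>

struct prog { atomic_int refcnt; };
struct ring { struct prog *prog; };

static void prog_get(struct prog *p, int n) { atomic_fetch_add(&p->refcnt, n); }
static void prog_put(struct prog *p)        { atomic_fetch_sub(&p->refcnt, 1); }

static void hot_swap(struct ring *rings, int n, struct prog *new_prog)
{
	prog_get(new_prog, n);                  /* one reference per ring */
	for (int i = 0; i < n; i++) {
		struct prog *old = rings[i].prog;

		/* driver quiesces the ring here (FLUSH bit + napi_synchronize) */
		rings[i].prog = new_prog;
		if (old)
			prog_put(old);          /* drop the ref held for this ring */
	}
}

int main(void)
{
	struct prog a = { ATOMIC_VAR_INIT(3) }, b = { ATOMIC_VAR_INIT(1) };
	struct ring rings[3] = { { &a }, { &a }, { &a } };

	hot_swap(rings, 3, &b);                 /* b gains 3 refs, a loses 3 */
	return atomic_load(&a.refcnt);          /* 0 */
}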
+
+static bool mlx5e_xdp_attached(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_prog;
+}
+
+static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx5e_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx5e_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
+ * reenabling interrupts.
+ */
+static void mlx5e_netpoll(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ napi_schedule(&priv->channel[i]->napi);
+}
+#endif
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2805,6 +3221,10 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2836,6 +3256,10 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2910,13 +3334,6 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
indirection_rqt[i] = i % num_channels;
}
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
-}
-
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
enum pcie_link_width width;
@@ -2996,11 +3413,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_admin = false;
@@ -3013,33 +3432,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.rx_cqe_compress_admin =
cqe_compress_heuristic(link_speed, pci_bw);
}
-
priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
- switch (priv->params.rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- priv->params.rx_cqe_compress ?
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
- MLX5_MPWRQ_LOG_STRIDE_SIZE;
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ mlx5e_set_rq_priv_params(priv);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
priv->params.lro_en = true;
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- }
-
- mlx5_core_info(mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- priv->params.rx_cqe_compress_admin);
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
@@ -3059,19 +3456,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+ /* Extra room needed for build_skb */
+ MLX5_RX_HEADROOM -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */
MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
-
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
#endif
@@ -3211,37 +3605,37 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *mkc;
- int inlen = sizeof(*in);
u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- mkc = &in->seg;
- mkc->status = MLX5_MKEY_STATUS_FREE;
- mkc->flags = MLX5_PERM_UMR_EN |
- MLX5_PERM_LOCAL_READ |
- MLX5_PERM_LOCAL_WRITE |
- MLX5_ACCESS_MODE_MTT;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
- mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
- mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
- mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
- mkc->log2_page_size = PAGE_SHIFT;
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
- err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
- NULL, NULL);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+ MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
- kvfree(in);
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
+ kvfree(in);
return err;
}
@@ -3360,6 +3754,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
struct mlx5_eswitch_rep rep;
+ mlx5_lag_add(mdev, netdev);
+
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
udp_tunnel_get_rx_info(netdev);
@@ -3373,9 +3769,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
- rep.vport = 0;
+ rep.vport = FDB_UPLINK_VPORT;
rep.priv_data = priv;
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -3383,6 +3779,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
queue_work(priv->wq, &priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
+ mlx5_lag_remove(priv->mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3399,13 +3796,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.max_tc = MLX5E_MAX_NUM_TC,
};
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv)
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
+ int nch = profile->max_nch(mdev);
struct net_device *netdev;
struct mlx5e_priv *priv;
- int nch = profile->max_nch(mdev);
- int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -3423,12 +3820,31 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_netdev;
+ goto err_cleanup_nic;
+
+ return netdev;
+
+err_cleanup_nic:
+ profile->cleanup(priv);
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ const struct mlx5e_profile *profile;
+ struct mlx5e_priv *priv;
+ int err;
+
+ priv = netdev_priv(netdev);
+ profile = priv->profile;
+ clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
err = mlx5e_create_umr_mkey(priv);
if (err) {
mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto err_destroy_wq;
+ goto out;
}
err = profile->init_tx(priv);
@@ -3451,20 +3867,16 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mlx5e_set_dev_port_mtu(netdev);
- err = register_netdev(netdev);
- if (err) {
- mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_dealloc_q_counters;
- }
-
if (profile->enable)
profile->enable(priv);
- return priv;
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
-err_dealloc_q_counters:
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
+ return 0;
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
@@ -3475,13 +3887,8 @@ err_cleanup_tx:
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
-
-err_free_netdev:
- free_netdev(netdev);
-
- return NULL;
+out:
+ return err;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
@@ -3503,20 +3910,84 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
}
}
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ const struct mlx5e_profile *profile = priv->profile;
+
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
+
+ flush_workqueue(priv->wq);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_close(netdev);
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
+ mlx5e_destroy_q_counter(priv);
+ profile->cleanup_rx(priv);
+ mlx5e_close_drop_rq(priv);
+ profile->cleanup_tx(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
+ cancel_delayed_work_sync(&priv->update_stats_work);
+}
+
+/* The scope of mlx5e_attach and mlx5e_detach should be limited to creating/destroying
+ * hardware contexts and connecting them to the current netdev.
+ */
+static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ if (netif_device_present(netdev))
+ return 0;
+
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ return err;
+
+ err = mlx5e_attach_netdev(mdev, netdev);
+ if (err) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
void *ppriv = NULL;
- void *ret;
-
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ void *priv;
+ int vport;
+ int err;
+ struct net_device *netdev;
- if (mlx5e_create_mdev_resources(mdev))
+ err = mlx5e_check_required_hca_cap(mdev);
+ if (err)
return NULL;
mlx5e_register_vport_rep(mdev);
@@ -3524,12 +3995,39 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
ppriv = &esw->offloads.vport_reps[0];
- ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
- if (!ret) {
- mlx5e_destroy_mdev_resources(mdev);
- return NULL;
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
+ goto err_unregister_reps;
}
- return ret;
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5e_attach(mdev, priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_detach;
+ }
+
+ return priv;
+
+err_detach:
+ mlx5e_detach(mdev, priv);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(mdev, priv);
+
+err_unregister_reps:
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ return NULL;
}
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
@@ -3537,30 +4035,11 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- if (profile->disable)
- profile->disable(priv);
-
- flush_workqueue(priv->wq);
- if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
- netif_device_detach(netdev);
- mlx5e_close(netdev);
- } else {
- unregister_netdev(netdev);
- }
-
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(priv);
- profile->cleanup_tx(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
-
- if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
- free_netdev(netdev);
+ free_netdev(netdev);
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
@@ -3570,12 +4049,11 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
int vport;
- mlx5e_destroy_netdev(mdev, priv);
-
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5e_detach(mdev, vpriv);
+ mlx5e_destroy_netdev(mdev, priv);
}
static void *mlx5e_get_netdev(void *vpriv)
@@ -3588,6 +4066,8 @@ static void *mlx5e_get_netdev(void *vpriv)
static struct mlx5_interface mlx5e_interface = {
.add = mlx5e_add,
.remove = mlx5e_remove,
+ .attach = mlx5e_attach,
+ .detach = mlx5e_detach,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 134de4a11f1d..3c97da103d30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -413,19 +413,50 @@ static struct mlx5e_profile mlx5e_rep_profile = {
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
- rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
- if (!rep->priv_data) {
- pr_warn("Failed to create representor for vport %d\n",
+ struct net_device *netdev;
+ int err;
+
+ netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!netdev) {
+ pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
return -EINVAL;
}
+
+ rep->priv_data = netdev_priv(netdev);
+
+ err = mlx5e_attach_netdev(esw->dev, netdev);
+ if (err) {
+ pr_warn("Failed to attach representor netdev for vport %d\n",
+ rep->vport);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ pr_warn("Failed to register representor netdev for vport %d\n",
+ rep->vport);
+ goto err_detach_netdev;
+ }
+
return 0;
+
+err_detach_netdev:
+ mlx5e_detach_netdev(esw->dev, netdev);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(esw->dev, rep->priv_data);
+
+ return err;
+
}
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
+ struct net_device *netdev = priv->netdev;
+ mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e7c969df3dad..c6de6fba5843 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -179,96 +180,111 @@ unlock:
mutex_unlock(&priv->state_lock);
}
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
-{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
- skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
- if (unlikely(!skb))
- return -ENOMEM;
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
+ u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
- dma_addr = dma_map_single(rq->pdev,
- /* hw start padding */
- skb->data,
- /* hw end padding */
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ if (tail_next == cache->head) {
+ rq->stats.cache_full++;
+ return false;
+ }
- if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
- goto err_free_skb;
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+}
- *((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr);
- wqe->data.lkey = rq->mkey_be;
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
- rq->skb[ix] = skb;
+ if (unlikely(cache->head == cache->tail)) {
+ rq->stats.cache_empty++;
+ return false;
+ }
- return 0;
+ if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ rq->stats.cache_busy++;
+ return false;
+ }
-err_free_skb:
- dev_kfree_skb(skb);
+ *dma_info = cache->page_cache[cache->head];
+ cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+ rq->stats.cache_reuse++;
- return -ENOMEM;
+ dma_sync_single_for_device(rq->pdev, dma_info->addr,
+ RQ_PAGE_SIZE(rq),
+ DMA_FROM_DEVICE);
+ return true;
}
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
{
- struct sk_buff *skb = rq->skb[ix];
+ struct page *page;
+
+ if (mlx5e_rx_cache_get(rq, dma_info))
+ return 0;
+
+ page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!page))
+ return -ENOMEM;
- if (skb) {
- rq->skb[ix] = NULL;
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ dma_info->page = page;
+ dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+ put_page(page);
+ return -ENOMEM;
}
+
+ return 0;
}
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle)
{
- return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+ if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+ return;
+
+ dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+ rq->buff.map_dir);
+ put_page(dma_info->page);
}
-static inline void
-mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
- dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
- len, DMA_FROM_DEVICE);
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
+ return -ENOMEM;
+
+ wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ return 0;
}
-static inline void
-mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- /* No dma pre sync for fragmented MPWQE */
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ mlx5e_page_release(rq, di, true);
}
-static inline void
-mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
- unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
-
- wi->skbs_frags[page_idx]++;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- &wi->dma_info.page[page_idx], frag_offset,
- len, truesize);
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
}
-static inline void
-mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
{
unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
@@ -282,24 +298,11 @@ mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
}
static inline void
-mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
-{
- struct page *page = &wi->dma_info.page[page_idx];
-
- skb_copy_to_linear_data(skb, page_address(page) + offset,
- ALIGN(headlen, sizeof(long)));
-}
-
-static inline void
-mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
+mlx5e_copy_skb_header_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
{
u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
@@ -324,46 +327,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
- return rq->mpwqe_mtt_offset +
- wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
-static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
- struct mlx5e_sq *sq,
- struct mlx5e_umr_wqe *wqe,
- u16 ix)
-{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
-
- memset(wqe, 0, sizeof(*wqe));
- cseg->opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
- ds_cnt);
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->umr_mkey_be;
-
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
- ucseg->klm_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
- ucseg->bsf_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
- ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
- dseg->lkey = sq->mkey_be;
- dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
-}
-
-static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_sq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
@@ -372,135 +338,74 @@ static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- mlx5e_build_umr_wqe(rq, sq, wqe, ix);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
- sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+ wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the mtt array, we allocate
- * a little more.
- */
- return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
- MLX5_UMR_MTT_ALIGNMENT);
-}
-
-static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi,
- int i)
-{
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return -ENOMEM;
-
- wi->umr.dma_info[i].page = page;
- wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
- put_page(page);
- return -ENOMEM;
- }
- wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
-
- return 0;
-}
-
-static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
+static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+ int err;
int i;
- wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
- MLX5_MPWRQ_PAGES_PER_WQE,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.dma_info))
- goto err_out;
-
- /* We allocate more than mtt_sz as we will align the pointer */
- wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.mtt_no_align))
- goto err_free_umr;
-
- wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
- wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
- PCI_DMA_TODEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
- goto err_free_mtt;
-
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ err = mlx5e_page_alloc_mapped(rq, dma_info);
+ if (unlikely(err))
goto err_unmap;
- page_ref_add(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
+ wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ page_ref_add(dma_info->page, pg_strides);
wi->skbs_frags[i] = 0;
}
wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
- wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
- wqe->data.lkey = rq->umr_mkey_be;
wqe->data.addr = cpu_to_be64(dma_offset);
return 0;
err_unmap:
while (--i >= 0) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
- put_page(wi->umr.dma_info[i].page);
- }
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
-
-err_free_mtt:
- kfree(wi->umr.mtt_no_align);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-err_free_umr:
- kfree(wi->umr.dma_info);
+ page_ref_sub(dma_info->page, pg_strides);
+ mlx5e_page_release(rq, dma_info, true);
+ }
-err_out:
- return -ENOMEM;
+ return err;
}
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
int i;
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(wi->umr.dma_info[i].page);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+ mlx5e_page_release(rq, dma_info, true);
}
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
- kfree(wi->umr.mtt_no_align);
- kfree(wi->umr.dma_info);
}
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
@@ -508,12 +413,11 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
- rq->stats.mpwqe_frag++;
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
@@ -521,84 +425,23 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
mlx5_wq_ll_update_db_record(wq);
}
-static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
-{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- gfp_t gfp_mask;
- int i;
-
- gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
- wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
- MLX5_MPWRQ_WQE_PAGE_ORDER);
- if (unlikely(!wi->dma_info.page))
- return -ENOMEM;
-
- wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
- rq->wqe_sz, PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
- put_page(wi->dma_info.page);
- return -ENOMEM;
- }
-
- /* We split the high-order page into order-0 ones and manage their
- * reference counter to minimize the memory held by small skb fragments
- */
- split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_add(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq));
- wi->skbs_frags[i] = 0;
- }
-
- wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
- wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
- wqe->data.lkey = rq->mkey_be;
- wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
-
- return 0;
-}
-
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
-{
- int i;
-
- dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
- PCI_DMA_FROMDEVICE);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_sub(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(&wi->dma_info.page[i]);
- }
-}
-
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
int err;
- err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
- if (unlikely(err)) {
- err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
- if (unlikely(err))
- return err;
- set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- mlx5e_post_umr_wqe(rq, ix);
- return -EBUSY;
- }
-
- return 0;
+ err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
}
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
}
#define RQ_CANNOT_POST(rq) \
@@ -617,9 +460,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
int err;
err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (err == -EBUSY)
+ return true;
if (unlikely(err)) {
- if (err != -EBUSY)
- rq->stats.buff_alloc_err++;
+ rq->stats.buff_alloc_err++;
break;
}
@@ -786,40 +630,207 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
rq->stats.packets++;
rq->stats.bytes += cqe_bcnt;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
- napi_gro_receive(rq->cq.napi, skb);
+}
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ unsigned int data_offset,
+ int len)
+{
+ struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+ unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
+ void *data = page_address(di->page) + data_offset;
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+ if (sq->db.xdp.doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->db.xdp.doorbell = false;
+ }
+ rq->stats.xdp_tx_full++;
+ mlx5e_page_release(rq, di, true);
+ return;
+ }
+
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
+ PCI_DMA_TODEVICE);
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* copy the inline part */
+ memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+
+ sq->db.xdp.di[pi] = *di;
+ wi->opcode = MLX5_OPCODE_SEND;
+ wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
+ sq->pc += MLX5E_XDP_TX_WQEBBS;
+
+ sq->db.xdp.doorbell = true;
+ rq->stats.xdp_tx++;
+}
+
+/* returns true if packet was consumed by xdp */
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ const struct bpf_prog *prog,
+ struct mlx5e_dma_info *di,
+ void *data, u16 len)
+{
+ struct xdp_buff xdp;
+ u32 act;
+
+ if (!prog)
+ return false;
+
+ xdp.data = data;
+ xdp.data_end = xdp.data + len;
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return true;
+ }
+}
+
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ u16 wqe_counter, u32 cqe_bcnt)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ void *va, *data;
+
+ di = &rq->dma_info[wqe_counter];
+ va = page_address(di->page);
+ data = va + MLX5_RX_HEADROOM;
+
+ dma_sync_single_range_for_cpu(rq->pdev,
+ di->addr,
+ MLX5_RX_HEADROOM,
+ rq->buff.wqe_sz,
+ DMA_FROM_DEVICE);
+ prefetch(data);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+ return NULL; /* page/packet was consumed by XDP */
+
+ skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ /* queue up for recycling .. */
+ page_ref_inc(di->page);
+ mlx5e_page_release(rq, di, true);
+
+ skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_put(skb, cqe_bcnt);
+
+ return skb;
}
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_rx_wqe *wqe;
- struct sk_buff *skb;
__be16 wqe_counter_be;
+ struct sk_buff *skb;
u16 wqe_counter;
u32 cqe_bcnt;
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
-
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
goto wq_ll_pop;
- }
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb_put(skb, cqe_bcnt);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (rep->vlan && skb_vlan_tag_present(skb))
+ skb_vlan_pop(skb);
+
+ napi_gro_receive(rq->cq.napi, skb);
+
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -831,7 +842,6 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
@@ -845,21 +855,20 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
page_idx++;
frag_offset -= PAGE_SIZE;
}
- wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
while (byte_cnt) {
u32 pg_consumed_bytes =
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
- pg_consumed_bytes);
+ mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
page_idx++;
}
/* copy header */
- wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
- headlen);
+ mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+ head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -869,7 +878,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
struct sk_buff *skb;
u16 cqe_bcnt;
@@ -899,18 +908,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
return;
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
@@ -937,6 +948,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
+ if (xdp_sq->db.xdp.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdp_sq);
+ xdp_sq->db.xdp.doorbell = false;
+ }
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 499487ce3b53..57452fdc5154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -65,6 +65,9 @@ struct mlx5e_sw_stats {
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_full;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -73,10 +76,13 @@ struct mlx5e_sw_stats {
u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
- u64 rx_mpwqe_frag;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
+ u64 rx_cache_reuse;
+ u64 rx_cache_full;
+ u64 rx_cache_empty;
+ u64 rx_cache_busy;
/* Special handling counters */
u64 link_down_events_phy;
@@ -97,6 +103,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -105,10 +114,13 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -272,12 +284,18 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_tx_full;
u64 wqe_err;
u64 mpwqe_filler;
- u64 mpwqe_frag;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
+ u64 cache_reuse;
+ u64 cache_full;
+ u64 cache_empty;
+ u64 cache_busy;
};
static const struct counter_desc rq_stats_desc[] = {
@@ -286,14 +304,20 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 22cfc4ac1837..ce8c54d18906 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -47,6 +48,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
struct mlx5_flow_rule *rule;
+ struct mlx5_esw_flow_attr *attr;
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
@@ -114,27 +116,30 @@ err_create_ft:
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- u32 action, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- u32 src_vport;
+ int err;
- if (rep->vport) /* set source vport for the flow */
- src_vport = rep->vport;
- else
- src_vport = FDB_UPLINK_VPORT;
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ return ERR_PTR(err);
- return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule)
+ struct mlx5_flow_rule *rule,
+ struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(rule);
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ mlx5_eswitch_del_vlan_action(esw, attr);
+
mlx5_del_flow_rule(rule);
mlx5_fc_destroy(priv->mdev, counter);
@@ -159,6 +164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
@@ -222,6 +228,24 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
key->src);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ }
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -361,7 +385,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *dest_vport)
+ struct mlx5_esw_flow_attr *attr)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -369,17 +393,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tc_no_actions(exts))
return -EINVAL;
- *action = 0;
+ memset(attr, 0, sizeof(*attr));
+ attr->in_rep = priv->ppriv;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (*action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
- *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -387,7 +408,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- struct mlx5_eswitch_rep *out_rep;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -397,13 +417,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
out_priv = netdev_priv(out_dev);
- out_rep = out_priv->ppriv;
- if (out_rep->vport == 0)
- *dest_vport = FDB_UPLINK_VPORT;
- else
- *dest_vport = out_rep->vport;
- *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->out_rep = out_priv->ppriv;
+ continue;
+ }
+
+ if (is_tcf_vlan(a)) {
+ if (tcf_vlan_action(a) == VLAN_F_POP) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ attr->vlan = tcf_vlan_push_vid(a);
+ }
continue;
}
@@ -417,18 +446,29 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
- u32 flow_tag, action, dest_vport = 0;
+ bool fdb_flow = false;
+ u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ fdb_flow = true;
+
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
- if (flow)
+ if (flow) {
old = flow->rule;
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ old_attr = flow->attr;
+ } else {
+ if (fdb_flow)
+ flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
+ GFP_KERNEL);
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ }
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
@@ -442,11 +482,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err < 0)
goto err_free;
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (fdb_flow) {
+ flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
+ err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
} else {
err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
if (err < 0)
@@ -465,7 +506,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto err_del_rule;
if (old)
- mlx5e_tc_del_flow(priv, old);
+ mlx5e_tc_del_flow(priv, old, old_attr);
goto out;
@@ -493,7 +534,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
@@ -550,7 +591,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index eb0e72537f10..70a717382357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -52,7 +52,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
- sq->skb[pi] = NULL;
sq->pc++;
sq->stats.nop++;
@@ -82,15 +81,17 @@ static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+ u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+ sq->db.txq.dma_fifo[i].addr = addr;
+ sq->db.txq.dma_fifo[i].size = size;
+ sq->db.txq.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
- return &sq->dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
@@ -221,7 +222,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
+ struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -341,7 +342,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->skb[pi] = skb;
+ sq->db.txq.skb[pi] = skb;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += wi->num_wqebbs;
@@ -368,8 +369,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
/* fill sq edge with nops to avoid wqe wrap around */
- while ((sq->pc & wq->sz_m1) > sq->edge)
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.txq.skb[pi] = NULL;
mlx5e_send_nop(sq, false);
+ }
if (bf)
sq->bf_budget--;
@@ -442,8 +445,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +495,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -501,8 +504,8 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
@@ -520,3 +523,37 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
sq->cc += wi->num_wqebbs;
}
}
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (wi->opcode == MLX5_OPCODE_NOP) {
+ sq->cc++;
+ continue;
+ }
+
+ sq->cc += wi->num_wqebbs;
+
+ mlx5e_page_release(&sq->channel->rq, di, false);
+ }
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_txq_sq_descs(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_xdp_sq_descs(sq);
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bf33bb69210..5703f19a6a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -72,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
do {
u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
mlx5_cqwq_pop(&cq->wq);
sqcc += icowi->num_wqebbs;
@@ -87,7 +87,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
case MLX5_OPCODE_NOP:
break;
case MLX5_OPCODE_UMR:
- mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ mlx5e_post_rx_mpwqe(&sq->channel->rq);
break;
default:
WARN_ONCE(true,
@@ -105,6 +105,66 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
}
+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return false;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ continue;
+ }
+
+ sqcc += wi->num_wqebbs;
+ /* Recycle RX page */
+ mlx5e_page_release(&sq->channel->rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -121,6 +181,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+ if (c->xdp)
+ busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 0e30602ef76d..aaca09002ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -86,23 +86,12 @@ struct cre_des_eq {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- struct mlx5_destroy_eq_mbox_in in;
- struct mlx5_destroy_eq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
- in.eqn = eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (!err)
- goto ex;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
-ex:
- return err;
+ MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+ MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
@@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
+ u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- struct mlx5_create_eq_mbox_in *in;
- struct mlx5_create_eq_mbox_out out;
- int err;
+ __be64 *pas;
+ void *eqc;
int inlen;
+ u32 *in;
+ int err;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
init_eq_buf(eq);
- inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
+ inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
+ MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
- memset(&out, 0, sizeof(out));
- mlx5_fill_page_array(&eq->buf, in->pas);
+ pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
+ mlx5_fill_page_array(&eq->buf, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
- in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
- in->ctx.intr = vecidx;
- in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- in->events_mask = cpu_to_be64(mask);
+ MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+ MLX5_SET64(create_eq_in, in, event_bitmask, mask);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- goto err_in;
+ eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
+ MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+ MLX5_SET(eqc, eqc, uar_page, uar->index);
+ MLX5_SET(eqc, eqc, intr, vecidx);
+ MLX5_SET(eqc, eqc, log_page_size,
+ eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
goto err_in;
- }
snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
- eq->eqn = out.eq_number;
+ eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
@@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_eq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
- in.eqn = eq->eqn;
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b247949df135..abbf2c369923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -81,19 +81,12 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
- int err;
-
- memset(out, 0, sizeof(out));
- memset(in, 0, sizeof(in));
MLX5_SET(modify_nic_vport_context_in, in,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
@@ -116,113 +109,44 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
- return 0;
-ex:
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* E-Switch vport context HW commands */
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
-
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
-
- MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
-}
-
-static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 *vlan, u8 *qos)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
- int err;
- bool cvlan_strip;
- bool cvlan_insert;
-
- memset(out, 0, sizeof(out));
-
- *vlan = 0;
- *qos = 0;
-
- if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
- !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
-
- err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
- if (err)
- goto out;
-
- cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_strip);
-
- cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_insert);
-
- if (cvlan_strip || cvlan_insert) {
- *vlan = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_id);
- *qos = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_pcp);
- }
-
- esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
- vport, *vlan, *qos);
-out:
- return err;
-}
-
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
void *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
+ MLX5_SET(modify_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-
- MLX5_SET(modify_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
-
- return mlx5_cmd_exec_check_status(dev, in, inlen,
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 vlan, u8 qos, bool set)
+ u16 vlan, u8 qos, u8 set_flags)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
return -ENOTSUPP;
- esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
- vport, vlan, qos, set);
+ esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+ vport, vlan, qos, set_flags);
- if (set) {
+ if (set_flags & SET_VLAN_STRIP)
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
+
+ if (set_flags & SET_VLAN_INSERT) {
/* insert only if no vlan in packet */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert, 1);
+
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
@@ -241,13 +165,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
u8 *mac, u8 vlan_valid, u16 vlan)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
u8 *in_mac_addr;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(set_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
MLX5_SET(set_l2_table_entry_in, in, table_index, index);
@@ -257,23 +178,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
MLX5_SET(delete_l2_table_entry_in, in, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
@@ -340,7 +256,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
- pr_warn("FDB: Failed to alloc match parameters\n");
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
return NULL;
}
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -374,8 +290,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) {
- pr_warn(
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
+ esw_warn(esw->dev,
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
@@ -955,7 +871,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
vport_num, promisc_all, promisc_mc);
- if (!vport->trusted || !vport->enabled) {
+ if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
promisc_mc = 0;
promisc_all = 0;
@@ -1291,30 +1207,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_spec *spec;
- u8 smac[ETH_ALEN];
int err = 0;
u8 *smac_v;
- if (vport->spoofchk) {
- err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
- if (err) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
- vport->vport, err);
- return err;
- }
+ if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
- if (!is_valid_ether_addr(smac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
- }
}
esw_vport_cleanup_ingress_rules(esw, vport);
- if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
esw_vport_disable_ingress_acl(esw, vport);
return 0;
}
@@ -1323,7 +1229,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1333,16 +1239,16 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- if (vport->vlan || vport->qos)
+ if (vport->info.vlan || vport->info.qos)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- if (vport->spoofchk) {
+ if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
outer_headers.smac_47_16);
- ether_addr_copy(smac_v, smac);
+ ether_addr_copy(smac_v, vport->info.mac);
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -1352,8 +1258,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
- pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
@@ -1365,8 +1272,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
- pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
vport->ingress.drop_rule = NULL;
goto out;
}
@@ -1386,7 +1294,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_vport_cleanup_egress_rules(esw, vport);
- if (!vport->vlan && !vport->qos) {
+ if (!vport->info.vlan && !vport->info.qos) {
esw_vport_disable_egress_acl(esw, vport);
return 0;
}
@@ -1395,7 +1303,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1409,7 +1317,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan =
@@ -1418,8 +1326,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
- pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.allowed_vlan = NULL;
goto out;
}
@@ -1432,8 +1341,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
0, NULL);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
- pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
@@ -1441,6 +1351,41 @@ out:
return err;
}
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
+
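[Editor's note] node_guid_gen_from_mac() is moved up here (its old definition is removed further down in this file) so that esw_apply_vport_conf() and the set_vport_mac path can share it. It builds an EUI-64-style node GUID: the three OUI bytes of the MAC, the 0xff/0xfe filler, then the remaining three MAC bytes, stored byte-by-byte into the u64. A standalone sketch, not driver code, that reproduces the byte placement for a sample MAC (how the GUID is byte-ordered on the wire is up to the firmware interface):

/* Standalone sketch mirroring the byte placement of the new helper. */
#include <stdint.h>
#include <stdio.h>

static void node_guid_from_mac(uint64_t *node_guid, const uint8_t mac[6])
{
	uint8_t *p = (uint8_t *)node_guid;

	p[7] = mac[0];
	p[6] = mac[1];
	p[5] = mac[2];
	p[4] = 0xff;	/* EUI-64 filler bytes inserted between */
	p[3] = 0xfe;	/* the OUI and the NIC-specific part    */
	p[2] = mac[3];
	p[1] = mac[4];
	p[0] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t guid = 0;
	const uint8_t *p = (const uint8_t *)&guid;
	int i;

	node_guid_from_mac(&guid, mac);
	for (i = 7; i >= 0; i--)	/* high byte index first */
		printf("%02x%s", p[i], i ? ":" : "\n");
	return 0;	/* prints 00:11:22:ff:fe:33:44:55 */
}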
+static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int vport_num = vport->vport;
+
+ if (!vport_num)
+ return;
+
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ vport->info.link_state);
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
+ (vport->info.vlan || vport->info.qos));
+
+ /* Only legacy mode needs ACLs */
+ if (esw->mode == SRIOV_LEGACY) {
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+}
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1451,23 +1396,17 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Only VFs need ACLs for VST and spoofchk filtering */
- if (vport_num && esw->mode == SRIOV_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO);
+ /* Restore old vport configuration */
+ esw_apply_vport_conf(esw, vport);
/* Sync with current vport context */
vport->enabled_events = enable_events;
vport->enabled = true;
/* only PF is trusted by default */
- vport->trusted = (vport_num) ? false : true;
+ if (!vport_num)
+ vport->info.trusted = true;
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1487,11 +1426,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
vport->enabled = false;
synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
/* Wait for current already scheduled events to complete */
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
@@ -1503,7 +1437,12 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+
if (vport_num && esw->mode == SRIOV_LEGACY) {
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1590,6 +1529,25 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ /* VF Vports will be enabled when SRIOV is enabled */
+}
+
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_disable_vport(esw, 0);
+}
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
@@ -1657,6 +1615,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
+ vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1667,8 +1626,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
- esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
- /* VF Vports will be enabled when SRIOV is enabled */
return 0;
abort:
if (esw->work_queue)
@@ -1687,7 +1644,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
return;
esw_info(esw->dev, "cleanup\n");
- esw_disable_vport(esw, 0);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
@@ -1720,18 +1676,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
-{
- ((u8 *)node_guid)[7] = mac[0];
- ((u8 *)node_guid)[6] = mac[1];
- ((u8 *)node_guid)[5] = mac[2];
- ((u8 *)node_guid)[4] = 0xff;
- ((u8 *)node_guid)[3] = 0xfe;
- ((u8 *)node_guid)[2] = mac[3];
- ((u8 *)node_guid)[1] = mac[4];
- ((u8 *)node_guid)[0] = mac[5];
-}
-
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
@@ -1744,13 +1688,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
mlx5_core_warn(esw->dev,
"MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
vport);
- return -EPERM;
+ err = -EPERM;
+ goto unlock;
}
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
@@ -1758,7 +1704,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mlx5_core_warn(esw->dev,
"Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
vport, err);
- return err;
+ goto unlock;
}
node_guid_gen_from_mac(&node_guid, mac);
@@ -1768,9 +1714,12 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
"Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
vport, err);
- mutex_lock(&esw->state_lock);
+ ether_addr_copy(evport->info.mac, mac);
+ evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
+
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -1778,22 +1727,38 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
+ struct mlx5_vport *evport;
+ int err = 0;
+
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- return mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport, link_state);
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+
+ err = mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport, link_state);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d link state, err = %d",
+ vport, err);
+ goto unlock;
+ }
+
+ evport->info.link_state = link_state;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return 0;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport;
- u16 vlan;
- u8 qos;
if (!ESW_ALLOWED(esw))
return -EPERM;
@@ -1805,54 +1770,61 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
- mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
- ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport);
- query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
- ivi->vlan = vlan;
- ivi->qos = qos;
- ivi->spoofchk = evport->spoofchk;
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(ivi->mac, evport->info.mac);
+ ivi->linkstate = evport->info.link_state;
+ ivi->vlan = evport->info.vlan;
+ ivi->qos = evport->info.qos;
+ ivi->spoofchk = evport->info.spoofchk;
+ ivi->trusted = evport->info.trusted;
+ mutex_unlock(&esw->state_lock);
return 0;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- int vport, u16 vlan, u8 qos)
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport;
int err = 0;
- int set = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
return -EINVAL;
- if (vlan || qos)
- set = 1;
-
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- return err;
+ goto unlock;
- mutex_lock(&esw->state_lock);
- evport->vlan = vlan;
- evport->qos = qos;
+ evport->info.vlan = vlan;
+ evport->info.qos = qos;
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto out;
+ goto unlock;
err = esw_vport_egress_config(esw, evport);
}
-out:
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
+
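[Editor's note] mlx5_eswitch_set_vport_vlan() is now a thin wrapper over __mlx5_eswitch_set_vport_vlan(): the old boolean "set" becomes a set_flags bitmask so callers can request VLAN stripping and insertion independently (the offloads code later in this patch uses SET_VLAN_STRIP alone for its global pop policy). A standalone sketch of the flag selection the legacy ndo path performs:

/* Standalone sketch (not driver code) of the set_flags selection. */
#include <stdio.h>

enum {
	SET_VLAN_STRIP  = 1 << 0,
	SET_VLAN_INSERT = 1 << 1,
};

static unsigned char legacy_set_flags(unsigned short vlan, unsigned char qos)
{
	/* legacy path: strip and insert together whenever vlan/qos is set */
	return (vlan || qos) ? (SET_VLAN_STRIP | SET_VLAN_INSERT) : 0;
}

int main(void)
{
	printf("vlan=100 qos=0 -> flags=0x%x\n", legacy_set_flags(100, 0));
	printf("vlan=0   qos=0 -> flags=0x%x\n", legacy_set_flags(0, 0));
	return 0;
}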
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
@@ -1865,16 +1837,14 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- pschk = evport->spoofchk;
- evport->spoofchk = spoofchk;
- if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ evport = &esw->vports[vport];
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
- }
+ if (err)
+ evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);
return err;
@@ -1890,10 +1860,9 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- evport->trusted = setting;
+ evport = &esw->vports[vport];
+ evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
mutex_unlock(&esw->state_lock);
@@ -1906,7 +1875,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
int err = 0;
u32 *out;
@@ -1919,8 +1888,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!out)
return -ENOMEM;
- memset(in, 0, sizeof(in));
-
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a96140971d77..2e2938e08cda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -109,6 +109,16 @@ struct vport_egress {
struct mlx5_flow_rule *drop_rule;
};
+struct mlx5_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ u8 qos;
+ u64 node_guid;
+ int link_state;
+ bool spoofchk;
+ bool trusted;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -121,10 +131,8 @@ struct mlx5_vport {
struct vport_ingress ingress;
struct vport_egress egress;
- u16 vlan;
- u8 qos;
- bool spoofchk;
- bool trusted;
+ struct mlx5_vport_info info;
+
bool enabled;
u16 enabled_events;
};
@@ -149,6 +157,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_rule *miss_rule;
+ int vlan_push_pop_refcount;
} offloads;
};
};
@@ -170,11 +179,14 @@ struct mlx5_eswitch_rep {
void (*unload)(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep);
u16 vport;
- struct mlx5_flow_rule *vport_rx_rule;
+ u8 hw_id[ETH_ALEN];
void *priv_data;
+
+ struct mlx5_flow_rule *vport_rx_rule;
struct list_head vport_sqs_list;
+ u16 vlan;
+ u32 vlan_refcount;
bool valid;
- u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
@@ -201,9 +213,14 @@ struct mlx5_eswitch {
int mode;
};
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
@@ -224,14 +241,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport);
+ struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+enum {
+ SET_VLAN_STRIP = BIT(0),
+ SET_VLAN_INSERT = BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_esw_flow_attr {
+ struct mlx5_eswitch_rep *in_rep;
+ struct mlx5_eswitch_rep *out_rep;
+
+ int action;
+ u16 vlan;
+ bool vlan_handled;
+};
+
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num);
@@ -241,9 +276,17 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport);
+ int vport_index);
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7de40e6b0c25..c55ad8d00c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -46,19 +46,22 @@ enum {
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
void *misc;
+ int action;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ action = attr->action;
+
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = dst_vport;
+ dest.vport_num = attr->out_rep->vport;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
@@ -69,7 +72,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -86,6 +89,186 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
}
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vf_vport, err = 0;
+
+ esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+ for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+ rep = &esw->offloads.vport_reps[vf_vport];
+ if (!rep->valid)
+ continue;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push)
+ vport = in_rep;
+ else if (pop)
+ vport = out_rep;
+ else
+ vport = in_rep;
+
+ return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+ bool push, bool pop, bool fwd)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+ if ((push || pop) && !fwd)
+ goto out_notsupp;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push && in_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* vport has vlan push configured, can't offload VF --> wire rules without it */
+ if (!push && !pop && fwd)
+ if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* protects against (1) setting rules with different vlans to push and
+ * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
+ */
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ goto out_notsupp;
+
+ return 0;
+
+out_notsupp:
+ return -ENOTSUPP;
+}
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ if (err)
+ return err;
+
+ attr->vlan_handled = false;
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+ vport->vlan_refcount++;
+ attr->vlan_handled = true;
+ }
+
+ return 0;
+ }
+
+ if (!push && !pop)
+ return 0;
+
+ if (!(offloads->vlan_push_pop_refcount)) {
+ /* it's the 1st vlan rule, apply global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+ offloads->vlan_push_pop_refcount++;
+
+ if (push) {
+ if (vport->vlan_refcount)
+ goto skip_set_push;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ SET_VLAN_INSERT | SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ vport->vlan = attr->vlan;
+skip_set_push:
+ vport->vlan_refcount++;
+ }
+out:
+ if (!err)
+ attr->vlan_handled = true;
+ return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ if (!attr->vlan_handled)
+ return 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+ vport->vlan_refcount--;
+
+ return 0;
+ }
+
+ if (push) {
+ vport->vlan_refcount--;
+ if (vport->vlan_refcount)
+ goto skip_unset_push;
+
+ vport->vlan = 0;
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+ 0, 0, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+
+skip_unset_push:
+ offloads->vlan_push_pop_refcount--;
+ if (offloads->vlan_push_pop_refcount)
+ return 0;
+
+ /* no more vlan rules, stop global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+ return err;
+}
+
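[Editor's note] The two new counters work at different scopes: each representor's vlan_refcount tracks how many offloaded rules depend on that vport's VLAN push setup (or, for VF-to-wire rules, on the absence of one), while fdb_table.offloads.vlan_push_pop_refcount tracks all push/pop rules and drives the global "strip on every VF vport" pop policy. A simplified standalone model, not driver code and with error handling and the uplink-rule special case omitted, showing how balanced add/del calls keep both counters consistent:

/* Standalone model of the per-rep and global refcounts added here. */
#include <stdio.h>
#include <stdbool.h>

struct rep { int vlan_refcount; int vlan; };
struct fdb { int vlan_push_pop_refcount; bool global_pop; };

static void add_push_rule(struct fdb *fdb, struct rep *rep, int vlan)
{
	if (!fdb->vlan_push_pop_refcount)
		fdb->global_pop = true;		/* first push/pop rule */
	fdb->vlan_push_pop_refcount++;

	if (!rep->vlan_refcount)
		rep->vlan = vlan;		/* program push once per vport */
	rep->vlan_refcount++;
}

static void del_push_rule(struct fdb *fdb, struct rep *rep)
{
	if (--rep->vlan_refcount == 0)
		rep->vlan = 0;			/* undo the vport push setup */

	if (--fdb->vlan_push_pop_refcount == 0)
		fdb->global_pop = false;	/* last rule gone, stop popping */
}

int main(void)
{
	struct fdb fdb = {0};
	struct rep rep = {0};

	add_push_rule(&fdb, &rep, 100);
	add_push_rule(&fdb, &rep, 100);
	del_push_rule(&fdb, &rep);
	printf("global_pop=%d rep.vlan=%d\n", fdb.global_pop, rep.vlan);
	del_push_rule(&fdb, &rep);
	printf("global_pop=%d rep.vlan=%d\n", fdb.global_pop, rep.vlan);
	return 0;
}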
static struct mlx5_flow_rule *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
@@ -144,16 +327,12 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
{
struct mlx5_flow_rule *flow_rule;
struct mlx5_esw_sq *esw_sq;
- int vport;
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
return 0;
- vport = rep->vport == 0 ?
- FDB_UPLINK_VPORT : rep->vport;
-
for (i = 0; i < sqns_num; i++) {
esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
if (!esw_sq) {
@@ -163,7 +342,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
/* Add re-inject rule to the PF/representor sqs */
flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- vport,
+ rep->vport,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -620,27 +799,36 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+ int vport_index,
+ struct mlx5_eswitch_rep *__rep)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport_index];
+
+ memset(rep, 0, sizeof(*rep));
- memcpy(&offloads->vport_reps[rep->vport], rep,
- sizeof(struct mlx5_eswitch_rep));
+ rep->load = __rep->load;
+ rep->unload = __rep->unload;
+ rep->vport = __rep->vport;
+ rep->priv_data = __rep->priv_data;
+ ether_addr_copy(rep->hw_id, __rep->hw_id);
- INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
- offloads->vport_reps[rep->vport].valid = true;
+ INIT_LIST_HEAD(&rep->vport_sqs_list);
+ rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport)
+ int vport_index)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[vport];
+ rep = &offloads->vport_reps[vport_index];
- if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
rep->unload(esw, rep);
- offloads->vport_reps[vport].valid = false;
+ rep->valid = false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 287ade151ec8..113c32326333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -41,10 +41,8 @@
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
@@ -55,30 +53,23 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
+ enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
- if (next_ft) {
- MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
- }
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
@@ -87,10 +78,23 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ switch (op_mod) {
+ case FS_FT_OP_MOD_NORMAL:
+ if (next_ft) {
+ MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
+ MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+ }
+ break;
+
+ case FS_FT_OP_MOD_LAG_DEMUX:
+ MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+ if (next_ft)
+ MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+ next_ft->id);
+ break;
+ }
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -100,11 +104,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -115,39 +116,49 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
- MLX5_SET(modify_flow_table_in, in, other_vport, 1);
- }
- MLX5_SET(modify_flow_table_in, in, modify_field_select,
- MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
- if (next_ft) {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
+
+ if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in,
+ lag_master_next_table_id, next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in,
+ lag_master_next_table_id, 0);
+ }
} else {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number,
+ ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
+ MLX5_SET(modify_flow_table_in, in, table_miss_id,
+ next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ }
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
@@ -155,12 +166,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
u32 *in,
unsigned int *group_id)
{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
- memset(out, 0, sizeof(out));
-
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
@@ -170,13 +179,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in,
- inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out,
group_id);
-
return err;
}
@@ -184,11 +190,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
@@ -200,8 +203,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -212,7 +214,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
{
unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
- u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
@@ -290,11 +292,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
- sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
kvfree(in);
-
return err;
}
@@ -303,7 +302,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
unsigned group_id,
struct fs_fte *fte)
{
- return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+ return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
@@ -327,12 +326,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int index)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
- int err;
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@@ -343,74 +338,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
-
- return err;
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- return err;
-
- *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
-
- return 0;
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ return err;
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)];
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
void *stats;
int err = 0;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
-
return 0;
}
@@ -448,18 +424,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- b->out, b->outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
@@ -480,3 +452,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
+
+#define MAX_ENCAP_SIZE (128)
+
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
+ (MAX_ENCAP_SIZE / sizeof(u32))];
+ void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
+ encap_header);
+ void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
+ encap_header);
+ int inlen = header - (void *)in + size;
+ int err;
+
+ if (size > MAX_ENCAP_SIZE)
+ return -EINVAL;
+
+ memset(in, 0, inlen);
+ MLX5_SET(alloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
+ MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
+ MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
+ memcpy(header, encap_header, size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ return err;
+}
+
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(dealloc_encap_header_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
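[Editor's note] mlx5_cmd_alloc_encap() above issues a variable-length command: inlen is the offset of the inline encap header inside the input buffer plus the caller's header size, capped at MAX_ENCAP_SIZE, so only the bytes actually used are zeroed and copied. A standalone sketch of that sizing; the struct layout here is hypothetical, whereas the driver derives the offset with MLX5_ADDR_OF() against the firmware interface definitions:

/* Standalone sketch (not driver code) of the variable-length sizing. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ENCAP_SIZE 128

struct alloc_encap_in {			/* hypothetical fixed preamble */
	uint32_t opcode;
	uint32_t header_type_and_size;
	uint8_t  encap_header[MAX_ENCAP_SIZE];	/* variable-length tail */
};

static int encap_inlen(size_t header_size, size_t *inlen)
{
	if (header_size > MAX_ENCAP_SIZE)
		return -1;		/* mirrors the -EINVAL check */
	*inlen = offsetof(struct alloc_encap_in, encap_header) + header_size;
	return 0;
}

int main(void)
{
	size_t inlen;

	if (!encap_inlen(50, &inlen))
		printf("inlen for a 50-byte header: %zu bytes\n", inlen);
	return 0;
}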
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 158844cef82b..c5bc4686c832 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -35,6 +35,7 @@
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
u16 vport,
+ enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
@@ -88,4 +89,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
+int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
+ int header_type,
+ size_t size,
+ void *encap_header,
+ u32 *encap_id);
+void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3d6c1f65e586..5da2cc878582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -96,6 +96,10 @@
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -111,12 +115,16 @@ static struct init_tree_node {
int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 6,
+ .ar_size = 7,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
@@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node)
err = mlx5_cmd_destroy_flow_table(dev, ft);
if (err)
- pr_warn("flow steering can't destroy ft\n");
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
}
@@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node)
match_value = mlx5_vzalloc(match_len);
if (!match_value) {
- pr_warn("failed to allocate inbox\n");
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
return;
}
@@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node)
modify_mask,
fte);
if (err)
- pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
- __func__, fg->id, fte->index);
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
}
kvfree(match_value);
}
@@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node)
err = mlx5_cmd_delete_fte(dev, ft,
fte->index);
if (err)
- pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
fte->status = 0;
fg->num_ftes--;
@@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node)
dev = get_dev(&ft->node);
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- pr_warn("flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
}
static struct fs_fte *alloc_fte(u8 action,
@@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
}
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
- enum fs_flow_table_type table_type)
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
@@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
+ ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
ft->max_fte = max_fte;
@@ -722,6 +734,7 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
}
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ enum fs_flow_table_op_mod op_mod,
u16 vport, int prio,
int max_fte, u32 level)
{
@@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
level += fs_prio->start_level;
ft = alloc_flow_table(level,
vport,
- roundup_pow_of_two(max_fte),
- root->table_type);
+ max_fte ? roundup_pow_of_two(max_fte) : 0,
+ root->table_type,
+ op_mod);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
}
tree_init_node(&ft->node, 1, del_flow_table);
- log_table_sz = ilog2(ft->max_fte);
+ log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
- log_table_sz, next_ft, &ft->id);
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+ ft->level, log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level)
{
- return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+ max_fte, level);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level, u16 vport)
{
- return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+ max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level)
+{
+ return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+ level);
}
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
@@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->esw_ingress_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+ if (steering->sniffer_rx_root_ns)
+ return &steering->sniffer_rx_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+ if (steering->sniffer_tx_root_ns)
+ return &steering->sniffer_tx_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->esw_egress_root_ns);
cleanup_root_ns(steering->esw_ingress_root_ns);
cleanup_root_ns(steering->fdb_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
kfree(steering);
}
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+ if (!steering->sniffer_tx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+ if (!steering->sniffer_rx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
@@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
}
}
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+ err = init_sniffer_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+ err = init_sniffer_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
mlx5_cleanup_fs(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 9cffb6aeb4e9..71ff03bceabb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,13 @@ enum fs_flow_table_type {
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+};
+
+enum fs_flow_table_op_mod {
+ FS_FT_OP_MOD_NORMAL,
+ FS_FT_OP_MOD_LAG_DEMUX,
};
enum fs_fte_status {
@@ -61,6 +68,8 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
struct fs_node {
@@ -93,6 +102,7 @@ struct mlx5_flow_table {
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
+ enum fs_flow_table_op_mod op_mod;
struct {
bool active;
unsigned int required_groups;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 77fc1aa26114..5718aada6605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -38,13 +38,10 @@
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_board_id(struct mlx5_core_dev *dev)
@@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_init_hca_mbox_in in;
- struct mlx5_cmd_init_hca_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
- return err;
+ MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd_teardown_hca_mbox_in in;
- struct mlx5_cmd_teardown_hca_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
new file mode 100644
index 000000000000..55957246c0e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+enum {
+ MLX5_LAG_FLAG_BONDED = 1 << 0,
+};
+
+struct lag_func {
+ struct mlx5_core_dev *dev;
+ struct net_device *netdev;
+};
+
+/* Used for collection of netdev event info. */
+struct lag_tracker {
+ enum netdev_lag_tx_type tx_type;
+ struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
+ bool is_bonded;
+};
+
+/* LAG data of a ConnectX card.
+ * It serves both its phys functions.
+ */
+struct mlx5_lag {
+ u8 flags;
+ u8 v2p_map[MLX5_MAX_PORTS];
+ struct lag_func pf[MLX5_MAX_PORTS];
+ struct lag_tracker tracker;
+ struct delayed_work bond_work;
+ struct notifier_block nb;
+};
+
+/* General purpose, use for short periods of time.
+ * Beware of lock dependencies (preferably, no locks should be acquired
+ * under it).
+ */
+static DEFINE_MUTEX(lag_mutex);
+
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+ u8 remap_port2)
+{
+ u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+ void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+
+ MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
+
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+ u8 remap_port2)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+ void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+ MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+ MLX5_SET(modify_lag_in, in, field_select, 0x1);
+
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
+
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+
+ MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
+
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+
+ MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+
+static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+{
+ return dev->priv.lag;
+}
+
+static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+ struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].netdev == ndev)
+ return i;
+
+ return -1;
+}
+
+static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+}
+
+static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+ u8 *port1, u8 *port2)
+{
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ if (tracker->netdev_state[0].tx_enabled) {
+ *port1 = 1;
+ *port2 = 1;
+ } else {
+ *port1 = 2;
+ *port2 = 2;
+ }
+ } else {
+ *port1 = 1;
+ *port2 = 2;
+ if (!tracker->netdev_state[0].link_up)
+ *port1 = 2;
+ else if (!tracker->netdev_state[1].link_up)
+ *port2 = 1;
+ }
+}
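+
+/* Illustrative summary of the mapping computed above (derived from the
+ * function logic, for reference only):
+ *   active-backup, port 1 tx_enabled     -> v2p = {1, 1}
+ *   active-backup, port 1 not tx_enabled -> v2p = {2, 2}
+ *   hash, both links up                  -> v2p = {1, 2}
+ *   hash, port 1 link down               -> v2p = {2, 2}
+ *   hash, port 2 link down               -> v2p = {1, 1}
+ */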
+
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
+ &ldev->v2p_map[1]);
+
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to create LAG (%d)\n",
+ err);
+}
+
+static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+
+ err = mlx5_cmd_destroy_lag(dev0);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to destroy LAG (%d)\n",
+ err);
+}
+
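+/* Reconcile the bonding state reported by the netdev notifier (tracker)
+ * with the device LAG state.  Rough summary of the cases handled below:
+ *   not bonded -> bonded:  refused while SRIOV is enabled on either
+ *                          function; otherwise the per-port IB devices are
+ *                          removed, the hardware LAG is created, a single
+ *                          IB device is re-added on PF0 and RoCE is
+ *                          enabled on PF1.
+ *   bonded -> bonded:      the tx affinity mapping is re-applied if it
+ *                          changed.
+ *   bonded -> not bonded:  the above is undone in reverse order.
+ */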
+static void mlx5_do_bond(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+ struct lag_tracker tracker;
+ u8 v2p_port1, v2p_port2;
+ int i, err;
+
+ if (!dev0 || !dev1)
+ return;
+
+ mutex_lock(&lag_mutex);
+ tracker = ldev->tracker;
+ mutex_unlock(&lag_mutex);
+
+ if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
+ if (mlx5_sriov_is_enabled(dev0) ||
+ mlx5_sriov_is_enabled(dev1)) {
+ mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
+ return;
+ }
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+
+ mlx5_activate_lag(ldev, &tracker);
+
+ mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_enable_roce(dev1);
+ } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
+ &v2p_port2);
+
+ if ((v2p_port1 != ldev->v2p_map[0]) ||
+ (v2p_port2 != ldev->v2p_map[1])) {
+ ldev->v2p_map[0] = v2p_port1;
+ ldev->v2p_map[1] = v2p_port2;
+
+ err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ }
+ } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_disable_roce(dev1);
+
+ mlx5_deactivate_lag(ldev);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+ }
+}
+
+static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
+{
+ schedule_delayed_work(&ldev->bond_work, delay);
+}
+
+static void mlx5_do_bond_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
+ bond_work);
+ int status;
+
+ status = mlx5_dev_list_trylock();
+ if (!status) {
+ /* 1 sec delay. */
+ mlx5_queue_bond_work(ldev, HZ);
+ return;
+ }
+
+ mlx5_do_bond(ldev);
+ mlx5_dev_list_unlock();
+}
+
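+/* Track CHANGEUPPER events: bond_status collects one bit per PF whose
+ * netdev is enslaved to the upper, so 0x3 together with exactly
+ * MLX5_MAX_PORTS slaves and an active-backup or hash tx type marks the
+ * card as bonded; any foreign slave pushes num_slaves past MLX5_MAX_PORTS
+ * and clears the bonded state again (summary of the checks below).
+ */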
+static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *upper = info->upper_dev, *ndev_tmp;
+ struct netdev_lag_upper_info *lag_upper_info;
+ bool is_bonded;
+ int bond_status = 0;
+ int num_slaves = 0;
+ int idx;
+
+ if (!netif_is_lag_master(upper))
+ return 0;
+
+ lag_upper_info = info->upper_info;
+
+ /* The event may still be of interest if the slave does not belong to
+ * us, but is enslaved to a master which has one or more of our netdevs
+ * as slaves (e.g., if a new slave is added to a master that bonds two
+ * of our netdevs, we should unbond).
+ */
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+ if (idx > -1)
+ bond_status |= (1 << idx);
+
+ num_slaves++;
+ }
+ rcu_read_unlock();
+
+ /* None of this lagdev's netdevs are slaves of this master. */
+ if (!(bond_status & 0x3))
+ return 0;
+
+ if (lag_upper_info)
+ tracker->tx_type = lag_upper_info->tx_type;
+
+ /* Determine bonding status:
+ * A device is considered bonded if both its physical ports are slaves
+ * of the same lag master, and no other slaves are present.
+ * Lag mode must be activebackup or hash.
+ */
+ is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
+ (bond_status == 0x3) &&
+ ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
+ (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+
+ if (tracker->is_bonded != is_bonded) {
+ tracker->is_bonded = is_bonded;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag_lower_info;
+ int idx;
+
+ if (!netif_is_lag_port(ndev))
+ return 0;
+
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
+ if (idx == -1)
+ return 0;
+
+ /* This information is used to determine virtual to physical
+ * port mapping.
+ */
+ lag_lower_info = info->lower_state_info;
+ if (!lag_lower_info)
+ return 0;
+
+ tracker->netdev_state[idx] = *lag_lower_info;
+
+ return 1;
+}
+
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct lag_tracker tracker;
+ struct mlx5_lag *ldev;
+ int changed = 0;
+
+ if (!net_eq(dev_net(ndev), &init_net))
+ return NOTIFY_DONE;
+
+ if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ return NOTIFY_DONE;
+
+ ldev = container_of(this, struct mlx5_lag, nb);
+ tracker = ldev->tracker;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
+ ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
+ ndev, ptr);
+ break;
+ }
+
+ mutex_lock(&lag_mutex);
+ ldev->tracker = tracker;
+ mutex_unlock(&lag_mutex);
+
+ if (changed)
+ mlx5_queue_bond_work(ldev, 0);
+
+ return NOTIFY_DONE;
+}
+
+static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+{
+ struct mlx5_lag *ldev;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return NULL;
+
+ INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+ return ldev;
+}
+
+static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+{
+ kfree(ldev);
+}
+
+static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
+{
+ unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+
+ if (fn >= MLX5_MAX_PORTS)
+ return;
+
+ mutex_lock(&lag_mutex);
+ ldev->pf[fn].dev = dev;
+ ldev->pf[fn].netdev = netdev;
+ ldev->tracker.netdev_state[fn].link_up = 0;
+ ldev->tracker.netdev_state[fn].tx_enabled = 0;
+
+ dev->priv.lag = ldev;
+ mutex_unlock(&lag_mutex);
+}
+
+static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev == dev)
+ break;
+
+ if (i == MLX5_MAX_PORTS)
+ return;
+
+ mutex_lock(&lag_mutex);
+ memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
+
+ dev->priv.lag = NULL;
+ mutex_unlock(&lag_mutex);
+}
+
+
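+/* Both PFs of a card share one struct mlx5_lag: the first function to
+ * probe allocates it, the second one finds it through
+ * mlx5_get_next_phys_dev(), and the netdev notifier is registered only
+ * once (overview of the flow below).
+ */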
+/* Must be called with intf_mutex held */
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+ struct mlx5_lag *ldev = NULL;
+ struct mlx5_core_dev *tmp_dev;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ return;
+
+ tmp_dev = mlx5_get_next_phys_dev(dev);
+ if (tmp_dev)
+ ldev = tmp_dev->priv.lag;
+
+ if (!ldev) {
+ ldev = mlx5_lag_dev_alloc();
+ if (!ldev) {
+ mlx5_core_err(dev, "Failed to alloc lag dev\n");
+ return;
+ }
+ }
+
+ mlx5_lag_dev_add_pf(ldev, dev, netdev);
+
+ if (!ldev->nb.notifier_call) {
+ ldev->nb.notifier_call = mlx5_lag_netdev_event;
+ if (register_netdevice_notifier(&ldev->nb)) {
+ ldev->nb.notifier_call = NULL;
+ mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+ }
+ }
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ int i;
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev)
+ return;
+
+ if (mlx5_lag_is_bonded(ldev))
+ mlx5_deactivate_lag(ldev);
+
+ mlx5_lag_dev_remove_pf(ldev, dev);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ break;
+
+ if (i == MLX5_MAX_PORTS) {
+ if (ldev->nb.notifier_call)
+ unregister_netdevice_notifier(&ldev->nb);
+ cancel_delayed_work_sync(&ldev->bond_work);
+ mlx5_lag_dev_free(ldev);
+ }
+}
+
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ bool res;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ res = ldev && mlx5_lag_is_bonded(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_active);
+
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+
+ if (!(ldev && mlx5_lag_is_bonded(ldev)))
+ goto unlock;
+
+ if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ ndev = ldev->tracker.netdev_state[0].tx_enabled ?
+ ldev->pf[0].netdev : ldev->pf[1].netdev;
+ } else {
+ ndev = ldev->pf[0].netdev;
+ }
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ mutex_unlock(&lag_mutex);
+
+ return ndev;
+}
+EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
+ priv);
+ struct mlx5_lag *ldev;
+
+ if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
+ return true;
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+ return true;
+
+ /* If bonded, we do not add an IB device for PF1. */
+ return false;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index 1368dac00da0..3a3b0005fd2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -39,36 +39,33 @@
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
- struct mlx5_mad_ifc_mbox_in *in = NULL;
- struct mlx5_mad_ifc_mbox_out *out = NULL;
- int err;
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- out = kzalloc(sizeof(*out), GFP_KERNEL);
- if (!out) {
- err = -ENOMEM;
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
goto out;
- }
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
- in->hdr.opmod = cpu_to_be16(opmod);
- in->port = port;
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ MLX5_SET(mad_ifc_in, in, port, port);
- memcpy(in->data, inb, sizeof(in->data));
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out;
- }
-
- memcpy(outb, out->data, sizeof(out->data));
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2385bae92672..d9c3c70b29e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -72,16 +72,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(intf_mutex);
-
-struct mlx5_device_context {
- struct list_head list;
- struct mlx5_interface *intf;
- void *context;
-};
-
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -324,7 +314,7 @@ enum {
MLX5_DEV_CAP_FLAG_DCT,
};
-static u16 to_fw_pkey_sz(u32 size)
+static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
case 128:
@@ -340,7 +330,7 @@ static u16 to_fw_pkey_sz(u32 size)
case 4096:
return 5;
default:
- pr_warn("invalid pkey table size %d\n", size);
+ mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
return 0;
}
}
@@ -363,10 +353,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
- if (err)
- goto query_ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -409,20 +395,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
- int err;
-
- memset(out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- return err;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
- return err;
+ return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
@@ -490,7 +467,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
- to_fw_pkey_sz(128));
+ to_fw_pkey_sz(dev, 128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -528,37 +505,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
- int err;
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err_v2(out);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
@@ -758,44 +720,40 @@ clean:
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
- int err;
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
u32 sup_issi;
-
- memset(query_in, 0, sizeof(query_in));
- memset(query_out, 0, sizeof(query_out));
+ int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
-
- err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
if (err) {
- if (((struct mlx5_outbox_hdr *)query_out)->status ==
- MLX5_CMD_STAT_BAD_OP_ERR) {
+ u32 syndrome;
+ u8 status;
+
+ mlx5_cmd_mbox_status(query_out, &status, &syndrome);
+ if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
}
- pr_err("failed to query ISSI\n");
+ pr_err("failed to query ISSI err(%d)\n", err);
return err;
}
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- memset(set_in, 0, sizeof(set_in));
- memset(set_out, 0, sizeof(set_out));
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
-
- err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
if (err) {
- pr_err("failed to set ISSI=1\n");
+ pr_err("failed to set ISSI=1 err(%d)\n", err);
return err;
}
@@ -809,120 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -ENOTSUPP;
}
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx)
- return;
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
@@ -995,8 +839,102 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
debugfs_remove(priv->dbg_root);
}
-#define MLX5_IB_MOD "mlx5_ib"
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto out;
+ }
+
+ err = mlx5_query_board_id(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query board id failed\n");
+ goto out;
+ }
+
+ err = mlx5_eq_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize eq\n");
+ goto out;
+ }
+
+ MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
+
+ err = mlx5_init_cq_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize cq table\n");
+ goto err_eq_cleanup;
+ }
+
+ mlx5_init_qp_table(dev);
+
+ mlx5_init_srq_table(dev);
+
+ mlx5_init_mkey_table(dev);
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_tables_cleanup;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+ goto err_rl_cleanup;
+ }
+#endif
+
+ err = mlx5_sriov_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+ goto err_eswitch_cleanup;
+ }
+
+ return 0;
+
+err_eswitch_cleanup:
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+
+err_rl_cleanup:
+#endif
+ mlx5_cleanup_rl_table(dev);
+
+err_tables_cleanup:
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+
+err_eq_cleanup:
+ mlx5_eq_cleanup(dev);
+
+out:
+ return err;
+}
+
+static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+{
+ mlx5_sriov_cleanup(dev);
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+#endif
+ mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_eq_cleanup(dev);
+}
+
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool boot)
{
struct pci_dev *pdev = dev->pdev;
int err;
@@ -1029,12 +967,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out_err;
}
- mlx5_pagealloc_init(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
dev_err(&pdev->dev, "enable hca failed\n");
- goto err_pagealloc_cleanup;
+ goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
@@ -1087,34 +1023,21 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_start_health_poll(dev);
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_query_board_id(dev);
- if (err) {
- dev_err(&pdev->dev, "query board id failed\n");
+ if (boot && mlx5_init_once(dev, priv)) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
}
err = mlx5_enable_msix(dev);
if (err) {
dev_err(&pdev->dev, "enable msix failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_eq_init(dev);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize eq\n");
- goto disable_msix;
+ goto err_cleanup_once;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
if (err) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
- goto err_eq_cleanup;
+ goto err_disable_msix;
}
err = mlx5_start_eqs(dev);
@@ -1130,15 +1053,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
}
err = mlx5_irq_set_affinity_hints(dev);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-
- MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
- mlx5_init_cq_table(dev);
- mlx5_init_qp_table(dev);
- mlx5_init_srq_table(dev);
- mlx5_init_mkey_table(dev);
+ goto err_affinity_hints;
+ }
err = mlx5_init_fs(dev);
if (err) {
@@ -1146,36 +1064,26 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_fs;
}
- err = mlx5_init_rl_table(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init rate limiting\n");
- goto err_rl;
- }
-
#ifdef CONFIG_MLX5_CORE_EN
- err = mlx5_eswitch_init(dev);
- if (err) {
- dev_err(&pdev->dev, "eswitch init failed %d\n", err);
- goto err_reg_dev;
- }
+ mlx5_eswitch_attach(dev->priv.eswitch);
#endif
- err = mlx5_sriov_init(dev);
+ err = mlx5_sriov_attach(dev);
if (err) {
dev_err(&pdev->dev, "sriov init failed %d\n", err);
goto err_sriov;
}
- err = mlx5_register_device(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto err_reg_dev;
+ if (mlx5_device_registered(dev)) {
+ mlx5_attach_device(dev);
+ } else {
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
-
clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
@@ -1183,23 +1091,19 @@ out:
return 0;
-err_sriov:
- if (mlx5_sriov_cleanup(dev))
- dev_err(&dev->pdev->dev, "sriov cleanup failed\n");
+err_reg_dev:
+ mlx5_sriov_detach(dev);
+err_sriov:
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-err_reg_dev:
- mlx5_cleanup_rl_table(dev);
-err_rl:
mlx5_cleanup_fs(dev);
+
err_fs:
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@@ -1208,12 +1112,13 @@ err_stop_eqs:
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
-err_eq_cleanup:
- mlx5_eq_cleanup(dev);
-
-disable_msix:
+err_disable_msix:
mlx5_disable_msix(dev);
+err_cleanup_once:
+ if (boot)
+ mlx5_cleanup_once(dev);
+
err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
@@ -1230,8 +1135,7 @@ reclaim_boot_pages:
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
-err_pagealloc_cleanup:
- mlx5_pagealloc_cleanup(dev);
+err_cmd_cleanup:
mlx5_cmd_cleanup(dev);
out_err:
@@ -1241,40 +1145,35 @@ out_err:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool cleanup)
{
int err = 0;
- err = mlx5_sriov_cleanup(dev);
- if (err) {
- dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
- __func__);
- return err;
- }
-
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
goto out;
}
- mlx5_unregister_device(dev);
+
+ if (mlx5_device_registered(dev))
+ mlx5_detach_device(dev);
+
+ mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-
- mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
- mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
@@ -1284,7 +1183,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
- mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
out:
@@ -1294,22 +1192,6 @@ out:
return err;
}
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
struct mlx5_core_event_handler {
void (*event)(struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
@@ -1323,6 +1205,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
+#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1344,8 +1227,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
- pr_warn("selected profile out of range, selecting default (%d)\n",
- MLX5_DEFAULT_PROF);
+ mlx5_core_warn(dev,
+ "selected profile out of range, selecting default (%d)\n",
+ MLX5_DEFAULT_PROF);
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
@@ -1368,12 +1252,18 @@ static int init_one(struct pci_dev *pdev,
goto close_pci;
}
- err = mlx5_load_one(dev, priv);
+ mlx5_pagealloc_init(dev);
+
+ err = mlx5_load_one(dev, priv, true);
if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
goto clean_health;
}
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
err = devlink_register(devlink, &pdev->dev);
if (err)
goto clean_load;
@@ -1381,8 +1271,9 @@ static int init_one(struct pci_dev *pdev,
return 0;
clean_load:
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, true);
clean_health:
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
@@ -1400,11 +1291,15 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_priv *priv = &dev->priv;
devlink_unregister(devlink);
- if (mlx5_unload_one(dev, priv)) {
+ mlx5_unregister_device(dev);
+
+ if (mlx5_unload_one(dev, priv, true)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
return;
}
+
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
@@ -1419,7 +1314,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
@@ -1491,7 +1386,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, priv);
+ err = mlx5_load_one(dev, priv, false);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
, __func__, err);
@@ -1513,7 +1408,7 @@ static void shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d5a0c2d61a18..ba2b09cc192f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -37,70 +37,30 @@
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
-struct mlx5_attach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_attach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
-struct mlx5_detach_mcg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- __be32 rsvd;
- u8 gid[16];
-};
-
-struct mlx5_detach_mcg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvf[8];
-};
-
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_attach_mcg_mbox_in in;
- struct mlx5_attach_mcg_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ void *gid;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- struct mlx5_detach_mcg_mbox_in in;
- struct mlx5_detach_mcg_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
- memcpy(in.gid, mgid, sizeof(*mgid));
- in.qpn = cpu_to_be32(qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ void *gid;
- return err;
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 2f86ec6fcf25..3d0cfb9f18f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -58,8 +58,8 @@ do { \
} while (0)
#define mlx5_core_err(__dev, format, ...) \
- dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
- (__dev)->priv.name, __func__, __LINE__, current->pid, \
+ dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(__dev, format, ...) \
@@ -75,19 +75,6 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
-static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
- int in_size, u32 *out,
- int out_size)
-{
- int err;
-
- err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
- if (err)
- return err;
-
- return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
-}
-
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
@@ -96,7 +83,12 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
+int mlx5_sriov_attach(struct mlx5_core_dev *dev);
+void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
@@ -105,7 +97,38 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_attach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev);
+bool mlx5_device_registered(struct mlx5_core_dev *dev);
+int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_unregister_device(struct mlx5_core_dev *dev);
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+void mlx5_dev_list_lock(void);
+void mlx5_dev_list_unlock(void);
+int mlx5_dev_list_trylock(void);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+ /* LACP owner conditions:
+ * 1) Function is physical.
+ * 2) LAG is supported by FW.
+ * 3) LAG is managed by driver (currently the only option).
+ */
+ return MLX5_CAP_GEN(dev, vport_group_manager) &&
+ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+ MLX5_CAP_GEN(dev, lag_master);
+}
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 77a7293921d5..b9736f505bdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
}
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out)
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_create_mkey_mbox_out lout;
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 mkey_index;
+ void *mkc;
int err;
u8 key;
- memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock_irq(&dev->priv.mkey_lock);
- in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
- if (callback) {
- err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
- callback, context);
- return err;
- } else {
- err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
- }
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- if (err) {
- mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
- return err;
- }
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
- if (lout.hdr.status) {
- mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
- return mlx5_cmd_status_to_err(&lout.hdr);
- }
+ if (callback)
+ return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+ callback, context);
+
+ err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
+ if (err)
+ return err;
- mkey->iova = be64_to_cpu(in->seg.start_addr);
- mkey->size = be64_to_cpu(in->seg.len);
- mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
- mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+ mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- be32_to_cpu(lout.mkey), key, mkey->key);
+ mkey_index, key, mkey->key);
/* connect to mkey tree */
write_lock_irq(&table->lock);
@@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
+
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
+{
+ return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+ NULL, 0, NULL, NULL);
+}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
- struct mlx5_destroy_mkey_mbox_in in;
- struct mlx5_destroy_mkey_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
write_lock_irqsave(&table->lock, flags);
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
@@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
return -ENOENT;
}
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_mkey_mbox_in in;
- int err;
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
- memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
+ MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
- struct mlx5_query_special_ctxs_mbox_in in;
- struct mlx5_query_special_ctxs_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *mkey = be32_to_cpu(out.dump_fill_mkey);
-
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+static inline u32 mlx5_get_psv(u32 *out, int psv_index)
+{
+ switch (psv_index) {
+ case 1: return MLX5_GET(create_psv_out, out, psv1_index);
+ case 2: return MLX5_GET(create_psv_out, out, psv2_index);
+ case 3: return MLX5_GET(create_psv_out, out, psv3_index);
+ default: return MLX5_GET(create_psv_out, out, psv0_index);
+ }
+}
+
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- struct mlx5_allocate_psv_in in;
- struct mlx5_allocate_psv_out out;
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
+ MLX5_SET(create_psv_in, in, pd, pdn);
+ MLX5_SET(create_psv_in, in, num_psv, npsvs);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
- in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "cmd exec failed %d\n", err);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n",
- out.hdr.status);
- return mlx5_cmd_status_to_err(&out.hdr);
- }
for (i = 0; i < npsvs; i++)
- sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+ sig_index[i] = mlx5_get_psv(out, i);
return err;
}
@@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- struct mlx5_destroy_psv_in in;
- struct mlx5_destroy_psv_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
- in.psv_number = cpu_to_be32(psv_num);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err) {
- mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
- goto out;
- }
-
- if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n",
- out.hdr.status);
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto out;
- }
-
-out:
- return err;
+ MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, in, psvn, psv_num);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 32dea3524cee..d4585154151d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -44,12 +44,6 @@ enum {
MLX5_PAGES_TAKE = 2
};
-enum {
- MLX5_BOOT_PAGES = 1,
- MLX5_INIT_PAGES = 2,
- MLX5_POST_INIT_PAGES = 3
-};
-
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
@@ -67,33 +61,6 @@ struct fw_page {
unsigned free_count;
};
-struct mlx5_query_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_pages;
-};
-
-struct mlx5_manage_pages_inbox {
- struct mlx5_inbox_hdr hdr;
- __be16 rsvd;
- __be16 func_id;
- __be32 num_entries;
- __be64 pas[0];
-};
-
-struct mlx5_manage_pages_outbox {
- struct mlx5_outbox_hdr hdr;
- __be32 num_entries;
- u8 rsvd[4];
- __be64 pas[0];
-};
-
enum {
MAX_RECLAIM_TIME_MSECS = 5000,
MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
@@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- struct mlx5_query_pages_inbox in;
- struct mlx5_query_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
- in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+ MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+ MLX5_SET(query_pages_in, in, op_mod, boot ?
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *npages = be32_to_cpu(out.num_pages);
- *func_id = be16_to_cpu(out.func_id);
+ *npages = MLX5_GET(query_pages_out, out, num_pages);
+ *func_id = MLX5_GET(query_pages_out, out, function_id);
return err;
}
@@ -280,46 +244,37 @@ out_alloc:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int err;
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return;
-
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- in->func_id = cpu_to_be16(func_id);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
- if (!err)
- err = mlx5_cmd_status_to_err(&out.hdr);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
- mlx5_core_warn(dev, "page notify failed\n");
-
- kfree(in);
+ mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+ func_id, err);
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
- struct mlx5_manage_pages_inbox *in;
- struct mlx5_manage_pages_outbox out;
- int inlen;
+ u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
int err;
+ u32 *in;
int i;
- inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+ inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
goto out_free;
}
- memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
retry:
@@ -332,27 +287,21 @@ retry:
goto retry;
}
- in->pas[i] = cpu_to_be64(addr);
+ MLX5_SET64(manage_pages_in, in, pas[i], addr);
}
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
- in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be32(npages);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_4k;
}
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_4k;
- }
-
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
@@ -364,7 +313,7 @@ retry:
out_4k:
for (i--; i >= 0; i--)
- free_4k(dev, be64_to_cpu(in->pas[i]));
+ free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
if (notify_fail)
@@ -373,64 +322,67 @@ out_free:
}
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
- struct mlx5_manage_pages_inbox *in, int in_size,
- struct mlx5_manage_pages_outbox *out, int out_size)
+ u32 *in, int in_size, u32 *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
+ u32 func_id;
u32 npages;
u32 i = 0;
if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
- (u32 *)out, out_size);
+ return mlx5_cmd_exec(dev, in, in_size, out, out_size);
- npages = be32_to_cpu(in->num_entries);
+ /* No hard feelings, we want our pages back! */
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ func_id = MLX5_GET(manage_pages_in, in, function_id);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
- out->pas[i] = cpu_to_be64(fwp->addr);
p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+
+ MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
i++;
}
- out->num_entries = cpu_to_be32(i);
+ MLX5_SET(manage_pages_out, out, output_num_entries, i);
return 0;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
- struct mlx5_manage_pages_inbox in;
- struct mlx5_manage_pages_outbox *out;
+ int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int num_claimed;
- int outlen;
- u64 addr;
+ u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
- memset(&in, 0, sizeof(in));
- outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+ outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
- in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be32(npages);
+ MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+ MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+ MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
- num_claimed = be32_to_cpu(out->num_entries);
+ num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (num_claimed > npages) {
mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
num_claimed, npages);
@@ -438,10 +390,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- for (i = 0; i < num_claimed; i++) {
- addr = be64_to_cpu(out->pas[i]);
- free_4k(dev, addr);
- }
+ for (i = 0; i < num_claimed; i++)
+ free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
if (nclaimed)
*nclaimed = num_claimed;
@@ -518,8 +469,8 @@ static int optimal_reclaimed_pages(void)
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
- sizeof(struct mlx5_manage_pages_outbox)) /
- FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+ MLX5_ST_SZ_BYTES(manage_pages_out)) /
+ MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
@@ -594,6 +545,12 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
int prev_vfs_pages = dev->priv.vfs_pages;
+ /* In case of internal error we will free the pages manually later */
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_core_warn(dev, "Skipping wait for vf pages stage");
+ return 0;
+ }
+
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
dev->priv.name);
while (dev->priv.vfs_pages) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index f2d3aee909e8..bd830d8d6c5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -36,66 +36,27 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-struct mlx5_alloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 pdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_pd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- struct mlx5_alloc_pd_mbox_in in;
- struct mlx5_alloc_pd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- *pdn = be32_to_cpu(out.pdn) & 0xffffff;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- struct mlx5_dealloc_pd_mbox_in in;
- struct mlx5_dealloc_pd_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
- in.pdn = cpu_to_be32(pdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 752c08127138..34e7184e23c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -38,45 +38,42 @@
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
- u16 reg_num, int arg, int write)
+ u16 reg_id, int arg, int write)
{
- struct mlx5_access_reg_mbox_in *in = NULL;
- struct mlx5_access_reg_mbox_out *out = NULL;
+ int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
+ int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
int err = -ENOMEM;
+ u32 *out = NULL;
+ u32 *in = NULL;
+ void *data;
- in = mlx5_vzalloc(sizeof(*in) + size_in);
- if (!in)
- return -ENOMEM;
-
- out = mlx5_vzalloc(sizeof(*out) + size_out);
- if (!out)
- goto ex1;
-
- memcpy(in->data, data_in, size_in);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
- in->hdr.opmod = cpu_to_be16(!write);
- in->arg = cpu_to_be32(arg);
- in->register_id = cpu_to_be16(reg_num);
- err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(*out) + size_out);
- if (err)
- goto ex2;
+ in = mlx5_vzalloc(inlen);
+ out = mlx5_vzalloc(outlen);
+ if (!in || !out)
+ goto out;
- if (out->hdr.status)
- err = mlx5_cmd_status_to_err(&out->hdr);
+ data = MLX5_ADDR_OF(access_register_in, in, register_data);
+ memcpy(data, data_in, size_in);
- if (!err)
- memcpy(data_out, out->data, size_out);
+ MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
+ MLX5_SET(access_register_in, in, op_mod, !write);
+ MLX5_SET(access_register_in, in, argument, arg);
+ MLX5_SET(access_register_in, in, register_id, reg_id);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ if (err)
+ goto out;
+
+ data = MLX5_ADDR_OF(access_register_out, out, register_data);
+ memcpy(data_out, data, size_out);
-ex2:
+out:
kvfree(out);
-ex1:
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
-
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
-
return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
ptys_size, MLX5_REG_PTYS, 0, 0);
}
@@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
{
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
- u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(mlcr_reg, in, local_port, 1);
MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
@@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port)
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+ u32 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
+ local_port);
if (err)
return err;
- if (proto_mask == MLX5_PTYS_EN)
- *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- else
- *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
+
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
+ local_port);
+ if (err)
+ return err;
+
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask)
@@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
@@ -263,19 +267,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
{
- u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)];
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(paos_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
-
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
@@ -284,13 +284,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, local_port, port);
-
mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 0);
@@ -304,14 +301,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
- u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
MLX5_SET(pmtu_reg, in, local_port, port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
@@ -333,15 +327,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
int module_mapping;
int err;
- memset(in, 0, sizeof(in));
-
MLX5_SET(pmlp_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_PMLP, 0, 0);
if (err)
@@ -410,11 +401,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
- u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, local_port);
-
return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
pvlc_size, MLX5_REG_PVLC, 0, 0);
}
@@ -460,10 +449,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
MLX5_SET(pfcc_reg, in, pprx, rx_pause);
@@ -476,13 +464,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -500,10 +486,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
@@ -517,13 +502,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
-
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
@@ -567,12 +550,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev)
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
{
- u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
int err;
int i;
- memset(in, 0, sizeof(in));
for (i = 0; i < 8; i++) {
if (prio_tc[i] > mlx5_max_tc(mdev))
return -EINVAL;
@@ -617,11 +599,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
@@ -633,11 +613,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
int i;
- memset(in, 0, sizeof(in));
-
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
@@ -651,12 +629,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
{
- u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
void *ets_tcn_conf;
int i;
- memset(in, 0, sizeof(in));
-
MLX5_SET(qetc_reg, in, port_number, 1);
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
@@ -701,35 +677,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
-
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
-
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
@@ -740,11 +705,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
-
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
outlen, MLX5_REG_PCMR, 0, 0);
}
@@ -759,12 +722,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
{
- u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
- memset(in, 0, sizeof(in));
MLX5_SET(pcmr_reg, in, local_port, 1);
MLX5_SET(pcmr_reg, in, fcs_chk, enable);
-
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
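Editorial note, purely illustrative: mlx5_core_access_reg() above now builds its mailboxes from the access_register_in/out layouts and moves the register payload through MLX5_ADDR_OF(), so every port helper in this file reduces to "fill a *_reg layout on the stack, call mlx5_core_access_reg()". The sketch below restates that second step following mlx5_query_port_ptys() above; the function name is hypothetical, the field and register names are the ones used in the hunks.

/* Illustrative only: querying a port register through the converted
 * mlx5_core_access_reg(), as mlx5_query_port_ptys() does above.
 */
static int example_query_ptys(struct mlx5_core_dev *dev, u32 *ptys,
			      int ptys_size, int proto_mask, u8 local_port)
{
	u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};	/* PTYS register layout, zeroed */

	MLX5_SET(ptys_reg, in, local_port, local_port);
	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);

	/* trailing arguments: argument word, register id, write flag (0 = read) */
	return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
				    ptys_size, MLX5_REG_PTYS, 0, 0);
}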
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b82d65802d96..d0a4005fe63a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
- int inlen)
+ u32 *in, int inlen)
{
- struct mlx5_create_qp_mbox_out out;
- struct mlx5_destroy_qp_mbox_in din;
- struct mlx5_destroy_qp_mbox_out dout;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err) {
- mlx5_core_warn(dev, "ret %d\n", err);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ if (err)
return err;
- }
-
- if (out.hdr.status) {
- mlx5_core_warn(dev, "current num of QPs 0x%x\n",
- atomic_read(&dev->num_qps));
- return mlx5_cmd_status_to_err(&out.hdr);
- }
- qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
@@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
return 0;
err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- din.qpn = cpu_to_be32(qp->qpn);
- mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
-
+ memset(din, 0, sizeof(din));
+ memset(dout, 0, sizeof(dout));
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
@@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_destroy_qp_mbox_in in;
- struct mlx5_destroy_qp_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
int err;
mlx5_debug_qp_remove(dev, qp);
destroy_qprqsq_common(dev, qp);
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
atomic_dec(&dev->num_qps);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+struct mbox_info {
+ u32 *in;
+ u32 *out;
+ int inlen;
+ int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+ mbox->inlen = inlen;
+ mbox->outlen = outlen;
+ mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+ mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+ if (!mbox->in || !mbox->out) {
+ kfree(mbox->in);
+ kfree(mbox->out);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+ kfree(mbox->in);
+ kfree(mbox->out);
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+ u32 opt_param_mask, void *qpc,
+ struct mbox_info *mbox)
+{
+ mbox->out = NULL;
+ mbox->in = NULL;
+
+#define MBOX_ALLOC(mbox, typ) \
+ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+
+ switch (opcode) {
+ /* 2RST & 2ERR */
+ case MLX5_CMD_OP_2RST_QP:
+ if (MBOX_ALLOC(mbox, qp_2rst))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ if (MBOX_ALLOC(mbox, qp_2err))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+ break;
+
+ /* MODIFY with QPC */
+ case MLX5_CMD_OP_RST2INIT_QP:
+ if (MBOX_ALLOC(mbox, rst2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (MBOX_ALLOC(mbox, init2rtr_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ if (MBOX_ALLOC(mbox, rtr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (MBOX_ALLOC(mbox, rts2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ if (MBOX_ALLOC(mbox, init2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
+ default:
+ mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
+ opcode, qpn);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp)
{
- struct mlx5_modify_qp_mbox_out out;
- int err = 0;
+ struct mbox_info mbox;
+ int err;
- memset(&out, 0, sizeof(out));
- in->hdr.opcode = cpu_to_be16(operation);
- in->qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+ opt_param_mask, qpc, &mbox);
if (err)
return err;
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ mbox_free(&mbox);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
@@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen)
+ u32 *out, int outlen)
{
- struct mlx5_query_qp_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, outlen);
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
- in.qpn = cpu_to_be32(qp->qpn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
- if (err)
- return err;
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
- struct mlx5_alloc_xrcd_mbox_in in;
- struct mlx5_alloc_xrcd_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
- else
- *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
-
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
- struct mlx5_dealloc_xrcd_mbox_in in;
- struct mlx5_dealloc_xrcd_mbox_out out;
- int err;
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
- in.xrcdn = cpu_to_be32(xrcdn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
@@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
u8 flags, int error)
{
- struct mlx5_page_fault_resume_mbox_in in;
- struct mlx5_page_fault_resume_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
- in.hdr.opmod = 0;
- flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
- MLX5_PAGE_FAULT_RESUME_WRITE |
- MLX5_PAGE_FAULT_RESUME_RDMA);
- flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
- in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
- (flags << MLX5_QPN_BITS));
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
+
+ MLX5_SET(page_fault_resume_in, in, opcode,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, qpn, qpn);
+
+ if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
+ MLX5_SET(page_fault_resume_in, in, req_res, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
+ MLX5_SET(page_fault_resume_in, in, read_write, 1);
+ if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
+ MLX5_SET(page_fault_resume_in, in, rdma, 1);
+ if (error)
+ MLX5_SET(page_fault_resume_in, in, error, 1);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
@@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*counter_id = MLX5_GET(alloc_q_counter_out, out,
counter_set_id);
@@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size)
{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
MLX5_SET(query_q_counter_in, in, clear, reset);
MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
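Editorial note, purely illustrative: the modify-QP path above can no longer use a single fixed inbox because each transition (2RST, RST2INIT, INIT2RTR, ...) has its own mlx5_ifc layout, so the new mbox_info helpers allocate a per-opcode sized in/out pair and the MOD_QP_IN_SET* macros fill the common fields. The sketch below shows how those helpers are wired together, following mlx5_core_qp_modify() above with a concrete opcode; the wrapper name is hypothetical, and since modify_qp_mbox_alloc()/mbox_free() are static the sketch only makes sense inside qp.c.

/* Illustrative only: using the mbox_info helpers introduced above
 * (mirrors mlx5_core_qp_modify() for an RTR-to-RTS transition).
 */
static int example_modify_qp_rtr2rts(struct mlx5_core_dev *dev,
				     struct mlx5_core_qp *qp,
				     u32 opt_param_mask, void *qpc)
{
	struct mbox_info mbox;
	int err;

	/* Picks and sizes the right {in, out} pair for this transition and
	 * fills opcode, qpn, opt_param_mask and the qpc block.
	 */
	err = modify_qp_mbox_alloc(dev, MLX5_CMD_OP_RTR2RTS_QP, qp->qpn,
				   opt_param_mask, qpc, &mbox);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);	/* frees both mailbox buffers */
	return err;
}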
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index c07c28bd3d55..104902a93a0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
MLX5_SET(set_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_RATE_LIMIT);
MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index b380a6bc1f85..e08627785590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -37,198 +37,200 @@
#include "eswitch.h"
#endif
-static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+ return !!sriov->num_vfs;
+}
+
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- err = mlx5_core_enable_hca(dev, vf);
+ if (sriov->enabled_vfs) {
+ mlx5_core_warn(dev,
+ "failed to enable SRIOV on device, already enabled with %d vfs\n",
+ sriov->enabled_vfs);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+ if (err) {
+ mlx5_core_warn(dev,
+ "failed to enable eswitch SRIOV (%d)\n", err);
+ return err;
+ }
+#endif
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
- mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1);
- } else {
- sriov->vfs_ctx[vf - 1].enabled = 1;
- mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1);
+ mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 1;
+ sriov->enabled_vfs++;
+ mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
}
+
+ return 0;
}
-static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+ int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- if (sriov->vfs_ctx[vf - 1].enabled) {
- if (mlx5_core_disable_hca(dev, vf))
- mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1);
- else
- sriov->vfs_ctx[vf - 1].enabled = 0;
+ if (!sriov->enabled_vfs)
+ return;
+
+ for (vf = 0; vf < sriov->num_vfs; vf++) {
+ if (!sriov->vfs_ctx[vf].enabled)
+ continue;
+ err = mlx5_core_disable_hca(dev, vf + 1);
+ if (err) {
+ mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 0;
+ sriov->enabled_vfs--;
}
+
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+ if (mlx5_wait_for_vf_pages(dev))
+ mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err;
-
- if (pci_num_vf(pdev))
- pci_disable_sriov(pdev);
+ int err = 0;
- enable_vfs(dev, num_vfs);
-
- err = pci_enable_sriov(pdev, num_vfs);
- if (err) {
- dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
- goto ex;
+ if (pci_num_vf(pdev)) {
+ mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+ return -EBUSY;
}
- return 0;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-ex:
- disable_vfs(dev, num_vfs);
return err;
}
-static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
+{
+ pci_disable_sriov(pdev);
+}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC);
- if (!sriov->vfs_ctx)
- return -ENOMEM;
+ err = mlx5_device_enable_sriov(dev, num_vfs);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+ return err;
+ }
- sriov->enabled_vfs = num_vfs;
- err = mlx5_core_create_vfs(pdev, num_vfs);
+ err = mlx5_pci_enable_sriov(pdev, num_vfs);
if (err) {
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
+ mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_device_disable_sriov(dev);
return err;
}
+ sriov->num_vfs = num_vfs;
+
return 0;
}
-static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_sriov_disable(struct pci_dev *pdev)
{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- sriov->num_vfs = num_vfs;
-}
-
-static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov;
-
- sriov = &dev->priv.sriov;
- disable_vfs(dev, sriov->num_vfs);
-
- if (mlx5_wait_for_vf_pages(dev))
- mlx5_core_warn(dev, "timeout claiming VFs pages\n");
-
+ mlx5_pci_disable_sriov(pdev);
+ mlx5_device_disable_sriov(dev);
sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;
- mlx5_core_cleanup_vfs(dev);
-
- if (!num_vfs) {
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
- if (!pci_vfs_assigned(pdev))
- pci_disable_sriov(pdev);
- else
- pr_info("unloading PF driver while leaving orphan VFs\n");
- return 0;
+ if (num_vfs && mlx5_lag_is_active(dev)) {
+ mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+ return -EINVAL;
}
- err = mlx5_core_sriov_enable(pdev, num_vfs);
- if (err) {
- dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
- return err;
- }
+ if (num_vfs)
+ err = mlx5_sriov_enable(pdev, num_vfs);
+ else
+ mlx5_sriov_disable(pdev);
- mlx5_core_init_vfs(dev, num_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-#endif
-
- return num_vfs;
+ return err ? err : num_vfs;
}
-static int sync_required(struct pci_dev *pdev)
+int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int cur_vfs = pci_num_vf(pdev);
- if (cur_vfs != sriov->num_vfs) {
- pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs);
- return 1;
- }
+ if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+ return 0;
- return 0;
+ /* If sriov VFs exist in PCI level, enable them in device level */
+ return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+}
+
+void mlx5_sriov_detach(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_pf(dev))
+ return;
+
+ mlx5_device_disable_sriov(dev);
}
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
- int cur_vfs;
+ int total_vfs;
if (!mlx5_core_is_pf(dev))
return 0;
- if (!sync_required(dev->pdev))
- return 0;
-
- cur_vfs = pci_num_vf(pdev);
- sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ sriov->num_vfs = pci_num_vf(pdev);
+ sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
if (!sriov->vfs_ctx)
return -ENOMEM;
- sriov->enabled_vfs = cur_vfs;
-
- mlx5_core_init_vfs(dev, cur_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
- SRIOV_LEGACY);
-#endif
-
- enable_vfs(dev, cur_vfs);
-
return 0;
}
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
- int err;
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
if (!mlx5_core_is_pf(dev))
- return 0;
+ return;
- err = mlx5_core_sriov_configure(pdev, 0);
- if (err)
- return err;
-
- return 0;
+ kfree(sriov->vfs_ctx);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c07f4d01b70e..3099630015d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
@@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
@@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
- srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
@@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
- err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
- sizeof(create_out));
+ err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ sizeof(create_out));
if (err)
goto out;
@@ -286,36 +285,30 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq, u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
@@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+
+ err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 28274a6fbafe..a00ff49eec18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,17 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqn = MLX5_GET(create_rq_out, out, rqn);
@@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
@@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*sqn = MLX5_GET(create_sq_out, out, sqn);
@@ -142,27 +128,22 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
@@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
- u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn)
{
- u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
int err;
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
@@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ return mlx5_cmd_exec(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
- memset(in, 0, sizeof(in));
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
@@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *xsrqn)
{
- u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
int err;
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
@@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
void *srqc;
void *xrc_srqc;
int err;
- memset(in, 0, sizeof(in));
MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
- out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (!err) {
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
xrc_srq_context_entry);
@@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
- u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
- u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, in, op_mod,
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
-
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
-
- mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5ff8af472bf5..ab0b896621a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -42,73 +42,28 @@ enum {
NUM_LOW_LAT_UUARS = 4,
};
-
-struct mlx5_alloc_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 uarn;
- u8 rsvd[4];
-};
-
-struct mlx5_free_uar_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- struct mlx5_alloc_uar_mbox_in in;
- struct mlx5_alloc_uar_mbox_out out;
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
int err;
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
-
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- goto ex;
- }
-
- *uarn = be32_to_cpu(out.uarn) & 0xffffff;
-
-ex:
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- struct mlx5_free_uar_mbox_in in;
- struct mlx5_free_uar_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
- in.uarn = cpu_to_be32(uarn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- goto ex;
+ u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
-ex:
- return err;
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21365d06982b..525f17af108e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -39,10 +39,7 @@
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
- int err;
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
@@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
- int err;
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
MLX5_SET(modify_vport_state_in, in, vport_number, vport);
-
if (vport)
MLX5_SET(modify_vport_state_in, in, other_vport, 1);
-
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
-
- return err;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-
- memset(in, 0, sizeof(in));
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
MLX5_SET(modify_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
@@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_ctx;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_inline, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ min_wqe_inline_mode, min_inline);
+
+ return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
+
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int *list_size)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
void *nic_vport_ctx;
int max_list_size;
int req_list_size;
@@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(curr_mac, addr_list[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
}
- err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
gid->global.subnet_prefix = tmp->global.subnet_prefix;
gid->global.interface_id = tmp->global.interface_id;
@@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto out;
-
pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
@@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
int is_group_manager;
void *out;
void *ctx;
@@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -752,9 +740,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto ex;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err)
- goto ex;
ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
@@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
- if (err)
- goto free;
- err = mlx5_cmd_status_to_err_v2(out);
-
free:
kvfree(in);
return err;
@@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- if (err)
- goto ex;
-
- err = mlx5_cmd_status_to_err_v2(out);
-
ex:
kfree(in);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index e25a73ed2981..07a9ba6cfc70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv)
static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- struct mlx5_outbox_hdr *hdr;
- int err;
-
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- hdr = (struct mlx5_outbox_hdr *)out;
- return hdr->status ? -ENOMEM : 0;
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
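All of the mlx5 hunks above apply the same two-part conversion: command buffers sized with MLX5_ST_SZ_DW() are zero-initialized at declaration instead of with memset(), and mlx5_cmd_exec_check_status() (together with the trailing mlx5_cmd_status_to_err_v2() calls) is collapsed into a plain mlx5_cmd_exec(), which now appears to report command status on its own. A minimal sketch of the resulting calling pattern; "example_cmd" and its fields are placeholders, not a real mlx5 command:

static int mlx5_core_example_cmd(struct mlx5_core_dev *dev, u32 objn)
{
	/* Hypothetical command, shown only to illustrate the pattern. */
	u32 out[MLX5_ST_SZ_DW(example_cmd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(example_cmd_in)] = {0};

	MLX5_SET(example_cmd_in, in, opcode, MLX5_CMD_OP_EXAMPLE_CMD);
	MLX5_SET(example_cmd_in, in, object_number, objn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}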
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 068ee65a960b..aa33d58b9f81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
- if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ err_hwmon_init:
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d3476ead9982..c4f550b6f783 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -87,6 +87,7 @@ struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
u8 local_port;
u16 trap_id;
+ enum mlxsw_reg_hpkt_action action;
};
struct mlxsw_event_listener {
@@ -178,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
- used_max_lag:1,
- used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -191,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_sizes:1;
+ used_kvd_split_data:1; /* indicate whether the kvd split data is used */
+
u8 max_vepa_channels;
- u16 max_lag;
- u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -213,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u32 kvd_hash_single_size;
- u32 kvd_hash_double_size;
+ u16 kvd_hash_granularity;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -268,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
- u8 max_span_valid:1;
+ u32 max_span_valid:1,
+ max_lag_valid:1,
+ max_ports_in_lag_valid:1,
+ kvd_size_valid:1,
+ kvd_single_min_size_valid:1,
+ kvd_double_min_size_valid:1,
+ max_virtual_routers_valid:1,
+ max_system_ports_valid:1,
+ max_vlan_groups_valid:1,
+ max_regions_valid:1,
+ max_rif_valid:1;
u8 max_span;
+ u8 max_lag;
+ u8 max_ports_in_lag;
+ u32 kvd_size;
+ u32 kvd_single_min_size;
+ u32 kvd_double_min_size;
+ u16 max_virtual_routers;
+ u16 max_system_ports;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u16 max_rif;
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ u32 kvd_single_size;
+ u32 kvd_double_size;
+ u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1d1360c178bb..e742bd4e8894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_MAX_LAG_ID 0x2520
+#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
+#define MLXSW_KVD_SIZE_ID 0x1001
+#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
+#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
+#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
+#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
+#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
+#define MLXSW_MAX_REGIONS_ID 0x2901
+#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
+ case MLXSW_MAX_LAG_ID:
+ resources->max_lag = val;
+ resources->max_lag_valid = 1;
+ break;
+ case MLXSW_MAX_PORTS_IN_LAG_ID:
+ resources->max_ports_in_lag = val;
+ resources->max_ports_in_lag_valid = 1;
+ break;
+ case MLXSW_KVD_SIZE_ID:
+ resources->kvd_size = val;
+ resources->kvd_size_valid = 1;
+ break;
+ case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
+ resources->kvd_single_min_size = val;
+ resources->kvd_single_min_size_valid = 1;
+ break;
+ case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
+ resources->kvd_double_min_size = val;
+ resources->kvd_double_min_size_valid = 1;
+ break;
+ case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
+ resources->max_virtual_routers = val;
+ resources->max_virtual_routers_valid = 1;
+ break;
+ case MLXSW_MAX_SYSTEM_PORT_ID:
+ resources->max_system_ports = val;
+ resources->max_system_ports_valid = 1;
+ break;
+ case MLXSW_MAX_VLAN_GROUPS_ID:
+ resources->max_vlan_groups = val;
+ resources->max_vlan_groups_valid = 1;
+ break;
+ case MLXSW_MAX_REGIONS_ID:
+ resources->max_regions = val;
+ resources->max_regions_valid = 1;
+ break;
+ case MLXSW_MAX_RIF_ID:
+ resources->max_rif = val;
+ resources->max_rif_valid = 1;
+ break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
+static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
+{
+ u32 singles_size, doubles_size, linear_size;
+
+ if (!resources->kvd_single_min_size_valid ||
+ !resources->kvd_double_min_size_valid ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ linear_size = profile->kvd_linear_size;
+
+ /* The hash part is what is left of the kvd after the
+ * linear part. It is split into the single size and
+ * double size according to the parts ratio from the
+ * profile. Both sizes must be a multiple of the
+ * granularity from the profile.
+ */
+ doubles_size = (resources->kvd_size - linear_size);
+ doubles_size *= profile->kvd_hash_double_parts;
+ doubles_size /= (profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts);
+ doubles_size /= profile->kvd_hash_granularity;
+ doubles_size *= profile->kvd_hash_granularity;
+ singles_size = resources->kvd_size - doubles_size -
+ linear_size;
+
+ /* Check results are legal. */
+ if (singles_size < resources->kvd_single_min_size ||
+ doubles_size < resources->kvd_double_min_size ||
+ resources->kvd_size < linear_size)
+ return -EIO;
+
+ resources->kvd_single_size = singles_size;
+ resources->kvd_double_size = doubles_size;
+ resources->kvd_linear_size = linear_size;
+
+ return 0;
+}
+
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
int i;
+ int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (profile->used_kvd_sizes) {
- mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
- mbox, profile->kvd_linear_size);
- mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
- mbox, profile->kvd_hash_single_size);
+ if (resources->kvd_size_valid) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ resources->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
- mbox, profile->kvd_hash_double_size);
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
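As a worked example of the split performed by mlxsw_pci_profile_get_kvd_sizes() above (all numbers are made up, not taken from any real profile): with a total KVD of 245760 entries, a linear part of 65536, a single:double parts ratio of 2:1 and a granularity of 128, the double size is (245760 - 65536) * 1 / 3 = 60074, rounded down to the granularity as 60032, and the single size is 245760 - 60032 - 65536 = 120192. The same arithmetic as a standalone user-space program:

#include <stdio.h>

/* Mirrors the ratio/granularity rounding in mlxsw_pci_profile_get_kvd_sizes();
 * every value here is illustrative only.
 */
int main(void)
{
	unsigned int kvd_size = 245760;   /* total KVD entries (example) */
	unsigned int linear_size = 65536; /* profile->kvd_linear_size    */
	unsigned int single_parts = 2, double_parts = 1, granularity = 128;
	unsigned int doubles_size, singles_size;

	doubles_size = (kvd_size - linear_size) * double_parts /
		       (double_parts + single_parts);
	doubles_size = doubles_size / granularity * granularity;
	singles_size = kvd_size - doubles_size - linear_size;

	printf("linear=%u single=%u double=%u\n",
	       linear_size, singles_size, doubles_size);
	return 0;
}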
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1721098eef13..6460c7256f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = {
*/
MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+/* reg_sfn_end
+ * Forces the current session to end.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
+
/* reg_sfn_num_rec
* Request: Number of learned notifications and aged-out notification
* records requested.
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_end_set(payload, 1);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}
@@ -1385,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
- mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
}
@@ -2131,6 +2138,18 @@ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+enum {
+ MLXSW_REG_PTYS_AN_STATUS_NA,
+ MLXSW_REG_PTYS_AN_STATUS_OK,
+ MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
@@ -2145,6 +2164,7 @@ MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
@@ -2177,6 +2197,13 @@ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+/* reg_ptys_eth_proto_lp_advertise
+ * The protocols that were advertised by the link partner during
+ * autonegotiation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+
static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
u32 proto_admin)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d48873bcbddf..1ec0a4ce3c46 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -248,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
@@ -262,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
return NULL;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry
+*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -364,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
/* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
if (err)
goto err_mpar_reg_write;
@@ -405,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
return;
/* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* remove the SBIB buffer if it was egress SPAN */
@@ -556,8 +560,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -566,13 +571,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
- learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ learn_enable);
+}
+
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -811,9 +823,9 @@ err_span_port_mtu_update:
return err;
}
-static struct rtnl_link_stats64 *
-mlxsw_sp_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_pcpu_stats *p;
@@ -840,6 +852,107 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ mlxsw_sp_port->hw_stats.cache);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+
return stats;
}
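The hunks above replace the direct counter read in .ndo_get_stats64 with a snapshot that update_stats_cache() refreshes from a delayed work item, because, as the comment notes, .ndo_get_stats64 may run in atomic context while the PPCNT register query is done from the work item. A generic sketch of that pattern; every foo_* identifier is hypothetical, and plain schedule_delayed_work() stands in for the driver's mlxsw_core_schedule_dw():

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct foo_port {
	struct net_device *dev;
	struct rtnl_link_stats64 *cache;
	struct delayed_work update_dw;
};

/* Stand-in for the real (possibly sleeping) hardware counter query. */
static void foo_read_hw_counters(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
}

static void foo_update_stats_cache(struct work_struct *work)
{
	struct foo_port *port = container_of(work, struct foo_port,
					     update_dw.work);

	if (netif_carrier_ok(port->dev))
		foo_read_hw_counters(port->dev, port->cache);
	/* Re-arm; armed with INIT_DELAYED_WORK() at port creation and
	 * stopped with cancel_delayed_work_sync() on removal, as in the
	 * port create/remove hunks further below.
	 */
	schedule_delayed_work(&port->update_dw, HZ);
}

static struct rtnl_link_stats64 *
foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct foo_port *port = netdev_priv(dev);

	memcpy(stats, port->cache, sizeof(*stats));
	return stats;
}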
@@ -974,10 +1087,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
goto err_port_vp_mode_trans;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err)
- goto err_port_vid_learning_set;
-
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err)
goto err_port_add_vid;
@@ -985,8 +1094,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return 0;
err_port_add_vid:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-err_port_vid_learning_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
@@ -1013,8 +1120,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-
/* Drop FID reference. If this was the last reference the
* resources will be freed.
*/
@@ -1209,6 +1314,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
@@ -1547,8 +1654,6 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1557,10 +1662,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
for (i = 0; i < len; i++)
- data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -1599,112 +1703,149 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
}
struct mlxsw_sp_port_link_mode {
+ enum ethtool_link_mode_bit_indices mask_ethtool;
u32 mask;
- u32 supported;
- u32 advertised;
u32 speed;
};
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = SPEED_20000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .speed = 25000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = SPEED_50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
- .speed = 50000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .speed = 100000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ .speed = SPEED_100000,
},
};
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
-static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+static void
+mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -1712,43 +1853,29 @@ static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
- return SUPPORTED_Backplane;
- return 0;
-}
-
-static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].supported;
- }
- return modes;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
-static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
- u32 modes = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].advertised;
+ __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ mode);
}
- return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -1765,8 +1892,8 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
@@ -1791,49 +1918,15 @@ static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
return PORT_OTHER;
}
-static int mlxsw_sp_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 eth_proto_oper;
- int err;
-
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
-
- cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
- mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause |
- SUPPORTED_Autoneg;
- cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
- mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
- eth_proto_oper, cmd);
-
- eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
- cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
-
- cmd->transceiver = XCVR_INTERNAL;
- return 0;
-}
-
-static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+static u32
+mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1863,61 +1956,113 @@ static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
return ptys_proto;
}
-static int mlxsw_sp_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+ mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
+ mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+}
+
+static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!autoneg)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+}
+
+static void
+mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 speed;
- u32 eth_proto_new;
- u32 eth_proto_cap;
- u32 eth_proto_admin;
+ u8 autoneg_status;
+ bool autoneg;
int err;
- speed = ethtool_cmd_speed(cmd);
+ autoneg = mlxsw_sp_port->link.autoneg;
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper);
+
+ mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
+
+ mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
+
+ eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
+ autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
+ mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
+
+ cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
+ cmd);
+
+ return 0;
+}
- eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
- mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
- mlxsw_sp_to_ptys_speed(speed);
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap, eth_proto_new;
+ bool autoneg;
+ int err;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
+ if (err)
return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "Not supported proto admin requested");
+ netdev_err(dev, "No supported speed requested\n");
return -EINVAL;
}
- if (eth_proto_new == eth_proto_admin)
- return 0;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to set proto admin");
+ if (err)
return err;
- }
if (!netif_running(dev))
return 0;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port->link.autoneg = autoneg;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
return 0;
}
@@ -1931,8 +2076,8 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
.get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_settings = mlxsw_sp_port_get_settings,
- .set_settings = mlxsw_sp_port_set_settings,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
static int
@@ -2082,6 +2227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->link.autoneg = 1;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -2103,6 +2249,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->hw_stats.cache =
+ kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
+
+ if (!mlxsw_sp_port->hw_stats.cache) {
+ err = -ENOMEM;
+ goto err_alloc_hw_stats;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ &update_stats_cache);
+
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
@@ -2129,7 +2285,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
/* Each packet needs to have a Tx header (metadata) on top all other
* headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
@@ -2203,6 +2359,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
err_core_port_init:
@@ -2223,6 +2380,8 @@ err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ kfree(mlxsw_sp_port->hw_stats.cache);
+err_alloc_hw_stats:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2239,6 +2398,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
+ cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -2248,6 +2408,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2571,123 +2732,47 @@ static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_func(skb, local_port, priv);
+}
+
+#define MLXSW_SP_RXL(_func, _trap_id, _action) \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .action = MLXSW_REG_HPKT_ACTION_##_action, \
+ }
+
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_FDB_MC,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU),
/* Traps for specific L2 packet types, not trapped as FDB MC */
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_STP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LACP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_EAPOL,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LLDP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MMRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MVRP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RPVST,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_DHCP,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPBC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_ARPUC,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_MTUERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_TTLERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_LBERROR,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_OSPF,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_IP2ME,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
- },
- {
- .func = mlxsw_sp_rx_listener_func,
- .local_port = MLXSW_PORT_DONT_CARE,
- .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
- },
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU),
+ /* L3 traps */
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU),
+ MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU),
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -2714,7 +2799,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rx_listener_register;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action,
mlxsw_sp_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -2802,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2813,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2897,6 +3003,7 @@ err_span_init:
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2910,38 +3017,26 @@ err_rx_listener_register:
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 64,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -2953,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_sizes = 1,
+ .used_kvd_split_data = 1,
+ .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .kvd_hash_single_parts = 2,
+ .kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
- .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
- .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -3073,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
- return MLXSW_SP_RIF_MAX;
+ return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3159,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3391,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3598,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3809,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
- for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3847,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ac48abebe904..9b22863a924b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -45,7 +45,7 @@
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
-#include <net/switchdev.h>
+#include <linux/notifier.h>
#include "port.h"
#include "core.h"
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_RIF_MAX 800
-
-#define MLXSW_SP_LAG_MAX 64
-#define MLXSW_SP_PORT_PER_LAG_MAX 16
+#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
-#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
-#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
- struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -263,6 +257,7 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_group_list;
struct list_head nexthop_neighs_list;
+ bool aborted;
};
struct mlxsw_sp {
@@ -275,7 +270,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
+ struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +285,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
- struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -302,6 +297,7 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -341,7 +337,8 @@ struct mlxsw_sp_port {
} vport;
struct {
u8 tx_pause:1,
- rx_pause:1;
+ rx_pause:1,
+ autoneg:1;
} link;
struct {
struct ieee_ets *ets;
@@ -360,6 +357,11 @@ struct mlxsw_sp_port {
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 *cache;
+ struct delayed_work update_dw;
+ } hw_stats;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
@@ -477,9 +479,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+
+ for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
@@ -558,6 +563,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
+int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -578,11 +586,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 953b214f38d0..bcaed8a38037 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
- pool_info->pool_type = dir;
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
- pool_info->threshold_type = pr->mode;
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+ enum mlxsw_reg_sbpr_mode mode;
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, pool_type);
+ *p_pool_index = pool_index_get(cm->pool, dir);
return 0;
}
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3f5c51da6d3e..78fc557d6dd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -43,6 +43,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/ip_fib.h>
#include "spectrum.h"
#include "core.h"
@@ -122,17 +123,20 @@ struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
+ struct list_head list;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
+ struct fib_info *fi;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
+ struct list_head entry_list;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -154,6 +158,7 @@ static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
mlxsw_sp_fib_ht_params);
if (err)
return err;
+ list_add_tail(&fib_entry->list, &fib->entry_list);
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
return 0;
@@ -166,6 +171,7 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ list_del(&fib_entry->list);
rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
}
@@ -216,6 +222,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&fib->entry_list);
return fib;
err_rhashtable_init:
@@ -252,7 +259,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -261,7 +270,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -368,10 +379,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -384,7 +397,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto,
+ vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -394,7 +409,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -410,11 +426,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -548,15 +567,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
-static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_virtual_routers_valid)
+ return -EIO;
+
+ mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
+ sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
@@ -1081,9 +1118,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
{
char raleu_pl[MLXSW_REG_RALEU_LEN];
- mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
- adj_index, ecmp_size,
- new_adj_index, new_ecmp_size);
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1489,50 +1527,6 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
- err = __mlxsw_sp_router_init(mlxsw_sp);
- if (err)
- return err;
- mlxsw_sp_lpm_init(mlxsw_sp);
- mlxsw_sp_vrs_init(mlxsw_sp);
- err = mlxsw_sp_neigh_init(mlxsw_sp);
- if (err)
- goto err_neigh_init;
- return 0;
-
-err_neigh_init:
- __mlxsw_sp_router_fini(mlxsw_sp);
- return err;
-}
-
-void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_neigh_fini(mlxsw_sp);
- __mlxsw_sp_router_fini(mlxsw_sp);
-}
-
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -1558,8 +1552,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1568,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
@@ -1589,8 +1585,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1637,94 +1634,102 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
-struct mlxsw_sp_router_fib4_add_info {
- struct switchdev_trans_item tritem;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_fib_entry *fib_entry;
-};
-
-static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
-{
- const struct mlxsw_sp_router_fib4_add_info *info = data;
- struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
- struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
-
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
- kfree(info);
-}
-
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4,
+ const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
+ struct mlxsw_sp_rif *r = NULL;
+ int nhsel;
+ int err;
- if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
}
- if (fib4->type != RTN_UNICAST)
+ if (fen_info->type != RTN_UNICAST)
return -EINVAL;
- if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
- struct mlxsw_sp_rif *r;
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (!nh->nh_dev)
+ continue;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
+ if (!r) {
+ /* If no router interface is found for at least one
+ * of the nexthops, that nexthop points to a device
+ * unrelated to us. Set the trap action and pass the
+ * packets for this prefix to the kernel.
+ */
+ break;
+ }
+ }
+ if (!r) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
- if (!r)
- return -EINVAL;
fib_entry->rif = r->rif;
- return 0;
+ } else {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ if (err)
+ return err;
}
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
}
static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
- return;
- mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ fib_info_offload_dec(fib_entry->fi);
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return ERR_CAST(vr);
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (fib_entry) {
/* Already exists, just take a reference */
fib_entry->ref_count++;
return fib_entry;
}
- fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->fi = fi;
fib_entry->ref_count = 1;
- err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
if (err)
goto err_fib4_entry_init;
@@ -1740,21 +1745,23 @@ err_fib_entry_create:
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
if (!vr)
return NULL;
- return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len,
- fib4->fi->fib_dev);
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len,
+ fen_info->fi->fib_dev);
}
-void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_vr *vr = fib_entry->vr;
@@ -1765,60 +1772,43 @@ void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
- struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ unsigned int last_ref_count;
- fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
- if (IS_ERR(fib_entry))
- return PTR_ERR(fib_entry);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto err_alloc_info;
- }
- info->mlxsw_sp = mlxsw_sp;
- info->fib_entry = fib_entry;
- switchdev_trans_item_enqueue(trans, info,
- mlxsw_sp_router_fib4_add_info_destroy,
- &info->tritem);
- return 0;
-
-err_alloc_info:
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return err;
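+ /* Drop every reference held on the entry so that it is freed even
+ * if it was obtained more than once.
+ */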
+ do {
+ last_ref_count = fib_entry->ref_count;
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ } while (last_ref_count != 1);
}
-static int
-mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
- info = switchdev_trans_item_dequeue(trans);
- fib_entry = info->fib_entry;
- kfree(info);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
+ return PTR_ERR(fib_entry);
+ }
if (fib_entry->ref_count != 1)
return 0;
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
- if (err)
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ }
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1830,24 +1820,15 @@ err_fib_entry_insert:
return err;
}
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- if (switchdev_trans_ph_prepare(trans))
- return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
- fib4, trans);
- return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
- fib4, trans);
-}
-
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
@@ -1861,3 +1842,172 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
+
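+/* Install a catch-all default route in virtual router 0 that traps all
+ * packets to the CPU, so routing continues in software after the hardware
+ * tables have been flushed.
+ */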
+static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ int err;
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_entry *tmp;
+ struct mlxsw_sp_vr *vr;
+ int i;
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ continue;
+
+ list_for_each_entry_safe(fib_entry, tmp,
+ &vr->fib->entry_list, list) {
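+ /* Tearing down the last entry may free the VR and its FIB,
+ * so note in advance whether this iteration must be the last.
+ */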
+ bool do_break = &tmp->list == &vr->fib->entry_list;
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
+ fib_entry);
+ mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+ if (do_break)
+ break;
+ }
+ }
+ mlxsw_sp->router.aborted = true;
+ err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_rif_valid)
+ return -EIO;
+
+ mlxsw_sp->rifs = kcalloc(resources->max_rif,
+ sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
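+ /* FIB rules are not offloaded; any rule change, or a failure to
+ * add a route, aborts offloading and falls back to trapping.
+ */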
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+ if (err)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ register_fib_notifier(&mlxsw_sp->fib_nb);
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b654c517b91..5e00c79e8133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -254,12 +254,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
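+
+/* Apply the learning setting to the vPort's own VID, or to every active VLAN
+ * of a bridged port, rolling the VLANs back if one of them fails.
+ */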
+static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ u16 vid;
+ int err;
+
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ }
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
+ set);
+ if (err)
+ goto err_port_vid_learning_set;
+ }
+
+ return 0;
+
+err_port_vid_learning_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+ return err;
+}
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
unsigned long brport_flags)
{
+ unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
- bool set;
int err;
if (!mlxsw_sp_port->bridged)
@@ -269,17 +297,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- set = mlxsw_sp_port->uc_flood ? false : true;
- err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ !mlxsw_sp_port->uc_flood);
if (err)
return err;
}
+ if ((learning ^ brport_flags) & BR_LEARNING) {
+ err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
+ !mlxsw_sp_port->learning);
+ if (err)
+ goto err_port_learning_set;
+ }
+
mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
return 0;
+
+err_port_learning_set:
+ if ((uc_flood ^ brport_flags) & BR_FLOOD)
+ mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
+ mlxsw_sp_port->uc_flood);
+ return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -631,6 +672,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ vid_e, learn_enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
@@ -671,6 +733,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ mlxsw_sp_port->learning);
+ if (err) {
+ netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
+ vid_begin, vid_end);
+ goto err_port_vid_learning_set;
+ }
+
/* Change activity bits only if the HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++) {
set_bit(vid, mlxsw_sp_port->active_vlans);
@@ -693,6 +763,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
+err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
@@ -971,11 +1044,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -997,29 +1065,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end)
{
- struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
- int err;
if (!mlxsw_sp_port->bridged)
return -EINVAL;
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
- vid_end);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
+ false);
pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID %d\n", pvid);
- return err;
- }
- }
+ if (pvid >= vid_begin && pvid <= vid_end)
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
+
+ __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
+ false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
@@ -1117,10 +1176,6 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1141,9 +1196,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
@@ -1362,8 +1419,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1425,8 +1480,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
vid = fid;
}
- adding = adding && mlxsw_sp_port->learning;
-
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1492,20 +1545,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
rtnl_lock();
- do {
- mlxsw_reg_sfn_pack(sfn_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
- if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
- break;
- }
- num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
- for (i = 0; i < num_rec; i++)
- mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
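+ /* Query a single batch of FDB records per work invocation; the work
+ * is rescheduled at the end, so remaining records are handled later.
+ */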
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
- } while (num_rec);
+out:
rtnl_unlock();
-
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 377daa4d509c..c0c23e2f3275 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -997,7 +997,7 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
/* Each packet needs to have a Tx header (metadata) on top of all
* other headers.
*/
- dev->hard_header_len += MLXSW_TXHDR_LEN;
+ dev->needed_headroom = MLXSW_TXHDR_LEN;
err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
if (err) {
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = 64,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index eb807b0dc72a..569ade6cf85c 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -134,7 +134,7 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define NS83820_VLAN_ACCEL_SUPPORT
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 68178819ff12..0efb2ba9a558 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -3,6 +3,13 @@ obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o
nfp_netvf-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
+ nfp_net_offload.o \
nfp_netvf_main.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+nfp_netvf-objs += \
+ nfp_bpf_verifier.o \
+ nfp_bpf_jit.o
+endif
+
nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
new file mode 100644
index 000000000000..22484b6fd3e8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ASM_H__
+#define __NFP_ASM_H__ 1
+
+#include "nfp_bpf.h"
+
+#define REG_NONE 0
+
+#define RE_REG_NO_DST 0x020
+#define RE_REG_IMM 0x020
+#define RE_REG_IMM_encode(x) \
+ (RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
+#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_XFR 0x080
+
+#define UR_REG_XFR 0x180
+#define UR_REG_NN 0x280
+#define UR_REG_NO_DST 0x300
+#define UR_REG_IMM UR_REG_NO_DST
+#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
+#define UR_REG_IMM_MAX 0x0ffULL
+
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
+
+#define nfp_is_br(_insn) \
+ (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
+
+enum br_mask {
+ BR_BEQ = 0x00,
+ BR_BNE = 0x01,
+ BR_BHS = 0x04,
+ BR_BLO = 0x05,
+ BR_BGE = 0x08,
+ BR_UNC = 0x18,
+};
+
+enum br_ev_pip {
+ BR_EV_PIP_UNCOND = 0,
+ BR_EV_PIP_COND = 1,
+};
+
+enum br_ctx_signal_state {
+ BR_CSS_NONE = 2,
+};
+
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+
+enum immed_width {
+ IMMED_WIDTH_ALL = 0,
+ IMMED_WIDTH_BYTE = 1,
+ IMMED_WIDTH_WORD = 2,
+};
+
+enum immed_shift {
+ IMMED_SHIFT_0B = 0,
+ IMMED_SHIFT_1B = 1,
+ IMMED_SHIFT_2B = 2,
+};
+
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+
+enum shf_op {
+ SHF_OP_NONE = 0,
+ SHF_OP_AND = 2,
+ SHF_OP_OR = 5,
+};
+
+enum shf_sc {
+ SHF_SC_R_ROT = 0,
+ SHF_SC_R_SHF = 1,
+ SHF_SC_L_SHF = 2,
+ SHF_SC_R_DSHF = 3,
+};
+
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+
+enum alu_op {
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NEG = 0x04,
+ ALU_OP_AND = 0x08,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
+};
+
+enum alu_dst_ab {
+ ALU_DST_A = 0,
+ ALU_DST_B = 1,
+};
+
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
+
+struct cmd_tgt_act {
+ u8 token;
+ u8 tgt_cmd;
+};
+
+enum cmd_tgt_map {
+ CMD_TGT_READ8,
+ CMD_TGT_WRITE8,
+ CMD_TGT_READ_LE,
+ CMD_TGT_READ_SWAP_LE,
+ __CMD_TGT_MAP_SIZE,
+};
+
+enum cmd_mode {
+ CMD_MODE_40b_AB = 0,
+ CMD_MODE_40b_BA = 1,
+ CMD_MODE_32b = 4,
+};
+
+enum cmd_ctx_swap {
+ CMD_CTX_SWAP = 0,
+ CMD_CTX_NO_SWAP = 3,
+};
+
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+
+enum lcsr_wr_src {
+ LCSR_WR_AREG,
+ LCSR_WR_BREG,
+ LCSR_WR_IMM,
+};
+
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
new file mode 100644
index 000000000000..87aa8a3e9112
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_BPF_H__
+#define __NFP_BPF_H__ 1
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
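+/* True if @val fits into the field described by the GENMASK-style @mask */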
+#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
+
+/* The branch fixup logic uses the topmost byte of a branch instruction as a
+ * scratch area. Remember to clear it before sending instructions to the HW!
+ */
+#define OP_BR_SPECIAL 0xff00000000000000ULL
+
+enum br_special {
+ OP_BR_NORMAL = 0,
+ OP_BR_GO_OUT,
+ OP_BR_GO_ABORT,
+};
+
+enum static_regs {
+ STATIC_REG_PKT = 1,
+#define REG_PKT_BANK ALU_DST_A
+ STATIC_REG_IMM = 2, /* Bank AB */
+};
+
+enum nfp_bpf_action_type {
+ NN_ACT_TC_DROP,
+ NN_ACT_TC_REDIR,
+ NN_ACT_DIRECT,
+};
+
+/* Software register representation, hardware encoding in asm.h */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+};
+
+#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
+
+#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
+#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
+#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
+#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
+#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
+#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
+#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+
+#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
+#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+
+#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAG_MARK 1
+#define NFP_BPF_ABI_MARK reg_nnr(1)
+#define NFP_BPF_ABI_PKT reg_nnr(2)
+#define NFP_BPF_ABI_LEN reg_nnr(3)
+
+struct nfp_prog;
+struct nfp_insn_meta;
+typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
+
+#define nfp_prog_first_meta(nfp_prog) \
+ list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_prog_last_meta(nfp_prog) \
+ list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_meta_next(meta) list_next_entry(meta, l)
+#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+
+/**
+ * struct nfp_insn_meta - BPF instruction wrapper
+ * @insn: BPF instruction
+ * @off: index of first generated machine instruction (in nfp_prog.prog)
+ * @n: eBPF instruction number
+ * @skip: skip this instruction (optimized out)
+ * @double_cb: callback for second part of the instruction
+ * @l: link on nfp_prog->insns list
+ */
+struct nfp_insn_meta {
+ struct bpf_insn insn;
+ unsigned int off;
+ unsigned short n;
+ bool skip;
+ instr_cb_t double_cb;
+
+ struct list_head l;
+};
+
+#define BPF_SIZE_MASK 0x18
+
+static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
+{
+ return BPF_CLASS(meta->insn.code);
+}
+
+static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
+{
+ return BPF_SRC(meta->insn.code);
+}
+
+static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
+{
+ return BPF_OP(meta->insn.code);
+}
+
+static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
+{
+ return BPF_MODE(meta->insn.code);
+}
+
+/**
+ * struct nfp_prog - nfp BPF program
+ * @prog: machine code
+ * @prog_len: number of valid instructions in @prog array
+ * @__prog_alloc_len: alloc size of @prog array
+ * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
+ * @num_regs: number of registers used by this program
+ * @regs_per_thread: number of basic registers allocated per thread
+ * @start_off: address of the first instruction in the memory
+ * @tgt_out: jump target for normal exit
+ * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_done: jump target to get the next packet
+ * @n_translated: number of successfully translated instructions (for errors)
+ * @error: error code if something went wrong
+ * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
+ */
+struct nfp_prog {
+ u64 *prog;
+ unsigned int prog_len;
+ unsigned int __prog_alloc_len;
+
+ enum nfp_bpf_action_type act;
+
+ unsigned int num_regs;
+ unsigned int regs_per_thread;
+
+ unsigned int start_off;
+ unsigned int tgt_out;
+ unsigned int tgt_abort;
+ unsigned int tgt_done;
+
+ unsigned int n_translated;
+ int error;
+
+ struct list_head insns;
+};
+
+struct nfp_bpf_result {
+ unsigned int n_instr;
+ bool dense_mode;
+};
+
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res);
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
new file mode 100644
index 000000000000..f8df5300f49c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -0,0 +1,1813 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/pkt_cls.h>
+#include <linux/unistd.h>
+
+#include "nfp_asm.h"
+#include "nfp_bpf.h"
+
+/* --- NFP prog --- */
+/* For-each macros which provide pos and next<n> pointers for walking two or
+ * three entries at a time.  It's safe to modify the next pointers (but not
+ * pos).
+ */
+#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos))
+
+#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l), \
+ next2 = list_next_entry(next, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l && \
+ &(nfp_prog)->insns != &next2->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos), \
+ next2 = nfp_meta_next(next))
+
+static bool
+nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.next != &nfp_prog->insns;
+}
+
+static bool
+nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.prev != &nfp_prog->insns;
+}
+
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *tmp;
+
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
+}
+
+static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
+{
+ if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
+ nfp_prog->error = -ENOSPC;
+ return;
+ }
+
+ nfp_prog->prog[nfp_prog->prog_len] = insn;
+ nfp_prog->prog_len++;
+}
+
+static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
+{
+ return nfp_prog->start_off + nfp_prog->prog_len;
+}
+
+static unsigned int
+nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+{
+ return offset - nfp_prog->start_off;
+}
+
+/* --- SW reg --- */
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+};
+
+static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding %08x\n", swreg);
+ return 0;
+ }
+}
+
+static int
+swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+ }
+}
+
+static int
+swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
+ bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ return 0;
+}
+
+/* --- Emitters --- */
+static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8] = { 0x00, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static void
+__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+{
+ enum cmd_ctx_swap ctx;
+ u64 insn;
+
+ if (sync)
+ ctx = CMD_CTX_SWAP;
+ else
+ ctx = CMD_CTX_NO_SWAP;
+
+ insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
+ FIELD_PREP(OP_CMD_CTX, ctx) |
+ FIELD_PREP(OP_CMD_B_SRC, breg) |
+ FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
+ FIELD_PREP(OP_CMD_XFER, xfer) |
+ FIELD_PREP(OP_CMD_CNT, size) |
+ FIELD_PREP(OP_CMD_SIG, sync) |
+ FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_MODE, mode);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+ if (reg.swap) {
+ pr_err("cmd can't swap arguments\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+}
+
+static void
+__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
+ enum br_ctx_signal_state css, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BR_BASE |
+ FIELD_PREP(OP_BR_MASK, mask) |
+ FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
+ FIELD_PREP(OP_BR_CSS, css) |
+ FIELD_PREP(OP_BR_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+{
+ if (defer > 2) {
+ pr_err("BUG: branch defer out of bounds %d\n", defer);
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+}
+
+static void
+emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
+{
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+}
+
+static void
+__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
+ u8 byte, bool equal, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BBYTE_BASE |
+ FIELD_PREP(OP_BB_A_SRC, areg) |
+ FIELD_PREP(OP_BB_BYTE, byte) |
+ FIELD_PREP(OP_BB_B_SRC, breg) |
+ FIELD_PREP(OP_BB_I8, imm8) |
+ FIELD_PREP(OP_BB_EQ, equal) |
+ FIELD_PREP(OP_BB_DEFBR, defer) |
+ FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_br_byte_neq(struct nfp_prog *nfp_prog,
+ u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
+ defer);
+}
+
+static void
+__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ enum immed_width width, bool invert,
+ enum immed_shift shift, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_IMMED_BASE |
+ FIELD_PREP(OP_IMMED_A_SRC, areg) |
+ FIELD_PREP(OP_IMMED_B_SRC, breg) |
+ FIELD_PREP(OP_IMMED_IMM, imm_hi) |
+ FIELD_PREP(OP_IMMED_WIDTH, width) |
+ FIELD_PREP(OP_IMMED_INV, invert) |
+ FIELD_PREP(OP_IMMED_SHIFT, shift) |
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+ enum immed_width width, bool invert, enum immed_shift shift)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
+ invert, shift, reg.wr_both);
+}
+
+static void
+__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ enum shf_sc sc, u8 shift,
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+{
+ u64 insn;
+
+ if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ if (sc == SHF_SC_L_SHF)
+ shift = 32 - shift;
+
+ insn = OP_SHF_BASE |
+ FIELD_PREP(OP_SHF_A_SRC, areg) |
+ FIELD_PREP(OP_SHF_SC, sc) |
+ FIELD_PREP(OP_SHF_B_SRC, breg) |
+ FIELD_PREP(OP_SHF_I8, i8) |
+ FIELD_PREP(OP_SHF_SW, sw) |
+ FIELD_PREP(OP_SHF_DST, dst) |
+ FIELD_PREP(OP_SHF_SHIFT, shift) |
+ FIELD_PREP(OP_SHF_OP, op) |
+ FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
+ FIELD_PREP(OP_SHF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
+ enum shf_sc sc, u8 shift)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_ALU_BASE |
+ FIELD_PREP(OP_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_ALU_DST, dst) |
+ FIELD_PREP(OP_ALU_SW, swap) |
+ FIELD_PREP(OP_ALU_OP, op) |
+ FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
+ FIELD_PREP(OP_ALU_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
+ u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
+ bool zero, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_LDF_BASE |
+ FIELD_PREP(OP_LDF_A_SRC, areg) |
+ FIELD_PREP(OP_LDF_SC, sc) |
+ FIELD_PREP(OP_LDF_B_SRC, breg) |
+ FIELD_PREP(OP_LDF_I8, imm8) |
+ FIELD_PREP(OP_LDF_SW, swap) |
+ FIELD_PREP(OP_LDF_ZF, zero) |
+ FIELD_PREP(OP_LDF_BMASK, bmask) |
+ FIELD_PREP(OP_LDF_SHF, shift) |
+ FIELD_PREP(OP_LDF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
+ u32 dst, u8 bmask, u32 src, bool zero)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
+ reg.i8, zero, reg.swap, reg.wr_both);
+}
+
+static void
+emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+ enum shf_sc sc, u8 shift)
+{
+ emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+}
+
+/* --- Wrappers --- */
+static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
+{
+ if (!(imm & 0xffff0000)) {
+ *val = imm;
+ *shift = IMMED_SHIFT_0B;
+ } else if (!(imm & 0xff0000ff)) {
+ *val = imm >> 8;
+ *shift = IMMED_SHIFT_1B;
+ } else if (!(imm & 0x0000ffff)) {
+ *val = imm >> 16;
+ *shift = IMMED_SHIFT_2B;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
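+/* A couple of worked examples of the packing above: 0x12340000 packs as
+ * val 0x1234 with IMMED_SHIFT_2B, 0x00123400 as val 0x1234 with
+ * IMMED_SHIFT_1B, while 0x12003400 does not pack and wrp_immed() below
+ * falls back to a pair of IMMED instructions.
+ */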
+static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+{
+ enum immed_shift shift;
+ u16 val;
+
+ if (pack_immed(imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
+ } else if (pack_immed(~imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
+ } else {
+ emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
+ false, IMMED_SHIFT_0B);
+ emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
+ false, IMMED_SHIFT_2B);
+ }
+}
+
+/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
+ * If @imm is small enough, encode it directly in the operand and return
+ * that encoding; otherwise load @imm into a spare register and return the
+ * register's encoding.
+ */
+static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(UR_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+/* re_load_imm_any() - encode immediate or use tmp register (restricted)
+ * If @imm is small enough, encode it directly in the operand and return
+ * that encoding; otherwise load @imm into a spare register and return the
+ * register's encoding.
+ */
+static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(RE_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+static void
+wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
+ enum br_special special)
+{
+ emit_br(nfp_prog, mask, 0, 0);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_BR_SPECIAL, special);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+}
+
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
+ u16 src, bool src_valid, u8 size)
+{
+ unsigned int i;
+ u16 shift, sz;
+ u32 tmp_reg;
+
+ /* We load the value from the address indicated in @offset and then
+ * shift out the data we don't need. Note: this is big endian!
+ */
+ sz = size < 4 ? 4 : size;
+ shift = size < 4 ? 4 - size : 0;
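+ /* e.g. a 2-byte load still reads 4 bytes from the packet (sz = 4)
+ * and the result is shifted right by (4 - 2) * 8 = 16 bits below
+ * to drop the bytes we don't need.
+ */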
+
+ if (src_valid) {
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog),
+ reg_a(src), ALU_OP_ADD, tmp_reg);
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
+ } else {
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
+ imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
+ }
+
+ i = 0;
+ if (shift)
+ emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ reg_xfer(0), SHF_SC_R_SHF, shift * 8);
+ else
+ for (; i * 4 < size; i++)
+ emit_alu(nfp_prog, reg_both(i),
+ reg_none(), ALU_OP_NONE, reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ return 0;
+}
+
+static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+{
+ return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+}
+
+static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+{
+ emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
+ reg_none(), ALU_OP_NONE, reg_b(src));
+ emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
+ NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+
+ return 0;
+}
+
+static void
+wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
+{
+ u32 tmp_reg;
+
+ if (alu_op == ALU_OP_AND) {
+ if (!imm)
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_OR) {
+ if (!~imm)
+ wrp_immed(nfp_prog, reg_both(dst), ~0U);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_XOR) {
+ if (!~imm)
+ emit_alu(nfp_prog, reg_both(dst), reg_none(),
+ ALU_OP_NEG, reg_b(dst));
+ if (!imm || !~imm)
+ return;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
+}
+
+static int
+wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
+
+ return 0;
+}
+
+static int
+wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ emit_alu(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), alu_op, reg_b(src + 1));
+
+ return 0;
+}
+
+static int
+wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int
+wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static void
+wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
+ enum br_mask br_mask, u16 off)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
+ emit_br(nfp_prog, br_mask, off, 0);
+}
+
+static int
+wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, enum br_mask br_mask)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
+ insn->src_reg * 2, br_mask, insn->off);
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
+
+ return 0;
+}
+
+static int
+wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u8 reg = insn->dst_reg * 2;
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+static int
+wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
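+ /* For the swapped comparison exchange src and dst via the XOR trick */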
+ if (swap) {
+ areg ^= breg;
+ breg ^= areg;
+ areg ^= breg;
+ }
+
+ emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+/* --- Callbacks --- */
+static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+
+ return 0;
+}
+
+static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
+
+ return 0;
+}
+
+static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
+}
+
+static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_ADD,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
+
+ return 0;
+}
+
+static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_SUB,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
+
+ return 0;
+}
+
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
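+ /* A shift left by exactly 32 is just a word move: the low word
+ * becomes the high word and the low word is zeroed.
+ */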
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+
+ return 0;
+}
+
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+}
+
+static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
+}
+
+static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+}
+
+static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
+}
+
+static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+}
+
+static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (!insn->imm)
+ return 1; /* TODO: zero shift means indirect */
+
+ emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
+ SHF_SC_L_SHF, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
+ meta->insn.imm);
+
+ return 0;
+}
+
+static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ meta->double_cb = imm_ld8_part2;
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+
+ return 0;
+}
+
+static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+}
+
+static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+}
+
+static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+}
+
+static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 1);
+}
+
+static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 2);
+}
+
+static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 4);
+}
+
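+/* Context access is limited for now: only loads of skb->len (here) and
+ * stores to skb->mark (in mem_stx4() below) are translated, anything else
+ * fails with -ENOTSUPP.
+ */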
+static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, len))
+ emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
+ else
+ return -ENOTSUPP;
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, mark))
+ return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+
+ return -ENOTSUPP;
+}
+
+static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off < 0) /* TODO */
+ return -ENOTSUPP;
+ emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
+
+ return 0;
+}
+
+static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ or1 = imm_a(nfp_prog);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ or2 = imm_b(nfp_prog);
+ }
+
+ emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ meta->skip = true;
+ return 0;
+ }
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ return 0;
+}
+
+static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ return 0;
+}
+
+static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(),
+ imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
+}
+
+static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
+}
+
+static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+
+ return 0;
+}
+
+static const instr_cb_t instr_cb[256] = {
+ [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
+ [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
+ [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
+ [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
+ [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
+ [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
+ [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
+ [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
+ [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
+ [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
+ [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
+ [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
+ [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
+ [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
+ [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
+ [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
+ [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
+ [BPF_ALU | BPF_AND | BPF_X] = and_reg,
+ [BPF_ALU | BPF_AND | BPF_K] = and_imm,
+ [BPF_ALU | BPF_OR | BPF_X] = or_reg,
+ [BPF_ALU | BPF_OR | BPF_K] = or_imm,
+ [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
+ [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
+ [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
+ [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
+ [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
+ [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
+ [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
+ [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
+ [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
+ [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_JMP | BPF_JA | BPF_K] = jump,
+ [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
+ [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
+ [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_EXIT] = goto_out,
+};
+
+/* --- Misc code --- */
+static void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+/* --- Assembler logic --- */
+static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *next;
+ u32 off, br_idx;
+ u32 idx;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ if (meta->skip)
+ continue;
+ if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ continue;
+
+ br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (!nfp_is_br(nfp_prog->prog[br_idx])) {
+ pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
+ br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
+ return -ELOOP;
+ }
+ /* Leave special branches for later */
+ if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ continue;
+
+ /* Find the target offset in assembler realm */
+ off = meta->insn.off;
+ if (!off) {
+ pr_err("Fixup found zero offset!!\n");
+ return -ELOOP;
+ }
+
+ while (off && nfp_meta_has_next(nfp_prog, next)) {
+ next = nfp_meta_next(next);
+ off--;
+ }
+ if (off) {
+ pr_err("Fixup found too large jump!! %d\n", off);
+ return -ELOOP;
+ }
+
+ if (next->skip) {
+ pr_err("Branch landing on removed instruction!!\n");
+ return -ELOOP;
+ }
+
+ for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
+ idx <= br_idx; idx++) {
+ if (!nfp_is_br(nfp_prog->prog[idx]))
+ continue;
+ br_set_offset(&nfp_prog->prog[idx], next->off);
+ }
+ }
+
+ /* Fixup 'goto out's separately, they can be scattered around */
+ for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
+ enum br_special special;
+
+ if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
+ continue;
+
+ special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
+ switch (special) {
+ case OP_BR_NORMAL:
+ break;
+ case OP_BR_GO_OUT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_out);
+ break;
+ case OP_BR_GO_ABORT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_abort);
+ break;
+ }
+
+ nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
+ }
+
+ return 0;
+}
+
+static void nfp_intro(struct nfp_prog *nfp_prog)
+{
+ emit_alu(nfp_prog, pkt_reg(nfp_prog),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+}
+
+static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
+{
+ const u8 act2code[] = {
+ [NN_ACT_TC_DROP] = 0x22,
+ [NN_ACT_TC_REDIR] = 0x24
+ };
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+ /* Legacy TC mode:
+ * 0 0x11 -> pass, count as stat0
+ * -1 drop 0x22 -> drop, count as stat1
+ * redir 0x24 -> redir, count as stat1
+ * ife mark 0x21 -> pass, count as stat1
+ * ife + tx 0x24 -> redir, count as stat1
+ */
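+ /* The branches below are emitted with defer slots, so the ld_field/
+ * alu instructions that follow each branch still execute before the
+ * jump takes effect.
+ */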
+ emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
+ SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
+{
+ /* TC direct-action mode:
+ * 0,1 ok NOT SUPPORTED[1]
+ * 2 drop 0x22 -> drop, count as stat1
+ * 4,5 nuke 0x02 -> drop
+ * 7 redir 0x44 -> redir, count as stat2
+ * * unspec 0x11 -> pass, count as stat0
+ *
+ * [1] We can't support OK and RECLASSIFY because we can't tell TC
+ * the exact decision made. We are forced to support UNSPEC
+ * to handle aborts so that's the only one we handle for passing
+ * packets up the stack.
+ */
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+
+ /* if R0 > 7 jump to abort */
+ emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
+ emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+
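+ /* reg_b(2)/reg_b(3) below hold per-return-value nibble lookup tables
+ * for the result codes listed above; the indirect shifts extract
+ * nibble number R0 from each table and combine them into a single
+ * byte (e.g. R0 == 2 yields 0x22, R0 == 7 yields 0x44).
+ */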
+ wrp_immed(nfp_prog, reg_b(2), 0x41221211);
+ wrp_immed(nfp_prog, reg_b(3), 0x41001211);
+
+ emit_shf(nfp_prog, reg_a(1),
+ reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_a(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_b(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_shf(nfp_prog, reg_b(2),
+ reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro(struct nfp_prog *nfp_prog)
+{
+ switch (nfp_prog->act) {
+ case NN_ACT_DIRECT:
+ nfp_outro_tc_da(nfp_prog);
+ break;
+ case NN_ACT_TC_DROP:
+ case NN_ACT_TC_REDIR:
+ nfp_outro_tc_legacy(nfp_prog);
+ break;
+ }
+}
+
+static int nfp_translate(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int err;
+
+ nfp_intro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ instr_cb_t cb = instr_cb[meta->insn.code];
+
+ meta->off = nfp_prog_current_offset(nfp_prog);
+
+ if (meta->skip) {
+ nfp_prog->n_translated++;
+ continue;
+ }
+
+ if (nfp_meta_has_prev(nfp_prog, meta) &&
+ nfp_meta_prev(meta)->double_cb)
+ cb = nfp_meta_prev(meta)->double_cb;
+ if (!cb)
+ return -ENOENT;
+ err = cb(nfp_prog, meta);
+ if (err)
+ return err;
+
+ nfp_prog->n_translated++;
+ }
+
+ nfp_outro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ return nfp_fixup_branches(nfp_prog);
+}
+
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->insn = prog[i];
+ meta->n = i;
+
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
+
+ return 0;
+}
+
+/* --- Optimizations --- */
+static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ /* Programs converted from cBPF start with register xoring */
+ if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
+ insn.src_reg == insn.dst_reg)
+ continue;
+
+ /* Programs start with R6 = R1 but we ignore the skb pointer */
+ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn.src_reg == 1 && insn.dst_reg == 6)
+ meta->skip = true;
+
+ /* Return as soon as something doesn't match */
+ if (!meta->skip)
+ return;
+ }
+}
+
+/* Try to rename registers so that program uses only low ones */
+static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
+{
+ bool reg_used[MAX_BPF_REG] = {};
+ u8 tgt_reg[MAX_BPF_REG] = {};
+ struct nfp_insn_meta *meta;
+ unsigned int i, j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (meta->skip)
+ continue;
+
+ reg_used[meta->insn.src_reg] = true;
+ reg_used[meta->insn.dst_reg] = true;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
+ if (!reg_used[i])
+ continue;
+
+ tgt_reg[i] = j++;
+ }
+ nfp_prog->num_regs = j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
+ meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
+ }
+
+ return 0;
+}
+
+/* Remove masking after load since our load guarantees this is not needed */
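+/* (e.g. a BPF_LD | BPF_ABS | BPF_B load followed by "r0 &= 0xff" - the
+ * AND is redundant and gets marked for skipping.)
+ */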
+static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ const s32 exp_mask[] = {
+ [BPF_B] = 0x000000ffU,
+ [BPF_H] = 0x0000ffffU,
+ [BPF_W] = 0xffffffffU,
+ };
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn insn, next;
+
+ insn = meta1->insn;
+ next = meta2->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+
+ if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
+ continue;
+
+ if (!exp_mask[BPF_SIZE(insn.code)])
+ continue;
+ if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
+ continue;
+
+ if (next.src_reg || next.dst_reg)
+ continue;
+
+ meta2->skip = true;
+ }
+}
+
+static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2, *meta3;
+
+ nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
+ struct bpf_insn insn, next1, next2;
+
+ insn = meta1->insn;
+ next1 = meta2->insn;
+ next2 = meta3->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+ if (BPF_SIZE(insn.code) != BPF_W)
+ continue;
+
+ if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
+ !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
+ continue;
+
+ if (next1.src_reg || next1.dst_reg ||
+ next2.src_reg || next2.dst_reg)
+ continue;
+
+ if (next1.imm != 0x20 || next2.imm != 0x20)
+ continue;
+
+ meta2->skip = true;
+ meta3->skip = true;
+ }
+}
+
+static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
+{
+ int ret;
+
+ nfp_bpf_opt_reg_init(nfp_prog);
+
+ ret = nfp_bpf_opt_reg_rename(nfp_prog);
+ if (ret)
+ return ret;
+
+ nfp_bpf_opt_ld_mask(nfp_prog);
+ nfp_bpf_opt_ld_shift(nfp_prog);
+
+ return 0;
+}
+
+/**
+ * nfp_bpf_jit() - translate BPF code into NFP assembly
+ * @filter: kernel BPF filter struct
+ * @prog_mem: memory to store assembler instructions
+ * @act: action attached to this eBPF program
+ * @prog_start: offset of the first instruction when loaded
+ * @prog_done: where to jump on exit
+ * @prog_sz: size of @prog_mem in instructions
+ * @res: resulting translation parameters (instruction count, dense mode)
+ */
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
+ enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res)
+{
+ struct nfp_prog *nfp_prog;
+ int ret;
+
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->act = act;
+ nfp_prog->start_off = prog_start;
+ nfp_prog->tgt_done = prog_done;
+
+ ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
+ if (ret)
+ goto out;
+
+ ret = nfp_prog_verify(nfp_prog, filter);
+ if (ret)
+ goto out;
+
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ goto out;
+
+ if (nfp_prog->num_regs <= 7)
+ nfp_prog->regs_per_thread = 16;
+ else
+ nfp_prog->regs_per_thread = 32;
+
+ nfp_prog->prog = prog_mem;
+ nfp_prog->__prog_alloc_len = prog_sz;
+
+ ret = nfp_translate(nfp_prog);
+ if (ret) {
+ pr_err("Translation failed with error %d (translated: %u)\n",
+ ret, nfp_prog->n_translated);
+ ret = -EINVAL;
+ }
+
+ res->n_instr = nfp_prog->prog_len;
+ res->dense_mode = nfp_prog->num_regs <= 7;
+out:
+ nfp_prog_free(nfp_prog);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
new file mode 100644
index 000000000000..144cae87f63a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
+#include <linux/pkt_cls.h>
+
+#include "nfp_bpf.h"
+
+/* Analyzer/verifier definitions */
+struct nfp_bpf_analyzer_priv {
+ struct nfp_prog *prog;
+ struct nfp_insn_meta *meta;
+};
+
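+/* Find the insn meta for @insn_idx, walking the list from the meta visited
+ * on the previous verifier callback, or from the list head/tail when one of
+ * those is closer.
+ */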
+static struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns)
+{
+ unsigned int forward, backward, i;
+
+ backward = meta->n - insn_idx;
+ forward = insn_idx - meta->n;
+
+ if (min(forward, backward) > n_insns - insn_idx - 1) {
+ backward = n_insns - insn_idx - 1;
+ meta = nfp_prog_last_meta(nfp_prog);
+ }
+ if (min(forward, backward) > insn_idx && backward > insn_idx) {
+ forward = insn_idx;
+ meta = nfp_prog_first_meta(nfp_prog);
+ }
+
+ if (forward < backward)
+ for (i = 0; i < forward; i++)
+ meta = nfp_meta_next(meta);
+ else
+ for (i = 0; i < backward; i++)
+ meta = nfp_meta_prev(meta);
+
+ return meta;
+}
+
+static int
+nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+
+ if (reg0->type != CONST_IMM) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act != NN_ACT_DIRECT &&
+ reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT &&
+ reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN &&
+ reg0->imm != TC_ACT_QUEUED) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env, u8 reg)
+{
+ if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
+ struct nfp_insn_meta *meta = priv->meta;
+
+ meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
+ priv->meta = meta;
+
+ if (meta->insn.src_reg == BPF_REG_10 ||
+ meta->insn.dst_reg == BPF_REG_10) {
+ pr_err("stack not yet supported\n");
+ return -EINVAL;
+ }
+ if (meta->insn.src_reg >= MAX_BPF_REG ||
+ meta->insn.dst_reg >= MAX_BPF_REG) {
+ pr_err("program uses extended registers - jit hardening?\n");
+ return -EINVAL;
+ }
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ return nfp_bpf_check_exit(priv->prog, env);
+
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.src_reg);
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.dst_reg);
+
+ return 0;
+}
+
+static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+ .insn_hook = nfp_verify_insn,
+};
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
+{
+ struct nfp_bpf_analyzer_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->prog = nfp_prog;
+ priv->meta = nfp_prog_first_meta(nfp_prog);
+
+ ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
+
+ kfree(priv);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 690635660195..ed824e11a1e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -62,6 +62,9 @@
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
+/* Interval for reading offloaded filter stats */
+#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)
+
/* Bar allocation */
#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
@@ -220,7 +223,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -266,6 +269,8 @@ struct nfp_net_rx_desc {
};
};
+#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -405,6 +410,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
fw_ver->minor == minor;
}
+struct nfp_stat_pair {
+ u64 pkts;
+ u64 bytes;
+};
+
/**
* struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device
@@ -413,6 +423,7 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @is_vf: Is the driver attached to a VF?
* @is_nfp3200: Is the driver for a NFP-3200 card?
* @fw_loaded: Is the firmware loaded?
+ * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -427,6 +438,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
@@ -473,6 +489,7 @@ struct nfp_net {
unsigned is_vf:1;
unsigned is_nfp3200:1;
unsigned fw_loaded:1;
+ unsigned bpf_offload_skip_sw:1;
u32 ctrl;
u32 fl_bufsz;
@@ -502,6 +519,11 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+
int max_tx_rings;
int max_rx_rings;
@@ -561,12 +583,28 @@ struct nfp_net {
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
+static inline u8 nn_readb(struct nfp_net *nn, int off)
+{
+ return readb(nn->ctrl_bar + off);
+}
+
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
writeb(val, nn->ctrl_bar + off);
}
-/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */
+/* NFP-3200 can't handle 16-bit accesses too well */
+static inline u16 nn_readw(struct nfp_net *nn, int off)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ return readw(nn->ctrl_bar + off);
+}
+
+static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ writew(val, nn->ctrl_bar + off);
+}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
@@ -757,4 +795,9 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
}
#endif /* CONFIG_NFP_NET_DEBUG */
+void nfp_net_filter_stats_timer(unsigned long data);
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf);
+
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 39dadfca84ef..aee3fd2b6538 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -60,6 +60,7 @@
#include <linux/ktime.h>
+#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "nfp_net_ctrl.h"
@@ -1292,38 +1293,72 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-/**
- * nfp_net_set_hash() - Set SKB hash data
- * @netdev: adapter's net_device structure
- * @skb: SKB to set the hash data on
- * @rxd: RX descriptor
- *
- * The RSS hash and hash-type are pre-pended to the packet data.
- * Extract and decode it and set the skb fields.
- */
static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+ unsigned int type, __be32 *hash)
{
- struct nfp_net_rx_hash *rx_hash;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
- !(netdev->features & NETIF_F_RXHASH))
+ if (!(netdev->features & NETIF_F_RXHASH))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- switch (be32_to_cpu(rx_hash->hash_type)) {
+ switch (type) {
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
break;
default:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
break;
}
}
+static void
+nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+ struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
+
+ nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
+static void *
+nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+ int meta_len)
+{
+ u8 *data = skb->data - meta_len;
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_net_set_hash(netdev, skb,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ skb->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ default:
+ return NULL;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data;
+}
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1438,14 +1473,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_reserve(skb, nn->rx_offset);
skb_put(skb, data_len - meta_len);
- nfp_net_set_hash(nn->netdev, skb, rxd);
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
r_vec->rx_bytes += skb->len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (nn->fw_ver.major <= 3) {
+ nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
+ if (unlikely(end != skb->data)) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ dev_kfree_skb_any(skb);
+ nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
+ continue;
+ }
+ }
+
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, nn->netdev);
@@ -2386,6 +2436,31 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
return stats;
}
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
+static int
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -ENOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -ENOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
+ return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+
+ return -EINVAL;
+}
+
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2440,6 +2515,11 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
+ if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ nn_err(nn, "Cannot disable HW TC offload while in use\n");
+ return -EBUSY;
+ }
+
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
@@ -2589,6 +2669,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
+ .ndo_setup_tc = nfp_net_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
@@ -2614,7 +2695,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2631,7 +2712,8 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
- nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "");
+ nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
+ nfp_net_ebpf_capable(nn) ? "BPF " : "");
}
/**
@@ -2674,10 +2756,13 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
+ spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
+ setup_timer(&nn->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
@@ -2799,6 +2884,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->features = netdev->hw_features;
+ if (nfp_net_ebpf_capable(nn))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
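
The metadata parsing added above walks a 32-bit big-endian field-type word prepended to the packet: each 4-bit nibble (NFP_NET_META_FIELD_SIZE) names the next 32-bit value, either a mark or a hash whose type occupies the following nibble, and parsing has to end exactly at skb->data or the frame is dropped. A standalone sketch of that decoding follows (not part of the patch; the constants mirror the NFP_NET_META_* values from the patch and the sample buffer is invented).

/* Standalone sketch of the prepend format parsed by nfp_net_parse_meta();
 * buffer contents are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define META_FIELD_SIZE 4
#define META_FIELD_MASK 0xf
#define META_HASH       1	/* next nibble carries the hash type */
#define META_MARK       2

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* field-type word 0x00000212: a MARK field, then a HASH field
	 * whose following nibble (2) is the hash type
	 */
	const uint8_t meta[] = {
		0x00, 0x00, 0x02, 0x12,	/* field-type word */
		0x00, 0x00, 0x00, 0x2a,	/* mark = 42 */
		0xde, 0xad, 0xbe, 0xef,	/* hash = 0xdeadbeef */
	};
	const uint8_t *data = meta;
	uint32_t info = get_be32(data);

	data += 4;
	while (info) {
		switch (info & META_FIELD_MASK) {
		case META_HASH:
			info >>= META_FIELD_SIZE;
			printf("hash type %u, hash 0x%08x\n",
			       info & META_FIELD_MASK, get_be32(data));
			data += 4;
			break;
		case META_MARK:
			printf("mark %u\n", get_be32(data));
			data += 4;
			break;
		default:
			printf("unknown field, frame would be dropped\n");
			return 1;
		}
		info >>= META_FIELD_SIZE;
	}
	/* in the driver, data must now equal skb->data */
	return 0;
}
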
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ad6c4e31cedd..93b10b441acb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -66,6 +66,13 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
/**
+ * Prepend field types
+ */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+#define NFP_NET_META_MARK 2
+
+/**
* Hash type pre-pended when a RSS hash was computed
*/
#define NFP_NET_RSS_NONE 0
@@ -123,6 +130,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
+#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -134,6 +142,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
 #define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -196,10 +205,37 @@
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
- * 64B reserved for future use (0x0080 - 0x00c0)
+ * NFP6000 - BPF section
+ * @NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * @NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
-#define NFP_NET_CFG_RESERVED 0x0080
-#define NFP_NET_CFG_RESERVED_SZ 0x0040
+#define NFP_NET_CFG_BPF_ABI 0x0080
+#define NFP_NET_BPF_ABI 1
+#define NFP_NET_CFG_BPF_CAP 0x0081
+#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
+#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
+#define NFP_NET_CFG_BPF_START 0x0084
+#define NFP_NET_CFG_BPF_DONE 0x0086
+#define NFP_NET_CFG_BPF_STACK_SZ 0x0088
+#define NFP_NET_CFG_BPF_INL_MTU 0x0089
+#define NFP_NET_CFG_BPF_SIZE 0x008e
+#define NFP_NET_CFG_BPF_ADDR 0x0090
+#define NFP_NET_CFG_BPF_CFG_8CTX (1 << 0) /* 8ctx mode */
+#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
+#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
+
+/**
+ * 40B reserved for future use (0x0098 - 0x00c0)
+ */
+#define NFP_NET_CFG_RESERVED 0x0098
+#define NFP_NET_CFG_RESERVED_SZ 0x0028
/**
* RSS configuration (0x0100 - 0x01ac):
@@ -303,6 +339,15 @@
#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
+#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
+#define NFP_NET_CFG_STATS_APP1_FRAMES (NFP_NET_CFG_STATS_BASE + 0xa0)
+#define NFP_NET_CFG_STATS_APP1_BYTES (NFP_NET_CFG_STATS_BASE + 0xa8)
+#define NFP_NET_CFG_STATS_APP2_FRAMES (NFP_NET_CFG_STATS_BASE + 0xb0)
+#define NFP_NET_CFG_STATS_APP2_BYTES (NFP_NET_CFG_STATS_BASE + 0xb8)
+#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
+#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
+
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
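
The BPF section added above packs configuration flags into the low bits of NFP_NET_CFG_BPF_ADDR: NFP_NET_CFG_BPF_CFG_MASK covers the bottom three bits, of which bit 0 selects 8-context mode, so the DMA address of the JITed code has to be at least 8-byte aligned. A small sketch of the packing and unpacking follows (not part of the patch; the sample address is invented).

/* Standalone sketch of the NFP_NET_CFG_BPF_ADDR packing: low three bits
 * carry flags, the rest is the (8-byte aligned) DMA address of the JITed
 * code.
 */
#include <stdio.h>
#include <stdint.h>

#define BPF_CFG_8CTX  (1ULL << 0)	/* 8ctx mode */
#define BPF_CFG_MASK  7ULL
#define BPF_ADDR_MASK (~BPF_CFG_MASK)

int main(void)
{
	uint64_t dma_addr = 0x12345000ULL;	/* invented, 8-byte aligned */
	uint64_t reg = dma_addr | BPF_CFG_8CTX;	/* value written to the BAR */

	printf("addr  0x%llx\n", (unsigned long long)(reg & BPF_ADDR_MASK));
	printf("flags 0x%llx\n", (unsigned long long)(reg & BPF_CFG_MASK));
	return 0;
}
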
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 4c9897220969..3418f2277e9d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -106,6 +106,18 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
+
+ {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
+ {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
+ /* see comments in outro functions in nfp_bpf_jit.c to find out
+ * how different BPF modes use app-specific counters
+ */
+ {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
+ {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
+ {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
+ {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
+ {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
+ {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
new file mode 100644
index 000000000000..8acfb631a0ea
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_offload.c
+ * Netronome network device driver: TC offload functions for PF and VF
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "nfp_bpf.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+void nfp_net_filter_stats_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+ struct nfp_stat_pair latest;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ mod_timer(&nn->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
+
+ spin_unlock_bh(&nn->rx_filter_lock);
+
+ latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+
+ if (latest.pkts != nn->rx_filter.pkts)
+ nn->rx_filter_change = jiffies;
+
+ nn->rx_filter = latest;
+}
+
+static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+{
+ nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ nn->rx_filter_prev = nn->rx_filter;
+ nn->rx_filter_change = jiffies;
+}
+
+static int
+nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 bytes, pkts;
+
+ pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
+ bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
+ bytes -= pkts * ETH_HLEN;
+
+ nn->rx_filter_prev = nn->rx_filter;
+
+ preempt_disable();
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int
+nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ /* TC direct action */
+ if (cls_bpf->exts_integrated) {
+ if (tc_no_actions(cls_bpf->exts))
+ return NN_ACT_DIRECT;
+
+ return -ENOTSUPP;
+ }
+
+ /* TC legacy mode */
+ if (!tc_single_action(cls_bpf->exts))
+ return -ENOTSUPP;
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a))
+ return NN_ACT_TC_DROP;
+
+ if (is_tcf_mirred_redirect(a) &&
+ tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ return NN_ACT_TC_REDIR;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int
+nfp_net_bpf_offload_prepare(struct nfp_net *nn,
+ struct tc_cls_bpf_offload *cls_bpf,
+ struct nfp_bpf_result *res,
+ void **code, dma_addr_t *dma_addr, u16 max_instr)
+{
+ unsigned int code_sz = max_instr * sizeof(u64);
+ enum nfp_bpf_action_type act;
+ u16 start_off, done_off;
+ unsigned int max_mtu;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return -ENOTSUPP;
+
+ ret = nfp_net_bpf_get_act(nn, cls_bpf);
+ if (ret < 0)
+ return ret;
+ act = ret;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (max_mtu < nn->netdev->mtu) {
+ nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ return -ENOTSUPP;
+ }
+
+ start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
+ *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
+ GFP_KERNEL);
+ if (!*code)
+ return -ENOMEM;
+
+ ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
+ max_instr, res);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ return ret;
+}
+
+static void
+nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
+ void *code, dma_addr_t dma_addr,
+ unsigned int code_sz, unsigned int n_instr,
+ bool dense_mode)
+{
+ u64 bpf_addr = dma_addr;
+ int err;
+
+ nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+
+ if (dense_mode)
+ bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
+
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
+
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
+
+ /* Enable passing packets through BPF function */
+ nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+
+ dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+
+ nfp_net_bpf_stats_reset(nn);
+ mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+}
+
+static int nfp_net_bpf_stop(struct nfp_net *nn)
+{
+ if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ return 0;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+ nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ spin_unlock_bh(&nn->rx_filter_lock);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+
+ del_timer_sync(&nn->rx_filter_stats_timer);
+ nn->bpf_offload_skip_sw = 0;
+
+ return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+}
+
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct nfp_bpf_result res;
+ dma_addr_t dma_addr;
+ u16 max_instr;
+ void *code;
+ int err;
+
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ /* There is nothing stopping us from implementing seamless
+ * replace but the simple method of loading I adopted in
+ * the firmware does not handle atomic replace (i.e. we have to
+ * stop the BPF offload and re-enable it). Leaking-in a few
+ * frames which didn't have BPF applied in the hardware should
+ * be fine if software fallback is available, though.
+ */
+ if (nn->bpf_offload_skip_sw)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_stop(nn);
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_ADD:
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_stop(nn);
+
+ case TC_CLSBPF_STATS:
+ return nfp_net_bpf_stats_update(nn, cls_bpf);
+
+ default:
+ return -ENOTSUPP;
+ }
+}
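
The filter statistics polling above re-arms its timer only while NFP_NET_CFG_CTRL_BPF is set and checks that bit under rx_filter_lock; nfp_net_bpf_stop() clears the bit under the same lock before calling del_timer_sync(), so the timer cannot re-arm behind the teardown. A minimal module-style sketch of that pattern follows (not part of the patch: the names are placeholders and it uses the pre-4.15 setup_timer()/unsigned-long-cookie timer API this code is written against).

/* Minimal sketch of the rearm-under-lock pattern used above; placeholder
 * names, pre-4.15 timer API.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct poller {
	struct timer_list timer;
	spinlock_t lock;
	bool active;
};

static struct poller p;

static void poll_fn(unsigned long data)
{
	struct poller *pl = (void *)data;

	spin_lock_bh(&pl->lock);
	if (pl->active)			/* re-arm only while still enabled */
		mod_timer(&pl->timer, jiffies + msecs_to_jiffies(100));
	spin_unlock_bh(&pl->lock);

	/* read the hardware counters here */
}

static int __init poller_init(void)
{
	spin_lock_init(&p.lock);
	setup_timer(&p.timer, poll_fn, (unsigned long)&p);
	p.active = true;
	mod_timer(&p.timer, jiffies + msecs_to_jiffies(100));
	return 0;
}

static void __exit poller_exit(void)
{
	spin_lock_bh(&p.lock);
	p.active = false;		/* stop future re-arms under the lock */
	spin_unlock_bh(&p.lock);
	del_timer_sync(&p.timer);	/* then wait out a running callback */
}

module_init(poller_init);
module_exit(poller_exit);
MODULE_LICENSE("GPL");
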
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index f7062cb648e1..2800bbf65a89 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -148,7 +148,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 3:
+ case 1 ... 4:
if (is_nfp3200) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 87b7b814778b..712d8bcb7d8c 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -751,7 +751,7 @@ static void netdev_rx(struct net_device *dev)
dev_err(&pdev->dev, "rx crc err\n");
ether->stats.rx_crc_errors++;
} else if (status & RXDS_ALIE) {
- dev_err(&pdev->dev, "rx aligment err\n");
+ dev_err(&pdev->dev, "rx alignment err\n");
ether->stats.rx_frame_errors++;
} else if (status & RXDS_PTLE) {
dev_err(&pdev->dev, "rx longer err\n");
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e..0df1391f9663 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,9 @@ config QED
---help---
This enables the support for ...
+config QED_LL2
+ bool
+
config QED_SRIOV
bool "QLogic QED 25/40/100Gb SR-IOV support"
depends on QED && PCI_IOV
@@ -104,4 +107,15 @@ config QEDE
---help---
This enables the support for ...
+config INFINIBAND_QEDR
+ tristate "QLogic qede RoCE sources [debug]"
+ depends on QEDE && 64BIT
+ select QED_LL2
+ default n
+ ---help---
+ This provides a temporary node that allows the compilation
+ and logical testing of the InfiniBand over Ethernet support
+ for QLogic QED. This would be replaced by the 'real' option
+ once the QEDR driver is added [+relocated].
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index d1f157e439cf..cda0af7fbc20 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,5 +2,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o
+ qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 45ab74676573..653bb5735f0c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,10 +23,11 @@
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
+#include "qed_debug.h"
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.1.20"
+#define DRV_MODULE_VERSION "8.10.9.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -34,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
+#define QED_WID_SIZE (1024)
+#define QED_PF_DEMS_SIZE (4)
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -42,11 +46,21 @@ enum qed_coalescing_mode {
struct qed_eth_cb_ops;
struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * QED_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
@@ -69,6 +83,7 @@ struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
+struct qed_ll2_info;
struct qed_mcp_info;
struct qed_rt_data {
@@ -148,13 +163,17 @@ enum QED_RESOURCES {
QED_RL,
QED_MAC,
QED_VLAN,
+ QED_RDMA_CNQ_RAM,
QED_ILT,
+ QED_LL2_QUEUE,
+ QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
+ QED_RDMA_CNQ,
QED_MAX_FEATURES,
};
@@ -357,6 +376,9 @@ struct qed_hwfn {
struct qed_sb_attn_info *p_sb_attn;
/* Protocol related */
+ bool using_ll2;
+ struct qed_ll2_info *p_ll2_info;
+ struct qed_rdma_info *p_rdma_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -393,6 +415,19 @@ struct qed_hwfn {
/* Buffer for unzipping firmware data */
void *unzip_buf;
+ struct dbg_tools_data dbg_info;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+
+ /* This is used to calculate the doorbell address */
+ u32 dpi_start_offset;
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -402,6 +437,7 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -426,6 +462,21 @@ struct qed_int_params {
bool fp_initialized;
u8 fp_msix_base;
u8 fp_msix_cnt;
+ u8 rdma_msix_base;
+ u8 rdma_msix_cnt;
+};
+
+struct qed_dbg_feature {
+ struct dentry *dentry;
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+struct qed_dbg_params {
+ struct qed_dbg_feature features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+ bool print_data;
};
struct qed_dev {
@@ -442,6 +493,8 @@ struct qed_dev {
CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev) QED_IS_AH(dev)
#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
@@ -517,7 +570,6 @@ struct qed_dev {
bool b_is_vf;
u32 drv_type;
-
struct qed_eth_stats *reset_stats;
struct qed_fw_data *fw_data;
@@ -542,7 +594,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
+ struct qed_dbg_params dbg_params;
+
+#ifdef CONFIG_QED_LL2
+ struct qed_cb_ll2_info *ll2;
+ u8 ll2_mac_address[ETH_ALEN];
+#endif
+
const struct firmware *firmware;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
};
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
@@ -606,7 +669,9 @@ void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
-
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1c35f376143e..82370a1a59ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -48,7 +48,13 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+/* For RoCE, configure 64K ILT pages to cover the RoCE maximum of 256K tasks. */
+#define ILT_DEFAULT_HW_P_SIZE 4
+#else
#define ILT_DEFAULT_HW_P_SIZE 3
+#endif
+
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -377,9 +383,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +410,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
return cnt;
}
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type, u32 count, bool has_fl)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +425,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
- u32 start_line, u32 total_size,
- u32 elem_size)
+ u32 start_line, u32 total_size, u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
@@ -448,8 +452,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_cli->first.val = *p_line;
p_cli->active = true;
- *p_line += DIV_ROUND_UP(p_blk->total_size,
- p_blk->real_size_in_page);
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -795,10 +798,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
@@ -926,12 +928,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
u32 size;
- size = min_t(u32, sz_left,
- p_blk->real_size_in_page);
+ size = min_t(u32, sz_left, p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- &p_phys,
- GFP_KERNEL);
+ size, &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
@@ -963,7 +962,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
rc = -ENOMEM;
goto ilt_shadow_fail;
}
@@ -976,7 +974,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +983,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
- if (rc != 0)
+ if (rc)
goto ilt_shadow_fail;
}
}
@@ -1056,10 +1054,8 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
u32 i;
p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
- if (!p_mngr) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ if (!p_mngr)
return -ENOMEM;
- }
/* Initialize ILT client registers */
clients = p_mngr->clients;
@@ -1111,24 +1107,18 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = qed_ilt_shadow_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate the T2 table */
rc = qed_cxt_src_t2_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ if (rc)
goto tables_alloc_fail;
- }
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ if (rc)
goto tables_alloc_fail;
- }
return 0;
@@ -1672,7 +1662,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
- active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
tm_offset += tm_iids.pf_tids[i];
}
@@ -1702,8 +1692,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid)
+ enum protocol_type type, u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
@@ -1717,8 +1706,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
- DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
- type);
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
@@ -1730,8 +1718,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid,
- enum protocol_type *p_type)
+ u32 cid, enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1750,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
return true;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
@@ -1781,8 +1767,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
- struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
@@ -1860,6 +1845,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
+ if (p_hwfn->using_ll2)
+ core_cids += 4;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) {
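
ILT_PAGE_IN_BYTES(p) above expands to 1 << (p + 12), so raising ILT_DEFAULT_HW_P_SIZE from 3 to 4 for the RoCE build doubles the ILT page from 32 KiB to 64 KiB, which is what the 64K comment refers to. A trivial check, not part of the patch:

/* Quick check of the ILT page sizes implied by ILT_DEFAULT_HW_P_SIZE. */
#include <stdio.h>

#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
	printf("p = 3 -> %u bytes\n", ILT_PAGE_IN_BYTES(3));	/* 32768 */
	printf("p = 4 -> %u bytes\n", ILT_PAGE_IN_BYTES(4));	/* 65536 */
	return 0;
}
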
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c6f6f2e8192d..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -170,6 +170,13 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 3656d2fd673d..130da1c0490b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -875,11 +875,8 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
int rc = 0;
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
- if (!p_hwfn->p_dcbx_info) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate 'struct qed_dcbx_info'\n");
+ if (!p_hwfn->p_dcbx_info)
rc = -ENOMEM;
- }
return rc;
}
@@ -1190,10 +1187,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
}
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ if (!dcbx_info)
return -ENOMEM;
- }
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
@@ -1227,10 +1222,8 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
struct qed_dcbx_get *dcbx_info;
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
- if (!dcbx_info) {
- DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ if (!dcbx_info)
return NULL;
- }
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
@@ -1982,6 +1975,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
if (!dcbx_info->operational.ieee) {
DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
return -EINVAL;
}
@@ -2150,17 +2144,19 @@ static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
return rc;
}
-int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
{
return qed_dcbnl_get_ieee_ets(cdev, ets, true);
}
-int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
{
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
-int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
@@ -2204,7 +2200,7 @@ int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return 0;
}
-int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644
index 000000000000..88e7d5bef909
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -0,0 +1,6898 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Chip IDs enum */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_K2,
+ MAX_CHIP_IDS
+};
+
+/* Memory groups enum */
+enum mem_groups {
+ MEM_GROUP_PXP_MEM,
+ MEM_GROUP_DMAE_MEM,
+ MEM_GROUP_CM_MEM,
+ MEM_GROUP_QM_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_BRB_RAM,
+ MEM_GROUP_BRB_MEM,
+ MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
+ MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
+ MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CONN_CFC_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
+ MEM_GROUP_CAU_PI,
+ MEM_GROUP_CAU_MEM,
+ MEM_GROUP_PXP_ILT,
+ MEM_GROUP_MULD_MEM,
+ MEM_GROUP_BTB_MEM,
+ MEM_GROUP_IGU_MEM,
+ MEM_GROUP_IGU_MSIX,
+ MEM_GROUP_CAU_SB,
+ MEM_GROUP_BMB_RAM,
+ MEM_GROUP_BMB_MEM,
+ MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+ "PXP_MEM",
+ "DMAE_MEM",
+ "CM_MEM",
+ "QM_MEM",
+ "TM_MEM",
+ "BRB_RAM",
+ "BRB_MEM",
+ "PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
+ "IOR",
+ "RAM",
+ "BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CONN_CFC_MEM",
+ "TASK_CFC_MEM",
+ "CAU_PI",
+ "CAU_MEM",
+ "PXP_ILT",
+ "MULD_MEM",
+ "BTB_MEM",
+ "IGU_MEM",
+ "IGU_MSIX",
+ "CAU_SB",
+ "BMB_RAM",
+ "BMB_MEM",
+};
+
+/* Idle check conditions */
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+ return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+ return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) !=
+ (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+ return r[0] != imm[0];
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+ return r[0] & imm[0];
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+ return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+ return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
+ cond0,
+ cond1,
+ cond2,
+ cond3,
+ cond4,
+ cond5,
+ cond6,
+ cond7,
+ cond8,
+ cond9,
+ cond10,
+ cond11,
+ cond12,
+};
+
+/******************************* Data Types **********************************/
+
+enum platform_ids {
+ PLATFORM_ASIC,
+ PLATFORM_RESERVED,
+ PLATFORM_RESERVED2,
+ PLATFORM_RESERVED3,
+ MAX_PLATFORM_IDS
+};
+
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+ const char *name;
+ struct {
+ u8 num_ports;
+ u8 num_pfs;
+ } per_platform[MAX_PLATFORM_IDS];
+};
+
+/* Platform constant definitions */
+struct platform_defs {
+ const char *name;
+ u32 delay_factor;
+};
+
+/* Storm constant definitions */
+struct storm_defs {
+ char letter;
+ enum block_id block_id;
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ bool has_vfc;
+ u32 sem_fast_mem_addr;
+ u32 sem_frame_mode_addr;
+ u32 sem_slow_enable_addr;
+ u32 sem_slow_mode_addr;
+ u32 sem_slow_mode1_conf_addr;
+ u32 sem_sync_dbg_empty_addr;
+ u32 sem_slow_dbg_empty_addr;
+ u32 cm_ctx_wr_addr;
+ u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_rd_addr;
+ u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_rd_addr;
+ u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_rd_addr;
+ u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_rd_addr;
+};
+
+/* Block constant definitions */
+struct block_defs {
+ const char *name;
+ bool has_dbg_bus[MAX_CHIP_IDS];
+ bool associated_to_storm;
+ u32 storm_id; /* Valid only if associated_to_storm is true */
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ u32 dbg_select_addr;
+ u32 dbg_cycle_enable_addr;
+ u32 dbg_shift_addr;
+ u32 dbg_force_valid_addr;
+ u32 dbg_force_frame_addr;
+ bool has_reset_bit;
+ bool unreset; /* If true, the block is taken out of reset before dump */
+ enum dbg_reset_regs reset_reg;
+ u8 reset_bit_offset; /* Bit offset in reset register */
+};
+
+/* Reset register definitions */
+struct reset_reg_defs {
+ u32 addr;
+ u32 unreset_val;
+ bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+ u32 default_val[MAX_CHIP_IDS];
+ u32 min;
+ u32 max;
+ bool is_preset;
+ u32 exclude_all_preset_val;
+ u32 crash_preset_val;
+};
+
+struct rss_mem_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 addr; /* In 128b units */
+ u32 num_entries[MAX_CHIP_IDS];
+ u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+};
+
+struct vfc_ram_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 base_row;
+ u32 num_rows;
+};
+
+struct big_ram_defs {
+ const char *instance_name;
+ enum mem_groups mem_group_id;
+ enum mem_groups ram_mem_group_id;
+ enum dbg_grc_params grc_param;
+ u32 addr_reg_addr;
+ u32 data_reg_addr;
+ u32 num_of_blocks[MAX_CHIP_IDS];
+};
+
+struct phy_defs {
+ const char *phy_name;
+ u32 base_addr;
+ u32 tbus_addr_lo_addr;
+ u32 tbus_addr_hi_addr;
+ u32 tbus_data_lo_addr;
+ u32 tbus_data_hi_addr;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_LCIDS 320
+#define MAX_LTIDS 320
+#define NUM_IOR_SETS 2
+#define IORS_PER_SET 176
+#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+#define BYTES_IN_DWORD sizeof(u32)
+
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+ (int)(FIELD_BIT_OFFSET(type, field) / 32)
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+ FIELD_DWORD_SHIFT(type, field))
+#define SET_VAR_FIELD(var, type, field, val) \
+ do { \
+ var[FIELD_DWORD_OFFSET(type, field)] &= \
+ (~FIELD_BIT_MASK(type, field)); \
+ var[FIELD_DWORD_OFFSET(type, field)] |= \
+ (val) << FIELD_DWORD_SHIFT(type, field); \
+ } while (0)
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, addr, (arr)[i]); \
+ } while (0)
+#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ (arr)[i] = qed_rd(dev, ptt, addr); \
+ } while (0)
+
+#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+#define REG_DUMP_LEN_SHIFT 24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define IDLE_CHK_MAX_ENTRIES_SIZE 32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE 64
+#define VFC_CAM_CMD_ROW_OFFSET 48
+#define VFC_CAM_CMD_ROW_SIZE 9
+#define VFC_CAM_ADDR_STRUCT_SIZE 16
+#define VFC_CAM_ADDR_OP_OFFSET 0
+#define VFC_CAM_ADDR_OP_SIZE 4
+#define VFC_CAM_RESP_STRUCT_SIZE 256
+#define VFC_RAM_ADDR_STRUCT_SIZE 16
+#define VFC_RAM_ADDR_OP_OFFSET 0
+#define VFC_RAM_ADDR_OP_SIZE 2
+#define VFC_RAM_ADDR_ROW_OFFSET 2
+#define VFC_RAM_ADDR_ROW_SIZE 10
+#define VFC_RAM_RESP_STRUCT_SIZE 256
+#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+#define NUM_VFC_RAM_TYPES 4
+#define VFC_CAM_NUM_ROWS 512
+#define VFC_OPCODE_CAM_RD 14
+#define VFC_OPCODE_RAM_RD 0
+#define NUM_RSS_MEM_TYPES 5
+#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_BLOCK_SIZE_BYTES 128
+#define BIG_RAM_BLOCK_SIZE_DWORDS \
+ BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+#define NUM_PHY_TBUS_ADDRESSES 2048
+#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+#define RESET_REG_UNRESET_OFFSET 4
+#define STALL_DELAY_MS 500
+#define STATIC_DEBUG_LINE_DWORDS 9
+#define NUM_DBG_BUS_LINES 256
+#define NUM_COMMON_GLOBAL_PARAMS 8
+#define FW_IMG_MAIN 1
+#define REG_FIFO_DEPTH_ELEMENTS 32
+#define REG_FIFO_ELEMENT_DWORDS 2
+#define REG_FIFO_DEPTH_DWORDS \
+ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+#define IGU_FIFO_DEPTH_ELEMENTS 64
+#define IGU_FIFO_ELEMENT_DWORDS 4
+#define IGU_FIFO_DEPTH_DWORDS \
+ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+ (MCP_REG_SCRATCH + \
+ offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+#define EMPTY_FW_VERSION_STR "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* Debug arrays */
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+ { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
+ { "bb_b0",
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
+ { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+ /* Tstorm */
+ {'T', BLOCK_TSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TCM_REG_CTX_RBC_ACCS,
+ 4, TCM_REG_AGG_CON_CTX,
+ 16, TCM_REG_SM_CON_CTX,
+ 2, TCM_REG_AGG_TASK_CTX,
+ 4, TCM_REG_SM_TASK_CTX},
+ /* Mstorm */
+ {'M', BLOCK_MSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCM}, false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MCM_REG_CTX_RBC_ACCS,
+ 1, MCM_REG_AGG_CON_CTX,
+ 10, MCM_REG_SM_CON_CTX,
+ 2, MCM_REG_AGG_TASK_CTX,
+ 7, MCM_REG_SM_TASK_CTX},
+ /* Ustorm */
+ {'U', BLOCK_USEM,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ UCM_REG_CTX_RBC_ACCS,
+ 2, UCM_REG_AGG_CON_CTX,
+ 13, UCM_REG_SM_CON_CTX,
+ 3, UCM_REG_AGG_TASK_CTX,
+ 3, UCM_REG_SM_TASK_CTX},
+ /* Xstorm */
+ {'X', BLOCK_XSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XCM_REG_CTX_RBC_ACCS,
+ 9, XCM_REG_AGG_CON_CTX,
+ 15, XCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0},
+ /* Ystorm */
+ {'Y', BLOCK_YSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCY}, false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YCM_REG_CTX_RBC_ACCS,
+ 2, YCM_REG_AGG_CON_CTX,
+ 3, YCM_REG_SM_CON_CTX,
+ 2, YCM_REG_AGG_TASK_CTX,
+ 12, YCM_REG_SM_TASK_CTX},
+ /* Pstorm */
+ {'P', BLOCK_PSEM,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PCM_REG_CTX_RBC_ACCS,
+ 0, 0,
+ 10, PCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0}
+};
+
+/* Block definitions array */
+static struct block_defs block_grc_defs = {
+ "grc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
+ GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
+ GRC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_UA, 1
+};
+
+static struct block_defs block_miscs_defs = {
+ "miscs", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_defs = {
+ "misc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbu_defs = {
+ "dbu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pglue_b_defs = {
+ "pglue_b", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
+ PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
+ PGLUE_B_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 1
+};
+
+static struct block_defs block_cnig_defs = {
+ "cnig", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
+ CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
+ CNIG_REG_DBG_FORCE_FRAME_K2,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 0
+};
+
+static struct block_defs block_cpmu_defs = {
+ "cpmu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 8
+};
+
+static struct block_defs block_ncsi_defs = {
+ "ncsi", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
+ NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
+ NCSI_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 5
+};
+
+static struct block_defs block_opte_defs = {
+ "opte", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 4
+};
+
+static struct block_defs block_bmb_defs = {
+ "bmb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
+ BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
+ BMB_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 7
+};
+
+static struct block_defs block_pcie_defs = {
+ "pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp_defs = {
+ "mcp", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp2_defs = {
+ "mcp2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
+ MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
+ MCP2_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pswhst_defs = {
+ "pswhst", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
+ PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
+ PSWHST_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswhst2_defs = {
+ "pswhst2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
+ PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
+ PSWHST2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswrd_defs = {
+ "pswrd", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
+ PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
+ PSWRD_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswrd2_defs = {
+ "pswrd2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
+ PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
+ PSWRD2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswwr_defs = {
+ "pswwr", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
+ PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
+ PSWWR_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswwr2_defs = {
+ "pswwr2", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswrq_defs = {
+ "pswrq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
+ PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
+ PSWRQ_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pswrq2_defs = {
+ "pswrq2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
+ PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
+ PSWRQ2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pglcs_defs = {
+ "pglcs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
+ PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
+ PGLCS_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 2
+};
+
+static struct block_defs block_ptu_defs = {
+ "ptu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
+ PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
+ PTU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
+};
+
+static struct block_defs block_dmae_defs = {
+ "dmae", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
+ DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
+ DMAE_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
+};
+
+static struct block_defs block_tcm_defs = {
+ "tcm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
+ TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
+ TCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
+};
+
+static struct block_defs block_mcm_defs = {
+ "mcm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
+ MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
+ MCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
+};
+
+static struct block_defs block_ucm_defs = {
+ "ucm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
+ UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
+ UCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
+};
+
+static struct block_defs block_xcm_defs = {
+ "xcm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
+ XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
+ XCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
+};
+
+static struct block_defs block_ycm_defs = {
+ "ycm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
+ YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
+ YCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
+};
+
+static struct block_defs block_pcm_defs = {
+ "pcm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
+ PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
+ PCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
+};
+
+static struct block_defs block_qm_defs = {
+ "qm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
+ QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
+ QM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
+};
+
+static struct block_defs block_tm_defs = {
+ "tm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
+ TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
+ TM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
+};
+
+static struct block_defs block_dorq_defs = {
+ "dorq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
+ DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
+ DORQ_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
+};
+
+static struct block_defs block_brb_defs = {
+ "brb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
+ BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
+ BRB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
+};
+
+static struct block_defs block_src_defs = {
+ "src", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
+ SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
+ SRC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
+};
+
+static struct block_defs block_prs_defs = {
+ "prs", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
+ PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
+ PRS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
+};
+
+static struct block_defs block_tsdm_defs = {
+ "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
+ TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
+ TSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
+};
+
+static struct block_defs block_msdm_defs = {
+ "msdm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
+ MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
+ MSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
+};
+
+static struct block_defs block_usdm_defs = {
+ "usdm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
+ USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
+ USDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
+};
+
+static struct block_defs block_xsdm_defs = {
+ "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
+ XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
+ XSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
+};
+
+static struct block_defs block_ysdm_defs = {
+ "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
+ YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
+ YSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
+};
+
+static struct block_defs block_psdm_defs = {
+ "psdm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
+ PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
+ PSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
+};
+
+static struct block_defs block_tsem_defs = {
+ "tsem", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
+ TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
+ TSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
+};
+
+static struct block_defs block_msem_defs = {
+ "msem", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
+ MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
+ MSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
+};
+
+static struct block_defs block_usem_defs = {
+ "usem", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
+ USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
+ USEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
+};
+
+static struct block_defs block_xsem_defs = {
+ "xsem", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
+ XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
+ XSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
+};
+
+static struct block_defs block_ysem_defs = {
+ "ysem", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
+ YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
+ YSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
+};
+
+static struct block_defs block_psem_defs = {
+ "psem", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
+ PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
+ PSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
+};
+
+static struct block_defs block_rss_defs = {
+ "rss", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
+ RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
+ RSS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
+};
+
+static struct block_defs block_tmld_defs = {
+ "tmld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
+ TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
+ TMLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
+};
+
+static struct block_defs block_muld_defs = {
+ "muld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
+ MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
+ MULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
+};
+
+static struct block_defs block_yuld_defs = {
+ "yuld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
+ YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
+ YULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+};
+
+static struct block_defs block_xyld_defs = {
+ "xyld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
+ XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
+ XYLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
+};
+
+static struct block_defs block_prm_defs = {
+ "prm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
+ PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
+ PRM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
+};
+
+static struct block_defs block_pbf_pb1_defs = {
+ "pbf_pb1", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
+ PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
+ PBF_PB1_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 11
+};
+
+static struct block_defs block_pbf_pb2_defs = {
+ "pbf_pb2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
+ PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
+ PBF_PB2_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 12
+};
+
+static struct block_defs block_rpb_defs = {
+ "rpb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
+ RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
+ RPB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
+};
+
+static struct block_defs block_btb_defs = {
+ "btb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
+ BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
+ BTB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
+};
+
+static struct block_defs block_pbf_defs = {
+ "pbf", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
+ PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
+ PBF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
+};
+
+static struct block_defs block_rdif_defs = {
+ "rdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
+ RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
+ RDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
+};
+
+static struct block_defs block_tdif_defs = {
+ "tdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
+ TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
+ TDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
+};
+
+static struct block_defs block_cdu_defs = {
+ "cdu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
+ CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
+ CDU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
+};
+
+static struct block_defs block_ccfc_defs = {
+ "ccfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
+ CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
+ CCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
+};
+
+static struct block_defs block_tcfc_defs = {
+ "tcfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
+ TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
+ TCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
+};
+
+static struct block_defs block_igu_defs = {
+ "igu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
+ IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
+ IGU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
+};
+
+static struct block_defs block_cau_defs = {
+ "cau", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
+ CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
+ CAU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
+};
+
+static struct block_defs block_umac_defs = {
+ "umac", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
+ UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
+ UMAC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 6
+};
+
+static struct block_defs block_xmac_defs = {
+ "xmac", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbg_defs = {
+ "dbg", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
+};
+
+static struct block_defs block_nig_defs = {
+ "nig", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
+ NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
+ NIG_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
+};
+
+static struct block_defs block_wol_defs = {
+ "wol", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
+ WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
+ WOL_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
+};
+
+static struct block_defs block_bmbn_defs = {
+ "bmbn", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
+ BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
+ BMBN_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ipc_defs = {
+ "ipc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 8
+};
+
+static struct block_defs block_nwm_defs = {
+ "nwm", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
+ NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
+ NWM_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
+};
+
+static struct block_defs block_nws_defs = {
+ "nws", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 12
+};
+
+static struct block_defs block_ms_defs = {
+ "ms", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 13
+};
+
+static struct block_defs block_phy_pcie_defs = {
+ "phy_pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_led_defs = {
+ "led", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_misc_aeu_defs = {
+ "misc_aeu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_bar0_map_defs = {
+ "bar0_map", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
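+
+/* Field order of the positional initializers above (a sketch inferred from
+ * how the fields are used later in this file, e.g. by
+ * qed_update_blocks_reset_state(); the exact struct block_defs field names
+ * are an assumption): {name, exists[chip], associated_to_storm, storm_id,
+ * dbg_bus_client[chip], debug SELECT / DWORD_ENABLE / SHIFT / FORCE_VALID /
+ * FORCE_FRAME register addresses, has_reset_bit, unreset, reset_reg,
+ * reset_bit_offset}.
+ */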
+
+static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
+ &block_grc_defs,
+ &block_miscs_defs,
+ &block_misc_defs,
+ &block_dbu_defs,
+ &block_pglue_b_defs,
+ &block_cnig_defs,
+ &block_cpmu_defs,
+ &block_ncsi_defs,
+ &block_opte_defs,
+ &block_bmb_defs,
+ &block_pcie_defs,
+ &block_mcp_defs,
+ &block_mcp2_defs,
+ &block_pswhst_defs,
+ &block_pswhst2_defs,
+ &block_pswrd_defs,
+ &block_pswrd2_defs,
+ &block_pswwr_defs,
+ &block_pswwr2_defs,
+ &block_pswrq_defs,
+ &block_pswrq2_defs,
+ &block_pglcs_defs,
+ &block_dmae_defs,
+ &block_ptu_defs,
+ &block_tcm_defs,
+ &block_mcm_defs,
+ &block_ucm_defs,
+ &block_xcm_defs,
+ &block_ycm_defs,
+ &block_pcm_defs,
+ &block_qm_defs,
+ &block_tm_defs,
+ &block_dorq_defs,
+ &block_brb_defs,
+ &block_src_defs,
+ &block_prs_defs,
+ &block_tsdm_defs,
+ &block_msdm_defs,
+ &block_usdm_defs,
+ &block_xsdm_defs,
+ &block_ysdm_defs,
+ &block_psdm_defs,
+ &block_tsem_defs,
+ &block_msem_defs,
+ &block_usem_defs,
+ &block_xsem_defs,
+ &block_ysem_defs,
+ &block_psem_defs,
+ &block_rss_defs,
+ &block_tmld_defs,
+ &block_muld_defs,
+ &block_yuld_defs,
+ &block_xyld_defs,
+ &block_prm_defs,
+ &block_pbf_pb1_defs,
+ &block_pbf_pb2_defs,
+ &block_rpb_defs,
+ &block_btb_defs,
+ &block_pbf_defs,
+ &block_rdif_defs,
+ &block_tdif_defs,
+ &block_cdu_defs,
+ &block_ccfc_defs,
+ &block_tcfc_defs,
+ &block_igu_defs,
+ &block_cau_defs,
+ &block_umac_defs,
+ &block_xmac_defs,
+ &block_dbg_defs,
+ &block_nig_defs,
+ &block_wol_defs,
+ &block_bmbn_defs,
+ &block_ipc_defs,
+ &block_nwm_defs,
+ &block_nws_defs,
+ &block_ms_defs,
+ &block_phy_pcie_defs,
+ &block_led_defs,
+ &block_misc_aeu_defs,
+ &block_bar0_map_defs,
+};
+
+static struct platform_defs s_platform_defs[] = {
+ {"asic", 1},
+ {"reserved", 0},
+ {"reserved2", 0},
+ {"reserved3", 0}
+};
+
+static struct grc_param_defs s_grc_param_defs[] = {
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+};
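+
+/* Each entry above is assumed to follow struct grc_param_defs in the order
+ * {default_val[chip], min, max, is_preset, exclude_all_preset_val,
+ * crash_preset_val}; only default_val[] is confirmed by its use in
+ * qed_dbg_grc_set_params_default() below, the remaining field names are an
+ * assumption.
+ */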
+
+static struct rss_mem_defs s_rss_mem_defs[] = {
+ { "rss_mem_cid", "rss_cid", 0,
+ {256, 256, 320},
+ {32, 32, 32} },
+ { "rss_mem_key_msb", "rss_key", 1024,
+ {128, 128, 208},
+ {256, 256, 256} },
+ { "rss_mem_key_lsb", "rss_key", 2048,
+ {128, 128, 208},
+ {64, 64, 64} },
+ { "rss_mem_info", "rss_info", 3072,
+ {128, 128, 208},
+ {16, 16, 16} },
+ { "rss_mem_ind", "rss_ind", 4096,
+ {(128 * 128), (128 * 128), (128 * 208)},
+ {16, 16, 16} }
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+ {"vfc_ram_tt1", "vfc_ram", 0, 512},
+ {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+ {"vfc_ram_stt2", "vfc_ram", 640, 32},
+ {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
+static struct big_ram_defs s_big_ram_defs[] = {
+ { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ {4800, 4800, 5632} },
+ { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ {2880, 2880, 3680} },
+ { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ {1152, 1152, 1152} }
+};
+
+static struct reset_reg_defs s_reset_regs_defs[] = {
+ { MISCS_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ { MISCS_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ { MISCS_REG_RESET_PL_HV_2, 0x0,
+ {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISC_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ { MISC_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+};
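+
+/* Each entry above is indexed by the DBG_RESET_REG_* value in its trailing
+ * comment and, judging by the accesses in qed_update_blocks_reset_state()
+ * and qed_grc_unreset_blocks() below, holds {addr, unreset_val,
+ * exists[chip]}.
+ */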
+
+static struct phy_defs s_phy_defs[] = {
+ {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
+ {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+};
+
+/**************************** Private Functions ******************************/
+
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+ u32 dword;
+
+ memcpy((u8 *)&dword, buf, sizeof(dword));
+ return dword;
+}
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (dev_data->initialized)
+ return DBG_STATUS_OK;
+
+ if (QED_IS_K2(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_K2;
+ dev_data->mode_enable[MODE_K2] = 1;
+ } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_BB_B0;
+ dev_data->mode_enable[MODE_BB_B0] = 1;
+ } else {
+ return DBG_STATUS_UNKNOWN_CHIP;
+ }
+
+ dev_data->platform_id = PLATFORM_ASIC;
+ dev_data->mode_enable[MODE_ASIC] = 1;
+ dev_data->initialized = true;
+ return DBG_STATUS_OK;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
+{
+ /* First, read the address that points to the fw_info location.
+ * This address is stored in the last line of the Storm RAM.
+ */
+ u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(struct fw_info_location);
+ struct fw_info_location fw_info_location;
+ u32 *dest = (u32 *)&fw_info_location;
+ u32 i;
+
+ memset(&fw_info_location, 0, sizeof(fw_info_location));
+ memset(fw_info, 0, sizeof(*fw_info));
+ for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ if (fw_info_location.size > 0 && fw_info_location.size <=
+ sizeof(*fw_info)) {
+ /* Read FW version info from Storm RAM */
+ addr = fw_info_location.grc_addr;
+ dest = (u32 *)fw_info;
+ for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
+/* Dumps the specified string to the specified buffer. Returns the dumped size
+ * in bytes (actual length + 1 for the terminating null character).
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+ if (dump)
+ strcpy(dump_buf, str);
+ return (u32)strlen(str) + 1;
+}
+
+/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
+ * in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+ u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+
+ align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+ if (dump && align_size)
+ memset(dump_buf, 0, align_size);
+ return align_size;
+}
+
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+ bool dump,
+ const char *param_name, const char *param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a string param value */
+ if (dump)
+ *(char_buf + offset) = 1;
+ offset++;
+
+ /* Dump param value */
+ offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+ return BYTES_TO_DWORDS(offset);
+}
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+ bool dump, const char *param_name, u32 param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a numeric param value */
+ if (dump)
+ *(char_buf + offset) = 0;
+ offset++;
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ /* Dump param value (and change offset from bytes to dwords) */
+ offset = BYTES_TO_DWORDS(offset);
+ if (dump)
+ *(dump_buf + offset) = param_val;
+ offset++;
+ return offset;
+}
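+
+/* Resulting buffer layout (derived from the two helpers above): a string
+ * param is serialized as "name\0" <byte 1> "value\0" <zero padding to the
+ * next dword>, while a numeric param is serialized as "name\0" <byte 0>
+ * <zero padding to the next dword> followed by the value dword.
+ */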
+
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+ char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+ struct fw_info fw_info = { {0}, {0} };
+ int printed_chars;
+ u32 offset = 0;
+
+ if (dump) {
+ /* Read FW image/version from PRAM in a non-reset SEMI */
+ bool found = false;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+ storm_id++) {
+ /* Read FW version/image */
+ if (!dev_data->block_in_reset
+ [s_storm_defs[storm_id].block_id]) {
+ /* read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn,
+ p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ printed_chars =
+ snprintf(fw_ver_str,
+ sizeof(fw_ver_str),
+ "%d_%d_%d_%d",
+ fw_info.ver.num.major,
+ fw_info.ver.num.minor,
+ fw_info.ver.num.rev,
+ fw_info.ver.num.eng);
+ if (printed_chars < 0 || printed_chars >=
+ sizeof(fw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
+ }
+
+ found = true;
+ }
+ }
+ }
+
+ /* Dump FW version, image and timestamp */
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-version", fw_ver_str);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-image", fw_img_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fw-timestamp", fw_info.ver.timestamp);
+ return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+ if (dump) {
+ u32 global_section_offsize, global_section_addr, mfw_ver;
+ u32 public_data_addr, global_section_offsize_addr;
+ int printed_chars;
+
+ /* Find MCP public data GRC address.
+ * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ */
+ public_data_addr = qed_rd(p_hwfn, p_ptt,
+ MISC_REG_SHARED_MEM_ADDR) |
+ MCP_REG_SCRATCH;
+
+ /* Find MCP public global section offset */
+ global_section_offsize_addr = public_data_addr +
+ offsetof(struct mcp_public_data,
+ sections) +
+ sizeof(offsize_t) * PUBLIC_GLOBAL;
+ global_section_offsize = qed_rd(p_hwfn, p_ptt,
+ global_section_offsize_addr);
+ global_section_addr = MCP_REG_SCRATCH +
+ (global_section_offsize &
+ OFFSIZE_OFFSET_MASK) * 4;
+
+ /* Read MFW version from MCP public global section */
+ mfw_ver = qed_rd(p_hwfn, p_ptt,
+ global_section_addr +
+ offsetof(struct public_global, mfw_ver));
+
+ /* Dump MFW version param */
+ printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
+ "%d_%d_%d_%d",
+ (u8) (mfw_ver >> 24),
+ (u8) (mfw_ver >> 16),
+ (u8) (mfw_ver >> 8),
+ (u8) mfw_ver);
+ if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid MFW version string\n");
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
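+
+/* For example (hypothetical value), an mfw_ver of 0x08141000 is formatted as
+ * "8_20_16_0": one decimal field per byte, most significant byte first.
+ */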
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+ bool dump, const char *name, u32 num_params)
+{
+ return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 num_specific_global_params)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+
+ /* Find platform string and dump global params section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump,
+ "global_params",
+ NUM_COMMON_GLOBAL_PARAMS +
+ num_specific_global_params);
+
+ /* Store params */
+ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_mfw_ver_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "tools-version", TOOLS_VERSION);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "chip",
+ s_chip_defs[dev_data->chip_id].name);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "platform",
+ s_platform_defs[dev_data->platform_id].
+ name);
+ offset +=
+ qed_dump_num_param(dump_buf + offset, dump, "pci-func",
+ p_hwfn->abs_pf_id);
+ return offset;
+}
+
+/* Writes the last section to the specified buffer at the given offset.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+ u32 start_offset = offset, crc = ~0;
+
+ /* Dump CRC section header */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+ /* Calculate CRC32 and add it to the dword following the "last" section.
+ */
+ if (dump)
+ *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ DWORDS_TO_BYTES(offset));
+ offset++;
+ return offset - start_offset;
+}
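+
+/* The trailer produced above is a "last" section header with zero params,
+ * followed by one dword holding the bitwise NOT of the CRC32 of the entire
+ * dump up to and including that header.
+ */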
+
+/* Update blocks reset state */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Read reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++)
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id])
+ reg_val[i] = qed_rd(p_hwfn,
+ p_ptt, s_reset_regs_defs[i].addr);
+
+ /* Check if blocks are in reset */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ dev_data->block_in_reset[i] =
+ s_block_defs[i]->has_reset_bit &&
+ !(reg_val[s_block_defs[i]->reset_reg] &
+ BIT(s_block_defs[i]->reset_bit_offset));
+}
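+
+/* In other words, a block is considered in reset when it has a reset bit and
+ * that bit reads back as 0 in its reset register; blocks without a reset bit
+ * are always treated as out of reset.
+ */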
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+
+ dbg_reset_reg_addr =
+ s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
+ new_reset_reg_val = old_reset_reg_val &
+ ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
+}
+
+static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_bus_frame_modes mode)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask.
+ * (1 = enable, 0 = disable)
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 client_mask)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ bool arg1, arg2;
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match(p_hwfn, modes_buf_offset);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ }
+}
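+
+/* The mode tree walked above is a prefix-notation expression: each byte is
+ * either an operator (NOT takes one operand, OR/AND take two) or a leaf
+ * encoding a mode index as (value - MAX_INIT_MODE_OPS), which is looked up
+ * in dev_data->mode_enable[].
+ */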
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Clear the "set by user" flag for all GRC params */
+static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_set_by_user[i] = 0;
+}
+
+/* Assign default GRC param values */
+static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ if (!dev_data->grc.param_set_by_user[i])
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+ enum dbg_storms storm)
+{
+ return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, u8 mem_group_id)
+{
+ u8 i;
+
+ /* Check Storm match */
+ if (s_block_defs[block_id]->associated_to_storm &&
+ !qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ return false;
+
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
+ mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn,
+ s_big_ram_defs[i].grc_param);
+ if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
+ MEM_GROUP_PXP_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ if (mem_group_id == MEM_GROUP_RAM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ if (mem_group_id == MEM_GROUP_PBUF)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ if (mem_group_id == MEM_GROUP_CAU_MEM ||
+ mem_group_id == MEM_GROUP_CAU_SB ||
+ mem_group_id == MEM_GROUP_CAU_PI)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ if (mem_group_id == MEM_GROUP_QM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
+ mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
+ MEM_GROUP_IGU_MSIX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ if (mem_group_id == MEM_GROUP_MULD_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ if (mem_group_id == MEM_GROUP_PRS_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ if (mem_group_id == MEM_GROUP_TM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ if (mem_group_id == MEM_GROUP_SDM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
+ MEM_GROUP_RDIF_CTX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ if (mem_group_id == MEM_GROUP_CM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ if (mem_group_id == MEM_GROUP_IOR)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+
+ return true;
+}
+
+/* Stalls or unstalls all Storms, according to the 'stall' argument */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool stall)
+{
+ u8 reg_val = stall ? 1 : 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ u32 reg_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0;
+
+ qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
+ }
+ }
+
+ msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Fill reset regs values */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
+ reg_val[s_block_defs[i]->reset_reg] |=
+ BIT(s_block_defs[i]->reset_bit_offset);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ }
+ }
+}
+
+/* Returns the attention name offsets of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+{
+ const struct dbg_attn_block *base_attn_block_arr =
+ (const struct dbg_attn_block *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+ return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+ u8 *num_attn_regs)
+{
+ const struct dbg_attn_block_type_data *block_type_data =
+ qed_get_block_attn_data(block_id, attn_type);
+
+ *num_attn_regs = block_type_data->num_regs;
+ return &((const struct dbg_attn_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
+ regs_offset];
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 reg_idx, num_attn_regs;
+ u32 block_id;
+
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id])
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+
+ /* Check mode */
+ bool eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ /* Mode match - read parity status read-clear
+ * register.
+ */
+ qed_rd(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(reg_data->
+ sts_clr_address));
+ }
+ }
+}
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - 'count' = num_dumped_entries
+ * - 'split' = split_type
+ * - 'id' = split_id (dumped only if split_id >= 0)
+ * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
+ * param_val != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+ bool dump,
+ u32 num_reg_entries,
+ const char *split_type,
+ int split_id,
+ const char *param_name, const char *param_val)
+{
+ u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+ u32 offset = 0;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_regs", num_params);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "count", num_reg_entries);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "split", split_type);
+ if (split_id >= 0)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "id", split_id);
+ if (param_name && param_val)
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, param_name, param_val);
+ return offset;
+}
+
+/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 offset = 0, i;
+
+ if (dump) {
+ *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
+ for (i = 0; i < len; i++, addr++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr));
+ } else {
+ offset += len + 1;
+ }
+
+ return offset;
+}
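+
+/* Each dumped register entry therefore starts with a header dword of
+ * (addr | (len << REG_DUMP_LEN_SHIFT)), where addr and len are in dwords,
+ * followed by len dwords of register data.
+ */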
+
+/* Dumps GRC registers entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ u32 *num_dumped_reg_entries)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ *num_dumped_reg_entries = 0;
+ while (input_offset < input_regs_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr =
+ (const struct dbg_dump_cond_hdr *)
+ &input_regs_arr.ptr[input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check mode/block */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match && block_enable[cond_hdr->block_id]) {
+ for (i = 0; i < cond_hdr->data_size;
+ i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS),
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_LENGTH));
+ (*num_dumped_reg_entries)++;
+ }
+ } else {
+ input_offset += cond_hdr->data_size;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps a GRC registers split section (header + register entries).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *split_type_name,
+ u32 split_id,
+ const char *param_name,
+ const char *param_val)
+{
+ u32 num_dumped_reg_entries, offset;
+
+ /* Calculate register dump header size (and skip it for now) */
+ offset = qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ /* Dump registers */
+ offset += qed_grc_dump_regs_entries(p_hwfn,
+ p_ptt,
+ input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ &num_dumped_reg_entries);
+
+ /* Write register dump header */
+ if (dump && num_dumped_reg_entries > 0)
+ qed_grc_dump_regs_hdr(dump_buf,
+ dump,
+ num_dumped_reg_entries,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ return num_dumped_reg_entries > 0 ? offset : 0;
+}
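+
+/* Note the two-pass pattern used by several dump helpers in this file: the
+ * header is first written with dump=false only to advance the offset, the
+ * entries are then dumped, and the header is finally rewritten in place once
+ * the real entry count is known.
+ */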
+
+/* Dumps registers according to the input registers array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *param_name, const char *param_val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, input_offset = 0;
+ u8 port_id, pf_id;
+
+ if (dump)
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_regs_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ case SPLIT_TYPE_VF:
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ "eng",
+ (u32)(-1),
+ param_name,
+ param_val);
+ break;
+ case SPLIT_TYPE_PORT:
+ for (port_id = 0;
+ port_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_ports;
+ port_id++) {
+ if (dump)
+ qed_port_pretend(p_hwfn, p_ptt,
+ port_id);
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "port", port_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_PF:
+ case SPLIT_TYPE_PORT_PF:
+ for (pf_id = 0;
+ pf_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_pfs;
+ pf_id++) {
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id, param_name,
+ param_val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ /* Pretend to original PF */
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ return offset;
+}
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i, offset = 0, num_regs = 0;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (s_reset_regs_defs
+ [i].addr), 1);
+ num_regs++;
+ }
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true, num_regs, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dump registers that are modified during GRC Dump and therefore must be dumped
+ * first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, num_reg_entries = 0, block_id;
+ u8 storm_id, reg_idx, num_attn_regs;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write parity registers */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id] && dump)
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
+ /* Mode match - read and dump registers */
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ reg_data->mask_address,
+ 1);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS),
+ 1);
+ num_reg_entries += 2;
+ }
+ }
+ }
+
+ /* Write storm stall status registers */
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
+ dump)
+ continue;
+
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].
+ sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED),
+ 1);
+ num_reg_entries++;
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dumps a GRC memory header (section and params).
+ * The following parameters are dumped:
+ * name - name is dumped only if it's not NULL.
+ * addr - byte_addr is dumped only if name is NULL.
+ * len - dword_len is always dumped.
+ * width - bit_width is dumped if it's not zero.
+ * packed - packed=1 is dumped if it's not false.
+ * mem_group - mem_group is always dumped.
+ * is_storm - true only if the memory is related to a Storm.
+ * storm_letter - storm letter (valid only if is_storm is true).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u8 num_params = 3;
+ u32 offset = 0;
+ char buf[64];
+
+ if (!dword_len)
+ DP_NOTICE(p_hwfn,
+ "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+ if (bit_width)
+ num_params++;
+ if (packed)
+ num_params++;
+
+ /* Dump section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_mem", num_params);
+ if (name) {
+ /* Dump name */
+ if (is_storm) {
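+ /* Overwrite the '?' placeholder below with the Storm
+ * letter, so the dumped name carries its Storm prefix.
+ */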
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), name);
+ } else {
+ strcpy(buf, name);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "name", buf);
+ if (dump)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from %s...\n",
+ dword_len, buf);
+ } else {
+ /* Dump address */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "addr", byte_addr);
+ if (dump && dword_len > 64)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from address 0x%x...\n",
+ dword_len, byte_addr);
+ }
+
+ /* Dump len */
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+
+ /* Dump bit width */
+ if (bit_width)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "width", bit_width);
+
+ /* Dump packed */
+ if (packed)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "packed", 1);
+
+ /* Dump reg type */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), mem_group);
+ } else {
+ strcpy(buf, mem_group);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+ return offset;
+}
+
+/* Dumps a single GRC memory. If name is NULL, the memory is identified in the
+ * dump by its address. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ byte_addr,
+ dword_len,
+ bit_width,
+ packed,
+ mem_group, is_storm, storm_letter);
+ if (dump) {
+ u32 i;
+
+ for (i = 0; i < dword_len;
+ i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ } else {
+ offset += dword_len;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memory entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_mems_arr,
+ u32 *dump_buf, bool dump)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ while (input_offset < input_mems_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr;
+ u32 num_entries;
+ bool eval_mode;
+
+ cond_hdr = (const struct dbg_dump_cond_hdr *)
+ &input_mems_arr.ptr[input_offset++];
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check required mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+ for (i = 0; i < num_entries;
+ i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+ const struct dbg_dump_mem *mem =
+ (const struct dbg_dump_mem *)
+ &input_mems_arr.ptr[input_offset];
+ u8 mem_group_id;
+
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ if (mem_group_id >= MEM_GROUPS_NUM) {
+ DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+ return 0;
+ }
+
+ if (qed_grc_is_mem_included(p_hwfn,
+ (enum block_id)cond_hdr->block_id,
+ mem_group_id)) {
+ u32 mem_byte_addr =
+ DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS));
+ u32 mem_len = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_LENGTH);
+ char storm_letter = 'a';
+ bool is_storm = false;
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS)
+ * (mem_len / MAX_LCIDS);
+ else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS)
+ * (mem_len / MAX_LTIDS);
+
+ /* If memory is associated with Storm, update
+ * Storm details.
+ */
+ if (s_block_defs[cond_hdr->block_id]->
+ associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs[
+ cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn, p_ptt,
+ dump_buf + offset, dump, NULL,
+ mem_byte_addr, mem_len, 0,
+ false,
+ s_mem_group_names[mem_group_id],
+ is_storm, storm_letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories according to the input array dump_mem.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_mems_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ offset += qed_grc_dump_mem_entries(p_hwfn,
+ p_ptt,
+ curr_input_mems_arr,
+ dump_buf + offset,
+ dump);
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Dumping split memories is currently not supported\n");
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 num_lids,
+ u32 lid_size,
+ u32 rd_reg_addr,
+ u8 storm_id)
+{
+ u32 i, lid, total_size;
+ u32 offset = 0;
+
+ if (!lid_size)
+ return 0;
+ lid_size *= BYTES_IN_DWORD;
+ total_size = num_lids * lid_size;
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ 0,
+ total_size,
+ lid_size * 32,
+ false,
+ name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Dump context data */
+ if (dump) {
+ for (lid = 0; lid < num_lids; lid++) {
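+ /* For each context dword, write the LID to the Storm's
+ * cm_ctx_wr_addr register and then read the dword from
+ * rd_reg_addr.
+ */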
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].cm_ctx_wr_addr,
+ BIT(9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ rd_reg_addr);
+ }
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ /* Dump Conn AG context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Conn ST context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task AG context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task ST context */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_rd_addr,
+ storm_id);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC IOR data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ char buf[10] = "IOR_SET_?";
+ u8 storm_id, set_id;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE +
+ DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+
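+ /* Replace the trailing '?' in "IOR_SET_?" with the
+ * set index.
+ */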
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ s_storm_defs
+ [storm_id].letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 storm_id)
+{
+ u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+ u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+ u32 row, i;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ "vfc_cam",
+ 0,
+ total_size,
+ 256,
+ false,
+ "vfc_cam",
+ true, s_storm_defs[storm_id].letter);
+ if (dump) {
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+ u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+ u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+ u32 row, i;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ ram_defs->mem_name,
+ 0,
+ total_size,
+ 256,
+ false,
+ ram_defs->type_name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ if (!dump)
+ return offset + total_size;
+
+ for (row = ram_defs->base_row;
+ row < ram_defs->base_row + ram_defs->num_rows;
+ row++, offset += VFC_RAM_RESP_DWORDS) {
+ /* Write VFC RAM command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ ram_cmd, VFC_RAM_CMD_DWORDS);
+
+ /* Write VFC RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ ram_addr, VFC_RAM_ADDR_DWORDS);
+
+ /* Read VFC RAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id, i;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) &&
+ s_storm_defs[storm_id].has_vfc &&
+ (storm_id != DBG_PSTORM_ID ||
+ dev_data->platform_id == PLATFORM_ASIC)) {
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ storm_id,
+ &s_vfc_ram_defs
+ [i]);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 rss_mem_id;
+
+ for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+ struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
+ u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
+ u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
+ u32 total_size = (num_entries * entry_width) / 32;
+ bool packed = (entry_width == 16);
+ u32 addr = rss_defs->addr;
+ u32 i, j;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ rss_defs->mem_name,
+ addr,
+ total_size,
+ entry_width,
+ packed,
+ rss_defs->type_name, false, 0);
+
+ if (!dump) {
+ offset += total_size;
+ continue;
+ }
+
+ /* Dump RSS data */
+ for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
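+ /* Each selected RAM address is read through
+ * BYTES_IN_DWORD consecutive data dwords.
+ */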
+ for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
+ *(dump_buf + offset) =
+ qed_rd(p_hwfn, p_ptt,
+ RSS_REG_RSS_RAM_DATA +
+ DWORDS_TO_BYTES(j));
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char mem_name[12] = "???_BIG_RAM";
+ char type_name[8] = "???_RAM";
+ u32 ram_size, total_blocks;
+ u32 offset = 0, i, j;
+
+ total_blocks =
+ s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+
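+ /* Overwrite the "???" prefix of the default names with the Big RAM
+ * instance name.
+ */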
+ strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+
+ /* Dump memory header */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ ram_size,
+ BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ false, type_name, false, 0);
+
+ if (!dump)
+ return offset + ram_size;
+
+ /* Read and dump Big RAM data */
+ for (i = 0; i < total_blocks / 2; i++) {
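+ /* Each address write is followed by reading two blocks' worth of
+ * data dwords (2 * BIG_RAM_BLOCK_SIZE_DWORDS), hence the
+ * total_blocks / 2 iterations.
+ */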
+ qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
+ i);
+ for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
+ s_big_ram_defs[big_ram_id].
+ data_reg_addr +
+ DWORDS_TO_BYTES(j));
+ }
+
+ return offset;
+}
+
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ bool block_enable[MAX_BLOCK_ID] = { 0 };
+ bool halted = false;
+ u32 offset = 0;
+
+ /* Halt MCP */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Dump MCP scratchpad */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_SCRATCH,
+ MCP_REG_SCRATCH_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP cpu_reg_file */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_CPU_REG_FILE,
+ MCP_REG_CPU_REG_FILE_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP registers */
+ block_enable[BLOCK_MCP] = true;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, block_enable, "block", "MCP");
+
+ /* Dump required non-MCP registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+ dump, 1, "eng", -1, "block", "MCP");
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (MISC_REG_SHARED_MEM_ADDR), 1);
+
+ /* Release MCP */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+ return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs. */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+ char mem_name[32];
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+ struct phy_defs *phy_defs = &s_phy_defs[phy_id];
+ int printed_chars;
+
+ printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name);
+ if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid PHY memory name\n");
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ PHY_DUMP_SIZE_DWORDS,
+ 16, true, mem_name, false, 0);
+ if (dump) {
+ u32 addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ u32 addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ u32 data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ u32 data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ u8 *bytes_buf = (u8 *)(dump_buf + offset);
+
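+ /* The tbus address is split into high and low bytes written
+ * to separate registers; each address yields one low and one
+ * high data byte, so the buffer is filled byte by byte.
+ */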
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_hi_addr);
+ }
+ }
+ }
+
+ offset += PHY_DUMP_SIZE_DWORDS;
+ }
+
+ return offset;
+}
+
+static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 cycle_en,
+ u8 right_shift, u8 force_valid, u8 force_frame)
+{
+ struct block_defs *p_block_defs = s_block_defs[block_id];
+
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+}
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, block_id, line_id, addr, i;
+ struct block_defs *p_block_defs;
+
+ if (dump) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG, "Dumping static debug data...\n");
+
+ /* Disable all blocks debug output */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (p_block_defs->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ }
+
+ qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+ qed_bus_set_framing_mode(p_hwfn,
+ p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+ }
+
+ /* Dump all static debug lines for each relevant block */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ continue;
+
+ /* Dump static section params */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ p_block_defs->name, 0,
+ block_dwords, 32, false,
+ "STATIC", false, 0);
+
+ if (dump && !dev_data->block_in_reset[block_id]) {
+ u8 dbg_client_id =
+ p_block_defs->dbg_client_id[dev_data->chip_id];
+
+ /* Enable block's client */
+ qed_bus_enable_clients(p_hwfn, p_ptt,
+ BIT(dbg_client_id));
+
+ for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id,
+ 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
+ i < STATIC_DEBUG_LINE_DWORDS;
+ i++, offset++, addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
+ addr);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ } else {
+ /* All lines are invalid - dump zeros */
+ if (dump)
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ }
+ }
+
+ if (dump) {
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ }
+
+ return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns the dump status; the dumped size in dwords is returned through
+ * num_dumped_dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ bool parities_masked = false;
+ u8 i, port_mode = 0;
+ u32 offset = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Fill GRC parameters that were not set by the user with their default
+ * value.
+ */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ /* Find port mode */
+ if (dump) {
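+ /* MISC_REG_PORT_MODE is translated to the number of ports:
+ * 0 -> 1 port, 1 -> 2 ports, 2 -> 4 ports.
+ */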
+ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+ case 0:
+ port_mode = 1;
+ break;
+ case 1:
+ port_mode = 2;
+ break;
+ case 2:
+ port_mode = 4;
+ break;
+ }
+ }
+
+ /* Update reset state */
+ if (dump)
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 4);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "grc-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-lcids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-ltids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "num-ports", port_mode);
+
+ /* Dump reset registers (dumped before taking blocks out of reset) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_reset_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Take all blocks out of reset (using reset registers) */
+ if (dump) {
+ qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
+
+ /* Disable all parities using MFW command */
+ if (dump) {
+ parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+ if (!parities_masked) {
+ if (qed_grc_get_param
+ (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
+ return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
+ }
+ }
+
+ /* Dump modified registers (dumped before modifying them) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_modified_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Stall storms */
+ if (dump &&
+ (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_IOR) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+ qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+ /* Dump all regs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+ /* Dump all blocks except MCP */
+ bool block_enable[MAX_BLOCK_ID];
+
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ block_enable[i] = true;
+ block_enable[BLOCK_MCP] = false;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ block_enable, NULL, NULL);
+ }
+
+ /* Dump memories */
+ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+ offset += qed_grc_dump_mcp(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump context */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+ offset += qed_grc_dump_ctx(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump RSS memories */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+ offset += qed_grc_dump_rss(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump Big RAM */
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+ offset += qed_grc_dump_big_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, i);
+
+ /* Dump IORs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
+ offset += qed_grc_dump_iors(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump VFC */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
+ offset += qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump PHY tbus */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
+ CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ offset += qed_grc_dump_phy(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump static debug data */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_STATIC) &&
+ dev_data->bus.state == DBG_BUS_STATE_IDLE)
+ offset += qed_grc_dump_static_debug(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+ if (dump) {
+ /* Unstall storms */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+ qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+ /* Clear parity status */
+ qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+ /* Enable all parities using MFW command */
+ if (parities_masked)
+ qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+ }
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u16 rule_id,
+ const struct dbg_idle_chk_rule *rule,
+ u16 fail_entry_id, u32 *cond_reg_values)
+{
+ const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays
+ [BIN_BUF_DBG_IDLE_CHK_REGS].
+ ptr)[rule->reg_offset];
+ const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct dbg_idle_chk_result_hdr *hdr =
+ (struct dbg_idle_chk_result_hdr *)dump_buf;
+ const struct dbg_idle_chk_info_reg *info_regs =
+ &regs[rule->num_cond_regs].info_reg;
+ u32 next_reg_offset = 0, i, offset = 0;
+ u8 reg_id;
+
+ /* Dump rule data */
+ if (dump) {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->rule_id = rule_id;
+ hdr->mem_entry_id = fail_entry_id;
+ hdr->severity = rule->severity;
+ hdr->num_dumped_cond_regs = rule->num_cond_regs;
+ }
+
+ offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+ /* Dump condition register values */
+ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+
+ /* Write register header */
+ if (dump) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
+ + offset);
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0,
+ sizeof(struct dbg_idle_chk_result_reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0
+ ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size;
+ i++, next_reg_offset++, offset++)
+ dump_buf[offset] =
+ cond_reg_values[next_reg_offset];
+ } else {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+ reg->entry_size;
+ }
+ }
+
+ /* Dump info register values */
+ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+ const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+ u32 block_id;
+
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+ continue;
+ }
+
+ /* Check if register's block is in reset */
+ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ if (!dev_data->block_in_reset[block_id]) {
+ bool eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match =
+ qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 grc_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS));
+
+ /* Write register header */
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg->size;
+ i++, offset++, grc_addr += 4)
+ dump_buf[offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump,
+ const struct dbg_idle_chk_rule *input_rules,
+ u32 num_input_rules, u32 *num_failing_rules)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+ u32 i, j, offset = 0;
+ u16 entry_id;
+ u8 reg_id;
+
+ *num_failing_rules = 0;
+ for (i = 0; i < num_input_rules; i++) {
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_rule *rule;
+ const union dbg_idle_chk_reg *regs;
+ u16 num_reg_entries = 1;
+ bool check_rule = true;
+ const u32 *imm_values;
+
+ rule = &input_rules[i];
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
+ [rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
+ [rule->imm_offset];
+
+ /* Check if all condition register blocks are out of reset, and
+ * find maximal number of entries (all condition registers that
+ * are memories must have the same size, which is > 1).
+ */
+ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+ reg_id++) {
+ u32 block_id = GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ check_rule = !dev_data->block_in_reset[block_id];
+ if (cond_regs[reg_id].num_entries > num_reg_entries)
+ num_reg_entries = cond_regs[reg_id].num_entries;
+ }
+
+ if (!check_rule && dump)
+ continue;
+
+ /* Go over all register entries (number of entries is the same
+ * for all condition registers).
+ */
+ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+ /* Read current entry of all condition registers */
+ if (dump) {
+ u32 next_reg_offset = 0;
+
+ for (reg_id = 0;
+ reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg
+ *reg = &cond_regs[reg_id];
+
+ /* Find GRC address (if it's a memory,
+ * the address of the specific entry is
+ * calculated).
+ */
+ u32 grc_addr =
+ DWORDS_TO_BYTES(
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS));
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two
+ (reg->entry_size) : 1;
+
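+ /* The entry stride used for addressing is the
+ * entry size rounded up to a power of 2.
+ */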
+ grc_addr +=
+ DWORDS_TO_BYTES(
+ (reg->start_entry +
+ entry_id)
+ * padded_entry_size);
+ }
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ for (j = 0; j < reg->entry_size;
+ j++, next_reg_offset++,
+ grc_addr += 4)
+ cond_reg_values[next_reg_offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+
+ /* Call rule's condition function - a return value of
+ * true indicates failure.
+ */
+ if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values) || !dump) {
+ offset +=
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
+ (*num_failing_rules)++;
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0, num_failing_rules = 0;
+ u32 num_failing_rules_offset;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "idle-chk");
+
+ /* Dump idle check section header with a single parameter */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+ num_failing_rules_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ const struct dbg_idle_chk_cond_hdr *cond_hdr =
+ (const struct dbg_idle_chk_cond_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
+ [input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 curr_failing_rules;
+
+ offset +=
+ qed_idle_chk_dump_rule_entries(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ (const struct dbg_idle_chk_rule *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
+ ptr[input_offset],
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
+ &curr_failing_rules);
+ num_failing_rules += curr_failing_rules;
+ }
+
+ input_offset += cond_hdr->data_size;
+ }
+
+ /* Overwrite num_rules parameter */
+ if (dump)
+ qed_dump_num_param(dump_buf + num_failing_rules_offset,
+ dump, "num_rules", num_failing_rules);
+
+ return offset;
+}
+
+/* Finds the meta data image in NVRAM. */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+
+ /* Call NVRAM get file command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type, &ret_mcp_resp, &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att) != 0)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
+ u32 bytes_to_copy, read_offset = 0;
+ s32 bytes_left = nvram_size_bytes;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ (nvram_offset_bytes +
+ read_offset) |
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT),
+ &ret_mcp_resp, &ret_mcp_param,
+ &ret_read_size,
+ (u32 *)((u8 *)ret_buf +
+ read_offset)) != 0)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr - the GRC address of the trace data
+ * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
+ * the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *trace_data_grc_addr,
+ u32 *trace_data_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ u32 signature;
+
+ /* Extract MCP trace section GRC address from offsize structure (within
+ * scratchpad).
+ */
+ *trace_data_grc_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+ /* Read signature from MCP trace section */
+ signature = qed_rd(p_hwfn, p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, signature));
+ if (signature != MFW_TRACE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read trace size from MCP trace section */
+ *trace_data_size_bytes = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+ return DBG_STATUS_OK;
+}
+
+/* Reads MCP trace meta data image from NVRAM.
+ * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
+ * file)
+ * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
+ * Trace meta data starts (invalid when loaded from file)
+ * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 trace_data_size_bytes,
+ u32 *running_bundle_id,
+ u32 *trace_meta_offset_bytes,
+ u32 *trace_meta_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Find running bundle ID */
+ u32 running_mfw_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+ QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+ enum dbg_status status;
+ u32 nvram_image_type;
+
+ *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+ if (*running_bundle_id > 1)
+ return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+ /* Find image in NVRAM */
+ nvram_image_type =
+ (*running_bundle_id ==
+ DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
+
+ return status;
+}
+
+/* Reads the MCP Trace data from the specified GRC address into the specified
+ * buffer.
+ */
+static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr, u32 size_in_dwords, u32 *buf)
+{
+ u32 i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
+ size_in_dwords, grc_addr);
+ for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
+ buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+}
+
+/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
+ * buffer.
+ */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_in_bytes,
+ u32 size_in_bytes, u32 *buf)
+{
+ u8 *byte_buf = (u8 *)buf;
+ u8 modules_num, i;
+ u32 signature;
+
+ /* Read meta data from NVRAM */
+ enum dbg_status status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Extract and check first signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Extract number of modules */
+ modules_num = *(byte_buf++);
+
+ /* Skip all modules */
+ for (i = 0; i < modules_num; i++) {
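+ /* Each module entry is a length byte followed by
+ * module_len bytes of module data.
+ */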
+ u8 module_len = *(byte_buf++);
+
+ byte_buf += module_len;
+ }
+
+ /* Extract and check second signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+ return DBG_STATUS_OK;
+}
+
+/* Dump MCP Trace */
+enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+ u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ enum dbg_status status;
+ int halted = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Get trace data info */
+ status = qed_mcp_trace_get_data_info(p_hwfn,
+ p_ptt,
+ &trace_data_grc_addr,
+ &trace_data_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "mcp-trace");
+
+ /* Halt MCP while reading from scratchpad so the read data will be
+ * consistent. If the halt fails, the MCP trace is taken anyway, with a
+ * small risk that it may be corrupt.
+ */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Find trace data size */
+ trace_data_size_dwords =
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
+
+ /* Dump trace data section header and param */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_data", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_data_size_dwords);
+
+ /* Read trace data from scratchpad into dump buffer */
+ if (dump)
+ qed_mcp_trace_read_data(p_hwfn,
+ p_ptt,
+ trace_data_grc_addr,
+ trace_data_size_dwords,
+ dump_buf + offset);
+ offset += trace_data_size_dwords;
+
+ /* Resume MCP (only if halt succeeded) */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ /* Dump trace meta section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_meta", 1);
+
+ /* Read trace meta info */
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump trace meta size param (trace_meta_size_bytes is always
+ * dword-aligned).
+ */
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ trace_meta_size_dwords);
+
+ /* Read trace meta image into dump buffer */
+ if (dump) {
+ status = qed_mcp_trace_read_meta(p_hwfn,
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status != DBG_STATUS_OK)
+ return status;
+ }
+
+ offset += trace_meta_size_dwords;
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Dump GRC FIFO */
+enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "reg-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "reg_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += REG_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+ /* Pull available data from fifo. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Test for dwords_read not passing
+ * buffer size since more entries could be added to the buffer as we are
+ * emptying it.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
+ REG_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ REG_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "igu-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "igu_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += IGU_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+ /* Pull available data from fifo. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Test for dwords_read not passing
+ * buffer size since more entries could be added to the buffer as we are
+ * emptying it.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
+ IGU_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_MEMORY,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ IGU_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, size_param_offset, override_window_dwords;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "protection-override");
+
+ /* Dump data section header and param. The size param is 0 for now, and
+ * is overwritten after reading the data.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "protection_override_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ /* Add override window info to buffer */
+ override_window_dwords =
+ qed_rd(p_hwfn, p_ptt,
+ GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ GRC_REG_PROTECTION_OVERRIDE_WINDOW,
+ (u64)(uintptr_t)(dump_buf + offset),
+ override_window_dwords, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ offset += override_window_dwords;
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char storm_letter_str[2] = "?";
+ struct fw_info fw_info;
+ u32 offset = 0, i;
+ u8 storm_id;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "fw-asserts");
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
+ last_list_idx, element_addr;
+
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Dump FW Asserts section header and params */
+ storm_letter_str[0] = s_storm_defs[storm_id].letter;
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
+ storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ fw_info.fw_asserts_section.
+ list_element_dword_size);
+
+ if (!dump) {
+ offset += fw_info.fw_asserts_section.
+ list_element_dword_size;
+ continue;
+ }
+
+ /* Read and dump FW Asserts data */
+ fw_asserts_section_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
+ section_ram_line_offset);
+ next_list_idx_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_next_index_dword_offset);
+ next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
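+ /* last_list_idx is the entry preceding next_list_idx, wrapping to
+ * the last list element when next_list_idx is 0.
+ */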
+ last_list_idx = (next_list_idx > 0
+ ? next_list_idx
+ : fw_info.fw_asserts_section.list_num_elements)
+ - 1;
+ element_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_dword_offset) +
+ last_list_idx *
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_element_dword_size);
+ for (i = 0;
+ i < fw_info.fw_asserts_section.list_element_dword_size;
+ i++, offset++, element_addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ }
+
+ /* Dump last section */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ return offset;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
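+ /* The binary image starts with the number of buffers (one dword),
+ * followed by an array of bin_buffer_hdr entries holding each
+ * buffer's offset and length.
+ */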
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* GRC Dump */
+ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Clear all GRC params */
+ qed_dbg_grc_clear_params(p_hwfn);
+ return status;
+}
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ if (!dev_data->idle_chk.buf_size_set) {
+ dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt,
+ NULL, false);
+ dev_data->idle_chk.buf_size_set = true;
+ }
+
+ *buf_size = dev_data->idle_chk.buf_size;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Idle Check Dump */
+ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Perform dump */
+ return qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+ char *format_str;
+};
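+
+/* The MASK/SHIFT pairs above describe sub-fields packed into 'data'. A field is
+ * extracted by masking and shifting, e.g. (illustrative):
+ *   level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ *           MCP_TRACE_FORMAT_LEVEL_SHIFT;
+ */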
+
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+};
+
+/* Reg fifo element */
+struct reg_fifo_element {
+ u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
+#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT 24
+#define REG_FIFO_ELEMENT_PF_MASK 0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT 28
+#define REG_FIFO_ELEMENT_VF_MASK 0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT 36
+#define REG_FIFO_ELEMENT_PORT_MASK 0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT 43
+#define REG_FIFO_ELEMENT_MASTER_MASK 0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT 47
+#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
+};
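+
+/* 'data' is a single packed 64-bit word; fields above bit 31 (e.g. PORT at
+ * bits 36-37) are extracted from it with GET_FIELD() by
+ * qed_parse_reg_fifo_dump() below.
+ */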
+
+/* IGU fifo element */
+struct igu_fifo_element {
+ u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
+ u32 dword1;
+ u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
+ u32 reserved;
+};
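+
+/* dword1 and dword2 hold bits 32..95 of the element; the parser combines them
+ * as ((u64)dword2 << 32) | dword1 before applying the DWORD12 field macros
+ * above (see qed_parse_igu_fifo_dump()).
+ */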
+
+struct igu_fifo_wr_data {
+ u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+ u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+/* Protection override element */
+struct protection_override_element {
+ u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
+};
+
+enum igu_fifo_sources {
+ IGU_SRC_PXP0,
+ IGU_SRC_PXP1,
+ IGU_SRC_PXP2,
+ IGU_SRC_PXP3,
+ IGU_SRC_PXP4,
+ IGU_SRC_PXP5,
+ IGU_SRC_PXP6,
+ IGU_SRC_PXP7,
+ IGU_SRC_CAU,
+ IGU_SRC_ATTN,
+ IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+ IGU_ADDR_TYPE_MSIX_MEM,
+ IGU_ADDR_TYPE_WRITE_PBA,
+ IGU_ADDR_TYPE_WRITE_INT_ACK,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+ IGU_ADDR_TYPE_READ_INT,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+ IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+ u16 start_addr;
+ u16 end_addr;
+ char *desc;
+ char *vf_desc;
+ enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN 1024
+#define MCP_TRACE_MAX_MODULE_LEN 8
+#define MCP_TRACE_FORMAT_MAX_PARAMS 3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
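+
+/* Note: MCP_TRACE_FORMAT_PARAM_WIDTH evaluates to 2, i.e. each parameter size
+ * field in mcp_trace_format.data is 2 bits wide (P2_SIZE_SHIFT 20 minus
+ * P1_SIZE_SHIFT 18).
+ */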
+
+/********************************* Macros ************************************/
+
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
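+
+/* e.g. BYTES_TO_DWORDS(16) == 4, assuming BYTES_IN_DWORD is 4 */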
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+ "Operation completed successfully",
+ "Debug application version wasn't set",
+ "Unsupported debug application version",
+ "The debug block wasn't reset since the last recording",
+ "Invalid arguments",
+ "The debug output was already set",
+ "Invalid PCI buffer size",
+ "PCI buffer allocation failed",
+ "A PCI buffer wasn't allocated",
+ "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ "GRC/Timestamp input overlap in cycle dword 0",
+ "Cannot record Storm data since the entire recording cycle is used by HW",
+ "The Storm was already enabled",
+ "The specified Storm wasn't enabled",
+ "The block was already enabled",
+ "The specified block wasn't enabled",
+ "No input was enabled for recording",
+ "Filters and triggers are not allowed when recording in 64b units",
+ "The filter was already enabled",
+ "The trigger was already enabled",
+ "The trigger wasn't enabled",
+ "A constraint can be added only after a filter was enabled or a trigger state was added",
+ "Cannot add more than 3 trigger states",
+ "Cannot add more than 4 constraints per filter or trigger state",
+ "The recording wasn't started",
+ "A trigger was configured, but it didn't trigger",
+ "No data was recorded",
+ "Dump buffer is too small",
+ "Dumped data is not aligned to chunks",
+ "Unknown chip",
+ "Failed allocating virtual memory",
+ "The input block is in reset",
+ "Invalid MCP trace signature found in NVRAM",
+ "Invalid bundle ID found in NVRAM",
+ "Failed getting NVRAM image",
+ "NVRAM image is not dword-aligned",
+ "Failed reading from NVRAM",
+ "Idle check parsing failed",
+ "MCP Trace data is corrupt",
+ "Dump doesn't contain meta data - it must be provided in an image file",
+ "Failed to halt MCP",
+ "Failed to resume MCP after halt",
+ "DMAE transaction failed",
+ "Failed to empty SEMI sync FIFO",
+ "IGU FIFO data is corrupt",
+ "MCP failed to mask parities",
+ "FW Asserts parsing failed",
+ "GRC FIFO data is corrupt",
+ "Protection Override data is corrupt",
+ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+ "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+};
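+
+/* Note: the order of s_status_str entries follows enum dbg_status;
+ * qed_dbg_get_status_str() below indexes the array with the status value.
+ */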
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+ "Error",
+ "Error if no traffic",
+ "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+ "ERROR",
+ "TRACE",
+ "DEBUG"
+};
+
+/* Parsing strings */
+static const char * const s_access_strs[] = {
+ "read",
+ "write"
+};
+
+static const char * const s_privilege_strs[] = {
+ "VF",
+ "PDA",
+ "HV",
+ "UA"
+};
+
+static const char * const s_protection_strs[] = {
+ "(default)",
+ "(default)",
+ "(default)",
+ "(default)",
+ "override VF",
+ "override PDA",
+ "override HV",
+ "override UA"
+};
+
+static const char * const s_master_strs[] = {
+ "???",
+ "pxp",
+ "mcp",
+ "msdm",
+ "psdm",
+ "ysdm",
+ "usdm",
+ "tsdm",
+ "xsdm",
+ "dbu",
+ "dmae",
+ "???",
+ "???",
+ "???",
+ "???",
+ "???"
+};
+
+static const char * const s_reg_fifo_error_strs[] = {
+ "grc timeout",
+ "address doesn't belong to any block",
+ "reserved address in block or write to read-only address",
+ "privilege/protection mismatch",
+ "path isolation error"
+};
+
+static const char * const s_igu_fifo_source_strs[] = {
+ "TSTORM",
+ "MSTORM",
+ "USTORM",
+ "XSTORM",
+ "YSTORM",
+ "PSTORM",
+ "PCIE",
+ "NIG_QM_PBF",
+ "CAU",
+ "ATTN",
+ "GRC",
+};
+
+static const char * const s_igu_fifo_error_strs[] = {
+ "no error",
+ "length error",
+ "function disabled",
+ "VF sent command to attnetion address",
+ "host sent prod update command",
+ "read of during interrupt register while in MIMD mode",
+ "access to PXP BAR reserved address",
+ "producer update command to attention index",
+ "unknown error",
+ "SB index not valid",
+ "SB relative index and FID not found",
+ "FID not match",
+ "command with error flag asserted (PCI error or CAU discard)",
+ "VF sent cleanup and RF cleanup is disabled",
+ "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+ {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x201, 0x201, "Write PBA[64:127]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+ IGU_ADDR_TYPE_WRITE_INT_ACK},
+ {0x5f0, 0x5f0, "Attention bits update", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f1, 0x5f1, "Attention bits set", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f2, 0x5f2, "Attention bits clear", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
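+
+/* The table above is scanned linearly in qed_parse_igu_fifo_dump() for the
+ * first entry whose [start_addr, end_addr] range contains the command address.
+ */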
+
+/******************************** Variables **********************************/
+
+/* MCP Trace meta data - used in case the dump doesn't contain the meta data
+ * (e.g. due to no NVRAM access).
+ */
+static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/***************************** Public Functions *******************************/
+
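+/* The binary blob passed to qed_dbg_user_set_bin_ptr() is expected to start
+ * with a u32 buffer count, followed by an array of struct bin_buffer_hdr
+ * entries whose offset/length fields (in bytes, relative to bin_ptr) locate
+ * each debug array within the blob.
+ */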
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+ return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+ return (size + a - b) % size;
+}
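+
+/* Wrap-around examples (illustrative), with size 8:
+ * qed_cyclic_add(6, 3, 8) == 1, qed_cyclic_sub(1, 3, 8) == 6.
+ */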
+
+/* Reads the specified number of bytes from the specified cyclic buffer (up to 4
+ * bytes) and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+ u32 *offset,
+ u32 buf_size, u8 num_bytes_to_read)
+{
+ u8 *bytes_buf = (u8 *)buf;
+ u8 *val_ptr;
+ u32 val = 0;
+ u8 i;
+
+ val_ptr = (u8 *)&val;
+
+ for (i = 0; i < num_bytes_to_read; i++) {
+ val_ptr[i] = bytes_buf[*offset];
+ *offset = qed_cyclic_add(*offset, 1, buf_size);
+ }
+
+ return val;
+}
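+
+/* Note: the bytes are copied into the dword in memory order, so the value is
+ * assembled in host endianness (on a little-endian CPU the first buffer byte
+ * becomes the least significant byte of the returned value).
+ */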
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+ return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+ u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+ *offset += 4;
+ return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+ const char *source_str = &((const char *)buf)[*offset];
+
+ strncpy(dest, source_str, size);
+ dest[size - 1] = '\0';
+ *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
+ * If the specified buffer is NULL, a temporary buffer pointer is returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+ return buf ? (char *)buf + offset : s_temp_buf;
+}
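+
+/* This lets the parsers below be called with a NULL results buffer purely to
+ * compute the required size: sprintf() then writes into s_temp_buf and only
+ * its return value (the printed length) is accumulated.
+ */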
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value is
+ * returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in param_str_val.
+ */
+static u32 qed_read_param(u32 *dump_buf,
+ const char **param_name,
+ const char **param_str_val, u32 *param_num_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0; /* In bytes */
+
+ /* Extract param name */
+ *param_name = char_buf;
+ offset += strlen(*param_name) + 1;
+
+ /* Check param type */
+ if (*(char_buf + offset++)) {
+ /* String param */
+ *param_str_val = char_buf + offset;
+ offset += strlen(*param_str_val) + 1;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ } else {
+ /* Numeric param */
+ *param_str_val = NULL;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ *param_num_val = *(u32 *)(char_buf + offset);
+ offset += 4;
+ }
+
+ return offset / 4;
+}
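+
+/* Illustrative layout of a single param as read by qed_read_param():
+ *   "name\0" <type byte> then, for a string param (type != 0), "value\0"
+ *   padded to a dword boundary, or, for a numeric param (type == 0), padding
+ *   to a dword boundary followed by a u32 value.
+ */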
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+ const char **section_name,
+ u32 *num_section_params)
+{
+ const char *param_str_val;
+
+ return qed_read_param(dump_buf,
+ section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the results
+ * buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+ u32 num_section_params,
+ char *results_buf, u32 *num_chars_printed)
+{
+ u32 i, dump_offset = 0, results_offset = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ const char *param_name;
+ const char *param_str_val;
+ u32 param_num_val = 0;
+
+ dump_offset += qed_read_param(dump_buf + dump_offset,
+ &param_name,
+ &param_str_val, &param_num_val);
+ if (param_str_val)
+ /* String param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %s\n", param_name, param_str_val);
+ else if (strcmp(param_name, "fw-timestamp"))
+ /* Numeric param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %d\n", param_name, param_num_val);
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ *num_chars_printed = results_offset;
+ return dump_offset;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *dump_buf_end,
+ u32 num_rules,
+ bool print_fw_idle_chk,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ u16 i, j;
+
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ /* Go over dumped results */
+ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+ rule_idx++) {
+ const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const char *parsing_str;
+ u32 parsing_str_offset;
+ const char *lsi_msg;
+ u8 curr_reg_id = 0;
+ bool has_fw_msg;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ rule_parsing_data =
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ ptr[hdr->rule_id];
+ parsing_str_offset =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ has_fw_msg =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = &((const char *)
+ s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
+ lsi_msg = parsing_str;
+
+ if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+ return 0;
+
+ /* Skip rule header */
+ dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+
+ /* Update errors/warnings count */
+ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+ hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+ (*num_errors)++;
+ else
+ (*num_warnings)++;
+
+ /* Print rule severity */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s: ",
+ s_idle_chk_severity_str[hdr->severity]);
+
+ /* Print rule message */
+ if (has_fw_msg)
+ parsing_str += strlen(parsing_str) + 1;
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s.",
+ has_fw_msg &&
+ print_fw_idle_chk ? parsing_str : lsi_msg);
+ parsing_str += strlen(parsing_str) + 1;
+
+ /* Print register values */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " Registers:");
+ for (i = 0;
+ i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+ i++) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr
+ = (struct dbg_idle_chk_result_reg_hdr *)
+ dump_buf;
+ bool is_mem =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ u8 reg_id =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+ /* Skip reg header */
+ dump_buf +=
+ (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+
+ /* Skip register names until the required reg_id is
+ * reached.
+ */
+ for (; reg_id > curr_reg_id;
+ curr_reg_id++,
+ parsing_str += strlen(parsing_str) + 1);
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " %s",
+ parsing_str);
+ if (i < hdr->num_dumped_cond_regs && is_mem)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "[%d]", hdr->mem_entry_id +
+ reg_hdr->start_entry);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "=");
+ for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "0x%x", *dump_buf);
+ if (j < reg_hdr->size - 1)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ",");
+ }
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ /* Check if end of dump buffer was exceeded */
+ if (dump_buf > dump_buf_end)
+ return 0;
+ return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If result_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+ u32 num_section_params = 0, num_rules;
+ u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ *parsed_results_bytes = 0;
+ *num_errors = 0;
+ *num_warnings = 0;
+ if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read idle_chk section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &num_rules);
+ if (strcmp(param_name, "num_rules") != 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ if (num_rules) {
+ u32 rules_print_size;
+
+ /* Print FW output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "FW_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ true,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print LSI output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nLSI_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ false,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ }
+
+ /* Print errors/warnings count */
+ if (*num_errors) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+ *num_errors, *num_warnings);
+ } else if (*num_warnings) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfuly (with %d warnings)\n",
+ *num_warnings);
+ } else {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfuly\n");
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+/* Frees the specified MCP Trace meta data */
+static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
+ struct mcp_trace_meta *meta)
+{
+ u32 i;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf,
+ struct mcp_trace_meta *meta)
+{
+ u8 *meta_buf_bytes = (u8 *)meta_buf;
+ u32 offset = 0, signature, i;
+
+ memset(meta, 0, sizeof(*meta));
+
+ /* Read first signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of modules and allocate memory for all the module
+ * pointers.
+ */
+ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+ meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+ if (!meta->modules)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all module strings */
+ for (i = 0; i < meta->modules_num; i++) {
+ u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+ *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+ if (!(*(meta->modules + i))) {
+ /* Update number of modules to be released */
+ meta->modules_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+ *(meta->modules + i));
+ if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+ (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+ }
+
+ /* Read second signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of formats and allocate memory for all formats */
+ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ meta->formats = kzalloc(meta->formats_num *
+ sizeof(struct mcp_trace_format),
+ GFP_KERNEL);
+ if (!meta->formats)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all strings */
+ for (i = 0; i < meta->formats_num; i++) {
+ struct mcp_trace_format *format_ptr = &meta->formats[i];
+ u8 format_len;
+
+ format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+ &offset);
+ format_len =
+ (format_ptr->data &
+ MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+ if (!format_ptr->format_str) {
+ /* Update number of formats to be released */
+ meta->formats_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes,
+ &offset,
+ format_len, format_ptr->format_str);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses an MCP Trace dump buffer.
+ * If result_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_mask, param_shift, param_num_val;
+ u32 num_section_params, offset, end_offset, bytes_left;
+ const char *section_name, *param_name, *param_str_val;
+ u32 trace_data_dwords, trace_meta_dwords;
+ struct mcp_trace_meta meta;
+ struct mcp_trace *trace;
+ enum dbg_status status;
+ const u32 *meta_buf;
+ u8 *trace_buf;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read trace_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_data_dwords = param_num_val;
+
+ /* Prepare trace info */
+ trace = (struct mcp_trace *)dump_buf;
+ trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ offset = trace->trace_oldest;
+ end_offset = trace->trace_prod;
+ bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+ dump_buf += trace_data_dwords;
+
+ /* Read meta_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_meta"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size") != 0)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_meta_dwords = param_num_val;
+
+ /* Choose meta data buffer */
+ if (!trace_meta_dwords) {
+ /* Dump doesn't include meta data */
+ if (!s_mcp_trace_meta.ptr)
+ return DBG_STATUS_MCP_TRACE_NO_META;
+ meta_buf = s_mcp_trace_meta.ptr;
+ } else {
+ /* Dump includes meta data */
+ meta_buf = dump_buf;
+ }
+
+ /* Allocate meta data memory */
+ status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+ if (status != DBG_STATUS_OK)
+ goto free_mem;
+
+ /* Ignore the level and modules masks - just print everything that is
+ * already in the buffer.
+ */
+ while (bytes_left) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
+ if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ MFW_TRACE_ENTRY_SIZE);
+ bytes_left -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+ if (format_idx >= meta.formats_num) {
+ u8 format_size =
+ (u8)((header &
+ MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_SHIFT);
+
+ if (bytes_left < format_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ offset = qed_cyclic_add(offset,
+ format_size, trace->size);
+ bytes_left -= format_size;
+ continue;
+ }
+
+ format_ptr = &meta.formats[format_idx];
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size =
+ (u8)((format_ptr->data &
+ param_mask) >> param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+ if (bytes_left < param_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ param_size);
+ bytes_left -= param_size;
+ }
+
+ format_level =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ format_module =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_MODULE_MASK) >>
+ MCP_TRACE_FORMAT_MODULE_SHIFT);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ /* Print current message to results buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ meta.modules[format_module]);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ format_ptr->format_str, params[0], params[1],
+ params[2]);
+ }
+
+free_mem:
+ *parsed_results_bytes = results_offset + 1;
+ qed_mcp_trace_free_meta(p_hwfn, &meta);
+ return status;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If result_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct reg_fifo_element *elements;
+ u8 i, j, err_val, vf_val;
+ char vf_str[4];
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read reg_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "reg_fifo_data"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+ elements = (struct reg_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ bool err_printed = false;
+
+ /* Discover if element belongs to a VF or a PF */
+ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+ if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+ sprintf(vf_str, "%s", "N/A");
+ else
+ sprintf(vf_str, "%d", vf_val);
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ elements[i].data,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ACCESS)],
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].
+ data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PROTECTION)],
+ s_master_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_MASTER)]);
+
+ /* Print errors */
+ for (j = 0,
+ err_val = GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ERROR);
+ j < ARRAY_SIZE(s_reg_fifo_error_strs);
+ j++, err_val >>= 1) {
+ if (!(err_val & 0x1))
+ continue;
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ ", ");
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If result_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct igu_fifo_element *elements;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u8 i, j;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read igu_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "igu_fifo_data"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+ elements = (struct igu_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ /* dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ u64 dword12 =
+ ((u64)elements[i].dword2 << 32) | elements[i].dword1;
+ bool is_wr_cmd = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ bool is_pf = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ u16 cmd_addr = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ u8 source = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ u8 err_type = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+ const struct igu_fifo_addr_data *addr_data = NULL;
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
+ j++)
+ if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
+ cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
+ addr_data = &s_igu_fifo_addr_data[j];
+ if (!addr_data)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (addr_data->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data,
+ " vector_num=0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB=0x%x", cmd_addr - addr_data->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ /* Prepare parsed write data */
+ if (is_wr_cmd) {
+ u32 wr_data = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ u32 prod_cons = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_PROD_CONS);
+ u8 is_cleanup = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data,
+ "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ u8 cleanup_type = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ u8 en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ u8 segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ u8 timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb ==
+ 1 ? "disable" : "nop") :
+ "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+ } else {
+ parsed_wr_data[0] = '\0';
+ }
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
+ elements[i].dword2, elements[i].dword1,
+ elements[i].dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd", cmd_addr,
+ (!is_pf && addr_data->vf_desc)
+ ? addr_data->vf_desc : addr_data->desc,
+ parsed_addr_data, parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+static enum dbg_status
+qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct protection_override_element *elements;
+ u8 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read protection_override_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "protection_override_data"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ elements = (struct protection_override_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ u32 address = GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ i, address,
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE),
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "protection override contained %d elements",
+ num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If result_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, num_section_params, param_num_val, i;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+ while (!last_section_found) {
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ continue;
+ } else if (strcmp(section_name, "fw_asserts")) {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ /* Extract params */
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace APIs */
+enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ results_buf, &num_errors,
+ &num_warnings);
+}
+
+/* Feature meta data lookup table */
+static struct {
+ char *name;
+ enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *size);
+ enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ u32 buf_size, u32 *dumped_dwords);
+ enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 num_dumped_dwords,
+ char *results_buf);
+ enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+} qed_features_lookup[] = {
+ {"grc", qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL},
+ {"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
+ qed_dbg_idle_chk_dump,
+ qed_print_idle_chk_results_wrapper,
+ qed_get_idle_chk_results_buf_size},
+ {"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
+ qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+ qed_get_mcp_trace_results_buf_size},
+ {"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
+ qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+ qed_get_reg_fifo_results_buf_size},
+ {"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
+ qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+ qed_get_igu_fifo_results_buf_size},
+ {"protection_override",
+ qed_dbg_protection_override_get_dump_buf_size,
+ qed_dbg_protection_override_dump,
+ qed_print_protection_override_results,
+ qed_get_protection_override_results_buf_size},
+ {"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
+ qed_dbg_fw_asserts_dump,
+ qed_print_fw_asserts_results,
+ qed_get_fw_asserts_results_buf_size},
+};
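+
+/* Note: the entries above are indexed by enum qed_dbg_features (see
+ * qed_dbg_dump() and format_feature() below), so their order must match that
+ * enum.
+ */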
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+ u32 i, precision = 80;
+
+ if (!p_text_buf)
+ return;
+
+ pr_notice("\n%.*s", precision, p_text_buf);
+ for (i = precision; i < text_size; i += precision)
+ pr_cont("%.*s", precision, p_text_buf + i);
+ pr_cont("\n");
+}
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 text_size_bytes, null_char_pos, i;
+ enum dbg_status rc;
+ char *text_buf;
+
+ /* Check if feature supports formatting capability */
+ if (!qed_features_lookup[feature_idx].results_buf_size)
+ return DBG_STATUS_OK;
+
+ /* Obtain size of formatted output */
+ rc = qed_features_lookup[feature_idx].
+ results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, &text_size_bytes);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Make sure that the allocated size is a multiple of dword (4 bytes) */
+ null_char_pos = text_size_bytes - 1;
+ text_size_bytes = (text_size_bytes + 3) & ~0x3;
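+ /* e.g. a 13-byte result is rounded up to 16 bytes; null_char_pos keeps the
+ * position of the original terminator so the added padding can be replaced
+ * with '\n' characters further down.
+ */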
+
+ if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "formatted size of feature was too small %d. Aborting\n",
+ text_size_bytes);
+ return DBG_STATUS_INVALID_ARGS;
+ }
+
+ /* Allocate temp text buf */
+ text_buf = vzalloc(text_size_bytes);
+ if (!text_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Decode feature opcodes to string on temp buf */
+ rc = qed_features_lookup[feature_idx].
+ print_results(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, text_buf);
+ if (rc != DBG_STATUS_OK) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Replace the original null character with a '\n' character.
+ * The bytes that were added as a result of the dword alignment are also
+ * padded with '\n' characters.
+ */
+ for (i = null_char_pos; i < text_size_bytes; i++)
+ text_buf[i] = '\n';
+
+ /* Dump printable feature to log */
+ if (p_hwfn->cdev->dbg_params.print_data)
+ qed_dbg_print_feature(text_buf, text_size_bytes);
+
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
+ * and formatted text buffer.
+ */
+ vfree(feature->dump_buf);
+ feature->dump_buf = text_buf;
+ feature->buf_size = text_size_bytes;
+ feature->dumped_dwords = text_size_bytes / 4;
+ return rc;
+}
+
+/* Generic function for performing the dump of a debug feature. */
+enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+ qed_features_lookup[feature_idx].name);
+
+ /* If dump_buf was already allocated, free it (this can happen if dump
+ * was called but the file was never read).
+ * We can't reuse the buffer as is since its size may have changed.
+ */
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+
+ /* Get buffer size from hsi, allocate accordingly, and perform the
+ * dump.
+ */
+ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+ feature->buf_size = buf_size_dwords * sizeof(u32);
+ feature->dump_buf = vmalloc(feature->buf_size);
+ if (!feature->dump_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ rc = qed_features_lookup[feature_idx].
+ perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
+ feature->buf_size / sizeof(u32),
+ &feature->dumped_dwords);
+
+ /* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
+ * In this case the buffer holds valid binary data, but we won't be able
+ * to parse it (since parsing relies on data in NVRAM which is only
+ * accessible when the MFW is responsive). Skip the formatting but return
+ * success so that the binary data is still provided.
+ */
+ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return DBG_STATUS_OK;
+
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Format output */
+ rc = format_feature(p_hwfn, feature_idx);
+ return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+ num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+ num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_FEATURE_SHIFT 24
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+enum debug_print_features {
+ OLD_MODE = 0,
+ IDLE_CHK = 1,
+ GRC_DUMP = 2,
+ MCP_TRACE = 3,
+ REG_FIFO = 4,
+ PROTECTION_OVERRIDE = 5,
+ IGU_FIFO = 6,
+ PHY = 7,
+ FW_ASSERTS = 8,
+};
+
+static u32 qed_calc_regdump_header(enum debug_print_features feature,
+ int engine, u32 feature_size, u8 omit_engine)
+{
+ /* Insert the engine, feature and omit-engine flag into the header and
+ * combine it with the feature size.
+ */
+ return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
+ (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
+ (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+}
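
A standalone sketch of decoding a header produced by qed_calc_regdump_header(), using the same bit positions as the REGDUMP_HEADER_* shifts above; the sample value is illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdr = 0x84001234;		/* engine 1, feature 4 (REG_FIFO), 0x1234 bytes */
	uint32_t size = hdr & 0xffffff;		/* bits 0-23: feature size in bytes */
	uint32_t feature = (hdr >> 24) & 0x3f;	/* bits 24-29: debug_print_features value */
	uint32_t omit = (hdr >> 30) & 0x1;	/* bit 30: omit-engine flag */
	uint32_t engine = (hdr >> 31) & 0x1;	/* bit 31: engine */

	printf("feature %u engine %u omit %u size 0x%x\n",
	       feature, engine, omit, size);
	return 0;
}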
+
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+ u8 cur_engine, omit_engine = 0, org_engine;
+ u32 offset = 0, feature_size;
+ int rc;
+
+ if (cdev->num_hwfns == 1)
+ omit_engine = 1;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Collect idle_chks and a GRC dump for each HW function */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "obtaining idle_chk and grcdump for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+
+ /* First idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* Second idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* reg_fifo dump */
+ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(REG_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+ }
+
+ /* igu_fifo dump */
+ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
+ }
+
+ /* protection_override dump */
+ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE,
+ &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev,
+ "qed_dbg_protection_override failed. rc = %d\n",
+ rc);
+ }
+
+ /* fw_asserts dump */
+ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(FW_ASSERTS, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+ rc);
+ }
+
+ /* GRC dump - must be last because when the MCP is stuck it would
+ * clutter the idle_chk, reg_fifo, ... dumps.
+ */
+ rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
+ }
+ }
+
+ /* mcp_trace */
+ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ return 0;
+}
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+ u8 cur_engine, org_engine;
+ u32 regs_len = 0;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Engine specific */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "calculating idle_chk and grcdump register length for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+ }
+
+ /* Engine common */
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_set_debug_engine(cdev, org_engine);
+
+ return regs_len;
+}
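
Given the layout built by qed_dbg_all_data() (a u32 header followed by the feature data, repeated), the resulting buffer can be walked chunk by chunk; a standalone sketch with a single illustrative chunk (not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[64];
	uint32_t hdr = (4u << 24) | 8u;	/* feature 4 (REG_FIFO), 8 data bytes */
	size_t total = sizeof(hdr) + 8, off = 0;

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &hdr, sizeof(hdr));	/* single chunk for the example */

	while (off + sizeof(uint32_t) <= total) {
		uint32_t h;

		memcpy(&h, buf + off, sizeof(h));
		printf("feature %u, %u bytes at offset %zu\n",
		       (h >> 24) & 0x3f, h & 0xffffff, off + sizeof(h));
		off += sizeof(h) + (h & 0xffffff);
	}
	return 0;
}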
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ enum dbg_status dbg_rc;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ /* Acquire ptt */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EINVAL;
+
+ /* Get dump */
+ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+ if (dbg_rc != DBG_STATUS_OK) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+ qed_dbg_get_status_str(dbg_rc));
+ *num_dumped_bytes = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "copying debugfs feature to external buffer\n");
+ memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+ *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ 4;
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ if (!p_ptt)
+ return -EINVAL;
+
+ rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ buf_size_dwords = 0;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+ return qed_feature->buf_size;
+}
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+ return cdev->dbg_params.engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+ engine_number);
+ cdev->dbg_params.engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+ const u8 *dbg_values;
+
+ /* Debug values are after init values.
+ * The offset is the first dword of the file.
+ */
+ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+ qed_dbg_set_bin_ptr((u8 *)dbg_values);
+ qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+}
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+ struct qed_dbg_feature *feature = NULL;
+ enum qed_dbg_features feature_idx;
+
+ /* Debug features' buffers may be allocated if debug feature was used
+ * but dump wasn't called.
+ */
+ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+ feature = &cdev->dbg_params.features[feature_idx];
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644
index 000000000000..f872d7324814
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -0,0 +1,54 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEBUGFS_H
+#define _QED_DEBUGFS_H
+
+enum qed_dbg_features {
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_IGU_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_NUM
+};
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b5..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,18 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+#include "qed_roce.h"
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+static DEFINE_SPINLOCK(qm_lock);
+
+#define QED_MIN_DPIS (4)
+#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
/* API common to all protocols */
enum BAR_ID {
@@ -44,8 +48,7 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -70,8 +73,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
}
}
-void qed_init_dp(struct qed_dev *cdev,
- u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
u32 i;
@@ -150,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+ qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -343,7 +348,6 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
return 0;
alloc_err:
- DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
@@ -407,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
+#ifdef CONFIG_QED_LL2
+ struct qed_ll2_info *p_ll2_info;
+#endif
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
@@ -427,18 +434,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
RESC_NUM(p_hwfn, QED_L2_QUEUE);
p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Tx Cids\n");
+ if (!p_hwfn->p_tx_cids)
goto alloc_no_mem;
- }
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for Rx Cids\n");
+ if (!p_hwfn->p_rx_cids)
goto alloc_no_mem;
- }
}
for_each_hwfn(cdev, i) {
@@ -523,29 +524,29 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2) {
+ p_ll2_info = qed_ll2_alloc(p_hwfn);
+ if (!p_ll2_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ll2_info = p_ll2_info;
+ }
+#endif
+
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dmae_info structure\n");
+ if (rc)
goto alloc_err;
- }
/* DCBX initialization */
rc = qed_dcbx_info_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate memory for dcbx structure\n");
+ if (rc)
goto alloc_err;
- }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
- if (!cdev->reset_stats) {
- DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!cdev->reset_stats)
+ goto alloc_no_mem;
return 0;
@@ -580,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2)
+ qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
}
}
@@ -605,9 +610,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
- DP_NOTICE(
- p_hwfn,
- "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -701,17 +705,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_block->function_id,
- 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
- sb_entry);
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
}
}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+ struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
@@ -759,7 +760,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_port_unpretend(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
- if (rc != 0)
+ if (rc)
return rc;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -780,6 +781,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -787,38 +791,141 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int hw_mode)
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- int rc = 0;
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
- if (rc != 0)
- return rc;
+ /* Calculate DPI size */
+ dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+ dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+ dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ dpi_bit_shift = ilog2(dpi_page_size / 4096);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
+ dpi_count = pwm_region_size / dpi_page_size;
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%08x] is first eth on engine\n", pf_id);
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
- /* We should have configured BIT for ppfid, i.e., the
- * relative function number in the port. But there's a
- * bug in LLH in BB where the ppfid is actually engine
- * based, so we need to take this into account.
- */
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
- }
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return -EINVAL;
+
+ return 0;
+}
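
A standalone sketch of the DPI sizing arithmetic above; the WID size (1024) and page size (4096) used here are assumptions for illustration, not values taken from this patch:

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int wid = 1024, page = 4096, n_cpus = 6;
	unsigned int pwm_region = 1024 * 1024;	/* assumed 1 MiB PWM region */
	unsigned int dpi_page, shift = 0, count;

	dpi_page = wid * n_cpus;		/* 6144 */
	if (dpi_page < page)
		dpi_page = page;
	dpi_page = roundup_pow2(dpi_page);	/* 8192 */
	while ((4096u << shift) < dpi_page)
		shift++;			/* ilog2(dpi_page / 4096) = 1 */
	count = pwm_region / dpi_page;		/* 128 DPIs fit */

	printf("dpi_page %u shift %u count %u\n", dpi_page, shift, count);
	return 0;
}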
+
+enum QED_ROCE_EDPM_MODE {
+ QED_ROCE_EDPM_MODE_ENABLE = 0,
+ QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+ QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
- /* Take the protocol-based hit vector if there is a hit,
- * otherwise take the other vector.
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ int rc = 0;
+ u8 cond;
+
+ db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ if (p_hwfn->cdev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions */
+ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ NULL) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ NULL);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->cdev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ if (pwm_regsize < QED_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->cdev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize,
+ QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
*/
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ n_cpus = num_active_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- return rc;
+
+ cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ if (cond)
+ qed_rdma_dpm_bar(p_hwfn, p_ptt);
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ norm_regsize,
+ pwm_regsize,
+ p_hwfn->dpi_size,
+ p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+ p_hwfn->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis);
+ return -EINVAL;
+ }
+
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return 0;
+}
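
A standalone sketch of the normal/PWM doorbell BAR split computed above; the DEMS size (4 bytes), connection count and BAR size are illustrative assumptions, not values from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int dems = 4, non_pwm_conn = 3000;
	unsigned int db_bar = 2 * 1024 * 1024;	/* assumed 2 MiB doorbell BAR */
	unsigned int norm, pwm, min_addr_reg1;

	norm = ((dems * non_pwm_conn + 4095) / 4096) * 4096;	/* 12288 */
	min_addr_reg1 = norm / 4096;				/* 3 pages */
	pwm = db_bar - norm;			/* remainder is the PWM region */

	printf("norm %u pwm %u min_addr_reg1 %u\n", norm, pwm, min_addr_reg1);
	return 0;
}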
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int hw_mode)
+{
+ return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+ p_hwfn->port_id, hw_mode);
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -848,7 +955,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_init_rt(p_hwfn);
/* Set VLAN in NIG if needed */
- if (hw_mode & (1 << MODE_MF_SD)) {
+ if (hw_mode & BIT(MODE_MF_SD)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +963,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
/* Enable classification by MAC if needed */
- if (hw_mode & (1 << MODE_MF_SI)) {
+ if (hw_mode & BIT(MODE_MF_SI)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Configuring TAGMAC_CLS_TYPE\n");
STORE_RT_REG(p_hwfn,
@@ -871,7 +978,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != 0)
+ if (rc)
return rc;
/* PF Init sequence */
@@ -887,20 +994,9 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
- if (hw_mode & (1 << MODE_MF_SI)) {
- u8 pf_id = 0;
- u32 val = 0;
-
- if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
- if (p_hwfn->rel_pf_id == pf_id) {
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
- "PF[%d] is first ETH on engine\n",
- pf_id);
- val = 1;
- }
- qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
- }
- }
+ rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
if (b_hw_start) {
/* enable interrupts */
@@ -950,8 +1046,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_main_ptt);
memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
- p_hwfn->mcp_info->mfw_mb_cur,
- p_hwfn->mcp_info->mfw_mb_length);
+ p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
@@ -971,7 +1066,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
+ if (rc)
return rc;
}
@@ -988,8 +1083,7 @@ int qed_hw_init(struct qed_dev *cdev,
qed_calc_hw_mode(p_hwfn);
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
- &load_code);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
return rc;
@@ -1004,11 +1098,6 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
- spin_lock_init(&qm_lock);
- qm_lock_init = true;
- }
-
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1071,9 +1160,8 @@ int qed_hw_init(struct qed_dev *cdev,
}
#define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int i;
@@ -1084,8 +1172,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
+ (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
/* Dependent on number of connection/tasks, possibly
@@ -1190,8 +1277,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
}
DP_VERBOSE(p_hwfn,
- NETIF_MSG_IFDOWN,
- "Shutting down the fastpath\n");
+ NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1219,14 +1305,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static int qed_reg_assert(struct qed_hwfn *hwfn,
- struct qed_ptt *ptt, u32 reg,
- bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 reg, bool expected)
{
- u32 assert_val = qed_rd(hwfn, ptt, reg);
+ u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
if (assert_val != expected) {
- DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
reg, expected);
return -EINVAL;
}
@@ -1306,8 +1391,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1317,7 +1401,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -1326,6 +1411,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
PXP_CONCRETE_FID_PFID);
p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1333,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ /* Each RoCE CNQ requires 1 status block + 1 CNQ. We divide the
+ * status blocks equally between L2 / RoCE but with consideration as
+ * to how many L2 queues / CNQs we have.
+ */
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_features++;
+
+ feat_num[QED_RDMA_CNQ] =
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+ }
+#endif
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1373,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+ resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
+ num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1396,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
"RL = %d start = %d\n"
"MAC = %d start = %d\n"
"VLAN = %d start = %d\n"
- "ILT = %d start = %d\n",
+ "ILT = %d start = %d\n"
+ "LL2_QUEUE = %d start = %d\n",
p_hwfn->hw_info.resc_num[QED_SB],
p_hwfn->hw_info.resc_start[QED_SB],
p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1412,13 +1519,14 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_num[QED_VLAN],
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
- p_hwfn->hw_info.resc_start[QED_ILT]);
+ p_hwfn->hw_info.resc_start[QED_ILT],
+ RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+ RESC_START(p_hwfn, QED_LL2_QUEUE));
return 0;
}
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1472,8 +1580,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
- core_cfg);
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
}
@@ -1484,11 +1591,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port, speed_cap_mask));
- link->speed.advertised_speeds =
- link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities =
- link->speed.advertised_speeds;
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
@@ -1517,8 +1624,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
link->speed.forced_speed = 100000;
break;
default:
- DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
- link_temp);
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1628,10 +1734,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
- p_hwfn->num_funcs_on_engine);
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
static int
@@ -1703,10 +1809,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
u32 tmp;
/* Read Vendor Id / Device Id */
- pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
- &cdev->vendor_id);
- pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
- &cdev->device_id);
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1771,10 +1876,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ if (rc)
goto err0;
- }
/* Allocate the main PTT */
p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
@@ -1782,7 +1885,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
- if (rc != 0)
+ if (rc)
goto err1;
}
@@ -1804,10 +1907,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
- if (rc) {
- DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ if (rc)
goto err2;
- }
return rc;
err2:
@@ -2015,10 +2116,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2048,10 +2147,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
qed_chain_init_mem(p_chain, p_virt, p_phys);
qed_chain_reset(p_chain);
@@ -2068,13 +2165,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
void *p_virt = NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vmalloc(size);
- if (!pp_virt_addr_tbl) {
- DP_NOTICE(cdev,
- "Failed to allocate memory for the chain virtual addresses table\n");
+ pp_virt_addr_tbl = vzalloc(size);
+ if (!pp_virt_addr_tbl)
return -ENOMEM;
- }
- memset(pp_virt_addr_tbl, 0, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -2087,19 +2180,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
size, &p_pbl_phys, GFP_KERNEL);
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ if (!p_pbl_virt)
return -ENOMEM;
- }
for (i = 0; i < page_cnt; i++) {
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
&p_phys, GFP_KERNEL);
- if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ if (!p_virt)
return -ENOMEM;
- }
if (i == 0) {
qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2134,7 +2223,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
if (rc) {
DP_NOTICE(cdev,
- "Cannot allocate a chain with the given arguments:\n"
+ "Cannot allocate a chain with the given arguments:\n");
+ DP_NOTICE(cdev,
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
return rc;
@@ -2183,8 +2273,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
return 0;
}
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
u8 min, max;
@@ -2203,8 +2292,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
u8 min, max;
@@ -2223,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+ u8 *p_filter)
+{
+ *p_high = p_filter[1] | (p_filter[0] << 8);
+ *p_low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+}
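
A standalone sketch of how qed_llh_mac_to_filter() splits a MAC address across the two LLH filter registers (the MAC value is just an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	uint32_t high = mac[1] | (mac[0] << 8);
	uint32_t low = mac[5] | (mac[4] << 8) | (mac[3] << 16) |
		       ((uint32_t)mac[2] << 24);

	printf("high 0x%04x low 0x%08x\n", high, low);	/* 0x000e 0x1e123456 */
	return 0;
}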
+
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return 0;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an empty LLH filter to utilize\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is added at %d\n",
+ p_filter, i);
+
+ return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is removed from %d\n",
+ p_filter, i);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
@@ -2386,8 +2566,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
* 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
*/
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
- u16 vport_id, u32 req_rate,
- u32 min_pf_rate)
+ u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +2650,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
- if (rc == 0)
+ if (!rc)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
* *@brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 6f9d3b831a2a..72eee29c677f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -536,6 +536,247 @@ struct core_conn_context {
struct regpair ustorm_st_padding[2];
};
+enum core_error_handle {
+ LL2_DROP_PACKET,
+ LL2_DO_NOTHING,
+ LL2_ASSERT,
+ MAX_CORE_ERROR_HANDLE
+};
+
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ MAX_CORE_EVENT_OPCODE
+};
+
+enum core_l4_pseudo_checksum_mode {
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+};
+
+struct core_ll2_rx_prod {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+ __le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START,
+ CORE_RAMROD_TX_QUEUE_START,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd;
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+struct core_rx_cqe_opaque_data {
+ __le32 data[2];
+};
+
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_TYPE_REGULAR,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+ CORE_RX_CQE_TYPE_SLOW_PATH,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+struct core_rx_fast_path_cqe {
+ u8 type;
+ u8 placement_offset;
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length;
+ __le16 vlan;
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved[4];
+};
+
+struct core_rx_gsi_offload_cqe {
+ u8 type;
+ u8 data_length_error;
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length;
+ __le16 vlan;
+ __le32 src_mac_addrhi;
+ __le16 src_mac_addrlo;
+ u8 reserved1[2];
+ __le32 gid_dst[4];
+};
+
+struct core_rx_slow_path_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ __le32 reserved1[7];
+};
+
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+ struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base;
+ struct regpair cqe_pbl_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 drop_ttl0_flg;
+ __le16 num_of_pbl_pages;
+ u8 inner_vlan_removal_en;
+ u8 queue_id;
+ u8 main_func_queue;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag;
+ u8 reserved[7];
+};
+
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 queue_id;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+struct core_tx_bd_flags {
+ u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
+
+};
+
+struct core_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 nw_vlan_or_lb_echo;
+ u8 bitfield0;
+#define CORE_TX_BD_NBDS_MASK 0xF
+#define CORE_TX_BD_NBDS_SHIFT 0
+#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
+#define CORE_TX_BD_RESERVED0_MASK 0x7
+#define CORE_TX_BD_RESERVED0_SHIFT 5
+ struct core_tx_bd_flags bd_flags;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED1_MASK 0x1
+#define CORE_TX_BD_RESERVED1_SHIFT 15
+};
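
A standalone sketch of the MASK/SHIFT convention used throughout these HSI bit fields, here packing the NBDS and ROCE_FLAV fields of bitfield0; the set_field() helper is illustrative, not the driver's own SET_FIELD macro:

#include <stdint.h>
#include <stdio.h>

#define NBDS_MASK		0xF
#define NBDS_SHIFT		0
#define ROCE_FLAV_MASK		0x1
#define ROCE_FLAV_SHIFT		4

/* Clear the field, then write the new value into its bit range */
static void set_field(uint8_t *word, uint8_t mask, uint8_t shift, uint8_t val)
{
	*word &= ~(mask << shift);
	*word |= (val & mask) << shift;
}

int main(void)
{
	uint8_t bitfield0 = 0;

	set_field(&bitfield0, NBDS_MASK, NBDS_SHIFT, 3);	/* 3 BDs in packet */
	set_field(&bitfield0, ROCE_FLAV_MASK, ROCE_FLAV_SHIFT, 1);

	printf("bitfield0 0x%02x nbds %u\n", (unsigned int)bitfield0,
	       (unsigned int)((bitfield0 >> NBDS_SHIFT) & NBDS_MASK));	/* 0x13, 3 */
	return 0;
}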
+
+enum core_tx_dest {
+ CORE_TX_DEST_NW,
+ CORE_TX_DEST_LB,
+ MAX_CORE_TX_DEST
+};
+
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 stats_en;
+ u8 stats_id;
+ u8 conn_type;
+ __le16 pbl_size;
+ __le16 qm_pq_id;
+ u8 gsi_offload_flag;
+ u8 resrved[3];
+};
+
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -636,9 +877,33 @@ struct hsi_fp_ver_struct {
};
/* Mstorm non-triggering VF zone */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID,
+ VF_ZONE_FUNC_NOT_ENABLED,
+ ETH_PACKET_TOO_SMALL,
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION,
+ ETH_ILLEGAL_INBAND_TAGS,
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP,
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC,
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/* Mstorm VF zone */
@@ -705,13 +970,17 @@ struct pf_start_ramrod_data {
struct protocol_dcb_data {
u8 dcb_enable_flag;
+ u8 reserved_a;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved;
+ u8 reserved_b;
+ u8 reserved0;
};
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
+ u8 update_rx_def_ucast_clss;
+ u8 update_rx_def_non_ucast_clss;
u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
@@ -727,7 +996,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[3];
+ __le16 reserved[2];
};
struct pf_update_ramrod_data {
@@ -736,16 +1005,17 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved2;
+ __le16 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -766,10 +1036,14 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
-/* Pstorm non-triggering VF zone */
+struct rdma_sent_stats {
+ struct regpair sent_bytes;
+ struct regpair sent_pkts;
+};
+
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
- struct regpair reserved[2];
+ struct rdma_sent_stats rdma_stats;
};
/* Pstorm VF zone */
@@ -786,7 +1060,11 @@ struct ramrod_header {
__le16 echo;
};
-/* Slowpath Element (SPQE) */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes;
+ struct regpair rcv_pkts;
+};
+
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -794,7 +1072,7 @@ struct slow_path_element {
/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {
- struct regpair reserved[2];
+ struct rdma_rcv_stats rdma_stats;
};
struct tstorm_per_port_stat {
@@ -802,9 +1080,14 @@ struct tstorm_per_port_stat {
struct regpair mac_error_discard;
struct regpair mftag_filter_discard;
struct regpair eth_mac_filter_discard;
- struct regpair reserved[5];
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair reserved;
+ struct regpair roce_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1[2];
+ struct regpair reserved1;
+ struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
@@ -870,7 +1153,13 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
-/* Attentions status block */
+enum vf_zone_size_mode {
+ VF_ZONE_SIZE_MODE_DEFAULT,
+ VF_ZONE_SIZE_MODE_DOUBLE,
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1442,13 +1731,6 @@ enum bin_dbg_buffer_type {
MAX_BIN_DBG_BUFFER_TYPE
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_RESERVED,
- CHIP_BB_B0,
- CHIP_RESERVED2,
- MAX_CHIP_IDS
-};
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
@@ -1527,6 +1809,371 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+/* condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* memory data for registers dump */
+struct dbg_dump_mem {
+ __le32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+};
+
+/* register data for registers dump */
+struct dbg_dump_reg {
+ __le32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* split header for registers dump */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ __le16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries; /* number of registers entries to check */
+ u8 entry_size; /* size of registers entry (in dwords) */
+ u8 start_entry; /* index of the first entry to check */
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id; /* Failing rule index */
+ __le16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ __le16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ __le16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ __le16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ __le16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
+ u8 hw_id; /* HW ID associated with the block */
+ u8 line_num; /* Debug line number to select */
+ u8 right_shift; /* Number of units to right-shift the debug data (0-3) */
+ u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
+ u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
+ u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
+ */
+ u8 reserved;
+};
+
+/* Debug Bus Clients */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ __le32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 fast_enabled;
+ u8 fast_mode;
+ u8 slow_enabled;
+ u8 slow_mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+ __le32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ __le32 app_version; /* The tools version number of the application */
+ u8 state; /* The current debug bus state */
+ u8 hw_dwords; /* HW dwords per cycle */
+ u8 next_hw_id; /* Next HW ID to be associated with an input */
+ u8 num_enabled_blocks; /* Number of blocks enabled for recording */
+ u8 num_enabled_storms; /* Number of Storms enabled for recording */
+ u8 target; /* Output target */
+ u8 next_trigger_state; /* ID of next trigger state to be added */
+ u8 next_constraint_id; /* ID of next filter/trigger constraint to be
+ * added.
+ */
+ u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
+ u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
+ u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
+ * (0/1).
+ */
+ u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
+ u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
+ u8 adding_filter; /* If true, the next added constraint belongs to the
+ * filter. Otherwise, it belongs to the last added
+ * trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 filter_pre_trigger; /* Indicates if the recording filter should be
+ * applied before the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 filter_post_trigger; /* Indicates if the recording filter should be
+ * applied after the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
+ * Otherwise, each input is assigned a different HW ID
+ * (0/1).
+ */
+ u8 rcv_from_other_engine; /* Indicates if the other engine sends its NW
+ * recording to this engine (0/1).
+ */
+ struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
+ * only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ __le16 reserved;
+ struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
+ DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
+ * recording.
+ */
+ DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
+ DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ /* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ /* records debug bus to the NW */
+ DBG_BUS_TARGET_ID_NIG,
+ /* records debug bus to a PCI buffer */
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ __le32 param_val[40]; /* Value of each GRC parameter. Array size must
+ * match the enum dbg_grc_params.
+ */
+ u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
+ * set by the user (0/1). Array size must
+ * match the enum dbg_grc_params.
+ */
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
+ DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
+ DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
+ DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
+ DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
+ DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
+ DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
+ DBG_GRC_PARAM_RESERVED, /* reserved */
+ DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DMAE, /* dump DMAE memories (0/1) */
+ DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
+ DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
+ DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
+ DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
+ DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
+ DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
+ /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ /* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+ /* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ MAX_DBG_GRC_PARAMS
+};
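+
+/* Illustrative sketch (not part of the HSI): param_val[] and
+ * param_set_by_user[] in struct dbg_grc_data above are indexed by this enum,
+ * e.g. (with grc being a hypothetical struct dbg_grc_data pointer):
+ *
+ *   u32 dump_tstorm = le32_to_cpu(grc->param_val[DBG_GRC_PARAM_DUMP_TSTORM]);
+ *   bool set_by_user = grc->param_set_by_user[DBG_GRC_PARAM_DUMP_TSTORM];
+ */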
+
+/* Debug reset registers */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -1579,9 +2226,45 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
MAX_DBG_STATUS
};
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ __le32 buf_size; /* Idle check buffer size in dwords */
+ u8 buf_size_set; /* Indicates if the idle check buffer size was set
+ * (0/1).
+ */
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc; /* GRC Dump data */
+ struct dbg_bus_data bus; /* Debug Bus data */
+ struct idle_chk_data idle_chk; /* Idle Check data */
+ u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
+ u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ */
+ u8 chip_id; /* Chip ID (from enum chip_ids) */
+ u8 platform_id; /* Platform ID (from enum platform_ids) */
+ u8 initialized; /* Indicates if the data was initialized */
+ u8 reserved;
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -1589,7 +2272,41 @@ enum dbg_status {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
-/* QM per-port init parameters */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc;
+ __le32 headroom_per_tc;
+ __le32 min_pkt_size;
+ __le32 max_ports_per_engine;
+ u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ __le16 weight;
+};
+
+struct init_ets_req {
+ __le32 mtu;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+struct init_nig_lb_rl_req {
+ __le16 lb_mac_rate;
+ __le16 lb_rate;
+ __le32 mtu;
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id;
+ u8 valid;
+};
+
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -1619,7 +2336,7 @@ struct init_qm_vport_params {
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
@@ -1627,15 +2344,50 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+struct fw_asserts_ram_section {
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
+ u8 list_dword_offset;
+ u8 list_element_dword_size;
+ u8 list_num_elements;
+ u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+ u8 major; /* Firmware major version number */
+ u8 minor; /* Firmware minor version number */
+ u8 rev; /* Firmware revision version number */
+ u8 eng; /* Firmware engineering version number (for bootleg versions) */
+};
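+
+/* Illustrative sketch (not part of the HSI): a printable version string can
+ * be built straight from these fields (ver being a hypothetical
+ * struct fw_ver_num pointer):
+ *
+ *   char buf[16];
+ *
+ *   snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ *            ver->major, ver->minor, ver->rev, ver->eng);
+ */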
+
+struct fw_ver_info {
+ __le16 tools_ver; /* Tools version number */
+ u8 image_id; /* FW image ID (e.g. main) */
+ u8 reserved1;
+ struct fw_ver_num num; /* FW version number */
+ __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver;
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+ __le32 grc_addr;
+ __le32 size;
+};
+
enum init_modes {
MODE_RESERVED,
MODE_BB_B0,
- MODE_RESERVED2,
+ MODE_K2,
MODE_ASIC,
+ MODE_RESERVED2,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
- MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -1644,7 +2396,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_40G,
- MODE_RESERVED7,
+ MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -1674,11 +2426,11 @@ struct bin_buffer_hdr {
/* binary init buffer types */
enum bin_init_buffer_type {
- BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
- BIN_BUF_IRO,
+ BIN_BUF_INIT_IRO,
MAX_BIN_INIT_BUFFER_TYPE
};
@@ -1902,8 +2654,276 @@ struct iro {
__le16 size;
};
+/***************************** Public Functions *******************************/
/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the collected GRC data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified dump buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
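+
+/* Usage sketch (illustrative only, assuming vzalloc()/vfree() for the dump
+ * buffer): the *_get_dump_buf_size() and *_dump() pairs declared here all
+ * follow the same two-step flow, shown for the GRC dump:
+ *
+ *   u32 size_dwords, dumped_dwords;
+ *   u32 *buf;
+ *
+ *   if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+ *                                     &size_dwords) != DBG_STATUS_OK)
+ *           return;
+ *   buf = vzalloc(size_dwords * sizeof(u32));
+ *   if (!buf)
+ *           return;
+ *   if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
+ *                        &dumped_dwords) == DBG_STATUS_OK)
+ *           consume_grc_dump(buf, dumped_dwords); (consume_grc_dump() is a
+ *                                                  hypothetical consumer)
+ *   vfree(buf);
+ */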
+/**
+ * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+ * for idle check results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the idle check data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the mcp trace data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * - the trace meta data cannot be read (from NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+ * for reg fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the reg fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the IGU fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+ * buffer size for protection override window results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for protection
+ * override data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_protection_override_dump - Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the protection override data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the FW Asserts data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values in the
+ * specified results struct.
*
* @param p_hwfn
* @param results - Pointer to the attention read results
@@ -1915,47 +2935,241 @@ struct iro {
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
+/******************************** Constants **********************************/
+
#define MAX_NAME_LEN 16
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_get_status_str - Returns a string for the specified status.
+ *
+ * @param status - a debug status code.
+ *
+ * @return a string for the specified status
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+/**
+ * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_idle_chk_results - Prints idle check results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the idle check results.
+ * @param num_errors - OUT: number of errors found in idle check.
+ * @param num_warnings - OUT: number of warnings found in idle check.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
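+
+/* Usage sketch (illustrative only, assuming kmalloc()/kfree() and a raw idle
+ * check dump produced earlier by qed_dbg_idle_chk_dump()): the parsing
+ * helpers declared here turn a raw dump into printable text, e.g.:
+ *
+ *   u32 size_bytes, num_errors, num_warnings;
+ *   char *text;
+ *
+ *   if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf, dumped_dwords,
+ *                                         &size_bytes) != DBG_STATUS_OK)
+ *           return;
+ *   text = kmalloc(size_bytes, GFP_KERNEL);
+ *   if (!text)
+ *           return;
+ *   if (qed_print_idle_chk_results(p_hwfn, dump_buf, dumped_dwords, text,
+ *                                  &num_errors,
+ *                                  &num_warnings) == DBG_STATUS_OK)
+ *           pr_info("idle check: %u errors, %u warnings\n",
+ *                   num_errors, num_warnings);
+ *   kfree(text);
+ */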
+/**
+ * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - MCP Trace dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_mcp_trace_results - Prints MCP Trace results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_reg_fifo_results - Prints reg fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the reg fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_igu_fifo_results - Prints IGU fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the IGU fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_protection_override_results_buf_size - Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_protection_override_results - Prints protection override
+ * results.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the protection override results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_fw_asserts_results - Prints FW Asserts results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the FW Asserts results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
/* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD \
- 0x00f000UL
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM \
- 0x010000UL
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM \
- 0x011000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
- 0x012000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM \
- 0x013000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
- 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
- 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM \
- 0x016000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM \
- 0x017000UL
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM \
- 0x018000UL
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -2003,7 +3217,7 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u8 start_vport;
u8 num_vports;
- u8 pf_wfq;
+ u16 pf_wfq;
u32 pf_rl;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
@@ -2138,6 +3352,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
@@ -2153,42 +3370,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[21].base + ((pf_id) * IRO[21].m1))
+ (IRO[22].base + ((pf_id) * IRO[22].m1))
#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+ (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[23].base + ((pf_id) * IRO[23].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+ (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[25].base + ((pf_id) * IRO[25].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+ (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[26].base + ((ethtype) * IRO[26].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+ (IRO[27].base + ((ethtype) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[29].base + ((queue_id) * IRO[29].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
-
-static const struct iro iro_arr[46] = {
+ (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
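+
+/* Illustrative note (not part of the HSI): each *_OFFSET macro above resolves
+ * to base + index * stride using one entry of the IRO table, so e.g.
+ *
+ *   u32 addr = MSTORM_QUEUE_STAT_OFFSET(stat_id);
+ *
+ * evaluates to IRO[18].base + stat_id * IRO[18].m1, and
+ * MSTORM_QUEUE_STAT_SIZE (IRO[18].size) gives the size of the record found
+ * at that offset.
+ */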
+
+static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x78, 0x0, 0x0, 0x78},
{0x6318, 0x20, 0x0, 0x0, 0x20},
@@ -2201,20 +3466,21 @@ static const struct iro iro_arr[46] = {
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x4990, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x60f8, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xc9a8, 0x0, 0x0, 0x0, 0x4},
- {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x53a0, 0x80, 0x4, 0x0, 0x4},
+ {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba0, 0x80, 0x0, 0x0, 0x20},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0xf188, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xacf0, 0x0, 0x0, 0x0, 0xf0},
{0xade0, 0x8, 0x0, 0x0, 0x8},
@@ -2226,455 +3492,457 @@ static const struct iro iro_arr[46] = {
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
{0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12120, 0x10, 0x0, 0x0, 0x8},
- {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0x12c38, 0x10, 0x0, 0x0, 0x8},
+ {0x11aa0, 0x38, 0x0, 0x0, 0x18},
{0xa8c0, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
- {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
- {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5000, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
-
-#define RUNTIME_ARRAY_SIZE 33925
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30851
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+
+#define RUNTIME_ARRAY_SIZE 33927
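
The *_RT_OFFSET values above are indices into the driver's runtime-init array rather than raw register addresses; qed_init_store_rt_reg(), shown later in this patch, records a value at such an index and qed_init_run() eventually flushes every valid slot to the chip. A minimal sketch of that usage, relying only on the helper visible in this diff (the example function name is hypothetical):

static void example_store_pf_max_icid(struct qed_hwfn *p_hwfn, u32 max_icid)
{
	/* Record the value in rt_data.init_val[] and mark the slot valid;
	 * the actual GRC/DMAE write happens later in qed_init_run().
	 */
	qed_init_store_rt_reg(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET,
			      max_icid);
}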
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -3201,7 +4469,31 @@ struct eth_conn_context {
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
-/* opcodes for the event ring */
+enum eth_error_code {
+ ETH_OK = 0x00,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ MAX_ETH_ERROR_CODE
+};
+
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -3269,7 +4561,13 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
-/* Ethernet Ramrod Command IDs */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG,
+ ETH_IPV4_FIRST_FRAG,
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -3451,8 +4749,8 @@ struct rx_queue_start_ramrod_data {
u8 toggle_val;
u8 vf_rx_prod_index;
-
- u8 reserved[6];
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
__le16 reserved1;
struct regpair cqe_pbl_addr;
struct regpair bd_base;
@@ -3526,10 +4824,11 @@ struct tx_queue_start_ramrod_data {
__le16 pxp_st_index;
__le16 comp_agg_size;
__le16 queue_zone_id;
- __le16 test_dup_count;
+ __le16 reserved2;
__le16 pbl_size;
__le16 tx_queue_id;
-
+ __le16 same_as_last_id;
+ __le16 reserved[3];
struct regpair pbl_base_addr;
struct regpair bd_cons_address;
};
@@ -4926,8 +6225,8 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
@@ -4988,6 +6287,10 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+};
+
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
@@ -6639,6 +7942,35 @@ struct ystorm_iscsi_conn_ag_ctx {
__le32 reg2;
__le32 reg3;
};
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Helps identify that the trace is valid */
+ u32 size; /* The size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32 bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually just after the current producer).
+ */
+};
+
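struct mcp_trace is only declared here; the patch does not show how trace_prod and trace_oldest are consumed. A hedged sketch of one plausible reading, assuming both fields are byte offsets into a circular buffer of 'size' bytes (the wrap-around behaviour is an assumption, not something this hunk states):

static u32 example_mcp_trace_valid_bytes(const struct mcp_trace *t)
{
	/* Assumption: offsets are relative to the trace buffer start and the
	 * producer wraps at t->size.
	 */
	if (t->trace_prod >= t->trace_oldest)
		return t->trace_prod - t->trace_oldest;

	return t->size - (t->trace_oldest - t->trace_prod);
}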
#define VF_MAX_STATIC 192
#define MCP_GLOB_PATH_MAX 2
@@ -6646,6 +7978,7 @@ struct ystorm_iscsi_conn_ag_ctx {
#define MCP_GLOB_PORT_MAX 4
#define MCP_GLOB_FUNC_MAX 16
+typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
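Per the typedef comment, an offsize_t counts in dwords, and only the OFFSIZE_OFFSET_* field is visible in this hunk. A hedged decode sketch; the scratchpad-base parameter and the dword-to-byte conversion are illustrative assumptions:

static u32 example_offsize_to_addr(u32 scratchpad_base, offsize_t val)
{
	u32 offset_dw = (val & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT;

	return scratchpad_base + offset_dw * sizeof(u32); /* dwords -> bytes */
}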
@@ -7236,8 +8569,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
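The new DRV_MSG_CODE_GET_STATS opcode takes one of the DRV_MSG_CODE_STATS_TYPE_* values as its parameter. A hedged sketch of sending such a request, assuming a mailbox helper of the shape qed_mcp_cmd() found elsewhere in the driver; its existence and signature are not part of this hunk:

static int example_request_lan_stats(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;

	/* Assumed helper: qed_mcp_cmd(hwfn, ptt, cmd, param, &resp, &param) */
	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
			   DRV_MSG_CODE_STATS_TYPE_LAN, &resp, &param);
}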
@@ -7248,6 +8592,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
@@ -7285,6 +8632,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -7315,10 +8664,10 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_BW_UPDATE5,
- MFW_DRV_MSG_BW_UPDATE6,
- MFW_DRV_MSG_BW_UPDATE7,
- MFW_DRV_MSG_BW_UPDATE8,
- MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
@@ -7521,4 +8870,101 @@ struct nvm_cfg1 {
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define MCP_TRACE_SIZE 2048 /* 2 KB */
+
+/* This section sits at a fixed location at the beginning of the scratchpad,
+ * to ensure that the MCP trace is not run over during an MFW upgrade.
+ * All the rest of the data has a floating location which differs from version
+ * to version and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
+ * with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ struct mcp_trace trace;
+#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
+ u8 trace_buffer[MCP_TRACE_SIZE];
+#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
+ /* running_mfw has the same definition as in nvm_map.h.
+ * This bit indicates both the running dir and the running bundle.
+ * It is set once when the LIM is loaded.
+ */
+ u32 running_mfw;
+#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
+ u32 build_time;
+#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
+ u32 reset_type;
+#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
+ u32 mfw_secure_mode;
+#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
+ u16 pme_status_pf_bitmap;
+#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
+ u16 pme_enable_pf_bitmap;
+#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
+ u32 mim_nvm_addr;
+ u32 mim_start_addr;
+ u32 ah_pcie_link_params;
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
+#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
+
+ u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
+};
+
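The SECTION(), MCP_TRACE_P and related macros above rely on STRUCT_OFFSET(), which is not defined in this hunk. The usual intent of such macros is to turn a field's position inside struct static_init into an absolute scratchpad address; the sketch below illustrates that idea with plain offsetof() and a hypothetical base parameter.

#include <linux/stddef.h>	/* offsetof() */

static u32 example_trace_buffer_addr(u32 scratchpad_base)
{
	/* Illustration only: STRUCT_OFFSET() presumably resolves to something
	 * equivalent for this fixed scratchpad layout.
	 */
	return scratchpad_base + offsetof(struct static_init, trace_buffer);
}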
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_MAX,
+};
+
+#define DIR_ID_1 (0)
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index e17885321faf..6e4fae9b1430 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,8 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_KERNEL);
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
int i;
if (!p_pool)
@@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
return NULL;
}
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
}
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 new_hw_addr)
+ struct qed_ptt *p_ptt, u32 new_hw_addr)
{
u32 prev_hw_addr;
@@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
}
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr)
+ struct qed_ptt *p_ptt, u32 hw_addr)
{
u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
@@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *addr,
- u32 hw_addr,
- size_t n,
- bool to_device)
+ void *addr, u32 hw_addr, size_t n, bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
size_t quota, done = 0;
@@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- void *dest, u32 hw_addr, size_t n)
+ struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
}
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 hw_addr, void *src, size_t n)
+ struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
u16 control = 0;
@@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
}
void qed_port_pretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 port_id)
+ struct qed_ptt *p_ptt, u8 port_id)
{
u16 control = 0;
@@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u16 control = 0;
@@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
int qed_status = 0;
/* verify address is not NULL */
- if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
- ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
return -EINVAL;
}
@@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
NETIF_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
- le32_to_cpu(command->opcode),
- le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length_dw),
- le32_to_cpu(command->src_addr_hi),
- le32_to_cpu(command->src_addr_lo),
- le32_to_cpu(command->dst_addr_hi),
- le32_to_cpu(command->dst_addr_lo));
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
/* Copy the command to DMAE - need to do it before every call
* for source/dest address no reset.
@@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
- *(((u32 *)command) + i) : 0;
+ *(((u32 *)p_command) + i) : 0;
qed_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
@@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
(i * sizeof(u32)), data);
}
- qed_wr(p_hwfn, p_ptt,
- qed_dmae_idx_to_go_cmd(idx_cmd),
- DMAE_GO_VALUE);
+ qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
return qed_status;
}
@@ -498,31 +481,23 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(u32),
- p_addr,
- GFP_KERNEL);
- if (!*p_comp) {
- DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ sizeof(u32), p_addr, GFP_KERNEL);
+ if (!*p_comp)
goto err;
- }
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
p_addr, GFP_KERNEL);
- if (!*p_cmd) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ if (!*p_cmd)
goto err;
- }
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32) * DMAE_MAX_RW_SIZE,
p_addr, GFP_KERNEL);
- if (!*p_buff) {
- DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ if (!*p_buff)
goto err;
- }
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
@@ -543,8 +518,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
- p_hwfn->dmae_info.p_completion_word,
- p_phys);
+ p_hwfn->dmae_info.p_completion_word, p_phys);
p_hwfn->dmae_info.p_completion_word = NULL;
}
@@ -552,8 +526,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
- p_hwfn->dmae_info.p_dmae_cmd,
- p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd, p_phys);
p_hwfn->dmae_info.p_dmae_cmd = NULL;
}
@@ -571,9 +544,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
- u32 wait_cnt = 0;
- u32 wait_cnt_limit = 10000;
-
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
int qed_status = 0;
barrier();
@@ -606,7 +577,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
u64 dst_addr,
u8 src_type,
u8 dst_type,
- u32 length)
+ u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +595,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(uintptr_t)src_addr,
- length * sizeof(u32));
+ length_dw * sizeof(u32));
break;
default:
return -EINVAL;
@@ -645,7 +616,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length_dw = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length_dw);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -654,16 +625,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
- src_addr,
- dst_addr,
- length);
+ src_addr, dst_addr, length_dw);
return qed_status;
}
if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
memcpy((void *)(uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
- length * sizeof(u32));
+ length_dw * sizeof(u32));
return 0;
}
@@ -730,10 +699,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status,
- src_addr,
- dst_addr,
- length_cur);
+ qed_status, src_addr, dst_addr, length_cur);
break;
}
}
@@ -743,10 +709,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr,
- u32 size_in_dwords,
- u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -768,9 +731,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
@@ -791,12 +755,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
return rc;
}
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- dma_addr_t source_addr,
- dma_addr_t dest_addr,
- u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 9866a20d2128..d567ba94c8d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
p_hwfn->rt_data.b_valid[i] = false;
}
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset, u32 *p_val,
- size_t size)
+ u32 rt_offset, u32 *p_val, size_t size)
{
size_t i;
@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
static int qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u16 rt_offset,
- u16 size,
- bool b_must_dmae)
+ u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
- qed_wr(p_hwfn, p_ptt, addr + (i << 2),
- p_init_val[i]);
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
- if (rc != 0)
+ if (rc)
return rc;
/* Jump over the entire segment, including invalid entry */
@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count,
- QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr,
- u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill, u32 fill_count)
{
u32 i;
@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
- bool b_must_dmae,
- bool b_can_dmae)
+ bool b_must_dmae, bool b_can_dmae)
{
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
- u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
array_data = cdev->fw_data->arr_data;
- hdr = (union init_array_hdr *)(array_data +
- dmae_array_offset);
+ hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct init_write_op *cmd,
- bool b_can_dmae)
+ struct init_write_op *p_cmd, bool b_can_dmae)
{
- u32 data = le32_to_cpu(cmd->data);
- u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 data = le32_to_cpu(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- union init_write_args *arg = &cmd->args;
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ union init_write_args *arg = &p_cmd->args;
int rc = 0;
/* Sanitize */
@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
- qed_wr(p_hwfn, p_ptt, addr,
- le32_to_cpu(arg->inline_val));
+ data = le32_to_cpu(p_cmd->args.inline_val);
+ qed_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
- if (b_must_dmae ||
- (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ data = le32_to_cpu(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
- qed_init_fill(p_hwfn, p_ptt, addr, 0,
- le32_to_cpu(arg->zeros_count));
+ qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
- rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_read_op *cmd)
+ struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
- u16 *offset,
- int modes)
+ u16 *p_offset, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*offset)++];
+ tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
- return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
- arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
- arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
- return (modes & (1 << tree_val)) ? 1 : 0;
+ return (modes & BIT(tree_val)) ? 1 : 0;
}
}
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
- struct init_if_mode_op *p_cmd,
- int modes)
+ struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
- u32 phase,
- u32 phase_id)
+ u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
}
int qed_init_run(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- int phase,
- int phase_id,
- int modes)
+ struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
@@ -483,10 +460,8 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
init_ops = cdev->fw_data->init_ops;
p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
- if (!p_hwfn->unzip_buf) {
- DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ if (!p_hwfn->unzip_buf)
return -ENOMEM;
- }
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
@@ -557,7 +532,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
/* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
- offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 8fa50fa23c8d..2adedc6fb6cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1775,10 +1775,9 @@ struct qed_sb_attn_info {
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
- struct qed_sb_attn_info *p_sb_desc)
+ struct qed_sb_attn_info *p_sb_desc)
{
- u16 rc = 0;
- u16 index;
+ u16 rc = 0, index;
/* Make certain HW write took effect */
mmiowb();
@@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
* @param asserted_bits newly asserted bits
* @return int
*/
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
- u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
u32 igu_mask;
/* Mask the source of the attention in the IGU */
- igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
@@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if ((p_bit->flags & ATTENTION_PARITY) &&
- !!(parities & (1 << bit_idx)))
+ !!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
- aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- IGU_REG_ATTENTION_ENABLE);
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
@@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn,
- "MFW indication via attention\n");
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
@@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
return rc;
}
- if (deasserted_bits) {
+ if (deasserted_bits)
rc = qed_int_deassertion(p_hwfn, deasserted_bits);
- if (rc)
- return rc;
- }
return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
- void __iomem *igu_addr,
- u32 ack_cons)
+ void __iomem *igu_addr, u32 ack_cons)
{
struct igu_prod_cons_update igu_ack = { 0 };
@@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
- DP_ERR(
- p_hwfn->cdev,
- "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
@@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
}
if (!sb_attn || !sb_attn->sb_attn) {
- DP_ERR(
- p_hwfn->cdev,
- "Attentions Status block is NULL - cannot check for new attentions!\n");
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
@@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
if (p_sb->sb_attn)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
+ p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
}
@@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
int i, j, k;
@@ -2378,15 +2365,13 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_sb_attn_info *p_sb;
- void *p_virt;
dma_addr_t p_phys = 0;
+ void *p_virt;
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
@@ -2394,7 +2379,6 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2412,9 +2396,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
- u8 pf_id,
- u16 vf_number,
- u8 vf_valid)
+ u8 pf_id, u16 vf_number, u8 vf_valid)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
@@ -2428,12 +2410,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
- /* setting the time resultion to a fixed value ( = 1) */
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
- QED_CAU_DEF_RX_TIMER_RES);
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
- QED_CAU_DEF_TX_TIMER_RES);
-
cau_state = CAU_HC_DISABLE_STATE;
if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2444,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
- u16 igu_sb_id,
- u16 vf_number,
- u8 vf_valid)
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
struct cau_sb_entry sb_entry;
@@ -2514,8 +2488,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- QED_COAL_RX_STATE_MACHINE,
- timeset);
+ QED_COAL_RX_STATE_MACHINE, timeset);
if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
@@ -2541,8 +2514,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u8 timeset)
{
struct cau_pi_entry pi_entry;
- u32 sb_offset;
- u32 pi_offset;
+ u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
@@ -2569,8 +2541,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_sb_info *sb_info)
+ struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
@@ -2590,8 +2561,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
*
* @return u16
*/
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
- u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2603,8 +2573,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
- (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+ if (sb_id == QED_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
return igu_sb_id;
}
@@ -2612,9 +2586,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
sb_info->sb_virt = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2622,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2656,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
kfree(p_sb);
}
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_sb_sp_info *p_sb;
dma_addr_t p_phys = 0;
@@ -2694,17 +2664,14 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
- if (!p_sb) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ if (!p_sb)
return -ENOMEM;
- }
/* SB ring */
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
SB_ALIGNED_SIZE(p_hwfn),
&p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
kfree(p_sb);
return -ENOMEM;
}
@@ -2721,9 +2688,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
- void *cookie,
- u8 *sb_idx,
- __le16 **p_fw_cons)
+ void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int rc = -ENOMEM;
@@ -2764,8 +2729,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
@@ -2809,7 +2773,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
- if (rc != 0) {
+ if (rc) {
DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
return -EINVAL;
}
@@ -2822,8 +2786,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
@@ -2950,13 +2913,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid, b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 sb_id)
{
u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY +
- sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
struct qed_igu_block *p_block;
p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2944,7 @@ out:
return val;
}
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
@@ -2993,7 +2953,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
-
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
@@ -3104,22 +3063,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
*/
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
- u32 igu_pf_conf = 0;
-
- igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
- u64 intr_status = 0;
- u32 intr_status_lo = 0;
- u32 intr_status_hi = 0;
u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
IGU_CMD_INT_ACK_BASE;
u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
IGU_CMD_INT_ACK_BASE;
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
intr_status_lo = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,26 +3109,20 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
kfree(p_hwfn->sp_dpc);
}
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ if (rc)
return rc;
- }
+
rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
- if (rc) {
- DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
- return rc;
- }
+
return rc;
}
@@ -3183,8 +3133,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
qed_int_sp_dpc_free(p_hwfn);
}
-void qed_int_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
qed_int_sb_attn_setup(p_hwfn, p_ptt);
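
The parity-handling hunk above swaps the open-coded (1 << bit_idx) test for the kernel's BIT() helper. Below is a minimal standalone sketch of why the two forms are interchangeable, assuming only the standard definition of BIT(nr) as (1UL << (nr)); nothing in the sketch is taken from the patch itself.

#include <linux/types.h>

/* Minimal sketch, not part of the patch: BIT(nr) expands to an unsigned
 * long shift, so the parity test keeps the exact meaning of the old
 * open-coded form while reading more clearly.
 */
#define SKETCH_BIT(nr)	(1UL << (nr))

static inline bool sketch_parity_set(u32 parities, unsigned int bit_idx)
{
	/* Equivalent to !!(parities & (1 << bit_idx)) from the old code */
	return !!(parities & SKETCH_BIT(bit_idx));
}

The double negation keeps the result a strict 0 or 1, matching the original test.
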
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 401e738543b5..ddd410a91e13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
u16 rx_mode = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
/* TPA related fields */
- memset(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
+ memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
@@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->tx_switching_en = p_params->tx_switching;
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -109,8 +111,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
@@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- if (p_params->update_approx_mcast_flg) {
- p_ramrod->common.update_approx_mcast_flg = 1;
- for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
- __le32 val = cpu_to_le32(p_bins[i]);
+ if (!p_params->update_approx_mcast_flg)
+ return;
- p_ramrod->approx_mcast.bins[i] = val;
- }
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
- p_cmn->update_accept_any_vlan_flg =
- p_params->update_accept_any_vlan_flg;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
- if (rc != 0)
+ if (rc)
return rc;
memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
return rc;
}
@@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid(
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
int rc = -EINVAL;
/* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = params->vport_id;
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
+ if (rc)
return rc;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, params->queue_id, params->vport_id,
- params->sb);
+ opaque_fid,
+ cid, p_params->queue_id, p_params->vport_id, p_params->sb);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(params->sb);
- p_ramrod->sb_index = params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
- p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
- p_ramrod->complete_cqe_flg = 0;
- p_ramrod->complete_event_flg = 1;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
- p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
- p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- p_ramrod->vf_rx_prod_index = params->vf_qid;
- if (params->vf_qid)
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = p_params->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_use_zone_a_prod ? " [legacy]" : "",
+ p_params->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ }
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
- struct qed_queue_start_common_params *params,
+ struct qed_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
- params->queue_id,
- params->sb,
- params->sb_idx,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
- rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
- if (rc != 0)
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
+ if (rc)
return rc;
- rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
- if (rc != 0)
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
return rc;
*pp_prod = (u8 __iomem *)p_hwfn->regview +
@@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- params,
+ p_params,
abs_stats_id,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size);
+ cqe_pbl_addr, cqe_pbl_size, false);
- if (rc != 0)
+ if (rc)
qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
return rc;
@@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_params->sb);
- p_ramrod->sb_index = p_params->sb_idx;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
- p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
- p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = qed_get_qm_pq(p_hwfn,
- PROTOCOLID_ETH,
- p_pq_params);
- p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
memset(&pq_params, 0, sizeof(pq_params));
/* Allocate a CID for the queue */
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_tx_cid->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return rc;
@@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
@@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
- p_second_filter->type = p_first_filter->type;
- p_second_filter->mac_msb = p_first_filter->mac_msb;
- p_second_filter->mac_mid = p_first_filter->mac_mid;
- p_second_filter->mac_lsb = p_first_filter->mac_lsb;
- p_second_filter->vlan_id = p_first_filter->vlan_id;
- p_second_filter->vni = p_first_filter->vni;
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
p_first_filter->vport_id = vport_to_remove_from;
- p_second_filter->action = ETH_FILTER_ACTION_ADD;
- p_second_filter->vport_id = vport_to_add_to;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
p_first_filter->vport_id = vport_to_add_to;
memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
&p_ramrod, &p_ent,
comp_mode, p_comp_data);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
return rc;
}
@@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
p_header->assert_on_error = p_filter_cmd->assert_on_error;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc != 0) {
- DP_ERR(p_hwfn,
- "Unicast filter ADD command failed %d\n",
- rc);
+ if (rc) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
return rc;
}
@@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* Return:
******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length,
- u32 crc32_seed,
- u8 complement)
+ u32 crc32_length, u32 crc32_seed, u8 complement)
{
- u32 byte = 0;
- u32 bit = 0;
- u8 msb = 0;
- u8 current_byte = 0;
- u32 crc32_result = crc32_seed;
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
if ((!crc32_packet) ||
(crc32_length == 0) ||
@@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static inline u32 qed_crc32c_le(u32 seed,
- u8 *mac,
- u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
u32 packet_buf[2] = { 0 };
@@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc, i;
- if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ if (p_filter_cmd->opcode == QED_FILTER_ADD)
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
&abs_vport_id);
- if (rc)
- return rc;
- } else {
+ else
rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
&abs_vport_id);
- if (rc)
- return rc;
- }
+ if (rc)
+ return rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
- struct vport_update_ramrod_mcast *approx_mcast;
- approx_mcast = &p_ramrod->approx_mcast;
- approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
}
}
@@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_mcast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
+ comp_mode, p_comp_data);
}
return rc;
}
@@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
rc = qed_sp_eth_filter_ucast(p_hwfn,
opaque_fid,
p_filter_cmd,
- comp_mode,
- p_comp_data);
- if (rc != 0)
+ comp_mode, p_comp_data);
+ if (rc)
break;
}
@@ -1590,8 +1578,7 @@ out:
}
}
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
@@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+ info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
}
qed_fill_dev_info(cdev, &info->common);
@@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev,
return 0;
}
-static int qed_stop_vport(struct qed_dev *cdev,
- u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
int rc, i;
@@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- vport_id);
+ p_hwfn->hw_info.opaque_fid, vport_id);
if (rc) {
DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev,
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg =
- params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg =
- params->update_vport_active_flg;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev,
* We need to re-fix the rss values per engine for CMT.
*/
if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss =
- &params->rss_params;
+ struct qed_update_vport_rss_params *rss = &params->rss_params;
int k, max = 0;
/* Find largest entry, since it's possible RSS needs to
@@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev,
QED_RSS_IND_TABLE_SIZE * sizeof(u16));
memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
QED_RSS_KEY_SIZE * sizeof(u32));
+ sp_params.rss_params = &sp_rss_params;
}
- sp_params.rss_params = &sp_rss_params;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev,
u16 cqe_pbl_size,
void __iomem **pp_prod)
{
- int rc, hwfn_index;
struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
hwfn_index = params->rss_id % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
@@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev,
rc = qed_sp_eth_rx_queue_stop(p_hwfn,
params->rx_queue_id / cdev->num_hwfns,
- params->eq_completion_only,
- false);
+ params->eq_completion_only, false);
if (rc) {
DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
return rc;
@@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
memset(&accept_flags, 0, sizeof(accept_flags));
- accept_flags.update_rx_mode_config = 1;
- accept_flags.update_tx_mode_config = 1;
- accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
- QED_ACCEPT_MCAST_MATCHED |
- QED_ACCEPT_BCAST;
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
QED_ACCEPT_MCAST_MATCHED |
QED_ACCEPT_BCAST;
@@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
struct qed_filter_ucast ucast;
if (!params->vlan_valid && !params->mac_valid) {
- DP_NOTICE(
- cdev,
- "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
return -EINVAL;
}
@@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
for (i = 0; i < mcast.num_mc_addrs; i++)
ether_addr_copy(mcast.mac[i], params->mac[i]);
- return qed_filter_mcast_cmd(cdev, &mcast,
- QED_SPQ_MODE_CB, NULL);
+ return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev,
accept_flags = params->filter.accept_flags;
return qed_configure_filter_rx_mode(cdev, accept_flags);
default:
- DP_NOTICE(cdev, "Unknown filter type %d\n",
- (int)params->type);
+ DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
return -EINVAL;
}
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
- u8 rss_id,
- struct eth_slow_path_rx_cqe *cqe)
+ u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
cqe);
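
In the vport-start hunk above, the new check_mac/check_ethtype booleans are copied into one-bit ramrod fields through double negation (!!). A minimal sketch of that idiom follows, using hypothetical names rather than the driver's real structures.

/* Minimal sketch with hypothetical names: '!!' collapses any non-zero
 * value to 1, which is what a one-bit firmware field expects.
 */
struct sketch_ramrod {
	unsigned char ctl_frame_mac_check_en;
};

static void sketch_fill(struct sketch_ramrod *r, int check_mac)
{
	r->ctl_frame_mac_check_en = !!check_mac;	/* always 0 or 1 */
}

Any non-zero input collapses to 1, so a field that is only one bit wide never receives a silently truncated value.
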
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 002114543451..e495d62fcc03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -102,6 +102,8 @@ struct qed_sp_vport_start_params {
u16 opaque_fid;
u8 vport_id;
u16 mtu;
+ bool check_mac;
+ bool check_ethtype;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
@@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
@@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
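
Both the ramrod prototypes above and the new light-L2 code that follows store firmware-visible fields through cpu_to_le16()/cpu_to_le32(), so host byte order never leaks into device memory. A minimal sketch of that pattern, assuming only the standard Linux byteorder helpers that the new file already includes:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Minimal sketch: firmware expects little-endian fields, so values are
 * converted on store; this is a no-op on LE hosts and a byteswap on BE.
 */
static inline void sketch_store_le16(__le16 *dst, u16 host_val)
{
	*dst = cpu_to_le16(host_val);
}
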
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000000..a6db10717d5c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,1792 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+struct qed_cb_ll2_info {
+ int rx_cnt;
+ u32 rx_size;
+ u8 handle;
+ bool frags_mapped;
+
+ /* Lock protecting LL2 buffer lists in sleepless context */
+ spinlock_t lock;
+ struct list_head list;
+
+ const struct qed_ll2_cb_ops *cbs;
+ void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+ struct list_head list;
+ void *data;
+ dma_addr_t phys_addr;
+};
+
+static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct sk_buff *skb = cookie;
+
+ /* All we need to do is release the mapping */
+ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+ cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+ b_last_fragment);
+
+ if (cdev->ll2->frags_mapped)
+ /* Case where mapped frags were received, need to
+ * free skb with nr_frags marked as 0
+ */
+ skb_shinfo(skb)->nr_frags = 0;
+
+ dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ u8 **data, dma_addr_t *phys_addr)
+{
+ *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ if (!(*data)) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ return -ENOMEM;
+ }
+
+ *phys_addr = dma_map_single(&cdev->pdev->dev,
+ ((*data) + NET_SKB_PAD),
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+ DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+ kfree((*data));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+ struct qed_ll2_buffer *buffer)
+{
+ spin_lock_bh(&cdev->ll2->lock);
+
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+
+ cdev->ll2->rx_cnt--;
+ if (!cdev->ll2->rx_cnt)
+ DP_INFO(cdev, "All LL2 entries were removed\n");
+
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
+{
+ u16 packet_length = le16_to_cpu(p_cqe->packet_length);
+ struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 vlan = le16_to_cpu(p_cqe->vlan);
+ u32 opaque_data_0, opaque_data_1;
+ u8 pad = p_cqe->placement_offset;
+ dma_addr_t new_phys_addr;
+ struct sk_buff *skb;
+ bool reuse = false;
+ int rc = -EINVAL;
+ u8 *new_data;
+
+ opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
+ opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+ "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+ (u64)p_pkt->rx_buf_addr, pad, packet_length,
+ le16_to_cpu(p_cqe->parse_flags.flags), vlan,
+ opaque_data_0, opaque_data_1);
+
+ if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buffer->data, packet_length, false);
+ }
+
+ /* Determine if data is valid */
+ if (packet_length < ETH_HLEN)
+ reuse = true;
+
+ /* Allocate a replacement for buffer; Reuse upon failure */
+ if (!reuse)
+ rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+ &new_phys_addr);
+
+ /* If need to reuse or there's no replacement buffer, repost this */
+ if (rc)
+ goto out_post;
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out_post;
+ }
+
+ pad += NET_SKB_PAD;
+ skb_reserve(skb, pad);
+ skb_put(skb, packet_length);
+ skb_checksum_none_assert(skb);
+
+ /* Get partial ethernet information instead of eth_type_trans(),
+ * since we don't have an associated net_device.
+ */
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ /* Pass SKB onward */
+ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+ if (vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ opaque_data_0, opaque_data_1);
+ }
+
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+
+out_post:
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+
+ if (rc)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ bool b_lock,
+ bool b_only_active)
+{
+ struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+ if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+ return NULL;
+
+ if (!p_hwfn->p_ll2_info)
+ return NULL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ if (b_only_active) {
+ if (b_lock)
+ mutex_lock(&p_ll2_conn->mutex);
+ if (p_ll2_conn->b_active)
+ p_ret = p_ll2_conn;
+ if (b_lock)
+ mutex_unlock(&p_ll2_conn->mutex);
+ } else {
+ p_ret = p_ll2_conn;
+ }
+
+ return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
+ *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ bool b_last_packet = false, b_last_frag = false;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_tx = &p_ll2_conn->tx_queue;
+
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_release_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+
+ }
+}
+
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+ struct qed_ll2_tx_packet *p_pkt;
+ bool b_last_frag = false;
+ unsigned long flags;
+ dma_addr_t tx_frag;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->b_completing_packet) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ goto out;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ goto out;
+
+ p_tx->b_completing_packet = true;
+ p_tx->cur_completing_packet = *p_pkt;
+ num_bds_in_packet = p_pkt->bd_used;
+ list_del(&p_pkt->list_entry);
+
+ if (num_bds < num_bds_in_packet) {
+ DP_NOTICE(p_hwfn,
+ "Rest of BDs does not cover whole packet\n");
+ goto out;
+ }
+
+ num_bds -= num_bds_in_packet;
+ p_tx->bds_idx += num_bds_in_packet;
+ while (num_bds_in_packet--)
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+
+ p_tx->b_completing_packet = false;
+ rc = 0;
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags, bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u16 packet_length, parse_flags, vlan;
+ u32 src_mac_addrhi;
+ u16 src_mac_addrlo;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "GSI Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+
+ list_del(&p_pkt->list_entry);
+ parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+ p_ll2_info->my_id,
+ p_pkt->cookie,
+ p_pkt->rx_buf_addr,
+ packet_length,
+ p_cqe->rx_cqe_gsi.data_length_error,
+ parse_flags,
+ vlan,
+ src_mac_addrhi,
+ src_mac_addrlo, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags,
+ bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "LL2 Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
+ p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ while (cq_new_idx != cq_old_idx) {
+ bool b_last_cqe = (cq_new_idx == cq_old_idx);
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+ cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
+ switch (cqe->rx_cqe_sp.type) {
+ case CORE_RX_CQE_TYPE_SLOW_PATH:
+ DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
+ rc = -EINVAL;
+ break;
+ case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+ rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ case CORE_RX_CQE_TYPE_REGULAR:
+ rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ default:
+ rc = -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_rx = &p_ll2_conn->rx_queue;
+
+ while (!list_empty(&p_rx->active_descq)) {
+ dma_addr_t rx_buf_addr;
+ void *cookie;
+ bool b_last;
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ rx_buf_addr = p_pkt->rx_buf_addr;
+ cookie = p_pkt->cookie;
+
+ b_last = list_empty(&p_rx->active_descq);
+ }
+}
+
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ u8 action_on_error)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 cqe_pbl_size;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_rx->rx_sb_index;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base,
+ p_rx->rxq_chain.p_phys_addr);
+ cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+ qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+ p_ramrod->main_func_queue = 1;
+
+ if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+ p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+ p_ramrod->mf_si_bcast_accept_all = 1;
+ p_ramrod->mf_si_mcast_accept_all = 1;
+ } else {
+ p_ramrod->mf_si_bcast_accept_all = 0;
+ p_ramrod->mf_si_mcast_accept_all = 0;
+ }
+
+ p_ramrod->action_on_error.error_type = action_on_error;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct core_tx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params pq_params;
+ u16 pq_id = 0, pbl_size;
+ int rc = -EINVAL;
+
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_tx->tx_sb_index;
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ll2_conn->tx_stats_en = 1;
+ p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+ p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_tx->txq_chain));
+ pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ switch (conn_type) {
+ case QED_LL2_TYPE_ISCSI:
+ case QED_LL2_TYPE_ISCSI_OOO:
+ p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ break;
+ case QED_LL2_TYPE_ROCE:
+ p_ramrod->conn_type = PROTOCOLID_ROCE;
+ break;
+ default:
+ p_ramrod->conn_type = PROTOCOLID_ETH;
+ DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+ }
+
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+ p_ramrod->complete_event_flg = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+{
+ struct qed_ll2_rx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!rx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_bd),
+ &p_ll2_info->rx_queue.rxq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+ goto out;
+ }
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+ goto out;
+ }
+ p_ll2_info->rx_queue.descq_array = p_descq;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_fast_path_cqe),
+ &p_ll2_info->rx_queue.rcq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, rx_num_desc);
+
+out:
+ return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 tx_num_desc)
+{
+ struct qed_ll2_tx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!tx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ tx_num_desc,
+ sizeof(struct core_tx_bd),
+ &p_ll2_info->tx_queue.txq_chain);
+ if (rc)
+ goto out;
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ p_ll2_info->tx_queue.descq_array = p_descq;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, tx_num_desc);
+
+out:
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+ tx_num_desc);
+ return rc;
+}
+
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle)
+{
+ qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+ struct qed_ll2_info *p_ll2_info = NULL;
+ int rc;
+ u8 i;
+
+ if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ /* Find a free connection to be used */
+ for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+ if (p_hwfn->p_ll2_info[i].b_active) {
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ continue;
+ }
+
+ p_hwfn->p_ll2_info[i].b_active = true;
+ p_ll2_info = &p_hwfn->p_ll2_info[i];
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ break;
+ }
+ if (!p_ll2_info)
+ return -EBUSY;
+
+ p_ll2_info->conn_type = p_params->conn_type;
+ p_ll2_info->mtu = p_params->mtu;
+ p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
+ p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
+ p_ll2_info->tx_tc = p_params->tx_tc;
+ p_ll2_info->tx_dest = p_params->tx_dest;
+ p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
+ p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
+ p_ll2_info->gsi_enable = p_params->gsi_enable;
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ /* Register callbacks for the Rx/Tx queues */
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+
+ if (rx_num_desc) {
+ qed_int_register_cb(p_hwfn, comp_rx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->rx_queue.rx_sb_index,
+ &p_ll2_info->rx_queue.p_fw_cons);
+ p_ll2_info->rx_queue.b_cb_registred = true;
+ }
+
+ if (tx_num_desc) {
+ qed_int_register_cb(p_hwfn,
+ comp_tx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->tx_queue.tx_sb_index,
+ &p_ll2_info->tx_queue.p_fw_cons);
+ p_ll2_info->tx_queue.b_cb_registred = true;
+ }
+
+ *p_connection_handle = i;
+ return rc;
+
+q_allocate_fail:
+ qed_ll2_release_connection(p_hwfn, i);
+ return -ENOMEM;
+}
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ u8 action_on_error = 0;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
+ p_ll2_conn->ai_err_packet_too_big);
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+
+ return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+}
+
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ struct qed_ll2_tx_queue *p_tx;
+ int rc = -EINVAL;
+ u32 i, capacity;
+ u8 qid;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+ p_tx = &p_ll2_conn->tx_queue;
+
+ qed_chain_reset(&p_rx->rxq_chain);
+ qed_chain_reset(&p_rx->rcq_chain);
+ INIT_LIST_HEAD(&p_rx->active_descq);
+ INIT_LIST_HEAD(&p_rx->free_descq);
+ INIT_LIST_HEAD(&p_rx->posting_descq);
+ spin_lock_init(&p_rx->lock);
+ capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_rx->descq_array[i].list_entry,
+ &p_rx->free_descq);
+ *p_rx->p_fw_cons = 0;
+
+ qed_chain_reset(&p_tx->txq_chain);
+ INIT_LIST_HEAD(&p_tx->active_descq);
+ INIT_LIST_HEAD(&p_tx->free_descq);
+ INIT_LIST_HEAD(&p_tx->sending_descq);
+ spin_lock_init(&p_tx->lock);
+ capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_tx->descq_array[i].list_entry,
+ &p_tx->free_descq);
+ p_tx->cur_completing_bd_idx = 0;
+ p_tx->bds_idx = 0;
+ p_tx->b_completing_packet = false;
+ p_tx->cur_send_packet = NULL;
+ p_tx->cur_send_frag_num = 0;
+ p_tx->cur_completing_frag_num = 0;
+ *p_tx->p_fw_cons = 0;
+
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ p_ll2_conn->queue_id = qid;
+ p_ll2_conn->tx_stats_id = qid;
+ p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_ll2_conn->cid,
+ DQ_DEMS_LEGACY);
+
+ rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+ return rc;
+}
+
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_rx_queue *p_rx,
+ struct qed_ll2_rx_packet *p_curp)
+{
+ struct qed_ll2_rx_packet *p_posting_packet = NULL;
+ struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ bool b_notify_fw = false;
+ u16 bd_prod, cq_prod;
+
+ /* This handles the flushing of already posted buffers */
+ while (!list_empty(&p_rx->posting_descq)) {
+ p_posting_packet = list_first_entry(&p_rx->posting_descq,
+ struct qed_ll2_rx_packet,
+ list_entry);
+ list_del(&p_posting_packet->list_entry);
+ list_add_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ /* This handles the supplied packet [if there is one] */
+ if (p_curp) {
+ list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ if (!b_notify_fw)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+ cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+}
+
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw)
+{
+ struct core_rx_bd_with_buff_len *p_curb = NULL;
+ struct qed_ll2_rx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags;
+ void *p_data;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ if (!list_empty(&p_rx->free_descq))
+ p_curp = list_first_entry(&p_rx->free_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (p_curp) {
+ if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+ qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+ p_data = qed_chain_produce(&p_rx->rxq_chain);
+ p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+ qed_chain_produce(&p_rx->rcq_chain);
+ }
+ }
+
+	/* If we're lacking entries, let's try to flush buffers to FW */
+ if (!p_curp || !p_curb) {
+ rc = -EBUSY;
+ p_curp = NULL;
+ goto out_notify;
+ }
+
+ /* We have an Rx packet we can fill */
+ DMA_REGPAIR_LE(p_curb->addr, addr);
+ p_curb->buff_length = cpu_to_le16(buf_len);
+ p_curp->rx_buf_addr = addr;
+ p_curp->cookie = cookie;
+ p_curp->rxq_bd = p_curb;
+ p_curp->buf_length = buf_len;
+ list_del(&p_curp->list_entry);
+
+ /* Check if we only want to enqueue this packet without informing FW */
+ if (!notify_fw) {
+ list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+ goto out;
+ }
+
+out_notify:
+ qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_tx_queue *p_tx,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *p_cookie,
+ u8 notify_fw)
+{
+ list_del(&p_curp->list_entry);
+ p_curp->cookie = p_cookie;
+ p_curp->bd_used = num_of_bds;
+ p_curp->notify_fw = notify_fw;
+ p_tx->cur_send_packet = p_curp;
+ p_tx->cur_send_frag_num = 0;
+
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_tx->cur_send_frag_num++;
+}
+
+static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type type,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
+{
+ struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+ u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+ struct core_tx_bd *start_bd = NULL;
+ u16 frag_idx;
+
+ start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+ cpu_to_le16(l4_hdr_offset_w));
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+ start_bd->bd_flags.as_bitfield = bd_flags;
+ start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
+ CORE_TX_BD_FLAGS_START_BD_SHIFT;
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ DMA_REGPAIR_LE(start_bd->addr, first_frag);
+ start_bd->nbytes = cpu_to_le16(first_frag_len);
+
+ SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
+ type);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+ p_ll2->queue_id,
+ p_ll2->cid,
+ p_ll2->conn_type,
+ prod_idx,
+ first_frag_len,
+ num_of_bds,
+ le32_to_cpu(start_bd->addr.hi),
+ le32_to_cpu(start_bd->addr.lo));
+
+ if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ return;
+
+ /* Need to provide the packet with additional BDs for frags */
+ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+ frag_idx < num_of_bds; frag_idx++) {
+ struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+ *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bitfield1 = 0;
+ (*p_bd)->bitfield0 = 0;
+ p_curp->bds_set[frag_idx].tx_frag = 0;
+ p_curp->bds_set[frag_idx].frag_len = 0;
+ }
+}
+
+/* This should be called while the Txq spinlock is being held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct core_db_data db_msg = { 0, 0, 0 };
+ u16 bd_prod;
+
+ /* If there are missing BDs, don't do anything now */
+ if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+ p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+ return;
+
+	/* Push the current packet to the sending list and reset the per-packet state */
+ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+ &p_ll2_conn->tx_queue.sending_descq);
+ p_ll2_conn->tx_queue.cur_send_packet = NULL;
+ p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+ /* Notify FW of packet only if requested to */
+ if (!b_notify)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+ while (!list_empty(&p_tx->sending_descq)) {
+ p_pkt = list_first_entry(&p_tx->sending_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ }
+
+ SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+ db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+ /* Make sure the BDs data is updated before ringing the doorbell */
+ wmb();
+
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+ p_ll2_conn->queue_id,
+ p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+}
+
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw)
+{
+ struct qed_ll2_tx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_chain *p_tx_chain;
+ unsigned long flags;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_tx = &p_ll2_conn->tx_queue;
+ p_tx_chain = &p_tx->txq_chain;
+
+ if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ return -EIO;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->cur_send_packet) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ /* Get entry, but only if we have tx elements for it */
+ if (!list_empty(&p_tx->free_descq))
+ p_curp = list_first_entry(&p_tx->free_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ p_curp = NULL;
+
+ if (!p_curp) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (qed_roce_flavor == QED_LL2_ROCE) {
+ roce_flavor = CORE_ROCE;
+ } else if (qed_roce_flavor == QED_LL2_RROCE) {
+ roce_flavor = CORE_RROCE;
+ } else {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Prepare packet and BD, and perhaps send a doorbell to FW */
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
+ num_of_bds, first_frag,
+ first_frag_len, cookie, notify_fw);
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
+ num_of_bds, CORE_TX_DEST_NW,
+ vlan, bd_flags, l4_hdr_offset_w,
+ roce_flavor,
+ first_frag, first_frag_len);
+
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes)
+{
+ struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ u16 cur_send_frag_num = 0;
+ struct core_tx_bd *p_bd;
+ unsigned long flags;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ if (!p_ll2_conn->tx_queue.cur_send_packet)
+ return -EINVAL;
+
+ p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+ cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+ if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ return -EINVAL;
+
+ /* Fill the BD information, and possibly notify FW */
+ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+ DMA_REGPAIR_LE(p_bd->addr, addr);
+ p_bd->nbytes = cpu_to_le16(nbytes);
+ p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+ p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+ p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+ spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+ spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+ return 0;
+}
+
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ int rc = -EINVAL;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ }
+
+ return rc;
+}
+
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ kfree(p_ll2_conn->tx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+ kfree(p_ll2_conn->rx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+ qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+ mutex_lock(&p_ll2_conn->mutex);
+ p_ll2_conn->b_active = false;
+ mutex_unlock(&p_ll2_conn->mutex);
+}
+
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ll2_info *p_ll2_connections;
+ u8 i;
+
+ /* Allocate LL2's set struct */
+ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+ sizeof(struct qed_ll2_info), GFP_KERNEL);
+ if (!p_ll2_connections) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+ return NULL;
+ }
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ p_ll2_connections[i].my_id = i;
+
+ return p_ll2_connections;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ int i;
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ mutex_init(&p_ll2_connections[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ kfree(p_ll2_connections);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_tstorm_per_queue_stat tstats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->packet_too_big_discard =
+ HILO_64_REGPAIR(tstats.packet_too_big_discard);
+ p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+}
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_ustorm_per_queue_stat ustats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_pstorm_per_queue_stat pstats;
+ u8 stats_id = p_ll2_conn->tx_stats_id;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+ !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ if (p_ll2_conn->tx_stats_en)
+ _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+}
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie)
+{
+ cdev->ll2->cbs = ops;
+ cdev->ll2->cb_cookie = cookie;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ struct qed_ll2_info ll2_info;
+ struct qed_ll2_buffer *buffer;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ptt *p_ptt;
+ int rc, i;
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+ cdev->ll2->frags_mapped = params->frags_mapped;
+
+	/* Allocate memory for LL2 */
+ DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
+ cdev->ll2->rx_size);
+ for (i = 0; i < QED_LL2_RX_SIZE; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
+ if (rc) {
+ kfree(buffer);
+ goto fail;
+ }
+
+ list_add_tail(&buffer->list, &cdev->ll2->list);
+ }
+
+ switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ conn_type = QED_LL2_TYPE_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ conn_type = QED_LL2_TYPE_ROCE;
+ break;
+ default:
+ conn_type = QED_LL2_TYPE_TEST;
+ }
+
+ /* Prepare the temporary ll2 information */
+ memset(&ll2_info, 0, sizeof(ll2_info));
+ ll2_info.conn_type = conn_type;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = 0;
+ ll2_info.tx_dest = CORE_TX_DEST_NW;
+ ll2_info.gsi_enable = 1;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
+ QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+ &cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 connection\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establish LL2 connection\n");
+ goto release_fail;
+ }
+
+ /* Post all Rx buffers to FW */
+ spin_lock_bh(&cdev->ll2->lock);
+ list_for_each_entry(buffer, &cdev->ll2->list, list) {
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc) {
+ DP_INFO(cdev,
+ "Failed to post an Rx buffer; Deleting it\n");
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+ kfree(buffer);
+ } else {
+ cdev->ll2->rx_cnt++;
+ }
+ }
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ if (!cdev->ll2->rx_cnt) {
+ DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
+ goto release_terminate;
+ }
+
+ if (!is_valid_ether_addr(params->ll2_mac_address)) {
+ DP_INFO(cdev, "Invalid Ethernet address\n");
+ goto release_terminate;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto release_terminate;
+ }
+
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ params->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ if (rc) {
+ DP_ERR(cdev, "Failed to allocate LLH filter\n");
+ goto release_terminate_all;
+ }
+
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+
+ return 0;
+
+release_terminate_all:
+
+release_terminate:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+release_fail:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+fail:
+ qed_ll2_kill_buffers(cdev);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+ return -EINVAL;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto fail;
+ }
+
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ cdev->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+fail:
+ return -EINVAL;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+{
+ const skb_frag_t *frag;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+ u8 flags = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
+ return -EINVAL;
+ }
+
+ if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+ 1 + skb_shinfo(skb)->nr_frags);
+ return -EINVAL;
+ }
+
+ mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev, "SKB mapping failed\n");
+ return -EINVAL;
+ }
+
+ /* Request HW to calculate IP csum */
+ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+ ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan = skb_vlan_tag_get(skb);
+ flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ }
+
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ 1 + skb_shinfo(skb)->nr_frags,
+ vlan, flags, 0, 0 /* RoCE FLAVOR */,
+ mapping, skb->len, skb, 1);
+ if (rc)
+ goto err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ if (!cdev->ll2->frags_mapped) {
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev,
+ mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ goto err;
+ }
+ } else {
+ mapping = page_to_phys(skb_frag_page(frag)) |
+ frag->page_offset;
+ }
+
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ mapping,
+ skb_frag_size(frag));
+
+		/* If this fails there is not much to do here: a partial packet
+		 * has already been posted and we can't free its memory, so we
+		 * need to wait for the completion.
+		 */
+ if (rc)
+ goto err2;
+ }
+
+ return 0;
+
+err:
+ dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+
+err2:
+ return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ if (!cdev->ll2)
+ return -EINVAL;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = &qed_ll2_start,
+ .stop = &qed_ll2_stop,
+ .start_xmit = &qed_ll2_start_xmit,
+ .register_cb_ops = &qed_ll2_register_cb_ops,
+ .get_stats = &qed_ll2_stats,
+};
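For orientation, the table above is the interface a protocol module consumes. A minimal, hypothetical sketch of a caller follows; the parameter values, the random MAC and the empty callback handling are assumptions for illustration, not part of this patch.

/* Hypothetical consumer sketch - callbacks would normally do real work;
 * the qed_ll2_params values here are illustrative only.
 */
static int example_use_ll2_ops(struct qed_dev *cdev,
			       const struct qed_ll2_ops *ll2_ops,
			       const struct qed_ll2_cb_ops *cb_ops,
			       void *cookie)
{
	struct qed_ll2_params params;

	memset(&params, 0, sizeof(params));
	params.mtu = 1500;			/* illustrative MTU */
	params.drop_ttl0_packets = 0;
	params.rx_vlan_stripping = 1;
	eth_random_addr(params.ll2_mac_address);	/* a valid MAC is required */

	/* Register the Rx/Tx completion callbacks, then start LL2 */
	ll2_ops->register_cb_ops(cdev, cb_ops, cookie);
	return ll2_ops->start(cdev, &params);
}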
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+ return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+ kfree(cdev->ll2);
+ cdev->ll2 = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000000..80a5dc2d652d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,316 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+
+enum qed_ll2_roce_flavor_type {
+ QED_LL2_ROCE,
+ QED_LL2_RROCE,
+ MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_conn_type {
+ QED_LL2_TYPE_RESERVED,
+ QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TEST,
+ QED_LL2_TYPE_ISCSI_OOO,
+ QED_LL2_TYPE_RESERVED2,
+ QED_LL2_TYPE_ROCE,
+ QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_RX_CONN_TYPE
+};
+
+struct qed_ll2_rx_packet {
+ struct list_head list_entry;
+ struct core_rx_bd_with_buff_len *rxq_bd;
+ dma_addr_t rx_buf_addr;
+ u16 buf_length;
+ void *cookie;
+ u8 placement_offset;
+ u16 parse_flags;
+ u16 packet_length;
+ u16 vlan;
+ u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+ struct list_head list_entry;
+ u16 bd_used;
+ u16 vlan;
+ u16 l4_hdr_offset_w;
+ u8 bd_flags;
+ bool notify_fw;
+ void *cookie;
+
+ struct {
+ struct core_tx_bd *txq_bd;
+ dma_addr_t tx_frag;
+ u16 frag_len;
+ } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+};
+
+struct qed_ll2_rx_queue {
+ /* Lock protecting the Rx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain rxq_chain;
+ struct qed_chain rcq_chain;
+ u8 rx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head posting_descq;
+ struct qed_ll2_rx_packet *descq_array;
+ void __iomem *set_prod_addr;
+};
+
+struct qed_ll2_tx_queue {
+ /* Lock protecting the Tx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain txq_chain;
+ u8 tx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head sending_descq;
+ struct qed_ll2_tx_packet *descq_array;
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
+ u16 cur_completing_bd_idx;
+ void __iomem *doorbell_addr;
+ u16 bds_idx;
+ u16 cur_send_frag_num;
+ u16 cur_completing_frag_num;
+ bool b_completing_packet;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ enum qed_ll2_conn_type conn_type;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
+ u16 mtu;
+ u8 rx_drop_ttl0_flg;
+ u8 rx_vlan_removal_en;
+ u8 tx_tc;
+ enum core_tx_dest tx_dest;
+ enum core_error_handle ai_err_packet_too_big;
+ enum core_error_handle ai_err_no_buf;
+ u8 tx_stats_en;
+ struct qed_ll2_rx_queue rx_queue;
+ struct qed_ll2_tx_queue tx_queue;
+ u8 gsi_enable;
+};
+
+/**
+ * @brief qed_ll2_acquire_connection - allocates resources and starts
+ *        the Rx & Tx (if relevant) queue pair. Provides the
+ *        connection handle as an output parameter.
+ *
+ * @param p_hwfn
+ * @param p_params Contain various configuration properties
+ * @param rx_num_desc
+ * @param tx_num_desc
+ *
+ * @param p_connection_handle Output container for LL2 connection's handle
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle);
+
+/**
+ * @brief qed_ll2_establish_connection - starts a previously
+ *        allocated LL2 queue pair
+ *
+ * @param p_hwfn
+ * @param connection_handle	LL2 connection's handle obtained from
+ *				qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
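As a rough illustration of the intended call order for the two routines above (acquire first, then establish, releasing on failure), here is a minimal hypothetical bring-up sketch; the connection type, MTU and descriptor counts are illustrative only.

/* Hypothetical bring-up sketch - not part of the driver. Assumes a valid
 * p_hwfn; connection type, MTU and descriptor counts are illustrative.
 */
static int example_ll2_bringup(struct qed_hwfn *p_hwfn, u8 *p_handle)
{
	struct qed_ll2_info ll2_info;
	int rc;

	memset(&ll2_info, 0, sizeof(ll2_info));
	ll2_info.conn_type = QED_LL2_TYPE_TEST;
	ll2_info.mtu = 1500;
	ll2_info.tx_dest = CORE_TX_DEST_NW;

	/* Allocates chains/descriptors and registers the Rx/Tx callbacks */
	rc = qed_ll2_acquire_connection(p_hwfn, &ll2_info, 32, 32, p_handle);
	if (rc)
		return rc;

	/* Starts the firmware queue pair for the acquired handle */
	rc = qed_ll2_establish_connection(p_hwfn, *p_handle);
	if (rc)
		qed_ll2_release_connection(p_hwfn, *p_handle);

	return rc;
}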
+
+/**
+ * @brief qed_ll2_post_rx_buffer - submits a buffer to the LL2 Rx queue.
+ *
+ * @param p_hwfn
+ * @param connection_handle	LL2 connection's handle obtained from
+ *				qed_ll2_acquire_connection
+ * @param addr			physical address of the Rx buffer to submit
+ * @param buf_len		length of the Rx buffer
+ * @param cookie
+ * @param notify_fw produce corresponding Rx BD immediately
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw);
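Because the firmware producer update can be deferred through notify_fw, a caller may batch several postings and ring the firmware only on the last one. A hypothetical sketch, assuming pre-mapped buffers and illustrative array sizes:

/* Hypothetical sketch: post a small batch of pre-mapped Rx buffers and
 * defer the FW producer update until the last one. The buffer/cookie
 * arrays, their count and the buffer length are illustrative.
 */
static int example_post_rx_buffers(struct qed_hwfn *p_hwfn, u8 handle,
				   dma_addr_t *bufs, void **cookies,
				   int num, u16 buf_len)
{
	int i, rc = 0;

	for (i = 0; i < num; i++) {
		u8 notify_fw = (i == num - 1);

		rc = qed_ll2_post_rx_buffer(p_hwfn, handle, bufs[i],
					    buf_len, cookies[i], notify_fw);
		if (rc)
			break;
	}

	return rc;
}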
+
+/**
+ * @brief qed_ll2_prepare_tx_packet - requests a start Tx BD
+ *        to prepare a Tx packet for submission to the FW.
+ *
+ * @param p_hwfn
+ * @param connection_handle	LL2 connection's handle obtained from
+ *				qed_ll2_acquire_connection
+ * @param num_of_bds		number of requested BDs; equals the number of
+ *				fragments in the Tx packet
+ * @param vlan VLAN to insert to packet (if insertion set)
+ * @param bd_flags
+ * @param l4_hdr_offset_w L4 Header Offset from start of packet
+ * (in words). This is needed if both l4_csum
+ * and ipv6_ext are set
+ * @param first_frag
+ * @param first_frag_len
+ * @param cookie
+ *
+ * @param notify_fw
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_release_connection - releases resources
+ * allocated for LL2 connection
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *				qed_ll2_acquire_connection
+ */
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to fill
+ *					       one of the Tx BDs requested by
+ *					       qed_ll2_prepare_tx_packet
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ *					qed_ll2_acquire_connection
+ * @param addr
+ * @param nbytes
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
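Combined with qed_ll2_prepare_tx_packet above, a two-fragment transmit might look roughly like the sketch below; the DMA addresses and lengths are assumed to be mapped by the caller, and the VLAN/flag values are illustrative.

/* Hypothetical two-fragment transmit sketch. The first fragment opens the
 * packet via qed_ll2_prepare_tx_packet(); the second fills the remaining
 * BD, after which the doorbell is rung (notify_fw was set).
 */
static int example_ll2_xmit(struct qed_hwfn *p_hwfn, u8 handle,
			    dma_addr_t frag0, u16 len0,
			    dma_addr_t frag1, u16 len1, void *cookie)
{
	int rc;

	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 2 /* BDs */,
				       0 /* vlan */, 0 /* bd_flags */,
				       0 /* l4_hdr_offset_w */,
				       QED_LL2_ROCE, frag0, len0,
				       cookie, 1 /* notify_fw */);
	if (rc)
		return rc;

	return qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle, frag1, len1);
}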
+
+/**
+ * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ *					qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
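On teardown the queues are stopped before the resources are freed, mirroring what qed_ll2_stop() does; a minimal hypothetical sketch:

/* Hypothetical teardown sketch: stop Tx/Rx first, then free resources. */
static void example_ll2_teardown(struct qed_hwfn *p_hwfn, u8 handle)
{
	qed_ll2_terminate_connection(p_hwfn, handle);
	qed_ll2_release_connection(p_hwfn, handle);
}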
+
+/**
+ * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *				qed_ll2_acquire_connection
+ * @param p_stats
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
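A hypothetical polling sketch; the handle is assumed to come from qed_ll2_acquire_connection() and only two of the counters filled by the storm statistics are logged.

/* Hypothetical sketch: read the per-queue statistics and log a couple of
 * the counters.
 */
static void example_dump_ll2_stats(struct qed_hwfn *p_hwfn, u8 handle)
{
	struct qed_ll2_stats stats;

	if (qed_ll2_get_stats(p_hwfn, handle, &stats))
		return;

	DP_NOTICE(p_hwfn, "LL2: rx ucast pkts %llu, no-buff discards %llu\n",
		  (unsigned long long)stats.rcv_ucast_pkts,
		  (unsigned long long)stats.no_buff_discard);
}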
+
+/**
+ * @brief qed_ll2_alloc - Allocates LL2 connections set
+ *
+ * @param p_hwfn
+ *
+ * @return pointer to the allocated qed_ll2_info or NULL
+ */
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ll2_setup - Inits LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+
+/**
+ * @brief qed_ll2_free - Releases LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7dc34bfdd0a..4ee3151e80c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,15 +22,22 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#define QED_ROCE_QPS (8192)
+#define QED_ROCE_DPIS (8)
+#endif
+
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -51,8 +58,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
- pr_notice("qed_init called\n");
-
pr_info("%s", version);
return 0;
@@ -106,8 +111,7 @@ static void qed_free_pci(struct qed_dev *cdev)
/* Performs PCI initializations as well as initializing PCI-related parameters
* in the device structrue. Returns 0 in case of success.
*/
-static int qed_init_pci(struct qed_dev *cdev,
- struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
u8 rev_id;
int rc;
@@ -207,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported =
- (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
+ QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -263,8 +267,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
}
/* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
- pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
if (!cdev)
return -ENODEV;
@@ -366,8 +369,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
- rc = pci_enable_msix_exact(cdev->pdev,
- int_params->msix_table, cnt);
+ rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+ cnt);
if (!rc)
rc = cnt;
}
@@ -439,6 +442,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
}
out:
+ if (!rc)
+ DP_INFO(cdev, "Using %s interrupts\n",
+ int_params->out.int_mode == QED_INT_MODE_INTA ?
+ "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+ "MSI" : "MSIX");
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
@@ -514,19 +522,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
struct qed_dev *cdev = hwfn->cdev;
+ u32 int_mode;
int rc = 0;
u8 id;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX) {
id = hwfn->my_id;
snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
- if (!rc)
- DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
- "Requested slowpath MSI-X\n");
} else {
unsigned long flags = 0;
@@ -541,6 +548,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
flags, cdev->name, cdev);
}
+ if (rc)
+ DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+ else
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath %s\n",
+ (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
return rc;
}
@@ -581,6 +595,8 @@ static int qed_nic_stop(struct qed_dev *cdev)
}
}
+ qed_dbg_pf_exit(cdev);
+
return rc;
}
@@ -599,7 +615,16 @@ static int qed_nic_reset(struct qed_dev *cdev)
static int qed_nic_setup(struct qed_dev *cdev)
{
- int rc;
+ int rc, i;
+
+ /* Determine if interface is going to require LL2 */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->using_ll2 = true;
+ }
+ }
rc = qed_resc_alloc(cdev);
if (rc)
@@ -657,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ int num_l2_queues;
+#endif
int rc;
int i;
@@ -687,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ num_l2_queues = 0;
+ for_each_hwfn(cdev, i)
+ num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA,
+ "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+ cdev->int_params.fp_msix_cnt, num_l2_queues);
+
+ if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+ cdev->int_params.rdma_msix_cnt =
+ (cdev->int_params.fp_msix_cnt - num_l2_queues)
+ / cdev->num_hwfns;
+ cdev->int_params.rdma_msix_base =
+ cdev->int_params.fp_msix_base + num_l2_queues;
+ cdev->int_params.fp_msix_cnt = num_l2_queues;
+ } else {
+ cdev->int_params.rdma_msix_cnt = 0;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+ cdev->int_params.rdma_msix_cnt,
+ cdev->int_params.rdma_msix_base);
+#endif
+
return 0;
}
@@ -790,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -834,13 +894,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (IS_PF(cdev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ if (rc)
goto err2;
- }
/* First Dword used to diffrentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
+
+ qed_dbg_pf_init(cdev);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -864,6 +924,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ /* Allocate LL2 interface if needed */
+ if (QED_LEADING_HWFN(cdev)->using_ll2) {
+ rc = qed_ll2_alloc_if(cdev);
+ if (rc)
+ goto err3;
+ }
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
@@ -884,6 +950,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err3:
+ qed_hw_stop(cdev);
err2:
qed_hw_timers_stop_all(cdev);
if (IS_PF(cdev))
@@ -906,6 +974,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_ll2_dealloc_if(cdev);
+
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
@@ -974,8 +1044,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
@@ -1025,20 +1094,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
- (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
+ (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
- if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & 0)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1168,50 +1240,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = SUPPORTED_FIBRE;
+ if_link->supported_caps = QED_LM_FIBRE_BIT;
if (params.speed.autoneg)
- if_link->supported_caps |= SUPPORTED_Autoneg;
+ if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
- if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx)
- if_link->supported_caps |= SUPPORTED_Pause;
+ if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= 0;
+ if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
+ QED_LM_1000baseT_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= 0;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= 0;
+ if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.link_up)
if_link->speed = link.speed;
@@ -1231,33 +1309,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Half;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_1G_FD)
- if_link->lp_caps |= SUPPORTED_1000baseT_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_10G)
- if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_40G)
- if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_50G)
- if_link->lp_caps |= 0;
- if (link.partner_adv_speed &
- QED_LINK_PARTNER_SPEED_100G)
- if_link->lp_caps |= 0;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
+ if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
if (link.an_complete)
- if_link->lp_caps |= SUPPORTED_Autoneg;
+ if_link->lp_caps |= QED_LM_Autoneg_BIT;
if (link.partner_adv_pause)
- if_link->lp_caps |= SUPPORTED_Pause;
+ if_link->lp_caps |= QED_LM_Pause_BIT;
if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
- if_link->lp_caps |= SUPPORTED_Asym_Pause;
+ if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}
static void qed_get_current_link(struct qed_dev *cdev,
@@ -1385,9 +1459,32 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .dbg_all_data = &qed_dbg_all_data,
+ .dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats)
+{
+ struct qed_eth_stats eth_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+ qed_get_vport_stats(cdev, &eth_stats);
+ stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ break;
+ default:
+ DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f776a77794c5..bdc9ba92f6d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
return true;
}
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
- p_info->mfw_mb_shadow =
- kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_KERNEL);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
@@ -177,7 +171,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
return 0;
err:
- DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
qed_mcp_free(p_hwfn);
return -ENOMEM;
}
@@ -189,8 +182,7 @@ err:
* access is achieved by setting a blocking flag, which will fail other
* competing contexts to send their mailboxes.
*/
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
spin_lock_bh(&p_hwfn->mcp_info->lock);
@@ -221,15 +213,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
- u32 cmd)
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
@@ -326,7 +316,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
*o_mcp_resp = 0;
rc = -EAGAIN;
}
@@ -342,7 +333,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -398,9 +389,36 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+{
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+
+ return 0;
+}
+
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_load_code)
+ struct qed_ptt *p_ptt, u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
@@ -527,8 +545,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
"Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
transceiver_state,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data)));
+ offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
ETH_TRANSCEIVER_STATE);
@@ -540,8 +557,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_reset)
+ struct qed_ptt *p_ptt, bool b_reset)
{
struct qed_mcp_link_state *p_link;
u8 max_bw, min_bw;
@@ -557,8 +573,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
"Received link update [0x%08x] from mfw [Addr 0x%x]\n",
status,
(u32)(p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- link_status)));
+ offsetof(struct public_port, link_status)));
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
@@ -635,6 +650,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_20G : 0;
p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
QED_LINK_PARTNER_SPEED_40G : 0;
p_link->partner_adv_speed |=
@@ -722,6 +740,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum qed_mcp_protocol_type stats_type;
+ union qed_mcp_protocol_stats stats;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ u32 hsi_param;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = QED_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ stats_type = QED_MCP_FCOE_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+ break;
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ stats_type = QED_MCP_ISCSI_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+ break;
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ stats_type = QED_MCP_RDMA_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ memcpy(&union_data, &stats, sizeof(stats));
+ mb_params.p_data_src = &union_data;
+ qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
struct public_func *p_shmem_info)
{
@@ -752,8 +812,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
+ struct public_func *p_data, int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
@@ -763,51 +822,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
memset(p_data, 0, sizeof(*p_data));
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
return size;
}
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf)
-{
- struct public_func shmem_info;
- int i;
-
- /* Find first Ethernet interface in port */
- for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
- i += p_hwfn->cdev->num_ports_in_engines) {
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID_BY_REL(p_hwfn, i));
-
- if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
- continue;
-
- if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
- FUNC_MF_CFG_PROTOCOL_ETHERNET) {
- *p_pf = (u8)i;
- return 0;
- }
- }
-
- DP_NOTICE(p_hwfn,
- "Failed to find on port an ethernet interface in MF_SI mode\n");
-
- return -EINVAL;
-}
-
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
struct public_func shmem_info;
u32 resp = 0, param = 0;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
@@ -867,6 +895,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
@@ -940,8 +974,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
@@ -950,7 +983,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
}
@@ -1003,15 +1036,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_mcp_function_info *info;
struct public_func shmem_info;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
- MCP_PF_ID(p_hwfn));
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &info->protocol)) {
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
@@ -1072,15 +1103,13 @@ struct qed_mcp_link_capabilities
return &p_hwfn->mcp_info->link_capabilities;
}
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 1000,
- &resp, &param);
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
msleep(1020);
@@ -1089,8 +1118,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *p_flash_size)
+ struct qed_ptt *p_ptt, u32 *p_flash_size)
{
u32 flash_size;
@@ -1168,8 +1196,35 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_led_mode mode)
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 value, cpu_mode;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+ return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
u32 resp = 0, param = 0, drv_mb_param;
int rc;
@@ -1195,6 +1250,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not acknowledge mask parity request. Old MFW?\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
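
A minimal usage sketch of the new parity-masking helper above (the caller and the all-ones mask value are hypothetical, not part of this patch); it simply treats -EINVAL as "management firmware too old to acknowledge the request", as the error message in the helper suggests:

static int example_mask_all_parities(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	/* -EINVAL here means the MFW did not acknowledge the request
	 * (typically an older firmware image).
	 */
	return qed_mcp_mask_parities(p_hwfn, p_ptt, 0xffffffff);
}
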
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 7f319aa1b229..dff520ed069b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -60,9 +60,10 @@ struct qed_mcp_link_state {
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
-#define QED_LINK_PARTNER_SPEED_40G BIT(4)
-#define QED_LINK_PARTNER_SPEED_50G BIT(5)
-#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+#define QED_LINK_PARTNER_SPEED_25G BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G BIT(7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
@@ -105,6 +106,47 @@ struct qed_mcp_drv_version {
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
+struct qed_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+enum qed_mcp_protocol_type {
+ QED_MCP_LAN_STATS,
+ QED_MCP_FCOE_STATS,
+ QED_MCP_ISCSI_STATS,
+ QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+ struct qed_mcp_lan_stats lan_stats;
+ struct qed_mcp_fcoe_stats fcoe_stats;
+ struct qed_mcp_iscsi_stats iscsi_stats;
+ struct qed_mcp_rdma_stats rdma_stats;
+};
+
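
The enum/union pair above is what qed_mcp_send_protocol_stats() copies into the mailbox union data before issuing DRV_MSG_CODE_GET_STATS. A hedged sketch of populating the LAN member (the helper and its counter arguments are illustrative only):

static void example_fill_lan_stats(union qed_mcp_protocol_stats *stats,
				   u64 rx_pkts, u64 tx_pkts, u32 fcs_errors)
{
	memset(stats, 0, sizeof(*stats));
	stats->lan_stats.ucast_rx_pkts = rx_pkts;
	stats->lan_stats.ucast_tx_pkts = tx_pkts;
	stats->lan_stats.fcs_err = fcs_errors;
	/* ...then handed to the MFW under type QED_MCP_LAN_STATS */
}
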
/**
* @brief - returns the link params of the hw function
*
@@ -426,6 +468,29 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Sends an NVM read command request to the MFW and returns
+ * the data read back in a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+
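
The `param` dword documented above packs the offset into bits [0:23] and the size into bits [24:31]; a minimal sketch of composing it (the helper name and literal masks are illustrative; the driver itself would use the HSI shift/mask macros):

static u32 example_nvm_rd_param(u32 offset, u8 size)
{
	/* offset in bits [0:23], size in bits [24:31] */
	return (offset & 0x00ffffff) | ((u32)size << 24);
}
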
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
@@ -447,6 +512,26 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
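
A minimal sketch of how the new halt/resume pair might bracket a critical section (hypothetical caller; the work in the middle is elided):

static int example_with_mcp_halted(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	int rc = qed_mcp_halt(p_hwfn, p_ptt);

	if (rc)
		return rc;

	/* ... work that requires the MCP to be quiesced ... */

	/* qed_mcp_resume() returns -EAGAIN if the soft-halt bit is
	 * still set after it has been cleared.
	 */
	return qed_mcp_resume(p_hwfn, p_ptt);
}
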
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
@@ -458,6 +543,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
u8 min_bw);
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_pf);
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f6b86ca1ff79..b414a0542177 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -116,8 +116,14 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+ 0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
+#define TCFC_REG_STRONG_ENABLE_VF \
+ 0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+ 0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
@@ -202,6 +208,26 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+ 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+ 32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+ 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+ 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+ 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
+ 0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+ 16
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -258,6 +284,8 @@
0x1f0a1cUL
#define PRS_REG_ROCE_DEST_QP_MAX_PF \
0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+ 0x1f096cUL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -521,4 +549,910 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+
+#define PGLCS_REG_DBG_SELECT \
+ 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE \
+ 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT \
+ 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID \
+ 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME \
+ 0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+ 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+ 0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+ 0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+ 0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define DMAE_REG_DBG_SELECT \
+ 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+ 0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+ 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+ 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+ 0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+ 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+ 0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+ 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+ 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+ 0x040484UL
+#define GRC_REG_DBG_SELECT \
+ 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+ 0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+ 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID \
+ 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME \
+ 0x0500b4UL
+#define UMAC_REG_DBG_SELECT \
+ 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE \
+ 0x051098UL
+#define UMAC_REG_DBG_SHIFT \
+ 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID \
+ 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME \
+ 0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+ 0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+ 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+ 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+ 0x052444UL
+#define PCIE_REG_DBG_SELECT \
+ 0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+ 0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+ 0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+ 0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+ 0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+ 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+ 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+ 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+ 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+ 0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+ 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+ 0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+ 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID \
+ 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME \
+ 0x181588UL
+#define CAU_REG_DBG_SELECT \
+ 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+ 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+ 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID \
+ 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME \
+ 0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+ 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+ 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+ 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID \
+ 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME \
+ 0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2 \
+ 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+ 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 \
+ 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 \
+ 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+ 0x218264UL
+#define PRM_REG_DBG_SELECT \
+ 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+ 0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+ 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID \
+ 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME \
+ 0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+ 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+ 0x238704UL
+#define SRC_REG_DBG_SHIFT \
+ 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID \
+ 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME \
+ 0x238710UL
+#define RSS_REG_DBG_SELECT \
+ 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+ 0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+ 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID \
+ 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME \
+ 0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+ 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+ 0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+ 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID \
+ 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME \
+ 0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+ 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+ 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+ 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+ 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+ 0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+ 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+ 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+ 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+ 0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+ 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+ 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+ 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+ 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+ 0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+ 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+ 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+ 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+ 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+ 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+ 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+ 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+ 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+ 0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+ 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+ 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+ 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+ 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+ 0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+ 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+ 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+ 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+ 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+ 0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+ 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+ 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+ 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+ 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+ 0x2a8410UL
+#define TM_REG_DBG_SELECT \
+ 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE \
+ 0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+ 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+ 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+ 0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+ 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+ 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+ 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+ 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+ 0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+ 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+ 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+ 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+ 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+ 0x2e0510UL
+#define QM_REG_DBG_SELECT \
+ 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE \
+ 0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+ 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+ 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+ 0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+ 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+ 0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+ 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+ 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+ 0x300510UL
+#define TDIF_REG_DBG_SELECT \
+ 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+ 0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+ 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+ 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+ 0x310510UL
+#define BRB_REG_DBG_SELECT \
+ 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+ 0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+ 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID \
+ 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME \
+ 0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+ 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+ 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+ 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+ 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+ 0x4c1610UL
+#define YULD_REG_DBG_SELECT \
+ 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE \
+ 0x4c9604UL
+#define YULD_REG_DBG_SHIFT \
+ 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID \
+ 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME \
+ 0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+ 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+ 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+ 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+ 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+ 0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+ 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+ 0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+ 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+ 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+ 0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+ 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+ 0x502144UL
+#define NIG_REG_DBG_SHIFT \
+ 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID \
+ 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME \
+ 0x502150UL
+#define BMB_REG_DBG_SELECT \
+ 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+ 0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+ 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID \
+ 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME \
+ 0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+ 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+ 0x560104UL
+#define PTU_REG_DBG_SHIFT \
+ 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID \
+ 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME \
+ 0x560110UL
+#define CDU_REG_DBG_SELECT \
+ 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+ 0x580708UL
+#define CDU_REG_DBG_SHIFT \
+ 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID \
+ 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME \
+ 0x580714UL
+#define WOL_REG_DBG_SELECT \
+ 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE \
+ 0x600144UL
+#define WOL_REG_DBG_SHIFT \
+ 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID \
+ 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME \
+ 0x600150UL
+#define BMBN_REG_DBG_SELECT \
+ 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE \
+ 0x610144UL
+#define BMBN_REG_DBG_SHIFT \
+ 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID \
+ 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME \
+ 0x610150UL
+#define NWM_REG_DBG_SELECT \
+ 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE \
+ 0x8000f0UL
+#define NWM_REG_DBG_SHIFT \
+ 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID \
+ 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME \
+ 0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+ 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+ 0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+ 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID \
+ 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME \
+ 0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+ 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+ 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+ 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+ 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+ 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+ 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+ 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+ 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+ 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+ 0xda4738UL
+#define BTB_REG_DBG_SELECT \
+ 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+ 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+ 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID \
+ 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME \
+ 0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+ 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+ 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+ 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+ 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+ 0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+ 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+ 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+ 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+ 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+ 0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+ 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+ 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+ 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+ 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+ 0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+ 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+ 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+ 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+ 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+ 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+ 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+ 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+ 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+ 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+ 0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+ 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+ 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+ 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+ 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+ 0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+ 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+ 0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+ 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID \
+ 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME \
+ 0x1000050UL
+#define YCM_REG_DBG_SELECT \
+ 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+ 0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+ 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID \
+ 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME \
+ 0x1080050UL
+#define PCM_REG_DBG_SELECT \
+ 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+ 0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+ 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID \
+ 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME \
+ 0x1100050UL
+#define TCM_REG_DBG_SELECT \
+ 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+ 0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+ 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID \
+ 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME \
+ 0x1180050UL
+#define MCM_REG_DBG_SELECT \
+ 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+ 0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+ 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID \
+ 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME \
+ 0x1200050UL
+#define UCM_REG_DBG_SELECT \
+ 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+ 0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+ 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID \
+ 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME \
+ 0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+ 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+ 0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+ 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+ 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+ 0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+ 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+ 0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+ 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+ 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+ 0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+ 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+ 0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+ 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+ 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+ 0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+ 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+ 0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+ 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+ 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+ 0x1701538UL
+#define MSEM_REG_DBG_SELECT \
+ 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+ 0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+ 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+ 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+ 0x1801538UL
+#define USEM_REG_DBG_SELECT \
+ 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+ 0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+ 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+ 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+ 0x1901538UL
+#define PCIE_REG_DBG_COMMON_SELECT \
+ 0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+ 0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT \
+ 0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+ 0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+ 0x0543a8UL
+#define MISC_REG_RESET_PL_UA \
+ 0x008050UL
+#define MISC_REG_RESET_PL_HV \
+ 0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+ 0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+ 0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+ 0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+ 0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+ 0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+ 0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+ 0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+ 0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+ 0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+ 0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+ 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+ 0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+ 0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+ 0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+ 0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+ 0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+ 0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+ 0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+ 0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+ 0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+ 0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+ 0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+ 0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+ 0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+ 0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY \
+ 0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY \
+ 0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+ 0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE \
+ 0x1401408UL
+#define XSEM_REG_DBG_MODE1_CFG \
+ 0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY \
+ 0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+ 0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE \
+ 0x1501408UL
+#define YSEM_REG_DBG_MODE1_CFG \
+ 0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+ 0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY \
+ 0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY \
+ 0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+ 0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE \
+ 0x1601408UL
+#define PSEM_REG_DBG_MODE1_CFG \
+ 0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+ 0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY \
+ 0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY \
+ 0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+ 0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE \
+ 0x1701408UL
+#define TSEM_REG_DBG_MODE1_CFG \
+ 0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY \
+ 0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY \
+ 0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+ 0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE \
+ 0x1801408UL
+#define MSEM_REG_DBG_MODE1_CFG \
+ 0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+ 0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY \
+ 0x1901140UL
+#define USEM_REG_SYNC_DBG_EMPTY \
+ 0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+ 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+ 0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE \
+ 0x1901408UL
+#define USEM_REG_DBG_MODE1_CFG \
+ 0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+ 0x1940000UL
+#define SEM_FAST_REG_INT_RAM \
+ 0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+ 0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+ 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+ 0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+ 0x181520UL
+#define MCP_REG_CPU_MODE \
+ 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT \
+ (0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+ 0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+ 0x341500UL
+#define SEM_FAST_REG_STALL_0 \
+ 0x000488UL
+#define SEM_FAST_REG_STALLED \
+ 0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+ 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+ 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+ 0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+ 0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+ 0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+ 0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+ 0x009074UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
+#define MCP_REG_CPU_REG_FILE \
+ 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+ 32
+#define DBG_REG_DEBUG_TARGET \
+ 0x01005cUL
+#define DBG_REG_FULL_MODE \
+ 0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+ 0x010480UL
+#define GRC_REG_TRACE_FIFO \
+ 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+ 0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+ 0x010454UL
+#define DBG_REG_FRAMING_MODE \
+ 0x010058UL
+#define SEM_FAST_REG_VFC_DATA_WR \
+ 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+ 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+ 0x000b48UL
+#define RSS_REG_RSS_RAM_DATA \
+ 0x238c20UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define NWS_REG_NWS_CMU \
+ 0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+ 0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+ 0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+ 0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+ 0x0006c4UL
+#define MS_REG_MS_CMU \
+ 0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_REG_PHY0 \
+ 0x620000UL
+#define PHY_PCIE_REG_PHY1 \
+ 0x624000UL
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000000..23430059471c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,2954 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+#include "qed_ll2.h"
+
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
+ p_eqe->opcode, &p_eqe->data);
+}
+
+static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap) {
+ DP_NOTICE(p_hwfn,
+ "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
+ return -ENOMEM;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
+ bmap->bitmap);
+ return 0;
+}
+
+static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
+
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+
+ if (*id_num >= bmap->max_count) {
+ DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
+ bmap->max_count);
+ return -EINVAL;
+ }
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ return 0;
+}
+
+static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ return;
+ }
+}
+
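
The three helpers above form a simple bitmap-backed ID allocator: allocation takes the first free bit, release is a test-and-clear with a warning on double-free. A hedged usage sketch (the example function is hypothetical); elsewhere in this file the calls are wrapped in p_rdma_info->lock, which is kept here for consistency:

static int example_alloc_release_pd_id(struct qed_hwfn *p_hwfn)
{
	u32 id;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		return rc;

	/* ... use 'id' ... */

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	return 0;
}
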
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* The first SB ID used by RoCE comes after all the L2 SBs */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
+{
+ return QED_CAU_DEF_RX_TIMER_RES;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
+ rc);
+ goto free_rdma_info;
+ }
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
+ rc);
+ goto free_rdma_dev;
+ }
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ kfree(p_rdma_info->cid_map.bitmap);
+ kfree(p_rdma_info->tid_map.bitmap);
+ kfree(p_rdma_info->toggle_bits.bitmap);
+ kfree(p_rdma_info->cq_map.bitmap);
+ kfree(p_rdma_info->dpi_map.bitmap);
+ kfree(p_rdma_info->pd_map.bitmap);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
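
The GUID construction above is the usual EUI-48-to-EUI-64 expansion: flip the universal/local bit of the first MAC byte and splice ff:fe between the OUI and the device-specific half. A worked example with an assumed MAC address (not taken from the patch):

/* Assumed MAC: 00:0e:1e:12:34:56
 * guid[0] = 0x00 ^ 2 = 0x02      (universal/local bit flipped)
 * guid[1..2] = 0x0e, 0x1e        (remainder of the OUI)
 * guid[3..4] = 0xff, 0xfe        (EUI-64 filler)
 * guid[5..7] = 0x12, 0x34, 0x56  (device-specific half)
 * Resulting GUID: 02:0e:1e:ff:fe:12:34:56
 */
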
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to ILT page size within qed cxt.
+ * This is OK in terms of ILT but we don't want to configure the FW
+ * above its abilities
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids that QPs use, hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported: the maximum number of
+ * CQEs is derived from a two-layer PBL, where 8 is the pointer size
+ * in bytes and 32 is the CQ element size in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay writing to this reg until first cid is allocated. See
+ * qed_cxt_dynamic_ilt_alloc function for more details
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+ p_cnq_params->sb_num =
+ cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
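
The DPI doorbell addressing above is a simple linear mapping from the allocated DPI index; a worked example with assumed values (not from the patch):

/* Assumed: dpi_start_offset = 0x1000, dpi_size = 0x800, dpi = 3
 * dpi_addr      = doorbells    + 0x1000 + 3 * 0x800 = doorbells    + 0x2800
 * dpi_phys_addr = db_phys_addr + 0x1000 + 3 * 0x800 = db_phys_addr + 0x2800
 * dpi_size (returned to the caller) = 0x800
 */
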
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ return p_port;
+}
+
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit related to a given icid
+ * and returns the new toggle bit's value.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
+int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params, u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ spin_lock_bh(&p_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
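+/* Resize an existing CQ. The firmware returns the old producer and
+ * consumer values through a DMA-coherent output buffer, which is freed
+ * once the ramrod completes.
+ */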
+int qed_rdma_resize_cq(void *rdma_cxt,
+ struct qed_rdma_resize_cq_in_params *in_params,
+ struct qed_rdma_resize_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_resize_cq_output_params *p_ramrod_res;
+ struct rdma_resize_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ u8 fw_return_code;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_resize_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed resize cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_RESIZE_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_resize_cq;
+
+ p_ramrod->flags = 0;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
+ in_params->icid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
+ in_params->pbl_two_level);
+
+ p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
+ p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
+ p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ goto err;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
+ out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
+
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+ DP_NOTICE(p_hwfn, "Resize CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
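+/* Destroy a CQ: send the DESTROY_CQ ramrod, read back the CNQ
+ * notification count reported by the firmware and release the icid to
+ * the CQ bitmap.
+ */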
+int qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_destroy_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->
+ p_rdma_info->proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
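+/* Pack a 6-byte MAC address into the three 16-bit words the firmware
+ * expects, two bytes per word with the higher-order byte first.
+ */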
+static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ __le32 *dst_gid)
+{
+ u32 i;
+
+ if (qp->roce_mode == ROCE_V2_IPV4) {
+ /* The IPv4 addresses shall be aligned to the highest word.
+ * The lower words must be zero.
+ */
+ memset(src_gid, 0, sizeof(union qed_gid));
+ memset(dst_gid, 0, sizeof(union qed_gid));
+ src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+ dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+ } else {
+ /* GIDs and IPv6 addresses coincide in location and size */
+ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+ src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+ dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+ }
+ }
+}
+
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+ enum roce_flavor flavor;
+
+ switch (roce_mode) {
+ case ROCE_V1:
+ flavor = PLAIN_ROCE;
+ break;
+ case ROCE_V2_IPV4:
+ flavor = RROCE_IPV4;
+ break;
+ case ROCE_V2_IPV6:
+ flavor = RROCE_IPV6;
+ break;
+ default:
+ flavor = MAX_ROCE_FLAVOR;
+ break;
+ }
+ return flavor;
+}
+
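+/* Allocate a pair of adjacent cids for a RoCE QP: the lower one is used
+ * by the responder and the one right after it (icid + 1) by the
+ * requester. ILT pages backing both icids are allocated if needed.
+ */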
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 responder_icid;
+ u32 requester_icid;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &responder_icid);
+ if (rc) {
+ spin_unlock_bh(&p_rdma_info->lock);
+ return rc;
+ }
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ if (rc)
+ goto err;
+
+ /* the two icids should be adjacent */
+ if ((requester_icid - responder_icid) != 1) {
+ DP_NOTICE(p_hwfn, "Failed to allocate two adjacent QP icids\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+ requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+
+ /* If these icids require a new ILT line allocate DMA-able context for
+ * an ILT page
+ */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+ if (rc)
+ goto err;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+ if (rc)
+ goto err;
+
+ *cid = (u16)responder_icid;
+ return rc;
+
+err:
+ spin_lock_bh(&p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Allocate CID - failed, rc = %d\n", rc);
+ return rc;
+}
+
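+/* Offload the responder side of a QP to the firmware: allocate its IRQ
+ * ring and send a CREATE_QP ramrod on the responder icid (qp->icid).
+ */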
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for IRQ */
+ qp->irq_num_pages = 1;
+ qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->irq_phys_addr, GFP_KERNEL);
+ if (!qp->irq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->irq_num_pages = qp->irq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->rq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
+ rc, physical_queue0);
+
+ if (rc)
+ goto err;
+
+ qp->resp_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ return rc;
+}
+
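+/* Offload the requester side of a QP: allocate its ORQ ring and send a
+ * CREATE_QP ramrod on the requester icid (qp->icid + 1).
+ */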
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for ORQ */
+ qp->orq_num_pages = 1;
+ qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->orq_phys_addr, GFP_KERNEL);
+ if (!qp->orq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->orq_num_pages = qp->orq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->sq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+
+ if (rc)
+ goto err;
+
+ qp->req_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+ return rc;
+}
+
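+/* Update the already-offloaded responder via a MODIFY_QP ramrod; the
+ * modify_flags bitmap selects which attributes the firmware should take.
+ */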
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+ return rc;
+}
+
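+/* Same as the responder path, but on the requester icid (qp->icid + 1),
+ * with optional moves to the SQD or error states.
+ */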
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_sqd,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !(qp->req_offloaded))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+ qp->sqd_async);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+ return rc;
+}
+
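+/* Tear down the responder: send a DESTROY_QP ramrod, read the number of
+ * invalidated memory windows from the output buffer and free the IRQ
+ * ring.
+ */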
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_invalidated_mw)
+{
+ struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+ struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+ p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+
+ if (!p_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+
+ /* Free the IRQ only on success - otherwise the FW may still use it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ qp->resp_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct roce_destroy_qp_resp_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
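+/* Tear down the requester on icid + 1 and report the number of memory
+ * windows that were still bound.
+ */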
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_bound_mw)
+{
+ struct roce_destroy_qp_req_output_params *p_ramrod_res;
+ struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->req_offloaded)
+ return 0;
+
+ p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+
+ /* Free the ORQ only on success - otherwise the FW may still use it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+
+ qp->req_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
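+/* Query the firmware state of a QP. The responder and requester are
+ * queried with separate ramrods; if neither side has been offloaded yet
+ * the driver-cached values are returned instead.
+ */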
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+ struct roce_query_qp_req_output_params *p_req_ramrod_res;
+ struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+ struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t resp_ramrod_res_phys;
+ dma_addr_t req_ramrod_res_phys;
+ struct qed_spq_entry *p_ent;
+ bool rq_err_state;
+ bool sq_err_state;
+ bool sq_draining;
+ int rc = -ENOMEM;
+
+ if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
+ /* We can't send ramrod to the fw since this qp wasn't offloaded
+ * to the fw yet
+ */
+ out_params->draining = false;
+ out_params->rq_psn = qp->rq_psn;
+ out_params->sq_psn = qp->sq_psn;
+ out_params->state = qp->cur_state;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+ return 0;
+ }
+
+ if (!(qp->resp_offloaded)) {
+ DP_NOTICE(p_hwfn,
+ "The responder's qp should be offloded before requester's\n");
+ return -EINVAL;
+ }
+
+ /* Send a query responder ramrod to FW to get RQ-PSN and state */
+ p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
+ if (!p_resp_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_resp;
+
+ p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+ DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_resp;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+ if (!(qp->req_offloaded)) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+ out_params->state = qp->cur_state;
+
+ return 0;
+ }
+
+ /* Send a query requester ramrod to FW to get SQ-PSN and state */
+ p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_req_ramrod_res),
+ &req_ramrod_res_phys,
+ GFP_KERNEL);
+ if (!p_req_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ init_data.cid = qp->icid + 1;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_req;
+
+ p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+ DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_req;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+ sq_draining =
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+ else if (sq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_SQE;
+ else if (sq_draining)
+ out_params->draining = true;
+ out_params->state = qp->cur_state;
+
+ return 0;
+
+err_req:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+ return rc;
+err_resp:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+ return rc;
+}
+
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u32 num_invalidated_mw = 0;
+ u32 num_bound_mw = 0;
+ u32 start_cid;
+ int rc;
+
+ /* Destroys the specified QP */
+ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+ DP_NOTICE(p_hwfn,
+ "QP must be in error, reset or init state before destroying it\n");
+ return -EINVAL;
+ }
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
+ if (rc)
+ return rc;
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+
+ /* Release responder's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid - start_cid);
+
+ /* Release requester's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid + 1 - start_cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return 0;
+}
+
+int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
+ return NULL;
+ }
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ qp->qpid = ((0xFF << 16) | qp->icid);
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = qp->use_srq ? false : true;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
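+/* Drive the firmware QP state machine based on the previous and the new
+ * state: INIT/RESET->RTR creates the responder, RTR->RTS creates the
+ * requester, a move to ERR/SQE modifies both sides to error, and a move
+ * to RESET destroys both sides. The remaining handled transitions are
+ * plain modify ramrods.
+ */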
+static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ u32 num_invalidated_mw = 0, num_bound_mw = 0;
+ int rc = 0;
+
+ /* Perform additional operations according to the current state and the
+ * next state
+ */
+ if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+ (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+ /* Init->RTR or Reset->RTR */
+ rc = qed_roce_sp_create_responder(p_hwfn, qp);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTR-> RTS */
+ rc = qed_roce_sp_create_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+
+ /* Send modify responder ramrod */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTS->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* RTS->SQD */
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* SQD->SQD */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* SQD->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
+ qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ /* ->ERR */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+ params->modify_flags);
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+ /* Any state -> RESET */
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+ &num_bound_mw);
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ }
+
+ return rc;
+}
+
+int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
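+/* Register a memory region (TID) with the firmware: build a REGISTER_MR
+ * ramrod describing the access rights, page sizes and PBL, plus the
+ * optional DIF addresses.
+ */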
+int qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
+ p_hwfn->p_rdma_info->last_tid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
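+/* Deregister a TID. If the firmware reports the TID as still in use
+ * (NIG drain required), drain via the MCP and resend the ramrod once.
+ */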
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* Bit indicating that the TID is in use and a nig drain is
+ * required before sending the ramrod again
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return -EINVAL;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
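+/* LL2 completion callbacks for RoCE GSI packets: forward tx and rx
+ * completions to the callbacks registered through qed_roce_ll2_start().
+ */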
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_roce_ll2_packet *packet = cookie;
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+ roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+ cookie, first_frag_addr,
+ b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet)
+{
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+ struct qed_roce_ll2_rx_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_roce_ll2_packet pkt;
+
+ DP_VERBOSE(cdev,
+ QED_MSG_LL2,
+ "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+ (void *)(uintptr_t)rx_buf_addr,
+ data_length, data_length_error);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.n_seg = 1;
+ pkt.payload[0].baddr = rx_buf_addr;
+ pkt.payload[0].len = data_length;
+
+ memset(&params, 0, sizeof(params));
+ params.vlan_id = vlan;
+ *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+ *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+ if (data_length_error) {
+ DP_ERR(cdev,
+ "roce ll2 rx complete: data length error %d, length=%d\n",
+ data_length_error, data_length);
+ params.rc = -EINVAL;
+ }
+
+ roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev,
+ "qed roce mac filter failed - roce_info/ll2 NULL\n");
+ return -EINVAL;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwfn->ll2->lock);
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ new_mac_address);
+ mutex_unlock(&hwfn->ll2->lock);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+ return rc;
+}
+
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2;
+ struct qed_ll2_info ll2_params;
+ int rc;
+
+ if (!params) {
+ DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+ return -EINVAL;
+ }
+ if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+ params->cbs.tx_cb, params->cbs.rx_cb);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(params->mac_address)) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+ params->mac_address);
+ return -EINVAL;
+ }
+
+ /* Initialize */
+ roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+ if (!roce_ll2) {
+ DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+ return -ENOMEM;
+ }
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ roce_ll2->cbs = params->cbs;
+ roce_ll2->cb_cookie = params->cb_cookie;
+ mutex_init(&roce_ll2->lock);
+
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+ ll2_params.mtu = params->mtu;
+ ll2_params.rx_drop_ttl0_flg = true;
+ ll2_params.rx_vlan_removal_en = false;
+ ll2_params.tx_dest = CORE_TX_DEST_NW;
+ ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+ ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+ ll2_params.gsi_enable = true;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+ params->max_rx_buffers,
+ params->max_tx_buffers,
+ &roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ hwfn->ll2 = roce_ll2;
+
+ rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+ if (rc) {
+ hwfn->ll2 = NULL;
+ goto err2;
+ }
+ ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+ return 0;
+
+err2:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+ kfree(roce_ll2);
+ return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+ int rc;
+
+ if (!cdev) {
+ DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
+ return -EINVAL;
+ }
+
+ if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+ return -EINVAL;
+ }
+
+ /* remove LL2 MAC address filter */
+ rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+ eth_zero_addr(roce_ll2->mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+ rc);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ kfree(roce_ll2);
+
+ return rc;
+}
+
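+/* Transmit a RoCE packet over the LL2 connection: the header is posted
+ * as the first BD and each payload segment is then added as a fragment
+ * of the same packet.
+ */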
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_tx_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+ enum qed_ll2_roce_flavor_type qed_roce_flavor;
+ u8 flags = 0;
+ int rc;
+ int i;
+
+ if (!cdev || !pkt || !params) {
+ DP_ERR(cdev,
+ "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
+ cdev, pkt, params);
+ return -EINVAL;
+ }
+
+ qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+ : QED_LL2_RROCE;
+
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ /* Tx header */
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+ 1 + pkt->n_seg, 0, flags, 0,
+ qed_roce_flavor, pkt->header.baddr,
+ pkt->header.len, pkt, 1);
+ if (rc) {
+ DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_HEAD_FAILURE;
+ }
+
+ /* Tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+ if (rc) {
+ /* Not much can be done here: part of the packet has already
+ * been posted and the memory cannot be freed, so wait for
+ * the completion.
+ */
+ DP_ERR(cdev,
+ "roce ll2 tx: payload failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_FRAG_FAILURE;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw)
+{
+ return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->ll2->handle,
+ buf->baddr, buf->len,
+ (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle, stats);
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .roce_ll2_start = &qed_roce_ll2_start,
+ .roce_ll2_stop = &qed_roce_ll2_stop,
+ .roce_ll2_tx = &qed_roce_ll2_tx,
+ .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+ .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .roce_ll2_stats = &qed_roce_ll2_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
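For orientation, the ops table above is the whole control surface a RoCE upper driver sees. A minimal consumer-side sketch (not part of the patch) could look like the following; only qed_get_rdma_ops(), the roce_ll2_start op and the params fields referenced in qed_roce_ll2_start() are taken from this code, while the caller, the buffer/MTU sizing and the assumption that the params type is struct qed_roce_ll2_params from qed_roce_if.h are illustrative.

#include <linux/etherdevice.h>
#include <linux/qed/qed_roce_if.h>

/* Hypothetical caller - illustrative only */
static int example_roce_ll2_bringup(struct qed_dev *cdev, const u8 *mac)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_roce_ll2_params params = { 0 };

	params.max_rx_buffers = 128;	/* assumed sizing */
	params.max_tx_buffers = 128;
	params.mtu = 1500;
	ether_addr_copy(params.mac_address, mac);

	/* Acquires + establishes the LL2 connection and sets the MAC filter */
	return ops->roce_ll2_start(cdev, &params);
}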
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000000..2f091e8a0f40
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,216 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_ll2.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_resize_cq_in_params {
+ u16 icid;
+ u32 cq_size;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+};
+
+struct qed_rdma_resize_cq_out_params {
+ u32 prod;
+ u32 cons;
+};
+
+struct qed_rdma_resize_cnq_in_params {
+ u32 cnq_id;
+ u32 pbl_page_size_log;
+ u64 pbl_ptr;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+int
+qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params);
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
+void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
+int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params);
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
+int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
+int qed_rdma_stop(void *rdma_cxt);
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
+u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe);
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
+int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params);
+int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+#endif
+#endif
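As a quick illustration of how the TID prototypes above are meant to pair up, here is a hedged sketch; only qed_rdma_alloc_tid()/qed_rdma_free_tid() and the rdma_cxt-style handle come from this header, everything else is illustrative.

/* Not part of the patch: bracket a (hypothetical) registration with the
 * alloc/free prototypes declared above.
 */
static int example_tid_cycle(void *rdma_cxt)
{
	u32 tid;
	int rc;

	rc = qed_rdma_alloc_tid(rdma_cxt, &tid);
	if (rc)
		return rc;

	/* a real caller would program the TID via qed_rdma_register_tid()
	 * and later tear it down with qed_rdma_deregister_tid()
	 */

	qed_rdma_free_tid(rdma_cxt, tid);
	return 0;
}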
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index a342bfe4280d..9b7678f26909 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -2,6 +2,7 @@
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_sp.h"
+#include "qed_selftest.h"
int qed_selftest_memory(struct qed_dev *cdev)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index a548504c3420..652c90819758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -61,6 +61,10 @@ union ramrod_data {
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct rdma_init_func_ramrod_data rdma_init_func;
@@ -81,6 +85,7 @@ union ramrod_data {
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+ struct roce_init_func_ramrod_data roce_init_func;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
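The new core_*_queue_* members extend the slowpath ramrod union for the light-L2 (LL2) path. Below is a hedged sketch of the usual access pattern - qed_sp_init_request() followed by filling the union member and posting; the CORE_RAMROD_RX_QUEUE_START and PROTOCOLID_CORE constants are assumed from the HSI and are not part of this hunk.

/* Illustrative only - mirrors the common qed slowpath pattern */
static int example_core_rx_start(struct qed_hwfn *p_hwfn,
				 struct qed_sp_init_data *init_data)
{
	struct core_rx_start_ramrod_data *p_ramrod;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START, /* assumed opcode */
				 PROTOCOLID_CORE, init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
	/* ... fill MTU, queue ids, PBL address, etc. ... */

	return qed_spq_post(p_hwfn, p_ent, NULL);
}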
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a52f3fc051f5..2888eb0628f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -25,9 +25,7 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u8 cmd,
- u8 protocol,
- struct qed_sp_init_data *p_data)
+ u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
rc = qed_spq_get_entry(p_hwfn, pp_ent);
- if (rc != 0)
+ if (rc)
return rc;
p_ent = *pp_ent;
@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
- PROTOCOLID_COMMON,
- &init_data);
+ PROTOCOLID_COMMON, &init_data);
if (rc)
return rc;
@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
- qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
- &p_ramrod->tunnel_config);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index,
- p_ramrod->outer_tag);
+ sb, sb_index, p_ramrod->outer_tag);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index d73456eab1d7..caff41544898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,6 +28,9 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#include "qed_roce.h"
+#endif
/***************************************************************************
* Structures & Definitions
@@ -41,8 +44,7 @@
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
- union event_ring_data *data,
- u8 fw_return_code)
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
@@ -109,9 +111,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
{
p_ent->flags = 0;
@@ -189,8 +190,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
- struct qed_spq *p_spq,
- struct qed_spq_entry *p_ent)
+ struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -240,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ case PROTOCOLID_ROCE:
+ qed_async_roce_event(p_hwfn, p_eqe);
+ return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -255,8 +260,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
/***************************************************************************
* EQ API
***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
- u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +271,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
mmiowb();
}
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
- void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
@@ -323,17 +325,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
- if (!p_eq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ if (!p_eq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -342,17 +341,12 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ &p_eq->chain))
goto eq_allocate_fail;
- }
/* register EQ completion on the SP SB */
- qed_int_register_cb(p_hwfn,
- qed_eq_completion,
- p_eq,
- &p_eq->eq_sb_index,
- &p_eq->p_fw_cons);
+ qed_int_register_cb(p_hwfn, qed_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
@@ -361,14 +355,12 @@ eq_allocate_fail:
return NULL;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
if (!p_eq)
return;
@@ -379,10 +371,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
-static int qed_cqe_completion(
- struct qed_hwfn *p_hwfn,
- struct eth_slow_path_rx_cqe *cqe,
- enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
@@ -463,12 +454,9 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
u32 capacity;
/* SPQ struct */
- p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
- if (!p_spq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ if (!p_spq)
return -ENOMEM;
- }
/* SPQ ring */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -477,18 +465,14 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ &p_spq->chain))
goto spq_allocate_fail;
- }
/* allocate and fill the SPQ elements (incl. ramrod data list) */
capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- capacity *
- sizeof(struct qed_spq_entry),
+ capacity * sizeof(struct qed_spq_entry),
&p_phys, GFP_KERNEL);
-
if (!p_virt)
goto spq_allocate_fail;
@@ -525,9 +509,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
kfree(p_spq);
}
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +520,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
@@ -564,8 +547,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +568,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
*
* @return int
*/
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -604,8 +585,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_en2;
p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
list_del(&p_en2->list);
/* Copy the ring element physical pointer to the new
@@ -655,8 +635,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
- struct list_head *head,
- u32 keep_reserve)
+ struct list_head *head, u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
@@ -690,8 +669,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
- struct qed_spq_entry,
- list);
+ struct qed_spq_entry, list);
if (!p_ent)
return -EINVAL;
@@ -705,8 +683,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *fw_return_code)
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -803,8 +780,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return -EINVAL;
spin_lock_bh(&p_spq->lock);
- list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
- list) {
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
@@ -846,15 +822,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
if (!found) {
DP_NOTICE(p_hwfn,
- "Failed to find an entry this EQE completes\n");
+ "Failed to find an entry this EQE [echo %04x] completes\n",
+ le16_to_cpu(echo));
return -EEXIST;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -878,10 +861,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
- if (!p_consq) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ if (!p_consq)
return NULL;
- }
/* Allocate and initialize EQ chain*/
if (qed_chain_alloc(p_hwfn->cdev,
@@ -889,10 +870,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
- DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ 0x80, &p_consq->chain))
goto consq_allocate_fail;
- }
return p_consq;
@@ -901,14 +880,12 @@ consq_allocate_fail:
return NULL;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
if (!p_consq)
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 15399da268d9..d2d6621fe0e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
}
fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
- if (fp_minor > ETH_HSI_VER_MINOR) {
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -107,8 +108,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
+static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -185,8 +186,8 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
-int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
- int vfid, struct qed_ptt *p_ptt)
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
{
struct qed_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
@@ -454,10 +455,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
}
p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
- if (!p_sriov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_sriov)
return -ENOMEM;
- }
p_hwfn->pf_iov_info = p_sriov;
@@ -506,10 +505,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
/* Allocate a new struct for IOV information */
cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
- if (!cdev->p_iov_info) {
- DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ if (!cdev->p_iov_info)
return -ENOMEM;
- }
+
cdev->p_iov_info->pos = pos;
rc = qed_iov_pci_cfg_info(cdev);
@@ -575,7 +573,7 @@ static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
}
}
-void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
u16 i;
@@ -699,7 +697,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
&qzone_id);
reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
- val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
qed_wr(p_hwfn, p_ptt, reg_addr, val);
}
}
@@ -1090,13 +1088,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
- if (!(tlvs_mask & (1 << i)))
+ if (!(tlvs_mask & BIT(i)))
continue;
resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
qed_iov_vport_to_tlv(p_hwfn, i), size);
- if (tlvs_accepted & (1 << i))
+ if (tlvs_accepted & BIT(i))
resp->hdr.status = status;
else
resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1132,9 +1130,10 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
-struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+static struct
+qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
@@ -1145,7 +1144,7 @@ struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
return &vf->p_vf_info;
}
-void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
struct qed_public_vf_info *vf_info;
@@ -1241,6 +1240,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
return PFVF_STATUS_NO_RESOURCE;
}
@@ -1280,22 +1289,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
+ /* Write the PF version so that the VF knows which version
+ * is supported - it might be overridden later. This guarantees that
+ * the VF can recognize a legacy PF by the lack of versions in the reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
- DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
- vf->abs_vf_id,
- req->vfdev_info.eth_fp_hsi_major,
- req->vfdev_info.eth_fp_hsi_minor,
- ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
-
- /* Write the PF version so that VF would know which version
- * is supported.
- */
- pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
- pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
- goto out;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
}
/* On 100g PFs, prevent old VFs from loading */
@@ -1334,8 +1363,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
- pfdev_info->minor_fp_hsi = min_t(u8,
- ETH_HSI_VER_MINOR,
+
+ /* Incorrect for legacy VFs, but that doesn't matter since legacy
+ * VFs don't read this field.
+ */
+ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1470,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
filter.type = QED_FILTER_VLAN;
filter.vlan = p_vf->shadow_config.vlans[i].vid;
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = qed_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- QED_SPQ_MODE_CB, NULL);
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1492,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ if ((events & BIT(VLAN_ADDR_FORCED)) &&
!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
@@ -1479,7 +1508,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if (events & BIT(MAC_ADDR_FORCED)) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1502,7 +1531,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
- if (events & (1 << VLAN_ADDR_FORCED)) {
+ if (events & BIT(VLAN_ADDR_FORCED)) {
struct qed_sp_vport_update_params vport_update;
u8 removal;
int i;
@@ -1572,7 +1601,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (filter.vlan)
p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
else
- p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
}
/* If forced features are terminated, we need to configure the shadow
@@ -1619,8 +1648,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
- vf->igu_sbs[sb_id],
- vf->abs_vf_id, 1);
+ vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
@@ -1632,7 +1660,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
* vfs that would still be fine, since they passed '0' as padding].
*/
p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
- if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
u8 vf_req = start->only_untagged;
vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1650,9 +1678,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
+ params.check_mac = true;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc);
status = PFVF_STATUS_FAILURE;
@@ -1679,7 +1708,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->spoof_chk = false;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
- if (rc != 0) {
+ if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
rc);
status = PFVF_STATUS_FAILURE;
@@ -1695,21 +1724,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *vf, u8 status)
+ struct qed_vf_info *vf,
+ u8 status, bool b_legacy)
{
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
struct vfpf_start_rxq_tlv *req;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
offsetof(struct mstorm_vf_zone,
@@ -1717,7 +1757,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
- qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
@@ -1728,6 +1768,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
+ bool b_legacy_vf = false;
int rc;
memset(&params, 0, sizeof(params));
@@ -1743,13 +1784,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ b_legacy_vf = true;
+ } else {
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+ }
+
rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
&params,
vf->abs_vf_id + 0x10,
req->bd_max_bytes,
req->rxq_addr,
- req->cqe_pbl_addr, req->cqe_pbl_size);
+ req->cqe_pbl_addr, req->cqe_pbl_size,
+ b_legacy_vf);
if (rc) {
status = PFVF_STATUS_FAILURE;
@@ -1760,7 +1815,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
@@ -1769,23 +1824,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
mbx->offset = (u8 *)mbx->reply_virt;
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- sizeof(*p_tlv));
+ length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if (status == PFVF_STATUS_SUCCESS) {
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
- p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
+ p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
}
- qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
@@ -2045,7 +2115,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
/* Ignore the VF request if we're forcing a vlan */
- if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
p_data->update_inner_vlan_removal_flg = 1;
p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
}
@@ -2340,7 +2410,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
/* In forced mode, we're willing to remove entries - but we don't add
* new ones.
*/
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
return 0;
if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2444,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
int i;
/* If we're in forced-mode, we don't allow any change */
- if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
/* First remove entries and then add new ones */
@@ -2441,8 +2511,8 @@ qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
- int vfid, struct qed_filter_ucast *params)
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
{
struct qed_public_vf_info *vf;
@@ -2509,7 +2579,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
}
/* Determine if the unicast filtering is acceptable to the PF */
- if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
/* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2591,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
(params.type == QED_FILTER_MAC ||
params.type == QED_FILTER_MAC_VLAN)) {
if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2749,7 +2819,7 @@ cleanup:
/* Mark VF for ack and clean pending state */
if (p_vf->state == VF_RESET)
p_vf->state = VF_STOPPED;
- ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ ack_vfs[vfid / 32] |= BIT((vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
@@ -2759,7 +2829,8 @@ cleanup:
return rc;
}
-int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 ack_vfs[VF_MAX_STATIC / 32];
int rc = 0;
@@ -2805,7 +2876,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
continue;
vfid = p_vf->abs_vf_id;
- if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
u16 rel_vf_id = p_vf->relative_vf_id;
@@ -2946,7 +3017,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
u64 add_bit = 1ULL << (vfid % 64);
@@ -3064,14 +3135,13 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
/* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
-void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
- u16 pvid, int vfid)
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
@@ -3104,7 +3174,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
return !!p_vf_info->vport_instance;
}
-bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3126,7 +3196,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
return vf_info->spoof_chk;
}
-int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
struct qed_vf_info *vf;
int rc = -EINVAL;
@@ -3163,13 +3233,14 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
if (!p_vf || !p_vf->bulletin.p_virt)
return NULL;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
return NULL;
return p_vf->bulletin.p_virt->mac;
}
-u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
@@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
if (!p_vf || !p_vf->bulletin.p_virt)
return 0;
- if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
return 0;
return p_vf->bulletin.p_virt->pvid;
@@ -3201,7 +3272,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
struct qed_vf_info *vf;
u8 vport_id;
@@ -3760,7 +3832,8 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
-void qed_iov_pf_task(struct work_struct *work)
+static void qed_iov_pf_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 9b780b31b15c..abf5bf11f865 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
return p_tlv;
}
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
@@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = -EBUSY;
- goto exit;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
-exit:
- mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
-
return rc;
}
@@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn,
QED_MSG_IOV, "attempting to acquire resources\n");
+ /* Clear response buffer, as this might be a re-send */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
@@ -205,9 +215,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* PF agrees to allocate our resources */
if (!(resp->pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
- DP_INFO(p_hwfn,
- "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
- return -EINVAL;
+ /* It's possible a legacy PF mistakenly accepted;
+ * but we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
resources_acquired = true;
@@ -215,27 +228,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
attempts < VF_ACQUIRE_THRESH) {
qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = -EINVAL;
+ goto exit;
+ }
- /* Clear response buffer */
- memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
- } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
- pfdev_info->major_fp_hsi &&
- (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
- DP_NOTICE(p_hwfn,
- "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
- pfdev_info->major_fp_hsi,
- pfdev_info->minor_fp_hsi,
- ETH_HSI_VER_MAJOR,
- ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
- return -EINVAL;
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn,
+ "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to see if it supports FW-version override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If PF/VF are using the same major version, the PF must have
+ * had its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+ rc = -EINVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto exit;
}
}
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
- if (ETH_HSI_VER_MINOR &&
+ if (!p_iov->b_pre_fp_hsi &&
+ ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
}
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
@@ -286,31 +331,23 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
/* Allocate vf sriov info */
p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
- if (!p_iov) {
- DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ if (!p_iov)
return -ENOMEM;
- }
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
&p_iov->vf2pf_request_phys,
GFP_KERNEL);
- if (!p_iov->vf2pf_request) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `vf2pf_request' DMA memory\n");
+ if (!p_iov->vf2pf_request)
goto free_p_iov;
- }
p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
&p_iov->pf2vf_reply_phys,
GFP_KERNEL);
- if (!p_iov->pf2vf_reply) {
- DP_NOTICE(p_hwfn,
- "Failed to allocate `pf2vf_reply' DMA memory\n");
+ if (!p_iov->pf2vf_reply)
goto free_vf2pf_request;
- }
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -347,6 +384,9 @@ free_p_iov:
return -ENOMEM;
}
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_qid,
@@ -374,6 +414,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (pp_prod && p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -381,13 +436,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
/* Learn the address of the producer from the response */
- if (pp_prod) {
+ if (pp_prod && !p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -399,6 +456,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -424,10 +483,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -470,13 +534,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
if (pp_doorbell) {
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ u32 db_addr;
+
+ db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ db_addr;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
tx_queue_id, *pp_doorbell, resp->offset);
}
exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -501,10 +579,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -543,10 +626,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -567,10 +655,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -770,13 +863,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
@@ -797,14 +895,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
p_hwfn->b_int_enabled = 0;
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
@@ -828,6 +931,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
+ qed_vf_pf_req_end(p_hwfn, rc);
+
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
@@ -896,12 +1001,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EAGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
@@ -920,12 +1030,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
- return 0;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
@@ -1071,8 +1186,8 @@ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
return false;
}
-bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
- u8 *dst_mac, u8 *p_is_forced)
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
{
struct qed_bulletin_content *bulletin;
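Every VF->PF request in qed_vf.c above now funnels its exit paths through qed_vf_pf_req_end(), so the vf_iov_info mutex taken when the request is prepared is always released and both statuses get logged. A condensed sketch of that pattern follows (illustrative only; the INT_CLEANUP TLV is just an example request, and the helpers are the static ones from qed_vf.c).

static int example_vf_pf_request(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* builds the first TLV and takes p_iov->mutex */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EINVAL;

exit:
	/* logs both statuses and releases the mutex taken by prep */
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}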
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b23ce58e932f..35db7a28aa13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -86,7 +86,7 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
@@ -551,6 +551,11 @@ struct qed_vf_iov {
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case the PF predates the fp-hsi version scheme, this has to be
+ * propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
};
#ifdef CONFIG_QED_SRIOV
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 74a49850d74d..28dc58919c85 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
+qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 02b06d4e40ae..28c0e9f42c9e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,7 +25,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 1
+#define QEDE_REVISION_VERSION 9
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
@@ -36,6 +36,8 @@
struct qede_stats {
u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
@@ -104,6 +106,13 @@ struct qede_vlan {
bool configured;
};
+struct qede_rdma_dev {
+ struct qedr_dev *qedr_dev;
+ struct list_head entry;
+ struct list_head roce_event_list;
+ struct workqueue_struct *roce_wq;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -124,16 +133,22 @@ struct qede_dev {
(edev)->dev_info.num_tc)
struct qede_fastpath *fp_array;
- u16 req_rss;
- u16 num_rss;
+ u8 req_num_tx;
+ u8 fp_num_tx;
+ u8 req_num_rx;
+ u8 fp_num_rx;
+ u16 req_queues;
+ u16 num_queues;
u8 num_tc;
-#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
-#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
- (edev)->num_tc)
-#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
-#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
+ (edev)->num_tc)
+#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
+ QEDE_TSS_COUNT(edev))
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx) \
- (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
(edev), (txqidx))])
struct qed_int_info int_info;
@@ -177,6 +192,8 @@ struct qede_dev {
unsigned long sp_flags;
u16 vxlan_dst_port;
u16 geneve_dst_port;
+
+ struct qede_rdma_dev rdma_info;
};
enum QEDE_STATE {
@@ -235,6 +252,7 @@ struct qede_rx_queue {
u16 num_rx_buffers;
u16 rxq_id;
+ u64 rcv_pkts;
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -263,6 +281,10 @@ struct qede_tx_queue {
union db_prod tx_db;
u16 num_tx_buffers;
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+
+ bool is_legacy;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -277,7 +299,11 @@ struct qede_tx_queue {
struct qede_fastpath {
struct qede_dev *edev;
- u8 rss_id;
+#define QEDE_FASTPATH_TX BIT(0)
+#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+ u8 type;
+ u8 id;
struct napi_struct napi;
struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq;
@@ -337,6 +363,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
-#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f8492cac9290..25a9b293ee8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -35,6 +35,7 @@ static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rcv_pkts),
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
QEDE_RQSTAT(rx_ip_frags),
@@ -44,6 +45,24 @@ static const struct {
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
qede_rqstats_arr[(sindex)].offset)))
+#define QEDE_TQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_tx_queue, stat_name))
+#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_TQSTAT(stat_name) \
+ {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
+#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_tqstats_arr[] = {
+ QEDE_TQSTAT(xmit_pkts),
+ QEDE_TQSTAT(stopped_cnt),
+};
+
+#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
+ (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
+ qede_tqstats_arr[(sindex)].offset)))
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
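
The QEDE_TQSTAT* macros above record each Tx-queue counter as an {offset, name} pair so a single loop can both print the string table and fetch the value by offset from any queue instance. A small standalone illustration of the offsetof()-based technique; the struct and field names below are simplified stand-ins, not the driver's types:

    #include <stdio.h>
    #include <stddef.h>

    struct txq {                       /* stand-in for struct qede_tx_queue */
        unsigned long long xmit_pkts;
        unsigned long long stopped_cnt;
    };

    #define TQSTAT(name) { offsetof(struct txq, name), #name }

    static const struct {
        size_t offset;
        const char *string;
    } tqstats[] = {
        TQSTAT(xmit_pkts),
        TQSTAT(stopped_cnt),
    };

    int main(void)
    {
        struct txq q = { .xmit_pkts = 42, .stopped_cnt = 1 };
        size_t i;

        for (i = 0; i < sizeof(tqstats) / sizeof(tqstats[0]); i++) {
            unsigned long long *val =
                (unsigned long long *)((char *)&q + tqstats[i].offset);
            printf("%s = %llu\n", tqstats[i].string, *val);
        }
        return 0;
    }

In the driver the same idea feeds qede_get_strings_stats() and qede_get_ethtool_stats() below, which now emit one "<queue>: <name>" (and "<queue>.<tc>: <name>") entry per queue instead of a single aggregated counter.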
@@ -107,6 +126,8 @@ static const struct {
QEDE_PF_STAT(mftag_filter_discards),
QEDE_PF_STAT(mac_filter_discards),
QEDE_STAT(tx_err_drop_pkts),
+ QEDE_STAT(ttl0_discard),
+ QEDE_STAT(packet_too_big_discard),
QEDE_STAT(coalesced_pkts),
QEDE_STAT(coalesced_events),
@@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
+ for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ int tc;
+
+ for (j = 0; j < QEDE_NUM_RQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d: %s", i, qede_rqstats_arr[j].string);
+ k += QEDE_NUM_RQSTATS;
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) * ETH_GSTRING_LEN,
+ "%d.%d: %s", i, tc,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
+ }
+ }
+
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- strcpy(buf + j * ETH_GSTRING_LEN,
+ strcpy(buf + (k + j) * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
}
-
- for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
- strcpy(buf + j * ETH_GSTRING_LEN,
- qede_rqstats_arr[k].string);
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
+ for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
+ int tc;
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
+ buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
+ }
+
+ if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
+ buf[cnt++] = QEDE_TQSTATS_DATA(edev,
+ sidx,
+ qid, tc);
+ }
+ }
+ }
+
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
}
- for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
- buf[cnt] = 0;
- for (qid = 0; qid < edev->num_rss; qid++)
- buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
- cnt++;
- }
-
mutex_unlock(&edev->qede_lock);
}
@@ -227,7 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
- return num_stats + QEDE_NUM_RQSTATS;
+ return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
+ QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
@@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev)
return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
}
-static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+struct qede_link_mode_mapping {
+ u32 qed_link_mode;
+ u32 ethtool_link_mode;
+};
+
+static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
+ {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
+ {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
+ {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
+ {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
+ {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {QED_LM_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+};
+
+#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if ((caps) & (qed_lm_map[i].qed_link_mode)) \
+ __set_bit(qed_lm_map[i].ethtool_link_mode,\
+ lk_ksettings->link_modes.name); \
+ } \
+}
+
+#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < QED_LM_COUNT; i++) { \
+ if (test_bit(qed_lm_map[i].ethtool_link_mode, \
+ lk_ksettings->link_modes.name)) \
+ caps |= qed_lm_map[i].qed_link_mode; \
+ } \
+}
+
+static int qede_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
+ struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
- cmd->supported = current_link.supported_caps;
- cmd->advertising = current_link.advertised_caps;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
+
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+ QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
+
if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
- ethtool_cmd_speed_set(cmd, current_link.speed);
- cmd->duplex = current_link.duplex;
+ base->speed = current_link.speed;
+ base->duplex = current_link.duplex;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- cmd->port = current_link.port;
- cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
- AUTONEG_DISABLE;
- cmd->lp_advertising = current_link.lp_caps;
+ base->port = current_link.port;
+ base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
return 0;
}
-static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int qede_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
+ const struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
- u32 speed;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
-
memset(&current_link, 0, sizeof(current_link));
memset(&params, 0, sizeof(params));
edev->ops->common->get_link(edev->cdev, &current_link);
- speed = ethtool_cmd_speed(cmd);
params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (base->autoneg == AUTONEG_ENABLE) {
params.autoneg = true;
params.forced_speed = 0;
- params.adv_speeds = cmd->advertising;
- } else { /* forced speed */
+ QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
+ } else { /* forced speed */
params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
params.autoneg = false;
- params.forced_speed = speed;
- switch (speed) {
+ params.forced_speed = base->speed;
+ switch (base->speed) {
case SPEED_10000:
if (!(current_link.supported_caps &
- SUPPORTED_10000baseKR_Full)) {
+ QED_LM_10000baseKR_Full_BIT)) {
DP_INFO(edev, "10G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
+ break;
+ case SPEED_25000:
+ if (!(current_link.supported_caps &
+ QED_LM_25000baseKR_Full_BIT)) {
+ DP_INFO(edev, "25G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
break;
case SPEED_40000:
if (!(current_link.supported_caps &
- SUPPORTED_40000baseLR4_Full)) {
+ QED_LM_40000baseLR4_Full_BIT)) {
DP_INFO(edev, "40G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
+ break;
+ case SPEED_50000:
+ if (!(current_link.supported_caps &
+ QED_LM_50000baseKR2_Full_BIT)) {
+ DP_INFO(edev, "50G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
+ break;
+ case SPEED_100000:
+ if (!(current_link.supported_caps &
+ QED_LM_100000baseKR4_Full_BIT)) {
+ DP_INFO(edev, "100G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
break;
default:
- DP_INFO(edev, "Unsupported speed %u\n", speed);
+ DP_INFO(edev, "Unsupported speed %u\n", base->speed);
return -EINVAL;
}
}
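
The hunk above moves the driver to the ethtool link_ksettings API and converts capability masks through qed_lm_map[] in both directions (QEDE_DRV_TO_ETHTOOL_CAPS / QEDE_ETHTOOL_TO_DRV_CAPS). A standalone sketch of the forward translation loop; the bit definitions below are made up for illustration and stand in for the QED_LM_* and ETHTOOL_LINK_MODE_* constants:

    #include <stdio.h>

    /* Hypothetical capability bits on the driver side. */
    #define DRV_LM_FIBRE   (1u << 0)
    #define DRV_LM_AUTONEG (1u << 1)
    #define DRV_LM_10G     (1u << 2)

    /* Hypothetical ethtool-style bit positions. */
    enum { ETH_LM_FIBRE_BIT = 0, ETH_LM_AUTONEG_BIT = 1, ETH_LM_10G_BIT = 2 };

    static const struct {
        unsigned int drv_mode;
        unsigned int eth_bit;
    } lm_map[] = {
        { DRV_LM_FIBRE,   ETH_LM_FIBRE_BIT },
        { DRV_LM_AUTONEG, ETH_LM_AUTONEG_BIT },
        { DRV_LM_10G,     ETH_LM_10G_BIT },
    };

    int main(void)
    {
        unsigned int caps = DRV_LM_AUTONEG | DRV_LM_10G;  /* from get_link() */
        unsigned long supported = 0;                      /* ethtool bitmap  */
        size_t i;

        for (i = 0; i < sizeof(lm_map) / sizeof(lm_map[0]); i++)
            if (caps & lm_map[i].drv_mode)
                supported |= 1ul << lm_map[i].eth_bit;

        printf("supported bitmap = 0x%lx\n", supported);
        return 0;
    }

The reverse direction works the same way, testing each ethtool bit and OR-ing the matching driver capability into the advertised-speed mask.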
@@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
- edev->dp_module;
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}
static void qede_set_msglevel(struct net_device *ndev, u32 level)
@@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_params link_params;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
- DP_INFO(edev,
- "Link settings are not allowed to be changed\n");
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev,
rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs;
- for_each_rss(i) {
+ for_each_queue(i) {
sb_id = edev->fp_array[i].sb_info->igu_sb_id;
rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
(u8)i, sb_id);
@@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev,
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
if (epause->autoneg) {
- if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
+ if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
DP_INFO(edev, "autoneg not supported\n");
return -EINVAL;
}
@@ -580,6 +695,28 @@ static int qede_set_pauseparam(struct net_device *dev,
return 0;
}
+static void qede_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buffer)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ regs->version = 0;
+ memset(buffer, 0, regs->len);
+
+ if (edev->ops && edev->ops->common)
+ edev->ops->common->dbg_all_data(edev->cdev, buffer);
+}
+
+static int qede_get_regs_len(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->ops && edev->ops->common)
+ return edev->ops->common->dbg_all_data_size(edev->cdev);
+ else
+ return -EINVAL;
+}
+
static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
edev->ndev->mtu = args->mtu;
@@ -619,45 +756,70 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
- channels->combined_count = QEDE_RSS_CNT(edev);
+ channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+ edev->fp_num_rx;
+ channels->tx_count = edev->fp_num_tx;
+ channels->rx_count = edev->fp_num_rx;
}
static int qede_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
struct qede_dev *edev = netdev_priv(dev);
+ u32 count;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
channels->rx_count, channels->tx_count,
channels->other_count, channels->combined_count);
- /* We don't support separate rx / tx, nor `other' channels. */
- if (channels->rx_count || channels->tx_count ||
- channels->other_count || (channels->combined_count == 0) ||
- (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+ count = channels->rx_count + channels->tx_count +
+ channels->combined_count;
+
+ /* We don't support `other' channels */
+ if (channels->other_count) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"command parameters not supported\n");
return -EINVAL;
}
+ if (!(channels->combined_count || (channels->rx_count &&
+ channels->tx_count))) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "need to request at least one transmit and one receive channel\n");
+ return -EINVAL;
+ }
+
+ if (count > QEDE_MAX_RSS_CNT(edev)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "requested channels = %d max supported channels = %d\n",
+ count, QEDE_MAX_RSS_CNT(edev));
+ return -EINVAL;
+ }
+
/* Check if there was a change in the active parameters */
- if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+ if ((count == QEDE_QUEUE_CNT(edev)) &&
+ (channels->tx_count == edev->fp_num_tx) &&
+ (channels->rx_count == edev->fp_num_rx)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"No change in active parameters\n");
return 0;
}
/* We need the number of queues to be divisible between the hwfns */
- if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+ if ((count % edev->dev_info.common.num_hwfns) ||
+ (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+ (channels->rx_count % edev->dev_info.common.num_hwfns)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
- "Number of channels must be divisable by %04x\n",
+ "Number of channels must be divisible by %04x\n",
edev->dev_info.common.num_hwfns);
return -EINVAL;
}
/* Set number of queues and reload if necessary */
- edev->req_rss = channels->combined_count;
+ edev->req_queues = count;
+ edev->req_num_tx = channels->tx_count;
+ edev->req_num_rx = channels->rx_count;
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
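
qede_set_channels() above now accepts dedicated rx and tx channels alongside combined ones. A request is rejected unless it yields at least one Rx and one Tx path, stays within the device maximum, and each count is divisible by the number of hw-functions. A standalone sketch of those checks, using hypothetical limits:

    #include <stdio.h>
    #include <stdbool.h>

    static bool channels_valid(unsigned rx, unsigned tx, unsigned combined,
                               unsigned max_queues, unsigned num_hwfns)
    {
        unsigned count = rx + tx + combined;

        /* Need at least one Tx and one Rx path overall. */
        if (!(combined || (rx && tx)))
            return false;
        if (count > max_queues)
            return false;
        /* Each count must split evenly between the hw-functions. */
        if ((count % num_hwfns) || (tx % num_hwfns) || (rx % num_hwfns))
            return false;
        return true;
    }

    int main(void)
    {
        /* e.g. "ethtool -L <if> combined 4 rx 2" on a 2-hwfn (100G) device */
        printf("%d\n", channels_valid(2, 0, 4, 64, 2));   /* 1: accepted */
        printf("%d\n", channels_valid(1, 0, 4, 64, 2));   /* 0: rx not divisible */
        return 0;
    }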
@@ -727,7 +889,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = edev->num_rss;
+ info->data = QEDE_RSS_COUNT(edev);
return 0;
case ETHTOOL_GRXFH:
return qede_get_rss_flags(edev, info);
@@ -930,7 +1092,7 @@ static void qede_netif_start(struct qede_dev *edev)
if (!netif_running(edev->ndev))
return;
- for_each_rss(i) {
+ for_each_queue(i) {
/* Update and reenable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
napi_enable(&edev->fp_array[i].napi);
@@ -942,7 +1104,7 @@ static void qede_netif_stop(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
/* Disable interrupts */
qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
@@ -952,11 +1114,23 @@ static void qede_netif_stop(struct qede_dev *edev)
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct sk_buff *skb)
{
- struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
int i, idx, val;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ txq = edev->fp_array[i].txqs;
+ break;
+ }
+ }
+
+ if (!txq) {
+ DP_NOTICE(edev, "Tx path is not available\n");
+ return -1;
+ }
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb;
@@ -1020,14 +1194,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
u8 *data_ptr;
int i;
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ rxq = edev->fp_array[i].rxq;
+ break;
+ }
+ }
+
+ if (!rxq) {
+ DP_NOTICE(edev, "Rx path is not available\n");
+ return -1;
+ }
+
/* The packet is expected to be received on rx-queue 0 even though RSS is
* enabled. This is because queue 0 is configured as the default
* queue and the loopback traffic is not IP.
@@ -1228,9 +1414,11 @@ static int qede_get_tunable(struct net_device *dev,
}
static const struct ethtool_ops qede_ethtool_ops = {
- .get_settings = qede_get_settings,
- .set_settings = qede_set_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
+ .get_regs_len = qede_get_regs_len,
+ .get_regs = qede_get_regs,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
@@ -1260,7 +1448,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .get_settings = qede_get_settings,
+ .get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9544e4c41359..343038ca047d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -36,7 +36,7 @@
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
-
+#include <linux/qed/qede_roce.h>
#include "qede.h"
static char version[] =
@@ -100,7 +100,8 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
-static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -109,6 +110,9 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
@@ -189,8 +193,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
struct ethtool_drvinfo drvinfo;
struct qede_dev *edev;
- /* Currently only support name change */
- if (event != NETDEV_CHANGENAME)
+ if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
goto done;
/* Check whether this is a qede device */
@@ -203,11 +206,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
goto done;
edev = netdev_priv(ndev);
- /* Notify qed of the name change */
- if (!edev->ops || !edev->ops->common)
- goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name,
- "qede");
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ break;
+ case NETDEV_CHANGEADDR:
+ edev = netdev_priv(ndev);
+ qede_roce_event_changeaddr(edev);
+ break;
+ }
done:
return NOTIFY_DONE;
@@ -222,7 +232,7 @@ int __init qede_init(void)
{
int ret;
- pr_notice("qede_init: %s\n", version);
+ pr_info("qede_init: %s\n", version);
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
@@ -253,7 +263,8 @@ int __init qede_init(void)
static void __exit qede_cleanup(void)
{
- pr_notice("qede_cleanup called\n");
+ if (debug & QED_LOG_INFO_MASK)
+ pr_info("qede_cleanup called\n");
unregister_netdevice_notifier(&qede_netdev_notifier);
pci_unregister_driver(&qede_pci_driver);
@@ -270,8 +281,7 @@ module_exit(qede_cleanup);
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- int *len)
+ struct qede_tx_queue *txq, int *len)
{
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -329,8 +339,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
- int nbd,
- bool data_split)
+ int nbd, bool data_split)
{
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -339,8 +348,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
@@ -366,8 +374,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Return again prod to its position before this skb was handled */
qed_chain_set_prod(&txq->tx_pbl,
- le16_to_cpu(txq->tx_db.data.bd_prod),
- first_bd);
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
/* Free skb */
dev_kfree_skb_any(skb);
@@ -376,8 +383,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
}
static u32 qede_xmit_type(struct qede_dev *edev,
- struct sk_buff *skb,
- int *ipv6_ext)
+ struct sk_buff *skb, int *ipv6_ext)
{
u32 rc = XMIT_L4_CSUM;
__be16 l3_proto;
@@ -434,15 +440,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
}
static int map_frag_to_bd(struct qede_dev *edev,
- skb_frag_t *frag,
- struct eth_tx_bd *bd)
+ skb_frag_t *frag, struct eth_tx_bd *bd)
{
dma_addr_t mapping;
/* Map skb non-linear frag data for DMA */
mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
return -ENOMEM;
@@ -504,9 +508,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
}
/* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
struct netdev_queue *netdev_txq;
@@ -526,12 +529,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
- WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
- (MAX_SKB_FRAGS + 1));
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
@@ -606,6 +608,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
}
+ /* Legacy FW had flipped behavior with regard to this bit -
+ * i.e., it needed to be set to prevent the FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ first_bd->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
/* If the packet is IPv6 with an extension header, indicate that
* to FW and pass a few params, since the device cracker doesn't
* support parsing IPv6 with extension header(s).
@@ -731,6 +741,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
+ txq->stopped_cnt++;
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
/* paired memory barrier is in qede_tx_int(), we have to keep
@@ -764,8 +775,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
-static int qede_tx_int(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
@@ -791,6 +801,7 @@ static int qede_tx_int(struct qede_dev *edev,
bytes_compl += len;
pkts_compl++;
txq->sw_tx_cons++;
+ txq->xmit_pkts++;
}
netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
@@ -963,8 +974,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
static u32 qede_get_rxhash(struct qede_dev *edev,
u8 bitfields,
- __le32 rss_hash,
- enum pkt_hash_types *rxhash_type)
+ __le32 rss_hash, enum pkt_hash_types *rxhash_type)
{
enum rss_hash_type htype;
@@ -993,12 +1003,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
static inline void qede_skb_receive(struct qede_dev *edev,
struct qede_fastpath *fp,
- struct sk_buff *skb,
- u16 vlan_tag)
+ struct sk_buff *skb, u16 vlan_tag)
{
if (vlan_tag)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
}
@@ -1021,8 +1029,7 @@ static void qede_set_gro_params(struct qede_dev *edev,
static int qede_fill_frag_skb(struct qede_dev *edev,
struct qede_rx_queue *rxq,
- u8 tpa_agg_index,
- u16 len_on_bd)
+ u8 tpa_agg_index, u16 len_on_bd)
{
struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
NUM_RX_BDS_MAX];
@@ -1209,7 +1216,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#endif
send_skb:
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1413,7 +1420,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
edev->ops->eth_cqe_completion(
- edev->cdev, fp->rss_id,
+ edev->cdev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
goto next_cqe;
}
@@ -1470,7 +1477,7 @@ alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
+ "skb allocation failed, dropping incoming packet\n");
qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
goto next_cqe;
@@ -1578,14 +1585,13 @@ alloc_skb:
skb->protocol = eth_type_trans(skb, edev->ndev);
rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
- fp_cqe->rss_hash,
- &rxhash_type);
+ fp_cqe->rss_hash, &rxhash_type);
skb_set_hash(skb, rx_hash, rxhash_type);
qede_set_skb_csum(skb, csum_flag);
- skb_record_rx_queue(skb, fp->rss_id);
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
next_rx_only:
@@ -1604,6 +1610,8 @@ next_cqe: /* don't consume bd rx buffer */
/* Update producers */
qede_update_rx_prod(edev, rxq);
+ rxq->rcv_pkts += rx_pkt;
+
return rx_pkt;
}
@@ -1616,10 +1624,12 @@ static int qede_poll(struct napi_struct *napi, int budget)
u8 tc;
for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
+ if (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_txq_has_work(&fp->txqs[tc]))
qede_tx_int(edev, &fp->txqs[tc]);
- rx_work_done = qede_has_rx_work(fp->rxq) ?
+ rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
qed_sb_update_sb_idx(fp->sb_info);
@@ -1639,8 +1649,10 @@ static int qede_poll(struct napi_struct *napi, int budget)
rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
+ if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ||
+ (likely(fp->type & QEDE_FASTPATH_TX) &&
+ qede_has_tx_work(fp)))) {
napi_complete(napi);
/* Update and reenable interrupts */
@@ -1711,6 +1723,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
+ edev->stats.ttl0_discard = stats.ttl0_discard;
edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
@@ -1790,9 +1804,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
-static struct rtnl_link_stats64 *qede_get_stats64(
- struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -2106,14 +2120,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
}
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
- "marked vlan %d as non-configured\n",
- vlan->vid);
+ "marked vlan %d as non-configured\n", vlan->vid);
}
edev->accept_any_vlan = false;
}
-int qede_set_features(struct net_device *dev, netdev_features_t features)
+static int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
@@ -2149,7 +2162,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2160,7 +2173,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
edev->geneve_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2184,7 +2197,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2195,7 +2208,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
edev->geneve_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
@@ -2240,15 +2253,13 @@ static const struct net_device_ops qede_netdev_ops = {
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct pci_dev *pdev,
struct qed_dev_eth_info *info,
- u32 dp_module,
- u8 dp_level)
+ u32 dp_module, u8 dp_level)
{
struct net_device *ndev;
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues,
- info->num_queues);
+ info->num_queues, info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -2264,6 +2275,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+ info->num_queues, info->num_queues);
+
SET_NETDEV_DEV(ndev, &pdev->dev);
memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2352,7 +2366,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
struct qede_fastpath *fp;
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
fp = &edev->fp_array[i];
kfree(fp->sb_info);
@@ -2361,22 +2375,33 @@ static void qede_free_fp_array(struct qede_dev *edev)
}
kfree(edev->fp_array);
}
- edev->num_rss = 0;
+
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
+ u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
int i;
- edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
sizeof(*edev->fp_array), GFP_KERNEL);
if (!edev->fp_array) {
DP_NOTICE(edev, "fp array allocation failed\n");
goto err;
}
- for_each_rss(i) {
+ fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+ /* Allocate the FP elements for Rx queues followed by combined and then
+ * the Tx. This ordering should be maintained so that the respective
+ * queues (Rx or Tx) will be together in the fastpath array and the
+ * associated ids will be sequential.
+ */
+ for_each_queue(i) {
fp = &edev->fp_array[i];
fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
@@ -2385,16 +2410,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev, "RXQ struct allocation failed\n");
- goto err;
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+ } else if (fp_combined) {
+ fp->type = QEDE_FASTPATH_COMBINED;
+ fp_combined--;
+ } else {
+ fp->type = QEDE_FASTPATH_TX;
}
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev, "TXQ array allocation failed\n");
- goto err;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
+ GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev,
+ "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev,
+ "RXQ struct allocation failed\n");
+ goto err;
+ }
}
}
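
qede_alloc_fp_array() above assigns fastpath types in a fixed order - pure Rx entries first, then combined, then pure Tx - so queues of the same kind stay contiguous and their ids remain sequential. A standalone sketch of that ordering with hypothetical counts:

    #include <stdio.h>

    #define FP_TX (1 << 0)
    #define FP_RX (1 << 1)

    int main(void)
    {
        unsigned num_queues = 6, fp_num_rx = 2, fp_num_tx = 1;
        unsigned fp_rx = fp_num_rx;
        unsigned fp_combined = num_queues - fp_num_rx - fp_num_tx;
        unsigned i;

        for (i = 0; i < num_queues; i++) {
            unsigned type;

            if (fp_rx) {                 /* pure Rx entries first */
                type = FP_RX;
                fp_rx--;
            } else if (fp_combined) {    /* then combined Rx+Tx   */
                type = FP_RX | FP_TX;
                fp_combined--;
            } else {                     /* pure Tx entries last  */
                type = FP_TX;
            }
            printf("fp[%u]: %s%s\n", i,
                   (type & FP_RX) ? "RX" : "",
                   (type & FP_TX) ? "TX" : "");
        }
        return 0;
    }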
@@ -2456,7 +2498,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
bool is_vf, enum qede_probe_mode mode)
{
struct qed_probe_params probe_params;
- struct qed_slowpath_params params;
+ struct qed_slowpath_params sp_params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
struct qed_dev *cdev;
@@ -2479,14 +2521,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_update_pf_params(cdev);
/* Start the Slowpath-process */
- memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = QED_INT_MODE_MSIX;
- params.drv_major = QEDE_MAJOR_VERSION;
- params.drv_minor = QEDE_MINOR_VERSION;
- params.drv_rev = QEDE_REVISION_VERSION;
- params.drv_eng = QEDE_ENGINEERING_VERSION;
- strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
- rc = qed_ops->common->slowpath_start(cdev, &params);
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDE_MAJOR_VERSION;
+ sp_params.drv_minor = QEDE_MINOR_VERSION;
+ sp_params.drv_rev = QEDE_REVISION_VERSION;
+ sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
goto err1;
@@ -2509,10 +2551,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
+ rc = qede_roce_dev_add(edev);
+ if (rc)
+ goto err3;
+
rc = register_netdev(edev->ndev);
if (rc) {
DP_NOTICE(edev, "Cannot register net-device\n");
- goto err3;
+ goto err4;
}
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
@@ -2532,6 +2578,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
+err4:
+ qede_roce_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -2578,8 +2626,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
cancel_delayed_work_sync(&edev->sp_task);
+
unregister_netdev(ndev);
+ qede_roce_dev_remove(edev);
+
edev->ops->common->set_power_state(cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
@@ -2590,7 +2641,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
qed_ops->common->remove(cdev);
- pr_notice("Ending successfully qede_remove\n");
+ dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
@@ -2609,8 +2660,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
u16 rss_num;
/* Setup queues according to possible resources */
- if (edev->req_rss)
- rss_num = edev->req_rss;
+ if (edev->req_queues)
+ rss_num = edev->req_queues;
else
rss_num = netif_get_num_default_rss_queues() *
edev->dev_info.common.num_hwfns;
@@ -2620,11 +2671,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
if (rc > 0) {
/* Managed to request interrupts for our queues */
- edev->num_rss = rc;
+ edev->num_queues = rc;
DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
- QEDE_RSS_CNT(edev), rss_num);
+ QEDE_QUEUE_CNT(edev), rss_num);
rc = 0;
}
+
+ edev->fp_num_tx = edev->req_num_tx;
+ edev->fp_num_rx = edev->req_num_rx;
+
return rc;
}
@@ -2638,16 +2693,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info,
- u16 sb_id)
+ struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
sb_virt = dma_alloc_coherent(&edev->pdev->dev,
- sizeof(*sb_virt),
- &sb_phys, GFP_KERNEL);
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
@@ -2679,16 +2732,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
data = rx_buf->data;
dma_unmap_page(&edev->pdev->dev,
- rx_buf->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
__free_page(data);
}
}
-static void qede_free_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
int i;
if (edev->gro_disable)
@@ -2707,8 +2759,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
}
}
-static void qede_free_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
qede_free_sge_mem(edev, rxq);
@@ -2730,9 +2781,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
struct page *data;
- u16 rx_buf_size;
-
- rx_buf_size = rxq->rx_buf_size;
data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
@@ -2767,8 +2815,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static int qede_alloc_sge_mem(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
dma_addr_t mapping;
int i;
@@ -2815,15 +2862,14 @@ err:
}
/* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
- edev->ndev->mtu;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
if (rxq->rx_buf_size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE;
@@ -2877,8 +2923,7 @@ err:
return rc;
}
-static void qede_free_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
kfree(txq->sw_tx_ring);
@@ -2888,8 +2933,7 @@ static void qede_free_mem_txq(struct qede_dev *edev,
}
/* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
int size, rc;
union eth_tx_bd_types *p_virt;
@@ -2921,41 +2965,45 @@ err:
}
/* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int tc;
qede_free_mem_sb(edev, fp->sb_info);
- qede_free_mem_rxq(edev, fp->rxq);
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_free_mem_rxq(edev, fp->rxq);
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_TX)
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
- * which contains status block, one rx queue and multiple per-TC tx queues.
+ * which contains status block, one rx queue and/or multiple per-TC tx queues.
*/
-static int qede_alloc_mem_fp(struct qede_dev *edev,
- struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
int rc, tc;
- rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
- if (rc)
- goto err;
-
- rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
goto err;
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc)
goto err;
}
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+ }
+
return 0;
err:
return rc;
@@ -2965,7 +3013,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
qede_free_mem_fp(edev, fp);
@@ -2975,16 +3023,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
- int rc = 0, rss_id;
+ int rc = 0, queue_id;
- for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
- struct qede_fastpath *fp = &edev->fp_array[rss_id];
+ for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[queue_id];
rc = qede_alloc_mem_fp(edev, fp);
if (rc) {
DP_ERR(edev,
"Failed to allocate memory for fastpath - rss id = %d\n",
- rss_id);
+ queue_id);
qede_free_mem_load(edev);
return rc;
}
@@ -2996,30 +3044,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int rss_id, txq_index, tc;
+ int queue_id, rxq_index = 0, txq_index = 0, tc;
struct qede_fastpath *fp;
- for_each_rss(rss_id) {
- fp = &edev->fp_array[rss_id];
+ for_each_queue(queue_id) {
+ fp = &edev->fp_array[queue_id];
fp->edev = edev;
- fp->rss_id = rss_id;
+ fp->id = queue_id;
memset((void *)&fp->napi, 0, sizeof(fp->napi));
memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
- fp->rxq->rxq_id = rss_id;
+ if (fp->type & QEDE_FASTPATH_RX) {
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rxq_index++;
+ }
- memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
- fp->txqs[tc].index = txq_index;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ memset((void *)fp->txqs, 0,
+ (edev->num_tc * sizeof(*fp->txqs)));
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ fp->txqs[tc].index = txq_index +
+ tc * QEDE_TSS_COUNT(edev);
+ if (edev->dev_info.is_legacy)
+ fp->txqs[tc].is_legacy = true;
+ }
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
- edev->ndev->name, rss_id);
+ edev->ndev->name, queue_id);
}
edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
@@ -3029,12 +3085,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
return rc;
@@ -3047,7 +3104,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
{
int i;
- for_each_rss(i) {
+ for_each_queue(i) {
napi_disable(&edev->fp_array[i].napi);
netif_napi_del(&edev->fp_array[i].napi);
@@ -3059,7 +3116,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
int i;
/* Add NAPI objects */
- for_each_rss(i) {
+ for_each_queue(i) {
netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
qede_poll, NAPI_POLL_WEIGHT);
napi_enable(&edev->fp_array[i].napi);
@@ -3088,14 +3145,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
int i, rc;
/* Sanitize number of interrupts == number of prepared RSS queues */
- if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
DP_ERR(edev,
"Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
- QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
return -EINVAL;
}
- for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
@@ -3140,18 +3197,17 @@ static int qede_setup_irqs(struct qede_dev *edev)
/* qed should learn to receive the RSS ids and callbacks */
ops = edev->ops->common;
- for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
ops->simd_handler_config(edev->cdev,
&edev->fp_array[i], i,
qede_simd_fp_handler);
- edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
}
return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
- struct qede_tx_queue *txq,
- bool allow_drain)
+ struct qede_tx_queue *txq, bool allow_drain)
{
int rc, cnt = 1000;
@@ -3203,45 +3259,53 @@ static int qede_stop_queues(struct qede_dev *edev)
}
/* Flush Tx queues. If needed, request drain from MCP */
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
}
}
- /* Stop all Queues in reverse order*/
- for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params;
- /* Stop the Tx Queue(s)*/
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
-
- tx_params.rss_id = i;
- tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
- rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
+ /* Stop the Tx Queue(s) */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+ u8 val;
+
+ tx_params.rss_id = i;
+ val = edev->fp_array[i].txqs[tc].index;
+ tx_params.tx_queue_id = val;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
}
}
- /* Stop the Rx Queue*/
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = i;
+ /* Stop the Rx Queue */
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
- rc = edev->ops->q_rx_stop(cdev, &rx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
}
}
@@ -3264,7 +3328,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
- if (!edev->num_rss) {
+ if (!edev->num_queues) {
DP_ERR(edev,
"Cannot update V-VPORT as active as there are no Rx queues\n");
return -EINVAL;
@@ -3288,50 +3352,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
- dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
-
- memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- rc = edev->ops->q_rx_start(cdev, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- phys_table,
- fp->rxq->rx_comp_ring.page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
- if (rc) {
- DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
- return rc;
- }
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
- fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ struct qede_rx_queue *rxq = fp->rxq;
+ __le16 *val;
- qede_update_rx_prod(edev, fp->rxq);
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = rxq->rxq_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ p_phys_table =
+ qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+ page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ rxq->rx_buf_size,
+ rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt,
+ &rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+ rc);
+ return rc;
+ }
+
+ val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ rxq->hw_cons_ptr = val;
+
+ qede_update_rx_prod(edev, rxq);
+ }
+
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
for (tc = 0; tc < edev->num_tc; tc++) {
struct qede_tx_queue *txq = &fp->txqs[tc];
- int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
q_params.rss_id = i;
- q_params.queue_id = txq_index;
+ q_params.queue_id = txq->index;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
q_params.sb_idx = TX_PI(tc);
rc = edev->ops->q_tx_start(cdev, &q_params,
- txq->tx_pbl.pbl.p_phys_table,
- txq->tx_pbl.page_cnt,
+ p_phys_table, page_cnt,
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start TXQ #%d failed %d\n",
- txq_index, rc);
+ txq->index, rc);
return rc;
}
@@ -3362,13 +3442,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
/* Fill struct with RSS params */
- if (QEDE_RSS_CNT(edev) > 1) {
+ if (QEDE_RSS_COUNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
/* Need to validate current RSS config uses valid entries */
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
if (edev->rss_params.rss_ind_table[i] >=
- edev->num_rss) {
+ QEDE_RSS_COUNT(edev)) {
reset_rss_indir = true;
break;
}
@@ -3381,7 +3461,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
u16 indir_val;
- val = QEDE_RSS_CNT(edev);
+ val = QEDE_RSS_COUNT(edev);
indir_val = ethtool_rxfh_indir_default(i, val);
edev->rss_params.rss_ind_table[i] = indir_val;
}
@@ -3447,6 +3527,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Starting qede unload\n");
+ qede_roce_dev_event_close(edev);
mutex_lock(&edev->qede_lock);
edev->state = QEDE_STATE_CLOSED;
@@ -3510,7 +3591,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
if (rc)
goto err1;
DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_RSS_CNT(edev), edev->num_tc);
+ QEDE_QUEUE_CNT(edev), edev->num_tc);
rc = qede_set_real_num_queues(edev);
if (rc)
@@ -3547,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
/* Query whether link is already-up */
memset(&link_output, 0, sizeof(link_output));
edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
DP_INFO(edev, "Ending successfully qede load\n");
@@ -3563,7 +3645,9 @@ err2:
err1:
edev->ops->common->set_fp_int(edev->cdev, 0);
qede_free_fp_array(edev);
- edev->num_rss = 0;
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
err0:
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644
index 000000000000..9867f960b063
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_roce.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_roce_supported(struct qede_dev *dev)
+{
+ return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_roce_dev_add(struct qede_dev *edev)
+{
+ if (!qedr_drv)
+ return;
+
+ edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+ edev->ndev);
+}
+
+static int qede_roce_create_wq(struct qede_dev *edev)
+{
+ INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
+ edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
+ if (!edev->rdma_info.roce_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qede_roce_cleanup_event(struct qede_dev *edev)
+{
+ struct list_head *head = &edev->rdma_info.roce_event_list;
+ struct qede_roce_event_work *event_node;
+
+ flush_workqueue(edev->rdma_info.roce_wq);
+ while (!list_empty(head)) {
+ event_node = list_entry(head->next, struct qede_roce_event_work,
+ list);
+ cancel_work_sync(&event_node->work);
+ list_del(&event_node->list);
+ kfree(event_node);
+ }
+}
+
+static void qede_roce_destroy_wq(struct qede_dev *edev)
+{
+ qede_roce_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.roce_wq);
+}
+
+int qede_roce_dev_add(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ if (qede_roce_supported(edev)) {
+ rc = qede_roce_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_roce_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+
+ return rc;
+}
+
+static void _qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+ qedr_drv->remove(edev->rdma_info.qedr_dev);
+ edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ qede_roce_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_remove(edev);
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_open(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_close(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_roce_dev_shutdown(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+int qede_roce_register_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+ u8 qedr_counter = 0;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv) {
+ mutex_unlock(&qedr_dev_list_lock);
+ return -EINVAL;
+ }
+ qedr_drv = drv;
+
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ struct net_device *ndev;
+
+ qedr_counter++;
+ _qede_roce_dev_add(edev);
+ ndev = edev->ndev;
+ if (netif_running(ndev) && netif_oper_up(ndev))
+ _qede_roce_dev_open(edev);
+ }
+ mutex_unlock(&qedr_dev_list_lock);
+
+ DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
+ qedr_counter);
+
+ return 0;
+}
+EXPORT_SYMBOL(qede_roce_register_driver);
+
+void qede_roce_unregister_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+
+ mutex_lock(&qedr_dev_list_lock);
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ if (edev->rdma_info.qedr_dev)
+ _qede_roce_dev_remove(edev);
+ }
+ qedr_drv = NULL;
+ mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_roce_unregister_driver);
+
+static void qede_roce_changeaddr(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
+ *edev)
+{
+ struct qede_roce_event_work *event_node = NULL;
+ struct list_head *list_node = NULL;
+ bool found = false;
+
+ list_for_each(list_node, &edev->rdma_info.roce_event_list) {
+ event_node = list_entry(list_node, struct qede_roce_event_work,
+ list);
+ if (!work_pending(&event_node->work)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node) {
+ DP_NOTICE(edev,
+ "qedr: Could not allocate memory for roce work\n");
+ return NULL;
+ }
+ list_add_tail(&event_node->list,
+ &edev->rdma_info.roce_event_list);
+ }
+
+ return event_node;
+}
+
+static void qede_roce_handle_event(struct work_struct *work)
+{
+ struct qede_roce_event_work *event_node;
+ enum qede_roce_event event;
+ struct qede_dev *edev;
+
+ event_node = container_of(work, struct qede_roce_event_work, work);
+ event = event_node->event;
+ edev = event_node->ptr;
+
+ switch (event) {
+ case QEDE_UP:
+ qede_roce_dev_open(edev);
+ break;
+ case QEDE_DOWN:
+ qede_roce_dev_close(edev);
+ break;
+ case QEDE_CLOSE:
+ qede_roce_dev_shutdown(edev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qede_roce_changeaddr(edev);
+ break;
+ default:
+ DP_NOTICE(edev, "Invalid roce event %d", event);
+ }
+}
+
+static void qede_roce_add_event(struct qede_dev *edev,
+ enum qede_roce_event event)
+{
+ struct qede_roce_event_work *event_node;
+
+ if (!edev->rdma_info.qedr_dev)
+ return;
+
+ event_node = qede_roce_get_free_event_node(edev);
+ if (!event_node)
+ return;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_roce_handle_event);
+ queue_work(edev->rdma_info.roce_wq, &event_node->work);
+}
+
+void qede_roce_dev_event_open(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_UP);
+}
+
+void qede_roce_dev_event_close(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_DOWN);
+}
+
+void qede_roce_event_changeaddr(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+}
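The event plumbing above reuses idle work nodes from roce_event_list instead of allocating one per event. A standalone userspace sketch (not part of this patch) of that reuse pattern, with a made-up `pending` flag standing in for work_pending():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct event_node {
	struct event_node *next;
	bool pending;           /* stands in for work_pending() */
	int event;
};

static struct event_node *event_list;

/* Reuse an idle node if one exists, otherwise allocate and link a new one */
static struct event_node *get_free_event_node(void)
{
	struct event_node *n;

	for (n = event_list; n; n = n->next)
		if (!n->pending)
			return n;

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->next = event_list;
	event_list = n;
	return n;
}

int main(void)
{
	struct event_node *a = get_free_event_node();
	struct event_node *b;

	a->pending = true;      /* simulate a queued-but-unprocessed event */
	b = get_free_event_node();
	printf("second request %s a new node\n", a == b ? "reused" : "allocated");
	return 0;
}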
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 3ebef27e0964..3ae3968b0edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -432,18 +432,19 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
struct net_device *netdev,
- struct net_device *filter_dev, int idx)
+ struct net_device *filter_dev, int *idx)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
qlcnic_sriov_check(adapter))
- idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+ err = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
- return idx;
+ return err;
}
static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 24061b9b92e8..5f327659efa7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -238,7 +238,7 @@ int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
-int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
#else
static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index afd687e5e779..50eaafa3eaba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1915,7 +1915,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
}
int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
- u16 vlan, u8 qos)
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1928,6 +1928,9 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
if (vf >= sriov->num_vfs || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan > MAX_VLAN_ID) {
netdev_err(netdev,
"Invalid VLAN ID, allowed range is [0 - %d]\n",
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index a76e380cf89a..9ba568db576f 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -24,4 +24,16 @@ config QCA7000
To compile this driver as a module, choose M here. The module
will be called qcaspi.
+config QCOM_EMAC
+ tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
+ select CRC32
+ select PHYLIB
+ ---help---
+ This driver supports the Qualcomm Technologies, Inc. Gigabit
+ Ethernet Media Access Controller (EMAC). The controller
+ supports IEEE 802.3-2002, half-duplex mode at 10/100 Mb/s,
+	  full-duplex mode at 10/100/1000 Mb/s, Wake On LAN (WOL) for
+ low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+ Precision Clock Synchronization Protocol.
+
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9da2d75db700..aacb0a585c68 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_QCA7000) += qcaspi.o
qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o
+
+obj-y += emac/
diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile
new file mode 100644
index 000000000000..01ee144c6386
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Qualcomm Technologies, Inc. EMAC Gigabit Ethernet driver
+#
+
+obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
+
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
new file mode 100644
index 000000000000..e97968ed4b8f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -0,0 +1,1528 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support
+ */
+
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <net/ip6_checksum.h>
+#include "emac.h"
+#include "emac-sgmii.h"
+
+/* EMAC base register offsets */
+#define EMAC_MAC_CTRL 0x001480
+#define EMAC_WOL_CTRL0 0x0014a0
+#define EMAC_RSS_KEY0 0x0014b0
+#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0
+#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4
+#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8
+#define EMAC_INTER_SRAM_PART9 0x001534
+#define EMAC_DESC_CTRL_0 0x001540
+#define EMAC_DESC_CTRL_1 0x001544
+#define EMAC_DESC_CTRL_2 0x001550
+#define EMAC_DESC_CTRL_10 0x001554
+#define EMAC_DESC_CTRL_12 0x001558
+#define EMAC_DESC_CTRL_13 0x00155c
+#define EMAC_DESC_CTRL_3 0x001560
+#define EMAC_DESC_CTRL_4 0x001564
+#define EMAC_DESC_CTRL_5 0x001568
+#define EMAC_DESC_CTRL_14 0x00156c
+#define EMAC_DESC_CTRL_15 0x001570
+#define EMAC_DESC_CTRL_16 0x001574
+#define EMAC_DESC_CTRL_6 0x001578
+#define EMAC_DESC_CTRL_8 0x001580
+#define EMAC_DESC_CTRL_9 0x001584
+#define EMAC_DESC_CTRL_11 0x001588
+#define EMAC_TXQ_CTRL_0 0x001590
+#define EMAC_TXQ_CTRL_1 0x001594
+#define EMAC_TXQ_CTRL_2 0x001598
+#define EMAC_RXQ_CTRL_0 0x0015a0
+#define EMAC_RXQ_CTRL_1 0x0015a4
+#define EMAC_RXQ_CTRL_2 0x0015a8
+#define EMAC_RXQ_CTRL_3 0x0015ac
+#define EMAC_BASE_CPU_NUMBER 0x0015b8
+#define EMAC_DMA_CTRL 0x0015c0
+#define EMAC_MAILBOX_0 0x0015e0
+#define EMAC_MAILBOX_5 0x0015e4
+#define EMAC_MAILBOX_6 0x0015e8
+#define EMAC_MAILBOX_13 0x0015ec
+#define EMAC_MAILBOX_2 0x0015f4
+#define EMAC_MAILBOX_3 0x0015f8
+#define EMAC_MAILBOX_11 0x00160c
+#define EMAC_AXI_MAST_CTRL 0x001610
+#define EMAC_MAILBOX_12 0x001614
+#define EMAC_MAILBOX_9 0x001618
+#define EMAC_MAILBOX_10 0x00161c
+#define EMAC_ATHR_HEADER_CTRL 0x001620
+#define EMAC_CLK_GATE_CTRL 0x001814
+#define EMAC_MISC_CTRL 0x001990
+#define EMAC_MAILBOX_7 0x0019e0
+#define EMAC_MAILBOX_8 0x0019e4
+#define EMAC_MAILBOX_15 0x001bd4
+#define EMAC_MAILBOX_16 0x001bd8
+
+/* EMAC_MAC_CTRL */
+#define SINGLE_PAUSE_MODE 0x10000000
+#define DEBUG_MODE 0x08000000
+#define BROAD_EN 0x04000000
+#define MULTI_ALL 0x02000000
+#define RX_CHKSUM_EN 0x01000000
+#define HUGE 0x00800000
+#define SPEED(x) (((x) & 0x3) << 20)
+#define SPEED_MASK SPEED(0x3)
+#define SIMR 0x00080000
+#define TPAUSE 0x00010000
+#define PROM_MODE 0x00008000
+#define VLAN_STRIP 0x00004000
+#define PRLEN_BMSK 0x00003c00
+#define PRLEN_SHFT 10
+#define HUGEN 0x00000200
+#define FLCHK 0x00000100
+#define PCRCE 0x00000080
+#define CRCE 0x00000040
+#define FULLD 0x00000020
+#define MAC_LP_EN 0x00000010
+#define RXFC 0x00000008
+#define TXFC 0x00000004
+#define RXEN 0x00000002
+#define TXEN 0x00000001
+
+
+/* EMAC_WOL_CTRL0 */
+#define LK_CHG_PME 0x20
+#define LK_CHG_EN 0x10
+#define MG_FRAME_PME 0x8
+#define MG_FRAME_EN 0x4
+#define WK_FRAME_EN 0x1
+
+/* EMAC_DESC_CTRL_3 */
+#define RFD_RING_SIZE_BMSK 0xfff
+
+/* EMAC_DESC_CTRL_4 */
+#define RX_BUFFER_SIZE_BMSK 0xffff
+
+/* EMAC_DESC_CTRL_6 */
+#define RRD_RING_SIZE_BMSK 0xfff
+
+/* EMAC_DESC_CTRL_9 */
+#define TPD_RING_SIZE_BMSK 0xffff
+
+/* EMAC_TXQ_CTRL_0 */
+#define NUM_TXF_BURST_PREF_BMSK 0xffff0000
+#define NUM_TXF_BURST_PREF_SHFT 16
+#define LS_8023_SP 0x80
+#define TXQ_MODE 0x40
+#define TXQ_EN 0x20
+#define IP_OP_SP 0x10
+#define NUM_TPD_BURST_PREF_BMSK 0xf
+#define NUM_TPD_BURST_PREF_SHFT 0
+
+/* EMAC_TXQ_CTRL_1 */
+#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff
+
+/* EMAC_TXQ_CTRL_2 */
+#define TXF_HWM_BMSK 0xfff0000
+#define TXF_LWM_BMSK 0xfff
+
+/* EMAC_RXQ_CTRL_0 */
+#define RXQ_EN BIT(31)
+#define CUT_THRU_EN BIT(30)
+#define RSS_HASH_EN BIT(29)
+#define NUM_RFD_BURST_PREF_BMSK 0x3f00000
+#define NUM_RFD_BURST_PREF_SHFT 20
+#define IDT_TABLE_SIZE_BMSK 0x1ff00
+#define IDT_TABLE_SIZE_SHFT 8
+#define SP_IPV6 0x80
+
+/* EMAC_RXQ_CTRL_1 */
+#define JUMBO_1KAH_BMSK 0xf000
+#define JUMBO_1KAH_SHFT 12
+#define RFD_PREF_LOW_TH 0x10
+#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
+#define RFD_PREF_LOW_THRESHOLD_SHFT 6
+#define RFD_PREF_UP_TH 0x10
+#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
+#define RFD_PREF_UP_THRESHOLD_SHFT 0
+
+/* EMAC_RXQ_CTRL_2 */
+#define RXF_DOF_THRESFHOLD 0x1a0
+#define RXF_DOF_THRESHOLD_BMSK 0xfff0000
+#define RXF_DOF_THRESHOLD_SHFT 16
+#define RXF_UOF_THRESFHOLD 0xbe
+#define RXF_UOF_THRESHOLD_BMSK 0xfff
+#define RXF_UOF_THRESHOLD_SHFT 0
+
+/* EMAC_RXQ_CTRL_3 */
+#define RXD_TIMER_BMSK 0xffff0000
+#define RXD_THRESHOLD_BMSK 0xfff
+#define RXD_THRESHOLD_SHFT 0
+
+/* EMAC_DMA_CTRL */
+#define DMAW_DLY_CNT_BMSK 0xf0000
+#define DMAW_DLY_CNT_SHFT 16
+#define DMAR_DLY_CNT_BMSK 0xf800
+#define DMAR_DLY_CNT_SHFT 11
+#define DMAR_REQ_PRI 0x400
+#define REGWRBLEN_BMSK 0x380
+#define REGWRBLEN_SHFT 7
+#define REGRDBLEN_BMSK 0x70
+#define REGRDBLEN_SHFT 4
+#define OUT_ORDER_MODE 0x4
+#define ENH_ORDER_MODE 0x2
+#define IN_ORDER_MODE 0x1
+
+/* EMAC_MAILBOX_13 */
+#define RFD3_PROC_IDX_BMSK 0xfff0000
+#define RFD3_PROC_IDX_SHFT 16
+#define RFD3_PROD_IDX_BMSK 0xfff
+#define RFD3_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_2 */
+#define NTPD_CONS_IDX_BMSK 0xffff0000
+#define NTPD_CONS_IDX_SHFT 16
+
+/* EMAC_MAILBOX_3 */
+#define RFD0_CONS_IDX_BMSK 0xfff
+#define RFD0_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_11 */
+#define H3TPD_PROD_IDX_BMSK 0xffff0000
+#define H3TPD_PROD_IDX_SHFT 16
+
+/* EMAC_AXI_MAST_CTRL */
+#define DATA_BYTE_SWAP 0x8
+#define MAX_BOUND 0x2
+#define MAX_BTYPE 0x1
+
+/* EMAC_MAILBOX_12 */
+#define H3TPD_CONS_IDX_BMSK 0xffff0000
+#define H3TPD_CONS_IDX_SHFT 16
+
+/* EMAC_MAILBOX_9 */
+#define H2TPD_PROD_IDX_BMSK 0xffff
+#define H2TPD_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_10 */
+#define H1TPD_CONS_IDX_BMSK 0xffff0000
+#define H1TPD_CONS_IDX_SHFT 16
+#define H2TPD_CONS_IDX_BMSK 0xffff
+#define H2TPD_CONS_IDX_SHFT 0
+
+/* EMAC_ATHR_HEADER_CTRL */
+#define HEADER_CNT_EN 0x2
+#define HEADER_ENABLE 0x1
+
+/* EMAC_MAILBOX_0 */
+#define RFD0_PROC_IDX_BMSK 0xfff0000
+#define RFD0_PROC_IDX_SHFT 16
+#define RFD0_PROD_IDX_BMSK 0xfff
+#define RFD0_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_5 */
+#define RFD1_PROC_IDX_BMSK 0xfff0000
+#define RFD1_PROC_IDX_SHFT 16
+#define RFD1_PROD_IDX_BMSK 0xfff
+#define RFD1_PROD_IDX_SHFT 0
+
+/* EMAC_MISC_CTRL */
+#define RX_UNCPL_INT_EN 0x1
+
+/* EMAC_MAILBOX_7 */
+#define RFD2_CONS_IDX_BMSK 0xfff0000
+#define RFD2_CONS_IDX_SHFT 16
+#define RFD1_CONS_IDX_BMSK 0xfff
+#define RFD1_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_8 */
+#define RFD3_CONS_IDX_BMSK 0xfff
+#define RFD3_CONS_IDX_SHFT 0
+
+/* EMAC_MAILBOX_15 */
+#define NTPD_PROD_IDX_BMSK 0xffff
+#define NTPD_PROD_IDX_SHFT 0
+
+/* EMAC_MAILBOX_16 */
+#define H1TPD_PROD_IDX_BMSK 0xffff
+#define H1TPD_PROD_IDX_SHFT 0
+
+#define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20
+#define RXQ0_RSS_HSTYP_IPV6_EN 0x10
+#define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8
+#define RXQ0_RSS_HSTYP_IPV4_EN 0x4
+
+/* EMAC_EMAC_WRAPPER_TX_TS_INX */
+#define EMAC_WRAPPER_TX_TS_EMPTY BIT(31)
+#define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff
+
+struct emac_skb_cb {
+ u32 tpd_idx;
+ unsigned long jiffies;
+};
+
+#define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb)
+#define EMAC_RSS_IDT_SIZE 256
+#define JUMBO_1KAH 0x4
+#define RXD_TH 0x100
+#define EMAC_TPD_LAST_FRAGMENT 0x80000000
+#define EMAC_TPD_TSTAMP_SAVE 0x80000000
+
+/* EMAC Errors in emac_rrd.word[3] */
+#define EMAC_RRD_L4F BIT(14)
+#define EMAC_RRD_IPF BIT(15)
+#define EMAC_RRD_CRC BIT(21)
+#define EMAC_RRD_FAE BIT(22)
+#define EMAC_RRD_TRN BIT(23)
+#define EMAC_RRD_RNT BIT(24)
+#define EMAC_RRD_INC BIT(25)
+#define EMAC_RRD_FOV BIT(29)
+#define EMAC_RRD_LEN BIT(30)
+
+/* Error bits that will result in a received frame being discarded */
+#define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
+ EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
+ EMAC_RRD_FOV | EMAC_RRD_LEN)
+#define EMAC_RRD_STATS_DW_IDX 3
+
+#define EMAC_RRD(RXQ, SIZE, IDX) ((RXQ)->rrd.v_addr + (SIZE * (IDX)))
+#define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + (SIZE * (IDX)))
+#define EMAC_TPD(TXQ, SIZE, IDX) ((TXQ)->tpd.v_addr + (SIZE * (IDX)))
+
+#define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)]))
+#define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)]))
+
+#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8
+
+#define ISR_RX_PKT (\
+ RX_PKT_INT0 |\
+ RX_PKT_INT1 |\
+ RX_PKT_INT2 |\
+ RX_PKT_INT3)
+
+#define EMAC_MAC_IRQ_RES "core0"
+
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
+{
+ u32 crc32, bit, reg, mta;
+
+ /* Calculate the CRC of the MAC address */
+ crc32 = ether_crc(ETH_ALEN, addr);
+
+ /* The HASH Table is an array of 2 32-bit registers. It is
+ * treated like an array of 64 bits (BitArray[hash_value]).
+ * Use the upper 6 bits of the above CRC as the hash value.
+ */
+ reg = (crc32 >> 31) & 0x1;
+ bit = (crc32 >> 26) & 0x1F;
+
+ mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+ mta |= BIT(bit);
+ writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+}
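A standalone sketch (not part of the patch) of the register/bit selection described in the comment above; the CRC value is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crc32 = 0xD2C60000u;              /* made-up CRC of a MAC address */
	unsigned int reg = (crc32 >> 31) & 0x1;    /* which 32-bit hash register */
	unsigned int bit = (crc32 >> 26) & 0x1F;   /* which bit within that register */

	printf("hash slot %u -> register %u, bit %u\n",
	       (unsigned int)((crc32 >> 26) & 0x3F), reg, bit);
	return 0;
}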
+
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
+{
+ writel(0, adpt->base + EMAC_HASH_TAB_REG0);
+ writel(0, adpt->base + EMAC_HASH_TAB_REG1);
+}
+
+/* definitions for RSS */
+#define EMAC_RSS_KEY(_i, _type) \
+ (EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
+#define EMAC_RSS_TBL(_i, _type) \
+ (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))
+
+/* Config MAC modes */
+void emac_mac_mode_config(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ u32 mac;
+
+ mac = readl(adpt->base + EMAC_MAC_CTRL);
+ mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ mac |= VLAN_STRIP;
+
+ if (netdev->flags & IFF_PROMISC)
+ mac |= PROM_MODE;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ mac |= MULTI_ALL;
+
+ writel(mac, adpt->base + EMAC_MAC_CTRL);
+}
+
+/* Config descriptor rings */
+static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
+{
+ static const unsigned short tpd_q_offset[] = {
+ EMAC_DESC_CTRL_8, EMAC_H1TPD_BASE_ADDR_LO,
+ EMAC_H2TPD_BASE_ADDR_LO, EMAC_H3TPD_BASE_ADDR_LO};
+ static const unsigned short rfd_q_offset[] = {
+ EMAC_DESC_CTRL_2, EMAC_DESC_CTRL_10,
+ EMAC_DESC_CTRL_12, EMAC_DESC_CTRL_13};
+ static const unsigned short rrd_q_offset[] = {
+ EMAC_DESC_CTRL_5, EMAC_DESC_CTRL_14,
+ EMAC_DESC_CTRL_15, EMAC_DESC_CTRL_16};
+
+ /* TPD (Transmit Packet Descriptor) */
+ writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
+ adpt->base + EMAC_DESC_CTRL_1);
+
+ writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
+ adpt->base + tpd_q_offset[0]);
+
+ writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_9);
+
+ /* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */
+ writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
+ adpt->base + EMAC_DESC_CTRL_0);
+
+ writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
+ adpt->base + rfd_q_offset[0]);
+ writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
+ adpt->base + rrd_q_offset[0]);
+
+ writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_3);
+ writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_6);
+
+ writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
+ adpt->base + EMAC_DESC_CTRL_4);
+
+ writel(0, adpt->base + EMAC_DESC_CTRL_11);
+
+	/* Load all of the base addresses above and ensure that the write
+	 * triggering the HW to read the ring pointers is flushed
+ */
+ writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
+}
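The ring base addresses above are programmed as separate high and low words. A standalone sketch of the upper_32_bits()/lower_32_bits() split, using a made-up DMA address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000000123456780ull;   /* made-up DMA address */
	uint32_t hi = (uint32_t)(dma >> 32);    /* what upper_32_bits() yields */
	uint32_t lo = (uint32_t)dma;            /* what lower_32_bits() yields */

	printf("hi = 0x%08X, lo = 0x%08X\n", (unsigned int)hi, (unsigned int)lo);
	return 0;
}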
+
+/* Config transmit parameters */
+static void emac_mac_tx_config(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
+ JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);
+
+ val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
+ NUM_TPD_BURST_PREF_BMSK;
+
+ val |= TXQ_MODE | LS_8023_SP;
+ val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
+ NUM_TXF_BURST_PREF_BMSK;
+
+ writel(val, adpt->base + EMAC_TXQ_CTRL_0);
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
+ (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
+}
+
+/* Config receive parameters */
+static void emac_mac_rx_config(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
+ NUM_RFD_BURST_PREF_BMSK;
+ val |= (SP_IPV6 | CUT_THRU_EN);
+
+ writel(val, adpt->base + EMAC_RXQ_CTRL_0);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_1);
+ val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
+ RFD_PREF_UP_THRESHOLD_BMSK);
+ val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
+ (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
+ (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
+ writel(val, adpt->base + EMAC_RXQ_CTRL_1);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_2);
+ val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
+ val |= (RXF_DOF_THRESFHOLD << RXF_DOF_THRESHOLD_SHFT) |
+ (RXF_UOF_THRESFHOLD << RXF_UOF_THRESHOLD_SHFT);
+ writel(val, adpt->base + EMAC_RXQ_CTRL_2);
+
+ val = readl(adpt->base + EMAC_RXQ_CTRL_3);
+ val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
+ val |= RXD_TH << RXD_THRESHOLD_SHFT;
+ writel(val, adpt->base + EMAC_RXQ_CTRL_3);
+}
+
+/* Config dma */
+static void emac_mac_dma_config(struct emac_adapter *adpt)
+{
+ u32 dma_ctrl = DMAR_REQ_PRI;
+
+ switch (adpt->dma_order) {
+ case emac_dma_ord_in:
+ dma_ctrl |= IN_ORDER_MODE;
+ break;
+ case emac_dma_ord_enh:
+ dma_ctrl |= ENH_ORDER_MODE;
+ break;
+ case emac_dma_ord_out:
+ dma_ctrl |= OUT_ORDER_MODE;
+ break;
+ default:
+ break;
+ }
+
+ dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
+ REGRDBLEN_BMSK;
+ dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
+ REGWRBLEN_BMSK;
+ dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
+ DMAR_DLY_CNT_BMSK;
+ dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
+ DMAW_DLY_CNT_BMSK;
+
+ /* config DMA and ensure that configuration is flushed to HW */
+ writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
+}
+
+/* set MAC address */
+static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+{
+ u32 sta;
+
+ /* for example: 00-A0-C6-11-22-33
+ * 0<-->C6112233, 1<-->00A0.
+ */
+
+	/* low 32-bit word */
+ sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
+ (((u32)addr[4]) << 8) | (((u32)addr[5]));
+ writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);
+
+	/* high 32-bit word */
+ sta = (((u32)addr[0]) << 8) | (u32)addr[1];
+ writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
+}
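A standalone sketch showing how the example address in the comment above packs into the two station-address words:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0xA0, 0xC6, 0x11, 0x22, 0x33 };
	uint32_t lo = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
		      ((uint32_t)addr[4] << 8)  |  (uint32_t)addr[5];
	uint32_t hi = ((uint32_t)addr[0] << 8) | (uint32_t)addr[1];

	/* prints STA_ADDR0 = 0xC6112233, STA_ADDR1 = 0x00A0 */
	printf("STA_ADDR0 = 0x%08X, STA_ADDR1 = 0x%04X\n",
	       (unsigned int)lo, (unsigned int)hi);
	return 0;
}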
+
+static void emac_mac_config(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ unsigned int max_frame;
+ u32 val;
+
+ emac_set_mac_address(adpt, netdev->dev_addr);
+
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
+ ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;
+
+ emac_mac_dma_rings_config(adpt);
+
+ writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+ adpt->base + EMAC_MAX_FRAM_LEN_CTRL);
+
+ emac_mac_tx_config(adpt);
+ emac_mac_rx_config(adpt);
+ emac_mac_dma_config(adpt);
+
+ val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
+ val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
+ val |= MAX_BTYPE;
+ writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
+ writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
+ writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
+}
+
+void emac_mac_reset(struct emac_adapter *adpt)
+{
+ emac_mac_stop(adpt);
+
+ emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
+ usleep_range(100, 150); /* reset may take up to 100usec */
+
+ /* interrupt clear-on-read */
+ emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
+}
+
+void emac_mac_start(struct emac_adapter *adpt)
+{
+ struct phy_device *phydev = adpt->phydev;
+ u32 mac, csr1;
+
+ /* enable tx queue */
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);
+
+ /* enable rx queue */
+ emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);
+
+ /* enable mac control */
+ mac = readl(adpt->base + EMAC_MAC_CTRL);
+ csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+ mac |= TXEN | RXEN; /* enable RX/TX */
+
+ /* We don't have ethtool support yet, so force flow-control mode
+ * to 'full' always.
+ */
+ mac |= TXFC | RXFC;
+
+ /* setup link speed */
+ mac &= ~SPEED_MASK;
+ if (phydev->speed == SPEED_1000) {
+ mac |= SPEED(2);
+ csr1 |= FREQ_MODE;
+ } else {
+ mac |= SPEED(1);
+ csr1 &= ~FREQ_MODE;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL)
+ mac |= FULLD;
+ else
+ mac &= ~FULLD;
+
+ /* other parameters */
+ mac |= (CRCE | PCRCE);
+ mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
+ mac |= BROAD_EN;
+ mac |= FLCHK;
+ mac &= ~RX_CHKSUM_EN;
+ mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
+ DEBUG_MODE | SINGLE_PAUSE_MODE);
+
+ writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+ writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
+
+ /* enable interrupt read clear, low power sleep mode and
+ * the irq moderators
+ */
+
+ writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
+ writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
+ IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);
+
+ emac_mac_mode_config(adpt);
+
+ emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
+ (HEADER_ENABLE | HEADER_CNT_EN), 0);
+
+ emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
+}
+
+void emac_mac_stop(struct emac_adapter *adpt)
+{
+ emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
+ emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
+ emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
+	usleep_range(1000, 1050); /* stopping mac may take up to 1 msec */
+}
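The start/stop paths above rely on emac_reg_update32() for read-modify-write register updates. A standalone sketch of that helper's apparent clear-then-set semantics (an assumption from its usage here, not the driver's actual MMIO implementation):

#include <stdint.h>
#include <stdio.h>

/* Clear the bits in 'clear', then set the bits in 'set'. */
static uint32_t reg_update32(uint32_t reg, uint32_t clear, uint32_t set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint32_t txq_ctrl = 0x00000081;   /* made-up register value */
	uint32_t txq_en = 0x20;           /* TXQ_EN bit from the defines above */

	txq_ctrl = reg_update32(txq_ctrl, 0, txq_en);   /* start: enable queue */
	printf("enabled:  0x%08X\n", (unsigned int)txq_ctrl);
	txq_ctrl = reg_update32(txq_ctrl, txq_en, 0);   /* stop: disable queue */
	printf("disabled: 0x%08X\n", (unsigned int)txq_ctrl);
	return 0;
}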
+
+/* Free all descriptors of given transmit queue */
+static void emac_tx_q_descs_free(struct emac_adapter *adpt)
+{
+ struct emac_tx_queue *tx_q = &adpt->tx_q;
+ unsigned int i;
+ size_t size;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_q->tpd.tpbuff)
+ return;
+
+ for (i = 0; i < tx_q->tpd.count; i++) {
+ struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);
+
+ if (tpbuf->dma_addr) {
+ dma_unmap_single(adpt->netdev->dev.parent,
+ tpbuf->dma_addr, tpbuf->length,
+ DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ }
+ if (tpbuf->skb) {
+ dev_kfree_skb_any(tpbuf->skb);
+ tpbuf->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+ memset(tx_q->tpd.tpbuff, 0, size);
+
+ /* clear the descriptor ring */
+ memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);
+
+ tx_q->tpd.consume_idx = 0;
+ tx_q->tpd.produce_idx = 0;
+}
+
+/* Free all descriptors of given receive queue */
+static void emac_rx_q_free_descs(struct emac_adapter *adpt)
+{
+ struct device *dev = adpt->netdev->dev.parent;
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ unsigned int i;
+ size_t size;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_q->rfd.rfbuff)
+ return;
+
+ for (i = 0; i < rx_q->rfd.count; i++) {
+ struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);
+
+ if (rfbuf->dma_addr) {
+ dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
+ DMA_FROM_DEVICE);
+ rfbuf->dma_addr = 0;
+ }
+ if (rfbuf->skb) {
+ dev_kfree_skb(rfbuf->skb);
+ rfbuf->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct emac_buffer) * rx_q->rfd.count;
+ memset(rx_q->rfd.rfbuff, 0, size);
+
+ /* clear the descriptor rings */
+ memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
+ rx_q->rrd.produce_idx = 0;
+ rx_q->rrd.consume_idx = 0;
+
+ memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
+ rx_q->rfd.produce_idx = 0;
+ rx_q->rfd.consume_idx = 0;
+}
+
+/* Free all buffers associated with given transmit queue */
+static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
+{
+ struct emac_tx_queue *tx_q = &adpt->tx_q;
+
+ emac_tx_q_descs_free(adpt);
+
+ kfree(tx_q->tpd.tpbuff);
+ tx_q->tpd.tpbuff = NULL;
+ tx_q->tpd.v_addr = NULL;
+ tx_q->tpd.dma_addr = 0;
+ tx_q->tpd.size = 0;
+}
+
+/* Allocate TX descriptor ring for the given transmit queue */
+static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ size_t size;
+
+ size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+ tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL);
+ if (!tx_q->tpd.tpbuff)
+ return -ENOMEM;
+
+ tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
+ tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
+ tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(tx_q->tpd.size, 8);
+ tx_q->tpd.produce_idx = 0;
+ tx_q->tpd.consume_idx = 0;
+
+ return 0;
+}
+
+/* Free all buffers associated with given receive queue */
+static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
+{
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+
+ emac_rx_q_free_descs(adpt);
+
+ kfree(rx_q->rfd.rfbuff);
+ rx_q->rfd.rfbuff = NULL;
+
+ rx_q->rfd.v_addr = NULL;
+ rx_q->rfd.dma_addr = 0;
+ rx_q->rfd.size = 0;
+
+ rx_q->rrd.v_addr = NULL;
+ rx_q->rrd.dma_addr = 0;
+ rx_q->rrd.size = 0;
+}
+
+/* Allocate RX descriptor rings for the given receive queue */
+static int emac_rx_descs_alloc(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ size_t size;
+
+ size = sizeof(struct emac_buffer) * rx_q->rfd.count;
+ rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL);
+ if (!rx_q->rfd.rfbuff)
+ return -ENOMEM;
+
+ rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
+ rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
+
+ rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
+ rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(rx_q->rrd.size, 8);
+
+ rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
+ rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
+ ring_header->used += ALIGN(rx_q->rfd.size, 8);
+
+ rx_q->rrd.produce_idx = 0;
+ rx_q->rrd.consume_idx = 0;
+
+ rx_q->rfd.produce_idx = 0;
+ rx_q->rfd.consume_idx = 0;
+
+ return 0;
+}
+
+/* Allocate all TX and RX descriptor rings */
+int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct device *dev = adpt->netdev->dev.parent;
+ unsigned int num_tx_descs = adpt->tx_desc_cnt;
+ unsigned int num_rx_descs = adpt->rx_desc_cnt;
+ int ret;
+
+ adpt->tx_q.tpd.count = adpt->tx_desc_cnt;
+
+ adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
+ adpt->rx_q.rfd.count = adpt->rx_desc_cnt;
+
+ /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
+ * hence the additional padding bytes are allocated.
+ */
+ ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
+ num_rx_descs * (adpt->rfd_size * 4) +
+ num_rx_descs * (adpt->rrd_size * 4) +
+			    8 + 2 * 8; /* 8 bytes each for the one Tx and two Rx rings */
+
+ ring_header->used = 0;
+ ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
+ &ring_header->dma_addr,
+ GFP_KERNEL);
+ if (!ring_header->v_addr)
+ return -ENOMEM;
+
+ ring_header->used = ALIGN(ring_header->dma_addr, 8) -
+ ring_header->dma_addr;
+
+ ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
+ if (ret) {
+ netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
+ goto err_alloc_tx;
+ }
+
+ ret = emac_rx_descs_alloc(adpt);
+ if (ret) {
+ netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
+ goto err_alloc_rx;
+ }
+
+ return 0;
+
+err_alloc_rx:
+ emac_tx_q_bufs_free(adpt);
+err_alloc_tx:
+ dma_free_coherent(dev, ring_header->size,
+ ring_header->v_addr, ring_header->dma_addr);
+
+ ring_header->v_addr = NULL;
+ ring_header->dma_addr = 0;
+ ring_header->size = 0;
+ ring_header->used = 0;
+
+ return ret;
+}
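A standalone sketch of the shared ring-buffer sizing and the initial 8-byte alignment adjustment done above; the descriptor counts and per-descriptor word sizes are hypothetical, not taken from the driver:

#include <stdio.h>

#define ALIGN8(x) (((x) + 7ul) & ~7ul)

int main(void)
{
	unsigned long tx_descs = 512, rx_descs = 256;          /* hypothetical */
	unsigned long tpd_words = 4, rfd_words = 2, rrd_words = 6; /* hypothetical */
	unsigned long size, used;
	unsigned long dma = 0x1004;     /* made-up, not 8-byte aligned */

	size = tx_descs * (tpd_words * 4) +
	       rx_descs * (rfd_words * 4) +
	       rx_descs * (rrd_words * 4) +
	       8 + 2 * 8;               /* alignment padding per ring */

	used = ALIGN8(dma) - dma;       /* skip to the first 8-byte boundary */
	printf("total ring buffer = %lu bytes, initial used offset = %lu\n",
	       size, used);
	return 0;
}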
+
+/* Free all TX and RX descriptor rings */
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
+{
+ struct emac_ring_header *ring_header = &adpt->ring_header;
+ struct device *dev = adpt->netdev->dev.parent;
+
+ emac_tx_q_bufs_free(adpt);
+ emac_rx_q_bufs_free(adpt);
+
+ dma_free_coherent(dev, ring_header->size,
+ ring_header->v_addr, ring_header->dma_addr);
+
+ ring_header->v_addr = NULL;
+ ring_header->dma_addr = 0;
+ ring_header->size = 0;
+ ring_header->used = 0;
+}
+
+/* Initialize descriptor rings */
+static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
+{
+ unsigned int i;
+
+ adpt->tx_q.tpd.produce_idx = 0;
+ adpt->tx_q.tpd.consume_idx = 0;
+ for (i = 0; i < adpt->tx_q.tpd.count; i++)
+ adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;
+
+ adpt->rx_q.rrd.produce_idx = 0;
+ adpt->rx_q.rrd.consume_idx = 0;
+ adpt->rx_q.rfd.produce_idx = 0;
+ adpt->rx_q.rfd.consume_idx = 0;
+ for (i = 0; i < adpt->rx_q.rfd.count; i++)
+ adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
+}
+
+/* Produce new receive free descriptor */
+static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q,
+ dma_addr_t addr)
+{
+ u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);
+
+ *(hw_rfd++) = lower_32_bits(addr);
+ *hw_rfd = upper_32_bits(addr);
+
+ if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
+ rx_q->rfd.produce_idx = 0;
+}
+
+/* Fill up receive queue's RFD with preallocated receive buffers */
+static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q)
+{
+ struct emac_buffer *curr_rxbuf;
+ struct emac_buffer *next_rxbuf;
+ unsigned int count = 0;
+ u32 next_produce_idx;
+
+ next_produce_idx = rx_q->rfd.produce_idx + 1;
+ if (next_produce_idx == rx_q->rfd.count)
+ next_produce_idx = 0;
+
+ curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+ next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+
+	/* the ring always keeps one blank rx_buffer */
+ while (!next_rxbuf->dma_addr) {
+ struct sk_buff *skb;
+ int ret;
+
+ skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
+ if (!skb)
+ break;
+
+ curr_rxbuf->dma_addr =
+ dma_map_single(adpt->netdev->dev.parent, skb->data,
+ curr_rxbuf->length, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ curr_rxbuf->dma_addr);
+ if (ret) {
+ dev_kfree_skb(skb);
+ break;
+ }
+ curr_rxbuf->skb = skb;
+ curr_rxbuf->length = adpt->rxbuf_size;
+
+ emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
+ next_produce_idx = rx_q->rfd.produce_idx + 1;
+ if (next_produce_idx == rx_q->rfd.count)
+ next_produce_idx = 0;
+
+ curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+ next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+ count++;
+ }
+
+ if (count) {
+ u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
+ rx_q->produce_mask;
+ emac_reg_update32(adpt->base + rx_q->produce_reg,
+ rx_q->produce_mask, prod_idx);
+ }
+}
+
+static void emac_adjust_link(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (phydev->link)
+ emac_mac_start(adpt);
+ else
+ emac_mac_stop(adpt);
+
+ phy_print_status(phydev);
+}
+
+/* Bring up the interface/HW */
+int emac_mac_up(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ struct emac_irq *irq = &adpt->irq;
+ int ret;
+
+ emac_mac_rx_tx_ring_reset_all(adpt);
+ emac_mac_config(adpt);
+
+ ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not request %s irq\n",
+ EMAC_MAC_IRQ_RES);
+ return ret;
+ }
+
+ emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+
+ ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not connect phy\n");
+ free_irq(irq->irq, irq);
+ return ret;
+ }
+
+ /* enable mac irq */
+ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
+ writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
+
+ adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+ phy_start(adpt->phydev);
+
+ napi_enable(&adpt->rx_q.napi);
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+/* Bring down the interface/HW */
+void emac_mac_down(struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+
+ netif_stop_queue(netdev);
+ napi_disable(&adpt->rx_q.napi);
+
+ phy_stop(adpt->phydev);
+ phy_disconnect(adpt->phydev);
+
+ /* disable mac irq */
+ writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
+ writel(0, adpt->base + EMAC_INT_MASK);
+ synchronize_irq(adpt->irq.irq);
+ free_irq(adpt->irq.irq, &adpt->irq);
+
+ emac_mac_reset(adpt);
+
+ emac_tx_q_descs_free(adpt);
+ netdev_reset_queue(adpt->netdev);
+ emac_rx_q_free_descs(adpt);
+}
+
+/* Consume next received packet descriptor */
+static bool emac_rx_process_rrd(struct emac_adapter *adpt,
+ struct emac_rx_queue *rx_q,
+ struct emac_rrd *rrd)
+{
+ u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);
+
+ rrd->word[3] = *(hw_rrd + 3);
+
+ if (!RRD_UPDT(rrd))
+ return false;
+
+ rrd->word[4] = 0;
+ rrd->word[5] = 0;
+
+ rrd->word[0] = *(hw_rrd++);
+ rrd->word[1] = *(hw_rrd++);
+ rrd->word[2] = *(hw_rrd++);
+
+ if (unlikely(RRD_NOR(rrd) != 1)) {
+ netdev_err(adpt->netdev,
+ "error: multi-RFD not support yet! nor:%lu\n",
+ RRD_NOR(rrd));
+ }
+
+ /* mark rrd as processed */
+ RRD_UPDT_SET(rrd, 0);
+ *hw_rrd = rrd->word[3];
+
+ if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
+ rx_q->rrd.consume_idx = 0;
+
+ return true;
+}
+
+/* Produce new transmit descriptor */
+static void emac_tx_tpd_create(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
+{
+ u32 *hw_tpd;
+
+ tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
+ hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);
+
+ if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
+ tx_q->tpd.produce_idx = 0;
+
+ *(hw_tpd++) = tpd->word[0];
+ *(hw_tpd++) = tpd->word[1];
+ *(hw_tpd++) = tpd->word[2];
+ *hw_tpd = tpd->word[3];
+}
+
+/* Mark the last transmit descriptor as such (for the transmit packet) */
+static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q)
+{
+ u32 *hw_tpd =
+ EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
+ u32 tmp_tpd;
+
+ tmp_tpd = *(hw_tpd + 1);
+ tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
+ *(hw_tpd + 1) = tmp_tpd;
+}
+
+static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
+{
+ struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
+ u32 consume_idx = RRD_SI(rrd);
+ unsigned int i;
+
+ for (i = 0; i < RRD_NOR(rrd); i++) {
+ rfbuf[consume_idx].skb = NULL;
+ if (++consume_idx == rx_q->rfd.count)
+ consume_idx = 0;
+ }
+
+ rx_q->rfd.consume_idx = consume_idx;
+ rx_q->rfd.process_idx = consume_idx;
+}
+
+/* Push the received skb to upper layers */
+static void emac_receive_skb(struct emac_rx_queue *rx_q,
+ struct sk_buff *skb,
+ u16 vlan_tag, bool vlan_flag)
+{
+ if (vlan_flag) {
+ u16 vlan;
+
+ EMAC_TAG_TO_VLAN(vlan_tag, vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ }
+
+ napi_gro_receive(&rx_q->napi, skb);
+}
+
+/* Process receive event */
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+ int *num_pkts, int max_pkts)
+{
+ u32 proc_idx, hw_consume_idx, num_consume_pkts;
+ struct net_device *netdev = adpt->netdev;
+ struct emac_buffer *rfbuf;
+ unsigned int count = 0;
+ struct emac_rrd rrd;
+ struct sk_buff *skb;
+ u32 reg;
+
+ reg = readl_relaxed(adpt->base + rx_q->consume_reg);
+
+ hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
+ num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
+ (hw_consume_idx - rx_q->rrd.consume_idx) :
+ (hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);
+
+ do {
+ if (!num_consume_pkts)
+ break;
+
+ if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
+ break;
+
+ if (likely(RRD_NOR(&rrd) == 1)) {
+ /* good receive */
+ rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
+ dma_unmap_single(adpt->netdev->dev.parent,
+ rfbuf->dma_addr, rfbuf->length,
+ DMA_FROM_DEVICE);
+ rfbuf->dma_addr = 0;
+ skb = rfbuf->skb;
+ } else {
+ netdev_err(adpt->netdev,
+ "error: multi-RFD not support yet!\n");
+ break;
+ }
+ emac_rx_rfd_clean(rx_q, &rrd);
+ num_consume_pkts--;
+ count++;
+
+		/* Due to a HW issue in L4 checksum detection (UDP/TCP frags
+ * with DF set are marked as error), drop packets based on the
+ * error mask rather than the summary bit (ignoring L4F errors)
+ */
+ if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
+ netif_dbg(adpt, rx_status, adpt->netdev,
+ "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
+ rrd.word[0], rrd.word[1],
+ rrd.word[2], rrd.word[3]);
+
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = RRD_L4F(&rrd) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
+ (bool)RRD_CVTAG(&rrd));
+
+ netdev->last_rx = jiffies;
+ (*num_pkts)++;
+ } while (*num_pkts < max_pkts);
+
+ if (count) {
+ proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
+ rx_q->process_mask;
+ emac_reg_update32(adpt->base + rx_q->process_reg,
+ rx_q->process_mask, proc_idx);
+ emac_mac_rx_descs_refill(adpt, rx_q);
+ }
+}
+
+/* get the number of free transmit descriptors */
+static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
+{
+ u32 produce_idx = tx_q->tpd.produce_idx;
+ u32 consume_idx = tx_q->tpd.consume_idx;
+
+ return (consume_idx > produce_idx) ?
+ (consume_idx - produce_idx - 1) :
+ (tx_q->tpd.count + consume_idx - produce_idx - 1);
+}
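A standalone worked example of the wraparound formula above; one slot is always left unused so a full ring can be told apart from an empty one:

#include <stdio.h>

static unsigned int free_descs(unsigned int produce, unsigned int consume,
			       unsigned int count)
{
	return (consume > produce) ? (consume - produce - 1)
				   : (count + consume - produce - 1);
}

int main(void)
{
	printf("%u\n", free_descs(2, 5, 8));   /* 5 - 2 - 1 = 2 free */
	printf("%u\n", free_descs(5, 2, 8));   /* wrapped: 8 + 2 - 5 - 1 = 4 free */
	return 0;
}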
+
+/* Process transmit event */
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
+{
+ u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
+ u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
+ struct emac_buffer *tpbuf;
+
+ hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;
+
+ while (tx_q->tpd.consume_idx != hw_consume_idx) {
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
+ if (tpbuf->dma_addr) {
+ dma_unmap_single(adpt->netdev->dev.parent,
+ tpbuf->dma_addr, tpbuf->length,
+ DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ }
+
+ if (tpbuf->skb) {
+ pkts_compl++;
+ bytes_compl += tpbuf->skb->len;
+ dev_kfree_skb_irq(tpbuf->skb);
+ tpbuf->skb = NULL;
+ }
+
+ if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
+ tx_q->tpd.consume_idx = 0;
+ }
+
+ netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);
+
+ if (netif_queue_stopped(adpt->netdev))
+ if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(adpt->netdev);
+}
+
+/* Initialize all queue data structures */
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ adpt->rx_q.netdev = adpt->netdev;
+
+ adpt->rx_q.produce_reg = EMAC_MAILBOX_0;
+ adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
+ adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;
+
+ adpt->rx_q.process_reg = EMAC_MAILBOX_0;
+ adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
+ adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;
+
+ adpt->rx_q.consume_reg = EMAC_MAILBOX_3;
+ adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
+ adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;
+
+ adpt->rx_q.irq = &adpt->irq;
+ adpt->rx_q.intr = adpt->irq.mask & ISR_RX_PKT;
+
+ adpt->tx_q.produce_reg = EMAC_MAILBOX_15;
+ adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
+ adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;
+
+ adpt->tx_q.consume_reg = EMAC_MAILBOX_2;
+ adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
+ adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
+}
+
+/* Fill up transmit descriptors with TSO and Checksum offload information */
+static int emac_tso_csum(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb,
+ struct emac_tpd *tpd)
+{
+ unsigned int hdr_len;
+ int ret;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
+ + ntohs(ip_hdr(skb)->tot_len);
+ if (skb->len > pkt_len)
+ pskb_trim(skb, pkt_len);
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (unlikely(skb->len == hdr_len)) {
+ /* we only need to do csum */
+ netif_warn(adpt, tx_err, adpt->netdev,
+ "tso not needed for packet with 0 data\n");
+ goto do_csum;
+ }
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ TPD_IPV4_SET(tpd, 1);
+ }
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			/* ipv6 tso needs an extra tpd */
+ struct emac_tpd extra_tpd;
+
+ memset(tpd, 0, sizeof(*tpd));
+ memset(&extra_tpd, 0, sizeof(extra_tpd));
+
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ TPD_PKT_LEN_SET(&extra_tpd, skb->len);
+ TPD_LSO_SET(&extra_tpd, 1);
+ TPD_LSOV_SET(&extra_tpd, 1);
+ emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
+ TPD_LSOV_SET(tpd, 1);
+ }
+
+ TPD_LSO_SET(tpd, 1);
+ TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
+ TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
+ return 0;
+ }
+
+do_csum:
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int css, cso;
+
+ cso = skb_transport_offset(skb);
+ if (unlikely(cso & 0x1)) {
+ netdev_err(adpt->netdev,
+ "error: payload offset should be even\n");
+ return -EINVAL;
+ }
+ css = cso + skb->csum_offset;
+
+ TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
+ TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
+ TPD_CSX_SET(tpd, 1);
+ }
+
+ return 0;
+}
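A standalone sketch of the do_csum path's offset encoding above: the transport offset must be even, and both offsets are programmed in 16-bit-word units (hence the >> 1). The numbers are made up:

#include <stdio.h>

int main(void)
{
	unsigned int cso = 34;          /* hypothetical transport-header offset */
	unsigned int csum_off = 16;     /* hypothetical skb->csum_offset */
	unsigned int css = cso + csum_off;

	if (cso & 0x1) {
		printf("odd payload offset: cannot offload this frame\n");
		return 1;
	}
	/* descriptor fields are in 16-bit-word units, hence the >> 1 */
	printf("PAYLOAD_OFFSET = %u, CXSUM_OFFSET = %u\n", cso >> 1, css >> 1);
	return 0;
}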
+
+/* Fill up transmit descriptors */
+static void emac_tx_fill_tpd(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q, struct sk_buff *skb,
+ struct emac_tpd *tpd)
+{
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int first = tx_q->tpd.produce_idx;
+ unsigned int len = skb_headlen(skb);
+ struct emac_buffer *tpbuf = NULL;
+ unsigned int mapped_len = 0;
+ unsigned int i;
+ int count = 0;
+ int ret;
+
+	/* if TSO is in use, map the protocol headers into their own descriptor */
+ if (TPD_LSO(tpd)) {
+ mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = mapped_len;
+ tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
+ skb->data, tpbuf->length,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ if (mapped_len < len) {
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = len - mapped_len;
+ tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
+ skb->data + mapped_len,
+ tpbuf->length, DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+ tpbuf->length = frag->size;
+ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+ frag->page.p, frag->page_offset,
+ tpbuf->length, DMA_TO_DEVICE);
+ ret = dma_mapping_error(adpt->netdev->dev.parent,
+ tpbuf->dma_addr);
+ if (ret)
+ goto error;
+
+ TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+ TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+ TPD_BUF_LEN_SET(tpd, tpbuf->length);
+ emac_tx_tpd_create(adpt, tx_q, tpd);
+ count++;
+ }
+
+ /* The last tpd */
+ wmb();
+ emac_tx_tpd_mark_last(adpt, tx_q);
+
+	/* The last buffer info contains the skb address,
+	 * so the skb is freed after the buffer is unmapped
+ */
+ tpbuf->skb = skb;
+
+ return;
+
+error:
+ /* One of the memory mappings failed, so undo everything */
+ tx_q->tpd.produce_idx = first;
+
+ while (count--) {
+ tpbuf = GET_TPD_BUFFER(tx_q, first);
+ dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
+ tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->dma_addr = 0;
+ tpbuf->length = 0;
+
+ if (++first == tx_q->tpd.count)
+ first = 0;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+/* Transmit the packet using specified transmit queue */
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
+{
+ struct emac_tpd tpd;
+ u32 prod_idx;
+
+ memset(&tpd, 0, sizeof(tpd));
+
+ if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 tag;
+
+ EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
+ TPD_CVLAN_TAG_SET(&tpd, tag);
+ TPD_INSTC_SET(&tpd, 1);
+ }
+
+ if (skb_network_offset(skb) != ETH_HLEN)
+ TPD_TYP_SET(&tpd, 1);
+
+ emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
+
+ netdev_sent_queue(adpt->netdev, skb->len);
+
+	/* Make sure there are enough free descriptors to hold one
+	 * maximum-sized SKB.  We need one descriptor for each fragment,
+	 * one for the checksum (emac_tso_csum), one for TSO,
+	 * and one for the SKB header.
+ */
+ if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
+ netif_stop_queue(adpt->netdev);
+
+ /* update produce idx */
+ prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
+ tx_q->produce_mask;
+ emac_reg_update32(adpt->base + tx_q->produce_reg,
+ tx_q->produce_mask, prod_idx);
+
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
new file mode 100644
index 000000000000..f3aa24dc4a29
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* EMAC DMA HW engine uses three rings:
+ * Tx:
+ * TPD: Transmit Packet Descriptor ring.
+ * Rx:
+ * RFD: Receive Free Descriptor ring.
+ * Ring of descriptors with empty buffers to be filled by Rx HW.
+ * RRD: Receive Return Descriptor ring.
+ * Ring of descriptors with buffers filled with received data.
+ */
+
+#ifndef _EMAC_HW_H_
+#define _EMAC_HW_H_
+
+/* EMAC_CSR register offsets */
+#define EMAC_EMAC_WRAPPER_CSR1 0x000000
+#define EMAC_EMAC_WRAPPER_CSR2 0x000004
+#define EMAC_EMAC_WRAPPER_TX_TS_LO 0x000104
+#define EMAC_EMAC_WRAPPER_TX_TS_HI 0x000108
+#define EMAC_EMAC_WRAPPER_TX_TS_INX 0x00010c
+
+/* DMA Order Settings */
+enum emac_dma_order {
+ emac_dma_ord_in = 1,
+ emac_dma_ord_enh = 2,
+ emac_dma_ord_out = 4
+};
+
+enum emac_dma_req_block {
+ emac_dma_req_128 = 0,
+ emac_dma_req_256 = 1,
+ emac_dma_req_512 = 2,
+ emac_dma_req_1024 = 3,
+ emac_dma_req_2048 = 4,
+ emac_dma_req_4096 = 5
+};
+
+/* Return the value of bits lo..hi of val */
+#define BITS_GET(val, lo, hi) ((le32_to_cpu(val) & GENMASK((hi), (lo))) >> (lo))
+#define BITS_SET(val, lo, hi, new_val) \
+ val = cpu_to_le32((le32_to_cpu(val) & (~GENMASK((hi), (lo)))) | \
+ (((new_val) << (lo)) & GENMASK((hi), (lo))))
+
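A standalone sketch of the field extraction BITS_GET() performs; GENMASK is re-derived locally for a hosted build and the kernel's le32_to_cpu() byte-order handling is omitted:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(hi, lo) ((~0u >> (31 - (hi))) & (~0u << (lo)))
#define GET_BITS(val, lo, hi) (((val) & GENMASK_U32((hi), (lo))) >> (lo))

int main(void)
{
	uint32_t word0 = 0;

	word0 |= 3u << 16;      /* pretend the NOR field (bits 16..19) holds 3 */
	printf("NOR = %u\n", (unsigned int)GET_BITS(word0, 16, 19));
	return 0;
}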
+/* RRD (Receive Return Descriptor) */
+struct emac_rrd {
+ u32 word[6];
+
+/* number of RFD */
+#define RRD_NOR(rrd) BITS_GET((rrd)->word[0], 16, 19)
+/* start consumer index of rfd-ring */
+#define RRD_SI(rrd) BITS_GET((rrd)->word[0], 20, 31)
+/* vlan-tag (CVID, CFI and PRI) */
+#define RRD_CVALN_TAG(rrd) BITS_GET((rrd)->word[2], 0, 15)
+/* length of the packet */
+#define RRD_PKT_SIZE(rrd) BITS_GET((rrd)->word[3], 0, 13)
+/* L4(TCP/UDP) checksum failed */
+#define RRD_L4F(rrd) BITS_GET((rrd)->word[3], 14, 14)
+/* vlan tagged */
+#define RRD_CVTAG(rrd) BITS_GET((rrd)->word[3], 16, 16)
+/* When set, indicates that the descriptor is updated by the IP core.
+ * When cleared, indicates that the descriptor is invalid.
+ */
+#define RRD_UPDT(rrd) BITS_GET((rrd)->word[3], 31, 31)
+#define RRD_UPDT_SET(rrd, val) BITS_SET((rrd)->word[3], 31, 31, val)
+/* timestamp low */
+#define RRD_TS_LOW(rrd) BITS_GET((rrd)->word[4], 0, 29)
+/* timestamp high */
+#define RRD_TS_HI(rrd) le32_to_cpu((rrd)->word[5])
+};
+
+/* TPD (Transmit Packet Descriptor) */
+struct emac_tpd {
+ u32 word[4];
+
+/* Number of bytes of the transmit packet. (include 4-byte CRC) */
+#define TPD_BUF_LEN_SET(tpd, val) BITS_SET((tpd)->word[0], 0, 15, val)
+/* Custom Checksum Offload: When set, ask IP core to offload custom checksum */
+#define TPD_CSX_SET(tpd, val) BITS_SET((tpd)->word[1], 8, 8, val)
+/* TCP Large Send Offload: When set, ask IP core to offload TCP Large Send */
+#define TPD_LSO(tpd) BITS_GET((tpd)->word[1], 12, 12)
+#define TPD_LSO_SET(tpd, val) BITS_SET((tpd)->word[1], 12, 12, val)
+/* Large Send Offload Version: When set, indicates this is an LSOv2
+ * (for both IPv4 and IPv6). When cleared, indicates this is an LSOv1
+ * (only for IPv4).
+ */
+#define TPD_LSOV_SET(tpd, val) BITS_SET((tpd)->word[1], 13, 13, val)
+/* IPv4 packet: When set, indicates this is an IPv4 packet, this bit is only
+ * for LSOV2 format.
+ */
+#define TPD_IPV4_SET(tpd, val) BITS_SET((tpd)->word[1], 16, 16, val)
+/* 0: Ethernet frame (DA+SA+TYPE+DATA+CRC)
+ * 1: IEEE 802.3 frame (DA+SA+LEN+DSAP+SSAP+CTL+ORG+TYPE+DATA+CRC)
+ */
+#define TPD_TYP_SET(tpd, val) BITS_SET((tpd)->word[1], 17, 17, val)
+/* Low-32bit Buffer Address */
+#define TPD_BUFFER_ADDR_L_SET(tpd, val) ((tpd)->word[2] = cpu_to_le32(val))
+/* CVLAN Tag to be inserted if INS_VLAN_TAG is set, CVLAN TPID based on global
+ * register configuration.
+ */
+#define TPD_CVLAN_TAG_SET(tpd, val) BITS_SET((tpd)->word[3], 0, 15, val)
+/* Insert CVLAN Tag: When set, ask MAC to insert the CVLAN tag into the
+ * outgoing packet.
+ */
+#define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val)
+/* High 14 bits of the buffer address, so the 64-bit address is
+ * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ */
+#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val)
+/* Format D. Word offset from the 1st byte of this packet to start to calculate
+ * the custom checksum.
+ */
+#define TPD_PAYLOAD_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format D. Word offset from the 1st byte of this packet at which the custom
+ * checksum is written.
+ */
+#define TPD_CXSUM_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 18, 25, val)
+
+/* Format C. TCP Header offset from the 1st byte of this packet. (byte unit) */
+#define TPD_TCPHDR_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format C. MSS (Maximum Segment Size) obtained from the protocol layer.
+ * (byte unit)
+ */
+#define TPD_MSS_SET(tpd, val) BITS_SET((tpd)->word[1], 18, 30, val)
+/* packet length in ext tpd */
+#define TPD_PKT_LEN_SET(tpd, val) ((tpd)->word[2] = cpu_to_le32(val))
+};
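
Similarly, a hedged sketch of filling a minimal TPD for one plain, non-offloaded buffer using the setters above (the function is hypothetical; the real transmit path is in emac-mac.c):

    static void example_tpd_fill(struct emac_tpd *tpd, dma_addr_t addr, u16 len)
    {
            memset(tpd, 0, sizeof(*tpd));

            TPD_BUF_LEN_SET(tpd, len);      /* frame length, per the field above */
            TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(addr));
            TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(addr));
            TPD_TYP_SET(tpd, 0);            /* 0 = Ethernet II framing */
    }
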
+
+/* emac_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd)
+ */
+struct emac_ring_header {
+ void *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+ size_t size; /* length in bytes */
+ size_t used;
+};
+
+/* emac_buffer is wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct emac_buffer {
+ struct sk_buff *skb; /* socket buffer */
+ u16 length; /* rx buffer length */
+ dma_addr_t dma_addr; /* dma address */
+};
+
+/* receive free descriptor (rfd) ring */
+struct emac_rfd_ring {
+ struct emac_buffer *rfbuff;
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx;
+ unsigned int process_idx;
+ unsigned int consume_idx; /* unused */
+};
+
+/* Receive Return Descriptor (RRD) ring */
+struct emac_rrd_ring {
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* physical address */
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx; /* unused */
+ unsigned int consume_idx;
+};
+
+/* Rx queue */
+struct emac_rx_queue {
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct emac_rrd_ring rrd;
+ struct emac_rfd_ring rfd;
+ struct napi_struct napi;
+ struct emac_irq *irq;
+
+ u32 intr;
+ u32 produce_mask;
+ u32 process_mask;
+ u32 consume_mask;
+
+ u16 produce_reg;
+ u16 process_reg;
+ u16 consume_reg;
+
+ u8 produce_shift;
+ u8 process_shft;
+ u8 consume_shift;
+};
+
+/* Transmit Packet Descriptor (TPD) ring */
+struct emac_tpd_ring {
+ struct emac_buffer *tpbuff;
+ u32 *v_addr; /* virtual address */
+ dma_addr_t dma_addr; /* dma address */
+
+ size_t size; /* length in bytes */
+ unsigned int count; /* number of desc in the ring */
+ unsigned int produce_idx;
+ unsigned int consume_idx;
+ unsigned int last_produce_idx;
+};
+
+/* Tx queue */
+struct emac_tx_queue {
+ struct emac_tpd_ring tpd;
+
+ u32 produce_mask;
+ u32 consume_mask;
+
+ u16 max_packets; /* max packets per interrupt */
+ u16 produce_reg;
+ u16 consume_reg;
+
+ u8 produce_shift;
+ u8 consume_shift;
+};
+
+struct emac_adapter;
+
+int emac_mac_up(struct emac_adapter *adpt);
+void emac_mac_down(struct emac_adapter *adpt);
+void emac_mac_reset(struct emac_adapter *adpt);
+void emac_mac_start(struct emac_adapter *adpt);
+void emac_mac_stop(struct emac_adapter *adpt);
+void emac_mac_mode_config(struct emac_adapter *adpt);
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+ int *num_pkts, int max_pkts);
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+ struct emac_adapter *adpt);
+int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt);
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr);
+
+#endif /*_EMAC_HW_H_*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
new file mode 100644
index 000000000000..da4e90db4d98
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -0,0 +1,227 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+/* EMAC base register offsets */
+#define EMAC_MDIO_CTRL 0x001414
+#define EMAC_PHY_STS 0x001418
+#define EMAC_MDIO_EX_CTRL 0x001440
+
+/* EMAC_MDIO_CTRL */
+#define MDIO_MODE BIT(30)
+#define MDIO_PR BIT(29)
+#define MDIO_AP_EN BIT(28)
+#define MDIO_BUSY BIT(27)
+#define MDIO_CLK_SEL_BMSK 0x7000000
+#define MDIO_CLK_SEL_SHFT 24
+#define MDIO_START BIT(23)
+#define SUP_PREAMBLE BIT(22)
+#define MDIO_RD_NWR BIT(21)
+#define MDIO_REG_ADDR_BMSK 0x1f0000
+#define MDIO_REG_ADDR_SHFT 16
+#define MDIO_DATA_BMSK 0xffff
+#define MDIO_DATA_SHFT 0
+
+/* EMAC_PHY_STS */
+#define PHY_ADDR_BMSK 0x1f0000
+#define PHY_ADDR_SHFT 16
+
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_28 7
+
+#define MDIO_WAIT_TIMES 1000
+
+#define EMAC_LINK_SPEED_DEFAULT (\
+ EMAC_LINK_SPEED_10_HALF |\
+ EMAC_LINK_SPEED_10_FULL |\
+ EMAC_LINK_SPEED_100_HALF |\
+ EMAC_LINK_SPEED_100_FULL |\
+ EMAC_LINK_SPEED_1GB_FULL)
+
+/**
+ * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
+ * @adpt: the emac adapter
+ *
+ * The autopoll feature takes over the MDIO bus. In order for
+ * the PHY driver to be able to talk to the PHY over the MDIO
+ * bus, we need to temporarily disable the autopoll feature.
+ */
+static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
+{
+ u32 val;
+
+ /* disable autopoll */
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
+
+ /* wait for any mdio polling to complete */
+ if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
+ !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
+ return 0;
+
+ /* failed to disable; ensure it is enabled before returning */
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+
+ return -EBUSY;
+}
+
+/**
+ * emac_phy_mdio_autopoll_enable() - enable mdio autopoll
+ * @adpt: the emac adapter
+ *
+ * The EMAC has the ability to poll the external PHY on the MDIO
+ * bus for link state changes. This eliminates the need for the
+ * driver to poll the phy. If the link state does change,
+ * the EMAC issues an interrupt on behalf of the PHY.
+ */
+static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
+{
+ emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+}
+
+static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+ int ret;
+
+ ret = emac_phy_mdio_autopoll_disable(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+
+ reg = SUP_PREAMBLE |
+ ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+ ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+ MDIO_START | MDIO_RD_NWR;
+
+ writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ 100, MDIO_WAIT_TIMES * 100))
+ ret = -EIO;
+ else
+ ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+
+ emac_phy_mdio_autopoll_enable(adpt);
+
+ return ret;
+}
+
+static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+ int ret;
+
+ ret = emac_phy_mdio_autopoll_disable(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+
+ reg = SUP_PREAMBLE |
+ ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+ ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+ ((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
+ MDIO_START;
+
+ writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)), 100,
+ MDIO_WAIT_TIMES * 100))
+ ret = -EIO;
+
+ emac_phy_mdio_autopoll_enable(adpt);
+
+ return ret;
+}
+
+/* Configure the MDIO bus and connect the external PHY */
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *mii_bus;
+ int ret;
+
+ /* Create the mii_bus object for talking to the MDIO bus */
+ adpt->mii_bus = mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "emac-mdio";
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+ mii_bus->read = emac_mdio_read;
+ mii_bus->write = emac_mdio_write;
+ mii_bus->parent = &pdev->dev;
+ mii_bus->priv = adpt;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ u32 phy_addr;
+
+ ret = mdiobus_register(mii_bus);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+ ret = device_property_read_u32(&pdev->dev, "phy-channel",
+ &phy_addr);
+ if (ret)
+ /* If we can't read a valid phy address, then assume
+ * that there is only one phy on this mdio bus.
+ */
+ adpt->phydev = phy_find_first(mii_bus);
+ else
+ adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+
+ } else {
+ struct device_node *phy_np;
+
+ ret = of_mdiobus_register(mii_bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ adpt->phydev = of_phy_find_device(phy_np);
+ }
+
+ if (!adpt->phydev) {
+ dev_err(&pdev->dev, "could not find external phy\n");
+ mdiobus_unregister(mii_bus);
+ return -ENODEV;
+ }
+
+ if (adpt->phydev->drv)
+ phy_attached_print(adpt->phydev, NULL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
new file mode 100644
index 000000000000..49f3701a6dd7
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _EMAC_PHY_H_
+#define _EMAC_PHY_H_
+
+typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);
+
+/**
+ * struct emac_phy - internal emac phy
+ * @base: base address
+ * @digital: per-lane digital block
+ * @initialize: initialization function
+ */
+struct emac_phy {
+ void __iomem *base;
+ void __iomem *digital;
+ emac_sgmii_initialize initialize;
+};
+
+struct emac_adapter;
+
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
+
+#endif /* _EMAC_PHY_H_ */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
new file mode 100644
index 000000000000..75c1b530e39e
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include <linux/of_device.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-sgmii.h"
+
+/* EMAC_QSERDES register offsets */
+#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000
+#define EMAC_QSERDES_COM_PLL_CNTRL 0x000014
+#define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018
+#define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024
+#define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028
+#define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c
+#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038
+#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040
+#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044
+#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048
+#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c
+#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050
+#define EMAC_QSERDES_COM_DEC_START1 0x000064
+#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098
+#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c
+#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0
+#define EMAC_QSERDES_COM_DEC_START2 0x0000a4
+#define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac
+#define EMAC_QSERDES_COM_RESET_SM 0x0000bc
+#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100
+#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108
+#define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c
+#define EMAC_QSERDES_TX_LANE_MODE 0x000150
+#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170
+#define EMAC_QSERDES_RX_CDR_CONTROL 0x000200
+#define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210
+#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_SERDES_START 0x000000
+#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000004
+#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000008
+#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00000C
+#define EMAC_SGMII_PHY_LANE_CTRL1 0x000018
+#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000048
+#define EMAC_SGMII_PHY_CDR_CTRL0 0x000058
+#define EMAC_SGMII_PHY_SPEED_CFG1 0x000074
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000080
+#define EMAC_SGMII_PHY_RESET_CTRL 0x0000a8
+#define EMAC_SGMII_PHY_IRQ_CMD 0x0000ac
+#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0000b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0000b4
+#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0000b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0000d4
+#define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0000e0
+#define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0000e4
+
+/* EMAC_QSERDES_COM_PLL_IP_SETI */
+#define PLL_IPSETI(x) ((x) & 0x3f)
+
+/* EMAC_QSERDES_COM_PLL_CP_SETI */
+#define PLL_CPSETI(x) ((x) & 0xff)
+
+/* EMAC_QSERDES_COM_PLL_IP_SETP */
+#define PLL_IPSETP(x) ((x) & 0x3f)
+
+/* EMAC_QSERDES_COM_PLL_CP_SETP */
+#define PLL_CPSETP(x) ((x) & 0x1f)
+
+/* EMAC_QSERDES_COM_PLL_CRCTRL */
+#define PLL_RCTRL(x) (((x) & 0xf) << 4)
+#define PLL_CCTRL(x) ((x) & 0xf)
+
+/* SGMII v2 PHY registers per lane */
+#define EMAC_SGMII_PHY_LN_OFFSET 0x0400
+
+/* SGMII v2 digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0 0x00C
+#define EMAC_SGMII_LN_DRVR_TAP_EN 0x018
+#define EMAC_SGMII_LN_TX_MARGINING 0x01C
+#define EMAC_SGMII_LN_TX_PRE 0x020
+#define EMAC_SGMII_LN_TX_POST 0x024
+#define EMAC_SGMII_LN_TX_BAND_MODE 0x060
+#define EMAC_SGMII_LN_LANE_MODE 0x064
+#define EMAC_SGMII_LN_PARALLEL_RATE 0x078
+#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0
+#define EMAC_SGMII_LN_VGA_INITVAL 0x134
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194
+#define EMAC_SGMII_LN_RX_BAND 0x19C
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8
+#define EMAC_SGMII_LN_RSM_CONFIG 0x1F0
+#define EMAC_SGMII_LN_SIGDET_ENABLES 0x224
+#define EMAC_SGMII_LN_SIGDET_CNTRL 0x228
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C
+#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC
+
+/* SGMII v2 digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
+#define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f)
+#define UCDR_ENABLE BIT(6)
+#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
+#define SIGDET_LP_BYP_PS4 BIT(7)
+#define SIGDET_EN_PS0_TO_PS2 BIT(6)
+#define EN_ACCOUPLEVCM_SW_MUX BIT(5)
+#define EN_ACCOUPLEVCM_SW BIT(4)
+#define RX_SYNC_EN BIT(3)
+#define RXTERM_HIGHZ_PS5 BIT(2)
+#define SIGDET_EN_PS3 BIT(1)
+#define EN_ACCOUPLE_VCM_PS3 BIT(0)
+#define UFS_MODE BIT(5)
+#define TXVAL_VALID_INIT BIT(4)
+#define TXVAL_VALID_MUX BIT(3)
+#define TXVAL_VALID BIT(2)
+#define USB3P1_MODE BIT(1)
+#define KR_PCIGEN3_MODE BIT(0)
+#define PRE_EN BIT(3)
+#define POST_EN BIT(2)
+#define MAIN_EN_MUX BIT(1)
+#define MAIN_EN BIT(0)
+#define TX_MARGINING_MUX BIT(6)
+#define TX_MARGINING(x) ((x) & 0x3f)
+#define TX_PRE_MUX BIT(6)
+#define TX_PRE(x) ((x) & 0x3f)
+#define TX_POST_MUX BIT(6)
+#define TX_POST(x) ((x) & 0x3f)
+#define CML_GEAR_MODE(x) (((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x) ((x) & 3)
+#define VGA_THRESH_DFE(x) ((x) & 0x3f)
+#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
+#define SIGDET_LP_BYP_MUX BIT(4)
+#define SIGDET_LP_BYP BIT(3)
+#define SIGDET_EN_MUX BIT(2)
+#define SIGDET_EN BIT(1)
+#define SIGDET_FLT_BYP BIT(0)
+#define SIGDET_LVL(x) (((x) & 0xf) << 4)
+#define SIGDET_BW_CTRL(x) ((x) & 0xf)
+#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
+#define SIGDET_DEGLITCH_BYP BIT(0)
+#define INVERT_PCS_RX_CLK BIT(7)
+#define PWM_EN BIT(6)
+#define RXBIAS_SEL(x) (((x) & 0x3) << 4)
+#define EBDAC_SIGN BIT(3)
+#define EDAC_SIGN BIT(2)
+#define EN_AUXTAP1SIGN_INVERT BIT(1)
+#define EN_DAC_CHOPPING BIT(0)
+#define DRVR_LOGIC_CLK_EN BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
+#define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4)
+#define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2)
+#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
+#define BAND_MODE2(x) (((x) & 0x3) << 4)
+#define BAND_MODE1(x) (((x) & 0x3) << 2)
+#define BAND_MODE0(x) ((x) & 0x3)
+#define LANE_SYNC_MODE BIT(5)
+#define LANE_MODE(x) ((x) & 0x1f)
+#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
+#define EN_DLL_MODE0 BIT(4)
+#define EN_IQ_DCC_MODE0 BIT(3)
+#define EN_IQCAL_MODE0 BIT(2)
+#define EN_QPATH_MODE0 BIT(1)
+#define EN_EPATH_MODE0 BIT(0)
+#define FORCE_TSYNC_ACK BIT(7)
+#define FORCE_CMN_ACK BIT(6)
+#define FORCE_CMN_READY BIT(5)
+#define EN_RCLK_DEGLITCH BIT(4)
+#define BYPASS_RSM_CDR_RESET BIT(3)
+#define BYPASS_RSM_TSYNC BIT(2)
+#define BYPASS_RSM_SAMP_CAL BIT(1)
+#define BYPASS_RSM_DLL_CAL BIT(0)
+
+/* EMAC_QSERDES_COM_SYS_CLK_CTRL */
+#define SYSCLK_CM BIT(4)
+#define SYSCLK_AC_COUPLE BIT(3)
+
+/* EMAC_QSERDES_COM_PLL_CNTRL */
+#define OCP_EN BIT(5)
+#define PLL_DIV_FFEN BIT(2)
+#define PLL_DIV_ORD BIT(1)
+
+/* EMAC_QSERDES_COM_SYSCLK_EN_SEL */
+#define SYSCLK_SEL_CMOS BIT(3)
+
+/* EMAC_QSERDES_COM_RESETSM_CNTRL */
+#define FRQ_TUNE_MODE BIT(4)
+
+/* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */
+#define PLLLOCK_CMP_EN BIT(0)
+
+/* EMAC_QSERDES_COM_DEC_START1 */
+#define DEC_START1_MUX BIT(7)
+#define DEC_START1(x) ((x) & 0x7f)
+
+/* EMAC_QSERDES_COM_DIV_FRAC_START1 & EMAC_QSERDES_COM_DIV_FRAC_START2 */
+#define DIV_FRAC_START_MUX BIT(7)
+#define DIV_FRAC_START(x) ((x) & 0x7f)
+
+/* EMAC_QSERDES_COM_DIV_FRAC_START3 */
+#define DIV_FRAC_START3_MUX BIT(4)
+#define DIV_FRAC_START3(x) ((x) & 0xf)
+
+/* EMAC_QSERDES_COM_DEC_START2 */
+#define DEC_START2_MUX BIT(1)
+#define DEC_START2 BIT(0)
+
+/* EMAC_QSERDES_COM_RESET_SM */
+#define READY BIT(5)
+
+/* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */
+#define TX_EMP_POST1_LVL_MUX BIT(5)
+#define TX_EMP_POST1_LVL(x) ((x) & 0x1f)
+#define TX_EMP_POST1_LVL_BMSK 0x1f
+#define TX_EMP_POST1_LVL_SHFT 0
+
+/* EMAC_QSERDES_TX_TX_DRV_LVL */
+#define TX_DRV_LVL_MUX BIT(4)
+#define TX_DRV_LVL(x) ((x) & 0xf)
+
+/* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */
+#define EMP_EN_MUX BIT(1)
+#define EMP_EN BIT(0)
+
+/* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */
+#define HBW_PD_EN BIT(7)
+#define SECONDORDERENABLE BIT(6)
+#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3)
+#define SECONDORDERGAIN(x) ((x) & 0x7)
+
+/* EMAC_QSERDES_RX_RX_EQ_GAIN12 */
+#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4)
+#define RX_EQ_GAIN1(x) ((x) & 0xf)
+
+/* EMAC_SGMII_PHY_SERDES_START */
+#define SERDES_START BIT(0)
+
+/* EMAC_SGMII_PHY_CMN_PWR_CTRL */
+#define BIAS_EN BIT(6)
+#define PLL_EN BIT(5)
+#define SYSCLK_EN BIT(4)
+#define CLKBUF_L_EN BIT(3)
+#define PLL_TXCLK_EN BIT(1)
+#define PLL_RXCLK_EN BIT(0)
+
+/* EMAC_SGMII_PHY_RX_PWR_CTRL */
+#define L0_RX_SIGDET_EN BIT(7)
+#define L0_RX_TERM_MODE(x) (((x) & 3) << 4)
+#define L0_RX_I_EN BIT(1)
+
+/* EMAC_SGMII_PHY_TX_PWR_CTRL */
+#define L0_TX_EN BIT(5)
+#define L0_CLKBUF_EN BIT(4)
+#define L0_TRAN_BIAS_EN BIT(1)
+
+/* EMAC_SGMII_PHY_LANE_CTRL1 */
+#define L0_RX_EQUALIZE_ENABLE BIT(6)
+#define L0_RESET_TSYNC_EN BIT(4)
+#define L0_DRV_LVL(x) ((x) & 0xf)
+
+/* EMAC_SGMII_PHY_AUTONEG_CFG2 */
+#define FORCE_AN_TX_CFG BIT(5)
+#define FORCE_AN_RX_CFG BIT(4)
+#define AN_ENABLE BIT(0)
+
+/* EMAC_SGMII_PHY_SPEED_CFG1 */
+#define DUPLEX_MODE BIT(4)
+#define SPDMODE_1000 BIT(1)
+#define SPDMODE_100 BIT(0)
+#define SPDMODE_10 0
+#define SPDMODE_BMSK 3
+#define SPDMODE_SHFT 0
+
+/* EMAC_SGMII_PHY_POW_DWN_CTRL0 */
+#define PWRDN_B BIT(0)
+#define CDR_MAX_CNT(x) ((x) & 0xff)
+
+/* EMAC_QSERDES_TX_BIST_MODE_LANENO */
+#define BIST_LANE_NUMBER(x) (((x) & 3) << 5)
+#define BISTMODE(x) ((x) & 0x1f)
+
+/* EMAC_QSERDES_COM_PLLLOCK_CMPx */
+#define PLLLOCK_CMP(x) ((x) & 0xff)
+
+/* EMAC_SGMII_PHY_RESET_CTRL */
+#define PHY_SW_RESET BIT(0)
+
+/* EMAC_SGMII_PHY_IRQ_CMD */
+#define IRQ_GLOBAL_CLEAR BIT(0)
+
+/* EMAC_SGMII_PHY_INTERRUPT_MASK */
+#define DECODE_CODE_ERR BIT(7)
+#define DECODE_DISP_ERR BIT(6)
+#define PLL_UNLOCK BIT(5)
+#define AN_ILLEGAL_TERM BIT(4)
+#define SYNC_FAIL BIT(3)
+#define AN_START BIT(2)
+#define AN_END BIT(1)
+#define AN_REQUEST BIT(0)
+
+#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
+
+#define SGMII_PHY_INTERRUPT_ERR (\
+ DECODE_CODE_ERR |\
+ DECODE_DISP_ERR)
+
+#define SGMII_ISR_AN_MASK (\
+ AN_REQUEST |\
+ AN_START |\
+ AN_END |\
+ AN_ILLEGAL_TERM |\
+ PLL_UNLOCK |\
+ SYNC_FAIL)
+
+#define SGMII_ISR_MASK (\
+ SGMII_PHY_INTERRUPT_ERR |\
+ SGMII_ISR_AN_MASK)
+
+/* SGMII TX_CONFIG */
+#define TXCFG_LINK 0x8000
+#define TXCFG_MODE_BMSK 0x1c00
+#define TXCFG_1000_FULL 0x1800
+#define TXCFG_100_FULL 0x1400
+#define TXCFG_100_HALF 0x0400
+#define TXCFG_10_FULL 0x1000
+#define TXCFG_10_HALF 0x0000
+
+#define SERDES_START_WAIT_TIMES 100
+
+struct emac_reg_write {
+ unsigned int offset;
+ u32 val;
+};
+
+static void emac_reg_write_all(void __iomem *base,
+ const struct emac_reg_write *itr, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++itr, ++i)
+ writel(itr->val, base + itr->offset);
+}
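
Usage note (editorial): emac_reg_write_all() turns a fixed register/value sequence into data, so each init table below is applied with a single call. A minimal sketch, reusing register names defined earlier (the table and function names are invented):

    static const struct emac_reg_write example_setting[] = {
            { EMAC_SGMII_PHY_CDR_CTRL0,     CDR_MAX_CNT(15) },
            { EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B },
    };

    static void example_apply(void __iomem *base)
    {
            /* write both entries to the mapped SGMII PHY region */
            emac_reg_write_all(base, example_setting, ARRAY_SIZE(example_setting));
    }
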
+
+static const struct emac_reg_write physical_coding_sublayer_programming_v1[] = {
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN},
+ {EMAC_SGMII_PHY_RX_PWR_CTRL,
+ L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
+ {EMAC_SGMII_PHY_CMN_PWR_CTRL,
+ BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN |
+ PLL_RXCLK_EN},
+ {EMAC_SGMII_PHY_LANE_CTRL1,
+ L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)},
+};
+
+static const struct emac_reg_write sysclk_refclk_setting[] = {
+ {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
+ {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE},
+};
+
+static const struct emac_reg_write pll_setting[] = {
+ {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
+ {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
+ {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
+ {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
+ {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
+ {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD},
+ {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
+ {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
+ {EMAC_QSERDES_COM_DIV_FRAC_START1,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START2,
+ DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
+ {EMAC_QSERDES_COM_DIV_FRAC_START3,
+ DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
+ {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
+ {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
+};
+
+static const struct emac_reg_write cdr_setting[] = {
+ {EMAC_QSERDES_RX_CDR_CONTROL,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
+ {EMAC_QSERDES_RX_CDR_CONTROL2,
+ SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
+};
+
+static const struct emac_reg_write tx_rx_setting[] = {
+ {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
+ {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
+ {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
+ {EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
+ TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
+ {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
+ {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
+};
+
+static const struct emac_reg_write sgmii_v2_laned[] = {
+ /* CDR Settings */
+ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+ UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+ {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+ /* TX/RX Settings */
+ {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+ {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+ {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+ {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+ {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+ {EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+ {EMAC_SGMII_LN_CML_CTRL_MODE0,
+ CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+ {EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+ MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+ {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+ {EMAC_SGMII_LN_SIGDET_ENABLES,
+ SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+ {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+ {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+ {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
+ {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+ DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+ {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+ {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
+ {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+ {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
+ {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming_v2[] = {
+ {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+ {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+ {EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+ {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+static int emac_sgmii_link_init(struct emac_adapter *adpt)
+{
+ struct phy_device *phydev = adpt->phydev;
+ struct emac_phy *phy = &adpt->phy;
+ u32 val;
+
+ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+ val |= AN_ENABLE;
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+ } else {
+ u32 speed_cfg;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_cfg = SPDMODE_10;
+ break;
+ case SPEED_100:
+ speed_cfg = SPDMODE_100;
+ break;
+ case SPEED_1000:
+ speed_cfg = SPDMODE_1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL)
+ speed_cfg |= DUPLEX_MODE;
+
+ val &= ~AN_ENABLE;
+ writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1);
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+ }
+
+ return 0;
+}
+
+static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
+{
+ struct emac_phy *phy = &adpt->phy;
+ u32 status;
+
+ writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+ writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+ /* Ensure interrupt clear command is written to HW */
+ wmb();
+
+	/* After setting the IRQ_GLOBAL_CLEAR bit, the status clearing must
+ * be confirmed before clearing the bits in other registers.
+ * It takes a few cycles for hw to clear the interrupt status.
+ */
+ if (readl_poll_timeout_atomic(phy->base +
+ EMAC_SGMII_PHY_INTERRUPT_STATUS,
+ status, !(status & irq_bits), 1,
+ SGMII_PHY_IRQ_CLR_WAIT_TIME)) {
+ netdev_err(adpt->netdev,
+			   "error: failed to clear SGMII irq: status:0x%x bits:0x%x\n",
+ status, irq_bits);
+ return -EIO;
+ }
+
+ /* Finalize clearing procedure */
+ writel_relaxed(0, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+ writel_relaxed(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+
+ /* Ensure that clearing procedure finalization is written to HW */
+ wmb();
+
+ return 0;
+}
+
+int emac_sgmii_init_v1(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ unsigned int i;
+ int ret;
+
+ ret = emac_sgmii_link_init(adpt);
+ if (ret)
+ return ret;
+
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v1,
+ ARRAY_SIZE(physical_coding_sublayer_programming_v1));
+ emac_reg_write_all(phy->base, sysclk_refclk_setting,
+ ARRAY_SIZE(sysclk_refclk_setting));
+ emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting));
+ emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting));
+ emac_reg_write_all(phy->base, tx_rx_setting,
+ ARRAY_SIZE(tx_rx_setting));
+
+ /* Power up the Ser/Des engine */
+ writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START);
+
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "error: ser/des failed to start\n");
+ return -EIO;
+ }
+	/* Mask out all the SGMII interrupts */
+ writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+
+ return 0;
+}
+
+int emac_sgmii_init_v2(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ void __iomem *phy_regs = phy->base;
+ void __iomem *laned = phy->digital;
+ unsigned int i;
+ u32 lnstatus;
+ int ret;
+
+ ret = emac_sgmii_link_init(adpt);
+ if (ret)
+ return ret;
+
+ /* PCS lane-x init */
+ emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v2,
+ ARRAY_SIZE(physical_coding_sublayer_programming_v2));
+
+ /* SGMII lane-x init */
+ emac_reg_write_all(phy->digital,
+ sgmii_v2_laned, ARRAY_SIZE(sgmii_v2_laned));
+
+ /* Power up PCS and start reset lane state machine */
+
+ writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+ writel(1, laned + SGMII_LN_RSM_START);
+
+ /* Wait for c_ready assertion */
+ for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+ lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+ if (lnstatus & BIT(1))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (i == SERDES_START_WAIT_TIMES) {
+ netdev_err(adpt->netdev, "SGMII failed to start\n");
+ return -EIO;
+ }
+
+ /* Disable digital and SERDES loopback */
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+ writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+ writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+	/* Mask out all the SGMII interrupts */
+ writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+
+ return 0;
+}
+
+static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
+{
+ struct emac_phy *phy = &adpt->phy;
+ u32 val;
+
+ /* Reset PHY */
+ val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ writel(((val & ~PHY_RESET) | PHY_RESET), phy->base +
+ EMAC_EMAC_WRAPPER_CSR2);
+ /* Ensure phy-reset command is written to HW before the release cmd */
+ msleep(50);
+ val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ writel((val & ~PHY_RESET), phy->base + EMAC_EMAC_WRAPPER_CSR2);
+ /* Ensure phy-reset release command is written to HW before initializing
+ * SGMII
+ */
+ msleep(50);
+}
+
+void emac_sgmii_reset(struct emac_adapter *adpt)
+{
+ int ret;
+
+ clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+ emac_sgmii_reset_prepare(adpt);
+
+ ret = adpt->phy.initialize(adpt);
+ if (ret)
+ netdev_err(adpt->netdev,
+ "could not reinitialize internal PHY (error=%i)\n",
+ ret);
+
+ clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
+}
+
+static int emac_sgmii_acpi_match(struct device *dev, void *data)
+{
+ static const struct acpi_device_id match_table[] = {
+ {
+ .id = "QCOM8071",
+ .driver_data = (kernel_ulong_t)emac_sgmii_init_v2,
+ },
+ {}
+ };
+ const struct acpi_device_id *id = acpi_match_device(match_table, dev);
+ emac_sgmii_initialize *initialize = data;
+
+ if (id)
+ *initialize = (emac_sgmii_initialize)id->driver_data;
+
+ return !!id;
+}
+
+static const struct of_device_id emac_sgmii_dt_match[] = {
+ {
+ .compatible = "qcom,fsm9900-emac-sgmii",
+ .data = emac_sgmii_init_v1,
+ },
+ {
+ .compatible = "qcom,qdf2432-emac-sgmii",
+ .data = emac_sgmii_init_v2,
+ },
+ {}
+};
+
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+ struct platform_device *sgmii_pdev = NULL;
+ struct emac_phy *phy = &adpt->phy;
+ struct resource *res;
+ int ret;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ struct device *dev;
+
+ dev = device_find_child(&pdev->dev, &phy->initialize,
+ emac_sgmii_acpi_match);
+
+ if (!dev) {
+ dev_err(&pdev->dev, "cannot find internal phy node\n");
+ return -ENODEV;
+ }
+
+ sgmii_pdev = to_platform_device(dev);
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing internal-phy property\n");
+ return -ENODEV;
+ }
+
+ sgmii_pdev = of_find_device_by_node(np);
+ if (!sgmii_pdev) {
+ dev_err(&pdev->dev, "invalid internal-phy property\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "unrecognized internal phy node\n");
+ ret = -ENODEV;
+ goto error_put_device;
+ }
+
+ phy->initialize = (emac_sgmii_initialize)match->data;
+ }
+
+ /* Base address is the first address */
+ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto error_put_device;
+ }
+
+ phy->base = ioremap(res->start, resource_size(res));
+ if (!phy->base) {
+ ret = -ENOMEM;
+ goto error_put_device;
+ }
+
+	/* v2 SGMII has a per-lane digital block, so map it if it exists */
+ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ phy->digital = ioremap(res->start, resource_size(res));
+ if (!phy->digital) {
+ ret = -ENOMEM;
+ goto error_unmap_base;
+ }
+ }
+
+ ret = phy->initialize(adpt);
+ if (ret)
+ goto error;
+
+ /* We've remapped the addresses, so we don't need the device any
+ * more. of_find_device_by_node() says we should release it.
+ */
+ put_device(&sgmii_pdev->dev);
+
+ return 0;
+
+error:
+ if (phy->digital)
+ iounmap(phy->digital);
+error_unmap_base:
+ iounmap(phy->base);
+error_put_device:
+ put_device(&sgmii_pdev->dev);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
new file mode 100644
index 000000000000..ce79212ff403
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EMAC_SGMII_H_
+#define _EMAC_SGMII_H_
+
+struct emac_adapter;
+struct platform_device;
+
+int emac_sgmii_init_v1(struct emac_adapter *adpt);
+int emac_sgmii_init_v2(struct emac_adapter *adpt);
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
+void emac_sgmii_reset(struct emac_adapter *adpt);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
new file mode 100644
index 000000000000..9bf3b2b82e95
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -0,0 +1,755 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define EMAC_RRD_SIZE 4
+/* The RRD size when timestamping is enabled */
+#define EMAC_TS_RRD_SIZE 6
+#define EMAC_TPD_SIZE 4
+#define EMAC_RFD_SIZE 2
+
+#define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0
+#define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22
+#define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0
+#define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24
+
+#define RXQ0_NUM_RFD_PREF_DEF 8
+#define TXQ0_NUM_TPD_PREF_DEF 5
+
+#define EMAC_PREAMBLE_DEF 7
+
+#define DMAR_DLY_CNT_DEF 15
+#define DMAW_DLY_CNT_DEF 4
+
+#define IMR_NORMAL_MASK (\
+ ISR_ERROR |\
+ ISR_GPHY_LINK |\
+ ISR_TX_PKT |\
+ GPHY_WAKEUP_INT)
+
+#define IMR_EXTENDED_MASK (\
+ SW_MAN_INT |\
+ ISR_OVER |\
+ ISR_ERROR |\
+ ISR_GPHY_LINK |\
+ ISR_TX_PKT |\
+ GPHY_WAKEUP_INT)
+
+#define ISR_TX_PKT (\
+ TX_PKT_INT |\
+ TX_PKT_INT1 |\
+ TX_PKT_INT2 |\
+ TX_PKT_INT3)
+
+#define ISR_GPHY_LINK (\
+ GPHY_LINK_UP_INT |\
+ GPHY_LINK_DOWN_INT)
+
+#define ISR_OVER (\
+ RFD0_UR_INT |\
+ RFD1_UR_INT |\
+ RFD2_UR_INT |\
+ RFD3_UR_INT |\
+ RFD4_UR_INT |\
+ RXF_OF_INT |\
+ TXF_UR_INT)
+
+#define ISR_ERROR (\
+ DMAR_TO_INT |\
+ DMAW_TO_INT |\
+ TXQ_TO_INT)
+
+/* in sync with enum emac_clk_id */
+static const char * const emac_clk_name[] = {
+ "axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
+ "rx_clk", "sys_clk"
+};
+
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
+{
+ u32 data = readl(addr);
+
+ writel(((data & ~mask) | val), addr);
+}
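
Usage note (editorial): emac_reg_update32() is a plain read-modify-write, so a bit is set by passing it in val with a zero mask, and cleared by passing it in mask with a zero val, exactly as the MDIO autopoll toggling in emac-phy.c above does:

    emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);  /* set bit   */
    emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);  /* clear bit */
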
+
+/* Reinitialize the EMAC (serialized by reset_lock) */
+int emac_reinit_locked(struct emac_adapter *adpt)
+{
+ int ret;
+
+ mutex_lock(&adpt->reset_lock);
+
+ emac_mac_down(adpt);
+ emac_sgmii_reset(adpt);
+ ret = emac_mac_up(adpt);
+
+ mutex_unlock(&adpt->reset_lock);
+
+ return ret;
+}
+
+/* NAPI */
+static int emac_napi_rtx(struct napi_struct *napi, int budget)
+{
+ struct emac_rx_queue *rx_q =
+ container_of(napi, struct emac_rx_queue, napi);
+ struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
+ struct emac_irq *irq = rx_q->irq;
+ int work_done = 0;
+
+ emac_mac_rx_process(adpt, rx_q, &work_done, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ irq->mask |= rx_q->intr;
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+ }
+
+ return work_done;
+}
+
+/* Transmit the packet */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
+}
+
+irqreturn_t emac_isr(int _irq, void *data)
+{
+ struct emac_irq *irq = data;
+ struct emac_adapter *adpt =
+ container_of(irq, struct emac_adapter, irq);
+ struct emac_rx_queue *rx_q = &adpt->rx_q;
+ u32 isr, status;
+
+ /* disable the interrupt */
+ writel(0, adpt->base + EMAC_INT_MASK);
+
+ isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);
+
+ status = isr & irq->mask;
+ if (status == 0)
+ goto exit;
+
+ if (status & ISR_ERROR) {
+ netif_warn(adpt, intr, adpt->netdev,
+ "warning: error irq status 0x%lx\n",
+ status & ISR_ERROR);
+ /* reset MAC */
+ schedule_work(&adpt->work_thread);
+ }
+
+	/* Schedule NAPI for the receive queue whose interrupt status
+	 * bit is set.
+	 */
+ if (status & rx_q->intr) {
+ if (napi_schedule_prep(&rx_q->napi)) {
+ irq->mask &= ~rx_q->intr;
+ __napi_schedule(&rx_q->napi);
+ }
+ }
+
+ if (status & TX_PKT_INT)
+ emac_mac_tx_process(adpt, &adpt->tx_q);
+
+ if (status & ISR_OVER)
+ net_warn_ratelimited("warning: TX/RX overflow\n");
+
+ /* link event */
+ if (status & ISR_GPHY_LINK)
+ phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
+
+exit:
+ /* enable the interrupt */
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+
+ return IRQ_HANDLED;
+}
+
+/* Configure VLAN tag strip/insert feature */
+static int emac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ /* We only need to reprogram the hardware if the VLAN tag features
+ * have changed, and if it's already running.
+ */
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
+ return 0;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ /* emac_mac_mode_config() uses netdev->features to configure the EMAC,
+ * so make sure it's set first.
+ */
+ netdev->features = features;
+
+ return emac_reinit_locked(adpt);
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_rx_mode_set(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+
+ emac_mac_mode_config(adpt);
+
+ /* update multicast address filtering */
+ emac_mac_multicast_addr_clear(adpt);
+ netdev_for_each_mc_addr(ha, netdev)
+ emac_mac_multicast_addr_set(adpt, ha->addr);
+}
+
+/* Change the Maximum Transfer Unit (MTU) */
+static int emac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) ||
+ (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) {
+ netdev_err(adpt->netdev, "error: invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ netif_info(adpt, hw, adpt->netdev,
+ "changing MTU from %d to %d\n", netdev->mtu,
+ new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+/* Called when the network interface is made active */
+static int emac_open(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ int ret;
+
+ /* allocate rx/tx dma buffer & descriptors */
+ ret = emac_mac_rx_tx_rings_alloc_all(adpt);
+ if (ret) {
+ netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+ return ret;
+ }
+
+ ret = emac_mac_up(adpt);
+ if (ret) {
+ emac_mac_rx_tx_rings_free_all(adpt);
+ return ret;
+ }
+
+ emac_mac_start(adpt);
+
+ return 0;
+}
+
+/* Called when the network interface is disabled */
+static int emac_close(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ mutex_lock(&adpt->reset_lock);
+
+ emac_mac_down(adpt);
+ emac_mac_rx_tx_rings_free_all(adpt);
+
+ mutex_unlock(&adpt->reset_lock);
+
+ return 0;
+}
+
+/* Respond to a TX hang */
+static void emac_tx_timeout(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ schedule_work(&adpt->work_thread);
+}
+
+/* IOCTL support for the interface */
+static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!netdev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+/* Provide network statistics info for the interface */
+static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ unsigned int addr = REG_MAC_RX_STATUS_BIN;
+ struct emac_stats *stats = &adpt->stats;
+ u64 *stats_itr = &adpt->stats.rx_ok;
+ u32 val;
+
+ spin_lock(&stats->lock);
+
+ while (addr <= REG_MAC_RX_STATUS_END) {
+ val = readl_relaxed(adpt->base + addr);
+ *stats_itr += val;
+ stats_itr++;
+ addr += sizeof(u32);
+ }
+
+ /* additional rx status */
+ val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
+ adpt->stats.rx_crc_align += val;
+ val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
+ adpt->stats.rx_jabbers += val;
+
+ /* update tx status */
+ addr = REG_MAC_TX_STATUS_BIN;
+ stats_itr = &adpt->stats.tx_ok;
+
+ while (addr <= REG_MAC_TX_STATUS_END) {
+ val = readl_relaxed(adpt->base + addr);
+ *stats_itr += val;
+ ++stats_itr;
+ addr += sizeof(u32);
+ }
+
+ /* additional tx status */
+ val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
+ adpt->stats.tx_col += val;
+
+ /* return parsed statistics */
+ net_stats->rx_packets = stats->rx_ok;
+ net_stats->tx_packets = stats->tx_ok;
+ net_stats->rx_bytes = stats->rx_byte_cnt;
+ net_stats->tx_bytes = stats->tx_byte_cnt;
+ net_stats->multicast = stats->rx_mcast;
+ net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
+ stats->tx_late_col + stats->tx_abort_col;
+
+ net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
+ stats->rx_len_err + stats->rx_sz_ov +
+ stats->rx_align_err;
+ net_stats->rx_fifo_errors = stats->rx_rxf_ov;
+ net_stats->rx_length_errors = stats->rx_len_err;
+ net_stats->rx_crc_errors = stats->rx_fcs_err;
+ net_stats->rx_frame_errors = stats->rx_align_err;
+ net_stats->rx_over_errors = stats->rx_rxf_ov;
+ net_stats->rx_missed_errors = stats->rx_rxf_ov;
+
+ net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
+ stats->tx_underrun + stats->tx_trunc;
+ net_stats->tx_fifo_errors = stats->tx_underrun;
+ net_stats->tx_aborted_errors = stats->tx_abort_col;
+ net_stats->tx_window_errors = stats->tx_late_col;
+
+ spin_unlock(&stats->lock);
+
+ return net_stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_get_stats64 = emac_get_stats64,
+ .ndo_set_features = emac_set_features,
+ .ndo_set_rx_mode = emac_rx_mode_set,
+};
+
+/* Watchdog task routine, called to reinitialize the EMAC */
+static void emac_work_thread(struct work_struct *work)
+{
+ struct emac_adapter *adpt =
+ container_of(work, struct emac_adapter, work_thread);
+
+ emac_reinit_locked(adpt);
+}
+
+/* Initialize various data structures */
+static void emac_init_adapter(struct emac_adapter *adpt)
+{
+ u32 reg;
+
+ /* descriptors */
+ adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
+ adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
+
+ /* dma */
+ adpt->dma_order = emac_dma_ord_out;
+ adpt->dmar_block = emac_dma_req_4096;
+ adpt->dmaw_block = emac_dma_req_128;
+ adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
+ adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
+ adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
+ adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;
+
+ /* irq moderator */
+ reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
+ ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
+ adpt->irq_mod = reg;
+
+ /* others */
+ adpt->preamble = EMAC_PREAMBLE_DEF;
+}
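
Worked example (values hypothetical, not taken from this patch): the moderator word packs the RX interval into bits 31:16 and the TX interval into bits 15:0, each halved first. With both EMAC_DEF_RX_IRQ_MOD and EMAC_DEF_TX_IRQ_MOD assumed to be 400, the result would be:

    u32 reg = ((400 >> 1) << IRQ_MODERATOR2_INIT_SHFT) |  /* RX half -> bits 31:16 */
              ((400 >> 1) << IRQ_MODERATOR_INIT_SHFT);    /* TX half -> bits 15:0  */
    /* 400 >> 1 == 200 == 0xc8, so reg == 0x00c800c8 */
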
+
+/* Get the clocks */
+static int emac_clks_get(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMAC_CLK_CNT; i++) {
+ struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev,
+ "could not claim clock %s (error=%li)\n",
+ emac_clk_name[i], PTR_ERR(clk));
+
+ return PTR_ERR(clk);
+ }
+
+ adpt->clk[i] = clk;
+ }
+
+ return 0;
+}
+
+/* Initialize clocks */
+static int emac_clks_phase1_init(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ int ret;
+
+ ret = emac_clks_get(pdev, adpt);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+}
+
+/* Enable clocks; requires emac_clks_phase1_init() to have been called first */
+static int emac_clks_phase2_init(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ int ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
+}
+
+static void emac_clks_teardown(struct emac_adapter *adpt)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMAC_CLK_CNT; i++)
+ clk_disable_unprepare(adpt->clk[i]);
+}
+
+/* Get the resources */
+static int emac_probe_resources(struct platform_device *pdev,
+ struct emac_adapter *adpt)
+{
+ struct net_device *netdev = adpt->netdev;
+ struct resource *res;
+ char maddr[ETH_ALEN];
+ int ret = 0;
+
+ /* get mac address */
+ if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
+ ether_addr_copy(netdev->dev_addr, maddr);
+ else
+ eth_hw_addr_random(netdev);
+
+ /* Core 0 interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "error: missing core0 irq resource (error=%i)\n", ret);
+ return ret;
+ }
+ adpt->irq.irq = ret;
+
+ /* base register address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adpt->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adpt->base))
+ return PTR_ERR(adpt->base);
+
+ /* CSR register address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ adpt->csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adpt->csr))
+ return PTR_ERR(adpt->csr);
+
+ netdev->base_addr = (unsigned long)adpt->base;
+
+ return 0;
+}
+
+static const struct of_device_id emac_dt_match[] = {
+ {
+ .compatible = "qcom,fsm9900-emac",
+ },
+ {}
+};
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id emac_acpi_match[] = {
+ {
+ .id = "QCOM8070",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
+#endif
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct emac_adapter *adpt;
+ struct emac_phy *phy;
+ u16 devid, revid;
+ u32 reg;
+ int ret;
+
+ /* The EMAC itself is capable of 64-bit DMA, so try that first. */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ /* Some platforms may restrict the EMAC's address bus to less
+		 * than the size of DDR. In this case, we need to try a
+		 * smaller mask. We could try every possible smaller mask,
+		 * but that's overkill. Instead, just fall back to 32-bit, which
+ * should always work.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
+ }
+ }
+
+ netdev = alloc_etherdev(sizeof(struct emac_adapter));
+ if (!netdev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adpt = netdev_priv(netdev);
+ adpt->netdev = netdev;
+ adpt->msg_enable = EMAC_MSG_DEFAULT;
+
+ phy = &adpt->phy;
+
+ mutex_init(&adpt->reset_lock);
+ spin_lock_init(&adpt->stats.lock);
+
+ adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;
+
+ ret = emac_probe_resources(pdev, adpt);
+ if (ret)
+ goto err_undo_netdev;
+
+ /* initialize clocks */
+ ret = emac_clks_phase1_init(pdev, adpt);
+ if (ret) {
+ dev_err(&pdev->dev, "could not initialize clocks\n");
+ goto err_undo_netdev;
+ }
+
+ netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
+ netdev->irq = adpt->irq.irq;
+
+ adpt->rrd_size = EMAC_RRD_SIZE;
+ adpt->tpd_size = EMAC_TPD_SIZE;
+ adpt->rfd_size = EMAC_RFD_SIZE;
+
+ netdev->netdev_ops = &emac_netdev_ops;
+
+ emac_init_adapter(adpt);
+
+ /* init external phy */
+ ret = emac_phy_config(pdev, adpt);
+ if (ret)
+ goto err_undo_clocks;
+
+ /* init internal sgmii phy */
+ ret = emac_sgmii_config(pdev, adpt);
+ if (ret)
+ goto err_undo_mdiobus;
+
+ /* enable clocks */
+ ret = emac_clks_phase2_init(pdev, adpt);
+ if (ret) {
+ dev_err(&pdev->dev, "could not initialize clocks\n");
+ goto err_undo_mdiobus;
+ }
+
+ emac_mac_reset(adpt);
+
+ /* set hw features */
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features = netdev->features;
+
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ INIT_WORK(&adpt->work_thread, emac_work_thread);
+
+ /* Initialize queues */
+ emac_mac_rx_tx_ring_init_all(pdev, adpt);
+
+ netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
+ NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register net device\n");
+ goto err_undo_napi;
+ }
+
+ reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
+ devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
+ revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
+ reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);
+
+ netif_info(adpt, probe, netdev,
+ "hardware id %d.%d, hardware version %d.%d.%d\n",
+ devid, revid,
+ (reg & MAJOR_BMSK) >> MAJOR_SHFT,
+ (reg & MINOR_BMSK) >> MINOR_SHFT,
+ (reg & STEP_BMSK) >> STEP_SHFT);
+
+ return 0;
+
+err_undo_napi:
+ netif_napi_del(&adpt->rx_q.napi);
+err_undo_mdiobus:
+ mdiobus_unregister(adpt->mii_bus);
+err_undo_clocks:
+ emac_clks_teardown(adpt);
+err_undo_netdev:
+ free_netdev(netdev);
+
+ return ret;
+}
+
+static int emac_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ netif_napi_del(&adpt->rx_q.napi);
+
+ emac_clks_teardown(adpt);
+
+ mdiobus_unregister(adpt->mii_bus);
+ free_netdev(netdev);
+
+ if (adpt->phy.digital)
+ iounmap(adpt->phy.digital);
+ iounmap(adpt->phy.base);
+
+ return 0;
+}
+
+static struct platform_driver emac_platform_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = "qcom-emac",
+ .of_match_table = emac_dt_match,
+ .acpi_match_table = ACPI_PTR(emac_acpi_match),
+ },
+};
+
+module_platform_driver(emac_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
new file mode 100644
index 000000000000..0c76e6cb8c9e
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -0,0 +1,335 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include "emac-mac.h"
+#include "emac-phy.h"
+
+/* EMAC base register offsets */
+#define EMAC_DMA_MAS_CTRL 0x001400
+#define EMAC_IRQ_MOD_TIM_INIT 0x001408
+#define EMAC_BLK_IDLE_STS 0x00140c
+#define EMAC_PHY_LINK_DELAY 0x00141c
+#define EMAC_SYS_ALIV_CTRL 0x001434
+#define EMAC_MAC_IPGIFG_CTRL 0x001484
+#define EMAC_MAC_STA_ADDR0 0x001488
+#define EMAC_MAC_STA_ADDR1 0x00148c
+#define EMAC_HASH_TAB_REG0 0x001490
+#define EMAC_HASH_TAB_REG1 0x001494
+#define EMAC_MAC_HALF_DPLX_CTRL 0x001498
+#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c
+#define EMAC_INT_STATUS 0x001600
+#define EMAC_INT_MASK 0x001604
+#define EMAC_RXMAC_STATC_REG0 0x001700
+#define EMAC_RXMAC_STATC_REG22 0x001758
+#define EMAC_TXMAC_STATC_REG0 0x001760
+#define EMAC_TXMAC_STATC_REG24 0x0017c0
+#define EMAC_CORE_HW_VERSION 0x001974
+#define EMAC_IDT_TABLE0 0x001b00
+#define EMAC_RXMAC_STATC_REG23 0x001bc8
+#define EMAC_RXMAC_STATC_REG24 0x001bcc
+#define EMAC_TXMAC_STATC_REG25 0x001bd0
+#define EMAC_INT1_MASK 0x001bf0
+#define EMAC_INT1_STATUS 0x001bf4
+#define EMAC_INT2_MASK 0x001bf8
+#define EMAC_INT2_STATUS 0x001bfc
+#define EMAC_INT3_MASK 0x001c00
+#define EMAC_INT3_STATUS 0x001c04
+
+/* EMAC_DMA_MAS_CTRL */
+#define DEV_ID_NUM_BMSK 0x7f000000
+#define DEV_ID_NUM_SHFT 24
+#define DEV_REV_NUM_BMSK 0xff0000
+#define DEV_REV_NUM_SHFT 16
+#define INT_RD_CLR_EN 0x4000
+#define IRQ_MODERATOR2_EN 0x800
+#define IRQ_MODERATOR_EN 0x400
+#define LPW_CLK_SEL 0x80
+#define LPW_STATE 0x20
+#define LPW_MODE 0x10
+#define SOFT_RST 0x1
+
+/* EMAC_IRQ_MOD_TIM_INIT */
+#define IRQ_MODERATOR2_INIT_BMSK 0xffff0000
+#define IRQ_MODERATOR2_INIT_SHFT 16
+#define IRQ_MODERATOR_INIT_BMSK 0xffff
+#define IRQ_MODERATOR_INIT_SHFT 0
+
+/* EMAC_INT_STATUS */
+#define DIS_INT BIT(31)
+#define PTP_INT BIT(30)
+#define RFD4_UR_INT BIT(29)
+#define TX_PKT_INT3 BIT(26)
+#define TX_PKT_INT2 BIT(25)
+#define TX_PKT_INT1 BIT(24)
+#define RX_PKT_INT3 BIT(19)
+#define RX_PKT_INT2 BIT(18)
+#define RX_PKT_INT1 BIT(17)
+#define RX_PKT_INT0 BIT(16)
+#define TX_PKT_INT BIT(15)
+#define TXQ_TO_INT BIT(14)
+#define GPHY_WAKEUP_INT BIT(13)
+#define GPHY_LINK_DOWN_INT BIT(12)
+#define GPHY_LINK_UP_INT BIT(11)
+#define DMAW_TO_INT BIT(10)
+#define DMAR_TO_INT BIT(9)
+#define TXF_UR_INT BIT(8)
+#define RFD3_UR_INT BIT(7)
+#define RFD2_UR_INT BIT(6)
+#define RFD1_UR_INT BIT(5)
+#define RFD0_UR_INT BIT(4)
+#define RXF_OF_INT BIT(3)
+#define SW_MAN_INT BIT(2)
+
+/* EMAC_MAILBOX_6 */
+#define RFD2_PROC_IDX_BMSK 0xfff0000
+#define RFD2_PROC_IDX_SHFT 16
+#define RFD2_PROD_IDX_BMSK 0xfff
+#define RFD2_PROD_IDX_SHFT 0
+
+/* EMAC_CORE_HW_VERSION */
+#define MAJOR_BMSK 0xf0000000
+#define MAJOR_SHFT 28
+#define MINOR_BMSK 0xfff0000
+#define MINOR_SHFT 16
+#define STEP_BMSK 0xffff
+#define STEP_SHFT 0
+
+/* EMAC_EMAC_WRAPPER_CSR1 */
+#define TX_INDX_FIFO_SYNC_RST BIT(23)
+#define TX_TS_FIFO_SYNC_RST BIT(22)
+#define RX_TS_FIFO2_SYNC_RST BIT(21)
+#define RX_TS_FIFO1_SYNC_RST BIT(20)
+#define TX_TS_ENABLE BIT(16)
+#define DIS_1588_CLKS BIT(11)
+#define FREQ_MODE BIT(9)
+#define ENABLE_RRD_TIMESTAMP BIT(3)
+
+/* EMAC_EMAC_WRAPPER_CSR2 */
+#define HDRIVE_BMSK 0x3000
+#define HDRIVE_SHFT 12
+#define SLB_EN BIT(9)
+#define PLB_EN BIT(8)
+#define WOL_EN BIT(3)
+#define PHY_RESET BIT(0)
+
+#define EMAC_DEV_ID 0x0040
+
+/* SGMII v2 per lane registers */
+#define SGMII_LN_RSM_START 0x029C
+
+/* SGMII v2 PHY common registers */
+#define SGMII_PHY_CMN_CTRL 0x0408
+#define SGMII_PHY_CMN_RESET_CTRL 0x0410
+
+/* SGMII v2 PHY registers per lane */
+#define SGMII_PHY_LN_OFFSET 0x0400
+#define SGMII_PHY_LN_LANE_STATUS 0x00DC
+#define SGMII_PHY_LN_BIST_GEN0 0x008C
+#define SGMII_PHY_LN_BIST_GEN1 0x0090
+#define SGMII_PHY_LN_BIST_GEN2 0x0094
+#define SGMII_PHY_LN_BIST_GEN3 0x0098
+#define SGMII_PHY_LN_CDR_CTRL1 0x005C
+
+enum emac_clk_id {
+ EMAC_CLK_AXI,
+ EMAC_CLK_CFG_AHB,
+ EMAC_CLK_HIGH_SPEED,
+ EMAC_CLK_MDIO,
+ EMAC_CLK_TX,
+ EMAC_CLK_RX,
+ EMAC_CLK_SYS,
+ EMAC_CLK_CNT
+};
+
+#define EMAC_LINK_SPEED_UNKNOWN 0x0
+#define EMAC_LINK_SPEED_10_HALF BIT(0)
+#define EMAC_LINK_SPEED_10_FULL BIT(1)
+#define EMAC_LINK_SPEED_100_HALF BIT(2)
+#define EMAC_LINK_SPEED_100_FULL BIT(3)
+#define EMAC_LINK_SPEED_1GB_FULL BIT(5)
+
+#define EMAC_MAX_SETUP_LNK_CYCLE 100
+
+/* Wake On Lan */
+#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */
+#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */
+
+struct emac_stats {
+ /* rx */
+ u64 rx_ok; /* good packets */
+ u64 rx_bcast; /* good broadcast packets */
+ u64 rx_mcast; /* good multicast packets */
+ u64 rx_pause; /* pause packet */
+ u64 rx_ctrl; /* control packets other than pause frames */
+ u64 rx_fcs_err; /* packets with bad FCS. */
+ u64 rx_len_err; /* packets with length mismatch */
+ u64 rx_byte_cnt; /* good bytes count (without FCS) */
+ u64 rx_runt; /* runt packets */
+ u64 rx_frag; /* fragment count */
+ u64 rx_sz_64; /* packets that are 64 bytes */
+ u64 rx_sz_65_127; /* packets that are 65-127 bytes */
+ u64 rx_sz_128_255; /* packets that are 128-255 bytes */
+ u64 rx_sz_256_511; /* packets that are 256-511 bytes */
+ u64 rx_sz_512_1023; /* packets that are 512-1023 bytes */
+ u64 rx_sz_1024_1518; /* packets that are 1024-1518 bytes */
+ u64 rx_sz_1519_max; /* packets that are 1519-MTU bytes */
+ u64 rx_sz_ov; /* packets that are >MTU bytes (truncated) */
+ u64 rx_rxf_ov; /* packets dropped due to RX FIFO overflow */
+ u64 rx_align_err; /* alignment errors */
+ u64 rx_bcast_byte_cnt; /* broadcast packets byte count (without FCS) */
+ u64 rx_mcast_byte_cnt; /* multicast packets byte count (without FCS) */
+ u64 rx_err_addr; /* packets dropped due to address filtering */
+ u64 rx_crc_align; /* CRC align errors */
+ u64 rx_jabbers; /* jabbers */
+
+ /* tx */
+ u64 tx_ok; /* good packets */
+ u64 tx_bcast; /* good broadcast packets */
+ u64 tx_mcast; /* good multicast packets */
+ u64 tx_pause; /* pause packets */
+ u64 tx_exc_defer; /* packets with excessive deferral */
+ u64 tx_ctrl; /* control packets other than pause frames */
+ u64 tx_defer; /* packets that are deferred. */
+ u64 tx_byte_cnt; /* good bytes count (without FCS) */
+ u64 tx_sz_64; /* packets that are 64 bytes */
+ u64 tx_sz_65_127; /* packets that are 65-127 bytes */
+ u64 tx_sz_128_255; /* packets that are 128-255 bytes */
+ u64 tx_sz_256_511; /* packets that are 256-511 bytes */
+ u64 tx_sz_512_1023; /* packets that are 512-1023 bytes */
+ u64 tx_sz_1024_1518; /* packets that are 1024-1518 bytes */
+ u64 tx_sz_1519_max; /* packets that are 1519-MTU bytes */
+ u64 tx_1_col; /* packets with a single prior collision */
+ u64 tx_2_col; /* packets with multiple prior collisions */
+ u64 tx_late_col; /* packets with late collisions */
+ u64 tx_abort_col; /* packets aborted due to excess collisions */
+ u64 tx_underrun; /* packets aborted due to FIFO underrun */
+ u64 tx_rd_eop; /* count of reads beyond EOP */
+ u64 tx_len_err; /* packets with length mismatch */
+ u64 tx_trunc; /* packets truncated due to size >MTU */
+ u64 tx_bcast_byte; /* broadcast packets byte count (without FCS) */
+ u64 tx_mcast_byte; /* multicast packets byte count (without FCS) */
+ u64 tx_col; /* collisions */
+
+ spinlock_t lock; /* prevent multiple simultaneous readers */
+};
+
+/* RSS hstype Definitions */
+#define EMAC_RSS_HSTYP_IPV4_EN 0x00000001
+#define EMAC_RSS_HSTYP_TCP4_EN 0x00000002
+#define EMAC_RSS_HSTYP_IPV6_EN 0x00000004
+#define EMAC_RSS_HSTYP_TCP6_EN 0x00000008
+#define EMAC_RSS_HSTYP_ALL_EN (\
+ EMAC_RSS_HSTYP_IPV4_EN |\
+ EMAC_RSS_HSTYP_TCP4_EN |\
+ EMAC_RSS_HSTYP_IPV6_EN |\
+ EMAC_RSS_HSTYP_TCP6_EN)
+
+#define EMAC_VLAN_TO_TAG(_vlan, _tag) \
+ (_tag = ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8)))
+
+#define EMAC_TAG_TO_VLAN(_tag, _vlan) \
+ (_vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8)))
+
+#define EMAC_DEF_RX_BUF_SIZE 1536
+#define EMAC_MAX_JUMBO_PKT_SIZE (9 * 1024)
+#define EMAC_MAX_TX_OFFLOAD_THRESH (9 * 1024)
+
+#define EMAC_MAX_ETH_FRAME_SIZE EMAC_MAX_JUMBO_PKT_SIZE
+#define EMAC_MIN_ETH_FRAME_SIZE 68
+
+#define EMAC_DEF_TX_QUEUES 1
+#define EMAC_DEF_RX_QUEUES 1
+
+#define EMAC_MIN_TX_DESCS 128
+#define EMAC_MIN_RX_DESCS 128
+
+#define EMAC_MAX_TX_DESCS 16383
+#define EMAC_MAX_RX_DESCS 2047
+
+#define EMAC_DEF_TX_DESCS 512
+#define EMAC_DEF_RX_DESCS 256
+
+#define EMAC_DEF_RX_IRQ_MOD 250
+#define EMAC_DEF_TX_IRQ_MOD 250
+
+#define EMAC_WATCHDOG_TIME (5 * HZ)
+
+/* by default check link every 4 seconds */
+#define EMAC_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* emac_irq per-device (per-adapter) irq properties.
+ * @irq: irq number.
+ * @mask: mask to use over the status register.
+ */
+struct emac_irq {
+ unsigned int irq;
+ u32 mask;
+};
+
+/* The device's main data structure */
+struct emac_adapter {
+ struct net_device *netdev;
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+
+ void __iomem *base;
+ void __iomem *csr;
+
+ struct emac_phy phy;
+ struct emac_stats stats;
+
+ struct emac_irq irq;
+ struct clk *clk[EMAC_CLK_CNT];
+
+ /* All Descriptor memory */
+ struct emac_ring_header ring_header;
+ struct emac_tx_queue tx_q;
+ struct emac_rx_queue rx_q;
+ unsigned int tx_desc_cnt;
+ unsigned int rx_desc_cnt;
+ unsigned int rrd_size; /* in quad words */
+ unsigned int rfd_size; /* in quad words */
+ unsigned int tpd_size; /* in quad words */
+
+ unsigned int rxbuf_size;
+
+ /* Ring parameter */
+ u8 tpd_burst;
+ u8 rfd_burst;
+ unsigned int dmaw_dly_cnt;
+ unsigned int dmar_dly_cnt;
+ enum emac_dma_req_block dmar_block;
+ enum emac_dma_req_block dmaw_block;
+ enum emac_dma_order dma_order;
+
+ u32 irq_mod;
+ u32 preamble;
+
+ struct work_struct work_thread;
+
+ u16 msg_enable;
+
+ struct mutex reset_lock;
+};
+
+int emac_reinit_locked(struct emac_adapter *adpt);
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
+irqreturn_t emac_isr(int irq, void *data);
+
+#endif /* _EMAC_H_ */
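emac_probe() above decodes the device and revision fields out of EMAC_DMA_MAS_CTRL using the *_BMSK/*_SHFT pairs defined in this header. A small standalone illustration of that mask-and-shift pattern (user-space C; the register value is invented for the example):

#include <stdio.h>
#include <stdint.h>

/* Mask/shift pairs copied from emac.h above */
#define DEV_ID_NUM_BMSK		0x7f000000
#define DEV_ID_NUM_SHFT		24
#define DEV_REV_NUM_BMSK	0xff0000
#define DEV_REV_NUM_SHFT	16

int main(void)
{
	uint32_t reg = 0x12340000;	/* invented register value */
	uint32_t devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
	uint32_t revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;

	printf("hardware id %u.%u\n", devid, revid);	/* prints "hardware id 18.52" */
	return 0;
}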
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index cb29ee24cf1b..5ef5d728c250 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1062,14 +1062,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 4f132cf177cd..85ec447c2d18 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -27,7 +27,7 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A779x.
+ R8A7740, R8A774x, R8A777x and R8A779x.
config RAVB
tristate "Renesas Ethernet AVB support"
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 4e5d5e953e15..f1109661a533 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1011,7 +1011,6 @@ struct ravb_private {
struct work_struct work;
/* MII transceiver section. */
struct mii_bus *mii_bus; /* MDIO bus control */
- struct phy_device *phydev; /* PHY device control */
int link;
phy_interface_t phy_interface;
int msg_enable;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 1e1cc0fad17f..630536bc72f9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -942,7 +942,7 @@ out:
static void ravb_adjust_link(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
bool new_state = false;
if (phydev->link) {
@@ -1032,48 +1032,47 @@ static int ravb_phy_init(struct net_device *ndev)
phy_attached_info(phydev);
- priv->phydev = phydev;
-
return 0;
}
/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
- struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_phy_init(ndev);
if (error)
return error;
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
return 0;
}
-static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int ravb_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
int error = -ENODEV;
unsigned long flags;
- if (priv->phydev) {
+ if (ndev->phydev) {
spin_lock_irqsave(&priv->lock, flags);
- error = phy_ethtool_gset(priv->phydev, ecmd);
+ error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&priv->lock, flags);
}
return error;
}
-static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int ravb_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
int error;
- if (!priv->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&priv->lock, flags);
@@ -1081,11 +1080,11 @@ static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
/* Disable TX and RX */
ravb_rcv_snd_disable(ndev);
- error = phy_ethtool_sset(priv->phydev, ecmd);
+ error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
if (error)
goto error_exit;
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
priv->duplex = 1;
else
priv->duplex = 0;
@@ -1110,9 +1109,9 @@ static int ravb_nway_reset(struct net_device *ndev)
int error = -ENODEV;
unsigned long flags;
- if (priv->phydev) {
+ if (ndev->phydev) {
spin_lock_irqsave(&priv->lock, flags);
- error = phy_start_aneg(priv->phydev);
+ error = phy_start_aneg(ndev->phydev);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1309,8 +1308,6 @@ static int ravb_get_ts_info(struct net_device *ndev,
}
static const struct ethtool_ops ravb_ethtool_ops = {
- .get_settings = ravb_get_settings,
- .set_settings = ravb_set_settings,
.nway_reset = ravb_nway_reset,
.get_msglevel = ravb_get_msglevel,
.set_msglevel = ravb_set_msglevel,
@@ -1321,6 +1318,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ringparam = ravb_get_ringparam,
.set_ringparam = ravb_set_ringparam,
.get_ts_info = ravb_get_ts_info,
+ .get_link_ksettings = ravb_get_link_ksettings,
+ .set_link_ksettings = ravb_set_link_ksettings,
};
static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
@@ -1661,10 +1660,9 @@ static int ravb_close(struct net_device *ndev)
}
/* PHY disconnect */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
}
if (priv->chip_id != RCAR_GEN2) {
@@ -1753,8 +1751,7 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct ravb_private *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -1876,6 +1873,20 @@ static int ravb_set_gti(struct net_device *ndev)
return 0;
}
+static void ravb_set_config_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+ CCC_GAC | CCC_CSEL_HPB);
+ }
+}
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1978,14 +1989,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ravb_ethtool_ops;
/* Set AVB config mode */
- if (chip_id == RCAR_GEN2) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
- CCC_GAC | CCC_CSEL_HPB);
- }
+ ravb_set_config_mode(ndev);
/* Set GTI value */
error = ravb_set_gti(ndev);
@@ -2096,8 +2100,55 @@ static int ravb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ravb_runtime_nop(struct device *dev)
+static int __maybe_unused ravb_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = ravb_close(ndev);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused ravb_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ /* All registers have been reset to their default values.
+ * Restore all registers which were set up at probe time and
+ * reopen the device if it was running before the system was suspended.
+ */
+
+ /* Set AVB config mode */
+ ravb_set_config_mode(ndev);
+
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+
+ /* Restore descriptor base address table */
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ if (netif_running(ndev)) {
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ return ret;
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
@@ -2110,20 +2161,16 @@ static int ravb_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
-#define RAVB_PM_OPS (&ravb_dev_pm_ops)
-#else
-#define RAVB_PM_OPS NULL
-#endif
-
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
.remove = ravb_remove,
.driver = {
.name = "ravb",
- .pm = RAVB_PM_OPS,
+ .pm = &ravb_dev_pm_ops,
.of_match_table = ravb_match_table,
},
};
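The ravb changes above drop the CONFIG_PM #ifdef in favour of __maybe_unused callbacks wired up with SET_SYSTEM_SLEEP_PM_OPS, so the handlers compile away when power management is disabled. A schematic of that pattern with hypothetical foo_* names (sketch only, not a complete driver):

/* .probe/.remove omitted; sketch of the PM wiring only. */
#include <linux/pm.h>
#include <linux/platform_device.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the device; detach the netdev if it is running */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* reprogram registers lost across suspend, then reattach */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};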
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 054e795df90f..05b0dc55de77 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1728,7 +1728,7 @@ out:
static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- struct phy_device *phydev = mdp->phydev;
+ struct phy_device *phydev = ndev->phydev;
int new_state = 0;
if (phydev->link) {
@@ -1805,51 +1805,48 @@ static int sh_eth_phy_init(struct net_device *ndev)
phy_attached_info(phydev);
- mdp->phydev = phydev;
-
return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
ret = sh_eth_phy_init(ndev);
if (ret)
return ret;
- phy_start(mdp->phydev);
+ phy_start(ndev->phydev);
return 0;
}
-static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int sh_eth_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ ret = phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
}
-static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+static int sh_eth_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
@@ -1857,11 +1854,11 @@ static int sh_eth_set_settings(struct net_device *ndev,
/* disable tx and rx */
sh_eth_rcv_snd_disable(ndev);
- ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
if (ret)
goto error_exit;
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
mdp->duplex = 1;
else
mdp->duplex = 0;
@@ -2072,11 +2069,11 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
- if (!mdp->phydev)
+ if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_start_aneg(mdp->phydev);
+ ret = phy_start_aneg(ndev->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
return ret;
@@ -2203,8 +2200,6 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
- .get_settings = sh_eth_get_settings,
- .set_settings = sh_eth_set_settings,
.get_regs_len = sh_eth_get_regs_len,
.get_regs = sh_eth_get_regs,
.nway_reset = sh_eth_nway_reset,
@@ -2216,6 +2211,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_sset_count = sh_eth_get_sset_count,
.get_ringparam = sh_eth_get_ringparam,
.set_ringparam = sh_eth_set_ringparam,
+ .get_link_ksettings = sh_eth_get_link_ksettings,
+ .set_link_ksettings = sh_eth_set_link_ksettings,
};
/* network device open function */
@@ -2413,10 +2410,9 @@ static int sh_eth_close(struct net_device *ndev)
sh_eth_dev_exit(ndev);
/* PHY Disconnect */
- if (mdp->phydev) {
- phy_stop(mdp->phydev);
- phy_disconnect(mdp->phydev);
- mdp->phydev = NULL;
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
}
free_irq(ndev->irq, ndev);
@@ -2434,8 +2430,7 @@ static int sh_eth_close(struct net_device *ndev)
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- struct phy_device *phydev = mdp->phydev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -2964,6 +2959,8 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index c62380e34a1d..d050f37f3e0f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -518,7 +518,6 @@ struct sh_eth_private {
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
- struct phy_device *phydev; /* PHY device control */
int link;
phy_interface_t phy_interface;
int msg_enable;
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 1ab995f7146b..2eb9b49569d5 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
@@ -52,6 +53,9 @@ struct rocker_port {
struct rocker_dma_ring_info rx_ring;
};
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker);
+
struct rocker_world_ops;
struct rocker {
@@ -66,6 +70,7 @@ struct rocker {
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
+ struct notifier_block fib_nb;
struct rocker_world_ops *wops;
void *wpriv;
};
@@ -117,11 +122,6 @@ struct rocker_world_ops {
int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
struct switchdev_obj_port_vlan *vlan,
switchdev_obj_dump_cb_t *cb);
- int (*port_obj_fib4_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
- int (*port_obj_fib4_del)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -141,6 +141,11 @@ struct rocker_world_ops {
int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port,
const unsigned char *addr,
__be16 vlan_id);
+ int (*fib4_add)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ int (*fib4_del)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ void (*fib4_abort)(struct rocker *rocker);
};
extern struct rocker_world_ops rocker_ofdpa_ops;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index f0b09b05ed3f..5424fb341613 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1625,29 +1625,6 @@ rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
}
static int
-rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_add)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_add(rocker_port, fib4, trans);
-}
-
-static int
-rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_del)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_del(rocker_port, fib4);
-}
-
-static int
rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -1733,6 +1710,34 @@ static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
+static int rocker_world_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_add)
+ return 0;
+ return wops->fib4_add(rocker, fen_info);
+}
+
+static int rocker_world_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_del)
+ return 0;
+ return wops->fib4_del(rocker, fen_info);
+}
+
+static void rocker_world_fib4_abort(struct rocker *rocker)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (wops->fib4_abort)
+ wops->fib4_abort(rocker);
+}
+
/*****************
* Net device ops
*****************/
@@ -2096,11 +2101,6 @@ static int rocker_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_add(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_add(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -2125,10 +2125,6 @@ static int rocker_port_obj_del(struct net_device *dev,
err = rocker_world_port_obj_vlan_del(rocker_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_del(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_del(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -2175,6 +2171,31 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_obj_dump = rocker_port_obj_dump,
};
+static int rocker_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = rocker_world_fib4_add(rocker, fen_info);
+ if (err)
+ rocker_world_fib4_abort(rocker);
+ else
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ rocker_world_fib4_del(rocker, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ rocker_world_fib4_abort(rocker);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
/********************
* ethtool interface
********************/
@@ -2412,7 +2433,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
skb->protocol = eth_type_trans(skb, rocker_port->dev);
if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
- skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+ skb->offload_fwd_mark = 1;
rocker_port->dev->stats.rx_packets++;
rocker_port->dev->stats.rx_bytes += skb->len;
@@ -2740,6 +2761,9 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_ports;
}
+ rocker->fib_nb.notifier_call = rocker_router_fib_event;
+ register_fib_notifier(&rocker->fib_nb);
+
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);
@@ -2771,6 +2795,7 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
rocker_remove_ports(rocker);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -2799,6 +2824,37 @@ static bool rocker_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &rocker_port_netdev_ops;
}
+static bool rocker_port_dev_check_under(const struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ if (!rocker_port_dev_check(dev))
+ return false;
+
+ rocker_port = netdev_priv(dev);
+ if (rocker_port->rocker != rocker)
+ return false;
+
+ return true;
+}
+
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (rocker_port_dev_check_under(dev, rocker))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (rocker_port_dev_check_under(lower_dev, rocker))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 1ca796316173..431a60804272 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -99,6 +99,7 @@ struct ofdpa_flow_tbl_entry {
struct ofdpa_flow_tbl_key key;
size_t key_len;
u32 key_crc32; /* key */
+ struct fib_info *fi;
};
struct ofdpa_group_tbl_entry {
@@ -189,6 +190,7 @@ struct ofdpa {
spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
unsigned long ageing_time;
+ bool fib_aborted;
};
struct ofdpa_port {
@@ -1043,7 +1045,8 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
- u32 group_id, int flags)
+ u32 group_id, struct fib_info *fi,
+ int flags)
{
struct ofdpa_flow_tbl_entry *entry;
@@ -1060,6 +1063,7 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
entry->key.ucast_routing.group_id = group_id;
entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
ucast_routing.group_id);
+ entry->fi = fi;
return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
@@ -1425,7 +1429,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
- group_id, flags);
+ group_id, NULL, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
@@ -2390,7 +2394,7 @@ found:
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
struct switchdev_trans *trans, __be32 dst,
- int dst_len, const struct fib_info *fi,
+ int dst_len, struct fib_info *fi,
u32 tb_id, int flags)
{
const struct fib_nh *nh;
@@ -2426,7 +2430,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
- group_id, flags);
+ group_id, fi, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
err, &dst);
@@ -2558,7 +2562,6 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
int err;
- switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
rocker_port_set_learning(rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
@@ -2719,28 +2722,6 @@ static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
return err;
}
-static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, trans,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id, 0);
-}
-
-static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id,
- OFDPA_OP_FLAG_REMOVE);
-}
-
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -2817,7 +2798,6 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
ofdpa_port->bridge_dev = bridge;
- switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);
return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
}
@@ -2836,8 +2816,6 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
ofdpa_port_internal_vlan_id_get(ofdpa_port,
ofdpa_port->dev->ifindex);
- switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
- false);
ofdpa_port->bridge_dev = NULL;
err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
@@ -2926,6 +2904,82 @@ static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
}
+static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ rocker_port = rocker_port_dev_lower_find(dev, rocker);
+ return rocker_port ? rocker_port->wpriv : NULL;
+}
+
+static int ofdpa_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ int err;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, 0);
+ if (err)
+ return err;
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
+}
+
+static int ofdpa_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ fib_info_offload_dec(fen_info->fi);
+ return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
+}
+
+static void ofdpa_fib4_abort(struct rocker *rocker)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ struct ofdpa_flow_tbl_entry *flow_entry;
+ struct hlist_node *tmp;
+ unsigned long flags;
+ int bkt;
+
+ if (ofdpa->fib_aborted)
+ return;
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
+ if (flow_entry->key.tbl_id !=
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
+ continue;
+ ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
+ rocker);
+ if (!ofdpa_port)
+ continue;
+ fib_info_offload_dec(flow_entry->fi);
+ ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+ flow_entry);
+ }
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
+ ofdpa->fib_aborted = true;
+}
+
struct rocker_world_ops rocker_ofdpa_ops = {
.kind = "ofdpa",
.priv_size = sizeof(struct ofdpa),
@@ -2945,8 +2999,6 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
- .port_obj_fib4_add = ofdpa_port_obj_fib4_add,
- .port_obj_fib4_del = ofdpa_port_obj_fib4_del,
.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
@@ -2955,4 +3007,7 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_neigh_update = ofdpa_port_neigh_update,
.port_neigh_destroy = ofdpa_port_neigh_destroy,
.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
+ .fib4_add = ofdpa_fib4_add,
+ .fib4_del = ofdpa_fib4_del,
+ .fib4_abort = ofdpa_fib4_abort,
};
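The rocker changes above move IPv4 FIB offload from switchdev objects to the FIB notifier chain: a notifier_block is registered at probe time and its callback reacts to route and rule events. A minimal sketch of such a consumer (hypothetical foo_* names; the event types and register/unregister calls are the ones used in rocker_main.c above):

#include <linux/notifier.h>
#include <net/ip_fib.h>

static int foo_fib_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct fib_entry_notifier_info *fen_info = ptr;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		/* offload fen_info->dst / dst_len / fi to hardware */
		break;
	case FIB_EVENT_ENTRY_DEL:
		/* withdraw the route from hardware */
		break;
	case FIB_EVENT_RULE_ADD:	/* fall through */
	case FIB_EVENT_RULE_DEL:
		/* policy routing in use: give up offloading, as rocker does */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_fib_nb = {
	.notifier_call = foo_fib_event,
};

/* probe:  register_fib_notifier(&foo_fib_nb);
 * remove: unregister_fib_notifier(&foo_fib_nb);
 */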
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e00a669e9e09..00279da6a1e8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -177,7 +177,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
@@ -188,7 +188,7 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
- if (outlen < sizeof(outbuf)) {
+ if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
netif_err(efx, drv, efx->net_dev,
"unable to read datapath firmware capabilities\n");
return -EIO;
@@ -197,6 +197,12 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_FLAGS2);
+ else
+ nic_data->datapath_caps2 = 0;
+
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
nic_data->rx_dpcpu_fw_id =
@@ -227,6 +233,116 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
+static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int implemented;
+ unsigned int enabled;
+ int rc;
+
+ nic_data->workaround_35388 = false;
+ nic_data->workaround_61265 = false;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+
+ if (rc == -ENOSYS) {
+ /* Firmware without GET_WORKAROUNDS - not a problem. */
+ rc = 0;
+ } else if (rc == 0) {
+ /* Bug61265 workaround is always enabled if implemented. */
+ if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
+ nic_data->workaround_61265 = true;
+
+ if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
+ nic_data->workaround_35388 = true;
+ } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
+ /* Workaround is implemented but not enabled.
+ * Try to enable it.
+ */
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG35388,
+ true, NULL);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ /* If we failed to set the workaround just carry on. */
+ rc = 0;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 61265 is %sabled\n",
+ nic_data->workaround_61265 ? "en" : "dis");
+
+ return rc;
+}
+
+static void efx_ef10_process_timer_config(struct efx_nic *efx,
+ const efx_dword_t *data)
+{
+ unsigned int max_count;
+
+ if (EFX_EF10_WORKAROUND_61265(efx)) {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
+ efx->timer_max_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
+ } else if (EFX_EF10_WORKAROUND_35388(efx)) {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
+ max_count = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
+ efx->timer_max_ns = max_count * efx->timer_quantum_ns;
+ } else {
+ efx->timer_quantum_ns = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
+ max_count = MCDI_DWORD(data,
+ GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
+ efx->timer_max_ns = max_count * efx->timer_quantum_ns;
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "got timer properties from MC: quantum %u ns; max %u ns\n",
+ efx->timer_quantum_ns, efx->timer_max_ns);
+}
+
+static int efx_ef10_get_timer_config(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
+ int rc;
+
+ rc = efx_ef10_get_timer_workarounds(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+
+ if (rc == 0) {
+ efx_ef10_process_timer_config(efx, outbuf);
+ } else if (rc == -ENOSYS || rc == -EPERM) {
+ /* Not available - fall back to Huntington defaults. */
+ unsigned int quantum;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ return rc;
+
+ quantum = 1536000 / rc; /* 1536 cycles */
+ efx->timer_quantum_ns = quantum;
+ efx->timer_max_ns = efx->type->timer_period_max * quantum;
+ rc = 0;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
+ MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
+ NULL, 0, rc);
+ }
+
+ return rc;
+}
+
static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
@@ -527,32 +643,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail5;
- rc = efx_ef10_get_sysclk_freq(efx);
+ rc = efx_ef10_get_timer_config(efx);
if (rc < 0)
goto fail5;
- efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
-
- /* Check whether firmware supports bug 35388 workaround.
- * First try to enable it, then if we get EPERM, just
- * ask if it's already enabled
- */
- rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
- if (rc == 0) {
- nic_data->workaround_35388 = true;
- } else if (rc == -EPERM) {
- unsigned int enabled;
-
- rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
- if (rc)
- goto fail3;
- nic_data->workaround_35388 = enabled &
- MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
- } else if (rc != -ENOSYS && rc != -ENOENT) {
- goto fail5;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "workaround for bug 35388 is %sabled\n",
- nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
if (rc && rc != -EPERM)
@@ -1440,9 +1533,10 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
-/* These statistics are only provided by the 10G MAC. For a 10G/40G
- * switchable port we do not expose these because they might not
- * include all the packets they should.
+/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
+ * For a 10G/40G switchable port we do not expose these because they might
+ * not include all the packets they should.
+ * On 8000 series NICs these statistics are always provided.
*/
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
(1ULL << EF10_STAT_port_tx_lt64) | \
@@ -1488,10 +1582,15 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
return 0;
- if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
- else
+ /* 8000 series have everything even at 40G */
+ if (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+ } else {
raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+ }
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
@@ -1617,7 +1716,6 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
efx_ef10_get_stat_mask(efx, mask);
dma_stats = efx->stats_buffer.addr;
- nic_data = efx->nic_data;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
@@ -1744,27 +1842,43 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- unsigned int mode, value;
+ unsigned int mode, usecs;
efx_dword_t timer_cmd;
- if (channel->irq_moderation) {
+ if (channel->irq_moderation_us) {
mode = 3;
- value = channel->irq_moderation - 1;
+ usecs = channel->irq_moderation_us;
} else {
mode = 0;
- value = 0;
+ usecs = 0;
}
- if (EFX_EF10_WORKAROUND_35388(efx)) {
+ if (EFX_EF10_WORKAROUND_61265(efx)) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
+ unsigned int ns = usecs * 1000;
+
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
+ channel->channel);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
+ MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
+ inbuf, sizeof(inbuf), 0, NULL, 0);
+ } else if (EFX_EF10_WORKAROUND_35388(efx)) {
+ unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
+
EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
EFE_DD_EVQ_IND_TIMER_FLAGS,
ERF_DD_EVQ_IND_TIMER_MODE, mode,
- ERF_DD_EVQ_IND_TIMER_VAL, value);
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
channel->channel);
} else {
+ unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
+
EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, value);
+ ERF_DZ_TC_TIMER_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
channel->channel);
}
@@ -1935,14 +2049,18 @@ static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+static int efx_ef10_irq_test_generate(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+ if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
+ NULL) == 0)
+ return -ENOTSUPP;
+
BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
- (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
inbuf, sizeof(inbuf), NULL, 0, NULL);
}
@@ -2536,13 +2654,12 @@ fail:
static int efx_ef10_ev_init(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
- EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- bool supports_rx_merge;
size_t inlen, outlen;
unsigned int enabled, implemented;
dma_addr_t dma_addr;
@@ -2550,9 +2667,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
int i;
nic_data = efx->nic_data;
- supports_rx_merge =
- !!(nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
@@ -2561,11 +2675,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
/* INIT_EVQ expects index in vector table, not absolute */
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
- MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
- INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_IN_FLAG_TX_MERGE, 1,
- INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
@@ -2574,6 +2683,27 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+ if (nic_data->datapath_caps2 &
+ 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
+ /* Use the new generic approach to specifying event queue
+ * configuration, requesting lower latency or higher throughput.
+ * The options that actually get used appear in the output.
+ */
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_V2_IN_FLAG_TYPE,
+ MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+ } else {
+ bool cut_thru = !(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
+ }
+
dma_addr = channel->eventq.buf.dma_addr;
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
@@ -2584,6 +2714,13 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
+
+ if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %d using event queue flags %08x\n",
+ channel->channel,
+ MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
/* IRQ return is ignored */
if (channel->channel || rc)
return rc;
@@ -2591,8 +2728,8 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
/* Successfully created event queue on channel 0 */
rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before the bug26807
- * workaround, thus the latter must be unavailable in this fw
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
*/
nic_data->workaround_26807 = false;
rc = 0;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 14b821b1c880..3cf3557106c2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -281,6 +281,27 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
* NAPI guarantees serialisation of polls of the same device, which
* provides the guarantee required by efx_process_channel().
*/
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
@@ -301,22 +322,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
- if (unlikely(channel->irq_mod_score <
- irq_adapt_low_thresh)) {
- if (channel->irq_moderation > 1) {
- channel->irq_moderation -= 1;
- efx->type->push_irq_moderation(channel);
- }
- } else if (unlikely(channel->irq_mod_score >
- irq_adapt_high_thresh)) {
- if (channel->irq_moderation <
- efx->irq_rx_moderation) {
- channel->irq_moderation += 1;
- efx->type->push_irq_moderation(channel);
- }
- }
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
+ efx_update_irq_mod(efx, channel);
}
efx_filter_rfs_expire(channel);
@@ -1703,6 +1709,7 @@ static int efx_probe_nic(struct efx_nic *efx)
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
+ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true);
@@ -1949,14 +1956,21 @@ static void efx_remove_all(struct efx_nic *efx)
* Interrupt moderation
*
**************************************************************************/
-
-static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
if (usecs == 0)
return 0;
- if (usecs * 1000 < quantum_ns)
+ if (usecs * 1000 < efx->timer_quantum_ns)
return 1; /* never round down to 0 */
- return usecs * 1000 / quantum_ns;
+ return usecs * 1000 / efx->timer_quantum_ns;
+}
+
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
+{
+ /* We must round up when converting ticks to microseconds
+ * because we round down when converting the other way.
+ */
+ return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
/* Set interrupt moderation parameters */
@@ -1965,21 +1979,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx)
{
struct efx_channel *channel;
- unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
- efx->timer_quantum_ns,
- 1000);
- unsigned int tx_ticks;
- unsigned int rx_ticks;
+ unsigned int timer_max_us;
EFX_ASSERT_RESET_SERIALISED(efx);
- if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
- return -EINVAL;
+ timer_max_us = efx->timer_max_ns / 1000;
- tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
- rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
+ if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+ return -EINVAL;
- if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
"RX and TX IRQ moderation must be equal\n");
@@ -1987,12 +1996,12 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
}
efx->irq_rx_adaptive = rx_adaptive;
- efx->irq_rx_moderation = rx_ticks;
+ efx->irq_rx_moderation_us = rx_usecs;
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel))
- channel->irq_moderation = rx_ticks;
+ channel->irq_moderation_us = rx_usecs;
else if (efx_channel_has_tx_queues(channel))
- channel->irq_moderation = tx_ticks;
+ channel->irq_moderation_us = tx_usecs;
}
return 0;
@@ -2001,26 +2010,21 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
-
*rx_adaptive = efx->irq_rx_adaptive;
- *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
- efx->timer_quantum_ns,
- 1000);
+ *rx_usecs = efx->irq_rx_moderation_us;
/* If channels are shared between RX and TX, so is IRQ
* moderation. Otherwise, IRQ moderation is the same for all
* TX channels and is not adaptive.
*/
- if (efx->tx_channel_offset == 0)
+ if (efx->tx_channel_offset == 0) {
*tx_usecs = *rx_usecs;
- else
- *tx_usecs = DIV_ROUND_UP(
- efx->channel[efx->tx_channel_offset]->irq_moderation *
- efx->timer_quantum_ns,
- 1000);
+ } else {
+ struct efx_channel *tx_channel;
+
+ tx_channel = efx->channel[efx->tx_channel_offset];
+ *tx_usecs = tx_channel->irq_moderation_us;
+ }
}
/**************************************************************************
@@ -2259,8 +2263,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
rc = efx_check_disabled(efx);
if (rc)
return rc;
- if (new_mtu > EFX_MAX_MTU)
+ if (new_mtu > EFX_MAX_MTU) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big (max: %d)\n",
+ new_mtu, EFX_MAX_MTU);
return -EINVAL;
+ }
+ if (new_mtu < EFX_MIN_MTU) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too small (min: %d)\n",
+ new_mtu, EFX_MIN_MTU);
+ return -EINVAL;
+ }
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
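
The efx.c hunks above move IRQ moderation bookkeeping from hardware ticks to microseconds and introduce efx_usecs_to_ticks()/efx_ticks_to_usecs(), which deliberately round in opposite directions. A minimal standalone sketch of that rounding pair, not driver code; the quantum value is only an example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Same rules as the helpers above: round down when programming (but never
 * to zero), round up when reading back, so re-applying a read-back value
 * never programs fewer ticks than before.
 */
static unsigned int usecs_to_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1;
	return usecs * 1000 / quantum_ns;
}

static unsigned int ticks_to_usecs(unsigned int ticks, unsigned int quantum_ns)
{
	return DIV_ROUND_UP(ticks * quantum_ns, 1000);
}

int main(void)
{
	unsigned int quantum_ns = 6144;	/* Siena non-turbo quantum, see the siena.c hunk below */
	unsigned int ticks = usecs_to_ticks(30, quantum_ns);

	/* 30 us -> 4 ticks -> reads back as 25 us, which maps to 4 ticks again */
	printf("%u ticks, read back as %u us\n",
	       ticks, ticks_to_usecs(ticks, quantum_ns));
	return 0;
}
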
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index c3ae739e9c7a..342ae16e1f2d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -204,6 +204,8 @@ int efx_try_recovery(struct efx_nic *efx);
/* Global */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index d790cb8d9db3..1a7092602aec 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -378,12 +378,15 @@ static void falcon_push_irq_moderation(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
/* Set timer register */
- if (channel->irq_moderation) {
+ if (channel->irq_moderation_us) {
+ unsigned int ticks;
+
+ ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
FFE_BB_TIMER_MODE_INT_HLDOFF,
FRF_AB_TC_TIMER_VAL,
- channel->irq_moderation - 1);
+ ticks - 1);
} else {
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_AB_TC_TIMER_MODE,
@@ -2373,6 +2376,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
EFX_MAX_CHANNELS);
efx->max_tx_channels = efx->max_channels;
efx->timer_quantum_ns = 4968; /* 621 cycles */
+ efx->timer_max_ns = efx->type->timer_period_max *
+ efx->timer_quantum_ns;
/* Initialise I2C adapter */
board = falcon_board(efx);
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 1736f4b806af..f6883b2b5da3 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -64,7 +64,7 @@
#define LM87_ALARM_TEMP_INT 0x10
#define LM87_ALARM_TEMP_EXT1 0x20
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM87)
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
@@ -455,7 +455,7 @@ static int sfe4001_init(struct efx_nic *efx)
struct falcon_board *board = falcon_board(efx);
int rc;
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
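
The #if conversions in falcon_boards.c above (and in the sis900 and stmmac hunks further down) rely on IS_ENABLED() from <linux/kconfig.h>, which evaluates true for both built-in (=y) and modular (=m) options, so the old defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pairs collapse to a single test. A hedged sketch of the pattern with a hypothetical helper:

#include <linux/errno.h>
#include <linux/kconfig.h>

struct i2c_client;

#if IS_ENABLED(CONFIG_SENSORS_LM87)
int board_setup_lm87(struct i2c_client *client);	/* real implementation elsewhere */
#else
static inline int board_setup_lm87(struct i2c_client *client)
{
	return -ENODEV;	/* LM87 support neither built in nor modular */
}
#endif
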
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4c83739d158f..4762ec444cb8 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1477,9 +1477,10 @@ void efx_farch_irq_disable_master(struct efx_nic *efx)
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
-void efx_farch_irq_test_generate(struct efx_nic *efx)
+int efx_farch_irq_test_generate(struct efx_nic *efx)
{
efx_farch_interrupts(efx, true, true);
+ return 0;
}
/* Process a fatal interrupt
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d28e7dd8fa3c..241520943ada 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -548,7 +548,10 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
err_len, rc);
}
- async->complete(efx, async->cookie, rc, outbuf, data_len);
+
+ if (async->complete)
+ async->complete(efx, async->cookie, rc, outbuf,
+ min(async->outlen, data_len));
kfree(async);
efx_mcdi_release(mcdi);
@@ -1153,7 +1156,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
* acquired locks in the wrong order.
*/
list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
- async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ if (async->complete)
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
list_del(&async->list);
kfree(async);
}
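
With the NULL check added above, an asynchronous MCDI request can be issued purely for its side effect, with no completion handler, and the min() clamp keeps a completer from being handed more response data than its request declared in outlen. A hedged sketch of such a fire-and-forget call; the command choice is illustrative and this call site is not part of the patch:

MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
int rc;

MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
rc = efx_mcdi_rpc_async(efx, MC_CMD_TRIGGER_INTERRUPT, inbuf, sizeof(inbuf),
			0 /* outlen */, NULL /* no completer */, 0 /* cookie */);
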
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c9a5b003caaf..ccceafc15896 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2645,16 +2645,20 @@
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
-/* enum: RX DPCPU bus. */
+/* enum: RX0 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
/* enum: TX0 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
/* enum: TX1 DPCPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
-/* enum: RX DICPU bus. */
+/* enum: RX0 DICPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
/* enum: TX DICPU bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
/* Actual value read from RAM / register */
@@ -3612,6 +3616,8 @@
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
@@ -4389,6 +4395,8 @@
* the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
*/
#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
* the workaround
*/
@@ -4413,7 +4421,6 @@
* (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
* output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -5479,6 +5486,8 @@
#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
#define LICENSED_V3_FEATURES_MASK_LBN 0
#define LICENSED_V3_FEATURES_MASK_WIDTH 64
@@ -5634,6 +5643,109 @@
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: RX only */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: TX only */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: RX and TX */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
@@ -5697,8 +5809,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
-#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
-#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -7854,6 +7966,20 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -7910,6 +8036,288 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+
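
The FLAGS2 capability bits defined above are plain LBN/WIDTH pairs, so once a driver has cached the 32-bit FLAGS2 word (datapath_caps2 in the nic.h hunk later in this patch) a feature test is just a shift and mask. A minimal sketch with a hypothetical helper name:

/* Sketch: does the firmware accept MC_CMD_INIT_EVQ_V2? */
static bool efx_has_init_evq_v2(u32 datapath_caps2)
{
	return datapath_caps2 &
	       (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN);
}
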
/***********************************/
/* MC_CMD_V2_EXTN
@@ -9026,7 +9434,7 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -10125,7 +10533,9 @@
* that this operation returns a zero-length response
*/
#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
-/* enum: report counts of installed licenses */
+/* enum: report counts of installed licenses Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
/* MC_CMD_LICENSING_V3_OUT msgresponse */
@@ -10763,6 +11173,8 @@
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
/***********************************/
@@ -11280,22 +11692,110 @@
#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 4
+#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
/* The RX engine to which the vFIFO in the table entry will point to */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+
+
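
A hedged sketch of how a driver might exercise MC_CMD_SET_EVQ_TMR through the sfc MCDI helpers; the call site is not part of this diff, and the hold-off mode and nanosecond values are illustrative only:

MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_EVQ_TMR_OUT_LEN);
unsigned int ns = usecs * 1000;	/* requested hold-off in nanoseconds */
size_t outlen;
int rc;

MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, channel->channel);
MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE,
	       MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
rc = efx_mcdi_rpc(efx, MC_CMD_SET_EVQ_TMR, inbuf, sizeof(inbuf),
		  outbuf, sizeof(outbuf), &outlen);
/* TMR_LOAD_ACT_NS / TMR_RELOAD_ACT_NS in outbuf report the values the
 * firmware actually applied after rounding and truncation.
 */
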
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ff062a36ea8..99d8c82124bb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -76,6 +76,9 @@
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
+/* Minimum MTU, from RFC791 (IP) */
+#define EFX_MIN_MTU 68
+
/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
* and should be a multiple of the cache line size.
*/
@@ -392,7 +395,7 @@ enum efx_sync_events_state {
* @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
- * @irq_moderation: IRQ moderation value (in hardware ticks)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
* @state: state for NAPI vs busy polling
@@ -433,7 +436,7 @@ struct efx_channel {
bool eventq_init;
bool enabled;
int irq;
- unsigned int irq_moderation;
+ unsigned int irq_moderation_us;
struct net_device *napi_dev;
struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -810,8 +813,10 @@ struct vfdi_status;
* @membase: Memory BAR value
* @interrupt_mode: Interrupt mode
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
- * @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
* @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets
@@ -940,8 +945,10 @@ struct efx_nic {
enum efx_int_mode interrupt_mode;
unsigned int timer_quantum_ns;
+ unsigned int timer_max_ns;
bool irq_rx_adaptive;
- unsigned int irq_rx_moderation;
+ unsigned int irq_mod_step_us;
+ unsigned int irq_rx_moderation_us;
u32 msg_enable;
enum nic_state state;
@@ -1271,7 +1278,7 @@ struct efx_nic_type {
int (*mcdi_poll_reboot)(struct efx_nic *efx);
void (*mcdi_reboot_detected)(struct efx_nic *efx);
void (*irq_enable_master)(struct efx_nic *efx);
- void (*irq_test_generate)(struct efx_nic *efx);
+ int (*irq_test_generate)(struct efx_nic *efx);
void (*irq_disable_non_ev)(struct efx_nic *efx);
irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 89b83e59e1dc..aa1945a858d5 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -66,11 +66,11 @@ void efx_nic_event_test_start(struct efx_channel *channel)
channel->efx->type->ev_test_generate(channel);
}
-void efx_nic_irq_test_start(struct efx_nic *efx)
+int efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx->type->irq_test_generate(efx);
+ return efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 96944c3c9d14..73bee7ea332a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -507,10 +507,13 @@ enum {
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
+ * @workaround_61265: Flag: firmware supports workaround for bug 61265
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of
+ * %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @vport_id: The function's vport ID, only relevant for PFs
@@ -540,8 +543,10 @@ struct efx_ef10_nic_data {
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
+ bool workaround_61265;
bool must_check_datapath_caps;
u32 datapath_caps;
+ u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
unsigned int vport_id;
@@ -741,12 +746,12 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
-void efx_nic_irq_test_start(struct efx_nic *efx);
+int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);
/* Falcon/Siena interrupts */
void efx_farch_irq_enable_master(struct efx_nic *efx);
-void efx_farch_irq_test_generate(struct efx_nic *efx);
+int efx_farch_irq_test_generate(struct efx_nic *efx);
void efx_farch_irq_disable_master(struct efx_nic *efx);
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c771e0af4e06..77a5364f7a10 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1269,13 +1269,13 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
if (IS_ERR(ptp->phc_clock)) {
rc = PTR_ERR(ptp->phc_clock);
goto fail3;
- }
-
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ } else if (ptp->phc_clock) {
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
}
ptp->nic_ts_enabled = false;
@@ -1306,7 +1306,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- channel->irq_moderation = 0;
+ channel->irq_moderation_us = 0;
channel->rx_queue.core_index = 0;
return efx_ptp_probe(efx, channel);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 9d78830da609..cd38b44ae23a 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -135,11 +135,19 @@ static int efx_test_interrupts(struct efx_nic *efx,
{
unsigned long timeout, wait;
int cpu;
+ int rc;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
- efx_nic_irq_test_start(efx);
+ rc = efx_nic_irq_test_start(efx);
+ if (rc == -ENOTSUPP) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "direct interrupt testing not supported\n");
+ tests->interrupt = 0;
+ return 0;
+ }
+
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 009dbe88f3be..32a427253a03 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -28,7 +28,7 @@ struct efx_loopback_self_tests {
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
- * indicates failure.
+ * indicates failure; 0 indicates test could not be run.
*/
struct efx_self_tests {
/* online tests */
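
The comment change above makes the self-test result fields tri-state: 1 for pass, -1 for fail, and 0 when a test could not be run (as efx_test_interrupts() now reports when direct interrupt testing is unsupported). A trivial sketch of consuming that convention:

/* Sketch only: pretty-print one tri-state self-test result */
static const char *efx_self_test_str(int result)
{
	if (result > 0)
		return "PASS";
	if (result < 0)
		return "FAIL";
	return "not run";
}
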
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 2219b5424d2b..04ed1b4c7cd9 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -34,19 +34,24 @@ static void siena_init_wol(struct efx_nic *efx);
static void siena_push_irq_moderation(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
efx_dword_t timer_cmd;
- if (channel->irq_moderation)
+ if (channel->irq_moderation_us) {
+ unsigned int ticks;
+
+ ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
FRF_CZ_TC_TIMER_VAL,
- channel->irq_moderation - 1);
- else
+ ticks - 1);
+ } else {
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_DIS,
FRF_CZ_TC_TIMER_VAL, 0);
+ }
efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
channel->channel);
}
@@ -222,6 +227,9 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
efx->timer_quantum_ns =
(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
3072 : 6144; /* 768 cycles */
+ efx->timer_max_ns = efx->type->timer_period_max *
+ efx->timer_quantum_ns;
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 816c44689e67..9abcf4aded30 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -22,7 +22,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -31,6 +31,9 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index 400df526586d..ba1762e7f216 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -16,7 +16,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 2310b75d4ec2..351cd14cb9f9 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -50,4 +50,8 @@
#define EFX_WORKAROUND_35388(efx) \
(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+/* Moderation timer access must go through MCDI */
+#define EFX_EF10_WORKAROUND_61265(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
+
#endif /* EFX_WORKAROUNDS_H */
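
EFX_EF10_WORKAROUND_61265() above follows the same shape as the existing bug 35388 macro: a per-NIC flag, cached from MC_CMD_GET_WORKAROUNDS, that decides how the EVQ timer may be programmed. A hedged sketch of the kind of dispatch a caller might do; the function names here are hypothetical:

if (EFX_EF10_WORKAROUND_61265(efx))
	efx_set_evq_tmr_via_mcdi(efx, channel);	/* MC_CMD_SET_EVQ_TMR */
else if (EFX_WORKAROUND_35388(efx))
	efx_write_evq_tmr_bug35388(efx, channel);
else
	efx_write_evq_tmr_reg(efx, channel);	/* plain EVQ_TMR register write */
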
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 95001ee408ab..6f85276376e8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1426,7 +1426,7 @@ static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
rx_flags |= RxATX;
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Can accept Jumbo packet */
rx_flags |= RxAJAB;
#endif
@@ -1750,7 +1750,7 @@ static int sis900_rx(struct net_device *net_dev)
data_size = rx_status & DSIZE;
rx_size = data_size - CRC_SIZE;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* ``TOOLONG'' flag means jumbo packet received. */
if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
rx_status &= (~ ((unsigned int)TOOLONG));
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 7d430d322931..f0da3dc52c01 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -310,7 +310,7 @@ enum sis630_revision_id {
#define CRC_SIZE 4
#define MAC_HEADER_SIZE 14
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define MAX_FRAME_SIZE (1518 + 4)
#else
#define MAX_FRAME_SIZE 1518
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 503a3b6dce91..73212590d04a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2323,6 +2323,9 @@ static int smc_drv_probe(struct platform_device *pdev)
} else {
lp->cfg.flags |= SMC91X_USE_16BIT;
}
+ if (!device_property_read_u32(&pdev->dev, "reg-shift",
+ &val))
+ lp->io_shift = val;
}
#endif
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4f8910b7db2e..e9b8579e6241 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/gpio/consumer.h>
#include "smsc911x.h"
@@ -147,6 +148,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+ /* Reset GPIO */
+ struct gpio_desc *reset_gpiod;
+
/* clock */
struct clk *clk;
};
@@ -438,6 +442,11 @@ static int smsc911x_request_resources(struct platform_device *pdev)
netdev_err(ndev, "couldn't get regulators %d\n",
ret);
+ /* Request optional RESET GPIO */
+ pdata->reset_gpiod = devm_gpiod_get_optional(&pdev->dev,
+ "reset",
+ GPIOD_OUT_LOW);
+
/* Request clock */
pdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk))
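
devm_gpiod_get_optional() above returns NULL, not an error, when the device describes no "reset" GPIO, and the gpiod accessors treat a NULL descriptor as a no-op, so callers need no extra checks. A hedged sketch of how such an optional line is typically pulsed; this helper is hypothetical and not part of the hunk:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static void example_reset_pulse(struct gpio_desc *reset_gpiod)
{
	gpiod_set_value_cansleep(reset_gpiod, 1);	/* assert reset */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset_gpiod, 0);	/* release reset */
}
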
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 8f06a6621ab1..3818c5e06eba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -61,13 +61,13 @@ config DWMAC_LPC18XX
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
- depends on OF && (ARCH_MESON || COMPILE_TEST)
+ depends on OF && COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
help
Support for Ethernet controller on Amlogic Meson SoCs.
This selects the Amlogic Meson SoC glue layer support for
- the stmmac device driver. This driver is used for Meson6 and
- Meson8 SoCs.
+ the stmmac device driver. This driver is used for Meson6,
+ Meson8, Meson8b and GXBB SoCs.
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
@@ -104,6 +104,18 @@ config DWMAC_STI
device driver. This driver is used on for the STi series
SOCs GMAC ethernet controller.
+config DWMAC_STM32
+ tristate "STM32 DWMAC support"
+ default ARCH_STM32
+ depends on OF && HAS_IOMEM
+ select MFD_SYSCON
+ ---help---
+ Support for Ethernet controller on STM32 SoCs.
+
+ This selects STM32 SoC glue layer support for the stmmac
+ device driver. This driver is used for the STM32 series
+ SoCs GMAC ethernet controller.
+
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 44b630cd1755..5d6ece5919b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -9,10 +9,11 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
-obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2533b91f1421..d3292c4a6eda 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -30,7 +30,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/module.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index c1bac1912b37..309d99536a2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -1,5 +1,5 @@
/*
- * Amlogic Meson DWMAC glue layer
+ * Amlogic Meson6 and Meson8 DWMAC glue layer
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
*
@@ -96,5 +96,5 @@ static struct platform_driver meson6_dwmac_driver = {
module_platform_driver(meson6_dwmac_driver);
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
-MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_DESCRIPTION("Amlogic Meson6 and Meson8 DWMAC glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
new file mode 100644
index 000000000000..250e4ceafc8d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -0,0 +1,324 @@
+/*
+ * Amlogic Meson8b and GXBB DWMAC glue layer
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define PRG_ETH0 0x0
+
+#define PRG_ETH0_RGMII_MODE BIT(0)
+
+/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
+#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
+#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
+
+#define PRG_ETH0_TXDLY_SHIFT 5
+#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
+#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT)
+
+/* divider for the result of m250_sel */
+#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
+#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
+
+/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
+#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
+#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
+
+#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
+#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+
+#define MUX_CLK_NUM_PARENTS 2
+
+struct meson8b_dwmac {
+ struct platform_device *pdev;
+
+ void __iomem *regs;
+
+ phy_interface_t phy_mode;
+
+ struct clk_mux m250_mux;
+ struct clk *m250_mux_clk;
+ struct clk *m250_mux_parent[MUX_CLK_NUM_PARENTS];
+
+ struct clk_divider m250_div;
+ struct clk *m250_div_clk;
+
+ struct clk_divider m25_div;
+ struct clk *m25_div_clk;
+};
+
+static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
+ u32 mask, u32 value)
+{
+ u32 data;
+
+ data = readl(dwmac->regs + reg);
+ data &= ~mask;
+ data |= (value & mask);
+
+ writel(data, dwmac->regs + reg);
+}
+
+static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+{
+ struct clk_init_data init;
+ int i, ret;
+ struct device *dev = &dwmac->pdev->dev;
+ char clk_name[32];
+ const char *clk_div_parents[1];
+ const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+ static struct clk_div_table clk_25m_div_table[] = {
+ { .val = 0, .div = 5 },
+ { .val = 1, .div = 10 },
+ { /* sentinel */ },
+ };
+
+ /* get the mux parents from DT */
+ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+ char name[16];
+
+ snprintf(name, sizeof(name), "clkin%d", i);
+ dwmac->m250_mux_parent[i] = devm_clk_get(dev, name);
+ if (IS_ERR(dwmac->m250_mux_parent[i])) {
+ ret = PTR_ERR(dwmac->m250_mux_parent[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Missing clock %s\n", name);
+ return ret;
+ }
+
+ mux_parent_names[i] =
+ __clk_get_name(dwmac->m250_mux_parent[i]);
+ }
+
+ /* create the m250_mux */
+ snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
+ init.name = clk_name;
+ init.ops = &clk_mux_ops;
+ init.flags = 0;
+ init.parent_names = mux_parent_names;
+ init.num_parents = MUX_CLK_NUM_PARENTS;
+
+ dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
+ dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ dwmac->m250_mux.flags = 0;
+ dwmac->m250_mux.table = NULL;
+ dwmac->m250_mux.hw.init = &init;
+
+ dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw);
+ if (WARN_ON(IS_ERR(dwmac->m250_mux_clk)))
+ return PTR_ERR(dwmac->m250_mux_clk);
+
+ /* create the m250_div */
+ snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev));
+ init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->m250_div.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
+ dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
+ dwmac->m250_div.hw.init = &init;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+
+ dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
+ if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
+ return PTR_ERR(dwmac->m250_div_clk);
+
+ /* create the m25_div */
+ snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
+ dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
+ dwmac->m25_div.table = clk_25m_div_table;
+ dwmac->m25_div.hw.init = &init;
+ dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
+ if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
+ return PTR_ERR(dwmac->m25_div_clk);
+
+ return 0;
+}
+
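+/* Configure PRG_ETH0 for the phy-mode from DT: RGMII mode plus a 1/4 cycle
+ * TX delay for the RGMII variants, or the inverted RMII clock for RMII,
+ * then enable the TX/PHY reference clock output.
+ */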
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+ unsigned long clk_rate;
+
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* Generate a 25MHz clock for the PHY */
+ clk_rate = 25 * 1000 * 1000;
+
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
+ PRG_ETH0_RGMII_MODE);
+
+ /* only relevant for RMII mode -> disable in RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK, 0);
+
+ /* TX clock delay - all known boards use a 1/4 cycle delay */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
+ PRG_ETH0_TXDLY_QUARTER);
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ /* Use the rate of the mux clock for the internal RMII PHY */
+ clk_rate = clk_get_rate(dwmac->m250_mux_clk);
+
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
+ 0);
+
+ /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK,
+ PRG_ETH0_INVERTED_RMII_CLK);
+
+ /* TX clock delay cannot be configured in RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
+ 0);
+
+ break;
+
+ default:
+ dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(dwmac->m25_div_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
+ if (ret) {
+ clk_disable_unprepare(dwmac->m25_div_clk);
+
+ dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
+ return ret;
+ }
+
+ /* enable TX_CLK and PHY_REF_CLK generator */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
+ PRG_ETH0_TX_AND_PHY_REF_CLK);
+
+ return 0;
+}
+
+static int meson8b_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct resource *res;
+ struct meson8b_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dwmac->regs))
+ return PTR_ERR(dwmac->regs);
+
+ dwmac->pdev = pdev;
+ dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (dwmac->phy_mode < 0) {
+ dev_err(&pdev->dev, "missing phy-mode property\n");
+ return -EINVAL;
+ }
+
+ ret = meson8b_init_clk(dwmac);
+ if (ret)
+ return ret;
+
+ ret = meson8b_init_prg_eth(dwmac);
+ if (ret)
+ return ret;
+
+ plat_dat->bsp_priv = dwmac;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
+static int meson8b_dwmac_remove(struct platform_device *pdev)
+{
+ struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+
+ clk_disable_unprepare(dwmac->m25_div_clk);
+
+ return stmmac_pltfr_remove(pdev);
+}
+
+static const struct of_device_id meson8b_dwmac_match[] = {
+ { .compatible = "amlogic,meson8b-dwmac" },
+ { .compatible = "amlogic,meson-gxbb-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
+
+static struct platform_driver meson8b_dwmac_driver = {
+ .probe = meson8b_dwmac_probe,
+ .remove = meson8b_dwmac_remove,
+ .driver = {
+ .name = "meson8b-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson8b_dwmac_match,
+ },
+};
+module_platform_driver(meson8b_dwmac_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson8b and GXBB DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 92105916ef40..3740a4417fa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
#include "stmmac_platform.h"
@@ -301,6 +302,118 @@ static const struct rk_gmac_ops rk3288_ops = {
.set_rmii_speed = rk3288_set_rmii_speed,
};
+#define RK3366_GRF_SOC_CON6 0x0418
+#define RK3366_GRF_SOC_CON7 0x041c
+
+/* RK3366_GRF_SOC_CON6 */
+#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3366_GRF_SOC_CON7 */
+#define RK3366_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
+ RK3366_GMAC_RXCLK_DLY_ENABLE |
+ RK3366_GMAC_TXCLK_DLY_ENABLE |
+ RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+}
+
+static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_2_5M |
+ RK3366_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_25M |
+ RK3366_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3366_ops = {
+ .set_to_rgmii = rk3366_set_to_rgmii,
+ .set_to_rmii = rk3366_set_to_rmii,
+ .set_rgmii_speed = rk3366_set_rgmii_speed,
+ .set_rmii_speed = rk3366_set_rmii_speed,
+};
+
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
@@ -413,6 +526,118 @@ static const struct rk_gmac_ops rk3368_ops = {
.set_rmii_speed = rk3368_set_rmii_speed,
};
+#define RK3399_GRF_SOC_CON5 0xc214
+#define RK3399_GRF_SOC_CON6 0xc218
+
+/* RK3399_GRF_SOC_CON5 */
+#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3399_GRF_SOC_CON6 */
+#define RK3399_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
+ RK3399_GMAC_RXCLK_DLY_ENABLE |
+ RK3399_GMAC_TXCLK_DLY_ENABLE |
+ RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+}
+
+static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_2_5M |
+ RK3399_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_25M |
+ RK3399_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
+ .set_to_rmii = rk3399_set_to_rmii,
+ .set_rgmii_speed = rk3399_set_rgmii_speed,
+ .set_rmii_speed = rk3399_set_rmii_speed,
+};
+
static int gmac_clk_init(struct rk_priv_data *bsp_priv)
{
struct device *dev = &bsp_priv->pdev->dev;
@@ -629,6 +854,16 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
"rockchip,grf");
bsp_priv->pdev = pdev;
+ gmac_clk_init(bsp_priv);
+
+ return bsp_priv;
+}
+
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
+{
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
/* RMII or RGMII */
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
dev_info(dev, "init for RGMII\n");
@@ -641,15 +876,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_err(dev, "NO interface defined!\n");
}
- gmac_clk_init(bsp_priv);
-
- return bsp_priv;
-}
-
-static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
-{
- int ret;
-
ret = phy_power_on(bsp_priv, true);
if (ret)
return ret;
@@ -658,11 +884,19 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
if (ret)
return ret;
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
+ struct device *dev = &gmac->pdev->dev;
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
@@ -760,7 +994,9 @@ static int rk_gmac_probe(struct platform_device *pdev)
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
+ { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
new file mode 100644
index 000000000000..e5a926b8bee7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -0,0 +1,194 @@
+/*
+ * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
+ *
+ * Copyright (C) Alexandre Torgue 2015
+ * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define MII_PHY_SEL_MASK BIT(23)
+
+struct stm32_dwmac {
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+ u32 mode_reg; /* MAC glue-logic mode register */
+ struct regmap *regmap;
+ u32 speed;
+};
+
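+/* Program the PHY interface selection bit in the SYSCFG mode register
+ * (cleared for MII, set otherwise) and enable the MAC TX and RX clocks.
+ */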
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ u32 val;
+ int ret;
+
+ val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 0 : 1;
+ ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk_tx);
+
+ return ret;
+}
+
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_rx);
+}
+
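+/* Fetch the TX/RX clocks and the "st,syscon" regmap plus mode register
+ * offset from the device tree.
+ */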
+static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err;
+
+ /* Get TX/RX clocks */
+ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "No tx clock provided...\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+ dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx");
+ if (IS_ERR(dwmac->clk_rx)) {
+ dev_err(dev, "No rx clock provided...\n");
+ return PTR_ERR(dwmac->clk_rx);
+ }
+
+ /* Get mode register */
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(dwmac->regmap))
+ return PTR_ERR(dwmac->regmap);
+
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
+ if (err)
+ dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+
+ return err;
+}
+
+static int stm32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct stm32_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+
+ ret = stm32_dwmac_init(plat_dat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ stm32_dwmac_clk_disable(dwmac);
+
+ return ret;
+}
+
+static int stm32_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dwmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ return ret;
+}
+
+static int stm32_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = stm32_dwmac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_resume(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
+ stm32_dwmac_suspend, stm32_dwmac_resume);
+
+static const struct of_device_id stm32_dwmac_match[] = {
+ { .compatible = "st,stm32-dwmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
+
+static struct platform_driver stm32_dwmac_driver = {
+ .probe = stm32_dwmac_probe,
+ .remove = stm32_dwmac_remove,
+ .driver = {
+ .name = "stm32-dwmac",
+ .pm = &stm32_dwmac_pm_ops,
+ .of_match_table = stm32_dwmac_match,
+ },
+};
+module_platform_driver(stm32_dwmac_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 885a5e64519d..7df4ff158f3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -145,7 +145,7 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
numhashregs = 8;
break;
default:
- pr_debug("STMMAC: err in setting mulitcast filter\n");
+ pr_debug("STMMAC: err in setting multicast filter\n");
return;
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 756bb548e81a..0a0d6a86f397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -265,6 +265,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
* once needed on other platforms.
*/
if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.50a") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
of_device_is_compatible(np, "snps,dwmac")) {
/* Note that the max-frame-size parameter as defined in the
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index ffeb8d9e2b2e..64e147f53a9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -30,4 +30,12 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
int stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
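+/* Return the glue-layer (BSP) private data attached to a stmmac net_device */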
+static inline void *get_stmmac_bsp_priv(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return priv->plat->bsp_priv;
+}
+
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 170a18b61281..6e3b82972ce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -187,7 +187,7 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
priv->ptp_clock = NULL;
pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
- } else
+ } else if (priv->ptp_clock)
pr_debug("Added PTP HW clock successfully on %s\n",
priv->dev->name);
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 4490ebaed127..0d0053128542 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -2743,7 +2743,7 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
lp->msg_enable = msglevel;
}
-static struct ethtool_ops dwceqos_ethtool_ops = {
+static const struct ethtool_ops dwceqos_ethtool_ops = {
.get_drvinfo = dwceqos_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_pauseparam = dwceqos_get_pauseparam,
@@ -2761,7 +2761,7 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static struct net_device_ops netdev_ops = {
+static const struct net_device_ops netdev_ops = {
.ndo_open = dwceqos_open,
.ndo_stop = dwceqos_stop,
.ndo_start_xmit = dwceqos_start_xmit,
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index d300d536d06f..fa0cfda24fd9 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -546,7 +546,8 @@ fatal_error:
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int queue, len;
+ int queue;
+ unsigned int len;
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
@@ -556,7 +557,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_padto(skb, ETH_ZLEN)))
return NETDEV_TX_OK;
- len = max(skb->len, ETH_ZLEN);
+ len = max_t(unsigned int, skb->len, ETH_ZLEN);
queue = skb_get_queue_mapping(skb);
netif_stop_subqueue(dev, queue);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f85d605e4560..c6cff3d2ff05 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@ do { \
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPDMA_TX_PRIORITY_MAP 0x01234567
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_ALE_VLAN_AWARE 1
@@ -140,9 +140,11 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_slave_index(priv) \
- ((priv->data.dual_emac) ? priv->emac_port : \
- priv->data.active_slave)
+#define cpsw_slave_index(cpsw, priv) \
+ ((cpsw->data.dual_emac) ? priv->emac_port : \
+ cpsw->data.active_slave)
+#define IRQ_NUM 2
+#define CPSW_MAX_QUEUES 8
static int debug_level;
module_param(debug_level, int, 0);
@@ -363,38 +365,41 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
__raw_writel(val, slave->regs + offset);
}
-struct cpsw_priv {
- struct platform_device *pdev;
- struct net_device *ndev;
- struct napi_struct napi_rx;
- struct napi_struct napi_tx;
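+/* Hardware resources shared by all network interfaces of one cpsw instance */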
+struct cpsw_common {
struct device *dev;
struct cpsw_platform_data data;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct cpsw_ss_regs __iomem *regs;
struct cpsw_wr_regs __iomem *wr_regs;
u8 __iomem *hw_stats;
struct cpsw_host_regs __iomem *host_port_regs;
- u32 msg_enable;
u32 version;
u32 coal_intvl;
u32 bus_freq_mhz;
int rx_packet_max;
- struct clk *clk;
- u8 mac_addr[ETH_ALEN];
struct cpsw_slave *slaves;
struct cpdma_ctlr *dma;
- struct cpdma_chan *txch, *rxch;
+ struct cpdma_chan *txch[CPSW_MAX_QUEUES];
+ struct cpdma_chan *rxch[CPSW_MAX_QUEUES];
struct cpsw_ale *ale;
- bool rx_pause;
- bool tx_pause;
bool quirk_irq;
bool rx_irq_disabled;
bool tx_irq_disabled;
- /* snapshot of IRQ numbers */
- u32 irqs_table[4];
- u32 num_irqs;
- struct cpts *cpts;
+ u32 irqs_table[IRQ_NUM];
+ struct cpts *cpts;
+ int rx_ch_num, tx_ch_num;
+};
+
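+/* Per-network-interface state; points back to the shared cpsw_common */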
+struct cpsw_priv {
+ struct net_device *ndev;
+ struct device *dev;
+ u32 msg_enable;
+ u8 mac_addr[ETH_ALEN];
+ bool rx_pause;
+ bool tx_pause;
u32 emac_port;
+ struct cpsw_common *cpsw;
};
struct cpsw_stats {
@@ -455,108 +460,92 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
- { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
- { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
- { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
- { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
- { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
- { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
- { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
- { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
- { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
- { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
- { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
- { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
- { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
- { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
- { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
- { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
- { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
- { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
- { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
- { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
- { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
- { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
- { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
- { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
- { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
- { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
};
-#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+ { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "misqueued", CPDMA_RX_STAT(misqueued) },
+ { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "requeue", CPDMA_RX_STAT(requeue) },
+ { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
-#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
+#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
+#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
+ struct cpsw_common *cpsw = (priv)->cpsw; \
int n; \
- if (priv->data.dual_emac) \
- (func)((priv)->slaves + priv->emac_port, ##arg);\
+ if (cpsw->data.dual_emac) \
+ (func)((cpsw)->slaves + priv->emac_port, ##arg);\
else \
- for (n = (priv)->data.slaves, \
- slave = (priv)->slaves; \
+ for (n = cpsw->data.slaves, \
+ slave = cpsw->slaves; \
n; n--) \
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_get_slave_ndev(priv, __slave_no__) \
- ((__slave_no__ < priv->data.slaves) ? \
- priv->slaves[__slave_no__].ndev : NULL)
-#define cpsw_get_slave_priv(priv, __slave_no__) \
- (((__slave_no__ < priv->data.slaves) && \
- (priv->slaves[__slave_no__].ndev)) ? \
- netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
-
-#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+
+#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
do { \
- if (!priv->data.dual_emac) \
+ if (!cpsw->data.dual_emac) \
break; \
if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw_get_slave_ndev(priv, 0); \
- priv = netdev_priv(ndev); \
+ ndev = cpsw->slaves[0].ndev; \
skb->dev = ndev; \
} else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw_get_slave_ndev(priv, 1); \
- priv = netdev_priv(ndev); \
+ ndev = cpsw->slaves[1].ndev; \
skb->dev = ndev; \
} \
} while (0)
-#define cpsw_add_mcast(priv, addr) \
+#define cpsw_add_mcast(cpsw, priv, addr) \
do { \
- if (priv->data.dual_emac) { \
- struct cpsw_slave *slave = priv->slaves + \
+ if (cpsw->data.dual_emac) { \
+ struct cpsw_slave *slave = cpsw->slaves + \
priv->emac_port; \
- int slave_port = cpsw_get_slave_port(priv, \
+ int slave_port = cpsw_get_slave_port( \
slave->slave_num); \
- cpsw_ale_add_mcast(priv->ale, addr, \
+ cpsw_ale_add_mcast(cpsw->ale, addr, \
1 << slave_port | ALE_PORT_HOST, \
ALE_VLAN, slave->port_vlan, 0); \
} else { \
- cpsw_ale_add_mcast(priv->ale, addr, \
+ cpsw_ale_add_mcast(cpsw->ale, addr, \
ALE_ALL_PORTS, \
0, 0, 0); \
} \
} while (0)
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_ale *ale = priv->ale;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_ale *ale = cpsw->ale;
int i;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
bool flag = false;
/* Enabling promiscuous mode for one interface will be
* common for both interfaces, as they share
* the same hardware resource.
*/
- for (i = 0; i < priv->data.slaves; i++)
- if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
if (!enable && flag) {
@@ -579,7 +568,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
unsigned long timeout = jiffies + HZ;
/* Disable Learn for all ports (host is port 0, slaves are port 1 and up) */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -606,7 +595,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports (host is port 0, slaves are port 1 and up) */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -620,17 +609,18 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int vid;
- if (priv->data.dual_emac)
- vid = priv->slaves[priv->emac_port].port_vlan;
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
else
- vid = priv->data.default_vlan;
+ vid = cpsw->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
- cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
return;
} else {
/* Disable promiscuous mode */
@@ -638,51 +628,54 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
+ cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(priv, (u8 *)ha->addr);
+ cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
}
}
}
-static void cpsw_intr_enable(struct cpsw_priv *priv)
+static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
- __raw_writel(0xFF, &priv->wr_regs->tx_en);
- __raw_writel(0xFF, &priv->wr_regs->rx_en);
+ __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
+ __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
- cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
return;
}
-static void cpsw_intr_disable(struct cpsw_priv *priv)
+static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
- __raw_writel(0, &priv->wr_regs->tx_en);
- __raw_writel(0, &priv->wr_regs->rx_en);
+ __raw_writel(0, &cpsw->wr_regs->tx_en);
+ __raw_writel(0, &cpsw->wr_regs->rx_en);
- cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
return;
}
static void cpsw_tx_handler(void *token, int len, int status)
{
+ struct netdev_queue *txq;
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Check whether the queue is stopped due to stalled tx dma; if so,
* restart it now that we have a free descriptor for tx
*/
- if (unlikely(netif_queue_stopped(ndev)))
- netif_wake_queue(ndev);
- cpts_tx_timestamp(priv->cpts, skb);
+ txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ cpts_tx_timestamp(cpsw->cpts, skb);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -690,22 +683,23 @@ static void cpsw_tx_handler(void *token, int len, int status)
static void cpsw_rx_handler(void *token, int len, int status)
{
+ struct cpdma_chan *ch;
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+ cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
bool ndev_status = false;
- struct cpsw_slave *slave = priv->slaves;
+ struct cpsw_slave *slave = cpsw->slaves;
int n;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
/* In dual emac mode check for all interfaces */
- for (n = priv->data.slaves; n; n--, slave++)
+ for (n = cpsw->data.slaves; n; n--, slave++)
if (netif_running(slave->ndev))
ndev_status = true;
}
@@ -726,10 +720,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
return;
}
- new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+ new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
if (new_skb) {
+ skb_copy_queue_mapping(new_skb, skb);
skb_put(skb, len);
- cpts_rx_timestamp(priv->cpts, skb);
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -741,83 +736,117 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
requeue:
- ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
- skb_tailroom(new_skb), 0);
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ return;
+ }
+
+ ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
+ ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
+ skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
dev_kfree_skb_any(new_skb);
}
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
- struct cpsw_priv *priv = dev_id;
+ struct cpsw_common *cpsw = dev_id;
- writel(0, &priv->wr_regs->tx_en);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[1]);
- priv->tx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
}
- napi_schedule(&priv->napi_tx);
+ napi_schedule(&cpsw->napi_tx);
return IRQ_HANDLED;
}
static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
- struct cpsw_priv *priv = dev_id;
+ struct cpsw_common *cpsw = dev_id;
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- writel(0, &priv->wr_regs->rx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ writel(0, &cpsw->wr_regs->rx_en);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[0]);
- priv->rx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
}
- napi_schedule(&priv->napi_rx);
+ napi_schedule(&cpsw->napi_rx);
return IRQ_HANDLED;
}
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi_tx);
- int num_tx;
+ u32 ch_map;
+ int num_tx, ch;
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) {
+ if (!ch_map) {
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ if (!ch_map)
+ break;
+
+ ch = 0;
+ }
+
+ if (!(ch_map & 0x01))
+ continue;
+
+ num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx);
+ }
- num_tx = cpdma_chan_process(priv->txch, budget);
if (num_tx < budget) {
napi_complete(napi_tx);
- writel(0xff, &priv->wr_regs->tx_en);
- if (priv->quirk_irq && priv->tx_irq_disabled) {
- priv->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
}
- if (num_tx)
- cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
-
return num_tx;
}
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi_rx);
- int num_rx;
+ u32 ch_map;
+ int num_rx, ch;
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) {
+ if (!ch_map) {
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ if (!ch_map)
+ break;
+
+ ch = 0;
+ }
+
+ if (!(ch_map & 0x01))
+ continue;
+
+ num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx);
+ }
- num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi_rx);
- writel(0xff, &priv->wr_regs->rx_en);
- if (priv->quirk_irq && priv->rx_irq_disabled) {
- priv->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
}
- if (num_rx)
- cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
-
return num_rx;
}
@@ -850,17 +879,18 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct phy_device *phy = slave->phy;
u32 mac_control = 0;
u32 slave_port;
+ struct cpsw_common *cpsw = priv->cpsw;
if (!phy)
return;
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
if (phy->link) {
- mac_control = priv->data.mac_control;
+ mac_control = cpsw->data.mac_control;
/* enable forwarding */
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
if (phy->speed == 1000)
@@ -884,7 +914,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
} else {
mac_control = 0;
/* disable forwarding */
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
@@ -906,19 +936,19 @@ static void cpsw_adjust_link(struct net_device *ndev)
if (link) {
netif_carrier_on(ndev);
if (netif_running(ndev))
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
} else {
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
+ netif_tx_stop_all_queues(ndev);
}
}
static int cpsw_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- coal->rx_coalesce_usecs = priv->coal_intvl;
+ coal->rx_coalesce_usecs = cpsw->coal_intvl;
return 0;
}
@@ -931,11 +961,12 @@ static int cpsw_set_coalesce(struct net_device *ndev,
u32 prescale = 0;
u32 addnl_dvdr = 1;
u32 coal_intvl = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
coal_intvl = coal->rx_coalesce_usecs;
- int_ctrl = readl(&priv->wr_regs->int_control);
- prescale = priv->bus_freq_mhz * 4;
+ int_ctrl = readl(&cpsw->wr_regs->int_control);
+ prescale = cpsw->bus_freq_mhz * 4;
if (!coal->rx_coalesce_usecs) {
int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
@@ -963,53 +994,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
}
num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
- writel(num_interrupts, &priv->wr_regs->rx_imax);
- writel(num_interrupts, &priv->wr_regs->tx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->tx_imax);
int_ctrl |= CPSW_INTPACEEN;
int_ctrl &= (~CPSW_INTPRESCALE_MASK);
int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
update_return:
- writel(int_ctrl, &priv->wr_regs->int_control);
+ writel(int_ctrl, &cpsw->wr_regs->int_control);
cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
- if (priv->data.dual_emac) {
- int i;
-
- for (i = 0; i < priv->data.slaves; i++) {
- priv = netdev_priv(priv->slaves[i].ndev);
- priv->coal_intvl = coal_intvl;
- }
- } else {
- priv->coal_intvl = coal_intvl;
- }
+ cpsw->coal_intvl = coal_intvl;
return 0;
}
static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
switch (sset) {
case ETH_SS_STATS:
- return CPSW_STATS_LEN;
+ return (CPSW_STATS_COMMON_LEN +
+ (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+ CPSW_STATS_CH_LEN);
default:
return -EOPNOTSUPP;
}
}
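+/* Emit the per-channel ethtool stat names ("Rx/Tx DMA chan <n>: <stat>") */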
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+ int ch_stats_len;
+ int line;
+ int i;
+
+ ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+ for (i = 0; i < ch_stats_len; i++) {
+ line = i % CPSW_STATS_CH_LEN;
+ snprintf(*p, ETH_GSTRING_LEN,
+ "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
+ i / CPSW_STATS_CH_LEN,
+ cpsw_gstrings_ch_stats[line].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < CPSW_STATS_LEN; i++) {
+ for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
memcpy(p, cpsw_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+ cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
break;
}
}
@@ -1017,86 +1064,78 @@ static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
static void cpsw_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpdma_chan_stats rx_stats;
- struct cpdma_chan_stats tx_stats;
- u32 val;
u8 *p;
- int i;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpdma_chan_stats ch_stats;
+ int i, l, ch;
/* Collect Davinci CPDMA stats for Rx and Tx Channel */
- cpdma_chan_get_stats(priv->rxch, &rx_stats);
- cpdma_chan_get_stats(priv->txch, &tx_stats);
-
- for (i = 0; i < CPSW_STATS_LEN; i++) {
- switch (cpsw_gstrings_stats[i].type) {
- case CPSW_STATS:
- val = readl(priv->hw_stats +
- cpsw_gstrings_stats[i].stat_offset);
- data[i] = val;
- break;
-
- case CPDMA_RX_STATS:
- p = (u8 *)&rx_stats +
- cpsw_gstrings_stats[i].stat_offset;
- data[i] = *(u32 *)p;
- break;
+ for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+ data[l] = readl(cpsw->hw_stats +
+ cpsw_gstrings_stats[l].stat_offset);
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
- case CPDMA_TX_STATS:
- p = (u8 *)&tx_stats +
- cpsw_gstrings_stats[i].stat_offset;
- data[i] = *(u32 *)p;
- break;
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
}
}
}
-static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
{
u32 i;
u32 usage_count = 0;
- if (!priv->data.dual_emac)
+ if (!cpsw->data.dual_emac)
return 0;
- for (i = 0; i < priv->data.slaves; i++)
- if (priv->slaves[i].open_stat)
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].open_stat)
usage_count++;
return usage_count;
}
-static inline int cpsw_tx_packet_submit(struct net_device *ndev,
- struct cpsw_priv *priv, struct sk_buff *skb)
+static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
+ struct sk_buff *skb,
+ struct cpdma_chan *txch)
{
- if (!priv->data.dual_emac)
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 0);
+ struct cpsw_common *cpsw = priv->cpsw;
- if (ndev == cpsw_get_slave_ndev(priv, 0))
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 1);
- else
- return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 2);
+ return cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port + cpsw->data.dual_emac);
}
static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
{
+ struct cpsw_common *cpsw = priv->cpsw;
u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
- if (priv->version == CPSW_VERSION_1)
+ if (cpsw->version == CPSW_VERSION_1)
slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
else
slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
- cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
port_mask, port_mask, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1110,13 +1149,14 @@ static void soft_reset_slave(struct cpsw_slave *slave)
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
+ struct cpsw_common *cpsw = priv->cpsw;
soft_reset_slave(slave);
/* setup priority mapping */
__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
break;
@@ -1128,17 +1168,17 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
+ __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
- if (priv->data.dual_emac)
+ if (cpsw->data.dual_emac)
cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
else
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
if (slave->data->phy_node) {
@@ -1168,81 +1208,121 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
phy_start(slave->phy);
/* Configure GMII_SEL register */
- cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
+ cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
- const int vlan = priv->data.default_vlan;
+ struct cpsw_common *cpsw = priv->cpsw;
+ const int vlan = cpsw->data.default_vlan;
u32 reg;
int i;
int unreg_mcast_mask;
- reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
CPSW2_PORT_VLAN;
- writel(vlan, &priv->host_port_regs->port_vlan);
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
- for (i = 0; i < priv->data.slaves; i++)
- slave_write(priv->slaves + i, vlan, reg);
+ for (i = 0; i < cpsw->data.slaves; i++)
+ slave_write(cpsw->slaves + i, vlan, reg);
if (priv->ndev->flags & IFF_ALLMULTI)
unreg_mcast_mask = ALE_ALL_PORTS;
else
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
- cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
ALE_ALL_PORTS, ALE_ALL_PORTS,
unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
- u32 control_reg;
u32 fifo_mode;
+ u32 control_reg;
+ struct cpsw_common *cpsw = priv->cpsw;
/* soft reset the controller and initialize ale */
- soft_reset("cpsw", &priv->regs->soft_reset);
- cpsw_ale_start(priv->ale);
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
- control_reg = readl(&priv->regs->control);
+ control_reg = readl(&cpsw->regs->control);
control_reg |= CPSW_VLAN_AWARE;
- writel(control_reg, &priv->regs->control);
- fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ writel(control_reg, &cpsw->regs->control);
+ fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
CPSW_FIFO_NORMAL_MODE;
- writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
+ writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
- &priv->host_port_regs->cpdma_tx_pri_map);
- __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
- cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- if (!priv->data.dual_emac) {
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
+ if (!cpsw->data.dual_emac) {
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
0, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
}
}
-static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
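+/* Pre-fill each RX DMA channel with receive buffers, recording the channel
+ * in the skb queue mapping so completions stay on the right queue.
+ */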
+static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct sk_buff *skb;
+ int ch_buf_num;
+ int ch, i, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
+ for (i = 0; i < ch_buf_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(priv->ndev,
+ cpsw->rx_packet_max,
+ GFP_KERNEL);
+ if (!skb) {
+ cpsw_err(priv, ifup, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ skb_set_queue_mapping(skb, ch);
+ ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data,
+ skb_tailroom(skb), 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit skb to channel %d rx, error %d\n",
+ ch, ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
u32 slave_port;
- slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ slave_port = cpsw_get_slave_port(slave->slave_num);
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
- cpsw_ale_control_set(priv->ale, slave_port,
+ cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
soft_reset_slave(slave);
}
@@ -1250,115 +1330,111 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int i, ret;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
u32 reg;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (!cpsw_common_res_usage_state(priv))
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(cpsw))
+ cpsw_intr_disable(cpsw);
netif_carrier_off(ndev);
- reg = priv->version;
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err_cleanup;
+ }
+
+ reg = cpsw->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- if (!cpsw_common_res_usage_state(priv))
+ if (!cpsw_common_res_usage_state(cpsw))
cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
/* Add default VLAN */
- if (!priv->data.dual_emac)
+ if (!cpsw->data.dual_emac)
cpsw_add_default_vlan(priv);
else
- cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+ cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
- if (!cpsw_common_res_usage_state(priv)) {
- struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
- int buf_num;
-
+ if (!cpsw_common_res_usage_state(cpsw)) {
/* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
/* disable priority elevation */
- __raw_writel(0, &priv->regs->ptype);
+ __raw_writel(0, &cpsw->regs->ptype);
/* enable statistics collection only on all ports */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ __raw_writel(0x7, &cpsw->regs->stat_port_en);
/* Enable internal fifo flow control */
- writel(0x7, &priv->regs->flow_control);
+ writel(0x7, &cpsw->regs->flow_control);
- napi_enable(&priv_sl0->napi_rx);
- napi_enable(&priv_sl0->napi_tx);
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
- if (priv_sl0->tx_irq_disabled) {
- priv_sl0->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
- if (priv_sl0->rx_irq_disabled) {
- priv_sl0->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
- buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
- for (i = 0; i < buf_num; i++) {
- struct sk_buff *skb;
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
- ret = -ENOMEM;
- skb = __netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max, GFP_KERNEL);
- if (!skb)
- goto err_cleanup;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), 0);
- if (ret < 0) {
- kfree_skb(skb);
- goto err_cleanup;
- }
- kmemleak_not_leak(skb);
- }
- /* continue even if we didn't manage to submit all
- * receive descs
- */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
-
- if (cpts_register(&priv->pdev->dev, priv->cpts,
- priv->data.cpts_clock_mult,
- priv->data.cpts_clock_shift))
+ if (cpts_register(cpsw->dev, cpsw->cpts,
+ cpsw->data.cpts_clock_mult,
+ cpsw->data.cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
}
/* Enable Interrupt pacing if configured */
- if (priv->coal_intvl != 0) {
+ if (cpsw->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = priv->coal_intvl;
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
- cpdma_ctlr_start(priv->dma);
- cpsw_intr_enable(priv);
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+
+ if (cpsw->data.dual_emac)
+ cpsw->slaves[priv->emac_port].open_stat = true;
+
+ netif_tx_start_all_queues(ndev);
- if (priv->data.dual_emac)
- priv->slaves[priv->emac_port].open_stat = true;
return 0;
err_cleanup:
- cpdma_ctlr_stop(priv->dma);
- for_each_slave(priv, cpsw_slave_stop, priv);
- pm_runtime_put_sync(&priv->pdev->dev);
+ cpdma_ctlr_stop(cpsw->dma);
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ pm_runtime_put_sync(cpsw->dev);
netif_carrier_off(priv->ndev);
return ret;
}
@@ -1366,25 +1442,24 @@ err_cleanup:
static int cpsw_ndo_stop(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- netif_stop_queue(priv->ndev);
+ netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
- if (cpsw_common_res_usage_state(priv) <= 1) {
- struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
-
- napi_disable(&priv_sl0->napi_rx);
- napi_disable(&priv_sl0->napi_tx);
- cpts_unregister(priv->cpts);
- cpsw_intr_disable(priv);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
- }
- for_each_slave(priv, cpsw_slave_stop, priv);
- pm_runtime_put_sync(&priv->pdev->dev);
- if (priv->data.dual_emac)
- priv->slaves[priv->emac_port].open_stat = false;
+ if (cpsw_common_res_usage_state(cpsw) <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ }
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ pm_runtime_put_sync(cpsw->dev);
+ if (cpsw->data.dual_emac)
+ cpsw->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -1392,7 +1467,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
netif_trans_update(ndev);
@@ -1403,12 +1481,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->cpts->tx_enable)
+ cpsw->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpsw_tx_packet_submit(ndev, priv, skb);
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txch[q_idx];
+ ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
@@ -1417,24 +1500,27 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
/* If there is no more tx desc left free then we need to
* tell the kernel to stop sending us tx frames.
*/
- if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
- netif_stop_queue(ndev);
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ netif_tx_stop_queue(txq);
+ }
return NETDEV_TX_OK;
fail:
ndev->stats.tx_dropped++;
- netif_stop_queue(ndev);
+ txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
}
#ifdef CONFIG_TI_CPTS
-static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
+ if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -1442,10 +1528,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -1455,32 +1541,33 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
struct cpsw_slave *slave;
+ struct cpsw_common *cpsw = priv->cpsw;
u32 ctrl, mtype;
- if (priv->data.dual_emac)
- slave = &priv->slaves[priv->emac_port];
+ if (cpsw->data.dual_emac)
+ slave = &cpsw->slaves[priv->emac_port];
else
- slave = &priv->slaves[priv->data.active_slave];
+ slave = &cpsw->slaves[cpsw->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
+ if (cpsw->cpts->tx_enable)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (priv->cpts->rx_enable)
+ if (cpsw->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -1489,18 +1576,19 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
- __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+ __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
- if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2 &&
- priv->version != CPSW_VERSION_3)
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1540,9 +1628,9 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(priv);
+ cpsw_hwtstamp_v1(cpsw);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -1557,13 +1645,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = priv->cpts;
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpts *cpts = cpsw->cpts;
struct hwtstamp_config cfg;
- if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2 &&
- priv->version != CPSW_VERSION_3)
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
@@ -1579,7 +1667,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
if (!netif_running(dev))
return -EINVAL;
@@ -1593,27 +1682,33 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
#endif
}
- if (!priv->slaves[slave_no].phy)
+ if (!cpsw->slaves[slave_no].phy)
return -EOPNOTSUPP;
- return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
- cpsw_intr_disable(priv);
- cpdma_chan_stop(priv->txch);
- cpdma_chan_start(priv->txch);
- cpsw_intr_enable(priv);
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txch[ch]);
+ cpdma_chan_start(cpsw->txch[ch]);
+ }
+
+ cpsw_intr_enable(cpsw);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_common *cpsw = priv->cpsw;
int flags = 0;
u16 vid = 0;
int ret;
@@ -1621,27 +1716,27 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
- vid = priv->slaves[priv->emac_port].port_vlan;
+ if (cpsw->data.dual_emac) {
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
flags = ALE_VLAN;
}
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
flags, vid);
- cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
for_each_slave(priv, cpsw_set_slave_mac, priv);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return 0;
}
@@ -1649,12 +1744,12 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_intr_disable(priv);
- cpsw_rx_interrupt(priv->irqs_table[0], priv);
- cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpsw_intr_enable(priv);
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
}
#endif
@@ -1664,8 +1759,9 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
int ret;
int unreg_mcast_mask = 0;
u32 port_mask;
+ struct cpsw_common *cpsw = priv->cpsw;
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
if (priv->ndev->flags & IFF_ALLMULTI)
@@ -1679,27 +1775,27 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
}
- ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask);
if (ret != 0)
return ret;
- ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
goto clean_vid;
- ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
clean_vlan_ucast:
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
- cpsw_ale_del_vlan(priv->ale, vid, 0);
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
return ret;
}
@@ -1707,26 +1803,27 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- if (vid == priv->data.default_vlan)
+ if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
* EMAC port separation
*/
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (vid == priv->slaves[i].port_vlan)
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
}
}
@@ -1734,7 +1831,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return ret;
}
@@ -1742,39 +1839,40 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- if (vid == priv->data.default_vlan)
+ if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
return ret;
}
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (vid == priv->slaves[i].port_vlan)
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
}
}
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
- ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
if (ret != 0)
return ret;
- ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;
- ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
- pm_runtime_put(&priv->pdev->dev);
+ pm_runtime_put(cpsw->dev);
return ret;
}
@@ -1797,31 +1895,32 @@ static const struct net_device_ops cpsw_netdev_ops = {
static int cpsw_get_regs_len(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+ return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}
static void cpsw_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
u32 *reg = p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* update CPSW IP version */
- regs->version = priv->version;
+ regs->version = cpsw->version;
- cpsw_ale_dump(priv->ale, reg);
+ cpsw_ale_dump(cpsw->ale, reg);
}
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev = to_platform_device(cpsw->dev);
strlcpy(info->driver, "cpsw", sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1840,7 +1939,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
#ifdef CONFIG_TI_CPTS
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1849,7 +1948,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts->phc_index;
+ info->phc_index = cpsw->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1872,10 +1971,11 @@ static int cpsw_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd);
else
return -EOPNOTSUPP;
}
@@ -1883,10 +1983,11 @@ static int cpsw_get_settings(struct net_device *ndev,
static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd);
else
return -EOPNOTSUPP;
}
@@ -1894,22 +1995,24 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
wol->supported = 0;
wol->wolopts = 0;
- if (priv->slaves[slave_no].phy)
- phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+ if (cpsw->slaves[slave_no].phy)
+ phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}
static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- int slave_no = cpsw_slave_index(priv);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
- if (priv->slaves[slave_no].phy)
- return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
else
return -EOPNOTSUPP;
}
@@ -1940,12 +2043,13 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
int ret;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(cpsw->dev);
if (ret < 0) {
cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(&priv->pdev->dev);
+ pm_runtime_put_noidle(cpsw->dev);
}
return ret;
@@ -1956,11 +2060,185 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
- ret = pm_runtime_put(&priv->pdev->dev);
+ ret = pm_runtime_put(priv->cpsw->dev);
if (ret < 0)
cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}
+static void cpsw_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ch->max_combined = 0;
+ ch->max_rx = CPSW_MAX_QUEUES;
+ ch->max_tx = CPSW_MAX_QUEUES;
+ ch->max_other = 0;
+ ch->other_count = 0;
+ ch->rx_count = cpsw->rx_ch_num;
+ ch->tx_count = cpsw->tx_ch_num;
+ ch->combined_count = 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+ struct ethtool_channels *ch)
+{
+ if (ch->combined_count)
+ return -EINVAL;
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ if (ch->rx_count > cpsw->data.channels ||
+ ch->tx_count > cpsw->data.channels)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
+{
+ int (*poll)(struct napi_struct *, int);
+ struct cpsw_common *cpsw = priv->cpsw;
+ void (*handler)(void *, int, int);
+ struct cpdma_chan **chan;
+ int ret, *ch;
+
+ if (rx) {
+ ch = &cpsw->rx_ch_num;
+ chan = cpsw->rxch;
+ handler = cpsw_rx_handler;
+ poll = cpsw_rx_poll;
+ } else {
+ ch = &cpsw->tx_ch_num;
+ chan = cpsw->txch;
+ handler = cpsw_tx_handler;
+ poll = cpsw_tx_poll;
+ }
+
+ while (*ch < ch_num) {
+ chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+
+ if (IS_ERR(chan[*ch]))
+ return PTR_ERR(chan[*ch]);
+
+ if (!chan[*ch])
+ return -EINVAL;
+
+ cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ (*ch)++;
+ }
+
+ while (*ch > ch_num) {
+ (*ch)--;
+
+ ret = cpdma_chan_destroy(chan[*ch]);
+ if (ret)
+ return ret;
+
+ cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ }
+
+ return 0;
+}
+
+static int cpsw_update_channels(struct cpsw_priv *priv,
+ struct ethtool_channels *ch)
+{
+ int ret;
+
+ ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
+ if (ret)
+ return ret;
+
+ ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ /* Disable NAPI scheduling */
+ cpsw_intr_disable(cpsw);
+
+ /* Stop all transmit queues for every network device.
+ * Disable re-using rx descriptors with dormant_on.
+ */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ netif_tx_stop_all_queues(slave->ndev);
+ netif_dormant_on(slave->ndev);
+ }
+
+ /* Handle rest of tx packets and stop cpdma channels */
+ cpdma_ctlr_stop(cpsw->dma);
+ ret = cpsw_update_channels(priv, chs);
+ if (ret)
+ goto err;
+
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ /* Inform stack about new count of queues */
+ ret = netif_set_real_num_tx_queues(slave->ndev,
+ cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err;
+ }
+
+ ret = netif_set_real_num_rx_queues(slave->ndev,
+ cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err;
+ }
+
+ /* Enable rx packet handling */
+ netif_dormant_off(slave->ndev);
+ }
+
+ if (cpsw_common_res_usage_state(cpsw)) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ goto err;
+
+ /* After this, receive is started */
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+ netif_tx_start_all_queues(slave->ndev);
+ }
+ return 0;
+err:
+ dev_err(priv->dev, "cannot update channels number, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1982,14 +2260,16 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_regs = cpsw_get_regs,
.begin = cpsw_ethtool_op_begin,
.complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
- void __iomem *regs = priv->regs;
+ void __iomem *regs = cpsw->regs;
int slave_num = slave->slave_num;
- struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
+ struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;
slave->data = data;
slave->regs = regs + slave_reg_ofs;
@@ -2160,71 +2440,50 @@ no_phy_slave:
return 0;
}
-static int cpsw_probe_dual_emac(struct platform_device *pdev,
- struct cpsw_priv *priv)
+static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
- struct cpsw_platform_data *data = &priv->data;
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_platform_data *data = &cpsw->data;
struct net_device *ndev;
struct cpsw_priv *priv_sl2;
- int ret = 0, i;
+ int ret = 0;
- ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
if (!ndev) {
- dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
+ dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
}
priv_sl2 = netdev_priv(ndev);
- priv_sl2->data = *data;
- priv_sl2->pdev = pdev;
+ priv_sl2->cpsw = cpsw;
priv_sl2->ndev = ndev;
priv_sl2->dev = &ndev->dev;
priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- priv_sl2->rx_packet_max = max(rx_packet_max, 128);
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
ETH_ALEN);
- dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
+ priv_sl2->mac_addr);
} else {
random_ether_addr(priv_sl2->mac_addr);
- dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
+ priv_sl2->mac_addr);
}
memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
- priv_sl2->slaves = priv->slaves;
- priv_sl2->clk = priv->clk;
-
- priv_sl2->coal_intvl = 0;
- priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
-
- priv_sl2->regs = priv->regs;
- priv_sl2->host_port_regs = priv->host_port_regs;
- priv_sl2->wr_regs = priv->wr_regs;
- priv_sl2->hw_stats = priv->hw_stats;
- priv_sl2->dma = priv->dma;
- priv_sl2->txch = priv->txch;
- priv_sl2->rxch = priv->rxch;
- priv_sl2->ale = priv->ale;
priv_sl2->emac_port = 1;
- priv->slaves[1].ndev = ndev;
- priv_sl2->cpts = priv->cpts;
- priv_sl2->version = priv->version;
-
- for (i = 0; i < priv->num_irqs; i++) {
- priv_sl2->irqs_table[i] = priv->irqs_table[i];
- priv_sl2->num_irqs = priv->num_irqs;
- }
+ cpsw->slaves[1].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
/* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, cpsw->dev);
ret = register_netdev(ndev);
if (ret) {
- dev_err(&pdev->dev, "cpsw: error registering net device\n");
+ dev_err(cpsw->dev, "cpsw: error registering net device\n");
free_netdev(ndev);
ret = -ENODEV;
}
@@ -2272,6 +2531,7 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
static int cpsw_probe(struct platform_device *pdev)
{
+ struct clk *clk;
struct cpsw_platform_data *data;
struct net_device *ndev;
struct cpsw_priv *priv;
@@ -2282,10 +2542,14 @@ static int cpsw_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
+ struct cpsw_common *cpsw;
int ret = 0, i;
int irq;
- ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ cpsw->dev = &pdev->dev;
+
+ ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(&pdev->dev, "error allocating net_device\n");
return -ENOMEM;
@@ -2293,13 +2557,13 @@ static int cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
- priv->pdev = pdev;
+ priv->cpsw = cpsw;
priv->ndev = ndev;
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- priv->rx_packet_max = max(rx_packet_max, 128);
- priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- if (!priv->cpts) {
+ cpsw->rx_packet_max = max(rx_packet_max, 128);
+ cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!cpsw->cpts) {
dev_err(&pdev->dev, "error allocating cpts\n");
ret = -ENOMEM;
goto clean_ndev_ret;
@@ -2320,12 +2584,14 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&priv->data, pdev)) {
+ if (cpsw_probe_dt(&cpsw->data, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- data = &priv->data;
+ data = &cpsw->data;
+ cpsw->rx_ch_num = 1;
+ cpsw->tx_ch_num = 1;
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
@@ -2337,27 +2603,26 @@ static int cpsw_probe(struct platform_device *pdev)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
- priv->slaves = devm_kzalloc(&pdev->dev,
+ cpsw->slaves = devm_kzalloc(&pdev->dev,
sizeof(struct cpsw_slave) * data->slaves,
GFP_KERNEL);
- if (!priv->slaves) {
+ if (!cpsw->slaves) {
ret = -ENOMEM;
goto clean_runtime_disable_ret;
}
for (i = 0; i < data->slaves; i++)
- priv->slaves[i].slave_num = i;
+ cpsw->slaves[i].slave_num = i;
- priv->slaves[0].ndev = ndev;
+ cpsw->slaves[0].ndev = ndev;
priv->emac_port = 0;
- priv->clk = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(priv->clk)) {
+ clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- priv->coal_intvl = 0;
- priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
@@ -2365,7 +2630,7 @@ static int cpsw_probe(struct platform_device *pdev)
ret = PTR_ERR(ss_regs);
goto clean_runtime_disable_ret;
}
- priv->regs = ss_regs;
+ cpsw->regs = ss_regs;
/* Need to enable clocks with runtime PM api to access module
* registers
@@ -2375,24 +2640,24 @@ static int cpsw_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
goto clean_runtime_disable_ret;
}
- priv->version = readl(&priv->regs->id_ver);
+ cpsw->version = readl(&cpsw->regs->id_ver);
pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->wr_regs)) {
- ret = PTR_ERR(priv->wr_regs);
+ cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cpsw->wr_regs)) {
+ ret = PTR_ERR(cpsw->wr_regs);
goto clean_runtime_disable_ret;
}
memset(&dma_params, 0, sizeof(dma_params));
memset(&ale_params, 0, sizeof(ale_params));
- switch (priv->version) {
+ switch (cpsw->version) {
case CPSW_VERSION_1:
- priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
- priv->hw_stats = ss_regs + CPSW1_HW_STATS;
+ cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -2404,9 +2669,9 @@ static int cpsw_probe(struct platform_device *pdev)
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
- priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
- priv->hw_stats = ss_regs + CPSW2_HW_STATS;
+ cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ cpsw->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -2417,13 +2682,14 @@ static int cpsw_probe(struct platform_device *pdev)
(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
break;
default:
- dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
+ dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
- for (i = 0; i < priv->data.slaves; i++) {
- struct cpsw_slave *slave = &priv->slaves[i];
- cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+
+ cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
slave_offset += slave_size;
sliver_offset += SLIVER_SIZE;
}
@@ -2443,19 +2709,16 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.has_ext_regs = true;
dma_params.desc_hw_addr = dma_params.desc_mem_phys;
- priv->dma = cpdma_ctlr_create(&dma_params);
- if (!priv->dma) {
+ cpsw->dma = cpdma_ctlr_create(&dma_params);
+ if (!cpsw->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
goto clean_runtime_disable_ret;
}
- priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
- cpsw_tx_handler);
- priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
- cpsw_rx_handler);
-
- if (WARN_ON(!priv->txch || !priv->rxch)) {
+ cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) {
dev_err(priv->dev, "error initializing dma channels\n");
ret = -ENOMEM;
goto clean_dma_ret;
@@ -2466,8 +2729,8 @@ static int cpsw_probe(struct platform_device *pdev)
ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = data->slaves;
- priv->ale = cpsw_ale_create(&ale_params);
- if (!priv->ale) {
+ cpsw->ale = cpsw_ale_create(&ale_params);
+ if (!cpsw->ale) {
dev_err(priv->dev, "error initializing ale engine\n");
ret = -ENODEV;
goto clean_dma_ret;
@@ -2484,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (of_id) {
pdev->id_entry = of_id->data;
if (pdev->id_entry->driver_data)
- priv->quirk_irq = true;
+ cpsw->quirk_irq = true;
}
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
@@ -2502,9 +2765,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- priv->irqs_table[0] = irq;
+ cpsw->irqs_table[0] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
- 0, dev_name(&pdev->dev), priv);
+ 0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
@@ -2517,21 +2780,20 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- priv->irqs_table[1] = irq;
+ cpsw->irqs_table[1] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
- 0, dev_name(&pdev->dev), priv);
+ 0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
}
- priv->num_irqs = 2;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2545,8 +2807,8 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);
- if (priv->data.dual_emac) {
- ret = cpsw_probe_dual_emac(pdev, priv);
+ if (cpsw->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
goto clean_ale_ret;
@@ -2556,9 +2818,9 @@ static int cpsw_probe(struct platform_device *pdev)
return 0;
clean_ale_ret:
- cpsw_ale_destroy(priv->ale);
+ cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
- cpdma_ctlr_destroy(priv->dma);
+ cpdma_ctlr_destroy(cpsw->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
@@ -2569,7 +2831,7 @@ clean_ndev_ret:
static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
@@ -2578,17 +2840,17 @@ static int cpsw_remove(struct platform_device *pdev)
return ret;
}
- if (priv->data.dual_emac)
- unregister_netdev(cpsw_get_slave_ndev(priv, 1));
+ if (cpsw->data.dual_emac)
+ unregister_netdev(cpsw->slaves[1].ndev);
unregister_netdev(ndev);
- cpsw_ale_destroy(priv->ale);
- cpdma_ctlr_destroy(priv->dma);
+ cpsw_ale_destroy(cpsw->ale);
+ cpdma_ctlr_destroy(cpsw->dma);
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (priv->data.dual_emac)
- free_netdev(cpsw_get_slave_ndev(priv, 1));
+ if (cpsw->data.dual_emac)
+ free_netdev(cpsw->slaves[1].ndev);
free_netdev(ndev);
return 0;
}
@@ -2598,14 +2860,14 @@ static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (netif_running(priv->slaves[i].ndev))
- cpsw_ndo_stop(priv->slaves[i].ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_stop(cpsw->slaves[i].ndev);
}
} else {
if (netif_running(ndev))
@@ -2613,7 +2875,7 @@ static int cpsw_suspend(struct device *dev)
}
/* Select sleep pin state */
- pinctrl_pm_select_sleep_state(&pdev->dev);
+ pinctrl_pm_select_sleep_state(dev);
return 0;
}
@@ -2622,17 +2884,17 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = netdev_priv(ndev);
/* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
+ pinctrl_pm_select_default_state(dev);
- if (priv->data.dual_emac) {
+ if (cpsw->data.dual_emac) {
int i;
- for (i = 0; i < priv->data.slaves; i++) {
- if (netif_running(priv->slaves[i].ndev))
- cpsw_ndo_open(priv->slaves[i].ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_open(cpsw->slaves[i].ndev);
}
} else {
if (netif_running(ndev))
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 19e5f32a8a64..c3f35f11a8fd 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -86,7 +86,7 @@ struct cpdma_desc_pool {
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
- int num_desc, used_desc;
+ int num_desc;
struct device *dev;
struct gen_pool *gen_pool;
};
@@ -104,6 +104,7 @@ struct cpdma_ctlr {
struct cpdma_desc_pool *pool;
spinlock_t lock;
struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+ int chan_num;
};
struct cpdma_chan {
@@ -123,6 +124,13 @@ struct cpdma_chan {
int int_set, int_clear, td;
};
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
+
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs params.dmaregs
#define num_chan params.num_chan
@@ -148,7 +156,10 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
if (!pool)
return;
- WARN_ON(pool->used_desc);
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "cpdma_desc_pool size %d != avail %d",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
if (pool->cpumap)
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
@@ -232,21 +243,14 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
- struct cpdma_desc __iomem *desc = NULL;
-
- desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
- pool->desc_size);
- if (desc)
- pool->used_desc++;
-
- return desc;
+ return (struct cpdma_desc __iomem *)
+ gen_pool_alloc(pool->gen_pool, pool->desc_size);
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
- pool->used_desc--;
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->state = CPDMA_STATE_IDLE;
ctlr->params = *params;
ctlr->dev = params->dev;
+ ctlr->chan_num = 0;
spin_lock_init(&ctlr->lock);
ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
@@ -336,12 +341,14 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
}
ctlr->state = CPDMA_STATE_TEARDOWN;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_stop(ctlr->channels[i]);
}
+ spin_lock_irqsave(&ctlr->lock, flags);
dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
@@ -403,13 +410,52 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
+
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
+
+/**
+ * cpdma_chan_split_pool - Splits ctrl pool between all channels.
+ * Has to be called under ctlr lock
+ */
+static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ struct cpdma_chan *chan;
+ int ch_desc_num;
+ int i;
+
+ if (!ctlr->chan_num)
+ return;
+
+ /* calculate average size of pool slice */
+ ch_desc_num = pool->num_desc / ctlr->chan_num;
+
+ /* split ctlr pool */
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ chan = ctlr->channels[i];
+ if (chan)
+ chan->desc_num = ch_desc_num;
+ }
+}
+
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
- cpdma_handler_fn handler)
+ cpdma_handler_fn handler, int rx_type)
{
+ int offset = chan_num * 4;
struct cpdma_chan *chan;
- int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
+ chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
+
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
@@ -451,14 +497,25 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
spin_lock_init(&chan->lock);
ctlr->channels[chan_num] = chan;
+ ctlr->chan_num++;
+
+ cpdma_chan_split_pool(ctlr);
+
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
- return ctlr->pool->num_desc / 2;
+ unsigned long flags;
+ int desc_num;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ desc_num = chan->desc_num;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
@@ -475,6 +532,10 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
if (chan->state != CPDMA_STATE_IDLE)
cpdma_chan_stop(chan);
ctlr->channels[chan->chan_num] = NULL;
+ ctlr->chan_num--;
+
+ cpdma_chan_split_pool(ctlr);
+
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 4b46cd6e9a3f..a07b22b12bc1 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -17,13 +17,6 @@
#define CPDMA_MAX_CHANNELS BITS_PER_LONG
-#define tx_chan_num(chan) (chan)
-#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
-#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
-#define is_tx_chan(chan) (!is_rx_chan(chan))
-#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
-#define chan_linear(chan) __chan_linear((chan)->chan_num)
-
#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
#define CPDMA_EOI_RX_THRESH 0x0
@@ -79,8 +72,8 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
- cpdma_handler_fn handler);
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
+ cpdma_handler_fn handler, int rx_type);
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
@@ -94,6 +87,8 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
enum cpdma_control {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 727a79f3c7dd..2fd94a5bc1f3 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -597,14 +597,14 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add - Hash function to add mac addr from hash table
+ * emac_hash_add - Hash function to add mac addr to hash table
* @priv: The DaVinci EMAC private adapter structure
* @mac_addr: mac address to delete from hash table
*
* Adds mac address to the internal hash table
*
*/
-static int hash_add(struct emac_priv *priv, u8 *mac_addr)
+static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr)
{
struct device *emac_dev = &priv->ndev->dev;
u32 rc = 0;
@@ -613,7 +613,7 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
if (netif_msg_drv(priv)) {
- dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\
+ dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\
"Hash %08x, should not be greater than %08x",
hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
}
@@ -639,14 +639,14 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del - Hash function to delete mac addr from hash table
+ * emac_hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
* @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
*/
-static int hash_del(struct emac_priv *priv, u8 *mac_addr)
+static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
{
u32 hash_value;
u32 hash_bit;
@@ -696,10 +696,10 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
switch (action) {
case EMAC_MULTICAST_ADD:
- update = hash_add(priv, mac_addr);
+ update = emac_hash_add(priv, mac_addr);
break;
case EMAC_MULTICAST_DEL:
- update = hash_del(priv, mac_addr);
+ update = emac_hash_del(priv, mac_addr);
break;
case EMAC_ALL_MULTI_SET:
update = 1;
@@ -1870,10 +1870,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto no_pdata;
}
- priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
- emac_tx_handler);
- priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
- emac_rx_handler);
+ priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
+ emac_tx_handler, 0);
+ priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+ emac_rx_handler, 1);
if (WARN_ON(!priv->txchan || !priv->rxchan)) {
rc = -ENOMEM;
goto no_cpdma_chan;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 79f0ec4e51ac..bc258d7e41df 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1791,7 +1791,7 @@ fail_alloc_rx:
gelic_card_free_chain(card, card->tx_chain.head);
fail_alloc_tx:
free_irq(card->irq, card);
- netdev->irq = NO_IRQ;
+ netdev->irq = 0;
fail_request_irq:
ps3_sb_event_receive_port_destroy(dev, card->irq);
fail_alloc_irq:
@@ -1843,7 +1843,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
/* disconnect event port */
free_irq(card->irq, card);
- netdev0->irq = NO_IRQ;
+ netdev0->irq = 0;
ps3_sb_event_receive_port_destroy(card->dev, card->irq);
wait_event(card->waitq,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index f38696ceee74..908e72e18ef7 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1724,24 +1724,21 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
struct velocity_td_info *tdinfo, struct tx_desc *td)
{
struct sk_buff *skb = tdinfo->skb;
+ int i;
/*
* Don't unmap the pre-allocated tx_bufs
*/
- if (tdinfo->skb_dma) {
- int i;
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+ size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
- for (i = 0; i < tdinfo->nskb_dma; i++) {
- size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+ /* For scatter-gather */
+ if (skb_shinfo(skb)->nr_frags > 0)
+ pktlen = max_t(size_t, pktlen,
+ td->td_buf[i].size & ~TD_QUEUE);
- /* For scatter-gather */
- if (skb_shinfo(skb)->nr_frags > 0)
- pktlen = max_t(size_t, pktlen,
- td->td_buf[i].size & ~TD_QUEUE);
-
- dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
- le16_to_cpu(pktlen), DMA_TO_DEVICE);
- }
+ dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+ le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
dev_kfree_skb_irq(skb);
tdinfo->skb = NULL;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 4f5c024c6192..6d68c8a8f4f2 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,7 +18,7 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ)
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
select PHYLIB
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 36ee7ab300ae..69e2a833a84f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1297,7 +1297,7 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
-static struct ethtool_ops axienet_ethtool_ops = {
+static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 9006877c53f2..e46b1ebbbff4 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -97,7 +97,6 @@ static struct acpi_driver fjes_acpi_driver = {
static struct platform_driver fjes_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = fjes_probe,
.remove = fjes_remove,
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 5a1e98547031..470b3dcd54e5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -127,7 +127,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const unsigned char[], int);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -428,7 +428,7 @@ out:
/*
* Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
+ * This function is called by the tty module in the kernel when
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
@@ -436,7 +436,6 @@ static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
- unsigned char buf[512];
int count1;
if (!count)
@@ -446,10 +445,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
if (!sp)
return;
- memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
-
/* Read the characters out of the buffer */
-
count1 = count;
while (count) {
count--;
@@ -459,7 +455,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
continue;
}
}
- sixpack_decode(sp, buf, count1);
+ sixpack_decode(sp, cp, count1);
sp_put(sp);
tty_unthrottle(tty);
@@ -992,7 +988,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
{
unsigned char inbyte;
int count1;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d95a50ae996d..622ab3ab9e93 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -484,7 +484,7 @@ static void bpq_setup(struct net_device *dev)
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
dev->header_ops = &ax25_header_ops;
#endif
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 591af71eae56..f4fbcb5aa24a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -84,8 +84,6 @@ struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
#define ITAB_NUM 128
-#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
-extern u8 netvsc_hash_key[];
struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
struct ndis_obj_header hdr;
@@ -175,7 +173,7 @@ struct rndis_device {
struct rndis_message;
struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
-int netvsc_device_remove(struct hv_device *device);
+void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
@@ -435,7 +433,7 @@ struct nvsp_1_message_revoke_send_buffer {
*/
struct nvsp_1_message_send_rndis_packet {
/*
- * This field is specified by RNIDS. They assume there's two different
+ * This field is specified by RNDIS. They assume there's two different
* channels of communication. However, the Network VSP only has one.
* Therefore, the channel travels with the RNDIS packet.
*/
@@ -490,6 +488,7 @@ struct nvsp_2_vsc_capability {
u64 sriov:1;
u64 ieee8021q:1;
u64 correlation_id:1;
+ u64 teaming:1;
};
};
} __packed;
@@ -579,7 +578,7 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indireciton table from top of this struct.
+ /* The offset of the send indirection table from top of this struct.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
@@ -633,12 +632,36 @@ struct multi_send_data {
u32 count; /* counter of batched packets */
};
+struct recv_comp_data {
+ u64 tid; /* transaction id */
+ u32 status;
+};
+
+/* Netvsc Receive Slots Max */
+#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1)
+
+struct multi_recv_comp {
+ void *buf; /* queued receive completions */
+ u32 first; /* first data entry */
+ u32 next; /* next entry for writing */
+};
+
struct netvsc_stats {
u64 packets;
u64 bytes;
+ u64 broadcast;
+ u64 multicast;
struct u64_stats_sync syncp;
};
+struct netvsc_ethtool_stats {
+ unsigned long tx_scattered;
+ unsigned long tx_no_memory;
+ unsigned long tx_no_space;
+ unsigned long tx_too_big;
+ unsigned long tx_busy;
+};
+
struct netvsc_reconfig {
struct list_head list;
u32 event;
@@ -668,14 +691,14 @@ struct net_device_context {
/* Ethtool settings */
u8 duplex;
u32 speed;
+ struct netvsc_ethtool_stats eth_stats;
/* the device is going away */
bool start_remove;
/* State to manage the associated VF interface. */
- struct net_device *vf_netdev;
- bool vf_inject;
- atomic_t vf_use_cnt;
+ struct net_device __rcu *vf_netdev;
+
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
@@ -711,7 +734,6 @@ struct netvsc_device {
struct nvsp_message channel_init_pkt;
struct nvsp_message revoke_packet;
- /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
@@ -735,6 +757,9 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
+ struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
+ atomic_t num_outstanding_recvs;
+
atomic_t open_cnt;
};
@@ -1213,7 +1238,7 @@ struct rndis_message {
u32 ndis_msg_type;
/* Total length of this message, from the beginning */
- /* of the sruct rndis_message, in bytes. */
+ /* of the struct rndis_message, in bytes. */
u32 msg_len;
/* Actual message */
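The new per-channel multi_recv_comp ring queues one recv_comp_data (transaction id plus status) per pending receive completion, so completions can be sent back to the host in batches from the channel callback instead of one vmbus_sendpacket() per packet with retry loops (see the netvsc.c changes below). NETVSC_RECVSLOT_MAX bounds the pending completions per channel at NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1, roughly one slot per ETH_DATA_LEN-sized chunk of the receive buffer. A rough sizing check, assuming the 16 MB NETVSC_RECEIVE_BUFFER_SIZE defined elsewhere in this header (that value is an assumption here, it is not part of the hunk):

#include <stdio.h>

#define NETVSC_RECEIVE_BUFFER_SIZE	(1024 * 1024 * 16)	/* assumed 16 MB */
#define ETH_DATA_LEN			1500
#define NETVSC_RECVSLOT_MAX	(NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1)

struct recv_comp_data {
	unsigned long long tid;		/* transaction id */
	unsigned int status;
};

int main(void)
{
	unsigned long bytes = NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data);

	printf("%d slots, %lu bytes per channel (~%lu KiB)\n",
	       NETVSC_RECVSLOT_MAX, bytes, bytes / 1024);
	/* On a typical 64-bit ABI: 11185 slots * 16 bytes, about 175 KiB per
	 * channel -- consistent with the driver allocating it with vzalloc().
	 */
	return 0;
}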
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 410fb8e81376..720b5fa9e625 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -59,7 +59,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
VM_PKT_DATA_INBAND, 0);
}
-
static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
@@ -74,17 +73,26 @@ static struct netvsc_device *alloc_net_device(void)
return NULL;
}
+ net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
+ sizeof(struct recv_comp_data));
+
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+ init_completion(&net_device->channel_init_wait);
return net_device;
}
static void free_netvsc_device(struct netvsc_device *nvdev)
{
+ int i;
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ vfree(nvdev->mrc[i].buf);
+
kfree(nvdev->cb_buffer);
kfree(nvdev);
}
@@ -107,20 +115,20 @@ static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
goto get_in_err;
if (net_device->destroy &&
- atomic_read(&net_device->num_outstanding_sends) == 0)
+ atomic_read(&net_device->num_outstanding_sends) == 0 &&
+ atomic_read(&net_device->num_outstanding_recvs) == 0)
net_device = NULL;
get_in_err:
return net_device;
}
-
-static int netvsc_destroy_buf(struct hv_device *device)
+static void netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
- int ret = 0;
struct net_device *ndev = hv_get_drvdata(device);
struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
+ int ret;
/*
* If we got a section count, it means we received a
@@ -150,7 +158,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke receive buffer to netvsp\n");
- return ret;
+ return;
}
}
@@ -165,7 +173,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev,
"unable to teardown receive buffer's gpadl\n");
- return ret;
+ return;
}
net_device->recv_buf_gpadl_handle = 0;
}
@@ -209,7 +217,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke send buffer to netvsp\n");
- return ret;
+ return;
}
}
/* Teardown the gpadl on the vsp end */
@@ -223,7 +231,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
if (ret != 0) {
netdev_err(ndev,
"unable to teardown send buffer's gpadl\n");
- return ret;
+ return;
}
net_device->send_buf_gpadl_handle = 0;
}
@@ -233,8 +241,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
net_device->send_buf = NULL;
}
kfree(net_device->send_section_map);
-
- return ret;
}
static int netvsc_init_buf(struct hv_device *device)
@@ -276,7 +282,6 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
-
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
@@ -403,7 +408,7 @@ static int netvsc_init_buf(struct hv_device *device)
/* Section count is simply the size divided by the section size.
*/
net_device->send_section_cnt =
- net_device->send_buf_size/net_device->send_section_size;
+ net_device->send_buf_size / net_device->send_section_size;
dev_info(&device->device, "Send section size: %d, Section count:%d\n",
net_device->send_section_size, net_device->send_section_cnt);
@@ -412,8 +417,8 @@ static int netvsc_init_buf(struct hv_device *device)
net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
BITS_PER_LONG);
- net_device->send_section_map =
- kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+ net_device->send_section_map = kcalloc(net_device->map_words,
+ sizeof(ulong), GFP_KERNEL);
if (net_device->send_section_map == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -428,7 +433,6 @@ exit:
return ret;
}
-
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
struct netvsc_device *net_device,
@@ -468,9 +472,13 @@ static int negotiate_nvsp_ver(struct hv_device *device,
init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
- if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+ /* Teaming bit is needed to receive link speed updates */
+ init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
+ }
+
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
@@ -485,9 +493,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
- u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ const u32 ver_list[] = {
+ NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
- int i, num_ver = 4; /* number of different NVSP versions */
+ int i;
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -496,7 +505,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
- for (i = num_ver - 1; i >= 0; i--)
+ for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
if (negotiate_nvsp_ver(device, net_device, init_packet,
ver_list[i]) == 0) {
net_device->nvsp_version = ver_list[i];
@@ -555,7 +564,7 @@ static void netvsc_disconnect_vsp(struct hv_device *device)
/*
* netvsc_device_remove - Callback when the root bus device is removed
*/
-int netvsc_device_remove(struct hv_device *device)
+void netvsc_device_remove(struct hv_device *device)
{
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -577,10 +586,8 @@ int netvsc_device_remove(struct hv_device *device)
/* Release all resources */
vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
- return 0;
}
-
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
@@ -604,72 +611,79 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
sync_change_bit(index, net_device->send_section_map);
}
+static void netvsc_send_tx_complete(struct netvsc_device *net_device,
+ struct vmbus_channel *incoming_channel,
+ struct hv_device *device,
+ struct vmpacket_descriptor *packet)
+{
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct vmbus_channel *channel = device->channel;
+ int num_outstanding_sends;
+ u16 q_idx = 0;
+ int queue_sends;
+
+ /* Notify the layer above us */
+ if (likely(skb)) {
+ struct hv_netvsc_packet *nvsc_packet
+ = (struct hv_netvsc_packet *)skb->cb;
+ u32 send_index = nvsc_packet->send_buf_index;
+
+ if (send_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, send_index);
+ q_idx = nvsc_packet->q_idx;
+ channel = incoming_channel;
+
+ dev_consume_skb_any(skb);
+ }
+
+ num_outstanding_sends =
+ atomic_dec_return(&net_device->num_outstanding_sends);
+ queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
+
+ if (net_device->destroy && num_outstanding_sends == 0)
+ wake_up(&net_device->wait_drain);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
+ !net_device_ctx->start_remove &&
+ (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+ queue_sends < 1))
+ netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
+}
+
static void netvsc_send_completion(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- u32 send_index;
- struct sk_buff *skb;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
- (packet->offset8 << 3));
+ (packet->offset8 << 3));
- if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
- (nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
- (nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
- (nvsp_packet->hdr.msg_type ==
- NVSP_MSG5_TYPE_SUBCHANNEL)) {
+ switch (nvsp_packet->hdr.msg_type) {
+ case NVSP_MSG_TYPE_INIT_COMPLETE:
+ case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
+ case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
+ case NVSP_MSG5_TYPE_SUBCHANNEL:
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
complete(&net_device->channel_init_wait);
- } else if (nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
- int num_outstanding_sends;
- u16 q_idx = 0;
- struct vmbus_channel *channel = device->channel;
- int queue_sends;
-
- /* Get the send context */
- skb = (struct sk_buff *)(unsigned long)packet->trans_id;
-
- /* Notify the layer above us */
- if (skb) {
- nvsc_packet = (struct hv_netvsc_packet *) skb->cb;
- send_index = nvsc_packet->send_buf_index;
- if (send_index != NETVSC_INVALID_INDEX)
- netvsc_free_send_slot(net_device, send_index);
- q_idx = nvsc_packet->q_idx;
- channel = incoming_channel;
- dev_kfree_skb_any(skb);
- }
-
- num_outstanding_sends =
- atomic_dec_return(&net_device->num_outstanding_sends);
- queue_sends = atomic_dec_return(&net_device->
- queue_sends[q_idx]);
+ break;
- if (net_device->destroy && num_outstanding_sends == 0)
- wake_up(&net_device->wait_drain);
+ case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
+ netvsc_send_tx_complete(net_device, incoming_channel,
+ device, packet);
+ break;
- if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
- !net_device_ctx->start_remove &&
- (hv_ringbuf_avail_percent(&channel->outbound) >
- RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
- netif_tx_wake_queue(netdev_get_tx_queue(
- ndev, q_idx));
- } else {
- netdev_err(ndev, "Unknown send completion packet type- "
- "%d received!!\n", nvsp_packet->hdr.msg_type);
+ default:
+ netdev_err(ndev,
+ "Unknown send completion type %d received!!\n",
+ nvsp_packet->hdr.msg_type);
}
-
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
@@ -859,7 +873,7 @@ int netvsc_send(struct hv_device *device,
struct sk_buff *skb)
{
struct netvsc_device *net_device;
- int ret = 0, m_ret = 0;
+ int ret = 0;
struct vmbus_channel *out_channel;
u16 q_idx = packet->q_idx;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
@@ -930,7 +944,7 @@ int netvsc_send(struct hv_device *device,
}
if (msdp->skb)
- dev_kfree_skb_any(msdp->skb);
+ dev_consume_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) {
msdp->skb = skb;
@@ -948,8 +962,8 @@ int netvsc_send(struct hv_device *device,
}
if (msd_send) {
- m_ret = netvsc_send_pkt(device, msd_send, net_device,
- NULL, msd_skb);
+ int m_ret = netvsc_send_pkt(device, msd_send, net_device,
+ NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
@@ -968,49 +982,121 @@ send_now:
return ret;
}
-static void netvsc_send_recv_completion(struct hv_device *device,
- struct vmbus_channel *channel,
- struct netvsc_device *net_device,
- u64 transaction_id, u32 status)
+static int netvsc_send_recv_completion(struct vmbus_channel *channel,
+ u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
- int retries = 0;
int ret;
- struct net_device *ndev = hv_get_drvdata(device);
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
-retry_send_cmplt:
/* Send the completion */
ret = vmbus_sendpacket(channel, &recvcompMessage,
- sizeof(struct nvsp_message), transaction_id,
- VM_PKT_COMP, 0);
- if (ret == 0) {
- /* success */
- /* no-op */
- } else if (ret == -EAGAIN) {
- /* no more room...wait a bit and attempt to retry 3 times */
- retries++;
- netdev_err(ndev, "unable to send receive completion pkt"
- " (tid %llx)...retrying %d\n", transaction_id, retries);
-
- if (retries < 4) {
- udelay(100);
- goto retry_send_cmplt;
- } else {
- netdev_err(ndev, "unable to send receive "
- "completion pkt (tid %llx)...give up retrying\n",
- transaction_id);
- }
- } else {
- netdev_err(ndev, "unable to send receive "
- "completion pkt - %llx\n", transaction_id);
+ sizeof(struct nvsp_message_header) + sizeof(u32),
+ transaction_id, VM_PKT_COMP, 0);
+
+ return ret;
+}
+
+static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
+ u32 *filled, u32 *avail)
+{
+ u32 first = nvdev->mrc[q_idx].first;
+ u32 next = nvdev->mrc[q_idx].next;
+
+ *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
+ next - first;
+
+ *avail = NETVSC_RECVSLOT_MAX - *filled - 1;
+}
+
+/* Read the first filled slot, no change to index */
+static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
+ *nvdev, u16 q_idx)
+{
+ u32 filled, avail;
+
+ if (!nvdev->mrc[q_idx].buf)
+ return NULL;
+
+ count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
+ if (!filled)
+ return NULL;
+
+ return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
+ sizeof(struct recv_comp_data);
+}
+
+/* Put the first filled slot back to available pool */
+static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
+{
+ int num_recv;
+
+ nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
+ NETVSC_RECVSLOT_MAX;
+
+ num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
+
+ if (nvdev->destroy && num_recv == 0)
+ wake_up(&nvdev->wait_drain);
+}
+
+/* Check and send pending recv completions */
+static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
+ struct vmbus_channel *channel, u16 q_idx)
+{
+ struct recv_comp_data *rcd;
+ int ret;
+
+ while (true) {
+ rcd = read_recv_comp_slot(nvdev, q_idx);
+ if (!rcd)
+ break;
+
+ ret = netvsc_send_recv_completion(channel, rcd->tid,
+ rcd->status);
+ if (ret)
+ break;
+
+ put_recv_comp_slot(nvdev, q_idx);
}
}
+#define NETVSC_RCD_WATERMARK 80
+
+/* Get next available slot */
+static inline struct recv_comp_data *get_recv_comp_slot(
+ struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
+{
+ u32 filled, avail, next;
+ struct recv_comp_data *rcd;
+
+ if (!nvdev->recv_section)
+ return NULL;
+
+ if (!nvdev->mrc[q_idx].buf)
+ return NULL;
+
+ if (atomic_read(&nvdev->num_outstanding_recvs) >
+ nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
+ netvsc_chk_recv_comp(nvdev, channel, q_idx);
+
+ count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
+ if (!avail)
+ return NULL;
+
+ next = nvdev->mrc[q_idx].next;
+ rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
+ nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
+
+ atomic_inc(&nvdev->num_outstanding_recvs);
+
+ return rcd;
+}
+
static void netvsc_receive(struct netvsc_device *net_device,
struct vmbus_channel *channel,
struct hv_device *device,
@@ -1025,6 +1111,9 @@ static void netvsc_receive(struct netvsc_device *net_device,
int count = 0;
struct net_device *ndev = hv_get_drvdata(device);
void *data;
+ int ret;
+ struct recv_comp_data *rcd;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
/*
* All inbound packets other than send completion should be xfer page
@@ -1069,13 +1158,29 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Pass it to the upper layer */
status = rndis_filter_receive(device, netvsc_packet, &data,
channel);
+ }
+ if (!net_device->mrc[q_idx].buf) {
+ ret = netvsc_send_recv_completion(channel,
+ vmxferpage_packet->d.trans_id,
+ status);
+ if (ret)
+ netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
+ q_idx, vmxferpage_packet->d.trans_id, ret);
+ return;
}
- netvsc_send_recv_completion(device, channel, net_device,
- vmxferpage_packet->d.trans_id, status);
-}
+ rcd = get_recv_comp_slot(net_device, channel, q_idx);
+ if (!rcd) {
+ netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
+ q_idx, vmxferpage_packet->d.trans_id);
+ return;
+ }
+
+ rcd->tid = vmxferpage_packet->d.trans_id;
+ rcd->status = status;
+}
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
@@ -1157,11 +1262,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
}
}
-
void netvsc_channel_cb(void *context)
{
int ret;
struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct hv_device *device;
struct netvsc_device *net_device;
u32 bytes_recvd;
@@ -1213,8 +1318,6 @@ void netvsc_channel_cb(void *context)
ndev,
request_id,
desc);
-
-
} else {
/*
* We are done for this pass.
@@ -1241,7 +1344,8 @@ void netvsc_channel_cb(void *context)
if (bufferlen > NETVSC_PACKET_SIZE)
kfree(buffer);
- return;
+
+ netvsc_chk_recv_comp(net_device, channel, q_idx);
}
/*
@@ -1263,9 +1367,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->ring_size = ring_size;
- /* Initialize the NetVSC channel extension */
- init_completion(&net_device->channel_init_wait);
-
set_per_channel_state(device->channel, net_device->cb_buffer);
/* Open the channel */
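The completion ring added above is a plain circular buffer: get_recv_comp_slot() advances next when a completion is queued, put_recv_comp_slot() advances first once a completion has been sent to the host, and count_recv_comp_slot() derives the filled/available counts from the two indices while always keeping one slot in reserve (the usual trick so that first == next unambiguously means an empty ring). A standalone sketch of that index arithmetic, including the wrap-around case:

#include <stdio.h>

#define NETVSC_RECVSLOT_MAX	8	/* tiny ring for illustration; the real one is much larger */

/* Mirrors count_recv_comp_slot(): 'first' is the oldest pending entry,
 * 'next' is where the next entry will be written.
 */
static void count_slots(unsigned int first, unsigned int next,
			unsigned int *filled, unsigned int *avail)
{
	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next
				 : next - first;
	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;	/* one slot always kept free */
}

int main(void)
{
	unsigned int filled, avail;

	count_slots(0, 0, &filled, &avail);	/* empty ring */
	printf("first=0 next=0 -> filled=%u avail=%u\n", filled, avail);

	count_slots(6, 2, &filled, &avail);	/* wrapped: slots 6, 7, 0, 1 are pending */
	printf("first=6 next=2 -> filled=%u avail=%u\n", filled, avail);

	count_slots(3, 2, &filled, &avail);	/* "full": only the reserved slot is left */
	printf("first=3 next=2 -> filled=%u avail=%u\n", filled, avail);
	return 0;
}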
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3ba29fc80d05..52eeb2f67276 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -40,7 +40,6 @@
#include "hyperv_net.h"
-
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
@@ -358,18 +357,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
- bool isvlan;
- bool linear = false;
struct rndis_per_packet_info *ppi;
struct ndis_tcp_ip_checksum_info *csum_info;
- struct ndis_tcp_lso_info *lso_info;
int hdr_offset;
u32 net_trans_info;
u32 hash;
u32 skb_length;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
- struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
/* We will atmost need two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -377,22 +372,20 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* more pages we try linearizing it.
*/
-check_size:
skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
- if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
- net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
- num_data_pgs, skb->len);
- ret = -EFAULT;
- goto drop;
- } else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
- if (skb_linearize(skb)) {
- net_alert_ratelimited("failed to linearize skb\n");
- ret = -ENOMEM;
+
+ if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
+ ++net_device_ctx->eth_stats.tx_scattered;
+
+ if (skb_linearize(skb))
+ goto no_memory;
+
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+ if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+ ++net_device_ctx->eth_stats.tx_too_big;
goto drop;
}
- linear = true;
- goto check_size;
}
/*
@@ -401,17 +394,14 @@ check_size:
* structure.
*/
ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
- if (ret) {
- netdev_err(net, "unable to alloc hv_netvsc_packet\n");
- ret = -ENOMEM;
- goto drop;
- }
+ if (ret)
+ goto no_memory;
+
/* Use the skb control buffer for building up the packet */
BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
FIELD_SIZEOF(struct sk_buff, cb));
packet = (struct hv_netvsc_packet *)skb->cb;
-
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
@@ -420,8 +410,6 @@ check_size:
memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
- isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;
-
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
@@ -440,7 +428,7 @@ check_size:
*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
}
- if (isvlan) {
+ if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
@@ -461,8 +449,37 @@ check_size:
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb))
- goto do_lso;
+ if (skb_is_gso(skb)) {
+ struct ndis_tcp_lso_info *lso_info;
+
+ rndis_msg_size += NDIS_LSO_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+
+ lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
+ ppi->ppi_offset);
+
+ lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+ if (net_trans_info & (INFO_IPV4 << 16)) {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ }
+ lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+ lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
+ goto do_send;
+ }
if ((skb->ip_summed == CHECKSUM_NONE) ||
(skb->ip_summed == CHECKSUM_UNNECESSARY))
@@ -495,7 +512,7 @@ check_size:
ret = skb_cow_head(skb, 0);
if (ret)
- goto drop;
+ goto no_memory;
uh = udp_hdr(skb);
udp_len = ntohs(uh->len);
@@ -509,35 +526,6 @@ check_size:
csum_info->transmit.udp_checksum = 0;
}
- goto do_send;
-
-do_lso:
- rndis_msg_size += NDIS_LSO_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
- TCP_LARGESEND_PKTINFO);
-
- lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
- ppi->ppi_offset);
-
- lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
- if (net_trans_info & (INFO_IPV4 << 16)) {
- lso_info->lso_v2_transmit.ip_version =
- NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
- ip_hdr(skb)->tot_len = 0;
- ip_hdr(skb)->check = 0;
- tcp_hdr(skb)->check =
- ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
- } else {
- lso_info->lso_v2_transmit.ip_version =
- NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
- }
- lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
- lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
do_send:
/* Start filling in the page buffers with the rndis hdr */
@@ -550,21 +538,33 @@ do_send:
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
+ if (likely(ret == 0)) {
+ struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-drop:
- if (ret == 0) {
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets++;
tx_stats->bytes += skb_length;
u64_stats_update_end(&tx_stats->syncp);
- } else {
- if (ret != -EAGAIN) {
- dev_kfree_skb_any(skb);
- net->stats.tx_dropped++;
- }
+ return NETDEV_TX_OK;
+ }
+
+ if (ret == -EAGAIN) {
+ ++net_device_ctx->eth_stats.tx_busy;
+ return NETDEV_TX_BUSY;
}
- return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+ if (ret == -ENOSPC)
+ ++net_device_ctx->eth_stats.tx_no_space;
+
+drop:
+ dev_kfree_skb_any(skb);
+ net->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+
+no_memory:
+ ++net_device_ctx->eth_stats.tx_no_memory;
+ goto drop;
}
/*
@@ -579,19 +579,32 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct netvsc_reconfig *event;
unsigned long flags;
- /* Handle link change statuses only */
+ net = hv_get_drvdata(device_obj);
+
+ if (!net)
+ return;
+
+ ndev_ctx = netdev_priv(net);
+
+ /* Update the physical link speed when changing to another vSwitch */
+ if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
+ u32 speed;
+
+ speed = *(u32 *)((void *)indicate + indicate->
+ status_buf_offset) / 10000;
+ ndev_ctx->speed = speed;
+ return;
+ }
+
+ /* Handle these link change statuses below */
if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
return;
- net = hv_get_drvdata(device_obj);
-
- if (!net || net->reg_state != NETREG_REGISTERED)
+ if (net->reg_state != NETREG_REGISTERED)
return;
- ndev_ctx = netdev_priv(net);
-
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return;
@@ -604,7 +617,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
-
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info,
@@ -655,51 +667,23 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct net_device *vf_netdev;
struct sk_buff *skb;
- struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
- u32 bytes_recvd = packet->total_data_buflen;
- int ret = 0;
- if (!net || net->reg_state != NETREG_REGISTERED)
+ if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- if (READ_ONCE(net_device_ctx->vf_inject)) {
- atomic_inc(&net_device_ctx->vf_use_cnt);
- if (!READ_ONCE(net_device_ctx->vf_inject)) {
- /*
- * We raced; just move on.
- */
- atomic_dec(&net_device_ctx->vf_use_cnt);
- goto vf_injection_done;
- }
-
- /*
- * Inject this packet into the VF inerface.
- * On Hyper-V, multicast and brodcast packets
- * are only delivered on the synthetic interface
- * (after subjecting these to policy filters on
- * the host). Deliver these via the VF interface
- * in the guest.
- */
- vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
- packet, csum_info, *data,
- vlan_tci);
- if (vf_skb != NULL) {
- ++net_device_ctx->vf_netdev->stats.rx_packets;
- net_device_ctx->vf_netdev->stats.rx_bytes +=
- bytes_recvd;
- netif_receive_skb(vf_skb);
- } else {
- ++net->stats.rx_dropped;
- ret = NVSP_STAT_FAIL;
- }
- atomic_dec(&net_device_ctx->vf_use_cnt);
- return ret;
- }
-
-vf_injection_done:
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+ /*
+ * If necessary, inject this packet into the VF interface.
+ * On Hyper-V, multicast and broadcast packets are only delivered
+ * to the synthetic interface (after subjecting these to
+ * policy filters on the host). Deliver these via the VF
+ * interface in the guest.
+ */
+ vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
+ if (vf_netdev && (vf_netdev->flags & IFF_UP))
+ net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
@@ -707,12 +691,25 @@ vf_injection_done:
++net->stats.rx_dropped;
return NVSP_STAT_FAIL;
}
- skb_record_rx_queue(skb, channel->
- offermsg.offer.sub_channel_index);
+ if (net != vf_netdev)
+ skb_record_rx_queue(skb,
+ channel->offermsg.offer.sub_channel_index);
+
+ /*
+ * Even if injecting the packet, record the statistics
+ * on the synthetic device because modifying the VF device
+ * statistics will not work correctly.
+ */
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += packet->total_data_buflen;
+
+ if (skb->pkt_type == PACKET_BROADCAST)
+ ++rx_stats->broadcast;
+ else if (skb->pkt_type == PACKET_MULTICAST)
+ ++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
/*
@@ -728,8 +725,12 @@ vf_injection_done:
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
@@ -951,7 +952,7 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
cpu);
struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
cpu);
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
unsigned int start;
do {
@@ -964,12 +965,14 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
+ rx_multicast = rx_stats->multicast + rx_stats->broadcast;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->tx_bytes += tx_bytes;
t->tx_packets += tx_packets;
t->rx_bytes += rx_bytes;
t->rx_packets += rx_packets;
+ t->multicast += rx_multicast;
}
t->tx_dropped = net->stats.tx_dropped;
@@ -1005,6 +1008,51 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
+static const struct {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} netvsc_stats[] = {
+ { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
+ { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+ { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
+ { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
+ { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
+};
+
+static int netvsc_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(netvsc_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void netvsc_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ const void *nds = &ndc->eth_stats;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
+}
+
+static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ netvsc_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
@@ -1017,6 +1065,9 @@ static void netvsc_poll_controller(struct net_device *net)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ethtool_stats = netvsc_get_ethtool_stats,
+ .get_sset_count = netvsc_get_sset_count,
+ .get_strings = netvsc_get_strings,
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
@@ -1151,25 +1202,44 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
-static struct net_device *get_netvsc_net_device(char *mac)
+static struct net_device *get_netvsc_bymac(const u8 *mac)
{
- struct net_device *dev, *found = NULL;
- int rtnl_locked;
+ struct net_device *dev;
- rtnl_locked = rtnl_trylock();
+ ASSERT_RTNL();
for_each_netdev(&init_net, dev) {
- if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
- if (dev->netdev_ops != &device_ops)
- continue;
- found = dev;
- break;
- }
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ if (ether_addr_equal(mac, dev->perm_addr))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+
+ for_each_netdev(&init_net, dev) {
+ struct net_device_context *net_device_ctx;
+
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ net_device_ctx = netdev_priv(dev);
+ if (net_device_ctx->nvdev == NULL)
+ continue; /* device is removed */
+
+ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
+ return dev; /* a match */
}
- if (rtnl_locked)
- rtnl_unlock();
- return found;
+ return NULL;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
@@ -1177,9 +1247,8 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
struct net_device *ndev;
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
- if (eth_ops == NULL || eth_ops == &ethtool_ops)
+ if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
/*
@@ -1187,13 +1256,13 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* associate with the VF interface. If we don't find a matching
* synthetic interface, move on.
*/
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_bymac(vf_netdev->perm_addr);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || net_device_ctx->vf_netdev)
+ if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1201,46 +1270,26 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
- net_device_ctx->vf_netdev = vf_netdev;
- return NOTIFY_OK;
-}
-
-static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = true;
-}
-static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = false;
-
- /* Wait for currently active users to drain out. */
- while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
- udelay(50);
+ dev_hold(vf_netdev);
+ rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+ return NOTIFY_OK;
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
- netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
@@ -1261,29 +1310,20 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
return NOTIFY_OK;
}
-
static int netvsc_vf_down(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
@@ -1295,28 +1335,23 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
return NOTIFY_OK;
}
-
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
+
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
- net_device_ctx->vf_netdev = NULL;
+
+ RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
+ dev_put(vf_netdev);
module_put(THIS_MODULE);
return NOTIFY_OK;
}
@@ -1337,6 +1372,8 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_off(net);
+ netvsc_init_settings(net);
+
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
@@ -1366,10 +1403,6 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
- atomic_set(&net_device_ctx->vf_use_cnt, 0);
- net_device_ctx->vf_netdev = NULL;
- net_device_ctx->vf_inject = false;
-
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@@ -1398,8 +1431,6 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
- netvsc_init_settings(net);
-
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
@@ -1423,7 +1454,6 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
-
ndev_ctx = netdev_priv(net);
net_device = ndev_ctx->nvdev;
@@ -1470,7 +1500,6 @@ static struct hv_driver netvsc_drv = {
.remove = netvsc_remove,
};
-
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -1482,13 +1511,21 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ /* Skip our own events */
+ if (event_dev->netdev_ops == &device_ops)
+ return NOTIFY_DONE;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_dev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
/* Avoid Vlan dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if (event_dev->priv_flags & IFF_BONDING &&
- event_dev->flags & IFF_MASTER)
+ if ((event_dev->priv_flags & IFF_BONDING) &&
+ (event_dev->flags & IFF_MASTER))
return NOTIFY_DONE;
switch (event) {
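The ethtool counters added above follow a common kernel idiom: a single static table of {name, offsetof()} pairs drives netvsc_get_sset_count(), netvsc_get_strings() and netvsc_get_ethtool_stats(), so adding a counter (shown by ethtool -S) only takes one struct field and one table row. A compact userspace rendering of the same pattern, reusing a few field names from the hunk:

#include <stdio.h>
#include <stddef.h>

#define ETH_GSTRING_LEN 32

struct eth_stats {
	unsigned long tx_scattered;
	unsigned long tx_no_memory;
	unsigned long tx_busy;
};

static const struct {
	char name[ETH_GSTRING_LEN];
	size_t offset;
} stat_desc[] = {
	{ "tx_scattered", offsetof(struct eth_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct eth_stats, tx_no_memory) },
	{ "tx_busy",      offsetof(struct eth_stats, tx_busy) },
};

/* Same walk as netvsc_get_ethtool_stats(): table offset -> counter value. */
static void fill_stats(const struct eth_stats *s, unsigned long *data)
{
	size_t i;

	for (i = 0; i < sizeof(stat_desc) / sizeof(stat_desc[0]); i++)
		data[i] = *(const unsigned long *)((const char *)s + stat_desc[i].offset);
}

int main(void)
{
	struct eth_stats s = { .tx_scattered = 3, .tx_no_memory = 1, .tx_busy = 7 };
	unsigned long data[3];
	size_t i;

	fill_stats(&s, data);
	for (i = 0; i < 3; i++)
		printf("%-16s %lu\n", stat_desc[i].name, data[i]);
	return 0;
}

(The kernel version does the pointer arithmetic on a void *, a GCC extension; the char * cast above is the portable equivalent.)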
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8e830f741d47..9195d5da8485 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -663,13 +663,14 @@ cleanup:
return ret;
}
-u8 netvsc_hash_key[HASH_KEYLEN] = {
+static const u8 netvsc_hash_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
+#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
@@ -720,7 +721,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
for (i = 0; i < HASH_KEYLEN; i++)
keyp[i] = netvsc_hash_key[i];
-
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
@@ -738,7 +738,6 @@ cleanup:
return ret;
}
-
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -752,6 +751,28 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
return ret;
}
+static int rndis_filter_query_link_speed(struct rndis_device *dev)
+{
+ u32 size = sizeof(u32);
+ u32 link_speed;
+ struct net_device_context *ndc;
+ int ret;
+
+ ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED,
+ &link_speed, &size);
+
+ if (!ret) {
+ ndc = netdev_priv(dev->ndev);
+
+ /* The link speed reported from host is in 100bps unit, so
+ * we convert it to Mbps here.
+ */
+ ndc->speed = link_speed / 10000;
+ }
+
+ return ret;
+}
+
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
@@ -792,7 +813,6 @@ cleanup:
return ret;
}
-
static int rndis_filter_init_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -875,11 +895,11 @@ cleanup:
/* Wait for all send completions */
wait_event(nvdev->wait_drain,
- atomic_read(&nvdev->num_outstanding_sends) == 0);
+ atomic_read(&nvdev->num_outstanding_sends) == 0 &&
+ atomic_read(&nvdev->num_outstanding_recvs) == 0);
if (request)
put_rndis_request(dev, request);
- return;
}
static int rndis_filter_open_device(struct rndis_device *dev)
@@ -931,6 +951,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
NETVSC_PACKET_SIZE);
+ nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
+ sizeof(struct recv_comp_data));
+
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb, new_sc);
@@ -946,7 +969,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
}
int rndis_filter_device_add(struct hv_device *dev,
- void *additional_info)
+ void *additional_info)
{
int ret;
struct net_device *net = hv_get_drvdata(dev);
@@ -1028,7 +1051,6 @@ int rndis_filter_device_add(struct hv_device *dev,
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
-
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
goto err_dev_remv;
@@ -1044,6 +1066,8 @@ int rndis_filter_device_add(struct hv_device *dev,
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return 0;
+ rndis_filter_query_link_speed(rndis_device);
+
/* vRSS setup */
memset(&rsscap, 0, rsscap_size);
ret = rndis_filter_query_device(rndis_device,
@@ -1152,7 +1176,6 @@ void rndis_filter_device_remove(struct hv_device *dev)
netvsc_device_remove(dev);
}
-
int rndis_filter_open(struct netvsc_device *nvdev)
{
if (!nvdev)
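rndis_filter_query_link_speed() above and the RNDIS_STATUS_LINK_SPEED_CHANGE handling added in netvsc_drv.c both convert the host-reported value from units of 100 bit/s to Mbit/s by dividing by 10000 (1 Mbit/s = 1,000,000 bit/s = 10,000 x 100 bit/s). A trivial check of the conversion:

#include <stdio.h>

/* The host reports link speed in units of 100 bit/s; ethtool wants Mbit/s. */
static unsigned int rndis_units_to_mbps(unsigned int link_speed)
{
	return link_speed / 10000;
}

int main(void)
{
	/* A 10 Gbit/s link is reported as 100,000,000 units of 100 bit/s. */
	printf("%u Mbit/s\n", rndis_units_to_mbps(100000000));	/* prints 10000 */
	return 0;
}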
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0becf0ac3926..ec387efb61d0 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -30,7 +30,7 @@
static int numlbs = 2;
static LIST_HEAD(fakelb_phys);
-static DEFINE_SPINLOCK(fakelb_phys_lock);
+static DEFINE_MUTEX(fakelb_phys_lock);
static LIST_HEAD(fakelb_ifup_phys);
static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
@@ -188,9 +188,9 @@ static int fakelb_add_one(struct device *dev)
if (err)
goto err_reg;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_add_tail(&phy->list, &fakelb_phys);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
@@ -222,10 +222,10 @@ static int fakelb_probe(struct platform_device *pdev)
return 0;
err_slave:
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return err;
}
@@ -233,10 +233,10 @@ static int fakelb_remove(struct platform_device *pdev)
{
struct fakelb_phy *phy, *tmp;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
}
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 695a5dc9ace3..7e0732f5ea07 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -23,11 +23,13 @@
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/addrconf.h>
+#include <net/l3mdev.h>
#define IPVLAN_DRV "ipvlan"
#define IPV_DRV_VER "0.1"
@@ -124,4 +126,8 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr);
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto);
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b5f9511d819e..b4e990743e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -560,6 +560,7 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
case IPVLAN_MODE_L2:
return ipvlan_xmit_mode_l2(skb, dev);
case IPVLAN_MODE_L3:
+ case IPVLAN_MODE_L3S:
return ipvlan_xmit_mode_l3(skb, dev);
}
@@ -664,6 +665,8 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
return ipvlan_handle_mode_l2(pskb, port);
case IPVLAN_MODE_L3:
return ipvlan_handle_mode_l3(pskb, port);
+ case IPVLAN_MODE_L3S:
+ return RX_HANDLER_PASS;
}
/* Should not reach here */
@@ -672,3 +675,94 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
+
+static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipvl_addr *addr = NULL;
+ struct ipvl_port *port;
+ void *lyr3h;
+ int addr_type;
+
+ if (!dev || !netif_is_ipvlan_port(dev))
+ goto out;
+
+ port = ipvlan_port_get_rcu(dev);
+ if (!port || port->mode != IPVLAN_MODE_L3S)
+ goto out;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+out:
+ return addr;
+}
+
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto)
+{
+ struct ipvl_addr *addr;
+ struct net_device *sdev;
+
+ addr = ipvlan_skb_to_addr(skb, dev);
+ if (!addr)
+ goto out;
+
+ sdev = addr->master->dev;
+ switch (proto) {
+ case AF_INET:
+ {
+ int err;
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
+ ip4h->tos, sdev);
+ if (unlikely(err))
+ goto out;
+ break;
+ }
+ case AF_INET6:
+ {
+ struct dst_entry *dst;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct flowi6 fl6 = {
+ .flowi6_iif = sdev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ skb_dst_drop(skb);
+ dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
+ skb_dst_set(skb, dst);
+ break;
+ }
+ default:
+ break;
+ }
+
+out:
+ return skb;
+}
+
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct ipvl_addr *addr;
+ unsigned int len;
+
+ addr = ipvlan_skb_to_addr(skb, skb->dev);
+ if (!addr)
+ goto out;
+
+ skb->dev = addr->master->dev;
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+out:
+ return NF_ACCEPT;
+}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 18b4e8c7f68a..f442eb366863 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,24 +9,87 @@
#include "ipvlan.h"
+static u32 ipvl_nf_hook_refcnt = 0;
+
+static struct nf_hook_ops ipvl_nfops[] __read_mostly = {
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+};
+
+static struct l3mdev_ops ipvl_l3mdev_ops __read_mostly = {
+ .l3mdev_l3_rcv = ipvlan_l3_rcv,
+};
+
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}
-static void ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+static int ipvlan_register_nf_hook(void)
+{
+ int err = 0;
+
+ if (!ipvl_nf_hook_refcnt) {
+ err = _nf_register_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+ if (!err)
+ ipvl_nf_hook_refcnt = 1;
+ } else {
+ ipvl_nf_hook_refcnt++;
+ }
+
+ return err;
+}
+
+static void ipvlan_unregister_nf_hook(void)
+{
+ WARN_ON(!ipvl_nf_hook_refcnt);
+
+ ipvl_nf_hook_refcnt--;
+ if (!ipvl_nf_hook_refcnt)
+ _nf_unregister_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+}
+
+static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
+ struct net_device *mdev = port->dev;
+ int err = 0;
+ ASSERT_RTNL();
if (port->mode != nval) {
+ if (nval == IPVLAN_MODE_L3S) {
+ /* New mode is L3S */
+ err = ipvlan_register_nf_hook();
+ if (!err) {
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+ mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ } else
+ return err;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+ mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ mdev->l3mdev_ops = NULL;
+ }
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (nval == IPVLAN_MODE_L3)
+ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
ipvlan->dev->flags |= IFF_NOARP;
else
ipvlan->dev->flags &= ~IFF_NOARP;
}
port->mode = nval;
}
+ return err;
}
static int ipvlan_port_create(struct net_device *dev)
@@ -74,6 +137,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ if (port->mode == IPVLAN_MODE_L3S) {
+ dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ dev->l3mdev_ops = NULL;
+ }
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog);
@@ -132,7 +200,8 @@ static int ipvlan_open(struct net_device *dev)
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
- if (ipvlan->port->mode == IPVLAN_MODE_L3)
+ if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
+ ipvlan->port->mode == IPVLAN_MODE_L3S)
dev->flags |= IFF_NOARP;
else
dev->flags &= ~IFF_NOARP;
@@ -372,13 +441,14 @@ static int ipvlan_nl_changelink(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int err = 0;
if (data && data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- ipvlan_set_port_mode(port, nmode);
+ err = ipvlan_set_port_mode(port, nmode);
}
- return 0;
+ return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
@@ -473,10 +543,13 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unregister_netdevice(dev);
return err;
}
+ err = ipvlan_set_port_mode(port, mode);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
- ipvlan_set_port_mode(port, mode);
-
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
}
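Because the NF_INET_LOCAL_IN hooks used for L3S mode are shared by every ipvlan port (there is a single global ipvl_nfops array), ipvlan_register_nf_hook() and ipvlan_unregister_nf_hook() above guard them with a plain refcount; their callers run under RTNL (note the ASSERT_RTNL() added to ipvlan_set_port_mode()), so no extra locking is needed. A standalone sketch of that register-on-first-user / unregister-on-last-user pattern (generic names, not the driver code):

#include <stdio.h>

/* Generic register-once / unregister-on-last-user pattern. The caller is
 * assumed to hold a lock (RTNL in the ipvlan case), so a plain counter is enough.
 */
static unsigned int hook_refcnt;

static int fake_register_hooks(void)
{
	printf("hooks installed\n");
	return 0;
}

static void fake_unregister_hooks(void)
{
	printf("hooks removed\n");
}

static int get_hooks(void)
{
	if (!hook_refcnt) {
		int err = fake_register_hooks();

		if (err)
			return err;
	}
	hook_refcnt++;
	return 0;
}

static void put_hooks(void)
{
	if (!--hook_refcnt)
		fake_unregister_hooks();
}

int main(void)
{
	get_hooks();	/* first L3S port: installs the hooks */
	get_hooks();	/* second L3S port: just bumps the count */
	put_hooks();	/* one user still left, hooks stay */
	put_hooks();	/* last user gone: hooks removed */
	return 0;
}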
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 351e701eb043..3ea47f28e143 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2973,6 +2973,7 @@ static void macsec_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
dev->destructor = macsec_free_netdev;
+ SET_NETDEV_DEVTYPE(dev, &macsec_type);
eth_zero_addr(dev->broadcast);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index b4863e4e522b..5078a0d0db64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -15,152 +15,218 @@ if PHYLIB
config SWPHY
bool
-comment "MII PHY device drivers"
-
-config AQUANTIA_PHY
- tristate "Drivers for the Aquantia PHYs"
- ---help---
- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+comment "MDIO bus device drivers"
-config AT803X_PHY
- tristate "Drivers for Atheros AT803X PHYs"
- ---help---
- Currently supports the AT8030 and AT8035 model
+config MDIO_BCM_IPROC
+ tristate "Broadcom iProc MDIO bus controller"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Broadcom iProc SoCs.
-config AMD_PHY
- tristate "Drivers for the AMD PHYs"
- ---help---
- Currently supports the am79c874
+config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+ depends on HAS_IOMEM
+ help
+ This module provides a driver for the Broadcom UniMAC MDIO busses.
+ This hardware can be found in the Broadcom GENET Ethernet MAC
+ controllers as well as some Broadcom Ethernet switches such as the
+ Starfighter 2 switches.
-config MARVELL_PHY
- tristate "Drivers for Marvell PHYs"
- ---help---
- Currently has a driver for the 88E1011S
-
-config DAVICOM_PHY
- tristate "Drivers for Davicom PHYs"
- ---help---
- Currently supports dm9161e and dm9131
+config MDIO_BITBANG
+ tristate "Bitbanged MDIO buses"
+ help
+ This module implements the MDIO bus protocol in software,
+ for use by low level drivers that export the ability to
+ drive the relevant pins.
-config QSEMI_PHY
- tristate "Drivers for Quality Semiconductor PHYs"
- ---help---
- Currently supports the qs6612
+ If in doubt, say N.
-config LXT_PHY
- tristate "Drivers for the Intel LXT PHYs"
- ---help---
- Currently supports the lxt970, lxt971
+config MDIO_BUS_MUX
+ tristate
+ depends on OF_MDIO
+ help
+ This module provides a driver framework for MDIO bus
+ multiplexers which connect one of several child MDIO busses
+ to a parent bus. Switching between child busses is done by
+ device specific drivers.
-config CICADA_PHY
- tristate "Drivers for the Cicada PHYs"
- ---help---
- Currently supports the cis8204
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Broadcom iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+ iProc based Broadcom SoCs. This multiplexer connects one of several
+ child MDIO buses to a parent bus. Buses could be internal as well as
+ external and selection logic lies inside the same multiplexer.
-config VITESSE_PHY
- tristate "Drivers for the Vitesse PHYs"
- ---help---
- Currently supports the vsc8244
+config MDIO_BUS_MUX_GPIO
+ tristate "GPIO controlled MDIO bus multiplexers"
+ depends on OF_GPIO && OF_MDIO
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via GPIO lines. The multiplexer connects one of
+ several child MDIO busses to a parent bus. Child bus
+ selection is under the control of GPIO lines.
-config TERANETICS_PHY
- tristate "Drivers for the Teranetics PHYs"
- ---help---
- Currently supports the Teranetics TN2020
+config MDIO_BUS_MUX_MMIOREG
+ tristate "MMIO device-controlled MDIO bus multiplexers"
+ depends on OF_MDIO && HAS_IOMEM
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via a simple memory-mapped device, like an FPGA.
+ The multiplexer connects one of several child MDIO busses to a
+ parent bus. Child bus selection is under the control of one of
+ the FPGA's registers.
-config SMSC_PHY
- tristate "Drivers for SMSC PHYs"
- ---help---
- Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
+ Currently, only 8-bit registers are supported.
-config BCM_NET_PHYLIB
+config MDIO_CAVIUM
tristate
-config BROADCOM_PHY
- tristate "Drivers for Broadcom PHYs"
- select BCM_NET_PHYLIB
+config MDIO_GPIO
+ tristate "GPIO lib-based bitbanged MDIO buses"
+ depends on MDIO_BITBANG && GPIOLIB
---help---
- Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
- BCM5481 and BCM5482 PHYs.
+ Supports GPIO lib-based MDIO busses.
-config BCM_CYGNUS_PHY
- tristate "Drivers for Broadcom Cygnus SoC internal PHY"
- depends on ARCH_BCM_CYGNUS || COMPILE_TEST
- depends on MDIO_BCM_IPROC
- select BCM_NET_PHYLIB
+ To compile this driver as a module, choose M here: the module
+ will be called mdio-gpio.
+
+config MDIO_HISI_FEMAC
+ tristate "Hisilicon FEMAC MDIO bus controller"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the MOXA ART SoC.
+
+config MDIO_OCTEON
+ tristate "Octeon and some ThunderX SOCs MDIO buses"
+ depends on 64BIT
+ depends on HAS_IOMEM
+ select MDIO_CAVIUM
+ help
+ This module provides a driver for the Octeon and ThunderX MDIO
+ buses. It is required by the Octeon and ThunderX ethernet device
+ drivers on some systems.
+
+config MDIO_SUN4I
+ tristate "Allwinner sun4i MDIO interface support"
+ depends on ARCH_SUNXI
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the Allwinner SoCs that have an EMAC (A10,
+ A12, A10s, etc.).
+
+config MDIO_THUNDER
+ tristate "ThunderX SOCs MDIO buses"
+ depends on 64BIT
+ depends on PCI
+ select MDIO_CAVIUM
+ help
+ This driver supports the MDIO interfaces found on Cavium
+ ThunderX SoCs when the MDIO bus device appears as a PCI
+ device.
+
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ help
+ This module provides a driver for the MDIO busses found in the
+ APM X-Gene SoCs.
+
+comment "MII PHY device drivers"
+
+config AMD_PHY
+ tristate "AMD PHYs"
---help---
- This PHY driver is for the 1G internal PHYs of the Broadcom
- Cygnus Family SoC.
+ Currently supports the am79c874
- Currently supports internal PHY's used in the BCM11300,
- BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
- BCM58303 & BCM58305 Broadcom Cygnus SoCs.
+config AQUANTIA_PHY
+ tristate "Aquantia PHYs"
+ ---help---
+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
+config AT803X_PHY
+ tristate "AT803X PHYs"
+ ---help---
+ Currently supports the AT8030 and AT8035 model
config BCM63XX_PHY
- tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+ tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX
select BCM_NET_PHYLIB
---help---
Currently supports the 6348 and 6358 PHYs.
config BCM7XXX_PHY
- tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+ tristate "Broadcom 7xxx SOCs internal PHYs"
select BCM_NET_PHYLIB
---help---
Currently supports the BCM7366, BCM7439, BCM7445, and
40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
config BCM87XX_PHY
- tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
+ tristate "Broadcom BCM8706 and BCM8727 PHYs"
help
Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
-config ICPLUS_PHY
- tristate "Drivers for ICPlus PHYs"
+config BCM_CYGNUS_PHY
+ tristate "Broadcom Cygnus SoC internal PHY"
+ depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ depends on MDIO_BCM_IPROC
+ select BCM_NET_PHYLIB
---help---
- Currently supports the IP175C and IP1001 PHYs.
+ This PHY driver is for the 1G internal PHYs of the Broadcom
+ Cygnus Family SoC.
-config REALTEK_PHY
- tristate "Drivers for Realtek PHYs"
- ---help---
- Supports the Realtek 821x PHY.
+ Currently supports internal PHYs used in the BCM11300,
+ BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
+ BCM58303 & BCM58305 Broadcom Cygnus SoCs.
-config NATIONAL_PHY
- tristate "Drivers for National Semiconductor PHYs"
- ---help---
- Currently supports the DP83865 PHY.
+config BCM_NET_PHYLIB
+ tristate
-config STE10XP
- tristate "Driver for STMicroelectronics STe10Xp PHYs"
+config BROADCOM_PHY
+ tristate "Broadcom PHYs"
+ select BCM_NET_PHYLIB
---help---
- This is the driver for the STe100p and STe101p PHYs.
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481 and BCM5482 PHYs.
-config LSI_ET1011C_PHY
- tristate "Driver for LSI ET1011C PHY"
+config CICADA_PHY
+ tristate "Cicada PHYs"
---help---
- Supports the LSI ET1011C PHY.
+ Currently supports the cis8204
-config MICREL_PHY
- tristate "Driver for Micrel PHYs"
+config DAVICOM_PHY
+ tristate "Davicom PHYs"
---help---
- Supports the KSZ9021, VSC8201, KS8001 PHYs.
+ Currently supports dm9161e and dm9131
config DP83848_PHY
- tristate "Driver for Texas Instruments DP83848 PHY"
+ tristate "Texas Instruments DP83848 PHY"
---help---
Supports the DP83848 PHY.
config DP83867_PHY
- tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+ tristate "Texas Instruments DP83867 Gigabit PHY"
---help---
Currently supports the DP83867 PHY.
-config MICROCHIP_PHY
- tristate "Drivers for Microchip PHYs"
- help
- Supports the LAN88XX PHYs.
-
config FIXED_PHY
- tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
select SWPHY
---help---
@@ -169,137 +235,90 @@ config FIXED_PHY
Currently tested with mpc866ads and mpc8349e-mitx.
-config MDIO_BITBANG
- tristate "Support for bitbanged MDIO buses"
- help
- This module implements the MDIO bus protocol in software,
- for use by low level drivers that export the ability to
- drive the relevant pins.
-
- If in doubt, say N.
-
-config MDIO_GPIO
- tristate "Support for GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GPIOLIB
+config ICPLUS_PHY
+ tristate "ICPlus PHYs"
---help---
- Supports GPIO lib-based MDIO busses.
+ Currently supports the IP175C and IP1001 PHYs.
- To compile this driver as a module, choose M here: the module
- will be called mdio-gpio.
+config INTEL_XWAY_PHY
+ tristate "Intel XWAY PHYs"
+ ---help---
+ Supports the Intel XWAY (formerly Lantiq) 11G and 22E PHYs.
+ These PHYs are marked as standalone chips under the names
+ PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
+ SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
-config MDIO_CAVIUM
- tristate
+config LSI_ET1011C_PHY
+ tristate "LSI ET1011C PHY"
+ ---help---
+ Supports the LSI ET1011C PHY.
-config MDIO_OCTEON
- tristate "Support for MDIO buses on Octeon and some ThunderX SOCs"
- depends on 64BIT
- depends on HAS_IOMEM
- select MDIO_CAVIUM
- help
- This module provides a driver for the Octeon and ThunderX MDIO
- buses. It is required by the Octeon and ThunderX ethernet device
- drivers on some systems.
+config LXT_PHY
+ tristate "Intel LXT PHYs"
+ ---help---
+ Currently supports the lxt970, lxt971
-config MDIO_THUNDER
- tristate "Support for MDIO buses on ThunderX SOCs"
- depends on 64BIT
- depends on PCI
- select MDIO_CAVIUM
- help
- This driver supports the MDIO interfaces found on Cavium
- ThunderX SoCs when the MDIO bus device appears as a PCI
- device.
+config MARVELL_PHY
+ tristate "Marvell PHYs"
+ ---help---
+ Currently has a driver for the 88E1011S
+config MICREL_PHY
+ tristate "Micrel PHYs"
+ ---help---
+ Supports the KSZ9021, VSC8201, KS8001 PHYs.
-config MDIO_SUN4I
- tristate "Allwinner sun4i MDIO interface support"
- depends on ARCH_SUNXI
+config MICROCHIP_PHY
+ tristate "Microchip PHYs"
help
- This driver supports the MDIO interface found in the network
- interface units of the Allwinner SoC that have an EMAC (A10,
- A12, A10s, etc.)
-
-config MDIO_MOXART
- tristate "MOXA ART MDIO interface support"
- depends on ARCH_MOXART
- help
- This driver supports the MDIO interface found in the network
- interface units of the MOXA ART SoC
+ Supports the LAN88XX PHYs.
-config MDIO_BUS_MUX
- tristate
- depends on OF_MDIO
- help
- This module provides a driver framework for MDIO bus
- multiplexers which connect one of several child MDIO busses
- to a parent bus. Switching between child busses is done by
- device specific drivers.
+config MICROSEMI_PHY
+ tristate "Microsemi PHYs"
+ ---help---
+ Currently supports the VSC8531 and VSC8541 PHYs
-config MDIO_BUS_MUX_GPIO
- tristate "Support for GPIO controlled MDIO bus multiplexers"
- depends on OF_GPIO && OF_MDIO
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via GPIO lines. The multiplexer connects one of
- several child MDIO busses to a parent bus. Child bus
- selection is under the control of GPIO lines.
+config NATIONAL_PHY
+ tristate "National Semiconductor PHYs"
+ ---help---
+ Currently supports the DP83865 PHY.
-config MDIO_BUS_MUX_MMIOREG
- tristate "Support for MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO && HAS_IOMEM
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via a simple memory-mapped device, like an FPGA.
- The multiplexer connects one of several child MDIO busses to a
- parent bus. Child bus selection is under the control of one of
- the FPGA's registers.
+config QSEMI_PHY
+ tristate "Quality Semiconductor PHYs"
+ ---help---
+ Currently supports the qs6612
- Currently, only 8-bit registers are supported.
+config REALTEK_PHY
+ tristate "Realtek PHYs"
+ ---help---
+ Supports the Realtek 821x PHY.
-config MDIO_BUS_MUX_BCM_IPROC
- tristate "Support for iProc based MDIO bus multiplexers"
- depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
- select MDIO_BUS_MUX
- default ARCH_BCM_IPROC
- help
- This module provides a driver for MDIO bus multiplexers found in
- iProc based Broadcom SoCs. This multiplexer connects one of several
- child MDIO bus to a parent bus. Buses could be internal as well as
- external and selection logic lies inside the same multiplexer.
+config SMSC_PHY
+ tristate "SMSC PHYs"
+ ---help---
+ Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
-config MDIO_BCM_UNIMAC
- tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM
- help
- This module provides a driver for the Broadcom UniMAC MDIO busses.
- This hardware can be found in the Broadcom GENET Ethernet MAC
- controllers as well as some Broadcom Ethernet switches such as the
- Starfighter 2 switches.
+config STE10XP
+ tristate "STMicroelectronics STe10Xp PHYs"
+ ---help---
+ This is the driver for the STe100p and STe101p PHYs.
-config MDIO_BCM_IPROC
- tristate "Broadcom iProc MDIO bus controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_IOMEM && OF_MDIO
- help
- This module provides a driver for the MDIO busses found in the
- Broadcom iProc SoC's.
+config TERANETICS_PHY
+ tristate "Teranetics PHYs"
+ ---help---
+ Currently supports the Teranetics TN2020
-config INTEL_XWAY_PHY
- tristate "Driver for Intel XWAY PHYs"
- ---help---
- Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs.
- These PHYs are marked as standalone chips under the names
- PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
- SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
+config VITESSE_PHY
+ tristate "Vitesse PHYs"
+ ---help---
+ Currently supports the vsc8244
-config MDIO_HISI_FEMAC
- tristate "Hisilicon FEMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
- help
- This module provides a driver for the MDIO busses found in the
- Hisilicon SoC that have an Fast Ethernet MAC.
+config XILINX_GMII2RGMII
+ tristate "Xilinx GMII2RGMII converter driver"
+ ---help---
+ This driver supports the Xilinx GMII to RGMII IP core. It provides
+ the Reduced Gigabit Media Independent Interface (RGMII) between
+ Ethernet physical media devices and the Gigabit Ethernet controller.
config MDIO_XGENE
tristate "APM X-Gene SoC MDIO bus controller"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 534dfa74d5a2..e58667d111e7 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,51 +1,55 @@
-# Makefile for Linux PHY drivers
+# Makefile for Linux PHY drivers and MDIO bus drivers
libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
libphy-$(CONFIG_SWPHY) += swphy.o
obj-$(CONFIG_PHYLIB) += libphy.o
+
+obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
+obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
+obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+
+obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
-obj-$(CONFIG_MARVELL_PHY) += marvell.o
-obj-$(CONFIG_DAVICOM_PHY) += davicom.o
-obj-$(CONFIG_CICADA_PHY) += cicada.o
-obj-$(CONFIG_LXT_PHY) += lxt.o
-obj-$(CONFIG_QSEMI_PHY) += qsemi.o
-obj-$(CONFIG_SMSC_PHY) += smsc.o
-obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
-obj-$(CONFIG_VITESSE_PHY) += vitesse.o
-obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
-obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
-obj-$(CONFIG_ICPLUS_PHY) += icplus.o
-obj-$(CONFIG_REALTEK_PHY) += realtek.o
-obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
-obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
-obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
-obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
-obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
+obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_CICADA_PHY) += cicada.o
+obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
-obj-$(CONFIG_STE10XP) += ste10Xp.o
-obj-$(CONFIG_MICREL_PHY) += micrel.o
-obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
-obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
-obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
+obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
+obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+obj-$(CONFIG_LXT_PHY) += lxt.o
+obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
-obj-$(CONFIG_AT803X_PHY) += at803x.o
-obj-$(CONFIG_AMD_PHY) += amd.o
-obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
-obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
-obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
-obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
-obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
-obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
-obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
-obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
-obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
-obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
+obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_SMSC_PHY) += smsc.o
+obj-$(CONFIG_STE10XP) += ste10Xp.o
+obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
+obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 885ac9cbab5a..081df68d2ce1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -439,6 +439,10 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+/* MMD Address 0x1C */
+#define MII_KSZ9031RN_EDPD 0x23
+#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
+
static int ksz9031_extended_write(struct phy_device *phydev,
u8 mode, u32 dev_addr, u32 regnum, u16 val)
{
@@ -510,6 +514,18 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev)
return genphy_restart_aneg(phydev);
}
+/* Enable energy-detect power-down mode */
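+/* In EDPD the PHY enters a low-power state while no signal energy is
+ * detected on the wire; the MMD register is read-modify-written so the
+ * other bits in it are preserved.
+ */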
+static int ksz9031_enable_edpd(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = ksz9031_extended_read(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD);
+ if (reg < 0)
+ return reg;
+ return ksz9031_extended_write(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD,
+ reg | MII_KSZ9031RN_EDPD_ENABLE);
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->mdio.dev;
@@ -525,6 +541,11 @@ static int ksz9031_config_init(struct phy_device *phydev)
};
static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
const struct device *dev_walker;
+ int result;
+
+ result = ksz9031_enable_edpd(phydev);
+ if (result < 0)
+ return result;
/* The Micrel driver has a deprecated option to place phy OF
* properties in the MAC node. Walk up the tree of devices to
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 15f820648f82..7c00e508a101 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -55,7 +55,7 @@ static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
-int lan88xx_suspend(struct phy_device *phydev)
+static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
new file mode 100644
index 000000000000..a17573e3bd8a
--- /dev/null
+++ b/drivers/net/phy/mscc.c
@@ -0,0 +1,337 @@
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Author: Nagaraju Lakkaraju
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+enum rgmii_rx_clock_delay {
+ RGMII_RX_CLK_DELAY_0_2_NS = 0,
+ RGMII_RX_CLK_DELAY_0_8_NS = 1,
+ RGMII_RX_CLK_DELAY_1_1_NS = 2,
+ RGMII_RX_CLK_DELAY_1_7_NS = 3,
+ RGMII_RX_CLK_DELAY_2_0_NS = 4,
+ RGMII_RX_CLK_DELAY_2_3_NS = 5,
+ RGMII_RX_CLK_DELAY_2_6_NS = 6,
+ RGMII_RX_CLK_DELAY_3_4_NS = 7
+};
+
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802.3 Std Registers */
+#define MSCC_PHY_EXT_PHY_CNTL_1 23
+#define MAC_IF_SELECTION_MASK 0x1800
+#define MAC_IF_SELECTION_GMII 0
+#define MAC_IF_SELECTION_RMII 1
+#define MAC_IF_SELECTION_RGMII 2
+#define MAC_IF_SELECTION_POS 11
+#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+
+#define MII_VSC85XX_INT_MASK 25
+#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_STATUS 26
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define EDGE_RATE_CNTL_POS 5
+#define EDGE_RATE_CNTL_MASK 0x00E0
+
+#define MSCC_EXT_PAGE_ACCESS 31
+#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+
+/* Extended Page 2 Registers */
+#define MSCC_PHY_RGMII_CNTL 20
+#define RGMII_RX_CLK_DELAY_MASK 0x0070
+#define RGMII_RX_CLK_DELAY_POS 4
+
+/* Microsemi PHY IDs */
+#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8541 0x00070770
+
+struct edge_rate_table {
+ u16 vddmac;
+ int slowdown[MSCC_SLOWDOWN_MAX];
+};
+
+struct edge_rate_table edge_table[MSCC_VDDMAC_MAX] = {
+ {3300, { 0, -2, -4, -7, -10, -17, -29, -53} },
+ {2500, { 0, -3, -6, -10, -14, -23, -37, -63} },
+ {1800, { 0, -5, -9, -16, -23, -35, -52, -76} },
+ {1500, { 0, -6, -14, -21, -29, -42, -58, -77} },
+};
+
+struct vsc8531_private {
+ u8 edge_slowdown;
+ u16 vddmac;
+};
+
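+/* Registers beyond the IEEE-standard set live on numbered pages; a page is
+ * selected by writing its number to MSCC_EXT_PAGE_ACCESS (register 31),
+ * and callers switch back to MSCC_PHY_PAGE_STANDARD when they are done.
+ */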
+static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
+{
+ int rc;
+
+ rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
+ return rc;
+}
+
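+/* Map the requested VDDMAC voltage and slowdown value to the edge-rate
+ * control field: pick the table row matching vddmac, find the first entry
+ * at or below the requested slowdown, and return its index counted back
+ * from MSCC_SLOWDOWN_MAX.
+ */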
+static u8 edge_rate_magic_get(u16 vddmac,
+ int slowdown)
+{
+ int rc = (MSCC_SLOWDOWN_MAX - 1);
+ u8 vdd;
+ u8 sd;
+
+ for (vdd = 0; vdd < MSCC_VDDMAC_MAX; vdd++) {
+ if (edge_table[vdd].vddmac == vddmac) {
+ for (sd = 0; sd < MSCC_SLOWDOWN_MAX; sd++) {
+ if (edge_table[vdd].slowdown[sd] <= slowdown) {
+ rc = (MSCC_SLOWDOWN_MAX - sd - 1);
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev,
+ u8 edge_rate)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
+ reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val &= ~(EDGE_RATE_CNTL_MASK);
+ reg_val |= (edge_rate << EDGE_RATE_CNTL_POS);
+ rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
+static int vsc85xx_mac_if_set(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+ reg_val &= ~(MAC_IF_SELECTION_MASK);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg_val |= (MAC_IF_SELECTION_RMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ reg_val |= (MAC_IF_SELECTION_GMII << MAC_IF_SELECTION_POS);
+ break;
+ default:
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+
+ rc = genphy_soft_reset(phydev);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
+static int vsc85xx_default_config(struct phy_device *phydev)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
+
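+ /* Fix the RGMII receive clock skew at 1.1 ns (extended page 2). */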
+ reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
+ reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
+ reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
+ phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
+#ifdef CONFIG_OF_MDIO
+static int vsc8531_of_init(struct phy_device *phydev)
+{
+ int rc;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ rc = of_property_read_u16(of_node, "vsc8531,vddmac",
+ &vsc8531->vddmac);
+ if (rc == -EINVAL)
+ vsc8531->vddmac = MSCC_VDDMAC_3300;
+ rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown",
+ &vsc8531->edge_slowdown);
+ if (rc == -EINVAL)
+ vsc8531->edge_slowdown = 0;
+
+ rc = 0;
+ return rc;
+}
+#else
+static int vsc8531_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int vsc85xx_config_init(struct phy_device *phydev)
+{
+ int rc;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 edge_rate;
+
+ rc = vsc8531_of_init(phydev);
+ if (rc)
+ return rc;
+
+ rc = vsc85xx_default_config(phydev);
+ if (rc)
+ return rc;
+
+ rc = vsc85xx_mac_if_set(phydev, phydev->interface);
+ if (rc)
+ return rc;
+
+ edge_rate = edge_rate_magic_get(vsc8531->vddmac,
+ -(int)vsc8531->edge_slowdown);
+ rc = vsc85xx_edge_rate_cntl_set(phydev, edge_rate);
+ if (rc)
+ return rc;
+
+ rc = genphy_config_init(phydev);
+
+ return rc;
+}
+
+static int vsc85xx_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+ return (rc < 0) ? rc : 0;
+}
+
+static int vsc85xx_config_intr(struct phy_device *phydev)
+{
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
+ MII_VSC85XX_INT_MASK_MASK);
+ } else {
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0);
+ if (rc < 0)
+ return rc;
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ }
+
+ return rc;
+}
+
+static int vsc85xx_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ return 0;
+}
+
+/* Microsemi VSC85xx PHYs */
+static struct phy_driver vsc85xx_driver[] = {
+{
+ .phy_id = PHY_ID_VSC8531,
+ .name = "Microsemi VSC8531",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+},
+{
+ .phy_id = PHY_ID_VSC8541,
+ .name = "Microsemi VSC8541 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+}
+
+};
+
+module_phy_driver(vsc85xx_driver);
+
+static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8531, 0xfffffff0, },
+ { PHY_ID_VSC8541, 0xfffffff0, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
+
+MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
+MODULE_AUTHOR("Nagaraju Lakkaraju");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
new file mode 100644
index 000000000000..d15dd3938ba8
--- /dev/null
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -0,0 +1,112 @@
+/* Xilinx GMII2RGMII Converter driver
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ * Copyright (C) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * Author: Andrew Lunn <andrew@lunn.ch>
+ * Author: Kedareswara rao Appana <appanad@xilinx.com>
+ *
+ * Description:
+ * This driver was developed for the Xilinx GMII2RGMII Converter.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+
+#define XILINX_GMII2RGMII_REG 0x10
+#define XILINX_GMII2RGMII_SPEED_MASK (BMCR_SPEED1000 | BMCR_SPEED100)
+
+struct gmii2rgmii {
+ struct phy_device *phy_dev;
+ struct phy_driver *phy_drv;
+ struct phy_driver conv_phy_drv;
+ int addr;
+};
+
+static int xgmiitorgmii_read_status(struct phy_device *phydev)
+{
+ struct gmii2rgmii *priv = phydev->priv;
+ u16 val = 0;
+
+ priv->phy_drv->read_status(phydev);
+
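+ /* The attached PHY driver has just refreshed phydev->speed; mirror
+ * that speed into the converter's speed-selection register.
+ */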
+ val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+ val &= XILINX_GMII2RGMII_SPEED_MASK;
+
+ if (phydev->speed == SPEED_1000)
+ val |= BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ val |= BMCR_SPEED100;
+ else
+ val |= BMCR_SPEED10;
+
+ mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val);
+
+ return 0;
+}
+
+static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node, *phy_node;
+ struct gmii2rgmii *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev, "Couldn't parse phy-handle\n");
+ return -ENODEV;
+ }
+
+ priv->phy_dev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!priv->phy_dev) {
+ dev_info(dev, "Couldn't find phydev\n");
+ return -EPROBE_DEFER;
+ }
+
+ priv->addr = mdiodev->addr;
+ priv->phy_drv = priv->phy_dev->drv;
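+ /* Interpose on the attached PHY: clone its phy_driver ops and
+ * override read_status so every status poll also updates the
+ * converter's speed register.
+ */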
+ memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
+ sizeof(struct phy_driver));
+ priv->conv_phy_drv.read_status = xgmiitorgmii_read_status;
+ priv->phy_dev->priv = priv;
+ priv->phy_dev->drv = &priv->conv_phy_drv;
+
+ return 0;
+}
+
+static const struct of_device_id xgmiitorgmii_of_match[] = {
+ { .compatible = "xlnx,gmii-to-rgmii-1.0" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmiitorgmii_of_match);
+
+static struct mdio_driver xgmiitorgmii_driver = {
+ .probe = xgmiitorgmii_probe,
+ .mdiodrv.driver = {
+ .name = "xgmiitorgmii",
+ .of_match_table = xgmiitorgmii_of_match,
+ },
+};
+
+mdio_module_driver(xgmiitorgmii_driver);
+
+MODULE_DESCRIPTION("Xilinx GMII2RGMII converter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f226db4616b7..5489c0ec1d9a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1103,6 +1103,15 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
}
conf.file = file;
+
+ /* Don't use the device name generated by the rtnetlink layer when ifname
+ * isn't specified. Let ppp_dev_configure() set the device name using
+ * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
+ * userspace to infer the device name using the PPPIOCGUNIT ioctl.
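+ *
+ * A minimal user-space sketch: once a /dev/ppp descriptor is attached
+ * to the unit, ioctl(fd, PPPIOCGUNIT, &unit) returns the unit id and
+ * the interface name follows as "ppp<unit>".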
+ */
+ if (!tb[IFLA_IFNAME])
+ conf.ifname_is_set = false;
+
err = ppp_dev_configure(src_net, dev, &conf);
out_unlock:
@@ -1354,6 +1363,8 @@ static void ppp_setup(struct net_device *dev)
dev->netdev_ops = &ppp_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &ppp_type);
+ dev->features |= NETIF_F_LLTX;
+
dev->hard_header_len = PPP_HDRLEN;
dev->mtu = PPP_MRU;
dev->addr_len = 0;
@@ -1367,12 +1378,8 @@ static void ppp_setup(struct net_device *dev)
* Transmit-side routines.
*/
-/*
- * Called to do any work queued up on the transmit side
- * that can now be done.
- */
-static void
-ppp_xmit_process(struct ppp *ppp)
+/* Called to do any work queued up on the transmit side that can now be done */
+static void __ppp_xmit_process(struct ppp *ppp)
{
struct sk_buff *skb;
@@ -1392,6 +1399,30 @@ ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
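+/* Per-CPU recursion guard for the transmit path: when PPP devices are
+ * stacked (one PPP unit carried over a channel that itself transmits via
+ * PPP), __ppp_xmit_process() could be re-entered on the same CPU, so the
+ * counter is bumped with BHs disabled and re-entry is dropped with a
+ * rate-limited error.
+ */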
+static DEFINE_PER_CPU(int, ppp_xmit_recursion);
+
+static void ppp_xmit_process(struct ppp *ppp)
+{
+ local_bh_disable();
+
+ if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+ goto err;
+
+ __this_cpu_inc(ppp_xmit_recursion);
+ __ppp_xmit_process(ppp);
+ __this_cpu_dec(ppp_xmit_recursion);
+
+ local_bh_enable();
+
+ return;
+
+err:
+ local_bh_enable();
+
+ if (net_ratelimit())
+ netdev_err(ppp->dev, "recursion detected\n");
+}
+
static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
@@ -1847,11 +1878,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
}
#endif /* CONFIG_PPP_MULTILINK */
-/*
- * Try to send data out on a channel.
- */
-static void
-ppp_channel_push(struct channel *pch)
+/* Try to send data out on a channel */
+static void __ppp_channel_push(struct channel *pch)
{
struct sk_buff *skb;
struct ppp *ppp;
@@ -1876,11 +1904,22 @@ ppp_channel_push(struct channel *pch)
read_lock_bh(&pch->upl);
ppp = pch->ppp;
if (ppp)
- ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp);
read_unlock_bh(&pch->upl);
}
}
+static void ppp_channel_push(struct channel *pch)
+{
+ local_bh_disable();
+
+ __this_cpu_inc(ppp_xmit_recursion);
+ __ppp_channel_push(pch);
+ __this_cpu_dec(ppp_xmit_recursion);
+
+ local_bh_enable();
+}
+
/*
* Receive-side routines.
*/
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index ae0905ed4a32..1951b1085cb8 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -37,6 +37,7 @@
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
+#include <net/pptp.h>
#include <linux/uaccess.h>
@@ -53,41 +54,6 @@ static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
-#define PPP_LCP_ECHOREQ 0x09
-#define PPP_LCP_ECHOREP 0x0A
-#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
-
-#define MISSING_WINDOW 20
-#define WRAPPED(curseq, lastseq)\
- ((((curseq) & 0xffffff00) == 0) &&\
- (((lastseq) & 0xffffff00) == 0xffffff00))
-
-#define PPTP_GRE_PROTO 0x880B
-#define PPTP_GRE_VER 0x1
-
-#define PPTP_GRE_FLAG_C 0x80
-#define PPTP_GRE_FLAG_R 0x40
-#define PPTP_GRE_FLAG_K 0x20
-#define PPTP_GRE_FLAG_S 0x10
-#define PPTP_GRE_FLAG_A 0x80
-
-#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
-#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
-#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
-#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
-#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
-
-#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
-struct pptp_gre_header {
- u8 flags;
- u8 ver;
- __be16 protocol;
- __be16 payload_len;
- __be16 call_id;
- __be32 seq;
- __be32 ack;
-} __packed;
-
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
@@ -240,16 +206,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb_push(skb, header_len);
hdr = (struct pptp_gre_header *)(skb->data);
- hdr->flags = PPTP_GRE_FLAG_K;
- hdr->ver = PPTP_GRE_VER;
- hdr->protocol = htons(PPTP_GRE_PROTO);
- hdr->call_id = htons(opt->dst_addr.call_id);
+ hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
+ hdr->gre_hd.protocol = GRE_PROTO_PPP;
+ hdr->call_id = htons(opt->dst_addr.call_id);
- hdr->flags |= PPTP_GRE_FLAG_S;
- hdr->seq = htonl(++opt->seq_sent);
+ hdr->seq = htonl(++opt->seq_sent);
if (opt->ack_sent != seq_recv) {
/* send ack with this message */
- hdr->ver |= PPTP_GRE_FLAG_A;
+ hdr->gre_hd.flags |= GRE_ACK;
hdr->ack = htonl(seq_recv);
opt->ack_sent = seq_recv;
}
@@ -312,7 +276,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
headersize = sizeof(*header);
/* test if acknowledgement present */
- if (PPTP_GRE_IS_A(header->ver)) {
+ if (GRE_IS_ACK(header->gre_hd.flags)) {
__u32 ack;
if (!pskb_may_pull(skb, headersize))
@@ -320,7 +284,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
header = (struct pptp_gre_header *)(skb->data);
/* ack in different place if S = 0 */
- ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
+ ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;
ack = ntohl(ack);
@@ -333,7 +297,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
headersize -= sizeof(header->ack);
}
/* test if payload present */
- if (!PPTP_GRE_IS_S(header->flags))
+ if (!GRE_IS_SEQ(header->gre_hd.flags))
goto drop;
payload_len = ntohs(header->payload_len);
@@ -394,11 +358,11 @@ static int pptp_rcv(struct sk_buff *skb)
header = (struct pptp_gre_header *)skb->data;
- if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
- PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
- PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
- !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
- (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
+ if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
+ GRE_IS_CSUM(header->gre_hd.flags) || /* flag CSUM should be clear */
+ GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
+ !GRE_IS_KEY(header->gre_hd.flags) || /* flag KEY should be set */
+ (header->gre_hd.flags & GRE_FLAGS)) /* flag Recursion Ctrl should be clear */
/* if invalid, discard this packet */
goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6f9df375c5d4..8093e39ae263 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -731,14 +731,9 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
}
alen = ETH_ALEN * uf.count;
- addr = kmalloc(alen, GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- if (copy_from_user(addr, arg + sizeof(uf), alen)) {
- err = -EFAULT;
- goto done;
- }
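+ /* memdup_user() allocates the buffer and copies it from user space
+ * in one step, returning an ERR_PTR() on failure.
+ */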
+ addr = memdup_user(arg + sizeof(uf), alen);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* The filter is updated without holding any locks. Which is
* perfectly safe. We disable it first and in the worst
@@ -758,7 +753,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
for (; n < uf.count; n++) {
if (!is_multicast_ether_addr(addr[n].u)) {
err = 0; /* no filter */
- goto done;
+ goto free_addr;
}
addr_hash_set(filter->mask, addr[n].u);
}
@@ -774,8 +769,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
/* Return the number of exact filters */
err = nexact;
-
-done:
+free_addr:
kfree(addr);
return err;
}
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index a2d3ea6efb20..d1092421aaa7 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -46,6 +46,7 @@
#define AX_CMD_SET_SW_MII 0x06
#define AX_CMD_READ_MII_REG 0x07
#define AX_CMD_WRITE_MII_REG 0x08
+#define AX_CMD_STATMNGSTS_REG 0x09
#define AX_CMD_SET_HW_MII 0x0a
#define AX_CMD_READ_EEPROM 0x0b
#define AX_CMD_WRITE_EEPROM 0x0c
@@ -71,6 +72,17 @@
#define AX_CMD_SW_RESET 0x20
#define AX_CMD_SW_PHY_STATUS 0x21
#define AX_CMD_SW_PHY_SELECT 0x22
+#define AX_QCTCTRL 0x2A
+
+#define AX_CHIPCODE_MASK 0x70
+#define AX_AX88772_CHIPCODE 0x00
+#define AX_AX88772A_CHIPCODE 0x10
+#define AX_AX88772B_CHIPCODE 0x20
+#define AX_HOST_EN 0x01
+
+#define AX_PHYSEL_PSEL 0x01
+#define AX_PHYSEL_SSMII 0
+#define AX_PHYSEL_SSEN 0x10
#define AX_PHY_SELECT_MASK (BIT(3) | BIT(2))
#define AX_PHY_SELECT_INTERNAL 0
@@ -173,6 +185,10 @@ struct asix_rx_fixup_info {
};
struct asix_common_private {
+ void (*resume)(struct usbnet *dev);
+ void (*suspend)(struct usbnet *dev);
+ u16 presvd_phy_advertise;
+ u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
};
@@ -182,10 +198,10 @@ extern const struct driver_info ax88172a_info;
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data);
+ u16 size, void *data, int in_pm);
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data);
+ u16 size, void *data, int in_pm);
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
u16 index, u16 size, void *data);
@@ -197,27 +213,31 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
-int asix_set_sw_mii(struct usbnet *dev);
-int asix_set_hw_mii(struct usbnet *dev);
+int asix_set_sw_mii(struct usbnet *dev, int in_pm);
+int asix_set_hw_mii(struct usbnet *dev, int in_pm);
int asix_read_phy_addr(struct usbnet *dev, int internal);
int asix_get_phy_addr(struct usbnet *dev);
-int asix_sw_reset(struct usbnet *dev, u8 flags);
+int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
-u16 asix_read_rx_ctl(struct usbnet *dev);
-int asix_write_rx_ctl(struct usbnet *dev, u16 mode);
+u16 asix_read_rx_ctl(struct usbnet *dev, int in_pm);
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
-u16 asix_read_medium_status(struct usbnet *dev);
-int asix_write_medium_mode(struct usbnet *dev, u16 mode);
+u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
+int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
-int asix_write_gpio(struct usbnet *dev, u16 value, int sleep);
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
void asix_set_multicast(struct net_device *net);
int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
+int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc);
+void asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc,
+ int val);
+
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7de5ab589e4e..f79eb12c326a 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -22,24 +22,49 @@
#include "asix.h"
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
+ u16 size, void *data, int in_pm)
{
int ret;
- ret = usbnet_read_cmd(dev, cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, data, size);
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
+
+ BUG_ON(!dev);
+
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+
+ ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+ index, ret);
- if (ret != size && ret >= 0)
- return -EINVAL;
return ret;
}
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
+ u16 size, void *data, int in_pm)
{
- return usbnet_write_cmd(dev, cmd,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, data, size);
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+ BUG_ON(!dev);
+
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
+
+ ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+ index, ret);
+
+ return ret;
}
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
@@ -225,19 +250,20 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return skb;
}
-int asix_set_sw_mii(struct usbnet *dev)
+int asix_set_sw_mii(struct usbnet *dev, int in_pm)
{
int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
+
if (ret < 0)
netdev_err(dev->net, "Failed to enable software MII access\n");
return ret;
}
-int asix_set_hw_mii(struct usbnet *dev)
+int asix_set_hw_mii(struct usbnet *dev, int in_pm)
{
int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
if (ret < 0)
netdev_err(dev->net, "Failed to enable hardware MII access\n");
return ret;
@@ -247,7 +273,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
{
int offset = (internal ? 1 : 0);
u8 buf[2];
- int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
netdev_dbg(dev->net, "asix_get_phy_addr()\n");
@@ -270,21 +296,21 @@ int asix_get_phy_addr(struct usbnet *dev)
}
-int asix_sw_reset(struct usbnet *dev, u8 flags)
+int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
int ret;
- ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL, in_pm);
if (ret < 0)
netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
return ret;
}
-u16 asix_read_rx_ctl(struct usbnet *dev)
+u16 asix_read_rx_ctl(struct usbnet *dev, int in_pm)
{
__le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v, in_pm);
if (ret < 0) {
netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
@@ -295,12 +321,12 @@ out:
return ret;
}
-int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm)
{
int ret;
netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL, in_pm);
if (ret < 0)
netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
mode, ret);
@@ -308,10 +334,11 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
return ret;
}
-u16 asix_read_medium_status(struct usbnet *dev)
+u16 asix_read_medium_status(struct usbnet *dev, int in_pm)
{
__le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS,
+ 0, 0, 2, &v, in_pm);
if (ret < 0) {
netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
@@ -323,12 +350,13 @@ u16 asix_read_medium_status(struct usbnet *dev)
}
-int asix_write_medium_mode(struct usbnet *dev, u16 mode)
+int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
{
int ret;
netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE,
+ mode, 0, 0, NULL, in_pm);
if (ret < 0)
netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
mode, ret);
@@ -336,12 +364,12 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode)
return ret;
}
-int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL, in_pm);
if (ret < 0)
netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
value, ret);
@@ -398,16 +426,31 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
+ u8 smsr;
+ int i = 0;
+ int ret;
mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
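+ /* Request software MII access and poll the status/management register
+ * until the host owns the MII (AX_HOST_EN), giving up after roughly
+ * 30 ms or if the device has gone away.
+ */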
+ do {
+ ret = asix_set_sw_mii(dev, 0);
+ if (ret == -ENODEV)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, 0);
+ } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ if (ret == -ENODEV) {
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+ }
+
asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
+ (__u16)loc, 2, &res, 0);
+ asix_set_hw_mii(dev, 0);
mutex_unlock(&dev->phy_mutex);
netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
+ phy_id, loc, le16_to_cpu(res));
return le16_to_cpu(res);
}
@@ -416,13 +459,95 @@ void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
+ u8 smsr;
+ int i = 0;
+ int ret;
netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
+ phy_id, loc, val);
+
mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
+ do {
+ ret = asix_set_sw_mii(dev, 0);
+ if (ret == -ENODEV)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, 0);
+ } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ if (ret == -ENODEV) {
+ mutex_unlock(&dev->phy_mutex);
+ return;
+ }
+
+ asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
+ (__u16)loc, 2, &res, 0);
+ asix_set_hw_mii(dev, 0);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+ u8 smsr;
+ int i = 0;
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+ do {
+ ret = asix_set_sw_mii(dev, 1);
+ if (ret == -ENODEV)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, 1);
+ } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ if (ret == -ENODEV) {
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+ }
+
+ asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
+ (__u16)loc, 2, &res, 1);
+ asix_set_hw_mii(dev, 1);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+void
+asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res = cpu_to_le16(val);
+ u8 smsr;
+ int i = 0;
+ int ret;
+
+ netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
+
+ mutex_lock(&dev->phy_mutex);
+ do {
+ ret = asix_set_sw_mii(dev, 1);
+ if (ret == -ENODEV)
+ break;
+ usleep_range(1000, 1100);
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+ 0, 0, 1, &smsr, 1);
+ } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+ if (ret == -ENODEV) {
+ mutex_unlock(&dev->phy_mutex);
+ return;
+ }
+
+ asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
+ (__u16)loc, 2, &res, 1);
+ asix_set_hw_mii(dev, 1);
mutex_unlock(&dev->phy_mutex);
}
@@ -431,7 +556,8 @@ void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt;
- if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE,
+ 0, 0, 1, &opt, 0) < 0) {
wolinfo->supported = 0;
wolinfo->wolopts = 0;
return;
@@ -455,7 +581,7 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
opt |= AX_MONITOR_MAGIC;
if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
- opt, 0, 0, NULL) < 0)
+ opt, 0, 0, NULL, 0) < 0)
return -EINVAL;
return 0;
@@ -490,7 +616,7 @@ int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
/* ax8817x returns 2 bytes from eeprom on read */
for (i = first_word; i <= last_word; i++) {
if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
- &(eeprom_buff[i - first_word])) < 0) {
+ &eeprom_buff[i - first_word], 0) < 0) {
kfree(eeprom_buff);
return -EIO;
}
@@ -531,7 +657,7 @@ int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
the EEPROM */
if (eeprom->offset & 1) {
ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
- &(eeprom_buff[0]));
+ &eeprom_buff[0], 0);
if (ret < 0) {
netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
goto free;
@@ -540,7 +666,7 @@ int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
if ((eeprom->offset + eeprom->len) & 1) {
ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
- &(eeprom_buff[last_word - first_word]));
+ &eeprom_buff[last_word - first_word], 0);
if (ret < 0) {
netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
goto free;
@@ -550,7 +676,7 @@ int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);
/* write data to EEPROM */
- ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL, 0);
if (ret < 0) {
netdev_err(net, "Failed to enable EEPROM write\n");
goto free;
@@ -561,7 +687,7 @@ int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
i, eeprom_buff[i - first_word]);
ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
- eeprom_buff[i - first_word], 0, NULL);
+ eeprom_buff[i - first_word], 0, NULL, 0);
if (ret < 0) {
netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
i);
@@ -570,7 +696,7 @@ int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
msleep(20);
}
- ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL, 0);
if (ret < 0) {
netdev_err(net, "Failed to disable EEPROM write\n");
goto free;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 5cabefc23494..cce24950a0ab 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -35,6 +35,15 @@
#define PHY_MODE_RTL8211CL 0x000C
+#define AX88772A_PHY14H 0x14
+#define AX88772A_PHY14H_DEFAULT 0x442C
+
+#define AX88772A_PHY15H 0x15
+#define AX88772A_PHY15H_DEFAULT 0x03C8
+
+#define AX88772A_PHY16H 0x16
+#define AX88772A_PHY16H_DEFAULT 0x4044
+
struct ax88172_int_data {
__le16 res1;
u8 link;
@@ -79,6 +88,8 @@ static u32 asix_get_phyid(struct usbnet *dev)
/* Poll for the rare case the FW or phy isn't ready yet. */
for (i = 0; i < 100; i++) {
phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg < 0)
+ return 0;
if (phy_reg != 0 && phy_reg != 0xFFFF)
break;
mdelay(1);
@@ -184,7 +195,7 @@ static int ax88172_link_reset(struct usbnet *dev)
netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
- asix_write_medium_mode(dev, mode);
+ asix_write_medium_mode(dev, mode, 0);
return 0;
}
@@ -201,6 +212,28 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_set_rx_mode = ax88172_set_multicast,
};
+static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
+{
+ unsigned int timeout = 5000;
+
+ asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, reset_bits);
+
+ /* give phy_id a chance to process reset */
+ udelay(500);
+
+ /* See IEEE 802.3 "22.2.4.1.1 Reset": 500ms max */
+ while (timeout--) {
+ if (asix_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR)
+ & BMCR_RESET)
+ udelay(100);
+ else
+ return;
+ }
+
+ netdev_err(dev->net, "BMCR_RESET timeout on phy_id %d\n",
+ dev->mii.phy_id);
+}
+
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
@@ -213,18 +246,19 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
- (gpio_bits >> (i * 8)) & 0xff, 0, 0, NULL);
+ (gpio_bits >> (i * 8)) & 0xff, 0, 0, NULL, 0);
if (ret < 0)
goto out;
msleep(5);
}
- ret = asix_write_rx_ctl(dev, 0x80);
+ ret = asix_write_rx_ctl(dev, 0x80, 0);
if (ret < 0)
goto out;
/* Get the MAC address */
- ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID,
+ 0, 0, ETH_ALEN, buf, 0);
if (ret < 0) {
netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
ret);
@@ -246,7 +280,7 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ asix_phy_reset(dev, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
mii_nway_restart(&dev->mii);
@@ -290,7 +324,7 @@ static int ax88772_link_reset(struct usbnet *dev)
netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
- asix_write_medium_mode(dev, mode);
+ asix_write_medium_mode(dev, mode, 0);
return 0;
}
@@ -298,78 +332,218 @@ static int ax88772_link_reset(struct usbnet *dev)
static int ax88772_reset(struct usbnet *dev)
{
struct asix_data *data = (struct asix_data *)&dev->data;
+ int ret;
+
+ /* Rewrite MAC address */
+ ether_addr_copy(data->mac_addr, dev->net->dev_addr);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0,
+ ETH_ALEN, data->mac_addr, 0);
+ if (ret < 0)
+ goto out;
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ax88772_hw_reset(struct usbnet *dev, int in_pm)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
int ret, embd_phy;
u16 rx_ctl;
- ret = asix_write_gpio(dev,
- AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5);
+ ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
+ AX_GPIO_GPO2EN, 5, in_pm);
if (ret < 0)
goto out;
- embd_phy = ((asix_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
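+ /* the AX88772 embedded PHY is expected at MII address 0x10 */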
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
- ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy,
+ 0, 0, NULL, in_pm);
if (ret < 0) {
netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
goto out;
}
- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
- if (ret < 0)
- goto out;
+ if (embd_phy) {
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm);
+ if (ret < 0)
+ goto out;
- msleep(150);
+ usleep_range(10000, 11000);
- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
- if (ret < 0)
- goto out;
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, in_pm);
+ if (ret < 0)
+ goto out;
- msleep(150);
+ msleep(60);
- if (embd_phy) {
- ret = asix_sw_reset(dev, AX_SWRESET_IPRL);
+ ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL,
+ in_pm);
if (ret < 0)
goto out;
} else {
- ret = asix_sw_reset(dev, AX_SWRESET_PRTE);
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL,
+ in_pm);
if (ret < 0)
goto out;
}
msleep(150);
- rx_ctl = asix_read_rx_ctl(dev);
- netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
- ret = asix_write_rx_ctl(dev, 0x0000);
+
+ if (in_pm && (!asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
+ MII_PHYSID1))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
if (ret < 0)
goto out;
- rx_ctl = asix_read_rx_ctl(dev);
- netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+ ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, in_pm);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
+ AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
+ AX88772_IPG2_DEFAULT, 0, NULL, in_pm);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Rewrite MAC address */
+ ether_addr_copy(data->mac_addr, dev->net->dev_addr);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0,
+ ETH_ALEN, data->mac_addr, in_pm);
+ if (ret < 0)
+ goto out;
- ret = asix_sw_reset(dev, AX_SWRESET_PRL);
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
if (ret < 0)
goto out;
- msleep(150);
+ rx_ctl = asix_read_rx_ctl(dev, in_pm);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
- ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL);
+ rx_ctl = asix_read_medium_status(dev, in_pm);
+ netdev_dbg(dev->net,
+ "Medium Status is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl, phy14h, phy15h, phy16h;
+ u8 chipcode = 0;
+
+ ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
if (ret < 0)
goto out;
- msleep(150);
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_CSMA);
- mii_nway_restart(&dev->mii);
+ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy |
+ AX_PHYSEL_SSEN, 0, 0, NULL, in_pm);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+ usleep_range(10000, 11000);
+
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_IPRL, in_pm);
+ if (ret < 0)
+ goto out;
+
+ usleep_range(10000, 11000);
+
+ ret = asix_sw_reset(dev, AX_SWRESET_IPRL, in_pm);
+ if (ret < 0)
+ goto out;
+
+ msleep(160);
+
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, in_pm);
+ if (ret < 0)
+ goto out;
- ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT);
+ ret = asix_sw_reset(dev, AX_SWRESET_IPRL, in_pm);
if (ret < 0)
goto out;
+ msleep(200);
+
+ if (in_pm && (!asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
+ MII_PHYSID1))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0,
+ 0, 1, &chipcode, in_pm);
+ if (ret < 0)
+ goto out;
+
+ if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772B_CHIPCODE) {
+ ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
+ 0, NULL, in_pm);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write BQ setting failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772A_CHIPCODE) {
+ /* Check if the PHY registers have default settings */
+ phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY14H);
+ phy15h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY15H);
+ phy16h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY16H);
+
+ netdev_dbg(dev->net,
+ "772a_hw_reset: MR20=0x%x MR21=0x%x MR22=0x%x\n",
+ phy14h, phy15h, phy16h);
+
+ /* Restore the PHY registers to their defaults if they differ */
+ if (phy14h != AX88772A_PHY14H_DEFAULT)
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY14H,
+ AX88772A_PHY14H_DEFAULT);
+ if (phy15h != AX88772A_PHY15H_DEFAULT)
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY15H,
+ AX88772A_PHY15H_DEFAULT);
+ if (phy16h != AX88772A_PHY16H_DEFAULT)
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
+ AX88772A_PHY16H,
+ AX88772A_PHY16H_DEFAULT);
+ }
+
ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
- AX88772_IPG2_DEFAULT, 0, NULL);
+ AX88772_IPG2_DEFAULT, 0, NULL, in_pm);
if (ret < 0) {
netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
goto out;
@@ -378,20 +552,29 @@ static int ax88772_reset(struct usbnet *dev)
/* Rewrite MAC address */
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
- data->mac_addr);
+ data->mac_addr, in_pm);
if (ret < 0)
goto out;
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
- ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
if (ret < 0)
goto out;
- rx_ctl = asix_read_rx_ctl(dev);
+ ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, in_pm);
+ if (ret < 0)
+ return ret;
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = asix_read_rx_ctl(dev, in_pm);
netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
rx_ctl);
- rx_ctl = asix_read_medium_status(dev);
+ rx_ctl = asix_read_medium_status(dev, in_pm);
netdev_dbg(dev->net,
"Medium Status is 0x%04x after all initializations\n",
rx_ctl);
@@ -400,7 +583,6 @@ static int ax88772_reset(struct usbnet *dev)
out:
return ret;
-
}
static const struct net_device_ops ax88772_netdev_ops = {
@@ -415,11 +597,97 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_set_rx_mode = asix_set_multicast,
};
+static void ax88772_suspend(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ u16 medium;
+
+ /* Stop MAC operation */
+ medium = asix_read_medium_status(dev, 0);
+ medium &= ~AX_MEDIUM_RE;
+ asix_write_medium_mode(dev, medium, 0);
+
+ netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
+ asix_read_medium_status(dev, 0));
+
+ /* Preserve BMCR for restoring */
+ priv->presvd_phy_bmcr =
+ asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_BMCR);
+
+ /* Preserve ANAR for restoring */
+ priv->presvd_phy_advertise =
+ asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE);
+}
+
+static int asix_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct asix_common_private *priv = dev->driver_priv;
+
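+ /* chip-specific suspend: stop the MAC and save PHY state before the generic usbnet suspend */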
+ if (priv->suspend)
+ priv->suspend(dev);
+
+ return usbnet_suspend(intf, message);
+}
+
+static void ax88772_restore_phy(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ if (priv->presvd_phy_advertise) {
+ /* Restore Advertisement control reg */
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ priv->presvd_phy_advertise);
+
+ /* Restore BMCR */
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
+ priv->presvd_phy_bmcr);
+
+ mii_nway_restart(&dev->mii);
+ priv->presvd_phy_advertise = 0;
+ priv->presvd_phy_bmcr = 0;
+ }
+}
+
+static void ax88772_resume(struct usbnet *dev)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ if (!ax88772_hw_reset(dev, 1))
+ break;
+ ax88772_restore_phy(dev);
+}
+
+static void ax88772a_resume(struct usbnet *dev)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (!ax88772a_hw_reset(dev, 1))
+ break;
+ }
+
+ ax88772_restore_phy(dev);
+}
+
+static int asix_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ if (priv->resume)
+ priv->resume(dev);
+
+ return usbnet_resume(intf);
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret, embd_phy, i;
- u8 buf[ETH_ALEN];
+ int ret, i;
+ u8 buf[ETH_ALEN], chipcode = 0;
u32 phyid;
+ struct asix_common_private *priv;
usbnet_get_endpoints(dev,intf);
@@ -427,13 +695,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (dev->driver_info->data & FLAG_EEPROM_MAC) {
for (i = 0; i < (ETH_ALEN >> 1); i++) {
ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
- 0, 2, buf + i * 2);
+ 0, 2, buf + i * 2, 0);
if (ret < 0)
break;
}
} else {
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf);
+ 0, 0, ETH_ALEN, buf, 0);
}
if (ret < 0) {
@@ -456,16 +724,11 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
- embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
- /* Reset the PHY to normal operation mode */
- ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
- if (ret < 0) {
- netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
- return ret;
- }
+ asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
+ chipcode &= AX_CHIPCODE_MASK;
- ax88772_reset(dev);
+ if (chipcode == AX_AX88772_CHIPCODE)
+ ax88772_hw_reset(dev, 0);
+ else
+ ax88772a_hw_reset(dev, 0);
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
@@ -482,6 +745,18 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->driver_priv)
return -ENOMEM;
+ priv = dev->driver_priv;
+
+ priv->presvd_phy_bmcr = 0;
+ priv->presvd_phy_advertise = 0;
+ if (chipcode == AX_AX88772_CHIPCODE) {
+ priv->resume = ax88772_resume;
+ priv->suspend = ax88772_suspend;
+ } else {
+ priv->resume = ax88772a_resume;
+ priv->suspend = ax88772_suspend;
+ }
+
return 0;
}
@@ -593,12 +868,12 @@ static int ax88178_reset(struct usbnet *dev)
int gpio0 = 0;
u32 phyid;
- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
+ asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
- asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
- asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
+ asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
+ asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);
netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
@@ -614,15 +889,16 @@ static int ax88178_reset(struct usbnet *dev)
netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
/* Power up external GigaPHY through AX88178 GPIO pin */
- asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
+ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 |
+ AX_GPIO_GPO1EN, 40, 0);
if ((le16_to_cpu(eeprom) >> 8) != 1) {
- asix_write_gpio(dev, 0x003c, 30);
- asix_write_gpio(dev, 0x001c, 300);
- asix_write_gpio(dev, 0x003c, 30);
+ asix_write_gpio(dev, 0x003c, 30, 0);
+ asix_write_gpio(dev, 0x001c, 300, 0);
+ asix_write_gpio(dev, 0x003c, 30, 0);
} else {
netdev_dbg(dev->net, "gpio phymode == 1 path\n");
- asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
- asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
+ asix_write_gpio(dev, AX_GPIO_GPO1EN, 30, 0);
+ asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30, 0);
}
/* Read PHYID register *AFTER* powering up PHY */
@@ -630,15 +906,15 @@ static int ax88178_reset(struct usbnet *dev)
netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
- asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
+ asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL, 0);
- asix_sw_reset(dev, 0);
+ asix_sw_reset(dev, 0, 0);
msleep(150);
- asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+ asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD, 0);
msleep(150);
- asix_write_rx_ctl(dev, 0);
+ asix_write_rx_ctl(dev, 0, 0);
if (data->phymode == PHY_MODE_MARVELL) {
marvell_phy_init(dev);
@@ -646,27 +922,23 @@ static int ax88178_reset(struct usbnet *dev)
} else if (data->phymode == PHY_MODE_RTL8211CL)
rtl8211cl_phy_init(dev);
- asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
- BMCR_RESET | BMCR_ANENABLE);
+ asix_phy_reset(dev, BMCR_RESET | BMCR_ANENABLE);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
ADVERTISE_1000FULL);
+ asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT, 0);
mii_nway_restart(&dev->mii);
- ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT);
- if (ret < 0)
- return ret;
-
/* Rewrite MAC address */
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
- data->mac_addr);
+ data->mac_addr, 0);
if (ret < 0)
return ret;
- ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
if (ret < 0)
return ret;
@@ -704,7 +976,7 @@ static int ax88178_link_reset(struct usbnet *dev)
netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
speed, ecmd.duplex, mode);
- asix_write_medium_mode(dev, mode);
+ asix_write_medium_mode(dev, mode, 0);
if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
marvell_led_status(dev, speed);
@@ -733,15 +1005,15 @@ static void ax88178_set_mfb(struct usbnet *dev)
mfb = AX_RX_CTL_MFB_16384;
}
- rxctl = asix_read_rx_ctl(dev);
- asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb);
+ rxctl = asix_read_rx_ctl(dev, 0);
+ asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb, 0);
- medium = asix_read_medium_status(dev);
+ medium = asix_read_medium_status(dev, 0);
if (dev->net->mtu > 1500)
medium |= AX_MEDIUM_JFE;
else
medium &= ~AX_MEDIUM_JFE;
- asix_write_medium_mode(dev, medium);
+ asix_write_medium_mode(dev, medium, 0);
if (dev->rx_urb_size > old_rx_urb_size)
usbnet_unlink_rx_urbs(dev);
@@ -790,7 +1062,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
@@ -811,10 +1083,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &ax88178_ethtool_ops;
/* Blink LEDS so users know driver saw dongle */
- asix_sw_reset(dev, 0);
+ asix_sw_reset(dev, 0, 0);
msleep(150);
- asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+ asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD, 0);
msleep(150);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
@@ -877,7 +1149,7 @@ static const struct driver_info ax88772_info = {
.unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
- .reset = ax88772_link_reset,
+ .reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
@@ -1005,7 +1277,7 @@ static const struct usb_device_id products [] = {
}, {
// Lenovo U2L100P 10/100
USB_DEVICE (0x17ef, 0x7203),
- .driver_info = (unsigned long) &ax88772_info,
+ .driver_info = (unsigned long)&ax88772b_info,
}, {
// ASIX AX88772B 10/100
USB_DEVICE (0x0b95, 0x772b),
@@ -1073,7 +1345,7 @@ static const struct usb_device_id products [] = {
}, {
// Asus USB Ethernet Adapter
USB_DEVICE (0x0b95, 0x7e2b),
- .driver_info = (unsigned long) &ax88772_info,
+ .driver_info = (unsigned long)&ax88772b_info,
}, {
/* ASIX 88172a demo board */
USB_DEVICE(0x0b95, 0x172a),
@@ -1095,8 +1367,8 @@ static struct usb_driver asix_driver = {
.name = DRIVER_NAME,
.id_table = products,
.probe = usbnet_probe,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
+ .suspend = asix_suspend,
+ .resume = asix_resume,
.disconnect = usbnet_disconnect,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 163a2c576e69..49a3bc107d05 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -81,7 +81,7 @@ static void ax88172a_adjust_link(struct net_device *netdev)
}
if (mode != priv->oldmode) {
- asix_write_medium_mode(dev, mode);
+ asix_write_medium_mode(dev, mode, 0);
priv->oldmode = mode;
netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n",
phydev->speed, phydev->duplex, mode);
@@ -176,18 +176,19 @@ static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
{
int ret;
- ret = asix_sw_reset(dev, AX_SWRESET_IPPD);
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD, 0);
if (ret < 0)
goto err;
msleep(150);
- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, 0);
if (ret < 0)
goto err;
msleep(150);
- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD);
+ ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD,
+ 0);
if (ret < 0)
goto err;
@@ -213,7 +214,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
dev->driver_priv = priv;
/* Get the MAC address */
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < 0) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
@@ -224,7 +225,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &ax88172a_ethtool_ops;
/* are we using the internal or the external phy? */
- ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf);
+ ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf, 0);
if (ret < 0) {
netdev_err(dev->net, "Failed to read software interface selection register: %d\n",
ret);
@@ -303,20 +304,20 @@ static int ax88172a_reset(struct usbnet *dev)
ax88172a_reset_phy(dev, priv->use_embdphy);
msleep(150);
- rx_ctl = asix_read_rx_ctl(dev);
+ rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
- ret = asix_write_rx_ctl(dev, 0x0000);
+ ret = asix_write_rx_ctl(dev, 0x0000, 0);
if (ret < 0)
goto out;
- rx_ctl = asix_read_rx_ctl(dev);
+ rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
msleep(150);
ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
- AX88772_IPG2_DEFAULT, 0, NULL);
+ AX88772_IPG2_DEFAULT, 0, NULL, 0);
if (ret < 0) {
netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
goto out;
@@ -325,20 +326,20 @@ static int ax88172a_reset(struct usbnet *dev)
/* Rewrite MAC address */
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
- data->mac_addr);
+ data->mac_addr, 0);
if (ret < 0)
goto out;
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
- ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
if (ret < 0)
goto out;
- rx_ctl = asix_read_rx_ctl(dev);
+ rx_ctl = asix_read_rx_ctl(dev, 0);
netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
rx_ctl);
- rx_ctl = asix_read_medium_status(dev);
+ rx_ctl = asix_read_medium_status(dev, 0);
netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n",
rx_ctl);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4b4458616693..e7b516342678 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -50,6 +50,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -108,23 +110,12 @@
/*****************************************************************************/
/* Debugging functions */
/*****************************************************************************/
-#define D__(lvl_, fmt, arg...) \
- do { \
- printk(lvl_ "[%d:%s]: " fmt "\n", \
- __LINE__, __func__, ## arg); \
- } while (0)
-
-#define D_(lvl, args...) \
- do { \
- if (lvl & debug) \
- D__(KERN_INFO, args); \
- } while (0)
-
-#define D1(args...) D_(0x01, ##args)
-#define D2(args...) D_(0x02, ##args)
-#define D3(args...) D_(0x04, ##args)
-#define D4(args...) D_(0x08, ##args)
-#define D5(args...) D_(0x10, ##args)
+#define hso_dbg(lvl, fmt, ...) \
+do { \
+ if ((lvl) & debug) \
+ pr_info("[%d:%s] " fmt, \
+ __LINE__, __func__, ##__VA_ARGS__); \
+} while (0)
/*****************************************************************************/
/* Enumerators */
@@ -649,7 +640,7 @@ static int get_free_serial_index(void)
}
spin_unlock_irqrestore(&serial_table_lock, flags);
- printk(KERN_ERR "%s: no free serial devices in table\n", __func__);
+ pr_err("%s: no free serial devices in table\n", __func__);
return -1;
}
@@ -709,7 +700,8 @@ static void handle_usb_error(int status, const char *function,
}
/* log a meaningful explanation of an USB status */
- D1("%s: received USB status - %s (%d)", function, explanation, status);
+ hso_dbg(0x1, "%s: received USB status - %s (%d)\n",
+ function, explanation, status);
}
/* Network interface functions */
@@ -808,7 +800,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
DUMP1(skb->data, skb->len);
/* Copy it from kernel memory to OUR memory */
memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len);
- D1("len: %d/%d", skb->len, MUX_BULK_TX_BUF_SIZE);
+ hso_dbg(0x1, "len: %d/%d\n", skb->len, MUX_BULK_TX_BUF_SIZE);
/* Fill in the URB for shipping it out. */
usb_fill_bulk_urb(odev->mux_bulk_tx_urb,
@@ -872,7 +864,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
unsigned char *tmp_rx_buf;
/* log if needed */
- D1("Rx %d bytes", count);
+ hso_dbg(0x1, "Rx %d bytes\n", count);
DUMP(ip_pkt, min(128, (int)count));
while (count) {
@@ -912,7 +904,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
frame_len);
if (!odev->skb_rx_buf) {
/* We got no receive buffer. */
- D1("could not allocate memory");
+ hso_dbg(0x1, "could not allocate memory\n");
odev->rx_parse_state = WAIT_SYNC;
continue;
}
@@ -972,11 +964,11 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
break;
case WAIT_SYNC:
- D1(" W_S");
+ hso_dbg(0x1, " W_S\n");
count = 0;
break;
default:
- D1(" ");
+ hso_dbg(0x1, "\n");
count--;
break;
}
@@ -1020,7 +1012,7 @@ static void read_bulk_callback(struct urb *urb)
/* Sanity check */
if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
- D1("BULK IN callback but driver is not active!");
+ hso_dbg(0x1, "BULK IN callback but driver is not active!\n");
return;
}
usb_mark_last_busy(urb->dev);
@@ -1112,11 +1104,11 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
struct hso_serial *serial = tty->driver_data;
if (!serial) {
- printk(KERN_ERR "%s: no tty structures", __func__);
+ pr_err("%s: no tty structures", __func__);
return;
}
- D4("port %d", serial->minor);
+ hso_dbg(0x8, "port %d\n", serial->minor);
/*
* Fix up unsupported bits
@@ -1205,11 +1197,11 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
+ hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status);
/* sanity check */
if (!serial) {
- D1("serial == NULL");
+ hso_dbg(0x1, "serial == NULL\n");
return;
}
if (status) {
@@ -1217,7 +1209,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
return;
}
- D1("Actual length = %d\n", urb->actual_length);
+ hso_dbg(0x1, "Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
/* Anyone listening? */
@@ -1266,7 +1258,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) {
WARN_ON(1);
tty->driver_data = NULL;
- D1("Failed to open port");
+ hso_dbg(0x1, "Failed to open port\n");
return -ENODEV;
}
@@ -1275,7 +1267,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
if (result < 0)
goto err_out;
- D1("Opening %d", serial->minor);
+ hso_dbg(0x1, "Opening %d\n", serial->minor);
/* setup */
tty->driver_data = serial;
@@ -1298,7 +1290,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
kref_get(&serial->parent->ref);
}
} else {
- D1("Port was already open");
+ hso_dbg(0x1, "Port was already open\n");
}
usb_autopm_put_interface(serial->parent->interface);
@@ -1317,7 +1309,7 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
struct hso_serial *serial = tty->driver_data;
u8 usb_gone;
- D1("Closing serial port");
+ hso_dbg(0x1, "Closing serial port\n");
/* Open failed, no close cleanup required */
if (serial == NULL)
@@ -1357,7 +1349,7 @@ static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf,
/* sanity check */
if (serial == NULL) {
- printk(KERN_ERR "%s: serial is NULL\n", __func__);
+ pr_err("%s: serial is NULL\n", __func__);
return -ENODEV;
}
@@ -1412,8 +1404,8 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
unsigned long flags;
if (old)
- D5("Termios called with: cflags new[%d] - old[%d]",
- tty->termios.c_cflag, old->c_cflag);
+ hso_dbg(0x10, "Termios called with: cflags new[%d] - old[%d]\n",
+ tty->termios.c_cflag, old->c_cflag);
/* the actual setup */
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1649,7 +1641,7 @@ static int hso_serial_tiocmget(struct tty_struct *tty)
/* sanity check */
if (!serial) {
- D1("no tty structures");
+ hso_dbg(0x1, "no tty structures\n");
return -EINVAL;
}
spin_lock_irq(&serial->serial_lock);
@@ -1682,7 +1674,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
/* sanity check */
if (!serial) {
- D1("no tty structures");
+ hso_dbg(0x1, "no tty structures\n");
return -EINVAL;
}
@@ -1721,7 +1713,7 @@ static int hso_serial_ioctl(struct tty_struct *tty,
{
struct hso_serial *serial = tty->driver_data;
int ret = 0;
- D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
+ hso_dbg(0x8, "IOCTL cmd: %d, arg: %ld\n", cmd, arg);
if (!serial)
return -ENODEV;
@@ -1783,7 +1775,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
/* Sanity check */
if (!serial || !ctrl_urb || !ctrl_req) {
- printk(KERN_ERR "%s: Wrong arguments\n", __func__);
+ pr_err("%s: Wrong arguments\n", __func__);
return -EINVAL;
}
@@ -1808,9 +1800,9 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
pipe = usb_sndctrlpipe(serial->parent->usb, 0);
}
/* syslog */
- D2("%s command (%02x) len: %d, port: %d",
- type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? "Read" : "Write",
- ctrl_req->bRequestType, ctrl_req->wLength, port);
+ hso_dbg(0x2, "%s command (%02x) len: %d, port: %d\n",
+ type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? "Read" : "Write",
+ ctrl_req->bRequestType, ctrl_req->wLength, port);
/* Load ctrl urb */
ctrl_urb->transfer_flags = 0;
@@ -1876,11 +1868,11 @@ static void intr_callback(struct urb *urb)
handle_usb_error(status, __func__, NULL);
return;
}
- D4("\n--- Got intr callback 0x%02X ---", status);
+ hso_dbg(0x8, "--- Got intr callback 0x%02X ---\n", status);
/* what request? */
port_req = urb->transfer_buffer;
- D4(" port_req = 0x%.2X\n", *port_req);
+ hso_dbg(0x8, "port_req = 0x%.2X\n", *port_req);
/* loop over all muxed ports to find the one sending this */
for (i = 0; i < 8; i++) {
/* max 8 channels on MUX */
@@ -1888,7 +1880,8 @@ static void intr_callback(struct urb *urb)
serial = get_serial_by_shared_int_and_type(shared_int,
(1 << i));
if (serial != NULL) {
- D1("Pending read interrupt on port %d\n", i);
+ hso_dbg(0x1, "Pending read interrupt on port %d\n",
+ i);
spin_lock(&serial->serial_lock);
if (serial->rx_state == RX_IDLE &&
serial->port.count > 0) {
@@ -1900,8 +1893,8 @@ static void intr_callback(struct urb *urb)
} else
serial->rx_state = RX_PENDING;
} else {
- D1("Already a read pending on "
- "port %d or port not open\n", i);
+ hso_dbg(0x1, "Already a read pending on port %d or port not open\n",
+ i);
}
spin_unlock(&serial->serial_lock);
}
@@ -1933,7 +1926,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
/* sanity check */
if (!serial) {
- D1("serial == NULL");
+ hso_dbg(0x1, "serial == NULL\n");
return;
}
@@ -1948,7 +1941,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
tty_port_tty_wakeup(&serial->port);
hso_kick_transmit(serial);
- D1(" ");
+ hso_dbg(0x1, "\n");
}
/* called for writing diag or CS serial port */
@@ -1996,8 +1989,8 @@ static void ctrl_callback(struct urb *urb)
/* what request? */
req = (struct usb_ctrlrequest *)(urb->setup_packet);
- D4("\n--- Got muxed ctrl callback 0x%02X ---", status);
- D4("Actual length of urb = %d\n", urb->actual_length);
+ hso_dbg(0x8, "--- Got muxed ctrl callback 0x%02X ---\n", status);
+ hso_dbg(0x8, "Actual length of urb = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
if (req->bRequestType ==
@@ -2023,7 +2016,7 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
/* Sanity check */
if (urb == NULL || serial == NULL) {
- D1("serial = NULL");
+ hso_dbg(0x1, "serial = NULL\n");
return -2;
}
@@ -2035,7 +2028,7 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
}
/* Push data to tty */
- D1("data to push to tty");
+ hso_dbg(0x1, "data to push to tty\n");
count = tty_buffer_request_room(&serial->port, urb->actual_length);
if (count >= urb->actual_length) {
tty_insert_flip_string(&serial->port, urb->transfer_buffer,
@@ -2300,10 +2293,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->rx_data_length = rx_size;
for (i = 0; i < serial->num_rx_urbs; i++) {
serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!serial->rx_urb[i]) {
- dev_err(dev, "Could not allocate urb?\n");
+ if (!serial->rx_urb[i])
goto exit;
- }
serial->rx_urb[i]->transfer_buffer = NULL;
serial->rx_urb[i]->transfer_buffer_length = 0;
serial->rx_data[i] = kzalloc(serial->rx_data_length,
@@ -2314,10 +2305,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
/* TX, allocate urb and initialize */
serial->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!serial->tx_urb) {
- dev_err(dev, "Could not allocate urb?\n");
+ if (!serial->tx_urb)
goto exit;
- }
serial->tx_urb->transfer_buffer = NULL;
serial->tx_urb->transfer_buffer_length = 0;
/* prepare our TX buffer */
@@ -2419,7 +2408,7 @@ static void hso_net_init(struct net_device *net)
{
struct hso_net *hso_net = netdev_priv(net);
- D1("sizeof hso_net is %d", (int)sizeof(*hso_net));
+ hso_dbg(0x1, "sizeof hso_net is %zu\n", sizeof(*hso_net));
/* fill in the other fields */
net->netdev_ops = &hso_netdev_ops;
@@ -2555,20 +2544,16 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
/* start allocating */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!hso_net->mux_bulk_rx_urb_pool[i]) {
- dev_err(&interface->dev, "Could not allocate rx urb\n");
+ if (!hso_net->mux_bulk_rx_urb_pool[i])
goto exit;
- }
hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
GFP_KERNEL);
if (!hso_net->mux_bulk_rx_buf_pool[i])
goto exit;
}
hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!hso_net->mux_bulk_tx_urb) {
- dev_err(&interface->dev, "Could not allocate tx urb\n");
+ if (!hso_net->mux_bulk_tx_urb)
goto exit;
- }
hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
if (!hso_net->mux_bulk_tx_buf)
goto exit;
@@ -2787,10 +2772,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
}
mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!mux->shared_intr_urb) {
- dev_err(&interface->dev, "Could not allocate intr urb?\n");
+ if (!mux->shared_intr_urb)
goto exit;
- }
mux->shared_intr_buf =
kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
GFP_KERNEL);
@@ -3239,7 +3222,7 @@ static int __init hso_init(void)
int result;
/* put it in the log */
- printk(KERN_INFO "hso: %s\n", version);
+ pr_info("%s\n", version);
/* Initialise the serial table semaphore and table */
spin_lock_init(&serial_table_lock);
@@ -3270,16 +3253,15 @@ static int __init hso_init(void)
/* register the tty driver */
result = tty_register_driver(tty_drv);
if (result) {
- printk(KERN_ERR "%s - tty_register_driver failed(%d)\n",
- __func__, result);
+ pr_err("%s - tty_register_driver failed(%d)\n",
+ __func__, result);
goto err_free_tty;
}
/* register this module as an usb driver */
result = usb_register(&hso_driver);
if (result) {
- printk(KERN_ERR "Could not register hso driver? error: %d\n",
- result);
+ pr_err("Could not register hso driver - error: %d\n", result);
goto err_unreg_tty;
}
@@ -3294,7 +3276,7 @@ err_free_tty:
static void __exit hso_exit(void)
{
- printk(KERN_INFO "hso: unloaded\n");
+ pr_info("unloaded\n");
tty_unregister_driver(tty_drv);
put_tty_driver(tty_drv);
@@ -3311,7 +3293,7 @@ MODULE_DESCRIPTION(MOD_DESCRIPTION);
MODULE_LICENSE(MOD_LICENSE);
/* change the debug level (eg: insmod hso.ko debug=0x04) */
-MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
+MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
module_param(debug, int, S_IRUGO | S_IWUSR);
/* set the major tty number (eg: insmod hso.ko tty_major=245) */
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 528b9c9c4e60..66b34ddbe216 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -265,8 +265,6 @@ static int kaweth_control(struct kaweth_device *kaweth,
struct usb_ctrlrequest *dr;
int retval;
- netdev_dbg(kaweth->net, "kaweth_control()\n");
-
if(in_interrupt()) {
netdev_dbg(kaweth->net, "in_interrupt()\n");
return -EBUSY;
@@ -300,8 +298,6 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
int retval;
- netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
-
retval = kaweth_control(kaweth,
usb_rcvctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_GET_ETHERNET_DESC,
@@ -451,8 +447,6 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
- netdev_dbg(kaweth->net, "Triggering firmware\n");
-
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
@@ -471,7 +465,6 @@ static int kaweth_reset(struct kaweth_device *kaweth)
{
int result;
- netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
result = usb_reset_configuration(kaweth->dev);
mdelay(10);
@@ -685,8 +678,6 @@ static int kaweth_open(struct net_device *net)
struct kaweth_device *kaweth = netdev_priv(net);
int res;
- netdev_dbg(kaweth->net, "Opening network device.\n");
-
res = usb_autopm_get_interface(kaweth->intf);
if (res) {
dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n");
@@ -951,7 +942,6 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dev_dbg(&intf->dev, "Suspending device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status |= KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +958,6 @@ static int kaweth_resume(struct usb_interface *intf)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dev_dbg(&intf->dev, "Resuming device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1190,8 +1179,6 @@ err_fw:
dev_info(dev, "kaweth interface created at %s\n",
kaweth->net->name);
- dev_dbg(dev, "Kaweth probe returning.\n");
-
return 0;
err_intfdata:
@@ -1219,8 +1206,6 @@ static void kaweth_disconnect(struct usb_interface *intf)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
struct net_device *netdev;
- dev_info(&intf->dev, "Unregistering\n");
-
usb_set_intfdata(intf, NULL);
if (!kaweth) {
dev_warn(&intf->dev, "unregistering non-existent device\n");
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6a9d474b08b2..db558b8b32fe 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1179,7 +1179,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
* NOTE: annoying asymmetry: if it's active, schedule_work() fails,
* but tasklet_schedule() doesn't. hope the failure is rare.
*/
-void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
set_bit(work, &dev->flags);
if (!schedule_delayed_work(&dev->wq, 0))
@@ -1406,7 +1406,7 @@ static u32 lan78xx_get_link(struct net_device *net)
return net->phydev->link;
}
-int lan78xx_nway_reset(struct net_device *net)
+static int lan78xx_nway_reset(struct net_device *net)
{
return phy_start_aneg(net->phydev);
}
@@ -1997,7 +1997,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
@@ -2371,7 +2371,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
remove_wait_queue(&unlink_wakeup, &wait);
}
-int lan78xx_stop(struct net_device *net)
+static int lan78xx_stop(struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -2533,7 +2533,8 @@ static void lan78xx_queue_skb(struct sk_buff_head *list,
entry->state = state;
}
-netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+static netdev_tx_t
+lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
struct sk_buff *skb2 = NULL;
@@ -2562,7 +2563,8 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+static int
+lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt = NULL;
@@ -2700,7 +2702,7 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
}
}
-void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
int status;
@@ -3002,10 +3004,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
gso_skb:
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netif_dbg(dev, tx_err, dev->net, "no urb\n");
+ if (!urb)
goto drop;
- }
entry = (struct skb_data *)skb->cb;
entry->urb = urb;
@@ -3285,7 +3285,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
usb_put_dev(udev);
}
-void lan78xx_tx_timeout(struct net_device *net)
+static void lan78xx_tx_timeout(struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -3605,7 +3605,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
return 0;
}
-int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3701,7 +3701,7 @@ out:
return ret;
}
-int lan78xx_resume(struct usb_interface *intf)
+static int lan78xx_resume(struct usb_interface *intf)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
struct sk_buff *skb;
@@ -3768,7 +3768,7 @@ int lan78xx_resume(struct usb_interface *intf)
return 0;
}
-int lan78xx_reset_resume(struct usb_interface *intf)
+static int lan78xx_reset_resume(struct usb_interface *intf)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 9bbe0161a2f4..1434e5dd5f9c 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1129,7 +1129,8 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
if (pegasus_count == 0) {
- pegasus_workqueue = create_singlethread_workqueue("pegasus");
+ pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM,
+ 0);
if (!pegasus_workqueue)
return -ENOMEM;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c254248863d4..44d439f50961 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1076,8 +1076,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
return -ENODEV;
if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
netif_warn(tp, probe, tp->netdev,
- "Invalid buffer when reading pass-thru MAC addr: "
- "(%d, %d)\n",
+ "Invalid buffer for pass-thru MAC addr: (%d, %d)\n",
obj->type, obj->string.length);
goto amacout;
}
@@ -1090,8 +1089,8 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
ret = hex2bin(buf, obj->string.pointer + 9, 6);
if (!(ret == 0 && is_valid_ether_addr(buf))) {
netif_warn(tp, probe, tp->netdev,
- "Invalid MAC when reading pass-thru MAC addr: "
- "%d, %pM\n", ret, buf);
+ "Invalid MAC for pass-thru MAC addr: %d, %pM\n",
+ ret, buf);
ret = -EINVAL;
goto amacout;
}
@@ -1111,9 +1110,9 @@ static int set_ethernet_addr(struct r8152 *tp)
struct sockaddr sa;
int ret;
- if (tp->version == RTL_VER_01)
+ if (tp->version == RTL_VER_01) {
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else {
+ } else {
/* if this is not an RTL8153-AD, no eFuse MAC pass-thru is set,
* or the system doesn't provide a valid _SB.AMAC, this is
* expected to be non-zero
@@ -4043,7 +4042,7 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
return ret;
}
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index dc989a8b5afb..831aa33d078a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -33,7 +33,7 @@
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "1.0.4"
+#define SMSC_DRIVER_VERSION "1.0.5"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -64,6 +64,7 @@
#define CARRIER_CHECK_DELAY (2 * HZ)
struct smsc95xx_priv {
+ u32 chip_id;
u32 mac_cr;
u32 hash_hi;
u32 hash_lo;
@@ -71,6 +72,7 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ u8 mdix_ctrl;
bool link_ok;
struct delayed_work carrier_check;
struct usbnet *dev;
@@ -782,14 +784,113 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
return ret;
}
+static int get_mdix_status(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u32 val;
+ int buf;
+
+ buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, SPECIAL_CTRL_STS);
+ if (buf & SPECIAL_CTRL_STS_OVRRD_AMDIX_) {
+ if (buf & SPECIAL_CTRL_STS_AMDIX_ENABLE_)
+ return ETH_TP_MDI_AUTO;
+ else if (buf & SPECIAL_CTRL_STS_AMDIX_STATE_)
+ return ETH_TP_MDI_X;
+ } else {
+ buf = smsc95xx_read_reg(dev, STRAP_STATUS, &val);
+ if (buf >= 0 && (val & STRAP_STATUS_AMDIX_EN_))
+ return ETH_TP_MDI_AUTO;
+ }
+
+ return ETH_TP_MDI;
+}
+
+static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int buf;
+
+ if ((pdata->chip_id == ID_REV_CHIP_ID_9500A_) ||
+ (pdata->chip_id == ID_REV_CHIP_ID_9530_) ||
+ (pdata->chip_id == ID_REV_CHIP_ID_89530_) ||
+ (pdata->chip_id == ID_REV_CHIP_ID_9730_)) {
+ /* Extend Manual AutoMDIX timer for 9500A/9500Ai */
+ buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
+ PHY_EDPD_CONFIG);
+ buf |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
+ smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
+ PHY_EDPD_CONFIG, buf);
+ }
+
+ if (mdix_ctrl == ETH_TP_MDI) {
+ buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS);
+ buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS, buf);
+ } else if (mdix_ctrl == ETH_TP_MDI_X) {
+ buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS);
+ buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ buf |= SPECIAL_CTRL_STS_AMDIX_STATE_;
+ smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS, buf);
+ } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
+ buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS);
+ buf &= ~SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ buf |= SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
+ SPECIAL_CTRL_STS, buf);
+ }
+ pdata->mdix_ctrl = mdix_ctrl;
+}
+
+static int smsc95xx_get_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int retval;
+
+ retval = usbnet_get_settings(net, cmd);
+
+ cmd->eth_tp_mdix = pdata->mdix_ctrl;
+ cmd->eth_tp_mdix_ctrl = pdata->mdix_ctrl;
+
+ return retval;
+}
+
+static int smsc95xx_set_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int retval;
+
+ if (pdata->mdix_ctrl != cmd->eth_tp_mdix_ctrl)
+ set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
+
+ retval = usbnet_set_settings(net, cmd);
+
+ return retval;
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
+ .get_settings = smsc95xx_get_settings,
+ .set_settings = smsc95xx_set_settings,
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
@@ -1194,6 +1295,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret < 0)
return ret;
val >>= 16;
+ pdata->chip_id = val;
+ pdata->mdix_ctrl = get_mdix_status(dev->net);
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 526faa0c44e6..29a4d9efce7c 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -144,6 +144,14 @@
#define BURST_CAP (0x38)
+#define STRAP_STATUS (0x3C)
+#define STRAP_STATUS_PWR_SEL_ (0x00000020)
+#define STRAP_STATUS_AMDIX_EN_ (0x00000010)
+#define STRAP_STATUS_PORT_SWAP_ (0x00000008)
+#define STRAP_STATUS_EEP_SIZE_ (0x00000004)
+#define STRAP_STATUS_RMT_WKP_ (0x00000002)
+#define STRAP_STATUS_EEP_DISABLE_ (0x00000001)
+
#define GPIO_WAKE (0x64)
#define INT_EP_CTL (0x68)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3bfb59209326..d5071e364d40 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -2062,11 +2062,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in"
- " %s!\n", __func__);
+ if (!urb)
goto fail;
- }
if (data) {
buf = kmemdup(data, size, GFP_ATOMIC);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f37a6e61d4ad..fbc853e64531 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -313,7 +313,7 @@ static const struct net_device_ops veth_netdev_ops = {
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
@@ -340,6 +340,7 @@ static void veth_setup(struct net_device *dev)
dev->hw_features = VETH_FEATURES;
dev->hw_enc_features = VETH_FEATURES;
+ dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}
/*
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4244b9d4418e..b5554f2ebee4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1641,7 +1641,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
-void
+static void
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
{
int i;
@@ -3186,7 +3186,6 @@ vmxnet3_tx_timeout(struct net_device *netdev)
netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
- netif_wake_queue(adapter->netdev);
}
@@ -3213,6 +3212,7 @@ vmxnet3_reset_work(struct work_struct *data)
}
rtnl_unlock();
+ netif_wake_queue(adapter->netdev);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 1ce7420322ee..85c271c70d42 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -37,9 +37,6 @@
#include <net/l3mdev.h>
#include <net/fib_rules.h>
-#define RT_FL_TOS(oldflp4) \
- ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
@@ -137,6 +134,20 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
+static int vrf_ip6_local_out(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
+ sk, skb, NULL, skb_dst(skb)->dev, dst_output);
+
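+ /* nf_hook() returns 1 when the hook verdict allows the packet to continue */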
+ if (likely(err == 1))
+ err = dst_output(net, sk, skb);
+
+ return err;
+}
+
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
@@ -151,7 +162,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
.flowlabel = ip6_flowinfo(iph),
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
- .flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
};
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
@@ -207,7 +218,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
- ret = ip6_local_out(net, skb->sk, skb);
+ ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
else
@@ -227,6 +238,20 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
}
#endif
+/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
+static int vrf_ip_local_out(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, skb_dst(skb)->dev, dst_output);
+ if (likely(err == 1))
+ err = dst_output(net, sk, skb);
+
+ return err;
+}
+
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
@@ -237,8 +262,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_oif = vrf_dev->ifindex,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
- FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
.daddr = ip4h->daddr,
};
struct net *net = dev_net(vrf_dev);
@@ -292,7 +316,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
- ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;
else
@@ -377,6 +401,43 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
+/* set dst on skb to send packet to us via dev_xmit path. Allows
+ * packet to go through device based features such as qdisc, netfilter
+ * hooks and packet sockets with skb->dev set to vrf device.
+ */
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct dst_entry *dst = NULL;
+ struct rt6_info *rt6;
+
+ /* don't divert link scope packets */
+ if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+ return skb;
+
+ rcu_read_lock();
+
+ rt6 = rcu_dereference(vrf->rt6);
+ if (likely(rt6)) {
+ dst = &rt6->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst)) {
+ vrf_tx_error(vrf_dev, skb);
+ return NULL;
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ return skb;
+}
+
/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
@@ -463,6 +524,13 @@ out:
return rc;
}
#else
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ return skb;
+}
+
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}
@@ -531,6 +599,55 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+/* set dst on skb to send packet to us via dev_xmit path. Allows
+ * packet to go through device based features such as qdisc, netfilter
+ * hooks and packet sockets with skb->dev set to vrf device.
+ */
+static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct dst_entry *dst = NULL;
+ struct rtable *rth;
+
+ rcu_read_lock();
+
+ rth = rcu_dereference(vrf->rth);
+ if (likely(rth)) {
+ dst = &rth->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst)) {
+ vrf_tx_error(vrf_dev, skb);
+ return NULL;
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ return skb;
+}
+
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb,
+ u16 proto)
+{
+ switch (proto) {
+ case AF_INET:
+ return vrf_ip_out(vrf_dev, sk, skb);
+ case AF_INET6:
+ return vrf_ip6_out(vrf_dev, sk, skb);
+ }
+
+ return skb;
+}
+
/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
@@ -722,63 +839,6 @@ static u32 vrf_fib_table(const struct net_device *dev)
return vrf->tb_id;
}
-static struct rtable *vrf_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- struct rtable *rth = NULL;
-
- if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
- struct net_vrf *vrf = netdev_priv(dev);
-
- rcu_read_lock();
-
- rth = rcu_dereference(vrf->rth);
- if (likely(rth))
- dst_hold(&rth->dst);
-
- rcu_read_unlock();
- }
-
- return rth;
-}
-
-/* called under rcu_read_lock */
-static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
-{
- struct fib_result res = { .tclassid = 0 };
- struct net *net = dev_net(dev);
- u32 orig_tos = fl4->flowi4_tos;
- u8 flags = fl4->flowi4_flags;
- u8 scope = fl4->flowi4_scope;
- u8 tos = RT_FL_TOS(fl4);
- int rc;
-
- if (unlikely(!fl4->daddr))
- return 0;
-
- fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
- fl4->flowi4_iif = LOOPBACK_IFINDEX;
- /* make sure oif is set to VRF device for lookup */
- fl4->flowi4_oif = dev->ifindex;
- fl4->flowi4_tos = tos & IPTOS_RT_MASK;
- fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
- RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
-
- rc = fib_lookup(net, fl4, &res, 0);
- if (!rc) {
- if (res.type == RTN_LOCAL)
- fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
- else
- fib_select_path(net, &res, fl4, -1);
- }
-
- fl4->flowi4_flags = flags;
- fl4->flowi4_tos = orig_tos;
- fl4->flowi4_scope = scope;
-
- return rc;
-}
-
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return 0;
@@ -970,106 +1030,44 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
- struct flowi6 *fl6)
+/* send to link-local or multicast address via interface enslaved to
+ * VRF device. Force lookup to VRF table without changing flow struct
+ */
+static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
+ struct flowi6 *fl6)
{
- bool need_strict = rt6_need_strict(&fl6->daddr);
- struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
+ int flags = RT6_LOOKUP_F_IFACE;
struct dst_entry *dst = NULL;
struct rt6_info *rt;
- /* send to link-local or multicast address */
- if (need_strict) {
- int flags = RT6_LOOKUP_F_IFACE;
-
- /* VRF device does not have a link-local address and
- * sending packets to link-local or mcast addresses over
- * a VRF device does not make sense
- */
- if (fl6->flowi6_oif == dev->ifindex) {
- struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst;
-
- dst_hold(dst);
- return dst;
- }
-
- if (!ipv6_addr_any(&fl6->saddr))
- flags |= RT6_LOOKUP_F_HAS_SADDR;
-
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
- if (rt)
- dst = &rt->dst;
-
- } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
-
- rcu_read_lock();
-
- rt = rcu_dereference(vrf->rt6);
- if (likely(rt)) {
- dst = &rt->dst;
- dst_hold(dst);
- }
-
- rcu_read_unlock();
+ /* VRF device does not have a link-local address and
+ * sending packets to link-local or mcast addresses over
+ * a VRF device does not make sense
+ */
+ if (fl6->flowi6_oif == dev->ifindex) {
+ dst = &net->ipv6.ip6_null_entry->dst;
+ dst_hold(dst);
+ return dst;
}
- /* make sure oif is set to VRF device for lookup */
- if (!need_strict)
- fl6->flowi6_oif = dev->ifindex;
-
- return dst;
-}
-
-/* called under rcu_read_lock */
-static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
- struct flowi6 *fl6)
-{
- struct net *net = dev_net(dev);
- struct dst_entry *dst;
- struct rt6_info *rt;
- int err;
-
- if (rt6_need_strict(&fl6->daddr)) {
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
- RT6_LOOKUP_F_IFACE);
- if (unlikely(!rt))
- return 0;
+ if (!ipv6_addr_any(&fl6->saddr))
+ flags |= RT6_LOOKUP_F_HAS_SADDR;
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+ if (rt)
dst = &rt->dst;
- } else {
- __u8 flags = fl6->flowi6_flags;
- fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
- fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;
-
- dst = ip6_route_output(net, sk, fl6);
- rt = (struct rt6_info *)dst;
-
- fl6->flowi6_flags = flags;
- }
-
- err = dst->error;
- if (!err) {
- err = ip6_route_get_saddr(net, rt, &fl6->daddr,
- sk ? inet6_sk(sk)->srcprefs : 0,
- &fl6->saddr);
- }
-
- dst_release(dst);
-
- return err;
+ return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_fib_table = vrf_fib_table,
- .l3mdev_get_rtable = vrf_get_rtable,
- .l3mdev_get_saddr = vrf_get_saddr,
.l3mdev_l3_rcv = vrf_l3_rcv,
+ .l3mdev_l3_out = vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
- .l3mdev_get_rt6_dst = vrf_get_rt6_dst,
- .l3mdev_get_saddr6 = vrf_get_saddr6,
+ .l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6e65832051d6..e7d16687538b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -27,7 +27,6 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
-#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
@@ -288,7 +287,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
nla_put_s32(skb, NDA_LINK_NETNSID,
- peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
+ peernet2id(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
@@ -861,20 +860,20 @@ out:
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
- struct net_device *filter_dev, int idx)
+ struct net_device *filter_dev, int *idx)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
+ int err = 0;
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
- int err;
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (idx < cb->args[0])
+ if (*idx < cb->args[2])
goto skip;
err = vxlan_fdb_info(skb, vxlan, f,
@@ -882,17 +881,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0) {
- cb->args[1] = err;
+ if (err < 0)
goto out;
- }
skip:
- ++idx;
+ *idx += 1;
}
}
}
out:
- return idx;
+ return err;
}
/* Watch incoming packets to learn mapping between Ethernet address
@@ -1294,7 +1291,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
- vxlan_vni_to_tun_id(vni), sizeof(*md));
+ key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
goto drop;
@@ -1948,7 +1945,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
- vni = vxlan_tun_id_to_vni(info->key.tun_id);
+ vni = tunnel_id_to_key32(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
@@ -2106,6 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni, md, flags, udp_sum);
if (err < 0) {
dst_release(ndst);
+ dev->stats.tx_errors++;
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 6f044450b702..5fbf83d5aa57 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -162,7 +162,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (priv->ucc_pram_offset < 0) {
- dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n");
+ dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index d98c7e57137d..3a421ca8a4d0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -582,8 +582,8 @@ handle_channel( struct net_device *dev )
/*
- * Routine returns 1 if it need to acknoweledge received frame.
- * Empty frame received without errors won't be acknoweledged.
+ * Routine returns 1 if it needs to acknowledge received frame.
+ * Empty frame received without errors won't be acknowledged.
*/
static int
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index fc1355d98bc6..5d429f816125 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -206,7 +206,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!i2400mu->notif_urb) {
ret = -ENOMEM;
- dev_err(dev, "notification: cannot allocate URB\n");
goto error_alloc_urb;
}
epd = usb_get_epd(i2400mu->usb_iface,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aded24bcdf4..7a60d2e652da 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -706,10 +706,8 @@ static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
data->ar = ar;
data->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->urb) {
- ar5523_err(ar, "could not allocate rx data urb\n");
+ if (!data->urb)
goto err;
- }
list_add_tail(&data->list, &ar->rx_data_free);
atomic_inc(&ar->rx_data_free_cnt);
}
@@ -824,7 +822,6 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- ar5523_err(ar, "Failed to allocate TX urb\n");
ieee80211_free_txskb(ar->hw, skb);
continue;
}
@@ -949,10 +946,8 @@ static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
init_completion(&cmd->done);
cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
- if (!cmd->urb_tx) {
- ar5523_err(ar, "could not allocate urb\n");
+ if (!cmd->urb_tx)
return -ENOMEM;
- }
cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
GFP_KERNEL,
&cmd->urb_tx->transfer_dma);
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index acec16b9cf49..766c63bf05c4 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -91,59 +91,37 @@ static int ath10k_ahb_clock_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd");
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
ath10k_err(ar, "failed to get cmd clk: %ld\n",
PTR_ERR(ar_ahb->cmd_clk));
- ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
- goto out;
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
}
- ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref");
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
ath10k_err(ar, "failed to get ref clk: %ld\n",
PTR_ERR(ar_ahb->ref_clk));
- ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
- goto err_cmd_clk_put;
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
}
- ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc");
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
ath10k_err(ar, "failed to get rtc clk: %ld\n",
PTR_ERR(ar_ahb->rtc_clk));
- ret = ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
- goto err_ref_clk_put;
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
}
return 0;
-
-err_ref_clk_put:
- clk_put(ar_ahb->ref_clk);
-
-err_cmd_clk_put:
- clk_put(ar_ahb->cmd_clk);
-
-out:
- return ret;
}
static void ath10k_ahb_clock_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
- clk_put(ar_ahb->cmd_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
- clk_put(ar_ahb->ref_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
- clk_put(ar_ahb->rtc_clk);
-
ar_ahb->cmd_clk = NULL;
ar_ahb->ref_clk = NULL;
ar_ahb->rtc_clk = NULL;
@@ -213,92 +191,51 @@ static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold");
- if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) {
+ ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->core_cold_rst));
- ret = ar_ahb->core_cold_rst ?
- PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV;
- goto out;
+ return PTR_ERR(ar_ahb->core_cold_rst);
}
- ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold");
- if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) {
+ ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_cold_rst));
- ret = ar_ahb->radio_cold_rst ?
- PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV;
- goto err_core_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_cold_rst);
}
- ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm");
- if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) {
+ ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_warm_rst));
- ret = ar_ahb->radio_warm_rst ?
- PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV;
- goto err_radio_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_warm_rst);
}
- ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif");
- if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) {
+ ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_srif_rst));
- ret = ar_ahb->radio_srif_rst ?
- PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV;
- goto err_radio_warm_rst_put;
+ return PTR_ERR(ar_ahb->radio_srif_rst);
}
- ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init");
- if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
PTR_ERR(ar_ahb->cpu_init_rst));
- ret = ar_ahb->cpu_init_rst ?
- PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV;
- goto err_radio_srif_rst_put;
+ return PTR_ERR(ar_ahb->cpu_init_rst);
}
return 0;
-
-err_radio_srif_rst_put:
- reset_control_put(ar_ahb->radio_srif_rst);
-
-err_radio_warm_rst_put:
- reset_control_put(ar_ahb->radio_warm_rst);
-
-err_radio_cold_rst_put:
- reset_control_put(ar_ahb->radio_cold_rst);
-
-err_core_cold_rst_put:
- reset_control_put(ar_ahb->core_cold_rst);
-
-out:
- return ret;
}
static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst))
- reset_control_put(ar_ahb->core_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst))
- reset_control_put(ar_ahb->radio_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst))
- reset_control_put(ar_ahb->radio_warm_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst))
- reset_control_put(ar_ahb->radio_srif_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst))
- reset_control_put(ar_ahb->cpu_init_rst);
-
ar_ahb->core_cold_rst = NULL;
ar_ahb->radio_cold_rst = NULL;
ar_ahb->radio_warm_rst = NULL;
@@ -462,13 +399,13 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar)
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
ath10k_pci_disable_and_clear_legacy_irq(ar);
- tasklet_schedule(&ar_pci->intr_tq);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
@@ -572,12 +509,13 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
if (ar_ahb->irq < 0) {
ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
goto err_clock_deinit;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
@@ -717,6 +655,9 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
synchronize_irq(ar_ahb->irq);
ath10k_pci_flush(ar);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -748,6 +689,7 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
+ napi_enable(&ar->napi);
return 0;
@@ -831,7 +773,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
goto err_resource_deinit;
}
- ath10k_pci_init_irq_tasklets(ar);
+ ath10k_pci_init_napi(ar);
ret = ath10k_ahb_request_irq_legacy(ar);
if (ret)
@@ -846,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
goto err_halt_device;
}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 3d29b0875b3e..2872d347ea78 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ "bmi fast download address 0x%x buffer 0x%pK length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 9fb8d7472d18..0b4d79659884 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -39,7 +39,7 @@
* chooses what to send (buffer address, length). The destination
* side keeps a supply of "anonymous receive buffers" available and
* it handles incoming data as it arrives (when the destination
- * recieves an interrupt).
+ * receives an interrupt).
*
* The sender may send a simple buffer (address/length) or it may
* send a small list of buffers. When a small list is sent, hardware
@@ -433,6 +433,13 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent the CE ring from getting stuck when the ring is full.
+ * Make sure that the write index is 1 less than the read index.
+ */
+ if ((cur_write_idx + nentries) == dest_ring->sw_index)
+ nentries -= 1;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
@@ -840,7 +847,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %p\n",
+ "boot init ce src ring id %d entries %d base_addr %pK\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -874,7 +881,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
+ "boot ce dest ring id %d entries %d base_addr %pK\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index e88982921aa3..21ae8d663e67 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,7 +60,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
@@ -68,6 +67,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -79,7 +80,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA9887_HW_1_0_FW_DIR,
@@ -87,6 +87,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -104,6 +106,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -114,7 +118,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
@@ -122,6 +125,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -132,7 +137,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
@@ -140,6 +144,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -150,7 +156,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
@@ -159,6 +164,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -171,7 +178,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@@ -182,6 +188,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -194,7 +203,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@@ -205,6 +213,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -216,7 +227,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.continuous_frag_desc = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 3,
.rx_chain_mask = 3,
.max_spatial_stream = 2,
@@ -227,6 +237,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -244,6 +257,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -261,6 +276,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -274,7 +291,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0x3,
.rx_chain_mask = 0x3,
.max_spatial_stream = 2,
@@ -285,6 +301,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
};
@@ -304,6 +323,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+ [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -699,7 +719,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
@@ -745,7 +765,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data = ar->running_fw->fw_file.firmware_data;
data_len = ar->running_fw->fw_file.firmware_len;
- ret = ath10k_swap_code_seg_configure(ar);
+ ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
if (ret) {
ath10k_err(ar, "failed to configure fw code swap: %d\n",
ret);
@@ -753,7 +773,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %p len %d\n",
+ "boot uploading firmware image %pK len %d\n",
data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@@ -787,7 +807,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->pre_cal_file))
release_firmware(ar->pre_cal_file);
- ath10k_swap_code_seg_release(ar);
+ ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
ar->normal_mode_fw.fw_file.otp_data = NULL;
ar->normal_mode_fw.fw_file.otp_len = 0;
@@ -1497,14 +1517,14 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath10k_drain_tx(ar);
- complete_all(&ar->scan.started);
- complete_all(&ar->scan.completed);
- complete_all(&ar->scan.on_channel);
- complete_all(&ar->offchan_tx_completed);
- complete_all(&ar->install_key_done);
- complete_all(&ar->vdev_setup_done);
- complete_all(&ar->thermal.wmi_sync);
- complete_all(&ar->bss_survey_done);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->offchan_tx_completed);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->thermal.wmi_sync);
+ complete(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -1705,6 +1725,55 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+ int ret;
+ int vdev_id;
+ int vdev_type;
+ int vdev_subtype;
+ const u8 *vdev_addr;
+
+ vdev_id = 0;
+ vdev_type = WMI_VDEV_TYPE_STA;
+ vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+ vdev_addr = ar->mac_addr;
+
+ ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+ vdev_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+ * serialized properly implicitly.
+ *
+ * Moreover (most) WMI commands have no explicit acknowledgements. It is
+ * possible to infer it implicitly by poking firmware with echo
+ * command - getting a reply means all preceding commands have been
+ * (mostly) processed.
+ *
+ * In case of vdev create/delete this is sufficient.
+ *
+ * Without this it's possible to end up with a race when the HTT Rx ring
+ * is started before the vdev create/delete hack is complete, allowing a
+ * short window of opportunity to receive (and Tx ACK) a bunch of frames.
+ */
+ ret = ath10k_wmi_barrier(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@@ -1872,6 +1941,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ /* Some firmware revisions do not properly set up hardware rx filter
+ * registers.
+ *
+ * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+ * is filled with 0s instead of 1s, allowing HW to respond with ACKs to
+ * any frame that matches MAC_PCU_RX_FILTER, which is also
+ * misconfigured to accept anything.
+ *
+ * The ADDR1 is programmed using an internal firmware structure field and
+ * can't be (easily/sanely) reached from the driver explicitly. It is
+ * possible to implicitly make it correct by creating a dummy vdev and
+ * then deleting it.
+ */
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar, "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* If firmware indicates Full Rx Reorder support it must be used in a
* slightly different manner. Let HTT code know.
*/
@@ -1884,7 +1972,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
- ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
@@ -2031,7 +2122,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar);
+ ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
@@ -2072,6 +2163,9 @@ static void ath10k_core_register_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
+ /* peer stats are enabled by default */
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err(ar, "could not probe fw (%d)\n", status);
@@ -2249,6 +2343,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ init_dummy_netdev(&ar->napi_dev);
+
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_aux_wq;
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 30ae5bf81611..dda49af1eb74 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -65,6 +65,10 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET 64
+#define ATH10K_NAPI_QUOTA_LIMIT 60
+
struct ath10k;
enum ath10k_bus {
@@ -142,6 +146,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
+ struct completion barrier;
wait_queue_head_t tx_credits_wq;
DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
@@ -196,10 +201,10 @@ struct ath10k_fw_stats_pdev {
/* PDEV stats */
s32 ch_noise_floor;
- u32 tx_frame_count;
- u32 rx_frame_count;
- u32 rx_clear_count;
- u32 cycle_count;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
@@ -440,7 +445,7 @@ struct ath10k_debug {
struct completion tpc_complete;
/* protected by conf_mutex */
- u32 fw_dbglog_mask;
+ u64 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter;
u32 reg_addr;
@@ -551,6 +556,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+ /* Older firmware with HTT delivers incorrect tx status for null func
+ * frames to the driver, but this is fixed in 10.2 and 10.4 firmware versions.
+ * Also, this workaround results in reporting of incorrect null func
+ * status for 10.4. This flag is used to skip the workaround.
+ */
+ ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -663,6 +675,15 @@ struct ath10k_fw_file {
const void *codeswap_data;
size_t codeswap_len;
+
+ /* The original idea of struct ath10k_fw_file was that it only
+ * contains struct firmware and pointers to various parts (actual
+ * firmware binary, otp, metadata etc) of the file. This seg_info
+ * is actually created separately, but as it is used similarly to
+ * the other firmware components it's more convenient to have it
+ * here.
+ */
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
};
struct ath10k_fw_components {
@@ -715,53 +736,7 @@ struct ath10k {
struct ath10k_htc htc;
struct ath10k_htt htt;
- struct ath10k_hw_params {
- u32 id;
- u16 dev_id;
- const char *name;
- u32 patch_load_addr;
- int uart_pin;
- u32 otp_exe_param;
-
- /* Type of hw cycle counter wraparound logic, for more info
- * refer enum ath10k_hw_cc_wraparound_type.
- */
- enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
-
- /* Some of chip expects fragment descriptor to be continuous
- * memory for any TX operation. Set continuous_frag_desc flag
- * for the hardware which have such requirement.
- */
- bool continuous_frag_desc;
-
- /* CCK hardware rate table mapping for the newer chipsets
- * like QCA99X0, QCA4019 got revised. The CCK h/w rate values
- * are in a proper order with respect to the rate/preamble
- */
- bool cck_rate_map_rev2;
-
- u32 channel_counters_freq_hz;
-
- /* Mgmt tx descriptors threshold for limiting probe response
- * frames.
- */
- u32 max_probe_resp_desc_thres;
-
- /* The padding bytes's location is different on various chips */
- enum ath10k_hw_4addr_pad hw_4addr_pad;
-
- u32 tx_chain_mask;
- u32 rx_chain_mask;
- u32 max_spatial_stream;
- u32 cal_data_len;
-
- struct ath10k_hw_params_fw {
- const char *dir;
- const char *board;
- size_t board_size;
- size_t board_ext_size;
- } fw;
- } hw_params;
+ struct ath10k_hw_params hw_params;
/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
struct ath10k_fw_components normal_mode_fw;
@@ -775,10 +750,6 @@ struct ath10k {
const struct firmware *cal_file;
struct {
- struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
- } swap;
-
- struct {
u32 vendor;
u32 device;
u32 subsystem_vendor;
@@ -936,6 +907,10 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
+ /* NAPI */
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8f0fd41dfd4b..832da6ed9f13 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1228,9 +1228,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
unsigned int len;
- char buf[64];
+ char buf[96];
- len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+ len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -1242,15 +1242,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
- char buf[64];
- unsigned int log_level, mask;
+ char buf[96];
+ unsigned int log_level;
+ u64 mask;
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
- ret = sscanf(buf, "%x %u", &mask, &log_level);
+ ret = sscanf(buf, "%llx %u", &mask, &log_level);
if (!ret)
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 5b3c6bcf9598..175aae38c375 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
@@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
{
struct ath10k *ar = ep->htc->ar;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
@@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
goto out;
}
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 430a83e142aa..0d2ed09f202b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -595,7 +595,7 @@ enum htt_rx_mpdu_status {
/* only accept EAPOL frames */
HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
- /* Non-data in promiscous mode */
+ /* Non-data in promiscuous mode */
HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
@@ -900,7 +900,7 @@ struct htt_rx_in_ord_ind {
* Purpose: indicate how many 32-bit integers follow the message header
* - NUM_CHARS
* Bits 31:16
- * Purpose: indicate how many 8-bit charaters follow the series of integers
+ * Purpose: indicate how many 8-bit characters follow the series of integers
*/
struct htt_rx_test {
u8 num_ints;
@@ -1042,10 +1042,10 @@ struct htt_dbg_stats_wal_tx_stats {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
@@ -1665,7 +1665,6 @@ struct ath10k_htt {
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
- struct tasklet_struct txrx_compl_task;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@@ -1798,5 +1797,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 24c8d65bcf34..0b4c1562420f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -34,7 +34,6 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
@@ -226,7 +225,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- tasklet_kill(&htt->txrx_compl_task);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
@@ -520,9 +518,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
- tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
- (unsigned long)htt);
-
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -931,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -958,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
- ieee80211_rx(ar->hw, skb);
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@@ -1056,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1072,19 +1069,12 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
- ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
- /* The QCA99X0 4 address mode pad 2 bytes at the
- * beginning of MSDU
- */
- hdr = (struct ieee80211_hdr *)(msdu->data + 2);
- /* The skb length need be extended 2 as the 2 bytes at the tail
- * be excluded due to the padding
- */
- skb_put(msdu, 2);
- } else {
- hdr = (struct ieee80211_hdr *)(msdu->data);
- }
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
@@ -1113,6 +1103,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
@@ -1129,8 +1120,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- rfc1042 += round_up(hdr_len, 4) +
- round_up(crypto_len, 4);
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
@@ -1151,6 +1142,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
void *rfc1042;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1161,6 +1154,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, l3_pad_bytes);
+
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
@@ -1191,6 +1189,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1198,7 +1198,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1527,7 +1531,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
- int ret;
+ int ret, num_msdus;
__skb_queue_head_init(&amsdu);
@@ -1549,13 +1553,14 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
+ num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
- return 0;
+ return num_msdus;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1579,15 +1584,6 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
mpdu_count += mpdu_ranges[i].mpdu_count;
atomic_add(mpdu_count, &htt->num_mpdus_ready);
-
- tasklet_schedule(&htt->txrx_compl_task);
-}
-
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
-{
- atomic_inc(&htt->num_mpdus_ready);
-
- tasklet_schedule(&htt->txrx_compl_task);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
@@ -1772,14 +1768,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
-static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
- struct sk_buff_head *list)
+static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
+ int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1819,10 +1816,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
+ num_msdu++;
}
+ return num_msdu;
}
-static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
@@ -1835,12 +1834,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
- int ret;
+ int ret, num_msdus = 0;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
- return;
+ return -EIO;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@@ -1859,7 +1858,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
- return;
+ return -EINVAL;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@@ -1870,14 +1869,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
- return;
+ return -EIO;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
- ath10k_htt_rx_h_rx_offload(ar, &list);
+ num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@@ -1890,6 +1889,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
+ num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@@ -1902,9 +1902,10 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
- return;
+ return -EIO;
}
}
+ return num_msdus;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2267,7 +2268,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
- tasklet_schedule(&htt->txrx_compl_task);
break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
@@ -2284,7 +2284,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
- ath10k_htt_rx_frag_handler(htt);
+ atomic_inc(&htt->num_mpdus_ready);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -2320,8 +2320,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
- tasklet_schedule(&htt->txrx_compl_task);
+ __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2347,7 +2346,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
- tasklet_schedule(&htt->txrx_compl_task);
break;
}
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
@@ -2376,27 +2374,77 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
- struct ath10k *ar = htt->ar;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {};
- struct sk_buff_head rx_ind_q;
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
- int num_mpdus;
+ int quota = 0, done, num_rx_msdus;
+ bool resched_napi = false;
- __skb_queue_head_init(&rx_ind_q);
__skb_queue_head_init(&tx_ind_q);
- spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
- skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
- spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
+ /* Since in-ord-ind can deliver more than 1 A-MSDU in a single event,
+ * process it first to utilize the full available quota.
+ */
+ while (quota < budget) {
+ if (skb_queue_empty(&htt->rx_in_ord_compl_q))
+ break;
- spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
- skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
- spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+ skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
+ if (!skb) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ dev_kfree_skb_any(skb);
+ if (num_rx_msdus > 0)
+ quota += num_rx_msdus;
+
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ while (quota < budget) {
+ /* no more data to receive */
+ if (!atomic_read(&htt->num_mpdus_ready))
+ break;
+
+ num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ quota += num_rx_msdus;
+ atomic_dec(&htt->num_mpdus_ready);
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ atomic_read(&htt->num_mpdus_ready)) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ /* From NAPI documentation:
+ * The napi poll() function may also process TX completions, in which
+ * case if it processes the entire TX ring then it should count that
+ * work as the rest of the budget.
+ */
+ if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+ quota = budget;
/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
* From kfifo_get() documentation:
@@ -2406,27 +2454,24 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
while (kfifo_get(&htt->txdone_fifo, &tx_done))
ath10k_txrx_tx_unref(htt, &tx_done);
- while ((skb = __skb_dequeue(&tx_ind_q))) {
- ath10k_htt_rx_tx_fetch_ind(ar, skb);
- dev_kfree_skb_any(skb);
- }
-
- num_mpdus = atomic_read(&htt->num_mpdus_ready);
-
- while (num_mpdus) {
- if (ath10k_htt_rx_handle_amsdu(htt))
- break;
+ ath10k_mac_tx_push_pending(ar);
- num_mpdus--;
- atomic_dec(&htt->num_mpdus_ready);
- }
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
- while ((skb = __skb_dequeue(&rx_ind_q))) {
- spin_lock_bh(&htt->rx_ring.lock);
- ath10k_htt_rx_in_ord_ind(ar, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
+exit:
ath10k_htt_rx_msdu_buff_replenish(htt);
+ /* In case of an rx failure or more data to read, report the full budget
+ * so that the NAPI poll is rescheduled
+ */
+ done = resched_napi ? budget : quota;
+
+ return done;
}
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7c072b605bc7..ae5b33fe5ba8 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -390,8 +390,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
int size;
- tasklet_kill(&htt->txrx_compl_task);
-
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index f903d468dbe6..675e75d66db2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -85,7 +85,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce7_base_address = 0x0004bc00,
/* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
- * It is not really neccessary to assign address for newly supported
+ * It is not really necessary to assign address for newly supported
* CEs in this address table.
* Copy Engine Address
* CE8 0x0004c000
@@ -219,3 +219,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
+
+const struct ath10k_hw_ops qca988x_ops = {
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index e014cd732a0d..6038b7486f1d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -284,7 +284,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
-/* Known pecularities:
+/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
@@ -338,11 +338,6 @@ enum ath10k_hw_rate_rev2_cck {
ATH10K_HW_RATE_REV2_CCK_SP_11M,
};
-enum ath10k_hw_4addr_pad {
- ATH10K_HW_4ADDR_PAD_AFTER,
- ATH10K_HW_4ADDR_PAD_BEFORE,
-};
-
enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_DISABLED = 0,
@@ -363,6 +358,80 @@ enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
};
+struct ath10k_hw_params {
+ u32 id;
+ u16 dev_id;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+ u32 otp_exe_param;
+
+ /* Type of hw cycle counter wraparound logic, for more info
+ * refer enum ath10k_hw_cc_wraparound_type.
+ */
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+ /* Some of chip expects fragment descriptor to be continuous
+ * memory for any TX operation. Set continuous_frag_desc flag
+ * for the hardware which have such requirement.
+ */
+ bool continuous_frag_desc;
+
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets such as QCA99X0 and QCA4019; the CCK h/w rate values
+ * are now in proper order with respect to rate/preamble.
+ */
+ bool cck_rate_map_rev2;
+
+ u32 channel_counters_freq_hz;
+
+ /* Mgmt tx descriptors threshold for limiting probe response
+ * frames.
+ */
+ u32 max_probe_resp_desc_thres;
+
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+ u32 cal_data_len;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *board;
+ size_t board_size;
+ size_t board_ext_size;
+ } fw;
+
+ /* qca99x0 family chips deliver broadcast/multicast management
+ * frames encrypted and expect software to do the decryption.
+ */
+ bool sw_decrypt_mcast_mgmt;
+
+ const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
+};
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_rx_desc *rxd)
+{
+ if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+ return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
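Aside (sketch under simplified, invented types -- not ath10k's real structures): the new ath10k_hw_ops table plus the inline wrapper above form an optional per-chip callback with a safe default, so chips that do not implement the L3 padding query simply report 0. The same pattern in a self-contained form:

/* Optional per-chip op with a default fallback. */
#include <stdio.h>

struct rx_desc {
	int l3_pad;
};

struct hw_ops {
	int (*rx_desc_get_l3_pad_bytes)(const struct rx_desc *rxd);
};

static int qca99x0_l3_pad(const struct rx_desc *rxd)
{
	return rxd->l3_pad;
}

static const struct hw_ops chip_a_ops = {
	.rx_desc_get_l3_pad_bytes = NULL,	/* no op: falls back to 0 */
};

static const struct hw_ops chip_b_ops = {
	.rx_desc_get_l3_pad_bytes = qca99x0_l3_pad,
};

static int get_l3_pad_bytes(const struct hw_ops *ops, const struct rx_desc *rxd)
{
	if (ops->rx_desc_get_l3_pad_bytes)
		return ops->rx_desc_get_l3_pad_bytes(rxd);
	return 0;
}

int main(void)
{
	struct rx_desc d = { .l3_pad = 2 };

	printf("%d %d\n", get_l3_pad_bytes(&chip_a_ops, &d),
	       get_l3_pad_bytes(&chip_b_ops, &d));	/* prints: 0 2 */
	return 0;
}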
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0bbd0a00edcc..76297d69f1ed 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -824,7 +824,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@@ -2793,7 +2793,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
- ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->def_wep_key_idx = -1;
@@ -3255,6 +3255,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
@@ -3524,7 +3526,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
skb);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -3586,7 +3588,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -3643,7 +3645,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
skb);
if (!peer && tmp_peer_created) {
@@ -3777,7 +3779,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
size_t skb_len;
+ bool is_mgmt, is_presp;
int ret;
spin_lock_bh(&ar->htt.tx_lock);
@@ -3801,6 +3805,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
if (unlikely(ret)) {
@@ -3808,6 +3828,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
@@ -3894,7 +3916,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
ar->scan.roc_freq = 0;
ath10k_offchan_tx_purge(ar);
cancel_delayed_work(&ar->scan.timeout);
- complete_all(&ar->scan.completed);
+ complete(&ar->scan.completed);
break;
}
}
@@ -4100,13 +4122,29 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_txq *f_txq;
+ struct ath10k_txq *f_artxq;
+ int ret = 0;
+ int max = 16;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
list_add_tail(&artxq->list, &ar->txqs);
+
+ f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+ list_del_init(&f_artxq->list);
+
+ while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ if (ret)
+ break;
+ }
+ if (ret != -ENOENT)
+ list_add_tail(&f_artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
- ath10k_mac_tx_push_pending(ar);
+ ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}
@@ -5186,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
ret = ath10k_monitor_recalc(ar);
if (ret)
- ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
}
@@ -5984,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d peer delete %pM (sta gone)\n",
- arvif->vdev_id, sta->addr);
+ "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ arvif->vdev_id, sta->addr, sta);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
@@ -6001,7 +6039,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
+ ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@@ -6538,7 +6576,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}
- ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
@@ -7134,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %hu width %d ptr %p\n",
+ "mac chanctx add freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -7158,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %hu width %d ptr %p\n",
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -7223,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -7281,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %p vdev_id %i\n",
+ "mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -7342,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %p vdev_id %i\n",
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
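Aside (toy model, hypothetical names): the reworked wake_tx_queue handler above pushes the first queue in the rotation directly, at most 16 frames per wake, and re-queues it at the tail unless the push reported -ENOENT (queue drained). Roughly:

/* Toy model of the bounded, round-robin push done in wake_tx_queue above. */
#include <stdio.h>

#define TXQ_EMPTY (-2)			/* stands in for -ENOENT */

struct txq {
	int frames;
};

static int push_one(struct txq *q)
{
	if (q->frames == 0)
		return TXQ_EMPTY;	/* nothing left to push */
	q->frames--;
	return 0;
}

static void wake_tx_queue(struct txq *q)
{
	int max = 16, ret = 0;

	while (max-- > 0) {		/* at most 16 frames per wake */
		ret = push_one(q);
		if (ret)
			break;
	}
	if (ret != TXQ_EMPTY)
		printf("re-queued at tail, %d frames left\n", q->frames);
	else
		printf("queue drained, dropped from the rotation\n");
}

int main(void)
{
	struct txq a = { .frames = 5 }, b = { .frames = 40 };

	wake_tx_queue(&a);		/* drains after 5 frames */
	wake_tx_queue(&b);		/* hits the 16-frame cap */
	return 0;
}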
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 07933c51a850..0457e315d336 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1506,12 +1506,10 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-void ath10k_pci_kill_tasklet(struct ath10k *ar)
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- tasklet_kill(&ar_pci->intr_tq);
-
del_timer_sync(&ar_pci->rx_post_retry);
}
@@ -1570,7 +1568,7 @@ void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
ul_pipe, dl_pipe);
}
-static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
@@ -1693,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
- struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;
ar = pci_pipe->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
@@ -1753,7 +1749,7 @@ void ath10k_pci_ce_deinit(struct ath10k *ar)
void ath10k_pci_flush(struct ath10k *ar)
{
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
ath10k_pci_buffer_cleanup(ar);
}
@@ -1780,6 +1776,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -2533,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
+ napi_enable(&ar->napi);
return 0;
@@ -2725,7 +2724,7 @@ static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
return 0;
err_free:
- kfree(data);
+ kfree(caldata);
return -EINVAL;
}
@@ -2772,35 +2771,53 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
- if (!ath10k_pci_irq_pending(ar))
- return IRQ_NONE;
-
- ath10k_pci_disable_and_clear_legacy_irq(ar);
- }
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ !ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
- tasklet_schedule(&ar_pci->intr_tq);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
-static void ath10k_pci_tasklet(unsigned long data)
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
- struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
if (ath10k_pci_has_fw_crashed(ar)) {
- ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
- return;
+ napi_complete(ctx);
+ return done;
}
ath10k_ce_per_engine_service_any(ar);
- /* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete(ctx);
+ /* In case of MSI, it is possible that interrupts are received
+ * while the NAPI poll is in progress, so pending interrupts that
+ * arrive after all copy engine pipes have been processed by the
+ * NAPI poll would not be handled again. This caused failures to
+ * complete the boot sequence on x86 platforms, so before
+ * re-enabling interrupts it is safer to check for pending
+ * interrupts and service them immediately.
+ */
+ if (CE_INTERRUPT_SUMMARY(ar)) {
+ napi_reschedule(ctx);
+ goto out;
+ }
ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+ }
+
+out:
+ return done;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@@ -2858,11 +2875,10 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
free_irq(ar_pci->pdev->irq, ar);
}
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+void ath10k_pci_init_napi(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
+ ATH10K_NAPI_BUDGET);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2870,7 +2886,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_pci_init_irq_tasklets(ar);
+ ath10k_pci_init_napi(ar);
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
ath10k_info(ar, "limiting irq mode to: %d\n",
@@ -3062,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_master;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
return 0;
err_master:
@@ -3131,7 +3147,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
void ath10k_pci_release_resource(struct ath10k *ar)
{
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
+ netif_napi_del(&ar->napi);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
}
@@ -3297,7 +3314,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
err_free_irq:
ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
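Aside (userspace model, not the kernel NAPI API): the tasklet-to-NAPI conversion above follows the standard NAPI contract -- process at most `budget` completions, complete the poll and unmask interrupts only when finished early, and return the amount of work done. A compilable model of that contract:

/* Userspace model of the NAPI poll contract; no real NAPI calls involved. */
#include <stdbool.h>
#include <stdio.h>

static int pending;		/* completions waiting in the "hardware" */
static bool irq_enabled;	/* device interrupt currently unmasked   */

static int napi_poll(int budget)
{
	int done = 0;

	while (pending > 0 && done < budget) {
		pending--;	/* service one completion */
		done++;
	}

	if (done < budget)	/* caught up: leave poll mode, unmask IRQ */
		irq_enabled = true;

	/* done == budget means "poll me again"; the IRQ stays masked */
	return done;
}

int main(void)
{
	pending = 100;
	irq_enabled = false;	/* the IRQ handler masked IRQs, scheduled NAPI */

	while (!irq_enabled)
		printf("poll -> %d (pending %d)\n", napi_poll(64), pending);
	return 0;
}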
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 6eca1df2ce60..9854ad56b2de 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -177,8 +177,6 @@ struct ath10k_pci {
/* Operating interrupt mode */
enum ath10k_pci_irq_mode oper_irq_mode;
- struct tasklet_struct intr_tq;
-
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
/* Copy Engine used for Diagnostic Accesses */
@@ -294,8 +292,7 @@ void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(unsigned long ptr);
void ath10k_pci_ce_deinit(struct ath10k *ar);
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
-void ath10k_pci_kill_tasklet(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
@@ -303,6 +300,7 @@ void ath10k_pci_flush(struct ath10k *ar);
void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
void ath10k_pci_release_resource(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 0c5f5863dac8..adf4592374b4 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -134,17 +134,18 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}
-int ath10k_swap_code_seg_configure(struct ath10k *ar)
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
- if (!ar->swap.firmware_swap_code_seg_info)
+ if (!fw_file->firmware_swap_code_seg_info)
return 0;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
- seg_info = ar->swap.firmware_swap_code_seg_info;
+ seg_info = fw_file->firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@@ -158,28 +159,29 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar)
return 0;
}
-void ath10k_swap_code_seg_release(struct ath10k *ar)
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
- ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
/* FIXME: these two assignments look to bein wrong place! Shouldn't
* they be in ath10k_core_free_firmware_files() like the rest?
*/
- ar->normal_mode_fw.fw_file.codeswap_data = NULL;
- ar->normal_mode_fw.fw_file.codeswap_len = 0;
+ fw_file->codeswap_data = NULL;
+ fw_file->codeswap_len = 0;
- ar->swap.firmware_swap_code_seg_info = NULL;
+ fw_file->firmware_swap_code_seg_info = NULL;
}
-int ath10k_swap_code_seg_init(struct ath10k *ar)
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
const void *codeswap_data;
size_t codeswap_len;
- codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
- codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
+ codeswap_data = fw_file->codeswap_data;
+ codeswap_len = fw_file->codeswap_len;
if (!codeswap_len || !codeswap_data)
return 0;
@@ -200,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
return ret;
}
- ar->swap.firmware_swap_code_seg_info = seg_info;
+ fw_file->firmware_swap_code_seg_info = seg_info;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 36991c7b07a0..f5dc0476493e 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -23,6 +23,8 @@
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+struct ath10k_fw_file;
+
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
@@ -58,8 +60,11 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
-int ath10k_swap_code_seg_configure(struct ath10k *ar);
-void ath10k_swap_code_seg_release(struct ath10k *ar);
-int ath10k_swap_code_seg_init(struct ath10k *ar);
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
#endif
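Aside (sketch with trimmed-down, invented types): the prototype changes above replace the implicit ar->swap / normal-mode-only state with an explicit fw_file argument, so the same helpers can serve both the normal and the UTF (testmode) firmware images:

/* State travels with the firmware image instead of one hard-coded field. */
#include <stdio.h>
#include <stddef.h>

struct fw_file {
	const char *name;
	void *firmware_swap_code_seg_info;
};

static int swap_code_seg_init(struct fw_file *fw_file)
{
	static int fake_seg_info;

	fw_file->firmware_swap_code_seg_info = &fake_seg_info;
	printf("code swap initialised for %s\n", fw_file->name);
	return 0;
}

static void swap_code_seg_release(struct fw_file *fw_file)
{
	fw_file->firmware_swap_code_seg_info = NULL;	/* per image, not global */
}

int main(void)
{
	struct fw_file normal = { .name = "normal firmware" };
	struct fw_file utf = { .name = "UTF (testmode) firmware" };

	swap_code_seg_init(&normal);
	swap_code_seg_init(&utf);	/* now possible without touching ar->swap */
	swap_code_seg_release(&utf);
	swap_code_seg_release(&normal);
	return 0;
}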
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index aaf53a81e78b..a47cab44d9c8 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -405,7 +405,7 @@ Fw Mode/SubMode Mask
* 1. target firmware would check magic number and if it's a match, firmware
* would consider the bits[0:15] are valid and base on that to calculate
* the end of DRAM. Early allocation would be located at that area and
- * may be reclaimed when necesary
+ * may be reclaimed when necessary
* 2. if no magic number is found, early allocation would happen at "_end"
* symbol of ROM which is located before the app-data and might NOT be
* re-claimable. If this is adopted, link script should keep this in
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 120f4234d3b0..ed85f938e3c0 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -23,6 +23,7 @@
#include "wmi.h"
#include "hif.h"
#include "hw.h"
+#include "core.h"
#include "testmode_i.h"
@@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -240,6 +241,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to init utf code swap segment: %d\n",
+ ret);
+ goto err_release_utf_mode_fw;
+ }
+ }
+
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
@@ -279,6 +292,11 @@ err_power_down:
ath10k_hif_power_down(ar);
err_release_utf_mode_fw:
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@@ -301,6 +319,11 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@@ -360,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 444b52c7e4f3..0a47269be289 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -192,7 +192,7 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
- if (!config_enabled(CONFIG_HWMON))
+ if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index b29a86a26c13..9852c5d51139 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,7 +44,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
@@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
- ath10k_mac_tx_push_pending(ar);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 64ebd304f907..c9a8bb1186f2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -51,6 +51,8 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
+ int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -123,7 +125,7 @@ struct wmi_ops {
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
- struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@@ -194,6 +196,7 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
+ struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -349,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_echo_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
@@ -932,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}
static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;
@@ -1382,4 +1395,20 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_echo)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_echo(ar, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index e09337ee7c96..e64f59300a7c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_echo_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->value = ev->value;
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level) {
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
@@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->value = cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -3429,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@@ -3485,6 +3541,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_tlv_op_gen_echo,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d2462886b75c..54df425bb0fc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -29,6 +29,9 @@
#include "p2p.h"
#include "hw.h"
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
@@ -1874,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2240,6 +2243,29 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
return 0;
}
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However in case of PMF it delivers decrypted
+ * frames with Protected Bit set.
+ */
+ if (ieee80211_is_auth(hdr->frame_control))
+ return false;
+
+ /* qca99x0 based FW delivers broadcast or multicast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+ ar->hw_params.sw_decrypt_mcast_mgmt)
+ return false;
+
+ return true;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2326,11 +2352,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_handle_wep_reauth(ar, skb, status);
- /* FW delivers WEP Shared Auth frame with Protected Bit set and
- * encrypted payload. However in case of PMF it delivers decrypted
- * frames with Protected Bit set. */
- if (ieee80211_has_protected(hdr->frame_control) &&
- !ieee80211_is_auth(hdr->frame_control)) {
+ if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_action(hdr->frame_control) &&
@@ -2347,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -2495,7 +2517,21 @@ exit:
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+ struct wmi_echo_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event echo value 0x%08x\n",
+ le32_to_cpu(arg.value));
+
+ if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+ complete(&ar->wmi.barrier);
}
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@@ -3478,6 +3514,12 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
/* There are no completions for beacons so wait for next SWBA
* before telling mac80211 to decrement CSA counter
*
@@ -3527,7 +3569,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
- ret = -EIO;
goto skip;
}
@@ -4792,6 +4833,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ struct wmi_echo_event *ev = (void *)skb->data;
+
+ arg->value = ev->value;
+
+ return 0;
+}
+
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -5124,6 +5176,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5133,6 +5186,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; otherwise we just pass WMI
+ * events through to testmode.
+ */
+ if (consumed && id != WMI_10_2_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_2_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5248,6 +5313,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_4_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5257,6 +5323,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; otherwise we just pass WMI
+ * events through to testmode.
+ */
+ if (consumed && id != WMI_10_4_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_4_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5306,6 +5384,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_4_WDS_PEER_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@@ -6863,7 +6942,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
@@ -6901,6 +6980,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
}
static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_10_4_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le64(module_enable);
+ cmd->module_valid = __cpu_to_le64(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+ __le64_to_cpu(cmd->module_enable),
+ __le64_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
struct wmi_pdev_pktlog_enable_cmd *cmd;
@@ -7649,6 +7766,48 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
return skb;
}
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_echo_cmd *)skb->data;
+ cmd->value = cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi echo value 0x%08x\n", value);
+ return skb;
+}
+
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+ int ret;
+ int time_left;
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->wmi.barrier);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+ ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7665,6 +7824,7 @@ static const struct wmi_ops wmi_ops = {
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7709,6 +7869,7 @@ static const struct wmi_ops wmi_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7738,6 +7899,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7777,6 +7939,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7796,6 +7959,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7807,6 +7971,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7862,6 +8027,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7873,6 +8039,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7968,7 +8135,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
- .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@@ -7980,10 +8147,12 @@ static const struct wmi_ops wmi_10_4_ops = {
.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+ .gen_echo = ath10k_wmi_op_gen_echo,
};
int ath10k_wmi_attach(struct ath10k *ar)
@@ -8036,6 +8205,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
+ init_completion(&ar->wmi.barrier);
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
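Aside (userspace model with pthreads, hypothetical names): the new ath10k_wmi_barrier() above flushes the WMI pipe by sending an echo command carrying a magic cookie and waiting, with a 3 * HZ timeout, for the echo event handler to complete &ar->wmi.barrier. The same idea in a self-contained program (build with -pthread):

/* Self-contained model of "barrier via echo": post an echo carrying a magic
 * cookie and wait, with a timeout, for the event handler to hand it back.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define BARRIER_ECHO_ID	0x0BA991E9u

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t barrier = PTHREAD_COND_INITIALIZER;
static unsigned int echoed;

static void *fw_thread(void *arg)	/* "firmware": echoes the value back */
{
	unsigned int value = *(unsigned int *)arg;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	nanosleep(&delay, NULL);
	pthread_mutex_lock(&lock);
	echoed = value;			/* the echo event handler's job */
	pthread_cond_signal(&barrier);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int wmi_barrier(void)
{
	unsigned int cookie = BARRIER_ECHO_ID;
	struct timespec deadline;
	pthread_t fw;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 3;				/* ~3 * HZ timeout */

	pthread_create(&fw, NULL, fw_thread, &cookie);	/* "send the echo" */

	pthread_mutex_lock(&lock);
	while (echoed != BARRIER_ECHO_ID && ret == 0)
		ret = pthread_cond_timedwait(&barrier, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	pthread_join(fw, NULL);
	return echoed == BARRIER_ECHO_ID ? 0 : -1;	/* -1 ~ -ETIMEDOUT */
}

int main(void)
{
	printf("barrier %s\n", wmi_barrier() ? "timed out" : "completed");
	return 0;
}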
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 3ef468893b3f..1b243c899bef 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -55,7 +55,7 @@
* type.
*
* 6. Comment each parameter part of the WMI command/event structure by
- * using the 2 stars at the begining of C comment instead of one star to
+ * using the 2 stars at the beginning of C comment instead of one star to
* enable HTML document generation using Doxygen.
*
*/
@@ -180,6 +180,7 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
@@ -305,6 +306,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
@@ -2082,7 +2087,7 @@ struct wmi_resource_config {
* In offload mode target supports features like WOW, chatter and
* other protocol offloads. In order to support them some
* functionalities like reorder buffering, PN checking need to be
- * done in target. This determines maximum number of peers suported
+ * done in target. This determines maximum number of peers supported
 * by target in offload mode
* by target in offload mode
*/
__le32 num_offload_peers;
@@ -2263,7 +2268,7 @@ struct wmi_resource_config {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2445,7 +2450,7 @@ struct wmi_resource_config_10x {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2739,7 +2744,7 @@ struct wmi_init_cmd {
struct wmi_host_mem_chunks mem_chunks;
} __packed;
-/* _10x stucture is from 10.X FW API */
+/* _10x structure is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
struct wmi_host_mem_chunks mem_chunks;
@@ -3962,7 +3967,7 @@ struct wmi_pdev_stats_tx {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
/* wal pdev continous xretry */
@@ -4217,10 +4222,10 @@ struct wmi_10_2_stats_event {
*/
struct wmi_pdev_stats_base {
__le32 chan_nf;
- __le32 tx_frame_count;
- __le32 rx_frame_count;
- __le32 rx_clear_count;
- __le32 cycle_count;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
__le32 phy_err_count;
__le32 chan_tx_pwr;
} __packed;
@@ -4456,9 +4461,9 @@ struct wmi_vdev_start_request_cmd {
__le32 flags;
/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
struct wmi_ssid ssid;
- /* beacon/probe reponse xmit rate. Applicable for SoftAP. */
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
__le32 bcn_tx_rate;
- /* beacon/probe reponse xmit power. Applicable for SoftAP. */
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
__le32 bcn_tx_power;
/* number of p2p NOA descriptor(s) from scan entry */
__le32 num_noa_descriptors;
@@ -4686,7 +4691,7 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_VDEV_PARAM_MGMT_TX_RATE,
@@ -4817,7 +4822,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_10X_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
@@ -5062,7 +5067,7 @@ struct wmi_vdev_simple_event {
} __packed;
/* VDEV start response status codes */
-/* VDEV succesfully started */
+/* VDEV successfully started */
#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
/* requested VDEV not found */
@@ -5378,7 +5383,7 @@ enum wmi_sta_ps_param_pspoll_count {
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -6169,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
+struct wmi_10_4_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config */
+ __le64 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le64 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -6296,6 +6315,10 @@ struct wmi_roam_ev_arg {
__le32 rssi;
};
+struct wmi_echo_ev_arg {
+ __le32 value;
+};
+
struct wmi_pdev_temperature_event {
/* temperature value in Celcius degree */
__le32 temperature;
@@ -6624,5 +6647,6 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
char *buf);
int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
#endif /* _WMI_H_ */
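Aside (standalone check using local copies of the constants): the WMI_UAPSD_AC_BIT_MASK cleanup above keeps the mapping where access category n owns bits 2n (delivery-enable) and 2n+1 (trigger-enable):

/* Standalone check of the (ac, type) -> bit mapping. */
#include <stdio.h>

#define UAPSD_AC_TYPE_DELI	0
#define UAPSD_AC_TYPE_TRIG	1
#define UAPSD_AC_BIT_MASK(ac, type) \
	(type == UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))

int main(void)
{
	int ac;

	for (ac = 0; ac < 4; ac++)
		printf("AC%d: delivery 0x%02x, trigger 0x%02x\n", ac,
		       UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_DELI),
		       UAPSD_AC_BIT_MASK(ac, UAPSD_AC_TYPE_TRIG));
	return 0;
}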
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 929d7ccc031c..4f8d9ed04f5e 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -909,7 +909,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
struct ath5k_hw *ah = inode->i_private;
bool res;
int i, ret;
- u32 eesize;
+ u32 eesize; /* NB: in 16-bit words */
u16 val, *buf;
/* Get eeprom size */
@@ -932,7 +932,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
/* Create buffer and read in eeprom */
- buf = vmalloc(eesize);
+ buf = vmalloc(eesize * 2);
if (!buf) {
ret = -ENOMEM;
goto err;
@@ -952,7 +952,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}
ep->buf = buf;
- ep->len = i;
+ ep->len = eesize * 2;
file->private_data = (void *)ep;
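Aside (sketch with made-up numbers): the ath5k change above exists because the EEPROM size is reported in 16-bit words, so the dump buffer and the exposed length must both be eesize * 2 bytes:

/* Word-sized EEPROM length vs. byte-sized buffer. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	unsigned int eesize = 0x800;			/* size in 16-bit words */
	size_t buf_bytes = eesize * sizeof(uint16_t);	/* bytes to allocate    */
	uint16_t *buf = malloc(buf_bytes);

	if (!buf)
		return 1;
	printf("%u words -> %zu bytes\n", eesize, buf_bytes);
	free(buf);
	return 0;
}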
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 72e2ec67768d..b7fe0af4cb24 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1449,14 +1449,14 @@ static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
return -EIO;
if (test_bit(CONNECTED, &vif->flags)) {
- ar->tx_pwr = 0;
+ ar->tx_pwr = 255;
if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
- wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255,
5 * HZ);
if (signal_pending(current)) {
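Aside (toy illustration, invented names): the ath6kl change above swaps the wait sentinel from 0 to 255 because 0 dBm is a value the firmware can legitimately report, so it cannot double as "not yet reported":

/* Toy illustration: 0 dBm is a valid report, so it cannot mean "unset". */
#include <stdio.h>

#define TX_PWR_UNSET 255	/* impossible dBm value used as the sentinel */

static unsigned char tx_pwr;

static void firmware_reports(unsigned char dbm)
{
	tx_pwr = dbm;
}

int main(void)
{
	tx_pwr = TX_PWR_UNSET;	/* with a 0 sentinel, a 0 dBm report looked unset */
	firmware_reports(0);	/* firmware reports 0 dBm */
	printf("report received: %s (%u dBm)\n",
	       tx_pwr == TX_PWR_UNSET ? "no" : "yes", tx_pwr);
	return 0;
}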
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 18c070850a09..d1942537ea10 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -64,7 +64,7 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
}
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
-#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_COUNT 60
#define REGISTER_DUMP_LEN_MAX 60
static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
@@ -73,9 +73,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
u32 i, address, regdump_addr = 0;
int ret;
- if (ar->target_type != TARGET_TYPE_AR6003)
- return;
-
/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
@@ -95,7 +92,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
- REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ REGISTER_DUMP_COUNT * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
@@ -105,9 +102,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);
- BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
+ BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4);
- for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+ for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
i,
le32_to_cpu(regdump_val[i]),
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index b8cf04d11975..3fd1cc98fd2f 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -3520,7 +3520,7 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
- return 0;
+ return ret;
}
int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index f68cb00450e0..8f231c67dd51 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -180,7 +180,7 @@ config ATH9K_HTC_DEBUGFS
config ATH9K_HWRNG
bool "Random number generator support"
depends on ATH9K && (HW_RANDOM = y || HW_RANDOM = ATH9K)
- default y
+ default n
---help---
This option incorporates the ADC register output as a source of
randomness into Linux entropy pool (/dev/urandom and /dev/random)
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 1b271b99c49e..8eea8d22e72e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -260,8 +260,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
int cur_bin;
int upper, lower, cur_vit_mask;
int i;
- int8_t mask_m[123];
- int8_t mask_p[123];
+ int8_t mask_m[123] = {0};
+ int8_t mask_p[123] = {0};
int8_t mask_amt;
int tmp_mask;
static const int pilot_mask_reg[4] = {
@@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -302,7 +299,7 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
upper = bin + 120;
lower = bin - 120;
- for (i = 0; i < 123; i++) {
+ for (i = 0; i < ARRAY_SIZE(mask_m); i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bd2cbaf582d..08607d7fdb56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3252,7 +3252,8 @@ static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
int i;
for (i = 0; i < mdata_size / 2; i++, data++)
- ath9k_hw_nvram_read(ah, i, data);
+ if (!ath9k_hw_nvram_read(ah, i, data))
+ return -EIO;
return 0;
}
@@ -3282,7 +3283,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ath9k_hw_use_flash(ah)) {
u8 txrx;
- ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+ if (ar9300_eeprom_restore_flash(ah, mptr, mdata_size))
+ return -EIO;
/* check if eeprom contains valid data */
eep = (struct ar9300_eeprom *) mptr;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 490f74d9ddf0..ddb28861e7fe 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -22,7 +22,7 @@
#ifdef CONFIG_MAC80211_LEDS
-void ath_fill_led_pin(struct ath_softc *sc)
+static void ath_fill_led_pin(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 7cb65c303f8d..e9f32b52fc8c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -924,7 +924,7 @@ static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
} else {
if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
vif->type == NL80211_IFTYPE_AP)
- iter_data->primary_beacon_vif = vif;
+ iter_data->primary_beacon_vif = vif;
}
iter_data->beacons = true;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 8ddd604bd00c..52bfbb988611 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -50,9 +50,11 @@ static u16 bits_per_symbol[][2] = {
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- int tx_flags, struct ath_txq *txq);
+ int tx_flags, struct ath_txq *txq,
+ struct ieee80211_sta *sta);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
@@ -77,6 +79,22 @@ enum {
/* Aggregation logic */
/*********************/
+static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->status.status_driver_data[0];
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ ieee80211_tx_status(hw, skb);
+ return;
+ }
+
+ if (sta)
+ ieee80211_tx_status_noskb(hw, sta, info);
+
+ dev_kfree_skb(skb);
+}
+
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
@@ -92,6 +110,7 @@ void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
+ struct ieee80211_hw *hw = sc->hw;
struct sk_buff_head q;
struct sk_buff *skb;
@@ -100,7 +119,7 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&q)))
- ieee80211_tx_status(sc->hw, skb);
+ ath_tx_status(hw, skb);
}
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
@@ -253,7 +272,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
list_add_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
if (sendbar) {
@@ -318,12 +337,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+ ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
continue;
}
list_add_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
}
@@ -426,15 +445,14 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
+ struct ath_atx_tid *tid,
struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
- struct ieee80211_sta *sta;
- struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
- struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
@@ -460,12 +478,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
for (i = 0; i < ts->ts_rateindex; i++)
retries += rates[i].count;
- rcu_read_lock();
-
- sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (!sta) {
- rcu_read_unlock();
-
INIT_LIST_HEAD(&bf_head);
while (bf) {
bf_next = bf->bf_next;
@@ -473,7 +486,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
bf = bf_next;
}
@@ -481,7 +494,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;
@@ -583,7 +595,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ts);
}
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
!txfail);
} else {
if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
@@ -604,7 +616,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq,
- &bf_head, ts, 0);
+ &bf_head, NULL, ts,
+ 0);
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
break;
@@ -648,8 +661,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
}
- rcu_read_unlock();
-
if (needreset)
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
@@ -664,7 +675,11 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
{
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ath_atx_tid *tid = NULL;
bool txok, flush;
txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@@ -677,6 +692,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
ts->ts_rateindex);
+
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
+ if (sta) {
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+ if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
+ tid->clear_ps_filter = true;
+ }
+
if (!bf_isampdu(bf)) {
if (!flush) {
info = IEEE80211_SKB_CB(bf->bf_mpdu);
@@ -685,9 +710,9 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
}
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
- ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
if (!flush)
ath_txq_schedule(sc, txq);
@@ -923,7 +948,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
list_add(&bf->list, &bf_head);
__skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
@@ -1832,6 +1857,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
+ rcu_read_lock();
ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -1850,6 +1876,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
+ rcu_read_unlock();
}
bool ath_drain_all_txq(struct ath_softc *sc)
@@ -2472,7 +2499,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- int tx_flags, struct ath_txq *txq)
+ int tx_flags, struct ath_txq *txq,
+ struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2492,15 +2520,17 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- padpos = ieee80211_hdrlen(hdr->frame_control);
- padsize = padpos & 3;
- if (padsize && skb->len>padpos+padsize) {
- /*
- * Remove MAC header padding before giving the frame back to
- * mac80211.
- */
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
+ if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len>padpos+padsize) {
+ /*
+ * Remove MAC header padding before giving the frame back to
+ * mac80211.
+ */
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
}
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -2515,12 +2545,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- __skb_queue_tail(&txq->complete_q, skb);
ath_txq_skb_done(sc, txq, skb);
+ tx_info->status.status_driver_data[0] = sta;
+ __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -2548,7 +2580,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
complete(&sc->paprd_complete);
} else {
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
- ath_tx_complete(sc, skb, tx_flags, txq);
+ ath_tx_complete(sc, skb, tx_flags, txq, sta);
}
skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -2700,10 +2732,12 @@ void ath_tx_tasklet(struct ath_softc *sc)
u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
int i;
+ rcu_read_lock();
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
ath_tx_processq(sc, &sc->tx.txq[i]);
}
+ rcu_read_unlock();
}
void ath_tx_edma_tasklet(struct ath_softc *sc)
@@ -2717,6 +2751,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head *fifo_list;
int status;
+ rcu_read_lock();
for (;;) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
@@ -2787,6 +2822,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
ath_txq_unlock_complete(sc, txq);
}
+ rcu_read_unlock();
}
/*****************/
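What the xmit.c rework amounts to: the station is looked up once in ath_tx_process_buffer() while the caller already holds rcu_read_lock(), the pointer is parked in the skb's status_driver_data[0], and the deferred completion either hands the full skb to mac80211 or reports through ieee80211_tx_status_noskb() and frees the skb itself. A condensed fragment of that flow (kernel context assumed; declarations, locking and error handling trimmed):

/* the tasklets wrap the whole processing loop in RCU, so the sta pointer
 * stays valid until the queued skb is finally reported */
rcu_read_lock();

sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
info->status.status_driver_data[0] = sta;	/* remembered for the status path */
__skb_queue_tail(&txq->complete_q, skb);

/* later, while draining complete_q: */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
	ieee80211_tx_status(hw, skb);		/* mac80211 consumes the skb */
} else {
	if (sta)
		ieee80211_tx_status_noskb(hw, sta, info);
	dev_kfree_skb(skb);
}

rcu_read_unlock();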
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 6808db433283..ec3a64e5d2bb 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -75,7 +75,8 @@ static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
if (!ar)
return -ENODEV;
- dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct carl9170_debugfs_fops, fops);
if (!dfops->read)
return -ENOSYS;
@@ -127,7 +128,8 @@ static ssize_t carl9170_debugfs_write(struct file *file,
if (!ar)
return -ENODEV;
- dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct carl9170_debugfs_fops, fops);
if (!dfops->write)
return -ENOSYS;
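debugfs can hand drivers a proxied file_operations, so container_of() on file->f_op no longer necessarily lands inside the driver's wrapper structure; debugfs_real_fops(file) returns the fops that were actually registered. The generic shape of the fix, with an illustrative wrapper type (kernel context assumed; my_debugfs_fops/my_debugfs_read are made-up names):

struct my_debugfs_fops {
	struct file_operations fops;	/* this is what debugfs_create_file() got */
	ssize_t (*read)(void *priv, char *buf, size_t len);
};

static ssize_t my_debugfs_read(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct my_debugfs_fops *dfops =
		container_of(debugfs_real_fops(file),
			     struct my_debugfs_fops, fops);

	if (!dfops->read)
		return -ENOSYS;
	/* ... fill a local buffer via dfops->read() and copy_to_user() it ... */
	return 0;
}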
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 76842e6ca38e..99ab20334d21 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -670,6 +670,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
ar->readlen = outlen;
spin_unlock_bh(&ar->cmd_lock);
+ reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
@@ -778,10 +779,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
spin_lock_bh(&ar->cmd_lock);
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
- complete_all(&ar->cmd_wait);
-
- /* This is required to prevent an early completion on _start */
- reinit_completion(&ar->cmd_wait);
+ complete(&ar->cmd_wait);
/*
* Note:
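The race being closed in usb.c: the stop path used to complete_all() and then immediately re-init the completion, so a command issued in that window could be satisfied by the stale wake-up. Arming the completion just before each command goes out and using a plain complete() in the stop path gives the safe ordering; a sketch of the two sides (kernel context assumed, names from the driver, waiting details simplified):

/* issue path: arm, send, then wait */
reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!err && !(cmd & CARL9170_CMD_ASYNC_FLAG))
	err = wait_for_completion_timeout(&ar->cmd_wait, HZ) ? 0 : -ETIMEDOUT;

/* stop path: wake the single waiter; the next command re-arms by itself */
complete(&ar->cmd_wait);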
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 2f8136d50f78..4100ffd42a43 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -338,7 +338,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
return true;
}
-static struct dfs_pattern_detector default_dpd = {
+static const struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
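Constifying the default detector template moves it into read-only data and documents that it is only ever copied from, never written to. A minimal standalone illustration of the pattern (all names made up):

#include <stdio.h>

struct detector_ops {
	int (*add_pulse)(int width_us);
};

static int add_pulse_impl(int width_us)
{
	return width_us > 0;
}

/* read-only template, analogous to default_dpd above */
static const struct detector_ops default_ops = {
	.add_pulse = add_pulse_impl,
};

int main(void)
{
	struct detector_ops dpd = default_ops;	/* instances are copies */

	printf("%d\n", dpd.add_pulse(5));
	return 0;
}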
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f0e1175fb76a..d117240d9a73 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -354,10 +354,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
+ mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
/* check we are client side */
switch (wdev->iftype) {
@@ -760,14 +763,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
-static struct wil_tid_crypto_rx_single *
-wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
- enum wmi_key_usage key_usage, const u8 *mac_addr)
+static struct wil_sta_info *
+wil_find_sta_by_key_usage(struct wil6210_priv *wil,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
- int tid = 0;
- struct wil_sta_info *s;
- struct wil_tid_crypto_rx *c;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
@@ -778,18 +778,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
- wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
- key_usage_str[key_usage], key_index);
+ wil_err(wil, "No CID for %pM %s\n", mac_addr,
+ key_usage_str[key_usage]);
return ERR_PTR(cid);
}
- s = &wil->sta[cid];
- if (key_usage == WMI_KEY_USE_PAIRWISE)
- c = &s->tid_crypto_rx[tid];
- else
- c = &s->group_crypto_rx;
+ return &wil->sta[cid];
+}
+
+static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
- return &c->key_id[key_index];
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq,
+ IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ cc->key_set = false;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ cc->key_set = false;
+ break;
+ default:
+ break;
+ }
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
@@ -801,24 +855,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
+
+ if (!params) {
+ wil_err(wil, "NULL params\n");
+ return -EINVAL;
+ }
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
- if (IS_ERR(cc)) {
+ if (IS_ERR(cs)) {
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
__func__, mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
- if (cc)
- cc->key_set = false;
+ wil_del_rx_key(key_index, key_usage, cs);
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@@ -831,13 +887,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
- if ((rc == 0) && cc) {
- if (params->seq)
- memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
- else
- memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
- cc->key_set = true;
- }
+ if (!rc)
+ wil_set_crypto_rx(key_index, key_usage, cs, params);
return rc;
}
@@ -849,20 +900,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
key_usage_str[key_usage], key_index);
- if (IS_ERR(cc))
+ if (IS_ERR(cs))
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
mac_addr, key_usage_str[key_usage], key_index);
- if (!IS_ERR_OR_NULL(cc))
- cc->key_set = false;
+ if (!IS_ERR_OR_NULL(cs))
+ wil_del_rx_key(key_index, key_usage, cs);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@@ -1363,23 +1412,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- u8 started;
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ if (!p2p->p2p_dev_started)
+ return;
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
- started = wil_p2p_stop_discovery(wil);
- if (started && wil->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- wil->radio_wdev = wil->wdev;
- }
+ wil_p2p_stop_radio_operations(wil);
+ p2p->p2p_dev_started = 0;
mutex_unlock(&wil->mutex);
-
- wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1464,14 +1506,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
set_wiphy_dev(wdev->wiphy, dev);
wil_wiphy_init(wdev->wiphy);
- rc = wiphy_register(wdev->wiphy);
- if (rc < 0)
- goto out_failed_reg;
-
return wdev;
-out_failed_reg:
- wiphy_free(wdev->wiphy);
out:
kfree(wdev);
@@ -1487,7 +1523,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
if (!wdev)
return;
- wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
@@ -1498,11 +1533,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
mutex_lock(&wil->p2p_wdev_mutex);
p2p_wdev = wil->p2p_wdev;
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil_to_wdev(wil);
+ mutex_unlock(&wil->p2p_wdev_mutex);
if (p2p_wdev) {
- wil->p2p_wdev = NULL;
- wil->radio_wdev = wil_to_wdev(wil);
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
- mutex_unlock(&wil->p2p_wdev_mutex);
}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index a8098b406cc0..5e4058a4037b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1553,6 +1553,56 @@ static const struct file_operations fops_led_blink_time = {
.open = simple_open,
};
+/*---------FW capabilities------------*/
+static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
+ wil->fw_capabilities);
+
+ return 0;
+}
+
+static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_capabilities_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_capabilities = {
+ .open = wil_fw_capabilities_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------FW version------------*/
+static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (wil->fw_version[0])
+ seq_printf(s, "%s\n", wil->fw_version);
+ else
+ seq_puts(s, "N/A\n");
+
+ return 0;
+}
+
+static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_version_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_version = {
+ .open = wil_fw_version_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1603,6 +1653,8 @@ static const struct {
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
+ {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
+ {"fw_version", S_IRUGO, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1643,7 +1695,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
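The two new debugfs entries follow the stock seq_file single_open() pattern: a show() callback, an open() that binds it to inode->i_private, and seq_read/seq_lseek/single_release for everything else. A minimal skeleton of the same pattern (kernel context assumed; my_priv/my_stat are illustrative names):

struct my_priv {
	u32 value;
};

static int my_stat_show(struct seq_file *s, void *data)
{
	struct my_priv *priv = s->private;	/* set by single_open() below */

	seq_printf(s, "value : %u\n", priv->value);
	return 0;
}

static int my_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_stat_show, inode->i_private);
}

static const struct file_operations fops_my_stat = {
	.open = my_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* registered via debugfs_create_file("my_stat", S_IRUGO, dbg, priv, &fops_my_stat) */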
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 7a2c6c129ad5..2f2b910501ba 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* FW capabilities encoded inside a comment record */
+#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
+struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
+ /* identifies capabilities record */
+ __le32 magic;
+ /* capabilities (variable size), see enum wmi_fw_capability */
+ u8 capabilities[0];
+};
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
@@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
/* file header
* First record of every file
*/
+/* the FW version prefix in the comment */
+#define WIL_FW_VERSION_PREFIX "FW version: "
+#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
struct wil_fw_record_file_header {
__le32 signature; /* Wilocity signature */
__le32 reserved;
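A capabilities record is an ordinary comment record whose payload begins with the magic above, followed by a little-endian capability bitmap that the host copies (truncated if needed) into its own bitmap; anything without the magic is treated as a plain comment. A standalone userspace sketch of that check (layout mirrors wil_fw_record_capabilities; function name, buffer sizes and values are illustrative, and a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAPA_MAGIC 0xabcddcbaU

static int parse_capabilities(const uint8_t *data, size_t size,
			      uint8_t *caps, size_t caps_len)
{
	uint32_t magic;

	if (size < sizeof(magic))
		return 0;			/* too short: ignore */
	memcpy(&magic, data, sizeof(magic));	/* little-endian host assumed */
	if (magic != CAPA_MAGIC)
		return 0;			/* plain comment record */

	memset(caps, 0, caps_len);
	size -= sizeof(magic);
	memcpy(caps, data + sizeof(magic), size < caps_len ? size : caps_len);
	return 1;
}

int main(void)
{
	uint8_t rec[8] = { 0xba, 0xdc, 0xcd, 0xab, 0x03, 0x00, 0x00, 0x00 };
	uint8_t caps[4] = {0};

	printf("parsed=%d caps[0]=0x%02x\n",
	       parse_capabilities(rec, sizeof(rec), caps, sizeof(caps)), caps[0]);
	return 0;
}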
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index d30657ee7e83..8f40eb301924 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
return (int)dlen;
}
+static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ return 0;
+}
+
static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
return 0;
}
+static int
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_capabilities *rec = data;
+ size_t capa_size;
+
+ if (size < sizeof(*rec) ||
+ le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+ return 0;
+
+ capa_size = size - offsetof(struct wil_fw_record_capabilities,
+ capabilities);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ memcpy(wil->fw_capabilities, rec->capabilities,
+ min(sizeof(wil->fw_capabilities), capa_size));
+ wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
+ rec->capabilities, capa_size, false);
+ return 0;
+}
+
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
sizeof(d->comment), true);
+ if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
+ WIL_FW_VERSION_PREFIX_LEN))
+ memcpy(wil->fw_version,
+ d->comment + WIL_FW_VERSION_PREFIX_LEN,
+ min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
+ sizeof(wil->fw_version) - 1));
+
return 0;
}
@@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
static const struct {
int type;
- int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
+ int (*load_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
+ int (*parse_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
} wil_fw_handlers[] = {
- {wil_fw_type_comment, fw_handle_comment},
- {wil_fw_type_data, fw_handle_data},
- {wil_fw_type_fill, fw_handle_fill},
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+ {wil_fw_type_data, fw_handle_data, fw_ignore_section},
+ {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
/* wil_fw_type_verify */
- {wil_fw_type_file_header, fw_handle_file_header},
- {wil_fw_type_direct_write, fw_handle_direct_write},
- {wil_fw_type_gateway_data, fw_handle_gateway_data},
- {wil_fw_type_gateway_data4, fw_handle_gateway_data4},
+ {wil_fw_type_file_header, fw_handle_file_header,
+ fw_handle_file_header},
+ {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
+ {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
+ {wil_fw_type_gateway_data4, fw_handle_gateway_data4,
+ fw_ignore_section},
};
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
- const void *data, size_t size)
+ const void *data, size_t size, bool load)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
+ for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
- return wil_fw_handlers[i].handler(wil, data, size);
- }
+ return load ?
+ wil_fw_handlers[i].load_handler(
+ wil, data, size) :
+ wil_fw_handlers[i].parse_handler(
+ wil, data, size);
wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}
/**
- * wil_fw_load - load FW into device
- *
- * Load the FW and uCode code and data to the corresponding device
- * memory regions
+ * wil_fw_process - process section from FW file
+ * if load is true: Load the FW and uCode code and data to the
+ * corresponding device memory regions,
+ * otherwise only parse and look for capabilities
*
* Return error code
*/
-static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
+static int wil_fw_process(struct wil6210_priv *wil, const void *data,
+ size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
@@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
- &hdr[1], hdr_sz);
+ &hdr[1], hdr_sz, load);
if (rc)
return rc;
}
@@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}
/**
- * wil_request_firmware - Request firmware and load to device
+ * wil_request_firmware - Request firmware
*
- * Request firmware image from the file and load it to device
+ * Request firmware image from the file
+ * If load is true, load firmware to device, otherwise
+ * only parse and extract capabilities
*
* Return error code
*/
-int wil_request_firmware(struct wil6210_priv *wil, const char *name)
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load)
{
int rc, rc1;
const struct firmware *fw;
@@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
rc = rc1;
goto out;
}
- rc = wil_fw_load(wil, d, rc1);
+ rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}
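Each record type now carries a pair of handlers: load_handler touches the device, parse_handler only extracts metadata (capabilities, FW version), and wil_fw_process() picks one based on the load flag, which is what lets wil_set_capabilities() scan the image before any firmware is actually loaded. A standalone sketch of that two-handler dispatch (record types, context and handlers are illustrative):

#include <stdio.h>

struct ctx { int caps; };

static int write_to_device(struct ctx *c, int v) { printf("load %d\n", v); return 0; }
static int ignore(struct ctx *c, int v)          { return 0; }
static int parse_caps(struct ctx *c, int v)      { c->caps = v; return 0; }

static const struct {
	int type;
	int (*load_handler)(struct ctx *c, int v);
	int (*parse_handler)(struct ctx *c, int v);
} handlers[] = {
	{ 1, write_to_device, ignore },	/* data record: only matters when loading */
	{ 2, ignore, parse_caps },	/* comment record carrying capabilities */
};

static int handle(struct ctx *c, int type, int v, int load)
{
	unsigned int i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].type == type)
			return load ? handlers[i].load_handler(c, v)
				    : handlers[i].parse_handler(c, v);
	return -1;			/* unknown record type */
}

int main(void)
{
	struct ctx c = { 0 };

	handle(&c, 2, 7, 0);		/* parse-only pass */
	printf("caps=%d\n", c.caps);
	handle(&c, 1, 42, 1);		/* load pass */
	return 0;
}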
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 011e7412dcc0..64046e0bd0a2 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -101,7 +101,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}
-static void wil6210_mask_halp(struct wil6210_priv *wil)
+void wil6210_mask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@@ -503,6 +503,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
offsetof(struct RGF_ICR, ICR));
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
+
+ /* HALP interrupt can be unmasked when misc interrupts are
+ * masked
+ */
+ if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
+ return 0;
+
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -592,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -600,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 4bc92e54984a..e7130b54d1d8 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
+ if (unlikely(!ndev))
+ return;
+
might_sleep();
wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
reason_code, from_event ? "+" : "-");
@@ -849,6 +852,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -860,6 +864,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@@ -888,11 +893,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WIL_FW2_NAME);
wil_halt_cpu(wil);
+ memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
- rc = wil_request_firmware(wil, WIL_FW_NAME);
+ rc = wil_request_firmware(wil, WIL_FW_NAME, true);
if (rc)
return rc;
- rc = wil_request_firmware(wil, WIL_FW2_NAME);
+ rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
if (rc)
return rc;
@@ -1035,10 +1041,10 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
- int rc;
-
WARN_ON(!mutex_is_locked(&wil->mutex));
+ set_bit(wil_status_resetting, wil->status);
+
if (wil->platform_ops.bus_request)
wil->platform_ops.bus_request(wil->platform_handle, 0);
@@ -1050,8 +1056,9 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
- (void)wil_p2p_stop_discovery(wil);
+ wil_p2p_stop_radio_operations(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -1063,18 +1070,7 @@ int __wil_down(struct wil6210_priv *wil)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
-
- if (test_bit(wil_status_fwconnected, wil->status) ||
- test_bit(wil_status_fwconnecting, wil->status)) {
-
- mutex_unlock(&wil->mutex);
- rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
- WMI_DISCONNECT_EVENTID, NULL, 0,
- WIL6210_DISCONNECT_TO_MS);
- mutex_lock(&wil->mutex);
- if (rc)
- wil_err(wil, "timeout waiting for disconnect\n");
- }
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
@@ -1118,23 +1114,26 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
- if (!rc)
+ if (!rc) {
wil_err(wil, "%s: HALP vote timed out\n", __func__);
- else
- wil_dbg_misc(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
- jiffies_to_msecs(to_jiffies - rc));
+ /* Mask HALP as done in case the interrupt is raised */
+ wil6210_mask_halp(wil);
+ } else {
+ wil_dbg_irq(wil,
+ "%s: HALP vote completed after %d ms\n",
+ __func__,
+ jiffies_to_msecs(to_jiffies - rc));
+ }
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
@@ -1145,16 +1144,16 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 098409753d5b..61de5e9f8ef0 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -179,13 +179,6 @@ void *wil_if_alloc(struct device *dev)
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
-
- netif_tx_stop_all_queues(ndev);
-
return wil;
out_priv:
@@ -216,25 +209,48 @@ void wil_if_free(struct wil6210_priv *wil)
int wil_if_add(struct wil6210_priv *wil)
{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct wiphy *wiphy = wdev->wiphy;
struct net_device *ndev = wil_to_ndev(wil);
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "entered");
+
+ strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+ rc = wiphy_register(wiphy);
+ if (rc < 0) {
+ wil_err(wil, "failed to register wiphy, err %d\n", rc);
+ return rc;
+ }
+
+ netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+
+ netif_tx_stop_all_queues(ndev);
rc = register_netdev(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
- return rc;
+ goto out_wiphy;
}
return 0;
+
+out_wiphy:
+ wiphy_unregister(wdev->wiphy);
+ return rc;
}
void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wil_dbg_misc(wil, "%s()\n", __func__);
unregister_netdev(ndev);
+ wiphy_unregister(wdev->wiphy);
}
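wiphy registration now happens in wil_if_add(), after the FW version string is known, and a register_netdev() failure unwinds it; wil_if_remove() tears the two down in reverse order. The error-unwinding shape, as a standalone sketch with stubbed registration calls (all names illustrative; the netdev stub deliberately fails):

#include <stdio.h>

static int register_wiphy_stub(void)    { return 0; }
static void unregister_wiphy_stub(void) { }
static int register_netdev_stub(void)   { return -1; /* simulate failure */ }

static int if_add(void)
{
	int rc;

	rc = register_wiphy_stub();
	if (rc < 0)
		return rc;

	rc = register_netdev_stub();
	if (rc < 0)
		goto out_wiphy;		/* unwind in reverse order */

	return 0;

out_wiphy:
	unregister_wiphy_stub();
	return rc;
}

int main(void)
{
	printf("if_add: %d\n", if_add());
	return 0;
}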
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index e0f8aa0ebfac..4087785d3090 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -263,3 +263,49 @@ void wil_p2p_search_expired(struct work_struct *work)
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
+
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ lockdep_assert_held(&wil->mutex);
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->radio_wdev != wil->p2p_wdev)
+ goto out;
+
+ if (!p2p->discovery_started) {
+ /* Regular scan on the p2p device */
+ if (wil->scan_request &&
+ wil->scan_request->wdev == wil->p2p_wdev) {
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ }
+ goto out;
+ }
+
+ /* Search or listen on p2p device */
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_discovery(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->scan_request) {
+ /* search */
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ } else {
+ /* listen */
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ }
+
+out:
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 7b5c4222bc33..44746ca0d2e6 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include "wil6210.h"
+#include <linux/rtnetlink.h>
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
@@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
switch (rev_id) {
case JTAG_DEV_ID_SPARROW_B0:
@@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
}
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+
+ /* extract FW capabilities from file without loading the FW */
+ wil_request_firmware(wil, WIL_FW_NAME, false);
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM */
wil6210_debugfs_remove(wil);
+ rtnl_lock();
+ wil_p2p_wdev_free(wil);
+ rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
pci_iounmap(pdev, csr);
@@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
- wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index f2f6a404d3d1..4c38520d4dd2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
@@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
return 0;
out_free:
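Taking txdata->lock around both assignments means a concurrent TX path can never observe enabled == 1 while hwtail is still stale: the data is published and the flag is raised under one lock, and readers check the flag under the same lock. A generic, runnable rendering of that rule using pthreads (names illustrative; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	unsigned int hwtail;
	int enabled;
};

static void ring_enable(struct ring *r, unsigned int tail)
{
	pthread_mutex_lock(&r->lock);
	r->hwtail = tail;		/* data first ... */
	r->enabled = 1;			/* ... flag only together with it */
	pthread_mutex_unlock(&r->lock);
}

static int ring_use(struct ring *r, unsigned int *tail)
{
	int ok;

	pthread_mutex_lock(&r->lock);
	ok = r->enabled;
	if (ok)
		*tail = r->hwtail;	/* never stale when ok is set */
	pthread_mutex_unlock(&r->lock);
	return ok;
}

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	unsigned int tail;

	ring_enable(&r, 128);
	if (ring_use(&r, &tail))
		printf("tail=%u\n", tail);
	return 0;
}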
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ecab4af90602..a949cd62bc4e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -17,6 +17,7 @@
#ifndef __WIL6210_H__
#define __WIL6210_H__
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
@@ -576,10 +577,11 @@ struct wil6210_priv {
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
- u32 fw_version;
+ u8 fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
const char *hw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@@ -657,7 +659,7 @@ struct wil6210_priv {
/* P2P_DEVICE vif */
struct wireless_dev *p2p_wdev;
- struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+ struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
struct wireless_dev *radio_wdev;
/* High Access Latency Policy voting */
@@ -828,6 +830,7 @@ void wil_unmask_irq(struct wil6210_priv *wil);
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
+void wil6210_mask_halp(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
@@ -840,6 +843,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@@ -893,7 +897,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
-int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 4d92541913c0..fae4f1285d08 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -312,14 +312,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
- wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
+ wil->fw_version, le32_to_cpu(evt->sw_version),
evt->mac, wil->n_mids);
/* ignore MAC address, we already have it from the boot loader */
- snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
- "%d", wil->fw_version);
+ strlcpy(wdev->wiphy->fw_version, wil->fw_version,
+ sizeof(wdev->wiphy->fw_version));
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
@@ -424,6 +424,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
struct cfg80211_scan_info info = {
@@ -435,14 +436,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, info.aborted);
del_timer_sync(&wil->scan_timer);
- mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
- mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
}
static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 685fe0ddea26..f430e8a80603 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -46,6 +46,16 @@ enum wmi_mid {
MID_BROADCAST = 0xFF,
};
+/* FW capability IDs
+ * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
+ * the host
+ */
+enum wmi_fw_capability {
+ WMI_FW_CAPABILITY_FTM = 0,
+ WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_MAX,
+};
+
/* WMI_CMD_HDR */
struct wmi_cmd_hdr {
u8 mid;
@@ -120,6 +130,8 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
+ WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -134,10 +146,15 @@ enum wmi_command_id {
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
+ WMI_GET_RF_STATUS_CMDID = 0x866,
+ WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
@@ -150,6 +167,26 @@ enum wmi_command_id {
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ /* Power Save Configuration Commands */
+ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_CMDID = 0x91D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
+ /* Per MAC Power Save Configuration commands
+ * Not supported yet
+ */
+ WMI_PS_MID_CFG_CMDID = 0x91F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_CMDID = 0x920,
+ WMI_RS_CFG_CMDID = 0x921,
+ WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
+ WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_TOF_SESSION_START_CMDID = 0x991,
+ WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
+ WMI_TOF_SET_LCR_CMDID = 0x993,
+ WMI_TOF_SET_LCI_CMDID = 0x994,
+ WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -291,9 +328,8 @@ enum wmi_scan_type {
/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
u8 direct_scan_mac_addr[WMI_MAC_LEN];
- /* DMG Beacon frame is transmitted during active scanning */
+ /* run scan with discovery beacon. Relevant for ACTIVE scan only. */
u8 discovery_mode;
- /* reserved */
u8 reserved;
/* Max duration in the home channel(ms) */
__le32 dwell_time;
@@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
+/* WMI_TRAFFIC_DEFERRAL_CMDID */
+struct wmi_traffic_deferral_cmd {
+ /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
+ u8 wakeup_trigger;
+} __packed;
+
/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
@@ -818,85 +860,193 @@ struct wmi_pmc_cmd {
__le64 mem_base;
} __packed;
+enum wmi_aoa_meas_type {
+ WMI_AOA_PHASE_MEAS = 0x00,
+ WMI_AOA_PHASE_AMP_MEAS = 0x01,
+};
+
+/* WMI_AOA_MEAS_CMDID */
+struct wmi_aoa_meas_cmd {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ __le32 meas_rf_mask;
+} __packed;
+
+enum wmi_tof_burst_duration {
+ WMI_TOF_BURST_DURATION_250_USEC = 2,
+ WMI_TOF_BURST_DURATION_500_USEC = 3,
+ WMI_TOF_BURST_DURATION_1_MSEC = 4,
+ WMI_TOF_BURST_DURATION_2_MSEC = 5,
+ WMI_TOF_BURST_DURATION_4_MSEC = 6,
+ WMI_TOF_BURST_DURATION_8_MSEC = 7,
+ WMI_TOF_BURST_DURATION_16_MSEC = 8,
+ WMI_TOF_BURST_DURATION_32_MSEC = 9,
+ WMI_TOF_BURST_DURATION_64_MSEC = 10,
+ WMI_TOF_BURST_DURATION_128_MSEC = 11,
+ WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
+};
+
+enum wmi_tof_session_start_flags {
+ WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
+ WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
+};
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_ftm_dest_info {
+ u8 channel;
+ /* wmi_tof_session_start_flags_e */
+ u8 flags;
+ u8 initial_token;
+ u8 num_of_ftm_per_burst;
+ u8 num_of_bursts_exp;
+ /* wmi_tof_burst_duration_e */
+ u8 burst_duration;
+ /* Burst Period indicates the interval between two consecutive burst
+ * instances, in units of 100 ms
+ */
+ __le16 burst_period;
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 reserved;
+} __packed;
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_tof_session_start_cmd {
+ __le32 session_id;
+ u8 num_of_aoa_measures;
+ u8 aoa_type;
+ __le16 num_of_dest;
+ u8 reserved[4];
+ struct wmi_ftm_dest_info ftm_dest_info[0];
+} __packed;
+
+enum wmi_tof_channel_info_report_type {
+ WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
+ WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
+ WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
+ WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
+ WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
+};
+
+/* WMI_TOF_CHANNEL_INFO_CMDID */
+struct wmi_tof_channel_info_cmd {
+ /* wmi_tof_channel_info_report_type_e */
+ __le32 channel_info_report_request;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
- WMI_READY_EVENTID = 0x1001,
- WMI_CONNECT_EVENTID = 0x1002,
- WMI_DISCONNECT_EVENTID = 0x1003,
- WMI_SCAN_COMPLETE_EVENTID = 0x100A,
- WMI_REPORT_STATISTICS_EVENTID = 0x100B,
- WMI_RD_MEM_RSP_EVENTID = 0x1800,
- WMI_FW_READY_EVENTID = 0x1801,
- WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
- WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_FS_TUNE_DONE_EVENTID = 0x180A,
- WMI_CORR_MEASURE_EVENTID = 0x180B,
- WMI_READ_RSSI_EVENTID = 0x180C,
- WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
- WMI_DC_CALIB_DONE_EVENTID = 0x180F,
- WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
- WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
- WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
- WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
- WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
- WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
- WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
- WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
- WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
- WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
- WMI_VRING_CFG_DONE_EVENTID = 0x1821,
- WMI_BA_STATUS_EVENTID = 0x1823,
- WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
- WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
- WMI_DELBA_EVENTID = 0x1826,
- WMI_GET_SSID_EVENTID = 0x1828,
- WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
- WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
- WMI_READ_MAC_RXQ_EVENTID = 0x1830,
- WMI_READ_MAC_TXQ_EVENTID = 0x1831,
- WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
- WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
- WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
- WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
- WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
- WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
- WMI_RS_MGMT_DONE_EVENTID = 0x1852,
- WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
- WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
- WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
- WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
- WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
- WMI_OTP_READ_RESULT_EVENTID = 0x1856,
- WMI_LED_CFG_DONE_EVENTID = 0x1858,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
+ WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
+ WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_OTP_READ_RESULT_EVENTID = 0x1856,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
- WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
- WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
- WMI_BF_CTRL_DONE_EVENTID = 0x1862,
- WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
- WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
- WMI_UNIT_TEST_EVENTID = 0x1900,
- WMI_FLASH_READ_DONE_EVENTID = 0x1902,
- WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_GET_RF_STATUS_EVENTID = 0x1866,
+ WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
- WMI_P2P_CFG_DONE_EVENTID = 0x1910,
- WMI_PORT_ALLOCATED_EVENTID = 0x1911,
- WMI_PORT_DELETED_EVENTID = 0x1912,
- WMI_LISTEN_STARTED_EVENTID = 0x1914,
- WMI_SEARCH_STARTED_EVENTID = 0x1915,
- WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
- WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
- WMI_PCP_STARTED_EVENTID = 0x1918,
- WMI_PCP_STOPPED_EVENTID = 0x1919,
- WMI_PCP_FACTOR_EVENTID = 0x191A,
- WMI_SET_CHANNEL_EVENTID = 0x9000,
- WMI_ASSOC_REQ_EVENTID = 0x9001,
- WMI_EAPOL_RX_EVENTID = 0x9002,
- WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
- WMI_FW_VER_EVENTID = 0x9004,
- WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
+ /* Power Save Configuration Events */
+ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_EVENTID = 0x191D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_EVENTID = 0x191E,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_EVENTID = 0x191F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_EVENTID = 0x1920,
+ WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
+ WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_TOF_SESSION_END_EVENTID = 0x1991,
+ WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
+ WMI_TOF_SET_LCR_EVENTID = 0x1993,
+ WMI_TOF_SET_LCI_EVENTID = 0x1994,
+ WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
+ WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
/* Events data structures */
@@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
- u8 major;
- u8 minor;
- __le16 subminor;
- __le16 build;
+ /* FW image version */
+ __le32 fw_major;
+ __le32 fw_minor;
+ __le32 fw_subminor;
+ __le32 fw_build;
+ /* FW image build time stamp */
+ __le32 hour;
+ __le32 minute;
+ __le32 second;
+ __le32 day;
+ __le32 month;
+ __le32 year;
+ /* Boot Loader image version */
+ __le32 bl_major;
+ __le32 bl_minor;
+ __le32 bl_subminor;
+ __le32 bl_build;
+ /* The number of entries in the FW capabilities array */
+ u8 fw_capabilities_len;
+ u8 reserved[3];
+ /* FW capabilities info
+ * Must be the last member of the struct
+ */
+ __le32 fw_capabilities[0];
+} __packed;
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_type {
+ RF_UNKNOWN = 0x00,
+ RF_MARLON = 0x01,
+ RF_SPARROW = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum board_file_rf_type {
+ BF_RF_MARLON = 0x00,
+ BF_RF_SPARROW = 0x01,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_status {
+ RF_OK = 0x00,
+ RF_NO_COMM = 0x01,
+ RF_WRONG_BOARD_FILE = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+struct wmi_get_rf_status_event {
+ /* enum rf_type */
+ __le32 rf_type;
+ /* attached RFs bit vector */
+ __le32 attached_rf_vector;
+ /* enabled RFs bit vector */
+ __le32 enabled_rf_vector;
+ /* enum rf_status, refers to enabled RFs */
+ u8 rf_status[32];
+ /* enum board file RF type */
+ __le32 board_file_rf_type;
+ /* board file platform type */
+ __le32 board_file_platform_type;
+ /* board file version */
+ __le32 board_file_version;
+ __le32 reserved[2];
+} __packed;
+
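As a companion sketch (hypothetical helper, not driver code): iterate the enabled-RF bit vector of the event above and map each per-RF status byte to a label. uint32_t/uint8_t stand in for __le32/u8 and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>

struct rf_status_event {
        uint32_t rf_type;                 /* enum rf_type */
        uint32_t attached_rf_vector;
        uint32_t enabled_rf_vector;
        uint8_t  rf_status[32];           /* enum rf_status, per enabled RF */
        uint32_t board_file_rf_type;
        uint32_t board_file_platform_type;
        uint32_t board_file_version;
        uint32_t reserved[2];
} __attribute__((packed));

static void report_rf_status(const struct rf_status_event *ev)
{
        static const char * const status[] = {
                "ok", "no communication", "wrong board file"
        };

        for (unsigned int i = 0; i < 32; i++) {
                if (!(ev->enabled_rf_vector & (1u << i)))
                        continue;
                printf("RF %u: %s\n", i,
                       ev->rf_status[i] < 3 ? status[ev->rf_status[i]] : "unknown");
        }
}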
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+enum baseband_type {
+ BASEBAND_UNKNOWN = 0x00,
+ BASEBAND_SPARROW_M_A0 = 0x03,
+ BASEBAND_SPARROW_M_A1 = 0x04,
+ BASEBAND_SPARROW_M_B0 = 0x05,
+ BASEBAND_SPARROW_M_C0 = 0x06,
+ BASEBAND_SPARROW_M_D0 = 0x07,
+};
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+struct wmi_get_baseband_type_event {
+ /* enum baseband_type */
+ __le32 baseband_type;
} __packed;
/* WMI_MAC_ADDR_RESP_EVENTID */
@@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
__le32 status;
} __packed;
+#define WMI_NUM_MCS (13)
+
+/* Rate search parameters configuration per connection */
+struct wmi_rs_cfg {
+ /* The maximal allowed PER for each MCS
+ * MCS will be considered as failed if PER during RS is higher
+ */
+ u8 per_threshold[WMI_NUM_MCS];
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt[WMI_NUM_MCS];
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ __le32 back_failure_mask;
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_CFG_CMDID */
+struct wmi_rs_cfg_cmd {
+ /* connection id */
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ /* rate search configuration */
+ struct wmi_rs_cfg rs_cfg;
+} __packed;
+
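To make the knobs above concrete, here is a sketch of filling the command for connection 0. The structs mirror the layout above with plain integers; the threshold values and the bit-per-MCS reading of mcs_en_vec are illustrative assumptions, not recommended settings.

#include <stdint.h>
#include <string.h>

#define WMI_NUM_MCS 13

struct rs_cfg {
        uint8_t  per_threshold[WMI_NUM_MCS];
        uint8_t  min_frame_cnt[WMI_NUM_MCS];
        uint8_t  stop_th;
        uint8_t  mcs1_fail_th;
        uint8_t  max_back_failure_th;
        uint8_t  dbg_disable_internal_trigger;
        uint32_t back_failure_mask;
        uint32_t mcs_en_vec;
} __attribute__((packed));

struct rs_cfg_cmd {
        uint8_t cid;
        uint8_t rs_enable;
        struct rs_cfg rs_cfg;
} __attribute__((packed));

static void fill_rs_cfg(struct rs_cfg_cmd *cmd, uint8_t cid)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->cid = cid;
        cmd->rs_enable = 1;
        for (int i = 0; i < WMI_NUM_MCS; i++) {
                cmd->rs_cfg.per_threshold[i] = 10; /* example: fail an MCS above 10% PER */
                cmd->rs_cfg.min_frame_cnt[i] = 16; /* example: need 16 MPDUs to decide */
        }
        cmd->rs_cfg.stop_th = 90;                  /* example value */
        cmd->rs_cfg.mcs1_fail_th = 50;             /* example value */
        cmd->rs_cfg.mcs_en_vec = (1u << WMI_NUM_MCS) - 1; /* assumed bit-per-MCS enable mask */
}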
+/* WMI_RS_CFG_DONE_EVENTID */
+struct wmi_rs_cfg_done_event {
+ u8 cid;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_CMDID */
+struct wmi_get_detailed_rs_res_cmd {
+ /* connection id */
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* RS results status */
+enum wmi_rs_results_status {
+ WMI_RS_RES_VALID = 0x00,
+ WMI_RS_RES_INVALID = 0x01,
+};
+
+/* Rate search results */
+struct wmi_rs_results {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt[WMI_NUM_MCS];
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt[WMI_NUM_MCS];
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EVENTID */
+struct wmi_get_detailed_rs_res_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ /* detailed rs results */
+ struct wmi_rs_results rs_results;
+ u8 reserved[3];
+} __packed;
+
+/* broadcast connection ID */
+#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
+
+/* Types of wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
+enum wmi_link_maintain_cfg_type {
+ /* AP/PCP default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
+ /* AP/PCP default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
+ /* STA default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
+ /* STA default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
+ /* custom configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
+ /* number of defined configuration types */
+ WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
+};
+
+/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
+enum wmi_link_maintain_cfg_response_status {
+ /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
+ /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
+ * command request
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
+};
+
+/* Link Loss and Keep Alive configuration */
+struct wmi_link_maintain_cfg {
+ /* link_loss_enable_detectors_vec */
+ __le32 link_loss_enable_detectors_vec;
+ /* detectors check period usec */
+ __le32 check_link_loss_period_usec;
+ /* max allowed tx ageing */
+ __le32 tx_ageing_threshold_usec;
+ /* keep alive period for high SNR */
+ __le32 keep_alive_period_usec_high_snr;
+ /* keep alive period for low SNR */
+ __le32 keep_alive_period_usec_low_snr;
+ /* lower snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_low_db;
+ /* upper snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_high_db;
+ /* num of successive bad beacons causing link-loss */
+ __le32 bad_beacons_num_threshold;
+ /* SNR limit for bad_beacons_detector */
+ __le32 bad_beacons_snr_threshold_db;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
+struct wmi_link_maintain_cfg_write_cmd {
+ /* enum wmi_link_maintain_cfg_type_e - type of requested default
+ * configuration to be applied
+ */
+ __le32 cfg_type;
+ /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
+ __le32 cid;
+ /* custom configuration settings to be applied (relevant only if
+ * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
+ */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
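A short sketch of building the write command for the common case: apply the default non-FST AP preset to every connection. The field layout mirrors wmi_link_maintain_cfg_write_cmd above; the custom lm_cfg block is only consumed when cfg_type selects the CUSTOM preset, so it stays zeroed. The names and the byte-array stand-in for the custom block are assumptions for illustration.

#include <stdint.h>
#include <string.h>

#define LM_CFG_CID_BROADCAST 0xFFFFFFFFu

/* values track the 0x00..0x04 presets defined above */
enum lm_cfg_type {
        LM_DEFAULT_NORMAL_AP,
        LM_DEFAULT_FST_AP,
        LM_DEFAULT_NORMAL_STA,
        LM_DEFAULT_FST_STA,
        LM_CUSTOM,
};

struct lm_write_cmd {
        uint32_t cfg_type;      /* enum lm_cfg_type */
        uint32_t cid;           /* connection id or LM_CFG_CID_BROADCAST */
        uint8_t  lm_cfg[36];    /* stand-in for the nine u32 custom settings */
} __attribute__((packed));

static void build_default_ap_cfg(struct lm_write_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->cfg_type = LM_DEFAULT_NORMAL_AP;
        cmd->cid = LM_CFG_CID_BROADCAST;        /* apply to all connections */
        /* lm_cfg left zeroed: only read when cfg_type == LM_CUSTOM */
}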
+/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
+struct wmi_link_maintain_cfg_read_cmd {
+ /* connection ID which configuration settings are requested */
+ __le32 cid;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
+struct wmi_link_maintain_cfg_write_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - write status */
+ __le32 status;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENT */
+struct wmi_link_maintain_cfg_read_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - read status */
+ __le32 status;
+ /* Retrieved configuration settings */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+enum wmi_traffic_deferral_status {
+ WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
+ WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+};
+
+/* WMI_TRAFFIC_DEFERRAL_EVENTID */
+struct wmi_traffic_deferral_event {
+ /* enum wmi_traffic_deferral_status_e */
+ u8 status;
+} __packed;
+
+enum wmi_traffic_resume_status {
+ WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
+ WMI_TRAFFIC_RESUME_FAILED = 0x1,
+};
+
+/* WMI_TRAFFIC_RESUME_EVENTID */
+struct wmi_traffic_resume_event {
+ /* enum wmi_traffic_resume_status_e */
+ u8 status;
+} __packed;
+
+/* Power Save command completion status codes */
+enum wmi_ps_cfg_cmd_status {
+ WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
+};
+
+/* Device Power Save Profiles */
+enum wmi_ps_profile_type {
+ WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
+ WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
+ WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
+ WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
+};
+
+/* WMI_PS_DEV_PROFILE_CFG_CMDID
+ *
+ * Power save profile to be used by the device
+ *
+ * Returned event:
+ * - WMI_PS_DEV_PROFILE_CFG_EVENTID
+ */
+struct wmi_ps_dev_profile_cfg_cmd {
+ /* wmi_ps_profile_type_e */
+ u8 ps_profile;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
+struct wmi_ps_dev_profile_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
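The command/event pair above is a simple request/ack exchange; the sketch below shows its shape. wmi_call is a placeholder for whatever synchronous command/event transport the host driver uses, and all names here are hypothetical.

#include <stdint.h>

enum ps_profile { PS_DEFAULT, PS_DISABLED, PS_MAX_PS, PS_LOW_LATENCY_PS };
enum ps_cfg_status { PS_CFG_SUCCESS, PS_CFG_BAD_PARAM, PS_CFG_ERROR };

struct ps_profile_cmd   { uint8_t ps_profile; uint8_t reserved[3]; } __attribute__((packed));
struct ps_profile_event { uint32_t status; } __attribute__((packed));

/* returns 0 on success, -1 on transport error or firmware rejection */
static int set_ps_profile(enum ps_profile profile,
                          int (*wmi_call)(const void *cmd, unsigned int cmd_len,
                                          void *evt, unsigned int evt_len))
{
        struct ps_profile_cmd cmd = { .ps_profile = (uint8_t)profile };
        struct ps_profile_event evt = { 0 };

        if (wmi_call(&cmd, sizeof(cmd), &evt, sizeof(evt)))
                return -1;
        return evt.status == PS_CFG_SUCCESS ? 0 : -1;
}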
+enum wmi_ps_level {
+ WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
+ WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
+ /* awake = all PS mechanisms are disabled */
+ WMI_PS_LEVEL_AWAKE = 0x02,
+};
+
+enum wmi_ps_deep_sleep_clk_level {
+ /* 33k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
+ /* 10k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
+ /* @RTC Low latency */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
+ /* Not Applicable */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
+};
+
+/* Response by the FW to a D3 entry request */
+enum wmi_ps_d3_resp_policy {
+ WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
+ /* debug -D3 req is always denied */
+ WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
+ /* debug -D3 req is always approved */
+ WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
+};
+
+/* Device common power save configurations */
+struct wmi_ps_dev_cfg {
+ /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
+ */
+ u8 ps_unassoc_min_level;
+ /* lowest deep sleep clock level while nonassoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_unassoc_deep_sleep_min_level;
+ /* lowest level of PS allowed while associated, enum wmi_ps_level_e */
+ u8 ps_assoc_min_level;
+ /* lowest deep sleep clock level while assoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_assoc_deep_sleep_min_level;
+ /* enum wmi_ps_deep_sleep_clk_level_e */
+ u8 ps_assoc_low_latency_ds_min_level;
+ /* enum wmi_ps_d3_resp_policy_e */
+ u8 ps_D3_response_policy;
+ /* BOOL */
+ u8 ps_D3_pm_pme_enabled;
+ /* BOOL */
+ u8 ps_halp_enable;
+ u8 ps_deep_sleep_enter_thresh_msec;
+ /* BOOL */
+ u8 ps_voltage_scaling_en;
+} __packed;
+
+/* WMI_PS_DEV_CFG_CMDID
+ *
+ * Configure common Power Save parameters of the device and all MIDs.
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_EVENTID
+ */
+struct wmi_ps_dev_cfg_cmd {
+ /* Device Power Save configuration to be applied */
+ struct wmi_ps_dev_cfg ps_dev_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* WMI_PS_DEV_CFG_EVENTID */
+struct wmi_ps_dev_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_CMDID
+ *
+ * request to retrieve device Power Save configuration
+ * (WMI_PS_DEV_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_READ_EVENTID
+ */
+struct wmi_ps_dev_cfg_read_cmd {
+ __le32 reserved;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_EVENTID */
+struct wmi_ps_dev_cfg_read_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+ /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
+ * params)
+ */
+ struct wmi_ps_dev_cfg dev_ps_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* Per Mac Power Save configurations */
+struct wmi_ps_mid_cfg {
+ /* Low power RX in BTI is enabled, BOOL */
+ u8 beacon_lprx_enable;
+ /* Sync to sector ID enabled, BOOL */
+ u8 beacon_sync_to_sectorId_enable;
+ /* Low power RX in DTI is enabled, BOOL */
+ u8 frame_exchange_lprx_enable;
+ /* Sleep Cycle while in scheduled PS, 1-31 */
+ u8 scheduled_sleep_cycle_pow2;
+ /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
+ u8 scheduled_num_of_awake_bis;
+ u8 am_to_traffic_load_thresh_mbp;
+ u8 traffic_to_am_load_thresh_mbps;
+ u8 traffic_to_am_num_of_no_traffic_bis;
+ /* BOOL */
+ u8 continuous_traffic_psm;
+ __le16 no_traffic_to_min_usec;
+ __le16 no_traffic_to_max_usec;
+ __le16 snoozing_sleep_interval_milisec;
+ u8 max_no_data_awake_events;
+ /* Trigger WEB after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_web;
+ /* Trigger BF after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_bf;
+ /* Trigger SOB after k successful beacons */
+ u8 num_of_successful_beacons_rx_to_trigger_sob;
+} __packed;
+
+/* WMI_PS_MID_CFG_CMDID
+ *
+ * Configure Power Save parameters of a specific MID.
+ * These parameters are relevant for the specific BSS this MID belongs to.
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_EVENTID
+ */
+struct wmi_ps_mid_cfg_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* mid PS configuration to be applied */
+ struct wmi_ps_mid_cfg ps_mid_cfg;
+} __packed;
+
+/* WMI_PS_MID_CFG_EVENTID */
+struct wmi_ps_mid_cfg_event {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_CMDID
+ *
+ * request to retrieve Power Save configuration of mid
+ * (WMI_PS_MID_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_READ_EVENTID
+ */
+struct wmi_ps_mid_cfg_read_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_EVENTID */
+struct wmi_ps_mid_cfg_read_event {
+ /* MAC ID */
+ u8 mid;
+ /* Retrieved MID Power Save configuration (WMI_PS_MID_CFG_CMD params) */
+ struct wmi_ps_mid_cfg mid_ps_cfg;
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+#define WMI_AOA_MAX_DATA_SIZE (128)
+
+enum wmi_aoa_meas_status {
+ WMI_AOA_MEAS_SUCCESS = 0x00,
+ WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
+ WMI_AOA_MEAS_FAILURE = 0x02,
+};
+
+/* WMI_AOA_MEAS_EVENTID */
+struct wmi_aoa_meas_event {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channel IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ /* Measurements are from RFs, defined by the mask */
+ __le32 meas_rf_mask;
+ /* enum wmi_aoa_meas_status */
+ u8 meas_status;
+ u8 reserved;
+ /* Length of meas_data in bytes */
+ __le16 length;
+ u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
+} __packed;
+
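The channel-ID mapping in the comment above corresponds to the three 60 GHz channels; a trivial helper (hypothetical, for illustration only) would be:

static unsigned int aoa_channel_to_mhz(unsigned char channel)
{
        switch (channel) {
        case 0: return 58320;   /* 60 GHz channel 1 */
        case 1: return 60480;   /* 60 GHz channel 2 */
        case 2: return 62640;   /* 60 GHz channel 3 */
        default: return 0;      /* unknown / reserved */
        }
}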
+/* WMI_TOF_GET_CAPABILITIES_EVENTID */
+struct wmi_tof_get_capabilities_event {
+ u8 ftm_capability;
+ /* maximum supported number of destinations to start TOF */
+ u8 max_num_of_dest;
+ /* maximum supported number of measurements per burst */
+ u8 max_num_of_meas_per_burst;
+ u8 reserved;
+ /* maximum supported multi bursts */
+ __le16 max_multi_bursts_sessions;
+ /* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
+ __le16 max_ftm_burst_duration;
+ /* AOA supported types */
+ __le32 aoa_supported_types;
+} __packed;
+
+enum wmi_tof_session_end_status {
+ WMI_TOF_SESSION_END_NO_ERROR = 0x00,
+ WMI_TOF_SESSION_END_FAIL = 0x01,
+ WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
+ WMI_TOF_SESSION_END_ABORTED = 0x03,
+};
+
+/* WMI_TOF_SESSION_END_EVENTID */
+struct wmi_tof_session_end_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* wmi_tof_session_end_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* Responder FTM Results */
+struct wmi_responder_ftm_res {
+ u8 t1[6];
+ u8 t2[6];
+ u8 t3[6];
+ u8 t4[6];
+ __le16 tod_err;
+ __le16 toa_err;
+ __le16 tod_err_initiator;
+ __le16 toa_err_initiator;
+} __packed;
+
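The four 48-bit timestamps above are the standard FTM quantities (t1/t4 captured at the responder, t2/t3 at the initiator), so a round-trip time follows the usual relation RTT = (t4 - t1) - (t3 - t2). The sketch below applies it; treating the 6-byte fields as little-endian and picoseconds as the time unit are assumptions for illustration, and timestamp wraparound is ignored.

#include <stdint.h>

/* read a 6-byte field as a little-endian 48-bit value (assumed byte order) */
static uint64_t get_le48(const uint8_t t[6])
{
        uint64_t v = 0;

        for (int i = 5; i >= 0; i--)
                v = (v << 8) | t[i];
        return v;
}

/* one-way distance in millimetres, using c ~= 0.3 mm/ps */
static uint64_t ftm_distance_mm(const uint8_t t1[6], const uint8_t t2[6],
                                const uint8_t t3[6], const uint8_t t4[6])
{
        uint64_t rtt_ps = (get_le48(t4) - get_le48(t1)) -
                          (get_le48(t3) - get_le48(t2));

        return rtt_ps * 3 / 20; /* (rtt_ps / 2) * ~0.3 mm/ps */
}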
+enum wmi_tof_ftm_per_dest_res_status {
+ WMI_PER_DEST_RES_NO_ERROR = 0x00,
+ WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
+ WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
+};
+
+enum wmi_tof_ftm_per_dest_res_flags {
+ WMI_PER_DEST_RES_REQ_START = 0x01,
+ WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
+ WMI_PER_DEST_RES_REQ_END = 0x04,
+ WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
+};
+
+/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
+struct wmi_tof_ftm_per_dest_res_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_ftm_per_dest_res_flags_e */
+ u8 flags;
+ /* wmi_tof_ftm_per_dest_res_status_e */
+ u8 status;
+ /* responder ASAP */
+ u8 responder_asap;
+ /* responder number of FTM per burst */
+ u8 responder_num_ftm_per_burst;
+ /* responder number of FTM burst exponent */
+ u8 responder_num_ftm_bursts_exp;
+ /* responder burst duration, wmi_tof_burst_duration_e */
+ u8 responder_burst_duration;
+ /* responder burst period, indicates the interval between two
+ * consecutive burst instances, in units of 100 ms
+ */
+ __le16 responder_burst_period;
+ /* receive burst counter */
+ __le16 bursts_cnt;
+ /* tsf of responder start burst */
+ __le32 tsf_sync;
+ /* actual received ftm per burst */
+ u8 actual_ftm_per_burst;
+ u8 reserved0[7];
+ struct wmi_responder_ftm_res responder_ftm_res[0];
+} __packed;
+
+enum wmi_tof_channel_info_type {
+ WMI_TOF_CHANNEL_INFO_AOA = 0x00,
+ WMI_TOF_CHANNEL_INFO_LCI = 0x01,
+ WMI_TOF_CHANNEL_INFO_LCR = 0x02,
+ WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
+ WMI_TOF_CHANNEL_INFO_CIR = 0x04,
+ WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
+ WMI_TOF_CHANNEL_INFO_SNR = 0x06,
+ WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
+};
+
+/* WMI_TOF_CHANNEL_INFO_EVENTID */
+struct wmi_tof_channel_info_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_channel_info_type_e */
+ u8 type;
+ /* data report length */
+ u8 len;
+ /* data report payload */
+ u8 report[0];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index b4bcd94aff6c..77046384dd80 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -524,7 +524,8 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(file->f_op, struct b43_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct b43_debugfs_fops, fops);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -585,7 +586,8 @@ static ssize_t b43_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(file->f_op, struct b43_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct b43_debugfs_fops, fops);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 090910ea259e..82ef56ed7ca1 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -221,7 +221,8 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
- dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct b43legacy_debugfs_fops, fops);
if (!dfops->read) {
err = -ENOSYS;
goto out_unlock;
@@ -287,7 +288,8 @@ static ssize_t b43legacy_debugfs_write(struct file *file,
goto out_unlock;
}
- dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops);
+ dfops = container_of(debugfs_real_fops(file),
+ struct b43legacy_debugfs_fops, fops);
if (!dfops->write) {
err = -ENOSYS;
goto out_unlock;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index d1bc51f92686..038a960c5104 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -194,7 +194,7 @@ retry:
}
/* Check info buffer */
- info = (void *)&msg[1];
+ info = (void *)&bcdc->buf[0];
/* Copy info buffer */
if (buf) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index f549c25608d6..72139b579b18 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -420,7 +420,7 @@ u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- u32 data;
+ u32 data = 0;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
@@ -1101,6 +1101,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b8aec5e5ef93..b777e1b2f87a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1595,15 +1595,9 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
- case NL80211_AUTHTYPE_AUTOMATIC:
- val = 2;
- brcmf_dbg(CONN, "automatic\n");
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- brcmf_dbg(CONN, "network eap\n");
default:
val = 2;
- brcmf_err("invalid auth type (%d)\n", sme->auth_type);
+ brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
break;
}
@@ -2533,7 +2527,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
WL_BSS_INFO_MAX);
if (err) {
brcmf_err("Failed to get bss info (%d)\n", err);
- return;
+ goto out_kfree;
}
si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
@@ -2545,6 +2539,9 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+
+out_kfree:
+ kfree(buf);
}
static s32
@@ -3703,6 +3700,7 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
struct cfg80211_wowlan *wowl)
{
u32 wowl_config;
+ struct brcmf_wowl_wakeind_le wowl_wakeind;
u32 i;
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
@@ -3744,8 +3742,9 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
- brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
- sizeof(struct brcmf_wowl_wakeind_le));
+ memcpy(&wowl_wakeind, "clear", 6);
+ brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", &wowl_wakeind,
+ sizeof(wowl_wakeind));
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -3884,11 +3883,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid);
+ brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
npmk = le32_to_cpu(cfg->pmk_list.npmk);
for (i = 0; i < npmk; i++)
- if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN))
+ if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
break;
if ((npmk > 0) && (i < npmk)) {
@@ -4502,6 +4501,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
+ bool supports_11d;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -4514,11 +4514,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
mbss = ifp->vif->mbss;
/* store current 11d setting */
- brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
- country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
- settings->beacon.tail_len,
- WLAN_EID_COUNTRY);
- is_11d = country_ie ? 1 : 0;
+ if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
+ &ifp->vif->is_11d)) {
+ supports_11d = false;
+ } else {
+ country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len,
+ WLAN_EID_COUNTRY);
+ is_11d = country_ie ? 1 : 0;
+ supports_11d = true;
+ }
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -4577,7 +4582,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* Parameters shared by all radio interfaces */
if (!mbss) {
- if (is_11d != ifp->vif->is_11d) {
+ if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
if (err < 0) {
@@ -4619,7 +4624,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- } else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
+ } else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) {
/* Multiple-BSS should use same 11d configuration */
err = -EINVAL;
goto exit;
@@ -4753,11 +4758,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
- ifp->vif->is_11d);
- if (err < 0)
- brcmf_err("restoring REGULATORY setting failed %d\n",
- err);
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+ ifp->vif->is_11d);
/* Bring device back up so it can be used again */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 65e8c8766441..5eaac13e2317 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -136,27 +136,6 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
err);
}
-static void
-_brcmf_set_mac_address(struct work_struct *work)
-{
- struct brcmf_if *ifp;
- s32 err;
-
- ifp = container_of(work, struct brcmf_if, setmacaddr_work);
-
- brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
-
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
- ETH_ALEN);
- if (err < 0) {
- brcmf_err("Setting cur_etheraddr failed, %d\n", err);
- } else {
- brcmf_dbg(TRACE, "MAC address updated to %pM\n",
- ifp->mac_addr);
- memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- }
-}
-
#if IS_ENABLED(CONFIG_IPV6)
static void _brcmf_update_ndtable(struct work_struct *work)
{
@@ -190,10 +169,20 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
+ int err;
- memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
- schedule_work(&ifp->setmacaddr_work);
- return 0;
+ brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
+ ETH_ALEN);
+ if (err < 0) {
+ brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+ } else {
+ brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
+ memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
+ memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ }
+ return err;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
@@ -519,13 +508,9 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr->rxsz = ndev->mtu + ndev->hard_header_len +
- drvr->hdrlen;
-
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
@@ -730,7 +715,6 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
}
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
@@ -886,9 +870,12 @@ static int brcmf_inet6addr_changed(struct notifier_block *nb,
}
break;
case NETDEV_DOWN:
- if (i < NDOL_MAX_ENTRIES)
- for (; i < ifp->ipv6addr_idx; i++)
+ if (i < NDOL_MAX_ENTRIES) {
+ for (; i < ifp->ipv6addr_idx - 1; i++)
table[i] = table[i + 1];
+ memset(&table[i], 0, sizeof(table[i]));
+ ifp->ipv6addr_idx--;
+ }
break;
default:
break;
@@ -1061,8 +1048,7 @@ fail:
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- if (ifp)
- brcmf_net_detach(ifp->ndev, false);
+ brcmf_net_detach(ifp->ndev, false);
if (p2p_ifp)
brcmf_net_detach(p2p_ifp->ndev, false);
drvr->iflist[0] = NULL;
@@ -1169,7 +1155,8 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- WARN_ON(!err);
+ if (!err)
+ brcmf_err("Timed out waiting for no pending 802.1x packets\n");
return !err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8fa34cad5a96..c94dcab260d0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -112,15 +112,11 @@ struct brcmf_pub {
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint rxsz; /* Rx buffer size bus module should use */
/* Dongle media info */
char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- /* Multicast data packets sent to dongle */
- unsigned long tx_multicast;
-
struct mac_address addresses[BRCMF_MAX_IFS];
struct brcmf_if *iflist[BRCMF_MAX_IFS];
@@ -176,7 +172,6 @@ enum brcmf_netif_stop_reason {
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @setmacaddr_work: worker object for setting mac address.
* @multicast_work: worker object for multicast provisioning.
* @ndoffload_work: worker object for neighbor discovery offload configuration.
* @fws_desc: interface specific firmware-signalling descriptor.
@@ -193,7 +188,6 @@ struct brcmf_if {
struct brcmf_cfg80211_vif *vif;
struct net_device *ndev;
struct net_device_stats stats;
- struct work_struct setmacaddr_work;
struct work_struct multicast_work;
struct work_struct ndoffload_work;
struct brcmf_fws_mac_descriptor *fws_desc;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 7e269f9aa607..d0b738da2458 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -234,13 +234,20 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
struct brcmf_flowring_ring *ring;
+ struct brcmf_if *ifp;
u16 hash_idx;
+ u8 ifidx;
struct sk_buff *skb;
ring = flow->rings[flowid];
if (!ring)
return;
+
+ ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+ ifp = brcmf_get_ifp(bus_if->drvr, ifidx);
+
brcmf_flowring_block(flow, flowid, false);
hash_idx = ring->hash_id;
flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
@@ -249,7 +256,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
skb = skb_dequeue(&ring->skblist);
while (skb) {
- brcmu_pkt_buf_free_skb(skb);
+ brcmf_txfinalize(ifp, skb, false);
skb = skb_dequeue(&ring->skblist);
}
@@ -495,14 +502,18 @@ void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
} else {
search = flow->tdls_entry;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
while (search->next) {
search = search->next;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
}
search->next = tdls_entry;
}
flow->tdls_active = true;
+ return;
+
+free_entry:
+ kfree(tdls_entry);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 9f9024a7bd64..a190f535efc9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2104,8 +2104,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if ((skb->priority == 0) || (skb->priority > 7))
skb->priority = cfg80211_classify8021d(skb, NULL);
- drvr->tx_multicast += !!multicast;
-
if (fws->avoid_queueing) {
rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
if (rc < 0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 68ab3ac15650..b892dac70f4b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -313,6 +313,7 @@ struct rte_console {
#define KSO_WAIT_US 50
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
/*
* Conversion of 802.1D priority to precedence level
@@ -677,6 +678,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
u8 wr_val = 0, rd_val, cmp_val, bmask;
int err = 0;
+ int err_cnt = 0;
int try_cnt = 0;
brcmf_dbg(TRACE, "Enter: on=%d\n", on);
@@ -712,9 +714,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
*/
rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
&err);
- if (((rd_val & bmask) == cmp_val) && !err)
+ if (!err) {
+ if ((rd_val & bmask) == cmp_val)
+ break;
+ err_cnt = 0;
+ }
+ /* bail out upon subsequent access errors */
+ if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
break;
-
udelay(KSO_WAIT_US);
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
@@ -3757,7 +3764,8 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
u32 val, rev;
val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
+ sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
index a10f35c5eb3d..fe6755944b7b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
@@ -19,6 +19,7 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "tracepoint.h"
+#include "debug.h"
void __brcmf_err(const char *func, const char *fmt, ...)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 98b15a9a2779..2f978a39b58a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1099,15 +1099,11 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
devinfo->tx_freecount = ntxq;
devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!devinfo->ctl_urb) {
- brcmf_err("usb_alloc_urb (ctl) failed\n");
+ if (!devinfo->ctl_urb)
goto error;
- }
devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!devinfo->bulk_urb) {
- brcmf_err("usb_alloc_urb (bulk) failed\n");
+ if (!devinfo->bulk_urb)
goto error;
- }
return &devinfo->bus_pub;
@@ -1462,11 +1458,15 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
#define BRCMF_USB_DEVICE(dev_id) \
{ USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
+#define LINKSYS_USB_DEVICE(dev_id) \
+ { USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) }
+
static struct usb_device_id brcmf_usb_devid_table[] = {
BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
+ LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID),
{ USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) },
/* special entry for device with firmware loaded and running */
BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 3cc42bef6245..d0407d9ad782 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -22,6 +22,7 @@
#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c
#define BRCM_USB_VENDOR_ID_LG 0x043e
+#define BRCM_USB_VENDOR_ID_LINKSYS 0x13b1
#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
/* Chipcommon Core Chip IDs */
@@ -58,6 +59,7 @@
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
+#define BRCM_USB_43235_LINKSYS_DEVICE_ID 0x0039
#define BRCM_USB_43236_DEVICE_ID 0xbd17
#define BRCM_USB_43242_DEVICE_ID 0xbd1f
#define BRCM_USB_43242_LG_DEVICE_ID 0x3101
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 209dc9988455..4db327a95414 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -2671,7 +2671,7 @@ const struct il_ops il3945_ops = {
.send_led_cmd = il3945_send_led_cmd,
};
-static struct il_cfg il3945_bg_cfg = {
+static const struct il_cfg il3945_bg_cfg = {
.name = "3945BG",
.fw_name_pre = IL3945_FW_PRE,
.ucode_api_max = IL3945_UCODE_API_MAX,
@@ -2700,7 +2700,7 @@ static struct il_cfg il3945_bg_cfg = {
},
};
-static struct il_cfg il3945_abg_cfg = {
+static const struct il_cfg il3945_abg_cfg = {
.name = "3945ABG",
.fw_name_pre = IL3945_FW_PRE,
.ucode_api_max = IL3945_UCODE_API_MAX,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 726ede391cb9..3bba521d2cd9 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1320,7 +1320,7 @@ struct il_priv {
u64 timestamp;
union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+#if IS_ENABLED(CONFIG_IWL3945)
struct {
void *shared_virt;
dma_addr_t shared_phys;
@@ -1351,7 +1351,7 @@ struct il_priv {
} _3945;
#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+#if IS_ENABLED(CONFIG_IWL4965)
struct {
struct il_rx_phy_res last_phy_res;
bool last_phy_res_valid;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index b662cf35b033..c7509c51e9d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -46,15 +46,6 @@
*
******************************************************************************/
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &priv->fw->img[ucode_type];
-}
-
/*
* Calibration
*/
@@ -330,7 +321,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
- fw = iwl_get_ucode_image(priv, ucode_type);
+ fw = iwl_get_ucode_image(priv->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 64690c14ff4d..d4b73dedf89b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,13 +73,13 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 24
-#define IWL3168_UCODE_API_MAX 24
+#define IWL7265D_UCODE_API_MAX 26
+#define IWL3168_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 16
-#define IWL7265_UCODE_API_MIN 16
-#define IWL7265D_UCODE_API_MIN 16
+#define IWL7260_UCODE_API_MIN 17
+#define IWL7265_UCODE_API_MIN 17
+#define IWL7265D_UCODE_API_MIN 17
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 6c6725e808d4..d02ca1491d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,11 +70,11 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 24
-#define IWL8265_UCODE_API_MAX 24
+#define IWL8000_UCODE_API_MAX 26
+#define IWL8265_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 16
+#define IWL8000_UCODE_API_MIN 17
#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
@@ -212,6 +212,17 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
.vht_mu_mimo_supported = true,
};
+const struct iwl_cfg iwl8275_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8275",
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .vht_mu_mimo_supported = true,
+};
+
const struct iwl_cfg iwl4165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index fbaf705f3fa7..ff850410d897 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,10 +55,10 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 24
+#define IWL9000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 16
+#define IWL9000_UCODE_API_MIN 17
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -72,15 +72,15 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
+#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260LC_MODULE_FIRMWARE(api) \
- IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000LC_MODULE_FIRMWARE(api) \
+ IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -146,41 +146,73 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.mac_addr_from_csr = true, \
.rf_id = true
+const struct iwl_cfg iwl9160_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9160",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
const struct iwl_cfg iwl9260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AC 9260",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9270_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9270",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
};
/*
* TODO the struct below is for internal testing only this should be
* removed by EO 2016~
*/
-const struct iwl_cfg iwl9260lc_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260LC_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwl5165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 5165",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
+const struct iwl_cfg iwl9000lc_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9000",
+ .fw_name_pre = IWL9000LC_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index 4d78232c8afe..ea1618525878 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 24
+#define IWL_A000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 423b23320d4f..2660cc4b9f8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -359,7 +359,6 @@ struct iwl_cfg {
high_temp:1,
mac_addr_from_csr:1,
lp_xtal_workaround:1,
- no_power_up_nic_in_init:1,
disable_dummy_notification:1,
apmg_not_supported:1,
mq_rx_supported:1,
@@ -445,13 +444,17 @@ extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
+extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl9000lc_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260lc_2ac_cfg;
-extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 871ad02fdb17..d73e9d436027 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -589,6 +589,8 @@ enum dtd_diode_reg {
* Causes for the FH register interrupts
*/
enum msix_fh_int_causes {
+ MSIX_FH_INT_CAUSES_Q0 = BIT(0),
+ MSIX_FH_INT_CAUSES_Q1 = BIT(1),
MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
MSIX_FH_INT_CAUSES_S2D = BIT(19),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index 1d9dd153ef1c..50510fb6ab8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -33,9 +33,6 @@
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 1d6f5d21a663..33ef5372d195 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -77,7 +77,6 @@
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
-#define TFH_MEM_LOWER_BOUND (0xA06000)
/**
* Keep-Warm (KW) buffer base address.
@@ -120,7 +119,7 @@
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
/* a000 TFD table address, 64 bit */
-#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00)
+#define TFH_TFDQ_CBB_TABLE (0x1C00)
/* Find TFD CB base pointer for given queue */
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* In case of DRAM read address which is not aligned to 128B, the TFH will
* enable transfer size which doesn't cross 64B DRAM address boundary.
*/
-#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MODE (0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ 0xc
#define TFH_CHUNK_SIZE_128 BIT(8)
#define TFH_CHUNK_SPLIT_MODE BIT(10)
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* the start of the TFD first TB.
* In case of a DRAM Tx CMD update the TFH will update PN and Key ID
*/
-#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48)
+#define TFH_TXCMD_UPDATE_CFG (0x1F48)
/*
* Controls TX DMA operation
*
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* set to 1 - interrupt is sent to the driver
* Bit 0: Indicates the snoop configuration
*/
-#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
#define TFH_SRV_DMA_SNOOP BIT(0)
#define TFH_SRV_DMA_TO_DRIVER BIT(24)
#define TFH_SRV_DMA_START BIT(31)
/* Defines the DMA SRAM write start address to transfer a data block */
-#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64)
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
/* Defines the 64bits DRAM start address to read the DMA data block from */
-#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68)
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
/*
* Defines the number of bytes to transfer from DRAM to SRAM.
* Note that this register may be configured with non-dword aligned size.
*/
-#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70)
+#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
/**
* Rx SRAM Control and Status Registers (RSCSR)
@@ -644,6 +643,7 @@ struct iwl_rb_status {
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
+#define IWL_TFH_NUM_TBS 25
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
@@ -665,25 +665,29 @@ struct iwl_tfd_tb {
} __packed;
/**
- * struct iwl_tfd
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
+ * This structure contains the DMA address and length of a transmit buffer
*
+ * @tb_len: length of the tx buffer
+ * @addr: 64-bit DMA address
+ */
+struct iwl_tfh_tb {
+ __le16 tb_len;
+ __le64 addr;
+} __packed;
+
+/**
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ * contiguous 256 TFDs.
+ * For pre-a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For a000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
*
- * Each TFD contains pointer/size information for up to 20 data buffers
+ * Each TFD contains pointer/size information for up to 20 / 25 data buffers
* in host DRAM. These buffers collectively contain the (one) frame described
* by the TFD. Each buffer must be a single contiguous block of memory within
* itself, but buffers may be scattered in host DRAM. Each buffer has max size
@@ -692,6 +696,16 @@ struct iwl_tfd_tb {
*
* A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
*/
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ */
struct iwl_tfd {
u8 __reserved1[3];
u8 num_tbs;
@@ -699,6 +713,19 @@ struct iwl_tfd {
__le32 __pad;
} __packed;
+/**
+ * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ * 5 -15 reserved
+ * @ tbs[25] transmit frame buffer descriptors
+ * @ __pad padding
+ */
+struct iwl_tfh_tfd {
+ __le16 num_tbs;
+ struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+ __le32 __pad;
+} __packed;
+
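A quick size check of the new layout, assuming standard packing: 25 TBs of 10 bytes each plus the 16-bit count and 32-bit pad give a 256-byte TFD, so a 256-entry queue occupies 64 KiB (the pre-a000 TFD is 128 bytes, i.e. a 32 KiB queue). The stand-in structs below only mirror the definitions above.

#include <stdint.h>

struct tfh_tb  { uint16_t tb_len; uint64_t addr; } __attribute__((packed));
struct tfh_tfd { uint16_t num_tbs; struct tfh_tb tbs[25]; uint32_t pad; } __attribute__((packed));

_Static_assert(sizeof(struct tfh_tb) == 10, "TB entry is 10 bytes");
_Static_assert(sizeof(struct tfh_tfd) == 256, "a000 TFD is 256 bytes");
_Static_assert(256 * sizeof(struct tfh_tfd) == 64 * 1024, "256-entry ring is 64 KiB");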
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
@@ -707,8 +734,13 @@ struct iwl_tfd {
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
+ * For devices up to a000:
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ * For a000 and on:
* @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 1b1e045f8907..ceec5ca2b1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -199,8 +199,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
@@ -210,36 +208,24 @@ struct iwl_ucode_capa {
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
- IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
@@ -249,24 +235,21 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
- * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
- * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
- * instead of 3.
- * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
- * (command version 3) that supports per-chain limits
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
- IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 74ea68d1063c..5f229556339a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -329,4 +329,13 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
return conf_tlv->usniffer;
}
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &fw->img[ucode_type];
+}
+
#endif /* __iwl_fw_h__ */
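The iwl_get_ucode_image() helper added above guards the ucode type against IWL_UCODE_TYPE_MAX before indexing fw->img[], so callers can no longer read past the array. A minimal stand-alone sketch of the same guard-then-index pattern; the enum values, array and names below are illustrative, not the driver's:

#include <stdio.h>
#include <stddef.h>

enum img_type { IMG_REGULAR, IMG_INIT, IMG_WOWLAN, IMG_TYPE_MAX };

struct img { const char *name; };

static const struct img images[IMG_TYPE_MAX] = {
	[IMG_REGULAR] = { "regular" },
	[IMG_INIT]    = { "init" },
	[IMG_WOWLAN]  = { "wowlan" },
};

/* Reject out-of-range types before touching the array, as the new helper does. */
static const struct img *get_image(enum img_type type)
{
	if (type >= IMG_TYPE_MAX)
		return NULL;

	return &images[type];
}

int main(void)
{
	printf("%s\n", get_image(IMG_INIT)->name);		/* prints "init" */
	printf("%d\n", get_image(IMG_TYPE_MAX) == NULL);	/* prints 1 */
	return 0;
}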
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 92c8b5f9a9cb..a9f69fdd170b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -267,7 +267,7 @@ static const char *get_rfh_string(int cmd)
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
- };
+ }
switch (cmd) {
IWL_CMD(RFH_RXF_DMA_CFG);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 8aa1f2b7fdfc..88f260db3744 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -99,8 +99,12 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
continue;
for (i = 0; i < w->n_cmds; i++) {
- if (w->cmds[i] ==
- WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+ pkt->hdr.cmd);
+
+ if (w->cmds[i] == rec_id ||
+ (!iwl_cmd_groupid(w->cmds[i]) &&
+ DEF_ID(w->cmds[i]) == rec_id)) {
found = true;
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 43f8f7d45ddb..3bd6fc1b76d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -67,6 +67,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
@@ -564,11 +565,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
- /* If OEM did not fuse address - get it from OTP */
- if (!mac_addr0 && !mac_addr1) {
- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
- }
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+ /*
+ * If the OEM fused a valid address, use it instead of the one in the
+ * OTP
+ */
+ if (is_valid_ether_addr(data->hw_addr))
+ return;
+
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
@@ -899,3 +905,91 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+
+#ifdef CONFIG_ACPI
+#define WRDD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+#define WRDD_WIGIG (0x10)
+
+static u32 iwl_wrdd_get_mcc(struct device *dev, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_EEPROM(dev, "Unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1 ; i < wrdd->package.count ; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+ mcc_pkg->package.count < 2 ||
+ mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ mcc_pkg = NULL;
+ continue;
+ }
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value == WRDD_WIFI)
+ break;
+
+ mcc_pkg = NULL;
+ }
+
+ if (mcc_pkg) {
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+
+ return 0;
+}
+
+int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 mcc_val;
+
+ root_handle = ACPI_HANDLE(dev);
+ if (!root_handle) {
+ IWL_DEBUG_EEPROM(dev,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)WRDD_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WRDD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRDC invocation failed (0x%x)\n",
+ status);
+ return -ENOENT;
+ }
+
+ mcc_val = iwl_wrdd_get_mcc(dev, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!mcc_val)
+ return -ENOENT;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_get_bios_mcc);
+#endif
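iwl_get_bios_mcc() packs the two ASCII letters of the country code returned by the ACPI WRDD package into one integer and then splits them into a NUL-terminated string. A small user-space sketch of that unpacking, with a made-up sample value; the helper name is an assumption, not a driver API:

#include <stdio.h>
#include <stdint.h>

/* Split a WRDD-style MCC value (two ASCII bytes) into a C string. */
static int mcc_to_string(uint32_t mcc_val, char mcc[3])
{
	if (!mcc_val)
		return -1;			/* no MCC reported */

	mcc[0] = (mcc_val >> 8) & 0xff;		/* first letter in the high byte */
	mcc[1] = mcc_val & 0xff;		/* second letter in the low byte */
	mcc[2] = '\0';
	return 0;
}

int main(void)
{
	char mcc[3];

	/* 0x4445 is "DE" in ASCII - an illustrative value only */
	if (!mcc_to_string(0x4445, mcc))
		printf("BIOS MCC: %s\n", mcc);
	return 0;
}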
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index d704d52aa7ec..7249e5b403f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -93,4 +94,21 @@ struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc);
+#ifdef CONFIG_ACPI
+/**
+ * iwl_get_bios_mcc - read MCC from BIOS, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_get_bios_mcc(struct device *dev, char *mcc);
+#else
+static inline int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ return -ENOENT;
+}
+#endif
+
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 7beba9ae5617..2893826d7d2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -110,7 +110,7 @@ enum iwl_phy_db_section_type {
IWL_PHY_DB_MAX
};
-#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+#define PHY_DB_CMD 0x6c
/*
* phy db - configure operational ucode
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 459bf736fd5b..406ef301b8ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -302,22 +302,17 @@
#define OSC_CLK_FORCE_CONTROL (0x8)
#define FH_UCODE_LOAD_STATUS (0x1AF0)
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
-enum secure_load_status_reg {
- LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
- LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
- LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
- LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
-#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by the driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS (0xa05c40)
+
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
-#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
-#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 6069a9ff53fa..d42cab291025 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -65,6 +65,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
+#include "iwl-fh.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
@@ -77,7 +78,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
static struct lock_class_key __key;
#endif
- trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+ trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
if (!trans)
return NULL;
@@ -102,18 +103,14 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool)
- goto free;
+ return NULL;
return trans;
- free:
- kfree(trans);
- return NULL;
}
void iwl_trans_free(struct iwl_trans *trans)
{
kmem_cache_destroy(trans->dev_cmd_pool);
- kfree(trans);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -139,6 +136,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+ if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
+ cmd->id = DEF_ID(cmd->id);
+
ret = trans->ops->send_cmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5535e2238da3..0296124a7f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -153,6 +153,7 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
/* due to the conversion, this group is special; new groups
* should be defined in the appropriate fw-api header files
@@ -262,8 +263,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* (i.e. mark it as non-idle).
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
- * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
- * check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
@@ -274,8 +273,6 @@ enum CMD_MODE {
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
CMD_WANT_ASYNC_CALLBACK = BIT(7),
-
- CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -488,7 +485,6 @@ struct iwl_hcmd_arr {
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: firmware supports wide host command header
* @sw_csum_tx: transport should compute the TCP checksum
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
@@ -510,7 +506,6 @@ struct iwl_trans_config {
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -649,6 +644,8 @@ struct iwl_trans_ops {
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
+ dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
+
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@@ -772,6 +769,7 @@ enum iwl_plat_pm_mode {
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
+ * @wide_cmd_header: true when ucode supports wide command header format
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
@@ -823,6 +821,7 @@ struct iwl_trans {
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
+ bool wide_cmd_header;
u8 num_rx_queues;
@@ -1073,6 +1072,15 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
+static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
+ int queue)
+{
+ /* we should never be called if the trans doesn't support it */
+ BUG_ON(!trans->ops->get_txq_byte_table);
+
+ return trans->ops->get_txq_byte_table(trans, queue);
+}
+
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
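With the wide command header now tracked per transport, iwl_trans_send_cmd() rewrites a legacy (group 0) command id with DEF_ID(), i.e. promotes it into group 1, and iwl_notification_wait_notify() accepts either form when matching waiters. A stand-alone sketch of the two macros and the equivalence check, assuming (as WIDE_ID implies) that the group id is the high byte of the 16-bit id; cmd_groupid() and cmd_matches() are illustrative stand-ins, not driver functions:

#include <stdio.h>
#include <stdint.h>

#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))
#define DEF_ID(opcode)		((1 << 8) | (opcode))

/* Group id is the high byte of the wide id; 0 means a legacy id. */
static uint8_t cmd_groupid(uint16_t id)
{
	return id >> 8;
}

/* A waiter registered with a legacy id still matches the promoted id. */
static int cmd_matches(uint16_t waiter_id, uint16_t received_id)
{
	return waiter_id == received_id ||
	       (!cmd_groupid(waiter_id) && DEF_ID(waiter_id) == received_id);
}

int main(void)
{
	uint16_t legacy = 0x1c;			/* opcode only, group 0 */
	uint16_t promoted = WIDE_ID(1, 0x1c);	/* what the firmware echoes back */

	printf("promoted id: 0x%04x\n", promoted);		/* 0x011c */
	printf("match: %d\n", cmd_matches(legacy, promoted));	/* 1 */
	return 0;
}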
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index b23271755daf..2d6f44fbaf62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -504,6 +504,28 @@ static inline char *iwl_dbgfs_is_match(char *name, char *buf)
return !strncmp(name, buf, len) ? buf + len : NULL;
}
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 curr_gp2;
+ u64 curr_os;
+ s64 diff;
+ char buf[64];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ do_div(curr_os, NSEC_PER_USEC);
+ diff = curr_os - curr_gp2;
+ pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
@@ -1530,6 +1552,8 @@ MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -1554,8 +1578,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
- (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
@@ -1570,6 +1593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff,
+ mvmvif->dbgfs_dir, S_IRUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index b34489817c70..539d718df797 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -917,6 +917,59 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rx_cmd_buffer rxb = {
+ ._rx_page_order = 0,
+ .truesize = 0, /* not used */
+ ._offset = 0,
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_mpdu_desc *desc;
+ int bin_len = count / 2;
+ int ret = -EINVAL;
+
+ /* only the 9000-series RX descriptor is supported */
+ if (!mvm->trans->cfg->mq_rx_supported)
+ return -ENOTSUPP;
+
+ rxb._page = alloc_pages(GFP_ATOMIC, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
+ ret = hex2bin(page_address(rxb._page), buf, bin_len);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access */
+ if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ goto out;
+
+ /* check this is RX packet */
+ if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+ WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+ goto out;
+
+ /* check the length in metadata matches actual received length */
+ desc = (void *)pkt->data;
+ if (le16_to_cpu(desc->mpdu_len) !=
+ (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ goto out;
+
+ local_bh_disable();
+ iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1454,6 +1507,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1464,6 +1518,132 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd cmd = {};
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ };
+ size_t delta, len;
+ ssize_t ret;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+ cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+ /* Take care of alignment of both the position and the length */
+ delta = *ppos & 0x3;
+ cmd.addr = cpu_to_le32(*ppos - delta);
+ cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+ (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ len = min((size_t)le32_to_cpu(rsp->len) << 2,
+ iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+ len = min(len - delta, count);
+ if (len < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd *cmd;
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {};
+ size_t cmd_size;
+ size_t data_size;
+ u32 op, len;
+ ssize_t ret;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+
+ if (*ppos & 0x3 || count < 4) {
+ op = DEBUG_MEM_OP_WRITE_BYTES;
+ len = min(count, (size_t)(4 - (*ppos & 0x3)));
+ data_size = len;
+ } else {
+ op = DEBUG_MEM_OP_WRITE;
+ len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+ data_size = len << 2;
+ }
+
+ cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->op = cpu_to_le32(op);
+ cmd->len = cpu_to_le32(len);
+ cmd->addr = cpu_to_le32(*ppos);
+ if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+
+ hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+ hcmd.data[0] = (void *)cmd;
+ hcmd.len[0] = cmd_size;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ kfree(cmd);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = data_size;
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+ .read = iwl_dbgfs_mem_read,
+ .write = iwl_dbgfs_mem_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
@@ -1502,6 +1682,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, S_IWUSR);
if (!debugfs_create_bool("enable_scan_iteration_notif",
S_IRUSR | S_IWUSR,
mvm->debugfs_dir,
@@ -1560,6 +1741,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
+ debugfs_create_file("mem", S_IRUSR | S_IWUSR, dbgfs_dir, mvm,
+ &iwl_dbgfs_mem_ops);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 404b0de9e2dc..3fa43d1348a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -313,35 +313,26 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_ACK = 3,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS 5
+
/**
- * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
+ * @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd_v2 {
+struct iwl_dev_tx_power_cmd_v3 {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
-
-#define IWL_NUM_CHAIN_LIMITS 2
-#define IWL_NUM_SUB_BANDS 5
-
-/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v2: version 2 of the command, embedded here for easier software handling
- * @per_chain_restriction: per chain restrictions
- */
-struct iwl_dev_tx_power_cmd_v3 {
- /* v3 is just an extension of v2 - keep this here */
- struct iwl_dev_tx_power_cmd_v2 v2;
__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index f01dab0d0dac..0c294c9f98e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -603,6 +604,8 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved2: for future use and alignment
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
@@ -620,8 +623,10 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
- __le32 general_flags;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
+ __le16 general_flags;
+ u8 reserved2;
+ u8 scan_start_mac_id;
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
@@ -629,7 +634,7 @@ struct iwl_scan_req_umac {
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
@@ -718,8 +723,8 @@ struct iwl_scan_offload_profiles_query {
* @status: one of SCAN_COMP_STATUS_*
* @bt_status: BT on/off status
* @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ * in &struct iwl_scan_req_umac.
* @results: array of scan results, only "scanned_channels" of them are valid
*/
struct iwl_umac_scan_iter_complete_notif {
@@ -728,9 +733,8 @@ struct iwl_umac_scan_iter_complete_notif {
u8 status;
u8 bt_status;
u8 last_channel;
- __le32 tsf_low;
- __le32 tsf_high;
+ __le64 start_tsf;
struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
#endif
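The iteration-complete notification now carries the scan start time as a single little-endian 64-bit TSF (start_tsf) instead of the old tsf_low/tsf_high pair. A user-space sketch showing how the old halves combine into the value the new field reports directly; the sample numbers are made up:

#include <stdio.h>
#include <stdint.h>

/* Old layout: two 32-bit halves (already converted from little endian). */
static uint64_t tsf_from_halves(uint32_t tsf_low, uint32_t tsf_high)
{
	return ((uint64_t)tsf_high << 32) | tsf_low;
}

int main(void)
{
	uint32_t low = 0x89abcdefu, high = 0x01234567u;

	/* The new layout carries this value directly in start_tsf. */
	uint64_t start_tsf = tsf_from_halves(low, high);

	printf("scan start TSF: 0x%016llx usecs\n",
	       (unsigned long long)start_tsf);	/* 0x0123456789abcdef */
	return 0;
}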
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index d1c4fb849111..6c8e3ca79323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -433,25 +433,42 @@ struct iwl_mvm_rm_sta_cmd {
} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @igtk:
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+ __le32 ctrl_flags;
+ u8 igtk[16];
+ u8 k1[16];
+ u8 k2[16];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/**
* struct iwl_mvm_mgmt_mcast_key_cmd
* ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag
- * @IGTK:
- * @K1: unused
- * @K2: unused
+ * @igtk: IGTK master key
* @sta_id: station ID that supports IGTK
* @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check
*/
struct iwl_mvm_mgmt_mcast_key_cmd {
__le32 ctrl_flags;
- u8 IGTK[16];
- u8 K1[16];
- u8 K2[16];
+ u8 igtk[32];
__le32 key_id;
__le32 sta_id;
__le64 receive_seq_cnt;
-} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
struct iwl_mvm_wep_key {
u8 key_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 4144623e1616..59ca97a11b2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -89,7 +89,6 @@
* @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
* Should be set for 26/30 length MAC headers
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
- * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
@@ -116,7 +115,6 @@ enum iwl_tx_flags {
TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
TX_CMD_FLG_MH_PAD = BIT(20),
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
- TX_CMD_FLG_CCMP_AGG = BIT(22),
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
TX_CMD_FLG_DUR = BIT(25),
TX_CMD_FLG_FW_DROP = BIT(26),
@@ -149,7 +147,7 @@ enum iwl_tx_pm_timeouts {
* @TX_CMD_SEC_EXT: extended cipher algorithm.
* @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
* @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
- * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
* from the table instead of from the TX command.
* If the key is taken from the key table its index should be given by the
* first byte of the TX command key field.
@@ -161,7 +159,7 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
- TC_CMD_SEC_KEY_FROM_TABLE = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
};
/* TODO: how are these values OK with only a 16 bit variable??? */
@@ -578,6 +576,85 @@ struct iwl_mvm_ba_notif {
} __packed;
/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ */
+struct iwl_mvm_compressed_ba_tfd {
+ u8 q_num;
+ u8 reserved;
+ __le16 tfd_index;
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+ u8 q_num;
+ u8 tid;
+ __le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+ IWL_MVM_BA_RESP_TX_AGG,
+ IWL_MVM_BA_RESP_TX_BAR,
+ IWL_MVM_BA_RESP_TX_AGG_FAIL,
+ IWL_MVM_BA_RESP_TX_UNDERRUN,
+ IWL_MVM_BA_RESP_TX_BT_KILL,
+ IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if the first rate was not used
+ * for Tx-ing then this value will be set to 0 by FW.
+ * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ */
+struct iwl_mvm_compressed_ba_notif {
+ __le32 flags;
+ u8 sta_id;
+ u8 reduced_txp;
+ u8 initial_rate;
+ u8 retry_cnt;
+ __le32 query_byte_cnt;
+ __le16 query_frame_cnt;
+ __le16 txed;
+ __le16 done;
+ __le32 wireless_time;
+ __le32 tx_rate;
+ __le16 tfd_cnt;
+ __le16 ra_tid_cnt;
+ struct iwl_mvm_compressed_ba_tfd tfd[1];
+ struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+
+/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
* @tx: the tx commands associated with the beacon frame
* @template_id: currently equal to the mac context id of the corresponding
@@ -675,13 +752,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
tx_resp->frame_count) & 0xfff;
}
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+ SCD_CFG_DISABLE_QUEUE = 0x0,
+ SCD_CFG_ENABLE_QUEUE = 0x1,
+ SCD_CFG_UPDATE_QUEUE_TID = 0x2,
+};
+
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token:
* @sta_id: station id
* @tid:
* @scd_queue: scheduler queue to config
- * @enable: 1 queue enable, 0 queue disable
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ * Value is one of %iwl_scd_cfg_actions options
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: %enum iwl_mvm_tx_fifo
* @window: BA window size
@@ -692,7 +777,7 @@ struct iwl_scd_txq_cfg_cmd {
u8 sta_id;
u8 tid;
u8 scd_queue;
- u8 enable;
+ u8 action;
u8 aggregate;
u8 tx_fifo;
u8 window;
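struct iwl_mvm_compressed_ba_notif above ends with two back-to-back variable-length arrays: tfd_cnt entries of struct iwl_mvm_compressed_ba_tfd followed by ra_tid_cnt entries of struct iwl_mvm_compressed_ba_ratid. A stand-alone sketch of walking that layout from a buffer; the struct definitions, counts and sample values here are simplified stand-ins (no endian conversion, no length validation), not the driver's parsing code:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ba_tfd   { uint8_t q_num, reserved; uint16_t tfd_index; };
struct ba_ratid { uint8_t q_num, tid;      uint16_t ssn; };

struct ba_notif {
	uint16_t tfd_cnt;
	uint16_t ra_tid_cnt;
	uint8_t payload[];	/* tfd_cnt * ba_tfd, then ra_tid_cnt * ba_ratid */
};

static void walk_ba_notif(const struct ba_notif *n)
{
	const struct ba_tfd *tfd = (const void *)n->payload;
	/* the RA/TID array starts right after the last TFD entry */
	const struct ba_ratid *ratid = (const void *)(tfd + n->tfd_cnt);
	int i;

	for (i = 0; i < n->tfd_cnt; i++)
		printf("TFD queue %u, first un-acked index %u\n",
		       tfd[i].q_num, tfd[i].tfd_index);
	for (i = 0; i < n->ra_tid_cnt; i++)
		printf("RA/TID queue %u, tid %u, ssn %u\n",
		       ratid[i].q_num, ratid[i].tid, ratid[i].ssn);
}

int main(void)
{
	struct ba_tfd tfd = { .q_num = 3, .tfd_index = 42 };
	struct ba_ratid rt = { .q_num = 3, .tid = 5, .ssn = 100 };
	struct ba_notif *n = malloc(sizeof(*n) + sizeof(tfd) + sizeof(rt));

	if (!n)
		return 1;

	n->tfd_cnt = 1;
	n->ra_tid_cnt = 1;
	memcpy(n->payload, &tfd, sizeof(tfd));
	memcpy(n->payload + sizeof(tfd), &rt, sizeof(rt));
	walk_ba_notif(n);
	free(n);
	return 0;
}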
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 71076f02796e..97633690f3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -205,7 +205,7 @@ enum {
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
CALIB_RES_NOTIF_PHY_DB = 0x6b,
- /* PHY_DB_CMD = 0x6c, */
+ PHY_DB_CMD = 0x6c,
/* ToF - 802.11mc FTM */
TOF_CMD = 0x10,
@@ -340,6 +340,11 @@ enum iwl_prot_offload_subcmd_ids {
STORED_BEACON_NTF = 0xFF,
};
+enum iwl_fmac_debug_cmds {
+ LMAC_RD_WR = 0x0,
+ UMAC_RD_WR = 0x1,
+};
+
/* command groups */
enum {
LEGACY_GROUP = 0x0,
@@ -349,6 +354,7 @@ enum {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
+ DEBUG_GROUP = 0xf,
};
/**
@@ -482,13 +488,17 @@ struct iwl_nvm_access_cmd {
* @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side
+ * 32 bit address for API version 1, 64 bit address for API version 2.
*/
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
- __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
-} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+ union {
+ __le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
+ __le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
+ } device_phy_addr;
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
/*
* Fw items ID's
@@ -1973,8 +1983,9 @@ struct iwl_tdls_config_res {
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
-#define TX_FIFO_MAX_NUM 8
-#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_MAX_NUM_9000 8
+#define TX_FIFO_MAX_NUM 15
+#define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
@@ -2000,6 +2011,21 @@ struct iwl_tdls_config_res {
* NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
* set, the last 3 members don't exist.
*/
+struct iwl_shared_mem_cfg_v1 {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
@@ -2013,7 +2039,7 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
* VHT MU-MIMO group configuration
@@ -2129,4 +2155,48 @@ struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+/* Operation types for the debug mem access */
+enum {
+ DEBUG_MEM_OP_READ = 0,
+ DEBUG_MEM_OP_WRITE = 1,
+ DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+ __le32 op;
+ __le32 addr;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
+
+/* Status responses for the debug mem access */
+enum {
+ DEBUG_MEM_STATUS_SUCCESS = 0x0,
+ DEBUG_MEM_STATUS_FAILED = 0x1,
+ DEBUG_MEM_STATUS_LOCKED = 0x2,
+ DEBUG_MEM_STATUS_HIDDEN = 0x3,
+ DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+ __le32 status;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
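The debugfs "mem" read handler built on struct iwl_dbg_mem_access_cmd turns an arbitrary byte offset and count into a dword-aligned DEBUG_MEM_OP_READ request: it rounds the address down to a 4-byte boundary, widens the length by the same amount and caps it at DEBUG_MEM_MAX_SIZE_DWORDS. A stand-alone sketch of just that arithmetic; the struct and helper names are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEBUG_MEM_MAX_SIZE_DWORDS 32

/* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct mem_read_req {
	uint32_t addr;		/* dword-aligned start address */
	uint32_t len_dwords;	/* number of dwords to request */
	size_t delta;		/* byte offset of the caller's data inside the reply */
};

static struct mem_read_req build_read(uint64_t pos, size_t count)
{
	struct mem_read_req req;

	req.delta = pos & 0x3;
	req.addr = (uint32_t)(pos - req.delta);
	req.len_dwords = (uint32_t)(ALIGN_UP(count + req.delta, 4) / 4);
	if (req.len_dwords > DEBUG_MEM_MAX_SIZE_DWORDS)
		req.len_dwords = DEBUG_MEM_MAX_SIZE_DWORDS;
	return req;
}

int main(void)
{
	/* read 10 bytes starting at the unaligned offset 0x1003 */
	struct mem_read_req r = build_read(0x1003, 10);

	printf("addr=0x%x, len=%u dwords, copy back from byte %zu\n",
	       r.addr, r.len_dwords, r.delta);	/* addr=0x1000, len=4, delta=3 */
	return 0;
}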
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 46b52bf705fb..d89d0a1fd34e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -440,14 +440,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a04560, .end = 0x00a0457c },
{ .start = 0x00a04590, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
- { .start = 0x00a44000, .end = 0x00a7bf80 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a05c00, .end = 0x00a05c18 },
{ .start = 0x00a05400, .end = 0x00a056e8 },
{ .start = 0x00a08000, .end = 0x00a098bc },
- { .start = 0x00adfc00, .end = 0x00adfd1c },
{ .start = 0x00a02400, .end = 0x00a02758 },
};
@@ -559,7 +557,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo);
}
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+ for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 7e0cdbf8bf74..872066317fa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -90,15 +90,6 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &mvm->fw->img[ucode_type];
-}
-
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -385,9 +376,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
- int blk_idx;
- __le32 dev_phy_addr;
- struct iwl_fw_paging_cmd fw_paging_cmd = {
+ struct iwl_fw_paging_cmd paging_cmd = {
.flags =
cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
@@ -396,18 +385,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(mvm->num_of_paging_blk),
};
+ int blk_idx, size = sizeof(paging_cmd);
+
+ /* A bit hard coded - but this is the old API and will be deprecated */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ size -= NUM_OF_FW_PAGING_BLOCKS * 4;
/* loop for all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- dev_phy_addr =
- cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
- PAGE_2_EXP_SIZE);
- fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+ dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
+
+ addr = addr >> PAGE_2_EXP_SIZE;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ __le64 phy_addr = cpu_to_le64(addr);
+
+ paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
+ } else {
+ __le32 phy_addr = cpu_to_le32(addr);
+
+ paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
+ }
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+ 0, size, &paging_cmd);
}
/*
@@ -580,9 +583,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
- fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+ fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
else
- fw = iwl_get_ucode_image(mvm, ucode_type);
+ fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@@ -826,59 +829,48 @@ out:
return ret;
}
-static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_host_cmd cmd = {
- .flags = CMD_WANT_SKB,
- .data = { NULL, },
- .len = { 0, },
- };
- struct iwl_shared_mem_cfg *mem_cfg;
- struct iwl_rx_packet *pkt;
- u32 i;
+ struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+ int i;
- lockdep_assert_held(&mvm->mutex);
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+ mvm->shared_mem_cfg.txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+ mvm->shared_mem_cfg.rxfifo_size[i] =
+ le32_to_cpu(mem_cfg->rxfifo_size[i]);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
- else
- cmd.id = SHARED_MEM_CFG;
+ BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
- if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
- return;
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++)
+ mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+}
- pkt = cmd.resp_pkt;
- mem_cfg = (void *)pkt->data;
-
- mvm->shared_mem_cfg.shared_mem_addr =
- le32_to_cpu(mem_cfg->shared_mem_addr);
- mvm->shared_mem_cfg.shared_mem_size =
- le32_to_cpu(mem_cfg->shared_mem_size);
- mvm->shared_mem_cfg.sample_buff_addr =
- le32_to_cpu(mem_cfg->sample_buff_addr);
- mvm->shared_mem_cfg.sample_buff_size =
- le32_to_cpu(mem_cfg->sample_buff_size);
- mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
+static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
+ int i;
+
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
- mvm->shared_mem_cfg.page_buff_addr =
- le32_to_cpu(mem_cfg->page_buff_addr);
- mvm->shared_mem_cfg.page_buff_size =
- le32_to_cpu(mem_cfg->page_buff_size);
- /* new API has more data */
+ /* new API has more data, from the rxfifo_addr field onwards */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- mvm->shared_mem_cfg.rxfifo_addr =
- le32_to_cpu(mem_cfg->rxfifo_addr);
- mvm->shared_mem_cfg.internal_txfifo_addr =
- le32_to_cpu(mem_cfg->internal_txfifo_addr);
-
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
@@ -888,6 +880,33 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
+}
+
+static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_host_cmd cmd = {
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
+ if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_parse_shared_mem_a000(mvm, pkt);
+ else
+ iwl_mvm_parse_shared_mem(mvm, pkt);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
@@ -1027,19 +1046,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int len = sizeof(cmd);
- /* we can't do anything with the table if the FW doesn't support it */
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN)) {
- IWL_DEBUG_RADIO(mvm,
- "FW doesn't support per-chain TX power settings.\n");
- return 0;
- }
-
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
@@ -1096,27 +1107,27 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* (for example, if we were in RFKILL)
*/
ret = iwl_run_init_mvm_ucode(mvm, false);
- if (ret && !iwlmvm_mod_params.init_dbg) {
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+
+ if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
goto error;
}
- if (!iwlmvm_mod_params.init_dbg) {
- /*
- * Stop and start the transport without entering low power
- * mode. This will save the state of other components on the
- * device that are triggered by the INIT firwmare (MFUART).
- */
- _iwl_trans_stop_device(mvm->trans, false);
- ret = _iwl_trans_start_hw(mvm->trans, false);
- if (ret)
- goto error;
- }
- if (iwlmvm_mod_params.init_dbg)
- return 0;
+ /*
+ * Stop and start the transport without entering low power
+ * mode. This will save the state of other components on the
+ * device that are triggered by the INIT firmware (MFUART).
+ */
+ _iwl_trans_stop_device(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
+ if (ret)
+ goto error;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
@@ -1214,9 +1225,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* TODO: read the budget from BIOS / Platform NVM */
- if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
#else
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
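iwl_send_paging_cmd() above shifts each paging block's DMA address right by PAGE_2_EXP_SIZE and stores the result in either the 32-bit or the 64-bit member of the device_phy_addr union, trimming the command size when the old 32-bit API is in use. A stand-alone sketch of that selection; the block count, the 4 KiB block size and the sample addresses are assumptions made only to keep the arithmetic concrete:

#include <stdio.h>
#include <stdint.h>

#define NUM_BLOCKS	3	/* illustrative; the driver uses NUM_OF_FW_PAGING_BLOCKS */
#define PAGE_2_EXP	12	/* assumed 4 KiB paging blocks */

struct paging_cmd {
	uint32_t flags, block_size, block_num;
	union {
		uint32_t addr32[NUM_BLOCKS];
		uint64_t addr64[NUM_BLOCKS];
	} device_phy_addr;
};

int main(void)
{
	uint64_t phys[NUM_BLOCKS] = { 0x12345000, 0x80000000, 0x1c0000000ull };
	struct paging_cmd cmd = { .block_num = NUM_BLOCKS };
	int new_tx_api = 1;	/* pretend the new 64-bit API is in use */
	size_t size = sizeof(cmd);
	int i;

	/* the old API only carries 32-bit addresses, so the command is shorter */
	if (!new_tx_api)
		size -= NUM_BLOCKS * 4;

	for (i = 0; i < NUM_BLOCKS; i++) {
		uint64_t addr = phys[i] >> PAGE_2_EXP;	/* device sees page numbers */

		if (new_tx_api)
			cmd.device_phy_addr.addr64[i] = addr;
		else
			cmd.device_phy_addr.addr32[i] = (uint32_t)addr;
	}

	printf("command size: %zu bytes, block 2 page: 0x%llx\n",
	       size, (unsigned long long)cmd.device_phy_addr.addr64[2]);
	return 0;
}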
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 69c42ce45b8a..6b962d6b067a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
+ else
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
break;
case NL80211_IFTYPE_AP:
@@ -769,26 +774,6 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->ac[txf].fifos_mask = BIT(txf);
}
- if (vif->type == NL80211_IFTYPE_AP) {
- /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
- cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
- BIT(IWL_MVM_TX_FIFO_MCAST);
-
- /*
- * in AP mode, pass probe requests and beacons from other APs
- * (needed for ht protection); when there're no any associated
- * station don't ask FW to pass beacons to prevent unnecessary
- * wake-ups.
- */
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
- if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
- IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
- } else {
- IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
- }
- }
-
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -1186,6 +1171,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
*/
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
struct iwl_mac_data_ap *ctxt_ap,
bool add)
{
@@ -1196,6 +1182,23 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
.beacon_device_ts = 0
};
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+ /*
+ * in AP mode, pass probe requests and beacons from other APs
+ * (needed for ht protection); when there are no associated
+ * stations, don't ask FW to pass beacons to prevent unnecessary
+ * wake-ups.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+ } else {
+ IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+ }
+
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
@@ -1253,7 +1256,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for ap mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
@@ -1272,7 +1275,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for GO mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
action == FW_CTXT_ACTION_ADD);
cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5dd77e336617..318efd814037 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers;
@@ -479,17 +479,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /*
- * Enable 11w if advertised by firmware and software crypto
- * is not enabled (as the firmware will interpret some mgmt
- * packets, so enabling it with software crypto isn't safe)
+ /* Enable 11w if software crypto is not enabled (as the
+ * firmware will interpret some mgmt packets, so enabling it
+ * with software crypto isn't safe).
*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
- !iwlwifi_mod_params.sw_crypto) {
+ if (!iwlwifi_mod_params.sw_crypto) {
ieee80211_hw_set(hw, MFP_CAPABLE);
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_256;
+ hw->wiphy->n_cipher_suites++;
+ }
}
/* currently FW API supports only one optional cipher scheme */
@@ -539,9 +545,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
- hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
@@ -645,6 +649,16 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -712,6 +726,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
iwl_mvm_leds_exit(mvm);
+ if (mvm->cfg->vht_mu_mimo_supported)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
return ret;
}
@@ -1251,20 +1269,18 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v3.v2.mac_context_id =
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v3.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
};
int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
- len = sizeof(cmd.v3.v2);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -2221,6 +2237,10 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
case NL80211_IFTYPE_ADHOC:
iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
default:
/* shouldn't happen */
WARN_ON_ONCE(1);
@@ -2747,6 +2767,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
case WLAN_CIPHER_SUITE_WEP40:
@@ -2780,9 +2802,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
* GTK on AP interface is a TX-only key, return 0;
* on IBSS they're per-station and because we're lazy
* we don't support them for RX, so do the same.
- * CMAC in AP/IBSS modes must be done in software.
+ * CMAC/GMAC in AP/IBSS modes must be done in software.
*/
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
ret = -EOPNOTSUPP;
else
ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a615bb73042..d17cbf603f7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -604,16 +604,9 @@ enum iwl_mvm_tdls_cs_state {
};
struct iwl_mvm_shared_mem_cfg {
- u32 shared_mem_addr;
- u32 shared_mem_size;
- u32 sample_buff_addr;
- u32 sample_buff_size;
- u32 txfifo_addr;
+ int num_txfifo_entries;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
- u32 page_buff_addr;
- u32 page_buff_size;
- u32 rxfifo_addr;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -699,6 +692,10 @@ struct iwl_mvm_baid_data {
* it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to
* the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ * This is the state of a queue that has had traffic pass through it, but
+ * needs to be reconfigured for some reason, e.g. the queue needs to
+ * become unshared and have aggregations re-enabled on it.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
@@ -706,10 +703,11 @@ enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE,
+ IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
-#define IWL_MVM_NUM_CIPHERS 8
+#define IWL_MVM_NUM_CIPHERS 10
struct iwl_mvm {
/* for logger access */
@@ -769,6 +767,7 @@ struct iwl_mvm {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
@@ -822,6 +821,12 @@ struct iwl_mvm {
/* UMAC scan tracking */
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ /* start time of last scan in TSF of the mac that requested the scan */
+ u64 scan_start;
+
+ /* the vif that requested the current scan */
+ struct iwl_mvm_vif *scan_vif;
+
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -1124,6 +1129,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
}
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -1194,6 +1211,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
}
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+ /* TODO - replace with TLV once defined */
+ return mvm->trans->cfg->use_tfh;
+}
+
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{
#ifdef CONFIG_THERMAL
@@ -1245,6 +1268,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1281,8 +1305,6 @@ static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
}
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 7a686f67f007..eade099b6dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -66,7 +66,6 @@
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
@@ -751,96 +750,6 @@ exit:
return resp_cp;
}
-#ifdef CONFIG_ACPI
-#define WRD_METHOD "WRDD"
-#define WRDD_WIFI (0x07)
-#define WRDD_WIGIG (0x10)
-
-static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
-{
- union acpi_object *mcc_pkg, *domain_type, *mcc_value;
- u32 i;
-
- if (wrdd->type != ACPI_TYPE_PACKAGE ||
- wrdd->package.count < 2 ||
- wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrdd->package.elements[0].integer.value != 0) {
- IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
- return 0;
- }
-
- for (i = 1 ; i < wrdd->package.count ; ++i) {
- mcc_pkg = &wrdd->package.elements[i];
-
- if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
- mcc_pkg->package.count < 2 ||
- mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
- mcc_pkg = NULL;
- continue;
- }
-
- domain_type = &mcc_pkg->package.elements[0];
- if (domain_type->integer.value == WRDD_WIFI)
- break;
-
- mcc_pkg = NULL;
- }
-
- if (mcc_pkg) {
- mcc_value = &mcc_pkg->package.elements[1];
- return mcc_value->integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- u32 mcc_val;
-
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_LAR(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRD method not found\n");
- return -ENOENT;
- }
-
- /* Call WRDD with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
-
- mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
- kfree(wrdd.pointer);
- if (!mcc_val)
- return -ENOENT;
-
- mcc[0] = (mcc_val >> 8) & 0xff;
- mcc[1] = mcc_val & 0xff;
- mcc[2] = '\0';
- return 0;
-}
-#else /* CONFIG_ACPI */
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- return -ENOENT;
-}
-#endif
-
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
bool tlv_lar;
@@ -884,7 +793,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+ !iwl_get_bios_mcc(mvm->dev, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
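The removed WRDD helpers packed the regulatory country code into the low 16 bits of the ACPI return value; the consolidated iwl_get_bios_mcc() call presumably keeps the same contract. A standalone sketch of the unpacking, using the same shifts as the deleted code:

#include <stdint.h>
#include <stdio.h>

static void mcc_from_wrdd(uint32_t mcc_val, char mcc[3])
{
	mcc[0] = (mcc_val >> 8) & 0xff;		/* high byte: first letter */
	mcc[1] = mcc_val & 0xff;		/* low byte: second letter */
	mcc[2] = '\0';
}

int main(void)
{
	char mcc[3];

	mcc_from_wrdd(0x5553, mcc);		/* 'U' = 0x55, 'S' = 0x53 */
	printf("%s\n", mcc);			/* prints "US" */
	return 0;
}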
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 55d9096da68c..05fe6dd1a2c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -359,6 +359,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(BT_COEX_CI),
HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+ HCMD_NAME(PHY_DB_CMD),
HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
@@ -652,11 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* the hardware splits the A-MSDU */
if (mvm->cfg->mq_rx_supported)
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
- trans_cfg.bc_table_dword = true;
+ trans->wide_cmd_header = true;
+ trans_cfg.bc_table_dword = true;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -711,37 +710,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
- if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
- "not allowing power-up and not having nvm_file\n"))
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
goto out_free;
- /*
- * Even if nvm exists in the nvm_file driver should read again the nvm
- * from the nic because there might be entries that exist in the OTP
- * and not in the file.
- * for nics with no_power_up_nic_in_init: rely completley on nvm_file
- */
- if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
- err = iwl_nvm_init(mvm, false);
- if (err)
- goto out_free;
- } else {
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- err = iwl_run_init_mvm_ucode(mvm, true);
- if (!err || !iwlmvm_mod_params.init_dbg)
- iwl_mvm_stop_device(mvm);
- iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ if (!err || !iwlmvm_mod_params.init_dbg)
+ iwl_mvm_stop_device(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
}
scan_size = iwl_mvm_scan_size(mvm);
@@ -783,8 +766,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
flush_delayed_work(&mvm->fw_dump_wk);
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
- iwl_trans_op_mode_leave(trans);
+ iwl_trans_op_mode_leave(trans);
+
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -857,9 +840,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
- struct list_head local_list;
-
- INIT_LIST_HEAD(&local_list);
+ LIST_HEAD(local_list);
/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
@@ -966,10 +947,11 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
- else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
@@ -981,13 +963,14 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
- else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
- pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
- else if (pkt->hdr.cmd == FRAME_RELEASE)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
@@ -1666,13 +1649,14 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
+ if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
- else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
- pkt->hdr.group_id == DATA_PATH_GROUP))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
- else
+ else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
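The rx dispatch changes above compare a combined group+opcode value instead of pkt->hdr.cmd alone, so legacy and data-path commands with the same opcode byte can no longer collide. A small sketch of that idea, assuming WIDE_ID() puts the group id in the high byte and the opcode in the low byte; the numeric values below are illustrative, not the driver's real command numbering:

#include <stdint.h>
#include <stdio.h>

#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

enum { LEGACY_GROUP = 0x0, DATA_PATH_GROUP = 0x5 };
enum { REPLY_RX_MPDU_CMD = 0xc1, FRAME_RELEASE = 0xc3,
       RX_QUEUES_NOTIFICATION = 0xff };

static const char *classify(uint8_t group_id, uint8_t cmd_id)
{
	uint16_t cmd = WIDE_ID(group_id, cmd_id);

	if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
		return "rx mpdu";
	if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))
		return "rx queues notification";
	if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		return "frame release";
	return "common handler";
}

int main(void)
{
	printf("%s\n", classify(LEGACY_GROUP, REPLY_RX_MPDU_CMD));
	printf("%s\n", classify(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION));
	return 0;
}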
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index ff85865b1dda..af6d10c23e5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -694,8 +694,7 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
/* enable PM on p2p if p2p stand alone */
if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
return;
}
@@ -707,12 +706,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
ap_mvmvif->phy_ctxt->id);
/* clients are not stand alone: enable PM if DCM */
- if (!(client_same_channel || ap_same_channel) &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ if (!(client_same_channel || ap_same_channel)) {
if (vifs->bss_active)
bss_mvmvif->pm_enabled = true;
- if (vifs->p2p_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+ if (vifs->p2p_active)
p2p_mvmvif->pm_enabled = true;
return;
}
@@ -721,12 +718,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
* There is only one channel in the system and there are only
* bss and p2p clients that share it
*/
- if (client_same_channel && !vifs->ap_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ if (client_same_channel && !vifs->ap_active) {
/* share same channel */
bss_mvmvif->pm_enabled = true;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index df6c32caa5f0..a57c6ef5bc14 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -132,7 +132,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
IEEE80211_CCMP_PN_LEN) <= 0)
return -1;
- memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ if (!(stats->flag & RX_FLAG_AMSDU_MORE))
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
stats->flag |= RX_FLAG_PN_VALIDATED;
return 0;
@@ -417,10 +418,11 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
ssn = ieee80211_sn_inc(ssn);
- /* holes are valid since nssn indicates frames were received. */
- if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
- continue;
- /* Empty the list. Will have more than one frame for A-MSDU */
+ /*
+ * Empty the list. Will have more than one frame for A-MSDU.
+ * Empty list is valid as well since nssn indicates frames were
+ * received.
+ */
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
@@ -433,7 +435,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
- while (!skb_peek_tail(&reorder_buf->entries[index]))
+ while (skb_queue_empty(&reorder_buf->entries[index]))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
@@ -451,17 +453,17 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
u16 sn = 0, index = 0;
bool expired = false;
- spin_lock_bh(&buf->lock);
+ spin_lock(&buf->lock);
if (!buf->num_stored || buf->removed) {
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
return;
}
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (!skb_peek_tail(&buf->entries[index]))
+ if (skb_queue_empty(&buf->entries[index]))
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
@@ -491,7 +493,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
buf->reorder_time[index] +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -502,7 +504,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_reorder_buffer *reorder_buf;
u8 baid = data->baid;
- if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
return;
rcu_read_lock();
@@ -589,6 +591,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211, which then
+ * sends a delBA action frame.
+ */
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
@@ -598,9 +605,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- /* not a data packet */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ /* not a data packet or a bar */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
return false;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
@@ -624,6 +632,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ goto drop;
+ }
+
/*
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
@@ -883,6 +896,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 *qc = ieee80211_get_qos_ctl(hdr);
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ if (!(desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
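The reorder-buffer hunks above replace skb_peek_tail() checks with skb_queue_empty() when looking for the next occupied slot. A standalone sketch of that ring walk, with plain ints standing in for the per-slot skb queues; like the driver, it assumes num_stored is non-zero, otherwise the loop would never terminate:

#include <stdio.h>

static int next_stored_slot(const int *stored, int buf_size, int head_sn)
{
	int index = head_sn % buf_size;

	while (!stored[index])			/* empty slot, keep walking */
		index = (index + 1) % buf_size;
	return index;
}

int main(void)
{
	int stored[8] = { 0, 0, 0, 1, 0, 1, 0, 0 };

	/* head_sn = 4: slot 4 is empty, slot 5 holds a frame */
	printf("next slot with a frame: %d\n", next_stored_slot(stored, 8, 4));
	return 0;
}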
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index dac120f8861b..f279fdd6eb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -141,6 +141,7 @@ struct iwl_mvm_scan_params {
struct cfg80211_match_set *match_sets;
int n_scan_plans;
struct cfg80211_sched_scan_plan *scan_plans;
+ u32 measurement_dwell;
};
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -232,6 +233,27 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
return IWL_SCAN_TYPE_WILD;
}
+static int
+iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
+ struct cfg80211_scan_request *req,
+ struct iwl_mvm_scan_params *params)
+{
+ if (!req->duration)
+ return 0;
+
+ if (req->duration_mandatory &&
+ req->duration > scan_timing[params->type].max_out_time) {
+ IWL_DEBUG_SCAN(mvm,
+ "Measurement scan - too long dwell %hu (max out time %u)\n",
+ req->duration,
+ scan_timing[params->type].max_out_time);
+ return -EOPNOTSUPP;
+ }
+
+ return min_t(u32, (u32)req->duration,
+ scan_timing[params->type].max_out_time);
+}
+
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
@@ -717,22 +739,6 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
-static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
- enum iwl_scan_priority_ext prio)
-{
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
- return cpu_to_le32(prio);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_2)
- return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_4)
- return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
-
- return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-}
-
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
@@ -743,7 +749,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
cmd->extended_dwell = scan_timing[params->type].dwell_extended;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
@@ -1033,21 +1039,24 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ if (params->measurement_dwell) {
+ cmd->active_dwell = params->measurement_dwell;
+ cmd->passive_dwell = params->measurement_dwell;
+ cmd->extended_dwell = params->measurement_dwell;
+ } else {
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ }
cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1067,11 +1076,11 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
{
- int flags = 0;
+ u16 flags = 0;
if (params->n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
@@ -1093,6 +1102,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+ if (params->measurement_dwell)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->scan_iter_notif_enabled)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
@@ -1119,6 +1131,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels;
int uid, i;
u32 ssid_bitmap = 0;
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@@ -1136,8 +1149,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_uid_status[uid] = type;
cmd->uid = cpu_to_le32(uid);
- cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
+ cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params,
vif));
+ cmd->scan_start_mac_id = scan_vif->id;
if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
@@ -1289,6 +1303,12 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_get_scan_type(mvm,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
+ if (ret < 0)
+ return ret;
+
+ params.measurement_dwell = ret;
+
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
@@ -1315,6 +1335,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
@@ -1437,9 +1458,12 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
struct cfg80211_scan_info info = {
.aborted = aborted,
+ .scan_start_tsf = mvm->scan_start,
};
+ memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
@@ -1473,6 +1497,8 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
u8 buf[256];
+ mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
IWL_DEBUG_SCAN(mvm,
"UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
notif->status, notif->scanned_channels,
@@ -1485,6 +1511,10 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_results(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
}
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ mvm->scan_start);
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
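iwl_mvm_get_measurement_dwell() above clamps an optional requested dwell to the scan type's max_out_time and rejects a mandatory request it cannot honour. A compilable sketch of the same decision, with illustrative numbers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int get_measurement_dwell(uint16_t duration, int duration_mandatory,
				 uint32_t max_out_time)
{
	if (!duration)
		return 0;		/* no measurement dwell requested */

	if (duration_mandatory && duration > max_out_time)
		return -EOPNOTSUPP;	/* can't guarantee the full dwell */

	return duration < max_out_time ? duration : max_out_time;
}

int main(void)
{
	printf("%d\n", get_measurement_dwell(50, 0, 120));   /* 50, fits */
	printf("%d\n", get_measurement_dwell(200, 0, 120));  /* clamped to 120 */
	printf("%d\n", get_measurement_dwell(200, 1, 120));  /* rejected */
	return 0;
}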
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 3130b9c68a74..fc771885e383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
+ /* Don't try to take queues that are being reconfigured */
+ if (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_RECONFIGURING)
+ continue;
+
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
@@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
queue = ac_to_queue[IEEE80211_AC_VO];
/* Make sure queue found (or not) is legal */
- if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
- queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
- (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
- queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
- (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+ !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
IWL_ERR(mvm, "No DATA queues available to share\n");
- queue = -ENOSPC;
+ return -ENOSPC;
+ }
+
+ /* Make sure the queue isn't in the middle of being reconfigured */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+ IWL_ERR(mvm,
+ "TXQ %d is in the middle of re-config - try again\n",
+ queue);
+ return -EBUSY;
}
return queue;
}
/*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
* in such a case; otherwise, if no redirection is required, it does nothing,
* unless the %force param is true.
*/
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
bool shared_queue;
unsigned long mq;
@@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->queue_info[queue].hw_queue_to_mac80211;
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+ IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop MAC queues and wait for this queue to empty */
@@ -576,9 +588,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
- iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
- ssn, wdg_timeout);
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+
+ /* Update the TID "owner" of the queue */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
@@ -709,7 +724,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
tid, cfg.sta_id);
- return -ENOSPC;
+ return queue;
}
/*
@@ -728,21 +743,23 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (using_inactive_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
- u8 ac;
+ u8 txq_curr_ac;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
spin_lock_bh(&mvm->queue_info_lock);
- ac = mvm->queue_info[queue].mac80211_ac;
+ txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
/* Disable the queue */
- iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
- true);
+ if (disable_agg_tids)
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
&cmd);
@@ -758,6 +775,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return ret;
}
+
+ /* If TXQ is allocated to another STA, update removal in FW */
+ if (cmd.sta_id != mvmsta->sta_id)
+ iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
}
IWL_DEBUG_TX_QUEUES(mvm,
@@ -827,6 +848,119 @@ out_err:
return ret;
}
+static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ s8 sta_id;
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ else
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ s8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +1028,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
- int sta_id, tid;
+ int queue, sta_id, tid;
/* Check inactivity of queues */
iwl_mvm_inactivity_check(mvm);
mutex_lock(&mvm->mutex);
+ /* Reconfigure queues requiring reconfiguration */
+ for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+ bool reconfig;
+ bool change_owner;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ reconfig = (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_RECONFIGURING);
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ */
+ change_owner = !(mvm->queue_info[queue].tid_bitmap &
+ BIT(mvm->queue_info[queue].txq_tid)) &&
+ (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_SHARED);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (reconfig)
+ iwl_mvm_unshare_queue(mvm, queue);
+ else if (change_owner)
+ iwl_mvm_change_queue_owner(mvm, queue);
+ }
+
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -963,6 +1126,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
return 0;
}
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ int i;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvm_sta->sta_id,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ /* Make sure reserved queue is still marked as such (or allocated) */
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+ int txq_id = tid_data->txq_id;
+ int ac;
+ u8 mac_queue;
+
+ if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+ continue;
+
+ skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+ ac = tid_to_mac80211_ac[i];
+ mac_queue = mvm_sta->vif->hw_queue[ac];
+
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->sta_id, i, txq_id);
+
+ iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
+ IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ &cfg, wdg_timeout);
+
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
+
+ atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -985,6 +1203,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
spin_lock_init(&mvm_sta->lock);
+ /* In DQA mode, if this is a HW restart, re-alloc existing queues */
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+ goto update_fw;
+ }
+
mvm_sta->sta_id = sta_id;
mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color);
@@ -1048,6 +1273,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
goto err;
}
+update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
if (ret)
goto err;
@@ -1071,13 +1297,6 @@ err:
return ret;
}
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
-}
-
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
@@ -1270,9 +1489,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->sta_id, reserved_txq, *status)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
+ }
+
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
@@ -1802,11 +2043,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
- init_timer(&baid_data->session_timer);
- baid_data->session_timer.function =
- iwl_mvm_rx_agg_session_expired;
- baid_data->session_timer.data =
- (unsigned long)&mvm->baid_map[baid];
+ setup_timer(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired,
+ (unsigned long)&mvm->baid_map[baid]);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
@@ -1956,7 +2195,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
- spin_lock_bh(&mvm->queue_info_lock);
+ spin_lock(&mvm->queue_info_lock);
/*
* Note the possible cases:
@@ -1967,14 +2206,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* non-DQA mode, since the TXQ hasn't yet been allocated
*/
txq_id = mvmsta->tid_data[tid].txq_id;
- if (!iwl_mvm_is_dqa_supported(mvm) ||
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can't start tid %d agg on shared queue!\n",
+ tid);
+ goto release_locks;
+ } else if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
@@ -1982,7 +2227,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
}
- spin_unlock_bh(&mvm->queue_info_lock);
+
+ spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2252,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
ret = 0;
+ goto out;
release_locks:
+ spin_unlock(&mvm->queue_info_lock);
+out:
spin_unlock_bh(&mvmsta->lock);
return ret;
@@ -2023,6 +2272,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret;
bool alloc_queue = true;
+ enum iwl_mvm_queue_status queue_status;
u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2298,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+ spin_lock_bh(&mvm->queue_info_lock);
+ queue_status = mvm->queue_info[queue].status;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
/* In DQA mode, the existing queue might need to be reconfigured */
if (iwl_mvm_is_dqa_supported(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false;
- spin_unlock_bh(&mvm->queue_info_lock);
/*
* Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2341,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
&cfg, wdg_timeout);
- ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
- if (ret)
- return -EIO;
+ /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+ if (queue_status != IWL_MVM_QUEUE_SHARED) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ }
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2378,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 txq_id;
int err;
-
/*
* If mac80211 is cleaning its state, then say that we finished since
* our state has been cleared anyway.
@@ -2152,6 +2406,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) {
@@ -2412,9 +2667,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
/* verify the key details match the required command's expectations */
- if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
- (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
- (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+ (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+ return -EINVAL;
+
+ if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
return -EINVAL;
igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
@@ -2430,11 +2691,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_AES_CMAC:
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+ break;
default:
return -EINVAL;
}
- memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+ memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ igtk_cmd.ctrl_flags |=
+ cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -2449,6 +2717,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
remove_key ? "removing" : "installing",
igtk_cmd.sta_id);
+ if (!iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+ .ctrl_flags = igtk_cmd.ctrl_flags,
+ .key_id = igtk_cmd.key_id,
+ .sta_id = igtk_cmd.sta_id,
+ .receive_seq_cnt = igtk_cmd.receive_seq_cnt
+ };
+
+ memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+ ARRAY_SIZE(igtk_cmd_v1.igtk));
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+ }
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd), &igtk_cmd);
}
@@ -2573,7 +2854,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
}
sta_id = mvm_sta->sta_id;
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
goto end;
}
@@ -2659,7 +2942,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
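The change_owner logic in iwl_mvm_add_new_dqa_stream_wk() above hands a shared queue to a new TID when the current txq_tid owner has dropped out of tid_bitmap. A standalone sketch of that selection, picking the lowest remaining TID as the new owner (the driver uses find_first_bit for the same purpose):

#include <stdio.h>

#define MAX_TID_COUNT 8		/* illustrative, mirrors IWL_MAX_TID_COUNT */

static int pick_queue_owner(unsigned long tid_bitmap, int cur_owner)
{
	int tid;

	if (tid_bitmap & (1UL << cur_owner))
		return cur_owner;		/* owner is still mapped */

	for (tid = 0; tid <= MAX_TID_COUNT; tid++)
		if (tid_bitmap & (1UL << tid))
			return tid;		/* first remaining TID */
	return -1;				/* queue has no TIDs left */
}

int main(void)
{
	/* TIDs 3 and 5 still mapped; previous owner TID 0 was removed */
	printf("new owner: %d\n",
	       pick_queue_owner((1UL << 3) | (1UL << 5), 0));
	return 0;
}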
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index bbc1cab2c3bf..e068d5355865 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -473,9 +473,14 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -554,4 +559,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force);
+
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 58fc7b3c711c..63a051be832e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -241,11 +241,8 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
};
u32 cmdid;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
- PHY_OPS_GROUP, 0);
- else
- cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+ cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ PHY_OPS_GROUP, 0);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
@@ -261,9 +258,6 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
-
lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index b3a87a31de30..66957ac12ca4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -346,7 +346,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
- if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
@@ -441,7 +441,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
* one.
* Need to handle this.
*/
- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
@@ -490,16 +490,34 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, __le16 fc)
{
- if (iwl_mvm_is_dqa_supported(mvm)) {
- if (info->control.vif->type == NL80211_IFTYPE_AP &&
- ieee80211_is_probe_resp(fc))
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return info->hw_queue;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+ * handle legacy hostapd as well, where station may be added
+ * only after assoc.
+ */
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
- else if (ieee80211_is_mgmt(fc) &&
- info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
+
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
- }
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
- return info->hw_queue;
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ default:
+ WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+ return -1;
+ }
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
@@ -559,6 +577,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
sta_id = mvmvif->bcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
+ if (queue < 0)
+ return -1;
+
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
@@ -837,6 +858,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
}
}
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+ unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+ unsigned long now = jiffies;
+ int tid;
+
+ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ return true;
+ }
+
+ return false;
+}
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -903,9 +940,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
tid = IWL_MAX_TID_COUNT;
}
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
txq_id = mvmsta->tid_data[tid].txq_id;
+ if (ieee80211_is_mgmt(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ }
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
@@ -939,7 +980,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
-
}
/* If we are here - TXQ exists and needs to be re-activated */
@@ -952,8 +992,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id);
}
- /* Keep track of the time of the last frame for this RA/TID */
- mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+ /*
+ * If we have timed-out TIDs - schedule the worker that will
+ * reconfig the queues and update them
+ *
+ * Note that the mvm->queue_info_lock isn't being taken here in
+ * order to not serialize the TX flow. This isn't dangerous
+ * because scheduling mvm->add_stream_wk can't ruin the state,
+ * and if we DON'T schedule it now due to some race condition, then
+ * the next TX that gets here will.
+ */
+ if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED &&
+ iwl_mvm_txq_should_update(mvm, txq_id)))
+ schedule_work(&mvm->add_stream_wk);
+ }
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
@@ -1067,9 +1124,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- CMD_ASYNC);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ u8 mac80211_ac = tid_to_mac80211_ac[tid];
+
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+ vif->hw_queue[mac80211_ac], tid,
+ CMD_ASYNC);
+ }
tid_data->state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1547,41 +1608,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
-static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
- struct iwl_mvm_ba_notif *ba_notif,
- struct iwl_mvm_tid_data *tid_data)
-{
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = ba_notif->txed_2_done;
- info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
- info);
- /* TODO: not accounted if the whole A-MPDU failed */
- info->status.tx_time = tid_data->tx_time;
- info->status.status_driver_data[0] =
- (void *)(uintptr_t)ba_notif->reduced_txp;
- info->status.status_driver_data[1] =
- (void *)(uintptr_t)tid_data->rate_n_flags;
-}
-
-void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ int txq, int index,
+ struct ieee80211_tx_info *ba_info, u32 rate)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct sk_buff *skb;
- int sta_id, tid, freed;
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-
- sta_id = ba_notif->sta_id;
- tid = ba_notif->tid;
+ int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
@@ -1601,10 +1637,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
- if (tid_data->txq_id != scd_flow) {
+ if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
- "invalid BA notification: Q %d, tid %d, flow %d\n",
- tid_data->txq_id, tid, scd_flow);
+ "invalid BA notification: Q %d, tid %d\n",
+ tid_data->txq_id, tid);
rcu_read_unlock();
return;
}
@@ -1618,27 +1654,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway).
*/
- iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
- &reclaimed_skbs);
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
- (u8 *)&ba_notif->sta_addr_lo32,
- ba_notif->sta_id);
- IWL_DEBUG_TX_REPLY(mvm,
- "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
- ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
- (unsigned long long)le64_to_cpu(ba_notif->bitmap),
- scd_flow, ba_resp_scd_ssn, ba_notif->txed,
- ba_notif->txed_2_done);
-
- IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
- ba_notif->reduced_txp);
- tid_data->next_reclaimed = ba_resp_scd_ssn;
+ tid_data->next_reclaimed = index;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
freed = 0;
+ ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1660,8 +1683,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- if (freed == 1)
- iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
+ if (freed == 1) {
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ memcpy(&info->status, &ba_info->status,
+ sizeof(ba_info->status));
+ iwl_mvm_hwrate_to_tx_status(rate, info);
+ }
}
spin_unlock_bh(&mvmsta->lock);
@@ -1671,7 +1698,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Still it's important to update RS about sent vs. acked.
*/
if (skb_queue_empty(&reclaimed_skbs)) {
- struct ieee80211_tx_info ba_info = {};
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
@@ -1681,11 +1707,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
- ba_info.band = chanctx_conf->def.chan->band;
- iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
+ ba_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, ba_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
}
out:
@@ -1697,6 +1723,92 @@ out:
}
}
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int sta_id, tid, txq, index;
+ struct ieee80211_tx_info ba_info = {};
+ struct iwl_mvm_ba_notif *ba_notif;
+ struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_sta *mvmsta;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_mvm_compressed_ba_notif *ba_res =
+ (void *)pkt->data;
+
+ sta_id = ba_res->sta_id;
+ ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+ ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+ ba_info.status.tx_time =
+ (u16)le32_to_cpu(ba_res->wireless_time);
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_res->reduced_txp;
+
+ /*
+ * TODO:
+ * When supporting multi TID aggregations - we need to move
+ * next_reclaimed to be per TXQ and not per TID or handle it
+ * in a different way.
+ * This will go together with SN and AddBA offload and cannot
+ * be handled properly for now.
+ */
+ WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
+ iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
+ (int)ba_res->tfd[0].q_num,
+ le16_to_cpu(ba_res->tfd[0].tfd_index),
+ &ba_info, le32_to_cpu(ba_res->tx_rate));
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+ return;
+ }
+
+ ba_notif = (void *)pkt->data;
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+ /* "flow" corresponds to Tx queue */
+ txq = le16_to_cpu(ba_notif->scd_flow);
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ index = le16_to_cpu(ba_notif->scd_ssn);
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (WARN_ON_ONCE(!mvmsta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+ ba_info.status.ampdu_len = ba_notif->txed;
+ ba_info.status.tx_time = tid_data->tx_time;
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_notif->reduced_txp;
+
+ rcu_read_unlock();
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ le64_to_cpu(ba_notif->bitmap), txq, index,
+ ba_notif->txed, ba_notif->txed_2_done);
+
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
+}
+
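The hunks above split BA-notification handling: iwl_mvm_rx_ba_notif() now only parses the old or the new (compressed) notification format, and the common reclaim work lives in iwl_mvm_tx_reclaim(), which walks the Tx queue's circular buffer up to the reported SSN. A minimal stand-alone sketch of that circular reclaim, assuming a toy 256-entry ring in place of the driver's iwl_trans_reclaim() machinery:

#include <stdio.h>

#define QUEUE_SIZE 256  /* stands in for TFD_QUEUE_SIZE_MAX */

static int reclaim(int read_ptr, int ssn)
{
        int freed = 0;

        while (read_ptr != ssn) {
                /* the real code hands the frame at read_ptr back to mac80211 here */
                read_ptr = (read_ptr + 1) % QUEUE_SIZE;
                freed++;
        }
        return freed;
}

int main(void)
{
        /* a block-ack window that wraps around the end of the circular buffer */
        printf("freed %d frames\n", reclaim(250, 4));   /* prints: freed 10 frames */
        return 0;
}

Frames between read_ptr and the new SSN are treated as acknowledged (or lost beyond recovery) and handed back, and the wrap-around is why the helper only needs the queue and the new index.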
/*
* Note that there are transports that buffer frames before they reach
* the firmware. This means that after flush_tx_path is called, the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 68f4e7fdfc11..d04babd99b53 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -512,7 +512,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000) {
+ if (base < 0x400000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 1,
+ .action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
@@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
tid_to_mac80211_ac[cfg->tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = cfg->tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
@@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (enable_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 1,
+ .action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit,
.sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn),
@@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
int ret;
@@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;
- cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
- if (!cmd.enable)
+ cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
@@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_to_mac80211);
/* If the queue is still enabled - nothing left to do in this func */
- if (cmd.enable) {
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
return;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
@@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
- /* TODO: if queue was shared - need to re-enable AGGs */
+ /* If the queue is marked as shared - "unshare" it */
+ if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ }
}
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
@@ -1215,6 +1225,28 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
rcu_read_unlock();
}
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+{
+ bool ps_disabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Disable power save when reading GP2 */
+ ps_disabled = mvm->ps_disabled;
+ if (!ps_disabled) {
+ mvm->ps_disabled = true;
+ iwl_mvm_power_update_device(mvm);
+ }
+
+ *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *boottime = ktime_get_boot_ns();
+
+ if (!ps_disabled) {
+ mvm->ps_disabled = ps_disabled;
+ iwl_mvm_power_update_device(mvm);
+ }
+}
+
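iwl_mvm_get_sync_time() above uses a save/override/restore pattern: power save is forced off only if it was not already off, GP2 and boot time are sampled, and the previous state is put back. A stand-alone sketch of that idiom, with a dummy value standing in for the DEVICE_SYSTEM_TIME_REG read and none of the real mvm/power code:

#include <stdbool.h>
#include <stdio.h>

struct dev {
        bool ps_disabled;
};

static unsigned int read_gp2_sketch(struct dev *d)
{
        bool was_disabled = d->ps_disabled;
        unsigned int gp2;

        if (!was_disabled)
                d->ps_disabled = true;          /* push the override */

        gp2 = 12345;                            /* stands in for the register read */

        if (!was_disabled)
                d->ps_disabled = was_disabled;  /* restore the saved state */

        return gp2;
}

int main(void)
{
        struct dev d = { .ps_disabled = false };

        printf("gp2=%u ps_disabled=%d\n", read_gp2_sketch(&d), d.ps_disabled);
        return 0;
}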
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 78cf9a7f3eac..001be406a3d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -487,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
@@ -500,22 +501,36 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
+ {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
@@ -608,7 +623,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
- const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
int ret;
@@ -637,11 +651,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (iwl_trans->cfg->rf_id) {
- if (cfg == &iwl9260_2ac_cfg)
- cfg_9260lc = &iwl9260lc_2ac_cfg;
- if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
- cfg = cfg_9260lc;
- iwl_trans->cfg = cfg_9260lc;
+ if (cfg == &iwl9460_2ac_cfg &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+ cfg = &iwl9000lc_2ac_cfg;
+ iwl_trans->cfg = cfg;
}
}
#endif
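The probe-time override above swaps the 9460 configuration for the LC variant once the RF id has been read from the hardware, on top of the usual subsystem-id table match. A toy lookup that mirrors that two-step selection; the device/subdevice values are copied from the table above, but the config names are used only as strings and are not the real iwl_cfg structs:

#include <stdio.h>
#include <string.h>

struct id_map { unsigned short device, subdevice; const char *cfg; };

static const struct id_map table[] = {
        { 0x2526, 0x0010, "iwl9260_2ac_cfg" },
        { 0x9DF0, 0x0010, "iwl9460_2ac_cfg" },
        { 0x24FD, 0x0012, "iwl8275_2ac_cfg" },
};

static const char *pick_cfg(unsigned short dev, unsigned short sub, int rf_is_lc)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].device == dev && table[i].subdevice == sub) {
                        /* the patch swaps the 9460 config for the LC RF variant */
                        if (rf_is_lc && !strcmp(table[i].cfg, "iwl9460_2ac_cfg"))
                                return "iwl9000lc_2ac_cfg";
                        return table[i].cfg;
                }
        }
        return "unknown";
}

int main(void)
{
        printf("%s\n", pick_cfg(0x9DF0, 0x0010, 1));    /* iwl9000lc_2ac_cfg */
        printf("%s\n", pick_cfg(0x2526, 0x0010, 0));    /* iwl9260_2ac_cfg */
        return 0;
}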
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 11e347dd44c7..cac6d99012b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -37,6 +37,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <linux/cpu.h>
#include "iwl-fh.h"
#include "iwl-csr.h"
@@ -49,7 +50,7 @@
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
-#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
/*
* RX related structures and functions
@@ -192,41 +193,9 @@ struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
u32 flags;
+ u32 tbs;
};
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
@@ -273,13 +242,32 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
*/
struct iwl_txq {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
+ void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
@@ -294,6 +282,14 @@ struct iwl_txq {
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
};
static inline dma_addr_t
@@ -309,6 +305,16 @@ struct iwl_tso_hdr_page {
};
/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+ IWL_SHARED_IRQ_NON_RX = BIT(0),
+ IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -326,7 +332,6 @@ struct iwl_tso_hdr_page {
* @rx_buf_size: Rx buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: true when ucode supports wide command header format
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
@@ -338,8 +343,10 @@ struct iwl_tso_hdr_page {
* @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
- * @allocated_vector: the number of interrupt vector allocated by the OS
- * @default_irq_num: default irq for non rx interrupt
+ * @shared_vec_mask: the type of causes the shared vector handles
+ * (see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
@@ -391,11 +398,12 @@ struct iwl_trans_pcie {
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+ u8 max_tbs;
+ u16 tfd_size;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
u32 rx_page_order;
@@ -410,12 +418,14 @@ struct iwl_trans_pcie {
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
- u32 allocated_vector;
- u32 default_irq_num;
+ u8 shared_vec_mask;
+ u32 alloc_vecs;
+ u32 def_irq;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
+ cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};
static inline struct iwl_trans_pcie *
@@ -474,6 +484,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -486,11 +497,20 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
+ u8 idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- return le16_to_cpu(tb->hi_n_len) >> 4;
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+ }
}
/*****************************************************
@@ -617,9 +637,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
@@ -628,22 +648,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->q.id);
+ txq->id);
}
-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
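get_cmd_index() above implements the SW-window overlay described in the iwl_txq comment: the HW ring always has TFD_QUEUE_SIZE_MAX (256) slots, but the command queue's software arrays only have n_window (32) entries, so a HW index is folded into the window with a power-of-two mask. A small stand-alone illustration of that mapping, with the constants hard-coded for the command-queue case:

#include <stdio.h>

#define HW_SLOTS 256    /* TFD_QUEUE_SIZE_MAX */
#define SW_WINDOW 32    /* n_window for the command queue */

static unsigned int cmd_index(unsigned int hw_index)
{
        return hw_index & (SW_WINDOW - 1);      /* what get_cmd_index() does */
}

int main(void)
{
        for (unsigned int i = 0; i < HW_SLOTS; i += 77)
                printf("hw %3u -> sw %2u\n", i, cmd_index(i));
        return 0;
}

The mask only works because the window sizes in use (256 and 32) are powers of two.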
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 5c36e6d00622..6fe5546dc773 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -487,15 +487,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
while (pending) {
int i;
- struct list_head local_allocated;
+ LIST_HEAD(local_allocated);
gfp_t gfp_mask = GFP_KERNEL;
/* Do not post a warning if there are only a few requests */
if (pending < RX_PENDING_WATERMARK)
gfp_mask |= __GFP_NOWARN;
- INIT_LIST_HEAD(&local_allocated);
-
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;
@@ -1108,13 +1106,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
FH_RSCSR_RXQ_POS != rxq->id);
IWL_DEBUG_RX(trans,
- "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ "cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxcb._offset,
iwl_get_cmd_string(trans,
iwl_cmd_id(pkt->hdr.cmd,
pkt->hdr.group_id,
0)),
- pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
@@ -1142,7 +1141,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
if (rxq->id == 0)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
@@ -1885,6 +1884,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 0);
+ local_bh_enable();
+ }
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 1);
+ local_bh_enable();
+ }
+
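The block added above makes the default (non-RX) MSI-X handler also service the fallback RX queue and, if needed, the first RSS queue when interrupt vectors are shared. A reduced sketch of that dispatch, with made-up flag and cause values standing in for the IWL_SHARED_IRQ_* and MSIX_FH_INT_CAUSES_* definitions:

#include <stdio.h>

#define SHARED_NON_RX    (1 << 0)
#define SHARED_FIRST_RSS (1 << 1)
#define CAUSE_Q0         (1 << 0)
#define CAUSE_Q1         (1 << 1)

static void handle_rx(int q) { printf("rx_handle(queue %d)\n", q); }

static void default_irq_handler(unsigned int shared_mask, unsigned int inta_fh)
{
        if ((shared_mask & SHARED_NON_RX) && (inta_fh & CAUSE_Q0))
                handle_rx(0);
        if ((shared_mask & SHARED_FIRST_RSS) && (inta_fh & CAUSE_Q1))
                handle_rx(1);
        /* ...followed by the usual non-RX causes (uCode load, alive, HW error) */
}

int main(void)
{
        default_irq_handler(SHARED_NON_RX | SHARED_FIRST_RSS, CAUSE_Q0 | CAUSE_Q1);
        return 0;
}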
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 74f2f035bd28..ae95533e587d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
if (ret)
return ret;
- /* Notify the ucode of the loaded section number and status */
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ /* Notify ucode of loaded section number and status */
+ if (trans->cfg->use_tfh) {
+ val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
+ } else {
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ }
sec_num = (sec_num << 1) | 0x1;
}
@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (cpu == 1)
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
- else
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+ if (trans->cfg->use_tfh) {
+ if (cpu == 1)
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ } else {
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ }
return 0;
}
@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return ret;
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- (LMPM_CPU_UCODE_LOADING_COMPLETED |
- LMPM_CPU_HDRS_LOADING_COMPLETED |
- LMPM_CPU_UCODE_LOADING_STARTED) <<
- shift_param);
-
*first_ucode_section = last_read_idx;
return 0;
@@ -1161,7 +1170,7 @@ static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++)
+ for (i = 0; i < trans_pcie->alloc_vecs; i++)
synchronize_irq(trans_pcie->msix_entries[i].vector);
} else {
synchronize_irq(trans_pcie->pci_dev->irq);
@@ -1420,13 +1429,58 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ int i;
+
+ /*
+ * Access all non RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - fallback queue, which is designated for
+ * management frame, command responses etc, is always mapped to the
+ * first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
- u32 val, max_rx_vector, i;
struct iwl_trans *trans = trans_pcie->trans;
- max_rx_vector = trans_pcie->allocated_vector - 1;
-
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
@@ -1437,25 +1491,16 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
- * Each cause from the list above and the RX causes is represented as
- * a byte in the IVAR table. We access the first (N - 1) bytes and map
- * them to the (N - 1) vectors so these vectors will be used as rx
- * vectors. Then access all non rx causes and map them to the
- * default queue (N'th queue).
+ * Each cause from the causes list above and the RX causes is
+ * represented as a byte in the IVAR table. The first nibble
+ * represents the bound interrupt vector of the cause, the second
+ * represents no auto clear for this cause. This will be set if its
+ * interrupt vector is bound to serve other causes.
*/
- for (i = 0; i < max_rx_vector; i++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
- iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
- BIT(MSIX_FH_INT_CAUSES_Q(i)));
- }
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- val = trans_pcie->default_irq_num |
- MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
- }
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
@@ -1468,40 +1513,55 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_irqs, num_irqs, i, ret, nr_online_cpus;
u16 pci_cmd;
- int max_vector;
- int ret, i;
-
- if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 2),
- IWL_MAX_RX_HW_QUEUES);
- for (i = 0; i < max_vector; i++)
- trans_pcie->msix_entries[i].entry = i;
-
- ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
- MSIX_MIN_INTERRUPT_VECTORS,
- max_vector);
- if (ret > 1) {
- IWL_DEBUG_INFO(trans,
- "Enable MSI-X allocate %d interrupt vector\n",
- ret);
- trans_pcie->allocated_vector = ret;
- trans_pcie->default_irq_num =
- trans_pcie->allocated_vector - 1;
- trans_pcie->trans->num_rx_queues =
- trans_pcie->allocated_vector - 1;
- trans_pcie->msix_enabled = true;
-
- return;
- }
+
+ if (!trans->cfg->mq_rx_supported)
+ goto enable_msi;
+
+ nr_online_cpus = num_online_cpus();
+ max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ for (i = 0; i < max_irqs; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_irqs);
+ if (num_irqs < 0) {
IWL_DEBUG_INFO(trans,
- "ret = %d %s move to msi mode\n", ret,
- (ret == 1) ?
- "can't allocate more than 1 interrupt vector" :
- "failed to enable msi-x mode");
- pci_disable_msix(pdev);
+ "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+ num_irqs);
+ goto enable_msi;
}
+ trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled. %d interrupt vectors were allocated\n",
+ num_irqs);
+ /*
+ * In case the OS provides fewer interrupts than requested, different
+ * causes will share the same interrupt vector as follows:
+ * One interrupt less: non rx causes shared with FBQ.
+ * Two interrupts less: non rx causes shared with FBQ and RSS.
+ * More than two interrupts: we will use fewer RSS queues.
+ */
+ if (num_irqs <= nr_online_cpus) {
+ trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+ IWL_SHARED_IRQ_FIRST_RSS;
+ } else if (num_irqs == nr_online_cpus + 1) {
+ trans_pcie->trans->num_rx_queues = num_irqs;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+ } else {
+ trans_pcie->trans->num_rx_queues = num_irqs - 1;
+ }
+
+ trans_pcie->alloc_vecs = num_irqs;
+ trans_pcie->msix_enabled = true;
+ return;
+
+enable_msi:
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
@@ -1514,36 +1574,57 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
}
}
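The comment in iwl_pcie_set_interrupt_capa() above describes how the driver degrades when the OS grants fewer MSI-X vectors than requested. The policy can be restated as a small stand-alone check (values are illustrative; the IWL_MAX_RX_HW_QUEUES cap is omitted):

#include <stdio.h>

#define SHARED_NON_RX    (1 << 0)
#define SHARED_FIRST_RSS (1 << 1)

static void plan(int nr_online_cpus, int num_irqs)
{
        int num_rx_queues, mask = 0;

        if (num_irqs <= nr_online_cpus) {
                num_rx_queues = num_irqs + 1;
                mask = SHARED_NON_RX | SHARED_FIRST_RSS;
        } else if (num_irqs == nr_online_cpus + 1) {
                num_rx_queues = num_irqs;
                mask = SHARED_NON_RX;
        } else {
                num_rx_queues = num_irqs - 1;
        }

        printf("cpus=%d irqs=%d -> rx_queues=%d shared_mask=0x%x\n",
               nr_online_cpus, num_irqs, num_rx_queues, mask);
}

int main(void)
{
        plan(4, 6);     /* full allocation: dedicated non-RX vector */
        plan(4, 5);     /* one short: non-RX causes share a vector with the fallback queue */
        plan(4, 4);     /* two short: non-RX, fallback and first RSS all share vector 0 */
        return 0;
}

With four online CPUs the driver asks for six vectors; getting only four means the first vector ends up serving the non-RX causes, the fallback queue and the first RSS queue.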
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+{
+ int iter_rx_q, i, ret, cpu, offset;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+ iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ offset = 1 + i;
+ for (; i < iter_rx_q ; i++) {
+ /*
+ * Get the cpu prior to the place to search
+ * (i.e. return will be > i - 1).
+ */
+ cpu = cpumask_next(i - offset, cpu_online_mask);
+ cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+ ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->affinity_mask[i]);
+ if (ret)
+ IWL_ERR(trans_pcie->trans,
+ "Failed to set affinity mask for IRQ %d\n",
+ i);
+ }
+}
+
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
- int i, last_vector;
-
- last_vector = trans_pcie->trans->num_rx_queues;
+ int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++) {
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
int ret;
-
- ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
- iwl_pcie_msix_isr,
- (i == last_vector) ?
- iwl_pcie_irq_msix_handler :
- iwl_pcie_irq_rx_msix_handler,
- IRQF_SHARED,
- DRV_NAME,
- &trans_pcie->msix_entries[i]);
+ struct msix_entry *msix_entry;
+
+ msix_entry = &trans_pcie->msix_entries[i];
+ ret = devm_request_threaded_irq(&pdev->dev,
+ msix_entry->vector,
+ iwl_pcie_msix_isr,
+ (i == trans_pcie->def_irq) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ DRV_NAME,
+ msix_entry);
if (ret) {
- int j;
-
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
- for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[j].vector,
- &trans_pcie->msix_entries[j]);
- pci_disable_msix(pdev);
+
return ret;
}
}
+ iwl_pcie_irq_set_affinity(trans_pcie->trans);
return 0;
}
@@ -1672,7 +1753,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
- trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
@@ -1703,22 +1783,16 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
- for (i = 0; i < trans_pcie->allocated_vector; i++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ irq_set_affinity_hint(
+ trans_pcie->msix_entries[i].vector,
+ NULL);
+ }
- pci_disable_msix(trans_pcie->pci_dev);
trans_pcie->msix_enabled = false;
} else {
- free_irq(trans_pcie->pci_dev->irq, trans);
-
iwl_pcie_free_ict(trans);
-
- pci_disable_msi(trans_pcie->pci_dev);
}
- iounmap(trans_pcie->hw_base);
- pci_release_regions(trans_pcie->pci_dev);
- pci_disable_device(trans_pcie->pci_dev);
iwl_pcie_free_fw_monitor(trans);
@@ -1890,7 +1964,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
txq->frozen = freeze;
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
goto next_queue;
if (freeze) {
@@ -1938,7 +2012,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
txq->block--;
if (!txq->block) {
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (i << 8));
+ txq->write_ptr | (i << 8));
}
} else if (block) {
txq->block++;
@@ -1958,10 +2032,14 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
int cnt;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
+ txq->read_ptr, txq->write_ptr);
+
+ if (trans->cfg->use_tfh)
+ /* TODO: access new SCD registers and dump them */
+ return;
scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ SCD_TX_STTS_QUEUE_OFFSET(txq->id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -1996,7 +2074,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
int ret = 0;
@@ -2014,13 +2091,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
- wr_ptr = ACCESS_ONCE(q->write_ptr);
+ wr_ptr = ACCESS_ONCE(txq->write_ptr);
- while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2029,7 +2105,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
usleep_range(1000, 2000);
}
- if (q->read_ptr != q->write_ptr) {
+ if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
@@ -2197,7 +2273,6 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
char *buf;
int pos = 0;
int cnt;
@@ -2215,10 +2290,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, q->read_ptr, q->write_ptr,
+ cnt, txq->read_ptr, txq->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
txq->need_update, txq->frozen,
@@ -2424,13 +2498,14 @@ err:
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < IWL_NUM_OF_TBS; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+ for (i = 0; i < trans_pcie->max_tbs; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
@@ -2645,7 +2720,7 @@ static struct iwl_trans_dump_data
/* host commands */
len += sizeof(*data) +
- cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (trans_pcie->fw_mon_page) {
@@ -2713,12 +2788,13 @@ static struct iwl_trans_dump_data
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
- ptr = cmdq->q.write_ptr;
- for (i = 0; i < cmdq->q.n_window; i++) {
- u8 idx = get_cmd_index(&cmdq->q, ptr);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = get_cmd_index(cmdq, ptr);
u32 caplen, cmdlen;
- cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+ trans_pcie->tfd_size * ptr);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -2788,6 +2864,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
+ .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
+
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
@@ -2821,13 +2899,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans *trans;
int ret, addr_size;
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
-
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
@@ -2841,9 +2921,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -2861,6 +2938,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
else
addr_size = 36;
+ if (cfg->use_tfh) {
+ trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+
+ } else {
+ trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
@@ -2875,21 +2962,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto out_pci_disable_device;
+ goto out_no_pci;
}
}
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- goto out_pci_disable_device;
+ dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
+ goto out_no_pci;
}
- trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
if (!trans_pcie->hw_base) {
- dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pcim_iomap_table failed\n");
ret = -ENODEV;
- goto out_pci_release_regions;
+ goto out_no_pci;
}
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2916,7 +3003,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = iwl_pcie_prepare_card_hw(trans);
if (ret) {
IWL_WARN(trans, "Exit HW not ready\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
/*
@@ -2933,7 +3020,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
if (iwl_trans_grab_nic_access(trans, &flags)) {
@@ -2965,15 +3052,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (trans_pcie->msix_enabled) {
if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
- goto out_pci_release_regions;
+ goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
if (ret)
- goto out_pci_disable_msi;
+ goto out_no_pci;
- ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
@@ -2991,12 +3079,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
-out_pci_disable_msi:
- pci_disable_msi(pdev);
-out_pci_release_regions:
- pci_release_regions(pdev);
-out_pci_disable_device:
- pci_disable_device(pdev);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
iwl_trans_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 18650dccdb58..e9a278b60dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,7 +71,7 @@
*
***************************************************/
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
@@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
q->n_window = slots_num;
q->id = id;
@@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
spin_lock(&txq->lock);
/* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_unlock(&txq->lock);
return;
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
jiffies_to_msecs(txq->wd_timeout));
iwl_trans_pcie_log_scd_error(trans, txq);
@@ -176,22 +176,21 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
* iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt)
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
u8 sec_ctl = 0;
- u8 sta_id = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+ (void *)txq->entries[txq->write_ptr].cmd->payload;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -205,14 +204,32 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
-
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- bc_ent = cpu_to_le16(len | (sta_id << 12));
+ if (trans->cfg->use_tfh) {
+ u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the
+ * TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM- 0 for one chunk, 1 for 2 and so on.
+ * If, for example, TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched
+ */
+ u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ } else {
+ u8 sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+ }
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
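The use_tfh branch above encodes, in the upper bits of the byte-count entry, how many 64-byte chunks of the TFD the DMA engine has to fetch. A worked example of that arithmetic; the 8-byte header and 8-byte TB descriptor sizes are assumptions chosen only so that 3 TBs occupy 32 bytes, matching the comment, and are not the real struct iwl_tfh_tfd layout:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* assumed layout: 8-byte TFD header, 8 bytes per TB descriptor */
        const unsigned int tfd_header = 8, tb_size = 8;
        const unsigned int tbs[] = { 3, 10, 20, 25 };

        for (unsigned int i = 0; i < sizeof(tbs) / sizeof(tbs[0]); i++) {
                unsigned int filled = tfd_header + tbs[i] * tb_size;
                unsigned int field = DIV_ROUND_UP(filled, 64) - 1;

                printf("%2u TBs -> %3u bytes -> %u chunk(s) to fetch (field %u)\n",
                       tbs[i], filled, field + 1, field);
        }
        return 0;
}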
@@ -227,12 +244,12 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ (void *)txq->entries[read_ptr].cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -240,6 +257,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
@@ -255,7 +273,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
- int txq_id = txq->q.id;
+ int txq_id = txq->id;
lockdep_assert_held(&txq->lock);
@@ -289,10 +307,10 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* if not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx).
*/
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
if (!txq->block)
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ txq->write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
@@ -312,49 +330,93 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
+{
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ dma_addr_t hi_len;
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
- return addr;
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+ }
}
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- tfd->num_tbs = idx + 1;
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd_fh->num_tbs = idx + 1;
+ }
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- return tfd->num_tbs & 0x1f;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+
+ return tfd->num_tbs & 0x1f;
+ }
}
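The legacy (non-TFH) branches of the accessors above pack a 36-bit DMA address into a 32-bit lo word plus a 4-bit nibble that shares hi_n_len with the 12-bit length. A stand-alone round-trip of that packing (endianness and unaligned-access handling are left out, and the address is just an example value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t addr = 0xABCDEF012ULL;         /* 36-bit example address */
        uint16_t len = 128;

        uint32_t lo = (uint32_t)addr;
        uint16_t hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));

        /* decode it the way iwl_pcie_tfd_tb_get_addr() does */
        uint64_t decoded = lo | (((uint64_t)(hi_n_len & 0xF) << 16) << 16);

        printf("len=%u addr=0x%llx decoded=0x%llx\n",
               hi_n_len >> 4, (unsigned long long)addr,
               (unsigned long long)decoded);
        return 0;
}

The double 16-bit shift when decoding mirrors the driver's trick for keeping 32-bit builds, where dma_addr_t is 32 bits and that branch is dead code, free of shift-count warnings.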
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_txq *txq, int index)
{
- int i;
- int num_tbs;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
/* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
@@ -363,18 +425,30 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
/* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
- if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ if (meta->tbs & BIT(i))
dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd, i),
DMA_TO_DEVICE);
else
dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd,
+ i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd,
+ i),
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
+
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
+
}
/*
@@ -388,20 +462,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
*/
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_tfd *tfd_tmp = txq->tfds;
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
- int rd_ptr = txq->q.read_ptr;
- int idx = get_cmd_index(&txq->q, rd_ptr);
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
@@ -423,23 +495,21 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *tfd;
u32 num_tbs;
- q = &txq->q;
- tfd_tmp = txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
+ tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
if (reset)
- memset(tfd, 0, sizeof(*tfd));
+ memset(tfd, 0, trans_pcie->tfd_size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ trans_pcie->max_tbs);
return -EINVAL;
}
@@ -447,7 +517,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -457,7 +527,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
u32 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
size_t tb0_buf_sz;
int i;
@@ -468,7 +538,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
(unsigned long)txq);
txq->trans_pcie = trans_pcie;
- txq->q.n_window = slots_num;
+ txq->n_window = slots_num;
txq->entries = kcalloc(slots_num,
sizeof(struct iwl_pcie_txq_entry),
@@ -489,7 +559,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
+ &txq->dma_addr, GFP_KERNEL);
if (!txq->tfds)
goto error;
@@ -503,11 +573,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->q.id = txq_id;
+ txq->id = txq_id;
return 0;
err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
@@ -531,7 +601,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num, txq_id);
if (ret)
return ret;
@@ -545,10 +615,10 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
return 0;
}
@@ -595,15 +665,14 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
+ while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, q->read_ptr);
+ txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) {
- struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -611,15 +680,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
if (txq_id != trans_pcie->cmd_queue) {
IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- q->id);
+ txq->id);
iwl_trans_unref(trans);
} else {
iwl_pcie_clear_cmd_in_flight(trans);
@@ -663,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
+ for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
}
@@ -671,13 +740,13 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
- txq->tfds, txq->q.dma_addr);
- txq->q.dma_addr = 0;
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
@@ -703,6 +772,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ if (trans->cfg->use_tfh)
+ return;
+
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -758,14 +830,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
- txq->q.read_ptr = 0;
- txq->q.write_ptr = 0;
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
}
/* Tell NIC where to find the "keep warm" buffer */
@@ -970,11 +1042,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
- if (trans->cfg->use_tfh)
+ if (trans->cfg->use_tfh) {
iwl_write_direct32(trans, TFH_TRANSFER_MODE,
TFH_TRANSFER_MAX_PENDING_REQ |
TFH_CHUNK_SIZE_128 |
TFH_CHUNK_SPLIT_MODE);
+ return 0;
+ }
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
@@ -1007,7 +1081,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
* if empty delete timer, otherwise move timer forward
* since we're making progress on this queue
*/
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
del_timer(&txq->stuck_timer);
else
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
@@ -1020,7 +1094,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
- struct iwl_queue *q = &txq->q;
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1035,21 +1108,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->q.read_ptr == tfd_num)
+ if (txq->read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
+ txq_id, txq->read_ptr, tfd_num, ssn);
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
last_to_free = iwl_queue_dec_wrap(tfd_num);
- if (!iwl_queue_used(q, last_to_free)) {
+ if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1057,9 +1130,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
- struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+ txq->read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1068,16 +1141,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ txq->entries[txq->read_ptr].skb = NULL;
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ if (!trans->cfg->use_tfh)
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ if (iwl_queue_space(txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1109,12 +1183,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ if (iwl_queue_space(txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
- if (q->read_ptr == q->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ if (txq->read_ptr == txq->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
iwl_trans_unref(trans);
}
@@ -1176,31 +1250,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
+ idx, txq->write_ptr, txq->read_ptr);
iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1249,6 +1322,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+ if (cfg && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) {
@@ -1283,14 +1359,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = txq->q.read_ptr;
+ ssn = txq->read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->q.read_ptr = (ssn & 0xff);
- txq->q.write_ptr = (ssn & 0xff);
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
@@ -1343,6 +1419,14 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
txq->ampdu = !shared_mode;
}
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->scd_bc_tbls.dma +
+ txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
+
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1366,6 +1450,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return;
}
+ if (configure_scd && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
@@ -1395,7 +1482,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1410,7 +1496,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- if (WARN(!trans_pcie->wide_cmd_header &&
+ if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1494,7 +1580,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1503,7 +1589,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- idx = get_cmd_index(q, q->write_ptr);
+ idx = get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
@@ -1522,7 +1608,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
@@ -1530,7 +1616,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
cmd_pos = sizeof(struct iwl_cmd_header);
@@ -1580,7 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1596,8 +1682,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1620,8 +1706,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1629,8 +1715,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
- BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
- sizeof(out_meta->flags) * BITS_PER_BYTE);
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
@@ -1639,7 +1724,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
@@ -1651,7 +1736,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1689,20 +1774,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock_bh(&txq->lock);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+ iwl_pcie_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -1815,14 +1900,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
@@ -1900,7 +1984,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 tb2_len;
int i;
@@ -1915,8 +1999,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1935,19 +2019,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
- out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ out_meta->tbs |= BIT(tb_idx);
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
@@ -2008,7 +2092,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- struct iwl_queue *q = &txq->q;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
@@ -2022,8 +2105,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
@@ -2179,7 +2262,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
return 0;
out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
return ret;
}
#else /* CONFIG_INET */
@@ -2203,9 +2286,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
- struct iwl_queue *q;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
+ void *tfd;
u16 len, tb1_len;
bool wait_write_ptr;
__le16 fc;
@@ -2214,7 +2297,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
bool amsdu;
txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
@@ -2236,7 +2318,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -2249,11 +2331,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(q) < q->high_mark) {
+ if (iwl_queue_space(txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(q) < 3)) {
+ if (unlikely(iwl_queue_space(txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2274,19 +2356,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != q->write_ptr,
+ (wifi_seq & 0xff) != txq->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
+ txq_id, wifi_seq, txq->write_ptr);
/* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2294,7 +2376,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta = &txq->entries[txq->write_ptr].meta;
out_meta->flags = 0;
/*
@@ -2319,7 +2401,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
@@ -2344,13 +2426,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
}
+ tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_tfd_get_num_tbs(trans, tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
@@ -2364,12 +2448,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 3e5fa7872b64..a5656bc0e6aa 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3041,13 +3041,9 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p->length > 1024 || !p->pointer)
return -EINVAL;
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ return PTR_ERR(param);
}
if (p->length < sizeof(struct prism2_download_param) +
@@ -3803,13 +3799,9 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ return PTR_ERR(param);
}
switch (param->cmd) {
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 56f109bc8394..bca6935a94db 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1613,10 +1613,8 @@ static int ezusb_probe(struct usb_interface *interface,
}
upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!upriv->read_urb) {
- err("No free urbs available");
+ if (!upriv->read_urb)
goto error;
- }
if (le16_to_cpu(ep->wMaxPacketSize) != 64)
pr_warn("bulk in: wMaxPacketSize!= 64\n");
if (ep->bEndpointAddress != (2 | USB_DIR_IN))
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8c35ac838fce..431f13b4faf6 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -487,7 +487,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
};
static spinlock_t hwsim_radio_lock;
-static struct list_head hwsim_radios;
+static LIST_HEAD(hwsim_radios);
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@@ -3376,7 +3376,6 @@ static int __init init_mac80211_hwsim(void)
mac80211_hwsim_unassign_vif_chanctx;
spin_lock_init(&hwsim_radio_lock);
- INIT_LIST_HEAD(&hwsim_radios);
err = register_pernet_device(&hwsim_net_ops);
if (err)
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 799a2efe5793..e0ade40d9497 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -198,22 +198,16 @@ static int if_usb_probe(struct usb_interface *intf,
}
cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->rx_urb) {
- lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
+ if (!cardp->rx_urb)
goto dealloc;
- }
cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->tx_urb) {
- lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
+ if (!cardp->tx_urb)
goto dealloc;
- }
cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->cmd_urb) {
- lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
+ if (!cardp->cmd_urb)
goto dealloc;
- }
cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
GFP_KERNEL);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 81c60d0a1bda..43dccd5b0291 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -260,22 +260,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
rdr_event = (void *)(skb->data + sizeof(u32));
- if (le32_to_cpu(rdr_event->passed)) {
- mwifiex_dbg(priv->adapter, MSG,
- "radar detected; indicating kernel\n");
- if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
- mwifiex_dbg(priv->adapter, ERROR,
- "Failed to stop CAC in FW\n");
- cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
- GFP_KERNEL);
- mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
- rdr_event->reg_domain);
- mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
- rdr_event->det_type);
- } else {
- mwifiex_dbg(priv->adapter, MSG,
- "false radar detection event!\n");
- }
+ mwifiex_dbg(priv->adapter, MSG,
+ "radar detected; indicating kernel\n");
+ if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop CAC in FW\n");
+ cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
+ GFP_KERNEL);
+ mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+ rdr_event->reg_domain);
+ mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+ rdr_event->det_type);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index afdd58aa90de..ea0fa68b9913 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -171,9 +171,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
struct mwifiex_sta_node *node)
{
-
- if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
- !priv->ap_11n_enabled)
+ if (!node || ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP) &&
+ !priv->ap_11n_enabled) ||
+ ((priv->bss_mode == NL80211_IFTYPE_ADHOC) &&
+ !priv->adapter->adhoc_11n_enabled))
return 0;
return node->is_11n_enabled;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index a74cc43b1953..94480123efa3 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -78,8 +78,15 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
*/
static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
{
- int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
+ int ret;
+
+ if (!payload) {
+ mwifiex_dbg(priv->adapter, INFO, "info: fw drop data\n");
+ return 0;
+ }
+
+ ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
if (!ret)
return 0;
@@ -921,3 +928,72 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
else
mwifiex_update_ampdu_rxwinsize(adapter, false);
}
+
+/* This function handles the rxba_sync event.
+ */
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ u8 *event_buf, u16 len)
+{
+ struct mwifiex_ie_types_rxba_sync *tlv_rxba = (void *)event_buf;
+ u16 tlv_type, tlv_len;
+ struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ u8 i, j;
+ u16 seq_num, tlv_seq_num, tlv_bitmap_len;
+ int tlv_buf_left = len;
+ int ret;
+ u8 *tmp;
+
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
+ event_buf, len);
+ while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ tlv_type = le16_to_cpu(tlv_rxba->header.type);
+ tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (tlv_type != TLV_TYPE_RXBA_SYNC) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Wrong TLV id=0x%x\n", tlv_type);
+ return;
+ }
+
+ tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
+ tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%pM tid=%d seq_num=%d bitmap_len=%d\n",
+ tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
+ tlv_bitmap_len);
+
+ rx_reor_tbl_ptr =
+ mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
+ tlv_rxba->mac);
+ if (!rx_reor_tbl_ptr) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Can not find rx_reorder_tbl!");
+ return;
+ }
+
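+ /* Each set bit in the bitmap marks a sequence number that the firmware
+ * has already flushed; drop it from the Rx reorder table so the
+ * receive window stays in sync.
+ */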
+ for (i = 0; i < tlv_bitmap_len; i++) {
+ for (j = 0 ; j < 8; j++) {
+ if (tlv_rxba->bitmap[i] & (1 << j)) {
+ seq_num = (MAX_TID_VALUE - 1) &
+ (tlv_seq_num + i * 8 + j);
+
+ mwifiex_dbg(priv->adapter, ERROR,
+ "drop packet,seq=%d\n",
+ seq_num);
+
+ ret = mwifiex_11n_rx_reorder_pkt
+ (priv, seq_num, tlv_rxba->tid,
+ tlv_rxba->mac, 0, NULL);
+
+ if (ret)
+ mwifiex_dbg(priv->adapter,
+ ERROR,
+ "Fail to drop packet");
+ }
+ }
+ }
+
+ tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
+ tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
+ }
+}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 63ecea89b4ab..22d991f514c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -81,5 +81,6 @@ struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
-
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ u8 *event_buf, u16 len);
#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a8ff969c95c2..39ce76ad00bc 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -484,6 +484,29 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
}
/*
+ * CFG802.11 operation handler to set default mgmt key.
+ */
+static int
+mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *netdev,
+ u8 key_index)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ struct mwifiex_ds_encrypt_key encrypt_key;
+
+ wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index);
+
+ memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+ encrypt_key.key_len = WLAN_KEY_LEN_CCMP;
+ encrypt_key.key_index = key_index;
+ encrypt_key.is_igtk_def_key = true;
+ eth_broadcast_addr(encrypt_key.mac_addr);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, true, &encrypt_key, true);
+}
+
+/*
* This function sends domain information to the firmware.
*
* The following information are passed to the firmware -
@@ -2012,10 +2035,6 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- mwifiex_dbg(priv->adapter, MSG,
- "info: successfully disconnected from %pM:\t"
- "reason code %d\n", priv->cfg_bssid, reason_code);
-
eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;
@@ -2485,6 +2504,16 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_request = request;
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
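+ /* Keep the address bits covered by mac_addr_mask and randomize
+ * the remaining bits.
+ */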
+ ether_addr_copy(priv->random_mac, request->mac_addr);
+ for (i = 0; i < ETH_ALEN; i++) {
+ priv->random_mac[i] &= request->mac_addr_mask[i];
+ priv->random_mac[i] |= get_random_int() &
+ ~(request->mac_addr_mask[i]);
+ }
+ }
+
+ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = request->n_ssids;
user_scan_cfg->ssid_list = request->ssids;
@@ -2726,7 +2755,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
- ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ ht_info->cap |= 2 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
else
ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
@@ -3913,6 +3942,88 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
return ret;
}
+#ifdef CONFIG_NL80211_TESTMODE
+
+enum mwifiex_tm_attr {
+ __MWIFIEX_TM_ATTR_INVALID = 0,
+ MWIFIEX_TM_ATTR_CMD = 1,
+ MWIFIEX_TM_ATTR_DATA = 2,
+
+ /* keep last */
+ __MWIFIEX_TM_ATTR_AFTER_LAST,
+ MWIFIEX_TM_ATTR_MAX = __MWIFIEX_TM_ATTR_AFTER_LAST - 1,
+};
+
+static const struct nla_policy mwifiex_tm_policy[MWIFIEX_TM_ATTR_MAX + 1] = {
+ [MWIFIEX_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [MWIFIEX_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = MWIFIEX_SIZE_OF_CMD_BUFFER },
+};
+
+enum mwifiex_tm_command {
+ MWIFIEX_TM_CMD_HOSTCMD = 0,
+};
+
+static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ struct mwifiex_ds_misc_cmd *hostcmd;
+ struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
+ struct mwifiex_adapter *adapter;
+ struct sk_buff *skb;
+ int err;
+
+ if (!priv)
+ return -EINVAL;
+ adapter = priv->adapter;
+
+ err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
+ mwifiex_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[MWIFIEX_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[MWIFIEX_TM_ATTR_CMD])) {
+ case MWIFIEX_TM_CMD_HOSTCMD:
+ if (!tb[MWIFIEX_TM_ATTR_DATA])
+ return -EINVAL;
+
+ hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+ if (!hostcmd)
+ return -ENOMEM;
+
+ hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
+ memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]),
+ hostcmd->len);
+
+ if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
+ dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+ kfree(hostcmd);
+ return -EFAULT;
+ }
+
+ /* process hostcmd response */
+ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
+ if (!skb) {
+ kfree(hostcmd);
+ return -ENOMEM;
+ }
+ err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
+ hostcmd->len, hostcmd->cmd);
+ if (err) {
+ kfree_skb(skb);
+ kfree(hostcmd);
+ return -EMSGSIZE;
+ }
+
+ err = cfg80211_testmode_reply(skb);
+ kfree(hostcmd);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif
+
static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
@@ -3994,6 +4105,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
.del_key = mwifiex_cfg80211_del_key,
+ .set_default_mgmt_key = mwifiex_cfg80211_set_default_mgmt_key,
.mgmt_tx = mwifiex_cfg80211_mgmt_tx,
.mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
.remain_on_channel = mwifiex_cfg80211_remain_on_channel,
@@ -4025,6 +4137,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
.add_station = mwifiex_cfg80211_add_station,
.change_station = mwifiex_cfg80211_change_station,
+ CFG80211_TESTMODE_CMD(mwifiex_tm_cmd)
.get_channel = mwifiex_cfg80211_get_channel,
.start_radar_detection = mwifiex_cfg80211_start_radar_detection,
.channel_switch = mwifiex_cfg80211_channel_switch,
@@ -4135,9 +4248,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->cipher_suites = mwifiex_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- if (adapter->region_code)
- wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
+ if (adapter->regd) {
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS |
REGULATORY_COUNTRY_IE_IGNORE;
+ wiphy_apply_custom_regulatory(wiphy, adapter->regd);
+ }
ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
@@ -4173,7 +4289,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
- NL80211_FEATURE_NEED_OBSS_SCAN;
+ NL80211_FEATURE_NEED_OBSS_SCAN |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
@@ -4200,19 +4319,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
return ret;
}
- if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
- mwifiex_dbg(adapter, INFO,
- "driver hint alpha2: %2.2s\n", reg_alpha2);
- regulatory_hint(wiphy, reg_alpha2);
- } else {
- if (adapter->region_code == 0x00) {
- mwifiex_dbg(adapter, WARN, "Ignore world regulatory domain\n");
+ if (!adapter->regd) {
+ if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
+ mwifiex_dbg(adapter, INFO,
+ "driver hint alpha2: %2.2s\n", reg_alpha2);
+ regulatory_hint(wiphy, reg_alpha2);
} else {
- country_code =
- mwifiex_11d_code_2_region(adapter->region_code);
- if (country_code &&
- regulatory_hint(wiphy, country_code))
- mwifiex_dbg(priv->adapter, ERROR, "regulatory_hint() failed\n");
+ if (adapter->region_code == 0x00) {
+ mwifiex_dbg(adapter, WARN,
+ "Ignore world regulatory domain\n");
+ } else {
+ wiphy->regulatory_flags |=
+ REGULATORY_DISABLE_BEACON_HINTS |
+ REGULATORY_COUNTRY_IE_IGNORE;
+ country_code =
+ mwifiex_11d_code_2_region(
+ adapter->region_code);
+ if (country_code &&
+ regulatory_hint(wiphy, country_code))
+ mwifiex_dbg(priv->adapter, ERROR,
+ "regulatory_hint() failed\n");
+ }
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index c29f26d8baf2..53477280f39c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -480,13 +480,27 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
*/
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
- int ret;
+ int ret, i;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct sk_buff *skb = adapter->event_skb;
- u32 eventcause = adapter->event_cause;
+ u32 eventcause;
struct mwifiex_rxinfo *rx_info;
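+ /* Radar-detected events do not identify an interface; tag the event
+ * cause with the bss_num/bss_type of the interface that has 11h
+ * active so it can be dispatched to that interface.
+ */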
+ if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && mwifiex_is_11h_active(priv)) {
+ adapter->event_cause |=
+ ((priv->bss_num & 0xff) << 16) |
+ ((priv->bss_type & 0xff) << 24);
+ break;
+ }
+ }
+ }
+
+ eventcause = adapter->event_cause;
+
/* Save the last event to debug log */
adapter->dbg.last_event_index =
(adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
@@ -581,6 +595,14 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
}
+ /* We don't expect commands in manufacturing mode. They are prepared
+ * by the application, and a ready-to-download buffer is passed to
+ * the driver.
+ */
+ if (adapter->mfg_mode && cmd_no) {
+ dev_dbg(adapter->dev, "Ignoring commands in manufacturing mode\n");
+ return -1;
+ }
+
/* Get a new command node */
cmd_node = mwifiex_get_cmd_node(adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index bccf17ad588e..b9284b533294 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -118,6 +118,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
+ p += sprintf(p, "region_code=\"0x%x\"\n",
+ priv->adapter->region_code);
netdev_for_each_mc_addr(ha, netdev)
p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 5596b6be1898..4b1894b4757f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -78,6 +78,7 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_AES,
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
+ KEY_TYPE_ID_AES_CMAC_DEF,
};
#define WPA_PN_SIZE 8
@@ -176,6 +177,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
+#define TLV_TYPE_RXBA_SYNC (PROPRIETARY_TLV_BASE_ID + 153)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176)
@@ -188,6 +190,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202)
#define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203)
#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
+#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
+#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -208,6 +212,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_4K 4096
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
+#define MWIFIEX_TX_DATA_BUF_SIZE_12K 12288
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
@@ -379,6 +384,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
+#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -411,6 +417,14 @@ enum P2P_MODES {
P2P_MODE_CLIENT = 3,
};
+enum mwifiex_channel_flags {
+ MWIFIEX_CHANNEL_PASSIVE = BIT(0),
+ MWIFIEX_CHANNEL_DFS = BIT(1),
+ MWIFIEX_CHANNEL_NOHT40 = BIT(2),
+ MWIFIEX_CHANNEL_NOHT80 = BIT(3),
+ MWIFIEX_CHANNEL_DISABLED = BIT(7),
+};
+
#define HostCmd_RET_BIT 0x8000
#define HostCmd_ACT_GEN_GET 0x0000
#define HostCmd_ACT_GEN_SET 0x0001
@@ -504,6 +518,8 @@ enum P2P_MODES {
#define EVENT_RSSI_HIGH 0x0000001c
#define EVENT_SNR_HIGH 0x0000001d
#define EVENT_IBSS_COALESCED 0x0000001e
+#define EVENT_IBSS_STA_CONNECT 0x00000020
+#define EVENT_IBSS_STA_DISCONNECT 0x00000021
#define EVENT_DATA_RSSI_LOW 0x00000024
#define EVENT_DATA_SNR_LOW 0x00000025
#define EVENT_DATA_RSSI_HIGH 0x00000026
@@ -531,6 +547,7 @@ enum P2P_MODES {
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
+#define EVENT_RXBA_SYNC 0x00000059
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
@@ -734,6 +751,16 @@ struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_chan_scan_param_set chan_scan_param[1];
} __packed;
+struct mwifiex_ie_types_rxba_sync {
+ struct mwifiex_ie_types_header header;
+ u8 mac[ETH_ALEN];
+ u8 tid;
+ u8 reserved;
+ __le16 seq_num;
+ __le16 bitmap_len;
+ u8 bitmap[1];
+} __packed;
+
struct chan_band_param_set {
u8 radio_type;
u8 chan_number;
@@ -780,6 +807,11 @@ struct mwifiex_ie_types_scan_chan_gap {
__le16 chan_gap;
} __packed;
+struct mwifiex_ie_types_random_mac {
+ struct mwifiex_ie_types_header header;
+ u8 mac[ETH_ALEN];
+} __packed;
+
struct mwifiex_ietypes_chanstats {
struct mwifiex_ie_types_header header;
struct mwifiex_fw_chan_stats chanstats[0];
@@ -1464,6 +1496,7 @@ struct mwifiex_user_scan_cfg {
/* Variable number (fixed maximum) of channels to scan up */
struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
u16 scan_chan_gap;
+ u8 random_mac[ETH_ALEN];
} __packed;
#define MWIFIEX_BG_SCAN_CHAN_MAX 38
@@ -1646,7 +1679,7 @@ struct mwifiex_ie_types_sta_info {
};
struct host_cmd_ds_sta_list {
- u16 sta_count;
+ __le16 sta_count;
u8 tlv[0];
} __packed;
@@ -1667,6 +1700,12 @@ struct mwifiex_ie_types_wmm_param_set {
u8 wmm_ie[1];
};
+struct mwifiex_ie_types_mgmt_frame {
+ struct mwifiex_ie_types_header header;
+ __le16 frame_control;
+ u8 frame_contents[0];
+};
+
struct mwifiex_ie_types_wmm_queue_status {
struct mwifiex_ie_types_header header;
u8 queue_index;
@@ -2034,26 +2073,26 @@ struct host_cmd_ds_set_bss_mode {
struct host_cmd_ds_pcie_details {
/* TX buffer descriptor ring address */
- u32 txbd_addr_lo;
- u32 txbd_addr_hi;
+ __le32 txbd_addr_lo;
+ __le32 txbd_addr_hi;
/* TX buffer descriptor ring count */
- u32 txbd_count;
+ __le32 txbd_count;
/* RX buffer descriptor ring address */
- u32 rxbd_addr_lo;
- u32 rxbd_addr_hi;
+ __le32 rxbd_addr_lo;
+ __le32 rxbd_addr_hi;
/* RX buffer descriptor ring count */
- u32 rxbd_count;
+ __le32 rxbd_count;
/* Event buffer descriptor ring address */
- u32 evtbd_addr_lo;
- u32 evtbd_addr_hi;
+ __le32 evtbd_addr_lo;
+ __le32 evtbd_addr_hi;
/* Event buffer descriptor ring count */
- u32 evtbd_count;
+ __le32 evtbd_count;
/* Sleep cookie buffer physical address */
- u32 sleep_cookie_addr_lo;
- u32 sleep_cookie_addr_hi;
+ __le32 sleep_cookie_addr_lo;
+ __le32 sleep_cookie_addr_hi;
} __packed;
struct mwifiex_ie_types_rssi_threshold {
@@ -2093,8 +2132,8 @@ struct mwifiex_ie_types_mc_group_info {
u8 chan_buf_weight;
u8 band_config;
u8 chan_num;
- u32 chan_time;
- u32 reserved;
+ __le32 chan_time;
+ __le32 reserved;
union {
u8 sdio_func_num;
u8 usb_ep_num;
@@ -2185,7 +2224,7 @@ struct host_cmd_ds_robust_coex {
} __packed;
struct host_cmd_ds_wakeup_reason {
- u16 wakeup_reason;
+ __le16 wakeup_reason;
} __packed;
struct host_cmd_ds_gtk_rekey_params {
@@ -2196,6 +2235,10 @@ struct host_cmd_ds_gtk_rekey_params {
__le32 replay_ctr_high;
} __packed;
+struct host_cmd_ds_chan_region_cfg {
+ __le16 action;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2270,6 +2313,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_robust_coex coex;
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
struct host_cmd_ds_gtk_rekey_params rekey;
+ struct host_cmd_ds_chan_region_cfg reg_cfg;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 1489c90192bd..82839d9f079f 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -298,6 +298,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+ adapter->mfg_mode = mfg_mode;
adapter->key_api_major_ver = 0;
adapter->key_api_minor_ver = 0;
eth_broadcast_addr(adapter->perm_addr);
@@ -553,15 +554,22 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
return -1;
}
}
+ if (adapter->mfg_mode) {
+ adapter->hw_status = MWIFIEX_HW_STATUS_READY;
+ ret = -EINPROGRESS;
+ } else {
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
+ ret = mwifiex_sta_init_cmd(adapter->priv[i],
+ first_sta, true);
+ if (ret == -1)
+ return -1;
+
+ first_sta = false;
+ }
+
- for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
- true);
- if (ret == -1)
- return -1;
- first_sta = false;
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 70429815ff53..536ab834b126 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -260,6 +260,7 @@ struct mwifiex_ds_encrypt_key {
u8 is_igtk_key;
u8 is_current_wep_key;
u8 is_rx_seq_valid;
+ u8 is_igtk_def_key;
};
struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 1c7b00630b90..b89596c18b41 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
- memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
-
assoc_rsp->a_id = cpu_to_le16(aid);
+ memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index db4925db39aa..2478ccd6f2d9 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -23,6 +23,7 @@
#include "11n.h"
#define VERSION "1.0"
+#define MFG_FIRMWARE "mwifiex_mfg.bin"
static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
module_param(debug_mask, uint, 0);
@@ -37,6 +38,10 @@ module_param(driver_mode, ushort, 0);
MODULE_PARM_DESC(driver_mode,
"station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");
+bool mfg_mode;
+module_param(mfg_mode, bool, 0);
+MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
+
/*
* This function registers the device and performs all the necessary
* initializations.
@@ -139,6 +144,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
adapter->nd_info = NULL;
}
+ kfree(adapter->regd);
+
vfree(adapter->chan_stats);
kfree(adapter);
return 0;
@@ -486,9 +493,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
*/
static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
{
- flush_workqueue(adapter->workqueue);
- destroy_workqueue(adapter->workqueue);
- adapter->workqueue = NULL;
+ if (adapter->workqueue) {
+ flush_workqueue(adapter->workqueue);
+ destroy_workqueue(adapter->workqueue);
+ adapter->workqueue = NULL;
+ }
if (adapter->rx_workqueue) {
flush_workqueue(adapter->rx_workqueue);
@@ -559,16 +568,21 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
}
/* Wait for mwifiex_init to complete */
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
- if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto err_init_fw;
+ if (!adapter->mfg_mode) {
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
+ goto err_init_fw;
+ }
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
- if (mwifiex_register_cfg80211(adapter)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot register with cfg80211\n");
- goto err_init_fw;
+
+ if (!adapter->wiphy) {
+ if (mwifiex_register_cfg80211(adapter)) {
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register with cfg80211\n");
+ goto err_init_fw;
+ }
}
if (mwifiex_init_channel_scan_gap(adapter)) {
@@ -662,16 +676,41 @@ done:
/*
* This function initializes the hardware and gets firmware.
*/
-static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
+static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
+ bool req_fw_nowait)
{
int ret;
- ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
- adapter->dev, GFP_KERNEL, adapter,
- mwifiex_fw_dpc);
- if (ret < 0)
- mwifiex_dbg(adapter, ERROR,
- "request_firmware_nowait error %d\n", ret);
+ /* Override the default firmware with the manufacturing one if
+ * manufacturing mode is enabled.
+ */
+ if (mfg_mode) {
+ if (strlcpy(adapter->fw_name, MFG_FIRMWARE,
+ sizeof(adapter->fw_name)) >=
+ sizeof(adapter->fw_name)) {
+ pr_err("%s: fw_name too long!\n", __func__);
+ return -1;
+ }
+ }
+
+ if (req_fw_nowait) {
+ ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
+ adapter->dev, GFP_KERNEL, adapter,
+ mwifiex_fw_dpc);
+ if (ret < 0)
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware_nowait error %d\n", ret);
+ } else {
+ ret = request_firmware(&adapter->firmware,
+ adapter->fw_name,
+ adapter->dev);
+ if (ret < 0)
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware error %d\n", ret);
+ else
+ mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
+ }
+
return ret;
}
@@ -1321,6 +1360,199 @@ static void mwifiex_main_work_queue(struct work_struct *work)
}
/*
+ * This function gets called during PCIe function level reset. Required
+ * code is extracted from mwifiex_remove_card()
+ */
+static int
+mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
+{
+ struct mwifiex_private *priv;
+ int i;
+
+ if (!adapter)
+ goto exit_return;
+
+ if (down_interruptible(sem))
+ goto exit_sem_err;
+
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ mwifiex_deauthenticate(priv, NULL);
+
+ /* We can no longer handle interrupts once we start doing the teardown
+ * below.
+ */
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+
+ /* Stop data */
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && priv->netdev) {
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+ netif_device_detach(priv->netdev);
+ }
+ }
+
+ mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ if (adapter->if_ops.down_dev)
+ adapter->if_ops.down_dev(adapter);
+
+ mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n");
+ if (atomic_read(&adapter->rx_pending) ||
+ atomic_read(&adapter->tx_pending) ||
+ atomic_read(&adapter->cmd_pending)) {
+ mwifiex_dbg(adapter, ERROR,
+ "rx_pending=%d, tx_pending=%d,\t"
+ "cmd_pending=%d\n",
+ atomic_read(&adapter->rx_pending),
+ atomic_read(&adapter->tx_pending),
+ atomic_read(&adapter->cmd_pending));
+ }
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ rtnl_lock();
+ if (priv->netdev &&
+ priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+ rtnl_unlock();
+ }
+
+ up(sem);
+exit_sem_err:
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+exit_return:
+ return 0;
+}
+
+/* This function gets called during PCIe function level reset. Required
+ * code is extracted from mwifiex_add_card()
+ */
+static int
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
+ struct mwifiex_if_ops *if_ops, u8 iface_type)
+{
+ char fw_name[32];
+ struct pcie_service_card *card = adapter->card;
+
+ if (down_interruptible(sem))
+ goto exit_sem_err;
+
+ mwifiex_init_lock_list(adapter);
+ if (adapter->if_ops.up_dev)
+ adapter->if_ops.up_dev(adapter);
+
+ adapter->iface_type = iface_type;
+ adapter->card_sem = sem;
+
+ adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
+ adapter->surprise_removed = false;
+ init_waitqueue_head(&adapter->init_wait_q);
+ adapter->is_suspended = false;
+ adapter->hs_activated = false;
+ init_waitqueue_head(&adapter->hs_activate_wait_q);
+ init_waitqueue_head(&adapter->cmd_wait_q.wait);
+ adapter->cmd_wait_q.status = 0;
+ adapter->scan_wait_q_woken = false;
+
+ if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
+ adapter->rx_work_enabled = true;
+
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!adapter->workqueue)
+ goto err_kmalloc;
+
+ INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
+
+ if (adapter->rx_work_enabled) {
+ adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 1);
+ if (!adapter->rx_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
+ }
+
+ /* Register the device. Fill up the private data structure with
+ * relevant information from the card. Some code extracted from
+ * mwifiex_register_dev()
+ */
+ mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
+ strcpy(fw_name, adapter->fw_name);
+ strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
+
+ adapter->tx_buf_size = card->pcie.tx_buf_size;
+ adapter->ext_scan = card->pcie.can_ext_scan;
+ if (mwifiex_init_hw_fw(adapter, false)) {
+ strcpy(adapter->fw_name, fw_name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: firmware init failed\n", __func__);
+ goto err_init_fw;
+ }
+ strcpy(adapter->fw_name, fw_name);
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ up(sem);
+ return 0;
+
+err_init_fw:
+ mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
+ if (adapter->if_ops.unregister_dev)
+ adapter->if_ops.unregister_dev(adapter);
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: shutdown mwifiex\n", __func__);
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ }
+
+err_kmalloc:
+ mwifiex_terminate_workqueue(adapter);
+ adapter->surprise_removed = true;
+ up(sem);
+exit_sem_err:
+ mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
+
+ return -1;
+}
+
+/* This function processes pre and post PCIe function level resets.
+ * It performs software cleanup without touching PCIe-specific code.
+ * Likewise, PCIe-specific initialization is skipped on the post-reset path.
+ */
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
+{
+ struct mwifiex_if_ops if_ops;
+
+ if (!prepare) {
+ mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops,
+ adapter->iface_type);
+ } else {
+ memcpy(&if_ops, &adapter->if_ops,
+ sizeof(struct mwifiex_if_ops));
+ mwifiex_shutdown_sw(adapter, adapter->card_sem);
+ }
+}
+EXPORT_SYMBOL_GPL(mwifiex_do_flr);
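
mwifiex_do_flr() is the single entry point for both halves of the reset: it is expected to be called once with prepare set before the function level reset to shut down software state, and once with prepare cleared afterwards to re-initialize the software and re-download the firmware. A minimal sketch of that calling sequence, assuming the bus-level notification plumbing that invokes it; the sketch itself is not part of the patch:

    /* Sketch only: the intended pre/post pairing around one function level
     * reset; example_flr_sequence() is illustrative, not a driver symbol.
     */
    static void example_flr_sequence(struct mwifiex_adapter *adapter)
    {
            mwifiex_do_flr(adapter, true);  /* pre-FLR: tear down software state */
            /* ... the PCI core performs the function level reset here ... */
            mwifiex_do_flr(adapter, false); /* post-FLR: reinit and redownload FW */
    }
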
+
+/*
* This function adds the card.
*
* This function follows the following major steps to set up the device -
@@ -1391,7 +1623,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
goto err_registerdev;
}
- if (mwifiex_init_hw_fw(adapter)) {
+ if (mwifiex_init_hw_fw(adapter, true)) {
pr_err("%s: firmware init failed\n", __func__);
goto err_init_fw;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9f6bb400bdae..26df28f4bfb2 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -58,6 +58,7 @@
#include "sdio.h"
extern const char driver_version[];
+extern bool mfg_mode;
struct mwifiex_adapter;
struct mwifiex_private;
@@ -675,6 +676,7 @@ struct mwifiex_private {
struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
u8 assoc_resp_ht_param;
bool ht_param_present;
+ u8 random_mac[ETH_ALEN];
};
@@ -827,6 +829,8 @@ struct mwifiex_if_ops {
void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
void (*multi_port_resync)(struct mwifiex_adapter *);
bool (*is_port_ready)(struct mwifiex_private *);
+ void (*down_dev)(struct mwifiex_adapter *);
+ void (*up_dev)(struct mwifiex_adapter *);
};
struct mwifiex_adapter {
@@ -989,6 +993,7 @@ struct mwifiex_adapter {
u32 drv_info_size;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
+ bool mfg_mode;
struct mwifiex_chan_stats *chan_stats;
u32 num_in_chan_stats;
int survey_idx;
@@ -1004,6 +1009,7 @@ struct mwifiex_adapter {
bool usb_mc_status;
bool usb_mc_setup;
struct cfg80211_wowlan_nd_info *nd_info;
+ struct ieee80211_regdomain *regd;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1625,4 +1631,5 @@ void mwifiex_debugfs_remove(void);
void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
#endif
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
#endif /* !_MWIFIEX_MAIN_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 453ab6ad4784..3c3c4f197da8 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -225,7 +225,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
mwifiex_pcie_resume(&pdev->dev);
@@ -277,6 +277,52 @@ static const struct pci_device_id mwifiex_ids[] = {
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
+static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+
+ if (!pdev) {
+ pr_err("%s: PCIe device is not specified\n", __func__);
+ return;
+ }
+
+ card = (struct pcie_service_card *)pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_err("%s: Card or adapter structure is not valid (%ld)\n",
+ __func__, (long)card);
+ return;
+ }
+
+ adapter = card->adapter;
+ mwifiex_dbg(adapter, INFO,
+ "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
+ __func__, pdev->vendor, pdev->device,
+ pdev->revision,
+ prepare ? "Pre-FLR" : "Post-FLR");
+
+ if (prepare) {
+ /* The kernel will perform the FLR after this notification.
+ * Clean up all software state without touching anything
+ * related to PCIe or the hardware.
+ */
+ mwifiex_do_flr(adapter, prepare);
+ adapter->surprise_removed = true;
+ } else {
+ /* The kernel saves and restores the PCIe function context before
+ * and after the FLR, respectively. Reconfigure the software and
+ * firmware, including a firmware redownload.
+ */
+ adapter->surprise_removed = false;
+ mwifiex_do_flr(adapter, prepare);
+ }
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+}
+
+static const struct pci_error_handlers mwifiex_pcie_err_handler[] = {
+ { .reset_notify = mwifiex_pcie_reset_notify, },
+};
+
#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
@@ -295,6 +341,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
},
#endif
.shutdown = mwifiex_pcie_shutdown,
+ .err_handler = mwifiex_pcie_err_handler,
};
/*
@@ -1956,8 +2003,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
- mwifiex_dbg(adapter, INFO, ".");
-
tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
card->pcie.blksz_fw_dl;
@@ -2043,6 +2088,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
ret = -1;
else
ret = 0;
+
+ mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>",
+ tries, ret, firmware_stat);
+
if (ret)
continue;
if (firmware_stat == FIRMWARE_READY_PCIE) {
@@ -2074,8 +2123,7 @@ mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
adapter->winner = 1;
} else {
mwifiex_dbg(adapter, ERROR,
- "PCI-E is not the winner <%#x,%d>, exit dnld\n",
- ret, adapter->winner);
+ "PCI-E is not the winner <%#x>", winner);
}
return ret;
@@ -2863,7 +2911,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
{
int revision_id = 0;
- int version;
+ int version, magic;
struct pcie_service_card *card = adapter->card;
switch (card->dev->device) {
@@ -2888,30 +2936,19 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
}
break;
case PCIE_DEVICE_ID_MARVELL_88W8997:
- mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+ mwifiex_read_reg(adapter, 0x8, &revision_id);
mwifiex_read_reg(adapter, 0x0cd0, &version);
+ mwifiex_read_reg(adapter, 0x0cd4, &magic);
+ revision_id &= 0xff;
version &= 0x7;
- switch (revision_id) {
- case PCIE8997_V2:
- if (version == CHIP_VER_PCIEUART)
- strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_V2);
- else
- strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_V2);
- break;
- case PCIE8997_Z:
- if (version == CHIP_VER_PCIEUART)
- strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_Z);
- else
- strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_Z);
- break;
- default:
- strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
- break;
- }
+ magic &= 0xff;
+ if (revision_id == PCIE8997_A1 &&
+ magic == CHIP_MAGIC_VALUE &&
+ version == CHIP_VER_PCIEUART)
+ strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4);
+ else
+ strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4);
+ break;
default:
break;
}
@@ -2952,7 +2989,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- const struct mwifiex_pcie_card_reg *reg;
struct pci_dev *pdev;
int i;
@@ -2976,8 +3012,90 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
if (card->msi_enable)
pci_disable_msi(pdev);
}
+ }
+}
+
+/* This function initializes the PCI-E host memory space, WCB rings, etc.
+ *
+ * The following initialization steps are performed -
+ * - Allocate TXBD ring buffers
+ * - Allocate RXBD ring buffers
+ * - Allocate event BD ring buffers
+ * - Allocate command response ring buffer
+ * - Allocate sleep cookie buffer
+ * This is part of mwifiex_pcie_init(); it does not reset the PCIe registers.
+ */
+static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int ret;
+ struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- reg = card->pcie.reg;
+ card->cmdrsp_buf = NULL;
+ ret = mwifiex_pcie_create_txbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
+ goto err_cre_txbd;
+ }
+
+ ret = mwifiex_pcie_create_rxbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
+ goto err_cre_rxbd;
+ }
+
+ ret = mwifiex_pcie_create_evtbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
+ goto err_cre_evtbd;
+ }
+
+ ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
+ goto err_alloc_cmdbuf;
+ }
+
+ if (reg->sleep_cookie) {
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
+ goto err_alloc_cookie;
+ }
+ } else {
+ card->sleep_cookie_vbase = NULL;
+ }
+ return;
+
+err_alloc_cookie:
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+err_alloc_cmdbuf:
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+err_cre_evtbd:
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+err_cre_rxbd:
+ mwifiex_pcie_delete_txbd_ring(adapter);
+err_cre_txbd:
+ pci_iounmap(pdev, card->pci_mmap1);
+}
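
The error path above follows the usual goto-based unwind: each allocation that can fail jumps to a label that releases everything allocated before it, in reverse order, so no partially built ring state is left behind. A generic sketch of that shape, with hypothetical names and sizes:

    /* Generic goto-unwind shape, mirroring the error path above; the three
     * allocations and their sizes are purely illustrative.
     */
    static int example_alloc_three(void **a, void **b, void **c)
    {
            *a = kzalloc(16, GFP_KERNEL);
            if (!*a)
                    goto err_a;
            *b = kzalloc(16, GFP_KERNEL);
            if (!*b)
                    goto err_b;
            *c = kzalloc(16, GFP_KERNEL);
            if (!*c)
                    goto err_c;
            return 0;

    err_c:
            kfree(*b);
    err_b:
            kfree(*a);
    err_a:
            return -ENOMEM;
    }
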
+
+/* This function cleans up the PCI-E host memory space.
+ * Some code is extracted from mwifiex_unregister_dev().
+ */
+static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
+ mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
+
+ adapter->seq_num = 0;
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ if (card) {
if (reg->sleep_cookie)
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
@@ -2987,6 +3105,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
mwifiex_pcie_delete_txbd_ring(adapter);
card->cmdrsp_buf = NULL;
}
+
+ return;
}
static struct mwifiex_if_ops pcie_ops = {
@@ -3013,6 +3133,8 @@ static struct mwifiex_if_ops pcie_ops = {
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
.reg_dump = mwifiex_pcie_reg_dump,
.device_dump = mwifiex_pcie_device_dump,
+ .down_dev = mwifiex_pcie_down_dev,
+ .up_dev = mwifiex_pcie_up_dev,
};
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f05061cea5cd..46f99cae9399 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -32,11 +32,9 @@
#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
-#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieusb8997_combo_v2.bin"
-#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
-#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
-#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
-#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
+#define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
+#define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
+#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
@@ -46,9 +44,10 @@
#define PCIE8897_A0 0x1100
#define PCIE8897_B0 0x1200
-#define PCIE8997_Z 0x0
-#define PCIE8997_V2 0x471
+#define PCIE8997_A0 0x10
+#define PCIE8997_A1 0x11
#define CHIP_VER_PCIEUART 0x3
+#define CHIP_MAGIC_VALUE 0x24
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 21ec84794d0c..97c9765b5bc6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -820,6 +820,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_scan_chan_gap *chan_gap_tlv;
+ struct mwifiex_ie_types_random_mac *random_mac_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
@@ -835,6 +836,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u8 ssid_filter;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_bss_mode *bss_mode;
+ const u8 zero_mac[6] = {0, 0, 0, 0, 0, 0};
/* The tlv_buf_len is calculated for each scan command. The TLVs added
in this routine will be preserved since the routine that sends the
@@ -967,6 +969,18 @@ mwifiex_config_scan(struct mwifiex_private *priv,
tlv_pos +=
sizeof(struct mwifiex_ie_types_scan_chan_gap);
}
+
+ if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) {
+ random_mac_tlv = (void *)tlv_pos;
+ random_mac_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_RANDOM_MAC);
+ random_mac_tlv->header.len =
+ cpu_to_le16(sizeof(random_mac_tlv->mac));
+ ether_addr_copy(random_mac_tlv->mac,
+ user_scan_in->random_mac);
+ tlv_pos +=
+ sizeof(struct mwifiex_ie_types_random_mac);
+ }
} else {
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
num_probes = adapter->scan_probes;
@@ -1922,6 +1936,7 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
}
adapter->active_scan_triggered = true;
+ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
user_scan_cfg->ssid_list = priv->scan_request->ssids;
@@ -2179,18 +2194,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (chan_band_tlv && adapter->nd_info) {
adapter->nd_info->matches[idx] =
- kzalloc(sizeof(*pmatch) +
- sizeof(u32), GFP_ATOMIC);
+ kzalloc(sizeof(*pmatch) + sizeof(u32),
+ GFP_ATOMIC);
pmatch = adapter->nd_info->matches[idx];
if (pmatch) {
- memset(pmatch, 0, sizeof(*pmatch));
- if (chan_band_tlv) {
- pmatch->n_channels = 1;
- pmatch->channels[0] =
- chan_band->chan_number;
- }
+ pmatch->n_channels = 1;
+ pmatch->channels[0] = chan_band->chan_number;
}
}
@@ -2761,6 +2772,7 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
if (!scan_cfg)
return -ENOMEM;
+ ether_addr_copy(scan_cfg->random_mac, priv->random_mac);
scan_cfg->ssid_list = req_ssid;
scan_cfg->num_ssids = 1;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d3e1561ca075..8718950004f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -122,9 +122,11 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
IRQF_TRIGGER_LOW,
"wifi_wake", cfg);
if (ret) {
- dev_err(dev,
+ dev_dbg(dev,
"Failed to request irq_wifi %d (%d)\n",
cfg->irq_wifi, ret);
+ card->plt_wake_cfg = NULL;
+ return 0;
}
disable_irq(cfg->irq_wifi);
}
@@ -289,7 +291,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 7897037b0992..2a162c33d271 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -598,6 +598,11 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
memcpy(km->key_param_set.key_params.cmac_aes.key,
enc_key->key_material, enc_key->key_len);
len += sizeof(struct mwifiex_cmac_aes_param);
+ } else if (enc_key->is_igtk_def_key) {
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set CMAC default Key index\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC_DEF;
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
} else {
mwifiex_dbg(adapter, INFO,
"%s: Set AES Key\n", __func__);
@@ -706,15 +711,10 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
(priv->wep_key_curr_index & KEY_INDEX_MASK))
key_info |= KEY_DEFAULT;
} else {
- if (mac) {
- if (is_broadcast_ether_addr(mac))
- key_info |= KEY_MCAST;
- else
- key_info |= KEY_UNICAST |
- KEY_DEFAULT;
- } else {
+ if (is_broadcast_ether_addr(mac))
key_info |= KEY_MCAST;
- }
+ else
+ key_info |= KEY_UNICAST | KEY_DEFAULT;
}
}
km->key_param_set.key_info = cpu_to_le16(key_info);
@@ -1244,20 +1244,23 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
return 0;
/* Send the ring base addresses and count to firmware */
- host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
- host_spec->txbd_addr_hi = (u32)(((u64)card->txbd_ring_pbase)>>32);
- host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
- host_spec->rxbd_addr_hi = (u32)(((u64)card->rxbd_ring_pbase)>>32);
- host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
- host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
- host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
+ host_spec->txbd_addr_lo = cpu_to_le32((u32)(card->txbd_ring_pbase));
+ host_spec->txbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->txbd_ring_pbase) >> 32));
+ host_spec->txbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+ host_spec->rxbd_addr_lo = cpu_to_le32((u32)(card->rxbd_ring_pbase));
+ host_spec->rxbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->rxbd_ring_pbase) >> 32));
+ host_spec->rxbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+ host_spec->evtbd_addr_lo = cpu_to_le32((u32)(card->evtbd_ring_pbase));
+ host_spec->evtbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->evtbd_ring_pbase) >> 32));
+ host_spec->evtbd_count = cpu_to_le32(MWIFIEX_MAX_EVT_BD);
if (card->sleep_cookie_vbase) {
host_spec->sleep_cookie_addr_lo =
- (u32)(card->sleep_cookie_pbase);
- host_spec->sleep_cookie_addr_hi =
- (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
+ cpu_to_le32((u32)(card->sleep_cookie_pbase));
+ host_spec->sleep_cookie_addr_hi = cpu_to_le32((u32)(((u64)
+ (card->sleep_cookie_pbase)) >> 32));
mwifiex_dbg(priv->adapter, INFO,
"sleep_cook_lo phy addr: 0x%x\n",
host_spec->sleep_cookie_addr_lo);
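
The converted fields above all follow the same pattern: a 64-bit DMA base address is split into two 32-bit halves, and each half is stored as an explicit little-endian __le32 for the firmware. A small sketch of the split, assuming nothing beyond cpu_to_le32(); the helper name is illustrative:

    /* Sketch: splitting a 64-bit DMA address into __le32 lo/hi halves, as the
     * host_spec fields now expect. 'ring_pbase' stands in for any of the
     * TX/RX/event ring base addresses.
     */
    static inline void example_set_ring_base(__le32 *lo, __le32 *hi, u64 ring_pbase)
    {
            *lo = cpu_to_le32((u32)ring_pbase);
            *hi = cpu_to_le32((u32)(ring_pbase >> 32));
    }
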
@@ -1482,7 +1485,7 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
continue;
/* property header is 6 bytes, data must fit in cmd buffer */
- if (prop && prop->value && prop->length > 6 &&
+ if (prop->value && prop->length > 6 &&
prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0,
@@ -1596,6 +1599,21 @@ static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_cmd_chan_region_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action)
+{
+ struct host_cmd_ds_chan_region_cfg *reg = &cmd->params.reg_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REGION_CFG);
+ cmd->size = cpu_to_le16(sizeof(*reg) + S_DS_GEN);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET)
+ reg->action = cpu_to_le16(cmd_action);
+
+ return 0;
+}
+
static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2136,6 +2154,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_CHAN_REGION_CFG:
+ ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2273,6 +2294,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
}
+
+ mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REGION_CFG,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
}
/* get tx rate */
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index ccf54932e321..8548027abf71 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -962,7 +962,7 @@ static int mwifiex_ret_uap_sta_list(struct mwifiex_private *priv,
int i;
struct mwifiex_sta_node *sta_node;
- for (i = 0; i < sta_list->sta_count; i++) {
+ for (i = 0; i < (le16_to_cpu(sta_list->sta_count)); i++) {
sta_node = mwifiex_get_sta_entry(priv, sta_info->mac);
if (unlikely(!sta_node))
continue;
@@ -1022,6 +1022,138 @@ static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
return 0;
}
+static struct ieee80211_regdomain *
+mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
+ u8 *buf, u16 buf_len)
+{
+ u16 num_chan = buf_len / 2;
+ struct ieee80211_regdomain *regd;
+ struct ieee80211_reg_rule *rule;
+ bool new_rule;
+ int regd_size, idx, freq, prev_freq = 0;
+ u32 bw, prev_bw = 0;
+ u8 chflags, prev_chflags = 0, valid_rules = 0;
+
+ if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
+ return ERR_PTR(-EINVAL);
+
+ regd_size = sizeof(struct ieee80211_regdomain) +
+ num_chan * sizeof(struct ieee80211_reg_rule);
+
+ regd = kzalloc(regd_size, GFP_KERNEL);
+ if (!regd)
+ return ERR_PTR(-ENOMEM);
+
+ for (idx = 0; idx < num_chan; idx++) {
+ u8 chan;
+ enum nl80211_band band;
+
+ chan = *buf++;
+ if (!chan) {
+ kfree(regd);
+ return NULL;
+ }
+ chflags = *buf++;
+ band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(chan, band);
+ new_rule = false;
+
+ if (chflags & MWIFIEX_CHANNEL_DISABLED)
+ continue;
+
+ if (band == NL80211_BAND_5GHZ) {
+ if (!(chflags & MWIFIEX_CHANNEL_NOHT80))
+ bw = MHZ_TO_KHZ(80);
+ else if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+ bw = MHZ_TO_KHZ(40);
+ else
+ bw = MHZ_TO_KHZ(20);
+ } else {
+ if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+ bw = MHZ_TO_KHZ(40);
+ else
+ bw = MHZ_TO_KHZ(20);
+ }
+
+ if (idx == 0 || prev_chflags != chflags || prev_bw != bw ||
+ freq - prev_freq > 20) {
+ valid_rules++;
+ new_rule = true;
+ }
+
+ rule = &regd->reg_rules[valid_rules - 1];
+
+ rule->freq_range.end_freq_khz = MHZ_TO_KHZ(freq + 10);
+
+ prev_chflags = chflags;
+ prev_freq = freq;
+ prev_bw = bw;
+
+ if (!new_rule)
+ continue;
+
+ rule->freq_range.start_freq_khz = MHZ_TO_KHZ(freq - 10);
+ rule->power_rule.max_eirp = DBM_TO_MBM(19);
+
+ if (chflags & MWIFIEX_CHANNEL_PASSIVE)
+ rule->flags = NL80211_RRF_NO_IR;
+
+ if (chflags & MWIFIEX_CHANNEL_DFS)
+ rule->flags = NL80211_RRF_DFS;
+
+ rule->freq_range.max_bandwidth_khz = bw;
+ }
+
+ regd->n_reg_rules = valid_rules;
+ regd->alpha2[0] = '9';
+ regd->alpha2[1] = '9';
+
+ return regd;
+}
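
The helper above folds consecutive (channel, flags) byte pairs from the firmware into regulatory rules: a new rule is opened only when the channel flags change, the derived bandwidth changes, or the next channel is more than 20 MHz away. The decision itself, extracted into a standalone sketch with plain integer parameters instead of the driver's structures:

    /* Sketch of the rule-coalescing test used above: a new rule is opened when
     * this is the first usable channel, the flags or bandwidth changed, or the
     * channel is more than 20 MHz away from the previous one.
     */
    static int example_needs_new_rule(int idx, int freq, int prev_freq,
                                      unsigned int bw, unsigned int prev_bw,
                                      unsigned int chflags, unsigned int prev_chflags)
    {
            return idx == 0 || prev_chflags != chflags || prev_bw != bw ||
                   freq - prev_freq > 20;
    }
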
+
+static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_chan_region_cfg *reg = &resp->params.reg_cfg;
+ u16 action = le16_to_cpu(reg->action);
+ u16 tlv, tlv_buf_len, tlv_buf_left;
+ struct mwifiex_ie_types_header *head;
+ struct ieee80211_regdomain *regd;
+ u8 *tlv_buf;
+
+ if (action != HostCmd_ACT_GEN_GET)
+ return 0;
+
+ tlv_buf = (u8 *)reg + sizeof(*reg);
+ tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*reg);
+
+ while (tlv_buf_left >= sizeof(*head)) {
+ head = (struct mwifiex_ie_types_header *)tlv_buf;
+ tlv = le16_to_cpu(head->type);
+ tlv_buf_len = le16_to_cpu(head->len);
+
+ if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
+ break;
+
+ switch (tlv) {
+ case TLV_TYPE_CHAN_ATTR_CFG:
+ mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
+ (u8 *)head + sizeof(*head),
+ tlv_buf_len);
+ regd = mwifiex_create_custom_regdomain(priv,
+ (u8 *)head + sizeof(*head), tlv_buf_len);
+ if (!IS_ERR(regd))
+ priv->adapter->regd = regd;
+ break;
+ }
+
+ tlv_buf += (sizeof(*head) + tlv_buf_len);
+ tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
+ }
+
+ return 0;
+}
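
The response handler walks a type/length/value buffer and, before dispatching on the type, checks that the advertised length still fits in the remaining space; a truncated TLV ends the walk. A generic sketch of that loop, with hypothetical stand-in names rather than driver symbols:

    /* Generic TLV walk mirroring the loop above; example_tlv_hdr and
     * example_handle_tlv() are illustrative stand-ins.
     */
    struct example_tlv_hdr {
            __le16 type;
            __le16 len;
    };

    static void example_handle_tlv(u16 type, const u8 *data, u16 len)
    {
            /* Hypothetical consumer; a real handler would dispatch on 'type'. */
    }

    static void example_walk_tlvs(u8 *buf, u16 left)
    {
            struct example_tlv_hdr *head;
            u16 len;

            while (left >= sizeof(*head)) {
                    head = (struct example_tlv_hdr *)buf;
                    len = le16_to_cpu(head->len);

                    if (left < sizeof(*head) + len)
                            break;          /* truncated TLV: stop parsing */

                    example_handle_tlv(le16_to_cpu(head->type),
                                       buf + sizeof(*head), len);

                    buf += sizeof(*head) + len;
                    left -= sizeof(*head) + len;
            }
    }
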
+
/*
* This function handles the command responses.
*
@@ -1239,6 +1371,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
break;
+ case HostCmd_CMD_CHAN_REGION_CFG:
+ ret = mwifiex_ret_chan_region_cfg(priv, resp);
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index a422f3306d4d..9df0c4dc06ed 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -25,6 +25,99 @@
#include "wmm.h"
#include "11n.h"
+#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12
+
+static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *sta_ptr,
+ struct sk_buff *event)
+{
+ int evt_len, ele_len;
+ u8 *curr;
+ struct ieee_types_header *ele_hdr;
+ struct mwifiex_ie_types_mgmt_frame *tlv_mgmt_frame;
+ const struct ieee80211_ht_cap *ht_cap;
+ const struct ieee80211_vht_cap *vht_cap;
+
+ skb_pull(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+ evt_len = event->len;
+ curr = event->data;
+
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
+ event->data, event->len);
+
+ skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+
+ tlv_mgmt_frame = (void *)curr;
+ if (evt_len >= sizeof(*tlv_mgmt_frame) &&
+ le16_to_cpu(tlv_mgmt_frame->header.type) ==
+ TLV_TYPE_UAP_MGMT_FRAME) {
+ /* Move curr to the start of the beacon TLV payload:
+ * timestamp 8 bytes, beacon interval 2 bytes,
+ * capability info 2 bytes, a 12-byte beacon header in total.
+ */
+ evt_len = le16_to_cpu(tlv_mgmt_frame->header.len);
+ curr += (sizeof(*tlv_mgmt_frame) + 12);
+ } else {
+ mwifiex_dbg(priv->adapter, MSG,
+ "management frame tlv not found!\n");
+ return 0;
+ }
+
+ while (evt_len >= sizeof(*ele_hdr)) {
+ ele_hdr = (struct ieee_types_header *)curr;
+ ele_len = ele_hdr->len;
+
+ if (evt_len < ele_len + sizeof(*ele_hdr))
+ break;
+
+ switch (ele_hdr->element_id) {
+ case WLAN_EID_HT_CAPABILITY:
+ sta_ptr->is_11n_enabled = true;
+ ht_cap = (void *)(ele_hdr + 2);
+ sta_ptr->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ mwifiex_dbg(priv->adapter, INFO,
+ "11n enabled!, max_amsdu : %d\n",
+ sta_ptr->max_amsdu);
+ break;
+
+ case WLAN_EID_VHT_CAPABILITY:
+ sta_ptr->is_11ac_enabled = true;
+ vht_cap = (void *)(ele_hdr + 2);
+ /* check VHT MAXMPDU capability */
+ switch (le32_to_cpu(vht_cap->vht_cap_info) & 0x3) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_12K;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ default:
+ break;
+ }
+
+ mwifiex_dbg(priv->adapter, INFO,
+ "11ac enabled!, max_amsdu : %d\n",
+ sta_ptr->max_amsdu);
+ break;
+ default:
+ break;
+ }
+
+ curr += (ele_len + sizeof(*ele_hdr));
+ evt_len -= (ele_len + sizeof(*ele_hdr));
+ }
+
+ return 0;
+}
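
For a VHT peer, the maximum A-MSDU size is derived from the two low bits of the VHT capability info field, which carry the standard "Maximum MPDU Length" encoding. The mapping in isolation, as a small sketch reusing the driver's buffer-size constants:

    /* Sketch of the VHT max-MPDU mapping used above; the buffer-size constants
     * are the driver's, the two-bit field is the standard VHT encoding.
     */
    static u16 example_vht_max_amsdu(u32 vht_cap_info)
    {
            switch (vht_cap_info & 0x3) {
            case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
                    return MWIFIEX_TX_DATA_BUF_SIZE_12K;
            case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
                    return MWIFIEX_TX_DATA_BUF_SIZE_8K;
            default:
                    return MWIFIEX_TX_DATA_BUF_SIZE_4K;
            }
    }
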
+
/*
* This function resets the connection state.
*
@@ -519,6 +612,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
* - EVENT_LINK_QUALITY
* - EVENT_PRE_BEACON_LOST
* - EVENT_IBSS_COALESCED
+ * - EVENT_IBSS_STA_CONNECT
+ * - EVENT_IBSS_STA_DISCONNECT
* - EVENT_WEP_ICV_ERR
* - EVENT_BW_CHANGE
* - EVENT_HOSTWAKE_STAIE
@@ -547,9 +642,11 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int ret = 0;
+ int ret = 0, i;
u32 eventcause = adapter->event_cause;
u16 ctrl, reason_code;
+ u8 ibss_sta_addr[ETH_ALEN];
+ struct mwifiex_sta_node *sta_ptr;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -708,7 +805,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
- if (adapter->ext_scan && !priv->scan_aborting)
+ /* We intend to skip this event during suspend, but still
+ * handle it when the interface is disabled.
+ */
+ if (adapter->ext_scan && (!priv->scan_aborting ||
+ !netif_running(priv->netdev)))
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
@@ -771,6 +872,39 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
+ case EVENT_IBSS_STA_CONNECT:
+ ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_CONNECT %pM\n",
+ ibss_sta_addr);
+ sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
+ if (sta_ptr && adapter->adhoc_11n_enabled) {
+ mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
+ adapter->event_skb);
+ if (sta_ptr->is_11n_enabled)
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ else
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ BA_STREAM_NOT_ALLOWED;
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ }
+
+ break;
+ case EVENT_IBSS_STA_DISCONNECT:
+ ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_DISCONNECT %pM\n",
+ ibss_sta_addr);
+ sta_ptr = mwifiex_get_sta_entry(priv, ibss_sta_addr);
+ if (sta_ptr && sta_ptr->is_11n_enabled) {
+ mwifiex_11n_del_rx_reorder_tbl_by_ta(priv,
+ ibss_sta_addr);
+ mwifiex_del_tx_ba_stream_tbl_by_ra(priv, ibss_sta_addr);
+ }
+ mwifiex_wmm_del_peer_ra_list(priv, ibss_sta_addr);
+ mwifiex_del_sta_entry(priv, ibss_sta_addr);
+ break;
case EVENT_ADDBA:
mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
@@ -869,6 +1003,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
+ case EVENT_RXBA_SYNC:
+ dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+ mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+ adapter->event_skb->len -
+ sizeof(eventcause));
+ break;
default:
mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index e06647a327b6..644f3a248741 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -574,7 +574,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
adapter->hs_activate_wait_q_woken = false;
- memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
+ memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
adapter->hs_enabling = true;
@@ -1138,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
{
struct mwifiex_ds_encrypt_key encrypt_key;
- memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+ memset(&encrypt_key, 0, sizeof(encrypt_key));
encrypt_key.key_len = key_len;
encrypt_key.key_index = key_index;
@@ -1180,7 +1180,7 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
{
struct mwifiex_ver_ext ver_ext;
- memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
+ memset(&ver_ext, 0, sizeof(ver_ext));
ver_ext.version_str_sel = version_str_sel;
if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 86ff54296f39..d24eca34ac11 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -306,7 +306,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
mwifiex_process_multi_chan_event(priv, adapter->event_skb);
break;
-
+ case EVENT_RXBA_SYNC:
+ dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+ mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+ adapter->event_skb->len -
+ sizeof(eventcause));
+ break;
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 0857575c5c39..73eb0846db21 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -273,6 +273,8 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
} else {
mwifiex_dbg(adapter, DATA,
"%s: DATA\n", __func__);
+ mwifiex_write_data_complete(adapter, context->skb, 0,
+ urb->status ? -1 : 0);
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
port = &card->port[i];
if (context->ep == port->tx_data_ep) {
@@ -282,8 +284,6 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
}
}
adapter->data_sent = false;
- mwifiex_write_data_complete(adapter, context->skb, 0,
- urb->status ? -1 : 0);
}
if (card->mc_resync_flag)
@@ -611,7 +611,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter->priv_num)
return;
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
#ifdef CONFIG_PM
if (adapter->is_suspended)
mwifiex_usb_resume(intf);
@@ -657,11 +657,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
card->tx_cmd.ep = card->tx_cmd_ep;
card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->tx_cmd.urb) {
- mwifiex_dbg(adapter, ERROR,
- "tx_cmd.urb allocation failed\n");
+ if (!card->tx_cmd.urb)
return -ENOMEM;
- }
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
port = &card->port[i];
@@ -677,11 +674,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
port->tx_data_list[j].ep = port->tx_data_ep;
port->tx_data_list[j].urb =
usb_alloc_urb(0, GFP_KERNEL);
- if (!port->tx_data_list[j].urb) {
- mwifiex_dbg(adapter, ERROR,
- "urb allocation failed\n");
+ if (!port->tx_data_list[j].urb)
return -ENOMEM;
- }
}
}
@@ -697,10 +691,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_cmd.ep = card->rx_cmd_ep;
card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->rx_cmd.urb) {
- mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
+ if (!card->rx_cmd.urb)
return -ENOMEM;
- }
card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
if (!card->rx_cmd.skb)
@@ -714,11 +706,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_data_list[i].ep = card->rx_data_ep;
card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->rx_data_list[i].urb) {
- mwifiex_dbg(adapter, ERROR,
- "rx_data_list[] urb allocation failed\n");
+ if (!card->rx_data_list[i].urb)
return -1;
- }
if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
MWIFIEX_RX_DATA_BUF_SIZE))
return -1;
@@ -852,7 +841,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct usb_tx_data_port *port = NULL;
u8 *data = (u8 *)skb->data;
struct urb *tx_urb;
- int idx, ret;
+ int idx, ret = -EINPROGRESS;
if (adapter->is_suspended) {
mwifiex_dbg(adapter, ERROR,
@@ -876,8 +865,9 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
if (atomic_read(&port->tx_data_urb_pending)
>= MWIFIEX_TX_DATA_URB) {
port->block_status = true;
- ret = -EBUSY;
- goto done;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ return -EBUSY;
}
if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
port->tx_data_ix = 0;
@@ -908,6 +898,14 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
else
atomic_inc(&port->tx_data_urb_pending);
+ if (ep != card->tx_cmd_ep &&
+ atomic_read(&port->tx_data_urb_pending) ==
+ MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent = mwifiex_usb_data_sent(adapter);
+ ret = -ENOSR;
+ }
+
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
mwifiex_dbg(adapter, ERROR,
"%s: usb_submit_urb failed\n", __func__);
@@ -916,29 +914,15 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
} else {
atomic_dec(&port->tx_data_urb_pending);
port->block_status = false;
+ adapter->data_sent = false;
if (port->tx_data_ix)
port->tx_data_ix--;
else
port->tx_data_ix = MWIFIEX_TX_DATA_URB;
}
-
- return -1;
- } else {
- if (ep != card->tx_cmd_ep &&
- atomic_read(&port->tx_data_urb_pending) ==
- MWIFIEX_TX_DATA_URB) {
- port->block_status = true;
- ret = -ENOSR;
- goto done;
- }
+ ret = -1;
}
- return -EINPROGRESS;
-
-done:
- if (ep != card->tx_cmd_ep)
- adapter->data_sent = mwifiex_usb_data_sent(adapter);
-
return ret;
}
@@ -1037,6 +1021,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
tlen += sizeof(struct fw_header);
+ /* Command 7 doesn't have a data length field */
+ if (dnld_cmd == FW_CMD_7)
+ dlen = 0;
+
memcpy(fwdata->data, &firmware[tlen], dlen);
fwdata->seq_num = cpu_to_le32(fw_seqnum);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index b4e9246bbcdc..30e8eb8c259d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -46,11 +46,12 @@
#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
-#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME "mrvl/usbusb8997_combo_v4.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
#define FW_HAS_LAST_BLOCK 0x00000004
+#define FW_CMD_7 0x00000007
#define FW_DATA_XMIT_SIZE \
(sizeof(struct fw_header) + dlen + sizeof(u32))
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6681be0511c7..18fbb96a46e9 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -386,6 +386,7 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
"unknown public action frame category %d\n",
category);
}
+ break;
default:
mwifiex_dbg(priv->adapter, INFO,
"unknown mgmt frame subtype %#x\n", stype);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 57a80cfa39b1..a8bc064bc14f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -103,7 +103,7 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
- if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
trace_mt_rx(dev, rxwi, fce_info);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
index 978e8a90b87f..270d126880c0 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.h
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -18,8 +18,6 @@
#include <asm/unaligned.h>
#include <linux/skbuff.h>
-#include "util.h"
-
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -79,9 +77,9 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
*/
info = flags |
- MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
- MT76_SET(MT_TXD_INFO_TYPE, type);
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return skb_put_padto(skb, round_up(skb->len, 4) + 4);
@@ -90,7 +88,7 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
static inline int
mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
- flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index 8d8ee0344f7b..da6faea092d6 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -45,8 +45,8 @@ mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
- val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
- MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
MT_EFUSE_CTRL_KICK;
mt76_wr(dev, MT_EFUSE_CTRL, val);
@@ -128,8 +128,8 @@ mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
if (!field_valid(nic_conf0 >> 8))
return;
- if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
- MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
dev_err(dev->dev,
"Error: device has more than 1 RX/TX stream!\n");
}
@@ -150,7 +150,7 @@ mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
- MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
return 0;
}
@@ -176,7 +176,7 @@ mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
u8 max_pwr;
val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
- max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+ max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
if (mt7601u_has_tssi(dev, eeprom)) {
mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 8fa78d7156be..44d46e25db80 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -108,8 +108,9 @@ static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
{
u32 val;
- val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
- MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
+ MT_USB_AGGR_SIZE_LIMIT) |
MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN;
if (dev->in_max_packet == 512)
@@ -396,8 +397,9 @@ int mt7601u_init_hardware(struct mt7601u_dev *dev)
mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
- mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
- MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+ mt7601u_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
ret = mt7601u_eeprom_init(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index e21c53ed09fb..3c576392ed89 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -19,13 +19,13 @@
static void
mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
{
- u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+ u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate);
txrate->idx = 0;
txrate->flags = 0;
txrate->count = 1;
- switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+ switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
case MT_PHY_TYPE_OFDM:
txrate->idx = idx + 4;
return;
@@ -47,7 +47,7 @@ mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
return;
}
- if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+ if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate & MT_TXWI_RATE_SGI)
@@ -125,9 +125,9 @@ u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
bw = 0;
}
- rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
- rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
- rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+ rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
rateval |= MT_RXWI_RATE_SGI;
@@ -156,9 +156,9 @@ struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
- stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
- stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
- stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val);
return stat;
}
@@ -270,7 +270,7 @@ void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
}
val &= ~MT_BEACON_TIME_CFG_INTVAL;
- val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN;
@@ -349,8 +349,8 @@ mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
u8 zmac[ETH_ALEN] = {};
u32 attr;
- attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
@@ -382,15 +382,15 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
rcu_read_unlock();
mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
- MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
static void
mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
{
- u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+ u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate);
- switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
case MT_PHY_TYPE_OFDM:
if (WARN_ON(idx >= 8))
idx = 0;
@@ -436,7 +436,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
u16 rate, int rssi)
{
dev->bcn_freq_off = rxwi->freq_off;
- dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
}
@@ -458,7 +458,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
int rssi;
- len = MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
if (len < 10)
return 0;
@@ -542,8 +542,8 @@ int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
- val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
- MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
val &= ~MT_WCID_ATTR_PAIRWISE;
val |= MT_WCID_ATTR_PAIRWISE *
!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index e70dd9523911..43ebd460ba86 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -15,7 +15,6 @@
#include "mt7601u.h"
#include "mac.h"
#include <linux/etherdevice.h>
-#include <linux/version.h>
static int mt7601u_start(struct ieee80211_hw *hw)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index 91c4b3427965..dbdfb3f5c507 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -43,8 +43,8 @@ static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
u8 seq, enum mcu_cmd cmd)
{
WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
- MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
- MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+ FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
}
static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
@@ -100,13 +100,13 @@ static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
urb_status);
- if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
- MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
return 0;
- dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
- MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
- seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
}
dev_err(dev->dev, "Error: %s timed out\n", __func__);
@@ -291,9 +291,9 @@ static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
u32 val;
int ret;
- reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
- MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
- MT76_SET(MT_TXD_INFO_LEN, len));
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
memcpy(buf.buf, &reg, sizeof(reg));
memcpy(buf.buf + sizeof(reg), data, len);
memset(buf.buf + sizeof(reg) + len, 0, 8);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 428bd2f10b7b..c7ec40475a5f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -15,6 +15,7 @@
#ifndef MT7601U_H
#define MT7601U_H
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -24,7 +25,6 @@
#include <linux/debugfs.h>
#include "regs.h"
-#include "util.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
@@ -299,7 +299,7 @@ bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val) \
- mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 1908af6add87..ca09a5d4305e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -41,11 +41,12 @@ mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
goto out;
}
- mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
- MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
- MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
- MT_RF_CSR_CFG_WR |
- MT_RF_CSR_CFG_KICK);
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
trace_rf_write(dev, bank, offset, value);
out:
mutex_unlock(&dev->reg_atomic_mutex);
@@ -74,17 +75,18 @@ mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
- mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
- MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
- MT_RF_CSR_CFG_KICK);
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_KICK);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
val = mt7601u_rr(dev, MT_RF_CSR_CFG);
- if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
- MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
- ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
trace_rf_read(dev, bank, offset, ret);
}
out:
@@ -139,8 +141,8 @@ static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
}
mt7601u_wr(dev, MT_BBP_CSR_CFG,
- MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
- MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) |
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
trace_bbp_write(dev, offset, val);
out:
@@ -163,7 +165,7 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
goto out;
mt7601u_wr(dev, MT_BBP_CSR_CFG,
- MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
MT_BBP_CSR_CFG_READ);
@@ -171,8 +173,8 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
goto out;
val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
- if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
- ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+ if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+ ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val);
trace_bbp_read(dev, offset, ret);
}
out:
@@ -249,9 +251,9 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
/* bw40 */ { -2, 16, 34 }
}
};
- int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
- int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
- int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+ int bw = FIELD_GET(MT_RXWI_RATE_BW, rate);
+ int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+ int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
int val;
if (lna_id) /* LNA id can be 0, 2, 3. */
@@ -259,7 +261,7 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
val = 8;
val -= lna[aux_lna][bw][lna_id];
- val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+ val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
val -= dev->ee->lna_gain;
val -= dev->ee->rssi_offset[0];
@@ -939,7 +941,7 @@ static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
- curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+ curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
diff_pwr += curr_pwr;
val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
index afd8978e83fa..27a429d90cec 100644
--- a/drivers/net/wireless/mediatek/mt7601u/regs.h
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -17,10 +17,6 @@
#include <linux/bitops.h>
-#ifndef GENMASK
-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#endif
-
#define MT_ASIC_VERSION 0x0000
#define MT76XX_REV_E3 0x22
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index a0a33dc8f6bc..ad77bec1ba0f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -175,11 +175,12 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
ba_size = min_t(int, 63, ba_size);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
- txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
- txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
- MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density));
+ txwi->flags =
+ cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density));
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->flags = 0;
}
@@ -188,7 +189,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
- pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+ pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
txwi->len_ctl = cpu_to_le16(pkt_len);
return txwi;
@@ -285,9 +286,9 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
WARN_ON(cw_min > 0xf);
WARN_ON(cw_max > 0xf);
- val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
- MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
- MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
* a really long txop on AC0 (see connect.c:2009) but only on
* connect? When not connected should be 0.
@@ -295,7 +296,7 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!hw_q)
val |= 0x60;
else
- val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
deleted file mode 100644
index b89140bf1210..000000000000
--- a/drivers/net/wireless/mediatek/mt7601u/util.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_UTIL_H
-#define __MT76_UTIL_H
-
-/*
- * Power of two check, this will check
- * if the mask that has been given contains and contiguous set of bits.
- * Note that we cannot use the is_power_of_2() function since this
- * check must be done at compile-time.
- */
-#define is_power_of_two(x) ( !((x) & ((x)-1)) )
-#define low_bit_mask(x) ( ((x)-1) & ~(x) )
-#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
-
-/*
- * Macros to find first set bit in a variable.
- * These macros behave the same as the __ffs() functions but
- * the most important difference that this is done during
- * compile-time rather then run-time.
- */
-#define compile_ffs2(__x) \
- __builtin_choose_expr(((__x) & 0x1), 0, 1)
-
-#define compile_ffs4(__x) \
- __builtin_choose_expr(((__x) & 0x3), \
- (compile_ffs2((__x))), \
- (compile_ffs2((__x) >> 2) + 2))
-
-#define compile_ffs8(__x) \
- __builtin_choose_expr(((__x) & 0xf), \
- (compile_ffs4((__x))), \
- (compile_ffs4((__x) >> 4) + 4))
-
-#define compile_ffs16(__x) \
- __builtin_choose_expr(((__x) & 0xff), \
- (compile_ffs8((__x))), \
- (compile_ffs8((__x) >> 8) + 8))
-
-#define compile_ffs32(__x) \
- __builtin_choose_expr(((__x) & 0xffff), \
- (compile_ffs16((__x))), \
- (compile_ffs16((__x) >> 16) + 16))
-
-/*
- * This macro will check the requirements for the FIELD{8,16,32} macros
- * The mask should be a constant non-zero contiguous set of bits which
- * does not exceed the given typelimit.
- */
-#define FIELD_CHECK(__mask) \
- BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
-
-#define MT76_SET(_mask, _val) \
- ({ \
- FIELD_CHECK(_mask); \
- (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \
- })
-
-#define MT76_GET(_mask, _val) \
- ({ \
- FIELD_CHECK(_mask); \
- (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \
- })
-
-#endif
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7cf26c6124d1..6005e14213ca 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
- if (!rt2x00dev->anchor)
+ if (!rt2x00dev->anchor) {
+ retval = -ENOMEM;
goto exit_free_reg;
+ }
init_usb_anchor(rt2x00dev->anchor);
return 0;
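The rt2x00usb_probe() fix above follows the usual allocation-failure pattern: set an explicit error code before jumping to the shared unwind label, rather than falling through with whatever value retval happened to hold. A minimal standalone sketch of the pattern (hypothetical function and label names, not the rt2x00 code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int example_probe(size_t len)
{
	void *anchor;
	int retval = 0;

	anchor = malloc(len);		/* stands in for devm_kmalloc() */
	if (!anchor) {
		retval = -ENOMEM;	/* the assignment the patch adds */
		goto exit_free_reg;
	}

	/* ... further setup would go here ... */
	free(anchor);
	return 0;

exit_free_reg:
	/* undo earlier registration steps here */
	return retval;
}

int main(void)
{
	printf("probe: %d\n", example_probe(64));
	return 0;
}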
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4341d56805f8..1016628926d2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -29,6 +29,7 @@
#define RTL8XXXU_DEBUG_H2C 0x800
#define RTL8XXXU_DEBUG_ACTION 0x1000
#define RTL8XXXU_DEBUG_EFUSE 0x2000
+#define RTL8XXXU_DEBUG_INTERRUPT 0x4000
#define RTW_USB_CONTROL_MSG_TIMEOUT 500
#define RTL8XXXU_MAX_REG_POLL 500
@@ -43,6 +44,7 @@
#define TX_TOTAL_PAGE_NUM 0xf8
#define TX_TOTAL_PAGE_NUM_8192E 0xf3
+#define TX_TOTAL_PAGE_NUM_8723B 0xf7
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
#define TX_PAGE_NUM_PUBQ 0xe7
#define TX_PAGE_NUM_HI_PQ 0x0c
@@ -54,6 +56,11 @@
#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
#define TX_PAGE_NUM_NORM_PQ_8192E 0x00
+#define TX_PAGE_NUM_PUBQ_8723B 0xe7
+#define TX_PAGE_NUM_HI_PQ_8723B 0x0c
+#define TX_PAGE_NUM_LO_PQ_8723B 0x02
+#define TX_PAGE_NUM_NORM_PQ_8723B 0x02
+
#define RTL_FW_PAGE_SIZE 4096
#define RTL8XXXU_FIRMWARE_POLL_MAX 1000
@@ -1312,7 +1319,7 @@ struct rtl8xxxu_fileops {
int (*power_on) (struct rtl8xxxu_priv *priv);
void (*power_off) (struct rtl8xxxu_priv *priv);
void (*reset_8051) (struct rtl8xxxu_priv *priv);
- int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ int (*llt_init) (struct rtl8xxxu_priv *priv);
void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
@@ -1330,11 +1337,17 @@ struct rtl8xxxu_fileops {
u32 ramask, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+ void (*fill_txdesc) (struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
char rx_desc_size;
- char has_s0s1;
+ u8 has_s0s1:1;
+ u8 has_tx_report:1;
+ u8 gen2_thermal_meter:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
@@ -1388,14 +1401,14 @@ int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
struct h2c_cmd *h2c, int len);
int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv);
void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
@@ -1421,6 +1434,14 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
+void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
+void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 69d1a1453ede..f9e2050812ab 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -567,6 +567,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.writeN_block_size = 128,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -579,5 +580,9 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
.mactable = rtl8xxxu_gen1_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ,
};
#endif
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 9a1994f49b7b..df54d27e7851 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1396,6 +1396,114 @@ exit:
return ret;
}
+static int rtl8192eu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ retry = 100;
+ retval = -EBUSY;
+ /*
+ * Poll the 32-bit register at 0x05f8 (REG_SCH_TX_CMD) for 0x00000000 to ensure no TX is pending.
+ */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ if (!retry) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset whole BB */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= 0xff00;
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= ~CR_SECURITY_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Switch DPDT_SEL_P output from register 0x65[2] */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+ /* 0x0005[1] = 1, turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192eu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x04[12:11] = 01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ return 0;
+}
+
static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
{
u16 val16;
@@ -1446,6 +1554,40 @@ exit:
return ret;
}
+void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ rtl8192eu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8xxxu_reset_8051(priv);
+
+ rtl8192eu_active_to_emu(priv);
+ rtl8192eu_emu_to_disabled(priv);
+}
+
static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
{
u32 val32;
@@ -1487,7 +1629,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.parse_efuse = rtl8192eu_parse_efuse,
.load_firmware = rtl8192eu_load_firmware,
.power_on = rtl8192eu_power_on,
- .power_off = rtl8xxxu_power_off,
+ .power_off = rtl8192eu_power_off,
.reset_8051 = rtl8xxxu_reset_8051,
.llt_init = rtl8xxxu_auto_llt_table,
.init_phy_bb = rtl8192eu_init_phy_bb,
@@ -1501,10 +1643,12 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.set_tx_power = rtl8192e_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.writeN_block_size = 128,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 0,
+ .gen2_thermal_meter = 1,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 686c551581b1..aef373028155 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -384,6 +384,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.writeN_block_size = 1024,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -396,4 +397,8 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
.mactable = rtl8xxxu_gen1_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 9d45afb0e3fd..6c086b5657e9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1662,10 +1662,13 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.set_tx_power = rtl8723b_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.writeN_block_size = 1024,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 1,
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
@@ -1674,4 +1677,8 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
.mactable = rtl8723b_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8723B,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8723B,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8723B,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B,
};
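The rtl8xxxu_fileops additions in the tables above (fill_txdesc, the per-chip page counts and the has_tx_report/gen2_thermal_meter flag bits) all push chip differences into the per-device ops table, so the core code can stop checking priv->rtl_chip directly, as the rtl8xxxu_core.c hunks below show. A stripped-down sketch of that pattern (hypothetical struct and values loosely modelled on the tables above, not the driver's real types):

#include <stdio.h>

struct chip_ops {
	void (*fill_txdesc)(unsigned int rate);	/* chip-specific step */
	unsigned int total_page_num;		/* per-chip constant */
	unsigned int has_tx_report:1;		/* per-chip feature flag */
};

static void fill_txdesc_v1(unsigned int rate)
{
	printf("32-byte descriptor, rate %u\n", rate);
}

static void fill_txdesc_v2(unsigned int rate)
{
	printf("40-byte descriptor, rate %u\n", rate);
}

static const struct chip_ops gen1_ops = {
	.fill_txdesc	= fill_txdesc_v1,
	.total_page_num	= 0xf8,
	.has_tx_report	= 0,
};

static const struct chip_ops gen2_ops = {
	.fill_txdesc	= fill_txdesc_v2,
	.total_page_num	= 0xf7,
	.has_tx_report	= 1,
};

int main(void)
{
	const struct chip_ops *ops = &gen2_ops;	/* chosen once, e.g. at probe */

	ops->fill_txdesc(11);
	if (ops->has_tx_report)
		printf("enable TX report timer\n");
	printf("total pages: 0x%02x\n", ops->total_page_num);
	return 0;
}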
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 77048db3b32a..b2d7f6e69667 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -894,7 +894,7 @@ int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
return retval;
}
-int
+static int
rtl8xxxu_gen1_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
struct device *dev = &priv->udev->dev;
@@ -2472,10 +2472,13 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
return ret;
}
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
{
int ret;
int i;
+ u8 last_tx_page;
+
+ last_tx_page = priv->fops->total_page_num;
for (i = 0; i < last_tx_page; i++) {
ret = rtl8xxxu_llt_write(priv, i, i + 1);
@@ -2503,7 +2506,7 @@ exit:
return ret;
}
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv)
{
u32 val32;
int ret = 0;
@@ -3847,28 +3850,6 @@ void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
}
-static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
-
- if (priv->ep_tx_normal_queue)
- val8 = TX_PAGE_NUM_NORM_PQ;
- else
- val8 = 0;
-
- rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
- val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
-
- if (priv->ep_tx_high_queue)
- val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
- if (priv->ep_tx_low_queue)
- val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
- rtl8xxxu_write32(priv, REG_RQPN, val32);
-}
-
static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
{
struct rtl8xxxu_fileops *fops = priv->fops;
@@ -3891,7 +3872,7 @@ static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
- pubq = fops->total_page_num - hq - lq - nq;
+ pubq = fops->total_page_num - hq - lq - nq - 1;
val32 = RQPN_LOAD;
val32 |= (hq << RQPN_HI_PQ_SHIFT);
@@ -3905,6 +3886,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_fileops *fops = priv->fops;
bool macpower;
int ret;
u8 val8;
@@ -3923,18 +3905,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
- ret = priv->fops->power_on(priv);
+ ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
goto exit;
}
- if (!macpower) {
- if (priv->fops->total_page_num)
- rtl8xxxu_init_queue_reserved_page(priv);
- else
- rtl8xxxu_old_init_queue_reserved_page(priv);
- }
+ if (!macpower)
+ rtl8xxxu_init_queue_reserved_page(priv);
ret = rtl8xxxu_init_queue_priority(priv);
dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
@@ -3944,19 +3922,19 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Set RX page boundary
*/
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
- dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
+ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
if (ret)
goto exit;
ret = rtl8xxxu_start_firmware(priv);
- dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
+ dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
if (ret)
goto exit;
- if (priv->fops->phy_init_antenna_selection)
- priv->fops->phy_init_antenna_selection(priv);
+ if (fops->phy_init_antenna_selection)
+ fops->phy_init_antenna_selection(priv);
ret = rtl8xxxu_init_mac(priv);
@@ -3969,7 +3947,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- ret = priv->fops->init_phy_rf(priv);
+ ret = fops->init_phy_rf(priv);
if (ret)
goto exit;
@@ -3994,13 +3972,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Set TX buffer boundary
*/
- if (priv->rtl_chip == RTL8192E)
- val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
- else
- val8 = TX_TOTAL_PAGE_NUM + 1;
-
- if (priv->rtl_chip == RTL8723B)
- val8 -= 1;
+ val8 = fops->total_page_num + 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
@@ -4013,14 +3985,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* The vendor drivers set PBP for all devices, except 8192e.
* There is no explanation for this in any of the sources.
*/
- val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
- (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+ val8 = (fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+ (fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
if (priv->rtl_chip != RTL8192E)
rtl8xxxu_write8(priv, REG_PBP, val8);
dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+ ret = fops->llt_init(priv);
if (ret) {
dev_warn(dev, "%s: LLT table init failed\n", __func__);
goto exit;
@@ -4029,13 +4001,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Chip specific quirks
*/
- priv->fops->usb_quirks(priv);
+ fops->usb_quirks(priv);
/*
- * Presumably this is for 8188EU as well
- * Enable TX report and TX report timer
+ * Enable TX report and TX report timer for 8723bu/8188eu/...
*/
- if (priv->rtl_chip == RTL8723B) {
+ if (fops->has_tx_report) {
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -4170,8 +4141,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
}
- if (priv->fops->init_aggregation)
- priv->fops->init_aggregation(priv);
+ if (fops->init_aggregation)
+ fops->init_aggregation(priv);
/*
* Enable CCK and OFDM block
@@ -4188,7 +4159,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Start out with default power levels for channel 6, 20MHz
*/
- priv->fops->set_tx_power(priv, 1, false);
+ fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
if (priv->rtl_chip != RTL8192E) {
@@ -4204,8 +4175,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
- if (priv->fops->init_statistics)
- priv->fops->init_statistics(priv);
+ if (fops->init_statistics)
+ fops->init_statistics(priv);
if (priv->rtl_chip == RTL8192E) {
/*
@@ -4223,12 +4194,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8723a_phy_lc_calibrate(priv);
- priv->fops->phy_iq_calibrate(priv);
+ fops->phy_iq_calibrate(priv);
/*
* This should enable thermal meter
*/
- if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
+ if (fops->gen2_thermal_meter)
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
@@ -4783,6 +4754,113 @@ static void rtl8xxxu_dump_action(struct device *dev,
}
}
+/*
+ * Fill in v1 (gen1) specific TX descriptor bits.
+ * This format is used on 8188cu/8192cu/8723au
+ */
+void
+rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable)
+{
+ u16 seq_number;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ tx_desc->txdw5 = cpu_to_le32(rate);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+ tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
+
+ if (ampdu_enable)
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
+ else
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+
+ if (short_preamble)
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+
+ if (sgi)
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does mac80211 tell
+ * us which one to use?
+ */
+ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+ TXDESC32_RTS_RATE_SHIFT);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ }
+}
+
+/*
+ * Fill in v2 (gen2) specific TX descriptor bits.
+ * This format is used on 8192eu/8723bu
+ */
+void
+rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable)
+{
+ struct rtl8xxxu_txdesc40 *tx_desc40;
+ u16 seq_number;
+
+ tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ tx_desc40->txdw4 = cpu_to_le32(rate);
+ if (ieee80211_is_data(hdr->frame_control)) {
+ tx_desc40->txdw4 |= cpu_to_le32(0x1f <<
+ TXDESC40_DATA_RATE_FB_SHIFT);
+ }
+
+ tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
+
+ if (ampdu_enable)
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
+ else
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc40->txdw4 = cpu_to_le32(rate);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
+ tx_desc40->txdw4 |=
+ cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
+ tx_desc40->txdw4 |= cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
+ }
+
+ if (short_preamble)
+ tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does mac80211 tell
+ * us which one to use?
+ */
+ tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+ TXDESC40_RTS_RATE_SHIFT);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
+ }
+}
+
static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -4792,7 +4870,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_txdesc32 *tx_desc;
- struct rtl8xxxu_txdesc40 *tx_desc40;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
@@ -4803,7 +4880,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
int ret;
- bool usedesc40, ampdu_enable;
+ bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -4881,107 +4958,26 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
- if (rate_flag & IEEE80211_TX_RC_MCS)
+ if (rate_flag & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
else
rate = tx_rate->hw_value;
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- if (!usedesc40) {
- tx_desc->txdw5 = cpu_to_le32(rate);
-
- if (ieee80211_is_data(hdr->frame_control))
- tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
-
- tx_desc->txdw3 =
- cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
-
- if (ampdu_enable)
- tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
- else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
-
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
- tx_desc->txdw5 |=
- cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
- tx_desc->txdw5 |=
- cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
- }
-
- if (ieee80211_is_data_qos(hdr->frame_control))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+ if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+ (ieee80211_is_data_qos(hdr->frame_control) &&
+ sta && sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
+ sgi = true;
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+ if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+ (sta && vif && vif->bss_conf.use_short_preamble))
+ short_preamble = true;
- if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
- (ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->ht_cap.cap &
- (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
- tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
- }
-
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc->txdw4 |=
- cpu_to_le32(DESC_RATE_24M <<
- TXDESC32_RTS_RATE_SHIFT);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
- }
- } else {
- tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
-
- tx_desc40->txdw4 = cpu_to_le32(rate);
- if (ieee80211_is_data(hdr->frame_control)) {
- tx_desc->txdw4 |=
- cpu_to_le32(0x1f <<
- TXDESC40_DATA_RATE_FB_SHIFT);
- }
-
- tx_desc40->txdw9 =
- cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
-
- if (ampdu_enable)
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
- else
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
-
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
- tx_desc40->txdw3 |=
- cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
- tx_desc40->txdw4 |=
- cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
- tx_desc40->txdw4 |=
- cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
- }
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc40->txdw5 |=
- cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
-
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc->txdw4 |=
- cpu_to_le32(DESC_RATE_24M <<
- TXDESC40_RTS_RATE_SHIFT);
- tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
- tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
- }
- }
+ priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag,
+ sgi, short_preamble, ampdu_enable);
rtl8xxxu_calc_tx_desc_csum(tx_desc);
@@ -5379,7 +5375,8 @@ static void rtl8xxxu_int_complete(struct urb *urb)
struct device *dev = &priv->udev->dev;
int ret;
- dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_INTERRUPT)
+ dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
if (urb->status == 0) {
usb_anchor_urb(urb, &priv->int_anchor);
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -5704,7 +5701,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
ampdu_factor = sta->ht_cap.ampdu_factor;
ampdu_density = sta->ht_cap.ampdu_density;
rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
@@ -5714,21 +5711,21 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ampdu_factor, ampdu_density);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
rtl8xxxu_set_ampdu_factor(priv, 0);
rtl8xxxu_set_ampdu_min_space(priv, 0);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
__func__);
rtl8xxxu_set_ampdu_factor(priv, 0);
rtl8xxxu_set_ampdu_min_space(priv, 0);
break;
case IEEE80211_AMPDU_RX_START:
- dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
break;
case IEEE80211_AMPDU_RX_STOP:
- dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
break;
default:
break;
@@ -5947,7 +5944,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
struct ieee80211_hw *hw;
struct usb_device *udev;
struct ieee80211_supported_band *sband;
- int ret = 0;
+ int ret;
int untested = 1;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -5971,6 +5968,18 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (id->idProduct == 0x1004)
untested = 0;
break;
+ case 0x20f4:
+ if (id->idProduct == 0x648b)
+ untested = 0;
+ break;
+ case 0x2001:
+ if (id->idProduct == 0x3308)
+ untested = 0;
+ break;
+ case 0x2357:
+ if (id->idProduct == 0x0109)
+ untested = 0;
+ break;
default:
break;
}
@@ -5987,6 +5996,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
if (!hw) {
ret = -ENOMEM;
+ priv = NULL;
goto exit;
}
@@ -6035,6 +6045,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
ret = rtl8xxxu_init_device(hw);
+ if (ret)
+ goto exit;
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6085,9 +6097,20 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto exit;
}
+ return 0;
+
exit:
- if (ret < 0)
- usb_put_dev(udev);
+ usb_set_intfdata(interface, NULL);
+
+ if (priv) {
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
+ }
+ usb_put_dev(udev);
+
+ ieee80211_free_hw(hw);
+
return ret;
}
@@ -6111,6 +6134,11 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
+ if (priv->udev->state != USB_STATE_NOTATTACHED) {
+ dev_info(&priv->udev->dev,
+ "Device still attached, trying to reset\n");
+ usb_reset_device(priv->udev);
+ }
usb_put_dev(priv->udev);
ieee80211_free_hw(hw);
}
@@ -6124,6 +6152,9 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8723au_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192eu_fops},
+/* Tested by Myckel Habets */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -6140,6 +6171,12 @@ static struct usb_device_id dev_table[] = {
/* Tested by Andrea Merello */
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Jocelyn Mayer */
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Stefano Bravi */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Currently untested 8188 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
@@ -6187,8 +6224,6 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -6199,8 +6234,6 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
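For concreteness, the reserved-page arithmetic from the rtl8xxxu_init_queue_reserved_page() hunk above, worked through with the 8723bu constants introduced in rtl8xxxu.h (this assumes all three dedicated TX queues are configured; it only illustrates the computation and is not driver code):

#include <stdio.h>

/* 8723bu constants from the rtl8xxxu.h hunk above. */
#define TX_TOTAL_PAGE_NUM_8723B		0xf7
#define TX_PAGE_NUM_HI_PQ_8723B		0x0c
#define TX_PAGE_NUM_LO_PQ_8723B		0x02
#define TX_PAGE_NUM_NORM_PQ_8723B	0x02

int main(void)
{
	unsigned int hq = TX_PAGE_NUM_HI_PQ_8723B;
	unsigned int lq = TX_PAGE_NUM_LO_PQ_8723B;
	unsigned int nq = TX_PAGE_NUM_NORM_PQ_8723B;
	/* pubq = total - hq - lq - nq - 1, as in the hunk above */
	unsigned int pubq = TX_TOTAL_PAGE_NUM_8723B - hq - lq - nq - 1;

	printf("public queue pages: 0x%02x (%u)\n", pubq, pubq);	/* 0xe6 (230) */
	return 0;
}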
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 921c5653fff2..315ccfb2dff5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -213,10 +213,66 @@
#define REG_HMBOX_EXT_1 0x008a
#define REG_HMBOX_EXT_2 0x008c
#define REG_HMBOX_EXT_3 0x008e
+
/* Interrupt registers for 8192e/8723bu/8812 */
#define REG_HIMR0 0x00b0
+#define IMR0_TXCCK BIT(30) /* TXRPT interrupt when CCX bit
+ of the packet is set */
+#define IMR0_PSTIMEOUT BIT(29) /* Power Save Time Out Int */
+#define IMR0_GTINT4 BIT(28) /* Set when GTIMER4 expires */
+#define IMR0_GTINT3 BIT(27) /* Set when GTIMER3 expires */
+#define IMR0_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR0_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR0_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle
+ indication interrupt */
+#define IMR0_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR0_BCNDERR0 BIT(16) /* Beacon Queue DMA Error 0 */
+#define IMR0_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR &
+ HSISR is true) */
+#define IMR0_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt
+ Extension for Win7 */
+#define IMR0_ATIMEND BIT(12) /* CTWindow End or
+ ATIM Window End */
+#define IMR0_HISR1_IND_INT BIT(11) /* HISR1 Indicator
+ (HISR1 & HIMR1 is true) */
+#define IMR0_C2HCMD BIT(10) /* CPU to Host Command INT
+ Status, Write 1 to clear */
+#define IMR0_CPWM2 BIT(9) /* CPU power Mode exchange INT
+ Status, Write 1 to clear */
+#define IMR0_CPWM BIT(8) /* CPU power Mode exchange INT
+ Status, Write 1 to clear */
+#define IMR0_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR0_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR0_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR0_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR0_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR0_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR0_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR0_ROK BIT(0) /* Receive DMA OK */
#define REG_HISR0 0x00b4
#define REG_HIMR1 0x00b8
+#define IMR1_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR1_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR1_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR1_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR1_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR1_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR1_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR1_BCNDERR7 BIT(20) /* Beacon Queue DMA Err Int 7 */
+#define IMR1_BCNDERR6 BIT(19) /* Beacon Queue DMA Err Int 6 */
+#define IMR1_BCNDERR5 BIT(18) /* Beacon Queue DMA Err Int 5 */
+#define IMR1_BCNDERR4 BIT(17) /* Beacon Queue DMA Err Int 4 */
+#define IMR1_BCNDERR3 BIT(16) /* Beacon Queue DMA Err Int 3 */
+#define IMR1_BCNDERR2 BIT(15) /* Beacon Queue DMA Err Int 2 */
+#define IMR1_BCNDERR1 BIT(14) /* Beacon Queue DMA Err Int 1 */
+#define IMR1_ATIMEND_E BIT(13) /* ATIM Window End Extension
+ for Win7 */
+#define IMR1_TXERR BIT(11) /* Tx Error Flag Int Status,
+ write 1 to clear */
+#define IMR1_RXERR BIT(10) /* Rx Error Flag Int Status,
+ write 1 to clear */
+#define IMR1_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR1_RXFOVW BIT(8) /* Receive FIFO Overflow */
#define REG_HISR1 0x00bc
/* Host suspend counter on FPGA platform */
@@ -620,6 +676,7 @@
#define REG_SCH_TXCMD 0x05d0
/* define REG_FW_TSF_SYNC_CNT 0x04a0 */
+#define REG_SCH_TX_CMD 0x05f8
#define REG_FW_RESET_TSF_CNT_1 0x05fc
#define REG_FW_RESET_TSF_CNT_0 0x05fd
#define REG_FW_BCN_DIS_CNT 0x05fe
@@ -780,6 +837,10 @@
#define FPGA_RF_MODE_OFDM BIT(25)
#define REG_FPGA0_TX_INFO 0x0804
+#define FPGA0_TX_INFO_OFDM_PATH_A BIT(0)
+#define FPGA0_TX_INFO_OFDM_PATH_B BIT(1)
+#define FPGA0_TX_INFO_OFDM_PATH_C BIT(2)
+#define FPGA0_TX_INFO_OFDM_PATH_D BIT(3)
#define REG_FPGA0_PSD_FUNC 0x0808
#define REG_FPGA0_TX_GAIN 0x080c
#define REG_FPGA0_RF_TIMING1 0x0810
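The IMR0_*/IMR1_* values added above are plain BIT() masks describing the HIMR0/HISR0 and HIMR1/HISR1 interrupt registers. A standalone sketch of how a status word could be decoded against them (BIT() is re-defined locally and the sample status value is made up):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))

/* A few of the IMR0 bits defined above. */
#define IMR0_ROK	BIT(0)	/* Receive DMA OK */
#define IMR0_RDU	BIT(1)	/* Rx Descriptor Unavailable */
#define IMR0_C2HCMD	BIT(10)	/* CPU to Host Command interrupt */

int main(void)
{
	uint32_t hisr0 = IMR0_ROK | IMR0_C2HCMD;	/* pretend register read */

	if (hisr0 & IMR0_ROK)
		printf("rx dma ok\n");
	if (hisr0 & IMR0_RDU)
		printf("rx descriptor unavailable\n");
	if (hisr0 & IMR0_C2HCMD)
		printf("c2h command pending\n");
	return 0;
}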
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 41f77f8a309e..f95760c13c56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -765,7 +765,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mac->bw_40 = false;
mac->bw_80 = false;
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ channel_type);
break;
}
}
@@ -1135,7 +1136,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->mode = WIRELESS_MODE_AC_24G;
}
- if (vif->type == NL80211_IFTYPE_STATION && sta)
+ if (vif->type == NL80211_IFTYPE_STATION)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index d12586d4f845..0dfa9eac3926 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -179,7 +179,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlpci->const_support_pciaspm);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 9a64f9b703e5..18d979affc18 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -151,7 +151,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", state_toset);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 3524441fd516..6ee6bf8e7eaf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_no_midband;
case COUNTRY_CODE_IC:
return &rtl_regdom_11;
- case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_TELEC_NETGEAR:
return &rtl_regdom_60_64;
+ case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
return COUNTRY_CODE_WORLD_WIDE_13;
case 0x22:
return COUNTRY_CODE_IC;
+ case 0x25:
+ return COUNTRY_CODE_ETSI;
case 0x32:
return COUNTRY_CODE_TELEC_NETGEAR;
case 0x41:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 629125658b87..5360d5332359 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -334,7 +334,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
@@ -405,7 +405,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 4ab6201daf1a..37d6efc3d240 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -355,9 +355,11 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
break; }
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -571,7 +573,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -735,7 +738,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2352,7 +2355,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index b504bd092fc4..f05c2c674165 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -62,7 +62,7 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -100,7 +100,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 7498a1218cba..fffaa92eda81 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1346,7 +1346,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2128,7 +2129,7 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2166,7 +2167,8 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2319,7 +2321,7 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 47e32cb0ec1a..e7b11b40e68d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -280,7 +280,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl88ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 43fcb25c885f..7d152466152b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -352,7 +352,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -456,7 +456,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 60ab2ec4f4ef..27e3d5f9ca34 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -910,7 +910,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1567,7 +1568,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1605,7 +1606,8 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 244607951e28..a47be73a0980 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -141,9 +141,11 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -367,7 +369,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -2154,7 +2157,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index 8283e9b27639..24e483ba3fa4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -62,7 +62,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -97,7 +97,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 1ee5a6ae9960..46d0d945f283 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -300,12 +300,9 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
@@ -554,7 +551,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 4780bdc63b2b..87aa209ae325 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -258,7 +258,7 @@ static struct rtl_mod_params rtl92ce_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92ce_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 8789752f8143..ae8f055483fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1560,7 +1560,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -1931,7 +1931,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index 75a2deb23af1..8514ab652520 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -62,7 +62,7 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -95,7 +95,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index c972fa50926d..4b2976465905 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -277,12 +277,9 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
@@ -517,7 +514,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 62ef8209718f..8de29cc3ced0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -435,7 +435,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
@@ -512,7 +512,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
bwrite_success = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index b0f632462335..d91f8bbfe7a0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -164,9 +164,11 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_INT_AC:
*((bool *)(val)) = rtlpriv->dm.disable_tx_int;
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -361,7 +363,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -502,7 +505,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -1757,7 +1760,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
return;
if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
- return;
+ goto exit;
_rtl92de_efuse_update_chip_version(hw);
_rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
@@ -1790,6 +1793,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
break;
}
rtlefuse->txpwr_fromeprom = true;
+exit:
kfree(hwinfo);
}
@@ -2170,7 +2174,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 76a57ae4af3e..811ba57eb9bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -71,7 +71,7 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -106,7 +106,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d334d2a5ea63..2a1edfd21b96 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -588,7 +588,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_array_table[i],
agctab_array_table[i + 1]);
}
@@ -604,7 +604,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_array_table[i],
agctab_array_table[i + 1]);
}
@@ -620,7 +620,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_5GArray_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_5garray_table[i],
agctab_5garray_table[i + 1]);
}
@@ -836,12 +836,9 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2850,7 +2847,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
@@ -2963,7 +2961,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2994,7 +2993,7 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -3182,7 +3181,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index c6e09a19de1a..0538a4d09568 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92de_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92de_hal_cfg = {
+static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 0708eedd9671..b3f6a9ed15d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -344,7 +344,7 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -433,7 +433,7 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index b07af8d15273..ebf663e1a81a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -338,9 +338,11 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -566,7 +568,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -685,7 +688,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2463,7 +2466,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 8388e371c8e2..47da05dd3076 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -61,7 +61,7 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -91,7 +91,7 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index beafc9a10ad8..5ad7e753c357 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -1927,7 +1927,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -3001,7 +3002,7 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -3041,7 +3042,8 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -3187,7 +3189,7 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index c31c6bfb536d..ac299cbe59b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92ee_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index ddfa0aee5bf8..52e4430edb54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -77,9 +77,11 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((bool *)(val)) = rtlpriv->dm.current_mrc_switch;
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default: {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -297,7 +299,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -433,7 +436,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
@@ -2465,7 +2468,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 44949b5cbb87..9849cb988186 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -68,7 +68,7 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -104,7 +104,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 881821f4e243..4bb75581ab38 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -442,7 +442,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -648,7 +649,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 31baca41ac2f..5e8e02d5de8a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -306,7 +306,7 @@ static struct rtl_mod_params rtl92se_mod_params = {
/* Because memory R/W bursting will cause system hang/crash
* for 92se, so we don't read back after every write action */
-static struct rtl_hal_cfg rtl92se_hal_cfg = {
+static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index b7c0d38ee5b5..1186755e55b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -124,7 +124,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -230,7 +230,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index b88c7ee72dbf..f8be0bd7e326 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -141,9 +141,11 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -366,7 +368,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -546,7 +549,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -1654,7 +1657,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->autoload_failflag, hwinfo);
if (rtlhal->oem_id != RT_CID_DEFAULT)
- return;
+ goto exit;
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
@@ -2225,7 +2228,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index 13173351cbfd..c7be9342136c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -63,7 +63,7 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -105,7 +105,7 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 601b78efedfb..17b58cb32d55 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -1023,7 +1023,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1499,7 +1500,7 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1536,7 +1537,8 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -1682,7 +1684,7 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index ff49a8c0ff61..89c828ad89f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723e_mod_params = {
.disable_watchdog = false,
};
-static struct rtl_hal_cfg rtl8723e_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index d5da0f3c1217..8c5c27ce8e05 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -122,7 +122,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -195,7 +195,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 82e4476cab23..aba60c3145c5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -348,9 +348,11 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -607,7 +609,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -723,8 +726,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n",
- variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2565,7 +2567,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 4196efb723a2..497913eb3b37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -58,7 +58,7 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -100,7 +100,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 285818df149b..3cc2232f25ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -837,7 +837,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -1507,7 +1507,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2515,7 +2516,7 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2553,7 +2554,8 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2705,7 +2707,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 2101793438ed..20b53f035483 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.ant_sel = 0,
};
-static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index a4fc70e8c9c0..b665446351a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -392,7 +392,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -481,7 +481,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 0cddf1ad0fff..1281ebe0c30a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -480,7 +480,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -671,7 +671,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -800,7 +801,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -3934,7 +3935,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index ba1946a0280e..fcb3b28c6b8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -60,7 +60,7 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -133,7 +133,7 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index a71bfe38e7e1..5dad402171c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2063,12 +2063,9 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2133,16 +2130,10 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_B:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -4670,7 +4661,7 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -4714,7 +4705,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -4820,7 +4812,7 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 4159f9b14db6..22f687b1f133 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -316,7 +316,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
.disable_watchdog = 0,
};
-static struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 41617b7b0822..32aa5c1d070a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -739,11 +739,8 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
for (i = 0; i < rtlusb->rx_urb_num; i++) {
err = -ENOMEM;
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Failed to alloc URB!!\n");
+ if (!urb)
goto err_out;
- }
err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
if (err < 0) {
@@ -907,15 +904,12 @@ static void _rtl_tx_complete(struct urb *urb)
static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
struct sk_buff *skb, u32 ep_num)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct urb *_urb;
WARN_ON(NULL == skb);
_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!_urb) {
- RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
- "Can't allocate URB for bulk out!\n");
kfree_skb(skb);
return NULL;
}
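
Aside: the two usb.c hunks above drop the error prints that followed usb_alloc_urb() failures; the memory allocator already logs an out-of-memory warning, so the caller simply unwinds. A minimal sketch of that convention, with all demo_* names hypothetical and not taken from the patch:

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_alloc_one_urb(struct urb **out)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!urb)
		return -ENOMEM;	/* the allocator already warned; no extra trace needed */

	*out = urb;
	return 0;
}
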
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index c5086c2229aa..595f7d5d091a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -394,110 +394,110 @@ enum io_type {
};
enum hw_variables {
- HW_VAR_ETHER_ADDR,
- HW_VAR_MULTICAST_REG,
- HW_VAR_BASIC_RATE,
- HW_VAR_BSSID,
- HW_VAR_MEDIA_STATUS,
- HW_VAR_SECURITY_CONF,
- HW_VAR_BEACON_INTERVAL,
- HW_VAR_ATIM_WINDOW,
- HW_VAR_LISTEN_INTERVAL,
- HW_VAR_CS_COUNTER,
- HW_VAR_DEFAULTKEY0,
- HW_VAR_DEFAULTKEY1,
- HW_VAR_DEFAULTKEY2,
- HW_VAR_DEFAULTKEY3,
- HW_VAR_SIFS,
- HW_VAR_R2T_SIFS,
- HW_VAR_DIFS,
- HW_VAR_EIFS,
- HW_VAR_SLOT_TIME,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_CW_CONFIG,
- HW_VAR_CW_VALUES,
- HW_VAR_RATE_FALLBACK_CONTROL,
- HW_VAR_CONTENTION_WINDOW,
- HW_VAR_RETRY_COUNT,
- HW_VAR_TR_SWITCH,
- HW_VAR_COMMAND,
- HW_VAR_WPA_CONFIG,
- HW_VAR_AMPDU_MIN_SPACE,
- HW_VAR_SHORTGI_DENSITY,
- HW_VAR_AMPDU_FACTOR,
- HW_VAR_MCS_RATE_AVAILABLE,
- HW_VAR_AC_PARAM,
- HW_VAR_ACM_CTRL,
- HW_VAR_DIS_Req_Qsize,
- HW_VAR_CCX_CHNL_LOAD,
- HW_VAR_CCX_NOISE_HISTOGRAM,
- HW_VAR_CCX_CLM_NHM,
- HW_VAR_TxOPLimit,
- HW_VAR_TURBO_MODE,
- HW_VAR_RF_STATE,
- HW_VAR_RF_OFF_BY_HW,
- HW_VAR_BUS_SPEED,
- HW_VAR_SET_DEV_POWER,
-
- HW_VAR_RCR,
- HW_VAR_RATR_0,
- HW_VAR_RRSR,
- HW_VAR_CPU_RST,
- HW_VAR_CHECK_BSSID,
- HW_VAR_LBK_MODE,
- HW_VAR_AES_11N_FIX,
- HW_VAR_USB_RX_AGGR,
- HW_VAR_USER_CONTROL_TURBO_MODE,
- HW_VAR_RETRY_LIMIT,
- HW_VAR_INIT_TX_RATE,
- HW_VAR_TX_RATE_REG,
- HW_VAR_EFUSE_USAGE,
- HW_VAR_EFUSE_BYTES,
- HW_VAR_AUTOLOAD_STATUS,
- HW_VAR_RF_2R_DISABLE,
- HW_VAR_SET_RPWM,
- HW_VAR_H2C_FW_PWRMODE,
- HW_VAR_H2C_FW_JOINBSSRPT,
- HW_VAR_H2C_FW_MEDIASTATUSRPT,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- HW_VAR_FW_PSMODE_STATUS,
- HW_VAR_INIT_RTS_RATE,
- HW_VAR_RESUME_CLK_ON,
- HW_VAR_FW_LPS_ACTION,
- HW_VAR_1X1_RECV_COMBINE,
- HW_VAR_STOP_SEND_BEACON,
- HW_VAR_TSF_TIMER,
- HW_VAR_IO_CMD,
-
- HW_VAR_RF_RECOVERY,
- HW_VAR_H2C_FW_UPDATE_GTK,
- HW_VAR_WF_MASK,
- HW_VAR_WF_CRC,
- HW_VAR_WF_IS_MAC_ADDR,
- HW_VAR_H2C_FW_OFFLOAD,
- HW_VAR_RESET_WFCRC,
-
- HW_VAR_HANDLE_FW_C2H,
- HW_VAR_DL_FW_RSVD_PAGE,
- HW_VAR_AID,
- HW_VAR_HW_SEQ_ENABLE,
- HW_VAR_CORRECT_TSF,
- HW_VAR_BCN_VALID,
- HW_VAR_FWLPS_RF_ON,
- HW_VAR_DUAL_TSF_RST,
- HW_VAR_SWITCH_EPHY_WoWLAN,
- HW_VAR_INT_MIGRATION,
- HW_VAR_INT_AC,
- HW_VAR_RF_TIMING,
-
- HAL_DEF_WOWLAN,
- HW_VAR_MRC,
- HW_VAR_KEEP_ALIVE,
- HW_VAR_NAV_UPPER,
-
- HW_VAR_MGT_FILTER,
- HW_VAR_CTRL_FILTER,
- HW_VAR_DATA_FILTER,
+ HW_VAR_ETHER_ADDR = 0x0,
+ HW_VAR_MULTICAST_REG = 0x1,
+ HW_VAR_BASIC_RATE = 0x2,
+ HW_VAR_BSSID = 0x3,
+ HW_VAR_MEDIA_STATUS = 0x4,
+ HW_VAR_SECURITY_CONF = 0x5,
+ HW_VAR_BEACON_INTERVAL = 0x6,
+ HW_VAR_ATIM_WINDOW = 0x7,
+ HW_VAR_LISTEN_INTERVAL = 0x8,
+ HW_VAR_CS_COUNTER = 0x9,
+ HW_VAR_DEFAULTKEY0 = 0xa,
+ HW_VAR_DEFAULTKEY1 = 0xb,
+ HW_VAR_DEFAULTKEY2 = 0xc,
+ HW_VAR_DEFAULTKEY3 = 0xd,
+ HW_VAR_SIFS = 0xe,
+ HW_VAR_R2T_SIFS = 0xf,
+ HW_VAR_DIFS = 0x10,
+ HW_VAR_EIFS = 0x11,
+ HW_VAR_SLOT_TIME = 0x12,
+ HW_VAR_ACK_PREAMBLE = 0x13,
+ HW_VAR_CW_CONFIG = 0x14,
+ HW_VAR_CW_VALUES = 0x15,
+ HW_VAR_RATE_FALLBACK_CONTROL = 0x16,
+ HW_VAR_CONTENTION_WINDOW = 0x17,
+ HW_VAR_RETRY_COUNT = 0x18,
+ HW_VAR_TR_SWITCH = 0x19,
+ HW_VAR_COMMAND = 0x1a,
+ HW_VAR_WPA_CONFIG = 0x1b,
+ HW_VAR_AMPDU_MIN_SPACE = 0x1c,
+ HW_VAR_SHORTGI_DENSITY = 0x1d,
+ HW_VAR_AMPDU_FACTOR = 0x1e,
+ HW_VAR_MCS_RATE_AVAILABLE = 0x1f,
+ HW_VAR_AC_PARAM = 0x20,
+ HW_VAR_ACM_CTRL = 0x21,
+ HW_VAR_DIS_Req_Qsize = 0x22,
+ HW_VAR_CCX_CHNL_LOAD = 0x23,
+ HW_VAR_CCX_NOISE_HISTOGRAM = 0x24,
+ HW_VAR_CCX_CLM_NHM = 0x25,
+ HW_VAR_TxOPLimit = 0x26,
+ HW_VAR_TURBO_MODE = 0x27,
+ HW_VAR_RF_STATE = 0x28,
+ HW_VAR_RF_OFF_BY_HW = 0x29,
+ HW_VAR_BUS_SPEED = 0x2a,
+ HW_VAR_SET_DEV_POWER = 0x2b,
+
+ HW_VAR_RCR = 0x2c,
+ HW_VAR_RATR_0 = 0x2d,
+ HW_VAR_RRSR = 0x2e,
+ HW_VAR_CPU_RST = 0x2f,
+ HW_VAR_CHECK_BSSID = 0x30,
+ HW_VAR_LBK_MODE = 0x31,
+ HW_VAR_AES_11N_FIX = 0x32,
+ HW_VAR_USB_RX_AGGR = 0x33,
+ HW_VAR_USER_CONTROL_TURBO_MODE = 0x34,
+ HW_VAR_RETRY_LIMIT = 0x35,
+ HW_VAR_INIT_TX_RATE = 0x36,
+ HW_VAR_TX_RATE_REG = 0x37,
+ HW_VAR_EFUSE_USAGE = 0x38,
+ HW_VAR_EFUSE_BYTES = 0x39,
+ HW_VAR_AUTOLOAD_STATUS = 0x3a,
+ HW_VAR_RF_2R_DISABLE = 0x3b,
+ HW_VAR_SET_RPWM = 0x3c,
+ HW_VAR_H2C_FW_PWRMODE = 0x3d,
+ HW_VAR_H2C_FW_JOINBSSRPT = 0x3e,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT = 0x3f,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD = 0x40,
+ HW_VAR_FW_PSMODE_STATUS = 0x41,
+ HW_VAR_INIT_RTS_RATE = 0x42,
+ HW_VAR_RESUME_CLK_ON = 0x43,
+ HW_VAR_FW_LPS_ACTION = 0x44,
+ HW_VAR_1X1_RECV_COMBINE = 0x45,
+ HW_VAR_STOP_SEND_BEACON = 0x46,
+ HW_VAR_TSF_TIMER = 0x47,
+ HW_VAR_IO_CMD = 0x48,
+
+ HW_VAR_RF_RECOVERY = 0x49,
+ HW_VAR_H2C_FW_UPDATE_GTK = 0x4a,
+ HW_VAR_WF_MASK = 0x4b,
+ HW_VAR_WF_CRC = 0x4c,
+ HW_VAR_WF_IS_MAC_ADDR = 0x4d,
+ HW_VAR_H2C_FW_OFFLOAD = 0x4e,
+ HW_VAR_RESET_WFCRC = 0x4f,
+
+ HW_VAR_HANDLE_FW_C2H = 0x50,
+ HW_VAR_DL_FW_RSVD_PAGE = 0x51,
+ HW_VAR_AID = 0x52,
+ HW_VAR_HW_SEQ_ENABLE = 0x53,
+ HW_VAR_CORRECT_TSF = 0x54,
+ HW_VAR_BCN_VALID = 0x55,
+ HW_VAR_FWLPS_RF_ON = 0x56,
+ HW_VAR_DUAL_TSF_RST = 0x57,
+ HW_VAR_SWITCH_EPHY_WoWLAN = 0x58,
+ HW_VAR_INT_MIGRATION = 0x59,
+ HW_VAR_INT_AC = 0x5a,
+ HW_VAR_RF_TIMING = 0x5b,
+
+ HAL_DEF_WOWLAN = 0x5c,
+ HW_VAR_MRC = 0x5d,
+ HW_VAR_KEEP_ALIVE = 0x5e,
+ HW_VAR_NAV_UPPER = 0x5f,
+
+ HW_VAR_MGT_FILTER = 0x60,
+ HW_VAR_CTRL_FILTER = 0x61,
+ HW_VAR_DATA_FILTER = 0x62,
};
enum rt_media_status {
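
Aside: the wifi.h hunk above pins every hw_variables member to an explicit value so the new "switch case %#x not processed" messages can be matched against the header by eye. A small illustrative sketch of the idea; the demo_* names are invented, not driver symbols:

#include <linux/printk.h>
#include <linux/types.h>

enum demo_hw_variable {
	DEMO_VAR_ETHER_ADDR = 0x0,
	DEMO_VAR_BASIC_RATE = 0x2,	/* explicit values keep gaps visible */
	DEMO_VAR_SLOT_TIME  = 0x12,
};

static void demo_get_hw_reg(u8 variable)
{
	switch (variable) {
	case DEMO_VAR_ETHER_ADDR:
		/* handled */
		break;
	default:
		/* %#x prints the value in hex with a 0x prefix, so the log
		 * line maps straight onto the enum definition above. */
		pr_debug("switch case %#x not processed\n", variable);
		break;
	}
}
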
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 4be0409308cb..b5525a38264b 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -309,3 +309,32 @@ out:
kfree(acx);
return ret;
}
+
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl)
+{
+ struct acx_time_sync_cfg *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx time sync cfg: mode %d, addr: %pM",
+ wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC],
+ wl->zone_master_mac_addr);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->sync_mode = wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC];
+ memcpy(acx->zone_mac_addr, wl->zone_master_mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_configure(wl, ACX_TIME_SYNC_CFG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx time sync cfg failed: %d", ret);
+ goto out;
+ }
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 342a2993ef98..2edbbbfd8421 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -37,6 +37,7 @@ enum {
ACX_RX_BA_FILTER = 0x0058,
ACX_AP_SLEEP_CFG = 0x0059,
ACX_DYNAMIC_TRACES_CFG = 0x005A,
+ ACX_TIME_SYNC_CFG = 0x005B,
};
/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -388,6 +389,17 @@ struct acx_dynamic_fw_traces_cfg {
__le32 dynamic_fw_traces;
} __packed;
+/*
+ * ACX_TIME_SYNC_CFG
+ * configure the time sync parameters
+ */
+struct acx_time_sync_cfg {
+ struct acx_header header;
+ u8 sync_mode;
+ u8 zone_mac_addr[ETH_ALEN];
+ u8 padding[1];
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
@@ -402,5 +414,6 @@ int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
int wl18xx_acx_ap_sleep(struct wl1271 *wl);
int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl);
#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 2c5df43b8ed9..b36ce185c9f2 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -22,6 +22,7 @@
#include <net/genetlink.h>
#include "event.h"
#include "scan.h"
+#include "conf.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/vendor_cmd.h"
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 00a04dfc03d1..06d6943b257c 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1397,25 +1397,24 @@ out:
return ret;
}
-#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-
static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
- struct wl18xx_priv_conf *priv_conf)
+ struct wl18xx_priv_conf *priv_conf,
+ const char *file)
{
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
+ ret = request_firmware(&fw, file, dev);
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
- WL18XX_CONF_FILE_NAME, ret);
+ file, ret);
return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
- wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
- WL18XX_CONF_SIZE, fw->size);
+ wl1271_error("%s configuration binary size is wrong, expected %zu got %zu",
+ file, WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
goto out_release;
}
@@ -1448,9 +1447,12 @@ out_release:
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdata = dev_get_platdata(&pdev->dev);
struct wl18xx_priv *priv = wl->priv;
- if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+ if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf,
+ pdata->family->cfg_name) < 0) {
wl1271_warning("falling back to default config");
/* apply driver default configuration */
@@ -2141,4 +2143,3 @@ MODULE_PARM_DESC(num_rx_desc_param,
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
-MODULE_FIRMWARE(WL18XX_CONF_FILE_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index f75d30444117..f00509ea8aca 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -282,6 +282,9 @@ EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
int wlcore_boot_upload_nvs(struct wl1271 *wl)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name = "unknown";
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
@@ -293,6 +296,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
return -ENODEV;
}
+ if (pdev_data && pdev_data->family)
+ nvs_name = pdev_data->family->nvs_name;
+
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
(struct wl1271_nvs_file *)wl->nvs;
@@ -310,8 +316,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
(wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
wl->enable_11a)) {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len, sizeof(struct wl1271_nvs_file));
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
+ sizeof(struct wl1271_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
wl->nvs_len = 0;
@@ -328,8 +335,8 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
} else {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len,
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
sizeof(struct wl128x_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 9e1f2d9c9865..471521a0db7b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -4986,7 +4986,6 @@ static int wl12xx_sta_add(struct wl1271 *wl,
return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
- wl_sta->wl = wl;
hlid = wl_sta->hlid;
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
@@ -6414,9 +6413,12 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
goto out;
}
wl->nvs_len = fw->size;
- } else {
+ } else if (pdev_data->family->nvs_name) {
wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
- WL12XX_NVS_NAME);
+ pdev_data->family->nvs_name);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
+ } else {
wl->nvs = NULL;
wl->nvs_len = 0;
}
@@ -6511,21 +6513,29 @@ out:
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
- int ret;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name;
+ int ret = 0;
- if (!wl->ops || !wl->ptable)
+ if (!wl->ops || !wl->ptable || !pdev_data)
return -EINVAL;
wl->dev = &pdev->dev;
wl->pdev = pdev;
platform_set_drvdata(pdev, wl);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
- wl, wlcore_nvs_cb);
- if (ret < 0) {
- wl1271_error("request_firmware_nowait failed: %d", ret);
- complete_all(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name) {
+ nvs_name = pdev_data->family->nvs_name;
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ nvs_name, &pdev->dev, GFP_KERNEL,
+ wl, wlcore_nvs_cb);
+ if (ret < 0) {
+ wl1271_error("request_firmware_nowait failed for %s: %d",
+ nvs_name, ret);
+ complete_all(&wl->nvs_loading_complete);
+ }
+ } else {
+ wlcore_nvs_cb(NULL, wl);
}
return ret;
@@ -6534,9 +6544,11 @@ EXPORT_SYMBOL_GPL(wlcore_probe);
int wlcore_remove(struct platform_device *pdev)
{
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
- wait_for_completion(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name)
+ wait_for_completion(&wl->nvs_loading_complete);
if (!wl->initialized)
return 0;
@@ -6573,4 +6585,3 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL12XX_NVS_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 5839acbbc782..a6e94b1a12cb 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -216,17 +216,33 @@ static struct wl1271_if_operations sdio_ops = {
};
#ifdef CONFIG_OF
+
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
+};
+
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
+};
+
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
+};
+
static const struct of_device_id wlcore_sdio_of_match_table[] = {
- { .compatible = "ti,wl1271" },
- { .compatible = "ti,wl1273" },
- { .compatible = "ti,wl1281" },
- { .compatible = "ti,wl1283" },
- { .compatible = "ti,wl1801" },
- { .compatible = "ti,wl1805" },
- { .compatible = "ti,wl1807" },
- { .compatible = "ti,wl1831" },
- { .compatible = "ti,wl1835" },
- { .compatible = "ti,wl1837" },
+ { .compatible = "ti,wl1271", .data = &wl127x_data },
+ { .compatible = "ti,wl1273", .data = &wl127x_data },
+ { .compatible = "ti,wl1281", .data = &wl128x_data },
+ { .compatible = "ti,wl1283", .data = &wl128x_data },
+ { .compatible = "ti,wl1801", .data = &wl18xx_data },
+ { .compatible = "ti,wl1805", .data = &wl18xx_data },
+ { .compatible = "ti,wl1807", .data = &wl18xx_data },
+ { .compatible = "ti,wl1831", .data = &wl18xx_data },
+ { .compatible = "ti,wl1835", .data = &wl18xx_data },
+ { .compatible = "ti,wl1837", .data = &wl18xx_data },
{ }
};
@@ -234,9 +250,13 @@ static int wlcore_probe_of(struct device *dev, int *irq,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(wlcore_sdio_of_match_table, np);
+ if (!of_id)
+ return -ENODEV;
- if (!np || !of_match_node(wlcore_sdio_of_match_table, np))
- return -ENODATA;
+ pdev_data->family = of_id->data;
*irq = irq_of_parse_and_map(np, 0);
if (!*irq) {
@@ -263,7 +283,7 @@ static int wlcore_probe_of(struct device *dev, int *irq,
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -275,14 +295,15 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data.if_ops = &sdio_ops;
+ pdev_data = devm_kzalloc(&func->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&func->dev, "can't allocate glue\n");
- goto out;
- }
+ pdev_data->if_ops = &sdio_ops;
+
+ glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
glue->dev = &func->dev;
@@ -292,16 +313,16 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+ ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
if (ret)
- goto out_free_glue;
+ goto out;
/* if sdio can keep power while host is suspended, enable wow */
mmcflags = sdio_get_host_pm_caps(func);
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data.pwr_in_suspend = true;
+ pdev_data->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -323,7 +344,7 @@ static int wl1271_probe(struct sdio_func *func,
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
- goto out_free_glue;
+ goto out;
}
glue->core->dev.parent = &func->dev;
@@ -341,8 +362,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -358,9 +379,6 @@ static int wl1271_probe(struct sdio_func *func,
out_dev_put:
platform_device_put(glue->core);
-out_free_glue:
- kfree(glue);
-
out:
return ret;
}
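
Aside: the sdio.c hunks above move the glue and platform data from kzalloc()/kfree() to devm_kzalloc(), which is why the out_free_glue label disappears. A minimal sketch of the device-managed pattern, assuming hypothetical demo_* names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_glue {		/* hypothetical per-device state */
	int irq;
};

static int demo_probe(struct device *dev)
{
	struct demo_glue *glue;

	/* Device-managed allocation: freed automatically when the device is
	 * unbound, so error paths need no kfree() and no unwind label. */
	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	dev_set_drvdata(dev, glue);
	return 0;
}
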
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 6d24040889b8..f949ad2bd898 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -79,19 +79,19 @@
#define WSPI_MAX_NUM_OF_CHUNKS \
((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
-
-struct wilink_familiy_data {
- char name[8];
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
};
-const struct wilink_familiy_data *wilink_data;
-
-static const struct wilink_familiy_data wl18xx_data = {
- .name = "wl18xx",
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
};
-static const struct wilink_familiy_data wl12xx_data = {
- .name = "wl12xx",
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
};
struct wl12xx_spi_glue {
@@ -429,10 +429,10 @@ static struct wl1271_if_operations spi_ops = {
};
static const struct of_device_id wlcore_spi_of_match_table[] = {
- { .compatible = "ti,wl1271", .data = &wl12xx_data},
- { .compatible = "ti,wl1273", .data = &wl12xx_data},
- { .compatible = "ti,wl1281", .data = &wl12xx_data},
- { .compatible = "ti,wl1283", .data = &wl12xx_data},
+ { .compatible = "ti,wl1271", .data = &wl127x_data},
+ { .compatible = "ti,wl1273", .data = &wl127x_data},
+ { .compatible = "ti,wl1281", .data = &wl128x_data},
+ { .compatible = "ti,wl1283", .data = &wl128x_data},
{ .compatible = "ti,wl1801", .data = &wl18xx_data},
{ .compatible = "ti,wl1805", .data = &wl18xx_data},
{ .compatible = "ti,wl1807", .data = &wl18xx_data},
@@ -460,9 +460,9 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
if (!of_id)
return -ENODEV;
- wilink_data = of_id->data;
- dev_info(&spi->dev, "selected chip familiy is %s\n",
- wilink_data->name);
+ pdev_data->family = of_id->data;
+ dev_info(&spi->dev, "selected chip family is %s\n",
+ pdev_data->family->name);
if (of_find_property(dt_node, "clock-xtal", NULL))
pdev_data->ref_clock_xtal = true;
@@ -479,13 +479,15 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data = devm_kzalloc(&spi->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- pdev_data.if_ops = &spi_ops;
+ pdev_data->if_ops = &spi_ops;
glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
@@ -509,7 +511,7 @@ static int wl1271_probe(struct spi_device *spi)
return PTR_ERR(glue->reg);
}
- ret = wlcore_probe_of(spi, glue, &pdev_data);
+ ret = wlcore_probe_of(spi, glue, pdev_data);
if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
@@ -522,7 +524,7 @@ static int wl1271_probe(struct spi_device *spi)
return ret;
}
- glue->core = platform_device_alloc(wilink_data->name,
+ glue->core = platform_device_alloc(pdev_data->family->name,
PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
@@ -543,8 +545,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 8f28aa02230c..1827546ba807 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -501,6 +501,9 @@ struct wl1271 {
/* dynamic fw traces */
u32 dynamic_fw_traces;
+
+ /* time sync zone master */
+ u8 zone_master_mac_addr[ETH_ALEN];
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 242b4e37b94c..e840985385fc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -35,12 +35,11 @@
#include "conf.h"
#include "ini.h"
-/*
- * wl127x and wl128x are using the same NVS file name. However, the
- * ini parameters between them are different. The driver validates
- * the correct NVS size in wl1271_boot_upload_nvs().
- */
-#define WL12XX_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
+struct wilink_family_data {
+ const char *name;
+ const char *nvs_name; /* wl12xx nvs file */
+ const char *cfg_name; /* wl18xx cfg file */
+};
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -208,6 +207,7 @@ struct wl1271_if_operations {
struct wlcore_platdev_data {
struct wl1271_if_operations *if_ops;
+ const struct wilink_family_data *family;
bool ref_clock_xtal; /* specify whether the clock is XTAL or not */
u32 ref_clock_freq; /* in Hertz */
@@ -347,7 +347,6 @@ struct wl1271_station {
* Used in both AP and STA mode.
*/
u64 total_freed_pkts;
- struct wl1271 *wl;
};
struct wl12xx_vif {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 82d94f83b6b4..932f3f81e8cf 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1258,7 +1258,9 @@ static int wl3501_reset(struct net_device *dev)
{
struct wl3501_card *this = netdev_priv(dev);
int rc = -ENODEV;
+ unsigned long flags;
+ spin_lock_irqsave(&this->lock, flags);
wl3501_block_interrupt(this);
if (wl3501_init_firmware(this)) {
@@ -1280,20 +1282,17 @@ static int wl3501_reset(struct net_device *dev)
pr_debug("%s: device reset", dev->name);
rc = 0;
out:
+ spin_unlock_irqrestore(&this->lock, flags);
return rc;
}
static void wl3501_tx_timeout(struct net_device *dev)
{
- struct wl3501_card *this = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- unsigned long flags;
int rc;
stats->tx_errors++;
- spin_lock_irqsave(&this->lock, flags);
rc = wl3501_reset(dev);
- spin_unlock_irqrestore(&this->lock, flags);
if (rc)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a912dc051111..c5effd6c6be9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -193,7 +193,7 @@ static int upload_code(struct usb_device *udev,
0, 0, p, sizeof(ret), 5000 /* ms */);
if (r != sizeof(ret)) {
dev_err(&udev->dev,
- "control request firmeware confirmation failed."
+ "control request firmware confirmation failed."
" Return value %d\n", r);
if (r >= 0)
r = -ENODEV;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 84d6cbdd11b2..b38fb2cf3364 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -292,8 +292,6 @@ struct xenvif {
#endif
struct xen_netif_ctrl_back_ring ctrl;
- struct task_struct *ctrl_task;
- wait_queue_head_t ctrl_wq;
unsigned int ctrl_irq;
/* Miscellaneous private stuff. */
@@ -359,7 +357,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
-int xenvif_ctrl_kthread(void *data);
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index fb87cb39a56b..613bac057650 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -32,15 +32,6 @@
#include <linux/vmalloc.h>
#include <linux/rculist.h>
-static void xenvif_del_hash(struct rcu_head *rcu)
-{
- struct xenvif_hash_cache_entry *entry;
-
- entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
-
- kfree(entry);
-}
-
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
unsigned int len, u32 val)
{
@@ -76,7 +67,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
if (++vif->hash.cache.count > xenvif_hash_cache_size) {
list_del_rcu(&oldest->link);
vif->hash.cache.count--;
- call_rcu(&oldest->rcu, xenvif_del_hash);
+ kfree_rcu(oldest, rcu);
}
}
@@ -114,7 +105,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
- call_rcu(&entry->rcu, xenvif_del_hash);
+ kfree_rcu(entry, rcu);
}
spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
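
Aside: the hash.c hunks replace call_rcu() plus a callback whose only job was kfree() with kfree_rcu(). A short sketch of the equivalence, with hypothetical demo_* names standing in for the cache entry:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_hash_entry {
	struct list_head link;
	struct rcu_head rcu;
};

static void demo_evict(struct demo_hash_entry *entry)
{
	list_del_rcu(&entry->link);
	/* kfree_rcu() takes the object and the name of its rcu_head member,
	 * and frees the object after a grace period -- no callback needed. */
	kfree_rcu(entry, rcu);
}
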
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 83deeebfc2d1..fb50c6d5f6c3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,15 +128,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
-{
- struct xenvif *vif = dev_id;
-
- wake_up(&vif->ctrl_wq);
-
- return IRQ_HANDLED;
-}
-
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
@@ -570,8 +561,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
- struct task_struct *task;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
&ring_ref, 1, &addr);
@@ -581,11 +571,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
shared = (struct xen_netif_ctrl_sring *)addr;
BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
- init_waitqueue_head(&vif->ctrl_wq);
-
- err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
- xenvif_ctrl_interrupt,
- 0, dev->name, vif);
+ err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
goto err_unmap;
@@ -593,19 +579,13 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
xenvif_init_hash(vif);
- task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
- "%s-control", dev->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", dev->name);
- err = PTR_ERR(task);
+ err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
+ IRQF_ONESHOT, "xen-netback-ctrl", vif);
+ if (err) {
+ pr_warn("Could not setup irq handler for %s\n", dev->name);
goto err_deinit;
}
- get_task_struct(task);
- vif->ctrl_task = task;
-
- wake_up_process(vif->ctrl_task);
-
return 0;
err_deinit:
@@ -774,12 +754,6 @@ void xenvif_disconnect_data(struct xenvif *vif)
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
- if (vif->ctrl_task) {
- kthread_stop(vif->ctrl_task);
- put_task_struct(vif->ctrl_task);
- vif->ctrl_task = NULL;
- }
-
if (vif->ctrl_irq) {
xenvif_deinit_hash(vif);
unbind_from_irqhandler(vif->ctrl_irq, vif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index edbae0b1e8f0..3d0c989384b5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2359,24 +2359,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
return 0;
}
-int xenvif_ctrl_kthread(void *data)
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
- for (;;) {
- wait_event_interruptible(vif->ctrl_wq,
- xenvif_ctrl_work_todo(vif) ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
-
- while (xenvif_ctrl_work_todo(vif))
- xenvif_ctrl_action(vif);
+ while (xenvif_ctrl_work_todo(vif))
+ xenvif_ctrl_action(vif);
- cond_resched();
- }
-
- return 0;
+ return IRQ_HANDLED;
}
static int __init netback_init(void)
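The netback conversion above folds the "wake a dedicated kthread from the hard interrupt" pattern into request_threaded_irq(): the primary handler is NULL, so IRQF_ONESHOT is required and the work runs in the interrupt's own kernel thread. A hedged sketch with hypothetical driver names:

    static irqreturn_t mydev_ctrl_irq_thread(int irq, void *data)
    {
    	struct mydev *md = data;		/* hypothetical per-device context */

    	while (mydev_ctrl_work_todo(md))	/* hypothetical helper */
    		mydev_ctrl_action(md);

    	return IRQ_HANDLED;
    }

    /* NULL primary handler => handled entirely in thread context; the core
     * keeps the line masked until the thread returns (IRQF_ONESHOT).
     */
    err = request_threaded_irq(irq, NULL, mydev_ctrl_irq_thread,
    			   IRQF_ONESHOT, "mydev-ctrl", md);
    if (err)
    	goto err_deinit;

    /* teardown path */
    free_irq(irq, md);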
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 96ccd4e943db..e17879dd5d5a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -565,6 +565,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
+ struct sk_buff *nskb;
/* Drop the packet if no queues are set up */
if (num_queues < 1)
@@ -593,6 +594,20 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
+
+ /* The first req should be at least ETH_HLEN size or the packet will be
+ * dropped by netback.
+ */
+ if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto drop;
+ dev_kfree_skb_any(skb);
+ skb = nskb;
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ }
+
len = skb_headlen(skb);
spin_lock_irqsave(&queue->tx_lock, flags);
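The netfront hunk above guards against a first grant request smaller than an Ethernet header, which netback would drop. The general shape of the fix, as a hedged helper (names are illustrative, not from the patch):

    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <linux/etherdevice.h>

    /* Return an skb whose first ETH_HLEN bytes fit inside the page that
     * holds skb->data, reallocating a linear copy when they do not.
     * Returns NULL on allocation failure (caller drops the packet).
     */
    static struct sk_buff *ensure_eth_header_in_first_page(struct sk_buff *skb)
    {
    	unsigned int offset = offset_in_page(skb->data);
    	struct sk_buff *nskb;

    	if (likely(PAGE_SIZE - offset >= ETH_HLEN))
    		return skb;

    	nskb = skb_copy(skb, GFP_ATOMIC);
    	if (!nskb)
    		return NULL;
    	dev_kfree_skb_any(skb);
    	return nskb;
    }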
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d5c5894f252e..8601c10acf74 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -102,13 +102,16 @@ struct ntb_queue_entry {
void *buf;
unsigned int len;
unsigned int flags;
+ int retries;
+ int errors;
+ unsigned int tx_index;
+ unsigned int rx_index;
struct ntb_transport_qp *qp;
union {
struct ntb_payload_header __iomem *tx_hdr;
struct ntb_payload_header *rx_hdr;
};
- unsigned int index;
};
struct ntb_rx_info {
@@ -259,6 +262,12 @@ enum {
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry);
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1229,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
break;
entry->rx_hdr->flags = 0;
- iowrite32(entry->index, &qp->rx_info->entry);
+ iowrite32(entry->rx_index, &qp->rx_info->entry);
cb_data = entry->cb_data;
len = entry->len;
@@ -1247,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}
-static void ntb_rx_copy_callback(void *data)
+static void ntb_rx_copy_callback(void *data,
+ const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
+ /* we need to check DMA results if we are using DMA */
+ if (res) {
+ enum dmaengine_tx_result dma_err = res->result;
+
+ switch (dma_err) {
+ case DMA_TRANS_READ_FAILED:
+ case DMA_TRANS_WRITE_FAILED:
+ entry->errors++;
+ case DMA_TRANS_ABORTED:
+ {
+ struct ntb_transport_qp *qp = entry->qp;
+ void *offset = qp->rx_buff + qp->rx_max_frame *
+ qp->rx_index;
+
+ ntb_memcpy_rx(entry, offset);
+ qp->rx_memcpy++;
+ return;
+ }
+
+ case DMA_TRANS_NOERROR:
+ default:
+ break;
+ }
+ }
+
entry->flags |= DESC_DONE_FLAG;
ntb_complete_rxc(entry->qp);
@@ -1266,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
- ntb_rx_copy_callback(entry);
+ ntb_rx_copy_callback(entry, NULL);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
@@ -1282,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
int retries = 0;
len = entry->len;
-
- if (!chan)
- goto err;
-
- if (len < copy_bytes)
- goto err;
-
device = chan->device;
pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1316,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
unmap->from_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ txd = device->device_prep_dma_memcpy(chan,
+ unmap->addr[1],
unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (txd)
@@ -1331,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
goto err_get_unmap;
}
- txd->callback = ntb_rx_copy_callback;
+ txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
@@ -1345,13 +1374,38 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
qp->rx_async++;
- return;
+ return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
+ return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+ struct ntb_transport_qp *qp = entry->qp;
+ struct dma_chan *chan = qp->rx_dma_chan;
+ int res;
+
+ if (!chan)
+ goto err;
+
+ if (entry->len < copy_bytes)
+ goto err;
+
+ res = ntb_async_rx_submit(entry, offset);
+ if (res < 0)
+ goto err;
+
+ if (!entry->retries)
+ qp->rx_async++;
+
+ return;
+
+err:
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
}
@@ -1397,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
}
entry->rx_hdr = hdr;
- entry->index = qp->rx_index;
+ entry->rx_index = qp->rx_index;
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
@@ -1467,12 +1521,39 @@ static void ntb_transport_rxc_db(unsigned long data)
}
}
-static void ntb_tx_copy_callback(void *data)
+static void ntb_tx_copy_callback(void *data,
+ const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
+ /* we need to check DMA results if we are using DMA */
+ if (res) {
+ enum dmaengine_tx_result dma_err = res->result;
+
+ switch (dma_err) {
+ case DMA_TRANS_READ_FAILED:
+ case DMA_TRANS_WRITE_FAILED:
+ entry->errors++;
+ case DMA_TRANS_ABORTED:
+ {
+ void __iomem *offset =
+ qp->tx_mw + qp->tx_max_frame *
+ entry->tx_index;
+
+ /* resubmit via CPU */
+ ntb_memcpy_tx(entry, offset);
+ qp->tx_memcpy++;
+ return;
+ }
+
+ case DMA_TRANS_NOERROR:
+ default:
+ break;
+ }
+ }
+
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1507,40 +1588,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
/* Ensure that the data is fully copied out before setting the flags */
wmb();
- ntb_tx_copy_callback(entry);
+ ntb_tx_copy_callback(entry, NULL);
}
-static void ntb_async_tx(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry)
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
{
- struct ntb_payload_header __iomem *hdr;
struct dma_async_tx_descriptor *txd;
struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device;
+ size_t len = entry->len;
+ void *buf = entry->buf;
size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
- void __iomem *offset;
- size_t len = entry->len;
- void *buf = entry->buf;
int retries = 0;
- offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
- hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
- entry->tx_hdr = hdr;
-
- iowrite32(entry->len, &hdr->len);
- iowrite32((u32)qp->tx_pkts, &hdr->ver);
-
- if (!chan)
- goto err;
-
- if (len < copy_bytes)
- goto err;
-
device = chan->device;
- dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
+ dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
@@ -1560,8 +1626,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
unmap->to_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
- len, DMA_PREP_INTERRUPT);
+ txd = device->device_prep_dma_memcpy(chan, dest,
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (txd)
break;
@@ -1574,7 +1641,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
goto err_get_unmap;
}
- txd->callback = ntb_tx_copy_callback;
+ txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
@@ -1585,14 +1652,48 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
- qp->tx_async++;
- return;
+ return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
+ return -ENXIO;
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ struct ntb_payload_header __iomem *hdr;
+ struct dma_chan *chan = qp->tx_dma_chan;
+ void __iomem *offset;
+ int res;
+
+ entry->tx_index = qp->tx_index;
+ offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ entry->tx_hdr = hdr;
+
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32)qp->tx_pkts, &hdr->ver);
+
+ if (!chan)
+ goto err;
+
+ if (entry->len < copy_bytes)
+ goto err;
+
+ res = ntb_async_tx_submit(qp, entry);
+ if (res < 0)
+ goto err;
+
+ if (!entry->retries)
+ qp->tx_async++;
+
+ return;
+
+err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
}
@@ -1928,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data;
entry->len = len;
entry->flags = 0;
+ entry->retries = 0;
+ entry->errors = 0;
+ entry->rx_index = 0;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
@@ -1970,6 +2074,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data;
entry->len = len;
entry->flags = 0;
+ entry->errors = 0;
+ entry->retries = 0;
+ entry->tx_index = 0;
rc = ntb_process_tx(qp, entry);
if (rc)
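The ntb_transport changes switch from txd->callback to txd->callback_result, so the completion handler can inspect the DMA outcome and fall back to a CPU copy on error. A hedged sketch of that pattern with hypothetical names:

    static void mydrv_dma_done(void *param, const struct dmaengine_result *res)
    {
    	struct mydrv_xfer *xfer = param;	/* hypothetical bookkeeping */

    	/* res == NULL when the CPU-copy path invokes the callback directly */
    	if (res && res->result != DMA_TRANS_NOERROR) {
    		mydrv_cpu_copy(xfer);		/* hypothetical memcpy fallback */
    		return;
    	}
    	mydrv_complete(xfer);			/* hypothetical completion */
    }

    /* at submission time */
    txd->callback_result = mydrv_dma_done;
    txd->callback_param = xfer;
    cookie = dmaengine_submit(txd);
    dma_async_issue_pending(chan);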
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2feacc70bf61..329381a28edf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -156,12 +156,14 @@ static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
- if (ns->type == NVME_NS_LIGHTNVM)
- nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+ if (ns->ndev)
+ nvme_nvm_unregister(ns);
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
+ if (ns->disk) {
+ spin_lock(&dev_list_lock);
+ ns->disk->private_data = NULL;
+ spin_unlock(&dev_list_lock);
+ }
put_disk(ns->disk);
ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
@@ -597,7 +599,7 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result)
+ void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
@@ -606,10 +608,9 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
@@ -617,7 +618,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result)
+ void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
@@ -625,12 +626,11 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+ buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
@@ -664,7 +664,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
u32 result;
int status, nr_io_queues;
- status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
if (status < 0)
return status;
@@ -888,42 +888,32 @@ static void nvme_config_discard(struct nvme_ns *ns)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_id_ns *id;
- u8 lbaf, pi_type;
- u16 old_ms;
- unsigned short bs;
-
- if (test_bit(NVME_NS_DEAD, &ns->flags)) {
- set_capacity(disk, 0);
- return -ENODEV;
- }
- if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
- dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
- __func__);
- return -ENODEV;
- }
- if (id->ncap == 0) {
- kfree(id);
+ if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
+ dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
return -ENODEV;
}
- if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
- if (nvme_nvm_register(ns->queue, disk->disk_name)) {
- dev_warn(disk_to_dev(ns->disk),
- "%s: LightNVM init failure\n", __func__);
- kfree(id);
- return -ENODEV;
- }
- ns->type = NVME_NS_LIGHTNVM;
+ if ((*id)->ncap == 0) {
+ kfree(*id);
+ return -ENODEV;
}
if (ns->ctrl->vs >= NVME_VS(1, 1))
- memcpy(ns->eui, id->eui64, sizeof(ns->eui));
+ memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
if (ns->ctrl->vs >= NVME_VS(1, 2))
- memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
+ memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+
+ return 0;
+}
+
+static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+{
+ struct nvme_ns *ns = disk->private_data;
+ u8 lbaf, pi_type;
+ u16 old_ms;
+ unsigned short bs;
old_ms = ns->ms;
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
@@ -962,8 +952,26 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
blk_mq_unfreeze_queue(disk->queue);
+}
+
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_id_ns *id = NULL;
+ int ret;
+
+ if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+ set_capacity(disk, 0);
+ return -ENODEV;
+ }
+
+ ret = nvme_revalidate_ns(ns, &id);
+ if (ret)
+ return ret;
+ __nvme_revalidate_disk(disk, id);
kfree(id);
+
return 0;
}
@@ -1425,7 +1433,7 @@ static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
struct nvme_ctrl *ctrl = ns->ctrl;
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
@@ -1449,7 +1457,7 @@ static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
@@ -1457,7 +1465,7 @@ static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
@@ -1465,7 +1473,7 @@ static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
@@ -1482,7 +1490,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
@@ -1642,6 +1650,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
struct gendisk *disk;
+ struct nvme_id_ns *id;
+ char disk_name[DISK_NAME_LEN];
int node = dev_to_node(ctrl->dev);
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
@@ -1659,34 +1669,49 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
- disk = alloc_disk_node(0, node);
- if (!disk)
- goto out_free_queue;
-
kref_init(&ns->kref);
ns->ns_id = nsid;
- ns->disk = disk;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
-
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
- sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
+
+ if (nvme_revalidate_ns(ns, &id))
+ goto out_free_queue;
+
+ if (nvme_nvm_ns_supported(ns, id)) {
+ if (nvme_nvm_register(ns, disk_name, node,
+ &nvme_ns_attr_group)) {
+ dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
+ __func__);
+ goto out_free_id;
+ }
+ } else {
+ disk = alloc_disk_node(0, node);
+ if (!disk)
+ goto out_free_id;
+
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+ ns->disk = disk;
- if (nvme_revalidate_disk(ns->disk))
- goto out_free_disk;
+ __nvme_revalidate_disk(disk, id);
+ }
mutex_lock(&ctrl->namespaces_mutex);
list_add_tail(&ns->list, &ctrl->namespaces);
mutex_unlock(&ctrl->namespaces_mutex);
kref_get(&ctrl->kref);
- if (ns->type == NVME_NS_LIGHTNVM)
+
+ kfree(id);
+
+ if (ns->ndev)
return;
device_add_disk(ctrl->device, ns->disk);
@@ -1695,8 +1720,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
return;
- out_free_disk:
- kfree(disk);
+ out_free_id:
+ kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
out_release_instance:
@@ -1710,7 +1735,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
- if (ns->disk->flags & GENHD_FL_UP) {
+ if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
@@ -1733,7 +1758,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (revalidate_disk(ns->disk))
+ if (ns->disk && revalidate_disk(ns->disk))
nvme_ns_remove(ns);
nvme_put_ns(ns);
} else
@@ -1826,9 +1851,6 @@ static void nvme_scan_work(struct work_struct *work)
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
-
- if (ctrl->ops->post_scan)
- ctrl->ops->post_scan(ctrl);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -2038,7 +2060,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
*/
- if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
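With the signature change above, nvme_get_features()/nvme_set_features() take an optional kernel buffer and length instead of a caller-mapped DMA address. A brief usage sketch based on the prototypes in this patch (buffer names are illustrative):

    u32 result;
    int ret;

    /* feature with no data payload: pass NULL/0 for the buffer */
    ret = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count,
    			NULL, 0, &result);

    /* feature with a data payload: pass the buffer and its length */
    ret = nvme_get_features(ctrl, fid, nsid, buf, buflen, &result);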
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4eff49174466..5a3f008d3480 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -111,8 +111,19 @@ static void nvmf_host_put(struct nvmf_host *host)
*/
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
- return snprintf(buf, size, "traddr=%s,trsvcid=%s\n",
- ctrl->opts->traddr, ctrl->opts->trsvcid);
+ int len = 0;
+
+ if (ctrl->opts->mask & NVMF_OPT_TRADDR)
+ len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
+ len += snprintf(buf + len, size - len, "%strsvcid=%s",
+ (len) ? "," : "", ctrl->opts->trsvcid);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
+ len += snprintf(buf + len, size - len, "%shost_traddr=%s",
+ (len) ? "," : "", ctrl->opts->host_traddr);
+ len += snprintf(buf + len, size - len, "\n");
+
+ return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
@@ -519,6 +530,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -675,6 +687,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->reconnect_delay = token;
break;
+ case NVMF_OPT_HOST_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->host_traddr = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -741,6 +761,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->traddr);
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
+ kfree(opts->host_traddr);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
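The nvmf_get_address() rewrite emits only the options that were actually set, comma-separating once something is already in the buffer. The same pattern, reduced to a standalone userspace C function for clarity:

    #include <stdio.h>

    /* Append optional fields, inserting a comma once the buffer is non-empty. */
    static int format_address(char *buf, int size, const char *traddr,
    			  const char *trsvcid, const char *host_traddr)
    {
    	int len = 0;

    	if (traddr)
    		len += snprintf(buf + len, size - len, "traddr=%s", traddr);
    	if (trsvcid)
    		len += snprintf(buf + len, size - len, "%strsvcid=%s",
    				len ? "," : "", trsvcid);
    	if (host_traddr)
    		len += snprintf(buf + len, size - len, "%shost_traddr=%s",
    				len ? "," : "", host_traddr);
    	len += snprintf(buf + len, size - len, "\n");
    	return len;
    }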
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46e460aee52d..924145c979f1 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -52,6 +52,7 @@ enum {
NVMF_OPT_KATO = 1 << 7,
NVMF_OPT_HOSTNQN = 1 << 8,
NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+ NVMF_OPT_HOST_TRADDR = 1 << 10,
};
/**
@@ -64,9 +65,12 @@ enum {
* being added.
* @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
* in the NVMe specification, "NVMe Qualified Names").
- * @traddr: network address that will be used by the host to communicate
- * to the added NVMe controller.
- * @trsvcid: network port used for host-controller communication.
+ * @traddr: The transport-specific TRADDR field for a port on the
+ * subsystem which is adding a controller.
+ * @trsvcid: The transport-specific TRSVCID field for a port on the
+ * subsystem which is adding a controller.
+ * @host_traddr: A transport-specific field identifying the NVME host port
+ * to use for the connection to the controller.
* @queue_size: Number of IO queue elements.
* @nr_io_queues: Number of controller IO queues that will be established.
* @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -80,6 +84,7 @@ struct nvmf_ctrl_options {
char *subsysnqn;
char *traddr;
char *trsvcid;
+ char *host_traddr;
size_t queue_size;
unsigned int nr_io_queues;
unsigned int reconnect_delay;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 63f483daf930..f5e3011e31fc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -475,7 +475,7 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
+ rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
@@ -592,14 +592,37 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.max_phys_sect = 64,
};
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+ const struct attribute_group *attrs)
{
- return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
+ struct request_queue *q = ns->queue;
+ struct nvm_dev *dev;
+ int ret;
+
+ dev = nvm_alloc_dev(node);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->q = q;
+ memcpy(dev->name, disk_name, DISK_NAME_LEN);
+ dev->ops = &nvme_nvm_dev_ops;
+ dev->parent_dev = ns->ctrl->device;
+ dev->private_data = ns;
+ ns->ndev = dev;
+
+ ret = nvm_register(dev);
+
+ ns->lba_shift = ilog2(dev->sec_size) - 9;
+
+ if (sysfs_create_group(&dev->dev.kobj, attrs))
+ pr_warn("%s: failed to create sysfs group for identification\n",
+ disk_name);
+ return ret;
}
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
+void nvme_nvm_unregister(struct nvme_ns *ns)
{
- nvm_unregister(disk_name);
+ nvm_unregister(ns->ndev);
}
/* move to shared place when used in multiple places. */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ab18b78102bf..d47f5a5d18c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
+#include <linux/lightnvm.h>
enum {
/*
@@ -154,6 +155,7 @@ struct nvme_ns {
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+ struct nvm_dev *ndev;
struct kref kref;
int instance;
@@ -165,7 +167,6 @@ struct nvme_ns {
u16 ms;
bool ext;
u8 pi_type;
- int type;
unsigned long flags;
#define NVME_NS_REMOVING 0
@@ -184,7 +185,6 @@ struct nvme_ctrl_ops {
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
- void (*post_scan)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
int (*delete_ctrl)(struct nvme_ctrl *ctrl);
const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
@@ -292,9 +292,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result);
+ void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result);
+ void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
@@ -307,20 +307,35 @@ int nvme_sg_get_version_num(int __user *ip);
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct request_queue *q, char *disk_name);
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+ const struct attribute_group *attrs);
+void nvme_nvm_unregister(struct nvme_ns *ns);
+
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+ if (dev->type->devnode)
+ return dev_to_disk(dev)->private_data;
+
+ return (container_of(dev, struct nvm_dev, dev))->private_data;
+}
#else
-static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
+ int node,
+ const struct attribute_group *attrs)
{
return 0;
}
-static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
return 0;
}
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+ return dev_to_disk(dev)->private_data;
+}
#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 60f7eab11865..68ef1875e8a8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -88,7 +89,6 @@ struct nvme_dev {
unsigned max_qid;
int q_depth;
u32 db_stride;
- struct msix_entry *entry;
void __iomem *bar;
struct work_struct reset_work;
struct work_struct remove_work;
@@ -201,6 +201,11 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}
+static int nvmeq_irq(struct nvme_queue *nvmeq)
+{
+ return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
+}
+
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -263,6 +268,13 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
+static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_dev *dev = set->driver_data;
+
+ return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
+}
+
/**
* __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
@@ -960,7 +972,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
return 1;
}
- vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ vector = nvmeq_irq(nvmeq);
nvmeq->dev->online_queues--;
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -968,7 +980,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
- irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
return 0;
@@ -1075,15 +1086,14 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
return NULL;
}
-static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- const char *name)
+static int queue_request_irq(struct nvme_queue *nvmeq)
{
if (use_threaded_interrupts)
- return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq, IRQF_SHARED,
- name, nvmeq);
- return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_SHARED, name, nvmeq);
+ return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
+ nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
+ else
+ return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
+ nvmeq->irqname, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1114,7 +1124,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
+ result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1131,7 +1141,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_admin_init_request,
@@ -1141,9 +1150,9 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
+ .map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
};
@@ -1234,7 +1243,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
goto free_nvmeq;
nvmeq->cq_vector = 0;
- result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
+ result = queue_request_irq(nvmeq);
if (result) {
nvmeq->cq_vector = -1;
goto free_nvmeq;
@@ -1382,7 +1391,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int result, i, vecs, nr_io_queues, size;
+ int result, nr_io_queues, size;
nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
@@ -1417,29 +1426,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, adminq);
+ free_irq(pci_irq_vector(pdev, 0), adminq);
/*
* If we enable msix early due to not intx, disable it again before
* setting up the full range we need.
*/
- if (pdev->msi_enabled)
- pci_disable_msi(pdev);
- else if (pdev->msix_enabled)
- pci_disable_msix(pdev);
-
- for (i = 0; i < nr_io_queues; i++)
- dev->entry[i].entry = i;
- vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
- if (vecs < 0) {
- vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
- if (vecs < 0) {
- vecs = 1;
- } else {
- for (i = 0; i < vecs; i++)
- dev->entry[i].vector = i + pdev->irq;
- }
- }
+ pci_free_irq_vectors(pdev);
+ nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
+ if (nr_io_queues <= 0)
+ return -EIO;
+ dev->max_qid = nr_io_queues;
/*
* Should investigate if there's a performance win from allocating
@@ -1447,10 +1445,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* path to scale better, even if the receive path is limited by the
* number of interrupts.
*/
- nr_io_queues = vecs;
- dev->max_qid = nr_io_queues;
- result = queue_request_irq(dev, adminq, adminq->irqname);
+ result = queue_request_irq(adminq);
if (result) {
adminq->cq_vector = -1;
goto free_queues;
@@ -1462,23 +1458,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return result;
}
-static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
-{
- struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
-}
-
static void nvme_del_queue_end(struct request *req, int error)
{
struct nvme_queue *nvmeq = req->end_io_data;
@@ -1615,15 +1594,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/
- if (pci_enable_msix(pdev, dev->entry, 1)) {
- pci_enable_msi(pdev);
- dev->entry[0].vector = pdev->irq;
- }
-
- if (!dev->entry[0].vector) {
- result = -ENODEV;
- goto disable;
- }
+ result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (result < 0)
+ return result;
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1665,10 +1638,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- if (pdev->msi_enabled)
- pci_disable_msi(pdev);
- else if (pdev->msix_enabled)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
if (pci_is_enabled(pdev)) {
pci_disable_pcie_error_reporting(pdev);
@@ -1743,7 +1713,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
- kfree(dev->entry);
kfree(dev);
}
@@ -1887,7 +1856,6 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read64 = nvme_pci_reg_read64,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
- .post_scan = nvme_pci_post_scan,
.submit_async_event = nvme_pci_submit_async_event,
};
@@ -1920,10 +1888,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
- GFP_KERNEL, node);
- if (!dev->entry)
- goto free;
dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
GFP_KERNEL, node);
if (!dev->queues)
@@ -1964,7 +1928,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_dev_unmap(dev);
free:
kfree(dev->queues);
- kfree(dev->entry);
kfree(dev);
return result;
}
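The pci.c conversion above replaces the hand-rolled MSI-X/MSI fallback and the msix_entry array with pci_alloc_irq_vectors(); individual vectors are then addressed by index via pci_irq_vector(). A hedged sketch of the idiom (handler and context names are hypothetical):

    int nr, err;

    /* Let the PCI core try MSI-X, then MSI, then legacy INTx, and spread
     * the vectors across CPUs (PCI_IRQ_AFFINITY); returns the number
     * allocated or a negative errno.
     */
    nr = pci_alloc_irq_vectors(pdev, 1, nr_wanted,
    			   PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
    if (nr < 0)
    	return nr;

    /* the Linux IRQ number of vector i */
    err = request_irq(pci_irq_vector(pdev, i), my_handler, IRQF_SHARED,
    		  "mydev", ctx);

    /* teardown: free_irq() each vector, then */
    pci_free_irq_vectors(pdev);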
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index fbdb2267e460..5a8388177959 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -54,7 +54,6 @@
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *mr;
struct kref ref;
struct list_head entry;
};
@@ -408,10 +407,7 @@ static void nvme_rdma_free_dev(struct kref *ref)
list_del(&ndev->entry);
mutex_unlock(&device_list_mutex);
- if (!register_always)
- ib_dereg_mr(ndev->mr);
ib_dealloc_pd(ndev->pd);
-
kfree(ndev);
}
@@ -444,24 +440,16 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
ndev->dev = cm_id->device;
kref_init(&ndev->ref);
- ndev->pd = ib_alloc_pd(ndev->dev);
+ ndev->pd = ib_alloc_pd(ndev->dev,
+ register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(ndev->pd))
goto out_free_dev;
- if (!register_always) {
- ndev->mr = ib_get_dma_mr(ndev->pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- if (IS_ERR(ndev->mr))
- goto out_free_pd;
- }
-
if (!(ndev->dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
dev_err(&ndev->dev->dev,
"Memory registrations not supported.\n");
- goto out_free_mr;
+ goto out_free_pd;
}
list_add(&ndev->entry, &device_list);
@@ -469,9 +457,6 @@ out_unlock:
mutex_unlock(&device_list_mutex);
return ndev;
-out_free_mr:
- if (!register_always)
- ib_dereg_mr(ndev->mr);
out_free_pd:
ib_dealloc_pd(ndev->pd);
out_free_dev:
@@ -915,7 +900,7 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
- put_unaligned_le32(queue->device->mr->rkey, sg->key);
+ put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
return 0;
}
@@ -1000,7 +985,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
nvme_rdma_queue_idx(queue))
return nvme_rdma_map_sg_inline(queue, req, c);
- if (!register_always)
+ if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
return nvme_rdma_map_sg_single(queue, req, c);
}
@@ -1495,7 +1480,6 @@ static void nvme_rdma_complete_rq(struct request *rq)
static struct blk_mq_ops nvme_rdma_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.reinit_request = nvme_rdma_reinit_request,
@@ -1507,7 +1491,6 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_admin_request,
.exit_request = nvme_rdma_exit_admin_request,
.reinit_request = nvme_rdma_reinit_request,
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index e947e298a737..c2a0a1c7d05d 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -72,15 +72,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define ALL_LUNS_RETURNED 0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
#define RESTRICTED_LUNS_RETURNED 0x00
-#define NVME_POWER_STATE_START_VALID 0x00
-#define NVME_POWER_STATE_ACTIVE 0x01
-#define NVME_POWER_STATE_IDLE 0x02
-#define NVME_POWER_STATE_STANDBY 0x03
-#define NVME_POWER_STATE_LU_CONTROL 0x07
-#define POWER_STATE_0 0
-#define POWER_STATE_1 1
-#define POWER_STATE_2 2
-#define POWER_STATE_3 3
#define DOWNLOAD_SAVE_ACTIVATE 0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
#define ACTIVATE_DEFERRED_MICROCODE 0x0F
@@ -915,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
kfree(smart_log);
/* Get Features for Temp Threshold */
- res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
+ res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
&feature_resp);
if (res != NVME_SC_SUCCESS)
temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -1048,7 +1039,7 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
if (len < MODE_PAGE_CACHING_LEN)
return -EINVAL;
- nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
+ nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
&feature_resp);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -1229,64 +1220,6 @@ static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
/* Start Stop Unit Helper Functions */
-static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
- u8 pc, u8 pcmod, u8 start)
-{
- int res;
- int nvme_sc;
- struct nvme_id_ctrl *id_ctrl;
- int lowest_pow_st; /* max npss = lowest power consumption */
- unsigned ps_desired = 0;
-
- nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
- res = nvme_trans_status_code(hdr, nvme_sc);
- if (res)
- return res;
-
- lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
- kfree(id_ctrl);
-
- switch (pc) {
- case NVME_POWER_STATE_START_VALID:
- /* Action unspecified if POWER CONDITION MODIFIER != 0 */
- if (pcmod == 0 && start == 0x1)
- ps_desired = POWER_STATE_0;
- if (pcmod == 0 && start == 0x0)
- ps_desired = lowest_pow_st;
- break;
- case NVME_POWER_STATE_ACTIVE:
- /* Action unspecified if POWER CONDITION MODIFIER != 0 */
- if (pcmod == 0)
- ps_desired = POWER_STATE_0;
- break;
- case NVME_POWER_STATE_IDLE:
- /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
- if (pcmod == 0x0)
- ps_desired = POWER_STATE_1;
- else if (pcmod == 0x1)
- ps_desired = POWER_STATE_2;
- else if (pcmod == 0x2)
- ps_desired = POWER_STATE_3;
- break;
- case NVME_POWER_STATE_STANDBY:
- /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
- if (pcmod == 0x0)
- ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
- else if (pcmod == 0x1)
- ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
- break;
- case NVME_POWER_STATE_LU_CONTROL:
- default:
- res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
- ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
- SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
- break;
- }
- nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
- NULL);
- return nvme_trans_status_code(hdr, nvme_sc);
-}
-
static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 buffer_id)
{
@@ -1395,7 +1328,7 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case MODE_PAGE_CACHING:
dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
- dword11, 0, NULL);
+ dword11, NULL, 0, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
break;
case MODE_PAGE_CONTROL:
@@ -2235,11 +2168,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
- u8 immed, pcmod, pc, no_flush, start;
+ u8 immed, pcmod, no_flush, start;
immed = cmd[1] & 0x01;
pcmod = cmd[3] & 0x0f;
- pc = (cmd[4] & 0xf0) >> 4;
no_flush = cmd[4] & 0x04;
start = cmd[4] & 0x01;
@@ -2254,8 +2186,8 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
}
- /* Setup the expected power state transition */
- return nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+
+ return 0;
}
}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 47c564b5a289..7ab9c9381b98 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
+#include <asm/unaligned.h>
#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
@@ -29,8 +30,84 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ struct nvmet_ns *ns;
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+
+ status = NVME_SC_SUCCESS;
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
+ pr_err("nvmet : Counld not find namespace id : %d\n",
+ le32_to_cpu(req->cmd->get_log_page.nsid));
+ goto out;
+ }
+
+ host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+ nvmet_put_namespace(ns);
+out:
+ return status;
+}
+
+static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ u64 host_reads = 0, host_writes = 0;
+ u64 data_units_read = 0, data_units_written = 0;
+ struct nvmet_ns *ns;
+ struct nvmet_ctrl *ctrl;
+
+ status = NVME_SC_SUCCESS;
+ ctrl = req->sq->ctrl;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read +=
+ part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written +=
+ part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ }
+ rcu_read_unlock();
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+ return status;
+}
+
+static u16 nvmet_get_smart_log(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+
+ WARN_ON(req == NULL || slog == NULL);
+ if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+ status = nvmet_get_smart_log_all(req, slog);
+ else
+ status = nvmet_get_smart_log_nsid(req, slog);
+ return status;
+}
+
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
+ struct nvme_smart_log *smart_log;
size_t data_len = nvmet_get_log_page_len(req->cmd);
void *buf;
u16 status = 0;
@@ -59,6 +136,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
* available (e.g. units or commands read/written) those aren't
* persistent over power loss.
*/
+ if (data_len != sizeof(*smart_log)) {
+ status = NVME_SC_INTERNAL;
+ goto err;
+ }
+ smart_log = buf;
+ status = nvmet_get_smart_log(req, smart_log);
+ if (status) {
+ memset(buf, '\0', data_len);
+ goto err;
+ }
break;
case 0x03:
/*
@@ -73,6 +160,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, buf, data_len);
+err:
kfree(buf);
out:
nvmet_req_complete(req, status);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 2cd069b691ae..4a96c2049b7b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -58,6 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
if (req->cmd->rw.opcode == nvme_cmd_write) {
op = REQ_OP_WRITE;
+ op_flags = WRITE_ODIRECT;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op_flags |= REQ_FUA;
} else {
@@ -205,7 +206,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_execute_dsm;
- req->data_len = le32_to_cpu(cmd->dsm.nr) *
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
default:
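One note on the DSM hunk above: the nr field is a zeroes-based, little-endian count, so the "+ 1" belongs outside the byte swap (as written above). As a sketch:

    #include <linux/nvme.h>

    /* cmd->dsm.nr holds (number of ranges - 1) in little-endian order:
     * convert first, then add one, then scale by the descriptor size.
     */
    static size_t nvme_dsm_data_len(const struct nvme_dsm_cmd *dsm)
    {
    	return (le32_to_cpu(dsm->nr) + 1) * sizeof(struct nvme_dsm_range);
    }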
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 395e60dad835..d5df77d686b2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -273,7 +273,6 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static struct blk_mq_ops nvme_loop_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
.timeout = nvme_loop_timeout,
@@ -282,7 +281,6 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
static struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_admin_request,
.init_hctx = nvme_loop_init_admin_hctx,
.timeout = nvme_loop_timeout,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1cbe6e053b5b..f8d23999e0f2 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -848,7 +848,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
ndev->device = cm_id->device;
kref_init(&ndev->ref);
- ndev->pd = ib_alloc_pd(ndev->device);
+ ndev->pd = ib_alloc_pd(ndev->device, 0);
if (IS_ERR(ndev->pd))
goto out_free_dev;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index f550c4596a7a..ba140eaee5c8 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -101,4 +101,14 @@ config NVMEM_VF610_OCOTP
This driver can also be built as a module. If so, the module will
be called nvmem-vf610-ocotp.
+config MESON_EFUSE
+ tristate "Amlogic eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 45ab1ae08fa9..8f942a0cdaec 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 000000000000..f207c3b10482
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,93 @@
+/*
+ * Amlogic eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+static int meson_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ u8 *buf = val;
+ int ret;
+
+ ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset,
+ bytes, 0, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "meson-efuse",
+ .owner = THIS_MODULE,
+ .stride = 1,
+ .word_size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id meson_efuse_match[] = {
+ { .compatible = "amlogic,meson-gxbb-efuse", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_efuse_match);
+
+static int meson_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ unsigned int size;
+
+ if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0)
+ return -EINVAL;
+
+ econfig.dev = &pdev->dev;
+ econfig.reg_read = meson_efuse_read;
+ econfig.size = size;
+
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int meson_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver meson_efuse_driver = {
+ .probe = meson_efuse_probe,
+ .remove = meson_efuse_remove,
+ .driver = {
+ .name = "meson-efuse",
+ .of_match_table = meson_efuse_match,
+ },
+};
+
+module_platform_driver(meson_efuse_driver);
+
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_LICENSE("GPL v2");
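Once the provider above is registered, values are read through the generic nvmem consumer interface; a hedged usage sketch (the cell name is hypothetical):

    #include <linux/nvmem-consumer.h>

    struct nvmem_cell *cell;
    size_t len;
    u8 *value;

    cell = nvmem_cell_get(&pdev->dev, "mac-address");	/* hypothetical cell */
    if (IS_ERR(cell))
    	return PTR_ERR(cell);

    value = nvmem_cell_read(cell, &len);		/* kmalloc'd buffer */
    nvmem_cell_put(cell);
    if (IS_ERR(value))
    	return PTR_ERR(value);

    /* use value[0..len-1], then free it */
    kfree(value);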
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 4d3f391f0a0b..423907bdd259 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -22,17 +22,29 @@
#include <linux/nvmem-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#define EFUSE_A_SHIFT 6
-#define EFUSE_A_MASK 0x3ff
-#define EFUSE_PGENB BIT(3)
-#define EFUSE_LOAD BIT(2)
-#define EFUSE_STROBE BIT(1)
-#define EFUSE_CSB BIT(0)
-
-#define REG_EFUSE_CTRL 0x0000
-#define REG_EFUSE_DOUT 0x0004
+#define RK3288_A_SHIFT 6
+#define RK3288_A_MASK 0x3ff
+#define RK3288_PGENB BIT(3)
+#define RK3288_LOAD BIT(2)
+#define RK3288_STROBE BIT(1)
+#define RK3288_CSB BIT(0)
+
+#define RK3399_A_SHIFT 16
+#define RK3399_A_MASK 0x3ff
+#define RK3399_NBYTES 4
+#define RK3399_STROBSFTSEL BIT(9)
+#define RK3399_RSB BIT(7)
+#define RK3399_PD BIT(5)
+#define RK3399_PGENB BIT(3)
+#define RK3399_LOAD BIT(2)
+#define RK3399_STROBE BIT(1)
+#define RK3399_CSB BIT(0)
+
+#define REG_EFUSE_CTRL 0x0000
+#define REG_EFUSE_DOUT 0x0004
struct rockchip_efuse_chip {
struct device *dev;
@@ -40,8 +52,8 @@ struct rockchip_efuse_chip {
struct clk *clk;
};
-static int rockchip_efuse_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct rockchip_efuse_chip *efuse = context;
u8 *buf = val;
@@ -53,27 +65,82 @@ static int rockchip_efuse_read(void *context, unsigned int offset,
return ret;
}
- writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
+ writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL);
udelay(1);
while (bytes--) {
writel(readl(efuse->base + REG_EFUSE_CTRL) &
- (~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
+ (~(RK3288_A_MASK << RK3288_A_SHIFT)),
efuse->base + REG_EFUSE_CTRL);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
- ((offset++ & EFUSE_A_MASK) << EFUSE_A_SHIFT),
+ ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
- EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL);
+ RK3288_STROBE, efuse->base + REG_EFUSE_CTRL);
udelay(1);
*buf++ = readb(efuse->base + REG_EFUSE_DOUT);
writel(readl(efuse->base + REG_EFUSE_CTRL) &
- (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
+ (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ }
+
+ /* Switch to standby mode */
+ writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL);
+
+ clk_disable_unprepare(efuse->clk);
+
+ return 0;
+}
+
+static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_efuse_chip *efuse = context;
+ unsigned int addr_start, addr_end, addr_offset, addr_len;
+ u32 out_value;
+ u8 *buf;
+ int ret, i = 0;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0) {
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+ return ret;
+ }
+
+ addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_offset = offset % RK3399_NBYTES;
+ addr_len = addr_end - addr_start;
+
+ buf = kzalloc(sizeof(*buf) * addr_len * RK3399_NBYTES, GFP_KERNEL);
+ if (!buf) {
+ clk_disable_unprepare(efuse->clk);
+ return -ENOMEM;
+ }
+
+ writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB,
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+ while (addr_len--) {
+ writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE |
+ ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
+ efuse->base + REG_EFUSE_CTRL);
udelay(1);
+ out_value = readl(efuse->base + REG_EFUSE_DOUT);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE),
+ efuse->base + REG_EFUSE_CTRL);
+ udelay(1);
+
+ memcpy(&buf[i], &out_value, RK3399_NBYTES);
+ i += RK3399_NBYTES;
}
/* Switch to standby mode */
- writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL);
+ writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL);
+
+ memcpy(val, buf + addr_offset, bytes);
+
+ kfree(buf);
clk_disable_unprepare(efuse->clk);
@@ -89,7 +156,27 @@ static struct nvmem_config econfig = {
};
static const struct of_device_id rockchip_efuse_match[] = {
- { .compatible = "rockchip,rockchip-efuse", },
+ /* deprecated but kept around for dts binding compatibility */
+ {
+ .compatible = "rockchip,rockchip-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3066a-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3188-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3288-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
+ .compatible = "rockchip,rk3399-efuse",
+ .data = (void *)&rockchip_rk3399_efuse_read,
+ },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
@@ -99,6 +186,14 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct rockchip_efuse_chip *efuse;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
GFP_KERNEL);
@@ -116,7 +211,7 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
efuse->dev = &pdev->dev;
econfig.size = resource_size(res);
- econfig.reg_read = rockchip_efuse_read;
+ econfig.reg_read = match->data;
econfig.priv = efuse;
econfig.dev = efuse->dev;
nvmem = nvmem_register(&econfig);
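
The rockchip-efuse hunks above select the per-SoC read callback through the .data field of the matched of_device_id and feed it straight into econfig.reg_read. A minimal sketch of that pattern, outside this driver and with hypothetical foo_* names, could look as follows:

/*
 * Illustrative only: choosing a register-read callback per compatible
 * string via of_device_id.data, as the rockchip-efuse change does.
 */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int foo_v1_read(void *ctx, unsigned int off, void *val, size_t bytes)
{
	return 0;	/* stub */
}

static int foo_v2_read(void *ctx, unsigned int off, void *val, size_t bytes)
{
	return 0;	/* stub */
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = (void *)&foo_v1_read },
	{ .compatible = "vendor,foo-v2", .data = (void *)&foo_v2_read },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(foo_of_match, &pdev->dev);
	if (!match || !match->data)
		return -EINVAL;

	/* match->data would become econfig.reg_read in the driver above */
	return 0;
}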
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 3ce69536a7b3..d687e6de24a0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1146,16 +1146,18 @@ EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @len: requested length of property value
+ * @min: minimum allowed length of property value
+ * @max: maximum allowed length of property value (0 means unlimited)
+ * @len: if not NULL, the actual property length is written here
*
 * Search for a property in a device node and validate the requested size.
* Returns the property value on success, -EINVAL if the property does not
* exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * property data is too small or too large.
*
*/
static void *of_find_property_value_of_size(const struct device_node *np,
- const char *propname, u32 len)
+ const char *propname, u32 min, u32 max, size_t *len)
{
struct property *prop = of_find_property(np, propname, NULL);
@@ -1163,9 +1165,14 @@ static void *of_find_property_value_of_size(const struct device_node *np,
return ERR_PTR(-EINVAL);
if (!prop->value)
return ERR_PTR(-ENODATA);
- if (len > prop->length)
+ if (prop->length < min)
+ return ERR_PTR(-EOVERFLOW);
+ if (max && prop->length > max)
return ERR_PTR(-EOVERFLOW);
+ if (len)
+ *len = prop->length;
+
return prop->value;
}
@@ -1189,7 +1196,9 @@ int of_property_read_u32_index(const struct device_node *np,
u32 index, u32 *out_value)
{
const u32 *val = of_find_property_value_of_size(np, propname,
- ((index + 1) * sizeof(*out_value)));
+ ((index + 1) * sizeof(*out_value)),
+ 0,
+ NULL);
if (IS_ERR(val))
return PTR_ERR(val);
@@ -1200,102 +1209,145 @@ int of_property_read_u32_index(const struct device_node *np,
EXPORT_SYMBOL_GPL(of_property_read_u32_index);
/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
+ * of_property_read_variable_u8_array - Find and read an array of u8 from a
+ * property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min will be read.
*
* Search for a property in a device node and read 8-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it. Returns number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* dts entry of array should be like:
* property = /bits/ 8 <0x50 0x60 0x70>;
*
* The out_values is modified only if a valid u8 value can be decoded.
*/
-int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
+int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max)
{
+ size_t sz, count;
const u8 *val = of_find_property_value_of_size(np, propname,
- (sz * sizeof(*out_values)));
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
if (IS_ERR(val))
return PTR_ERR(val);
- while (sz--)
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
*out_values++ = *val++;
- return 0;
+
+ return sz;
}
-EXPORT_SYMBOL_GPL(of_property_read_u8_array);
+EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
+ * of_property_read_variable_u16_array - Find and read an array of u16 from a
+ * property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min will be read.
*
* Search for a property in a device node and read 16-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it. Returns number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* dts entry of array should be like:
* property = /bits/ 16 <0x5000 0x6000 0x7000>;
*
* The out_values is modified only if a valid u16 value can be decoded.
*/
-int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
+int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max)
{
+ size_t sz, count;
const __be16 *val = of_find_property_value_of_size(np, propname,
- (sz * sizeof(*out_values)));
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
if (IS_ERR(val))
return PTR_ERR(val);
- while (sz--)
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
*out_values++ = be16_to_cpup(val++);
- return 0;
+
+ return sz;
}
-EXPORT_SYMBOL_GPL(of_property_read_u16_array);
+EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
+ * of_property_read_variable_u32_array - Find and read an array of 32 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min will be read.
*
* Search for a property in a device node and read 32-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it. Returns number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u32 value can be decoded.
*/
-int of_property_read_u32_array(const struct device_node *np,
+int of_property_read_variable_u32_array(const struct device_node *np,
const char *propname, u32 *out_values,
- size_t sz)
+ size_t sz_min, size_t sz_max)
{
+ size_t sz, count;
const __be32 *val = of_find_property_value_of_size(np, propname,
- (sz * sizeof(*out_values)));
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
if (IS_ERR(val))
return PTR_ERR(val);
- while (sz--)
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
*out_values++ = be32_to_cpup(val++);
- return 0;
+
+ return sz;
}
-EXPORT_SYMBOL_GPL(of_property_read_u32_array);
+EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
/**
* of_property_read_u64 - Find and read a 64 bit integer from a property
@@ -1314,7 +1366,9 @@ int of_property_read_u64(const struct device_node *np, const char *propname,
u64 *out_value)
{
const __be32 *val = of_find_property_value_of_size(np, propname,
- sizeof(*out_value));
+ sizeof(*out_value),
+ 0,
+ NULL);
if (IS_ERR(val))
return PTR_ERR(val);
@@ -1325,38 +1379,51 @@ int of_property_read_u64(const struct device_node *np, const char *propname,
EXPORT_SYMBOL_GPL(of_property_read_u64);
/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
+ * of_property_read_variable_u64_array - Find and read an array of 64 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min will be read.
*
* Search for a property in a device node and read 64-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it. Returns number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u64 value can be decoded.
*/
-int of_property_read_u64_array(const struct device_node *np,
+int of_property_read_variable_u64_array(const struct device_node *np,
const char *propname, u64 *out_values,
- size_t sz)
+ size_t sz_min, size_t sz_max)
{
+ size_t sz, count;
const __be32 *val = of_find_property_value_of_size(np, propname,
- (sz * sizeof(*out_values)));
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
if (IS_ERR(val))
return PTR_ERR(val);
- while (sz--) {
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--) {
*out_values++ = of_read_number(val, 2);
val += 2;
}
- return 0;
+
+ return sz;
}
-EXPORT_SYMBOL_GPL(of_property_read_u64_array);
+EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
/**
* of_property_read_string - Find and read a string from a property
@@ -2010,6 +2077,8 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_aliases, "stdout", NULL);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+ if (of_stdout)
+ console_set_by_of();
}
if (!of_aliases)
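
The of/base.c hunks above change the array readers to return the number of elements actually read rather than 0, and to accept a [sz_min, sz_max] window on the property length. A caller that tolerates a variable-length property could use the new helper as in the sketch below; the foo_* names and the property string are hypothetical, and the fixed-size of_property_read_*_array() wrappers are presumably rebuilt on top of these helpers elsewhere:

#include <linux/of.h>

/* Read between 1 and max u32 cells from a hypothetical "foo,rates" property. */
static int foo_parse_rates(struct device_node *np, u32 *rates, size_t max)
{
	int n;

	n = of_property_read_variable_u32_array(np, "foo,rates", rates, 1, max);
	if (n < 0)
		return n;	/* -EINVAL, -ENODATA or -EOVERFLOW, as documented above */

	return n;		/* number of elements actually read */
}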
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 085c6389afd1..c89d5d231a0e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -924,7 +924,7 @@ static inline void early_init_dt_check_for_initrd(unsigned long node)
#ifdef CONFIG_SERIAL_EARLYCON
-static int __init early_init_dt_scan_chosen_serial(void)
+int __init early_init_dt_scan_chosen_stdout(void)
{
int offset;
const char *p, *q, *options = NULL;
@@ -968,15 +968,6 @@ static int __init early_init_dt_scan_chosen_serial(void)
}
return -ENODEV;
}
-
-static int __init setup_of_earlycon(char *buf)
-{
- if (buf)
- return 0;
-
- return early_init_dt_scan_chosen_serial();
-}
-early_param("earlycon", setup_of_earlycon);
#endif
/**
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f39ccd5aa701..f811d2796437 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -142,6 +142,7 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
dev->dev.of_node = of_node_get(np);
+ dev->dev.fwnode = &np->fwnode;
dev->dev.parent = parent ? : &platform_bus;
if (bus_id)
@@ -241,6 +242,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
/* setup generic device info */
dev->dev.of_node = of_node_get(node);
+ dev->dev.fwnode = &node->fwnode;
dev->dev.parent = parent ? : &platform_bus;
dev->dev.platform_data = platform_data;
if (bus_id)
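
The of/platform.c hunks only add a dev.fwnode assignment in of_device_alloc() and of_amba_device_create(), but one effect is that OF-created devices can then be queried through the firmware-agnostic property API rather than OF-specific helpers. A short illustration, with a hypothetical property name:

#include <linux/property.h>

/* Resolves through dev->fwnode, which the hunks above now populate. */
static int foo_get_bus_width(struct device *dev, u32 *width)
{
	return device_property_read_u32(dev, "bus-width", width);
}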
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 67f9916ff14d..6555eb78d91c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -25,7 +25,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARM || ARM64 || X86
+ def_bool ARC || ARM || ARM64 || X86
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 9b485d873b0d..d7e7c0a827c3 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -274,4 +274,31 @@ config PCIE_ARTPEC6
Say Y here to enable PCIe controller support on Axis ARTPEC-6
SoCs. This PCIe controller uses the DesignWare core.
+config PCIE_ROCKCHIP
+ bool "Rockchip PCIe controller"
+ depends on ARCH_ROCKCHIP
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ select MFD_SYSCON
+ help
+ Say Y here if you want internal PCIe support on Rockchip SoCs.
+ There is one internal PCIe port available, supporting Gen2 with
+ four slots.
+
+config VMD
+ depends on PCI_MSI && X86_64
+ tristate "Intel Volume Management Device Driver"
+ default N
+ ---help---
+ Adds support for the Intel Volume Management Device (VMD). VMD is a
+ secondary PCI host bridge that allows PCI Express root ports,
+ and devices attached to them, to be removed from the default
+ PCI domain and placed within the VMD domain. This provides
+ more bus resources than are otherwise possible with a
+ single domain. If you know your system provides one of these and
+ has devices attached to it, say Y; if you are not sure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmd.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 88434101e4c4..084cb4983645 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -31,3 +31,5 @@ obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index ef9893fa3176..e4a5b7ee90cf 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -848,7 +848,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
@@ -862,7 +862,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
@@ -874,9 +874,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
lower_32_bits(res->start),
OB_PCIE_IO);
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
advk_pcie_set_ob_win(pcie, 0,
@@ -925,10 +927,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pcie->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pcie->base)) {
- dev_err(&pdev->dev, "Failed to map registers\n");
+ if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- }
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 81b3949a26db..19223ed2e619 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -443,25 +443,6 @@ err_phy:
return ret;
}
-static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
-{
- struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
- struct pcie_port *pp = &dra7xx->pp;
- struct device *dev = &pdev->dev;
- int count = dra7xx->phy_count;
-
- if (pp->irq_domain)
- irq_domain_remove(pp->irq_domain);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
- while (count--) {
- phy_power_off(dra7xx->phy[count]);
- phy_exit(dra7xx->phy[count]);
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
@@ -545,19 +526,13 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
{ .compatible = "ti,dra7-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
static struct platform_driver dra7xx_pcie_driver = {
- .remove = __exit_p(dra7xx_pcie_remove),
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
+ .suppress_bind_attrs = true,
.pm = &dra7xx_pcie_pm_ops,
},
};
-
-module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
-
-MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_DESCRIPTION("TI PCIe controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
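
The dra7xx conversion above drops the module boilerplate in favour of builtin_platform_driver_probe(). For illustration, that macro is roughly equivalent to the following open-coded initcall (the real definition lives in <linux/platform_device.h>):

static int __init dra7xx_pcie_driver_init(void)
{
	return platform_driver_probe(&dra7xx_pcie_driver, dra7xx_pcie_probe);
}
device_initcall(dra7xx_pcie_driver_init);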
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 219976103efc..2e2d7f00b9e8 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -16,7 +16,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -425,12 +425,15 @@ static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
exynos_pcie_msi_init(pp);
}
-static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
- void __iomem *dbi_base, u32 *val)
+static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
+ void __iomem *dbi_base)
{
+ u32 val;
+
exynos_pcie_sideband_dbi_r_mode(pp, true);
- *val = readl(dbi_base);
+ val = readl(dbi_base);
exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return val;
}
static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
@@ -624,7 +627,6 @@ static const struct of_device_id exynos_pcie_of_match[] = {
{ .compatible = "samsung,exynos5440-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
static struct platform_driver exynos_pcie_driver = {
.remove = __exit_p(exynos_pcie_remove),
@@ -641,7 +643,3 @@ static int __init exynos_pcie_init(void)
return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 9d9d34e959b6..e3c48b5deb93 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -1,4 +1,6 @@
/*
+ * Generic PCI host driver common code
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -17,7 +19,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
@@ -29,7 +30,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
int err, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
@@ -39,15 +40,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
- resource_list_for_each_entry(win, resources) {
+ resource_list_for_each_entry_safe(win, tmp, resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
@@ -162,7 +165,3 @@ int pci_host_common_probe(struct platform_device *pdev,
pci_bus_add_devices(bus);
return 0;
}
-
-MODULE_DESCRIPTION("Generic PCI host driver common code");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
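
Both this file and the aardvark change above switch to the _safe list walker so that an I/O window whose pci_remap_iospace() call fails can be dropped from the resource list while it is being iterated. A standalone sketch of the pattern, with a hypothetical foo_ name:

#include <linux/pci.h>
#include <linux/resource_ext.h>

static void foo_prune_io_windows(struct list_head *resources,
				 resource_size_t iobase)
{
	struct resource_entry *win, *tmp;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		if (resource_type(res) != IORESOURCE_IO)
			continue;

		if (pci_remap_iospace(res, iobase))
			/* unusable window: unlink and free the entry */
			resource_list_destroy_entry(win);
	}
}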
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 6955ffdb89f3..763ff8745828 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -200,11 +200,11 @@ struct tran_int_desc {
*/
struct pci_message {
- u32 message_type;
+ u32 type;
} __packed;
struct pci_child_message {
- u32 message_type;
+ struct pci_message message_type;
union win_slot_encoding wslot;
} __packed;
@@ -222,7 +222,8 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
- struct pci_message message;
+
+ struct pci_message message[0];
};
/*
@@ -258,7 +259,7 @@ struct pci_bus_d0_entry {
struct pci_bus_relations {
struct pci_incoming_message incoming;
u32 device_count;
- struct pci_function_description func[1];
+ struct pci_function_description func[0];
} __packed;
struct pci_q_res_req_response {
@@ -314,7 +315,7 @@ struct pci_dev_incoming {
} __packed;
struct pci_eject_response {
- u32 message_type;
+ struct pci_message message_type;
union win_slot_encoding wslot;
u32 status;
} __packed;
@@ -373,7 +374,6 @@ struct hv_pcibus_device {
struct list_head children;
struct list_head dr_list;
- struct work_struct wrk;
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
@@ -393,7 +393,7 @@ struct hv_dr_work {
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct pci_function_description func[1];
+ struct pci_function_description func[0];
};
enum hv_pcichild_state {
@@ -447,15 +447,16 @@ struct hv_pci_compl {
* for any message for which the completion packet contains a
* status and nothing else.
*/
-static
-void
-hv_pci_generic_compl(void *context, struct pci_response *resp,
- int resp_packet_size)
+static void hv_pci_generic_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
{
struct hv_pci_compl *comp_pkt = context;
if (resp_packet_size >= offsetofend(struct pci_response, status))
comp_pkt->completion_status = resp->status;
+ else
+ comp_pkt->completion_status = -1;
+
complete(&comp_pkt->host_event);
}
@@ -694,13 +695,12 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
struct pci_delete_interrupt *int_pkt;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_delete_interrupt) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
- int_pkt->message_type.message_type =
+ int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc = *int_desc;
@@ -847,8 +847,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct cpumask *affinity;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_create_interrupt) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_create_interrupt)];
} ctxt;
int cpu;
int ret;
@@ -876,7 +875,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
ctxt.pkt.completion_func = hv_pci_compose_compl;
ctxt.pkt.compl_ctxt = &comp;
int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
- int_pkt->message_type.message_type = PCI_CREATE_INTERRUPT_MESSAGE;
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc.vector = cfg->vector;
int_pkt->int_desc.vector_count = 1;
@@ -897,8 +896,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion(&comp.comp_pkt.host_event);
+ if (ret)
+ goto free_int_desc;
+
+ wait_for_completion(&comp.comp_pkt.host_event);
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,
@@ -1289,7 +1290,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
res_req = (struct pci_child_message *)&pkt.init_packet.message;
- res_req->message_type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+ res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
@@ -1466,8 +1467,7 @@ static void pci_devices_present_work(struct work_struct *work)
if (hpdev->reported_missing) {
found = true;
put_pcichild(hpdev, hv_pcidev_ref_childlist);
- list_del(&hpdev->list_entry);
- list_add_tail(&hpdev->list_entry, &removed);
+ list_move_tail(&hpdev->list_entry, &removed);
break;
}
}
@@ -1558,8 +1558,7 @@ static void hv_eject_device_work(struct work_struct *work)
int wslot;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_eject_response) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_eject_response)];
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
@@ -1585,7 +1584,7 @@ static void hv_eject_device_work(struct work_struct *work)
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
- ejct_pkt->message_type = PCI_EJECTION_COMPLETE;
+ ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
@@ -1688,7 +1687,7 @@ static void hv_pci_onchannelcallback(void *context)
case VM_PKT_DATA_INBAND:
new_message = (struct pci_incoming_message *)buffer;
- switch (new_message->message_type.message_type) {
+ switch (new_message->message_type.type) {
case PCI_BUS_RELATIONS:
bus_rel = (struct pci_bus_relations *)buffer;
@@ -1719,7 +1718,7 @@ static void hv_pci_onchannelcallback(void *context)
default:
dev_warn(&hbus->hdev->device,
"Unimplemented protocol message %x\n",
- new_message->message_type.message_type);
+ new_message->message_type.type);
break;
}
break;
@@ -1772,7 +1771,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
version_req = (struct pci_version_request *)&pkt->message;
- version_req->message_type.message_type = PCI_QUERY_PROTOCOL_VERSION;
+ version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
ret = vmbus_sendpacket(hdev->channel, version_req,
@@ -1973,7 +1972,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
- d0_entry->message_type.message_type = PCI_BUS_D0ENTRY;
+ d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
@@ -2019,7 +2018,7 @@ static int hv_pci_query_relations(struct hv_device *hdev)
return -ENOTEMPTY;
memset(&message, 0, sizeof(message));
- message.message_type = PCI_QUERY_BUS_RELATIONS;
+ message.type = PCI_QUERY_BUS_RELATIONS;
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
@@ -2072,8 +2071,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- pkt->message.message_type = PCI_RESOURCES_ASSIGNED;
res_assigned = (struct pci_resources_assigned *)&pkt->message;
+ res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2123,7 +2122,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
continue;
memset(&pkt, 0, sizeof(pkt));
- pkt.message_type = PCI_RESOURCES_RELEASED;
+ pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2290,7 +2289,7 @@ static int hv_pci_remove(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message.message_type = PCI_BUS_D0EXIT;
+ pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
sizeof(struct pci_message),
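
The hv_pci changes above give struct pci_packet a zero-length message[0] tail, so the per-message buffers no longer subtract sizeof(struct pci_message) and a larger message type simply overlays the space after the header. A sketch of that layout idea with hypothetical foo_ types:

#include <linux/types.h>

struct foo_message {
	u32 type;
};

struct foo_packet {
	void (*completion_func)(void *context);
	void *compl_ctxt;
	struct foo_message message[0];	/* payload starts right after the header */
};

struct foo_child_message {
	struct foo_message hdr;
	u32 slot;
};

static void foo_build_packet(void)
{
	struct {
		struct foo_packet pkt;
		u8 buffer[sizeof(struct foo_child_message)];
	} ctxt;
	struct foo_child_message *msg;

	/* The concrete message overlays pkt.message / buffer in one object. */
	msg = (struct foo_child_message *)&ctxt.pkt.message;
	msg->hdr.type = 1;
	msg->slot = 0;
}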
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index b741a36a67f3..ead4a5c3480b 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -739,7 +739,6 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
{},
};
-MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
static struct platform_driver imx6_pcie_driver = {
.driver = {
@@ -749,14 +748,8 @@ static struct platform_driver imx6_pcie_driver = {
.shutdown = imx6_pcie_shutdown,
};
-/* Freescale PCIe driver does not allow module unload */
-
static int __init imx6_pcie_init(void)
{
return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
-module_init(imx6_pcie_init);
-
-MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
-MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 8ba28834d470..82b461b5b08a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -334,8 +334,9 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
if (ks_pcie->error_irq <= 0)
dev_info(&pdev->dev, "no error IRQ defined\n");
else {
- if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
+ ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ IRQF_SHARED, "pcie-error-irq", ks_pcie);
+ if (ret < 0) {
dev_err(&pdev->dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 6de0757b11e4..e2a8e4cab22e 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -240,7 +240,7 @@ struct tegra_msi {
};
/* used to differentiate between Tegra SoC generations */
-struct tegra_pcie_soc_data {
+struct tegra_pcie_soc {
unsigned int num_ports;
unsigned int msi_base_shift;
u32 pads_pll_ctl;
@@ -300,7 +300,7 @@ struct tegra_pcie {
struct regulator_bulk_data *supplies;
unsigned int num_supplies;
- const struct tegra_pcie_soc_data *soc_data;
+ const struct tegra_pcie_soc *soc;
struct dentry *debugfs;
};
@@ -542,8 +542,8 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
- const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long value;
/* enable reference clock */
@@ -562,8 +562,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
- const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long value;
/* assert port reset */
@@ -621,7 +621,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err < 0)
return err;
- pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
+ err = pci_remap_iospace(&pcie->pio, pcie->io.start);
+ if (!err)
+ pci_add_resource_offset(&sys->resources, &pcie->pio,
+ sys->io_offset);
+
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
@@ -631,7 +635,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err < 0)
return err;
- pci_remap_iospace(&pcie->pio, pcie->io.start);
return 1;
}
@@ -774,7 +777,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -790,7 +793,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
@@ -845,7 +848,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
/* disable TX/RX data */
@@ -906,7 +909,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@@ -974,7 +977,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
int err;
@@ -1067,7 +1070,7 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
reset_control_assert(pcie->pcie_xrst);
@@ -1117,7 +1120,7 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
if (IS_ERR(pcie->pex_clk))
@@ -1234,7 +1237,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct device_node *np = pcie->dev->of_node;
struct tegra_pcie_port *port;
int err;
@@ -1486,7 +1489,7 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
int err;
@@ -1799,8 +1802,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct device_node *np = pcie->dev->of_node, *port;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
u32 lanes = 0, mask = 0;
@@ -2043,7 +2046,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
return 0;
}
-static const struct tegra_pcie_soc_data tegra20_pcie_data = {
+static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.msi_base_shift = 0,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
@@ -2056,7 +2059,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_gen2 = false,
};
-static const struct tegra_pcie_soc_data tegra30_pcie_data = {
+static const struct tegra_pcie_soc tegra30_pcie = {
.num_ports = 3,
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2070,7 +2073,7 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_gen2 = false,
};
-static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+static const struct tegra_pcie_soc tegra124_pcie = {
.num_ports = 2,
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2084,9 +2087,9 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
};
static const struct of_device_id tegra_pcie_of_match[] = {
- { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
- { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
- { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
+ { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
+ { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
+ { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
{ },
};
@@ -2201,21 +2204,16 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct tegra_pcie *pcie;
int err;
- match = of_match_device(tegra_pcie_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
+ pcie->soc = of_device_get_match_data(&pdev->dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
- pcie->soc_data = match->data;
pcie->dev = &pdev->dev;
err = tegra_pcie_parse_dt(pcie);
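
Dropping the explicit of_match_device() lookup in tegra_pcie_probe() works because of_device_get_match_data() performs the same match and hands back the .data pointer directly. A compact sketch with hypothetical foo_ names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_soc {
	unsigned int num_ports;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -ENODEV;

	/* soc->num_ports etc. would be used here, as pcie->soc is above */
	return 0;
}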
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f234405770ab..b7dc07002f13 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
int err, mem = 1, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
if (err)
@@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, res) {
+ resource_list_for_each_entry_safe(win, tmp, res) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c
index 99177f4ccde2..4e5d628e8cd4 100644
--- a/drivers/pci/host/pcie-altera-msi.c
+++ b/drivers/pci/host/pcie-altera-msi.c
@@ -1,4 +1,8 @@
/*
+ * Altera PCIe MSI support
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ *
* Copyright Altera Corporation (C) 2013-2015. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
@@ -16,7 +20,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -237,11 +241,6 @@ static int altera_msi_probe(struct platform_device *pdev)
msi->pdev = pdev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- if (!res) {
- dev_err(&pdev->dev, "no csr memory resource defined\n");
- return -ENODEV;
- }
-
msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi->csr_base)) {
dev_err(&pdev->dev, "failed to map csr memory\n");
@@ -250,11 +249,6 @@ static int altera_msi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"vector_slave");
- if (!res) {
- dev_err(&pdev->dev, "no vector_slave memory resource defined\n");
- return -ENODEV;
- }
-
msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi->vector_base)) {
dev_err(&pdev->dev, "failed to map vector_slave memory\n");
@@ -308,7 +302,3 @@ static int __init altera_msi_init(void)
return platform_driver_register(&altera_msi_driver);
}
subsys_initcall(altera_msi_init);
-
-MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
-MODULE_DESCRIPTION("Altera PCIe MSI support");
-MODULE_LICENSE("GPL v2");
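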
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 2b7837650db8..c24e96559cbb 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -1,6 +1,9 @@
/*
* Copyright Altera Corporation (C) 2013-2015. All rights reserved
*
+ * Author: Ley Foon Tan <lftan@altera.com>
+ * Description: Altera PCIe host controller driver
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -17,7 +20,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -43,6 +46,7 @@
#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
+#define PCIE_CAP_OFFSET 0x80
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
@@ -61,7 +65,8 @@
#define TLP_LOOP 500
#define RP_DEVFN 0
-#define LINK_UP_TIMEOUT 5000
+#define LINK_UP_TIMEOUT HZ
+#define LINK_RETRAIN_TIMEOUT HZ
#define INTX_NUM 4
@@ -99,38 +104,6 @@ static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
-static void altera_pcie_retrain(struct pci_dev *dev)
-{
- u16 linkcap, linkstat;
- struct altera_pcie *pcie = dev->bus->sysdata;
- int timeout = 0;
-
- if (!altera_pcie_link_is_up(pcie))
- return;
-
- /*
- * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
- * current speed is 2.5 GB/s.
- */
- pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
-
- if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
- pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_RL);
- while (!altera_pcie_link_is_up(pcie)) {
- timeout++;
- if (timeout > LINK_UP_TIMEOUT)
- break;
- udelay(5);
- }
- }
-}
-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
-
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
@@ -171,13 +144,6 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
if (bus->number == pcie->root_bus_nr && dev > 0)
return false;
- /*
- * Do not read more than one device on the bus directly attached
- * to root port, root port can only attach to one downstream port.
- */
- if (bus->primary == pcie->root_bus_nr && dev > 0)
- return false;
-
return true;
}
@@ -301,22 +267,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *value)
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 *value)
{
- struct altera_pcie *pcie = bus->sysdata;
int ret;
u32 data;
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -329,7 +287,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
break;
}
- ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ ret = tlp_cfg_dword_read(pcie, busno, devfn,
(where & ~DWORD_MASK), byte_en, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -349,20 +307,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 value)
{
- struct altera_pcie *pcie = bus->sysdata;
u32 data32;
u32 shift = 8 * (where & 3);
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -378,8 +330,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
break;
}
- return tlp_cfg_dword_write(pcie, bus->number, devfn,
- (where & ~DWORD_MASK), byte_en, data32);
+ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+ byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+ value);
}
static struct pci_ops altera_pcie_ops = {
@@ -387,6 +371,90 @@ static struct pci_ops altera_pcie_ops = {
.write = altera_pcie_cfg_write,
};
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 *value)
+{
+ u32 data;
+ int ret;
+
+ ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(*value),
+ &data);
+ *value = data;
+ return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 value)
+{
+ return _altera_pcie_cfg_write(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(value),
+ value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+ u16 reg16;
+ unsigned long start_jiffies;
+
+ /* Wait for link training end. */
+ start_jiffies = jiffies;
+ for (;;) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link retrain timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+
+ /* Wait for the link to come up */
+ start_jiffies = jiffies;
+ for (;;) {
+ if (altera_pcie_link_is_up(pcie))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link up timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+ u16 linkcap, linkstat, linkctl;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
+
+ /*
+ * Set the retrain bit if the PCIe root port supports > 2.5GB/s, but
+ * current speed is 2.5 GB/s.
+ */
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+ &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+ &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, &linkctl);
+ linkctl |= PCI_EXP_LNKCTL_RL;
+ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, linkctl);
+
+ altera_wait_link_retrain(pcie);
+ }
+}
+
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -508,6 +576,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
return 0;
}
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+ altera_pcie_retrain(pcie);
+}
+
static int altera_pcie_probe(struct platform_device *pdev)
{
struct altera_pcie *pcie;
@@ -545,6 +618,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
/* enable all interrupts */
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
@@ -568,7 +642,6 @@ static const struct of_device_id altera_pcie_of_match[] = {
{ .compatible = "altr,pcie-root-port-1.0", },
{},
};
-MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
static struct platform_driver altera_pcie_driver = {
.probe = altera_pcie_probe,
@@ -583,8 +656,4 @@ static int altera_pcie_init(void)
{
return platform_driver_register(&altera_pcie_driver);
}
-module_init(altera_pcie_init);
-
-MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
-MODULE_DESCRIPTION("Altera PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(altera_pcie_init);
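
altera_wait_link_retrain() above uses the common jiffies-deadline polling idiom; stripped to its core, with a hypothetical condition helper and timeout, it is:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define FOO_TIMEOUT	HZ	/* one second, matching LINK_RETRAIN_TIMEOUT above */

static bool foo_condition_met(void)
{
	return false;	/* stub for illustration */
}

static void foo_wait(void)
{
	unsigned long start = jiffies;

	for (;;) {
		if (foo_condition_met())
			break;
		if (time_after(jiffies, start + FOO_TIMEOUT)) {
			/* timed out: report and fall through */
			break;
		}
		udelay(100);
	}
}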
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
index 16ba70b7ec65..39bf1a6df463 100644
--- a/drivers/pci/host/pcie-artpec6.c
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -191,8 +191,8 @@ static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static int __init artpec6_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int artpec6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index c8079dc81c10..17da005497a5 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -100,9 +100,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dw_plat_pcie->mem_base))
return PTR_ERR(dw_plat_pcie->mem_base);
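
The !res checks removed here and in pcie-altera-msi.c above are redundant because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR for a NULL or non-MEM resource, so the IS_ERR() test on the mapping covers the missing-resource case as well. Minimal sketch with hypothetical foo_ names:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	/* res may legitimately be NULL here ... */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* ... devm_ioremap_resource() turns that into ERR_PTR(-EINVAL). */
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}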
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 12afce19890b..74da71ea544a 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -14,7 +14,6 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -26,7 +25,17 @@
#include "pcie-designware.h"
-/* Synopsis specific PCIE configuration registers */
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES 5
+#define LINK_WAIT_IATU_MIN 9000
+#define LINK_WAIT_IATU_MAX 10000
+
+/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
@@ -51,6 +60,7 @@
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_CR1 0x904
@@ -70,10 +80,26 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80 onwards, address translation is done through the unroll registers
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1 0x00
+#define PCIE_ATU_UNR_REGION_CTRL2 0x04
+#define PCIE_ATU_UNR_LOWER_BASE 0x08
+#define PCIE_ATU_UNR_UPPER_BASE 0x0C
+#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_TARGET 0x14
+#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9))
+
/* PCIe Port Logic registers */
#define PLR_OFFSET 0x700
#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP 0x00000010
+#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
+#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
static struct pci_ops dw_pcie_ops;
@@ -115,12 +141,12 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
+static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
- pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
- else
- *val = readl(pp->dbi_base + reg);
+ return pp->ops->readl_rc(pp, pp->dbi_base + reg);
+
+ return readl(pp->dbi_base + reg);
}
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
@@ -131,6 +157,27 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
writel(val, pp->dbi_base + reg);
}
+static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ if (pp->ops->readl_rc)
+ return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
+
+ return readl(pp->dbi_base + offset + reg);
+}
+
+static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ if (pp->ops->writel_rc)
+ pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
+ else
+ writel(val, pp->dbi_base + offset + reg);
+}
+
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
@@ -152,24 +199,57 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
- u32 val;
-
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ u32 retries, val;
+
+ if (pp->iatu_unroll_enabled) {
+ dw_pcie_writel_unroll(pp, index,
+ lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
+ dw_pcie_writel_unroll(pp, index,
+ upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
+ dw_pcie_writel_unroll(pp, index,
+ lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
+ dw_pcie_writel_unroll(pp, index,
+ lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
+ dw_pcie_writel_unroll(pp, index,
+ upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
+ dw_pcie_writel_unroll(pp, index,
+ type, PCIE_ATU_UNR_REGION_CTRL1);
+ dw_pcie_writel_unroll(pp, index,
+ PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
+ } else {
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
- dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ if (pp->iatu_unroll_enabled)
+ val = dw_pcie_readl_unroll(pp, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ else
+ val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);
+
+ if (val == PCIE_ATU_ENABLE)
+ return;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pp->dev, "iATU is not being enabled\n");
}
static struct irq_chip dw_msi_irq_chip = {
@@ -412,7 +492,8 @@ int dw_pcie_link_up(struct pcie_port *pp)
return pp->ops->link_up(pp);
val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
- return val & PCIE_PHY_DEBUG_R1_LINK_UP;
+ return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
+ (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -428,6 +509,17 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
+static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
+{
+ u32 val;
+
+ val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ return 1;
+
+ return 0;
+}
+
int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
@@ -436,7 +528,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct resource *cfg_res;
int i, ret;
LIST_HEAD(res);
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -457,17 +549,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
goto error;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
+ resource_list_for_each_entry_safe(win, tmp, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- ret = pci_remap_iospace(pp->io, pp->io_base);
- if (ret)
+ ret = pci_remap_iospace(win->res, pp->io_base);
+ if (ret) {
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
- ret, pp->io);
+ ret, win->res);
+ resource_list_destroy_entry(win);
+ } else {
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ }
break;
case IORESOURCE_MEM:
pp->mem = win->res;
@@ -524,6 +619,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pp->lanes = 0;
+ ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
+ if (ret)
+ pp->num_viewport = 2;
+
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
@@ -544,6 +643,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -609,13 +710,14 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
return ret;
}
@@ -646,13 +748,14 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
return ret;
}
@@ -670,13 +773,6 @@ static int dw_pcie_valid_config(struct pcie_port *pp,
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
- /*
- * do not read more than one device on the bus directly attached
- * to RC's (Virtual Bridge's) DS side.
- */
- if (bus->primary == pp->root_bus_nr && dev > 0)
- return 0;
-
return 1;
}
@@ -720,7 +816,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
u32 val;
/* set the number of lanes */
- dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
+ val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (pp->lanes) {
case 1:
@@ -742,7 +838,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
/* set link width speed control register */
- dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pp->lanes) {
case 1:
@@ -765,19 +861,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
- dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
+ val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
/* setup bus numbers */
- dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
+ val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
/* setup command register */
- dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
@@ -788,10 +884,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf)
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ if (!pp->ops->rd_other_conf) {
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
+ if (pp->num_viewport > 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+ }
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
@@ -802,7 +903,3 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Designware PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index f437f9b5be04..c8e5bc647f49 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -22,11 +22,6 @@
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -49,16 +44,17 @@ struct pcie_port {
struct resource *busn;
int irq;
u32 lanes;
+ u32 num_viewport;
struct pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
+ u8 iatu_unroll_enabled;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
- void (*readl_rc)(struct pcie_port *pp,
- void __iomem *dbi_base, u32 *val);
+ u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
void (*writel_rc)(struct pcie_port *pp,
u32 val, void __iomem *dbi_base);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index f2f90c50f75d..5ec2d440a6b7 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -1,7 +1,11 @@
/*
+ * Qualcomm PCIe root complex driver
+ *
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Copyright 2015 Linaro Limited.
*
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -19,7 +23,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
@@ -570,37 +574,19 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_pcie_remove(struct platform_device *pdev)
-{
- struct qcom_pcie *pcie = platform_get_drvdata(pdev);
-
- qcom_ep_reset_assert(pcie);
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- pcie->ops->deinit(pcie);
-
- return 0;
-}
-
static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
{ }
};
-MODULE_DEVICE_TABLE(of, qcom_pcie_match);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
- .remove = qcom_pcie_remove,
.driver = {
.name = "qcom-pcie",
+ .suppress_bind_attrs = true,
.of_match_table = qcom_pcie_match,
},
};
-
-module_platform_driver(qcom_pcie_driver);
-
-MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>");
-MODULE_DESCRIPTION("Qualcomm PCIe root complex driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 65db7a221509..e06b1d3b4dea 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -84,8 +84,18 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
+#define SPCHGFIN (1 << 4)
+#define SPCHGFAIL (1 << 6)
+#define SPCHGSUC (1 << 7)
+#define LINK_SPEED (0xf << 16)
+#define LINK_SPEED_2_5GTS (1 << 16)
+#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define SPEED_CHANGE (1 << 24)
#define SCRAMBLE_DISABLE (1 << 27)
+#define MACS2R 0x011078
+#define MACCGSPSETR 0x011084
+#define SPCNGRSN (1 << 31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
@@ -385,11 +395,67 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
return 1;
}
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 1000;
+ u32 macsr;
+
+ if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+ return;
+
+ if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+ dev_err(pcie->dev, "Speed change already in progress\n");
+ return;
+ }
+
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+ goto done;
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set speed change reason as intentional factor */
+ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+ /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ /* Start link speed change */
+ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+ while (timeout--) {
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if (macsr & SPCHGFIN) {
+ /* Clear the interrupt bits */
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ if (macsr & SPCHGFAIL)
+ dev_err(pcie->dev, "Speed change failed\n");
+
+ goto done;
+ }
+
+ msleep(1);
+ }
+
+ dev_err(pcie->dev, "Speed change timed out\n");
+
+done:
+ dev_info(pcie->dev, "Current link speed is %s GT/s\n",
+ (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
+
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
struct pci_bus *bus, *child;
LIST_HEAD(res);
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
rcar_pcie_setup(&res, pcie);
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
@@ -608,6 +674,18 @@ static int rcar_msi_alloc(struct rcar_msi *chip)
return msi;
}
+static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+ msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+ order_base_2(no_irqs));
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
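As an aside, a standalone sketch (not part of the patch; the helper below only models the kernel's order_base_2()) of why the region allocator rounds the vector count up: multiple MSI needs a naturally aligned power-of-two block of hwirqs.

#include <stdio.h>

/* Model of order_base_2(): smallest order such that 2^order >= n */
static unsigned int order_base_2_model(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int nvec = 3;
	unsigned int order = order_base_2_model(nvec);

	/* a request for 3 vectors reserves an aligned block of 4 hwirqs */
	printf("nvec %u -> order %u -> block of %u\n", nvec, order, 1u << order);
	return 0;
}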
+
static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
mutex_lock(&chip->lock);
@@ -665,7 +743,7 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
if (hwirq < 0)
return hwirq;
- irq = irq_create_mapping(msi->domain, hwirq);
+ irq = irq_find_mapping(msi->domain, hwirq);
if (!irq) {
rcar_msi_free(msi, hwirq);
return -EINVAL;
@@ -682,6 +760,58 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
return 0;
}
+static int rcar_msi_setup_irqs(struct msi_controller *chip,
+ struct pci_dev *pdev, int nvec, int type)
+{
+ struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct msi_desc *desc;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+ int i;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ hwirq = rcar_msi_alloc_region(msi, nvec);
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -ENOSPC;
+
+ for (i = 0; i < nvec; i++) {
+ /*
+ * irq_create_mapping() called from rcar_pcie_probe() pre-
+ * allocates descs, so there is no need to allocate descs here.
+ * We can therefore assume that if irq_find_mapping() above
+ * returns non-zero, then the descs are also successfully
+ * allocated.
+ */
+ if (irq_set_msi_desc_off(irq, i, desc)) {
+ /* TODO: clear */
+ return -EINVAL;
+ }
+ }
+
+ desc->nvec_used = nvec;
+ desc->msi_attrib.multiple = order_base_2(nvec);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct rcar_msi *msi = to_rcar_msi(chip);
@@ -716,12 +846,13 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
struct platform_device *pdev = to_platform_device(pcie->dev);
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
- int err;
+ int err, i;
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
@@ -731,6 +862,9 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return -ENOMEM;
}
+ for (i = 0; i < INT_PCI_MSI_NR; i++)
+ irq_create_mapping(msi->domain, i);
+
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
@@ -775,6 +909,10 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
if (err)
return err;
+ pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
pcie->clk = devm_clk_get(&pdev->dev, "pcie");
if (IS_ERR(pcie->clk)) {
dev_err(pcie->dev, "cannot get platform clock\n");
@@ -782,7 +920,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
err = clk_prepare_enable(pcie->clk);
if (err)
- goto fail_clk;
+ return err;
pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
@@ -792,7 +930,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
err = clk_prepare_enable(pcie->bus_clk);
if (err)
- goto err_map_reg;
+ goto fail_clk;
i = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!i) {
@@ -810,12 +948,6 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
}
pcie->msi.irq2 = i;
- pcie->base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pcie->base)) {
- err = PTR_ERR(pcie->base);
- goto err_map_reg;
- }
-
return 0;
err_map_reg:
@@ -865,12 +997,16 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
*/
- rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
+ PCIEPRAR(idx));
rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
+ PCIELAMR(idx));
- rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
- rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+ rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
+ PCIEPRAR(idx + 1));
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
+ PCIELAR(idx + 1));
rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
pci_addr += size;
@@ -919,6 +1055,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
+
dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
@@ -932,9 +1069,12 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
- { .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7790",
+ .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7791",
+ .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
{},
};
@@ -945,9 +1085,10 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
+ &iobase);
if (err)
return err;
@@ -955,14 +1096,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pci->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
struct resource *res = win->res;
if (resource_type(res) == IORESOURCE_IO) {
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+
+ resource_list_destroy_entry(win);
+ }
}
}
@@ -998,8 +1142,8 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return err;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
- if (err)
+ err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+ if (err)
return err;
of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
new file mode 100644
index 000000000000..b8c82fc812dc
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -0,0 +1,1229 @@
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys Designware Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+
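As an illustration, a standalone sketch (not part of the patch; the starting register value and the modelled update rule are assumptions for demonstration) of how the high-word write mask lets one write update selected bits without a read-modify-write:

#include <stdio.h>
#include <stdint.h>

#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

/* Model of the register: only bits set in the written high half-word
 * are copied in from the low half-word; everything else is preserved. */
static uint16_t apply_hiword(uint16_t cur, uint32_t wr)
{
	uint16_t mask = wr >> 16;
	uint16_t val = wr & 0xffff;

	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00c4;				/* arbitrary current value */
	uint32_t wr = HIWORD_UPDATE_BIT(0x0002);	/* LINK_TRAIN_ENABLE bit */

	/* write 0x00020002: bit 1 becomes 1, every other bit is untouched */
	printf("write 0x%08x -> reg 0x%04x\n", wr, apply_hiword(reg, wr));
	return 0;
}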
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
+#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
+#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
+#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
+#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+ (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+ PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+
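A standalone sketch (not part of the patch; the bus/device/function/register numbers are arbitrary) showing the config-space offset the ECAM-style macros above produce:

#include <stdio.h>
#include <stdint.h>

#define PCIE_ECAM_BUS(x)	(((x) & 0xff) << 20)
#define PCIE_ECAM_DEV(x)	(((x) & 0x1f) << 15)
#define PCIE_ECAM_FUNC(x)	(((x) & 0x7) << 12)
#define PCIE_ECAM_REG(x)	(((x) & 0xfff) << 0)
#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
	(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
	 PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))

int main(void)
{
	/* bus 1, device 0, function 0, register 0x10 -> 0x00100010 */
	printf("0x%08x\n", (uint32_t)PCIE_ECAM_ADDR(1, 0, 0, 0x10));
	return 0;
}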
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
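To make the window layout above concrete, a standalone sketch (not part of the patch) of the arithmetic implied by the comments: region 0 covers 32 MB and the remaining 32 regions cover 1 MB each.

#include <stdio.h>

#define AXI_REGION_SIZE			(1L << 20)
#define AXI_REGION_0_SIZE		(32 * (1L << 20))
#define MAX_AXI_WRAPPER_REGION_NUM	33

int main(void)
{
	long total = AXI_REGION_0_SIZE +
		     (MAX_AXI_WRAPPER_REGION_NUM - 1) * AXI_REGION_SIZE;

	/* 32 MB + 32 * 1 MB = 64 MB of AXI wrapper address space */
	printf("total wrapper window: %ld MB\n", total >> 20);
	return 0;
}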
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ struct phy *phy;
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 root_bus_nr;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /* access only one slot on each root port */
+ if (bus->number == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * do not read more than one device on the bus directly attached
+ * to RC's downstream side.
+ */
+ if (bus->primary == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+
+ offset = where & ~0x3;
+
+ if (size == 4) {
+ writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
+ */
+ tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+
+ return PCIBIOS_SUCCESSFUL;
+}
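A standalone sketch (not part of the patch; the register contents are made up) of how the sub-dword mask in rockchip_pcie_wr_own_conf() above selects which bytes a 16-bit write at offset 0x06 replaces:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int where = 0x06, size = 2;		/* 16-bit config write at 0x06 */
	uint32_t val = 0xbeef;
	uint32_t reg = 0x11223344;		/* assumed current dword at 0x04 */

	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));
	uint32_t tmp = (reg & mask) | (val << ((where & 0x3) * 8));

	/* mask = 0x0000ffff, merged value written back = 0xbeef3344 */
	printf("mask 0x%08x -> write 0x%08x\n", mask, tmp);
	return 0;
}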
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ if (!IS_ALIGNED(busdev, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(rockchip->reg_base + busdev);
+ } else if (size == 2) {
+ *val = readw(rockchip->reg_base + busdev);
+ } else if (size == 1) {
+ *val = readb(rockchip->reg_base + busdev);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+ if (!IS_ALIGNED(busdev, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ writel(val, rockchip->reg_base + busdev);
+ else if (size == 2)
+ writew(val, rockchip->reg_base + busdev);
+ else if (size == 1)
+ writeb(val, rockchip->reg_base + busdev);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+/**
+ * rockchip_pcie_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+ u32 status;
+ unsigned long timeout;
+
+ gpiod_set_value(rockchip->ep_gpio, 0);
+
+ err = phy_init(rockchip->phy);
+ if (err < 0) {
+ dev_err(dev, "fail to init phy, err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "assert core_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "assert pipe_rst err %d\n", err);
+ return err;
+ }
+
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_ENABLE |
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
+ PCIE_CLIENT_MODE_RC |
+ PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+
+ err = phy_power_on(rockchip->phy);
+ if (err) {
+ dev_err(dev, "fail to power on phy, err %d\n", err);
+ return err;
+ }
+
+ /*
+ * Please don't reorder the deassert sequence of the following
+ * four reset pins.
+ */
+ err = reset_control_deassert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "deassert core_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "deassert pipe_rst err %d\n", err);
+ return err;
+ }
+
+ /*
+ * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
+ * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal aren't
+ * reliable and enabling ASPM doesn't work. This is a controller
+ * bug we need to work around.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
+
+ /* Set the transmitted FTS count required to exit from L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ gpiod_set_value(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ timeout = jiffies + msecs_to_jiffies(500);
+
+ for (;;) {
+ status = rockchip_pcie_read(rockchip,
+ PCIE_CLIENT_BASIC_STATUS1);
+ if ((status & PCIE_CLIENT_LINK_STATUS_MASK) ==
+ PCIE_CLIENT_LINK_STATUS_UP) {
+ dev_dbg(dev, "PCIe link training gen1 pass!\n");
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ msleep(20);
+ }
+
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ for (;;) {
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+ PCIE_CORE_PL_CONF_SPEED_5G) {
+ dev_dbg(dev, "PCIe link training gen2 pass!\n");
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ break;
+ }
+
+ msleep(20);
+ }
+
+ /* Check the final link width, as reported by the negotiated lane counter in MGMT */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_RC_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCIE_RC_CONFIG_RID_CCR);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+
+ return 0;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+ dev_dbg(dev, "non-fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
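A standalone sketch (not part of the patch; the pending-status value is made up) of how the ffs() loop above walks the shifted INTx status word, here with INTA and INTC pending:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define PCIE_CLIENT_INTR_MASK	(0xfU << 5)	/* GENMASK(8, 5) */
#define PCIE_CLIENT_INTR_SHIFT	5

int main(void)
{
	unsigned int status = (1u << 5) | (1u << 7);	/* INTA | INTC pending */
	unsigned int reg = (status & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;

	while (reg) {
		unsigned int hwirq = ffs(reg) - 1;

		reg &= ~(1u << hwirq);
		printf("dispatch INT%c (hwirq %u)\n", 'A' + hwirq, hwirq);
	}
	return 0;
}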
+
+/**
+ * rockchip_pcie_parse_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int irq;
+ int err;
+
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
+
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "apb-base");
+ rockchip->apb_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy)) {
+ if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER)
+ dev_err(dev, "missing phy\n");
+ return PTR_ERR(rockchip->phy);
+ }
+
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+ dev_warn(dev, "invalid num-lanes, default to use one lane\n");
+ rockchip->lanes = 1;
+ }
+
+ rockchip->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
+ }
+
+ rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
+ }
+
+ rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
+ }
+
+ rockchip->pipe_rst = devm_reset_control_get(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
+
+ rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio)) {
+ dev_err(dev, "missing ep-gpios property in node\n");
+ return PTR_ERR(rockchip->ep_gpio);
+ }
+
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
+
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
+
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
+
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
+ }
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+ if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+ if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie3v3 regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie1v8)) {
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+ }
+
+ return 0;
+
+err_disable_1v8:
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_out:
+ return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, 4,
+ &intx_domain_ops, rockchip);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < 8)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus, *child;
+ struct resource_entry *win;
+ resource_size_t io_base;
+ struct resource *mem;
+ struct resource *io;
+ phys_addr_t io_bus_addr = 0;
+ u32 io_size;
+ phys_addr_t mem_bus_addr = 0;
+ u32 mem_size = 0;
+ int reg_no;
+ int err;
+ int offset;
+
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ rockchip->dev = dev;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(rockchip->aclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_pcie clock\n");
+ goto err_aclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
+ goto err_aclk_perf_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->hclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable hclk_pcie clock\n");
+ goto err_hclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ if (err) {
+ dev_err(dev, "unable to enable clk_pcie_pm clock\n");
+ goto err_pcie_pm;
+ }
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_vpcie;
+
+ err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
+ &res, &io_base);
+ if (err)
+ goto err_vpcie;
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto err_vpcie;
+
+ /* Get the I/O and memory ranges from DT */
+ io_size = 0;
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "I/O";
+ io_size = resource_size(io);
+ io_bus_addr = io->start - win->offset;
+ err = pci_remap_iospace(io, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, io);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "MEM";
+ mem_size = resource_size(mem);
+ mem_bus_addr = mem->start - win->offset;
+ break;
+ case IORESOURCE_BUS:
+ rockchip->root_bus_nr = win->res->start;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ if (mem_size) {
+ for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ goto err_vpcie;
+ }
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ goto err_vpcie;
+ }
+
+ offset = mem_size >> 20;
+
+ if (io_size) {
+ for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ goto err_vpcie;
+ }
+ }
+ }
+
+ bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_vpcie;
+ }
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+
+ return err;
+
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+err_pcie_pm:
+ clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+ clk_disable_unprepare(rockchip->aclk_pcie);
+err_aclk_pcie:
+ return err;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
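The outbound ATU loops above program one region per MiB of the window: the size argument of 20 - 1 encodes a 2^20-byte region, and each successive region advances the bus address by 1 MiB. A rough, self-contained illustration of that arithmetic (plain user-space C; the window size and bus address are made-up example values, not taken from the driver):

	/* Sketch of the per-MiB outbound-region arithmetic; example values only. */
	#include <stdio.h>
	#include <stdint.h>

	#define REGION_SHIFT	20	/* 2^20 bytes = 1 MiB per ATU region */

	int main(void)
	{
		uint64_t mem_bus_addr = 0xf8000000;	/* hypothetical bus address */
		uint64_t mem_size = 0x2000000;		/* hypothetical 32 MiB window */
		unsigned int reg_no;

		for (reg_no = 0; reg_no < (mem_size >> REGION_SHIFT); reg_no++)
			printf("region %2u -> bus 0x%llx (size bits = %d)\n",
			       reg_no + 1,
			       (unsigned long long)(mem_bus_addr +
						    ((uint64_t)reg_no << REGION_SHIFT)),
			       REGION_SHIFT - 1);
		return 0;
	}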
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index a4060b85ab23..09aed85f275a 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -15,7 +15,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -355,7 +355,6 @@ static const struct of_device_id spear13xx_pcie_of_match[] = {
{ .compatible = "st,spear1340-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
@@ -365,14 +364,8 @@ static struct platform_driver spear13xx_pcie_driver = {
},
};
-/* SPEAr13xx PCIe driver does not allow module unload */
-
static int __init spear13xx_pcie_init(void)
{
return platform_driver_register(&spear13xx_pcie_driver);
}
-module_init(spear13xx_pcie_init);
-
-MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
-MODULE_AUTHOR("Pratyush Anand <pratyush.anand@gmail.com>");
-MODULE_LICENSE("GPL v2");
+device_initcall(spear13xx_pcie_init);
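With the driver built-in only, <linux/module.h> goes away and device_initcall() takes over from module_init() at the same init level. A minimal sketch of the builtin registration pattern these conversions follow (the "foo" names are placeholders, not the SPEAr code):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		return 0;	/* hypothetical probe */
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.suppress_bind_attrs = true,	/* no remove(), so block sysfs unbind */
		},
		.probe = foo_probe,
	};
	builtin_platform_driver(foo_driver);	/* registers via a device_initcall */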
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 0b597d9190b4..67eae4179290 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -85,10 +85,15 @@
#define MSGF_MISC_SR_MASTER_ERR BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR BIT(7)
-#define MSGF_MISC_SR_UR_DETECT BIT(20)
-
-#define MSGF_MISC_SR_PCIE_CORE GENMASK(18, 16)
-#define MSGF_MISC_SR_PCIE_CORE_ERR GENMASK(31, 22)
+#define MSGF_MISC_SR_FATAL_AER BIT(16)
+#define MSGF_MISC_SR_NON_FATAL_AER BIT(17)
+#define MSGF_MISC_SR_CORR_AER BIT(18)
+#define MSGF_MISC_SR_UR_DETECT BIT(20)
+#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
+#define MSGF_MISC_SR_FATAL_DEV BIT(23)
+#define MSGF_MISC_SR_LINK_DOWN BIT(24)
+#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,9 +101,15 @@
MSGF_MISC_SR_MASTER_ERR | \
MSGF_MISC_SR_I_ADDR_ERR | \
MSGF_MISC_SR_E_ADDR_ERR | \
+ MSGF_MISC_SR_FATAL_AER | \
+ MSGF_MISC_SR_NON_FATAL_AER | \
+ MSGF_MISC_SR_CORR_AER | \
MSGF_MISC_SR_UR_DETECT | \
- MSGF_MISC_SR_PCIE_CORE | \
- MSGF_MISC_SR_PCIE_CORE_ERR)
+ MSGF_MISC_SR_NON_FATAL_DEV | \
+ MSGF_MISC_SR_FATAL_DEV | \
+ MSGF_MISC_SR_LINK_DOWN | \
+ MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MSIC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -109,8 +120,8 @@
MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
/* MSI interrupt status mask bits */
-#define MSGF_MSI_SR_LO_MASK BIT(0)
-#define MSGF_MSI_SR_HI_MASK BIT(0)
+#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0)
+#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0)
#define MSII_PRESENT BIT(0)
#define MSII_ENABLE BIT(0)
@@ -291,8 +302,29 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
dev_err(pcie->dev,
"In Misc Egress address translation error\n");
- if (misc_stat & MSGF_MISC_SR_PCIE_CORE_ERR)
- dev_err(pcie->dev, "PCIe Core error\n");
+ if (misc_stat & MSGF_MISC_SR_FATAL_AER)
+ dev_err(pcie->dev, "Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
+ dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_CORR_AER)
+ dev_err(pcie->dev, "Correctable Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_UR_DETECT)
+ dev_err(pcie->dev, "Unsupported request Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
+ dev_err(pcie->dev, "Non-Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
+ dev_err(pcie->dev, "Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
+
+ if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
@@ -459,40 +491,6 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
.free = nwl_irq_domain_free,
};
-static void nwl_msi_free_irq_domain(struct nwl_pcie *pcie)
-{
- struct nwl_msi *msi = &pcie->msi;
-
- if (msi->irq_msi0)
- irq_set_chained_handler_and_data(msi->irq_msi0, NULL, NULL);
- if (msi->irq_msi1)
- irq_set_chained_handler_and_data(msi->irq_msi1, NULL, NULL);
-
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
- if (msi->dev_domain)
- irq_domain_remove(msi->dev_domain);
-
- kfree(msi->bitmap);
- msi->bitmap = NULL;
-}
-
-static void nwl_pcie_free_irq_domain(struct nwl_pcie *pcie)
-{
- int i;
- u32 irq;
-
- for (i = 0; i < INTX_NUM; i++) {
- irq = irq_find_mapping(pcie->legacy_irq_domain, i + 1);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
- if (pcie->legacy_irq_domain)
- irq_domain_remove(pcie->legacy_irq_domain);
-
- nwl_msi_free_irq_domain(pcie);
-}
-
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
@@ -867,25 +865,12 @@ error:
return err;
}
-static int nwl_pcie_remove(struct platform_device *pdev)
-{
- struct nwl_pcie *pcie = platform_get_drvdata(pdev);
-
- nwl_pcie_free_irq_domain(pcie);
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
static struct platform_driver nwl_pcie_driver = {
.driver = {
.name = "nwl-pcie",
+ .suppress_bind_attrs = true,
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
- .remove = nwl_pcie_remove,
};
-module_platform_driver(nwl_pcie_driver);
-
-MODULE_AUTHOR("Xilinx, Inc");
-MODULE_DESCRIPTION("NWL PCIe driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(nwl_pcie_driver);
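The MSI status fix above widens the mask from BIT(0) to GENMASK(31, 0), i.e. from a single bit to all 32 MSI status bits, while the old three-bit GENMASK(18, 16) core-error field is split into named bits. A quick user-space check of those expressions, with simplified stand-ins for the kernel's bit helpers:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's BIT()/GENMASK() macros. */
	#define BIT(n)		(1UL << (n))
	#define GENMASK(h, l)	(((1ULL << ((h) - (l) + 1)) - 1) << (l))

	int main(void)
	{
		printf("BIT(0)          = 0x%08lx\n", BIT(0));		/* 0x00000001 */
		printf("GENMASK(31, 0)  = 0x%08llx\n", GENMASK(31, 0));	/* 0xffffffff */
		printf("GENMASK(18, 16) = 0x%08llx\n", GENMASK(18, 16));	/* 0x00070000 */
		return 0;
	}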
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index a30e01639557..be568039d9d0 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -18,7 +18,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -101,7 +101,8 @@
* @msi_pages: MSI pages
* @root_busno: Root Bus number
* @dev: Device pointer
- * @irq_domain: IRQ domain pointer
+ * @msi_domain: MSI IRQ domain pointer
+ * @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
@@ -110,7 +111,8 @@ struct xilinx_pcie_port {
unsigned long msi_pages;
u8 root_busno;
struct device *dev;
- struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
+ struct irq_domain *leg_domain;
struct list_head resources;
};
@@ -168,13 +170,6 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
if (bus->number == port->root_busno && devfn > 0)
return false;
- /*
- * Do not read more than one device on the bus directly attached
- * to RC.
- */
- if (bus->primary == port->root_busno && devfn > 0)
- return false;
-
return true;
}
@@ -219,13 +214,15 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
{
struct msi_desc *msi;
struct xilinx_pcie_port *port;
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!test_bit(irq, msi_irq_in_use)) {
+ if (!test_bit(hwirq, msi_irq_in_use)) {
msi = irq_get_msi_desc(irq);
port = msi_desc_to_pci_sysdata(msi);
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
} else {
- clear_bit(irq, msi_irq_in_use);
+ clear_bit(hwirq, msi_irq_in_use);
}
}
@@ -257,6 +254,7 @@ static void xilinx_msi_teardown_irq(struct msi_controller *chip,
unsigned int irq)
{
xilinx_pcie_destroy_msi(irq);
+ irq_dispose_mapping(irq);
}
/**
@@ -281,7 +279,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
if (hwirq < 0)
return hwirq;
- irq = irq_create_mapping(port->irq_domain, hwirq);
+ irq = irq_create_mapping(port->msi_domain, hwirq);
if (!irq)
return -EINVAL;
@@ -432,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
- return IRQ_HANDLED;
+ goto error;
}
if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
@@ -443,7 +441,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Handle INTx Interrupt */
val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
- generic_handle_irq(irq_find_mapping(port->irq_domain,
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
val));
}
}
@@ -454,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
- return IRQ_HANDLED;
+ goto error;
}
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
@@ -499,6 +497,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (status & XILINX_PCIE_INTR_MST_ERRP)
dev_warn(port->dev, "Master error poison\n");
+error:
/* Clear the Interrupt Decode register */
pcie_write(port, status, XILINX_PCIE_REG_IDR);
@@ -506,35 +505,6 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
/**
- * xilinx_pcie_free_irq_domain - Free IRQ domain
- * @port: PCIe port information
- */
-static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
-{
- int i;
- u32 irq, num_irqs;
-
- /* Free IRQ Domain */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
-
- free_pages(port->msi_pages, 0);
-
- num_irqs = XILINX_NUM_MSI_IRQS;
- } else {
- /* INTx */
- num_irqs = 4;
- }
-
- for (i = 0; i < num_irqs; i++) {
- irq = irq_find_mapping(port->irq_domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
-
- irq_domain_remove(port->irq_domain);
-}
-
-/**
* xilinx_pcie_init_irq_domain - Initialize IRQ domain
* @port: PCIe port information
*
@@ -553,21 +523,21 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops,
port);
- if (!port->irq_domain) {
+ if (!port->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->irq_domain = irq_domain_add_linear(node,
+ port->msi_domain = irq_domain_add_linear(node,
XILINX_NUM_MSI_IRQS,
&msi_domain_ops,
&xilinx_pcie_msi_chip);
- if (!port->irq_domain) {
+ if (!port->msi_domain) {
dev_err(dev, "Failed to get a MSI IRQ domain\n");
return -ENODEV;
}
@@ -724,21 +694,6 @@ error:
return err;
}
-/**
- * xilinx_pcie_remove - Remove function
- * @pdev: Platform device pointer
- *
- * Return: '0' always
- */
-static int xilinx_pcie_remove(struct platform_device *pdev)
-{
- struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
-
- xilinx_pcie_free_irq_domain(port);
-
- return 0;
-}
-
static struct of_device_id xilinx_pcie_of_match[] = {
{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
{}
@@ -751,10 +706,5 @@ static struct platform_driver xilinx_pcie_driver = {
.suppress_bind_attrs = true,
},
.probe = xilinx_pcie_probe,
- .remove = xilinx_pcie_remove,
};
-module_platform_driver(xilinx_pcie_driver);
-
-MODULE_AUTHOR("Xilinx Inc");
-MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(xilinx_pcie_driver);
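The teardown fix above keys the in-use bitmap by the hardware IRQ number rather than the Linux virq, since the two differ once a linear IRQ domain is involved. A hedged sketch of that bookkeeping pattern, with illustrative names rather than the Xilinx driver's own:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/bitmap.h>

	static DECLARE_BITMAP(example_msi_in_use, 32);	/* indexed by hwirq */

	static void example_teardown_msi(unsigned int virq)
	{
		struct irq_data *d = irq_get_irq_data(virq);
		irq_hw_number_t hwirq = irqd_to_hwirq(d);

		clear_bit(hwirq, example_msi_in_use);	/* free the hwirq slot */
		irq_dispose_mapping(virq);		/* drop the hwirq<->virq mapping */
	}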
diff --git a/arch/x86/pci/vmd.c b/drivers/pci/host/vmd.c
index 7948be342ee9..37e29b580be3 100644
--- a/arch/x86/pci/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -58,16 +58,11 @@ struct vmd_irq {
/**
* struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
* @irq_list: the list of irq's the VMD one demuxes to.
- * @vmd_vector: the h/w IRQ assigned to the VMD.
- * @index: index into the VMD MSI-X table; used for message routing.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
*/
struct vmd_irq_list {
struct list_head irq_list;
- struct vmd_dev *vmd;
- unsigned int vmd_vector;
- unsigned int index;
unsigned int count;
};
@@ -78,7 +73,6 @@ struct vmd_dev {
char __iomem *cfgbar;
int msix_count;
- struct msix_entry *msix_entries;
struct vmd_irq_list *irqs;
struct pci_sysdata sysdata;
@@ -97,6 +91,12 @@ static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
return container_of(bus->sysdata, struct vmd_dev, sysdata);
}
+static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
+ struct vmd_irq_list *irqs)
+{
+ return irqs - vmd->irqs;
+}
+
/*
* Drivers managing a device in a VMD domain allocate their own IRQs as before,
* but the MSI entry for the hardware it's driving will be programmed with a
@@ -109,9 +109,11 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct vmd_irq *vmdirq = data->chip_data;
struct vmd_irq_list *irq = vmdirq->irq;
+ struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);
msg->address_hi = MSI_ADDR_BASE_HI;
- msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
+ msg->address_lo = MSI_ADDR_BASE_LO |
+ MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
msg->data = 0;
}
@@ -200,6 +202,7 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *desc = arg->desc;
struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ unsigned int index, vector;
if (!vmdirq)
return -ENOMEM;
@@ -207,9 +210,11 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
INIT_LIST_HEAD(&vmdirq->node);
vmdirq->irq = vmd_next_irq(vmd, desc);
vmdirq->virq = virq;
+ index = index_from_irqs(vmd, vmdirq->irq);
+ vector = pci_irq_vector(vmd->dev, index);
- irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
- vmdirq, handle_untracked_irq, vmd, NULL);
+ irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+ handle_untracked_irq, vmd, NULL);
return 0;
}
@@ -219,6 +224,8 @@ static void vmd_msi_free(struct irq_domain *domain,
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
unsigned long flags;
+ synchronize_rcu();
+
/* XXX: Potential optimization to rebalance */
raw_spin_lock_irqsave(&list_lock, flags);
vmdirq->irq->count--;
@@ -602,6 +609,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
.parent = res,
};
+ sd->vmd_domain = true;
sd->domain = vmd_find_free_domain();
if (sd->domain < 0)
return sd->domain;
@@ -677,30 +685,19 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (vmd->msix_count < 0)
return -ENODEV;
+ vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (vmd->msix_count < 0)
+ return vmd->msix_count;
+
vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
GFP_KERNEL);
if (!vmd->irqs)
return -ENOMEM;
- vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
- sizeof(*vmd->msix_entries),
- GFP_KERNEL);
- if (!vmd->msix_entries)
- return -ENOMEM;
- for (i = 0; i < vmd->msix_count; i++)
- vmd->msix_entries[i].entry = i;
-
- vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
- vmd->msix_count);
- if (vmd->msix_count < 0)
- return vmd->msix_count;
-
for (i = 0; i < vmd->msix_count; i++) {
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
- vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
- vmd->irqs[i].index = i;
-
- err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
+ err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
vmd_irq, 0, "vmd", &vmd->irqs[i]);
if (err)
return err;
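The VMD conversion leans on pci_alloc_irq_vectors()/pci_irq_vector() instead of a hand-rolled msix_entry array, recovering the MSI-X table index by pointer subtraction (irqs - vmd->irqs). A condensed sketch of the allocation pattern under those APIs (the function name and ctx array are invented for the example):

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t example_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_setup_vectors(struct pci_dev *pdev, void **ctx, int want)
	{
		int i, nvec, err;

		nvec = pci_alloc_irq_vectors(pdev, 1, want,
					     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() maps entry i back to its Linux IRQ */
			err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
					       example_irq, 0, "example", ctx[i]);
			if (err)
				return err;
		}
		return 0;
	}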
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 555bcde3b196..60e66e027ebc 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -101,10 +101,8 @@ int cpci_unconfigure_slot(struct slot *slot);
#ifdef CONFIG_HOTPLUG_PCI_CPCI
int cpci_hotplug_init(int debug);
-void cpci_hotplug_exit(void);
#else
static inline int cpci_hotplug_init(int debug) { return 0; }
-static inline void cpci_hotplug_exit(void) { }
#endif
#endif /* _CPCI_HOTPLUG_H */
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 7d3866c47312..7ec8a8f72c69 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -719,13 +719,3 @@ cpci_hotplug_init(int debug)
cpci_debug = debug;
return 0;
}
-
-void __exit
-cpci_hotplug_exit(void)
-{
- /*
- * Clean everything up.
- */
- cpci_hp_stop();
- cpci_hp_unregister_controller(controller);
-}
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 9acd1997c6fe..fea0b8b33589 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -25,7 +25,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/module.h> /* try_module_get & module_put */
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -537,17 +537,11 @@ static int __init pci_hotplug_init(void)
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
+device_initcall(pci_hotplug_init);
-static void __exit pci_hotplug_exit(void)
-{
- cpci_hotplug_exit();
-}
-
-module_init(pci_hotplug_init);
-module_exit(pci_hotplug_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
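Since the core is no longer buildable as a module, the parameter above is reachable only on the kernel command line, prefixed with the object name; assuming the Makefile still links this file into pci_hotplug.o, that would look like:

	pci_hotplug.debug=1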
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e764918641ae..37d70b5ad22f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -152,6 +152,9 @@ bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_reset_slot(struct slot *slot, int probe);
+int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
+int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
+
static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ac531e674a05..7d32fa33dcef 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -27,7 +27,6 @@
*
*/
-#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -47,10 +46,10 @@ static bool pciehp_force;
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
@@ -114,6 +113,9 @@ static int init_slot(struct controller *ctrl)
if (ATTN_LED(ctrl)) {
ops->get_attention_status = get_attention_status;
ops->set_attention_status = set_attention_status;
+ } else if (ctrl->pcie->port->hotplug_user_indicators) {
+ ops->get_attention_status = pciehp_get_raw_indicator_status;
+ ops->set_attention_status = pciehp_set_raw_indicator_status;
}
/* register this slot with the hotplug pci core */
@@ -337,13 +339,4 @@ static int __init pcied_init(void)
return retval;
}
-
-static void __exit pcied_cleanup(void)
-{
- dbg("unload_pciehpd()\n");
- pcie_port_service_unregister(&hpdriver_portdrv);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
-}
-
-module_init(pcied_init);
-module_exit(pcied_cleanup);
+device_initcall(pcied_init);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 880978b6d534..efe69e879455 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -106,7 +106,7 @@ static int board_added(struct slot *p_slot)
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
- ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(p_slot));
retval = -EIO;
goto err_exit;
}
@@ -120,6 +120,7 @@ static int board_added(struct slot *p_slot)
}
pciehp_green_led_on(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
@@ -253,11 +254,11 @@ static void handle_button_press_event(struct slot *p_slot)
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n",
slot_name(p_slot));
}
/* blink green LED and turn off amber */
@@ -272,14 +273,14 @@ static void handle_button_press_event(struct slot *p_slot)
* press the attention again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
pciehp_green_led_on(p_slot);
else
pciehp_green_led_off(p_slot);
pciehp_set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
@@ -290,10 +291,12 @@ static void handle_button_press_event(struct slot *p_slot)
* this means that the previous attention button action
* to hot-add or hot-remove is undergoing
*/
- ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): Button ignored\n",
+ slot_name(p_slot));
break;
default:
- ctrl_warn(ctrl, "ignoring invalid state %#x\n", p_slot->state);
+ ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
}
@@ -301,20 +304,6 @@ static void handle_button_press_event(struct slot *p_slot)
/*
* Note: This function must be called with slot->lock held
*/
-static void handle_surprise_event(struct slot *p_slot)
-{
- u8 getstatus;
-
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus)
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
- else
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
-}
-
-/*
- * Note: This function must be called with slot->lock held
- */
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
@@ -330,31 +319,27 @@ static void handle_link_event(struct slot *p_slot, u32 event)
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
- ctrl_info(ctrl,
- "Link Up event ignored on slot(%s): already powering on\n",
+ ctrl_info(ctrl, "Slot(%s): Link Up event ignored; already powering on\n",
slot_name(p_slot));
} else {
- ctrl_info(ctrl,
- "Link Down event queued on slot(%s): currently getting powered on\n",
+ ctrl_info(ctrl, "Slot(%s): Link Down event queued; currently getting powered on\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
if (event == INT_LINK_UP) {
- ctrl_info(ctrl,
- "Link Up event queued on slot(%s): currently getting powered off\n",
+ ctrl_info(ctrl, "Slot(%s): Link Up event queued; currently getting powered off\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
- ctrl_info(ctrl,
- "Link Down event ignored on slot(%s): already powering off\n",
+ ctrl_info(ctrl, "Slot(%s): Link Down event ignored; already powering off\n",
slot_name(p_slot));
}
break;
default:
- ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
}
@@ -377,14 +362,14 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- handle_surprise_event(p_slot);
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
case INT_PRESENCE_OFF:
/*
* Regardless of surprise capability, we need to
* definitely remove a card that has been pulled out!
*/
- handle_surprise_event(p_slot);
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case INT_LINK_UP:
case INT_LINK_DOWN:
@@ -404,18 +389,17 @@ static void interrupt_event_handler(struct work_struct *work)
int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
- int rc;
struct controller *ctrl = p_slot->ctrl;
pciehp_get_adapter_status(p_slot, &getstatus);
if (!getstatus) {
- ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot));
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
pciehp_get_latch_status(p_slot, &getstatus);
if (getstatus) {
- ctrl_info(ctrl, "Latch open on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Latch open\n",
slot_name(p_slot));
return -ENODEV;
}
@@ -424,19 +408,13 @@ int pciehp_enable_slot(struct slot *p_slot)
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
- ctrl_info(ctrl, "Already enabled on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
return -EINVAL;
}
}
- pciehp_get_latch_status(p_slot, &getstatus);
-
- rc = board_added(p_slot);
- if (rc)
- pciehp_get_latch_status(p_slot, &getstatus);
-
- return rc;
+ return board_added(p_slot);
}
/*
@@ -453,7 +431,7 @@ int pciehp_disable_slot(struct slot *p_slot)
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
- ctrl_info(ctrl, "Already disabled on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
return -EINVAL;
}
@@ -481,17 +459,17 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
p_slot->state = STATIC_STATE;
break;
case POWERON_STATE:
- ctrl_info(ctrl, "Slot %s is already in powering on state\n",
+ ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
case POWEROFF_STATE:
- ctrl_info(ctrl, "Already enabled on slot %s\n",
+ ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "invalid state %#x on slot %s\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
mutex_unlock(&p_slot->lock);
@@ -518,17 +496,17 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
- ctrl_info(ctrl, "Slot %s is already in powering off state\n",
+ ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
slot_name(p_slot));
break;
case BLINKINGON_STATE:
case POWERON_STATE:
- ctrl_info(ctrl, "Already disabled on slot %s\n",
+ ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "invalid state %#x on slot %s\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 08e84d61874e..b57fc6d6e28a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -355,6 +355,18 @@ static int pciehp_link_enable(struct controller *ctrl)
return __pciehp_link_set(ctrl, true);
}
+int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
+ u8 *status)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ u16 slot_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ *status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
+ return 0;
+}
+
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
@@ -431,6 +443,17 @@ int pciehp_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
+int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
+ u8 status)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ pcie_write_cmd_nowait(ctrl, status << 6,
+ PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
+ return 0;
+}
+
void pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -535,14 +558,14 @@ void pciehp_power_off_slot(struct slot *slot)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static irqreturn_t pcie_isr(int irq, void *dev_id)
+static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct pci_bus *subordinate = pdev->subordinate;
struct pci_dev *dev;
struct slot *slot = ctrl->slot;
- u16 detected, intr_loc;
+ u16 status, events;
u8 present;
bool link;
@@ -550,36 +573,31 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (pdev->current_state == PCI_D3cold)
return IRQ_NONE;
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
+ if (status == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ return IRQ_NONE;
+ }
+
/*
- * In order to guarantee that all interrupt events are
- * serviced, we need to re-inspect Slot Status register after
- * clearing what is presumed to be the last pending interrupt.
+ * Slot Status contains plain status bits as well as event
+ * notification bits; right now we only want the event bits.
*/
- intr_loc = 0;
- do {
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
- if (detected == (u16) ~0) {
- ctrl_info(ctrl, "%s: no response from device\n",
- __func__);
- return IRQ_HANDLED;
- }
+ events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
+ if (!events)
+ return IRQ_NONE;
- detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
- detected &= ~intr_loc;
- intr_loc |= detected;
- if (!intr_loc)
- return IRQ_NONE;
- if (detected)
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- intr_loc);
- } while (detected);
+ /* Capture link status before clearing interrupts */
+ if (events & PCI_EXP_SLTSTA_DLLSC)
+ link = pciehp_check_link_active(ctrl);
- ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", intr_loc);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
/* Check Command Complete Interrupt Pending */
- if (intr_loc & PCI_EXP_SLTSTA_CC) {
+ if (events & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
@@ -589,42 +607,38 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
list_for_each_entry(dev, &subordinate->devices, bus_list) {
if (dev->ignore_hotplug) {
ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
- intr_loc, pci_name(dev));
+ events, pci_name(dev));
return IRQ_HANDLED;
}
}
}
- if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
- return IRQ_HANDLED;
-
/* Check Attention Button Pressed */
- if (intr_loc & PCI_EXP_SLTSTA_ABP) {
- ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
+ if (events & PCI_EXP_SLTSTA_ABP) {
+ ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
slot_name(slot));
pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
}
/* Check Presence Detect Changed */
- if (intr_loc & PCI_EXP_SLTSTA_PDC) {
- pciehp_get_adapter_status(slot, &present);
- ctrl_info(ctrl, "Card %spresent on Slot(%s)\n",
- present ? "" : "not ", slot_name(slot));
+ if (events & PCI_EXP_SLTSTA_PDC) {
+ present = !!(status & PCI_EXP_SLTSTA_PDS);
+ ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
+ present ? "" : "not ");
pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
INT_PRESENCE_OFF);
}
/* Check Power Fault Detected */
- if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+ if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(slot));
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (intr_loc & PCI_EXP_SLTSTA_DLLSC) {
- link = pciehp_check_link_active(ctrl);
- ctrl_info(ctrl, "slot(%s): Link %s event\n",
- slot_name(slot), link ? "Up" : "Down");
+ if (events & PCI_EXP_SLTSTA_DLLSC) {
+ ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+ link ? "Up" : "Down");
pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
INT_LINK_DOWN);
}
@@ -632,6 +646,25 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t pcie_isr(int irq, void *dev_id)
+{
+ irqreturn_t rc, handled = IRQ_NONE;
+
+ /*
+ * To guarantee that all interrupt events are serviced, we need to
+ * re-inspect Slot Status register after clearing what is presumed
+ * to be the last pending interrupt.
+ */
+ do {
+ rc = pciehp_isr(irq, dev_id);
+ if (rc == IRQ_HANDLED)
+ handled = IRQ_HANDLED;
+ } while (rc == IRQ_HANDLED);
+
+ /* Return IRQ_HANDLED if we handled one or more events */
+ return handled;
+}
+
void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -804,6 +837,10 @@ struct controller *pcie_init(struct pcie_device *dev)
}
ctrl->pcie = dev;
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+
+ if (pdev->hotplug_user_indicators)
+ slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
+
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
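pciehp_get_raw_indicator_status() packs the Attention Indicator Control (bits 7:6) and Power Indicator Control (bits 9:8) fields of Slot Control into one 4-bit value by masking and shifting right by 6. A small standalone check of that encoding, using an arbitrary example register value:

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_EXP_SLTCTL_AIC	0x00c0	/* Attention Indicator Control, bits 7:6 */
	#define PCI_EXP_SLTCTL_PIC	0x0300	/* Power Indicator Control, bits 9:8 */

	int main(void)
	{
		uint16_t slot_ctrl = 0x01c0;	/* example: PIC = 01b (on), AIC = 11b (off) */
		uint8_t raw = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;

		printf("raw indicator status = 0x%x\n", raw);	/* 0x7 for this value */
		return 0;
	}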
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index e6245b03f0a1..56efaf72d08e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -22,6 +22,12 @@
#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+struct pnv_php_event {
+ bool added;
+ struct pnv_php_slot *php_slot;
+ struct work_struct work;
+};
+
static LIST_HEAD(pnv_php_slot_list);
static DEFINE_SPINLOCK(pnv_php_lock);
@@ -29,12 +35,40 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl &= ~(PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ free_irq(php_slot->irq, php_slot);
+ php_slot->irq = 0;
+ }
+
+ if (php_slot->wq) {
+ destroy_workqueue(php_slot->wq);
+ php_slot->wq = NULL;
+ }
+
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
static void pnv_php_free_slot(struct kref *kref)
{
struct pnv_php_slot *php_slot = container_of(kref,
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
+ pnv_php_disable_irq(php_slot);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -122,7 +156,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent)
of_node_put(dn);
refcount = atomic_read(&dn->kobj.kref.refcount);
- if (unlikely(refcount != 1))
+ if (refcount != 1)
pr_warn("Invalid refcount %d on <%s>\n",
refcount, of_node_full_name(dn));
@@ -184,11 +218,11 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
ret = pnv_php_populate_changeset(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
}
@@ -201,7 +235,7 @@ static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
struct pci_dn *pdn;
pdn = pci_add_device_node_info(hose, dn);
- if (unlikely(!pdn))
+ if (!pdn)
return ERR_PTR(-ENOMEM);
return NULL;
@@ -224,21 +258,21 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
* fits the real size.
*/
fdt1 = kzalloc(0x10000, GFP_KERNEL);
- if (unlikely(!fdt1)) {
+ if (!fdt1) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
goto out;
}
ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n",
ret);
goto free_fdt1;
}
fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
- if (unlikely(!fdt)) {
+ if (!fdt) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n",
fdt_totalsize(fdt1));
@@ -248,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
/* Unflatten device tree blob */
memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
- if (unlikely(!dt)) {
+ if (!dt) {
ret = -EINVAL;
dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n");
goto free_fdt;
@@ -258,7 +292,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
of_changeset_init(&php_slot->ocs);
pnv_php_reverse_nodes(php_slot->dn);
ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
- if (unlikely(ret)) {
+ if (ret) {
pnv_php_reverse_nodes(php_slot->dn);
dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n",
ret);
@@ -267,7 +301,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
php_slot->dn->child = NULL;
ret = of_changeset_apply(&php_slot->ocs);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n",
ret);
goto destroy_changeset;
@@ -301,7 +335,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
int ret;
ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
- if (likely(ret > 0)) {
+ if (ret > 0) {
if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
be64_to_cpu(msg.params[2]) != state ||
be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
@@ -311,7 +345,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
be64_to_cpu(msg.params[3]));
return -ENOMSG;
}
- } else if (unlikely(ret < 0)) {
+ } else if (ret < 0) {
dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n",
ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
return ret;
@@ -338,7 +372,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
* be on.
*/
ret = pnv_pci_get_power_state(php_slot->id, &power_state);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n",
ret);
} else {
@@ -360,7 +394,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
* get that, it will fail back to be empty.
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
- if (likely(ret >= 0)) {
+ if (ret >= 0) {
*state = presence;
slot->info->adapter_status = presence;
ret = 0;
@@ -393,7 +427,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Retrieve slot presence status */
ret = pnv_php_get_adapter_state(slot, &presence);
- if (unlikely(ret))
+ if (ret)
return ret;
/* Proceed if there have nothing behind the slot */
@@ -414,7 +448,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
php_slot->power_state_check = true;
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status != OPAL_PCI_SLOT_POWER_ON)
@@ -423,7 +457,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Check the power status. Scan the slot if it is already on */
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status == OPAL_PCI_SLOT_POWER_ON)
@@ -431,7 +465,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Power is off, turn it on and then scan the slot */
ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
- if (unlikely(ret))
+ if (ret)
return ret;
scan:
@@ -513,29 +547,30 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
struct pci_bus *bus;
const char *label;
uint64_t id;
+ int ret;
- label = of_get_property(dn, "ibm,slot-label", NULL);
- if (unlikely(!label))
+ ret = of_property_read_string(dn, "ibm,slot-label", &label);
+ if (ret)
return NULL;
if (pnv_pci_get_slot_id(dn, &id))
return NULL;
bus = pci_find_bus_by_node(dn);
- if (unlikely(!bus))
+ if (!bus)
return NULL;
php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL);
- if (unlikely(!php_slot))
+ if (!php_slot)
return NULL;
php_slot->name = kstrdup(label, GFP_KERNEL);
- if (unlikely(!php_slot->name)) {
+ if (!php_slot->name) {
kfree(php_slot);
return NULL;
}
- if (likely(dn->child && PCI_DN(dn->child)))
+ if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
php_slot->slot_no = -1; /* Placeholder slot */
@@ -567,7 +602,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Check if the slot is registered or not */
parent = pnv_php_find_slot(php_slot->dn);
- if (unlikely(parent)) {
+ if (parent) {
pnv_php_put_slot(parent);
return -EEXIST;
}
@@ -575,7 +610,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Register PCI slot */
ret = pci_hp_register(&php_slot->slot, php_slot->bus,
php_slot->slot_no, php_slot->name);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n",
ret);
return ret;
@@ -609,33 +644,213 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
return 0;
}
+static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct msix_entry entry;
+ int nr_entries, ret;
+ u16 pcie_flag;
+
+ /* Get total number of MSIx entries */
+ nr_entries = pci_msix_vec_count(pdev);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* Check hotplug MSIx entry is in range */
+ pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag);
+ entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry.entry >= nr_entries)
+ return -ERANGE;
+
+ /* Enable MSIx */
+ ret = pci_enable_msix_exact(pdev, &entry, 1);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling MSIx\n", ret);
+ return ret;
+ }
+
+ return entry.vector;
+}
+
+static void pnv_php_event_handler(struct work_struct *work)
+{
+ struct pnv_php_event *event =
+ container_of(work, struct pnv_php_event, work);
+ struct pnv_php_slot *php_slot = event->php_slot;
+
+ if (event->added)
+ pnv_php_enable_slot(&php_slot->slot);
+ else
+ pnv_php_disable_slot(&php_slot->slot);
+
+ kfree(event);
+}
+
+static irqreturn_t pnv_php_interrupt(int irq, void *data)
+{
+ struct pnv_php_slot *php_slot = data;
+ struct pci_dev *pchild, *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ struct pnv_php_event *event;
+ u16 sts, lsts;
+ u8 presence;
+ bool added;
+ unsigned long flags;
+ int ret;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+ if (sts & PCI_EXP_SLTSTA_DLLSC) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts);
+ added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
+ } else if (sts & PCI_EXP_SLTSTA_PDC) {
+ ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ if (!ret)
+ return IRQ_HANDLED;
+ added = !!(presence == OPAL_PCI_SLOT_PRESENT);
+ } else {
+ return IRQ_NONE;
+ }
+
+ /* Freeze the removed PE to avoid unexpected error reporting */
+ if (!added) {
+ pchild = list_first_entry_or_null(&php_slot->bus->devices,
+ struct pci_dev, bus_list);
+ edev = pchild ? pci_dev_to_eeh_dev(pchild) : NULL;
+ pe = edev ? edev->pe : NULL;
+ if (pe) {
+ eeh_serialize_lock(&flags);
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_serialize_unlock(flags);
+ eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
+ }
+ }
+
+ /*
+ * The PE is left in frozen state if the event is missed. It's
+ * fine as the PCI devices (PE) aren't functional any more.
+ */
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event) {
+ dev_warn(&pdev->dev, "PCI slot [%s] missed hotplug event 0x%04x\n",
+ php_slot->name, sts);
+ return IRQ_HANDLED;
+ }
+
+ dev_info(&pdev->dev, "PCI slot [%s] %s (IRQ: %d)\n",
+ php_slot->name, added ? "added" : "removed", irq);
+ INIT_WORK(&event->work, pnv_php_event_handler);
+ event->added = added;
+ event->php_slot = php_slot;
+ queue_work(php_slot->wq, &event->work);
+
+ return IRQ_HANDLED;
+}
+
+static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 sts, ctrl;
+ int ret;
+
+ /* Allocate workqueue */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ dev_warn(&pdev->dev, "Cannot alloc workqueue\n");
+ pnv_php_disable_irq(php_slot);
+ return;
+ }
+
+ /* Clear pending interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+
+ /* Request the interrupt */
+ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
+ php_slot->name, php_slot);
+ if (ret) {
+ pnv_php_disable_irq(php_slot);
+ dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq);
+ return;
+ }
+
+ /* Enable the interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl |= (PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ /* The interrupt is initialized successfully when @irq is valid */
+ php_slot->irq = irq;
+}
+
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ int irq, ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
+ return;
+ }
+
+ pci_set_master(pdev);
+
+ /* Enable MSIx interrupt */
+ irq = pnv_php_enable_msix(php_slot);
+ if (irq > 0) {
+ pnv_php_init_irq(php_slot, irq);
+ return;
+ }
+
+ /*
+ * Use MSI if MSIx doesn't work. Fall back to legacy INTx
+ * if MSI doesn't work either
+ */
+ ret = pci_enable_msi(pdev);
+ if (!ret || pdev->irq) {
+ irq = pdev->irq;
+ pnv_php_init_irq(php_slot, irq);
+ }
+}
+
static int pnv_php_register_one(struct device_node *dn)
{
struct pnv_php_slot *php_slot;
- const __be32 *prop32;
+ u32 prop32;
int ret;
/* Check if it's hotpluggable slot */
- prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,slot-pluggable", &prop32);
+ if (ret || !prop32)
return -ENXIO;
- prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,reset-by-firmware", &prop32);
+ if (ret || !prop32)
return -ENXIO;
php_slot = pnv_php_alloc_slot(dn);
- if (unlikely(!php_slot))
+ if (!php_slot)
return -ENODEV;
ret = pnv_php_register_slot(php_slot);
- if (unlikely(ret))
+ if (ret)
goto free_slot;
ret = pnv_php_enable(php_slot, false);
- if (unlikely(ret))
+ if (ret)
goto unregister_slot;
+ /* Enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(dn, "ibm,slot-surprise-pluggable", &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
return 0;
unregister_slot:
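pnv_php_enable_msix() above selects the MSI-X table entry advertised by the Interrupt Message Number field, bits 13:9 of the PCI Express Capabilities register, hence the PCI_EXP_FLAGS_IRQ mask and the shift by 9. A standalone check of that extraction with an example register value:

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt Message Number, bits 13:9 */

	int main(void)
	{
		uint16_t pcie_flags = 0x0242;	/* example capability register value */
		unsigned int entry = (pcie_flags & PCI_EXP_FLAGS_IRQ) >> 9;

		printf("hotplug MSI-X entry = %u\n", entry);	/* 1 for this example */
		return 0;
	}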
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2194b447201d..e30f05c8517f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -136,7 +136,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
- pci_setup_device(virtfn);
+ rc = pci_setup_device(virtfn);
+ if (rc)
+ goto failed0;
+
virtfn->dev.parent = dev->dev.parent;
virtfn->physfn = pci_dev_get(dev);
virtfn->is_virtfn = 1;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 9a033e8ee9a4..d966d47c9e80 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -452,6 +452,27 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return error;
}
+static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ static const pci_power_t state_conv[] = {
+ [ACPI_STATE_D0] = PCI_D0,
+ [ACPI_STATE_D1] = PCI_D1,
+ [ACPI_STATE_D2] = PCI_D2,
+ [ACPI_STATE_D3_HOT] = PCI_D3hot,
+ [ACPI_STATE_D3_COLD] = PCI_D3cold,
+ };
+ int state;
+
+ if (!adev || !acpi_device_power_manageable(adev))
+ return PCI_UNKNOWN;
+
+ if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
+ return PCI_UNKNOWN;
+
+ return state_conv[state];
+}
+
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -534,6 +555,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
+ .get_state = acpi_pci_get_power_state,
.choose_state = acpi_pci_choose_state,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
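The new .get_state hook becomes mandatory: the pci.c change further down rejects any pci_platform_pm_ops that leaves it (or the other callbacks) unset. A bare-bones sketch of a conforming ops table with stub callbacks, hypothetical rather than the ACPI backend:

	#include <linux/pci.h>
	#include "pci.h"	/* struct pci_platform_pm_ops */

	static bool stub_is_manageable(struct pci_dev *dev) { return false; }
	static int stub_set_state(struct pci_dev *dev, pci_power_t t) { return -ENODEV; }
	static pci_power_t stub_get_state(struct pci_dev *dev) { return PCI_UNKNOWN; }
	static pci_power_t stub_choose_state(struct pci_dev *dev) { return PCI_POWER_ERROR; }
	static int stub_sleep_wake(struct pci_dev *dev, bool enable) { return -ENODEV; }
	static int stub_run_wake(struct pci_dev *dev, bool enable) { return -ENODEV; }
	static bool stub_need_resume(struct pci_dev *dev) { return false; }

	static const struct pci_platform_pm_ops stub_pci_platform_pm = {
		.is_manageable	= stub_is_manageable,
		.set_state	= stub_set_state,
		.get_state	= stub_get_state,	/* newly required */
		.choose_state	= stub_choose_state,
		.sleep_wake	= stub_sleep_wake,
		.run_wake	= stub_run_wake,
		.need_resume	= stub_need_resume,
	};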
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e39a67c8ef39..1ccce1cd6aca 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -466,7 +466,6 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
-#ifdef CONFIG_KEXEC_CORE
/*
* If this is a kexec reboot, turn off Bus Master bit on the
* device to tell it to not continue to do DMA. Don't touch
@@ -476,7 +475,6 @@ static void pci_device_shutdown(struct device *dev)
*/
if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
pci_clear_master(pci_dev);
-#endif
}
#ifdef CONFIG_PM
@@ -684,8 +682,19 @@ static int pci_pm_prepare(struct device *dev)
static void pci_pm_complete(struct device *dev)
{
- pci_dev_complete_resume(to_pci_dev(dev));
- pm_complete_with_resume_check(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ pci_dev_complete_resume(pci_dev);
+ pm_generic_complete(dev);
+
+ /* Resume device if platform firmware has put it in reset-power-on */
+ if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ pci_power_t pre_sleep_state = pci_dev->current_state;
+
+ pci_update_current_state(pci_dev, pci_dev->current_state);
+ if (pci_dev->current_state < pre_sleep_state)
+ pm_request_resume(dev);
+ }
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 415956c5c593..ba34907538f6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -576,8 +576,9 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
- if (!ops->is_manageable || !ops->set_state || !ops->choose_state ||
- !ops->sleep_wake || !ops->run_wake || !ops->need_resume)
+ if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
+ !ops->choose_state || !ops->sleep_wake || !ops->run_wake ||
+ !ops->need_resume)
return -EINVAL;
pci_platform_pm = ops;
return 0;
@@ -594,6 +595,11 @@ static inline int platform_pci_set_power_state(struct pci_dev *dev,
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
+static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
+}
+
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
@@ -725,26 +731,25 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
}
/**
- * pci_update_current_state - Read PCI power state of given device from its
- * PCI PM registers and cache it
+ * pci_update_current_state - Read power state of given device and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
+ *
+ * The power state is read from the PMCSR register, which however is
+ * inaccessible in D3cold. The platform firmware is therefore queried first
+ * to detect accessibility of the register. In case the platform firmware
+ * reports an incorrect state or the device isn't power manageable by the
+ * platform at all, we try to detect D3cold by testing accessibility of the
+ * vendor ID in config space.
*/
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
- if (dev->pm_cap) {
+ if (platform_pci_get_power_state(dev) == PCI_D3cold ||
+ !pci_device_is_present(dev)) {
+ dev->current_state = PCI_D3cold;
+ } else if (dev->pm_cap) {
u16 pmcsr;
- /*
- * Configuration space is not accessible for device in
- * D3cold, so just keep or set D3cold for safety
- */
- if (dev->current_state == PCI_D3cold)
- return;
- if (state == PCI_D3cold) {
- dev->current_state = PCI_D3cold;
- return;
- }
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
@@ -1983,9 +1988,22 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
default:
target_state = state;
}
- } else if (!dev->pm_cap) {
+
+ return target_state;
+ }
+
+ if (!dev->pm_cap)
target_state = PCI_D0;
- } else if (device_may_wakeup(&dev->dev)) {
+
+ /*
+ * If the device is in D3cold even though it's not power-manageable by
+ * the platform, it may have been powered down by non-standard means.
+ * Best to let it slumber.
+ */
+ if (dev->current_state == PCI_D3cold)
+ target_state = PCI_D3cold;
+
+ if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
@@ -4983,6 +5001,13 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
+ if (!*p)
+ goto out;
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
+ goto out;
+ }
+
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
@@ -5047,6 +5072,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
}
p++;
}
+out:
spin_unlock(&resource_alignment_lock);
return align;
}
@@ -5065,6 +5091,15 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
resource_size_t align, size;
u16 command;
+ /*
+ * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
+ * 3.4.1.11. Their resources are allocated from the space
+ * described by the VF BARx register in the PF's SR-IOV capability.
+ * We can't influence their alignment here.
+ */
+ if (dev->is_virtfn)
+ return;
+
/* check if specified PCI is target device to reassign */
align = pci_specified_resource_alignment(dev);
if (!align)
@@ -5087,6 +5122,12 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+ dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
+ i, r);
+ continue;
+ }
+
size = resource_size(r);
if (size < align) {
size = align;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9730c474b016..451856210e18 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -42,6 +42,8 @@ int pci_probe_reset_function(struct pci_dev *dev);
*
* @set_state: invokes the platform firmware to set the device's power state
*
+ * @get_state: queries the platform firmware for a device's current power state
+ *
* @choose_state: returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
@@ -62,6 +64,7 @@ int pci_probe_reset_function(struct pci_dev *dev);
struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
+ pci_power_t (*get_state)(struct pci_dev *dev);
pci_power_t (*choose_state)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
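The new ->get_state hook pairs with the pci_update_current_state() change above, which now asks the platform for the power state before trusting PMCSR. A minimal sketch of a backend filling it in, assuming a hypothetical firmware query (example_firmware_says_off() and the ops name are illustrative, not part of this patch):

/* Hypothetical: asks platform firmware whether device power has been removed. */
static bool example_firmware_says_off(struct pci_dev *dev);

static pci_power_t example_pci_get_power_state(struct pci_dev *dev)
{
	/* Report D3cold when the firmware says the power rail is off. */
	return example_firmware_says_off(dev) ? PCI_D3cold : PCI_D0;
}

static struct pci_platform_pm_ops example_platform_pm_ops = {
	.get_state = example_pci_get_power_state,
	/* .is_manageable, .set_state, .choose_state, etc. omitted in this sketch */
};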
@@ -332,6 +335,12 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
void pci_enable_acs(struct pci_dev *dev);
+#ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
+#else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
+#endif
+
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7fcea75afa4c..7ce77635e5ad 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -92,3 +92,14 @@ config PCIE_DPC
will be handled by the DPC driver. If your system doesn't
have this capability or you do not want to use this feature,
it is safe to answer N.
+
+config PCIE_PTM
+ bool "PCIe Precision Time Measurement support"
+ default n
+ depends on PCIEPORTBUS
+ help
+ This enables PCI Express Precision Time Measurement (PTM)
+ support.
+
+ This is only useful if you have devices that support PTM, but it
+ is safe to enable even if you don't.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index b24525b3dec1..36e35ea8fde7 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_PCIEAER) += aer/
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o
+obj-$(CONFIG_PCIE_PTM) += ptm.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 48d21e0edd56..139150b2bdfd 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -15,7 +15,6 @@
*
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -37,9 +36,6 @@
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
@@ -70,7 +66,7 @@ static int pcie_aer_disable;
void pci_no_aer(void)
{
- pcie_aer_disable = 1; /* has priority over 'forceload' */
+ pcie_aer_disable = 1;
}
bool pci_aer_available(void)
@@ -134,7 +130,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
SYSTEM_ERROR_INTR_ON_MESG_MASK);
- aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ aer_pos = pdev->aer_cap;
/* Clear error status */
pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
@@ -173,7 +169,7 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
*/
set_downstream_devices_error_reporting(pdev, false);
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ pos = pdev->aer_cap;
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
@@ -200,7 +196,7 @@ irqreturn_t aer_irq(int irq, void *context)
unsigned long flags;
int pos;
- pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
+ pos = pdev->port->aer_cap;
/*
* Must lock access to Root Error Status Reg, Root Error ID Reg,
* and Root error producer/consumer index
@@ -294,7 +290,6 @@ static void aer_remove(struct pcie_device *dev)
/**
* aer_probe - initialize resources
* @dev: pointer to the pcie_dev data structure
- * @id: pointer to the service id data structure
*
* Invoked when PCI Express bus loads AER service driver.
*/
@@ -304,11 +299,6 @@ static int aer_probe(struct pcie_device *dev)
struct aer_rpc *rpc;
struct device *device = &dev->device;
- /* Init */
- status = aer_init(dev);
- if (status)
- return status;
-
/* Alloc rpc data structure */
rpc = aer_alloc_rpc(dev);
if (!rpc) {
@@ -343,7 +333,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
u32 reg32;
int pos;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
@@ -396,7 +386,7 @@ static void aer_error_resume(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
/* Clean AER Root Error Status */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
if (dev->error_state == pci_channel_io_normal)
@@ -417,16 +407,4 @@ static int __init aer_service_init(void)
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
-
-/**
- * aer_service_exit - unregister AER root service driver
- *
- * Invoked when AER root service driver is unloaded.
- */
-static void __exit aer_service_exit(void)
-{
- pcie_port_service_unregister(&aerdriver);
-}
-
-module_init(aer_service_init);
-module_exit(aer_service_exit);
+device_initcall(aer_service_init);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 945c939a86c5..d51e4a57b190 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -60,6 +60,7 @@ struct aer_rpc {
struct pcie_device *rpd; /* Root Port device */
struct work_struct dpc_handler;
struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
+ struct aer_err_info e_info;
unsigned short prod_idx; /* Error Producer Index */
unsigned short cons_idx; /* Error Consumer Index */
int isr;
@@ -105,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-int aer_init(struct pcie_device *dev);
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
@@ -121,11 +121,4 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return 0;
}
#endif
-
-static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
- int enable)
-{
- pci_dev->__aer_firmware_first = !!enable;
- pci_dev->__aer_firmware_first_valid = 1;
-}
#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 521e39c1b66d..b1303b32053f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -27,11 +27,6 @@
#include <linux/kfifo.h>
#include "aerdrv.h"
-static bool forceload;
-static bool nosourceid;
-module_param(forceload, bool, 0);
-module_param(nosourceid, bool, 0);
-
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -40,7 +35,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
if (pcie_aer_get_firmware_first(dev))
return -EIO;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ if (!dev->aer_cap)
return -EIO;
return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
@@ -62,7 +57,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
int pos;
u32 status;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -EIO;
@@ -83,7 +78,7 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return -ENODEV;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -EIO;
@@ -102,6 +97,12 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+int pci_aer_init(struct pci_dev *dev)
+{
+ dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ return pci_cleanup_aer_error_status_regs(dev);
+}
+
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
@@ -132,7 +133,8 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
- if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
+ if ((PCI_BUS_NUM(e_info->id) != 0) &&
+ !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
/* Device ID match? */
if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
return true;
@@ -144,10 +146,10 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
/*
* When either
- * 1) nosourceid==y;
- * 2) bus id is equal to 0. Some ports might lose the bus
+ * 1) bus id is equal to 0. Some ports might lose the bus
* id of error source id;
- * 3) There are multiple errors and prior id comparing fails;
+ * 2) the PCI_BUS_FLAGS_NO_AERSID bus flag is set;
+ * 3) there are multiple errors and the prior ID comparison fails;
* We check AER status registers to find possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
@@ -158,7 +160,7 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return false;
@@ -555,7 +557,7 @@ static void handle_error_source(struct pcie_device *aerdev,
* Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
@@ -647,7 +649,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
info->status = 0;
info->tlp_header_valid = 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
/* The device might not support AER */
if (!pos)
@@ -715,15 +717,8 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct aer_err_info *e_info;
-
- /* struct aer_err_info might be big, so we allocate it with slab */
- e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
- if (!e_info) {
- dev_printk(KERN_DEBUG, &p_device->port->dev,
- "Can't allocate mem when processing AER errors\n");
- return;
- }
+ struct aer_rpc *rpc = get_service_data(p_device);
+ struct aer_err_info *e_info = &rpc->e_info;
/*
* There is a possibility that both correctable error and
@@ -762,8 +757,6 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (find_source_device(p_device->port, e_info))
aer_process_err_devices(p_device, e_info);
}
-
- kfree(e_info);
}
/**
@@ -812,19 +805,3 @@ void aer_isr(struct work_struct *work)
aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
}
-
-/**
- * aer_init - provide AER initialization
- * @dev: pointer to AER pcie device
- *
- * Invoked when AER service driver is loaded.
- */
-int aer_init(struct pcie_device *dev)
-{
- if (forceload) {
- dev_printk(KERN_DEBUG, &dev->device,
- "aerdrv forceload requested.\n");
- pcie_aer_force_firmware_first(dev->port, 0);
- }
- return 0;
-}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 167fe411ce2e..54c4b691e51f 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -219,15 +219,13 @@ int cper_severity_to_aer(int cper_severity)
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, status_strs_size, tlp_header_valid = 0;
u32 status, mask;
const char **status_strs;
- aer_severity = cper_severity_to_aer(cper_severity);
-
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 250f87861786..9811b14d9ad8 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -1,5 +1,7 @@
/*
* PCI Express Downstream Port Containment services driver
+ * Author: Keith Busch <keith.busch@intel.com>
+ *
* Copyright (C) 2016 Intel Corp.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,7 +11,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>
@@ -143,16 +145,4 @@ static int __init dpc_service_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
-
-static void __exit dpc_service_exit(void)
-{
- pcie_port_service_unregister(&dpcdriver);
-}
-
-MODULE_DESCRIPTION("PCI Express Downstream Port Containment driver");
-MODULE_AUTHOR("Keith Busch <keith.busch@intel.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1");
-
-module_init(dpc_service_init);
-module_exit(dpc_service_exit);
+device_initcall(dpc_service_init);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 1ae4c73e7a3c..884bad5320f8 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -10,7 +10,6 @@
* for more details.
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -449,17 +448,6 @@ static int pcie_pme_resume(struct pcie_device *srv)
return 0;
}
-/**
- * pcie_pme_remove - Prepare PCIe PME service device for removal.
- * @srv - PCIe service device to remove.
- */
-static void pcie_pme_remove(struct pcie_device *srv)
-{
- pcie_pme_suspend(srv);
- free_irq(srv->irq, srv);
- kfree(get_service_data(srv));
-}
-
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -468,7 +456,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
- .remove = pcie_pme_remove,
};
/**
@@ -478,5 +465,4 @@ static int __init pcie_pme_service_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
-
-module_init(pcie_pme_service_init);
+device_initcall(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 70d7ad8c6d17..79327cc14e7d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -1,12 +1,13 @@
/*
* File: portdrv_pci.c
* Purpose: PCI Express Port Bus Driver
+ * Author: Tom Nguyen <tom.l.nguyen@intel.com>
+ * Version: v1.0
*
* Copyright (C) 2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -21,16 +22,6 @@
#include "portdrv.h"
#include "aer/aerdrv.h"
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "PCIe Port Bus Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
/* If this switch is set, PCIe port native services should not be enabled. */
bool pcie_ports_disabled;
@@ -341,7 +332,6 @@ static const struct pci_device_id port_pci_ids[] = { {
PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
}, { /* end: all zeroes */ }
};
-MODULE_DEVICE_TABLE(pci, port_pci_ids);
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
@@ -406,5 +396,4 @@ static int __init pcie_portdrv_init(void)
out:
return retval;
}
-
-module_init(pcie_portdrv_init);
+device_initcall(pcie_portdrv_init);
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
new file mode 100644
index 000000000000..bab8ac63c4f3
--- /dev/null
+++ b/drivers/pci/pcie/ptm.c
@@ -0,0 +1,142 @@
+/*
+ * PCI Express Precision Time Measurement
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include "../pci.h"
+
+static void pci_ptm_info(struct pci_dev *dev)
+{
+ char clock_desc[8];
+
+ switch (dev->ptm_granularity) {
+ case 0:
+ snprintf(clock_desc, sizeof(clock_desc), "unknown");
+ break;
+ case 255:
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+ snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ dev->ptm_granularity);
+ break;
+ }
+ dev_info(&dev->dev, "PTM enabled%s, %s granularity\n",
+ dev->ptm_root ? " (root)" : "", clock_desc);
+}
+
+void pci_ptm_init(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+ u8 local_clock;
+ struct pci_dev *ups;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
+ /*
+ * Enable PTM only on interior devices (root ports, switch ports,
+ * etc.) on the assumption that it causes no link traffic until an
+ * endpoint enables it.
+ */
+ if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
+ local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+ /*
+ * There's no point in enabling PTM unless it's enabled in the
+ * upstream device or this device can be a PTM Root itself. Per
+ * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
+ * furthest upstream Time Source as the PTM Root.
+ */
+ ups = pci_upstream_bridge(dev);
+ if (ups && ups->ptm_enabled) {
+ ctrl = PCI_PTM_CTRL_ENABLE;
+ if (ups->ptm_granularity == 0)
+ dev->ptm_granularity = 0;
+ else if (ups->ptm_granularity > local_clock)
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else {
+ if (cap & PCI_PTM_CAP_ROOT) {
+ ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
+ dev->ptm_root = 1;
+ dev->ptm_granularity = local_clock;
+ } else
+ return;
+ }
+
+ ctrl |= dev->ptm_granularity << 8;
+ pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
+ dev->ptm_enabled = 1;
+
+ pci_ptm_info(dev);
+}
+
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{
+ int pos;
+ u32 cap, ctrl;
+ struct pci_dev *ups;
+
+ if (!pci_is_pcie(dev))
+ return -EINVAL;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
+ if (!(cap & PCI_PTM_CAP_REQ))
+ return -EINVAL;
+
+ /*
+ * For a PCIe Endpoint, PTM is only useful if the endpoint can
+ * issue PTM requests to upstream devices that have PTM enabled.
+ *
+ * For Root Complex Integrated Endpoints, there is no upstream
+ * device, so there must be some implementation-specific way to
+ * associate the endpoint with a time source.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
+ ups = pci_upstream_bridge(dev);
+ if (!ups || !ups->ptm_enabled)
+ return -EINVAL;
+
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ dev->ptm_granularity = 0;
+ } else
+ return -EINVAL;
+
+ ctrl = PCI_PTM_CTRL_ENABLE;
+ ctrl |= dev->ptm_granularity << 8;
+ pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
+ dev->ptm_enabled = 1;
+
+ pci_ptm_info(dev);
+
+ if (granularity)
+ *granularity = dev->ptm_granularity;
+ return 0;
+}
+EXPORT_SYMBOL(pci_enable_ptm);
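A brief usage sketch for the interface exported above: an endpoint driver that wants PTM would typically request it during probe. The probe function name and log message here are illustrative only; since PTM is optional, failure to enable it need not abort the probe.

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u8 granularity;

	/* 0 means unknown granularity, 255 means >254 ns (see pci_ptm_info() above). */
	if (!pci_enable_ptm(pdev, &granularity))
		dev_info(&pdev->dev, "PTM enabled, granularity %u ns\n", granularity);

	/* Continue normal probing whether or not PTM was enabled. */
	return 0;
}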
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 93f280df3428..ab002671fa60 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1666,7 +1666,11 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
- pci_cleanup_aer_error_status_regs(dev);
+ /* Precision Time Measurement */
+ pci_ptm_init(dev);
+
+ /* Advanced Error Reporting */
+ pci_aer_init(dev);
}
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 44e0ff37480b..cffc1c095519 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3198,6 +3198,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
@@ -4431,3 +4432,20 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
+
+/*
+ * VMD-enabled root ports will change the source ID for all messages
+ * to the VMD device. Rather than doing device matching with the source
+ * ID, the AER driver should traverse the child device tree, reading
+ * AER registers to find the faulting device.
+ */
+static void quirk_no_aersid(struct pci_dev *pdev)
+{
+ /* VMD Domain */
+ if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000)
+ pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 04e2653bb8c0..4d5c5f9f0dbd 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,4 +12,11 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config XGENE_PMU
+ depends on PERF_EVENTS && ARCH_XGENE
+ bool "APM X-Gene SoC PMU"
+ default n
+ help
+ Say y if you want to use APM X-Gene SoC performance monitors.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397ded94..b116e982810b 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
new file mode 100644
index 000000000000..c2ac7646b99f
--- /dev/null
+++ b/drivers/perf/xgene_pmu.c
@@ -0,0 +1,1398 @@
+/*
+ * APM X-Gene SoC PMU (Performance Monitor Unit)
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Hoan Tran <hotran@apm.com>
+ * Tai Nguyen <ttnguyen@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CSW_CSWCR 0x0000
+#define CSW_CSWCR_DUALMCB_MASK BIT(0)
+#define MCBADDRMR 0x0000
+#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
+
+#define PCPPMU_INTSTATUS_REG 0x000
+#define PCPPMU_INTMASK_REG 0x004
+#define PCPPMU_INTMASK 0x0000000F
+#define PCPPMU_INTENMASK 0xFFFFFFFF
+#define PCPPMU_INTCLRMASK 0xFFFFFFF0
+#define PCPPMU_INT_MCU BIT(0)
+#define PCPPMU_INT_MCB BIT(1)
+#define PCPPMU_INT_L3C BIT(2)
+#define PCPPMU_INT_IOB BIT(3)
+
+#define PMU_MAX_COUNTERS 4
+#define PMU_CNT_MAX_PERIOD 0x100000000ULL
+#define PMU_OVERFLOW_MASK 0xF
+#define PMU_PMCR_E BIT(0)
+#define PMU_PMCR_P BIT(1)
+
+#define PMU_PMEVCNTR0 0x000
+#define PMU_PMEVCNTR1 0x004
+#define PMU_PMEVCNTR2 0x008
+#define PMU_PMEVCNTR3 0x00C
+#define PMU_PMEVTYPER0 0x400
+#define PMU_PMEVTYPER1 0x404
+#define PMU_PMEVTYPER2 0x408
+#define PMU_PMEVTYPER3 0x40C
+#define PMU_PMAMR0 0xA00
+#define PMU_PMAMR1 0xA04
+#define PMU_PMCNTENSET 0xC00
+#define PMU_PMCNTENCLR 0xC20
+#define PMU_PMINTENSET 0xC40
+#define PMU_PMINTENCLR 0xC60
+#define PMU_PMOVSR 0xC80
+#define PMU_PMCR 0xE04
+
+#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu)
+#define GET_CNTR(ev) (ev->hw.idx)
+#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
+#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
+#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
+
+struct hw_pmu_info {
+ u32 type;
+ u32 enable_mask;
+ void __iomem *csr;
+};
+
+struct xgene_pmu_dev {
+ struct hw_pmu_info *inf;
+ struct xgene_pmu *parent;
+ struct pmu pmu;
+ u8 max_counters;
+ DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
+ u64 max_period;
+ const struct attribute_group **attr_groups;
+ struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
+};
+
+struct xgene_pmu {
+ struct device *dev;
+ int version;
+ void __iomem *pcppmu_csr;
+ u32 mcb_active_mask;
+ u32 mc_active_mask;
+ cpumask_t cpu;
+ raw_spinlock_t lock;
+ struct list_head l3cpmus;
+ struct list_head iobpmus;
+ struct list_head mcbpmus;
+ struct list_head mcpmus;
+};
+
+struct xgene_pmu_dev_ctx {
+ char *name;
+ struct list_head next;
+ struct xgene_pmu_dev *pmu_dev;
+ struct hw_pmu_info inf;
+};
+
+struct xgene_pmu_data {
+ int id;
+ u32 data;
+};
+
+enum xgene_pmu_version {
+ PCP_PMU_V1 = 1,
+ PCP_PMU_V2,
+};
+
+enum xgene_pmu_dev_type {
+ PMU_TYPE_L3C = 0,
+ PMU_TYPE_IOB,
+ PMU_TYPE_MCB,
+ PMU_TYPE_MC,
+};
+
+/*
+ * sysfs format attributes
+ */
+static ssize_t xgene_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *) eattr->var);
+}
+
+#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *iob_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
+ XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *mc_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = l3c_pmu_format_attrs,
+};
+
+static const struct attribute_group iob_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = iob_pmu_format_attrs,
+};
+
+static const struct attribute_group mcb_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mcb_pmu_format_attrs,
+};
+
+static const struct attribute_group mc_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mc_pmu_format_attrs,
+};
+
+/*
+ * sysfs event attributes
+ */
+static ssize_t xgene_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+}
+
+#define XGENE_PMU_EVENT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
+ XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
+ XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
+ XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
+ XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
+ XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
+ XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
+ XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
+ XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
+ NULL,
+};
+
+static struct attribute *iob_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
+ XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
+ XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
+ XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
+ XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
+ XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
+ XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
+ XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
+ XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
+ XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
+ XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
+ XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
+ NULL,
+};
+
+static struct attribute *mc_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
+ XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
+ XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
+ XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
+ XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
+ XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
+ XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
+ XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
+ XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
+ XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
+ XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
+ XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
+ XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
+ XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
+ XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
+ XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
+ XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = l3c_pmu_events_attrs,
+};
+
+static const struct attribute_group iob_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = iob_pmu_events_attrs,
+};
+
+static const struct attribute_group mcb_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mcb_pmu_events_attrs,
+};
+
+static const struct attribute_group mc_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mc_pmu_events_attrs,
+};
+
+/*
+ * sysfs cpumask attributes
+ */
+static ssize_t xgene_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
+
+static struct attribute *xgene_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_cpumask_attr_group = {
+ .attrs = xgene_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *l3c_pmu_attr_groups[] = {
+ &l3c_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &l3c_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *iob_pmu_attr_groups[] = {
+ &iob_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &iob_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mcb_pmu_attr_groups[] = {
+ &mcb_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mcb_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mc_pmu_attr_groups[] = {
+ &mc_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mc_pmu_events_attr_group,
+ NULL
+};
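For completeness, a user-space sketch (not part of the patch) showing how the event encodings above are consumed through perf_event_open(2): it counts the L3C "read-hit" event (config 0x02 per l3c_pmu_events_attrs) on the first L3C instance. The sysfs path and the instance name "l3c0" follow the xgene_pmu_dev_name() scheme further down; reading the PMU type from sysfs and sufficient privileges for a system-wide counter are assumed.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* Dynamic PMU type registered by perf_pmu_register() below. */
	f = fopen("/sys/bus/event_source/devices/l3c0/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x02;	/* l3c read-hit */
	attr.config1 = 0;	/* agent mask: 0 counts requests from all agents */

	/* System-wide counter on CPU 0; the driver rejects per-task events. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("l3c read-hit: %llu\n", (unsigned long long)count);
	return 0;
}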
+
+static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
+{
+ int cntr;
+
+ cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+ if (cntr == pmu_dev->max_counters)
+ return -ENOSPC;
+ set_bit(cntr, pmu_dev->cntr_assign_mask);
+
+ return cntr;
+}
+
+static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
+{
+ clear_bit(cntr, pmu_dev->cntr_assign_mask);
+}
+
+static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
+}
+
+static inline void
+xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
+}
+
+static inline void
+xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
+}
+
+static inline void
+xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
+}
+
+static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_P;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val &= ~PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static void xgene_perf_pmu_enable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+ int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+
+ if (!enabled)
+ return;
+
+ xgene_pmu_start_counters(pmu_dev);
+}
+
+static void xgene_perf_pmu_disable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+
+ xgene_pmu_stop_counters(pmu_dev);
+}
+
+static int xgene_perf_event_init(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct perf_event *sibling;
+
+ /* Test the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * SoC PMU counters are shared across all cores. Therefore, they
+ * support neither per-process mode nor event sampling mode.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ /* SOC counters do not have usr/os/guest/host bits */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+ /*
+ * Many perf core operations (eg. events rotation) operate on a
+ * single CPU context. This is obvious for CPU PMUs, where one
+ * expects the same sets of events being observed on all CPUs,
+ * but can lead to issues for off-core PMUs, where each
+ * event could be theoretically assigned to a different CPU. To
+ * mitigate this, we enforce CPU assignment to one, selected
+ * processor (the one described in the "cpumask" attribute).
+ */
+ event->cpu = cpumask_first(&pmu_dev->parent->cpu);
+
+ hw->config = event->attr.config;
+ /*
+ * Each bit of the config1 field represents an agent from which the
+ * request of the event come. The event is counted only if it's caused
+ * by a request of an agent has the bit cleared.
+ * By default, the event is counted for all agents.
+ */
+ hw->config_base = event->attr.config1;
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &event->group_leader->sibling_list,
+ group_entry)
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void xgene_perf_enable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
+ xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
+ if (pmu_dev->inf->type == PMU_TYPE_IOB)
+ xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));
+
+ xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_disable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_event_set_period(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ /*
+ * The X-Gene PMU counters have a period of 2^32. To account for the
+ * possibility of extreme interrupt latency, we program for a period of
+ * half that. Hopefully we can handle the interrupt before another 2^31
+ * events occur and the counter overtakes its previous value.
+ */
+ u64 val = 1ULL << 31;
+
+ local64_set(&hw->prev_count, val);
+ xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
+}
+
+static void xgene_perf_event_update(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hw->prev_count);
+ new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));
+
+ if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;
+
+ local64_add(delta, &event->count);
+}
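Since max_period here is 2^32 - 1, the masking above folds a wrapped 32-bit reading back into range: for example, with prev_count at the programmed start value 0x80000000 and a post-wrap reading of 0x00000005, (0x5 - 0x80000000) & 0xFFFFFFFF = 0x80000005, i.e. 2^31 + 5 events are accumulated.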
+
+static void xgene_perf_read(struct perf_event *event)
+{
+ xgene_perf_event_update(event);
+}
+
+static void xgene_perf_start(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
+ hw->state = 0;
+
+ xgene_perf_event_set_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ u64 prev_raw_count = local64_read(&hw->prev_count);
+
+ xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
+ (u32) prev_raw_count);
+ }
+
+ xgene_perf_enable_event(event);
+ perf_event_update_userpage(event);
+}
+
+static void xgene_perf_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+ u64 config;
+
+ if (hw->state & PERF_HES_UPTODATE)
+ return;
+
+ xgene_perf_disable_event(event);
+ WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
+ hw->state |= PERF_HES_STOPPED;
+
+ if (hw->state & PERF_HES_UPTODATE)
+ return;
+
+ config = hw->config;
+ xgene_perf_read(event);
+ hw->state |= PERF_HES_UPTODATE;
+}
+
+static int xgene_perf_add(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ /* Allocate an event counter */
+ hw->idx = get_next_avail_cntr(pmu_dev);
+ if (hw->idx < 0)
+ return -EAGAIN;
+
+ /* Update counter event pointer for Interrupt handler */
+ pmu_dev->pmu_counter_event[hw->idx] = event;
+
+ if (flags & PERF_EF_START)
+ xgene_perf_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void xgene_perf_del(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ xgene_perf_stop(event, PERF_EF_UPDATE);
+
+ /* clear the assigned counter */
+ clear_avail_cntr(pmu_dev, GET_CNTR(event));
+
+ perf_event_update_userpage(event);
+ pmu_dev->pmu_counter_event[hw->idx] = NULL;
+}
+
+static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
+{
+ struct xgene_pmu *xgene_pmu;
+
+ pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1;
+ /* The first PMU version supports only a single event counter */
+ xgene_pmu = pmu_dev->parent;
+ if (xgene_pmu->version == PCP_PMU_V1)
+ pmu_dev->max_counters = 1;
+ else
+ pmu_dev->max_counters = PMU_MAX_COUNTERS;
+
+ /* Perf driver registration */
+ pmu_dev->pmu = (struct pmu) {
+ .attr_groups = pmu_dev->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = xgene_perf_pmu_enable,
+ .pmu_disable = xgene_perf_pmu_disable,
+ .event_init = xgene_perf_event_init,
+ .add = xgene_perf_add,
+ .del = xgene_perf_del,
+ .start = xgene_perf_start,
+ .stop = xgene_perf_stop,
+ .read = xgene_perf_read,
+ };
+
+ /* Hardware counter init */
+ xgene_pmu_stop_counters(pmu_dev);
+ xgene_pmu_reset_counters(pmu_dev);
+
+ return perf_pmu_register(&pmu_dev->pmu, name, -1);
+}
+
+static int
+xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu;
+ int rc;
+
+ pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+ pmu->parent = xgene_pmu;
+ pmu->inf = &ctx->inf;
+ ctx->pmu_dev = pmu;
+
+ switch (pmu->inf->type) {
+ case PMU_TYPE_L3C:
+ pmu->attr_groups = l3c_pmu_attr_groups;
+ break;
+ case PMU_TYPE_IOB:
+ pmu->attr_groups = iob_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MCB:
+ if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mcb_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MC:
+ if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mc_pmu_attr_groups;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = xgene_init_perf(pmu, ctx->name);
+ if (rc) {
+ dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
+ goto dev_err;
+ }
+
+ dev_info(dev, "%s PMU registered\n", ctx->name);
+
+ return rc;
+
+dev_err:
+ devm_kfree(dev, pmu);
+ return -ENODEV;
+}
+
+static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
+{
+ struct xgene_pmu *xgene_pmu = pmu_dev->parent;
+ u32 pmovsr;
+ int idx;
+
+ pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
+ if (!pmovsr)
+ return;
+
+ /* Clear interrupt flag */
+ if (xgene_pmu->version == PCP_PMU_V1)
+ writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR);
+ else
+ writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR);
+
+ for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = pmu_dev->pmu_counter_event[idx];
+ int overflowed = pmovsr & BIT(idx);
+
+ /* Ignore if we don't have an event. */
+ if (!event || !overflowed)
+ continue;
+ xgene_perf_event_update(event);
+ xgene_perf_event_set_period(event);
+ }
+}
+
+static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct xgene_pmu *xgene_pmu = dev_id;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&xgene_pmu->lock, flags);
+
+ /* Get Interrupt PMU source */
+ val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
+ if (val & PCPPMU_INT_MCU) {
+ list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_MCB) {
+ list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_L3C) {
+ list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_IOB) {
+ list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
+ struct resource *res;
+ unsigned int reg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csw_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
+ return PTR_ERR(csw_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mcba_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcba_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
+ return PTR_ERR(mcba_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcbb_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
+ return PTR_ERR(mcbb_csr);
+ }
+
+ reg = readl(csw_csr + CSW_CSWCR);
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ reg = readl(mcbb_csr + CSW_CSWCR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ reg = readl(mcba_csr + CSW_CSWCR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct regmap *csw_map, *mcba_map, *mcbb_map;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int reg;
+
+ csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
+ if (IS_ERR(csw_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
+ return PTR_ERR(csw_map);
+ }
+
+ mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
+ if (IS_ERR(mcba_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
+ return PTR_ERR(mcba_map);
+ }
+
+ mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
+ if (IS_ERR(mcbb_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
+ return PTR_ERR(mcbb_map);
+ }
+
+ if (regmap_read(csw_map, CSW_CSWCR, &reg))
+ return -EINVAL;
+
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcbb_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcba_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+}
+
+static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+{
+ switch (type) {
+ case PMU_TYPE_L3C:
+ return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
+ case PMU_TYPE_IOB:
+ return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
+ case PMU_TYPE_MCB:
+ return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
+ case PMU_TYPE_MC:
+ return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
+ default:
+ return devm_kasprintf(dev, GFP_KERNEL, "unknown");
+ }
+}
+
+#if defined(CONFIG_ACPI)
+static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct resource *res = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+ acpi_dev_resource_memory(ares, res);
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static struct
+xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct acpi_device *adev, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct list_head resource_list;
+ struct xgene_pmu_dev_ctx *ctx;
+ const union acpi_object *obj;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+ rc = acpi_dev_get_resources(adev, &resource_list,
+ acpi_pmu_dev_add_resource, &res);
+ acpi_dev_free_resource_list(&resource_list);
+ if (rc < 0 || IS_ERR(&res)) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ rc = acpi_dev_get_property(adev, "enable-bit-index",
+ ACPI_TYPE_INTEGER, &obj);
+ if (rc < 0)
+ enable_bit = 0;
+ else
+ enable_bit = (int) obj->integer.value;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct xgene_pmu *xgene_pmu = data;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ if (!strcmp(acpi_device_hid(adev), "APMC0D5D"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5E"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5F"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D60"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ return AE_OK;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ return AE_OK;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ return AE_OK;
+}
+
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct device *dev = xgene_pmu->dev;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to probe PMU devices\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static struct
+xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct device_node *np, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc < 0) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
+ enable_bit = 0;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device_node *np;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_available(np))
+ continue;
+
+ if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ continue;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ continue;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
+}
+
+static const struct xgene_pmu_data xgene_pmu_data = {
+ .id = PCP_PMU_V1,
+};
+
+static const struct xgene_pmu_data xgene_pmu_v2_data = {
+ .id = PCP_PMU_V2,
+};
+
+static const struct of_device_id xgene_pmu_of_match[] = {
+ { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
+ { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_pmu_acpi_match[] = {
+ {"APMC0D5B", PCP_PMU_V1},
+ {"APMC0D5C", PCP_PMU_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
+#endif
+
+static int xgene_pmu_probe(struct platform_device *pdev)
+{
+ const struct xgene_pmu_data *dev_data;
+ const struct of_device_id *of_id;
+ struct xgene_pmu *xgene_pmu;
+ struct resource *res;
+ int irq, rc;
+ int version;
+
+ xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
+ if (!xgene_pmu)
+ return -ENOMEM;
+ xgene_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xgene_pmu);
+
+ version = -EINVAL;
+ of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
+ if (of_id) {
+ dev_data = (const struct xgene_pmu_data *) of_id->data;
+ version = dev_data->id;
+ }
+
+#ifdef CONFIG_ACPI
+ if (ACPI_COMPANION(&pdev->dev)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
+ if (acpi_id)
+ version = (int) acpi_id->driver_data;
+ }
+#endif
+ if (version < 0)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
+ INIT_LIST_HEAD(&xgene_pmu->iobpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcpmus);
+
+ xgene_pmu->version = version;
+ dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xgene_pmu->pcppmu_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
+ rc = PTR_ERR(xgene_pmu->pcppmu_csr);
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), xgene_pmu);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
+ goto err;
+ }
+
+ raw_spin_lock_init(&xgene_pmu->lock);
+
+ /* Check for active MCBs and MCUs */
+ rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
+ xgene_pmu->mcb_active_mask = 0x1;
+ xgene_pmu->mc_active_mask = 0x1;
+ }
+
+ /* Pick one core to use for cpumask attributes */
+ cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);
+
+ /* Make sure that the overflow interrupt is handled by this CPU */
+ rc = irq_set_affinity(irq, &xgene_pmu->cpu);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
+ goto err;
+ }
+
+ /* Walk through the tree for all PMU perf devices */
+ rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "No PMU perf devices found!\n");
+ goto err;
+ }
+
+ /* Enable interrupt */
+ xgene_pmu_unmask_int(xgene_pmu);
+
+ return 0;
+
+err:
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return rc;
+}
+
+static void
+xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu_dev;
+
+ list_for_each_entry(ctx, pmus, next) {
+ pmu_dev = ctx->pmu_dev;
+ if (pmu_dev->inf->csr)
+ devm_iounmap(dev, pmu_dev->inf->csr);
+ devm_kfree(dev, ctx);
+ devm_kfree(dev, pmu_dev);
+ }
+}
+
+static int xgene_pmu_remove(struct platform_device *pdev)
+{
+ struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
+
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
+
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return 0;
+}
+
+static struct platform_driver xgene_pmu_driver = {
+ .probe = xgene_pmu_probe,
+ .remove = xgene_pmu_remove,
+ .driver = {
+ .name = "xgene-pmu",
+ .of_match_table = xgene_pmu_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+ },
+};
+
+builtin_platform_driver(xgene_pmu_driver);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 19bff3a10f69..fe00f9134d51 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,6 +24,15 @@ config PHY_BCM_NS_USB2
Enable this to support Broadcom USB 2.0 PHY connected to the USB
controller on Northstar family.
+config PHY_BCM_NS_USB3
+ tristate "Broadcom Northstar USB 3.0 PHY Driver"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ select GENERIC_PHY
+ help
+ Enable this to support Broadcom USB 3.0 PHY connected to the USB
+ controller on Northstar family.
+
config PHY_BERLIN_USB
tristate "Marvell Berlin USB PHY Driver"
depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
@@ -258,7 +267,9 @@ config PHY_SUN4I_USB
depends on RESET_CONTROLLER
depends on EXTCON
depends on POWER_SUPPLY
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Enable this to support the transceiver that is part of Allwinner
sunxi SoCs.
@@ -358,6 +369,14 @@ config PHY_ROCKCHIP_USB
help
Enable this to support the Rockchip USB 2.0 PHY.
+config PHY_ROCKCHIP_INNO_USB2
+ tristate "Rockchip INNO USB2PHY Driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+
config PHY_ROCKCHIP_EMMC
tristate "Rockchip EMMC PHY Driver"
depends on ARCH_ROCKCHIP && OF
@@ -372,6 +391,23 @@ config PHY_ROCKCHIP_DP
help
Enable this to support the Rockchip Display Port PHY.
+config PHY_ROCKCHIP_PCIE
+ tristate "Rockchip PCIe PHY Driver"
+ depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the Rockchip PCIe PHY.
+
+config PHY_ROCKCHIP_TYPEC
+ tristate "Rockchip TYPEC PHY Driver"
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
+ select EXTCON
+ select GENERIC_PHY
+ select RESET_CONTROLLER
+ help
+ Enable this to support the Rockchip USB TYPEC PHY.
+
config PHY_ST_SPEAR1310_MIPHY
tristate "ST SPEAR1310-MIPHY driver"
select GENERIC_PHY
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 90ae19879b0a..a534cf5be07d 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
+obj-$(CONFIG_PHY_BCM_NS_USB3) += phy-bcm-ns-usb3.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
obj-$(CONFIG_PHY_DA8XX_USB) += phy-da8xx-usb.o
@@ -39,8 +40,11 @@ phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
+obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
diff --git a/drivers/phy/phy-bcm-ns-usb3.c b/drivers/phy/phy-bcm-ns-usb3.c
new file mode 100644
index 000000000000..f420fa4bebfc
--- /dev/null
+++ b/drivers/phy/phy-bcm-ns-usb3.c
@@ -0,0 +1,274 @@
+/*
+ * Broadcom Northstar USB 3.0 PHY Driver
+ *
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * All magic values used for initialization (and related comments) were obtained
+ * from Broadcom's SDK:
+ * Copyright (c) Broadcom Corp, 2012
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/slab.h>
+
+#define BCM_NS_USB3_MII_MNG_TIMEOUT_US 1000 /* usecs */
+
+enum bcm_ns_family {
+ BCM_NS_UNKNOWN,
+ BCM_NS_AX,
+ BCM_NS_BX,
+};
+
+struct bcm_ns_usb3 {
+ struct device *dev;
+ enum bcm_ns_family family;
+ void __iomem *dmp;
+ void __iomem *ccb_mii;
+ struct phy *phy;
+};
+
+static const struct of_device_id bcm_ns_usb3_id_table[] = {
+ {
+ .compatible = "brcm,ns-ax-usb3-phy",
+ .data = (int *)BCM_NS_AX,
+ },
+ {
+ .compatible = "brcm,ns-bx-usb3-phy",
+ .data = (int *)BCM_NS_BX,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
+
+static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr,
+ u32 mask, u32 value, unsigned long timeout)
+{
+ unsigned long deadline = jiffies + timeout;
+ u32 val;
+
+ do {
+ val = readl(addr);
+ if ((val & mask) == value)
+ return 0;
+ cpu_relax();
+ udelay(10);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
+
+ return -EBUSY;
+}
+
+static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
+{
+ return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL,
+ 0x0100, 0x0000,
+ usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US));
+}
+
+static int bcm_ns_usb3_mii_mng_write32(struct bcm_ns_usb3 *usb3, u32 value)
+{
+ int err;
+
+ err = bcm_ns_usb3_mii_mng_wait_idle(usb3);
+ if (err < 0) {
+ dev_err(usb3->dev, "Couldn't write 0x%08x value\n", value);
+ return err;
+ }
+
+ writel(value, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
+
+ return 0;
+}
+
+static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
+{
+ int err;
+
+ /* Enable MDIO. Setting MDCDIV as 26 */
+ writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
+
+ /* Wait for MDIO? */
+ udelay(2);
+
+ /* USB3 PLL Block */
+ err = bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8000);
+ if (err < 0)
+ return err;
+
+ /* Assert Ana_Pllseq start */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x58061000);
+
+ /* Assert CML Divider ratio to 26 */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x582a6400);
+
+ /* Asserting PLL Reset */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x582ec000);
+
+ /* Deasserting PLL Reset */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x582e8000);
+
+ /* Waiting MII Mgt interface idle */
+ bcm_ns_usb3_mii_mng_wait_idle(usb3);
+
+ /* Deasserting USB3 system reset */
+ writel(0, usb3->dmp + BCMA_RESET_CTL);
+
+ /* PLL frequency monitor enable */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x58069000);
+
+ /* PIPE Block */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8060);
+
+ /* CMPMAX & CMPMINTH setting */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x580af30d);
+
+ /* DEGLITCH MIN & MAX setting */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x580e6302);
+
+ /* TXPMD block */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8040);
+
+ /* Enabling SSC */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x58061003);
+
+ /* Waiting MII Mgt interface idle */
+ bcm_ns_usb3_mii_mng_wait_idle(usb3);
+
+ return 0;
+}
+
+static int bcm_ns_usb3_phy_init_ns_ax(struct bcm_ns_usb3 *usb3)
+{
+ int err;
+
+ /* Enable MDIO. Setting MDCDIV as 26 */
+ writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
+
+ /* Wait for MDIO? */
+ udelay(2);
+
+ /* PLL30 block */
+ err = bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8000);
+ if (err < 0)
+ return err;
+
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x582a6400);
+
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x587e80e0);
+
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x580a009c);
+
+ /* Enable SSC */
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8040);
+
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x580a21d3);
+
+ bcm_ns_usb3_mii_mng_write32(usb3, 0x58061003);
+
+ /* Waiting MII Mgt interface idle */
+ bcm_ns_usb3_mii_mng_wait_idle(usb3);
+
+ /* Deasserting USB3 system reset */
+ writel(0, usb3->dmp + BCMA_RESET_CTL);
+
+ return 0;
+}
+
+static int bcm_ns_usb3_phy_init(struct phy *phy)
+{
+ struct bcm_ns_usb3 *usb3 = phy_get_drvdata(phy);
+ int err;
+
+ /* Perform USB3 system soft reset */
+ writel(BCMA_RESET_CTL_RESET, usb3->dmp + BCMA_RESET_CTL);
+
+ switch (usb3->family) {
+ case BCM_NS_AX:
+ err = bcm_ns_usb3_phy_init_ns_ax(usb3);
+ break;
+ case BCM_NS_BX:
+ err = bcm_ns_usb3_phy_init_ns_bx(usb3);
+ break;
+ default:
+ WARN_ON(1);
+ err = -ENOTSUPP;
+ }
+
+ return err;
+}
+
+static const struct phy_ops ops = {
+ .init = bcm_ns_usb3_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int bcm_ns_usb3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct bcm_ns_usb3 *usb3;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+
+ usb3 = devm_kzalloc(dev, sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return -ENOMEM;
+
+ usb3->dev = dev;
+
+ of_id = of_match_device(bcm_ns_usb3_id_table, dev);
+ if (!of_id)
+ return -EINVAL;
+ usb3->family = (enum bcm_ns_family)of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmp");
+ usb3->dmp = devm_ioremap_resource(dev, res);
+ if (IS_ERR(usb3->dmp)) {
+ dev_err(dev, "Failed to map DMP regs\n");
+ return PTR_ERR(usb3->dmp);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccb-mii");
+ usb3->ccb_mii = devm_ioremap_resource(dev, res);
+ if (IS_ERR(usb3->ccb_mii)) {
+ dev_err(dev, "Failed to map ChipCommon B MII regs\n");
+ return PTR_ERR(usb3->ccb_mii);
+ }
+
+ usb3->phy = devm_phy_create(dev, NULL, &ops);
+ if (IS_ERR(usb3->phy)) {
+ dev_err(dev, "Failed to create PHY\n");
+ return PTR_ERR(usb3->phy);
+ }
+
+ phy_set_drvdata(usb3->phy, usb3);
+ platform_set_drvdata(pdev, usb3);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Broadcom Northstar USB 3.0 PHY driver\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver bcm_ns_usb3_driver = {
+ .probe = bcm_ns_usb3_probe,
+ .driver = {
+ .name = "bcm_ns_usb3",
+ .of_match_table = bcm_ns_usb3_id_table,
+ },
+};
+module_platform_driver(bcm_ns_usb3_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-bcm-ns2-pcie.c b/drivers/phy/phy-bcm-ns2-pcie.c
index 9513f7ab1eaa..4c7d11d2b378 100644
--- a/drivers/phy/phy-bcm-ns2-pcie.c
+++ b/drivers/phy/phy-bcm-ns2-pcie.c
@@ -18,11 +18,6 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
-struct ns2_pci_phy {
- struct mdio_device *mdiodev;
- struct phy *phy;
-};
-
#define BLK_ADDR_REG_OFFSET 0x1f
#define PLL_AFE1_100MHZ_BLK 0x2100
#define PLL_CLK_AMP_OFFSET 0x03
@@ -30,17 +25,17 @@ struct ns2_pci_phy {
static int ns2_pci_phy_init(struct phy *p)
{
- struct ns2_pci_phy *phy = phy_get_drvdata(p);
+ struct mdio_device *mdiodev = phy_get_drvdata(p);
int rc;
/* select the AFE 100MHz block page */
- rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
+ rc = mdiobus_write(mdiodev->bus, mdiodev->addr,
BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
if (rc)
goto err;
/* set the 100 MHz reference clock amplitude to 2.05 v */
- rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
+ rc = mdiobus_write(mdiodev->bus, mdiodev->addr,
PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
if (rc)
goto err;
@@ -48,19 +43,19 @@ static int ns2_pci_phy_init(struct phy *p)
return 0;
err:
- dev_err(&phy->mdiodev->dev, "Error %d writing to phy\n", rc);
+ dev_err(&mdiodev->dev, "Error %d writing to phy\n", rc);
return rc;
}
-static struct phy_ops ns2_pci_phy_ops = {
+static const struct phy_ops ns2_pci_phy_ops = {
.init = ns2_pci_phy_init,
+ .owner = THIS_MODULE,
};
static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
struct phy_provider *provider;
- struct ns2_pci_phy *p;
struct phy *phy;
phy = devm_phy_create(dev, dev->of_node, &ns2_pci_phy_ops);
@@ -69,16 +64,7 @@ static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
return PTR_ERR(phy);
}
- p = devm_kmalloc(dev, sizeof(struct ns2_pci_phy),
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->mdiodev = mdiodev;
- dev_set_drvdata(dev, p);
-
- p->phy = phy;
- phy_set_drvdata(phy, p);
+ phy_set_drvdata(phy, mdiodev);
provider = devm_of_phy_provider_register(&phy->dev,
of_phy_simple_xlate);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8eca906b6e70..a268f4d6f3e9 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -357,6 +357,21 @@ int phy_set_mode(struct phy *phy, enum phy_mode mode)
}
EXPORT_SYMBOL_GPL(phy_set_mode);
+int phy_reset(struct phy *phy)
+{
+ int ret;
+
+ if (!phy || !phy->ops->reset)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->reset(phy);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_reset);
+
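The new phy_reset() helper simply serializes the optional ->reset() callback under the phy mutex and returns 0 when the phy (or the callback) is absent. A minimal sketch of how a consumer driver might use it; the function name is purely illustrative and not part of this patch:

	static int example_controller_attach(struct phy *phy)
	{
		int ret;

		ret = phy_init(phy);
		if (ret)
			return ret;

		/* No-op (returns 0) if the phy provides no ->reset() op. */
		ret = phy_reset(phy);
		if (ret)
			goto err_exit;

		ret = phy_power_on(phy);
		if (ret)
			goto err_exit;

		return 0;

	err_exit:
		phy_exit(phy);
		return ret;
	}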
/**
* _of_phy_get() - lookup and obtain a reference to a phy by phandle
* @np: device_node for which to get the phy
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index b2e59b6170ac..32ae78c8ca17 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -154,7 +154,7 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
d_phy->regmap = syscon_regmap_lookup_by_compatible(
"ti,da830-cfgchip");
else
- d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon.0");
+ d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon");
if (IS_ERR(d_phy->regmap)) {
dev_err(dev, "Failed to get syscon\n");
return PTR_ERR(d_phy->regmap);
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 20696f53303f..07ed608905ac 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -249,7 +249,7 @@ static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst,
static unsigned int
exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
{
- static u32 reg;
+ u32 reg;
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
/* restore any previous reference clock settings */
@@ -295,7 +295,7 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
static unsigned int
exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
{
- static u32 reg;
+ u32 reg;
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
/* restore any previous reference clock settings */
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index c134989052f5..fe909fd8144f 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -133,11 +133,49 @@ static int omap_usb_power_on(struct phy *x)
return omap_usb_phy_power(phy, true);
}
+static int omap_usb2_disable_clocks(struct omap_usb *phy)
+{
+ clk_disable(phy->wkupclk);
+ if (!IS_ERR(phy->optclk))
+ clk_disable(phy->optclk);
+
+ return 0;
+}
+
+static int omap_usb2_enable_clocks(struct omap_usb *phy)
+{
+ int ret;
+
+ ret = clk_enable(phy->wkupclk);
+ if (ret < 0) {
+ dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
+ goto err0;
+ }
+
+ if (!IS_ERR(phy->optclk)) {
+ ret = clk_enable(phy->optclk);
+ if (ret < 0) {
+ dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ clk_disable(phy->wkupclk);
+
+err0:
+ return ret;
+}
+
static int omap_usb_init(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
u32 val;
+ omap_usb2_enable_clocks(phy);
+
if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
/*
*
@@ -155,8 +193,16 @@ static int omap_usb_init(struct phy *x)
return 0;
}
+static int omap_usb_exit(struct phy *x)
+{
+ struct omap_usb *phy = phy_get_drvdata(x);
+
+ return omap_usb2_disable_clocks(phy);
+}
+
static const struct phy_ops ops = {
.init = omap_usb_init,
+ .exit = omap_usb_exit,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
.owner = THIS_MODULE,
@@ -376,65 +422,11 @@ static int omap_usb2_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
-static int omap_usb2_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- clk_disable(phy->wkupclk);
- if (!IS_ERR(phy->optclk))
- clk_disable(phy->optclk);
-
- return 0;
-}
-
-static int omap_usb2_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
- int ret;
-
- ret = clk_enable(phy->wkupclk);
- if (ret < 0) {
- dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err0;
- }
-
- if (!IS_ERR(phy->optclk)) {
- ret = clk_enable(phy->optclk);
- if (ret < 0) {
- dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
- goto err1;
- }
- }
-
- return 0;
-
-err1:
- clk_disable(phy->wkupclk);
-
-err0:
- return ret;
-}
-
-static const struct dev_pm_ops omap_usb2_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_usb2_runtime_suspend, omap_usb2_runtime_resume,
- NULL)
-};
-
-#define DEV_PM_OPS (&omap_usb2_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove = omap_usb2_remove,
.driver = {
.name = "omap-usb2",
- .pm = DEV_PM_OPS,
.of_match_table = omap_usb2_id_table,
},
};
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 107cb57c3513..18a5b495ad65 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -283,10 +283,8 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
err = 0;
}
snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
- if (of_get_property(dev->of_node, prop_name, NULL))
- vreg->is_always_on = true;
- else
- vreg->is_always_on = false;
+ vreg->is_always_on = of_property_read_bool(dev->of_node,
+ prop_name);
}
if (!strcmp(name, "vdda-pll")) {
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 31156c9c4707..3d97eadd247d 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -280,6 +280,7 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{ .compatible = "renesas,usb2-phy-r8a7795" },
+ { .compatible = "renesas,usb2-phy-r8a7796" },
{ .compatible = "renesas,rcar-gen3-usb2-phy" },
{ }
};
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c
new file mode 100644
index 000000000000..ac203107b071
--- /dev/null
+++ b/drivers/phy/phy-rockchip-inno-usb2.c
@@ -0,0 +1,707 @@
+/*
+ * Rockchip USB2.0 PHY with Innosilicon IP block driver
+ *
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define BIT_WRITEABLE_SHIFT 16
+#define SCHEDULE_DELAY (60 * HZ)
+
+enum rockchip_usb2phy_port_id {
+ USB2PHY_PORT_OTG,
+ USB2PHY_PORT_HOST,
+ USB2PHY_NUM_PORTS,
+};
+
+enum rockchip_usb2phy_host_state {
+ PHY_STATE_HS_ONLINE = 0,
+ PHY_STATE_DISCONNECT = 1,
+ PHY_STATE_CONNECT = 2,
+ PHY_STATE_FS_LS_ONLINE = 4,
+};
+
+struct usb2phy_reg {
+ unsigned int offset;
+ unsigned int bitend;
+ unsigned int bitstart;
+ unsigned int disable;
+ unsigned int enable;
+};
+
+/**
+ * struct rockchip_usb2phy_port_cfg: usb-phy port configuration.
+ * @phy_sus: phy suspend register.
+ * @ls_det_en: linestate detection enable register.
+ * @ls_det_st: linestate detection state register.
+ * @ls_det_clr: linestate detection clear register.
+ * @utmi_ls: utmi linestate state register.
+ * @utmi_hstdet: utmi host disconnect register.
+ */
+struct rockchip_usb2phy_port_cfg {
+ struct usb2phy_reg phy_sus;
+ struct usb2phy_reg ls_det_en;
+ struct usb2phy_reg ls_det_st;
+ struct usb2phy_reg ls_det_clr;
+ struct usb2phy_reg utmi_ls;
+ struct usb2phy_reg utmi_hstdet;
+};
+
+/**
+ * struct rockchip_usb2phy_cfg: usb-phy configuration.
+ * @reg: the address offset of grf for usb-phy config.
+ * @num_ports: number of ports that the phy has.
+ * @clkout_ctl: keep on/turn off output clk of phy.
+ * @port_cfgs: usb-phy port configurations.
+ */
+struct rockchip_usb2phy_cfg {
+ unsigned int reg;
+ unsigned int num_ports;
+ struct usb2phy_reg clkout_ctl;
+ const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS];
+};
+
+/**
+ * struct rockchip_usb2phy_port: usb-phy port data.
+ * @phy: generic phy instance for this port.
+ * @port_id: flag for otg port or host port.
+ * @suspended: phy suspended flag.
+ * @ls_irq: IRQ number assigned for linestate detection.
+ * @mutex: for register updating in sm_work.
+ * @sm_work: OTG state machine work.
+ * @port_cfg: port register configuration, assigned by driver data.
+ */
+struct rockchip_usb2phy_port {
+ struct phy *phy;
+ unsigned int port_id;
+ bool suspended;
+ int ls_irq;
+ struct mutex mutex;
+ struct delayed_work sm_work;
+ const struct rockchip_usb2phy_port_cfg *port_cfg;
+};
+
+/**
+ * struct rockchip_usb2phy: usb2.0 phy driver data.
+ * @dev: pointer to the device.
+ * @grf: General Register Files regmap.
+ * @clk: clock struct of phy input clk.
+ * @clk480m: clock struct of phy output clk.
+ * @clk480m_hw: clock struct of phy output clk management.
+ * @phy_cfg: phy register configuration, assigned by driver data.
+ * @ports: phy port instances.
+ */
+struct rockchip_usb2phy {
+ struct device *dev;
+ struct regmap *grf;
+ struct clk *clk;
+ struct clk *clk480m;
+ struct clk_hw clk480m_hw;
+ const struct rockchip_usb2phy_cfg *phy_cfg;
+ struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS];
+};
+
+static inline int property_enable(struct rockchip_usb2phy *rphy,
+ const struct usb2phy_reg *reg, bool en)
+{
+ unsigned int val, mask, tmp;
+
+ tmp = en ? reg->enable : reg->disable;
+ mask = GENMASK(reg->bitend, reg->bitstart);
+ val = (tmp << reg->bitstart) | (mask << BIT_WRITEABLE_SHIFT);
+
+ return regmap_write(rphy->grf, reg->offset, val);
+}
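As a worked illustration of the hiword-masked write that property_enable() builds, take the rk3399 host-port ls_det_en descriptor { 0xe3c0, 6, 6, 0, 1 } defined later in this file; the numbers below are just the arithmetic, not additional driver code:

	/*
	 * property_enable(rphy, &ls_det_en, true) with
	 *   ls_det_en = { .offset = 0xe3c0, .bitend = 6, .bitstart = 6,
	 *                 .disable = 0, .enable = 1 }
	 * computes:
	 *   mask = GENMASK(6, 6)              = 0x0040
	 *   val  = (1 << 6) | (0x0040 << 16)  = 0x00400040
	 * and issues regmap_write(rphy->grf, 0xe3c0, 0x00400040), i.e. a
	 * single write that sets bit 6 while the upper half-word unlocks
	 * only that bit, leaving the rest of the GRF register untouched.
	 */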
+
+static inline bool property_enabled(struct rockchip_usb2phy *rphy,
+ const struct usb2phy_reg *reg)
+{
+ int ret;
+ unsigned int tmp, orig;
+ unsigned int mask = GENMASK(reg->bitend, reg->bitstart);
+
+ ret = regmap_read(rphy->grf, reg->offset, &orig);
+ if (ret)
+ return false;
+
+ tmp = (orig & mask) >> reg->bitstart;
+ return tmp == reg->enable;
+}
+
+static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
+{
+ struct rockchip_usb2phy *rphy =
+ container_of(hw, struct rockchip_usb2phy, clk480m_hw);
+ int ret;
+
+ /* turn on 480m clk output if it is off */
+ if (!property_enabled(rphy, &rphy->phy_cfg->clkout_ctl)) {
+ ret = property_enable(rphy, &rphy->phy_cfg->clkout_ctl, true);
+ if (ret)
+ return ret;
+
+ /* wait for the clock to become stable */
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
+{
+ struct rockchip_usb2phy *rphy =
+ container_of(hw, struct rockchip_usb2phy, clk480m_hw);
+
+ /* turn off 480m clk output */
+ property_enable(rphy, &rphy->phy_cfg->clkout_ctl, false);
+}
+
+static int rockchip_usb2phy_clk480m_enabled(struct clk_hw *hw)
+{
+ struct rockchip_usb2phy *rphy =
+ container_of(hw, struct rockchip_usb2phy, clk480m_hw);
+
+ return property_enabled(rphy, &rphy->phy_cfg->clkout_ctl);
+}
+
+static unsigned long
+rockchip_usb2phy_clk480m_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 480000000;
+}
+
+static const struct clk_ops rockchip_usb2phy_clkout_ops = {
+ .enable = rockchip_usb2phy_clk480m_enable,
+ .disable = rockchip_usb2phy_clk480m_disable,
+ .is_enabled = rockchip_usb2phy_clk480m_enabled,
+ .recalc_rate = rockchip_usb2phy_clk480m_recalc_rate,
+};
+
+static void rockchip_usb2phy_clk480m_unregister(void *data)
+{
+ struct rockchip_usb2phy *rphy = data;
+
+ of_clk_del_provider(rphy->dev->of_node);
+ clk_unregister(rphy->clk480m);
+}
+
+static int
+rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy)
+{
+ struct device_node *node = rphy->dev->of_node;
+ struct clk_init_data init;
+ const char *clk_name;
+ int ret;
+
+ init.flags = 0;
+ init.name = "clk_usbphy_480m";
+ init.ops = &rockchip_usb2phy_clkout_ops;
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ if (rphy->clk) {
+ clk_name = __clk_get_name(rphy->clk);
+ init.parent_names = &clk_name;
+ init.num_parents = 1;
+ } else {
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ }
+
+ rphy->clk480m_hw.init = &init;
+
+ /* register the clock */
+ rphy->clk480m = clk_register(rphy->dev, &rphy->clk480m_hw);
+ if (IS_ERR(rphy->clk480m)) {
+ ret = PTR_ERR(rphy->clk480m);
+ goto err_ret;
+ }
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, rphy->clk480m);
+ if (ret < 0)
+ goto err_clk_provider;
+
+ ret = devm_add_action(rphy->dev, rockchip_usb2phy_clk480m_unregister,
+ rphy);
+ if (ret < 0)
+ goto err_unreg_action;
+
+ return 0;
+
+err_unreg_action:
+ of_clk_del_provider(node);
+err_clk_provider:
+ clk_unregister(rphy->clk480m);
+err_ret:
+ return ret;
+}
+
+static int rockchip_usb2phy_init(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ if (rport->port_id == USB2PHY_PORT_HOST) {
+ /* clear linestate and enable linestate detect irq */
+ mutex_lock(&rport->mutex);
+
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ if (ret) {
+ mutex_unlock(&rport->mutex);
+ return ret;
+ }
+
+ ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
+ if (ret) {
+ mutex_unlock(&rport->mutex);
+ return ret;
+ }
+
+ mutex_unlock(&rport->mutex);
+ schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
+ }
+
+ return 0;
+}
+
+static int rockchip_usb2phy_power_on(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ dev_dbg(&rport->phy->dev, "port power on\n");
+
+ if (!rport->suspended)
+ return 0;
+
+ ret = clk_prepare_enable(rphy->clk480m);
+ if (ret)
+ return ret;
+
+ ret = property_enable(rphy, &rport->port_cfg->phy_sus, false);
+ if (ret)
+ return ret;
+
+ rport->suspended = false;
+ return 0;
+}
+
+static int rockchip_usb2phy_power_off(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ dev_dbg(&rport->phy->dev, "port power off\n");
+
+ if (rport->suspended)
+ return 0;
+
+ ret = property_enable(rphy, &rport->port_cfg->phy_sus, true);
+ if (ret)
+ return ret;
+
+ rport->suspended = true;
+ clk_disable_unprepare(rphy->clk480m);
+
+ return 0;
+}
+
+static int rockchip_usb2phy_exit(struct phy *phy)
+{
+ struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
+
+ if (rport->port_id == USB2PHY_PORT_HOST)
+ cancel_delayed_work_sync(&rport->sm_work);
+
+ return 0;
+}
+
+static const struct phy_ops rockchip_usb2phy_ops = {
+ .init = rockchip_usb2phy_init,
+ .exit = rockchip_usb2phy_exit,
+ .power_on = rockchip_usb2phy_power_on,
+ .power_off = rockchip_usb2phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * This function manages the host-phy port state and suspends/resumes the
+ * phy port to save power.
+ *
+ * We rely on utmi_linestate and utmi_hostdisconnect to identify whether a
+ * device is disconnected or not. It does not matter whether the disconnect
+ * was FS/LS or HS; by rearming the delayed work we only need to detect that
+ * the device has finally gone away, so that the phy port can be suspended
+ * in the _PHY_STATE_DISCONNECT_ case.
+ *
+ * NOTE: It may invoke *phy_power_off or *phy_power_on, which in turn call
+ * clk-related APIs, so do not invoke it from interrupt context directly.
+ */
+static void rockchip_usb2phy_sm_work(struct work_struct *work)
+{
+ struct rockchip_usb2phy_port *rport =
+ container_of(work, struct rockchip_usb2phy_port, sm_work.work);
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+ unsigned int sh = rport->port_cfg->utmi_hstdet.bitend -
+ rport->port_cfg->utmi_hstdet.bitstart + 1;
+ unsigned int ul, uhd, state;
+ unsigned int ul_mask, uhd_mask;
+ int ret;
+
+ mutex_lock(&rport->mutex);
+
+ ret = regmap_read(rphy->grf, rport->port_cfg->utmi_ls.offset, &ul);
+ if (ret < 0)
+ goto next_schedule;
+
+ ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset,
+ &uhd);
+ if (ret < 0)
+ goto next_schedule;
+
+ uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
+ rport->port_cfg->utmi_hstdet.bitstart);
+ ul_mask = GENMASK(rport->port_cfg->utmi_ls.bitend,
+ rport->port_cfg->utmi_ls.bitstart);
+
+ /* stitch on utmi_ls and utmi_hstdet as phy state */
+ state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
+ (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
+
+ switch (state) {
+ case PHY_STATE_HS_ONLINE:
+ dev_dbg(&rport->phy->dev, "HS online\n");
+ break;
+ case PHY_STATE_FS_LS_ONLINE:
+ /*
+ * For a FS/LS device, the online state shares the same utmi_ls and
+ * utmi_hstdet register values as the connect state, so we distinguish
+ * them via the suspended flag.
+ *
+ * There are also two sub-cases: D- line pull-up with D+ line
+ * pull-down gives state 4, while D+ line pull-up with D- line
+ * pull-down gives state 2.
+ */
+ if (!rport->suspended) {
+ /* D- line pull-up, D+ line pull-down */
+ dev_dbg(&rport->phy->dev, "FS/LS online\n");
+ break;
+ }
+ /* fall through */
+ case PHY_STATE_CONNECT:
+ if (rport->suspended) {
+ dev_dbg(&rport->phy->dev, "Connected\n");
+ rockchip_usb2phy_power_on(rport->phy);
+ rport->suspended = false;
+ } else {
+ /* D+ line pull-up, D- line pull-down */
+ dev_dbg(&rport->phy->dev, "FS/LS online\n");
+ }
+ break;
+ case PHY_STATE_DISCONNECT:
+ if (!rport->suspended) {
+ dev_dbg(&rport->phy->dev, "Disconnected\n");
+ rockchip_usb2phy_power_off(rport->phy);
+ rport->suspended = true;
+ }
+
+ /*
+ * activate the linestate detection to get the next device
+ * plug-in irq.
+ */
+ property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+ property_enable(rphy, &rport->port_cfg->ls_det_en, true);
+
+ /*
+ * we don't need to rearm the delayed work when the phy port
+ * is suspended.
+ */
+ mutex_unlock(&rport->mutex);
+ return;
+ default:
+ dev_dbg(&rport->phy->dev, "unknown phy state\n");
+ break;
+ }
+
+next_schedule:
+ mutex_unlock(&rport->mutex);
+ schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
+}
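For reference, with the port configs in this file (utmi_ls two bits wide, utmi_hstdet one bit wide, so sh = 1), the stitched value maps onto the states above as follows; this table is derived from the code and the enum, not from Rockchip documentation:

	/*
	 *   utmi_ls   utmi_hstdet   state = (ls << 1) | hstdet
	 *     00          0         0 -> PHY_STATE_HS_ONLINE
	 *     00          1         1 -> PHY_STATE_DISCONNECT
	 *     01          0         2 -> PHY_STATE_CONNECT      (D+ pull-up)
	 *     10          0         4 -> PHY_STATE_FS_LS_ONLINE (D- pull-up)
	 */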
+
+static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy_port *rport = data;
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+ if (!property_enabled(rphy, &rport->port_cfg->ls_det_st))
+ return IRQ_NONE;
+
+ mutex_lock(&rport->mutex);
+
+ /* disable linestate detect irq and clear its status */
+ property_enable(rphy, &rport->port_cfg->ls_det_en, false);
+ property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+
+ mutex_unlock(&rport->mutex);
+
+ /*
+ * For the host phy port this means a new device has been plugged in.
+ * If the phy port is suspended, rearm the work to resume it and manage
+ * its state; otherwise, nothing needs to be done.
+ */
+ if (rport->suspended && rport->port_id == USB2PHY_PORT_HOST)
+ rockchip_usb2phy_sm_work(&rport->sm_work.work);
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ struct device_node *child_np)
+{
+ int ret;
+
+ rport->port_id = USB2PHY_PORT_HOST;
+ rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_HOST];
+ rport->suspended = true;
+
+ mutex_init(&rport->mutex);
+ INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work);
+
+ rport->ls_irq = of_irq_get_byname(child_np, "linestate");
+ if (rport->ls_irq < 0) {
+ dev_err(rphy->dev, "no linestate irq provided\n");
+ return rport->ls_irq;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
+ rockchip_usb2phy_linestate_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy", rport);
+ if (ret) {
+ dev_err(rphy->dev, "failed to request irq handle\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_usb2phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct phy_provider *provider;
+ struct rockchip_usb2phy *rphy;
+ const struct rockchip_usb2phy_cfg *phy_cfgs;
+ const struct of_device_id *match;
+ unsigned int reg;
+ int index, ret;
+
+ rphy = devm_kzalloc(dev, sizeof(*rphy), GFP_KERNEL);
+ if (!rphy)
+ return -ENOMEM;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "phy configs are not assigned!\n");
+ return -EINVAL;
+ }
+
+ if (!dev->parent || !dev->parent->of_node)
+ return -EINVAL;
+
+ rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(rphy->grf))
+ return PTR_ERR(rphy->grf);
+
+ if (of_property_read_u32(np, "reg", &reg)) {
+ dev_err(dev, "the reg property is not assigned in %s node\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ rphy->dev = dev;
+ phy_cfgs = match->data;
+ platform_set_drvdata(pdev, rphy);
+
+ /* find out a proper config which can be matched with dt. */
+ index = 0;
+ while (phy_cfgs[index].reg) {
+ if (phy_cfgs[index].reg == reg) {
+ rphy->phy_cfg = &phy_cfgs[index];
+ break;
+ }
+
+ ++index;
+ }
+
+ if (!rphy->phy_cfg) {
+ dev_err(dev, "no phy-config can be matched with %s node\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ rphy->clk = of_clk_get_by_name(np, "phyclk");
+ if (!IS_ERR(rphy->clk)) {
+ clk_prepare_enable(rphy->clk);
+ } else {
+ dev_info(&pdev->dev, "no phyclk specified\n");
+ rphy->clk = NULL;
+ }
+
+ ret = rockchip_usb2phy_clk480m_register(rphy);
+ if (ret) {
+ dev_err(dev, "failed to register 480m output clock\n");
+ goto disable_clks;
+ }
+
+ index = 0;
+ for_each_available_child_of_node(np, child_np) {
+ struct rockchip_usb2phy_port *rport = &rphy->ports[index];
+ struct phy *phy;
+
+ /*
+ * This driver aims to support both the otg-port and the host-port,
+ * but the otg part is not ready yet, so this comment and the code
+ * below are interim and should be updated once otg-port support is
+ * added.
+ */
+ if (of_node_cmp(child_np->name, "host-port"))
+ goto next_child;
+
+ phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ ret = PTR_ERR(phy);
+ goto put_child;
+ }
+
+ rport->phy = phy;
+ phy_set_drvdata(rport->phy, rport);
+
+ ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np);
+ if (ret)
+ goto put_child;
+
+next_child:
+ /* to prevent out of boundary */
+ if (++index >= rphy->phy_cfg->num_ports)
+ break;
+ }
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(provider);
+
+put_child:
+ of_node_put(child_np);
+disable_clks:
+ if (rphy->clk) {
+ clk_disable_unprepare(rphy->clk);
+ clk_put(rphy->clk);
+ }
+ return ret;
+}
+
+static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
+ {
+ .reg = 0x700,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0724, 15, 15, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0728, 15, 0, 0, 0x1d1 },
+ .ls_det_en = { 0x0680, 4, 4, 0, 1 },
+ .ls_det_st = { 0x0690, 4, 4, 0, 1 },
+ .ls_det_clr = { 0x06a0, 4, 4, 0, 1 },
+ .utmi_ls = { 0x049c, 14, 13, 0, 1 },
+ .utmi_hstdet = { 0x049c, 12, 12, 0, 1 }
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
+static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
+ {
+ .reg = 0xe450,
+ .num_ports = 2,
+ .clkout_ctl = { 0xe450, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0xe458, 1, 0, 0x2, 0x1 },
+ .ls_det_en = { 0xe3c0, 6, 6, 0, 1 },
+ .ls_det_st = { 0xe3e0, 6, 6, 0, 1 },
+ .ls_det_clr = { 0xe3d0, 6, 6, 0, 1 },
+ .utmi_ls = { 0xe2ac, 22, 21, 0, 1 },
+ .utmi_hstdet = { 0xe2ac, 23, 23, 0, 1 }
+ }
+ },
+ },
+ {
+ .reg = 0xe460,
+ .num_ports = 2,
+ .clkout_ctl = { 0xe460, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0xe468, 1, 0, 0x2, 0x1 },
+ .ls_det_en = { 0xe3c0, 11, 11, 0, 1 },
+ .ls_det_st = { 0xe3e0, 11, 11, 0, 1 },
+ .ls_det_clr = { 0xe3d0, 11, 11, 0, 1 },
+ .utmi_ls = { 0xe2ac, 26, 25, 0, 1 },
+ .utmi_hstdet = { 0xe2ac, 27, 27, 0, 1 }
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
+ { .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_usb2phy_dt_match);
+
+static struct platform_driver rockchip_usb2phy_driver = {
+ .probe = rockchip_usb2phy_probe,
+ .driver = {
+ .name = "rockchip-usb2phy",
+ .of_match_table = rockchip_usb2phy_dt_match,
+ },
+};
+module_platform_driver(rockchip_usb2phy_driver);
+
+MODULE_AUTHOR("Frank Wang <frank.wang@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip USB2.0 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c
new file mode 100644
index 000000000000..a2b4c6b58aea
--- /dev/null
+++ b/drivers/phy/phy-rockchip-pcie.c
@@ -0,0 +1,357 @@
+/*
+ * Rockchip PCIe PHY driver
+ *
+ * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
+ * Copyright (C) 2016 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/*
+ * The upper 16 bits of this register are used for write protection:
+ * BIT(x) can only be written when BIT(x + 16) is set to 1.
+ */
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
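For example, using values from the definitions below, just to show the expansion:

	/*
	 * HIWORD_UPDATE(PHY_CFG_WR_ENABLE, PHY_CFG_WR_MASK, PHY_CFG_WR_SHIFT)
	 *   = (1 << 0) | (1 << 16)       = 0x00010001
	 * HIWORD_UPDATE(0x5, PHY_CFG_DATA_MASK, PHY_CFG_DATA_SHIFT)
	 *   = (0x5 << 7) | (0xf << 23)   = 0x07800280
	 * so each regmap_write() only affects the bits whose write-enable
	 * bits are set in the upper half-word.
	 */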
+
+#define PHY_MAX_LANE_NUM 4
+#define PHY_CFG_DATA_SHIFT 7
+#define PHY_CFG_ADDR_SHIFT 1
+#define PHY_CFG_DATA_MASK 0xf
+#define PHY_CFG_ADDR_MASK 0x3f
+#define PHY_CFG_RD_MASK 0x3ff
+#define PHY_CFG_WR_ENABLE 1
+#define PHY_CFG_WR_DISABLE 1
+#define PHY_CFG_WR_SHIFT 0
+#define PHY_CFG_WR_MASK 1
+#define PHY_CFG_PLL_LOCK 0x10
+#define PHY_CFG_CLK_TEST 0x10
+#define PHY_CFG_CLK_SCC 0x12
+#define PHY_CFG_SEPE_RATE BIT(3)
+#define PHY_CFG_PLL_100M BIT(3)
+#define PHY_PLL_LOCKED BIT(9)
+#define PHY_PLL_OUTPUT BIT(10)
+#define PHY_LANE_A_STATUS 0x30
+#define PHY_LANE_B_STATUS 0x31
+#define PHY_LANE_C_STATUS 0x32
+#define PHY_LANE_D_STATUS 0x33
+#define PHY_LANE_RX_DET_SHIFT 11
+#define PHY_LANE_RX_DET_TH 0x1
+#define PHY_LANE_IDLE_OFF 0x1
+#define PHY_LANE_IDLE_MASK 0x1
+#define PHY_LANE_IDLE_A_SHIFT 3
+#define PHY_LANE_IDLE_B_SHIFT 4
+#define PHY_LANE_IDLE_C_SHIFT 5
+#define PHY_LANE_IDLE_D_SHIFT 6
+
+struct rockchip_pcie_data {
+ unsigned int pcie_conf;
+ unsigned int pcie_status;
+ unsigned int pcie_laneoff;
+};
+
+struct rockchip_pcie_phy {
+ struct rockchip_pcie_data *phy_data;
+ struct regmap *reg_base;
+ struct reset_control *phy_rst;
+ struct clk *clk_pciephy_ref;
+};
+
+static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
+ u32 addr, u32 data)
+{
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(data,
+ PHY_CFG_DATA_MASK,
+ PHY_CFG_DATA_SHIFT) |
+ HIWORD_UPDATE(addr,
+ PHY_CFG_ADDR_MASK,
+ PHY_CFG_ADDR_SHIFT));
+ udelay(1);
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
+ PHY_CFG_WR_MASK,
+ PHY_CFG_WR_SHIFT));
+ udelay(1);
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
+ PHY_CFG_WR_MASK,
+ PHY_CFG_WR_SHIFT));
+}
+
+static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy,
+ u32 addr)
+{
+ u32 val;
+
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(addr,
+ PHY_CFG_RD_MASK,
+ PHY_CFG_ADDR_SHIFT));
+ regmap_read(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ &val);
+ return val;
+}
+
+static int rockchip_pcie_phy_power_off(struct phy *phy)
+{
+ struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+ int err = 0;
+
+ err = reset_control_assert(rk_phy->phy_rst);
+ if (err) {
+ dev_err(&phy->dev, "assert phy_rst err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_power_on(struct phy *phy)
+{
+ struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+ int err = 0;
+ u32 status;
+ unsigned long timeout;
+
+ err = reset_control_deassert(rk_phy->phy_rst);
+ if (err) {
+ dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
+ return err;
+ }
+
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
+ PHY_CFG_ADDR_MASK,
+ PHY_CFG_ADDR_SHIFT));
+
+ /*
+ * There is no documented timeout value for the phy operations below,
+ * so pick a generously large one and poll in a loop, breaking out
+ * early once the condition is met.
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ err = -EINVAL;
+ while (time_before(jiffies, timeout)) {
+ regmap_read(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ &status);
+ if (status & PHY_PLL_LOCKED) {
+ dev_dbg(&phy->dev, "pll locked!\n");
+ err = 0;
+ break;
+ }
+ msleep(20);
+ }
+
+ if (err) {
+ dev_err(&phy->dev, "pll lock timeout!\n");
+ goto err_pll_lock;
+ }
+
+ phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
+ phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);
+
+ err = -ETIMEDOUT;
+ while (time_before(jiffies, timeout)) {
+ regmap_read(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ &status);
+ if (!(status & PHY_PLL_OUTPUT)) {
+ dev_dbg(&phy->dev, "pll output enable done!\n");
+ err = 0;
+ break;
+ }
+ msleep(20);
+ }
+
+ if (err) {
+ dev_err(&phy->dev, "pll output enable timeout!\n");
+ goto err_pll_lock;
+ }
+
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
+ HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
+ PHY_CFG_ADDR_MASK,
+ PHY_CFG_ADDR_SHIFT));
+ err = -EINVAL;
+ while (time_before(jiffies, timeout)) {
+ regmap_read(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_status,
+ &status);
+ if (status & PHY_PLL_LOCKED) {
+ dev_dbg(&phy->dev, "pll relocked!\n");
+ err = 0;
+ break;
+ }
+ msleep(20);
+ }
+
+ if (err) {
+ dev_err(&phy->dev, "pll relock timeout!\n");
+ goto err_pll_lock;
+ }
+
+ return 0;
+
+err_pll_lock:
+ reset_control_assert(rk_phy->phy_rst);
+ return err;
+}
+
+static int rockchip_pcie_phy_init(struct phy *phy)
+{
+ struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+ int err = 0;
+
+ err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
+ if (err) {
+ dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
+ goto err_refclk;
+ }
+
+ err = reset_control_assert(rk_phy->phy_rst);
+ if (err) {
+ dev_err(&phy->dev, "assert phy_rst err %d\n", err);
+ goto err_reset;
+ }
+
+ return err;
+
+err_reset:
+ clk_disable_unprepare(rk_phy->clk_pciephy_ref);
+err_refclk:
+ return err;
+}
+
+static int rockchip_pcie_phy_exit(struct phy *phy)
+{
+ struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+ int err = 0;
+
+ clk_disable_unprepare(rk_phy->clk_pciephy_ref);
+
+ err = reset_control_deassert(rk_phy->phy_rst);
+ if (err) {
+ dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
+ goto err_reset;
+ }
+
+ return err;
+
+err_reset:
+ clk_prepare_enable(rk_phy->clk_pciephy_ref);
+ return err;
+}
+
+static const struct phy_ops ops = {
+ .init = rockchip_pcie_phy_init,
+ .exit = rockchip_pcie_phy_exit,
+ .power_on = rockchip_pcie_phy_power_on,
+ .power_off = rockchip_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct rockchip_pcie_data rk3399_pcie_data = {
+ .pcie_conf = 0xe220,
+ .pcie_status = 0xe2a4,
+ .pcie_laneoff = 0xe214,
+};
+
+static const struct of_device_id rockchip_pcie_phy_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3399-pcie-phy",
+ .data = &rk3399_pcie_data,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_pcie_phy_dt_ids);
+
+static int rockchip_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_phy *rk_phy;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct regmap *grf;
+ const struct of_device_id *of_id;
+
+ grf = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(grf)) {
+ dev_err(dev, "Cannot find GRF syscon\n");
+ return PTR_ERR(grf);
+ }
+
+ rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
+ if (!rk_phy)
+ return -ENOMEM;
+
+ of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
+ if (!of_id)
+ return -EINVAL;
+
+ rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
+ rk_phy->reg_base = grf;
+
+ rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(rk_phy->phy_rst)) {
+ if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
+ dev_err(dev,
+ "missing phy property for reset controller\n");
+ return PTR_ERR(rk_phy->phy_rst);
+ }
+
+ rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
+ if (IS_ERR(rk_phy->clk_pciephy_ref)) {
+ dev_err(dev, "refclk not found.\n");
+ return PTR_ERR(rk_phy->clk_pciephy_ref);
+ }
+
+ generic_phy = devm_phy_create(dev, dev->of_node, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, rk_phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver rockchip_pcie_driver = {
+ .probe = rockchip_pcie_phy_probe,
+ .driver = {
+ .name = "rockchip-pcie-phy",
+ .of_match_table = rockchip_pcie_phy_dt_ids,
+ },
+};
+
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-typec.c b/drivers/phy/phy-rockchip-typec.c
new file mode 100644
index 000000000000..7cfb0f8995de
--- /dev/null
+++ b/drivers/phy/phy-rockchip-typec.c
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ * Kever Yang <kever.yang@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The ROCKCHIP Type-C PHY has two PLL clocks. The first PLL clock
+ * is used for USB3, the second PLL clock is used for DP. This Type-C PHY has
+ * 3 working modes: USB3 only mode, DP only mode, and USB3+DP mode.
+ * In USB3 only mode, both PLL clocks need to be initialized; this allows the
+ * PHY to switch between USB3 and USB3+DP mode without disconnecting the USB
+ * device.
+ * In DP only mode, only the DP PLL needs to be powered on, and all 4 lanes
+ * are used for DP.
+ *
+ * This driver gets extcon cable state and property, then decides which mode to
+ * select:
+ *
+ * 1. USB3 only mode:
+ * EXTCON_USB or EXTCON_USB_HOST state is true, and
+ * EXTCON_PROP_USB_SS property is true.
+ * EXTCON_DISP_DP state is false.
+ *
+ * 2. DP only mode:
+ * EXTCON_DISP_DP state is true, and
+ * EXTCON_PROP_USB_SS property is false.
+ * If the EXTCON_USB_HOST state is also true, this is DP + USB2 mode; since
+ * the USB2 phy is a separate phy, this case is still DP only mode as far as
+ * this driver is concerned.
+ *
+ * 3. USB3+DP mode:
+ * EXTCON_USB_HOST and EXTCON_DISP_DP are both true, and
+ * EXTCON_PROP_USB_SS property is true.
+ *
+ * This Type-C PHY driver supports normal and flip orientation. The orientation
+ * is reported by the EXTCON_PROP_USB_TYPEC_POLARITY property: true is flip
+ * orientation, false is normal orientation.
+ *
+ */
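A minimal sketch of the mode decision described above, assuming the extcon states and the EXTCON_PROP_USB_SS property have already been read into plain booleans; the enum and function names here are illustrative, not part of the driver:

	enum typec_phy_mode {
		MODE_NONE,
		MODE_USB3_ONLY,
		MODE_DP_ONLY,
		MODE_USB3_AND_DP,
	};

	static enum typec_phy_mode typec_phy_pick_mode(bool usb, bool usb_host,
						       bool usb_ss, bool dp)
	{
		if (dp && usb_host && usb_ss)
			return MODE_USB3_AND_DP;	/* 3. USB3 + DP */
		if (dp && !usb_ss)
			return MODE_DP_ONLY;		/* 2. DP only (also covers DP + USB2) */
		if (!dp && (usb || usb_host) && usb_ss)
			return MODE_USB3_ONLY;		/* 1. USB3 only */
		return MODE_NONE;
	}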
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/extcon.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#define CMN_SSM_BANDGAP (0x21 << 2)
+#define CMN_SSM_BIAS (0x22 << 2)
+#define CMN_PLLSM0_PLLEN (0x29 << 2)
+#define CMN_PLLSM0_PLLPRE (0x2a << 2)
+#define CMN_PLLSM0_PLLVREF (0x2b << 2)
+#define CMN_PLLSM0_PLLLOCK (0x2c << 2)
+#define CMN_PLLSM1_PLLEN (0x31 << 2)
+#define CMN_PLLSM1_PLLPRE (0x32 << 2)
+#define CMN_PLLSM1_PLLVREF (0x33 << 2)
+#define CMN_PLLSM1_PLLLOCK (0x34 << 2)
+#define CMN_PLLSM1_USER_DEF_CTRL (0x37 << 2)
+#define CMN_ICAL_OVRD (0xc1 << 2)
+#define CMN_PLL0_VCOCAL_OVRD (0x83 << 2)
+#define CMN_PLL0_VCOCAL_INIT (0x84 << 2)
+#define CMN_PLL0_VCOCAL_ITER (0x85 << 2)
+#define CMN_PLL0_LOCK_REFCNT_START (0x90 << 2)
+#define CMN_PLL0_LOCK_PLLCNT_START (0x92 << 2)
+#define CMN_PLL0_LOCK_PLLCNT_THR (0x93 << 2)
+#define CMN_PLL0_INTDIV (0x94 << 2)
+#define CMN_PLL0_FRACDIV (0x95 << 2)
+#define CMN_PLL0_HIGH_THR (0x96 << 2)
+#define CMN_PLL0_DSM_DIAG (0x97 << 2)
+#define CMN_PLL0_SS_CTRL1 (0x98 << 2)
+#define CMN_PLL0_SS_CTRL2 (0x99 << 2)
+#define CMN_PLL1_VCOCAL_START (0xa1 << 2)
+#define CMN_PLL1_VCOCAL_OVRD (0xa3 << 2)
+#define CMN_PLL1_VCOCAL_INIT (0xa4 << 2)
+#define CMN_PLL1_VCOCAL_ITER (0xa5 << 2)
+#define CMN_PLL1_LOCK_REFCNT_START (0xb0 << 2)
+#define CMN_PLL1_LOCK_PLLCNT_START (0xb2 << 2)
+#define CMN_PLL1_LOCK_PLLCNT_THR (0xb3 << 2)
+#define CMN_PLL1_INTDIV (0xb4 << 2)
+#define CMN_PLL1_FRACDIV (0xb5 << 2)
+#define CMN_PLL1_HIGH_THR (0xb6 << 2)
+#define CMN_PLL1_DSM_DIAG (0xb7 << 2)
+#define CMN_PLL1_SS_CTRL1 (0xb8 << 2)
+#define CMN_PLL1_SS_CTRL2 (0xb9 << 2)
+#define CMN_RXCAL_OVRD (0xd1 << 2)
+#define CMN_TXPUCAL_CTRL (0xe0 << 2)
+#define CMN_TXPUCAL_OVRD (0xe1 << 2)
+#define CMN_TXPDCAL_OVRD (0xf1 << 2)
+#define CMN_DIAG_PLL0_FBH_OVRD (0x1c0 << 2)
+#define CMN_DIAG_PLL0_FBL_OVRD (0x1c1 << 2)
+#define CMN_DIAG_PLL0_OVRD (0x1c2 << 2)
+#define CMN_DIAG_PLL0_V2I_TUNE (0x1c5 << 2)
+#define CMN_DIAG_PLL0_CP_TUNE (0x1c6 << 2)
+#define CMN_DIAG_PLL0_LF_PROG (0x1c7 << 2)
+#define CMN_DIAG_PLL1_FBH_OVRD (0x1d0 << 2)
+#define CMN_DIAG_PLL1_FBL_OVRD (0x1d1 << 2)
+#define CMN_DIAG_PLL1_OVRD (0x1d2 << 2)
+#define CMN_DIAG_PLL1_V2I_TUNE (0x1d5 << 2)
+#define CMN_DIAG_PLL1_CP_TUNE (0x1d6 << 2)
+#define CMN_DIAG_PLL1_LF_PROG (0x1d7 << 2)
+#define CMN_DIAG_PLL1_PTATIS_TUNE1 (0x1d8 << 2)
+#define CMN_DIAG_PLL1_PTATIS_TUNE2 (0x1d9 << 2)
+#define CMN_DIAG_PLL1_INCLK_CTRL (0x1da << 2)
+#define CMN_DIAG_HSCLK_SEL (0x1e0 << 2)
+
+#define XCVR_PSM_RCTRL(n) ((0x4001 | ((n) << 9)) << 2)
+#define XCVR_PSM_CAL_TMR(n) ((0x4002 | ((n) << 9)) << 2)
+#define XCVR_PSM_A0IN_TMR(n) ((0x4003 | ((n) << 9)) << 2)
+#define TX_TXCC_CAL_SCLR_MULT(n) ((0x4047 | ((n) << 9)) << 2)
+#define TX_TXCC_CPOST_MULT_00(n) ((0x404c | ((n) << 9)) << 2)
+#define TX_TXCC_CPOST_MULT_01(n) ((0x404d | ((n) << 9)) << 2)
+#define TX_TXCC_CPOST_MULT_10(n) ((0x404e | ((n) << 9)) << 2)
+#define TX_TXCC_CPOST_MULT_11(n) ((0x404f | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_000(n) ((0x4050 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_001(n) ((0x4051 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_010(n) ((0x4052 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_011(n) ((0x4053 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_100(n) ((0x4054 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_101(n) ((0x4055 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_110(n) ((0x4056 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNFS_MULT_111(n) ((0x4057 | ((n) << 9)) << 2)
+#define XCVR_DIAG_PLLDRC_CTRL(n) ((0x40e0 | ((n) << 9)) << 2)
+#define XCVR_DIAG_BIDI_CTRL(n) ((0x40e8 | ((n) << 9)) << 2)
+#define XCVR_DIAG_LANE_FCM_EN_MGN(n) ((0x40f2 | ((n) << 9)) << 2)
+#define TX_PSC_A0(n) ((0x4100 | ((n) << 9)) << 2)
+#define TX_PSC_A1(n) ((0x4101 | ((n) << 9)) << 2)
+#define TX_PSC_A2(n) ((0x4102 | ((n) << 9)) << 2)
+#define TX_PSC_A3(n) ((0x4103 | ((n) << 9)) << 2)
+#define TX_RCVDET_CTRL(n) ((0x4120 | ((n) << 9)) << 2)
+#define TX_RCVDET_EN_TMR(n) ((0x4122 | ((n) << 9)) << 2)
+#define TX_RCVDET_ST_TMR(n) ((0x4123 | ((n) << 9)) << 2)
+#define TX_DIAG_TX_DRV(n) ((0x41e1 | ((n) << 9)) << 2)
+#define TX_DIAG_BGREF_PREDRV_DELAY (0x41e7 << 2)
+#define TX_ANA_CTRL_REG_1 (0x5020 << 2)
+#define TX_ANA_CTRL_REG_2 (0x5021 << 2)
+#define TXDA_COEFF_CALC_CTRL (0x5022 << 2)
+#define TX_DIG_CTRL_REG_2 (0x5024 << 2)
+#define TXDA_CYA_AUXDA_CYA (0x5025 << 2)
+#define TX_ANA_CTRL_REG_3 (0x5026 << 2)
+#define TX_ANA_CTRL_REG_4 (0x5027 << 2)
+#define TX_ANA_CTRL_REG_5 (0x5029 << 2)
+
+#define RX_PSC_A0(n) ((0x8000 | ((n) << 9)) << 2)
+#define RX_PSC_A1(n) ((0x8001 | ((n) << 9)) << 2)
+#define RX_PSC_A2(n) ((0x8002 | ((n) << 9)) << 2)
+#define RX_PSC_A3(n) ((0x8003 | ((n) << 9)) << 2)
+#define RX_PSC_CAL(n) ((0x8006 | ((n) << 9)) << 2)
+#define RX_PSC_RDY(n) ((0x8007 | ((n) << 9)) << 2)
+#define RX_IQPI_ILL_CAL_OVRD (0x8023 << 2)
+#define RX_EPI_ILL_CAL_OVRD (0x8033 << 2)
+#define RX_SDCAL0_OVRD (0x8041 << 2)
+#define RX_SDCAL1_OVRD (0x8049 << 2)
+#define RX_SLC_INIT (0x806d << 2)
+#define RX_SLC_RUN (0x806e << 2)
+#define RX_CDRLF_CNFG2 (0x8081 << 2)
+#define RX_SIGDET_HL_FILT_TMR(n) ((0x8090 | ((n) << 9)) << 2)
+#define RX_SLC_IOP0_OVRD (0x8101 << 2)
+#define RX_SLC_IOP1_OVRD (0x8105 << 2)
+#define RX_SLC_QOP0_OVRD (0x8109 << 2)
+#define RX_SLC_QOP1_OVRD (0x810d << 2)
+#define RX_SLC_EOP0_OVRD (0x8111 << 2)
+#define RX_SLC_EOP1_OVRD (0x8115 << 2)
+#define RX_SLC_ION0_OVRD (0x8119 << 2)
+#define RX_SLC_ION1_OVRD (0x811d << 2)
+#define RX_SLC_QON0_OVRD (0x8121 << 2)
+#define RX_SLC_QON1_OVRD (0x8125 << 2)
+#define RX_SLC_EON0_OVRD (0x8129 << 2)
+#define RX_SLC_EON1_OVRD (0x812d << 2)
+#define RX_SLC_IEP0_OVRD (0x8131 << 2)
+#define RX_SLC_IEP1_OVRD (0x8135 << 2)
+#define RX_SLC_QEP0_OVRD (0x8139 << 2)
+#define RX_SLC_QEP1_OVRD (0x813d << 2)
+#define RX_SLC_EEP0_OVRD (0x8141 << 2)
+#define RX_SLC_EEP1_OVRD (0x8145 << 2)
+#define RX_SLC_IEN0_OVRD (0x8149 << 2)
+#define RX_SLC_IEN1_OVRD (0x814d << 2)
+#define RX_SLC_QEN0_OVRD (0x8151 << 2)
+#define RX_SLC_QEN1_OVRD (0x8155 << 2)
+#define RX_SLC_EEN0_OVRD (0x8159 << 2)
+#define RX_SLC_EEN1_OVRD (0x815d << 2)
+#define RX_REE_CTRL_DATA_MASK(n) ((0x81bb | ((n) << 9)) << 2)
+#define RX_DIAG_SIGDET_TUNE(n) ((0x81dc | ((n) << 9)) << 2)
+#define RX_DIAG_SC2C_DELAY (0x81e1 << 2)
+
+#define PMA_LANE_CFG (0xc000 << 2)
+#define PIPE_CMN_CTRL1 (0xc001 << 2)
+#define PIPE_CMN_CTRL2 (0xc002 << 2)
+#define PIPE_COM_LOCK_CFG1 (0xc003 << 2)
+#define PIPE_COM_LOCK_CFG2 (0xc004 << 2)
+#define PIPE_RCV_DET_INH (0xc005 << 2)
+#define DP_MODE_CTL (0xc008 << 2)
+#define DP_CLK_CTL (0xc009 << 2)
+#define STS (0xc00f << 2)
+#define PHY_ISO_CMN_CTRL (0xc010 << 2)
+#define PHY_DP_TX_CTL (0xc408 << 2)
+#define PMA_CMN_CTRL1 (0xc800 << 2)
+#define PHY_PMA_ISO_CMN_CTRL (0xc810 << 2)
+#define PHY_ISOLATION_CTRL (0xc81f << 2)
+#define PHY_PMA_ISO_XCVR_CTRL(n) ((0xcc11 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_LINK_MODE(n) ((0xcc12 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_PWRST_CTRL(n) ((0xcc13 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_TX_DATA_LO(n) ((0xcc14 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_TX_DATA_HI(n) ((0xcc15 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_RX_DATA_LO(n) ((0xcc16 | ((n) << 6)) << 2)
+#define PHY_PMA_ISO_RX_DATA_HI(n) ((0xcc17 | ((n) << 6)) << 2)
+#define TX_BIST_CTRL(n) ((0x4140 | ((n) << 9)) << 2)
+#define TX_BIST_UDDWR(n) ((0x4141 | ((n) << 9)) << 2)
+
+/*
+ * Selects which PLL clock will be driven on the analog high speed
+ * clocks:
+ *   clock 0: PLL 0 div 1
+ *   clock 1: PLL 1 div 2
+ */
+#define CLK_PLL_CONFIG 0x30
+#define CLK_PLL_MASK 0x33
+
+#define CMN_READY BIT(0)
+
+#define DP_PLL_CLOCK_ENABLE BIT(2)
+#define DP_PLL_ENABLE BIT(0)
+#define DP_PLL_DATA_RATE_RBR ((2 << 12) | (4 << 8))
+#define DP_PLL_DATA_RATE_HBR ((2 << 12) | (4 << 8))
+#define DP_PLL_DATA_RATE_HBR2 ((1 << 12) | (2 << 8))
+
+#define DP_MODE_A0 BIT(4)
+#define DP_MODE_A2 BIT(6)
+#define DP_MODE_ENTER_A0 0xc101
+#define DP_MODE_ENTER_A2 0xc104
+
+#define PHY_MODE_SET_TIMEOUT 100000
+
+#define PIN_ASSIGN_C_E 0x51d9
+#define PIN_ASSIGN_D_F 0x5100
+
+#define MODE_DISCONNECT 0
+#define MODE_UFP_USB BIT(0)
+#define MODE_DFP_USB BIT(1)
+#define MODE_DFP_DP BIT(2)
+
+struct usb3phy_reg {
+ u32 offset;
+ u32 enable_bit;
+ u32 write_enable;
+};
+
+struct rockchip_usb3phy_port_cfg {
+ struct usb3phy_reg typec_conn_dir;
+ struct usb3phy_reg usb3tousb2_en;
+ struct usb3phy_reg external_psm;
+ struct usb3phy_reg pipe_status;
+};
+
+struct rockchip_typec_phy {
+ struct device *dev;
+ void __iomem *base;
+ struct extcon_dev *extcon;
+ struct regmap *grf_regs;
+ struct clk *clk_core;
+ struct clk *clk_ref;
+ struct reset_control *uphy_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *tcphy_rst;
+ struct rockchip_usb3phy_port_cfg port_cfgs;
+ /* mutex to protect access to individual PHYs */
+ struct mutex lock;
+
+ bool flip;
+ u8 mode;
+};
+
+struct phy_reg {
+ u16 value;
+ u32 addr;
+};
+
+struct phy_reg usb3_pll_cfg[] = {
+ { 0xf0, CMN_PLL0_VCOCAL_INIT },
+ { 0x18, CMN_PLL0_VCOCAL_ITER },
+ { 0xd0, CMN_PLL0_INTDIV },
+ { 0x4a4a, CMN_PLL0_FRACDIV },
+ { 0x34, CMN_PLL0_HIGH_THR },
+ { 0x1ee, CMN_PLL0_SS_CTRL1 },
+ { 0x7f03, CMN_PLL0_SS_CTRL2 },
+ { 0x20, CMN_PLL0_DSM_DIAG },
+ { 0, CMN_DIAG_PLL0_OVRD },
+ { 0, CMN_DIAG_PLL0_FBH_OVRD },
+ { 0, CMN_DIAG_PLL0_FBL_OVRD },
+ { 0x7, CMN_DIAG_PLL0_V2I_TUNE },
+ { 0x45, CMN_DIAG_PLL0_CP_TUNE },
+ { 0x8, CMN_DIAG_PLL0_LF_PROG },
+};
+
+struct phy_reg dp_pll_cfg[] = {
+ { 0xf0, CMN_PLL1_VCOCAL_INIT },
+ { 0x18, CMN_PLL1_VCOCAL_ITER },
+ { 0x30b9, CMN_PLL1_VCOCAL_START },
+ { 0x21c, CMN_PLL1_INTDIV },
+ { 0, CMN_PLL1_FRACDIV },
+ { 0x5, CMN_PLL1_HIGH_THR },
+ { 0x35, CMN_PLL1_SS_CTRL1 },
+ { 0x7f1e, CMN_PLL1_SS_CTRL2 },
+ { 0x20, CMN_PLL1_DSM_DIAG },
+ { 0, CMN_PLLSM1_USER_DEF_CTRL },
+ { 0, CMN_DIAG_PLL1_OVRD },
+ { 0, CMN_DIAG_PLL1_FBH_OVRD },
+ { 0, CMN_DIAG_PLL1_FBL_OVRD },
+ { 0x6, CMN_DIAG_PLL1_V2I_TUNE },
+ { 0x45, CMN_DIAG_PLL1_CP_TUNE },
+ { 0x8, CMN_DIAG_PLL1_LF_PROG },
+ { 0x100, CMN_DIAG_PLL1_PTATIS_TUNE1 },
+ { 0x7, CMN_DIAG_PLL1_PTATIS_TUNE2 },
+ { 0x4, CMN_DIAG_PLL1_INCLK_CTRL },
+};
+
+static void tcphy_cfg_24m(struct rockchip_typec_phy *tcphy)
+{
+ u32 i, rdata;
+
+ /*
+ * cmn_ref_clk_sel = 3, select the 24 MHz clock as the clk parent
+ * cmn_psm_clk_dig_div = 2, set the clk division to 2
+ */
+ writel(0x830, tcphy->base + PMA_CMN_CTRL1);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The following PHY configuration assumes a 24 MHz reference
+ * clock.
+ */
+ writel(0x90, tcphy->base + XCVR_DIAG_LANE_FCM_EN_MGN(i));
+ writel(0x960, tcphy->base + TX_RCVDET_EN_TMR(i));
+ writel(0x30, tcphy->base + TX_RCVDET_ST_TMR(i));
+ }
+
+ rdata = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
+ rdata &= ~CLK_PLL_MASK;
+ rdata |= CLK_PLL_CONFIG;
+ writel(rdata, tcphy->base + CMN_DIAG_HSCLK_SEL);
+}
+
+static void tcphy_cfg_usb3_pll(struct rockchip_typec_phy *tcphy)
+{
+ u32 i;
+
+ /* load the configuration of PLL0 */
+ for (i = 0; i < ARRAY_SIZE(usb3_pll_cfg); i++)
+ writel(usb3_pll_cfg[i].value,
+ tcphy->base + usb3_pll_cfg[i].addr);
+}
+
+static void tcphy_cfg_dp_pll(struct rockchip_typec_phy *tcphy)
+{
+ u32 i;
+
+ /* set the default mode to RBR */
+ writel(DP_PLL_CLOCK_ENABLE | DP_PLL_ENABLE | DP_PLL_DATA_RATE_RBR,
+ tcphy->base + DP_CLK_CTL);
+
+ /* load the configuration of PLL1 */
+ for (i = 0; i < ARRAY_SIZE(dp_pll_cfg); i++)
+ writel(dp_pll_cfg[i].value, tcphy->base + dp_pll_cfg[i].addr);
+}
+
+static void tcphy_tx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
+{
+ writel(0x7799, tcphy->base + TX_PSC_A0(lane));
+ writel(0x7798, tcphy->base + TX_PSC_A1(lane));
+ writel(0x5098, tcphy->base + TX_PSC_A2(lane));
+ writel(0x5098, tcphy->base + TX_PSC_A3(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
+ writel(0xbf, tcphy->base + XCVR_DIAG_BIDI_CTRL(lane));
+}
+
+static void tcphy_rx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
+{
+ writel(0xa6fd, tcphy->base + RX_PSC_A0(lane));
+ writel(0xa6fd, tcphy->base + RX_PSC_A1(lane));
+ writel(0xa410, tcphy->base + RX_PSC_A2(lane));
+ writel(0x2410, tcphy->base + RX_PSC_A3(lane));
+ writel(0x23ff, tcphy->base + RX_PSC_CAL(lane));
+ writel(0x13, tcphy->base + RX_SIGDET_HL_FILT_TMR(lane));
+ writel(0x03e7, tcphy->base + RX_REE_CTRL_DATA_MASK(lane));
+ writel(0x1004, tcphy->base + RX_DIAG_SIGDET_TUNE(lane));
+ writel(0x2010, tcphy->base + RX_PSC_RDY(lane));
+ writel(0xfb, tcphy->base + XCVR_DIAG_BIDI_CTRL(lane));
+}
+
+static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
+{
+ u16 rdata;
+
+ writel(0xbefc, tcphy->base + XCVR_PSM_RCTRL(lane));
+ writel(0x6799, tcphy->base + TX_PSC_A0(lane));
+ writel(0x6798, tcphy->base + TX_PSC_A1(lane));
+ writel(0x98, tcphy->base + TX_PSC_A2(lane));
+ writel(0x98, tcphy->base + TX_PSC_A3(lane));
+
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_001(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_010(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_011(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_100(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_101(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_110(lane));
+ writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_111(lane));
+ writel(0, tcphy->base + TX_TXCC_CPOST_MULT_10(lane));
+ writel(0, tcphy->base + TX_TXCC_CPOST_MULT_01(lane));
+ writel(0, tcphy->base + TX_TXCC_CPOST_MULT_00(lane));
+ writel(0, tcphy->base + TX_TXCC_CPOST_MULT_11(lane));
+
+ writel(0x128, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
+ writel(0x400, tcphy->base + TX_DIAG_TX_DRV(lane));
+
+ rdata = readl(tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
+ rdata = (rdata & 0x8fff) | 0x6000;
+ writel(rdata, tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
+}
+
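+/*
+ * The GRF bits used here are paired with write-enable bits (the usual
+ * Rockchip GRF scheme), so the value is written together with its
+ * write-enable mask and only the addressed bit is changed by the single
+ * regmap_write() below.
+ */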
+static inline int property_enable(struct rockchip_typec_phy *tcphy,
+ const struct usb3phy_reg *reg, bool en)
+{
+ u32 mask = 1 << reg->write_enable;
+ u32 val = en << reg->enable_bit;
+
+ return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
+}
+
+static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
+{
+ u16 rdata, rdata2, val;
+
+ /* disable txda_cal_latch_en so the calibration values can be rewritten */
+ rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ val = rdata & 0xdfff;
+ writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+
+ /*
+ * read the resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
+ * write it to TX_DIG_CTRL_REG_2[6:0], then delay 1 ms to let it take
+ * effect.
+ */
+ rdata = readl(tcphy->base + TX_DIG_CTRL_REG_2);
+ rdata = rdata & 0xffc0;
+
+ rdata2 = readl(tcphy->base + CMN_TXPUCAL_CTRL);
+ rdata2 = rdata2 & 0x3f;
+
+ val = rdata | rdata2;
+ writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
+ usleep_range(1000, 1050);
+
+ /*
+ * Enable the signal for the latch that samples and holds the calibration
+ * values. Activate this signal for 1 clock cycle to sample the new
+ * calibration values.
+ */
+ rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ val = rdata | 0x2000;
+ writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ usleep_range(150, 200);
+
+ /* set TX Voltage Level and TX Deemphasis to 0 */
+ writel(0, tcphy->base + PHY_DP_TX_CTL);
+ /* re-enable decap */
+ writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
+ writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
+ writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1);
+ writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1);
+
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
+
+ /*
+ * Program txda_drv_ldo_prog[15:0]: set the driver LDO voltage to
+ * 16'h1001 for DP-AUX-TX and RX.
+ */
+ writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
+
+ /* re-enable the bandgap reference for the LDO */
+ writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1);
+ writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1);
+
+ /*
+ * re-enables the transmitter pre-driver, driver data selection MUX,
+ * and receiver detect circuits.
+ */
+ writel(0x301, tcphy->base + TX_ANA_CTRL_REG_2);
+ writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
+
+ /*
+ * BIT 12 controls auxda_polarity, which selects the polarity of the
+ * xcvr:
+ * 1: reversed polarity (for Type-C, pulls up aux_p and pulls down
+ *    aux_m)
+ * 0: normal polarity (for Type-C, pulls up aux_m and pulls down
+ *    aux_p)
+ */
+ val = 0xa078;
+ if (!tcphy->flip)
+ val |= BIT(12);
+ writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
+
+ /*
+ * Controls low_power_swing_en: set the voltage swing of the driver
+ * to 400 mV. The values below are peak-to-peak (differential) values.
+ */
+ writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL);
+ writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
+
+ /* Controls tx_high_z_tm_en */
+ val = readl(tcphy->base + TX_DIG_CTRL_REG_2);
+ val |= BIT(15);
+ writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
+}
+
+static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
+{
+ struct rockchip_usb3phy_port_cfg *cfg = &tcphy->port_cfgs;
+ int ret, i;
+ u32 val;
+
+ ret = clk_prepare_enable(tcphy->clk_core);
+ if (ret) {
+ dev_err(tcphy->dev, "Failed to prepare_enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tcphy->clk_ref);
+ if (ret) {
+ dev_err(tcphy->dev, "Failed to prepare_enable ref clock\n");
+ goto err_clk_core;
+ }
+
+ reset_control_deassert(tcphy->tcphy_rst);
+
+ property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
+
+ tcphy_cfg_24m(tcphy);
+
+ if (mode == MODE_DFP_DP) {
+ tcphy_cfg_dp_pll(tcphy);
+ for (i = 0; i < 4; i++)
+ tcphy_dp_cfg_lane(tcphy, i);
+
+ writel(PIN_ASSIGN_C_E, tcphy->base + PMA_LANE_CFG);
+ } else {
+ tcphy_cfg_usb3_pll(tcphy);
+ tcphy_cfg_dp_pll(tcphy);
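+ /*
+ * The lane mapping follows the connector orientation: the USB3 TX/RX
+ * pair and the two DP lanes swap halves of the PHY when the plug is
+ * flipped.
+ */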
+ if (tcphy->flip) {
+ tcphy_tx_usb3_cfg_lane(tcphy, 3);
+ tcphy_rx_usb3_cfg_lane(tcphy, 2);
+ tcphy_dp_cfg_lane(tcphy, 0);
+ tcphy_dp_cfg_lane(tcphy, 1);
+ } else {
+ tcphy_tx_usb3_cfg_lane(tcphy, 0);
+ tcphy_rx_usb3_cfg_lane(tcphy, 1);
+ tcphy_dp_cfg_lane(tcphy, 2);
+ tcphy_dp_cfg_lane(tcphy, 3);
+ }
+
+ writel(PIN_ASSIGN_D_F, tcphy->base + PMA_LANE_CFG);
+ }
+
+ writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+
+ reset_control_deassert(tcphy->uphy_rst);
+
+ ret = readx_poll_timeout(readl, tcphy->base + PMA_CMN_CTRL1,
+ val, val & CMN_READY, 10,
+ PHY_MODE_SET_TIMEOUT);
+ if (ret < 0) {
+ dev_err(tcphy->dev, "wait pma ready timeout\n");
+ ret = -ETIMEDOUT;
+ goto err_wait_pma;
+ }
+
+ reset_control_deassert(tcphy->pipe_rst);
+
+ return 0;
+
+err_wait_pma:
+ reset_control_assert(tcphy->uphy_rst);
+ reset_control_assert(tcphy->tcphy_rst);
+ clk_disable_unprepare(tcphy->clk_ref);
+err_clk_core:
+ clk_disable_unprepare(tcphy->clk_core);
+ return ret;
+}
+
+static void tcphy_phy_deinit(struct rockchip_typec_phy *tcphy)
+{
+ reset_control_assert(tcphy->tcphy_rst);
+ reset_control_assert(tcphy->uphy_rst);
+ reset_control_assert(tcphy->pipe_rst);
+ clk_disable_unprepare(tcphy->clk_core);
+ clk_disable_unprepare(tcphy->clk_ref);
+}
+
+static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
+{
+ struct extcon_dev *edev = tcphy->extcon;
+ union extcon_property_value property;
+ unsigned int id;
+ bool dfp, ufp, dp;
+ u8 mode;
+ int ret;
+
+ ufp = extcon_get_state(edev, EXTCON_USB);
+ dfp = extcon_get_state(edev, EXTCON_USB_HOST);
+ dp = extcon_get_state(edev, EXTCON_DISP_DP);
+
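+ /* default to host (DFP) mode; overridden below for device or DP */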
+ mode = MODE_DFP_USB;
+ id = EXTCON_USB_HOST;
+
+ if (ufp) {
+ mode = MODE_UFP_USB;
+ id = EXTCON_USB;
+ } else if (dp) {
+ mode = MODE_DFP_DP;
+ id = EXTCON_DISP_DP;
+
+ ret = extcon_get_property(edev, id, EXTCON_PROP_USB_SS,
+ &property);
+ if (ret) {
+ dev_err(tcphy->dev, "get superspeed property failed\n");
+ return ret;
+ }
+
+ if (property.intval)
+ mode |= MODE_DFP_USB;
+ }
+
+ ret = extcon_get_property(edev, id, EXTCON_PROP_USB_TYPEC_POLARITY,
+ &property);
+ if (ret) {
+ dev_err(tcphy->dev, "get polarity property failed\n");
+ return ret;
+ }
+
+ tcphy->flip = property.intval ? 1 : 0;
+
+ return mode;
+}
+
+static int rockchip_usb3_phy_power_on(struct phy *phy)
+{
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+ struct rockchip_usb3phy_port_cfg *cfg = &tcphy->port_cfgs;
+ const struct usb3phy_reg *reg = &cfg->pipe_status;
+ int timeout, new_mode, ret = 0;
+ u32 val;
+
+ mutex_lock(&tcphy->lock);
+
+ new_mode = tcphy_get_mode(tcphy);
+ if (new_mode < 0) {
+ ret = new_mode;
+ goto unlock_ret;
+ }
+
+ /* DP-only mode; fall back to USB2 */
+ if (!(new_mode & (MODE_DFP_USB | MODE_UFP_USB)))
+ goto unlock_ret;
+
+ if (tcphy->mode == new_mode)
+ goto unlock_ret;
+
+ if (tcphy->mode == MODE_DISCONNECT)
+ tcphy_phy_init(tcphy, new_mode);
+
+ /* wait for the TCPHY pipe to become ready */
+ for (timeout = 0; timeout < 100; timeout++) {
+ regmap_read(tcphy->grf_regs, reg->offset, &val);
+ if (!(val & BIT(reg->enable_bit))) {
+ tcphy->mode |= new_mode & (MODE_DFP_USB | MODE_UFP_USB);
+ goto unlock_ret;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (tcphy->mode == MODE_DISCONNECT)
+ tcphy_phy_deinit(tcphy);
+
+ ret = -ETIMEDOUT;
+
+unlock_ret:
+ mutex_unlock(&tcphy->lock);
+ return ret;
+}
+
+static int rockchip_usb3_phy_power_off(struct phy *phy)
+{
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+
+ mutex_lock(&tcphy->lock);
+
+ if (tcphy->mode == MODE_DISCONNECT)
+ goto unlock;
+
+ tcphy->mode &= ~(MODE_UFP_USB | MODE_DFP_USB);
+ if (tcphy->mode == MODE_DISCONNECT)
+ tcphy_phy_deinit(tcphy);
+
+unlock:
+ mutex_unlock(&tcphy->lock);
+ return 0;
+}
+
+static const struct phy_ops rockchip_usb3_phy_ops = {
+ .power_on = rockchip_usb3_phy_power_on,
+ .power_off = rockchip_usb3_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_dp_phy_power_on(struct phy *phy)
+{
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+ int new_mode, ret = 0;
+ u32 val;
+
+ mutex_lock(&tcphy->lock);
+
+ new_mode = tcphy_get_mode(tcphy);
+ if (new_mode < 0) {
+ ret = new_mode;
+ goto unlock_ret;
+ }
+
+ if (!(new_mode & MODE_DFP_DP)) {
+ ret = -ENODEV;
+ goto unlock_ret;
+ }
+
+ if (tcphy->mode == new_mode)
+ goto unlock_ret;
+
+ /*
+ * If the PHY has already been powered on, but not in DP-only mode,
+ * re-init the PHY to set all 4 lanes to DP.
+ */
+ if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
+ tcphy_phy_deinit(tcphy);
+ tcphy_phy_init(tcphy, new_mode);
+ } else if (tcphy->mode == MODE_DISCONNECT) {
+ tcphy_phy_init(tcphy, new_mode);
+ }
+
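+ /* wait for the PHY to reach the A2 power state before AUX calibration */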
+ ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
+ val, val & DP_MODE_A2, 1000,
+ PHY_MODE_SET_TIMEOUT);
+ if (ret < 0) {
+ dev_err(tcphy->dev, "failed to wait TCPHY enter A2\n");
+ goto power_on_finish;
+ }
+
+ tcphy_dp_aux_calibration(tcphy);
+
+ writel(DP_MODE_ENTER_A0, tcphy->base + DP_MODE_CTL);
+
+ ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
+ val, val & DP_MODE_A0, 1000,
+ PHY_MODE_SET_TIMEOUT);
+ if (ret < 0) {
+ writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+ dev_err(tcphy->dev, "failed to wait TCPHY enter A0\n");
+ goto power_on_finish;
+ }
+
+ tcphy->mode |= MODE_DFP_DP;
+
+power_on_finish:
+ if (tcphy->mode == MODE_DISCONNECT)
+ tcphy_phy_deinit(tcphy);
+unlock_ret:
+ mutex_unlock(&tcphy->lock);
+ return ret;
+}
+
+static int rockchip_dp_phy_power_off(struct phy *phy)
+{
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+
+ mutex_lock(&tcphy->lock);
+
+ if (tcphy->mode == MODE_DISCONNECT)
+ goto unlock;
+
+ tcphy->mode &= ~MODE_DFP_DP;
+
+ writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+
+ if (tcphy->mode == MODE_DISCONNECT)
+ tcphy_phy_deinit(tcphy);
+
+unlock:
+ mutex_unlock(&tcphy->lock);
+ return 0;
+}
+
+static const struct phy_ops rockchip_dp_phy_ops = {
+ .power_on = rockchip_dp_phy_power_on,
+ .power_off = rockchip_dp_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int tcphy_get_param(struct device *dev,
+ struct usb3phy_reg *reg,
+ const char *name)
+{
+ u32 buffer[3];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, name, buffer, 3);
+ if (ret) {
+ dev_err(dev, "Can not parse %s\n", name);
+ return ret;
+ }
+
+ reg->offset = buffer[0];
+ reg->enable_bit = buffer[1];
+ reg->write_enable = buffer[2];
+ return 0;
+}
+
+static int tcphy_parse_dt(struct rockchip_typec_phy *tcphy,
+ struct device *dev)
+{
+ struct rockchip_usb3phy_port_cfg *cfg = &tcphy->port_cfgs;
+ int ret;
+
+ ret = tcphy_get_param(dev, &cfg->typec_conn_dir,
+ "rockchip,typec-conn-dir");
+ if (ret)
+ return ret;
+
+ ret = tcphy_get_param(dev, &cfg->usb3tousb2_en,
+ "rockchip,usb3tousb2-en");
+ if (ret)
+ return ret;
+
+ ret = tcphy_get_param(dev, &cfg->external_psm,
+ "rockchip,external-psm");
+ if (ret)
+ return ret;
+
+ ret = tcphy_get_param(dev, &cfg->pipe_status,
+ "rockchip,pipe-status");
+ if (ret)
+ return ret;
+
+ tcphy->grf_regs = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(tcphy->grf_regs)) {
+ dev_err(dev, "could not find grf dt node\n");
+ return PTR_ERR(tcphy->grf_regs);
+ }
+
+ tcphy->clk_core = devm_clk_get(dev, "tcpdcore");
+ if (IS_ERR(tcphy->clk_core)) {
+ dev_err(dev, "could not get uphy core clock\n");
+ return PTR_ERR(tcphy->clk_core);
+ }
+
+ tcphy->clk_ref = devm_clk_get(dev, "tcpdphy-ref");
+ if (IS_ERR(tcphy->clk_ref)) {
+ dev_err(dev, "could not get uphy ref clock\n");
+ return PTR_ERR(tcphy->clk_ref);
+ }
+
+ tcphy->uphy_rst = devm_reset_control_get(dev, "uphy");
+ if (IS_ERR(tcphy->uphy_rst)) {
+ dev_err(dev, "no uphy_rst reset control found\n");
+ return PTR_ERR(tcphy->uphy_rst);
+ }
+
+ tcphy->pipe_rst = devm_reset_control_get(dev, "uphy-pipe");
+ if (IS_ERR(tcphy->pipe_rst)) {
+ dev_err(dev, "no pipe_rst reset control found\n");
+ return PTR_ERR(tcphy->pipe_rst);
+ }
+
+ tcphy->tcphy_rst = devm_reset_control_get(dev, "uphy-tcphy");
+ if (IS_ERR(tcphy->tcphy_rst)) {
+ dev_err(dev, "no tcphy_rst reset control found\n");
+ return PTR_ERR(tcphy->tcphy_rst);
+ }
+
+ return 0;
+}
+
+static void typec_phy_pre_init(struct rockchip_typec_phy *tcphy)
+{
+ struct rockchip_usb3phy_port_cfg *cfg = &tcphy->port_cfgs;
+
+ reset_control_assert(tcphy->tcphy_rst);
+ reset_control_assert(tcphy->uphy_rst);
+ reset_control_assert(tcphy->pipe_rst);
+
+ /* select external psm clock */
+ property_enable(tcphy, &cfg->external_psm, 1);
+ property_enable(tcphy, &cfg->usb3tousb2_en, 0);
+
+ tcphy->mode = MODE_DISCONNECT;
+}
+
+static int rockchip_typec_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct rockchip_typec_phy *tcphy;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ int ret;
+
+ tcphy = devm_kzalloc(dev, sizeof(*tcphy), GFP_KERNEL);
+ if (!tcphy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tcphy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(tcphy->base))
+ return PTR_ERR(tcphy->base);
+
+ ret = tcphy_parse_dt(tcphy, dev);
+ if (ret)
+ return ret;
+
+ tcphy->dev = dev;
+ platform_set_drvdata(pdev, tcphy);
+ mutex_init(&tcphy->lock);
+
+ typec_phy_pre_init(tcphy);
+
+ tcphy->extcon = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(tcphy->extcon)) {
+ if (PTR_ERR(tcphy->extcon) != -EPROBE_DEFER)
+ dev_err(dev, "Invalid or missing extcon\n");
+ return PTR_ERR(tcphy->extcon);
+ }
+
+ pm_runtime_enable(dev);
+
+ for_each_available_child_of_node(np, child_np) {
+ struct phy *phy;
+
+ if (!of_node_cmp(child_np->name, "dp-port"))
+ phy = devm_phy_create(dev, child_np,
+ &rockchip_dp_phy_ops);
+ else if (!of_node_cmp(child_np->name, "usb3-port"))
+ phy = devm_phy_create(dev, child_np,
+ &rockchip_usb3_phy_ops);
+ else
+ continue;
+
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy: %s\n",
+ child_np->name);
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, tcphy);
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "Failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static int rockchip_typec_phy_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_typec_phy_dt_ids[] = {
+ { .compatible = "rockchip,rk3399-typec-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_typec_phy_dt_ids);
+
+static struct platform_driver rockchip_typec_phy_driver = {
+ .probe = rockchip_typec_phy_probe,
+ .remove = rockchip_typec_phy_remove,
+ .driver = {
+ .name = "rockchip-typec-phy",
+ .of_match_table = rockchip_typec_phy_dt_ids,
+ },
+};
+
+module_platform_driver(rockchip_typec_phy_driver);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Kever Yang <kever.yang@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip USB TYPE-C PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 2a7381f4fe4c..734987fa0ad7 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -29,6 +29,7 @@
#include <linux/reset.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/delay.h>
static int enable_usb_uart;
@@ -64,6 +65,7 @@ struct rockchip_usb_phy {
struct clk_hw clk480m_hw;
struct phy *phy;
bool uart_enabled;
+ struct reset_control *reset;
};
static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
@@ -144,9 +146,23 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
return clk_prepare_enable(phy->clk480m);
}
+static int rockchip_usb_phy_reset(struct phy *_phy)
+{
+ struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
+
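+ /* briefly pulse the optional reset line to re-initialize the PHY */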
+ if (phy->reset) {
+ reset_control_assert(phy->reset);
+ udelay(10);
+ reset_control_deassert(phy->reset);
+ }
+
+ return 0;
+}
+
static const struct phy_ops ops = {
.power_on = rockchip_usb_phy_power_on,
.power_off = rockchip_usb_phy_power_off,
+ .reset = rockchip_usb_phy_reset,
.owner = THIS_MODULE,
};
@@ -185,6 +201,10 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
return -EINVAL;
}
+ rk_phy->reset = of_reset_control_get(child, "phy-reset");
+ if (IS_ERR(rk_phy->reset))
+ rk_phy->reset = NULL;
+
rk_phy->reg_offset = reg_offset;
rk_phy->clk = of_clk_get_by_name(child, "phyclk");
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 8c7eb335622e..b9342a2af7b3 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -40,6 +40,7 @@
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/spinlock.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
@@ -50,7 +51,7 @@
#define REG_PHYCTL_A33 0x10
#define REG_PHY_UNK_H3 0x20
-#define REG_PMU_UNK_H3 0x10
+#define REG_PMU_UNK1 0x10
#define PHYCTL_DATA BIT(7)
@@ -98,6 +99,7 @@ enum sun4i_usb_phy_type {
sun6i_a31_phy,
sun8i_a33_phy,
sun8i_h3_phy,
+ sun50i_a64_phy,
};
struct sun4i_usb_phy_cfg {
@@ -106,13 +108,14 @@ struct sun4i_usb_phy_cfg {
u32 disc_thresh;
u8 phyctl_offset;
bool dedicated_clocks;
+ bool enable_pmu_unk1;
};
struct sun4i_usb_phy_data {
void __iomem *base;
const struct sun4i_usb_phy_cfg *cfg;
enum usb_dr_mode dr_mode;
- struct mutex mutex;
+ spinlock_t reg_lock; /* guard access to phyctl reg */
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
@@ -122,7 +125,6 @@ struct sun4i_usb_phy_data {
bool regulator_on;
int index;
} phys[MAX_PHYS];
- int first_phy;
/* phy0 / otg related variables */
struct extcon_dev *extcon;
bool phy0_init;
@@ -131,6 +133,7 @@ struct sun4i_usb_phy_data {
struct power_supply *vbus_power_supply;
struct notifier_block vbus_power_nb;
bool vbus_power_nb_registered;
+ bool force_session_end;
int id_det_irq;
int vbus_det_irq;
int id_det;
@@ -179,12 +182,14 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+ unsigned long flags;
int i;
- mutex_lock(&phy_data->mutex);
+ spin_lock_irqsave(&phy_data->reg_lock, flags);
- if (phy_data->cfg->type == sun8i_a33_phy) {
- /* A33 needs us to set phyctl to 0 explicitly */
+ if (phy_data->cfg->type == sun8i_a33_phy ||
+ phy_data->cfg->type == sun50i_a64_phy) {
+ /* A33 or A64 needs us to set phyctl to 0 explicitly */
writel(0, phyctl);
}
@@ -218,7 +223,8 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
data >>= 1;
}
- mutex_unlock(&phy_data->mutex);
+
+ spin_unlock_irqrestore(&phy_data->reg_lock, flags);
}
static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
@@ -258,14 +264,16 @@ static int sun4i_usb_phy_init(struct phy *_phy)
return ret;
}
+ if (data->cfg->enable_pmu_unk1) {
+ val = readl(phy->pmu + REG_PMU_UNK1);
+ writel(val & ~2, phy->pmu + REG_PMU_UNK1);
+ }
+
if (data->cfg->type == sun8i_h3_phy) {
if (phy->index == 0) {
val = readl(data->base + REG_PHY_UNK_H3);
writel(val & ~1, data->base + REG_PHY_UNK_H3);
}
-
- val = readl(phy->pmu + REG_PMU_UNK_H3);
- writel(val & ~2, phy->pmu + REG_PMU_UNK_H3);
} else {
/* Enable USB 45 Ohm resistor calibration */
if (phy->index == 0)
@@ -320,7 +328,10 @@ static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
{
switch (data->dr_mode) {
case USB_DR_MODE_OTG:
- return gpiod_get_value_cansleep(data->id_det_gpio);
+ if (data->id_det_gpio)
+ return gpiod_get_value_cansleep(data->id_det_gpio);
+ else
+ return 1; /* Fallback to peripheral mode */
case USB_DR_MODE_HOST:
return 0;
case USB_DR_MODE_PERIPHERAL:
@@ -382,8 +393,10 @@ static int sun4i_usb_phy_power_on(struct phy *_phy)
/* For phy0 only turn on Vbus if we don't have an ext. Vbus */
if (phy->index == 0 && sun4i_usb_phy0_have_vbus_det(data) &&
- data->vbus_det)
+ data->vbus_det) {
+ dev_warn(&_phy->dev, "External vbus detected, not enabling our own vbus\n");
return 0;
+ }
ret = regulator_enable(phy->vbus);
if (ret)
@@ -419,6 +432,35 @@ static int sun4i_usb_phy_power_off(struct phy *_phy)
return 0;
}
+static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+ if (phy->index != 0)
+ return -EINVAL;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ data->dr_mode = USB_DR_MODE_HOST;
+ break;
+ case PHY_MODE_USB_DEVICE:
+ data->dr_mode = USB_DR_MODE_PERIPHERAL;
+ break;
+ case PHY_MODE_USB_OTG:
+ data->dr_mode = USB_DR_MODE_OTG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_info(&_phy->dev, "Changing dr_mode to %d\n", (int)data->dr_mode);
+ data->force_session_end = true;
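+ /* re-run the ID/VBUS detection work so the new dr_mode takes effect */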
+ queue_delayed_work(system_wq, &data->detect, 0);
+
+ return 0;
+}
+
void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
@@ -432,6 +474,7 @@ static const struct phy_ops sun4i_usb_phy_ops = {
.exit = sun4i_usb_phy_exit,
.power_on = sun4i_usb_phy_power_on,
.power_off = sun4i_usb_phy_power_off,
+ .set_mode = sun4i_usb_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -440,7 +483,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
- int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
+ bool force_session_end, id_notify = false, vbus_notify = false;
+ int id_det, vbus_det;
if (phy0 == NULL)
return;
@@ -455,27 +499,30 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
return;
}
+ force_session_end = data->force_session_end;
+ data->force_session_end = false;
+
if (id_det != data->id_det) {
- /*
- * When a host cable (id == 0) gets plugged in on systems
- * without vbus detection report vbus low for long enough for
- * the musb-ip to end the current device session.
- */
+ /* id changed; force a session end if we have no vbus detection */
if (data->dr_mode == USB_DR_MODE_OTG &&
- !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+ !sun4i_usb_phy0_have_vbus_det(data))
+ force_session_end = true;
+
+ /* When entering host mode (id = 0) force end the session now */
+ if (force_session_end && id_det == 0) {
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(200);
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
}
sun4i_usb_phy0_set_id_detect(phy0, id_det);
data->id_det = id_det;
- id_notify = 1;
+ id_notify = true;
}
if (vbus_det != data->vbus_det) {
sun4i_usb_phy0_set_vbus_detect(phy0, vbus_det);
data->vbus_det = vbus_det;
- vbus_notify = 1;
+ vbus_notify = true;
}
mutex_unlock(&phy0->mutex);
@@ -483,13 +530,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
if (id_notify) {
extcon_set_cable_state_(data->extcon, EXTCON_USB_HOST,
!id_det);
- /*
- * When a host cable gets unplugged (id == 1) on systems
- * without vbus detection report vbus low for long enough to
- * the musb-ip to end the current host session.
- */
- if (data->dr_mode == USB_DR_MODE_OTG &&
- !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+ /* When leaving host mode force end the session here */
+ if (force_session_end && id_det == 1) {
mutex_lock(&phy0->mutex);
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(1000);
@@ -534,8 +576,7 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (args->args[0] < data->first_phy ||
- args->args[0] >= data->cfg->num_phys)
+ if (args->args[0] >= data->cfg->num_phys)
return ERR_PTR(-ENODEV);
return data->phys[args->args[0]].phy;
@@ -577,7 +618,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- mutex_init(&data->mutex);
+ spin_lock_init(&data->reg_lock);
INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan);
dev_set_drvdata(dev, data);
data->cfg = of_device_get_match_data(dev);
@@ -610,33 +651,18 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
- switch (data->dr_mode) {
- case USB_DR_MODE_OTG:
- /* otg without id_det makes no sense, and is not supported */
- if (!data->id_det_gpio) {
- dev_err(dev, "usb0_id_det missing or invalid\n");
- return -ENODEV;
- }
- /* fall through */
- case USB_DR_MODE_HOST:
- case USB_DR_MODE_PERIPHERAL:
- data->extcon = devm_extcon_dev_allocate(dev,
- sun4i_usb_phy0_cable);
- if (IS_ERR(data->extcon))
- return PTR_ERR(data->extcon);
- ret = devm_extcon_dev_register(dev, data->extcon);
- if (ret) {
- dev_err(dev, "failed to register extcon: %d\n", ret);
- return ret;
- }
- break;
- default:
- dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
- data->first_phy = 1;
+ data->extcon = devm_extcon_dev_allocate(dev, sun4i_usb_phy0_cable);
+ if (IS_ERR(data->extcon))
+ return PTR_ERR(data->extcon);
+
+ ret = devm_extcon_dev_register(dev, data->extcon);
+ if (ret) {
+ dev_err(dev, "failed to register extcon: %d\n", ret);
+ return ret;
}
- for (i = data->first_phy; i < data->cfg->num_phys; i++) {
+ for (i = 0; i < data->cfg->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
@@ -737,6 +763,7 @@ static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
@@ -745,6 +772,7 @@ static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
@@ -753,6 +781,7 @@ static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
@@ -761,6 +790,7 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
@@ -769,6 +799,7 @@ static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
@@ -777,6 +808,7 @@ static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
+ .enable_pmu_unk1 = false,
};
static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
@@ -784,6 +816,16 @@ static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
.type = sun8i_h3_phy,
.disc_thresh = 3,
.dedicated_clocks = true,
+ .enable_pmu_unk1 = true,
+};
+
+static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
+ .num_phys = 2,
+ .type = sun50i_a64_phy,
+ .disc_thresh = 3,
+ .phyctl_offset = REG_PHYCTL_A33,
+ .dedicated_clocks = true,
+ .enable_pmu_unk1 = true,
};
static const struct of_device_id sun4i_usb_phy_of_match[] = {
@@ -794,6 +836,8 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = {
{ .compatible = "allwinner,sun8i-a23-usb-phy", .data = &sun8i_a23_cfg },
{ .compatible = "allwinner,sun8i-a33-usb-phy", .data = &sun8i_a33_cfg },
{ .compatible = "allwinner,sun8i-h3-usb-phy", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun50i-a64-usb-phy",
+ .data = &sun50i_a64_cfg},
{ },
};
MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index d9b10a39a2cf..87e6334eab93 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -172,6 +172,7 @@ struct twl4030_usb {
int irq;
enum musb_vbus_id_status linkstat;
bool vbus_supplied;
+ bool musb_mailbox_pending;
struct delayed_work id_workaround_work;
};
@@ -439,6 +440,17 @@ static int __maybe_unused twl4030_usb_runtime_resume(struct device *dev)
(PHY_CLK_CTRL_CLOCKGATING_EN |
PHY_CLK_CTRL_CLK32K_EN));
+ twl4030_i2c_access(twl, 1);
+ twl4030_usb_set_mode(twl, twl->usb_mode);
+ if (twl->usb_mode == T2_USB_MODE_ULPI)
+ twl4030_i2c_access(twl, 0);
+ /*
+ * According to the TPS65950 TRM, there has to be a delay of at least
+ * 50 ms between setting POWER_CTRL_OTG_ENAB and enabling charging, so
+ * wait here to ensure the phy is fully enabled after resume.
+ */
+ msleep(50);
return 0;
}
@@ -459,11 +471,6 @@ static int twl4030_phy_power_on(struct phy *phy)
dev_dbg(twl->dev, "%s\n", __func__);
pm_runtime_get_sync(twl->dev);
- twl4030_i2c_access(twl, 1);
- twl4030_usb_set_mode(twl, twl->usb_mode);
- if (twl->usb_mode == T2_USB_MODE_ULPI)
- twl4030_i2c_access(twl, 0);
- twl->linkstat = MUSB_UNKNOWN;
schedule_delayed_work(&twl->id_workaround_work, HZ);
return 0;
@@ -569,9 +576,12 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
}
+ twl->musb_mailbox_pending = true;
+ }
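+ /* a failed mailbox notification is retried on the next interrupt */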
+ if (twl->musb_mailbox_pending) {
err = musb_mailbox(status);
- if (err)
- twl->linkstat = MUSB_UNKNOWN;
+ if (!err)
+ twl->musb_mailbox_pending = false;
}
/* don't schedule during sleep - irq works right then */
@@ -676,6 +686,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
twl->irq = platform_get_irq(pdev, 0);
twl->vbus_supplied = false;
twl->linkstat = MUSB_UNKNOWN;
+ twl->musb_mailbox_pending = false;
twl->phy.dev = twl->dev;
twl->phy.label = "twl4030";
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index ec83dfdbc206..873424ab0e32 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -101,7 +102,8 @@ tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
return of_find_node_by_name(np, pad->soc->lanes[index].name);
}
-int tegra_xusb_lane_lookup_function(struct tegra_xusb_lane *lane,
+static int
+tegra_xusb_lane_lookup_function(struct tegra_xusb_lane *lane,
const char *function)
{
unsigned int i;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b3fe1d339632..0e75d94972ba 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -254,6 +254,7 @@ config PINCTRL_ZYNQ
help
This selects the pinctrl driver for Xilinx Zynq.
+source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 8ebd7b8e1621..11bad373dfe0 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
obj-y += freescale/
diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig
new file mode 100644
index 000000000000..998eabef3a65
--- /dev/null
+++ b/drivers/pinctrl/aspeed/Kconfig
@@ -0,0 +1,24 @@
+config PINCTRL_ASPEED
+ bool
+ depends on (ARCH_ASPEED || COMPILE_TEST) && OF
+ depends on MFD_SYSCON
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select REGMAP_MMIO
+
+config PINCTRL_ASPEED_G4
+ bool "Aspeed G4 SoC pin control"
+ depends on (MACH_ASPEED_G4 || COMPILE_TEST) && OF
+ select PINCTRL_ASPEED
+ help
+ Say Y here to enable pin controller support for Aspeed's 4th
+ generation SoCs. GPIO is provided by a separate GPIO driver.
+
+config PINCTRL_ASPEED_G5
+ bool "Aspeed G5 SoC pin control"
+ depends on (MACH_ASPEED_G5 || COMPILE_TEST) && OF
+ select PINCTRL_ASPEED
+ help
+ Say Y here to enable pin controller support for Aspeed's 5th
+ generation SoCs. GPIO is provided by a separate GPIO driver.
diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
new file mode 100644
index 000000000000..191ed0fc1804
--- /dev/null
+++ b/drivers/pinctrl/aspeed/Makefile
@@ -0,0 +1,6 @@
+# Aspeed pinctrl support
+
+ccflags-y += -Woverride-init
+obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o
+obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
+obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
new file mode 100644
index 000000000000..a21b071ff290
--- /dev/null
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-aspeed.h"
+
+/*
+ * Uses undefined macros for symbol naming and references, eg GPIOA0, MAC1LINK,
+ * TIMER3 etc.
+ *
+ * Pins are defined in GPIO bank order:
+ *
+ * GPIOA0: 0
+ * ...
+ * GPIOA7: 7
+ * GPIOB0: 8
+ * ...
+ * GPIOZ7: 207
+ * GPIOAA0: 208
+ * ...
+ * GPIOAB3: 219
+ *
+ * Not all pins have their signals defined (yet).
+ */
+
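+/*
+ * Naming convention (inferred from usage here; the macros are defined in
+ * pinctrl-aspeed.h): SSSF_PIN_DECL() declares a ball carrying a single
+ * signal besides its GPIO, MS_PIN_DECL() declares a ball multiplexing
+ * several signals, and the SIG_EXPR_*() macros build the mux descriptors
+ * those declarations reference.
+ */
+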
+#define A4 2
+SSSF_PIN_DECL(A4, GPIOA2, TIMER3, SIG_DESC_SET(SCU80, 2));
+
+#define I2C9_DESC SIG_DESC_SET(SCU90, 22)
+
+#define C5 4
+SIG_EXPR_LIST_DECL_SINGLE(SCL9, I2C9, I2C9_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER5, TIMER5, SIG_DESC_SET(SCU80, 4));
+MS_PIN_DECL(C5, GPIOA4, SCL9, TIMER5);
+
+FUNC_GROUP_DECL(TIMER5, C5);
+
+#define B4 5
+SIG_EXPR_LIST_DECL_SINGLE(SDA9, I2C9, I2C9_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER6, TIMER6, SIG_DESC_SET(SCU80, 5));
+MS_PIN_DECL(B4, GPIOA5, SDA9, TIMER6);
+
+FUNC_GROUP_DECL(TIMER6, B4);
+FUNC_GROUP_DECL(I2C9, C5, B4);
+
+#define MDIO2_DESC SIG_DESC_SET(SCU90, 2)
+
+#define A3 6
+SIG_EXPR_LIST_DECL_SINGLE(MDC2, MDIO2, MDIO2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER7, TIMER7, SIG_DESC_SET(SCU80, 6));
+MS_PIN_DECL(A3, GPIOA6, MDC2, TIMER7);
+
+FUNC_GROUP_DECL(TIMER7, A3);
+
+#define D5 7
+SIG_EXPR_LIST_DECL_SINGLE(MDIO2, MDIO2, MDIO2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER8, TIMER8, SIG_DESC_SET(SCU80, 7));
+MS_PIN_DECL(D5, GPIOA7, MDIO2, TIMER8);
+
+FUNC_GROUP_DECL(TIMER8, D5);
+FUNC_GROUP_DECL(MDIO2, A3, D5);
+
+#define H19 13
+#define H19_DESC SIG_DESC_SET(SCU80, 13)
+SIG_EXPR_LIST_DECL_SINGLE(LPCPD, LPCPD, H19_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LPCSMI, LPCSMI, H19_DESC);
+MS_PIN_DECL(H19, GPIOB5, LPCPD, LPCSMI);
+
+FUNC_GROUP_DECL(LPCPD, H19);
+FUNC_GROUP_DECL(LPCSMI, H19);
+
+#define H20 14
+SSSF_PIN_DECL(H20, GPIOB6, LPCPME, SIG_DESC_SET(SCU80, 14));
+
+#define SD1_DESC SIG_DESC_SET(SCU90, 0)
+#define I2C10_DESC SIG_DESC_SET(SCU90, 23)
+
+#define C4 16
+SIG_EXPR_LIST_DECL_SINGLE(SD1CLK, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL10, I2C10, I2C10_DESC);
+MS_PIN_DECL(C4, GPIOC0, SD1CLK, SCL10);
+
+#define B3 17
+SIG_EXPR_LIST_DECL_SINGLE(SD1CMD, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA10, I2C10, I2C10_DESC);
+MS_PIN_DECL(B3, GPIOC1, SD1CMD, SDA10);
+
+FUNC_GROUP_DECL(I2C10, C4, B3);
+
+#define I2C11_DESC SIG_DESC_SET(SCU90, 24)
+
+#define A2 18
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT0, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL11, I2C11, I2C11_DESC);
+MS_PIN_DECL(A2, GPIOC2, SD1DAT0, SCL11);
+
+#define E5 19
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT1, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA11, I2C11, I2C11_DESC);
+MS_PIN_DECL(E5, GPIOC3, SD1DAT1, SDA11);
+
+FUNC_GROUP_DECL(I2C11, A2, E5);
+
+#define I2C12_DESC SIG_DESC_SET(SCU90, 25)
+
+#define D4 20
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT2, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL12, I2C12, I2C12_DESC);
+MS_PIN_DECL(D4, GPIOC4, SD1DAT2, SCL12);
+
+#define C3 21
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT3, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA12, I2C12, I2C12_DESC);
+MS_PIN_DECL(C3, GPIOC5, SD1DAT3, SDA12);
+
+FUNC_GROUP_DECL(I2C12, D4, C3);
+
+#define I2C13_DESC SIG_DESC_SET(SCU90, 26)
+
+#define B2 22
+SIG_EXPR_LIST_DECL_SINGLE(SD1CD, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL13, I2C13, I2C13_DESC);
+MS_PIN_DECL(B2, GPIOC6, SD1CD, SCL13);
+
+#define A1 23
+SIG_EXPR_LIST_DECL_SINGLE(SD1WP, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA13, I2C13, I2C13_DESC);
+MS_PIN_DECL(A1, GPIOC7, SD1WP, SDA13);
+
+FUNC_GROUP_DECL(I2C13, B2, A1);
+FUNC_GROUP_DECL(SD1, C4, B3, A2, E5, D4, C3, B2, A1);
+
+#define SD2_DESC SIG_DESC_SET(SCU90, 1)
+#define GPID_DESC SIG_DESC_SET(HW_STRAP1, 21)
+#define GPID0_DESC SIG_DESC_SET(SCU8C, 8)
+
+#define A18 24
+SIG_EXPR_LIST_DECL_SINGLE(SD2CLK, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID0IN, GPID0, GPID0_DESC);
+SIG_EXPR_DECL(GPID0IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID0IN, GPID0, GPID);
+MS_PIN_DECL(A18, GPIOD0, SD2CLK, GPID0IN);
+
+#define D16 25
+SIG_EXPR_LIST_DECL_SINGLE(SD2CMD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID0OUT, GPID0, GPID0_DESC);
+SIG_EXPR_DECL(GPID0OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID0OUT, GPID0, GPID);
+MS_PIN_DECL(D16, GPIOD1, SD2CMD, GPID0OUT);
+
+FUNC_GROUP_DECL(GPID0, A18, D16);
+
+#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 22)
+#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
+#define GPIE2_DESC SIG_DESC_SET(SCU8C, 13)
+#define GPIE4_DESC SIG_DESC_SET(SCU8C, 14)
+#define GPIE6_DESC SIG_DESC_SET(SCU8C, 15)
+
+#define D15 32
+SIG_EXPR_LIST_DECL_SINGLE(NCTS3, NCTS3, SIG_DESC_SET(SCU80, 16));
+SIG_EXPR_DECL(GPIE0IN, GPIE0, GPIE0_DESC);
+SIG_EXPR_DECL(GPIE0IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE0IN, GPIE0, GPIE);
+MS_PIN_DECL(D15, GPIOE0, NCTS3, GPIE0IN);
+
+FUNC_GROUP_DECL(NCTS3, D15);
+
+#define C15 33
+SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
+SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
+SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
+MS_PIN_DECL(C15, GPIOE1, NDCD3, GPIE0OUT);
+
+FUNC_GROUP_DECL(NDCD3, C15);
+FUNC_GROUP_DECL(GPIE0, D15, C15);
+
+#define B15 34
+SIG_EXPR_LIST_DECL_SINGLE(NDSR3, NDSR3, SIG_DESC_SET(SCU80, 18));
+SIG_EXPR_DECL(GPIE2IN, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2IN, GPIE2, GPIE);
+MS_PIN_DECL(B15, GPIOE2, NDSR3, GPIE2IN);
+
+FUNC_GROUP_DECL(NDSR3, B15);
+
+#define A15 35
+SIG_EXPR_LIST_DECL_SINGLE(NRI3, NRI3, SIG_DESC_SET(SCU80, 19));
+SIG_EXPR_DECL(GPIE2OUT, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2OUT, GPIE2, GPIE);
+MS_PIN_DECL(A15, GPIOE3, NRI3, GPIE2OUT);
+
+FUNC_GROUP_DECL(NRI3, A15);
+FUNC_GROUP_DECL(GPIE2, B15, A15);
+
+#define E14 36
+SIG_EXPR_LIST_DECL_SINGLE(NDTR3, NDTR3, SIG_DESC_SET(SCU80, 20));
+SIG_EXPR_DECL(GPIE4IN, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4IN, GPIE4, GPIE);
+MS_PIN_DECL(E14, GPIOE4, NDTR3, GPIE4IN);
+
+FUNC_GROUP_DECL(NDTR3, E14);
+
+#define D14 37
+SIG_EXPR_LIST_DECL_SINGLE(NRTS3, NRTS3, SIG_DESC_SET(SCU80, 21));
+SIG_EXPR_DECL(GPIE4OUT, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4OUT, GPIE4, GPIE);
+MS_PIN_DECL(D14, GPIOE5, NRTS3, GPIE4OUT);
+
+FUNC_GROUP_DECL(NRTS3, D14);
+FUNC_GROUP_DECL(GPIE4, E14, D14);
+
+#define C14 38
+SIG_EXPR_LIST_DECL_SINGLE(TXD3, TXD3, SIG_DESC_SET(SCU80, 22));
+SIG_EXPR_DECL(GPIE6IN, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6IN, GPIE6, GPIE);
+MS_PIN_DECL(C14, GPIOE6, TXD3, GPIE6IN);
+
+FUNC_GROUP_DECL(TXD3, C14);
+
+#define B14 39
+SIG_EXPR_LIST_DECL_SINGLE(RXD3, RXD3, SIG_DESC_SET(SCU80, 23));
+SIG_EXPR_DECL(GPIE6OUT, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6OUT, GPIE6, GPIE);
+MS_PIN_DECL(B14, GPIOE7, RXD3, GPIE6OUT);
+
+FUNC_GROUP_DECL(RXD3, B14);
+FUNC_GROUP_DECL(GPIE6, C14, B14);
+
+#define D18 40
+SSSF_PIN_DECL(D18, GPIOF0, NCTS4, SIG_DESC_SET(SCU80, 24));
+
+#define ACPI_DESC SIG_DESC_BIT(HW_STRAP1, 19, 0)
+
+#define B19 41
+SIG_EXPR_LIST_DECL_SINGLE(NDCD4, NDCD4, SIG_DESC_SET(SCU80, 25));
+SIG_EXPR_DECL(SIOPBI, SIOPBI, SIG_DESC_SET(SCUA4, 12));
+SIG_EXPR_DECL(SIOPBI, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBI, SIOPBI, ACPI);
+MS_PIN_DECL(B19, GPIOF1, NDCD4, SIOPBI);
+FUNC_GROUP_DECL(NDCD4, B19);
+FUNC_GROUP_DECL(SIOPBI, B19);
+
+#define D17 43
+SIG_EXPR_LIST_DECL_SINGLE(NRI4, NRI4, SIG_DESC_SET(SCU80, 27));
+SIG_EXPR_DECL(SIOPBO, SIOPBO, SIG_DESC_SET(SCUA4, 14));
+SIG_EXPR_DECL(SIOPBO, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBO, SIOPBO, ACPI);
+MS_PIN_DECL(D17, GPIOF3, NRI4, SIOPBO);
+FUNC_GROUP_DECL(NRI4, D17);
+FUNC_GROUP_DECL(SIOPBO, D17);
+
+FUNC_GROUP_DECL(ACPI, B19, D17);
+
+#define E16 46
+SSSF_PIN_DECL(E16, GPIOF6, TXD4, SIG_DESC_SET(SCU80, 30));
+
+#define C17 47
+SSSF_PIN_DECL(C17, GPIOF7, RXD4, SIG_DESC_SET(SCU80, 31));
+
+#define AA22 54
+SSSF_PIN_DECL(AA22, GPIOG6, FLBUSY, SIG_DESC_SET(SCU84, 6));
+
+#define U18 55
+SSSF_PIN_DECL(U18, GPIOG7, FLWP, SIG_DESC_SET(SCU84, 7));
+
+#define UART6_DESC SIG_DESC_SET(SCU90, 7)
+#define ROM16_DESC SIG_DESC_SET(SCU90, 6)
+#define FLASH_WIDE SIG_DESC_SET(HW_STRAP1, 4)
+#define BOOT_SRC_NOR { HW_STRAP1, GENMASK(1, 0), 0, 0 }
+
+#define A8 56
+SIG_EXPR_DECL(ROMD8, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD8, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD8, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NCTS6, NCTS6, UART6_DESC);
+MS_PIN_DECL(A8, GPIOH0, ROMD8, NCTS6);
+
+#define C7 57
+SIG_EXPR_DECL(ROMD9, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD9, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD9, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD6, NDCD6, UART6_DESC);
+MS_PIN_DECL(C7, GPIOH1, ROMD9, NDCD6);
+
+#define B7 58
+SIG_EXPR_DECL(ROMD10, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD10, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD10, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR6, NDSR6, UART6_DESC);
+MS_PIN_DECL(B7, GPIOH2, ROMD10, NDSR6);
+
+#define A7 59
+SIG_EXPR_DECL(ROMD11, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD11, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD11, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NRI6, NRI6, UART6_DESC);
+MS_PIN_DECL(A7, GPIOH3, ROMD11, NRI6);
+
+#define D7 60
+SIG_EXPR_DECL(ROMD12, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD12, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD12, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR6, NDTR6, UART6_DESC);
+MS_PIN_DECL(D7, GPIOH4, ROMD12, NDTR6);
+
+#define B6 61
+SIG_EXPR_DECL(ROMD13, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD13, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD13, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS6, NRTS6, UART6_DESC);
+MS_PIN_DECL(B6, GPIOH5, ROMD13, NRTS6);
+
+#define A6 62
+SIG_EXPR_DECL(ROMD14, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD14, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD14, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(TXD6, TXD6, UART6_DESC);
+MS_PIN_DECL(A6, GPIOH6, ROMD14, TXD6);
+
+#define E7 63
+SIG_EXPR_DECL(ROMD15, ROM16, ROM16_DESC);
+SIG_EXPR_DECL(ROMD15, ROM16S, FLASH_WIDE, BOOT_SRC_NOR);
+SIG_EXPR_LIST_DECL_DUAL(ROMD15, ROM16, ROM16S);
+SIG_EXPR_LIST_DECL_SINGLE(RXD6, RXD6, UART6_DESC);
+MS_PIN_DECL(E7, GPIOH7, ROMD15, RXD6);
+
+FUNC_GROUP_DECL(UART6, A8, C7, B7, A7, D7, B6, A6, E7);
+
+#define J3 75
+SSSF_PIN_DECL(J3, GPIOJ3, SGPMI, SIG_DESC_SET(SCU84, 11));
+
+#define T4 76
+SSSF_PIN_DECL(T4, GPIOJ4, VGAHS, SIG_DESC_SET(SCU84, 12));
+
+#define U2 77
+SSSF_PIN_DECL(U2, GPIOJ5, VGAVS, SIG_DESC_SET(SCU84, 13));
+
+#define T2 78
+SSSF_PIN_DECL(T2, GPIOJ6, DDCCLK, SIG_DESC_SET(SCU84, 14));
+
+#define T1 79
+SSSF_PIN_DECL(T1, GPIOJ7, DDCDAT, SIG_DESC_SET(SCU84, 15));
+
+#define I2C5_DESC SIG_DESC_SET(SCU90, 18)
+
+#define E3 80
+SIG_EXPR_LIST_DECL_SINGLE(SCL5, I2C5, I2C5_DESC);
+SS_PIN_DECL(E3, GPIOK0, SCL5);
+
+#define D2 81
+SIG_EXPR_LIST_DECL_SINGLE(SDA5, I2C5, I2C5_DESC);
+SS_PIN_DECL(D2, GPIOK1, SDA5);
+
+FUNC_GROUP_DECL(I2C5, E3, D2);
+
+#define I2C6_DESC SIG_DESC_SET(SCU90, 19)
+
+#define C1 82
+SIG_EXPR_LIST_DECL_SINGLE(SCL6, I2C6, I2C6_DESC);
+SS_PIN_DECL(C1, GPIOK2, SCL6);
+
+#define F4 83
+SIG_EXPR_LIST_DECL_SINGLE(SDA6, I2C6, I2C6_DESC);
+SS_PIN_DECL(F4, GPIOK3, SDA6);
+
+FUNC_GROUP_DECL(I2C6, C1, F4);
+
+#define I2C7_DESC SIG_DESC_SET(SCU90, 20)
+
+#define E2 84
+SIG_EXPR_LIST_DECL_SINGLE(SCL7, I2C7, I2C7_DESC);
+SS_PIN_DECL(E2, GPIOK4, SCL7);
+
+#define D1 85
+SIG_EXPR_LIST_DECL_SINGLE(SDA7, I2C7, I2C7_DESC);
+SS_PIN_DECL(D1, GPIOK5, SDA7);
+
+FUNC_GROUP_DECL(I2C7, E2, D1);
+
+#define I2C8_DESC SIG_DESC_SET(SCU90, 21)
+
+#define G5 86
+SIG_EXPR_LIST_DECL_SINGLE(SCL8, I2C8, I2C8_DESC);
+SS_PIN_DECL(G5, GPIOK6, SCL8);
+
+#define F3 87
+SIG_EXPR_LIST_DECL_SINGLE(SDA8, I2C8, I2C8_DESC);
+SS_PIN_DECL(F3, GPIOK7, SDA8);
+
+FUNC_GROUP_DECL(I2C8, G5, F3);
+
+#define U1 88
+SSSF_PIN_DECL(U1, GPIOL0, NCTS1, SIG_DESC_SET(SCU84, 16));
+
+#define VPI18_DESC { SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
+#define VPI30_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+
+#define T5 89
+#define T5_DESC SIG_DESC_SET(SCU84, 17)
+SIG_EXPR_DECL(VPIDE, VPI18, VPI18_DESC, T5_DESC);
+SIG_EXPR_DECL(VPIDE, VPI24, VPI24_DESC, T5_DESC);
+SIG_EXPR_DECL(VPIDE, VPI30, VPI30_DESC, T5_DESC);
+SIG_EXPR_LIST_DECL(VPIDE, SIG_EXPR_PTR(VPIDE, VPI18),
+ SIG_EXPR_PTR(VPIDE, VPI24),
+ SIG_EXPR_PTR(VPIDE, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDCD1, NDCD1, T5_DESC);
+MS_PIN_DECL(T5, GPIOL1, VPIDE, NDCD1);
+FUNC_GROUP_DECL(NDCD1, T5);
+
+#define U3 90
+#define U3_DESC SIG_DESC_SET(SCU84, 18)
+SIG_EXPR_DECL(VPIODD, VPI18, VPI18_DESC, U3_DESC);
+SIG_EXPR_DECL(VPIODD, VPI24, VPI24_DESC, U3_DESC);
+SIG_EXPR_DECL(VPIODD, VPI30, VPI30_DESC, U3_DESC);
+SIG_EXPR_LIST_DECL(VPIODD, SIG_EXPR_PTR(VPIODD, VPI18),
+ SIG_EXPR_PTR(VPIODD, VPI24),
+ SIG_EXPR_PTR(VPIODD, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDSR1, NDSR1, U3_DESC);
+MS_PIN_DECL(U3, GPIOL2, VPIODD, NDSR1);
+FUNC_GROUP_DECL(NDSR1, U3);
+
+#define V1 91
+#define V1_DESC SIG_DESC_SET(SCU84, 19)
+SIG_EXPR_DECL(VPIHS, VPI18, VPI18_DESC, V1_DESC);
+SIG_EXPR_DECL(VPIHS, VPI24, VPI24_DESC, V1_DESC);
+SIG_EXPR_DECL(VPIHS, VPI30, VPI30_DESC, V1_DESC);
+SIG_EXPR_LIST_DECL(VPIHS, SIG_EXPR_PTR(VPIHS, VPI18),
+ SIG_EXPR_PTR(VPIHS, VPI24),
+ SIG_EXPR_PTR(VPIHS, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRI1, NRI1, V1_DESC);
+MS_PIN_DECL(V1, GPIOL3, VPIHS, NRI1);
+FUNC_GROUP_DECL(NRI1, V1);
+
+#define U4 92
+#define U4_DESC SIG_DESC_SET(SCU84, 20)
+SIG_EXPR_DECL(VPIVS, VPI18, VPI18_DESC, U4_DESC);
+SIG_EXPR_DECL(VPIVS, VPI24, VPI24_DESC, U4_DESC);
+SIG_EXPR_DECL(VPIVS, VPI30, VPI30_DESC, U4_DESC);
+SIG_EXPR_LIST_DECL(VPIVS, SIG_EXPR_PTR(VPIVS, VPI18),
+ SIG_EXPR_PTR(VPIVS, VPI24),
+ SIG_EXPR_PTR(VPIVS, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDTR1, NDTR1, U4_DESC);
+MS_PIN_DECL(U4, GPIOL4, VPIVS, NDTR1);
+FUNC_GROUP_DECL(NDTR1, U4);
+
+#define V2 93
+#define V2_DESC SIG_DESC_SET(SCU84, 21)
+SIG_EXPR_DECL(VPICLK, VPI18, VPI18_DESC, V2_DESC);
+SIG_EXPR_DECL(VPICLK, VPI24, VPI24_DESC, V2_DESC);
+SIG_EXPR_DECL(VPICLK, VPI30, VPI30_DESC, V2_DESC);
+SIG_EXPR_LIST_DECL(VPICLK, SIG_EXPR_PTR(VPICLK, VPI18),
+ SIG_EXPR_PTR(VPICLK, VPI24),
+ SIG_EXPR_PTR(VPICLK, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRTS1, NRTS1, V2_DESC);
+MS_PIN_DECL(V2, GPIOL5, VPICLK, NRTS1);
+FUNC_GROUP_DECL(NRTS1, V2);
+
+#define W1 94
+#define W1_DESC SIG_DESC_SET(SCU84, 22)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB0, VPI30, VPI30_DESC, W1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TXD1, TXD1, W1_DESC);
+MS_PIN_DECL(W1, GPIOL6, VPIB0, TXD1);
+FUNC_GROUP_DECL(TXD1, W1);
+
+#define U5 95
+#define U5_DESC SIG_DESC_SET(SCU84, 23)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB1, VPI30, VPI30_DESC, U5_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RXD1, RXD1, U5_DESC);
+MS_PIN_DECL(U5, GPIOL7, VPIB1, RXD1);
+FUNC_GROUP_DECL(RXD1, U5);
+
+#define W4 104
+#define W4_DESC SIG_DESC_SET(SCU88, 0)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG0, VPI30, VPI30_DESC, W4_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM0, PWM0, W4_DESC);
+MS_PIN_DECL(W4, GPION0, VPIG0, PWM0);
+FUNC_GROUP_DECL(PWM0, W4);
+
+#define Y3 105
+#define Y3_DESC SIG_DESC_SET(SCU88, 1)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG1, VPI30, VPI30_DESC, Y3_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM1, PWM1, Y3_DESC);
+MS_PIN_DECL(Y3, GPION1, VPIG1, PWM1);
+FUNC_GROUP_DECL(PWM1, Y3);
+
+#define AA2 106
+#define AA2_DESC SIG_DESC_SET(SCU88, 2)
+SIG_EXPR_DECL(VPIG2, VPI18, VPI18_DESC, AA2_DESC);
+SIG_EXPR_DECL(VPIG2, VPI24, VPI24_DESC, AA2_DESC);
+SIG_EXPR_DECL(VPIG2, VPI30, VPI30_DESC, AA2_DESC);
+SIG_EXPR_LIST_DECL(VPIG2, SIG_EXPR_PTR(VPIG2, VPI18),
+ SIG_EXPR_PTR(VPIG2, VPI24),
+ SIG_EXPR_PTR(VPIG2, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(PWM2, PWM2, AA2_DESC);
+MS_PIN_DECL(AA2, GPION2, VPIG2, PWM2);
+FUNC_GROUP_DECL(PWM2, AA2);
+
+#define AB1 107
+#define AB1_DESC SIG_DESC_SET(SCU88, 3)
+SIG_EXPR_DECL(VPIG3, VPI18, VPI18_DESC, AB1_DESC);
+SIG_EXPR_DECL(VPIG3, VPI24, VPI24_DESC, AB1_DESC);
+SIG_EXPR_DECL(VPIG3, VPI30, VPI30_DESC, AB1_DESC);
+SIG_EXPR_LIST_DECL(VPIG3, SIG_EXPR_PTR(VPIG3, VPI18),
+ SIG_EXPR_PTR(VPIG3, VPI24),
+ SIG_EXPR_PTR(VPIG3, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(PWM3, PWM3, AB1_DESC);
+MS_PIN_DECL(AB1, GPION3, VPIG3, PWM3);
+FUNC_GROUP_DECL(PWM3, AB1);
+
+#define W5 108
+#define W5_DESC SIG_DESC_SET(SCU88, 4)
+SIG_EXPR_DECL(VPIG4, VPI18, VPI18_DESC, W5_DESC);
+SIG_EXPR_DECL(VPIG4, VPI24, VPI24_DESC, W5_DESC);
+SIG_EXPR_DECL(VPIG4, VPI30, VPI30_DESC, W5_DESC);
+SIG_EXPR_LIST_DECL(VPIG4, SIG_EXPR_PTR(VPIG4, VPI18),
+ SIG_EXPR_PTR(VPIG4, VPI24),
+ SIG_EXPR_PTR(VPIG4, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(PWM4, PWM4, W5_DESC);
+MS_PIN_DECL(W5, GPION4, VPIG4, PWM4);
+FUNC_GROUP_DECL(PWM4, W5);
+
+#define Y4 109
+#define Y4_DESC SIG_DESC_SET(SCU88, 5)
+SIG_EXPR_DECL(VPIG5, VPI18, VPI18_DESC, Y4_DESC);
+SIG_EXPR_DECL(VPIG5, VPI24, VPI24_DESC, Y4_DESC);
+SIG_EXPR_DECL(VPIG5, VPI30, VPI30_DESC, Y4_DESC);
+SIG_EXPR_LIST_DECL(VPIG5, SIG_EXPR_PTR(VPIG5, VPI18),
+ SIG_EXPR_PTR(VPIG5, VPI24),
+ SIG_EXPR_PTR(VPIG5, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(PWM5, PWM5, Y4_DESC);
+MS_PIN_DECL(Y4, GPION5, VPIG5, PWM5);
+FUNC_GROUP_DECL(PWM5, Y4);
+
+#define AA3 110
+#define AA3_DESC SIG_DESC_SET(SCU88, 6)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG6, VPI30, VPI30_DESC, AA3_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM6, PWM6, AA3_DESC);
+MS_PIN_DECL(AA3, GPION6, VPIG6, PWM6);
+FUNC_GROUP_DECL(PWM6, AA3);
+
+#define AB2 111
+#define AB2_DESC SIG_DESC_SET(SCU88, 7)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG7, VPI30, VPI30_DESC, AB2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM7, PWM7, AB2_DESC);
+MS_PIN_DECL(AB2, GPION7, VPIG7, PWM7);
+FUNC_GROUP_DECL(PWM7, AB2);
+
+#define V6 112
+SIG_EXPR_LIST_DECL_SINGLE(VPIG8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 8));
+SS_PIN_DECL(V6, GPIOO0, VPIG8);
+
+#define Y5 113
+SIG_EXPR_LIST_DECL_SINGLE(VPIG9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 9));
+SS_PIN_DECL(Y5, GPIOO1, VPIG9);
+
+FUNC_GROUP_DECL(VPI18, T5, U3, V1, U4, V2, AA2, W5, Y4, AA3, AB2);
+FUNC_GROUP_DECL(VPI24, T5, U3, V1, U4, V2, AA2, W5, Y4, AA3, AB2, V6, Y5);
+FUNC_GROUP_DECL(VPI30, T5, U3, V1, U4, V2, W1, U5, W4, Y3, AA2, W5, Y4, AA3,
+ AB2);
+
+#define Y7 125
+SIG_EXPR_LIST_DECL_SINGLE(GPIOP5, GPIOP5);
+MS_PIN_DECL_(Y7, SIG_EXPR_LIST_PTR(GPIOP5));
+
+#define AA7 126
+SSSF_PIN_DECL(AA7, GPIOP6, BMCINT, SIG_DESC_SET(SCU88, 22));
+
+#define AB7 127
+SSSF_PIN_DECL(AB7, GPIOP7, FLACK, SIG_DESC_SET(SCU88, 23));
+
+#define I2C3_DESC SIG_DESC_SET(SCU90, 16)
+
+#define D3 128
+SIG_EXPR_LIST_DECL_SINGLE(SCL3, I2C3, I2C3_DESC);
+SS_PIN_DECL(D3, GPIOQ0, SCL3);
+
+#define C2 129
+SIG_EXPR_LIST_DECL_SINGLE(SDA3, I2C3, I2C3_DESC);
+SS_PIN_DECL(C2, GPIOQ1, SDA3);
+
+FUNC_GROUP_DECL(I2C3, D3, C2);
+
+#define I2C4_DESC SIG_DESC_SET(SCU90, 17)
+
+#define B1 130
+SIG_EXPR_LIST_DECL_SINGLE(SCL4, I2C4, I2C4_DESC);
+SS_PIN_DECL(B1, GPIOQ2, SCL4);
+
+#define F5 131
+SIG_EXPR_LIST_DECL_SINGLE(SDA4, I2C4, I2C4_DESC);
+SS_PIN_DECL(F5, GPIOQ3, SDA4);
+
+FUNC_GROUP_DECL(I2C4, B1, F5);
+
+#define DASH9028_DESC SIG_DESC_SET(SCU90, 28)
+
+#define H2 134
+SIG_EXPR_LIST_DECL_SINGLE(DASHH2, DASHH2, DASH9028_DESC);
+SS_PIN_DECL(H2, GPIOQ6, DASHH2);
+
+#define H1 135
+SIG_EXPR_LIST_DECL_SINGLE(DASHH1, DASHH1, DASH9028_DESC);
+SS_PIN_DECL(H1, GPIOQ7, DASHH1);
+
+#define V20 136
+SSSF_PIN_DECL(V20, GPIOR0, ROMCS1, SIG_DESC_SET(SCU88, 24));
+
+#define W21 137
+SSSF_PIN_DECL(W21, GPIOR1, ROMCS2, SIG_DESC_SET(SCU88, 25));
+
+#define Y22 138
+SSSF_PIN_DECL(Y22, GPIOR2, ROMCS3, SIG_DESC_SET(SCU88, 26));
+
+#define U19 139
+SSSF_PIN_DECL(U19, GPIOR3, ROMCS4, SIG_DESC_SET(SCU88, 27));
+
+#define VPOOFF0_DESC { SCU94, GENMASK(1, 0), 0, 0 }
+#define VPO12_DESC { SCU94, GENMASK(1, 0), 1, 0 }
+#define VPO24_DESC { SCU94, GENMASK(1, 0), 2, 0 }
+#define VPOOFF1_DESC { SCU94, GENMASK(1, 0), 3, 0 }
+#define VPO_OFF_12 { SCU94, 0x2, 0, 0 }
+#define VPO_24_OFF SIG_DESC_SET(SCU94, 1)
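+/*
+ * In terms of the SCU94[1:0] encoding above, VPO_OFF_12 appears to match the
+ * "off" (0) and 12-bit (1) settings (bit 1 clear), while VPO_24_OFF matches
+ * the 24-bit (2) and second "off" (3) settings (bit 1 set).
+ */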
+
+#define V21 140
+#define V21_DESC SIG_DESC_SET(SCU88, 28)
+SIG_EXPR_DECL(ROMA24, ROM8, V21_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA24, ROM16, V21_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA24, ROM16S, V21_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL(ROMA24, SIG_EXPR_PTR(ROMA24, ROM8),
+ SIG_EXPR_PTR(ROMA24, ROM16),
+ SIG_EXPR_PTR(ROMA24, ROM16S));
+SIG_EXPR_LIST_DECL_SINGLE(VPOR6, VPO24, V21_DESC, VPO_24_OFF);
+MS_PIN_DECL(V21, GPIOR4, ROMA24, VPOR6);
+
+#define W22 141
+#define W22_DESC SIG_DESC_SET(SCU88, 29)
+SIG_EXPR_DECL(ROMA25, ROM8, W22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA25, ROM16, W22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA25, ROM16S, W22_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL(ROMA25, SIG_EXPR_PTR(ROMA25, ROM8),
+ SIG_EXPR_PTR(ROMA25, ROM16),
+ SIG_EXPR_PTR(ROMA25, ROM16S));
+SIG_EXPR_LIST_DECL_SINGLE(VPOR7, VPO24, W22_DESC, VPO_24_OFF);
+MS_PIN_DECL(W22, GPIOR5, ROMA25, VPOR7);
+
+#define C6 142
+SIG_EXPR_LIST_DECL_SINGLE(MDC1, MDIO1, SIG_DESC_SET(SCU88, 30));
+SS_PIN_DECL(C6, GPIOR6, MDC1);
+
+#define A5 143
+SIG_EXPR_LIST_DECL_SINGLE(MDIO1, MDIO1, SIG_DESC_SET(SCU88, 31));
+SS_PIN_DECL(A5, GPIOR7, MDIO1);
+
+FUNC_GROUP_DECL(MDIO1, C6, A5);
+
+#define U21 144
+#define U21_DESC SIG_DESC_SET(SCU8C, 0)
+SIG_EXPR_DECL(ROMD4, ROM8, U21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD4, ROM16, U21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD4, ROM16S, U21_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL(ROMD4, SIG_EXPR_PTR(ROMD4, ROM8),
+ SIG_EXPR_PTR(ROMD4, ROM16),
+ SIG_EXPR_PTR(ROMD4, ROM16S));
+SIG_EXPR_DECL(VPODE, VPO12, U21_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPODE, VPO24, U21_DESC, VPO12_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPODE, VPO12, VPO24);
+MS_PIN_DECL(U21, GPIOS0, ROMD4, VPODE);
+
+#define T19 145
+#define T19_DESC SIG_DESC_SET(SCU8C, 1)
+SIG_EXPR_DECL(ROMD5, ROM8, T19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD5, ROM16, T19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD5, ROM16S, T19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL(ROMD5, SIG_EXPR_PTR(ROMD5, ROM8),
+ SIG_EXPR_PTR(ROMD5, ROM16),
+ SIG_EXPR_PTR(ROMD5, ROM16S));
+SIG_EXPR_DECL(VPOHS, VPO12, T19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOHS, VPO24, T19_DESC, VPO24_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOHS, VPO12, VPO24);
+MS_PIN_DECL(T19, GPIOS1, ROMD5, VPOHS);
+
+#define V22 146
+#define V22_DESC SIG_DESC_SET(SCU8C, 2)
+SIG_EXPR_DECL(ROMD6, ROM8, V22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD6, ROM16, V22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD6, ROM16S, V22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL(ROMD6, SIG_EXPR_PTR(ROMD6, ROM8),
+ SIG_EXPR_PTR(ROMD6, ROM16),
+ SIG_EXPR_PTR(ROMD6, ROM16S));
+SIG_EXPR_DECL(VPOVS, VPO12, V22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOVS, VPO24, V22_DESC, VPO24_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOVS, VPO12, VPO24);
+MS_PIN_DECL(V22, GPIOS2, ROMD6, VPOVS);
+
+#define U20 147
+#define U20_DESC SIG_DESC_SET(SCU8C, 3)
+SIG_EXPR_DECL(ROMD7, ROM8, U20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD7, ROM16, U20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMD7, ROM16S, U20_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL(ROMD7, SIG_EXPR_PTR(ROMD7, ROM8),
+ SIG_EXPR_PTR(ROMD7, ROM16),
+ SIG_EXPR_PTR(ROMD7, ROM16S));
+SIG_EXPR_DECL(VPOCLK, VPO12, U20_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOCLK, VPO24, U20_DESC, VPO24_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOCLK, VPO12, VPO24);
+MS_PIN_DECL(U20, GPIOS3, ROMD7, VPOCLK);
+
+#define R18 148
+#define ROMOE_DESC SIG_DESC_SET(SCU8C, 4)
+SIG_EXPR_LIST_DECL_SINGLE(GPIOS4, GPIOS4);
+SIG_EXPR_DECL(ROMOE, ROM8, ROMOE_DESC);
+SIG_EXPR_DECL(ROMOE, ROM16, ROMOE_DESC);
+SIG_EXPR_DECL(ROMOE, ROM16S, ROMOE_DESC);
+SIG_EXPR_LIST_DECL(ROMOE, SIG_EXPR_PTR(ROMOE, ROM8),
+ SIG_EXPR_PTR(ROMOE, ROM16),
+ SIG_EXPR_PTR(ROMOE, ROM16S));
+MS_PIN_DECL_(R18, SIG_EXPR_LIST_PTR(ROMOE), SIG_EXPR_LIST_PTR(GPIOS4));
+
+#define N21 149
+#define ROMWE_DESC SIG_DESC_SET(SCU8C, 5)
+SIG_EXPR_LIST_DECL_SINGLE(GPIOS5, GPIOS5);
+SIG_EXPR_DECL(ROMWE, ROM8, ROMWE_DESC);
+SIG_EXPR_DECL(ROMWE, ROM16, ROMWE_DESC);
+SIG_EXPR_DECL(ROMWE, ROM16S, ROMWE_DESC);
+SIG_EXPR_LIST_DECL(ROMWE, SIG_EXPR_PTR(ROMWE, ROM8),
+ SIG_EXPR_PTR(ROMWE, ROM16),
+ SIG_EXPR_PTR(ROMWE, ROM16S));
+MS_PIN_DECL_(N21, SIG_EXPR_LIST_PTR(ROMWE), SIG_EXPR_LIST_PTR(GPIOS5));
+
+#define L22 150
+#define L22_DESC SIG_DESC_SET(SCU8C, 6)
+SIG_EXPR_DECL(ROMA22, ROM8, L22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA22, ROM16, L22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA22, ROM16S, L22_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL(ROMA22, SIG_EXPR_PTR(ROMA22, ROM8),
+ SIG_EXPR_PTR(ROMA22, ROM16),
+ SIG_EXPR_PTR(ROMA22, ROM16S));
+SIG_EXPR_LIST_DECL_SINGLE(VPOR4, VPO24, L22_DESC, VPO_24_OFF);
+MS_PIN_DECL(L22, GPIOS6, ROMA22, VPOR4);
+
+#define K18 151
+#define K18_DESC SIG_DESC_SET(SCU8C, 7)
+SIG_EXPR_DECL(ROMA23, ROM8, K18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA23, ROM16, K18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA23, ROM16S, K18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL(ROMA23, SIG_EXPR_PTR(ROMA23, ROM8),
+ SIG_EXPR_PTR(ROMA23, ROM16),
+ SIG_EXPR_PTR(ROMA23, ROM16S));
+SIG_EXPR_LIST_DECL_SINGLE(VPOR5, VPO24, K18_DESC, VPO_24_OFF);
+MS_PIN_DECL(K18, GPIOS7, ROMA23, VPOR5);
+
+FUNC_GROUP_DECL(ROM8, V20, U21, T19, V22, U20, R18, N21, L22, K18, W21, Y22,
+ U19);
+FUNC_GROUP_DECL(ROM16, V20, U21, T19, V22, U20, R18, N21, L22, K18,
+ A8, C7, B7, A7, D7, B6, A6, E7, W21, Y22, U19);
+FUNC_GROUP_DECL(VPO12, U21, T19, V22, U20);
+FUNC_GROUP_DECL(VPO24, U21, T19, V22, U20, L22, K18, V21, W22);
+
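+/*
+ * RMII1 vs RGMII1 is chosen by hardware strap HW_STRAP1[6] (0 selects RMII
+ * according to the descriptor below). Note that aspeed_sig_expr_set() in
+ * pinctrl-aspeed.c treats the strap registers as read-only, so this choice
+ * cannot be changed at runtime by the driver.
+ */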
+#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
+
+#define A12 152
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT0, GPIOT0, SIG_DESC_SET(SCUA0, 0));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXEN, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXCK, RGMII1);
+MS_PIN_DECL_(A12, SIG_EXPR_LIST_PTR(GPIOT0), SIG_EXPR_LIST_PTR(RMII1TXEN),
+ SIG_EXPR_LIST_PTR(RGMII1TXCK));
+
+#define B12 153
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT1, GPIOT1, SIG_DESC_SET(SCUA0, 1));
+SIG_EXPR_LIST_DECL_SINGLE(DASHB12, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXCTL, RGMII1);
+MS_PIN_DECL_(B12, SIG_EXPR_LIST_PTR(GPIOT1), SIG_EXPR_LIST_PTR(DASHB12),
+ SIG_EXPR_LIST_PTR(RGMII1TXCTL));
+
+#define C12 154
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT2, GPIOT2, SIG_DESC_SET(SCUA0, 2));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXD0, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD0, RGMII1);
+MS_PIN_DECL_(C12, SIG_EXPR_LIST_PTR(GPIOT2), SIG_EXPR_LIST_PTR(RMII1TXD0),
+ SIG_EXPR_LIST_PTR(RGMII1TXD0));
+
+#define D12 155
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT3, GPIOT3, SIG_DESC_SET(SCUA0, 3));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXD1, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD1, RGMII1);
+MS_PIN_DECL_(D12, SIG_EXPR_LIST_PTR(GPIOT3), SIG_EXPR_LIST_PTR(RMII1TXD1),
+ SIG_EXPR_LIST_PTR(RGMII1TXD1));
+
+#define E12 156
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT4, GPIOT4, SIG_DESC_SET(SCUA0, 4));
+SIG_EXPR_LIST_DECL_SINGLE(DASHE12, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD2, RGMII1);
+MS_PIN_DECL_(E12, SIG_EXPR_LIST_PTR(GPIOT4), SIG_EXPR_LIST_PTR(DASHE12),
+ SIG_EXPR_LIST_PTR(RGMII1TXD2));
+
+#define A13 157
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT5, GPIOT5, SIG_DESC_SET(SCUA0, 5));
+SIG_EXPR_LIST_DECL_SINGLE(DASHA13, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD3, RGMII1);
+MS_PIN_DECL_(A13, SIG_EXPR_LIST_PTR(GPIOT5), SIG_EXPR_LIST_PTR(DASHA13),
+ SIG_EXPR_LIST_PTR(RGMII1TXD3));
+
+#define E11 164
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU4, GPIOU4, SIG_DESC_SET(SCUA0, 12));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RCLK, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXCK, RGMII1);
+MS_PIN_DECL_(E11, SIG_EXPR_LIST_PTR(GPIOU4), SIG_EXPR_LIST_PTR(RMII1RCLK),
+ SIG_EXPR_LIST_PTR(RGMII1RXCK));
+
+#define D11 165
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU5, GPIOU5, SIG_DESC_SET(SCUA0, 13));
+SIG_EXPR_LIST_DECL_SINGLE(DASHD11, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXCTL, RGMII1);
+MS_PIN_DECL_(D11, SIG_EXPR_LIST_PTR(GPIOU5), SIG_EXPR_LIST_PTR(DASHD11),
+ SIG_EXPR_LIST_PTR(RGMII1RXCTL));
+
+#define C11 166
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU6, GPIOU6, SIG_DESC_SET(SCUA0, 14));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXD0, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD0, RGMII1);
+MS_PIN_DECL_(C11, SIG_EXPR_LIST_PTR(GPIOU6), SIG_EXPR_LIST_PTR(RMII1RXD0),
+ SIG_EXPR_LIST_PTR(RGMII1RXD0));
+
+#define B11 167
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU7, GPIOU7, SIG_DESC_SET(SCUA0, 15));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXD1, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD1, RGMII1);
+MS_PIN_DECL_(B11, SIG_EXPR_LIST_PTR(GPIOU7), SIG_EXPR_LIST_PTR(RMII1RXD1),
+ SIG_EXPR_LIST_PTR(RGMII1RXD1));
+
+#define A11 168
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV0, GPIOV0, SIG_DESC_SET(SCUA0, 16));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1CRSDV, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD2, RGMII1);
+MS_PIN_DECL_(A11, SIG_EXPR_LIST_PTR(GPIOV0), SIG_EXPR_LIST_PTR(RMII1CRSDV),
+ SIG_EXPR_LIST_PTR(RGMII1RXD2));
+
+#define E10 169
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV1, GPIOV1, SIG_DESC_SET(SCUA0, 17));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXER, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD3, RGMII1);
+MS_PIN_DECL_(E10, SIG_EXPR_LIST_PTR(GPIOV1), SIG_EXPR_LIST_PTR(RMII1RXER),
+ SIG_EXPR_LIST_PTR(RGMII1RXD3));
+
+FUNC_GROUP_DECL(RMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
+ E10);
+FUNC_GROUP_DECL(RGMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
+ E10);
+
+/* Note we account for GPIOY4-GPIOY7 even though they're not valid, so the
+ * pin count becomes 220 rather than 216.
+ */
+#define ASPEED_G4_NR_PINS 220
+
+/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
+
+static struct pinctrl_pin_desc aspeed_g4_pins[ASPEED_G4_NR_PINS] = {
+ ASPEED_PINCTRL_PIN(A1),
+ ASPEED_PINCTRL_PIN(A11),
+ ASPEED_PINCTRL_PIN(A12),
+ ASPEED_PINCTRL_PIN(A13),
+ ASPEED_PINCTRL_PIN(A15),
+ ASPEED_PINCTRL_PIN(A18),
+ ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A5),
+ ASPEED_PINCTRL_PIN(A6),
+ ASPEED_PINCTRL_PIN(A7),
+ ASPEED_PINCTRL_PIN(A8),
+ ASPEED_PINCTRL_PIN(AA2),
+ ASPEED_PINCTRL_PIN(AA22),
+ ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(AA7),
+ ASPEED_PINCTRL_PIN(AB1),
+ ASPEED_PINCTRL_PIN(AB2),
+ ASPEED_PINCTRL_PIN(AB7),
+ ASPEED_PINCTRL_PIN(B1),
+ ASPEED_PINCTRL_PIN(B11),
+ ASPEED_PINCTRL_PIN(B12),
+ ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B19),
+ ASPEED_PINCTRL_PIN(B2),
+ ASPEED_PINCTRL_PIN(B3),
+ ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B6),
+ ASPEED_PINCTRL_PIN(B7),
+ ASPEED_PINCTRL_PIN(C1),
+ ASPEED_PINCTRL_PIN(C11),
+ ASPEED_PINCTRL_PIN(C12),
+ ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C15),
+ ASPEED_PINCTRL_PIN(C17),
+ ASPEED_PINCTRL_PIN(C2),
+ ASPEED_PINCTRL_PIN(C3),
+ ASPEED_PINCTRL_PIN(C4),
+ ASPEED_PINCTRL_PIN(C5),
+ ASPEED_PINCTRL_PIN(C6),
+ ASPEED_PINCTRL_PIN(C7),
+ ASPEED_PINCTRL_PIN(D1),
+ ASPEED_PINCTRL_PIN(D11),
+ ASPEED_PINCTRL_PIN(D12),
+ ASPEED_PINCTRL_PIN(D14),
+ ASPEED_PINCTRL_PIN(D15),
+ ASPEED_PINCTRL_PIN(D16),
+ ASPEED_PINCTRL_PIN(D17),
+ ASPEED_PINCTRL_PIN(D18),
+ ASPEED_PINCTRL_PIN(D2),
+ ASPEED_PINCTRL_PIN(D3),
+ ASPEED_PINCTRL_PIN(D4),
+ ASPEED_PINCTRL_PIN(D5),
+ ASPEED_PINCTRL_PIN(D7),
+ ASPEED_PINCTRL_PIN(E10),
+ ASPEED_PINCTRL_PIN(E11),
+ ASPEED_PINCTRL_PIN(E12),
+ ASPEED_PINCTRL_PIN(E14),
+ ASPEED_PINCTRL_PIN(E16),
+ ASPEED_PINCTRL_PIN(E2),
+ ASPEED_PINCTRL_PIN(E3),
+ ASPEED_PINCTRL_PIN(E5),
+ ASPEED_PINCTRL_PIN(E7),
+ ASPEED_PINCTRL_PIN(F3),
+ ASPEED_PINCTRL_PIN(F4),
+ ASPEED_PINCTRL_PIN(F5),
+ ASPEED_PINCTRL_PIN(G5),
+ ASPEED_PINCTRL_PIN(H1),
+ ASPEED_PINCTRL_PIN(H19),
+ ASPEED_PINCTRL_PIN(H2),
+ ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(J3),
+ ASPEED_PINCTRL_PIN(K18),
+ ASPEED_PINCTRL_PIN(L22),
+ ASPEED_PINCTRL_PIN(N21),
+ ASPEED_PINCTRL_PIN(R18),
+ ASPEED_PINCTRL_PIN(T1),
+ ASPEED_PINCTRL_PIN(T19),
+ ASPEED_PINCTRL_PIN(T2),
+ ASPEED_PINCTRL_PIN(T4),
+ ASPEED_PINCTRL_PIN(T5),
+ ASPEED_PINCTRL_PIN(U1),
+ ASPEED_PINCTRL_PIN(U18),
+ ASPEED_PINCTRL_PIN(U19),
+ ASPEED_PINCTRL_PIN(U2),
+ ASPEED_PINCTRL_PIN(U20),
+ ASPEED_PINCTRL_PIN(U21),
+ ASPEED_PINCTRL_PIN(U3),
+ ASPEED_PINCTRL_PIN(U4),
+ ASPEED_PINCTRL_PIN(U5),
+ ASPEED_PINCTRL_PIN(V1),
+ ASPEED_PINCTRL_PIN(V2),
+ ASPEED_PINCTRL_PIN(V20),
+ ASPEED_PINCTRL_PIN(V21),
+ ASPEED_PINCTRL_PIN(V22),
+ ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(W1),
+ ASPEED_PINCTRL_PIN(W21),
+ ASPEED_PINCTRL_PIN(W22),
+ ASPEED_PINCTRL_PIN(W4),
+ ASPEED_PINCTRL_PIN(W5),
+ ASPEED_PINCTRL_PIN(Y22),
+ ASPEED_PINCTRL_PIN(Y3),
+ ASPEED_PINCTRL_PIN(Y4),
+ ASPEED_PINCTRL_PIN(Y5),
+ ASPEED_PINCTRL_PIN(Y7),
+};
+
+static const struct aspeed_pin_group aspeed_g4_groups[] = {
+ ASPEED_PINCTRL_GROUP(ACPI),
+ ASPEED_PINCTRL_GROUP(BMCINT),
+ ASPEED_PINCTRL_GROUP(DDCCLK),
+ ASPEED_PINCTRL_GROUP(DDCDAT),
+ ASPEED_PINCTRL_GROUP(FLACK),
+ ASPEED_PINCTRL_GROUP(FLBUSY),
+ ASPEED_PINCTRL_GROUP(FLWP),
+ ASPEED_PINCTRL_GROUP(GPID0),
+ ASPEED_PINCTRL_GROUP(GPIE0),
+ ASPEED_PINCTRL_GROUP(GPIE2),
+ ASPEED_PINCTRL_GROUP(GPIE4),
+ ASPEED_PINCTRL_GROUP(GPIE6),
+ ASPEED_PINCTRL_GROUP(I2C10),
+ ASPEED_PINCTRL_GROUP(I2C11),
+ ASPEED_PINCTRL_GROUP(I2C12),
+ ASPEED_PINCTRL_GROUP(I2C13),
+ ASPEED_PINCTRL_GROUP(I2C3),
+ ASPEED_PINCTRL_GROUP(I2C4),
+ ASPEED_PINCTRL_GROUP(I2C5),
+ ASPEED_PINCTRL_GROUP(I2C6),
+ ASPEED_PINCTRL_GROUP(I2C7),
+ ASPEED_PINCTRL_GROUP(I2C8),
+ ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(LPCPD),
+ ASPEED_PINCTRL_GROUP(LPCPME),
+ ASPEED_PINCTRL_GROUP(LPCSMI),
+ ASPEED_PINCTRL_GROUP(MDIO1),
+ ASPEED_PINCTRL_GROUP(MDIO2),
+ ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS3),
+ ASPEED_PINCTRL_GROUP(NCTS4),
+ ASPEED_PINCTRL_GROUP(NDCD1),
+ ASPEED_PINCTRL_GROUP(NDCD3),
+ ASPEED_PINCTRL_GROUP(NDCD4),
+ ASPEED_PINCTRL_GROUP(NDSR1),
+ ASPEED_PINCTRL_GROUP(NDSR3),
+ ASPEED_PINCTRL_GROUP(NDTR1),
+ ASPEED_PINCTRL_GROUP(NDTR3),
+ ASPEED_PINCTRL_GROUP(NRI1),
+ ASPEED_PINCTRL_GROUP(NRI3),
+ ASPEED_PINCTRL_GROUP(NRI4),
+ ASPEED_PINCTRL_GROUP(NRTS1),
+ ASPEED_PINCTRL_GROUP(NRTS3),
+ ASPEED_PINCTRL_GROUP(PWM0),
+ ASPEED_PINCTRL_GROUP(PWM1),
+ ASPEED_PINCTRL_GROUP(PWM2),
+ ASPEED_PINCTRL_GROUP(PWM3),
+ ASPEED_PINCTRL_GROUP(PWM4),
+ ASPEED_PINCTRL_GROUP(PWM5),
+ ASPEED_PINCTRL_GROUP(PWM6),
+ ASPEED_PINCTRL_GROUP(PWM7),
+ ASPEED_PINCTRL_GROUP(RGMII1),
+ ASPEED_PINCTRL_GROUP(RMII1),
+ ASPEED_PINCTRL_GROUP(ROM16),
+ ASPEED_PINCTRL_GROUP(ROM8),
+ ASPEED_PINCTRL_GROUP(ROMCS1),
+ ASPEED_PINCTRL_GROUP(ROMCS2),
+ ASPEED_PINCTRL_GROUP(ROMCS3),
+ ASPEED_PINCTRL_GROUP(ROMCS4),
+ ASPEED_PINCTRL_GROUP(RXD1),
+ ASPEED_PINCTRL_GROUP(RXD3),
+ ASPEED_PINCTRL_GROUP(RXD4),
+ ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SGPMI),
+ ASPEED_PINCTRL_GROUP(SIOPBI),
+ ASPEED_PINCTRL_GROUP(SIOPBO),
+ ASPEED_PINCTRL_GROUP(TIMER3),
+ ASPEED_PINCTRL_GROUP(TIMER5),
+ ASPEED_PINCTRL_GROUP(TIMER6),
+ ASPEED_PINCTRL_GROUP(TIMER7),
+ ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(TXD1),
+ ASPEED_PINCTRL_GROUP(TXD3),
+ ASPEED_PINCTRL_GROUP(TXD4),
+ ASPEED_PINCTRL_GROUP(UART6),
+ ASPEED_PINCTRL_GROUP(VGAHS),
+ ASPEED_PINCTRL_GROUP(VGAVS),
+ ASPEED_PINCTRL_GROUP(VPI18),
+ ASPEED_PINCTRL_GROUP(VPI24),
+ ASPEED_PINCTRL_GROUP(VPI30),
+ ASPEED_PINCTRL_GROUP(VPO12),
+ ASPEED_PINCTRL_GROUP(VPO24),
+};
+
+static const struct aspeed_pin_function aspeed_g4_functions[] = {
+ ASPEED_PINCTRL_FUNC(ACPI),
+ ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(DDCCLK),
+ ASPEED_PINCTRL_FUNC(DDCDAT),
+ ASPEED_PINCTRL_FUNC(FLACK),
+ ASPEED_PINCTRL_FUNC(FLBUSY),
+ ASPEED_PINCTRL_FUNC(FLWP),
+ ASPEED_PINCTRL_FUNC(GPID0),
+ ASPEED_PINCTRL_FUNC(GPIE0),
+ ASPEED_PINCTRL_FUNC(GPIE2),
+ ASPEED_PINCTRL_FUNC(GPIE4),
+ ASPEED_PINCTRL_FUNC(GPIE6),
+ ASPEED_PINCTRL_FUNC(I2C10),
+ ASPEED_PINCTRL_FUNC(I2C11),
+ ASPEED_PINCTRL_FUNC(I2C12),
+ ASPEED_PINCTRL_FUNC(I2C13),
+ ASPEED_PINCTRL_FUNC(I2C3),
+ ASPEED_PINCTRL_FUNC(I2C4),
+ ASPEED_PINCTRL_FUNC(I2C5),
+ ASPEED_PINCTRL_FUNC(I2C6),
+ ASPEED_PINCTRL_FUNC(I2C7),
+ ASPEED_PINCTRL_FUNC(I2C8),
+ ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(LPCPD),
+ ASPEED_PINCTRL_FUNC(LPCPME),
+ ASPEED_PINCTRL_FUNC(LPCSMI),
+ ASPEED_PINCTRL_FUNC(MDIO1),
+ ASPEED_PINCTRL_FUNC(MDIO2),
+ ASPEED_PINCTRL_FUNC(NCTS1),
+ ASPEED_PINCTRL_FUNC(NCTS3),
+ ASPEED_PINCTRL_FUNC(NCTS4),
+ ASPEED_PINCTRL_FUNC(NDCD1),
+ ASPEED_PINCTRL_FUNC(NDCD3),
+ ASPEED_PINCTRL_FUNC(NDCD4),
+ ASPEED_PINCTRL_FUNC(NDSR1),
+ ASPEED_PINCTRL_FUNC(NDSR3),
+ ASPEED_PINCTRL_FUNC(NDTR1),
+ ASPEED_PINCTRL_FUNC(NDTR3),
+ ASPEED_PINCTRL_FUNC(NRI1),
+ ASPEED_PINCTRL_FUNC(NRI3),
+ ASPEED_PINCTRL_FUNC(NRI4),
+ ASPEED_PINCTRL_FUNC(NRTS1),
+ ASPEED_PINCTRL_FUNC(NRTS3),
+ ASPEED_PINCTRL_FUNC(PWM0),
+ ASPEED_PINCTRL_FUNC(PWM1),
+ ASPEED_PINCTRL_FUNC(PWM2),
+ ASPEED_PINCTRL_FUNC(PWM3),
+ ASPEED_PINCTRL_FUNC(PWM4),
+ ASPEED_PINCTRL_FUNC(PWM5),
+ ASPEED_PINCTRL_FUNC(PWM6),
+ ASPEED_PINCTRL_FUNC(PWM7),
+ ASPEED_PINCTRL_FUNC(RGMII1),
+ ASPEED_PINCTRL_FUNC(RMII1),
+ ASPEED_PINCTRL_FUNC(ROM16),
+ ASPEED_PINCTRL_FUNC(ROM8),
+ ASPEED_PINCTRL_FUNC(ROMCS1),
+ ASPEED_PINCTRL_FUNC(ROMCS2),
+ ASPEED_PINCTRL_FUNC(ROMCS3),
+ ASPEED_PINCTRL_FUNC(ROMCS4),
+ ASPEED_PINCTRL_FUNC(RXD1),
+ ASPEED_PINCTRL_FUNC(RXD3),
+ ASPEED_PINCTRL_FUNC(RXD4),
+ ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SGPMI),
+ ASPEED_PINCTRL_FUNC(SIOPBI),
+ ASPEED_PINCTRL_FUNC(SIOPBO),
+ ASPEED_PINCTRL_FUNC(TIMER3),
+ ASPEED_PINCTRL_FUNC(TIMER5),
+ ASPEED_PINCTRL_FUNC(TIMER6),
+ ASPEED_PINCTRL_FUNC(TIMER7),
+ ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(TXD1),
+ ASPEED_PINCTRL_FUNC(TXD3),
+ ASPEED_PINCTRL_FUNC(TXD4),
+ ASPEED_PINCTRL_FUNC(UART6),
+ ASPEED_PINCTRL_FUNC(VGAHS),
+ ASPEED_PINCTRL_FUNC(VGAVS),
+ ASPEED_PINCTRL_FUNC(VPI18),
+ ASPEED_PINCTRL_FUNC(VPI24),
+ ASPEED_PINCTRL_FUNC(VPI30),
+ ASPEED_PINCTRL_FUNC(VPO12),
+ ASPEED_PINCTRL_FUNC(VPO24),
+};
+
+static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
+ .pins = aspeed_g4_pins,
+ .npins = ARRAY_SIZE(aspeed_g4_pins),
+ .groups = aspeed_g4_groups,
+ .ngroups = ARRAY_SIZE(aspeed_g4_groups),
+ .functions = aspeed_g4_functions,
+ .nfunctions = ARRAY_SIZE(aspeed_g4_functions),
+};
+
+static struct pinmux_ops aspeed_g4_pinmux_ops = {
+ .get_functions_count = aspeed_pinmux_get_fn_count,
+ .get_function_name = aspeed_pinmux_get_fn_name,
+ .get_function_groups = aspeed_pinmux_get_fn_groups,
+ .set_mux = aspeed_pinmux_set_mux,
+ .gpio_request_enable = aspeed_gpio_request_enable,
+ .strict = true,
+};
+
+static struct pinctrl_ops aspeed_g4_pinctrl_ops = {
+ .get_groups_count = aspeed_pinctrl_get_groups_count,
+ .get_group_name = aspeed_pinctrl_get_group_name,
+ .get_group_pins = aspeed_pinctrl_get_group_pins,
+ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static struct pinctrl_desc aspeed_g4_pinctrl_desc = {
+ .name = "aspeed-g4-pinctrl",
+ .pins = aspeed_g4_pins,
+ .npins = ARRAY_SIZE(aspeed_g4_pins),
+ .pctlops = &aspeed_g4_pinctrl_ops,
+ .pmxops = &aspeed_g4_pinmux_ops,
+};
+
+static int aspeed_g4_pinctrl_probe(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_g4_pins); i++)
+ aspeed_g4_pins[i].number = i;
+
+ return aspeed_pinctrl_probe(pdev, &aspeed_g4_pinctrl_desc,
+ &aspeed_g4_pinctrl_data);
+}
+
+static const struct of_device_id aspeed_g4_pinctrl_of_match[] = {
+ { .compatible = "aspeed,ast2400-pinctrl", },
+ { .compatible = "aspeed,g4-pinctrl", },
+ { },
+};
+
+static struct platform_driver aspeed_g4_pinctrl_driver = {
+ .probe = aspeed_g4_pinctrl_probe,
+ .driver = {
+ .name = "aspeed-g4-pinctrl",
+ .of_match_table = aspeed_g4_pinctrl_of_match,
+ },
+};
+
+static int aspeed_g4_pinctrl_init(void)
+{
+ return platform_driver_register(&aspeed_g4_pinctrl_driver);
+}
+
+arch_initcall(aspeed_g4_pinctrl_init);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
new file mode 100644
index 000000000000..e1ab864e1a7f
--- /dev/null
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-aspeed.h"
+
+#define ASPEED_G5_NR_PINS 228
+
+#define COND1 SIG_DESC_BIT(SCU90, 6, 0)
+#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
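+/*
+ * COND1 and COND2 appear to be common preconditions appended to many of the
+ * signal expressions below (COND1 over SCU90[6], COND2 requiring
+ * SCU94[1:0] == 0). An expression only evaluates as enabled if every one of
+ * its descriptors matches (see aspeed_sig_expr_eval() in pinctrl-aspeed.c).
+ */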
+
+#define B14 0
+SSSF_PIN_DECL(B14, GPIOA0, MAC1LINK, SIG_DESC_SET(SCU80, 0));
+
+#define E13 3
+SSSF_PIN_DECL(E13, GPIOA3, TIMER4, SIG_DESC_SET(SCU80, 3));
+
+#define I2C9_DESC SIG_DESC_SET(SCU90, 22)
+
+#define C14 4
+SIG_EXPR_LIST_DECL_SINGLE(SCL9, I2C9, I2C9_DESC, COND1);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER5, TIMER5, SIG_DESC_SET(SCU80, 4), COND1);
+MS_PIN_DECL(C14, GPIOA4, SCL9, TIMER5);
+
+FUNC_GROUP_DECL(TIMER5, C14);
+
+#define A13 5
+SIG_EXPR_LIST_DECL_SINGLE(SDA9, I2C9, I2C9_DESC, COND1);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER6, TIMER6, SIG_DESC_SET(SCU80, 5), COND1);
+MS_PIN_DECL(A13, GPIOA5, SDA9, TIMER6);
+
+FUNC_GROUP_DECL(TIMER6, A13);
+
+FUNC_GROUP_DECL(I2C9, C14, A13);
+
+#define MDIO2_DESC SIG_DESC_SET(SCU90, 2)
+
+#define C13 6
+SIG_EXPR_LIST_DECL_SINGLE(MDC2, MDIO2, MDIO2_DESC, COND1);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER7, TIMER7, SIG_DESC_SET(SCU80, 6), COND1);
+MS_PIN_DECL(C13, GPIOA6, MDC2, TIMER7);
+
+FUNC_GROUP_DECL(TIMER7, C13);
+
+#define B13 7
+SIG_EXPR_LIST_DECL_SINGLE(MDIO2, MDIO2, MDIO2_DESC, COND1);
+SIG_EXPR_LIST_DECL_SINGLE(TIMER8, TIMER8, SIG_DESC_SET(SCU80, 7), COND1);
+MS_PIN_DECL(B13, GPIOA7, MDIO2, TIMER8);
+
+FUNC_GROUP_DECL(TIMER8, B13);
+
+FUNC_GROUP_DECL(MDIO2, C13, B13);
+
+#define H20 15
+GPIO_PIN_DECL(H20, GPIOB7);
+
+#define SD1_DESC SIG_DESC_SET(SCU90, 0)
+
+#define C12 16
+#define I2C10_DESC SIG_DESC_SET(SCU90, 23)
+SIG_EXPR_LIST_DECL_SINGLE(SD1CLK, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL10, I2C10, I2C10_DESC);
+MS_PIN_DECL(C12, GPIOC0, SD1CLK, SCL10);
+
+#define A12 17
+SIG_EXPR_LIST_DECL_SINGLE(SD1CMD, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA10, I2C10, I2C10_DESC);
+MS_PIN_DECL(A12, GPIOC1, SD1CMD, SDA10);
+
+FUNC_GROUP_DECL(I2C10, C12, A12);
+
+#define B12 18
+#define I2C11_DESC SIG_DESC_SET(SCU90, 24)
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT0, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL11, I2C11, I2C11_DESC);
+MS_PIN_DECL(B12, GPIOC2, SD1DAT0, SCL11);
+
+#define D9 19
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT1, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA11, I2C11, I2C11_DESC);
+MS_PIN_DECL(D9, GPIOC3, SD1DAT1, SDA11);
+
+FUNC_GROUP_DECL(I2C11, B12, D9);
+
+#define D10 20
+#define I2C12_DESC SIG_DESC_SET(SCU90, 25)
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT2, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL12, I2C12, I2C12_DESC);
+MS_PIN_DECL(D10, GPIOC4, SD1DAT2, SCL12);
+
+#define E12 21
+SIG_EXPR_LIST_DECL_SINGLE(SD1DAT3, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA12, I2C12, I2C12_DESC);
+MS_PIN_DECL(E12, GPIOC5, SD1DAT3, SDA12);
+
+FUNC_GROUP_DECL(I2C12, D10, E12);
+
+#define C11 22
+#define I2C13_DESC SIG_DESC_SET(SCU90, 26)
+SIG_EXPR_LIST_DECL_SINGLE(SD1CD, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SCL13, I2C13, I2C13_DESC);
+MS_PIN_DECL(C11, GPIOC6, SD1CD, SCL13);
+
+#define B11 23
+SIG_EXPR_LIST_DECL_SINGLE(SD1WP, SD1, SD1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SDA13, I2C13, I2C13_DESC);
+MS_PIN_DECL(B11, GPIOC7, SD1WP, SDA13);
+
+FUNC_GROUP_DECL(I2C13, C11, B11);
+FUNC_GROUP_DECL(SD1, C12, A12, B12, D9, D10, E12, C11, B11);
+
+#define SD2_DESC SIG_DESC_SET(SCU90, 1)
+#define GPID0_DESC SIG_DESC_SET(SCU8C, 8)
+#define GPID_DESC SIG_DESC_SET(HW_STRAP1, 21)
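+/*
+ * The GPID pins below can apparently be banked two ways: per-pair via the
+ * SCU8C bits (GPID0_DESC, GPID2_DESC, ...) or for the whole port via the
+ * HW_STRAP1[21] GPID strap, hence the dual-expression declarations for the
+ * GPIDxIN/GPIDxOUT signals.
+ */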
+
+#define F19 24
+SIG_EXPR_LIST_DECL_SINGLE(SD2CLK, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID0IN, GPID0, GPID0_DESC);
+SIG_EXPR_DECL(GPID0IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID0IN, GPID0, GPID);
+MS_PIN_DECL(F19, GPIOD0, SD2CLK, GPID0IN);
+
+#define E21 25
+SIG_EXPR_LIST_DECL_SINGLE(SD2CMD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID0OUT, GPID0, GPID0_DESC);
+SIG_EXPR_DECL(GPID0OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID0OUT, GPID0, GPID);
+MS_PIN_DECL(E21, GPIOD1, SD2CMD, GPID0OUT);
+
+FUNC_GROUP_DECL(GPID0, F19, E21);
+
+#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
+
+#define D20 26
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
+MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+
+#define D21 27
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
+MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+
+FUNC_GROUP_DECL(GPID2, D20, D21);
+
+#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
+#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
+
+#define B20 32
+SIG_EXPR_LIST_DECL_SINGLE(NCTS3, NCTS3, SIG_DESC_SET(SCU80, 16));
+SIG_EXPR_DECL(GPIE0IN, GPIE0, GPIE0_DESC);
+SIG_EXPR_DECL(GPIE0IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE0IN, GPIE0, GPIE);
+MS_PIN_DECL(B20, GPIOE0, NCTS3, GPIE0IN);
+
+#define C20 33
+SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
+SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
+SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
+
+FUNC_GROUP_DECL(GPIE0, B20, C20);
+
+#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13)
+#define C18 64
+SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SS_PIN_DECL(C18, GPIOI0, SYSCS);
+
+#define E15 65
+SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SS_PIN_DECL(E15, GPIOI1, SYSCK);
+
+#define A14 66
+SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
+SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+
+#define C16 67
+SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SS_PIN_DECL(C16, GPIOI3, SYSMISO);
+
+FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+
+#define L2 73
+SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
+SS_PIN_DECL(L2, GPIOJ1, SGPMLD);
+
+#define N3 74
+SIG_EXPR_LIST_DECL_SINGLE(SGPMO, SGPM, SIG_DESC_SET(SCU84, 10));
+SS_PIN_DECL(N3, GPIOJ2, SGPMO);
+
+#define N4 75
+SIG_EXPR_LIST_DECL_SINGLE(SGPMI, SGPM, SIG_DESC_SET(SCU84, 11));
+SS_PIN_DECL(N4, GPIOJ3, SGPMI);
+
+#define I2C5_DESC SIG_DESC_SET(SCU90, 18)
+
+#define L3 80
+SIG_EXPR_LIST_DECL_SINGLE(SCL5, I2C5, I2C5_DESC);
+SS_PIN_DECL(L3, GPIOK0, SCL5);
+
+#define L4 81
+SIG_EXPR_LIST_DECL_SINGLE(SDA5, I2C5, I2C5_DESC);
+SS_PIN_DECL(L4, GPIOK1, SDA5);
+
+FUNC_GROUP_DECL(I2C5, L3, L4);
+
+#define I2C6_DESC SIG_DESC_SET(SCU90, 19)
+
+#define L1 82
+SIG_EXPR_LIST_DECL_SINGLE(SCL6, I2C6, I2C6_DESC);
+SS_PIN_DECL(L1, GPIOK2, SCL6);
+
+#define N2 83
+SIG_EXPR_LIST_DECL_SINGLE(SDA6, I2C6, I2C6_DESC);
+SS_PIN_DECL(N2, GPIOK3, SDA6);
+
+FUNC_GROUP_DECL(I2C6, L1, N2);
+
+#define I2C7_DESC SIG_DESC_SET(SCU90, 20)
+
+#define N1 84
+SIG_EXPR_LIST_DECL_SINGLE(SCL7, I2C7, I2C7_DESC);
+SS_PIN_DECL(N1, GPIOK4, SCL7);
+
+#define P1 85
+SIG_EXPR_LIST_DECL_SINGLE(SDA7, I2C7, I2C7_DESC);
+SS_PIN_DECL(P1, GPIOK5, SDA7);
+
+FUNC_GROUP_DECL(I2C7, N1, P1);
+
+#define I2C8_DESC SIG_DESC_SET(SCU90, 21)
+
+#define P2 86
+SIG_EXPR_LIST_DECL_SINGLE(SCL8, I2C8, I2C8_DESC);
+SS_PIN_DECL(P2, GPIOK6, SCL8);
+
+#define R1 87
+SIG_EXPR_LIST_DECL_SINGLE(SDA8, I2C8, I2C8_DESC);
+SS_PIN_DECL(R1, GPIOK7, SDA8);
+
+FUNC_GROUP_DECL(I2C8, P2, R1);
+
+#define VPIOFF0_DESC { SCU90, GENMASK(5, 4), 0, 0 }
+#define VPIOFF1_DESC { SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
+#define VPIRSVD_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+
+#define V2 104
+#define V2_DESC SIG_DESC_SET(SCU88, 0)
+SIG_EXPR_LIST_DECL_SINGLE(DASHN0, DASHN0, VPIRSVD_DESC, V2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM0, PWM0, V2_DESC, COND2);
+MS_PIN_DECL(V2, GPION0, DASHN0, PWM0);
+FUNC_GROUP_DECL(PWM0, V2);
+
+#define W2 105
+#define W2_DESC SIG_DESC_SET(SCU88, 1)
+SIG_EXPR_LIST_DECL_SINGLE(DASHN1, DASHN1, VPIRSVD_DESC, W2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM1, PWM1, W2_DESC, COND2);
+MS_PIN_DECL(W2, GPION1, DASHN1, PWM1);
+FUNC_GROUP_DECL(PWM1, W2);
+
+#define V3 106
+#define V3_DESC SIG_DESC_SET(SCU88, 2)
+SIG_EXPR_DECL(VPIG2, VPI24, VPI24_DESC, V3_DESC, COND2);
+SIG_EXPR_DECL(VPIG2, VPIRSVD, VPIRSVD_DESC, V3_DESC, COND2);
+SIG_EXPR_LIST_DECL_DUAL(VPIG2, VPI24, VPIRSVD);
+SIG_EXPR_LIST_DECL_SINGLE(PWM2, PWM2, V3_DESC, COND2);
+MS_PIN_DECL(V3, GPION2, VPIG2, PWM2);
+FUNC_GROUP_DECL(PWM2, V3);
+
+#define U3 107
+#define U3_DESC SIG_DESC_SET(SCU88, 3)
+SIG_EXPR_DECL(VPIG3, VPI24, VPI24_DESC, U3_DESC, COND2);
+SIG_EXPR_DECL(VPIG3, VPIRSVD, VPIRSVD_DESC, U3_DESC, COND2);
+SIG_EXPR_LIST_DECL_DUAL(VPIG3, VPI24, VPIRSVD);
+SIG_EXPR_LIST_DECL_SINGLE(PWM3, PWM3, U3_DESC, COND2);
+MS_PIN_DECL(U3, GPION3, VPIG3, PWM3);
+FUNC_GROUP_DECL(PWM3, U3);
+
+#define W3 108
+#define W3_DESC SIG_DESC_SET(SCU88, 4)
+SIG_EXPR_DECL(VPIG4, VPI24, VPI24_DESC, W3_DESC, COND2);
+SIG_EXPR_DECL(VPIG4, VPIRSVD, VPIRSVD_DESC, W3_DESC, COND2);
+SIG_EXPR_LIST_DECL_DUAL(VPIG4, VPI24, VPIRSVD);
+SIG_EXPR_LIST_DECL_SINGLE(PWM4, PWM4, W3_DESC, COND2);
+MS_PIN_DECL(W3, GPION4, VPIG4, PWM4);
+FUNC_GROUP_DECL(PWM4, W3);
+
+#define AA3 109
+#define AA3_DESC SIG_DESC_SET(SCU88, 5)
+SIG_EXPR_DECL(VPIG5, VPI24, VPI24_DESC, AA3_DESC, COND2);
+SIG_EXPR_DECL(VPIG5, VPIRSVD, VPIRSVD_DESC, AA3_DESC, COND2);
+SIG_EXPR_LIST_DECL_DUAL(VPIG5, VPI24, VPIRSVD);
+SIG_EXPR_LIST_DECL_SINGLE(PWM5, PWM5, AA3_DESC, COND2);
+MS_PIN_DECL(AA3, GPION5, VPIG5, PWM5);
+FUNC_GROUP_DECL(PWM5, AA3);
+
+#define Y3 110
+#define Y3_DESC SIG_DESC_SET(SCU88, 6)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG6, VPI24, VPI24_DESC, Y3_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM6, PWM6, Y3_DESC, COND2);
+MS_PIN_DECL(Y3, GPION6, VPIG6, PWM6);
+FUNC_GROUP_DECL(PWM6, Y3);
+
+#define T4 111
+#define T4_DESC SIG_DESC_SET(SCU88, 7)
+SIG_EXPR_LIST_DECL_SINGLE(VPIG7, VPI24, VPI24_DESC, T4_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(PWM7, PWM7, T4_DESC, COND2);
+MS_PIN_DECL(T4, GPION7, VPIG7, PWM7);
+FUNC_GROUP_DECL(PWM7, T4);
+
+#define V6 127
+SIG_EXPR_LIST_DECL_SINGLE(DASHV6, DASHV6, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 23));
+SS_PIN_DECL(V6, GPIOP7, DASHV6);
+
+#define I2C3_DESC SIG_DESC_SET(SCU90, 16)
+
+#define A11 128
+SIG_EXPR_LIST_DECL_SINGLE(SCL3, I2C3, I2C3_DESC);
+SS_PIN_DECL(A11, GPIOQ0, SCL3);
+
+#define A10 129
+SIG_EXPR_LIST_DECL_SINGLE(SDA3, I2C3, I2C3_DESC);
+SS_PIN_DECL(A10, GPIOQ1, SDA3);
+
+FUNC_GROUP_DECL(I2C3, A11, A10);
+
+#define I2C4_DESC SIG_DESC_SET(SCU90, 17)
+
+#define A9 130
+SIG_EXPR_LIST_DECL_SINGLE(SCL4, I2C4, I2C4_DESC);
+SS_PIN_DECL(A9, GPIOQ2, SCL4);
+
+#define B9 131
+SIG_EXPR_LIST_DECL_SINGLE(SDA4, I2C4, I2C4_DESC);
+SS_PIN_DECL(B9, GPIOQ3, SDA4);
+
+FUNC_GROUP_DECL(I2C4, A9, B9);
+
+#define I2C14_DESC SIG_DESC_SET(SCU90, 27)
+
+#define N21 132
+SIG_EXPR_LIST_DECL_SINGLE(SCL14, I2C14, I2C14_DESC);
+SS_PIN_DECL(N21, GPIOQ4, SCL14);
+
+#define N22 133
+SIG_EXPR_LIST_DECL_SINGLE(SDA14, I2C14, I2C14_DESC);
+SS_PIN_DECL(N22, GPIOQ5, SDA14);
+
+FUNC_GROUP_DECL(I2C14, N21, N22);
+
+#define B10 134
+SSSF_PIN_DECL(B10, GPIOQ6, OSCCLK, SIG_DESC_SET(SCU2C, 1));
+
+#define N20 135
+SSSF_PIN_DECL(N20, GPIOQ7, PEWAKE, SIG_DESC_SET(SCU2C, 29));
+
+#define D8 142
+SIG_EXPR_LIST_DECL_SINGLE(MDC1, MDIO1, SIG_DESC_SET(SCU88, 30));
+SS_PIN_DECL(D8, GPIOR6, MDC1);
+
+#define E10 143
+SIG_EXPR_LIST_DECL_SINGLE(MDIO1, MDIO1, SIG_DESC_SET(SCU88, 31));
+SS_PIN_DECL(E10, GPIOR7, MDIO1);
+
+FUNC_GROUP_DECL(MDIO1, D8, E10);
+
+/* RGMII1/RMII1 */
+
+#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
+#define RMII2_DESC SIG_DESC_BIT(HW_STRAP1, 7, 0)
+
+#define B5 152
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT0, GPIOT0, SIG_DESC_SET(SCUA0, 0));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RCLKO, RMII1, RMII1_DESC,
+ SIG_DESC_SET(SCU48, 29));
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXCK, RGMII1);
+MS_PIN_DECL_(B5, SIG_EXPR_LIST_PTR(GPIOT0), SIG_EXPR_LIST_PTR(RMII1RCLKO),
+ SIG_EXPR_LIST_PTR(RGMII1TXCK));
+
+#define E9 153
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT1, GPIOT1, SIG_DESC_SET(SCUA0, 1));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXEN, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXCTL, RGMII1);
+MS_PIN_DECL_(E9, SIG_EXPR_LIST_PTR(GPIOT1), SIG_EXPR_LIST_PTR(RMII1TXEN),
+ SIG_EXPR_LIST_PTR(RGMII1TXCTL));
+
+#define F9 154
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT2, GPIOT2, SIG_DESC_SET(SCUA0, 2));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXD0, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD0, RGMII1);
+MS_PIN_DECL_(F9, SIG_EXPR_LIST_PTR(GPIOT2), SIG_EXPR_LIST_PTR(RMII1TXD0),
+ SIG_EXPR_LIST_PTR(RGMII1TXD0));
+
+#define A5 155
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT3, GPIOT3, SIG_DESC_SET(SCUA0, 3));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1TXD1, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD1, RGMII1);
+MS_PIN_DECL_(A5, SIG_EXPR_LIST_PTR(GPIOT3), SIG_EXPR_LIST_PTR(RMII1TXD1),
+ SIG_EXPR_LIST_PTR(RGMII1TXD1));
+
+#define E7 156
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT4, GPIOT4, SIG_DESC_SET(SCUA0, 4));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1DASH0, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD2, RGMII1);
+MS_PIN_DECL_(E7, SIG_EXPR_LIST_PTR(GPIOT4), SIG_EXPR_LIST_PTR(RMII1DASH0),
+ SIG_EXPR_LIST_PTR(RGMII1TXD2));
+
+#define D7 157
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT5, GPIOT5, SIG_DESC_SET(SCUA0, 5));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1DASH1, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD3, RGMII1);
+MS_PIN_DECL_(D7, SIG_EXPR_LIST_PTR(GPIOT5), SIG_EXPR_LIST_PTR(RMII1DASH1),
+ SIG_EXPR_LIST_PTR(RGMII1TXD3));
+
+#define B2 158
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT6, GPIOT6, SIG_DESC_SET(SCUA0, 6));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RCLKO, RMII2, RMII2_DESC,
+ SIG_DESC_SET(SCU48, 30));
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCK, RGMII2);
+MS_PIN_DECL_(B2, SIG_EXPR_LIST_PTR(GPIOT6), SIG_EXPR_LIST_PTR(RMII2RCLKO),
+ SIG_EXPR_LIST_PTR(RGMII2TXCK));
+
+#define B1 159
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT7, GPIOT7, SIG_DESC_SET(SCUA0, 7));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXEN, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCTL, RGMII2);
+MS_PIN_DECL_(B1, SIG_EXPR_LIST_PTR(GPIOT7), SIG_EXPR_LIST_PTR(RMII2TXEN),
+ SIG_EXPR_LIST_PTR(RGMII2TXCTL));
+
+#define A2 160
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU0, GPIOU0, SIG_DESC_SET(SCUA0, 8));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD0, RGMII2);
+MS_PIN_DECL_(A2, SIG_EXPR_LIST_PTR(GPIOU0), SIG_EXPR_LIST_PTR(RMII2TXD0),
+ SIG_EXPR_LIST_PTR(RGMII2TXD0));
+
+#define B3 161
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU1, GPIOU1, SIG_DESC_SET(SCUA0, 9));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD1, RGMII2);
+MS_PIN_DECL_(B3, SIG_EXPR_LIST_PTR(GPIOU1), SIG_EXPR_LIST_PTR(RMII2TXD1),
+ SIG_EXPR_LIST_PTR(RGMII2TXD1));
+
+#define D5 162
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU2, GPIOU2, SIG_DESC_SET(SCUA0, 10));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2DASH0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD2, RGMII2);
+MS_PIN_DECL_(D5, SIG_EXPR_LIST_PTR(GPIOU2), SIG_EXPR_LIST_PTR(RMII2DASH0),
+ SIG_EXPR_LIST_PTR(RGMII2TXD2));
+
+#define D4 163
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU3, GPIOU3, SIG_DESC_SET(SCUA0, 11));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2DASH1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD3, RGMII2);
+MS_PIN_DECL_(D4, SIG_EXPR_LIST_PTR(GPIOU3), SIG_EXPR_LIST_PTR(RMII2DASH1),
+ SIG_EXPR_LIST_PTR(RGMII2TXD3));
+
+#define B4 164
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU4, GPIOU4, SIG_DESC_SET(SCUA0, 12));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RCLKI, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXCK, RGMII1);
+MS_PIN_DECL_(B4, SIG_EXPR_LIST_PTR(GPIOU4), SIG_EXPR_LIST_PTR(RMII1RCLKI),
+ SIG_EXPR_LIST_PTR(RGMII1RXCK));
+
+#define A4 165
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU5, GPIOU5, SIG_DESC_SET(SCUA0, 13));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1DASH2, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXCTL, RGMII1);
+MS_PIN_DECL_(A4, SIG_EXPR_LIST_PTR(GPIOU5), SIG_EXPR_LIST_PTR(RMII1DASH2),
+ SIG_EXPR_LIST_PTR(RGMII1RXCTL));
+
+#define A3 166
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU6, GPIOU6, SIG_DESC_SET(SCUA0, 14));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXD0, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD0, RGMII1);
+MS_PIN_DECL_(A3, SIG_EXPR_LIST_PTR(GPIOU6), SIG_EXPR_LIST_PTR(RMII1RXD0),
+ SIG_EXPR_LIST_PTR(RGMII1RXD0));
+
+#define D6 167
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU7, GPIOU7, SIG_DESC_SET(SCUA0, 15));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXD1, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD1, RGMII1);
+MS_PIN_DECL_(D6, SIG_EXPR_LIST_PTR(GPIOU7), SIG_EXPR_LIST_PTR(RMII1RXD1),
+ SIG_EXPR_LIST_PTR(RGMII1RXD1));
+
+#define C5 168
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV0, GPIOV0, SIG_DESC_SET(SCUA0, 16));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1CRSDV, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD2, RGMII1);
+MS_PIN_DECL_(C5, SIG_EXPR_LIST_PTR(GPIOV0), SIG_EXPR_LIST_PTR(RMII1CRSDV),
+ SIG_EXPR_LIST_PTR(RGMII1RXD2));
+
+#define C4 169
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV1, GPIOV1, SIG_DESC_SET(SCUA0, 17));
+SIG_EXPR_LIST_DECL_SINGLE(RMII1RXER, RMII1, RMII1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD3, RGMII1);
+MS_PIN_DECL_(C4, SIG_EXPR_LIST_PTR(GPIOV1), SIG_EXPR_LIST_PTR(RMII1RXER),
+ SIG_EXPR_LIST_PTR(RGMII1RXD3));
+
+FUNC_GROUP_DECL(RGMII1, B4, A4, A3, D6, C5, C4, B5, E9, F9, A5, E7, D7);
+FUNC_GROUP_DECL(RMII1, B4, A3, D6, C5, C4, B5, E9, F9, A5);
+
+#define C2 170
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV2, GPIOV2, SIG_DESC_SET(SCUA0, 18));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RCLKI, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCK, RGMII2);
+MS_PIN_DECL_(C2, SIG_EXPR_LIST_PTR(GPIOV2), SIG_EXPR_LIST_PTR(RMII2RCLKI),
+ SIG_EXPR_LIST_PTR(RGMII2RXCK));
+
+#define C1 171
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV3, GPIOV3, SIG_DESC_SET(SCUA0, 19));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2DASH2, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCTL, RGMII2);
+MS_PIN_DECL_(C1, SIG_EXPR_LIST_PTR(GPIOV3), SIG_EXPR_LIST_PTR(RMII2DASH2),
+ SIG_EXPR_LIST_PTR(RGMII2RXCTL));
+
+#define C3 172
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV4, GPIOV4, SIG_DESC_SET(SCUA0, 20));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD0, RGMII2);
+MS_PIN_DECL_(C3, SIG_EXPR_LIST_PTR(GPIOV4), SIG_EXPR_LIST_PTR(RMII2RXD0),
+ SIG_EXPR_LIST_PTR(RGMII2RXD0));
+
+#define D1 173
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV5, GPIOV5, SIG_DESC_SET(SCUA0, 21));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD1, RGMII2);
+MS_PIN_DECL_(D1, SIG_EXPR_LIST_PTR(GPIOV5), SIG_EXPR_LIST_PTR(RMII2RXD1),
+ SIG_EXPR_LIST_PTR(RGMII2RXD1));
+
+#define D2 174
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV6, GPIOV6, SIG_DESC_SET(SCUA0, 22));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2CRSDV, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD2, RGMII2);
+MS_PIN_DECL_(D2, SIG_EXPR_LIST_PTR(GPIOV6), SIG_EXPR_LIST_PTR(RMII2CRSDV),
+ SIG_EXPR_LIST_PTR(RGMII2RXD2));
+
+#define E6 175
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV7, GPIOV7, SIG_DESC_SET(SCUA0, 23));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXER, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD3, RGMII2);
+MS_PIN_DECL_(E6, SIG_EXPR_LIST_PTR(GPIOV7), SIG_EXPR_LIST_PTR(RMII2RXER),
+ SIG_EXPR_LIST_PTR(RGMII2RXD3));
+
+FUNC_GROUP_DECL(RGMII2, B2, B1, A2, B3, D5, D4, C2, C1, C3, D1, D2, E6);
+FUNC_GROUP_DECL(RMII2, B2, B1, A2, B3, C2, C3, D1, D2, E6);
+
+/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
+
+static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
+ ASPEED_PINCTRL_PIN(A10),
+ ASPEED_PINCTRL_PIN(A11),
+ ASPEED_PINCTRL_PIN(A12),
+ ASPEED_PINCTRL_PIN(A13),
+ ASPEED_PINCTRL_PIN(A14),
+ ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A5),
+ ASPEED_PINCTRL_PIN(A9),
+ ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(B1),
+ ASPEED_PINCTRL_PIN(B10),
+ ASPEED_PINCTRL_PIN(B11),
+ ASPEED_PINCTRL_PIN(B12),
+ ASPEED_PINCTRL_PIN(B13),
+ ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B2),
+ ASPEED_PINCTRL_PIN(B20),
+ ASPEED_PINCTRL_PIN(B3),
+ ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B5),
+ ASPEED_PINCTRL_PIN(B9),
+ ASPEED_PINCTRL_PIN(C1),
+ ASPEED_PINCTRL_PIN(C11),
+ ASPEED_PINCTRL_PIN(C12),
+ ASPEED_PINCTRL_PIN(C13),
+ ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C16),
+ ASPEED_PINCTRL_PIN(C18),
+ ASPEED_PINCTRL_PIN(C2),
+ ASPEED_PINCTRL_PIN(C20),
+ ASPEED_PINCTRL_PIN(C3),
+ ASPEED_PINCTRL_PIN(C4),
+ ASPEED_PINCTRL_PIN(C5),
+ ASPEED_PINCTRL_PIN(D1),
+ ASPEED_PINCTRL_PIN(D10),
+ ASPEED_PINCTRL_PIN(D2),
+ ASPEED_PINCTRL_PIN(D20),
+ ASPEED_PINCTRL_PIN(D21),
+ ASPEED_PINCTRL_PIN(D4),
+ ASPEED_PINCTRL_PIN(D5),
+ ASPEED_PINCTRL_PIN(D6),
+ ASPEED_PINCTRL_PIN(D7),
+ ASPEED_PINCTRL_PIN(D8),
+ ASPEED_PINCTRL_PIN(D9),
+ ASPEED_PINCTRL_PIN(E10),
+ ASPEED_PINCTRL_PIN(E12),
+ ASPEED_PINCTRL_PIN(E13),
+ ASPEED_PINCTRL_PIN(E15),
+ ASPEED_PINCTRL_PIN(E21),
+ ASPEED_PINCTRL_PIN(E6),
+ ASPEED_PINCTRL_PIN(E7),
+ ASPEED_PINCTRL_PIN(E9),
+ ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F9),
+ ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(L1),
+ ASPEED_PINCTRL_PIN(L2),
+ ASPEED_PINCTRL_PIN(L3),
+ ASPEED_PINCTRL_PIN(L4),
+ ASPEED_PINCTRL_PIN(N1),
+ ASPEED_PINCTRL_PIN(N2),
+ ASPEED_PINCTRL_PIN(N20),
+ ASPEED_PINCTRL_PIN(N21),
+ ASPEED_PINCTRL_PIN(N22),
+ ASPEED_PINCTRL_PIN(N3),
+ ASPEED_PINCTRL_PIN(N4),
+ ASPEED_PINCTRL_PIN(P1),
+ ASPEED_PINCTRL_PIN(P2),
+ ASPEED_PINCTRL_PIN(R1),
+ ASPEED_PINCTRL_PIN(T4),
+ ASPEED_PINCTRL_PIN(U3),
+ ASPEED_PINCTRL_PIN(V2),
+ ASPEED_PINCTRL_PIN(V3),
+ ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(W2),
+ ASPEED_PINCTRL_PIN(W3),
+ ASPEED_PINCTRL_PIN(Y3),
+};
+
+static const struct aspeed_pin_group aspeed_g5_groups[] = {
+ ASPEED_PINCTRL_GROUP(GPID0),
+ ASPEED_PINCTRL_GROUP(GPID2),
+ ASPEED_PINCTRL_GROUP(GPIE0),
+ ASPEED_PINCTRL_GROUP(I2C10),
+ ASPEED_PINCTRL_GROUP(I2C11),
+ ASPEED_PINCTRL_GROUP(I2C12),
+ ASPEED_PINCTRL_GROUP(I2C13),
+ ASPEED_PINCTRL_GROUP(I2C14),
+ ASPEED_PINCTRL_GROUP(I2C3),
+ ASPEED_PINCTRL_GROUP(I2C4),
+ ASPEED_PINCTRL_GROUP(I2C5),
+ ASPEED_PINCTRL_GROUP(I2C6),
+ ASPEED_PINCTRL_GROUP(I2C7),
+ ASPEED_PINCTRL_GROUP(I2C8),
+ ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(MAC1LINK),
+ ASPEED_PINCTRL_GROUP(MDIO1),
+ ASPEED_PINCTRL_GROUP(MDIO2),
+ ASPEED_PINCTRL_GROUP(OSCCLK),
+ ASPEED_PINCTRL_GROUP(PEWAKE),
+ ASPEED_PINCTRL_GROUP(PWM0),
+ ASPEED_PINCTRL_GROUP(PWM1),
+ ASPEED_PINCTRL_GROUP(PWM2),
+ ASPEED_PINCTRL_GROUP(PWM3),
+ ASPEED_PINCTRL_GROUP(PWM4),
+ ASPEED_PINCTRL_GROUP(PWM5),
+ ASPEED_PINCTRL_GROUP(PWM6),
+ ASPEED_PINCTRL_GROUP(PWM7),
+ ASPEED_PINCTRL_GROUP(RGMII1),
+ ASPEED_PINCTRL_GROUP(RGMII2),
+ ASPEED_PINCTRL_GROUP(RMII1),
+ ASPEED_PINCTRL_GROUP(RMII2),
+ ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(TIMER4),
+ ASPEED_PINCTRL_GROUP(TIMER5),
+ ASPEED_PINCTRL_GROUP(TIMER6),
+ ASPEED_PINCTRL_GROUP(TIMER7),
+ ASPEED_PINCTRL_GROUP(TIMER8),
+};
+
+static const struct aspeed_pin_function aspeed_g5_functions[] = {
+ ASPEED_PINCTRL_FUNC(GPID0),
+ ASPEED_PINCTRL_FUNC(GPID2),
+ ASPEED_PINCTRL_FUNC(GPIE0),
+ ASPEED_PINCTRL_FUNC(I2C10),
+ ASPEED_PINCTRL_FUNC(I2C11),
+ ASPEED_PINCTRL_FUNC(I2C12),
+ ASPEED_PINCTRL_FUNC(I2C13),
+ ASPEED_PINCTRL_FUNC(I2C14),
+ ASPEED_PINCTRL_FUNC(I2C3),
+ ASPEED_PINCTRL_FUNC(I2C4),
+ ASPEED_PINCTRL_FUNC(I2C5),
+ ASPEED_PINCTRL_FUNC(I2C6),
+ ASPEED_PINCTRL_FUNC(I2C7),
+ ASPEED_PINCTRL_FUNC(I2C8),
+ ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(MAC1LINK),
+ ASPEED_PINCTRL_FUNC(MDIO1),
+ ASPEED_PINCTRL_FUNC(MDIO2),
+ ASPEED_PINCTRL_FUNC(OSCCLK),
+ ASPEED_PINCTRL_FUNC(PEWAKE),
+ ASPEED_PINCTRL_FUNC(PWM0),
+ ASPEED_PINCTRL_FUNC(PWM1),
+ ASPEED_PINCTRL_FUNC(PWM2),
+ ASPEED_PINCTRL_FUNC(PWM3),
+ ASPEED_PINCTRL_FUNC(PWM4),
+ ASPEED_PINCTRL_FUNC(PWM5),
+ ASPEED_PINCTRL_FUNC(PWM6),
+ ASPEED_PINCTRL_FUNC(PWM7),
+ ASPEED_PINCTRL_FUNC(RGMII1),
+ ASPEED_PINCTRL_FUNC(RGMII2),
+ ASPEED_PINCTRL_FUNC(RMII1),
+ ASPEED_PINCTRL_FUNC(RMII2),
+ ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(TIMER4),
+ ASPEED_PINCTRL_FUNC(TIMER5),
+ ASPEED_PINCTRL_FUNC(TIMER6),
+ ASPEED_PINCTRL_FUNC(TIMER7),
+ ASPEED_PINCTRL_FUNC(TIMER8),
+};
+
+static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
+ .pins = aspeed_g5_pins,
+ .npins = ARRAY_SIZE(aspeed_g5_pins),
+ .groups = aspeed_g5_groups,
+ .ngroups = ARRAY_SIZE(aspeed_g5_groups),
+ .functions = aspeed_g5_functions,
+ .nfunctions = ARRAY_SIZE(aspeed_g5_functions),
+};
+
+static struct pinmux_ops aspeed_g5_pinmux_ops = {
+ .get_functions_count = aspeed_pinmux_get_fn_count,
+ .get_function_name = aspeed_pinmux_get_fn_name,
+ .get_function_groups = aspeed_pinmux_get_fn_groups,
+ .set_mux = aspeed_pinmux_set_mux,
+ .gpio_request_enable = aspeed_gpio_request_enable,
+ .strict = true,
+};
+
+static struct pinctrl_ops aspeed_g5_pinctrl_ops = {
+ .get_groups_count = aspeed_pinctrl_get_groups_count,
+ .get_group_name = aspeed_pinctrl_get_group_name,
+ .get_group_pins = aspeed_pinctrl_get_group_pins,
+ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
+ .name = "aspeed-g5-pinctrl",
+ .pins = aspeed_g5_pins,
+ .npins = ARRAY_SIZE(aspeed_g5_pins),
+ .pctlops = &aspeed_g5_pinctrl_ops,
+ .pmxops = &aspeed_g5_pinmux_ops,
+};
+
+static int aspeed_g5_pinctrl_probe(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++)
+ aspeed_g5_pins[i].number = i;
+
+ return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc,
+ &aspeed_g5_pinctrl_data);
+}
+
+static const struct of_device_id aspeed_g5_pinctrl_of_match[] = {
+ { .compatible = "aspeed,ast2500-pinctrl", },
+ { .compatible = "aspeed,g5-pinctrl", },
+ { },
+};
+
+static struct platform_driver aspeed_g5_pinctrl_driver = {
+ .probe = aspeed_g5_pinctrl_probe,
+ .driver = {
+ .name = "aspeed-g5-pinctrl",
+ .of_match_table = aspeed_g5_pinctrl_of_match,
+ },
+};
+
+static int aspeed_g5_pinctrl_init(void)
+{
+ return platform_driver_register(&aspeed_g5_pinctrl_driver);
+}
+
+arch_initcall(aspeed_g5_pinctrl_init);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
new file mode 100644
index 000000000000..0391f9f13f3e
--- /dev/null
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "../core.h"
+#include "pinctrl-aspeed.h"
+
+int aspeed_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->ngroups;
+}
+
+const char *aspeed_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->groups[group].name;
+}
+
+int aspeed_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group, const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pdata->groups[group].pins[0];
+ *npins = pdata->groups[group].npins;
+
+ return 0;
+}
+
+void aspeed_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+
+int aspeed_pinmux_get_fn_count(struct pinctrl_dev *pctldev)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->nfunctions;
+}
+
+const char *aspeed_pinmux_get_fn_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ return pdata->functions[function].name;
+}
+
+int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pdata->functions[function].groups;
+ *num_groups = pdata->functions[function].ngroups;
+
+ return 0;
+}
+
+static inline void aspeed_sig_desc_print_val(
+ const struct aspeed_sig_desc *desc, bool enable, u32 rv)
+{
+ pr_debug("SCU%x[0x%08x]=0x%x, got 0x%x from 0x%08x\n", desc->reg,
+ desc->mask, enable ? desc->enable : desc->disable,
+ (rv & desc->mask) >> __ffs(desc->mask), rv);
+}
+
+/**
+ * Query the enabled or disabled state of a signal descriptor
+ *
+ * @desc: The signal descriptor of interest
+ * @enabled: True to query the enabled state, false to query disabled state
+ * @map: The SCU regmap instance
+ *
+ * @return True if the descriptor's bitfield is configured to the state
+ * selected by @enabled, false otherwise
+ *
+ * Evaluation of descriptor state is non-trivial in that it is not a binary
+ * outcome: The bitfields can be greater than one bit in size and thus can take
+ * a value that is neither the enabled nor disabled state recorded in the
+ * descriptor (typically this means a different function to the one of interest
+ * is enabled). Thus we must explicitly test for either condition as required.
+ */
+static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
+ bool enabled, struct regmap *map)
+{
+ unsigned int raw;
+ u32 want;
+
+ if (regmap_read(map, desc->reg, &raw) < 0)
+ return false;
+
+ aspeed_sig_desc_print_val(desc, enabled, raw);
+ want = enabled ? desc->enable : desc->disable;
+
+ return ((raw & desc->mask) >> __ffs(desc->mask)) == want;
+}
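+/*
+ * Illustrative example: for a hypothetical descriptor { SCU90, GENMASK(5, 4),
+ * 1, 0 } whose register field currently reads 2, both the enabled and the
+ * disabled query above return false; the field is in a third state that
+ * belongs to some other function.
+ */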
+
+/**
+ * Query the enabled or disabled state for a mux function's signal on a pin
+ *
+ * @expr: An expression controlling the signal for a mux function on a pin
+ * @enabled: True to query the enabled state, false to query disabled state
+ * @map: The SCU regmap instance
+ *
+ * @return True if the expression composed by @enabled evaluates true, false
+ * otherwise
+ *
+ * A mux function is enabled or disabled if the function's signal expression
+ * for each pin in the function's pin group evaluates true for the desired
+ * state. A signal expression evaluates true if all of its associated signal
+ * descriptors evaluate true for the desired state.
+ *
+ * If an expression's state is described by more than one bit, either through
+ * multi-bit bitfields in a single signal descriptor or through multiple signal
+ * descriptors of a single bit, then it is possible for the expression to be in
+ * neither the enabled nor disabled state. Thus we must explicitly test for
+ * either condition as required.
+ */
+static bool aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
+ bool enabled, struct regmap *map)
+{
+ int i;
+
+ for (i = 0; i < expr->ndescs; i++) {
+ const struct aspeed_sig_desc *desc = &expr->descs[i];
+
+ if (!aspeed_sig_desc_eval(desc, enabled, map))
+ return false;
+ }
+
+ return true;
+}
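+/*
+ * For example, the VPIDE/VPI18 expression in pinctrl-aspeed-g4.c carries two
+ * descriptors (the SCU90[5:4] mode field and SCU84[17]); both must evaluate
+ * true for the expression to be considered enabled.
+ */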
+
+/**
+ * Configure a pin's signal by applying an expression's descriptor state for
+ * all descriptors in the expression.
+ *
+ * @expr: The expression associated with the function whose signal is to be
+ * configured
+ * @enable: true to enable a function's signal through a pin's signal
+ * expression, false to disable the function's signal
+ * @map: The SCU's regmap instance for pinmux register access.
+ *
+ * @return true if the expression is configured as requested, false otherwise
+ */
+static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
+ bool enable, struct regmap *map)
+{
+ int i;
+ bool ret;
+
+ ret = aspeed_sig_expr_eval(expr, enable, map);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < expr->ndescs; i++) {
+ const struct aspeed_sig_desc *desc = &expr->descs[i];
+ u32 pattern = enable ? desc->enable : desc->disable;
+
+ /*
+ * Strap registers are configured in hardware or by early-boot
+ * firmware. Treat them as read-only even though we can write
+ * them. This may mean that certain functions cannot be
+ * deconfigured and is the reason we re-evaluate after writing
+ * all descriptor bits.
+ */
+ if (desc->reg == HW_STRAP1 || desc->reg == HW_STRAP2)
+ continue;
+
+ ret = regmap_update_bits(map, desc->reg, desc->mask,
+ pattern << __ffs(desc->mask)) == 0;
+
+ if (!ret)
+ return ret;
+ }
+
+ return aspeed_sig_expr_eval(expr, enable, map);
+}
+
+static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
+ struct regmap *map)
+{
+ return aspeed_sig_expr_set(expr, true, map);
+}
+
+static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
+ struct regmap *map)
+{
+ return aspeed_sig_expr_set(expr, false, map);
+}
+
+/**
+ * Disable a signal on a pin by disabling all provided signal expressions.
+ *
+ * @exprs: The list of signal expressions (from a priority level on a pin)
+ * @map: The SCU's regmap instance for pinmux register access.
+ *
+ * @return true if all expressions in the list are successfully disabled, false
+ * otherwise
+ */
+static bool aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
+ struct regmap *map)
+{
+ bool disabled = true;
+
+ if (!exprs)
+ return true;
+
+ while (*exprs) {
+ bool ret;
+
+ ret = aspeed_sig_expr_disable(*exprs, map);
+ disabled = disabled && ret;
+
+ exprs++;
+ }
+
+ return disabled;
+}
+
+/**
+ * Search for the signal expression needed to enable the pin's signal for the
+ * requested function.
+ *
+ * @exprs: List of signal expressions (haystack)
+ * @name: The name of the requested function (needle)
+ *
+ * @return A pointer to the signal expression whose function tag matches the
+ * provided name, otherwise NULL.
+ *
+ */
+static const struct aspeed_sig_expr *aspeed_find_expr_by_name(
+ const struct aspeed_sig_expr **exprs, const char *name)
+{
+ while (*exprs) {
+ if (strcmp((*exprs)->function, name) == 0)
+ return *exprs;
+ exprs++;
+ }
+
+ return NULL;
+}
+
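+/*
+ * Build a comma-separated list of a given attribute (e.g. the signal or
+ * function names) across all expressions defined for a pin, for use in
+ * diagnostic messages. Returns a kmalloc()ed string that the caller must
+ * kfree(), or NULL if allocation fails or no expressions are defined.
+ */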
+static char *get_defined_attribute(const struct aspeed_pin_desc *pdesc,
+ const char *(*get)(
+ const struct aspeed_sig_expr *))
+{
+ char *found = NULL;
+ size_t len = 0;
+ const struct aspeed_sig_expr ***prios, **funcs, *expr;
+
+ prios = pdesc->prios;
+
+ while ((funcs = *prios)) {
+ while ((expr = *funcs)) {
+ const char *str = get(expr);
+ size_t delta = strlen(str) + 2;
+ char *expanded;
+
+ expanded = krealloc(found, len + delta + 1, GFP_KERNEL);
+ if (!expanded) {
+ kfree(found);
+ return expanded;
+ }
+
+ found = expanded;
+ found[len] = '\0';
+ len += delta;
+
+ strcat(found, str);
+ strcat(found, ", ");
+
+ funcs++;
+ }
+ prios++;
+ }
+
+ if (len < 2) {
+ kfree(found);
+ return NULL;
+ }
+
+ found[len - 2] = '\0';
+
+ return found;
+}
+
+static const char *aspeed_sig_expr_function(const struct aspeed_sig_expr *expr)
+{
+ return expr->function;
+}
+
+static char *get_defined_functions(const struct aspeed_pin_desc *pdesc)
+{
+ return get_defined_attribute(pdesc, aspeed_sig_expr_function);
+}
+
+static const char *aspeed_sig_expr_signal(const struct aspeed_sig_expr *expr)
+{
+ return expr->signal;
+}
+
+static char *get_defined_signals(const struct aspeed_pin_desc *pdesc)
+{
+ return get_defined_attribute(pdesc, aspeed_sig_expr_signal);
+}
+
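+/*
+ * Enable a function on a group by walking each pin in the group: any signals
+ * of higher priority than the requested function's signal are disabled first,
+ * then the expression tagged with the function's name is enabled. Returns 0
+ * on success, or a negative errno if the function is not defined on a pin or
+ * a register update fails.
+ */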
+int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group)
+{
+ int i;
+ const struct aspeed_pinctrl_data *pdata =
+ pinctrl_dev_get_drvdata(pctldev);
+ const struct aspeed_pin_group *pgroup = &pdata->groups[group];
+ const struct aspeed_pin_function *pfunc =
+ &pdata->functions[function];
+
+ for (i = 0; i < pgroup->npins; i++) {
+ int pin = pgroup->pins[i];
+ const struct aspeed_pin_desc *pdesc = pdata->pins[pin].drv_data;
+ const struct aspeed_sig_expr *expr = NULL;
+ const struct aspeed_sig_expr **funcs;
+ const struct aspeed_sig_expr ***prios;
+
+ if (!pdesc)
+ return -EINVAL;
+
+ prios = pdesc->prios;
+
+ if (!prios)
+ continue;
+
+ /* Disable functions at a higher priority than that requested */
+ while ((funcs = *prios)) {
+ expr = aspeed_find_expr_by_name(funcs, pfunc->name);
+
+ if (expr)
+ break;
+
+ if (!aspeed_disable_sig(funcs, pdata->map))
+ return -EPERM;
+
+ prios++;
+ }
+
+ if (!expr) {
+ char *functions = get_defined_functions(pdesc);
+ char *signals = get_defined_signals(pdesc);
+
+ pr_warn("No function %s found on pin %s (%d). Found signal(s) %s for function(s) %s\n",
+ pfunc->name, pdesc->name, pin, signals,
+ functions);
+ kfree(signals);
+ kfree(functions);
+
+ return -ENXIO;
+ }
+
+ if (!aspeed_sig_expr_enable(expr, pdata->map))
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
+{
+ /*
+ * The signal type is GPIO if the signal name has "GPIO" as a prefix.
+ * strncmp (rather than strcmp) is used to implement the prefix
+ * requirement.
+ *
+ * expr->signal might look like "GPIOT3" in the GPIO case.
+ */
+ return strncmp(expr->signal, "GPIO", 4) == 0;
+}
+
+static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
+{
+ if (!exprs)
+ return false;
+
+ while (*exprs) {
+ if (aspeed_expr_is_gpio(*exprs))
+ return true;
+ exprs++;
+ }
+
+ return false;
+}
+
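+/*
+ * Mux a pin as GPIO by disabling every signal of higher priority than the
+ * pin's GPIO signal. If GPIO is not the pin's lowest priority signal type,
+ * the GPIO expression itself is then enabled as well.
+ */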
+int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ const struct aspeed_pinctrl_data *pdata =
+ pinctrl_dev_get_drvdata(pctldev);
+ const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
+ const struct aspeed_sig_expr ***prios, **funcs, *expr;
+
+ if (!pdesc)
+ return -EINVAL;
+
+ prios = pdesc->prios;
+
+ if (!prios)
+ return -ENXIO;
+
+ /* Disable any functions of higher priority than GPIO */
+ while ((funcs = *prios)) {
+ if (aspeed_gpio_in_exprs(funcs))
+ break;
+
+ if (!aspeed_disable_sig(funcs, pdata->map))
+ return -EPERM;
+
+ prios++;
+ }
+
+ if (!funcs) {
+ char *signals = get_defined_signals(pdesc);
+
+ pr_warn("No GPIO signal type found on pin %s (%d). Found: %s\n",
+ pdesc->name, offset, signals);
+ kfree(signals);
+
+ return -ENXIO;
+ }
+
+ expr = *funcs;
+
+ /*
+ * Disabling all higher-priority expressions is enough to enable the
+ * lowest-priority signal type. As such it has no associated
+ * expression.
+ */
+ if (!expr)
+ return 0;
+
+ /*
+ * If GPIO is not the lowest priority signal type, assume there is only
+ * one expression defined to enable the GPIO function
+ */
+ if (!aspeed_sig_expr_enable(expr, pdata->map))
+ return -EPERM;
+
+ return 0;
+}
+
+int aspeed_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *pdesc,
+ struct aspeed_pinctrl_data *pdata)
+{
+ struct device *parent;
+ struct pinctrl_dev *pctl;
+
+ parent = pdev->dev.parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "No parent for syscon pincontroller\n");
+ return -ENODEV;
+ }
+
+ pdata->map = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(pdata->map)) {
+ dev_err(&pdev->dev, "No regmap for syscon pincontroller parent\n");
+ return PTR_ERR(pdata->map);
+ }
+
+ pctl = pinctrl_register(pdesc, &pdev->dev, pdata);
+
+ if (IS_ERR(pctl)) {
+ dev_err(&pdev->dev, "Failed to register pinctrl\n");
+ return PTR_ERR(pctl);
+ }
+
+ platform_set_drvdata(pdev, pdata);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
new file mode 100644
index 000000000000..3e72ef8c54bf
--- /dev/null
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef PINCTRL_ASPEED
+#define PINCTRL_ASPEED
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/regmap.h>
+
+/*
+ * The ASPEED SoCs typically provide more than 200 pins for GPIO and other
+ * functions. The SoC function enabled on a pin is determined on a priority
+ * basis where a given pin can provide a number of different signal types.
+ *
+ * The signal active on a pin is described by both a priority level and
+ * compound logical expressions involving multiple operators, registers and
+ * bits. Some difficulty arises as the pin's function bit masks for each
+ * priority level are frequently not the same (i.e. cannot just flip a bit to
+ * change from a high to low priority signal), or even in the same register.
+ * Further, not all signals can be unmuxed, as some expressions depend on
+ * values in the hardware strapping register (which is treated as read-only).
+ *
+ * SoC Multi-function Pin Expression Examples
+ * ------------------------------------------
+ *
+ * Here are some sample mux configurations from the AST2400 and AST2500
+ * datasheets to illustrate the corner cases, roughly in order of least to most
+ * corner. The signal priorities are in descending order from P0 (highest).
+ *
+ * D6 is a pin with a single function (beside GPIO); a high priority signal
+ * that participates in one function:
+ *
+ * Ball | Default | P0 Signal | P0 Expression | P1 Signal | P1 Expression | Other
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ * D6 GPIOA0 MAC1LINK SCU80[0]=1 GPIOA0
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ *
+ * C5 is a multi-signal pin (high and low priority signals). Here we touch
+ * different registers for the different functions that enable each signal:
+ *
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ * C5 GPIOA4 SCL9 SCU90[22]=1 TIMER5 SCU80[4]=1 GPIOA4
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ *
+ * E19 is a single-signal pin with two functions that influence the active
+ * signal. In this case both bits have the same meaning - enable a dedicated
+ * LPC reset pin. However it's not always the case that the bits in the
+ * OR-relationship have the same meaning.
+ *
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ * E19 GPIOB4 LPCRST# SCU80[12]=1 | Strap[14]=1 GPIOB4
+ * -----+---------+-----------+-----------------------------+-----------+---------------+----------
+ *
+ * For example, pin B19 has a low-priority signal that's enabled by two
+ * distinct SoC functions: A specific SIOPBI bit in register SCUA4, and an ACPI
+ * bit in the STRAP register. The ACPI bit configures signals on pins in
+ * addition to B19. Both of the low priority functions as well as the high
+ * priority function must be disabled for GPIOF1 to be used.
+ *
+ * Ball | Default | P0 Signal | P0 Expression | P1 Signal | P1 Expression | Other
+ * -----+---------+-----------+-----------------------------------------+-----------+----------------------------------------+----------
+ * B19 GPIOF1 NDCD4 SCU80[25]=1 SIOPBI# SCUA4[12]=1 | Strap[19]=0 GPIOF1
+ * -----+---------+-----------+-----------------------------------------+-----------+----------------------------------------+----------
+ *
+ * For pin E18, the SoC ANDs the expected state of three bits to determine the
+ * pin's active signal:
+ *
+ * * SCU3C[3]: Enable external SOC reset function
+ * * SCU80[15]: Enable SPICS1# or EXTRST# function pin
+ * * SCU90[31]: Select SPI interface CS# output
+ *
+ * -----+---------+-----------+-----------------------------------------+-----------+----------------------------------------+----------
+ * E18 GPIOB7 EXTRST# SCU3C[3]=1 & SCU80[15]=1 & SCU90[31]=0 SPICS1# SCU3C[3]=1 & SCU80[15]=1 & SCU90[31]=1 GPIOB7
+ * -----+---------+-----------+-----------------------------------------+-----------+----------------------------------------+----------
+ *
+ * (Bits SCU3C[3] and SCU80[15] appear to only be used in the expressions for
+ * selecting the signals on pin E18)
+ *
+ * Pin T5 is a multi-signal pin with a more complex configuration:
+ *
+ * Ball | Default | P0 Signal | P0 Expression | P1 Signal | P1 Expression | Other
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ * T5 GPIOL1 VPIDE SCU90[5:4]!=0 & SCU84[17]=1 NDCD1 SCU84[17]=1 GPIOL1
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ *
+ * The high priority signal configuration is best thought of in terms of its
+ * exploded form, with reference to the SCU90[5:4] bits:
+ *
+ * * SCU90[5:4]=00: disable
+ * * SCU90[5:4]=01: 18 bits (R6/G6/B6) video mode.
+ * * SCU90[5:4]=10: 24 bits (R8/G8/B8) video mode.
+ * * SCU90[5:4]=11: 30 bits (R10/G10/B10) video mode.
+ *
+ * Re-writing:
+ *
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ * T5 GPIOL1 VPIDE (SCU90[5:4]=1 & SCU84[17]=1) NDCD1 SCU84[17]=1 GPIOL1
+ * | (SCU90[5:4]=2 & SCU84[17]=1)
+ * | (SCU90[5:4]=3 & SCU84[17]=1)
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ *
+ * For reference, the SCU84[17] bit configures the "UART1 NDCD1 or Video VPIDE
+ * function pin", where the signal itself is determined by whether SCU90[5:4]
+ * is disabled or in one of the 18, 24 or 30 bit video modes.
+ *
+ * Other video-input-related pins require an explicit state in SCU90[5:4], e.g.
+ * W1 and U5:
+ *
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ * W1 GPIOL6 VPIB0 SCU90[5:4]=3 & SCU84[22]=1 TXD1 SCU84[22]=1 GPIOL6
+ * U5 GPIOL7 VPIB1 SCU90[5:4]=3 & SCU84[23]=1 RXD1 SCU84[23]=1 GPIOL7
+ * -----+---------+-----------+------------------------------+-----------+---------------+----------
+ *
+ * The examples of T5 and W1 are particularly fertile, as they also demonstrate
+ * that despite operating as part of the video input bus, each signal needs to
+ * be enabled individually via its own SCU84 (in the cases of T5 and W1)
+ * register bit. This is a little crazy if the bus doesn't have optional
+ * signals, but is used to decent effect with some of the UARTs where not all
+ * signals are required. However, this isn't done consistently - UART1 is
+ * enabled on a per-pin basis, and by contrast, all signals for UART6 are
+ * enabled by a single bit.
+ *
+ * Further, the high and low priority signals listed in the table above share
+ * a configuration bit. The VPI signals should operate in concert in a single
+ * function, but the UART signals should retain the ability to be configured
+ * independently. This pushes the implementation down the path of tagging a
+ * signal's expressions with the function they participate in, rather than
+ * defining masks affecting multiple signals per function. The latter approach
+ * fails in this instance where applying the configuration for the UART pin of
+ * interest will stomp on the state of other UART signals when disabling the
+ * VPI functions on the current pin.
+ *
+ * Ball | Default | P0 Signal | P0 Expression | P1 Signal | P1 Expression | Other
+ * -----+------------+-----------+---------------------------+-----------+---------------+------------
+ * A12 RGMII1TXCK GPIOT0 SCUA0[0]=1 RMII1TXEN Strap[6]=0 RGMII1TXCK
+ * B12 RGMII1TXCTL GPIOT1 SCUA0[1]=1 – Strap[6]=0 RGMII1TXCTL
+ * -----+------------+-----------+---------------------------+-----------+---------------+------------
+ *
+ * A12 demonstrates that the "Other" signal isn't always GPIO - in this case
+ * GPIOT0 is a high-priority signal and RGMII1TXCK is Other. Thus, GPIO
+ * should be treated like any other signal type with full function expression
+ * requirements, and not assumed to be the default case. Separately, GPIOT0 and
+ * GPIOT1's signal descriptor bits are distinct, therefore we must iterate all
+ * pins in the function's group to disable the higher-priority signals such
+ * that the signal for the function of interest is correctly enabled.
+ *
+ * Finally, three priority levels aren't always enough; the AST2500 brings with
+ * it 18 pins with five priority levels, though those pins only use three of
+ * the five levels.
+ *
+ * Ultimately, the requirements to control pins in the examples above drive
+ * the design:
+ *
+ * * Pins provide signals according to functions activated in the mux
+ * configuration
+ *
+ * * Pins provide up to five signal types in a priority order
+ *
+ * * For the priority levels defined on a pin, each priority provides one signal
+ *
+ * * Enabling lower priority signals requires higher priority signals be
+ * disabled
+ *
+ * * A function represents a set of signals; functions are distinct if their
+ * sets of signals are not equal
+ *
+ * * Signals participate in one or more functions
+ *
+ * * A function is described by an expression of one or more signal
+ * descriptors, which compare bit values in a register
+ *
+ * * A signal expression is the smallest set of signal descriptors whose
+ * comparisons must evaluate 'true' for a signal to be enabled on a pin.
+ *
+ * * A function's signal is active on a pin if evaluating all signal
+ * descriptors in the pin's signal expression for the function yields a 'true'
+ * result
+ *
+ * * A signal at a given priority on a given pin is active if any of the
+ * functions in which the signal participates are active, and no higher
+ * priority signal on the pin is active
+ *
+ * * GPIO is configured per-pin
+ *
+ * And so:
+ *
+ * * To disable a signal, any function(s) activating the signal must be
+ * disabled
+ *
+ * * Each pin must know the signal expressions of functions in which it
+ * participates, for the purpose of enabling the Other function. This is done
+ * by deactivating all functions that activate higher priority signals on the
+ * pin.
+ *
+ * As a concrete example:
+ *
+ * * T5 provides three signal types: VPIDE, NDCD1 and GPIO
+ *
+ * * The VPIDE signal participates in 3 functions: VPI18, VPI24 and VPI30
+ *
+ * * The NDCD1 signal participates in just its own NDCD1 function
+ *
+ * * VPIDE is high priority, NDCD1 is low priority, and GPIOL1 is the least
+ * prioritised
+ *
+ * * The prerequisite for activating the NDCD1 signal is that the VPI18, VPI24
+ * and VPI30 functions all be disabled
+ *
+ * * Similarly, all of the VPI18, VPI24, VPI30 and NDCD1 functions must be
+ * disabled to provide GPIOL1
+ *
+ * Considerations
+ * --------------
+ *
+ * If pinctrl allows us to allocate a pin we can configure a function without
+ * concern for the function of already allocated pins, if pin groups are
+ * created with respect to the SoC functions in which they participate. This is
+ * intuitive, but it did not feel obvious from the bit/pin relationships.
+ *
+ * Conversely, failing to allocate all pins in a group indicates some bits (as
+ * well as pins) required for the group's configuration will already be in use,
+ * likely in a way that's inconsistent with the requirements of the failed
+ * group.
+ */
+
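+/*
+ * As an illustrative sketch only (the real tables live in the SoC-specific
+ * drivers), the T5 example above might be captured with the macros defined
+ * later in this header roughly as follows:
+ *
+ *     SIG_EXPR_DECL(VPIDE, VPI18, { SCU90, GENMASK(5, 4), 1, 0 },
+ *             SIG_DESC_SET(SCU84, 17));
+ *     ... (VPI24 and VPI30 declared similarly) ...
+ *     SIG_EXPR_LIST_DECL(VPIDE, SIG_EXPR_PTR(VPIDE, VPI18),
+ *             SIG_EXPR_PTR(VPIDE, VPI24), SIG_EXPR_PTR(VPIDE, VPI30));
+ *     SIG_EXPR_LIST_DECL_SINGLE(NDCD1, NDCD1, SIG_DESC_SET(SCU84, 17));
+ *     MS_PIN_DECL(T5, GPIOL1, VPIDE, NDCD1);
+ */
+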
+/*
+ * The "Multi-function Pins Mapping and Control" table in the SoC datasheet
+ * references registers by the device/offset mnemonic. The register macros
+ * below are named the same way to ease transcription and verification (as
+ * opposed to naming them e.g. PINMUX_CTRL_[0-9]). Further, signal expressions
+ * reference registers beyond those dedicated to pinmux, such as the system
+ * reset control and MAC clock configuration registers. The AST2500 goes a step
+ * further and references registers in the graphics IP block, but that isn't
+ * handled yet.
+ */
+#define SCU2C 0x2C /* Misc. Control Register */
+#define SCU3C 0x3C /* System Reset Control/Status Register */
+#define SCU48 0x48 /* MAC Interface Clock Delay Setting */
+#define HW_STRAP1 0x70 /* AST2400 strapping is 33 bits, split across two registers */
+#define SCU80 0x80 /* Multi-function Pin Control #1 */
+#define SCU84 0x84 /* Multi-function Pin Control #2 */
+#define SCU88 0x88 /* Multi-function Pin Control #3 */
+#define SCU8C 0x8C /* Multi-function Pin Control #4 */
+#define SCU90 0x90 /* Multi-function Pin Control #5 */
+#define SCU94 0x94 /* Multi-function Pin Control #6 */
+#define SCUA0 0xA0 /* Multi-function Pin Control #7 */
+#define SCUA4 0xA4 /* Multi-function Pin Control #8 */
+#define SCUA8 0xA8 /* Multi-function Pin Control #9 */
+#define HW_STRAP2 0xD0 /* Strapping */
+
+/**
+ * A signal descriptor, which describes the register, bits and the
+ * enable/disable values that should be compared or written.
+ *
+ * @reg: The register offset from base in bytes
+ * @mask: The mask to apply to the register. The lowest set bit of the mask is
+ * used to derive the shift value.
+ * @enable: The value that enables the function. Value should be in the LSBs,
+ * not at the position of the mask.
+ * @disable: The value that disables the function. Value should be in the
+ * LSBs, not at the position of the mask.
+ */
+struct aspeed_sig_desc {
+ unsigned int reg;
+ u32 mask;
+ u32 enable;
+ u32 disable;
+};
+
+/**
+ * Describes a signal expression. The expression is evaluated by ANDing the
+ * evaluation of the descriptors.
+ *
+ * @signal: The signal name for the priority level on the pin. If the signal
+ * type is GPIO, then the signal name must begin with the string
+ * "GPIO", e.g. GPIOA0, GPIOT4 etc.
+ * @function: The name of the function the signal participates in for the
+ * associated expression
+ * @ndescs: The number of signal descriptors in the expression
+ * @descs: Pointer to an array of signal descriptors that comprise the
+ * function expression
+ */
+struct aspeed_sig_expr {
+ const char *signal;
+ const char *function;
+ int ndescs;
+ const struct aspeed_sig_desc *descs;
+};
+
+/**
+ * A struct capturing the list of expressions enabling signals at each priority
+ * for a given pin. The signal configuration for a priority level is evaluated
+ * by ORing the evaluation of the signal expressions in the respective
+ * priority's list.
+ *
+ * @name: A name for the pin
+ * @prios: A pointer to an array of expression list pointers
+ *
+ */
+struct aspeed_pin_desc {
+ const char *name;
+ const struct aspeed_sig_expr ***prios;
+};
+
+/* Macro hell */
+
+/**
+ * Short-hand macro for describing a configuration enabled by the state of one
+ * bit. The disable value is derived.
+ *
+ * @reg: The signal's associated register, offset from base
+ * @idx: The signal's bit index in the register
+ * @val: The value (0 or 1) that enables the function
+ */
+#define SIG_DESC_BIT(reg, idx, val) \
+ { reg, BIT_MASK(idx), val, (((val) + 1) & 1) }
+
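+/*
+ * For example, SIG_DESC_BIT(SCU80, 0, 1) expands to
+ * { SCU80, BIT_MASK(0), 1, 0 }: the signal is enabled when SCU80[0] is 1
+ * (as for the D6/MAC1LINK case above), with the disable value derived as
+ * ((1 + 1) & 1) = 0.
+ */
+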
+/**
+ * A further short-hand macro describing a configuration enabled with a set bit.
+ *
+ * @reg: The configuration's associated register, offset from base
+ * @idx: The configuration's bit index in the register
+ */
+#define SIG_DESC_SET(reg, idx) SIG_DESC_BIT(reg, idx, 1)
+
+#define SIG_DESC_LIST_SYM(sig, func) sig_descs_ ## sig ## _ ## func
+#define SIG_DESC_LIST_DECL(sig, func, ...) \
+ static const struct aspeed_sig_desc SIG_DESC_LIST_SYM(sig, func)[] = \
+ { __VA_ARGS__ }
+
+#define SIG_EXPR_SYM(sig, func) sig_expr_ ## sig ## _ ## func
+#define SIG_EXPR_DECL_(sig, func) \
+ static const struct aspeed_sig_expr SIG_EXPR_SYM(sig, func) = \
+ { \
+ .signal = #sig, \
+ .function = #func, \
+ .ndescs = ARRAY_SIZE(SIG_DESC_LIST_SYM(sig, func)), \
+ .descs = &(SIG_DESC_LIST_SYM(sig, func))[0], \
+ }
+
+/**
+ * Declare a signal expression.
+ *
+ * @sig: A macro symbol name for the signal (is subjected to stringification
+ * and token pasting)
+ * @func: The function in which the signal is participating
+ * @...: Signal descriptors that define the signal expression
+ *
+ * For example, the following declares the ROMD8 signal for the ROM16 function:
+ *
+ * SIG_EXPR_DECL(ROMD8, ROM16, SIG_DESC_SET(SCU90, 6));
+ *
+ * And with multiple signal descriptors:
+ *
+ * SIG_EXPR_DECL(ROMD8, ROM16S, SIG_DESC_SET(HW_STRAP1, 4),
+ * { HW_STRAP1, GENMASK(1, 0), 0, 0 });
+ */
+#define SIG_EXPR_DECL(sig, func, ...) \
+ SIG_DESC_LIST_DECL(sig, func, __VA_ARGS__); \
+ SIG_EXPR_DECL_(sig, func)
+
+/**
+ * Declare a pointer to a signal expression
+ *
+ * @sig: The macro symbol name for the signal (subjected to token pasting)
+ * @func: The macro symbol name for the function (subjected to token pasting)
+ */
+#define SIG_EXPR_PTR(sig, func) (&SIG_EXPR_SYM(sig, func))
+
+#define SIG_EXPR_LIST_SYM(sig) sig_exprs_ ## sig
+
+/**
+ * Declare a signal expression list for reference in a struct aspeed_pin_desc.
+ *
+ * @sig: A macro symbol name for the signal (is subjected to token pasting)
+ * @...: Signal expression structure pointers (use SIG_EXPR_PTR())
+ *
+ * For example, the 16-bit ROM bus can be enabled by one of two possible signal
+ * expressions:
+ *
+ * SIG_EXPR_DECL(ROMD8, ROM16, SIG_DESC_SET(SCU90, 6));
+ * SIG_EXPR_DECL(ROMD8, ROM16S, SIG_DESC_SET(HW_STRAP1, 4),
+ * { HW_STRAP1, GENMASK(1, 0), 0, 0 });
+ * SIG_EXPR_LIST_DECL(ROMD8, SIG_EXPR_PTR(ROMD8, ROM16),
+ * SIG_EXPR_PTR(ROMD8, ROM16S));
+ */
+#define SIG_EXPR_LIST_DECL(sig, ...) \
+ static const struct aspeed_sig_expr *SIG_EXPR_LIST_SYM(sig)[] = \
+ { __VA_ARGS__, NULL }
+
+/**
+ * A short-hand macro for declaring a function expression and an expression
+ * list with a single function.
+ *
+ * @sig: A macro symbol name for the signal (is subjected to token pasting)
+ * @func: A macro symbol name for the function (is subjected to token pasting)
+ * @...: Signal descriptors that define the function expression
+ *
+ * For example, signal NCTS6 participates in its own function with one group:
+ *
+ * SIG_EXPR_LIST_DECL_SINGLE(NCTS6, NCTS6, SIG_DESC_SET(SCU90, 7));
+ */
+#define SIG_EXPR_LIST_DECL_SINGLE(sig, func, ...) \
+ SIG_DESC_LIST_DECL(sig, func, __VA_ARGS__); \
+ SIG_EXPR_DECL_(sig, func); \
+ SIG_EXPR_LIST_DECL(sig, SIG_EXPR_PTR(sig, func))
+
+#define SIG_EXPR_LIST_DECL_DUAL(sig, f0, f1) \
+ SIG_EXPR_LIST_DECL(sig, SIG_EXPR_PTR(sig, f0), SIG_EXPR_PTR(sig, f1))
+
+#define SIG_EXPR_LIST_PTR(sig) (&SIG_EXPR_LIST_SYM(sig)[0])
+
+#define PIN_EXPRS_SYM(pin) pin_exprs_ ## pin
+#define PIN_EXPRS_PTR(pin) (&PIN_EXPRS_SYM(pin)[0])
+#define PIN_SYM(pin) pin_ ## pin
+
+#define MS_PIN_DECL_(pin, ...) \
+ static const struct aspeed_sig_expr **PIN_EXPRS_SYM(pin)[] = \
+ { __VA_ARGS__, NULL }; \
+ static const struct aspeed_pin_desc PIN_SYM(pin) = \
+ { #pin, PIN_EXPRS_PTR(pin) }
+
+/**
+ * Declare a multi-signal pin
+ *
+ * @pin: The pin number
+ * @other: Macro name for "other" functionality (subjected to stringification)
+ * @high: Macro name for the highest priority signal functions
+ * @low: Macro name for the low priority signal functions
+ *
+ * For example:
+ *
+ * #define A8 56
+ * SIG_EXPR_DECL(ROMD8, ROM16, SIG_DESC_SET(SCU90, 6));
+ * SIG_EXPR_DECL(ROMD8, ROM16S, SIG_DESC_SET(HW_STRAP1, 4),
+ * { HW_STRAP1, GENMASK(1, 0), 0, 0 });
+ * SIG_EXPR_LIST_DECL(ROMD8, SIG_EXPR_PTR(ROMD8, ROM16),
+ * SIG_EXPR_PTR(ROMD8, ROM16S));
+ * SIG_EXPR_LIST_DECL_SINGLE(NCTS6, NCTS6, SIG_DESC_SET(SCU90, 7));
+ * MS_PIN_DECL(A8, GPIOH0, ROMD8, NCTS6);
+ */
+#define MS_PIN_DECL(pin, other, high, low) \
+ SIG_EXPR_LIST_DECL_SINGLE(other, other); \
+ MS_PIN_DECL_(pin, \
+ SIG_EXPR_LIST_PTR(high), \
+ SIG_EXPR_LIST_PTR(low), \
+ SIG_EXPR_LIST_PTR(other))
+
+#define PIN_GROUP_SYM(func) pins_ ## func
+#define FUNC_GROUP_SYM(func) groups_ ## func
+#define FUNC_GROUP_DECL(func, ...) \
+ static const int PIN_GROUP_SYM(func)[] = { __VA_ARGS__ }; \
+ static const char *FUNC_GROUP_SYM(func)[] = { #func }
+
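+/*
+ * For example, FUNC_GROUP_DECL(TIMER3, A4) (with A4 defined as a pin number,
+ * as in the SSSF_PIN_DECL example below) declares a single-pin group for the
+ * TIMER3 function:
+ *
+ *     static const int pins_TIMER3[] = { A4 };
+ *     static const char *groups_TIMER3[] = { "TIMER3" };
+ */
+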
+/**
+ * Declare a single signal pin
+ *
+ * @pin: The pin number
+ * @other: Macro name for "other" functionality (subjected to stringification)
+ * @sig: Macro name for the signal (subjected to stringification)
+ *
+ * For example:
+ *
+ * #define E3 80
+ * SIG_EXPR_LIST_DECL_SINGLE(SCL5, I2C5, I2C5_DESC);
+ * SS_PIN_DECL(E3, GPIOK0, SCL5);
+ */
+#define SS_PIN_DECL(pin, other, sig) \
+ SIG_EXPR_LIST_DECL_SINGLE(other, other); \
+ MS_PIN_DECL_(pin, SIG_EXPR_LIST_PTR(sig), SIG_EXPR_LIST_PTR(other))
+
+/**
+ * Single signal, single function pin declaration
+ *
+ * @pin: The pin number
+ * @other: Macro name for "other" functionality (subjected to stringification)
+ * @sig: Macro name for the signal (subjected to stringification)
+ * @...: Signal descriptors that define the function expression
+ *
+ * For example:
+ *
+ * SSSF_PIN_DECL(A4, GPIOA2, TIMER3, SIG_DESC_SET(SCU80, 2));
+ */
+#define SSSF_PIN_DECL(pin, other, sig, ...) \
+ SIG_EXPR_LIST_DECL_SINGLE(sig, sig, __VA_ARGS__); \
+ SIG_EXPR_LIST_DECL_SINGLE(other, other); \
+ MS_PIN_DECL_(pin, SIG_EXPR_LIST_PTR(sig), SIG_EXPR_LIST_PTR(other)); \
+ FUNC_GROUP_DECL(sig, pin)
+
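+/**
+ * Declare a pin that provides only a GPIO signal
+ *
+ * @pin: The pin number
+ * @gpio: Macro name for the GPIO signal (subjected to stringification and
+ *        token pasting)
+ */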
+#define GPIO_PIN_DECL(pin, gpio) \
+ SIG_EXPR_LIST_DECL_SINGLE(gpio, gpio); \
+ MS_PIN_DECL_(pin, SIG_EXPR_LIST_PTR(gpio))
+
+struct aspeed_pinctrl_data {
+ struct regmap *map;
+
+ const struct pinctrl_pin_desc *pins;
+ const unsigned int npins;
+
+ const struct aspeed_pin_group *groups;
+ const unsigned int ngroups;
+
+ const struct aspeed_pin_function *functions;
+ const unsigned int nfunctions;
+};
+
+#define ASPEED_PINCTRL_PIN(name_) \
+ [name_] = { \
+ .number = name_, \
+ .name = #name_, \
+ .drv_data = (void *) &(PIN_SYM(name_)) \
+ }
+
+struct aspeed_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int npins;
+};
+
+#define ASPEED_PINCTRL_GROUP(name_) { \
+ .name = #name_, \
+ .pins = &(PIN_GROUP_SYM(name_))[0], \
+ .npins = ARRAY_SIZE(PIN_GROUP_SYM(name_)), \
+}
+
+struct aspeed_pin_function {
+ const char *name;
+ const char *const *groups;
+ unsigned int ngroups;
+};
+
+#define ASPEED_PINCTRL_FUNC(name_, ...) { \
+ .name = #name_, \
+ .groups = &FUNC_GROUP_SYM(name_)[0], \
+ .ngroups = ARRAY_SIZE(FUNC_GROUP_SYM(name_)), \
+}
+
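+/*
+ * A SoC-specific driver might build its tables from the macros above roughly
+ * as sketched here (the symbol names are illustrative only):
+ *
+ *     static const struct pinctrl_pin_desc foo_pins[] = {
+ *             ASPEED_PINCTRL_PIN(A4),
+ *             ...
+ *     };
+ *
+ *     static const struct aspeed_pin_group foo_groups[] = {
+ *             ASPEED_PINCTRL_GROUP(TIMER3),
+ *             ...
+ *     };
+ *
+ *     static const struct aspeed_pin_function foo_functions[] = {
+ *             ASPEED_PINCTRL_FUNC(TIMER3),
+ *             ...
+ *     };
+ *
+ *     static struct aspeed_pinctrl_data foo_pinctrl_data = {
+ *             .pins = foo_pins,
+ *             .npins = ARRAY_SIZE(foo_pins),
+ *             .groups = foo_groups,
+ *             .ngroups = ARRAY_SIZE(foo_groups),
+ *             .functions = foo_functions,
+ *             .nfunctions = ARRAY_SIZE(foo_functions),
+ *     };
+ */
+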
+int aspeed_pinctrl_get_groups_count(struct pinctrl_dev *pctldev);
+const char *aspeed_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group);
+int aspeed_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group, const unsigned int **pins,
+ unsigned int *npins);
+void aspeed_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset);
+int aspeed_pinmux_get_fn_count(struct pinctrl_dev *pctldev);
+const char *aspeed_pinmux_get_fn_name(struct pinctrl_dev *pctldev,
+ unsigned int function);
+int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
+ unsigned int function, const char * const **groups,
+ unsigned int * const num_groups);
+int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group);
+int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset);
+int aspeed_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *pdesc,
+ struct aspeed_pinctrl_data *pdata);
+
+#endif /* PINCTRL_ASPEED */
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 582f6df446e8..a5331fdfc795 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1018,7 +1018,7 @@ static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %s", dev_name(pctldev->dev));
}
-static struct pinctrl_ops bcm281xx_pinctrl_ops = {
+static const struct pinctrl_ops bcm281xx_pinctrl_ops = {
.get_groups_count = bcm281xx_pinctrl_get_groups_count,
.get_group_name = bcm281xx_pinctrl_get_group_name,
.get_group_pins = bcm281xx_pinctrl_get_group_pins,
@@ -1080,7 +1080,7 @@ static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
return rc;
}
-static struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
+static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
.get_functions_count = bcm281xx_pinctrl_get_fcns_count,
.get_function_name = bcm281xx_pinctrl_get_fcn_name,
.get_function_groups = bcm281xx_pinctrl_get_fcn_groups,
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index ca817896ed24..13a4c2774157 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -531,7 +531,7 @@ static void ns2_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
seq_printf(s, " %s", dev_name(pctrl_dev->dev));
}
-static struct pinctrl_ops ns2_pinctrl_ops = {
+static const struct pinctrl_ops ns2_pinctrl_ops = {
.get_groups_count = ns2_get_groups_count,
.get_group_name = ns2_get_group_name,
.get_group_pins = ns2_get_group_pins,
@@ -959,7 +959,7 @@ static int ns2_pin_config_set(struct pinctrl_dev *pctrldev, unsigned int pin,
out:
return ret;
}
-static struct pinmux_ops ns2_pinmux_ops = {
+static const struct pinmux_ops ns2_pinmux_ops = {
.get_functions_count = ns2_get_functions_count,
.get_function_name = ns2_get_function_name,
.get_function_groups = ns2_get_function_groups,
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 4149db309c8b..35c17653c694 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -348,7 +348,7 @@ static void nsp_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
seq_printf(s, " %s", dev_name(pctrl_dev->dev));
}
-static struct pinctrl_ops nsp_pinctrl_ops = {
+static const struct pinctrl_ops nsp_pinctrl_ops = {
.get_groups_count = nsp_get_groups_count,
.get_group_name = nsp_get_group_name,
.get_group_pins = nsp_get_group_pins,
@@ -518,7 +518,7 @@ static void nsp_gpio_disable_free(struct pinctrl_dev *pctrl_dev,
spin_unlock_irqrestore(&pinctrl->lock, flags);
}
-static struct pinmux_ops nsp_pinmux_ops = {
+static const struct pinmux_ops nsp_pinmux_ops = {
.get_functions_count = nsp_get_functions_count,
.get_function_name = nsp_get_function_name,
.get_function_groups = nsp_get_function_groups,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 71391757938b..47613201269a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -315,7 +315,7 @@ static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Currently implementation only for shared mux/conf register */
if (!(info->flags & SHARE_MUX_CONF_REG))
- return -EINVAL;
+ return 0;
pin_reg = &info->pin_regs[offset];
if (pin_reg->mux_reg == -1)
@@ -380,7 +380,7 @@ static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
* They are part of the shared mux/conf register.
*/
if (!(info->flags & SHARE_MUX_CONF_REG))
- return -EINVAL;
+ return 0;
pin_reg = &info->pin_regs[offset];
if (pin_reg->mux_reg == -1)
@@ -501,7 +501,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
ret = imx_pinconf_get(pctldev, pin->pin, &config);
if (ret)
return;
- seq_printf(s, "%s: 0x%lx", name, config);
+ seq_printf(s, " %s: 0x%lx\n", name, config);
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0fe8fad25e4d..30389f4ccab4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -134,6 +134,7 @@ struct chv_gpio_pinrange {
* @gpio_ranges: An array of GPIO ranges in this community
* @ngpio_ranges: Number of GPIO ranges
* @ngpios: Total number of GPIOs in this community
+ * @nirqs: Total number of IRQs this community can generate
*/
struct chv_community {
const char *uid;
@@ -146,6 +147,7 @@ struct chv_community {
const struct chv_gpio_pinrange *gpio_ranges;
size_t ngpio_ranges;
size_t ngpios;
+ size_t nirqs;
};
struct chv_pin_context {
@@ -396,6 +398,12 @@ static const struct chv_community southwest_community = {
.gpio_ranges = southwest_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
.ngpios = ARRAY_SIZE(southwest_pins),
+ /*
+ * Southwest community can generate GPIO interrupts only for the
+ * first 8 interrupts. The upper half (8-15) can only be used to
+ * trigger GPEs.
+ */
+ .nirqs = 8,
};
static const struct pinctrl_pin_desc north_pins[] = {
@@ -479,6 +487,12 @@ static const struct chv_community north_community = {
.gpio_ranges = north_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
.ngpios = ARRAY_SIZE(north_pins),
+ /*
+ * North community can generate GPIO interrupts only for the first
+ * 8 interrupts. The upper half (8-15) can only be used to trigger
+ * GPEs.
+ */
+ .nirqs = 8,
};
static const struct pinctrl_pin_desc east_pins[] = {
@@ -521,6 +535,7 @@ static const struct chv_community east_community = {
.gpio_ranges = east_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(east_gpio_ranges),
.ngpios = ARRAY_SIZE(east_pins),
+ .nirqs = 16,
};
static const struct pinctrl_pin_desc southeast_pins[] = {
@@ -646,6 +661,7 @@ static const struct chv_community southeast_community = {
.gpio_ranges = southeast_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southeast_gpio_ranges),
.ngpios = ARRAY_SIZE(southeast_pins),
+ .nirqs = 16,
};
static const struct chv_community *chv_communities[] = {
@@ -1497,7 +1513,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
pending = readl(pctrl->regs + CHV_INTSTAT);
- for_each_set_bit(intr_line, &pending, 16) {
+ for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
unsigned irq, offset;
offset = pctrl->intr_lines[intr_line];
@@ -1520,8 +1536,9 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
+ chip->irq_need_valid_mask = true;
- ret = gpiochip_add_data(chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed to register gpiochip\n");
return ret;
@@ -1533,12 +1550,27 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
range->base, range->npins);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- goto fail;
+ return ret;
}
offset += range->npins;
}
+ /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
+ for (i = 0; i < pctrl->community->npins; i++) {
+ const struct pinctrl_pin_desc *desc;
+ u32 intsel;
+
+ desc = &pctrl->community->pins[i];
+
+ intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
+ intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+ if (intsel >= pctrl->community->nirqs)
+ clear_bit(i, chip->irq_valid_mask);
+ }
+
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
@@ -1546,17 +1578,12 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
- goto fail;
+ return ret;
}
gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
chv_gpio_irq_handler);
return 0;
-
-fail:
- gpiochip_remove(chip);
-
- return ret;
}
static int chv_pinctrl_probe(struct platform_device *pdev)
@@ -1624,15 +1651,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int chv_pinctrl_remove(struct platform_device *pdev)
-{
- struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int chv_pinctrl_suspend(struct device *dev)
{
@@ -1729,7 +1747,6 @@ MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match);
static struct platform_driver chv_pinctrl_driver = {
.probe = chv_pinctrl_probe,
- .remove = chv_pinctrl_remove,
.driver = {
.name = "cherryview-pinctrl",
.pm = &chv_pinctrl_pm_ops,
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 257cab129692..63387a40b973 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -86,6 +86,7 @@ struct intel_pinctrl_context {
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
* @context: Configuration saved over system sleep
+ * @irq: pinctrl/GPIO chip irq number
*/
struct intel_pinctrl {
struct device *dev;
@@ -97,6 +98,7 @@ struct intel_pinctrl {
struct intel_community *communities;
size_t ncommunities;
struct intel_pinctrl_context context;
+ int irq;
};
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
@@ -793,38 +795,12 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
- unsigned padno, gpp, gpp_offset;
- unsigned long flags;
- u32 gpe_en;
-
- community = intel_get_community(pctrl, pin);
- if (!community)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- padno = pin_to_padno(community, pin);
- gpp = padno / community->gpp_size;
- gpp_offset = padno % community->gpp_size;
-
- /* Clear the existing wake status */
- writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
-
- /*
- * The controller will generate wake when GPE of the corresponding
- * pad is enabled and it is not routed to SCI (GPIROUTSCI is not
- * set).
- */
- gpe_en = readl(community->regs + GPI_GPE_EN + gpp * 4);
if (on)
- gpe_en |= BIT(gpp_offset);
+ enable_irq_wake(pctrl->irq);
else
- gpe_en &= ~BIT(gpp_offset);
- writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4);
-
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ disable_irq_wake(pctrl->irq);
dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
return 0;
@@ -905,6 +881,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
+ pctrl->irq = irq;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 7fb765642ee7..7826c7f0cb7c 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -854,7 +854,7 @@ static int mrfld_pinctrl_probe(struct platform_device *pdev)
*/
nfamilies = ARRAY_SIZE(mrfld_families),
families = devm_kmemdup(&pdev->dev, mrfld_families,
- nfamilies * sizeof(mrfld_families),
+ sizeof(mrfld_families),
GFP_KERNEL);
if (!families)
return -ENOMEM;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index ce554e0d6979..f9aef2ac03a1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/gpio/driver.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -1055,7 +1054,7 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static struct gpio_chip mtk_gpio_chip = {
+static const struct gpio_chip mtk_gpio_chip = {
.owner = THIS_MODULE,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
@@ -1496,7 +1495,3 @@ chip_error:
gpiochip_remove(pctl->chip);
return ret;
}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index cb4d6ad30530..c3928aa3fefa 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -156,6 +156,11 @@ static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
+static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
+static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
+static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
+static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
+
static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
@@ -163,6 +168,23 @@ static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
+static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
+static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
+static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
+static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
+static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
+static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
+static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
+
+static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
+static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
+static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
+static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
+static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
+static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
+static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
+static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
+
static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
@@ -178,6 +200,15 @@ static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) };
static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) };
+static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
+static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
+
+static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
+static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
+
+static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
+static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
+
static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
@@ -193,6 +224,14 @@ static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
+static const unsigned int pwm_a_x_pins[] = { PIN(GPIOX_6, EE_OFF) };
+static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, EE_OFF) };
+static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
+static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
+static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
+static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
+static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -225,6 +264,13 @@ static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
+static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
+
+static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
+static const unsigned int pwm_ao_a_6_pins[] = { PIN(GPIOAO_6, 0) };
+static const unsigned int pwm_ao_a_12_pins[] = { PIN(GPIOAO_12, 0) };
+static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_13, 0) };
+
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
GPIO_GROUP(GPIOZ_1, EE_OFF),
@@ -355,16 +401,28 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIO_TEST_N, EE_OFF),
/* Bank X */
+ GROUP(sdio_d0, 8, 5),
+ GROUP(sdio_d1, 8, 4),
+ GROUP(sdio_d2, 8, 3),
+ GROUP(sdio_d3, 8, 2),
+ GROUP(sdio_cmd, 8, 1),
+ GROUP(sdio_clk, 8, 0),
+ GROUP(sdio_irq, 8, 11),
GROUP(uart_tx_a, 4, 13),
GROUP(uart_rx_a, 4, 12),
GROUP(uart_cts_a, 4, 11),
GROUP(uart_rts_a, 4, 10),
+ GROUP(pwm_a_x, 3, 17),
+ GROUP(pwm_e, 2, 30),
+ GROUP(pwm_f_x, 3, 18),
/* Bank Y */
GROUP(uart_cts_c, 1, 19),
GROUP(uart_rts_c, 1, 18),
GROUP(uart_tx_c, 1, 17),
GROUP(uart_rx_c, 1, 16),
+ GROUP(pwm_a_y, 1, 21),
+ GROUP(pwm_f_y, 1, 20),
/* Bank Z */
GROUP(eth_mdio, 6, 1),
@@ -387,12 +445,32 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(uart_rx_b, 2, 28),
GROUP(uart_cts_b, 2, 27),
GROUP(uart_rts_b, 2, 26),
+ GROUP(pwm_b, 3, 21),
+ GROUP(pwm_d, 3, 20),
+ GROUP(i2c_sck_a, 7, 27),
+ GROUP(i2c_sda_a, 7, 26),
+ GROUP(i2c_sck_b, 7, 25),
+ GROUP(i2c_sda_b, 7, 24),
+ GROUP(i2c_sck_c, 7, 23),
+ GROUP(i2c_sda_c, 7, 22),
/* Bank BOOT */
GROUP(emmc_nand_d07, 4, 30),
GROUP(emmc_clk, 4, 18),
GROUP(emmc_cmd, 4, 19),
GROUP(emmc_ds, 4, 31),
+ GROUP(nor_d, 5, 1),
+ GROUP(nor_q, 5, 3),
+ GROUP(nor_c, 5, 2),
+ GROUP(nor_cs, 5, 0),
+ GROUP(nand_ce0, 4, 26),
+ GROUP(nand_ce1, 4, 27),
+ GROUP(nand_rb0, 4, 25),
+ GROUP(nand_ale, 4, 24),
+ GROUP(nand_cle, 4, 23),
+ GROUP(nand_wen_clk, 4, 22),
+ GROUP(nand_ren_wr, 4, 21),
+ GROUP(nand_dqs, 4, 20),
/* Bank CARD */
GROUP(sdcard_d1, 2, 14),
@@ -432,6 +510,11 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GROUP(i2c_sda_ao, 0, 5),
GROUP(i2c_slave_sck_ao, 0, 2),
GROUP(i2c_slave_sda_ao, 0, 1),
+ GROUP(remote_input_ao, 0, 0),
+ GROUP(pwm_ao_a_3, 0, 22),
+ GROUP(pwm_ao_a_6, 0, 18),
+ GROUP(pwm_ao_a_12, 0, 17),
+ GROUP(pwm_ao_b, 0, 3),
};
static const char * const gpio_periphs_groups[] = {
@@ -475,11 +558,25 @@ static const char * const emmc_groups[] = {
"emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds",
};
+static const char * const nor_groups[] = {
+ "nor_d", "nor_q", "nor_c", "nor_cs",
+};
+
static const char * const sdcard_groups[] = {
"sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3",
"sdcard_cmd", "sdcard_clk",
};
+static const char * const sdio_groups[] = {
+ "sdio_d0", "sdio_d1", "sdio_d2", "sdio_d3",
+ "sdio_cmd", "sdio_clk", "sdio_irq",
+};
+
+static const char * const nand_groups[] = {
+ "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+};
+
static const char * const uart_a_groups[] = {
"uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a",
};
@@ -492,6 +589,18 @@ static const char * const uart_c_groups[] = {
"uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c",
};
+static const char * const i2c_a_groups[] = {
+ "i2c_sck_a", "i2c_sda_a",
+};
+
+static const char * const i2c_b_groups[] = {
+ "i2c_sck_b", "i2c_sda_b",
+};
+
+static const char * const i2c_c_groups[] = {
+ "i2c_sck_c", "i2c_sda_c",
+};
+
static const char * const eth_groups[] = {
"eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
"eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
@@ -499,6 +608,34 @@ static const char * const eth_groups[] = {
"eth_txd0", "eth_txd1", "eth_txd2", "eth_txd3",
};
+static const char * const pwm_a_x_groups[] = {
+ "pwm_a_x",
+};
+
+static const char * const pwm_a_y_groups[] = {
+ "pwm_a_y",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e",
+};
+
+static const char * const pwm_f_x_groups[] = {
+ "pwm_f_x",
+};
+
+static const char * const pwm_f_y_groups[] = {
+ "pwm_f_y",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -521,14 +658,47 @@ static const char * const i2c_slave_ao_groups[] = {
"i2c_slave_sdk_ao", "i2c_slave_sda_ao",
};
+static const char * const remote_input_ao_groups[] = {
+ "remote_input_ao",
+};
+
+static const char * const pwm_ao_a_3_groups[] = {
+ "pwm_ao_a_3",
+};
+
+static const char * const pwm_ao_a_6_groups[] = {
+ "pwm_ao_a_6",
+};
+
+static const char * const pwm_ao_a_12_groups[] = {
+ "pwm_ao_a_12",
+};
+
+static const char * const pwm_ao_b_groups[] = {
+ "pwm_ao_b",
+};
+
static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
+ FUNCTION(nor),
FUNCTION(sdcard),
+ FUNCTION(sdio),
+ FUNCTION(nand),
FUNCTION(uart_a),
FUNCTION(uart_b),
FUNCTION(uart_c),
+ FUNCTION(i2c_a),
+ FUNCTION(i2c_b),
+ FUNCTION(i2c_c),
FUNCTION(eth),
+ FUNCTION(pwm_a_x),
+ FUNCTION(pwm_a_y),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f_x),
+ FUNCTION(pwm_f_y),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
@@ -537,6 +707,11 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(uart_ao_b),
FUNCTION(i2c_ao),
FUNCTION(i2c_slave_ao),
+ FUNCTION(remote_input_ao),
+ FUNCTION(pwm_ao_a_3),
+ FUNCTION(pwm_ao_a_6),
+ FUNCTION(pwm_ao_a_12),
+ FUNCTION(pwm_ao_b),
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
@@ -556,38 +731,28 @@ static struct meson_bank meson_gxbb_aobus_banks[] = {
BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-static struct meson_domain_data meson_gxbb_periphs_domain_data = {
+struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
- .banks = meson_gxbb_periphs_banks,
- .num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
.pin_base = 14,
- .num_pins = 120,
-};
-
-static struct meson_domain_data meson_gxbb_aobus_domain_data = {
- .name = "aobus-banks",
- .banks = meson_gxbb_aobus_banks,
- .num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
- .pin_base = 0,
- .num_pins = 14,
-};
-
-struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
.funcs = meson_gxbb_periphs_functions,
- .domain_data = &meson_gxbb_periphs_domain_data,
+ .banks = meson_gxbb_periphs_banks,
.num_pins = ARRAY_SIZE(meson_gxbb_periphs_pins),
.num_groups = ARRAY_SIZE(meson_gxbb_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
};
struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+ .name = "aobus-banks",
+ .pin_base = 0,
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
.funcs = meson_gxbb_aobus_functions,
- .domain_data = &meson_gxbb_aobus_domain_data,
+ .banks = meson_gxbb_aobus_banks,
.num_pins = ARRAY_SIZE(meson_gxbb_aobus_pins),
.num_groups = ARRAY_SIZE(meson_gxbb_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
+ .num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 44e69c963f5d..57122eda155a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -21,9 +21,8 @@
* domain which can't be powered off; the bank also uses a set of
* registers different from the other banks.
*
- * For each of the two power domains (regular and always-on) there are
- * 4 different register ranges that control the following properties
- * of the pins:
+ * For each pin controller there are 4 different register ranges that
+ * control the following properties of the pins:
* 1) pin muxing
* 2) pull enable/disable
* 3) pull up/down
@@ -33,8 +32,8 @@
* direction are the same and thus there are only 3 register ranges.
*
* Every pinmux group can be enabled by a specific bit in the first
- * register range of the domain; when all groups for a given pin are
- * disabled the pin acts as a GPIO.
+ * register range; when all groups for a given pin are disabled the
+ * pin acts as a GPIO.
*
* For the pull and GPIO configuration every bank uses a contiguous
* set of bits in the register sets described above; the same register
@@ -66,21 +65,21 @@
/**
* meson_get_bank() - find the bank containing a given pin
*
- * @domain: the domain containing the pin
+ * @pc: the pinctrl instance
* @pin: the pin number
* @bank: the found bank
*
* Return: 0 on success, a negative value on error
*/
-static int meson_get_bank(struct meson_domain *domain, unsigned int pin,
+static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin,
struct meson_bank **bank)
{
int i;
- for (i = 0; i < domain->data->num_banks; i++) {
- if (pin >= domain->data->banks[i].first &&
- pin <= domain->data->banks[i].last) {
- *bank = &domain->data->banks[i];
+ for (i = 0; i < pc->data->num_banks; i++) {
+ if (pin >= pc->data->banks[i].first &&
+ pin <= pc->data->banks[i].last) {
+ *bank = &pc->data->banks[i];
return 0;
}
}
@@ -89,33 +88,6 @@ static int meson_get_bank(struct meson_domain *domain, unsigned int pin,
}
/**
- * meson_get_domain_and_bank() - find domain and bank containing a given pin
- *
- * @pc: Meson pin controller device
- * @pin: the pin number
- * @domain: the found domain
- * @bank: the found bank
- *
- * Return: 0 on success, a negative value on error
- */
-static int meson_get_domain_and_bank(struct meson_pinctrl *pc, unsigned int pin,
- struct meson_domain **domain,
- struct meson_bank **bank)
-{
- struct meson_domain *d;
-
- d = pc->domain;
-
- if (pin >= d->data->pin_base &&
- pin < d->data->pin_base + d->data->num_pins) {
- *domain = d;
- return meson_get_bank(d, pin, bank);
- }
-
- return -EINVAL;
-}
-
-/**
* meson_calc_reg_and_bit() - calculate register and bit for a pin
*
* @bank: the bank containing the pin
@@ -190,7 +162,6 @@ static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
unsigned int pin, int sel_group)
{
struct meson_pmx_group *group;
- struct meson_domain *domain;
int i, j;
for (i = 0; i < pc->data->num_groups; i++) {
@@ -201,8 +172,7 @@ static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
for (j = 0; j < group->num_pins; j++) {
if (group->pins[j] == pin) {
/* We have found a group using the pin */
- domain = pc->domain;
- regmap_update_bits(domain->reg_mux,
+ regmap_update_bits(pc->reg_mux,
group->reg * 4,
BIT(group->bit), 0);
}
@@ -216,7 +186,6 @@ static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
struct meson_pmx_func *func = &pc->data->funcs[func_num];
struct meson_pmx_group *group = &pc->data->groups[group_num];
- struct meson_domain *domain = pc->domain;
int i, ret = 0;
dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
@@ -231,7 +200,7 @@ static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
/* Function 0 (GPIO) doesn't need any additional setting */
if (func_num)
- ret = regmap_update_bits(domain->reg_mux, group->reg * 4,
+ ret = regmap_update_bits(pc->reg_mux, group->reg * 4,
BIT(group->bit), BIT(group->bit));
return ret;
@@ -287,14 +256,13 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_domain *domain;
struct meson_bank *bank;
enum pin_config_param param;
unsigned int reg, bit;
int i, ret;
u16 arg;
- ret = meson_get_domain_and_bank(pc, pin, &domain, &bank);
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return ret;
@@ -307,7 +275,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_update_bits(domain->reg_pull, reg,
+ ret = regmap_update_bits(pc->reg_pull, reg,
BIT(bit), 0);
if (ret)
return ret;
@@ -317,13 +285,13 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
meson_calc_reg_and_bit(bank, pin, REG_PULLEN,
&reg, &bit);
- ret = regmap_update_bits(domain->reg_pullen, reg,
+ ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), BIT(bit));
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_update_bits(domain->reg_pull, reg,
+ ret = regmap_update_bits(pc->reg_pull, reg,
BIT(bit), BIT(bit));
if (ret)
return ret;
@@ -333,13 +301,13 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
meson_calc_reg_and_bit(bank, pin, REG_PULLEN,
&reg, &bit);
- ret = regmap_update_bits(domain->reg_pullen, reg,
+ ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), BIT(bit));
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_update_bits(domain->reg_pull, reg,
+ ret = regmap_update_bits(pc->reg_pull, reg,
BIT(bit), 0);
if (ret)
return ret;
@@ -354,18 +322,17 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin)
{
- struct meson_domain *domain;
struct meson_bank *bank;
unsigned int reg, bit, val;
int ret, conf;
- ret = meson_get_domain_and_bank(pc, pin, &domain, &bank);
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg, &bit);
- ret = regmap_read(domain->reg_pullen, reg, &val);
+ ret = regmap_read(pc->reg_pullen, reg, &val);
if (ret)
return ret;
@@ -374,7 +341,7 @@ static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin)
} else {
meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
- ret = regmap_read(domain->reg_pull, reg, &val);
+ ret = regmap_read(pc->reg_pull, reg, &val);
if (ret)
return ret;
@@ -452,82 +419,82 @@ static int meson_gpio_request(struct gpio_chip *chip, unsigned gpio)
static void meson_gpio_free(struct gpio_chip *chip, unsigned gpio)
{
- struct meson_domain *domain = gpiochip_get_data(chip);
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
- pinctrl_free_gpio(domain->data->pin_base + gpio);
+ pinctrl_free_gpio(pc->data->pin_base + gpio);
}
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
- struct meson_domain *domain = gpiochip_get_data(chip);
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
unsigned int reg, bit, pin;
struct meson_bank *bank;
int ret;
- pin = domain->data->pin_base + gpio;
- ret = meson_get_bank(domain, pin, &bank);
+ pin = pc->data->pin_base + gpio;
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
- return regmap_update_bits(domain->reg_gpio, reg, BIT(bit), BIT(bit));
+ return regmap_update_bits(pc->reg_gpio, reg, BIT(bit), BIT(bit));
}
static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
- struct meson_domain *domain = gpiochip_get_data(chip);
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
unsigned int reg, bit, pin;
struct meson_bank *bank;
int ret;
- pin = domain->data->pin_base + gpio;
- ret = meson_get_bank(domain, pin, &bank);
+ pin = pc->data->pin_base + gpio;
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
- ret = regmap_update_bits(domain->reg_gpio, reg, BIT(bit), 0);
+ ret = regmap_update_bits(pc->reg_gpio, reg, BIT(bit), 0);
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
- return regmap_update_bits(domain->reg_gpio, reg, BIT(bit),
+ return regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
{
- struct meson_domain *domain = gpiochip_get_data(chip);
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
unsigned int reg, bit, pin;
struct meson_bank *bank;
int ret;
- pin = domain->data->pin_base + gpio;
- ret = meson_get_bank(domain, pin, &bank);
+ pin = pc->data->pin_base + gpio;
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return;
meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
- regmap_update_bits(domain->reg_gpio, reg, BIT(bit),
+ regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
- struct meson_domain *domain = gpiochip_get_data(chip);
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
unsigned int reg, bit, val, pin;
struct meson_bank *bank;
int ret;
- pin = domain->data->pin_base + gpio;
- ret = meson_get_bank(domain, pin, &bank);
+ pin = pc->data->pin_base + gpio;
+ ret = meson_get_bank(pc, pin, &bank);
if (ret)
return ret;
meson_calc_reg_and_bit(bank, pin, REG_IN, &reg, &bit);
- regmap_read(domain->reg_gpio, reg, &val);
+ regmap_read(pc->reg_gpio, reg, &val);
return !!(val & BIT(bit));
}
@@ -562,35 +529,32 @@ static const struct of_device_id meson_pinctrl_dt_match[] = {
static int meson_gpiolib_register(struct meson_pinctrl *pc)
{
- struct meson_domain *domain;
int ret;
- domain = pc->domain;
-
- domain->chip.label = domain->data->name;
- domain->chip.parent = pc->dev;
- domain->chip.request = meson_gpio_request;
- domain->chip.free = meson_gpio_free;
- domain->chip.direction_input = meson_gpio_direction_input;
- domain->chip.direction_output = meson_gpio_direction_output;
- domain->chip.get = meson_gpio_get;
- domain->chip.set = meson_gpio_set;
- domain->chip.base = domain->data->pin_base;
- domain->chip.ngpio = domain->data->num_pins;
- domain->chip.can_sleep = false;
- domain->chip.of_node = domain->of_node;
- domain->chip.of_gpio_n_cells = 2;
-
- ret = gpiochip_add_data(&domain->chip, domain);
+ pc->chip.label = pc->data->name;
+ pc->chip.parent = pc->dev;
+ pc->chip.request = meson_gpio_request;
+ pc->chip.free = meson_gpio_free;
+ pc->chip.direction_input = meson_gpio_direction_input;
+ pc->chip.direction_output = meson_gpio_direction_output;
+ pc->chip.get = meson_gpio_get;
+ pc->chip.set = meson_gpio_set;
+ pc->chip.base = pc->data->pin_base;
+ pc->chip.ngpio = pc->data->num_pins;
+ pc->chip.can_sleep = false;
+ pc->chip.of_node = pc->of_node;
+ pc->chip.of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(&pc->chip, pc);
if (ret) {
dev_err(pc->dev, "can't add gpio chip %s\n",
- domain->data->name);
+ pc->data->name);
goto fail;
}
- ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
- 0, domain->data->pin_base,
- domain->chip.ngpio);
+ ret = gpiochip_add_pin_range(&pc->chip, dev_name(pc->dev),
+ 0, pc->data->pin_base,
+ pc->chip.ngpio);
if (ret) {
dev_err(pc->dev, "can't add pin range\n");
goto fail;
@@ -598,7 +562,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
return 0;
fail:
- gpiochip_remove(&pc->domain->chip);
+ gpiochip_remove(&pc->chip);
return ret;
}
@@ -637,58 +601,46 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
struct device_node *node)
{
- struct device_node *np;
- struct meson_domain *domain;
- int num_domains = 0;
+ struct device_node *np, *gpio_np = NULL;
for_each_child_of_node(node, np) {
if (!of_find_property(np, "gpio-controller", NULL))
continue;
- num_domains++;
+ if (gpio_np) {
+ dev_err(pc->dev, "multiple gpio nodes\n");
+ return -EINVAL;
+ }
+ gpio_np = np;
}
- if (num_domains != 1) {
- dev_err(pc->dev, "wrong number of subnodes\n");
+ if (!gpio_np) {
+ dev_err(pc->dev, "no gpio node found\n");
return -EINVAL;
}
- pc->domain = devm_kzalloc(pc->dev, sizeof(struct meson_domain), GFP_KERNEL);
- if (!pc->domain)
- return -ENOMEM;
-
- domain = pc->domain;
- domain->data = pc->data->domain_data;
-
- for_each_child_of_node(node, np) {
- if (!of_find_property(np, "gpio-controller", NULL))
- continue;
-
- domain->of_node = np;
+ pc->of_node = gpio_np;
- domain->reg_mux = meson_map_resource(pc, np, "mux");
- if (IS_ERR(domain->reg_mux)) {
- dev_err(pc->dev, "mux registers not found\n");
- return PTR_ERR(domain->reg_mux);
- }
-
- domain->reg_pull = meson_map_resource(pc, np, "pull");
- if (IS_ERR(domain->reg_pull)) {
- dev_err(pc->dev, "pull registers not found\n");
- return PTR_ERR(domain->reg_pull);
- }
+ pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
+ if (IS_ERR(pc->reg_mux)) {
+ dev_err(pc->dev, "mux registers not found\n");
+ return PTR_ERR(pc->reg_mux);
+ }
- domain->reg_pullen = meson_map_resource(pc, np, "pull-enable");
- /* Use pull region if pull-enable one is not present */
- if (IS_ERR(domain->reg_pullen))
- domain->reg_pullen = domain->reg_pull;
+ pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
+ if (IS_ERR(pc->reg_pull)) {
+ dev_err(pc->dev, "pull registers not found\n");
+ return PTR_ERR(pc->reg_pull);
+ }
- domain->reg_gpio = meson_map_resource(pc, np, "gpio");
- if (IS_ERR(domain->reg_gpio)) {
- dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(domain->reg_gpio);
- }
+ pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
+ /* Use pull region if pull-enable one is not present */
+ if (IS_ERR(pc->reg_pullen))
+ pc->reg_pullen = pc->reg_pull;
- break;
+ pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
+ if (IS_ERR(pc->reg_gpio)) {
+ dev_err(pc->dev, "gpio registers not found\n");
+ return PTR_ERR(pc->reg_gpio);
}
return 0;
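
With the single-domain meson_domain structure folded into meson_pinctrl, bank lookup now walks pc->data->banks directly, as in the reworked meson_get_bank() above. A minimal userspace sketch of that lookup, using made-up bank ranges rather than the real GXBB layout:

#include <stdio.h>
#include <errno.h>

struct bank_s { const char *name; unsigned int first, last; };

struct pinctrl_data_s {
	struct bank_s *banks;
	unsigned int num_banks;
};

/* Mirrors the flattened meson_get_bank(): scan the bank table directly. */
static int get_bank(struct pinctrl_data_s *data, unsigned int pin,
		    struct bank_s **bank)
{
	unsigned int i;

	for (i = 0; i < data->num_banks; i++) {
		if (pin >= data->banks[i].first && pin <= data->banks[i].last) {
			*bank = &data->banks[i];
			return 0;
		}
	}
	return -EINVAL;
}

int main(void)
{
	struct bank_s banks[] = {
		{ "X", 0, 21 },		/* illustrative ranges only */
		{ "Y", 22, 38 },
	};
	struct pinctrl_data_s data = { banks, 2 };
	struct bank_s *bank;

	if (!get_bank(&data, 25, &bank))
		printf("pin 25 -> bank %s\n", bank->name);
	return 0;
}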
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index d89442ea4a4d..98b5080650c1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -95,54 +95,17 @@ struct meson_bank {
struct meson_reg_desc regs[NUM_REG];
};
-/**
- * struct meson_domain_data - domain platform data
- *
- * @name: name of the domain
- * @banks: set of banks belonging to the domain
- * @num_banks: number of banks in the domain
- */
-struct meson_domain_data {
- const char *name;
- struct meson_bank *banks;
- unsigned int num_banks;
- unsigned int pin_base;
- unsigned int num_pins;
-};
-
-/**
- * struct meson_domain
- *
- * @reg_mux: registers for mux settings
- * @reg_pullen: registers for pull-enable settings
- * @reg_pull: registers for pull settings
- * @reg_gpio: registers for gpio settings
- * @chip: gpio chip associated with the domain
- * @data; platform data for the domain
- * @node: device tree node for the domain
- *
- * A domain represents a set of banks controlled by the same set of
- * registers.
- */
-struct meson_domain {
- struct regmap *reg_mux;
- struct regmap *reg_pullen;
- struct regmap *reg_pull;
- struct regmap *reg_gpio;
-
- struct gpio_chip chip;
- struct meson_domain_data *data;
- struct device_node *of_node;
-};
-
struct meson_pinctrl_data {
+ const char *name;
const struct pinctrl_pin_desc *pins;
struct meson_pmx_group *groups;
struct meson_pmx_func *funcs;
- struct meson_domain_data *domain_data;
+ unsigned int pin_base;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
+ struct meson_bank *banks;
+ unsigned int num_banks;
};
struct meson_pinctrl {
@@ -150,7 +113,12 @@ struct meson_pinctrl {
struct pinctrl_dev *pcdev;
struct pinctrl_desc desc;
struct meson_pinctrl_data *data;
- struct meson_domain *domain;
+ struct regmap *reg_mux;
+ struct regmap *reg_pullen;
+ struct regmap *reg_pull;
+ struct regmap *reg_gpio;
+ struct gpio_chip chip;
+ struct device_node *of_node;
};
#define PIN(x, b) (b + x)
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 32de191e0807..07f1cb21c1b8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -931,38 +931,28 @@ static struct meson_bank meson8_aobus_banks[] = {
BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-static struct meson_domain_data meson8_cbus_domain_data = {
+struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
- .banks = meson8_cbus_banks,
- .num_banks = ARRAY_SIZE(meson8_cbus_banks),
.pin_base = 0,
- .num_pins = 120,
-};
-
-static struct meson_domain_data meson8_aobus_domain_data = {
- .name = "ao-bank",
- .banks = meson8_aobus_banks,
- .num_banks = ARRAY_SIZE(meson8_aobus_banks),
- .pin_base = 120,
- .num_pins = 16,
-};
-
-struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
.funcs = meson8_cbus_functions,
- .domain_data = &meson8_cbus_domain_data,
+ .banks = meson8_cbus_banks,
.num_pins = ARRAY_SIZE(meson8_cbus_pins),
.num_groups = ARRAY_SIZE(meson8_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8_cbus_functions),
+ .num_banks = ARRAY_SIZE(meson8_cbus_banks),
};
struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+ .name = "ao-bank",
+ .pin_base = 120,
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
.funcs = meson8_aobus_functions,
- .domain_data = &meson8_aobus_domain_data,
+ .banks = meson8_aobus_banks,
.num_pins = ARRAY_SIZE(meson8_aobus_pins),
.num_groups = ARRAY_SIZE(meson8_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
+ .num_banks = ARRAY_SIZE(meson8_aobus_banks),
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 874f2edf8be3..76f077f18193 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -896,38 +896,28 @@ static struct meson_bank meson8b_aobus_banks[] = {
BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-static struct meson_domain_data meson8b_cbus_domain_data = {
+struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
- .banks = meson8b_cbus_banks,
- .num_banks = ARRAY_SIZE(meson8b_cbus_banks),
.pin_base = 0,
- .num_pins = 130,
-};
-
-static struct meson_domain_data meson8b_aobus_domain_data = {
- .name = "aobus-banks",
- .banks = meson8b_aobus_banks,
- .num_banks = ARRAY_SIZE(meson8b_aobus_banks),
- .pin_base = 130,
- .num_pins = 16,
-};
-
-struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
.funcs = meson8b_cbus_functions,
- .domain_data = &meson8b_cbus_domain_data,
+ .banks = meson8b_cbus_banks,
.num_pins = ARRAY_SIZE(meson8b_cbus_pins),
.num_groups = ARRAY_SIZE(meson8b_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
+ .num_banks = ARRAY_SIZE(meson8b_cbus_banks),
};
struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+ .name = "aobus-banks",
+ .pin_base = 130,
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
.funcs = meson8b_aobus_functions,
- .domain_data = &meson8b_aobus_domain_data,
+ .banks = meson8b_aobus_banks,
.num_pins = ARRAY_SIZE(meson8b_aobus_pins),
.num_groups = ARRAY_SIZE(meson8b_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
+ .num_banks = ARRAY_SIZE(meson8b_aobus_banks),
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 345c3df669a0..84e144167b44 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -64,11 +64,11 @@ static int orion_mpp_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-#define V(f5181l, f5182, f5281) \
- ((f5181l << 0) | (f5182 << 1) | (f5281 << 2))
+#define V(f5181, f5182, f5281) \
+ ((f5181 << 0) | (f5182 << 1) | (f5281 << 2))
enum orion_variant {
- V_5181L = V(1, 0, 0),
+ V_5181 = V(1, 0, 0),
V_5182 = V(0, 1, 0),
V_5281 = V(0, 0, 1),
V_ALL = V(1, 1, 1),
@@ -103,13 +103,13 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = {
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "req5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "re0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata0", "act", V_5182)),
MPP_MODE(7,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "gnt5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "we0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata1", "act", V_5182)),
MPP_MODE(8,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
@@ -165,7 +165,7 @@ static struct mvebu_mpp_ctrl orion_mpp_controls[] = {
MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl),
};
-static struct pinctrl_gpio_range mv88f5181l_gpio_ranges[] = {
+static struct pinctrl_gpio_range mv88f5181_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
@@ -177,14 +177,14 @@ static struct pinctrl_gpio_range mv88f5281_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
-static struct mvebu_pinctrl_soc_info mv88f5181l_info = {
- .variant = V_5181L,
+static struct mvebu_pinctrl_soc_info mv88f5181_info = {
+ .variant = V_5181,
.controls = orion_mpp_controls,
.ncontrols = ARRAY_SIZE(orion_mpp_controls),
.modes = orion_mpp_modes,
.nmodes = ARRAY_SIZE(orion_mpp_modes),
- .gpioranges = mv88f5181l_gpio_ranges,
- .ngpioranges = ARRAY_SIZE(mv88f5181l_gpio_ranges),
+ .gpioranges = mv88f5181_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f5181_gpio_ranges),
};
static struct mvebu_pinctrl_soc_info mv88f5182_info = {
@@ -212,7 +212,8 @@ static struct mvebu_pinctrl_soc_info mv88f5281_info = {
* muxing, they are identical.
*/
static const struct of_device_id orion_pinctrl_of_match[] = {
- { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181l_info },
+ { .compatible = "marvell,88f5181-pinctrl", .data = &mv88f5181_info },
+ { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181_info },
{ .compatible = "marvell,88f5182-pinctrl", .data = &mv88f5182_info },
{ .compatible = "marvell,88f5281-pinctrl", .data = &mv88f5281_info },
{ }
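
The V() macro above simply packs per-variant flags into a bitmask so each MPP setting can list the SoC variants it applies to. A small standalone sketch of that encoding; the mpp_setting struct is a stand-in for the driver's mvebu_mpp_mode tables:

#include <stdio.h>

#define V(f5181, f5182, f5281) \
	((f5181 << 0) | (f5182 << 1) | (f5281 << 2))

enum orion_variant {
	V_5181 = V(1, 0, 0),
	V_5182 = V(0, 1, 0),
	V_5281 = V(0, 0, 1),
	V_ALL  = V(1, 1, 1),
};

/* A setting tagged with the variants that support it. */
struct mpp_setting {
	const char *name;
	unsigned int variant_mask;
};

int main(void)
{
	struct mpp_setting nand_re0 = { "nand re0", V_5182 | V_5281 };
	enum orion_variant running = V_5181;

	/* A variant supports the setting when its bit is in the mask. */
	printf("%s on 5181: %s\n", nand_re0.name,
	       (nand_re0.variant_mask & running) ? "supported" : "not supported");
	return 0;
}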
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 7d343c22c90c..f95001bc1d58 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -4,6 +4,8 @@
* Author: Patrice Chotard <patrice.chotard@st.com>
* License terms: GNU General Public License (GPL) version 2
*
+ * Driver that allows unused AxB5xx pins to be used as GPIOs
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -12,7 +14,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -1269,8 +1270,3 @@ static int __init abx500_gpio_init(void)
return platform_driver_register(&abx500_gpio_driver);
}
core_initcall(abx500_gpio_init);
-
-MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
-MODULE_DESCRIPTION("Driver allows to use AxB5xx unused pins to be used as GPIO");
-MODULE_ALIAS("platform:abx500-gpio");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 35f62180db4e..d318ca055489 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -1110,10 +1109,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
return PTR_ERR(nmk_chip);
}
- if (of_get_property(np, "st,supports-sleepmode", NULL))
- supports_sleepmode = true;
- else
- supports_sleepmode = false;
+ supports_sleepmode =
+ of_property_read_bool(np, "st,supports-sleepmode");
/* Correct platform device ID */
dev->id = nmk_chip->bank;
@@ -1180,7 +1177,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
irqchip,
0,
handle_edge_irq,
- IRQ_TYPE_EDGE_FALLING);
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&dev->dev, "could not add irqchip\n");
gpiochip_remove(&nmk_chip->chip);
@@ -1985,7 +1982,3 @@ static int __init nmk_pinctrl_init(void)
return platform_driver_register(&nmk_pinctrl_driver);
}
core_initcall(nmk_pinctrl_init);
-
-MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
-MODULE_DESCRIPTION("Nomadik GPIO Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index b3e772390ab6..aea310a91821 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -383,12 +383,26 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int ret = 0;
u32 pin_reg;
unsigned long flags;
+ bool level_trig;
+ u32 active_level;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ /*
+ * When the BIOS defaults program the pin as edge-triggered and
+ * active-high, ignore the trigger type requested by the client and
+ * configure the GPIO register from the BIOS settings instead.
+ */
+ level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
+ active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+
+ if (level_trig &&
+ ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH))
+ type = IRQ_TYPE_EDGE_FALLING;
+
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
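
The hunk above decodes the BIOS default trigger and active level from the pin register and, for an edge-triggered active-high default, overrides the requested type with IRQ_TYPE_EDGE_FALLING. A standalone sketch of that decode-and-override step; the bit positions and field values below are placeholders, not the driver's real LEVEL_TRIG_OFF/ACTIVE_LEVEL_* definitions:

#include <stdio.h>
#include <stdbool.h>

#define LEVEL_TRIG_OFF    14	/* hypothetical bit position */
#define LEVEL_TRIGGER     1
#define ACTIVE_LEVEL_OFF  9	/* hypothetical field offset */
#define ACTIVE_LEVEL_MASK 0x3
#define ACTIVE_HIGH       0

enum { TYPE_EDGE_FALLING = 2, TYPE_LEVEL_HIGH = 4 };

static int effective_type(unsigned int pin_reg, int requested)
{
	bool edge = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
	unsigned int level = (pin_reg >> ACTIVE_LEVEL_OFF) & ACTIVE_LEVEL_MASK;

	/* BIOS default is edge-triggered and active-high: keep it. */
	if (edge && level == ACTIVE_HIGH)
		return TYPE_EDGE_FALLING;
	return requested;
}

int main(void)
{
	unsigned int bios_default = 0;	/* trigger bit clear, level field 0 */

	printf("type = %d\n", effective_type(bios_default, TYPE_LEVEL_HIGH));
	return 0;
}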
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 80daead3a5a1..9f0904185909 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1614,7 +1614,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
&gpio_irqchip,
0,
handle_edge_irq,
- IRQ_TYPE_EDGE_BOTH);
+ IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
at91_gpio->pioc_idx);
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 8edb3f8c72c8..a30146da7ffd 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -1004,9 +1004,7 @@ static int palmas_pinctrl_probe(struct platform_device *pdev)
bool enable_dvfs2 = false;
if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_device(palmas_pinctrl_of_match, &pdev->dev);
- pinctrl_data = match->data;
+ pinctrl_data = of_device_get_match_data(&pdev->dev);
enable_dvfs1 = of_property_read_bool(pdev->dev.of_node,
"ti,palmas-enable-dvfs1");
enable_dvfs2 = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 44902c63f507..49bf7dcb7ed8 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -23,7 +23,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -2704,7 +2704,6 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = (void *)&rk3399_pin_ctrl },
{},
};
-MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
static struct platform_driver rockchip_pinctrl_driver = {
.probe = rockchip_pinctrl_probe,
@@ -2720,7 +2719,3 @@ static int __init rockchip_pinctrl_drv_register(void)
return platform_driver_register(&rockchip_pinctrl_driver);
}
postcore_initcall(rockchip_pinctrl_drv_register);
-
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_DESCRIPTION("Rockchip pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 0de1c67dfb94..99da4cf91031 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -335,61 +335,25 @@ struct st_pinctrl {
};
/* SOC specific data */
-/* STiH415 data */
-static const unsigned int stih415_input_delays[] = {0, 500, 1000, 1500};
-static const unsigned int stih415_output_delays[] = {0, 1000, 2000, 3000};
-
-#define STIH415_PCTRL_COMMON_DATA \
- .rt_style = st_retime_style_packed, \
- .input_delays = stih415_input_delays, \
- .ninput_delays = ARRAY_SIZE(stih415_input_delays), \
- .output_delays = stih415_output_delays, \
- .noutput_delays = ARRAY_SIZE(stih415_output_delays)
-
-static const struct st_pctl_data stih415_sbc_data = {
- STIH415_PCTRL_COMMON_DATA,
- .alt = 0, .oe = 5, .pu = 7, .od = 9, .rt = 16,
-};
-
-static const struct st_pctl_data stih415_front_data = {
- STIH415_PCTRL_COMMON_DATA,
- .alt = 0, .oe = 8, .pu = 10, .od = 12, .rt = 16,
-};
-
-static const struct st_pctl_data stih415_rear_data = {
- STIH415_PCTRL_COMMON_DATA,
- .alt = 0, .oe = 6, .pu = 8, .od = 10, .rt = 38,
-};
-
-static const struct st_pctl_data stih415_left_data = {
- STIH415_PCTRL_COMMON_DATA,
- .alt = 0, .oe = 3, .pu = 4, .od = 5, .rt = 6,
-};
-
-static const struct st_pctl_data stih415_right_data = {
- STIH415_PCTRL_COMMON_DATA,
- .alt = 0, .oe = 5, .pu = 7, .od = 9, .rt = 11,
-};
-/* STiH416 data */
-static const unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250,
+static const unsigned int stih407_delays[] = {0, 300, 500, 750, 1000, 1250,
1500, 1750, 2000, 2250, 2500, 2750, 3000, 3250 };
-static const struct st_pctl_data stih416_data = {
- .rt_style = st_retime_style_dedicated,
- .input_delays = stih416_delays,
- .ninput_delays = ARRAY_SIZE(stih416_delays),
- .output_delays = stih416_delays,
- .noutput_delays = ARRAY_SIZE(stih416_delays),
+static const struct st_pctl_data stih407_data = {
+ .rt_style = st_retime_style_dedicated,
+ .input_delays = stih407_delays,
+ .ninput_delays = ARRAY_SIZE(stih407_delays),
+ .output_delays = stih407_delays,
+ .noutput_delays = ARRAY_SIZE(stih407_delays),
.alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
};
static const struct st_pctl_data stih407_flashdata = {
.rt_style = st_retime_style_none,
- .input_delays = stih416_delays,
- .ninput_delays = ARRAY_SIZE(stih416_delays),
- .output_delays = stih416_delays,
- .noutput_delays = ARRAY_SIZE(stih416_delays),
+ .input_delays = stih407_delays,
+ .ninput_delays = ARRAY_SIZE(stih407_delays),
+ .output_delays = stih407_delays,
+ .noutput_delays = ARRAY_SIZE(stih407_delays),
.alt = 0,
.oe = -1, /* Not Available */
.pu = -1, /* Not Available */
@@ -799,21 +763,6 @@ static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return (direction == ST_GPIO_DIRECTION_IN);
}
-static int st_gpio_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec, u32 *flags)
-{
- if (WARN_ON(gc->of_gpio_n_cells < 1))
- return -EINVAL;
-
- if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
- return -EINVAL;
-
- if (gpiospec->args[0] > gc->ngpio)
- return -EINVAL;
-
- return gpiospec->args[0];
-}
-
/* Pinctrl Groups */
static int st_pctl_get_groups_count(struct pinctrl_dev *pctldev)
{
@@ -1486,8 +1435,6 @@ static struct gpio_chip st_gpio_template = {
.direction_output = st_gpio_direction_output,
.get_direction = st_gpio_get_direction,
.ngpio = ST_GPIO_PINS_PER_BANK,
- .of_gpio_n_cells = 1,
- .of_xlate = st_gpio_xlate,
};
static struct irq_chip st_gpio_irqchip = {
@@ -1579,21 +1526,9 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
}
static const struct of_device_id st_pctl_of_match[] = {
- { .compatible = "st,stih415-sbc-pinctrl", .data = &stih415_sbc_data },
- { .compatible = "st,stih415-rear-pinctrl", .data = &stih415_rear_data },
- { .compatible = "st,stih415-left-pinctrl", .data = &stih415_left_data },
- { .compatible = "st,stih415-right-pinctrl",
- .data = &stih415_right_data },
- { .compatible = "st,stih415-front-pinctrl",
- .data = &stih415_front_data },
- { .compatible = "st,stih416-sbc-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih416-front-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih416-rear-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih416-fvdp-fe-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih416-fvdp-lite-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih407-sbc-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih407-front-pinctrl", .data = &stih416_data},
- { .compatible = "st,stih407-rear-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-sbc-pinctrl", .data = &stih407_data},
+ { .compatible = "st,stih407-front-pinctrl", .data = &stih407_data},
+ { .compatible = "st,stih407-rear-pinctrl", .data = &stih407_data},
{ .compatible = "st,stih407-flash-pinctrl", .data = &stih407_flashdata},
{ /* sentinel */ }
};
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 7afdbede6823..e0ecffcbe11f 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -233,7 +233,7 @@ static const unsigned int sdio0_2_pins[] = {40, 41, 42, 43, 44, 45};
static const unsigned int sdio1_0_pins[] = {10, 11, 12, 13, 14, 15};
static const unsigned int sdio1_1_pins[] = {22, 23, 24, 25, 26, 27};
static const unsigned int sdio1_2_pins[] = {34, 35, 36, 37, 38, 39};
-static const unsigned int sdio1_3_pins[] = {46, 47, 48, 49, 40, 51};
+static const unsigned int sdio1_3_pins[] = {46, 47, 48, 49, 50, 51};
static const unsigned int sdio0_emio_wp_pins[] = {54};
static const unsigned int sdio0_emio_cd_pins[] = {55};
static const unsigned int sdio1_emio_wp_pins[] = {56};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 51c42d746883..775c88303017 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -156,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
- val &= mask;
+ val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
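
The one-line fix above replaces "val &= mask" with "val &= ~mask": the old code cleared every field except the mux bits and then OR-ed the new function index on top of the stale one. A minimal standalone sketch of the corrected read-modify-write, with made-up field positions:

#include <stdio.h>

int main(void)
{
	unsigned int mux_bit = 2;		/* hypothetical g->mux_bit */
	unsigned int mask = 0x7 << mux_bit;	/* 3-bit function field */
	unsigned int ctl = 0x3 | (0x5 << mux_bit); /* other bits + old function 5 */
	unsigned int i = 2;			/* new function index */

	ctl &= ~mask;		/* clear only the mux field */
	ctl |= i << mux_bit;

	printf("ctl = 0x%x\n", ctl);	/* 0xb: other bits kept, function now 2 */
	return 0;
}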
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 686accb89f52..664b641fd776 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -800,6 +800,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
+ { .compatible = "qcom,spmi-gpio" }, /* Generic */
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 1735ffef9d5c..6556dbeae65e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -914,6 +914,7 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,spmi-mpp" }, /* Generic */
{ },
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 051b5bf701a8..d32fa2b5ff82 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -428,14 +428,10 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
int eint_irq;
chained_irq_enter(chip, desc);
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_ack)
- chip->irq_ack(&desc->irq_data);
eint_irq = irq_linear_revmap(bank->irq_domain, eintd->irq);
generic_handle_irq(eint_irq);
- chip->irq_unmask(&desc->irq_data);
+
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 513fe6b23248..620727fabe64 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1041,17 +1041,9 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "device tree node not found\n");
- return -ENODEV;
- }
-
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata) {
- dev_err(dev, "failed to allocate memory for driver's "
- "private data\n");
+ if (!drvdata)
return -ENOMEM;
- }
ctrl = samsung_pinctrl_get_soc_data(drvdata, pdev);
if (IS_ERR(ctrl)) {
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 415dd8023063..07eca54bdc1c 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -54,6 +54,11 @@ config PINCTRL_PFC_R8A7791
depends on ARCH_R8A7791
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A7792
+ def_bool y
+ depends on ARCH_R8A7792
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7793
def_bool y
depends on ARCH_R8A7793
@@ -69,6 +74,11 @@ config PINCTRL_PFC_R8A7795
depends on ARCH_R8A7795
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A7796
+ def_bool y
+ depends on ARCH_R8A7796
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_SH7203
def_bool y
depends on CPU_SUBTYPE_SH7203
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8a2c8710fc93..2dda8c63f3cf 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -7,9 +7,11 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7792) += pfc-r8a7792.o
obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index a3b82041b6a2..f3a8897d4e8f 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -494,6 +494,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7791_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7792
+ {
+ .compatible = "renesas,pfc-r8a7792",
+ .data = &r8a7792_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7793
{
.compatible = "renesas,pfc-r8a7793",
@@ -512,6 +518,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7795_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7796
+ {
+ .compatible = "renesas,pfc-r8a7796",
+ .data = &r8a7796_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 0c1a60c9a844..7ca37c3019ab 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -13,6 +13,10 @@
#include "sh_pfc.h"
+/*
+ * Pins 0-23 assigned to GPIO bank 6 can be used for SD interfaces in
+ * which case they support both 3.3V and 1.8V signalling.
+ */
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
PORT_GP_26(1, fn, sfx), \
@@ -20,7 +24,15 @@
PORT_GP_32(3, fn, sfx), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx), \
- PORT_GP_32(6, fn, sfx), \
+ PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_1(6, 24, fn, sfx), \
+ PORT_GP_1(6, 25, fn, sfx), \
+ PORT_GP_1(6, 26, fn, sfx), \
+ PORT_GP_1(6, 27, fn, sfx), \
+ PORT_GP_1(6, 28, fn, sfx), \
+ PORT_GP_1(6, 29, fn, sfx), \
+ PORT_GP_1(6, 30, fn, sfx), \
+ PORT_GP_1(6, 31, fn, sfx), \
PORT_GP_26(7, fn, sfx)
enum {
@@ -6404,9 +6416,24 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+static int r8a7791_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+{
+ if (pin < RCAR_GP_PIN(6, 0) || pin > RCAR_GP_PIN(6, 23))
+ return -EINVAL;
+
+ *pocctrl = 0xe606008c;
+
+ return 31 - (pin & 0x1f);
+}
+
+static const struct sh_pfc_soc_operations r8a7791_pinmux_ops = {
+ .pin_to_pocctrl = r8a7791_pin_to_pocctrl,
+};
+
#ifdef CONFIG_PINCTRL_PFC_R8A7791
const struct sh_pfc_soc_info r8a7791_pinmux_info = {
.name = "r8a77910_pfc",
+ .ops = &r8a7791_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
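
r8a7791_pin_to_pocctrl() above maps GP6_0..GP6_23 to a bit in the POC control register at 0xe606008c, counting down from bit 31. A standalone sketch of that computation; the GP_PIN() encoding (bank * 32 + index) is an assumption made only for this illustration:

#include <stdio.h>

#define GP_PIN(bank, idx)	((bank) * 32 + (idx))

/* Mirrors the pin_to_pocctrl logic: only GP6_0..GP6_23 have an IO-voltage bit. */
static int pin_to_poc_bit(unsigned int pin, unsigned int *pocctrl)
{
	if (pin < GP_PIN(6, 0) || pin > GP_PIN(6, 23))
		return -1;

	*pocctrl = 0xe606008c;		/* register address from the patch */
	return 31 - (pin & 0x1f);	/* GP6_0 -> bit 31, GP6_23 -> bit 8 */
}

int main(void)
{
	unsigned int reg;
	int bit = pin_to_poc_bit(GP_PIN(6, 3), &reg);

	if (bit >= 0)
		printf("GP6_3: reg 0x%x, bit %d\n", reg, bit);
	return 0;
}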
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
new file mode 100644
index 000000000000..21badb6166b9
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -0,0 +1,2739 @@
+/*
+ * r8a7792 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2016 Cogent Embedded, Inc., <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_29(0, fn, sfx), \
+ PORT_GP_23(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_28(3, fn, sfx), \
+ PORT_GP_17(4, fn, sfx), \
+ PORT_GP_17(5, fn, sfx), \
+ PORT_GP_17(6, fn, sfx), \
+ PORT_GP_17(7, fn, sfx), \
+ PORT_GP_17(8, fn, sfx), \
+ PORT_GP_17(9, fn, sfx), \
+ PORT_GP_32(10, fn, sfx), \
+ PORT_GP_30(11, fn, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+
+ /* GPSR0 */
+ FN_IP0_0, FN_IP0_1, FN_IP0_2, FN_IP0_3, FN_IP0_4, FN_IP0_5,
+ FN_IP0_6, FN_IP0_7, FN_IP0_8, FN_IP0_9, FN_IP0_10, FN_IP0_11,
+ FN_IP0_12, FN_IP0_13, FN_IP0_14, FN_IP0_15, FN_IP0_16,
+ FN_IP0_17, FN_IP0_18, FN_IP0_19, FN_IP0_20, FN_IP0_21,
+ FN_IP0_22, FN_IP0_23, FN_IP1_0, FN_IP1_1, FN_IP1_2,
+ FN_IP1_3, FN_IP1_4,
+
+ /* GPSR1 */
+ FN_IP1_5, FN_IP1_6, FN_IP1_7, FN_IP1_8, FN_IP1_9, FN_IP1_10,
+ FN_IP1_11, FN_IP1_12, FN_IP1_13, FN_IP1_14, FN_IP1_15, FN_IP1_16,
+ FN_DU1_DB2_C0_DATA12, FN_DU1_DB3_C1_DATA13, FN_DU1_DB4_C2_DATA14,
+ FN_DU1_DB5_C3_DATA15, FN_DU1_DB6_C4, FN_DU1_DB7_C5,
+ FN_DU1_EXHSYNC_DU1_HSYNC, FN_DU1_EXVSYNC_DU1_VSYNC,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_DU1_DISP, FN_DU1_CDE,
+
+ /* GPSR2 */
+ FN_D0, FN_D1, FN_D2, FN_D3, FN_D4, FN_D5, FN_D6, FN_D7,
+ FN_D8, FN_D9, FN_D10, FN_D11, FN_D12, FN_D13, FN_D14, FN_D15,
+ FN_A0, FN_A1, FN_A2, FN_A3, FN_A4, FN_A5, FN_A6, FN_A7,
+ FN_A8, FN_A9, FN_A10, FN_A11, FN_A12, FN_A13, FN_A14, FN_A15,
+
+ /* GPSR3 */
+ FN_A16, FN_A17, FN_A18, FN_A19, FN_IP1_17, FN_IP1_18,
+ FN_CS1_N_A26, FN_EX_CS0_N, FN_EX_CS1_N, FN_EX_CS2_N, FN_EX_CS3_N,
+ FN_EX_CS4_N, FN_EX_CS5_N, FN_BS_N, FN_RD_N, FN_RD_WR_N,
+ FN_WE0_N, FN_WE1_N, FN_EX_WAIT0, FN_IRQ0, FN_IRQ1, FN_IRQ2, FN_IRQ3,
+ FN_IP1_19, FN_IP1_20, FN_IP1_21, FN_IP1_22, FN_CS0_N,
+
+ /* GPSR4 */
+ FN_VI0_CLK, FN_VI0_CLKENB, FN_VI0_HSYNC_N, FN_VI0_VSYNC_N,
+ FN_VI0_D0_B0_C0, FN_VI0_D1_B1_C1, FN_VI0_D2_B2_C2, FN_VI0_D3_B3_C3,
+ FN_VI0_D4_B4_C4, FN_VI0_D5_B5_C5, FN_VI0_D6_B6_C6, FN_VI0_D7_B7_C7,
+ FN_VI0_D8_G0_Y0, FN_VI0_D9_G1_Y1, FN_VI0_D10_G2_Y2, FN_VI0_D11_G3_Y3,
+ FN_VI0_FIELD,
+
+ /* GPSR5 */
+ FN_VI1_CLK, FN_VI1_CLKENB, FN_VI1_HSYNC_N, FN_VI1_VSYNC_N,
+ FN_VI1_D0_B0_C0, FN_VI1_D1_B1_C1, FN_VI1_D2_B2_C2, FN_VI1_D3_B3_C3,
+ FN_VI1_D4_B4_C4, FN_VI1_D5_B5_C5, FN_VI1_D6_B6_C6, FN_VI1_D7_B7_C7,
+ FN_VI1_D8_G0_Y0, FN_VI1_D9_G1_Y1, FN_VI1_D10_G2_Y2, FN_VI1_D11_G3_Y3,
+ FN_VI1_FIELD,
+
+ /* GPSR6 */
+ FN_IP2_0, FN_IP2_1, FN_IP2_2, FN_IP2_3, FN_IP2_4, FN_IP2_5, FN_IP2_6,
+ FN_IP2_7, FN_IP2_8, FN_IP2_9, FN_IP2_10, FN_IP2_11, FN_IP2_12,
+ FN_IP2_13, FN_IP2_14, FN_IP2_15, FN_IP2_16,
+
+ /* GPSR7 */
+ FN_IP3_0, FN_IP3_1, FN_IP3_2, FN_IP3_3, FN_IP3_4, FN_IP3_5, FN_IP3_6,
+ FN_IP3_7, FN_IP3_8, FN_IP3_9, FN_IP3_10, FN_IP3_11, FN_IP3_12,
+ FN_IP3_13, FN_VI3_D10_Y2, FN_IP3_14, FN_VI3_FIELD,
+
+ /* GPSR8 */
+ FN_VI4_CLK, FN_IP4_0, FN_IP4_1, FN_IP4_3_2, FN_IP4_4, FN_IP4_6_5,
+ FN_IP4_8_7, FN_IP4_10_9, FN_IP4_12_11, FN_IP4_14_13, FN_IP4_16_15,
+ FN_IP4_18_17, FN_IP4_20_19, FN_IP4_21, FN_IP4_22, FN_IP4_23, FN_IP4_24,
+
+ /* GPSR9 */
+ FN_VI5_CLK, FN_IP5_0, FN_IP5_1, FN_IP5_2, FN_IP5_3, FN_IP5_4, FN_IP5_5,
+ FN_IP5_6, FN_IP5_7, FN_IP5_8, FN_IP5_9, FN_IP5_10, FN_IP5_11,
+ FN_VI5_D9_Y1, FN_VI5_D10_Y2, FN_VI5_D11_Y3, FN_VI5_FIELD,
+
+ /* GPSR10 */
+ FN_IP6_0, FN_IP6_1, FN_HRTS0_N, FN_IP6_2, FN_IP6_3, FN_IP6_4, FN_IP6_5,
+ FN_HCTS1_N, FN_IP6_6, FN_IP6_7, FN_SCK0, FN_CTS0_N, FN_RTS0_N,
+ FN_TX0, FN_RX0, FN_SCK1, FN_CTS1_N, FN_RTS1_N, FN_TX1, FN_RX1,
+ FN_IP6_9_8, FN_IP6_11_10, FN_IP6_13_12, FN_IP6_15_14, FN_IP6_16,
+ FN_IP6_18_17, FN_SCIF_CLK, FN_CAN0_TX, FN_CAN0_RX, FN_CAN_CLK,
+ FN_CAN1_TX, FN_CAN1_RX,
+
+ /* GPSR11 */
+ FN_IP7_1_0, FN_IP7_3_2, FN_IP7_5_4, FN_IP7_6, FN_IP7_7, FN_SD0_CLK,
+ FN_SD0_CMD, FN_SD0_DAT0, FN_SD0_DAT1, FN_SD0_DAT2, FN_SD0_DAT3,
+ FN_SD0_CD, FN_SD0_WP, FN_IP7_9_8, FN_IP7_11_10, FN_IP7_13_12,
+ FN_IP7_15_14, FN_IP7_16, FN_IP7_17, FN_IP7_18, FN_IP7_19, FN_IP7_20,
+ FN_ADICLK, FN_ADICS_SAMP, FN_ADIDATA, FN_ADICHS0, FN_ADICHS1,
+ FN_ADICHS2, FN_AVS1, FN_AVS2,
+
+ /* IPSR0 */
+ FN_DU0_DR0_DATA0, FN_DU0_DR1_DATA1, FN_DU0_DR2_Y4_DATA2,
+ FN_DU0_DR3_Y5_DATA3, FN_DU0_DR4_Y6_DATA4, FN_DU0_DR5_Y7_DATA5,
+ FN_DU0_DR6_Y8_DATA6, FN_DU0_DR7_Y9_DATA7, FN_DU0_DG0_DATA8,
+ FN_DU0_DG1_DATA9, FN_DU0_DG2_C6_DATA10, FN_DU0_DG3_C7_DATA11,
+ FN_DU0_DG4_Y0_DATA12, FN_DU0_DG5_Y1_DATA13, FN_DU0_DG6_Y2_DATA14,
+ FN_DU0_DG7_Y3_DATA15, FN_DU0_DB0, FN_DU0_DB1, FN_DU0_DB2_C0,
+ FN_DU0_DB3_C1, FN_DU0_DB4_C2, FN_DU0_DB5_C3, FN_DU0_DB6_C4,
+ FN_DU0_DB7_C5,
+
+ /* IPSR1 */
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_DU0_EXVSYNC_DU0_VSYNC,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_DU0_DISP, FN_DU0_CDE,
+ FN_DU1_DR2_Y4_DATA0, FN_DU1_DR3_Y5_DATA1, FN_DU1_DR4_Y6_DATA2,
+ FN_DU1_DR5_Y7_DATA3, FN_DU1_DR6_DATA4, FN_DU1_DR7_DATA5,
+ FN_DU1_DG2_C6_DATA6, FN_DU1_DG3_C7_DATA7, FN_DU1_DG4_Y0_DATA8,
+ FN_DU1_DG5_Y1_DATA9, FN_DU1_DG6_Y2_DATA10, FN_DU1_DG7_Y3_DATA11,
+ FN_A20, FN_MOSI_IO0, FN_A21, FN_MISO_IO1, FN_A22, FN_IO2,
+ FN_A23, FN_IO3, FN_A24, FN_SPCLK, FN_A25, FN_SSL,
+
+ /* IPSR2 */
+ FN_VI2_CLK, FN_AVB_RX_CLK, FN_VI2_CLKENB, FN_AVB_RX_DV,
+ FN_VI2_HSYNC_N, FN_AVB_RXD0, FN_VI2_VSYNC_N, FN_AVB_RXD1,
+ FN_VI2_D0_C0, FN_AVB_RXD2, FN_VI2_D1_C1, FN_AVB_RXD3,
+ FN_VI2_D2_C2, FN_AVB_RXD4, FN_VI2_D3_C3, FN_AVB_RXD5,
+ FN_VI2_D4_C4, FN_AVB_RXD6, FN_VI2_D5_C5, FN_AVB_RXD7,
+ FN_VI2_D6_C6, FN_AVB_RX_ER, FN_VI2_D7_C7, FN_AVB_COL,
+ FN_VI2_D8_Y0, FN_AVB_TXD3, FN_VI2_D9_Y1, FN_AVB_TX_EN,
+ FN_VI2_D10_Y2, FN_AVB_TXD0, FN_VI2_D11_Y3, FN_AVB_TXD1,
+ FN_VI2_FIELD, FN_AVB_TXD2,
+
+ /* IPSR3 */
+ FN_VI3_CLK, FN_AVB_TX_CLK, FN_VI3_CLKENB, FN_AVB_TXD4,
+ FN_VI3_HSYNC_N, FN_AVB_TXD5, FN_VI3_VSYNC_N, FN_AVB_TXD6,
+ FN_VI3_D0_C0, FN_AVB_TXD7, FN_VI3_D1_C1, FN_AVB_TX_ER,
+ FN_VI3_D2_C2, FN_AVB_GTX_CLK, FN_VI3_D3_C3, FN_AVB_MDC,
+ FN_VI3_D4_C4, FN_AVB_MDIO, FN_VI3_D5_C5, FN_AVB_LINK,
+ FN_VI3_D6_C6, FN_AVB_MAGIC, FN_VI3_D7_C7, FN_AVB_PHY_INT,
+ FN_VI3_D8_Y0, FN_AVB_CRS, FN_VI3_D9_Y1, FN_AVB_GTXREFCLK,
+ FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH,
+
+ /* IPSR4 */
+ FN_VI4_CLKENB, FN_VI0_D12_G4_Y4, FN_VI4_HSYNC_N, FN_VI0_D13_G5_Y5,
+ FN_VI4_VSYNC_N, FN_VI0_D14_G6_Y6, FN_RDR_CLKOUT,
+ FN_VI4_D0_C0, FN_VI0_D15_G7_Y7,
+ FN_VI4_D1_C1, FN_VI0_D16_R0, FN_VI1_D12_G4_Y4,
+ FN_VI4_D2_C2, FN_VI0_D17_R1, FN_VI1_D13_G5_Y5,
+ FN_VI4_D3_C3, FN_VI0_D18_R2, FN_VI1_D14_G6_Y6,
+ FN_VI4_D4_C4, FN_VI0_D19_R3, FN_VI1_D15_G7_Y7,
+ FN_VI4_D5_C5, FN_VI0_D20_R4, FN_VI2_D12_Y4,
+ FN_VI4_D6_C6, FN_VI0_D21_R5, FN_VI2_D13_Y5,
+ FN_VI4_D7_C7, FN_VI0_D22_R6, FN_VI2_D14_Y6,
+ FN_VI4_D8_Y0, FN_VI0_D23_R7, FN_VI2_D15_Y7,
+ FN_VI4_D9_Y1, FN_VI3_D12_Y4, FN_VI4_D10_Y2, FN_VI3_D13_Y5,
+ FN_VI4_D11_Y3, FN_VI3_D14_Y6, FN_VI4_FIELD, FN_VI3_D15_Y7,
+
+ /* IPSR5 */
+ FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B, FN_VI5_HSYNC_N, FN_VI1_D13_G5_Y5_B,
+ FN_VI5_VSYNC_N, FN_VI1_D14_G6_Y6_B, FN_VI5_D0_C0, FN_VI1_D15_G7_Y7_B,
+ FN_VI5_D1_C1, FN_VI1_D16_R0, FN_VI5_D2_C2, FN_VI1_D17_R1,
+ FN_VI5_D3_C3, FN_VI1_D18_R2, FN_VI5_D4_C4, FN_VI1_D19_R3,
+ FN_VI5_D5_C5, FN_VI1_D20_R4, FN_VI5_D6_C6, FN_VI1_D21_R5,
+ FN_VI5_D7_C7, FN_VI1_D22_R6, FN_VI5_D8_Y0, FN_VI1_D23_R7,
+
+ /* IPSR6 */
+ FN_MSIOF0_SCK, FN_HSCK0, FN_MSIOF0_SYNC, FN_HCTS0_N,
+ FN_MSIOF0_TXD, FN_HTX0, FN_MSIOF0_RXD, FN_HRX0,
+ FN_MSIOF1_SCK, FN_HSCK1, FN_MSIOF1_SYNC, FN_HRTS1_N,
+ FN_MSIOF1_TXD, FN_HTX1, FN_MSIOF1_RXD, FN_HRX1,
+ FN_DRACK0, FN_SCK2, FN_DACK0, FN_TX2, FN_DREQ0_N, FN_RX2,
+ FN_DACK1, FN_SCK3, FN_TX3, FN_DREQ1_N, FN_RX3,
+
+ /* IPSR7 */
+ FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, FN_PWM1, FN_TCLK2, FN_FSO_CFE_1,
+ FN_PWM2, FN_TCLK3, FN_FSO_TOE, FN_PWM3, FN_PWM4,
+ FN_SSI_SCK34, FN_TPU0TO0, FN_SSI_WS34, FN_TPU0TO1,
+ FN_SSI_SDATA3, FN_TPU0TO2, FN_SSI_SCK4, FN_TPU0TO3,
+ FN_SSI_WS4, FN_SSI_SDATA4, FN_AUDIO_CLKOUT,
+ FN_AUDIO_CLKA, FN_AUDIO_CLKB,
+
+ /* MOD_SEL */
+ FN_SEL_VI1_0, FN_SEL_VI1_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ DU1_DB2_C0_DATA12_MARK, DU1_DB3_C1_DATA13_MARK,
+ DU1_DB4_C2_DATA14_MARK, DU1_DB5_C3_DATA15_MARK,
+ DU1_DB6_C4_MARK, DU1_DB7_C5_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+ DU1_DISP_MARK, DU1_CDE_MARK,
+
+ D0_MARK, D1_MARK, D2_MARK, D3_MARK, D4_MARK, D5_MARK, D6_MARK,
+ D7_MARK, D8_MARK, D9_MARK, D10_MARK, D11_MARK, D12_MARK, D13_MARK,
+ D14_MARK, D15_MARK, A0_MARK, A1_MARK, A2_MARK, A3_MARK, A4_MARK,
+ A5_MARK, A6_MARK, A7_MARK, A8_MARK, A9_MARK, A10_MARK, A11_MARK,
+ A12_MARK, A13_MARK, A14_MARK, A15_MARK,
+
+ A16_MARK, A17_MARK, A18_MARK, A19_MARK, CS1_N_A26_MARK,
+ EX_CS0_N_MARK, EX_CS1_N_MARK, EX_CS2_N_MARK, EX_CS3_N_MARK,
+ EX_CS4_N_MARK, EX_CS5_N_MARK, BS_N_MARK, RD_N_MARK, RD_WR_N_MARK,
+ WE0_N_MARK, WE1_N_MARK, EX_WAIT0_MARK,
+ IRQ0_MARK, IRQ1_MARK, IRQ2_MARK, IRQ3_MARK, CS0_N_MARK,
+
+ VI0_CLK_MARK, VI0_CLKENB_MARK, VI0_HSYNC_N_MARK, VI0_VSYNC_N_MARK,
+ VI0_D0_B0_C0_MARK, VI0_D1_B1_C1_MARK, VI0_D2_B2_C2_MARK,
+ VI0_D3_B3_C3_MARK, VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK,
+ VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK, VI0_D8_G0_Y0_MARK,
+ VI0_D9_G1_Y1_MARK, VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK,
+ VI0_FIELD_MARK,
+
+ VI1_CLK_MARK, VI1_CLKENB_MARK, VI1_HSYNC_N_MARK, VI1_VSYNC_N_MARK,
+ VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK, VI1_D2_B2_C2_MARK,
+ VI1_D3_B3_C3_MARK, VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK,
+ VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK, VI1_D8_G0_Y0_MARK,
+ VI1_D9_G1_Y1_MARK, VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK,
+ VI1_FIELD_MARK,
+
+ VI3_D10_Y2_MARK, VI3_FIELD_MARK,
+
+ VI4_CLK_MARK,
+
+ VI5_CLK_MARK, VI5_D9_Y1_MARK, VI5_D10_Y2_MARK, VI5_D11_Y3_MARK,
+ VI5_FIELD_MARK,
+
+ HRTS0_N_MARK, HCTS1_N_MARK, SCK0_MARK, CTS0_N_MARK, RTS0_N_MARK,
+ TX0_MARK, RX0_MARK, SCK1_MARK, CTS1_N_MARK, RTS1_N_MARK,
+ TX1_MARK, RX1_MARK, SCIF_CLK_MARK, CAN0_TX_MARK, CAN0_RX_MARK,
+ CAN_CLK_MARK, CAN1_TX_MARK, CAN1_RX_MARK,
+
+ SD0_CLK_MARK, SD0_CMD_MARK, SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK, SD0_CD_MARK, SD0_WP_MARK,
+ ADICLK_MARK, ADICS_SAMP_MARK, ADIDATA_MARK, ADICHS0_MARK,
+ ADICHS1_MARK, ADICHS2_MARK, AVS1_MARK, AVS2_MARK,
+
+ /* IPSR0 */
+ DU0_DR0_DATA0_MARK, DU0_DR1_DATA1_MARK, DU0_DR2_Y4_DATA2_MARK,
+ DU0_DR3_Y5_DATA3_MARK, DU0_DR4_Y6_DATA4_MARK, DU0_DR5_Y7_DATA5_MARK,
+ DU0_DR6_Y8_DATA6_MARK, DU0_DR7_Y9_DATA7_MARK, DU0_DG0_DATA8_MARK,
+ DU0_DG1_DATA9_MARK, DU0_DG2_C6_DATA10_MARK, DU0_DG3_C7_DATA11_MARK,
+ DU0_DG4_Y0_DATA12_MARK, DU0_DG5_Y1_DATA13_MARK, DU0_DG6_Y2_DATA14_MARK,
+ DU0_DG7_Y3_DATA15_MARK, DU0_DB0_MARK, DU0_DB1_MARK,
+ DU0_DB2_C0_MARK, DU0_DB3_C1_MARK, DU0_DB4_C2_MARK, DU0_DB5_C3_MARK,
+ DU0_DB6_C4_MARK, DU0_DB7_C5_MARK,
+
+ /* IPSR1 */
+ DU0_EXHSYNC_DU0_HSYNC_MARK, DU0_EXVSYNC_DU0_VSYNC_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, DU0_DISP_MARK, DU0_CDE_MARK,
+ DU1_DR2_Y4_DATA0_MARK, DU1_DR3_Y5_DATA1_MARK, DU1_DR4_Y6_DATA2_MARK,
+ DU1_DR5_Y7_DATA3_MARK, DU1_DR6_DATA4_MARK, DU1_DR7_DATA5_MARK,
+ DU1_DG2_C6_DATA6_MARK, DU1_DG3_C7_DATA7_MARK, DU1_DG4_Y0_DATA8_MARK,
+ DU1_DG5_Y1_DATA9_MARK, DU1_DG6_Y2_DATA10_MARK, DU1_DG7_Y3_DATA11_MARK,
+ A20_MARK, MOSI_IO0_MARK, A21_MARK, MISO_IO1_MARK, A22_MARK, IO2_MARK,
+ A23_MARK, IO3_MARK, A24_MARK, SPCLK_MARK, A25_MARK, SSL_MARK,
+
+ /* IPSR2 */
+ VI2_CLK_MARK, AVB_RX_CLK_MARK, VI2_CLKENB_MARK, AVB_RX_DV_MARK,
+ VI2_HSYNC_N_MARK, AVB_RXD0_MARK, VI2_VSYNC_N_MARK, AVB_RXD1_MARK,
+ VI2_D0_C0_MARK, AVB_RXD2_MARK, VI2_D1_C1_MARK, AVB_TX_CLK_MARK,
+ VI2_D2_C2_MARK, AVB_RXD4_MARK, VI2_D3_C3_MARK, AVB_RXD5_MARK,
+ VI2_D4_C4_MARK, AVB_RXD6_MARK, VI2_D5_C5_MARK, AVB_RXD7_MARK,
+ VI2_D6_C6_MARK, AVB_RX_ER_MARK, VI2_D7_C7_MARK, AVB_COL_MARK,
+ VI2_D8_Y0_MARK, AVB_RXD3_MARK, VI2_D9_Y1_MARK, AVB_TX_EN_MARK,
+ VI2_D10_Y2_MARK, AVB_TXD0_MARK,
+ VI2_D11_Y3_MARK, AVB_TXD1_MARK, VI2_FIELD_MARK, AVB_TXD2_MARK,
+
+ /* IPSR3 */
+ VI3_CLK_MARK, AVB_TXD3_MARK, VI3_CLKENB_MARK, AVB_TXD4_MARK,
+ VI3_HSYNC_N_MARK, AVB_TXD5_MARK, VI3_VSYNC_N_MARK, AVB_TXD6_MARK,
+ VI3_D0_C0_MARK, AVB_TXD7_MARK, VI3_D1_C1_MARK, AVB_TX_ER_MARK,
+ VI3_D2_C2_MARK, AVB_GTX_CLK_MARK, VI3_D3_C3_MARK, AVB_MDC_MARK,
+ VI3_D4_C4_MARK, AVB_MDIO_MARK, VI3_D5_C5_MARK, AVB_LINK_MARK,
+ VI3_D6_C6_MARK, AVB_MAGIC_MARK, VI3_D7_C7_MARK, AVB_PHY_INT_MARK,
+ VI3_D8_Y0_MARK, AVB_CRS_MARK, VI3_D9_Y1_MARK, AVB_GTXREFCLK_MARK,
+ VI3_D11_Y3_MARK, AVB_AVTP_MATCH_MARK,
+
+ /* IPSR4 */
+ VI4_CLKENB_MARK, VI0_D12_G4_Y4_MARK, VI4_HSYNC_N_MARK,
+ VI0_D13_G5_Y5_MARK, VI4_VSYNC_N_MARK, VI0_D14_G6_Y6_MARK,
+ RDR_CLKOUT_MARK, VI4_D0_C0_MARK, VI0_D15_G7_Y7_MARK, VI4_D1_C1_MARK,
+ VI0_D16_R0_MARK, VI1_D12_G4_Y4_MARK, VI4_D2_C2_MARK, VI0_D17_R1_MARK,
+ VI1_D13_G5_Y5_MARK, VI4_D3_C3_MARK, VI0_D18_R2_MARK, VI1_D14_G6_Y6_MARK,
+ VI4_D4_C4_MARK, VI0_D19_R3_MARK, VI1_D15_G7_Y7_MARK, VI4_D5_C5_MARK,
+ VI0_D20_R4_MARK, VI2_D12_Y4_MARK, VI4_D6_C6_MARK, VI0_D21_R5_MARK,
+ VI2_D13_Y5_MARK, VI4_D7_C7_MARK, VI0_D22_R6_MARK, VI2_D14_Y6_MARK,
+ VI4_D8_Y0_MARK, VI0_D23_R7_MARK, VI2_D15_Y7_MARK, VI4_D9_Y1_MARK,
+ VI3_D12_Y4_MARK, VI4_D10_Y2_MARK, VI3_D13_Y5_MARK, VI4_D11_Y3_MARK,
+ VI3_D14_Y6_MARK, VI4_FIELD_MARK, VI3_D15_Y7_MARK,
+
+ /* IPSR5 */
+ VI5_CLKENB_MARK, VI1_D12_G4_Y4_B_MARK, VI5_HSYNC_N_MARK,
+ VI1_D13_G5_Y5_B_MARK, VI5_VSYNC_N_MARK, VI1_D14_G6_Y6_B_MARK,
+ VI5_D0_C0_MARK, VI1_D15_G7_Y7_B_MARK, VI5_D1_C1_MARK, VI1_D16_R0_MARK,
+ VI5_D2_C2_MARK, VI1_D17_R1_MARK, VI5_D3_C3_MARK, VI1_D18_R2_MARK,
+ VI5_D4_C4_MARK, VI1_D19_R3_MARK, VI5_D5_C5_MARK, VI1_D20_R4_MARK,
+ VI5_D6_C6_MARK, VI1_D21_R5_MARK, VI5_D7_C7_MARK, VI1_D22_R6_MARK,
+ VI5_D8_Y0_MARK, VI1_D23_R7_MARK,
+
+ /* IPSR6 */
+ MSIOF0_SCK_MARK, HSCK0_MARK, MSIOF0_SYNC_MARK, HCTS0_N_MARK,
+ MSIOF0_TXD_MARK, HTX0_MARK, MSIOF0_RXD_MARK, HRX0_MARK,
+ MSIOF1_SCK_MARK, HSCK1_MARK, MSIOF1_SYNC_MARK, HRTS1_N_MARK,
+ MSIOF1_TXD_MARK, HTX1_MARK, MSIOF1_RXD_MARK, HRX1_MARK,
+ DRACK0_MARK, SCK2_MARK, DACK0_MARK, TX2_MARK, DREQ0_N_MARK,
+ RX2_MARK, DACK1_MARK, SCK3_MARK, TX3_MARK, DREQ1_N_MARK,
+ RX3_MARK,
+
+ /* IPSR7 */
+ PWM0_MARK, TCLK1_MARK, FSO_CFE_0_MARK, PWM1_MARK, TCLK2_MARK,
+ FSO_CFE_1_MARK, PWM2_MARK, TCLK3_MARK, FSO_TOE_MARK, PWM3_MARK,
+ PWM4_MARK, SSI_SCK34_MARK, TPU0TO0_MARK, SSI_WS34_MARK, TPU0TO1_MARK,
+ SSI_SDATA3_MARK, TPU0TO2_MARK, SSI_SCK4_MARK, TPU0TO3_MARK,
+ SSI_WS4_MARK, SSI_SDATA4_MARK, AUDIO_CLKOUT_MARK, AUDIO_CLKA_MARK,
+ AUDIO_CLKB_MARK,
+ PINMUX_MARK_END,
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_SINGLE(DU1_DB2_C0_DATA12),
+ PINMUX_SINGLE(DU1_DB3_C1_DATA13),
+ PINMUX_SINGLE(DU1_DB4_C2_DATA14),
+ PINMUX_SINGLE(DU1_DB5_C3_DATA15),
+ PINMUX_SINGLE(DU1_DB6_C4),
+ PINMUX_SINGLE(DU1_DB7_C5),
+ PINMUX_SINGLE(DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_SINGLE(DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_SINGLE(DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_SINGLE(DU1_DISP),
+ PINMUX_SINGLE(DU1_CDE),
+ PINMUX_SINGLE(D0),
+ PINMUX_SINGLE(D1),
+ PINMUX_SINGLE(D2),
+ PINMUX_SINGLE(D3),
+ PINMUX_SINGLE(D4),
+ PINMUX_SINGLE(D5),
+ PINMUX_SINGLE(D6),
+ PINMUX_SINGLE(D7),
+ PINMUX_SINGLE(D8),
+ PINMUX_SINGLE(D9),
+ PINMUX_SINGLE(D10),
+ PINMUX_SINGLE(D11),
+ PINMUX_SINGLE(D12),
+ PINMUX_SINGLE(D13),
+ PINMUX_SINGLE(D14),
+ PINMUX_SINGLE(D15),
+ PINMUX_SINGLE(A0),
+ PINMUX_SINGLE(A1),
+ PINMUX_SINGLE(A2),
+ PINMUX_SINGLE(A3),
+ PINMUX_SINGLE(A4),
+ PINMUX_SINGLE(A5),
+ PINMUX_SINGLE(A6),
+ PINMUX_SINGLE(A7),
+ PINMUX_SINGLE(A8),
+ PINMUX_SINGLE(A9),
+ PINMUX_SINGLE(A10),
+ PINMUX_SINGLE(A11),
+ PINMUX_SINGLE(A12),
+ PINMUX_SINGLE(A13),
+ PINMUX_SINGLE(A14),
+ PINMUX_SINGLE(A15),
+ PINMUX_SINGLE(A16),
+ PINMUX_SINGLE(A17),
+ PINMUX_SINGLE(A18),
+ PINMUX_SINGLE(A19),
+ PINMUX_SINGLE(CS1_N_A26),
+ PINMUX_SINGLE(EX_CS0_N),
+ PINMUX_SINGLE(EX_CS1_N),
+ PINMUX_SINGLE(EX_CS2_N),
+ PINMUX_SINGLE(EX_CS3_N),
+ PINMUX_SINGLE(EX_CS4_N),
+ PINMUX_SINGLE(EX_CS5_N),
+ PINMUX_SINGLE(BS_N),
+ PINMUX_SINGLE(RD_N),
+ PINMUX_SINGLE(RD_WR_N),
+ PINMUX_SINGLE(WE0_N),
+ PINMUX_SINGLE(WE1_N),
+ PINMUX_SINGLE(EX_WAIT0),
+ PINMUX_SINGLE(IRQ0),
+ PINMUX_SINGLE(IRQ1),
+ PINMUX_SINGLE(IRQ2),
+ PINMUX_SINGLE(IRQ3),
+ PINMUX_SINGLE(CS0_N),
+ PINMUX_SINGLE(VI0_CLK),
+ PINMUX_SINGLE(VI0_CLKENB),
+ PINMUX_SINGLE(VI0_HSYNC_N),
+ PINMUX_SINGLE(VI0_VSYNC_N),
+ PINMUX_SINGLE(VI0_D0_B0_C0),
+ PINMUX_SINGLE(VI0_D1_B1_C1),
+ PINMUX_SINGLE(VI0_D2_B2_C2),
+ PINMUX_SINGLE(VI0_D3_B3_C3),
+ PINMUX_SINGLE(VI0_D4_B4_C4),
+ PINMUX_SINGLE(VI0_D5_B5_C5),
+ PINMUX_SINGLE(VI0_D6_B6_C6),
+ PINMUX_SINGLE(VI0_D7_B7_C7),
+ PINMUX_SINGLE(VI0_D8_G0_Y0),
+ PINMUX_SINGLE(VI0_D9_G1_Y1),
+ PINMUX_SINGLE(VI0_D10_G2_Y2),
+ PINMUX_SINGLE(VI0_D11_G3_Y3),
+ PINMUX_SINGLE(VI0_FIELD),
+ PINMUX_SINGLE(VI1_CLK),
+ PINMUX_SINGLE(VI1_CLKENB),
+ PINMUX_SINGLE(VI1_HSYNC_N),
+ PINMUX_SINGLE(VI1_VSYNC_N),
+ PINMUX_SINGLE(VI1_D0_B0_C0),
+ PINMUX_SINGLE(VI1_D1_B1_C1),
+ PINMUX_SINGLE(VI1_D2_B2_C2),
+ PINMUX_SINGLE(VI1_D3_B3_C3),
+ PINMUX_SINGLE(VI1_D4_B4_C4),
+ PINMUX_SINGLE(VI1_D5_B5_C5),
+ PINMUX_SINGLE(VI1_D6_B6_C6),
+ PINMUX_SINGLE(VI1_D7_B7_C7),
+ PINMUX_SINGLE(VI1_D8_G0_Y0),
+ PINMUX_SINGLE(VI1_D9_G1_Y1),
+ PINMUX_SINGLE(VI1_D10_G2_Y2),
+ PINMUX_SINGLE(VI1_D11_G3_Y3),
+ PINMUX_SINGLE(VI1_FIELD),
+ PINMUX_SINGLE(VI3_D10_Y2),
+ PINMUX_SINGLE(VI3_FIELD),
+ PINMUX_SINGLE(VI4_CLK),
+ PINMUX_SINGLE(VI5_CLK),
+ PINMUX_SINGLE(VI5_D9_Y1),
+ PINMUX_SINGLE(VI5_D10_Y2),
+ PINMUX_SINGLE(VI5_D11_Y3),
+ PINMUX_SINGLE(VI5_FIELD),
+ PINMUX_SINGLE(HRTS0_N),
+ PINMUX_SINGLE(HCTS1_N),
+ PINMUX_SINGLE(SCK0),
+ PINMUX_SINGLE(CTS0_N),
+ PINMUX_SINGLE(RTS0_N),
+ PINMUX_SINGLE(TX0),
+ PINMUX_SINGLE(RX0),
+ PINMUX_SINGLE(SCK1),
+ PINMUX_SINGLE(CTS1_N),
+ PINMUX_SINGLE(RTS1_N),
+ PINMUX_SINGLE(TX1),
+ PINMUX_SINGLE(RX1),
+ PINMUX_SINGLE(SCIF_CLK),
+ PINMUX_SINGLE(CAN0_TX),
+ PINMUX_SINGLE(CAN0_RX),
+ PINMUX_SINGLE(CAN_CLK),
+ PINMUX_SINGLE(CAN1_TX),
+ PINMUX_SINGLE(CAN1_RX),
+ PINMUX_SINGLE(SD0_CLK),
+ PINMUX_SINGLE(SD0_CMD),
+ PINMUX_SINGLE(SD0_DAT0),
+ PINMUX_SINGLE(SD0_DAT1),
+ PINMUX_SINGLE(SD0_DAT2),
+ PINMUX_SINGLE(SD0_DAT3),
+ PINMUX_SINGLE(SD0_CD),
+ PINMUX_SINGLE(SD0_WP),
+ PINMUX_SINGLE(ADICLK),
+ PINMUX_SINGLE(ADICS_SAMP),
+ PINMUX_SINGLE(ADIDATA),
+ PINMUX_SINGLE(ADICHS0),
+ PINMUX_SINGLE(ADICHS1),
+ PINMUX_SINGLE(ADICHS2),
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS2),
+
+ /* IPSR0 */
+ PINMUX_IPSR_GPSR(IP0_0, DU0_DR0_DATA0),
+ PINMUX_IPSR_GPSR(IP0_1, DU0_DR1_DATA1),
+ PINMUX_IPSR_GPSR(IP0_2, DU0_DR2_Y4_DATA2),
+ PINMUX_IPSR_GPSR(IP0_3, DU0_DR3_Y5_DATA3),
+ PINMUX_IPSR_GPSR(IP0_4, DU0_DR4_Y6_DATA4),
+ PINMUX_IPSR_GPSR(IP0_5, DU0_DR5_Y7_DATA5),
+ PINMUX_IPSR_GPSR(IP0_6, DU0_DR6_Y8_DATA6),
+ PINMUX_IPSR_GPSR(IP0_7, DU0_DR7_Y9_DATA7),
+ PINMUX_IPSR_GPSR(IP0_8, DU0_DG0_DATA8),
+ PINMUX_IPSR_GPSR(IP0_9, DU0_DG1_DATA9),
+ PINMUX_IPSR_GPSR(IP0_10, DU0_DG2_C6_DATA10),
+ PINMUX_IPSR_GPSR(IP0_11, DU0_DG3_C7_DATA11),
+ PINMUX_IPSR_GPSR(IP0_12, DU0_DG4_Y0_DATA12),
+ PINMUX_IPSR_GPSR(IP0_13, DU0_DG5_Y1_DATA13),
+ PINMUX_IPSR_GPSR(IP0_14, DU0_DG6_Y2_DATA14),
+ PINMUX_IPSR_GPSR(IP0_15, DU0_DG7_Y3_DATA15),
+ PINMUX_IPSR_GPSR(IP0_16, DU0_DB0),
+ PINMUX_IPSR_GPSR(IP0_17, DU0_DB1),
+ PINMUX_IPSR_GPSR(IP0_18, DU0_DB2_C0),
+ PINMUX_IPSR_GPSR(IP0_19, DU0_DB3_C1),
+ PINMUX_IPSR_GPSR(IP0_20, DU0_DB4_C2),
+ PINMUX_IPSR_GPSR(IP0_21, DU0_DB5_C3),
+ PINMUX_IPSR_GPSR(IP0_22, DU0_DB6_C4),
+ PINMUX_IPSR_GPSR(IP0_23, DU0_DB7_C5),
+
+ /* IPSR1 */
+ PINMUX_IPSR_GPSR(IP1_0, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_GPSR(IP1_1, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_GPSR(IP1_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP1_3, DU0_DISP),
+ PINMUX_IPSR_GPSR(IP1_4, DU0_CDE),
+ PINMUX_IPSR_GPSR(IP1_5, DU1_DR2_Y4_DATA0),
+ PINMUX_IPSR_GPSR(IP1_6, DU1_DR3_Y5_DATA1),
+ PINMUX_IPSR_GPSR(IP1_7, DU1_DR4_Y6_DATA2),
+ PINMUX_IPSR_GPSR(IP1_8, DU1_DR5_Y7_DATA3),
+ PINMUX_IPSR_GPSR(IP1_9, DU1_DR6_DATA4),
+ PINMUX_IPSR_GPSR(IP1_10, DU1_DR7_DATA5),
+ PINMUX_IPSR_GPSR(IP1_11, DU1_DG2_C6_DATA6),
+ PINMUX_IPSR_GPSR(IP1_12, DU1_DG3_C7_DATA7),
+ PINMUX_IPSR_GPSR(IP1_13, DU1_DG4_Y0_DATA8),
+ PINMUX_IPSR_GPSR(IP1_14, DU1_DG5_Y1_DATA9),
+ PINMUX_IPSR_GPSR(IP1_15, DU1_DG6_Y2_DATA10),
+ PINMUX_IPSR_GPSR(IP1_16, DU1_DG7_Y3_DATA11),
+ PINMUX_IPSR_GPSR(IP1_17, A20),
+ PINMUX_IPSR_GPSR(IP1_17, MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP1_18, A21),
+ PINMUX_IPSR_GPSR(IP1_18, MISO_IO1),
+ PINMUX_IPSR_GPSR(IP1_19, A22),
+ PINMUX_IPSR_GPSR(IP1_19, IO2),
+ PINMUX_IPSR_GPSR(IP1_20, A23),
+ PINMUX_IPSR_GPSR(IP1_20, IO3),
+ PINMUX_IPSR_GPSR(IP1_21, A24),
+ PINMUX_IPSR_GPSR(IP1_21, SPCLK),
+ PINMUX_IPSR_GPSR(IP1_22, A25),
+ PINMUX_IPSR_GPSR(IP1_22, SSL),
+
+ /* IPSR2 */
+ PINMUX_IPSR_GPSR(IP2_0, VI2_CLK),
+ PINMUX_IPSR_GPSR(IP2_0, AVB_RX_CLK),
+ PINMUX_IPSR_GPSR(IP2_1, VI2_CLKENB),
+ PINMUX_IPSR_GPSR(IP2_1, AVB_RX_DV),
+ PINMUX_IPSR_GPSR(IP2_2, VI2_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP2_2, AVB_RXD0),
+ PINMUX_IPSR_GPSR(IP2_3, VI2_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP2_3, AVB_RXD1),
+ PINMUX_IPSR_GPSR(IP2_4, VI2_D0_C0),
+ PINMUX_IPSR_GPSR(IP2_4, AVB_RXD2),
+ PINMUX_IPSR_GPSR(IP2_5, VI2_D1_C1),
+ PINMUX_IPSR_GPSR(IP2_5, AVB_RXD3),
+ PINMUX_IPSR_GPSR(IP2_6, VI2_D2_C2),
+ PINMUX_IPSR_GPSR(IP2_6, AVB_RXD4),
+ PINMUX_IPSR_GPSR(IP2_7, VI2_D3_C3),
+ PINMUX_IPSR_GPSR(IP2_7, AVB_RXD5),
+ PINMUX_IPSR_GPSR(IP2_8, VI2_D4_C4),
+ PINMUX_IPSR_GPSR(IP2_8, AVB_RXD6),
+ PINMUX_IPSR_GPSR(IP2_9, VI2_D5_C5),
+ PINMUX_IPSR_GPSR(IP2_9, AVB_RXD7),
+ PINMUX_IPSR_GPSR(IP2_10, VI2_D6_C6),
+ PINMUX_IPSR_GPSR(IP2_10, AVB_RX_ER),
+ PINMUX_IPSR_GPSR(IP2_11, VI2_D7_C7),
+ PINMUX_IPSR_GPSR(IP2_11, AVB_COL),
+ PINMUX_IPSR_GPSR(IP2_12, VI2_D8_Y0),
+ PINMUX_IPSR_GPSR(IP2_12, AVB_TXD3),
+ PINMUX_IPSR_GPSR(IP2_13, VI2_D9_Y1),
+ PINMUX_IPSR_GPSR(IP2_13, AVB_TX_EN),
+ PINMUX_IPSR_GPSR(IP2_14, VI2_D10_Y2),
+ PINMUX_IPSR_GPSR(IP2_14, AVB_TXD0),
+ PINMUX_IPSR_GPSR(IP2_15, VI2_D11_Y3),
+ PINMUX_IPSR_GPSR(IP2_15, AVB_TXD1),
+ PINMUX_IPSR_GPSR(IP2_16, VI2_FIELD),
+ PINMUX_IPSR_GPSR(IP2_16, AVB_TXD2),
+
+ /* IPSR3 */
+ PINMUX_IPSR_GPSR(IP3_0, VI3_CLK),
+ PINMUX_IPSR_GPSR(IP3_0, AVB_TX_CLK),
+ PINMUX_IPSR_GPSR(IP3_1, VI3_CLKENB),
+ PINMUX_IPSR_GPSR(IP3_1, AVB_TXD4),
+ PINMUX_IPSR_GPSR(IP3_2, VI3_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP3_2, AVB_TXD5),
+ PINMUX_IPSR_GPSR(IP3_3, VI3_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP3_3, AVB_TXD6),
+ PINMUX_IPSR_GPSR(IP3_4, VI3_D0_C0),
+ PINMUX_IPSR_GPSR(IP3_4, AVB_TXD7),
+ PINMUX_IPSR_GPSR(IP3_5, VI3_D1_C1),
+ PINMUX_IPSR_GPSR(IP3_5, AVB_TX_ER),
+ PINMUX_IPSR_GPSR(IP3_6, VI3_D2_C2),
+ PINMUX_IPSR_GPSR(IP3_6, AVB_GTX_CLK),
+ PINMUX_IPSR_GPSR(IP3_7, VI3_D3_C3),
+ PINMUX_IPSR_GPSR(IP3_7, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP3_8, VI3_D4_C4),
+ PINMUX_IPSR_GPSR(IP3_8, AVB_MDIO),
+ PINMUX_IPSR_GPSR(IP3_9, VI3_D5_C5),
+ PINMUX_IPSR_GPSR(IP3_9, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP3_10, VI3_D6_C6),
+ PINMUX_IPSR_GPSR(IP3_10, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP3_11, VI3_D7_C7),
+ PINMUX_IPSR_GPSR(IP3_11, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP3_12, VI3_D8_Y0),
+ PINMUX_IPSR_GPSR(IP3_12, AVB_CRS),
+ PINMUX_IPSR_GPSR(IP3_13, VI3_D9_Y1),
+ PINMUX_IPSR_GPSR(IP3_13, AVB_GTXREFCLK),
+ PINMUX_IPSR_GPSR(IP3_14, VI3_D11_Y3),
+ PINMUX_IPSR_GPSR(IP3_14, AVB_AVTP_MATCH),
+
+ /* IPSR4 */
+ PINMUX_IPSR_GPSR(IP4_0, VI4_CLKENB),
+ PINMUX_IPSR_GPSR(IP4_0, VI0_D12_G4_Y4),
+ PINMUX_IPSR_GPSR(IP4_1, VI4_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_1, VI0_D13_G5_Y5),
+ PINMUX_IPSR_GPSR(IP4_3_2, VI4_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_3_2, VI0_D14_G6_Y6),
+ PINMUX_IPSR_GPSR(IP4_4, VI4_D0_C0),
+ PINMUX_IPSR_GPSR(IP4_4, VI0_D15_G7_Y7),
+ PINMUX_IPSR_GPSR(IP4_6_5, VI4_D1_C1),
+ PINMUX_IPSR_GPSR(IP4_6_5, VI0_D16_R0),
+ PINMUX_IPSR_MSEL(IP4_6_5, VI1_D12_G4_Y4, SEL_VI1_0),
+ PINMUX_IPSR_GPSR(IP4_8_7, VI4_D2_C2),
+ PINMUX_IPSR_GPSR(IP4_8_7, VI0_D17_R1),
+ PINMUX_IPSR_MSEL(IP4_8_7, VI1_D13_G5_Y5, SEL_VI1_0),
+ PINMUX_IPSR_GPSR(IP4_10_9, VI4_D3_C3),
+ PINMUX_IPSR_GPSR(IP4_10_9, VI0_D18_R2),
+ PINMUX_IPSR_MSEL(IP4_10_9, VI1_D14_G6_Y6, SEL_VI1_0),
+ PINMUX_IPSR_GPSR(IP4_12_11, VI4_D4_C4),
+ PINMUX_IPSR_GPSR(IP4_12_11, VI0_D19_R3),
+ PINMUX_IPSR_MSEL(IP4_12_11, VI1_D15_G7_Y7, SEL_VI1_0),
+ PINMUX_IPSR_GPSR(IP4_14_13, VI4_D5_C5),
+ PINMUX_IPSR_GPSR(IP4_14_13, VI0_D20_R4),
+ PINMUX_IPSR_GPSR(IP4_14_13, VI2_D12_Y4),
+ PINMUX_IPSR_GPSR(IP4_16_15, VI4_D6_C6),
+ PINMUX_IPSR_GPSR(IP4_16_15, VI0_D21_R5),
+ PINMUX_IPSR_GPSR(IP4_16_15, VI2_D13_Y5),
+ PINMUX_IPSR_GPSR(IP4_18_17, VI4_D7_C7),
+ PINMUX_IPSR_GPSR(IP4_18_17, VI0_D22_R6),
+ PINMUX_IPSR_GPSR(IP4_18_17, VI2_D14_Y6),
+ PINMUX_IPSR_GPSR(IP4_20_19, VI4_D8_Y0),
+ PINMUX_IPSR_GPSR(IP4_20_19, VI0_D23_R7),
+ PINMUX_IPSR_GPSR(IP4_20_19, VI2_D15_Y7),
+ PINMUX_IPSR_GPSR(IP4_21, VI4_D9_Y1),
+ PINMUX_IPSR_GPSR(IP4_21, VI3_D12_Y4),
+ PINMUX_IPSR_GPSR(IP4_22, VI4_D10_Y2),
+ PINMUX_IPSR_GPSR(IP4_22, VI3_D13_Y5),
+ PINMUX_IPSR_GPSR(IP4_23, VI4_D11_Y3),
+ PINMUX_IPSR_GPSR(IP4_23, VI3_D14_Y6),
+ PINMUX_IPSR_GPSR(IP4_24, VI4_FIELD),
+ PINMUX_IPSR_GPSR(IP4_24, VI3_D15_Y7),
+
+ /* IPSR5 */
+ PINMUX_IPSR_GPSR(IP5_0, VI5_CLKENB),
+ PINMUX_IPSR_MSEL(IP5_0, VI1_D12_G4_Y4_B, SEL_VI1_1),
+ PINMUX_IPSR_GPSR(IP5_1, VI5_HSYNC_N),
+ PINMUX_IPSR_MSEL(IP5_1, VI1_D13_G5_Y5_B, SEL_VI1_1),
+ PINMUX_IPSR_GPSR(IP5_2, VI5_VSYNC_N),
+ PINMUX_IPSR_MSEL(IP5_2, VI1_D14_G6_Y6_B, SEL_VI1_1),
+ PINMUX_IPSR_GPSR(IP5_3, VI5_D0_C0),
+ PINMUX_IPSR_MSEL(IP5_3, VI1_D15_G7_Y7_B, SEL_VI1_1),
+ PINMUX_IPSR_GPSR(IP5_4, VI5_D1_C1),
+ PINMUX_IPSR_GPSR(IP5_4, VI1_D16_R0),
+ PINMUX_IPSR_GPSR(IP5_5, VI5_D2_C2),
+ PINMUX_IPSR_GPSR(IP5_5, VI1_D17_R1),
+ PINMUX_IPSR_GPSR(IP5_6, VI5_D3_C3),
+ PINMUX_IPSR_GPSR(IP5_6, VI1_D18_R2),
+ PINMUX_IPSR_GPSR(IP5_7, VI5_D4_C4),
+ PINMUX_IPSR_GPSR(IP5_7, VI1_D19_R3),
+ PINMUX_IPSR_GPSR(IP5_8, VI5_D5_C5),
+ PINMUX_IPSR_GPSR(IP5_8, VI1_D20_R4),
+ PINMUX_IPSR_GPSR(IP5_9, VI5_D6_C6),
+ PINMUX_IPSR_GPSR(IP5_9, VI1_D21_R5),
+ PINMUX_IPSR_GPSR(IP5_10, VI5_D7_C7),
+ PINMUX_IPSR_GPSR(IP5_10, VI1_D22_R6),
+ PINMUX_IPSR_GPSR(IP5_11, VI5_D8_Y0),
+ PINMUX_IPSR_GPSR(IP5_11, VI1_D23_R7),
+
+ /* IPSR6 */
+ PINMUX_IPSR_GPSR(IP6_0, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP6_0, HSCK0),
+ PINMUX_IPSR_GPSR(IP6_1, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP6_1, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP6_2, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP6_2, HTX0),
+ PINMUX_IPSR_GPSR(IP6_3, MSIOF0_RXD),
+ PINMUX_IPSR_GPSR(IP6_3, HRX0),
+ PINMUX_IPSR_GPSR(IP6_4, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP6_4, HSCK1),
+ PINMUX_IPSR_GPSR(IP6_5, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP6_5, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP6_6, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP6_6, HTX1),
+ PINMUX_IPSR_GPSR(IP6_7, MSIOF1_RXD),
+ PINMUX_IPSR_GPSR(IP6_7, HRX1),
+ PINMUX_IPSR_GPSR(IP6_9_8, DRACK0),
+ PINMUX_IPSR_GPSR(IP6_9_8, SCK2),
+ PINMUX_IPSR_GPSR(IP6_11_10, DACK0),
+ PINMUX_IPSR_GPSR(IP6_11_10, TX2),
+ PINMUX_IPSR_GPSR(IP6_13_12, DREQ0_N),
+ PINMUX_IPSR_GPSR(IP6_13_12, RX2),
+ PINMUX_IPSR_GPSR(IP6_15_14, DACK1),
+ PINMUX_IPSR_GPSR(IP6_15_14, SCK3),
+ PINMUX_IPSR_GPSR(IP6_16, TX3),
+ PINMUX_IPSR_GPSR(IP6_18_17, DREQ1_N),
+ PINMUX_IPSR_GPSR(IP6_18_17, RX3),
+
+ /* IPSR7 */
+ PINMUX_IPSR_GPSR(IP7_1_0, PWM0),
+ PINMUX_IPSR_GPSR(IP7_1_0, TCLK1),
+ PINMUX_IPSR_GPSR(IP7_1_0, FSO_CFE_0),
+ PINMUX_IPSR_GPSR(IP7_3_2, PWM1),
+ PINMUX_IPSR_GPSR(IP7_3_2, TCLK2),
+ PINMUX_IPSR_GPSR(IP7_3_2, FSO_CFE_1),
+ PINMUX_IPSR_GPSR(IP7_5_4, PWM2),
+ PINMUX_IPSR_GPSR(IP7_5_4, TCLK3),
+ PINMUX_IPSR_GPSR(IP7_5_4, FSO_TOE),
+ PINMUX_IPSR_GPSR(IP7_6, PWM3),
+ PINMUX_IPSR_GPSR(IP7_7, PWM4),
+ PINMUX_IPSR_GPSR(IP7_9_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP7_9_8, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP7_11_10, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP7_11_10, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP7_13_12, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP7_13_12, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP7_15_14, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP7_15_14, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP7_16, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP7_17, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP7_18, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP7_19, AUDIO_CLKA),
+ PINMUX_IPSR_GPSR(IP7_20, AUDIO_CLKB),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
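+/*
+ * Pin groups: each <group>_pins[] array lists the physical pins as
+ * RCAR_GP_PIN(bank, offset) and the matching <group>_mux[] array lists the
+ * corresponding function marks in the same order; the two are tied together
+ * by name via SH_PFC_PIN_GROUP() in pinmux_groups[] below.
+ */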
+/* - AVB -------------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+ RCAR_GP_PIN(7, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+ RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb_mdio_mux[] = {
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 16),
+ RCAR_GP_PIN(6, 12),
+
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4),
+ RCAR_GP_PIN(6, 5),
+
+ RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(6, 13), RCAR_GP_PIN(7, 5),
+ RCAR_GP_PIN(7, 0), RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK,
+ AVB_TX_CLK_MARK, AVB_COL_MARK,
+};
+static const unsigned int avb_gmii_pins[] = {
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 16),
+ RCAR_GP_PIN(6, 12), RCAR_GP_PIN(7, 1), RCAR_GP_PIN(7, 2),
+ RCAR_GP_PIN(7, 3), RCAR_GP_PIN(7, 4),
+
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4),
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 7),
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+
+ RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 13),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 0),
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb_gmii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK,
+ AVB_TXD6_MARK, AVB_TXD7_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK,
+ AVB_RXD6_MARK, AVB_RXD7_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK,
+ AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
+ AVB_COL_MARK,
+};
+static const unsigned int avb_avtp_match_pins[] = {
+ RCAR_GP_PIN(7, 15),
+};
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
+};
+/* - CAN -------------------------------------------------------------------- */
+static const unsigned int can0_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(10, 27), RCAR_GP_PIN(10, 28),
+};
+static const unsigned int can0_data_mux[] = {
+ CAN0_TX_MARK, CAN0_RX_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(10, 30), RCAR_GP_PIN(10, 31),
+};
+static const unsigned int can1_data_mux[] = {
+ CAN1_TX_MARK, CAN1_RX_MARK,
+};
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(10, 29),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du0_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2),
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 23), RCAR_GP_PIN(0, 22), RCAR_GP_PIN(0, 21),
+ RCAR_GP_PIN(0, 20), RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 18),
+};
+static const unsigned int du0_rgb666_mux[] = {
+ DU0_DR7_Y9_DATA7_MARK, DU0_DR6_Y8_DATA6_MARK, DU0_DR5_Y7_DATA5_MARK,
+ DU0_DR4_Y6_DATA4_MARK, DU0_DR3_Y5_DATA3_MARK, DU0_DR2_Y4_DATA2_MARK,
+ DU0_DG7_Y3_DATA15_MARK, DU0_DG6_Y2_DATA14_MARK, DU0_DG5_Y1_DATA13_MARK,
+ DU0_DG4_Y0_DATA12_MARK, DU0_DG3_C7_DATA11_MARK, DU0_DG2_C6_DATA10_MARK,
+ DU0_DB7_C5_MARK, DU0_DB6_C4_MARK, DU0_DB5_C3_MARK,
+ DU0_DB4_C2_MARK, DU0_DB3_C1_MARK, DU0_DB2_C0_MARK,
+};
+static const unsigned int du0_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2),
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 0),
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(0, 23), RCAR_GP_PIN(0, 22), RCAR_GP_PIN(0, 21),
+ RCAR_GP_PIN(0, 20), RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 18),
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int du0_rgb888_mux[] = {
+ DU0_DR7_Y9_DATA7_MARK, DU0_DR6_Y8_DATA6_MARK, DU0_DR5_Y7_DATA5_MARK,
+ DU0_DR4_Y6_DATA4_MARK, DU0_DR3_Y5_DATA3_MARK, DU0_DR2_Y4_DATA2_MARK,
+ DU0_DR1_DATA1_MARK, DU0_DR0_DATA0_MARK,
+ DU0_DG7_Y3_DATA15_MARK, DU0_DG6_Y2_DATA14_MARK, DU0_DG5_Y1_DATA13_MARK,
+ DU0_DG4_Y0_DATA12_MARK, DU0_DG3_C7_DATA11_MARK, DU0_DG2_C6_DATA10_MARK,
+ DU0_DG1_DATA9_MARK, DU0_DG0_DATA8_MARK,
+ DU0_DB7_C5_MARK, DU0_DB6_C4_MARK, DU0_DB5_C3_MARK,
+ DU0_DB4_C2_MARK, DU0_DB3_C1_MARK, DU0_DB2_C0_MARK,
+ DU0_DB1_MARK, DU0_DB0_MARK,
+};
+static const unsigned int du0_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(0, 25), RCAR_GP_PIN(0, 24),
+};
+static const unsigned int du0_sync_mux[] = {
+ DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK,
+};
+static const unsigned int du0_oddf_pins[] = {
+ /* EXODDF/ODDF/DISP/CDE */
+ RCAR_GP_PIN(0, 26),
+};
+static const unsigned int du0_oddf_mux[] = {
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du0_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(0, 27),
+};
+static const unsigned int du0_disp_mux[] = {
+ DU0_DISP_MARK,
+};
+static const unsigned int du0_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(0, 28),
+};
+static const unsigned int du0_cde_mux[] = {
+ DU0_CDE_MARK,
+};
+static const unsigned int du1_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+ RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int du1_rgb666_mux[] = {
+ DU1_DR7_DATA5_MARK, DU1_DR6_DATA4_MARK, DU1_DR5_Y7_DATA3_MARK,
+ DU1_DR4_Y6_DATA2_MARK, DU1_DR3_Y5_DATA1_MARK, DU1_DR2_Y4_DATA0_MARK,
+ DU1_DG7_Y3_DATA11_MARK, DU1_DG6_Y2_DATA10_MARK, DU1_DG5_Y1_DATA9_MARK,
+ DU1_DG4_Y0_DATA8_MARK, DU1_DG3_C7_DATA7_MARK, DU1_DG2_C6_DATA6_MARK,
+ DU1_DB7_C5_MARK, DU1_DB6_C4_MARK, DU1_DB5_C3_DATA15_MARK,
+ DU1_DB4_C2_DATA14_MARK, DU1_DB3_C1_DATA13_MARK, DU1_DB2_C0_DATA12_MARK,
+};
+static const unsigned int du1_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int du1_sync_mux[] = {
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+};
+static const unsigned int du1_oddf_pins[] = {
+ /* EXODDF/ODDF/DISP/CDE */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int du1_oddf_mux[] = {
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du1_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int du1_disp_mux[] = {
+ DU1_DISP_MARK,
+};
+static const unsigned int du1_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int du1_cde_mux[] = {
+ DU1_CDE_MARK,
+};
+/* - INTC ------------------------------------------------------------------- */
+static const unsigned int intc_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(3, 19),
+};
+static const unsigned int intc_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(3, 20),
+};
+static const unsigned int intc_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(3, 21),
+};
+static const unsigned int intc_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(3, 22),
+};
+static const unsigned int intc_irq3_mux[] = {
+ IRQ3_MARK,
+};
+/* - LBSC ------------------------------------------------------------------- */
+static const unsigned int lbsc_cs0_pins[] = {
+ /* CS0# */
+ RCAR_GP_PIN(3, 27),
+};
+static const unsigned int lbsc_cs0_mux[] = {
+ CS0_N_MARK,
+};
+static const unsigned int lbsc_cs1_pins[] = {
+ /* CS1#_A26 */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int lbsc_cs1_mux[] = {
+ CS1_N_A26_MARK,
+};
+static const unsigned int lbsc_ex_cs0_pins[] = {
+ /* EX_CS0# */
+ RCAR_GP_PIN(3, 7),
+};
+static const unsigned int lbsc_ex_cs0_mux[] = {
+ EX_CS0_N_MARK,
+};
+static const unsigned int lbsc_ex_cs1_pins[] = {
+ /* EX_CS1# */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int lbsc_ex_cs1_mux[] = {
+ EX_CS1_N_MARK,
+};
+static const unsigned int lbsc_ex_cs2_pins[] = {
+ /* EX_CS2# */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int lbsc_ex_cs2_mux[] = {
+ EX_CS2_N_MARK,
+};
+static const unsigned int lbsc_ex_cs3_pins[] = {
+ /* EX_CS3# */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int lbsc_ex_cs3_mux[] = {
+ EX_CS3_N_MARK,
+};
+static const unsigned int lbsc_ex_cs4_pins[] = {
+ /* EX_CS4# */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int lbsc_ex_cs4_mux[] = {
+ EX_CS4_N_MARK,
+};
+static const unsigned int lbsc_ex_cs5_pins[] = {
+ /* EX_CS5# */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int lbsc_ex_cs5_mux[] = {
+ EX_CS5_N_MARK,
+};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 0),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(10, 1),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(10, 4),
+};
+static const unsigned int msiof0_rx_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+static const unsigned int msiof0_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(10, 3),
+};
+static const unsigned int msiof0_tx_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 5),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(10, 6),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(10, 9),
+};
+static const unsigned int msiof1_rx_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+static const unsigned int msiof1_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(10, 8),
+};
+static const unsigned int msiof1_tx_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+ SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int qspi_data2_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24),
+};
+static const unsigned int qspi_data4_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(10, 14), RCAR_GP_PIN(10, 13),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 10),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(10, 12), RCAR_GP_PIN(10, 11),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(10, 25), RCAR_GP_PIN(10, 24),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 23),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* DAT0 */
+ RCAR_GP_PIN(11, 7),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* DAT[0-3] */
+ RCAR_GP_PIN(11, 7), RCAR_GP_PIN(11, 8),
+ RCAR_GP_PIN(11, 9), RCAR_GP_PIN(11, 10),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK, SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(11, 5), RCAR_GP_PIN(11, 6),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(11, 11),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(11, 12),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - VIN0 ------------------------------------------------------------------- */
+static const union vin_data vin0_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 7),
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+ /* G */
+ RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 2),
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 4),
+ /* R */
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6),
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8),
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10),
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12),
+ },
+};
+static const union vin_data vin0_data_mux = {
+ .data24 = {
+ /* B */
+ VI0_D0_B0_C0_MARK, VI0_D1_B1_C1_MARK,
+ VI0_D2_B2_C2_MARK, VI0_D3_B3_C3_MARK,
+ VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK,
+ VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK,
+ /* G */
+ VI0_D8_G0_Y0_MARK, VI0_D9_G1_Y1_MARK,
+ VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK,
+ VI0_D12_G4_Y4_MARK, VI0_D13_G5_Y5_MARK,
+ VI0_D14_G6_Y6_MARK, VI0_D15_G7_Y7_MARK,
+ /* R */
+ VI0_D16_R0_MARK, VI0_D17_R1_MARK,
+ VI0_D18_R2_MARK, VI0_D19_R3_MARK,
+ VI0_D20_R4_MARK, VI0_D21_R5_MARK,
+ VI0_D22_R6_MARK, VI0_D23_R7_MARK,
+ },
+};
+static const unsigned int vin0_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 7),
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 9),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+ /* G */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 2),
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 4),
+ /* R */
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8),
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10),
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int vin0_data18_mux[] = {
+ /* B */
+ VI0_D2_B2_C2_MARK, VI0_D3_B3_C3_MARK,
+ VI0_D4_B4_C4_MARK, VI0_D5_B5_C5_MARK,
+ VI0_D6_B6_C6_MARK, VI0_D7_B7_C7_MARK,
+ /* G */
+ VI0_D10_G2_Y2_MARK, VI0_D11_G3_Y3_MARK,
+ VI0_D12_G4_Y4_MARK, VI0_D13_G5_Y5_MARK,
+ VI0_D14_G6_Y6_MARK, VI0_D15_G7_Y7_MARK,
+ /* R */
+ VI0_D18_R2_MARK, VI0_D19_R3_MARK,
+ VI0_D20_R4_MARK, VI0_D21_R5_MARK,
+ VI0_D22_R6_MARK, VI0_D23_R7_MARK,
+};
+static const unsigned int vin0_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+};
+static const unsigned int vin0_sync_mux[] = {
+ VI0_HSYNC_N_MARK, VI0_VSYNC_N_MARK,
+};
+static const unsigned int vin0_field_pins[] = {
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int vin0_field_mux[] = {
+ VI0_FIELD_MARK,
+};
+static const unsigned int vin0_clkenb_pins[] = {
+ RCAR_GP_PIN(4, 1),
+};
+static const unsigned int vin0_clkenb_mux[] = {
+ VI0_CLKENB_MARK,
+};
+static const unsigned int vin0_clk_pins[] = {
+ RCAR_GP_PIN(4, 0),
+};
+static const unsigned int vin0_clk_mux[] = {
+ VI0_CLK_MARK,
+};
+/* - VIN1 ------------------------------------------------------------------- */
+static const union vin_data vin1_data_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ /* G */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13),
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6),
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8),
+ /* R */
+ RCAR_GP_PIN(9, 5), RCAR_GP_PIN(9, 6),
+ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8),
+ RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10),
+ RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12),
+ },
+};
+static const union vin_data vin1_data_mux = {
+ .data24 = {
+ /* B */
+ VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK,
+ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK,
+ VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK,
+ VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK,
+ /* G */
+ VI1_D8_G0_Y0_MARK, VI1_D9_G1_Y1_MARK,
+ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK,
+ VI1_D12_G4_Y4_MARK, VI1_D13_G5_Y5_MARK,
+ VI1_D14_G6_Y6_MARK, VI1_D15_G7_Y7_MARK,
+ /* R */
+ VI1_D16_R0_MARK, VI1_D17_R1_MARK,
+ VI1_D18_R2_MARK, VI1_D19_R3_MARK,
+ VI1_D20_R4_MARK, VI1_D21_R5_MARK,
+ VI1_D22_R6_MARK, VI1_D23_R7_MARK,
+ },
+};
+static const unsigned int vin1_data18_pins[] = {
+ /* B */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ /* G */
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 6),
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 8),
+ /* R */
+ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8),
+ RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10),
+ RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12),
+};
+static const unsigned int vin1_data18_mux[] = {
+ /* B */
+ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK,
+ VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK,
+ VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK,
+ /* G */
+ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK,
+ VI1_D12_G4_Y4_MARK, VI1_D13_G5_Y5_MARK,
+ VI1_D14_G6_Y6_MARK, VI1_D15_G7_Y7_MARK,
+ /* R */
+ VI1_D18_R2_MARK, VI1_D19_R3_MARK,
+ VI1_D20_R4_MARK, VI1_D21_R5_MARK,
+ VI1_D22_R6_MARK, VI1_D23_R7_MARK,
+};
+static const union vin_data vin1_data_b_pins = {
+ .data24 = {
+ /* B */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ /* G */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13),
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+ RCAR_GP_PIN(9, 1), RCAR_GP_PIN(9, 2),
+ RCAR_GP_PIN(9, 3), RCAR_GP_PIN(9, 4),
+ /* R */
+ RCAR_GP_PIN(9, 5), RCAR_GP_PIN(9, 6),
+ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8),
+ RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10),
+ RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12),
+ },
+};
+static const union vin_data vin1_data_b_mux = {
+ .data24 = {
+ /* B */
+ VI1_D0_B0_C0_MARK, VI1_D1_B1_C1_MARK,
+ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK,
+ VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK,
+ VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK,
+ /* G */
+ VI1_D8_G0_Y0_MARK, VI1_D9_G1_Y1_MARK,
+ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK,
+ VI1_D12_G4_Y4_B_MARK, VI1_D13_G5_Y5_B_MARK,
+ VI1_D14_G6_Y6_B_MARK, VI1_D15_G7_Y7_B_MARK,
+ /* R */
+ VI1_D16_R0_MARK, VI1_D17_R1_MARK,
+ VI1_D18_R2_MARK, VI1_D19_R3_MARK,
+ VI1_D20_R4_MARK, VI1_D21_R5_MARK,
+ VI1_D22_R6_MARK, VI1_D23_R7_MARK,
+ },
+};
+static const unsigned int vin1_data18_b_pins[] = {
+ /* B */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ /* G */
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+ RCAR_GP_PIN(9, 1), RCAR_GP_PIN(9, 2),
+ RCAR_GP_PIN(9, 3), RCAR_GP_PIN(9, 4),
+ /* R */
+ RCAR_GP_PIN(9, 7), RCAR_GP_PIN(9, 8),
+ RCAR_GP_PIN(9, 9), RCAR_GP_PIN(9, 10),
+ RCAR_GP_PIN(9, 11), RCAR_GP_PIN(9, 12),
+};
+static const unsigned int vin1_data18_b_mux[] = {
+ /* B */
+ VI1_D2_B2_C2_MARK, VI1_D3_B3_C3_MARK,
+ VI1_D4_B4_C4_MARK, VI1_D5_B5_C5_MARK,
+ VI1_D6_B6_C6_MARK, VI1_D7_B7_C7_MARK,
+ /* G */
+ VI1_D10_G2_Y2_MARK, VI1_D11_G3_Y3_MARK,
+ VI1_D12_G4_Y4_B_MARK, VI1_D13_G5_Y5_B_MARK,
+ VI1_D14_G6_Y6_B_MARK, VI1_D15_G7_Y7_B_MARK,
+ /* R */
+ VI1_D18_R2_MARK, VI1_D19_R3_MARK,
+ VI1_D20_R4_MARK, VI1_D21_R5_MARK,
+ VI1_D22_R6_MARK, VI1_D23_R7_MARK,
+};
+static const unsigned int vin1_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(5, 2), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int vin1_sync_mux[] = {
+ VI1_HSYNC_N_MARK, VI1_VSYNC_N_MARK,
+};
+static const unsigned int vin1_field_pins[] = {
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int vin1_field_mux[] = {
+ VI1_FIELD_MARK,
+};
+static const unsigned int vin1_clkenb_pins[] = {
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int vin1_clkenb_mux[] = {
+ VI1_CLKENB_MARK,
+};
+static const unsigned int vin1_clk_pins[] = {
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int vin1_clk_mux[] = {
+ VI1_CLK_MARK,
+};
+/* - VIN2 ------------------------------------------------------------------- */
+static const union vin_data vin2_data_pins = {
+ .data16 = {
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 5),
+ RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 7),
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+ RCAR_GP_PIN(6, 10), RCAR_GP_PIN(6, 11),
+ RCAR_GP_PIN(6, 12), RCAR_GP_PIN(6, 13),
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 10),
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 12),
+ },
+};
+static const union vin_data vin2_data_mux = {
+ .data16 = {
+ VI2_D0_C0_MARK, VI2_D1_C1_MARK,
+ VI2_D2_C2_MARK, VI2_D3_C3_MARK,
+ VI2_D4_C4_MARK, VI2_D5_C5_MARK,
+ VI2_D6_C6_MARK, VI2_D7_C7_MARK,
+ VI2_D8_Y0_MARK, VI2_D9_Y1_MARK,
+ VI2_D10_Y2_MARK, VI2_D11_Y3_MARK,
+ VI2_D12_Y4_MARK, VI2_D13_Y5_MARK,
+ VI2_D14_Y6_MARK, VI2_D15_Y7_MARK,
+ },
+};
+static const unsigned int vin2_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+};
+static const unsigned int vin2_sync_mux[] = {
+ VI2_HSYNC_N_MARK, VI2_VSYNC_N_MARK,
+};
+static const unsigned int vin2_field_pins[] = {
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int vin2_field_mux[] = {
+ VI2_FIELD_MARK,
+};
+static const unsigned int vin2_clkenb_pins[] = {
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int vin2_clkenb_mux[] = {
+ VI2_CLKENB_MARK,
+};
+static const unsigned int vin2_clk_pins[] = {
+ RCAR_GP_PIN(6, 0),
+};
+static const unsigned int vin2_clk_mux[] = {
+ VI2_CLK_MARK,
+};
+/* - VIN3 ------------------------------------------------------------------- */
+static const union vin_data vin3_data_pins = {
+ .data16 = {
+ RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 5),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9),
+ RCAR_GP_PIN(7, 10), RCAR_GP_PIN(7, 11),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 13),
+ RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 14),
+ RCAR_GP_PIN(8, 15), RCAR_GP_PIN(8, 16),
+ },
+};
+static const union vin_data vin3_data_mux = {
+ .data16 = {
+ VI3_D0_C0_MARK, VI3_D1_C1_MARK,
+ VI3_D2_C2_MARK, VI3_D3_C3_MARK,
+ VI3_D4_C4_MARK, VI3_D5_C5_MARK,
+ VI3_D6_C6_MARK, VI3_D7_C7_MARK,
+ VI3_D8_Y0_MARK, VI3_D9_Y1_MARK,
+ VI3_D10_Y2_MARK, VI3_D11_Y3_MARK,
+ VI3_D12_Y4_MARK, VI3_D13_Y5_MARK,
+ VI3_D14_Y6_MARK, VI3_D15_Y7_MARK,
+ },
+};
+static const unsigned int vin3_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(7, 2), RCAR_GP_PIN(7, 3),
+};
+static const unsigned int vin3_sync_mux[] = {
+ VI3_HSYNC_N_MARK, VI3_VSYNC_N_MARK,
+};
+static const unsigned int vin3_field_pins[] = {
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int vin3_field_mux[] = {
+ VI3_FIELD_MARK,
+};
+static const unsigned int vin3_clkenb_pins[] = {
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int vin3_clkenb_mux[] = {
+ VI3_CLKENB_MARK,
+};
+static const unsigned int vin3_clk_pins[] = {
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int vin3_clk_mux[] = {
+ VI3_CLK_MARK,
+};
+/* - VIN4 ------------------------------------------------------------------- */
+static const union vin_data vin4_data_pins = {
+ .data12 = {
+ RCAR_GP_PIN(8, 4), RCAR_GP_PIN(8, 5),
+ RCAR_GP_PIN(8, 6), RCAR_GP_PIN(8, 7),
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 11),
+ RCAR_GP_PIN(8, 12), RCAR_GP_PIN(8, 13),
+ RCAR_GP_PIN(8, 14), RCAR_GP_PIN(8, 15),
+ },
+};
+static const union vin_data vin4_data_mux = {
+ .data12 = {
+ VI4_D0_C0_MARK, VI4_D1_C1_MARK,
+ VI4_D2_C2_MARK, VI4_D3_C3_MARK,
+ VI4_D4_C4_MARK, VI4_D5_C5_MARK,
+ VI4_D6_C6_MARK, VI4_D7_C7_MARK,
+ VI4_D8_Y0_MARK, VI4_D9_Y1_MARK,
+ VI4_D10_Y2_MARK, VI4_D11_Y3_MARK,
+ },
+};
+static const unsigned int vin4_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(8, 2), RCAR_GP_PIN(8, 3),
+};
+static const unsigned int vin4_sync_mux[] = {
+ VI4_HSYNC_N_MARK, VI4_VSYNC_N_MARK,
+};
+static const unsigned int vin4_field_pins[] = {
+ RCAR_GP_PIN(8, 16),
+};
+static const unsigned int vin4_field_mux[] = {
+ VI4_FIELD_MARK,
+};
+static const unsigned int vin4_clkenb_pins[] = {
+ RCAR_GP_PIN(8, 1),
+};
+static const unsigned int vin4_clkenb_mux[] = {
+ VI4_CLKENB_MARK,
+};
+static const unsigned int vin4_clk_pins[] = {
+ RCAR_GP_PIN(8, 0),
+};
+static const unsigned int vin4_clk_mux[] = {
+ VI4_CLK_MARK,
+};
+/* - VIN5 ------------------------------------------------------------------- */
+static const union vin_data vin5_data_pins = {
+ .data12 = {
+ RCAR_GP_PIN(9, 4), RCAR_GP_PIN(9, 5),
+ RCAR_GP_PIN(9, 6), RCAR_GP_PIN(9, 7),
+ RCAR_GP_PIN(9, 8), RCAR_GP_PIN(9, 9),
+ RCAR_GP_PIN(9, 10), RCAR_GP_PIN(9, 11),
+ RCAR_GP_PIN(9, 12), RCAR_GP_PIN(9, 13),
+ RCAR_GP_PIN(9, 14), RCAR_GP_PIN(9, 15),
+ },
+};
+static const union vin_data vin5_data_mux = {
+ .data12 = {
+ VI5_D0_C0_MARK, VI5_D1_C1_MARK,
+ VI5_D2_C2_MARK, VI5_D3_C3_MARK,
+ VI5_D4_C4_MARK, VI5_D5_C5_MARK,
+ VI5_D6_C6_MARK, VI5_D7_C7_MARK,
+ VI5_D8_Y0_MARK, VI5_D9_Y1_MARK,
+ VI5_D10_Y2_MARK, VI5_D11_Y3_MARK,
+ },
+};
+static const unsigned int vin5_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(9, 2), RCAR_GP_PIN(9, 3),
+};
+static const unsigned int vin5_sync_mux[] = {
+ VI5_HSYNC_N_MARK, VI5_VSYNC_N_MARK,
+};
+static const unsigned int vin5_field_pins[] = {
+ RCAR_GP_PIN(9, 16),
+};
+static const unsigned int vin5_field_mux[] = {
+ VI5_FIELD_MARK,
+};
+static const unsigned int vin5_clkenb_pins[] = {
+ RCAR_GP_PIN(9, 1),
+};
+static const unsigned int vin5_clkenb_mux[] = {
+ VI5_CLKENB_MARK,
+};
+static const unsigned int vin5_clk_pins[] = {
+ RCAR_GP_PIN(9, 0),
+};
+static const unsigned int vin5_clk_mux[] = {
+ VI5_CLK_MARK,
+};
+
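+/*
+ * VIN_DATA_PIN_GROUP(vinN_data, n) builds the "vinN_data<n>" group from the
+ * first n entries of the vinN_data pin/mux unions above, so a single table
+ * covers every supported bus width; the 18-bit layouts get dedicated tables
+ * because they skip the two least significant bits of each colour component.
+ */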
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(can0_data),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(du0_rgb666),
+ SH_PFC_PIN_GROUP(du0_rgb888),
+ SH_PFC_PIN_GROUP(du0_sync),
+ SH_PFC_PIN_GROUP(du0_oddf),
+ SH_PFC_PIN_GROUP(du0_disp),
+ SH_PFC_PIN_GROUP(du0_cde),
+ SH_PFC_PIN_GROUP(du1_rgb666),
+ SH_PFC_PIN_GROUP(du1_sync),
+ SH_PFC_PIN_GROUP(du1_oddf),
+ SH_PFC_PIN_GROUP(du1_disp),
+ SH_PFC_PIN_GROUP(du1_cde),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(lbsc_cs0),
+ SH_PFC_PIN_GROUP(lbsc_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs0),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs1),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs2),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs3),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs4),
+ SH_PFC_PIN_GROUP(lbsc_ex_cs5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ VIN_DATA_PIN_GROUP(vin1_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_data, 20),
+ SH_PFC_PIN_GROUP(vin1_data18),
+ VIN_DATA_PIN_GROUP(vin1_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data_b, 24),
+ VIN_DATA_PIN_GROUP(vin1_data_b, 20),
+ SH_PFC_PIN_GROUP(vin1_data18_b),
+ VIN_DATA_PIN_GROUP(vin1_data_b, 16),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin2_data, 16),
+ VIN_DATA_PIN_GROUP(vin2_data, 12),
+ VIN_DATA_PIN_GROUP(vin2_data, 10),
+ VIN_DATA_PIN_GROUP(vin2_data, 8),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
+ VIN_DATA_PIN_GROUP(vin3_data, 16),
+ VIN_DATA_PIN_GROUP(vin3_data, 12),
+ VIN_DATA_PIN_GROUP(vin3_data, 10),
+ VIN_DATA_PIN_GROUP(vin3_data, 8),
+ SH_PFC_PIN_GROUP(vin3_sync),
+ SH_PFC_PIN_GROUP(vin3_field),
+ SH_PFC_PIN_GROUP(vin3_clkenb),
+ SH_PFC_PIN_GROUP(vin3_clk),
+ VIN_DATA_PIN_GROUP(vin4_data, 12),
+ VIN_DATA_PIN_GROUP(vin4_data, 10),
+ VIN_DATA_PIN_GROUP(vin4_data, 8),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+};
+
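+/*
+ * The *_groups[] string arrays list the pin groups selectable by each
+ * function; SH_PFC_FUNCTION() in pinmux_functions[] below ties a function
+ * name to its group list.
+ */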
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdio",
+ "avb_mii",
+ "avb_gmii",
+ "avb_avtp_match",
+};
+
+static const char * const can0_groups[] = {
+ "can0_data",
+ "can_clk",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data",
+ "can_clk",
+};
+
+static const char * const du0_groups[] = {
+ "du0_rgb666",
+ "du0_rgb888",
+ "du0_sync",
+ "du0_oddf",
+ "du0_disp",
+ "du0_cde",
+};
+
+static const char * const du1_groups[] = {
+ "du1_rgb666",
+ "du1_sync",
+ "du1_oddf",
+ "du1_disp",
+ "du1_cde",
+};
+
+static const char * const intc_groups[] = {
+ "intc_irq0",
+ "intc_irq1",
+ "intc_irq2",
+ "intc_irq3",
+};
+
+static const char * const lbsc_groups[] = {
+ "lbsc_cs0",
+ "lbsc_cs1",
+ "lbsc_ex_cs0",
+ "lbsc_ex_cs1",
+ "lbsc_ex_cs2",
+ "lbsc_ex_cs3",
+ "lbsc_ex_cs4",
+ "lbsc_ex_cs5",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_rx",
+ "msiof0_tx",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_rx",
+ "msiof1_tx",
+};
+
+static const char * const qspi_groups[] = {
+ "qspi_ctrl",
+ "qspi_data2",
+ "qspi_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data",
+ "scif3_clk",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const vin0_groups[] = {
+ "vin0_data24",
+ "vin0_data20",
+ "vin0_data18",
+ "vin0_data16",
+ "vin0_data12",
+ "vin0_data10",
+ "vin0_data8",
+ "vin0_sync",
+ "vin0_field",
+ "vin0_clkenb",
+ "vin0_clk",
+};
+
+static const char * const vin1_groups[] = {
+ "vin1_data24",
+ "vin1_data20",
+ "vin1_data18",
+ "vin1_data16",
+ "vin1_data12",
+ "vin1_data10",
+ "vin1_data8",
+ "vin1_data24_b",
+ "vin1_data20_b",
+ "vin1_data16_b",
+ "vin1_sync",
+ "vin1_field",
+ "vin1_clkenb",
+ "vin1_clk",
+};
+
+static const char * const vin2_groups[] = {
+ "vin2_data16",
+ "vin2_data12",
+ "vin2_data10",
+ "vin2_data8",
+ "vin2_sync",
+ "vin2_field",
+ "vin2_clkenb",
+ "vin2_clk",
+};
+
+static const char * const vin3_groups[] = {
+ "vin3_data16",
+ "vin3_data12",
+ "vin3_data10",
+ "vin3_data8",
+ "vin3_sync",
+ "vin3_field",
+ "vin3_clkenb",
+ "vin3_clk",
+};
+
+static const char * const vin4_groups[] = {
+ "vin4_data12",
+ "vin4_data10",
+ "vin4_data8",
+ "vin4_sync",
+ "vin4_field",
+ "vin4_clkenb",
+ "vin4_clk",
+};
+
+static const char * const vin5_groups[] = {
+ "vin5_data12",
+ "vin5_data10",
+ "vin5_data8",
+ "vin5_sync",
+ "vin5_field",
+ "vin5_clkenb",
+ "vin5_clk",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(lbsc),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(qspi),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
+ SH_PFC_FUNCTION(vin3),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+};
+
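+/*
+ * Register layout: each 1-bit GPSRn field selects between GPIO use of a pin
+ * and its peripheral function, with the second enum of each pair naming that
+ * function either directly or as a reference to an IPSRn field (FN_IPx_y);
+ * the IPSRn tables then enumerate the candidate functions of every field,
+ * most significant field first, with one entry per possible field value.
+ */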
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_28_FN, FN_IP1_4,
+ GP_0_27_FN, FN_IP1_3,
+ GP_0_26_FN, FN_IP1_2,
+ GP_0_25_FN, FN_IP1_1,
+ GP_0_24_FN, FN_IP1_0,
+ GP_0_23_FN, FN_IP0_23,
+ GP_0_22_FN, FN_IP0_22,
+ GP_0_21_FN, FN_IP0_21,
+ GP_0_20_FN, FN_IP0_20,
+ GP_0_19_FN, FN_IP0_19,
+ GP_0_18_FN, FN_IP0_18,
+ GP_0_17_FN, FN_IP0_17,
+ GP_0_16_FN, FN_IP0_16,
+ GP_0_15_FN, FN_IP0_15,
+ GP_0_14_FN, FN_IP0_14,
+ GP_0_13_FN, FN_IP0_13,
+ GP_0_12_FN, FN_IP0_12,
+ GP_0_11_FN, FN_IP0_11,
+ GP_0_10_FN, FN_IP0_10,
+ GP_0_9_FN, FN_IP0_9,
+ GP_0_8_FN, FN_IP0_8,
+ GP_0_7_FN, FN_IP0_7,
+ GP_0_6_FN, FN_IP0_6,
+ GP_0_5_FN, FN_IP0_5,
+ GP_0_4_FN, FN_IP0_4,
+ GP_0_3_FN, FN_IP0_3,
+ GP_0_2_FN, FN_IP0_2,
+ GP_0_1_FN, FN_IP0_1,
+ GP_0_0_FN, FN_IP0_0 }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_22_FN, FN_DU1_CDE,
+ GP_1_21_FN, FN_DU1_DISP,
+ GP_1_20_FN, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE,
+ GP_1_19_FN, FN_DU1_EXVSYNC_DU1_VSYNC,
+ GP_1_18_FN, FN_DU1_EXHSYNC_DU1_HSYNC,
+ GP_1_17_FN, FN_DU1_DB7_C5,
+ GP_1_16_FN, FN_DU1_DB6_C4,
+ GP_1_15_FN, FN_DU1_DB5_C3_DATA15,
+ GP_1_14_FN, FN_DU1_DB4_C2_DATA14,
+ GP_1_13_FN, FN_DU1_DB3_C1_DATA13,
+ GP_1_12_FN, FN_DU1_DB2_C0_DATA12,
+ GP_1_11_FN, FN_IP1_16,
+ GP_1_10_FN, FN_IP1_15,
+ GP_1_9_FN, FN_IP1_14,
+ GP_1_8_FN, FN_IP1_13,
+ GP_1_7_FN, FN_IP1_12,
+ GP_1_6_FN, FN_IP1_11,
+ GP_1_5_FN, FN_IP1_10,
+ GP_1_4_FN, FN_IP1_9,
+ GP_1_3_FN, FN_IP1_8,
+ GP_1_2_FN, FN_IP1_7,
+ GP_1_1_FN, FN_IP1_6,
+ GP_1_0_FN, FN_IP1_5 }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ GP_2_31_FN, FN_A15,
+ GP_2_30_FN, FN_A14,
+ GP_2_29_FN, FN_A13,
+ GP_2_28_FN, FN_A12,
+ GP_2_27_FN, FN_A11,
+ GP_2_26_FN, FN_A10,
+ GP_2_25_FN, FN_A9,
+ GP_2_24_FN, FN_A8,
+ GP_2_23_FN, FN_A7,
+ GP_2_22_FN, FN_A6,
+ GP_2_21_FN, FN_A5,
+ GP_2_20_FN, FN_A4,
+ GP_2_19_FN, FN_A3,
+ GP_2_18_FN, FN_A2,
+ GP_2_17_FN, FN_A1,
+ GP_2_16_FN, FN_A0,
+ GP_2_15_FN, FN_D15,
+ GP_2_14_FN, FN_D14,
+ GP_2_13_FN, FN_D13,
+ GP_2_12_FN, FN_D12,
+ GP_2_11_FN, FN_D11,
+ GP_2_10_FN, FN_D10,
+ GP_2_9_FN, FN_D9,
+ GP_2_8_FN, FN_D8,
+ GP_2_7_FN, FN_D7,
+ GP_2_6_FN, FN_D6,
+ GP_2_5_FN, FN_D5,
+ GP_2_4_FN, FN_D4,
+ GP_2_3_FN, FN_D3,
+ GP_2_2_FN, FN_D2,
+ GP_2_1_FN, FN_D1,
+ GP_2_0_FN, FN_D0 }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_27_FN, FN_CS0_N,
+ GP_3_26_FN, FN_IP1_22,
+ GP_3_25_FN, FN_IP1_21,
+ GP_3_24_FN, FN_IP1_20,
+ GP_3_23_FN, FN_IP1_19,
+ GP_3_22_FN, FN_IRQ3,
+ GP_3_21_FN, FN_IRQ2,
+ GP_3_20_FN, FN_IRQ1,
+ GP_3_19_FN, FN_IRQ0,
+ GP_3_18_FN, FN_EX_WAIT0,
+ GP_3_17_FN, FN_WE1_N,
+ GP_3_16_FN, FN_WE0_N,
+ GP_3_15_FN, FN_RD_WR_N,
+ GP_3_14_FN, FN_RD_N,
+ GP_3_13_FN, FN_BS_N,
+ GP_3_12_FN, FN_EX_CS5_N,
+ GP_3_11_FN, FN_EX_CS4_N,
+ GP_3_10_FN, FN_EX_CS3_N,
+ GP_3_9_FN, FN_EX_CS2_N,
+ GP_3_8_FN, FN_EX_CS1_N,
+ GP_3_7_FN, FN_EX_CS0_N,
+ GP_3_6_FN, FN_CS1_N_A26,
+ GP_3_5_FN, FN_IP1_18,
+ GP_3_4_FN, FN_IP1_17,
+ GP_3_3_FN, FN_A19,
+ GP_3_2_FN, FN_A18,
+ GP_3_1_FN, FN_A17,
+ GP_3_0_FN, FN_A16 }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_16_FN, FN_VI0_FIELD,
+ GP_4_15_FN, FN_VI0_D11_G3_Y3,
+ GP_4_14_FN, FN_VI0_D10_G2_Y2,
+ GP_4_13_FN, FN_VI0_D9_G1_Y1,
+ GP_4_12_FN, FN_VI0_D8_G0_Y0,
+ GP_4_11_FN, FN_VI0_D7_B7_C7,
+ GP_4_10_FN, FN_VI0_D6_B6_C6,
+ GP_4_9_FN, FN_VI0_D5_B5_C5,
+ GP_4_8_FN, FN_VI0_D4_B4_C4,
+ GP_4_7_FN, FN_VI0_D3_B3_C3,
+ GP_4_6_FN, FN_VI0_D2_B2_C2,
+ GP_4_5_FN, FN_VI0_D1_B1_C1,
+ GP_4_4_FN, FN_VI0_D0_B0_C0,
+ GP_4_3_FN, FN_VI0_VSYNC_N,
+ GP_4_2_FN, FN_VI0_HSYNC_N,
+ GP_4_1_FN, FN_VI0_CLKENB,
+ GP_4_0_FN, FN_VI0_CLK }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_5_16_FN, FN_VI1_FIELD,
+ GP_5_15_FN, FN_VI1_D11_G3_Y3,
+ GP_5_14_FN, FN_VI1_D10_G2_Y2,
+ GP_5_13_FN, FN_VI1_D9_G1_Y1,
+ GP_5_12_FN, FN_VI1_D8_G0_Y0,
+ GP_5_11_FN, FN_VI1_D7_B7_C7,
+ GP_5_10_FN, FN_VI1_D6_B6_C6,
+ GP_5_9_FN, FN_VI1_D5_B5_C5,
+ GP_5_8_FN, FN_VI1_D4_B4_C4,
+ GP_5_7_FN, FN_VI1_D3_B3_C3,
+ GP_5_6_FN, FN_VI1_D2_B2_C2,
+ GP_5_5_FN, FN_VI1_D1_B1_C1,
+ GP_5_4_FN, FN_VI1_D0_B0_C0,
+ GP_5_3_FN, FN_VI1_VSYNC_N,
+ GP_5_2_FN, FN_VI1_HSYNC_N,
+ GP_5_1_FN, FN_VI1_CLKENB,
+ GP_5_0_FN, FN_VI1_CLK }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_16_FN, FN_IP2_16,
+ GP_6_15_FN, FN_IP2_15,
+ GP_6_14_FN, FN_IP2_14,
+ GP_6_13_FN, FN_IP2_13,
+ GP_6_12_FN, FN_IP2_12,
+ GP_6_11_FN, FN_IP2_11,
+ GP_6_10_FN, FN_IP2_10,
+ GP_6_9_FN, FN_IP2_9,
+ GP_6_8_FN, FN_IP2_8,
+ GP_6_7_FN, FN_IP2_7,
+ GP_6_6_FN, FN_IP2_6,
+ GP_6_5_FN, FN_IP2_5,
+ GP_6_4_FN, FN_IP2_4,
+ GP_6_3_FN, FN_IP2_3,
+ GP_6_2_FN, FN_IP2_2,
+ GP_6_1_FN, FN_IP2_1,
+ GP_6_0_FN, FN_IP2_0 }
+ },
+ { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_7_16_FN, FN_VI3_FIELD,
+ GP_7_15_FN, FN_IP3_14,
+ GP_7_14_FN, FN_VI3_D10_Y2,
+ GP_7_13_FN, FN_IP3_13,
+ GP_7_12_FN, FN_IP3_12,
+ GP_7_11_FN, FN_IP3_11,
+ GP_7_10_FN, FN_IP3_10,
+ GP_7_9_FN, FN_IP3_9,
+ GP_7_8_FN, FN_IP3_8,
+ GP_7_7_FN, FN_IP3_7,
+ GP_7_6_FN, FN_IP3_6,
+ GP_7_5_FN, FN_IP3_5,
+ GP_7_4_FN, FN_IP3_4,
+ GP_7_3_FN, FN_IP3_3,
+ GP_7_2_FN, FN_IP3_2,
+ GP_7_1_FN, FN_IP3_1,
+ GP_7_0_FN, FN_IP3_0 }
+ },
+ { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_8_16_FN, FN_IP4_24,
+ GP_8_15_FN, FN_IP4_23,
+ GP_8_14_FN, FN_IP4_22,
+ GP_8_13_FN, FN_IP4_21,
+ GP_8_12_FN, FN_IP4_20_19,
+ GP_8_11_FN, FN_IP4_18_17,
+ GP_8_10_FN, FN_IP4_16_15,
+ GP_8_9_FN, FN_IP4_14_13,
+ GP_8_8_FN, FN_IP4_12_11,
+ GP_8_7_FN, FN_IP4_10_9,
+ GP_8_6_FN, FN_IP4_8_7,
+ GP_8_5_FN, FN_IP4_6_5,
+ GP_8_4_FN, FN_IP4_4,
+ GP_8_3_FN, FN_IP4_3_2,
+ GP_8_2_FN, FN_IP4_1,
+ GP_8_1_FN, FN_IP4_0,
+ GP_8_0_FN, FN_VI4_CLK }
+ },
+ { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_9_16_FN, FN_VI5_FIELD,
+ GP_9_15_FN, FN_VI5_D11_Y3,
+ GP_9_14_FN, FN_VI5_D10_Y2,
+ GP_9_13_FN, FN_VI5_D9_Y1,
+ GP_9_12_FN, FN_IP5_11,
+ GP_9_11_FN, FN_IP5_10,
+ GP_9_10_FN, FN_IP5_9,
+ GP_9_9_FN, FN_IP5_8,
+ GP_9_8_FN, FN_IP5_7,
+ GP_9_7_FN, FN_IP5_6,
+ GP_9_6_FN, FN_IP5_5,
+ GP_9_5_FN, FN_IP5_4,
+ GP_9_4_FN, FN_IP5_3,
+ GP_9_3_FN, FN_IP5_2,
+ GP_9_2_FN, FN_IP5_1,
+ GP_9_1_FN, FN_IP5_0,
+ GP_9_0_FN, FN_VI5_CLK }
+ },
+ { PINMUX_CFG_REG("GPSR10", 0xE606002C, 32, 1) {
+ GP_10_31_FN, FN_CAN1_RX,
+ GP_10_30_FN, FN_CAN1_TX,
+ GP_10_29_FN, FN_CAN_CLK,
+ GP_10_28_FN, FN_CAN0_RX,
+ GP_10_27_FN, FN_CAN0_TX,
+ GP_10_26_FN, FN_SCIF_CLK,
+ GP_10_25_FN, FN_IP6_18_17,
+ GP_10_24_FN, FN_IP6_16,
+ GP_10_23_FN, FN_IP6_15_14,
+ GP_10_22_FN, FN_IP6_13_12,
+ GP_10_21_FN, FN_IP6_11_10,
+ GP_10_20_FN, FN_IP6_9_8,
+ GP_10_19_FN, FN_RX1,
+ GP_10_18_FN, FN_TX1,
+ GP_10_17_FN, FN_RTS1_N,
+ GP_10_16_FN, FN_CTS1_N,
+ GP_10_15_FN, FN_SCK1,
+ GP_10_14_FN, FN_RX0,
+ GP_10_13_FN, FN_TX0,
+ GP_10_12_FN, FN_RTS0_N,
+ GP_10_11_FN, FN_CTS0_N,
+ GP_10_10_FN, FN_SCK0,
+ GP_10_9_FN, FN_IP6_7,
+ GP_10_8_FN, FN_IP6_6,
+ GP_10_7_FN, FN_HCTS1_N,
+ GP_10_6_FN, FN_IP6_5,
+ GP_10_5_FN, FN_IP6_4,
+ GP_10_4_FN, FN_IP6_3,
+ GP_10_3_FN, FN_IP6_2,
+ GP_10_2_FN, FN_HRTS0_N,
+ GP_10_1_FN, FN_IP6_1,
+ GP_10_0_FN, FN_IP6_0 }
+ },
+ { PINMUX_CFG_REG("GPSR11", 0xE6060030, 32, 1) {
+ 0, 0,
+ 0, 0,
+ GP_11_29_FN, FN_AVS2,
+ GP_11_28_FN, FN_AVS1,
+ GP_11_27_FN, FN_ADICHS2,
+ GP_11_26_FN, FN_ADICHS1,
+ GP_11_25_FN, FN_ADICHS0,
+ GP_11_24_FN, FN_ADIDATA,
+ GP_11_23_FN, FN_ADICS_SAMP,
+ GP_11_22_FN, FN_ADICLK,
+ GP_11_21_FN, FN_IP7_20,
+ GP_11_20_FN, FN_IP7_19,
+ GP_11_19_FN, FN_IP7_18,
+ GP_11_18_FN, FN_IP7_17,
+ GP_11_17_FN, FN_IP7_16,
+ GP_11_16_FN, FN_IP7_15_14,
+ GP_11_15_FN, FN_IP7_13_12,
+ GP_11_14_FN, FN_IP7_11_10,
+ GP_11_13_FN, FN_IP7_9_8,
+ GP_11_12_FN, FN_SD0_WP,
+ GP_11_11_FN, FN_SD0_CD,
+ GP_11_10_FN, FN_SD0_DAT3,
+ GP_11_9_FN, FN_SD0_DAT2,
+ GP_11_8_FN, FN_SD0_DAT1,
+ GP_11_7_FN, FN_SD0_DAT0,
+ GP_11_6_FN, FN_SD0_CMD,
+ GP_11_5_FN, FN_SD0_CLK,
+ GP_11_4_FN, FN_IP7_7,
+ GP_11_3_FN, FN_IP7_6,
+ GP_11_2_FN, FN_IP7_5_4,
+ GP_11_1_FN, FN_IP7_3_2,
+ GP_11_0_FN, FN_IP7_1_0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
+ 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP0_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_23 [1] */
+ FN_DU0_DB7_C5, 0,
+ /* IP0_22 [1] */
+ FN_DU0_DB6_C4, 0,
+ /* IP0_21 [1] */
+ FN_DU0_DB5_C3, 0,
+ /* IP0_20 [1] */
+ FN_DU0_DB4_C2, 0,
+ /* IP0_19 [1] */
+ FN_DU0_DB3_C1, 0,
+ /* IP0_18 [1] */
+ FN_DU0_DB2_C0, 0,
+ /* IP0_17 [1] */
+ FN_DU0_DB1, 0,
+ /* IP0_16 [1] */
+ FN_DU0_DB0, 0,
+ /* IP0_15 [1] */
+ FN_DU0_DG7_Y3_DATA15, 0,
+ /* IP0_14 [1] */
+ FN_DU0_DG6_Y2_DATA14, 0,
+ /* IP0_13 [1] */
+ FN_DU0_DG5_Y1_DATA13, 0,
+ /* IP0_12 [1] */
+ FN_DU0_DG4_Y0_DATA12, 0,
+ /* IP0_11 [1] */
+ FN_DU0_DG3_C7_DATA11, 0,
+ /* IP0_10 [1] */
+ FN_DU0_DG2_C6_DATA10, 0,
+ /* IP0_9 [1] */
+ FN_DU0_DG1_DATA9, 0,
+ /* IP0_8 [1] */
+ FN_DU0_DG0_DATA8, 0,
+ /* IP0_7 [1] */
+ FN_DU0_DR7_Y9_DATA7, 0,
+ /* IP0_6 [1] */
+ FN_DU0_DR6_Y8_DATA6, 0,
+ /* IP0_5 [1] */
+ FN_DU0_DR5_Y7_DATA5, 0,
+ /* IP0_4 [1] */
+ FN_DU0_DR4_Y6_DATA4, 0,
+ /* IP0_3 [1] */
+ FN_DU0_DR3_Y5_DATA3, 0,
+ /* IP0_2 [1] */
+ FN_DU0_DR2_Y4_DATA2, 0,
+ /* IP0_1 [1] */
+ FN_DU0_DR1_DATA1, 0,
+ /* IP0_0 [1] */
+ FN_DU0_DR0_DATA0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
+ 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP1_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_23 [1] */
+ 0, 0,
+ /* IP1_22 [1] */
+ FN_A25, FN_SSL,
+ /* IP1_21 [1] */
+ FN_A24, FN_SPCLK,
+ /* IP1_20 [1] */
+ FN_A23, FN_IO3,
+ /* IP1_19 [1] */
+ FN_A22, FN_IO2,
+ /* IP1_18 [1] */
+ FN_A21, FN_MISO_IO1,
+ /* IP1_17 [1] */
+ FN_A20, FN_MOSI_IO0,
+ /* IP1_16 [1] */
+ FN_DU1_DG7_Y3_DATA11, 0,
+ /* IP1_15 [1] */
+ FN_DU1_DG6_Y2_DATA10, 0,
+ /* IP1_14 [1] */
+ FN_DU1_DG5_Y1_DATA9, 0,
+ /* IP1_13 [1] */
+ FN_DU1_DG4_Y0_DATA8, 0,
+ /* IP1_12 [1] */
+ FN_DU1_DG3_C7_DATA7, 0,
+ /* IP1_11 [1] */
+ FN_DU1_DG2_C6_DATA6, 0,
+ /* IP1_10 [1] */
+ FN_DU1_DR7_DATA5, 0,
+ /* IP1_9 [1] */
+ FN_DU1_DR6_DATA4, 0,
+ /* IP1_8 [1] */
+ FN_DU1_DR5_Y7_DATA3, 0,
+ /* IP1_7 [1] */
+ FN_DU1_DR4_Y6_DATA2, 0,
+ /* IP1_6 [1] */
+ FN_DU1_DR3_Y5_DATA1, 0,
+ /* IP1_5 [1] */
+ FN_DU1_DR2_Y4_DATA0, 0,
+ /* IP1_4 [1] */
+ FN_DU0_CDE, 0,
+ /* IP1_3 [1] */
+ FN_DU0_DISP, 0,
+ /* IP1_2 [1] */
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, 0,
+ /* IP1_1 [1] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, 0,
+ /* IP1_0 [1] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
+ 4, 4,
+ 4, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP2_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_19_17 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_16 [1] */
+ FN_VI2_FIELD, FN_AVB_TXD2,
+ /* IP2_15 [1] */
+ FN_VI2_D11_Y3, FN_AVB_TXD1,
+ /* IP2_14 [1] */
+ FN_VI2_D10_Y2, FN_AVB_TXD0,
+ /* IP2_13 [1] */
+ FN_VI2_D9_Y1, FN_AVB_TX_EN,
+ /* IP2_12 [1] */
+ FN_VI2_D8_Y0, FN_AVB_TXD3,
+ /* IP2_11 [1] */
+ FN_VI2_D7_C7, FN_AVB_COL,
+ /* IP2_10 [1] */
+ FN_VI2_D6_C6, FN_AVB_RX_ER,
+ /* IP2_9 [1] */
+ FN_VI2_D5_C5, FN_AVB_RXD7,
+ /* IP2_8 [1] */
+ FN_VI2_D4_C4, FN_AVB_RXD6,
+ /* IP2_7 [1] */
+ FN_VI2_D3_C3, FN_AVB_RXD5,
+ /* IP2_6 [1] */
+ FN_VI2_D2_C2, FN_AVB_RXD4,
+ /* IP2_5 [1] */
+ FN_VI2_D1_C1, FN_AVB_RXD3,
+ /* IP2_4 [1] */
+ FN_VI2_D0_C0, FN_AVB_RXD2,
+ /* IP2_3 [1] */
+ FN_VI2_VSYNC_N, FN_AVB_RXD1,
+ /* IP2_2 [1] */
+ FN_VI2_HSYNC_N, FN_AVB_RXD0,
+ /* IP2_1 [1] */
+ FN_VI2_CLKENB, FN_AVB_RX_DV,
+ /* IP2_0 [1] */
+ FN_VI2_CLK, FN_AVB_RX_CLK }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
+ 4, 4,
+ 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP3_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_19_16 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_15 [1] */
+ 0, 0,
+ /* IP3_14 [1] */
+ FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH,
+ /* IP3_13 [1] */
+ FN_VI3_D9_Y1, FN_AVB_GTXREFCLK,
+ /* IP3_12 [1] */
+ FN_VI3_D8_Y0, FN_AVB_CRS,
+ /* IP3_11 [1] */
+ FN_VI3_D7_C7, FN_AVB_PHY_INT,
+ /* IP3_10 [1] */
+ FN_VI3_D6_C6, FN_AVB_MAGIC,
+ /* IP3_9 [1] */
+ FN_VI3_D5_C5, FN_AVB_LINK,
+ /* IP3_8 [1] */
+ FN_VI3_D4_C4, FN_AVB_MDIO,
+ /* IP3_7 [1] */
+ FN_VI3_D3_C3, FN_AVB_MDC,
+ /* IP3_6 [1] */
+ FN_VI3_D2_C2, FN_AVB_GTX_CLK,
+ /* IP3_5 [1] */
+ FN_VI3_D1_C1, FN_AVB_TX_ER,
+ /* IP3_4 [1] */
+ FN_VI3_D0_C0, FN_AVB_TXD7,
+ /* IP3_3 [1] */
+ FN_VI3_VSYNC_N, FN_AVB_TXD6,
+ /* IP3_2 [1] */
+ FN_VI3_HSYNC_N, FN_AVB_TXD5,
+ /* IP3_1 [1] */
+ FN_VI3_CLKENB, FN_AVB_TXD4,
+ /* IP3_0 [1] */
+ FN_VI3_CLK, FN_AVB_TX_CLK }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
+ 4, 3, 1,
+ 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 2, 1, 1) {
+ /* IP4_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_27_25 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_24 [1] */
+ FN_VI4_FIELD, FN_VI3_D15_Y7,
+ /* IP4_23 [1] */
+ FN_VI4_D11_Y3, FN_VI3_D14_Y6,
+ /* IP4_22 [1] */
+ FN_VI4_D10_Y2, FN_VI3_D13_Y5,
+ /* IP4_21 [1] */
+ FN_VI4_D9_Y1, FN_VI3_D12_Y4,
+ /* IP4_20_19 [2] */
+ FN_VI4_D8_Y0, FN_VI0_D23_R7, FN_VI2_D15_Y7, 0,
+ /* IP4_18_17 [2] */
+ FN_VI4_D7_C7, FN_VI0_D22_R6, FN_VI2_D14_Y6, 0,
+ /* IP4_16_15 [2] */
+ FN_VI4_D6_C6, FN_VI0_D21_R5, FN_VI2_D13_Y5, 0,
+ /* IP4_14_13 [2] */
+ FN_VI4_D5_C5, FN_VI0_D20_R4, FN_VI2_D12_Y4, 0,
+ /* IP4_12_11 [2] */
+ FN_VI4_D4_C4, FN_VI0_D19_R3, FN_VI1_D15_G7_Y7, 0,
+ /* IP4_10_9 [2] */
+ FN_VI4_D3_C3, FN_VI0_D18_R2, FN_VI1_D14_G6_Y6, 0,
+ /* IP4_8_7 [2] */
+ FN_VI4_D2_C2, 0, FN_VI0_D17_R1, FN_VI1_D13_G5_Y5,
+ /* IP4_6_5 [2] */
+ FN_VI4_D1_C1, FN_VI0_D16_R0, FN_VI1_D12_G4_Y4, 0,
+ /* IP4_4 [1] */
+ FN_VI4_D0_C0, FN_VI0_D15_G7_Y7,
+ /* IP4_3_2 [2] */
+ FN_VI4_VSYNC_N, FN_VI0_D14_G6_Y6, 0, 0,
+ /* IP4_1 [1] */
+ FN_VI4_HSYNC_N, FN_VI0_D13_G5_Y5,
+ /* IP4_0 [1] */
+ FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
+ 4, 4,
+ 4, 4,
+ 4, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP5_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_19_16 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_15_12 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_11 [1] */
+ FN_VI5_D8_Y0, FN_VI1_D23_R7,
+ /* IP5_10 [1] */
+ FN_VI5_D7_C7, FN_VI1_D22_R6,
+ /* IP5_9 [1] */
+ FN_VI5_D6_C6, FN_VI1_D21_R5,
+ /* IP5_8 [1] */
+ FN_VI5_D5_C5, FN_VI1_D20_R4,
+ /* IP5_7 [1] */
+ FN_VI5_D4_C4, FN_VI1_D19_R3,
+ /* IP5_6 [1] */
+ FN_VI5_D3_C3, FN_VI1_D18_R2,
+ /* IP5_5 [1] */
+ FN_VI5_D2_C2, FN_VI1_D17_R1,
+ /* IP5_4 [1] */
+ FN_VI5_D1_C1, FN_VI1_D16_R0,
+ /* IP5_3 [1] */
+ FN_VI5_D0_C0, FN_VI1_D15_G7_Y7_B,
+ /* IP5_2 [1] */
+ FN_VI5_VSYNC_N, FN_VI1_D14_G6_Y6_B,
+ /* IP5_1 [1] */
+ FN_VI5_HSYNC_N, FN_VI1_D13_G5_Y5_B,
+ /* IP5_0 [1] */
+ FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
+ 4, 4,
+ 4, 1, 2, 1,
+ 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* IP6_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_23_20 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_19 [1] */
+ 0, 0,
+ /* IP6_18_17 [2] */
+ FN_DREQ1_N, FN_RX3, 0, 0,
+ /* IP6_16 [1] */
+ FN_TX3, 0,
+ /* IP6_15_14 [2] */
+ FN_DACK1, FN_SCK3, 0, 0,
+ /* IP6_13_12 [2] */
+ FN_DREQ0_N, FN_RX2, 0, 0,
+ /* IP6_11_10 [2] */
+ FN_DACK0, FN_TX2, 0, 0,
+ /* IP6_9_8 [2] */
+ FN_DRACK0, FN_SCK2, 0, 0,
+ /* IP6_7 [1] */
+ FN_MSIOF1_RXD, FN_HRX1,
+ /* IP6_6 [1] */
+ FN_MSIOF1_TXD, FN_HTX1,
+ /* IP6_5 [1] */
+ FN_MSIOF1_SYNC, FN_HRTS1_N,
+ /* IP6_4 [1] */
+ FN_MSIOF1_SCK, FN_HSCK1,
+ /* IP6_3 [1] */
+ FN_MSIOF0_RXD, FN_HRX0,
+ /* IP6_2 [1] */
+ FN_MSIOF0_TXD, FN_HTX0,
+ /* IP6_1 [1] */
+ FN_MSIOF0_SYNC, FN_HCTS0_N,
+ /* IP6_0 [1] */
+ FN_MSIOF0_SCK, FN_HSCK0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
+ 4, 4,
+ 3, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2,
+ 1, 1, 2, 2, 2) {
+ /* IP7_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_27_24 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_23_21 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_20 [1] */
+ FN_AUDIO_CLKB, 0,
+ /* IP7_19 [1] */
+ FN_AUDIO_CLKA, 0,
+ /* IP7_18 [1] */
+ FN_AUDIO_CLKOUT, 0,
+ /* IP7_17 [1] */
+ FN_SSI_SDATA4, 0,
+ /* IP7_16 [1] */
+ FN_SSI_WS4, 0,
+ /* IP7_15_14 [2] */
+ FN_SSI_SCK4, FN_TPU0TO3, 0, 0,
+ /* IP7_13_12 [2] */
+ FN_SSI_SDATA3, FN_TPU0TO2, 0, 0,
+ /* IP7_11_10 [2] */
+ FN_SSI_WS34, FN_TPU0TO1, 0, 0,
+ /* IP7_9_8 [2] */
+ FN_SSI_SCK34, FN_TPU0TO0, 0, 0,
+ /* IP7_7 [1] */
+ FN_PWM4, 0,
+ /* IP7_6 [1] */
+ FN_PWM3, 0,
+ /* IP7_5_4 [2] */
+ FN_PWM2, FN_TCLK3, FN_FSO_TOE, 0,
+ /* IP7_3_2 [2] */
+ FN_PWM1, FN_TCLK2, FN_FSO_CFE_1, 0,
+ /* IP7_1_0 [2] */
+ FN_PWM0, FN_TCLK1, FN_FSO_CFE_0, 0 }
+ },
+ { },
+};
+
+const struct sh_pfc_soc_info r8a7792_pinmux_info = {
+ .name = "r8a77920_pfc",
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
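
A note on the IPSR descriptors added above: PINMUX_CFG_REG_VAR takes the register name, address and width, then one bit-width per field, followed in the data block by one enum value per possible field setting, so the widths must add up to 32 and an n-bit field needs 2^n values. The following is a minimal standalone sketch of that bookkeeping in ordinary user-space C; check_var_widths() is an invented helper, not part of the sh-pfc code.

#include <stdio.h>

/*
 * Invented helper: verify that a PINMUX_CFG_REG_VAR-style width list
 * covers the full 32-bit register and report how many enum entries the
 * data block must supply (2^width values per field).
 */
static int check_var_widths(const unsigned char *widths, unsigned int n)
{
        unsigned int i, total = 0, entries = 0;

        for (i = 0; i < n; i++) {
                total += widths[i];
                entries += 1u << widths[i];
        }

        printf("fields: %u, bits: %u, enum entries needed: %u\n",
               n, total, entries);

        return total == 32 ? 0 : -1;
}

int main(void)
{
        /* Field layout of IPSR1 above: two 4-bit fields, then 24 1-bit fields */
        static const unsigned char ipsr1[] = {
                4, 4,
                1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1,
        };

        return check_var_widths(ipsr1, sizeof(ipsr1)) ? 1 : 0;
}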
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 8bc2cf0c594e..ed734f560c84 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -22,7 +22,9 @@
PORT_GP_32(3, fn, sfx), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_28(5, fn, sfx), \
- PORT_GP_26(6, fn, sfx)
+ PORT_GP_CFG_24(6, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_1(6, 24, fn, sfx), \
+ PORT_GP_1(6, 25, fn, sfx)
enum {
PINMUX_RESERVED = 0,
@@ -5160,8 +5162,32 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+static int r8a7794_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+{
+ *pocctrl = 0xe606006c;
+
+ switch (pin & 0x1f) {
+ case 6: return 23;
+ case 7: return 16;
+ case 14: return 15;
+ case 15: return 8;
+ case 0 ... 5:
+ case 8 ... 13:
+ return 22 - (pin & 0x1f);
+ case 16 ... 23:
+ return 47 - (pin & 0x1f);
+ }
+
+ return -EINVAL;
+}
+
+static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
+ .pin_to_pocctrl = r8a7794_pin_to_pocctrl,
+};
+
const struct sh_pfc_soc_info r8a7794_pinmux_info = {
.name = "r8a77940_pfc",
+ .ops = &r8a7794_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
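
The new r8a7794_pin_to_pocctrl() above maps a GP6 pin offset to its bit position in the POC control register at 0xe606006c, partly through special cases and partly through the 22 - offset / 47 - offset arithmetic. The sketch below merely restates that arithmetic as a standalone program so the resulting table can be printed and checked by eye; it is an illustration, not driver code (it uses the same GCC case-range extension as the kernel source).

#include <stdio.h>

/*
 * Restatement of the pin-offset-to-POCCTRL-bit mapping from
 * r8a7794_pin_to_pocctrl() above; the register address and bit numbers
 * are copied from the hunk, not derived independently.
 */
static int pocctrl_bit(unsigned int offset)
{
        switch (offset) {
        case 6:  return 23;
        case 7:  return 16;
        case 14: return 15;
        case 15: return 8;
        case 0 ... 5:
        case 8 ... 13:
                return 22 - offset;
        case 16 ... 23:
                return 47 - offset;
        }

        return -1;
}

int main(void)
{
        unsigned int offset;

        for (offset = 0; offset < 24; offset++)
                printf("GP6_%-2u -> POCCTRL (0xe606006c) bit %d\n",
                       offset, pocctrl_bit(offset));

        return 0;
}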
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index b74cdd310d83..2e8cc2adbed7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1,5 +1,5 @@
/*
- * R-Car Gen3 processor support - PFC hardware block.
+ * R8A7795 processor support - PFC hardware block.
*
* Copyright (C) 2015 Renesas Electronics Corporation
*
@@ -13,19 +13,23 @@
#include "core.h"
#include "sh_pfc.h"
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | \
+ SH_PFC_PIN_CFG_PULL_UP | \
+ SH_PFC_PIN_CFG_PULL_DOWN)
+
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_CFG_16(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_15(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_12(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_1(3, 12, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_1(3, 13, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_1(3, 14, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_1(3, 15, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_26(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_32(6, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_4(7, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
+ PORT_GP_CFG_16(0, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_15(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_26(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_32(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_4(7, fn, sfx, CFG_FLAGS)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -1871,6 +1875,86 @@ static const unsigned int drif3_data1_b_mux[] = {
RIF3_D1_B_MARK,
};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+ DU_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int du_sync_mux[] = {
+ DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+static const unsigned int du_oddf_pins[] = {
+ /* EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int du_oddf_mux[] = {
+ DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -3593,6 +3677,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(drif3_ctrl_b),
SH_PFC_PIN_GROUP(drif3_data0_b),
SH_PFC_PIN_GROUP(drif3_data1_b),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -3918,6 +4010,17 @@ static const char * const drif3_groups[] = {
"drif3_data1_b",
};
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync",
+ "du_oddf",
+ "du_cde",
+ "du_disp",
+};
+
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -4265,6 +4368,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(drif1),
SH_PFC_FUNCTION(drif2),
SH_PFC_FUNCTION(drif3),
+ SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -5073,8 +5177,234 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
+#define PUEN 0xe6060400
+#define PUD 0xe6060440
+
+#define PU0 0x00
+#define PU1 0x04
+#define PU2 0x08
+#define PU3 0x0c
+#define PU4 0x10
+#define PU5 0x14
+#define PU6 0x18
+
+static const struct {
+ u16 reg : 11;
+ u16 bit : 5;
+} pullups[] = {
+ [RCAR_GP_PIN(2, 11)] = { PU0, 31 }, /* AVB_PHY_INT */
+ [RCAR_GP_PIN(2, 10)] = { PU0, 30 }, /* AVB_MAGIC */
+ [RCAR_GP_PIN(2, 9)] = { PU0, 29 }, /* AVB_MDC */
+
+ [RCAR_GP_PIN(1, 19)] = { PU1, 31 }, /* A19 */
+ [RCAR_GP_PIN(1, 18)] = { PU1, 30 }, /* A18 */
+ [RCAR_GP_PIN(1, 17)] = { PU1, 29 }, /* A17 */
+ [RCAR_GP_PIN(1, 16)] = { PU1, 28 }, /* A16 */
+ [RCAR_GP_PIN(1, 15)] = { PU1, 27 }, /* A15 */
+ [RCAR_GP_PIN(1, 14)] = { PU1, 26 }, /* A14 */
+ [RCAR_GP_PIN(1, 13)] = { PU1, 25 }, /* A13 */
+ [RCAR_GP_PIN(1, 12)] = { PU1, 24 }, /* A12 */
+ [RCAR_GP_PIN(1, 11)] = { PU1, 23 }, /* A11 */
+ [RCAR_GP_PIN(1, 10)] = { PU1, 22 }, /* A10 */
+ [RCAR_GP_PIN(1, 9)] = { PU1, 21 }, /* A9 */
+ [RCAR_GP_PIN(1, 8)] = { PU1, 20 }, /* A8 */
+ [RCAR_GP_PIN(1, 7)] = { PU1, 19 }, /* A7 */
+ [RCAR_GP_PIN(1, 6)] = { PU1, 18 }, /* A6 */
+ [RCAR_GP_PIN(1, 5)] = { PU1, 17 }, /* A5 */
+ [RCAR_GP_PIN(1, 4)] = { PU1, 16 }, /* A4 */
+ [RCAR_GP_PIN(1, 3)] = { PU1, 15 }, /* A3 */
+ [RCAR_GP_PIN(1, 2)] = { PU1, 14 }, /* A2 */
+ [RCAR_GP_PIN(1, 1)] = { PU1, 13 }, /* A1 */
+ [RCAR_GP_PIN(1, 0)] = { PU1, 12 }, /* A0 */
+ [RCAR_GP_PIN(2, 8)] = { PU1, 11 }, /* PWM2_A */
+ [RCAR_GP_PIN(2, 7)] = { PU1, 10 }, /* PWM1_A */
+ [RCAR_GP_PIN(2, 6)] = { PU1, 9 }, /* PWM0 */
+ [RCAR_GP_PIN(2, 5)] = { PU1, 8 }, /* IRQ5 */
+ [RCAR_GP_PIN(2, 4)] = { PU1, 7 }, /* IRQ4 */
+ [RCAR_GP_PIN(2, 3)] = { PU1, 6 }, /* IRQ3 */
+ [RCAR_GP_PIN(2, 2)] = { PU1, 5 }, /* IRQ2 */
+ [RCAR_GP_PIN(2, 1)] = { PU1, 4 }, /* IRQ1 */
+ [RCAR_GP_PIN(2, 0)] = { PU1, 3 }, /* IRQ0 */
+ [RCAR_GP_PIN(2, 14)] = { PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
+ [RCAR_GP_PIN(2, 13)] = { PU1, 1 }, /* AVB_AVTP_MATCH_A */
+ [RCAR_GP_PIN(2, 12)] = { PU1, 0 }, /* AVB_LINK */
+
+ [RCAR_GP_PIN(7, 3)] = { PU2, 29 }, /* HDMI1_CEC */
+ [RCAR_GP_PIN(7, 2)] = { PU2, 28 }, /* HDMI0_CEC */
+ [RCAR_GP_PIN(7, 1)] = { PU2, 27 }, /* AVS2 */
+ [RCAR_GP_PIN(7, 0)] = { PU2, 26 }, /* AVS1 */
+ [RCAR_GP_PIN(0, 15)] = { PU2, 25 }, /* D15 */
+ [RCAR_GP_PIN(0, 14)] = { PU2, 24 }, /* D14 */
+ [RCAR_GP_PIN(0, 13)] = { PU2, 23 }, /* D13 */
+ [RCAR_GP_PIN(0, 12)] = { PU2, 22 }, /* D12 */
+ [RCAR_GP_PIN(0, 11)] = { PU2, 21 }, /* D11 */
+ [RCAR_GP_PIN(0, 10)] = { PU2, 20 }, /* D10 */
+ [RCAR_GP_PIN(0, 9)] = { PU2, 19 }, /* D9 */
+ [RCAR_GP_PIN(0, 8)] = { PU2, 18 }, /* D8 */
+ [RCAR_GP_PIN(0, 7)] = { PU2, 17 }, /* D7 */
+ [RCAR_GP_PIN(0, 6)] = { PU2, 16 }, /* D6 */
+ [RCAR_GP_PIN(0, 5)] = { PU2, 15 }, /* D5 */
+ [RCAR_GP_PIN(0, 4)] = { PU2, 14 }, /* D4 */
+ [RCAR_GP_PIN(0, 3)] = { PU2, 13 }, /* D3 */
+ [RCAR_GP_PIN(0, 2)] = { PU2, 12 }, /* D2 */
+ [RCAR_GP_PIN(0, 1)] = { PU2, 11 }, /* D1 */
+ [RCAR_GP_PIN(0, 0)] = { PU2, 10 }, /* D0 */
+ [RCAR_GP_PIN(1, 27)] = { PU2, 8 }, /* EX_WAIT0_A */
+ [RCAR_GP_PIN(1, 26)] = { PU2, 7 }, /* WE1_N */
+ [RCAR_GP_PIN(1, 25)] = { PU2, 6 }, /* WE0_N */
+ [RCAR_GP_PIN(1, 24)] = { PU2, 5 }, /* RD_WR_N */
+ [RCAR_GP_PIN(1, 23)] = { PU2, 4 }, /* RD_N */
+ [RCAR_GP_PIN(1, 22)] = { PU2, 3 }, /* BS_N */
+ [RCAR_GP_PIN(1, 21)] = { PU2, 2 }, /* CS1_N_A26 */
+ [RCAR_GP_PIN(1, 20)] = { PU2, 1 }, /* CS0_N */
+
+ [RCAR_GP_PIN(4, 9)] = { PU3, 31 }, /* SD3_DAT0 */
+ [RCAR_GP_PIN(4, 8)] = { PU3, 30 }, /* SD3_CMD */
+ [RCAR_GP_PIN(4, 7)] = { PU3, 29 }, /* SD3_CLK */
+ [RCAR_GP_PIN(4, 6)] = { PU3, 28 }, /* SD2_DS */
+ [RCAR_GP_PIN(4, 5)] = { PU3, 27 }, /* SD2_DAT3 */
+ [RCAR_GP_PIN(4, 4)] = { PU3, 26 }, /* SD2_DAT2 */
+ [RCAR_GP_PIN(4, 3)] = { PU3, 25 }, /* SD2_DAT1 */
+ [RCAR_GP_PIN(4, 2)] = { PU3, 24 }, /* SD2_DAT0 */
+ [RCAR_GP_PIN(4, 1)] = { PU3, 23 }, /* SD2_CMD */
+ [RCAR_GP_PIN(4, 0)] = { PU3, 22 }, /* SD2_CLK */
+ [RCAR_GP_PIN(3, 11)] = { PU3, 21 }, /* SD1_DAT3 */
+ [RCAR_GP_PIN(3, 10)] = { PU3, 20 }, /* SD1_DAT2 */
+ [RCAR_GP_PIN(3, 9)] = { PU3, 19 }, /* SD1_DAT1 */
+ [RCAR_GP_PIN(3, 8)] = { PU3, 18 }, /* SD1_DAT0 */
+ [RCAR_GP_PIN(3, 7)] = { PU3, 17 }, /* SD1_CMD */
+ [RCAR_GP_PIN(3, 6)] = { PU3, 16 }, /* SD1_CLK */
+ [RCAR_GP_PIN(3, 5)] = { PU3, 15 }, /* SD0_DAT3 */
+ [RCAR_GP_PIN(3, 4)] = { PU3, 14 }, /* SD0_DAT2 */
+ [RCAR_GP_PIN(3, 3)] = { PU3, 13 }, /* SD0_DAT1 */
+ [RCAR_GP_PIN(3, 2)] = { PU3, 12 }, /* SD0_DAT0 */
+ [RCAR_GP_PIN(3, 1)] = { PU3, 11 }, /* SD0_CMD */
+ [RCAR_GP_PIN(3, 0)] = { PU3, 10 }, /* SD0_CLK */
+
+ [RCAR_GP_PIN(5, 19)] = { PU4, 31 }, /* MSIOF0_SS1 */
+ [RCAR_GP_PIN(5, 18)] = { PU4, 30 }, /* MSIOF0_SYNC */
+ [RCAR_GP_PIN(5, 17)] = { PU4, 29 }, /* MSIOF0_SCK */
+ [RCAR_GP_PIN(5, 16)] = { PU4, 28 }, /* HRTS0_N */
+ [RCAR_GP_PIN(5, 15)] = { PU4, 27 }, /* HCTS0_N */
+ [RCAR_GP_PIN(5, 14)] = { PU4, 26 }, /* HTX0 */
+ [RCAR_GP_PIN(5, 13)] = { PU4, 25 }, /* HRX0 */
+ [RCAR_GP_PIN(5, 12)] = { PU4, 24 }, /* HSCK0 */
+ [RCAR_GP_PIN(5, 11)] = { PU4, 23 }, /* RX2_A */
+ [RCAR_GP_PIN(5, 10)] = { PU4, 22 }, /* TX2_A */
+ [RCAR_GP_PIN(5, 9)] = { PU4, 21 }, /* SCK2 */
+ [RCAR_GP_PIN(5, 8)] = { PU4, 20 }, /* RTS1_N_TANS */
+ [RCAR_GP_PIN(5, 7)] = { PU4, 19 }, /* CTS1_N */
+ [RCAR_GP_PIN(5, 6)] = { PU4, 18 }, /* TX1_A */
+ [RCAR_GP_PIN(5, 5)] = { PU4, 17 }, /* RX1_A */
+ [RCAR_GP_PIN(5, 4)] = { PU4, 16 }, /* RTS0_N_TANS */
+ [RCAR_GP_PIN(5, 3)] = { PU4, 15 }, /* CTS0_N */
+ [RCAR_GP_PIN(5, 2)] = { PU4, 14 }, /* TX0 */
+ [RCAR_GP_PIN(5, 1)] = { PU4, 13 }, /* RX0 */
+ [RCAR_GP_PIN(5, 0)] = { PU4, 12 }, /* SCK0 */
+ [RCAR_GP_PIN(3, 15)] = { PU4, 11 }, /* SD1_WP */
+ [RCAR_GP_PIN(3, 14)] = { PU4, 10 }, /* SD1_CD */
+ [RCAR_GP_PIN(3, 13)] = { PU4, 9 }, /* SD0_WP */
+ [RCAR_GP_PIN(3, 12)] = { PU4, 8 }, /* SD0_CD */
+ [RCAR_GP_PIN(4, 17)] = { PU4, 7 }, /* SD3_DS */
+ [RCAR_GP_PIN(4, 16)] = { PU4, 6 }, /* SD3_DAT7 */
+ [RCAR_GP_PIN(4, 15)] = { PU4, 5 }, /* SD3_DAT6 */
+ [RCAR_GP_PIN(4, 14)] = { PU4, 4 }, /* SD3_DAT5 */
+ [RCAR_GP_PIN(4, 13)] = { PU4, 3 }, /* SD3_DAT4 */
+ [RCAR_GP_PIN(4, 12)] = { PU4, 2 }, /* SD3_DAT3 */
+ [RCAR_GP_PIN(4, 11)] = { PU4, 1 }, /* SD3_DAT2 */
+ [RCAR_GP_PIN(4, 10)] = { PU4, 0 }, /* SD3_DAT1 */
+
+ [RCAR_GP_PIN(6, 24)] = { PU5, 31 }, /* USB0_PWEN */
+ [RCAR_GP_PIN(6, 23)] = { PU5, 30 }, /* AUDIO_CLKB_B */
+ [RCAR_GP_PIN(6, 22)] = { PU5, 29 }, /* AUDIO_CLKA_A */
+ [RCAR_GP_PIN(6, 21)] = { PU5, 28 }, /* SSI_SDATA9_A */
+ [RCAR_GP_PIN(6, 20)] = { PU5, 27 }, /* SSI_SDATA8 */
+ [RCAR_GP_PIN(6, 19)] = { PU5, 26 }, /* SSI_SDATA7 */
+ [RCAR_GP_PIN(6, 18)] = { PU5, 25 }, /* SSI_WS78 */
+ [RCAR_GP_PIN(6, 17)] = { PU5, 24 }, /* SSI_SCK78 */
+ [RCAR_GP_PIN(6, 16)] = { PU5, 23 }, /* SSI_SDATA6 */
+ [RCAR_GP_PIN(6, 15)] = { PU5, 22 }, /* SSI_WS6 */
+ [RCAR_GP_PIN(6, 14)] = { PU5, 21 }, /* SSI_SCK6 */
+ [RCAR_GP_PIN(6, 13)] = { PU5, 20 }, /* SSI_SDATA5 */
+ [RCAR_GP_PIN(6, 12)] = { PU5, 19 }, /* SSI_WS5 */
+ [RCAR_GP_PIN(6, 11)] = { PU5, 18 }, /* SSI_SCK5 */
+ [RCAR_GP_PIN(6, 10)] = { PU5, 17 }, /* SSI_SDATA4 */
+ [RCAR_GP_PIN(6, 9)] = { PU5, 16 }, /* SSI_WS4 */
+ [RCAR_GP_PIN(6, 8)] = { PU5, 15 }, /* SSI_SCK4 */
+ [RCAR_GP_PIN(6, 7)] = { PU5, 14 }, /* SSI_SDATA3 */
+ [RCAR_GP_PIN(6, 6)] = { PU5, 13 }, /* SSI_WS34 */
+ [RCAR_GP_PIN(6, 5)] = { PU5, 12 }, /* SSI_SCK34 */
+ [RCAR_GP_PIN(6, 4)] = { PU5, 11 }, /* SSI_SDATA2_A */
+ [RCAR_GP_PIN(6, 3)] = { PU5, 10 }, /* SSI_SDATA1_A */
+ [RCAR_GP_PIN(6, 2)] = { PU5, 9 }, /* SSI_SDATA0 */
+ [RCAR_GP_PIN(6, 1)] = { PU5, 8 }, /* SSI_WS01239 */
+ [RCAR_GP_PIN(6, 0)] = { PU5, 7 }, /* SSI_SCK01239 */
+ [RCAR_GP_PIN(5, 25)] = { PU5, 5 }, /* MLB_DAT */
+ [RCAR_GP_PIN(5, 24)] = { PU5, 4 }, /* MLB_SIG */
+ [RCAR_GP_PIN(5, 23)] = { PU5, 3 }, /* MLB_CLK */
+ [RCAR_GP_PIN(5, 22)] = { PU5, 2 }, /* MSIOF0_RXD */
+ [RCAR_GP_PIN(5, 21)] = { PU5, 1 }, /* MSIOF0_SS2 */
+ [RCAR_GP_PIN(5, 20)] = { PU5, 0 }, /* MSIOF0_TXD */
+
+ [RCAR_GP_PIN(6, 31)] = { PU6, 6 }, /* USB31_OVC */
+ [RCAR_GP_PIN(6, 30)] = { PU6, 5 }, /* USB31_PWEN */
+ [RCAR_GP_PIN(6, 29)] = { PU6, 4 }, /* USB30_OVC */
+ [RCAR_GP_PIN(6, 28)] = { PU6, 3 }, /* USB30_PWEN */
+ [RCAR_GP_PIN(6, 27)] = { PU6, 2 }, /* USB1_OVC */
+ [RCAR_GP_PIN(6, 26)] = { PU6, 1 }, /* USB1_PWEN */
+ [RCAR_GP_PIN(6, 25)] = { PU6, 0 }, /* USB0_OVC */
+};
+
+static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ u32 reg;
+ u32 bit;
+
+ if (WARN_ON_ONCE(!pullups[pin].reg))
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ reg = pullups[pin].reg;
+ bit = BIT(pullups[pin].bit);
+
+ if (sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit) {
+ if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ } else
+ return PIN_CONFIG_BIAS_DISABLE;
+}
+
+static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ u32 enable, updown;
+ u32 reg;
+ u32 bit;
+
+ if (WARN_ON_ONCE(!pullups[pin].reg))
+ return;
+
+ reg = pullups[pin].reg;
+ bit = BIT(pullups[pin].bit);
+
+ enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= bit;
+
+ updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= bit;
+
+ sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
+ sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+}
+
static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
.pin_to_pocctrl = r8a7795_pin_to_pocctrl,
+ .get_bias = r8a7795_pinmux_get_bias,
+ .set_bias = r8a7795_pinmux_set_bias,
};
const struct sh_pfc_soc_info r8a7795_pinmux_info = {
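
The bias handlers added above treat PUEN as the pull-enable register and PUD as the pull-up/down select register; set_bias does a read-modify-write of both, writing the direction before the enable. Below is a plain-integer sketch of that sequence, assuming two fake register images in place of sh_pfc_read_reg()/sh_pfc_write_reg(); the enum names are invented stand-ins for the PIN_CONFIG_BIAS_* constants.

#include <stdint.h>
#include <stdio.h>

enum bias { BIAS_DISABLE, BIAS_PULL_UP, BIAS_PULL_DOWN };

static uint32_t puen, pud;      /* fake register images for the sketch */

static void set_bias(unsigned int bit, enum bias bias)
{
        uint32_t mask = UINT32_C(1) << bit;

        uint32_t enable = puen & ~mask; /* start with the pull disabled */
        if (bias != BIAS_DISABLE)
                enable |= mask;

        uint32_t updown = pud & ~mask;  /* default to pull-down */
        if (bias == BIAS_PULL_UP)
                updown |= mask;

        pud = updown;                   /* direction first, then enable */
        puen = enable;
}

int main(void)
{
        set_bias(12, BIAS_PULL_UP);
        printf("PUEN=%08x PUD=%08x\n", (unsigned)puen, (unsigned)pud);
        set_bias(12, BIAS_DISABLE);
        printf("PUEN=%08x PUD=%08x\n", (unsigned)puen, (unsigned)pud);
        return 0;
}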
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
new file mode 100644
index 000000000000..dc9b671ccf2e
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -0,0 +1,2671 @@
+/*
+ * R8A7796 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+ *
+ * R-Car Gen3 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_16(0, fn, sfx), \
+ PORT_GP_29(1, fn, sfx), \
+ PORT_GP_15(2, fn, sfx), \
+ PORT_GP_CFG_12(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_1(3, 12, fn, sfx), \
+ PORT_GP_1(3, 13, fn, sfx), \
+ PORT_GP_1(3, 14, fn, sfx), \
+ PORT_GP_1(3, 15, fn, sfx), \
+ PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_26(5, fn, sfx), \
+ PORT_GP_32(6, fn, sfx), \
+ PORT_GP_4(7, fn, sfx)
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_15 F_(D15, IP7_11_8)
+#define GPSR0_14 F_(D14, IP7_7_4)
+#define GPSR0_13 F_(D13, IP7_3_0)
+#define GPSR0_12 F_(D12, IP6_31_28)
+#define GPSR0_11 F_(D11, IP6_27_24)
+#define GPSR0_10 F_(D10, IP6_23_20)
+#define GPSR0_9 F_(D9, IP6_19_16)
+#define GPSR0_8 F_(D8, IP6_15_12)
+#define GPSR0_7 F_(D7, IP6_11_8)
+#define GPSR0_6 F_(D6, IP6_7_4)
+#define GPSR0_5 F_(D5, IP6_3_0)
+#define GPSR0_4 F_(D4, IP5_31_28)
+#define GPSR0_3 F_(D3, IP5_27_24)
+#define GPSR0_2 F_(D2, IP5_23_20)
+#define GPSR0_1 F_(D1, IP5_19_16)
+#define GPSR0_0 F_(D0, IP5_15_12)
+
+/* GPSR1 */
+#define GPSR1_28 FM(CLKOUT)
+#define GPSR1_27 F_(EX_WAIT0_A, IP5_11_8)
+#define GPSR1_26 F_(WE1_N, IP5_7_4)
+#define GPSR1_25 F_(WE0_N, IP5_3_0)
+#define GPSR1_24 F_(RD_WR_N, IP4_31_28)
+#define GPSR1_23 F_(RD_N, IP4_27_24)
+#define GPSR1_22 F_(BS_N, IP4_23_20)
+#define GPSR1_21 F_(CS1_N_A26, IP4_19_16)
+#define GPSR1_20 F_(CS0_N, IP4_15_12)
+#define GPSR1_19 F_(A19, IP4_11_8)
+#define GPSR1_18 F_(A18, IP4_7_4)
+#define GPSR1_17 F_(A17, IP4_3_0)
+#define GPSR1_16 F_(A16, IP3_31_28)
+#define GPSR1_15 F_(A15, IP3_27_24)
+#define GPSR1_14 F_(A14, IP3_23_20)
+#define GPSR1_13 F_(A13, IP3_19_16)
+#define GPSR1_12 F_(A12, IP3_15_12)
+#define GPSR1_11 F_(A11, IP3_11_8)
+#define GPSR1_10 F_(A10, IP3_7_4)
+#define GPSR1_9 F_(A9, IP3_3_0)
+#define GPSR1_8 F_(A8, IP2_31_28)
+#define GPSR1_7 F_(A7, IP2_27_24)
+#define GPSR1_6 F_(A6, IP2_23_20)
+#define GPSR1_5 F_(A5, IP2_19_16)
+#define GPSR1_4 F_(A4, IP2_15_12)
+#define GPSR1_3 F_(A3, IP2_11_8)
+#define GPSR1_2 F_(A2, IP2_7_4)
+#define GPSR1_1 F_(A1, IP2_3_0)
+#define GPSR1_0 F_(A0, IP1_31_28)
+
+/* GPSR2 */
+#define GPSR2_14 F_(AVB_AVTP_CAPTURE_A, IP0_23_20)
+#define GPSR2_13 F_(AVB_AVTP_MATCH_A, IP0_19_16)
+#define GPSR2_12 F_(AVB_LINK, IP0_15_12)
+#define GPSR2_11 F_(AVB_PHY_INT, IP0_11_8)
+#define GPSR2_10 F_(AVB_MAGIC, IP0_7_4)
+#define GPSR2_9 F_(AVB_MDC, IP0_3_0)
+#define GPSR2_8 F_(PWM2_A, IP1_27_24)
+#define GPSR2_7 F_(PWM1_A, IP1_23_20)
+#define GPSR2_6 F_(PWM0, IP1_19_16)
+#define GPSR2_5 F_(IRQ5, IP1_15_12)
+#define GPSR2_4 F_(IRQ4, IP1_11_8)
+#define GPSR2_3 F_(IRQ3, IP1_7_4)
+#define GPSR2_2 F_(IRQ2, IP1_3_0)
+#define GPSR2_1 F_(IRQ1, IP0_31_28)
+#define GPSR2_0 F_(IRQ0, IP0_27_24)
+
+/* GPSR3 */
+#define GPSR3_15 F_(SD1_WP, IP11_23_20)
+#define GPSR3_14 F_(SD1_CD, IP11_19_16)
+#define GPSR3_13 F_(SD0_WP, IP11_15_12)
+#define GPSR3_12 F_(SD0_CD, IP11_11_8)
+#define GPSR3_11 F_(SD1_DAT3, IP8_31_28)
+#define GPSR3_10 F_(SD1_DAT2, IP8_27_24)
+#define GPSR3_9 F_(SD1_DAT1, IP8_23_20)
+#define GPSR3_8 F_(SD1_DAT0, IP8_19_16)
+#define GPSR3_7 F_(SD1_CMD, IP8_15_12)
+#define GPSR3_6 F_(SD1_CLK, IP8_11_8)
+#define GPSR3_5 F_(SD0_DAT3, IP8_7_4)
+#define GPSR3_4 F_(SD0_DAT2, IP8_3_0)
+#define GPSR3_3 F_(SD0_DAT1, IP7_31_28)
+#define GPSR3_2 F_(SD0_DAT0, IP7_27_24)
+#define GPSR3_1 F_(SD0_CMD, IP7_23_20)
+#define GPSR3_0 F_(SD0_CLK, IP7_19_16)
+
+/* GPSR4 */
+#define GPSR4_17 F_(SD3_DS, IP11_11_8)
+#define GPSR4_16 F_(SD3_DAT7, IP10_7_4)
+#define GPSR4_15 F_(SD3_DAT6, IP10_3_0)
+#define GPSR4_14 F_(SD3_DAT5, IP9_31_28)
+#define GPSR4_13 F_(SD3_DAT4, IP9_27_24)
+#define GPSR4_12 F_(SD3_DAT3, IP10_19_16)
+#define GPSR4_11 F_(SD3_DAT2, IP10_15_12)
+#define GPSR4_10 F_(SD3_DAT1, IP10_11_8)
+#define GPSR4_9 F_(SD3_DAT0, IP10_7_4)
+#define GPSR4_8 F_(SD3_CMD, IP10_3_0)
+#define GPSR4_7 F_(SD3_CLK, IP9_31_28)
+#define GPSR4_6 F_(SD2_DS, IP9_23_20)
+#define GPSR4_5 F_(SD2_DAT3, IP9_19_16)
+#define GPSR4_4 F_(SD2_DAT2, IP9_15_12)
+#define GPSR4_3 F_(SD2_DAT1, IP9_11_8)
+#define GPSR4_2 F_(SD2_DAT0, IP9_7_4)
+#define GPSR4_1 F_(SD2_CMD, IP9_7_4)
+#define GPSR4_0 F_(SD2_CLK, IP9_3_0)
+
+/* GPSR5 */
+#define GPSR5_25 F_(MLB_DAT, IP14_19_16)
+#define GPSR5_24 F_(MLB_SIG, IP14_15_12)
+#define GPSR5_23 F_(MLB_CLK, IP14_11_8)
+#define GPSR5_22 FM(MSIOF0_RXD)
+#define GPSR5_21 F_(MSIOF0_SS2, IP14_7_4)
+#define GPSR5_20 FM(MSIOF0_TXD)
+#define GPSR5_19 F_(MSIOF0_SS1, IP14_3_0)
+#define GPSR5_18 F_(MSIOF0_SYNC, IP13_31_28)
+#define GPSR5_17 FM(MSIOF0_SCK)
+#define GPSR5_16 F_(HRTS0_N, IP13_27_24)
+#define GPSR5_15 F_(HCTS0_N, IP13_23_20)
+#define GPSR5_14 F_(HTX0, IP13_19_16)
+#define GPSR5_13 F_(HRX0, IP13_15_12)
+#define GPSR5_12 F_(HSCK0, IP13_11_8)
+#define GPSR5_11 F_(RX2_A, IP13_7_4)
+#define GPSR5_10 F_(TX2_A, IP13_3_0)
+#define GPSR5_9 F_(SCK2, IP12_31_28)
+#define GPSR5_8 F_(RTS1_N_TANS, IP12_27_24)
+#define GPSR5_7 F_(CTS1_N, IP12_23_20)
+#define GPSR5_6 F_(TX1_A, IP12_19_16)
+#define GPSR5_5 F_(RX1_A, IP12_15_12)
+#define GPSR5_4 F_(RTS0_N_TANS, IP12_11_8)
+#define GPSR5_3 F_(CTS0_N, IP12_7_4)
+#define GPSR5_2 F_(TX0, IP12_3_0)
+#define GPSR5_1 F_(RX0, IP11_31_28)
+#define GPSR5_0 F_(SCK0, IP11_27_24)
+
+/* GPSR6 */
+#define GPSR6_31 F_(GP6_31, IP18_7_4)
+#define GPSR6_30 F_(GP6_30, IP18_3_0)
+#define GPSR6_29 F_(USB30_OVC, IP17_31_28)
+#define GPSR6_28 F_(USB30_PWEN, IP17_27_24)
+#define GPSR6_27 F_(USB1_OVC, IP17_23_20)
+#define GPSR6_26 F_(USB1_PWEN, IP17_19_16)
+#define GPSR6_25 F_(USB0_OVC, IP17_15_12)
+#define GPSR6_24 F_(USB0_PWEN, IP17_11_8)
+#define GPSR6_23 F_(AUDIO_CLKB_B, IP17_7_4)
+#define GPSR6_22 F_(AUDIO_CLKA_A, IP17_3_0)
+#define GPSR6_21 F_(SSI_SDATA9_A, IP16_31_28)
+#define GPSR6_20 F_(SSI_SDATA8, IP16_27_24)
+#define GPSR6_19 F_(SSI_SDATA7, IP16_23_20)
+#define GPSR6_18 F_(SSI_WS78, IP16_19_16)
+#define GPSR6_17 F_(SSI_SCK78, IP16_15_12)
+#define GPSR6_16 F_(SSI_SDATA6, IP16_11_8)
+#define GPSR6_15 F_(SSI_WS6, IP16_7_4)
+#define GPSR6_14 F_(SSI_SCK6, IP16_3_0)
+#define GPSR6_13 FM(SSI_SDATA5)
+#define GPSR6_12 FM(SSI_WS5)
+#define GPSR6_11 FM(SSI_SCK5)
+#define GPSR6_10 F_(SSI_SDATA4, IP15_31_28)
+#define GPSR6_9 F_(SSI_WS4, IP15_27_24)
+#define GPSR6_8 F_(SSI_SCK4, IP15_23_20)
+#define GPSR6_7 F_(SSI_SDATA3, IP15_19_16)
+#define GPSR6_6 F_(SSI_WS34, IP15_15_12)
+#define GPSR6_5 F_(SSI_SCK34, IP15_11_8)
+#define GPSR6_4 F_(SSI_SDATA2_A, IP15_7_4)
+#define GPSR6_3 F_(SSI_SDATA1_A, IP15_3_0)
+#define GPSR6_2 F_(SSI_SDATA0, IP14_31_28)
+#define GPSR6_1 F_(SSI_WS0129, IP14_27_24)
+#define GPSR6_0 F_(SSI_SCK0129, IP14_23_20)
+
+/* GPSR7 */
+#define GPSR7_3 FM(GP7_03)
+#define GPSR7_2 FM(HDMI0_CEC)
+#define GPSR7_1 FM(AVS2)
+#define GPSR7_0 FM(AVS1)
+
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
+#define IP0_3_0 FM(AVB_MDC) F_(0, 0) FM(MSIOF2_SS2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(AVB_MAGIC) F_(0, 0) FM(MSIOF2_SS1_C) FM(SCK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8 FM(AVB_PHY_INT) F_(0, 0) FM(MSIOF2_SYNC_C) FM(RX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12 FM(AVB_LINK) F_(0, 0) FM(MSIOF2_SCK_C) FM(TX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16 FM(AVB_AVTP_MATCH_A) F_(0, 0) FM(MSIOF2_RXD_C) FM(CTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20 FM(AVB_AVTP_CAPTURE_A) F_(0, 0) FM(MSIOF2_TXD_C) FM(RTS4_N_TANS_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24 FM(IRQ0) FM(QPOLB) F_(0, 0) FM(DU_CDE) FM(VI4_DATA0_B) FM(CAN0_TX_B) FM(CANFD0_TX_B) FM(MSIOF3_SS1_E) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28 FM(IRQ1) FM(QPOLA) F_(0, 0) FM(DU_DISP) FM(VI4_DATA1_B) FM(CAN0_RX_B) FM(CANFD0_RX_B) FM(MSIOF3_SS2_E) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0 FM(IRQ2) FM(QCPV_QDE) F_(0, 0) FM(DU_EXODDF_DU_ODDF_DISP_CDE) FM(VI4_DATA2_B) F_(0, 0) F_(0, 0) FM(MSIOF3_SYNC_E) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4 FM(IRQ3) FM(QSTVB_QVE) FM(A25) FM(DU_DOTCLKOUT1) FM(VI4_DATA3_B) F_(0, 0) F_(0, 0) FM(MSIOF3_SCK_E) F_(0, 0) FM(PWM4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8 FM(IRQ4) FM(QSTH_QHS) FM(A24) FM(DU_EXHSYNC_DU_HSYNC) FM(VI4_DATA4_B) F_(0, 0) F_(0, 0) FM(MSIOF3_RXD_E) F_(0, 0) FM(PWM5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12 FM(IRQ5) FM(QSTB_QHE) FM(A23) FM(DU_EXVSYNC_DU_VSYNC) FM(VI4_DATA5_B) F_(0, 0) F_(0, 0) FM(MSIOF3_TXD_E) F_(0, 0) FM(PWM6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16 FM(PWM0) FM(AVB_AVTP_PPS)FM(A22) F_(0, 0) FM(VI4_DATA6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IECLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20 FM(PWM1_A) F_(0, 0) FM(A21) FM(HRX3_D) FM(VI4_DATA7_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IERX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24 FM(PWM2_A) F_(0, 0) FM(A20) FM(HTX3_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IETX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28 FM(A0) FM(LCDOUT16) FM(MSIOF3_SYNC_B) F_(0, 0) FM(VI4_DATA8) F_(0, 0) FM(DU_DB0) F_(0, 0) F_(0, 0) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0 FM(A1) FM(LCDOUT17) FM(MSIOF3_TXD_B) F_(0, 0) FM(VI4_DATA9) F_(0, 0) FM(DU_DB1) F_(0, 0) F_(0, 0) FM(PWM4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4 FM(A2) FM(LCDOUT18) FM(MSIOF3_SCK_B) F_(0, 0) FM(VI4_DATA10) F_(0, 0) FM(DU_DB2) F_(0, 0) F_(0, 0) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8 FM(A3) FM(LCDOUT19) FM(MSIOF3_RXD_B) F_(0, 0) FM(VI4_DATA11) F_(0, 0) FM(DU_DB3) F_(0, 0) F_(0, 0) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_15_12 FM(A4) FM(LCDOUT20) FM(MSIOF3_SS1_B) F_(0, 0) FM(VI4_DATA12) FM(VI5_DATA12) FM(DU_DB4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16 FM(A5) FM(LCDOUT21) FM(MSIOF3_SS2_B) FM(SCK4_B) FM(VI4_DATA13) FM(VI5_DATA13) FM(DU_DB5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(A6) FM(LCDOUT22) FM(MSIOF2_SS1_A) FM(RX4_B) FM(VI4_DATA14) FM(VI5_DATA14) FM(DU_DB6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(A7) FM(LCDOUT23) FM(MSIOF2_SS2_A) FM(TX4_B) FM(VI4_DATA15) FM(VI5_DATA15) FM(DU_DB7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28 FM(A8) FM(RX3_B) FM(MSIOF2_SYNC_A) FM(HRX4_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(SDA6_A) FM(AVB_AVTP_MATCH_B) FM(PWM1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0 FM(A9) F_(0, 0) FM(MSIOF2_SCK_A) FM(CTS4_N_B) F_(0, 0) FM(VI5_VSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4 FM(A10) F_(0, 0) FM(MSIOF2_RXD_A) FM(RTS4_N_TANS_B) F_(0, 0) FM(VI5_HSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8 FM(A11) FM(TX3_B) FM(MSIOF2_TXD_A) FM(HTX4_B) FM(HSCK4) FM(VI5_FIELD) F_(0, 0) FM(SCL6_A) FM(AVB_AVTP_CAPTURE_B) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
+#define IP3_15_12 FM(A12) FM(LCDOUT12) FM(MSIOF3_SCK_C) F_(0, 0) FM(HRX4_A) FM(VI5_DATA8) FM(DU_DG4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16 FM(A13) FM(LCDOUT13) FM(MSIOF3_SYNC_C) F_(0, 0) FM(HTX4_A) FM(VI5_DATA9) FM(DU_DG5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20 FM(A14) FM(LCDOUT14) FM(MSIOF3_RXD_C) F_(0, 0) FM(HCTS4_N) FM(VI5_DATA10) FM(DU_DG6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24 FM(A15) FM(LCDOUT15) FM(MSIOF3_TXD_C) F_(0, 0) FM(HRTS4_N) FM(VI5_DATA11) FM(DU_DG7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28 FM(A16) FM(LCDOUT8) F_(0, 0) F_(0, 0) FM(VI4_FIELD) F_(0, 0) FM(DU_DG0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_3_0 FM(A17) FM(LCDOUT9) F_(0, 0) F_(0, 0) FM(VI4_VSYNC_N) F_(0, 0) FM(DU_DG1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4 FM(A18) FM(LCDOUT10) F_(0, 0) F_(0, 0) FM(VI4_HSYNC_N) F_(0, 0) FM(DU_DG2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8 FM(A19) FM(LCDOUT11) F_(0, 0) F_(0, 0) FM(VI4_CLKENB) F_(0, 0) FM(DU_DG3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12 FM(CS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI5_CLKENB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16 FM(CS1_N_A26) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI5_CLK) F_(0, 0) FM(EX_WAIT0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20 FM(BS_N) FM(QSTVA_QVS) FM(MSIOF3_SCK_D) FM(SCK3) FM(HSCK3) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN1_TX) FM(CANFD1_TX) FM(IETX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24 FM(RD_N) F_(0, 0) FM(MSIOF3_SYNC_D) FM(RX3_A) FM(HRX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_TX_A) FM(CANFD0_TX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28 FM(RD_WR_N) F_(0, 0) FM(MSIOF3_RXD_D) FM(TX3_A) FM(HTX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_RX_A) FM(CANFD0_RX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0 FM(WE0_N) F_(0, 0) FM(MSIOF3_TXD_D) FM(CTS3_N) FM(HCTS3_N) F_(0, 0) F_(0, 0) FM(SCL6_B) FM(CAN_CLK) F_(0, 0) FM(IECLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4 FM(WE1_N) F_(0, 0) FM(MSIOF3_SS1_D) FM(RTS3_N_TANS) FM(HRTS3_N) F_(0, 0) F_(0, 0) FM(SDA6_B) FM(CAN1_RX) FM(CANFD1_RX) FM(IERX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8 FM(EX_WAIT0_A) FM(QCLK) F_(0, 0) F_(0, 0) FM(VI4_CLK) F_(0, 0) FM(DU_DOTCLKOUT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12 FM(D0) FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A) F_(0, 0) FM(VI4_DATA16) FM(VI5_DATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16 FM(D1) FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A) F_(0, 0) FM(VI4_DATA17) FM(VI5_DATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20 FM(D2) F_(0, 0) FM(MSIOF3_RXD_A) F_(0, 0) FM(VI4_DATA18) FM(VI5_DATA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24 FM(D3) F_(0, 0) FM(MSIOF3_TXD_A) F_(0, 0) FM(VI4_DATA19) FM(VI5_DATA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28 FM(D4) FM(MSIOF2_SCK_B)F_(0, 0) F_(0, 0) FM(VI4_DATA20) FM(VI5_DATA4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0 FM(D5) FM(MSIOF2_SYNC_B)F_(0, 0) F_(0, 0) FM(VI4_DATA21) FM(VI5_DATA5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4 FM(D6) FM(MSIOF2_RXD_B)F_(0, 0) F_(0, 0) FM(VI4_DATA22) FM(VI5_DATA6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8 FM(D7) FM(MSIOF2_TXD_B)F_(0, 0) F_(0, 0) FM(VI4_DATA23) FM(VI5_DATA7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12 FM(D8) FM(LCDOUT0) FM(MSIOF2_SCK_D) FM(SCK4_C) FM(VI4_DATA0_A) F_(0, 0) FM(DU_DR0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16 FM(D9) FM(LCDOUT1) FM(MSIOF2_SYNC_D) F_(0, 0) FM(VI4_DATA1_A) F_(0, 0) FM(DU_DR1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20 FM(D10) FM(LCDOUT2) FM(MSIOF2_RXD_D) FM(HRX3_B) FM(VI4_DATA2_A) FM(CTS4_N_C) FM(DU_DR2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24 FM(D11) FM(LCDOUT3) FM(MSIOF2_TXD_D) FM(HTX3_B) FM(VI4_DATA3_A) FM(RTS4_N_TANS_C)FM(DU_DR3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28 FM(D12) FM(LCDOUT4) FM(MSIOF2_SS1_D) FM(RX4_C) FM(VI4_DATA4_A) F_(0, 0) FM(DU_DR4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
+#define IP7_3_0 FM(D13) FM(LCDOUT5) FM(MSIOF2_SS2_D) FM(TX4_C) FM(VI4_DATA5_A) F_(0, 0) FM(DU_DR5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4 FM(D14) FM(LCDOUT6) FM(MSIOF3_SS1_A) FM(HRX3_C) FM(VI4_DATA6_A) F_(0, 0) FM(DU_DR6) FM(SCL6_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8 FM(D15) FM(LCDOUT7) FM(MSIOF3_SS2_A) FM(HTX3_C) FM(VI4_DATA7_A) F_(0, 0) FM(DU_DR7) FM(SDA6_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_15_12 FM(FSCLKST) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16 FM(SD0_CLK) F_(0, 0) FM(MSIOF1_SCK_E) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_23_20 FM(SD0_CMD) F_(0, 0) FM(MSIOF1_SYNC_E) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24 FM(SD0_DAT0) F_(0, 0) FM(MSIOF1_RXD_E) F_(0, 0) F_(0, 0) FM(TS_SCK0_B) FM(STP_ISCLK_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28 FM(SD0_DAT1) F_(0, 0) FM(MSIOF1_TXD_E) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_3_0 FM(SD0_DAT2) F_(0, 0) FM(MSIOF1_SS1_E) F_(0, 0) F_(0, 0) FM(TS_SDAT0_B) FM(STP_ISD_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4 FM(SD0_DAT3) F_(0, 0) FM(MSIOF1_SS2_E) F_(0, 0) F_(0, 0) FM(TS_SDEN0_B) FM(STP_ISEN_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8 FM(SD1_CLK) F_(0, 0) FM(MSIOF1_SCK_G) F_(0, 0) F_(0, 0) FM(SIM0_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12 FM(SD1_CMD) F_(0, 0) FM(MSIOF1_SYNC_G) FM(NFCE_N_B) F_(0, 0) FM(SIM0_D_A) FM(STP_IVCXO27_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16 FM(SD1_DAT0) FM(SD2_DAT4) FM(MSIOF1_RXD_G) FM(NFWP_N_B) F_(0, 0) FM(TS_SCK1_B) FM(STP_ISCLK_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20 FM(SD1_DAT1) FM(SD2_DAT5) FM(MSIOF1_TXD_G) FM(NFDATA14_B) F_(0, 0) FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24 FM(SD1_DAT2) FM(SD2_DAT6) FM(MSIOF1_SS1_G) FM(NFDATA15_B) F_(0, 0) FM(TS_SDAT1_B) FM(STP_ISD_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28 FM(SD1_DAT3) FM(SD2_DAT7) FM(MSIOF1_SS2_G) FM(NFRB_N_B) F_(0, 0) FM(TS_SDEN1_B) FM(STP_ISEN_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0 FM(SD2_CLK) F_(0, 0) F_(0, 0) FM(NFDATA8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4 FM(SD2_CMD) F_(0, 0) F_(0, 0) FM(NFDATA9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8 FM(SD2_DAT0) F_(0, 0) F_(0, 0) FM(NFDATA10) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12 FM(SD2_DAT1) F_(0, 0) F_(0, 0) FM(NFDATA11) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16 FM(SD2_DAT2) F_(0, 0) F_(0, 0) FM(NFDATA12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20 FM(SD2_DAT3) F_(0, 0) F_(0, 0) FM(NFDATA13) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24 FM(SD2_DS) F_(0, 0) F_(0, 0) FM(NFALE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SATA_DEVSLP_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28 FM(SD3_CLK) F_(0, 0) F_(0, 0) FM(NFWE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0 FM(SD3_CMD) F_(0, 0) F_(0, 0) FM(NFRE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4 FM(SD3_DAT0) F_(0, 0) F_(0, 0) FM(NFDATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8 FM(SD3_DAT1) F_(0, 0) F_(0, 0) FM(NFDATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12 FM(SD3_DAT2) F_(0, 0) F_(0, 0) FM(NFDATA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16 FM(SD3_DAT3) F_(0, 0) F_(0, 0) FM(NFDATA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20 FM(SD3_DAT4) FM(SD2_CD_A) F_(0, 0) FM(NFDATA4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24 FM(SD3_DAT5) FM(SD2_WP_A) F_(0, 0) FM(NFDATA5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28 FM(SD3_DAT6) FM(SD3_CD) F_(0, 0) FM(NFDATA6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0 FM(SD3_DAT7) FM(SD3_WP) F_(0, 0) FM(NFDATA7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4 FM(SD3_DS) F_(0, 0) F_(0, 0) FM(NFCLE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8 FM(SD0_CD) F_(0, 0) FM(NFDATA14_A) F_(0, 0) FM(SCL2_B) FM(SIM0_RST_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
+#define IP11_15_12 FM(SD0_WP) F_(0, 0) FM(NFDATA15_A) F_(0, 0) FM(SDA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16 FM(SD1_CD) F_(0, 0) FM(NFRB_N_A) F_(0, 0) F_(0, 0) FM(SIM0_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20 FM(SD1_WP) F_(0, 0) FM(NFCE_N_A) F_(0, 0) F_(0, 0) FM(SIM0_D_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0) FM(HSCK1_B) FM(MSIOF1_SS2_B) FM(AUDIO_CLKC_B) FM(SDA2_A) FM(SIM0_RST_B) FM(STP_OPWM_0_C) FM(RIF0_CLK_B) F_(0, 0) FM(ADICHS2) FM(SCK5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28 FM(RX0) FM(HRX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK0_C) FM(STP_ISCLK_0_C) FM(RIF0_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_3_0 FM(TX0) FM(HTX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C) FM(RIF0_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4 FM(CTS0_N) FM(HCTS1_N_B) FM(MSIOF1_SYNC_B) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C) FM(RIF1_SYNC_B) FM(AUDIO_CLKOUT_C) FM(ADICS_SAMP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8 FM(RTS0_N_TANS) FM(HRTS1_N_B) FM(MSIOF1_SS1_B) FM(AUDIO_CLKA_B) FM(SCL2_A) F_(0, 0) FM(STP_IVCXO27_1_C) FM(RIF0_SYNC_B) FM(FSO_TOE_A) FM(ADICHS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12 FM(RX1_A) FM(HRX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT0_C) FM(STP_ISD_0_C) FM(RIF1_CLK_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16 FM(TX1_A) FM(HTX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDEN0_C) FM(STP_ISEN_0_C) FM(RIF1_D0_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20 FM(CTS1_N) FM(HCTS1_N_A) FM(MSIOF1_RXD_B) F_(0, 0) F_(0, 0) FM(TS_SDEN1_C) FM(STP_ISEN_1_C) FM(RIF1_D0_B) F_(0, 0) FM(ADIDATA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24 FM(RTS1_N_TANS) FM(HRTS1_N_A) FM(MSIOF1_TXD_B) F_(0, 0) F_(0, 0) FM(TS_SDAT1_C) FM(STP_ISD_1_C) FM(RIF1_D1_B) F_(0, 0) FM(ADICHS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_31_28 FM(SCK2) FM(SCIF_CLK_B) FM(MSIOF1_SCK_B) F_(0, 0) F_(0, 0) FM(TS_SCK1_C) FM(STP_ISCLK_1_C) FM(RIF1_CLK_B) F_(0, 0) FM(ADICLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0 FM(TX2_A) F_(0, 0) F_(0, 0) FM(SD2_CD_B) FM(SCL1_A) F_(0, 0) FM(FMCLK_A) FM(RIF1_D1_C) F_(0, 0) FM(FSO_CFE_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4 FM(RX2_A) F_(0, 0) F_(0, 0) FM(SD2_WP_B) FM(SDA1_A) F_(0, 0) FM(FMIN_A) FM(RIF1_SYNC_C) F_(0, 0) FM(FSO_CFE_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8 FM(HSCK0) F_(0, 0) FM(MSIOF1_SCK_D) FM(AUDIO_CLKB_A) FM(SSI_SDATA1_B)FM(TS_SCK0_D) FM(STP_ISCLK_0_D) FM(RIF0_CLK_C) F_(0, 0) F_(0, 0) FM(RX5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12 FM(HRX0) F_(0, 0) FM(MSIOF1_RXD_D) F_(0, 0) FM(SSI_SDATA2_B)FM(TS_SDEN0_D) FM(STP_ISEN_0_D) FM(RIF0_D0_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16 FM(HTX0) F_(0, 0) FM(MSIOF1_TXD_D) F_(0, 0) FM(SSI_SDATA9_B)FM(TS_SDAT0_D) FM(STP_ISD_0_D) FM(RIF0_D1_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20 FM(HCTS0_N) FM(RX2_B) FM(MSIOF1_SYNC_D) F_(0, 0) FM(SSI_SCK9_A) FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D) FM(RIF0_SYNC_C) FM(AUDIO_CLKOUT1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24 FM(HRTS0_N) FM(TX2_B) FM(MSIOF1_SS1_D) F_(0, 0) FM(SSI_WS9_A) F_(0, 0) FM(STP_IVCXO27_0_D) FM(BPFCLK_A) FM(AUDIO_CLKOUT2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28 FM(MSIOF0_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT_A) F_(0, 0) FM(TX5_B) F_(0, 0) F_(0, 0) FM(BPFCLK_D) F_(0, 0) F_(0, 0)
+#define IP14_3_0 FM(MSIOF0_SS1) FM(RX5_A) FM(NFWP_N_A) FM(AUDIO_CLKA_C) FM(SSI_SCK2_A) F_(0, 0) FM(STP_IVCXO27_0_C) F_(0, 0) FM(AUDIO_CLKOUT3_A) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4 FM(MSIOF0_SS2) FM(TX5_A) FM(MSIOF1_SS2_D) FM(AUDIO_CLKC_A) FM(SSI_WS2_A) F_(0, 0) FM(STP_OPWM_0_D) F_(0, 0) FM(AUDIO_CLKOUT_D) F_(0, 0) FM(SPEEDIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8 FM(MLB_CLK) F_(0, 0) FM(MSIOF1_SCK_F) F_(0, 0) FM(SCL1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12 FM(MLB_SIG) FM(RX1_B) FM(MSIOF1_SYNC_F) F_(0, 0) FM(SDA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16 FM(MLB_DAT) FM(TX1_B) FM(MSIOF1_RXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20 FM(SSI_SCK0129) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24 FM(SSI_WS0129) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
+#define IP14_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8 FM(SSI_SCK34) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12 FM(SSI_WS34) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16 FM(SSI_SDATA3) FM(HRTS2_N_A) FM(MSIOF1_TXD_A) F_(0, 0) F_(0, 0) FM(TS_SCK0_A) FM(STP_ISCLK_0_A) FM(RIF0_D1_A) FM(RIF2_D0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20 FM(SSI_SCK4) FM(HRX2_A) FM(MSIOF1_SCK_A) F_(0, 0) F_(0, 0) FM(TS_SDAT0_A) FM(STP_ISD_0_A) FM(RIF0_CLK_A) FM(RIF2_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24 FM(SSI_WS4) FM(HTX2_A) FM(MSIOF1_SYNC_A) F_(0, 0) F_(0, 0) FM(TS_SDEN0_A) FM(STP_ISEN_0_A) FM(RIF0_SYNC_A) FM(RIF2_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28 FM(SSI_SDATA4) FM(HSCK2_A) FM(MSIOF1_RXD_A) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A) FM(RIF0_D0_A) FM(RIF2_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_3_0 FM(SSI_SCK6) F_(0, 0) F_(0, 0) FM(SIM0_RST_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_7_4 FM(SSI_WS6) F_(0, 0) F_(0, 0) FM(SIM0_D_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_11_8 FM(SSI_SDATA6) F_(0, 0) F_(0, 0) FM(SIM0_CLK_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_15_12 FM(SSI_SCK78) FM(HRX2_B) FM(MSIOF1_SCK_C) F_(0, 0) F_(0, 0) FM(TS_SCK1_A) FM(STP_ISCLK_1_A) FM(RIF1_CLK_A) FM(RIF3_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_19_16 FM(SSI_WS78) FM(HTX2_B) FM(MSIOF1_SYNC_C) F_(0, 0) F_(0, 0) FM(TS_SDAT1_A) FM(STP_ISD_1_A) FM(RIF1_SYNC_A) FM(RIF3_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP16_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP17_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HSCK2_C) F_(0, 0) F_(0, 0)
+#define IP17_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(HRX2_C) F_(0, 0) F_(0, 0)
+#define IP17_19_16 FM(USB1_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_CLK_C) FM(SSI_SCK1_A) FM(TS_SCK0_E) FM(STP_ISCLK_0_E) FM(FMCLK_B) FM(RIF2_CLK_B) F_(0, 0) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) FM(HTX2_C) F_(0, 0) F_(0, 0)
+#define IP17_23_20 FM(USB1_OVC) F_(0, 0) FM(MSIOF1_SS2_C) F_(0, 0) FM(SSI_WS1_A) FM(TS_SDAT0_E) FM(STP_ISD_0_E) FM(FMIN_B) FM(RIF2_SYNC_B) F_(0, 0) FM(REMOCON_B) F_(0, 0) F_(0, 0) FM(HCTS2_N_C) F_(0, 0) F_(0, 0)
+#define IP17_27_24 FM(USB30_PWEN) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT_B) FM(SSI_SCK2_B) FM(TS_SDEN1_D) FM(STP_ISEN_1_D) FM(STP_OPWM_0_E)FM(RIF3_D0_B) F_(0, 0) FM(TCLK2_B) FM(TPU0TO0) FM(BPFCLK_C) FM(HRTS2_N_C) F_(0, 0) F_(0, 0)
+#define IP17_31_28 FM(USB30_OVC) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT1_B) FM(SSI_WS2_B) FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D) FM(STP_IVCXO27_0_E)FM(RIF3_D1_B) F_(0, 0) FM(FSO_TOE_B) FM(TPU0TO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP18_3_0 FM(GP6_30) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT2_B) FM(SSI_SCK9_B) FM(TS_SDEN0_E) FM(STP_ISEN_0_E) F_(0, 0) FM(RIF2_D0_B) F_(0, 0) FM(FSO_CFE_0_A) FM(TPU0TO2) F_(0, 0) FM(FMCLK_C) FM(FMCLK_D) F_(0, 0)
+#define IP18_7_4 FM(GP6_31) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT3_B) FM(SSI_WS9_B) FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E) F_(0, 0) FM(RIF2_D1_B) F_(0, 0) FM(FSO_CFE_1_A) FM(TPU0TO3) F_(0, 0) FM(FMIN_C) FM(FMIN_D) F_(0, 0)
+
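+/*
+ * All GPSR bits, one column per GPIO bank; a bank is left blank at bit
+ * positions where it has no pin.
+ */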
+#define PINMUX_GPSR \
+\
+ GPSR6_31 \
+ GPSR6_30 \
+ GPSR6_29 \
+ GPSR1_28 GPSR6_28 \
+ GPSR1_27 GPSR6_27 \
+ GPSR1_26 GPSR6_26 \
+ GPSR1_25 GPSR5_25 GPSR6_25 \
+ GPSR1_24 GPSR5_24 GPSR6_24 \
+ GPSR1_23 GPSR5_23 GPSR6_23 \
+ GPSR1_22 GPSR5_22 GPSR6_22 \
+ GPSR1_21 GPSR5_21 GPSR6_21 \
+ GPSR1_20 GPSR5_20 GPSR6_20 \
+ GPSR1_19 GPSR5_19 GPSR6_19 \
+ GPSR1_18 GPSR5_18 GPSR6_18 \
+ GPSR1_17 GPSR4_17 GPSR5_17 GPSR6_17 \
+ GPSR1_16 GPSR4_16 GPSR5_16 GPSR6_16 \
+GPSR0_15 GPSR1_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0_3_0) IP0_3_0 FM(IP1_3_0) IP1_3_0 FM(IP2_3_0) IP2_3_0 FM(IP3_3_0) IP3_3_0 \
+FM(IP0_7_4) IP0_7_4 FM(IP1_7_4) IP1_7_4 FM(IP2_7_4) IP2_7_4 FM(IP3_7_4) IP3_7_4 \
+FM(IP0_11_8) IP0_11_8 FM(IP1_11_8) IP1_11_8 FM(IP2_11_8) IP2_11_8 FM(IP3_11_8) IP3_11_8 \
+FM(IP0_15_12) IP0_15_12 FM(IP1_15_12) IP1_15_12 FM(IP2_15_12) IP2_15_12 FM(IP3_15_12) IP3_15_12 \
+FM(IP0_19_16) IP0_19_16 FM(IP1_19_16) IP1_19_16 FM(IP2_19_16) IP2_19_16 FM(IP3_19_16) IP3_19_16 \
+FM(IP0_23_20) IP0_23_20 FM(IP1_23_20) IP1_23_20 FM(IP2_23_20) IP2_23_20 FM(IP3_23_20) IP3_23_20 \
+FM(IP0_27_24) IP0_27_24 FM(IP1_27_24) IP1_27_24 FM(IP2_27_24) IP2_27_24 FM(IP3_27_24) IP3_27_24 \
+FM(IP0_31_28) IP0_31_28 FM(IP1_31_28) IP1_31_28 FM(IP2_31_28) IP2_31_28 FM(IP3_31_28) IP3_31_28 \
+\
+FM(IP4_3_0) IP4_3_0 FM(IP5_3_0) IP5_3_0 FM(IP6_3_0) IP6_3_0 FM(IP7_3_0) IP7_3_0 \
+FM(IP4_7_4) IP4_7_4 FM(IP5_7_4) IP5_7_4 FM(IP6_7_4) IP6_7_4 FM(IP7_7_4) IP7_7_4 \
+FM(IP4_11_8) IP4_11_8 FM(IP5_11_8) IP5_11_8 FM(IP6_11_8) IP6_11_8 FM(IP7_11_8) IP7_11_8 \
+FM(IP4_15_12) IP4_15_12 FM(IP5_15_12) IP5_15_12 FM(IP6_15_12) IP6_15_12 FM(IP7_15_12) IP7_15_12 \
+FM(IP4_19_16) IP4_19_16 FM(IP5_19_16) IP5_19_16 FM(IP6_19_16) IP6_19_16 FM(IP7_19_16) IP7_19_16 \
+FM(IP4_23_20) IP4_23_20 FM(IP5_23_20) IP5_23_20 FM(IP6_23_20) IP6_23_20 FM(IP7_23_20) IP7_23_20 \
+FM(IP4_27_24) IP4_27_24 FM(IP5_27_24) IP5_27_24 FM(IP6_27_24) IP6_27_24 FM(IP7_27_24) IP7_27_24 \
+FM(IP4_31_28) IP4_31_28 FM(IP5_31_28) IP5_31_28 FM(IP6_31_28) IP6_31_28 FM(IP7_31_28) IP7_31_28 \
+\
+FM(IP8_3_0) IP8_3_0 FM(IP9_3_0) IP9_3_0 FM(IP10_3_0) IP10_3_0 FM(IP11_3_0) IP11_3_0 \
+FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 FM(IP11_7_4) IP11_7_4 \
+FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 FM(IP11_11_8) IP11_11_8 \
+FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 FM(IP11_15_12) IP11_15_12 \
+FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 FM(IP11_19_16) IP11_19_16 \
+FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 FM(IP11_23_20) IP11_23_20 \
+FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 FM(IP11_27_24) IP11_27_24 \
+FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28 FM(IP11_31_28) IP11_31_28 \
+\
+FM(IP12_3_0) IP12_3_0 FM(IP13_3_0) IP13_3_0 FM(IP14_3_0) IP14_3_0 FM(IP15_3_0) IP15_3_0 \
+FM(IP12_7_4) IP12_7_4 FM(IP13_7_4) IP13_7_4 FM(IP14_7_4) IP14_7_4 FM(IP15_7_4) IP15_7_4 \
+FM(IP12_11_8) IP12_11_8 FM(IP13_11_8) IP13_11_8 FM(IP14_11_8) IP14_11_8 FM(IP15_11_8) IP15_11_8 \
+FM(IP12_15_12) IP12_15_12 FM(IP13_15_12) IP13_15_12 FM(IP14_15_12) IP14_15_12 FM(IP15_15_12) IP15_15_12 \
+FM(IP12_19_16) IP12_19_16 FM(IP13_19_16) IP13_19_16 FM(IP14_19_16) IP14_19_16 FM(IP15_19_16) IP15_19_16 \
+FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM(IP15_23_20) IP15_23_20 \
+FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
+FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28 \
+\
+FM(IP16_3_0) IP16_3_0 FM(IP17_3_0) IP17_3_0 FM(IP18_3_0) IP18_3_0 \
+FM(IP16_7_4) IP16_7_4 FM(IP17_7_4) IP17_7_4 FM(IP18_7_4) IP18_7_4 \
+FM(IP16_11_8) IP16_11_8 FM(IP17_11_8) IP17_11_8 \
+FM(IP16_15_12) IP16_15_12 FM(IP17_15_12) IP17_15_12 \
+FM(IP16_19_16) IP16_19_16 FM(IP17_19_16) IP17_19_16 \
+FM(IP16_23_20) IP16_23_20 FM(IP17_23_20) IP17_23_20 \
+FM(IP16_27_24) IP16_27_24 FM(IP17_27_24) IP17_27_24 \
+FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
+
+/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL0_31_30_29 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1) FM(SEL_MSIOF3_2) FM(SEL_MSIOF3_3) FM(SEL_MSIOF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_28_27 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1) FM(SEL_MSIOF2_2) FM(SEL_MSIOF2_3)
+#define MOD_SEL0_26_25_24 FM(SEL_MSIOF1_0) FM(SEL_MSIOF1_1) FM(SEL_MSIOF1_2) FM(SEL_MSIOF1_3) FM(SEL_MSIOF1_4) FM(SEL_MSIOF1_5) FM(SEL_MSIOF1_6) F_(0, 0)
+#define MOD_SEL0_23 FM(SEL_LBSC_0) FM(SEL_LBSC_1)
+#define MOD_SEL0_22 FM(SEL_IEBUS_0) FM(SEL_IEBUS_1)
+#define MOD_SEL0_21 FM(SEL_I2C2_0) FM(SEL_I2C2_1)
+#define MOD_SEL0_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1)
+#define MOD_SEL0_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1)
+#define MOD_SEL0_18_17 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3)
+#define MOD_SEL0_16 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
+#define MOD_SEL0_15 FM(SEL_FSO_0) FM(SEL_FSO_1)
+#define MOD_SEL0_14_13 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1) FM(SEL_HSCIF2_2) F_(0, 0)
+#define MOD_SEL0_12 FM(SEL_ETHERAVB_0) FM(SEL_ETHERAVB_1)
+#define MOD_SEL0_11 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
+#define MOD_SEL0_10 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
+#define MOD_SEL0_9_8 FM(SEL_DRIF1_0) FM(SEL_DRIF1_1) FM(SEL_DRIF1_2) F_(0, 0)
+#define MOD_SEL0_7_6 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) FM(SEL_DRIF0_2) F_(0, 0)
+#define MOD_SEL0_5 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
+#define MOD_SEL0_4_3 FM(SEL_ADG_A_0) FM(SEL_ADG_A_1) FM(SEL_ADG_A_2) FM(SEL_ADG_A_3)
+#define MOD_SEL0_2 FM(SEL_5LINE_0) FM(SEL_5LINE_1)
+
+/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31_30 FM(SEL_TSIF1_0) FM(SEL_TSIF1_1) FM(SEL_TSIF1_2) FM(SEL_TSIF1_3)
+#define MOD_SEL1_29_28_27 FM(SEL_TSIF0_0) FM(SEL_TSIF0_1) FM(SEL_TSIF0_2) FM(SEL_TSIF0_3) FM(SEL_TSIF0_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
+#define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3)
+#define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1)
+#define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1)
+#define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3)
+#define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1)
+#define MOD_SEL1_15_14 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
+#define MOD_SEL1_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1)
+#define MOD_SEL1_12 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
+#define MOD_SEL1_11 FM(SEL_SCIF1_0) FM(SEL_SCIF1_1)
+#define MOD_SEL1_10 FM(SEL_SATA_0) FM(SEL_SATA_1)
+#define MOD_SEL1_9 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1)
+#define MOD_SEL1_6 FM(SEL_RCAN0_0) FM(SEL_RCAN0_1)
+#define MOD_SEL1_5 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
+#define MOD_SEL1_4 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
+#define MOD_SEL1_3 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
+#define MOD_SEL1_2 FM(SEL_PWM3_0) FM(SEL_PWM3_1)
+#define MOD_SEL1_1 FM(SEL_PWM2_0) FM(SEL_PWM2_1)
+#define MOD_SEL1_0 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
+
+/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
+#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
+#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
+#define MOD_SEL2_28_27 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) FM(SEL_FM_3)
+#define MOD_SEL2_26 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1)
+#define MOD_SEL2_25_24_23 FM(SEL_I2C6_0) FM(SEL_I2C6_1) FM(SEL_I2C6_2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL2_22 FM(SEL_NDF_0) FM(SEL_NDF_1)
+#define MOD_SEL2_21 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
+#define MOD_SEL2_20 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
+#define MOD_SEL2_19 FM(SEL_TIMER_TMU2_0) FM(SEL_TIMER_TMU2_1)
+#define MOD_SEL2_18 FM(SEL_ADG_B_0) FM(SEL_ADG_B_1)
+#define MOD_SEL2_17 FM(SEL_ADG_C_0) FM(SEL_ADG_C_1)
+#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL0_31_30_29 MOD_SEL1_31_30 MOD_SEL2_31 \
+ MOD_SEL2_30 \
+ MOD_SEL1_29_28_27 MOD_SEL2_29 \
+MOD_SEL0_28_27 MOD_SEL2_28_27 \
+MOD_SEL0_26_25_24 MOD_SEL1_26 MOD_SEL2_26 \
+ MOD_SEL1_25_24 MOD_SEL2_25_24_23 \
+MOD_SEL0_23 MOD_SEL1_23_22_21 \
+MOD_SEL0_22 MOD_SEL2_22 \
+MOD_SEL0_21 MOD_SEL2_21 \
+MOD_SEL0_20 MOD_SEL1_20 MOD_SEL2_20 \
+MOD_SEL0_19 MOD_SEL1_19 MOD_SEL2_19 \
+MOD_SEL0_18_17 MOD_SEL1_18_17 MOD_SEL2_18 \
+ MOD_SEL2_17 \
+MOD_SEL0_16 MOD_SEL1_16 \
+MOD_SEL0_15 MOD_SEL1_15_14 \
+MOD_SEL0_14_13 \
+ MOD_SEL1_13 \
+MOD_SEL0_12 MOD_SEL1_12 \
+MOD_SEL0_11 MOD_SEL1_11 \
+MOD_SEL0_10 MOD_SEL1_10 \
+MOD_SEL0_9_8 MOD_SEL1_9 \
+MOD_SEL0_7_6 \
+ MOD_SEL1_6 \
+MOD_SEL0_5 MOD_SEL1_5 \
+MOD_SEL0_4_3 MOD_SEL1_4 \
+ MOD_SEL1_3 \
+MOD_SEL0_2 MOD_SEL1_2 \
+ MOD_SEL1_1 \
+ MOD_SEL1_0 MOD_SEL2_0
+
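+/*
+ * The GPSR/IPSR/MOD_SEL tables above are expanded twice in the enum
+ * below: first with FM() emitting FN_* function identifiers, then with
+ * FM() emitting *_MARK identifiers. F_() is defined empty in both
+ * passes, so reserved field values are skipped.
+ */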
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
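+/*
+ * Pin mux data: PINMUX_SINGLE() covers functions that need no IPSR or
+ * MOD_SEL setting, PINMUX_IPSR_GPSR() covers functions selected by an
+ * IPSR field alone, and PINMUX_IPSR_MSEL() additionally requires the
+ * given MOD_SELx value (SEL_*) from the tables above.
+ */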
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS2),
+ PINMUX_SINGLE(CLKOUT),
+ PINMUX_SINGLE(GP7_03),
+ PINMUX_SINGLE(HDMI0_CEC),
+ PINMUX_SINGLE(MSIOF0_RXD),
+ PINMUX_SINGLE(MSIOF0_SCK),
+ PINMUX_SINGLE(MSIOF0_TXD),
+ PINMUX_SINGLE(SSI_SCK5),
+ PINMUX_SINGLE(SSI_SDATA5),
+ PINMUX_SINGLE(SSI_WS5),
+
+ /* IPSR0 */
+ PINMUX_IPSR_GPSR(IP0_3_0, AVB_MDC),
+ PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SS2_C, SEL_MSIOF2_2),
+
+ PINMUX_IPSR_GPSR(IP0_7_4, AVB_MAGIC),
+ PINMUX_IPSR_MSEL(IP0_7_4, MSIOF2_SS1_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP0_7_4, SCK4_A, SEL_SCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_11_8, AVB_PHY_INT),
+ PINMUX_IPSR_MSEL(IP0_11_8, MSIOF2_SYNC_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP0_11_8, RX4_A, SEL_SCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_15_12, AVB_LINK),
+ PINMUX_IPSR_MSEL(IP0_15_12, MSIOF2_SCK_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP0_15_12, TX4_A, SEL_SCIF4_0),
+
+ PINMUX_IPSR_MSEL(IP0_19_16, AVB_AVTP_MATCH_A, SEL_ETHERAVB_0),
+ PINMUX_IPSR_MSEL(IP0_19_16, MSIOF2_RXD_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP0_19_16, CTS4_N_A, SEL_SCIF4_0),
+
+ PINMUX_IPSR_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A, SEL_ETHERAVB_0),
+ PINMUX_IPSR_MSEL(IP0_23_20, MSIOF2_TXD_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP0_23_20, RTS4_N_TANS_A, SEL_SCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0_27_24, QPOLB),
+ PINMUX_IPSR_GPSR(IP0_27_24, DU_CDE),
+ PINMUX_IPSR_MSEL(IP0_27_24, VI4_DATA0_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP0_27_24, CAN0_TX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MSEL(IP0_27_24, CANFD0_TX_B, SEL_CANFD0_1),
+ PINMUX_IPSR_MSEL(IP0_27_24, MSIOF3_SS2_E, SEL_MSIOF3_4),
+
+ PINMUX_IPSR_GPSR(IP0_31_28, IRQ1),
+ PINMUX_IPSR_GPSR(IP0_31_28, QPOLA),
+ PINMUX_IPSR_GPSR(IP0_31_28, DU_DISP),
+ PINMUX_IPSR_MSEL(IP0_31_28, VI4_DATA1_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP0_31_28, CAN0_RX_B, SEL_RCAN0_1),
+ PINMUX_IPSR_MSEL(IP0_31_28, CANFD0_RX_B, SEL_CANFD0_1),
+ PINMUX_IPSR_MSEL(IP0_31_28, MSIOF3_SS1_E, SEL_MSIOF3_4),
+
+ /* IPSR1 */
+ PINMUX_IPSR_GPSR(IP1_3_0, IRQ2),
+ PINMUX_IPSR_GPSR(IP1_3_0, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP1_3_0, DU_EXODDF_DU_ODDF_DISP_CDE),
+ PINMUX_IPSR_MSEL(IP1_3_0, VI4_DATA2_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_3_0, PWM3_B, SEL_PWM3_1),
+ PINMUX_IPSR_MSEL(IP1_3_0, MSIOF3_SYNC_E, SEL_MSIOF3_4),
+
+ PINMUX_IPSR_GPSR(IP1_7_4, IRQ3),
+ PINMUX_IPSR_GPSR(IP1_7_4, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP1_7_4, A25),
+ PINMUX_IPSR_GPSR(IP1_7_4, DU_DOTCLKOUT1),
+ PINMUX_IPSR_MSEL(IP1_7_4, VI4_DATA3_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_7_4, PWM4_B, SEL_PWM4_1),
+ PINMUX_IPSR_MSEL(IP1_7_4, MSIOF3_SCK_E, SEL_MSIOF3_4),
+
+ PINMUX_IPSR_GPSR(IP1_11_8, IRQ4),
+ PINMUX_IPSR_GPSR(IP1_11_8, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP1_11_8, A24),
+ PINMUX_IPSR_GPSR(IP1_11_8, DU_EXHSYNC_DU_HSYNC),
+ PINMUX_IPSR_MSEL(IP1_11_8, VI4_DATA4_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_11_8, PWM5_B, SEL_PWM5_1),
+ PINMUX_IPSR_MSEL(IP1_11_8, MSIOF3_RXD_E, SEL_MSIOF3_4),
+
+ PINMUX_IPSR_GPSR(IP1_15_12, IRQ5),
+ PINMUX_IPSR_GPSR(IP1_15_12, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP1_15_12, A23),
+ PINMUX_IPSR_GPSR(IP1_15_12, DU_EXVSYNC_DU_VSYNC),
+ PINMUX_IPSR_MSEL(IP1_15_12, VI4_DATA5_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_15_12, PWM6_B, SEL_PWM6_1),
+ PINMUX_IPSR_MSEL(IP1_15_12, MSIOF3_TXD_E, SEL_MSIOF3_4),
+
+ PINMUX_IPSR_GPSR(IP1_19_16, PWM0),
+ PINMUX_IPSR_GPSR(IP1_19_16, AVB_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP1_19_16, A22),
+ PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA6_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_19_16, IECLK_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_MSEL(IP1_23_20, PWM1_A, SEL_PWM1_0),
+ PINMUX_IPSR_GPSR(IP1_23_20, A21),
+ PINMUX_IPSR_MSEL(IP1_23_20, HRX3_D, SEL_HSCIF3_3),
+ PINMUX_IPSR_MSEL(IP1_23_20, VI4_DATA7_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP1_23_20, IERX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_MSEL(IP1_27_24, PWM2_A, SEL_PWM2_0),
+ PINMUX_IPSR_GPSR(IP1_27_24, A20),
+ PINMUX_IPSR_MSEL(IP1_27_24, HTX3_D, SEL_HSCIF3_3),
+ PINMUX_IPSR_MSEL(IP1_27_24, IETX_B, SEL_IEBUS_1),
+
+ PINMUX_IPSR_GPSR(IP1_31_28, A0),
+ PINMUX_IPSR_GPSR(IP1_31_28, LCDOUT16),
+ PINMUX_IPSR_MSEL(IP1_31_28, MSIOF3_SYNC_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP1_31_28, VI4_DATA8),
+ PINMUX_IPSR_GPSR(IP1_31_28, DU_DB0),
+ PINMUX_IPSR_MSEL(IP1_31_28, PWM3_A, SEL_PWM3_0),
+
+ /* IPSR2 */
+ PINMUX_IPSR_GPSR(IP2_3_0, A1),
+ PINMUX_IPSR_GPSR(IP2_3_0, LCDOUT17),
+ PINMUX_IPSR_MSEL(IP2_3_0, MSIOF3_TXD_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP2_3_0, VI4_DATA9),
+ PINMUX_IPSR_GPSR(IP2_3_0, DU_DB1),
+ PINMUX_IPSR_MSEL(IP2_3_0, PWM4_A, SEL_PWM4_0),
+
+ PINMUX_IPSR_GPSR(IP2_7_4, A2),
+ PINMUX_IPSR_GPSR(IP2_7_4, LCDOUT18),
+ PINMUX_IPSR_MSEL(IP2_7_4, MSIOF3_SCK_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP2_7_4, VI4_DATA10),
+ PINMUX_IPSR_GPSR(IP2_7_4, DU_DB2),
+ PINMUX_IPSR_MSEL(IP2_7_4, PWM5_A, SEL_PWM5_0),
+
+ PINMUX_IPSR_GPSR(IP2_11_8, A3),
+ PINMUX_IPSR_GPSR(IP2_11_8, LCDOUT19),
+ PINMUX_IPSR_MSEL(IP2_11_8, MSIOF3_RXD_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP2_11_8, VI4_DATA11),
+ PINMUX_IPSR_GPSR(IP2_11_8, DU_DB3),
+ PINMUX_IPSR_MSEL(IP2_11_8, PWM6_A, SEL_PWM6_0),
+
+ PINMUX_IPSR_GPSR(IP2_15_12, A4),
+ PINMUX_IPSR_GPSR(IP2_15_12, LCDOUT20),
+ PINMUX_IPSR_MSEL(IP2_15_12, MSIOF3_SS1_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI4_DATA12),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI5_DATA12),
+ PINMUX_IPSR_GPSR(IP2_15_12, DU_DB4),
+
+ PINMUX_IPSR_GPSR(IP2_19_16, A5),
+ PINMUX_IPSR_GPSR(IP2_19_16, LCDOUT21),
+ PINMUX_IPSR_MSEL(IP2_19_16, MSIOF3_SS2_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_MSEL(IP2_19_16, SCK4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI4_DATA13),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI5_DATA13),
+ PINMUX_IPSR_GPSR(IP2_19_16, DU_DB5),
+
+ PINMUX_IPSR_GPSR(IP2_23_20, A6),
+ PINMUX_IPSR_GPSR(IP2_23_20, LCDOUT22),
+ PINMUX_IPSR_MSEL(IP2_23_20, MSIOF2_SS1_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_23_20, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI4_DATA14),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI5_DATA14),
+ PINMUX_IPSR_GPSR(IP2_23_20, DU_DB6),
+
+ PINMUX_IPSR_GPSR(IP2_27_24, A7),
+ PINMUX_IPSR_GPSR(IP2_27_24, LCDOUT23),
+ PINMUX_IPSR_MSEL(IP2_27_24, MSIOF2_SS2_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_27_24, TX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI4_DATA15),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI5_DATA15),
+ PINMUX_IPSR_GPSR(IP2_27_24, DU_DB7),
+
+ PINMUX_IPSR_GPSR(IP2_31_28, A8),
+ PINMUX_IPSR_MSEL(IP2_31_28, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP2_31_28, MSIOF2_SYNC_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_31_28, HRX4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_MSEL(IP2_31_28, SDA6_A, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP2_31_28, AVB_AVTP_MATCH_B, SEL_ETHERAVB_1),
+ PINMUX_IPSR_MSEL(IP2_31_28, PWM1_B, SEL_PWM1_1),
+
+ /* IPSR3 */
+ PINMUX_IPSR_GPSR(IP3_3_0, A9),
+ PINMUX_IPSR_MSEL(IP3_3_0, MSIOF2_SCK_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP3_3_0, CTS4_N_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP3_3_0, VI5_VSYNC_N),
+
+ PINMUX_IPSR_GPSR(IP3_7_4, A10),
+ PINMUX_IPSR_MSEL(IP3_7_4, MSIOF2_RXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP3_7_4, RTS4_N_TANS_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP3_7_4, VI5_HSYNC_N),
+
+ PINMUX_IPSR_GPSR(IP3_11_8, A11),
+ PINMUX_IPSR_MSEL(IP3_11_8, TX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP3_11_8, MSIOF2_TXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP3_11_8, HTX4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_GPSR(IP3_11_8, HSCK4),
+ PINMUX_IPSR_GPSR(IP3_11_8, VI5_FIELD),
+ PINMUX_IPSR_MSEL(IP3_11_8, SCL6_A, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP3_11_8, AVB_AVTP_CAPTURE_B, SEL_ETHERAVB_1),
+ PINMUX_IPSR_MSEL(IP3_11_8, PWM2_B, SEL_PWM2_1),
+
+ PINMUX_IPSR_GPSR(IP3_15_12, A12),
+ PINMUX_IPSR_GPSR(IP3_15_12, LCDOUT12),
+ PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SCK_C, SEL_MSIOF3_2),
+ PINMUX_IPSR_MSEL(IP3_15_12, HRX4_A, SEL_HSCIF4_0),
+ PINMUX_IPSR_GPSR(IP3_15_12, VI5_DATA8),
+ PINMUX_IPSR_GPSR(IP3_15_12, DU_DG4),
+
+ PINMUX_IPSR_GPSR(IP3_19_16, A13),
+ PINMUX_IPSR_GPSR(IP3_19_16, LCDOUT13),
+ PINMUX_IPSR_MSEL(IP3_19_16, MSIOF3_SYNC_C, SEL_MSIOF3_2),
+ PINMUX_IPSR_MSEL(IP3_19_16, HTX4_A, SEL_HSCIF4_0),
+ PINMUX_IPSR_GPSR(IP3_19_16, VI5_DATA9),
+ PINMUX_IPSR_GPSR(IP3_19_16, DU_DG5),
+
+ PINMUX_IPSR_GPSR(IP3_23_20, A14),
+ PINMUX_IPSR_GPSR(IP3_23_20, LCDOUT14),
+ PINMUX_IPSR_MSEL(IP3_23_20, MSIOF3_RXD_C, SEL_MSIOF3_2),
+ PINMUX_IPSR_GPSR(IP3_23_20, HCTS4_N),
+ PINMUX_IPSR_GPSR(IP3_23_20, VI5_DATA10),
+ PINMUX_IPSR_GPSR(IP3_23_20, DU_DG6),
+
+ PINMUX_IPSR_GPSR(IP3_27_24, A15),
+ PINMUX_IPSR_GPSR(IP3_27_24, LCDOUT15),
+ PINMUX_IPSR_MSEL(IP3_27_24, MSIOF3_TXD_C, SEL_MSIOF3_2),
+ PINMUX_IPSR_GPSR(IP3_27_24, HRTS4_N),
+ PINMUX_IPSR_GPSR(IP3_27_24, VI5_DATA11),
+ PINMUX_IPSR_GPSR(IP3_27_24, DU_DG7),
+
+ PINMUX_IPSR_GPSR(IP3_31_28, A16),
+ PINMUX_IPSR_GPSR(IP3_31_28, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP3_31_28, VI4_FIELD),
+ PINMUX_IPSR_GPSR(IP3_31_28, DU_DG0),
+
+ /* IPSR4 */
+ PINMUX_IPSR_GPSR(IP4_3_0, A17),
+ PINMUX_IPSR_GPSR(IP4_3_0, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP4_3_0, VI4_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_3_0, DU_DG1),
+
+ PINMUX_IPSR_GPSR(IP4_7_4, A18),
+ PINMUX_IPSR_GPSR(IP4_7_4, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP4_7_4, VI4_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_7_4, DU_DG2),
+
+ PINMUX_IPSR_GPSR(IP4_11_8, A19),
+ PINMUX_IPSR_GPSR(IP4_11_8, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP4_11_8, VI4_CLKENB),
+ PINMUX_IPSR_GPSR(IP4_11_8, DU_DG3),
+
+ PINMUX_IPSR_GPSR(IP4_15_12, CS0_N),
+ PINMUX_IPSR_GPSR(IP4_15_12, VI5_CLKENB),
+
+ PINMUX_IPSR_GPSR(IP4_19_16, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP4_19_16, VI5_CLK),
+ PINMUX_IPSR_MSEL(IP4_19_16, EX_WAIT0_B, SEL_LBSC_1),
+
+ PINMUX_IPSR_GPSR(IP4_23_20, BS_N),
+ PINMUX_IPSR_GPSR(IP4_23_20, QSTVA_QVS),
+ PINMUX_IPSR_MSEL(IP4_23_20, MSIOF3_SCK_D, SEL_MSIOF3_3),
+ PINMUX_IPSR_GPSR(IP4_23_20, SCK3),
+ PINMUX_IPSR_GPSR(IP4_23_20, HSCK3),
+ PINMUX_IPSR_GPSR(IP4_23_20, CAN1_TX),
+ PINMUX_IPSR_GPSR(IP4_23_20, CANFD1_TX),
+ PINMUX_IPSR_MSEL(IP4_23_20, IETX_A, SEL_IEBUS_0),
+
+ PINMUX_IPSR_GPSR(IP4_27_24, RD_N),
+ PINMUX_IPSR_MSEL(IP4_27_24, MSIOF3_SYNC_D, SEL_MSIOF3_3),
+ PINMUX_IPSR_MSEL(IP4_27_24, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP4_27_24, HRX3_A, SEL_HSCIF3_0),
+ PINMUX_IPSR_MSEL(IP4_27_24, CAN0_TX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MSEL(IP4_27_24, CANFD0_TX_A, SEL_CANFD0_0),
+
+ PINMUX_IPSR_GPSR(IP4_31_28, RD_WR_N),
+ PINMUX_IPSR_MSEL(IP4_31_28, MSIOF3_RXD_D, SEL_MSIOF3_3),
+ PINMUX_IPSR_MSEL(IP4_31_28, TX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP4_31_28, HTX3_A, SEL_HSCIF3_0),
+ PINMUX_IPSR_MSEL(IP4_31_28, CAN0_RX_A, SEL_RCAN0_0),
+ PINMUX_IPSR_MSEL(IP4_31_28, CANFD0_RX_A, SEL_CANFD0_0),
+
+ /* IPSR5 */
+ PINMUX_IPSR_GPSR(IP5_3_0, WE0_N),
+ PINMUX_IPSR_MSEL(IP5_3_0, MSIOF3_TXD_D, SEL_MSIOF3_3),
+ PINMUX_IPSR_GPSR(IP5_3_0, CTS3_N),
+ PINMUX_IPSR_GPSR(IP5_3_0, HCTS3_N),
+ PINMUX_IPSR_MSEL(IP5_3_0, SCL6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP5_3_0, CAN_CLK),
+ PINMUX_IPSR_MSEL(IP5_3_0, IECLK_A, SEL_IEBUS_0),
+
+ PINMUX_IPSR_GPSR(IP5_7_4, WE1_N),
+ PINMUX_IPSR_MSEL(IP5_7_4, MSIOF3_SS1_D, SEL_MSIOF3_3),
+ PINMUX_IPSR_GPSR(IP5_7_4, RTS3_N_TANS),
+ PINMUX_IPSR_GPSR(IP5_7_4, HRTS3_N),
+ PINMUX_IPSR_MSEL(IP5_7_4, SDA6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP5_7_4, CAN1_RX),
+ PINMUX_IPSR_GPSR(IP5_7_4, CANFD1_RX),
+ PINMUX_IPSR_MSEL(IP5_7_4, IERX_A, SEL_IEBUS_0),
+
+ PINMUX_IPSR_MSEL(IP5_11_8, EX_WAIT0_A, SEL_LBSC_0),
+ PINMUX_IPSR_GPSR(IP5_11_8, QCLK),
+ PINMUX_IPSR_GPSR(IP5_11_8, VI4_CLK),
+ PINMUX_IPSR_GPSR(IP5_11_8, DU_DOTCLKOUT0),
+
+ PINMUX_IPSR_GPSR(IP5_15_12, D0),
+ PINMUX_IPSR_MSEL(IP5_15_12, MSIOF2_SS1_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_MSEL(IP5_15_12, MSIOF3_SCK_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_15_12, VI4_DATA16),
+ PINMUX_IPSR_GPSR(IP5_15_12, VI5_DATA0),
+
+ PINMUX_IPSR_GPSR(IP5_19_16, D1),
+ PINMUX_IPSR_MSEL(IP5_19_16, MSIOF2_SS2_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_MSEL(IP5_19_16, MSIOF3_SYNC_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_19_16, VI4_DATA17),
+ PINMUX_IPSR_GPSR(IP5_19_16, VI5_DATA1),
+
+ PINMUX_IPSR_GPSR(IP5_23_20, D2),
+ PINMUX_IPSR_MSEL(IP5_23_20, MSIOF3_RXD_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_23_20, VI4_DATA18),
+ PINMUX_IPSR_GPSR(IP5_23_20, VI5_DATA2),
+
+ PINMUX_IPSR_GPSR(IP5_27_24, D3),
+ PINMUX_IPSR_MSEL(IP5_27_24, MSIOF3_TXD_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI4_DATA19),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI5_DATA3),
+
+ PINMUX_IPSR_GPSR(IP5_31_28, D4),
+ PINMUX_IPSR_MSEL(IP5_31_28, MSIOF2_SCK_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP5_31_28, VI4_DATA20),
+ PINMUX_IPSR_GPSR(IP5_31_28, VI5_DATA4),
+
+ /* IPSR6 */
+ PINMUX_IPSR_GPSR(IP6_3_0, D5),
+ PINMUX_IPSR_MSEL(IP6_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP6_3_0, VI4_DATA21),
+ PINMUX_IPSR_GPSR(IP6_3_0, VI5_DATA5),
+
+ PINMUX_IPSR_GPSR(IP6_7_4, D6),
+ PINMUX_IPSR_MSEL(IP6_7_4, MSIOF2_RXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP6_7_4, VI4_DATA22),
+ PINMUX_IPSR_GPSR(IP6_7_4, VI5_DATA6),
+
+ PINMUX_IPSR_GPSR(IP6_11_8, D7),
+ PINMUX_IPSR_MSEL(IP6_11_8, MSIOF2_TXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP6_11_8, VI4_DATA23),
+ PINMUX_IPSR_GPSR(IP6_11_8, VI5_DATA7),
+
+ PINMUX_IPSR_GPSR(IP6_15_12, D8),
+ PINMUX_IPSR_GPSR(IP6_15_12, LCDOUT0),
+ PINMUX_IPSR_MSEL(IP6_15_12, MSIOF2_SCK_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP6_15_12, SCK4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP6_15_12, VI4_DATA0_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP6_15_12, DU_DR0),
+
+ PINMUX_IPSR_GPSR(IP6_19_16, D9),
+ PINMUX_IPSR_GPSR(IP6_19_16, LCDOUT1),
+ PINMUX_IPSR_MSEL(IP6_19_16, MSIOF2_SYNC_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP6_19_16, VI4_DATA1_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP6_19_16, DU_DR1),
+
+ PINMUX_IPSR_GPSR(IP6_23_20, D10),
+ PINMUX_IPSR_GPSR(IP6_23_20, LCDOUT2),
+ PINMUX_IPSR_MSEL(IP6_23_20, MSIOF2_RXD_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP6_23_20, HRX3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_MSEL(IP6_23_20, VI4_DATA2_A, SEL_VIN4_0),
+ PINMUX_IPSR_MSEL(IP6_23_20, CTS4_N_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP6_23_20, DU_DR2),
+
+ PINMUX_IPSR_GPSR(IP6_27_24, D11),
+ PINMUX_IPSR_GPSR(IP6_27_24, LCDOUT3),
+ PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_TXD_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP6_27_24, HTX3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_MSEL(IP6_27_24, VI4_DATA3_A, SEL_VIN4_0),
+ PINMUX_IPSR_MSEL(IP6_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP6_27_24, DU_DR3),
+
+ PINMUX_IPSR_GPSR(IP6_31_28, D12),
+ PINMUX_IPSR_GPSR(IP6_31_28, LCDOUT4),
+ PINMUX_IPSR_MSEL(IP6_31_28, MSIOF2_SS1_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP6_31_28, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP6_31_28, VI4_DATA4_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP6_31_28, DU_DR4),
+
+ /* IPSR7 */
+ PINMUX_IPSR_GPSR(IP7_3_0, D13),
+ PINMUX_IPSR_GPSR(IP7_3_0, LCDOUT5),
+ PINMUX_IPSR_MSEL(IP7_3_0, MSIOF2_SS2_D, SEL_MSIOF2_3),
+ PINMUX_IPSR_MSEL(IP7_3_0, TX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP7_3_0, VI4_DATA5_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP7_3_0, DU_DR5),
+
+ PINMUX_IPSR_GPSR(IP7_7_4, D14),
+ PINMUX_IPSR_GPSR(IP7_7_4, LCDOUT6),
+ PINMUX_IPSR_MSEL(IP7_7_4, MSIOF3_SS1_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP7_7_4, HRX3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP7_7_4, VI4_DATA6_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP7_7_4, DU_DR6),
+ PINMUX_IPSR_MSEL(IP7_7_4, SCL6_C, SEL_I2C6_2),
+
+ PINMUX_IPSR_GPSR(IP7_11_8, D15),
+ PINMUX_IPSR_GPSR(IP7_11_8, LCDOUT7),
+ PINMUX_IPSR_MSEL(IP7_11_8, MSIOF3_SS2_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP7_11_8, HTX3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP7_11_8, VI4_DATA7_A, SEL_VIN4_0),
+ PINMUX_IPSR_GPSR(IP7_11_8, DU_DR7),
+ PINMUX_IPSR_MSEL(IP7_11_8, SDA6_C, SEL_I2C6_2),
+
+ PINMUX_IPSR_GPSR(IP7_15_12, FSCLKST),
+
+ PINMUX_IPSR_GPSR(IP7_19_16, SD0_CLK),
+ PINMUX_IPSR_MSEL(IP7_19_16, MSIOF1_SCK_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP7_19_16, STP_OPWM_0_B, SEL_SSP1_0_1),
+
+ PINMUX_IPSR_GPSR(IP7_23_20, SD0_CMD),
+ PINMUX_IPSR_MSEL(IP7_23_20, MSIOF1_SYNC_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP7_23_20, STP_IVCXO27_0_B, SEL_SSP1_0_1),
+
+ PINMUX_IPSR_GPSR(IP7_27_24, SD0_DAT0),
+ PINMUX_IPSR_MSEL(IP7_27_24, MSIOF1_RXD_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP7_27_24, TS_SCK0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MSEL(IP7_27_24, STP_ISCLK_0_B, SEL_SSP1_0_1),
+
+ PINMUX_IPSR_GPSR(IP7_31_28, SD0_DAT1),
+ PINMUX_IPSR_MSEL(IP7_31_28, MSIOF1_TXD_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP7_31_28, TS_SPSYNC0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MSEL(IP7_31_28, STP_ISSYNC_0_B, SEL_SSP1_0_1),
+
+ /* IPSR8 */
+ PINMUX_IPSR_GPSR(IP8_3_0, SD0_DAT2),
+ PINMUX_IPSR_MSEL(IP8_3_0, MSIOF1_SS1_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP8_3_0, TS_SDAT0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MSEL(IP8_3_0, STP_ISD_0_B, SEL_SSP1_0_1),
+
+ PINMUX_IPSR_GPSR(IP8_7_4, SD0_DAT3),
+ PINMUX_IPSR_MSEL(IP8_7_4, MSIOF1_SS2_E, SEL_MSIOF1_4),
+ PINMUX_IPSR_MSEL(IP8_7_4, TS_SDEN0_B, SEL_TSIF0_1),
+ PINMUX_IPSR_MSEL(IP8_7_4, STP_ISEN_0_B, SEL_SSP1_0_1),
+
+ PINMUX_IPSR_GPSR(IP8_11_8, SD1_CLK),
+ PINMUX_IPSR_MSEL(IP8_11_8, MSIOF1_SCK_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_11_8, SIM0_CLK_A, SEL_SIMCARD_0),
+
+ PINMUX_IPSR_GPSR(IP8_15_12, SD1_CMD),
+ PINMUX_IPSR_MSEL(IP8_15_12, MSIOF1_SYNC_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_15_12, NFCE_N_B, SEL_NDF_1),
+ PINMUX_IPSR_MSEL(IP8_15_12, SIM0_D_A, SEL_SIMCARD_0),
+ PINMUX_IPSR_MSEL(IP8_15_12, STP_IVCXO27_1_B, SEL_SSP1_1_1),
+
+ PINMUX_IPSR_GPSR(IP8_19_16, SD1_DAT0),
+ PINMUX_IPSR_GPSR(IP8_19_16, SD2_DAT4),
+ PINMUX_IPSR_MSEL(IP8_19_16, MSIOF1_RXD_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_19_16, NFWP_N_B, SEL_NDF_1),
+ PINMUX_IPSR_MSEL(IP8_19_16, TS_SCK1_B, SEL_TSIF1_1),
+ PINMUX_IPSR_MSEL(IP8_19_16, STP_ISCLK_1_B, SEL_SSP1_1_1),
+
+ PINMUX_IPSR_GPSR(IP8_23_20, SD1_DAT1),
+ PINMUX_IPSR_GPSR(IP8_23_20, SD2_DAT5),
+ PINMUX_IPSR_MSEL(IP8_23_20, MSIOF1_TXD_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDF_1),
+ PINMUX_IPSR_MSEL(IP8_23_20, TS_SPSYNC1_B, SEL_TSIF1_1),
+ PINMUX_IPSR_MSEL(IP8_23_20, STP_ISSYNC_1_B, SEL_SSP1_1_1),
+
+ PINMUX_IPSR_GPSR(IP8_27_24, SD1_DAT2),
+ PINMUX_IPSR_GPSR(IP8_27_24, SD2_DAT6),
+ PINMUX_IPSR_MSEL(IP8_27_24, MSIOF1_SS1_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDF_1),
+ PINMUX_IPSR_MSEL(IP8_27_24, TS_SDAT1_B, SEL_TSIF1_1),
+ PINMUX_IPSR_MSEL(IP8_27_24, STP_ISD_1_B, SEL_SSP1_1_1),
+
+ PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT3),
+ PINMUX_IPSR_GPSR(IP8_31_28, SD2_DAT7),
+ PINMUX_IPSR_MSEL(IP8_31_28, MSIOF1_SS2_G, SEL_MSIOF1_6),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFRB_N_B, SEL_NDF_1),
+ PINMUX_IPSR_MSEL(IP8_31_28, TS_SDEN1_B, SEL_TSIF1_1),
+ PINMUX_IPSR_MSEL(IP8_31_28, STP_ISEN_1_B, SEL_SSP1_1_1),
+
+ /* IPSR9 */
+ PINMUX_IPSR_GPSR(IP9_3_0, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP9_3_0, NFDATA8),
+
+ PINMUX_IPSR_GPSR(IP9_7_4, SD2_CMD),
+ PINMUX_IPSR_GPSR(IP9_7_4, NFDATA9),
+
+ PINMUX_IPSR_GPSR(IP9_11_8, SD2_DAT0),
+ PINMUX_IPSR_GPSR(IP9_11_8, NFDATA10),
+
+ PINMUX_IPSR_GPSR(IP9_15_12, SD2_DAT1),
+ PINMUX_IPSR_GPSR(IP9_15_12, NFDATA11),
+
+ PINMUX_IPSR_GPSR(IP9_19_16, SD2_DAT2),
+ PINMUX_IPSR_GPSR(IP9_19_16, NFDATA12),
+
+ PINMUX_IPSR_GPSR(IP9_23_20, SD2_DAT3),
+ PINMUX_IPSR_GPSR(IP9_23_20, NFDATA13),
+
+ PINMUX_IPSR_GPSR(IP9_27_24, SD2_DS),
+ PINMUX_IPSR_GPSR(IP9_27_24, NFALE),
+
+ PINMUX_IPSR_GPSR(IP9_31_28, SD3_CLK),
+ PINMUX_IPSR_GPSR(IP9_31_28, NFWE_N),
+
+ /* IPSR10 */
+ PINMUX_IPSR_GPSR(IP10_3_0, SD3_CMD),
+ PINMUX_IPSR_GPSR(IP10_3_0, NFRE_N),
+
+ PINMUX_IPSR_GPSR(IP10_7_4, SD3_DAT0),
+ PINMUX_IPSR_GPSR(IP10_7_4, NFDATA0),
+
+ PINMUX_IPSR_GPSR(IP10_11_8, SD3_DAT1),
+ PINMUX_IPSR_GPSR(IP10_11_8, NFDATA1),
+
+ PINMUX_IPSR_GPSR(IP10_15_12, SD3_DAT2),
+ PINMUX_IPSR_GPSR(IP10_15_12, NFDATA2),
+
+ PINMUX_IPSR_GPSR(IP10_19_16, SD3_DAT3),
+ PINMUX_IPSR_GPSR(IP10_19_16, NFDATA3),
+
+ PINMUX_IPSR_GPSR(IP10_23_20, SD3_DAT4),
+ PINMUX_IPSR_MSEL(IP10_23_20, SD2_CD_A, SEL_SDHI2_0),
+ PINMUX_IPSR_GPSR(IP10_23_20, NFDATA4),
+
+ PINMUX_IPSR_GPSR(IP10_27_24, SD3_DAT5),
+ PINMUX_IPSR_MSEL(IP10_27_24, SD2_WP_A, SEL_SDHI2_0),
+ PINMUX_IPSR_GPSR(IP10_27_24, NFDATA5),
+
+ PINMUX_IPSR_GPSR(IP10_31_28, SD3_DAT6),
+ PINMUX_IPSR_GPSR(IP10_31_28, SD3_CD),
+ PINMUX_IPSR_GPSR(IP10_31_28, NFDATA6),
+
+ /* IPSR11 */
+ PINMUX_IPSR_GPSR(IP11_3_0, SD3_DAT7),
+ PINMUX_IPSR_GPSR(IP11_3_0, SD3_WP),
+ PINMUX_IPSR_GPSR(IP11_3_0, NFDATA7),
+
+ PINMUX_IPSR_GPSR(IP11_7_4, SD3_DS),
+ PINMUX_IPSR_GPSR(IP11_7_4, NFCLE),
+
+ PINMUX_IPSR_GPSR(IP11_11_8, SD0_CD),
+ PINMUX_IPSR_MSEL(IP11_11_8, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP11_11_8, SIM0_RST_A, SEL_SIMCARD_0),
+
+ PINMUX_IPSR_GPSR(IP11_15_12, SD0_WP),
+ PINMUX_IPSR_MSEL(IP11_15_12, SDA2_B, SEL_I2C2_1),
+
+ PINMUX_IPSR_GPSR(IP11_19_16, SD1_CD),
+ PINMUX_IPSR_MSEL(IP11_19_16, SIM0_CLK_B, SEL_SIMCARD_1),
+
+ PINMUX_IPSR_GPSR(IP11_23_20, SD1_WP),
+ PINMUX_IPSR_MSEL(IP11_23_20, SIM0_D_B, SEL_SIMCARD_1),
+
+ PINMUX_IPSR_GPSR(IP11_27_24, SCK0),
+ PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, AUDIO_CLKC_B, SEL_ADG_C_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP11_27_24, SIM0_RST_B, SEL_SIMCARD_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_MSEL(IP11_27_24, RIF0_CLK_B, SEL_DRIF0_1),
+ PINMUX_IPSR_GPSR(IP11_27_24, ADICHS2),
+ PINMUX_IPSR_MSEL(IP11_27_24, SCK5_B, SEL_SCIF5_1),
+
+ PINMUX_IPSR_GPSR(IP11_31_28, RX0),
+ PINMUX_IPSR_MSEL(IP11_31_28, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP11_31_28, TS_SCK0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_MSEL(IP11_31_28, STP_ISCLK_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_MSEL(IP11_31_28, RIF0_D0_B, SEL_DRIF0_1),
+
+ /* IPSR12 */
+ PINMUX_IPSR_GPSR(IP12_3_0, TX0),
+ PINMUX_IPSR_MSEL(IP12_3_0, HTX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP12_3_0, TS_SPSYNC0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_MSEL(IP12_3_0, STP_ISSYNC_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_MSEL(IP12_3_0, RIF0_D1_B, SEL_DRIF0_1),
+
+ PINMUX_IPSR_GPSR(IP12_7_4, CTS0_N),
+ PINMUX_IPSR_MSEL(IP12_7_4, HCTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP12_7_4, MSIOF1_SYNC_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP12_7_4, TS_SPSYNC1_C, SEL_TSIF1_2),
+ PINMUX_IPSR_MSEL(IP12_7_4, STP_ISSYNC_1_C, SEL_SSP1_1_2),
+ PINMUX_IPSR_MSEL(IP12_7_4, RIF1_SYNC_B, SEL_DRIF1_1),
+ PINMUX_IPSR_GPSR(IP12_7_4, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_GPSR(IP12_7_4, ADICS_SAMP),
+
+ PINMUX_IPSR_GPSR(IP12_11_8, RTS0_N_TANS),
+ PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKA_B, SEL_ADG_A_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, SCL2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP12_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
+ PINMUX_IPSR_MSEL(IP12_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, FSO_TOE_A, SEL_FSO_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, ADICHS1),
+
+ PINMUX_IPSR_MSEL(IP12_15_12, RX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_15_12, HRX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_15_12, TS_SDAT0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_MSEL(IP12_15_12, STP_ISD_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_MSEL(IP12_15_12, RIF1_CLK_C, SEL_DRIF1_2),
+
+ PINMUX_IPSR_MSEL(IP12_19_16, TX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_19_16, HTX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_19_16, TS_SDEN0_C, SEL_TSIF0_2),
+ PINMUX_IPSR_MSEL(IP12_19_16, STP_ISEN_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_MSEL(IP12_19_16, RIF1_D0_C, SEL_DRIF1_2),
+
+ PINMUX_IPSR_GPSR(IP12_23_20, CTS1_N),
+ PINMUX_IPSR_MSEL(IP12_23_20, HCTS1_N_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_23_20, MSIOF1_RXD_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP12_23_20, TS_SDEN1_C, SEL_TSIF1_2),
+ PINMUX_IPSR_MSEL(IP12_23_20, STP_ISEN_1_C, SEL_SSP1_1_2),
+ PINMUX_IPSR_MSEL(IP12_23_20, RIF1_D0_B, SEL_DRIF1_1),
+ PINMUX_IPSR_GPSR(IP12_23_20, ADIDATA),
+
+ PINMUX_IPSR_GPSR(IP12_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_MSEL(IP12_27_24, HRTS1_N_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_27_24, MSIOF1_TXD_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP12_27_24, TS_SDAT1_C, SEL_TSIF1_2),
+ PINMUX_IPSR_MSEL(IP12_27_24, STP_ISD_1_C, SEL_SSP1_1_2),
+ PINMUX_IPSR_MSEL(IP12_27_24, RIF1_D1_B, SEL_DRIF1_1),
+ PINMUX_IPSR_GPSR(IP12_27_24, ADICHS0),
+
+ PINMUX_IPSR_GPSR(IP12_31_28, SCK2),
+ PINMUX_IPSR_MSEL(IP12_31_28, SCIF_CLK_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MSEL(IP12_31_28, MSIOF1_SCK_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_MSEL(IP12_31_28, TS_SCK1_C, SEL_TSIF1_2),
+ PINMUX_IPSR_MSEL(IP12_31_28, STP_ISCLK_1_C, SEL_SSP1_1_2),
+ PINMUX_IPSR_MSEL(IP12_31_28, RIF1_CLK_B, SEL_DRIF1_1),
+ PINMUX_IPSR_GPSR(IP12_31_28, ADICLK),
+
+ /* IPSR13 */
+ PINMUX_IPSR_MSEL(IP13_3_0, TX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP13_3_0, SD2_CD_B, SEL_SDHI2_1),
+ PINMUX_IPSR_MSEL(IP13_3_0, SCL1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP13_3_0, FMCLK_A, SEL_FM_0),
+ PINMUX_IPSR_MSEL(IP13_3_0, RIF1_D1_C, SEL_DRIF1_2),
+ PINMUX_IPSR_MSEL(IP13_3_0, FSO_CFE_0_B, SEL_FSO_1),
+
+ PINMUX_IPSR_MSEL(IP13_7_4, RX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP13_7_4, SD2_WP_B, SEL_SDHI2_1),
+ PINMUX_IPSR_MSEL(IP13_7_4, SDA1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP13_7_4, FMIN_A, SEL_FM_0),
+ PINMUX_IPSR_MSEL(IP13_7_4, RIF1_SYNC_C, SEL_DRIF1_2),
+ PINMUX_IPSR_MSEL(IP13_7_4, FSO_CFE_1_B, SEL_FSO_1),
+
+ PINMUX_IPSR_GPSR(IP13_11_8, HSCK0),
+ PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0),
+ PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2),
+ PINMUX_IPSR_MSEL(IP13_11_8, RX5_B, SEL_SCIF5_1),
+
+ PINMUX_IPSR_GPSR(IP13_15_12, HRX0),
+ PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2),
+
+ PINMUX_IPSR_GPSR(IP13_19_16, HTX0),
+ PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2),
+
+ PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N),
+ PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3),
+ PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
+ PINMUX_IPSR_GPSR(IP13_23_20, AUDIO_CLKOUT1_A),
+
+ PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N),
+ PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0),
+ PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A),
+
+ PINMUX_IPSR_GPSR(IP13_31_28, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP13_31_28, AUDIO_CLKOUT_A),
+ PINMUX_IPSR_MSEL(IP13_31_28, TX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MSEL(IP13_31_28, BPFCLK_D, SEL_FM_3),
+
+ /* IPSR14 */
+ PINMUX_IPSR_GPSR(IP14_3_0, MSIOF0_SS1),
+ PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2),
+ PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
+ PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A),
+ PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1),
+
+ PINMUX_IPSR_GPSR(IP14_7_4, MSIOF0_SS2),
+ PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
+ PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D),
+ PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
+
+ PINMUX_IPSR_GPSR(IP14_11_8, MLB_CLK),
+ PINMUX_IPSR_MSEL(IP14_11_8, MSIOF1_SCK_F, SEL_MSIOF1_5),
+ PINMUX_IPSR_MSEL(IP14_11_8, SCL1_B, SEL_I2C1_1),
+
+ PINMUX_IPSR_GPSR(IP14_15_12, MLB_SIG),
+ PINMUX_IPSR_MSEL(IP14_15_12, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MSEL(IP14_15_12, MSIOF1_SYNC_F, SEL_MSIOF1_5),
+ PINMUX_IPSR_MSEL(IP14_15_12, SDA1_B, SEL_I2C1_1),
+
+ PINMUX_IPSR_GPSR(IP14_19_16, MLB_DAT),
+ PINMUX_IPSR_MSEL(IP14_19_16, TX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MSEL(IP14_19_16, MSIOF1_RXD_F, SEL_MSIOF1_5),
+
+ PINMUX_IPSR_GPSR(IP14_23_20, SSI_SCK0129),
+ PINMUX_IPSR_MSEL(IP14_23_20, MSIOF1_TXD_F, SEL_MSIOF1_5),
+
+ PINMUX_IPSR_GPSR(IP14_27_24, SSI_WS0129),
+ PINMUX_IPSR_MSEL(IP14_27_24, MSIOF1_SS1_F, SEL_MSIOF1_5),
+
+ PINMUX_IPSR_GPSR(IP14_31_28, SSI_SDATA0),
+ PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
+
+ /* IPSR15 */
+ PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0),
+
+ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
+
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK34),
+ PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
+
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS34),
+ PINMUX_IPSR_MSEL(IP15_15_12, HCTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
+
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA3),
+ PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_19_16, MSIOF1_TXD_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_19_16, TS_SCK0_A, SEL_TSIF0_0),
+ PINMUX_IPSR_MSEL(IP15_19_16, STP_ISCLK_0_A, SEL_SSP1_0_0),
+ PINMUX_IPSR_MSEL(IP15_19_16, RIF0_D1_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP15_19_16, RIF2_D0_A, SEL_DRIF2_0),
+
+ PINMUX_IPSR_GPSR(IP15_23_20, SSI_SCK4),
+ PINMUX_IPSR_MSEL(IP15_23_20, HRX2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_23_20, MSIOF1_SCK_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_23_20, TS_SDAT0_A, SEL_TSIF0_0),
+ PINMUX_IPSR_MSEL(IP15_23_20, STP_ISD_0_A, SEL_SSP1_0_0),
+ PINMUX_IPSR_MSEL(IP15_23_20, RIF0_CLK_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP15_23_20, RIF2_CLK_A, SEL_DRIF2_0),
+
+ PINMUX_IPSR_GPSR(IP15_27_24, SSI_WS4),
+ PINMUX_IPSR_MSEL(IP15_27_24, HTX2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_27_24, MSIOF1_SYNC_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_27_24, TS_SDEN0_A, SEL_TSIF0_0),
+ PINMUX_IPSR_MSEL(IP15_27_24, STP_ISEN_0_A, SEL_SSP1_0_0),
+ PINMUX_IPSR_MSEL(IP15_27_24, RIF0_SYNC_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP15_27_24, RIF2_SYNC_A, SEL_DRIF2_0),
+
+ PINMUX_IPSR_GPSR(IP15_31_28, SSI_SDATA4),
+ PINMUX_IPSR_MSEL(IP15_31_28, HSCK2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_31_28, MSIOF1_RXD_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_31_28, TS_SPSYNC0_A, SEL_TSIF0_0),
+ PINMUX_IPSR_MSEL(IP15_31_28, STP_ISSYNC_0_A, SEL_SSP1_0_0),
+ PINMUX_IPSR_MSEL(IP15_31_28, RIF0_D0_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP15_31_28, RIF2_D1_A, SEL_DRIF2_0),
+
+ /* IPSR16 */
+ PINMUX_IPSR_GPSR(IP16_3_0, SSI_SCK6),
+ PINMUX_IPSR_MSEL(IP16_3_0, SIM0_RST_D, SEL_SIMCARD_3),
+
+ PINMUX_IPSR_GPSR(IP16_7_4, SSI_WS6),
+ PINMUX_IPSR_MSEL(IP16_7_4, SIM0_D_D, SEL_SIMCARD_3),
+
+ PINMUX_IPSR_GPSR(IP16_11_8, SSI_SDATA6),
+ PINMUX_IPSR_MSEL(IP16_11_8, SIM0_CLK_D, SEL_SIMCARD_3),
+
+ PINMUX_IPSR_GPSR(IP16_15_12, SSI_SCK78),
+ PINMUX_IPSR_MSEL(IP16_15_12, HRX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP16_15_12, MSIOF1_SCK_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP16_15_12, TS_SCK1_A, SEL_TSIF1_0),
+ PINMUX_IPSR_MSEL(IP16_15_12, STP_ISCLK_1_A, SEL_SSP1_1_0),
+ PINMUX_IPSR_MSEL(IP16_15_12, RIF1_CLK_A, SEL_DRIF1_0),
+ PINMUX_IPSR_MSEL(IP16_15_12, RIF3_CLK_A, SEL_DRIF3_0),
+
+ PINMUX_IPSR_GPSR(IP16_19_16, SSI_WS78),
+ PINMUX_IPSR_MSEL(IP16_19_16, HTX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP16_19_16, MSIOF1_SYNC_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP16_19_16, TS_SDAT1_A, SEL_TSIF1_0),
+ PINMUX_IPSR_MSEL(IP16_19_16, STP_ISD_1_A, SEL_SSP1_1_0),
+ PINMUX_IPSR_MSEL(IP16_19_16, RIF1_SYNC_A, SEL_DRIF1_0),
+ PINMUX_IPSR_MSEL(IP16_19_16, RIF3_SYNC_A, SEL_DRIF3_0),
+
+ PINMUX_IPSR_GPSR(IP16_23_20, SSI_SDATA7),
+ PINMUX_IPSR_MSEL(IP16_23_20, HCTS2_N_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP16_23_20, MSIOF1_RXD_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP16_23_20, TS_SDEN1_A, SEL_TSIF1_0),
+ PINMUX_IPSR_MSEL(IP16_23_20, STP_ISEN_1_A, SEL_SSP1_1_0),
+ PINMUX_IPSR_MSEL(IP16_23_20, RIF1_D0_A, SEL_DRIF1_0),
+ PINMUX_IPSR_MSEL(IP16_23_20, RIF3_D0_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP16_23_20, TCLK2_A, SEL_TIMER_TMU_0),
+
+ PINMUX_IPSR_GPSR(IP16_27_24, SSI_SDATA8),
+ PINMUX_IPSR_MSEL(IP16_27_24, HRTS2_N_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP16_27_24, MSIOF1_TXD_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP16_27_24, TS_SPSYNC1_A, SEL_TSIF1_0),
+ PINMUX_IPSR_MSEL(IP16_27_24, STP_ISSYNC_1_A, SEL_SSP1_1_0),
+ PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0),
+ PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0),
+
+ PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1),
+ PINMUX_IPSR_GPSR(IP16_31_28, SCK1),
+ PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
+ PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0),
+
+ /* IPSR17 */
+ PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKA_A, SEL_ADG_A_0),
+ PINMUX_IPSR_GPSR(IP17_3_0, CC5_OSCOUT),
+
+ PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKB_B, SEL_ADG_B_1),
+ PINMUX_IPSR_MSEL(IP17_7_4, SCIF_CLK_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP17_7_4, STP_IVCXO27_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP17_7_4, REMOCON_A, SEL_REMOCON_0),
+ PINMUX_IPSR_MSEL(IP17_7_4, TCLK1_A, SEL_TIMER_TMU_0),
+
+ PINMUX_IPSR_GPSR(IP17_11_8, USB0_PWEN),
+ PINMUX_IPSR_MSEL(IP17_11_8, SIM0_RST_C, SEL_SIMCARD_2),
+ PINMUX_IPSR_MSEL(IP17_11_8, TS_SCK1_D, SEL_TSIF1_3),
+ PINMUX_IPSR_MSEL(IP17_11_8, STP_ISCLK_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP17_11_8, BPFCLK_B, SEL_FM_1),
+ PINMUX_IPSR_MSEL(IP17_11_8, RIF3_CLK_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP17_11_8, HSCK2_C, SEL_HSCIF2_2),
+
+ PINMUX_IPSR_GPSR(IP17_15_12, USB0_OVC),
+ PINMUX_IPSR_MSEL(IP17_15_12, SIM0_D_C, SEL_SIMCARD_2),
+ PINMUX_IPSR_MSEL(IP17_15_12, TS_SDAT1_D, SEL_TSIF1_3),
+ PINMUX_IPSR_MSEL(IP17_15_12, STP_ISD_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP17_15_12, RIF3_SYNC_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP17_15_12, HRX2_C, SEL_HSCIF2_2),
+
+ PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN),
+ PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
+ PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4),
+ PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1),
+ PINMUX_IPSR_MSEL(IP17_19_16, RIF2_CLK_B, SEL_DRIF2_1),
+ PINMUX_IPSR_MSEL(IP17_19_16, SPEEDIN_A, SEL_SPEED_PULSE_0),
+ PINMUX_IPSR_MSEL(IP17_19_16, HTX2_C, SEL_HSCIF2_2),
+
+ PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC),
+ PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
+ PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0),
+ PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4),
+ PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1),
+ PINMUX_IPSR_MSEL(IP17_23_20, RIF2_SYNC_B, SEL_DRIF2_1),
+ PINMUX_IPSR_MSEL(IP17_23_20, REMOCON_B, SEL_REMOCON_1),
+ PINMUX_IPSR_MSEL(IP17_23_20, HCTS2_N_C, SEL_HSCIF2_2),
+
+ PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN),
+ PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3),
+ PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP17_27_24, RIF3_D0_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP17_27_24, TCLK2_B, SEL_TIMER_TMU_1),
+ PINMUX_IPSR_GPSR(IP17_27_24, TPU0TO0),
+ PINMUX_IPSR_MSEL(IP17_27_24, BPFCLK_C, SEL_FM_2),
+ PINMUX_IPSR_MSEL(IP17_27_24, HRTS2_N_C, SEL_HSCIF2_2),
+
+ PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC),
+ PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B),
+ PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
+ PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP17_31_28, RIF3_D1_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP17_31_28, FSO_TOE_B, SEL_FSO_1),
+ PINMUX_IPSR_GPSR(IP17_31_28, TPU0TO1),
+
+ /* IPSR18 */
+ PINMUX_IPSR_GPSR(IP18_3_0, GP6_30),
+ PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B),
+ PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4),
+ PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1),
+ PINMUX_IPSR_GPSR(IP18_3_0, TPU0TO2),
+ PINMUX_IPSR_MSEL(IP18_3_0, FSO_CFE_0_A, SEL_FSO_0),
+ PINMUX_IPSR_MSEL(IP18_3_0, FMCLK_C, SEL_FM_2),
+ PINMUX_IPSR_MSEL(IP18_3_0, FMCLK_D, SEL_FM_3),
+
+ PINMUX_IPSR_GPSR(IP18_7_4, GP6_31),
+ PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B),
+ PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1),
+ PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
+ PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
+ PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1),
+ PINMUX_IPSR_GPSR(IP18_7_4, TPU0TO3),
+ PINMUX_IPSR_MSEL(IP18_7_4, FSO_CFE_1_A, SEL_FSO_0),
+ PINMUX_IPSR_MSEL(IP18_7_4, FMIN_C, SEL_FM_2),
+ PINMUX_IPSR_MSEL(IP18_7_4, FMIN_D, SEL_FM_3),
+
+ /* I2C */
+ PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
+ PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
+ PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_TANS_MARK, CTS0_N_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_TANS_MARK, CTS1_N_MARK,
+};
+
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif2_data_a_mux[] = {
+ RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif2_clk_mux[] = {
+ SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_TANS_MARK, CTS3_N_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+};
+static const unsigned int scif4_data_a_mux[] = {
+ RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int scif4_clk_a_mux[] = {
+ SCK4_A_MARK,
+};
+static const unsigned int scif4_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int scif4_ctrl_a_mux[] = {
+ RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int scif4_clk_b_mux[] = {
+ SCK4_B_MARK,
+};
+static const unsigned int scif4_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif4_ctrl_b_mux[] = {
+ RTS4_N_TANS_B_MARK, CTS4_N_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+};
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_clk_c_mux[] = {
+ SCK4_C_MARK,
+};
+static const unsigned int scif4_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif4_ctrl_c_mux[] = {
+ RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int scif5_data_a_mux[] = {
+ RX5_A_MARK, TX5_A_MARK,
+};
+static const unsigned int scif5_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int scif5_clk_a_mux[] = {
+ SCK5_A_MARK,
+};
+
+static const unsigned int scif5_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 18),
+};
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+static const unsigned int scif5_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int scif5_clk_b_mux[] = {
+ SCK5_B_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int scif_clk_a_mux[] = {
+ SCIF_CLK_A_MARK,
+};
+static const unsigned int scif_clk_b_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int scif_clk_b_mux[] = {
+ SCIF_CLK_B_MARK,
+};
+
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data_a",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_b",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data_a",
+ "scif2_clk",
+ "scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk",
+ "scif3_ctrl",
+ "scif3_data_b",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data_a",
+ "scif4_clk_a",
+ "scif4_ctrl_a",
+ "scif4_data_b",
+ "scif4_clk_b",
+ "scif4_ctrl_b",
+ "scif4_data_c",
+ "scif4_clk_c",
+ "scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+ "scif5_data_a",
+ "scif5_clk_a",
+ "scif5_data_b",
+ "scif5_clk_b",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk_a",
+ "scif_clk_b",
+};
+
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_5_25_FN, GPSR5_25,
+ GP_5_24_FN, GPSR5_24,
+ GP_5_23_FN, GPSR5_23,
+ GP_5_22_FN, GPSR5_22,
+ GP_5_21_FN, GPSR5_21,
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ GP_6_31_FN, GPSR6_31,
+ GP_6_30_FN, GPSR6_30,
+ GP_6_29_FN, GPSR6_29,
+ GP_6_28_FN, GPSR6_28,
+ GP_6_27_FN, GPSR6_27,
+ GP_6_26_FN, GPSR6_26,
+ GP_6_25_FN, GPSR6_25,
+ GP_6_24_FN, GPSR6_24,
+ GP_6_23_FN, GPSR6_23,
+ GP_6_22_FN, GPSR6_22,
+ GP_6_21_FN, GPSR6_21,
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, }
+ },
+ { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ IP0_31_28
+ IP0_27_24
+ IP0_23_20
+ IP0_19_16
+ IP0_15_12
+ IP0_11_8
+ IP0_7_4
+ IP0_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ IP1_31_28
+ IP1_27_24
+ IP1_23_20
+ IP1_19_16
+ IP1_15_12
+ IP1_11_8
+ IP1_7_4
+ IP1_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ IP2_31_28
+ IP2_27_24
+ IP2_23_20
+ IP2_19_16
+ IP2_15_12
+ IP2_11_8
+ IP2_7_4
+ IP2_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ IP3_31_28
+ IP3_27_24
+ IP3_23_20
+ IP3_19_16
+ IP3_15_12
+ IP3_11_8
+ IP3_7_4
+ IP3_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ IP4_31_28
+ IP4_27_24
+ IP4_23_20
+ IP4_19_16
+ IP4_15_12
+ IP4_11_8
+ IP4_7_4
+ IP4_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ IP5_31_28
+ IP5_27_24
+ IP5_23_20
+ IP5_19_16
+ IP5_15_12
+ IP5_11_8
+ IP5_7_4
+ IP5_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ IP6_31_28
+ IP6_27_24
+ IP6_23_20
+ IP6_19_16
+ IP6_15_12
+ IP6_11_8
+ IP6_7_4
+ IP6_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ IP7_31_28
+ IP7_27_24
+ IP7_23_20
+ IP7_19_16
+ IP7_15_12
+ IP7_11_8
+ IP7_7_4
+ IP7_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ IP8_31_28
+ IP8_27_24
+ IP8_23_20
+ IP8_19_16
+ IP8_15_12
+ IP8_11_8
+ IP8_7_4
+ IP8_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ IP9_31_28
+ IP9_27_24
+ IP9_23_20
+ IP9_19_16
+ IP9_15_12
+ IP9_11_8
+ IP9_7_4
+ IP9_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ IP10_31_28
+ IP10_27_24
+ IP10_23_20
+ IP10_19_16
+ IP10_15_12
+ IP10_11_8
+ IP10_7_4
+ IP10_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ IP11_31_28
+ IP11_27_24
+ IP11_23_20
+ IP11_19_16
+ IP11_15_12
+ IP11_11_8
+ IP11_7_4
+ IP11_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ IP12_31_28
+ IP12_27_24
+ IP12_23_20
+ IP12_19_16
+ IP12_15_12
+ IP12_11_8
+ IP12_7_4
+ IP12_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ IP13_31_28
+ IP13_27_24
+ IP13_23_20
+ IP13_19_16
+ IP13_15_12
+ IP13_11_8
+ IP13_7_4
+ IP13_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ IP14_31_28
+ IP14_27_24
+ IP14_23_20
+ IP14_19_16
+ IP14_15_12
+ IP14_11_8
+ IP14_7_4
+ IP14_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ IP15_31_28
+ IP15_27_24
+ IP15_23_20
+ IP15_19_16
+ IP15_15_12
+ IP15_11_8
+ IP15_7_4
+ IP15_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
+ IP16_31_28
+ IP16_27_24
+ IP16_23_20
+ IP16_19_16
+ IP16_15_12
+ IP16_11_8
+ IP16_7_4
+ IP16_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
+ IP17_31_28
+ IP17_27_24
+ IP17_23_20
+ IP17_19_16
+ IP17_15_12
+ IP17_11_8
+ IP17_7_4
+ IP17_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4) {
+ /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IP18_7_4
+ IP18_3_0 }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+ 3, 2, 3, 1, 1, 1, 1, 1, 2, 1,
+ 1, 2, 1, 1, 1, 2, 2, 1, 2, 3) {
+ MOD_SEL0_31_30_29
+ MOD_SEL0_28_27
+ MOD_SEL0_26_25_24
+ MOD_SEL0_23
+ MOD_SEL0_22
+ MOD_SEL0_21
+ MOD_SEL0_20
+ MOD_SEL0_19
+ MOD_SEL0_18_17
+ MOD_SEL0_16
+ MOD_SEL0_15
+ MOD_SEL0_14_13
+ MOD_SEL0_12
+ MOD_SEL0_11
+ MOD_SEL0_10
+ MOD_SEL0_9_8
+ MOD_SEL0_7_6
+ MOD_SEL0_5
+ MOD_SEL0_4_3
+ /* RESERVED 2, 1, 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+ 2, 3, 1, 2, 3, 1, 1, 2, 1,
+ 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
+ MOD_SEL1_31_30
+ MOD_SEL1_29_28_27
+ MOD_SEL1_26
+ MOD_SEL1_25_24
+ MOD_SEL1_23_22_21
+ MOD_SEL1_20
+ MOD_SEL1_19
+ MOD_SEL1_18_17
+ MOD_SEL1_16
+ MOD_SEL1_15_14
+ MOD_SEL1_13
+ MOD_SEL1_12
+ MOD_SEL1_11
+ MOD_SEL1_10
+ MOD_SEL1_9
+ 0, 0, 0, 0, /* RESERVED 8, 7 */
+ MOD_SEL1_6
+ MOD_SEL1_5
+ MOD_SEL1_4
+ MOD_SEL1_3
+ MOD_SEL1_2
+ MOD_SEL1_1
+ MOD_SEL1_0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
+ 1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, 1,
+ 4, 4, 4, 3, 1) {
+ MOD_SEL2_31
+ MOD_SEL2_30
+ MOD_SEL2_29
+ MOD_SEL2_28_27
+ MOD_SEL2_26
+ MOD_SEL2_25_24_23
+ MOD_SEL2_22
+ MOD_SEL2_21
+ MOD_SEL2_20
+ MOD_SEL2_19
+ MOD_SEL2_18
+ MOD_SEL2_17
+ /* RESERVED 16 */
+ 0, 0,
+ /* RESERVED 15, 14, 13, 12 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 11, 10, 9, 8 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 7, 6, 5, 4 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* RESERVED 3, 2, 1 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ MOD_SEL2_0 }
+ },
+ { },
+};
+
+static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+{
+ int bit = -EINVAL;
+
+ *pocctrl = 0xe6060380;
+
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
+ bit = pin & 0x1f;
+
+ if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
+ bit = (pin & 0x1f) + 12;
+
+ return bit;
+}
+
+static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
+ .pin_to_pocctrl = r8a7796_pin_to_pocctrl,
+};
+
+const struct sh_pfc_soc_info r8a7796_pinmux_info = {
+ .name = "r8a77960_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index e208ee04a9f4..c5772584594c 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -596,6 +596,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
struct sh_pfc *pfc = pmx->pfc;
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned long flags;
+ unsigned int arg;
if (!sh_pfc_pinconf_validate(pfc, _pin, param))
return -ENOTSUPP;
@@ -616,7 +617,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
if (bias != param)
return -EINVAL;
- *config = 0;
+ arg = 0;
break;
}
@@ -627,7 +628,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
if (ret < 0)
return ret;
- *config = ret;
+ arg = ret;
break;
}
@@ -646,7 +647,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
val = sh_pfc_read_reg(pfc, pocctrl, 32);
spin_unlock_irqrestore(&pfc->lock, flags);
- *config = (val & BIT(bit)) ? 3300 : 1800;
+ arg = (val & BIT(bit)) ? 3300 : 1800;
break;
}
@@ -654,6 +655,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
return -ENOTSUPP;
}
+ *config = pinconf_to_config_packed(param, arg);
return 0;
}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 5e966c09434d..2345421103db 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -257,9 +257,11 @@ extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -354,72 +356,103 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
* GP port style (32 ports banks)
*/
-#define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
+#define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) \
+ fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
#define PORT_GP_1(bank, pin, fn, sfx) PORT_GP_CFG_1(bank, pin, fn, sfx, 0)
-#define PORT_GP_CFG_4(bank, fn, sfx, cfg) \
- PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), PORT_GP_CFG_1(bank, 1, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), PORT_GP_CFG_1(bank, 3, fn, sfx, cfg)
+#define PORT_GP_CFG_4(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 1, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 3, fn, sfx, cfg)
#define PORT_GP_4(bank, fn, sfx) PORT_GP_CFG_4(bank, fn, sfx, 0)
-#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
- PORT_GP_CFG_4(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 4, fn, sfx, cfg), PORT_GP_CFG_1(bank, 5, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), PORT_GP_CFG_1(bank, 7, fn, sfx, cfg)
+#define PORT_GP_CFG_8(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_4(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 4, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 5, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 7, fn, sfx, cfg)
#define PORT_GP_8(bank, fn, sfx) PORT_GP_CFG_8(bank, fn, sfx, 0)
-#define PORT_GP_CFG_9(bank, fn, sfx, cfg) \
- PORT_GP_CFG_8(bank, fn, sfx, cfg), \
+#define PORT_GP_CFG_9(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_8(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 8, fn, sfx, cfg)
#define PORT_GP_9(bank, fn, sfx) PORT_GP_CFG_9(bank, fn, sfx, 0)
-#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
- PORT_GP_CFG_8(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 8, fn, sfx, cfg), PORT_GP_CFG_1(bank, 9, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
+#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_9(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 9, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
-#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
- PORT_GP_CFG_12(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
+#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_12(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0)
-#define PORT_GP_CFG_15(bank, fn, sfx, cfg) \
- PORT_GP_CFG_14(bank, fn, sfx, cfg), \
+#define PORT_GP_CFG_15(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_14(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 14, fn, sfx, cfg)
#define PORT_GP_15(bank, fn, sfx) PORT_GP_CFG_15(bank, fn, sfx, 0)
-#define PORT_GP_CFG_16(bank, fn, sfx, cfg) \
- PORT_GP_CFG_14(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 14, fn, sfx, cfg), PORT_GP_CFG_1(bank, 15, fn, sfx, cfg)
+#define PORT_GP_CFG_16(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_15(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 15, fn, sfx, cfg)
#define PORT_GP_16(bank, fn, sfx) PORT_GP_CFG_16(bank, fn, sfx, 0)
-#define PORT_GP_CFG_18(bank, fn, sfx, cfg) \
- PORT_GP_CFG_16(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 16, fn, sfx, cfg), PORT_GP_CFG_1(bank, 17, fn, sfx, cfg)
+#define PORT_GP_CFG_17(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_16(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 16, fn, sfx, cfg)
+#define PORT_GP_17(bank, fn, sfx) PORT_GP_CFG_17(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_18(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_17(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 17, fn, sfx, cfg)
#define PORT_GP_18(bank, fn, sfx) PORT_GP_CFG_18(bank, fn, sfx, 0)
-#define PORT_GP_CFG_26(bank, fn, sfx, cfg) \
- PORT_GP_CFG_18(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 18, fn, sfx, cfg), PORT_GP_CFG_1(bank, 19, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 20, fn, sfx, cfg), PORT_GP_CFG_1(bank, 21, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 22, fn, sfx, cfg), PORT_GP_CFG_1(bank, 23, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), PORT_GP_CFG_1(bank, 25, fn, sfx, cfg)
+#define PORT_GP_CFG_23(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_18(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 18, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 19, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 20, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 21, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 22, fn, sfx, cfg)
+#define PORT_GP_23(bank, fn, sfx) PORT_GP_CFG_23(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_24(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_23(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 23, fn, sfx, cfg)
+#define PORT_GP_24(bank, fn, sfx) PORT_GP_CFG_24(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_26(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_24(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 25, fn, sfx, cfg)
#define PORT_GP_26(bank, fn, sfx) PORT_GP_CFG_26(bank, fn, sfx, 0)
-#define PORT_GP_CFG_28(bank, fn, sfx, cfg) \
- PORT_GP_CFG_26(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), PORT_GP_CFG_1(bank, 27, fn, sfx, cfg)
+#define PORT_GP_CFG_28(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_26(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 27, fn, sfx, cfg)
#define PORT_GP_28(bank, fn, sfx) PORT_GP_CFG_28(bank, fn, sfx, 0)
-#define PORT_GP_CFG_30(bank, fn, sfx, cfg) \
- PORT_GP_CFG_28(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 28, fn, sfx, cfg), PORT_GP_CFG_1(bank, 29, fn, sfx, cfg)
+#define PORT_GP_CFG_29(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_28(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 28, fn, sfx, cfg)
+#define PORT_GP_29(bank, fn, sfx) PORT_GP_CFG_29(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_30(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_29(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 29, fn, sfx, cfg)
#define PORT_GP_30(bank, fn, sfx) PORT_GP_CFG_30(bank, fn, sfx, 0)
-#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \
- PORT_GP_CFG_30(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), PORT_GP_CFG_1(bank, 31, fn, sfx, cfg)
+#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_30(bank, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 31, fn, sfx, cfg)
#define PORT_GP_32(bank, fn, sfx) PORT_GP_CFG_32(bank, fn, sfx, 0)
#define PORT_GP_32_REV(bank, fn, sfx) \
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 19952f73fa8c..7f3041697813 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -7,7 +7,7 @@
* Licensed under GPLv2 or later.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -6158,6 +6158,3 @@ static int __init atlas7_gpio_init(void)
return platform_driver_register(&atlas7_gpio_driver);
}
subsys_initcall(atlas7_gpio_init);
-
-MODULE_DESCRIPTION("SIRFSOC Atlas7 pin control driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 762c0c9c1278..0df72be60704 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -1,6 +1,11 @@
/*
* pinmux driver for CSR SiRFprimaII
*
+ * Authors:
+ * Rongjun Ying <rongjun.ying@csr.com>
+ * Yuping Luo <yuping.luo@csr.com>
+ * Barry Song <baohua.song@csr.com>
+ *
* Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
* company.
*
@@ -8,7 +13,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -884,9 +888,3 @@ static int __init sirfsoc_gpio_init(void)
return sirfsoc_gpio_probe(np);
}
subsys_initcall(sirfsoc_gpio_init);
-
-MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
-MODULE_AUTHOR("Yuping Luo <yuping.luo@csr.com>");
-MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
-MODULE_DESCRIPTION("SIRFSOC pin control driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index 4c40dae384d1..c03dce7a22df 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -6,16 +6,17 @@ config PINCTRL_STM32
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
+ select MFD_SYSCON
config PINCTRL_STM32F429
bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429
- depends on OF
+ depends on OF && IRQ_DOMAIN_HIERARCHY
default MACH_STM32F429
select PINCTRL_STM32
config PINCTRL_STM32F746
bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746
- depends on OF
+ depends on OF && IRQ_DOMAIN_HIERARCHY
default MACH_STM32F746
select PINCTRL_STM32
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 4ae596bf19b5..200667f08c37 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -20,6 +22,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -40,6 +43,7 @@
#define STM32_GPIO_AFRH 0x24
#define STM32_GPIO_PINS_PER_BANK 16
+#define STM32_GPIO_IRQ_LINE 16
#define gpio_range_to_bank(chip) \
container_of(chip, struct stm32_gpio_bank, range)
@@ -65,6 +69,8 @@ struct stm32_gpio_bank {
spinlock_t lock;
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range range;
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
};
struct stm32_pinctrl {
@@ -77,6 +83,9 @@ struct stm32_pinctrl {
struct stm32_gpio_bank *banks;
unsigned nbanks;
const struct stm32_pinctrl_match_data *match_data;
+ struct irq_domain *domain;
+ struct regmap *regmap;
+ struct regmap_field *irqmux[STM32_GPIO_PINS_PER_BANK];
};
static inline int stm32_gpio_pin(int gpio)
@@ -174,17 +183,113 @@ static int stm32_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static struct gpio_chip stm32_gpio_template = {
+
+static int stm32_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = bank->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = stm32_gpio_free,
.get = stm32_gpio_get,
.set = stm32_gpio_set,
.direction_input = stm32_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
+ .to_irq = stm32_gpio_to_irq,
};
-/* Pinctrl functions */
+static struct irq_chip stm32_gpio_irq_chip = {
+ .name = "stm32gpio",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static int stm32_gpio_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if ((fwspec->param_count != 2) ||
+ (fwspec->param[0] >= STM32_GPIO_IRQ_LINE))
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+static void stm32_gpio_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct stm32_gpio_bank *bank = d->host_data;
+ struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+
+ regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->range.id);
+}
+
+static int stm32_gpio_domain_alloc(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct stm32_gpio_bank *bank = d->host_data;
+ struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ int ret;
+
+ hwirq = fwspec->param[0];
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = fwspec->param[0];
+ parent_fwspec.param[1] = fwspec->param[1];
+
+ irq_domain_set_hwirq_and_chip(d, virq, hwirq, &stm32_gpio_irq_chip,
+ bank);
+
+ ret = gpiochip_lock_as_irq(&bank->gpio_chip, hwirq);
+ if (ret) {
+ dev_err(pctl->dev, "Unable to configure STM32 %s%ld as IRQ\n",
+ bank->gpio_chip.label, hwirq);
+ return ret;
+ }
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &parent_fwspec);
+ if (ret)
+ gpiochip_unlock_as_irq(&bank->gpio_chip, hwirq);
+
+ return ret;
+}
+
+static void stm32_gpio_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct stm32_gpio_bank *bank = d->host_data;
+ struct irq_data *data = irq_get_irq_data(virq);
+
+ irq_domain_free_irqs_common(d, virq, nr_irqs);
+ gpiochip_unlock_as_irq(&bank->gpio_chip, data->hwirq);
+}
+
+static const struct irq_domain_ops stm32_gpio_domain_ops = {
+ .translate = stm32_gpio_domain_translate,
+ .alloc = stm32_gpio_domain_alloc,
+ .free = stm32_gpio_domain_free,
+ .activate = stm32_gpio_domain_activate,
+};
+
+/* Pinctrl functions */
static struct stm32_pinctrl_group *
stm32_pctrl_find_group_by_pin(struct stm32_pinctrl *pctl, u32 pin)
{
@@ -857,6 +962,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
range->pin_base = range->base = range->id * STM32_GPIO_PINS_PER_BANK;
range->npins = bank->gpio_chip.ngpio;
range->gc = &bank->gpio_chip;
+
+ /* create irq hierarchical domain */
+ bank->fwnode = of_node_to_fwnode(np);
+
+ bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
+ STM32_GPIO_IRQ_LINE, bank->fwnode,
+ &stm32_gpio_domain_ops, bank);
+
+ if (!bank->domain)
+ return -ENODEV;
+
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
@@ -867,6 +983,47 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
return 0;
}
+static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
+ struct stm32_pinctrl *pctl)
+{
+ struct device_node *np = pdev->dev.of_node, *parent;
+ struct device *dev = &pdev->dev;
+ struct regmap *rm;
+ int offset, ret, i;
+
+ parent = of_irq_find_parent(np);
+ if (!parent)
+ return -ENXIO;
+
+ pctl->domain = irq_find_host(parent);
+ if (!pctl->domain)
+ return -ENXIO;
+
+ pctl->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(pctl->regmap))
+ return PTR_ERR(pctl->regmap);
+
+ rm = pctl->regmap;
+
+ ret = of_property_read_u32_index(np, "st,syscfg", 1, &offset);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < STM32_GPIO_PINS_PER_BANK; i++) {
+ struct reg_field mux;
+
+ mux.reg = offset + (i / 4) * 4;
+ mux.lsb = (i % 4) * 4;
+ mux.msb = mux.lsb + 3;
+
+ pctl->irqmux[i] = devm_regmap_field_alloc(dev, rm, mux);
+ if (IS_ERR(pctl->irqmux[i]))
+ return PTR_ERR(pctl->irqmux[i]);
+ }
+
+ return 0;
+}
+
static int stm32_pctrl_build_state(struct platform_device *pdev)
{
struct stm32_pinctrl *pctl = platform_get_drvdata(pdev);
@@ -935,6 +1092,10 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
+ if (ret)
+ return ret;
+
for_each_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
banks++;
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index aaf075b972f5..bff1ffc6f01e 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -17,6 +17,10 @@ config PINCTRL_SUN5I_A13
def_bool MACH_SUN5I
select PINCTRL_SUNXI
+config PINCTRL_GR8
+ def_bool MACH_SUN5I
+ select PINCTRL_SUNXI_COMMON
+
config PINCTRL_SUN6I_A31
def_bool MACH_SUN6I
select PINCTRL_SUNXI
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 2d8b64e222e0..95f93d0561fc 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -5,6 +5,7 @@ obj-y += pinctrl-sunxi.o
obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
obj-$(CONFIG_PINCTRL_SUN5I_A10S) += pinctrl-sun5i-a10s.o
obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o
+obj-$(CONFIG_PINCTRL_GR8) += pinctrl-gr8.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
obj-$(CONFIG_PINCTRL_SUN6I_A31S) += pinctrl-sun6i-a31s.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-gr8.c b/drivers/pinctrl/sunxi/pinctrl-gr8.c
new file mode 100644
index 000000000000..2904d2b7378b
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-gr8.c
@@ -0,0 +1,541 @@
+/*
+ * NextThing GR8 SoCs pinctrl driver.
+ *
+ * Copyright (C) 2016 Mylene Josserand
+ *
+ * Based on pinctrl-sun5i-a13.c
+ *
+ * Mylene Josserand <mylene.josserand@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun5i_gr8_pins[] = {
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* DO */
+ SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir0"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 17)), /* EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir0"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION_IRQ(0x6, 19)), /* EINT19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION_IRQ(0x6, 20)), /* EINT20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION_IRQ(0x6, 21)), /* EINT21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO */
+ SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DI */
+ SUNXI_FUNCTION(0x3, "spdif"), /* DI */
+ SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "spdif"), /* DO */
+ SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
+ SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
+ SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
+ SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
+ SUNXI_FUNCTION_IRQ(0x6, 28)), /* EINT28 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NRE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart2")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart2")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart2")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart2")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ECRS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ECOL */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXD2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXD3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXERR */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ERXDV */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXD2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXD3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "emac")), /* ETXERR*/
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "emac")), /* EMDC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "emac")), /* EMDIO */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "csi0"), /* PCLK */
+ SUNXI_FUNCTION(0x4, "spi2"), /* CS0 */
+ SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* ERR */
+ SUNXI_FUNCTION(0x3, "csi0"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "spi2"), /* CLK */
+ SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "csi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x4, "spi2")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* DVLD */
+ SUNXI_FUNCTION(0x3, "csi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x4, "spi2")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D4 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D5 */
+ SUNXI_FUNCTION(0x4, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D6 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D7 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "gps"), /* CLK */
+ SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "gps"), /* SIGN */
+ SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x2, "gps"), /* MAG */
+ SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x3, "ms"), /* BS */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 3)), /* EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "ms"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "ms"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ(0x6, 5)), /* EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "ms"), /* D1 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ(0x6, 6)), /* EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "ms"), /* D2 */
+ SUNXI_FUNCTION(0x5, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 7)), /* EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "ms"), /* D3 */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 8)), /* EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 9)), /* EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 10)), /* EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ(0x6, 11)), /* EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "pwm1"),
+ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
+};
+
+static const struct sunxi_pinctrl_desc sun5i_gr8_pinctrl_data = {
+ .pins = sun5i_gr8_pins,
+ .npins = ARRAY_SIZE(sun5i_gr8_pins),
+ .irq_banks = 1,
+};
+
+static int sun5i_gr8_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun5i_gr8_pinctrl_data);
+}
+
+static const struct of_device_id sun5i_gr8_pinctrl_match[] = {
+ { .compatible = "nextthing,gr8-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun5i_gr8_pinctrl_match);
+
+static struct platform_driver sun5i_gr8_pinctrl_driver = {
+ .probe = sun5i_gr8_pinctrl_probe,
+ .driver = {
+ .name = "gr8-pinctrl",
+ .of_match_table = sun5i_gr8_pinctrl_match,
+ },
+};
+module_platform_driver(sun5i_gr8_pinctrl_driver);
+
+MODULE_AUTHOR("Mylene Josserand <mylene.josserand@free-electrons.com");
+MODULE_DESCRIPTION("NextThing GR8 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 022863ab0c58..a70b52957e24 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -896,10 +896,18 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out")),
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 27),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ /*
+ * The SPDIF block is not referenced at all in the A31 user
+ * manual. However, it is described in the leaked code and in the
+ * configuration files supplied by vendors.
+ */
+ SUNXI_FUNCTION(0x3, "spdif")), /* SPDIF IN */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 28),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ /* Undocumented mux function - see above */
+ SUNXI_FUNCTION(0x3, "spdif")), /* SPDIF OUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 29),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 26a2ad3b651f..518a92df4418 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -60,7 +60,6 @@ static const struct sunxi_desc_pin sun8i_h3_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "sim"), /* PWREN */
- SUNXI_FUNCTION(0x3, "pwm1"),
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 54455af566ec..0facbea5f465 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include "../core.h"
-#include "../../gpio/gpiolib.h"
#include "pinctrl-sunxi.h"
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 6c084b266651..04053fe1e980 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#define EC_COMMAND_RETRIES 50
@@ -234,11 +235,44 @@ static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
return ret;
}
+static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
+ u16 cmd, u32 *mask)
+{
+ struct ec_params_get_cmd_versions *pver;
+ struct ec_response_get_cmd_versions *rver;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_CMD_VERSIONS;
+ msg->insize = sizeof(*rver);
+ msg->outsize = sizeof(*pver);
+
+ pver = (struct ec_params_get_cmd_versions *)msg->data;
+ pver->cmd = cmd;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret > 0) {
+ rver = (struct ec_response_get_cmd_versions *)msg->data;
+ *mask = rver->version_mask;
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
struct cros_ec_command *proto_msg;
struct ec_response_get_protocol_info *proto_info;
+ u32 ver_mask = 0;
int ret;
proto_msg = kzalloc(sizeof(*proto_msg) + sizeof(*proto_info),
@@ -328,6 +362,15 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
goto exit;
}
+ /* Probe if MKBP event is supported */
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
+ if (ret < 0 || ver_mask == 0)
+ ec_dev->mkbp_event_supported = 0;
+ else
+ ec_dev->mkbp_event_supported = 1;
+
exit:
kfree(proto_msg);
return ret;
@@ -397,3 +440,52 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
+
+static int get_next_event(struct cros_ec_device *ec_dev)
+{
+ u8 buffer[sizeof(struct cros_ec_command) + sizeof(ec_dev->event_data)];
+ struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+ int ret;
+
+ msg->version = 0;
+ msg->command = EC_CMD_GET_NEXT_EVENT;
+ msg->insize = sizeof(ec_dev->event_data);
+ msg->outsize = 0;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret > 0) {
+ ec_dev->event_size = ret - 1;
+ memcpy(&ec_dev->event_data, msg->data,
+ sizeof(ec_dev->event_data));
+ }
+
+ return ret;
+}
+
+static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
+{
+ u8 buffer[sizeof(struct cros_ec_command) +
+ sizeof(ec_dev->event_data.data)];
+ struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+
+ msg->version = 0;
+ msg->command = EC_CMD_MKBP_STATE;
+ msg->insize = sizeof(ec_dev->event_data.data);
+ msg->outsize = 0;
+
+ ec_dev->event_size = cros_ec_cmd_xfer(ec_dev, msg);
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_KEY_MATRIX;
+ memcpy(&ec_dev->event_data.data, msg->data,
+ sizeof(ec_dev->event_data.data));
+
+ return ec_dev->event_size;
+}
+
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev)
+{
+ if (ec_dev->mkbp_event_supported)
+ return get_next_event(ec_dev);
+ else
+ return get_keyboard_state_event(ec_dev);
+}
+EXPORT_SYMBOL(cros_ec_get_next_event);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index acd4a1524a1e..63454b5cac27 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,517 +1,3 @@
-menuconfig POWER_SUPPLY
- bool "Power supply class support"
- help
- Say Y here to enable power supply class support. This allows
- power supply (batteries, AC, USB) monitoring by userspace
- via sysfs and uevent (if available) and/or APM kernel interface
- (if selected below).
-
-if POWER_SUPPLY
-
-config POWER_SUPPLY_DEBUG
- bool "Power supply debug"
- help
- Say Y here to enable debugging messages for power supply class
- and drivers.
-
-config PDA_POWER
- tristate "Generic PDA/phone power driver"
- depends on !S390
- help
- Say Y here to enable generic power driver for PDAs and phones with
- one or two external power supplies (AC/USB) connected to main and
- backup batteries, and optional builtin charger.
-
-config APM_POWER
- tristate "APM emulation for class batteries"
- depends on APM_EMULATION
- help
- Say Y here to enable support APM status emulation using
- battery class devices.
-
-config GENERIC_ADC_BATTERY
- tristate "Generic battery support using IIO"
- depends on IIO
- help
- Say Y here to enable support for the generic battery driver
- which uses IIO framework to read adc.
-
-config MAX8925_POWER
- tristate "MAX8925 battery charger support"
- depends on MFD_MAX8925
- help
- Say Y here to enable support for the battery charger in the Maxim
- MAX8925 PMIC.
-
-config WM831X_BACKUP
- tristate "WM831X backup battery charger support"
- depends on MFD_WM831X
- help
- Say Y here to enable support for the backup battery charger
- in the Wolfson Microelectronics WM831x PMICs.
-
-config WM831X_POWER
- tristate "WM831X PMU support"
- depends on MFD_WM831X
- help
- Say Y here to enable support for the power management unit
- provided by Wolfson Microelectronics WM831x PMICs.
-
-config WM8350_POWER
- tristate "WM8350 PMU support"
- depends on MFD_WM8350
- help
- Say Y here to enable support for the power management unit
- provided by the Wolfson Microelectronics WM8350 PMIC.
-
-config TEST_POWER
- tristate "Test power driver"
- help
- This driver is used for testing. It's safe to say M here.
-
-config BATTERY_88PM860X
- tristate "Marvell 88PM860x battery driver"
- depends on MFD_88PM860X
- help
- Say Y here to enable battery monitor for Marvell 88PM860x chip.
-
-config BATTERY_ACT8945A
- tristate "Active-semi ACT8945A charger driver"
- depends on MFD_ACT8945A || COMPILE_TEST
- help
- Say Y here to enable support for power supply provided by
- Active-semi ActivePath ACT8945A charger.
-
-config BATTERY_DS2760
- tristate "DS2760 battery driver (HP iPAQ & others)"
- depends on W1 && W1_SLAVE_DS2760
- help
- Say Y here to enable support for batteries with ds2760 chip.
-
-config BATTERY_DS2780
- tristate "DS2780 battery driver"
- depends on HAS_IOMEM
- select W1
- select W1_SLAVE_DS2780
- help
- Say Y here to enable support for batteries with ds2780 chip.
-
-config BATTERY_DS2781
- tristate "DS2781 battery driver"
- depends on HAS_IOMEM
- select W1
- select W1_SLAVE_DS2781
- help
- If you enable this you will have the DS2781 battery driver support.
-
- The battery monitor chip is used in many batteries/devices
- as the one who is responsible for charging/discharging/monitoring
- Li+ batteries.
-
- If you are unsure, say N.
-
-config BATTERY_DS2782
- tristate "DS2782/DS2786 standalone gas-gauge"
- depends on I2C
- help
- Say Y here to enable support for the DS2782/DS2786 standalone battery
- gas-gauge.
-
-config BATTERY_PMU
- tristate "Apple PMU battery"
- depends on PPC32 && ADB_PMU
- help
- Say Y here to expose battery information on Apple machines
- through the generic battery class.
-
-config BATTERY_OLPC
- tristate "One Laptop Per Child battery"
- depends on X86_32 && OLPC
- help
- Say Y to enable support for the battery on the OLPC laptop.
-
-config BATTERY_TOSA
- tristate "Sharp SL-6000 (tosa) battery"
- depends on MACH_TOSA && MFD_TC6393XB && TOUCHSCREEN_WM97XX
- help
- Say Y to enable support for the battery on the Sharp Zaurus
- SL-6000 (tosa) models.
-
-config BATTERY_COLLIE
- tristate "Sharp SL-5500 (collie) battery"
- depends on SA1100_COLLIE && MCP_UCB1200
- help
- Say Y to enable support for the battery on the Sharp Zaurus
- SL-5500 (collie) models.
-
-config BATTERY_IPAQ_MICRO
- tristate "iPAQ Atmel Micro ASIC battery driver"
- depends on MFD_IPAQ_MICRO
- help
- Choose this option if you want to monitor battery status on
- Compaq/HP iPAQ h3100 and h3600.
-
-config BATTERY_WM97XX
- bool "WM97xx generic battery driver"
- depends on TOUCHSCREEN_WM97XX=y
- help
- Say Y to enable support for battery measured by WM97xx aux port.
-
-config BATTERY_SBS
- tristate "SBS Compliant gas gauge"
- depends on I2C
- help
- Say Y to include support for SBS battery driver for SBS-compliant
- gas gauges.
-
-config BATTERY_BQ27XXX
- tristate "BQ27xxx battery driver"
- help
- Say Y here to enable support for batteries with BQ27xxx chips.
-
-config BATTERY_BQ27XXX_I2C
- tristate "BQ27xxx I2C support"
- depends on BATTERY_BQ27XXX
- depends on I2C
- default y
- help
- Say Y here to enable support for batteries with BQ27xxx chips
- connected over an I2C bus.
-
-config BATTERY_DA9030
- tristate "DA9030 battery driver"
- depends on PMIC_DA903X
- help
- Say Y here to enable support for batteries charger integrated into
- DA9030 PMIC.
-
-config BATTERY_DA9052
- tristate "Dialog DA9052 Battery"
- depends on PMIC_DA9052
- help
- Say Y here to enable support for batteries charger integrated into
- DA9052 PMIC.
-
-config CHARGER_DA9150
- tristate "Dialog Semiconductor DA9150 Charger support"
- depends on MFD_DA9150
- depends on DA9150_GPADC
- depends on IIO
- help
- Say Y here to enable support for charger unit of the DA9150
- Integrated Charger & Fuel-Gauge IC.
-
- This driver can also be built as a module. If so, the module will be
- called da9150-charger.
-
-config BATTERY_DA9150
- tristate "Dialog Semiconductor DA9150 Fuel Gauge support"
- depends on MFD_DA9150
- help
- Say Y here to enable support for the Fuel-Gauge unit of the DA9150
- Integrated Charger & Fuel-Gauge IC
-
- This driver can also be built as a module. If so, the module will be
- called da9150-fg.
-
-config AXP288_CHARGER
- tristate "X-Powers AXP288 Charger"
- depends on MFD_AXP20X && EXTCON_AXP288
- help
- Say yes here to have support X-Power AXP288 power management IC (PMIC)
- integrated charger.
-
-config AXP288_FUEL_GAUGE
- tristate "X-Powers AXP288 Fuel Gauge"
- depends on MFD_AXP20X && IIO
- help
- Say yes here to have support for X-Power power management IC (PMIC)
- Fuel Gauge. The device provides battery statistics and status
- monitoring as well as alerts for battery over/under voltage and
- over/under temperature.
-
-config BATTERY_MAX17040
- tristate "Maxim MAX17040 Fuel Gauge"
- depends on I2C
- help
- MAX17040 is fuel-gauge systems for lithium-ion (Li+) batteries
- in handheld and portable equipment. The MAX17040 is configured
- to operate with a single lithium cell
-
-config BATTERY_MAX17042
- tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
- depends on I2C
- select REGMAP_I2C
- help
- MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
- in handheld and portable equipment. The MAX17042 is configured
- to operate with a single lithium cell. MAX8997 and MAX8966 are
- multi-function devices that include fuel gauages that are compatible
- with MAX17042. This driver also supports max17047/50 chips which are
- improved version of max17042.
-
-config BATTERY_Z2
- tristate "Z2 battery driver"
- depends on I2C && MACH_ZIPIT2
- help
- Say Y to include support for the battery on the Zipit Z2.
-
-config BATTERY_S3C_ADC
- tristate "Battery driver for Samsung ADC based monitoring"
- depends on S3C_ADC
- help
- Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
-
-config BATTERY_TWL4030_MADC
- tristate "TWL4030 MADC battery driver"
- depends on TWL4030_MADC
- help
- Say Y here to enable this dumb driver for batteries managed
- through the TWL4030 MADC.
-
-config CHARGER_88PM860X
- tristate "Marvell 88PM860x Charger driver"
- depends on MFD_88PM860X && BATTERY_88PM860X
- help
- Say Y here to enable charger for Marvell 88PM860x chip.
-
-config CHARGER_PCF50633
- tristate "NXP PCF50633 MBC"
- depends on MFD_PCF50633
- help
- Say Y to include support for NXP PCF50633 Main Battery Charger.
-
-config BATTERY_JZ4740
- tristate "Ingenic JZ4740 battery"
- depends on MACH_JZ4740
- depends on MFD_JZ4740_ADC
- help
- Say Y to enable support for the battery on Ingenic JZ4740 based
- boards.
-
- This driver can be build as a module. If so, the module will be
- called jz4740-battery.
-
-config BATTERY_INTEL_MID
- tristate "Battery driver for Intel MID platforms"
- depends on INTEL_SCU_IPC && SPI
- help
- Say Y here to enable the battery driver on Intel MID
- platforms.
-
-config BATTERY_RX51
- tristate "Nokia RX-51 (N900) battery driver"
- depends on TWL4030_MADC
- help
- Say Y here to enable support for battery information on Nokia
- RX-51, also known as N900 tablet.
-
-config CHARGER_ISP1704
- tristate "ISP1704 USB Charger Detection"
- depends on USB_PHY
- depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
- help
- Say Y to enable support for USB Charger Detection with
- ISP1707/ISP1704 USB transceivers.
-
-config CHARGER_MAX8903
- tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
- help
- Say Y to enable support for the MAX8903 DC-DC charger and sysfs.
- The driver supports controlling charger-enable and current-limit
- pins based on the status of charger connections with interrupt
- handlers.
-
-config CHARGER_TWL4030
- tristate "OMAP TWL4030 BCI charger driver"
- depends on IIO && TWL4030_CORE
- help
- Say Y here to enable support for TWL4030 Battery Charge Interface.
-
-config CHARGER_LP8727
- tristate "TI/National Semiconductor LP8727 charger driver"
- depends on I2C
- help
- Say Y here to enable support for LP8727 Charger Driver.
-
-config CHARGER_LP8788
- tristate "TI LP8788 charger driver"
- depends on MFD_LP8788
- depends on LP8788_ADC
- depends on IIO
- help
- Say Y to enable support for the LP8788 linear charger.
-
-config CHARGER_GPIO
- tristate "GPIO charger"
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y to include support for chargers which report their online status
- through a GPIO pin.
-
- This driver can be build as a module. If so, the module will be
- called gpio-charger.
-
-config CHARGER_MANAGER
- bool "Battery charger manager for multiple chargers"
- depends on REGULATOR
- select EXTCON
- help
- Say Y to enable charger-manager support, which allows multiple
- chargers attached to a battery and multiple batteries attached to a
- system. The charger-manager also can monitor charging status in
- runtime and in suspend-to-RAM by waking up the system periodically
- with help of suspend_again support.
-
-config CHARGER_MAX14577
- tristate "Maxim MAX14577/77836 battery charger driver"
- depends on MFD_MAX14577
- help
- Say Y to enable support for the battery charger control sysfs and
- platform data of MAX14577/77836 MUICs.
-
-config CHARGER_MAX77693
- tristate "Maxim MAX77693 battery charger driver"
- depends on MFD_MAX77693
- help
- Say Y to enable support for the Maxim MAX77693 battery charger.
-
-config CHARGER_MAX8997
- tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
- depends on MFD_MAX8997 && REGULATOR_MAX8997
- help
- Say Y to enable support for the battery charger control sysfs and
- platform data of MAX8997/LP3974 PMICs.
-
-config CHARGER_MAX8998
- tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
- depends on MFD_MAX8998 && REGULATOR_MAX8998
- help
- Say Y to enable support for the battery charger control sysfs and
- platform data of MAX8998/LP3974 PMICs.
-
-config CHARGER_QCOM_SMBB
- tristate "Qualcomm Switch-Mode Battery Charger and Boost"
- depends on MFD_SPMI_PMIC || COMPILE_TEST
- depends on OF
- depends on EXTCON
- help
- Say Y to include support for the Switch-Mode Battery Charger and
- Boost (SMBB) hardware found in Qualcomm PM8941 PMICs. The charger
- is an integrated, single-cell lithium-ion battery charger. DT
- configuration is required for loading, see the devicetree
- documentation for more detail. The base name for this driver is
- 'pm8941_charger'.
-
-config CHARGER_BQ2415X
- tristate "TI BQ2415x battery charger driver"
- depends on I2C
- help
- Say Y to enable support for the TI BQ2415x battery charger
- PMICs.
-
- You'll need this driver to charge batteries on e.g. Nokia
- RX-51/N900.
-
-config CHARGER_BQ24190
- tristate "TI BQ24190 battery charger driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y to enable support for the TI BQ24190 battery charger.
-
-config CHARGER_BQ24257
- tristate "TI BQ24250/24251/24257 battery charger driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- depends on REGMAP_I2C
- help
- Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery
- chargers.
-
-config CHARGER_BQ24735
- tristate "TI BQ24735 battery charger support"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y to enable support for the TI BQ24735 battery charger.
-
-config CHARGER_BQ25890
- tristate "TI BQ25890 battery charger driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- select REGMAP_I2C
- help
- Say Y to enable support for the TI BQ25890 battery charger.
-
-config CHARGER_SMB347
- tristate "Summit Microelectronics SMB347 Battery Charger"
- depends on I2C
- select REGMAP_I2C
- help
- Say Y to include support for Summit Microelectronics SMB347
- Battery Charger.
-
-config CHARGER_TPS65090
- tristate "TPS65090 battery charger driver"
- depends on MFD_TPS65090
- help
- Say Y here to enable support for battery charging with TPS65090
- PMIC chips.
-
-config CHARGER_TPS65217
- tristate "TPS65217 battery charger driver"
- depends on MFD_TPS65217
- help
- Say Y here to enable support for battery charging with TPS65217
- PMIC chips.
-
-config BATTERY_GAUGE_LTC2941
- tristate "LTC2941/LTC2943 Battery Gauge Driver"
- depends on I2C
- help
- Say Y here to include support for LTC2941 and LTC2943 Battery
- Gauge IC. The driver reports the charge count continuously, and
- measures the voltage and temperature every 10 seconds.
-
-config AB8500_BM
- bool "AB8500 Battery Management Driver"
- depends on AB8500_CORE && AB8500_GPADC
- help
- Say Y to include support for AB8500 battery management.
-
-config BATTERY_GOLDFISH
- tristate "Goldfish battery driver"
- depends on GOLDFISH || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say Y to enable support for the battery and AC power in the
- Goldfish emulator.
-
-config BATTERY_RT5033
- tristate "RT5033 fuel gauge support"
- depends on MFD_RT5033
- help
- This adds support for battery fuel gauge in Richtek RT5033 PMIC.
- The fuelgauge calculates and determines the battery state of charge
- according to battery open circuit voltage.
-
-config CHARGER_RT9455
- tristate "Richtek RT9455 battery charger driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- select REGMAP_I2C
- help
- Say Y to enable support for Richtek RT9455 battery charger.
-
-config AXP20X_POWER
- tristate "AXP20x power supply driver"
- depends on MFD_AXP20X
- help
- This driver provides support for the power supply features of
- AXP20x PMIC.
-
-endif # POWER_SUPPLY
-
-source "drivers/power/reset/Kconfig"
source "drivers/power/avs/Kconfig"
+source "drivers/power/reset/Kconfig"
+source "drivers/power/supply/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index e46b75d448a5..ff35c712d824 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,76 +1,3 @@
-subdir-ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
-
-power_supply-y := power_supply_core.o
-power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
-power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
-
-obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
-obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
-
-obj-$(CONFIG_PDA_POWER) += pda_power.o
-obj-$(CONFIG_APM_POWER) += apm_power.o
-obj-$(CONFIG_AXP20X_POWER) += axp20x_usb_power.o
-obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
-obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
-obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
-obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
-obj-$(CONFIG_TEST_POWER) += test_power.o
-
-obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
-obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
-obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
-obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
-obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
-obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
-obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
-obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
-obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
-obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
-obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
-obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
-obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
-obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
-obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
-obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
-obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
-obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
-obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
-obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o
-obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o
-obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
-obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
-obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
-obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
-obj-$(CONFIG_CHARGER_RT9455) += rt9455_charger.o
-obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
-obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
-obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
-obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
-obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
-obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
-obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
-obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
-obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
-obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
-obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
-obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
-obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
-obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
-obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
-obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
-obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
-obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
-obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
-obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
-obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
-obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
-obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o
-obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
-obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
-obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
-obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
-obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_POWER_RESET) += reset/
-obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
-obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
+obj-$(CONFIG_POWER_SUPPLY) += supply/
diff --git a/drivers/power/act8945a_charger.c b/drivers/power/act8945a_charger.c
deleted file mode 100644
index b5c00e45741e..000000000000
--- a/drivers/power/act8945a_charger.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Power supply driver for the Active-semi ACT8945A PMIC
- *
- * Copyright (C) 2015 Atmel Corporation
- *
- * Author: Wenyou Yang <wenyou.yang@atmel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/regmap.h>
-
-static const char *act8945a_charger_model = "ACT8945A";
-static const char *act8945a_charger_manufacturer = "Active-semi";
-
-/**
- * ACT8945A Charger Register Map
- */
-
-/* 0x70: Reserved */
-#define ACT8945A_APCH_CFG 0x71
-#define ACT8945A_APCH_STATUS 0x78
-#define ACT8945A_APCH_CTRL 0x79
-#define ACT8945A_APCH_STATE 0x7A
-
-/* ACT8945A_APCH_CFG */
-#define APCH_CFG_OVPSET (0x3 << 0)
-#define APCH_CFG_OVPSET_6V6 (0x0 << 0)
-#define APCH_CFG_OVPSET_7V (0x1 << 0)
-#define APCH_CFG_OVPSET_7V5 (0x2 << 0)
-#define APCH_CFG_OVPSET_8V (0x3 << 0)
-#define APCH_CFG_PRETIMO (0x3 << 2)
-#define APCH_CFG_PRETIMO_40_MIN (0x0 << 2)
-#define APCH_CFG_PRETIMO_60_MIN (0x1 << 2)
-#define APCH_CFG_PRETIMO_80_MIN (0x2 << 2)
-#define APCH_CFG_PRETIMO_DISABLED (0x3 << 2)
-#define APCH_CFG_TOTTIMO (0x3 << 4)
-#define APCH_CFG_TOTTIMO_3_HOUR (0x0 << 4)
-#define APCH_CFG_TOTTIMO_4_HOUR (0x1 << 4)
-#define APCH_CFG_TOTTIMO_5_HOUR (0x2 << 4)
-#define APCH_CFG_TOTTIMO_DISABLED (0x3 << 4)
-#define APCH_CFG_SUSCHG (0x1 << 7)
-
-#define APCH_STATUS_CHGDAT BIT(0)
-#define APCH_STATUS_INDAT BIT(1)
-#define APCH_STATUS_TEMPDAT BIT(2)
-#define APCH_STATUS_TIMRDAT BIT(3)
-#define APCH_STATUS_CHGSTAT BIT(4)
-#define APCH_STATUS_INSTAT BIT(5)
-#define APCH_STATUS_TEMPSTAT BIT(6)
-#define APCH_STATUS_TIMRSTAT BIT(7)
-
-#define APCH_CTRL_CHGEOCOUT BIT(0)
-#define APCH_CTRL_INDIS BIT(1)
-#define APCH_CTRL_TEMPOUT BIT(2)
-#define APCH_CTRL_TIMRPRE BIT(3)
-#define APCH_CTRL_CHGEOCIN BIT(4)
-#define APCH_CTRL_INCON BIT(5)
-#define APCH_CTRL_TEMPIN BIT(6)
-#define APCH_CTRL_TIMRTOT BIT(7)
-
-#define APCH_STATE_ACINSTAT (0x1 << 1)
-#define APCH_STATE_CSTATE (0x3 << 4)
-#define APCH_STATE_CSTATE_SHIFT 4
-#define APCH_STATE_CSTATE_DISABLED 0x00
-#define APCH_STATE_CSTATE_EOC 0x01
-#define APCH_STATE_CSTATE_FAST 0x02
-#define APCH_STATE_CSTATE_PRE 0x03
-
-struct act8945a_charger {
- struct regmap *regmap;
- bool battery_temperature;
-};
-
-static int act8945a_get_charger_state(struct regmap *regmap, int *val)
-{
- int ret;
- unsigned int status, state;
-
- ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
- if (ret < 0)
- return ret;
-
- ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
- if (ret < 0)
- return ret;
-
- state &= APCH_STATE_CSTATE;
- state >>= APCH_STATE_CSTATE_SHIFT;
-
- if (state == APCH_STATE_CSTATE_EOC) {
- if (status & APCH_STATUS_CHGDAT)
- *val = POWER_SUPPLY_STATUS_FULL;
- else
- *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
- } else if ((state == APCH_STATE_CSTATE_FAST) ||
- (state == APCH_STATE_CSTATE_PRE)) {
- *val = POWER_SUPPLY_STATUS_CHARGING;
- } else {
- *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
-
- return 0;
-}
-
-static int act8945a_get_charge_type(struct regmap *regmap, int *val)
-{
- int ret;
- unsigned int state;
-
- ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
- if (ret < 0)
- return ret;
-
- state &= APCH_STATE_CSTATE;
- state >>= APCH_STATE_CSTATE_SHIFT;
-
- switch (state) {
- case APCH_STATE_CSTATE_PRE:
- *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
- break;
- case APCH_STATE_CSTATE_FAST:
- *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
- break;
- case APCH_STATE_CSTATE_EOC:
- case APCH_STATE_CSTATE_DISABLED:
- default:
- *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
- }
-
- return 0;
-}
-
-static int act8945a_get_battery_health(struct act8945a_charger *charger,
- struct regmap *regmap, int *val)
-{
- int ret;
- unsigned int status;
-
- ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
- if (ret < 0)
- return ret;
-
- if (charger->battery_temperature && !(status & APCH_STATUS_TEMPDAT))
- *val = POWER_SUPPLY_HEALTH_OVERHEAT;
- else if (!(status & APCH_STATUS_INDAT))
- *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- else if (status & APCH_STATUS_TIMRDAT)
- *val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
- else
- *val = POWER_SUPPLY_HEALTH_GOOD;
-
- return 0;
-}
-
-static enum power_supply_property act8945a_charger_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_CHARGE_TYPE,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_MODEL_NAME,
- POWER_SUPPLY_PROP_MANUFACTURER
-};
-
-static int act8945a_charger_get_property(struct power_supply *psy,
- enum power_supply_property prop,
- union power_supply_propval *val)
-{
- struct act8945a_charger *charger = power_supply_get_drvdata(psy);
- struct regmap *regmap = charger->regmap;
- int ret = 0;
-
- switch (prop) {
- case POWER_SUPPLY_PROP_STATUS:
- ret = act8945a_get_charger_state(regmap, &val->intval);
- break;
- case POWER_SUPPLY_PROP_CHARGE_TYPE:
- ret = act8945a_get_charge_type(regmap, &val->intval);
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- ret = act8945a_get_battery_health(charger,
- regmap, &val->intval);
- break;
- case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = act8945a_charger_model;
- break;
- case POWER_SUPPLY_PROP_MANUFACTURER:
- val->strval = act8945a_charger_manufacturer;
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static const struct power_supply_desc act8945a_charger_desc = {
- .name = "act8945a-charger",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .get_property = act8945a_charger_get_property,
- .properties = act8945a_charger_props,
- .num_properties = ARRAY_SIZE(act8945a_charger_props),
-};
-
-#define DEFAULT_TOTAL_TIME_OUT 3
-#define DEFAULT_PRE_TIME_OUT 40
-#define DEFAULT_INPUT_OVP_THRESHOLD 6600
-
-static int act8945a_charger_config(struct device *dev,
- struct act8945a_charger *charger)
-{
- struct device_node *np = dev->of_node;
- enum of_gpio_flags flags;
- struct regmap *regmap = charger->regmap;
-
- u32 total_time_out;
- u32 pre_time_out;
- u32 input_voltage_threshold;
- int chglev_pin;
-
- unsigned int value = 0;
-
- if (!np) {
- dev_err(dev, "no charger of node\n");
- return -EINVAL;
- }
-
- charger->battery_temperature = of_property_read_bool(np,
- "active-semi,check-battery-temperature");
-
- chglev_pin = of_get_named_gpio_flags(np,
- "active-semi,chglev-gpios", 0, &flags);
-
- if (gpio_is_valid(chglev_pin)) {
- gpio_set_value(chglev_pin,
- ((flags == OF_GPIO_ACTIVE_LOW) ? 0 : 1));
- }
-
- if (of_property_read_u32(np,
- "active-semi,input-voltage-threshold-microvolt",
- &input_voltage_threshold))
- input_voltage_threshold = DEFAULT_INPUT_OVP_THRESHOLD;
-
- if (of_property_read_u32(np,
- "active-semi,precondition-timeout",
- &pre_time_out))
- pre_time_out = DEFAULT_PRE_TIME_OUT;
-
- if (of_property_read_u32(np, "active-semi,total-timeout",
- &total_time_out))
- total_time_out = DEFAULT_TOTAL_TIME_OUT;
-
- switch (input_voltage_threshold) {
- case 8000:
- value |= APCH_CFG_OVPSET_8V;
- break;
- case 7500:
- value |= APCH_CFG_OVPSET_7V5;
- break;
- case 7000:
- value |= APCH_CFG_OVPSET_7V;
- break;
- case 6600:
- default:
- value |= APCH_CFG_OVPSET_6V6;
- break;
- }
-
- switch (pre_time_out) {
- case 60:
- value |= APCH_CFG_PRETIMO_60_MIN;
- break;
- case 80:
- value |= APCH_CFG_PRETIMO_80_MIN;
- break;
- case 0:
- value |= APCH_CFG_PRETIMO_DISABLED;
- break;
- case 40:
- default:
- value |= APCH_CFG_PRETIMO_40_MIN;
- break;
- }
-
- switch (total_time_out) {
- case 4:
- value |= APCH_CFG_TOTTIMO_4_HOUR;
- break;
- case 5:
- value |= APCH_CFG_TOTTIMO_5_HOUR;
- break;
- case 0:
- value |= APCH_CFG_TOTTIMO_DISABLED;
- break;
- case 3:
- default:
- value |= APCH_CFG_TOTTIMO_3_HOUR;
- break;
- }
-
- return regmap_write(regmap, ACT8945A_APCH_CFG, value);
-}
-
-static int act8945a_charger_probe(struct platform_device *pdev)
-{
- struct act8945a_charger *charger;
- struct power_supply *psy;
- struct power_supply_config psy_cfg = {};
- int ret;
-
- charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
- if (!charger)
- return -ENOMEM;
-
- charger->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!charger->regmap) {
- dev_err(&pdev->dev, "Parent did not provide regmap\n");
- return -EINVAL;
- }
-
- ret = act8945a_charger_config(pdev->dev.parent, charger);
- if (ret)
- return ret;
-
- psy_cfg.of_node = pdev->dev.parent->of_node;
- psy_cfg.drv_data = charger;
-
- psy = devm_power_supply_register(&pdev->dev,
- &act8945a_charger_desc,
- &psy_cfg);
- if (IS_ERR(psy)) {
- dev_err(&pdev->dev, "failed to register power supply\n");
- return PTR_ERR(psy);
- }
-
- return 0;
-}
-
-static struct platform_driver act8945a_charger_driver = {
- .driver = {
- .name = "act8945a-charger",
- },
- .probe = act8945a_charger_probe,
-};
-module_platform_driver(act8945a_charger_driver);
-
-MODULE_DESCRIPTION("Active-semi ACT8945A ActivePath charger driver");
-MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index c70f1bffe038..09380857a1c5 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -139,7 +139,7 @@ static int rsctrl_probe(struct platform_device *pdev)
}
if (val >= WDT_MUX_NUMBER) {
- dev_err(dev, "ti,wdt-list property can contain"
+ dev_err(dev, "ti,wdt-list property can contain "
"only numbers < 4\n");
return -EINVAL;
}
diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
index 2dfbbce0f817..fb512183ace3 100644
--- a/drivers/power/reset/reboot-mode.c
+++ b/drivers/power/reset/reboot-mode.c
@@ -135,6 +135,65 @@ int reboot_mode_unregister(struct reboot_mode_driver *reboot)
}
EXPORT_SYMBOL_GPL(reboot_mode_unregister);
+static void devm_reboot_mode_release(struct device *dev, void *res)
+{
+ reboot_mode_unregister(*(struct reboot_mode_driver **)res);
+}
+
+/**
+ * devm_reboot_mode_register() - resource managed reboot_mode_register()
+ * @dev: device to associate this resource with
+ * @reboot: reboot mode driver
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int devm_reboot_mode_register(struct device *dev,
+ struct reboot_mode_driver *reboot)
+{
+ struct reboot_mode_driver **dr;
+ int rc;
+
+ dr = devres_alloc(devm_reboot_mode_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = reboot_mode_register(reboot);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = reboot;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_reboot_mode_register);
+
+static int devm_reboot_mode_match(struct device *dev, void *res, void *data)
+{
+ struct reboot_mode_driver **p = res;
+
+ if (WARN_ON(!p || !*p))
+ return 0;
+
+ return *p == data;
+}
+
+/**
+ * devm_reboot_mode_unregister() - resource managed reboot_mode_unregister()
+ * @dev: device to associate this resource with
+ * @reboot: reboot mode driver
+ */
+void devm_reboot_mode_unregister(struct device *dev,
+ struct reboot_mode_driver *reboot)
+{
+ WARN_ON(devres_release(dev,
+ devm_reboot_mode_release,
+ devm_reboot_mode_match, reboot));
+}
+EXPORT_SYMBOL_GPL(devm_reboot_mode_unregister);
+
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com");
MODULE_DESCRIPTION("System reboot mode core library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/reboot-mode.h b/drivers/power/reset/reboot-mode.h
index 2491bb71f591..75f7fe5c881f 100644
--- a/drivers/power/reset/reboot-mode.h
+++ b/drivers/power/reset/reboot-mode.h
@@ -10,5 +10,9 @@ struct reboot_mode_driver {
int reboot_mode_register(struct reboot_mode_driver *reboot);
int reboot_mode_unregister(struct reboot_mode_driver *reboot);
+int devm_reboot_mode_register(struct device *dev,
+ struct reboot_mode_driver *reboot);
+void devm_reboot_mode_unregister(struct device *dev,
+ struct reboot_mode_driver *reboot);
#endif
diff --git a/drivers/power/reset/st-poweroff.c b/drivers/power/reset/st-poweroff.c
index a488877a3538..2046b31232f7 100644
--- a/drivers/power/reset/st-poweroff.c
+++ b/drivers/power/reset/st-poweroff.c
@@ -28,28 +28,6 @@ struct reset_syscfg {
unsigned int mask_rst_msk;
};
-/* STiH415 */
-#define STIH415_SYSCFG_11 0x2c
-#define STIH415_SYSCFG_15 0x3c
-
-static struct reset_syscfg stih415_reset = {
- .offset_rst = STIH415_SYSCFG_11,
- .mask_rst = BIT(0),
- .offset_rst_msk = STIH415_SYSCFG_15,
- .mask_rst_msk = BIT(0)
-};
-
-/* STiH416 */
-#define STIH416_SYSCFG_500 0x7d0
-#define STIH416_SYSCFG_504 0x7e0
-
-static struct reset_syscfg stih416_reset = {
- .offset_rst = STIH416_SYSCFG_500,
- .mask_rst = BIT(0),
- .offset_rst_msk = STIH416_SYSCFG_504,
- .mask_rst_msk = BIT(0)
-};
-
/* STiH407 */
#define STIH407_SYSCFG_4000 0x0
#define STIH407_SYSCFG_4008 0x20
@@ -61,16 +39,6 @@ static struct reset_syscfg stih407_reset = {
.mask_rst_msk = BIT(0)
};
-/* STiD127 */
-#define STID127_SYSCFG_700 0x0
-#define STID127_SYSCFG_773 0x124
-
-static struct reset_syscfg stid127_reset = {
- .offset_rst = STID127_SYSCFG_773,
- .mask_rst = BIT(0),
- .offset_rst_msk = STID127_SYSCFG_700,
- .mask_rst_msk = BIT(8)
-};
static struct reset_syscfg *st_restart_syscfg;
@@ -99,17 +67,8 @@ static struct notifier_block st_restart_nb = {
static const struct of_device_id st_reset_of_match[] = {
{
- .compatible = "st,stih415-restart",
- .data = (void *)&stih415_reset,
- }, {
- .compatible = "st,stih416-restart",
- .data = (void *)&stih416_reset,
- }, {
.compatible = "st,stih407-restart",
.data = (void *)&stih407_reset,
- }, {
- .compatible = "st,stid127-restart",
- .data = (void *)&stid127_reset,
},
{}
};
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 9e1cba5dd58e..1ecb51d67149 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -53,8 +53,6 @@ static int syscon_reboot_mode_probe(struct platform_device *pdev)
syscon_rbm->reboot.write = syscon_reboot_mode_write;
syscon_rbm->mask = 0xffffffff;
- dev_set_drvdata(&pdev->dev, syscon_rbm);
-
syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(syscon_rbm->map))
return PTR_ERR(syscon_rbm->map);
@@ -65,20 +63,13 @@ static int syscon_reboot_mode_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "mask", &syscon_rbm->mask);
- ret = reboot_mode_register(&syscon_rbm->reboot);
+ ret = devm_reboot_mode_register(&pdev->dev, &syscon_rbm->reboot);
if (ret)
dev_err(&pdev->dev, "can't register reboot mode\n");
return ret;
}
-static int syscon_reboot_mode_remove(struct platform_device *pdev)
-{
- struct syscon_reboot_mode *syscon_rbm = dev_get_drvdata(&pdev->dev);
-
- return reboot_mode_unregister(&syscon_rbm->reboot);
-}
-
static const struct of_device_id syscon_reboot_mode_of_match[] = {
{ .compatible = "syscon-reboot-mode" },
{}
@@ -86,7 +77,6 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
static struct platform_driver syscon_reboot_mode_driver = {
.probe = syscon_reboot_mode_probe,
- .remove = syscon_reboot_mode_remove,
.driver = {
.name = "syscon-reboot-mode",
.of_match_table = syscon_reboot_mode_of_match,
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index f07e93c97ba3..73c3d93e5318 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -81,8 +81,10 @@ static int xgene_reboot_probe(struct platform_device *pdev)
ctx->restart_handler.notifier_call = xgene_restart_handler;
ctx->restart_handler.priority = 128;
err = register_restart_handler(&ctx->restart_handler);
- if (err)
+ if (err) {
+ iounmap(ctx->csr);
dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ }
return err;
}
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index a5b009673d0e..b0b1eb3a78c2 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -58,9 +58,12 @@ static int zx_reboot_probe(struct platform_device *pdev)
}
err = register_restart_handler(&zx_restart_nb);
- if (err)
+ if (err) {
+ iounmap(base);
+ iounmap(pcu_base);
dev_err(&pdev->dev, "Register restart handler failed(err=%d)\n",
err);
+ }
return err;
}
diff --git a/drivers/power/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
index 63c57dc82ac1..63c57dc82ac1 100644
--- a/drivers/power/88pm860x_battery.c
+++ b/drivers/power/supply/88pm860x_battery.c
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/supply/88pm860x_charger.c
index 2b82e44d9027..2b82e44d9027 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/supply/88pm860x_charger.c
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
new file mode 100644
index 000000000000..76806a0be820
--- /dev/null
+++ b/drivers/power/supply/Kconfig
@@ -0,0 +1,514 @@
+menuconfig POWER_SUPPLY
+ bool "Power supply class support"
+ help
+ Say Y here to enable power supply class support. This allows
+ power supply (batteries, AC, USB) monitoring by userspace
+ via sysfs and uevent (if available) and/or APM kernel interface
+ (if selected below).
+
+if POWER_SUPPLY
+
+config POWER_SUPPLY_DEBUG
+ bool "Power supply debug"
+ help
+ Say Y here to enable debugging messages for power supply class
+ and drivers.
+
+config PDA_POWER
+ tristate "Generic PDA/phone power driver"
+ depends on !S390
+ help
+ Say Y here to enable generic power driver for PDAs and phones with
+ one or two external power supplies (AC/USB) connected to main and
+ backup batteries, and optional builtin charger.
+
+config APM_POWER
+ tristate "APM emulation for class batteries"
+ depends on APM_EMULATION
+ help
+ Say Y here to enable support for APM status emulation using
+ battery class devices.
+
+config GENERIC_ADC_BATTERY
+ tristate "Generic battery support using IIO"
+ depends on IIO
+ help
+ Say Y here to enable support for the generic battery driver
+ which uses the IIO framework to read the ADC.
+
+config MAX8925_POWER
+ tristate "MAX8925 battery charger support"
+ depends on MFD_MAX8925
+ help
+ Say Y here to enable support for the battery charger in the Maxim
+ MAX8925 PMIC.
+
+config WM831X_BACKUP
+ tristate "WM831X backup battery charger support"
+ depends on MFD_WM831X
+ help
+ Say Y here to enable support for the backup battery charger
+ in the Wolfson Microelectronics WM831x PMICs.
+
+config WM831X_POWER
+ tristate "WM831X PMU support"
+ depends on MFD_WM831X
+ help
+ Say Y here to enable support for the power management unit
+ provided by Wolfson Microelectronics WM831x PMICs.
+
+config WM8350_POWER
+ tristate "WM8350 PMU support"
+ depends on MFD_WM8350
+ help
+ Say Y here to enable support for the power management unit
+ provided by the Wolfson Microelectronics WM8350 PMIC.
+
+config TEST_POWER
+ tristate "Test power driver"
+ help
+ This driver is used for testing. It's safe to say M here.
+
+config BATTERY_88PM860X
+ tristate "Marvell 88PM860x battery driver"
+ depends on MFD_88PM860X
+ help
+ Say Y here to enable battery monitor for Marvell 88PM860x chip.
+
+config BATTERY_ACT8945A
+ tristate "Active-semi ACT8945A charger driver"
+ depends on MFD_ACT8945A || COMPILE_TEST
+ help
+ Say Y here to enable support for power supply provided by
+ Active-semi ActivePath ACT8945A charger.
+
+config BATTERY_DS2760
+ tristate "DS2760 battery driver (HP iPAQ & others)"
+ depends on W1 && W1_SLAVE_DS2760
+ help
+ Say Y here to enable support for batteries with ds2760 chip.
+
+config BATTERY_DS2780
+ tristate "DS2780 battery driver"
+ depends on HAS_IOMEM
+ select W1
+ select W1_SLAVE_DS2780
+ help
+ Say Y here to enable support for batteries with ds2780 chip.
+
+config BATTERY_DS2781
+ tristate "DS2781 battery driver"
+ depends on HAS_IOMEM
+ select W1
+ select W1_SLAVE_DS2781
+ help
+ If you enable this, you will have support for the DS2781 battery driver.
+
+ The battery monitor chip is used in many batteries/devices
+ as the one responsible for charging/discharging/monitoring
+ Li+ batteries.
+
+ If you are unsure, say N.
+
+config BATTERY_DS2782
+ tristate "DS2782/DS2786 standalone gas-gauge"
+ depends on I2C
+ help
+ Say Y here to enable support for the DS2782/DS2786 standalone battery
+ gas-gauge.
+
+config BATTERY_PMU
+ tristate "Apple PMU battery"
+ depends on PPC32 && ADB_PMU
+ help
+ Say Y here to expose battery information on Apple machines
+ through the generic battery class.
+
+config BATTERY_OLPC
+ tristate "One Laptop Per Child battery"
+ depends on X86_32 && OLPC
+ help
+ Say Y to enable support for the battery on the OLPC laptop.
+
+config BATTERY_TOSA
+ tristate "Sharp SL-6000 (tosa) battery"
+ depends on MACH_TOSA && MFD_TC6393XB && TOUCHSCREEN_WM97XX
+ help
+ Say Y to enable support for the battery on the Sharp Zaurus
+ SL-6000 (tosa) models.
+
+config BATTERY_COLLIE
+ tristate "Sharp SL-5500 (collie) battery"
+ depends on SA1100_COLLIE && MCP_UCB1200
+ help
+ Say Y to enable support for the battery on the Sharp Zaurus
+ SL-5500 (collie) models.
+
+config BATTERY_IPAQ_MICRO
+ tristate "iPAQ Atmel Micro ASIC battery driver"
+ depends on MFD_IPAQ_MICRO
+ help
+ Choose this option if you want to monitor battery status on
+ Compaq/HP iPAQ h3100 and h3600.
+
+config BATTERY_WM97XX
+ bool "WM97xx generic battery driver"
+ depends on TOUCHSCREEN_WM97XX=y
+ help
+ Say Y to enable support for battery measured by WM97xx aux port.
+
+config BATTERY_SBS
+ tristate "SBS Compliant gas gauge"
+ depends on I2C
+ help
+ Say Y to include support for SBS battery driver for SBS-compliant
+ gas gauges.
+
+config BATTERY_BQ27XXX
+ tristate "BQ27xxx battery driver"
+ help
+ Say Y here to enable support for batteries with BQ27xxx chips.
+
+config BATTERY_BQ27XXX_I2C
+ tristate "BQ27xxx I2C support"
+ depends on BATTERY_BQ27XXX
+ depends on I2C
+ default y
+ help
+ Say Y here to enable support for batteries with BQ27xxx chips
+ connected over an I2C bus.
+
+config BATTERY_DA9030
+ tristate "DA9030 battery driver"
+ depends on PMIC_DA903X
+ help
+ Say Y here to enable support for the battery charger integrated into
+ the DA9030 PMIC.
+
+config BATTERY_DA9052
+ tristate "Dialog DA9052 Battery"
+ depends on PMIC_DA9052
+ help
+ Say Y here to enable support for the battery charger integrated into
+ the DA9052 PMIC.
+
+config CHARGER_DA9150
+ tristate "Dialog Semiconductor DA9150 Charger support"
+ depends on MFD_DA9150
+ depends on DA9150_GPADC
+ depends on IIO
+ help
+ Say Y here to enable support for charger unit of the DA9150
+ Integrated Charger & Fuel-Gauge IC.
+
+ This driver can also be built as a module. If so, the module will be
+ called da9150-charger.
+
+config BATTERY_DA9150
+ tristate "Dialog Semiconductor DA9150 Fuel Gauge support"
+ depends on MFD_DA9150
+ help
+ Say Y here to enable support for the Fuel-Gauge unit of the DA9150
+ Integrated Charger & Fuel-Gauge IC
+
+ This driver can also be built as a module. If so, the module will be
+ called da9150-fg.
+
+config AXP288_CHARGER
+ tristate "X-Powers AXP288 Charger"
+ depends on MFD_AXP20X && EXTCON_AXP288
+ help
+ Say yes here to have support for the X-Powers AXP288 power management
+ IC (PMIC) integrated charger.
+
+config AXP288_FUEL_GAUGE
+ tristate "X-Powers AXP288 Fuel Gauge"
+ depends on MFD_AXP20X && IIO
+ help
+ Say yes here to have support for the X-Powers power management IC (PMIC)
+ Fuel Gauge. The device provides battery statistics and status
+ monitoring as well as alerts for battery over/under voltage and
+ over/under temperature.
+
+config BATTERY_MAX17040
+ tristate "Maxim MAX17040 Fuel Gauge"
+ depends on I2C
+ help
+ The MAX17040 is a fuel gauge for lithium-ion (Li+) batteries
+ in handheld and portable equipment. It is configured
+ to operate with a single lithium cell.
+
+config BATTERY_MAX17042
+ tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ The MAX17042 is a fuel gauge for lithium-ion (Li+) batteries
+ in handheld and portable equipment. It is configured
+ to operate with a single lithium cell. The MAX8997 and MAX8966 are
+ multi-function devices that include fuel gauges that are compatible
+ with the MAX17042. This driver also supports the MAX17047/50 chips,
+ which are improved versions of the MAX17042.
+
+config BATTERY_Z2
+ tristate "Z2 battery driver"
+ depends on I2C && MACH_ZIPIT2
+ help
+ Say Y to include support for the battery on the Zipit Z2.
+
+config BATTERY_S3C_ADC
+ tristate "Battery driver for Samsung ADC based monitoring"
+ depends on S3C_ADC
+ help
+ Say Y here to enable support for the iPAQ h1930/h1940/rx1950 battery.
+
+config BATTERY_TWL4030_MADC
+ tristate "TWL4030 MADC battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable this dumb driver for batteries managed
+ through the TWL4030 MADC.
+
+config CHARGER_88PM860X
+ tristate "Marvell 88PM860x Charger driver"
+ depends on MFD_88PM860X && BATTERY_88PM860X
+ help
+ Say Y here to enable charger for Marvell 88PM860x chip.
+
+config CHARGER_PCF50633
+ tristate "NXP PCF50633 MBC"
+ depends on MFD_PCF50633
+ help
+ Say Y to include support for NXP PCF50633 Main Battery Charger.
+
+config BATTERY_JZ4740
+ tristate "Ingenic JZ4740 battery"
+ depends on MACH_JZ4740
+ depends on MFD_JZ4740_ADC
+ help
+ Say Y to enable support for the battery on Ingenic JZ4740 based
+ boards.
+
+ This driver can be built as a module. If so, the module will be
+ called jz4740-battery.
+
+config BATTERY_INTEL_MID
+ tristate "Battery driver for Intel MID platforms"
+ depends on INTEL_SCU_IPC && SPI
+ help
+ Say Y here to enable the battery driver on Intel MID
+ platforms.
+
+config BATTERY_RX51
+ tristate "Nokia RX-51 (N900) battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable support for battery information on Nokia
+ RX-51, also known as N900 tablet.
+
+config CHARGER_ISP1704
+ tristate "ISP1704 USB Charger Detection"
+ depends on USB_PHY
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
+ help
+ Say Y to enable support for USB Charger Detection with
+ ISP1707/ISP1704 USB transceivers.
+
+config CHARGER_MAX8903
+ tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
+ help
+ Say Y to enable support for the MAX8903 DC-DC charger and sysfs.
+ The driver supports controlling charger-enable and current-limit
+ pins based on the status of charger connections with interrupt
+ handlers.
+
+config CHARGER_TWL4030
+ tristate "OMAP TWL4030 BCI charger driver"
+ depends on IIO && TWL4030_CORE
+ help
+ Say Y here to enable support for TWL4030 Battery Charge Interface.
+
+config CHARGER_LP8727
+ tristate "TI/National Semiconductor LP8727 charger driver"
+ depends on I2C
+ help
+ Say Y here to enable support for LP8727 Charger Driver.
+
+config CHARGER_LP8788
+ tristate "TI LP8788 charger driver"
+ depends on MFD_LP8788
+ depends on LP8788_ADC
+ depends on IIO
+ help
+ Say Y to enable support for the LP8788 linear charger.
+
+config CHARGER_GPIO
+ tristate "GPIO charger"
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y to include support for chargers which report their online status
+ through a GPIO pin.
+
+ This driver can be built as a module. If so, the module will be
+ called gpio-charger.
+
+config CHARGER_MANAGER
+ bool "Battery charger manager for multiple chargers"
+ depends on REGULATOR
+ select EXTCON
+ help
+ Say Y to enable charger-manager support, which allows multiple
+ chargers attached to a battery and multiple batteries attached to a
+ system. The charger-manager can also monitor charging status at
+ runtime and in suspend-to-RAM by waking up the system periodically
+ with the help of suspend_again support.
+
+config CHARGER_MAX14577
+ tristate "Maxim MAX14577/77836 battery charger driver"
+ depends on MFD_MAX14577
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX14577/77836 MUICs.
+
+config CHARGER_MAX77693
+ tristate "Maxim MAX77693 battery charger driver"
+ depends on MFD_MAX77693
+ help
+ Say Y to enable support for the Maxim MAX77693 battery charger.
+
+config CHARGER_MAX8997
+ tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
+ depends on MFD_MAX8997 && REGULATOR_MAX8997
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8997/LP3974 PMICs.
+
+config CHARGER_MAX8998
+ tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
+ depends on MFD_MAX8998 && REGULATOR_MAX8998
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8998/LP3974 PMICs.
+
+config CHARGER_QCOM_SMBB
+ tristate "Qualcomm Switch-Mode Battery Charger and Boost"
+ depends on MFD_SPMI_PMIC || COMPILE_TEST
+ depends on OF
+ depends on EXTCON
+ help
+ Say Y to include support for the Switch-Mode Battery Charger and
+ Boost (SMBB) hardware found in Qualcomm PM8941 PMICs. The charger
+ is an integrated, single-cell lithium-ion battery charger. DT
+ configuration is required for loading, see the devicetree
+ documentation for more detail. The base name for this driver is
+ 'pm8941_charger'.
+
+config CHARGER_BQ2415X
+ tristate "TI BQ2415x battery charger driver"
+ depends on I2C
+ help
+ Say Y to enable support for the TI BQ2415x battery charger
+ PMICs.
+
+ You'll need this driver to charge batteries on e.g. Nokia
+ RX-51/N900.
+
+config CHARGER_BQ24190
+ tristate "TI BQ24190 battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y to enable support for the TI BQ24190 battery charger.
+
+config CHARGER_BQ24257
+ tristate "TI BQ24250/24251/24257 battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ depends on REGMAP_I2C
+ help
+ Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery
+ chargers.
+
+config CHARGER_BQ24735
+ tristate "TI BQ24735 battery charger support"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y to enable support for the TI BQ24735 battery charger.
+
+config CHARGER_BQ25890
+ tristate "TI BQ25890 battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the TI BQ25890 battery charger.
+
+config CHARGER_SMB347
+ tristate "Summit Microelectronics SMB347 Battery Charger"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to include support for Summit Microelectronics SMB347
+ Battery Charger.
+
+config CHARGER_TPS65090
+ tristate "TPS65090 battery charger driver"
+ depends on MFD_TPS65090
+ help
+ Say Y here to enable support for battery charging with TPS65090
+ PMIC chips.
+
+config CHARGER_TPS65217
+ tristate "TPS65217 battery charger driver"
+ depends on MFD_TPS65217
+ help
+ Say Y here to enable support for battery charging with TPS65217
+ PMIC chips.
+
+config BATTERY_GAUGE_LTC2941
+ tristate "LTC2941/LTC2943 Battery Gauge Driver"
+ depends on I2C
+ help
+ Say Y here to include support for LTC2941 and LTC2943 Battery
+ Gauge IC. The driver reports the charge count continuously, and
+ measures the voltage and temperature every 10 seconds.
+
+config AB8500_BM
+ bool "AB8500 Battery Management Driver"
+ depends on AB8500_CORE && AB8500_GPADC
+ help
+ Say Y to include support for AB8500 battery management.
+
+config BATTERY_GOLDFISH
+ tristate "Goldfish battery driver"
+ depends on GOLDFISH || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say Y to enable support for the battery and AC power in the
+ Goldfish emulator.
+
+config BATTERY_RT5033
+ tristate "RT5033 fuel gauge support"
+ depends on MFD_RT5033
+ help
+ This adds support for the battery fuel gauge in the Richtek RT5033 PMIC.
+ The fuel gauge calculates and determines the battery state of charge
+ according to the battery open-circuit voltage.
+
+config CHARGER_RT9455
+ tristate "Richtek RT9455 battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ Say Y to enable support for Richtek RT9455 battery charger.
+
+config AXP20X_POWER
+ tristate "AXP20x power supply driver"
+ depends on MFD_AXP20X
+ help
+ This driver provides support for the power supply features of
+ AXP20x PMIC.
+
+endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
new file mode 100644
index 000000000000..36c599d9a495
--- /dev/null
+++ b/drivers/power/supply/Makefile
@@ -0,0 +1,74 @@
+subdir-ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
+
+power_supply-y := power_supply_core.o
+power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
+
+obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
+obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
+
+obj-$(CONFIG_PDA_POWER) += pda_power.o
+obj-$(CONFIG_APM_POWER) += apm_power.o
+obj-$(CONFIG_AXP20X_POWER) += axp20x_usb_power.o
+obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
+obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
+obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
+obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
+obj-$(CONFIG_TEST_POWER) += test_power.o
+
+obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
+obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
+obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
+obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
+obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
+obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
+obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
+obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
+obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
+obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
+obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
+obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
+obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
+obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
+obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
+obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
+obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
+obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
+obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
+obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o
+obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o
+obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
+obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
+obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
+obj-$(CONFIG_CHARGER_RT9455) += rt9455_charger.o
+obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
+obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
+obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
+obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
+obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
+obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
+obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
+obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
+obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
+obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
+obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
+obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
+obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
+obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
+obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
+obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
+obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
+obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
+obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o
+obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
+obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
+obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
+obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
+obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
+obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
diff --git a/drivers/power/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index d29864533093..d29864533093 100644
--- a/drivers/power/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index bf2e5dd301e7..6ffdc18f2599 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -1095,7 +1095,7 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Create a work queue for the btemp */
di->btemp_wq =
- create_singlethread_workqueue("ab8500_btemp_wq");
+ alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
if (di->btemp_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
return -ENOMEM;
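A note on the workqueue changes in this series: every hunk that touches workqueue setup replaces the legacy create_singlethread_workqueue() helper with alloc_workqueue() or alloc_ordered_workqueue() plus an explicit WQ_MEM_RECLAIM flag, so the ordering and reclaim guarantees the driver depends on are spelled out. A minimal sketch of the pattern, with made-up names, might look like this:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_probe(void)
{
        /* old form removed by this series: */
        /* example_wq = create_singlethread_workqueue("example_wq"); */

        /*
         * New form: strictly ordered execution plus guaranteed forward
         * progress under memory pressure (WQ_MEM_RECLAIM).
         */
        example_wq = alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);
        if (!example_wq)
                return -ENOMEM;

        return 0;
}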
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de5d42b26a..5cee9aa87aa3 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3540,8 +3540,8 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_state.usb_current = -1;
/* Create a work queue for the charger */
- di->charger_wq =
- create_singlethread_workqueue("ab8500_charger_wq");
+ di->charger_wq = alloc_ordered_workqueue("ab8500_charger_wq",
+ WQ_MEM_RECLAIM);
if (di->charger_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
return -ENOMEM;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 5a36cf88578a..2199f673118c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -245,13 +245,8 @@ static LIST_HEAD(ab8500_fg_list);
*/
struct ab8500_fg *ab8500_fg_get(void)
{
- struct ab8500_fg *fg;
-
- if (list_empty(&ab8500_fg_list))
- return NULL;
-
- fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
- return fg;
+ return list_first_entry_or_null(&ab8500_fg_list, struct ab8500_fg,
+ node);
}
/* Main battery properties */
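The hunk above collapses an open-coded "is the list empty?" check into list_first_entry_or_null(). For reference, a self-contained sketch of that helper with an invented struct:

#include <linux/list.h>

struct example_node {
        int value;
        struct list_head node;  /* linkage member, as in struct ab8500_fg */
};

static LIST_HEAD(example_list);

/*
 * Returns the first element or NULL when the list is empty -- exactly the
 * two branches the removed code spelled out by hand.
 */
static struct example_node *example_first(void)
{
        return list_first_entry_or_null(&example_list, struct example_node,
                                        node);
}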
@@ -3096,7 +3091,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
/* Create a work queue for running the FG algorithm */
- di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
+ di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
if (di->fg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
return -ENOMEM;
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index d9104b1ab7cf..a4411d6bbc96 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -2091,8 +2091,8 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
abx500_chargalg_maintenance_timer_expired;
/* Create a work queue for the chargalg */
- di->chargalg_wq =
- create_singlethread_workqueue("abx500_chargalg_wq");
+ di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
+ WQ_MEM_RECLAIM);
if (di->chargalg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
return -ENOMEM;
diff --git a/drivers/power/supply/act8945a_charger.c b/drivers/power/supply/act8945a_charger.c
new file mode 100644
index 000000000000..d1eb2e359532
--- /dev/null
+++ b/drivers/power/supply/act8945a_charger.c
@@ -0,0 +1,666 @@
+/*
+ * Power supply driver for the Active-semi ACT8945A PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+static const char *act8945a_charger_model = "ACT8945A";
+static const char *act8945a_charger_manufacturer = "Active-semi";
+
+/**
+ * ACT8945A Charger Register Map
+ */
+
+/* 0x70: Reserved */
+#define ACT8945A_APCH_CFG 0x71
+#define ACT8945A_APCH_STATUS 0x78
+#define ACT8945A_APCH_CTRL 0x79
+#define ACT8945A_APCH_STATE 0x7A
+
+/* ACT8945A_APCH_CFG */
+#define APCH_CFG_OVPSET (0x3 << 0)
+#define APCH_CFG_OVPSET_6V6 (0x0 << 0)
+#define APCH_CFG_OVPSET_7V (0x1 << 0)
+#define APCH_CFG_OVPSET_7V5 (0x2 << 0)
+#define APCH_CFG_OVPSET_8V (0x3 << 0)
+#define APCH_CFG_PRETIMO (0x3 << 2)
+#define APCH_CFG_PRETIMO_40_MIN (0x0 << 2)
+#define APCH_CFG_PRETIMO_60_MIN (0x1 << 2)
+#define APCH_CFG_PRETIMO_80_MIN (0x2 << 2)
+#define APCH_CFG_PRETIMO_DISABLED (0x3 << 2)
+#define APCH_CFG_TOTTIMO (0x3 << 4)
+#define APCH_CFG_TOTTIMO_3_HOUR (0x0 << 4)
+#define APCH_CFG_TOTTIMO_4_HOUR (0x1 << 4)
+#define APCH_CFG_TOTTIMO_5_HOUR (0x2 << 4)
+#define APCH_CFG_TOTTIMO_DISABLED (0x3 << 4)
+#define APCH_CFG_SUSCHG (0x1 << 7)
+
+#define APCH_STATUS_CHGDAT BIT(0)
+#define APCH_STATUS_INDAT BIT(1)
+#define APCH_STATUS_TEMPDAT BIT(2)
+#define APCH_STATUS_TIMRDAT BIT(3)
+#define APCH_STATUS_CHGSTAT BIT(4)
+#define APCH_STATUS_INSTAT BIT(5)
+#define APCH_STATUS_TEMPSTAT BIT(6)
+#define APCH_STATUS_TIMRSTAT BIT(7)
+
+#define APCH_CTRL_CHGEOCOUT BIT(0)
+#define APCH_CTRL_INDIS BIT(1)
+#define APCH_CTRL_TEMPOUT BIT(2)
+#define APCH_CTRL_TIMRPRE BIT(3)
+#define APCH_CTRL_CHGEOCIN BIT(4)
+#define APCH_CTRL_INCON BIT(5)
+#define APCH_CTRL_TEMPIN BIT(6)
+#define APCH_CTRL_TIMRTOT BIT(7)
+
+#define APCH_STATE_ACINSTAT (0x1 << 1)
+#define APCH_STATE_CSTATE (0x3 << 4)
+#define APCH_STATE_CSTATE_SHIFT 4
+#define APCH_STATE_CSTATE_DISABLED 0x00
+#define APCH_STATE_CSTATE_EOC 0x01
+#define APCH_STATE_CSTATE_FAST 0x02
+#define APCH_STATE_CSTATE_PRE 0x03
+
+struct act8945a_charger {
+ struct power_supply *psy;
+ struct power_supply_desc desc;
+ struct regmap *regmap;
+ struct work_struct work;
+
+ bool init_done;
+ struct gpio_desc *lbo_gpio;
+ struct gpio_desc *chglev_gpio;
+};
+
+static int act8945a_get_charger_state(struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_PRE:
+ case APCH_STATE_CSTATE_FAST:
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case APCH_STATE_CSTATE_EOC:
+ if (status & APCH_STATUS_CHGDAT)
+ *val = POWER_SUPPLY_STATUS_FULL;
+ else
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case APCH_STATE_CSTATE_DISABLED:
+ default:
+ if (!(status & APCH_STATUS_INDAT))
+ *val = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ }
+
+ return 0;
+}
+
+static int act8945a_get_charge_type(struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_PRE:
+ *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case APCH_STATE_CSTATE_FAST:
+ *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case APCH_STATE_CSTATE_EOC:
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case APCH_STATE_CSTATE_DISABLED:
+ default:
+ if (!(status & APCH_STATUS_INDAT))
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else
+ *val = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int act8945a_get_battery_health(struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state, config;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_CFG, &config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_DISABLED:
+ if (config & APCH_CFG_SUSCHG) {
+ *val = POWER_SUPPLY_HEALTH_UNKNOWN;
+ } else if (status & APCH_STATUS_INDAT) {
+ if (!(status & APCH_STATUS_TEMPDAT))
+ *val = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (status & APCH_STATUS_TIMRDAT)
+ *val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ else
+ *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ case APCH_STATE_CSTATE_PRE:
+ case APCH_STATE_CSTATE_FAST:
+ case APCH_STATE_CSTATE_EOC:
+ default:
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ }
+
+ return 0;
+}
+
+static int act8945a_get_capacity_level(struct act8945a_charger *charger,
+ struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state, config;
+ int lbo_level = gpiod_get_value(charger->lbo_gpio);
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_CFG, &config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_PRE:
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ break;
+ case APCH_STATE_CSTATE_FAST:
+ if (lbo_level)
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ break;
+ case APCH_STATE_CSTATE_EOC:
+ if (status & APCH_STATUS_CHGDAT)
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
+ case APCH_STATE_CSTATE_DISABLED:
+ default:
+ if (config & APCH_CFG_SUSCHG) {
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ } else {
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ if (!(status & APCH_STATUS_INDAT)) {
+ if (!lbo_level)
+ *val = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#define MAX_CURRENT_USB_HIGH 450000
+#define MAX_CURRENT_USB_LOW 90000
+#define MAX_CURRENT_USB_PRE 45000
+/*
+ * Riset(K) = 2336 * (1V/Ichg(mA)) - 0.205
+ * Riset = 2.43K
+ */
+#define MAX_CURRENT_AC_HIGH 886527
+#define MAX_CURRENT_AC_LOW 117305
+#define MAX_CURRENT_AC_HIGH_PRE 88653
+#define MAX_CURRENT_AC_LOW_PRE 11731
+
+static int act8945a_get_current_max(struct act8945a_charger *charger,
+ struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state;
+ unsigned int acin_state;
+ int chgin_level = gpiod_get_value(charger->chglev_gpio);
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ acin_state = (state & APCH_STATE_ACINSTAT) >> 1;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_PRE:
+ if (acin_state) {
+ if (chgin_level)
+ *val = MAX_CURRENT_AC_HIGH_PRE;
+ else
+ *val = MAX_CURRENT_AC_LOW_PRE;
+ } else {
+ *val = MAX_CURRENT_USB_PRE;
+ }
+ break;
+ case APCH_STATE_CSTATE_FAST:
+ if (acin_state) {
+ if (chgin_level)
+ *val = MAX_CURRENT_AC_HIGH;
+ else
+ *val = MAX_CURRENT_AC_LOW;
+ } else {
+ if (chgin_level)
+ *val = MAX_CURRENT_USB_HIGH;
+ else
+ *val = MAX_CURRENT_USB_LOW;
+ }
+ break;
+ case APCH_STATE_CSTATE_EOC:
+ case APCH_STATE_CSTATE_DISABLED:
+ default:
+ *val = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property act8945a_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER
+};
+
+static int act8945a_charger_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct act8945a_charger *charger = power_supply_get_drvdata(psy);
+ struct regmap *regmap = charger->regmap;
+ int ret = 0;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = act8945a_get_charger_state(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = act8945a_get_charge_type(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = act8945a_get_battery_health(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ ret = act8945a_get_capacity_level(charger,
+ regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ ret = act8945a_get_current_max(charger,
+ regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = act8945a_charger_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = act8945a_charger_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int act8945a_enable_interrupt(struct act8945a_charger *charger)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned char ctrl;
+ int ret;
+
+ ctrl = APCH_CTRL_CHGEOCOUT | APCH_CTRL_CHGEOCIN |
+ APCH_CTRL_INDIS | APCH_CTRL_INCON |
+ APCH_CTRL_TEMPOUT | APCH_CTRL_TEMPIN |
+ APCH_CTRL_TIMRPRE | APCH_CTRL_TIMRTOT;
+ ret = regmap_write(regmap, ACT8945A_APCH_CTRL, ctrl);
+ if (ret)
+ return ret;
+
+ ctrl = APCH_STATUS_CHGSTAT | APCH_STATUS_INSTAT |
+ APCH_STATUS_TEMPSTAT | APCH_STATUS_TIMRSTAT;
+ ret = regmap_write(regmap, ACT8945A_APCH_STATUS, ctrl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int act8945a_set_supply_type(struct act8945a_charger *charger,
+ unsigned int *type)
+{
+ unsigned int status, state;
+ int ret;
+
+ ret = regmap_read(charger->regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(charger->regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ if (status & APCH_STATUS_INDAT) {
+ if (state & APCH_STATE_ACINSTAT)
+ *type = POWER_SUPPLY_TYPE_MAINS;
+ else
+ *type = POWER_SUPPLY_TYPE_USB;
+ } else {
+ *type = POWER_SUPPLY_TYPE_BATTERY;
+ }
+
+ return 0;
+}
+
+static void act8945a_work(struct work_struct *work)
+{
+ struct act8945a_charger *charger =
+ container_of(work, struct act8945a_charger, work);
+
+ act8945a_set_supply_type(charger, &charger->desc.type);
+
+ power_supply_changed(charger->psy);
+}
+
+static irqreturn_t act8945a_status_changed(int irq, void *dev_id)
+{
+ struct act8945a_charger *charger = dev_id;
+
+ if (charger->init_done)
+ schedule_work(&charger->work);
+
+ return IRQ_HANDLED;
+}
+
+#define DEFAULT_TOTAL_TIME_OUT 3
+#define DEFAULT_PRE_TIME_OUT 40
+#define DEFAULT_INPUT_OVP_THRESHOLD 6600
+
+static int act8945a_charger_config(struct device *dev,
+ struct act8945a_charger *charger)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap = charger->regmap;
+
+ u32 total_time_out;
+ u32 pre_time_out;
+ u32 input_voltage_threshold;
+ int err, ret;
+
+ unsigned int tmp;
+ unsigned int value = 0;
+
+ if (!np) {
+ dev_err(dev, "no charger of node\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, ACT8945A_APCH_CFG, &tmp);
+ if (ret)
+ return ret;
+
+ if (tmp & APCH_CFG_SUSCHG) {
+ value |= APCH_CFG_SUSCHG;
+ dev_info(dev, "have been suspended\n");
+ }
+
+ charger->lbo_gpio = devm_gpiod_get_optional(dev, "active-semi,lbo",
+ GPIOD_IN);
+ if (IS_ERR(charger->lbo_gpio)) {
+ err = PTR_ERR(charger->lbo_gpio);
+ dev_err(dev, "unable to claim gpio \"lbo\": %d\n", err);
+ return err;
+ }
+
+ ret = devm_request_irq(dev, gpiod_to_irq(charger->lbo_gpio),
+ act8945a_status_changed,
+ (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING),
+ "act8945a_lbo_detect", charger);
+ if (ret)
+ dev_info(dev, "failed to request gpio \"lbo\" IRQ\n");
+
+ charger->chglev_gpio = devm_gpiod_get_optional(dev,
+ "active-semi,chglev",
+ GPIOD_IN);
+ if (IS_ERR(charger->chglev_gpio)) {
+ err = PTR_ERR(charger->chglev_gpio);
+ dev_err(dev, "unable to claim gpio \"chglev\": %d\n", err);
+ return err;
+ }
+
+ if (of_property_read_u32(np,
+ "active-semi,input-voltage-threshold-microvolt",
+ &input_voltage_threshold))
+ input_voltage_threshold = DEFAULT_INPUT_OVP_THRESHOLD;
+
+ if (of_property_read_u32(np,
+ "active-semi,precondition-timeout",
+ &pre_time_out))
+ pre_time_out = DEFAULT_PRE_TIME_OUT;
+
+ if (of_property_read_u32(np, "active-semi,total-timeout",
+ &total_time_out))
+ total_time_out = DEFAULT_TOTAL_TIME_OUT;
+
+ switch (input_voltage_threshold) {
+ case 8000:
+ value |= APCH_CFG_OVPSET_8V;
+ break;
+ case 7500:
+ value |= APCH_CFG_OVPSET_7V5;
+ break;
+ case 7000:
+ value |= APCH_CFG_OVPSET_7V;
+ break;
+ case 6600:
+ default:
+ value |= APCH_CFG_OVPSET_6V6;
+ break;
+ }
+
+ switch (pre_time_out) {
+ case 60:
+ value |= APCH_CFG_PRETIMO_60_MIN;
+ break;
+ case 80:
+ value |= APCH_CFG_PRETIMO_80_MIN;
+ break;
+ case 0:
+ value |= APCH_CFG_PRETIMO_DISABLED;
+ break;
+ case 40:
+ default:
+ value |= APCH_CFG_PRETIMO_40_MIN;
+ break;
+ }
+
+ switch (total_time_out) {
+ case 4:
+ value |= APCH_CFG_TOTTIMO_4_HOUR;
+ break;
+ case 5:
+ value |= APCH_CFG_TOTTIMO_5_HOUR;
+ break;
+ case 0:
+ value |= APCH_CFG_TOTTIMO_DISABLED;
+ break;
+ case 3:
+ default:
+ value |= APCH_CFG_TOTTIMO_3_HOUR;
+ break;
+ }
+
+ return regmap_write(regmap, ACT8945A_APCH_CFG, value);
+}
+
+static int act8945a_charger_probe(struct platform_device *pdev)
+{
+ struct act8945a_charger *charger;
+ struct power_supply_config psy_cfg = {};
+ int irq, ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!charger->regmap) {
+ dev_err(&pdev->dev, "Parent did not provide regmap\n");
+ return -EINVAL;
+ }
+
+ ret = act8945a_charger_config(&pdev->dev, charger);
+ if (ret)
+ return ret;
+
+ irq = of_irq_get(pdev->dev.of_node, 0);
+ if (irq == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "failed to find IRQ number\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, act8945a_status_changed,
+ IRQF_TRIGGER_FALLING, "act8945a_interrupt",
+ charger);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request nIRQ pin IRQ\n");
+ return ret;
+ }
+
+ charger->desc.name = "act8945a-charger";
+ charger->desc.get_property = act8945a_charger_get_property;
+ charger->desc.properties = act8945a_charger_props;
+ charger->desc.num_properties = ARRAY_SIZE(act8945a_charger_props);
+
+ ret = act8945a_set_supply_type(charger, &charger->desc.type);
+ if (ret)
+ return -EINVAL;
+
+ psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.drv_data = charger;
+
+ charger->psy = devm_power_supply_register(&pdev->dev,
+ &charger->desc,
+ &psy_cfg);
+ if (IS_ERR(charger->psy)) {
+ dev_err(&pdev->dev, "failed to register power supply\n");
+ return PTR_ERR(charger->psy);
+ }
+
+ platform_set_drvdata(pdev, charger);
+
+ INIT_WORK(&charger->work, act8945a_work);
+
+ ret = act8945a_enable_interrupt(charger);
+ if (ret)
+ return -EIO;
+
+ charger->init_done = true;
+
+ return 0;
+}
+
+static int act8945a_charger_remove(struct platform_device *pdev)
+{
+ struct act8945a_charger *charger = platform_get_drvdata(pdev);
+
+ charger->init_done = false;
+ cancel_work_sync(&charger->work);
+
+ return 0;
+}
+
+static struct platform_driver act8945a_charger_driver = {
+ .driver = {
+ .name = "act8945a-charger",
+ },
+ .probe = act8945a_charger_probe,
+ .remove = act8945a_charger_remove,
+};
+module_platform_driver(act8945a_charger_driver);
+
+MODULE_DESCRIPTION("Active-semi ACT8945A ActivePath charger driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
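The new act8945a driver above follows the standard power-supply core registration pattern: fill a power_supply_desc at probe time, register it with devm_power_supply_register(), and recover the driver data with power_supply_get_drvdata() in the property callback. A stripped-down sketch of that pattern, with invented names and a single property, is roughly:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>

struct example_chg {
        struct power_supply *psy;
        struct power_supply_desc desc;
};

static enum power_supply_property example_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
};

static int example_get_property(struct power_supply *psy,
                                enum power_supply_property prop,
                                union power_supply_propval *val)
{
        struct example_chg *chg = power_supply_get_drvdata(psy);

        if (prop != POWER_SUPPLY_PROP_ONLINE)
                return -EINVAL;

        val->intval = 1;        /* a real driver would read hardware via chg */
        return 0;
}

static int example_probe(struct platform_device *pdev)
{
        struct power_supply_config cfg = {};
        struct example_chg *chg;

        chg = devm_kzalloc(&pdev->dev, sizeof(*chg), GFP_KERNEL);
        if (!chg)
                return -ENOMEM;

        chg->desc.name = "example-charger";
        chg->desc.type = POWER_SUPPLY_TYPE_MAINS;
        chg->desc.properties = example_props;
        chg->desc.num_properties = ARRAY_SIZE(example_props);
        chg->desc.get_property = example_get_property;

        cfg.drv_data = chg;
        cfg.of_node = pdev->dev.of_node;

        chg->psy = devm_power_supply_register(&pdev->dev, &chg->desc, &cfg);
        return PTR_ERR_OR_ZERO(chg->psy);
}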
diff --git a/drivers/power/apm_power.c b/drivers/power/supply/apm_power.c
index 9d1a7fbcaed4..9d1a7fbcaed4 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/supply/apm_power.c
diff --git a/drivers/power/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 6af6feb7058d..6af6feb7058d 100644
--- a/drivers/power/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
diff --git a/drivers/power/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 4030eeb7cf65..75b8e0c7402b 100644
--- a/drivers/power/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -23,7 +23,6 @@
#include <linux/usb/otg.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>
-#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/mfd/axp20x.h>
#include <linux/extcon.h>
diff --git a/drivers/power/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 50c0110d6b58..5bdde692f724 100644
--- a/drivers/power/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -22,7 +22,6 @@
#include <linux/regmap.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mfd/axp20x.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b79dd4..73e2f0b79dd4 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9f4e83..f5746b9f4e83 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
diff --git a/drivers/power/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 1fea2c7ef97f..6fc31bdc639b 100644
--- a/drivers/power/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client,
return ret;
}
+ ret = bq24257_power_supply_init(bq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register power supply\n");
+ return ret;
+ }
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq24257_irq_handler_thread,
IRQF_TRIGGER_FALLING |
@@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client,
return ret;
}
- ret = bq24257_power_supply_init(bq);
- if (ret < 0) {
- dev_err(dev, "Failed to register power supply\n");
- return ret;
- }
-
ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group);
if (ret < 0) {
dev_err(dev, "Can't create sysfs entries\n");
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index fa454c19ce17..eb7783b42e0a 100644
--- a/drivers/power/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -25,7 +25,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -49,6 +49,7 @@ struct bq24735 {
struct i2c_client *client;
struct bq24735_platform *pdata;
struct mutex lock;
+ struct gpio_desc *status_gpio;
bool charging;
};
@@ -177,12 +178,8 @@ static int bq24735_config_charger(struct bq24735 *charger)
static bool bq24735_charger_is_present(struct bq24735 *charger)
{
- struct bq24735_platform *pdata = charger->pdata;
- int ret;
-
- if (pdata->status_gpio_valid) {
- ret = gpio_get_value_cansleep(pdata->status_gpio);
- return ret ^= pdata->status_gpio_active_low == 0;
+ if (charger->status_gpio) {
+ return !gpiod_get_value_cansleep(charger->status_gpio);
} else {
int ac = 0;
@@ -201,8 +198,12 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
static int bq24735_charger_is_charging(struct bq24735 *charger)
{
- int ret = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+ int ret;
+ if (!bq24735_charger_is_present(charger))
+ return 0;
+
+ ret = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
if (ret < 0)
return ret;
@@ -304,7 +305,6 @@ static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
struct device_node *np = client->dev.of_node;
u32 val;
int ret;
- enum of_gpio_flags flags;
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
@@ -313,12 +313,6 @@ static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
return NULL;
}
- pdata->status_gpio = of_get_named_gpio_flags(np, "ti,ac-detect-gpios",
- 0, &flags);
-
- if (flags & OF_GPIO_ACTIVE_LOW)
- pdata->status_gpio_active_low = 1;
-
ret = of_property_read_u32(np, "ti,charge-current", &val);
if (!ret)
pdata->charge_current = val;
@@ -392,21 +386,16 @@ static int bq24735_charger_probe(struct i2c_client *client,
i2c_set_clientdata(client, charger);
- if (gpio_is_valid(charger->pdata->status_gpio)) {
- ret = devm_gpio_request(&client->dev,
- charger->pdata->status_gpio,
- name);
- if (ret) {
- dev_err(&client->dev,
- "Failed GPIO request for GPIO %d: %d\n",
- charger->pdata->status_gpio, ret);
- }
-
- charger->pdata->status_gpio_valid = !ret;
+ charger->status_gpio = devm_gpiod_get_optional(&client->dev,
+ "ti,ac-detect",
+ GPIOD_IN);
+ if (IS_ERR(charger->status_gpio)) {
+ ret = PTR_ERR(charger->status_gpio);
+ dev_err(&client->dev, "Getting gpio failed: %d\n", ret);
+ return ret;
}
- if (!charger->pdata->status_gpio_valid
- || bq24735_charger_is_present(charger)) {
+ if (!charger->status_gpio || bq24735_charger_is_present(charger)) {
ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
if (ret < 0) {
dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
diff --git a/drivers/power/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index f993a55cde20..f993a55cde20 100644
--- a/drivers/power/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 323d05a12f9b..3b0dbc689d72 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -39,6 +39,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/param.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
@@ -390,8 +391,35 @@ static struct {
BQ27XXX_PROP(BQ27421, bq27421_battery_props),
};
+static DEFINE_MUTEX(bq27xxx_list_lock);
+static LIST_HEAD(bq27xxx_battery_devices);
+
+static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
+{
+ struct bq27xxx_device_info *di;
+ int ret;
+
+ ret = param_set_uint(val, kp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&bq27xxx_list_lock);
+ list_for_each_entry(di, &bq27xxx_battery_devices, list) {
+ cancel_delayed_work_sync(&di->work);
+ schedule_delayed_work(&di->work, 0);
+ }
+ mutex_unlock(&bq27xxx_list_lock);
+
+ return ret;
+}
+
+static const struct kernel_param_ops param_ops_poll_interval = {
+ .get = param_get_uint,
+ .set = poll_interval_param_set,
+};
+
static unsigned int poll_interval = 360;
-module_param(poll_interval, uint, 0644);
+module_param_cb(poll_interval, &param_ops_poll_interval, &poll_interval, 0644);
MODULE_PARM_DESC(poll_interval,
"battery poll interval in seconds - 0 disables polling");
@@ -644,8 +672,9 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags)
static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
{
int flags;
+ bool has_single_flag = di->chip == BQ27000 || di->chip == BQ27010;
- flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, false);
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_single_flag);
if (flags < 0) {
dev_err(di->dev, "error reading flag register:%d\n", flags);
return flags;
@@ -745,7 +774,7 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
}
if (di->chip == BQ27000 || di->chip == BQ27010) {
- flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, false);
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
if (flags & BQ27000_FLAG_CHGS) {
dev_dbg(di->dev, "negative current!\n");
curr = -curr;
@@ -971,6 +1000,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
bq27xxx_battery_update(di);
+ mutex_lock(&bq27xxx_list_lock);
+ list_add(&di->list, &bq27xxx_battery_devices);
+ mutex_unlock(&bq27xxx_list_lock);
+
return 0;
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
@@ -989,6 +1022,10 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
power_supply_unregister(di->bat);
+ mutex_lock(&bq27xxx_list_lock);
+ list_del(&di->list);
+ mutex_unlock(&bq27xxx_list_lock);
+
mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2a9c20..85d4ea2a9c20 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
diff --git a/drivers/power/charger-manager.c b/drivers/power/supply/charger-manager.c
index e664ca7c0afd..e664ca7c0afd 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
diff --git a/drivers/power/collie_battery.c b/drivers/power/supply/collie_battery.c
index 3a0bc608d4b5..3a0bc608d4b5 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/supply/collie_battery.c
diff --git a/drivers/power/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index 5ca0f4d90792..5ca0f4d90792 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
diff --git a/drivers/power/da9052-battery.c b/drivers/power/supply/da9052-battery.c
index 830ec46fe7d0..830ec46fe7d0 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/supply/da9052-battery.c
diff --git a/drivers/power/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index 60099815296e..60099815296e 100644
--- a/drivers/power/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
diff --git a/drivers/power/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 8b8ce978656a..8b8ce978656a 100644
--- a/drivers/power/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 80f73ccb77ab..17225689e3f6 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -28,8 +28,8 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include "../w1/w1.h"
-#include "../w1/slaves/w1_ds2760.h"
+#include "../../w1/w1.h"
+#include "../../w1/slaves/w1_ds2760.h"
struct ds2760_device_info {
struct device *dev;
@@ -566,7 +566,8 @@ static int ds2760_battery_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
INIT_DELAYED_WORK(&di->set_charged_work,
ds2760_battery_set_charged_work);
- di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
+ di->monitor_wqueue = alloc_ordered_workqueue(dev_name(&pdev->dev),
+ WQ_MEM_RECLAIM);
if (!di->monitor_wqueue) {
retval = -ESRCH;
goto workqueue_failed;
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index d3743d0ad55b..1b3b6fa89c28 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -21,8 +21,8 @@
#include <linux/power_supply.h>
#include <linux/idr.h>
-#include "../w1/w1.h"
-#include "../w1/slaves/w1_ds2780.h"
+#include "../../w1/w1.h"
+#include "../../w1/slaves/w1_ds2780.h"
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
#define DS2780_CURRENT_UNITS 1563
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index c3680024f399..cc0149131f89 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -19,8 +19,8 @@
#include <linux/power_supply.h>
#include <linux/idr.h>
-#include "../w1/w1.h"
-#include "../w1/slaves/w1_ds2781.h"
+#include "../../w1/w1.h"
+#include "../../w1/slaves/w1_ds2781.h"
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
#define DS2781_CURRENT_UNITS 1563
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index a1b7e0592245..a1b7e0592245 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index edb36bf781b0..edb36bf781b0 100644
--- a/drivers/power/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index f5c525e4482a..f5c525e4482a 100644
--- a/drivers/power/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
diff --git a/drivers/power/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index c5869b1941ac..c5869b1941ac 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/supply/intel_mid_battery.c
index 9fa4acc107ca..dc7feef1bea4 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/supply/intel_mid_battery.c
@@ -689,8 +689,7 @@ static int probe(int irq, struct device *dev)
/* initialize all required framework before enabling interrupts */
INIT_WORK(&pbi->handler, pmic_battery_handle_intrpt);
INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
- pbi->monitor_wqueue =
- create_singlethread_workqueue(dev_name(dev));
+ pbi->monitor_wqueue = alloc_workqueue(dev_name(dev), WQ_MEM_RECLAIM, 0);
if (!pbi->monitor_wqueue) {
dev_err(dev, "%s(): wqueue init failed\n", __func__);
retval = -ESRCH;
diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 35b01c7d775b..4af7b770f293 100644
--- a/drivers/power/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -235,7 +235,7 @@ static int micro_batt_probe(struct platform_device *pdev)
return -ENOMEM;
mb->micro = dev_get_drvdata(pdev->dev.parent);
- mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
+ mb->wq = alloc_workqueue("ipaq-battery-wq", WQ_MEM_RECLAIM, 0);
if (!mb->wq)
return -ENOMEM;
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c
index 4cd6899b961e..4cd6899b961e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/supply/isp1704_charger.c
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/supply/jz4740-battery.c
index 88f04f4d1a70..88f04f4d1a70 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/supply/jz4740-battery.c
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 042fb3dacb46..042fb3dacb46 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b727d484..7321b727d484 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
diff --git a/drivers/power/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 4adf2ba021ce..4adf2ba021ce 100644
--- a/drivers/power/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
diff --git a/drivers/power/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index a36bcaf62dd4..449fc56f09eb 100644
--- a/drivers/power/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -2,7 +2,7 @@
* max14577_charger.c - Battery charger driver for the Maxim 14577/77836
*
* Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -643,6 +643,6 @@ static struct platform_driver max14577_charger_driver = {
};
module_platform_driver(max14577_charger_driver);
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 charger driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80202b5..8689c80202b5 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
diff --git a/drivers/power/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index da7a75f82489..da7a75f82489 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
diff --git a/drivers/power/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 060cab5ae3aa..6c78884bad5e 100644
--- a/drivers/power/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -2,7 +2,7 @@
* max77693_charger.c - Battery charger driver for the Maxim 77693
*
* Copyright (C) 2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -766,6 +766,6 @@ static struct platform_driver max77693_charger_driver = {
};
module_platform_driver(max77693_charger_driver);
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 77693 charger driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/max8903_charger.c b/drivers/power/supply/max8903_charger.c
index fdc73d686153..fdc73d686153 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/supply/max8903_charger.c
diff --git a/drivers/power/max8925_power.c b/drivers/power/supply/max8925_power.c
index 3b94620ce5c1..3b94620ce5c1 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
diff --git a/drivers/power/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab571528..0b2eab571528 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
diff --git a/drivers/power/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index b64cf0f14142..b64cf0f14142 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
diff --git a/drivers/power/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 9e29b1321648..9e29b1321648 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index d05597b4e40f..d05597b4e40f 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
diff --git a/drivers/power/pda_power.c b/drivers/power/supply/pda_power.c
index dfe1ee89f7c7..dfe1ee89f7c7 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/supply/pda_power.c
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
index fb62ed3fc38c..78561b6884fc 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/supply/pm2301_charger.c
@@ -1054,7 +1054,8 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
pm2->ac_chg.external = true;
/* Create a work queue for the charger */
- pm2->charger_wq = create_singlethread_workqueue("pm2xxx_charger_wq");
+ pm2->charger_wq = alloc_ordered_workqueue("pm2xxx_charger_wq",
+ WQ_MEM_RECLAIM);
if (pm2->charger_wq == NULL) {
ret = -ENOMEM;
dev_err(pm2->dev, "failed to create work queue\n");
diff --git a/drivers/power/pm2301_charger.h b/drivers/power/supply/pm2301_charger.h
index 24181cf9717b..24181cf9717b 100644
--- a/drivers/power/pm2301_charger.h
+++ b/drivers/power/supply/pm2301_charger.h
diff --git a/drivers/power/pmu_battery.c b/drivers/power/supply/pmu_battery.c
index 9c8d5253812c..9c8d5253812c 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/supply/pmu_battery.c
diff --git a/drivers/power/power_supply.h b/drivers/power/supply/power_supply.h
index cc439fd89d8d..cc439fd89d8d 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/supply/power_supply.h
diff --git a/drivers/power/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca383a1..a74d8ca383a1 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 2277ad9c2f68..2277ad9c2f68 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index bcde8d13476a..bcde8d13476a 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
diff --git a/drivers/power/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index b5896ba2a602..b5896ba2a602 100644
--- a/drivers/power/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
diff --git a/drivers/power/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index bcdd83048492..bcdd83048492 100644
--- a/drivers/power/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
diff --git a/drivers/power/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index cfdbde9daf94..cfdbde9daf94 100644
--- a/drivers/power/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
diff --git a/drivers/power/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index af9383d23d12..af9383d23d12 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 0ffe5cd3abf6..0ffe5cd3abf6 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
diff --git a/drivers/power/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 768b9fcb58ea..8bb2eb38eb1c 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -26,7 +26,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/stat.h>
@@ -41,6 +41,7 @@ enum {
REG_TIME_TO_EMPTY,
REG_TIME_TO_FULL,
REG_STATUS,
+ REG_CAPACITY_LEVEL,
REG_CYCLE_COUNT,
REG_SERIAL_NUMBER,
REG_REMAINING_CAPACITY,
@@ -68,6 +69,7 @@ enum sbs_battery_mode {
#define MANUFACTURER_ACCESS_SLEEP 0x0011
/* battery status value bits */
+#define BATTERY_INITIALIZED 0x80
#define BATTERY_DISCHARGING 0x40
#define BATTERY_FULL_CHARGED 0x20
#define BATTERY_FULL_DISCHARGED 0x10
@@ -110,6 +112,8 @@ static const struct chip_data {
SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
[REG_STATUS] =
SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
+ [REG_CAPACITY_LEVEL] =
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY_LEVEL, 0x16, 0, 65535),
[REG_CYCLE_COUNT] =
SBS_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
[REG_DESIGN_CAPACITY] =
@@ -131,6 +135,7 @@ static const struct chip_data {
static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -158,13 +163,13 @@ static enum power_supply_property sbs_properties[] = {
struct sbs_info {
struct i2c_client *client;
struct power_supply *power_supply;
- struct sbs_platform_data *pdata;
bool is_present;
- bool gpio_detect;
+ struct gpio_desc *gpio_detect;
bool enable_detection;
- int irq;
int last_state;
int poll_time;
+ u32 i2c_retry_count;
+ u32 poll_retry_count;
struct delayed_work work;
int ignore_changes;
};
@@ -179,8 +184,7 @@ static int sbs_read_word_data(struct i2c_client *client, u8 address)
s32 ret = 0;
int retries = 1;
- if (chip->pdata)
- retries = max(chip->pdata->i2c_retry_count + 1, 1);
+ retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -207,10 +211,8 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address,
int retries_length = 1, retries_block = 1;
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
- if (chip->pdata) {
- retries_length = max(chip->pdata->i2c_retry_count + 1, 1);
- retries_block = max(chip->pdata->i2c_retry_count + 1, 1);
- }
+ retries_length = chip->i2c_retry_count;
+ retries_block = chip->i2c_retry_count;
/* Adapter needs to support these two functions */
if (!i2c_check_functionality(client->adapter,
@@ -274,8 +276,7 @@ static int sbs_write_word_data(struct i2c_client *client, u8 address,
s32 ret = 0;
int retries = 1;
- if (chip->pdata)
- retries = max(chip->pdata->i2c_retry_count + 1, 1);
+ retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address,
@@ -302,32 +303,31 @@ static int sbs_get_battery_presence_and_health(
s32 ret;
struct sbs_info *chip = i2c_get_clientdata(client);
- if (psp == POWER_SUPPLY_PROP_PRESENT &&
- chip->gpio_detect) {
- ret = gpio_get_value(chip->pdata->battery_detect);
- if (ret == chip->pdata->battery_detect_present)
- val->intval = 1;
- else
- val->intval = 0;
+ if (psp == POWER_SUPPLY_PROP_PRESENT && chip->gpio_detect) {
+ ret = gpiod_get_value_cansleep(chip->gpio_detect);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
chip->is_present = val->intval;
return ret;
}
- /* Write to ManufacturerAccess with
- * ManufacturerAccess command and then
- * read the status */
- ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
- MANUFACTURER_ACCESS_STATUS);
+ /*
+ * Write to ManufacturerAccess with ManufacturerAccess command
+ * and then read the status. Do not check for error on the write
+ * since not all batteries implement write access to this command,
+ * while others mandate it.
+ */
+ sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
+
+ ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
if (ret < 0) {
if (psp == POWER_SUPPLY_PROP_PRESENT)
val->intval = 0; /* battery removed */
return ret;
}
- ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
- if (ret < 0)
- return ret;
-
if (ret < sbs_data[REG_MANUFACTURER_DATA].min_value ||
ret > sbs_data[REG_MANUFACTURER_DATA].max_value) {
val->intval = 0;
@@ -377,8 +377,23 @@ static int sbs_get_battery_property(struct i2c_client *client,
if (ret >= sbs_data[reg_offset].min_value &&
ret <= sbs_data[reg_offset].max_value) {
val->intval = ret;
- if (psp != POWER_SUPPLY_PROP_STATUS)
+ if (psp == POWER_SUPPLY_PROP_CAPACITY_LEVEL) {
+ if (!(ret & BATTERY_INITIALIZED))
+ val->intval =
+ POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else if (ret & BATTERY_FULL_CHARGED)
+ val->intval =
+ POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval =
+ POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ val->intval =
+ POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ return 0;
+ } else if (psp != POWER_SUPPLY_PROP_STATUS) {
return 0;
+ }
if (ret & BATTERY_FULL_CHARGED)
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -590,6 +605,7 @@ static int sbs_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
case POWER_SUPPLY_PROP_CYCLE_COUNT:
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_CURRENT_NOW:
@@ -661,8 +677,14 @@ done:
static irqreturn_t sbs_irq(int irq, void *devid)
{
- struct power_supply *battery = devid;
+ struct sbs_info *chip = devid;
+ struct power_supply *battery = chip->power_supply;
+ int ret;
+ ret = gpiod_get_value_cansleep(chip->gpio_detect);
+ if (ret < 0)
+ return ret;
+ chip->is_present = ret;
power_supply_changed(battery);
return IRQ_HANDLED;
@@ -681,7 +703,7 @@ static void sbs_external_power_changed(struct power_supply *psy)
cancel_delayed_work_sync(&chip->work);
schedule_delayed_work(&chip->work, HZ);
- chip->poll_time = chip->pdata->poll_retry_count;
+ chip->poll_time = chip->poll_retry_count;
}
static void sbs_delayed_work(struct work_struct *work)
@@ -717,80 +739,6 @@ static void sbs_delayed_work(struct work_struct *work)
}
}
-#if defined(CONFIG_OF)
-
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-
-static const struct of_device_id sbs_dt_ids[] = {
- { .compatible = "sbs,sbs-battery" },
- { .compatible = "ti,bq20z75" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sbs_dt_ids);
-
-static struct sbs_platform_data *sbs_of_populate_pdata(
- struct i2c_client *client)
-{
- struct device_node *of_node = client->dev.of_node;
- struct sbs_platform_data *pdata = client->dev.platform_data;
- enum of_gpio_flags gpio_flags;
- int rc;
- u32 prop;
-
- /* verify this driver matches this device */
- if (!of_node)
- return NULL;
-
- /* if platform data is set, honor it */
- if (pdata)
- return pdata;
-
- /* first make sure at least one property is set, otherwise
- * it won't change behavior from running without pdata.
- */
- if (!of_get_property(of_node, "sbs,i2c-retry-count", NULL) &&
- !of_get_property(of_node, "sbs,poll-retry-count", NULL) &&
- !of_get_property(of_node, "sbs,battery-detect-gpios", NULL))
- goto of_out;
-
- pdata = devm_kzalloc(&client->dev, sizeof(struct sbs_platform_data),
- GFP_KERNEL);
- if (!pdata)
- goto of_out;
-
- rc = of_property_read_u32(of_node, "sbs,i2c-retry-count", &prop);
- if (!rc)
- pdata->i2c_retry_count = prop;
-
- rc = of_property_read_u32(of_node, "sbs,poll-retry-count", &prop);
- if (!rc)
- pdata->poll_retry_count = prop;
-
- if (!of_get_property(of_node, "sbs,battery-detect-gpios", NULL)) {
- pdata->battery_detect = -1;
- goto of_out;
- }
-
- pdata->battery_detect = of_get_named_gpio_flags(of_node,
- "sbs,battery-detect-gpios", 0, &gpio_flags);
-
- if (gpio_flags & OF_GPIO_ACTIVE_LOW)
- pdata->battery_detect_present = 0;
- else
- pdata->battery_detect_present = 1;
-
-of_out:
- return pdata;
-}
-#else
-static struct sbs_platform_data *sbs_of_populate_pdata(
- struct i2c_client *client)
-{
- return client->dev.platform_data;
-}
-#endif
-
static const struct power_supply_desc sbs_default_desc = {
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = sbs_properties,
@@ -819,13 +767,12 @@ static int sbs_probe(struct i2c_client *client,
if (!sbs_desc->name)
return -ENOMEM;
- chip = kzalloc(sizeof(struct sbs_info), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->client = client;
chip->enable_detection = false;
- chip->gpio_detect = false;
psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = chip;
/* ignore first notification of external change, it is generated
@@ -834,11 +781,31 @@ static int sbs_probe(struct i2c_client *client,
chip->ignore_changes = 1;
chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
- pdata = sbs_of_populate_pdata(client);
+ /* use pdata if available, fall back to DT properties,
+ * or hardcoded defaults if not
+ */
+ rc = of_property_read_u32(client->dev.of_node, "sbs,i2c-retry-count",
+ &chip->i2c_retry_count);
+ if (rc)
+ chip->i2c_retry_count = 0;
+
+ rc = of_property_read_u32(client->dev.of_node, "sbs,poll-retry-count",
+ &chip->poll_retry_count);
+ if (rc)
+ chip->poll_retry_count = 0;
if (pdata) {
- chip->gpio_detect = gpio_is_valid(pdata->battery_detect);
- chip->pdata = pdata;
+ chip->poll_retry_count = pdata->poll_retry_count;
+ chip->i2c_retry_count = pdata->i2c_retry_count;
+ }
+ chip->i2c_retry_count = chip->i2c_retry_count + 1;
+
+ chip->gpio_detect = devm_gpiod_get_optional(&client->dev,
+ "sbs,battery-detect", GPIOD_IN);
+ if (IS_ERR(chip->gpio_detect)) {
+ dev_err(&client->dev, "Failed to get gpio: %ld\n",
+ PTR_ERR(chip->gpio_detect));
+ return PTR_ERR(chip->gpio_detect);
}
i2c_set_clientdata(client, chip);
@@ -846,47 +813,26 @@ static int sbs_probe(struct i2c_client *client,
if (!chip->gpio_detect)
goto skip_gpio;
- rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
- if (rc) {
- dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
- chip->gpio_detect = false;
- goto skip_gpio;
- }
-
- rc = gpio_direction_input(pdata->battery_detect);
- if (rc) {
- dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
- gpio_free(pdata->battery_detect);
- chip->gpio_detect = false;
- goto skip_gpio;
- }
-
- irq = gpio_to_irq(pdata->battery_detect);
+ irq = gpiod_to_irq(chip->gpio_detect);
if (irq <= 0) {
dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
- gpio_free(pdata->battery_detect);
- chip->gpio_detect = false;
goto skip_gpio;
}
- rc = request_irq(irq, sbs_irq,
+ rc = devm_request_threaded_irq(&client->dev, irq, NULL, sbs_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&client->dev), chip->power_supply);
+ dev_name(&client->dev), chip);
if (rc) {
dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
- gpio_free(pdata->battery_detect);
- chip->gpio_detect = false;
goto skip_gpio;
}
- chip->irq = irq;
-
skip_gpio:
/*
* Before we register, we might need to make sure we can actually talk
* to the battery.
*/
- if (!force_load) {
+ if (!(force_load || chip->gpio_detect)) {
rc = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
if (rc < 0) {
@@ -896,7 +842,7 @@ skip_gpio:
}
}
- chip->power_supply = power_supply_register(&client->dev, sbs_desc,
+ chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc,
&psy_cfg);
if (IS_ERR(chip->power_supply)) {
dev_err(&client->dev,
@@ -915,13 +861,6 @@ skip_gpio:
return 0;
exit_psupply:
- if (chip->irq)
- free_irq(chip->irq, chip->power_supply);
- if (chip->gpio_detect)
- gpio_free(pdata->battery_detect);
-
- kfree(chip);
-
return rc;
}
@@ -929,18 +868,8 @@ static int sbs_remove(struct i2c_client *client)
{
struct sbs_info *chip = i2c_get_clientdata(client);
- if (chip->irq)
- free_irq(chip->irq, chip->power_supply);
- if (chip->gpio_detect)
- gpio_free(chip->pdata->battery_detect);
-
- power_supply_unregister(chip->power_supply);
-
cancel_delayed_work_sync(&chip->work);
- kfree(chip);
- chip = NULL;
-
return 0;
}
@@ -950,16 +879,16 @@ static int sbs_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct sbs_info *chip = i2c_get_clientdata(client);
- s32 ret;
if (chip->poll_time > 0)
cancel_delayed_work_sync(&chip->work);
- /* write to manufacturer access with sleep command */
- ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
+ /*
+ * Write to manufacturer access with sleep command.
+ * Support is manufacturer dependent, so ignore errors.
+ */
+ sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
MANUFACTURER_ACCESS_SLEEP);
- if (chip->is_present && ret < 0)
- return ret;
return 0;
}
@@ -978,13 +907,20 @@ static const struct i2c_device_id sbs_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sbs_id);
+static const struct of_device_id sbs_dt_ids[] = {
+ { .compatible = "sbs,sbs-battery" },
+ { .compatible = "ti,bq20z75" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sbs_dt_ids);
+
static struct i2c_driver sbs_battery_driver = {
.probe = sbs_probe,
.remove = sbs_remove,
.id_table = sbs_id,
.driver = {
.name = "sbs-battery",
- .of_match_table = of_match_ptr(sbs_dt_ids),
+ .of_match_table = sbs_dt_ids,
.pm = SBS_PM_OPS,
},
};
diff --git a/drivers/power/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 072c5189bd6d..072c5189bd6d 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
diff --git a/drivers/power/test_power.c b/drivers/power/supply/test_power.c
index 57246cdbd042..57246cdbd042 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/supply/test_power.c
diff --git a/drivers/power/tosa_battery.c b/drivers/power/supply/tosa_battery.c
index 6e88c1b37945..6e88c1b37945 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/supply/tosa_battery.c
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index 1b4b5e09538e..1b4b5e09538e 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index 4c56e54af6ac..9fd019f9b88c 100644
--- a/drivers/power/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -46,6 +46,8 @@ struct tps65217_charger {
int prev_ac_online;
struct task_struct *poll_task;
+
+ int irq;
};
static enum power_supply_property tps65217_ac_props[] = {
@@ -198,6 +200,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_charger *charger;
struct power_supply_config cfg = {};
+ int irq;
int ret;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -221,18 +224,40 @@ static int tps65217_charger_probe(struct platform_device *pdev)
return PTR_ERR(charger->ac);
}
+ irq = platform_get_irq_byname(pdev, "AC");
+ if (irq < 0)
+ irq = -ENXIO;
+ charger->irq = irq;
+
ret = tps65217_config_charger(charger);
if (ret < 0) {
dev_err(charger->dev, "charger config failed, err %d\n", ret);
return ret;
}
- charger->poll_task = kthread_run(tps65217_charger_poll_task,
- charger, "ktps65217charger");
- if (IS_ERR(charger->poll_task)) {
- ret = PTR_ERR(charger->poll_task);
- dev_err(charger->dev, "Unable to run kthread err %d\n", ret);
- return ret;
+ if (irq != -ENXIO) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ tps65217_charger_irq,
+ 0, "tps65217-charger",
+ charger);
+ if (ret) {
+ dev_err(charger->dev,
+ "Unable to register irq %d err %d\n", irq,
+ ret);
+ return ret;
+ }
+
+ /* Check current state */
+ tps65217_charger_irq(irq, charger);
+ } else {
+ charger->poll_task = kthread_run(tps65217_charger_poll_task,
+ charger, "ktps65217charger");
+ if (IS_ERR(charger->poll_task)) {
+ ret = PTR_ERR(charger->poll_task);
+ dev_err(charger->dev,
+ "Unable to run kthread err %d\n", ret);
+ return ret;
+ }
}
return 0;
@@ -242,7 +267,8 @@ static int tps65217_charger_remove(struct platform_device *pdev)
{
struct tps65217_charger *charger = platform_get_drvdata(pdev);
- kthread_stop(charger->poll_task);
+ if (charger->irq == -ENXIO)
+ kthread_stop(charger->poll_task);
return 0;
}
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index bcd4dc304f27..bcd4dc304f27 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/supply/twl4030_madc_battery.c
index f5817e422d64..f5817e422d64 100644
--- a/drivers/power/twl4030_madc_battery.c
+++ b/drivers/power/supply/twl4030_madc_battery.c
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/supply/wm831x_backup.c
index 2e33109ca8c7..2e33109ca8c7 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/supply/wm831x_backup.c
diff --git a/drivers/power/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 7082301da945..7082301da945 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
diff --git a/drivers/power/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c5880664e09..5c5880664e09 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index c2f09ed35050..6285626d142a 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/power/wm97xx_battery.c
- *
* Battery measurement code for WM97xx
*
* based on tosa_battery.c
diff --git a/drivers/power/z2_battery.c b/drivers/power/supply/z2_battery.c
index b201e3facf73..8a43b49cfd35 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/supply/z2_battery.c
@@ -317,7 +317,6 @@ MODULE_DEVICE_TABLE(i2c, z2_batt_id);
static struct i2c_driver z2_batt_driver = {
.driver = {
.name = "z2-battery",
- .owner = THIS_MODULE,
.pm = Z2_BATTERY_PM_OPS
},
.probe = z2_batt_probe,
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 632701a1d993..b7f300b79ffd 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -958,7 +958,7 @@ static int ps3_vuart_bus_interrupt_get(void)
fail_request_irq:
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
+ vuart_bus_priv.virq = 0;
fail_alloc_irq:
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
@@ -982,7 +982,7 @@ static int ps3_vuart_bus_interrupt_put(void)
free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
+ vuart_bus_priv.virq = 0;
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 2e481b9e8ea5..86280b7e41f3 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -263,6 +263,7 @@ no_sysfs:
no_device:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index ee4f183ef9ee..344a3bac210b 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -268,18 +268,19 @@ static int setup_interrupt(int gpio)
return err;
irq = gpio_to_irq(gpio);
+ if (irq < 0)
+ return irq;
- if (NO_IRQ == irq)
- return NO_IRQ;
-
- if (irq_set_irq_type(irq, IRQF_TRIGGER_FALLING)) {
+ err = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
+ if (err) {
pr_err("cannot set trigger type for irq %d\n", irq);
- return NO_IRQ;
+ return err;
}
- if (request_irq(irq, isr, 0, DRIVER, &ixp_clock)) {
+ err = request_irq(irq, isr, 0, DRIVER, &ixp_clock);
+ if (err) {
pr_err("request_irq failed for irq %d\n", irq);
- return NO_IRQ;
+ return err;
}
return irq;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 6c88e31c01f7..936f7ccc9736 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -323,7 +323,7 @@ config REGULATOR_LP872X
config REGULATOR_LP873X
tristate "TI LP873X Power regulators"
- depends on MFD_LP873X && OF
+ depends on MFD_TI_LP873X && OF
help
This driver supports LP873X voltage regulator chips. LP873X
provides two step-down converters and two general-purpose LDO
@@ -353,6 +353,14 @@ config REGULATOR_LTC3589
This enables support for the LTC3589, LTC3589-1, and LTC3589-2
8-output regulators controlled via I2C.
+config REGULATOR_LTC3676
+ tristate "LTC3676 8-output voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This enables support for the LTC3676
+ 8-output regulators controlled via I2C.
+
config REGULATOR_MAX14577
tristate "Maxim 14577/77836 regulator"
depends on MFD_MAX14577
@@ -635,11 +643,11 @@ config REGULATOR_RC5T583
outputs which can be controlled by i2c communication.
config REGULATOR_RK808
- tristate "Rockchip RK808 Power regulators"
+ tristate "Rockchip RK808/RK818 Power regulators"
depends on MFD_RK808
help
Select this option to enable the power regulator of ROCKCHIP
- PMIC RK808.
+ PMIC RK808 and RK818.
This driver supports the control of different power rails of device
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
@@ -820,7 +828,7 @@ config REGULATOR_TPS65912
This driver supports TPS65912 voltage regulator chip.
config REGULATOR_TPS80031
- tristate "TI TPS80031/TPS80032 power regualtor driver"
+ tristate "TI TPS80031/TPS80032 power regulator driver"
depends on MFD_TPS80031
help
TPS80031/ TPS80032 Fully Integrated Power Management with Power
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index f3da9eea9ab6..2142a5d3fc08 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
+obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 6d9ac76a772f..54382ef902c6 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,16 +244,64 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.ops = &axp20x_ops_sw,
};
-static const struct regulator_linear_range axp809_dcdc4_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2f, 20000),
- REGULATOR_LINEAR_RANGE(1800000, 0x30, 0x38, 100000),
+static const struct regulator_linear_range axp806_dcdca_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000),
+ REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000),
};
-static const struct regulator_linear_range axp809_dldo1_ranges[] = {
+static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2d, 20000),
+ REGULATOR_LINEAR_RANGE(1600000, 0x2e, 0x3f, 100000),
+};
+
+static const struct regulator_linear_range axp806_cldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000),
REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000),
};
+static const struct regulator_desc axp806_regulators[] = {
+ AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina", axp806_dcdca_ranges,
+ 72, AXP806_DCDCA_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1,
+ BIT(0)),
+ AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50,
+ AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(1)),
+ AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc", axp806_dcdca_ranges,
+ 72, AXP806_DCDCC_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1,
+ BIT(2)),
+ AXP_DESC_RANGES(AXP806, DCDCD, "dcdcd", "vind", axp806_dcdcd_ranges,
+ 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
+ AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
+ AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
+ AXP806_ALDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(6)),
+ AXP_DESC(AXP806, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP806_ALDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(7)),
+ AXP_DESC(AXP806, BLDO1, "bldo1", "bldoin", 700, 1900, 100,
+ AXP806_BLDO1_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100,
+ AXP806_BLDO2_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100,
+ AXP806_BLDO3_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(2)),
+ AXP_DESC(AXP806, BLDO4, "bldo4", "bldoin", 700, 1900, 100,
+ AXP806_BLDO4_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(3)),
+ AXP_DESC(AXP806, CLDO1, "cldo1", "cldoin", 700, 3300, 100,
+ AXP806_CLDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(4)),
+ AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin", axp806_cldo2_ranges,
+ 32, AXP806_CLDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2,
+ BIT(5)),
+ AXP_DESC(AXP806, CLDO3, "cldo3", "cldoin", 700, 3300, 100,
+ AXP806_CLDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC_SW(AXP806, SW, "sw", "swin", AXP806_PWR_OUT_CTRL2, BIT(7)),
+};
+
+static const struct regulator_linear_range axp809_dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2f, 20000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x30, 0x38, 100000),
+};
+
static const struct regulator_desc axp809_regulators[] = {
AXP_DESC(AXP809, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(1)),
@@ -278,7 +326,7 @@ static const struct regulator_desc axp809_regulators[] = {
AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)),
AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
- AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp809_dldo1_ranges,
+ AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp806_cldo2_ranges,
32, AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
BIT(3)),
AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
@@ -302,6 +350,7 @@ static const struct regulator_desc axp809_regulators[] = {
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ unsigned int reg = AXP20X_DCDC_FREQ;
u32 min, max, def, step;
switch (axp20x->variant) {
@@ -312,6 +361,14 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
def = 1500;
step = 75;
break;
+ case AXP806_ID:
+ /*
+ * AXP806 DCDC work frequency setting has the same range and
+ * step as AXP22X, but at a different register.
+ * Fall through to the check below.
+ * (See include/linux/mfd/axp20x.h)
+ */
+ reg = AXP806_DCDC_FREQ_CTRL;
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
@@ -343,7 +400,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
dcdcfreq = (dcdcfreq - min) / step;
- return regmap_update_bits(axp20x->regmap, AXP20X_DCDC_FREQ,
+ return regmap_update_bits(axp20x->regmap, reg,
AXP20X_FREQ_DCDC_MASK, dcdcfreq);
}
@@ -377,6 +434,7 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
+ unsigned int reg = AXP20X_DCDC_MODE;
unsigned int mask;
switch (axp20x->variant) {
@@ -392,6 +450,13 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
workmode <<= ffs(mask) - 1;
break;
+ case AXP806_ID:
+ reg = AXP806_DCDC_MODE_CTRL2;
+ /*
+ * AXP806 DCDC regulator IDs have the same range as AXP22X.
+ * Fall through to the check below.
+ * (See include/linux/mfd/axp20x.h)
+ */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
@@ -408,7 +473,34 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
return -EINVAL;
}
- return regmap_update_bits(rdev->regmap, AXP20X_DCDC_MODE, mask, workmode);
+ return regmap_update_bits(rdev->regmap, reg, mask, workmode);
+}
+
+/*
+ * This function checks whether a regulator is part of a poly-phase
+ * output setup based on the register settings. Returns true if it is.
+ */
+static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
+{
+ u32 reg = 0;
+
+ /* Only AXP806 has poly-phase outputs */
+ if (axp20x->variant != AXP806_ID)
+ return false;
+
+ regmap_read(axp20x->regmap, AXP806_DCDC_MODE_CTRL2, &reg);
+
+ switch (id) {
+ case AXP806_DCDCB:
+ return (((reg & GENMASK(7, 6)) == BIT(6)) ||
+ ((reg & GENMASK(7, 6)) == BIT(7)));
+ case AXP806_DCDCC:
+ return ((reg & GENMASK(7, 6)) == BIT(7));
+ case AXP806_DCDCE:
+ return !!(reg & BIT(5));
+ }
+
+ return false;
}
static int axp20x_regulator_probe(struct platform_device *pdev)
@@ -440,6 +532,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
"x-powers,drive-vbus-en");
break;
+ case AXP806_ID:
+ regulators = axp806_regulators;
+ nregulators = AXP806_REG_ID_MAX;
+ break;
case AXP809_ID:
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
@@ -458,6 +554,14 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
struct regulator_desc *new_desc;
/*
+ * If this regulator is a slave in a poly-phase setup,
+ * skip it, as its controls are bound to the master
+ * regulator and won't work.
+ */
+ if (axp20x_is_polyphase_slave(axp20x, i))
+ continue;
+
+ /*
* Regulators DC1SW and DC5LDO are connected internally,
* so we have to handle their supply names separately.
*
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index db320e8fa865..67426c0477d3 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -679,24 +679,6 @@ static int drms_uA_update(struct regulator_dev *rdev)
!rdev->desc->ops->set_load)
return -EINVAL;
- /* get output voltage */
- output_uV = _regulator_get_voltage(rdev);
- if (output_uV <= 0) {
- rdev_err(rdev, "invalid output voltage found\n");
- return -EINVAL;
- }
-
- /* get input voltage */
- input_uV = 0;
- if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
- if (input_uV <= 0)
- input_uV = rdev->constraints->input_uV;
- if (input_uV <= 0) {
- rdev_err(rdev, "invalid input voltage found\n");
- return -EINVAL;
- }
-
/* calc total requested load */
list_for_each_entry(sibling, &rdev->consumer_list, list)
current_uA += sibling->uA_load;
@@ -709,6 +691,24 @@ static int drms_uA_update(struct regulator_dev *rdev)
if (err < 0)
rdev_err(rdev, "failed to set load %d\n", current_uA);
} else {
+ /* get output voltage */
+ output_uV = _regulator_get_voltage(rdev);
+ if (output_uV <= 0) {
+ rdev_err(rdev, "invalid output voltage found\n");
+ return -EINVAL;
+ }
+
+ /* get input voltage */
+ input_uV = 0;
+ if (rdev->supply)
+ input_uV = regulator_get_voltage(rdev->supply);
+ if (input_uV <= 0)
+ input_uV = rdev->constraints->input_uV;
+ if (input_uV <= 0) {
+ rdev_err(rdev, "invalid input voltage found\n");
+ return -EINVAL;
+ }
+
/* now get the optimum mode for our new total regulator load */
mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
output_uV, current_uA);
@@ -2743,6 +2743,24 @@ static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
+static int _regulator_set_voltage_time(struct regulator_dev *rdev,
+ int old_uV, int new_uV)
+{
+ unsigned int ramp_delay = 0;
+
+ if (rdev->constraints->ramp_delay)
+ ramp_delay = rdev->constraints->ramp_delay;
+ else if (rdev->desc->ramp_delay)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ if (ramp_delay == 0) {
+ rdev_warn(rdev, "ramp_delay not set\n");
+ return 0;
+ }
+
+ return DIV_ROUND_UP(abs(new_uV - old_uV), ramp_delay);
+}
+
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
@@ -2751,6 +2769,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int best_val = 0;
unsigned int selector;
int old_selector = -1;
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int old_uV = _regulator_get_voltage(rdev);
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
@@ -2762,29 +2782,28 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
* info to call set_voltage_time_sel().
*/
if (_regulator_is_enabled(rdev) &&
- rdev->desc->ops->set_voltage_time_sel &&
- rdev->desc->ops->get_voltage_sel) {
- old_selector = rdev->desc->ops->get_voltage_sel(rdev);
+ ops->set_voltage_time_sel && ops->get_voltage_sel) {
+ old_selector = ops->get_voltage_sel(rdev);
if (old_selector < 0)
return old_selector;
}
- if (rdev->desc->ops->set_voltage) {
+ if (ops->set_voltage) {
ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
&selector);
if (ret >= 0) {
- if (rdev->desc->ops->list_voltage)
- best_val = rdev->desc->ops->list_voltage(rdev,
- selector);
+ if (ops->list_voltage)
+ best_val = ops->list_voltage(rdev,
+ selector);
else
best_val = _regulator_get_voltage(rdev);
}
- } else if (rdev->desc->ops->set_voltage_sel) {
+ } else if (ops->set_voltage_sel) {
ret = regulator_map_voltage(rdev, min_uV, max_uV);
if (ret >= 0) {
- best_val = rdev->desc->ops->list_voltage(rdev, ret);
+ best_val = ops->list_voltage(rdev, ret);
if (min_uV <= best_val && max_uV >= best_val) {
selector = ret;
if (old_selector == selector)
@@ -2800,34 +2819,50 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
ret = -EINVAL;
}
- /* Call set_voltage_time_sel if successfully obtained old_selector */
- if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
- && old_selector != selector) {
+ if (ret)
+ goto out;
- delay = rdev->desc->ops->set_voltage_time_sel(rdev,
- old_selector, selector);
- if (delay < 0) {
- rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n",
- delay);
- delay = 0;
+ if (ops->set_voltage_time_sel) {
+ /*
+ * Call set_voltage_time_sel if successfully obtained
+ * old_selector
+ */
+ if (old_selector >= 0 && old_selector != selector)
+ delay = ops->set_voltage_time_sel(rdev, old_selector,
+ selector);
+ } else {
+ if (old_uV != best_val) {
+ if (ops->set_voltage_time)
+ delay = ops->set_voltage_time(rdev, old_uV,
+ best_val);
+ else
+ delay = _regulator_set_voltage_time(rdev,
+ old_uV,
+ best_val);
}
+ }
- /* Insert any necessary delays */
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
- }
+ if (delay < 0) {
+ rdev_warn(rdev, "failed to get delay: %d\n", delay);
+ delay = 0;
}
- if (ret == 0 && best_val >= 0) {
+ /* Insert any necessary delays */
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
+
+ if (best_val >= 0) {
unsigned long data = best_val;
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
(void *)data);
}
+out:
trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
return ret;
@@ -2998,9 +3033,13 @@ int regulator_set_voltage_time(struct regulator *regulator,
int voltage;
int i;
+ if (ops->set_voltage_time)
+ return ops->set_voltage_time(rdev, old_uV, new_uV);
+ else if (!ops->set_voltage_time_sel)
+ return _regulator_set_voltage_time(rdev, old_uV, new_uV);
+
/* Currently requires operations to do this */
- if (!ops->list_voltage || !ops->set_voltage_time_sel
- || !rdev->desc->n_voltages)
+ if (!ops->list_voltage || !rdev->desc->n_voltages)
return -EINVAL;
for (i = 0; i < rdev->desc->n_voltages; i++) {
@@ -3039,19 +3078,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int old_selector,
unsigned int new_selector)
{
- unsigned int ramp_delay = 0;
int old_volt, new_volt;
- if (rdev->constraints->ramp_delay)
- ramp_delay = rdev->constraints->ramp_delay;
- else if (rdev->desc->ramp_delay)
- ramp_delay = rdev->desc->ramp_delay;
-
- if (ramp_delay == 0) {
- rdev_warn(rdev, "ramp_delay not set\n");
- return 0;
- }
-
/* sanity check */
if (!rdev->desc->ops->list_voltage)
return -EINVAL;
@@ -3059,7 +3087,11 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);
- return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+ if (rdev->desc->ops->set_voltage_time)
+ return rdev->desc->ops->set_voltage_time(rdev, old_volt,
+ new_volt);
+ else
+ return _regulator_set_voltage_time(rdev, old_volt, new_volt);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
@@ -3483,10 +3515,8 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = NULL;
for (i = 0; i < num_consumers; i++) {
- consumers[i].consumer = _regulator_get(dev,
- consumers[i].supply,
- false,
- !consumers[i].optional);
+ consumers[i].consumer = regulator_get(dev,
+ consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
ret = PTR_ERR(consumers[i].consumer);
dev_err(dev, "Failed to get supply '%s': %d\n",
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index 3963dfad766c..8976141c1438 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -75,24 +75,6 @@ static struct ux500_regulator_debug {
u8 *state_after_suspend;
} rdebug;
-void ux500_regulator_suspend_debug(void)
-{
- int i;
-
- for (i = 0; i < rdebug.num_regulators; i++)
- rdebug.state_before_suspend[i] =
- rdebug.regulator_array[i].is_enabled;
-}
-
-void ux500_regulator_resume_debug(void)
-{
- int i;
-
- for (i = 0; i < rdebug.num_regulators; i++)
- rdebug.state_after_suspend[i] =
- rdebug.regulator_array[i].is_enabled;
-}
-
static int ux500_regulator_power_state_cnt_print(struct seq_file *s, void *p)
{
/* print power state count */
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 6ad8ab4c578d..6ec1d400adae 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -164,11 +164,8 @@ int devm_regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = NULL;
for (i = 0; i < num_consumers; i++) {
- consumers[i].consumer = _devm_regulator_get(dev,
- consumers[i].supply,
- consumers[i].optional ?
- OPTIONAL_GET :
- NORMAL_GET);
+ consumers[i].consumer = devm_regulator_get(dev,
+ consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
ret = PTR_ERR(consumers[i].consumer);
dev_err(dev, "Failed to get supply '%s': %d\n",
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 42dc5fb8c899..62c5f5445d44 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -477,7 +477,8 @@ static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
return 0;
}
-unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
+static unsigned int
+hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV, int load_uA)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
new file mode 100644
index 000000000000..e2b476ca2b4d
--- /dev/null
+++ b/drivers/regulator/ltc3676.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (C) 2016 Gateworks Corporation, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define DRIVER_NAME "ltc3676"
+
+/* LTC3676 Registers */
+#define LTC3676_BUCK1 0x01
+#define LTC3676_BUCK2 0x02
+#define LTC3676_BUCK3 0x03
+#define LTC3676_BUCK4 0x04
+#define LTC3676_LDOA 0x05
+#define LTC3676_LDOB 0x06
+#define LTC3676_SQD1 0x07
+#define LTC3676_SQD2 0x08
+#define LTC3676_CNTRL 0x09
+#define LTC3676_DVB1A 0x0A
+#define LTC3676_DVB1B 0x0B
+#define LTC3676_DVB2A 0x0C
+#define LTC3676_DVB2B 0x0D
+#define LTC3676_DVB3A 0x0E
+#define LTC3676_DVB3B 0x0F
+#define LTC3676_DVB4A 0x10
+#define LTC3676_DVB4B 0x11
+#define LTC3676_MSKIRQ 0x12
+#define LTC3676_MSKPG 0x13
+#define LTC3676_USER 0x14
+#define LTC3676_IRQSTAT 0x15
+#define LTC3676_PGSTATL 0x16
+#define LTC3676_PGSTATRT 0x17
+#define LTC3676_HRST 0x1E
+#define LTC3676_CLIRQ 0x1F
+
+#define LTC3676_DVBxA_REF_SELECT BIT(5)
+
+#define LTC3676_IRQSTAT_PGOOD_TIMEOUT BIT(3)
+#define LTC3676_IRQSTAT_UNDERVOLT_WARN BIT(4)
+#define LTC3676_IRQSTAT_UNDERVOLT_FAULT BIT(5)
+#define LTC3676_IRQSTAT_THERMAL_WARN BIT(6)
+#define LTC3676_IRQSTAT_THERMAL_FAULT BIT(7)
+
+enum ltc3676_reg {
+ LTC3676_SW1,
+ LTC3676_SW2,
+ LTC3676_SW3,
+ LTC3676_SW4,
+ LTC3676_LDO1,
+ LTC3676_LDO2,
+ LTC3676_LDO3,
+ LTC3676_LDO4,
+ LTC3676_NUM_REGULATORS,
+};
+
+struct ltc3676 {
+ struct regmap *regmap;
+ struct device *dev;
+ struct regulator_desc regulator_descs[LTC3676_NUM_REGULATORS];
+ struct regulator_dev *regulators[LTC3676_NUM_REGULATORS];
+};
+
+static int ltc3676_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
+ struct device *dev = ltc3676->dev;
+ int dcdc = rdev_get_id(rdev);
+ int sel;
+
+ dev_dbg(dev, "%s id=%d uV=%d\n", __func__, dcdc, uV);
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
+
+ /* DVBB register follows right after the corresponding DVBA register */
+ return regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg + 1,
+ rdev->desc->vsel_mask, sel);
+}
+
+static int ltc3676_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
+ struct device *dev = ltc3676->dev;
+ int mask, val;
+ int dcdc = rdev_get_id(rdev);
+
+ dev_dbg(dev, "%s id=%d mode=%d\n", __func__, dcdc, mode);
+
+ mask = LTC3676_DVBxA_REF_SELECT;
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = 0; /* select DVBxA */
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = LTC3676_DVBxA_REF_SELECT; /* select DVBxB */
+ break;
+ default:
+ dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
+ rdev->desc->name, mode);
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg,
+ mask, val);
+}
+
+static inline unsigned int ltc3676_scale(unsigned int uV, u32 r1, u32 r2)
+{
+ uint64_t tmp;
+ if (uV == 0)
+ return 0;
+ tmp = (uint64_t)uV * r1;
+ do_div(tmp, r2);
+ return uV + (unsigned int)tmp;
+}
+
+static int ltc3676_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct ltc3676 *ltc3676 = config->driver_data;
+ struct regulator_desc *rdesc = &ltc3676->regulator_descs[desc->id];
+ u32 r[2];
+ int ret;
+
+ /* LDO3 has a fixed output */
+ if (desc->id == LTC3676_LDO3)
+ return 0;
+
+ ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider", r, 2);
+ if (ret) {
+ dev_err(ltc3676->dev, "Failed to parse voltage divider: %d\n",
+ ret);
+ return ret;
+ }
+
+ rdesc->min_uV = ltc3676_scale(desc->min_uV, r[0], r[1]);
+ rdesc->uV_step = ltc3676_scale(desc->uV_step, r[0], r[1]);
+ rdesc->fixed_uV = ltc3676_scale(desc->fixed_uV, r[0], r[1]);
+
+ return 0;
+}
+
+/* SW1, SW2, SW3, SW4 linear 0.8V-3.3V with scalar via R1/R2 feedback res */
+static struct regulator_ops ltc3676_linear_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_suspend_voltage = ltc3676_set_suspend_voltage,
+ .set_suspend_mode = ltc3676_set_suspend_mode,
+};
+
+/* LDO1 always on fixed 0.8V-3.3V with scalar via R1/R2 feedback res */
+static struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
+};
+
+/* LDO2, LDO3 fixed (LDO2 has external scalar via R1/R2 feedback res) */
+static struct regulator_ops ltc3676_fixed_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+#define LTC3676_REG(_id, _name, _ops, en_reg, en_bit, dvba_reg, dvb_mask) \
+ [LTC3676_ ## _id] = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(#_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = ltc3676_of_parse_cb, \
+ .n_voltages = (dvb_mask) + 1, \
+ .min_uV = (dvba_reg) ? 412500 : 0, \
+ .uV_step = (dvba_reg) ? 12500 : 0, \
+ .ramp_delay = (dvba_reg) ? 800 : 0, \
+ .fixed_uV = (dvb_mask) ? 0 : 725000, \
+ .ops = &ltc3676_ ## _ops ## _regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = LTC3676_ ## _id, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = (dvba_reg), \
+ .vsel_mask = (dvb_mask), \
+ .enable_reg = (en_reg), \
+ .enable_mask = (1 << en_bit), \
+ }
+
+#define LTC3676_LINEAR_REG(_id, _name, _en, _dvba) \
+ LTC3676_REG(_id, _name, linear, \
+ LTC3676_ ## _en, 7, \
+ LTC3676_ ## _dvba, 0x1f)
+
+#define LTC3676_FIXED_REG(_id, _name, _en_reg, _en_bit) \
+ LTC3676_REG(_id, _name, fixed, LTC3676_ ## _en_reg, _en_bit, 0, 0)
+
+static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
+ LTC3676_LINEAR_REG(SW1, sw1, BUCK1, DVB1A),
+ LTC3676_LINEAR_REG(SW2, sw2, BUCK2, DVB2A),
+ LTC3676_LINEAR_REG(SW3, sw3, BUCK3, DVB3A),
+ LTC3676_LINEAR_REG(SW4, sw4, BUCK4, DVB4A),
+ LTC3676_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
+ LTC3676_FIXED_REG(LDO2, ldo2, LDOA, 2),
+ LTC3676_FIXED_REG(LDO3, ldo3, LDOA, 5),
+ LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2),
+};
+
+static bool ltc3676_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTC3676_IRQSTAT:
+ case LTC3676_BUCK1:
+ case LTC3676_BUCK2:
+ case LTC3676_BUCK3:
+ case LTC3676_BUCK4:
+ case LTC3676_LDOA:
+ case LTC3676_LDOB:
+ case LTC3676_SQD1:
+ case LTC3676_SQD2:
+ case LTC3676_CNTRL:
+ case LTC3676_DVB1A:
+ case LTC3676_DVB1B:
+ case LTC3676_DVB2A:
+ case LTC3676_DVB2B:
+ case LTC3676_DVB3A:
+ case LTC3676_DVB3B:
+ case LTC3676_DVB4A:
+ case LTC3676_DVB4B:
+ case LTC3676_MSKIRQ:
+ case LTC3676_MSKPG:
+ case LTC3676_USER:
+ case LTC3676_HRST:
+ case LTC3676_CLIRQ:
+ return true;
+ }
+ return false;
+}
+
+static bool ltc3676_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTC3676_IRQSTAT:
+ case LTC3676_BUCK1:
+ case LTC3676_BUCK2:
+ case LTC3676_BUCK3:
+ case LTC3676_BUCK4:
+ case LTC3676_LDOA:
+ case LTC3676_LDOB:
+ case LTC3676_SQD1:
+ case LTC3676_SQD2:
+ case LTC3676_CNTRL:
+ case LTC3676_DVB1A:
+ case LTC3676_DVB1B:
+ case LTC3676_DVB2A:
+ case LTC3676_DVB2B:
+ case LTC3676_DVB3A:
+ case LTC3676_DVB3B:
+ case LTC3676_DVB4A:
+ case LTC3676_DVB4B:
+ case LTC3676_MSKIRQ:
+ case LTC3676_MSKPG:
+ case LTC3676_USER:
+ case LTC3676_HRST:
+ case LTC3676_CLIRQ:
+ return true;
+ }
+ return false;
+}
+
+static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTC3676_IRQSTAT:
+ case LTC3676_PGSTATL:
+ case LTC3676_PGSTATRT:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config ltc3676_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = ltc3676_writeable_reg,
+ .readable_reg = ltc3676_readable_reg,
+ .volatile_reg = ltc3676_volatile_reg,
+ .max_register = LTC3676_CLIRQ,
+ .use_single_rw = true,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static irqreturn_t ltc3676_isr(int irq, void *dev_id)
+{
+ struct ltc3676 *ltc3676 = dev_id;
+ struct device *dev = ltc3676->dev;
+ unsigned int i, irqstat, event;
+
+ regmap_read(ltc3676->regmap, LTC3676_IRQSTAT, &irqstat);
+
+ dev_dbg(dev, "irq%d irqstat=0x%02x\n", irq, irqstat);
+ if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
+ dev_warn(dev, "Over-temperature Warning\n");
+ event = REGULATOR_EVENT_OVER_TEMP;
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ regulator_notifier_call_chain(ltc3676->regulators[i],
+ event, NULL);
+ }
+
+ if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
+ dev_info(dev, "Undervoltage Warning\n");
+ event = REGULATOR_EVENT_UNDER_VOLTAGE;
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
+ regulator_notifier_call_chain(ltc3676->regulators[i],
+ event, NULL);
+ }
+
+ /* Clear warning condition */
+ regmap_write(ltc3676->regmap, LTC3676_CLIRQ, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int ltc3676_regulator_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct regulator_init_data *init_data = dev_get_platdata(dev);
+ struct regulator_desc *descs;
+ struct ltc3676 *ltc3676;
+ int i, ret;
+
+ ltc3676 = devm_kzalloc(dev, sizeof(*ltc3676), GFP_KERNEL);
+ if (!ltc3676)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ltc3676);
+ ltc3676->dev = dev;
+
+ descs = ltc3676->regulator_descs;
+ memcpy(descs, ltc3676_regulators, sizeof(ltc3676_regulators));
+ descs[LTC3676_LDO3].fixed_uV = 1800000; /* LDO3 is fixed 1.8V */
+
+ ltc3676->regmap = devm_regmap_init_i2c(client, &ltc3676_regmap_config);
+ if (IS_ERR(ltc3676->regmap)) {
+ ret = PTR_ERR(ltc3676->regmap);
+ dev_err(dev, "failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
+ struct regulator_desc *desc = &ltc3676->regulator_descs[i];
+ struct regulator_config config = { };
+
+ if (init_data)
+ config.init_data = &init_data[i];
+
+ config.dev = dev;
+ config.driver_data = ltc3676;
+
+ ltc3676->regulators[i] = devm_regulator_register(dev, desc,
+ &config);
+ if (IS_ERR(ltc3676->regulators[i])) {
+ ret = PTR_ERR(ltc3676->regulators[i]);
+ dev_err(dev, "failed to register regulator %s: %d\n",
+ desc->name, ret);
+ return ret;
+ }
+ }
+
+ regmap_write(ltc3676->regmap, LTC3676_CLIRQ, 0);
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ ltc3676_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, ltc3676);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc3676_i2c_id[] = {
+ { "ltc3676" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc3676_i2c_id);
+
+static struct i2c_driver ltc3676_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = ltc3676_regulator_probe,
+ .id_table = ltc3676_i2c_id,
+};
+module_i2c_driver(ltc3676_driver);
+
+MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
+MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 81950bdb1cc4..954a20eeb26f 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/regulator/driver.h>
@@ -26,7 +27,7 @@
#include <linux/regulator/of_regulator.h>
#include "pv88080-regulator.h"
-#define PV88080_MAX_REGULATORS 3
+#define PV88080_MAX_REGULATORS 4
/* PV88080 REGULATOR IDs */
enum {
@@ -34,6 +35,12 @@ enum {
PV88080_ID_BUCK1,
PV88080_ID_BUCK2,
PV88080_ID_BUCK3,
+ PV88080_ID_HVBUCK,
+};
+
+enum pv88080_types {
+ TYPE_PV88080_AA,
+ TYPE_PV88080_BA,
};
struct pv88080_regulator {
@@ -42,7 +49,8 @@ struct pv88080_regulator {
unsigned int n_current_limits;
const int *current_limits;
unsigned int limit_mask;
- unsigned int conf;
+ unsigned int mode_reg;
+ unsigned int limit_reg;
unsigned int conf2;
unsigned int conf5;
};
@@ -51,6 +59,8 @@ struct pv88080 {
struct device *dev;
struct regmap *regmap;
struct regulator_dev *rdev[PV88080_MAX_REGULATORS];
+ unsigned long type;
+ const struct pv88080_compatible_regmap *regmap_config;
};
struct pv88080_buck_voltage {
@@ -59,6 +69,30 @@ struct pv88080_buck_voltage {
int uV_step;
};
+struct pv88080_buck_regmap {
+ /* REGS */
+ int buck_enable_reg;
+ int buck_vsel_reg;
+ int buck_mode_reg;
+ int buck_limit_reg;
+ int buck_vdac_range_reg;
+ int buck_vrange_gain_reg;
+ /* MASKS */
+ int buck_enable_mask;
+ int buck_vsel_mask;
+ int buck_limit_mask;
+};
+
+struct pv88080_compatible_regmap {
+ /* BUCK1, 2, 3 */
+ struct pv88080_buck_regmap buck_regmap[PV88080_MAX_REGULATORS-1];
+ /* HVBUCK */
+ int hvbuck_enable_reg;
+ int hvbuck_vsel_reg;
+ int hvbuck_enable_mask;
+ int hvbuck_vsel_mask;
+};
+
static const struct regmap_config pv88080_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -89,13 +123,111 @@ static const struct pv88080_buck_voltage pv88080_buck_vol[2] = {
},
};
+static const struct pv88080_compatible_regmap pv88080_aa_regs = {
+ /* BUCK1 */
+ .buck_regmap[0] = {
+ .buck_enable_reg = PV88080AA_REG_BUCK1_CONF0,
+ .buck_vsel_reg = PV88080AA_REG_BUCK1_CONF0,
+ .buck_mode_reg = PV88080AA_REG_BUCK1_CONF1,
+ .buck_limit_reg = PV88080AA_REG_BUCK1_CONF1,
+ .buck_vdac_range_reg = PV88080AA_REG_BUCK1_CONF2,
+ .buck_vrange_gain_reg = PV88080AA_REG_BUCK1_CONF5,
+ .buck_enable_mask = PV88080_BUCK1_EN,
+ .buck_vsel_mask = PV88080_VBUCK1_MASK,
+ .buck_limit_mask = PV88080_BUCK1_ILIM_MASK,
+ },
+ /* BUCK2 */
+ .buck_regmap[1] = {
+ .buck_enable_reg = PV88080AA_REG_BUCK2_CONF0,
+ .buck_vsel_reg = PV88080AA_REG_BUCK2_CONF0,
+ .buck_mode_reg = PV88080AA_REG_BUCK2_CONF1,
+ .buck_limit_reg = PV88080AA_REG_BUCK2_CONF1,
+ .buck_vdac_range_reg = PV88080AA_REG_BUCK2_CONF2,
+ .buck_vrange_gain_reg = PV88080AA_REG_BUCK2_CONF5,
+ .buck_enable_mask = PV88080_BUCK2_EN,
+ .buck_vsel_mask = PV88080_VBUCK2_MASK,
+ .buck_limit_mask = PV88080_BUCK2_ILIM_MASK,
+ },
+ /* BUCK3 */
+ .buck_regmap[2] = {
+ .buck_enable_reg = PV88080AA_REG_BUCK3_CONF0,
+ .buck_vsel_reg = PV88080AA_REG_BUCK3_CONF0,
+ .buck_mode_reg = PV88080AA_REG_BUCK3_CONF1,
+ .buck_limit_reg = PV88080AA_REG_BUCK3_CONF1,
+ .buck_vdac_range_reg = PV88080AA_REG_BUCK3_CONF2,
+ .buck_vrange_gain_reg = PV88080AA_REG_BUCK3_CONF5,
+ .buck_enable_mask = PV88080_BUCK3_EN,
+ .buck_vsel_mask = PV88080_VBUCK3_MASK,
+ .buck_limit_mask = PV88080_BUCK3_ILIM_MASK,
+ },
+ /* HVBUCK */
+ .hvbuck_enable_reg = PV88080AA_REG_HVBUCK_CONF2,
+ .hvbuck_vsel_reg = PV88080AA_REG_HVBUCK_CONF1,
+ .hvbuck_enable_mask = PV88080_HVBUCK_EN,
+ .hvbuck_vsel_mask = PV88080_VHVBUCK_MASK,
+};
+
+static const struct pv88080_compatible_regmap pv88080_ba_regs = {
+ /* BUCK1 */
+ .buck_regmap[0] = {
+ .buck_enable_reg = PV88080BA_REG_BUCK1_CONF0,
+ .buck_vsel_reg = PV88080BA_REG_BUCK1_CONF0,
+ .buck_mode_reg = PV88080BA_REG_BUCK1_CONF1,
+ .buck_limit_reg = PV88080BA_REG_BUCK1_CONF1,
+ .buck_vdac_range_reg = PV88080BA_REG_BUCK1_CONF2,
+ .buck_vrange_gain_reg = PV88080BA_REG_BUCK1_CONF5,
+ .buck_enable_mask = PV88080_BUCK1_EN,
+ .buck_vsel_mask = PV88080_VBUCK1_MASK,
+ .buck_limit_mask = PV88080_BUCK1_ILIM_MASK,
+ },
+ /* BUCK2 */
+ .buck_regmap[1] = {
+ .buck_enable_reg = PV88080BA_REG_BUCK2_CONF0,
+ .buck_vsel_reg = PV88080BA_REG_BUCK2_CONF0,
+ .buck_mode_reg = PV88080BA_REG_BUCK2_CONF1,
+ .buck_limit_reg = PV88080BA_REG_BUCK2_CONF1,
+ .buck_vdac_range_reg = PV88080BA_REG_BUCK2_CONF2,
+ .buck_vrange_gain_reg = PV88080BA_REG_BUCK2_CONF5,
+ .buck_enable_mask = PV88080_BUCK2_EN,
+ .buck_vsel_mask = PV88080_VBUCK2_MASK,
+ .buck_limit_mask = PV88080_BUCK2_ILIM_MASK,
+ },
+ /* BUCK3 */
+ .buck_regmap[2] = {
+ .buck_enable_reg = PV88080BA_REG_BUCK3_CONF0,
+ .buck_vsel_reg = PV88080BA_REG_BUCK3_CONF0,
+ .buck_mode_reg = PV88080BA_REG_BUCK3_CONF1,
+ .buck_limit_reg = PV88080BA_REG_BUCK3_CONF1,
+ .buck_vdac_range_reg = PV88080BA_REG_BUCK3_CONF2,
+ .buck_vrange_gain_reg = PV88080BA_REG_BUCK3_CONF5,
+ .buck_enable_mask = PV88080_BUCK3_EN,
+ .buck_vsel_mask = PV88080_VBUCK3_MASK,
+ .buck_limit_mask = PV88080_BUCK3_ILIM_MASK,
+ },
+ /* HVBUCK */
+ .hvbuck_enable_reg = PV88080BA_REG_HVBUCK_CONF2,
+ .hvbuck_vsel_reg = PV88080BA_REG_HVBUCK_CONF1,
+ .hvbuck_enable_mask = PV88080_HVBUCK_EN,
+ .hvbuck_vsel_mask = PV88080_VHVBUCK_MASK,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id pv88080_dt_ids[] = {
+ { .compatible = "pvs,pv88080", .data = (void *)TYPE_PV88080_AA },
+ { .compatible = "pvs,pv88080-aa", .data = (void *)TYPE_PV88080_AA },
+ { .compatible = "pvs,pv88080-ba", .data = (void *)TYPE_PV88080_BA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pv88080_dt_ids);
+#endif
+
static unsigned int pv88080_buck_get_mode(struct regulator_dev *rdev)
{
struct pv88080_regulator *info = rdev_get_drvdata(rdev);
unsigned int data;
int ret, mode = 0;
- ret = regmap_read(rdev->regmap, info->conf, &data);
+ ret = regmap_read(rdev->regmap, info->mode_reg, &data);
if (ret < 0)
return ret;
@@ -136,7 +268,7 @@ static int pv88080_buck_set_mode(struct regulator_dev *rdev,
return -EINVAL;
}
- return regmap_update_bits(rdev->regmap, info->conf,
+ return regmap_update_bits(rdev->regmap, info->mode_reg,
PV88080_BUCK1_MODE_MASK, val);
}
@@ -151,7 +283,7 @@ static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
if (min <= info->current_limits[i]
&& max >= info->current_limits[i]) {
return regmap_update_bits(rdev->regmap,
- info->conf,
+ info->limit_reg,
info->limit_mask,
i << PV88080_BUCK1_ILIM_SHIFT);
}
@@ -166,7 +298,7 @@ static int pv88080_get_current_limit(struct regulator_dev *rdev)
unsigned int data;
int ret;
- ret = regmap_read(rdev->regmap, info->conf, &data);
+ ret = regmap_read(rdev->regmap, info->limit_reg, &data);
if (ret < 0)
return ret;
@@ -187,6 +319,15 @@ static struct regulator_ops pv88080_buck_ops = {
.get_current_limit = pv88080_get_current_limit,
};
+static struct regulator_ops pv88080_hvbuck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
#define PV88080_BUCK(chip, regl_name, min, step, max, limits_array) \
{\
.desc = {\
@@ -200,17 +341,25 @@ static struct regulator_ops pv88080_buck_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = ((max) - (min))/(step) + 1, \
- .enable_reg = PV88080_REG_##regl_name##_CONF0, \
- .enable_mask = PV88080_##regl_name##_EN, \
- .vsel_reg = PV88080_REG_##regl_name##_CONF0, \
- .vsel_mask = PV88080_V##regl_name##_MASK, \
},\
.current_limits = limits_array, \
.n_current_limits = ARRAY_SIZE(limits_array), \
- .limit_mask = PV88080_##regl_name##_ILIM_MASK, \
- .conf = PV88080_REG_##regl_name##_CONF1, \
- .conf2 = PV88080_REG_##regl_name##_CONF2, \
- .conf5 = PV88080_REG_##regl_name##_CONF5, \
+}
+
+#define PV88080_HVBUCK(chip, regl_name, min, step, max) \
+{\
+ .desc = {\
+ .id = chip##_ID_##regl_name,\
+ .name = __stringify(chip##_##regl_name),\
+ .of_match = of_match_ptr(#regl_name),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .type = REGULATOR_VOLTAGE,\
+ .owner = THIS_MODULE,\
+ .ops = &pv88080_hvbuck_ops,\
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = ((max) - (min))/(step) + 1, \
+ },\
}
static struct pv88080_regulator pv88080_regulator_info[] = {
@@ -220,6 +369,7 @@ static struct pv88080_regulator pv88080_regulator_info[] = {
pv88080_buck23_limits),
PV88080_BUCK(PV88080, BUCK3, 600000, 6250, 1393750,
pv88080_buck23_limits),
+ PV88080_HVBUCK(PV88080, HVBUCK, 0, 5000, 1275000),
};
static irqreturn_t pv88080_irq_handler(int irq, void *data)
@@ -280,6 +430,8 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88080 *chip;
+ const struct pv88080_compatible_regmap *regmap_config;
+ const struct of_device_id *match;
struct regulator_config config = { };
int i, error, ret;
unsigned int conf2, conf5;
@@ -297,6 +449,17 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
return error;
}
+ if (i2c->dev.of_node) {
+ match = of_match_node(pv88080_dt_ids, i2c->dev.of_node);
+ if (!match) {
+ dev_err(chip->dev, "Failed to get of_match_node\n");
+ return -EINVAL;
+ }
+ chip->type = (unsigned long)match->data;
+ } else {
+ chip->type = id->driver_data;
+ }
+
i2c_set_clientdata(i2c, chip);
if (i2c->irq != 0) {
@@ -336,31 +499,58 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
"Failed to update mask reg: %d\n", ret);
return ret;
}
-
} else {
dev_warn(chip->dev, "No IRQ configured\n");
}
+ switch (chip->type) {
+ case TYPE_PV88080_AA:
+ chip->regmap_config = &pv88080_aa_regs;
+ break;
+ case TYPE_PV88080_BA:
+ chip->regmap_config = &pv88080_ba_regs;
+ break;
+ }
+
+ regmap_config = chip->regmap_config;
config.dev = chip->dev;
config.regmap = chip->regmap;
- for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ /* Registration for BUCK1, 2, 3 */
+ for (i = 0; i < PV88080_MAX_REGULATORS-1; i++) {
if (init_data)
config.init_data = &init_data[i];
+ pv88080_regulator_info[i].limit_reg
+ = regmap_config->buck_regmap[i].buck_limit_reg;
+ pv88080_regulator_info[i].limit_mask
+ = regmap_config->buck_regmap[i].buck_limit_mask;
+ pv88080_regulator_info[i].mode_reg
+ = regmap_config->buck_regmap[i].buck_mode_reg;
+ pv88080_regulator_info[i].conf2
+ = regmap_config->buck_regmap[i].buck_vdac_range_reg;
+ pv88080_regulator_info[i].conf5
+ = regmap_config->buck_regmap[i].buck_vrange_gain_reg;
+ pv88080_regulator_info[i].desc.enable_reg
+ = regmap_config->buck_regmap[i].buck_enable_reg;
+ pv88080_regulator_info[i].desc.enable_mask
+ = regmap_config->buck_regmap[i].buck_enable_mask;
+ pv88080_regulator_info[i].desc.vsel_reg
+ = regmap_config->buck_regmap[i].buck_vsel_reg;
+ pv88080_regulator_info[i].desc.vsel_mask
+ = regmap_config->buck_regmap[i].buck_vsel_mask;
+
ret = regmap_read(chip->regmap,
- pv88080_regulator_info[i].conf2, &conf2);
+ pv88080_regulator_info[i].conf2, &conf2);
if (ret < 0)
return ret;
-
conf2 = ((conf2 >> PV88080_BUCK_VDAC_RANGE_SHIFT) &
PV88080_BUCK_VDAC_RANGE_MASK);
ret = regmap_read(chip->regmap,
- pv88080_regulator_info[i].conf5, &conf5);
+ pv88080_regulator_info[i].conf5, &conf5);
if (ret < 0)
return ret;
-
conf5 = ((conf5 >> PV88080_BUCK_VRANGE_GAIN_SHIFT) &
PV88080_BUCK_VRANGE_GAIN_MASK);
@@ -383,23 +573,38 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
}
}
+ pv88080_regulator_info[PV88080_ID_HVBUCK].desc.enable_reg
+ = regmap_config->hvbuck_enable_reg;
+ pv88080_regulator_info[PV88080_ID_HVBUCK].desc.enable_mask
+ = regmap_config->hvbuck_enable_mask;
+ pv88080_regulator_info[PV88080_ID_HVBUCK].desc.vsel_reg
+ = regmap_config->hvbuck_vsel_reg;
+ pv88080_regulator_info[PV88080_ID_HVBUCK].desc.vsel_mask
+ = regmap_config->hvbuck_vsel_mask;
+
+ /* Registration for HVBUCK */
+ if (init_data)
+ config.init_data = &init_data[PV88080_ID_HVBUCK];
+
+ config.driver_data = (void *)&pv88080_regulator_info[PV88080_ID_HVBUCK];
+ chip->rdev[PV88080_ID_HVBUCK] = devm_regulator_register(chip->dev,
+ &pv88080_regulator_info[PV88080_ID_HVBUCK].desc, &config);
+ if (IS_ERR(chip->rdev[PV88080_ID_HVBUCK])) {
+ dev_err(chip->dev, "Failed to register PV88080 regulator\n");
+ return PTR_ERR(chip->rdev[PV88080_ID_HVBUCK]);
+ }
+
return 0;
}
static const struct i2c_device_id pv88080_i2c_id[] = {
- {"pv88080", 0},
+ { "pv88080", TYPE_PV88080_AA },
+ { "pv88080-aa", TYPE_PV88080_AA },
+ { "pv88080-ba", TYPE_PV88080_BA },
{},
};
MODULE_DEVICE_TABLE(i2c, pv88080_i2c_id);
-#ifdef CONFIG_OF
-static const struct of_device_id pv88080_dt_ids[] = {
- { .compatible = "pvs,pv88080", .data = &pv88080_i2c_id[0] },
- {},
-};
-MODULE_DEVICE_TABLE(of, pv88080_dt_ids);
-#endif
-
static struct i2c_driver pv88080_regulator_driver = {
.driver = {
.name = "pv88080",
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
index 5e9afde606f4..ae25ff360e3d 100644
--- a/drivers/regulator/pv88080-regulator.h
+++ b/drivers/regulator/pv88080-regulator.h
@@ -17,55 +17,75 @@
#define __PV88080_REGISTERS_H__
/* System Control and Event Registers */
-#define PV88080_REG_EVENT_A 0x04
-#define PV88080_REG_MASK_A 0x09
-#define PV88080_REG_MASK_B 0x0a
-#define PV88080_REG_MASK_C 0x0b
-
-/* Regulator Registers */
-#define PV88080_REG_BUCK1_CONF0 0x27
-#define PV88080_REG_BUCK1_CONF1 0x28
-#define PV88080_REG_BUCK1_CONF2 0x59
-#define PV88080_REG_BUCK1_CONF5 0x5c
-#define PV88080_REG_BUCK2_CONF0 0x29
-#define PV88080_REG_BUCK2_CONF1 0x2a
-#define PV88080_REG_BUCK2_CONF2 0x61
-#define PV88080_REG_BUCK2_CONF5 0x64
-#define PV88080_REG_BUCK3_CONF0 0x2b
-#define PV88080_REG_BUCK3_CONF1 0x2c
-#define PV88080_REG_BUCK3_CONF2 0x69
-#define PV88080_REG_BUCK3_CONF5 0x6c
+#define PV88080_REG_EVENT_A 0x04
+#define PV88080_REG_MASK_A 0x09
+#define PV88080_REG_MASK_B 0x0A
+#define PV88080_REG_MASK_C 0x0B
+
+/* Regulator Registers - rev. AA */
+#define PV88080AA_REG_HVBUCK_CONF1 0x2D
+#define PV88080AA_REG_HVBUCK_CONF2 0x2E
+#define PV88080AA_REG_BUCK1_CONF0 0x27
+#define PV88080AA_REG_BUCK1_CONF1 0x28
+#define PV88080AA_REG_BUCK1_CONF2 0x59
+#define PV88080AA_REG_BUCK1_CONF5 0x5C
+#define PV88080AA_REG_BUCK2_CONF0 0x29
+#define PV88080AA_REG_BUCK2_CONF1 0x2A
+#define PV88080AA_REG_BUCK2_CONF2 0x61
+#define PV88080AA_REG_BUCK2_CONF5 0x64
+#define PV88080AA_REG_BUCK3_CONF0 0x2B
+#define PV88080AA_REG_BUCK3_CONF1 0x2C
+#define PV88080AA_REG_BUCK3_CONF2 0x69
+#define PV88080AA_REG_BUCK3_CONF5 0x6C
+
+/* Regulator Registers - rev. BA */
+#define PV88080BA_REG_HVBUCK_CONF1 0x33
+#define PV88080BA_REG_HVBUCK_CONF2 0x34
+#define PV88080BA_REG_BUCK1_CONF0 0x2A
+#define PV88080BA_REG_BUCK1_CONF1 0x2C
+#define PV88080BA_REG_BUCK1_CONF2 0x5A
+#define PV88080BA_REG_BUCK1_CONF5 0x5D
+#define PV88080BA_REG_BUCK2_CONF0 0x2D
+#define PV88080BA_REG_BUCK2_CONF1 0x2F
+#define PV88080BA_REG_BUCK2_CONF2 0x63
+#define PV88080BA_REG_BUCK2_CONF5 0x66
+#define PV88080BA_REG_BUCK3_CONF0 0x30
+#define PV88080BA_REG_BUCK3_CONF1 0x32
+#define PV88080BA_REG_BUCK3_CONF2 0x6C
+#define PV88080BA_REG_BUCK3_CONF5 0x6F
/* PV88080_REG_EVENT_A (addr=0x04) */
#define PV88080_E_VDD_FLT 0x01
-#define PV88080_E_OVER_TEMP 0x02
+#define PV88080_E_OVER_TEMP 0x02
/* PV88080_REG_MASK_A (addr=0x09) */
#define PV88080_M_VDD_FLT 0x01
-#define PV88080_M_OVER_TEMP 0x02
+#define PV88080_M_OVER_TEMP 0x02
-/* PV88080_REG_BUCK1_CONF0 (addr=0x27) */
+/* PV88080_REG_BUCK1_CONF0 (addr=0x27|0x2A) */
#define PV88080_BUCK1_EN 0x80
-#define PV88080_VBUCK1_MASK 0x7F
-/* PV88080_REG_BUCK2_CONF0 (addr=0x29) */
+#define PV88080_VBUCK1_MASK 0x7F
+
+/* PV88080_REG_BUCK2_CONF0 (addr=0x29|0x2D) */
#define PV88080_BUCK2_EN 0x80
-#define PV88080_VBUCK2_MASK 0x7F
-/* PV88080_REG_BUCK3_CONF0 (addr=0x2b) */
+#define PV88080_VBUCK2_MASK 0x7F
+
+/* PV88080_REG_BUCK3_CONF0 (addr=0x2B|0x30) */
#define PV88080_BUCK3_EN 0x80
-#define PV88080_VBUCK3_MASK 0x7F
+#define PV88080_VBUCK3_MASK 0x7F
-/* PV88080_REG_BUCK1_CONF1 (addr=0x28) */
-#define PV88080_BUCK1_ILIM_SHIFT 2
+/* PV88080_REG_BUCK1_CONF1 (addr=0x28|0x2C) */
+#define PV88080_BUCK1_ILIM_SHIFT 2
#define PV88080_BUCK1_ILIM_MASK 0x0C
#define PV88080_BUCK1_MODE_MASK 0x03
-/* PV88080_REG_BUCK2_CONF1 (addr=0x2a) */
-#define PV88080_BUCK2_ILIM_SHIFT 2
+/* PV88080_REG_BUCK2_CONF1 (addr=0x2A|0x2F) */
+#define PV88080_BUCK2_ILIM_SHIFT 2
#define PV88080_BUCK2_ILIM_MASK 0x0C
#define PV88080_BUCK2_MODE_MASK 0x03
-/* PV88080_REG_BUCK3_CONF1 (addr=0x2c) */
-#define PV88080_BUCK3_ILIM_SHIFT 2
+/* PV88080_REG_BUCK3_CONF1 (addr=0x2C|0x32) */
+#define PV88080_BUCK3_ILIM_SHIFT 2
#define PV88080_BUCK3_ILIM_MASK 0x0C
#define PV88080_BUCK3_MODE_MASK 0x03
@@ -73,20 +93,26 @@
#define PV88080_BUCK_MODE_AUTO 0x01
#define PV88080_BUCK_MODE_SYNC 0x02
-/* PV88080_REG_BUCK2_CONF2 (addr=0x61) */
-/* PV88080_REG_BUCK3_CONF2 (addr=0x69) */
-#define PV88080_BUCK_VDAC_RANGE_SHIFT 7
-#define PV88080_BUCK_VDAC_RANGE_MASK 0x01
+/* PV88080_REG_HVBUCK_CONF1 (addr=0x2D|0x33) */
+#define PV88080_VHVBUCK_MASK 0xFF
+
+/* PV88080_REG_HVBUCK_CONF1 (addr=0x2E|0x34) */
+#define PV88080_HVBUCK_EN 0x01
+
+/* PV88080_REG_BUCK2_CONF2 (addr=0x61|0x63) */
+/* PV88080_REG_BUCK3_CONF2 (addr=0x69|0x6C) */
+#define PV88080_BUCK_VDAC_RANGE_SHIFT 7
+#define PV88080_BUCK_VDAC_RANGE_MASK 0x01
-#define PV88080_BUCK_VDAC_RANGE_1 0x00
-#define PV88080_BUCK_VDAC_RANGE_2 0x01
+#define PV88080_BUCK_VDAC_RANGE_1 0x00
+#define PV88080_BUCK_VDAC_RANGE_2 0x01
-/* PV88080_REG_BUCK2_CONF5 (addr=0x64) */
-/* PV88080_REG_BUCK3_CONF5 (addr=0x6c) */
-#define PV88080_BUCK_VRANGE_GAIN_SHIFT 0
-#define PV88080_BUCK_VRANGE_GAIN_MASK 0x01
+/* PV88080_REG_BUCK2_CONF5 (addr=0x64|0x66) */
+/* PV88080_REG_BUCK3_CONF5 (addr=0x6C|0x6F) */
+#define PV88080_BUCK_VRANGE_GAIN_SHIFT 0
+#define PV88080_BUCK_VRANGE_GAIN_MASK 0x01
-#define PV88080_BUCK_VRANGE_GAIN_1 0x00
-#define PV88080_BUCK_VRANGE_GAIN_2 0x01
+#define PV88080_BUCK_VRANGE_GAIN_1 0x00
+#define PV88080_BUCK_VRANGE_GAIN_2 0x01
#endif /* __PV88080_REGISTERS_H__ */
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index c24524242da2..1b88e0e15a70 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
@@ -194,12 +193,10 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
- unsigned int ramp_delay = rdev->constraints->ramp_delay;
int min_uV = rdev->constraints->min_uV;
int max_uV = rdev->constraints->max_uV;
int diff_uV = max_uV - min_uV;
struct pwm_state pstate;
- int old_uV = pwm_regulator_get_voltage(rdev);
unsigned int diff_duty;
unsigned int dutycycle;
int ret;
@@ -233,13 +230,6 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
return ret;
}
- if ((ramp_delay == 0) || !pwm_regulator_is_enabled(rdev))
- return 0;
-
- /* Ramp delay is in uV/uS. Adjust to uS and delay */
- ramp_delay = DIV_ROUND_UP(abs(req_min_uV - old_uV), ramp_delay);
- usleep_range(ramp_delay, ramp_delay + DIV_ROUND_UP(ramp_delay, 10));
-
return 0;
}
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index e254272585b2..1b2acc43fea1 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -448,6 +448,44 @@ static struct regulator_ops switch_ops = {
};
/*
+ * PM8018 regulators
+ */
+static const struct qcom_rpm_reg pm8018_pldo = {
+ .desc.linear_ranges = pldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
+ .desc.n_voltages = 161,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8018_nldo = {
+ .desc.linear_ranges = nldo_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
+ .desc.n_voltages = 64,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_ldo_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8018_smps = {
+ .desc.linear_ranges = smps_ranges,
+ .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
+ .desc.n_voltages = 154,
+ .desc.ops = &uV_ops,
+ .parts = &rpm8960_smps_parts,
+ .supports_force_mode_auto = false,
+ .supports_force_mode_bypass = false,
+};
+
+static const struct qcom_rpm_reg pm8018_switch = {
+ .desc.ops = &switch_ops,
+ .parts = &rpm8960_switch_parts,
+};
+
+/*
* PM8058 regulators
*/
static const struct qcom_rpm_reg pm8058_pldo = {
@@ -755,6 +793,32 @@ struct rpm_regulator_data {
const char *supply;
};
+static const struct rpm_regulator_data rpm_pm8018_regulators[] = {
+ { "s1", QCOM_RPM_PM8018_SMPS1, &pm8018_smps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8018_SMPS2, &pm8018_smps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8018_SMPS3, &pm8018_smps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8018_SMPS4, &pm8018_smps, "vdd_s4" },
+ { "s5", QCOM_RPM_PM8018_SMPS5, &pm8018_smps, "vdd_s5" },
+
+ { "l2", QCOM_RPM_PM8018_LDO2, &pm8018_pldo, "vdd_l2" },
+ { "l3", QCOM_RPM_PM8018_LDO3, &pm8018_pldo, "vdd_l3" },
+ { "l4", QCOM_RPM_PM8018_LDO4, &pm8018_pldo, "vdd_l4" },
+ { "l5", QCOM_RPM_PM8018_LDO5, &pm8018_pldo, "vdd_l5" },
+ { "l6", QCOM_RPM_PM8018_LDO6, &pm8018_pldo, "vdd_l7" },
+ { "l7", QCOM_RPM_PM8018_LDO7, &pm8018_pldo, "vdd_l7" },
+ { "l8", QCOM_RPM_PM8018_LDO8, &pm8018_nldo, "vdd_l8" },
+ { "l9", QCOM_RPM_PM8018_LDO9, &pm8921_nldo1200,
+ "vdd_l9_l10_l11_l12" },
+ { "l10", QCOM_RPM_PM8018_LDO10, &pm8018_nldo, "vdd_l9_l10_l11_l12" },
+ { "l11", QCOM_RPM_PM8018_LDO11, &pm8018_nldo, "vdd_l9_l10_l11_l12" },
+ { "l12", QCOM_RPM_PM8018_LDO12, &pm8018_nldo, "vdd_l9_l10_l11_l12" },
+ { "l14", QCOM_RPM_PM8018_LDO14, &pm8018_pldo, "vdd_l14" },
+
+ { "lvs1", QCOM_RPM_PM8018_LVS1, &pm8018_switch, "lvs1_in" },
+
+ { }
+};
+
static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
@@ -870,6 +934,8 @@ static const struct rpm_regulator_data rpm_pm8921_regulators[] = {
};
static const struct of_device_id rpm_of_match[] = {
+ { .compatible = "qcom,rpm-pm8018-regulators",
+ .data = &rpm_pm8018_regulators },
{ .compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators },
{ .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators },
{ .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators },
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 40d07ba036e7..3314bf299a51 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1,11 +1,15 @@
/*
- * Regulator driver for Rockchip RK808
+ * Regulator driver for Rockchip RK808/RK818
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <zyw@rock-chips.com>
* Author: Zhang Qing <zhangqing@rock-chips.com>
*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ *
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -32,6 +36,12 @@
#define RK808_BUCK4_VSEL_MASK 0xf
#define RK808_LDO_VSEL_MASK 0x1f
+#define RK818_BUCK_VSEL_MASK 0x3f
+#define RK818_BUCK4_VSEL_MASK 0x1f
+#define RK818_LDO_VSEL_MASK 0x1f
+#define RK818_LDO3_ON_VSEL_MASK 0xf
+#define RK818_BOOST_ON_VSEL_MASK 0xe0
+
/* Ramp rate definitions for buck1 / buck2 only */
#define RK808_RAMP_RATE_OFFSET 3
#define RK808_RAMP_RATE_MASK (3 << RK808_RAMP_RATE_OFFSET)
@@ -454,6 +464,108 @@ static const struct regulator_desc rk808_reg[] = {
RK808_DCDC_EN_REG, BIT(6)),
};
+static const struct regulator_desc rk818_reg[] = {
+ {
+ .name = "DCDC_REG1",
+ .supply_name = "vcc1",
+ .of_match = of_match_ptr("DCDC_REG1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = RK818_ID_DCDC1,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
+ .n_voltages = 64,
+ .vsel_reg = RK818_BUCK1_ON_VSEL_REG,
+ .vsel_mask = RK818_BUCK_VSEL_MASK,
+ .enable_reg = RK818_DCDC_EN_REG,
+ .enable_mask = BIT(0),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "DCDC_REG2",
+ .supply_name = "vcc2",
+ .of_match = of_match_ptr("DCDC_REG2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = RK818_ID_DCDC2,
+ .ops = &rk808_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
+ .n_voltages = 64,
+ .vsel_reg = RK818_BUCK2_ON_VSEL_REG,
+ .vsel_mask = RK818_BUCK_VSEL_MASK,
+ .enable_reg = RK818_DCDC_EN_REG,
+ .enable_mask = BIT(1),
+ .owner = THIS_MODULE,
+ }, {
+ .name = "DCDC_REG3",
+ .supply_name = "vcc3",
+ .of_match = of_match_ptr("DCDC_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = RK818_ID_DCDC3,
+ .ops = &rk808_switch_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = RK818_DCDC_EN_REG,
+ .enable_mask = BIT(2),
+ .owner = THIS_MODULE,
+ },
+ RK8XX_DESC(RK818_ID_DCDC4, "DCDC_REG4", "vcc4", 1800, 3600, 100,
+ RK818_BUCK4_ON_VSEL_REG, RK818_BUCK4_VSEL_MASK,
+ RK818_DCDC_EN_REG, BIT(3), 0),
+ RK8XX_DESC(RK818_ID_BOOST, "DCDC_BOOST", "boost", 4700, 5400, 100,
+ RK818_BOOST_LDO9_ON_VSEL_REG, RK818_BOOST_ON_VSEL_MASK,
+ RK818_DCDC_EN_REG, BIT(4), 0),
+ RK8XX_DESC(RK818_ID_LDO1, "LDO_REG1", "vcc6", 1800, 3400, 100,
+ RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(0), 400),
+ RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
+ RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(1), 400),
+ {
+ .name = "LDO_REG3",
+ .supply_name = "vcc7",
+ .of_match = of_match_ptr("LDO_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = RK818_ID_LDO3,
+ .ops = &rk808_reg_ops_ranges,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 16,
+ .linear_ranges = rk808_ldo3_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk808_ldo3_voltage_ranges),
+ .vsel_reg = RK818_LDO3_ON_VSEL_REG,
+ .vsel_mask = RK818_LDO3_ON_VSEL_MASK,
+ .enable_reg = RK818_LDO_EN_REG,
+ .enable_mask = BIT(2),
+ .enable_time = 400,
+ .owner = THIS_MODULE,
+ },
+ RK8XX_DESC(RK818_ID_LDO4, "LDO_REG4", "vcc8", 1800, 3400, 100,
+ RK818_LDO4_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(3), 400),
+ RK8XX_DESC(RK818_ID_LDO5, "LDO_REG5", "vcc7", 1800, 3400, 100,
+ RK818_LDO5_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(4), 400),
+ RK8XX_DESC(RK818_ID_LDO6, "LDO_REG6", "vcc8", 800, 2500, 100,
+ RK818_LDO6_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(5), 400),
+ RK8XX_DESC(RK818_ID_LDO7, "LDO_REG7", "vcc7", 800, 2500, 100,
+ RK818_LDO7_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(6), 400),
+ RK8XX_DESC(RK818_ID_LDO8, "LDO_REG8", "vcc8", 1800, 3400, 100,
+ RK818_LDO8_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+ BIT(7), 400),
+ RK8XX_DESC(RK818_ID_LDO9, "LDO_REG9", "vcc9", 1800, 3400, 100,
+ RK818_BOOST_LDO9_ON_VSEL_REG, RK818_LDO_VSEL_MASK,
+ RK818_DCDC_EN_REG, BIT(5), 400),
+ RK8XX_DESC_SWITCH(RK818_ID_SWITCH, "SWITCH_REG", "vcc9",
+ RK818_DCDC_EN_REG, BIT(6)),
+ RK8XX_DESC_SWITCH(RK818_ID_HDMI_SWITCH, "HDMI_SWITCH", "h_5v",
+ RK818_H5V_EN_REG, BIT(0)),
+ RK8XX_DESC_SWITCH(RK818_ID_OTG_SWITCH, "OTG_SWITCH", "usb",
+ RK818_DCDC_EN_REG, BIT(7)),
+};
+
static int rk808_regulator_dt_parse_pdata(struct device *dev,
struct device *client_dev,
struct regmap *map,
@@ -499,7 +611,8 @@ static int rk808_regulator_probe(struct platform_device *pdev)
struct regulator_config config = {};
struct regulator_dev *rk808_rdev;
struct rk808_regulator_data *pdata;
- int ret, i;
+ const struct regulator_desc *regulators;
+ int ret, i, nregulators;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -512,14 +625,29 @@ static int rk808_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
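+ /* Pick the regulator descriptors for the PMIC variant detected by the parent MFD driver. */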
+ switch (rk808->variant) {
+ case RK808_ID:
+ regulators = rk808_reg;
+ nregulators = RK808_NUM_REGULATORS;
+ break;
+ case RK818_ID:
+ regulators = rk818_reg;
+ nregulators = RK818_NUM_REGULATORS;
+ break;
+ default:
+ dev_err(&client->dev, "unsupported RK8XX ID %lu\n",
+ rk808->variant);
+ return -EINVAL;
+ }
+
config.dev = &client->dev;
config.driver_data = pdata;
config.regmap = rk808->regmap;
/* Instantiate the regulators */
- for (i = 0; i < RK808_NUM_REGULATORS; i++) {
+ for (i = 0; i < nregulators; i++) {
rk808_rdev = devm_regulator_register(&pdev->dev,
- &rk808_reg[i], &config);
+ &regulators[i], &config);
if (IS_ERR(rk808_rdev)) {
dev_err(&client->dev,
"failed to register %d regulator\n", i);
@@ -533,15 +661,15 @@ static int rk808_regulator_probe(struct platform_device *pdev)
static struct platform_driver rk808_regulator_driver = {
.probe = rk808_regulator_probe,
.driver = {
- .name = "rk808-regulator",
- .owner = THIS_MODULE,
+ .name = "rk808-regulator"
},
};
module_platform_driver(rk808_regulator_driver);
-MODULE_DESCRIPTION("regulator driver for the rk808 series PMICs");
-MODULE_AUTHOR("Chris Zhong<zyw@rock-chips.com>");
-MODULE_AUTHOR("Zhang Qing<zhangqing@rock-chips.com>");
+MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs");
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>");
+MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rk808-regulator");
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index d1e631d64a20..eb0f5b13841a 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -180,6 +180,14 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
return -EINVAL;
+ /*
+ * Certain revisions of TPS65218 will need to have DCDC3 regulator
+ * enabled always, otherwise an immediate system reboot will occur
+ * during poweroff.
+ */
+ if (rid == TPS65218_DCDC_3 && tps->rev == TPS65218_REV_2_1)
+ return 0;
+
if (!tps->info[rid]->strobe) {
if (rid == TPS65218_DCDC_3)
tps->info[rid]->strobe = 3;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index fb991ec76423..696116ebdf50 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
pmic->ext_sleep_control = tps65910_ext_sleep_control;
info = tps65910_regs;
+ /*
+ * Work around silicon erratum SWCZ010: the output programmed voltage
+ * level can go higher than expected or crash.
+ * Workaround: use no synchronization of DCDC clocks.
+ */
+ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
+ DCDCCTRL_DCDCCKSYNC_MASK);
break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 1a8bf76a925f..f396bfef5d42 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -17,7 +17,7 @@ config OMAP_REMOTEPROC
select REMOTEPROC
select MAILBOX
select OMAP2PLUS_MBOX
- select RPMSG
+ select RPMSG_VIRTIO
help
Say y here to support OMAP's remote processors (dual M3
and DSP on OMAP4) via the remote processor framework.
@@ -59,7 +59,7 @@ config DA8XX_REMOTEPROC
depends on ARCH_DAVINCI_DA8XX
select CMA if MMU
select REMOTEPROC
- select RPMSG
+ select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
remote processor framework.
@@ -91,6 +91,22 @@ config QCOM_Q6V5_PIL
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based remote processors.
+config QCOM_WCNSS_IRIS
+ tristate
+ depends on OF && ARCH_QCOM
+
+config QCOM_WCNSS_PIL
+ tristate "Qualcomm WCNSS Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ select QCOM_WCNSS_IRIS
+ select REMOTEPROC
+ help
+ Say y here to support the Peripheral Image Loader for the Qualcomm
+ Wireless Connectivity Subsystem.
+
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 92d3758bd15c..6dfb62ed643f 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -13,4 +13,6 @@ obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o
obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
+obj-$(CONFIG_QCOM_WCNSS_IRIS) += qcom_wcnss_iris.o
+obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 009e56f67de2..1afac8f31be0 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -147,7 +147,7 @@ static void da8xx_rproc_kick(struct rproc *rproc, int vqid)
{
struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
- /* Interupt remote proc */
+ /* Interrupt remote proc */
writel(SYSCFG_CHIPSIG2, drproc->chipsig);
}
@@ -261,7 +261,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
return 0;
free_rproc:
- rproc_put(rproc);
+ rproc_free(rproc);
return ret;
}
@@ -290,7 +290,7 @@ static int da8xx_rproc_remove(struct platform_device *pdev)
disable_irq(drproc->irq);
rproc_del(rproc);
- rproc_put(rproc);
+ rproc_free(rproc);
return 0;
}
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index b74368a91235..fa63bf2eb885 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -96,7 +96,8 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(oproc->mbox, (void *)vqid);
if (ret < 0)
- dev_err(dev, "omap_mbox_msg_send failed: %d\n", ret);
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
}
/*
@@ -196,7 +197,7 @@ static int omap_rproc_probe(struct platform_device *pdev)
}
rproc = rproc_alloc(&pdev->dev, pdata->name, &omap_rproc_ops,
- pdata->firmware, sizeof(*oproc));
+ pdata->firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
@@ -214,7 +215,7 @@ static int omap_rproc_probe(struct platform_device *pdev)
return 0;
free_rproc:
- rproc_put(rproc);
+ rproc_free(rproc);
return ret;
}
@@ -223,7 +224,7 @@ static int omap_rproc_remove(struct platform_device *pdev)
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_del(rproc);
- rproc_put(rproc);
+ rproc_free(rproc);
return 0;
}
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2a1b2c7d8f2c..2e0caaaa766a 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -863,8 +863,10 @@ static int q6v5_probe(struct platform_device *pdev)
goto free_rproc;
qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
- if (IS_ERR(qproc->state))
+ if (IS_ERR(qproc->state)) {
+ ret = PTR_ERR(qproc->state);
goto free_rproc;
+ }
ret = rproc_add(rproc);
if (ret)
@@ -873,7 +875,7 @@ static int q6v5_probe(struct platform_device *pdev)
return 0;
free_rproc:
- rproc_put(rproc);
+ rproc_free(rproc);
return ret;
}
@@ -883,7 +885,7 @@ static int q6v5_remove(struct platform_device *pdev)
struct q6v5 *qproc = platform_get_drvdata(pdev);
rproc_del(qproc->rproc);
- rproc_put(qproc->rproc);
+ rproc_free(qproc->rproc);
return 0;
}
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
new file mode 100644
index 000000000000..f5cedeaafba1
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -0,0 +1,624 @@
+/*
+ * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_mdt_loader.h"
+#include "remoteproc_internal.h"
+#include "qcom_wcnss.h"
+
+#define WCNSS_CRASH_REASON_SMEM 422
+#define WCNSS_FIRMWARE_NAME "wcnss.mdt"
+#define WCNSS_PAS_ID 6
+
+#define WCNSS_SPARE_NVBIN_DLND BIT(25)
+
+#define WCNSS_PMU_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP BIT(5)
+#define WCNSS_PMU_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_IRIS_RESET BIT(7)
+#define WCNSS_PMU_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_IRIS_XO_READ_STS BIT(10)
+
+#define WCNSS_PMU_XO_MODE_MASK GENMASK(2, 1)
+#define WCNSS_PMU_XO_MODE_19p2 0
+#define WCNSS_PMU_XO_MODE_48 3
+
+struct wcnss_data {
+ size_t pmu_offset;
+ size_t spare_offset;
+
+ const struct wcnss_vreg_info *vregs;
+ size_t num_vregs;
+};
+
+struct qcom_wcnss {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *pmu_cfg;
+ void __iomem *spare_out;
+
+ bool use_48mhz_xo;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ struct mutex iris_lock;
+ struct qcom_iris *iris;
+
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+};
+
+static const struct wcnss_data riva_data = {
+ .pmu_offset = 0x28,
+ .spare_offset = 0xb4,
+
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 1050000, 1150000, 0 },
+ { "vddcx", 1050000, 1150000, 0 },
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_vregs = 3,
+};
+
+static const struct wcnss_data pronto_v1_data = {
+ .pmu_offset = 0x1004,
+ .spare_offset = 0x1088,
+
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 950000, 1150000, 0 },
+ { "vddcx", .super_turbo = true},
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_vregs = 3,
+};
+
+static const struct wcnss_data pronto_v2_data = {
+ .pmu_offset = 0x1004,
+ .spare_offset = 0x1088,
+
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 1287500, 1287500, 0 },
+ { "vddcx", .super_turbo = true },
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_vregs = 3,
+};
+
+void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
+ struct qcom_iris *iris,
+ bool use_48mhz_xo)
+{
+ mutex_lock(&wcnss->iris_lock);
+
+ wcnss->iris = iris;
+ wcnss->use_48mhz_xo = use_48mhz_xo;
+
+ mutex_unlock(&wcnss->iris_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_wcnss_assign_iris);
+
+static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ phys_addr_t fw_addr;
+ size_t fw_size;
+ bool relocate;
+ int ret;
+
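+ /* Hand the firmware metadata to the secure world (PAS) for validation. */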
+ ret = qcom_scm_pas_init_image(WCNSS_PAS_ID, fw->data, fw->size);
+ if (ret) {
+ dev_err(&rproc->dev, "invalid firmware metadata\n");
+ return ret;
+ }
+
+ ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to parse mdt header\n");
+ return ret;
+ }
+
+ if (relocate) {
+ wcnss->mem_reloc = fw_addr;
+
+ ret = qcom_scm_pas_mem_setup(WCNSS_PAS_ID, wcnss->mem_phys, fw_size);
+ if (ret) {
+ dev_err(&rproc->dev, "unable to setup memory for image\n");
+ return ret;
+ }
+ }
+
+ return qcom_mdt_load(rproc, fw, rproc->firmware);
+}
+
+static const struct rproc_fw_ops wcnss_fw_ops = {
+ .find_rsc_table = qcom_mdt_find_rsc_table,
+ .load = wcnss_load,
+};
+
+static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
+{
+ u32 val;
+
+ /* Indicate NV download capability */
+ val = readl(wcnss->spare_out);
+ val |= WCNSS_SPARE_NVBIN_DLND;
+ writel(val, wcnss->spare_out);
+}
+
+static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
+{
+ u32 val;
+
+ /* Clear PMU cfg register */
+ writel(0, wcnss->pmu_cfg);
+
+ val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Clear XO_MODE */
+ val &= ~WCNSS_PMU_XO_MODE_MASK;
+ if (wcnss->use_48mhz_xo)
+ val |= WCNSS_PMU_XO_MODE_48 << 1;
+ else
+ val |= WCNSS_PMU_XO_MODE_19p2 << 1;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Reset IRIS */
+ val |= WCNSS_PMU_IRIS_RESET;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Wait for PMU.iris_reg_reset_sts */
+ while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
+ cpu_relax();
+
+ /* Clear IRIS reset */
+ val &= ~WCNSS_PMU_IRIS_RESET;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Start IRIS XO configuration */
+ val |= WCNSS_PMU_IRIS_XO_CFG;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Wait for XO configuration to finish */
+ while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
+ cpu_relax();
+
+ /* Stop IRIS XO configuration */
+ val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
+ val &= ~WCNSS_PMU_IRIS_XO_CFG;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Add some delay for XO to settle */
+ msleep(20);
+}
+
+static int wcnss_start(struct rproc *rproc)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ mutex_lock(&wcnss->iris_lock);
+ if (!wcnss->iris) {
+ dev_err(wcnss->dev, "no iris registered\n");
+ ret = -EINVAL;
+ goto release_iris_lock;
+ }
+
+ ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
+ if (ret)
+ goto release_iris_lock;
+
+ ret = qcom_iris_enable(wcnss->iris);
+ if (ret)
+ goto disable_regulators;
+
+ wcnss_indicate_nv_download(wcnss);
+ wcnss_configure_iris(wcnss);
+
+ ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
+ if (ret) {
+ dev_err(wcnss->dev,
+ "failed to authenticate image and release reset\n");
+ goto disable_iris;
+ }
+
+ ret = wait_for_completion_timeout(&wcnss->start_done,
+ msecs_to_jiffies(5000));
+ if (wcnss->ready_irq > 0 && ret == 0) {
+ /* We have a ready_irq, but it didn't fire in time. */
+ dev_err(wcnss->dev, "start timed out\n");
+ qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+ ret = -ETIMEDOUT;
+ goto disable_iris;
+ }
+
+ ret = 0;
+
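+ /*
+ * Fall through on success as well: the IRIS and the regulators are
+ * released here, see the note in wcnss_handover_interrupt().
+ */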
+disable_iris:
+ qcom_iris_disable(wcnss->iris);
+disable_regulators:
+ regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+release_iris_lock:
+ mutex_unlock(&wcnss->iris_lock);
+
+ return ret;
+}
+
+static int wcnss_stop(struct rproc *rproc)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ if (wcnss->state) {
+ qcom_smem_state_update_bits(wcnss->state,
+ BIT(wcnss->stop_bit),
+ BIT(wcnss->stop_bit));
+
+ ret = wait_for_completion_timeout(&wcnss->stop_done,
+ msecs_to_jiffies(5000));
+ if (ret == 0)
+ dev_err(wcnss->dev, "timed out on wait\n");
+
+ qcom_smem_state_update_bits(wcnss->state,
+ BIT(wcnss->stop_bit),
+ 0);
+ }
+
+ ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+ if (ret)
+ dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);
+
+ return ret;
+}
+
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int offset;
+
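+ /* Translate the device address into an offset within the mapped carveout. */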
+ offset = da - wcnss->mem_reloc;
+ if (offset < 0 || offset + len > wcnss->mem_size)
+ return NULL;
+
+ return wcnss->mem_region + offset;
+}
+
+static const struct rproc_ops wcnss_ops = {
+ .start = wcnss_start,
+ .stop = wcnss_stop,
+ .da_to_va = wcnss_da_to_va,
+};
+
+static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+ size_t len;
+ char *msg;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(wcnss->dev, "fatal error received: %s\n", msg);
+
+ rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);
+
+ if (!IS_ERR(msg))
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ complete(&wcnss->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
+{
+ /*
+ * XXX: At this point we're supposed to release the resources that we
+ * have been holding on behalf of the WCNSS. Unfortunately this
+ * interrupt comes way before the other side seems to be done.
+ *
+ * So we're currently relying on the ready interrupt firing later than
+ * this and we just disable the resources at the end of wcnss_start().
+ */
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ complete(&wcnss->stop_done);
+
+ return IRQ_HANDLED;
+}
+
+static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
+ const struct wcnss_vreg_info *info,
+ int num_vregs)
+{
+ struct regulator_bulk_data *bulk;
+ int ret;
+ int i;
+
+ bulk = devm_kcalloc(wcnss->dev,
+ num_vregs, sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!bulk)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vregs; i++)
+ bulk[i].supply = info[i].name;
+
+ ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
+ if (ret)
+ return ret;
+
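+ /* Apply the voltage window and load current from the per-SoC table. */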
+ for (i = 0; i < num_vregs; i++) {
+ if (info[i].max_voltage)
+ regulator_set_voltage(bulk[i].consumer,
+ info[i].min_voltage,
+ info[i].max_voltage);
+
+ if (info[i].load_uA)
+ regulator_set_load(bulk[i].consumer, info[i].load_uA);
+ }
+
+ wcnss->vregs = bulk;
+ wcnss->num_vregs = num_vregs;
+
+ return 0;
+}
+
+static int wcnss_request_irq(struct qcom_wcnss *wcnss,
+ struct platform_device *pdev,
+ const char *name,
+ bool optional,
+ irq_handler_t thread_fn)
+{
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0 && optional) {
+ dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
+ return 0;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "wcnss", wcnss);
+ if (ret)
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+
+ return ret;
+}
+
+static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(wcnss->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ wcnss->mem_phys = wcnss->mem_reloc = r.start;
+ wcnss->mem_size = resource_size(&r);
+ wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
+ if (!wcnss->mem_region) {
+ dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, wcnss->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int wcnss_probe(struct platform_device *pdev)
+{
+ const struct wcnss_data *data;
+ struct qcom_wcnss *wcnss;
+ struct resource *res;
+ struct rproc *rproc;
+ void __iomem *mmio;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
+ dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
+ return -ENXIO;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
+ WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->fw_ops = &wcnss_fw_ops;
+
+ wcnss = (struct qcom_wcnss *)rproc->priv;
+ wcnss->dev = &pdev->dev;
+ wcnss->rproc = rproc;
+ platform_set_drvdata(pdev, wcnss);
+
+ init_completion(&wcnss->start_done);
+ init_completion(&wcnss->stop_done);
+
+ mutex_init(&wcnss->iris_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio)) {
+ ret = PTR_ERR(mmio);
+ goto free_rproc;
+ }
+
+ ret = wcnss_alloc_memory_region(wcnss);
+ if (ret)
+ goto free_rproc;
+
+ wcnss->pmu_cfg = mmio + data->pmu_offset;
+ wcnss->spare_out = mmio + data->spare_offset;
+
+ ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs);
+ if (ret)
+ goto free_rproc;
+
+ ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ wcnss->wdog_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ wcnss->fatal_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ wcnss->ready_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ wcnss->handover_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ wcnss->stop_ack_irq = ret;
+
+ if (wcnss->stop_ack_irq) {
+ wcnss->state = qcom_smem_state_get(&pdev->dev, "stop",
+ &wcnss->stop_bit);
+ if (IS_ERR(wcnss->state)) {
+ ret = PTR_ERR(wcnss->state);
+ goto free_rproc;
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
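+ /* Probe child nodes, such as the IRIS RF module, described under this device. */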
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int wcnss_remove(struct platform_device *pdev)
+{
+ struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+
+ qcom_smem_state_put(wcnss->state);
+ rproc_del(wcnss->rproc);
+ rproc_free(wcnss->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id wcnss_of_match[] = {
+ { .compatible = "qcom,riva-pil", &riva_data },
+ { .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
+ { .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
+ { },
+};
+
+static struct platform_driver wcnss_driver = {
+ .probe = wcnss_probe,
+ .remove = wcnss_remove,
+ .driver = {
+ .name = "qcom-wcnss-pil",
+ .of_match_table = wcnss_of_match,
+ },
+};
+
+module_platform_driver(wcnss_driver);
+MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
new file mode 100644
index 000000000000..9dc4a9fe41e1
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -0,0 +1,22 @@
+#ifndef __QCOM_WNCSS_H__
+#define __QCOM_WNCSS_H__
+
+struct qcom_iris;
+struct qcom_wcnss;
+
+struct wcnss_vreg_info {
+ const char * const name;
+ int min_voltage;
+ int max_voltage;
+
+ int load_uA;
+
+ bool super_turbo;
+};
+
+int qcom_iris_enable(struct qcom_iris *iris);
+void qcom_iris_disable(struct qcom_iris *iris);
+
+void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
+
+#endif
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
new file mode 100644
index 000000000000..f0ca24a8dd0b
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -0,0 +1,188 @@
+/*
+ * Qualcomm Wireless Connectivity Subsystem Iris driver
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "qcom_wcnss.h"
+
+struct qcom_iris {
+ struct device *dev;
+
+ struct clk *xo_clk;
+
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+};
+
+struct iris_data {
+ const struct wcnss_vreg_info *vregs;
+ size_t num_vregs;
+
+ bool use_48mhz_xo;
+};
+
+static const struct iris_data wcn3620_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = false,
+};
+
+static const struct iris_data wcn3660_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 2900000, 3000000, 515000 },
+ { "vdddig", 1200000, 1225000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+static const struct iris_data wcn3680_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+int qcom_iris_enable(struct qcom_iris *iris)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(iris->num_vregs, iris->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(iris->xo_clk);
+ if (ret) {
+ dev_err(iris->dev, "failed to enable xo clk\n");
+ goto disable_regulators;
+ }
+
+ return 0;
+
+disable_regulators:
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_iris_enable);
+
+void qcom_iris_disable(struct qcom_iris *iris)
+{
+ clk_disable_unprepare(iris->xo_clk);
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+}
+EXPORT_SYMBOL_GPL(qcom_iris_disable);
+
+static int qcom_iris_probe(struct platform_device *pdev)
+{
+ const struct iris_data *data;
+ struct qcom_wcnss *wcnss;
+ struct qcom_iris *iris;
+ int ret;
+ int i;
+
+ iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
+ if (!iris)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(&pdev->dev);
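+ /* The IRIS device is populated as a child of the WCNSS PIL device. */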
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(iris->xo_clk)) {
+ if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire xo clk\n");
+ return PTR_ERR(iris->xo_clk);
+ }
+
+ iris->num_vregs = data->num_vregs;
+ iris->vregs = devm_kcalloc(&pdev->dev,
+ iris->num_vregs,
+ sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!iris->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < iris->num_vregs; i++)
+ iris->vregs[i].supply = data->vregs[i].name;
+
+ ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get regulators\n");
+ return ret;
+ }
+
+ for (i = 0; i < iris->num_vregs; i++) {
+ if (data->vregs[i].max_voltage)
+ regulator_set_voltage(iris->vregs[i].consumer,
+ data->vregs[i].min_voltage,
+ data->vregs[i].max_voltage);
+
+ if (data->vregs[i].load_uA)
+ regulator_set_load(iris->vregs[i].consumer,
+ data->vregs[i].load_uA);
+ }
+
+ qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
+
+ return 0;
+}
+
+static int qcom_iris_remove(struct platform_device *pdev)
+{
+ struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ qcom_wcnss_assign_iris(wcnss, NULL, false);
+
+ return 0;
+}
+
+static const struct of_device_id iris_of_match[] = {
+ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+ {}
+};
+
+static struct platform_driver wcnss_driver = {
+ .probe = qcom_iris_probe,
+ .remove = qcom_iris_remove,
+ .driver = {
+ .name = "qcom-iris",
+ .of_match_table = iris_of_match,
+ },
+};
+
+module_platform_driver(wcnss_driver);
+MODULE_DESCRIPTION("Qualcomm Wireless Subsystem Iris driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index fe0539ed9cb5..c6bfb3496684 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@ static const char *rproc_crash_to_string(enum rproc_crash_type type)
* will try to access an unmapped device address.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
- unsigned long iova, int flags, void *token)
+ unsigned long iova, int flags, void *token)
{
struct rproc *rproc = token;
@@ -236,8 +236,8 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
}
notifyid = ret;
- dev_dbg(dev, "vring%d: va %p dma %llx size %x idr %d\n", i, va,
- (unsigned long long)dma, size, notifyid);
+ dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n",
+ i, va, &dma, size, notifyid);
rvring->va = va;
rvring->dma = dma;
@@ -263,19 +263,13 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
struct rproc_vring *rvring = &rvdev->vring[i];
- dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
- i, vring->da, vring->num, vring->align);
-
- /* make sure reserved bytes are zeroes */
- if (vring->reserved) {
- dev_err(dev, "vring rsc has non zero reserved bytes\n");
- return -EINVAL;
- }
+ dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
+ i, vring->da, vring->num, vring->align);
/* verify queue size and vring alignment are sane */
if (!vring->num || !vring->align) {
dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
- vring->num, vring->align);
+ vring->num, vring->align);
return -EINVAL;
}
@@ -330,7 +324,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
- int offset, int avail)
+ int offset, int avail)
{
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
@@ -349,7 +343,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
return -EINVAL;
}
- dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
+ dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
/* we currently support only two vrings per rvdev */
@@ -358,7 +352,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
return -EINVAL;
}
- rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
+ rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
if (!rvdev)
return -ENOMEM;
@@ -407,7 +401,7 @@ free_rvdev:
* Returns 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
- int offset, int avail)
+ int offset, int avail)
{
struct rproc_mem_entry *trace;
struct device *dev = &rproc->dev;
@@ -455,8 +449,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
rproc->num_traces++;
- dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
- rsc->da, rsc->len);
+ dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n",
+ name, ptr, rsc->da, rsc->len);
return 0;
}
@@ -487,7 +481,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* are outside those ranges.
*/
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
- int offset, int avail)
+ int offset, int avail)
{
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
@@ -530,7 +524,7 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
list_add_tail(&mapping->node, &rproc->mappings);
dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
- rsc->pa, rsc->da, rsc->len);
+ rsc->pa, rsc->da, rsc->len);
return 0;
@@ -558,9 +552,8 @@ out:
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
-
+ struct fw_rsc_carveout *rsc,
+ int offset, int avail)
{
struct rproc_mem_entry *carveout, *mapping;
struct device *dev = &rproc->dev;
@@ -579,8 +572,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
return -EINVAL;
}
- dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
- rsc->da, rsc->pa, rsc->len, rsc->flags);
+ dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
+ rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
if (!carveout)
@@ -588,13 +581,14 @@ static int rproc_handle_carveout(struct rproc *rproc,
va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
if (!va) {
- dev_err(dev->parent, "dma_alloc_coherent err: %d\n", rsc->len);
+ dev_err(dev->parent,
+ "failed to allocate dma memory: len 0x%x\n", rsc->len);
ret = -ENOMEM;
goto free_carv;
}
- dev_dbg(dev, "carveout va %p, dma %llx, len 0x%x\n", va,
- (unsigned long long)dma, rsc->len);
+ dev_dbg(dev, "carveout va %p, dma %pad, len 0x%x\n",
+ va, &dma, rsc->len);
/*
* Ok, this is non-standard.
@@ -616,13 +610,12 @@ static int rproc_handle_carveout(struct rproc *rproc,
if (rproc->domain) {
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
- dev_err(dev, "kzalloc mapping failed\n");
ret = -ENOMEM;
goto dma_free;
}
ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
- rsc->flags);
+ rsc->flags);
if (ret) {
dev_err(dev, "iommu_map failed: %d\n", ret);
goto free_mapping;
@@ -639,8 +632,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
mapping->len = rsc->len;
list_add_tail(&mapping->node, &rproc->mappings);
- dev_dbg(dev, "carveout mapped 0x%x to 0x%llx\n",
- rsc->da, (unsigned long long)dma);
+ dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
+ rsc->da, &dma);
}
/*
@@ -697,17 +690,13 @@ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */
+ [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
};
static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};
-static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
-};
-
/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc, int len,
rproc_handle_resource_t handlers[RSC_LAST])
@@ -757,6 +746,7 @@ static int rproc_handle_resources(struct rproc *rproc, int len,
static void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
+ struct rproc_vdev *rvdev, *rvtmp;
struct device *dev = &rproc->dev;
/* clean up debugfs trace entries */
@@ -775,7 +765,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
- unmapped);
+ unmapped);
}
list_del(&entry->node);
@@ -789,6 +779,10 @@ static void rproc_resource_cleanup(struct rproc *rproc)
list_del(&entry->node);
kfree(entry);
}
+
+ /* clean up remote vdev entries */
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
+ rproc_remove_virtio_dev(rvdev);
}
/*
@@ -801,9 +795,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
struct resource_table *table, *loaded_table;
int ret, tablesz;
- if (!rproc->table_ptr)
- return -ENOMEM;
-
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
@@ -830,9 +821,25 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up;
}
- /* Verify that resource table in loaded fw is unchanged */
- if (rproc->table_csum != crc32(0, table, tablesz)) {
- dev_err(dev, "resource checksum failed, fw changed?\n");
+ /*
+ * Create a copy of the resource table. When a virtio device starts
+ * and calls vring_new_virtqueue() the address of the allocated vring
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
+ */
+ rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ goto clean_up;
+
+ rproc->table_ptr = rproc->cached_table;
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* look for virtio devices and register them */
+ ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
+ if (ret) {
+ dev_err(dev, "Failed to handle vdev resources: %d\n", ret);
goto clean_up;
}
@@ -840,49 +847,50 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto clean_up;
+ goto clean_up_resources;
}
/* load the ELF segments to memory */
ret = rproc_load_segments(rproc, fw);
if (ret) {
dev_err(dev, "Failed to load program segments: %d\n", ret);
- goto clean_up;
+ goto clean_up_resources;
}
/*
* The starting device has been given the rproc->cached_table as the
* resource table. The address of the vring along with the other
* allocated resources (carveouts etc) is stored in cached_table.
- * In order to pass this information to the remote device we must
- * copy this information to device memory.
+ * In order to pass this information to the remote device we must copy
+ * this information to device memory. We also update the table_ptr so
+ * that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table)
+ if (loaded_table) {
memcpy(loaded_table, rproc->cached_table, tablesz);
+ rproc->table_ptr = loaded_table;
+ }
/* power up the remote processor */
ret = rproc->ops->start(rproc);
if (ret) {
dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
- goto clean_up;
+ goto clean_up_resources;
}
- /*
- * Update table_ptr so that all subsequent vring allocations and
- * virtio fields manipulation update the actual loaded resource table
- * in device memory.
- */
- rproc->table_ptr = loaded_table;
-
rproc->state = RPROC_RUNNING;
dev_info(dev, "remote processor %s is now up\n", rproc->name);
return 0;
-clean_up:
+clean_up_resources:
rproc_resource_cleanup(rproc);
+clean_up:
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+
rproc_disable_iommu(rproc);
return ret;
}
@@ -898,42 +906,11 @@ clean_up:
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
struct rproc *rproc = context;
- struct resource_table *table;
- int ret, tablesz;
-
- if (rproc_fw_sanity_check(rproc, fw) < 0)
- goto out;
-
- /* look for the resource table */
- table = rproc_find_rsc_table(rproc, fw, &tablesz);
- if (!table)
- goto out;
-
- rproc->table_csum = crc32(0, table, tablesz);
-
- /*
- * Create a copy of the resource table. When a virtio device starts
- * and calls vring_new_virtqueue() the address of the allocated vring
- * will be stored in the cached_table. Before the device is started,
- * cached_table will be copied into devic memory.
- */
- rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
- if (!rproc->cached_table)
- goto out;
-
- rproc->table_ptr = rproc->cached_table;
- /* count the number of notify-ids */
- rproc->max_notifyid = -1;
- ret = rproc_handle_resources(rproc, tablesz,
- rproc_count_vrings_handler);
- if (ret)
- goto out;
-
- /* look for virtio devices and register them */
- ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
+ /* if rproc is marked always-on, request it to boot */
+ if (rproc->auto_boot)
+ rproc_boot_nowait(rproc);
-out:
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
@@ -969,7 +946,7 @@ static int rproc_add_virtio_devices(struct rproc *rproc)
* rproc_trigger_recovery() - recover a remoteproc
* @rproc: the remote processor
*
- * The recovery is done by reseting all the virtio devices, that way all the
+ * The recovery is done by resetting all the virtio devices, that way all the
* rpmsg drivers will be reset along with the remote processor making the
* remoteproc functional again.
*
@@ -977,23 +954,23 @@ static int rproc_add_virtio_devices(struct rproc *rproc)
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
- struct rproc_vdev *rvdev, *rvtmp;
-
dev_err(&rproc->dev, "recovering %s\n", rproc->name);
init_completion(&rproc->crash_comp);
- /* clean up remote vdev entries */
- list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
- rproc_remove_virtio_dev(rvdev);
+ /* shut down the remote */
+ /* TODO: make sure this works with rproc->power > 1 */
+ rproc_shutdown(rproc);
/* wait until there is no more rproc users */
wait_for_completion(&rproc->crash_comp);
- /* Free the copy of the resource table */
- kfree(rproc->cached_table);
+ /*
+ * boot the remote processor up again
+ */
+ rproc_boot(rproc);
- return rproc_add_virtio_devices(rproc);
+ return 0;
}
/**
@@ -1058,20 +1035,6 @@ static int __rproc_boot(struct rproc *rproc, bool wait)
return ret;
}
- /* loading a firmware is required */
- if (!rproc->firmware) {
- dev_err(dev, "%s: no firmware to load\n", __func__);
- ret = -EINVAL;
- goto unlock_mutex;
- }
-
- /* prevent underlying implementation from being removed */
- if (!try_module_get(dev->parent->driver->owner)) {
- dev_err(dev, "%s: can't get owner\n", __func__);
- ret = -EINVAL;
- goto unlock_mutex;
- }
-
/* skip the boot process if rproc is already powered up */
if (atomic_inc_return(&rproc->power) > 1) {
ret = 0;
@@ -1096,10 +1059,8 @@ static int __rproc_boot(struct rproc *rproc, bool wait)
release_firmware(firmware_p);
downref_rproc:
- if (ret) {
- module_put(dev->parent->driver->owner);
+ if (ret)
atomic_dec(&rproc->power);
- }
unlock_mutex:
mutex_unlock(&rproc->lock);
return ret;
@@ -1173,8 +1134,10 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
- /* Give the next start a clean resource table */
- rproc->table_ptr = rproc->cached_table;
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
/* if in crash state, unlock crash handler */
if (rproc->state == RPROC_CRASHED)
@@ -1186,8 +1149,6 @@ void rproc_shutdown(struct rproc *rproc)
out:
mutex_unlock(&rproc->lock);
- if (!ret)
- module_put(dev->parent->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);
@@ -1216,6 +1177,12 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
mutex_lock(&rproc_list_mutex);
list_for_each_entry(r, &rproc_list, node) {
if (r->dev.parent && r->dev.parent->of_node == np) {
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(r->dev.parent->driver->owner)) {
+ dev_err(&r->dev, "can't get owner\n");
+ break;
+ }
+
rproc = r;
get_device(&rproc->dev);
break;
@@ -1335,11 +1302,11 @@ static struct device_type rproc_type = {
* On success the new rproc is returned, and on failure, NULL.
*
* Note: _never_ directly deallocate @rproc, even if it was not registered
- * yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
+ * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
*/
struct rproc *rproc_alloc(struct device *dev, const char *name,
- const struct rproc_ops *ops,
- const char *firmware, int len)
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
{
struct rproc *rproc;
char *p, *template = "rproc-%s-fw";
@@ -1359,7 +1326,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
*/
name_len = strlen(name) + strlen(template) - 2 + 1;
- rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
+ rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL);
if (!rproc)
return NULL;
@@ -1374,6 +1341,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->name = name;
rproc->ops = ops;
rproc->priv = &rproc[1];
+ rproc->auto_boot = true;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
@@ -1413,7 +1381,22 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
EXPORT_SYMBOL(rproc_alloc);
/**
- * rproc_put() - unroll rproc_alloc()
+ * rproc_free() - unroll rproc_alloc()
+ * @rproc: the remote processor handle
+ *
+ * This function decrements the rproc dev refcount.
+ *
+ * If no one holds any reference to rproc anymore, then its refcount would
+ * now drop to zero, and it would be freed.
+ */
+void rproc_free(struct rproc *rproc)
+{
+ put_device(&rproc->dev);
+}
+EXPORT_SYMBOL(rproc_free);
+
+/**
+ * rproc_put() - release rproc reference
* @rproc: the remote processor handle
*
* This function decrements the rproc dev refcount.
@@ -1423,6 +1406,7 @@ EXPORT_SYMBOL(rproc_alloc);
*/
void rproc_put(struct rproc *rproc)
{
+ module_put(rproc->dev.parent->driver->owner);
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
@@ -1438,7 +1422,7 @@ EXPORT_SYMBOL(rproc_put);
*
* After rproc_del() returns, @rproc isn't freed yet, because
* of the outstanding reference created by rproc_alloc. To decrement that
- * one last refcount, one still needs to call rproc_put().
+ * one last refcount, one still needs to call rproc_free().
*
* Returns 0 on success and -EINVAL if @rproc isn't valid.
*/
@@ -1452,13 +1436,15 @@ int rproc_del(struct rproc *rproc)
/* if rproc is just being registered, wait */
wait_for_completion(&rproc->firmware_loading_complete);
+ /* if rproc is marked always-on, rproc_add() booted it */
+ /* TODO: make sure this works with rproc->power > 1 */
+ if (rproc->auto_boot)
+ rproc_shutdown(rproc);
+
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
rproc_remove_virtio_dev(rvdev);
- /* Free the copy of the resource table */
- kfree(rproc->cached_table);
-
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
list_del(&rproc->node);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 74a120b6e206..374797206c79 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -45,7 +45,7 @@ static struct dentry *rproc_dbg;
* as it provides very early tracing with little to no dependencies at all.
*/
static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct rproc_mem_entry *trace = filp->private_data;
int len = strnlen(trace->va, trace->len);
@@ -73,7 +73,7 @@ static const char * const rproc_state_string[] = {
/* expose the state of the remote processor via debugfs */
static ssize_t rproc_state_read(struct file *filp, char __user *userbuf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
unsigned int state;
@@ -83,7 +83,7 @@ static ssize_t rproc_state_read(struct file *filp, char __user *userbuf,
state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
i = scnprintf(buf, 30, "%.28s (%d)\n", rproc_state_string[state],
- rproc->state);
+ rproc->state);
return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
@@ -130,7 +130,7 @@ static const struct file_operations rproc_state_ops = {
/* expose the name of the remote processor via debugfs */
static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct rproc *rproc = filp->private_data;
/* need room for the name, a newline and a terminating null */
@@ -230,12 +230,12 @@ void rproc_remove_trace_file(struct dentry *tfile)
}
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace)
+ struct rproc_mem_entry *trace)
{
struct dentry *tfile;
- tfile = debugfs_create_file(name, 0400, rproc->dbg_dir,
- trace, &trace_rproc_ops);
+ tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
+ &trace_rproc_ops);
if (!tfile) {
dev_err(&rproc->dev, "failed to create debugfs trace entry\n");
return NULL;
@@ -264,11 +264,11 @@ void rproc_create_debug_dir(struct rproc *rproc)
return;
debugfs_create_file("name", 0400, rproc->dbg_dir,
- rproc, &rproc_name_ops);
+ rproc, &rproc_name_ops);
debugfs_create_file("state", 0400, rproc->dbg_dir,
- rproc, &rproc_state_ops);
+ rproc, &rproc_state_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
- rproc, &rproc_recovery_ops);
+ rproc, &rproc_recovery_ops);
}
void __init rproc_init_debugfs(void)
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index ce283a5b42a1..c523983a4aec 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -166,18 +166,18 @@ rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
continue;
dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
+ phdr->p_type, da, memsz, filesz);
if (filesz > memsz) {
dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
- filesz, memsz);
+ filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
- offset + filesz, fw->size);
+ offset + filesz, fw->size);
ret = -EINVAL;
break;
}
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 57e1de59bec8..4cf93ca2816e 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -36,10 +36,10 @@ struct rproc;
*/
struct rproc_fw_ops {
struct resource_table *(*find_rsc_table)(struct rproc *rproc,
- const struct firmware *fw,
- int *tablesz);
- struct resource_table *(*find_loaded_rsc_table)(struct rproc *rproc,
- const struct firmware *fw);
+ const struct firmware *fw,
+ int *tablesz);
+ struct resource_table *(*find_loaded_rsc_table)(
+ struct rproc *rproc, const struct firmware *fw);
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
@@ -57,7 +57,7 @@ void rproc_remove_virtio_dev(struct rproc_vdev *rvdev);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
- struct rproc_mem_entry *trace);
+ struct rproc_mem_entry *trace);
void rproc_delete_debug_dir(struct rproc *rproc);
void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
@@ -98,7 +98,8 @@ int rproc_load_segments(struct rproc *rproc, const struct firmware *fw)
static inline
struct resource_table *rproc_find_rsc_table(struct rproc *rproc,
- const struct firmware *fw, int *tablesz)
+ const struct firmware *fw,
+ int *tablesz)
{
if (rproc->fw_ops->find_rsc_table)
return rproc->fw_ops->find_rsc_table(rproc, fw, tablesz);
@@ -108,7 +109,7 @@ struct resource_table *rproc_find_rsc_table(struct rproc *rproc,
static inline
struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
- const struct firmware *fw)
+ const struct firmware *fw)
{
if (rproc->fw_ops->find_loaded_rsc_table)
return rproc->fw_ops->find_loaded_rsc_table(rproc, fw);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index cc91556313e1..01870a16d6d2 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -69,7 +69,7 @@ irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
EXPORT_SYMBOL(rproc_vq_interrupt);
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
- unsigned id,
+ unsigned int id,
void (*callback)(struct virtqueue *vq),
const char *name)
{
@@ -101,14 +101,14 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, len, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr,
- rproc_virtio_notify, callback, name);
+ rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
rproc_free_vring(rvring);
@@ -136,20 +136,14 @@ static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
- struct rproc *rproc = vdev_to_rproc(vdev);
-
- /* power down the remote processor before deleting vqs */
- rproc_shutdown(rproc);
-
__rproc_virtio_del_vqs(vdev);
}
-static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[])
+static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[])
{
- struct rproc *rproc = vdev_to_rproc(vdev);
int i, ret;
for (i = 0; i < nvqs; ++i) {
@@ -160,13 +154,6 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
}
- /* now that the vqs are all set, boot the remote processor */
- ret = rproc_boot_nowait(rproc);
- if (ret) {
- dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
- goto error;
- }
-
return 0;
error:
@@ -239,8 +226,8 @@ static int rproc_virtio_finalize_features(struct virtio_device *vdev)
return 0;
}
-static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
+static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
@@ -257,8 +244,8 @@ static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset,
memcpy(buf, cfg + offset, len);
}
-static void rproc_virtio_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
+static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 6f056caa8a56..ae8963fcc8c8 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -262,7 +262,7 @@ static int st_rproc_probe(struct platform_device *pdev)
return 0;
free_rproc:
- rproc_put(rproc);
+ rproc_free(rproc);
return ret;
}
@@ -277,7 +277,7 @@ static int st_rproc_remove(struct platform_device *pdev)
of_reserved_mem_device_release(&pdev->dev);
- rproc_put(rproc);
+ rproc_free(rproc);
return 0;
}
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index 53dc17bdd54e..03d69a9a3c5b 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -257,7 +257,7 @@ static int sproc_drv_remove(struct platform_device *pdev)
rproc_del(sproc->rproc);
dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
sproc->fw_addr, sproc->fw_dma_addr);
- rproc_put(sproc->rproc);
+ rproc_free(sproc->rproc);
mdev->drv_data = NULL;
@@ -325,7 +325,7 @@ free_mem:
free_rproc:
/* Reset device data upon error */
mdev->drv_data = NULL;
- rproc_put(rproc);
+ rproc_free(rproc);
return err;
}
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 02d271d101b4..18175d0331fd 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -167,6 +167,8 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
goto err;
}
+ rproc->auto_boot = false;
+
wkupm3 = rproc->priv;
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
@@ -206,7 +208,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
return 0;
err_put_rproc:
- rproc_put(rproc);
+ rproc_free(rproc);
err:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
@@ -218,7 +220,7 @@ static int wkup_m3_rproc_remove(struct platform_device *pdev)
struct rproc *rproc = platform_get_drvdata(pdev);
rproc_del(rproc);
- rproc_put(rproc);
+ rproc_free(rproc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 4be1b8c21f6f..06d9fa2f3bc0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -14,9 +14,58 @@ menuconfig RESET_CONTROLLER
if RESET_CONTROLLER
+config RESET_ATH79
+ bool "AR71xx Reset Driver" if COMPILE_TEST
+ default ATH79
+ help
+ This enables the ATH79 reset controller driver that supports the
+ AR71xx SoC reset controller.
+
+config RESET_BERLIN
+ bool "Berlin Reset Driver" if COMPILE_TEST
+ default ARCH_BERLIN
+ help
+ This enables the reset controller driver for Marvell Berlin SoCs.
+
+config RESET_LPC18XX
+ bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST
+ default ARCH_LPC18XX
+ help
+ This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
+
+config RESET_MESON
+ bool "Meson Reset Driver" if COMPILE_TEST
+ default ARCH_MESON
+ help
+ This enables the reset driver for Amlogic Meson SoCs.
+
config RESET_OXNAS
bool
+config RESET_PISTACHIO
+ bool "Pistachio Reset Driver" if COMPILE_TEST
+ default MACH_PISTACHIO
+ help
+ This enables the reset driver for ImgTec Pistachio SoCs.
+
+config RESET_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST
+ default ARCH_SOCFPGA
+ help
+ This enables the reset controller driver for Altera SoCFPGAs.
+
+config RESET_STM32
+ bool "STM32 Reset Driver" if COMPILE_TEST
+ default ARCH_STM32
+ help
+ This enables the RCC reset controller driver for STM32 MCUs.
+
+config RESET_SUNXI
+ bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
+ default ARCH_SUNXI
+ help
+ This enables the reset driver for Allwinner SoCs.
+
config TI_SYSCON_RESET
tristate "TI SYSCON Reset Driver"
depends on HAS_IOMEM
@@ -27,6 +76,22 @@ config TI_SYSCON_RESET
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_UNIPHIER
+ tristate "Reset controller driver for UniPhier SoCs"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for reset controllers on UniPhier SoCs.
+	  Say Y if you want to control reset signals provided by the System
+	  Control block, Media I/O block, and Peripheral block.
+
+config RESET_ZYNQ
+ bool "ZYNQ Reset Driver" if COMPILE_TEST
+ default ARCH_ZYNQ
+ help
+ This enables the reset controller driver for Xilinx Zynq SoCs.
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 5d65a93d3c43..bbe7026617fc 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,13 +1,15 @@
obj-y += core.o
-obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
-obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
-obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
-obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
-obj-$(CONFIG_ARCH_MESON) += reset-meson.o
-obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
-obj-$(CONFIG_ARCH_HISI) += hisilicon/
-obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
-obj-$(CONFIG_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
+obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+obj-$(CONFIG_RESET_STM32) += reset-stm32.o
+obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
+obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 395dc9ce492e..b8ae1dbd4c17 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -138,7 +138,8 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(rstc->shared))
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
+ WARN_ON(rstc->shared))
return -EINVAL;
if (rstc->rcdev->ops->reset)
@@ -161,6 +162,9 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*/
int reset_control_assert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->assert)
return -ENOTSUPP;
@@ -184,6 +188,9 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
*/
int reset_control_deassert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->deassert)
return -ENOTSUPP;
@@ -204,6 +211,9 @@ EXPORT_SYMBOL_GPL(reset_control_deassert);
*/
int reset_control_status(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (rstc->rcdev->ops->status)
return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
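The new WARN_ON(IS_ERR_OR_NULL(rstc)) guards make the core return -EINVAL instead of dereferencing a bad pointer when a consumer passes in NULL or an error pointer. A hedged sketch of the consumer pattern this protects; the "my-reset" line name is illustrative only:

#include <linux/delay.h>
#include <linux/reset.h>

static int my_device_reset(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get(dev, "my-reset");
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/*
	 * Even if a driver forgets the IS_ERR() check above and hands an
	 * error pointer (or NULL) to the core, reset_control_assert() and
	 * friends now WARN and bail out with -EINVAL rather than crash.
	 */
	reset_control_assert(rstc);
	udelay(10);
	return reset_control_deassert(rstc);
}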
diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig
index 26bf95a83a8e..1ff8b0c80980 100644
--- a/drivers/reset/hisilicon/Kconfig
+++ b/drivers/reset/hisilicon/Kconfig
@@ -1,5 +1,6 @@
config COMMON_RESET_HI6220
tristate "Hi6220 Reset Driver"
- depends on (ARCH_HISI && RESET_CONTROLLER)
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
help
Build the Hisilicon Hi6220 reset driver.
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 16d410cd6146..6b97631f5489 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 12add9b0fa49..78ebf8424375 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -28,7 +28,6 @@
struct socfpga_reset_data {
spinlock_t lock;
void __iomem *membase;
- u32 modrst_offset;
struct reset_controller_dev rcdev;
};
@@ -45,9 +44,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg | BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
return 0;
@@ -67,9 +65,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg & ~BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
@@ -85,7 +82,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
int offset = id % BITS_PER_LONG;
u32 reg;
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
return !(reg & BIT(offset));
}
@@ -102,6 +99,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ u32 modrst_offset;
/*
* The binding was mainlined without the required property.
@@ -122,10 +120,11 @@ static int socfpga_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
- if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) {
+ if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) {
dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
- data->modrst_offset = 0x10;
+ modrst_offset = 0x10;
}
+ data->membase += modrst_offset;
spin_lock_init(&data->lock);
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c
new file mode 100644
index 000000000000..3a7c8527e66a
--- /dev/null
+++ b/drivers/reset/reset-stm32.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Heavily based on sunxi driver from Maxime Ripard.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct stm32_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int stm32_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg | BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg & ~BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops stm32_reset_ops = {
+ .assert = stm32_reset_assert,
+ .deassert = stm32_reset_deassert,
+};
+
+static const struct of_device_id stm32_reset_dt_ids[] = {
+ { .compatible = "st,stm32-rcc", },
+ { /* sentinel */ },
+};
+
+static int stm32_reset_probe(struct platform_device *pdev)
+{
+ struct stm32_reset_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * 8;
+ data->rcdev.ops = &stm32_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static struct platform_driver stm32_reset_driver = {
+ .probe = stm32_reset_probe,
+ .driver = {
+ .name = "stm32-rcc-reset",
+ .of_match_table = stm32_reset_dt_ids,
+ },
+};
+builtin_platform_driver(stm32_reset_driver);
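For reference, each RCC reset id in the driver above maps to a 32-bit register ("bank", 4 bytes apart) and a bit within it; nr_resets is derived from the size of the register window. A small illustrative helper, assuming a 32-bit STM32 part where BITS_PER_LONG is 32:

#include <linux/bitops.h>

/* Illustration only: id 70 lands in the third reset register (offset 0x8), bit 6. */
static inline void stm32_reset_to_reg(unsigned long id,
				      unsigned int *bank, unsigned int *bit)
{
	*bank = id / BITS_PER_LONG;	/* register index, 4 bytes apart */
	*bit = id % BITS_PER_LONG;	/* bit within that register */
}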
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
new file mode 100644
index 000000000000..8b2558e7363e
--- /dev/null
+++ b/drivers/reset/reset-uniphier.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct uniphier_reset_data {
+ unsigned int id;
+ unsigned int reg;
+ unsigned int bit;
+ unsigned int flags;
+#define UNIPHIER_RESET_ACTIVE_LOW BIT(0)
+};
+
+#define UNIPHIER_RESET_ID_END (unsigned int)(-1)
+
+#define UNIPHIER_RESET_END \
+ { .id = UNIPHIER_RESET_ID_END }
+
+#define UNIPHIER_RESET(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ }
+
+#define UNIPHIER_RESETX(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ .flags = UNIPHIER_RESET_ACTIVE_LOW, \
+ }
+
+/* System reset data */
+#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x2000, 10)
+
+#define UNIPHIER_LD11_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x200c, 8)
+
+#define UNIPHIER_PRO4_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x2000, 6)
+
+#define UNIPHIER_LD20_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x200c, 5)
+
+#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \
+ UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17)
+
+const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */
+ UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */
+ UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */
+ UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */
+ UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
+ UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */
+ UNIPHIER_RESET_END,
+};
+
+/* Media I/O reset data */
+#define UNIPHIER_MIO_RESET_SD(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_SD_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 26)
+
+#define UNIPHIER_MIO_RESET_EMMC_HW_RESET(id, ch) \
+ UNIPHIER_RESETX((id), 0x80 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2(id, ch) \
+ UNIPHIER_RESETX((id), 0x114 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 24)
+
+#define UNIPHIER_MIO_RESET_DMAC(id) \
+ UNIPHIER_RESETX((id), 0x110, 17)
+
+const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_SD(2, 2),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(3, 0),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(4, 1),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(5, 2),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_MIO_RESET_DMAC(7),
+ UNIPHIER_MIO_RESET_USB2(8, 0),
+ UNIPHIER_MIO_RESET_USB2(9, 1),
+ UNIPHIER_MIO_RESET_USB2(10, 2),
+ UNIPHIER_MIO_RESET_USB2(11, 3),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(15, 3),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_RESET_END,
+};
+
+/* Peripheral reset data */
+#define UNIPHIER_PERI_RESET_UART(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 19 + (ch))
+
+#define UNIPHIER_PERI_RESET_I2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 5 + (ch))
+
+#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 24 + (ch))
+
+const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_I2C(4, 0),
+ UNIPHIER_PERI_RESET_I2C(5, 1),
+ UNIPHIER_PERI_RESET_I2C(6, 2),
+ UNIPHIER_PERI_RESET_I2C(7, 3),
+ UNIPHIER_PERI_RESET_I2C(8, 4),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_FI2C(4, 0),
+ UNIPHIER_PERI_RESET_FI2C(5, 1),
+ UNIPHIER_PERI_RESET_FI2C(6, 2),
+ UNIPHIER_PERI_RESET_FI2C(7, 3),
+ UNIPHIER_PERI_RESET_FI2C(8, 4),
+ UNIPHIER_PERI_RESET_FI2C(9, 5),
+ UNIPHIER_PERI_RESET_FI2C(10, 6),
+ UNIPHIER_RESET_END,
+};
+
+/* core implementation */
+struct uniphier_reset_priv {
+ struct reset_controller_dev rcdev;
+ struct device *dev;
+ struct regmap *regmap;
+ const struct uniphier_reset_data *data;
+};
+
+#define to_uniphier_reset_priv(_rcdev) \
+ container_of(_rcdev, struct uniphier_reset_priv, rcdev)
+
+static int uniphier_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, int assert)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int mask, val;
+
+ if (p->id != id)
+ continue;
+
+ mask = BIT(p->bit);
+
+ if (assert)
+ val = mask;
+ else
+ val = ~mask;
+
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ val = ~val;
+
+ return regmap_write_bits(priv->regmap, p->reg, mask, val);
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not handled\n", id);
+ return -EINVAL;
+}
+
+static int uniphier_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 1);
+}
+
+static int uniphier_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 0);
+}
+
+static int uniphier_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int val;
+ int ret, asserted;
+
+ if (p->id != id)
+ continue;
+
+ ret = regmap_read(priv->regmap, p->reg, &val);
+ if (ret)
+ return ret;
+
+ asserted = !!(val & BIT(p->bit));
+
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ asserted = !asserted;
+
+ return asserted;
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not found\n", id);
+ return -EINVAL;
+}
+
+static const struct reset_control_ops uniphier_reset_ops = {
+ .assert = uniphier_reset_assert,
+ .deassert = uniphier_reset_deassert,
+ .status = uniphier_reset_status,
+};
+
+static int uniphier_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_reset_priv *priv;
+ const struct uniphier_reset_data *p, *data;
+ struct regmap *regmap;
+ struct device_node *parent;
+ unsigned int nr_resets = 0;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap (error %ld)\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (p = data; p->id != UNIPHIER_RESET_ID_END; p++)
+ nr_resets = max(nr_resets, p->id + 1);
+
+ priv->rcdev.ops = &uniphier_reset_ops;
+ priv->rcdev.owner = dev->driver->owner;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.nr_resets = nr_resets;
+ priv->dev = dev;
+ priv->regmap = regmap;
+ priv->data = data;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id uniphier_reset_match[] = {
+ /* System reset */
+ {
+ .compatible = "socionext,uniphier-sld3-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-reset",
+ .data = uniphier_pro4_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-reset",
+ .data = uniphier_pro5_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-reset",
+ .data = uniphier_pxs2_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-reset",
+ .data = uniphier_ld11_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-reset",
+ .data = uniphier_ld20_sys_reset_data,
+ },
+ /* Media I/O reset */
+ {
+ .compatible = "socionext,uniphier-sld3-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ /* Peripheral reset */
+ {
+ .compatible = "socionext,uniphier-ld4-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_reset_match);
+
+static struct platform_driver uniphier_reset_driver = {
+ .probe = uniphier_reset_probe,
+ .driver = {
+ .name = "uniphier-reset",
+ .of_match_table = uniphier_reset_match,
+ },
+};
+module_platform_driver(uniphier_reset_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Reset Controller Driver");
+MODULE_LICENSE("GPL");
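A worked example of the UNIPHIER_RESET_ACTIVE_LOW handling in uniphier_reset_update() above may help: for a line declared with UNIPHIER_RESETX, asserting the reset clears the bit and deasserting sets it. The sketch below hard-codes a hypothetical line at register 0x2000, bit 10, purely for illustration:

#include <linux/bitops.h>
#include <linux/regmap.h>

static int example_update_active_low(struct regmap *regmap, bool assert)
{
	unsigned int mask = BIT(10);
	unsigned int val = assert ? mask : ~mask;

	val = ~val;	/* active low: invert before the masked write */

	/* assert writes 0 to bit 10, deassert writes 1 */
	return regmap_write_bits(regmap, 0x2000, mask, val);
}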
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 69a219387582..de31c5f14dd9 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -3,6 +3,20 @@ menu "Rpmsg drivers"
# RPMSG always gets selected by whoever wants it
config RPMSG
tristate
+
+config RPMSG_QCOM_SMD
+ tristate "Qualcomm Shared Memory Driver (SMD)"
+ depends on QCOM_SMEM
+ depends on QCOM_SMD=n
+ select RPMSG
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Driver
+ providing communication channels to remote processors in Qualcomm
+ platforms.
+
+config RPMSG_VIRTIO
+ tristate
+ select RPMSG
select VIRTIO
select VIRTUALIZATION
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 7617fcb8259f..ae9c9132cf76 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_RPMSG) += virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG) += rpmsg_core.o
+obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
+obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
new file mode 100644
index 000000000000..06fef2b4c814
--- /dev/null
+++ b/drivers/rpmsg/qcom_smd.c
@@ -0,0 +1,1434 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/wait.h>
+#include <linux/rpmsg.h>
+
+#include "rpmsg_internal.h"
+
+/*
+ * The Qualcomm Shared Memory communication solution provides point-to-point
+ * channels for clients to send and receive streaming or packet based data.
+ *
+ * Each channel consists of a control item (channel info) and a ring buffer
+ * pair. The channel info carry information related to channel state, flow
+ * control and the offsets within the ring buffer.
+ *
+ * All allocated channels are listed in an allocation table, identifying the
+ * pair of items by name, type and remote processor.
+ *
+ * Upon creating a new channel the remote processor allocates channel info and
+ * ring buffer items from the smem heap and populates the allocation table. An
+ * interrupt is sent to the other end of the channel, prompting a scan for new
+ * channels. A channel never goes away; it only changes state.
+ *
+ * The remote processor signals its intent to bring up the communication
+ * channel by setting the state of its end of the channel to "opening" and
+ * sending out an interrupt. We detect this change and register an smd device
+ * to consume the channel. Upon finding a consumer we finish the handshake and
+ * the channel is up.
+ *
+ * Upon closing a channel, the remote processor updates the state of its end
+ * of the channel and signals us; we then unregister any attached device and
+ * close our end of the channel.
+ *
+ * Devices attached to a channel can use the qcom_smd_send function to push
+ * data to the channel; this is done by copying the data into the tx ring
+ * buffer, updating the pointers in the channel info and signaling the remote
+ * processor.
+ *
+ * The remote processor does the equivalent when it transfers data; upon
+ * receiving the interrupt we check the channel info for new data and deliver
+ * it to the attached device. If the device is not ready to receive the data
+ * we leave it in the ring buffer for now.
+ */
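Since this file plugs SMD into the rpmsg framework, a channel exposed here is consumed through the generic rpmsg API rather than an SMD-specific one. A minimal hedged sketch of such a client; the "my-smd-channel" name and payload are illustrative and not part of this patch:

#include <linux/module.h>
#include <linux/rpmsg.h>

static int my_smd_cb(struct rpmsg_device *rpdev, void *data, int len,
		     void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes\n", len);
	return 0;
}

static int my_smd_probe(struct rpmsg_device *rpdev)
{
	char ping[] = "ping";

	/* rpdev->ept is backed by qcom_smd_create_ept() below */
	return rpmsg_send(rpdev->ept, ping, sizeof(ping));
}

static const struct rpmsg_device_id my_smd_match[] = {
	{ .name = "my-smd-channel" },
	{}
};

static struct rpmsg_driver my_smd_driver = {
	.probe = my_smd_probe,
	.callback = my_smd_cb,
	.id_table = my_smd_match,
	.drv = { .name = "my_smd_client" },
};
module_rpmsg_driver(my_smd_driver);

MODULE_LICENSE("GPL");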
+
+struct smd_channel_info;
+struct smd_channel_info_pair;
+struct smd_channel_info_word;
+struct smd_channel_info_word_pair;
+
+static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops;
+
+#define SMD_ALLOC_TBL_COUNT 2
+#define SMD_ALLOC_TBL_SIZE 64
+
+/*
+ * This lists the various smem heap items relevant for the allocation table and
+ * smd channel entries.
+ */
+static const struct {
+ unsigned alloc_tbl_id;
+ unsigned info_base_id;
+ unsigned fifo_base_id;
+} smem_items[SMD_ALLOC_TBL_COUNT] = {
+ {
+ .alloc_tbl_id = 13,
+ .info_base_id = 14,
+ .fifo_base_id = 338
+ },
+ {
+ .alloc_tbl_id = 266,
+ .info_base_id = 138,
+ .fifo_base_id = 202,
+ },
+};
+
+/**
+ * struct qcom_smd_edge - representing a remote processor
+ * @of_node: of_node handle for information related to this edge
+ * @edge_id: identifier of this edge
+ * @remote_pid: identifier of remote processor
+ * @irq: interrupt for signals on this edge
+ * @ipc_regmap: regmap handle holding the outgoing ipc register
+ * @ipc_offset: offset within @ipc_regmap of the register for ipc
+ * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
+ * @channels: list of all channels detected on this edge
+ * @channels_lock: guard for modifications of @channels
+ * @allocated: array of bitmaps representing already allocated channels
+ * @smem_available: last available amount of smem triggering a channel scan
+ * @scan_work: work item for discovering new channels
+ * @state_work: work item for edge state changes
+ */
+struct qcom_smd_edge {
+ struct device dev;
+
+ struct device_node *of_node;
+ unsigned edge_id;
+ unsigned remote_pid;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head channels;
+ spinlock_t channels_lock;
+
+ DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
+
+ unsigned smem_available;
+
+ wait_queue_head_t new_channel_event;
+
+ struct work_struct scan_work;
+ struct work_struct state_work;
+};
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+ SMD_CHANNEL_CLOSED,
+ SMD_CHANNEL_OPENING,
+ SMD_CHANNEL_OPENED,
+ SMD_CHANNEL_FLUSHING,
+ SMD_CHANNEL_CLOSING,
+ SMD_CHANNEL_RESET,
+ SMD_CHANNEL_RESET_OPENING
+};
+
+struct qcom_smd_device {
+ struct rpmsg_device rpdev;
+
+ struct qcom_smd_edge *edge;
+};
+
+struct qcom_smd_endpoint {
+ struct rpmsg_endpoint ept;
+
+ struct qcom_smd_channel *qsch;
+};
+
+#define to_smd_device(_rpdev) container_of(_rpdev, struct qcom_smd_device, rpdev)
+#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev)
+#define to_smd_endpoint(ept) container_of(ept, struct qcom_smd_endpoint, ept)
+
+/**
+ * struct qcom_smd_channel - smd channel struct
+ * @edge: qcom_smd_edge this channel is living on
+ * @qsept: reference to the associated smd endpoint
+ * @name: name of the channel
+ * @state: local state of the channel
+ * @remote_state: remote state of the channel
+ * @info: byte aligned outgoing/incoming channel info
+ * @info_word: word aligned outgoing/incoming channel info
+ * @tx_lock: lock to make writes to the channel mutually exclusive
+ * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
+ * @tx_fifo: pointer to the outgoing ring buffer
+ * @rx_fifo: pointer to the incoming ring buffer
+ * @fifo_size: size of each ring buffer
+ * @bounce_buffer: bounce buffer for reading wrapped packets
+ * @cb: callback function registered for this channel
+ * @recv_lock: guard for rx info modifications and cb pointer
+ * @pkt_size: size of the currently handled packet
+ * @list: list entry for @channels in qcom_smd_edge
+ */
+struct qcom_smd_channel {
+ struct qcom_smd_edge *edge;
+
+ struct qcom_smd_endpoint *qsept;
+ bool registered;
+
+ char *name;
+ enum smd_channel_state state;
+ enum smd_channel_state remote_state;
+
+ struct smd_channel_info_pair *info;
+ struct smd_channel_info_word_pair *info_word;
+
+ struct mutex tx_lock;
+ wait_queue_head_t fblockread_event;
+
+ void *tx_fifo;
+ void *rx_fifo;
+ int fifo_size;
+
+ void *bounce_buffer;
+
+ spinlock_t recv_lock;
+
+ int pkt_size;
+
+ void *drvdata;
+
+ struct list_head list;
+};
+
+/*
+ * Format of the smd_info smem items, for byte aligned channels.
+ */
+struct smd_channel_info {
+ __le32 state;
+ u8 fDSR;
+ u8 fCTS;
+ u8 fCD;
+ u8 fRI;
+ u8 fHEAD;
+ u8 fTAIL;
+ u8 fSTATE;
+ u8 fBLOCKREADINTR;
+ __le32 tail;
+ __le32 head;
+};
+
+struct smd_channel_info_pair {
+ struct smd_channel_info tx;
+ struct smd_channel_info rx;
+};
+
+/*
+ * Format of the smd_info smem items, for word aligned channels.
+ */
+struct smd_channel_info_word {
+ __le32 state;
+ __le32 fDSR;
+ __le32 fCTS;
+ __le32 fCD;
+ __le32 fRI;
+ __le32 fHEAD;
+ __le32 fTAIL;
+ __le32 fSTATE;
+ __le32 fBLOCKREADINTR;
+ __le32 tail;
+ __le32 head;
+};
+
+struct smd_channel_info_word_pair {
+ struct smd_channel_info_word tx;
+ struct smd_channel_info_word rx;
+};
+
+#define GET_RX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->rx.param) : \
+ channel->info->rx.param; \
+ })
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->rx.param : \
+ channel->info->rx.param); \
+ })
+
+#define SET_RX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = value; \
+ })
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = cpu_to_le32(value); \
+ })
+
+#define GET_TX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->tx.param) : \
+ channel->info->tx.param; \
+ })
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->tx.param : \
+ channel->info->tx.param); \
+ })
+
+#define SET_TX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = value; \
+ })
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = cpu_to_le32(value); \
+ })
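The GET_/SET_ macros above hide the fact that byte-aligned and word-aligned channel info layouts differ in both field width and endianness handling. An open-coded equivalent of GET_RX_CHANNEL_INFO(channel, tail), just to show what the macro expands to:

/* Illustration: open-coded equivalent of GET_RX_CHANNEL_INFO(channel, tail). */
static u32 example_get_rx_tail(struct qcom_smd_channel *channel)
{
	if (channel->info_word)
		return le32_to_cpu(channel->info_word->rx.tail);

	return le32_to_cpu(channel->info->rx.tail);
}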
+
+/**
+ * struct qcom_smd_alloc_entry - channel allocation entry
+ * @name: channel name
+ * @cid: channel index
+ * @flags: channel flags and edge id
+ * @ref_count: reference count of the channel
+ */
+struct qcom_smd_alloc_entry {
+ u8 name[20];
+ __le32 cid;
+ __le32 flags;
+ __le32 ref_count;
+} __packed;
+
+#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
+#define SMD_CHANNEL_FLAGS_STREAM BIT(8)
+#define SMD_CHANNEL_FLAGS_PACKET BIT(9)
+
+/*
+ * Each smd packet contains a 20 byte header, with the first 4 bytes holding
+ * the length of the packet.
+ */
+#define SMD_PACKET_HEADER_LEN 20
+
+/*
+ * Signal the remote processor associated with 'channel'.
+ */
+static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+
+ regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
+}
+
+/*
+ * Initialize the tx channel info
+ */
+static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
+{
+ SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fRI, 0);
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_INFO(channel, head, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
+
+ qcom_smd_signal_channel(channel);
+
+ channel->state = SMD_CHANNEL_CLOSED;
+ channel->pkt_size = 0;
+}
+
+/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+ rpmsg_rx_cb_t cb)
+{
+ struct rpmsg_endpoint *ept = &channel->qsept->ept;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ ept->cb = cb;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+};
+
+/*
+ * Calculate the amount of data available in the rx fifo
+ */
+static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+
+ head = GET_RX_CHANNEL_INFO(channel, head);
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ return (head - tail) & (channel->fifo_size - 1);
+}
+
+/*
+ * Set tx channel state and inform the remote processor
+ */
+static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
+ int state)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+ bool is_open = state == SMD_CHANNEL_OPENED;
+
+ if (channel->state == state)
+ return;
+
+ dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);
+
+ SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
+
+ SET_TX_CHANNEL_INFO(channel, state, state);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
+
+ channel->state = state;
+ qcom_smd_signal_channel(channel);
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that's required.
+ */
+static void smd_copy_to_fifo(void __iomem *dst,
+ const void *src,
+ size_t count,
+ bool word_aligned)
+{
+ if (word_aligned) {
+ __iowrite32_copy(dst, src, count / sizeof(u32));
+ } else {
+ memcpy_toio(dst, src, count);
+ }
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that is required.
+ */
+static void smd_copy_from_fifo(void *dst,
+ const void __iomem *src,
+ size_t count,
+ bool word_aligned)
+{
+ if (word_aligned) {
+ __ioread32_copy(dst, src, count / sizeof(u32));
+ } else {
+ memcpy_fromio(dst, src, count);
+ }
+}
+
+/*
+ * Read count bytes of data from the rx fifo into buf, but don't advance the
+ * tail.
+ */
+static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
+ void *buf, size_t count)
+{
+ bool word_aligned;
+ unsigned tail;
+ size_t len;
+
+ word_aligned = channel->info_word;
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ len = min_t(size_t, count, channel->fifo_size - tail);
+ if (len) {
+ smd_copy_from_fifo(buf,
+ channel->rx_fifo + tail,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_from_fifo(buf + len,
+ channel->rx_fifo,
+ count - len,
+ word_aligned);
+ }
+
+ return count;
+}
+
+/*
+ * Advance the rx tail by count bytes.
+ */
+static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
+ size_t count)
+{
+ unsigned tail;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+ tail += count;
+ tail &= (channel->fifo_size - 1);
+ SET_RX_CHANNEL_INFO(channel, tail, tail);
+}
+
+/*
+ * Read out a single packet from the rx fifo and deliver it to the device
+ */
+static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
+{
+ struct rpmsg_endpoint *ept = &channel->qsept->ept;
+ unsigned tail;
+ size_t len;
+ void *ptr;
+ int ret;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ /* Use bounce buffer if the data wraps */
+ if (tail + channel->pkt_size >= channel->fifo_size) {
+ ptr = channel->bounce_buffer;
+ len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
+ } else {
+ ptr = channel->rx_fifo + tail;
+ len = channel->pkt_size;
+ }
+
+ ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
+ if (ret < 0)
+ return ret;
+
+ /* Only forward the tail if the client consumed the data */
+ qcom_smd_channel_advance(channel, len);
+
+ channel->pkt_size = 0;
+
+ return 0;
+}
+
+/*
+ * Per channel interrupt handling
+ */
+static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
+{
+ bool need_state_scan = false;
+ int remote_state;
+ __le32 pktlen;
+ int avail;
+ int ret;
+
+ /* Handle state changes */
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != channel->remote_state) {
+ channel->remote_state = remote_state;
+ need_state_scan = true;
+ }
+ /* Indicate that we have seen any state change */
+ SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
+
+ /* Signal waiting qcom_smd_send() about the interrupt */
+ if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /* Don't consume any data until we've opened the channel */
+ if (channel->state != SMD_CHANNEL_OPENED)
+ goto out;
+
+ /* Indicate that we've seen the new data */
+ SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
+
+ /* Consume data */
+ for (;;) {
+ avail = qcom_smd_channel_get_rx_avail(channel);
+
+ if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
+ qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
+ qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
+ channel->pkt_size = le32_to_cpu(pktlen);
+ } else if (channel->pkt_size && avail >= channel->pkt_size) {
+ ret = qcom_smd_channel_recv_single(channel);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ /* Indicate that we have seen and updated tail */
+ SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
+
+ /* Signal the remote that we've consumed the data (if requested) */
+ if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+ }
+
+out:
+ return need_state_scan;
+}
+
+/*
+ * The edge interrupts are triggered by the remote processor on state changes,
+ * channel info updates or when new channels are created.
+ */
+static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
+{
+ struct qcom_smd_edge *edge = data;
+ struct qcom_smd_channel *channel;
+ unsigned available;
+ bool kick_scanner = false;
+ bool kick_state = false;
+
+ /*
+ * Handle state changes or data on each of the channels on this edge
+ */
+ spin_lock(&edge->channels_lock);
+ list_for_each_entry(channel, &edge->channels, list) {
+ spin_lock(&channel->recv_lock);
+ kick_state |= qcom_smd_channel_intr(channel);
+ spin_unlock(&channel->recv_lock);
+ }
+ spin_unlock(&edge->channels_lock);
+
+ /*
+ * Creating a new channel requires allocating an smem entry, so we only
+	 * have to scan if the amount of available space in smem has changed
+	 * since the last scan.
+ */
+ available = qcom_smem_get_free_space(edge->remote_pid);
+ if (available != edge->smem_available) {
+ edge->smem_available = available;
+ kick_scanner = true;
+ }
+
+ if (kick_scanner)
+ schedule_work(&edge->scan_work);
+ if (kick_state)
+ schedule_work(&edge->state_work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Calculate how much space is available in the tx fifo.
+ */
+static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+ unsigned mask = channel->fifo_size - 1;
+
+ head = GET_TX_CHANNEL_INFO(channel, head);
+ tail = GET_TX_CHANNEL_INFO(channel, tail);
+
+ return mask - ((head - tail) & mask);
+}
+
+/*
+ * Write count bytes of data into channel, possibly wrapping in the ring buffer
+ */
+static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
+ const void *data,
+ size_t count)
+{
+ bool word_aligned;
+ unsigned head;
+ size_t len;
+
+ word_aligned = channel->info_word;
+ head = GET_TX_CHANNEL_INFO(channel, head);
+
+ len = min_t(size_t, count, channel->fifo_size - head);
+ if (len) {
+ smd_copy_to_fifo(channel->tx_fifo + head,
+ data,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_to_fifo(channel->tx_fifo,
+ data + len,
+ count - len,
+ word_aligned);
+ }
+
+ head += count;
+ head &= (channel->fifo_size - 1);
+ SET_TX_CHANNEL_INFO(channel, head, head);
+
+ return count;
+}
+
+/**
+ * qcom_smd_send - write data to smd channel
+ * @channel: channel handle
+ * @data: buffer of data to write
+ * @len: number of bytes to write
+ *
+ * This is a blocking write of len bytes into the channel's tx ring buffer,
+ * followed by a signal to the remote end. It will sleep until there is enough
+ * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
+ * mechanism to avoid polling.
+ */
+static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
+ int len, bool wait)
+{
+ __le32 hdr[5] = { cpu_to_le32(len), };
+ int tlen = sizeof(hdr) + len;
+ int ret;
+
+ /* Word aligned channels only accept word size aligned data */
+ if (channel->info_word && len % 4)
+ return -EINVAL;
+
+ /* Reject packets that are too big */
+ if (tlen >= channel->fifo_size)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&channel->tx_lock);
+ if (ret)
+ return ret;
+
+ while (qcom_smd_get_tx_avail(channel) < tlen) {
+ if (!wait) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (channel->state != SMD_CHANNEL_OPENED) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
+
+ ret = wait_event_interruptible(channel->fblockread_event,
+ qcom_smd_get_tx_avail(channel) >= tlen ||
+ channel->state != SMD_CHANNEL_OPENED);
+ if (ret)
+ goto out;
+
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
+ }
+
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
+
+ qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
+ qcom_smd_write_fifo(channel, data, len);
+
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
+
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+
+out:
+ mutex_unlock(&channel->tx_lock);
+
+ return ret;
+}
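Whether the send blocks is decided purely by the 'wait' flag above; through the rpmsg layer this corresponds to rpmsg_send() versus rpmsg_trysend() on the endpoint. A short hedged usage sketch from a client's point of view:

#include <linux/rpmsg.h>

static int example_send(struct rpmsg_device *rpdev, void *buf, int len,
			bool can_sleep)
{
	if (can_sleep)
		return rpmsg_send(rpdev->ept, buf, len);    /* wait == true  */

	return rpmsg_trysend(rpdev->ept, buf, len);         /* wait == false */
}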
+
+/*
+ * Helper for opening a channel
+ */
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+ rpmsg_rx_cb_t cb)
+{
+ size_t bb_size;
+
+ /*
+	 * Packets are at most 4k, but use a smaller bounce buffer if the
+	 * fifo is smaller
+ */
+ bb_size = min(channel->fifo_size, SZ_4K);
+ channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+ if (!channel->bounce_buffer)
+ return -ENOMEM;
+
+ qcom_smd_channel_set_callback(channel, cb);
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+ return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_set_callback(channel, NULL);
+
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_reset(channel);
+}
+
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_channel *ret = NULL;
+ unsigned long flags;
+ unsigned state;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (strcmp(channel->name, name))
+ continue;
+
+ state = GET_RX_CHANNEL_INFO(channel, state);
+ if (state != SMD_CHANNEL_OPENING &&
+ state != SMD_CHANNEL_OPENED)
+ continue;
+
+ ret = channel;
+ break;
+ }
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ return ret;
+}
+
+static void __ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ kfree(to_smd_endpoint(ept));
+}
+
+static struct rpmsg_endpoint *qcom_smd_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ struct qcom_smd_endpoint *qsept;
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_device *qsdev = to_smd_device(rpdev);
+ struct qcom_smd_edge *edge = qsdev->edge;
+ struct rpmsg_endpoint *ept;
+ const char *name = chinfo.name;
+ int ret;
+
+ /* Wait up to HZ for the channel to appear */
+ ret = wait_event_interruptible_timeout(edge->new_channel_event,
+ (channel = qcom_smd_find_channel(edge, name)) != NULL,
+ HZ);
+ if (!ret)
+ return NULL;
+
+ if (channel->state != SMD_CHANNEL_CLOSED) {
+ dev_err(&rpdev->dev, "channel %s is busy\n", channel->name);
+ return NULL;
+ }
+
+ qsept = kzalloc(sizeof(*qsept), GFP_KERNEL);
+ if (!qsept)
+ return NULL;
+
+ ept = &qsept->ept;
+
+ kref_init(&ept->refcount);
+
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+ ept->ops = &qcom_smd_endpoint_ops;
+
+ channel->qsept = qsept;
+ qsept->qsch = channel;
+
+ ret = qcom_smd_channel_open(channel, cb);
+ if (ret)
+ goto free_ept;
+
+ return ept;
+
+free_ept:
+ channel->qsept = NULL;
+ kref_put(&ept->refcount, __ept_release);
+ return NULL;
+}
+
+static void qcom_smd_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+ struct qcom_smd_channel *ch = qsept->qsch;
+
+ qcom_smd_channel_close(ch);
+ ch->qsept = NULL;
+ kref_put(&ept->refcount, __ept_release);
+}
+
+static int qcom_smd_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, true);
+}
+
+static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
+
+ return __qcom_smd_send(qsept->qsch, data, len, false);
+}
+
+/*
+ * Finds the device_node for the smd child interested in this channel.
+ */
+static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
+ const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ const char *key;
+ int ret;
+
+ for_each_available_child_of_node(edge_node, child) {
+ key = "qcom,smd-channels";
+ ret = of_property_read_string(child, key, &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
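+
+/*
+ * Illustrative only: a child of the edge node is matched purely on its
+ * "qcom,smd-channels" property, e.g. (node, compatible and channel names
+ * below are made up, not taken from any binding document):
+ *
+ *   example-client {
+ *           compatible = "vendor,example-smd-client";
+ *           qcom,smd-channels = "example_channel";
+ *   };
+ */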
+
+static const struct rpmsg_device_ops qcom_smd_device_ops = {
+ .create_ept = qcom_smd_create_ept,
+};
+
+static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
+ .destroy_ept = qcom_smd_destroy_ept,
+ .send = qcom_smd_send,
+ .trysend = qcom_smd_trysend,
+};
+
+/*
+ * Create an smd client device for a channel that is being opened.
+ */
+static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev;
+ struct rpmsg_device *rpdev;
+ struct qcom_smd_edge *edge = channel->edge;
+
+ dev_dbg(&edge->dev, "registering '%s'\n", channel->name);
+
+ qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
+ if (!qsdev)
+ return -ENOMEM;
+
+ /* Link qsdev to our SMD edge */
+ qsdev->edge = edge;
+
+ /* Assign callbacks for rpmsg_device */
+ qsdev->rpdev.ops = &qcom_smd_device_ops;
+
+ /* Assign public information to the rpmsg_device */
+ rpdev = &qsdev->rpdev;
+ strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ rpdev->src = RPMSG_ADDR_ANY;
+ rpdev->dst = RPMSG_ADDR_ANY;
+
+ rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name);
+ rpdev->dev.parent = &edge->dev;
+
+ return rpmsg_register_device(rpdev);
+}
+
+/*
+ * Allocate the qcom_smd_channel object for a newly found smd channel,
+ * retrieving and validating the smem items involved.
+ */
+static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
+ unsigned smem_info_item,
+ unsigned smem_fifo_item,
+ char *name)
+{
+ struct qcom_smd_channel *channel;
+ size_t fifo_size;
+ size_t info_size;
+ void *fifo_base;
+ void *info;
+ int ret;
+
+ channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ channel->edge = edge;
+ channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
+ if (!channel->name)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&channel->tx_lock);
+ spin_lock_init(&channel->recv_lock);
+ init_waitqueue_head(&channel->fblockread_event);
+
+ info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto free_name_and_channel;
+ }
+
+ /*
+ * Use the size of the item to figure out which channel info struct to
+ * use.
+ */
+ if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
+ channel->info_word = info;
+ } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
+ channel->info = info;
+ } else {
+ dev_err(&edge->dev,
+ "channel info of size %zu not supported\n", info_size);
+ ret = -EINVAL;
+ goto free_name_and_channel;
+ }
+
+ fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
+ if (IS_ERR(fifo_base)) {
+ ret = PTR_ERR(fifo_base);
+ goto free_name_and_channel;
+ }
+
+ /* The channel consists of an rx and a tx fifo of equal size */
+ fifo_size /= 2;
+
+ dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ name, info_size, fifo_size);
+
+ channel->tx_fifo = fifo_base;
+ channel->rx_fifo = fifo_base + fifo_size;
+ channel->fifo_size = fifo_size;
+
+ qcom_smd_channel_reset(channel);
+
+ return channel;
+
+free_name_and_channel:
+ devm_kfree(&edge->dev, channel->name);
+ devm_kfree(&edge->dev, channel);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * Scans the allocation table for any newly allocated channels, calls
+ * qcom_smd_create_channel() to create representations of these and adds
+ * them to the edge's list of channels.
+ */
+static void qcom_channel_scan_worker(struct work_struct *work)
+{
+ struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
+ struct qcom_smd_alloc_entry *alloc_tbl;
+ struct qcom_smd_alloc_entry *entry;
+ struct qcom_smd_channel *channel;
+ unsigned long flags;
+ unsigned fifo_id;
+ unsigned info_id;
+ int tbl;
+ int i;
+ u32 eflags, cid;
+
+ for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
+ alloc_tbl = qcom_smem_get(edge->remote_pid,
+ smem_items[tbl].alloc_tbl_id, NULL);
+ if (IS_ERR(alloc_tbl))
+ continue;
+
+ for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
+ entry = &alloc_tbl[i];
+ eflags = le32_to_cpu(entry->flags);
+ if (test_bit(i, edge->allocated[tbl]))
+ continue;
+
+ if (entry->ref_count == 0)
+ continue;
+
+ if (!entry->name[0])
+ continue;
+
+ if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
+ continue;
+
+ if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+ continue;
+
+ cid = le32_to_cpu(entry->cid);
+ info_id = smem_items[tbl].info_base_id + cid;
+ fifo_id = smem_items[tbl].fifo_base_id + cid;
+
+ channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
+ if (IS_ERR(channel))
+ continue;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_add(&channel->list, &edge->channels);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
+ set_bit(i, edge->allocated[tbl]);
+
+ wake_up_interruptible(&edge->new_channel_event);
+ }
+ }
+
+ schedule_work(&edge->state_work);
+}
+
+/*
+ * This per-edge worker scans smem for any new channels and registers them. It
+ * then scans all registered channels for state changes that should be handled
+ * by creating or destroying smd client devices for the registered channels.
+ *
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
+ */
+static void qcom_channel_state_worker(struct work_struct *work)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = container_of(work,
+ struct qcom_smd_edge,
+ state_work);
+ struct rpmsg_channel_info chinfo;
+ unsigned remote_state;
+ unsigned long flags;
+
+ /*
+ * Register a device for any closed channel where the remote processor
+ * is showing interest in opening the channel.
+ */
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_CLOSED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != SMD_CHANNEL_OPENING &&
+ remote_state != SMD_CHANNEL_OPENED)
+ continue;
+
+ if (channel->registered)
+ continue;
+
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+ qcom_smd_create_device(channel);
+ channel->registered = true;
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ }
+
+ /*
+ * Unregister the device for any open channel where the
+ * remote processor is closing the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_OPENING &&
+ channel->state != SMD_CHANNEL_OPENED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state == SMD_CHANNEL_OPENING ||
+ remote_state == SMD_CHANNEL_OPENED)
+ continue;
+
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+ rpmsg_unregister_device(&edge->dev, &chinfo);
+ channel->registered = false;
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ }
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+}
+
+/*
+ * Parses an of_node describing an edge.
+ */
+static int qcom_smd_parse_edge(struct device *dev,
+ struct device_node *node,
+ struct qcom_smd_edge *edge)
+{
+ struct device_node *syscon_np;
+ const char *key;
+ int irq;
+ int ret;
+
+ INIT_LIST_HEAD(&edge->channels);
+ spin_lock_init(&edge->channels_lock);
+
+ INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+ INIT_WORK(&edge->state_work, qcom_channel_state_worker);
+
+ edge->of_node = of_node_get(node);
+
+ key = "qcom,smd-edge";
+ ret = of_property_read_u32(node, key, &edge->edge_id);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
+
+ edge->remote_pid = QCOM_SMEM_HOST_ANY;
+ key = "qcom,remote-pid";
+ of_property_read_u32(node, key, &edge->remote_pid);
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(edge->ipc_regmap))
+ return PTR_ERR(edge->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
+ return 0;
+}
+
+/*
+ * Release function for an edge.
+ * Reset the state of each associated channel and free the edge context.
+ */
+static void qcom_smd_edge_release(struct device *dev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_RX_CHANNEL_INFO(channel, head, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
+ }
+
+ kfree(edge);
+}
+
+/**
+ * qcom_smd_register_edge() - register an edge based on a device_node
+ * @parent: parent device for the edge
+ * @node: device_node describing the edge
+ *
+ * Returns an edge reference, or negative ERR_PTR() on failure.
+ */
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
+{
+ struct qcom_smd_edge *edge;
+ int ret;
+
+ edge = kzalloc(sizeof(*edge), GFP_KERNEL);
+ if (!edge)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&edge->new_channel_event);
+
+ edge->dev.parent = parent;
+ edge->dev.release = qcom_smd_edge_release;
+ dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
+ ret = device_register(&edge->dev);
+ if (ret) {
+ pr_err("failed to register smd edge\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = qcom_smd_parse_edge(&edge->dev, node, edge);
+ if (ret) {
+ dev_err(&edge->dev, "failed to parse smd edge\n");
+ goto unregister_dev;
+ }
+
+ schedule_work(&edge->scan_work);
+
+ return edge;
+
+unregister_dev:
+ put_device(&edge->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_smd_register_edge);
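+
+/*
+ * A minimal sketch (not part of this file): a controlling driver would
+ * typically register one edge per description node from its probe, much
+ * like qcom_smd_probe() below, and pair it with qcom_smd_unregister_edge()
+ * on teardown. "node" and the error handling are illustrative assumptions.
+ *
+ *   edge = qcom_smd_register_edge(&pdev->dev, node);
+ *   if (IS_ERR(edge))
+ *           return PTR_ERR(edge);
+ */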
+
+static int qcom_smd_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+
+ return 0;
+}
+
+/**
+ * qcom_smd_unregister_edge() - release an edge and its children
+ * @edge: edge reference acquired from qcom_smd_register_edge
+ */
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ int ret;
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+
+ ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
+ if (ret)
+ dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
+
+ device_unregister(&edge->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smd_unregister_edge);
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ void *p;
+
+ /* Defer probing until smem is available */
+ p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
+ if (PTR_ERR(p) == -EPROBE_DEFER)
+ return PTR_ERR(p);
+
+ for_each_available_child_of_node(pdev->dev.of_node, node)
+ qcom_smd_register_edge(&pdev->dev, node);
+
+ return 0;
+}
+
+static int qcom_smd_remove_edge(struct device *dev, void *data)
+{
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+ return qcom_smd_unregister_edge(edge);
+}
+
+/*
+ * Shut down all smd clients by making sure that each edge stops processing
+ * events and scanning for new channels, then call destroy on the devices.
+ */
+static int qcom_smd_remove(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_smd_of_match[] = {
+ { .compatible = "qcom,smd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
+
+static struct platform_driver qcom_smd_driver = {
+ .probe = qcom_smd_probe,
+ .remove = qcom_smd_remove,
+ .driver = {
+ .name = "qcom-smd",
+ .of_match_table = qcom_smd_of_match,
+ },
+};
+
+static int __init qcom_smd_init(void)
+{
+ return platform_driver_register(&qcom_smd_driver);
+}
+subsys_initcall(qcom_smd_init);
+
+static void __exit qcom_smd_exit(void)
+{
+ platform_driver_unregister(&qcom_smd_driver);
+}
+module_exit(qcom_smd_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
new file mode 100644
index 000000000000..b6ea9ffa7381
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -0,0 +1,498 @@
+/*
+ * remote processor messaging bus
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "rpmsg_internal.h"
+
+/**
+ * rpmsg_create_ept() - create a new rpmsg_endpoint
+ * @rpdev: rpmsg channel device
+ * @cb: rx callback handler
+ * @priv: private data for the driver's use
+ * @chinfo: channel_info with the local rpmsg address to bind with @cb
+ *
+ * Every rpmsg address in the system is bound to an rx callback (so when
+ * inbound messages arrive, they are dispatched by the rpmsg bus using the
+ * appropriate callback handler) by means of an rpmsg_endpoint struct.
+ *
+ * This function allows drivers to create such an endpoint, and by that,
+ * bind a callback, and possibly some private data too, to an rpmsg address
+ * (either one that is known in advance, or one that will be dynamically
+ * assigned for them).
+ *
+ * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
+ * is already created for them when they are probed by the rpmsg bus
+ * (using the rx callback provided when they registered to the rpmsg bus).
+ *
+ * So things should just work for simple drivers: they already have an
+ * endpoint, their rx callback is bound to their rpmsg address, and when
+ * relevant inbound messages arrive (i.e. messages whose dst address
+ * equals the src address of their rpmsg channel), the driver's handler
+ * is invoked to process them.
+ *
+ * That said, more complicated drivers might need to allocate
+ * additional rpmsg addresses, and bind them to different rx callbacks.
+ * To accomplish that, those drivers need to call this function.
+ *
+ * Drivers should provide their @rpdev channel (so the new endpoint would belong
+ * to the same remote processor their channel belongs to), an rx callback
+ * function, an optional private data (which is provided back when the
+ * rx callback is invoked), and an address they want to bind with the
+ * callback. If the source address in @chinfo is RPMSG_ADDR_ANY, then
+ * rpmsg_create_ept will dynamically assign them an available rpmsg address
+ * (drivers should have a very good reason not to always use RPMSG_ADDR_ANY
+ * here).
+ *
+ * Returns a pointer to the endpoint on success, or NULL on error.
+ */
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ return rpdev->ops->create_ept(rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(rpmsg_create_ept);
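+
+/*
+ * A minimal sketch (not part of this file): a driver binding an additional
+ * rx callback to a dynamically assigned address. The callback and channel
+ * name are hypothetical.
+ *
+ *   struct rpmsg_channel_info chinfo = {
+ *           .src = RPMSG_ADDR_ANY,
+ *           .dst = RPMSG_ADDR_ANY,
+ *   };
+ *   struct rpmsg_endpoint *ept;
+ *
+ *   strncpy(chinfo.name, "example-ept", sizeof(chinfo.name));
+ *   ept = rpmsg_create_ept(rpdev, example_cb, NULL, chinfo);
+ *   if (!ept)
+ *           return -ENOMEM;
+ */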
+
+/**
+ * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
+ * @ept: endpoint to destroy
+ *
+ * Should be used by drivers to destroy an rpmsg endpoint previously
+ * created with rpmsg_create_ept().
+ */
+void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ ept->ops->destroy_ept(ept);
+}
+EXPORT_SYMBOL(rpmsg_destroy_ept);
+
+/**
+ * rpmsg_send() - send a message across to the remote processor
+ * @ept: the rpmsg endpoint
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len on the @ept endpoint.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to, using @ept's address as source and its associated
+ * rpmsg device's address as destination.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ return ept->ops->send(ept, data, len);
+}
+EXPORT_SYMBOL(rpmsg_send);
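+
+/*
+ * A minimal sketch (not part of this file): a client driver sending a
+ * fixed-size message on its default endpoint from process context. The
+ * "msg" structure is hypothetical.
+ *
+ *   ret = rpmsg_send(rpdev->ept, &msg, sizeof(msg));
+ *   if (ret)
+ *           dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+ */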
+
+/**
+ * rpmsg_sendto() - send a message across to the remote processor, specify dst
+ * @ept: the rpmsg endpoint
+ * @data: payload of message
+ * @len: length of payload
+ * @dst: destination address
+ *
+ * This function sends @data of length @len to the remote @dst address.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to, using @ept's address as source.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ return ept->ops->sendto(ept, data, len, dst);
+}
+EXPORT_SYMBOL(rpmsg_sendto);
+
+/**
+ * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
+ * @ept: the rpmsg endpoint
+ * @src: source address
+ * @dst: destination address
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len to the remote @dst address,
+ * and uses @src as the source address.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len)
+{
+ return ept->ops->send_offchannel(ept, src, dst, data, len);
+}
+EXPORT_SYMBOL(rpmsg_send_offchannel);
+
+/**
+ * rpmsg_trysend() - send a message across to the remote processor
+ * @ept: the rpmsg endpoint
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len on the @ept endpoint.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to, using @ept's address as source and its associated
+ * rpdev's address as destination.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ return ept->ops->trysend(ept, data, len);
+}
+EXPORT_SYMBOL(rpmsg_trysend);
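+
+/*
+ * A minimal sketch (not part of this file): the non-blocking variant is
+ * useful when the caller would rather back off and retry later than sleep
+ * for a free TX buffer; the early return shown is only an example policy.
+ *
+ *   ret = rpmsg_trysend(rpdev->ept, &msg, sizeof(msg));
+ *   if (ret == -ENOMEM)
+ *           return ret;
+ */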
+
+/**
+ * rpmsg_trysendto() - send a message across to the remote processor, specify dst
+ * @ept: the rpmsg endpoint
+ * @data: payload of message
+ * @len: length of payload
+ * @dst: destination address
+ *
+ * This function sends @data of length @len to the remote @dst address.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to, using @ept's address as source.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
+{
+ return ept->ops->trysendto(ept, data, len, dst);
+}
+EXPORT_SYMBOL(rpmsg_trysendto);
+
+/**
+ * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
+ * @ept: the rpmsg endpoint
+ * @src: source address
+ * @dst: destination address
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len to the remote @dst address,
+ * and uses @src as the source address.
+ * The message will be sent to the remote processor which the @ept
+ * endpoint belongs to.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len)
+{
+ return ept->ops->trysend_offchannel(ept, src, dst, data, len);
+}
+EXPORT_SYMBOL(rpmsg_trysend_offchannel);
+
+/*
+ * match an rpmsg channel with a channel info struct.
+ * this is used to make sure we're not creating rpmsg devices for channels
+ * that already exist.
+ */
+static int rpmsg_device_match(struct device *dev, void *data)
+{
+ struct rpmsg_channel_info *chinfo = data;
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+
+ if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
+ return 0;
+
+ if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
+ return 0;
+
+ if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
+ return 0;
+
+ /* found a match ! */
+ return 1;
+}
+
+struct device *rpmsg_find_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo)
+{
+ return device_find_child(parent, chinfo, rpmsg_device_match);
+
+}
+EXPORT_SYMBOL(rpmsg_find_device);
+
+/* sysfs show configuration fields */
+#define rpmsg_show_attr(field, path, format_string) \
+static ssize_t \
+field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
+ \
+ return sprintf(buf, format_string, rpdev->path); \
+}
+
+/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
+rpmsg_show_attr(name, id.name, "%s\n");
+rpmsg_show_attr(src, src, "0x%x\n");
+rpmsg_show_attr(dst, dst, "0x%x\n");
+rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+
+ return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
+}
+
+static struct device_attribute rpmsg_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(modalias),
+ __ATTR_RO(dst),
+ __ATTR_RO(src),
+ __ATTR_RO(announce),
+ __ATTR_NULL
+};
+
+/* rpmsg devices and drivers are matched using the service name */
+static inline int rpmsg_id_match(const struct rpmsg_device *rpdev,
+ const struct rpmsg_device_id *id)
+{
+ return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
+}
+
+/* match rpmsg channel and rpmsg driver */
+static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
+ const struct rpmsg_device_id *ids = rpdrv->id_table;
+ unsigned int i;
+
+ if (ids)
+ for (i = 0; ids[i].name[0]; i++)
+ if (rpmsg_id_match(rpdev, &ids[i]))
+ return 1;
+
+ return of_driver_match_device(dev, drv);
+}
+
+static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
+ rpdev->id.name);
+}
+
+/*
+ * when an rpmsg driver is probed with a channel, we seamlessly create
+ * an endpoint for it, binding its rx callback to a unique local rpmsg
+ * address.
+ *
+ * if we need to, we also announce this channel to the remote
+ * processor (needed in case the driver is exposing an rpmsg service).
+ */
+static int rpmsg_dev_probe(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ struct rpmsg_channel_info chinfo = {};
+ struct rpmsg_endpoint *ept;
+ int err;
+
+ strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ chinfo.src = rpdev->src;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo);
+ if (!ept) {
+ dev_err(dev, "failed to create endpoint\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rpdev->ept = ept;
+ rpdev->src = ept->addr;
+
+ err = rpdrv->probe(rpdev);
+ if (err) {
+ dev_err(dev, "%s: failed: %d\n", __func__, err);
+ rpmsg_destroy_ept(ept);
+ goto out;
+ }
+
+ if (rpdev->ops->announce_create)
+ err = rpdev->ops->announce_create(rpdev);
+out:
+ return err;
+}
+
+static int rpmsg_dev_remove(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ int err = 0;
+
+ if (rpdev->ops->announce_destroy)
+ err = rpdev->ops->announce_destroy(rpdev);
+
+ rpdrv->remove(rpdev);
+
+ rpmsg_destroy_ept(rpdev->ept);
+
+ return err;
+}
+
+static struct bus_type rpmsg_bus = {
+ .name = "rpmsg",
+ .match = rpmsg_dev_match,
+ .dev_attrs = rpmsg_dev_attrs,
+ .uevent = rpmsg_uevent,
+ .probe = rpmsg_dev_probe,
+ .remove = rpmsg_dev_remove,
+};
+
+static void rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+
+ kfree(rpdev);
+}
+
+int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ int ret;
+
+ dev_set_name(&rpdev->dev, "%s:%s",
+ dev_name(dev->parent), rpdev->id.name);
+
+ rpdev->dev.bus = &rpmsg_bus;
+ rpdev->dev.release = rpmsg_release_device;
+
+ ret = device_register(&rpdev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&rpdev->dev);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_register_device);
+
+/*
+ * find an existing channel using its name + address properties,
+ * and destroy it
+ */
+int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct device *dev;
+
+ dev = rpmsg_find_device(parent, chinfo);
+ if (!dev)
+ return -EINVAL;
+
+ device_unregister(dev);
+
+ put_device(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmsg_unregister_device);
+
+/**
+ * __register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
+ * @rpdrv: pointer to a struct rpmsg_driver
+ * @owner: owning module/driver
+ *
+ * Returns 0 on success, and an appropriate error value on failure.
+ */
+int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
+{
+ rpdrv->drv.bus = &rpmsg_bus;
+ rpdrv->drv.owner = owner;
+ return driver_register(&rpdrv->drv);
+}
+EXPORT_SYMBOL(__register_rpmsg_driver);
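+
+/*
+ * A minimal sketch (not part of this file) of a client driver registration;
+ * all example_* names are hypothetical. Most drivers use the
+ * register_rpmsg_driver()/module_rpmsg_driver() helpers rather than calling
+ * this function directly.
+ *
+ *   static const struct rpmsg_device_id example_id_table[] = {
+ *           { .name = "example-service" },
+ *           { },
+ *   };
+ *
+ *   static struct rpmsg_driver example_rpmsg_driver = {
+ *           .drv.name  = "example_rpmsg",
+ *           .id_table  = example_id_table,
+ *           .probe     = example_probe,
+ *           .callback  = example_cb,
+ *           .remove    = example_remove,
+ *   };
+ *   module_rpmsg_driver(example_rpmsg_driver);
+ */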
+
+/**
+ * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
+ * @rpdrv: pointer to a struct rpmsg_driver
+ *
+ * Returns 0 on success, and an appropriate error value on failure.
+ */
+void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
+{
+ driver_unregister(&rpdrv->drv);
+}
+EXPORT_SYMBOL(unregister_rpmsg_driver);
+
+
+static int __init rpmsg_init(void)
+{
+ int ret;
+
+ ret = bus_register(&rpmsg_bus);
+ if (ret)
+ pr_err("failed to register rpmsg bus: %d\n", ret);
+
+ return ret;
+}
+postcore_initcall(rpmsg_init);
+
+static void __exit rpmsg_fini(void)
+{
+ bus_unregister(&rpmsg_bus);
+}
+module_exit(rpmsg_fini);
+
+MODULE_DESCRIPTION("remote processor messaging bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
new file mode 100644
index 000000000000..8075a20f919b
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -0,0 +1,82 @@
+/*
+ * remote processor messaging bus internals
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RPMSG_INTERNAL_H__
+#define __RPMSG_INTERNAL_H__
+
+#include <linux/rpmsg.h>
+
+#define to_rpmsg_device(d) container_of(d, struct rpmsg_device, dev)
+#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
+
+/**
+ * struct rpmsg_device_ops - indirection table for the rpmsg_device operations
+ * @create_ept: create backend-specific endpoint, required
+ * @announce_create: announce presence of new channel, optional
+ * @announce_destroy: announce destruction of channel, optional
+ *
+ * Indirection table for the operations that a rpmsg backend should implement.
+ * @announce_create and @announce_destroy are optional as the backend might
+ * advertise new channels implicitly by creating the endpoints.
+ */
+struct rpmsg_device_ops {
+ struct rpmsg_endpoint *(*create_ept)(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo);
+
+ int (*announce_create)(struct rpmsg_device *ept);
+ int (*announce_destroy)(struct rpmsg_device *ept);
+};
+
+/**
+ * struct rpmsg_endpoint_ops - indirection table for rpmsg_endpoint operations
+ * @destroy_ept: destroy the given endpoint, required
+ * @send: see @rpmsg_send(), required
+ * @sendto: see @rpmsg_sendto(), optional
+ * @send_offchannel: see @rpmsg_send_offchannel(), optional
+ * @trysend: see @rpmsg_trysend(), required
+ * @trysendto: see @rpmsg_trysendto(), optional
+ * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
+ *
+ * Indirection table for the operations that a rpmsg backend should implement.
+ * In addition to @destroy_ept, the backend must at least implement @send and
+ * @trysend, while the variants sending data off-channel are optional.
+ */
+struct rpmsg_endpoint_ops {
+ void (*destroy_ept)(struct rpmsg_endpoint *ept);
+
+ int (*send)(struct rpmsg_endpoint *ept, void *data, int len);
+ int (*sendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+ int (*send_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+
+ int (*trysend)(struct rpmsg_endpoint *ept, void *data, int len);
+ int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+ int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
+};
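+
+/*
+ * A minimal sketch (not part of this header) of the smallest valid endpoint
+ * ops table a backend could provide; the example_* handlers are hypothetical.
+ *
+ *   static const struct rpmsg_endpoint_ops example_endpoint_ops = {
+ *           .destroy_ept = example_destroy_ept,
+ *           .send        = example_send,
+ *           .trysend     = example_trysend,
+ *   };
+ */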
+
+int rpmsg_register_device(struct rpmsg_device *rpdev);
+int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo);
+
+struct device *rpmsg_find_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo);
+
+#endif
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index fe03b2aef450..3090b0d3072f 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -33,6 +33,9 @@
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include "rpmsg_internal.h"
/**
* struct virtproc_info - virtual remote processor state
@@ -72,20 +75,69 @@ struct virtproc_info {
struct rpmsg_endpoint *ns_ept;
};
+/* The feature bitmap for virtio rpmsg */
+#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+
/**
- * struct rpmsg_channel_info - internal channel info representation
- * @name: name of service
- * @src: local address
+ * struct rpmsg_hdr - common header for all rpmsg messages
+ * @src: source address
* @dst: destination address
+ * @reserved: reserved for future use
+ * @len: length of payload (in bytes)
+ * @flags: message flags
+ * @data: @len bytes of message payload data
+ *
+ * Every message sent or received on the rpmsg bus begins with this header.
*/
-struct rpmsg_channel_info {
- char name[RPMSG_NAME_SIZE];
+struct rpmsg_hdr {
u32 src;
u32 dst;
+ u32 reserved;
+ u16 len;
+ u16 flags;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or to announce
+ * its removal. When we receive these messages, an appropriate
+ * rpmsg channel (i.e. a device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+/**
+ * struct virtio_rpmsg_channel - rpmsg channel backed by a virtio remote proc
+ * @rpdev: the rpmsg device
+ * @vrp: the remote processor this channel belongs to
+ */
+struct virtio_rpmsg_channel {
+ struct rpmsg_device rpdev;
+
+ struct virtproc_info *vrp;
};
-#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
-#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
+#define to_virtio_rpmsg_channel(_rpdev) \
+ container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)
/*
* We're allocating buffers of 512 bytes each for communications. The
@@ -118,78 +170,28 @@ struct rpmsg_channel_info {
/* Address 53 is reserved for advertising remote services */
#define RPMSG_NS_ADDR (53)
-/* sysfs show configuration fields */
-#define rpmsg_show_attr(field, path, format_string) \
-static ssize_t \
-field##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \
- \
- return sprintf(buf, format_string, rpdev->path); \
-}
-
-/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
-rpmsg_show_attr(name, id.name, "%s\n");
-rpmsg_show_attr(src, src, "0x%x\n");
-rpmsg_show_attr(dst, dst, "0x%x\n");
-rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
-
-/*
- * Unique (and free running) index for rpmsg devices.
- *
- * Yeah, we're not recycling those numbers (yet?). will be easy
- * to change if/when we want to.
- */
-static unsigned int rpmsg_dev_index;
-
-static ssize_t modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
-
- return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
-}
-
-static struct device_attribute rpmsg_dev_attrs[] = {
- __ATTR_RO(name),
- __ATTR_RO(modalias),
- __ATTR_RO(dst),
- __ATTR_RO(src),
- __ATTR_RO(announce),
- __ATTR_NULL
+static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
+static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
+static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
+ u32 dst);
+static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len);
+static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
+static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+ int len, u32 dst);
+static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len);
+
+static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
+ .destroy_ept = virtio_rpmsg_destroy_ept,
+ .send = virtio_rpmsg_send,
+ .sendto = virtio_rpmsg_sendto,
+ .send_offchannel = virtio_rpmsg_send_offchannel,
+ .trysend = virtio_rpmsg_trysend,
+ .trysendto = virtio_rpmsg_trysendto,
+ .trysend_offchannel = virtio_rpmsg_trysend_offchannel,
};
-/* rpmsg devices and drivers are matched using the service name */
-static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
- const struct rpmsg_device_id *id)
-{
- return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
-}
-
-/* match rpmsg channel and rpmsg driver */
-static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
-{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
- struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
- const struct rpmsg_device_id *ids = rpdrv->id_table;
- unsigned int i;
-
- for (i = 0; ids[i].name[0]; i++)
- if (rpmsg_id_match(rpdev, &ids[i]))
- return 1;
-
- return 0;
-}
-
-static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
-
- return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
- rpdev->id.name);
-}
-
/**
* __ept_release() - deallocate an rpmsg endpoint
* @kref: the ept's reference count
@@ -212,18 +214,17 @@ static void __ept_release(struct kref *kref)
/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
- struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
- void *priv, u32 addr)
+ struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb,
+ void *priv, u32 addr)
{
int id_min, id_max, id;
struct rpmsg_endpoint *ept;
struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
ept = kzalloc(sizeof(*ept), GFP_KERNEL);
- if (!ept) {
- dev_err(dev, "failed to kzalloc a new ept\n");
+ if (!ept)
return NULL;
- }
kref_init(&ept->refcount);
mutex_init(&ept->cb_lock);
@@ -231,6 +232,7 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
ept->rpdev = rpdev;
ept->cb = cb;
ept->priv = priv;
+ ept->ops = &virtio_endpoint_ops;
/* do we need to allocate a local address ? */
if (addr == RPMSG_ADDR_ANY) {
@@ -261,52 +263,15 @@ free_ept:
return NULL;
}
-/**
- * rpmsg_create_ept() - create a new rpmsg_endpoint
- * @rpdev: rpmsg channel device
- * @cb: rx callback handler
- * @priv: private data for the driver's use
- * @addr: local rpmsg address to bind with @cb
- *
- * Every rpmsg address in the system is bound to an rx callback (so when
- * inbound messages arrive, they are dispatched by the rpmsg bus using the
- * appropriate callback handler) by means of an rpmsg_endpoint struct.
- *
- * This function allows drivers to create such an endpoint, and by that,
- * bind a callback, and possibly some private data too, to an rpmsg address
- * (either one that is known in advance, or one that will be dynamically
- * assigned for them).
- *
- * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
- * is already created for them when they are probed by the rpmsg bus
- * (using the rx callback provided when they registered to the rpmsg bus).
- *
- * So things should just work for simple drivers: they already have an
- * endpoint, their rx callback is bound to their rpmsg address, and when
- * relevant inbound messages arrive (i.e. messages which their dst address
- * equals to the src address of their rpmsg channel), the driver's handler
- * is invoked to process it.
- *
- * That said, more complicated drivers might do need to allocate
- * additional rpmsg addresses, and bind them to different rx callbacks.
- * To accomplish that, those drivers need to call this function.
- *
- * Drivers should provide their @rpdev channel (so the new endpoint would belong
- * to the same remote processor their channel belongs to), an rx callback
- * function, an optional private data (which is provided back when the
- * rx callback is invoked), and an address they want to bind with the
- * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
- * dynamically assign them an available rpmsg address (drivers should have
- * a very good reason why not to always use RPMSG_ADDR_ANY here).
- *
- * Returns a pointer to the endpoint on success, or NULL on error.
- */
-struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
- rpmsg_rx_cb_t cb, void *priv, u32 addr)
+static struct rpmsg_endpoint *virtio_rpmsg_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb,
+ void *priv,
+ struct rpmsg_channel_info chinfo)
{
- return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
+ return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
}
-EXPORT_SYMBOL(rpmsg_create_ept);
/**
* __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
@@ -334,178 +299,82 @@ __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
kref_put(&ept->refcount, __ept_release);
}
-/**
- * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
- * @ept: endpoing to destroy
- *
- * Should be used by drivers to destroy an rpmsg endpoint previously
- * created with rpmsg_create_ept().
- */
-void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
- __rpmsg_destroy_ept(ept->rpdev->vrp, ept);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(ept->rpdev);
+
+ __rpmsg_destroy_ept(vch->vrp, ept);
}
-EXPORT_SYMBOL(rpmsg_destroy_ept);
-/*
- * when an rpmsg driver is probed with a channel, we seamlessly create
- * it an endpoint, binding its rx callback to a unique local rpmsg
- * address.
- *
- * if we need to, we also announce about this channel to the remote
- * processor (needed in case the driver is exposing an rpmsg service).
- */
-static int rpmsg_dev_probe(struct device *dev)
+static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
- struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
- struct virtproc_info *vrp = rpdev->vrp;
- struct rpmsg_endpoint *ept;
- int err;
-
- ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
- if (!ept) {
- dev_err(dev, "failed to create endpoint\n");
- err = -ENOMEM;
- goto out;
- }
-
- rpdev->ept = ept;
- rpdev->src = ept->addr;
-
- err = rpdrv->probe(rpdev);
- if (err) {
- dev_err(dev, "%s: failed: %d\n", __func__, err);
- rpmsg_destroy_ept(ept);
- goto out;
- }
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ struct virtproc_info *vrp = vch->vrp;
+ struct device *dev = &rpdev->dev;
+ int err = 0;
/* need to tell remote processor's name service about this channel ? */
if (rpdev->announce &&
- virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->src;
+ nsm.addr = rpdev->ept->addr;
nsm.flags = RPMSG_NS_CREATE;
- err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
dev_err(dev, "failed to announce service %d\n", err);
}
-out:
return err;
}
-static int rpmsg_dev_remove(struct device *dev)
+static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
- struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
- struct virtproc_info *vrp = rpdev->vrp;
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ struct virtproc_info *vrp = vch->vrp;
+ struct device *dev = &rpdev->dev;
int err = 0;
/* tell remote processor's name service we're removing this channel */
if (rpdev->announce &&
- virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
nsm.addr = rpdev->src;
nsm.flags = RPMSG_NS_DESTROY;
- err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
dev_err(dev, "failed to announce service %d\n", err);
}
- rpdrv->remove(rpdev);
-
- rpmsg_destroy_ept(rpdev->ept);
-
return err;
}
-static struct bus_type rpmsg_bus = {
- .name = "rpmsg",
- .match = rpmsg_dev_match,
- .dev_attrs = rpmsg_dev_attrs,
- .uevent = rpmsg_uevent,
- .probe = rpmsg_dev_probe,
- .remove = rpmsg_dev_remove,
+static const struct rpmsg_device_ops virtio_rpmsg_ops = {
+ .create_ept = virtio_rpmsg_create_ept,
+ .announce_create = virtio_rpmsg_announce_create,
+ .announce_destroy = virtio_rpmsg_announce_destroy,
};
-/**
- * __register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
- * @rpdrv: pointer to a struct rpmsg_driver
- * @owner: owning module/driver
- *
- * Returns 0 on success, and an appropriate error value on failure.
- */
-int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
-{
- rpdrv->drv.bus = &rpmsg_bus;
- rpdrv->drv.owner = owner;
- return driver_register(&rpdrv->drv);
-}
-EXPORT_SYMBOL(__register_rpmsg_driver);
-
-/**
- * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
- * @rpdrv: pointer to a struct rpmsg_driver
- *
- * Returns 0 on success, and an appropriate error value on failure.
- */
-void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
-{
- driver_unregister(&rpdrv->drv);
-}
-EXPORT_SYMBOL(unregister_rpmsg_driver);
-
-static void rpmsg_release_device(struct device *dev)
-{
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
-
- kfree(rpdev);
-}
-
-/*
- * match an rpmsg channel with a channel info struct.
- * this is used to make sure we're not creating rpmsg devices for channels
- * that already exist.
- */
-static int rpmsg_channel_match(struct device *dev, void *data)
-{
- struct rpmsg_channel_info *chinfo = data;
- struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
-
- if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
- return 0;
-
- if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
- return 0;
-
- if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
- return 0;
-
- /* found a match ! */
- return 1;
-}
-
/*
* create an rpmsg channel using its name and address info.
* this function will be used to create both static and dynamic
* channels.
*/
-static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
- struct rpmsg_channel_info *chinfo)
+static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
{
- struct rpmsg_channel *rpdev;
+ struct virtio_rpmsg_channel *vch;
+ struct rpmsg_device *rpdev;
struct device *tmp, *dev = &vrp->vdev->dev;
int ret;
/* make sure a similar channel doesn't already exist */
- tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
+ tmp = rpmsg_find_device(dev, chinfo);
if (tmp) {
/* decrement the matched device's refcount back */
put_device(tmp);
@@ -514,62 +383,38 @@ static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
return NULL;
}
- rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
- if (!rpdev) {
- pr_err("kzalloc failed\n");
+ vch = kzalloc(sizeof(*vch), GFP_KERNEL);
+ if (!vch)
return NULL;
- }
- rpdev->vrp = vrp;
+ /* Link the channel to our vrp */
+ vch->vrp = vrp;
+
+ /* Assign callbacks for rpmsg_channel */
+ vch->rpdev.ops = &virtio_rpmsg_ops;
+
+ /* Assign public information to the rpmsg_device */
+ rpdev = &vch->rpdev;
rpdev->src = chinfo->src;
rpdev->dst = chinfo->dst;
+ rpdev->ops = &virtio_rpmsg_ops;
/*
* rpmsg server channels has predefined local address (for now),
* and their existence needs to be announced remotely
*/
- rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;
+ rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;
strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
- /* very simple device indexing plumbing which is enough for now */
- dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);
-
rpdev->dev.parent = &vrp->vdev->dev;
- rpdev->dev.bus = &rpmsg_bus;
- rpdev->dev.release = rpmsg_release_device;
-
- ret = device_register(&rpdev->dev);
- if (ret) {
- dev_err(dev, "device_register failed: %d\n", ret);
- put_device(&rpdev->dev);
+ ret = rpmsg_register_device(rpdev);
+ if (ret)
return NULL;
- }
return rpdev;
}
-/*
- * find an existing channel using its name + address properties,
- * and destroy it
- */
-static int rpmsg_destroy_channel(struct virtproc_info *vrp,
- struct rpmsg_channel_info *chinfo)
-{
- struct virtio_device *vdev = vrp->vdev;
- struct device *dev;
-
- dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
- if (!dev)
- return -EINVAL;
-
- device_unregister(dev);
-
- put_device(dev);
-
- return 0;
-}
-
/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
@@ -684,10 +529,12 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
*
* Returns 0 on success and an appropriate error value on failure.
*/
-int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
- void *data, int len, bool wait)
+static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
+ u32 src, u32 dst,
+ void *data, int len, bool wait)
{
- struct virtproc_info *vrp = rpdev->vrp;
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ struct virtproc_info *vrp = vch->vrp;
struct device *dev = &rpdev->dev;
struct scatterlist sg;
struct rpmsg_hdr *msg;
@@ -751,10 +598,11 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
memcpy(msg->data, data, len);
dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
- msg->src, msg->dst, msg->len,
- msg->flags, msg->reserved);
- print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+#if defined(CONFIG_DYNAMIC_DEBUG)
+ dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+#endif
sg_init_one(&sg, msg, sizeof(*msg) + len);
@@ -780,6 +628,56 @@ out:
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
+static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+ u32 src = ept->addr, dst = rpdev->dst;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
+ u32 dst)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+ u32 src = ept->addr;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+ u32 src = ept->addr, dst = rpdev->dst;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+ int len, u32 dst)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+ u32 src = ept->addr;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
+ u32 dst, void *data, int len)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
struct rpmsg_hdr *msg, unsigned int len)
{
@@ -788,17 +686,18 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
int err;
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
- msg->src, msg->dst, msg->len,
- msg->flags, msg->reserved);
- print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+#if defined(CONFIG_DYNAMIC_DEBUG)
+ dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+#endif
/*
* We currently use fixed-sized buffers, so trivially sanitize
* the reported payload length.
*/
if (len > RPMSG_BUF_SIZE ||
- msg->len > (len - sizeof(struct rpmsg_hdr))) {
+ msg->len > (len - sizeof(struct rpmsg_hdr))) {
dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
return -EINVAL;
}
@@ -865,7 +764,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
msgs_received++;
msg = virtqueue_get_buf(rvq, &len);
- };
+ }
dev_dbg(dev, "Received %u messages\n", msgs_received);
@@ -892,23 +791,24 @@ static void rpmsg_xmit_done(struct virtqueue *svq)
}
/* invoked when a name service announcement arrives */
-static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
- void *priv, u32 src)
+static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
{
struct rpmsg_ns_msg *msg = data;
- struct rpmsg_channel *newch;
+ struct rpmsg_device *newch;
struct rpmsg_channel_info chinfo;
struct virtproc_info *vrp = priv;
struct device *dev = &vrp->vdev->dev;
int ret;
- print_hex_dump(KERN_DEBUG, "NS announcement: ",
- DUMP_PREFIX_NONE, 16, 1,
- data, len, true);
+#if defined(CONFIG_DYNAMIC_DEBUG)
+ dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+#endif
if (len != sizeof(*msg)) {
dev_err(dev, "malformed ns msg (%d)\n", len);
- return;
+ return -EINVAL;
}
/*
@@ -919,22 +819,22 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
*/
if (rpdev) {
dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
- return;
+ return -EINVAL;
}
/* don't trust the remote processor for null terminating the name */
msg->name[RPMSG_NAME_SIZE - 1] = '\0';
dev_info(dev, "%sing channel %s addr 0x%x\n",
- msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
- msg->name, msg->addr);
+ msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
+ msg->name, msg->addr);
strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = msg->addr;
if (msg->flags & RPMSG_NS_DESTROY) {
- ret = rpmsg_destroy_channel(vrp, &chinfo);
+ ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
if (ret)
dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
} else {
@@ -942,6 +842,8 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
if (!newch)
dev_err(dev, "rpmsg_create_channel failed\n");
}
+
+ return 0;
}
static int rpmsg_probe(struct virtio_device *vdev)
@@ -995,8 +897,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
goto vqs_del;
}
- dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
- (unsigned long long)vrp->bufs_dma);
+ dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
+ bufs_va, &vrp->bufs_dma);
/* half of the buffers is dedicated for RX */
vrp->rbufs = bufs_va;
@@ -1012,7 +914,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);
err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
- GFP_KERNEL);
+ GFP_KERNEL);
WARN_ON(err); /* sanity check; this can't really happen */
}
@@ -1119,17 +1021,9 @@ static int __init rpmsg_init(void)
{
int ret;
- ret = bus_register(&rpmsg_bus);
- if (ret) {
- pr_err("failed to register rpmsg bus: %d\n", ret);
- return ret;
- }
-
ret = register_virtio_driver(&virtio_ipc_driver);
- if (ret) {
+ if (ret)
pr_err("failed to register virtio driver: %d\n", ret);
- bus_unregister(&rpmsg_bus);
- }
return ret;
}
@@ -1138,7 +1032,6 @@ subsys_initcall(rpmsg_init);
static void __exit rpmsg_fini(void)
{
unregister_virtio_driver(&virtio_ipc_driver);
- bus_unregister(&rpmsg_bus);
}
module_exit(rpmsg_fini);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e215f50794b6..d1e080701264 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -187,6 +187,16 @@ config RTC_DRV_ABX80X
This driver can also be built as a module. If so, the module
will be called rtc-abx80x.
+config RTC_DRV_AC100
+ tristate "X-Powers AC100"
+ depends on MFD_AC100
+ help
+ If you say yes here you get support for the real-time clock found
+ in X-Powers AC100 family peripheral ICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ac100.
+
config RTC_DRV_AS3722
tristate "ams AS3722 RTC driver"
depends on MFD_AS3722
@@ -328,11 +338,11 @@ config RTC_DRV_MAX77686
will be called rtc-max77686.
config RTC_DRV_RK808
- tristate "Rockchip RK808 RTC"
+ tristate "Rockchip RK808/RK818 RTC"
depends on MFD_RK808
help
If you say yes here you will get support for the
- RTC of RK808 PMIC.
+ RTC of RK808 and RK818 PMICs.
This driver can also be built as a module. If so, the module
will be called rk808-rtc.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7cf7ad559c79..8fb994bacdf7 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o
+obj-$(CONFIG_RTC_DRV_AC100) += rtc-ac100.o
obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_ASM9260) += rtc-asm9260.o
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
new file mode 100644
index 000000000000..70b4fd0f6122
--- /dev/null
+++ b/drivers/rtc/rtc-ac100.c
@@ -0,0 +1,627 @@
+/*
+ * RTC Driver for X-Powers AC100
+ *
+ * Copyright (c) 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bcd.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/ac100.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+
+/* Control register */
+#define AC100_RTC_CTRL_24HOUR BIT(0)
+
+/* Clock output register bits */
+#define AC100_CLKOUT_PRE_DIV_SHIFT 5
+#define AC100_CLKOUT_PRE_DIV_WIDTH 3
+#define AC100_CLKOUT_MUX_SHIFT 4
+#define AC100_CLKOUT_MUX_WIDTH 1
+#define AC100_CLKOUT_DIV_SHIFT 1
+#define AC100_CLKOUT_DIV_WIDTH 3
+#define AC100_CLKOUT_EN BIT(0)
+
+/* RTC */
+#define AC100_RTC_SEC_MASK GENMASK(6, 0)
+#define AC100_RTC_MIN_MASK GENMASK(6, 0)
+#define AC100_RTC_HOU_MASK GENMASK(5, 0)
+#define AC100_RTC_WEE_MASK GENMASK(2, 0)
+#define AC100_RTC_DAY_MASK GENMASK(5, 0)
+#define AC100_RTC_MON_MASK GENMASK(4, 0)
+#define AC100_RTC_YEA_MASK GENMASK(7, 0)
+#define AC100_RTC_YEA_LEAP BIT(15)
+#define AC100_RTC_UPD_TRIGGER BIT(15)
+
+/* Alarm (wall clock) */
+#define AC100_ALM_INT_ENABLE BIT(0)
+
+#define AC100_ALM_SEC_MASK GENMASK(6, 0)
+#define AC100_ALM_MIN_MASK GENMASK(6, 0)
+#define AC100_ALM_HOU_MASK GENMASK(5, 0)
+#define AC100_ALM_WEE_MASK GENMASK(2, 0)
+#define AC100_ALM_DAY_MASK GENMASK(5, 0)
+#define AC100_ALM_MON_MASK GENMASK(4, 0)
+#define AC100_ALM_YEA_MASK GENMASK(7, 0)
+#define AC100_ALM_ENABLE_FLAG BIT(15)
+#define AC100_ALM_UPD_TRIGGER BIT(15)
+
+/*
+ * The year parameter passed to the driver is usually an offset relative to
+ * the year 1900. This macro is used to convert this offset to another one
+ * relative to the minimum year allowed by the hardware.
+ *
+ * The year range is 1970 - 2069. This range is selected to match Allwinner's
+ * driver.
+ */
+#define AC100_YEAR_MIN 1970
+#define AC100_YEAR_MAX 2069
+#define AC100_YEAR_OFF (AC100_YEAR_MIN - 1900)
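
As a worked example of the conversion described in the comment above (a minimal userspace sketch, assuming the usual struct rtc_time convention that tm_year counts years since 1900; nothing here is part of the patch):

#include <assert.h>

/* Same arithmetic as tm_year - AC100_YEAR_OFF in the driver. */
static int ac100_example_hw_year(int tm_year)
{
	return tm_year - (1970 - 1900);
}

int main(void)
{
	assert(ac100_example_hw_year(70) == 0);		/* 1970, hardware minimum */
	assert(ac100_example_hw_year(116) == 46);	/* 2016 */
	assert(ac100_example_hw_year(169) == 99);	/* 2069, hardware maximum */
	return 0;
}
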
+
+struct ac100_clkout {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 offset;
+};
+
+#define to_ac100_clkout(_hw) container_of(_hw, struct ac100_clkout, hw)
+
+#define AC100_RTC_32K_NAME "ac100-rtc-32k"
+#define AC100_RTC_32K_RATE 32768
+#define AC100_CLKOUT_NUM 3
+
+static const char * const ac100_clkout_names[AC100_CLKOUT_NUM] = {
+ "ac100-cko1-rtc",
+ "ac100-cko2-rtc",
+ "ac100-cko3-rtc",
+};
+
+struct ac100_rtc_dev {
+ struct rtc_device *rtc;
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ unsigned long alarm;
+
+ struct clk_hw *rtc_32k_clk;
+ struct ac100_clkout clks[AC100_CLKOUT_NUM];
+ struct clk_hw_onecell_data *clk_data;
+};
+
+/*
+ * Clock controls for 3 clock output pins
+ */
+
+static const struct clk_div_table ac100_clkout_prediv[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { .val = 4, .div = 16 },
+ { .val = 5, .div = 32 },
+ { .val = 6, .div = 64 },
+ { .val = 7, .div = 122 },
+ { },
+};
+
+/* Abuse the fact that one parent is 32768 Hz, and the other is 4 MHz */
+static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+ unsigned int reg, div;
+
+ regmap_read(clk->regmap, clk->offset, &reg);
+
+ /* Handle pre-divider first */
+ if (prate != AC100_RTC_32K_RATE) {
+ div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) &
+ ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1);
+ prate = divider_recalc_rate(hw, prate, div,
+ ac100_clkout_prediv, 0);
+ }
+
+ div = (reg >> AC100_CLKOUT_DIV_SHIFT) &
+ (BIT(AC100_CLKOUT_DIV_WIDTH) - 1);
+ return divider_recalc_rate(hw, prate, div, NULL,
+ CLK_DIVIDER_POWER_OF_TWO);
+}
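
To make the two-stage division in recalc_rate() concrete, here is a hedged worked example with made-up register settings (the divider values are assumptions, not taken from the patch): with the 4 MHz parent, a pre-divider of /32 and a power-of-two divider of /8 give 4000000 / 32 / 8 = 15625 Hz; with the 32768 Hz parent the pre-divider stage is skipped and only the power-of-two divider applies.

/* Illustration only; the field decoding is done by divider_recalc_rate()
 * in the real driver. */
static unsigned long ac100_example_output_rate(void)
{
	unsigned long parent = 4000000;	/* ADDA 4 MHz parent (assumed) */
	unsigned long prediv = 32;	/* pre-divider selection (assumed) */
	unsigned long div = 8;		/* power-of-two divider (assumed) */

	return parent / prediv / div;	/* 15625 Hz */
}
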
+
+static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ unsigned long best_rate = 0, tmp_rate, tmp_prate;
+ int i;
+
+ if (prate == AC100_RTC_32K_RATE)
+ return divider_round_rate(hw, rate, &prate, NULL,
+ AC100_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ for (i = 0; ac100_clkout_prediv[i].div; i++) {
+ tmp_prate = DIV_ROUND_UP(prate, ac100_clkout_prediv[i].val);
+ tmp_rate = divider_round_rate(hw, rate, &tmp_prate, NULL,
+ AC100_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ if (tmp_rate > rate)
+ continue;
+ if (rate - tmp_rate < best_rate - tmp_rate)
+ best_rate = tmp_rate;
+ }
+
+ return best_rate;
+}
+
+static int ac100_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *best_parent;
+ unsigned long best = 0;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long tmp, prate = clk_hw_get_rate(parent);
+
+ tmp = ac100_clkout_round_rate(hw, req->rate, prate);
+
+ if (tmp > req->rate)
+ continue;
+ if (req->rate - tmp < req->rate - best) {
+ best = tmp;
+ best_parent = parent;
+ }
+ }
+
+ if (!best)
+ return -EINVAL;
+
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best;
+ req->rate = best;
+
+ return 0;
+}
+
+static int ac100_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+ int div = 0, pre_div = 0;
+
+ do {
+ div = divider_get_val(rate * ac100_clkout_prediv[pre_div].div,
+ prate, NULL, AC100_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_POWER_OF_TWO);
+ if (div >= 0)
+ break;
+ } while (prate != AC100_RTC_32K_RATE &&
+ ac100_clkout_prediv[++pre_div].div);
+
+ if (div < 0)
+ return div;
+
+ pre_div = ac100_clkout_prediv[pre_div].val;
+
+ regmap_update_bits(clk->regmap, clk->offset,
+ ((1 << AC100_CLKOUT_DIV_WIDTH) - 1) << AC100_CLKOUT_DIV_SHIFT |
+ ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1) << AC100_CLKOUT_PRE_DIV_SHIFT,
+ (div - 1) << AC100_CLKOUT_DIV_SHIFT |
+ (pre_div - 1) << AC100_CLKOUT_PRE_DIV_SHIFT);
+
+ return 0;
+}
+
+static int ac100_clkout_prepare(struct clk_hw *hw)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+
+ return regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN,
+ AC100_CLKOUT_EN);
+}
+
+static void ac100_clkout_unprepare(struct clk_hw *hw)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+
+ regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN, 0);
+}
+
+static int ac100_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+ unsigned int reg;
+
+ regmap_read(clk->regmap, clk->offset, &reg);
+
+ return reg & AC100_CLKOUT_EN;
+}
+
+static u8 ac100_clkout_get_parent(struct clk_hw *hw)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+ unsigned int reg;
+
+ regmap_read(clk->regmap, clk->offset, &reg);
+
+ return (reg >> AC100_CLKOUT_MUX_SHIFT) & 0x1;
+}
+
+static int ac100_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ac100_clkout *clk = to_ac100_clkout(hw);
+
+ return regmap_update_bits(clk->regmap, clk->offset,
+ BIT(AC100_CLKOUT_MUX_SHIFT),
+ index ? BIT(AC100_CLKOUT_MUX_SHIFT) : 0);
+}
+
+static const struct clk_ops ac100_clkout_ops = {
+ .prepare = ac100_clkout_prepare,
+ .unprepare = ac100_clkout_unprepare,
+ .is_prepared = ac100_clkout_is_prepared,
+ .recalc_rate = ac100_clkout_recalc_rate,
+ .determine_rate = ac100_clkout_determine_rate,
+ .get_parent = ac100_clkout_get_parent,
+ .set_parent = ac100_clkout_set_parent,
+ .set_rate = ac100_clkout_set_rate,
+};
+
+static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip)
+{
+ struct device_node *np = chip->dev->of_node;
+ const char *parents[2] = {AC100_RTC_32K_NAME};
+ int i, ret;
+
+ chip->clk_data = devm_kzalloc(chip->dev, sizeof(*chip->clk_data) +
+ sizeof(*chip->clk_data->hws) *
+ AC100_CLKOUT_NUM,
+ GFP_KERNEL);
+ if (!chip->clk_data)
+ return -ENOMEM;
+
+ chip->rtc_32k_clk = clk_hw_register_fixed_rate(chip->dev,
+ AC100_RTC_32K_NAME,
+ NULL, 0,
+ AC100_RTC_32K_RATE);
+ if (IS_ERR(chip->rtc_32k_clk)) {
+ ret = PTR_ERR(chip->rtc_32k_clk);
+ dev_err(chip->dev, "Failed to register RTC-32k clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ parents[1] = of_clk_get_parent_name(np, 0);
+ if (!parents[1]) {
+ dev_err(chip->dev, "Failed to get ADDA 4M clock\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < AC100_CLKOUT_NUM; i++) {
+ struct ac100_clkout *clk = &chip->clks[i];
+ struct clk_init_data init = {
+ .name = ac100_clkout_names[i],
+ .ops = &ac100_clkout_ops,
+ .parent_names = parents,
+ .num_parents = ARRAY_SIZE(parents),
+ .flags = 0,
+ };
+
+ clk->regmap = chip->regmap;
+ clk->offset = AC100_CLKOUT_CTRL1 + i;
+ clk->hw.init = &init;
+
+ ret = devm_clk_hw_register(chip->dev, &clk->hw);
+ if (ret) {
+ dev_err(chip->dev, "Failed to register clk '%s': %d\n",
+ init.name, ret);
+ goto err_unregister_rtc_32k;
+ }
+
+ chip->clk_data->hws[i] = &clk->hw;
+ }
+
+ chip->clk_data->num = i;
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, chip->clk_data);
+ if (ret)
+ goto err_unregister_rtc_32k;
+
+ return 0;
+
+err_unregister_rtc_32k:
+ clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
+
+ return ret;
+}
+
+static void ac100_rtc_unregister_clks(struct ac100_rtc_dev *chip)
+{
+ of_clk_del_provider(chip->dev->of_node);
+ clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
+}
+
+/*
+ * RTC related bits
+ */
+static int ac100_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
+ struct regmap *regmap = chip->regmap;
+ u16 reg[7];
+ int ret;
+
+ ret = regmap_bulk_read(regmap, AC100_RTC_SEC, reg, 7);
+ if (ret)
+ return ret;
+
+ rtc_tm->tm_sec = bcd2bin(reg[0] & AC100_RTC_SEC_MASK);
+ rtc_tm->tm_min = bcd2bin(reg[1] & AC100_RTC_MIN_MASK);
+ rtc_tm->tm_hour = bcd2bin(reg[2] & AC100_RTC_HOU_MASK);
+ rtc_tm->tm_wday = bcd2bin(reg[3] & AC100_RTC_WEE_MASK);
+ rtc_tm->tm_mday = bcd2bin(reg[4] & AC100_RTC_DAY_MASK);
+ rtc_tm->tm_mon = bcd2bin(reg[5] & AC100_RTC_MON_MASK) - 1;
+ rtc_tm->tm_year = bcd2bin(reg[6] & AC100_RTC_YEA_MASK) +
+ AC100_YEAR_OFF;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int ac100_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
+ struct regmap *regmap = chip->regmap;
+ int year;
+ u16 reg[8];
+
+ /* our RTC has a limited year range... */
+ year = rtc_tm->tm_year - AC100_YEAR_OFF;
+ if (year < 0 || year > (AC100_YEAR_MAX - 1900)) {
+ dev_err(dev, "rtc only supports year in range %d - %d\n",
+ AC100_YEAR_MIN, AC100_YEAR_MAX);
+ return -EINVAL;
+ }
+
+ /* convert to BCD */
+ reg[0] = bin2bcd(rtc_tm->tm_sec) & AC100_RTC_SEC_MASK;
+ reg[1] = bin2bcd(rtc_tm->tm_min) & AC100_RTC_MIN_MASK;
+ reg[2] = bin2bcd(rtc_tm->tm_hour) & AC100_RTC_HOU_MASK;
+ reg[3] = bin2bcd(rtc_tm->tm_wday) & AC100_RTC_WEE_MASK;
+ reg[4] = bin2bcd(rtc_tm->tm_mday) & AC100_RTC_DAY_MASK;
+ reg[5] = bin2bcd(rtc_tm->tm_mon + 1) & AC100_RTC_MON_MASK;
+ reg[6] = bin2bcd(year) & AC100_RTC_YEA_MASK;
+ /* trigger write */
+ reg[7] = AC100_RTC_UPD_TRIGGER;
+
+ /* Is it a leap year? */
+ if (is_leap_year(year + AC100_YEAR_OFF + 1900))
+ reg[6] |= AC100_RTC_YEA_LEAP;
+
+ return regmap_bulk_write(regmap, AC100_RTC_SEC, reg, 8);
+}
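
A worked example of the BCD packing done by set_time above, for a hypothetical date (a userspace sketch; bin2bcd() is re-implemented here for illustration, in the driver it comes from <linux/bcd.h>): setting 2016-08-31 23:59:30 stores 0x30, 0x59 and 0x23 for second, minute and hour, 0x31 and 0x08 for day and month, and 0x46 for the year (2016 - 1970 = 46), with the leap-year bit set because 2016 is a leap year, followed by the update trigger word.

#include <stdio.h>

static unsigned char example_bin2bcd(unsigned int val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

int main(void)
{
	printf("sec=0x%02x hour=0x%02x year=0x%02x\n",
	       example_bin2bcd(30),		/* 0x30 */
	       example_bin2bcd(23),		/* 0x23 */
	       example_bin2bcd(2016 - 1970));	/* 0x46 */
	return 0;
}
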
+
+static int ac100_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
+{
+ struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
+ struct regmap *regmap = chip->regmap;
+ unsigned int val;
+
+ val = en ? AC100_ALM_INT_ENABLE : 0;
+
+ return regmap_write(regmap, AC100_ALM_INT_ENA, val);
+}
+
+static int ac100_rtc_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
+ struct regmap *regmap = chip->regmap;
+ struct rtc_time *alrm_tm = &alrm->time;
+ u16 reg[7];
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, AC100_ALM_INT_ENA, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & AC100_ALM_INT_ENABLE);
+
+ ret = regmap_bulk_read(regmap, AC100_ALM_SEC, reg, 7);
+ if (ret)
+ return ret;
+
+ alrm_tm->tm_sec = bcd2bin(reg[0] & AC100_ALM_SEC_MASK);
+ alrm_tm->tm_min = bcd2bin(reg[1] & AC100_ALM_MIN_MASK);
+ alrm_tm->tm_hour = bcd2bin(reg[2] & AC100_ALM_HOU_MASK);
+ alrm_tm->tm_wday = bcd2bin(reg[3] & AC100_ALM_WEE_MASK);
+ alrm_tm->tm_mday = bcd2bin(reg[4] & AC100_ALM_DAY_MASK);
+ alrm_tm->tm_mon = bcd2bin(reg[5] & AC100_ALM_MON_MASK) - 1;
+ alrm_tm->tm_year = bcd2bin(reg[6] & AC100_ALM_YEA_MASK) +
+ AC100_YEAR_OFF;
+
+ return 0;
+}
+
+static int ac100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
+ struct regmap *regmap = chip->regmap;
+ struct rtc_time *alrm_tm = &alrm->time;
+ u16 reg[8];
+ int year;
+ int ret;
+
+ /* our alarm has a limited year range... */
+ year = alrm_tm->tm_year - AC100_YEAR_OFF;
+ if (year < 0 || year > (AC100_YEAR_MAX - 1900)) {
+ dev_err(dev, "alarm only supports year in range %d - %d\n",
+ AC100_YEAR_MIN, AC100_YEAR_MAX);
+ return -EINVAL;
+ }
+
+ /* convert to BCD */
+ reg[0] = (bin2bcd(alrm_tm->tm_sec) & AC100_ALM_SEC_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ reg[1] = (bin2bcd(alrm_tm->tm_min) & AC100_ALM_MIN_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ reg[2] = (bin2bcd(alrm_tm->tm_hour) & AC100_ALM_HOU_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ /* Do not enable weekday alarm */
+ reg[3] = bin2bcd(alrm_tm->tm_wday) & AC100_ALM_WEE_MASK;
+ reg[4] = (bin2bcd(alrm_tm->tm_mday) & AC100_ALM_DAY_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ reg[5] = (bin2bcd(alrm_tm->tm_mon + 1) & AC100_ALM_MON_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ reg[6] = (bin2bcd(year) & AC100_ALM_YEA_MASK) |
+ AC100_ALM_ENABLE_FLAG;
+ /* trigger write */
+ reg[7] = AC100_ALM_UPD_TRIGGER;
+
+ ret = regmap_bulk_write(regmap, AC100_ALM_SEC, reg, 8);
+ if (ret)
+ return ret;
+
+ return ac100_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
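
For the alarm path the same BCD packing applies, but each field except the weekday is additionally OR'ed with AC100_ALM_ENABLE_FLAG (BIT(15)) so that it participates in the match. A hedged worked example for a hypothetical alarm at 2017-01-01 00:00:00:

/* Illustration only, values computed by hand:
 *   second/minute/hour: bin2bcd(0) | 0x8000           -> 0x8000
 *   day, month:         bin2bcd(1) | 0x8000           -> 0x8001
 *   year:               bin2bcd(2017 - 1970) | 0x8000 -> 0x8047
 *   weekday:            bin2bcd(0)  (enable bit left clear on purpose)
 * followed by AC100_ALM_UPD_TRIGGER in the final word.
 */
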
+
+static irqreturn_t ac100_rtc_irq(int irq, void *data)
+{
+ struct ac100_rtc_dev *chip = data;
+ struct regmap *regmap = chip->regmap;
+ unsigned int val = 0;
+ int ret;
+
+ mutex_lock(&chip->rtc->ops_lock);
+
+ /* read status */
+ ret = regmap_read(regmap, AC100_ALM_INT_STA, &val);
+ if (ret)
+ goto out;
+
+ if (val & AC100_ALM_INT_ENABLE) {
+ /* signal rtc framework */
+ rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
+
+ /* clear status */
+ ret = regmap_write(regmap, AC100_ALM_INT_STA, val);
+ if (ret)
+ goto out;
+
+ /* disable interrupt */
+ ret = ac100_rtc_alarm_irq_enable(chip->dev, 0);
+ if (ret)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&chip->rtc->ops_lock);
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ac100_rtc_ops = {
+ .read_time = ac100_rtc_get_time,
+ .set_time = ac100_rtc_set_time,
+ .read_alarm = ac100_rtc_get_alarm,
+ .set_alarm = ac100_rtc_set_alarm,
+ .alarm_irq_enable = ac100_rtc_alarm_irq_enable,
+};
+
+static int ac100_rtc_probe(struct platform_device *pdev)
+{
+ struct ac100_dev *ac100 = dev_get_drvdata(pdev->dev.parent);
+ struct ac100_rtc_dev *chip;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chip);
+ chip->dev = &pdev->dev;
+ chip->regmap = ac100->regmap;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return chip->irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL,
+ ac100_rtc_irq,
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&pdev->dev), chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return ret;
+ }
+
+ /* always use 24 hour mode */
+ regmap_write_bits(chip->regmap, AC100_RTC_CTRL, AC100_RTC_CTRL_24HOUR,
+ AC100_RTC_CTRL_24HOUR);
+
+ /* disable counter alarm interrupt */
+ regmap_write(chip->regmap, AC100_ALM_INT_ENA, 0);
+
+ /* clear counter alarm pending interrupts */
+ regmap_write(chip->regmap, AC100_ALM_INT_STA, AC100_ALM_INT_ENABLE);
+
+ chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-ac100",
+ &ac100_rtc_ops, THIS_MODULE);
+ if (IS_ERR(chip->rtc)) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ return PTR_ERR(chip->rtc);
+ }
+
+ ret = ac100_rtc_register_clks(chip);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "RTC enabled\n");
+
+ return 0;
+}
+
+static int ac100_rtc_remove(struct platform_device *pdev)
+{
+ struct ac100_rtc_dev *chip = platform_get_drvdata(pdev);
+
+ ac100_rtc_unregister_clks(chip);
+
+ return 0;
+}
+
+static const struct of_device_id ac100_rtc_match[] = {
+ { .compatible = "x-powers,ac100-rtc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ac100_rtc_match);
+
+static struct platform_driver ac100_rtc_driver = {
+ .probe = ac100_rtc_probe,
+ .remove = ac100_rtc_remove,
+ .driver = {
+ .name = "ac100-rtc",
+ .of_match_table = of_match_ptr(ac100_rtc_match),
+ },
+};
+module_platform_driver(ac100_rtc_driver);
+
+MODULE_DESCRIPTION("X-Powers AC100 RTC driver");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 795fcbd02ea3..fac835530671 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -428,6 +428,7 @@ static const struct pm8xxx_rtc_regs pm8941_regs = {
*/
static const struct of_device_id pm8xxx_id_table[] = {
{ .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
+ { .compatible = "qcom,pm8018-rtc", .data = &pm8921_regs },
{ .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
{ .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
{ },
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fb1b56a71475..1de089019268 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -212,16 +212,6 @@ static int dasd_state_known_to_new(struct dasd_device *device)
{
/* Disable extended error reporting for this device. */
dasd_eer_disable(device);
- /* Forget the discipline information. */
- if (device->discipline) {
- if (device->discipline->uncheck_device)
- device->discipline->uncheck_device(device);
- module_put(device->discipline->owner);
- }
- device->discipline = NULL;
- if (device->base_discipline)
- module_put(device->base_discipline->owner);
- device->base_discipline = NULL;
device->state = DASD_STATE_NEW;
if (device->block)
@@ -336,6 +326,7 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
{
int rc;
struct dasd_block *block;
+ struct gendisk *disk;
rc = 0;
block = device->block;
@@ -346,6 +337,9 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
if (rc) {
if (rc != -EAGAIN) {
device->state = DASD_STATE_UNFMT;
+ disk = device->block->gdp;
+ kobject_uevent(&disk_to_dev(disk)->kobj,
+ KOBJ_CHANGE);
goto out;
}
return rc;
@@ -2274,6 +2268,15 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
continue;
}
/*
+ * Don't try to start requests if device is in
+ * offline processing, it might wait forever
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -ENODEV;
+ continue;
+ }
+ /*
* Don't try to start requests if device is stopped
* except path verification requests
*/
@@ -3364,6 +3367,22 @@ int dasd_generic_probe(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);
+void dasd_generic_free_discipline(struct dasd_device *device)
+{
+ /* Forget the discipline information. */
+ if (device->discipline) {
+ if (device->discipline->uncheck_device)
+ device->discipline->uncheck_device(device);
+ module_put(device->discipline->owner);
+ device->discipline = NULL;
+ }
+ if (device->base_discipline) {
+ module_put(device->base_discipline->owner);
+ device->base_discipline = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
+
/*
* This will one day be called from a global not_oper handler.
* It is also used by driver_unregister during module unload.
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 3cdbce45e464..15a1a70cace9 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -617,6 +617,7 @@ dasd_delete_device(struct dasd_device *device)
/* Wait for reference counter to drop to zero. */
wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
+ dasd_generic_free_discipline(device);
/* Disconnect dasd_device structure from ccw_device structure. */
cdev = device->cdev;
device->cdev = NULL;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 98bbec44bcd0..831935af7389 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5201,7 +5201,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
- rc = dasd_sleep_on(cqr);
+ rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
*data = *host_access;
} else {
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index e1e88486b2b4..d138d0116734 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -169,12 +169,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
- "A timeout error occurred for cqr %p", cqr);
+ "A timeout error occurred for cqr %p\n", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
- "A transport error occurred for cqr %p", cqr);
+ "A transport error occurred for cqr %p\n", cqr);
return;
}
/* dump sense data */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index ac7027e6d52b..87ff6cef872f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -725,6 +725,7 @@ void dasd_block_clear_timer(struct dasd_block *);
int dasd_cancel_req(struct dasd_ccw_req *);
int dasd_flush_device_queue(struct dasd_device *);
int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+void dasd_generic_free_discipline(struct dasd_device *);
void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 6b1577c73fe7..285b4006f44b 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp)
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
- if (s->len >= cp->view.cols - 5)
+ if (s->len < 4) {
+ /* This indicates a bug, but printing a warning would
+ * cause a deadlock. */
+ return;
+ }
+ if (s->string[s->len - 4] != TO_RA)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
@@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp)
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
- if (s->len < cp->view.cols - 5) {
+ if (cp->cline->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
- while (--size > cp->cline->len)
+ while (--size >= cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index ea607a4a1bdd..554eaa1e347d 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -126,21 +126,4 @@ static struct miscdevice sclp_ctl_device = {
.name = "sclp",
.fops = &sclp_ctl_fops,
};
-
-/*
- * Register sclp_ctl misc device
- */
-static int __init sclp_ctl_init(void)
-{
- return misc_register(&sclp_ctl_device);
-}
-module_init(sclp_ctl_init);
-
-/*
- * Deregister sclp_ctl misc device
- */
-static void __exit sclp_ctl_exit(void)
-{
- misc_deregister(&sclp_ctl_device);
-}
-module_exit(sclp_ctl_exit);
+module_misc_device(sclp_ctl_device);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index d3d1936057b4..e352047ed9f7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -312,15 +312,10 @@ static int tape_3592_ioctl_kekl_set(struct tape_device *device,
return -ENOSYS;
if (!crypt_enabled(device))
return -EUNATCH;
- ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
- if (!ext_kekls)
- return -ENOMEM;
- if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
- rc = -EFAULT;
- goto out;
- }
+ ext_kekls = memdup_user((char __user *)arg, sizeof(*ext_kekls));
+ if (IS_ERR(ext_kekls))
+ return PTR_ERR(ext_kekls);
rc = tape_3592_kekl_set(device, ext_kekls);
-out:
kfree(ext_kekls);
return rc;
}
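
The hunk above replaces an open-coded kmalloc() plus copy_from_user() with memdup_user(), which allocates and copies in one step and returns an ERR_PTR() on failure. A generic sketch of the pattern (the struct and the ioctl handler are hypothetical; only memdup_user(), IS_ERR() and PTR_ERR() are real kernel APIs):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_params {			/* hypothetical ioctl payload */
	u32 flags;
	u32 value;
};

static long example_ioctl_set(void __user *arg)
{
	struct example_params *p;

	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENOMEM or -EFAULT */

	/* ... apply *p to the device here ... */

	kfree(p);
	return 0;
}
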
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 6c30e93ab8fa..ff18f373af9a 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -306,10 +306,11 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct urdev *urd;
- TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
- intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
- irb->scsw.cmd.count);
-
+ if (!IS_ERR(irb)) {
+ TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+ intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+ irb->scsw.cmd.count);
+ }
if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n");
return;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 940e725bde1e..11674698b36d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -95,12 +95,13 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
struct chsc_ssd_area *ssd_area;
+ unsigned long flags;
int ccode;
int ret;
int i;
int mask;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
@@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
ssd->fla[i] = ssd_area->fla[i];
}
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 fmt : 4;
u32 : 16;
} __attribute__ ((packed)) *secm_area;
+ unsigned long flags;
int ret, ccode;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
secm_area = chsc_page;
secm_area->request.length = 0x0050;
@@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ unsigned long flags;
int ccode, ret;
struct {
@@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
return -EINVAL;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
@@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
+ unsigned long flags;
int result;
struct {
struct chsc_header request;
@@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void)
u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
@@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return result;
}
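
The chsc.c hunks above convert spin_lock_irq()/spin_unlock_irq() pairs to spin_lock_irqsave()/spin_unlock_irqrestore(), which is required when a function may be called with interrupts already disabled: the *_irqsave form restores the previous interrupt state instead of unconditionally re-enabling interrupts on unlock. A generic sketch of the pattern (the lock and the data it protects are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_shared_counter;

static void example_update(void)
{
	unsigned long flags;

	/* Safe whether the caller runs with interrupts enabled or not. */
	spin_lock_irqsave(&example_lock, flags);
	example_shared_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}
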
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 93de0b46b489..f0e57aefb5f2 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -127,7 +127,6 @@ extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
-extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index c00ac4650dce..bcc8f3dfd4c4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -310,7 +310,7 @@ static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
snprintf(name, sizeof(name), "zfcp_q_%s",
dev_name(&adapter->ccw_device->dev));
- adapter->work_queue = create_singlethread_workqueue(name);
+ adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (adapter->work_queue)
return 0;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5d7fbe4e907e..637cf8973c9e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request for which a response was received
*/
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
+ rec->u.res.port_handle = q_head->port_handle;
+ rec->u.res.lun_handle = q_head->lun_handle;
memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->pl_len, "fsf_res", req->req_id);
}
- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
- }
+ } else
+ rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+ u64 req_id)
+{
+ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ rec->id = ZFCP_DBF_REC_RUN;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->port_status = wka_port->status;
+ rec->d_id = wka_port->d_id;
+ rec->lun = ZFCP_DBF_INVALID_LUN;
+
+ rec->u.run.fsf_req_id = req_id;
+ rec->u.run.rec_status = ~0;
+ rec->u.run.rec_step = ~0;
+ rec->u.run.rec_action = ~0;
+ rec->u.run.rec_count = ~0;
+
+ debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
- u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+ char *paytag, struct scatterlist *sg, u8 id, u16 len,
+ u64 req_id, u32 d_id, u16 cap_len)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ u16 pay_sum = 0;
spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
- memcpy(rec->payload, data, rec_len);
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->pl_len = len; /* full length even if we cap pay below */
+ if (!sg)
+ goto out;
+ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+ if (len <= rec_len)
+ goto out; /* skip pay record if full content in rec->payload */
+
+ /* if (len > rec_len):
+ * dump data up to cap_len ignoring small duplicate in rec->payload
+ */
+ spin_lock_irqsave(&dbf->pay_lock, flags);
+ memset(payload, 0, sizeof(*payload));
+ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+ u16 pay_len, offset = 0;
+
+ while (offset < sg->length && pay_sum < cap_len) {
+ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+ (u16)(sg->length - offset));
+ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+ debug_event(dbf->pay, 1, payload,
+ zfcp_dbf_plen(pay_len));
+ payload->counter++;
+ offset += pay_len;
+ pay_sum += pay_len;
+ }
+ }
+ spin_unlock(&dbf->pay_lock);
+out:
debug_event(dbf->san, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
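
The payload loop added above walks the scatterlist and emits one trace record of at most ZFCP_DBF_PAY_MAX_REC bytes per iteration until either the data or the cap is exhausted. A simplified, buffer-based sketch of that capped chunking (userspace illustration; REC_MAX and the names are made up, while the real code iterates scatterlist entries and calls debug_event()):

#include <stddef.h>

#define REC_MAX 256			/* stands in for ZFCP_DBF_PAY_MAX_REC */

static void emit_record(const unsigned char *data, size_t len)
{
	(void)data;			/* real code copies into a trace buffer */
	(void)len;
}

static void dump_capped(const unsigned char *buf, size_t len, size_t cap)
{
	size_t done = 0;

	while (done < len && done < cap) {
		size_t chunk = len - done;

		if (chunk > REC_MAX)
			chunk = REC_MAX;
		emit_record(buf + done, chunk);	/* one record per chunk */
		done += chunk;
	}
}
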
@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
- fsf->req_id, d_id);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+ length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ struct zfcp_fsf_req *fsf,
+ u16 len)
+{
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_gpn_ft_resp *acc;
+ int max_entries, x, last = 0;
+
+ if (!(memcmp(tag, "fsscth2", 7) == 0
+ && ct_els->d_id == FC_FID_DIR_SERV
+ && reqh->ct_rev == FC_CT_REV
+ && reqh->ct_in_id[0] == 0
+ && reqh->ct_in_id[1] == 0
+ && reqh->ct_in_id[2] == 0
+ && reqh->ct_fs_type == FC_FST_DIR
+ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+ && reqh->ct_options == 0
+ && reqh->_ct_resvd1 == 0
+ && reqh->ct_cmd == FC_NS_GPN_FT
+ /* reqh->ct_mr_size can vary so do not match but read below */
+ && reqh->_ct_resvd2 == 0
+ && reqh->ct_reason == 0
+ && reqh->ct_explan == 0
+ && reqh->ct_vendor == 0
+ && reqn->fn_resvd == 0
+ && reqn->fn_domain_id_scope == 0
+ && reqn->fn_area_id_scope == 0
+ && reqn->fn_fc4_type == FC_TYPE_FCP))
+ return len; /* not GPN_FT response so do not cap */
+
+ acc = sg_virt(resp_entry);
+ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ * to account for header as 1st pseudo "entry" */;
+
+ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+ * response, allowing us to skip special handling for it - just skip it
+ */
+ for (x = 1; x < max_entries && !last; x++) {
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+ acc++;
+ else
+ acc = sg_virt(++resp_entry);
+
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ }
+ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+ return len; /* cap after last entry */
}
/**
@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
- fsf->req_id, 0);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+ length, fsf->req_id, ct_els->d_id,
+ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
+ struct scatterlist sg;
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
- fsf->req_id, ntoh24(srb->d_id));
+ sg_init_one(&sg, srb->payload.data, length);
+ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+ fsf->req_id, ntoh24(srb->d_id), length);
}
/**
@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
*/
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
}
}
- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0be3d48681ae..36d07584271d 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2015
*/
#ifndef ZFCP_DBF_H
@@ -17,6 +17,11 @@
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+enum zfcp_dbf_pseudo_erp_act_type {
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
/**
* struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
* @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+ u16 pl_len;
} __packed;
/**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 port_handle;
+ u32 lun_handle;
} __packed;
/**
@@ -279,7 +287,7 @@ static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
if (debug_level_enabled(req->adapter->dbf->hba, level))
- zfcp_dbf_hba_fsf_res(tag, req);
+ zfcp_dbf_hba_fsf_res(tag, level, req);
}
/**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
scmd->device->host->hostdata[0];
if (debug_level_enabled(adapter->dbf->scsi, level))
- zfcp_dbf_scsi(tag, scmd, req);
+ zfcp_dbf_scsi(tag, level, scmd, req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3fb410977014..a59d678125bd 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ /* This switch case might also happen after a forced reopen
+ * was successfully done and thus overwritten with a new
+ * non-forced reopen at `ersfs_2'. In this case, we must not
+ * do the clean-up of the non-forced version.
+ */
+ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+ if (result == ZFCP_ERP_SUCCEEDED)
+ zfcp_scsi_schedule_rport_register(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 5b500652572b..c8fed9fa1cca 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#ifndef ZFCP_EXT_H
@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 522a633c866a..75f820ca17b7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
if (zfcp_adapter_multi_buffer_active(adapter)) {
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
+ qtcb->bottom.support.req_buf_length =
+ zfcp_qdio_real_bytes(sg_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
+ qtcb->bottom.support.resp_buf_length =
+ zfcp_qdio_real_bytes(sg_resp);
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
zfcp_qdio_sbale_count(sg_req));
@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
+ ct->d_id = wka_port->d_id;
req->data = ct;
zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
+ els->d_id = d_id;
req->data = els;
zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1575,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 57ae3ae1046d..be1c04b334c5 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#ifndef FSF_H
@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
* @status: used to pass error status to calling function
+ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
*/
struct zfcp_fsf_ct_els {
struct scatterlist *req;
@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
void *handler_data;
struct zfcp_port *port;
int status;
+ u32 d_id;
};
#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b3c6ff49103b..9069f98a1817 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
+ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7d1b4317eccc..3e2bdb90813c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -396,18 +396,6 @@ config SCSI_3W_SAS
Please read the comments at the top of
<file:drivers/scsi/3w-sas.c>.
-config SCSI_7000FASST
- tristate "7000FASST SCSI support"
- depends on ISA && SCSI && ISA_DMA_API
- select CHECK_SIGNATURE
- help
- This driver supports the Western Digital 7000 SCSI host adapter
- family. Some information is in the source:
- <file:drivers/scsi/wd7000.c>.
-
- To compile this driver as a module, choose M here: the
- module will be called wd7000.
-
config SCSI_ACARD
tristate "ACARD SCSI support"
depends on PCI && SCSI
@@ -512,18 +500,6 @@ config SCSI_ADVANSYS
To compile this driver as a module, choose M here: the
module will be called advansys.
-config SCSI_IN2000
- tristate "Always IN2000 SCSI support"
- depends on ISA && SCSI
- help
- This is support for an ISA bus SCSI host adapter. You'll find more
- information in <file:Documentation/scsi/in2000.txt>. If it doesn't work
- out of the box, you may have to change the jumpers for IRQ or
- address selection.
-
- To compile this driver as a module, choose M here: the
- module will be called in2000.
-
config SCSI_ARCMSR
tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter"
depends on PCI && SCSI
@@ -540,6 +516,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
@@ -660,20 +637,6 @@ config SCSI_DMX3191D
To compile this driver as a module, choose M here: the
module will be called dmx3191d.
-config SCSI_DTC3280
- tristate "DTC3180/3280 SCSI support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- select CHECK_SIGNATURE
- help
- This is support for DTC 3180/3280 SCSI Host Adapters. Please read
- the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, and the file
- <file:Documentation/scsi/dtc3x80.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called dtc.
-
config SCSI_EATA
tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
@@ -1248,20 +1211,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
not allow targets to disconnect is not reasonable if there is more
than 1 device on a SCSI bus. The normal answer therefore is N.
-config SCSI_PAS16
- tristate "PAS16 SCSI support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- ---help---
- This is support for a SCSI host adapter. It is explained in section
- 3.10 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/pas16.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called pas16.
-
config SCSI_QLOGIC_FAS
tristate "Qlogic FAS SCSI support"
depends on ISA && SCSI
@@ -1382,89 +1331,6 @@ config SCSI_AM53C974
To compile this driver as a module, choose M here: the
module will be called am53c974.
-config SCSI_T128
- tristate "Trantor T128/T128F/T228 SCSI support"
- depends on ISA && SCSI
- select SCSI_SPI_ATTRS
- select CHECK_SIGNATURE
- ---help---
- This is support for a SCSI host adapter. It is explained in section
- 3.11 of the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/t128.h>. Note that Trantor was purchased by
- Adaptec, and some former Trantor products are being sold under the
- Adaptec name.
-
- To compile this driver as a module, choose M here: the
- module will be called t128.
-
-config SCSI_U14_34F
- tristate "UltraStor 14F/34F support"
- depends on ISA && SCSI && ISA_DMA_API
- ---help---
- This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
- The source at <file:drivers/scsi/u14-34f.c> contains some
- information about this hardware. If the driver doesn't work out of
- the box, you may have to change some settings in
- <file: drivers/scsi/u14-34f.c>. Read the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that there is also
- another driver for the same hardware: "UltraStor SCSI support",
- below. You should say Y to both only if you want 24F support as
- well.
-
- To compile this driver as a module, choose M here: the
- module will be called u14-34f.
-
-config SCSI_U14_34F_TAGGED_QUEUE
- bool "enable tagged command queueing"
- depends on SCSI_U14_34F
- help
- This is a feature of SCSI-2 which improves performance: the host
- adapter can send several SCSI commands to a device's queue even if
- previous commands haven't finished yet.
- This is equivalent to the "u14-34f=tc:y" boot option.
-
-config SCSI_U14_34F_LINKED_COMMANDS
- bool "enable elevator sorting"
- depends on SCSI_U14_34F
- help
- This option enables elevator sorting for all probed SCSI disks and
- CD-ROMs. It definitely reduces the average seek distance when doing
- random seeks, but this does not necessarily result in a noticeable
- performance improvement: your mileage may vary...
- This is equivalent to the "u14-34f=lc:y" boot option.
-
-config SCSI_U14_34F_MAX_TAGS
- int "maximum number of queued commands"
- depends on SCSI_U14_34F
- default "8"
- help
- This specifies how many SCSI commands can be maximally queued for
- each probed SCSI device. You should reduce the default value of 8
- only if you have disks with buggy or limited tagged command support.
- Minimum is 2 and maximum is 14. This value is also the window size
- used by the elevator sorting option above. The effective value used
- by the driver for each probed SCSI device is reported at boot time.
- This is equivalent to the "u14-34f=mq:8" boot option.
-
-config SCSI_ULTRASTOR
- tristate "UltraStor SCSI support"
- depends on X86 && ISA && SCSI && ISA_DMA_API
- ---help---
- This is support for the UltraStor 14F, 24F and 34F SCSI-2 host
- adapter family. This driver is explained in section 3.12 of the
- SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. If it doesn't work out
- of the box, you may have to change some settings in
- <file:drivers/scsi/ultrastor.h>.
-
- Note that there is also another driver for the same hardware:
- "UltraStor 14F/34F support", above.
-
- To compile this driver as a module, choose M here: the
- module will be called ultrastor.
-
config SCSI_NSP32
tristate "Workbit NinjaSCSI-32Bi/UDE support"
depends on PCI && SCSI && !64BIT
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index d5397987e731..38d938d7fe67 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -61,9 +61,7 @@ obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
-obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
-obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
obj-$(CONFIG_SCSI_AHA1740) += aha1740.o
@@ -75,7 +73,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
-obj-$(CONFIG_SCSI_IN2000) += in2000.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
@@ -90,15 +87,12 @@ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
-obj-$(CONFIG_SCSI_PAS16) += pas16.o
-obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_HPSA) += hpsa.o
-obj-$(CONFIG_SCSI_DTC3280) += dtc.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
-obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 43908bbb3b23..db2739079cbb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -230,13 +230,6 @@ static int NCR5380_poll_politely2(struct Scsi_Host *instance,
return -ETIMEDOUT;
}
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
-{
- return NCR5380_poll_politely2(instance, reg, bit, val,
- reg, bit, val, wait);
-}
-
#if NDEBUG
static struct {
unsigned char mask;
@@ -1854,11 +1847,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* XXX - need to source or sink data here, as appropriate */
}
} else {
- /* Break up transfer into 3 ms chunks,
- * presuming 6 accesses per handshake.
+ /* Transfer a small chunk so that the
+ * irq mode lock is not held too long.
*/
- transfersize = min((unsigned long)cmd->SCp.this_residual,
- hostdata->accesses_per_ms / 2);
+ transfersize = min(cmd->SCp.this_residual,
+ NCR5380_PIO_CHUNK_SIZE);
len = transfersize;
NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
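Note: the chunking above caps each PIO burst at NCR5380_PIO_CHUNK_SIZE (256 bytes, defined in the header change that follows) so the lock held in irq mode is released between bursts. A minimal userspace sketch of that chunking pattern, using illustrative names (do_pio, transfer_pio_chunked) rather than the driver's symbols:

#include <stddef.h>
#include <string.h>

#define PIO_CHUNK_SIZE 256	/* mirrors NCR5380_PIO_CHUNK_SIZE */

/* Stand-in for the real handshaking transfer routine. */
static void do_pio(unsigned char *dst, const unsigned char *src, size_t len)
{
	memcpy(dst, src, len);
}

/* Move 'residual' bytes in fixed-size chunks; between chunks the driver
 * can give up the lock it holds in irq mode. */
static void transfer_pio_chunked(unsigned char *dst, const unsigned char *src,
				 size_t residual)
{
	while (residual) {
		size_t chunk = residual < PIO_CHUNK_SIZE ? residual : PIO_CHUNK_SIZE;

		do_pio(dst, src, chunk);
		dst += chunk;
		src += chunk;
		residual -= chunk;
	}
}

int main(void)
{
	unsigned char src[1000] = { 1 }, dst[1000];

	transfer_pio_chunked(dst, src, sizeof(src));
	return 0;
}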
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index c60728785d89..965d92339455 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -250,6 +250,8 @@ struct NCR5380_cmd {
#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd))
+#define NCR5380_PIO_CHUNK_SIZE 256
+
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -292,8 +294,14 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
+ int reg, int bit, int val, int wait)
+{
+ return NCR5380_poll_politely2(instance, reg, bit, val,
+ reg, bit, val, wait);
+}
+
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
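Note: moving NCR5380_poll_politely() into the header as a static inline keeps the single-condition poll available to every board file that includes NCR5380.h, while the real work stays in NCR5380_poll_politely2(). A stripped-down sketch of the same wrapper shape, with placeholder names:

/* General form: wait until (reg_a & bit_a) == val_a or (reg_b & bit_b) == val_b,
 * giving up after 'wait' ticks. Body omitted; only the wrapper shape matters. */
static int poll_two(int reg_a, int bit_a, int val_a,
		    int reg_b, int bit_b, int val_b, int wait)
{
	(void)reg_a; (void)bit_a; (void)val_a;
	(void)reg_b; (void)bit_b; (void)val_b; (void)wait;
	return 0;
}

/* Single-condition variant: pass the same register/bit/value twice. */
static inline int poll_one(int reg, int bit, int val, int wait)
{
	return poll_two(reg, bit, val, reg, bit, val, wait);
}

int main(void)
{
	return poll_one(0, 0x40, 0x40, 10);
}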
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 28f8b8a1b8a4..0c453880f214 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -613,7 +613,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
* @dev: Adapter
* @comm: communications method
*/
-int aac_src_select_comm(struct aac_dev *dev, int comm)
+static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
switch (comm) {
case AAC_COMM_MESSAGE:
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 0fdc98bc2338..7c713f797535 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -632,7 +632,7 @@ int asd_init_hw(struct asd_ha_struct *asd_ha)
pci_name(asd_ha->pcidev));
return err;
}
- pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+ err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
v | SC_TMR_DIS);
if (err) {
asd_printk("couldn't disable split completion timer of %s\n",
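Note: the aic94xx hunk stops ignoring the result of pci_write_config_dword() and reports a failure to disable the split-completion timer. A trivial sketch of the check-every-config-write pattern; cfg_write() and the register value are made up for illustration:

#include <stdio.h>

/* Illustrative config-space accessor; in the driver this is
 * pci_write_config_dword(), whose return value the hunk now checks. */
static int cfg_write(unsigned int reg, unsigned int val)
{
	(void)reg; (void)val;
	return 0;	/* pretend the write succeeded */
}

static int disable_timer(void)
{
	int err = cfg_write(0x94, 0x1);	/* register/value are made up */

	if (err) {
		fprintf(stderr, "couldn't disable split completion timer\n");
		return err;
	}
	return 0;
}

int main(void)
{
	return disable_timer();
}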
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 7640498964a5..3d53d636b17b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2388,15 +2388,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
}
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned char *ver_addr;
- int32_t user_len, cnt2end;
+ uint32_t user_len;
+ int32_t cnt2end;
uint8_t *pQbuffer, *ptmpuserbuffer;
+
+ user_len = pcmdmessagefld->cmdmessage.Length;
+ if (user_len > ARCMSR_API_DATA_BUFLEN) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
+ }
+
ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
ptmpuserbuffer = ver_addr;
- user_len = pcmdmessagefld->cmdmessage.Length;
+
memcpy(ptmpuserbuffer,
pcmdmessagefld->messagedatabuffer, user_len);
spin_lock_irqsave(&acb->wqbuffer_lock, flags);
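Note: the arcmsr change validates the user-supplied cmdmessage.Length against ARCMSR_API_DATA_BUFLEN before any allocation or copy, closing a potential overflow of the kmalloc'd buffer. A standalone sketch of the validate-then-copy pattern (the buffer size and names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define API_DATA_BUFLEN 1032	/* illustrative fixed buffer size */

/* Copy a caller-supplied message into a bounded scratch buffer.
 * Returns 0 on success, -1 if the length cannot be trusted. */
static int copy_user_message(const uint8_t *msg, uint32_t user_len)
{
	uint8_t *buf;

	if (user_len > API_DATA_BUFLEN)	/* reject before any allocation/copy */
		return -1;

	buf = malloc(API_DATA_BUFLEN);
	if (!buf)
		return -1;

	memcpy(buf, msg, user_len);
	/* ... hand 'buf' to the queue logic ... */
	free(buf);
	return 0;
}

int main(void)
{
	uint8_t msg[16] = { 0 };

	return copy_user_message(msg, sizeof(msg));
}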
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index ee5ace873535..b1d0fdc5d5e1 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -89,7 +89,7 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
u32 et_eqd; /* configured val when aic is off */
- ulong jiffs;
+ ulong jiffies;
u64 eq_prev; /* Used to calculate eqe */
};
@@ -100,7 +100,7 @@ struct be_eq_obj {
struct be_queue_info q;
struct beiscsi_hba *phba;
struct be_queue_info *cq;
- struct work_struct work_cqs; /* Work Item */
+ struct work_struct mcc_work; /* Work Item */
struct irq_poll iopoll;
};
@@ -111,8 +111,11 @@ struct be_mcc_obj {
struct beiscsi_mcc_tag_state {
unsigned long tag_state;
-#define MCC_TAG_STATE_RUNNING 1
-#define MCC_TAG_STATE_TIMEOUT 2
+#define MCC_TAG_STATE_RUNNING 0
+#define MCC_TAG_STATE_TIMEOUT 1
+#define MCC_TAG_STATE_ASYNC 2
+#define MCC_TAG_STATE_IGNORE 3
+ void (*cbfn)(struct beiscsi_hba *, unsigned int);
struct be_dma_mem tag_mem_state;
};
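Note: in the be.h hunk the MCC tag states change from the values 1 and 2 to consecutive bit numbers starting at 0, which is the convention set_bit()/test_bit()/clear_bit() expect for the tag_state word. A userspace sketch of that bit-number convention; the helpers are stand-ins for the kernel bitops:

#include <stdio.h>

enum {
	TAG_STATE_RUNNING = 0,
	TAG_STATE_TIMEOUT = 1,
	TAG_STATE_ASYNC   = 2,
	TAG_STATE_IGNORE  = 3,
};

static inline void state_set(unsigned long *word, int bit)   { *word |=  (1UL << bit); }
static inline void state_clear(unsigned long *word, int bit) { *word &= ~(1UL << bit); }
static inline int  state_test(unsigned long word, int bit)   { return !!(word & (1UL << bit)); }

int main(void)
{
	unsigned long tag_state = 0;

	state_set(&tag_state, TAG_STATE_RUNNING);
	state_set(&tag_state, TAG_STATE_ASYNC);
	printf("running=%d timeout=%d async=%d\n",
	       state_test(tag_state, TAG_STATE_RUNNING),
	       state_test(tag_state, TAG_STATE_TIMEOUT),
	       state_test(tag_state, TAG_STATE_ASYNC));
	state_clear(&tag_state, TAG_STATE_RUNNING);
	return 0;
}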
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index a55eaeea37e7..be65da2988fb 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -21,110 +21,77 @@
#include "be.h"
#include "be_mgmt.h"
-int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
-{
- u32 sreset;
- u8 *pci_reset_offset = 0;
- u8 *pci_online0_offset = 0;
- u8 *pci_online1_offset = 0;
- u32 pconline0 = 0;
- u32 pconline1 = 0;
- u32 i;
-
- pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
- pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
- pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
- sreset = readl((void *)pci_reset_offset);
- sreset |= BE2_SET_RESET;
- writel(sreset, (void *)pci_reset_offset);
-
- i = 0;
- while (sreset & BE2_SET_RESET) {
- if (i > 64)
- break;
- msleep(100);
- sreset = readl((void *)pci_reset_offset);
- i++;
- }
-
- if (sreset & BE2_SET_RESET) {
- printk(KERN_ERR DRV_NAME
- " Soft Reset did not deassert\n");
- return -EIO;
- }
- pconline1 = BE2_MPU_IRAM_ONLINE;
- writel(pconline0, (void *)pci_online0_offset);
- writel(pconline1, (void *)pci_online1_offset);
-
- sreset |= BE2_SET_RESET;
- writel(sreset, (void *)pci_reset_offset);
-
- i = 0;
- while (sreset & BE2_SET_RESET) {
- if (i > 64)
- break;
- msleep(1);
- sreset = readl((void *)pci_reset_offset);
- i++;
- }
- if (sreset & BE2_SET_RESET) {
- printk(KERN_ERR DRV_NAME
- " MPU Online Soft Reset did not deassert\n");
- return -EIO;
- }
- return 0;
-}
-
-int be_chk_reset_complete(struct beiscsi_hba *phba)
-{
- unsigned int num_loop;
- u8 *mpu_sem = 0;
- u32 status;
-
- num_loop = 1000;
- mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- msleep(5000);
-
- while (num_loop) {
- status = readl((void *)mpu_sem);
-
- if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
- break;
- msleep(60);
- num_loop--;
- }
-
- if ((status & 0x80000000) || (!num_loop)) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BC_%d : Failed in be_chk_reset_complete"
- "status = 0x%x\n", status);
- return -EIO;
- }
-
- return 0;
-}
-
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
-{
- unsigned int tag = 0;
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
- spin_lock(&phba->ctrl.mcc_lock);
- if (phba->ctrl.mcc_tag_available) {
- tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
- phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
- phba->ctrl.mcc_tag_status[tag] = 0;
- phba->ctrl.ptag_state[tag].tag_state = 0;
- }
- if (tag) {
- phba->ctrl.mcc_tag_available--;
- if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
- phba->ctrl.mcc_alloc_index = 0;
- else
- phba->ctrl.mcc_alloc_index++;
- }
- spin_unlock(&phba->ctrl.mcc_lock);
- return tag;
-}
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
unsigned int *ref_tag)
@@ -133,7 +100,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
struct be_mcc_wrb *wrb = NULL;
unsigned int tag;
- spin_lock_bh(&phba->ctrl.mcc_lock);
+ spin_lock(&phba->ctrl.mcc_lock);
if (mccq->used == mccq->len) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -160,6 +127,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
phba->ctrl.mcc_tag_status[tag] = 0;
phba->ctrl.ptag_state[tag].tag_state = 0;
+ phba->ctrl.ptag_state[tag].cbfn = NULL;
phba->ctrl.mcc_tag_available--;
if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
phba->ctrl.mcc_alloc_index = 0;
@@ -174,7 +142,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
mccq->used++;
alloc_failed:
- spin_unlock_bh(&phba->ctrl.mcc_lock);
+ spin_unlock(&phba->ctrl.mcc_lock);
return wrb;
}
@@ -182,7 +150,7 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
struct be_queue_info *mccq = &ctrl->mcc_obj.q;
- spin_lock_bh(&ctrl->mcc_lock);
+ spin_lock(&ctrl->mcc_lock);
tag = tag & MCC_Q_CMD_TAG_MASK;
ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
@@ -191,16 +159,71 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
ctrl->mcc_free_index++;
ctrl->mcc_tag_available++;
mccq->used--;
- spin_unlock_bh(&ctrl->mcc_lock);
+ spin_unlock(&ctrl->mcc_lock);
}
-/**
- * beiscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- **/
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+/*
+ * beiscsi_mcc_compl_status - Return the status of MCC completion
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ */
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+ unsigned int tag,
+ struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem)
{
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ uint16_t status = 0, addl_status = 0, wrb_num = 0;
+ struct be_cmd_resp_hdr *mbx_resp_hdr;
+ struct be_cmd_req_hdr *mbx_hdr;
+ struct be_mcc_wrb *temp_wrb;
+ uint32_t mcc_tag_status;
+ int rc = 0;
+
+ mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+ status = (mcc_tag_status & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
+ CQE_STATUS_ADDL_SHIFT);
+
+ if (mbx_cmd_mem) {
+ mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
+ } else {
+ wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
+ CQE_STATUS_WRB_SHIFT;
+ temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+ mbx_hdr = embedded_payload(temp_wrb);
+
+ if (wrb)
+ *wrb = temp_wrb;
+ }
+
+ if (status || addl_status) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
+ mbx_hdr->subsystem, mbx_hdr->opcode,
+ status, addl_status);
+ rc = -EIO;
+ if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+ mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
+ mbx_resp_hdr->response_length,
+ mbx_resp_hdr->actual_resp_len);
+ rc = -EAGAIN;
+ }
+ }
+
+ return rc;
}
/*
@@ -217,26 +240,34 @@ void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
* Failure: Non-Zero
**/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
+ unsigned int tag,
+ struct be_mcc_wrb **wrb,
struct be_dma_mem *mbx_cmd_mem)
{
int rc = 0;
- uint32_t mcc_tag_status;
- uint16_t status = 0, addl_status = 0, wrb_num = 0;
- struct be_mcc_wrb *temp_wrb;
- struct be_cmd_req_hdr *mbx_hdr;
- struct be_cmd_resp_hdr *mbx_resp_hdr;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- if (beiscsi_error(phba))
- return -EPERM;
+ if (beiscsi_hba_in_error(phba)) {
+ clear_bit(MCC_TAG_STATE_RUNNING,
+ &phba->ctrl.ptag_state[tag].tag_state);
+ return -EIO;
+ }
/* wait for the mccq completion */
- rc = wait_event_interruptible_timeout(
- phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_tag_status[tag],
- msecs_to_jiffies(
- BEISCSI_HOST_MBX_TIMEOUT));
+ rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_tag_status[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+ /**
+ * Return EIO if port is being disabled. Associated DMA memory, if any,
+	 * is freed by the caller. When the port goes offline, the MCCQ is
+	 * cleaned up and so are the WRBs.
+ */
+ if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+ clear_bit(MCC_TAG_STATE_RUNNING,
+ &phba->ctrl.ptag_state[tag].tag_state);
+ return -EIO;
+ }
+
/**
* If MBOX cmd timeout expired, tag and resource allocated
* for cmd is not freed until FW returns completion.
@@ -270,47 +301,7 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
return -EBUSY;
}
- rc = 0;
- mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
- status = (mcc_tag_status & CQE_STATUS_MASK);
- addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
- CQE_STATUS_ADDL_SHIFT);
-
- if (mbx_cmd_mem) {
- mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
- } else {
- wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
- CQE_STATUS_WRB_SHIFT;
- temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
- mbx_hdr = embedded_payload(temp_wrb);
-
- if (wrb)
- *wrb = temp_wrb;
- }
-
- if (status || addl_status) {
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
- BEISCSI_LOG_CONFIG,
- "BC_%d : MBX Cmd Failed for "
- "Subsys : %d Opcode : %d with "
- "Status : %d and Extd_Status : %d\n",
- mbx_hdr->subsystem,
- mbx_hdr->opcode,
- status, addl_status);
- rc = -EIO;
- if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
- BEISCSI_LOG_CONFIG,
- "BC_%d : Insufficient Buffer Error "
- "Resp_Len : %d Actual_Resp_Len : %d\n",
- mbx_resp_hdr->response_length,
- mbx_resp_hdr->actual_resp_len);
- rc = -EAGAIN;
- }
- }
+ rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);
free_mcc_wrb(&phba->ctrl, tag);
return rc;
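Note: beiscsi_mccq_compl_wait() now delegates status decoding to __beiscsi_mcc_compl_status(), which splits the per-tag completion word into a base status and an additional status using the CQE_STATUS_* masks. A small sketch of that unpacking; the mask and shift values below are assumptions for illustration, not the driver's exact constants:

#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK        0x0000FFFF	/* assumed layout */
#define STATUS_ADDL_MASK   0x00FF0000
#define STATUS_ADDL_SHIFT  16

int main(void)
{
	uint32_t tag_status = (0x2A << STATUS_ADDL_SHIFT) | 0x0004;
	uint16_t status = tag_status & STATUS_MASK;
	uint16_t addl   = (tag_status & STATUS_ADDL_MASK) >> STATUS_ADDL_SHIFT;

	printf("status=%u addl_status=%u\n", status, addl);
	return 0;
}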
@@ -330,11 +321,10 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
- u16 compl_status, extd_status;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
- struct be_cmd_resp_hdr *resp_hdr;
+ u16 compl_status, extd_status;
/**
* To check if valid bit is set, check the entire word as we don't know
@@ -368,14 +358,7 @@ static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
hdr->subsystem, hdr->opcode, compl_status, extd_status);
-
- if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- /* if status is insufficient buffer, check the length */
- resp_hdr = (struct be_cmd_resp_hdr *) hdr;
- if (resp_hdr->response_length)
- return 0;
- }
- return -EINVAL;
+ return compl_status;
}
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
@@ -391,18 +374,19 @@ static void beiscsi_process_async_link(struct beiscsi_hba *phba,
* This has been newly introduced in SKH-R Firmware 10.0.338.45.
**/
if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
- phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
+ set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
+ if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+ beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
__beiscsi_log(phba, KERN_ERR,
"BC_%d : Link Up on Port %d tag 0x%x\n",
evt->physical_port, evt->event_tag);
} else {
- phba->state = BE_ADAPTER_LINK_DOWN;
+ clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
__beiscsi_log(phba, KERN_ERR,
"BC_%d : Link Down on Port %d tag 0x%x\n",
evt->physical_port, evt->event_tag);
iscsi_host_for_each_session(phba->shost,
- beiscsi_fail_session);
+ beiscsi_session_fail);
}
}
@@ -482,8 +466,8 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
beiscsi_process_async_link(phba, compl);
break;
case ASYNC_EVENT_CODE_ISCSI:
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
+ if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+ beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
sev = KERN_ERR;
break;
case ASYNC_EVENT_CODE_SLI:
@@ -519,6 +503,9 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
return 0;
}
+ /* end MCC with this tag */
+ clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+
if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
@@ -529,9 +516,11 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
* Only for non-embedded cmd, PCI resource is allocated.
**/
tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
- if (tag_mem->size)
+ if (tag_mem->size) {
pci_free_consistent(ctrl->pdev, tag_mem->size,
tag_mem->va, tag_mem->dma);
+ tag_mem->size = 0;
+ }
free_mcc_wrb(ctrl, tag);
return 0;
}
@@ -550,57 +539,25 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
CQE_STATUS_ADDL_MASK;
ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
- /* write ordering forced in wake_up_interruptible */
- clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
- wake_up_interruptible(&ctrl->mcc_wait[tag]);
- return 0;
-}
-
-/*
- * be_mcc_compl_poll()- Wait for MBX completion
- * @phba: driver private structure
- *
- * Wait till no more pending mcc requests are present
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- *
- **/
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- int i;
-
- if (!test_bit(MCC_TAG_STATE_RUNNING,
- &ctrl->ptag_state[tag].tag_state)) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d: tag %u state not running\n", tag);
+ if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
+ if (ctrl->ptag_state[tag].cbfn)
+ ctrl->ptag_state[tag].cbfn(phba, tag);
+ else
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : MBX ASYNC command with no callback\n");
+ free_mcc_wrb(ctrl, tag);
return 0;
}
- for (i = 0; i < mcc_timeout; i++) {
- if (beiscsi_error(phba))
- return -EIO;
- beiscsi_process_mcc_cq(phba);
- /* after polling, wrb and tag need to be released */
- if (!test_bit(MCC_TAG_STATE_RUNNING,
- &ctrl->ptag_state[tag].tag_state)) {
- free_mcc_wrb(ctrl, tag);
- break;
- }
- udelay(100);
- }
-
- if (i < mcc_timeout)
+ if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
+ /* just check completion status and free wrb */
+ __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+ free_mcc_wrb(ctrl, tag);
return 0;
+ }
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
- return -EBUSY;
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ return 0;
}
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
@@ -642,7 +599,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
*/
timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
do {
- if (beiscsi_error(phba))
+ if (beiscsi_hba_in_error(phba))
return -EIO;
ready = ioread32(db);
@@ -655,16 +612,14 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
if (time_after(jiffies, timeout))
break;
- msleep(20);
+ /* 1ms sleep is enough in most cases */
+ schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (!ready);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : FW Timed Out\n");
-
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
-
+ set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
return -EBUSY;
}
@@ -679,7 +634,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
* Success: 0
* Failure: Non-Zero
**/
-int be_mbox_notify(struct be_ctrl_info *ctrl)
+static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
int status;
u32 val = 0;
@@ -819,87 +774,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
return status;
}
-/**
- * be_cmd_fw_initialize()- Initialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW initialize pattern for the function.
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
-{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- int status;
- u8 *endian_check;
-
- mutex_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
-
- endian_check = (u8 *) wrb;
- *endian_check++ = 0xFF;
- *endian_check++ = 0x12;
- *endian_check++ = 0x34;
- *endian_check++ = 0xFF;
- *endian_check++ = 0xFF;
- *endian_check++ = 0x56;
- *endian_check++ = 0x78;
- *endian_check++ = 0xFF;
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
- status = be_mbox_notify(ctrl);
- if (status)
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BC_%d : be_cmd_fw_initialize Failed\n");
-
- mutex_unlock(&ctrl->mbox_lock);
- return status;
-}
-
-/**
- * be_cmd_fw_uninit()- Uinitialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW uninitialize pattern for the function
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
-{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- int status;
- u8 *endian_check;
-
- mutex_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
-
- endian_check = (u8 *) wrb;
- *endian_check++ = 0xFF;
- *endian_check++ = 0xAA;
- *endian_check++ = 0xBB;
- *endian_check++ = 0xFF;
- *endian_check++ = 0xFF;
- *endian_check++ = 0xCC;
- *endian_check++ = 0xDD;
- *endian_check = 0xFF;
-
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
- status = be_mbox_notify(ctrl);
- if (status)
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BC_%d : be_cmd_fw_uninit Failed\n");
-
- mutex_unlock(&ctrl->mbox_lock);
- return status;
-}
-
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay, int coalesce_wm)
@@ -1343,25 +1217,6 @@ error:
return status;
}
-int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_post_sgl_pages_req *req = embedded_payload(wrb);
- int status;
-
- mutex_lock(&ctrl->mbox_lock);
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
- status = be_mbox_notify(ctrl);
-
- mutex_unlock(&ctrl->mbox_lock);
- return status;
-}
-
/**
* be_cmd_set_vlan()- Configure VLAN parameters on the adapter
* @phba: device priv structure instance
@@ -1402,3 +1257,564 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
return tag;
}
+
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_mgmt_controller_attributes *req;
+ struct be_sge *sge = nonembedded_sgl(wrb);
+ int status = 0;
+
+ nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ sizeof(struct be_mgmt_controller_attributes),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : pci_alloc_consistent failed in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+ nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
+ req = nonemb_cmd.va;
+ memset(req, 0, sizeof(*req));
+ mutex_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd.size);
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Firmware Version of CMD : %s\n"
+ "Firmware Version is : %s\n"
+ "Developer Build, not performing version check...\n",
+ resp->params.hba_attribs
+ .flashrom_version_string,
+ resp->params.hba_attribs.
+ firmware_version_string);
+
+ phba->fw_config.iscsi_features =
+ resp->params.hba_attribs.iscsi_features;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : phba->fw_config.iscsi_features = %d\n",
+ phba->fw_config.iscsi_features);
+ memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+ firmware_version_string, BEISCSI_VER_STRLEN);
+ } else
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in beiscsi_check_supported_fw\n");
+ mutex_unlock(&ctrl->mbox_lock);
+ if (nonemb_cmd.va)
+ pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
+ return status;
+}
+
+/**
+ * beiscsi_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+ uint32_t cid_count, icd_count;
+ int status = -EINVAL;
+ uint8_t ulp_num = 0;
+
+ mutex_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
+
+ be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+
+ if (be_mbox_notify(ctrl)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in beiscsi_get_fw_config\n");
+ goto fail_init;
+ }
+
+ /* FW response formats depend on port id */
+ phba->fw_config.phys_port = pfw_cfg->phys_port;
+ if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid physical port id %d\n",
+ phba->fw_config.phys_port);
+ goto fail_init;
+ }
+
+ /* populate and check FW config against min and max values */
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+ if (phba->fw_config.eqid_count == 0 ||
+ phba->fw_config.eqid_count > 2048) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid EQ count %d\n",
+ phba->fw_config.eqid_count);
+ goto fail_init;
+ }
+ if (phba->fw_config.cqid_count == 0 ||
+ phba->fw_config.cqid_count > 4096) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid CQ count %d\n",
+ phba->fw_config.cqid_count);
+ goto fail_init;
+ }
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
+
+ /**
+	 * Check on which ULPs the iSCSI protocol is loaded and set the
+	 * bit for each of those ULPs. This flag is used throughout the
+	 * code to check on which ULP the iSCSI protocol is loaded.
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE) {
+ set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+ /* Get the CID, ICD and Chain count for each ULP */
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
+ }
+ }
+
+ if (phba->fw_config.ulp_supported == 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+ pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+ pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+ goto fail_init;
+ }
+
+ /**
+ * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ if (icd_count == 0 || icd_count > 65536) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d: invalid ICD count %d\n", icd_count);
+ goto fail_init;
+ }
+
+ cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+ if (cid_count == 0 || cid_count > 4096) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d: invalid CID count %d\n", cid_count);
+ goto fail_init;
+ }
+
+ /**
+	 * Check whether the FW is dual-ULP aware, i.e. it can handle
+	 * either of the protocols.
+ */
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
+ /* all set, continue using this FW config */
+ status = 0;
+fail_init:
+ mutex_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/**
+ * beiscsi_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character for port
+ *
+ **/
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
+{
+ int ret = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_port_name *ioctl;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ret = be_mbox_notify(ctrl);
+ phba->port_name = 0;
+ if (!ret) {
+ phba->port_name = ioctl->p.resp.port_names >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+ ret, ioctl->h.resp_hdr.status);
+ }
+
+ if (phba->port_name == 0)
+ phba->port_name = '?';
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_cmd_set_features *ioctl;
+ struct be_mcc_wrb *wrb;
+ int ret = 0;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FEATURES,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ioctl->feature = BE_CMD_SET_FEATURE_UER;
+ ioctl->param_len = sizeof(ioctl->param.req);
+ ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
+ ret = be_mbox_notify(ctrl);
+ if (!ret) {
+ phba->ue2rp = ioctl->param.resp.ue2rp;
+ set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : HBA error recovery supported\n");
+ } else {
+ /**
+ * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
+ * Older FW versions return this error.
+ */
+ if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
+ ret == MCC_STATUS_INVALID_LENGTH)
+ __beiscsi_log(phba, KERN_INFO,
+ "BG_%d : HBA error recovery not supported\n");
+ }
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
+
+static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
+{
+ u32 sem;
+
+ if (is_chip_be2_be3r(phba))
+ sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
+ else
+ pci_read_config_dword(phba->pcidev,
+ SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+ return sem;
+}
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
+{
+ u32 loop, post, rdy = 0;
+
+ loop = 1000;
+ while (loop--) {
+ post = beiscsi_get_post_stage(phba);
+ if (post & POST_ERROR_BIT)
+ break;
+ if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
+ rdy = 1;
+ break;
+ }
+ msleep(60);
+ }
+
+ if (!rdy) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : FW not ready 0x%x\n", post);
+ }
+
+ return rdy;
+}
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ mutex_lock(&ctrl->mbox_lock);
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+ status = be_mbox_notify(ctrl);
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ u8 *endian_check;
+ int status;
+
+ mutex_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ endian_check = (u8 *) wrb;
+ if (load) {
+ /* to start communicating */
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x12;
+ *endian_check++ = 0x34;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x56;
+ *endian_check++ = 0x78;
+ *endian_check++ = 0xFF;
+ } else {
+ /* to stop communicating */
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xAA;
+ *endian_check++ = 0xBB;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xCC;
+ *endian_check++ = 0xDD;
+ *endian_check = 0xFF;
+ }
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BC_%d : special WRB message failed\n");
+ mutex_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba)
+{
+ int status;
+
+ /* check POST stage before talking to FW */
+ status = beiscsi_check_fw_rdy(phba);
+ if (!status)
+ return -EIO;
+
+ /* clear all error states after checking FW rdy */
+ phba->state &= ~BEISCSI_HBA_IN_ERR;
+
+ /* check again UER support */
+ phba->state &= ~BEISCSI_HBA_UER_SUPP;
+
+ /*
+ * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
+ * It should clean up any stale info in FW for this fn.
+ */
+ status = beiscsi_cmd_function_reset(phba);
+ if (status) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BC_%d : SLI Function Reset failed\n");
+ return status;
+ }
+
+ /* indicate driver is loading */
+ return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
+}
+
+/**
+ * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp: ULP number.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct iscsi_cleanup_req_v1 *req_v1;
+ struct iscsi_cleanup_req *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
+
+ /**
+ * TODO: Check with FW folks the chute value to be set.
+ * For now, use the ULP_MASK as the chute value.
+ */
+ if (is_chip_be2_be3r(phba)) {
+ req->chute = (1 << ulp);
+ req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
+ req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
+ } else {
+ req_v1 = (struct iscsi_cleanup_req_v1 *)req;
+ req_v1->hdr.version = 1;
+ req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
+ ulp));
+ req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
+ ulp));
+ }
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BG_%d : %s failed %d\n", __func__, ulp);
+ mutex_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+/*
+ * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+int beiscsi_detect_ue(struct beiscsi_hba *phba)
+{
+ uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+ uint32_t ue_hi = 0, ue_lo = 0;
+ uint8_t i = 0;
+ int ret = 0;
+
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_LOW,
+ &ue_mask_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_HIGH,
+ &ue_hi);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_HI,
+ &ue_mask_hi);
+
+ ue_lo = (ue_lo & ~ue_mask_lo);
+ ue_hi = (ue_hi & ~ue_mask_hi);
+
+ if (ue_lo || ue_hi) {
+ set_bit(BEISCSI_HBA_IN_UE, &phba->state);
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : HBA error detected\n");
+ ret = 1;
+ }
+
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : UE_LOW %s bit set\n",
+ desc_ue_status_low[i]);
+ }
+ }
+
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : UE_HIGH %s bit set\n",
+ desc_ue_status_hi[i]);
+ }
+ }
+ return ret;
+}
+
+/*
+ * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read SLIPORT SEMAPHORE register to check for UER
+ *
+ **/
+int beiscsi_detect_tpe(struct beiscsi_hba *phba)
+{
+ u32 post, status;
+ int ret = 0;
+
+ post = beiscsi_get_post_stage(phba);
+ status = post & POST_STAGE_MASK;
+ if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
+ POST_STAGE_RECOVERABLE_ERR) {
+ set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
+ __beiscsi_log(phba, KERN_INFO,
+ "BC_%d : HBA error recoverable: 0x%x\n", post);
+ ret = 1;
+ } else {
+ __beiscsi_log(phba, KERN_INFO,
+ "BC_%d : HBA in UE: 0x%x\n", post);
+ }
+
+ return ret;
+}
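Note: beiscsi_detect_ue() above masks each UE status word with its mask register and then walks the remaining bits, printing the matching entry from the descriptor tables added at the top of the file. A condensed sketch of that loop with a shortened table:

#include <stdint.h>
#include <stdio.h>

/* Table contents are illustrative, not the full list from the driver. */
static const char * const ue_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

static void decode_ue(uint32_t ue, uint32_t mask)
{
	uint32_t bits = ue & ~mask;
	unsigned int i;

	for (i = 0; bits && i < sizeof(ue_desc) / sizeof(ue_desc[0]);
	     bits >>= 1, i++) {
		if (bits & 1)
			printf("UE %s bit set\n", ue_desc[i]);
	}
}

int main(void)
{
	decode_ue(0x5, 0x0);	/* reports CEV and DBUF */
	return 0;
}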
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index deeb951e6874..328fb5b973cd 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -57,6 +57,7 @@ struct be_mcc_wrb {
#define MCC_STATUS_ILLEGAL_REQUEST 0x2
#define MCC_STATUS_ILLEGAL_FIELD 0x3
#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
+#define MCC_STATUS_INVALID_LENGTH 0x74
#define CQE_STATUS_COMPL_MASK 0xFFFF
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
@@ -97,11 +98,23 @@ struct be_mcc_compl {
#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+/********** MPU semaphore: used for SH & BE ******************/
+#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
+#define POST_STAGE_MASK 0x0000FFFF
+#define POST_ERROR_BIT 0x80000000
+#define POST_ERR_RECOVERY_CODE_MASK 0xF000
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY 0xC000 /* FW is done with POST */
+#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
/********** MCC door bell ************/
#define DB_MCCQ_OFFSET 0x140
@@ -109,9 +122,6 @@ struct be_mcc_compl {
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
-/* MPU semphore POST stage values */
-#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-
/**
* When the async bit of mcc_compl is set, the last 4 bytes of
* mcc_compl is interpreted as follows:
@@ -217,6 +227,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_FEATURES 191
/**
* LIST of opcodes that are common between Initiator and Target
@@ -345,8 +356,8 @@ struct be_cmd_req_logout_fw_sess {
struct be_cmd_resp_logout_fw_sess {
struct be_cmd_resp_hdr hdr; /* dw[4] */
-#define BEISCSI_MGMT_SESSION_CLOSE 0x20
uint32_t session_status;
+#define BE_SESS_STATUS_CLOSE 0x20
} __packed;
struct mgmt_conn_login_options {
@@ -365,6 +376,14 @@ struct ip_addr_format {
u16 size_of_structure;
u8 reserved;
u8 ip_type;
+#define BEISCSI_IP_TYPE_V4 0x1
+#define BEISCSI_IP_TYPE_STATIC_V4 0x3
+#define BEISCSI_IP_TYPE_DHCP_V4 0x5
+/* type v4 values < type v6 values */
+#define BEISCSI_IP_TYPE_V6 0x10
+#define BEISCSI_IP_TYPE_ROUTABLE_V6 0x30
+#define BEISCSI_IP_TYPE_LINK_LOCAL_V6 0x50
+#define BEISCSI_IP_TYPE_AUTO_V6 0x90
u8 addr[16];
u32 rsvd0;
} __packed;
@@ -430,8 +449,13 @@ struct be_cmd_get_boot_target_req {
struct be_cmd_get_boot_target_resp {
struct be_cmd_resp_hdr hdr;
- u32 boot_session_count;
- int boot_session_handle;
+ u32 boot_session_count;
+ u32 boot_session_handle;
+/**
+ * FW returns 0xffffffff if it couldn't establish connection with
+ * configured boot target.
+ */
+#define BE_BOOT_INVALID_SHANDLE 0xffffffff
};
struct be_cmd_reopen_session_req {
@@ -699,16 +723,59 @@ struct be_cmd_get_nic_conf_resp {
u8 mac_address[ETH_ALEN];
} __packed;
-#define BEISCSI_ALIAS_LEN 32
+/******************** Get HBA NAME *******************/
struct be_cmd_hba_name {
struct be_cmd_req_hdr hdr;
u16 flags;
u16 rsvd0;
u8 initiator_name[ISCSI_NAME_LEN];
- u8 initiator_alias[BEISCSI_ALIAS_LEN];
+#define BE_INI_ALIAS_LEN 32
+ u8 initiator_alias[BE_INI_ALIAS_LEN];
} __packed;
+/******************** COMMON SET Features *******************/
+#define BE_CMD_SET_FEATURE_UER 0x10
+#define BE_CMD_UER_SUPP_BIT 0x1
+struct be_uer_req {
+ u32 uer;
+ u32 rsvd;
+};
+
+struct be_uer_resp {
+ u32 uer;
+ u16 ue2rp;
+ u16 ue2sr;
+};
+
+struct be_cmd_set_features {
+ union {
+ struct be_cmd_req_hdr req_hdr;
+ struct be_cmd_resp_hdr resp_hdr;
+ } h;
+ u32 feature;
+ u32 param_len;
+ union {
+ struct be_uer_req req;
+ struct be_uer_resp resp;
+ u32 rsvd[2];
+ } param;
+} __packed;
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load);
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba);
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num);
+
+int beiscsi_detect_ue(struct beiscsi_hba *phba);
+
+int beiscsi_detect_tpe(struct beiscsi_hba *phba);
+
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay);
@@ -723,24 +790,21 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
struct be_queue_info *mccq,
struct be_queue_info *cq);
-int be_poll_mcc(struct be_ctrl_info *ctrl);
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba);
unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
int num);
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
+ unsigned int tag,
+ struct be_mcc_wrb **wrb,
struct be_dma_mem *mbx_cmd_mem);
-/*ISCSI Functuions */
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
-
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+ unsigned int tag,
+ struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
unsigned int *ref_tag);
@@ -749,9 +813,6 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl);
-
-int be_mbox_notify(struct be_ctrl_info *ctrl);
-
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
@@ -767,8 +828,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset,
u32 num_pages);
-int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
-
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq,
struct hwi_wrb_context *pwrb_context,
@@ -777,6 +836,15 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
/* Configuration Functions */
int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
+
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
+
struct be_default_pdu_context {
u32 dw[4];
} __packed;
@@ -999,7 +1067,16 @@ struct iscsi_cleanup_req {
u16 chute;
u8 hdr_ring_id;
u8 data_ring_id;
+} __packed;
+struct iscsi_cleanup_req_v1 {
+ struct be_cmd_req_hdr hdr;
+ u16 chute;
+ u16 rsvd1;
+ u16 hdr_ring_id;
+ u16 rsvd2;
+ u16 data_ring_id;
+ u16 rsvd3;
} __packed;
struct eq_delay {
@@ -1368,14 +1445,9 @@ struct be_cmd_get_port_name {
* the cxn
*/
-int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
-int be_chk_reset_complete(struct beiscsi_hba *phba);
-
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
bool embedded, u8 sge_cnt);
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
-
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
#endif /* !BEISCSI_CMDS_H */
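Note: the SLIPORT definitions added above are what beiscsi_check_fw_rdy() and beiscsi_detect_tpe() in be_cmds.c use to interpret the POST semaphore: the low 16 bits carry the stage, bit 31 flags an error, and 0xE000 in the recovery-code field marks it recoverable. A standalone decoding sketch using the same constants:

#include <stdint.h>
#include <stdio.h>

#define POST_STAGE_MASK              0x0000FFFF
#define POST_ERROR_BIT               0x80000000
#define POST_ERR_RECOVERY_CODE_MASK  0xF000
#define POST_STAGE_ARMFW_RDY         0xC000
#define POST_STAGE_RECOVERABLE_ERR   0xE000

static void decode_post(uint32_t sem)
{
	uint32_t stage = sem & POST_STAGE_MASK;

	if (sem & POST_ERROR_BIT)
		printf("POST error, stage 0x%x\n", stage);
	else if (stage == POST_STAGE_ARMFW_RDY)
		printf("FW ready\n");
	else if ((stage & POST_ERR_RECOVERY_CODE_MASK) == POST_STAGE_RECOVERABLE_ERR)
		printf("recoverable error, stage 0x%x\n", stage);
	else
		printf("still in POST, stage 0x%x\n", stage);
}

int main(void)
{
	decode_post(0xC000);	/* ready */
	decode_post(0xE001);	/* recoverable */
	return 0;
}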
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 09f89a3eaa87..ba258217614e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -52,22 +52,20 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
if (!ep) {
- printk(KERN_ERR
- "beiscsi_session_create: invalid ep\n");
+ pr_err("beiscsi_session_create: invalid ep\n");
return NULL;
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : PCI_ERROR Recovery\n");
- return NULL;
- } else {
+ if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_session_create\n");
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
+ return NULL;
}
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_session_create\n");
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Cannot handle %d cmds."
@@ -120,6 +118,16 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
}
/**
+ * beiscsi_session_fail(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
* beiscsi_conn_create - create an instance of iscsi connection
* @cls_session: ptr to iscsi_cls_session
* @cid: iscsi cid
@@ -237,7 +245,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
-static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
{
if (phba->ipv4_iface)
return 0;
@@ -256,7 +264,7 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
return 0;
}
-static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba)
{
if (phba->ipv6_iface)
return 0;
@@ -275,79 +283,31 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
return 0;
}
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_create_default(struct beiscsi_hba *phba)
{
struct be_cmd_get_if_info_resp *if_info;
- if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
- beiscsi_create_ipv4_iface(phba);
+ if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) {
+ beiscsi_iface_create_ipv4(phba);
kfree(if_info);
}
- if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
- beiscsi_create_ipv6_iface(phba);
+ if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) {
+ beiscsi_iface_create_ipv6(phba);
kfree(if_info);
}
}
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
{
- if (phba->ipv6_iface)
+ if (phba->ipv6_iface) {
iscsi_destroy_iface(phba->ipv6_iface);
- if (phba->ipv4_iface)
- iscsi_destroy_iface(phba->ipv4_iface);
-}
-
-static int
-beiscsi_set_static_ip(struct Scsi_Host *shost,
- struct iscsi_iface_param_info *iface_param,
- void *data, uint32_t dt_len)
-{
- struct beiscsi_hba *phba = iscsi_host_priv(shost);
- struct iscsi_iface_param_info *iface_ip = NULL;
- struct iscsi_iface_param_info *iface_subnet = NULL;
- struct nlattr *nla;
- int ret;
-
-
- switch (iface_param->param) {
- case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
- nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
- if (nla)
- iface_ip = nla_data(nla);
-
- nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
- if (nla)
- iface_subnet = nla_data(nla);
- break;
- case ISCSI_NET_PARAM_IPV4_ADDR:
- iface_ip = iface_param;
- nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
- if (nla)
- iface_subnet = nla_data(nla);
- break;
- case ISCSI_NET_PARAM_IPV4_SUBNET:
- iface_subnet = iface_param;
- nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
- if (nla)
- iface_ip = nla_data(nla);
- break;
- default:
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Unsupported param %d\n",
- iface_param->param);
+ phba->ipv6_iface = NULL;
}
-
- if (!iface_ip || !iface_subnet) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : IP and Subnet Mask required\n");
- return -EINVAL;
+ if (phba->ipv4_iface) {
+ iscsi_destroy_iface(phba->ipv4_iface);
+ phba->ipv4_iface = NULL;
}
-
- ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
- ISCSI_BOOTPROTO_STATIC);
-
- return ret;
}
/**
@@ -363,137 +323,141 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
* Failure: Non-Zero Value
**/
static int
-beiscsi_set_vlan_tag(struct Scsi_Host *shost,
- struct iscsi_iface_param_info *iface_param)
+beiscsi_iface_config_vlan(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- int ret;
-
- /* Get the Interface Handle */
- ret = mgmt_get_all_if_id(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Getting Interface Handle Failed\n");
- return ret;
- }
+ int ret = -EPERM;
switch (iface_param->param) {
case ISCSI_NET_PARAM_VLAN_ENABLED:
+ ret = 0;
if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
- ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+ ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE);
break;
case ISCSI_NET_PARAM_VLAN_TAG:
- ret = mgmt_set_vlan(phba,
- *((uint16_t *)iface_param->value));
+ ret = beiscsi_if_set_vlan(phba,
+ *((uint16_t *)iface_param->value));
break;
- default:
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BS_%d : Unknown Param Type : %d\n",
- iface_param->param);
- return -ENOSYS;
}
return ret;
}
static int
-beiscsi_set_ipv4(struct Scsi_Host *shost,
- struct iscsi_iface_param_info *iface_param,
- void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv4(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *info,
+ void *data, uint32_t dt_len)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- int ret = 0;
+ u8 *ip = NULL, *subnet = NULL, *gw;
+ struct nlattr *nla;
+ int ret = -EPERM;
/* Check the param */
- switch (iface_param->param) {
+ switch (info->param) {
+ case ISCSI_NET_PARAM_IFACE_ENABLE:
+ if (info->value[0] == ISCSI_IFACE_ENABLE)
+ ret = beiscsi_iface_create_ipv4(phba);
+ else {
+ iscsi_destroy_iface(phba->ipv4_iface);
+ phba->ipv4_iface = NULL;
+ }
+ break;
case ISCSI_NET_PARAM_IPV4_GW:
- ret = mgmt_set_gateway(phba, iface_param);
+ gw = info->value;
+ ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw);
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
- if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
- ret = mgmt_set_ip(phba, iface_param,
- NULL, ISCSI_BOOTPROTO_DHCP);
- else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
- ret = beiscsi_set_static_ip(shost, iface_param,
- data, dt_len);
+ if (info->value[0] == ISCSI_BOOTPROTO_DHCP)
+ ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4);
+ else if (info->value[0] == ISCSI_BOOTPROTO_STATIC)
+ /* release DHCP IP address */
+ ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+ NULL, NULL);
else
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Invalid BOOTPROTO: %d\n",
- iface_param->value[0]);
+ info->value[0]);
break;
- case ISCSI_NET_PARAM_IFACE_ENABLE:
- if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
- ret = beiscsi_create_ipv4_iface(phba);
- else
- iscsi_destroy_iface(phba->ipv4_iface);
- break;
- case ISCSI_NET_PARAM_IPV4_SUBNET:
case ISCSI_NET_PARAM_IPV4_ADDR:
- ret = beiscsi_set_static_ip(shost, iface_param,
- data, dt_len);
+ ip = info->value;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+ if (nla) {
+ info = nla_data(nla);
+ subnet = info->value;
+ }
+ ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+ ip, subnet);
break;
- case ISCSI_NET_PARAM_VLAN_ENABLED:
- case ISCSI_NET_PARAM_VLAN_TAG:
- ret = beiscsi_set_vlan_tag(shost, iface_param);
+ case ISCSI_NET_PARAM_IPV4_SUBNET:
+ /*
+ * The OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs both the
+ * IP and the subnet. Find the IP to be applied for this subnet.
+ */
+ subnet = info->value;
+ nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+ if (nla) {
+ info = nla_data(nla);
+ ip = info->value;
+ }
+ ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+ ip, subnet);
break;
- default:
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Param %d not supported\n",
- iface_param->param);
}
return ret;
}
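Both the IPV4_ADDR and IPV4_SUBNET cases above pair the parameter that arrived with its counterpart found among the remaining attributes before calling beiscsi_if_en_static(). The sketch below shows only that pairing idea in standalone C; it does not use the kernel nla_* helpers, and the attribute layout and type values are invented:

#include <stdint.h>
#include <stdio.h>

struct attr {				/* hypothetical TLV record */
	uint16_t type;
	uint16_t len;
	uint8_t  value[16];
};

enum { PARAM_IPV4_ADDR = 1, PARAM_IPV4_SUBNET = 2 };

static const struct attr *find_attr(const struct attr *blob, unsigned n,
				    uint16_t type)
{
	for (unsigned i = 0; i < n; i++)
		if (blob[i].type == type)
			return &blob[i];
	return NULL;			/* counterpart was not supplied */
}

int main(void)
{
	struct attr blob[2] = {
		{ PARAM_IPV4_ADDR,   4, { 192, 168, 1, 10 } },
		{ PARAM_IPV4_SUBNET, 4, { 255, 255, 255, 0 } },
	};
	/* the ADDR attribute arrived; look up the matching SUBNET */
	const struct attr *subnet = find_attr(blob, 2, PARAM_IPV4_SUBNET);

	printf("subnet %s\n", subnet ? "found" : "missing");
	return 0;
}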
static int
-beiscsi_set_ipv6(struct Scsi_Host *shost,
- struct iscsi_iface_param_info *iface_param,
- void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv6(struct Scsi_Host *shost,
+ struct iscsi_iface_param_info *iface_param,
+ void *data, uint32_t dt_len)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- int ret = 0;
+ int ret = -EPERM;
switch (iface_param->param) {
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
- ret = beiscsi_create_ipv6_iface(phba);
+ ret = beiscsi_iface_create_ipv6(phba);
else {
iscsi_destroy_iface(phba->ipv6_iface);
- ret = 0;
+ phba->ipv6_iface = NULL;
}
break;
case ISCSI_NET_PARAM_IPV6_ADDR:
- ret = mgmt_set_ip(phba, iface_param, NULL,
- ISCSI_BOOTPROTO_STATIC);
+ ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6,
+ iface_param->value, NULL);
break;
- case ISCSI_NET_PARAM_VLAN_ENABLED:
- case ISCSI_NET_PARAM_VLAN_TAG:
- ret = beiscsi_set_vlan_tag(shost, iface_param);
- break;
- default:
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Param %d not supported\n",
- iface_param->param);
}
return ret;
}
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
- void *data, uint32_t dt_len)
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
+ void *data, uint32_t dt_len)
{
struct iscsi_iface_param_info *iface_param = NULL;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct nlattr *attrib;
uint32_t rm_len = dt_len;
- int ret = 0 ;
+ int ret;
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : In PCI_ERROR Recovery\n");
+ if (!beiscsi_hba_is_online(phba)) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
return -EBUSY;
}
+ /* update interface_handle */
+ ret = beiscsi_if_get_handle(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Getting Interface Handle Failed\n");
+ return ret;
+ }
+
nla_for_each_attr(attrib, data, dt_len, rm_len) {
iface_param = nla_data(attrib);
@@ -512,40 +476,58 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
return -EINVAL;
}
- switch (iface_param->iface_type) {
- case ISCSI_IFACE_TYPE_IPV4:
- ret = beiscsi_set_ipv4(shost, iface_param,
- data, dt_len);
- break;
- case ISCSI_IFACE_TYPE_IPV6:
- ret = beiscsi_set_ipv6(shost, iface_param,
- data, dt_len);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : %s.0 set param %d",
+ (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ?
+ "ipv4" : "ipv6", iface_param->param);
+
+ ret = -EPERM;
+ switch (iface_param->param) {
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = beiscsi_iface_config_vlan(shost, iface_param);
break;
default:
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Invalid iface type :%d passed\n",
- iface_param->iface_type);
- break;
+ switch (iface_param->iface_type) {
+ case ISCSI_IFACE_TYPE_IPV4:
+ ret = beiscsi_iface_config_ipv4(shost,
+ iface_param,
+ data, dt_len);
+ break;
+ case ISCSI_IFACE_TYPE_IPV6:
+ ret = beiscsi_iface_config_ipv6(shost,
+ iface_param,
+ data, dt_len);
+ break;
+ }
}
+ if (ret == -EPERM) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : %s.0 set param %d not permitted",
+ (iface_param->iface_type ==
+ ISCSI_IFACE_TYPE_IPV4) ? "ipv4" : "ipv6",
+ iface_param->param);
+ ret = 0;
+ }
if (ret)
- return ret;
+ break;
}
return ret;
}
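beiscsi_iface_set_param() now starts each parameter with ret = -EPERM and treats that value as "not handled at this level": it logs the parameter and keeps walking the attribute list instead of failing the whole request, while any other error breaks out of the loop. A minimal sketch of that control flow, with made-up parameter IDs and a made-up handler:

#include <errno.h>
#include <stdio.h>

static int handle_param(int param)
{
	switch (param) {
	case 1:			/* a parameter the sketch "supports" */
		return 0;
	default:
		return -EPERM;	/* not handled at this level */
	}
}

int main(void)
{
	int params[] = { 1, 7, 1 };
	int ret = 0;

	for (unsigned i = 0; i < 3; i++) {
		ret = handle_param(params[i]);
		if (ret == -EPERM) {
			printf("param %d not permitted, skipping\n", params[i]);
			ret = 0;	/* do not fail the whole request */
		}
		if (ret)
			break;		/* real errors stop the loop */
	}
	return ret;
}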
-static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
- struct iscsi_iface *iface, int param,
- char *buf)
+static int __beiscsi_iface_get_param(struct beiscsi_hba *phba,
+ struct iscsi_iface *iface,
+ int param, char *buf)
{
struct be_cmd_get_if_info_resp *if_info;
- int len, ip_type = BE2_IPV4;
+ int len, ip_type = BEISCSI_IP_TYPE_V4;
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
- ip_type = BE2_IPV6;
+ ip_type = BEISCSI_IP_TYPE_V6;
- len = mgmt_get_if_info(phba, ip_type, &if_info);
+ len = beiscsi_if_get_info(phba, ip_type, &if_info);
if (len)
return len;
@@ -567,24 +549,24 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
- (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
- ? "Disabled\n" : "Enabled\n");
+ (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ?
+ "disable" : "enable");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
len = -EINVAL;
else
len = sprintf(buf, "%d\n",
- (if_info->vlan_priority &
- ISCSI_MAX_VLAN_ID));
+ (if_info->vlan_priority &
+ ISCSI_MAX_VLAN_ID));
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
len = -EINVAL;
else
len = sprintf(buf, "%d\n",
- ((if_info->vlan_priority >> 13) &
- ISCSI_MAX_VLAN_PRIORITY));
+ ((if_info->vlan_priority >> 13) &
+ ISCSI_MAX_VLAN_PRIORITY));
break;
default:
WARN_ON(1);
@@ -594,18 +576,20 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
return len;
}
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
- enum iscsi_param_type param_type,
- int param, char *buf)
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
+ enum iscsi_param_type param_type,
+ int param, char *buf)
{
struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct be_cmd_get_def_gateway_resp gateway;
- int len = -ENOSYS;
+ int len = -EPERM;
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : In PCI_ERROR Recovery\n");
+ if (param_type != ISCSI_NET_PARAM)
+ return 0;
+ if (!beiscsi_hba_is_online(phba)) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
return -EBUSY;
}
@@ -617,19 +601,22 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
case ISCSI_NET_PARAM_VLAN_ENABLED:
case ISCSI_NET_PARAM_VLAN_ID:
case ISCSI_NET_PARAM_VLAN_PRIORITY:
- len = be2iscsi_get_if_param(phba, iface, param, buf);
+ len = __beiscsi_iface_get_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
- len = sprintf(buf, "enabled\n");
+ if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+ len = sprintf(buf, "%s\n",
+ phba->ipv4_iface ? "enable" : "disable");
+ else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+ len = sprintf(buf, "%s\n",
+ phba->ipv6_iface ? "enable" : "disable");
break;
case ISCSI_NET_PARAM_IPV4_GW:
memset(&gateway, 0, sizeof(gateway));
- len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+ len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway);
if (!len)
len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
break;
- default:
- len = -ENOSYS;
}
return len;
@@ -647,7 +634,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
- int len = 0;
+ int len;
beiscsi_log(beiscsi_ep->phba, KERN_INFO,
BEISCSI_LOG_CONFIG,
@@ -659,13 +646,13 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- if (beiscsi_ep->ip_type == BE2_IPV4)
+ if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4)
len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
else
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
- return -ENOSYS;
+ len = -EPERM;
}
return len;
}
@@ -758,7 +745,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
+ ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
}
@@ -810,16 +797,13 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
-
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : In PCI_ERROR Recovery\n");
- return -EBUSY;
- } else {
+ if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_get_host_param,"
- " param = %d\n", param);
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
+ return -EBUSY;
}
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_get_host_param, param = %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
@@ -961,15 +945,13 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : In PCI_ERROR Recovery\n");
+ if (!beiscsi_hba_is_online(phba)) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
return -EBUSY;
- } else {
- beiscsi_log(beiscsi_conn->phba, KERN_INFO,
- BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_conn_start\n");
}
+ beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start\n");
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
@@ -1186,28 +1168,20 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct iscsi_endpoint *ep;
int ret;
- if (shost)
- phba = iscsi_host_priv(shost);
- else {
+ if (!shost) {
ret = -ENXIO;
- printk(KERN_ERR
- "beiscsi_ep_connect shost is NULL\n");
+ pr_err("beiscsi_ep_connect shost is NULL\n");
return ERR_PTR(ret);
}
- if (beiscsi_error(phba)) {
+ phba = iscsi_host_priv(shost);
+ if (!beiscsi_hba_is_online(phba)) {
ret = -EIO;
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BS_%d : The FW state Not Stable!!!\n");
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
return ERR_PTR(ret);
}
-
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- ret = -EBUSY;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : In PCI_ERROR Recovery\n");
- return ERR_PTR(ret);
- } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
+ if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) {
ret = -EBUSY;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : The Adapter Port state is Down!!!\n");
@@ -1361,9 +1335,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
- if (phba->state & BE_ADAPTER_PCI_ERR) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : PCI_ERROR Recovery\n");
+ if (!beiscsi_hba_is_online(phba)) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : HBA in error 0x%lx\n", phba->state);
goto free_ep;
}
@@ -1386,7 +1360,7 @@ free_ep:
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
-umode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t beiscsi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_NET_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 0c84e1c0763a..e4d67dfea4cb 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Avago Technologies
* 3333 Susan Street
@@ -23,25 +23,18 @@
#include "be_main.h"
#include "be_mgmt.h"
-#define BE2_IPV4 0x1
-#define BE2_IPV6 0x10
-#define BE2_DHCP_V4 0x05
+void beiscsi_iface_create_default(struct beiscsi_hba *phba);
-#define NON_BLOCKING 0x0
-#define BLOCKING 0x1
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba);
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
-
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
-
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf);
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
void *data, uint32_t count);
-umode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t beiscsi_attr_is_visible(int param_type, int param);
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
@@ -57,6 +50,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session);
+
struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
*cls_session, uint32_t cid);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f05e7737107d..6a6906f847db 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -374,170 +374,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
return iscsi_eh_device_reset(sc);
}
-static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
-{
- struct beiscsi_hba *phba = data;
- struct mgmt_session_info *boot_sess = &phba->boot_sess;
- struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
- char *str = buf;
- int rc;
-
- switch (type) {
- case ISCSI_BOOT_TGT_NAME:
- rc = sprintf(buf, "%.*s\n",
- (int)strlen(boot_sess->target_name),
- (char *)&boot_sess->target_name);
- break;
- case ISCSI_BOOT_TGT_IP_ADDR:
- if (boot_conn->dest_ipaddr.ip_type == 0x1)
- rc = sprintf(buf, "%pI4\n",
- (char *)&boot_conn->dest_ipaddr.addr);
- else
- rc = sprintf(str, "%pI6\n",
- (char *)&boot_conn->dest_ipaddr.addr);
- break;
- case ISCSI_BOOT_TGT_PORT:
- rc = sprintf(str, "%d\n", boot_conn->dest_port);
- break;
-
- case ISCSI_BOOT_TGT_CHAP_NAME:
- rc = sprintf(str, "%.*s\n",
- boot_conn->negotiated_login_options.auth_data.chap.
- target_chap_name_length,
- (char *)&boot_conn->negotiated_login_options.
- auth_data.chap.target_chap_name);
- break;
- case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(str, "%.*s\n",
- boot_conn->negotiated_login_options.auth_data.chap.
- target_secret_length,
- (char *)&boot_conn->negotiated_login_options.
- auth_data.chap.target_secret);
- break;
- case ISCSI_BOOT_TGT_REV_CHAP_NAME:
- rc = sprintf(str, "%.*s\n",
- boot_conn->negotiated_login_options.auth_data.chap.
- intr_chap_name_length,
- (char *)&boot_conn->negotiated_login_options.
- auth_data.chap.intr_chap_name);
- break;
- case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(str, "%.*s\n",
- boot_conn->negotiated_login_options.auth_data.chap.
- intr_secret_length,
- (char *)&boot_conn->negotiated_login_options.
- auth_data.chap.intr_secret);
- break;
- case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "2\n");
- break;
- case ISCSI_BOOT_TGT_NIC_ASSOC:
- rc = sprintf(str, "0\n");
- break;
- default:
- rc = -ENOSYS;
- break;
- }
- return rc;
-}
-
-static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
-{
- struct beiscsi_hba *phba = data;
- char *str = buf;
- int rc;
-
- switch (type) {
- case ISCSI_BOOT_INI_INITIATOR_NAME:
- rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
- break;
- default:
- rc = -ENOSYS;
- break;
- }
- return rc;
-}
-
-static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
-{
- struct beiscsi_hba *phba = data;
- char *str = buf;
- int rc;
-
- switch (type) {
- case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "2\n");
- break;
- case ISCSI_BOOT_ETH_INDEX:
- rc = sprintf(str, "0\n");
- break;
- case ISCSI_BOOT_ETH_MAC:
- rc = beiscsi_get_macaddr(str, phba);
- break;
- default:
- rc = -ENOSYS;
- break;
- }
- return rc;
-}
-
-
-static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
-{
- umode_t rc;
-
- switch (type) {
- case ISCSI_BOOT_TGT_NAME:
- case ISCSI_BOOT_TGT_IP_ADDR:
- case ISCSI_BOOT_TGT_PORT:
- case ISCSI_BOOT_TGT_CHAP_NAME:
- case ISCSI_BOOT_TGT_CHAP_SECRET:
- case ISCSI_BOOT_TGT_REV_CHAP_NAME:
- case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- case ISCSI_BOOT_TGT_NIC_ASSOC:
- case ISCSI_BOOT_TGT_FLAGS:
- rc = S_IRUGO;
- break;
- default:
- rc = 0;
- break;
- }
- return rc;
-}
-
-static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
-{
- umode_t rc;
-
- switch (type) {
- case ISCSI_BOOT_INI_INITIATOR_NAME:
- rc = S_IRUGO;
- break;
- default:
- rc = 0;
- break;
- }
- return rc;
-}
-
-
-static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
-{
- umode_t rc;
-
- switch (type) {
- case ISCSI_BOOT_ETH_FLAGS:
- case ISCSI_BOOT_ETH_MAC:
- case ISCSI_BOOT_ETH_INDEX:
- rc = S_IRUGO;
- break;
- default:
- rc = 0;
- break;
- }
- return rc;
-}
-
/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -850,12 +686,11 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
struct beiscsi_hba *phba;
- struct be_eq_entry *eqe = NULL;
+ struct be_eq_entry *eqe;
struct be_queue_info *eq;
struct be_queue_info *mcc;
- unsigned int num_eq_processed;
+ unsigned int mcc_events;
struct be_eq_obj *pbe_eq;
- unsigned long flags;
pbe_eq = dev_id;
eq = &pbe_eq->q;
@@ -863,27 +698,23 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
mcc = &phba->ctrl.mcc_obj.cq;
eqe = queue_tail_node(eq);
- num_eq_processed = 0;
-
+ mcc_events = 0;
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
if (((eqe->dw[offsetof(struct amap_eq_entry,
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
+ mcc_events++;
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
- num_eq_processed++;
}
- if (pbe_eq->todo_mcc_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
+ if (mcc_events) {
+ queue_work(phba->wq, &pbe_eq->mcc_work);
+ hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
+ }
return IRQ_HANDLED;
}
@@ -902,7 +733,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
eq = &pbe_eq->q;
phba = pbe_eq->phba;
-
/* disable interrupt till iopoll completes */
hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
irq_poll_sched(&pbe_eq->iopoll);
@@ -920,14 +750,13 @@ static irqreturn_t be_isr(int irq, void *dev_id)
struct beiscsi_hba *phba;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- struct be_eq_entry *eqe = NULL;
+ struct be_eq_entry *eqe;
struct be_queue_info *eq;
struct be_queue_info *mcc;
- unsigned long flags, index;
- unsigned int num_mcceq_processed, num_ioeq_processed;
+ unsigned int mcc_events, io_events;
struct be_ctrl_info *ctrl;
struct be_eq_obj *pbe_eq;
- int isr;
+ int isr, rearm;
phba = dev_id;
ctrl = &phba->ctrl;
@@ -942,44 +771,35 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eq = &phwi_context->be_eq[0].q;
mcc = &phba->ctrl.mcc_obj.cq;
- index = 0;
eqe = queue_tail_node(eq);
- num_ioeq_processed = 0;
- num_mcceq_processed = 0;
+ io_events = 0;
+ mcc_events = 0;
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) == mcc->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- num_mcceq_processed++;
- } else {
- irq_poll_sched(&pbe_eq->iopoll);
- num_ioeq_processed++;
- }
+ resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
+ mcc_events++;
+ else
+ io_events++;
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
}
- if (num_ioeq_processed || num_mcceq_processed) {
- if (pbe_eq->todo_mcc_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
-
- if ((num_mcceq_processed) && (!num_ioeq_processed))
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed) , 1, 1);
- else
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed), 0, 1);
-
- return IRQ_HANDLED;
- } else
+ if (!io_events && !mcc_events)
return IRQ_NONE;
+
+ /* no need to rearm if interrupt is only for IOs */
+ rearm = 0;
+ if (mcc_events) {
+ queue_work(phba->wq, &pbe_eq->mcc_work);
+ /* rearm for MCCQ */
+ rearm = 1;
+ }
+ if (io_events)
+ irq_poll_sched(&pbe_eq->iopoll);
+ hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
+ return IRQ_HANDLED;
}
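The rewritten be_isr() drops the per-EQ todo flags and the ISR spinlock: it classifies each valid EQ entry as an MCC or I/O completion in one pass, schedules the MCC work if needed, and rearms the EQ only when MCC work was queued (the I/O path rearms later from irq_poll). A standalone sketch of that classification over a dummy event stream; the resource ID and the doorbell call are stand-ins:

#include <stdio.h>

#define MCC_ID 42

static void ring_eq_db(int num, int rearm)
{
	printf("ack %d events, rearm=%d\n", num, rearm);
}

int main(void)
{
	int resource_ids[] = { 7, MCC_ID, 9 };	/* pretend EQ entries */
	int mcc_events = 0, io_events = 0, rearm = 0;

	for (unsigned i = 0; i < 3; i++) {
		if (resource_ids[i] == MCC_ID)
			mcc_events++;
		else
			io_events++;
	}
	if (!io_events && !mcc_events)
		return 0;		/* nothing for us: IRQ_NONE */
	if (mcc_events)
		rearm = 1;		/* MCC work queued, rearm now */
	/* I/O events are polled; their rearm happens when polling completes */
	ring_eq_db(io_events + mcc_events, rearm);
	return 0;
}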
@@ -1077,57 +897,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
-static unsigned int
-beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
- struct beiscsi_hba *phba,
- struct pdu_base *ppdu,
- unsigned long pdu_len,
- void *pbuffer, unsigned long buf_len)
-{
- struct iscsi_conn *conn = beiscsi_conn->conn;
- struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- struct beiscsi_io_task *io_task;
- struct iscsi_hdr *login_hdr;
-
- switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
- PDUBASE_OPCODE_MASK) {
- case ISCSI_OP_NOOP_IN:
- pbuffer = NULL;
- buf_len = 0;
- break;
- case ISCSI_OP_ASYNC_EVENT:
- break;
- case ISCSI_OP_REJECT:
- WARN_ON(!pbuffer);
- WARN_ON(!(buf_len == 48));
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : In ISCSI_OP_REJECT\n");
- break;
- case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_TEXT_RSP:
- task = conn->login_task;
- io_task = task->dd_data;
- login_hdr = (struct iscsi_hdr *)ppdu;
- login_hdr->itt = io_task->libiscsi_itt;
- break;
- default:
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Unrecognized opcode 0x%x in async msg\n",
- (ppdu->
- dw[offsetof(struct amap_pdu_base, opcode) / 32]
- & PDUBASE_OPCODE_MASK));
- return 1;
- }
-
- spin_lock_bh(&session->back_lock);
- __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
- spin_unlock_bh(&session->back_lock);
- return 0;
-}
-
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
@@ -1199,6 +968,9 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
pwrb_context->alloc_index++;
spin_unlock_bh(&pwrb_context->wrb_lock);
+ if (pwrb_handle)
+ memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
+
return pwrb_handle;
}
@@ -1440,11 +1212,10 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol)
{
struct hwi_wrb_context *pwrb_context;
- struct wrb_handle *pwrb_handle = NULL;
+ uint16_t wrb_index, cid, cri_index;
struct hwi_controller *phwi_ctrlr;
+ struct wrb_handle *pwrb_handle;
struct iscsi_task *task;
- struct beiscsi_io_task *io_task;
- uint16_t wrb_index, cid, cri_index;
phwi_ctrlr = phba->phwi_ctrlr;
if (is_chip_be2_be3r(phba)) {
@@ -1463,9 +1234,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
task = pwrb_handle->pio_handle;
-
- io_task = task->dd_data;
- memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
iscsi_put_task(task);
}
@@ -1614,431 +1382,428 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
spin_unlock_bh(&session->back_lock);
}
-static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
- *pasync_ctx, unsigned int is_header,
- unsigned int host_write_ptr)
+/**
+ * ASYNC PDUs include
+ * a. Unsolicited NOP-In (target initiated NOP-In)
+ * b. ASYNC Messages
+ * c. Reject PDU
+ * d. Login response
+ * These headers arrive unprocessed by the EP firmware;
+ * the iSCSI layer processes them.
+ */
+static unsigned int
+beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
- if (is_header)
- return &pasync_ctx->async_entry[host_write_ptr].
- header_busy_list;
- else
- return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct beiscsi_io_task *io_task;
+ struct iscsi_hdr *login_hdr;
+ struct iscsi_task *task;
+ u8 code;
+
+ code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
+ switch (code) {
+ case ISCSI_OP_NOOP_IN:
+ pdata = NULL;
+ dlen = 0;
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ break;
+ case ISCSI_OP_REJECT:
+ WARN_ON(!pdata);
+ WARN_ON(!(dlen == 48));
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : In ISCSI_OP_REJECT\n");
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ task = conn->login_task;
+ io_task = task->dd_data;
+ login_hdr = (struct iscsi_hdr *)phdr;
+ login_hdr->itt = io_task->libiscsi_itt;
+ break;
+ default:
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : unrecognized async PDU opcode 0x%x\n",
+ code);
+ return 1;
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
+ return 0;
+}
+
+static inline void
+beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
+ struct hd_async_handle *pasync_handle)
+{
+ if (pasync_handle->is_header) {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_header.free_list);
+ pasync_ctx->async_header.free_entries++;
+ } else {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_data.free_list);
+ pasync_ctx->async_data.free_entries++;
+ }
}
-static struct async_pdu_handle *
-hwi_get_async_handle(struct beiscsi_hba *phba,
- struct beiscsi_conn *beiscsi_conn,
- struct hwi_async_pdu_context *pasync_ctx,
- struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
+static struct hd_async_handle *
+beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
+ struct hd_async_context *pasync_ctx,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
{
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct hd_async_handle *pasync_handle;
struct be_bus_address phys_addr;
- struct list_head *pbusy_list;
- struct async_pdu_handle *pasync_handle = NULL;
- unsigned char is_header = 0;
- unsigned int index, dpl;
+ u8 final, error = 0;
+ u16 cid, code, ci;
+ u32 dpl;
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ /**
+ * This function is invoked to get the right async_handle structure
+ * from a given DEF PDU CQ entry.
+ *
+ * - index in CQ entry gives the vertical index
+ * - address in CQ entry is the offset where the DMA last ended
+ * - final - no more notifications for this PDU
+ */
if (is_chip_be2_be3r(phba)) {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
dpl, pdpdu_cqe);
- index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
index, pdpdu_cqe);
+ final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ final, pdpdu_cqe);
} else {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
dpl, pdpdu_cqe);
- index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
index, pdpdu_cqe);
+ final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ final, pdpdu_cqe);
}
- phys_addr.u.a32.address_lo =
- (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- db_addr_lo) / 32] - dpl);
- phys_addr.u.a32.address_hi =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- db_addr_hi) / 32];
-
- phys_addr.u.a64.address =
- *((unsigned long long *)(&phys_addr.u.a64.address));
-
- switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
- & PDUCQE_CODE_MASK) {
+ /**
+ * DB addr Hi/Lo is the same for BE and SKH.
+ * Subtract the data placement length to get to the base.
+ */
+ phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ db_addr_lo, pdpdu_cqe);
+ phys_addr.u.a32.address_lo -= dpl;
+ phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ db_addr_hi, pdpdu_cqe);
+
+ code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
+ switch (code) {
case UNSOL_HDR_NOTIFY:
- is_header = 1;
-
- pbusy_list = hwi_get_async_busy_list(pasync_ctx,
- is_header, index);
+ pasync_handle = pasync_ctx->async_entry[ci].header;
break;
+ case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
+ error = 1;
case UNSOL_DATA_NOTIFY:
- pbusy_list = hwi_get_async_busy_list(pasync_ctx,
- is_header, index);
+ pasync_handle = pasync_ctx->async_entry[ci].data;
break;
+ /* called only for above codes */
default:
- pbusy_list = NULL;
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Unexpected code=%d\n",
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- code) / 32] & PDUCQE_CODE_MASK);
- return NULL;
+ pasync_handle = NULL;
+ break;
}
- WARN_ON(list_empty(pbusy_list));
- list_for_each_entry(pasync_handle, pbusy_list, link) {
- if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
- break;
+ if (!pasync_handle) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
+ cid, code, ci, phys_addr.u.a64.address);
+ return pasync_handle;
}
- WARN_ON(!pasync_handle);
+ if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
+ pasync_handle->index != ci) {
+ /* driver bug: ci does not match the async handle index */
+ error = 1;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
+ cid, pasync_handle->is_header ? 'H' : 'D',
+ pasync_handle->pa.u.a64.address,
+ pasync_handle->index,
+ phys_addr.u.a64.address, ci);
+ /* FW has a stale address - attempt to continue by dropping it */
+ }
- pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
- beiscsi_conn->beiscsi_conn_cid);
- pasync_handle->is_header = is_header;
+ /**
+ * Each CID is associated with a unique CRI.
+ * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
+ **/
+ pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
+ pasync_handle->is_final = final;
pasync_handle->buffer_len = dpl;
- *pcq_index = index;
+ /* empty the slot */
+ if (pasync_handle->is_header)
+ pasync_ctx->async_entry[ci].header = NULL;
+ else
+ pasync_ctx->async_entry[ci].data = NULL;
+ /**
+ * DEF PDU header and data buffers with errors should simply be
+ * dropped as there are no consumers for them.
+ */
+ if (error) {
+ beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+ pasync_handle = NULL;
+ }
return pasync_handle;
}
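beiscsi_hdl_get_handle() replaces the old busy-list search with direct indexing: each posted buffer occupies a per-index slot (header or data), the CQ entry's index selects the slot, and the slot is cleared once the handle is taken so a stale or duplicate completion is detected immediately. A tiny standalone model of that slot bookkeeping; the types and checks are illustrative only, not the driver's structures:

#include <stdio.h>

struct handle { unsigned index; };

#define NUM_SLOTS 4
static struct handle pool[NUM_SLOTS];
static struct handle *slot[NUM_SLOTS];	/* what is currently posted per index */

static void post(unsigned i)
{
	pool[i].index = i;
	slot[i] = &pool[i];
}

static struct handle *take(unsigned ci)
{
	struct handle *h = slot[ci];

	if (!h) {			/* stale or duplicate completion */
		printf("no handle posted at index %u\n", ci);
		return NULL;
	}
	slot[ci] = NULL;		/* empty the slot, as the driver does */
	return h;
}

int main(void)
{
	post(2);
	printf("first take: %p\n", (void *)take(2));
	printf("second take: %p\n", (void *)take(2));	/* caught as stale */
	return 0;
}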
-static unsigned int
-hwi_update_async_writables(struct beiscsi_hba *phba,
- struct hwi_async_pdu_context *pasync_ctx,
- unsigned int is_header, unsigned int cq_index)
+static void
+beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
+ struct hd_async_context *pasync_ctx,
+ u16 cri)
{
- struct list_head *pbusy_list;
- struct async_pdu_handle *pasync_handle;
- unsigned int num_entries, writables = 0;
- unsigned int *pep_read_ptr, *pwritables;
-
- num_entries = pasync_ctx->num_entries;
- if (is_header) {
- pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
- pwritables = &pasync_ctx->async_header.writables;
- } else {
- pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
- pwritables = &pasync_ctx->async_data.writables;
- }
-
- while ((*pep_read_ptr) != cq_index) {
- (*pep_read_ptr)++;
- *pep_read_ptr = (*pep_read_ptr) % num_entries;
-
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
- *pep_read_ptr);
- if (writables == 0)
- WARN_ON(list_empty(pbusy_list));
-
- if (!list_empty(pbusy_list)) {
- pasync_handle = list_entry(pbusy_list->next,
- struct async_pdu_handle,
- link);
- WARN_ON(!pasync_handle);
- pasync_handle->consumed = 1;
- }
-
- writables++;
- }
+ struct hd_async_handle *pasync_handle, *tmp_handle;
+ struct list_head *plist;
- if (!writables) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : Duplicate notification received - index 0x%x!!\n",
- cq_index);
- WARN_ON(1);
+ plist = &pasync_ctx->async_entry[cri].wq.list;
+ list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
+ list_del(&pasync_handle->link);
+ beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
}
- *pwritables = *pwritables + writables;
- return 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
+ pasync_ctx->async_entry[cri].wq.hdr_len = 0;
+ pasync_ctx->async_entry[cri].wq.bytes_received = 0;
+ pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}
-static void hwi_free_async_msg(struct beiscsi_hba *phba,
- struct hwi_async_pdu_context *pasync_ctx,
- unsigned int cri)
+static unsigned int
+beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct hd_async_context *pasync_ctx,
+ u16 cri)
{
- struct async_pdu_handle *pasync_handle, *tmp_handle;
+ struct iscsi_session *session = beiscsi_conn->conn->session;
+ struct hd_async_handle *pasync_handle, *plast_handle;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ void *phdr = NULL, *pdata = NULL;
+ u32 dlen = 0, status = 0;
struct list_head *plist;
- plist = &pasync_ctx->async_entry[cri].wait_queue.list;
- list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
- list_del(&pasync_handle->link);
-
- if (pasync_handle->is_header) {
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_header.free_list);
- pasync_ctx->async_header.free_entries++;
- } else {
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_data.free_list);
- pasync_ctx->async_data.free_entries++;
+ plist = &pasync_ctx->async_entry[cri].wq.list;
+ plast_handle = NULL;
+ list_for_each_entry(pasync_handle, plist, link) {
+ plast_handle = pasync_handle;
+ /* get the header, the first entry */
+ if (!phdr) {
+ phdr = pasync_handle->pbuffer;
+ continue;
+ }
+ /* use first buffer to collect all the data */
+ if (!pdata) {
+ pdata = pasync_handle->pbuffer;
+ dlen = pasync_handle->buffer_len;
+ continue;
}
+ memcpy(pdata + dlen, pasync_handle->pbuffer,
+ pasync_handle->buffer_len);
+ dlen += pasync_handle->buffer_len;
}
- INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
- pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
- pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+ if (!plast_handle->is_final) {
+ /* last handle should have final PDU notification from FW */
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
+ beiscsi_conn->beiscsi_conn_cid, plast_handle,
+ pasync_ctx->async_entry[cri].wq.hdr_len,
+ pasync_ctx->async_entry[cri].wq.bytes_needed,
+ pasync_ctx->async_entry[cri].wq.bytes_received);
+ }
+ spin_lock_bh(&session->back_lock);
+ status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
+ spin_unlock_bh(&session->back_lock);
+ beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+ return status;
}
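beiscsi_hdl_fwd_pdu() walks the per-CRI wait queue, takes the header from the first entry, and coalesces all subsequent data segments into the first data buffer before completing one contiguous PDU. A standalone sketch of that gathering step, using plain strings in place of DMA buffers:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char seg1[8] = "abcd";		/* first data buffer: collects everything */
	const char *seg2 = "efg";	/* a later segment from the wait queue */
	unsigned dlen = 4;		/* bytes already in the first buffer */

	memcpy(seg1 + dlen, seg2, strlen(seg2));
	dlen += strlen(seg2);
	seg1[dlen] = '\0';
	printf("gathered %u bytes: %s\n", dlen, seg1);
	return 0;
}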
-static struct phys_addr *
-hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
- unsigned int is_header, unsigned int host_write_ptr)
+static unsigned int
+beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct hd_async_context *pasync_ctx,
+ struct hd_async_handle *pasync_handle)
{
- struct phys_addr *pasync_sge = NULL;
+ unsigned int bytes_needed = 0, status = 0;
+ u16 cri = pasync_handle->cri;
+ struct cri_wait_queue *wq;
+ struct beiscsi_hba *phba;
+ struct pdu_base *ppdu;
+ char *err = "";
- if (is_header)
- pasync_sge = pasync_ctx->async_header.ring_base;
- else
- pasync_sge = pasync_ctx->async_data.ring_base;
+ phba = beiscsi_conn->phba;
+ wq = &pasync_ctx->async_entry[cri].wq;
+ if (pasync_handle->is_header) {
+ /* check if a PDU hdr is rcv'd while the previous hdr is still incomplete */
+ if (wq->hdr_len) {
+ err = "incomplete";
+ goto drop_pdu;
+ }
+ ppdu = pasync_handle->pbuffer;
+ bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
+ data_len_hi, ppdu);
+ bytes_needed <<= 16;
+ bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
+ data_len_lo, ppdu));
+ wq->hdr_len = pasync_handle->buffer_len;
+ wq->bytes_received = 0;
+ wq->bytes_needed = bytes_needed;
+ list_add_tail(&pasync_handle->link, &wq->list);
+ if (!bytes_needed)
+ status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+ pasync_ctx, cri);
+ } else {
+ /* check if data received has header and is needed */
+ if (!wq->hdr_len || !wq->bytes_needed) {
+ err = "header less";
+ goto drop_pdu;
+ }
+ wq->bytes_received += pasync_handle->buffer_len;
+ /* Something got overwritten? Better catch it here. */
+ if (wq->bytes_received > wq->bytes_needed) {
+ err = "overflow";
+ goto drop_pdu;
+ }
+ list_add_tail(&pasync_handle->link, &wq->list);
+ if (wq->bytes_received == wq->bytes_needed)
+ status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+ pasync_ctx, cri);
+ }
+ return status;
- return pasync_sge + host_write_ptr;
+drop_pdu:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
+ beiscsi_conn->beiscsi_conn_cid, err,
+ pasync_handle->is_header ? 'H' : 'D',
+ wq->hdr_len, wq->bytes_needed,
+ pasync_handle->buffer_len);
+ /* discard this handle */
+ beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+ /* free all the other handles in cri_wait_queue */
+ beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+ /* try continuing */
+ return status;
}
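In beiscsi_hdl_gather_pdu() the expected payload length is assembled from two header fields: data_len_hi provides the upper bits (shifted left by 16) and data_len_lo, stored big-endian, provides the lower 16 bits. A self-contained arithmetic sketch of that assembly; the field values below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t be16_to_host(uint16_t be)
{
	return (uint16_t)((be >> 8) | (be << 8));	/* little-endian host assumed */
}

int main(void)
{
	uint32_t data_len_hi = 0x01;	/* upper bits of the data length */
	uint16_t data_len_lo = 0x3412;	/* 0x1234 stored big-endian */
	uint32_t bytes_needed;

	bytes_needed = data_len_hi;
	bytes_needed <<= 16;
	bytes_needed |= be16_to_host(data_len_lo);
	printf("bytes_needed = 0x%x\n", (unsigned)bytes_needed);	/* 0x11234 */
	return 0;
}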
-static void hwi_post_async_buffers(struct beiscsi_hba *phba,
- unsigned int is_header, uint8_t ulp_num)
+static void
+beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
+ u8 header, u8 ulp_num)
{
+ struct hd_async_handle *pasync_handle, *tmp, **slot;
+ struct hd_async_context *pasync_ctx;
struct hwi_controller *phwi_ctrlr;
- struct hwi_async_pdu_context *pasync_ctx;
- struct async_pdu_handle *pasync_handle;
- struct list_head *pfree_link, *pbusy_list;
+ struct list_head *hfree_list;
struct phys_addr *pasync_sge;
- unsigned int ring_id, num_entries;
- unsigned int host_write_num, doorbell_offset;
- unsigned int writables;
- unsigned int i = 0;
- u32 doorbell = 0;
+ u32 ring_id, doorbell = 0;
+ u16 index, num_entries;
+ u32 doorbell_offset;
+ u16 prod = 0, cons;
phwi_ctrlr = phba->phwi_ctrlr;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
num_entries = pasync_ctx->num_entries;
-
- if (is_header) {
- writables = min(pasync_ctx->async_header.writables,
- pasync_ctx->async_header.free_entries);
- pfree_link = pasync_ctx->async_header.free_list.next;
- host_write_num = pasync_ctx->async_header.host_write_ptr;
+ if (header) {
+ cons = pasync_ctx->async_header.free_entries;
+ hfree_list = &pasync_ctx->async_header.free_list;
ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
- doorbell_offset;
+ doorbell_offset;
} else {
- writables = min(pasync_ctx->async_data.writables,
- pasync_ctx->async_data.free_entries);
- pfree_link = pasync_ctx->async_data.free_list.next;
- host_write_num = pasync_ctx->async_data.host_write_ptr;
+ cons = pasync_ctx->async_data.free_entries;
+ hfree_list = &pasync_ctx->async_data.free_list;
ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
- doorbell_offset;
+ doorbell_offset;
}
+ /* the number of entries posted must be a multiple of 8 */
+ if (cons % 8)
+ return;
- writables = (writables / 8) * 8;
- if (writables) {
- for (i = 0; i < writables; i++) {
- pbusy_list =
- hwi_get_async_busy_list(pasync_ctx, is_header,
- host_write_num);
- pasync_handle =
- list_entry(pfree_link, struct async_pdu_handle,
- link);
- WARN_ON(!pasync_handle);
- pasync_handle->consumed = 0;
-
- pfree_link = pfree_link->next;
-
- pasync_sge = hwi_get_ring_address(pasync_ctx,
- is_header, host_write_num);
-
- pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
- pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
-
- list_move(&pasync_handle->link, pbusy_list);
-
- host_write_num++;
- host_write_num = host_write_num % num_entries;
- }
-
- if (is_header) {
- pasync_ctx->async_header.host_write_ptr =
- host_write_num;
- pasync_ctx->async_header.free_entries -= writables;
- pasync_ctx->async_header.writables -= writables;
- pasync_ctx->async_header.busy_entries += writables;
- } else {
- pasync_ctx->async_data.host_write_ptr = host_write_num;
- pasync_ctx->async_data.free_entries -= writables;
- pasync_ctx->async_data.writables -= writables;
- pasync_ctx->async_data.busy_entries += writables;
- }
-
- doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
- doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
- doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
- doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
- << DB_DEF_PDU_CQPROC_SHIFT;
-
- iowrite32(doorbell, phba->db_va + doorbell_offset);
- }
-}
-
-static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
- struct beiscsi_conn *beiscsi_conn,
- struct i_t_dpdu_cqe *pdpdu_cqe)
-{
- struct hwi_controller *phwi_ctrlr;
- struct hwi_async_pdu_context *pasync_ctx;
- struct async_pdu_handle *pasync_handle = NULL;
- unsigned int cq_index = -1;
- uint16_t cri_index = BE_GET_CRI_FROM_CID(
- beiscsi_conn->beiscsi_conn_cid);
-
- phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
- BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
- cri_index));
-
- pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
- pdpdu_cqe, &cq_index);
- BUG_ON(pasync_handle->is_header != 0);
- if (pasync_handle->consumed == 0)
- hwi_update_async_writables(phba, pasync_ctx,
- pasync_handle->is_header, cq_index);
-
- hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
- hwi_post_async_buffers(phba, pasync_handle->is_header,
- BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
- cri_index));
-}
-
-static unsigned int
-hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
- struct beiscsi_hba *phba,
- struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
-{
- struct list_head *plist;
- struct async_pdu_handle *pasync_handle;
- void *phdr = NULL;
- unsigned int hdr_len = 0, buf_len = 0;
- unsigned int status, index = 0, offset = 0;
- void *pfirst_buffer = NULL;
- unsigned int num_buf = 0;
-
- plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+ list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
+ list_del_init(&pasync_handle->link);
+ pasync_handle->is_final = 0;
+ pasync_handle->buffer_len = 0;
- list_for_each_entry(pasync_handle, plist, link) {
- if (index == 0) {
- phdr = pasync_handle->pbuffer;
- hdr_len = pasync_handle->buffer_len;
- } else {
- buf_len = pasync_handle->buffer_len;
- if (!num_buf) {
- pfirst_buffer = pasync_handle->pbuffer;
- num_buf++;
- }
- memcpy(pfirst_buffer + offset,
- pasync_handle->pbuffer, buf_len);
- offset += buf_len;
+ /* handles can be consumed out of order, use index in handle */
+ index = pasync_handle->index;
+ WARN_ON(pasync_handle->is_header != header);
+ if (header)
+ slot = &pasync_ctx->async_entry[index].header;
+ else
+ slot = &pasync_ctx->async_entry[index].data;
+ /**
+ * The slot just tracks the handle's hold and release, so
+ * overwriting at the same index won't do any harm but
+ * needs to be caught.
+ */
+ if (*slot != NULL) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : async PDU %s slot at %u not empty\n",
+ header ? "header" : "data", index);
}
- index++;
+ /**
+ * We use the same freed index as in completion to post, so this
+ * operation is not required for refills. It is required only
+ * for ring creation.
+ */
+ if (header)
+ pasync_sge = pasync_ctx->async_header.ring_base;
+ else
+ pasync_sge = pasync_ctx->async_data.ring_base;
+ pasync_sge += index;
+ /* if it's a refill then the address is the same; hi is lo */
+ WARN_ON(pasync_sge->hi &&
+ pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
+ WARN_ON(pasync_sge->lo &&
+ pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
+ pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
+ pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
+
+ *slot = pasync_handle;
+ if (++prod == cons)
+ break;
}
+ if (header)
+ pasync_ctx->async_header.free_entries -= prod;
+ else
+ pasync_ctx->async_data.free_entries -= prod;
- status = beiscsi_process_async_pdu(beiscsi_conn, phba,
- phdr, hdr_len, pfirst_buffer,
- offset);
-
- hwi_free_async_msg(phba, pasync_ctx, cri);
- return 0;
-}
-
-static unsigned int
-hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
- struct beiscsi_hba *phba,
- struct async_pdu_handle *pasync_handle)
-{
- struct hwi_async_pdu_context *pasync_ctx;
- struct hwi_controller *phwi_ctrlr;
- unsigned int bytes_needed = 0, status = 0;
- unsigned short cri = pasync_handle->cri;
- struct pdu_base *ppdu;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
- BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
- BE_GET_CRI_FROM_CID(beiscsi_conn->
- beiscsi_conn_cid)));
-
- list_del(&pasync_handle->link);
- if (pasync_handle->is_header) {
- pasync_ctx->async_header.busy_entries--;
- if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
- hwi_free_async_msg(phba, pasync_ctx, cri);
- BUG();
- }
-
- pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
- pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
- pasync_ctx->async_entry[cri].wait_queue.hdr_len =
- (unsigned short)pasync_handle->buffer_len;
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_entry[cri].wait_queue.list);
-
- ppdu = pasync_handle->pbuffer;
- bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
- data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
- 0xFFFF0000) | ((be16_to_cpu((ppdu->
- dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
- & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
-
- if (status == 0) {
- pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
- bytes_needed;
-
- if (bytes_needed == 0)
- status = hwi_fwd_async_msg(beiscsi_conn, phba,
- pasync_ctx, cri);
- }
- } else {
- pasync_ctx->async_data.busy_entries--;
- if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_entry[cri].wait_queue.
- list);
- pasync_ctx->async_entry[cri].wait_queue.
- bytes_received +=
- (unsigned short)pasync_handle->buffer_len;
-
- if (pasync_ctx->async_entry[cri].wait_queue.
- bytes_received >=
- pasync_ctx->async_entry[cri].wait_queue.
- bytes_needed)
- status = hwi_fwd_async_msg(beiscsi_conn, phba,
- pasync_ctx, cri);
- }
- }
- return status;
+ doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
+ doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
+ doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
+ doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
+ iowrite32(doorbell, phba->db_va + doorbell_offset);
}
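beiscsi_hdq_post_handles() only posts buffers in multiples of 8 and then packs the ring id, rearm flag, event flag and the number posted into a single doorbell word. The sketch below shows just that packing pattern; the masks and shifts are placeholders, not the real DB_DEF_PDU_* constants:

#include <stdint.h>
#include <stdio.h>

#define RING_ID_MASK	0x3FF	/* placeholder */
#define REARM_SHIFT	14	/* placeholder */
#define EVENT_SHIFT	15	/* placeholder */
#define NPOSTED_MASK	0xFF	/* placeholder */
#define NPOSTED_SHIFT	16	/* placeholder */

int main(void)
{
	uint32_t ring_id = 5, prod = 16, doorbell = 0;

	if (prod % 8)			/* post only in multiples of 8 */
		return 0;

	doorbell |= ring_id & RING_ID_MASK;
	doorbell |= 1u << REARM_SHIFT;	/* rearm */
	doorbell |= 0u << EVENT_SHIFT;	/* no event */
	doorbell |= (prod & NPOSTED_MASK) << NPOSTED_SHIFT;
	printf("doorbell = 0x%08x\n", (unsigned)doorbell);
	return 0;
}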
-static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
- struct beiscsi_hba *phba,
- struct i_t_dpdu_cqe *pdpdu_cqe)
+static void
+beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
{
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct hd_async_handle *pasync_handle = NULL;
+ struct hd_async_context *pasync_ctx;
struct hwi_controller *phwi_ctrlr;
- struct hwi_async_pdu_context *pasync_ctx;
- struct async_pdu_handle *pasync_handle = NULL;
- unsigned int cq_index = -1;
- uint16_t cri_index = BE_GET_CRI_FROM_CID(
- beiscsi_conn->beiscsi_conn_cid);
+ u16 cid_cri;
+ u8 ulp_num;
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
- BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
- cri_index));
-
- pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
- pdpdu_cqe, &cq_index);
-
- if (pasync_handle->consumed == 0)
- hwi_update_async_writables(phba, pasync_ctx,
- pasync_handle->is_header, cq_index);
+ cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+ ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
+ pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
+ pdpdu_cqe);
+ if (!pasync_handle)
+ return;
- hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
- hwi_post_async_buffers(phba, pasync_handle->is_header,
- BEISCSI_GET_ULP_FROM_CRI(
- phwi_ctrlr, cri_index));
+ beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
+ beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
}
void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2051,6 +1816,9 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
mcc_compl = queue_tail_node(mcc_cq);
mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+ if (beiscsi_hba_in_error(phba))
+ return;
+
if (num_processed >= 32) {
hwi_ring_cq_db(phba, mcc_cq->id,
num_processed, 0);
@@ -2073,6 +1841,19 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
+static void beiscsi_mcc_work(struct work_struct *work)
+{
+ struct be_eq_obj *pbe_eq;
+ struct beiscsi_hba *phba;
+
+ pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
+ phba = pbe_eq->phba;
+ beiscsi_process_mcc_cq(phba);
+ /* rearm EQ for further interrupts */
+ if (!beiscsi_hba_in_error(phba))
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+}
+
/**
* beiscsi_process_cq()- Process the Completion Queue
* @pbe_eq: Event Q on which the Completion has come
@@ -2101,6 +1882,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
CQE_VALID_MASK) {
+ if (beiscsi_hba_in_error(phba))
+ return 0;
+
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
@@ -2165,8 +1949,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock);
- hwi_process_default_pdu_ring(beiscsi_conn, phba,
- (struct i_t_dpdu_cqe *)sol);
+ beiscsi_hdq_process_compl(beiscsi_conn,
+ (struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock);
break;
case UNSOL_DATA_NOTIFY:
@@ -2176,8 +1960,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock);
- hwi_process_default_pdu_ring(beiscsi_conn, phba,
- (struct i_t_dpdu_cqe *)sol);
+ beiscsi_hdq_process_compl(beiscsi_conn,
+ (struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_INVALIDATE_INDEX_NOTIFY:
@@ -2213,8 +1997,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
"BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock);
- hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
- (struct i_t_dpdu_cqe *) sol);
+ /* driver consumes the entry and drops the contents */
+ beiscsi_hdq_process_compl(beiscsi_conn,
+ (struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
@@ -2262,60 +2047,32 @@ proc_next_cqe:
return total;
}
-void beiscsi_process_all_cqs(struct work_struct *work)
-{
- unsigned long flags;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
- struct beiscsi_hba *phba;
- struct be_eq_obj *pbe_eq =
- container_of(work, struct be_eq_obj, work_cqs);
-
- phba = pbe_eq->phba;
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_context = phwi_ctrlr->phwi_ctxt;
-
- if (pbe_eq->todo_mcc_cq) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = false;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_mcc_cq(phba);
- }
-
- if (pbe_eq->todo_cq) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = false;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
- }
-
- /* rearm EQ for further interrupts */
- hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
-}
-
static int be_iopoll(struct irq_poll *iop, int budget)
{
- unsigned int ret, num_eq_processed;
+ unsigned int ret, io_events;
struct beiscsi_hba *phba;
struct be_eq_obj *pbe_eq;
struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- num_eq_processed = 0;
pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
phba = pbe_eq->phba;
+ if (beiscsi_hba_in_error(phba)) {
+ irq_poll_complete(iop);
+ return 0;
+ }
+
+ io_events = 0;
eq = &pbe_eq->q;
eqe = queue_tail_node(eq);
-
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
EQE_VALID_MASK) {
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
- num_eq_processed++;
+ io_events++;
}
-
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+ hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
ret = beiscsi_process_cq(pbe_eq, budget);
pbe_eq->cq_count += ret;
@@ -2325,7 +2082,8 @@ static int be_iopoll(struct irq_poll *iop, int budget)
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
pbe_eq->q.id, ret);
- hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+ if (!beiscsi_hba_in_error(phba))
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
return ret;
}
@@ -2691,20 +2449,20 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct async_pdu_handle);
+ sizeof(struct hd_async_handle);
mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct async_pdu_handle);
+ sizeof(struct hd_async_handle);
mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
- sizeof(struct hwi_async_pdu_context) +
+ sizeof(struct hd_async_context) +
(BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct hwi_async_entry));
+ sizeof(struct hd_async_entry));
}
}
}
@@ -2963,35 +2721,34 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
uint8_t ulp_num;
struct hwi_controller *phwi_ctrlr;
struct hba_parameters *p = &phba->params;
- struct hwi_async_pdu_context *pasync_ctx;
- struct async_pdu_handle *pasync_header_h, *pasync_data_h;
+ struct hd_async_context *pasync_ctx;
+ struct hd_async_handle *pasync_header_h, *pasync_data_h;
unsigned int index, idx, num_per_mem, num_async_data;
struct be_mem_descriptor *mem_descr;
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
+ /* get async_ctx for each ULP */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phwi_ctrlr = phba->phwi_ctrlr;
phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
- (struct hwi_async_pdu_context *)
+ (struct hd_async_context *)
mem_descr->mem_array[0].virtual_address;
pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
memset(pasync_ctx, 0, sizeof(*pasync_ctx));
pasync_ctx->async_entry =
- (struct hwi_async_entry *)
+ (struct hd_async_entry *)
((long unsigned int)pasync_ctx +
- sizeof(struct hwi_async_pdu_context));
+ sizeof(struct hd_async_context));
pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
ulp_num);
- pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
+ /* setup header buffers */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3008,6 +2765,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
"BM_%d : No Virtual address for ULP : %d\n",
ulp_num);
+ pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
pasync_ctx->async_header.va_base =
mem_descr->mem_array[0].virtual_address;
@@ -3015,6 +2773,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
mem_descr->mem_array[0].
bus_address.u.a64.address;
+ /* setup header buffer sgls */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3034,6 +2793,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.ring_base =
mem_descr->mem_array[0].virtual_address;
+ /* setup header buffer handles */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3052,9 +2812,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.handle_base =
mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_header.writables = 0;
INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+ /* setup data buffer sgls */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3074,6 +2834,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_data.ring_base =
mem_descr->mem_array[0].virtual_address;
+ /* setup data buffer handles */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3085,16 +2846,16 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_data.handle_base =
mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_data.writables = 0;
INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
pasync_header_h =
- (struct async_pdu_handle *)
+ (struct hd_async_handle *)
pasync_ctx->async_header.handle_base;
pasync_data_h =
- (struct async_pdu_handle *)
+ (struct hd_async_handle *)
pasync_ctx->async_data.handle_base;
+ /* setup data buffers */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
(ulp_num * MEM_DESCR_OFFSET);
@@ -3112,6 +2873,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
ulp_num);
idx = 0;
+ pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
pasync_ctx->async_data.va_base =
mem_descr->mem_array[idx].virtual_address;
pasync_ctx->async_data.pa_base.u.a64.address =
@@ -3125,7 +2887,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
for (index = 0; index < BEISCSI_GET_CID_COUNT
(phba, ulp_num); index++) {
pasync_header_h->cri = -1;
- pasync_header_h->index = (char)index;
+ pasync_header_h->is_header = 1;
+ pasync_header_h->index = index;
INIT_LIST_HEAD(&pasync_header_h->link);
pasync_header_h->pbuffer =
(void *)((unsigned long)
@@ -3142,14 +2905,13 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
free_list);
pasync_header_h++;
pasync_ctx->async_header.free_entries++;
- pasync_ctx->async_header.writables++;
-
INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
- wait_queue.list);
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
- header_busy_list);
+ wq.list);
+ pasync_ctx->async_entry[index].header = NULL;
+
pasync_data_h->cri = -1;
- pasync_data_h->index = (char)index;
+ pasync_data_h->is_header = 0;
+ pasync_data_h->index = index;
INIT_LIST_HEAD(&pasync_data_h->link);
if (!num_async_data) {
@@ -3184,16 +2946,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
free_list);
pasync_data_h++;
pasync_ctx->async_data.free_entries++;
- pasync_ctx->async_data.writables++;
-
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
- data_busy_list);
+ pasync_ctx->async_entry[index].data = NULL;
}
-
- pasync_ctx->async_header.host_write_ptr = 0;
- pasync_ctx->async_header.ep_read_ptr = -1;
- pasync_ctx->async_data.host_write_ptr = 0;
- pasync_ctx->async_data.ep_read_ptr = -1;
}
}
@@ -3265,8 +3019,8 @@ static int be_fill_queue(struct be_queue_info *q,
static int beiscsi_create_eqs(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context)
{
+ int ret = -ENOMEM, eq_for_mcc;
unsigned int i, num_eq_pages;
- int ret = 0, eq_for_mcc;
struct be_queue_info *eq;
struct be_dma_mem *mem;
void *eq_vaddress;
@@ -3284,8 +3038,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
mem = &eq->dma_mem;
phwi_context->be_eq[i].phba = phba;
eq_vaddress = pci_alloc_consistent(phba->pcidev,
- num_eq_pages * PAGE_SIZE,
- &paddr);
+ num_eq_pages * PAGE_SIZE,
+ &paddr);
if (!eq_vaddress)
goto create_eq_error;
@@ -3313,6 +3067,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
phwi_context->be_eq[i].q.id);
}
return 0;
+
create_eq_error:
for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
eq = &phwi_context->be_eq[i].q;
@@ -3329,11 +3084,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context)
{
unsigned int i, num_cq_pages;
- int ret = 0;
struct be_queue_info *cq, *eq;
struct be_dma_mem *mem;
struct be_eq_obj *pbe_eq;
void *cq_vaddress;
+ int ret = -ENOMEM;
dma_addr_t paddr;
num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
@@ -3347,10 +3102,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
pbe_eq->phba = phba;
mem = &cq->dma_mem;
cq_vaddress = pci_alloc_consistent(phba->pcidev,
- num_cq_pages * PAGE_SIZE,
- &paddr);
+ num_cq_pages * PAGE_SIZE,
+ &paddr);
if (!cq_vaddress)
goto create_cq_error;
+
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
@@ -3385,7 +3141,6 @@ create_cq_error:
mem->va, mem->dma);
}
return ret;
-
}
static int
@@ -3437,7 +3192,6 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
"BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
ulp_num,
phwi_context->be_def_hdrq[ulp_num].id);
- hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
return 0;
}
@@ -3492,11 +3246,9 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
ulp_num,
phwi_context->be_def_dataq[ulp_num].id);
- hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : DEFAULT PDU DATA RING CREATED"
"on ULP : %d\n", ulp_num);
-
return 0;
}
@@ -3716,10 +3468,53 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
{
- struct be_queue_info *q;
struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_dma_mem *ptag_mem;
+ struct be_queue_info *q;
+ int i, tag;
q = &phba->ctrl.mcc_obj.q;
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ tag = i + 1;
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state))
+ continue;
+
+ if (test_bit(MCC_TAG_STATE_TIMEOUT,
+ &ctrl->ptag_state[tag].tag_state)) {
+ ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+ if (ptag_mem->size) {
+ pci_free_consistent(ctrl->pdev,
+ ptag_mem->size,
+ ptag_mem->va,
+ ptag_mem->dma);
+ ptag_mem->size = 0;
+ }
+ continue;
+ }
+ /**
+ * If MCC is still active and waiting then wake up the process.
+	 * We are here only because the port is going offline. The process
+	 * sees that (BEISCSI_HBA_ONLINE is cleared), an EIO error is
+	 * returned for the operation, and the allocated memory is cleaned up.
+ */
+ if (waitqueue_active(&ctrl->mcc_wait[tag])) {
+ ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
+ ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ /*
+ * Control tag info gets reinitialized in enable
+ * so wait for the process to clear running state.
+ */
+ while (test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state))
+ schedule_timeout_uninterruptible(HZ);
+ }
+ /**
+ * For MCC with tag_states MCC_TAG_STATE_ASYNC and
+	 * MCC_TAG_STATE_IGNORE nothing needs to be done.
+ */
+ }
if (q->created) {
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
be_queue_free(phba, q);
@@ -3732,68 +3527,6 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
}
}
-static void hwi_cleanup(struct beiscsi_hba *phba)
-{
- struct be_queue_info *q;
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
- struct hwi_async_pdu_context *pasync_ctx;
- int i, eq_for_mcc, ulp_num;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_context = phwi_ctrlr->phwi_ctxt;
-
- be_cmd_iscsi_remove_template_hdr(ctrl);
-
- for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
- q = &phwi_context->be_wrbq[i];
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
- }
- kfree(phwi_context->be_wrbq);
- free_wrb_handles(phba);
-
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
- if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
- q = &phwi_context->be_def_hdrq[ulp_num];
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
- q = &phwi_context->be_def_dataq[ulp_num];
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
- }
- }
-
- beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
-
- for (i = 0; i < (phba->num_cpus); i++) {
- q = &phwi_context->be_cq[i];
- if (q->created) {
- be_queue_free(phba, q);
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
- }
- }
-
- be_mcc_queues_destroy(phba);
- if (phba->msix_enabled)
- eq_for_mcc = 1;
- else
- eq_for_mcc = 0;
- for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
- q = &phwi_context->be_eq[i].q;
- if (q->created) {
- be_queue_free(phba, q);
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
- }
- }
- be_cmd_fw_uninit(ctrl);
-}
-
static int be_mcc_queues_create(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context)
{
@@ -3875,6 +3608,118 @@ static void find_num_cpus(struct beiscsi_hba *phba)
}
}
+static void hwi_purge_eq(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *eq;
+ struct be_eq_entry *eqe = NULL;
+ int i, eq_msix;
+ unsigned int num_processed;
+
+ if (beiscsi_hba_in_error(phba))
+ return;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ if (phba->msix_enabled)
+ eq_msix = 1;
+ else
+ eq_msix = 0;
+
+ for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
+ eq = &phwi_context->be_eq[i].q;
+ eqe = queue_tail_node(eq);
+ num_processed = 0;
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_processed++;
+ }
+
+ if (num_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
+ }
+}
+
+static void hwi_cleanup_port(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct hd_async_context *pasync_ctx;
+ int i, eq_for_mcc, ulp_num;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
+
+ /**
+ * Purge all EQ entries that may have been left out. This is to
+	 * work around a problem we've seen occasionally where the driver gets an
+ * interrupt with EQ entry bit set after stopping the controller.
+ */
+ hwi_purge_eq(phba);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ be_cmd_iscsi_remove_template_hdr(ctrl);
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ q = &phwi_context->be_wrbq[i];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
+ }
+ kfree(phwi_context->be_wrbq);
+ free_wrb_handles(phba);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ q = &phwi_context->be_def_hdrq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ q = &phwi_context->be_def_dataq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ }
+ }
+
+ beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+
+ for (i = 0; i < (phba->num_cpus); i++) {
+ q = &phwi_context->be_cq[i];
+ if (q->created) {
+ be_queue_free(phba, q);
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ }
+ }
+
+ be_mcc_queues_destroy(phba);
+ if (phba->msix_enabled)
+ eq_for_mcc = 1;
+ else
+ eq_for_mcc = 0;
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+ q = &phwi_context->be_eq[i].q;
+ if (q->created) {
+ be_queue_free(phba, q);
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+ }
+ }
+ /* this ensures complete FW cleanup */
+ beiscsi_cmd_function_reset(phba);
+ /* last communication, indicate driver is unloading */
+ beiscsi_cmd_special_wrb(&phba->ctrl, 0);
+}
+
static int hwi_init_port(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
@@ -3887,9 +3732,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_context = phwi_ctrlr->phwi_ctxt;
phwi_context->max_eqd = 128;
phwi_context->min_eqd = 0;
- phwi_context->cur_eqd = 0;
- be_cmd_fw_initialize(&phba->ctrl);
- /* set optic state to unknown */
+ phwi_context->cur_eqd = 32;
+ /* set port optic state to unknown */
phba->optic_state = 0xff;
status = beiscsi_create_eqs(phba, phwi_context);
@@ -3903,7 +3747,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
if (status != 0)
goto error;
- status = mgmt_check_supported_fw(ctrl, phba);
+ status = beiscsi_check_supported_fw(ctrl, phba);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Unsupported fw version\n");
@@ -3919,7 +3763,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
def_pdu_ring_sz =
BEISCSI_GET_CID_COUNT(phba, ulp_num) *
sizeof(struct phys_addr);
@@ -3945,6 +3788,15 @@ static int hwi_init_port(struct beiscsi_hba *phba)
ulp_num);
goto error;
}
+ /**
+ * Now that the default PDU rings have been created,
+ * let EP know about it.
+ * Call beiscsi_cmd_iscsi_cleanup before posting?
+ */
+ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+ ulp_num);
+ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+ ulp_num);
}
}
@@ -3973,7 +3825,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
uint16_t cri = 0;
- struct hwi_async_pdu_context *pasync_ctx;
+ struct hd_async_context *pasync_ctx;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
phwi_ctrlr, ulp_num);
@@ -3985,6 +3837,14 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_ctrlr->wrb_context[cri].cid] =
async_arr_idx++;
}
+ /**
+ * Now that the default PDU rings have been created,
+ * let EP know about it.
+ */
+ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+ ulp_num);
+ beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+ ulp_num);
}
}
@@ -3995,7 +3855,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
error:
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_port failed");
- hwi_cleanup(phba);
+ hwi_cleanup_port(phba);
return status;
}
@@ -4354,149 +4214,6 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
"BM_%d : In hwi_disable_intr, Already Disabled\n");
}
-/**
- * beiscsi_get_boot_info()- Get the boot session info
- * @phba: The device priv structure instance
- *
- * Get the boot target info and store in driver priv structure
- *
- * return values
- * Success: 0
- * Failure: Non-Zero Value
- **/
-static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
-{
- struct be_cmd_get_session_resp *session_resp;
- struct be_dma_mem nonemb_cmd;
- unsigned int tag;
- unsigned int s_handle;
- int ret = -ENOMEM;
-
- /* Get the session handle of the boot target */
- ret = be_mgmt_get_boot_shandle(phba, &s_handle);
- if (ret) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : No boot session\n");
-
- if (ret == -ENXIO)
- phba->get_boot = 0;
-
-
- return ret;
- }
- phba->get_boot = 0;
- nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
- sizeof(*session_resp),
- &nonemb_cmd.dma);
- if (nonemb_cmd.va == NULL) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : Failed to allocate memory for"
- "beiscsi_get_session_info\n");
-
- return -ENOMEM;
- }
-
- tag = mgmt_get_session_info(phba, s_handle,
- &nonemb_cmd);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : beiscsi_get_session_info"
- " Failed\n");
-
- goto boot_freemem;
- }
-
- ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
- if (ret) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : beiscsi_get_session_info Failed");
-
- if (ret != -EBUSY)
- goto boot_freemem;
- else
- return ret;
- }
-
- session_resp = nonemb_cmd.va ;
-
- memcpy(&phba->boot_sess, &session_resp->session_info,
- sizeof(struct mgmt_session_info));
-
- beiscsi_logout_fw_sess(phba,
- phba->boot_sess.session_handle);
- ret = 0;
-
-boot_freemem:
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
- return ret;
-}
-
-static void beiscsi_boot_release(void *data)
-{
- struct beiscsi_hba *phba = data;
-
- scsi_host_put(phba->shost);
-}
-
-static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
-{
- struct iscsi_boot_kobj *boot_kobj;
-
- /* it has been created previously */
- if (phba->boot_kset)
- return 0;
-
- /* get boot info using mgmt cmd */
- if (beiscsi_get_boot_info(phba))
- /* Try to see if we can carry on without this */
- return 0;
-
- phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
- if (!phba->boot_kset)
- return -ENOMEM;
-
- /* get a ref because the show function will ref the phba */
- if (!scsi_host_get(phba->shost))
- goto free_kset;
- boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
- beiscsi_show_boot_tgt_info,
- beiscsi_tgt_get_attr_visibility,
- beiscsi_boot_release);
- if (!boot_kobj)
- goto put_shost;
-
- if (!scsi_host_get(phba->shost))
- goto free_kset;
- boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
- beiscsi_show_boot_ini_info,
- beiscsi_ini_get_attr_visibility,
- beiscsi_boot_release);
- if (!boot_kobj)
- goto put_shost;
-
- if (!scsi_host_get(phba->shost))
- goto free_kset;
- boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
- beiscsi_show_boot_eth_info,
- beiscsi_eth_get_attr_visibility,
- beiscsi_boot_release);
- if (!boot_kobj)
- goto put_shost;
- return 0;
-
-put_shost:
- scsi_host_put(phba->shost);
-free_kset:
- iscsi_boot_destroy_kset(phba->boot_kset);
- phba->boot_kset = NULL;
- return -ENOMEM;
-}
-
static int beiscsi_init_port(struct beiscsi_hba *phba)
{
int ret;
@@ -4516,7 +4233,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
goto do_cleanup_ctrlr;
}
- if (hba_setup_cid_tbls(phba)) {
+ ret = hba_setup_cid_tbls(phba);
+ if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed in hba_setup_cid_tbls\n");
kfree(phba->io_sgl_hndl_base);
@@ -4527,61 +4245,15 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
return ret;
do_cleanup_ctrlr:
- hwi_cleanup(phba);
+ hwi_cleanup_port(phba);
return ret;
}
-static void hwi_purge_eq(struct beiscsi_hba *phba)
-{
- struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
- struct be_queue_info *eq;
- struct be_eq_entry *eqe = NULL;
- int i, eq_msix;
- unsigned int num_processed;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled)
- eq_msix = 1;
- else
- eq_msix = 0;
-
- for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
- eq = &phwi_context->be_eq[i].q;
- eqe = queue_tail_node(eq);
- num_processed = 0;
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_processed++;
- }
-
- if (num_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
- }
-}
-
-static void beiscsi_clean_port(struct beiscsi_hba *phba)
+static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
{
- int mgmt_status, ulp_num;
struct ulp_cid_info *ptr_cid_info = NULL;
+ int ulp_num;
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
- if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
- mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
- if (mgmt_status)
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_INIT,
- "BM_%d : mgmt_epfw_cleanup FAILED"
- " for ULP_%d\n", ulp_num);
- }
- }
-
- hwi_purge_eq(phba);
- hwi_cleanup(phba);
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
kfree(phba->ep_array);
@@ -4598,7 +4270,6 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
}
}
}
-
}
/**
@@ -4625,16 +4296,12 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
io_task = task->dd_data;
if (io_task->pwrb_handle) {
- memset(io_task->pwrb_handle->pwrb, 0,
- sizeof(struct iscsi_wrb));
- free_wrb_handle(phba, pwrb_context,
- io_task->pwrb_handle);
+ free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
}
if (io_task->psgl_handle) {
- free_mgmt_sgl_handle(phba,
- io_task->psgl_handle);
+ free_mgmt_sgl_handle(phba, io_task->psgl_handle);
io_task->psgl_handle = NULL;
}
@@ -4671,6 +4338,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
io_task->cmd_bhs = NULL;
+ task->hdr = NULL;
}
if (task->sc) {
@@ -4686,7 +4354,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
}
if (io_task->scsi_cmnd) {
- scsi_dma_unmap(io_task->scsi_cmnd);
+ if (io_task->num_sg)
+ scsi_dma_unmap(io_task->scsi_cmnd);
io_task->scsi_cmnd = NULL;
}
} else {
@@ -5051,7 +4720,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
@@ -5165,6 +4833,15 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
int num_sg;
unsigned int writedir = 0, xferlen = 0;
+ phba = io_task->conn->phba;
+ /**
+ * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
+ * operational if FW still gets heartbeat from EP FW. Is management
+ * path really needed to continue further?
+ */
+ if (!beiscsi_hba_is_online(phba))
+ return -EIO;
+
if (!io_task->conn->login_in_progress)
task->hdr->exp_statsn = 0;
@@ -5172,8 +4849,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
return beiscsi_mtask(task);
io_task->scsi_cmnd = sc;
+ io_task->num_sg = 0;
num_sg = scsi_dma_map(sc);
- phba = io_task->conn->phba;
if (num_sg < 0) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
@@ -5184,6 +4861,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
return num_sg;
}
+ /**
+ * For scsi cmd task, check num_sg before unmapping in cleanup_task.
+ * For management task, cleanup_task checks mtask_addr before unmapping.
+ */
+ io_task->num_sg = num_sg;
xferlen = scsi_bufflen(sc);
sg = scsi_sglist(sc);
if (sc->sc_data_direction == DMA_TO_DEVICE)
@@ -5213,6 +4895,12 @@ static int beiscsi_bsg_request(struct bsg_job *job)
shost = iscsi_job_to_shost(job);
phba = iscsi_host_priv(shost);
+ if (!beiscsi_hba_is_online(phba)) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BM_%d : HBA in error 0x%lx\n", phba->state);
+ return -ENXIO;
+ }
+
switch (bsg_req->msgcode) {
case ISCSI_BSG_HST_VENDOR:
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -5240,6 +4928,14 @@ static int beiscsi_bsg_request(struct bsg_job *job)
phba->ctrl.mcc_tag_status[tag],
msecs_to_jiffies(
BEISCSI_HOST_MBX_TIMEOUT));
+
+ if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+ clear_bit(MCC_TAG_STATE_RUNNING,
+ &phba->ctrl.ptag_state[tag].tag_state);
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return -EIO;
+ }
extd_status = (phba->ctrl.mcc_tag_status[tag] &
CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
@@ -5283,106 +4979,294 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
-/*
- * beiscsi_quiesce()- Cleanup Driver resources
- * @phba: Instance Priv structure
- * @unload_state:i Clean or EEH unload state
- *
- * Free the OS and HW resources held by the driver
- **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba,
- uint32_t unload_state)
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
{
- struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
- struct be_eq_obj *pbe_eq;
- unsigned int i, msix_vec;
+ if (phba->boot_struct.boot_kset)
+ return;
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_context = phwi_ctrlr->phwi_ctxt;
- hwi_disable_intr(phba);
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- msix_vec = phba->msix_entries[i].vector;
- free_irq(msix_vec, &phwi_context->be_eq[i]);
- kfree(phba->msi_name[i]);
- }
- } else
- if (phba->pcidev->irq)
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msix(phba->pcidev);
- cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
+ /* skip if boot work is already in progress */
+ if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
+ return;
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- irq_poll_disable(&pbe_eq->iopoll);
+ phba->boot_struct.retry = 3;
+ phba->boot_struct.tag = 0;
+ phba->boot_struct.s_handle = s_handle;
+ phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
+ schedule_work(&phba->boot_work);
+}
+
+static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
+ struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
+ char *str = buf;
+ int rc = -EPERM;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = sprintf(buf, "%.*s\n",
+ (int)strlen(boot_sess->target_name),
+ (char *)&boot_sess->target_name);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
+ rc = sprintf(buf, "%pI4\n",
+ (char *)&boot_conn->dest_ipaddr.addr);
+ else
+ rc = sprintf(str, "%pI6\n",
+ (char *)&boot_conn->dest_ipaddr.addr);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = sprintf(str, "%d\n", boot_conn->dest_port);
+ break;
+
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_chap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = sprintf(str, "0\n");
+ break;
}
+ return rc;
+}
- if (unload_state == BEISCSI_CLEAN_UNLOAD) {
- destroy_workqueue(phba->wq);
- beiscsi_clean_port(phba);
- beiscsi_free_mem(phba);
+static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc = -EPERM;
- beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
- phba->ctrl.mbox_mem_alloced.size,
- phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
- } else {
- hwi_purge_eq(phba);
- hwi_cleanup(phba);
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = sprintf(str, "%s\n",
+ phba->boot_struct.boot_sess.initiator_iscsiname);
+ break;
}
+ return rc;
+}
+
+static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct beiscsi_hba *phba = data;
+ char *str = buf;
+ int rc = -EPERM;
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = sprintf(str, "2\n");
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = sprintf(str, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = beiscsi_get_macaddr(str, phba);
+ break;
+ }
+ return rc;
}
-static void beiscsi_remove(struct pci_dev *pcidev)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
- struct beiscsi_hba *phba = NULL;
+ umode_t rc = 0;
- phba = pci_get_drvdata(pcidev);
- if (!phba) {
- dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
- return;
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = S_IRUGO;
+ break;
}
+ return rc;
+}
- beiscsi_destroy_def_ifaces(phba);
- iscsi_boot_destroy_kset(phba->boot_kset);
- iscsi_host_remove(phba->shost);
- beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
- pci_dev_put(phba->pcidev);
- iscsi_host_free(phba->shost);
- pci_disable_pcie_error_reporting(pcidev);
- pci_set_drvdata(pcidev, NULL);
- pci_release_regions(pcidev);
- pci_disable_device(pcidev);
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+{
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = S_IRUGO;
+ break;
+ }
+ return rc;
}
-static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
- int i, status;
+ umode_t rc = 0;
- for (i = 0; i <= phba->num_cpus; i++)
- phba->msix_entries[i].entry = i;
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = S_IRUGO;
+ break;
+ }
+ return rc;
+}
- status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
- phba->num_cpus + 1, phba->num_cpus + 1);
- if (status > 0)
- phba->msix_enabled = true;
+static void beiscsi_boot_kobj_release(void *data)
+{
+ struct beiscsi_hba *phba = data;
- return;
+ scsi_host_put(phba->shost);
}
-static void be_eqd_update(struct beiscsi_hba *phba)
+static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
{
+ struct boot_struct *bs = &phba->boot_struct;
+ struct iscsi_boot_kobj *boot_kobj;
+
+ if (bs->boot_kset) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d: boot_kset already created\n");
+ return 0;
+ }
+
+ bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+ if (!bs->boot_kset) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d: boot_kset alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /* get shost ref because the show function will refer phba */
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+
+ boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
+ beiscsi_show_boot_tgt_info,
+ beiscsi_tgt_get_attr_visibility,
+ beiscsi_boot_kobj_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+
+ boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
+ beiscsi_show_boot_ini_info,
+ beiscsi_ini_get_attr_visibility,
+ beiscsi_boot_kobj_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+
+ boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
+ beiscsi_show_boot_eth_info,
+ beiscsi_eth_get_attr_visibility,
+ beiscsi_boot_kobj_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ return 0;
+
+put_shost:
+ scsi_host_put(phba->shost);
+free_kset:
+ iscsi_boot_destroy_kset(bs->boot_kset);
+ bs->boot_kset = NULL;
+ return -ENOMEM;
+}
+
+static void beiscsi_boot_work(struct work_struct *work)
+{
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba, boot_work);
+ struct boot_struct *bs = &phba->boot_struct;
+ unsigned int tag = 0;
+
+ if (!beiscsi_hba_is_online(phba))
+ return;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BM_%d : %s action %d\n",
+ __func__, phba->boot_struct.action);
+
+ switch (phba->boot_struct.action) {
+ case BEISCSI_BOOT_REOPEN_SESS:
+ tag = beiscsi_boot_reopen_sess(phba);
+ break;
+ case BEISCSI_BOOT_GET_SHANDLE:
+ tag = __beiscsi_boot_get_shandle(phba, 1);
+ break;
+ case BEISCSI_BOOT_GET_SINFO:
+ tag = beiscsi_boot_get_sinfo(phba);
+ break;
+ case BEISCSI_BOOT_LOGOUT_SESS:
+ tag = beiscsi_boot_logout_sess(phba);
+ break;
+ case BEISCSI_BOOT_CREATE_KSET:
+ beiscsi_boot_create_kset(phba);
+ /**
+ * updated boot_kset is made visible to all before
+ * ending the boot work.
+ */
+ mb();
+ clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+ return;
+ }
+ if (!tag) {
+ if (bs->retry--)
+ schedule_work(&phba->boot_work);
+ else
+ clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+ }
+}
+
+static void beiscsi_eqd_update_work(struct work_struct *work)
+{
+ struct hwi_context_memory *phwi_context;
struct be_set_eqd set_eqd[MAX_CPUS];
- struct be_aic_obj *aic;
- struct be_eq_obj *pbe_eq;
struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
+ struct be_eq_obj *pbe_eq;
+ struct beiscsi_hba *phba;
+ unsigned int pps, delta;
+ struct be_aic_obj *aic;
int eqd, i, num = 0;
- ulong now;
- u32 pps, delta;
- unsigned int tag;
+ unsigned long now;
+
+ phba = container_of(work, struct beiscsi_hba, eqd_update.work);
+ if (!beiscsi_hba_is_online(phba))
+ return;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -5391,13 +5275,13 @@ static void be_eqd_update(struct beiscsi_hba *phba)
aic = &phba->aic_obj[i];
pbe_eq = &phwi_context->be_eq[i];
now = jiffies;
- if (!aic->jiffs || time_before(now, aic->jiffs) ||
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
pbe_eq->cq_count < aic->eq_prev) {
- aic->jiffs = now;
+ aic->jiffies = now;
aic->eq_prev = pbe_eq->cq_count;
continue;
}
- delta = jiffies_to_msecs(now - aic->jiffs);
+ delta = jiffies_to_msecs(now - aic->jiffies);
pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
eqd = (pps / 1500) << 2;
@@ -5406,7 +5290,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
eqd = min_t(u32, eqd, phwi_context->max_eqd);
eqd = max_t(u32, eqd, phwi_context->min_eqd);
- aic->jiffs = now;
+ aic->jiffies = now;
aic->eq_prev = pbe_eq->cq_count;
if (eqd != aic->prev_eqd) {
@@ -5416,53 +5300,242 @@ static void be_eqd_update(struct beiscsi_hba *phba)
num++;
}
}
- if (num) {
- tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
- if (tag)
- beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+ if (num)
+ /* completion of this is ignored */
+ beiscsi_modify_eq_delay(phba, set_eqd, num);
+
+ schedule_delayed_work(&phba->eqd_update,
+ msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+}
+
+static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+{
+ int i, status;
+
+ for (i = 0; i <= phba->num_cpus; i++)
+ phba->msix_entries[i].entry = i;
+
+ status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+ phba->num_cpus + 1, phba->num_cpus + 1);
+ if (status > 0)
+ phba->msix_enabled = true;
+}
+
+static void beiscsi_hw_tpe_check(unsigned long ptr)
+{
+ struct beiscsi_hba *phba;
+ u32 wait;
+
+ phba = (struct beiscsi_hba *)ptr;
+ /* if not TPE, do nothing */
+ if (!beiscsi_detect_tpe(phba))
+ return;
+
+ /* wait default 4000ms before recovering */
+ wait = 4000;
+ if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
+ wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
+ queue_delayed_work(phba->wq, &phba->recover_port,
+ msecs_to_jiffies(wait));
+}
+
+static void beiscsi_hw_health_check(unsigned long ptr)
+{
+ struct beiscsi_hba *phba;
+
+ phba = (struct beiscsi_hba *)ptr;
+ beiscsi_detect_ue(phba);
+ if (beiscsi_detect_ue(phba)) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d : port in error: %lx\n", phba->state);
+ /* sessions are no longer valid, so first fail the sessions */
+ queue_work(phba->wq, &phba->sess_work);
+
+ /* detect UER supported */
+ if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
+ return;
+ /* modify this timer to check TPE */
+ phba->hw_check.function = beiscsi_hw_tpe_check;
}
+
+ mod_timer(&phba->hw_check,
+ jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
}
-static void be_check_boot_session(struct beiscsi_hba *phba)
+/*
+ * beiscsi_enable_port()- Enables the disabled port.
+ * Only port resources freed in disable function are reallocated.
+ * This is called in HBA error handling path.
+ *
+ * @phba: Instance of driver private structure
+ *
+ **/
+static int beiscsi_enable_port(struct beiscsi_hba *phba)
{
- if (beiscsi_setup_boot_info(phba))
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Could not set up "
- "iSCSI boot info on async event.\n");
+ struct hwi_context_memory *phwi_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct be_eq_obj *pbe_eq;
+ int ret, i;
+
+ if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d : %s : port is online %lx\n",
+ __func__, phba->state);
+ return 0;
+ }
+
+ ret = beiscsi_init_sliport(phba);
+ if (ret)
+ return ret;
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ beiscsi_get_params(phba);
+ /* Re-enable UER. If different TPE occurs then it is recoverable. */
+ beiscsi_set_uer_feature(phba);
+
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = hwi_init_controller(phba);
+ if (ret) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d : init controller failed %d\n", ret);
+ goto disable_msix;
+ }
+
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_tag_status[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ }
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
+ }
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d : setup IRQs failed %d\n", ret);
+ goto cleanup_port;
+ }
+ hwi_enable_intr(phba);
+ /* port operational: clear all error bits */
+ set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+ __beiscsi_log(phba, KERN_INFO,
+ "BM_%d : port online: 0x%lx\n", phba->state);
+
+ /* start hw_check timer and eqd_update work */
+ schedule_delayed_work(&phba->eqd_update,
+ msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+
+ /**
+ * Timer function gets modified for TPE detection.
+ * Always reinit to do health check first.
+ */
+ phba->hw_check.function = beiscsi_hw_health_check;
+ mod_timer(&phba->hw_check,
+ jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
+ return 0;
+
+cleanup_port:
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ irq_poll_disable(&pbe_eq->iopoll);
+ }
+ hwi_cleanup_port(phba);
+
+disable_msix:
+ if (phba->msix_enabled)
+ pci_disable_msix(phba->pcidev);
+
+ return ret;
}
/*
- * beiscsi_hw_health_check()- Check adapter health
- * @work: work item to check HW health
+ * beiscsi_disable_port()- Disable port and clean up driver resources.
+ * This is called in HBA error handling and driver removal.
+ * @phba: Instance Priv structure
+ * @unload: indicate driver is unloading
*
- * Check if adapter in an unrecoverable state or not.
+ * Free the OS and HW resources held by the driver
**/
-static void
-beiscsi_hw_health_check(struct work_struct *work)
+static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
{
- struct beiscsi_hba *phba =
- container_of(work, struct beiscsi_hba,
- beiscsi_hw_check_task.work);
+ struct hwi_context_memory *phwi_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct be_eq_obj *pbe_eq;
+ unsigned int i, msix_vec;
- be_eqd_update(phba);
+ if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
+ return;
- if (phba->state & BE_ADAPTER_CHECK_BOOT) {
- if ((phba->get_boot > 0) && (!phba->boot_kset)) {
- phba->get_boot--;
- if (!(phba->get_boot % BE_GET_BOOT_TO))
- be_check_boot_session(phba);
- } else {
- phba->state &= ~BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = 0;
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ hwi_disable_intr(phba);
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ msix_vec = phba->msix_entries[i].vector;
+ free_irq(msix_vec, &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
}
+ } else
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msix(phba->pcidev);
+
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ irq_poll_disable(&pbe_eq->iopoll);
}
+ cancel_delayed_work_sync(&phba->eqd_update);
+ cancel_work_sync(&phba->boot_work);
+	/* WQ might be running; cancel queued mcc_work if we are not exiting */
+ if (!unload && beiscsi_hba_in_error(phba)) {
+ pbe_eq = &phwi_context->be_eq[i];
+ cancel_work_sync(&pbe_eq->mcc_work);
+ }
+ hwi_cleanup_port(phba);
+}
- beiscsi_ue_detect(phba);
+static void beiscsi_sess_work(struct work_struct *work)
+{
+ struct beiscsi_hba *phba;
- schedule_delayed_work(&phba->beiscsi_hw_check_task,
- msecs_to_jiffies(1000));
+ phba = container_of(work, struct beiscsi_hba, sess_work);
+ /*
+ * This work gets scheduled only in case of HBA error.
+	 * Old sessions are gone, so they need to be re-established.
+ * iscsi_session_failure needs process context hence this work.
+ */
+ iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
}
+static void beiscsi_recover_port(struct work_struct *work)
+{
+ struct beiscsi_hba *phba;
+
+ phba = container_of(work, struct beiscsi_hba, recover_port.work);
+ beiscsi_disable_port(phba, 0);
+ beiscsi_enable_port(phba);
+}
static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -5470,12 +5543,18 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
struct beiscsi_hba *phba = NULL;
phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
- phba->state |= BE_ADAPTER_PCI_ERR;
+ set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : EEH error detected\n");
- beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+ /* first stop UE detection when PCI error detected */
+ del_timer_sync(&phba->hw_check);
+ cancel_delayed_work_sync(&phba->recover_port);
+
+ /* sessions are no longer valid, so first fail the sessions */
+ iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
+ beiscsi_disable_port(phba, 0);
if (state == pci_channel_io_perm_failure) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5515,9 +5594,8 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- /* Wait for the CHIP Reset to complete */
- status = be_chk_reset_complete(phba);
- if (!status) {
+ status = beiscsi_check_fw_rdy(phba);
+ if (status) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
"BM_%d : EEH Reset Completed\n");
} else {
@@ -5532,87 +5610,16 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
static void beiscsi_eeh_resume(struct pci_dev *pdev)
{
- int ret = 0, i;
- struct be_eq_obj *pbe_eq;
- struct beiscsi_hba *phba = NULL;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_context_memory *phwi_context;
+ struct beiscsi_hba *phba;
+ int ret;
phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
pci_save_state(pdev);
- if (enable_msix)
- find_num_cpus(phba);
- else
- phba->num_cpus = 1;
-
- if (enable_msix) {
- beiscsi_msix_enable(phba);
- if (!phba->msix_enabled)
- phba->num_cpus = 1;
- }
-
- ret = beiscsi_cmd_reset_function(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed\n");
- goto ret_err;
- }
-
- ret = be_chk_reset_complete(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset.\n");
- goto ret_err;
- }
-
- beiscsi_get_params(phba);
- phba->shost->max_id = phba->params.cxns_per_ctrl;
- phba->shost->can_queue = phba->params.ios_per_ctrl;
- ret = hwi_init_controller(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_eeh_resume -"
- "Failed to initialize beiscsi_hba.\n");
- goto ret_err;
- }
-
- for (i = 0; i < MAX_MCC_CMD; i++) {
- init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
- phba->ctrl.mcc_tag[i] = i + 1;
- phba->ctrl.mcc_tag_status[i + 1] = 0;
- phba->ctrl.mcc_tag_available++;
- }
-
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_context = phwi_ctrlr->phwi_ctxt;
-
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-
- ret = beiscsi_init_irqs(phba);
- if (ret < 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_eeh_resume - "
- "Failed to beiscsi_init_irqs\n");
- goto ret_err;
- }
-
- hwi_enable_intr(phba);
- phba->state &= ~BE_ADAPTER_PCI_ERR;
-
- return;
-ret_err:
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : AER EEH Resume Failed\n");
+ ret = beiscsi_enable_port(phba);
+ if (ret)
+ __beiscsi_log(phba, KERN_ERR,
+ "BM_%d : AER EEH resume failed\n");
}
static int beiscsi_dev_probe(struct pci_dev *pcidev,
@@ -5622,7 +5629,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret = 0, i;
+ unsigned int s_handle;
+ int ret, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -5635,6 +5643,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
if (!phba) {
dev_err(&pcidev->dev,
"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
+ ret = -ENOMEM;
goto disable_pci;
}
@@ -5650,10 +5659,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
- phba->fw_timeout = false;
phba->mac_addr_set = false;
-
switch (pcidev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
@@ -5677,39 +5684,26 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
ret = be_ctrl_init(phba, pcidev);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : beiscsi_dev_probe-"
- "Failed in be_ctrl_init\n");
+ "BM_%d : be_ctrl_init failed\n");
goto hba_free;
}
- /*
- * FUNCTION_RESET should clean up any stale info in FW for this fn
- */
- ret = beiscsi_cmd_reset_function(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed\n");
- goto hba_free;
- }
- ret = be_chk_reset_complete(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset.\n");
+ ret = beiscsi_init_sliport(phba);
+ if (ret)
goto hba_free;
- }
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
- spin_lock_init(&phba->isr_lock);
spin_lock_init(&phba->async_pdu_lock);
- ret = mgmt_get_fw_config(&phba->ctrl, phba);
+ ret = beiscsi_get_fw_config(&phba->ctrl, phba);
if (ret != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Error getting fw config\n");
goto free_port;
}
- mgmt_get_port_name(&phba->ctrl, phba);
+ beiscsi_get_port_name(&phba->ctrl, phba);
beiscsi_get_params(phba);
+ beiscsi_set_uer_feature(phba);
if (enable_msix)
find_num_cpus(phba);
@@ -5754,25 +5748,24 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
"Failed to allocate work queue\n");
+ ret = -ENOMEM;
goto free_twq;
}
- INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
- beiscsi_hw_health_check);
+ INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
+ irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
}
i = (phba->msix_enabled) ? i : 0;
/* Work item for MCC handling */
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
@@ -5783,22 +5776,42 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
hwi_enable_intr(phba);
- if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+ ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
+ if (ret)
goto free_blkenbld;
- if (beiscsi_setup_boot_info(phba))
- /*
- * log error but continue, because we may not be using
- * iscsi boot.
+ /* set online bit after port is operational */
+ set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+ __beiscsi_log(phba, KERN_INFO,
+ "BM_%d : port online: 0x%lx\n", phba->state);
+
+ INIT_WORK(&phba->boot_work, beiscsi_boot_work);
+ ret = beiscsi_boot_get_shandle(phba, &s_handle);
+ if (ret > 0) {
+ beiscsi_start_boot_work(phba, s_handle);
+ /**
+ * Set this bit after starting the work to let
+ * probe handle it first.
+		 * An ASYNC event can also schedule this work.
*/
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Could not set up "
- "iSCSI boot info.\n");
+ set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
+ }
- beiscsi_create_def_ifaces(phba);
- schedule_delayed_work(&phba->beiscsi_hw_check_task,
- msecs_to_jiffies(1000));
+ beiscsi_iface_create_default(phba);
+ schedule_delayed_work(&phba->eqd_update,
+ msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+ INIT_WORK(&phba->sess_work, beiscsi_sess_work);
+ INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
+ /**
+ * Start UE detection here. UE before this will cause stall in probe
+ * and eventually fail the probe.
+ */
+ init_timer(&phba->hw_check);
+ phba->hw_check.function = beiscsi_hw_health_check;
+ phba->hw_check.data = (unsigned long)phba;
+ mod_timer(&phba->hw_check,
+ jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
@@ -5810,7 +5823,8 @@ free_blkenbld:
irq_poll_disable(&pbe_eq->iopoll);
}
free_twq:
- beiscsi_clean_port(phba);
+ hwi_cleanup_port(phba);
+ beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba);
free_port:
pci_free_consistent(phba->pcidev,
@@ -5830,6 +5844,49 @@ disable_pci:
return ret;
}
+static void beiscsi_remove(struct pci_dev *pcidev)
+{
+ struct beiscsi_hba *phba = NULL;
+
+ phba = pci_get_drvdata(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+ return;
+ }
+
+ /* first stop UE detection before unloading */
+ del_timer_sync(&phba->hw_check);
+ cancel_delayed_work_sync(&phba->recover_port);
+ cancel_work_sync(&phba->sess_work);
+
+ beiscsi_iface_destroy_default(phba);
+ iscsi_host_remove(phba->shost);
+ beiscsi_disable_port(phba, 1);
+
+ /* after cancelling boot_work */
+ iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
+
+ /* free all resources */
+ destroy_workqueue(phba->wq);
+ beiscsi_cleanup_port(phba);
+ beiscsi_free_mem(phba);
+
+ /* ctrl uninit */
+ beiscsi_unmap_pci_function(phba);
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
+ pci_set_drvdata(pcidev, NULL);
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+}
+
+
static struct pci_error_handlers beiscsi_eeh_handlers = {
.error_detected = beiscsi_eeh_err_detected,
.slot_reset = beiscsi_eeh_reset,
@@ -5846,9 +5903,9 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
- .attr_is_visible = be2iscsi_attr_is_visible,
- .set_iface_param = be2iscsi_iface_set_param,
- .get_iface_param = be2iscsi_iface_get_param,
+ .attr_is_visible = beiscsi_attr_is_visible,
+ .set_iface_param = beiscsi_iface_set_param,
+ .get_iface_param = beiscsi_iface_get_param,
.set_param = beiscsi_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
@@ -5877,7 +5934,6 @@ static struct pci_driver beiscsi_pci_driver = {
.err_handler = &beiscsi_eeh_handlers
};
-
static int __init beiscsi_module_init(void)
{
int ret;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 30a4606d9a3b..6376657e45f7 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "11.0.0.0"
+#define BUILD_STR "11.2.0.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -82,36 +82,12 @@
#define BEISCSI_MAX_FRAGS_INIT 192
#define BE_NUM_MSIX_ENTRIES 1
-#define MPU_EP_CONTROL 0
-#define MPU_EP_SEMAPHORE 0xac
-#define BE2_SOFT_RESET 0x5c
-#define BE2_PCI_ONLINE0 0xb0
-#define BE2_PCI_ONLINE1 0xb4
-#define BE2_SET_RESET 0x80
-#define BE2_MPU_IRAM_ONLINE 0x00000080
-
#define BE_SENSE_INFO_SIZE 258
#define BE_ISCSI_PDU_HEADER_SIZE 64
#define BE_MIN_MEM_SIZE 16384
#define MAX_CMD_SZ 65536
#define IIOC_SCSI_DATA 0x05 /* Write Operation */
-#define INVALID_SESS_HANDLE 0xFFFFFFFF
-
-/**
- * Adapter States
- **/
-#define BE_ADAPTER_LINK_UP 0x001
-#define BE_ADAPTER_LINK_DOWN 0x002
-#define BE_ADAPTER_PCI_ERR 0x004
-#define BE_ADAPTER_CHECK_BOOT 0x008
-
-
-#define BEISCSI_CLEAN_UNLOAD 0x01
-#define BEISCSI_EEH_UNLOAD 0x02
-
-#define BE_GET_BOOT_RETRIES 45
-#define BE_GET_BOOT_TO 20
/**
* hardware needs the async PDU buffers to be posted in multiples of 8
* So have atleast 8 of them by default
@@ -378,7 +354,6 @@ struct beiscsi_hba {
struct sgl_handle **eh_sgl_hndl_base;
spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock;
- spinlock_t isr_lock;
spinlock_t async_pdu_lock;
unsigned int age;
struct list_head hba_queue;
@@ -390,7 +365,6 @@ struct beiscsi_hba {
struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
struct iscsi_endpoint **ep_array;
struct beiscsi_conn **conn_table;
- struct iscsi_boot_kset *boot_kset;
struct Scsi_Host *shost;
struct iscsi_iface *ipv4_iface;
struct iscsi_iface *ipv6_iface;
@@ -418,12 +392,33 @@ struct beiscsi_hba {
unsigned long ulp_supported;
} fw_config;
- unsigned int state;
+ unsigned long state;
+#define BEISCSI_HBA_ONLINE 0
+#define BEISCSI_HBA_LINK_UP 1
+#define BEISCSI_HBA_BOOT_FOUND 2
+#define BEISCSI_HBA_BOOT_WORK 3
+#define BEISCSI_HBA_UER_SUPP 4
+#define BEISCSI_HBA_PCI_ERR 5
+#define BEISCSI_HBA_FW_TIMEOUT 6
+#define BEISCSI_HBA_IN_UE 7
+#define BEISCSI_HBA_IN_TPE 8
+
+/* error bits */
+#define BEISCSI_HBA_IN_ERR ((1 << BEISCSI_HBA_PCI_ERR) | \
+ (1 << BEISCSI_HBA_FW_TIMEOUT) | \
+ (1 << BEISCSI_HBA_IN_UE) | \
+ (1 << BEISCSI_HBA_IN_TPE))
+
u8 optic_state;
- int get_boot;
- bool fw_timeout;
- bool ue_detected;
- struct delayed_work beiscsi_hw_check_task;
+ struct delayed_work eqd_update;
+ /* update EQ delay timer every 1000ms */
+#define BEISCSI_EQD_UPDATE_INTERVAL 1000
+ struct timer_list hw_check;
+ /* check for UE every 1000ms */
+#define BEISCSI_UE_DETECT_INTERVAL 1000
+ u32 ue2rp;
+ struct delayed_work recover_port;
+ struct work_struct sess_work;
bool mac_addr_set;
u8 mac_address[ETH_ALEN];
@@ -435,7 +430,6 @@ struct beiscsi_hba {
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int interface_handle;
- struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
struct be_aic_obj aic_obj[MAX_CPUS];
@@ -444,8 +438,29 @@ struct beiscsi_hba {
struct scatterlist *sg,
uint32_t num_sg, uint32_t xferlen,
uint32_t writedir);
+ struct boot_struct {
+ int retry;
+ unsigned int tag;
+ unsigned int s_handle;
+ struct be_dma_mem nonemb_cmd;
+ enum {
+ BEISCSI_BOOT_REOPEN_SESS = 1,
+ BEISCSI_BOOT_GET_SHANDLE,
+ BEISCSI_BOOT_GET_SINFO,
+ BEISCSI_BOOT_LOGOUT_SESS,
+ BEISCSI_BOOT_CREATE_KSET,
+ } action;
+ struct mgmt_session_info boot_sess;
+ struct iscsi_boot_kset *boot_kset;
+ } boot_struct;
+ struct work_struct boot_work;
};
+#define beiscsi_hba_in_error(phba) ((phba)->state & BEISCSI_HBA_IN_ERR)
+#define beiscsi_hba_is_online(phba) \
+ (!beiscsi_hba_in_error((phba)) && \
+ test_bit(BEISCSI_HBA_ONLINE, &phba->state))
+
struct beiscsi_session {
struct pci_pool *bhs_pool;
};
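The unsigned long state field replaces the old boolean flags with a bitmask driven by the kernel's atomic bitops; beiscsi_hba_in_error() and beiscsi_hba_is_online() above, and the set_bit()/clear_bit() calls later in this patch, all follow that pattern. A minimal sketch of that usage; the example_* helpers are hypothetical and only illustrate the bit operations:

/* illustrative sketch only; example_* helpers are hypothetical */
static void example_mark_online(struct beiscsi_hba *phba)
{
	/* atomically set the ONLINE bit in the state bitmask */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
}

static bool example_can_issue_cmd(struct beiscsi_hba *phba)
{
	/* any of PCI_ERR/FW_TIMEOUT/IN_UE/IN_TPE blocks the command path */
	return beiscsi_hba_is_online(phba);
}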
@@ -508,6 +523,7 @@ struct beiscsi_io_task {
struct sgl_handle *psgl_handle;
struct beiscsi_conn *conn;
struct scsi_cmnd *scsi_cmnd;
+ int num_sg;
struct hwi_wrb_context *pwrb_context;
unsigned int cmd_sn;
unsigned int flags;
@@ -592,80 +608,81 @@ struct amap_beiscsi_offload_params {
u8 max_recv_data_segment_length[32];
};
-/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
- struct beiscsi_hba *phba, struct sol_cqe *psol);*/
-
-struct async_pdu_handle {
+struct hd_async_handle {
struct list_head link;
struct be_bus_address pa;
void *pbuffer;
- unsigned int consumed;
- unsigned char index;
- unsigned char is_header;
- unsigned short cri;
- unsigned long buffer_len;
+ u32 buffer_len;
+ u16 index;
+ u16 cri;
+ u8 is_header;
+ u8 is_final;
};
-struct hwi_async_entry {
- struct {
- unsigned char hdr_received;
- unsigned char hdr_len;
- unsigned short bytes_received;
+/**
+ * This holds the list of async PDUs that are waiting to be processed.
+ * Buffers live in this list for a brief duration before they get
+ * processed and posted back to hardware.
+ * Note that we don't really need one cri_wait_queue per async_entry;
+ * we need one cri_wait_queue per CRI. It's easier to manage if it
+ * is kept along with the async_entry.
+ */
+struct hd_async_entry {
+ struct cri_wait_queue {
+ unsigned short hdr_len;
+ unsigned int bytes_received;
unsigned int bytes_needed;
struct list_head list;
- } wait_queue;
-
- struct list_head header_busy_list;
- struct list_head data_busy_list;
+ } wq;
+ /* handles posted to FW reside here */
+ struct hd_async_handle *header;
+ struct hd_async_handle *data;
};
-struct hwi_async_pdu_context {
- struct {
- struct be_bus_address pa_base;
- void *va_base;
- void *ring_base;
- struct async_pdu_handle *handle_base;
-
- unsigned int host_write_ptr;
- unsigned int ep_read_ptr;
- unsigned int writables;
-
- unsigned int free_entries;
- unsigned int busy_entries;
-
- struct list_head free_list;
- } async_header;
+struct hd_async_buf_context {
+ struct be_bus_address pa_base;
+ void *va_base;
+ void *ring_base;
+ struct hd_async_handle *handle_base;
+ u16 free_entries;
+ u32 buffer_size;
+ /**
+ * Once iSCSI layer finishes processing an async PDU, the
+ * handles used for the PDU are added to this list.
+ * They are posted back to FW in groups of 8.
+ */
+ struct list_head free_list;
+};
- struct {
- struct be_bus_address pa_base;
- void *va_base;
- void *ring_base;
- struct async_pdu_handle *handle_base;
-
- unsigned int host_write_ptr;
- unsigned int ep_read_ptr;
- unsigned int writables;
-
- unsigned int free_entries;
- unsigned int busy_entries;
- struct list_head free_list;
- } async_data;
-
- unsigned int buffer_size;
- unsigned int num_entries;
+/**
+ * hd_async_context is declared for each ULP that supports the iSCSI function.
+ */
+struct hd_async_context {
+ struct hd_async_buf_context async_header;
+ struct hd_async_buf_context async_data;
+ u16 num_entries;
+ /**
+ * When an unsolicited PDU comes in, it needs to be chained until all
+ * the bytes are received, and only then is it processed. An
+ * hd_async_entry is created based on the cid_count for each ULP.
+ * When an unsolicited PDU comes in, it is added to the correct
+ * async_entry wq based on its conn_id.
+ * cid_to_async_cri_map, defined below, is used to retrieve the
+ * async CRI for a particular connection.
+ *
+ * This array is initialized after beiscsi_create_wrb_rings returns.
+ *
+ * - this method takes more memory, fixed at 2K
+ * - to support more connections than this, the array size needs
+ *   to be increased
+ */
#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
/**
- * This is a varying size list! Do not add anything
- * after this entry!!
+ * This is a variable-size array. Don't add anything after this field!!
*/
- struct hwi_async_entry *async_entry;
+ struct hd_async_entry *async_entry;
};
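For context, the lookup that BE_GET_ASYNC_CRI_FROM_CID() enables when an unsolicited PDU arrives could look like the sketch below; example_lookup_entry() is hypothetical, and the macro expects a local pasync_ctx pointer in scope, which the parameter name satisfies:

/* illustrative sketch only; example_lookup_entry() is hypothetical */
static struct hd_async_entry *
example_lookup_entry(struct hd_async_context *pasync_ctx, u16 cid)
{
	u16 cri;

	/* map the connection ID to its async CRI */
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/* index into the per-CRI array sized by cid_count */
	return &pasync_ctx->async_entry[cri];
}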
-#define PDUCQE_CODE_MASK 0x0000003F
-#define PDUCQE_DPL_MASK 0xFFFF0000
-#define PDUCQE_INDEX_MASK 0x0000FFFF
-
struct i_t_dpdu_cqe {
u32 dw[4];
} __packed;
@@ -845,7 +862,6 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
-void beiscsi_process_all_cqs(struct work_struct *work);
void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
struct iscsi_task *task);
@@ -856,11 +872,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
-static inline bool beiscsi_error(struct beiscsi_hba *phba)
-{
- return phba->ue_detected || phba->fw_timeout;
-}
-
struct pdu_nop_out {
u32 dw[12];
};
@@ -1067,11 +1078,18 @@ struct hwi_context_memory {
struct be_queue_info be_cq[MAX_CPUS - 1];
struct be_queue_info *be_wrbq;
+ /**
+ * The entries below are arrays sized by BEISCSI_ULP_COUNT, as a DEFQ
+ * is created for each ULP on which the iSCSI protocol is loaded.
+ */
struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
- struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
+ struct hd_async_context *pasync_ctx[BEISCSI_ULP_COUNT];
};
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle);
+
/* Logging related definitions */
#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
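These log-class bits are OR'ed into the third argument of beiscsi_log() throughout the driver; judging by the call sites in this patch, the macro itself supplies the value for the leading BG_%d. A hedged usage sketch, with made-up message text and tag variable:

/* illustrative sketch only; message text and tag are hypothetical */
beiscsi_log(phba, KERN_INFO,
	    BEISCSI_LOG_INIT | BEISCSI_LOG_MBOX,
	    "BG_%d : example event, tag %u\n", tag);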
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 83926e221f1e..aebc4ddb3060 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -24,139 +24,9 @@
#include "be_iscsi.h"
#include "be_main.h"
-/* UE Status Low CSR */
-static const char * const desc_ue_status_low[] = {
- "CEV",
- "CTX",
- "DBUF",
- "ERX",
- "Host",
- "MPU",
- "NDMA",
- "PTC ",
- "RDMA ",
- "RXF ",
- "RXIPS ",
- "RXULP0 ",
- "RXULP1 ",
- "RXULP2 ",
- "TIM ",
- "TPOST ",
- "TPRE ",
- "TXIPS ",
- "TXULP0 ",
- "TXULP1 ",
- "UC ",
- "WDMA ",
- "TXULP2 ",
- "HOST1 ",
- "P0_OB_LINK ",
- "P1_OB_LINK ",
- "HOST_GPIO ",
- "MBOX ",
- "AXGMAC0",
- "AXGMAC1",
- "JTAG",
- "MPU_INTPEND"
-};
-
-/* UE Status High CSR */
-static const char * const desc_ue_status_hi[] = {
- "LPCMEMHOST",
- "MGMT_MAC",
- "PCS0ONLINE",
- "MPU_IRAM",
- "PCS1ONLINE",
- "PCTL0",
- "PCTL1",
- "PMEM",
- "RR",
- "TXPB",
- "RXPP",
- "XAUI",
- "TXP",
- "ARM",
- "IPC",
- "HOST2",
- "HOST3",
- "HOST4",
- "HOST5",
- "HOST6",
- "HOST7",
- "HOST8",
- "HOST9",
- "NETC",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown"
-};
-
-/*
- * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter
- * @phba: Driver priv structure
- *
- * Read registers linked to UE and check for the UE status
- **/
-void beiscsi_ue_detect(struct beiscsi_hba *phba)
-{
- uint32_t ue_hi = 0, ue_lo = 0;
- uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
- uint8_t i = 0;
-
- if (phba->ue_detected)
- return;
-
- pci_read_config_dword(phba->pcidev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
- pci_read_config_dword(phba->pcidev,
- PCICFG_UE_STATUS_MASK_LOW,
- &ue_mask_lo);
- pci_read_config_dword(phba->pcidev,
- PCICFG_UE_STATUS_HIGH,
- &ue_hi);
- pci_read_config_dword(phba->pcidev,
- PCICFG_UE_STATUS_MASK_HI,
- &ue_mask_hi);
-
- ue_lo = (ue_lo & ~ue_mask_lo);
- ue_hi = (ue_hi & ~ue_mask_hi);
-
-
- if (ue_lo || ue_hi) {
- phba->ue_detected = true;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : Error detected on the adapter\n");
- }
-
- if (ue_lo) {
- for (i = 0; ue_lo; ue_lo >>= 1, i++) {
- if (ue_lo & 1)
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG,
- "BG_%d : UE_LOW %s bit set\n",
- desc_ue_status_low[i]);
- }
- }
-
- if (ue_hi) {
- for (i = 0; ue_hi; ue_hi >>= 1, i++) {
- if (ue_hi & 1)
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG,
- "BG_%d : UE_HIGH %s bit set\n",
- desc_ue_status_hi[i]);
- }
- }
-}
-
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
- struct be_set_eqd *set_eqd, int num)
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *set_eqd,
+ int num)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
@@ -174,7 +44,7 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -184,386 +54,13 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
+ /* ignore the completion of this mbox command */
+ set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
be_mcc_notify(phba, tag);
mutex_unlock(&ctrl->mbox_lock);
return tag;
}
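Setting MCC_TAG_STATE_IGNORE before be_mcc_notify() makes this a fire-and-forget command: the completion is consumed without waking anyone. Three tag-handling flavours appear in this patch overall; the note below is a reading of the code in this diff rather than anything normative:

/*
 * Tag-handling flavours seen in this patch:
 *
 *  set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
 *      completion is discarded (EQ delay update above)
 *
 *  set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
 *  ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
 *      completion invokes the callback (boot commands below)
 *
 *  beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
 *      caller sleeps until the completion arrives (e.g. VLAN set)
 */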
-/**
- * mgmt_reopen_session()- Reopen a session based on reopen_type
- * @phba: Device priv structure instance
- * @reopen_type: Type of reopen_session FW should do.
- * @sess_handle: Session Handle of the session to be re-opened
- *
- * return
- * the TAG used for MBOX Command
- *
- **/
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
- unsigned int reopen_type,
- unsigned int sess_handle)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_reopen_session_req *req;
- unsigned int tag;
-
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : In bescsi_get_boot_target\n");
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
- sizeof(struct be_cmd_reopen_session_resp));
-
- /* set the reopen_type,sess_handle */
- req->reopen_type = reopen_type;
- req->session_handle = sess_handle;
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_get_boot_target_req *req;
- unsigned int tag;
-
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : In bescsi_get_boot_target\n");
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
- sizeof(struct be_cmd_get_boot_target_resp));
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
- u32 boot_session_handle,
- struct be_dma_mem *nonemb_cmd)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- unsigned int tag;
- struct be_cmd_get_session_req *req;
- struct be_cmd_get_session_resp *resp;
- struct be_sge *sge;
-
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : In beiscsi_get_session_info\n");
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- nonemb_cmd->size = sizeof(*resp);
- req = nonemb_cmd->va;
- memset(req, 0, sizeof(*req));
- sge = nonembedded_sgl(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
- sizeof(*resp));
- req->session_handle = boot_session_handle;
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-/**
- * mgmt_get_port_name()- Get port name for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the alphanumeric character for port
- *
- **/
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba)
-{
- int ret = 0;
- struct be_mcc_wrb *wrb;
- struct be_cmd_get_port_name *ioctl;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = wrb_from_mbox(&ctrl->mbox_mem);
- memset(wrb, 0, sizeof(*wrb));
- ioctl = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
- be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PORT_NAME,
- EMBED_MBX_MAX_PAYLOAD_SIZE);
- ret = be_mbox_notify(ctrl);
- phba->port_name = 0;
- if (!ret) {
- phba->port_name = ioctl->p.resp.port_names >>
- (phba->fw_config.phys_port * 8) & 0xff;
- } else {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
- ret, ioctl->h.resp_hdr.status);
- }
-
- if (phba->port_name == 0)
- phba->port_name = '?';
-
- mutex_unlock(&ctrl->mbox_lock);
- return ret;
-}
-
-/**
- * mgmt_get_fw_config()- Get the FW config for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the FW config and resources available for the function.
- * The resources are created based on the count received here.
- *
- * return
- * Success: 0
- * Failure: Non-Zero Value
- **/
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba)
-{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
- uint32_t cid_count, icd_count;
- int status = -EINVAL;
- uint8_t ulp_num = 0;
-
- mutex_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
- be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
-
- be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
- EMBED_MBX_MAX_PAYLOAD_SIZE);
-
- if (be_mbox_notify(ctrl)) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : Failed in mgmt_get_fw_config\n");
- goto fail_init;
- }
-
- /* FW response formats depend on port id */
- phba->fw_config.phys_port = pfw_cfg->phys_port;
- if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : invalid physical port id %d\n",
- phba->fw_config.phys_port);
- goto fail_init;
- }
-
- /* populate and check FW config against min and max values */
- if (!is_chip_be2_be3r(phba)) {
- phba->fw_config.eqid_count = pfw_cfg->eqid_count;
- phba->fw_config.cqid_count = pfw_cfg->cqid_count;
- if (phba->fw_config.eqid_count == 0 ||
- phba->fw_config.eqid_count > 2048) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : invalid EQ count %d\n",
- phba->fw_config.eqid_count);
- goto fail_init;
- }
- if (phba->fw_config.cqid_count == 0 ||
- phba->fw_config.cqid_count > 4096) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : invalid CQ count %d\n",
- phba->fw_config.cqid_count);
- goto fail_init;
- }
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : EQ_Count : %d CQ_Count : %d\n",
- phba->fw_config.eqid_count,
- phba->fw_config.cqid_count);
- }
-
- /**
- * Check on which all ULP iSCSI Protocol is loaded.
- * Set the Bit for those ULP. This set flag is used
- * at all places in the code to check on which ULP
- * iSCSi Protocol is loaded
- **/
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
- if (pfw_cfg->ulp[ulp_num].ulp_mode &
- BEISCSI_ULP_ISCSI_INI_MODE) {
- set_bit(ulp_num, &phba->fw_config.ulp_supported);
-
- /* Get the CID, ICD and Chain count for each ULP */
- phba->fw_config.iscsi_cid_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_base;
- phba->fw_config.iscsi_cid_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_count;
-
- phba->fw_config.iscsi_icd_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_base;
- phba->fw_config.iscsi_icd_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_count;
-
- phba->fw_config.iscsi_chain_start[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_base;
- phba->fw_config.iscsi_chain_count[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_count;
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : Function loaded on ULP : %d\n"
- "\tiscsi_cid_count : %d\n"
- "\tiscsi_cid_start : %d\n"
- "\t iscsi_icd_count : %d\n"
- "\t iscsi_icd_start : %d\n",
- ulp_num,
- phba->fw_config.
- iscsi_cid_count[ulp_num],
- phba->fw_config.
- iscsi_cid_start[ulp_num],
- phba->fw_config.
- iscsi_icd_count[ulp_num],
- phba->fw_config.
- iscsi_icd_start[ulp_num]);
- }
- }
-
- if (phba->fw_config.ulp_supported == 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
- pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
- pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
- goto fail_init;
- }
-
- /**
- * ICD is shared among ULPs. Use icd_count of any one loaded ULP
- **/
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
- if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
- break;
- icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
- if (icd_count == 0 || icd_count > 65536) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d: invalid ICD count %d\n", icd_count);
- goto fail_init;
- }
-
- cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
- BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
- if (cid_count == 0 || cid_count > 4096) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d: invalid CID count %d\n", cid_count);
- goto fail_init;
- }
-
- /**
- * Check FW is dual ULP aware i.e. can handle either
- * of the protocols.
- */
- phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
- BEISCSI_FUNC_DUA_MODE);
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : DUA Mode : 0x%x\n",
- phba->fw_config.dual_ulp_aware);
-
- /* all set, continue using this FW config */
- status = 0;
-fail_init:
- mutex_unlock(&ctrl->mbox_lock);
- return status;
-}
-
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba)
-{
- struct be_dma_mem nonemb_cmd;
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_mgmt_controller_attributes *req;
- struct be_sge *sge = nonembedded_sgl(wrb);
- int status = 0;
-
- nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
- sizeof(struct be_mgmt_controller_attributes),
- &nonemb_cmd.dma);
- if (nonemb_cmd.va == NULL) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : Failed to allocate memory for "
- "mgmt_check_supported_fw\n");
- return -ENOMEM;
- }
- nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
- req = nonemb_cmd.va;
- memset(req, 0, sizeof(*req));
- mutex_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd.size);
- status = be_mbox_notify(ctrl);
- if (!status) {
- struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : Firmware Version of CMD : %s\n"
- "Firmware Version is : %s\n"
- "Developer Build, not performing version check...\n",
- resp->params.hba_attribs
- .flashrom_version_string,
- resp->params.hba_attribs.
- firmware_version_string);
-
- phba->fw_config.iscsi_features =
- resp->params.hba_attribs.iscsi_features;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : phba->fw_config.iscsi_features = %d\n",
- phba->fw_config.iscsi_features);
- memcpy(phba->fw_ver_str, resp->params.hba_attribs.
- firmware_version_string, BEISCSI_VER_STRLEN);
- } else
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : Failed in mgmt_check_supported_fw\n");
- mutex_unlock(&ctrl->mbox_lock);
- if (nonemb_cmd.va)
- pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
-
- return status;
-}
-
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
struct bsg_job *job,
@@ -609,7 +106,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
mutex_unlock(&ctrl->mbox_lock);
- return -ENOSYS;
+ return -EPERM;
}
wrb = alloc_mcc_wrb(phba, &tag);
@@ -631,48 +128,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
-/**
- * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
- * @phba: pointer to dev priv structure
- * @ulp_num: ULP number.
- *
- * return
- * Success: 0
- * Failure: Non-Zero Value
- **/
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct iscsi_cleanup_req *req;
- unsigned int tag;
- int status;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return -EBUSY;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
- OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
-
- req->chute = (1 << ulp_num);
- req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
- req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
-
- be_mcc_notify(phba, tag);
- status = be_mcc_compl_poll(phba, tag);
- if (status)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BG_%d : mgmt_epfw_cleanup , FAILED\n");
- mutex_unlock(&ctrl->mbox_lock);
- return status;
-}
-
unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invalidate_command_table *inv_tbl,
unsigned int num_invalidate, unsigned int cid,
@@ -844,7 +299,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
nonemb_cmd->size);
if (dst_addr->sa_family == PF_INET) {
__be32 s_addr = daddr_in->sin_addr.s_addr;
- req->ip_address.ip_type = BE2_IPV4;
+ req->ip_address.ip_type = BEISCSI_IP_TYPE_V4;
req->ip_address.addr[0] = s_addr & 0x000000ff;
req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
@@ -852,17 +307,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
req->tcp_port = ntohs(daddr_in->sin_port);
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
- beiscsi_ep->ip_type = BE2_IPV4;
+ beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4;
} else {
/* else its PF_INET6 family */
- req->ip_address.ip_type = BE2_IPV6;
+ req->ip_address.ip_type = BEISCSI_IP_TYPE_V6;
memcpy(&req->ip_address.addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
req->tcp_port = ntohs(daddr_in6->sin6_port);
beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
memcpy(&beiscsi_ep->dst6_addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
- beiscsi_ep->ip_type = BE2_IPV6;
+ beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6;
}
req->cid = cid;
i = phba->nxt_cqid++;
@@ -883,7 +338,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
if (!is_chip_be2_be3r(phba)) {
req->hdr.version = MBX_CMD_VER1;
- req->tcp_window_size = 0;
+ req->tcp_window_size = 0x8000;
req->tcp_window_scale_count = 2;
}
@@ -892,44 +347,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
return tag;
}
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_get_all_if_id_req *req;
- struct be_cmd_get_all_if_id_req *pbe_allid;
- unsigned int tag;
- int status = 0;
-
- if (mutex_lock_interruptible(&ctrl->mbox_lock))
- return -EINTR;
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return -ENOMEM;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
- OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
- sizeof(*req));
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
-
- status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
- if (status) {
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : Failed in mgmt_get_all_if_id\n");
- return -EBUSY;
- }
-
- pbe_allid = embedded_payload(wrb);
- phba->interface_handle = pbe_allid->if_hndl_list[0];
-
- return status;
-}
-
/*
* mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
* @phba: Driver priv structure
@@ -1001,72 +418,68 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
}
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BG_%d : subsystem iSCSI cmd %d size %d\n",
+ iscsi_cmd, size);
return 0;
}
-static int
-mgmt_static_ip_modify(struct beiscsi_hba *phba,
- struct be_cmd_get_if_info_resp *if_info,
- struct iscsi_iface_param_info *ip_param,
- struct iscsi_iface_param_info *subnet_param,
- uint32_t ip_action)
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
{
- struct be_cmd_set_ip_addr_req *req;
- struct be_dma_mem nonemb_cmd;
- uint32_t ip_type;
- int rc;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_all_if_id_req *req;
+ struct be_cmd_get_all_if_id_req *pbe_allid;
+ unsigned int tag;
+ int status = 0;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
- sizeof(*req));
- if (rc)
- return rc;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return -EINTR;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return -ENOMEM;
+ }
- ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
- BE2_IPV6 : BE2_IPV4 ;
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+ sizeof(*req));
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
- req = nonemb_cmd.va;
- req->ip_params.record_entry_count = 1;
- req->ip_params.ip_record.action = ip_action;
- req->ip_params.ip_record.interface_hndl =
- phba->interface_handle;
- req->ip_params.ip_record.ip_addr.size_of_structure =
- sizeof(struct be_ip_addr_subnet_format);
- req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+ status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+ if (status) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : %s failed: %d\n", __func__, status);
+ return -EBUSY;
+ }
- if (ip_action == IP_ACTION_ADD) {
- memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
- sizeof(req->ip_params.ip_record.ip_addr.addr));
+ pbe_allid = embedded_payload(wrb);
+ /* we now support only one interface per function */
+ phba->interface_handle = pbe_allid->if_hndl_list[0];
- if (subnet_param)
- memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
- subnet_param->value,
- sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
- } else {
- memcpy(req->ip_params.ip_record.ip_addr.addr,
- if_info->ip_addr.addr,
- sizeof(req->ip_params.ip_record.ip_addr.addr));
+ return status;
+}
- memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
- if_info->ip_addr.subnet_mask,
- sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
- }
+static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type)
+{
+ u32 len;
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
- if (rc < 0)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to Modify existing IP Address\n");
- return rc;
+ len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+ while (len && !ip[len - 1])
+ len--;
+ return (len == 0);
}
-static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
- uint32_t gtway_action, uint32_t param_len)
+static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
+ u32 action, u32 ip_type, u8 *gw)
{
struct be_cmd_set_def_gateway_req *req;
struct be_dma_mem nonemb_cmd;
int rt_val;
-
rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
sizeof(*req));
@@ -1074,200 +487,300 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
return rt_val;
req = nonemb_cmd.va;
- req->action = gtway_action;
- req->ip_addr.ip_type = BE2_IPV4;
+ req->action = action;
+ req->ip_addr.ip_type = ip_type;
+ memcpy(req->ip_addr.addr, gw,
+ (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
- memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
+{
+ struct be_cmd_get_def_gateway_resp gw_resp;
+ int rt_val;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ memset(&gw_resp, 0, sizeof(gw_resp));
+ rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp);
+ if (rt_val) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
+ return rt_val;
+ }
+
+ if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) {
+ rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type,
+ gw_resp.ip_addr.addr);
+ if (rt_val) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to clear Gateway Addr Set\n");
+ return rt_val;
+ }
+ }
+
+ rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw);
+ if (rt_val)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Set Gateway Addr\n");
+
+ return rt_val;
}
-int mgmt_set_ip(struct beiscsi_hba *phba,
- struct iscsi_iface_param_info *ip_param,
- struct iscsi_iface_param_info *subnet_param,
- uint32_t boot_proto)
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+ struct be_cmd_get_def_gateway_resp *resp)
{
- struct be_cmd_get_def_gateway_resp gtway_addr_set;
- struct be_cmd_get_if_info_resp *if_info;
- struct be_cmd_set_dhcp_req *dhcpreq;
- struct be_cmd_rel_dhcp_req *reldhcp;
+ struct be_cmd_get_def_gateway_req *req;
struct be_dma_mem nonemb_cmd;
- uint8_t *gtway_addr;
- uint32_t ip_type;
int rc;
- rc = mgmt_get_all_if_id(phba);
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+ sizeof(*resp));
if (rc)
return rc;
- ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
- BE2_IPV6 : BE2_IPV4 ;
+ req = nonemb_cmd.va;
+ req->ip_type = ip_type;
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
+ sizeof(*resp));
+}
+
+static int
+beiscsi_if_clr_ip(struct beiscsi_hba *phba,
+ struct be_cmd_get_if_info_resp *if_info)
+{
+ struct be_cmd_set_ip_addr_req *req;
+ struct be_dma_mem nonemb_cmd;
+ int rc;
- rc = mgmt_get_if_info(phba, ip_type, &if_info);
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
if (rc)
return rc;
- if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
- if (if_info->dhcp_state) {
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : DHCP Already Enabled\n");
- goto exit;
- }
- /* The ip_param->len is 1 in DHCP case. Setting
- proper IP len as this it is used while
- freeing the Static IP.
- */
- ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
- IP_V6_LEN : IP_V4_LEN;
-
- } else {
- if (if_info->dhcp_state) {
+ req = nonemb_cmd.va;
+ req->ip_params.record_entry_count = 1;
+ req->ip_params.ip_record.action = IP_ACTION_DEL;
+ req->ip_params.ip_record.interface_hndl =
+ phba->interface_handle;
+ req->ip_params.ip_record.ip_addr.size_of_structure =
+ sizeof(struct be_ip_addr_subnet_format);
+ req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type;
+ memcpy(req->ip_params.ip_record.ip_addr.addr,
+ if_info->ip_addr.addr,
+ sizeof(if_info->ip_addr.addr));
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ if_info->ip_addr.subnet_mask,
+ sizeof(if_info->ip_addr.subnet_mask));
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0 || req->ip_params.ip_record.status) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BG_%d : failed to clear IP: rc %d status %d\n",
+ rc, req->ip_params.ip_record.status);
+ }
+ return rc;
+}
- memset(if_info, 0, sizeof(*if_info));
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
- sizeof(*reldhcp));
+static int
+beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
+ u8 *subnet, u32 ip_type)
+{
+ struct be_cmd_set_ip_addr_req *req;
+ struct be_dma_mem nonemb_cmd;
+ uint32_t ip_len;
+ int rc;
- if (rc)
- goto exit;
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
+ if (rc)
+ return rc;
- reldhcp = nonemb_cmd.va;
- reldhcp->interface_hndl = phba->interface_handle;
- reldhcp->ip_type = ip_type;
+ req = nonemb_cmd.va;
+ req->ip_params.record_entry_count = 1;
+ req->ip_params.ip_record.action = IP_ACTION_ADD;
+ req->ip_params.ip_record.interface_hndl =
+ phba->interface_handle;
+ req->ip_params.ip_record.ip_addr.size_of_structure =
+ sizeof(struct be_ip_addr_subnet_format);
+ req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+ ip_len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+ memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len);
+ if (subnet)
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+ subnet, ip_len);
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
- if (rc < 0) {
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to Delete existing dhcp\n");
- goto exit;
- }
- }
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ /**
+ * In some cases, the host needs to check the individual record status
+ * even though FW reported success for the IOCTL.
+ */
+ if (rc < 0 || req->ip_params.ip_record.status) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : failed to set IP: rc %d status %d\n",
+ rc, req->ip_params.ip_record.status);
+ if (req->ip_params.ip_record.status)
+ rc = -EINVAL;
}
+ return rc;
+}
- /* Delete the Static IP Set */
- if (if_info->ip_addr.addr[0]) {
- rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
- IP_ACTION_DEL);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+ u8 *ip, u8 *subnet)
+{
+ struct be_cmd_get_if_info_resp *if_info;
+ struct be_cmd_rel_dhcp_req *reldhcp;
+ struct be_dma_mem nonemb_cmd;
+ int rc;
+
+ rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+ if (rc)
+ return rc;
+
+ if (if_info->dhcp_state) {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+ sizeof(*reldhcp));
if (rc)
goto exit;
- }
- /* Delete the Gateway settings if mode change is to DHCP */
- if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
- memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
- rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
- if (rc) {
+ reldhcp = nonemb_cmd.va;
+ reldhcp->interface_hndl = phba->interface_handle;
+ reldhcp->ip_type = ip_type;
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to Get Gateway Addr\n");
+ "BG_%d : failed to release existing DHCP: %d\n",
+ rc);
goto exit;
}
-
- if (gtway_addr_set.ip_addr.addr[0]) {
- gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
- rc = mgmt_modify_gateway(phba, gtway_addr,
- IP_ACTION_DEL, IP_V4_LEN);
-
- if (rc) {
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to clear Gateway Addr Set\n");
- goto exit;
- }
- }
}
- /* Set Adapter to DHCP/Static Mode */
- if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
- sizeof(*dhcpreq));
+ /* first delete any IP set */
+ if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+ rc = beiscsi_if_clr_ip(phba, if_info);
if (rc)
goto exit;
-
- dhcpreq = nonemb_cmd.va;
- dhcpreq->flags = BLOCKING;
- dhcpreq->retry_count = 1;
- dhcpreq->interface_hndl = phba->interface_handle;
- dhcpreq->ip_type = BE2_DHCP_V4;
-
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
- } else {
- rc = mgmt_static_ip_modify(phba, if_info, ip_param,
- subnet_param, IP_ACTION_ADD);
}
+ /* if ip == NULL then this is called just to release DHCP IP */
+ if (ip)
+ rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type);
exit:
kfree(if_info);
return rc;
}
-int mgmt_set_gateway(struct beiscsi_hba *phba,
- struct iscsi_iface_param_info *gateway_param)
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
{
- struct be_cmd_get_def_gateway_resp gtway_addr_set;
- uint8_t *gtway_addr;
- int rt_val;
+ struct be_cmd_get_def_gateway_resp gw_resp;
+ struct be_cmd_get_if_info_resp *if_info;
+ struct be_cmd_set_dhcp_req *dhcpreq;
+ struct be_dma_mem nonemb_cmd;
+ u8 *gw;
+ int rc;
- memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
- rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
- if (rt_val) {
+ rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+ if (rc)
+ return rc;
+
+ if (if_info->dhcp_state) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to Get Gateway Addr\n");
- return rt_val;
+ "BG_%d : DHCP Already Enabled\n");
+ goto exit;
}
- if (gtway_addr_set.ip_addr.addr[0]) {
- gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
- rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
- gateway_param->len);
- if (rt_val) {
+ /* first delete any IP set */
+ if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+ rc = beiscsi_if_clr_ip(phba, if_info);
+ if (rc)
+ goto exit;
+ }
+
+ /* delete gateway settings if mode change is to DHCP */
+ memset(&gw_resp, 0, sizeof(gw_resp));
+ /* use ip_type provided in if_info */
+ rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp);
+ if (rc) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BG_%d : Failed to Get Gateway Addr\n");
+ goto exit;
+ }
+ gw = (u8 *)&gw_resp.ip_addr.addr;
+ if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) {
+ rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL,
+ if_info->ip_addr.ip_type, gw);
+ if (rc) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to clear Gateway Addr Set\n");
- return rt_val;
+ goto exit;
}
}
- gtway_addr = (uint8_t *)&gateway_param->value;
- rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
- gateway_param->len);
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+ sizeof(*dhcpreq));
+ if (rc)
+ goto exit;
- if (rt_val)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
- "BG_%d : Failed to Set Gateway Addr\n");
+ dhcpreq = nonemb_cmd.va;
+ dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */
+ dhcpreq->retry_count = 1;
+ dhcpreq->interface_hndl = phba->interface_handle;
+ dhcpreq->ip_type = ip_type;
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
- return rt_val;
+exit:
+ kfree(if_info);
+ return rc;
}
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_def_gateway_resp *gateway)
+/**
+ * beiscsi_if_set_vlan()- Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
{
- struct be_cmd_get_def_gateway_req *req;
- struct be_dma_mem nonemb_cmd;
int rc;
+ unsigned int tag;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
- sizeof(*gateway));
- if (rc)
- return rc;
-
- req = nonemb_cmd.va;
- req->ip_type = ip_type;
+ tag = be_cmd_set_vlan(phba, vlan_tag);
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BG_%d : VLAN Setting Failed\n");
+ return -EBUSY;
+ }
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
- sizeof(*gateway));
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+ "BS_%d : VLAN MBX Cmd Failed\n");
+ return rc;
+ }
+ return rc;
}
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp **if_info)
+
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp **if_info)
{
struct be_cmd_get_if_info_req *req;
struct be_dma_mem nonemb_cmd;
uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc;
- rc = mgmt_get_all_if_id(phba);
+ rc = beiscsi_if_get_handle(phba);
if (rc)
return rc;
@@ -1364,123 +877,317 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
return tag;
}
+static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
+ unsigned int tag)
+{
+ struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_cmd_resp_logout_fw_sess *logo_resp;
+ struct be_cmd_get_session_resp *sess_resp;
+ struct be_mcc_wrb *wrb;
+ struct boot_struct *bs;
+ int boot_work, status;
+
+ if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : %s no boot work %lx\n",
+ __func__, phba->state);
+ return;
+ }
+
+ if (phba->boot_struct.tag != tag) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : %s tag mismatch %d:%d\n",
+ __func__, tag, phba->boot_struct.tag);
+ return;
+ }
+ bs = &phba->boot_struct;
+ boot_work = 1;
+ status = 0;
+ switch (bs->action) {
+ case BEISCSI_BOOT_REOPEN_SESS:
+ status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+ if (!status)
+ bs->action = BEISCSI_BOOT_GET_SHANDLE;
+ else
+ bs->retry--;
+ break;
+ case BEISCSI_BOOT_GET_SHANDLE:
+ status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+ if (!status) {
+ boot_resp = embedded_payload(wrb);
+ bs->s_handle = boot_resp->boot_session_handle;
+ }
+ if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) {
+ bs->action = BEISCSI_BOOT_REOPEN_SESS;
+ bs->retry--;
+ } else {
+ bs->action = BEISCSI_BOOT_GET_SINFO;
+ }
+ break;
+ case BEISCSI_BOOT_GET_SINFO:
+ status = __beiscsi_mcc_compl_status(phba, tag, NULL,
+ &bs->nonemb_cmd);
+ if (!status) {
+ sess_resp = bs->nonemb_cmd.va;
+ memcpy(&bs->boot_sess, &sess_resp->session_info,
+ sizeof(struct mgmt_session_info));
+ bs->action = BEISCSI_BOOT_LOGOUT_SESS;
+ } else {
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : get boot session info error : 0x%x\n",
+ status);
+ boot_work = 0;
+ }
+ pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+ bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
+ bs->nonemb_cmd.va = NULL;
+ break;
+ case BEISCSI_BOOT_LOGOUT_SESS:
+ status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+ if (!status) {
+ logo_resp = embedded_payload(wrb);
+ if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : FW boot session logout error : 0x%x\n",
+ logo_resp->session_status);
+ }
+ }
+ /* continue to create boot_kset even if logout failed? */
+ bs->action = BEISCSI_BOOT_CREATE_KSET;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the tag so no other completion matches this tag */
+ bs->tag = 0;
+ if (!bs->retry) {
+ boot_work = 0;
+ __beiscsi_log(phba, KERN_ERR,
+ "BG_%d : failed to setup boot target: status %d action %d\n",
+ status, bs->action);
+ }
+ if (!boot_work) {
+ /* wait for next event to start boot_work */
+ clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+ return;
+ }
+ schedule_work(&phba->boot_work);
+}
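Boot-target discovery is now an asynchronous state machine: each MCC completion lands here, advances bs->action, and schedule_work() runs boot_work to issue the next command. A rough reading of the transitions in the switch above:

/*
 * GET_SHANDLE --(invalid handle)--> REOPEN_SESS --> GET_SHANDLE
 * GET_SHANDLE --(valid handle)----> GET_SINFO
 * GET_SINFO   --(info copied)-----> LOGOUT_SESS
 * LOGOUT_SESS --------------------> CREATE_KSET (done in boot_work)
 *
 * bs->retry bounds the reopen loop; once it reaches zero the
 * BEISCSI_HBA_BOOT_WORK bit is cleared and the attempt is abandoned.
 */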
+
/**
- * be_mgmt_get_boot_shandle()- Get the session handle
- * @phba: device priv structure instance
- * @s_handle: session handle returned for boot session.
+ * beiscsi_boot_logout_sess()- Logout from boot FW session
+ * @phba: Device priv structure instance
*
- * Get the boot target session handle. In case of
- * crashdump mode driver has to issue and MBX Cmd
- * for FW to login to boot target
+ * return
+ * the TAG used for MBOX Command
+ *
+ */
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_logout_fw_sess *req;
+ unsigned int tag;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
+ sizeof(struct be_cmd_req_logout_fw_sess));
+ /* Use the session handle copied into boot_sess */
+ req->session_handle = phba->boot_struct.boot_sess.session_handle;
+
+ phba->boot_struct.tag = tag;
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+
+ return tag;
+}
+/**
+ * beiscsi_boot_reopen_sess()- Reopen boot session
+ * @phba: Device priv structure instance
*
* return
- * Success: 0
- * Failure: Non-Zero value
+ * the TAG used for MBOX Command
*
**/
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
- unsigned int *s_handle)
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
{
- struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
+ struct be_cmd_reopen_session_req *req;
unsigned int tag;
- uint8_t boot_retry = 3;
- int rc;
- do {
- /* Get the Boot Target Session Handle and Count*/
- tag = mgmt_get_boot_target(phba);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
- "BG_%d : Getting Boot Target Info Failed\n");
- return -EAGAIN;
- }
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
- rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : MBX CMD get_boot_target Failed\n");
- return -EBUSY;
- }
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+ sizeof(struct be_cmd_reopen_session_resp));
+ req->reopen_type = BE_REOPEN_BOOT_SESSIONS;
+ req->session_handle = BE_BOOT_INVALID_SHANDLE;
- boot_resp = embedded_payload(wrb);
+ phba->boot_struct.tag = tag;
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
- /* Check if the there are any Boot targets configured */
- if (!boot_resp->boot_session_count) {
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d ;No boot targets configured\n");
- return -ENXIO;
- }
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+ return tag;
+}
- /* FW returns the session handle of the boot session */
- if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
- *s_handle = boot_resp->boot_session_handle;
- return 0;
- }
- /* Issue MBX Cmd to FW to login to the boot target */
- tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
- INVALID_SESS_HANDLE);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_reopen_session Failed\n");
- return -EAGAIN;
- }
+/**
+ * beiscsi_boot_get_sinfo()- Get boot session info
+ * @phba: device priv structure instance
+ *
+ * Fetches the session info for boot_struct.s_handle from FW.
+ * return
+ * the TAG used for MBOX Command
+ *
+ **/
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_cmd_get_session_resp *resp;
+ struct be_cmd_get_session_req *req;
+ struct be_dma_mem *nonemb_cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_sge *sge;
+ unsigned int tag;
- rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_reopen_session Failed");
- return rc;
- }
- } while (--boot_retry);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
- /* Couldn't log into the boot target */
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : Login to Boot Target Failed\n");
- return -ENXIO;
+ nonemb_cmd = &phba->boot_struct.nonemb_cmd;
+ nonemb_cmd->size = sizeof(*resp);
+ nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd->size,
+ &nonemb_cmd->dma);
+ if (!nonemb_cmd->va) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
+
+ req = nonemb_cmd->va;
+ memset(req, 0, sizeof(*req));
+ sge = nonembedded_sgl(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
+ sizeof(*resp));
+ req->session_handle = phba->boot_struct.s_handle;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ phba->boot_struct.tag = tag;
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_boot_target_req *req;
+ unsigned int tag;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
+ sizeof(struct be_cmd_get_boot_target_resp));
+
+ if (async) {
+ phba->boot_struct.tag = tag;
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+ }
+
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+ return tag;
}
/**
- * mgmt_set_vlan()- Issue and wait for CMD completion
- * @phba: device private structure instance
- * @vlan_tag: VLAN tag
+ * beiscsi_boot_get_shandle()- Get boot session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
*
- * Issue the MBX Cmd and wait for the completion of the
- * command.
+ * return
+ * Success: 1
+ * Failure: negative
*
- * returns
- * Success: 0
- * Failure: Non-Xero Value
**/
-int mgmt_set_vlan(struct beiscsi_hba *phba,
- uint16_t vlan_tag)
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle)
{
- int rc;
+ struct be_cmd_get_boot_target_resp *boot_resp;
+ struct be_mcc_wrb *wrb;
unsigned int tag;
+ int rc;
- tag = be_cmd_set_vlan(phba, vlan_tag);
+ *s_handle = BE_BOOT_INVALID_SHANDLE;
+ /* get configured boot session count and handle */
+ tag = __beiscsi_boot_get_shandle(phba, 0);
if (!tag) {
beiscsi_log(phba, KERN_ERR,
- (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
- "BG_%d : VLAN Setting Failed\n");
- return -EBUSY;
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+ "BG_%d : Getting Boot Target Info Failed\n");
+ return -EAGAIN;
}
- rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
- (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
- "BS_%d : VLAN MBX Cmd Failed\n");
- return rc;
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : MBX CMD get_boot_target Failed\n");
+ return -EBUSY;
}
- return rc;
+
+ boot_resp = embedded_payload(wrb);
+ /* check if there are any boot targets configured */
+ if (!boot_resp->boot_session_count) {
+ __beiscsi_log(phba, KERN_INFO,
+ "BG_%d : No boot targets configured\n");
+ return -ENXIO;
+ }
+
+ /* s_handle is valid only if FW has logged in to the boot target */
+ *s_handle = boot_resp->boot_session_handle;
+ return 1;
}
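A caller-side sketch of how the synchronous path above could pair with beiscsi_start_boot_work(), which this patch declares in be_main.h; example_kick_boot() and its error handling are hypothetical:

/* illustrative sketch only; example_kick_boot() is hypothetical */
static void example_kick_boot(struct beiscsi_hba *phba)
{
	unsigned int s_handle;

	if (beiscsi_boot_get_shandle(phba, &s_handle) != 1)
		return;
	if (s_handle != BE_BOOT_INVALID_SHANDLE)
		/* FW already logged in; hand the handle to the worker */
		beiscsi_start_boot_work(phba, s_handle);
}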
/**
@@ -1645,7 +1352,6 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
{
struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
max_send_data_segment_length, pwrb,
params->dw[offsetof(struct amap_beiscsi_offload_params,
@@ -1717,8 +1423,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
{
struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
-
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_burst_length, pwrb, params->dw[offsetof
(struct amap_beiscsi_offload_params,
@@ -1790,70 +1494,3 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
(params->dw[offsetof(struct amap_beiscsi_offload_params,
exp_statsn) / 32] + 1));
}
-
-/**
- * beiscsi_logout_fw_sess()- Firmware Session Logout
- * @phba: Device priv structure instance
- * @fw_sess_handle: FW session handle
- *
- * Logout from the FW established sessions.
- * returns
- * Success: 0
- * Failure: Non-Zero Value
- *
- */
-int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
- uint32_t fw_sess_handle)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_logout_fw_sess *req;
- struct be_cmd_resp_logout_fw_sess *resp;
- unsigned int tag;
- int rc;
-
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : In bescsi_logout_fwboot_sess\n");
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : MBX Tag Failure\n");
- return -EINVAL;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
- sizeof(struct be_cmd_req_logout_fw_sess));
-
- /* Set the session handle */
- req->session_handle = fw_sess_handle;
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
-
- rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
- return -EBUSY;
- }
-
- resp = embedded_payload(wrb);
- if (resp->session_status !=
- BEISCSI_MGMT_SESSION_CLOSE) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
- resp->session_status);
- rc = -EINVAL;
- }
-
- return rc;
-}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index f3a48a04b2ca..b897cfd57c72 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
*
* Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
*
* Emulex
* 3333 Susan Street
@@ -96,7 +96,6 @@ struct mcc_wrb {
struct mcc_wrb_payload payload;
};
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep,
@@ -266,50 +265,41 @@ struct beiscsi_endpoint {
u16 cid_vld;
};
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba);
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba);
-
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
unsigned short cid,
unsigned short issue_reset,
unsigned short savecfg_flag);
-int mgmt_set_ip(struct beiscsi_hba *phba,
- struct iscsi_iface_param_info *ip_param,
- struct iscsi_iface_param_info *subnet_param,
- uint32_t boot_proto);
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+ u8 *ip, u8 *subnet);
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
- unsigned int reopen_type,
- unsigned sess_handle);
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw);
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
- u32 boot_session_handle,
- struct be_dma_mem *nonemb_cmd);
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+ struct be_cmd_get_def_gateway_resp *resp);
int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_cmd_get_nic_conf_resp *mac);
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp **if_info);
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+ struct be_cmd_get_if_info_resp **if_info);
+
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba);
+
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_def_gateway_resp *gateway);
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba);
-int mgmt_set_gateway(struct beiscsi_hba *phba,
- struct iscsi_iface_param_info *gateway_param);
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba);
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
- unsigned int *s_handle);
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba);
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async);
-int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle);
ssize_t beiscsi_drvr_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -339,7 +329,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
struct hwi_wrb_context *pwrb_context);
-void beiscsi_ue_detect(struct beiscsi_hba *phba);
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
struct be_set_eqd *, int num);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7733ad5305d4..4ddda72f60e6 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5827,13 +5827,13 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
bfa_port_speed_t max_speed = 0;
struct bfa_port_attr_s port_attr;
bfa_port_speed_t port_speed, rport_speed;
- bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
-
+ bfa_boolean_t trl_enabled;
if (port == NULL)
return 0;
fcs = port->fcs;
+ trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
/* Get Physical port's current speed */
bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
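The bfa_fcs_lport change above moves the bfa_fcport_is_ratelim() call below the NULL check on port, because the original initializer dereferenced port->fcs before the check could run. A minimal standalone sketch of the same ordering fix, using hypothetical types rather than the bfa API:

```c
#include <stdio.h>
#include <stddef.h>

struct fcs  { int ratelim; };
struct port { struct fcs *fcs; };

/* Hypothetical stand-in for bfa_fcport_is_ratelim(). */
static int is_ratelim(const struct fcs *fcs)
{
	return fcs->ratelim;
}

static int max_speed(const struct port *port)
{
	int trl_enabled;		/* do not initialize from *port here */

	if (port == NULL)		/* validate before any dereference */
		return 0;

	trl_enabled = is_ratelim(port->fcs);
	return trl_enabled ? 4 : 16;	/* placeholder speed values */
}

int main(void)
{
	struct fcs f = { .ratelim = 1 };
	struct port p = { .fcs = &f };

	printf("%d %d\n", max_speed(NULL), max_speed(&p));	/* 0 4 */
	return 0;
}
```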
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 5beea776b9f5..68ca518d34b0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -254,7 +254,7 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
return rc;
}
-void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_mp_req *mp_req;
struct fc_frame_header *fc_hdr, *fh;
@@ -364,7 +364,7 @@ srr_compl_done:
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
-void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
struct bnx2fc_cmd *orig_io_req, *new_io_req;
struct bnx2fc_cmd *rec_req;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a5052dd8d7e6..f9ddb6156f14 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -625,7 +625,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
*
* @arg: ptr to bnx2fc_percpu_info structure
*/
-int bnx2fc_percpu_io_thread(void *arg)
+static int bnx2fc_percpu_io_thread(void *arg)
{
struct bnx2fc_percpu_s *p = arg;
struct bnx2fc_work *work, *tmp;
@@ -1410,9 +1410,10 @@ bind_err:
return NULL;
}
-struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
- struct net_device *netdev,
- enum fip_state fip_mode)
+static struct bnx2fc_interface *
+bnx2fc_interface_create(struct bnx2fc_hba *hba,
+ struct net_device *netdev,
+ enum fip_state fip_mode)
{
struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
@@ -2765,8 +2766,7 @@ static void __exit bnx2fc_mod_exit(void)
* held.
*/
mutex_lock(&bnx2fc_dev_lock);
- list_splice(&adapter_list, &to_be_deleted);
- INIT_LIST_HEAD(&adapter_list);
+ list_splice_init(&adapter_list, &to_be_deleted);
adapter_count = 0;
mutex_unlock(&bnx2fc_dev_lock);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 28c671b609b2..5ff9f89c17c7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -994,7 +994,7 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
}
-struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8f24d60f09d7..f501095f91ac 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1079,7 +1079,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
-int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
int rc = SUCCESS;
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index c2a6f9f29427..89a52b941ea8 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1721,7 +1721,7 @@ out:
/* Wake up waiting threads */
csio_scsi_cmnd(req) = NULL;
- complete_all(&req->cmplobj);
+ complete(&req->cmplobj);
}
/*
@@ -1945,6 +1945,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
ready = csio_is_lnode_ready(ln);
tmo = CSIO_SCSI_ABRT_TMO_MS;
+ reinit_completion(&ioreq->cmplobj);
spin_lock_irq(&hw->lock);
rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
spin_unlock_irq(&hw->lock);
@@ -1964,8 +1965,6 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
goto inval_scmnd;
}
- /* Wait for completion */
- init_completion(&ioreq->cmplobj);
wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
/* FW didn't respond to abort within our timeout */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e4ba2d2616cd..7c0d7af0d3b7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -84,6 +84,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = false,
.add = t4_uld_add,
.rx_handler = t4_uld_rx_handler,
.state_change = t4_uld_state_change,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 661bb94e2548..b301655f91cd 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -823,17 +823,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
}
/**
- * cxlflash_shutdown() - shutdown handler
- * @pdev: PCI device associated with the host.
- */
-static void cxlflash_shutdown(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
-
- notify_shutdown(cfg, false);
-}
-
-/**
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
@@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev)
struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
ulong lock_flags;
+ if (!pci_is_enabled(pdev)) {
+ pr_debug("%s: Device is disabled\n", __func__);
+ return;
+ }
+
/* If a Task Management Function is active, wait for it to complete
* before continuing with remove.
*/
@@ -1046,6 +1040,8 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
do {
msleep(delay_us / 1000);
status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ if (status == U64_MAX)
+ nretry /= 2;
} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
nretry--);
@@ -1077,6 +1073,8 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
do {
msleep(delay_us / 1000);
status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ if (status == U64_MAX)
+ nretry /= 2;
} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
nretry--);
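Both wait_port_online() and wait_port_offline() now treat an all-ones readback (U64_MAX) as a sign the MMIO space is no longer responding (for example when the adapter drops off the bus during EEH) and halve the remaining retry budget so the poll loop gives up sooner. A hedged userspace sketch of that loop shape, with a stubbed register read standing in for readq_be():

```c
#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK   0xFFULL
#define STATUS_ONLINE 0x02ULL

/* Stub register read; a real driver would use an MMIO accessor. */
static uint64_t read_status(int alive)
{
	return alive ? STATUS_ONLINE : UINT64_MAX; /* all-ones mimics a dead device */
}

/* Poll until the masked status reports "online", shrinking the retry
 * budget whenever the read comes back as all-ones. */
static int wait_online(int alive, unsigned int nretry)
{
	uint64_t status;

	do {
		status = read_status(alive);
		if (status == UINT64_MAX)	/* device likely gone: give up faster */
			nretry /= 2;
	} while ((status & STATUS_MASK) != STATUS_ONLINE && nretry--);

	return ((status & STATUS_MASK) == STATUS_ONLINE) ? 0 : -1;
}

int main(void)
{
	printf("dead: %d\n", wait_online(0, 1000));	/* -1, exits quickly */
	printf("live: %d\n", wait_online(1, 1000));	/* 0 */
	return 0;
}
```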
@@ -1095,42 +1093,25 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
* online. This toggling action can cause this routine to delay up to a few
* seconds. When configured to use the internal LUN feature of the AFU, a
* failure to come online is overridden.
- *
- * Return:
- * 0 when the WWPN is successfully written and the port comes back online
- * -1 when the port fails to go offline or come back up online
*/
-static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
- u64 wwpn)
+static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
+ u64 wwpn)
{
- int rc = 0;
-
set_port_offline(fc_regs);
-
if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT)) {
pr_debug("%s: wait on port %d to go offline timed out\n",
__func__, port);
- rc = -1; /* but continue on to leave the port back online */
}
- if (rc == 0)
- writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
- /* Always return success after programming WWPN */
- rc = 0;
+ writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
set_port_online(fc_regs);
-
if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
FC_PORT_STATUS_RETRY_CNT)) {
- pr_err("%s: wait on port %d to go online timed out\n",
- __func__, port);
+ pr_debug("%s: wait on port %d to go online timed out\n",
+ __func__, port);
}
-
- pr_debug("%s: returning rc=%d\n", __func__, rc);
-
- return rc;
}
/**
@@ -1187,7 +1168,7 @@ static const struct asyc_intr_info ainfo[] = {
{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
- {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
+ {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
@@ -1195,7 +1176,7 @@ static const struct asyc_intr_info ainfo[] = {
{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
- {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
+ {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
{0x0, "", 0, 0} /* terminator */
};
@@ -1631,15 +1612,10 @@ static int init_global(struct cxlflash_cfg *cfg)
[FC_CRC_THRESH / 8]);
/* Set WWPNs. If already programmed, wwpn[i] is 0 */
- if (wwpn[i] != 0 &&
- afu_set_wwpn(afu, i,
- &afu->afu_map->global.fc_regs[i][0],
- wwpn[i])) {
- dev_err(dev, "%s: failed to set WWPN on port %d\n",
- __func__, i);
- rc = -EIO;
- goto out;
- }
+ if (wwpn[i] != 0)
+ afu_set_wwpn(afu, i,
+ &afu->afu_map->global.fc_regs[i][0],
+ wwpn[i]);
/* Programming WWPN back to back causes additional
* offline/online transitions and a PLOGI
*/
@@ -2048,6 +2024,11 @@ retry:
* cxlflash_eh_host_reset_handler() - reset the host adapter
* @scp: SCSI command from stack identifying host.
*
+ * Following a reset, the state is evaluated again in case an EEH occurred
+ * during the reset. In such a scenario, the host reset will either yield
+ * until the EEH recovery is complete or return success or failure based
+ * upon the current device state.
+ *
* Return:
* SUCCESS as defined in scsi/scsi.h
* FAILED as defined in scsi/scsi.h
@@ -2080,7 +2061,8 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
} else
cfg->state = STATE_NORMAL;
wake_up_all(&cfg->reset_waitq);
- break;
+ ssleep(1);
+ /* fall through */
case STATE_RESET:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
@@ -2596,6 +2578,9 @@ out_remove:
* @pdev: PCI device struct.
* @state: PCI channel state.
*
+ * When an EEH occurs during an active reset, wait until the reset is
+ * complete and then take action based upon the device state.
+ *
* Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
*/
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
@@ -2609,6 +2594,10 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
switch (state) {
case pci_channel_io_frozen:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ if (cfg->state == STATE_FAILTERM)
+ return PCI_ERS_RESULT_DISCONNECT;
+
cfg->state = STATE_RESET;
scsi_block_requests(cfg->host);
drain_ioctls(cfg);
@@ -2685,7 +2674,7 @@ static struct pci_driver cxlflash_driver = {
.id_table = cxlflash_pci_table,
.probe = cxlflash_probe,
.remove = cxlflash_remove,
- .shutdown = cxlflash_shutdown,
+ .shutdown = cxlflash_remove,
.err_handler = &cxlflash_err_handler,
};
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ce1507023132..9636970d9611 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -709,14 +709,13 @@ int cxlflash_disk_release(struct scsi_device *sdev,
* @cfg: Internal structure associated with the host.
* @ctxi: Context to release.
*
- * This routine is safe to be called with a a non-initialized context
- * and is tolerant of being called with the context's mutex held (it
- * will be unlocked if necessary before freeing). Also note that the
- * routine conditionally checks for the existence of the context control
- * map before clearing the RHT registers and context capabilities because
- * it is possible to destroy a context while the context is in the error
- * state (previous mapping was removed [so there is no need to worry about
- * clearing] and context is waiting for a new mapping).
+ * This routine is safe to be called with a non-initialized context.
+ * Also note that the routine conditionally checks for the existence
+ * of the context control map before clearing the RHT registers and
+ * context capabilities because it is possible to destroy a context
+ * while the context is in the error state (previous mapping was
+ * removed [so there is no need to worry about clearing] and context
+ * is waiting for a new mapping).
*/
static void destroy_context(struct cxlflash_cfg *cfg,
struct ctx_info *ctxi)
@@ -732,9 +731,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
writeq_be(0, &ctxi->ctrl_map->ctx_cap);
}
-
- if (mutex_is_locked(&ctxi->mutex))
- mutex_unlock(&ctxi->mutex);
}
/* Free memory associated with context */
@@ -792,32 +788,58 @@ err:
* @cfg: Internal structure associated with the host.
* @ctx: Previously obtained CXL context reference.
* @ctxid: Previously obtained process element associated with CXL context.
- * @adap_fd: Previously obtained adapter fd associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
- *
- * Upon return, the context is marked as initialized and the context's mutex
- * is locked.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, int adap_fd,
- struct file *file, u32 perms)
+ struct cxl_context *ctx, int ctxid, struct file *file,
+ u32 perms)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->lfd = adap_fd;
ctxi->pid = current->tgid; /* tgid = pid */
ctxi->ctx = ctx;
+ ctxi->cfg = cfg;
ctxi->file = file;
ctxi->initialized = true;
mutex_init(&ctxi->mutex);
+ kref_init(&ctxi->kref);
INIT_LIST_HEAD(&ctxi->luns);
INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
+}
+/**
+ * remove_context() - context kref release handler
+ * @kref: Kernel reference associated with context to be removed.
+ *
+ * When a context no longer has any references it can safely be removed
+ * from global access and destroyed. Note that it is assumed the thread
+ * relinquishing access to the context holds its mutex.
+ */
+static void remove_context(struct kref *kref)
+{
+ struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
+ struct cxlflash_cfg *cfg = ctxi->cfg;
+ u64 ctxid = DECODE_CTXID(ctxi->ctxid);
+
+ /* Remove context from table/error list */
+ WARN_ON(!mutex_is_locked(&ctxi->mutex));
+ ctxi->unavail = true;
+ mutex_unlock(&ctxi->mutex);
+ mutex_lock(&cfg->ctx_tbl_list_mutex);
mutex_lock(&ctxi->mutex);
+
+ if (!list_empty(&ctxi->list))
+ list_del(&ctxi->list);
+ cfg->ctx_tbl[ctxid] = NULL;
+ mutex_unlock(&cfg->ctx_tbl_list_mutex);
+ mutex_unlock(&ctxi->mutex);
+
+ /* Context now completely uncoupled/unreachable */
+ destroy_context(cfg, ctxi);
}
/**
@@ -845,7 +867,6 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
int i;
int rc = 0;
- int lfd;
u64 ctxid = DECODE_CTXID(detach->context_id),
rctxid = detach->context_id;
@@ -887,40 +908,13 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
break;
}
- /* Tear down context following last LUN cleanup */
- if (list_empty(&ctxi->luns)) {
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
-
- /* Might not have been in error list so conditionally remove */
- if (!list_empty(&ctxi->list))
- list_del(&ctxi->list);
- cfg->ctx_tbl[ctxid] = NULL;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- mutex_unlock(&ctxi->mutex);
-
- lfd = ctxi->lfd;
- destroy_context(cfg, ctxi);
- ctxi = NULL;
+ /*
+ * Release the context reference and the sdev reference that
+ * bound this LUN to the context.
+ */
+ if (kref_put(&ctxi->kref, remove_context))
put_ctx = false;
-
- /*
- * As a last step, clean up external resources when not
- * already on an external cleanup thread, i.e.: close(adap_fd).
- *
- * NOTE: this will free up the context from the CXL services,
- * allowing it to dole out the same context_id on a future
- * (or even currently in-flight) disk_attach operation.
- */
- if (lfd != -1)
- sys_close(lfd);
- }
-
- /* Release the sdev reference that bound this LUN to the context */
scsi_device_put(sdev);
-
out:
if (put_ctx)
put_context(ctxi);
@@ -941,34 +935,18 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*
* This routine is the release handler for the fops registered with
* the CXL services on an initial attach for a context. It is called
- * when a close is performed on the adapter file descriptor returned
- * to the user. Programmatically, the user is not required to perform
- * the close, as it is handled internally via the detach ioctl when
- * a context is being removed. Note that nothing prevents the user
- * from performing a close, but the user should be aware that doing
- * so is considered catastrophic and subsequent usage of the superpipe
- * API with previously saved off tokens will fail.
- *
- * When initiated from an external close (either by the user or via
- * a process tear down), the routine derives the context reference
- * and calls detach for each LUN associated with the context. The
- * final detach operation will cause the context itself to be freed.
- * Note that the saved off lfd is reset prior to calling detach to
- * signify that the final detach should not perform a close.
- *
- * When initiated from a detach operation as part of the tear down
- * of a context, the context is first completely freed and then the
- * close is performed. This routine will fail to derive the context
- * reference (due to the context having already been freed) and then
- * call into the CXL release entry point.
+ * when a close (explicitly by the user or as part of a process tear
+ * down) is performed on the adapter file descriptor returned to the
+ * user. The user should be aware that explicitly performing a close
+ * is considered catastrophic and subsequent usage of the superpipe API
+ * with previously saved off tokens will fail.
*
- * Thus, with exception to when the CXL process element (context id)
- * lookup fails (a case that should theoretically never occur), every
- * call into this routine results in a complete freeing of a context.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
+ * This routine derives the context reference and calls detach for
+ * each LUN associated with the context. The final detach operation
+ * causes the context itself to be freed. Except when the
+ * CXL process element (context id) lookup fails (a case that should
+ * theoretically never occur), every call into this routine results
+ * in a complete freeing of a context.
*
* Return: 0 on success
*/
@@ -1006,11 +984,8 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
goto out;
}
- dev_dbg(dev, "%s: close(%d) for context %d\n",
- __func__, ctxi->lfd, ctxid);
+ dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
- /* Reset the file descriptor to indicate we're on a close() thread */
- ctxi->lfd = -1;
detach.context_id = ctxi->ctxid;
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
@@ -1110,8 +1085,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto err;
}
- dev_dbg(dev, "%s: fault(%d) for context %d\n",
- __func__, ctxi->lfd, ctxid);
+ dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
if (likely(!ctxi->err_recovery_active)) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1186,8 +1160,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
goto out;
}
- dev_dbg(dev, "%s: mmap(%d) for context %d\n",
- __func__, ctxi->lfd, ctxid);
+ dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
rc = cxl_fd_mmap(file, vma);
if (likely(!rc)) {
@@ -1377,12 +1350,12 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
lun_access->lli = lli;
lun_access->sdev = sdev;
- /* Non-NULL context indicates reuse */
+ /* Non-NULL context indicates reuse (another context reference) */
if (ctxi) {
dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
__func__, rctxid);
+ kref_get(&ctxi->kref);
list_add(&lun_access->list, &ctxi->luns);
- fd = ctxi->lfd;
goto out_attach;
}
@@ -1430,7 +1403,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1445,7 +1418,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
* knows about us yet; we can be the only one holding our mutex.
*/
list_add(&lun_access->list, &ctxi->luns);
- mutex_unlock(&ctxi->mutex);
mutex_lock(&cfg->ctx_tbl_list_mutex);
mutex_lock(&ctxi->mutex);
cfg->ctx_tbl[ctxid] = ctxi;
@@ -1453,7 +1425,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
fd_install(fd, file);
out_attach:
- attach->hdr.return_flags = 0;
+ if (fd != -1)
+ attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
+ else
+ attach->hdr.return_flags = 0;
+
attach->context_id = ctxi->ctxid;
attach->block_size = gli->blk_len;
attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
@@ -1494,7 +1470,7 @@ err:
file = NULL;
}
- /* Cleanup our context; safe to call even with mutex locked */
+ /* Cleanup our context */
if (ctxi) {
destroy_context(cfg, ctxi);
ctxi = NULL;
@@ -1509,16 +1485,19 @@ err:
* recover_context() - recovers a context in error
* @cfg: Internal structure associated with the host.
* @ctxi: Context to release.
+ * @adap_fd: Adapter file descriptor associated with new/recovered context.
*
* Re-establishes the state for a context-in-error.
*
* Return: 0 on success, -errno on failure
*/
-static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+static int recover_context(struct cxlflash_cfg *cfg,
+ struct ctx_info *ctxi,
+ int *adap_fd)
{
struct device *dev = &cfg->dev->dev;
int rc = 0;
- int old_fd, fd = -1;
+ int fd = -1;
int ctxid = -1;
struct file *file;
struct cxl_context *ctx;
@@ -1566,9 +1545,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
* No error paths after this point. Once the fd is installed it's
* visible to user space and can't be undone safely on this thread.
*/
- old_fd = ctxi->lfd;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->lfd = fd;
ctxi->ctx = ctx;
ctxi->file = file;
@@ -1585,9 +1562,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
cfg->ctx_tbl[ctxid] = ctxi;
mutex_unlock(&cfg->ctx_tbl_list_mutex);
fd_install(fd, file);
-
- /* Release the original adapter fd and associated CXL resources */
- sys_close(old_fd);
+ *adap_fd = fd;
out:
dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
__func__, ctxid, fd, rc);
@@ -1646,6 +1621,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
rctxid = recover->context_id;
long reg;
int lretry = 20; /* up to 2 seconds */
+ int new_adap_fd = -1;
int rc = 0;
atomic_inc(&cfg->recovery_threads);
@@ -1675,7 +1651,7 @@ retry:
if (ctxi->err_recovery_active) {
retry_recover:
- rc = recover_context(cfg, ctxi);
+ rc = recover_context(cfg, ctxi, &new_adap_fd);
if (unlikely(rc)) {
dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
__func__, ctxid, rc);
@@ -1697,9 +1673,9 @@ retry_recover:
ctxi->err_recovery_active = false;
recover->context_id = ctxi->ctxid;
- recover->adap_fd = ctxi->lfd;
+ recover->adap_fd = new_adap_fd;
recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- recover->hdr.return_flags |=
+ recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
goto out;
}
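The superpipe.c changes above tie context lifetime to a kref: reusing a context for another LUN takes a reference (kref_get()), each detach drops one (kref_put()), and remove_context() runs only on the final put, replacing the open-coded teardown and sys_close() of the saved adapter fd. A simplified userspace analogue of that release-on-last-reference pattern (not the kernel kref API) might look like:

```c
#include <stdio.h>
#include <stdlib.h>

/* Minimal analogue of kref: each attach takes a reference, each detach
 * drops one, and the release callback runs only on the last put. */
struct ref {
	int count;
	void (*release)(struct ref *r);
};

static void ref_init(struct ref *r, void (*release)(struct ref *r))
{
	r->count = 1;
	r->release = release;
}

static void ref_get(struct ref *r)
{
	r->count++;
}

/* Returns 1 if this put released the object, 0 otherwise (like kref_put). */
static int ref_put(struct ref *r)
{
	if (--r->count == 0) {
		r->release(r);
		return 1;
	}
	return 0;
}

struct ctx {
	struct ref kref;	/* first member, so the cast below is valid */
	/* ... per-context state would live here ... */
};

static void remove_ctx(struct ref *r)
{
	struct ctx *c = (struct ctx *)r;	/* container_of() equivalent */

	printf("last reference dropped, destroying context\n");
	free(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;

	ref_init(&c->kref, remove_ctx);	/* first LUN attach */
	ref_get(&c->kref);		/* second LUN reuses the context */

	ref_put(&c->kref);		/* detach LUN 2: context survives */
	ref_put(&c->kref);		/* detach LUN 1: remove_ctx() runs */
	return 0;
}
```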
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 5f9a091fda95..9e62ff304e4b 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -100,13 +100,14 @@ struct ctx_info {
struct cxl_ioctl_start_work work;
u64 ctxid;
- int lfd;
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
+ struct kref kref;
struct cxl_context *ctx;
+ struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
struct file *file;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 50f8e9300770..90c5d7f5278e 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -1135,14 +1135,13 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
ctxid_dst = DECODE_CTXID(clone->context_id_dst),
rctxid_src = clone->context_id_src,
rctxid_dst = clone->context_id_dst;
- int adap_fd_src = clone->adap_fd_src;
int i, j;
int rc = 0;
bool found;
LIST_HEAD(sidecar);
- pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
- __func__, ctxid_src, ctxid_dst, adap_fd_src);
+ pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
+ __func__, ctxid_src, ctxid_dst);
/* Do not clone yourself */
if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1166,13 +1165,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
goto out;
}
- if (unlikely(adap_fd_src != ctxi_src->lfd)) {
- pr_debug("%s: Invalid source adapter fd! (%d)\n",
- __func__, adap_fd_src);
- rc = -EINVAL;
- goto out;
- }
-
/* Verify there is no open resource handle in the destination context */
for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
if (ctxi_dst->rht_start[i].nmask != 0) {
@@ -1257,7 +1249,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
out_success:
list_splice(&sidecar, &ctxi_dst->luns);
- sys_close(adap_fd_src);
/* fall through */
out:
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 752b5c9d1ab2..241829e59668 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -583,6 +583,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+ kfree(buff);
return err;
}
sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
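The scsi_dh_alua hunk plugs a leak: the rtpg retry path returned before releasing the sense buffer, so the early return now frees it first. A trivial sketch of the rule that every post-allocation early return must release the buffer (hypothetical probe() helper, not the ALUA handler):

```c
#include <stdlib.h>
#include <string.h>

static int probe(int fail)
{
	char *buff = malloc(128);

	if (!buff)
		return -1;
	memset(buff, 0, 128);

	if (fail) {		/* error path: free before returning */
		free(buff);
		return -1;
	}

	/* ... use buff ... */
	free(buff);
	return 0;
}

int main(void)
{
	probe(1);
	probe(0);
	return 0;
}
```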
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
deleted file mode 100644
index 459863f94e46..000000000000
--- a/drivers/scsi/dtc.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * DTC 3180/3280 driver, by
- * Ray Van Tassle rayvt@comm.mot.com
- *
- * taken from ...
- * Trantor T128/T128F/T228 driver by...
- *
- * Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- */
-
-/*
- * The card is detected and initialized in one of several ways :
- * 1. Autoprobe (default) - since the board is memory mapped,
- * a BIOS signature is scanned for to locate the registers.
- * An interrupt is triggered to autoprobe for the interrupt
- * line.
- *
- * 2. With command line overrides - dtc=address,irq may be
- * used on the LILO command line to override the defaults.
- *
-*/
-
-/*----------------------------------------------------------------*/
-/* the following will set the monitor border color (useful to find
- where something crashed or gets stuck at */
-/* 1 = blue
- 2 = green
- 3 = cyan
- 4 = red
- 5 = magenta
- 6 = yellow
- 7 = white
-*/
-#if 0
-#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
-#else
-#define rtrc(i) {}
-#endif
-
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <scsi/scsi_host.h>
-
-#include "dtc.h"
-#include "NCR5380.h"
-
-/*
- * The DTC3180 & 3280 boards are memory mapped.
- *
- */
-
-/*
- */
-/* Offset from DTC_5380_OFFSET */
-#define DTC_CONTROL_REG 0x100 /* rw */
-#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
-#define CSR_DIR_READ 0x40 /* rw direction, 1 = read 0 = write */
-
-#define CSR_RESET 0x80 /* wo Resets 53c400 */
-#define CSR_5380_REG 0x80 /* ro 5380 registers can be accessed */
-#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
-#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
-#define CSR_5380_INTR 0x10 /* rw Enable 5380 interrupts */
-#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
-#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Host buffer not ready */
-#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
-#define CSR_GATED_5380_IRQ 0x01 /* ro Last block xferred */
-#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
-
-
-#define DTC_BLK_CNT 0x101 /* rw
- * # of 128-byte blocks to transfer */
-
-
-#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
-
-#define DTC_SWITCH_REG 0x3982 /* ro - DIP switches */
-#define DTC_RESUME_XFER 0x3982 /* wo - resume data xfer
- * after disconnect/reconnect*/
-
-#define DTC_5380_OFFSET 0x3880 /* 8 registers here, see NCR5380.h */
-
-/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
-#define DTC_DATA_BUF 0x3900 /* rw 128 bytes long */
-
-static struct override {
- unsigned int address;
- int irq;
-} overrides
-#ifdef OVERRIDE
-[] __initdata = OVERRIDE;
-#else
-[4] __initdata = {
- { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }
-};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-static struct base {
- unsigned long address;
- int noauto;
-} bases[] __initdata = {
- { 0xcc000, 0 },
- { 0xc8000, 0 },
- { 0xdc000, 0 },
- { 0xd8000, 0 }
-};
-
-#define NO_BASES ARRAY_SIZE(bases)
-
-static const struct signature {
- const char *string;
- int offset;
-} signatures[] = {
- {"DATA TECHNOLOGY CORPORATION BIOS", 0x25},
-};
-
-#define NO_SIGNATURES ARRAY_SIZE(signatures)
-
-#ifndef MODULE
-/*
- * Function : dtc_setup(char *str, int *ints)
- *
- * Purpose : LILO command line initialization of the overrides array,
- *
- * Inputs : str - unused, ints - array of integer parameters with ints[0]
- * equal to the number of ints.
- *
- */
-
-static int __init dtc_setup(char *str)
-{
- static int commandline_current;
- int i;
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- if (ints[0] != 2)
- printk("dtc_setup: usage dtc=address,irq\n");
- else if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].address = ints[1];
- overrides[commandline_current].irq = ints[2];
- for (i = 0; i < NO_BASES; ++i)
- if (bases[i].address == ints[1]) {
- bases[i].noauto = 1;
- break;
- }
- ++commandline_current;
- }
- return 1;
-}
-
-__setup("dtc=", dtc_setup);
-#endif
-
-/*
- * Function : int dtc_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : detects and initializes DTC 3180/3280 controllers
- * that were autoprobed, overridden on the LILO command line,
- * or specified at compile time.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- *
- * Returns : 1 if a host adapter was found, 0 if not.
- *
-*/
-
-static int __init dtc_detect(struct scsi_host_template * tpnt)
-{
- static int current_override, current_base;
- struct Scsi_Host *instance;
- unsigned int addr;
- void __iomem *base;
- int sig, count;
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- addr = 0;
- base = NULL;
-
- if (overrides[current_override].address) {
- addr = overrides[current_override].address;
- base = ioremap(addr, 0x2000);
- if (!base)
- addr = 0;
- } else
- for (; !addr && (current_base < NO_BASES); ++current_base) {
- dprintk(NDEBUG_INIT, "dtc: probing address 0x%08x\n",
- (unsigned int)bases[current_base].address);
- if (bases[current_base].noauto)
- continue;
- base = ioremap(bases[current_base].address, 0x2000);
- if (!base)
- continue;
- for (sig = 0; sig < NO_SIGNATURES; ++sig) {
- if (check_signature(base + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) {
- addr = bases[current_base].address;
- dprintk(NDEBUG_INIT, "dtc: detected board\n");
- goto found;
- }
- }
- iounmap(base);
- }
-
- dprintk(NDEBUG_INIT, "dtc: addr = 0x%08x\n", addr);
-
- if (!addr)
- break;
-
-found:
- instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
- if (instance == NULL)
- goto out_unmap;
-
- instance->base = addr;
- ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
-
- if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP))
- goto out_unregister;
-
- NCR5380_maybe_reset_bus(instance);
-
- NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
-
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
-
- /* With interrupts enabled, it will sometimes hang when doing heavy
- * reads. So better not enable them until I finger it out. */
- instance->irq = NO_IRQ;
-
- if (instance->irq != NO_IRQ)
- if (request_irq(instance->irq, dtc_intr, 0,
- "dtc", instance)) {
- printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
-
- if (instance->irq == NO_IRQ) {
- printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
- dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
- instance->host_no, instance->irq);
-
- ++current_override;
- ++count;
- }
- return count;
-
-out_unregister:
- scsi_unregister(instance);
-out_unmap:
- iounmap(base);
- return count;
-}
-
-/*
- * Function : int dtc_biosparam(Disk * disk, struct block_device *dev, int *ip)
- *
- * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
- * the specified device / size.
- *
- * Inputs : size = size of device in sectors (512 bytes), dev = block device
- * major / minor, ip[] = {heads, sectors, cylinders}
- *
- * Returns : always 0 (success), initializes ip
- *
-*/
-
-/*
- * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
- * using hard disks on a trantor should verify that this mapping corresponds
- * to that used by the BIOS / ASPI driver by running the linux fdisk program
- * and matching the H_C_S coordinates to what DOS uses.
-*/
-
-static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int *ip)
-{
- int size = capacity;
-
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = size >> 11;
- return 0;
-}
-
-
-/****************************************************************
- * Function : int NCR5380_pread (struct Scsi_Host *instance,
- * unsigned char *dst, int len)
- *
- * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to
- * dst
- *
- * Inputs : dst = destination, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
-*/
-
-static inline int dtc_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
-{
- unsigned char *d = dst;
- int i; /* For counting time spent in the poll-loop */
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- i = 0;
- if (instance->irq == NO_IRQ)
- NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
- else
- NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
- NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
- rtrc(1);
- while (len > 0) {
- rtrc(2);
- while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
- ++i;
- rtrc(3);
- memcpy_fromio(d, hostdata->base + DTC_DATA_BUF, 128);
- d += 128;
- len -= 128;
- rtrc(7);
- /*** with int's on, it sometimes hangs after here.
- * Looks like something makes HBNR go away. */
- }
- rtrc(4);
- while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
- ++i;
- rtrc(0);
- return (0);
-}
-
-/****************************************************************
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
- * unsigned char *src, int len)
- *
- * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
- * src
- *
- * Inputs : src = source, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
-*/
-
-static inline int dtc_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
-{
- int i;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- if (instance->irq == NO_IRQ)
- NCR5380_write(DTC_CONTROL_REG, 0);
- else
- NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
- NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
- for (i = 0; len > 0; ++i) {
- rtrc(5);
- /* Poll until the host buffer can accept data. */
- while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
- ++i;
- rtrc(3);
- memcpy_toio(hostdata->base + DTC_DATA_BUF, src, 128);
- src += 128;
- len -= 128;
- }
- rtrc(4);
- while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
- ++i;
- rtrc(6);
- /* Wait until the last byte has been sent to the disk */
- while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
- ++i;
- rtrc(7);
- /* Check for parity error here. fixme. */
- rtrc(0);
- return (0);
-}
-
-static int dtc_dma_xfer_len(struct scsi_cmnd *cmd)
-{
- int transfersize = cmd->transfersize;
-
- /* Limit transfers to 32K, for xx400 & xx406
- * pseudoDMA that transfers in 128 bytes blocks.
- */
- if (transfersize > 32 * 1024 && cmd->SCp.this_residual &&
- !(cmd->SCp.this_residual % transfersize))
- transfersize = 32 * 1024;
-
- return transfersize;
-}
-
-MODULE_LICENSE("GPL");
-
-#include "NCR5380.c"
-
-static int dtc_release(struct Scsi_Host *shost)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(shost);
-
- if (shost->irq != NO_IRQ)
- free_irq(shost->irq, shost);
- NCR5380_exit(shost);
- scsi_unregister(shost);
- iounmap(hostdata->base);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "DTC 3180/3280",
- .detect = dtc_detect,
- .release = dtc_release,
- .proc_name = "dtc3x80",
- .info = dtc_info,
- .queuecommand = dtc_queue_command,
- .eh_abort_handler = dtc_abort,
- .eh_bus_reset_handler = dtc_bus_reset,
- .bios_param = dtc_biosparam,
- .can_queue = 32,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
- .cmd_size = NCR5380_CMD_SIZE,
- .max_sectors = 128,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
deleted file mode 100644
index fcb0a8ea7bda..000000000000
--- a/drivers/scsi/dtc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * DTC controller, taken from T128 driver by...
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- */
-
-#ifndef DTC3280_H
-#define DTC3280_H
-
-#define NCR5380_implementation_fields \
- void __iomem *base
-
-#define DTC_address(reg) \
- (((struct NCR5380_hostdata *)shost_priv(instance))->base + DTC_5380_OFFSET + reg)
-
-#define NCR5380_read(reg) (readb(DTC_address(reg)))
-#define NCR5380_write(reg, value) (writeb(value, DTC_address(reg)))
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- dtc_dma_xfer_len(cmd)
-#define NCR5380_dma_recv_setup dtc_pread
-#define NCR5380_dma_send_setup dtc_pwrite
-#define NCR5380_dma_residual(instance) (0)
-
-#define NCR5380_intr dtc_intr
-#define NCR5380_queue_command dtc_queue_command
-#define NCR5380_abort dtc_abort
-#define NCR5380_bus_reset dtc_bus_reset
-#define NCR5380_info dtc_info
-
-#define NCR5380_io_delay(x) udelay(x)
-
-/* 15 12 11 10
- 1001 1100 0000 0000 */
-
-#define DTC_IRQS 0x9c00
-
-
-#endif /* DTC3280_H */
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 78ce4d61a69b..d6e53aee2295 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -963,10 +963,6 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
/* initialize the allocated memory */
if (test_bit(AF_FIRST_INIT, &a->flags)) {
- memset(a->req_table, 0,
- (num_requests + num_ae_requests +
- 1) * sizeof(struct esas2r_request *));
-
esas2r_targ_db_initialize(a);
/* prime parts of the inbound list */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 2aca4d16f39e..5092c821d088 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -194,7 +194,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
int length = min(sizeof(struct atto_ioctl), count);
if (!a->local_atto_ioctl) {
- a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+ a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl),
GFP_KERNEL);
if (a->local_atto_ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 7028dd37e5dd..375c536cbc68 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,41 @@ static struct notifier_block libfcoe_notifier = {
.notifier_call = libfcoe_device_notification,
};
+static const struct {
+ u32 fc_port_speed;
+#define SPEED_2000 2000
+#define SPEED_4000 4000
+#define SPEED_8000 8000
+#define SPEED_16000 16000
+#define SPEED_32000 32000
+ u32 eth_port_speed;
+} fcoe_port_speed_mapping[] = {
+ { FC_PORTSPEED_1GBIT, SPEED_1000 },
+ { FC_PORTSPEED_2GBIT, SPEED_2000 },
+ { FC_PORTSPEED_4GBIT, SPEED_4000 },
+ { FC_PORTSPEED_8GBIT, SPEED_8000 },
+ { FC_PORTSPEED_10GBIT, SPEED_10000 },
+ { FC_PORTSPEED_16GBIT, SPEED_16000 },
+ { FC_PORTSPEED_20GBIT, SPEED_20000 },
+ { FC_PORTSPEED_25GBIT, SPEED_25000 },
+ { FC_PORTSPEED_32GBIT, SPEED_32000 },
+ { FC_PORTSPEED_40GBIT, SPEED_40000 },
+ { FC_PORTSPEED_50GBIT, SPEED_50000 },
+ { FC_PORTSPEED_100GBIT, SPEED_100000 },
+};
+
+static inline u32 eth2fc_speed(u32 eth_port_speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fcoe_port_speed_mapping); i++) {
+ if (fcoe_port_speed_mapping[i].eth_port_speed == eth_port_speed)
+ return fcoe_port_speed_mapping[i].fc_port_speed;
+ }
+
+ return FC_PORTSPEED_UNKNOWN;
+}
+
/**
* fcoe_link_speed_update() - Update the supported and actual link speeds
* @lport: The local port to update speeds for
@@ -126,23 +161,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
SUPPORTED_40000baseLR4_Full))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- switch (ecmd.base.speed) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- case SPEED_20000:
- lport->link_speed = FC_PORTSPEED_20GBIT;
- break;
- case SPEED_40000:
- lport->link_speed = FC_PORTSPEED_40GBIT;
- break;
- default:
- lport->link_speed = FC_PORTSPEED_UNKNOWN;
- break;
- }
+ lport->link_speed = eth2fc_speed(ecmd.base.speed);
return 0;
}
return -1;
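The fcoe_transport change replaces the switch over ethtool speeds with a table lookup, so supporting another speed only means adding one row to the mapping array. A hedged sketch of the same lookup approach, with made-up speed codes standing in for the FC_PORTSPEED_* and SPEED_* constants:

```c
#include <stdio.h>
#include <stddef.h>

#define FC_SPEED_UNKNOWN 0

static const struct {
	unsigned int fc_speed;	/* FC port speed bit (placeholder values) */
	unsigned int eth_speed;	/* link speed in Mb/s */
} speed_map[] = {
	{ 0x01,  1000 },
	{ 0x02, 10000 },
	{ 0x04, 20000 },
	{ 0x08, 40000 },
};

static unsigned int eth2fc(unsigned int eth_speed)
{
	size_t i;

	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
		if (speed_map[i].eth_speed == eth_speed)
			return speed_map[i].fc_speed;

	return FC_SPEED_UNKNOWN;	/* anything unmapped stays unknown */
}

int main(void)
{
	printf("10G  -> 0x%x\n", eth2fc(10000));	/* 0x2 */
	printf("2.5G -> 0x%x\n", eth2fc(2500));		/* 0x0 (unknown) */
	return 0;
}
```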
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 4731d3241323..72c98522bd26 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -23,7 +23,7 @@
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
-#define DRV_VERSION "v1.5"
+#define DRV_VERSION "v1.6"
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
@@ -56,6 +56,11 @@ enum dev_status {
HISI_SAS_DEV_EH,
};
+enum {
+ HISI_SAS_INT_ABT_CMD = 0,
+ HISI_SAS_INT_ABT_DEV = 1,
+};
+
enum hisi_sas_dev_type {
HISI_SAS_DEV_TYPE_STP = 0,
HISI_SAS_DEV_TYPE_SSP,
@@ -89,6 +94,13 @@ struct hisi_sas_port {
struct hisi_sas_cq {
struct hisi_hba *hisi_hba;
+ int rd_point;
+ int id;
+};
+
+struct hisi_sas_dq {
+ struct hisi_hba *hisi_hba;
+ int wr_point;
int id;
};
@@ -146,6 +158,9 @@ struct hisi_sas_hw {
struct hisi_sas_slot *slot);
int (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
+ int (*prep_abort)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ int device_id, int abort_flag, int tag_to_abort);
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int abort);
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -185,6 +200,7 @@ struct hisi_hba {
struct Scsi_Host *shost;
struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_dq dq[HISI_SAS_MAX_QUEUES];
struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS];
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 18dd5ea2c721..2f872f784e10 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -17,6 +17,10 @@
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
u8 *lun, struct hisi_sas_tmf_task *tmf);
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+ struct domain_device *device,
+ int abort_flag, int tag);
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
@@ -93,7 +97,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
- memset(slot, 0, sizeof(*slot));
+ /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -116,6 +120,14 @@ static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
return hisi_hba->hw->prep_stp(hisi_hba, slot);
}
+static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ int device_id, int abort_flag, int tag_to_abort)
+{
+ return hisi_hba->hw->prep_abort(hisi_hba, slot,
+ device_id, abort_flag, tag_to_abort);
+}
+
/*
* This function will issue an abort TMF regardless of whether the
* task is in the sdev or not. Then it will do the task complete
@@ -192,27 +204,13 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
return rc;
}
port = device->port->lldd_port;
- if (port && !port->port_attached && !tmf) {
- if (sas_protocol_ata(task->task_proto)) {
- struct task_status_struct *ts = &task->task_status;
-
- dev_info(dev,
- "task prep: SATA/STP port%d not attach device\n",
- device->port->id);
- ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAS_PHY_DOWN;
- task->task_done(task);
- } else {
- struct task_status_struct *ts = &task->task_status;
-
- dev_info(dev,
- "task prep: SAS port%d does not attach device\n",
- device->port->id);
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- task->task_done(task);
- }
- return 0;
+ if (port && !port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attach device\n",
+ (sas_protocol_ata(task->task_proto)) ?
+ "SATA/STP" : "SAS",
+ device->port->id);
+
+ return SAS_PHY_DOWN;
}
if (!sas_protocol_ata(task->task_proto)) {
@@ -609,6 +607,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
dev_info(dev, "found dev[%lld:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+
hisi_hba->hw->free_device(hisi_hba, sas_dev);
device->lldd_dev = NULL;
memset(sas_dev, 0, sizeof(*sas_dev));
@@ -729,6 +730,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
* underrun
@@ -826,18 +833,22 @@ static int hisi_sas_abort_task(struct sas_task *task)
}
}
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_CMD, tag);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- struct hisi_slot_info *slot = task->lldd_task;
-
- dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
- hisi_hba, task, slot);
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
rc = TMF_RESP_FUNC_COMPLETE;
- goto out;
}
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ /* SMP */
+ struct hisi_sas_slot *slot = task->lldd_task;
+ u32 tag = slot->idx;
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_CMD, tag);
}
out:
@@ -954,6 +965,157 @@ static int hisi_sas_query_task(struct sas_task *task)
return rc;
}
+static int
+hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
+ struct sas_task *task, int abort_flag,
+ int task_tag)
+{
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_port *port;
+ struct hisi_sas_slot *slot;
+ struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
+
+ if (!device->port)
+ return -1;
+
+ port = device->port->lldd_port;
+
+ /* simply get a slot and send abort command */
+ rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
+ if (rc)
+ goto err_out;
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
+ &dlvry_queue_slot);
+ if (rc)
+ goto err_out_tag;
+
+ slot = &hisi_hba->slot_info[slot_idx];
+ memset(slot, 0, sizeof(struct hisi_sas_slot));
+
+ slot->idx = slot_idx;
+ slot->n_elem = n_elem;
+ slot->dlvry_queue = dlvry_queue;
+ slot->dlvry_queue_slot = dlvry_queue_slot;
+ cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
+ slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
+ slot->task = task;
+ slot->port = port;
+ task->lldd_task = slot;
+
+ memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+
+ rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
+ abort_flag, task_tag);
+ if (rc)
+ goto err_out_tag;
+
+ /* Port structure is static for the HBA, so
+ * even if the port is deformed it is ok
+ * to reference.
+ */
+ list_add_tail(&slot->entry, &port->list);
+ spin_lock(&task->task_state_lock);
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock(&task->task_state_lock);
+
+ hisi_hba->slot_prep = slot;
+
+ sas_dev->running_req++;
+ /* send abort command to our chip */
+ hisi_hba->hw->start_delivery(hisi_hba);
+
+ return 0;
+
+err_out_tag:
+ hisi_sas_slot_index_free(hisi_hba, slot_idx);
+err_out:
+ dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
+
+ return rc;
+}
+
+/**
+ * hisi_sas_internal_task_abort -- execute an internal
+ * abort command for a single IO command or a device
+ * @hisi_hba: host controller struct
+ * @device: domain device
+ * @abort_flag: mode of operation, device or single IO
+ * @tag: tag of IO to be aborted (only relevant to single
+ * IO mode)
+ */
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+ struct domain_device *device,
+ int abort_flag, int tag)
+{
+ struct sas_task *task;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ int res;
+ unsigned long flags;
+
+ if (!hisi_hba->hw->prep_abort)
+ return -EOPNOTSUPP;
+
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = device->tproto;
+ task->task_done = hisi_sas_task_done;
+ task->slow_task->timer.data = (unsigned long)task;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + 20*HZ;
+ add_timer(&task->slow_task->timer);
+
+ /* Lock as we are alloc'ing a slot, which cannot be interrupted */
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
+ task, abort_flag, tag);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ if (res) {
+ del_timer(&task->slow_task->timer);
+ dev_err(dev, "internal task abort: executing internal task failed: %d\n",
+ res);
+ goto exit;
+ }
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ goto exit;
+ }
+
+ /* TMF timed out, return directly. */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ dev_err(dev, "internal task abort: timeout.\n");
+ if (task->lldd_task) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ }
+ }
+ }
+
+exit:
+ dev_info(dev, "internal task abort: task to dev %016llx task=%p "
+ "resp: 0x%x sts 0x%x\n",
+ SAS_ADDR(device->sas_addr),
+ task,
+ task->task_status.resp, /* 0 is complete, -1 is undelivered */
+ task->task_status.stat);
+ sas_free_task(task);
+
+ return res;
+}
+
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
hisi_sas_port_notify_formed(sas_phy);
@@ -1063,11 +1225,16 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ struct hisi_sas_dq *dq = &hisi_hba->dq[i];
/* Completion queue structure */
cq->id = i;
cq->hisi_hba = hisi_hba;
+ /* Delivery queue structure */
+ dq->id = i;
+ dq->hisi_hba = hisi_hba;
+
/* Delivery queue */
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
@@ -1128,7 +1295,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->breakpoint, 0, s);
hisi_hba->slot_index_count = max_command_entries;
- s = hisi_hba->slot_index_count / sizeof(unsigned long);
+ s = hisi_hba->slot_index_count / BITS_PER_BYTE;
hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
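The hunk above sizes the slot tag bitmap by dividing the number of tag bits by BITS_PER_BYTE rather than by sizeof(unsigned long); the old divisor only produced the intended byte count on 64-bit builds. A standalone arithmetic sketch (BITS_PER_BYTE redefined locally just for illustration):

#include <stdio.h>

#define BITS_PER_BYTE 8 /* matches the kernel definition */

int main(void)
{
        unsigned int slots = 4096;      /* example slot_index_count */

        /* One tag bit per slot, so the bitmap needs slots / 8 bytes. */
        printf("bytes via BITS_PER_BYTE:         %u\n", slots / BITS_PER_BYTE);

        /* The old divisor matches only where sizeof(unsigned long) == 8;
         * on a 32-bit build it allocates twice as many bytes as needed. */
        printf("bytes via sizeof(unsigned long): %zu\n",
               slots / sizeof(unsigned long));
        return 0;
}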
@@ -1272,6 +1439,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
&hisi_hba->queue_count))
goto err_out;
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ dev_err(dev, "No usable DMA addressing method\n");
+ goto err_out;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hisi_hba->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hisi_hba->regs))
@@ -1319,13 +1492,6 @@ int hisi_sas_probe(struct platform_device *pdev,
hisi_hba = shost_priv(shost);
platform_set_drvdata(pdev, sha);
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
- dev_err(dev, "No usable DMA addressing method\n");
- rc = -EIO;
- goto err_out_ha;
- }
-
phy_nr = port_nr = hisi_hba->n_phy;
arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
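The two hunks above move the DMA mask setup from hisi_sas_probe() into hisi_sas_shost_alloc(), keeping the usual try-64-bit-then-fall-back-to-32-bit pattern. A hedged userspace-only sketch of that pattern; set_mask() below is a stand-in for dma_set_mask_and_coherent(), which is a kernel API and cannot be called from ordinary C:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stand-in for dma_set_mask_and_coherent(): pretend the platform only
 * supports 32-bit DMA addressing. Returns 0 on success. */
static int set_mask(uint64_t mask)
{
        return mask > DMA_BIT_MASK(32) ? -1 : 0;
}

int main(void)
{
        /* Same shape as the moved hunk: try 64-bit, fall back to 32-bit,
         * and fail the probe only if neither sticks. */
        if (set_mask(DMA_BIT_MASK(64)) && set_mask(DMA_BIT_MASK(32))) {
                fprintf(stderr, "No usable DMA addressing method\n");
                return 1;
        }
        printf("DMA mask configured\n");
        return 0;
}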
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 1abbc2e162df..c0ac49d8bc8d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -490,25 +490,17 @@ static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
__swab32(identify_buffer[0]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
- identify_buffer[2]);
+ __swab32(identify_buffer[1]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
- identify_buffer[1]);
+ __swab32(identify_buffer[2]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
- identify_buffer[4]);
+ __swab32(identify_buffer[3]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
- identify_buffer[3]);
+ __swab32(identify_buffer[4]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
__swab32(identify_buffer[5]));
}
-static void init_id_frame_v1_hw(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->n_phy; i++)
- config_id_frame_v1_hw(hisi_hba, i);
-}
-
static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
@@ -774,8 +766,6 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
msleep(100);
init_reg_v1_hw(hisi_hba);
- init_id_frame_v1_hw(hisi_hba);
-
return 0;
}
@@ -875,12 +865,13 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_dq *dq;
u32 r, w;
int queue = hisi_hba->queue;
while (1) {
- w = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_WR_PTR + (queue * 0x14));
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -903,10 +894,11 @@ static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
{
int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
- hisi_sas_write32(hisi_hba,
- DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
- ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+ dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+ dq->wr_point);
}
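get_free_slot_v1_hw() and start_delivery_v1_hw() now work on a write pointer cached in the new struct hisi_sas_dq instead of re-reading DLVRY_Q_0_WR_PTR for every command. A standalone sketch of that producer-side ring logic; the queue depth is an arbitrary stand-in for HISI_SAS_QUEUE_SLOTS:

#include <stdio.h>

#define QUEUE_SLOTS 512         /* stand-in for HISI_SAS_QUEUE_SLOTS */

struct dq {
        unsigned int wr_point;  /* cached, owned by the driver */
};

/* Full when advancing the write pointer would land on the read pointer,
 * exactly the r == (w + 1) % QUEUE_SLOTS test in get_free_slot_v1_hw(). */
static int dq_full(const struct dq *dq, unsigned int rd_point)
{
        return rd_point == (dq->wr_point + 1) % QUEUE_SLOTS;
}

/* Mirrors start_delivery_v1_hw(): bump the cached pointer first; the new
 * value is what would be written to DLVRY_Q_0_WR_PTR. */
static unsigned int dq_deliver(struct dq *dq, unsigned int slot)
{
        dq->wr_point = (slot + 1) % QUEUE_SLOTS;
        return dq->wr_point;
}

int main(void)
{
        struct dq dq = { .wr_point = 0 };

        printf("full? %d\n", dq_full(&dq, 1));  /* 1: ring treated as full */
        printf("new wr_point = %u\n", dq_deliver(&dq, dq.wr_point));
        return 0;
}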
static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
@@ -1565,14 +1557,11 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
struct hisi_sas_complete_v1_hdr *complete_queue =
(struct hisi_sas_complete_v1_hdr *)
hisi_hba->complete_hdr[queue];
- u32 irq_value, rd_point, wr_point;
+ u32 irq_value, rd_point = cq->rd_point, wr_point;
irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
-
- rd_point = hisi_sas_read32(hisi_hba,
- COMPL_Q_0_RD_PTR + (0x14 * queue));
wr_point = hisi_sas_read32(hisi_hba,
COMPL_Q_0_WR_PTR + (0x14 * queue));
@@ -1600,6 +1589,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
}
/* update rd_point */
+ cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
return IRQ_HANDLED;
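Similarly, cq_interrupt_v1_hw() (and its v2 counterpart further down) now resumes from the read pointer cached in struct hisi_sas_cq; only the write pointer is still read from hardware, and the cached value is written back to COMPL_Q_0_RD_PTR once the queue is drained. A standalone consumer-side sketch, with completion entries stubbed as plain integers:

#include <stdio.h>

#define QUEUE_SLOTS 512                 /* stand-in for HISI_SAS_QUEUE_SLOTS */

struct cq {
        unsigned int rd_point;          /* cached across interrupts */
        int entries[QUEUE_SLOTS];       /* stand-in for the completion ring */
};

/* Drain everything between the cached read pointer and the hardware write
 * pointer, then return the value to write back to COMPL_Q_0_RD_PTR. */
static unsigned int cq_drain(struct cq *cq, unsigned int wr_point)
{
        unsigned int rd_point = cq->rd_point;

        while (rd_point != wr_point) {
                printf("complete entry %d\n", cq->entries[rd_point]);
                rd_point = (rd_point + 1) % QUEUE_SLOTS;
        }
        cq->rd_point = rd_point;        /* keep the cache for the next IRQ */
        return rd_point;
}

int main(void)
{
        struct cq cq = { .rd_point = 0, .entries = { 10, 11, 12 } };

        cq_drain(&cq, 3);               /* pretend hardware wrote 3 entries */
        printf("rd_point now %u\n", cq.rd_point);
        return 0;
}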
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f96560431cf1..9825a3f49f53 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -117,6 +117,8 @@
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define SL_CONTROL_CTA_OFF 17
+#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -124,6 +126,9 @@
#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
+#define TXID_AUTO (PORT_BASE + 0xb8)
+#define TXID_AUTO_CT3_OFF 1
+#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
@@ -174,6 +179,10 @@
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
+#define CMD_HDR_ABORT_FLAG_OFF 0
+#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
+#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
+#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF 5
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
@@ -214,6 +223,8 @@
#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF 16
#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+#define CMD_HDR_ABORT_IPTT_OFF 16
+#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
/* Completion header */
/* dw0 */
@@ -221,6 +232,13 @@
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF 12
#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
+#define CMPLT_HDR_ABORT_STAT_OFF 13
+#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
+/* abort_stat */
+#define STAT_IO_NOT_VALID 0x1
+#define STAT_IO_NO_DEVICE 0x2
+#define STAT_IO_COMPLETE 0x3
+#define STAT_IO_ABORTED 0x4
/* dw1 */
#define CMPLT_HDR_IPTT_OFF 0
#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
@@ -549,25 +567,17 @@ static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
__swab32(identify_buffer[0]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
- identify_buffer[2]);
+ __swab32(identify_buffer[1]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
- identify_buffer[1]);
+ __swab32(identify_buffer[2]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
- identify_buffer[4]);
+ __swab32(identify_buffer[3]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
- identify_buffer[3]);
+ __swab32(identify_buffer[4]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
__swab32(identify_buffer[5]));
}
-static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->n_phy; i++)
- config_id_frame_v2_hw(hisi_hba, i);
-}
-
static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
@@ -589,6 +599,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
break;
case SAS_SATA_DEV:
+ case SAS_SATA_PENDING:
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
@@ -672,9 +683,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
else
reset_val = 0x7ffff;
- /* Disable all of the DQ */
- for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
/* Disable all of the PHYs */
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -810,6 +819,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -901,8 +912,6 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
msleep(100);
init_reg_v2_hw(hisi_hba);
- init_id_frame_v2_hw(hisi_hba);
-
return 0;
}
@@ -952,14 +961,8 @@ static void start_phys_v2_hw(unsigned long data)
static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
{
- int i;
struct timer_list *timer = &hisi_hba->timer;
- for (i = 0; i < hisi_hba->n_phy; i++) {
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
- hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
- }
-
setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
mod_timer(timer, jiffies + HZ);
}
@@ -1010,12 +1013,13 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
{
struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_dq *dq;
u32 r, w;
int queue = hisi_hba->queue;
while (1) {
- w = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_WR_PTR + (queue * 0x14));
+ dq = &hisi_hba->dq[queue];
+ w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -1038,9 +1042,11 @@ static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
{
int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
+ dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
- ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+ dq->wr_point);
}
static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
@@ -1563,6 +1569,30 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
goto out;
}
+ /* Use SAS+TMF status codes */
+ switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
+ >> CMPLT_HDR_ABORT_STAT_OFF) {
+ case STAT_IO_ABORTED:
+ /* this IO has been aborted by the abort command */
+ ts->stat = SAS_ABORTED_TASK;
+ goto out;
+ case STAT_IO_COMPLETE:
+ /* internal abort command complete */
+ ts->stat = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ case STAT_IO_NO_DEVICE:
+ ts->stat = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ case STAT_IO_NOT_VALID:
+ /* for a single-IO abort, the controller could not
+ * find the IO that needs to be aborted
+ */
+ ts->stat = TMF_RESP_FUNC_FAILED;
+ goto out;
+ default:
+ break;
+ }
+
if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
@@ -1775,6 +1805,32 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
return 0;
}
+static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ int device_id, int abort_flag, int tag_to_abort)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *dev = task->dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct hisi_sas_port *port = slot->port;
+
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /* abort */
+ (port->id << CMD_HDR_PORT_OFF) |
+ ((dev_is_sata(dev) ? 1 : 0) <<
+ CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+
+ /* dw1 */
+ hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+
+ /* dw7 */
+ hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ return 0;
+}
+
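prep_abort_v2_hw() above builds the abort command header from the CMD_HDR_ABORT_* offsets defined earlier in this file: the abort flag and device type go into dw0 and the tag of the IO to abort into the upper half of dw7. A standalone sketch of just that packing; the opcode and port-id fields are left out because their offsets are not shown here, and a U suffix is added to the IPTT mask so the shift stays well defined in standalone C:

#include <stdio.h>

/* Offsets and masks copied from the hunk above. */
#define CMD_HDR_ABORT_FLAG_OFF          0
#define CMD_HDR_ABORT_FLAG_MSK          (0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF   2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK   (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_ABORT_IPTT_OFF          16
#define CMD_HDR_ABORT_IPTT_MSK          (0xffffU << CMD_HDR_ABORT_IPTT_OFF)

/* Only the abort-specific fields of dw0; the real prep_abort_v2_hw() also
 * ORs in the abort opcode and the port id. */
static unsigned int pack_abort_dw0(unsigned int abort_flag, int is_sata)
{
        return ((abort_flag << CMD_HDR_ABORT_FLAG_OFF) & CMD_HDR_ABORT_FLAG_MSK) |
               (((is_sata ? 1 : 0) << CMD_HDR_ABORT_DEVICE_TYPE_OFF) &
                CMD_HDR_ABORT_DEVICE_TYPE_MSK);
}

/* dw7 carries the tag of the IO being aborted in its upper sixteen bits. */
static unsigned int pack_abort_dw7(unsigned int tag_to_abort)
{
        return (tag_to_abort << CMD_HDR_ABORT_IPTT_OFF) & CMD_HDR_ABORT_IPTT_MSK;
}

int main(void)
{
        printf("dw0 = 0x%08x\n", pack_abort_dw0(1, 0));  /* example: flag 1, SAS device */
        printf("dw7 = 0x%08x\n", pack_abort_dw7(0x1234));
        return 0;
}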
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
int i, res = 0;
@@ -1818,9 +1874,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
frame_rcvd[i] = __swab32(idaf);
}
- /* Get the linkrates */
- link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
- link_rate = (link_rate >> (phy_no * 4)) & 0xf;
sas_phy->linkrate = link_rate;
hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
HARD_PHY_LINKRATE);
@@ -1855,16 +1908,21 @@ end:
static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
int res = 0;
- u32 phy_cfg, phy_state;
+ u32 phy_state, sl_ctrl, txid_auto;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
- phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
-
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
-
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+ sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
+ sl_ctrl & ~SL_CONTROL_CTA_MSK);
+
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | TXID_AUTO_CT3_MSK);
+
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
@@ -1986,7 +2044,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
struct hisi_sas_slot *slot;
struct hisi_sas_itct *itct;
struct hisi_sas_complete_v2_hdr *complete_queue;
- u32 irq_value, rd_point, wr_point, dev_id;
+ u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
complete_queue = hisi_hba->complete_hdr[queue];
@@ -1994,8 +2052,6 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
- rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
- (0x14 * queue));
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@@ -2043,6 +2099,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
}
/* update rd_point */
+ cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
return IRQ_HANDLED;
}
@@ -2239,6 +2296,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
.prep_stp = prep_ata_v2_hw,
+ .prep_abort = prep_abort_v2_hw,
.get_free_slot = get_free_slot_v2_hw,
.start_delivery = start_delivery_v2_hw,
.slot_complete = slot_complete_v2_hw,
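The abort_stat switch added to slot_complete_v2_hw() above translates the hardware's abort status into libsas/TMF result codes before the normal completion path runs. A standalone sketch of that mapping; the SAS/TMF codes are local placeholders, not the kernel's values:

#include <stdio.h>

/* abort_stat values copied from the hunk above. */
#define STAT_IO_NOT_VALID       0x1
#define STAT_IO_NO_DEVICE       0x2
#define STAT_IO_COMPLETE        0x3
#define STAT_IO_ABORTED         0x4

/* Placeholders for the libsas/TMF codes used by the driver. */
enum { NOT_AN_ABORT = 0, SAS_ABORTED_TASK = 1,
       TMF_RESP_FUNC_COMPLETE = 2, TMF_RESP_FUNC_FAILED = 3 };

/* Mirrors the switch in slot_complete_v2_hw(); NOT_AN_ABORT means "keep
 * going with the normal completion path". */
static int map_abort_stat(unsigned int abort_stat)
{
        switch (abort_stat) {
        case STAT_IO_ABORTED:   return SAS_ABORTED_TASK;
        case STAT_IO_COMPLETE:
        case STAT_IO_NO_DEVICE: return TMF_RESP_FUNC_COMPLETE;
        case STAT_IO_NOT_VALID: return TMF_RESP_FUNC_FAILED;
        default:                return NOT_AN_ABORT;
        }
}

int main(void)
{
        printf("STAT_IO_ABORTED -> %d\n", map_abort_stat(STAT_IO_ABORTED));
        printf("unknown         -> %d\n", map_abort_stat(0));
        return 0;
}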
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ec6381e57eb7..258a3f9a2519 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -246,10 +246,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
- error = device_add(&shost->shost_gendev);
- if (error)
- goto out_destroy_freelist;
-
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -260,6 +256,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
+ error = device_add(&shost->shost_gendev);
+ if (error)
+ goto out_destroy_freelist;
+
scsi_host_set_state(shost, SHOST_RUNNING);
get_device(shost->shost_gendev.parent);
@@ -309,6 +309,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
out_del_gendev:
device_del(&shost->shost_gendev);
out_destroy_freelist:
+ device_disable_async_suspend(&shost->shost_gendev);
+ pm_runtime_disable(&shost->shost_gendev);
+ pm_runtime_set_suspended(&shost->shost_gendev);
+ pm_runtime_put_noidle(&shost->shost_gendev);
scsi_destroy_command_freelist(shost);
out_destroy_tags:
if (shost_use_blk_mq(shost))
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 030d0023e1d2..d007ec18179a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,6 +293,8 @@ static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize);
+static bool hpsa_vpd_page_supported(struct ctlr_info *h,
+ unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
struct hpsa_scsi_dev_t *dev,
@@ -2388,7 +2390,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd)
{
hpsa_cmd_resolve_and_free(h, c);
- cmd->scsi_done(cmd);
+ if (cmd && cmd->scsi_done)
+ cmd->scsi_done(cmd);
}
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@@ -2489,7 +2492,17 @@ static void complete_scsi_command(struct CommandList *cp)
ei = cp->err_info;
cmd = cp->scsi_cmd;
h = cp->h;
+
+ if (!cmd->device) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return hpsa_cmd_free_and_done(h, cp, cmd);
+ }
+
dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return hpsa_cmd_free_and_done(h, cp, cmd);
+ }
c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
scsi_dma_unmap(cmd); /* undo the DMA mappings */
@@ -2504,8 +2517,15 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
- if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
- atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+ if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
+ if (dev->physical_device && dev->expose_device &&
+ dev->removed) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return hpsa_cmd_free_and_done(h, cp, cmd);
+ }
+ if (likely(cp->phys_disk != NULL))
+ atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+ }
/*
* We check for lockup status here as it may be set for
@@ -3074,11 +3094,19 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
+
+ if (!hpsa_vpd_page_supported(h, scsi3addr,
+ HPSA_VPD_LV_DEVICE_GEOMETRY))
+ goto exit;
+
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
+ HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
+
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
*raid_level = RAID_UNKNOWN;
+exit:
kfree(buf);
return;
}
@@ -3436,7 +3464,7 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
}
/* Get a device id from inquiry page 0x83 */
-static int hpsa_vpd_page_supported(struct ctlr_info *h,
+static bool hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
{
int rc;
@@ -3446,7 +3474,7 @@ static int hpsa_vpd_page_supported(struct ctlr_info *h,
buf = kzalloc(256, GFP_KERNEL);
if (!buf)
- return 0;
+ return false;
/* Get the size of the page list first */
rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@@ -3473,10 +3501,10 @@ static int hpsa_vpd_page_supported(struct ctlr_info *h,
goto exit_supported;
exit_unsupported:
kfree(buf);
- return 0;
+ return false;
exit_supported:
kfree(buf);
- return 1;
+ return true;
}
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
@@ -3525,18 +3553,25 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
int rc;
unsigned char *buf;
- if (buflen > 16)
- buflen = 16;
+ /* Does controller have VPD for device id? */
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
+ return 1; /* not supported */
+
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
- if (rc == 0)
- memcpy(device_id, &buf[index], buflen);
+
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
+ HPSA_VPD_LV_DEVICE_ID, buf, 64);
+ if (rc == 0) {
+ if (buflen > 16)
+ buflen = 16;
+ memcpy(device_id, &buf[8], buflen);
+ }
kfree(buf);
- return rc != 0;
+ return rc; /* 0: got the device id, non-zero: didn't */
}
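hpsa_get_device_id() now probes hpsa_vpd_page_supported() for the standard Device Identification VPD page (HPSA_VPD_LV_DEVICE_ID, 0x83, added in hpsa_cmd.h further down) before issuing the inquiry, then copies at most 16 bytes starting at offset 8 of the returned page. A hedged standalone sketch of that flow with the SCSI plumbing mocked out:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define HPSA_VPD_LV_DEVICE_ID 0x83      /* as added in hpsa_cmd.h below */

/* Mocked helpers: the real driver issues INQUIRY commands here. */
static bool vpd_page_supported(unsigned char page)
{
        return page == HPSA_VPD_LV_DEVICE_ID;
}

static int do_inquiry(unsigned char page, unsigned char *buf, int len)
{
        (void)page;     /* a real INQUIRY would encode the page code */
        memset(buf, 0, len);
        memcpy(&buf[8], "EXAMPLE-DEVICE-ID", 16);       /* fake VPD payload */
        return 0;
}

/* Mirrors the shape of hpsa_get_device_id(): bail out if the page is not
 * supported, otherwise copy at most 16 bytes from offset 8. */
static int get_device_id(char *device_id, int buflen)
{
        unsigned char buf[64];

        if (!vpd_page_supported(HPSA_VPD_LV_DEVICE_ID))
                return 1;       /* not supported */
        if (do_inquiry(HPSA_VPD_LV_DEVICE_ID, buf, sizeof(buf)))
                return -1;
        if (buflen > 16)
                buflen = 16;
        memcpy(device_id, &buf[8], buflen);
        return 0;               /* 0: got id */
}

int main(void)
{
        char id[17] = { 0 };

        if (get_device_id(id, 16) == 0)
                printf("device id: %.16s\n", id);
        return 0;
}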
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
@@ -3807,8 +3842,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
sizeof(this_device->model));
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
- hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
- sizeof(this_device->device_id));
+ if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
+ sizeof(this_device->device_id)))
+ dev_err(&h->pdev->dev,
+ "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
+ h->ctlr, __func__,
+ h->scsi_host->host_no,
+ this_device->target, this_device->lun,
+ scsi_device_type(this_device->devtype),
+ this_device->model);
if ((this_device->devtype == TYPE_DISK ||
this_device->devtype == TYPE_ZBC) &&
@@ -4034,7 +4076,17 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
struct bmic_identify_physical_device *id_phys)
{
int rc;
- struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
+ struct ext_report_lun_entry *rle;
+
+ /*
+ * external targets don't support BMIC
+ */
+ if (dev->external) {
+ dev->queue_depth = 7;
+ return;
+ }
+
+ rle = &rlep->LUN[rle_index];
dev->ioaccel_handle = rle->ioaccel_handle;
if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
@@ -4270,6 +4322,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
i, nphysicals, nlogicals, physdev_list, logdev_list);
+ /* Determine if this is a lun from an external target array */
+ tmpdevice->external =
+ figure_external_status(h, raid_ctlr_position, i,
+ nphysicals, nlocal_logicals);
+
/*
* Skip over some devices such as a spare.
*/
@@ -4295,11 +4352,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
continue;
}
- /* Determine if this is a lun from an external target array */
- tmpdevice->external =
- figure_external_status(h, raid_ctlr_position, i,
- nphysicals, nlocal_logicals);
-
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
this_device = currentsd[ncurrent];
@@ -4513,7 +4565,9 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
case READ_6:
case READ_12:
if (*cdb_len == 6) {
- block = get_unaligned_be16(&cdb[2]);
+ block = (((cdb[1] & 0x1F) << 16) |
+ (cdb[2] << 8) |
+ cdb[3]);
block_cnt = cdb[4];
if (block_cnt == 0)
block_cnt = 256;
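The READ(6)/WRITE(6) fix above (repeated in the ioaccel paths below) replaces get_unaligned_be16() with an explicit 21-bit extraction: the LBA's top five bits sit in the low bits of CDB byte 1 and the remaining sixteen in bytes 2 and 3, so a plain 16-bit read silently drops the high bits. A standalone sketch with a worked example:

#include <stdio.h>

/* Extract the 21-bit LBA from a 6-byte READ(6)/WRITE(6) CDB, exactly as
 * the fixed hpsa code does. */
static unsigned int cdb6_lba(const unsigned char *cdb)
{
        return ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
}

int main(void)
{
        /* READ(6), LBA 0x12345: byte 1 carries the top five LBA bits. */
        unsigned char cdb[6] = { 0x08, 0x01, 0x23, 0x45, 0x10, 0x00 };

        printf("lba = 0x%x\n", cdb6_lba(cdb));          /* 0x12345 */

        /* A 16-bit big-endian read of bytes 2..3 yields only 0x2345. */
        printf("old = 0x%x\n", (unsigned)((cdb[2] << 8) | cdb[3]));
        return 0;
}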
@@ -4638,6 +4692,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
struct scsi_cmnd *cmd = c->scsi_cmd;
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ if (!dev)
+ return -1;
+
c->phys_disk = dev;
return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
@@ -4670,9 +4727,11 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
*/
switch (cmd->cmnd[0]) {
/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
- case WRITE_6:
case READ_6:
- first_block = get_unaligned_be16(&cmd->cmnd[2]);
+ case WRITE_6:
+ first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
+ (cmd->cmnd[2] << 8) |
+ cmd->cmnd[3]);
break;
case WRITE_10:
case READ_10:
@@ -4714,6 +4773,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
u32 len;
u32 total_len = 0;
+ if (!cmd->device)
+ return -1;
+
+ if (!cmd->device->hostdata)
+ return -1;
+
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
@@ -4822,6 +4887,12 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
+ if (!c->scsi_cmd->device)
+ return -1;
+
+ if (!c->scsi_cmd->device->hostdata)
+ return -1;
+
/* Try to honor the device's queue depth */
if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
phys_disk->queue_depth) {
@@ -4902,12 +4973,17 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
#endif
int offload_to_mirror;
+ if (!dev)
+ return -1;
+
/* check for valid opcode, get LBA and block count */
switch (cmd->cmnd[0]) {
case WRITE_6:
is_write = 1;
case READ_6:
- first_block = get_unaligned_be16(&cmd->cmnd[2]);
+ first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
+ (cmd->cmnd[2] << 8) |
+ cmd->cmnd[3]);
block_cnt = cmd->cmnd[4];
if (block_cnt == 0)
block_cnt = 256;
@@ -5314,6 +5390,9 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
int rc = IO_ACCEL_INELIGIBLE;
+ if (!dev)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
cmd->host_scribble = (unsigned char *) c;
if (dev->offload_enabled) {
@@ -5852,6 +5931,9 @@ static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
+ if (!dev)
+ return;
+
/*
* We're overlaying struct hpsa_tmf_struct on top of something which
* was allocated as a struct io_accel2_cmd, so we better be sure it
@@ -5935,7 +6017,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
psa[0], psa[1], psa[2], psa[3],
psa[4], psa[5], psa[6], psa[7]);
- rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
+ rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
if (rc != 0) {
dev_warn(&h->pdev->dev,
"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -5972,6 +6054,9 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
struct io_accel2_cmd *c2;
dev = abort->scsi_cmd->device->hostdata;
+ if (!dev)
+ return -1;
+
if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
return -1;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a1487e67f7a1..82cdfad874f3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -312,7 +312,6 @@ struct offline_device_entry {
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
-#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a5be153d92d4..a584cdf07058 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -157,6 +157,7 @@
/* VPD Inquiry types */
#define HPSA_VPD_SUPPORTED_PAGES 0x00
+#define HPSA_VPD_LV_DEVICE_ID 0x83
#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
#define HPSA_VPD_LV_STATUS 0xC3
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ab67ec4b6bd6..7e487c78279c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -52,6 +52,7 @@ static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
@@ -86,6 +87,9 @@ MODULE_PARM_DESC(debug, "Enable driver debug information. "
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
"[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+module_param_named(cls3_error, cls3_error, uint, 0);
+MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
+ "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
static const struct {
u16 status;
@@ -717,7 +721,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
/* Clean out the queue */
memset(crq->msgs, 0, PAGE_SIZE);
@@ -1335,6 +1338,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
struct srp_direct_buf *data = &vfc_cmd->ioba;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ if (cls3_error)
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
+
sg_mapped = scsi_dma_map(scmd);
if (!sg_mapped) {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
@@ -3381,6 +3387,10 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
+ prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
+
+ if (cls3_error)
+ prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 5c70a52ad346..9a0696f68f37 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -54,6 +54,7 @@
#define IBMVFC_DEV_LOSS_TMO (5 * 60)
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
+#define IBMVFC_CLS3_ERROR 0
/*
* Ensure we have resources for ERP and initialization:
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index b29fef9d0f27..642b739ad0da 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1606,8 +1606,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
- pr_debug("send_messages cmd %p\n", cmd);
-
iue = cmd->iue;
crq->valid = VALID_CMD_RESP_EL;
@@ -1934,6 +1932,8 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
/*
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
+ target_wait_for_sess_cmds(se_sess);
+ transport_deregister_session_configfs(se_sess);
transport_deregister_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
@@ -1978,7 +1978,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
else if (fmt->buffers & (~SUPPORTED_FORMATS))
reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
- else if ((fmt->buffers | SUPPORTED_FORMATS) == 0)
+ else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
if (vscsi->state == SRP_PROCESSING)
@@ -2554,10 +2554,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
srp->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
- &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
- attr);
-
rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
cmd->sense_buf, scsilun_to_int(&srp->lun),
data_len, attr, dir, 0);
@@ -3142,8 +3138,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
long tx_len;
long rc = 0;
- pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
-
if (bytes == 0)
return 0;
@@ -3192,12 +3186,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
vscsi->dds.window[LOCAL].liobn,
server_ioba);
} else {
- /* write to client */
- struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
-
- if (!READ_CMD(srp->cdb))
- print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
- sg_virt(sgp), buf_len);
/* The h_copy_rdma will cause phyp, running in another
* partition, to read memory, so we need to make sure
* the data has been written out, hence these syncs.
@@ -3322,12 +3310,9 @@ cmd_work:
rc = ibmvscsis_trans_event(vscsi, crq);
} else if (vscsi->flags & TRANS_EVENT) {
/*
- * if a tranport event has occurred leave
+ * if a transport event has occurred leave
* everything but transport events on the queue
- */
- pr_debug("handle_crq, ignoring\n");
-
- /*
+ *
* need to decrement the queue index so we can
* look at the elment again
*/
@@ -3461,6 +3446,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
+ rc = -ENOMEM;
dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
goto free_buf;
}
@@ -3693,12 +3679,9 @@ static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
se_cmd);
struct scsi_info *vscsi = cmd->adapter;
- pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
-
spin_lock_bh(&vscsi->intr_lock);
/* Remove from active_q */
- list_del(&cmd->list);
- list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ list_move_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
spin_unlock_bh(&vscsi->intr_lock);
}
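ibmvscsis_release_cmd() above replaces a list_del()/list_add_tail() pair with a single list_move_tail(), which is the same two steps fused. A standalone sketch using a minimal doubly linked list in place of the kernel's struct list_head:

#include <stdio.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

/* list_move_tail() is just the two operations above fused, which is what
 * the ibmvscsis hunk relies on. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
        list_del(e);
        list_add_tail(e, h);
}

int main(void)
{
        struct list_head active, waiting, cmd;

        list_init(&active);
        list_init(&waiting);
        list_add_tail(&cmd, &active);   /* cmd starts on the active queue */
        list_move_tail(&cmd, &waiting); /* one call moves it to waiting_rsp */
        printf("on waiting_rsp: %d\n", waiting.next == &cmd);
        return 0;
}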
@@ -3715,9 +3698,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
struct iu_entry *iue = cmd->iue;
int rc;
- pr_debug("write_pending, se_cmd %p, length 0x%x\n",
- se_cmd, se_cmd->data_length);
-
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
@@ -3756,9 +3736,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
uint len = 0;
int rc;
- pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
- se_cmd, se_cmd->data_length);
-
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
deleted file mode 100644
index 3882d9f519c8..000000000000
--- a/drivers/scsi/in2000.c
+++ /dev/null
@@ -1,2302 +0,0 @@
-/*
- * in2000.c - Linux device driver for the
- * Always IN2000 ISA SCSI card.
- *
- * Copyright (c) 1996 John Shifflett, GeoLog Consulting
- * john@geolog.com
- * jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- *
- * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
- * much of the inspiration and some of the code for this driver.
- * The Linux IN2000 driver distributed in the Linux kernels through
- * version 1.2.13 was an extremely valuable reference on the arcane
- * (and still mysterious) workings of the IN2000's fifo. It also
- * is where I lifted in2000_biosparam(), the gist of the card
- * detection scheme, and other bits of code. Many thanks to the
- * talented and courageous people who wrote, contributed to, and
- * maintained that driver (including Brad McLean, Shaun Savage,
- * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
- * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
- * Youngdale). I should also mention the driver written by
- * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
- * in the Linux-m68k distribution; it gave me a good initial
- * understanding of the proper way to run a WD33c93 chip, and I
- * ended up stealing lots of code from it.
- *
- * _This_ driver is (I feel) an improvement over the old one in
- * several respects:
- * - All problems relating to the data size of a SCSI request are
- * gone (as far as I know). The old driver couldn't handle
- * swapping to partitions because that involved 4k blocks, nor
- * could it deal with the st.c tape driver unmodified, because
- * that usually involved 4k - 32k blocks. The old driver never
- * quite got away from a morbid dependence on 2k block sizes -
- * which of course is the size of the card's fifo.
- *
- * - Target Disconnection/Reconnection is now supported. Any
- * system with more than one device active on the SCSI bus
- * will benefit from this. The driver defaults to what I'm
- * calling 'adaptive disconnect' - meaning that each command
- * is evaluated individually as to whether or not it should
- * be run with the option to disconnect/reselect (if the
- * device chooses), or as a "SCSI-bus-hog".
- *
- * - Synchronous data transfers are now supported. Because there
- * are a few devices (and many improperly terminated systems)
- * that choke when doing sync, the default is sync DISABLED
- * for all devices. This faster protocol can (and should!)
- * be enabled on selected devices via the command-line.
- *
- * - Runtime operating parameters can now be specified through
- * either the LILO or the 'insmod' command line. For LILO do:
- * "in2000=blah,blah,blah"
- * and with insmod go like:
- * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
- * The defaults should be good for most people. See the comment
- * for 'setup_strings' below for more details.
- *
- * - The old driver relied exclusively on what the Western Digital
- * docs call "Combination Level 2 Commands", which are a great
- * idea in that the CPU is relieved of a lot of interrupt
- * overhead. However, by accepting a certain (user-settable)
- * amount of additional interrupts, this driver achieves
- * better control over the SCSI bus, and data transfers are
- * almost as fast while being much easier to define, track,
- * and debug.
- *
- * - You can force detection of a card whose BIOS has been disabled.
- *
- * - Multiple IN2000 cards might almost be supported. I've tried to
- * keep it in mind, but have no way to test...
- *
- *
- * TODO:
- * tagged queuing. multiple cards.
- *
- *
- * NOTE:
- * When using this or any other SCSI driver as a module, you'll
- * find that with the stock kernel, at most _two_ SCSI hard
- * drives will be linked into the device list (ie, usable).
- * If your IN2000 card has more than 2 disks on its bus, you
- * might want to change the define of 'SD_EXTRA_DEVS' in the
- * 'hosts.h' file from 2 to whatever is appropriate. It took
- * me a while to track down this surprisingly obscure and
- * undocumented little "feature".
- *
- *
- * People with bug reports, wish-lists, complaints, comments,
- * or improvements are asked to pah-leeez email me (John Shifflett)
- * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
- * this thing into as good a shape as possible, and I'm positive
- * there are lots of lurking bugs and "Stupid Places".
- *
- * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
- * - Using new_eh handler
- * - Hopefully got all the locking right again
- * See "FIXME" notes for items that could do with more work
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/ioport.h>
-#include <linux/stat.h>
-
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#define IN2000_VERSION "1.33-2.5"
-#define IN2000_DATE "2002/11/03"
-
-#include "in2000.h"
-
-
-/*
- * 'setup_strings' is a single string used to pass operating parameters and
- * settings from the kernel/module command-line to the driver. 'setup_args[]'
- * is an array of strings that define the compile-time default values for
- * these settings. If Linux boots with a LILO or insmod command-line, those
- * settings are combined with 'setup_args[]'. Note that LILO command-lines
- * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
- * The driver recognizes the following keywords (lower case required) and
- * arguments:
- *
- * - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
- * - noreset -No optional args. Prevents SCSI bus reset at boot time.
- * - nosync:x -x is a bitmask where the 1st 7 bits correspond with
- * the 7 possible SCSI devices (bit 0 for device #0, etc).
- * Set a bit to PREVENT sync negotiation on that device.
- * The driver default is sync DISABLED on all devices.
- * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
- * period. Default is 500; acceptable values are 250 - 1000.
- * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
- * x = 1 does 'adaptive' disconnects, which is the default
- * and generally the best choice.
- * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
- * various types of debug output to printed - see the DB_xxx
- * defines in in2000.h
- * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
- * determines how the /proc interface works and what it
- * does - see the PR_xxx defines in in2000.h
- *
- * Syntax Notes:
- * - Numeric arguments can be decimal or the '0x' form of hex notation. There
- * _must_ be a colon between a keyword and its numeric argument, with no
- * spaces.
- * - Keywords are separated by commas, no spaces, in the standard kernel
- * command-line manner.
- * - A keyword in the 'nth' comma-separated command-line member will overwrite
- * the 'nth' element of setup_args[]. A blank command-line member (in
- * other words, a comma with no preceding keyword) will _not_ overwrite
- * the corresponding setup_args[] element.
- *
- * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
- * - in2000=ioport:0x220,noreset
- * - in2000=period:250,disconnect:2,nosync:0x03
- * - in2000=debug:0x1e
- * - in2000=proc:3
- */
-
-/* Normally, no defaults are specified... */
-static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
-
-/* filled in by 'insmod' */
-static char *setup_strings;
-
-module_param(setup_strings, charp, 0);
-
-static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
-{
- write1_io(reg_num, IO_WD_ADDR);
- return read1_io(IO_WD_DATA);
-}
-
-
-#define READ_AUX_STAT() read1_io(IO_WD_ASR)
-
-
-static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
-{
- write1_io(reg_num, IO_WD_ADDR);
- write1_io(value, IO_WD_DATA);
-}
-
-
-static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
-{
-/* while (READ_AUX_STAT() & ASR_CIP)
- printk("|");*/
- write1_io(WD_COMMAND, IO_WD_ADDR);
- write1_io(cmd, IO_WD_DATA);
-}
-
-
-static uchar read_1_byte(struct IN2000_hostdata *hostdata)
-{
- uchar asr, x = 0;
-
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
- write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
- do {
- asr = READ_AUX_STAT();
- if (asr & ASR_DBR)
- x = read_3393(hostdata, WD_DATA);
- } while (!(asr & ASR_INT));
- return x;
-}
-
-
-static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
-{
- write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
- write1_io((value >> 16), IO_WD_DATA);
- write1_io((value >> 8), IO_WD_DATA);
- write1_io(value, IO_WD_DATA);
-}
-
-
-static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
-{
- unsigned long value;
-
- write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
- value = read1_io(IO_WD_DATA) << 16;
- value |= read1_io(IO_WD_DATA) << 8;
- value |= read1_io(IO_WD_DATA);
- return value;
-}
-
-
-/* The 33c93 needs to be told which direction a command transfers its
- * data; we use this function to figure it out. Returns true if there
- * will be a DATA_OUT phase with this command, false otherwise.
- * (Thanks to Joerg Dorchain for the research and suggestion.)
- */
-static int is_dir_out(Scsi_Cmnd * cmd)
-{
- switch (cmd->cmnd[0]) {
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_LONG:
- case WRITE_SAME:
- case WRITE_BUFFER:
- case WRITE_VERIFY:
- case WRITE_VERIFY_12:
- case COMPARE:
- case COPY:
- case COPY_VERIFY:
- case SEARCH_EQUAL:
- case SEARCH_HIGH:
- case SEARCH_LOW:
- case SEARCH_EQUAL_12:
- case SEARCH_HIGH_12:
- case SEARCH_LOW_12:
- case FORMAT_UNIT:
- case REASSIGN_BLOCKS:
- case RESERVE:
- case MODE_SELECT:
- case MODE_SELECT_10:
- case LOG_SELECT:
- case SEND_DIAGNOSTIC:
- case CHANGE_DEFINITION:
- case UPDATE_BLOCK:
- case SET_WINDOW:
- case MEDIUM_SCAN:
- case SEND_VOLUME_TAG:
- case 0xea:
- return 1;
- default:
- return 0;
- }
-}
-
-
-
-static struct sx_period sx_table[] = {
- {1, 0x20},
- {252, 0x20},
- {376, 0x30},
- {500, 0x40},
- {624, 0x50},
- {752, 0x60},
- {876, 0x70},
- {1000, 0x00},
- {0, 0}
-};
-
-static int round_period(unsigned int period)
-{
- int x;
-
- for (x = 1; sx_table[x].period_ns; x++) {
- if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) {
- return x;
- }
- }
- return 7;
-}
-
-static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
-{
- uchar result;
-
- period *= 4; /* convert SDTR code to ns */
- result = sx_table[round_period(period)].reg_value;
- result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
- return result;
-}
-
-
-
-static void in2000_execute(struct Scsi_Host *instance);
-
-static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
-{
- struct Scsi_Host *instance;
- struct IN2000_hostdata *hostdata;
- Scsi_Cmnd *tmp;
-
- instance = cmd->device->host;
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
- DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
-
-/* Set up a few fields in the Scsi_Cmnd structure for our own use:
- * - host_scribble is the pointer to the next cmd in the input queue
- * - scsi_done points to the routine we call when a cmd is finished
- * - result is what you'd expect
- */
- cmd->host_scribble = NULL;
- cmd->scsi_done = done;
- cmd->result = 0;
-
-/* We use the Scsi_Pointer structure that's included with each command
- * as a scratchpad (as it's intended to be used!). The handy thing about
- * the SCp.xxx fields is that they're always associated with a given
- * cmd, and are preserved across disconnect-reselect. This means we
- * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
- * if we keep all the critical pointers and counters in SCp:
- * - SCp.ptr is the pointer into the RAM buffer
- * - SCp.this_residual is the size of that buffer
- * - SCp.buffer points to the current scatter-gather buffer
- * - SCp.buffers_residual tells us how many S.G. buffers there are
- * - SCp.have_data_in helps keep track of >2048 byte transfers
- * - SCp.sent_command is not used
- * - SCp.phase records this command's SRCID_ER bit setting
- */
-
- if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- } else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
- }
- cmd->SCp.have_data_in = 0;
-
-/* We don't set SCp.phase here - that's done in in2000_execute() */
-
-/* WD docs state that at the conclusion of a "LEVEL2" command, the
- * status byte can be retrieved from the LUN register. Apparently,
- * this is the case only for *uninterrupted* LEVEL2 commands! If
- * there are any unexpected phases entered, even if they are 100%
- * legal (different devices may choose to do things differently),
- * the LEVEL2 command sequence is exited. This often occurs prior
- * to receiving the status byte, in which case the driver does a
- * status phase interrupt and gets the status byte on its own.
- * While such a command can then be "resumed" (ie restarted to
- * finish up as a LEVEL2 command), the LUN register will NOT be
- * a valid status byte at the command's conclusion, and we must
- * use the byte obtained during the earlier interrupt. Here, we
- * preset SCp.Status to an illegal value (0xff) so that when
- * this command finally completes, we can tell where the actual
- * status byte is stored.
- */
-
- cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
-
-/* We need to disable interrupts before messing with the input
- * queue and calling in2000_execute().
- */
-
- /*
- * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
- * commands are added to the head of the queue so that the desired
- * sense data is not lost before REQUEST_SENSE executes.
- */
-
- if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
- cmd->host_scribble = (uchar *) hostdata->input_Q;
- hostdata->input_Q = cmd;
- } else { /* find the end of the queue */
- for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
- tmp->host_scribble = (uchar *) cmd;
- }
-
-/* We know that there's at least one command in 'input_Q' now.
- * Go see if any of them are runnable!
- */
-
- in2000_execute(cmd->device->host);
-
- DB(DB_QUEUE_COMMAND, printk(")Q "))
- return 0;
-}
-
-static DEF_SCSI_QCMD(in2000_queuecommand)
-
-
-
-/*
- * This routine attempts to start a scsi command. If the host_card is
- * already connected, we give up immediately. Otherwise, look through
- * the input_Q, using the first command we find that's intended
- * for a currently non-busy target/lun.
- * Note that this function is always called with interrupts already
- * disabled (either from in2000_queuecommand() or in2000_intr()).
- */
-static void in2000_execute(struct Scsi_Host *instance)
-{
- struct IN2000_hostdata *hostdata;
- Scsi_Cmnd *cmd, *prev;
- int i;
- unsigned short *sp;
- unsigned short f;
- unsigned short flushbuf[16];
-
-
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
- DB(DB_EXECUTE, printk("EX("))
-
- if (hostdata->selecting || hostdata->connected) {
-
- DB(DB_EXECUTE, printk(")EX-0 "))
-
- return;
- }
-
- /*
- * Search through the input_Q for a command destined
- * for an idle target/lun.
- */
-
- cmd = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
- while (cmd) {
- if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
- break;
- prev = cmd;
- cmd = (Scsi_Cmnd *) cmd->host_scribble;
- }
-
- /* quit if queue empty or all possible targets are busy */
-
- if (!cmd) {
-
- DB(DB_EXECUTE, printk(")EX-1 "))
-
- return;
- }
-
- /* remove command from queue */
-
- if (prev)
- prev->host_scribble = cmd->host_scribble;
- else
- hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
-
-#ifdef PROC_STATISTICS
- hostdata->cmd_cnt[cmd->device->id]++;
-#endif
-
-/*
- * Start the selection process
- */
-
- if (is_dir_out(cmd))
- write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
- else
- write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
-
-/* Now we need to figure out whether or not this command is a good
- * candidate for disconnect/reselect. We guess to the best of our
- * ability, based on a set of hierarchical rules. When several
- * devices are operating simultaneously, disconnects are usually
- * an advantage. In a single device system, or if only 1 device
- * is being accessed, transfers usually go faster if disconnects
- * are not allowed:
- *
- * + Commands should NEVER disconnect if hostdata->disconnect =
- * DIS_NEVER (this holds for tape drives also), and ALWAYS
- * disconnect if hostdata->disconnect = DIS_ALWAYS.
- * + Tape drive commands should always be allowed to disconnect.
- * + Disconnect should be allowed if disconnected_Q isn't empty.
- * + Commands should NOT disconnect if input_Q is empty.
- * + Disconnect should be allowed if there are commands in input_Q
- * for a different target/lun. In this case, the other commands
- * should be made disconnect-able, if not already.
- *
- * I know, I know - this code would flunk me out of any
- * "C Programming 101" class ever offered. But it's easy
- * to change around and experiment with for now.
- */
-
- cmd->SCp.phase = 0; /* assume no disconnect */
- if (hostdata->disconnect == DIS_NEVER)
- goto no;
- if (hostdata->disconnect == DIS_ALWAYS)
- goto yes;
- if (cmd->device->type == 1) /* tape drive? */
- goto yes;
- if (hostdata->disconnected_Q) /* other commands disconnected? */
- goto yes;
- if (!(hostdata->input_Q)) /* input_Q empty? */
- goto no;
- for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
- if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
- for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
- prev->SCp.phase = 1;
- goto yes;
- }
- }
- goto no;
-
- yes:
- cmd->SCp.phase = 1;
-
-#ifdef PROC_STATISTICS
- hostdata->disc_allowed_cnt[cmd->device->id]++;
-#endif
-
- no:
- write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
-
- write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
- write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-
- if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
-
- /*
- * Do a 'Select-With-ATN' command. This will end with
- * one of the following interrupts:
- * CSR_RESEL_AM: failure - can try again later.
- * CSR_TIMEOUT: failure - give up.
- * CSR_SELECT: success - proceed.
- */
-
- hostdata->selecting = cmd;
-
-/* Every target has its own synchronous transfer setting, kept in
- * the sync_xfer array, and a corresponding status byte in sync_stat[].
- * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
- * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
- * means that the parameters are undetermined as yet, and that we
- * need to send an SDTR message to this device after selection is
- * complete. We set SS_FIRST to tell the interrupt routine to do so,
- * unless we don't want to even _try_ synchronous transfers: In this
- * case we set SS_SET to make the defaults final.
- */
- if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
- if (hostdata->sync_off & (1 << cmd->device->id))
- hostdata->sync_stat[cmd->device->id] = SS_SET;
- else
- hostdata->sync_stat[cmd->device->id] = SS_FIRST;
- }
- hostdata->state = S_SELECTING;
- write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
- }
-
- else {
-
- /*
- * Do a 'Select-With-ATN-Xfer' command. This will end with
- * one of the following interrupts:
- * CSR_RESEL_AM: failure - can try again later.
- * CSR_TIMEOUT: failure - give up.
- * anything else: success - proceed.
- */
-
- hostdata->connected = cmd;
- write_3393(hostdata, WD_COMMAND_PHASE, 0);
-
- /* copy command_descriptor_block into WD chip
- * (take advantage of auto-incrementing)
- */
-
- write1_io(WD_CDB_1, IO_WD_ADDR);
- for (i = 0; i < cmd->cmd_len; i++)
- write1_io(cmd->cmnd[i], IO_WD_DATA);
-
- /* The wd33c93 only knows about Group 0, 1, and 5 commands when
- * it's doing a 'select-and-transfer'. To be safe, we write the
- * size of the CDB into the OWN_ID register for every case. This
- * way there won't be problems with vendor-unique, audio, etc.
- */
-
- write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
-
- /* When doing a non-disconnect command, we can save ourselves a DATA
- * phase interrupt later by setting everything up now. With writes we
- * need to pre-fill the fifo; if there's room for the 32 flush bytes,
- * put them in there too - that'll avoid a fifo interrupt. Reads are
- * somewhat simpler.
- * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
- * This results in the IO_FIFO_COUNT register rolling over to zero,
- * and apparently the gate array logic sees this as empty, not full,
- * so the 3393 chip is never signalled to start reading from the
- * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
- * Regardless, we fix this by temporarily pretending that the fifo
- * is 16 bytes smaller. (I see now that the old driver has a comment
- * about "don't fill completely" in an analogous place - must be the
- * same deal.) This results in CDROM, swap partitions, and tape drives
- * needing an extra interrupt per write command - I think we can live
- * with that!
- */
-
- if (!(cmd->SCp.phase)) {
- write_3393_count(hostdata, cmd->SCp.this_residual);
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
- write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
-
- if (is_dir_out(cmd)) {
- hostdata->fifo = FI_FIFO_WRITING;
- if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
- i = IN2000_FIFO_SIZE - 16;
- cmd->SCp.have_data_in = i; /* this much data in fifo */
- i >>= 1; /* Gulp. Assuming modulo 2. */
- sp = (unsigned short *) cmd->SCp.ptr;
- f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_WRITE_IO
-
- FAST_WRITE2_IO();
-#else
- while (i--)
- write2_io(*sp++, IO_FIFO);
-
-#endif
-
- /* Is there room for the flush bytes? */
-
- if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
- sp = flushbuf;
- i = 16;
-
-#ifdef FAST_WRITE_IO
-
- FAST_WRITE2_IO();
-#else
- while (i--)
- write2_io(0, IO_FIFO);
-
-#endif
-
- }
- }
-
- else {
- write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
- hostdata->fifo = FI_FIFO_READING;
- cmd->SCp.have_data_in = 0; /* nothing transferred yet */
- }
-
- } else {
- write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
- }
- hostdata->state = S_RUNNING_LEVEL2;
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- }
-
- /*
- * Since the SCSI bus can handle only 1 connection at a time,
- * we get out of here now. If the selection fails, or when
- * the command disconnects, we'll come back to this routine
- * to search the input_Q again...
- */
-
- DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
-
-}
-
-
-
-static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
-{
- uchar asr;
-
- DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))
-
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
- write_3393_count(hostdata, cnt);
- write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
- if (data_in_dir) {
- do {
- asr = READ_AUX_STAT();
- if (asr & ASR_DBR)
- *buf++ = read_3393(hostdata, WD_DATA);
- } while (!(asr & ASR_INT));
- } else {
- do {
- asr = READ_AUX_STAT();
- if (asr & ASR_DBR)
- write_3393(hostdata, WD_DATA, *buf++);
- } while (!(asr & ASR_INT));
- }
-
- /* Note: we are returning with the interrupt UN-cleared.
- * Since (presumably) an entire I/O operation has
- * completed, the bus phase is probably different, and
- * the interrupt routine will discover this when it
- * responds to the uncleared int.
- */
-
-}
-
-
-
-static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
-{
- struct IN2000_hostdata *hostdata;
- unsigned short *sp;
- unsigned short f;
- int i;
-
- hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;
-
-/* Normally, you'd expect 'this_residual' to be non-zero here.
- * In a series of scatter-gather transfers, however, this
- * routine will usually be called with 'this_residual' equal
- * to 0 and 'buffers_residual' non-zero. This means that a
- * previous transfer completed, clearing 'this_residual', and
- * now we need to set up the next scatter-gather buffer as the
- * source or destination for THIS transfer.
- */
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- }
-
-/* Set up hardware registers */
-
- write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
- write_3393_count(hostdata, cmd->SCp.this_residual);
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
- write1_io(0, IO_FIFO_WRITE); /* zero counter, assume write */
-
-/* Reading is easy. Just issue the command and return - we'll
- * get an interrupt later when we have actual data to worry about.
- */
-
- if (data_in_dir) {
- write1_io(0, IO_FIFO_READ);
- if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
- write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- hostdata->state = S_RUNNING_LEVEL2;
- } else
- write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
- hostdata->fifo = FI_FIFO_READING;
- cmd->SCp.have_data_in = 0;
- return;
- }
-
-/* Writing is more involved - we'll start the WD chip and write as
- * much data to the fifo as we can right now. Later interrupts will
- * write any bytes that don't make it at this stage.
- */
-
- if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
- write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- hostdata->state = S_RUNNING_LEVEL2;
- } else
- write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
- hostdata->fifo = FI_FIFO_WRITING;
- sp = (unsigned short *) cmd->SCp.ptr;
-
- if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
- i = IN2000_FIFO_SIZE;
- cmd->SCp.have_data_in = i;
- i >>= 1; /* Gulp. We assume this_residual is modulo 2 */
- f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_WRITE_IO
-
- FAST_WRITE2_IO();
-#else
- while (i--)
- write2_io(*sp++, IO_FIFO);
-
-#endif
-
-}
-
-
-/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
- * function in order to work in an SMP environment. (I'd be surprised
- * if the driver is ever used by anyone on a real multi-CPU motherboard,
- * but it _does_ need to be able to compile and run in an SMP kernel.)
- */
-
-static irqreturn_t in2000_intr(int irqnum, void *dev_id)
-{
- struct Scsi_Host *instance = dev_id;
- struct IN2000_hostdata *hostdata;
- Scsi_Cmnd *patch, *cmd;
- uchar asr, sr, phs, id, lun, *ucp, msg;
- int i, j;
- unsigned long length;
- unsigned short *sp;
- unsigned short f;
- unsigned long flags;
-
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-/* Get the spin_lock and disable further ints, for SMP */
-
- spin_lock_irqsave(instance->host_lock, flags);
-
-#ifdef PROC_STATISTICS
- hostdata->int_cnt++;
-#endif
-
-/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
- * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
- * with a big logic array, so it's a little different than what you might
- * expect). As far as I know, there's no reason that BOTH can't be active
- * at the same time, but there's a problem: while we can read the 3393
- * to tell if _it_ wants an interrupt, I don't know of a way to ask the
- * fifo the same question. The best we can do is check the 3393 and if
- * it _isn't_ the source of the interrupt, then we can be pretty sure
- * that the fifo is the culprit.
- * UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
- * IO_FIFO_COUNT register mirrors the fifo interrupt state. I
- * assume that bit clear means interrupt active. As it turns
- * out, the driver really doesn't need to check for this after
- * all, so my remarks above about a 'problem' can safely be
- * ignored. The way the logic is set up, there's no advantage
- * (that I can see) to worrying about it.
- *
- * It seems that the fifo interrupt signal is negated when we extract
- * bytes from the fifo during a read, or feed bytes into it during a write.
- * - fifo will interrupt when data is moving from it to the 3393, and
- * there are 31 (or less?) bytes left to go. This is sort of short-
- * sighted: what if you don't WANT to do more? In any case, our
- * response is to push more into the fifo - either actual data or
- * dummy bytes if need be. Note that we apparently have to write at
- * least 32 additional bytes to the fifo after an interrupt in order
- * to get it to release the ones it was holding on to - writing fewer
- * than 32 will result in another fifo int.
- * UPDATE: Again, info from Bill Earnest makes this more understandable:
- * 32 bytes = two counts of the fifo counter register. He tells
- * me that the fifo interrupt is a non-latching signal derived
- * from a straightforward boolean interpretation of the 7
- * highest bits of the fifo counter and the fifo-read/fifo-write
- * state. Who'd a thought?
- */
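-/* A worked example of the counter arithmetic used below: the byte count
- * lives in bits 7-1 of IO_FIFO_COUNT, in units of 16 bytes (32 bytes being
- * "two counts", per the note above). A register value of 0x21 therefore
- * means (0x21 & 0xfe) << 2 = 128 words, i.e. 256 bytes sitting in the fifo.
- */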
-
- write1_io(0, IO_LED_ON);
- asr = READ_AUX_STAT();
- if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */
-
-/* Ok. This is definitely a FIFO-only interrupt.
- *
- * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
- * maybe more to come from the SCSI bus. Read as many as we can out of the
- * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
- * update have_data_in afterwards.
- *
- * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
- * into the WD3393 chip (I think the interrupt happens when there are 31
- * bytes left, but it may be fewer...). The 3393 is still waiting, so we
- * shove some more into the fifo, which gets things moving again. If the
- * original SCSI command specified more than 2048 bytes, there may still
- * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
- * Don't forget to update have_data_in. If we've already written out the
- * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
- * push out the remaining real data.
- * (Big thanks to Bill Earnest for getting me out of the mud in here.)
- */
-
- cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
- CHECK_NULL(cmd, "fifo_int")
-
- if (hostdata->fifo == FI_FIFO_READING) {
-
- DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))
-
- sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
- i = read1_io(IO_FIFO_COUNT) & 0xfe;
- i <<= 2; /* # of words waiting in the fifo */
- f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_READ_IO
-
- FAST_READ2_IO();
-#else
- while (i--)
- *sp++ = read2_io(IO_FIFO);
-
-#endif
-
- i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
- i <<= 1;
- cmd->SCp.have_data_in += i;
- }
-
- else if (hostdata->fifo == FI_FIFO_WRITING) {
-
- DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))
-
-/* If all bytes have been written to the fifo, flush out the stragglers.
- * Note that while writing 16 dummy words seems arbitrary, we don't
- * have another choice that I can see. What we really want is to read
- * the 3393 transfer count register (that would tell us how many bytes
- * needed flushing), but the TRANSFER_INFO command hasn't completed
- * yet (not enough bytes!) and that register won't be accessible. So,
- * we use 16 words - a number obtained through trial and error.
- * UPDATE: Bill says this is exactly what Always does, so there.
- * More thanks due him for help in this section.
- */
- if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
- i = 16;
- while (i--) /* write 32 dummy bytes */
- write2_io(0, IO_FIFO);
- }
-
-/* If there are still bytes left in the SCSI buffer, write as many as we
- * can out to the fifo.
- */
-
- else {
- sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
- i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */
- j = read1_io(IO_FIFO_COUNT) & 0xfe;
- j <<= 2; /* how many words the fifo has room for */
- if ((j << 1) > i)
- j = (i >> 1);
- while (j--)
- write2_io(*sp++, IO_FIFO);
-
- i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
- i <<= 1;
- cmd->SCp.have_data_in += i;
- }
- }
-
- else {
- printk("*** Spurious FIFO interrupt ***");
- }
-
- write1_io(0, IO_LED_OFF);
-
-/* release the SMP spin_lock and restore irq state */
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
- }
-
-/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
- * may also be asserted, but we don't bother to check it: we get more
- * detailed info from FIFO_READING and FIFO_WRITING (see below).
- */
-
- cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
- sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear the interrupt */
- phs = read_3393(hostdata, WD_COMMAND_PHASE);
-
- if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
- printk("\nNR:wd-intr-1\n");
- write1_io(0, IO_LED_OFF);
-
-/* release the SMP spin_lock and restore irq state */
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
- }
-
- DB(DB_INTR, printk("{%02x:%02x-", asr, sr))
-
-/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
- * guaranteed to be in response to the completion of the transfer.
- * If we were reading, there's probably data in the fifo that needs
- * to be copied into RAM - do that here. Also, we have to update
- * 'this_residual' and 'ptr' based on the contents of the
- * TRANSFER_COUNT register, in case the device decided to do an
- * intermediate disconnect (a device may do this if it has to
- * do a seek, or just to be nice and let other devices have
- * some bus time during long transfers).
- * After doing whatever is necessary with the fifo, we go on and
- * service the WD3393 interrupt normally.
- */
- if (hostdata->fifo == FI_FIFO_READING) {
-
-/* buffer index = start-of-buffer + #-of-bytes-already-read */
-
- sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-
-/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
-
- i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
- i >>= 1; /* Gulp. We assume this will always be modulo 2 */
- f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_READ_IO
-
- FAST_READ2_IO();
-#else
- while (i--)
- *sp++ = read2_io(IO_FIFO);
-
-#endif
-
- hostdata->fifo = FI_FIFO_UNUSED;
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_3393_count(hostdata);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
-
- DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
-
- }
-
- else if (hostdata->fifo == FI_FIFO_WRITING) {
- hostdata->fifo = FI_FIFO_UNUSED;
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_3393_count(hostdata);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
-
- DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
-
- }
-
-/* Respond to the specific WD3393 interrupt - there are quite a few! */
-
- switch (sr) {
-
- case CSR_TIMEOUT:
- DB(DB_INTR, printk("TIMEOUT"))
-
- if (hostdata->state == S_RUNNING_LEVEL2)
- hostdata->connected = NULL;
- else {
- cmd = (Scsi_Cmnd *) hostdata->selecting; /* get a valid cmd */
- CHECK_NULL(cmd, "csr_timeout")
- hostdata->selecting = NULL;
- }
-
- cmd->result = DID_NO_CONNECT << 16;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- hostdata->state = S_UNCONNECTED;
- cmd->scsi_done(cmd);
-
-/* We are not connected to a target - check to see if there
- * are commands waiting to be executed.
- */
-
- in2000_execute(instance);
- break;
-
-
-/* Note: this interrupt should not occur in a LEVEL2 command */
-
- case CSR_SELECT:
- DB(DB_INTR, printk("SELECT"))
- hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
- CHECK_NULL(cmd, "csr_select")
- hostdata->selecting = NULL;
-
- /* construct an IDENTIFY message with correct disconnect bit */
-
- hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
- if (cmd->SCp.phase)
- hostdata->outgoing_msg[0] |= 0x40;
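-	    /* (0x80 is the IDENTIFY message itself, the low three bits carry
-	     * the LUN, and 0x40 is the 'disconnect allowed' bit - set only
-	     * when this command was queued with disconnection permitted,
-	     * i.e. SCp.phase != 0.)
-	     */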
-
- if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
-#ifdef SYNC_DEBUG
- printk(" sending SDTR ");
-#endif
-
- hostdata->sync_stat[cmd->device->id] = SS_WAITING;
-
- /* tack on a 2nd message to ask about synchronous transfers */
-
- hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
- hostdata->outgoing_msg[2] = 3;
- hostdata->outgoing_msg[3] = EXTENDED_SDTR;
- hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
- hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
- hostdata->outgoing_len = 6;
- } else
- hostdata->outgoing_len = 1;
-
- hostdata->state = S_CONNECTED;
- break;
-
-
- case CSR_XFER_DONE | PHS_DATA_IN:
- case CSR_UNEXP | PHS_DATA_IN:
- case CSR_SRV_REQ | PHS_DATA_IN:
- DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
- transfer_bytes(cmd, DATA_IN_DIR);
- if (hostdata->state != S_RUNNING_LEVEL2)
- hostdata->state = S_CONNECTED;
- break;
-
-
- case CSR_XFER_DONE | PHS_DATA_OUT:
- case CSR_UNEXP | PHS_DATA_OUT:
- case CSR_SRV_REQ | PHS_DATA_OUT:
- DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
- transfer_bytes(cmd, DATA_OUT_DIR);
- if (hostdata->state != S_RUNNING_LEVEL2)
- hostdata->state = S_CONNECTED;
- break;
-
-
-/* Note: this interrupt should not occur in a LEVEL2 command */
-
- case CSR_XFER_DONE | PHS_COMMAND:
- case CSR_UNEXP | PHS_COMMAND:
- case CSR_SRV_REQ | PHS_COMMAND:
- DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
- transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
- hostdata->state = S_CONNECTED;
- break;
-
-
- case CSR_XFER_DONE | PHS_STATUS:
- case CSR_UNEXP | PHS_STATUS:
- case CSR_SRV_REQ | PHS_STATUS:
- DB(DB_INTR, printk("STATUS="))
-
- cmd->SCp.Status = read_1_byte(hostdata);
- DB(DB_INTR, printk("%02x", cmd->SCp.Status))
- if (hostdata->level2 >= L2_BASIC) {
- sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
- hostdata->state = S_RUNNING_LEVEL2;
- write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- } else {
- hostdata->state = S_CONNECTED;
- }
- break;
-
-
- case CSR_XFER_DONE | PHS_MESS_IN:
- case CSR_UNEXP | PHS_MESS_IN:
- case CSR_SRV_REQ | PHS_MESS_IN:
- DB(DB_INTR, printk("MSG_IN="))
-
- msg = read_1_byte(hostdata);
- sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
-
- hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
- if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
- msg = EXTENDED_MESSAGE;
- else
- hostdata->incoming_ptr = 0;
-
- cmd->SCp.Message = msg;
- switch (msg) {
-
- case COMMAND_COMPLETE:
- DB(DB_INTR, printk("CCMP"))
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_PRE_CMP_DISC;
- break;
-
- case SAVE_POINTERS:
- DB(DB_INTR, printk("SDP"))
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- break;
-
- case RESTORE_POINTERS:
- DB(DB_INTR, printk("RDP"))
- if (hostdata->level2 >= L2_BASIC) {
- write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- hostdata->state = S_RUNNING_LEVEL2;
- } else {
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- }
- break;
-
- case DISCONNECT:
- DB(DB_INTR, printk("DIS"))
- cmd->device->disconnect = 1;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_PRE_TMP_DISC;
- break;
-
- case MESSAGE_REJECT:
- DB(DB_INTR, printk("REJ"))
-#ifdef SYNC_DEBUG
- printk("-REJ-");
-#endif
- if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
- hostdata->sync_stat[cmd->device->id] = SS_SET;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- break;
-
- case EXTENDED_MESSAGE:
- DB(DB_INTR, printk("EXT"))
-
- ucp = hostdata->incoming_msg;
-
-#ifdef SYNC_DEBUG
- printk("%02x", ucp[hostdata->incoming_ptr]);
-#endif
- /* Is this the last byte of the extended message? */
-
- if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {
-
- switch (ucp[2]) { /* what's the EXTENDED code? */
- case EXTENDED_SDTR:
- id = calc_sync_xfer(ucp[3], ucp[4]);
- if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {
-
-/* A device has sent an unsolicited SDTR message; rather than go
- * through the effort of decoding it and then figuring out what
- * our reply should be, we're just gonna say that we have a
- * synchronous fifo depth of 0. This will result in asynchronous
- * transfers - not ideal but so much easier.
- * Actually, this is OK because it assures us that if we don't
- * specifically ask for sync transfers, we won't do any.
- */
-
- write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
- hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
- hostdata->outgoing_msg[1] = 3;
- hostdata->outgoing_msg[2] = EXTENDED_SDTR;
- hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
- hostdata->outgoing_msg[4] = 0;
- hostdata->outgoing_len = 5;
- hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
- } else {
- hostdata->sync_xfer[cmd->device->id] = id;
- }
-#ifdef SYNC_DEBUG
- printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
-#endif
- hostdata->sync_stat[cmd->device->id] = SS_SET;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- break;
- case EXTENDED_WDTR:
- write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
- printk("sending WDTR ");
- hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
- hostdata->outgoing_msg[1] = 2;
- hostdata->outgoing_msg[2] = EXTENDED_WDTR;
- hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
- hostdata->outgoing_len = 4;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- break;
- default:
- write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
- printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
- hostdata->outgoing_msg[0] = MESSAGE_REJECT;
- hostdata->outgoing_len = 1;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- break;
- }
- hostdata->incoming_ptr = 0;
- }
-
- /* We need to read more MESS_IN bytes for the extended message */
-
- else {
- hostdata->incoming_ptr++;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- }
- break;
-
- default:
- printk("Rejecting Unknown Message(%02x) ", msg);
- write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
- hostdata->outgoing_msg[0] = MESSAGE_REJECT;
- hostdata->outgoing_len = 1;
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- hostdata->state = S_CONNECTED;
- }
- break;
-
-
-/* Note: this interrupt will occur only after a LEVEL2 command */
-
- case CSR_SEL_XFER_DONE:
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
- write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
- if (phs == 0x60) {
- DB(DB_INTR, printk("SX-DONE"))
- cmd->SCp.Message = COMMAND_COMPLETE;
- lun = read_3393(hostdata, WD_TARGET_LUN);
- DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
- hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- hostdata->state = S_UNCONNECTED;
- if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
- cmd->SCp.Status = lun;
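-			/* (The 'lun' variable does double duty here: after a
-			 * completed select-and-transfer the WD_TARGET_LUN
-			 * register apparently holds the target's status byte,
-			 * so it fills in SCp.Status when that field is still
-			 * at its ILLEGAL_STATUS_BYTE value.)
-			 */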
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
- cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
- in2000_execute(instance);
- } else {
- printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
- }
- break;
-
-
-/* Note: this interrupt will occur only after a LEVEL2 command */
-
- case CSR_SDP:
- DB(DB_INTR, printk("SDP"))
- hostdata->state = S_RUNNING_LEVEL2;
- write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- break;
-
-
- case CSR_XFER_DONE | PHS_MESS_OUT:
- case CSR_UNEXP | PHS_MESS_OUT:
- case CSR_SRV_REQ | PHS_MESS_OUT:
- DB(DB_INTR, printk("MSG_OUT="))
-
-/* To get here, we've probably requested MESSAGE_OUT and have
- * already put the correct bytes in outgoing_msg[] and filled
- * in outgoing_len. We simply send them out to the SCSI bus.
- * Sometimes we get MESSAGE_OUT phase when we're not expecting
- * it - like when our SDTR message is rejected by a target. Some
- * targets send the REJECT before receiving all of the extended
- * message, and then seem to go back to MESSAGE_OUT for a byte
- * or two. Not sure why, or if I'm doing something wrong to
- * cause this to happen. Regardless, it seems that sending
- * NOP messages in these situations results in no harm and
- * makes everyone happy.
- */
- if (hostdata->outgoing_len == 0) {
- hostdata->outgoing_len = 1;
- hostdata->outgoing_msg[0] = NOP;
- }
- transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
- DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
- hostdata->outgoing_len = 0;
- hostdata->state = S_CONNECTED;
- break;
-
-
- case CSR_UNEXP_DISC:
-
-/* I think I've seen this after a request-sense that was in response
- * to an error condition, but not sure. We certainly need to do
- * something when we get this interrupt - the question is 'what?'.
- * Let's think positively, and assume some command has finished
- * in a legal manner (like a command that provokes a request-sense),
- * so we treat it as a normal command-complete-disconnect.
- */
-
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
- write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
- if (cmd == NULL) {
- printk(" - Already disconnected! ");
- hostdata->state = S_UNCONNECTED;
-
-/* release the SMP spin_lock and restore irq state */
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
- }
- DB(DB_INTR, printk("UNEXP_DISC"))
- hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- hostdata->state = S_UNCONNECTED;
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
- cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
- in2000_execute(instance);
- break;
-
-
- case CSR_DISC:
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
- write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
- DB(DB_INTR, printk("DISC"))
- if (cmd == NULL) {
- printk(" - Already disconnected! ");
- hostdata->state = S_UNCONNECTED;
- }
- switch (hostdata->state) {
- case S_PRE_CMP_DISC:
- hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- hostdata->state = S_UNCONNECTED;
- DB(DB_INTR, printk(":%d", cmd->SCp.Status))
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
- cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- cmd->scsi_done(cmd);
- break;
- case S_PRE_TMP_DISC:
- case S_RUNNING_LEVEL2:
- cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
- hostdata->disconnected_Q = cmd;
- hostdata->connected = NULL;
- hostdata->state = S_UNCONNECTED;
-
-#ifdef PROC_STATISTICS
- hostdata->disc_done_cnt[cmd->device->id]++;
-#endif
-
- break;
- default:
- printk("*** Unexpected DISCONNECT interrupt! ***");
- hostdata->state = S_UNCONNECTED;
- }
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
- in2000_execute(instance);
- break;
-
-
- case CSR_RESEL_AM:
- DB(DB_INTR, printk("RESEL"))
-
- /* First we have to make sure this reselection didn't */
- /* happen during Arbitration/Selection of some other device. */
- /* If yes, put losing command back on top of input_Q. */
- if (hostdata->level2 <= L2_NONE) {
-
- if (hostdata->selecting) {
- cmd = (Scsi_Cmnd *) hostdata->selecting;
- hostdata->selecting = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- cmd->host_scribble = (uchar *) hostdata->input_Q;
- hostdata->input_Q = cmd;
- }
- }
-
- else {
-
- if (cmd) {
- if (phs == 0x00) {
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- cmd->host_scribble = (uchar *) hostdata->input_Q;
- hostdata->input_Q = cmd;
- } else {
- printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
- while (1)
- printk("\r");
- }
- }
-
- }
-
- /* OK - find out which device reselected us. */
-
- id = read_3393(hostdata, WD_SOURCE_ID);
- id &= SRCID_MASK;
-
- /* and extract the lun from the ID message. (Note that we don't
- * bother to check for a valid message here - I guess this is
- * not the right way to go, but....)
- */
-
- lun = read_3393(hostdata, WD_DATA);
- if (hostdata->level2 < L2_RESELECT)
- write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
- lun &= 7;
-
- /* Now we look for the command that's reconnecting. */
-
- cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
- patch = NULL;
- while (cmd) {
- if (id == cmd->device->id && lun == cmd->device->lun)
- break;
- patch = cmd;
- cmd = (Scsi_Cmnd *) cmd->host_scribble;
- }
-
- /* Hmm. Couldn't find a valid command.... What to do? */
-
- if (!cmd) {
- printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
- break;
- }
-
- /* Ok, found the command - now start it up again. */
-
- if (patch)
- patch->host_scribble = cmd->host_scribble;
- else
- hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
- hostdata->connected = cmd;
-
- /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
- * because these things are preserved over a disconnect.
- * But we DO need to fix the DPD bit so it's correct for this command.
- */
-
- if (is_dir_out(cmd))
- write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
- else
- write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
- if (hostdata->level2 >= L2_RESELECT) {
- write_3393_count(hostdata, 0); /* we want a DATA_PHASE interrupt */
- write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
- write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
- hostdata->state = S_RUNNING_LEVEL2;
- } else
- hostdata->state = S_CONNECTED;
-
- break;
-
- default:
- printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
- }
-
- write1_io(0, IO_LED_OFF);
-
- DB(DB_INTR, printk("} "))
-
-/* release the SMP spin_lock and restore irq state */
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-
-#define RESET_CARD 0
-#define RESET_CARD_AND_BUS 1
-#define B_FLAG 0x80
-
-/*
- * Caller must hold instance lock!
- */
-
-static int reset_hardware(struct Scsi_Host *instance, int type)
-{
- struct IN2000_hostdata *hostdata;
- int qt, x;
-
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
- write1_io(0, IO_LED_ON);
- if (type == RESET_CARD_AND_BUS) {
- write1_io(0, IO_CARD_RESET);
- x = read1_io(IO_HARDWARE);
- }
- x = read_3393(hostdata, WD_SCSI_STATUS); /* clear any WD intrpt */
- write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
- write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));
-
- write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
- write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
- write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
- /* FIXME: timeout ?? */
- while (!(READ_AUX_STAT() & ASR_INT))
- cpu_relax(); /* wait for RESET to complete */
-
- x = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
-
- write_3393(hostdata, WD_QUEUE_TAG, 0xa5); /* any random number */
- qt = read_3393(hostdata, WD_QUEUE_TAG);
- if (qt == 0xa5) {
- x |= B_FLAG;
- write_3393(hostdata, WD_QUEUE_TAG, 0);
- }
- write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
- write1_io(0, IO_LED_OFF);
- return x;
-}
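-/* About the value reset_hardware() returns: the low bits come from the
- * WD_SCSI_STATUS read after the chip reset (CSR_RESET vs CSR_RESET_AF,
- * the latter apparently meaning 'advanced features enabled'), and B_FLAG
- * is OR'ed in when the 0xa5 written to WD_QUEUE_TAG reads back intact -
- * a register which, judging by this test, only the WD33C93B implements.
- * in2000_detect() uses these bits to decide between C_WD33C93,
- * C_WD33C93A and C_WD33C93B.
- */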
-
-
-
-static int in2000_bus_reset(Scsi_Cmnd * cmd)
-{
- struct Scsi_Host *instance;
- struct IN2000_hostdata *hostdata;
- int x;
- unsigned long flags;
-
- instance = cmd->device->host;
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
- printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);
-
- spin_lock_irqsave(instance->host_lock, flags);
-
- /* do scsi-reset here */
- reset_hardware(instance, RESET_CARD_AND_BUS);
- for (x = 0; x < 8; x++) {
- hostdata->busy[x] = 0;
- hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
- hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
- }
- hostdata->input_Q = NULL;
- hostdata->selecting = NULL;
- hostdata->connected = NULL;
- hostdata->disconnected_Q = NULL;
- hostdata->state = S_UNCONNECTED;
- hostdata->fifo = FI_FIFO_UNUSED;
- hostdata->incoming_ptr = 0;
- hostdata->outgoing_len = 0;
-
- cmd->result = DID_RESET << 16;
-
- spin_unlock_irqrestore(instance->host_lock, flags);
- return SUCCESS;
-}
-
-static int __in2000_abort(Scsi_Cmnd * cmd)
-{
- struct Scsi_Host *instance;
- struct IN2000_hostdata *hostdata;
- Scsi_Cmnd *tmp, *prev;
- uchar sr, asr;
- unsigned long timeout;
-
- instance = cmd->device->host;
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
- printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
- printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));
-
-/*
- * Case 1 : If the command hasn't been issued yet, we simply remove it
- * from the input_Q.
- */
-
- tmp = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
- while (tmp) {
- if (tmp == cmd) {
- if (prev)
- prev->host_scribble = cmd->host_scribble;
- cmd->host_scribble = NULL;
- cmd->result = DID_ABORT << 16;
- printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
- cmd->scsi_done(cmd);
- return SUCCESS;
- }
- prev = tmp;
- tmp = (Scsi_Cmnd *) tmp->host_scribble;
- }
-
-/*
- * Case 2 : If the command is connected, we try to abort it on the
- * chip: send the wd33c93 ABORT command, flush whatever is left in
- * the fifo, issue a DISCONNECT, and then complete the command with
- * DID_ABORT so the high level SCSI driver can retry or escalate.
- */
-
- if (hostdata->connected == cmd) {
-
- printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
-
- printk("sending wd33c93 ABORT command - ");
- write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
- write_3393_cmd(hostdata, WD_CMD_ABORT);
-
-/* Now we have to attempt to flush out the FIFO... */
-
- printk("flushing fifo - ");
- timeout = 1000000;
- do {
- asr = READ_AUX_STAT();
- if (asr & ASR_DBR)
- read_3393(hostdata, WD_DATA);
- } while (!(asr & ASR_INT) && timeout-- > 0);
- sr = read_3393(hostdata, WD_SCSI_STATUS);
- printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);
-
- /*
- * Abort command processed.
- * Still connected.
- * We must disconnect.
- */
-
- printk("sending wd33c93 DISCONNECT command - ");
- write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
-
- timeout = 1000000;
- asr = READ_AUX_STAT();
- while ((asr & ASR_CIP) && timeout-- > 0)
- asr = READ_AUX_STAT();
- sr = read_3393(hostdata, WD_SCSI_STATUS);
- printk("asr=%02x, sr=%02x.", asr, sr);
-
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
- hostdata->connected = NULL;
- hostdata->state = S_UNCONNECTED;
- cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd);
-
- in2000_execute(instance);
-
- return SUCCESS;
- }
-
-/*
- * Case 3: If the command is currently disconnected from the bus,
- * we're not going to expend much effort here: just report FAILED
- * and let the error-handling layers above decide what to do next.
- */
-
- for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
- if (cmd == tmp) {
- printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
- return FAILED;
- }
-
-/*
- * Case 4 : If we reached this point, the command was not found in any of
- * the queues.
- *
- * We probably reached this point because of an unlikely race condition
- * between the command completing successfully and the abortion code,
- * so we won't panic, but we will notify the user in case something really
- * broke.
- */
-
- in2000_execute(instance);
-
-	printk("scsi%d: warning: SCSI command probably completed successfully before it could be aborted. ", instance->host_no);
- return SUCCESS;
-}
-
-static int in2000_abort(Scsi_Cmnd * cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __in2000_abort(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-
-#define MAX_IN2000_HOSTS 3
-#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
-#define SETUP_BUFFER_SIZE 200
-static char setup_buffer[SETUP_BUFFER_SIZE];
-static char setup_used[MAX_SETUP_ARGS];
-static int done_setup = 0;
-
-static void __init in2000_setup(char *str, int *ints)
-{
- int i;
- char *p1, *p2;
-
- strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
- p1 = setup_buffer;
- i = 0;
- while (*p1 && (i < MAX_SETUP_ARGS)) {
- p2 = strchr(p1, ',');
- if (p2) {
- *p2 = '\0';
- if (p1 != p2)
- setup_args[i] = p1;
- p1 = p2 + 1;
- i++;
- } else {
- setup_args[i] = p1;
- break;
- }
- }
- for (i = 0; i < MAX_SETUP_ARGS; i++)
- setup_used[i] = 0;
- done_setup = 1;
-}
-
-
-/* check_setup_args() returns (index + 1) if the key is found, 0 if not
- */
-
-static int __init check_setup_args(char *key, int *val, char *buf)
-{
- int x;
- char *cp;
-
- for (x = 0; x < MAX_SETUP_ARGS; x++) {
- if (setup_used[x])
- continue;
- if (!strncmp(setup_args[x], key, strlen(key)))
- break;
- }
- if (x == MAX_SETUP_ARGS)
- return 0;
- setup_used[x] = 1;
- cp = setup_args[x] + strlen(key);
- *val = -1;
- if (*cp != ':')
- return ++x;
- cp++;
- if ((*cp >= '0') && (*cp <= '9')) {
- *val = simple_strtoul(cp, NULL, 0);
- }
- return ++x;
-}
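-/* As an example of how these two routines cooperate: a setup string like
- * "ioport:0x200,nosync:0xff,period:250" is split on commas by
- * in2000_setup(), after which check_setup_args("ioport", &val, buf)
- * returns 1 and leaves val = 0x200 (simple_strtoul() is called with
- * base 0, so hex and decimal values both work).
- */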
-
-
-
-/* The "correct" (ie portable) way to access memory-mapped hardware
- * such as the IN2000 EPROM and dip switch is through the use of
- * special macros declared in 'asm/io.h'. We use readb() and readl()
- * when reading from the card's BIOS area in probe_bios().
- */
-static u32 bios_tab[] in2000__INITDATA = {
- 0xc8000,
- 0xd0000,
- 0xd8000,
- 0
-};
-
-static unsigned short base_tab[] in2000__INITDATA = {
- 0x220,
- 0x200,
- 0x110,
- 0x100,
-};
-
-static int int_tab[] in2000__INITDATA = {
- 15,
- 14,
- 11,
- 10
-};
-
-static int probe_bios(u32 addr, u32 *s1, uchar *switches)
-{
- void __iomem *p = ioremap(addr, 0x34);
- if (!p)
- return 0;
- *s1 = readl(p + 0x10);
- if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
- /* Read the switch image that's mapped into EPROM space */
- *switches = ~readb(p + 0x20);
- iounmap(p);
- return 1;
- }
- iounmap(p);
- return 0;
-}
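-/* The two magic longwords tested above are just ASCII signatures read
- * little-endian: 0x41564f4e is "NOVA" and 0x61776c41 is "Alwa" (as in
- * Always), matching the two BIOS layouts mentioned in in2000_detect().
- */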
-
-static int __init in2000_detect(struct scsi_host_template * tpnt)
-{
- struct Scsi_Host *instance;
- struct IN2000_hostdata *hostdata;
- int detect_count;
- int bios;
- int x;
- unsigned short base;
- uchar switches;
- uchar hrev;
- unsigned long flags;
- int val;
- char buf[32];
-
-/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
- * pretty straightforward and fool-proof operation. There are 3
- * possible locations for the IN2000 EPROM in memory space - if we
- * find a BIOS signature, we can read the dip switch settings from
- * the byte at BIOS+32 (shadowed in by logic on the card). From 2
- * of the switch bits we get the card's address in IO space. There's
- * an image of the dip switch there, also, so we have a way to back-
- * check that this really is an IN2000 card. Very nifty. Use the
- * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
- * or disabled.
- */
-
- if (!done_setup && setup_strings)
- in2000_setup(setup_strings, NULL);
-
- detect_count = 0;
- for (bios = 0; bios_tab[bios]; bios++) {
- u32 s1 = 0;
- if (check_setup_args("ioport", &val, buf)) {
- base = val;
- switches = ~inb(base + IO_SWITCHES) & 0xff;
- printk("Forcing IN2000 detection at IOport 0x%x ", base);
- bios = 2;
- }
-/*
- * There have been a couple of BIOS versions with different layouts
- * for the obvious ID strings. We look for the 2 most common ones and
- * hope that they cover all the cases...
- */
- else if (probe_bios(bios_tab[bios], &s1, &switches)) {
- printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);
-
-/* Find out where the IO space is */
-
- x = switches & (SW_ADDR0 | SW_ADDR1);
- base = base_tab[x];
-
-/* Check for the IN2000 signature in IO space. */
-
- x = ~inb(base + IO_SWITCHES) & 0xff;
- if (x != switches) {
- printk("Bad IO signature: %02x vs %02x.\n", x, switches);
- continue;
- }
- } else
- continue;
-
-/* OK. We have a base address for the IO ports - run a few safety checks */
-
- if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */
- printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
- continue;
- }
-
-/* Let's assume any hardware version will work, although the driver
- * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
- * print out the rev number for reference later, but accept them all.
- */
-
- hrev = inb(base + IO_HARDWARE);
-
- /* Bit 2 tells us if interrupts are disabled */
- if (switches & SW_DISINT) {
- printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
- printk("is not configured for interrupt operation!\n");
- printk("This driver requires an interrupt: cancelling detection.\n");
- continue;
- }
-
-/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
- * initialize it.
- */
-
- tpnt->proc_name = "in2000";
- instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
- if (instance == NULL)
- continue;
- detect_count++;
- hostdata = (struct IN2000_hostdata *) instance->hostdata;
- instance->io_port = hostdata->io_base = base;
- hostdata->dip_switch = switches;
- hostdata->hrev = hrev;
-
- write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
- write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
- write1_io(0, IO_INTR_MASK); /* allow all ints */
- x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
- if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
- printk("in2000_detect: Unable to allocate IRQ.\n");
- detect_count--;
- continue;
- }
- instance->irq = x;
- instance->n_io_port = 13;
- request_region(base, 13, "in2000"); /* lock in this IO space for our use */
-
- for (x = 0; x < 8; x++) {
- hostdata->busy[x] = 0;
- hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
- hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
-#ifdef PROC_STATISTICS
- hostdata->cmd_cnt[x] = 0;
- hostdata->disc_allowed_cnt[x] = 0;
- hostdata->disc_done_cnt[x] = 0;
-#endif
- }
- hostdata->input_Q = NULL;
- hostdata->selecting = NULL;
- hostdata->connected = NULL;
- hostdata->disconnected_Q = NULL;
- hostdata->state = S_UNCONNECTED;
- hostdata->fifo = FI_FIFO_UNUSED;
- hostdata->level2 = L2_BASIC;
- hostdata->disconnect = DIS_ADAPTIVE;
- hostdata->args = DEBUG_DEFAULTS;
- hostdata->incoming_ptr = 0;
- hostdata->outgoing_len = 0;
- hostdata->default_sx_per = DEFAULT_SX_PER;
-
-/* Older BIOS's had a 'sync on/off' switch - use its setting */
-
- if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
- hostdata->sync_off = 0x00; /* sync defaults to on */
- else
- hostdata->sync_off = 0xff; /* sync defaults to off */
-
-#ifdef PROC_INTERFACE
- hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
-#ifdef PROC_STATISTICS
- hostdata->int_cnt = 0;
-#endif
-#endif
-
- if (check_setup_args("nosync", &val, buf))
- hostdata->sync_off = val;
-
- if (check_setup_args("period", &val, buf))
- hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;
-
- if (check_setup_args("disconnect", &val, buf)) {
- if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
- hostdata->disconnect = val;
- else
- hostdata->disconnect = DIS_ADAPTIVE;
- }
-
- if (check_setup_args("noreset", &val, buf))
- hostdata->args ^= A_NO_SCSI_RESET;
-
- if (check_setup_args("level2", &val, buf))
- hostdata->level2 = val;
-
- if (check_setup_args("debug", &val, buf))
- hostdata->args = (val & DB_MASK);
-
-#ifdef PROC_INTERFACE
- if (check_setup_args("proc", &val, buf))
- hostdata->proc = val;
-#endif
-
-
- /* FIXME: not strictly needed I think but the called code expects
- to be locked */
- spin_lock_irqsave(instance->host_lock, flags);
- x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
- spin_unlock_irqrestore(instance->host_lock, flags);
-
- hostdata->microcode = read_3393(hostdata, WD_CDB_1);
- if (x & 0x01) {
- if (x & B_FLAG)
- hostdata->chip = C_WD33C93B;
- else
- hostdata->chip = C_WD33C93A;
- } else
- hostdata->chip = C_WD33C93;
-
- printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
- printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
-#ifdef DEBUGGING_ON
- printk("setup_args = ");
- for (x = 0; x < MAX_SETUP_ARGS; x++)
- printk("%s,", setup_args[x]);
- printk("\n");
-#endif
- if (hostdata->sync_off == 0xff)
- printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
- printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
- }
-
- return detect_count;
-}
-
-static int in2000_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, shost);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- return 0;
-}
-
-/* NOTE: I lifted this function straight out of the old driver,
- * and have not tested it. Presumably it does what it's
- * supposed to do...
- */
-
-static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
-{
- int size;
-
- size = capacity;
- iinfo[0] = 64;
- iinfo[1] = 32;
- iinfo[2] = size >> 11;
-
-/* This should approximate the large drive handling that the DOS ASPI manager
- uses. Drives very near the boundaries may not be handled correctly (i.e.
- near 2.0 Gb and 4.0 Gb) */
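-/* Worked example of the cascade below: an 8388608-sector (4 GB) drive gives
-   size >> 11 = 4096 cylinders, so we step through 64/63 (2080 cylinders) and
-   128/63 (1040 cylinders) before settling on 255 heads, 63 sectors and
-   8388608 / (255 * 63) = 522 cylinders. */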
-
- if (iinfo[2] > 1024) {
- iinfo[0] = 64;
- iinfo[1] = 63;
- iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
- }
- if (iinfo[2] > 1024) {
- iinfo[0] = 128;
- iinfo[1] = 63;
- iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
- }
- if (iinfo[2] > 1024) {
- iinfo[0] = 255;
- iinfo[1] = 63;
- iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
- }
- return 0;
-}
-
-
-static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
-{
-
-#ifdef PROC_INTERFACE
-
- char *bp;
- struct IN2000_hostdata *hd;
- int x, i;
-
- hd = (struct IN2000_hostdata *) instance->hostdata;
-
- buf[len] = '\0';
- bp = buf;
- if (!strncmp(bp, "debug:", 6)) {
- bp += 6;
- hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
- } else if (!strncmp(bp, "disconnect:", 11)) {
- bp += 11;
- x = simple_strtoul(bp, NULL, 0);
- if (x < DIS_NEVER || x > DIS_ALWAYS)
- x = DIS_ADAPTIVE;
- hd->disconnect = x;
- } else if (!strncmp(bp, "period:", 7)) {
- bp += 7;
- x = simple_strtoul(bp, NULL, 0);
- hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
- } else if (!strncmp(bp, "resync:", 7)) {
- bp += 7;
- x = simple_strtoul(bp, NULL, 0);
- for (i = 0; i < 7; i++)
- if (x & (1 << i))
- hd->sync_stat[i] = SS_UNSET;
- } else if (!strncmp(bp, "proc:", 5)) {
- bp += 5;
- hd->proc = simple_strtoul(bp, NULL, 0);
- } else if (!strncmp(bp, "level2:", 7)) {
- bp += 7;
- hd->level2 = simple_strtoul(bp, NULL, 0);
- }
-#endif
- return len;
-}
-
-static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
-{
-
-#ifdef PROC_INTERFACE
- unsigned long flags;
- struct IN2000_hostdata *hd;
- Scsi_Cmnd *cmd;
- int x;
-
- hd = (struct IN2000_hostdata *) instance->hostdata;
-
- spin_lock_irqsave(instance->host_lock, flags);
- if (hd->proc & PR_VERSION)
- seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
-
- if (hd->proc & PR_INFO) {
- seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
- seq_puts(m, "\nsync_xfer[] = ");
- for (x = 0; x < 7; x++)
- seq_printf(m, "\t%02x", hd->sync_xfer[x]);
- seq_puts(m, "\nsync_stat[] = ");
- for (x = 0; x < 7; x++)
- seq_printf(m, "\t%02x", hd->sync_stat[x]);
- }
-#ifdef PROC_STATISTICS
- if (hd->proc & PR_STATISTICS) {
- seq_puts(m, "\ncommands issued: ");
- for (x = 0; x < 7; x++)
- seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
- seq_puts(m, "\ndisconnects allowed:");
- for (x = 0; x < 7; x++)
- seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
- seq_puts(m, "\ndisconnects done: ");
- for (x = 0; x < 7; x++)
- seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
- seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt);
- }
-#endif
- if (hd->proc & PR_CONNECTED) {
- seq_puts(m, "\nconnected: ");
- if (hd->connected) {
- cmd = (Scsi_Cmnd *) hd->connected;
- seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- }
- }
- if (hd->proc & PR_INPUTQ) {
- seq_puts(m, "\ninput_Q: ");
- cmd = (Scsi_Cmnd *) hd->input_Q;
- while (cmd) {
- seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- cmd = (Scsi_Cmnd *) cmd->host_scribble;
- }
- }
- if (hd->proc & PR_DISCQ) {
- seq_puts(m, "\ndisconnected_Q:");
- cmd = (Scsi_Cmnd *) hd->disconnected_Q;
- while (cmd) {
- seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
- cmd = (Scsi_Cmnd *) cmd->host_scribble;
- }
- }
- if (hd->proc & PR_TEST) {
- ; /* insert your own custom function here */
- }
- seq_putc(m, '\n');
- spin_unlock_irqrestore(instance->host_lock, flags);
-#endif /* PROC_INTERFACE */
- return 0;
-}
-
-MODULE_LICENSE("GPL");
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "in2000",
- .write_info = in2000_write_info,
- .show_info = in2000_show_info,
- .name = "Always IN2000",
- .detect = in2000_detect,
- .release = in2000_release,
- .queuecommand = in2000_queuecommand,
- .eh_abort_handler = in2000_abort,
- .eh_bus_reset_handler = in2000_bus_reset,
- .bios_param = in2000_biosparam,
- .can_queue = IN2000_CAN_Q,
- .this_id = IN2000_HOST_ID,
- .sg_tablesize = IN2000_SG,
- .cmd_per_lun = IN2000_CPL,
- .use_clustering = DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h
deleted file mode 100644
index 5821e1fbce08..000000000000
--- a/drivers/scsi/in2000.h
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * in2000.h - Linux device driver definitions for the
- * Always IN2000 ISA SCSI card.
- *
- * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
- *
- * Copyright (c) 1996 John Shifflett, GeoLog Consulting
- * john@geolog.com
- * jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef IN2000_H
-#define IN2000_H
-
-#include <asm/io.h>
-
-#define PROC_INTERFACE /* add code for /proc/scsi/in2000/xxx interface */
-#ifdef PROC_INTERFACE
-#define PROC_STATISTICS /* add code for keeping various real time stats */
-#endif
-
-#define SYNC_DEBUG /* extra info on sync negotiation printed */
-#define DEBUGGING_ON /* enable command-line debugging bitmask */
-#define DEBUG_DEFAULTS 0 /* default bitmask - change from command-line */
-
-#ifdef __i386__
-#define FAST_READ_IO /* No problems with these on my machine */
-#define FAST_WRITE_IO
-#endif
-
-#ifdef DEBUGGING_ON
-#define DB(f,a) if (hostdata->args & (f)) a;
-#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
-#else
-#define DB(f,a)
-#define CHECK_NULL(p,s)
-#endif
-
-#define uchar unsigned char
-
-#define read1_io(a) (inb(hostdata->io_base+(a)))
-#define read2_io(a) (inw(hostdata->io_base+(a)))
-#define write1_io(b,a) (outb((b),hostdata->io_base+(a)))
-#define write2_io(w,a) (outw((w),hostdata->io_base+(a)))
-
-#ifdef __i386__
-/* These inline assembly defines are derived from a patch
- * sent to me by Bill Earnest. He's done a lot of very
- * valuable thinking, testing, and coding during his effort
- * to squeeze more speed out of this driver. I really think
- * that we are doing IO at close to the maximum now with
- * the fifo. (And yes, insw uses 'edi' while outsw uses
- * 'esi'. Thanks Bill!)
- */
-
-#define FAST_READ2_IO() \
-({ \
-int __dummy_1,__dummy_2; \
- __asm__ __volatile__ ("\n \
- cld \n \
- orl %%ecx, %%ecx \n \
- jz 1f \n \
- rep \n \
- insw (%%dx),%%es:(%%edi) \n \
-1: " \
- : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2) /* output */ \
- : "2" (f), "0" (sp), "1" (i) /* input */ \
- ); /* trashed */ \
-})
-
-#define FAST_WRITE2_IO() \
-({ \
-int __dummy_1,__dummy_2; \
- __asm__ __volatile__ ("\n \
- cld \n \
- orl %%ecx, %%ecx \n \
- jz 1f \n \
- rep \n \
- outsw %%ds:(%%esi),(%%dx) \n \
-1: " \
- : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */ \
- : "2" (f), "0" (sp), "1" (i) /* input */ \
- ); /* trashed */ \
-})
-#endif
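-/* (Functionally these amount to insw(f, sp, i) and outsw(f, sp, i),
- * except that 'sp' is left pointing just past the words transferred.)
- */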
-
-/* IN2000 io_port offsets */
-#define IO_WD_ASR 0x00 /* R - 3393 auxstat reg */
-#define ASR_INT 0x80
-#define ASR_LCI 0x40
-#define ASR_BSY 0x20
-#define ASR_CIP 0x10
-#define ASR_PE 0x02
-#define ASR_DBR 0x01
-#define IO_WD_ADDR 0x00 /* W - 3393 address reg */
-#define IO_WD_DATA 0x01 /* R/W - rest of 3393 regs */
-#define IO_FIFO 0x02 /* R/W - in2000 dual-port fifo (16 bits) */
-#define IN2000_FIFO_SIZE 2048 /* fifo capacity in bytes */
-#define IO_CARD_RESET 0x03 /* W - in2000 start master reset */
-#define IO_FIFO_COUNT 0x04 /* R - in2000 fifo counter */
-#define IO_FIFO_WRITE 0x05 /* W - clear fifo counter, start write */
-#define IO_FIFO_READ 0x07 /* W - start fifo read */
-#define IO_LED_OFF 0x08 /* W - turn off in2000 activity LED */
-#define IO_SWITCHES 0x08 /* R - read in2000 dip switch */
-#define SW_ADDR0 0x01 /* bit 0 = bit 0 of index to io addr */
-#define SW_ADDR1 0x02 /* bit 1 = bit 1 of index io addr */
-#define SW_DISINT 0x04 /* bit 2 true if ints disabled */
-#define SW_INT0 0x08 /* bit 3 = bit 0 of index to interrupt */
-#define SW_INT1 0x10 /* bit 4 = bit 1 of index to interrupt */
-#define SW_INT_SHIFT 3 /* shift right this amount to right justify int bits */
-#define SW_SYNC_DOS5 0x20 /* bit 5 used by Always BIOS */
-#define SW_FLOPPY 0x40 /* bit 6 true if floppy enabled */
-#define SW_BIT7 0x80 /* bit 7 hardwired true (ground) */
-#define IO_LED_ON 0x09 /* W - turn on in2000 activity LED */
-#define IO_HARDWARE 0x0a /* R - read in2000 hardware rev, stop reset */
-#define IO_INTR_MASK 0x0c /* W - in2000 interrupt mask reg */
-#define IMASK_WD 0x01 /* WD33c93 interrupt mask */
-#define IMASK_FIFO 0x02 /* FIFO interrupt mask */
-
-/* wd register names */
-#define WD_OWN_ID 0x00
-#define WD_CONTROL 0x01
-#define WD_TIMEOUT_PERIOD 0x02
-#define WD_CDB_1 0x03
-#define WD_CDB_2 0x04
-#define WD_CDB_3 0x05
-#define WD_CDB_4 0x06
-#define WD_CDB_5 0x07
-#define WD_CDB_6 0x08
-#define WD_CDB_7 0x09
-#define WD_CDB_8 0x0a
-#define WD_CDB_9 0x0b
-#define WD_CDB_10 0x0c
-#define WD_CDB_11 0x0d
-#define WD_CDB_12 0x0e
-#define WD_TARGET_LUN 0x0f
-#define WD_COMMAND_PHASE 0x10
-#define WD_SYNCHRONOUS_TRANSFER 0x11
-#define WD_TRANSFER_COUNT_MSB 0x12
-#define WD_TRANSFER_COUNT 0x13
-#define WD_TRANSFER_COUNT_LSB 0x14
-#define WD_DESTINATION_ID 0x15
-#define WD_SOURCE_ID 0x16
-#define WD_SCSI_STATUS 0x17
-#define WD_COMMAND 0x18
-#define WD_DATA 0x19
-#define WD_QUEUE_TAG 0x1a
-#define WD_AUXILIARY_STATUS 0x1f
-
-/* WD commands */
-#define WD_CMD_RESET 0x00
-#define WD_CMD_ABORT 0x01
-#define WD_CMD_ASSERT_ATN 0x02
-#define WD_CMD_NEGATE_ACK 0x03
-#define WD_CMD_DISCONNECT 0x04
-#define WD_CMD_RESELECT 0x05
-#define WD_CMD_SEL_ATN 0x06
-#define WD_CMD_SEL 0x07
-#define WD_CMD_SEL_ATN_XFER 0x08
-#define WD_CMD_SEL_XFER 0x09
-#define WD_CMD_RESEL_RECEIVE 0x0a
-#define WD_CMD_RESEL_SEND 0x0b
-#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
-#define WD_CMD_TRANS_ADDR 0x18
-#define WD_CMD_TRANS_INFO 0x20
-#define WD_CMD_TRANSFER_PAD 0x21
-#define WD_CMD_SBT_MODE 0x80
-
-/* SCSI Bus Phases */
-#define PHS_DATA_OUT 0x00
-#define PHS_DATA_IN 0x01
-#define PHS_COMMAND 0x02
-#define PHS_STATUS 0x03
-#define PHS_MESS_OUT 0x06
-#define PHS_MESS_IN 0x07
-
-/* Command Status Register definitions */
-
- /* reset state interrupts */
-#define CSR_RESET 0x00
-#define CSR_RESET_AF 0x01
-
- /* successful completion interrupts */
-#define CSR_RESELECT 0x10
-#define CSR_SELECT 0x11
-#define CSR_SEL_XFER_DONE 0x16
-#define CSR_XFER_DONE 0x18
-
- /* paused or aborted interrupts */
-#define CSR_MSGIN 0x20
-#define CSR_SDP 0x21
-#define CSR_SEL_ABORT 0x22
-#define CSR_RESEL_ABORT 0x25
-#define CSR_RESEL_ABORT_AM 0x27
-#define CSR_ABORT 0x28
-
- /* terminated interrupts */
-#define CSR_INVALID 0x40
-#define CSR_UNEXP_DISC 0x41
-#define CSR_TIMEOUT 0x42
-#define CSR_PARITY 0x43
-#define CSR_PARITY_ATN 0x44
-#define CSR_BAD_STATUS 0x45
-#define CSR_UNEXP 0x48
-
- /* service required interrupts */
-#define CSR_RESEL 0x80
-#define CSR_RESEL_AM 0x81
-#define CSR_DISC 0x85
-#define CSR_SRV_REQ 0x88
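-/* The chip reports several of these codes with the current bus phase OR'ed
- * into the low bits, which is why in2000_intr() switches on combinations
- * such as (CSR_XFER_DONE | PHS_MESS_IN) or (CSR_SRV_REQ | PHS_STATUS). */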
-
- /* Own ID/CDB Size register */
-#define OWNID_EAF 0x08
-#define OWNID_EHP 0x10
-#define OWNID_RAF 0x20
-#define OWNID_FS_8 0x00
-#define OWNID_FS_12 0x40
-#define OWNID_FS_16 0x80
-
- /* Control register */
-#define CTRL_HSP 0x01
-#define CTRL_HA 0x02
-#define CTRL_IDI 0x04
-#define CTRL_EDI 0x08
-#define CTRL_HHP 0x10
-#define CTRL_POLLED 0x00
-#define CTRL_BURST 0x20
-#define CTRL_BUS 0x40
-#define CTRL_DMA 0x80
-
- /* Timeout Period register */
-#define TIMEOUT_PERIOD_VALUE 20 /* results in 200 ms. */
-
- /* Synchronous Transfer Register */
-#define STR_FSS 0x80
-
- /* Destination ID register */
-#define DSTID_DPD 0x40
-#define DATA_OUT_DIR 0
-#define DATA_IN_DIR 1
-#define DSTID_SCC 0x80
-
- /* Source ID register */
-#define SRCID_MASK 0x07
-#define SRCID_SIV 0x08
-#define SRCID_DSP 0x20
-#define SRCID_ES 0x40
-#define SRCID_ER 0x80
-
-
-
-#define ILLEGAL_STATUS_BYTE 0xff
-
-
-#define DEFAULT_SX_PER 500 /* (ns) fairly safe */
-#define DEFAULT_SX_OFF 0 /* aka async */
-
-#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
-#define OPTIMUM_SX_OFF 12 /* size of in2000 fifo */
-
-struct sx_period {
- unsigned int period_ns;
- uchar reg_value;
- };
-
-
-struct IN2000_hostdata {
- struct Scsi_Host *next;
- uchar chip; /* what kind of wd33c93 chip? */
- uchar microcode; /* microcode rev if 'B' */
- unsigned short io_base; /* IO port base */
- unsigned int dip_switch; /* dip switch settings */
- unsigned int hrev; /* hardware revision of card */
- volatile uchar busy[8]; /* index = target, bit = lun */
- volatile Scsi_Cmnd *input_Q; /* commands waiting to be started */
- volatile Scsi_Cmnd *selecting; /* trying to select this command */
- volatile Scsi_Cmnd *connected; /* currently connected command */
- volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
- uchar state; /* what we are currently doing */
- uchar fifo; /* what the FIFO is up to */
- uchar level2; /* extent to which Level-2 commands are used */
- uchar disconnect; /* disconnect/reselect policy */
- unsigned int args; /* set from command-line argument */
- uchar incoming_msg[8]; /* filled during message_in phase */
- int incoming_ptr; /* mainly used with EXTENDED messages */
- uchar outgoing_msg[8]; /* send this during next message_out */
- int outgoing_len; /* length of outgoing message */
- unsigned int default_sx_per; /* default transfer period for SCSI bus */
- uchar sync_xfer[8]; /* sync_xfer reg settings per target */
- uchar sync_stat[8]; /* status of sync negotiation per target */
- uchar sync_off; /* bit mask: don't use sync with these targets */
-#ifdef PROC_INTERFACE
- uchar proc; /* bit mask: what's in proc output */
-#ifdef PROC_STATISTICS
- unsigned long cmd_cnt[8]; /* # of commands issued per target */
- unsigned long int_cnt; /* # of interrupts serviced */
- unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */
- unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/
-#endif
-#endif
- };
-
-
-/* defines for hostdata->chip */
-
-#define C_WD33C93 0
-#define C_WD33C93A 1
-#define C_WD33C93B 2
-#define C_UNKNOWN_CHIP 100
-
-/* defines for hostdata->state */
-
-#define S_UNCONNECTED 0
-#define S_SELECTING 1
-#define S_RUNNING_LEVEL2 2
-#define S_CONNECTED 3
-#define S_PRE_TMP_DISC 4
-#define S_PRE_CMP_DISC 5
-
-/* defines for hostdata->fifo */
-
-#define FI_FIFO_UNUSED 0
-#define FI_FIFO_READING 1
-#define FI_FIFO_WRITING 2
-
-/* defines for hostdata->level2 */
-/* NOTE: only the first 3 are trustworthy at this point -
- * having trouble when more than 1 device is reading/writing
- * at the same time...
- */
-
-#define L2_NONE 0 /* no combination commands - we get lots of ints */
-#define L2_SELECT 1 /* start with SEL_ATN_XFER, but never resume it */
-#define L2_BASIC 2 /* resume after STATUS ints & RDP messages */
-#define L2_DATA 3 /* resume after DATA_IN/OUT ints */
-#define L2_MOST 4 /* resume after anything except a RESELECT int */
-#define L2_RESELECT 5 /* resume after everything, including RESELECT ints */
-#define L2_ALL 6 /* always resume */
-
-/* defines for hostdata->disconnect */
-
-#define DIS_NEVER 0
-#define DIS_ADAPTIVE 1
-#define DIS_ALWAYS 2
-
-/* defines for hostdata->args */
-
-#define DB_TEST 1<<0
-#define DB_FIFO 1<<1
-#define DB_QUEUE_COMMAND 1<<2
-#define DB_EXECUTE 1<<3
-#define DB_INTR 1<<4
-#define DB_TRANSFER 1<<5
-#define DB_MASK 0x3f
-
-#define A_NO_SCSI_RESET 1<<15
-
-
-/* defines for hostdata->sync_xfer[] */
-
-#define SS_UNSET 0
-#define SS_FIRST 1
-#define SS_WAITING 2
-#define SS_SET 3
-
-/* defines for hostdata->proc */
-
-#define PR_VERSION 1<<0
-#define PR_INFO 1<<1
-#define PR_STATISTICS 1<<2
-#define PR_CONNECTED 1<<3
-#define PR_INPUTQ 1<<4
-#define PR_DISCQ 1<<5
-#define PR_TEST 1<<6
-#define PR_STOP 1<<7
-
-
-# include <linux/init.h>
-# include <linux/spinlock.h>
-# define in2000__INITFUNC(function) __initfunc(function)
-# define in2000__INIT __init
-# define in2000__INITDATA __initdata
-# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(host->host_lock, flags)
-# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
- flags)
-
-static int in2000_detect(struct scsi_host_template *) in2000__INIT;
-static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int in2000_abort(Scsi_Cmnd *);
-static void in2000_setup(char *, int *) in2000__INIT;
-static int in2000_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int *);
-static int in2000_bus_reset(Scsi_Cmnd *);
-
-
-#define IN2000_CAN_Q 16
-#define IN2000_SG SG_ALL
-#define IN2000_CPL 2
-#define IN2000_HOST_ID 7
-
-#endif /* IN2000_H */
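The removed IN2000_hostdata above tracks per-target/per-LUN business with a plain byte array ("index = target, bit = lun"). A minimal standalone sketch of how such a bitmap is typically manipulated; the helper names are hypothetical and this is illustrative only, not part of the deleted driver:

/* Illustrative only -- not part of the removed driver.  How a
 * byte-per-target LUN bitmap like IN2000_hostdata.busy[] is used. */
#include <stdio.h>

static unsigned char busy[8];                 /* index = target, bit = lun */

static void mark_busy(int target, int lun) { busy[target] |=  (1u << lun); }
static void mark_idle(int target, int lun) { busy[target] &= ~(1u << lun); }
static int  is_busy(int target, int lun)   { return (busy[target] >> lun) & 1; }

int main(void)
{
	mark_busy(3, 2);
	printf("target 3, lun 2 busy: %d\n", is_busy(3, 2));  /* prints 1 */
	mark_idle(3, 2);
	printf("target 3, lun 2 busy: %d\n", is_busy(3, 2));  /* prints 0 */
	return 0;
}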
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 17d04c702e1b..a8762a3efeef 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -493,15 +493,15 @@ struct ipr_error_table_t ipr_error_table[] = {
"9072: Link not operational transition"},
{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
"9032: Array exposed but still protected"},
- {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
+ {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
"70DD: Device forced failed by disrupt device command"},
{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
"4061: Multipath redundancy level got better"},
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
"4060: Multipath redundancy level got worse"},
- {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+ {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
"9083: Device raw mode enabled"},
- {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+ {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
"9084: Device raw mode disabled"},
{0x07270000, 0, 0,
"Failure due to other device"},
@@ -1473,7 +1473,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- list_del(&hostrcb->queue);
+ list_del_init(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
@@ -2552,6 +2552,23 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
}
}
+static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
+{
+ struct ipr_hostrcb *hostrcb;
+
+ hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
+ struct ipr_hostrcb, queue);
+
+ if (unlikely(!hostrcb)) {
+ dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
+ hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
+ struct ipr_hostrcb, queue);
+ }
+
+ list_del_init(&hostrcb->queue);
+ return hostrcb;
+}
+
/**
* ipr_process_error - Op done function for an adapter error log.
* @ipr_cmd: ipr command struct
@@ -2569,13 +2586,14 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
+ char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
else
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
- list_del(&hostrcb->queue);
+ list_del_init(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ioasc) {
@@ -2588,6 +2606,10 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
}
+ list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+ hostrcb = ipr_get_free_hostrcb(ioa_cfg);
+ kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
+
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
@@ -4095,6 +4117,64 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
.show = ipr_show_fw_type
};
+static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ struct ipr_hostrcb *hostrcb;
+ unsigned long lock_flags = 0;
+ int ret;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+ struct ipr_hostrcb, queue);
+ if (!hostrcb) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return 0;
+ }
+ ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
+ sizeof(hostrcb->hcam));
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return ret;
+}
+
+static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *cdev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ struct ipr_hostrcb *hostrcb;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+ struct ipr_hostrcb, queue);
+ if (!hostrcb) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return count;
+ }
+
+ /* Reclaim hostrcb before exit */
+ list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return count;
+}
+
+static struct bin_attribute ipr_ioa_async_err_log = {
+ .attr = {
+ .name = "async_err_log",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = 0,
+ .read = ipr_read_async_err_log,
+ .write = ipr_next_async_err_log
+};
+
static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_fw_version_attr,
&ipr_log_level_attr,
@@ -7026,8 +7106,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res;
- struct ipr_hostrcb *hostrcb, *temp;
- int i = 0, j;
+ int j;
ENTER;
ioa_cfg->in_reset_reload = 0;
@@ -7048,12 +7127,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
}
schedule_work(&ioa_cfg->work_q);
- list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
- list_del(&hostrcb->queue);
- if (i++ < IPR_NUM_LOG_HCAMS)
- ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+ for (j = 0; j < IPR_NUM_HCAMS; j++) {
+ list_del_init(&ioa_cfg->hostrcb[j]->queue);
+ if (j < IPR_NUM_LOG_HCAMS)
+ ipr_send_hcam(ioa_cfg,
+ IPR_HCAM_CDB_OP_CODE_LOG_DATA,
+ ioa_cfg->hostrcb[j]);
else
- ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+ ipr_send_hcam(ioa_cfg,
+ IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
+ ioa_cfg->hostrcb[j]);
}
scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
@@ -7966,7 +8049,8 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
ENTER;
ipr_cmd->job_step = ipr_ioafp_std_inquiry;
- dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
+ if (ioa_cfg->identify_hrrq_index == 0)
+ dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
@@ -8335,7 +8419,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
struct ipr_hostrcb, queue);
- list_del(&hostrcb->queue);
+ list_del_init(&hostrcb->queue);
memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
rc = ipr_get_ldump_data_section(ioa_cfg,
@@ -9332,7 +9416,7 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
- for (i = 0; i < IPR_NUM_HCAMS; i++) {
+ for (i = 0; i < IPR_MAX_HCAMS; i++) {
dma_free_coherent(&ioa_cfg->pdev->dev,
sizeof(struct ipr_hostrcb),
ioa_cfg->hostrcb[i],
@@ -9572,7 +9656,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->u.cfg_table)
goto out_free_host_rrq;
- for (i = 0; i < IPR_NUM_HCAMS; i++) {
+ for (i = 0; i < IPR_MAX_HCAMS; i++) {
ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
sizeof(struct ipr_hostrcb),
&ioa_cfg->hostrcb_dma[i],
@@ -9714,6 +9798,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+ INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
@@ -10352,6 +10437,8 @@ static void ipr_remove(struct pci_dev *pdev)
&ipr_trace_attr);
ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_dump_attr);
+ sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_ioa_async_err_log);
scsi_remove_host(ioa_cfg->host);
__ipr_remove(pdev);
@@ -10400,10 +10487,25 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
return rc;
}
+ rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_ioa_async_err_log);
+
+ if (rc) {
+ ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_dump_attr);
+ ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_trace_attr);
+ scsi_remove_host(ioa_cfg->host);
+ __ipr_remove(pdev);
+ return rc;
+ }
+
rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_dump_attr);
if (rc) {
+ sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+ &ipr_ioa_async_err_log);
ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
&ipr_trace_attr);
scsi_remove_host(ioa_cfg->host);
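The ipr.c changes above export pending HCAM error buffers through a new "async_err_log" binary sysfs attribute and announce them with a KOBJ_CHANGE uevent carrying ASYNC_ERR_LOG=1: a read returns the oldest record on hostrcb_report_q, and any write moves that record back to the free queue. A hedged sketch of a userspace consumer; the sysfs path and the buffer size are assumptions, not taken from the patch:

/* Hypothetical userspace consumer.  The path and the 4 KiB buffer are
 * assumptions; the attribute hangs off the Scsi_Host class device and
 * a read returns at most sizeof(hostrcb->hcam) bytes. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/scsi_host/host0/async_err_log", O_RDWR);

	if (fd < 0) {
		perror("open async_err_log");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));      /* 0 means report queue empty */
	if (n > 0) {
		/* ... decode and persist the HCAM payload here ... */
		if (write(fd, buf, 1) < 0)   /* any write pops the record */
			perror("ack async_err_log");
	}
	close(fd);
	return 0;
}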
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index cdb51960b53c..8995053d01b3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -154,7 +154,9 @@
#define IPR_DEFAULT_MAX_ERROR_DUMP 984
#define IPR_NUM_LOG_HCAMS 2
#define IPR_NUM_CFG_CHG_HCAMS 2
+#define IPR_NUM_HCAM_QUEUE 12
#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+#define IPR_MAX_HCAMS (IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE)
#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
@@ -1504,6 +1506,7 @@ struct ipr_ioa_cfg {
u8 log_level;
#define IPR_MAX_LOG_LEVEL 4
#define IPR_DEFAULT_LOG_LEVEL 2
+#define IPR_DEBUG_LOG_LEVEL 3
#define IPR_NUM_TRACE_INDEX_BITS 8
#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
@@ -1532,10 +1535,11 @@ struct ipr_ioa_cfg {
char ipr_hcam_label[8];
#define IPR_HCAM_LABEL "hcams"
- struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS];
- dma_addr_t hostrcb_dma[IPR_NUM_HCAMS];
+ struct ipr_hostrcb *hostrcb[IPR_MAX_HCAMS];
+ dma_addr_t hostrcb_dma[IPR_MAX_HCAMS];
struct list_head hostrcb_free_q;
struct list_head hostrcb_pending_q;
+ struct list_head hostrcb_report_q;
struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
u32 hrrq_num;
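Alongside the larger hostrcb pool declared here, the ipr patch converts every hostrcb dequeue from list_del() to list_del_init(). That appears to be what lets a buffer migrate free_q -> pending_q -> report_q -> free_q and be unconditionally re-initialised in ipr_ioa_reset_done(): list_del() poisons the node's pointers, while list_del_init() leaves it self-linked so a later list_move_tail() or repeated delete stays safe. A minimal kernel-style sketch under those assumptions, with placeholder names:

/* Sketch only.  A node removed with plain list_del() has poisoned
 * next/prev pointers and must not be moved or deleted again;
 * list_del_init() re-links it to itself, so the queue hops below are
 * safe even when repeated. */
#include <linux/list.h>

struct demo_hostrcb {
	struct list_head queue;
	/* ... hcam payload ... */
};

static void demo_report(struct list_head *report_q, struct demo_hostrcb *rcb)
{
	list_del_init(&rcb->queue);             /* off whichever list it was on */
	list_add_tail(&rcb->queue, report_q);   /* park it for userspace */
}

static void demo_ack(struct list_head *free_q, struct demo_hostrcb *rcb)
{
	/* safe even if the node was already unlinked: it is self-linked */
	list_move_tail(&rcb->queue, free_q);
}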
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e72673b0a8fb..16ca31ad5ec0 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1837,7 +1837,6 @@ static void fc_exch_reset(struct fc_exch *ep)
int rc = 1;
spin_lock_bh(&ep->ex_lock);
- fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP;
fc_exch_timer_cancel(ep);
if (ep->esb_stat & ESB_ST_REC_QUAL)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 93f596182145..97aeaddd600d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -457,6 +457,9 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
*/
static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
+ struct fc_lport *lport = rdata->local_port;
+ u32 port_id = rdata->ids.port_id;
+
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Remove port\n");
@@ -466,6 +469,15 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
goto out;
}
+ /*
+ * FC-LS states:
+ * To explicitly Logout, the initiating Nx_Port shall terminate
+ * other open Sequences that it initiated with the destination
+ * Nx_Port prior to performing Logout.
+ */
+ lport->tt.exch_mgr_reset(lport, 0, port_id);
+ lport->tt.exch_mgr_reset(lport, port_id, 0);
+
fc_rport_enter_logo(rdata);
/*
@@ -547,16 +559,24 @@ static void fc_rport_timeout(struct work_struct *work)
*/
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
+ struct fc_lport *lport = rdata->local_port;
+
FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
IS_ERR(fp) ? -PTR_ERR(fp) : 0,
fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
- case RPORT_ST_PLOGI:
rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
break;
+ case RPORT_ST_PLOGI:
+ if (lport->point_to_multipoint) {
+ rdata->flags &= ~FC_RP_STARTED;
+ fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+ } else
+ fc_rport_enter_logo(rdata);
+ break;
case RPORT_ST_RTV:
fc_rport_enter_ready(rdata);
break;
@@ -1877,7 +1897,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
spp->spp_type_ext = rspp->spp_type_ext;
spp->spp_flags = FC_SPP_RESP_ACK;
- fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+ fc_rport_enter_prli(rdata);
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
@@ -1915,7 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
- fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+ fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
} else
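The fc_rport_logoff() hunk above implements the quoted FC-LS rule by resetting the exchange manager twice, matching the remote port ID first as destination and then as source (0 acts as a wildcard). A small illustrative wrapper, not part of the driver, showing the same idiom:

/* Illustrative wrapper, not in the driver: flush every exchange held
 * with one remote port before LOGO, in both directions. */
#include <scsi/libfc.h>

static void demo_flush_rport_exchanges(struct fc_lport *lport, u32 port_id)
{
	lport->tt.exch_mgr_reset(lport, 0, port_id);  /* remote ID as d_id */
	lport->tt.exch_mgr_reset(lport, port_id, 0);  /* remote ID as s_id */
}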
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 63e48d4277b0..4ac03b16d17f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1535,7 +1535,7 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
}
/* Routines for all individual HBA attributes */
-int
+static int
lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
{
struct lpfc_fdmi_attr_entry *ae;
@@ -1551,7 +1551,7 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1573,7 +1573,7 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
{
struct lpfc_hba *phba = vport->phba;
@@ -1594,7 +1594,7 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1615,7 +1615,7 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1637,7 +1637,7 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1669,7 +1669,7 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1690,7 +1690,7 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1715,7 +1715,7 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1736,7 +1736,7 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1759,7 +1759,7 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1775,7 +1775,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1794,7 +1794,7 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1811,7 +1811,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1828,7 +1828,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1846,7 +1846,7 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1867,7 +1867,7 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1884,7 +1884,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1906,7 +1906,7 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
}
/* Routines for all individual PORT attributes */
-int
+static int
lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1925,7 +1925,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -1975,7 +1975,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2039,7 +2039,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2059,7 +2059,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2081,7 +2081,7 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2102,7 +2102,7 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2120,7 +2120,7 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2138,7 +2138,7 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2156,7 +2156,7 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2175,7 +2175,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2190,7 +2190,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2208,7 +2208,7 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2227,7 +2227,7 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2243,7 +2243,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2259,7 +2259,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2274,7 +2274,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2295,7 +2295,7 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2316,7 +2316,7 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2337,7 +2337,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2358,7 +2358,7 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2378,7 +2378,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
@@ -2393,7 +2393,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
return size;
}
-int
+static int
lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
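All of the lpfc_fdmi_*_attr builders above lose external linkage; they appear to be reachable only through function-pointer tables inside lpfc_ct.c, so static linkage keeps them out of the global namespace and lets the compiler and sparse flag unused or mismatched entries. A self-contained sketch of that static-builder-plus-dispatch-table pattern, with hypothetical names and trivial payloads:

/* Standalone sketch of the pattern: static builders, reachable only via
 * an in-file dispatch table.  Names and payloads are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct attr_def { uint32_t buf[64]; };

static int attr_wwnn(struct attr_def *ad)  { ad->buf[0] = 0x10000000; return 8; }
static int attr_model(struct attr_def *ad) { ad->buf[0] = 0x20000000; return 8; }

static int (*const hba_attr_tbl[])(struct attr_def *) = {
	attr_wwnn,
	attr_model,
};

int main(void)
{
	struct attr_def ad = { { 0 } };
	size_t i, total = 0;

	for (i = 0; i < sizeof(hba_attr_tbl) / sizeof(hba_attr_tbl[0]); i++)
		total += hba_attr_tbl[i](&ad);
	printf("built %zu attribute bytes\n", total);
	return 0;
}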
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c0af32f24954..b7d54bfb1df9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4617,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
-uint32_t
+static uint32_t
lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
uint32_t word0)
{
@@ -4629,7 +4629,7 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
return sizeof(struct fc_rdp_link_service_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
uint8_t *page_a0, uint8_t *page_a2)
{
@@ -4694,7 +4694,7 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
return sizeof(struct fc_rdp_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
READ_LNK_VAR *stat)
{
@@ -4723,7 +4723,7 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
return sizeof(struct fc_rdp_link_error_status_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
struct lpfc_vport *vport)
{
@@ -4748,7 +4748,7 @@ lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
return sizeof(struct fc_rdp_bbc_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
{
@@ -4776,7 +4776,7 @@ lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
return sizeof(struct fc_rdp_oed_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
@@ -4805,7 +4805,7 @@ lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
return sizeof(struct fc_rdp_oed_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
@@ -4834,7 +4834,7 @@ lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
return sizeof(struct fc_rdp_oed_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
@@ -4864,7 +4864,7 @@ lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
}
-uint32_t
+static uint32_t
lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
@@ -4893,7 +4893,7 @@ lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
return sizeof(struct fc_rdp_oed_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
uint8_t *page_a0, struct lpfc_vport *vport)
{
@@ -4907,7 +4907,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
return sizeof(struct fc_rdp_opd_sfp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
{
if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
@@ -4924,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
return sizeof(struct fc_fec_rdp_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
{
uint16_t rdp_cap = 0;
@@ -4986,7 +4986,7 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
return sizeof(struct fc_rdp_port_speed_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
struct lpfc_hba *phba)
{
@@ -5003,7 +5003,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
return sizeof(struct fc_rdp_port_name_desc);
}
-uint32_t
+static uint32_t
lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -5027,7 +5027,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
return sizeof(struct fc_rdp_port_name_desc);
}
-void
+static void
lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
int status)
{
@@ -5165,7 +5165,7 @@ free_rdp_context:
kfree(rdp_context);
}
-int
+static int
lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
{
LPFC_MBOXQ_t *mbox = NULL;
@@ -7995,7 +7995,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
-void
+static void
lpfc_start_fdmi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
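The RDP descriptor builders made static above each fill one fixed-size descriptor and return its length (sizeof(struct fc_rdp_*_desc)), which the completion path accumulates while walking the response payload. A toy, userspace-compilable sketch of that size-returning builder style; the descriptor names and lengths here are invented for illustration:

/* Toy version of the size-returning builder style; layouts and lengths
 * are invented. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t build_link_service_desc(uint8_t *p) { memset(p, 0, 8);  return 8; }
static uint32_t build_port_speed_desc(uint8_t *p)   { memset(p, 0, 12); return 12; }

int main(void)
{
	uint8_t payload[256];
	uint32_t len = 0;

	len += build_link_service_desc(payload + len);
	len += build_port_speed_desc(payload + len);
	printf("RDP payload: %u bytes\n", len);
	return 0;
}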
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 12dbe99ccc50..b234c50c255f 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2260,7 +2260,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
return 0;
}
-void
+static void
lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
MAILBOX_t *mb;
@@ -2281,7 +2281,7 @@ mbx_failed:
rdp_context->cmpl(phba, rdp_context, rc);
}
-void
+static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7080ce2920fd..c5326055beee 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5689,7 +5689,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
return rc;
}
-void
+static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
uint32_t feature)
{
@@ -8968,7 +8968,7 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* Since ABORTS must go on the same WQ of the command they are
* aborting, we use command's fcp_wqidx.
*/
-int
+static int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb)
{
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c1ed25adb17e..9ff57dee72d7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -189,25 +189,12 @@ u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
-void
-megasas_release_fusion(struct megasas_instance *instance);
-int
-megasas_ioc_init_fusion(struct megasas_instance *instance);
-void
-megasas_free_cmds_fusion(struct megasas_instance *instance);
-u8
-megasas_get_map_info(struct megasas_instance *instance);
-int
-megasas_sync_map_info(struct megasas_instance *instance);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
int seconds);
-void megasas_reset_reply_desc(struct megasas_instance *instance);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
-int megasas_check_mpio_paths(struct megasas_instance *instance,
- struct scsi_cmnd *scmd);
int
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -5036,7 +5023,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
- instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+ instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
@@ -5782,7 +5769,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
&instance->consumer_h);
if (!instance->producer || !instance->consumer) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate"
+ dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
"memory for producer, consumer\n");
goto fail_alloc_dma_buf;
}
@@ -6711,14 +6698,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
unsigned long flags;
u32 wait_time = MEGASAS_RESET_WAIT_TIME;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc)
- return -ENOMEM;
-
- if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
- error = -EFAULT;
- goto out_kfree_ioc;
- }
+ ioc = memdup_user(user_ioc, sizeof(*ioc));
+ if (IS_ERR(ioc))
+ return PTR_ERR(ioc);
instance = megasas_lookup_instance(ioc->host_no);
if (!instance) {
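Two small fixes in megaraid_sas_base.c above are worth calling out. First, find_first_bit() expects a size in bits, so passing sizeof(unsigned long) (8 on 64-bit) scanned only the low byte of the BAR mask; BITS_PER_LONG is the intended bound. Second, the management ioctl now uses memdup_user(), which folds kmalloc() plus copy_from_user() into one call that returns an ERR_PTR on failure. A hedged sketch of the memdup_user() pattern; the struct and function names are placeholders, only the call sequence mirrors the patch:

/* Hedged sketch of the memdup_user() conversion; names are placeholders. */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_ioc_packet { int host_no; };

static int demo_mgmt_ioctl(void __user *user_ioc)
{
	struct demo_ioc_packet *ioc;
	int error = 0;

	ioc = memdup_user(user_ioc, sizeof(*ioc)); /* kmalloc + copy_from_user */
	if (IS_ERR(ioc))
		return PTR_ERR(ioc);

	/* ... look up the instance from ioc->host_no and do the work ... */

	kfree(ioc);
	return error;
}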
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 80eaee22f5bc..e3bee04c1eb1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -991,5 +991,14 @@ union desc_value {
} u;
};
+void megasas_free_cmds_fusion(struct megasas_instance *instance);
+int megasas_ioc_init_fusion(struct megasas_instance *instance);
+u8 megasas_get_map_info(struct megasas_instance *instance);
+int megasas_sync_map_info(struct megasas_instance *instance);
+void megasas_release_fusion(struct megasas_instance *instance);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+void megasas_fusion_ocr_wq(struct work_struct *work);
#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 750f82c339d4..a1a5ceb42ce6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(mpt3sas_fwfault_debug,
" enable detection of firmware fault and halt firmware - (default=0)");
static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -218,8 +218,7 @@ _base_fault_reset_work(struct work_struct *work)
ioc->non_operational_loop = 0;
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
__func__, (rc == 0) ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
@@ -2040,7 +2039,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_unmap_resources - free controller resources
* @ioc: per adapter object
*/
-void
+static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
struct pci_dev *pdev = ioc->pdev;
@@ -2145,7 +2144,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
_base_mask_interrupts(ioc);
- r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ r = _base_get_ioc_facts(ioc);
if (r)
goto out_fail;
@@ -3183,12 +3182,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 success, anything else error
*/
static int
-_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
struct mpt3sas_facts *facts;
u16 max_sge_elements;
@@ -3658,29 +3656,25 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
* _base_wait_on_iocstate - waiting on a particular ioc state
* @ioc_state: controller state { READY, OPERATIONAL, or RESET }
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
- int sleep_flag)
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
{
u32 count, cntdn;
u32 current_state;
count = 0;
- cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ cntdn = 1000 * timeout;
do {
current_state = mpt3sas_base_get_iocstate(ioc, 1);
if (current_state == ioc_state)
return 0;
if (count && current_state == MPI2_IOC_STATE_FAULT)
break;
- if (sleep_flag == CAN_SLEEP)
- usleep_range(1000, 1500);
- else
- udelay(500);
+
+ usleep_range(1000, 1500);
count++;
} while (--cntdn);
@@ -3692,24 +3686,22 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
* a write to the doorbell)
* @ioc: per adapter object
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
static int
-_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
- int sleep_flag)
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
u32 cntdn, count;
u32 int_status;
count = 0;
- cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ cntdn = 1000 * timeout;
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
@@ -3718,10 +3710,35 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
ioc->name, __func__, count, timeout));
return 0;
}
- if (sleep_flag == CAN_SLEEP)
- usleep_range(1000, 1500);
- else
- udelay(500);
+
+ usleep_range(1000, 1500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+static int
+_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = 2000 * timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+
+ udelay(500);
count++;
} while (--cntdn);
@@ -3729,13 +3746,13 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
"%s: failed due to timeout count(%d), int_status(%x)!\n",
ioc->name, __func__, count, int_status);
return -EFAULT;
+
}
/**
* _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
* @ioc: per adapter object
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*
@@ -3743,15 +3760,14 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
* doorbell.
*/
static int
-_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
- int sleep_flag)
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
u32 cntdn, count;
u32 int_status;
u32 doorbell;
count = 0;
- cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ cntdn = 1000 * timeout;
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
@@ -3769,10 +3785,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
} else if (int_status == 0xFFFFFFFF)
goto out;
- if (sleep_flag == CAN_SLEEP)
- usleep_range(1000, 1500);
- else
- udelay(500);
+ usleep_range(1000, 1500);
count++;
} while (--cntdn);
@@ -3787,20 +3800,18 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
* _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
* @ioc: per adapter object
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*
*/
static int
-_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
- int sleep_flag)
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
u32 cntdn, count;
u32 doorbell_reg;
count = 0;
- cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ cntdn = 1000 * timeout;
do {
doorbell_reg = readl(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
@@ -3809,10 +3820,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
ioc->name, __func__, count, timeout));
return 0;
}
- if (sleep_flag == CAN_SLEEP)
- usleep_range(1000, 1500);
- else
- udelay(500);
+
+ usleep_range(1000, 1500);
count++;
} while (--cntdn);
@@ -3827,13 +3836,11 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
* @ioc: per adapter object
* @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
- int sleep_flag)
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
u32 ioc_state;
int r = 0;
@@ -3852,12 +3859,11 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
&ioc->chip->Doorbell);
- if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ if ((_base_wait_for_doorbell_ack(ioc, 15))) {
r = -EFAULT;
goto out;
}
- ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
- timeout, sleep_flag);
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
pr_err(MPT3SAS_FMT
"%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -3879,18 +3885,16 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
* @reply_bytes: reply length
* @reply: pointer to reply payload
* @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
- u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+ u32 *request, int reply_bytes, u16 *reply, int timeout)
{
MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
int i;
u8 failed;
- u16 dummy;
__le32 *mfp;
/* make sure doorbell is not in use */
@@ -3911,7 +3915,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
&ioc->chip->Doorbell);
- if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ if ((_base_spin_on_doorbell_int(ioc, 5))) {
pr_err(MPT3SAS_FMT
"doorbell handshake int failed (line=%d)\n",
ioc->name, __LINE__);
@@ -3919,7 +3923,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
writel(0, &ioc->chip->HostInterruptStatus);
- if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ if ((_base_wait_for_doorbell_ack(ioc, 5))) {
pr_err(MPT3SAS_FMT
"doorbell handshake ack failed (line=%d)\n",
ioc->name, __LINE__);
@@ -3929,7 +3933,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
- if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -3941,7 +3945,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* now wait for the reply */
- if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ if ((_base_wait_for_doorbell_int(ioc, timeout))) {
pr_err(MPT3SAS_FMT
"doorbell handshake int failed (line=%d)\n",
ioc->name, __LINE__);
@@ -3952,7 +3956,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
- if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ if ((_base_wait_for_doorbell_int(ioc, 5))) {
pr_err(MPT3SAS_FMT
"doorbell handshake int failed (line=%d)\n",
ioc->name, __LINE__);
@@ -3963,22 +3967,22 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
writel(0, &ioc->chip->HostInterruptStatus);
for (i = 2; i < default_reply->MsgLength * 2; i++) {
- if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ if ((_base_wait_for_doorbell_int(ioc, 5))) {
pr_err(MPT3SAS_FMT
"doorbell handshake int failed (line=%d)\n",
ioc->name, __LINE__);
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
- dummy = readl(&ioc->chip->Doorbell);
+ readl(&ioc->chip->Doorbell);
else
reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
- _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
- if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ _base_wait_for_doorbell_int(ioc, 5);
+ if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
dhsprintk(ioc, pr_info(MPT3SAS_FMT
"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
}
@@ -4015,7 +4019,6 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
bool issue_reset = false;
int rc;
void *request;
@@ -4068,7 +4071,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
@@ -4093,8 +4096,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
rc = -EFAULT;
out:
@@ -4119,7 +4121,6 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
bool issue_reset = false;
int rc;
void *request;
@@ -4170,7 +4171,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4191,8 +4192,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
rc = -EFAULT;
out:
@@ -4203,12 +4203,11 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
/**
* _base_get_port_facts - obtain port facts reply and save in ioc
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
{
Mpi2PortFactsRequest_t mpi_request;
Mpi2PortFactsReply_t mpi_reply;
@@ -4224,7 +4223,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
mpi_request.PortNumber = port;
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
- (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4247,13 +4246,11 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
* _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
* @ioc: per adapter object
* @timeout:
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
- int sleep_flag)
+_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
u32 ioc_state;
int rc;
@@ -4287,8 +4284,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
goto issue_diag_reset;
}
- ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
- timeout, sleep_flag);
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
dfailprintk(ioc, printk(MPT3SAS_FMT
"%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4297,19 +4293,18 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
}
issue_diag_reset:
- rc = _base_diag_reset(ioc, sleep_flag);
+ rc = _base_diag_reset(ioc);
return rc;
}
/**
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2IOCFactsRequest_t mpi_request;
Mpi2IOCFactsReply_t mpi_reply;
@@ -4319,7 +4314,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
- r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+ r = _base_wait_for_iocstate(ioc, 10);
if (r) {
dfailprintk(ioc, printk(MPT3SAS_FMT
"%s: failed getting to correct state\n",
@@ -4331,7 +4326,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
memset(&mpi_request, 0, mpi_request_sz);
mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
- (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4391,12 +4386,11 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/**
* _base_send_ioc_init - send ioc_init to firmware
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2IOCInitRequest_t mpi_request;
Mpi2IOCInitReply_t mpi_reply;
@@ -4479,8 +4473,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
r = _base_handshake_req_reply_wait(ioc,
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
- sleep_flag);
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4555,16 +4548,14 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
/**
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2PortEnableRequest_t *mpi_request;
Mpi2PortEnableReply_t *mpi_reply;
- unsigned long timeleft;
int r = 0;
u16 smid;
u16 ioc_status;
@@ -4592,8 +4583,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
init_completion(&ioc->port_enable_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
- 300*HZ);
+ wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
ioc->name, __func__);
@@ -4737,15 +4727,13 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
/**
* _base_event_notification - send event notification
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2EventNotificationRequest_t *mpi_request;
- unsigned long timeleft;
u16 smid;
int r = 0;
int i;
@@ -4777,7 +4765,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
ioc->name, __func__);
@@ -4827,19 +4815,18 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
return;
mutex_lock(&ioc->base_cmds.mutex);
- _base_event_notification(ioc, CAN_SLEEP);
+ _base_event_notification(ioc);
mutex_unlock(&ioc->base_cmds.mutex);
}
/**
* _base_diag_reset - the "big hammer" start of day reset
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
u32 host_diagnostic;
u32 ioc_state;
@@ -4867,10 +4854,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
/* wait 100 msec */
- if (sleep_flag == CAN_SLEEP)
- msleep(100);
- else
- mdelay(100);
+ msleep(100);
if (count++ > 20)
goto out;
@@ -4890,10 +4874,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
&ioc->chip->HostDiagnostic);
/*This delay allows the chip PCIe hardware time to finish reset tasks*/
- if (sleep_flag == CAN_SLEEP)
- msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
- else
- mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+ msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
/* Approximately 300 second max wait */
for (count = 0; count < (300000000 /
@@ -4906,13 +4887,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
- /* Wait to pass the second read delay window */
- if (sleep_flag == CAN_SLEEP)
- msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
- / 1000);
- else
- mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
- / 1000);
+ msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
}
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
@@ -4941,8 +4916,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
drsprintk(ioc, pr_info(MPT3SAS_FMT
"Wait for FW to go to the READY state\n", ioc->name));
- ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
- sleep_flag);
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
if (ioc_state) {
pr_err(MPT3SAS_FMT
"%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4961,14 +4935,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/**
* _base_make_ioc_ready - put controller in READY state
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
- enum reset_type type)
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
u32 ioc_state;
int rc;
@@ -4995,10 +4967,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
ioc->name, __func__, ioc_state);
return -EFAULT;
}
- if (sleep_flag == CAN_SLEEP)
- ssleep(1);
- else
- mdelay(1000);
+ ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
}
}
@@ -5024,24 +4993,23 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
if (!(_base_send_ioc_reset(ioc,
- MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
return 0;
}
issue_diag_reset:
- rc = _base_diag_reset(ioc, CAN_SLEEP);
+ rc = _base_diag_reset(ioc);
return rc;
}
/**
* _base_make_ioc_operational - put controller in OPERATIONAL state
* @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* Returns 0 for success, non-zero for failure.
*/
static int
-_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
int r, i, index;
unsigned long flags;
@@ -5160,7 +5128,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
}
skip_init_reply_post_free_queue:
- r = _base_send_ioc_init(ioc, sleep_flag);
+ r = _base_send_ioc_init(ioc);
if (r)
return r;
@@ -5186,13 +5154,11 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
skip_init_reply_post_host_index:
_base_unmask_interrupts(ioc);
- r = _base_event_notification(ioc, sleep_flag);
+ r = _base_event_notification(ioc);
if (r)
return r;
- if (sleep_flag == CAN_SLEEP)
- _base_static_config_pages(ioc);
-
+ _base_static_config_pages(ioc);
if (ioc->is_driver_loading) {
@@ -5211,7 +5177,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
return r; /* scan_start and scan_finished support */
}
- r = _base_send_port_enable(ioc, sleep_flag);
+ r = _base_send_port_enable(ioc);
if (r)
return r;
@@ -5235,7 +5201,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
if (ioc->chip_phys && ioc->chip) {
_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ _base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
}
@@ -5292,7 +5258,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
goto out_free_resources;
pci_set_drvdata(ioc->pdev, ioc->shost);
- r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ r = _base_get_ioc_facts(ioc);
if (r)
goto out_free_resources;
@@ -5326,7 +5292,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_mpi = &_base_build_sg;
ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
- r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ r = _base_make_ioc_ready(ioc, SOFT_RESET);
if (r)
goto out_free_resources;
@@ -5338,12 +5304,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
}
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
- r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ r = _base_get_port_facts(ioc, i);
if (r)
goto out_free_resources;
}
- r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ r = _base_allocate_memory_pools(ioc);
if (r)
goto out_free_resources;
@@ -5429,7 +5395,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
- r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ r = _base_make_ioc_operational(ioc);
if (r)
goto out_free_resources;
@@ -5565,21 +5531,18 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
/**
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
*
* This function waits (3s) for all pending commands to complete
* prior to putting the controller in reset.
*/
static void
-_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
unsigned long flags;
u16 i;
ioc->pending_io_count = 0;
- if (sleep_flag != CAN_SLEEP)
- return;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
@@ -5602,13 +5565,12 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/**
* mpt3sas_base_hard_reset_handler - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
* Returns 0 for success, non-zero for failure.
*/
int
-mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
enum reset_type type)
{
int r;
@@ -5629,13 +5591,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
if (mpt3sas_fwfault_debug)
mpt3sas_halt_firmware(ioc);
- /* TODO - What we really should be doing is pulling
- * out all the code associated with NO_SLEEP; its never used.
- * That is legacy code from mpt fusion driver, ported over.
- * I will leave this BUG_ON here for now till its been resolved.
- */
- BUG_ON(sleep_flag == NO_SLEEP);
-
/* wait for an active reset in progress to complete */
if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
do {
@@ -5660,9 +5615,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
is_fault = 1;
}
_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
- _wait_for_commands_to_complete(ioc, sleep_flag);
+ _wait_for_commands_to_complete(ioc);
_base_mask_interrupts(ioc);
- r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
@@ -5675,7 +5630,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
r = -EFAULT;
goto out;
}
- r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ r = _base_get_ioc_facts(ioc);
if (r)
goto out;
@@ -5684,7 +5639,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
"Please reboot the system and ensure that the correct"
" firmware version is running\n", ioc->name);
- r = _base_make_ioc_operational(ioc, sleep_flag);
+ r = _base_make_ioc_operational(ioc);
if (!r)
_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
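
The hunks above all follow the same pattern: every remaining caller of these helpers runs in process context, so the CAN_SLEEP/NO_SLEEP branching collapses to the sleepable primitive, and the never-read timeleft from wait_for_completion_timeout() is dropped in favour of checking the command's completion status. Below is a minimal sketch of the resulting idiom; my_cmds, MY_CMD_COMPLETE and the timeout value are placeholders, not mpt3sas symbols.

    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct my_cmds {
            struct completion done;
            u16 status;             /* set by the reply handler */
    };

    #define MY_CMD_COMPLETE 0x0001

    static int my_send_and_wait(struct my_cmds *cmds, unsigned long timeout_secs)
    {
            init_completion(&cmds->done);
            /* ... hand the request to the firmware here ... */

            /*
             * Return value deliberately ignored: success is decided by the
             * status flag set from the reply path, mirroring the driver's
             * MPT3_CMD_COMPLETE test.
             */
            wait_for_completion_timeout(&cmds->done, timeout_secs * HZ);
            if (!(cmds->status & MY_CMD_COMPLETE))
                    return -ETIMEDOUT;
            return 0;
    }

    static void my_settle_delay(void)
    {
            /*
             * Was: if (sleep_flag == CAN_SLEEP) msleep(100); else mdelay(100);
             * with only sleepable callers left, msleep() alone is enough.
             */
            msleep(100);
    }
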
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 892c9be008b5..3e71bc1b4a80 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -119,10 +119,6 @@
#define MPT_MAX_CALLBACKS 32
-
-#define CAN_SLEEP 1
-#define NO_SLEEP 0
-
#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
/* reserved for issuing internally framed scsi io cmds */
#define INTERNAL_SCSIIO_CMDS_COUNT 3
@@ -478,7 +474,7 @@ struct _sas_device {
u8 pfa_led_on;
u8 pend_sas_rphy_add;
u8 enclosure_level;
- u8 connector_name[4];
+ u8 connector_name[5];
struct kref refcount;
};
@@ -794,16 +790,6 @@ struct reply_post_struct {
dma_addr_t reply_post_free_dma;
};
-/**
- * enum mutex_type - task management mutex type
- * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
- * @TM_MUTEX_ON: mutex is required
- */
-enum mutex_type {
- TM_MUTEX_OFF = 0,
- TM_MUTEX_ON = 1,
-};
-
typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
/**
* struct MPT3SAS_ADAPTER - per adapter struct
@@ -1229,7 +1215,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
enum reset_type type);
void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1291,7 +1277,11 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout, enum mutex_type m_type);
+ ulong timeout);
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout);
+
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
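
Also note the connector_name change in this header: the array grows from 4 to 5 bytes so the 4-byte ConnectorName field copied from the firmware page can always be NUL-terminated before it is printed with "%s" (the matching memcpy sites appear in the mpt3sas_scsih.c hunks below). A hedged sketch of that copy-and-terminate pattern, with illustrative names rather than driver symbols:

    #include <linux/string.h>
    #include <linux/types.h>

    struct my_sas_device {
            u8 connector_name[5];   /* 4 firmware bytes + terminating '\0' */
    };

    static void my_set_connector_name(struct my_sas_device *dev,
                                      const u8 fw_name[4])
    {
            memcpy(dev->connector_name, fw_name, 4);
            dev->connector_name[4] = '\0';  /* now safe to format with "%s" */
    }
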
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index a6914ec99cc0..cebfd734fd76 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -285,7 +285,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
{
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
Mpi2ConfigRequest_t *config_request;
int r;
u8 retry_count, issue_host_reset = 0;
@@ -386,8 +385,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
- timeout*HZ);
+ wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
ioc->name, __func__);
@@ -491,8 +489,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mutex_unlock(&ioc->config_cmds.mutex);
if (issue_host_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d00f09666b6..26cdc127ac89 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -518,7 +518,7 @@ mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
*
* Called when an application requests the fasync callback handler.
*/
-int
+static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
return fasync_helper(fd, filep, mode, &async_queue);
@@ -530,7 +530,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-unsigned int
+static unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -641,9 +641,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
u32 ioc_state;
- u16 ioc_status;
u16 smid;
- unsigned long timeout, timeleft;
+ unsigned long timeout;
u8 issue_reset;
u32 sz;
void *psge;
@@ -914,8 +913,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
else
timeout = karg.timeout;
- timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
- timeout*HZ);
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
@@ -938,7 +936,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
mpi_reply = ioc->ctl_cmds.reply;
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
(ioc->logging_level & MPT_DEBUG_TM)) {
@@ -1001,13 +998,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->name,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
- mpt3sas_scsih_issue_tm(ioc,
+ mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
- TM_MUTEX_ON);
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
} else
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
out:
@@ -1220,8 +1215,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
__func__));
- retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
pr_info(MPT3SAS_FMT "host reset: %s\n",
ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
@@ -1381,7 +1375,6 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
Mpi2DiagBufferPostRequest_t *mpi_request;
Mpi2DiagBufferPostReply_t *mpi_reply;
u8 buffer_type;
- unsigned long timeleft;
u16 smid;
u16 ioc_status;
u32 ioc_state;
@@ -1499,7 +1492,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
init_completion(&ioc->ctl_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1538,8 +1531,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
out:
@@ -1800,7 +1792,6 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u16 ioc_status;
u32 ioc_state;
int rc;
- unsigned long timeleft;
dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -1848,7 +1839,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
init_completion(&ioc->ctl_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1974,8 +1965,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return rc;
}
@@ -1995,7 +1985,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
Mpi2DiagBufferPostReply_t *mpi_reply;
int rc, i;
u8 buffer_type;
- unsigned long timeleft, request_size, copy_size;
+ unsigned long request_size, copy_size;
u16 smid;
u16 ioc_status;
u8 issue_reset = 0;
@@ -2116,7 +2106,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
init_completion(&ioc->ctl_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2155,8 +2145,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
out:
@@ -2352,7 +2341,7 @@ out_unlock_pciaccess:
* @cmd - ioctl opcode
* @arg -
*/
-long
+static long
_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
@@ -2372,7 +2361,7 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* @cmd - ioctl opcode
* @arg -
*/
-long
+static long
_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
@@ -2392,7 +2381,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*
* This routine handles 32 bit applications in 64bit os.
*/
-long
+static long
_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
long ret;
@@ -2410,7 +2399,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
*
* This routine handles 32 bit applications in 64bit os.
*/
-long
+static long
_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
long ret;
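
The _ctl_* handlers above are reachable only through the file_operations tables in this file, so they are given static (internal) linkage; the same treatment is applied to many scsih_* and mvs_* functions in the files that follow. A minimal sketch of the idiom, using hypothetical names:

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Reachable only through the ops table below, hence static. */
    static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
            return 0;       /* a real handler would dispatch on cmd here */
    }

    static const struct file_operations my_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = my_ioctl,
    };
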
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index cd91a684c945..209a969a979d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1195,7 +1195,7 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
*
* Returns queue depth.
*/
-int
+static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
struct Scsi_Host *shost = sdev->host;
@@ -1244,7 +1244,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
-int
+static int
scsih_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1311,7 +1311,7 @@ scsih_target_alloc(struct scsi_target *starget)
*
* Returns nothing.
*/
-void
+static void
scsih_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1320,7 +1320,6 @@ scsih_target_destroy(struct scsi_target *starget)
struct _sas_device *sas_device;
struct _raid_device *raid_device;
unsigned long flags;
- struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1339,7 +1338,6 @@ scsih_target_destroy(struct scsi_target *starget)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1369,7 +1367,7 @@ scsih_target_destroy(struct scsi_target *starget)
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
-int
+static int
scsih_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
@@ -1434,7 +1432,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
*
* Returns nothing.
*/
-void
+static void
scsih_slave_destroy(struct scsi_device *sdev)
{
struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -1527,7 +1525,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
* scsih_is_raid - return boolean indicating device is raid volume
* @dev the device struct object
*/
-int
+static int
scsih_is_raid(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1542,7 +1540,7 @@ scsih_is_raid(struct device *dev)
* scsih_get_resync - get raid volume resync percent complete
* @dev the device struct object
*/
-void
+static void
scsih_get_resync(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1603,7 +1601,7 @@ scsih_get_resync(struct device *dev)
* scsih_get_state - get raid volume level
* @dev the device struct object
*/
-void
+static void
scsih_get_state(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1805,7 +1803,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* Returns 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
-int
+static int
scsih_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2021,7 +2019,7 @@ scsih_slave_configure(struct scsi_device *sdev)
*
* Return nothing.
*/
-int
+static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int params[])
{
@@ -2201,7 +2199,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
* Context: user
*
* A generic API for sending task management requests to firmware.
@@ -2212,60 +2209,51 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
- enum mutex_type m_type)
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
u16 msix_task = 0;
- if (m_type == TM_MUTEX_ON)
- mutex_lock(&ioc->tm_cmds.mutex);
+ lockdep_assert_held(&ioc->tm_cmds.mutex);
+
if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
- rc = FAILED;
- goto err_out;
+ return FAILED;
}
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
- rc = FAILED;
- goto err_out;
+ return FAILED;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
dhsprintk(ioc, pr_info(MPT3SAS_FMT
"unexpected doorbell active!\n", ioc->name));
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
- rc = (!rc) ? SUCCESS : FAILED;
- goto err_out;
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_base_fault_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
- rc = (!rc) ? SUCCESS : FAILED;
- goto err_out;
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
}
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- rc = FAILED;
- goto err_out;
+ return FAILED;
}
if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
@@ -2292,19 +2280,17 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
else
msix_task = 0;
mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
- timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ rc = mpt3sas_base_hard_reset_handler(ioc,
+ FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
- ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
- mpt3sas_scsih_clear_tm_flag(ioc, handle);
- goto err_out;
+ goto out;
}
}
@@ -2356,17 +2342,23 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
break;
}
+out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
- if (m_type == TM_MUTEX_ON)
- mutex_unlock(&ioc->tm_cmds.mutex);
-
return rc;
+}
- err_out:
- if (m_type == TM_MUTEX_ON)
- mutex_unlock(&ioc->tm_cmds.mutex);
- return rc;
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+{
+ int ret;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+ smid_task, timeout);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return ret;
}
/**
@@ -2439,7 +2431,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
*
* Returns SUCCESS if command aborted else FAILED
*/
-int
+static int
scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2482,9 +2474,9 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2498,7 +2490,7 @@ scsih_abort(struct scsi_cmnd *scmd)
*
* Returns SUCCESS if command aborted else FAILED
*/
-int
+static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2541,9 +2533,9 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2561,7 +2553,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
*
* Returns SUCCESS if command aborted else FAILED
*/
-int
+static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2603,9 +2595,9 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30, TM_MUTEX_ON);
+ 30);
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2624,7 +2616,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
*
* Returns SUCCESS if command aborted else FAILED
*/
-int
+static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2641,8 +2633,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
goto out;
}
- retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
@@ -3455,7 +3446,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* Context - processed in interrupt context.
*/
-void
+static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
u32 event_context)
{
@@ -3494,7 +3485,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
*
* Context - processed in interrupt context.
*/
-void
+static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
u16 smid, u16 handle)
{
@@ -4032,7 +4023,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
* SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
-int
+static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -4701,7 +4692,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le16_to_cpu(mpi_reply->DevHandle));
mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
- if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+ if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
(scmd->sense_buffer[2] == MEDIUM_ERROR) ||
(scmd->sense_buffer[2] == HARDWARE_ERROR)))
@@ -5380,8 +5371,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
le16_to_cpu(sas_device_pg0.EnclosureLevel);
- memcpy(&sas_device->connector_name[0],
- &sas_device_pg0.ConnectorName[0], 4);
+ memcpy(sas_device->connector_name,
+ sas_device_pg0.ConnectorName, 4);
+ sas_device->connector_name[4] = '\0';
} else {
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
@@ -5508,8 +5500,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
le16_to_cpu(sas_device_pg0.EnclosureLevel);
- memcpy(&sas_device->connector_name[0],
- &sas_device_pg0.ConnectorName[0], 4);
+ memcpy(sas_device->connector_name,
+ sas_device_pg0.ConnectorName, 4);
+ sas_device->connector_name[4] = '\0';
} else {
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
@@ -6087,8 +6080,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
- TM_MUTEX_OFF);
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -6128,8 +6120,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out_no_lock;
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- TM_MUTEX_OFF);
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
+ 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -6297,8 +6289,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mutex_unlock(&ioc->scsih_cmds.mutex);
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return rc;
}
@@ -6311,11 +6302,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
- int rc;
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
sdev->no_uld_attach ? "hidding" : "exposing");
- rc = scsi_device_reprobe(sdev);
+ WARN_ON(scsi_device_reprobe(sdev));
}
/**
@@ -8137,7 +8127,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
* Routine called when unloading the driver.
* Return nothing.
*/
-void scsih_remove(struct pci_dev *pdev)
+static void scsih_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8210,7 +8200,7 @@ void scsih_remove(struct pci_dev *pdev)
*
* Return nothing.
*/
-void
+static void
scsih_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8451,7 +8441,7 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
* of scanning the entire bus. In our implementation, we will kick off
* firmware discovery.
*/
-void
+static void
scsih_scan_start(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8478,7 +8468,7 @@ scsih_scan_start(struct Scsi_Host *shost)
* scsi_host and the elapsed time of the scan in jiffies. In our implementation,
* we wait for firmware discovery to complete, then return 1.
*/
-int
+static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8608,7 +8598,7 @@ static struct raid_function_template mpt3sas_raid_functions = {
* MPI25_VERSION for SAS 3.0 HBA devices, and
* MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
*/
-u16
+static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{
@@ -8660,7 +8650,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
*
* Returns 0 success, anything else error.
*/
-int
+static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -8869,7 +8859,7 @@ out_add_shost_fail:
*
* Returns 0 success, anything else error.
*/
-int
+static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8896,7 +8886,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
*
* Returns 0 success, anything else error.
*/
-int
+static int
scsih_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8916,7 +8906,7 @@ scsih_resume(struct pci_dev *pdev)
if (r)
return r;
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
scsi_unblock_requests(shost);
mpt3sas_base_start_watchdog(ioc);
return 0;
@@ -8933,7 +8923,7 @@ scsih_resume(struct pci_dev *pdev)
* Return value:
* PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
*/
-pci_ers_result_t
+static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8970,7 +8960,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
* code after the PCI slot has been reset, just before we
* should resume normal operations.
*/
-pci_ers_result_t
+static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8987,8 +8977,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
if (rc)
return PCI_ERS_RESULT_DISCONNECT;
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
(rc == 0) ? "success" : "failed");
@@ -9007,7 +8996,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
* OK to resume normal operation. Use completion to allow
* halted scsi ops to resume.
*/
-void
+static void
scsih_pci_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9024,7 +9013,7 @@ scsih_pci_resume(struct pci_dev *pdev)
* scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
* @pdev: pointer to PCI device
*/
-pci_ers_result_t
+static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9152,7 +9141,7 @@ static struct pci_driver mpt3sas_driver = {
*
* Returns 0 success, anything else error.
*/
-int
+static int
scsih_init(void)
{
mpt2_ids = 0;
@@ -9202,7 +9191,7 @@ scsih_init(void)
*
* Returns 0 success, anything else error.
*/
-void
+static void
scsih_exit(void)
{
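
The task-management rework above splits the old TM_MUTEX_ON/TM_MUTEX_OFF argument into two entry points: mpt3sas_scsih_issue_tm() now asserts via lockdep_assert_held() that the caller already holds tm_cmds.mutex, while mpt3sas_scsih_issue_locked_tm() is a thin wrapper that takes the mutex itself. The same split, reduced to a sketch with placeholder names (my_tm_mutex, my_issue_tm):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(my_tm_mutex);

    /* Core path: callers such as the broadcast-primitive handler already
     * hold my_tm_mutex, so only assert it here. */
    static int my_issue_tm(u16 handle)
    {
            lockdep_assert_held(&my_tm_mutex);
            /* ... build and post the task-management request ... */
            return 0;
    }

    /* Wrapper for callers (EH callbacks, ioctls) that do not hold the
     * mutex yet. */
    static int my_issue_locked_tm(u16 handle)
    {
            int ret;

            mutex_lock(&my_tm_mutex);
            ret = my_issue_tm(handle);
            mutex_unlock(&my_tm_mutex);
            return ret;
    }
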
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index ff93286bc32f..b74faf1a69b2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -300,7 +300,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
int rc;
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
@@ -394,8 +393,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc->name, (unsigned long long)sas_address));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
- 10*HZ);
+ wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -446,8 +444,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
@@ -1107,7 +1104,6 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
int rc;
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
@@ -1203,8 +1199,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
phy->number));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
- 10*HZ);
+ wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1253,8 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
@@ -1421,7 +1415,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
int rc;
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
void *psge;
u8 issue_reset = 0;
void *data_out = NULL;
@@ -1522,8 +1515,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
- 10*HZ);
+ wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1564,8 +1556,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
@@ -1899,7 +1890,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
int rc;
u16 smid;
u32 ioc_state;
- unsigned long timeleft;
void *psge;
u8 issue_reset = 0;
dma_addr_t dma_addr_in = 0;
@@ -2043,8 +2033,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
- 10*HZ);
+ wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s : timeout\n",
@@ -2103,8 +2092,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
issue_host_reset:
if (issue_reset) {
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
rc = -ETIMEDOUT;
}
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 8bb06995adfb..b757d389e32f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -136,7 +136,8 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
}
}
-void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+static void
+mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -563,7 +564,7 @@ static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
return MVS_ID_NOT_MAPPED;
}
-void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
int i;
struct scatterlist *sg;
@@ -633,7 +634,7 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
mvs_write_port_vsr_data(mvi, i, tmp);
}
-void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates)
{
u32 lrmin = 0, lrmax = 0;
@@ -668,20 +669,20 @@ static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
}
-u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex;
return ior32(SPI_DATA_REG_64XX);
}
-void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex;
iow32(SPI_DATA_REG_64XX, data);
}
-int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
u32 *dwCmd,
u8 cmd,
u8 read,
@@ -705,7 +706,7 @@ int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
}
-int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
void __iomem *regs = mvi->regs_ex;
int retry;
@@ -720,7 +721,7 @@ int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
return 0;
}
-int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
void __iomem *regs = mvi->regs_ex;
u32 i, dwTmp;
@@ -735,7 +736,7 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
int buf_len, int from, void *prd)
{
int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index f6fc4a705924..4c57d9abce7b 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,8 +48,8 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
}
}
-void set_phy_tuning(struct mvs_info *mvi, int phy_id,
- struct phy_tuning phy_tuning)
+static void set_phy_tuning(struct mvs_info *mvi, int phy_id,
+ struct phy_tuning phy_tuning)
{
u32 tmp, setting_0 = 0, setting_1 = 0;
u8 i;
@@ -110,8 +110,8 @@ void set_phy_tuning(struct mvs_info *mvi, int phy_id,
}
}
-void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
- struct ffe_control ffe)
+static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
+ struct ffe_control ffe)
{
u32 tmp;
@@ -177,7 +177,7 @@ void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
}
/*Notice: this function must be called when phy is disabled*/
-void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
+static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
union reg_phy_cfg phy_cfg, phy_cfg_tmp;
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
@@ -679,7 +679,8 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
}
}
-void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+static void
+mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -906,8 +907,8 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
}
-void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
- struct sas_phy_linkrates *rates)
+static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+ struct sas_phy_linkrates *rates)
{
u32 lrmax = 0;
u32 tmp;
@@ -936,25 +937,25 @@ static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
}
-u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
return mr32(SPI_RD_DATA_REG_94XX);
}
-void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
mw32(SPI_RD_DATA_REG_94XX, data);
}
-int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
- u32 *dwCmd,
- u8 cmd,
- u8 read,
- u8 length,
- u32 addr
+static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+ u32 *dwCmd,
+ u8 cmd,
+ u8 read,
+ u8 length,
+ u32 addr
)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
@@ -974,7 +975,7 @@ int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
}
-int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
@@ -982,7 +983,7 @@ int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
return 0;
}
-int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
void __iomem *regs = mvi->regs_ex - 0x10200;
u32 i, dwTmp;
@@ -997,8 +998,8 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
- int buf_len, int from, void *prd)
+static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 5b9fcff6cd94..86eb19902bac 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -74,7 +74,7 @@ void mvs_tag_init(struct mvs_info *mvi)
mvs_tag_clear(mvi, i);
}
-struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
unsigned long i = 0, j = 0, hi = 0;
struct sas_ha_struct *sha = dev->port->ha;
@@ -102,7 +102,7 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
}
-int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
unsigned long i = 0, j = 0, n = 0, num = 0;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
@@ -1158,7 +1158,7 @@ void mvs_port_deformed(struct asd_sas_phy *sas_phy)
mvs_port_notify_deformed(sas_phy, 1);
}
-struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
u32 dev;
for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
@@ -1175,7 +1175,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
return NULL;
}
-void mvs_free_dev(struct mvs_device *mvi_dev)
+static void mvs_free_dev(struct mvs_device *mvi_dev)
{
u32 id = mvi_dev->device_id;
memset(mvi_dev, 0, sizeof(*mvi_dev));
@@ -1185,7 +1185,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}
-int mvs_dev_found_notify(struct domain_device *dev, int lock)
+static int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
unsigned long flags = 0;
int res = 0;
@@ -1241,7 +1241,7 @@ int mvs_dev_found(struct domain_device *dev)
return mvs_dev_found_notify(dev, 1);
}
-void mvs_dev_gone_notify(struct domain_device *dev)
+static void mvs_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
struct mvs_device *mvi_dev = dev->lldd_dev;
@@ -1611,7 +1611,7 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
return stat;
}
-void mvs_set_sense(u8 *buffer, int len, int d_sense,
+static void mvs_set_sense(u8 *buffer, int len, int d_sense,
int key, int asc, int ascq)
{
memset(buffer, 0, len);
@@ -1650,7 +1650,7 @@ void mvs_set_sense(u8 *buffer, int len, int d_sense,
return;
}
-void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
+static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
u8 key, u8 asc, u8 asc_q)
{
iu->datapres = 2;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
deleted file mode 100644
index 2f689ae7a803..000000000000
--- a/drivers/scsi/pas16.c
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
- * This driver adapted from Drew Eckhardt's Trantor T128 driver
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
- *
- * ( Based on T128 - DISTRIBUTION RELEASE 3. )
- *
- * Modified to work with the Pro Audio Spectrum/Studio 16
- * by John Weidman.
- *
- *
- * For more information, please consult
- *
- * Media Vision
- * (510) 770-8600
- * (800) 348-7116
- */
-
-/*
- * The card is detected and initialized in one of several ways :
- * 1. Autoprobe (default) - There are many different models of
- * the Pro Audio Spectrum/Studio 16, and I only have one of
- * them, so this may require a little tweaking. An interrupt
- * is triggered to autoprobe for the interrupt line. Note:
- * with the newer model boards, the interrupt is set via
- * software after reset using the default_irq for the
- * current board number.
- *
- * 2. With command line overrides - pas16=port,irq may be
- * used on the LILO command line to override the defaults.
- *
- * 3. With the PAS16_OVERRIDE compile time define. This is
- * specified as an array of address, irq tuples. Ie, for
- * one board at the default 0x388 address, IRQ10, I could say
- * -DPAS16_OVERRIDE={{0x388, 10}}
- * NOTE: Untested.
- *
- * 4. When included as a module, with arguments passed on the command line:
- * pas16_irq=xx the interrupt
- * pas16_addr=xx the port
- * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5"
- *
- * Note that if the override methods are used, place holders must
- * be specified for other boards in the system.
- *
- *
- * Configuration notes :
- * The current driver does not support interrupt sharing with the
- * sound portion of the card. If you use the same irq for the
- * scsi port and sound you will have problems. Either use
- * a different irq for the scsi port or don't use interrupts
- * for the scsi port.
- *
- * If you have problems with your card not being recognized, use
- * the LILO command line override. Try to get it recognized without
- * interrupts. Ie, for a board at the default 0x388 base port,
- * boot: linux pas16=0x388,0
- *
- * NO_IRQ (0) should be specified for no interrupt,
- * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
- * on the command line.
- */
-
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <scsi/scsi_host.h>
-#include "pas16.h"
-#include "NCR5380.h"
-
-
-static unsigned short pas16_addr;
-static int pas16_irq;
-
-
-static const int scsi_irq_translate[] =
- { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
-
-/* The default_irqs array contains values used to set the irq into the
- * board via software (as must be done on newer model boards without
- * irq jumpers on the board). The first value in the array will be
- * assigned to logical board 0, the next to board 1, etc.
- */
-static int default_irqs[] __initdata =
- { PAS16_DEFAULT_BOARD_1_IRQ,
- PAS16_DEFAULT_BOARD_2_IRQ,
- PAS16_DEFAULT_BOARD_3_IRQ,
- PAS16_DEFAULT_BOARD_4_IRQ
- };
-
-static struct override {
- unsigned short io_port;
- int irq;
-} overrides
-#ifdef PAS16_OVERRIDE
- [] __initdata = PAS16_OVERRIDE;
-#else
- [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
- {0,IRQ_AUTO}};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-static struct base {
- unsigned short io_port;
- int noauto;
-} bases[] __initdata =
- { {PAS16_DEFAULT_BASE_1, 0},
- {PAS16_DEFAULT_BASE_2, 0},
- {PAS16_DEFAULT_BASE_3, 0},
- {PAS16_DEFAULT_BASE_4, 0}
- };
-
-#define NO_BASES ARRAY_SIZE(bases)
-
-static const unsigned short pas16_offset[ 8 ] =
- {
- 0x1c00, /* OUTPUT_DATA_REG */
- 0x1c01, /* INITIATOR_COMMAND_REG */
- 0x1c02, /* MODE_REG */
- 0x1c03, /* TARGET_COMMAND_REG */
- 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
- 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
- 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
- * START_DMA_TARGET_RECEIVE_REG wo
- */
- 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
- * START_DMA_INITIATOR_RECEIVE_REG wo
- */
- };
-
-
-/*
- * Function : enable_board( int board_num, unsigned short port )
- *
- * Purpose : set address in new model board
- *
- * Inputs : board_num - logical board number 0-3, port - base address
- *
- */
-
-static void __init
- enable_board( int board_num, unsigned short port )
-{
- outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
- outb( port >> 2, MASTER_ADDRESS_PTR );
-}
-
-
-
-/*
- * Function : init_board( unsigned short port, int irq )
- *
- * Purpose : Set the board up to handle the SCSI interface
- *
- * Inputs : port - base address of the board,
- * irq - irq to assign to the SCSI port
- * force_irq - set it even if it conflicts with sound driver
- *
- */
-
-static void __init
- init_board( unsigned short io_port, int irq, int force_irq )
-{
- unsigned int tmp;
- unsigned int pas_irq_code;
-
- /* Initialize the SCSI part of the board */
-
- outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
- outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
- outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
-
- inb(io_port + pas16_offset[RESET_PARITY_INTERRUPT_REG]);
-
- /* Set the SCSI interrupt pointer without mucking up the sound
- * interrupt pointer in the same byte.
- */
- pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
- tmp = inb( io_port + IO_CONFIG_3 );
-
- if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
- && !force_irq )
- {
- printk( "pas16: WARNING: Can't use same irq as sound "
- "driver -- interrupts disabled\n" );
- /* Set up the drive parameters, disable 5380 interrupts */
- outb( 0x4d, io_port + SYS_CONFIG_4 );
- }
- else
- {
- tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
- outb( tmp, io_port + IO_CONFIG_3 );
-
- /* Set up the drive parameters and enable 5380 interrupts */
- outb( 0x6d, io_port + SYS_CONFIG_4 );
- }
-}
-
-
-/*
- * Function : pas16_hw_detect( unsigned short board_num )
- *
- * Purpose : determine if a pas16 board is present
- *
- * Inputs : board_num - logical board number ( 0 - 3 )
- *
- * Returns : 0 if board not found, 1 if found.
- */
-
-static int __init
- pas16_hw_detect( unsigned short board_num )
-{
- unsigned char board_rev, tmp;
- unsigned short io_port = bases[ board_num ].io_port;
-
- /* See if we can find a PAS16 board at the address associated
- * with this logical board number.
- */
-
- /* First, attempt to take a newer model board out of reset and
- * give it a base address. This shouldn't affect older boards.
- */
- enable_board( board_num, io_port );
-
- /* Now see if it looks like a PAS16 board */
- board_rev = inb( io_port + PCB_CONFIG );
-
- if( board_rev == 0xff )
- return 0;
-
- tmp = board_rev ^ 0xe0;
-
- outb( tmp, io_port + PCB_CONFIG );
- tmp = inb( io_port + PCB_CONFIG );
- outb( board_rev, io_port + PCB_CONFIG );
-
- if( board_rev != tmp ) /* Not a PAS-16 */
- return 0;
-
- if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
- return 0; /* return if no SCSI interface found */
-
- /* Mediavision has some new model boards that return ID bits
- * that indicate a SCSI interface, but they're not (LMS). We'll
- * put in an additional test to try to weed them out.
- */
-
- outb(0x01, io_port + WAIT_STATE); /* 1 Wait state */
- outb(0x20, io_port + pas16_offset[MODE_REG]); /* Is it really SCSI? */
- if (inb(io_port + pas16_offset[MODE_REG]) != 0x20) /* Write to a reg. */
- return 0; /* and try to read */
- outb(0x00, io_port + pas16_offset[MODE_REG]); /* it back. */
- if (inb(io_port + pas16_offset[MODE_REG]) != 0x00)
- return 0;
-
- return 1;
-}
-
-
-#ifndef MODULE
-/*
- * Function : pas16_setup(char *str, int *ints)
- *
- * Purpose : LILO command line initialization of the overrides array,
- *
- * Inputs : str - unused, ints - array of integer parameters with ints[0]
- * equal to the number of ints.
- *
- */
-
-static int __init pas16_setup(char *str)
-{
- static int commandline_current;
- int i;
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- if (ints[0] != 2)
- printk("pas16_setup : usage pas16=io_port,irq\n");
- else
- if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].io_port = (unsigned short) ints[1];
- overrides[commandline_current].irq = ints[2];
- for (i = 0; i < NO_BASES; ++i)
- if (bases[i].io_port == (unsigned short) ints[1]) {
- bases[i].noauto = 1;
- break;
- }
- ++commandline_current;
- }
- return 1;
-}
-
-__setup("pas16=", pas16_setup);
-#endif
-
-/*
- * Function : int pas16_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : detects and initializes PAS16 controllers
- * that were autoprobed, overridden on the LILO command line,
- * or specified at compile time.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- *
- * Returns : 1 if a host adapter was found, 0 if not.
- *
- */
-
-static int __init pas16_detect(struct scsi_host_template *tpnt)
-{
- static int current_override;
- static unsigned short current_base;
- struct Scsi_Host *instance;
- unsigned short io_port;
- int count;
-
- if (pas16_addr != 0) {
- overrides[0].io_port = pas16_addr;
- /*
- * This is how we avoid seeing more than
- * one host adapter at the same I/O port.
- * Cribbed shamelessly from pas16_setup().
- */
- for (count = 0; count < NO_BASES; ++count)
- if (bases[count].io_port == pas16_addr) {
- bases[count].noauto = 1;
- break;
- }
- }
- if (pas16_irq != 0)
- overrides[0].irq = pas16_irq;
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- io_port = 0;
-
- if (overrides[current_override].io_port)
- {
- io_port = overrides[current_override].io_port;
- enable_board( current_override, io_port );
- init_board( io_port, overrides[current_override].irq, 1 );
- }
- else
- for (; !io_port && (current_base < NO_BASES); ++current_base) {
- dprintk(NDEBUG_INIT, "pas16: probing io_port 0x%04x\n",
- (unsigned int)bases[current_base].io_port);
- if ( !bases[current_base].noauto &&
- pas16_hw_detect( current_base ) ){
- io_port = bases[current_base].io_port;
- init_board( io_port, default_irqs[ current_base ], 0 );
- dprintk(NDEBUG_INIT, "pas16: detected board\n");
- }
- }
-
- dprintk(NDEBUG_INIT, "pas16: io_port = 0x%04x\n",
- (unsigned int)io_port);
-
- if (!io_port)
- break;
-
- instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
- if(instance == NULL)
- goto out;
-
- instance->io_port = io_port;
-
- if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
- goto out_unregister;
-
- NCR5380_maybe_reset_bus(instance);
-
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
-
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
-
- if (instance->irq != NO_IRQ)
- if (request_irq(instance->irq, pas16_intr, 0,
- "pas16", instance)) {
- printk("scsi%d : IRQ%d not free, interrupts disabled\n",
- instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
-
- if (instance->irq == NO_IRQ) {
- printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- /* Disable 5380 interrupts, leave drive params the same */
- outb( 0x4d, io_port + SYS_CONFIG_4 );
- outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
- }
-
- dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
- instance->host_no, instance->irq);
-
- ++current_override;
- ++count;
- }
- return count;
-
-out_unregister:
- scsi_unregister(instance);
-out:
- return count;
-}
-
-/*
- * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip)
- *
- * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
- * the specified device / size.
- *
- * Inputs : size = size of device in sectors (512 bytes), dev = block device
- * major / minor, ip[] = {heads, sectors, cylinders}
- *
- * Returns : always 0 (success), initializes ip
- *
- */
-
-/*
- * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
- * using hard disks on a Trantor should verify that this mapping corresponds
- * to that used by the BIOS / ASPI driver by running the Linux fdisk program
- * and matching the H_C_S coordinates to what DOS uses.
- */
-
-static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int *ip)
-{
- int size = capacity;
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = size >> 11; /* I think I have it as /(32*64) */
- if( ip[2] > 1024 ) { /* yes, >, not >= */
- ip[0]=255;
- ip[1]=63;
- ip[2]=size/(63*255);
- if( ip[2] > 1023 ) /* yes >1023... */
- ip[2] = 1023;
- }
-
- return 0;
-}
-
-/*
- * Function : int pas16_pread (struct Scsi_Host *instance,
- * unsigned char *dst, int len)
- *
- * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
- * dst
- *
- * Inputs : dst = destination, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
- */
-
-static inline int pas16_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
-{
- register unsigned char *d = dst;
- register unsigned short reg = (unsigned short) (instance->io_port +
- P_DATA_REG_OFFSET);
- register int i = len;
- int ii = 0;
-
- while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
- ++ii;
-
- insb( reg, d, i );
-
- if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
- outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
- printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
- instance->host_no);
- return -1;
- }
- return 0;
-}
-
-/*
- * Function : int pas16_pwrite (struct Scsi_Host *instance,
- * unsigned char *src, int len)
- *
- * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
- * src
- *
- * Inputs : src = source, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
- */
-
-static inline int pas16_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
-{
- register unsigned char *s = src;
- register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
- register int i = len;
- int ii = 0;
-
- while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
- ++ii;
-
- outsb( reg, s, i );
-
- if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
- outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
- printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
- instance->host_no);
- return -1;
- }
- return 0;
-}
-
-#include "NCR5380.c"
-
-static int pas16_release(struct Scsi_Host *shost)
-{
- if (shost->irq != NO_IRQ)
- free_irq(shost->irq, shost);
- NCR5380_exit(shost);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "Pro Audio Spectrum-16 SCSI",
- .detect = pas16_detect,
- .release = pas16_release,
- .proc_name = "pas16",
- .info = pas16_info,
- .queuecommand = pas16_queue_command,
- .eh_abort_handler = pas16_abort,
- .eh_bus_reset_handler = pas16_bus_reset,
- .bios_param = pas16_biosparam,
- .can_queue = 32,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
- .cmd_size = NCR5380_CMD_SIZE,
- .max_sectors = 128,
-};
-#include "scsi_module.c"
-
-#ifdef MODULE
-module_param(pas16_addr, ushort, 0);
-module_param(pas16_irq, int, 0);
-#endif
-MODULE_LICENSE("GPL");
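The pas16_biosparam() routine deleted above derives a DOS-compatible heads/sectors/cylinders geometry from nothing but the capacity in 512-byte sectors: 64 heads and 32 sectors per track (so cylinders = capacity >> 11), falling back to 255 heads, 63 sectors and a 1023-cylinder clamp for larger disks. A minimal stand-alone sketch of that mapping, with a hypothetical 2 GiB capacity chosen purely for illustration:

    #include <stdio.h>

    /* Mirror of the deleted pas16_biosparam() geometry logic. */
    static void pas16_geometry(unsigned long capacity, int ip[3])
    {
            ip[0] = 64;                     /* heads */
            ip[1] = 32;                     /* sectors per track */
            ip[2] = capacity >> 11;         /* cylinders = capacity / (64 * 32) */
            if (ip[2] > 1024) {             /* large disk: switch geometry */
                    ip[0] = 255;
                    ip[1] = 63;
                    ip[2] = capacity / (63 * 255);
                    if (ip[2] > 1023)       /* clamp, as the driver did */
                            ip[2] = 1023;
            }
    }

    int main(void)
    {
            int ip[3];

            pas16_geometry(4194304UL, ip);  /* hypothetical 2 GiB disk */
            printf("H=%d S=%d C=%d\n", ip[0], ip[1], ip[2]);  /* H=255 S=63 C=261 */
            return 0;
    }

For 4194304 sectors the first guess gives 2048 cylinders, so the large-disk branch applies and the result is 255/63/261.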
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
deleted file mode 100644
index 9fe7f33660b4..000000000000
--- a/drivers/scsi/pas16.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * This driver adapted from Drew Eckhardt's Trantor T128 driver
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
- *
- * ( Based on T128 - DISTRIBUTION RELEASE 3. )
- *
- * Modified to work with the Pro Audio Spectrum/Studio 16
- * by John Weidman.
- *
- *
- * For more information, please consult
- *
- * Media Vision
- * (510) 770-8600
- * (800) 348-7116
- */
-
-
-#ifndef PAS16_H
-#define PAS16_H
-
-#define PAS16_DEFAULT_BASE_1 0x388
-#define PAS16_DEFAULT_BASE_2 0x384
-#define PAS16_DEFAULT_BASE_3 0x38c
-#define PAS16_DEFAULT_BASE_4 0x288
-
-#define PAS16_DEFAULT_BOARD_1_IRQ 10
-#define PAS16_DEFAULT_BOARD_2_IRQ 12
-#define PAS16_DEFAULT_BOARD_3_IRQ 14
-#define PAS16_DEFAULT_BOARD_4_IRQ 15
-
-
-/*
- * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
- * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
- * architecture is used, where a PAL drives the DMA signals on the 5380
- * allowing fast, blind transfers with proper handshaking.
- */
-
-
-/* The Time-out Counter register is used to safe-guard against a stuck
- * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
- * DMA conversion is used). The counter uses a 28.224MHz clock
- * divided by 14 as its clock source. In the case of a stuck byte in
- * the holding register, an interrupt is generated (and mixed with the
- * one with the drive) using the CD-ROM interrupt pointer.
- */
-
-#define P_TIMEOUT_COUNTER_REG 0x4000
-#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
- /* Bits D6-D0 contain timeout count */
-
-
-#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
-#define P_TS_TIM 0x80 /* check timeout status */
- /* Bits D6-D4 N/U */
-#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
- * the next rising edge will
- * cause a CD-ROM interrupt.
- * When set low, the interrupt
- * will be cleared. There is
- * no status available for
- * this interrupt.
- */
-#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
-#define P_TS_ENABLE_WAIT /* Enable Wait */
-
-#define P_TS_CT 0x01 /* clear timeout. Note: writing
- * to this register clears the
- * timeout error int. or status
- */
-
-
-/*
- * The data register reads/writes to/from the 5380 in pseudo-DMA mode
- */
-
-#define P_DATA_REG_OFFSET 0x5c00 /* rw */
-
-#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
-#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
-
-#define P_IRQ_STATUS 0x5c03
-#define P_IS_IRQ 0x80 /* DIRQ status */
-
-#define PCB_CONFIG 0x803
-#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
-#define SYS_CONFIG_4 0x8003
-#define WAIT_STATE 0xbc00
-#define OPERATION_MODE_1 0xec03
-#define IO_CONFIG_3 0xf002
-
-#define NCR5380_implementation_fields /* none */
-
-#define PAS16_io_port(reg) (instance->io_port + pas16_offset[(reg)])
-
-#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
-#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
-#define NCR5380_dma_recv_setup pas16_pread
-#define NCR5380_dma_send_setup pas16_pwrite
-#define NCR5380_dma_residual(instance) (0)
-
-#define NCR5380_intr pas16_intr
-#define NCR5380_queue_command pas16_queue_command
-#define NCR5380_abort pas16_abort
-#define NCR5380_bus_reset pas16_bus_reset
-#define NCR5380_info pas16_info
-
-/* 15 14 12 10 7 5 3
- 1101 0100 1010 1000 */
-
-#define PAS16_IRQS 0xd4a8
-
-#endif /* PAS16_H */
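The PAS16_IRQS constant removed just above is a bitmask consumed by NCR5380_probe_irq(): bit n set means IRQ n may be probed, so 0xd4a8 selects IRQs 3, 5, 7, 10, 12, 14 and 15, exactly the list in the comment. A minimal sketch of how such a mask is interpreted (irq_allowed() is an illustrative helper, not part of the driver):

    #include <stdio.h>

    #define PAS16_IRQS 0xd4a8       /* bits 3, 5, 7, 10, 12, 14, 15 */

    /* Illustrative helper: non-zero if the mask permits probing this IRQ. */
    static int irq_allowed(unsigned int irq)
    {
            return irq < 16 && (PAS16_IRQS & (1u << irq));
    }

    int main(void)
    {
            unsigned int irq;

            for (irq = 0; irq < 16; irq++)
                    if (irq_allowed(irq))
                            printf("IRQ %u may be probed\n", irq);
            return 0;
    }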
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 04e67a190652..10546faac58c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4492,8 +4492,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
* @num: the inbound queue number
* @phy_id: the phy id which we wanted to start up.
*/
-int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
- u8 phy_id)
+static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+ u8 phy_id)
{
struct phy_stop_req payload;
struct inbound_queue_table *circularQ;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index dc33dfa8f994..ce584c31d36e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -527,7 +527,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
 * pm8001_alloc_dev - find an empty pm8001_device
* @pm8001_ha: our hba card information
*/
-struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
+static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
u32 dev;
for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b2a88200fe54..68a5c347fae9 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -306,7 +306,7 @@ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
* Return Value
* None
*/
-void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
{
struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
@@ -401,7 +401,7 @@ static struct pmcraid_cmd *pmcraid_get_free_cmd(
* Return Value:
* nothing
*/
-void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
@@ -1710,7 +1710,7 @@ static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
* @ioasc: ioasc code
* @cmd: pointer to command that resulted in 'ioasc'
*/
-void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
+static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
{
struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
@@ -3137,7 +3137,7 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
* returns pointer pmcraid_ioadl_desc, initialized to point to internal
* or external IOADLs
*/
-struct pmcraid_ioadl_desc *
+static struct pmcraid_ioadl_desc *
pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
{
struct pmcraid_ioadl_desc *ioadl;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae4a74756128..73b12e41d992 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -279,16 +279,6 @@ struct req_que;
struct qla_tgt_sess;
/*
- * (sd.h is not exported, hence local inclusion)
- * Data Integrity Field tuple.
- */
-struct sd_dif_tuple {
- __be16 guard_tag; /* Checksum */
- __be16 app_tag; /* Opaque storage */
- __be32 ref_tag; /* Target LBA or indirect LBA */
-};
-
-/*
* SCSI Request Block
*/
struct srb_cmd {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 987f1c729e9c..068c4e47fac9 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1828,7 +1828,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
if (scsi_prot_sg_count(cmd)) {
uint32_t i, j = 0, k = 0, num_ent;
struct scatterlist *sg;
- struct sd_dif_tuple *spt;
+ struct t10_pi_tuple *spt;
/* Patch the corresponding protection tags */
scsi_for_each_prot_sg(cmd, sg,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2674f4c16bc3..ace65db1d2a2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -899,12 +899,12 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
- ha->flags.mbox_busy) ||
- test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
- test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- break;
+ while ((qla2x00_reset_active(vha) || ha->dpc_active ||
+ ha->flags.mbox_busy) ||
+ test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+ test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ break;
msleep(1000);
}
}
@@ -4694,7 +4694,7 @@ retry_unlock:
qla83xx_wait_logic();
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
- "Failed to release IDC lock, retyring=%d\n", retry);
+ "Failed to release IDC lock, retrying=%d\n", retry);
goto retry_unlock;
}
} else if (retry < 10) {
@@ -4702,7 +4702,7 @@ retry_unlock:
qla83xx_wait_logic();
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
- "Failed to read drv-lockid, retyring=%d\n", retry);
+ "Failed to read drv-lockid, retrying=%d\n", retry);
goto retry_unlock;
}
@@ -4718,7 +4718,7 @@ retry_unlock2:
qla83xx_wait_logic();
retry++;
ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
- "Failed to release IDC lock, retyring=%d\n", retry);
+ "Failed to release IDC lock, retrying=%d\n", retry);
goto retry_unlock2;
}
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index ae87d6c19f17..06ddd13cb7cc 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1843,7 +1843,7 @@ static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
return rval;
}
-uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
uint32_t addr3, uint32_t mask, uint32_t addr,
uint32_t *data_ptr)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6a219a0844d3..c905709707f0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -42,6 +42,7 @@
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
+#include <linux/t10-pi.h>
#include <net/checksum.h>
@@ -627,7 +628,7 @@ static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
static unsigned char *fake_storep; /* ramdisk storage */
-static struct sd_dif_tuple *dif_storep; /* protection info */
+static struct t10_pi_tuple *dif_storep; /* protection info */
static void *map_storep; /* provisioning map */
static unsigned long map_size;
@@ -682,7 +683,7 @@ static void *fake_store(unsigned long long lba)
return fake_storep + lba * sdebug_sector_size;
}
-static struct sd_dif_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(sector_t sector)
{
sector = sector_div(sector, sdebug_store_sectors);
@@ -1349,7 +1350,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (0x86 == cmd[2]) { /* extended inquiry */
arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
- if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
+ if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
else if (have_dif_prot)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
@@ -2430,7 +2431,7 @@ static __be16 dif_compute_csum(const void *buf, int len)
return csum;
}
-static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
+static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
sector_t sector, u32 ei_lba)
{
__be16 csum = dif_compute_csum(data, sdebug_sector_size);
@@ -2442,13 +2443,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
be16_to_cpu(csum));
return 0x01;
}
- if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
+ if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
pr_err("REF check failed on sector %lu\n",
(unsigned long)sector);
return 0x03;
}
- if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("REF check failed on sector %lu\n",
(unsigned long)sector);
@@ -2504,7 +2505,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
unsigned int i;
- struct sd_dif_tuple *sdt;
+ struct t10_pi_tuple *sdt;
sector_t sector;
for (i = 0; i < sectors; i++, ei_lba++) {
@@ -2580,13 +2581,13 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
break;
}
if (unlikely(have_dif_prot && check_prot)) {
- if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
- sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
"to DIF device\n");
@@ -2696,7 +2697,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
int ret;
- struct sd_dif_tuple *sdt;
+ struct t10_pi_tuple *sdt;
void *daddr;
sector_t sector = start_sec;
int ppage_offset;
@@ -2722,7 +2723,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
}
for (ppage_offset = 0; ppage_offset < piter.length;
- ppage_offset += sizeof(struct sd_dif_tuple)) {
+ ppage_offset += sizeof(struct t10_pi_tuple)) {
/* If we're at the end of the current
* data page advance to the next one
*/
@@ -2893,13 +2894,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
break;
}
if (unlikely(have_dif_prot && check_prot)) {
- if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
- sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
@@ -3135,13 +3136,13 @@ static int resp_comp_write(struct scsi_cmnd *scp,
num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
if (0 == num)
return 0; /* degenerate case, not an error */
- if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
- sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
@@ -4939,12 +4940,11 @@ static int __init scsi_debug_init(void)
}
switch (sdebug_dif) {
-
- case SD_DIF_TYPE0_PROTECTION:
+ case T10_PI_TYPE0_PROTECTION:
break;
- case SD_DIF_TYPE1_PROTECTION:
- case SD_DIF_TYPE2_PROTECTION:
- case SD_DIF_TYPE3_PROTECTION:
+ case T10_PI_TYPE1_PROTECTION:
+ case T10_PI_TYPE2_PROTECTION:
+ case T10_PI_TYPE3_PROTECTION:
have_dif_prot = true;
break;
@@ -5026,7 +5026,7 @@ static int __init scsi_debug_init(void)
if (sdebug_dix) {
int dif_size;
- dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
+ dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
dif_storep = vmalloc(dif_size);
pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
@@ -5480,19 +5480,19 @@ static int sdebug_driver_probe(struct device * dev)
switch (sdebug_dif) {
- case SD_DIF_TYPE1_PROTECTION:
+ case T10_PI_TYPE1_PROTECTION:
hprot = SHOST_DIF_TYPE1_PROTECTION;
if (sdebug_dix)
hprot |= SHOST_DIX_TYPE1_PROTECTION;
break;
- case SD_DIF_TYPE2_PROTECTION:
+ case T10_PI_TYPE2_PROTECTION:
hprot = SHOST_DIF_TYPE2_PROTECTION;
if (sdebug_dix)
hprot |= SHOST_DIX_TYPE2_PROTECTION;
break;
- case SD_DIF_TYPE3_PROTECTION:
+ case T10_PI_TYPE3_PROTECTION:
hprot = SHOST_DIF_TYPE3_PROTECTION;
if (sdebug_dix)
hprot |= SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c71344aebdbb..2cca9cffc63f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2077,7 +2077,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
}
static struct blk_mq_ops scsi_mq_ops = {
- .map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 85c8a51bc563..193636a59adf 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -86,12 +86,14 @@ extern void scsi_device_unbusy(struct scsi_device *sdev);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
+extern void scsi_requeue_run_queue(struct work_struct *work);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
+extern void scsi_evt_thread(struct work_struct *work);
struct request_queue;
struct request;
extern struct kmem_cache *scsi_sdb_cache;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e0a78f53d809..212e98d940bc 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -217,8 +217,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
struct scsi_device *sdev;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- extern void scsi_evt_thread(struct work_struct *work);
- extern void scsi_requeue_run_queue(struct work_struct *work);
sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
GFP_ATOMIC);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d3e852ad5aa3..51e56296f465 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -52,6 +52,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
+#include <linux/t10-pi.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
@@ -314,7 +315,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr,
if (err)
return err;
- if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+ if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION)
sdkp->protection_type = val;
return count;
@@ -332,7 +333,7 @@ protection_mode_show(struct device *dev, struct device_attribute *attr,
dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
- if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+ if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
dif = 0;
dix = 1;
}
@@ -608,7 +609,7 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
}
- if (dif != SD_DIF_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
+ if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
@@ -1031,7 +1032,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
else
protect = 0;
- if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) {
+ if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
if (unlikely(SCpnt->cmnd == NULL)) {
@@ -1997,7 +1998,7 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
- if (type > SD_DIF_TYPE3_PROTECTION)
+ if (type > T10_PI_TYPE3_PROTECTION)
ret = -ENODEV;
else if (scsi_host_dif_capable(sdp->host, type))
ret = 1;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 765a6f1ac1b7..c8d986368da9 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -157,27 +157,6 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
}
/*
- * A DIF-capable target device can be formatted with different
- * protection schemes. Currently 0 through 3 are defined:
- *
- * Type 0 is regular (unprotected) I/O
- *
- * Type 1 defines the contents of the guard and reference tags
- *
- * Type 2 defines the contents of the guard and reference tags and
- * uses 32-byte commands to seed the latter
- *
- * Type 3 defines the contents of the guard tag only
- */
-
-enum sd_dif_target_protection_types {
- SD_DIF_TYPE0_PROTECTION = 0x0,
- SD_DIF_TYPE1_PROTECTION = 0x1,
- SD_DIF_TYPE2_PROTECTION = 0x2,
- SD_DIF_TYPE3_PROTECTION = 0x3,
-};
-
-/*
* Look up the DIX operation based on whether the command is read or
* write and whether dix and dif are enabled.
*/
@@ -239,15 +218,6 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
return flag_mask[prot_op];
}
-/*
- * Data Integrity Field tuple.
- */
-struct sd_dif_tuple {
- __be16 guard_tag; /* Checksum */
- __be16 app_tag; /* Opaque storage */
- __be32 ref_tag; /* Target LBA or indirect LBA */
-};
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 987bf392c336..9035380c0dda 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -60,14 +60,14 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
- if (type == SD_DIF_TYPE3_PROTECTION)
+ if (type == T10_PI_TYPE3_PROTECTION)
bi.profile = &t10_pi_type3_ip;
else
bi.profile = &t10_pi_type1_ip;
bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
} else
- if (type == SD_DIF_TYPE3_PROTECTION)
+ if (type == T10_PI_TYPE3_PROTECTION)
bi.profile = &t10_pi_type3_crc;
else
bi.profile = &t10_pi_type1_crc;
@@ -82,7 +82,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
if (!sdkp->ATO)
goto out;
- if (type == SD_DIF_TYPE3_PROTECTION)
+ if (type == T10_PI_TYPE3_PROTECTION)
bi.tag_size = sizeof(u16) + sizeof(u32);
else
bi.tag_size = sizeof(u16);
@@ -121,7 +121,7 @@ void sd_dif_prepare(struct scsi_cmnd *scmd)
sdkp = scsi_disk(scmd->request->rq_disk);
- if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+ if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
return;
phys = scsi_prot_ref_tag(scmd);
@@ -172,7 +172,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sdkp = scsi_disk(scmd->request->rq_disk);
- if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+ if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
return;
intervals = good_bytes / scsi_prot_interval(scmd);
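The sd, sd_dif, scsi_debug and qla2xxx hunks in this series all swap the driver-local sd_dif_tuple and SD_DIF_TYPE*_PROTECTION definitions for the generic t10_pi_tuple and T10_PI_TYPE*_PROTECTION from <linux/t10-pi.h>; the 8-byte protection-interval layout itself is unchanged: a big-endian 16-bit guard tag, a 16-bit application tag and a 32-bit reference tag. A minimal, kernel-independent sketch of a Type 1 reference-tag check over that layout (the struct and helper below are illustrative stand-ins, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>        /* ntohl()/htonl() for the big-endian fields */

    /* Same 8-byte layout as the deleted sd_dif_tuple / generic t10_pi_tuple. */
    struct pi_tuple {
            uint16_t guard_tag;   /* big-endian checksum of the data interval */
            uint16_t app_tag;     /* opaque to the protocol */
            uint32_t ref_tag;     /* big-endian; Type 1: low 32 bits of the LBA */
    };

    /* Type 1 check: the reference tag must equal the target sector number. */
    static int type1_ref_check(const struct pi_tuple *pi, uint64_t sector)
    {
            return ntohl(pi->ref_tag) == (uint32_t)sector;
    }

    int main(void)
    {
            struct pi_tuple pi = { .ref_tag = htonl(0x1234) };

            printf("sector 0x1234: %s\n",
                   type1_ref_check(&pi, 0x1234) ? "ok" : "REF mismatch");
            return 0;
    }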
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ae7d9bdf409c..070332eb41f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -79,18 +79,7 @@ static void sg_proc_cleanup(void);
*/
#define SG_MAX_CDB_SIZE 252
-/*
- * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
- * Then when using 32 bit integers x * m may overflow during the calculation.
- * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
- * calculates the same, but prevents the overflow when both m and d
- * are "small" numbers (like HZ and USER_HZ).
- * Of course an overflow is unavoidable if the result of muldiv doesn't fit
- * in 32 bits.
- */
-#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
-
-#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
+#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
@@ -884,10 +873,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val < 0)
return -EIO;
- if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
- val = MULDIV (INT_MAX, USER_HZ, HZ);
+ if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
+ val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
+ INT_MAX);
sfp->timeout_user = val;
- sfp->timeout = MULDIV (val, HZ, USER_HZ);
+ sfp->timeout = mult_frac(val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
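The MULDIV macro removed from sg.c and the mult_frac() helper that replaces it rely on the decomposition spelled out in the deleted comment: split x into quotient and remainder with respect to the divisor and scale each part separately, so the only product formed before a division is (x % d) * m, which stays small when m and d are small (HZ and USER_HZ here). A minimal stand-alone sketch of that identity (muldiv() is an illustrative reimplementation, not the kernel macro):

    #include <stdio.h>

    /*
     * x * mul / div without a large intermediate product:
     * with x = q * div + r, x * mul / div = q * mul + (r * mul) / div,
     * and only r * mul (both factors small here) is formed before dividing.
     */
    static unsigned long muldiv(unsigned long x, unsigned long mul,
                                unsigned long div)
    {
            return (x / div) * mul + ((x % div) * mul) / div;
    }

    int main(void)
    {
            unsigned long hz = 1000, user_hz = 100;

            /* A 60-second timeout given as 6000 USER_HZ ticks -> 60000 jiffies. */
            printf("%lu\n", muldiv(60 * user_hz, hz, user_hz));
            return 0;
    }

The split is exact in non-negative integer arithmetic, because the (x / div) * div * mul portion of the numerator is a multiple of div and so divides out with no rounding loss.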
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
new file mode 100644
index 000000000000..97e159c2cecd
--- /dev/null
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -0,0 +1,54 @@
+#
+# Kernel configuration file for the SMARTPQI
+#
+# Copyright (c) 2016 Microsemi Corporation
+# Copyright (c) 2016 PMC-Sierra, Inc.
+# (mailto:esc.storagedev@microsemi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+config SCSI_SMARTPQI
+ tristate "Microsemi PQI Driver"
+ depends on PCI && SCSI && !S390
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports Microsemi PQI controllers.
+
+ <http://www.microsemi.com>
+
+ To compile this driver as a module, choose M here: the
+ module will be called smartpqi.
+
+ Note: the aacraid driver will not manage a smartpqi
+ controller. You need to enable smartpqi for smartpqi
+ controllers. For more information, please see
+ Documentation/scsi/smartpqi.txt
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
new file mode 100644
index 000000000000..0f42a225a664
--- /dev/null
+++ b/drivers/scsi/smartpqi/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -I.
+obj-m += smartpqi.o
+smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
new file mode 100644
index 000000000000..07b6444d3e0a
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -0,0 +1,1136 @@
+/*
+ * driver for Microsemi PQI-based storage controllers
+ * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#if !defined(_SMARTPQI_H)
+#define _SMARTPQI_H
+
+#pragma pack(1)
+
+#define PQI_DEVICE_SIGNATURE "PQI DREG"
+
+/* This structure is defined by the PQI specification. */
+struct pqi_device_registers {
+ __le64 signature;
+ u8 function_and_status_code;
+ u8 reserved[7];
+ u8 max_admin_iq_elements;
+ u8 max_admin_oq_elements;
+ u8 admin_iq_element_length; /* in 16-byte units */
+ u8 admin_oq_element_length; /* in 16-byte units */
+ __le16 max_reset_timeout; /* in 100-millisecond units */
+ u8 reserved1[2];
+ __le32 legacy_intx_status;
+ __le32 legacy_intx_mask_set;
+ __le32 legacy_intx_mask_clear;
+ u8 reserved2[28];
+ __le32 device_status;
+ u8 reserved3[4];
+ __le64 admin_iq_pi_offset;
+ __le64 admin_oq_ci_offset;
+ __le64 admin_iq_element_array_addr;
+ __le64 admin_oq_element_array_addr;
+ __le64 admin_iq_ci_addr;
+ __le64 admin_oq_pi_addr;
+ u8 admin_iq_num_elements;
+ u8 admin_oq_num_elements;
+ __le16 admin_queue_int_msg_num;
+ u8 reserved4[4];
+ __le32 device_error;
+ u8 reserved5[4];
+ __le64 error_details;
+ __le32 device_reset;
+ __le32 power_action;
+ u8 reserved6[104];
+};
+
+/*
+ * controller registers
+ *
+ * These are defined by the PMC implementation.
+ *
+ * Some registers (those named sis_*) are only used when in
+ * legacy SIS mode before we transition the controller into
+ * PQI mode. There are a number of other SIS mode registers,
+ * but we don't use them, so only the SIS registers that we
+ * care about are defined here. The offsets mentioned in the
+ * comments are the offsets from the PCIe BAR 0.
+ */
+struct pqi_ctrl_registers {
+ u8 reserved[0x20];
+ __le32 sis_host_to_ctrl_doorbell; /* 20h */
+ u8 reserved1[0x34 - (0x20 + sizeof(__le32))];
+ __le32 sis_interrupt_mask; /* 34h */
+ u8 reserved2[0x9c - (0x34 + sizeof(__le32))];
+ __le32 sis_ctrl_to_host_doorbell; /* 9Ch */
+ u8 reserved3[0xa0 - (0x9c + sizeof(__le32))];
+ __le32 sis_ctrl_to_host_doorbell_clear; /* A0h */
+ u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
+ __le32 sis_driver_scratch; /* B0h */
+ u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
+ __le32 sis_firmware_status; /* BCh */
+ u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
+ __le32 sis_mailbox[8]; /* 1000h */
+ u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
+ /*
+ * The PQI spec states that the PQI registers should be at
+ * offset 0 from the PCIe BAR 0. However, we can't map
+ * them at offset 0 because that would break compatibility
+ * with the SIS registers. So we map them at offset 4000h.
+ */
+ struct pqi_device_registers pqi_registers; /* 4000h */
+};
+
+#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
+
+enum pqi_io_path {
+ RAID_PATH = 0,
+ AIO_PATH = 1
+};
+
+struct pqi_sg_descriptor {
+ __le64 address;
+ __le32 length;
+ __le32 flags;
+};
+
+/* manifest constants for the flags field of pqi_sg_descriptor */
+#define CISS_SG_LAST 0x40000000
+#define CISS_SG_CHAIN 0x80000000
+
+struct pqi_iu_header {
+ u8 iu_type;
+ u8 reserved;
+ __le16 iu_length; /* in bytes - does not include the length */
+ /* of this header */
+ __le16 response_queue_id; /* specifies the OQ where the */
+ /* response IU is to be delivered */
+ u8 work_area[2]; /* reserved for driver use */
+};
+
+/*
+ * According to the PQI spec, the IU header is only the first 4 bytes of our
+ * pqi_iu_header structure.
+ */
+#define PQI_REQUEST_HEADER_LENGTH 4
+
+struct pqi_general_admin_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ u8 function_code;
+ union {
+ struct {
+ u8 reserved[33];
+ __le32 buffer_length;
+ struct pqi_sg_descriptor sg_descriptor;
+ } report_device_capability;
+
+ struct {
+ u8 reserved;
+ __le16 queue_id;
+ u8 reserved1[2];
+ __le64 element_array_addr;
+ __le64 ci_addr;
+ __le16 num_elements;
+ __le16 element_length;
+ u8 queue_protocol;
+ u8 reserved2[23];
+ __le32 vendor_specific;
+ } create_operational_iq;
+
+ struct {
+ u8 reserved;
+ __le16 queue_id;
+ u8 reserved1[2];
+ __le64 element_array_addr;
+ __le64 pi_addr;
+ __le16 num_elements;
+ __le16 element_length;
+ u8 queue_protocol;
+ u8 reserved2[3];
+ __le16 int_msg_num;
+ __le16 coalescing_count;
+ __le32 min_coalescing_time;
+ __le32 max_coalescing_time;
+ u8 reserved3[8];
+ __le32 vendor_specific;
+ } create_operational_oq;
+
+ struct {
+ u8 reserved;
+ __le16 queue_id;
+ u8 reserved1[50];
+ } delete_operational_queue;
+
+ struct {
+ u8 reserved;
+ __le16 queue_id;
+ u8 reserved1[46];
+ __le32 vendor_specific;
+ } change_operational_iq_properties;
+
+ } data;
+};
+
+struct pqi_general_admin_response {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ u8 function_code;
+ u8 status;
+ union {
+ struct {
+ u8 status_descriptor[4];
+ __le64 iq_pi_offset;
+ u8 reserved[40];
+ } create_operational_iq;
+
+ struct {
+ u8 status_descriptor[4];
+ __le64 oq_ci_offset;
+ u8 reserved[40];
+ } create_operational_oq;
+ } data;
+};
+
+struct pqi_iu_layer_descriptor {
+ u8 inbound_spanning_supported : 1;
+ u8 reserved : 7;
+ u8 reserved1[5];
+ __le16 max_inbound_iu_length;
+ u8 outbound_spanning_supported : 1;
+ u8 reserved2 : 7;
+ u8 reserved3[5];
+ __le16 max_outbound_iu_length;
+};
+
+struct pqi_device_capability {
+ __le16 data_length;
+ u8 reserved[6];
+ u8 iq_arbitration_priority_support_bitmask;
+ u8 maximum_aw_a;
+ u8 maximum_aw_b;
+ u8 maximum_aw_c;
+ u8 max_arbitration_burst : 3;
+ u8 reserved1 : 4;
+ u8 iqa : 1;
+ u8 reserved2[2];
+ u8 iq_freeze : 1;
+ u8 reserved3 : 7;
+ __le16 max_inbound_queues;
+ __le16 max_elements_per_iq;
+ u8 reserved4[4];
+ __le16 max_iq_element_length;
+ __le16 min_iq_element_length;
+ u8 reserved5[2];
+ __le16 max_outbound_queues;
+ __le16 max_elements_per_oq;
+ __le16 intr_coalescing_time_granularity;
+ __le16 max_oq_element_length;
+ __le16 min_oq_element_length;
+ u8 reserved6[24];
+ struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
+};
+
+#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4
+
+struct pqi_raid_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 nexus_id;
+ __le32 buffer_length;
+ u8 lun_number[8];
+ __le16 protocol_specific;
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 reserved1 : 4;
+ u8 fence : 1;
+ __le16 error_index;
+ u8 reserved2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved3 : 1;
+ u8 reserved4 : 2;
+ u8 additional_cdb_bytes_usage : 3;
+ u8 reserved5 : 3;
+ u8 cdb[32];
+ struct pqi_sg_descriptor
+ sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_aio_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ u8 reserved1[2];
+ __le32 nexus_id;
+ __le32 buffer_length;
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 memory_type : 1;
+ u8 fence : 1;
+ u8 encryption_enable : 1;
+ u8 reserved2 : 2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved3 : 1;
+ __le16 data_encryption_key_index;
+ __le32 encrypt_tweak_lower;
+ __le32 encrypt_tweak_upper;
+ u8 cdb[16];
+ __le16 error_index;
+ u8 num_sg_descriptors;
+ u8 cdb_length;
+ u8 lun_number[8];
+ u8 reserved4[4];
+ struct pqi_sg_descriptor
+ sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_io_response {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 error_index;
+ u8 reserved2[4];
+};
+
+struct pqi_general_management_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ union {
+ struct {
+ u8 reserved[2];
+ __le32 buffer_length;
+ struct pqi_sg_descriptor sg_descriptors[3];
+ } report_event_configuration;
+
+ struct {
+ __le16 global_event_oq_id;
+ __le32 buffer_length;
+ struct pqi_sg_descriptor sg_descriptors[3];
+ } set_event_configuration;
+ } data;
+};
+
+struct pqi_event_descriptor {
+ u8 event_type;
+ u8 reserved;
+ __le16 oq_id;
+};
+
+struct pqi_event_config {
+ u8 reserved[2];
+ u8 num_event_descriptors;
+ u8 reserved1;
+ struct pqi_event_descriptor descriptors[1];
+};
+
+#define PQI_MAX_EVENT_DESCRIPTORS 255
+
+struct pqi_event_response {
+ struct pqi_iu_header header;
+ u8 event_type;
+ u8 reserved2 : 7;
+ u8 request_acknowlege : 1;
+ __le16 event_id;
+ __le32 additional_event_id;
+ u8 data[16];
+};
+
+struct pqi_event_acknowledge_request {
+ struct pqi_iu_header header;
+ u8 event_type;
+ u8 reserved2;
+ __le16 event_id;
+ __le32 additional_event_id;
+};
+
+struct pqi_task_management_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 nexus_id;
+ u8 reserved[4];
+ u8 lun_number[8];
+ __le16 protocol_specific;
+ __le16 outbound_queue_id_to_manage;
+ __le16 request_id_to_manage;
+ u8 task_management_function;
+ u8 reserved2 : 7;
+ u8 fence : 1;
+};
+
+#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
+
+struct pqi_task_management_response {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 nexus_id;
+ u8 additional_response_info[3];
+ u8 response_code;
+};
+
+struct pqi_aio_error_info {
+ u8 status;
+ u8 service_response;
+ u8 data_present;
+ u8 reserved;
+ __le32 residual_count;
+ __le16 data_length;
+ __le16 reserved1;
+ u8 data[256];
+};
+
+struct pqi_raid_error_info {
+ u8 data_in_result;
+ u8 data_out_result;
+ u8 reserved[3];
+ u8 status;
+ __le16 status_qualifier;
+ __le16 sense_data_length;
+ __le16 response_data_length;
+ __le32 data_in_transferred;
+ __le32 data_out_transferred;
+ u8 data[256];
+};
+
+#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
+#define PQI_REQUEST_IU_RAID_PATH_IO 0x14
+#define PQI_REQUEST_IU_AIO_PATH_IO 0x15
+#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
+#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
+#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
+#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
+#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0
+#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1
+#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2
+#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
+#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4
+#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5
+
+#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ 0x11
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ 0x12
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ 0x13
+#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY 0x14
+
+#define PQI_GENERAL_ADMIN_STATUS_SUCCESS 0x0
+
+#define PQI_IQ_PROPERTY_IS_AIO_QUEUE 0x1
+
+#define PQI_GENERAL_ADMIN_IU_LENGTH 0x3c
+#define PQI_PROTOCOL_SOP 0x0
+
+#define PQI_DATA_IN_OUT_GOOD 0x0
+#define PQI_DATA_IN_OUT_UNDERFLOW 0x1
+#define PQI_DATA_IN_OUT_BUFFER_ERROR 0x40
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW 0x41
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43
+#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60
+#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61
+#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED 0x63
+#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65
+#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66
+#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67
+#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x6F
+#define PQI_DATA_IN_OUT_ERROR 0xf0
+#define PQI_DATA_IN_OUT_PROTOCOL_ERROR 0xf1
+#define PQI_DATA_IN_OUT_HARDWARE_ERROR 0xf2
+#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
+#define PQI_DATA_IN_OUT_ABORTED 0xf4
+#define PQI_DATA_IN_OUT_TIMEOUT 0xf5
+
+#define CISS_CMD_STATUS_SUCCESS 0x0
+#define CISS_CMD_STATUS_TARGET_STATUS 0x1
+#define CISS_CMD_STATUS_DATA_UNDERRUN 0x2
+#define CISS_CMD_STATUS_DATA_OVERRUN 0x3
+#define CISS_CMD_STATUS_INVALID 0x4
+#define CISS_CMD_STATUS_PROTOCOL_ERROR 0x5
+#define CISS_CMD_STATUS_HARDWARE_ERROR 0x6
+#define CISS_CMD_STATUS_CONNECTION_LOST 0x7
+#define CISS_CMD_STATUS_ABORTED 0x8
+#define CISS_CMD_STATUS_ABORT_FAILED 0x9
+#define CISS_CMD_STATUS_UNSOLICITED_ABORT 0xa
+#define CISS_CMD_STATUS_TIMEOUT 0xb
+#define CISS_CMD_STATUS_UNABORTABLE 0xc
+#define CISS_CMD_STATUS_TMF 0xd
+#define CISS_CMD_STATUS_AIO_DISABLED 0xe
+
+#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
+#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)
+
+#define PQI_EVENT_TYPE_HOTPLUG 0x1
+#define PQI_EVENT_TYPE_HARDWARE 0x2
+#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
+#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
+#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
+#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
+#define PQI_EVENT_TYPE_HEARTBEAT 0xff
+
+#pragma pack()
+
+#define PQI_ERROR_BUFFER_ELEMENT_LENGTH \
+ sizeof(struct pqi_raid_error_info)
+
+/* these values are based on our implementation */
+#define PQI_ADMIN_IQ_NUM_ELEMENTS 8
+#define PQI_ADMIN_OQ_NUM_ELEMENTS 20
+#define PQI_ADMIN_IQ_ELEMENT_LENGTH 64
+#define PQI_ADMIN_OQ_ELEMENT_LENGTH 64
+
+#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH 128
+#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH 16
+
+#define PQI_MIN_MSIX_VECTORS 1
+#define PQI_MAX_MSIX_VECTORS 64
+
+/* these values are defined by the PQI spec */
+#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255
+#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
+#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64
+#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16
+#define PQI_ADMIN_INDEX_ALIGNMENT 64
+#define PQI_OPERATIONAL_INDEX_ALIGNMENT 4
+
+#define PQI_MIN_OPERATIONAL_QUEUE_ID 1
+#define PQI_MAX_OPERATIONAL_QUEUE_ID 65535
+
+#define PQI_AIO_SERV_RESPONSE_COMPLETE 0
+#define PQI_AIO_SERV_RESPONSE_FAILURE 1
+#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2
+#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3
+#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
+#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
+
+#define PQI_AIO_STATUS_IO_ERROR 0x1
+#define PQI_AIO_STATUS_IO_ABORTED 0x2
+#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE 0x3
+#define PQI_AIO_STATUS_INVALID_DEVICE 0x4
+#define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe
+#define PQI_AIO_STATUS_UNDERRUN 0x51
+#define PQI_AIO_STATUS_OVERRUN 0x75
+
+typedef u32 pqi_index_t;
+
+/* SOP data direction flags */
+#define SOP_NO_DIRECTION_FLAG 0
+#define SOP_WRITE_FLAG 1 /* host writes data to Data-Out */
+ /* buffer */
+#define SOP_READ_FLAG 2 /* host receives data from Data-In */
+ /* buffer */
+#define SOP_BIDIRECTIONAL 3 /* data is transferred from the */
+ /* Data-Out buffer and data is */
+ /* transferred to the Data-In buffer */
+
+#define SOP_TASK_ATTRIBUTE_SIMPLE 0
+#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
+#define SOP_TASK_ATTRIBUTE_ORDERED 2
+#define SOP_TASK_ATTRIBUTE_ACA 4
+
+#define SOP_TMF_COMPLETE 0x0
+#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+
+/* additional CDB bytes usage field codes */
+#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_4 1 /* 20-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_8 2 /* 24-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_12 3 /* 28-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_16 4 /* 32-byte CDB */
+
+/*
+ * The purpose of this structure is to obtain proper alignment of objects in
+ * an admin queue pair.
+ */
+struct pqi_admin_queues_aligned {
+ __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+ u8 iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
+ [PQI_ADMIN_IQ_NUM_ELEMENTS];
+ __aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+ u8 oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
+ [PQI_ADMIN_OQ_NUM_ELEMENTS];
+ __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
+ __aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
+};
+
+struct pqi_admin_queues {
+ void *iq_element_array;
+ void *oq_element_array;
+ volatile pqi_index_t *iq_ci;
+ volatile pqi_index_t *oq_pi;
+ dma_addr_t iq_element_array_bus_addr;
+ dma_addr_t oq_element_array_bus_addr;
+ dma_addr_t iq_ci_bus_addr;
+ dma_addr_t oq_pi_bus_addr;
+ __le32 __iomem *iq_pi;
+ pqi_index_t iq_pi_copy;
+ __le32 __iomem *oq_ci;
+ pqi_index_t oq_ci_copy;
+ struct task_struct *task;
+ u16 int_msg_num;
+};
+
+struct pqi_queue_group {
+ struct pqi_ctrl_info *ctrl_info; /* backpointer */
+ u16 iq_id[2];
+ u16 oq_id;
+ u16 int_msg_num;
+ void *iq_element_array[2];
+ void *oq_element_array;
+ dma_addr_t iq_element_array_bus_addr[2];
+ dma_addr_t oq_element_array_bus_addr;
+ __le32 __iomem *iq_pi[2];
+ pqi_index_t iq_pi_copy[2];
+ volatile pqi_index_t *iq_ci[2];
+ volatile pqi_index_t *oq_pi;
+ dma_addr_t iq_ci_bus_addr[2];
+ dma_addr_t oq_pi_bus_addr;
+ __le32 __iomem *oq_ci;
+ pqi_index_t oq_ci_copy;
+ spinlock_t submit_lock[2]; /* protect submission queue */
+ struct list_head request_list[2];
+};
+
+struct pqi_event_queue {
+ u16 oq_id;
+ u16 int_msg_num;
+ void *oq_element_array;
+ volatile pqi_index_t *oq_pi;
+ dma_addr_t oq_element_array_bus_addr;
+ dma_addr_t oq_pi_bus_addr;
+ __le32 __iomem *oq_ci;
+ pqi_index_t oq_ci_copy;
+};
+
+#define PQI_DEFAULT_QUEUE_GROUP 0
+#define PQI_MAX_QUEUE_GROUPS PQI_MAX_MSIX_VECTORS
+
+struct pqi_encryption_info {
+ u16 data_encryption_key_index;
+ u32 encrypt_tweak_lower;
+ u32 encrypt_tweak_upper;
+};
+
+#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+
+#define RAID_MAP_MAX_ENTRIES 1024
+
+#define PQI_PHYSICAL_DEVICE_BUS 0
+#define PQI_RAID_VOLUME_BUS 1
+#define PQI_HBA_BUS 2
+#define PQI_MAX_BUS PQI_HBA_BUS
+
+#pragma pack(1)
+
+struct report_lun_header {
+ __be32 list_length;
+ u8 extended_response;
+ u8 reserved[3];
+};
+
+struct report_log_lun_extended_entry {
+ u8 lunid[8];
+ u8 volume_id[16];
+};
+
+struct report_log_lun_extended {
+ struct report_lun_header header;
+ struct report_log_lun_extended_entry lun_entries[1];
+};
+
+struct report_phys_lun_extended_entry {
+ u8 lunid[8];
+ __be64 wwid;
+ u8 device_type;
+ u8 device_flags;
+ u8 lun_count; /* number of LUNs in a multi-LUN device */
+ u8 redundant_paths;
+ u32 aio_handle;
+};
+
+/* for device_flags field of struct report_phys_lun_extended_entry */
+#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK 0x1
+#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
+
+struct report_phys_lun_extended {
+ struct report_lun_header header;
+ struct report_phys_lun_extended_entry lun_entries[1];
+};
+
+struct raid_map_disk_data {
+ u32 aio_handle;
+ u8 xor_mult[2];
+ u8 reserved[2];
+};
+
+/* constants for flags field of RAID map */
+#define RAID_MAP_ENCRYPTION_ENABLED 0x1
+
+struct raid_map {
+ __le32 structure_size; /* size of entire structure in bytes */
+ __le32 volume_blk_size; /* bytes / block in the volume */
+ __le64 volume_blk_cnt; /* logical blocks on the volume */
+ u8 phys_blk_shift; /* shift factor to convert between */
+ /* units of logical blocks and */
+ /* physical disk blocks */
+ u8 parity_rotation_shift; /* shift factor to convert between */
+ /* units of logical stripes and */
+ /* physical stripes */
+ __le16 strip_size; /* blocks used on each disk / stripe */
+ __le64 disk_starting_blk; /* first disk block used in volume */
+ __le64 disk_blk_cnt; /* disk blocks used by volume / disk */
+ __le16 data_disks_per_row; /* data disk entries / row in the map */
+ __le16 metadata_disks_per_row; /* mirror/parity disk entries / row */
+ /* in the map */
+ __le16 row_cnt; /* rows in each layout map */
+ __le16 layout_map_count; /* layout maps (1 map per */
+ /* mirror parity group) */
+ __le16 flags;
+ __le16 data_encryption_key_index;
+ u8 reserved[16];
+ struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
+};
+
+#pragma pack()
+
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+
+struct pqi_scsi_dev {
+ int devtype; /* as reported by INQUIRY command */
+ u8 device_type; /* as reported by */
+ /* BMIC_IDENTIFY_PHYSICAL_DEVICE */
+ /* only valid for devtype = TYPE_DISK */
+ int bus;
+ int target;
+ int lun;
+ u8 scsi3addr[8];
+ __be64 wwid;
+ u8 volume_id[16];
+ u8 is_physical_device : 1;
+ u8 target_lun_valid : 1;
+ u8 expose_device : 1;
+ u8 no_uld_attach : 1;
+ u8 aio_enabled : 1; /* only valid for physical disks */
+ u8 device_gone : 1;
+ u8 new_device : 1;
+ u8 keep_device : 1;
+ u8 volume_offline : 1;
+ u8 vendor[8]; /* bytes 8-15 of inquiry data */
+ u8 model[16]; /* bytes 16-31 of inquiry data */
+ u64 sas_address;
+ u8 raid_level;
+ u16 queue_depth; /* max. queue_depth for this device */
+ u16 advertised_queue_depth;
+ u32 aio_handle;
+ u8 volume_status;
+ u8 active_path_index;
+ u8 path_map;
+ u8 bay;
+ u8 box[8];
+ u16 phys_connector[8];
+ int offload_configured; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_enabled_pending;
+ int offload_to_mirror; /* Send next I/O accelerator RAID */
+ /* offload request to mirror drive. */
+ struct raid_map *raid_map; /* I/O accelerator RAID map */
+
+ struct pqi_sas_port *sas_port;
+ struct scsi_device *sdev;
+
+ struct list_head scsi_device_list_entry;
+ struct list_head new_device_list_entry;
+ struct list_head add_list_entry;
+ struct list_head delete_list_entry;
+};
+
+/* VPD inquiry pages */
+#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
+#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
+#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
+#define CISS_VPD_LV_OFFLOAD_STATUS 0xc2 /* vendor-specific page */
+#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
+
+#define VPD_PAGE (1 << 8)
+
+#pragma pack(1)
+
+/* structure for CISS_VPD_LV_STATUS */
+struct ciss_vpd_logical_volume_status {
+ u8 peripheral_info;
+ u8 page_code;
+ u8 reserved;
+ u8 page_length;
+ u8 volume_status;
+ u8 reserved2[3];
+ __be32 flags;
+};
+
+#pragma pack()
+
+/* constants for volume_status field of ciss_vpd_logical_volume_status */
+#define CISS_LV_OK 0
+#define CISS_LV_FAILED 1
+#define CISS_LV_NOT_CONFIGURED 2
+#define CISS_LV_DEGRADED 3
+#define CISS_LV_READY_FOR_RECOVERY 4
+#define CISS_LV_UNDERGOING_RECOVERY 5
+#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED 6
+#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 7
+#define CISS_LV_HARDWARE_OVERHEATING 8
+#define CISS_LV_HARDWARE_HAS_OVERHEATED 9
+#define CISS_LV_UNDERGOING_EXPANSION 10
+#define CISS_LV_NOT_AVAILABLE 11
+#define CISS_LV_QUEUED_FOR_EXPANSION 12
+#define CISS_LV_DISABLED_SCSI_ID_CONFLICT 13
+#define CISS_LV_EJECTED 14
+#define CISS_LV_UNDERGOING_ERASE 15
+/* state 16 not used */
+#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD 17
+#define CISS_LV_UNDERGOING_RPI 18
+#define CISS_LV_PENDING_RPI 19
+#define CISS_LV_ENCRYPTED_NO_KEY 20
+/* state 21 not used */
+#define CISS_LV_UNDERGOING_ENCRYPTION 22
+#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING 23
+#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 24
+#define CISS_LV_PENDING_ENCRYPTION 25
+#define CISS_LV_PENDING_ENCRYPTION_REKEYING 26
+#define CISS_LV_NOT_SUPPORTED 27
+#define CISS_LV_STATUS_UNAVAILABLE 255
+
+/* constants for flags field of ciss_vpd_logical_volume_status */
+#define CISS_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for */
+ /* host I/O */
+
+/* for SAS hosts and SAS expanders */
+struct pqi_sas_node {
+ struct device *parent_dev;
+ struct list_head port_list_head;
+};
+
+struct pqi_sas_port {
+ struct list_head port_list_entry;
+ u64 sas_address;
+ struct sas_port *port;
+ int next_phy_index;
+ struct list_head phy_list_head;
+ struct pqi_sas_node *parent_node;
+ struct sas_rphy *rphy;
+};
+
+struct pqi_sas_phy {
+ struct list_head phy_list_entry;
+ struct sas_phy *phy;
+ struct pqi_sas_port *parent_port;
+ bool added_to_port;
+};
+
+struct pqi_io_request {
+ atomic_t refcount;
+ u16 index;
+ void (*io_complete_callback)(struct pqi_io_request *io_request,
+ void *context);
+ void *context;
+ int status;
+ struct scsi_cmnd *scmd;
+ void *error_info;
+ struct pqi_sg_descriptor *sg_chain_buffer;
+ dma_addr_t sg_chain_buffer_dma_handle;
+ void *iu;
+ struct list_head request_list_entry;
+};
+
+/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
+#define PQI_EVENT_HEARTBEAT 0
+#define PQI_EVENT_HOTPLUG 1
+#define PQI_EVENT_HARDWARE 2
+#define PQI_EVENT_PHYSICAL_DEVICE 3
+#define PQI_EVENT_LOGICAL_DEVICE 4
+#define PQI_EVENT_AIO_STATE_CHANGE 5
+#define PQI_EVENT_AIO_CONFIG_CHANGE 6
+#define PQI_NUM_SUPPORTED_EVENTS 7
+
+struct pqi_event {
+ bool pending;
+ u8 event_type;
+ __le16 event_id;
+ __le32 additional_event_id;
+};
+
+#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
+#define PQI_RESERVED_IO_SLOTS_EVENT_ACK PQI_NUM_SUPPORTED_EVENTS
+#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS 3
+#define PQI_RESERVED_IO_SLOTS \
+ (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
+ PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
+
+struct pqi_ctrl_info {
+ unsigned int ctrl_id;
+ struct pci_dev *pci_dev;
+ char firmware_version[11];
+ void __iomem *iomem_base;
+ struct pqi_ctrl_registers __iomem *registers;
+ struct pqi_device_registers __iomem *pqi_registers;
+ u32 max_sg_entries;
+ u32 config_table_offset;
+ u32 config_table_length;
+ u16 max_inbound_queues;
+ u16 max_elements_per_iq;
+ u16 max_iq_element_length;
+ u16 max_outbound_queues;
+ u16 max_elements_per_oq;
+ u16 max_oq_element_length;
+ u32 max_transfer_size;
+ u32 max_outstanding_requests;
+ u32 max_io_slots;
+ unsigned int scsi_ml_can_queue;
+ unsigned short sg_tablesize;
+ unsigned int max_sectors;
+ u32 error_buffer_length;
+ void *error_buffer;
+ dma_addr_t error_buffer_dma_handle;
+ size_t sg_chain_buffer_length;
+ unsigned int num_queue_groups;
+ unsigned int num_active_queue_groups;
+ u16 num_elements_per_iq;
+ u16 num_elements_per_oq;
+ u16 max_inbound_iu_length_per_firmware;
+ u16 max_inbound_iu_length;
+ unsigned int max_sg_per_iu;
+ void *admin_queue_memory_base;
+ u32 admin_queue_memory_length;
+ dma_addr_t admin_queue_memory_base_dma_handle;
+ void *queue_memory_base;
+ u32 queue_memory_length;
+ dma_addr_t queue_memory_base_dma_handle;
+ struct pqi_admin_queues admin_queues;
+ struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
+ struct pqi_event_queue event_queue;
+ int max_msix_vectors;
+ int num_msix_vectors_enabled;
+ int num_msix_vectors_initialized;
+ u32 msix_vectors[PQI_MAX_MSIX_VECTORS];
+ void *intr_data[PQI_MAX_MSIX_VECTORS];
+ int event_irq;
+ struct Scsi_Host *scsi_host;
+
+ struct mutex scan_mutex;
+ u8 inbound_spanning_supported : 1;
+ u8 outbound_spanning_supported : 1;
+ u8 pqi_mode_enabled : 1;
+ u8 controller_online : 1;
+ u8 heartbeat_timer_started : 1;
+
+ struct list_head scsi_device_list;
+ spinlock_t scsi_device_list_lock;
+
+ struct delayed_work rescan_work;
+ struct delayed_work update_time_work;
+
+ struct pqi_sas_node *sas_host;
+ u64 sas_address;
+
+ struct pqi_io_request *io_request_pool;
+ u16 next_io_request_slot;
+
+ struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+ struct work_struct event_work;
+
+ atomic_t num_interrupts;
+ int previous_num_interrupts;
+ unsigned int num_heartbeats_requested;
+ struct timer_list heartbeat_timer;
+
+ struct semaphore sync_request_sem;
+ struct semaphore lun_reset_sem;
+};
+
+enum pqi_ctrl_mode {
+ UNKNOWN,
+ PQI_MODE
+};
+
+/*
+ * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
+ */
+#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
+
+/* 0 = no limit */
+#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH 0
+
+/* CISS commands */
+#define CISS_READ 0xc0
+#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
+#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+#define CISS_GET_RAID_MAP 0xc8
+
+/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
+#define CISS_REPORT_LOG_EXTENDED 0x1
+#define CISS_REPORT_PHYS_EXTENDED 0x2
+
+/* BMIC commands */
+#define BMIC_IDENTIFY_CONTROLLER 0x11
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
+#define BMIC_WRITE_HOST_WELLNESS 0xa5
+#define BMIC_CACHE_FLUSH 0xc2
+
+#define SA_CACHE_FLUSH 0x01
+
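+/* helpers for decoding fields of an 8-byte CISS LUN address (lunid) */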
+#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0)
+#define CISS_GET_BUS(lunid) ((lunid)[7] & 0x3f)
+#define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6])
+#define CISS_GET_DRIVE_NUMBER(lunid) \
+ (((CISS_GET_BUS((lunid)) - 1) << 8) + \
+ CISS_GET_LEVEL_2_TARGET((lunid)))
+
+#define NO_TIMEOUT ((unsigned long) -1)
+
+#pragma pack(1)
+
+struct bmic_identify_controller {
+ u8 configured_logical_drive_count;
+ __le32 configuration_signature;
+ u8 firmware_version[4];
+ u8 reserved[145];
+ __le16 extended_logical_unit_count;
+ u8 reserved1[34];
+ __le16 firmware_build_number;
+ u8 reserved2[100];
+ u8 controller_mode;
+ u8 reserved3[32];
+};
+
+struct bmic_identify_physical_device {
+ u8 scsi_bus; /* SCSI Bus number on controller */
+ u8 scsi_id; /* SCSI ID on this bus */
+ __le16 block_size; /* sector size in bytes */
+ __le32 total_blocks; /* number of sectors on drive */
+ __le32 reserved_blocks; /* controller reserved (RIS) */
+ u8 model[40]; /* Physical Drive Model */
+ u8 serial_number[40]; /* Drive Serial Number */
+ u8 firmware_revision[8]; /* drive firmware revision */
+ u8 scsi_inquiry_bits; /* inquiry byte 7 bits */
+ u8 compaq_drive_stamp; /* 0 means drive not stamped */
+ u8 last_failure_reason;
+ u8 flags;
+ u8 more_flags;
+ u8 scsi_lun; /* SCSI LUN for phys drive */
+ u8 yet_more_flags;
+ u8 even_more_flags;
+ __le32 spi_speed_rules;
+ u8 phys_connector[2]; /* connector number on controller */
+ u8 phys_box_on_bus; /* phys enclosure this drive resides in */
+ u8 phys_bay_in_box; /* phys drive bay this drive resides in */
+ __le32 rpm; /* drive rotational speed in RPM */
+ u8 device_type; /* type of drive */
+ u8 sata_version; /* only valid when device_type = */
+ /* BMIC_DEVICE_TYPE_SATA */
+ __le64 big_total_block_count;
+ __le64 ris_starting_lba;
+ __le32 ris_size;
+ u8 wwid[20];
+ u8 controller_phy_map[32];
+ __le16 phy_count;
+ u8 phy_connected_dev_type[256];
+ u8 phy_to_drive_bay_num[256];
+ __le16 phy_to_attached_dev_index[256];
+ u8 box_index;
+ u8 reserved;
+ __le16 extra_physical_drive_flags;
+ u8 negotiated_link_rate[256];
+ u8 phy_to_phy_map[256];
+ u8 redundant_path_present_map;
+ u8 redundant_path_failure_map;
+ u8 active_path_number;
+ __le16 alternate_paths_phys_connector[8];
+ u8 alternate_paths_phys_box_on_port[8];
+ u8 multi_lun_device_lun_count;
+ u8 minimum_good_fw_revision[8];
+ u8 unique_inquiry_bytes[20];
+ u8 current_temperature_degreesC;
+ u8 temperature_threshold_degreesC;
+ u8 max_temperature_degreesC;
+ u8 logical_blocks_per_phys_block_exp;
+ __le16 current_queue_depth_limit;
+ u8 switch_name[10];
+ __le16 switch_port;
+ u8 alternate_paths_switch_name[40];
+ u8 alternate_paths_switch_port[8];
+ __le16 power_on_hours;
+ __le16 percent_endurance_used;
+ u8 drive_authentication;
+ u8 smart_carrier_authentication;
+ u8 smart_carrier_app_fw_version;
+ u8 smart_carrier_bootloader_fw_version;
+ u8 encryption_key_name[64];
+ __le32 misc_drive_flags;
+ __le16 dek_index;
+ u8 padding[112];
+};
+
+#pragma pack()
+
+int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
+void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
+int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
+ struct pqi_scsi_dev *device);
+void pqi_remove_sas_device(struct pqi_scsi_dev *device);
+struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
+ struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
+
+extern struct sas_function_template pqi_sas_transport_functions;
+
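+/*
+ * Fallback 64-bit MMIO accessors for architectures that do not provide
+ * native readq()/writeq(); each access is split into two 32-bit accesses
+ * (lower half first) and is therefore not atomic.
+ */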
+#if !defined(readq)
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u32 lower32;
+ u32 upper32;
+
+ lower32 = readl(addr);
+ upper32 = readl(addr + 4);
+
+ return ((u64)upper32 << 32) | lower32;
+}
+#endif
+
+#if !defined(writeq)
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+ u32 lower32;
+ u32 upper32;
+
+ lower32 = lower_32_bits(value);
+ upper32 = upper_32_bits(value);
+
+ writel(lower32, addr);
+ writel(upper32, addr + 4);
+}
+#endif
+
+#endif /* _SMARTPQI_H */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
new file mode 100644
index 000000000000..a535b2661f38
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -0,0 +1,6303 @@
+/*
+ * driver for Microsemi PQI-based storage controllers
+ * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/cciss_ioctl.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_transport_sas.h>
+#include <asm/unaligned.h>
+#include "smartpqi.h"
+#include "smartpqi_sis.h"
+
+#if !defined(BUILD_TIMESTAMP)
+#define BUILD_TIMESTAMP
+#endif
+
+#define DRIVER_VERSION "0.9.13-370"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 9
+#define DRIVER_RELEASE 13
+#define DRIVER_REVISION 370
+
+#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
+#define DRIVER_NAME_SHORT "smartpqi"
+
+MODULE_AUTHOR("Microsemi");
+MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+ DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
+
+static char *hpe_branded_controller = "HPE Smart Array Controller";
+static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
+static void pqi_scan_start(struct Scsi_Host *shost);
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_queue_group *queue_group, enum pqi_io_path path,
+ struct pqi_io_request *io_request);
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_iu_header *request, unsigned int flags,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+ unsigned int cdb_length, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info);
+
+/* for flags argument to pqi_submit_raid_request_synchronous() */
+#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
+
+static struct scsi_transport_template *pqi_sas_transport_template;
+
+static atomic_t pqi_controller_count = ATOMIC_INIT(0);
+
+static int pqi_disable_device_id_wildcards;
+module_param_named(disable_device_id_wildcards,
+ pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_device_id_wildcards,
+ "Disable device ID wildcards.");
+
+static char *raid_levels[] = {
+ "RAID-0",
+ "RAID-4",
+ "RAID-1(1+0)",
+ "RAID-5",
+ "RAID-5+1",
+ "RAID-ADG",
+ "RAID-1(ADM)",
+};
+
+static char *pqi_raid_level_to_string(u8 raid_level)
+{
+ if (raid_level < ARRAY_SIZE(raid_levels))
+ return raid_levels[raid_level];
+
+ return "";
+}
+
+#define SA_RAID_0 0
+#define SA_RAID_4 1
+#define SA_RAID_1 2 /* also used for RAID 10 */
+#define SA_RAID_5 3 /* also used for RAID 50 */
+#define SA_RAID_51 4
+#define SA_RAID_6 5 /* also used for RAID 60 */
+#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define SA_RAID_MAX SA_RAID_ADM
+#define SA_RAID_UNKNOWN 0xff
+
+static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
+{
+ scmd->scsi_done(scmd);
+}
+
+static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
+{
+ return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+}
+
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
+{
+ return !device->is_physical_device;
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
+static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
+{
+ if (ctrl_info->controller_online)
+ if (!sis_is_firmware_running(ctrl_info))
+ pqi_take_ctrl_offline(ctrl_info);
+}
+
+static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
+{
+ return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
+}
+
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ return sis_read_driver_scratch(ctrl_info);
+}
+
+static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_mode mode)
+{
+ sis_write_driver_scratch(ctrl_info, mode);
+}
+
+#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
+
+static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ schedule_delayed_work(&ctrl_info->rescan_work,
+ PQI_RESCAN_WORK_INTERVAL);
+}
+
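+/*
+ * DMA-map a single contiguous buffer and describe it with one SG
+ * descriptor, marked as the last (and only) element in the list.
+ */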
+static int pqi_map_single(struct pci_dev *pci_dev,
+ struct pqi_sg_descriptor *sg_descriptor, void *buffer,
+ size_t buffer_length, int data_direction)
+{
+ dma_addr_t bus_address;
+
+ if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+ return 0;
+
+ bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+ data_direction);
+ if (pci_dma_mapping_error(pci_dev, bus_address))
+ return -ENOMEM;
+
+ put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
+ put_unaligned_le32(buffer_length, &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+ return 0;
+}
+
+static void pqi_pci_unmap(struct pci_dev *pci_dev,
+ struct pqi_sg_descriptor *descriptors, int num_descriptors,
+ int data_direction)
+{
+ int i;
+
+ if (data_direction == PCI_DMA_NONE)
+ return;
+
+ for (i = 0; i < num_descriptors; i++)
+ pci_unmap_single(pci_dev,
+ (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
+ get_unaligned_le32(&descriptors[i].length),
+ data_direction);
+}
+
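+/*
+ * Build a RAID path request IU and CDB for an internal CISS/BMIC command
+ * and DMA-map the associated data buffer into the first SG descriptor.
+ */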
+static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_raid_path_request *request, u8 cmd,
+ u8 *scsi3addr, void *buffer, size_t buffer_length,
+ u16 vpd_page, int *pci_direction)
+{
+ u8 *cdb;
+ int pci_dir;
+
+ memset(request, 0, sizeof(*request));
+
+ request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+ put_unaligned_le16(offsetof(struct pqi_raid_path_request,
+ sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
+ &request->header.iu_length);
+ put_unaligned_le32(buffer_length, &request->buffer_length);
+ memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
+ request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+ cdb = request->cdb;
+
+ switch (cmd) {
+ case INQUIRY:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = INQUIRY;
+ if (vpd_page & VPD_PAGE) {
+ cdb[1] = 0x1;
+ cdb[2] = (u8)vpd_page;
+ }
+ cdb[4] = (u8)buffer_length;
+ break;
+ case CISS_REPORT_LOG:
+ case CISS_REPORT_PHYS:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = cmd;
+ if (cmd == CISS_REPORT_PHYS)
+ cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+ else
+ cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ put_unaligned_be32(buffer_length, &cdb[6]);
+ break;
+ case CISS_GET_RAID_MAP:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = CISS_READ;
+ cdb[1] = CISS_GET_RAID_MAP;
+ put_unaligned_be32(buffer_length, &cdb[6]);
+ break;
+ case SA_CACHE_FLUSH:
+ request->data_direction = SOP_WRITE_FLAG;
+ cdb[0] = BMIC_WRITE;
+ cdb[6] = BMIC_CACHE_FLUSH;
+ put_unaligned_be16(buffer_length, &cdb[7]);
+ break;
+ case BMIC_IDENTIFY_CONTROLLER:
+ case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ request->data_direction = SOP_READ_FLAG;
+ cdb[0] = BMIC_READ;
+ cdb[6] = cmd;
+ put_unaligned_be16(buffer_length, &cdb[7]);
+ break;
+ case BMIC_WRITE_HOST_WELLNESS:
+ request->data_direction = SOP_WRITE_FLAG;
+ cdb[0] = BMIC_WRITE;
+ cdb[6] = cmd;
+ put_unaligned_be16(buffer_length, &cdb[7]);
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
+ cmd);
+ WARN_ON(cmd);
+ break;
+ }
+
+ switch (request->data_direction) {
+ case SOP_READ_FLAG:
+ pci_dir = PCI_DMA_FROMDEVICE;
+ break;
+ case SOP_WRITE_FLAG:
+ pci_dir = PCI_DMA_TODEVICE;
+ break;
+ case SOP_NO_DIRECTION_FLAG:
+ pci_dir = PCI_DMA_NONE;
+ break;
+ default:
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ break;
+ }
+
+ *pci_direction = pci_dir;
+
+ return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
+ buffer, buffer_length, pci_dir);
+}
+
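+/*
+ * Claim a free request from the pool: starting at the (racily read) next
+ * slot hint, take the first slot whose refcount can be raised from 0 to 1.
+ */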
+static struct pqi_io_request *pqi_alloc_io_request(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ struct pqi_io_request *io_request;
+ u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
+
+ while (1) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_inc_return(&io_request->refcount) == 1)
+ break;
+ atomic_dec(&io_request->refcount);
+ i = (i + 1) % ctrl_info->max_io_slots;
+ }
+
+ /* benignly racy */
+ ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
+
+ io_request->scmd = NULL;
+ io_request->status = 0;
+ io_request->error_info = NULL;
+
+ return io_request;
+}
+
+static void pqi_free_io_request(struct pqi_io_request *io_request)
+{
+ atomic_dec(&io_request->refcount);
+}
+
+static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_identify_controller *buffer)
+{
+ int rc;
+ int pci_direction;
+ struct pqi_raid_path_request request;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
+ sizeof(*buffer), 0, &pci_direction);
+ if (rc)
+ return rc;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ return rc;
+}
+
+static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+ u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
+{
+ int rc;
+ int pci_direction;
+ struct pqi_raid_path_request request;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
+ &pci_direction);
+ if (rc)
+ return rc;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ return rc;
+}
+
+static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *buffer,
+ size_t buffer_length)
+{
+ int rc;
+ int pci_direction;
+ u16 bmic_device_index;
+ struct pqi_raid_path_request request;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
+ buffer_length, 0, &pci_direction);
+ if (rc)
+ return rc;
+
+ bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
+ request.cdb[2] = (u8)bmic_device_index;
+ request.cdb[9] = (u8)(bmic_device_index >> 8);
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ return rc;
+}
+
+#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
+
+static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct pqi_raid_path_request request;
+ int pci_direction;
+ u8 *buffer;
+
+ /*
+ * Don't bother trying to flush the cache if the controller is
+ * locked up.
+ */
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+
+ buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
+ SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
+ if (rc)
+ goto out;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+out:
+ kfree(buffer);
+
+ return rc;
+}
+
+static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+ void *buffer, size_t buffer_length)
+{
+ int rc;
+ struct pqi_raid_path_request request;
+ int pci_direction;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
+ buffer_length, 0, &pci_direction);
+ if (rc)
+ return rc;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_driver_version {
+ u8 start_tag[4];
+ u8 driver_version_tag[2];
+ __le16 driver_version_length;
+ char driver_version[32];
+ u8 end_tag[2];
+};
+
+#pragma pack()
+
+static int pqi_write_driver_version_to_host_wellness(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct bmic_host_wellness_driver_version *buffer;
+ size_t buffer_length;
+
+ buffer_length = sizeof(*buffer);
+
+ buffer = kmalloc(buffer_length, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer->start_tag[0] = '<';
+ buffer->start_tag[1] = 'H';
+ buffer->start_tag[2] = 'W';
+ buffer->start_tag[3] = '>';
+ buffer->driver_version_tag[0] = 'D';
+ buffer->driver_version_tag[1] = 'V';
+ put_unaligned_le16(sizeof(buffer->driver_version),
+ &buffer->driver_version_length);
+ strncpy(buffer->driver_version, DRIVER_VERSION,
+ sizeof(buffer->driver_version) - 1);
+ buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+ buffer->end_tag[0] = 'Z';
+ buffer->end_tag[1] = 'Z';
+
+ rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+ kfree(buffer);
+
+ return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_time {
+ u8 start_tag[4];
+ u8 time_tag[2];
+ __le16 time_length;
+ u8 time[8];
+ u8 dont_write_tag[2];
+ u8 end_tag[2];
+};
+
+#pragma pack()
+
+static int pqi_write_current_time_to_host_wellness(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct bmic_host_wellness_time *buffer;
+ size_t buffer_length;
+ time64_t local_time;
+ unsigned int year;
+ struct timeval time;
+ struct rtc_time tm;
+
+ buffer_length = sizeof(*buffer);
+
+ buffer = kmalloc(buffer_length, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer->start_tag[0] = '<';
+ buffer->start_tag[1] = 'H';
+ buffer->start_tag[2] = 'W';
+ buffer->start_tag[3] = '>';
+ buffer->time_tag[0] = 'T';
+ buffer->time_tag[1] = 'D';
+ put_unaligned_le16(sizeof(buffer->time),
+ &buffer->time_length);
+
+ do_gettimeofday(&time);
+ local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
+ rtc_time64_to_tm(local_time, &tm);
+ year = tm.tm_year + 1900;
+
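+ /* BCD fields: hour, minute, second, 0, month, day, century, year % 100 */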
+ buffer->time[0] = bin2bcd(tm.tm_hour);
+ buffer->time[1] = bin2bcd(tm.tm_min);
+ buffer->time[2] = bin2bcd(tm.tm_sec);
+ buffer->time[3] = 0;
+ buffer->time[4] = bin2bcd(tm.tm_mon + 1);
+ buffer->time[5] = bin2bcd(tm.tm_mday);
+ buffer->time[6] = bin2bcd(year / 100);
+ buffer->time[7] = bin2bcd(year % 100);
+
+ buffer->dont_write_tag[0] = 'D';
+ buffer->dont_write_tag[1] = 'W';
+ buffer->end_tag[0] = 'Z';
+ buffer->end_tag[1] = 'Z';
+
+ rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+ kfree(buffer);
+
+ return rc;
+}
+
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
+
+static void pqi_update_time_worker(struct work_struct *work)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+ update_time_work);
+
+ rc = pqi_write_current_time_to_host_wellness(ctrl_info);
+ if (rc)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "error updating time on controller\n");
+
+ schedule_delayed_work(&ctrl_info->update_time_work,
+ PQI_UPDATE_TIME_WORK_INTERVAL);
+}
+
+static inline void pqi_schedule_update_time_worker(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ schedule_delayed_work(&ctrl_info->update_time_work, 0);
+}
+
+static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+ void *buffer, size_t buffer_length)
+{
+ int rc;
+ int pci_direction;
+ struct pqi_raid_path_request request;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+ if (rc)
+ return rc;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ return rc;
+}
+
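+/*
+ * Issue the report LUNs command twice: once to learn the list length and
+ * again to fetch the full list; retry if the list grew in between.
+ */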
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+ void **buffer)
+{
+ int rc;
+ size_t lun_list_length;
+ size_t lun_data_length;
+ size_t new_lun_list_length;
+ void *lun_data = NULL;
+ struct report_lun_header *report_lun_header;
+
+ report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
+ if (!report_lun_header) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
+ sizeof(*report_lun_header));
+ if (rc)
+ goto out;
+
+ lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
+
+again:
+ lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
+
+ lun_data = kmalloc(lun_data_length, GFP_KERNEL);
+ if (!lun_data) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (lun_list_length == 0) {
+ memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
+ goto out;
+ }
+
+ rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
+ if (rc)
+ goto out;
+
+ new_lun_list_length = get_unaligned_be32(
+ &((struct report_lun_header *)lun_data)->list_length);
+
+ if (new_lun_list_length > lun_list_length) {
+ lun_list_length = new_lun_list_length;
+ kfree(lun_data);
+ goto again;
+ }
+
+out:
+ kfree(report_lun_header);
+
+ if (rc) {
+ kfree(lun_data);
+ lun_data = NULL;
+ }
+
+ *buffer = lun_data;
+
+ return rc;
+}
+
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
+ void **buffer)
+{
+ return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
+ buffer);
+}
+
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
+ void **buffer)
+{
+ return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
+}
+
+static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
+ struct report_phys_lun_extended **physdev_list,
+ struct report_log_lun_extended **logdev_list)
+{
+ int rc;
+ size_t logdev_list_length;
+ size_t logdev_data_length;
+ struct report_log_lun_extended *internal_logdev_list;
+ struct report_log_lun_extended *logdev_data;
+ struct report_lun_header report_lun_header;
+
+ rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "report physical LUNs failed\n");
+
+ rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "report logical LUNs failed\n");
+
+ /*
+ * Tack the controller itself onto the end of the logical device list.
+ */
+
+ logdev_data = *logdev_list;
+
+ if (logdev_data) {
+ logdev_list_length =
+ get_unaligned_be32(&logdev_data->header.list_length);
+ } else {
+ memset(&report_lun_header, 0, sizeof(report_lun_header));
+ logdev_data =
+ (struct report_log_lun_extended *)&report_lun_header;
+ logdev_list_length = 0;
+ }
+
+ logdev_data_length = sizeof(struct report_lun_header) +
+ logdev_list_length;
+
+ internal_logdev_list = kmalloc(logdev_data_length +
+ sizeof(struct report_log_lun_extended), GFP_KERNEL);
+ if (!internal_logdev_list) {
+ kfree(*logdev_list);
+ *logdev_list = NULL;
+ return -ENOMEM;
+ }
+
+ memcpy(internal_logdev_list, logdev_data, logdev_data_length);
+ memset((u8 *)internal_logdev_list + logdev_data_length, 0,
+ sizeof(struct report_log_lun_extended_entry));
+ put_unaligned_be32(logdev_list_length +
+ sizeof(struct report_log_lun_extended_entry),
+ &internal_logdev_list->header.list_length);
+
+ kfree(*logdev_list);
+ *logdev_list = internal_logdev_list;
+
+ return 0;
+}
+
+static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
+ int bus, int target, int lun)
+{
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
+}
+
+static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
+{
+ u8 *scsi3addr;
+ u32 lunid;
+
+ scsi3addr = device->scsi3addr;
+ lunid = get_unaligned_le32(scsi3addr);
+
+ if (pqi_is_hba_lunid(scsi3addr)) {
+ /* The specified device is the controller. */
+ pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
+ device->target_lun_valid = true;
+ return;
+ }
+
+ if (pqi_is_logical_device(device)) {
+ pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
+ lunid & 0x3fff);
+ device->target_lun_valid = true;
+ return;
+ }
+
+ /*
+ * Defer target and LUN assignment for non-controller physical devices
+ * because the SAS transport layer will make these assignments later.
+ */
+ pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
+}
+
+static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ u8 raid_level;
+ u8 *buffer;
+
+ raid_level = SA_RAID_UNKNOWN;
+
+ buffer = kmalloc(64, GFP_KERNEL);
+ if (buffer) {
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+ VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
+ if (rc == 0) {
+ raid_level = buffer[8];
+ if (raid_level > SA_RAID_MAX)
+ raid_level = SA_RAID_UNKNOWN;
+ }
+ kfree(buffer);
+ }
+
+ device->raid_level = raid_level;
+}
+
+static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct raid_map *raid_map)
+{
+ char *err_msg;
+ u32 raid_map_size;
+ u32 r5or6_blocks_per_row;
+ unsigned int num_phys_disks;
+ unsigned int num_raid_map_entries;
+
+ raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+ if (raid_map_size < offsetof(struct raid_map, disk_data)) {
+ err_msg = "RAID map too small";
+ goto bad_raid_map;
+ }
+
+ if (raid_map_size > sizeof(*raid_map)) {
+ err_msg = "RAID map too large";
+ goto bad_raid_map;
+ }
+
+ num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+ (get_unaligned_le16(&raid_map->data_disks_per_row) +
+ get_unaligned_le16(&raid_map->metadata_disks_per_row));
+ num_raid_map_entries = num_phys_disks *
+ get_unaligned_le16(&raid_map->row_cnt);
+
+ if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
+ err_msg = "invalid number of map entries in RAID map";
+ goto bad_raid_map;
+ }
+
+ if (device->raid_level == SA_RAID_1) {
+ if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
+ err_msg = "invalid RAID-1 map";
+ goto bad_raid_map;
+ }
+ } else if (device->raid_level == SA_RAID_ADM) {
+ if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
+ err_msg = "invalid RAID-1(ADM) map";
+ goto bad_raid_map;
+ }
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) &&
+ get_unaligned_le16(&raid_map->layout_map_count) > 1) {
+ /* RAID 50/60 */
+ r5or6_blocks_per_row =
+ get_unaligned_le16(&raid_map->strip_size) *
+ get_unaligned_le16(&raid_map->data_disks_per_row);
+ if (r5or6_blocks_per_row == 0) {
+ err_msg = "invalid RAID-5 or RAID-6 map";
+ goto bad_raid_map;
+ }
+ }
+
+ return 0;
+
+bad_raid_map:
+ dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
+
+ return -EINVAL;
+}
+
+static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ int pci_direction;
+ struct pqi_raid_path_request request;
+ struct raid_map *raid_map;
+
+ raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
+ if (!raid_map)
+ return -ENOMEM;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
+ sizeof(*raid_map), 0, &pci_direction);
+ if (rc)
+ goto error;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ pci_direction);
+
+ if (rc)
+ goto error;
+
+ rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
+ if (rc)
+ goto error;
+
+ device->raid_map = raid_map;
+
+ return 0;
+
+error:
+ kfree(raid_map);
+
+ return rc;
+}
+
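+/*
+ * Read the vendor-specific offload status VPD page to determine whether
+ * I/O accelerator (RAID bypass) offload is configured and enabled for a
+ * volume, and fetch the volume's RAID map if offload is configured.
+ */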
+static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ u8 *buffer;
+ u8 offload_status;
+
+ buffer = kmalloc(64, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+ VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
+ if (rc)
+ goto out;
+
+#define OFFLOAD_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x1
+#define OFFLOAD_ENABLED_BIT 0x2
+
+ offload_status = buffer[OFFLOAD_STATUS_BYTE];
+ device->offload_configured =
+ !!(offload_status & OFFLOAD_CONFIGURED_BIT);
+ if (device->offload_configured) {
+ device->offload_enabled_pending =
+ !!(offload_status & OFFLOAD_ENABLED_BIT);
+ if (pqi_get_raid_map(ctrl_info, device))
+ device->offload_enabled_pending = false;
+ }
+
+out:
+ kfree(buffer);
+}
+
+/*
+ * Use vendor-specific VPD to determine online/offline status of a volume.
+ */
+
+static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ size_t page_length;
+ u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
+ bool volume_offline = true;
+ u32 volume_flags;
+ struct ciss_vpd_logical_volume_status *vpd;
+
+ vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
+ if (!vpd)
+ goto no_buffer;
+
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+ VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
+ if (rc)
+ goto out;
+
+ page_length = offsetof(struct ciss_vpd_logical_volume_status,
+ volume_status) + vpd->page_length;
+ if (page_length < sizeof(*vpd))
+ goto out;
+
+ volume_status = vpd->volume_status;
+ volume_flags = get_unaligned_be32(&vpd->flags);
+ volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
+
+out:
+ kfree(vpd);
+no_buffer:
+ device->volume_status = volume_status;
+ device->volume_offline = volume_offline;
+}
+
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ u8 *buffer;
+
+ buffer = kmalloc(64, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Send an inquiry to the device to see what it is. */
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+ if (rc)
+ goto out;
+
+ scsi_sanitize_inquiry_string(&buffer[8], 8);
+ scsi_sanitize_inquiry_string(&buffer[16], 16);
+
+ device->devtype = buffer[0] & 0x1f;
+ memcpy(device->vendor, &buffer[8],
+ sizeof(device->vendor));
+ memcpy(device->model, &buffer[16],
+ sizeof(device->model));
+
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+ pqi_get_raid_level(ctrl_info, device);
+ pqi_get_offload_status(ctrl_info, device);
+ pqi_get_volume_status(ctrl_info, device);
+ }
+
+out:
+ kfree(buffer);
+
+ return rc;
+}
+
+static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ rc = pqi_identify_physical_device(ctrl_info, device,
+ id_phys, sizeof(*id_phys));
+ if (rc) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return;
+ }
+
+ device->queue_depth =
+ get_unaligned_le16(&id_phys->current_queue_depth_limit);
+ device->device_type = id_phys->device_type;
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+}
+
+static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ char *status;
+ static const char unknown_state_str[] =
+ "Volume is in an unknown state (%u)";
+ char unknown_state_buffer[sizeof(unknown_state_str) + 10];
+
+ switch (device->volume_status) {
+ case CISS_LV_OK:
+ status = "Volume online";
+ break;
+ case CISS_LV_FAILED:
+ status = "Volume failed";
+ break;
+ case CISS_LV_NOT_CONFIGURED:
+ status = "Volume not configured";
+ break;
+ case CISS_LV_DEGRADED:
+ status = "Volume degraded";
+ break;
+ case CISS_LV_READY_FOR_RECOVERY:
+ status = "Volume ready for recovery operation";
+ break;
+ case CISS_LV_UNDERGOING_RECOVERY:
+ status = "Volume undergoing recovery";
+ break;
+ case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
+ status = "Wrong physical drive was replaced";
+ break;
+ case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
+ status = "A physical drive not properly connected";
+ break;
+ case CISS_LV_HARDWARE_OVERHEATING:
+ status = "Hardware is overheating";
+ break;
+ case CISS_LV_HARDWARE_HAS_OVERHEATED:
+ status = "Hardware has overheated";
+ break;
+ case CISS_LV_UNDERGOING_EXPANSION:
+ status = "Volume undergoing expansion";
+ break;
+ case CISS_LV_NOT_AVAILABLE:
+ status = "Volume waiting for transforming volume";
+ break;
+ case CISS_LV_QUEUED_FOR_EXPANSION:
+ status = "Volume queued for expansion";
+ break;
+ case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
+ status = "Volume disabled due to SCSI ID conflict";
+ break;
+ case CISS_LV_EJECTED:
+ status = "Volume has been ejected";
+ break;
+ case CISS_LV_UNDERGOING_ERASE:
+ status = "Volume undergoing background erase";
+ break;
+ case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
+ status = "Volume ready for predictive spare rebuild";
+ break;
+ case CISS_LV_UNDERGOING_RPI:
+ status = "Volume undergoing rapid parity initialization";
+ break;
+ case CISS_LV_PENDING_RPI:
+ status = "Volume queued for rapid parity initialization";
+ break;
+ case CISS_LV_ENCRYPTED_NO_KEY:
+ status = "Encrypted volume inaccessible - key not present";
+ break;
+ case CISS_LV_UNDERGOING_ENCRYPTION:
+ status = "Volume undergoing encryption process";
+ break;
+ case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ status = "Volume undergoing encryption re-keying process";
+ break;
+ case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ status =
+ "Encrypted volume inaccessible - disabled on ctrl";
+ break;
+ case CISS_LV_PENDING_ENCRYPTION:
+ status = "Volume pending migration to encrypted state";
+ break;
+ case CISS_LV_PENDING_ENCRYPTION_REKEYING:
+ status = "Volume pending encryption rekeying";
+ break;
+ case CISS_LV_NOT_SUPPORTED:
+ status = "Volume not supported on this controller";
+ break;
+ case CISS_LV_STATUS_UNAVAILABLE:
+ status = "Volume status not available";
+ break;
+ default:
+ snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
+ unknown_state_str, device->volume_status);
+ status = unknown_state_buffer;
+ break;
+ }
+
+ dev_info(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d %s\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target, device->lun, status);
+}
+
+static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
+ struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
+{
+ struct pqi_scsi_dev *device;
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+ continue;
+ if (pqi_is_logical_device(device))
+ continue;
+ if (device->aio_handle == aio_handle)
+ return device;
+ }
+
+ return NULL;
+}
+
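+/*
+ * Derive a logical drive's queue depth by summing the queue depths of the
+ * physical disks referenced by its RAID map; without a RAID map the
+ * default (no limit) is kept.
+ */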
+static void pqi_update_logical_drive_queue_depth(
+ struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
+{
+ unsigned int i;
+ struct raid_map *raid_map;
+ struct raid_map_disk_data *disk_data;
+ struct pqi_scsi_dev *phys_disk;
+ unsigned int num_phys_disks;
+ unsigned int num_raid_map_entries;
+ unsigned int queue_depth;
+
+ logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
+
+ raid_map = logical_drive->raid_map;
+ if (!raid_map)
+ return;
+
+ disk_data = raid_map->disk_data;
+ num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+ (get_unaligned_le16(&raid_map->data_disks_per_row) +
+ get_unaligned_le16(&raid_map->metadata_disks_per_row));
+ num_raid_map_entries = num_phys_disks *
+ get_unaligned_le16(&raid_map->row_cnt);
+
+ queue_depth = 0;
+ for (i = 0; i < num_raid_map_entries; i++) {
+ phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
+ disk_data[i].aio_handle);
+
+ if (!phys_disk) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "failed to find physical disk for logical drive %016llx\n",
+ get_unaligned_be64(logical_drive->scsi3addr));
+ logical_drive->offload_enabled = false;
+ logical_drive->offload_enabled_pending = false;
+ kfree(raid_map);
+ logical_drive->raid_map = NULL;
+ return;
+ }
+
+ queue_depth += phys_disk->queue_depth;
+ }
+
+ logical_drive->queue_depth = queue_depth;
+}
+
+static void pqi_update_all_logical_drive_queue_depths(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ struct pqi_scsi_dev *device;
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+ continue;
+ if (!pqi_is_logical_device(device))
+ continue;
+ pqi_update_logical_drive_queue_depth(ctrl_info, device);
+ }
+}
+
+static void pqi_rescan_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+ rescan_work);
+
+ pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+
+ if (pqi_is_logical_device(device))
+ rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
+ device->target, device->lun);
+ else
+ rc = pqi_add_sas_device(ctrl_info->sas_host, device);
+
+ return rc;
+}
+
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ if (pqi_is_logical_device(device))
+ scsi_remove_device(device->sdev);
+ else
+ pqi_remove_sas_device(device);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
+ int bus, int target, int lun)
+{
+ struct pqi_scsi_dev *device;
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry)
+ if (device->bus == bus && device->target == target &&
+ device->lun == lun)
+ return device;
+
+ return NULL;
+}
+
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
+ struct pqi_scsi_dev *dev2)
+{
+ if (dev1->is_physical_device != dev2->is_physical_device)
+ return false;
+
+ if (dev1->is_physical_device)
+ return dev1->wwid == dev2->wwid;
+
+ return memcmp(dev1->volume_id, dev2->volume_id,
+ sizeof(dev1->volume_id)) == 0;
+}
+
+enum pqi_find_result {
+ DEVICE_NOT_FOUND,
+ DEVICE_CHANGED,
+ DEVICE_SAME,
+};
+
+static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device_to_find,
+ struct pqi_scsi_dev **matching_device)
+{
+ struct pqi_scsi_dev *device;
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
+ device->scsi3addr)) {
+ *matching_device = device;
+ if (pqi_device_equal(device_to_find, device)) {
+ if (device_to_find->volume_offline)
+ return DEVICE_CHANGED;
+ return DEVICE_SAME;
+ }
+ return DEVICE_CHANGED;
+ }
+ }
+
+ return DEVICE_NOT_FOUND;
+}
+
+static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
+ char *action, struct pqi_scsi_dev *device)
+{
+ dev_info(&ctrl_info->pci_dev->dev,
+ "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
+ action,
+ ctrl_info->scsi_host->host_no,
+ device->bus,
+ device->target,
+ device->lun,
+ scsi_device_type(device->devtype),
+ device->vendor,
+ device->model,
+ pqi_raid_level_to_string(device->raid_level),
+ device->offload_configured ? '+' : '-',
+ device->offload_enabled_pending ? '+' : '-',
+ device->expose_device ? '+' : '-',
+ device->queue_depth);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
+ struct pqi_scsi_dev *new_device)
+{
+ existing_device->devtype = new_device->devtype;
+ existing_device->device_type = new_device->device_type;
+ existing_device->bus = new_device->bus;
+ if (new_device->target_lun_valid) {
+ existing_device->target = new_device->target;
+ existing_device->lun = new_device->lun;
+ existing_device->target_lun_valid = true;
+ }
+
+ /* By definition, the scsi3addr and wwid fields are already the same. */
+
+ existing_device->is_physical_device = new_device->is_physical_device;
+ existing_device->expose_device = new_device->expose_device;
+ existing_device->no_uld_attach = new_device->no_uld_attach;
+ existing_device->aio_enabled = new_device->aio_enabled;
+ memcpy(existing_device->vendor, new_device->vendor,
+ sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model,
+ sizeof(existing_device->model));
+ existing_device->sas_address = new_device->sas_address;
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->queue_depth = new_device->queue_depth;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->volume_status = new_device->volume_status;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ memcpy(existing_device->box, new_device->box,
+ sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector,
+ sizeof(existing_device->phys_connector));
+ existing_device->offload_configured = new_device->offload_configured;
+ existing_device->offload_enabled = false;
+ existing_device->offload_enabled_pending =
+ new_device->offload_enabled_pending;
+ existing_device->offload_to_mirror = 0;
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+}
+
+static inline void pqi_free_device(struct pqi_scsi_dev *device)
+{
+ if (device) {
+ kfree(device->raid_map);
+ kfree(device);
+ }
+}
+
+/*
+ * Called when exposing a new device to the OS fails in order to re-adjust
+ * our internal SCSI device list to match the SCSI ML's view.
+ */
+
+static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ /* Allow the device structure to be freed later. */
+ device->keep_device = false;
+}
+
+static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
+{
+ int rc;
+ unsigned int i;
+ unsigned long flags;
+ enum pqi_find_result find_result;
+ struct pqi_scsi_dev *device;
+ struct pqi_scsi_dev *next;
+ struct pqi_scsi_dev *matching_device;
+ struct list_head add_list;
+ struct list_head delete_list;
+
+ INIT_LIST_HEAD(&add_list);
+ INIT_LIST_HEAD(&delete_list);
+
+ /*
+ * The idea here is to do as little work as possible while holding the
+ * spinlock. That's why we go to great pains to defer anything other
+ * than updating the internal device list until after we release the
+ * spinlock.
+ */
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ /* Assume that all devices in the existing list have gone away. */
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry)
+ device->device_gone = true;
+
+ for (i = 0; i < num_new_devices; i++) {
+ device = new_device_list[i];
+
+ find_result = pqi_scsi_find_entry(ctrl_info, device,
+ &matching_device);
+
+ switch (find_result) {
+ case DEVICE_SAME:
+ /*
+ * The newly found device is already in the existing
+ * device list.
+ */
+ device->new_device = false;
+ matching_device->device_gone = false;
+ pqi_scsi_update_device(matching_device, device);
+ break;
+ case DEVICE_NOT_FOUND:
+ /*
+ * The newly found device is NOT in the existing device
+ * list.
+ */
+ device->new_device = true;
+ break;
+ case DEVICE_CHANGED:
+ /*
+ * The original device has gone away and we need to add
+ * the new device.
+ */
+ device->new_device = true;
+ break;
+ default:
+ WARN_ON(find_result);
+ break;
+ }
+ }
+
+ /* Process all devices that have gone away. */
+ list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->device_gone) {
+ list_del(&device->scsi_device_list_entry);
+ list_add_tail(&device->delete_list_entry, &delete_list);
+ }
+ }
+
+ /* Process all new devices. */
+ for (i = 0; i < num_new_devices; i++) {
+ device = new_device_list[i];
+ if (!device->new_device)
+ continue;
+ if (device->volume_offline)
+ continue;
+ list_add_tail(&device->scsi_device_list_entry,
+ &ctrl_info->scsi_device_list);
+ list_add_tail(&device->add_list_entry, &add_list);
+ /* To prevent this device structure from being freed later. */
+ device->keep_device = true;
+ }
+
+ pqi_update_all_logical_drive_queue_depths(ctrl_info);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry)
+ device->offload_enabled =
+ device->offload_enabled_pending;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ /* Remove all devices that have gone away. */
+ list_for_each_entry_safe(device, next, &delete_list,
+ delete_list_entry) {
+ if (device->sdev)
+ pqi_remove_device(ctrl_info, device);
+ if (device->volume_offline) {
+ pqi_dev_info(ctrl_info, "offline", device);
+ pqi_show_volume_status(ctrl_info, device);
+ } else {
+ pqi_dev_info(ctrl_info, "removed", device);
+ }
+ list_del(&device->delete_list_entry);
+ pqi_free_device(device);
+ }
+
+ /*
+ * Notify the SCSI ML if the queue depth of any existing device has
+ * changed.
+ */
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->sdev && device->queue_depth !=
+ device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev,
+ device->advertised_queue_depth);
+ }
+ }
+
+ /* Expose any new devices. */
+ list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
+ if (device->expose_device && !device->sdev) {
+ rc = pqi_add_device(ctrl_info, device);
+ if (rc) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d addition failed, device not added\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target,
+ device->lun);
+ pqi_fixup_botched_add(ctrl_info, device);
+ continue;
+ }
+ }
+ pqi_dev_info(ctrl_info, "added", device);
+ }
+}
+
+static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+{
+ bool is_supported = false;
+
+ switch (device->devtype) {
+ case TYPE_DISK:
+ case TYPE_ZBC:
+ case TYPE_TAPE:
+ case TYPE_MEDIUM_CHANGER:
+ case TYPE_ENCLOSURE:
+ is_supported = true;
+ break;
+ case TYPE_RAID:
+ /*
+ * Only support the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, MSA500
+ * or similar), we don't support it.
+ */
+ if (pqi_is_hba_lunid(device->scsi3addr))
+ is_supported = true;
+ break;
+ }
+
+ return is_supported;
+}
+
+static inline bool pqi_skip_device(u8 *scsi3addr,
+ struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+{
+ u8 device_flags;
+
+ if (!MASKED_DEVICE(scsi3addr))
+ return false;
+
+ /* The device is masked. */
+
+ device_flags = phys_lun_ext_entry->device_flags;
+
+ if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
+ /*
+ * It's a non-disk device. We ignore all devices of this type
+ * when they're masked.
+ */
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+ /* Expose all devices except for physical devices that are masked. */
+ if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
+ return false;
+
+ return true;
+}
+
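+/*
+ * Rebuild the internal device list: fetch the physical and logical LUN
+ * lists from the controller, gather per-device information, then
+ * reconcile the result with the existing list via pqi_update_device_list().
+ */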
+static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+ int rc;
+ struct list_head new_device_list_head;
+ struct report_phys_lun_extended *physdev_list = NULL;
+ struct report_log_lun_extended *logdev_list = NULL;
+ struct report_phys_lun_extended_entry *phys_lun_ext_entry;
+ struct report_log_lun_extended_entry *log_lun_ext_entry;
+ struct bmic_identify_physical_device *id_phys = NULL;
+ u32 num_physicals;
+ u32 num_logicals;
+ struct pqi_scsi_dev **new_device_list = NULL;
+ struct pqi_scsi_dev *device;
+ struct pqi_scsi_dev *next;
+ unsigned int num_new_devices;
+ unsigned int num_valid_devices;
+ bool is_physical_device;
+ u8 *scsi3addr;
+ static char *out_of_memory_msg =
+ "out of memory, device discovery stopped";
+
+ INIT_LIST_HEAD(&new_device_list_head);
+
+ rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
+ if (rc)
+ goto out;
+
+ if (physdev_list)
+ num_physicals =
+ get_unaligned_be32(&physdev_list->header.list_length)
+ / sizeof(physdev_list->lun_entries[0]);
+ else
+ num_physicals = 0;
+
+ if (logdev_list)
+ num_logicals =
+ get_unaligned_be32(&logdev_list->header.list_length)
+ / sizeof(logdev_list->lun_entries[0]);
+ else
+ num_logicals = 0;
+
+ if (num_physicals) {
+ /*
+ * We need this buffer for calls to pqi_get_physical_disk_info()
+ * below. We allocate it here instead of inside
+ * pqi_get_physical_disk_info() because it's a fairly large
+ * buffer.
+ */
+ id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
+ if (!id_phys) {
+ dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+ out_of_memory_msg);
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ num_new_devices = num_physicals + num_logicals;
+
+ new_device_list = kmalloc(sizeof(*new_device_list) *
+ num_new_devices, GFP_KERNEL);
+ if (!new_device_list) {
+ dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_new_devices; i++) {
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+ out_of_memory_msg);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&device->new_device_list_entry,
+ &new_device_list_head);
+ }
+
+ device = NULL;
+ num_valid_devices = 0;
+
+ for (i = 0; i < num_new_devices; i++) {
+
+ if (i < num_physicals) {
+ is_physical_device = true;
+ phys_lun_ext_entry = &physdev_list->lun_entries[i];
+ log_lun_ext_entry = NULL;
+ scsi3addr = phys_lun_ext_entry->lunid;
+ } else {
+ is_physical_device = false;
+ phys_lun_ext_entry = NULL;
+ log_lun_ext_entry =
+ &logdev_list->lun_entries[i - num_physicals];
+ scsi3addr = log_lun_ext_entry->lunid;
+ }
+
+ if (is_physical_device &&
+ pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+ continue;
+
+ if (device)
+ device = list_next_entry(device, new_device_list_entry);
+ else
+ device = list_first_entry(&new_device_list_head,
+ struct pqi_scsi_dev, new_device_list_entry);
+
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ device->is_physical_device = is_physical_device;
+ device->raid_level = SA_RAID_UNKNOWN;
+
+ /* Gather information about the device. */
+ rc = pqi_get_device_info(ctrl_info, device);
+ if (rc == -ENOMEM) {
+ dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+ out_of_memory_msg);
+ goto out;
+ }
+ if (rc) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "obtaining device info failed, skipping device %016llx\n",
+ get_unaligned_be64(device->scsi3addr));
+ rc = 0;
+ continue;
+ }
+
+ if (!pqi_is_supported_device(device))
+ continue;
+
+ pqi_assign_bus_target_lun(device);
+
+ device->expose_device = pqi_expose_device(device);
+
+ if (device->is_physical_device) {
+ device->wwid = phys_lun_ext_entry->wwid;
+ if ((phys_lun_ext_entry->device_flags &
+ REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+ phys_lun_ext_entry->aio_handle)
+ device->aio_enabled = true;
+ } else {
+ memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+ sizeof(device->volume_id));
+ }
+
+ switch (device->devtype) {
+ case TYPE_DISK:
+ case TYPE_ZBC:
+ case TYPE_ENCLOSURE:
+ if (device->is_physical_device) {
+ device->sas_address =
+ get_unaligned_be64(&device->wwid);
+ if (device->devtype == TYPE_DISK ||
+ device->devtype == TYPE_ZBC) {
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
+ pqi_get_physical_disk_info(ctrl_info,
+ device, id_phys);
+ }
+ }
+ break;
+ }
+
+ new_device_list[num_valid_devices++] = device;
+ }
+
+ pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
+
+out:
+ list_for_each_entry_safe(device, next, &new_device_list_head,
+ new_device_list_entry) {
+ if (device->keep_device)
+ continue;
+ list_del(&device->new_device_list_entry);
+ pqi_free_device(device);
+ }
+
+ kfree(new_device_list);
+ kfree(physdev_list);
+ kfree(logdev_list);
+ kfree(id_phys);
+
+ return rc;
+}
+
+static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+ struct pqi_scsi_dev *next;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->sdev)
+ pqi_remove_device(ctrl_info, device);
+ list_del(&device->scsi_device_list_entry);
+ pqi_free_device(device);
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+
+ mutex_lock(&ctrl_info->scan_mutex);
+
+ rc = pqi_update_scsi_devices(ctrl_info);
+ if (rc)
+ pqi_schedule_rescan_worker(ctrl_info);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ return rc;
+}
+
+static void pqi_scan_start(struct Scsi_Host *shost)
+{
+ pqi_scan_scsi_devices(shost_to_hba(shost));
+}
+
+/* Returns TRUE if scan is finished. */
+
+static int pqi_scan_finished(struct Scsi_Host *shost,
+ unsigned long elapsed_time)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = shost_priv(shost);
+
+ return !mutex_is_locked(&ctrl_info->scan_mutex);
+}
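+
+/*
+ * The scan mutex is held for the duration of pqi_update_scsi_devices(),
+ * so finding the mutex unlocked here means the most recent scan has
+ * completed.
+ */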
+
+static inline void pqi_set_encryption_info(
+ struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
+ u64 first_block)
+{
+ u32 volume_blk_size;
+
+ /*
+ * Set the encryption tweak values based on logical block address.
+ * If the block size is 512, the tweak value is equal to the LBA.
+ * For other block sizes, the tweak value is (LBA * block size) / 512.
+ */
+ volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
+ if (volume_blk_size != 512)
+ first_block = (first_block * volume_blk_size) / 512;
+
+ encryption_info->data_encryption_key_index =
+ get_unaligned_le16(&raid_map->data_encryption_key_index);
+ encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
+ encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
+}
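+
+/*
+ * Worked example of the tweak calculation above (values chosen purely
+ * for illustration): with a 4096-byte volume block size and
+ * first_block = 100, the tweak becomes (100 * 4096) / 512 = 800, so
+ * encrypt_tweak_lower is 800 and encrypt_tweak_upper is 0.  With a
+ * 512-byte block size the tweak is simply the LBA itself.
+ */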
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define PQI_RAID_BYPASS_INELIGIBLE 1
+
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ struct raid_map *raid_map;
+ bool is_write = false;
+ u32 map_index;
+ u64 first_block;
+ u64 last_block;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row;
+ u64 last_row;
+ u32 first_row_offset;
+ u32 last_row_offset;
+ u32 first_column;
+ u32 last_column;
+ u64 r0_first_row;
+ u64 r0_last_row;
+ u32 r5or6_blocks_per_row;
+ u64 r5or6_first_row;
+ u64 r5or6_last_row;
+ u32 r5or6_first_row_offset;
+ u32 r5or6_last_row_offset;
+ u32 r5or6_first_column;
+ u32 r5or6_last_column;
+ u16 data_disks_per_row;
+ u32 total_disks_per_row;
+ u16 layout_map_count;
+ u32 stripesize;
+ u16 strip_size;
+ u32 first_group;
+ u32 last_group;
+ u32 current_group;
+ u32 map_row;
+ u32 aio_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_length;
+ int offload_to_mirror;
+ struct pqi_encryption_info *encryption_info_ptr;
+ struct pqi_encryption_info encryption_info;
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+
+ /* Check for valid opcode, get LBA and block count. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_6:
+ is_write = true;
+ /* fall through */
+ case READ_6:
+ first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+ (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
+ block_cnt = (u32)scmd->cmnd[4];
+ if (block_cnt == 0)
+ block_cnt = 256;
+ break;
+ case WRITE_10:
+ is_write = true;
+ /* fall through */
+ case READ_10:
+ first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ break;
+ case WRITE_12:
+ is_write = true;
+ /* fall through */
+ case READ_12:
+ first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ break;
+ case WRITE_16:
+ is_write = true;
+ /* fall through */
+ case READ_16:
+ first_block = get_unaligned_be64(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+ break;
+ default:
+ /* Process via normal I/O path. */
+ return PQI_RAID_BYPASS_INELIGIBLE;
+ }
+
+ /* Check for write to non-RAID-0. */
+ if (is_write && device->raid_level != SA_RAID_0)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (unlikely(block_cnt == 0))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ last_block = first_block + block_cnt - 1;
+ raid_map = device->raid_map;
+
+ /* Check for invalid block or wraparound. */
+ if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+ last_block < first_block)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
+ strip_size = get_unaligned_le16(&raid_map->strip_size);
+ layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+
+ /* Calculate stripe information for the request. */
+ blocks_per_row = data_disks_per_row * strip_size;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ do_div(tmpdiv, blocks_per_row);
+ first_row = tmpdiv;
+ tmpdiv = last_block;
+ do_div(tmpdiv, blocks_per_row);
+ last_row = tmpdiv;
+ first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+ tmpdiv = first_row_offset;
+ do_div(tmpdiv, strip_size);
+ first_column = tmpdiv;
+ tmpdiv = last_row_offset;
+ do_div(tmpdiv, strip_size);
+ last_column = tmpdiv;
+#else
+ first_row = first_block / blocks_per_row;
+ last_row = last_block / blocks_per_row;
+ first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+ first_column = first_row_offset / strip_size;
+ last_column = last_row_offset / strip_size;
+#endif
+
+ /* If this isn't a single row/column, hand the request to the controller. */
+ if (first_row != last_row || first_column != last_column)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Proceeding with driver mapping. */
+ total_disks_per_row = data_disks_per_row +
+ get_unaligned_le16(&raid_map->metadata_disks_per_row);
+ map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+ get_unaligned_le16(&raid_map->row_cnt);
+ map_index = (map_row * total_disks_per_row) + first_column;
+
+ /* RAID 1 */
+ if (device->raid_level == SA_RAID_1) {
+ if (device->offload_to_mirror)
+ map_index += data_disks_per_row;
+ device->offload_to_mirror = !device->offload_to_mirror;
+ } else if (device->raid_level == SA_RAID_ADM) {
+ /* RAID ADM */
+ /*
+ * Handles N-way mirrors (R1-ADM) and R10 with # of drives
+ * divisible by 3.
+ */
+ offload_to_mirror = device->offload_to_mirror;
+ if (offload_to_mirror == 0) {
+ /* Use the physical disk in the first mirrored group. */
+ map_index %= data_disks_per_row;
+ } else {
+ do {
+ /*
+ * Determine mirror group that map_index
+ * indicates.
+ */
+ current_group = map_index / data_disks_per_row;
+
+ if (offload_to_mirror != current_group) {
+ if (current_group <
+ layout_map_count - 1) {
+ /*
+ * Select raid index from
+ * next group.
+ */
+ map_index += data_disks_per_row;
+ current_group++;
+ } else {
+ /*
+ * Select raid index from first
+ * group.
+ */
+ map_index %= data_disks_per_row;
+ current_group = 0;
+ }
+ }
+ } while (offload_to_mirror != current_group);
+ }
+
+ /* Set mirror group to use next time. */
+ offload_to_mirror =
+ (offload_to_mirror >= layout_map_count - 1) ?
+ 0 : offload_to_mirror + 1;
+ WARN_ON(offload_to_mirror >= layout_map_count);
+ device->offload_to_mirror = offload_to_mirror;
+ /*
+ * Avoid direct use of device->offload_to_mirror within this
+ * function since multiple threads might simultaneously
+ * increment it beyond the range of device->layout_map_count - 1.
+ */
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) && layout_map_count > 1) {
+ /* RAID 50/60 */
+ /* Verify first and last block are in same RAID group */
+ r5or6_blocks_per_row = strip_size * data_disks_per_row;
+ stripesize = r5or6_blocks_per_row * layout_map_count;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_group = do_div(tmpdiv, stripesize);
+ tmpdiv = first_group;
+ do_div(tmpdiv, r5or6_blocks_per_row);
+ first_group = tmpdiv;
+ tmpdiv = last_block;
+ last_group = do_div(tmpdiv, stripesize);
+ tmpdiv = last_group;
+ do_div(tmpdiv, r5or6_blocks_per_row);
+ last_group = tmpdiv;
+#else
+ first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+ last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+ if (first_group != last_group)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ do_div(tmpdiv, stripesize);
+ first_row = r5or6_first_row = r0_first_row = tmpdiv;
+ tmpdiv = last_block;
+ do_div(tmpdiv, stripesize);
+ r5or6_last_row = r0_last_row = tmpdiv;
+#else
+ first_row = r5or6_first_row = r0_first_row =
+ first_block / stripesize;
+ r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+ if (r5or6_first_row != r5or6_last_row)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = first_row_offset;
+ first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
+ r5or6_first_row_offset = first_row_offset;
+ tmpdiv = last_block;
+ r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = r5or6_last_row_offset;
+ r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+ tmpdiv = r5or6_first_row_offset;
+ do_div(tmpdiv, strip_size);
+ first_column = r5or6_first_column = tmpdiv;
+ tmpdiv = r5or6_last_row_offset;
+ do_div(tmpdiv, strip_size);
+ r5or6_last_column = tmpdiv;
+#else
+ first_row_offset = r5or6_first_row_offset =
+ (u32)((first_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ r5or6_last_row_offset =
+ (u32)((last_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ first_column = r5or6_first_row_offset / strip_size;
+ r5or6_first_column = first_column;
+ r5or6_last_column = r5or6_last_row_offset / strip_size;
+#endif
+ if (r5or6_first_column != r5or6_last_column)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Request is eligible */
+ map_row =
+ ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+ get_unaligned_le16(&raid_map->row_cnt);
+
+ map_index = (first_group *
+ (get_unaligned_le16(&raid_map->row_cnt) *
+ total_disks_per_row)) +
+ (map_row * total_disks_per_row) + first_column;
+ }
+
+ if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ aio_handle = raid_map->disk_data[map_index].aio_handle;
+ disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+ first_row * strip_size +
+ (first_row_offset - first_column * strip_size);
+ disk_block_cnt = block_cnt;
+
+ /* Handle differing logical/physical block sizes. */
+ if (raid_map->phys_blk_shift) {
+ disk_block <<= raid_map->phys_blk_shift;
+ disk_block_cnt <<= raid_map->phys_blk_shift;
+ }
+
+ if (unlikely(disk_block_cnt > 0xffff))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Build the new CDB for the physical disk I/O. */
+ if (disk_block > 0xffffffff) {
+ cdb[0] = is_write ? WRITE_16 : READ_16;
+ cdb[1] = 0;
+ put_unaligned_be64(disk_block, &cdb[2]);
+ put_unaligned_be32(disk_block_cnt, &cdb[10]);
+ cdb[14] = 0;
+ cdb[15] = 0;
+ cdb_length = 16;
+ } else {
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ put_unaligned_be32((u32)disk_block, &cdb[2]);
+ cdb[6] = 0;
+ put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
+ cdb[9] = 0;
+ cdb_length = 10;
+ }
+
+ if (get_unaligned_le16(&raid_map->flags) &
+ RAID_MAP_ENCRYPTION_ENABLED) {
+ pqi_set_encryption_info(&encryption_info, raid_map,
+ first_block);
+ encryption_info_ptr = &encryption_info;
+ } else {
+ encryption_info_ptr = NULL;
+ }
+
+ return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
+ cdb, cdb_length, queue_group, encryption_info_ptr);
+}
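+
+/*
+ * Worked example of the bypass geometry above, using illustrative values
+ * rather than data from any particular controller: with strip_size = 128
+ * and data_disks_per_row = 4, blocks_per_row = 512.  A READ_10 starting
+ * at first_block = 1000 for 8 blocks gives last_block = 1007, so both
+ * ends of the request fall in row 1 (1000 / 512) and column 3
+ * (offset 488 / 128) and the request is eligible.  The physical start is
+ * then disk_starting_blk + 1 * 128 + (488 - 3 * 128) =
+ * disk_starting_blk + 232.
+ */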
+
+#define PQI_STATUS_IDLE 0x0
+
+#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
+#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
+
+#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
+#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
+#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
+#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
+#define PQI_DEVICE_STATE_ERROR 0x4
+
+#define PQI_MODE_READY_TIMEOUT_SECS 30
+#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
+
+static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
+{
+ struct pqi_device_registers __iomem *pqi_registers;
+ unsigned long timeout;
+ u64 signature;
+ u8 status;
+
+ pqi_registers = ctrl_info->pqi_registers;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+
+ while (1) {
+ signature = readq(&pqi_registers->signature);
+ if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
+ sizeof(signature)) == 0)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for PQI signature\n");
+ return -ETIMEDOUT;
+ }
+ msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+ }
+
+ while (1) {
+ status = readb(&pqi_registers->function_and_status_code);
+ if (status == PQI_STATUS_IDLE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for PQI IDLE\n");
+ return -ETIMEDOUT;
+ }
+ msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+ }
+
+ while (1) {
+ if (readl(&pqi_registers->device_status) ==
+ PQI_DEVICE_STATE_ALL_REGISTERS_READY)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for PQI all registers ready\n");
+ return -ETIMEDOUT;
+ }
+ msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+ }
+
+ return 0;
+}
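+
+/*
+ * Note that the single timeout computed above bounds all three waits
+ * (signature, IDLE status and all-registers-ready), so the device must
+ * become fully PQI-ready within PQI_MODE_READY_TIMEOUT_SECS overall.
+ */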
+
+static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
+{
+ struct pqi_scsi_dev *device;
+
+ device = io_request->scmd->device->hostdata;
+ device->offload_enabled = false;
+}
+
+static inline void pqi_take_device_offline(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+
+ if (scsi_device_online(sdev)) {
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ ctrl_info = shost_to_hba(sdev->host);
+ schedule_delayed_work(&ctrl_info->rescan_work, 0);
+ device = sdev->hostdata;
+ dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, device->lun);
+ }
+}
+
+static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
+{
+ u8 scsi_status;
+ u8 host_byte;
+ struct scsi_cmnd *scmd;
+ struct pqi_raid_error_info *error_info;
+ size_t sense_data_length;
+ int residual_count;
+ int xfer_count;
+ struct scsi_sense_hdr sshdr;
+
+ scmd = io_request->scmd;
+ if (!scmd)
+ return;
+
+ error_info = io_request->error_info;
+ scsi_status = error_info->status;
+ host_byte = DID_OK;
+
+ if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
+ xfer_count =
+ get_unaligned_le32(&error_info->data_out_transferred);
+ residual_count = scsi_bufflen(scmd) - xfer_count;
+ scsi_set_resid(scmd, residual_count);
+ if (xfer_count < scmd->underflow)
+ host_byte = DID_SOFT_ERROR;
+ }
+
+ sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
+ if (sense_data_length == 0)
+ sense_data_length =
+ get_unaligned_le16(&error_info->response_data_length);
+ if (sense_data_length) {
+ if (sense_data_length > sizeof(error_info->data))
+ sense_data_length = sizeof(error_info->data);
+
+ if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+ scsi_normalize_sense(error_info->data,
+ sense_data_length, &sshdr) &&
+ sshdr.sense_key == HARDWARE_ERROR &&
+ sshdr.asc == 0x3e &&
+ sshdr.ascq == 0x1) {
+ pqi_take_device_offline(scmd->device);
+ host_byte = DID_NO_CONNECT;
+ }
+
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+ sense_data_length = SCSI_SENSE_BUFFERSIZE;
+ memcpy(scmd->sense_buffer, error_info->data,
+ sense_data_length);
+ }
+
+ scmd->result = scsi_status;
+ set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
+{
+ u8 scsi_status;
+ u8 host_byte;
+ struct scsi_cmnd *scmd;
+ struct pqi_aio_error_info *error_info;
+ size_t sense_data_length;
+ int residual_count;
+ int xfer_count;
+ bool device_offline;
+
+ scmd = io_request->scmd;
+ error_info = io_request->error_info;
+ host_byte = DID_OK;
+ sense_data_length = 0;
+ device_offline = false;
+
+ switch (error_info->service_response) {
+ case PQI_AIO_SERV_RESPONSE_COMPLETE:
+ scsi_status = error_info->status;
+ break;
+ case PQI_AIO_SERV_RESPONSE_FAILURE:
+ switch (error_info->status) {
+ case PQI_AIO_STATUS_IO_ABORTED:
+ scsi_status = SAM_STAT_TASK_ABORTED;
+ break;
+ case PQI_AIO_STATUS_UNDERRUN:
+ scsi_status = SAM_STAT_GOOD;
+ residual_count = get_unaligned_le32(
+ &error_info->residual_count);
+ scsi_set_resid(scmd, residual_count);
+ xfer_count = scsi_bufflen(scmd) - residual_count;
+ if (xfer_count < scmd->underflow)
+ host_byte = DID_SOFT_ERROR;
+ break;
+ case PQI_AIO_STATUS_OVERRUN:
+ scsi_status = SAM_STAT_GOOD;
+ break;
+ case PQI_AIO_STATUS_AIO_PATH_DISABLED:
+ pqi_aio_path_disabled(io_request);
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
+ break;
+ case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
+ case PQI_AIO_STATUS_INVALID_DEVICE:
+ device_offline = true;
+ pqi_take_device_offline(scmd->device);
+ host_byte = DID_NO_CONNECT;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ break;
+ case PQI_AIO_STATUS_IO_ERROR:
+ default:
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+ break;
+ case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
+ case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
+ scsi_status = SAM_STAT_GOOD;
+ break;
+ case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
+ case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
+ default:
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+
+ if (error_info->data_present) {
+ sense_data_length =
+ get_unaligned_le16(&error_info->data_length);
+ if (sense_data_length) {
+ if (sense_data_length > sizeof(error_info->data))
+ sense_data_length = sizeof(error_info->data);
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+ sense_data_length = SCSI_SENSE_BUFFERSIZE;
+ memcpy(scmd->sense_buffer, error_info->data,
+ sense_data_length);
+ }
+ }
+
+ if (device_offline && sense_data_length == 0)
+ scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
+ 0x3e, 0x1);
+
+ scmd->result = scsi_status;
+ set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_io_error(unsigned int iu_type,
+ struct pqi_io_request *io_request)
+{
+ switch (iu_type) {
+ case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+ pqi_process_raid_io_error(io_request);
+ break;
+ case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+ pqi_process_aio_io_error(io_request);
+ break;
+ }
+}
+
+static int pqi_interpret_task_management_response(
+ struct pqi_task_management_response *response)
+{
+ int rc;
+
+ switch (response->response_code) {
+ case SOP_TMF_COMPLETE:
+ case SOP_TMF_FUNCTION_SUCCEEDED:
+ rc = 0;
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+
+ return rc;
+}
+
+static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_queue_group *queue_group)
+{
+ unsigned int num_responses;
+ pqi_index_t oq_pi;
+ pqi_index_t oq_ci;
+ struct pqi_io_request *io_request;
+ struct pqi_io_response *response;
+ u16 request_id;
+
+ num_responses = 0;
+ oq_ci = queue_group->oq_ci_copy;
+
+ while (1) {
+ oq_pi = *queue_group->oq_pi;
+ if (oq_pi == oq_ci)
+ break;
+
+ num_responses++;
+ response = queue_group->oq_element_array +
+ (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+
+ request_id = get_unaligned_le16(&response->request_id);
+ WARN_ON(request_id >= ctrl_info->max_io_slots);
+
+ io_request = &ctrl_info->io_request_pool[request_id];
+ WARN_ON(atomic_read(&io_request->refcount) == 0);
+
+ switch (response->header.iu_type) {
+ case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+ case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+ case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
+ break;
+ case PQI_RESPONSE_IU_TASK_MANAGEMENT:
+ io_request->status =
+ pqi_interpret_task_management_response(
+ (void *)response);
+ break;
+ case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
+ pqi_aio_path_disabled(io_request);
+ io_request->status = -EAGAIN;
+ break;
+ case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+ case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+ io_request->error_info = ctrl_info->error_buffer +
+ (get_unaligned_le16(&response->error_index) *
+ PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+ pqi_process_io_error(response->header.iu_type,
+ io_request);
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unexpected IU type: 0x%x\n",
+ response->header.iu_type);
+ WARN_ON(response->header.iu_type);
+ break;
+ }
+
+ io_request->io_complete_callback(io_request,
+ io_request->context);
+
+ /*
+ * Note that the I/O request structure CANNOT BE TOUCHED after
+ * returning from the I/O completion callback!
+ */
+
+ oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
+ }
+
+ if (num_responses) {
+ queue_group->oq_ci_copy = oq_ci;
+ writel(oq_ci, queue_group->oq_ci);
+ }
+
+ return num_responses;
+}
+
+static inline unsigned int pqi_num_elements_free(unsigned int pi,
+ unsigned int ci, unsigned int elements_in_queue)
+{
+ unsigned int num_elements_used;
+
+ if (pi >= ci)
+ num_elements_used = pi - ci;
+ else
+ num_elements_used = elements_in_queue - ci + pi;
+
+ return elements_in_queue - num_elements_used - 1;
+}
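+
+/*
+ * For illustration: in a 16-element queue with pi = 5 and ci = 2, three
+ * elements are in use and 12 are free; with pi = 2 and ci = 5, usage has
+ * wrapped, 13 elements are in use and 2 are free.  One element is always
+ * left unused so that pi == ci unambiguously means "queue empty".
+ */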
+
+#define PQI_EVENT_ACK_TIMEOUT 30
+
+static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event_acknowledge_request *iu, size_t iu_length)
+{
+ pqi_index_t iq_pi;
+ pqi_index_t iq_ci;
+ unsigned long flags;
+ void *next_element;
+ unsigned long timeout;
+ struct pqi_queue_group *queue_group;
+
+ queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
+ put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
+
+ timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
+
+ while (1) {
+ spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
+
+ iq_pi = queue_group->iq_pi_copy[RAID_PATH];
+ iq_ci = *queue_group->iq_ci[RAID_PATH];
+
+ if (pqi_num_elements_free(iq_pi, iq_ci,
+ ctrl_info->num_elements_per_iq))
+ break;
+
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[RAID_PATH], flags);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "sending event acknowledge timed out\n");
+ return;
+ }
+ }
+
+ next_element = queue_group->iq_element_array[RAID_PATH] +
+ (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ memcpy(next_element, iu, iu_length);
+
+ iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
+
+ queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
+
+ /*
+ * This write notifies the controller that an IU is available to be
+ * processed.
+ */
+ writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
+
+ spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
+}
+
+static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event)
+{
+ struct pqi_event_acknowledge_request request;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+ &request.header.iu_length);
+ request.event_type = event->event_type;
+ request.event_id = event->event_id;
+ request.additional_event_id = event->additional_event_id;
+
+ pqi_start_event_ack(ctrl_info, &request, sizeof(request));
+}
+
+static void pqi_event_worker(struct work_struct *work)
+{
+ unsigned int i;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_event *pending_event;
+ bool got_non_heartbeat_event = false;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
+
+ pending_event = ctrl_info->pending_events;
+ for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ if (pending_event->pending) {
+ pending_event->pending = false;
+ pqi_acknowledge_event(ctrl_info, pending_event);
+ if (i != PQI_EVENT_HEARTBEAT)
+ got_non_heartbeat_event = true;
+ }
+ pending_event++;
+ }
+
+ if (got_non_heartbeat_event)
+ pqi_schedule_rescan_worker(ctrl_info);
+}
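+
+/*
+ * Note that heartbeat events are marked pending by the heartbeat timer
+ * handler below rather than by the event-queue interrupt path, and the
+ * worker above acknowledges them without triggering a rescan.
+ */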
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+ struct pqi_io_request *next;
+ struct scsi_cmnd *scmd;
+
+ ctrl_info->controller_online = false;
+ dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(
+ &queue_group->submit_lock[path], flags);
+
+ list_for_each_entry_safe(io_request, next,
+ &queue_group->request_list[path],
+ request_list_entry) {
+
+ scmd = io_request->scmd;
+ if (scmd) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ pqi_scsi_done(scmd);
+ }
+
+ list_del(&io_request->request_list_entry);
+ }
+
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ }
+ }
+}
+
+#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
+#define PQI_MAX_HEARTBEAT_REQUESTS 5
+
+static void pqi_heartbeat_timer_handler(unsigned long data)
+{
+ int num_interrupts;
+ struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+
+ num_interrupts = atomic_read(&ctrl_info->num_interrupts);
+
+ if (num_interrupts == ctrl_info->previous_num_interrupts) {
+ ctrl_info->num_heartbeats_requested++;
+ if (ctrl_info->num_heartbeats_requested >
+ PQI_MAX_HEARTBEAT_REQUESTS) {
+ pqi_take_ctrl_offline(ctrl_info);
+ return;
+ }
+ ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
+ schedule_work(&ctrl_info->event_work);
+ } else {
+ ctrl_info->num_heartbeats_requested = 0;
+ }
+
+ ctrl_info->previous_num_interrupts = num_interrupts;
+ mod_timer(&ctrl_info->heartbeat_timer,
+ jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
+}
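+
+/*
+ * With the values above (a 5 * HZ poll interval and a limit of 5 missed
+ * heartbeats), a controller that stops generating interrupts is taken
+ * offline on the sixth consecutive silent interval, i.e. after roughly
+ * 30 seconds.
+ */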
+
+static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->previous_num_interrupts =
+ atomic_read(&ctrl_info->num_interrupts);
+
+ init_timer(&ctrl_info->heartbeat_timer);
+ ctrl_info->heartbeat_timer.expires =
+ jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
+ ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
+ ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
+ add_timer(&ctrl_info->heartbeat_timer);
+ ctrl_info->heartbeat_timer_started = true;
+}
+
+static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+ if (ctrl_info->heartbeat_timer_started)
+ del_timer_sync(&ctrl_info->heartbeat_timer);
+}
+
+static int pqi_event_type_to_event_index(unsigned int event_type)
+{
+ int index;
+
+ switch (event_type) {
+ case PQI_EVENT_TYPE_HEARTBEAT:
+ index = PQI_EVENT_HEARTBEAT;
+ break;
+ case PQI_EVENT_TYPE_HOTPLUG:
+ index = PQI_EVENT_HOTPLUG;
+ break;
+ case PQI_EVENT_TYPE_HARDWARE:
+ index = PQI_EVENT_HARDWARE;
+ break;
+ case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
+ index = PQI_EVENT_PHYSICAL_DEVICE;
+ break;
+ case PQI_EVENT_TYPE_LOGICAL_DEVICE:
+ index = PQI_EVENT_LOGICAL_DEVICE;
+ break;
+ case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
+ index = PQI_EVENT_AIO_STATE_CHANGE;
+ break;
+ case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
+ index = PQI_EVENT_AIO_CONFIG_CHANGE;
+ break;
+ default:
+ index = -1;
+ break;
+ }
+
+ return index;
+}
+
+static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int num_events;
+ pqi_index_t oq_pi;
+ pqi_index_t oq_ci;
+ struct pqi_event_queue *event_queue;
+ struct pqi_event_response *response;
+ struct pqi_event *pending_event;
+ bool need_delayed_work;
+ int event_index;
+
+ event_queue = &ctrl_info->event_queue;
+ num_events = 0;
+ need_delayed_work = false;
+ oq_ci = event_queue->oq_ci_copy;
+
+ while (1) {
+ oq_pi = *event_queue->oq_pi;
+ if (oq_pi == oq_ci)
+ break;
+
+ num_events++;
+ response = event_queue->oq_element_array +
+ (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+ event_index =
+ pqi_event_type_to_event_index(response->event_type);
+
+ if (event_index >= 0) {
+ if (response->request_acknowlege) {
+ pending_event =
+ &ctrl_info->pending_events[event_index];
+ pending_event->event_type =
+ response->event_type;
+ pending_event->event_id = response->event_id;
+ pending_event->additional_event_id =
+ response->additional_event_id;
+ if (event_index != PQI_EVENT_HEARTBEAT) {
+ pending_event->pending = true;
+ need_delayed_work = true;
+ }
+ }
+ }
+
+ oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
+ }
+
+ if (num_events) {
+ event_queue->oq_ci_copy = oq_ci;
+ writel(oq_ci, event_queue->oq_ci);
+
+ if (need_delayed_work)
+ schedule_work(&ctrl_info->event_work);
+ }
+
+ return num_events;
+}
+
+static irqreturn_t pqi_irq_handler(int irq, void *data)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_queue_group *queue_group;
+ unsigned int num_responses_handled;
+
+ queue_group = data;
+ ctrl_info = queue_group->ctrl_info;
+
+ if (!ctrl_info || !queue_group->oq_ci)
+ return IRQ_NONE;
+
+ num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+
+ if (irq == ctrl_info->event_irq)
+ num_responses_handled += pqi_process_event_intr(ctrl_info);
+
+ if (num_responses_handled)
+ atomic_inc(&ctrl_info->num_interrupts);
+
+ pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+ int rc;
+
+ ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
+ rc = request_irq(ctrl_info->msix_vectors[i],
+ pqi_irq_handler, 0,
+ DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "irq %u init failed with error %d\n",
+ ctrl_info->msix_vectors[i], rc);
+ return rc;
+ }
+ ctrl_info->num_msix_vectors_initialized++;
+ }
+
+ return 0;
+}
+
+static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+ free_irq(ctrl_info->msix_vectors[i],
+ ctrl_info->intr_data[i]);
+}
+
+static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ int max_vectors;
+ int num_vectors_enabled;
+ struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
+
+ max_vectors = ctrl_info->num_queue_groups;
+
+ for (i = 0; i < max_vectors; i++)
+ msix_entries[i].entry = i;
+
+ num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
+ msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+
+ if (num_vectors_enabled < 0) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "MSI-X init failed with error %d\n",
+ num_vectors_enabled);
+ return num_vectors_enabled;
+ }
+
+ ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
+ for (i = 0; i < num_vectors_enabled; i++) {
+ ctrl_info->msix_vectors[i] = msix_entries[i].vector;
+ ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+ }
+
+ return 0;
+}
+
+static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+ int rc;
+ int cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+ rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
+ get_cpu_mask(cpu));
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error %d setting affinity hint for irq vector %u\n",
+ rc, ctrl_info->msix_vectors[i]);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+}
+
+static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+ irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
+}
+
+static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ size_t alloc_length;
+ size_t element_array_length_per_iq;
+ size_t element_array_length_per_oq;
+ void *element_array;
+ void *next_queue_index;
+ void *aligned_pointer;
+ unsigned int num_inbound_queues;
+ unsigned int num_outbound_queues;
+ unsigned int num_queue_indexes;
+ struct pqi_queue_group *queue_group;
+
+ element_array_length_per_iq =
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
+ ctrl_info->num_elements_per_iq;
+ element_array_length_per_oq =
+ PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
+ ctrl_info->num_elements_per_oq;
+ num_inbound_queues = ctrl_info->num_queue_groups * 2;
+ num_outbound_queues = ctrl_info->num_queue_groups;
+ num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
+
+ aligned_pointer = NULL;
+
+ for (i = 0; i < num_inbound_queues; i++) {
+ aligned_pointer = PTR_ALIGN(aligned_pointer,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ aligned_pointer += element_array_length_per_iq;
+ }
+
+ for (i = 0; i < num_outbound_queues; i++) {
+ aligned_pointer = PTR_ALIGN(aligned_pointer,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ aligned_pointer += element_array_length_per_oq;
+ }
+
+ aligned_pointer = PTR_ALIGN(aligned_pointer,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+ PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+ for (i = 0; i < num_queue_indexes; i++) {
+ aligned_pointer = PTR_ALIGN(aligned_pointer,
+ PQI_OPERATIONAL_INDEX_ALIGNMENT);
+ aligned_pointer += sizeof(pqi_index_t);
+ }
+
+ alloc_length = (size_t)aligned_pointer +
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+ ctrl_info->queue_memory_base =
+ dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+ alloc_length,
+ &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+
+ if (!ctrl_info->queue_memory_base) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate memory for PQI admin queues\n");
+ return -ENOMEM;
+ }
+
+ ctrl_info->queue_memory_length = alloc_length;
+
+ element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ queue_group->iq_element_array[RAID_PATH] = element_array;
+ queue_group->iq_element_array_bus_addr[RAID_PATH] =
+ ctrl_info->queue_memory_base_dma_handle +
+ (element_array - ctrl_info->queue_memory_base);
+ element_array += element_array_length_per_iq;
+ element_array = PTR_ALIGN(element_array,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ queue_group->iq_element_array[AIO_PATH] = element_array;
+ queue_group->iq_element_array_bus_addr[AIO_PATH] =
+ ctrl_info->queue_memory_base_dma_handle +
+ (element_array - ctrl_info->queue_memory_base);
+ element_array += element_array_length_per_iq;
+ element_array = PTR_ALIGN(element_array,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ }
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ queue_group->oq_element_array = element_array;
+ queue_group->oq_element_array_bus_addr =
+ ctrl_info->queue_memory_base_dma_handle +
+ (element_array - ctrl_info->queue_memory_base);
+ element_array += element_array_length_per_oq;
+ element_array = PTR_ALIGN(element_array,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ }
+
+ ctrl_info->event_queue.oq_element_array = element_array;
+ ctrl_info->event_queue.oq_element_array_bus_addr =
+ ctrl_info->queue_memory_base_dma_handle +
+ (element_array - ctrl_info->queue_memory_base);
+ element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+ PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+ next_queue_index = PTR_ALIGN(element_array,
+ PQI_OPERATIONAL_INDEX_ALIGNMENT);
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ queue_group->iq_ci[RAID_PATH] = next_queue_index;
+ queue_group->iq_ci_bus_addr[RAID_PATH] =
+ ctrl_info->queue_memory_base_dma_handle +
+ (next_queue_index - ctrl_info->queue_memory_base);
+ next_queue_index += sizeof(pqi_index_t);
+ next_queue_index = PTR_ALIGN(next_queue_index,
+ PQI_OPERATIONAL_INDEX_ALIGNMENT);
+ queue_group->iq_ci[AIO_PATH] = next_queue_index;
+ queue_group->iq_ci_bus_addr[AIO_PATH] =
+ ctrl_info->queue_memory_base_dma_handle +
+ (next_queue_index - ctrl_info->queue_memory_base);
+ next_queue_index += sizeof(pqi_index_t);
+ next_queue_index = PTR_ALIGN(next_queue_index,
+ PQI_OPERATIONAL_INDEX_ALIGNMENT);
+ queue_group->oq_pi = next_queue_index;
+ queue_group->oq_pi_bus_addr =
+ ctrl_info->queue_memory_base_dma_handle +
+ (next_queue_index - ctrl_info->queue_memory_base);
+ next_queue_index += sizeof(pqi_index_t);
+ next_queue_index = PTR_ALIGN(next_queue_index,
+ PQI_OPERATIONAL_INDEX_ALIGNMENT);
+ }
+
+ ctrl_info->event_queue.oq_pi = next_queue_index;
+ ctrl_info->event_queue.oq_pi_bus_addr =
+ ctrl_info->queue_memory_base_dma_handle +
+ (next_queue_index - ctrl_info->queue_memory_base);
+
+ return 0;
+}
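+
+/*
+ * The sizing pass above walks a NULL-based pointer through the same
+ * sequence of alignments and array lengths that the second pass uses to
+ * carve up the real allocation, so alloc_length covers every element
+ * array and queue index at its required alignment.
+ */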
+
+static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+ u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+
+ /*
+ * Initialize the backpointers to the controller structure in
+ * each operational queue group structure.
+ */
+ for (i = 0; i < ctrl_info->num_queue_groups; i++)
+ ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
+
+ /*
+ * Assign IDs to all operational queues. Note that the IDs
+ * assigned to operational IQs are independent of the IDs
+ * assigned to operational OQs.
+ */
+ ctrl_info->event_queue.oq_id = next_oq_id++;
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
+ ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
+ ctrl_info->queue_groups[i].oq_id = next_oq_id++;
+ }
+
+ /*
+ * Assign MSI-X table entry indexes to all queues. Note that the
+ * interrupt for the event queue is shared with the first queue group.
+ */
+ ctrl_info->event_queue.int_msg_num = 0;
+ for (i = 0; i < ctrl_info->num_queue_groups; i++)
+ ctrl_info->queue_groups[i].int_msg_num = i;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
+ spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
+ INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
+ INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
+ }
+}
+
+static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ size_t alloc_length;
+ struct pqi_admin_queues_aligned *admin_queues_aligned;
+ struct pqi_admin_queues *admin_queues;
+
+ alloc_length = sizeof(struct pqi_admin_queues_aligned) +
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+ ctrl_info->admin_queue_memory_base =
+ dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+ alloc_length,
+ &ctrl_info->admin_queue_memory_base_dma_handle,
+ GFP_KERNEL);
+
+ if (!ctrl_info->admin_queue_memory_base)
+ return -ENOMEM;
+
+ ctrl_info->admin_queue_memory_length = alloc_length;
+
+ admin_queues = &ctrl_info->admin_queues;
+ admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
+ PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+ admin_queues->iq_element_array =
+ &admin_queues_aligned->iq_element_array;
+ admin_queues->oq_element_array =
+ &admin_queues_aligned->oq_element_array;
+ admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+ admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+
+ admin_queues->iq_element_array_bus_addr =
+ ctrl_info->admin_queue_memory_base_dma_handle +
+ (admin_queues->iq_element_array -
+ ctrl_info->admin_queue_memory_base);
+ admin_queues->oq_element_array_bus_addr =
+ ctrl_info->admin_queue_memory_base_dma_handle +
+ (admin_queues->oq_element_array -
+ ctrl_info->admin_queue_memory_base);
+ admin_queues->iq_ci_bus_addr =
+ ctrl_info->admin_queue_memory_base_dma_handle +
+ ((void *)admin_queues->iq_ci -
+ ctrl_info->admin_queue_memory_base);
+ admin_queues->oq_pi_bus_addr =
+ ctrl_info->admin_queue_memory_base_dma_handle +
+ ((void *)admin_queues->oq_pi -
+ ctrl_info->admin_queue_memory_base);
+
+ return 0;
+}
+
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
+#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
+
+static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ struct pqi_device_registers __iomem *pqi_registers;
+ struct pqi_admin_queues *admin_queues;
+ unsigned long timeout;
+ u8 status;
+ u32 reg;
+
+ pqi_registers = ctrl_info->pqi_registers;
+ admin_queues = &ctrl_info->admin_queues;
+
+ writeq((u64)admin_queues->iq_element_array_bus_addr,
+ &pqi_registers->admin_iq_element_array_addr);
+ writeq((u64)admin_queues->oq_element_array_bus_addr,
+ &pqi_registers->admin_oq_element_array_addr);
+ writeq((u64)admin_queues->iq_ci_bus_addr,
+ &pqi_registers->admin_iq_ci_addr);
+ writeq((u64)admin_queues->oq_pi_bus_addr,
+ &pqi_registers->admin_oq_pi_addr);
+
+ reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (admin_queues->int_msg_num << 16);
+ writel(reg, &pqi_registers->admin_iq_num_elements);
+ writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
+ &pqi_registers->function_and_status_code);
+
+ timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
+ while (1) {
+ status = readb(&pqi_registers->function_and_status_code);
+ if (status == PQI_STATUS_IDLE)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
+ }
+
+ /*
+ * The offset registers are not initialized to the correct
+ * offsets until *after* the create admin queue pair command
+ * completes successfully.
+ */
+ admin_queues->iq_pi = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ readq(&pqi_registers->admin_iq_pi_offset);
+ admin_queues->oq_ci = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ readq(&pqi_registers->admin_oq_ci_offset);
+
+ return 0;
+}
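+
+/*
+ * As programmed above, the admin_iq_num_elements register packs the
+ * admin IQ element count into bits 7:0, the admin OQ element count into
+ * bits 15:8, and the MSI-X message number starting at bit 16.
+ */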
+
+static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_general_admin_request *request)
+{
+ struct pqi_admin_queues *admin_queues;
+ void *next_element;
+ pqi_index_t iq_pi;
+
+ admin_queues = &ctrl_info->admin_queues;
+ iq_pi = admin_queues->iq_pi_copy;
+
+ next_element = admin_queues->iq_element_array +
+ (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
+
+ memcpy(next_element, request, sizeof(*request));
+
+ iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
+ admin_queues->iq_pi_copy = iq_pi;
+
+ /*
+ * This write notifies the controller that an IU is available to be
+ * processed.
+ */
+ writel(iq_pi, admin_queues->iq_pi);
+}
+
+static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_general_admin_response *response)
+{
+ struct pqi_admin_queues *admin_queues;
+ pqi_index_t oq_pi;
+ pqi_index_t oq_ci;
+ unsigned long timeout;
+
+ admin_queues = &ctrl_info->admin_queues;
+ oq_ci = admin_queues->oq_ci_copy;
+
+ timeout = (3 * HZ) + jiffies;
+
+ while (1) {
+ oq_pi = *admin_queues->oq_pi;
+ if (oq_pi != oq_ci)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for admin response\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ memcpy(response, admin_queues->oq_element_array +
+ (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
+
+ oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
+ admin_queues->oq_ci_copy = oq_ci;
+ writel(oq_ci, admin_queues->oq_ci);
+
+ return 0;
+}
+
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_queue_group *queue_group, enum pqi_io_path path,
+ struct pqi_io_request *io_request)
+{
+ struct pqi_io_request *next;
+ void *next_element;
+ pqi_index_t iq_pi;
+ pqi_index_t iq_ci;
+ size_t iu_length;
+ unsigned long flags;
+ unsigned int num_elements_needed;
+ unsigned int num_elements_to_end_of_queue;
+ size_t copy_count;
+ struct pqi_iu_header *request;
+
+ spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+
+ if (io_request)
+ list_add_tail(&io_request->request_list_entry,
+ &queue_group->request_list[path]);
+
+ iq_pi = queue_group->iq_pi_copy[path];
+
+ list_for_each_entry_safe(io_request, next,
+ &queue_group->request_list[path], request_list_entry) {
+
+ request = io_request->iu;
+
+ iu_length = get_unaligned_le16(&request->iu_length) +
+ PQI_REQUEST_HEADER_LENGTH;
+ num_elements_needed =
+ DIV_ROUND_UP(iu_length,
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ iq_ci = *queue_group->iq_ci[path];
+
+ if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
+ ctrl_info->num_elements_per_iq))
+ break;
+
+ put_unaligned_le16(queue_group->oq_id,
+ &request->response_queue_id);
+
+ next_element = queue_group->iq_element_array[path] +
+ (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ num_elements_to_end_of_queue =
+ ctrl_info->num_elements_per_iq - iq_pi;
+
+ if (num_elements_needed <= num_elements_to_end_of_queue) {
+ memcpy(next_element, request, iu_length);
+ } else {
+ copy_count = num_elements_to_end_of_queue *
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+ memcpy(next_element, request, copy_count);
+ memcpy(queue_group->iq_element_array[path],
+ (u8 *)request + copy_count,
+ iu_length - copy_count);
+ }
+
+ iq_pi = (iq_pi + num_elements_needed) %
+ ctrl_info->num_elements_per_iq;
+
+ list_del(&io_request->request_list_entry);
+ }
+
+ if (iq_pi != queue_group->iq_pi_copy[path]) {
+ queue_group->iq_pi_copy[path] = iq_pi;
+ /*
+ * This write notifies the controller that one or more IUs are
+ * available to be processed.
+ */
+ writel(iq_pi, queue_group->iq_pi[path]);
+ }
+
+ spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
+}
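+
+/*
+ * Note on the copy above: when a spanned IU does not fit in the elements
+ * remaining before the end of the element array, the first
+ * num_elements_to_end_of_queue elements are copied to the tail of the
+ * array and the rest of the IU continues at the head, matching the
+ * modulo arithmetic used to advance iq_pi.
+ */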
+
+static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct completion *waiting = context;
+
+ complete(waiting);
+}
+
+static int pqi_submit_raid_request_synchronous_with_io_request(
+ struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+ unsigned long timeout_msecs)
+{
+ int rc = 0;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ io_request->io_complete_callback = pqi_raid_synchronous_complete;
+ io_request->context = &wait;
+
+ pqi_start_io(ctrl_info,
+ &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ io_request);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ wait_for_completion_io(&wait);
+ } else {
+ if (!wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(timeout_msecs))) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "command timed out\n");
+ rc = -ETIMEDOUT;
+ }
+ }
+
+ return rc;
+}
+
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_iu_header *request, unsigned int flags,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ unsigned long start_jiffies;
+ unsigned long msecs_blocked;
+ size_t iu_length;
+
+ /*
+ * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
+ * mutually exclusive.
+ */
+
+ if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
+ if (down_interruptible(&ctrl_info->sync_request_sem))
+ return -ERESTARTSYS;
+ } else {
+ if (timeout_msecs == NO_TIMEOUT) {
+ down(&ctrl_info->sync_request_sem);
+ } else {
+ start_jiffies = jiffies;
+ if (down_timeout(&ctrl_info->sync_request_sem,
+ msecs_to_jiffies(timeout_msecs)))
+ return -ETIMEDOUT;
+ msecs_blocked =
+ jiffies_to_msecs(jiffies - start_jiffies);
+ if (msecs_blocked >= timeout_msecs)
+ return -ETIMEDOUT;
+ timeout_msecs -= msecs_blocked;
+ }
+ }
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+
+ put_unaligned_le16(io_request->index,
+ &(((struct pqi_raid_path_request *)request)->request_id));
+
+ if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
+ ((struct pqi_raid_path_request *)request)->error_index =
+ ((struct pqi_raid_path_request *)request)->request_id;
+
+ iu_length = get_unaligned_le16(&request->iu_length) +
+ PQI_REQUEST_HEADER_LENGTH;
+ memcpy(io_request->iu, request, iu_length);
+
+ rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
+ io_request, timeout_msecs);
+
+ if (error_info) {
+ if (io_request->error_info)
+ memcpy(error_info, io_request->error_info,
+ sizeof(*error_info));
+ else
+ memset(error_info, 0, sizeof(*error_info));
+ } else if (rc == 0 && io_request->error_info) {
+ u8 scsi_status;
+ struct pqi_raid_error_info *raid_error_info;
+
+ raid_error_info = io_request->error_info;
+ scsi_status = raid_error_info->status;
+
+ if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+ raid_error_info->data_out_result ==
+ PQI_DATA_IN_OUT_UNDERFLOW)
+ scsi_status = SAM_STAT_GOOD;
+
+ if (scsi_status != SAM_STAT_GOOD)
+ rc = -EIO;
+ }
+
+ pqi_free_io_request(io_request);
+
+ up(&ctrl_info->sync_request_sem);
+
+ return rc;
+}
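+
+/*
+ * Note that when a finite timeout is used above, the time spent blocked
+ * on sync_request_sem is subtracted from timeout_msecs, so the caller's
+ * deadline covers both acquiring the submission slot and waiting for the
+ * command to complete.
+ */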
+
+static int pqi_validate_admin_response(
+ struct pqi_general_admin_response *response, u8 expected_function_code)
+{
+ if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
+ return -EINVAL;
+
+ if (get_unaligned_le16(&response->header.iu_length) !=
+ PQI_GENERAL_ADMIN_IU_LENGTH)
+ return -EINVAL;
+
+ if (response->function_code != expected_function_code)
+ return -EINVAL;
+
+ if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pqi_submit_admin_request_synchronous(
+ struct pqi_ctrl_info *ctrl_info,
+ struct pqi_general_admin_request *request,
+ struct pqi_general_admin_response *response)
+{
+ int rc;
+
+ pqi_submit_admin_request(ctrl_info, request);
+
+ rc = pqi_poll_for_admin_response(ctrl_info, response);
+
+ if (rc == 0)
+ rc = pqi_validate_admin_response(response,
+ request->function_code);
+
+ return rc;
+}
+
+static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct pqi_general_admin_request request;
+ struct pqi_general_admin_response response;
+ struct pqi_device_capability *capability;
+ struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
+
+ capability = kmalloc(sizeof(*capability), GFP_KERNEL);
+ if (!capability)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code =
+ PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
+ put_unaligned_le32(sizeof(*capability),
+ &request.data.report_device_capability.buffer_length);
+
+ rc = pqi_map_single(ctrl_info->pci_dev,
+ &request.data.report_device_capability.sg_descriptor,
+ capability, sizeof(*capability),
+ PCI_DMA_FROMDEVICE);
+ if (rc)
+ goto out;
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+
+ pqi_pci_unmap(ctrl_info->pci_dev,
+ &request.data.report_device_capability.sg_descriptor, 1,
+ PCI_DMA_FROMDEVICE);
+
+ if (rc)
+ goto out;
+
+ if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
+ rc = -EIO;
+ goto out;
+ }
+
+ ctrl_info->max_inbound_queues =
+ get_unaligned_le16(&capability->max_inbound_queues);
+ ctrl_info->max_elements_per_iq =
+ get_unaligned_le16(&capability->max_elements_per_iq);
+ ctrl_info->max_iq_element_length =
+ get_unaligned_le16(&capability->max_iq_element_length)
+ * 16;
+ ctrl_info->max_outbound_queues =
+ get_unaligned_le16(&capability->max_outbound_queues);
+ ctrl_info->max_elements_per_oq =
+ get_unaligned_le16(&capability->max_elements_per_oq);
+ ctrl_info->max_oq_element_length =
+ get_unaligned_le16(&capability->max_oq_element_length)
+ * 16;
+
+ sop_iu_layer_descriptor =
+ &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
+
+ ctrl_info->max_inbound_iu_length_per_firmware =
+ get_unaligned_le16(
+ &sop_iu_layer_descriptor->max_inbound_iu_length);
+ ctrl_info->inbound_spanning_supported =
+ sop_iu_layer_descriptor->inbound_spanning_supported;
+ ctrl_info->outbound_spanning_supported =
+ sop_iu_layer_descriptor->outbound_spanning_supported;
+
+out:
+ kfree(capability);
+
+ return rc;
+}
+
+static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+ if (ctrl_info->max_iq_element_length <
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "max. inbound queue element length of %d is less than the required length of %d\n",
+ ctrl_info->max_iq_element_length,
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+ return -EINVAL;
+ }
+
+ if (ctrl_info->max_oq_element_length <
+ PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "max. outbound queue element length of %d is less than the required length of %d\n",
+ ctrl_info->max_oq_element_length,
+ PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+ return -EINVAL;
+ }
+
+ if (ctrl_info->max_inbound_iu_length_per_firmware <
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "max. inbound IU length of %u is less than the min. required length of %d\n",
+ ctrl_info->max_inbound_iu_length_per_firmware,
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+ return -EINVAL;
+ }
+
+ if (!ctrl_info->inbound_spanning_supported) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "the controller does not support inbound spanning\n");
+ return -EINVAL;
+ }
+
+ if (ctrl_info->outbound_spanning_supported) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "the controller supports outbound spanning but this driver does not\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
+ bool inbound_queue, u16 queue_id)
+{
+ struct pqi_general_admin_request request;
+ struct pqi_general_admin_response response;
+
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ if (inbound_queue)
+ request.function_code =
+ PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
+ else
+ request.function_code =
+ PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
+ put_unaligned_le16(queue_id,
+ &request.data.delete_operational_queue.queue_id);
+
+ return pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+}
+
+static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct pqi_event_queue *event_queue;
+ struct pqi_general_admin_request request;
+ struct pqi_general_admin_response response;
+
+ event_queue = &ctrl_info->event_queue;
+
+ /*
+ * Create the OQ (Outbound Queue - device to host queue) dedicated
+ * to events.
+ */
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+ put_unaligned_le16(event_queue->oq_id,
+ &request.data.create_operational_oq.queue_id);
+ put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
+ &request.data.create_operational_oq.element_array_addr);
+ put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
+ &request.data.create_operational_oq.pi_addr);
+ put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
+ &request.data.create_operational_oq.num_elements);
+ put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
+ &request.data.create_operational_oq.element_length);
+ request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+ put_unaligned_le16(event_queue->int_msg_num,
+ &request.data.create_operational_oq.int_msg_num);
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+ if (rc)
+ return rc;
+
+ event_queue->oq_ci = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ get_unaligned_le64(
+ &response.data.create_operational_oq.oq_ci_offset);
+
+ return 0;
+}
+
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ int rc;
+ struct pqi_queue_group *queue_group;
+ struct pqi_general_admin_request request;
+ struct pqi_general_admin_response response;
+
+ i = ctrl_info->num_active_queue_groups;
+ queue_group = &ctrl_info->queue_groups[i];
+
+ /*
+ * Create IQ (Inbound Queue - host to device queue) for
+ * RAID path.
+ */
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+ put_unaligned_le16(queue_group->iq_id[RAID_PATH],
+ &request.data.create_operational_iq.queue_id);
+ put_unaligned_le64(
+ (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
+ &request.data.create_operational_iq.element_array_addr);
+ put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
+ &request.data.create_operational_iq.ci_addr);
+ put_unaligned_le16(ctrl_info->num_elements_per_iq,
+ &request.data.create_operational_iq.num_elements);
+ put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+ &request.data.create_operational_iq.element_length);
+ request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating inbound RAID queue\n");
+ return rc;
+ }
+
+ queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ get_unaligned_le64(
+ &response.data.create_operational_iq.iq_pi_offset);
+
+ /*
+ * Create IQ (Inbound Queue - host to device queue) for
+ * Advanced I/O (AIO) path.
+ */
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+ put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+ &request.data.create_operational_iq.queue_id);
+ put_unaligned_le64((u64)queue_group->
+ iq_element_array_bus_addr[AIO_PATH],
+ &request.data.create_operational_iq.element_array_addr);
+ put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
+ &request.data.create_operational_iq.ci_addr);
+ put_unaligned_le16(ctrl_info->num_elements_per_iq,
+ &request.data.create_operational_iq.num_elements);
+ put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+ &request.data.create_operational_iq.element_length);
+ request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating inbound AIO queue\n");
+ goto delete_inbound_queue_raid;
+ }
+
+ queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ get_unaligned_le64(
+ &response.data.create_operational_iq.iq_pi_offset);
+
+ /*
+ * Designate the 2nd IQ as the AIO path. By default, all IQs are
+ * assumed to be for RAID path I/O unless we change the queue's
+ * property.
+ */
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
+ put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+ &request.data.change_operational_iq_properties.queue_id);
+ put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
+ &request.data.change_operational_iq_properties.vendor_specific);
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error changing queue property\n");
+ goto delete_inbound_queue_aio;
+ }
+
+ /*
+ * Create OQ (Outbound Queue - device to host queue).
+ */
+ memset(&request, 0, sizeof(request));
+ request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+ put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+ &request.header.iu_length);
+ request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+ put_unaligned_le16(queue_group->oq_id,
+ &request.data.create_operational_oq.queue_id);
+ put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
+ &request.data.create_operational_oq.element_array_addr);
+ put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
+ &request.data.create_operational_oq.pi_addr);
+ put_unaligned_le16(ctrl_info->num_elements_per_oq,
+ &request.data.create_operational_oq.num_elements);
+ put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
+ &request.data.create_operational_oq.element_length);
+ request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+ put_unaligned_le16(queue_group->int_msg_num,
+ &request.data.create_operational_oq.int_msg_num);
+
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+ &response);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating outbound queue\n");
+ goto delete_inbound_queue_aio;
+ }
+
+ queue_group->oq_ci = ctrl_info->iomem_base +
+ PQI_DEVICE_REGISTERS_OFFSET +
+ get_unaligned_le64(
+ &response.data.create_operational_oq.oq_ci_offset);
+
+ ctrl_info->num_active_queue_groups++;
+
+ return 0;
+
+delete_inbound_queue_aio:
+ pqi_delete_operational_queue(ctrl_info, true,
+ queue_group->iq_id[AIO_PATH]);
+
+delete_inbound_queue_raid:
+ pqi_delete_operational_queue(ctrl_info, true,
+ queue_group->iq_id[RAID_PATH]);
+
+ return rc;
+}
+
+static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned int i;
+
+ rc = pqi_create_event_queue(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating event queue\n");
+ return rc;
+ }
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ rc = pqi_create_queue_group(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating queue group number %u/%u\n",
+ i, ctrl_info->num_queue_groups);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
+ (offsetof(struct pqi_event_config, descriptors) + \
+ (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+
+static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned int i;
+ struct pqi_event_config *event_config;
+ struct pqi_general_management_request request;
+
+ event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+ GFP_KERNEL);
+ if (!event_config)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
+ put_unaligned_le16(offsetof(struct pqi_general_management_request,
+ data.report_event_configuration.sg_descriptors[1]) -
+ PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+ &request.data.report_event_configuration.buffer_length);
+
+ rc = pqi_map_single(ctrl_info->pci_dev,
+ request.data.report_event_configuration.sg_descriptors,
+ event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+ PCI_DMA_FROMDEVICE);
+ if (rc)
+ goto out;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ 0, NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev,
+ request.data.report_event_configuration.sg_descriptors, 1,
+ PCI_DMA_FROMDEVICE);
+
+ if (rc)
+ goto out;
+
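+ /* Point every reported event descriptor at our dedicated event OQ. */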
+ for (i = 0; i < event_config->num_event_descriptors; i++)
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ &event_config->descriptors[i].oq_id);
+
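+ /*
+ * Write the updated configuration back to the controller with a
+ * SET_VENDOR_EVENT_CONFIG request.
+ */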
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
+ put_unaligned_le16(offsetof(struct pqi_general_management_request,
+ data.report_event_configuration.sg_descriptors[1]) -
+ PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+ &request.data.report_event_configuration.buffer_length);
+
+ rc = pqi_map_single(ctrl_info->pci_dev,
+ request.data.report_event_configuration.sg_descriptors,
+ event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+ PCI_DMA_TODEVICE);
+ if (rc)
+ goto out;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ NULL, NO_TIMEOUT);
+
+ pqi_pci_unmap(ctrl_info->pci_dev,
+ request.data.report_event_configuration.sg_descriptors, 1,
+ PCI_DMA_TODEVICE);
+
+out:
+ kfree(event_config);
+
+ return rc;
+}
+
+static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct device *dev;
+ size_t sg_chain_buffer_length;
+ struct pqi_io_request *io_request;
+
+ if (!ctrl_info->io_request_pool)
+ return;
+
+ dev = &ctrl_info->pci_dev->dev;
+ sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+ io_request = ctrl_info->io_request_pool;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ kfree(io_request->iu);
+ if (!io_request->sg_chain_buffer)
+ break;
+ dma_free_coherent(dev, sg_chain_buffer_length,
+ io_request->sg_chain_buffer,
+ io_request->sg_chain_buffer_dma_handle);
+ io_request++;
+ }
+
+ kfree(ctrl_info->io_request_pool);
+ ctrl_info->io_request_pool = NULL;
+}
+
+static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+
+ if (!ctrl_info->error_buffer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ void *sg_chain_buffer;
+ size_t sg_chain_buffer_length;
+ dma_addr_t sg_chain_buffer_dma_handle;
+ struct device *dev;
+ struct pqi_io_request *io_request;
+
+ ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
+ sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+
+ if (!ctrl_info->io_request_pool) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate I/O request pool\n");
+ goto error;
+ }
+
+ dev = &ctrl_info->pci_dev->dev;
+ sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+ io_request = ctrl_info->io_request_pool;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request->iu =
+ kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+
+ if (!io_request->iu) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate IU buffers\n");
+ goto error;
+ }
+
+ sg_chain_buffer = dma_alloc_coherent(dev,
+ sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
+ GFP_KERNEL);
+
+ if (!sg_chain_buffer) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate PQI scatter-gather chain buffers\n");
+ goto error;
+ }
+
+ io_request->index = i;
+ io_request->sg_chain_buffer = sg_chain_buffer;
+ io_request->sg_chain_buffer_dma_handle =
+ sg_chain_buffer_dma_handle;
+ io_request++;
+ }
+
+ return 0;
+
+error:
+ pqi_free_all_io_requests(ctrl_info);
+
+ return -ENOMEM;
+}
+
+/*
+ * Calculate the required resources, which are sized based on the maximum
+ * number of outstanding requests and the maximum transfer size.
+ */
+
+static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 max_transfer_size;
+ u32 max_sg_entries;
+
+ ctrl_info->scsi_ml_can_queue =
+ ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
+ ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
+
+ ctrl_info->error_buffer_length =
+ ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
+
+ max_transfer_size =
+ min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
+
+ max_sg_entries = max_transfer_size / PAGE_SIZE;
+
+ /* +1 to cover when the buffer is not page-aligned. */
+ max_sg_entries++;
+
+ max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
+
+ max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
+
+ ctrl_info->sg_chain_buffer_length =
+ max_sg_entries * sizeof(struct pqi_sg_descriptor);
+ ctrl_info->sg_tablesize = max_sg_entries;
+ ctrl_info->max_sectors = max_transfer_size / 512;
+}
+
+static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
+{
+ int num_cpus;
+ int max_queue_groups;
+ int num_queue_groups;
+ u16 num_elements_per_iq;
+ u16 num_elements_per_oq;
+
+ max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
+ ctrl_info->max_outbound_queues - 1);
+ max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
+
+ num_cpus = num_online_cpus();
+ num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+ num_queue_groups = min(num_queue_groups, max_queue_groups);
+
+ ctrl_info->num_queue_groups = num_queue_groups;
+
+ /*
+ * Make sure that the max. inbound IU length is an exact multiple
+ * of our inbound element length.
+ */
+ ctrl_info->max_inbound_iu_length =
+ (ctrl_info->max_inbound_iu_length_per_firmware /
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+
+ num_elements_per_iq =
+ (ctrl_info->max_inbound_iu_length /
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ /* Add one because one element in each queue is unusable. */
+ num_elements_per_iq++;
+
+ num_elements_per_iq = min(num_elements_per_iq,
+ ctrl_info->max_elements_per_iq);
+
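+ /*
+ * Each queue group has two IQs (RAID and AIO paths) sharing one OQ,
+ * so size the OQ to hold responses for both, plus the one unusable
+ * element.
+ */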
+ num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
+ num_elements_per_oq = min(num_elements_per_oq,
+ ctrl_info->max_elements_per_oq);
+
+ ctrl_info->num_elements_per_iq = num_elements_per_iq;
+ ctrl_info->num_elements_per_oq = num_elements_per_oq;
+
+ ctrl_info->max_sg_per_iu =
+ ((ctrl_info->max_inbound_iu_length -
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+ sizeof(struct pqi_sg_descriptor)) +
+ PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+}
+
+static inline void pqi_set_sg_descriptor(
+ struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+{
+ u64 address = (u64)sg_dma_address(sg);
+ unsigned int length = sg_dma_len(sg);
+
+ put_unaligned_le64(address, &sg_descriptor->address);
+ put_unaligned_le32(length, &sg_descriptor->length);
+ put_unaligned_le32(0, &sg_descriptor->flags);
+}
+
+static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ int i;
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ unsigned int max_sg_per_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+
+ if (sg_count == 0)
+ goto out;
+
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+ max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+ chained = false;
+ num_sg_in_iu = 0;
+ i = 0;
+
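+ /*
+ * Walk the scatterlist. If we run out of embedded descriptors in
+ * the IU, the last slot becomes a CISS_SG_CHAIN descriptor that
+ * points at the per-request chain buffer, and the remaining entries
+ * are placed there.
+ */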
+ while (1) {
+ pqi_set_sg_descriptor(sg_descriptor, sg);
+ if (!chained)
+ num_sg_in_iu++;
+ i++;
+ if (i == sg_count)
+ break;
+ sg_descriptor++;
+ if (i == max_sg_per_iu) {
+ put_unaligned_le64(
+ (u64)io_request->sg_chain_buffer_dma_handle,
+ &sg_descriptor->address);
+ put_unaligned_le32((sg_count - num_sg_in_iu)
+ * sizeof(*sg_descriptor),
+ &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_CHAIN,
+ &sg_descriptor->flags);
+ chained = true;
+ num_sg_in_iu++;
+ sg_descriptor = io_request->sg_chain_buffer;
+ }
+ sg = sg_next(sg);
+ }
+
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+
+ return 0;
+}
+
+static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ int i;
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ unsigned int max_sg_per_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+ num_sg_in_iu = 0;
+
+ if (sg_count == 0)
+ goto out;
+
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+ max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+ chained = false;
+ i = 0;
+
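+ /* Same SG chaining scheme as pqi_build_raid_sg_list(). */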
+ while (1) {
+ pqi_set_sg_descriptor(sg_descriptor, sg);
+ if (!chained)
+ num_sg_in_iu++;
+ i++;
+ if (i == sg_count)
+ break;
+ sg_descriptor++;
+ if (i == max_sg_per_iu) {
+ put_unaligned_le64(
+ (u64)io_request->sg_chain_buffer_dma_handle,
+ &sg_descriptor->address);
+ put_unaligned_le32((sg_count - num_sg_in_iu)
+ * sizeof(*sg_descriptor),
+ &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_CHAIN,
+ &sg_descriptor->flags);
+ chained = true;
+ num_sg_in_iu++;
+ sg_descriptor = io_request->sg_chain_buffer;
+ }
+ sg = sg_next(sg);
+ }
+
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
+
+ return 0;
+}
+
+static void pqi_raid_io_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct scsi_cmnd *scmd;
+
+ scmd = io_request->scmd;
+ pqi_free_io_request(io_request);
+ scsi_dma_unmap(scmd);
+ pqi_scsi_done(scmd);
+}
+
+static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ int rc;
+ size_t cdb_length;
+ struct pqi_io_request *io_request;
+ struct pqi_raid_path_request *request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_raid_io_complete;
+ io_request->scmd = scmd;
+
+ scmd->host_scribble = (unsigned char *)io_request;
+
+ request = io_request->iu;
+ memset(request, 0,
+ offsetof(struct pqi_raid_path_request, sg_descriptors));
+
+ request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+ put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+ request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le16(io_request->index, &request->request_id);
+ request->error_index = request->request_id;
+ memcpy(request->lun_number, device->scsi3addr,
+ sizeof(request->lun_number));
+
+ cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
+ memcpy(request->cdb, scmd->cmnd, cdb_length);
+
+ switch (cdb_length) {
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ /* No bytes in the Additional CDB bytes field */
+ request->additional_cdb_bytes_usage =
+ SOP_ADDITIONAL_CDB_BYTES_0;
+ break;
+ case 20:
+ /* 4 bytes in the Additional CDB bytes field */
+ request->additional_cdb_bytes_usage =
+ SOP_ADDITIONAL_CDB_BYTES_4;
+ break;
+ case 24:
+ /* 8 bytes in the Additional CDB bytes field */
+ request->additional_cdb_bytes_usage =
+ SOP_ADDITIONAL_CDB_BYTES_8;
+ break;
+ case 28:
+ /* 12 bytes in the Additional CDB bytes field */
+ request->additional_cdb_bytes_usage =
+ SOP_ADDITIONAL_CDB_BYTES_12;
+ break;
+ case 32:
+ default:
+ /* 16 bytes in the Additional CDB bytes field */
+ request->additional_cdb_bytes_usage =
+ SOP_ADDITIONAL_CDB_BYTES_16;
+ break;
+ }
+
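+ /*
+ * The SOP direction flags describe the transfer from the
+ * controller's point of view: for DMA_TO_DEVICE the controller
+ * reads the host buffer, for DMA_FROM_DEVICE it writes it.
+ */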
+ switch (scmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ request->data_direction = SOP_READ_FLAG;
+ break;
+ case DMA_FROM_DEVICE:
+ request->data_direction = SOP_WRITE_FLAG;
+ break;
+ case DMA_NONE:
+ request->data_direction = SOP_NO_DIRECTION_FLAG;
+ break;
+ case DMA_BIDIRECTIONAL:
+ request->data_direction = SOP_BIDIRECTIONAL;
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unknown data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(scmd->sc_data_direction);
+ break;
+ }
+
+ rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
+
+ return 0;
+}
+
+static void pqi_aio_io_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct scsi_cmnd *scmd;
+
+ scmd = io_request->scmd;
+ scsi_dma_unmap(scmd);
+ if (io_request->status == -EAGAIN)
+ set_host_byte(scmd, DID_IMM_RETRY);
+ pqi_free_io_request(io_request);
+ pqi_scsi_done(scmd);
+}
+
+static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL);
+}
+
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+ unsigned int cdb_length, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_path_request *request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+
+ scmd->host_scribble = (unsigned char *)io_request;
+
+ request = io_request->iu;
+ memset(request, 0,
+ offsetof(struct pqi_raid_path_request, sg_descriptors));
+
+ request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
+ put_unaligned_le32(aio_handle, &request->nexus_id);
+ put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+ request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le16(io_request->index, &request->request_id);
+ request->error_index = request->request_id;
+ if (cdb_length > sizeof(request->cdb))
+ cdb_length = sizeof(request->cdb);
+ request->cdb_length = cdb_length;
+ memcpy(request->cdb, cdb, cdb_length);
+
+ switch (scmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ request->data_direction = SOP_READ_FLAG;
+ break;
+ case DMA_FROM_DEVICE:
+ request->data_direction = SOP_WRITE_FLAG;
+ break;
+ case DMA_NONE:
+ request->data_direction = SOP_NO_DIRECTION_FLAG;
+ break;
+ case DMA_BIDIRECTIONAL:
+ request->data_direction = SOP_BIDIRECTIONAL;
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unknown data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(scmd->sc_data_direction);
+ break;
+ }
+
+ if (encryption_info) {
+ request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
+static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u16 hwq;
+ struct pqi_queue_group *queue_group;
+ bool raid_bypassed;
+
+ device = scmd->device->hostdata;
+ ctrl_info = shost_to_hba(shost);
+
+ if (pqi_ctrl_offline(ctrl_info)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ pqi_scsi_done(scmd);
+ return 0;
+ }
+
+ /*
+ * This is necessary because the SCSI midlayer (SML) doesn't zero out
+ * scmd->result during error recovery.
+ */
+ scmd->result = 0;
+
+ hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ if (hwq >= ctrl_info->num_queue_groups)
+ hwq = 0;
+
+ queue_group = &ctrl_info->queue_groups[hwq];
+
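+ /*
+ * For logical devices with offload enabled, try the RAID bypass
+ * (AIO) path first for filesystem requests; fall back to the RAID
+ * path only if the bypass attempt did not produce a final
+ * disposition.
+ */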
+ if (pqi_is_logical_device(device)) {
+ raid_bypassed = false;
+ if (device->offload_enabled &&
+ scmd->request->cmd_type == REQ_TYPE_FS) {
+ rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
+ scmd, queue_group);
+ if (rc == 0 ||
+ rc == SCSI_MLQUEUE_HOST_BUSY ||
+ rc == SAM_STAT_CHECK_CONDITION ||
+ rc == SAM_STAT_RESERVATION_CONFLICT)
+ raid_bypassed = true;
+ }
+ if (!raid_bypassed)
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+ queue_group);
+ } else {
+ if (device->aio_enabled)
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
+ queue_group);
+ else
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+ queue_group);
+ }
+
+ return rc;
+}
+
+static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct completion *waiting = context;
+
+ complete(waiting);
+}
+
+#define PQI_LUN_RESET_TIMEOUT_SECS 10
+
+static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct completion *wait)
+{
+ int rc;
+ unsigned int wait_secs = 0;
+
+ while (1) {
+ if (wait_for_completion_io_timeout(wait,
+ PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+ rc = 0;
+ break;
+ }
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, device->lun, wait_secs);
+ }
+
+ return rc;
+}
+
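+/*
+ * Issue a SOP LUN RESET task management request and wait for it to
+ * complete. lun_reset_sem limits concurrent resets to the I/O slots
+ * reserved for them.
+ */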
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct pqi_task_management_request *request;
+
+ down(&ctrl_info->lun_reset_sem);
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_lun_reset_complete;
+ io_request->context = &wait;
+
+ request = io_request->iu;
+ memset(request, 0, sizeof(*request));
+
+ request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+ put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
+ &request->header.iu_length);
+ put_unaligned_le16(io_request->index, &request->request_id);
+ memcpy(request->lun_number, device->scsi3addr,
+ sizeof(request->lun_number));
+ request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+
+ pqi_start_io(ctrl_info,
+ &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ io_request);
+
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ if (rc == 0)
+ rc = io_request->status;
+
+ pqi_free_io_request(io_request);
+ up(&ctrl_info->lun_reset_sem);
+
+ return rc;
+}
+
+/* Performs a reset at the LUN level. */
+
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return FAILED;
+
+ rc = pqi_lun_reset(ctrl_info, device);
+
+ return rc == 0 ? SUCCESS : FAILED;
+}
+
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+
+ ctrl_info = shost_to_hba(scmd->device->host);
+ device = scmd->device->hostdata;
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "resetting scsi %d:%d:%d:%d\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target, device->lun);
+
+ rc = pqi_device_reset(ctrl_info, device);
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "reset of scsi %d:%d:%d:%d: %s\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target, device->lun,
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
+
+ return rc;
+}
+
+static int pqi_slave_alloc(struct scsi_device *sdev)
+{
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_target *starget;
+ struct sas_rphy *rphy;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
+ starget = scsi_target(sdev);
+ rphy = target_to_rphy(starget);
+ device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+ if (device) {
+ device->target = sdev_id(sdev);
+ device->lun = sdev->lun;
+ device->target_lun_valid = true;
+ }
+ } else {
+ device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+ }
+
+ if (device && device->expose_device) {
+ sdev->hostdata = device;
+ device->sdev = sdev;
+ if (device->queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(sdev,
+ device->advertised_queue_depth);
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return 0;
+}
+
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+ struct pqi_scsi_dev *device;
+
+ device = sdev->hostdata;
+ if (!device->expose_device)
+ sdev->no_uld_attach = true;
+
+ return 0;
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
+ void __user *arg)
+{
+ struct pci_dev *pci_dev;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ cciss_pci_info_struct pciinfo;
+
+ if (!arg)
+ return -EINVAL;
+
+ pci_dev = ctrl_info->pci_dev;
+
+ pciinfo.domain = pci_domain_nr(pci_dev->bus);
+ pciinfo.bus = pci_dev->bus->number;
+ pciinfo.dev_fn = pci_dev->devfn;
+ subsystem_vendor = pci_dev->subsystem_vendor;
+ subsystem_device = pci_dev->subsystem_device;
+ pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
+ subsystem_vendor;
+
+ if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int pqi_getdrivver_ioctl(void __user *arg)
+{
+ u32 version;
+
+ if (!arg)
+ return -EINVAL;
+
+ version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
+ (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+ if (copy_to_user(arg, &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct ciss_error_info {
+ u8 scsi_status;
+ int command_status;
+ size_t sense_data_length;
+};
+
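+/*
+ * Translate PQI error information into the legacy CISS status values
+ * expected by the cciss-compatible passthrough ioctl.
+ */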
+static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
+ struct ciss_error_info *ciss_error_info)
+{
+ int ciss_cmd_status;
+ size_t sense_data_length;
+
+ switch (pqi_error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
+ ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
+ break;
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
+ ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
+ break;
+ case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
+ case PQI_DATA_IN_OUT_ERROR:
+ ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
+ break;
+ case PQI_DATA_IN_OUT_HARDWARE_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
+ case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
+ case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
+ case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
+ ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
+ break;
+ case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
+ ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
+ break;
+ case PQI_DATA_IN_OUT_ABORTED:
+ ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
+ break;
+ case PQI_DATA_IN_OUT_TIMEOUT:
+ ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
+ break;
+ default:
+ ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
+ break;
+ }
+
+ sense_data_length =
+ get_unaligned_le16(&pqi_error_info->sense_data_length);
+ if (sense_data_length == 0)
+ sense_data_length =
+ get_unaligned_le16(&pqi_error_info->response_data_length);
+ if (sense_data_length)
+ if (sense_data_length > sizeof(pqi_error_info->data))
+ sense_data_length = sizeof(pqi_error_info->data);
+
+ ciss_error_info->scsi_status = pqi_error_info->status;
+ ciss_error_info->command_status = ciss_cmd_status;
+ ciss_error_info->sense_data_length = sense_data_length;
+}
+
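+/*
+ * Service the legacy CCISS_PASSTHRU ioctl by translating the cciss
+ * IOCTL_Command_struct into a PQI RAID path request.
+ */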
+static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
+{
+ int rc;
+ char *kernel_buffer = NULL;
+ u16 iu_length;
+ size_t sense_data_length;
+ IOCTL_Command_struct iocommand;
+ struct pqi_raid_path_request request;
+ struct pqi_raid_error_info pqi_error_info;
+ struct ciss_error_info ciss_error_info;
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ if (!arg)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
+ return -EFAULT;
+ if (iocommand.buf_size < 1 &&
+ iocommand.Request.Type.Direction != XFER_NONE)
+ return -EINVAL;
+ if (iocommand.Request.CDBLen > sizeof(request.cdb))
+ return -EINVAL;
+ if (iocommand.Request.Type.Type != TYPE_CMD)
+ return -EINVAL;
+
+ switch (iocommand.Request.Type.Direction) {
+ case XFER_NONE:
+ case XFER_WRITE:
+ case XFER_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (iocommand.buf_size > 0) {
+ kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
+ if (!kernel_buffer)
+ return -ENOMEM;
+ if (iocommand.Request.Type.Direction & XFER_WRITE) {
+ if (copy_from_user(kernel_buffer, iocommand.buf,
+ iocommand.buf_size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ } else {
+ memset(kernel_buffer, 0, iocommand.buf_size);
+ }
+ }
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+ iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+ memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
+ sizeof(request.lun_number));
+ memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
+ request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+ switch (iocommand.Request.Type.Direction) {
+ case XFER_NONE:
+ request.data_direction = SOP_NO_DIRECTION_FLAG;
+ break;
+ case XFER_WRITE:
+ request.data_direction = SOP_WRITE_FLAG;
+ break;
+ case XFER_READ:
+ request.data_direction = SOP_READ_FLAG;
+ break;
+ }
+
+ request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+
+ if (iocommand.buf_size > 0) {
+ put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
+
+ rc = pqi_map_single(ctrl_info->pci_dev,
+ &request.sg_descriptors[0], kernel_buffer,
+ iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (rc)
+ goto out;
+
+ iu_length += sizeof(request.sg_descriptors[0]);
+ }
+
+ put_unaligned_le16(iu_length, &request.header.iu_length);
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+ PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+
+ if (iocommand.buf_size > 0)
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+ PCI_DMA_BIDIRECTIONAL);
+
+ memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
+
+ if (rc == 0) {
+ pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
+ iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
+ iocommand.error_info.CommandStatus =
+ ciss_error_info.command_status;
+ sense_data_length = ciss_error_info.sense_data_length;
+ if (sense_data_length) {
+ if (sense_data_length >
+ sizeof(iocommand.error_info.SenseInfo))
+ sense_data_length =
+ sizeof(iocommand.error_info.SenseInfo);
+ memcpy(iocommand.error_info.SenseInfo,
+ pqi_error_info.data, sense_data_length);
+ iocommand.error_info.SenseLen = sense_data_length;
+ }
+ }
+
+ if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (rc == 0 && iocommand.buf_size > 0 &&
+ (iocommand.Request.Type.Direction & XFER_READ)) {
+ if (copy_to_user(iocommand.buf, kernel_buffer,
+ iocommand.buf_size)) {
+ rc = -EFAULT;
+ }
+ }
+
+out:
+ kfree(kernel_buffer);
+
+ return rc;
+}
+
+static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ switch (cmd) {
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ rc = pqi_scan_scsi_devices(ctrl_info);
+ break;
+ case CCISS_GETPCIINFO:
+ rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
+ break;
+ case CCISS_GETDRIVVER:
+ rc = pqi_getdrivver_ioctl(arg);
+ break;
+ case CCISS_PASSTHRU:
+ rc = pqi_passthru_ioctl(ctrl_info, arg);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static ssize_t pqi_version_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ "firmware: %s\n", ctrl_info->firmware_version);
+
+ return count;
+}
+
+static ssize_t pqi_host_rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ pqi_scan_start(shost);
+
+ return count;
+}
+
+static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
+
+static struct device_attribute *pqi_shost_attrs[] = {
+ &dev_attr_version,
+ &dev_attr_rescan,
+ NULL
+};
+
+static ssize_t pqi_sas_address_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 sas_address;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (pqi_is_logical_device(device)) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+ return -ENODEV;
+ }
+ sas_address = device->sas_address;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+}
+
+static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ buffer[0] = device->offload_enabled ? '1' : '0';
+ buffer[1] = '\n';
+ buffer[2] = '\0';
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return 2;
+}
+
+static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
+ pqi_ssd_smart_path_enabled_show, NULL);
+
+static struct device_attribute *pqi_sdev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_ssd_smart_path_enabled,
+ NULL
+};
+
+static struct scsi_host_template pqi_driver_template = {
+ .module = THIS_MODULE,
+ .name = DRIVER_NAME_SHORT,
+ .proc_name = DRIVER_NAME_SHORT,
+ .queuecommand = pqi_scsi_queue_command,
+ .scan_start = pqi_scan_start,
+ .scan_finished = pqi_scan_finished,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = pqi_eh_device_reset_handler,
+ .ioctl = pqi_ioctl,
+ .slave_alloc = pqi_slave_alloc,
+ .slave_configure = pqi_slave_configure,
+ .sdev_attrs = pqi_sdev_attrs,
+ .shost_attrs = pqi_shost_attrs,
+};
+
+static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct Scsi_Host *shost;
+
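+ /*
+ * Only a pointer to ctrl_info is stored in the host private area
+ * (hostdata[0] below), so the size of the pointer is all the extra
+ * space we need.
+ */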
+ shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
+ if (!shost) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi_host_alloc failed for controller %u\n",
+ ctrl_info->ctrl_id);
+ return -ENOMEM;
+ }
+
+ shost->io_port = 0;
+ shost->n_io_port = 0;
+ shost->this_id = -1;
+ shost->max_channel = PQI_MAX_BUS;
+ shost->max_cmd_len = MAX_COMMAND_SIZE;
+ shost->max_lun = ~0;
+ shost->max_id = ~0;
+ shost->max_sectors = ctrl_info->max_sectors;
+ shost->can_queue = ctrl_info->scsi_ml_can_queue;
+ shost->cmd_per_lun = shost->can_queue;
+ shost->sg_tablesize = ctrl_info->sg_tablesize;
+ shost->transportt = pqi_sas_transport_template;
+ shost->irq = ctrl_info->msix_vectors[0];
+ shost->unique_id = shost->irq;
+ shost->nr_hw_queues = ctrl_info->num_queue_groups;
+ shost->hostdata[0] = (unsigned long)ctrl_info;
+
+ rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi_add_host failed for controller %u\n",
+ ctrl_info->ctrl_id);
+ goto free_host;
+ }
+
+ rc = pqi_add_sas_host(shost, ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "add SAS host failed for controller %u\n",
+ ctrl_info->ctrl_id);
+ goto remove_host;
+ }
+
+ ctrl_info->scsi_host = shost;
+
+ return 0;
+
+remove_host:
+ scsi_remove_host(shost);
+free_host:
+ scsi_host_put(shost);
+
+ return rc;
+}
+
+static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+ struct Scsi_Host *shost;
+
+ pqi_delete_sas_host(ctrl_info);
+
+ shost = ctrl_info->scsi_host;
+ if (!shost)
+ return;
+
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+}
+
+#define PQI_RESET_ACTION_RESET 0x1
+
+#define PQI_RESET_TYPE_NO_RESET 0x0
+#define PQI_RESET_TYPE_SOFT_RESET 0x1
+#define PQI_RESET_TYPE_FIRM_RESET 0x2
+#define PQI_RESET_TYPE_HARD_RESET 0x3
+
+static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ u32 reset_params;
+
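+ /*
+ * The reset action is encoded in the upper bit field (shifted left
+ * by 5) and the reset type in the low bits of the device reset
+ * register.
+ */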
+ reset_params = (PQI_RESET_ACTION_RESET << 5) |
+ PQI_RESET_TYPE_HARD_RESET;
+
+ writel(reset_params,
+ &ctrl_info->pqi_registers->device_reset);
+
+ rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "PQI reset failed\n");
+
+ return rc;
+}
+
+static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct bmic_identify_controller *identify;
+
+ identify = kmalloc(sizeof(*identify), GFP_KERNEL);
+ if (!identify)
+ return -ENOMEM;
+
+ rc = pqi_identify_controller(ctrl_info, identify);
+ if (rc)
+ goto out;
+
+ memcpy(ctrl_info->firmware_version, identify->firmware_version,
+ sizeof(identify->firmware_version));
+ ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
+ snprintf(ctrl_info->firmware_version +
+ strlen(ctrl_info->firmware_version),
+ sizeof(ctrl_info->firmware_version),
+ "-%u", get_unaligned_le16(&identify->firmware_build_number));
+
+out:
+ kfree(identify);
+
+ return rc;
+}
+
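+/*
+ * In a kdump kernel, the controller may have been left in PQI mode by
+ * the crashed kernel; reset it back into SIS mode before
+ * reinitializing.
+ */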
+static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!sis_is_firmware_running(ctrl_info))
+ return -ENXIO;
+
+ if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
+ sis_disable_msix(ctrl_info);
+ if (pqi_reset(ctrl_info) == 0)
+ sis_reenable_sis_mode(ctrl_info);
+ }
+
+ return 0;
+}
+
+static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ if (reset_devices) {
+ rc = pqi_kdump_init(ctrl_info);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * When the controller comes out of reset, it is always running
+ * in legacy SIS mode. This is so that it can be compatible
+ * with legacy drivers shipped with OSes. So we have to talk
+ * to it using SIS commands at first. Once we are satisfied
+ * that the controller supports PQI, we transition it into PQI
+ * mode.
+ */
+
+ /*
+ * Wait until the controller is ready to start accepting SIS
+ * commands.
+ */
+ rc = sis_wait_for_ctrl_ready(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error initializing SIS interface\n");
+ return rc;
+ }
+
+ /*
+ * Get the controller properties. This allows us to determine
+ * whether or not it supports PQI mode.
+ */
+ rc = sis_get_ctrl_properties(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining controller properties\n");
+ return rc;
+ }
+
+ rc = sis_get_pqi_capabilities(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining controller capabilities\n");
+ return rc;
+ }
+
+ if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
+ ctrl_info->max_outstanding_requests =
+ PQI_MAX_OUTSTANDING_REQUESTS;
+
+ pqi_calculate_io_resources(ctrl_info);
+
+ rc = pqi_alloc_error_buffer(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate PQI error buffer\n");
+ return rc;
+ }
+
+ /*
+ * If the function we are about to call succeeds, the
+ * controller will transition from legacy SIS mode
+ * into PQI mode.
+ */
+ rc = sis_init_base_struct_addr(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error initializing PQI mode\n");
+ return rc;
+ }
+
+ /* Wait for the controller to complete the SIS -> PQI transition. */
+ rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "transition to PQI mode failed\n");
+ return rc;
+ }
+
+ /* From here on, we are running in PQI mode. */
+ ctrl_info->pqi_mode_enabled = true;
+ pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+ rc = pqi_alloc_admin_queues(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error allocating admin queues\n");
+ return rc;
+ }
+
+ rc = pqi_create_admin_queues(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating admin queues\n");
+ return rc;
+ }
+
+ rc = pqi_report_device_capability(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "obtaining device capability failed\n");
+ return rc;
+ }
+
+ rc = pqi_validate_device_capability(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_calculate_queue_resources(ctrl_info);
+
+ rc = pqi_enable_msix_interrupts(ctrl_info);
+ if (rc)
+ return rc;
+
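+ /*
+ * If fewer MSI-X vectors were granted than queue groups, lower the
+ * vector limit and recompute the queue resources to match.
+ */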
+ if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
+ ctrl_info->max_msix_vectors =
+ ctrl_info->num_msix_vectors_enabled;
+ pqi_calculate_queue_resources(ctrl_info);
+ }
+
+ rc = pqi_alloc_io_resources(ctrl_info);
+ if (rc)
+ return rc;
+
+ rc = pqi_alloc_operational_queues(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_init_operational_queues(ctrl_info);
+
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_irq_set_affinity_hint(ctrl_info);
+
+ rc = pqi_create_queues(ctrl_info);
+ if (rc)
+ return rc;
+
+ sis_enable_msix(ctrl_info);
+
+ rc = pqi_configure_events(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error configuring events\n");
+ return rc;
+ }
+
+ pqi_start_heartbeat_timer(ctrl_info);
+
+ ctrl_info->controller_online = true;
+
+ /* Register with the SCSI subsystem. */
+ rc = pqi_register_scsi(ctrl_info);
+ if (rc)
+ return rc;
+
+ rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining firmware version\n");
+ return rc;
+ }
+
+ rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error updating host wellness\n");
+ return rc;
+ }
+
+ pqi_schedule_update_time_worker(ctrl_info);
+
+ pqi_scan_scsi_devices(ctrl_info);
+
+ return 0;
+}
+
+static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ u64 mask;
+
+ rc = pci_enable_device(ctrl_info->pci_dev);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to enable PCI device\n");
+ return rc;
+ }
+
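+ /* Prefer 64-bit DMA addressing when dma_addr_t is wide enough. */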
+ if (sizeof(dma_addr_t) > 4)
+ mask = DMA_BIT_MASK(64);
+ else
+ mask = DMA_BIT_MASK(32);
+
+ rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
+ goto disable_device;
+ }
+
+ rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to obtain PCI resources\n");
+ goto disable_device;
+ }
+
+ ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+ ctrl_info->pci_dev, 0),
+ sizeof(struct pqi_ctrl_registers));
+ if (!ctrl_info->iomem_base) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to map memory for controller registers\n");
+ rc = -ENOMEM;
+ goto release_regions;
+ }
+
+ ctrl_info->registers = ctrl_info->iomem_base;
+ ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+
+ /* Enable bus mastering. */
+ pci_set_master(ctrl_info->pci_dev);
+
+ pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
+
+ return 0;
+
+release_regions:
+ pci_release_regions(ctrl_info->pci_dev);
+disable_device:
+ pci_disable_device(ctrl_info->pci_dev);
+
+ return rc;
+}
+
+static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+ iounmap(ctrl_info->iomem_base);
+ pci_release_regions(ctrl_info->pci_dev);
+ pci_disable_device(ctrl_info->pci_dev);
+ pci_set_drvdata(ctrl_info->pci_dev, NULL);
+}
+
+static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
+ GFP_KERNEL, numa_node);
+ if (!ctrl_info)
+ return NULL;
+
+ mutex_init(&ctrl_info->scan_mutex);
+
+ INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
+ spin_lock_init(&ctrl_info->scsi_device_list_lock);
+
+ INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
+ atomic_set(&ctrl_info->num_interrupts, 0);
+
+ INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
+ INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
+
+ sema_init(&ctrl_info->sync_request_sem,
+ PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
+ sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
+
+ ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+ ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+
+ return ctrl_info;
+}
+
+static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
+{
+ kfree(ctrl_info);
+}
+
+static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_irq_unset_affinity_hint(ctrl_info);
+ pqi_free_irqs(ctrl_info);
+ if (ctrl_info->num_msix_vectors_enabled)
+ pci_disable_msix(ctrl_info->pci_dev);
+}
+
+static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_free_interrupts(ctrl_info);
+ if (ctrl_info->queue_memory_base)
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->queue_memory_length,
+ ctrl_info->queue_memory_base,
+ ctrl_info->queue_memory_base_dma_handle);
+ if (ctrl_info->admin_queue_memory_base)
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->admin_queue_memory_length,
+ ctrl_info->admin_queue_memory_base,
+ ctrl_info->admin_queue_memory_base_dma_handle);
+ pqi_free_all_io_requests(ctrl_info);
+ if (ctrl_info->error_buffer)
+ dma_free_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ ctrl_info->error_buffer,
+ ctrl_info->error_buffer_dma_handle);
+ if (ctrl_info->iomem_base)
+ pqi_cleanup_pci_init(ctrl_info);
+ pqi_free_ctrl_info(ctrl_info);
+}
+
+static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_delayed_work_sync(&ctrl_info->rescan_work);
+ cancel_delayed_work_sync(&ctrl_info->update_time_work);
+ pqi_remove_all_scsi_devices(ctrl_info);
+ pqi_unregister_scsi(ctrl_info);
+
+ if (ctrl_info->pqi_mode_enabled) {
+ sis_disable_msix(ctrl_info);
+ if (pqi_reset(ctrl_info) == 0)
+ sis_reenable_sis_mode(ctrl_info);
+ }
+ pqi_free_ctrl_resources(ctrl_info);
+}
+
+static void pqi_print_ctrl_info(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ char *ctrl_description;
+
+ if (id->driver_data) {
+ ctrl_description = (char *)id->driver_data;
+ } else {
+ switch (id->subvendor) {
+ case PCI_VENDOR_ID_HP:
+ ctrl_description = hpe_branded_controller;
+ break;
+ case PCI_VENDOR_ID_ADAPTEC2:
+ default:
+ ctrl_description = microsemi_branded_controller;
+ break;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s found\n", ctrl_description);
+}
+
+static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rc;
+ int node;
+ struct pqi_ctrl_info *ctrl_info;
+
+ pqi_print_ctrl_info(pdev, id);
+
+ if (pqi_disable_device_id_wildcards &&
+ id->subvendor == PCI_ANY_ID &&
+ id->subdevice == PCI_ANY_ID) {
+ dev_warn(&pdev->dev,
+ "controller not probed because device ID wildcards are disabled\n");
+ return -ENODEV;
+ }
+
+ if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
+ dev_warn(&pdev->dev,
+ "controller device ID matched using wildcards\n");
+
+ node = dev_to_node(&pdev->dev);
+ if (node == NUMA_NO_NODE)
+ set_dev_node(&pdev->dev, 0);
+
+ ctrl_info = pqi_alloc_ctrl_info(node);
+ if (!ctrl_info) {
+ dev_err(&pdev->dev,
+ "failed to allocate controller info block\n");
+ return -ENOMEM;
+ }
+
+ ctrl_info->pci_dev = pdev;
+
+ rc = pqi_pci_init(ctrl_info);
+ if (rc)
+ goto error;
+
+ rc = pqi_ctrl_init(ctrl_info);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ pqi_remove_ctrl(ctrl_info);
+
+ return rc;
+}
+
+static void pqi_pci_remove(struct pci_dev *pdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pdev);
+ if (!ctrl_info)
+ return;
+
+ pqi_remove_ctrl(ctrl_info);
+}
+
+static void pqi_shutdown(struct pci_dev *pdev)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pdev);
+ if (!ctrl_info)
+ goto error;
+
+ /*
+ * Write all data in the controller's battery-backed cache to
+ * storage.
+ */
+ rc = pqi_flush_cache(ctrl_info);
+ if (rc == 0)
+ return;
+
+error:
+ dev_warn(&pdev->dev,
+ "unable to flush controller cache\n");
+}
+
+/* Define the PCI IDs for the controllers that we support. */
+static const struct pci_device_id pqi_pci_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0110)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0600)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0601)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0602)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0603)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0650)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0651)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0652)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0653)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0654)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0655)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0700)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0701)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0801)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0802)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0803)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0804)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0805)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0900)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0901)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0902)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0903)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0904)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0905)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0906)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1100)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1102)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1150)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_ANY_ID, PCI_ANY_ID)
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
+
+static struct pci_driver pqi_pci_driver = {
+ .name = DRIVER_NAME_SHORT,
+ .id_table = pqi_pci_id_table,
+ .probe = pqi_pci_probe,
+ .remove = pqi_pci_remove,
+ .shutdown = pqi_shutdown,
+};
+
+static int __init pqi_init(void)
+{
+ int rc;
+
+ pr_info(DRIVER_NAME "\n");
+
+ pqi_sas_transport_template =
+ sas_attach_transport(&pqi_sas_transport_functions);
+ if (!pqi_sas_transport_template)
+ return -ENODEV;
+
+ rc = pci_register_driver(&pqi_pci_driver);
+ if (rc)
+ sas_release_transport(pqi_sas_transport_template);
+
+ return rc;
+}
+
+static void __exit pqi_cleanup(void)
+{
+ pci_unregister_driver(&pqi_pci_driver);
+ sas_release_transport(pqi_sas_transport_template);
+}
+
+module_init(pqi_init);
+module_exit(pqi_cleanup);
+
+static void __attribute__((unused)) verify_structures(void)
+{
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_host_to_ctrl_doorbell) != 0x20);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_interrupt_mask) != 0x34);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_ctrl_to_host_doorbell) != 0x9c);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_ctrl_to_host_doorbell_clear) != 0xa0);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_driver_scratch) != 0xb0);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_firmware_status) != 0xbc);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_mailbox) != 0x1000);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ pqi_registers) != 0x4000);
+
+ BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+ iu_type) != 0x0);
+ BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+ iu_length) != 0x2);
+ BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+ response_queue_id) != 0x4);
+ BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+ work_area) != 0x6);
+ BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
+
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ status) != 0x0);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ service_response) != 0x1);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ data_present) != 0x2);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ reserved) != 0x3);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ residual_count) != 0x4);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ data_length) != 0x8);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ reserved1) != 0xa);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+ data) != 0xc);
+ BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
+
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ data_in_result) != 0x0);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ data_out_result) != 0x1);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ reserved) != 0x2);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ status) != 0x5);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ status_qualifier) != 0x6);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ sense_data_length) != 0x8);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ response_data_length) != 0xa);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ data_in_transferred) != 0xc);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ data_out_transferred) != 0x10);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+ data) != 0x14);
+ BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
+
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ signature) != 0x0);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ function_and_status_code) != 0x8);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ max_admin_iq_elements) != 0x10);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ max_admin_oq_elements) != 0x11);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_iq_element_length) != 0x12);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_oq_element_length) != 0x13);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ max_reset_timeout) != 0x14);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ legacy_intx_status) != 0x18);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ legacy_intx_mask_set) != 0x1c);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ legacy_intx_mask_clear) != 0x20);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ device_status) != 0x40);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_iq_pi_offset) != 0x48);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_oq_ci_offset) != 0x50);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_iq_element_array_addr) != 0x58);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_oq_element_array_addr) != 0x60);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_iq_ci_addr) != 0x68);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_oq_pi_addr) != 0x70);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_iq_num_elements) != 0x78);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_oq_num_elements) != 0x79);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ admin_queue_int_msg_num) != 0x7a);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ device_error) != 0x80);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ error_details) != 0x88);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ device_reset) != 0x90);
+ BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+ power_action) != 0x94);
+ BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
+
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ header.work_area) != 6);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ function_code) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.report_device_capability.buffer_length) != 44);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.report_device_capability.sg_descriptor) != 48);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.queue_id) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.element_array_addr) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.ci_addr) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.num_elements) != 32);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.element_length) != 34);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_iq.queue_protocol) != 36);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.queue_id) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.element_array_addr) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.pi_addr) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.num_elements) != 32);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.element_length) != 34);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.queue_protocol) != 36);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.int_msg_num) != 40);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.coalescing_count) != 42);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.min_coalescing_time) != 44);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.create_operational_oq.max_coalescing_time) != 48);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+ data.delete_operational_queue.queue_id) != 12);
+ BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ data.create_operational_iq) != 64 - 11);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ data.create_operational_oq) != 64 - 11);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ data.delete_operational_queue) != 64 - 11);
+
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ header.work_area) != 6);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ function_code) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ status) != 11);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ data.create_operational_iq.status_descriptor) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ data.create_operational_iq.iq_pi_offset) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ data.create_operational_oq.status_descriptor) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+ data.create_operational_oq.oq_ci_offset) != 16);
+ BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
+
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ header.response_queue_id) != 4);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ header.work_area) != 6);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ nexus_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ buffer_length) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ lun_number) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ protocol_specific) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ error_index) != 27);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ cdb) != 32);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ sg_descriptors) != 64);
+ BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ header.response_queue_id) != 4);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ header.work_area) != 6);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ nexus_id) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ buffer_length) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ data_encryption_key_index) != 22);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ encrypt_tweak_lower) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ encrypt_tweak_upper) != 28);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ cdb) != 32);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ error_index) != 48);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ num_sg_descriptors) != 50);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ cdb_length) != 51);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ lun_number) != 52);
+ BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+ sg_descriptors) != 64);
+ BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+ BUILD_BUG_ON(offsetof(struct pqi_io_response,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_io_response,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_io_response,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_io_response,
+ error_index) != 10);
+
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ header.response_queue_id) != 4);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ data.report_event_configuration.buffer_length) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ data.report_event_configuration.sg_descriptors) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ data.set_event_configuration.global_event_oq_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ data.set_event_configuration.buffer_length) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+ data.set_event_configuration.sg_descriptors) != 16);
+
+ BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+ max_inbound_iu_length) != 6);
+ BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+ max_outbound_iu_length) != 14);
+ BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
+
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ data_length) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ iq_arbitration_priority_support_bitmask) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ maximum_aw_a) != 9);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ maximum_aw_b) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ maximum_aw_c) != 11);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_inbound_queues) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_elements_per_iq) != 18);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_iq_element_length) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ min_iq_element_length) != 26);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_outbound_queues) != 30);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_elements_per_oq) != 32);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ intr_coalescing_time_granularity) != 34);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ max_oq_element_length) != 36);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ min_oq_element_length) != 38);
+ BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+ iu_layer_descriptors) != 64);
+ BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
+
+ BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+ event_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+ oq_id) != 2);
+ BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
+
+ BUILD_BUG_ON(offsetof(struct pqi_event_config,
+ num_event_descriptors) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_event_config,
+ descriptors) != 4);
+
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ event_type) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ event_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ additional_event_id) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_event_response,
+ data) != 16);
+ BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
+
+ BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+ event_type) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+ event_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+ additional_event_id) != 12);
+ BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
+
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ nexus_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ lun_number) != 16);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ protocol_specific) != 24);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ outbound_queue_id_to_manage) != 26);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ request_id_to_manage) != 28);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ task_management_function) != 30);
+ BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
+
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ header.iu_type) != 0);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ header.iu_length) != 2);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ request_id) != 8);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ nexus_id) != 10);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ additional_response_info) != 12);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+ response_code) != 15);
+ BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
+
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ configured_logical_drive_count) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ configuration_signature) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ firmware_version) != 5);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ extended_logical_unit_count) != 154);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ firmware_build_number) != 190);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ controller_mode) != 292);
+
+ BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
+ BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
+ BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
+ PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+ BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
+ PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+ BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
+ BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
+ PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+ BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
+ BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
+ PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+
+ BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
+}
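
The verify_structures() helper that closes smartpqi_init.c above is never called; it exists, marked __attribute__((unused)), so the compiler must evaluate every BUILD_BUG_ON() inside it. BUILD_BUG_ON(cond) breaks the kernel build when cond is true, so each line pins one field offset or structure size of the host/controller interface at compile time. The same technique is available in plain C11 via _Static_assert(); the structure below is an invented example, not one of the smartpqi structures:

#include <stddef.h>        /* offsetof() */
#include <stdint.h>

/* Hypothetical packed message header, for illustration only. */
struct demo_msg_header {
        uint8_t  type;          /* byte 0 */
        uint8_t  reserved;      /* byte 1 */
        uint16_t length;        /* bytes 2-3 */
        uint32_t tag;           /* bytes 4-7 */
} __attribute__((packed));

/*
 * Compile-time layout checks: if a field is reordered or resized, the
 * build fails here instead of producing a driver that silently exchanges
 * misaligned data with the hardware. Note the sense is inverted relative
 * to BUILD_BUG_ON(), which fires when its condition is true.
 */
_Static_assert(offsetof(struct demo_msg_header, type) == 0, "type offset");
_Static_assert(offsetof(struct demo_msg_header, length) == 2, "length offset");
_Static_assert(offsetof(struct demo_msg_header, tag) == 4, "tag offset");
_Static_assert(sizeof(struct demo_msg_header) == 8, "header size");

int main(void)
{
        return 0;       /* nothing to do at run time; the checks are compile-time */
}
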
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
new file mode 100644
index 000000000000..52ca4f93f1b2
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -0,0 +1,350 @@
+/*
+ * driver for Microsemi PQI-based storage controllers
+ * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/kernel.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport_sas.h>
+#include "smartpqi.h"
+
+static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
+{
+ struct pqi_sas_phy *pqi_sas_phy;
+ struct sas_phy *phy;
+
+ pqi_sas_phy = kzalloc(sizeof(*pqi_sas_phy), GFP_KERNEL);
+ if (!pqi_sas_phy)
+ return NULL;
+
+ phy = sas_phy_alloc(pqi_sas_port->parent_node->parent_dev,
+ pqi_sas_port->next_phy_index);
+ if (!phy) {
+ kfree(pqi_sas_phy);
+ return NULL;
+ }
+
+ pqi_sas_port->next_phy_index++;
+ pqi_sas_phy->phy = phy;
+ pqi_sas_phy->parent_port = pqi_sas_port;
+
+ return pqi_sas_phy;
+}
+
+static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
+{
+ struct sas_phy *phy = pqi_sas_phy->phy;
+
+ sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
+ sas_phy_free(phy);
+ if (pqi_sas_phy->added_to_port)
+ list_del(&pqi_sas_phy->phy_list_entry);
+ kfree(pqi_sas_phy);
+}
+
+static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
+{
+ int rc;
+ struct pqi_sas_port *pqi_sas_port;
+ struct sas_phy *phy;
+ struct sas_identify *identify;
+
+ pqi_sas_port = pqi_sas_phy->parent_port;
+ phy = pqi_sas_phy->phy;
+
+ identify = &phy->identify;
+ memset(identify, 0, sizeof(*identify));
+ identify->sas_address = pqi_sas_port->sas_address;
+ identify->device_type = SAS_END_DEVICE;
+ identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+ identify->target_port_protocols = SAS_PROTOCOL_STP;
+ phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+ phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
+ phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
+ phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
+ phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+
+ rc = sas_phy_add(pqi_sas_phy->phy);
+ if (rc)
+ return rc;
+
+ sas_port_add_phy(pqi_sas_port->port, pqi_sas_phy->phy);
+ list_add_tail(&pqi_sas_phy->phy_list_entry,
+ &pqi_sas_port->phy_list_head);
+ pqi_sas_phy->added_to_port = true;
+
+ return 0;
+}
+
+static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
+ struct sas_rphy *rphy)
+{
+ struct sas_identify *identify;
+
+ identify = &rphy->identify;
+ identify->sas_address = pqi_sas_port->sas_address;
+ identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+ identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+ return sas_rphy_add(rphy);
+}
+
+static struct pqi_sas_port *pqi_alloc_sas_port(
+ struct pqi_sas_node *pqi_sas_node, u64 sas_address)
+{
+ int rc;
+ struct pqi_sas_port *pqi_sas_port;
+ struct sas_port *port;
+
+ pqi_sas_port = kzalloc(sizeof(*pqi_sas_port), GFP_KERNEL);
+ if (!pqi_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&pqi_sas_port->phy_list_head);
+ pqi_sas_port->parent_node = pqi_sas_node;
+
+ port = sas_port_alloc_num(pqi_sas_node->parent_dev);
+ if (!port)
+ goto free_pqi_port;
+
+ rc = sas_port_add(port);
+ if (rc)
+ goto free_sas_port;
+
+ pqi_sas_port->port = port;
+ pqi_sas_port->sas_address = sas_address;
+ list_add_tail(&pqi_sas_port->port_list_entry,
+ &pqi_sas_node->port_list_head);
+
+ return pqi_sas_port;
+
+free_sas_port:
+ sas_port_free(port);
+free_pqi_port:
+ kfree(pqi_sas_port);
+
+ return NULL;
+}
+
+static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
+{
+ struct pqi_sas_phy *pqi_sas_phy;
+ struct pqi_sas_phy *next;
+
+ list_for_each_entry_safe(pqi_sas_phy, next,
+ &pqi_sas_port->phy_list_head, phy_list_entry)
+ pqi_free_sas_phy(pqi_sas_phy);
+
+ sas_port_delete(pqi_sas_port->port);
+ list_del(&pqi_sas_port->port_list_entry);
+ kfree(pqi_sas_port);
+}
+
+static struct pqi_sas_node *pqi_alloc_sas_node(struct device *parent_dev)
+{
+ struct pqi_sas_node *pqi_sas_node;
+
+ pqi_sas_node = kzalloc(sizeof(*pqi_sas_node), GFP_KERNEL);
+ if (pqi_sas_node) {
+ pqi_sas_node->parent_dev = parent_dev;
+ INIT_LIST_HEAD(&pqi_sas_node->port_list_head);
+ }
+
+ return pqi_sas_node;
+}
+
+static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
+{
+ struct pqi_sas_port *pqi_sas_port;
+ struct pqi_sas_port *next;
+
+ if (!pqi_sas_node)
+ return;
+
+ list_for_each_entry_safe(pqi_sas_port, next,
+ &pqi_sas_node->port_list_head, port_list_entry)
+ pqi_free_sas_port(pqi_sas_port);
+
+ kfree(pqi_sas_node);
+}
+
+struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
+ struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy)
+{
+ struct pqi_scsi_dev *device;
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (!device->sas_port)
+ continue;
+ if (device->sas_port->rphy == rphy)
+ return device;
+ }
+
+ return NULL;
+}
+
+int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct device *parent_dev;
+ struct pqi_sas_node *pqi_sas_node;
+ struct pqi_sas_port *pqi_sas_port;
+ struct pqi_sas_phy *pqi_sas_phy;
+
+ parent_dev = &shost->shost_gendev;
+
+ pqi_sas_node = pqi_alloc_sas_node(parent_dev);
+ if (!pqi_sas_node)
+ return -ENOMEM;
+
+ pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
+ if (!pqi_sas_port) {
+ rc = -ENODEV;
+ goto free_sas_node;
+ }
+
+ pqi_sas_phy = pqi_alloc_sas_phy(pqi_sas_port);
+ if (!pqi_sas_phy) {
+ rc = -ENODEV;
+ goto free_sas_port;
+ }
+
+ rc = pqi_sas_port_add_phy(pqi_sas_phy);
+ if (rc)
+ goto free_sas_phy;
+
+ ctrl_info->sas_host = pqi_sas_node;
+
+ return 0;
+
+free_sas_phy:
+ pqi_free_sas_phy(pqi_sas_phy);
+free_sas_port:
+ pqi_free_sas_port(pqi_sas_port);
+free_sas_node:
+ pqi_free_sas_node(pqi_sas_node);
+
+ return rc;
+}
+
+void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_free_sas_node(ctrl_info->sas_host);
+}
+
+int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
+ struct pqi_scsi_dev *device)
+{
+ int rc;
+ struct pqi_sas_port *pqi_sas_port;
+ struct sas_rphy *rphy;
+
+ pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
+ if (!pqi_sas_port)
+ return -ENOMEM;
+
+ rphy = sas_end_device_alloc(pqi_sas_port->port);
+ if (!rphy) {
+ rc = -ENODEV;
+ goto free_sas_port;
+ }
+
+ pqi_sas_port->rphy = rphy;
+ device->sas_port = pqi_sas_port;
+
+ rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
+ if (rc)
+ goto free_sas_port;
+
+ return 0;
+
+free_sas_port:
+ pqi_free_sas_port(pqi_sas_port);
+ device->sas_port = NULL;
+
+ return rc;
+}
+
+void pqi_remove_sas_device(struct pqi_scsi_dev *device)
+{
+ if (device->sas_port) {
+ pqi_free_sas_port(device->sas_port);
+ device->sas_port = NULL;
+ }
+}
+
+static int pqi_sas_get_linkerrors(struct sas_phy *phy)
+{
+ return 0;
+}
+
+static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ return 0;
+}
+
+static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
+{
+ return -ENXIO;
+}
+
+static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ return 0;
+}
+
+static int pqi_sas_phy_enable(struct sas_phy *phy, int enable)
+{
+ return 0;
+}
+
+static int pqi_sas_phy_setup(struct sas_phy *phy)
+{
+ return 0;
+}
+
+static void pqi_sas_phy_release(struct sas_phy *phy)
+{
+}
+
+static int pqi_sas_phy_speed(struct sas_phy *phy,
+ struct sas_phy_linkrates *rates)
+{
+ return -EINVAL;
+}
+
+/* SMP = Serial Management Protocol */
+
+static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ return -EINVAL;
+}
+
+struct sas_function_template pqi_sas_transport_functions = {
+ .get_linkerrors = pqi_sas_get_linkerrors,
+ .get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
+ .get_bay_identifier = pqi_sas_get_bay_identifier,
+ .phy_reset = pqi_sas_phy_reset,
+ .phy_enable = pqi_sas_phy_enable,
+ .phy_setup = pqi_sas_phy_setup,
+ .phy_release = pqi_sas_phy_release,
+ .set_phy_speed = pqi_sas_phy_speed,
+ .smp_handler = pqi_sas_smp_handler,
+};
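
pqi_add_sas_host() above uses the kernel's usual goto-based unwind for multi-step setup: node, port and phy are acquired in order, and a failure at any step jumps to a label that releases exactly what was built so far, in reverse order. A self-contained sketch of the same shape, with plain malloc()/free() standing in for the pqi_alloc_*/pqi_free_* helpers (demo_ctx and demo_setup are invented names):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct demo_ctx {
        void *node;
        void *port;
        void *phy;
};

static int demo_setup(struct demo_ctx *ctx)
{
        int rc;

        ctx->node = malloc(64);
        if (!ctx->node)
                return -ENOMEM;

        ctx->port = malloc(64);
        if (!ctx->port) {
                rc = -ENOMEM;
                goto free_node;
        }

        ctx->phy = malloc(64);
        if (!ctx->phy) {
                rc = -ENOMEM;
                goto free_port;
        }

        return 0;               /* everything acquired */

free_port:
        free(ctx->port);        /* unwind in reverse order of acquisition */
free_node:
        free(ctx->node);
        return rc;
}

int main(void)
{
        struct demo_ctx ctx = { 0 };

        if (demo_setup(&ctx) == 0) {
                printf("setup ok\n");
                free(ctx.phy);
                free(ctx.port);
                free(ctx.node);
        }
        return 0;
}
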
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
new file mode 100644
index 000000000000..71408f9e8f75
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -0,0 +1,404 @@
+/*
+ * driver for Microsemi PQI-based storage controllers
+ * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <scsi/scsi_device.h>
+#include <asm/unaligned.h>
+#include "smartpqi.h"
+#include "smartpqi_sis.h"
+
+/* legacy SIS interface commands */
+#define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19
+#define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b
+#define SIS_CMD_GET_PQI_CAPABILITIES 0x3000
+
+/* for submission of legacy SIS commands */
+#define SIS_REENABLE_SIS_MODE 0x1
+#define SIS_ENABLE_MSIX 0x40
+#define SIS_SOFT_RESET 0x100
+#define SIS_CMD_READY 0x200
+#define SIS_CMD_COMPLETE 0x1000
+#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000
+#define SIS_CMD_STATUS_SUCCESS 0x1
+#define SIS_CMD_COMPLETE_TIMEOUT_SECS 30
+#define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS 10
+
+/* used with SIS_CMD_GET_ADAPTER_PROPERTIES command */
+#define SIS_EXTENDED_PROPERTIES_SUPPORTED 0x800000
+#define SIS_SMARTARRAY_FEATURES_SUPPORTED 0x2
+#define SIS_PQI_MODE_SUPPORTED 0x4
+#define SIS_REQUIRED_EXTENDED_PROPERTIES \
+ (SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED)
+
+/* used with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
+#define SIS_BASE_STRUCT_REVISION 9
+#define SIS_BASE_STRUCT_ALIGNMENT 16
+
+#define SIS_CTRL_KERNEL_UP 0x80
+#define SIS_CTRL_KERNEL_PANIC 0x100
+#define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
+
+#pragma pack(1)
+
+/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
+struct sis_base_struct {
+ __le32 revision; /* revision of this structure */
+ __le32 flags; /* reserved */
+ __le32 error_buffer_paddr_low; /* lower 32 bits of physical memory */
+ /* buffer for PQI error response */
+ /* data */
+ __le32 error_buffer_paddr_high; /* upper 32 bits of physical */
+ /* memory buffer for PQI */
+ /* error response data */
+ __le32 error_buffer_element_length; /* length of each PQI error */
+ /* response buffer element */
+ /* in bytes */
+ __le32 error_buffer_num_elements; /* total number of PQI error */
+ /* response buffers available */
+};
+
+#pragma pack()
+
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long timeout;
+ u32 status;
+
+ timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+
+ while (1) {
+ status = readl(&ctrl_info->registers->sis_firmware_status);
+ if (status != ~0) {
+ if (status & SIS_CTRL_KERNEL_PANIC) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller is offline: status code 0x%x\n",
+ readl(
+ &ctrl_info->registers->sis_mailbox[7]));
+ return -ENODEV;
+ }
+ if (status & SIS_CTRL_KERNEL_UP)
+ break;
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS);
+ }
+
+ return 0;
+}
+
+bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
+{
+ bool running;
+ u32 status;
+
+ status = readl(&ctrl_info->registers->sis_firmware_status);
+
+ if (status & SIS_CTRL_KERNEL_PANIC)
+ running = false;
+ else
+ running = true;
+
+ if (!running)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller is offline: status code 0x%x\n",
+ readl(&ctrl_info->registers->sis_mailbox[7]));
+
+ return running;
+}
+
+/* used for passing command parameters/results when issuing SIS commands */
+struct sis_sync_cmd_params {
+ u32 mailbox[6]; /* mailboxes 0-5 */
+};
+
+static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
+ u32 cmd, struct sis_sync_cmd_params *params)
+{
+ struct pqi_ctrl_registers __iomem *registers;
+ unsigned int i;
+ unsigned long timeout;
+ u32 doorbell;
+ u32 cmd_status;
+
+ registers = ctrl_info->registers;
+
+ /* Write the command to mailbox 0. */
+ writel(cmd, &registers->sis_mailbox[0]);
+
+ /*
+ * Write the command parameters to mailboxes 1-4 (mailbox 5 is not used
+ * when sending a command to the controller).
+ */
+ for (i = 1; i <= 4; i++)
+ writel(params->mailbox[i], &registers->sis_mailbox[i]);
+
+ /* Clear the command doorbell. */
+ writel(SIS_CLEAR_CTRL_TO_HOST_DOORBELL,
+ &registers->sis_ctrl_to_host_doorbell_clear);
+
+ /* Disable doorbell interrupts by masking all interrupts. */
+ writel(~0, &registers->sis_interrupt_mask);
+
+ /*
+ * Force the completion of the interrupt mask register write before
+ * submitting the command.
+ */
+ readl(&registers->sis_interrupt_mask);
+
+ /* Submit the command to the controller. */
+ writel(SIS_CMD_READY, &registers->sis_host_to_ctrl_doorbell);
+
+ /*
+ * Poll for command completion. Note that the call to msleep() is at
+ * the top of the loop in order to give the controller time to start
+ * processing the command before we start polling.
+ */
+ timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
+ while (1) {
+ msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
+ doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
+ if (doorbell & SIS_CMD_COMPLETE)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+
+ /* Read the command status from mailbox 0. */
+ cmd_status = readl(&registers->sis_mailbox[0]);
+ if (cmd_status != SIS_CMD_STATUS_SUCCESS) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "SIS command failed for command 0x%x: status = 0x%x\n",
+ cmd, cmd_status);
+ return -EINVAL;
+ }
+
+ /*
+ * The command completed successfully, so save the command status and
+ * read the values returned in mailboxes 1-5.
+ */
+ params->mailbox[0] = cmd_status;
+ for (i = 1; i < ARRAY_SIZE(params->mailbox); i++)
+ params->mailbox[i] = readl(&registers->sis_mailbox[i]);
+
+ return 0;
+}
+
+/*
+ * This function verifies that we are talking to a controller that speaks PQI.
+ */
+
+int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ u32 properties;
+ u32 extended_properties;
+ struct sis_sync_cmd_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_ADAPTER_PROPERTIES,
+ &params);
+ if (rc)
+ return rc;
+
+ properties = params.mailbox[1];
+
+ if (!(properties & SIS_EXTENDED_PROPERTIES_SUPPORTED))
+ return -ENODEV;
+
+ extended_properties = params.mailbox[4];
+
+ if ((extended_properties & SIS_REQUIRED_EXTENDED_PROPERTIES) !=
+ SIS_REQUIRED_EXTENDED_PROPERTIES)
+ return -ENODEV;
+
+ return 0;
+}
+
+int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct sis_sync_cmd_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_PQI_CAPABILITIES,
+ &params);
+ if (rc)
+ return rc;
+
+ ctrl_info->max_sg_entries = params.mailbox[1];
+ ctrl_info->max_transfer_size = params.mailbox[2];
+ ctrl_info->max_outstanding_requests = params.mailbox[3];
+ ctrl_info->config_table_offset = params.mailbox[4];
+ ctrl_info->config_table_length = params.mailbox[5];
+
+ return 0;
+}
+
+int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ void *base_struct_unaligned;
+ struct sis_base_struct *base_struct;
+ struct sis_sync_cmd_params params;
+ unsigned long error_buffer_paddr;
+ dma_addr_t bus_address;
+
+ base_struct_unaligned = kzalloc(sizeof(*base_struct)
+ + SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
+ if (!base_struct_unaligned)
+ return -ENOMEM;
+
+ base_struct = PTR_ALIGN(base_struct_unaligned,
+ SIS_BASE_STRUCT_ALIGNMENT);
+ error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;
+
+ put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
+ put_unaligned_le32(lower_32_bits(error_buffer_paddr),
+ &base_struct->error_buffer_paddr_low);
+ put_unaligned_le32(upper_32_bits(error_buffer_paddr),
+ &base_struct->error_buffer_paddr_high);
+ put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
+ &base_struct->error_buffer_element_length);
+ put_unaligned_le32(ctrl_info->max_io_slots,
+ &base_struct->error_buffer_num_elements);
+
+ bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
+ sizeof(*base_struct), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.mailbox[1] = lower_32_bits((u64)bus_address);
+ params.mailbox[2] = upper_32_bits((u64)bus_address);
+ params.mailbox[3] = sizeof(*base_struct);
+
+ rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
+ &params);
+
+ pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
+ PCI_DMA_TODEVICE);
+
+out:
+ kfree(base_struct_unaligned);
+
+ return rc;
+}
+
+/* Enable MSI-X interrupts on the controller. */
+
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register |= SIS_ENABLE_MSIX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+/* Disable MSI-X interrupts on the controller. */
+
+void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register &= ~SIS_ENABLE_MSIX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ writel(SIS_SOFT_RESET,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
+#define SIS_MODE_READY_TIMEOUT_SECS 30
+
+int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned long timeout;
+ struct pqi_ctrl_registers __iomem *registers;
+ u32 doorbell;
+
+ registers = ctrl_info->registers;
+
+ writel(SIS_REENABLE_SIS_MODE,
+ &registers->sis_host_to_ctrl_doorbell);
+
+ rc = 0;
+ timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+
+ while (1) {
+ doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
+ if ((doorbell & SIS_REENABLE_SIS_MODE) == 0)
+ break;
+ if (time_after(jiffies, timeout)) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "re-enabling SIS mode failed\n");
+
+ return rc;
+}
+
+void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
+{
+ writel(value, &ctrl_info->registers->sis_driver_scratch);
+}
+
+u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_driver_scratch);
+}
+
+static void __attribute__((unused)) verify_structures(void)
+{
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ revision) != 0x0);
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ flags) != 0x4);
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ error_buffer_paddr_low) != 0x8);
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ error_buffer_paddr_high) != 0xc);
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ error_buffer_element_length) != 0x10);
+ BUILD_BUG_ON(offsetof(struct sis_base_struct,
+ error_buffer_num_elements) != 0x14);
+ BUILD_BUG_ON(sizeof(struct sis_base_struct) != 0x18);
+}
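
sis_wait_for_ctrl_ready() and sis_send_sync_cmd() above share one loop shape: compute a jiffies deadline, read a status or doorbell register, sleep between polls with msleep(), and give up with -ETIMEDOUT once time_after() reports the deadline has passed. The following is a stand-alone userspace rendering of that pattern, with clock_gettime()/nanosleep() in place of jiffies and msleep(), and a callback standing in for the register read; none of these names are smartpqi code:

#include <stdbool.h>
#include <errno.h>
#include <time.h>

/* Stand-in for "read the doorbell/status register and test a bit". */
typedef bool (*poll_fn)(void *arg);

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/*
 * Poll until check_done() reports completion or timeout_ms elapses,
 * sleeping interval_ms between polls, the same structure as the
 * jiffies/time_after()/msleep() loops in smartpqi_sis.c. Sleeping at
 * the top of the loop mirrors sis_send_sync_cmd(), which gives the
 * controller time to start on the command before the first poll.
 */
static int poll_with_timeout(poll_fn check_done, void *arg,
        int timeout_ms, int interval_ms)
{
        long long deadline = now_ms() + timeout_ms;
        struct timespec delay = {
                .tv_sec = interval_ms / 1000,
                .tv_nsec = (interval_ms % 1000) * 1000000L,
        };

        while (1) {
                nanosleep(&delay, NULL);
                if (check_done(arg))
                        return 0;
                if (now_ms() > deadline)
                        return -ETIMEDOUT;
        }
}

static int polls_remaining = 3;

static bool fake_done(void *arg)
{
        (void)arg;
        return --polls_remaining <= 0;  /* "completes" on the third poll */
}

int main(void)
{
        return poll_with_timeout(fake_done, NULL, 1000, 10) ? 1 : 0;
}
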
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
new file mode 100644
index 000000000000..bd6e7b08338e
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -0,0 +1,34 @@
+/*
+ * driver for Microsemi PQI-based storage controllers
+ * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#if !defined(_SMARTPQI_SIS_H)
+#define _SMARTPQI_SIS_H
+
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
+int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
+int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
+int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
+void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
+void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
+u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
+
+#endif /* _SMARTPQI_SIS_H */
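
The header above is the entire SIS-mode surface the rest of the driver uses. Read together with smartpqi_sis.c, the declarations suggest a bring-up sequence roughly like the sketch below; the real sequencing and its error handling live in smartpqi_init.c (not shown in this hunk), so treat the ordering as an inference, and note that the error buffer must already be DMA-mapped before sis_init_base_struct_addr() runs, since that routine passes ctrl_info->error_buffer_dma_handle to the firmware. The caller is hypothetical and assumes smartpqi.h and smartpqi_sis.h are included:

/* Hypothetical caller, for illustration only; the real sequencing is
 * done by the controller-init path in smartpqi_init.c. */
static int example_sis_bringup(struct pqi_ctrl_info *ctrl_info)
{
        int rc;

        rc = sis_wait_for_ctrl_ready(ctrl_info);        /* firmware up, not in panic */
        if (rc)
                return rc;

        rc = sis_get_ctrl_properties(ctrl_info);        /* controller speaks PQI? */
        if (rc)
                return rc;

        rc = sis_get_pqi_capabilities(ctrl_info);       /* limits returned in mailboxes 1-5 */
        if (rc)
                return rc;

        rc = sis_init_base_struct_addr(ctrl_info);      /* hand the error-buffer layout to firmware */
        if (rc)
                return rc;

        sis_enable_msix(ctrl_info);                     /* set the MSI-X bit in the host doorbell */

        return 0;
}
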
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ed179348de80..bed2bbd6b923 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -83,7 +83,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt);
static int sr_done(struct scsi_cmnd *);
static int sr_runtime_suspend(struct device *dev);
-static struct dev_pm_ops sr_pm_ops = {
+static const struct dev_pm_ops sr_pm_ops = {
.runtime_suspend = sr_runtime_suspend,
};
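
The sr.c hunk above is the routine constification of an ops table: sr_pm_ops is a structure of function pointers that is filled in at build time and never written afterwards, so declaring it const lets it live in read-only data. The idiom in generic form (demo_ops and the other demo_* names are invented, not kernel types):

/* An ops table that is only ever read can be const, so it ends up in
 * read-only memory rather than in a writable data section. */
struct demo_ops {
        int (*runtime_suspend)(void *dev);
};

static int demo_runtime_suspend(void *dev)
{
        (void)dev;              /* nothing to do in this illustration */
        return 0;
}

static const struct demo_ops demo_pm_ops = {
        .runtime_suspend = demo_runtime_suspend,
};
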
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5d00e514ff28..d32e3ba8863e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1874,7 +1874,7 @@ static void sym2_io_resume(struct pci_dev *pdev)
spin_lock_irq(shost->host_lock);
if (sym_data->io_reset)
- complete_all(sym_data->io_reset);
+ complete(sym_data->io_reset);
spin_unlock_irq(shost->host_lock);
}
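
The sym53c8xx_2 change above replaces complete_all() with complete(). Both mark a struct completion as done, but complete() wakes a single waiter, while complete_all() wakes every waiter and leaves the completion signalled until it is re-initialised. With only one possible waiter on sym_data->io_reset, complete() is sufficient and does not leave the completion permanently done. A minimal kernel-context sketch of the single-waiter pattern (the demo_* names are invented; this is not sym53c8xx code):

#include <linux/completion.h>

static DECLARE_COMPLETION(demo_done);

/* One context blocks here... */
static void demo_waiter(void)
{
        wait_for_completion(&demo_done);        /* sleeps until demo_done is completed */
}

/* ...and exactly one other context signals it. */
static void demo_signaller(void)
{
        /*
         * complete() wakes the single waiter. complete_all() would also
         * work, but additionally leaves demo_done signalled for any future
         * waiter until reinit_completion() is called.
         */
        complete(&demo_done);
}
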
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
deleted file mode 100644
index 8a8608ac62e6..000000000000
--- a/drivers/scsi/t128.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Trantor T128/T128F/T228 driver
- * Note : architecturally, the T100 and T130 are different and won't
- * work
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- *
- * For more information, please consult
- *
- * Trantor Systems, Ltd.
- * T128/T128F/T228 SCSI Host Adapter
- * Hardware Specifications
- *
- * Trantor Systems, Ltd.
- * 5415 Randall Place
- * Fremont, CA 94538
- * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
- */
-
-/*
- * The card is detected and initialized in one of several ways :
- * 1. Autoprobe (default) - since the board is memory mapped,
- * a BIOS signature is scanned for to locate the registers.
- * An interrupt is triggered to autoprobe for the interrupt
- * line.
- *
- * 2. With command line overrides - t128=address,irq may be
- * used on the LILO command line to override the defaults.
- *
- * 3. With the T128_OVERRIDE compile time define. This is
- * specified as an array of address, irq tuples. Ie, for
- * one board at the default 0xcc000 address, IRQ5, I could say
- * -DT128_OVERRIDE={{0xcc000, 5}}
- *
- * Note that if the override methods are used, place holders must
- * be specified for other boards in the system.
- *
- * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
- * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
- *
- * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
- * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
- * cc000 off off
- * c8000 off on
- * dc000 on off
- * d8000 on on
- *
- *
- * Interrupts
- * There is a 12 pin jumper block, jp1, numbered as follows :
- * T128 (JP1) T128F (J5)
- * 2 4 6 8 10 12 11 9 7 5 3 1
- * 1 3 5 7 9 11 12 10 8 6 4 2
- *
- * 3 2-4
- * 5 1-3
- * 7 3-5
- * T128F only
- * 10 8-10
- * 12 7-9
- * 14 10-12
- * 15 9-11
- */
-
-#include <linux/io.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <scsi/scsi_host.h>
-#include "t128.h"
-#include "NCR5380.h"
-
-static struct override {
- unsigned long address;
- int irq;
-} overrides
-#ifdef T128_OVERRIDE
- [] __initdata = T128_OVERRIDE;
-#else
- [4] __initdata = {{0, IRQ_AUTO}, {0, IRQ_AUTO},
- {0 ,IRQ_AUTO}, {0, IRQ_AUTO}};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-static struct base {
- unsigned int address;
- int noauto;
-} bases[] __initdata = {
- { 0xcc000, 0}, { 0xc8000, 0}, { 0xdc000, 0}, { 0xd8000, 0}
-};
-
-#define NO_BASES ARRAY_SIZE(bases)
-
-static struct signature {
- const char *string;
- int offset;
-} signatures[] __initdata = {
-{"TSROM: SCSI BIOS, Version 1.12", 0x36},
-};
-
-#define NO_SIGNATURES ARRAY_SIZE(signatures)
-
-#ifndef MODULE
-/*
- * Function : t128_setup(char *str, int *ints)
- *
- * Purpose : LILO command line initialization of the overrides array,
- *
- * Inputs : str - unused, ints - array of integer parameters with ints[0]
- * equal to the number of ints.
- *
- */
-
-static int __init t128_setup(char *str)
-{
- static int commandline_current;
- int i;
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- if (ints[0] != 2)
- printk("t128_setup : usage t128=address,irq\n");
- else
- if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].address = ints[1];
- overrides[commandline_current].irq = ints[2];
- for (i = 0; i < NO_BASES; ++i)
- if (bases[i].address == ints[1]) {
- bases[i].noauto = 1;
- break;
- }
- ++commandline_current;
- }
- return 1;
-}
-
-__setup("t128=", t128_setup);
-#endif
-
-/*
- * Function : int t128_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : detects and initializes T128,T128F, or T228 controllers
- * that were autoprobed, overridden on the LILO command line,
- * or specified at compile time.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- *
- * Returns : 1 if a host adapter was found, 0 if not.
- *
- */
-
-static int __init t128_detect(struct scsi_host_template *tpnt)
-{
- static int current_override, current_base;
- struct Scsi_Host *instance;
- unsigned long base;
- void __iomem *p;
- int sig, count;
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- base = 0;
- p = NULL;
-
- if (overrides[current_override].address) {
- base = overrides[current_override].address;
- p = ioremap(bases[current_base].address, 0x2000);
- if (!p)
- base = 0;
- } else
- for (; !base && (current_base < NO_BASES); ++current_base) {
- dprintk(NDEBUG_INIT, "t128: probing address 0x%08x\n",
- bases[current_base].address);
- if (bases[current_base].noauto)
- continue;
- p = ioremap(bases[current_base].address, 0x2000);
- if (!p)
- continue;
- for (sig = 0; sig < NO_SIGNATURES; ++sig)
- if (check_signature(p + signatures[sig].offset,
- signatures[sig].string,
- strlen(signatures[sig].string))) {
- base = bases[current_base].address;
- dprintk(NDEBUG_INIT, "t128: detected board\n");
- goto found;
- }
- iounmap(p);
- }
-
- dprintk(NDEBUG_INIT, "t128: base = 0x%08x\n", (unsigned int)base);
-
- if (!base)
- break;
-
-found:
- instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
- if(instance == NULL)
- goto out_unmap;
-
- instance->base = base;
- ((struct NCR5380_hostdata *)instance->hostdata)->base = p;
-
- if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
- goto out_unregister;
-
- NCR5380_maybe_reset_bus(instance);
-
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
-
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
-
- if (instance->irq != NO_IRQ)
- if (request_irq(instance->irq, t128_intr, 0, "t128",
- instance)) {
- printk("scsi%d : IRQ%d not free, interrupts disabled\n",
- instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
-
- if (instance->irq == NO_IRQ) {
- printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
- dprintk(NDEBUG_INIT, "scsi%d: irq = %d\n",
- instance->host_no, instance->irq);
-
- ++current_override;
- ++count;
- }
- return count;
-
-out_unregister:
- scsi_unregister(instance);
-out_unmap:
- iounmap(p);
- return count;
-}
-
-static int t128_release(struct Scsi_Host *shost)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(shost);
-
- if (shost->irq != NO_IRQ)
- free_irq(shost->irq, shost);
- NCR5380_exit(shost);
- scsi_unregister(shost);
- iounmap(hostdata->base);
- return 0;
-}
-
-/*
- * Function : int t128_biosparam(Disk * disk, struct block_device *dev, int *ip)
- *
- * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
- * the specified device / size.
- *
- * Inputs : size = size of device in sectors (512 bytes), dev = block device
- * major / minor, ip[] = {heads, sectors, cylinders}
- *
- * Returns : always 0 (success), initializes ip
- *
- */
-
-/*
- * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
- * using hard disks on a trantor should verify that this mapping corresponds
- * to that used by the BIOS / ASPI driver by running the linux fdisk program
- * and matching the H_C_S coordinates to what DOS uses.
- */
-
-static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int *ip)
-{
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = capacity >> 11;
- return 0;
-}
-
-/*
- * Function : int t128_pread (struct Scsi_Host *instance,
- * unsigned char *dst, int len)
- *
- * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
- * dst
- *
- * Inputs : dst = destination, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
- */
-
-static inline int t128_pread(struct Scsi_Host *instance,
- unsigned char *dst, int len)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- void __iomem *reg, *base = hostdata->base;
- unsigned char *d = dst;
- register int i = len;
-
- reg = base + T_DATA_REG_OFFSET;
-
-#if 0
- for (; i; --i) {
- while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
-#else
- while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
- for (; i; --i) {
-#endif
- *d++ = readb(reg);
- }
-
- if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
- unsigned char tmp;
- void __iomem *foo = base + T_CONTROL_REG_OFFSET;
- tmp = readb(foo);
- writeb(tmp | T_CR_CT, foo);
- writeb(tmp, foo);
- printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
- instance->host_no);
- return -1;
- } else
- return 0;
-}
-
-/*
- * Function : int t128_pwrite (struct Scsi_Host *instance,
- * unsigned char *src, int len)
- *
- * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
- * src
- *
- * Inputs : src = source, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog
- * timeout.
- */
-
-static inline int t128_pwrite(struct Scsi_Host *instance,
- unsigned char *src, int len)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- void __iomem *reg, *base = hostdata->base;
- unsigned char *s = src;
- register int i = len;
-
- reg = base + T_DATA_REG_OFFSET;
-
-#if 0
- for (; i; --i) {
- while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
-#else
- while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
- for (; i; --i) {
-#endif
- writeb(*s++, reg);
- }
-
- if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
- unsigned char tmp;
- void __iomem *foo = base + T_CONTROL_REG_OFFSET;
- tmp = readb(foo);
- writeb(tmp | T_CR_CT, foo);
- writeb(tmp, foo);
- printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
- instance->host_no);
- return -1;
- } else
- return 0;
-}
-
-MODULE_LICENSE("GPL");
-
-#include "NCR5380.c"
-
-static struct scsi_host_template driver_template = {
- .name = "Trantor T128/T128F/T228",
- .detect = t128_detect,
- .release = t128_release,
- .proc_name = "t128",
- .info = t128_info,
- .queuecommand = t128_queue_command,
- .eh_abort_handler = t128_abort,
- .eh_bus_reset_handler = t128_bus_reset,
- .bios_param = t128_biosparam,
- .can_queue = 32,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
- .cmd_size = NCR5380_CMD_SIZE,
- .max_sectors = 128,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
deleted file mode 100644
index c95bcd839109..000000000000
--- a/drivers/scsi/t128.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Trantor T128/T128F/T228 defines
- * Note : architecturally, the T100 and T128 are different and won't work
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 440-4894
- *
- * For more information, please consult
- *
- * Trantor Systems, Ltd.
- * T128/T128F/T228 SCSI Host Adapter
- * Hardware Specifications
- *
- * Trantor Systems, Ltd.
- * 5415 Randall Place
- * Fremont, CA 94538
- * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
- */
-
-#ifndef T128_H
-#define T128_H
-
-/*
- * The trantor boards are memory mapped. They use an NCR5380 or
- * equivalent (my sample board had part second sourced from ZILOG).
- * NCR's recommended "Pseudo-DMA" architecture is used, where
- * a PAL drives the DMA signals on the 5380 allowing fast, blind
- * transfers with proper handshaking.
- */
-
-/*
- * Note : a boot switch is provided for the purpose of informing the
- * firmware to boot or not boot from attached SCSI devices. So, I imagine
- * there are fewer people who've yanked the ROM like they do on the Seagate
- * to make bootup faster, and I'll probably use this for autodetection.
- */
-#define T_ROM_OFFSET 0
-
-/*
- * Note : my sample board *WAS NOT* populated with the SRAM, so this
- * can't be used for autodetection without a ROM present.
- */
-#define T_RAM_OFFSET 0x1800
-
-/*
- * All of the registers are allocated 32 bytes of address space, except
- * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
- */
-#define T_CONTROL_REG_OFFSET 0x1c00 /* rw */
-#define T_CR_INT 0x10 /* Enable interrupts */
-#define T_CR_CT 0x02 /* Reset watchdog timer */
-
-#define T_STATUS_REG_OFFSET 0x1c20 /* ro */
-#define T_ST_BOOT 0x80 /* Boot switch */
-#define T_ST_S3 0x40 /* User settable switches, */
-#define T_ST_S2 0x20 /* read 0 when switch is on, 1 off */
-#define T_ST_S1 0x10
-#define T_ST_PS2 0x08 /* Set for Microchannel 228 */
-#define T_ST_RDY 0x04 /* 5380 DRQ */
-#define T_ST_TIM 0x02 /* indicates 40us watchdog timer fired */
-#define T_ST_ZERO 0x01 /* Always zero */
-
-#define T_5380_OFFSET 0x1d00 /* 8 registers here, see NCR5380.h */
-
-#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
-
-#define NCR5380_implementation_fields \
- void __iomem *base
-
-#define T128_address(reg) \
- (((struct NCR5380_hostdata *)shost_priv(instance))->base + T_5380_OFFSET + ((reg) * 0x20))
-
-#define NCR5380_read(reg) readb(T128_address(reg))
-#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
-#define NCR5380_dma_recv_setup t128_pread
-#define NCR5380_dma_send_setup t128_pwrite
-#define NCR5380_dma_residual(instance) (0)
-
-#define NCR5380_intr t128_intr
-#define NCR5380_queue_command t128_queue_command
-#define NCR5380_abort t128_abort
-#define NCR5380_bus_reset t128_bus_reset
-#define NCR5380_info t128_info
-
-#define NCR5380_io_delay(x) udelay(x)
-
-/* 15 14 12 10 7 5 3
- 1101 0100 1010 1000 */
-
-#define T128_IRQS 0xc4a8
-
-#endif /* T128_H */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
deleted file mode 100644
index 14eb50b95a1e..000000000000
--- a/drivers/scsi/u14-34f.c
+++ /dev/null
@@ -1,1971 +0,0 @@
-/*
- * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
- *
- * 03 Jun 2003 Rev. 8.10 for linux-2.5.70
- * + Update for new IRQ API.
- * + Use "goto" when appropriate.
- * + Drop u14-34f.h.
- * + Update for new module_param API.
- * + Module parameters can now be specified only in the
- * same format as the kernel boot options.
- *
- * boot option old module param
- * ----------- ------------------
- * addr,... io_port=addr,...
- * lc:[y|n] linked_comm=[1|0]
- * mq:xx max_queue_depth=xx
- * tm:[0|1|2] tag_mode=[0|1|2]
- * et:[y|n] ext_tran=[1|0]
- * of:[y|n] have_old_firmware=[1|0]
- *
- * A valid example using the new parameter format is:
- * modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4"
- *
- * which is equivalent to the old format:
- * modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \
- * max_queue_depth=4
- *
- * With actual module code, u14-34f and u14_34f are equivalent
- * as module parameter names.
- *
- * 12 Feb 2003 Rev. 8.04 for linux 2.5.60
- * + Release irq before calling scsi_register.
- *
- * 12 Nov 2002 Rev. 8.02 for linux 2.5.47
- * + Release driver_lock before calling scsi_register.
- *
- * 11 Nov 2002 Rev. 8.01 for linux 2.5.47
- * + Fixed bios_param and scsicam_bios_param calling parameters.
- *
- * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4
- * + Use new tcq and adjust_queue_depth api.
- * + New command line option (tm:[0-2]) to choose the type of tags:
- * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags.
- * Default is tm:0 (tagged commands disabled).
- * For compatibility the "tc:" option is an alias of the "tm:"
- * option; tc:n is equivalent to tm:0 and tc:y is equivalent to
- * tm:1.
- *
- * 10 Oct 2002 Rev. 7.70 for linux 2.5.42
- * + Foreport from revision 6.70.
- *
- * 25 Jun 2002 Rev. 6.70 for linux 2.4.19
- * + Fixed endian-ness problem due to bitfields.
- *
- * 21 Feb 2002 Rev. 6.52 for linux 2.4.18
- * + Backport from rev. 7.22 (use io_request_lock).
- *
- * 20 Feb 2002 Rev. 7.22 for linux 2.5.5
- * + Remove any reference to virt_to_bus().
- * + Fix pio hang while detecting multiple HBAs.
- *
- * 01 Jan 2002 Rev. 7.20 for linux 2.5.1
- * + Use the dynamic DMA mapping API.
- *
- * 19 Dec 2001 Rev. 7.02 for linux 2.5.1
- * + Use SCpnt->sc_data_direction if set.
- * + Use sglist.page instead of sglist.address.
- *
- * 11 Dec 2001 Rev. 7.00 for linux 2.5.1
- * + Use host->host_lock instead of io_request_lock.
- *
- * 1 May 2001 Rev. 6.05 for linux 2.4.4
- * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d)
- *
- * 25 Jan 2001 Rev. 6.03 for linux 2.4.0
- * + "check_region" call replaced by "request_region".
- *
- * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11
- * + Removed old scsi error handling support.
- * + The obsolete boot option flag eh:n is silently ignored.
- * + Removed error messages while a disk drive is powered up at
- * boot time.
- * + Improved boot messages: all tagged capable device are
- * indicated as "tagged".
- *
- * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18
- * + Updated to the new __setup interface for boot command line options.
- * + When loaded as a module, accepts the new parameter boot_options
- * which value is a string with the same format of the kernel boot
- * command line options. A valid example is:
- * modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"'
- *
- * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11
- * + Removed pre-2.2 source code compatibility.
- *
- * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
- * Added command line option (et:[y|n]) to use the existing
- * translation (returned by scsicam_bios_param) as disk geometry.
- * The default is et:n, which uses the disk geometry jumpered
- * on the board.
- * The default value et:n is compatible with all previous revisions
- * of this driver.
- *
- * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
- * Increased busy timeout from 10 msec. to 200 msec. while
- * processing interrupts.
- *
- * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
- * Improved abort handling during the eh recovery process.
- *
- * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
- * The driver is now fully SMP safe, including the
- * abort and reset routines.
- * Added command line options (eh:[y|n]) to choose between
- * new_eh_code and the old scsi code.
- * If linux version >= 2.1.101 the default is eh:y, while the eh
- * option is ignored for previous releases and the old scsi code
- * is used.
- *
- * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
- * Reworked interrupt handler.
- *
- * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
- * Major reliability improvement: when a batch with overlapping
- * requests is detected, requests are queued one at a time
- * eliminating any possible board or drive reordering.
- *
- * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
- * Improved SMP support (if linux version >= 2.1.95).
- *
- * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
- * Performance improvement: when sequential i/o is detected,
- * always use direct sort instead of reverse sort.
- *
- * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
- * io_port is now unsigned long.
- *
- * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
- * Use new scsi error handling code (if linux version >= 2.1.88).
- * Use new interrupt code.
- *
- * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
- * Use of udelay inside the wait loops to avoid timeout
- * problems with fast cpus.
- * Removed check for useless calls to the interrupt service
- * routine (reported on SMP systems only).
- * At initialization time "sorted/unsorted" is displayed instead
- * of "linked/unlinked" to reinforce the fact that "linking" is
- * nothing but "elevator sorting" in the actual implementation.
- *
- * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
- * Use of serial_number_at_timeout in abort and reset processing.
- * Use of the __initfunc and __initdata macro in setup code.
- * Minor cleanups in the list_statistics code.
- *
- * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
- * When loading as a module, parameter passing is now supported
- * both in 2.0 and in 2.1 style.
- * Fixed data transfer direction for some SCSI opcodes.
- * Immediate acknowledge to request sense commands.
- * Linked commands to each disk device are now reordered by elevator
- * sorting. Rare cases in which reordering of write requests could
- * cause wrong results are managed.
- *
- * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
- * Added command line options to enable/disable linked commands
- * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
- * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
- * Improved command linking.
- *
- * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
- * Added linked command support.
- *
- * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
- * Added queue depth adjustment.
- *
- * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
- * The list of i/o ports to be probed can be overwritten by the
- * "u14-34f=port0,port1,...." boot command line option.
- * Scatter/gather lists are now allocated by a number of kmalloc
- * calls, in order to avoid the previous size limit of 64Kb.
- *
- * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
- * Added multichannel support.
- *
- * 27 Sep 1996 rev. 2.12 for linux 2.1.0
- * Portability cleanups (virtual/bus addressing, little/big endian
- * support).
- *
- * 09 Jul 1996 rev. 2.11 for linux 2.0.4
- * "Data over/under-run" no longer implies a redo on all targets.
- * Number of internal retries is now limited.
- *
- * 16 Apr 1996 rev. 2.10 for linux 1.3.90
- * New argument "reset_flags" to the reset routine.
- *
- * 21 Jul 1995 rev. 2.02 for linux 1.3.11
- * Fixed Data Transfer Direction for some SCSI commands.
- *
- * 13 Jun 1995 rev. 2.01 for linux 1.2.10
- * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
- * the firmware prom is not the latest one (28008-006).
- *
- * 11 Mar 1995 rev. 2.00 for linux 1.2.0
- * Fixed a bug which prevented media change detection for removable
- * disk drives.
- *
- * 23 Feb 1995 rev. 1.18 for linux 1.1.94
- * Added a check for scsi_register returning NULL.
- *
- * 11 Feb 1995 rev. 1.17 for linux 1.1.91
- * U14F qualified to run with 32 sglists.
- * Now DEBUG_RESET is disabled by default.
- *
- * 9 Feb 1995 rev. 1.16 for linux 1.1.90
- * Use host->wish_block instead of host->block.
- *
- * 8 Feb 1995 rev. 1.15 for linux 1.1.89
- * Cleared target_time_out counter while performing a reset.
- *
- * 28 Jan 1995 rev. 1.14 for linux 1.1.86
- * Added module support.
- * Log and do a retry when a disk drive returns a target status
- * different from zero on a recovered error.
- * Auto detects if U14F boards have an old firmware revision.
- * Max number of scatter/gather lists set to 16 for all boards
- * (most installations run fine using 33 sglists, while others
- * have problems when using more than 16).
- *
- * 16 Jan 1995 rev. 1.13 for linux 1.1.81
- * Display a message if check_region detects a port address
- * already in use.
- *
- * 15 Dec 1994 rev. 1.12 for linux 1.1.74
- * The host->block flag is set for all the detected ISA boards.
- *
- * 30 Nov 1994 rev. 1.11 for linux 1.1.68
- * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
- * Added optional support for using a single board at a time.
- *
- * 14 Nov 1994 rev. 1.10 for linux 1.1.63
- *
- * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
- * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
- *
- * This driver is a total replacement for the original UltraStor
- * scsi driver, but it supports ONLY the 14F and 34F boards.
- * It can be configured in the same kernel as the original
- * ultrastor driver so that the original U24F support is
- * preserved.
- *
- * Multiple U14F and/or U34F host adapters are supported.
- *
- * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com)
- *
- * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that redistributions of source
- * code retain the above copyright notice and this comment without
- * modification.
- *
- * WARNING: if your 14/34F board has an old firmware revision (see below)
- * you must change "#undef" into "#define" in the following
- * statement.
- */
-#undef HAVE_OLD_UX4F_FIRMWARE
-/*
- * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
- * performance SCSI-2 host adapters.
- * Here is the scoop on the various models:
- *
- * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
- * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
- * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
- *
- * This code has been tested with up to two U14F boards, using both
- * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
- * 28004-006/38004-005 (BIOS rev. 2.01).
- *
- * The latest firmware is required in order to get reliable operations when
- * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
- * of up to 50% on sequential access.
- *
- * Since the scsi_host_template structure is shared among all 14F and 34F
- * boards, the last setting of use_clustering is in effect for all of them.
- *
- * Here is a sample configuration using two U14F boards:
- *
- U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
- U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
- *
- * The boot controller must have its BIOS enabled, while other boards can
- * have their BIOS disabled, or enabled at a higher address.
- * Boards are named Ux4F0, Ux4F1..., according to the port address order in
- * the io_port[] array.
- *
- * The following facts are based on real testing results (not on
- * documentation) on the above U14F board.
- *
- * - The U14F board should be jumpered for a bus-on time less than or equal
- * to 7 microseconds, while the default is 11 microseconds. This is in
- * order to get acceptable performance while using a floppy drive and a
- * hard disk together. The jumpering for 7 microseconds is: JP13 pin 15-16,
- * JP14 pin 7-8 and pin 9-10.
- * The reduction has little impact on scsi performance.
- *
- * - If scsi bus length exceeds 3 m, the scsi bus speed needs to be reduced
- * from 10 MHz to 5 MHz (do this by inserting a jumper on JP13 pin 7-8).
- *
- * - If U14F on board firmware is older than 28004-006/38004-005,
- * the U14F board is unable to provide reliable operations if the scsi
- * request length exceeds 16Kbyte. When this length is exceeded the
- * behavior is:
- * - adapter_status equals 0x96, 0xa3, 0x93 or 0x94;
- * - adapter_status equals 0 and target_status equals 2 for all targets
- * in the next operation following the reset.
- * This sequence takes a long time (>3 seconds), so in the meantime
- * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
- * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
- * Because of this I had to DISABLE_CLUSTERING and to work around the
- * bus reset in the interrupt service routine, returning DID_BUS_BUSY
- * so that the operations are retried without complaints from the scsi.c
- * code.
- * Any reset of the scsi bus is going to kill tape operations, since
- * no retry is allowed for tapes. Bus resets are more likely when the
- * scsi bus is under heavy load.
- * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
- * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
- * larger than 16Kbyte.
- *
- * The new firmware has fixed all the above problems.
- *
- * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
- * the latest firmware prom is 28008-006. Older firmware 28008-005 has
- * problems when using more than 16 scatter/gather lists.
- *
- * The list of i/o ports to be probed can be totally replaced by the
- * boot command line option: "u14-34f=port0,port1,port2,...", where the
- * port0, port1... arguments are ISA/VESA addresses to be probed.
- * For example using "u14-34f=0x230,0x340", the driver probes only the two
- * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
- * this driver.
- *
- * After the optional list of detection probes, other possible command line
- * options are:
- *
- * et:y use disk geometry returned by scsicam_bios_param;
- * et:n use disk geometry jumpered on the board;
- * lc:y enables linked commands;
- * lc:n disables linked commands;
- * tm:0 disables tagged commands (same as tc:n);
- * tm:1 use simple queue tags (same as tc:y);
- * tm:2 use ordered queue tags (same as tc:2);
- * of:y enables old firmware support;
- * of:n disables old firmware support;
- * mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
- *
- * The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n".
- * An example using the list of detection probes could be:
- * "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n".
- *
- * When loading as a module, parameters can be specified as well.
- * The above example would be (use 1 in place of y and 0 in place of n):
- *
- * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
- * max_queue_depth=4 ext_tran=0 tag_mode=2
- *
- * ----------------------------------------------------------------------------
- * In this implementation, linked commands are designed to work with any DISK
- * or CD-ROM, since this linking has only the intent of clustering (time-wise)
- * and reordering by elevator sorting the commands directed to each device,
- * without any relation to the actual SCSI protocol between the controller
- * and the device.
- * If Q is the queue depth reported at boot time for each device (also named
- * cmds/lun) and Q > 2, whenever there is already an active command to the
- * device all other commands to the same device (up to Q-1) are kept waiting
- * in the elevator sorting queue. When the active command completes, the
- * commands in this queue are sorted by sector address. The sort is chosen
- * between increasing or decreasing by minimizing the seek distance between
- * the sector of the commands just completed and the sector of the first
- * command in the list to be sorted.
- * Trivial math assures that the unsorted average seek distance when doing
- * random seeks over S sectors is S/3.
- * When (Q-1) requests are uniformly distributed over S sectors, the average
- * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
- * average seek distance for (Q-1) random requests over S sectors is S/Q.
- * The elevator sorting hence divides the seek distance by a factor Q/3.
- * The above pure geometric remarks are valid in all cases and the
- * driver effectively reduces the seek distance by the predicted factor
- * when there are Q concurrent read i/o operations on the device, but this
- * does not necessarily result in a noticeable performance improvement:
- * your mileage may vary....
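- * As an illustration of the formulas above (not a measured figure): with a
- * queue depth Q = 8 the sorted average seek distance is S/8 instead of S/3,
- * i.e. the average seek distance is divided by Q/3, roughly 2.7.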
- *
- * Note: command reordering inside a batch of queued commands could cause
- * wrong results only if there is at least one write request and the
- * intersection (sector-wise) of all requests is not empty.
- * When the driver detects a batch including overlapping requests
- * (a really rare event) strict serial (pid) order is enforced.
- * ----------------------------------------------------------------------------
- *
- * The boards are named Ux4F0, Ux4F1,... according to the detection order.
- *
- * In order to support multiple ISA boards in a reliable way,
- * the driver sets host->wish_block = TRUE for all ISA boards.
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/stat.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsicam.h>
-
-static int u14_34f_detect(struct scsi_host_template *);
-static int u14_34f_release(struct Scsi_Host *);
-static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int u14_34f_eh_abort(struct scsi_cmnd *);
-static int u14_34f_eh_host_reset(struct scsi_cmnd *);
-static int u14_34f_bios_param(struct scsi_device *, struct block_device *,
- sector_t, int *);
-static int u14_34f_slave_configure(struct scsi_device *);
-
-static struct scsi_host_template driver_template = {
- .name = "UltraStor 14F/34F rev. 8.10.00 ",
- .detect = u14_34f_detect,
- .release = u14_34f_release,
- .queuecommand = u14_34f_queuecommand,
- .eh_abort_handler = u14_34f_eh_abort,
- .eh_host_reset_handler = u14_34f_eh_host_reset,
- .bios_param = u14_34f_bios_param,
- .slave_configure = u14_34f_slave_configure,
- .this_id = 7,
- .unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
- };
-
-#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
-
-/* Values for the PRODUCT_ID ports for the 14/34F */
-#define PRODUCT_ID1 0x56
-#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
-
-/* Subversion values */
-#define ISA 0
-#define ESA 1
-
-#define OP_HOST_ADAPTER 0x1
-#define OP_SCSI 0x2
-#define OP_RESET 0x4
-#define DTD_SCSI 0x0
-#define DTD_IN 0x1
-#define DTD_OUT 0x2
-#define DTD_NONE 0x3
-#define HA_CMD_INQUIRY 0x1
-#define HA_CMD_SELF_DIAG 0x2
-#define HA_CMD_READ_BUFF 0x3
-#define HA_CMD_WRITE_BUFF 0x4
-
-#undef DEBUG_LINKED_COMMANDS
-#undef DEBUG_DETECT
-#undef DEBUG_INTERRUPT
-#undef DEBUG_RESET
-#undef DEBUG_GENERATE_ERRORS
-#undef DEBUG_GENERATE_ABORTS
-#undef DEBUG_GEOMETRY
-
-#define MAX_ISA 3
-#define MAX_VESA 1
-#define MAX_EISA 0
-#define MAX_PCI 0
-#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
-#define MAX_CHANNEL 1
-#define MAX_LUN 8
-#define MAX_TARGET 8
-#define MAX_MAILBOXES 16
-#define MAX_SGLIST 32
-#define MAX_SAFE_SGLIST 16
-#define MAX_INTERNAL_RETRIES 64
-#define MAX_CMD_PER_LUN 2
-#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
-
-#define SKIP ULONG_MAX
-#define FALSE 0
-#define TRUE 1
-#define FREE 0
-#define IN_USE 1
-#define LOCKED 2
-#define IN_RESET 3
-#define IGNORE 4
-#define READY 5
-#define ABORTING 6
-#define NO_DMA 0xff
-#define MAXLOOP 10000
-#define TAG_DISABLED 0
-#define TAG_SIMPLE 1
-#define TAG_ORDERED 2
-
-#define REG_LCL_MASK 0
-#define REG_LCL_INTR 1
-#define REG_SYS_MASK 2
-#define REG_SYS_INTR 3
-#define REG_PRODUCT_ID1 4
-#define REG_PRODUCT_ID2 5
-#define REG_CONFIG1 6
-#define REG_CONFIG2 7
-#define REG_OGM 8
-#define REG_ICM 12
-#define REGION_SIZE 13UL
-#define BSY_ASSERTED 0x01
-#define IRQ_ASSERTED 0x01
-#define CMD_RESET 0xc0
-#define CMD_OGM_INTR 0x01
-#define CMD_CLR_INTR 0x01
-#define CMD_ENA_INTR 0x81
-#define ASOK 0x00
-#define ASST 0x91
-
-#define YESNO(a) ((a) ? 'y' : 'n')
-#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
-
-#define PACKED __attribute__((packed))
-
-struct sg_list {
- unsigned int address; /* Segment Address */
- unsigned int num_bytes; /* Segment Length */
- };
-
-/* MailBox SCSI Command Packet */
-struct mscp {
-
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3;
- unsigned char lun: 3, channel:2, target:3;
-#else
- unsigned char opcode: 3, /* type of command */
- xdir: 2, /* data transfer direction */
- dcn: 1, /* disable disconnect */
- ca: 1, /* use cache (if available) */
- sg: 1; /* scatter/gather operation */
- unsigned char target: 3, /* SCSI target id */
- channel: 2, /* SCSI channel number */
- lun: 3; /* SCSI logical unit number */
-#endif
-
- unsigned int data_address PACKED; /* transfer data pointer */
- unsigned int data_len PACKED; /* length in bytes */
- unsigned int link_address PACKED; /* for linking command chains */
- unsigned char clink_id; /* identifies command in chain */
- unsigned char use_sg; /* (if sg is set) 8 bytes per list */
- unsigned char sense_len;
- unsigned char cdb_len; /* 6, 10, or 12 */
- unsigned char cdb[12]; /* SCSI Command Descriptor Block */
- unsigned char adapter_status; /* non-zero indicates HA error */
- unsigned char target_status; /* non-zero indicates target error */
- unsigned int sense_addr PACKED;
-
- /* Additional fields begin here. */
- struct scsi_cmnd *SCpnt;
- unsigned int cpp_index; /* cp index */
-
- /* The whole cp structure is zero filled by queuecommand except the
- following CP_TAIL_SIZE bytes, which are initialized by detect */
- dma_addr_t cp_dma_addr; /* dma handle for this cp structure */
- struct sg_list *sglist; /* pointer to the allocated SG list */
- };
-
-#define CP_TAIL_SIZE (sizeof(struct sg_list *) + sizeof(dma_addr_t))
-
-struct hostdata {
- struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
- unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
- unsigned int last_cp_used; /* Index of last mailbox used */
- unsigned int iocount; /* Total i/o done for this board */
- int board_number; /* Number of this board */
- char board_name[16]; /* Name of this board */
- int in_reset; /* True if board is doing a reset */
- int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
- int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
- unsigned int retries; /* Number of internal retries */
- unsigned long last_retried_pid; /* Pid of last retried command */
- unsigned char subversion; /* Bus type, either ISA or ESA */
- struct pci_dev *pdev; /* Always NULL */
- unsigned char heads;
- unsigned char sectors;
- char board_id[256]; /* data from INQUIRY on this board */
- };
-
-static struct Scsi_Host *sh[MAX_BOARDS + 1];
-static const char *driver_name = "Ux4F";
-static char sha[MAX_BOARDS];
-static DEFINE_SPINLOCK(driver_lock);
-
-/* Initialize num_boards so that ihdlr can work while detect is in progress */
-static unsigned int num_boards = MAX_BOARDS;
-
-static unsigned long io_port[] = {
-
- /* Space for MAX_INT_PARAM ports usable while loading as a module */
- SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
- SKIP, SKIP,
-
- /* Possible ISA/VESA ports */
- 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,
-
- /* End of list */
- 0x0
- };
-
-#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
-#define BN(board) (HD(board)->board_name)
-
-/* Device is Little Endian */
-#define H2DEV(x) cpu_to_le32(x)
-#define DEV2H(x) le32_to_cpu(x)
-
-static irqreturn_t do_interrupt_handler(int, void *);
-static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int);
-static int do_trace = FALSE;
-static int setup_done = FALSE;
-static int link_statistics;
-static int ext_tran = FALSE;
-
-#if defined(HAVE_OLD_UX4F_FIRMWARE)
-static int have_old_firmware = TRUE;
-#else
-static int have_old_firmware = FALSE;
-#endif
-
-#if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE)
-static int tag_mode = TAG_SIMPLE;
-#else
-static int tag_mode = TAG_DISABLED;
-#endif
-
-#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
-static int linked_comm = TRUE;
-#else
-static int linked_comm = FALSE;
-#endif
-
-#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
-static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
-#else
-static int max_queue_depth = MAX_CMD_PER_LUN;
-#endif
-
-#define MAX_INT_PARAM 10
-#define MAX_BOOT_OPTIONS_SIZE 256
-static char boot_options[MAX_BOOT_OPTIONS_SIZE];
-
-#if defined(MODULE)
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0);
-MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \
-"option." \
-" Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\"");
-MODULE_AUTHOR("Dario Ballabio");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver");
-
-#endif
-
-static int u14_34f_slave_configure(struct scsi_device *dev) {
- int j, tqd, utqd;
- char *tag_suffix, *link_suffix;
- struct Scsi_Host *host = dev->host;
-
- j = ((struct hostdata *) host->hostdata)->board_number;
-
- utqd = MAX_CMD_PER_LUN;
- tqd = max_queue_depth;
-
- if (TLDEV(dev->type) && dev->tagged_supported)
-
- if (tag_mode == TAG_SIMPLE) {
- scsi_change_queue_depth(dev, tqd);
- tag_suffix = ", simple tags";
- }
- else if (tag_mode == TAG_ORDERED) {
- scsi_change_queue_depth(dev, tqd);
- tag_suffix = ", ordered tags";
- }
- else {
- scsi_change_queue_depth(dev, tqd);
- tag_suffix = ", no tags";
- }
-
- else if (TLDEV(dev->type) && linked_comm) {
- scsi_change_queue_depth(dev, tqd);
- tag_suffix = ", untagged";
- }
-
- else {
- scsi_change_queue_depth(dev, utqd);
- tag_suffix = "";
- }
-
- if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
- link_suffix = ", sorted";
- else if (TLDEV(dev->type))
- link_suffix = ", unsorted";
- else
- link_suffix = "";
-
- sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n",
- dev->queue_depth, link_suffix, tag_suffix);
-
- return FALSE;
-}
-
-static int wait_on_busy(unsigned long iobase, unsigned int loop) {
-
- while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
- udelay(1L);
- if (--loop == 0) return TRUE;
- }
-
- return FALSE;
-}
-
-static int board_inquiry(unsigned int j) {
- struct mscp *cpp;
- dma_addr_t id_dma_addr;
- unsigned int limit = 0;
- unsigned long time;
-
- id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
- sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
- cpp = &HD(j)->cp[0];
- cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp),
- PCI_DMA_BIDIRECTIONAL);
- memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
- cpp->opcode = OP_HOST_ADAPTER;
- cpp->xdir = DTD_IN;
- cpp->data_address = H2DEV(id_dma_addr);
- cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
- cpp->cdb_len = 6;
- cpp->cdb[0] = HA_CMD_INQUIRY;
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- printk("%s: board_inquiry, adapter busy.\n", BN(j));
- return TRUE;
- }
-
- HD(j)->cp_stat[0] = IGNORE;
-
- /* Clear the interrupt indication */
- outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
-
- /* Store pointer in OGM address bytes */
- outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
-
- /* Issue OGM interrupt */
- outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
-
- spin_unlock_irq(&driver_lock);
- time = jiffies;
- while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
- spin_lock_irq(&driver_lock);
-
- if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
- HD(j)->cp_stat[0] = FREE;
- printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
- return TRUE;
- }
-
- pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp),
- PCI_DMA_BIDIRECTIONAL);
- pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id),
- PCI_DMA_BIDIRECTIONAL);
- return FALSE;
-}
-
-static int port_detect(unsigned long port_base, unsigned int j,
- struct scsi_host_template *tpnt) {
- unsigned char irq, dma_channel, subversion, i;
- unsigned char in_byte;
- char *bus_type, dma_name[16];
-
- /* Allowed BIOS base addresses (NULL indicates reserved) */
- unsigned long bios_segment_table[8] = {
- 0,
- 0xc4000, 0xc8000, 0xcc000, 0xd0000,
- 0xd4000, 0xd8000, 0xdc000
- };
-
- /* Allowed IRQs */
- unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
-
- /* Allowed DMA channels for ISA (0 indicates reserved) */
- unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
-
- /* Head/sector mappings */
- struct {
- unsigned char heads;
- unsigned char sectors;
- } mapping_table[4] = {
- { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
- };
-
- struct config_1 {
-
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned char dma_channel: 2, interrupt:2,
- removable_disks_as_fixed:1, bios_segment: 3;
-#else
- unsigned char bios_segment: 3, removable_disks_as_fixed: 1,
- interrupt: 2, dma_channel: 2;
-#endif
-
- } config_1;
-
- struct config_2 {
-
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned char tfr_port: 2, bios_drive_number: 1,
- mapping_mode: 2, ha_scsi_id: 3;
-#else
- unsigned char ha_scsi_id: 3, mapping_mode: 2,
- bios_drive_number: 1, tfr_port: 2;
-#endif
-
- } config_2;
-
- char name[16];
-
- sprintf(name, "%s%d", driver_name, j);
-
- if (!request_region(port_base, REGION_SIZE, driver_name)) {
-#if defined(DEBUG_DETECT)
- printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
-#endif
- goto fail;
- }
-
- spin_lock_irq(&driver_lock);
-
- if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock;
-
- in_byte = inb(port_base + REG_PRODUCT_ID2);
-
- if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock;
-
- *(char *)&config_1 = inb(port_base + REG_CONFIG1);
- *(char *)&config_2 = inb(port_base + REG_CONFIG2);
-
- irq = interrupt_table[config_1.interrupt];
- dma_channel = dma_channel_table[config_1.dma_channel];
- subversion = (in_byte & 0x0f);
-
- /* Board detected, allocate its IRQ */
- if (request_irq(irq, do_interrupt_handler,
- (subversion == ESA) ? IRQF_SHARED : 0,
- driver_name, (void *) &sha[j])) {
- printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
- goto freelock;
- }
-
- if (subversion == ISA && request_dma(dma_channel, driver_name)) {
- printk("%s: unable to allocate DMA channel %u, detaching.\n",
- name, dma_channel);
- goto freeirq;
- }
-
- if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;
-
- spin_unlock_irq(&driver_lock);
- sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
- spin_lock_irq(&driver_lock);
-
- if (sh[j] == NULL) {
- printk("%s: unable to register host, detaching.\n", name);
- goto freedma;
- }
-
- sh[j]->io_port = port_base;
- sh[j]->unique_id = port_base;
- sh[j]->n_io_port = REGION_SIZE;
- sh[j]->base = bios_segment_table[config_1.bios_segment];
- sh[j]->irq = irq;
- sh[j]->sg_tablesize = MAX_SGLIST;
- sh[j]->this_id = config_2.ha_scsi_id;
- sh[j]->can_queue = MAX_MAILBOXES;
- sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
-
-#if defined(DEBUG_DETECT)
- {
- unsigned char sys_mask, lcl_mask;
-
- sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
- lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
- printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
- }
-#endif
-
- /* Probably a bogus host scsi id, set it to the dummy value */
- if (sh[j]->this_id == 0) sh[j]->this_id = -1;
-
- /* If BIOS is disabled, force enable interrupts */
- if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
-
- memset(HD(j), 0, sizeof(struct hostdata));
- HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
- HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
- HD(j)->subversion = subversion;
- HD(j)->pdev = NULL;
- HD(j)->board_number = j;
-
- if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
-
- if (HD(j)->subversion == ESA) {
- sh[j]->unchecked_isa_dma = FALSE;
- sh[j]->dma_channel = NO_DMA;
- sprintf(BN(j), "U34F%d", j);
- bus_type = "VESA";
- }
- else {
- unsigned long flags;
- sh[j]->unchecked_isa_dma = TRUE;
-
- flags=claim_dma_lock();
- disable_dma(dma_channel);
- clear_dma_ff(dma_channel);
- set_dma_mode(dma_channel, DMA_MODE_CASCADE);
- enable_dma(dma_channel);
- release_dma_lock(flags);
-
- sh[j]->dma_channel = dma_channel;
- sprintf(BN(j), "U14F%d", j);
- bus_type = "ISA";
- }
-
- sh[j]->max_channel = MAX_CHANNEL - 1;
- sh[j]->max_id = MAX_TARGET;
- sh[j]->max_lun = MAX_LUN;
-
- if (HD(j)->subversion == ISA && !board_inquiry(j)) {
- HD(j)->board_id[40] = 0;
-
- if (strcmp(&HD(j)->board_id[32], "06000600")) {
- printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
- printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
- BN(j), &HD(j)->board_id[32]);
- sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
- sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
- }
- }
-
- if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
- else sprintf(dma_name, "DMA %u", dma_channel);
-
- spin_unlock_irq(&driver_lock);
-
- for (i = 0; i < sh[j]->can_queue; i++)
- HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev,
- &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
-
- for (i = 0; i < sh[j]->can_queue; i++)
- if (! ((&HD(j)->cp[i])->sglist = kmalloc(
- sh[j]->sg_tablesize * sizeof(struct sg_list),
- (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
- printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
- goto release;
- }
-
- if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
- max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
-
- if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
-
- if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
- tag_mode = TAG_ORDERED;
-
- if (j == 0) {
- printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n");
- printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n",
- driver_name, YESNO(have_old_firmware), tag_mode,
- YESNO(linked_comm), max_queue_depth, YESNO(ext_tran));
- }
-
- printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
- BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
- sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
-
- if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
- printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
- BN(j), sh[j]->max_id, sh[j]->max_lun);
-
- for (i = 0; i <= sh[j]->max_channel; i++)
- printk("%s: SCSI channel %u enabled, host target ID %d.\n",
- BN(j), i, sh[j]->this_id);
-
- return TRUE;
-
-freedma:
- if (subversion == ISA) free_dma(dma_channel);
-freeirq:
- free_irq(irq, &sha[j]);
-freelock:
- spin_unlock_irq(&driver_lock);
- release_region(port_base, REGION_SIZE);
-fail:
- return FALSE;
-
-release:
- u14_34f_release(sh[j]);
- return FALSE;
-}
-
-static void internal_setup(char *str, int *ints) {
- int i, argc = ints[0];
- char *cur = str, *pc;
-
- if (argc > 0) {
-
- if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
-
- for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
-
- io_port[i] = 0;
- setup_done = TRUE;
- }
-
- while (cur && (pc = strchr(cur, ':'))) {
- int val = 0, c = *++pc;
-
- if (c == 'n' || c == 'N') val = FALSE;
- else if (c == 'y' || c == 'Y') val = TRUE;
- else val = (int) simple_strtoul(pc, NULL, 0);
-
- if (!strncmp(cur, "lc:", 3)) linked_comm = val;
- else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
- else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
- else if (!strncmp(cur, "tc:", 3)) tag_mode = val;
- else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
- else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
- else if (!strncmp(cur, "et:", 3)) ext_tran = val;
-
- if ((cur = strchr(cur, ','))) ++cur;
- }
-
- return;
-}
-
-static int option_setup(char *str) {
- int ints[MAX_INT_PARAM];
- char *cur = str;
- int i = 1;
-
- while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
- ints[i++] = simple_strtoul(cur, NULL, 0);
-
- if ((cur = strchr(cur, ',')) != NULL) cur++;
- }
-
- ints[0] = i - 1;
- internal_setup(cur, ints);
- return 1;
-}
-
-static int u14_34f_detect(struct scsi_host_template *tpnt) {
- unsigned int j = 0, k;
-
- tpnt->proc_name = "u14-34f";
-
- if(strlen(boot_options)) option_setup(boot_options);
-
-#if defined(MODULE)
- /* io_port could have been modified when loading as a module */
- if(io_port[0] != SKIP) {
- setup_done = TRUE;
- io_port[MAX_INT_PARAM] = 0;
- }
-#endif
-
- for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
-
- for (k = 0; io_port[k]; k++) {
-
- if (io_port[k] == SKIP) continue;
-
- if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
- }
-
- num_boards = j;
- return j;
-}
-
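-/* Map the sense buffer and the scatter/gather list (if any) for mailbox i
- of board j, filling in the corresponding mscp fields; bus addresses are
- stored little-endian via H2DEV(). */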
-static void map_dma(unsigned int i, unsigned int j) {
- unsigned int data_len = 0;
- unsigned int k, pci_dir;
- int count;
- struct scatterlist *sg;
- struct mscp *cpp;
- struct scsi_cmnd *SCpnt;
-
- cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
- pci_dir = SCpnt->sc_data_direction;
-
- if (SCpnt->sense_buffer)
- cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
-
- cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
-
- if (scsi_bufflen(SCpnt)) {
- count = scsi_dma_map(SCpnt);
- BUG_ON(count < 0);
-
- scsi_for_each_sg(SCpnt, sg, count, k) {
- cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
- cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
- data_len += sg->length;
- }
-
- cpp->sg = TRUE;
- cpp->use_sg = scsi_sg_count(SCpnt);
- cpp->data_address =
- H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
- cpp->use_sg * sizeof(struct sg_list),
- pci_dir));
- cpp->data_len = H2DEV(data_len);
-
- } else {
- pci_dir = PCI_DMA_BIDIRECTIONAL;
- cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
- }
-}
-
-static void unmap_dma(unsigned int i, unsigned int j) {
- unsigned int pci_dir;
- struct mscp *cpp;
- struct scsi_cmnd *SCpnt;
-
- cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
- pci_dir = SCpnt->sc_data_direction;
-
- if (DEV2H(cpp->sense_addr))
- pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
- DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
-
- scsi_dma_unmap(SCpnt);
-
- if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (DEV2H(cpp->data_address))
- pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address),
- DEV2H(cpp->data_len), pci_dir);
-}
-
-static void sync_dma(unsigned int i, unsigned int j) {
- unsigned int pci_dir;
- struct mscp *cpp;
- struct scsi_cmnd *SCpnt;
-
- cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
- pci_dir = SCpnt->sc_data_direction;
-
- if (DEV2H(cpp->sense_addr))
- pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
- DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
-
- if (scsi_sg_count(SCpnt))
- pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
- scsi_sg_count(SCpnt), pci_dir);
-
- if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (DEV2H(cpp->data_address))
- pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address),
- DEV2H(cpp->data_len), pci_dir);
-}
-
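-/* Set the mscp data transfer direction (xdir) from SCpnt->sc_data_direction;
- for DMA_BIDIRECTIONAL the direction is guessed from the opcode tables
- below, defaulting to DTD_IN. */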
-static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
- unsigned int k;
-
- static const unsigned char data_out_cmds[] = {
- 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
- 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
- 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
- };
-
- static const unsigned char data_none_cmds[] = {
- 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
- 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
- 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
- };
-
- struct mscp *cpp;
- struct scsi_cmnd *SCpnt;
-
- cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
-
- if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
- cpp->xdir = DTD_IN;
- return;
- }
- else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
- cpp->xdir = DTD_OUT;
- return;
- }
- else if (SCpnt->sc_data_direction == DMA_NONE) {
- cpp->xdir = DTD_NONE;
- return;
- }
-
- if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL)
- panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j));
-
- cpp->xdir = DTD_IN;
-
- for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
- if (SCpnt->cmnd[0] == data_out_cmds[k]) {
- cpp->xdir = DTD_OUT;
- break;
- }
-
- if (cpp->xdir == DTD_IN)
- for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
- if (SCpnt->cmnd[0] == data_none_cmds[k]) {
- cpp->xdir = DTD_NONE;
- break;
- }
-
-}
-
-static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
- unsigned int i, j, k;
- struct mscp *cpp;
-
- /* j is the board number */
- j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;
-
- if (SCpnt->host_scribble)
- panic("%s: qcomm, SCpnt %p already active.\n",
- BN(j), SCpnt);
-
- /* i is the mailbox number, look for the first free mailbox
- starting from last_cp_used */
- i = HD(j)->last_cp_used + 1;
-
- for (k = 0; k < sh[j]->can_queue; k++, i++) {
-
- if (i >= sh[j]->can_queue) i = 0;
-
- if (HD(j)->cp_stat[i] == FREE) {
- HD(j)->last_cp_used = i;
- break;
- }
- }
-
- if (k == sh[j]->can_queue) {
- printk("%s: qcomm, no free mailbox.\n", BN(j));
- return 1;
- }
-
- /* Set pointer to control packet structure */
- cpp = &HD(j)->cp[i];
-
- memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
- SCpnt->scsi_done = done;
- cpp->cpp_index = i;
- SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
-
- if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n",
- BN(j), i, SCpnt->device->channel, SCpnt->device->id,
- (u8)SCpnt->device->lun);
-
- cpp->opcode = OP_SCSI;
- cpp->channel = SCpnt->device->channel;
- cpp->target = SCpnt->device->id;
- cpp->lun = (u8)SCpnt->device->lun;
- cpp->SCpnt = SCpnt;
- cpp->cdb_len = SCpnt->cmd_len;
- memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
-
- /* Use data transfer direction SCpnt->sc_data_direction */
- scsi_to_dev_dir(i, j);
-
- /* Map DMA buffers and SG list */
- map_dma(i, j);
-
- if (linked_comm && SCpnt->device->queue_depth > 2
- && TLDEV(SCpnt->device->type)) {
- HD(j)->cp_stat[i] = READY;
- flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
- return 0;
- }
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- unmap_dma(i, j);
- SCpnt->host_scribble = NULL;
- scmd_printk(KERN_INFO, SCpnt,
- "qcomm, adapter busy.\n");
- return 1;
- }
-
- /* Store pointer in OGM address bytes */
- outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
-
- /* Issue OGM interrupt */
- outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
-
- HD(j)->cp_stat[i] = IN_USE;
- return 0;
-}
-
-static DEF_SCSI_QCMD(u14_34f_queuecommand)
-
-static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
- unsigned int i, j;
-
- j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
-
- if (SCarg->host_scribble == NULL) {
- scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
- return SUCCESS;
- }
-
- i = *(unsigned int *)SCarg->host_scribble;
- scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);
-
- if (i >= sh[j]->can_queue)
- panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- printk("%s: abort, timeout error.\n", BN(j));
- return FAILED;
- }
-
- if (HD(j)->cp_stat[i] == FREE) {
- printk("%s: abort, mbox %d is free.\n", BN(j), i);
- return SUCCESS;
- }
-
- if (HD(j)->cp_stat[i] == IN_USE) {
- printk("%s: abort, mbox %d is in use.\n", BN(j), i);
-
- if (SCarg != HD(j)->cp[i].SCpnt)
- panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
- BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
-
- if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
- printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
-
- return FAILED;
- }
-
- if (HD(j)->cp_stat[i] == IN_RESET) {
- printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
- return FAILED;
- }
-
- if (HD(j)->cp_stat[i] == LOCKED) {
- printk("%s: abort, mbox %d is locked.\n", BN(j), i);
- return SUCCESS;
- }
-
- if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
- unmap_dma(i, j);
- SCarg->result = DID_ABORT << 16;
- SCarg->host_scribble = NULL;
- HD(j)->cp_stat[i] = FREE;
- printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
- SCarg->scsi_done(SCarg);
- return SUCCESS;
- }
-
- panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
-}
-
-static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
- unsigned int i, j, k, c, limit = 0;
- unsigned long time;
- int arg_done = FALSE;
- struct scsi_cmnd *SCpnt;
-
- j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
- scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");
-
- spin_lock_irq(sh[j]->host_lock);
-
- if (SCarg->host_scribble == NULL)
- printk("%s: reset, inactive.\n", BN(j));
-
- if (HD(j)->in_reset) {
- printk("%s: reset, exit, already in reset.\n", BN(j));
- spin_unlock_irq(sh[j]->host_lock);
- return FAILED;
- }
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- printk("%s: reset, exit, timeout error.\n", BN(j));
- spin_unlock_irq(sh[j]->host_lock);
- return FAILED;
- }
-
- HD(j)->retries = 0;
-
- for (c = 0; c <= sh[j]->max_channel; c++)
- for (k = 0; k < sh[j]->max_id; k++) {
- HD(j)->target_redo[k][c] = TRUE;
- HD(j)->target_to[k][c] = 0;
- }
-
- for (i = 0; i < sh[j]->can_queue; i++) {
-
- if (HD(j)->cp_stat[i] == FREE) continue;
-
- if (HD(j)->cp_stat[i] == LOCKED) {
- HD(j)->cp_stat[i] = FREE;
- printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
- continue;
- }
-
- if (!(SCpnt = HD(j)->cp[i].SCpnt))
- panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
-
- if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
- HD(j)->cp_stat[i] = ABORTING;
- printk("%s: reset, mbox %d aborting.\n", BN(j), i);
- }
-
- else {
- HD(j)->cp_stat[i] = IN_RESET;
- printk("%s: reset, mbox %d in reset.\n", BN(j), i);
- }
-
- if (SCpnt->host_scribble == NULL)
- panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
-
- if (*(unsigned int *)SCpnt->host_scribble != i)
- panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
-
- if (SCpnt->scsi_done == NULL)
- panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
-
- if (SCpnt == SCarg) arg_done = TRUE;
- }
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- printk("%s: reset, cannot reset, timeout error.\n", BN(j));
- spin_unlock_irq(sh[j]->host_lock);
- return FAILED;
- }
-
- outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
- printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
-
-#if defined(DEBUG_RESET)
- do_trace = TRUE;
-#endif
-
- HD(j)->in_reset = TRUE;
-
- spin_unlock_irq(sh[j]->host_lock);
- time = jiffies;
- while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
- spin_lock_irq(sh[j]->host_lock);
-
- printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
-
- for (i = 0; i < sh[j]->can_queue; i++) {
-
- if (HD(j)->cp_stat[i] == IN_RESET) {
- SCpnt = HD(j)->cp[i].SCpnt;
- unmap_dma(i, j);
- SCpnt->result = DID_RESET << 16;
- SCpnt->host_scribble = NULL;
-
- /* This mailbox is still waiting for its interrupt */
- HD(j)->cp_stat[i] = LOCKED;
-
- printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
- }
-
- else if (HD(j)->cp_stat[i] == ABORTING) {
- SCpnt = HD(j)->cp[i].SCpnt;
- unmap_dma(i, j);
- SCpnt->result = DID_RESET << 16;
- SCpnt->host_scribble = NULL;
-
- /* This mailbox was never queued to the adapter */
- HD(j)->cp_stat[i] = FREE;
-
- printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
- }
-
- else
-
- /* Any other mailbox has already been set free by interrupt */
- continue;
-
- SCpnt->scsi_done(SCpnt);
- }
-
- HD(j)->in_reset = FALSE;
- do_trace = FALSE;
-
- if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
- else printk("%s: reset, exit.\n", BN(j));
-
- spin_unlock_irq(sh[j]->host_lock);
- return SUCCESS;
-}
-
-static int u14_34f_bios_param(struct scsi_device *disk,
- struct block_device *bdev, sector_t capacity, int *dkinfo) {
- unsigned int j = 0;
- unsigned int size = capacity;
-
- dkinfo[0] = HD(j)->heads;
- dkinfo[1] = HD(j)->sectors;
- dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
-
- if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) {
- dkinfo[0] = 255;
- dkinfo[1] = 63;
- dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
- }
-
-#if defined (DEBUG_GEOMETRY)
- printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name,
- dkinfo[0], dkinfo[1], dkinfo[2]);
-#endif
-
- return FALSE;
-}
-
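-/* Simple selection sort: order the keys in sk[] (ascending, or descending
- when rev is set), keeping the companion array da[] of mailbox indexes in
- step. */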
-static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
- unsigned int rev) {
- unsigned int i, j, k, y;
- unsigned long x;
-
- for (i = 0; i < n - 1; i++) {
- k = i;
-
- for (j = k + 1; j < n; j++)
- if (rev) {
- if (sk[j] > sk[k]) k = j;
- }
- else {
- if (sk[j] < sk[k]) k = j;
- }
-
- if (k != i) {
- x = sk[k]; sk[k] = sk[i]; sk[i] = x;
- y = da[k]; da[k] = da[i]; da[i] = y;
- }
- }
-
- return;
- }
-
-static int reorder(unsigned int j, unsigned long cursec,
- unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
- struct scsi_cmnd *SCpnt;
- struct mscp *cpp;
- unsigned int k, n;
- unsigned int rev = FALSE, s = TRUE, r = TRUE;
- unsigned int input_only = TRUE, overlap = FALSE;
- unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
- unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
- unsigned long ioseek = 0;
-
- static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
- static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
- static unsigned int readysorted = 0, revcount = 0;
- static unsigned long seeksorted = 0, seeknosort = 0;
-
- if (link_statistics && !(++flushcount % link_statistics))
- printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
- " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
- ovlcount, readycount, readysorted, sortcount, revcount,
- seeknosort / (readycount + 1),
- seeksorted / (readycount + 1));
-
- if (n_ready <= 1) return FALSE;
-
- for (n = 0; n < n_ready; n++) {
- k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
-
- if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
-
- if (blk_rq_pos(SCpnt->request) < minsec)
- minsec = blk_rq_pos(SCpnt->request);
- if (blk_rq_pos(SCpnt->request) > maxsec)
- maxsec = blk_rq_pos(SCpnt->request);
-
- sl[n] = blk_rq_pos(SCpnt->request);
- ioseek += blk_rq_sectors(SCpnt->request);
-
- if (!n) continue;
-
- if (sl[n] < sl[n - 1]) s = FALSE;
- if (sl[n] > sl[n - 1]) r = FALSE;
-
- if (link_statistics) {
- if (sl[n] > sl[n - 1])
- seek += sl[n] - sl[n - 1];
- else
- seek += sl[n - 1] - sl[n];
- }
-
- }
-
- if (link_statistics) {
- if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
- }
-
- if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
-
- if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
-
- if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
-
- if (!input_only) for (n = 0; n < n_ready; n++) {
- k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
-
- if (!n) continue;
-
- if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
- || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
- }
-
- if (overlap) sort(pl, il, n_ready, FALSE);
-
- if (link_statistics) {
- if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
- batchcount++; readycount += n_ready; seeknosort += seek / 1024;
- if (input_only) inputcount++;
- if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
- else seeksorted += (iseek + maxsec - minsec) / 1024;
- if (rev && !r) { revcount++; readysorted += n_ready; }
- if (!rev && !s) { sortcount++; readysorted += n_ready; }
- }
-
-#if defined(DEBUG_LINKED_COMMANDS)
- if (link_statistics && (overlap || !(flushcount % link_statistics)))
- for (n = 0; n < n_ready; n++) {
- k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- printk("%s %d.%d:%llu mb %d fc %d nr %d sec %ld ns %u"\
- " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
- (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
- (u8)SCpnt->lun, k, flushcount, n_ready,
- blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
- cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
- YESNO(overlap), cpp->xdir);
- }
-#endif
- return overlap;
-}
-
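-/* Issue to the adapter all mailboxes in READY state for this device, in the
- order chosen by reorder(); if overlapping requests were detected, only the
- first one is issued. Return early if the device already has a command
- IN_USE. */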
-static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j,
- unsigned int ihdlr) {
- struct scsi_cmnd *SCpnt;
- struct mscp *cpp;
- unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
-
- for (k = 0; k < sh[j]->can_queue; k++) {
-
- if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
-
- cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
-
- if (SCpnt->device != dev) continue;
-
- if (HD(j)->cp_stat[k] == IN_USE) return;
-
- il[n_ready++] = k;
- }
-
- if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
-
- for (n = 0; n < n_ready; n++) {
- k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
-
- if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
- scmd_printk(KERN_INFO, SCpnt,
- "%s, mbox %d, adapter"
- " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
- k);
- HD(j)->cp_stat[k] = ABORTING;
- continue;
- }
-
- outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
- outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
- HD(j)->cp_stat[k] = IN_USE;
- }
-
-}
-
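-/* Per-board interrupt handler: acknowledge the interrupt, locate the
- completed mailbox through the ICM register, translate the adapter and
- target status into a SCSI result and complete the command. */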
-static irqreturn_t ihdlr(unsigned int j)
-{
- struct scsi_cmnd *SCpnt;
- unsigned int i, k, c, status, tstatus, reg, ret;
- struct mscp *spp, *cpp;
- int irq = sh[j]->irq;
-
- /* Check if this board needs to be serviced */
- if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
-
- HD(j)->iocount++;
-
- if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
- HD(j)->iocount);
-
- /* Check if this board is still busy */
- if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
- outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
- printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
- BN(j), irq, reg, HD(j)->iocount);
- goto none;
- }
-
- ret = inl(sh[j]->io_port + REG_ICM);
-
- /* Clear interrupt pending flag */
- outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
-
- /* Find the mailbox to be serviced on this board */
- for (i = 0; i < sh[j]->can_queue; i++)
- if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break;
-
- if (i >= sh[j]->can_queue)
- panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
- (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr));
-
- cpp = &(HD(j)->cp[i]);
- spp = cpp;
-
-#if defined(DEBUG_GENERATE_ABORTS)
- if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled;
-#endif
-
- if (HD(j)->cp_stat[i] == IGNORE) {
- HD(j)->cp_stat[i] = FREE;
- goto handled;
- }
- else if (HD(j)->cp_stat[i] == LOCKED) {
- HD(j)->cp_stat[i] = FREE;
- printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
- HD(j)->iocount);
- goto handled;
- }
- else if (HD(j)->cp_stat[i] == FREE) {
- printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
- HD(j)->iocount);
- goto handled;
- }
- else if (HD(j)->cp_stat[i] == IN_RESET)
- printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
- else if (HD(j)->cp_stat[i] != IN_USE)
- panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
- BN(j), i, HD(j)->cp_stat[i]);
-
- HD(j)->cp_stat[i] = FREE;
- SCpnt = cpp->SCpnt;
-
- if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
-
- if (SCpnt->host_scribble == NULL)
- panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
- SCpnt);
-
- if (*(unsigned int *)SCpnt->host_scribble != i)
- panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
- BN(j), i, *(unsigned int *)SCpnt->host_scribble);
-
- sync_dma(i, j);
-
- if (linked_comm && SCpnt->device->queue_depth > 2
- && TLDEV(SCpnt->device->type))
- flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
-
- tstatus = status_byte(spp->target_status);
-
-#if defined(DEBUG_GENERATE_ERRORS)
- if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
- spp->adapter_status = 0x01;
-#endif
-
- switch (spp->adapter_status) {
- case ASOK: /* status OK */
-
- /* Forces a reset if a disk drive keeps returning BUSY */
- if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
- status = DID_ERROR << 16;
-
- /* If there was a bus reset, redo operation on each target */
- else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
- && HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)])
- status = DID_BUS_BUSY << 16;
-
- /* Works around a flaw in scsi.c */
- else if (tstatus == CHECK_CONDITION
- && SCpnt->device->type == TYPE_DISK
- && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
- status = DID_BUS_BUSY << 16;
-
- else
- status = DID_OK << 16;
-
- if (tstatus == GOOD)
- HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE;
-
- if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
- (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
- (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
- scmd_printk(KERN_INFO, SCpnt,
- "ihdlr, target_status 0x%x, sense key 0x%x.\n",
- spp->target_status,
- SCpnt->sense_buffer[2]);
-
- HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;
-
- if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0;
-
- break;
- case ASST: /* Selection Time Out */
-
- if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1)
- status = DID_ERROR << 16;
- else {
- status = DID_TIME_OUT << 16;
- HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++;
- }
-
- break;
-
- /* Perform a limited number of internal retries */
- case 0x93: /* Unexpected bus free */
- case 0x94: /* Target bus phase sequence failure */
- case 0x96: /* Illegal SCSI command */
- case 0xa3: /* SCSI bus reset error */
-
- for (c = 0; c <= sh[j]->max_channel; c++)
- for (k = 0; k < sh[j]->max_id; k++)
- HD(j)->target_redo[k][c] = TRUE;
-
-
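- /* fall through */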
- case 0x92: /* Data over/under-run */
-
- if (SCpnt->device->type != TYPE_TAPE
- && HD(j)->retries < MAX_INTERNAL_RETRIES) {
-
-#if defined(DID_SOFT_ERROR)
- status = DID_SOFT_ERROR << 16;
-#else
- status = DID_BUS_BUSY << 16;
-#endif
-
- HD(j)->retries++;
- HD(j)->last_retried_pid = SCpnt->serial_number;
- }
- else
- status = DID_ERROR << 16;
-
- break;
- case 0x01: /* Invalid command */
- case 0x02: /* Invalid parameters */
- case 0x03: /* Invalid data list */
- case 0x84: /* SCSI bus abort error */
- case 0x9b: /* Auto request sense error */
- case 0x9f: /* Unexpected command complete message error */
- case 0xff: /* Invalid parameter in the S/G list */
- default:
- status = DID_ERROR << 16;
- break;
- }
-
- SCpnt->result = status | spp->target_status;
-
-#if defined(DEBUG_INTERRUPT)
- if (SCpnt->result || do_trace)
-#else
- if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
- (spp->adapter_status != ASOK &&
- spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
- do_trace || msg_byte(spp->target_status))
-#endif
- scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
- " reg 0x%x, count %d.\n",
- i, spp->adapter_status, spp->target_status,
- reg, HD(j)->iocount);
-
- unmap_dma(i, j);
-
- /* Set the command state to inactive */
- SCpnt->host_scribble = NULL;
-
- SCpnt->scsi_done(SCpnt);
-
- if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
- HD(j)->iocount);
-
-handled:
- return IRQ_HANDLED;
-none:
- return IRQ_NONE;
-}
-
-static irqreturn_t do_interrupt_handler(int irq, void *shap) {
- unsigned int j;
- unsigned long spin_flags;
- irqreturn_t ret;
-
- /* Check if the interrupt must be processed by this handler */
- if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
-
- spin_lock_irqsave(sh[j]->host_lock, spin_flags);
- ret = ihdlr(j);
- spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
- return ret;
-}
-
-static int u14_34f_release(struct Scsi_Host *shpnt) {
- unsigned int i, j;
-
- for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
-
- if (sh[j] == NULL)
- panic("%s: release, invalid Scsi_Host pointer.\n", driver_name);
-
- for (i = 0; i < sh[j]->can_queue; i++)
- kfree((&HD(j)->cp[i])->sglist);
-
- for (i = 0; i < sh[j]->can_queue; i++)
- pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr,
- sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
-
- free_irq(sh[j]->irq, &sha[j]);
-
- if (sh[j]->dma_channel != NO_DMA)
- free_dma(sh[j]->dma_channel);
-
- release_region(sh[j]->io_port, sh[j]->n_io_port);
- scsi_unregister(sh[j]);
- return FALSE;
-}
-
-#include "scsi_module.c"
-
-#ifndef MODULE
-__setup("u14-34f=", option_setup);
-#endif /* end MODULE */
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index 70db6d999ca3..dc03e47f7c58 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -15,6 +15,7 @@
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
+#include "tc-dwc-g210.h"
/**
* tc_dwc_g210_setup_40bit_rmmi()
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b291fa6ed2ad..845b874e2977 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -327,6 +327,7 @@ enum {
MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_EXCEPTION_EVENT = 0x10000,
+ MASK_TM_SERVICE_RESP = 0xFF,
};
/* Task management service response */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f08d41a2d70b..37f3c51e9d92 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2568,7 +2568,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
+ "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
@@ -3364,8 +3364,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
if (ocs_value == OCS_SUCCESS) {
task_rsp_upiup = (struct utp_upiu_task_rsp *)
task_req_descp[index].task_rsp_upiu;
- task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
- task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+ task_result = be32_to_cpu(task_rsp_upiup->output_param1);
+ task_result = task_result & MASK_TM_SERVICE_RESP;
if (resp)
*resp = (u8)task_result;
} else {
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
deleted file mode 100644
index 14e0c40a68c9..000000000000
--- a/drivers/scsi/ultrastor.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
- * ultrastor.c Copyright (C) 1992 David B. Gentzel
- * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
- * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
- * (gentzel@nova.enet.dec.com)
- * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
- * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
- * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
- * Eric Youngdale (ericy@cais.com).
- * Thanks to UltraStor for providing the necessary documentation
- *
- * This is an old driver; for the 14F and 34F you should be using the
- * u14-34f driver instead.
- */
-
-/*
- * TODO:
- * 1. Find out why scatter/gather is limited to 16 requests per command.
- * This is fixed, at least on the 24F, as of version 1.12 - CAE.
- * 2. Look at command linking (mscp.command_link and
- * mscp.command_link_id). (Does not work with many disks,
- * and no performance increase. ERY).
- * 3. Allow multiple adapters.
- */
-
-/*
- * NOTES:
- * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
- * performance SCSI-2 host adapters. They all support command queueing
- * and scatter/gather I/O. Some of them can also emulate the standard
- * WD1003 interface for use with OS's which don't support SCSI. Here
- * is the scoop on the various models:
- * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
- * 14N - ISA HA with floppy support. I think that this is a non-DMA
- * HA. Nothing further known.
- * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
- * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
- *
- * The 14F, 24F, and 34F are supported by this driver.
- *
- * Places flagged with a triple question-mark are things which are either
- * unfinished, questionable, or wrong.
- */
-
-/* Changes from version 1.11 alpha to 1.12
- *
- * Increased the size of the scatter-gather list to 33 entries for
- * the 24F adapter (it was 16). I don't have the specs for the 14F
- * or the 34F, so they may support larger s-g lists as well.
- *
- * Caleb Epstein <cae@jpmorgan.com>
- */
-
-/* Changes from version 1.9 to 1.11
- *
- * Patches to bring this driver up to speed with the default kernel
- * driver which supports only the 14F and 34F adapters. This version
- * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
- *
- * Fixes from Eric Youngdale for a few possible race conditions and
- * several problems with bit testing operations (insufficient
- * parentheses).
- *
- * Removed the ultrastor_abort() and ultrastor_reset() functions
- * (enclosed them in #if 0 / #endif). These functions, at least on
- * the 24F, cause the SCSI bus to do odd things and generally lead to
- * kernel panics and machine hangs. This is like the Adaptec code.
- *
- * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
- */
-
-/* Changes from version 1.8 to version 1.9
- *
- * 0.99.11 patches (cae@jpmorgan.com) */
-
-/* Changes from version 1.7 to version 1.8
- *
- * Better error reporting.
- */
-
-/* Changes from version 1.6 to version 1.7
- *
- * Removed CSIR command code.
- *
- * Better race condition avoidance (xchgb function added).
- *
- * Set ICM and OGM status to zero at probe (24F)
- *
- * reset sends soft reset to UltraStor adapter
- *
- * reset adapter if adapter interrupts with an invalid MSCP address
- *
- * handle aborted command interrupt (24F)
- *
- */
-
-/* Changes from version 1.5 to version 1.6:
- *
- * Read MSCP address from ICM _before_ clearing the interrupt flag.
- * This fixes a race condition.
- */
-
-/* Changes from version 1.4 to version 1.5:
- *
- * Abort now calls done when multiple commands are enabled.
- *
- * Clear busy when aborted command finishes, not when abort is called.
- *
- * More debugging messages for aborts.
- */
-
-/* Changes from version 1.3 to version 1.4:
- *
- * Enable automatic request of sense data on error (requires newer version
- * of scsi.c to be useful).
- *
- * Fix PORT_OVERRIDE for 14F.
- *
- * Fix abort and reset to work properly (config.aborted wasn't cleared
- * after it was tested, so after a command abort no further commands would
- * work).
- *
- * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
- *
- * Fix test for OGM busy -- the busy bit is in different places on the 24F.
- *
- * Release ICM slot by clearing first byte on 24F.
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "ultrastor.h"
-
-#define FALSE 0
-#define TRUE 1
-
-#ifndef ULTRASTOR_DEBUG
-#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
-#endif
-
-#define VERSION "1.12"
-
-#define PACKED __attribute__((packed))
-#define ALIGNED(x) __attribute__((aligned(x)))
-
-
-/* The 14F uses an array of 4-byte ints for its scatter/gather list.
- The data can be unaligned, but need not be. It's easier to give
- the list normal alignment since it doesn't need to fit into a
- packed structure. */
-
-typedef struct {
- u32 address;
- u32 num_bytes;
-} ultrastor_sg_list;
-
-
-/* MailBox SCSI Command Packet. Basic command structure for communicating
- with controller. */
-struct mscp {
- unsigned char opcode: 3; /* type of command */
- unsigned char xdir: 2; /* data transfer direction */
- unsigned char dcn: 1; /* disable disconnect */
- unsigned char ca: 1; /* use cache (if available) */
- unsigned char sg: 1; /* scatter/gather operation */
- unsigned char target_id: 3; /* target SCSI id */
- unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
- unsigned char lun: 3; /* logical unit number */
- unsigned int transfer_data PACKED; /* transfer data pointer */
- unsigned int transfer_data_length PACKED; /* length in bytes */
- unsigned int command_link PACKED; /* for linking command chains */
- unsigned char scsi_command_link_id; /* identifies command in chain */
- unsigned char number_of_sg_list; /* (if sg is set) 8 bytes per list */
- unsigned char length_of_sense_byte;
- unsigned char length_of_scsi_cdbs; /* 6, 10, or 12 */
- unsigned char scsi_cdbs[12]; /* SCSI commands */
- unsigned char adapter_status; /* non-zero indicates HA error */
- unsigned char target_status; /* non-zero indicates target error */
- u32 sense_data PACKED;
- /* The following fields are for software only. They are included in
- the MSCP structure because they are associated with SCSI requests. */
- void (*done) (struct scsi_cmnd *);
- struct scsi_cmnd *SCint;
- ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
-};
-
-
-/* Port addresses (relative to the base address) */
-#define U14F_PRODUCT_ID(port) ((port) + 0x4)
-#define CONFIG(port) ((port) + 0x6)
-
-/* Port addresses relative to the doorbell base address. */
-#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
-#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
-#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
-#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
-
-
-/* Used to store configuration info read from config i/o registers. Most of
- this is not used yet, but might as well save it.
-
- This structure also holds port addresses that are not at the same offset
- on the 14F and 24F.
-
- This structure holds all data that must be duplicated to support multiple
- adapters. */
-
-static struct ultrastor_config
-{
- unsigned short port_address; /* base address of card */
- unsigned short doorbell_address; /* base address of doorbell CSRs */
- unsigned short ogm_address; /* base address of OGM */
- unsigned short icm_address; /* base address of ICM */
- const void *bios_segment;
- unsigned char interrupt: 4;
- unsigned char dma_channel: 3;
- unsigned char bios_drive_number: 1;
- unsigned char heads;
- unsigned char sectors;
- unsigned char ha_scsi_id: 3;
- unsigned char subversion: 4;
- unsigned char revision;
- /* The slot number is used to distinguish the 24F (slot != 0) from
- the 14F and 34F (slot == 0). */
- unsigned char slot;
-
-#ifdef PRINT_U24F_VERSION
- volatile int csir_done;
-#endif
-
- /* A pool of MSCP structures for this adapter, and a bitmask of
-     busy structures. (If ULTRASTOR_MAX_CMDS == 1, a 1 byte
- busy flag is used instead.) */
-
-#if ULTRASTOR_MAX_CMDS == 1
- unsigned char mscp_busy;
-#else
- unsigned long mscp_free;
-#endif
- volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
- struct mscp mscp[ULTRASTOR_MAX_CMDS];
-} config = {0};
-
-/* Set this to 1 to reset the SCSI bus on error. */
-static int ultrastor_bus_reset;
-
-
-/* Allowed BIOS base addresses (NULL indicates reserved) */
-static const void *const bios_segment_table[8] = {
- NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
- (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
-};
-
-/* Allowed IRQs for 14f */
-static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };
-
-/* Allowed DMA channels for 14f (0 indicates reserved) */
-static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };
-
-/* Head/sector mappings allowed by 14f */
-static const struct {
- unsigned char heads;
- unsigned char sectors;
-} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };
-
-#ifndef PORT_OVERRIDE
-/* ??? A probe of address 0x310 screws up NE2000 cards */
-static const unsigned short ultrastor_ports_14f[] = {
- 0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
-};
-#endif
-
-static void ultrastor_interrupt(void *);
-static irqreturn_t do_ultrastor_interrupt(int, void *);
-static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
-
-
-/* Always called with host lock held */
-
-static inline int find_and_clear_bit_16(unsigned long *field)
-{
- int rv;
-
- if (*field == 0)
- panic("No free mscp");
-
- asm volatile (
- "xorl %0,%0\n\t"
- "0: bsfw %1,%w0\n\t"
- "btr %0,%1\n\t"
- "jnc 0b"
- : "=&r" (rv), "+m" (*field) :);
-
- return rv;
-}
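For readers who do not follow the x86 asm: a minimal portable C sketch of what find_and_clear_bit_16() computes. The *_sketch name is illustrative and not part of the driver; the real helper does the scan-and-clear with bsfw/btr and panics when no bit is set.

static inline int find_and_clear_bit_16_sketch(unsigned long *field)
{
        int bit;

        /* scan the low 16 bits for the first set bit and clear it */
        for (bit = 0; bit < 16; bit++)
                if (*field & (1UL << bit)) {
                        *field &= ~(1UL << bit);  /* claim this MSCP slot */
                        return bit;
                }
        return -1;      /* no free slot; the driver never expects this */
}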
-
-/* This has been re-implemented with the help of Richard Earnshaw,
- <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
- The instability noted by jfc below appears to be a bug in
- gcc-2.5.x when compiling w/o optimization. --Caleb
-
- This asm is fragile: it doesn't work without the casts and it may
- not work without optimization. Maybe I should add a swap builtin
- to gcc. --jfc */
-static inline unsigned char xchgb(unsigned char reg,
- volatile unsigned char *mem)
-{
- __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
- return reg;
-}
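Equivalent behaviour of xchgb() in plain C, for reference; xchgb_sketch is an illustrative name. The important difference is that the real helper performs the swap as a single atomic xchgb instruction, which is what makes the aborted[] handshake between the queuecommand and abort paths safe.

static inline unsigned char xchgb_sketch(unsigned char val,
                                         volatile unsigned char *mem)
{
        unsigned char old = *mem;       /* value previously in *mem */

        *mem = val;                     /* store the new value */
        return old;                     /* caller acts on the old value */
}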
-
-#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
-
-/* Always called with the host lock held */
-static void log_ultrastor_abort(struct ultrastor_config *config,
- int command)
-{
- static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
- int i;
-
- for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
- {
- fmt[20 + i*2] = ' ';
- if (! (config->mscp_free & (1 << i)))
- fmt[21 + i*2] = '0' + config->mscp[i].target_id;
- else
- fmt[21 + i*2] = '-';
- }
- fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
- fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
- printk(fmt, command, &config->mscp[command], config->mscp_free);
-
-}
-#endif
-
-static int ultrastor_14f_detect(struct scsi_host_template * tpnt)
-{
- size_t i;
- unsigned char in_byte, version_byte = 0;
- struct config_1 {
- unsigned char bios_segment: 3;
- unsigned char removable_disks_as_fixed: 1;
- unsigned char interrupt: 2;
- unsigned char dma_channel: 2;
- } config_1;
- struct config_2 {
- unsigned char ha_scsi_id: 3;
- unsigned char mapping_mode: 2;
- unsigned char bios_drive_number: 1;
- unsigned char tfr_port: 2;
- } config_2;
-
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: called\n");
-#endif
-
- /* If a 24F has already been configured, don't look for a 14F. */
- if (config.bios_segment)
- return FALSE;
-
-#ifdef PORT_OVERRIDE
- if(!request_region(PORT_OVERRIDE, 0xc, "ultrastor")) {
- printk("Ultrastor I/O space already in use\n");
- return FALSE;
- };
- config.port_address = PORT_OVERRIDE;
-#else
- for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
- if(!request_region(ultrastor_ports_14f[i], 0x0c, "ultrastor")) continue;
- config.port_address = ultrastor_ports_14f[i];
-#endif
-
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: testing port address %03X\n", config.port_address);
-#endif
-
- in_byte = inb(U14F_PRODUCT_ID(config.port_address));
- if (in_byte != US14F_PRODUCT_ID_0) {
-#if (ULTRASTOR_DEBUG & UD_DETECT)
-# ifdef PORT_OVERRIDE
- printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
-# else
- printk("US14F: detect: no adapter at port %03X\n", config.port_address);
-# endif
-#endif
-#ifdef PORT_OVERRIDE
- goto out_release_port;
-#else
- release_region(config.port_address, 0x0c);
- continue;
-#endif
- }
- in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
- /* Only upper nibble is significant for Product ID 1 */
- if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
-#if (ULTRASTOR_DEBUG & UD_DETECT)
-# ifdef PORT_OVERRIDE
- printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
-# else
- printk("US14F: detect: no adapter at port %03X\n", config.port_address);
-# endif
-#endif
-#ifdef PORT_OVERRIDE
- goto out_release_port;
-#else
- release_region(config.port_address, 0x0c);
- continue;
-#endif
- }
- version_byte = in_byte;
-#ifndef PORT_OVERRIDE
- break;
- }
- if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
-# if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: no port address found!\n");
-# endif
- /* all ports probed already released - we can just go straight out */
- return FALSE;
- }
-#endif
-
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: adapter found at port address %03X\n",
- config.port_address);
-#endif
-
- /* Set local doorbell mask to disallow bus reset unless
- ultrastor_bus_reset is true. */
- outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));
-
- /* All above tests passed, must be the right thing. Get some useful
- info. */
-
- /* Register the I/O space that we use */
-
- *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
- *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
- config.bios_segment = bios_segment_table[config_1.bios_segment];
- config.doorbell_address = config.port_address;
- config.ogm_address = config.port_address + 0x8;
- config.icm_address = config.port_address + 0xC;
- config.interrupt = interrupt_table_14f[config_1.interrupt];
- config.ha_scsi_id = config_2.ha_scsi_id;
- config.heads = mapping_table[config_2.mapping_mode].heads;
- config.sectors = mapping_table[config_2.mapping_mode].sectors;
- config.bios_drive_number = config_2.bios_drive_number;
- config.subversion = (version_byte & 0x0F);
- if (config.subversion == U34F)
- config.dma_channel = 0;
- else
- config.dma_channel = dma_channel_table_14f[config_1.dma_channel];
-
- if (!config.bios_segment) {
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: not detected.\n");
-#endif
- goto out_release_port;
- }
-
- /* Final consistency check, verify previous info. */
- if (config.subversion != U34F)
- if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: consistency check failed\n");
-#endif
- goto out_release_port;
- }
-
- /* If we were TRULY paranoid, we could issue a host adapter inquiry
- command here and verify the data returned. But frankly, I'm
- exhausted! */
-
- /* Finally! Now I'm satisfied... */
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US14F: detect: detect succeeded\n"
- " Port address: %03X\n"
- " BIOS segment: %05X\n"
- " Interrupt: %u\n"
- " DMA channel: %u\n"
- " H/A SCSI ID: %u\n"
- " Subversion: %u\n",
- config.port_address, config.bios_segment, config.interrupt,
- config.dma_channel, config.ha_scsi_id, config.subversion);
-#endif
- tpnt->this_id = config.ha_scsi_id;
- tpnt->unchecked_isa_dma = (config.subversion != U34F);
-
-#if ULTRASTOR_MAX_CMDS > 1
- config.mscp_free = ~0;
-#endif
-
- /*
- * Brrr, &config.mscp[0].SCint->device->host is something magical....
- * XXX and FIXME
- */
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", &config.mscp[0].SCint->device->host)) {
- printk("Unable to allocate IRQ%u for UltraStor controller.\n",
- config.interrupt);
- goto out_release_port;
- }
- if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
- printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
- config.dma_channel);
- free_irq(config.interrupt, NULL);
- goto out_release_port;
- }
- tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
- printk("UltraStor driver version" VERSION ". Using %d SG lists.\n",
- ULTRASTOR_14F_MAX_SG);
-
- return TRUE;
-out_release_port:
- release_region(config.port_address, 0x0c);
- return FALSE;
-}
-
-static int ultrastor_24f_detect(struct scsi_host_template * tpnt)
-{
- int i;
- struct Scsi_Host * shpnt = NULL;
-
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US24F: detect");
-#endif
-
- /* probe each EISA slot at slot address C80 */
- for (i = 1; i < 15; i++)
- {
- unsigned char config_1, config_2;
- unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
-
- if (inb(addr) != US24F_PRODUCT_ID_0 &&
- inb(addr+1) != US24F_PRODUCT_ID_1 &&
- inb(addr+2) != US24F_PRODUCT_ID_2)
- continue;
-
- config.revision = inb(addr+3);
- config.slot = i;
- if (! (inb(addr+4) & 1))
- {
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("U24F: found disabled card in slot %u\n", i);
-#endif
- continue;
- }
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("U24F: found card in slot %u\n", i);
-#endif
- config_1 = inb(addr + 5);
- config.bios_segment = bios_segment_table[config_1 & 7];
- switch(config_1 >> 4)
- {
- case 1:
- config.interrupt = 15;
- break;
- case 2:
- config.interrupt = 14;
- break;
- case 4:
- config.interrupt = 11;
- break;
- case 8:
- config.interrupt = 10;
- break;
- default:
- printk("U24F: invalid IRQ\n");
- return FALSE;
- }
-
- /* BIOS addr set */
- /* base port set */
- config.port_address = addr;
- config.doorbell_address = addr + 12;
- config.ogm_address = addr + 0x17;
- config.icm_address = addr + 0x1C;
- config_2 = inb(addr + 7);
- config.ha_scsi_id = config_2 & 7;
- config.heads = mapping_table[(config_2 >> 3) & 3].heads;
- config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
-#if (ULTRASTOR_DEBUG & UD_DETECT)
- printk("US24F: detect: detect succeeded\n"
- " Port address: %03X\n"
- " BIOS segment: %05X\n"
- " Interrupt: %u\n"
- " H/A SCSI ID: %u\n",
- config.port_address, config.bios_segment,
- config.interrupt, config.ha_scsi_id);
-#endif
- tpnt->this_id = config.ha_scsi_id;
- tpnt->unchecked_isa_dma = 0;
- tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
-
- shpnt = scsi_register(tpnt, 0);
- if (!shpnt) {
- printk(KERN_WARNING "(ultrastor:) Could not register scsi device. Aborting registration.\n");
- free_irq(config.interrupt, do_ultrastor_interrupt);
- return FALSE;
- }
-
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", shpnt))
- {
- printk("Unable to allocate IRQ%u for UltraStor controller.\n",
- config.interrupt);
- return FALSE;
- }
-
- shpnt->irq = config.interrupt;
- shpnt->dma_channel = config.dma_channel;
- shpnt->io_port = config.port_address;
-
-#if ULTRASTOR_MAX_CMDS > 1
- config.mscp_free = ~0;
-#endif
- /* Mark ICM and OGM free */
- outb(0, addr + 0x16);
- outb(0, addr + 0x1B);
-
- /* Set local doorbell mask to disallow bus reset unless
- ultrastor_bus_reset is true. */
- outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
- outb(0x02, SYS_DOORBELL_MASK(addr+12));
- printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
- tpnt->sg_tablesize);
- return TRUE;
- }
- return FALSE;
-}
-
-static int ultrastor_detect(struct scsi_host_template * tpnt)
-{
- tpnt->proc_name = "ultrastor";
- return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
-}
-
-static int ultrastor_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static const char *ultrastor_info(struct Scsi_Host * shpnt)
-{
- static char buf[64];
-
- if (config.slot)
- sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u",
- config.slot, config.interrupt);
- else if (config.subversion)
- sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u",
- config.port_address, (int)config.bios_segment,
- config.interrupt);
- else
- sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u",
- config.port_address, (int)config.bios_segment,
- config.interrupt, config.dma_channel);
- return buf;
-}
-
-static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
-{
- struct scatterlist *sg;
- long transfer_length = 0;
- int i, max;
-
- max = scsi_sg_count(SCpnt);
- scsi_for_each_sg(SCpnt, sg, max, i) {
- mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
- mscp->sglist[i].num_bytes = sg->length;
- transfer_length += sg->length;
- }
- mscp->number_of_sg_list = max;
- mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
- /* ??? May not be necessary. Docs are unclear as to whether transfer
- length field is ignored or whether it should be set to the total
- number of bytes of the transfer. */
- mscp->transfer_data_length = transfer_length;
-}
-
-static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt,
- void (*done) (struct scsi_cmnd *))
-{
- struct mscp *my_mscp;
-#if ULTRASTOR_MAX_CMDS > 1
- int mscp_index;
-#endif
- unsigned int status;
-
- /* Next test is for debugging; "can't happen" */
- if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
- panic("ultrastor_queuecommand: no free MSCP\n");
- mscp_index = find_and_clear_bit_16(&config.mscp_free);
-
- /* Has the command been aborted? */
- if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
- {
- status = DID_ABORT << 16;
- goto aborted;
- }
-
- my_mscp = &config.mscp[mscp_index];
-
- *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
-
- /* Tape drives don't work properly if the cache is used. The SCSI
- READ command for a tape doesn't have a block offset, and the adapter
- incorrectly assumes that all reads from the tape read the same
- blocks. Results will depend on read buffer size and other disk
- activity.
-
- ??? Which other device types should never use the cache? */
- my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
- my_mscp->target_id = SCpnt->device->id;
- my_mscp->ch_no = 0;
- my_mscp->lun = SCpnt->device->lun;
- if (scsi_sg_count(SCpnt)) {
- /* Set scatter/gather flag in SCSI command packet */
- my_mscp->sg = TRUE;
- build_sg_list(my_mscp, SCpnt);
- } else {
- /* Unset scatter/gather flag in SCSI command packet */
- my_mscp->sg = FALSE;
- my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
- my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
- }
- my_mscp->command_link = 0; /*???*/
- my_mscp->scsi_command_link_id = 0; /*???*/
- my_mscp->length_of_sense_byte = SCSI_SENSE_BUFFERSIZE;
- my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
- memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
- my_mscp->adapter_status = 0;
- my_mscp->target_status = 0;
- my_mscp->sense_data = isa_virt_to_bus(&SCpnt->sense_buffer);
- my_mscp->done = done;
- my_mscp->SCint = SCpnt;
- SCpnt->host_scribble = (unsigned char *)my_mscp;
-
- /* Find free OGM slot. On 24F, look for OGM status byte == 0.
- On 14F and 34F, wait for local interrupt pending flag to clear.
-
- FIXME: now we are using new_eh we should punt here and let the
- midlayer sort it out */
-
-retry:
- if (config.slot)
- while (inb(config.ogm_address - 1) != 0 && config.aborted[mscp_index] == 0xff)
- barrier();
-
- /* else??? */
-
- while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1)) && config.aborted[mscp_index] == 0xff)
- barrier();
-
- /* To avoid race conditions, keep the code to write to the adapter
- atomic. This simplifies the abort code. Right now the
- scsi mid layer has the host_lock already held
- */
-
- if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1))
- goto retry;
-
- status = xchgb(0, &config.aborted[mscp_index]);
- if (status != 0xff) {
-
-#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
- printk("USx4F: queuecommand: aborted\n");
-#if ULTRASTOR_MAX_CMDS > 1
- log_ultrastor_abort(&config, mscp_index);
-#endif
-#endif
- status <<= 16;
-
- aborted:
- set_bit(mscp_index, &config.mscp_free);
- /* If the driver queues commands, call the done proc here. Otherwise
- return an error. */
-#if ULTRASTOR_MAX_CMDS > 1
- SCpnt->result = status;
- done(SCpnt);
- return 0;
-#else
- return status;
-#endif
- }
-
- /* Store pointer in OGM address bytes */
- outl(isa_virt_to_bus(my_mscp), config.ogm_address);
-
- /* Issue OGM interrupt */
- if (config.slot) {
- /* Write OGM command register on 24F */
- outb(1, config.ogm_address - 1);
- outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
- } else {
- outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
- }
-
-#if (ULTRASTOR_DEBUG & UD_COMMAND)
- printk("USx4F: queuecommand: returning\n");
-#endif
-
- return 0;
-}
-
-static DEF_SCSI_QCMD(ultrastor_queuecommand)
-
-/* This code must deal with 2 cases:
-
- 1. The command has not been written to the OGM. In this case, set
- the abort flag and return.
-
- 2. The command has been written to the OGM and is stuck somewhere in
- the adapter.
-
- 2a. On a 24F, ask the adapter to abort the command. It will interrupt
- when it does.
-
- 2b. Call the command's done procedure.
-
- */
-
-static int ultrastor_abort(struct scsi_cmnd *SCpnt)
-{
-#if ULTRASTOR_DEBUG & UD_ABORT
- char out[108];
- unsigned char icm_status = 0, ogm_status = 0;
- unsigned int icm_addr = 0, ogm_addr = 0;
-#endif
- unsigned int mscp_index;
- unsigned char old_aborted;
- unsigned long flags;
- void (*done)(struct scsi_cmnd *);
- struct Scsi_Host *host = SCpnt->device->host;
-
- if(config.slot)
- return FAILED; /* Do not attempt an abort for the 24f */
-
- /* Simple consistency checking */
- if(!SCpnt->host_scribble)
- return FAILED;
-
- mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
- if (mscp_index >= ULTRASTOR_MAX_CMDS)
- panic("Ux4F aborting invalid MSCP");
-
-#if ULTRASTOR_DEBUG & UD_ABORT
- if (config.slot)
- {
- int port0 = (config.slot << 12) | 0xc80;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(host->host_lock, flags);
- strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
- for (i = 0; i < 16; i++)
- {
- unsigned char p = inb(port0 + i);
- out[28 + i * 3] = "0123456789abcdef"[p >> 4];
- out[29 + i * 3] = "0123456789abcdef"[p & 15];
- out[30 + i * 3] = ' ';
- }
- out[28 + i * 3] = '\n';
- out[29 + i * 3] = 0;
- ogm_status = inb(port0 + 22);
- ogm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 23));
- icm_status = inb(port0 + 27);
- icm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 28));
- spin_unlock_irqrestore(host->host_lock, flags);
- }
-
- /* First check to see if an interrupt is pending. I suspect the SiS
-     chipset loses interrupts.  (I also suspect it mangles data, but
-     one bug at a time...) */
- if (config.slot ? inb(config.icm_address - 1) == 2 :
- (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
- {
- printk("Ux4F: abort while completed command pending\n");
-
- spin_lock_irqsave(host->host_lock, flags);
- /* FIXME: Ewww... need to think about passing host around properly */
- ultrastor_interrupt(NULL);
- spin_unlock_irqrestore(host->host_lock, flags);
- return SUCCESS;
- }
-#endif
-
- old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);
-
- /* aborted == 0xff is the signal that queuecommand has not yet sent
- the command. It will notice the new abort flag and fail. */
- if (old_aborted == 0xff)
- return SUCCESS;
-
- /* On 24F, send an abort MSCP request. The adapter will interrupt
- and the interrupt handler will call done. */
- if (config.slot && inb(config.ogm_address - 1) == 0)
- {
- unsigned long flags;
-
- spin_lock_irqsave(host->host_lock, flags);
- outl(isa_virt_to_bus(&config.mscp[mscp_index]), config.ogm_address);
- udelay(8);
- outb(0x80, config.ogm_address - 1);
- outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
-#if ULTRASTOR_DEBUG & UD_ABORT
- log_ultrastor_abort(&config, mscp_index);
- printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
-#endif
- spin_unlock_irqrestore(host->host_lock, flags);
- /* FIXME: add a wait for the abort to complete */
- return SUCCESS;
- }
-
-#if ULTRASTOR_DEBUG & UD_ABORT
- log_ultrastor_abort(&config, mscp_index);
-#endif
-
- /* Can't request a graceful abort. Either this is not a 24F or
- the OGM is busy. Don't free the command -- the adapter might
- still be using it. Setting SCint = 0 causes the interrupt
- handler to ignore the command. */
-
- /* FIXME - devices that implement soft resets will still be running
- the command after a bus reset. We would probably rather leave
- the command in the queue. The upper level code will automatically
- leave the command in the active state instead of requeueing it. ERY */
-
-#if ULTRASTOR_DEBUG & UD_ABORT
- if (config.mscp[mscp_index].SCint != SCpnt)
- printk("abort: command mismatch, %p != %p\n",
- config.mscp[mscp_index].SCint, SCpnt);
-#endif
- if (config.mscp[mscp_index].SCint == NULL)
- return FAILED;
-
- if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
- config.mscp[mscp_index].SCint = NULL;
- done = config.mscp[mscp_index].done;
- config.mscp[mscp_index].done = NULL;
- SCpnt->result = DID_ABORT << 16;
-
- /* Take the host lock to guard against scsi layer re-entry */
- done(SCpnt);
-
- /* Need to set a timeout here in case command never completes. */
- return SUCCESS;
-}
-
-static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
-{
- unsigned long flags;
- int i;
- struct Scsi_Host *host = SCpnt->device->host;
-
-#if (ULTRASTOR_DEBUG & UD_RESET)
- printk("US14F: reset: called\n");
-#endif
-
- if(config.slot)
- return FAILED;
-
- spin_lock_irqsave(host->host_lock, flags);
- /* Reset the adapter and SCSI bus. The SCSI bus reset can be
- inhibited by clearing ultrastor_bus_reset before probe. */
- outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
- if (config.slot)
- {
- outb(0, config.ogm_address - 1);
- outb(0, config.icm_address - 1);
- }
-
-#if ULTRASTOR_MAX_CMDS == 1
- if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
- {
- config.mscp->SCint->result = DID_RESET << 16;
- config.mscp->done(config.mscp->SCint);
- }
- config.mscp->SCint = 0;
-#else
- for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
- {
- if (! (config.mscp_free & (1 << i)) &&
- config.mscp[i].done && config.mscp[i].SCint)
- {
- config.mscp[i].SCint->result = DID_RESET << 16;
- config.mscp[i].done(config.mscp[i].SCint);
- config.mscp[i].done = NULL;
- }
- config.mscp[i].SCint = NULL;
- }
-#endif
-
- /* FIXME - if the device implements soft resets, then the command
- will still be running. ERY
-
- Even bigger deal with new_eh!
- */
-
- memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
-#if ULTRASTOR_MAX_CMDS == 1
- config.mscp_busy = 0;
-#else
- config.mscp_free = ~0;
-#endif
-
- spin_unlock_irqrestore(host->host_lock, flags);
- return SUCCESS;
-
-}
-
-int ultrastor_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int * dkinfo)
-{
- int size = capacity;
- unsigned int s = config.heads * config.sectors;
-
- dkinfo[0] = config.heads;
- dkinfo[1] = config.sectors;
- dkinfo[2] = size / s; /* Ignore partial cylinders */
-#if 0
- if (dkinfo[2] > 1024)
- dkinfo[2] = 1024;
-#endif
- return 0;
-}
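A worked example of the geometry arithmetic above, assuming the 64-head / 32-sector mapping; chs_cylinders_sketch() is only an illustration, not a driver function.

static int chs_cylinders_sketch(unsigned long capacity_in_sectors,
                                unsigned int heads, unsigned int sectors)
{
        /* e.g. a 1 GB disk of 2097152 512-byte sectors with 64 heads and
         * 32 sectors per track gives 2097152 / (64 * 32) = 1024 cylinders */
        return capacity_in_sectors / (heads * sectors);
}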
-
-static void ultrastor_interrupt(void *dev_id)
-{
- unsigned int status;
-#if ULTRASTOR_MAX_CMDS > 1
- unsigned int mscp_index;
-#endif
- struct mscp *mscp;
- void (*done) (struct scsi_cmnd *);
- struct scsi_cmnd *SCtmp;
-
-#if ULTRASTOR_MAX_CMDS == 1
- mscp = &config.mscp[0];
-#else
- mscp = (struct mscp *)isa_bus_to_virt(inl(config.icm_address));
- mscp_index = mscp - config.mscp;
- if (mscp_index >= ULTRASTOR_MAX_CMDS) {
- printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
- /* A command has been lost. Reset and report an error
- for all commands. */
- ultrastor_host_reset(dev_id);
- return;
- }
-#endif
-
- /* Clean ICM slot (set ICMINT bit to 0) */
- if (config.slot) {
- unsigned char icm_status = inb(config.icm_address - 1);
-#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
- if (icm_status != 1 && icm_status != 2)
- printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
- mscp_index, (unsigned int) mscp);
-#endif
- /* The manual says clear interrupt then write 0 to ICM status.
- This seems backwards, but I'll do it anyway. --jfc */
- outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
- outb(0, config.icm_address - 1);
- if (icm_status == 4) {
- printk("UltraStor abort command failed\n");
- return;
- }
- if (icm_status == 3) {
- void (*done)(struct scsi_cmnd *) = mscp->done;
- if (done) {
- mscp->done = NULL;
- mscp->SCint->result = DID_ABORT << 16;
- done(mscp->SCint);
- }
- return;
- }
- } else {
- outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
- }
-
- SCtmp = mscp->SCint;
- mscp->SCint = NULL;
-
- if (!SCtmp)
- {
-#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
- printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
-#endif
-#if ULTRASTOR_MAX_CMDS == 1
- config.mscp_busy = FALSE;
-#else
- set_bit(mscp_index, &config.mscp_free);
-#endif
- config.aborted[mscp_index] = 0;
- return;
- }
-
- /* Save done locally and zero before calling. This is needed as
- once we call done, we may get another command queued before this
- interrupt service routine can return. */
- done = mscp->done;
- mscp->done = NULL;
-
- /* Let the higher levels know that we're done */
- switch (mscp->adapter_status)
- {
- case 0:
- status = DID_OK << 16;
- break;
- case 0x01: /* invalid command */
- case 0x02: /* invalid parameters */
- case 0x03: /* invalid data list */
- default:
- status = DID_ERROR << 16;
- break;
- case 0x84: /* SCSI bus abort */
- status = DID_ABORT << 16;
- break;
- case 0x91:
- status = DID_TIME_OUT << 16;
- break;
- }
-
- SCtmp->result = status | mscp->target_status;
-
- SCtmp->host_scribble = NULL;
-
- /* Free up mscp block for next command */
-#if ULTRASTOR_MAX_CMDS == 1
- config.mscp_busy = FALSE;
-#else
- set_bit(mscp_index, &config.mscp_free);
-#endif
-
-#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
- if (config.aborted[mscp_index])
- printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
- mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
-#endif
- config.aborted[mscp_index] = 0;
-
- if (done)
- done(SCtmp);
- else
- printk("US14F: interrupt: unexpected interrupt\n");
-
- if (config.slot ? inb(config.icm_address - 1) :
- (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
-#if (ULTRASTOR_DEBUG & UD_MULTI_CMD)
- printk("Ux4F: multiple commands completed\n");
-#else
- ;
-#endif
-
-#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
- printk("USx4F: interrupt: returning\n");
-#endif
-}
-
-static irqreturn_t do_ultrastor_interrupt(int irq, void *dev_id)
-{
- unsigned long flags;
- struct Scsi_Host *dev = dev_id;
-
- spin_lock_irqsave(dev->host_lock, flags);
- ultrastor_interrupt(dev_id);
- spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-MODULE_LICENSE("GPL");
-
-static struct scsi_host_template driver_template = {
- .name = "UltraStor 14F/24F/34F",
- .detect = ultrastor_detect,
- .release = ultrastor_release,
- .info = ultrastor_info,
- .queuecommand = ultrastor_queuecommand,
- .eh_abort_handler = ultrastor_abort,
- .eh_host_reset_handler = ultrastor_host_reset,
- .bios_param = ultrastor_biosparam,
- .can_queue = ULTRASTOR_MAX_CMDS,
- .sg_tablesize = ULTRASTOR_14F_MAX_SG,
- .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN,
- .unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
deleted file mode 100644
index 165c18b5cf5f..000000000000
--- a/drivers/scsi/ultrastor.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * ultrastor.c (C) 1991 David B. Gentzel
- * Low-level scsi driver for UltraStor 14F
- * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
- * (gentzel@nova.enet.dec.com)
- * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
- * 24F support by John F. Carr (jfc@athena.mit.edu)
- * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
- * Eric Youngdale (eric@tantalus.nrl.navy.mil).
- * Thanks to UltraStor for providing the necessary documentation
- */
-
-#ifndef _ULTRASTOR_H
-#define _ULTRASTOR_H
-
-static int ultrastor_detect(struct scsi_host_template *);
-static const char *ultrastor_info(struct Scsi_Host *shpnt);
-static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int ultrastor_abort(struct scsi_cmnd *);
-static int ultrastor_host_reset(struct scsi_cmnd *);
-static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int *);
-
-
-#define ULTRASTOR_14F_MAX_SG 16
-#define ULTRASTOR_24F_MAX_SG 33
-
-#define ULTRASTOR_MAX_CMDS_PER_LUN 5
-#define ULTRASTOR_MAX_CMDS 16
-
-#define ULTRASTOR_24F_PORT 0xC80
-
-
-#ifdef ULTRASTOR_PRIVATE
-
-#define UD_ABORT 0x0001
-#define UD_COMMAND 0x0002
-#define UD_DETECT 0x0004
-#define UD_INTERRUPT 0x0008
-#define UD_RESET 0x0010
-#define UD_MULTI_CMD 0x0020
-#define UD_CSIR 0x0040
-#define UD_ERROR 0x0080
-
-/* #define PORT_OVERRIDE 0x330 */
-
-/* Values for the PRODUCT_ID ports for the 14F */
-#define US14F_PRODUCT_ID_0 0x56
-#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
-
-#define US24F_PRODUCT_ID_0 0x56
-#define US24F_PRODUCT_ID_1 0x63
-#define US24F_PRODUCT_ID_2 0x02
-
-/* Subversion values */
-#define U14F 0
-#define U34F 1
-
-/* MSCP field values */
-
-/* Opcode */
-#define OP_HOST_ADAPTER 0x1
-#define OP_SCSI 0x2
-#define OP_RESET 0x4
-
-/* Data Transfer Direction */
-#define DTD_SCSI 0x0
-#define DTD_IN 0x1
-#define DTD_OUT 0x2
-#define DTD_NONE 0x3
-
-/* Host Adapter command subcodes */
-#define HA_CMD_INQUIRY 0x1
-#define HA_CMD_SELF_DIAG 0x2
-#define HA_CMD_READ_BUFF 0x3
-#define HA_CMD_WRITE_BUFF 0x4
-
-#endif
-
-#endif
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index deefab3a94d0..ec91bd07f00a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -259,7 +259,7 @@ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
if (cmd->comp)
- complete_all(cmd->comp);
+ complete(cmd->comp);
}
static void virtscsi_ctrl_done(struct virtqueue *vq)
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
deleted file mode 100644
index 409f959845c4..000000000000
--- a/drivers/scsi/wd7000.c
+++ /dev/null
@@ -1,1657 +0,0 @@
-/* $Id: $
- * linux/drivers/scsi/wd7000.c
- *
- * Copyright (C) 1992 Thomas Wuensche
- * closely related to the aha1542 driver from Tommy Thorn
- * ( as close as different hardware allows on a lowlevel-driver :-) )
- *
- * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
- * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
- *
- * Additional changes to support scatter/gather. Dec. 1992. tw/jb
- *
- * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
- * Rewritten to support multiple host adapters.
- * Miscellaneous cleanup.
- * So far, still doesn't do reset or abort correctly, since I have no idea
- * how to do them with this board (8^(. Jan 1994 jb
- *
- * This driver now supports both standard configurations (per
- * the 3.36 Owner's Manual, my latest reference) by the same method as
- * before; namely, by looking for a BIOS signature. Thus, the location of
- * the BIOS signature determines the board configuration. Until I have
- * time to do something more flexible, users should stick to one of the
- * following:
- *
- * Standard configuration for single-adapter systems:
- * - BIOS at CE00h
- * - I/O base address 350h
- * - IRQ level 15
- * - DMA channel 6
- * Standard configuration for a second adapter in a system:
- * - BIOS at C800h
- * - I/O base address 330h
- * - IRQ level 11
- * - DMA channel 5
- *
- * Anyone who can recompile the kernel is welcome to add others as need
- * arises, but unpredictable results may occur if there are conflicts.
- * In any event, if there are multiple adapters in a system, they MUST
- * use different I/O bases, IRQ levels, and DMA channels, since they will be
- * indistinguishable (and in direct conflict) otherwise.
- *
- * As a point of information, the NO_OP command toggles the CMD_RDY bit
- * of the status port, and this fact could be used as a test for the I/O
- * base address (or more generally, board detection). There is an interrupt
- * status port, so IRQ probing could also be done. I suppose the full
- * DMA diagnostic could be used to detect the DMA channel being used. I
- * haven't done any of this, though, because I think there's too much of
- * a chance that such explorations could be destructive, if some other
- * board's resources are used inadvertently. So, call me a wimp, but I
- * don't want to try it. The only kind of exploration I trust is memory
- * exploration, since it's more certain that reading memory won't be
- * destructive.
- *
- * More to my liking would be a LILO boot command line specification, such
- * as is used by the aha152x driver (and possibly others). I'll look into
- * it, as I have time...
- *
- * I get mail occasionally from people who either are using or are
- * considering using a WD7000 with Linux. There is a variety of
- * nomenclature describing WD7000's. To the best of my knowledge, the
- * following is a brief summary (from an old WD doc - I don't work for
- * them or anything like that):
- *
- * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
- * installed. Last I heard, the BIOS was actually done by Columbia
- * Data Products. The BIOS is only used by this driver (and thus
- * by Linux) to identify the board; none of it can be executed under
- * Linux.
- *
- * WD7000-ASC: This is the original adapter board, with or without BIOS.
- * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
- * controlled by an onboard Z80 processor. The board interface
- * visible to the host CPU is defined effectively by the Z80's
- * firmware, and it is this firmware's revision level that is
- * determined and reported by this driver. (The version of the
- * on-board BIOS is of no interest whatsoever.) The host CPU has
- * no access to the SBIC; hence the fact that it is a WD33C93 is
- * also of no interest to this driver.
- *
- * WD7000-AX:
- * WD7000-MX:
- * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
- * largely built from discrete components; these boards use more
- * integration. The -AX is an ISA bus board (like the -ASC),
- * the -MX is an MCA (i.e., PS/2) bus board, and the -EX is an
- * EISA bus board.
- *
- * At the time of my documentation, the -?X boards were "future" products,
- * and were not yet available. However, I vaguely recall that Thomas
- * Wuensche had an -AX, so I believe at least it is supported by this
- * driver. I have no personal knowledge of either -MX or -EX boards.
- *
- * P.S. Just recently, I've discovered (directly from WD and Future
- * Domain) that all but the WD7000-EX have been out of production for
- * two years now. FD has production rights to the 7000-EX, and are
- * producing it under a new name, and with a new BIOS. If anyone has
- * one of the FD boards, it would be nice to come up with a signature
- * for it.
- * J.B. Jan 1994.
- *
- *
- * Revisions by Miroslav Zagorac <zaga@fly.cc.fer.hr>
- *
- * 08/24/1996.
- *
- * Enhancement for wd7000_detect function has been made, so you don't have
- * to enter BIOS ROM address in initialisation data (see struct Config).
- * We cannot detect IRQ, DMA and I/O base address for now, so we have to
- * enter them as arguments while wd_7000 is detected. If someone has IRQ,
- * DMA or I/O base address set to some other value, he can enter them in
- * configuration without any problem. Also I wrote a function wd7000_setup,
- * so now you can enter WD-7000 definition as kernel arguments,
- * as in lilo.conf:
- *
- * append="wd7000=IRQ,DMA,IO"
- *
- * PS: If card BIOS ROM is disabled, function wd7000_detect now will recognize
- * adapter, unlike the old one. Anyway, BIOS ROM from WD7000 adapter is
- * useless for Linux. B^)
- *
- *
- * 09/06/1996.
- *
- * Autodetecting of I/O base address from wd7000_detect function is removed,
- * some little bugs removed, etc...
- *
- * Thanks to Roger Scott for driver debugging.
- *
- * 06/07/1997
- *
- * Added support for /proc file system (/proc/scsi/wd7000/[0...] files).
- * Now the driver can handle hard disks with capacity >1GB.
- *
- * 01/15/1998
- *
- * Added support for BUS_ON and BUS_OFF parameters in config line.
- * Miscellaneous cleanup.
- *
- * 03/01/1998
- *
- * The WD7000 driver now works on kernels >= 2.1.x
- *
- *
- * 12/31/2001 - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- *
- * use host->host_lock, not io_request_lock, cleanups
- *
- * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Use dev_id for interrupts, kill __func__ pasting
- * Add a lock for the scb pool, clean up all other cli/sti usage stuff
- * Use the adapter lock for the other places we had the cli's
- *
- * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Switch to new style error handling
- * Clean up delay to udelay, and yielding sleeps
- * Make host reset actually reset the card
- * Make everything static
- *
- * 2003/02/12 - Christoph Hellwig <hch@infradead.org>
- *
- * Cleaned up host template definition
- * Removed now obsolete wd7000.h
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/ioport.h>
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/stat.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsicam.h>
-
-
-#undef WD7000_DEBUG /* general debug */
-#ifdef WD7000_DEBUG
-#define dprintk printk
-#else
-#define dprintk no_printk
-#endif
-
-/*
- * Mailbox structure sizes.
- * I prefer to keep the number of ICMBs much larger than the number of
- * OGMBs. OGMBs are used very quickly by the driver to start one or
- * more commands, while ICMBs are used by the host adapter per command.
- */
-#define OGMB_CNT 16
-#define ICMB_CNT 32
-
-/*
- * Scb's are shared by all active adapters. So, if they all become busy,
- * callers may be made to wait in alloc_scbs for them to free. That can
- * be avoided by setting MAX_SCBS to NUM_CONFIGS * WD7000_Q.  If you'd
- * rather conserve memory, use a smaller number (> 0, of course) - things
- * should still work OK.
- */
-#define MAX_SCBS 32
-
-/*
- * In this version, sg_tablesize now defaults to WD7000_SG, and will
- * be set to SG_NONE for older boards. This is the reverse of the
- * previous default, and was changed so that the driver-level
- * scsi_host_template would reflect the driver's support for scatter/
- * gather.
- *
- * Also, it has been reported that boards at Revision 6 support scatter/
- * gather, so the new definition of an "older" board has been changed
- * accordingly.
- */
-#define WD7000_Q 16
-#define WD7000_SG 16
-
-
-/*
- * WD7000-specific mailbox structure
- *
- */
-typedef volatile struct mailbox {
- unchar status;
- unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
-} Mailbox;
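The scbptr[] field (and the 3-byte len/ptr fields in the Sgb and Scb structures further down) hold 24-bit bus addresses stored most-significant byte first. A sketch of how such a value would be packed and unpacked follows; put_scsi24() and get_scsi24() are hypothetical helper names, not the driver's own.

static inline void put_scsi24(unsigned char dst[3], unsigned long addr)
{
        dst[0] = (addr >> 16) & 0xff;   /* MSB first, as the adapter expects */
        dst[1] = (addr >> 8) & 0xff;
        dst[2] = addr & 0xff;
}

static inline unsigned long get_scsi24(const unsigned char src[3])
{
        return ((unsigned long)src[0] << 16) |
               ((unsigned long)src[1] << 8) |
                (unsigned long)src[2];
}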
-
-/*
- * This structure should contain all per-adapter global data. I.e., any
- * new global per-adapter data should be put in here.
- */
-typedef struct adapter {
- struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
- int iobase; /* This adapter's I/O base address */
- int irq; /* This adapter's IRQ level */
- int dma; /* This adapter's DMA channel */
- int int_counter; /* This adapter's interrupt counter */
- int bus_on; /* This adapter's BUS_ON time */
- int bus_off; /* This adapter's BUS_OFF time */
- struct { /* This adapter's mailboxes */
- Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
- Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
- } mb;
- int next_ogmb; /* to reduce contention at mailboxes */
- unchar control; /* shadows CONTROL port value */
- unchar rev1, rev2; /* filled in by wd7000_revision */
-} Adapter;
-
-/*
- * (linear) base address for ROM BIOS
- */
-static const long wd7000_biosaddr[] = {
- 0xc0000, 0xc2000, 0xc4000, 0xc6000, 0xc8000, 0xca000, 0xcc000, 0xce000,
- 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000
-};
-#define NUM_ADDRS ARRAY_SIZE(wd7000_biosaddr)
-
-static const unsigned short wd7000_iobase[] = {
- 0x0300, 0x0308, 0x0310, 0x0318, 0x0320, 0x0328, 0x0330, 0x0338,
- 0x0340, 0x0348, 0x0350, 0x0358, 0x0360, 0x0368, 0x0370, 0x0378,
- 0x0380, 0x0388, 0x0390, 0x0398, 0x03a0, 0x03a8, 0x03b0, 0x03b8,
- 0x03c0, 0x03c8, 0x03d0, 0x03d8, 0x03e0, 0x03e8, 0x03f0, 0x03f8
-};
-#define NUM_IOPORTS ARRAY_SIZE(wd7000_iobase)
-
-static const short wd7000_irq[] = { 3, 4, 5, 7, 9, 10, 11, 12, 14, 15 };
-#define NUM_IRQS ARRAY_SIZE(wd7000_irq)
-
-static const short wd7000_dma[] = { 5, 6, 7 };
-#define NUM_DMAS ARRAY_SIZE(wd7000_dma)
-
-/*
- * The following is set up by wd7000_detect, and used thereafter for
- * proc and other global lookups
- */
-
-#define UNITS 8
-static struct Scsi_Host *wd7000_host[UNITS];
-
-#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
-#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
-
-/*
- * Standard Adapter Configurations - used by wd7000_detect
- */
-typedef struct {
- short irq; /* IRQ level */
- short dma; /* DMA channel */
- unsigned iobase; /* I/O base address */
- short bus_on; /* Time that WD7000 spends on the AT-bus when */
- /* transferring data. BIOS default is 8000ns. */
-	short bus_off;		/* Time that WD7000 spends OFF THE BUS */
-				/* between bursts while transferring data. */
- /* BIOS default is 1875ns */
-} Config;
-
-/*
- * Add here your configuration...
- */
-static Config configs[] = {
- {15, 6, 0x350, BUS_ON, BUS_OFF}, /* defaults for single adapter */
- {11, 5, 0x320, BUS_ON, BUS_OFF}, /* defaults for second adapter */
- {7, 6, 0x350, BUS_ON, BUS_OFF}, /* My configuration (Zaga) */
- {-1, -1, 0x0, BUS_ON, BUS_OFF} /* Empty slot */
-};
-#define NUM_CONFIGS ARRAY_SIZE(configs)
-
-/*
- * The following list defines strings to look for in the BIOS that identify
- * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
- * added for the Future Domain version.
- */
-typedef struct signature {
- const char *sig; /* String to look for */
- unsigned long ofs; /* offset from BIOS base address */
- unsigned len; /* length of string */
-} Signature;
-
-static const Signature signatures[] = {
- {"SSTBIOS", 0x0000d, 7} /* "SSTBIOS" @ offset 0x0000d */
-};
-#define NUM_SIGNATURES ARRAY_SIZE(signatures)
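A sketch of how the signature table would be used during detection; wd7000_check_bios_sketch() is an illustrative name. 'bios' is assumed to be a CPU-visible mapping of one of the wd7000_biosaddr[] ROM windows, and how that mapping is obtained is outside this sketch.

static int wd7000_check_bios_sketch(const unsigned char *bios)
{
        int sig;

        for (sig = 0; sig < NUM_SIGNATURES; sig++)
                if (!memcmp(bios + signatures[sig].ofs,
                            signatures[sig].sig, signatures[sig].len))
                        return 1;       /* matches a FASST2 BIOS signature */
        return 0;                       /* no known signature at this base */
}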
-
-
-/*
- * I/O Port Offsets and Bit Definitions
- * 4 addresses are used. Those not defined here are reserved.
- */
-#define ASC_STAT 0 /* Status, Read */
-#define ASC_COMMAND 0 /* Command, Write */
-#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
-#define ASC_INTR_ACK 1 /* Acknowledge, Write */
-#define ASC_CONTROL 2 /* Control, Write */
-
-/*
- * ASC Status Port
- */
-#define INT_IM 0x80 /* Interrupt Image Flag */
-#define CMD_RDY 0x40 /* Command Port Ready */
-#define CMD_REJ 0x20 /* Command Port Byte Rejected */
-#define ASC_INIT 0x10 /* ASC Initialized Flag */
-#define ASC_STATMASK 0xf0 /* The lower 4 bits are reserved */
-
-/*
- * COMMAND opcodes
- *
- * Unfortunately, I have no idea how to properly use some of these commands,
- * as the OEM manual does not make it clear. I have not been able to use
- * enable/disable unsolicited interrupts or the reset commands with any
- * discernible effect whatsoever. I think they may be related to certain
- * ICB commands, but again, the OEM manual doesn't make that clear.
- */
-#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
-#define INITIALIZATION 1 /* initialization (10 bytes) */
-#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
-#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
-#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
-#define SOFT_RESET 5 /* SCSI bus soft reset */
-#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
-#define START_OGMB 0x80 /* start command in OGMB (n) */
-#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
- /* where (n) = lower 6 bits */
-/*
- * For INITIALIZATION:
- */
-typedef struct initCmd {
- unchar op; /* command opcode (= 1) */
- unchar ID; /* Adapter's SCSI ID */
- unchar bus_on; /* Bus on time, x 125ns (see below) */
- unchar bus_off; /* Bus off time, "" "" */
- unchar rsvd; /* Reserved */
- unchar mailboxes[3]; /* Address of Mailboxes, MSB first */
- unchar ogmbs; /* Number of outgoing MBs, max 64, 0,1 = 1 */
- unchar icmbs; /* Number of incoming MBs, "" "" */
-} InitCmd;
-
-/*
- * Interrupt Status Port - also returns diagnostic codes at ASC reset
- *
- * if msb is zero, the lower bits are diagnostic status
- * Diagnostics:
- * 01 No diagnostic error occurred
- * 02 RAM failure
- * 03 FIFO R/W failed
- * 04 SBIC register read/write failed
- * 05 Initialization D-FF failed
- * 06 Host IRQ D-FF failed
- * 07 ROM checksum error
- * Interrupt status (bitwise):
- * 10NNNNNN outgoing mailbox NNNNNN is free
- * 11NNNNNN incoming mailbox NNNNNN needs service
- */
-#define MB_INTR 0xC0 /* Mailbox Service possible/required */
-#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
-#define MB_MASK 0x3f /* mask for mailbox number */
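To make the bit layout above concrete, a sketch of decoding one interrupt-status byte with the masks just defined; decode_intr_status_sketch() is illustrative and not part of the driver.

static void decode_intr_status_sketch(Adapter *host)
{
        unsigned char flags = inb(host->iobase + ASC_INTR_STAT);

        if (!(flags & MB_INTR))
                dprintk("wd7000: diagnostic code 0x%02x\n", flags);
        else if (flags & IMB_INTR)
                dprintk("wd7000: incoming mailbox %d needs service\n",
                        flags & MB_MASK);
        else
                dprintk("wd7000: outgoing mailbox %d is now free\n",
                        flags & MB_MASK);
}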
-
-/*
- * CONTROL port bits
- */
-#define INT_EN 0x08 /* Interrupt Enable */
-#define DMA_EN 0x04 /* DMA Enable */
-#define SCSI_RES 0x02 /* SCSI Reset */
-#define ASC_RES 0x01 /* ASC Reset */
-
-/*
- * Driver data structures:
- * - mb and scbs are required for interfacing with the host adapter.
- * An SCB has extra fields not visible to the adapter; mb's
- * _cannot_ do this, since the adapter assumes they are contiguous in
- * memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
- * to access them.
- * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
- * the additional bytes are used only by the driver.
- * - For now, a pool of SCBs is kept in global storage by this driver;
- *   they are allocated and freed as needed.
- *
- * The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
- * not when it has finished. Since the SCB must be around for completion,
- * problems arise when SCBs correspond to OGMBs, which may be reallocated
- * earlier (or delayed unnecessarily until a command completes).
- * Mailboxes are used as transient data structures, simply for
- * carrying SCB addresses to/from the 7000-FASST2.
- *
- * Note also that since SCBs are not "permanently" associated with mailboxes,
- * there is no need to keep a global list of scsi_cmnd pointers indexed
- * by OGMB. Again, SCBs reference their scsi_cmnds directly, so mailbox
- * indices need not be involved.
- */
-
-/*
- * WD7000-specific scatter/gather element structure
- */
-typedef struct sgb {
- unchar len[3];
- unchar ptr[3]; /* Also SCSI-style - MSB first */
-} Sgb;
-
-typedef struct scb { /* Command Control Block 5.4.1 */
- unchar op; /* Command Control Block Operation Code */
- unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
- /* Outbound data transfer, length is checked */
- /* Inbound data transfer, length is checked */
- /* Logical Unit Number */
- unchar cdb[12]; /* SCSI Command Block */
- volatile unchar status; /* SCSI Return Status */
- volatile unchar vue; /* Vendor Unique Error Code */
- unchar maxlen[3]; /* Maximum Data Transfer Length */
- unchar dataptr[3]; /* SCSI Data Block Pointer */
- unchar linkptr[3]; /* Next Command Link Pointer */
- unchar direc; /* Transfer Direction */
- unchar reserved2[6]; /* Reserved */
- /* end of hardware SCB */
- struct scsi_cmnd *SCpnt;/* scsi_cmnd using this SCB */
- Sgb sgb[WD7000_SG]; /* Scatter/gather list for this SCB */
- Adapter *host; /* host adapter */
- struct scb *next; /* for lists of scbs */
-} Scb;
-
-/*
- * This driver is written to allow host-only commands to be executed.
- * These use a 16-byte block called an ICB. The format is extended by the
- * driver to 18 bytes, to support the status returned in the ICMB and
- * an execution phase code.
- *
- * There are other formats besides these; these are the ones I've tried
- * to use. Formats for some of the defined ICB opcodes are not defined
- * (notably, get/set unsolicited interrupt status) in my copy of the OEM
- * manual, and others are ambiguous/hard to follow.
- */
-#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
-#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
-#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
-#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
-#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
-#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
-#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
- /* 0x87 is reserved */
-#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
-#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
-#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
-#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
-#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
-#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
-#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
-#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
-
-typedef struct icbRecvCmd {
- unchar op;
- unchar IDlun; /* Initiator SCSI ID/lun */
- unchar len[3]; /* command buffer length */
- unchar ptr[3]; /* command buffer address */
- unchar rsvd[7]; /* reserved */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbRecvCmd;
-
-typedef struct icbSendStat {
- unchar op;
- unchar IDlun; /* Target SCSI ID/lun */
- unchar stat; /* (outgoing) completion status byte 1 */
- unchar rsvd[12]; /* reserved */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbSendStat;
-
-typedef struct icbRevLvl {
- unchar op;
- volatile unchar primary; /* primary revision level (returned) */
- volatile unchar secondary; /* secondary revision level (returned) */
- unchar rsvd[12]; /* reserved */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbRevLvl;
-
-typedef struct icbUnsMask { /* I'm totally guessing here */
- unchar op;
- volatile unchar mask[14]; /* mask bits */
-#if 0
- unchar rsvd[12]; /* reserved */
-#endif
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbUnsMask;
-
-typedef struct icbDiag {
- unchar op;
- unchar type; /* diagnostics type code (0-3) */
- unchar len[3]; /* buffer length */
- unchar ptr[3]; /* buffer address */
- unchar rsvd[7]; /* reserved */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbDiag;
-
-#define ICB_DIAG_POWERUP 0 /* Power-up diags only */
-#define ICB_DIAG_WALKING 1 /* walking 1's pattern */
-#define ICB_DIAG_DMA 2 /* DMA - system memory diags */
-#define ICB_DIAG_FULL 3 /* do both 1 & 2 */
-
-typedef struct icbParms {
- unchar op;
- unchar rsvd1; /* reserved */
- unchar len[3]; /* parms buffer length */
- unchar ptr[3]; /* parms buffer address */
- unchar idx[2]; /* index (MSB-LSB) */
- unchar rsvd2[5]; /* reserved */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbParms;
-
-typedef struct icbAny {
- unchar op;
- unchar data[14]; /* format-specific data */
- volatile unchar vue; /* vendor-unique error code */
- volatile unchar status; /* returned (icmb) status */
- volatile unchar phase; /* used by interrupt handler */
-} IcbAny;
-
-typedef union icb {
- unchar op; /* ICB opcode */
- IcbRecvCmd recv_cmd; /* format for receive command */
- IcbSendStat send_stat; /* format for send status */
- IcbRevLvl rev_lvl; /* format for get revision level */
- IcbDiag diag; /* format for execute diagnostics */
- IcbParms eparms; /* format for get/set exec parms */
- IcbAny icb; /* generic format */
- unchar data[18];
-} Icb;
-
-#ifdef MODULE
-static char *wd7000;
-module_param(wd7000, charp, 0);
-#endif
-
-/*
- * Driver SCB structure pool.
- *
- * The SCBs declared here are shared by all host adapters; hence, this
- * structure is not part of the Adapter structure.
- */
-static Scb scbs[MAX_SCBS];
-static Scb *scbfree; /* free list */
-static int freescbs = MAX_SCBS; /* free list counter */
-static spinlock_t scbpool_lock; /* guards the scb free list and count */
-
-/*
- * END of data/declarations - code follows.
- */
-static void __init setup_error(char *mesg, int *ints)
-{
- if (ints[0] == 3)
- printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x\" -> %s\n", ints[1], ints[2], ints[3], mesg);
- else if (ints[0] == 4)
- printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], mesg);
- else
- printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], ints[5], mesg);
-}
-
-
-/*
- * Note: You can now set these options from the kernel's "command line".
- * The syntax is:
- *
- * wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]
- *
- * where BUS_ON and BUS_OFF are given in nanoseconds. BIOS default values
- * are 8000ns for BUS_ON and 1875ns for BUS_OFF.
- * e.g.:
- * wd7000=7,6,0x350
- *
- * will configure the driver for a WD-7000 controller
- * using IRQ 7 and DMA channel 6, at IO base address 0x350.
- */
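/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * the nanosecond values given on the command line are converted to the
 * adapter's 125 ns ticks, exactly as wd7000_setup() below does inline;
 * the BIOS defaults work out to 8000 / 125 = 64 ticks (BUS_ON) and
 * 1875 / 125 = 15 ticks (BUS_OFF).  The helper name is made up.
 */
static inline unchar wd7000_ns_to_ticks(int ns)
{
	return ns / 125;
}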
-static int __init wd7000_setup(char *str)
-{
- static short wd7000_card_num; /* .bss will zero this */
- short i;
- int ints[6];
-
- (void) get_options(str, ARRAY_SIZE(ints), ints);
-
- if (wd7000_card_num >= NUM_CONFIGS) {
- printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
- return 0;
- }
-
- if ((ints[0] < 3) || (ints[0] > 5)) {
- printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
- } else {
- for (i = 0; i < NUM_IRQS; i++)
- if (ints[1] == wd7000_irq[i])
- break;
-
- if (i == NUM_IRQS) {
- setup_error("invalid IRQ.", ints);
- return 0;
- } else
- configs[wd7000_card_num].irq = ints[1];
-
- for (i = 0; i < NUM_DMAS; i++)
- if (ints[2] == wd7000_dma[i])
- break;
-
- if (i == NUM_DMAS) {
- setup_error("invalid DMA channel.", ints);
- return 0;
- } else
- configs[wd7000_card_num].dma = ints[2];
-
- for (i = 0; i < NUM_IOPORTS; i++)
- if (ints[3] == wd7000_iobase[i])
- break;
-
- if (i == NUM_IOPORTS) {
- setup_error("invalid I/O base address.", ints);
- return 0;
- } else
- configs[wd7000_card_num].iobase = ints[3];
-
- if (ints[0] > 3) {
- if ((ints[4] < 500) || (ints[4] > 31875)) {
- setup_error("BUS_ON value is out of range (500" " to 31875 nanoseconds)!", ints);
- configs[wd7000_card_num].bus_on = BUS_ON;
- } else
- configs[wd7000_card_num].bus_on = ints[4] / 125;
- } else
- configs[wd7000_card_num].bus_on = BUS_ON;
-
- if (ints[0] > 4) {
- if ((ints[5] < 500) || (ints[5] > 31875)) {
- setup_error("BUS_OFF value is out of range (500" " to 31875 nanoseconds)!", ints);
- configs[wd7000_card_num].bus_off = BUS_OFF;
- } else
- configs[wd7000_card_num].bus_off = ints[5] / 125;
- } else
- configs[wd7000_card_num].bus_off = BUS_OFF;
-
- if (wd7000_card_num) {
- /* reject settings that clash with an already-configured card */
- for (i = 0; i < wd7000_card_num; i++) {
- if (configs[i].irq == configs[wd7000_card_num].irq) {
- setup_error("duplicated IRQ!", ints);
- return 0;
- }
- if (configs[i].dma == configs[wd7000_card_num].dma) {
- setup_error("duplicated DMA " "channel!", ints);
- return 0;
- }
- if (configs[i].iobase == configs[wd7000_card_num].iobase) {
- setup_error("duplicated I/O " "base address!", ints);
- return 0;
- }
- }
- }
-
- dprintk(KERN_DEBUG "wd7000_setup: IRQ=%d, DMA=%d, I/O=0x%x, "
- "BUS_ON=%dns, BUS_OFF=%dns\n", configs[wd7000_card_num].irq, configs[wd7000_card_num].dma, configs[wd7000_card_num].iobase, configs[wd7000_card_num].bus_on * 125, configs[wd7000_card_num].bus_off * 125);
-
- wd7000_card_num++;
- }
- return 1;
-}
-
-__setup("wd7000=", wd7000_setup);
-
-static inline void any2scsi(unchar * scsi, int any)
-{
- *scsi++ = (unsigned)any >> 16;
- *scsi++ = (unsigned)any >> 8;
- *scsi++ = any;
-}
-
-static inline int scsi2int(unchar * scsi)
-{
- return (scsi[0] << 16) | (scsi[1] << 8) | scsi[2];
-}
-
-static inline void wd7000_enable_intr(Adapter * host)
-{
- host->control |= INT_EN;
- outb(host->control, host->iobase + ASC_CONTROL);
-}
-
-
-static inline void wd7000_enable_dma(Adapter * host)
-{
- unsigned long flags;
- host->control |= DMA_EN;
- outb(host->control, host->iobase + ASC_CONTROL);
-
- flags = claim_dma_lock();
- set_dma_mode(host->dma, DMA_MODE_CASCADE);
- enable_dma(host->dma);
- release_dma_lock(flags);
-
-}
-
-
-#define WAITnexttimeout 200 /* 2 seconds */
-
-static inline short WAIT(unsigned port, unsigned mask, unsigned allof, unsigned noneof)
-{
- unsigned WAITbits;
- unsigned long WAITtimeout = jiffies + WAITnexttimeout;
-
- while (time_before_eq(jiffies, WAITtimeout)) {
- WAITbits = inb(port) & mask;
-
- if (((WAITbits & allof) == allof) && ((WAITbits & noneof) == 0))
- return (0);
- }
-
- return (1);
-}
-
-
-static inline int command_out(Adapter * host, unchar * cmd, int len)
-{
- if (!WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
- while (len--) {
- do {
- outb(*cmd, host->iobase + ASC_COMMAND);
- WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
- } while (inb(host->iobase + ASC_STAT) & CMD_REJ);
-
- cmd++;
- }
-
- return (1);
- }
-
- printk(KERN_WARNING "wd7000 command_out: WAIT failed(%d)\n", len + 1);
-
- return (0);
-}
-
-
-/*
- * This version of alloc_scbs is in preparation for supporting multiple
- * commands per lun and command chaining, by queueing pending commands.
- * We will need to allocate Scbs in blocks since they will wait to be
- * executed so there is the possibility of deadlock otherwise.
- * Also, to keep larger requests from being starved by smaller requests,
- * we limit access to this routine with an internal busy flag, so that
- * the satisfiability of a request is not dependent on the size of the
- * request.
- */
-static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
-{
- Scb *scb, *p = NULL;
- unsigned long flags;
- unsigned long timeout = jiffies + WAITnexttimeout;
- unsigned long now;
- int i;
-
- if (needed <= 0)
- return (NULL); /* sanity check */
-
- spin_unlock_irq(host->host_lock);
-
- retry:
- while (freescbs < needed) {
- timeout = jiffies + WAITnexttimeout;
- do {
- /* FIXME: can we actually just yield here ?? */
- for (now = jiffies; now == jiffies;)
- cpu_relax(); /* wait a jiffy */
- } while (freescbs < needed && time_before_eq(jiffies, timeout));
- /*
- * If we get here with enough free Scbs, we can take them.
- * Otherwise, we timed out and didn't get enough.
- */
- if (freescbs < needed) {
- printk(KERN_ERR "wd7000: can't get enough free SCBs.\n");
- return (NULL);
- }
- }
-
- /* Take the lock, then check we didn't get beaten, if so try again */
- spin_lock_irqsave(&scbpool_lock, flags);
- if (freescbs < needed) {
- spin_unlock_irqrestore(&scbpool_lock, flags);
- goto retry;
- }
-
- scb = scbfree;
- freescbs -= needed;
- for (i = 0; i < needed; i++) {
- p = scbfree;
- scbfree = p->next;
- }
- p->next = NULL;
-
- spin_unlock_irqrestore(&scbpool_lock, flags);
-
- spin_lock_irq(host->host_lock);
- return (scb);
-}
-
-
-static inline void free_scb(Scb * scb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&scbpool_lock, flags);
-
- memset(scb, 0, sizeof(Scb));
- scb->next = scbfree;
- scbfree = scb;
- freescbs++;
-
- spin_unlock_irqrestore(&scbpool_lock, flags);
-}
-
-
-static inline void init_scbs(void)
-{
- int i;
-
- spin_lock_init(&scbpool_lock);
-
- /* This is only ever called before the SCB pool is active */
-
- scbfree = &(scbs[0]);
- memset(scbs, 0, sizeof(scbs));
- for (i = 0; i < MAX_SCBS - 1; i++) {
- scbs[i].next = &(scbs[i + 1]);
- scbs[i].SCpnt = NULL;
- }
- scbs[MAX_SCBS - 1].next = NULL;
- scbs[MAX_SCBS - 1].SCpnt = NULL;
-}
-
-
-static int mail_out(Adapter * host, Scb * scbptr)
-/*
- * Note: this can also be used for ICBs; just cast to the parm type.
- */
-{
- int i, ogmb;
- unsigned long flags;
- unchar start_ogmb;
- Mailbox *ogmbs = host->mb.ogmb;
- int *next_ogmb = &(host->next_ogmb);
-
- dprintk("wd7000_mail_out: 0x%06lx", (long) scbptr);
-
- /* We first look for a free outgoing mailbox */
- spin_lock_irqsave(host->sh->host_lock, flags);
- ogmb = *next_ogmb;
- for (i = 0; i < OGMB_CNT; i++) {
- if (ogmbs[ogmb].status == 0) {
- dprintk(" using OGMB 0x%x", ogmb);
- ogmbs[ogmb].status = 1;
- any2scsi((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);
-
- *next_ogmb = (ogmb + 1) % OGMB_CNT;
- break;
- } else
- ogmb = (ogmb + 1) % OGMB_CNT;
- }
- spin_unlock_irqrestore(host->sh->host_lock, flags);
-
- dprintk(", scb is 0x%06lx", (long) scbptr);
-
- if (i >= OGMB_CNT) {
- /*
- * Alternatively, we might issue the "interrupt on free OGMB",
- * and sleep, but it must be ensured that it isn't the init
- * task running. Instead, this version assumes that the caller
- * will be persistent, and try again. Since it's the adapter
- * that marks OGMB's free, waiting even with interrupts off
- * should work, since they are freed very quickly in most cases.
- */
- dprintk(", no free OGMBs.\n");
- return (0);
- }
-
- wd7000_enable_intr(host);
-
- start_ogmb = START_OGMB | ogmb;
- command_out(host, &start_ogmb, 1);
-
- dprintk(", awaiting interrupt.\n");
-
- return (1);
-}
-
-
-static int make_code(unsigned hosterr, unsigned scsierr)
-{
-#ifdef WD7000_DEBUG
- int in_error = hosterr;
-#endif
-
- switch ((hosterr >> 8) & 0xff) {
- case 0: /* Reserved */
- hosterr = DID_ERROR;
- break;
- case 1: /* Command Complete, no errors */
- hosterr = DID_OK;
- break;
- case 2: /* Command complete, error logged in scb status (scsierr) */
- hosterr = DID_OK;
- break;
- case 4: /* Command failed to complete - timeout */
- hosterr = DID_TIME_OUT;
- break;
- case 5: /* Command terminated; Bus reset by external device */
- hosterr = DID_RESET;
- break;
- case 6: /* Unexpected Command Received w/ host as target */
- hosterr = DID_BAD_TARGET;
- break;
- case 80: /* Unexpected Reselection */
- case 81: /* Unexpected Selection */
- hosterr = DID_BAD_INTR;
- break;
- case 82: /* Abort Command Message */
- hosterr = DID_ABORT;
- break;
- case 83: /* SCSI Bus Software Reset */
- case 84: /* SCSI Bus Hardware Reset */
- hosterr = DID_RESET;
- break;
- default: /* Reserved */
- hosterr = DID_ERROR;
- }
-#ifdef WD7000_DEBUG
- if (scsierr || hosterr)
- dprintk("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n", scsierr, in_error, hosterr);
-#endif
- return (scsierr | (hosterr << 16));
-}
-
-#define wd7000_intr_ack(host) outb (0, host->iobase + ASC_INTR_ACK)
-
-
-static irqreturn_t wd7000_intr(int irq, void *dev_id)
-{
- Adapter *host = (Adapter *) dev_id;
- int flag, icmb, errstatus, icmb_status;
- int host_error, scsi_error;
- Scb *scb; /* for SCSI commands */
- IcbAny *icb; /* for host commands */
- struct scsi_cmnd *SCpnt;
- Mailbox *icmbs = host->mb.icmb;
- unsigned long flags;
-
- spin_lock_irqsave(host->sh->host_lock, flags);
- host->int_counter++;
-
- dprintk("wd7000_intr: irq = %d, host = 0x%06lx\n", irq, (long) host);
-
- flag = inb(host->iobase + ASC_INTR_STAT);
-
- dprintk("wd7000_intr: intr stat = 0x%02x\n", flag);
-
- if (!(inb(host->iobase + ASC_STAT) & INT_IM)) {
- /* NB: these are _very_ possible if IRQ 15 is being used, since
- * it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
- * any interrupt signal into the 8259 which can't be identified
- * comes out as 7 from the 8259, which is 15 to the host. Thus, it
- * is a good thing the WD7000 has an interrupt status port, so we
- * can sort these out. Otherwise, electrical noise and other such
- * problems would be indistinguishable from valid interrupts...
- */
- dprintk("wd7000_intr: phantom interrupt...\n");
- goto ack;
- }
-
- if (!(flag & MB_INTR))
- goto ack;
-
- /* The interrupt is for a mailbox */
- if (!(flag & IMB_INTR)) {
- dprintk("wd7000_intr: free outgoing mailbox\n");
- /*
- * If sleep_on() and the "interrupt on free OGMB" command are
- * used in mail_out(), wake_up() should correspondingly be called
- * here. For now, we don't need to do anything special.
- */
- goto ack;
- }
-
- /* The interrupt is for an incoming mailbox */
- icmb = flag & MB_MASK;
- icmb_status = icmbs[icmb].status;
- if (icmb_status & 0x80) { /* unsolicited - result in ICMB */
- dprintk("wd7000_intr: unsolicited interrupt 0x%02x\n", icmb_status);
- goto ack;
- }
-
- /* Aaaargh! (Zaga) */
- scb = isa_bus_to_virt(scsi2int((unchar *) icmbs[icmb].scbptr));
- icmbs[icmb].status = 0;
- if (scb->op & ICB_OP_MASK) { /* an SCB is done */
- icb = (IcbAny *) scb;
- icb->status = icmb_status;
- icb->phase = 0;
- goto ack;
- }
-
- SCpnt = scb->SCpnt;
- if (--(SCpnt->SCp.phase) <= 0) { /* all scbs are done */
- host_error = scb->vue | (icmb_status << 8);
- scsi_error = scb->status;
- errstatus = make_code(host_error, scsi_error);
- SCpnt->result = errstatus;
-
- free_scb(scb);
-
- SCpnt->scsi_done(SCpnt);
- }
-
- ack:
- dprintk("wd7000_intr: return from interrupt handler\n");
- wd7000_intr_ack(host);
-
- spin_unlock_irqrestore(host->sh->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
-{
- Scb *scb;
- Sgb *sgb;
- unchar *cdb = (unchar *) SCpnt->cmnd;
- unchar idlun;
- short cdblen;
- int nseg;
- Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
-
- cdblen = SCpnt->cmd_len;
- idlun = ((SCpnt->device->id << 5) & 0xe0) | (SCpnt->device->lun & 7);
- SCpnt->scsi_done = done;
- SCpnt->SCp.phase = 1;
- scb = alloc_scbs(SCpnt->device->host, 1);
- scb->idlun = idlun;
- memcpy(scb->cdb, cdb, cdblen);
- scb->direc = 0x40; /* Disable direction check */
-
- scb->SCpnt = SCpnt; /* so we can find stuff later */
- SCpnt->host_scribble = (unchar *) scb;
- scb->host = host;
-
- nseg = scsi_sg_count(SCpnt);
- if (nseg > 1) {
- struct scatterlist *sg;
- unsigned i;
-
- dprintk("Using scatter/gather with %d elements.\n", nseg);
-
- sgb = scb->sgb;
- scb->op = 1;
- any2scsi(scb->dataptr, (int) sgb);
- any2scsi(scb->maxlen, nseg * sizeof(Sgb));
-
- scsi_for_each_sg(SCpnt, sg, nseg, i) {
- any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
- any2scsi(sgb[i].len, sg->length);
- }
- } else {
- scb->op = 0;
- if (nseg) {
- struct scatterlist *sg = scsi_sglist(SCpnt);
- any2scsi(scb->dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
- }
- any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
- }
-
- /* FIXME: drop lock and yield here ? */
-
- while (!mail_out(host, scb))
- cpu_relax(); /* keep trying */
-
- return 0;
-}
-
-static DEF_SCSI_QCMD(wd7000_queuecommand)
-
-static int wd7000_diagnostics(Adapter * host, int code)
-{
- static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
- static unchar buf[256];
- unsigned long timeout;
-
- icb.type = code;
- any2scsi(icb.len, sizeof(buf));
- any2scsi(icb.ptr, (int) &buf);
- icb.phase = 1;
- /*
- * This routine is only called at init, so there should be OGMBs
- * available. I'm assuming so here. If this is going to
- * fail, I can just let the timeout catch the failure.
- */
- mail_out(host, (struct scb *) &icb);
- timeout = jiffies + WAITnexttimeout; /* wait up to 2 seconds */
- while (icb.phase && time_before(jiffies, timeout)) {
- cpu_relax(); /* wait for completion */
- barrier();
- }
-
- if (icb.phase) {
- printk("wd7000_diagnostics: timed out.\n");
- return (0);
- }
- if (make_code(icb.vue | (icb.status << 8), 0)) {
- printk("wd7000_diagnostics: failed (0x%02x,0x%02x)\n", icb.vue, icb.status);
- return (0);
- }
-
- return (1);
-}
-
-
-static int wd7000_adapter_reset(Adapter * host)
-{
- InitCmd init_cmd = {
- INITIALIZATION,
- 7,
- host->bus_on,
- host->bus_off,
- 0,
- {0, 0, 0},
- OGMB_CNT,
- ICMB_CNT
- };
- int diag;
- /*
- * Reset the adapter - only. The SCSI bus was initialized at power-up,
- * and we need to do this just so we control the mailboxes, etc.
- */
- outb(ASC_RES, host->iobase + ASC_CONTROL);
- udelay(40); /* reset pulse: this is 40us, only need 25us */
- outb(0, host->iobase + ASC_CONTROL);
- host->control = 0; /* this must always shadow ASC_CONTROL */
-
- if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
- printk(KERN_ERR "wd7000_init: WAIT timed out.\n");
- return -1; /* -1 = not ok */
- }
-
- if ((diag = inb(host->iobase + ASC_INTR_STAT)) != 1) {
- printk("wd7000_init: ");
-
- switch (diag) {
- case 2:
- printk(KERN_ERR "RAM failure.\n");
- break;
- case 3:
- printk(KERN_ERR "FIFO R/W failed\n");
- break;
- case 4:
- printk(KERN_ERR "SBIC register R/W failed\n");
- break;
- case 5:
- printk(KERN_ERR "Initialization D-FF failed.\n");
- break;
- case 6:
- printk(KERN_ERR "Host IRQ D-FF failed.\n");
- break;
- case 7:
- printk(KERN_ERR "ROM checksum error.\n");
- break;
- default:
- printk(KERN_ERR "diagnostic code 0x%02Xh received.\n", diag);
- }
- return -1;
- }
- /* Clear mailboxes */
- memset(&(host->mb), 0, sizeof(host->mb));
-
- /* Execute init command */
- any2scsi((unchar *) & (init_cmd.mailboxes), (int) &(host->mb));
- if (!command_out(host, (unchar *) & init_cmd, sizeof(init_cmd))) {
- printk(KERN_ERR "wd7000_adapter_reset: adapter initialization failed.\n");
- return -1;
- }
-
- if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) {
- printk("wd7000_adapter_reset: WAIT timed out.\n");
- return -1;
- }
- return 0;
-}
-
-static int wd7000_init(Adapter * host)
-{
- if (wd7000_adapter_reset(host) == -1)
- return 0;
-
-
- if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
- printk("wd7000_init: can't get IRQ %d.\n", host->irq);
- return (0);
- }
- if (request_dma(host->dma, "wd7000")) {
- printk("wd7000_init: can't get DMA channel %d.\n", host->dma);
- free_irq(host->irq, host);
- return (0);
- }
- wd7000_enable_dma(host);
- wd7000_enable_intr(host);
-
- if (!wd7000_diagnostics(host, ICB_DIAG_FULL)) {
- free_dma(host->dma);
- free_irq(host->irq, host); /* dev_id must match request_irq() above */
- return (0);
- }
-
- return (1);
-}
-
-
-static void wd7000_revision(Adapter * host)
-{
- static IcbRevLvl icb = { ICB_OP_GET_REVISION };
-
- icb.phase = 1;
- /*
- * Like diagnostics, this is only done at init time, in fact, from
- * wd7000_detect, so there should be OGMBs available. If it fails,
- * the only damage will be that the revision will show up as 0.0,
- * which in turn means that scatter/gather will be disabled.
- */
- mail_out(host, (struct scb *) &icb);
- while (icb.phase) {
- cpu_relax(); /* wait for completion */
- barrier();
- }
- host->rev1 = icb.primary;
- host->rev2 = icb.secondary;
-}
-
-
-static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
-{
- dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
-
- /*
- * Currently this is a no-op
- */
- dprintk("Sorry, this function is currently out of order...\n");
- return (length);
-}
-
-
-static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- Adapter *adapter = (Adapter *)host->hostdata;
- unsigned long flags;
-#ifdef WD7000_DEBUG
- Mailbox *ogmbs, *icmbs;
- short i, count;
-#endif
-
- spin_lock_irqsave(host->host_lock, flags);
- seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
- seq_printf(m, " IO base: 0x%x\n", adapter->iobase);
- seq_printf(m, " IRQ: %d\n", adapter->irq);
- seq_printf(m, " DMA channel: %d\n", adapter->dma);
- seq_printf(m, " Interrupts: %d\n", adapter->int_counter);
- seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
- seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
-
-#ifdef WD7000_DEBUG
- ogmbs = adapter->mb.ogmb;
- icmbs = adapter->mb.icmb;
-
- seq_printf(m, "\nControl port value: 0x%x\n", adapter->control);
- seq_puts(m, "Incoming mailbox:\n");
- seq_printf(m, " size: %d\n", ICMB_CNT);
- seq_puts(m, " queued messages: ");
-
- for (i = count = 0; i < ICMB_CNT; i++)
- if (icmbs[i].status) {
- count++;
- seq_printf(m, "0x%x ", i);
- }
-
- seq_puts(m, count ? "\n" : "none\n");
-
- seq_puts(m, "Outgoing mailbox:\n");
- seq_printf(m, " size: %d\n", OGMB_CNT);
- seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb);
- seq_puts(m, " queued messages: ");
-
- for (i = count = 0; i < OGMB_CNT; i++)
- if (ogmbs[i].status) {
- count++;
- seq_printf(m, "0x%x ", i);
- }
-
- seq_puts(m, count ? "\n" : "none\n");
-#endif
-
- spin_unlock_irqrestore(host->host_lock, flags);
-
- return 0;
-}
-
-
-/*
- * Returns the number of adapters this driver is supporting.
- *
- * The source for hosts.c says to wait to call scsi_register until 100%
- * sure about an adapter. We need to do it a little sooner here; we
- * need the storage set up by scsi_register before wd7000_init, and
- * changing the location of an Adapter structure is more trouble than
- * calling scsi_unregister.
- *
- */
-
-static __init int wd7000_detect(struct scsi_host_template *tpnt)
-{
- short present = 0, biosaddr_ptr, sig_ptr, i, pass;
- short biosptr[NUM_CONFIGS];
- unsigned iobase;
- Adapter *host = NULL;
- struct Scsi_Host *sh;
- int unit = 0;
-
- dprintk("wd7000_detect: started\n");
-
-#ifdef MODULE
- if (wd7000)
- wd7000_setup(wd7000);
-#endif
-
- for (i = 0; i < UNITS; wd7000_host[i++] = NULL);
- for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
-
- tpnt->proc_name = "wd7000";
- tpnt->show_info = &wd7000_show_info;
- tpnt->write_info = wd7000_set_info;
-
- /*
- * Set up SCB free list, which is shared by all adapters
- */
- init_scbs();
-
- for (pass = 0; pass < NUM_CONFIGS; pass++) {
- /*
- * First, search for BIOS SIGNATURE...
- */
- for (biosaddr_ptr = 0; biosaddr_ptr < NUM_ADDRS; biosaddr_ptr++)
- for (sig_ptr = 0; sig_ptr < NUM_SIGNATURES; sig_ptr++) {
- for (i = 0; i < pass; i++)
- if (biosptr[i] == biosaddr_ptr)
- break;
-
- if (i == pass) {
- void __iomem *biosaddr = ioremap(wd7000_biosaddr[biosaddr_ptr] + signatures[sig_ptr].ofs,
- signatures[sig_ptr].len);
- short bios_match = 1;
-
- if (biosaddr)
- bios_match = check_signature(biosaddr, signatures[sig_ptr].sig, signatures[sig_ptr].len);
-
- iounmap(biosaddr);
-
- if (bios_match)
- goto bios_matched;
- }
- }
-
- bios_matched:
- /*
- * BIOS SIGNATURE has been found.
- */
-#ifdef WD7000_DEBUG
- dprintk("wd7000_detect: pass %d\n", pass + 1);
-
- if (biosaddr_ptr == NUM_ADDRS)
- dprintk("WD-7000 SST BIOS not detected...\n");
- else
- dprintk("WD-7000 SST BIOS detected at 0x%lx: checking...\n", wd7000_biosaddr[biosaddr_ptr]);
-#endif
-
- if (configs[pass].irq < 0)
- continue;
-
- if (unit == UNITS)
- continue;
-
- iobase = configs[pass].iobase;
-
- dprintk("wd7000_detect: check IO 0x%x region...\n", iobase);
-
- if (request_region(iobase, 4, "wd7000")) {
-
- dprintk("wd7000_detect: ASC reset (IO 0x%x) ...", iobase);
- /*
- * ASC reset...
- */
- outb(ASC_RES, iobase + ASC_CONTROL);
- msleep(10);
- outb(0, iobase + ASC_CONTROL);
-
- if (WAIT(iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
- dprintk("failed!\n");
- goto err_release;
- } else
- dprintk("ok!\n");
-
- if (inb(iobase + ASC_INTR_STAT) == 1) {
- /*
- * We register here, to get a pointer to the extra space,
- * which we'll use as the Adapter structure (host) for
- * this adapter. It is located just after the registered
- * Scsi_Host structure (sh), and is located by the empty
- * array hostdata.
- */
- sh = scsi_register(tpnt, sizeof(Adapter));
- if (sh == NULL)
- goto err_release;
-
- host = (Adapter *) sh->hostdata;
-
- dprintk("wd7000_detect: adapter allocated at 0x%x\n", (int) host);
- memset(host, 0, sizeof(Adapter));
-
- host->irq = configs[pass].irq;
- host->dma = configs[pass].dma;
- host->iobase = iobase;
- host->int_counter = 0;
- host->bus_on = configs[pass].bus_on;
- host->bus_off = configs[pass].bus_off;
- host->sh = wd7000_host[unit] = sh;
- unit++;
-
- dprintk("wd7000_detect: Trying init WD-7000 card at IO " "0x%x, IRQ %d, DMA %d...\n", host->iobase, host->irq, host->dma);
-
- if (!wd7000_init(host)) /* Initialization failed */
- goto err_unregister;
-
- /*
- * OK from here - we'll use this adapter/configuration.
- */
- wd7000_revision(host); /* important for scatter/gather */
-
- /*
- * For boards before rev 6.0, scatter/gather isn't supported.
- */
- if (host->rev1 < 6)
- sh->sg_tablesize = 1;
-
- present++; /* count it */
-
- if (biosaddr_ptr != NUM_ADDRS)
- biosptr[pass] = biosaddr_ptr;
-
- printk(KERN_INFO "Western Digital WD-7000 (rev %d.%d) ", host->rev1, host->rev2);
- printk("using IO 0x%x, IRQ %d, DMA %d.\n", host->iobase, host->irq, host->dma);
- printk(" BUS_ON time: %dns, BUS_OFF time: %dns\n", host->bus_on * 125, host->bus_off * 125);
- }
- } else
- dprintk("wd7000_detect: IO 0x%x region already allocated!\n", iobase);
-
- continue;
-
- err_unregister:
- scsi_unregister(sh);
- err_release:
- release_region(iobase, 4);
-
- }
-
- if (!present)
- printk("Failed initialization of WD-7000 SCSI card!\n");
-
- return (present);
-}
-
-static int wd7000_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-#if 0
-/*
- * I have absolutely NO idea how to do an abort with the WD7000...
- */
-static int wd7000_abort(Scsi_Cmnd * SCpnt)
-{
- Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
-
- if (inb(host->iobase + ASC_STAT) & INT_IM) {
- printk("wd7000_abort: lost interrupt\n");
- wd7000_intr_handle(host->irq, NULL, NULL);
- return FAILED;
- }
- return FAILED;
-}
-#endif
-
-/*
- * Last resort. Reinitialize the board.
- */
-
-static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
-{
- Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
-
- spin_lock_irq(SCpnt->device->host->host_lock);
-
- if (wd7000_adapter_reset(host) < 0) {
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return FAILED;
- }
-
- wd7000_enable_intr(host);
-
- spin_unlock_irq(SCpnt->device->host->host_lock);
- return SUCCESS;
-}
-
-/*
- * This was borrowed directly from aha1542.c. (Zaga)
- */
-
-static int wd7000_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *ip)
-{
- char b[BDEVNAME_SIZE];
-
- dprintk("wd7000_biosparam: dev=%s, size=%llu, ",
- bdevname(bdev, b), (u64)capacity);
- (void)b; /* silence the unused-variable warning when dprintk() compiles away */
-
- /*
- * try default translation
- */
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = capacity >> 11;
-
- /*
- * for disks >1GB do some guessing
- */
- if (ip[2] >= 1024) {
- int info[3];
-
- /*
- * try to figure out the geometry from the partition table
- */
- if ((scsicam_bios_param(bdev, capacity, info) < 0) || !(((info[0] == 64) && (info[1] == 32)) || ((info[0] == 255) && (info[1] == 63)))) {
- printk("wd7000_biosparam: unable to verify geometry for disk with >1GB.\n" " using extended translation.\n");
-
- ip[0] = 255;
- ip[1] = 63;
- ip[2] = (unsigned long) capacity / (255 * 63);
- } else {
- ip[0] = info[0];
- ip[1] = info[1];
- ip[2] = info[2];
-
- if (info[0] == 255)
- printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
- }
- }
-
- dprintk("bios geometry: head=%d, sec=%d, cyl=%d\n", ip[0], ip[1], ip[2]);
- dprintk("WARNING: check, if the bios geometry is correct.\n");
-
- return (0);
-}
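/*
 * Editor's note (worked example, not part of the original driver): with
 * the default 64-head/32-sector translation a cylinder holds 64 * 32 =
 * 2048 sectors, hence "capacity >> 11" above.  A 1 GB disk (2097152
 * 512-byte sectors) reports 1024 cylinders, exactly the threshold at
 * which the extended 255/63 translation takes over:
 *
 *	2097152 / (255 * 63) = 130 cylinders (rounded down)
 */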
-
-MODULE_AUTHOR("Thomas Wuensche, John Boyd, Miroslav Zagorac");
-MODULE_DESCRIPTION("Driver for the WD7000 series ISA controllers");
-MODULE_LICENSE("GPL");
-
-static struct scsi_host_template driver_template = {
- .proc_name = "wd7000",
- .show_info = wd7000_show_info,
- .write_info = wd7000_set_info,
- .name = "Western Digital WD-7000",
- .detect = wd7000_detect,
- .release = wd7000_release,
- .queuecommand = wd7000_queuecommand,
- .eh_host_reset_handler = wd7000_host_reset,
- .bios_param = wd7000_biosparam,
- .can_queue = WD7000_Q,
- .this_id = 7,
- .sg_tablesize = WD7000_SG,
- .unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
-};
-
-#include "scsi_module.c"
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a003ba26ca6e..a5f10936fb9c 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -583,7 +583,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
{
unsigned long timeout;
- timeout = jiffies + usecs_to_jiffies(255);
+ timeout = jiffies + usecs_to_jiffies(10000);
do {
if (time_after(jiffies, timeout))
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index ac1957dfdf24..322034ab9d37 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -95,7 +95,7 @@ static const struct {
/**
* struct qcom_smd_edge - representing a remote processor
- * @smd: handle to qcom_smd
+ * @dev: device for this edge
* @of_node: of_node handle for information related to this edge
* @edge_id: identifier of this edge
* @remote_pid: identifier of remote processor
@@ -111,7 +111,8 @@ static const struct {
* @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
- struct qcom_smd *smd;
+ struct device dev;
+
struct device_node *of_node;
unsigned edge_id;
unsigned remote_pid;
@@ -135,6 +136,8 @@ struct qcom_smd_edge {
struct work_struct state_work;
};
+#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev)
+
/*
* SMD channel states.
*/
@@ -197,20 +200,6 @@ struct qcom_smd_channel {
void *drvdata;
struct list_head list;
- struct list_head dev_list;
-};
-
-/**
- * struct qcom_smd - smd struct
- * @dev: device struct
- * @num_edges: number of entries in @edges
- * @edges: array of edges to be handled
- */
-struct qcom_smd {
- struct device *dev;
-
- unsigned num_edges;
- struct qcom_smd_edge edges[0];
};
/*
@@ -374,7 +363,7 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0);
- SET_TX_CHANNEL_INFO(channel, tail, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
qcom_smd_signal_channel(channel);
@@ -421,7 +410,7 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
if (channel->state == state)
return;
- dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+ dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);
SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
@@ -891,8 +880,6 @@ static int qcom_smd_dev_remove(struct device *dev)
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
struct qcom_smd_channel *channel = qsdev->channel;
- struct qcom_smd_channel *tmp;
- struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
@@ -911,15 +898,9 @@ static int qcom_smd_dev_remove(struct device *dev)
if (qsdrv->remove)
qsdrv->remove(qsdev);
- /*
- * The client is now gone, close and release all channels associated
- * with this sdev
- */
- list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
- qcom_smd_channel_close(ch);
- list_del(&ch->dev_list);
- ch->qsdev = NULL;
- }
+ /* The client is now gone, close the primary channel */
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
return 0;
}
@@ -973,13 +954,12 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
struct qcom_smd_device *qsdev;
struct qcom_smd_edge *edge = channel->edge;
struct device_node *node;
- struct qcom_smd *smd = edge->smd;
int ret;
if (channel->qsdev)
return -EEXIST;
- dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev)
@@ -990,7 +970,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
edge->of_node->name,
node ? node->name : channel->name);
- qsdev->dev.parent = smd->dev;
+ qsdev->dev.parent = &edge->dev;
qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device;
qsdev->dev.of_node = node;
@@ -1001,7 +981,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
ret = device_register(&qsdev->dev);
if (ret) {
- dev_err(smd->dev, "device_register failed: %d\n", ret);
+ dev_err(&edge->dev, "device_register failed: %d\n", ret);
put_device(&qsdev->dev);
}
@@ -1091,6 +1071,8 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
*
* Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't
* ready.
+ *
+ * Any channels returned must be closed with a call to qcom_smd_close_channel()
*/
struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
const char *name,
@@ -1120,15 +1102,21 @@ struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
return ERR_PTR(ret);
}
- /*
- * Append the list of channel to the channels associated with the sdev
- */
- list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
-
return channel;
}
EXPORT_SYMBOL(qcom_smd_open_channel);
+/**
+ * qcom_smd_close_channel() - close an additionally opened channel
+ * @channel: channel handle, returned by qcom_smd_open_channel()
+ */
+void qcom_smd_close_channel(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
+}
+EXPORT_SYMBOL(qcom_smd_close_channel);
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -1139,20 +1127,18 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
char *name)
{
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
size_t fifo_size;
size_t info_size;
void *fifo_base;
void *info;
int ret;
- channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
- channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
if (!channel->name)
return ERR_PTR(-ENOMEM);
@@ -1175,7 +1161,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
channel->info = info;
} else {
- dev_err(smd->dev,
+ dev_err(&edge->dev,
"channel info of size %zu not supported\n", info_size);
ret = -EINVAL;
goto free_name_and_channel;
@@ -1190,7 +1176,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
/* The channel consists of an rx and a tx fifo of equal size */
fifo_size /= 2;
- dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
name, info_size, fifo_size);
channel->tx_fifo = fifo_base;
@@ -1202,8 +1188,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
return channel;
free_name_and_channel:
- devm_kfree(smd->dev, channel->name);
- devm_kfree(smd->dev, channel);
+ devm_kfree(&edge->dev, channel->name);
+ devm_kfree(&edge->dev, channel);
return ERR_PTR(ret);
}
@@ -1219,7 +1205,6 @@ static void qcom_channel_scan_worker(struct work_struct *work)
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
unsigned long flags;
unsigned fifo_id;
unsigned info_id;
@@ -1263,7 +1248,7 @@ static void qcom_channel_scan_worker(struct work_struct *work)
list_add(&channel->list, &edge->channels);
spin_unlock_irqrestore(&edge->channels_lock, flags);
- dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
wake_up_interruptible(&edge->new_channel_event);
@@ -1350,22 +1335,6 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->of_node = of_node_get(node);
- irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- dev_err(dev, "required smd interrupt missing\n");
- return -EINVAL;
- }
-
- ret = devm_request_irq(dev, irq,
- qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
- node->name, edge);
- if (ret) {
- dev_err(dev, "failed to request smd irq\n");
- return ret;
- }
-
- edge->irq = irq;
-
key = "qcom,smd-edge";
ret = of_property_read_u32(node, key, &edge->edge_id);
if (ret) {
@@ -1400,18 +1369,121 @@ static int qcom_smd_parse_edge(struct device *dev,
return -EINVAL;
}
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
return 0;
}
-static int qcom_smd_probe(struct platform_device *pdev)
+/*
+ * Release function for an edge.
+ * Reset the state of each associated channel and free the edge context.
+ */
+static void qcom_smd_edge_release(struct device *dev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_RX_CHANNEL_INFO(channel, head, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
+ }
+
+ kfree(edge);
+}
+
+/**
+ * qcom_smd_register_edge() - register an edge based on a device_node
+ * @parent: parent device for the edge
+ * @node: device_node describing the edge
+ *
+ * Returns an edge reference, or negative ERR_PTR() on failure.
+ */
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
{
struct qcom_smd_edge *edge;
- struct device_node *node;
- struct qcom_smd *smd;
- size_t array_size;
- int num_edges;
int ret;
- int i = 0;
+
+ edge = kzalloc(sizeof(*edge), GFP_KERNEL);
+ if (!edge)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&edge->new_channel_event);
+
+ edge->dev.parent = parent;
+ edge->dev.release = qcom_smd_edge_release;
+ dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
+ ret = device_register(&edge->dev);
+ if (ret) {
+ pr_err("failed to register smd edge\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = qcom_smd_parse_edge(&edge->dev, node, edge);
+ if (ret) {
+ dev_err(&edge->dev, "failed to parse smd edge\n");
+ goto unregister_dev;
+ }
+
+ schedule_work(&edge->scan_work);
+
+ return edge;
+
+unregister_dev:
+ put_device(&edge->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_smd_register_edge);
+
+static int qcom_smd_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+
+ return 0;
+}
+
+/**
+ * qcom_smd_unregister_edge() - release an edge and its children
+ * @edge: edge reference acquired from qcom_smd_register_edge
+ */
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ int ret;
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+
+ ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
+ if (ret)
+ dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
+
+ device_unregister(&edge->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smd_unregister_edge);
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
void *p;
/* Wait for smem */
@@ -1419,29 +1491,17 @@ static int qcom_smd_probe(struct platform_device *pdev)
if (PTR_ERR(p) == -EPROBE_DEFER)
return PTR_ERR(p);
- num_edges = of_get_available_child_count(pdev->dev.of_node);
- array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
- smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
- if (!smd)
- return -ENOMEM;
- smd->dev = &pdev->dev;
-
- smd->num_edges = num_edges;
- for_each_available_child_of_node(pdev->dev.of_node, node) {
- edge = &smd->edges[i++];
- edge->smd = smd;
- init_waitqueue_head(&edge->new_channel_event);
-
- ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
- if (ret)
- continue;
+ for_each_available_child_of_node(pdev->dev.of_node, node)
+ qcom_smd_register_edge(&pdev->dev, node);
- schedule_work(&edge->scan_work);
- }
+ return 0;
+}
- platform_set_drvdata(pdev, smd);
+static int qcom_smd_remove_edge(struct device *dev, void *data)
+{
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
- return 0;
+ return qcom_smd_unregister_edge(edge);
}
/*
@@ -1450,28 +1510,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
*/
static int qcom_smd_remove(struct platform_device *pdev)
{
- struct qcom_smd_channel *channel;
- struct qcom_smd_edge *edge;
- struct qcom_smd *smd = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < smd->num_edges; i++) {
- edge = &smd->edges[i];
-
- disable_irq(edge->irq);
- cancel_work_sync(&edge->scan_work);
- cancel_work_sync(&edge->state_work);
-
- /* No need to lock here, because the writer is gone */
- list_for_each_entry(channel, &edge->channels, list) {
- if (!channel->qsdev)
- continue;
+ int ret;
- qcom_smd_destroy_device(channel);
- }
- }
+ ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);
- return 0;
+ return ret;
}
static const struct of_device_id qcom_smd_of_match[] = {
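Editor's note: a minimal usage sketch (not part of this patch) of the qcom_smd_register_edge()/qcom_smd_unregister_edge() API added above, assuming a parent platform driver with an "smd-edge" child node; the driver name, node name and header list are illustrative assumptions.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smd.h>

static struct qcom_smd_edge *example_edge;

static int example_probe(struct platform_device *pdev)
{
	struct device_node *node;

	/* child node describing the edge; the node name is illustrative */
	node = of_get_child_by_name(pdev->dev.of_node, "smd-edge");
	if (!node)
		return -ENODEV;

	example_edge = qcom_smd_register_edge(&pdev->dev, node);
	of_node_put(node);

	return PTR_ERR_OR_ZERO(example_edge);
}

static int example_remove(struct platform_device *pdev)
{
	/* tears down the edge device and every channel device below it */
	return qcom_smd_unregister_edge(example_edge);
}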
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 2e1aa9f130f4..18ec52f2078a 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -740,7 +740,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
if (hwlock_id < 0) {
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
return hwlock_id;
}
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 44842a205e4b..7acd1517dd37 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -27,6 +27,7 @@ struct rockchip_domain_info {
int req_mask;
int idle_mask;
int ack_mask;
+ bool active_wakeup;
};
struct rockchip_pmu_info {
@@ -75,23 +76,24 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
-#define DOMAIN(pwr, status, req, idle, ack) \
+#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
{ \
.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
.status_mask = (status >= 0) ? BIT(status) : 0, \
.req_mask = (req >= 0) ? BIT(req) : 0, \
.idle_mask = (idle >= 0) ? BIT(idle) : 0, \
.ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .active_wakeup = wakeup, \
}
-#define DOMAIN_RK3288(pwr, status, req) \
- DOMAIN(pwr, status, req, req, (req) + 16)
+#define DOMAIN_RK3288(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
-#define DOMAIN_RK3368(pwr, status, req) \
- DOMAIN(pwr, status, req, (req) + 16, req)
+#define DOMAIN_RK3368(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
-#define DOMAIN_RK3399(pwr, status, req) \
- DOMAIN(pwr, status, req, req, req)
+#define DOMAIN_RK3399(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, req, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -295,6 +297,17 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
pm_clk_destroy(dev);
}
+static bool rockchip_active_wakeup(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+
+ genpd = pd_to_genpd(dev->pm_domain);
+ pd = container_of(genpd, struct rockchip_pm_domain, genpd);
+
+ return pd->info->active_wakeup;
+}
+
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
@@ -415,6 +428,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&pd->genpd, NULL, false);
@@ -623,48 +637,48 @@ err_out:
}
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3),
- [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6),
- [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2),
+ [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1),
- [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14),
- [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17),
- [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3),
- [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4),
- [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5),
- [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6),
- [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24),
- [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12),
- [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22),
- [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27),
- [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
};
static const struct rockchip_pmu_info rk3288_pmu = {
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 71c834f3847e..7792ed88d80b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -967,8 +967,8 @@ static void tegra_io_rail_unprepare(void)
int tegra_io_rail_power_on(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -977,15 +977,9 @@ int tegra_io_rail_power_on(unsigned int id)
if (err)
goto error;
- mask = 1 << bit;
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_OFF;
- tegra_pmc_writel(value, request);
-
- err = tegra_io_rail_poll(status, mask, 0, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
if (err) {
pr_info("tegra_io_rail_poll() failed: %d\n", err);
goto error;
@@ -1002,8 +996,8 @@ EXPORT_SYMBOL(tegra_io_rail_power_on);
int tegra_io_rail_power_off(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -1014,15 +1008,9 @@ int tegra_io_rail_power_off(unsigned int id)
goto error;
}
- mask = 1 << bit;
-
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_ON;
- tegra_pmc_writel(value, request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
- err = tegra_io_rail_poll(status, mask, mask, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
if (err)
goto error;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d6fb8d4b7786..b7995474148c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -153,6 +153,16 @@ config SPI_BCM63XX_HSSPI
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
+config SPI_BCM_QSPI
+ tristate "Broadcom BSPI and MSPI controller support"
+ depends on ARCH_BRCMSTB || ARCH_BCM || ARCH_BCM_IPROC || COMPILE_TEST
+ default ARCH_BCM_IPROC
+ help
+ Enables support for the Broadcom SPI flash (BSPI) and MSPI controllers.
+ Select this option on any of the BRCMSTB, iProc NSP and NS2 SoC
+ based platforms. The driver supports both the BSPI interface used for
+ spi-nor flash devices and the generic MSPI interface.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -285,6 +295,13 @@ config SPI_IMX
This enables using the Freescale i.MX SPI controllers in master
mode.
+config SPI_JCORE
+ tristate "J-Core SPI Master"
+ depends on OF && (SUPERH || COMPILE_TEST)
+ help
+ This enables support for the SPI master controller in the J-Core
+ synthesizable, open source SoC.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
@@ -549,7 +566,7 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on HAVE_CLK && HAS_DMA
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -631,6 +648,13 @@ config SPI_TEGRA20_SLINK
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+config SPI_THUNDERX
+ tristate "Cavium ThunderX SPI controller"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ help
+ SPI host driver for the hardware found on Cavium ThunderX
+ SoCs.
+
config SPI_TOPCLIFF_PCH
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 185367ef6576..aa939d955521 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
+obj-$(CONFIG_SPI_BCM_QSPI) += spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
@@ -91,6 +93,8 @@ obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
+spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
+obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
new file mode 100644
index 000000000000..14f9dea3173f
--- /dev/null
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -0,0 +1,1397 @@
+/*
+ * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
+ *
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include "spi-bcm-qspi.h"
+
+#define DRIVER_NAME "bcm_qspi"
+
+
+/* BSPI register offsets */
+#define BSPI_REVISION_ID 0x000
+#define BSPI_SCRATCH 0x004
+#define BSPI_MAST_N_BOOT_CTRL 0x008
+#define BSPI_BUSY_STATUS 0x00c
+#define BSPI_INTR_STATUS 0x010
+#define BSPI_B0_STATUS 0x014
+#define BSPI_B0_CTRL 0x018
+#define BSPI_B1_STATUS 0x01c
+#define BSPI_B1_CTRL 0x020
+#define BSPI_STRAP_OVERRIDE_CTRL 0x024
+#define BSPI_FLEX_MODE_ENABLE 0x028
+#define BSPI_BITS_PER_CYCLE 0x02c
+#define BSPI_BITS_PER_PHASE 0x030
+#define BSPI_CMD_AND_MODE_BYTE 0x034
+#define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
+#define BSPI_BSPI_XOR_VALUE 0x03c
+#define BSPI_BSPI_XOR_ENABLE 0x040
+#define BSPI_BSPI_PIO_MODE_ENABLE 0x044
+#define BSPI_BSPI_PIO_IODIR 0x048
+#define BSPI_BSPI_PIO_DATA 0x04c
+
+/* RAF register offsets */
+#define BSPI_RAF_START_ADDR 0x100
+#define BSPI_RAF_NUM_WORDS 0x104
+#define BSPI_RAF_CTRL 0x108
+#define BSPI_RAF_FULLNESS 0x10c
+#define BSPI_RAF_WATERMARK 0x110
+#define BSPI_RAF_STATUS 0x114
+#define BSPI_RAF_READ_DATA 0x118
+#define BSPI_RAF_WORD_CNT 0x11c
+#define BSPI_RAF_CURR_ADDR 0x120
+
+/* Override mode masks */
+#define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
+#define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
+#define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
+#define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
+#define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
+
+#define BSPI_ADDRLEN_3BYTES 3
+#define BSPI_ADDRLEN_4BYTES 4
+
+#define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
+
+#define BSPI_RAF_CTRL_START_MASK BIT(0)
+#define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
+
+#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
+#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
+
+#define BSPI_READ_LENGTH 256
+
+/* MSPI register offsets */
+#define MSPI_SPCR0_LSB 0x000
+#define MSPI_SPCR0_MSB 0x004
+#define MSPI_SPCR1_LSB 0x008
+#define MSPI_SPCR1_MSB 0x00c
+#define MSPI_NEWQP 0x010
+#define MSPI_ENDQP 0x014
+#define MSPI_SPCR2 0x018
+#define MSPI_MSPI_STATUS 0x020
+#define MSPI_CPTQP 0x024
+#define MSPI_SPCR3 0x028
+#define MSPI_TXRAM 0x040
+#define MSPI_RXRAM 0x0c0
+#define MSPI_CDRAM 0x140
+#define MSPI_WRITE_LOCK 0x180
+
+#define MSPI_MASTER_BIT BIT(7)
+
+#define MSPI_NUM_CDRAM 16
+#define MSPI_CDRAM_CONT_BIT BIT(7)
+#define MSPI_CDRAM_BITSE_BIT BIT(6)
+#define MSPI_CDRAM_PCS 0xf
+
+#define MSPI_SPCR2_SPE BIT(6)
+#define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
+
+#define MSPI_MSPI_STATUS_SPIF BIT(0)
+
+#define INTR_BASE_BIT_SHIFT 0x02
+#define INTR_COUNT 0x07
+
+#define NUM_CHIPSELECT 4
+#define QSPI_SPBR_MIN 8U
+#define QSPI_SPBR_MAX 255U
+
+#define OPCODE_DIOR 0xBB
+#define OPCODE_QIOR 0xEB
+#define OPCODE_DIOR_4B 0xBC
+#define OPCODE_QIOR_4B 0xEC
+
+#define MAX_CMD_SIZE 6
+
+#define ADDR_4MB_MASK GENMASK(22, 0)
+
+/* stop at end of transfer, no other reason */
+#define TRANS_STATUS_BREAK_NONE 0
+/* stop at end of spi_message */
+#define TRANS_STATUS_BREAK_EOM 1
+/* stop at end of spi_transfer if delay */
+#define TRANS_STATUS_BREAK_DELAY 2
+/* stop at end of spi_transfer if cs_change */
+#define TRANS_STATUS_BREAK_CS_CHANGE 4
+/* stop if we run out of bytes */
+#define TRANS_STATUS_BREAK_NO_BYTES 8
+
+/* events that make us stop filling TX slots */
+#define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
+ TRANS_STATUS_BREAK_DELAY | \
+ TRANS_STATUS_BREAK_CS_CHANGE)
+
+/* events that make us deassert CS */
+#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
+ TRANS_STATUS_BREAK_CS_CHANGE)
+
+struct bcm_qspi_parms {
+ u32 speed_hz;
+ u8 mode;
+ u8 bits_per_word;
+};
+
+struct bcm_xfer_mode {
+ bool flex_mode;
+ unsigned int width;
+ unsigned int addrlen;
+ unsigned int hp;
+};
+
+enum base_type {
+ MSPI,
+ BSPI,
+ CHIP_SELECT,
+ BASEMAX,
+};
+
+enum irq_source {
+ SINGLE_L2,
+ MUXED_L1,
+};
+
+struct bcm_qspi_irq {
+ const char *irq_name;
+ const irq_handler_t irq_handler;
+ int irq_source;
+ u32 mask;
+};
+
+struct bcm_qspi_dev_id {
+ const struct bcm_qspi_irq *irqp;
+ void *dev;
+};
+
+struct qspi_trans {
+ struct spi_transfer *trans;
+ int byte;
+};
+
+struct bcm_qspi {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct clk *clk;
+ u32 base_clk;
+ u32 max_speed_hz;
+ void __iomem *base[BASEMAX];
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct bcm_qspi_soc_intc *soc_intc;
+
+ struct bcm_qspi_parms last_parms;
+ struct qspi_trans trans_pos;
+ int curr_cs;
+ int bspi_maj_rev;
+ int bspi_min_rev;
+ int bspi_enabled;
+ struct spi_flash_read_message *bspi_rf_msg;
+ u32 bspi_rf_msg_idx;
+ u32 bspi_rf_msg_len;
+ u32 bspi_rf_msg_status;
+ struct bcm_xfer_mode xfer_mode;
+ u32 s3_strap_override_ctrl;
+ bool bspi_mode;
+ bool big_endian;
+ int num_irqs;
+ struct bcm_qspi_dev_id *dev_ids;
+ struct completion mspi_done;
+ struct completion bspi_done;
+};
+
+static inline bool has_bspi(struct bcm_qspi *qspi)
+{
+ return qspi->bspi_mode;
+}
+
+/* Read a QSPI controller register */
+static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
+ unsigned int offset)
+{
+ return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
+}
+
+/* Write a QSPI controller register */
+static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
+ unsigned int offset, unsigned int data)
+{
+ bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
+}
+
+/* BSPI helpers */
+static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
+{
+ int i;
+
+ /* this should normally finish within 10us */
+ for (i = 0; i < 1000; i++) {
+ if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
+ return 0;
+ udelay(1);
+ }
+ dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
+ return -EIO;
+}
+
+static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
+{
+ if (qspi->bspi_maj_rev < 4)
+ return true;
+ return false;
+}
+
+static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
+{
+ bcm_qspi_bspi_busy_poll(qspi);
+ /* Force rising edge for the b0/b1 'flush' field */
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
+}
+
+static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
+{
+ return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
+ BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
+}
+
+static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
+{
+ u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);
+
+ /* BSPI v3 LR is LE only, convert data to host endianness */
+ if (bcm_qspi_bspi_ver_three(qspi))
+ data = le32_to_cpu(data);
+
+ return data;
+}
+
+static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
+{
+ bcm_qspi_bspi_busy_poll(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
+ BSPI_RAF_CTRL_START_MASK);
+}
+
+static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
+{
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
+ BSPI_RAF_CTRL_CLEAR_MASK);
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+}
+
+static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
+{
+ u32 *buf = (u32 *)qspi->bspi_rf_msg->buf;
+ u32 data = 0;
+
+ dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_msg,
+ qspi->bspi_rf_msg->buf, qspi->bspi_rf_msg_len);
+ while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
+ data = bcm_qspi_bspi_lr_read_fifo(qspi);
+ if (likely(qspi->bspi_rf_msg_len >= 4) &&
+ IS_ALIGNED((uintptr_t)buf, 4)) {
+ buf[qspi->bspi_rf_msg_idx++] = data;
+ qspi->bspi_rf_msg_len -= 4;
+ } else {
+ /* Read out the remaining bytes one at a time (unaligned or short tail) */
+ u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_msg_idx];
+
+ data = cpu_to_le32(data);
+ while (qspi->bspi_rf_msg_len) {
+ *cbuf++ = (u8)data;
+ data >>= 8;
+ qspi->bspi_rf_msg_len--;
+ }
+ }
+ }
+}
+
+static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
+ int bpp, int bpc, int flex_mode)
+{
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
+ bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
+ bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
+}
+
+static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int width,
+ int addrlen, int hp)
+{
+ int bpc = 0, bpp = 0;
+ u8 command = SPINOR_OP_READ_FAST;
+ int flex_mode = 1, rv = 0;
+ bool spans_4byte = false;
+
+ dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
+ width, addrlen, hp);
+
+ if (addrlen == BSPI_ADDRLEN_4BYTES) {
+ bpp = BSPI_BPP_ADDR_SELECT_MASK;
+ spans_4byte = true;
+ }
+
+ bpp |= 8;
+
+ switch (width) {
+ case SPI_NBITS_SINGLE:
+ if (addrlen == BSPI_ADDRLEN_3BYTES)
+ /* default mode, does not need flex_cmd */
+ flex_mode = 0;
+ else
+ command = SPINOR_OP_READ4_FAST;
+ break;
+ case SPI_NBITS_DUAL:
+ bpc = 0x00000001;
+ if (hp) {
+ bpc |= 0x00010100; /* address and mode are 2-bit */
+ bpp = BSPI_BPP_MODE_SELECT_MASK;
+ command = OPCODE_DIOR;
+ if (spans_4byte)
+ command = OPCODE_DIOR_4B;
+ } else {
+ command = SPINOR_OP_READ_1_1_2;
+ if (spans_4byte)
+ command = SPINOR_OP_READ4_1_1_2;
+ }
+ break;
+ case SPI_NBITS_QUAD:
+ bpc = 0x00000002;
+ if (hp) {
+ bpc |= 0x00020200; /* address and mode are 4-bit */
+ bpp = 4; /* dummy cycles */
+ bpp |= BSPI_BPP_ADDR_SELECT_MASK;
+ command = OPCODE_QIOR;
+ if (spans_4byte)
+ command = OPCODE_QIOR_4B;
+ } else {
+ command = SPINOR_OP_READ_1_1_4;
+ if (spans_4byte)
+ command = SPINOR_OP_READ4_1_1_4;
+ }
+ break;
+ default:
+ rv = -EINVAL;
+ break;
+ }
+
+ if (rv == 0)
+ bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc,
+ flex_mode);
+
+ return rv;
+}
+
+static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi, int width,
+ int addrlen, int hp)
+{
+ u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
+
+ dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
+ width, addrlen, hp);
+
+ switch (width) {
+ case SPI_NBITS_SINGLE:
+ /* clear quad/dual mode */
+ data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
+ BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
+ break;
+
+ case SPI_NBITS_QUAD:
+ /* clear dual mode and set quad mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
+ data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ /* clear quad mode, set dual mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
+ data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (addrlen == BSPI_ADDRLEN_4BYTES)
+ /* set 4-byte mode */
+ data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
+ else
+ /* clear 4-byte mode */
+ data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
+
+ /* set the override mode */
+ data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
+ bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
+ bcm_qspi_bspi_set_xfer_params(qspi, SPINOR_OP_READ_FAST, 0, 0, 0);
+
+ return 0;
+}
+
+static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
+ int width, int addrlen, int hp)
+{
+ int error = 0;
+
+ /* default mode */
+ qspi->xfer_mode.flex_mode = true;
+
+ if (!bcm_qspi_bspi_ver_three(qspi)) {
+ u32 val, mask;
+
+ val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
+ mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
+ if (val & mask || qspi->s3_strap_override_ctrl & mask) {
+ qspi->xfer_mode.flex_mode = false;
+ bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE,
+ 0);
+
+ if ((val | qspi->s3_strap_override_ctrl) &
+ BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL)
+ width = SPI_NBITS_DUAL;
+ else if ((val | qspi->s3_strap_override_ctrl) &
+ BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD)
+ width = SPI_NBITS_QUAD;
+
+ error = bcm_qspi_bspi_set_override(qspi, width, addrlen,
+ hp);
+ }
+ }
+
+ if (qspi->xfer_mode.flex_mode)
+ error = bcm_qspi_bspi_set_flex_mode(qspi, width, addrlen, hp);
+
+ if (error) {
+ dev_warn(&qspi->pdev->dev,
+ "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
+ width, addrlen, hp);
+ } else if (qspi->xfer_mode.width != width ||
+ qspi->xfer_mode.addrlen != addrlen ||
+ qspi->xfer_mode.hp != hp) {
+ qspi->xfer_mode.width = width;
+ qspi->xfer_mode.addrlen = addrlen;
+ qspi->xfer_mode.hp = hp;
+ dev_dbg(&qspi->pdev->dev,
+ "cs:%d %d-lane output, %d-byte address%s\n",
+ qspi->curr_cs,
+ qspi->xfer_mode.width,
+ qspi->xfer_mode.addrlen,
+ qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
+ }
+
+ return error;
+}
+
+static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) || (qspi->bspi_enabled))
+ return;
+
+ qspi->bspi_enabled = 1;
+ if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
+ return;
+
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+ udelay(1);
+ bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
+ udelay(1);
+}
+
+static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
+{
+ if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+ return;
+
+ qspi->bspi_enabled = 0;
+ if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
+ return;
+
+ bcm_qspi_bspi_busy_poll(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
+ udelay(1);
+}
+
+static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
+{
+ u32 data = 0;
+
+ if (qspi->curr_cs == cs)
+ return;
+ if (qspi->base[CHIP_SELECT]) {
+ data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+ data = (data & ~0xff) | (1 << cs);
+ bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+ usleep_range(10, 20);
+ }
+ qspi->curr_cs = cs;
+}
+
+/* MSPI helpers */
+static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
+ const struct bcm_qspi_parms *xp)
+{
+ u32 spcr, spbr = 0;
+
+ if (xp->speed_hz)
+ spbr = qspi->base_clk / (2 * xp->speed_hz);
+
+ spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
+
+ spcr = MSPI_MASTER_BIT;
+ /* the bits-per-word field is written as 0 for 16-bit transfers */
+ if (xp->bits_per_word != 16)
+ spcr |= xp->bits_per_word << 2;
+ spcr |= xp->mode & 3;
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
+
+ qspi->last_parms = *xp;
+}
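/*
 * A minimal sketch (not part of the patch) of the effective SCK rate that
 * the SPBR clamp above produces, assuming a hypothetical 100 MHz base clock;
 * example_sck_hz() is illustrative only.
 */
static u32 example_sck_hz(u32 base_clk, u32 speed_hz)
{
	u32 spbr = clamp_val(base_clk / (2 * speed_hz),
			     QSPI_SPBR_MIN, QSPI_SPBR_MAX);

	return base_clk / (2 * spbr);	/* SCK actually generated */
}
/*
 * example_sck_hz(100000000, 25000000) == 6250000: SPBR 2 is clamped up to 8,
 * so the fastest SCK this block can generate is base_clk / 16; likewise SPBR
 * is capped at 255, which bounds the slowest rate at base_clk / 510.
 */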
+
+static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct bcm_qspi_parms xp;
+
+ xp.speed_hz = trans->speed_hz;
+ xp.bits_per_word = trans->bits_per_word;
+ xp.mode = spi->mode;
+
+ bcm_qspi_hw_set_parms(qspi, &xp);
+}
+
+static int bcm_qspi_setup(struct spi_device *spi)
+{
+ struct bcm_qspi_parms *xp;
+
+ if (spi->bits_per_word > 16)
+ return -EINVAL;
+
+ xp = spi_get_ctldata(spi);
+ if (!xp) {
+ xp = kzalloc(sizeof(*xp), GFP_KERNEL);
+ if (!xp)
+ return -ENOMEM;
+ spi_set_ctldata(spi, xp);
+ }
+ xp->speed_hz = spi->max_speed_hz;
+ xp->mode = spi->mode;
+
+ if (spi->bits_per_word)
+ xp->bits_per_word = spi->bits_per_word;
+ else
+ xp->bits_per_word = 8;
+
+ return 0;
+}
+
+static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
+ struct qspi_trans *qt, int flags)
+{
+ int ret = TRANS_STATUS_BREAK_NONE;
+
+ /* count the last transferred bytes */
+ if (qt->trans->bits_per_word <= 8)
+ qt->byte++;
+ else
+ qt->byte += 2;
+
+ if (qt->byte >= qt->trans->len) {
+ /* we're at the end of the spi_transfer */
+
+ /* in TX mode, need to pause for a delay or CS change */
+ if (qt->trans->delay_usecs &&
+ (flags & TRANS_STATUS_BREAK_DELAY))
+ ret |= TRANS_STATUS_BREAK_DELAY;
+ if (qt->trans->cs_change &&
+ (flags & TRANS_STATUS_BREAK_CS_CHANGE))
+ ret |= TRANS_STATUS_BREAK_CS_CHANGE;
+ if (ret)
+ goto done;
+
+ dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
+ if (spi_transfer_is_last(qspi->master, qt->trans))
+ ret = TRANS_STATUS_BREAK_EOM;
+ else
+ ret = TRANS_STATUS_BREAK_NO_BYTES;
+
+ qt->trans = NULL;
+ }
+
+done:
+ dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
+ qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
+ return ret;
+}
+
+static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
+{
+ u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
+
+ /* mask out reserved bits */
+ return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
+}
+
+static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb_offset = reg_offset + (slot << 3);
+
+ return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
+ ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
+}
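/*
 * A worked example (not part of the patch) of the RXRAM layout assumed above:
 * each slot spans 8 bytes, with the MSB half at MSPI_RXRAM + (slot << 3) and
 * the LSB half 4 bytes later. For slot 2 that is 0x0d0 and 0x0d4; 8-bit data
 * is taken from the LSB half only.
 */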
+
+static void read_from_hw(struct bcm_qspi *qspi, int slots)
+{
+ struct qspi_trans tp;
+ int slot;
+
+ bcm_qspi_disable_bspi(qspi);
+
+ if (slots > MSPI_NUM_CDRAM) {
+ /* should never happen */
+ dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
+ return;
+ }
+
+ tp = qspi->trans_pos;
+
+ for (slot = 0; slot < slots; slot++) {
+ if (tp.trans->bits_per_word <= 8) {
+ u8 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
+ dev_dbg(&qspi->pdev->dev, "RD %02x\n",
+ buf ? buf[tp.byte] : 0xff);
+ } else {
+ u16 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %04x\n",
+ buf ? buf[tp.byte] : 0xffff);
+ }
+
+ update_qspi_trans_byte_count(qspi, &tp,
+ TRANS_STATUS_BREAK_NONE);
+ }
+
+ qspi->trans_pos = tp;
+}
+
+static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
+ u8 val)
+{
+ u32 reg_offset = MSPI_TXRAM + (slot << 3);
+
+ /* mask out reserved bits */
+ bcm_qspi_write(qspi, MSPI, reg_offset, val);
+}
+
+static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
+ u16 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
+ bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
+}
+
+static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
+{
+ return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
+}
+
+static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
+{
+ bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
+}
+
+/* Return number of slots written */
+static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
+{
+ struct qspi_trans tp;
+ int slot = 0, tstatus = 0;
+ u32 mspi_cdram = 0;
+
+ bcm_qspi_disable_bspi(qspi);
+ tp = qspi->trans_pos;
+ bcm_qspi_update_parms(qspi, spi, tp.trans);
+
+ /* Run until end of transfer or reached the max data */
+ while (!tstatus && slot < MSPI_NUM_CDRAM) {
+ if (tp.trans->bits_per_word <= 8) {
+ const u8 *buf = tp.trans->tx_buf;
+ u8 val = buf ? buf[tp.byte] : 0xff;
+
+ write_txram_slot_u8(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
+ } else {
+ const u16 *buf = tp.trans->tx_buf;
+ u16 val = buf ? buf[tp.byte / 2] : 0xffff;
+
+ write_txram_slot_u16(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
+ }
+ mspi_cdram = MSPI_CDRAM_CONT_BIT;
+ mspi_cdram |= (~(1 << spi->chip_select) &
+ MSPI_CDRAM_PCS);
+ mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
+ MSPI_CDRAM_BITSE_BIT);
+
+ write_cdram_slot(qspi, slot, mspi_cdram);
+
+ tstatus = update_qspi_trans_byte_count(qspi, &tp,
+ TRANS_STATUS_BREAK_TX);
+ slot++;
+ }
+
+ if (!slot) {
+ dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
+ goto done;
+ }
+
+ dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
+ bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);
+
+ if (tstatus & TRANS_STATUS_BREAK_DESELECT) {
+ mspi_cdram = read_cdram_slot(qspi, slot - 1) &
+ ~MSPI_CDRAM_CONT_BIT;
+ write_cdram_slot(qspi, slot - 1, mspi_cdram);
+ }
+
+ if (has_bspi(qspi))
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);
+
+ /* Must flush previous writes before starting MSPI operation */
+ mb();
+ /* Set cont | spe | spifie */
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);
+
+done:
+ return slot;
+}
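/*
 * A worked example (not part of the patch) of the CDRAM word built per slot
 * above, for chip select 0 and 8 bits per word:
 *
 *   MSPI_CDRAM_CONT_BIT                 0x80  keep CS asserted after the slot
 *   ~(1 << 0) & MSPI_CDRAM_PCS          0x0e  PCS field, selected CS bit cleared
 *   MSPI_CDRAM_BITSE_BIT (16-bit only)  0x40  not set for 8-bit words
 *
 * giving 0x8e per slot; when the message ends, the final slot is rewritten
 * with CONT cleared (0x0e) so CS deasserts once the queue completes.
 */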
+
+static int bcm_qspi_bspi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ u32 addr = 0, len, len_words;
+ int ret = 0;
+ unsigned long timeo = msecs_to_jiffies(100);
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+
+ if (bcm_qspi_bspi_ver_three(qspi))
+ if (msg->addr_width == BSPI_ADDRLEN_4BYTES)
+ return -EIO;
+
+ bcm_qspi_chip_select(qspi, spi->chip_select);
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
+
+ /*
+ * when using flex mode we need to send
+ * the upper address byte to BSPI
+ */
+ if (bcm_qspi_bspi_ver_three(qspi) == false) {
+ addr = msg->from & 0xff000000;
+ bcm_qspi_write(qspi, BSPI,
+ BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
+ }
+
+ if (!qspi->xfer_mode.flex_mode)
+ addr = msg->from;
+ else
+ addr = msg->from & 0x00ffffff;
+
+ /* set BSPI RAF buffer max read length */
+ len = msg->len;
+ if (len > BSPI_READ_LENGTH)
+ len = BSPI_READ_LENGTH;
+
+ if (bcm_qspi_bspi_ver_three(qspi) == true)
+ addr = (addr + 0xc00000) & 0xffffff;
+
+ reinit_completion(&qspi->bspi_done);
+ bcm_qspi_enable_bspi(qspi);
+ len_words = (len + 3) >> 2;
+ qspi->bspi_rf_msg = msg;
+ qspi->bspi_rf_msg_status = 0;
+ qspi->bspi_rf_msg_idx = 0;
+ qspi->bspi_rf_msg_len = len;
+ dev_dbg(&qspi->pdev->dev, "bspi xfr addr 0x%x len 0x%x", addr, len);
+
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
+ bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
+
+ if (qspi->soc_intc) {
+ /*
+ * clear soc MSPI and BSPI interrupts and enable
+ * BSPI interrupts.
+ */
+ soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+ soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
+ }
+
+ /* Must flush previous writes before starting BSPI operation */
+ mb();
+
+ bcm_qspi_bspi_lr_start(qspi);
+ if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
+ ret = -ETIMEDOUT;
+ } else {
+ /* set the return length for the caller */
+ msg->retlen = len;
+ }
+
+ return ret;
+}
+
+static int bcm_qspi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ int ret = 0;
+ bool mspi_read = false;
+ u32 io_width, addrlen, addr, len;
+ u_char *buf;
+
+ buf = msg->buf;
+ addr = msg->from;
+ len = msg->len;
+
+ if (bcm_qspi_bspi_ver_three(qspi) == true) {
+ /*
+ * The address coming into this function is a raw flash offset.
+ * But for BSPI <= V3, we need to convert it to a remapped BSPI
+ * address. If it crosses a 4MB boundary, just fall back to
+ * using MSPI.
+ */
+ addr = (addr + 0xc00000) & 0xffffff;
+
+ if ((~ADDR_4MB_MASK & addr) ^
+ (~ADDR_4MB_MASK & (addr + len - 1)))
+ mspi_read = true;
+ }
+
+ /* non-aligned and very short transfers are handled by MSPI */
+ if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
+ len < 4)
+ mspi_read = true;
+
+ if (mspi_read)
+ /* this makes the m25p80 read fall back to an MSPI read */
+ return -EAGAIN;
+
+ io_width = msg->data_nbits ? msg->data_nbits : SPI_NBITS_SINGLE;
+ addrlen = msg->addr_width;
+ ret = bcm_qspi_bspi_set_mode(qspi, io_width, addrlen, -1);
+
+ if (!ret)
+ ret = bcm_qspi_bspi_flash_read(spi, msg);
+
+ return ret;
+}
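/*
 * A minimal sketch (not part of the patch) of the BSPI v3 remap used above:
 * raw flash offsets are shifted into the BSPI window at 0xc00000 and wrap
 * modulo 16 MB, as the 0xffffff mask implies.
 */
static u32 example_bspi_v3_addr(u32 flash_offset)
{
	return (flash_offset + 0xc00000) & 0xffffff;
}
/*
 * example_bspi_v3_addr(0x010000) == 0xc10000
 * example_bspi_v3_addr(0x400000) == 0x000000   (wraps around the window)
 */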
+
+static int bcm_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *trans)
+{
+ struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ int slots;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ bcm_qspi_chip_select(qspi, spi->chip_select);
+ qspi->trans_pos.trans = trans;
+ qspi->trans_pos.byte = 0;
+
+ while (qspi->trans_pos.byte < trans->len) {
+ reinit_completion(&qspi->mspi_done);
+
+ slots = write_to_hw(qspi, spi);
+ if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
+ dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
+ return -ETIMEDOUT;
+ }
+
+ read_from_hw(qspi, slots);
+ }
+
+ return 0;
+}
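/*
 * A worked example (not part of the patch): with MSPI_NUM_CDRAM == 16 slots,
 * an 8-bit transfer is pushed out in bursts of at most 16 bytes, so a 20-byte
 * transfer takes two write_to_hw()/read_from_hw() rounds (16 + 4) of the loop
 * above, assuming no delay or cs_change break in between.
 */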
+
+static void bcm_qspi_cleanup(struct spi_device *spi)
+{
+ struct bcm_qspi_parms *xp = spi_get_ctldata(spi);
+
+ kfree(xp);
+}
+
+static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
+
+ if (status & MSPI_MSPI_STATUS_SPIF) {
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ /* clear interrupt */
+ status &= ~MSPI_MSPI_STATUS_SPIF;
+ bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
+ if (qspi->soc_intc)
+ soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
+ complete(&qspi->mspi_done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ u32 status = qspi_dev_id->irqp->mask;
+
+ if (qspi->bspi_enabled && qspi->bspi_rf_msg) {
+ bcm_qspi_bspi_lr_data_read(qspi);
+ if (qspi->bspi_rf_msg_len == 0) {
+ qspi->bspi_rf_msg = NULL;
+ if (qspi->soc_intc) {
+ /* disable soc BSPI interrupt */
+ soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
+ false);
+ /* indicate done */
+ status = INTR_BSPI_LR_SESSION_DONE_MASK;
+ }
+
+ if (qspi->bspi_rf_msg_status)
+ bcm_qspi_bspi_lr_clear(qspi);
+ else
+ bcm_qspi_bspi_flush_prefetch_buffers(qspi);
+ }
+
+ if (qspi->soc_intc)
+ /* clear soc BSPI interrupt */
+ soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
+ }
+
+ status &= INTR_BSPI_LR_SESSION_DONE_MASK;
+ if (qspi->bspi_enabled && status && qspi->bspi_rf_msg_len == 0)
+ complete(&qspi->bspi_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+
+ dev_err(&qspi->pdev->dev, "BSPI INT error\n");
+ qspi->bspi_rf_msg_status = -EIO;
+ if (qspi->soc_intc)
+ /* clear soc interrupt */
+ soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);
+
+ complete(&qspi->bspi_done);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
+{
+ struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
+ struct bcm_qspi *qspi = qspi_dev_id->dev;
+ struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (soc_intc) {
+ u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
+
+ if (status & MSPI_DONE)
+ ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
+ else if (status & BSPI_DONE)
+ ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
+ else if (status & BSPI_ERR)
+ ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
+ }
+
+ return ret;
+}
+
+static const struct bcm_qspi_irq qspi_irq_tab[] = {
+ {
+ .irq_name = "spi_lr_fullness_reached",
+ .irq_handler = bcm_qspi_bspi_lr_l2_isr,
+ .mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
+ },
+ {
+ .irq_name = "spi_lr_session_aborted",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
+ },
+ {
+ .irq_name = "spi_lr_impatient",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_IMPATIENT_MASK,
+ },
+ {
+ .irq_name = "spi_lr_session_done",
+ .irq_handler = bcm_qspi_bspi_lr_l2_isr,
+ .mask = INTR_BSPI_LR_SESSION_DONE_MASK,
+ },
+#ifdef QSPI_INT_DEBUG
+ /* this interrupt is for debug purposes only, don't request the IRQ */
+ {
+ .irq_name = "spi_lr_overread",
+ .irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
+ .mask = INTR_BSPI_LR_OVERREAD_MASK,
+ },
+#endif
+ {
+ .irq_name = "mspi_done",
+ .irq_handler = bcm_qspi_mspi_l2_isr,
+ .mask = INTR_MSPI_DONE_MASK,
+ },
+ {
+ .irq_name = "mspi_halted",
+ .irq_handler = bcm_qspi_mspi_l2_isr,
+ .mask = INTR_MSPI_HALTED_MASK,
+ },
+ {
+ /* single muxed L1 interrupt source */
+ .irq_name = "spi_l1_intr",
+ .irq_handler = bcm_qspi_l1_isr,
+ .irq_source = MUXED_L1,
+ .mask = QSPI_INTERRUPTS_ALL,
+ },
+};
+
+static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
+{
+ u32 val = 0;
+
+ val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
+ qspi->bspi_maj_rev = (val >> 8) & 0xff;
+ qspi->bspi_min_rev = val & 0xff;
+ if (!(bcm_qspi_bspi_ver_three(qspi))) {
+ /* Force mapping of BSPI address -> flash offset */
+ bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
+ }
+ qspi->bspi_enabled = 1;
+ bcm_qspi_disable_bspi(qspi);
+ bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
+ bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
+}
+
+static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
+{
+ struct bcm_qspi_parms parms;
+
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
+
+ parms.mode = SPI_MODE_3;
+ parms.bits_per_word = 8;
+ parms.speed_hz = qspi->max_speed_hz;
+ bcm_qspi_hw_set_parms(qspi, &parms);
+
+ if (has_bspi(qspi))
+ bcm_qspi_bspi_init(qspi);
+}
+
+static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
+{
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
+ if (has_bspi(qspi))
+ bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
+
+}
+
+static const struct of_device_id bcm_qspi_of_match[] = {
+ { .compatible = "brcm,spi-bcm-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
+
+int bcm_qspi_probe(struct platform_device *pdev,
+ struct bcm_qspi_soc_intc *soc_intc)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_qspi *qspi;
+ struct spi_master *master;
+ struct resource *res;
+ int irq, ret = 0, num_ints = 0;
+ u32 val;
+ const char *name = NULL;
+ int num_irqs = ARRAY_SIZE(qspi_irq_tab);
+
+ /* We only support device-tree instantiation */
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (!of_match_node(bcm_qspi_of_match, dev->of_node))
+ return -ENODEV;
+
+ master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ if (!master) {
+ dev_err(dev, "error allocating spi_master\n");
+ return -ENOMEM;
+ }
+
+ qspi = spi_master_get_devdata(master);
+ qspi->pdev = pdev;
+ qspi->trans_pos.trans = NULL;
+ qspi->trans_pos.byte = 0;
+ qspi->master = master;
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
+ master->setup = bcm_qspi_setup;
+ master->transfer_one = bcm_qspi_transfer_one;
+ master->spi_flash_read = bcm_qspi_flash_read;
+ master->cleanup = bcm_qspi_cleanup;
+ master->dev.of_node = dev->of_node;
+ master->num_chipselect = NUM_CHIPSELECT;
+
+ qspi->big_endian = of_device_is_big_endian(dev->of_node);
+
+ if (!of_property_read_u32(dev->of_node, "num-cs", &val))
+ master->num_chipselect = val;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
+ if (!res)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mspi");
+
+ if (res) {
+ qspi->base[MSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[MSPI])) {
+ ret = PTR_ERR(qspi->base[MSPI]);
+ goto qspi_probe_err;
+ }
+ } else {
+ goto qspi_probe_err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+ if (res) {
+ qspi->base[BSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[BSPI])) {
+ ret = PTR_ERR(qspi->base[BSPI]);
+ goto qspi_probe_err;
+ }
+ qspi->bspi_mode = true;
+ } else {
+ qspi->bspi_mode = false;
+ }
+
+ dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
+ if (res) {
+ qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[CHIP_SELECT])) {
+ ret = PTR_ERR(qspi->base[CHIP_SELECT]);
+ goto qspi_probe_err;
+ }
+ }
+
+ qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
+ GFP_KERNEL);
+ if (!qspi->dev_ids) {
+ ret = -ENOMEM;
+ goto qspi_probe_err;
+ }
+
+ for (val = 0; val < num_irqs; val++) {
+ irq = -1;
+ name = qspi_irq_tab[val].irq_name;
+ if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
+ /* get the l2 interrupts */
+ irq = platform_get_irq_byname(pdev, name);
+ } else if (!num_ints && soc_intc) {
+ /* all mspi, bspi intrs muxed to one L1 intr */
+ irq = platform_get_irq(pdev, 0);
+ }
+
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq,
+ qspi_irq_tab[val].irq_handler, 0,
+ name,
+ &qspi->dev_ids[val]);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ %s not found\n", name);
+ goto qspi_probe_err;
+ }
+
+ qspi->dev_ids[val].dev = qspi;
+ qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
+ num_ints++;
+ dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
+ qspi_irq_tab[val].irq_name,
+ irq);
+ }
+ }
+
+ if (!num_ints) {
+ dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
+ ret = -EINVAL;
+ goto qspi_probe_err;
+ }
+
+ /*
+ * Some SoCs integrate the SPI controller (e.g., its interrupt
+ * bits) in SoC-specific ways
+ */
+ if (soc_intc) {
+ qspi->soc_intc = soc_intc;
+ soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
+ } else {
+ qspi->soc_intc = NULL;
+ }
+
+ qspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk)) {
+ dev_warn(dev, "unable to get clock\n");
+ ret = PTR_ERR(qspi->clk);
+ goto qspi_probe_err;
+ }
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto qspi_probe_err;
+ }
+
+ qspi->base_clk = clk_get_rate(qspi->clk);
+ qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2);
+
+ bcm_qspi_hw_init(qspi);
+ init_completion(&qspi->mspi_done);
+ init_completion(&qspi->bspi_done);
+ qspi->curr_cs = -1;
+
+ platform_set_drvdata(pdev, qspi);
+
+ qspi->xfer_mode.width = -1;
+ qspi->xfer_mode.addrlen = -1;
+ qspi->xfer_mode.hp = -1;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(dev, "can't register master\n");
+ goto qspi_reg_err;
+ }
+
+ return 0;
+
+qspi_reg_err:
+ bcm_qspi_hw_uninit(qspi);
+ clk_disable_unprepare(qspi->clk);
+qspi_probe_err:
+ spi_master_put(master);
+ kfree(qspi->dev_ids);
+ return ret;
+}
+/* probe function to be called by SoC specific platform driver probe */
+EXPORT_SYMBOL_GPL(bcm_qspi_probe);
+
+int bcm_qspi_remove(struct platform_device *pdev)
+{
+ struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ bcm_qspi_hw_uninit(qspi);
+ clk_disable_unprepare(qspi->clk);
+ kfree(qspi->dev_ids);
+ spi_unregister_master(qspi->master);
+
+ return 0;
+}
+/* function to be called by SoC specific platform driver remove() */
+EXPORT_SYMBOL_GPL(bcm_qspi_remove);
+
+static int __maybe_unused bcm_qspi_suspend(struct device *dev)
+{
+ struct bcm_qspi *qspi = dev_get_drvdata(dev);
+
+ spi_master_suspend(qspi->master);
+ clk_disable(qspi->clk);
+ bcm_qspi_hw_uninit(qspi);
+
+ return 0;
+};
+
+static int __maybe_unused bcm_qspi_resume(struct device *dev)
+{
+ struct bcm_qspi *qspi = dev_get_drvdata(dev);
+ int ret = 0;
+
+ bcm_qspi_hw_init(qspi);
+ bcm_qspi_chip_select(qspi, qspi->curr_cs);
+ if (qspi->soc_intc)
+ /* enable MSPI interrupt */
+ qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
+ true);
+
+ ret = clk_enable(qspi->clk);
+ if (!ret)
+ spi_master_resume(qspi->master);
+
+ return ret;
+}
+
+SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);
+
+/* pm_ops to be called by SoC specific platform driver */
+EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);
+
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("Broadcom QSPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-bcm-qspi.h b/drivers/spi/spi-bcm-qspi.h
new file mode 100644
index 000000000000..7abfc75a3860
--- /dev/null
+++ b/drivers/spi/spi-bcm-qspi.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#ifndef __SPI_BCM_QSPI_H__
+#define __SPI_BCM_QSPI_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/* BSPI interrupt masks */
+#define INTR_BSPI_LR_OVERREAD_MASK BIT(4)
+#define INTR_BSPI_LR_SESSION_DONE_MASK BIT(3)
+#define INTR_BSPI_LR_IMPATIENT_MASK BIT(2)
+#define INTR_BSPI_LR_SESSION_ABORTED_MASK BIT(1)
+#define INTR_BSPI_LR_FULLNESS_REACHED_MASK BIT(0)
+
+#define BSPI_LR_INTERRUPTS_DATA \
+ (INTR_BSPI_LR_SESSION_DONE_MASK | \
+ INTR_BSPI_LR_FULLNESS_REACHED_MASK)
+
+#define BSPI_LR_INTERRUPTS_ERROR \
+ (INTR_BSPI_LR_OVERREAD_MASK | \
+ INTR_BSPI_LR_IMPATIENT_MASK | \
+ INTR_BSPI_LR_SESSION_ABORTED_MASK)
+
+#define BSPI_LR_INTERRUPTS_ALL \
+ (BSPI_LR_INTERRUPTS_ERROR | \
+ BSPI_LR_INTERRUPTS_DATA)
+
+/* MSPI Interrupt masks */
+#define INTR_MSPI_HALTED_MASK BIT(6)
+#define INTR_MSPI_DONE_MASK BIT(5)
+
+#define MSPI_INTERRUPTS_ALL \
+ (INTR_MSPI_DONE_MASK | \
+ INTR_MSPI_HALTED_MASK)
+
+#define QSPI_INTERRUPTS_ALL \
+ (MSPI_INTERRUPTS_ALL | \
+ BSPI_LR_INTERRUPTS_ALL)
+
+struct platform_device;
+struct dev_pm_ops;
+
+enum {
+ MSPI_DONE = 0x1,
+ BSPI_DONE = 0x2,
+ BSPI_ERR = 0x4,
+ MSPI_BSPI_DONE = 0x7
+};
+
+struct bcm_qspi_soc_intc {
+ void (*bcm_qspi_int_ack)(struct bcm_qspi_soc_intc *soc_intc, int type);
+ void (*bcm_qspi_int_set)(struct bcm_qspi_soc_intc *soc_intc, int type,
+ bool en);
+ u32 (*bcm_qspi_get_int_status)(struct bcm_qspi_soc_intc *soc_intc);
+};
+
+/* Read a controller register */
+static inline u32 bcm_qspi_readl(bool be, void __iomem *addr)
+{
+ if (be)
+ return ioread32be(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+/* Write a controller register */
+static inline void bcm_qspi_writel(bool be,
+ unsigned int data, void __iomem *addr)
+{
+ if (be)
+ iowrite32be(data, addr);
+ else
+ writel_relaxed(data, addr);
+}
+
+static inline u32 get_qspi_mask(int type)
+{
+ switch (type) {
+ case MSPI_DONE:
+ return INTR_MSPI_DONE_MASK;
+ case BSPI_DONE:
+ return BSPI_LR_INTERRUPTS_ALL;
+ case MSPI_BSPI_DONE:
+ return QSPI_INTERRUPTS_ALL;
+ case BSPI_ERR:
+ return BSPI_LR_INTERRUPTS_ERROR;
+ }
+
+ return 0;
+}
+
+/* The common driver functions to be called by the SoC platform driver */
+int bcm_qspi_probe(struct platform_device *pdev,
+ struct bcm_qspi_soc_intc *soc_intc);
+int bcm_qspi_remove(struct platform_device *pdev);
+
+/* pm_ops used by the SoC platform driver, called on PM suspend/resume */
+extern const struct dev_pm_ops bcm_qspi_pm_ops;
+
+#endif /* __SPI_BCM_QSPI_H__ */
diff --git a/drivers/spi/spi-brcmstb-qspi.c b/drivers/spi/spi-brcmstb-qspi.c
new file mode 100644
index 000000000000..c7df92e7cf6f
--- /dev/null
+++ b/drivers/spi/spi-brcmstb-qspi.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include "spi-bcm-qspi.h"
+
+static const struct of_device_id brcmstb_qspi_of_match[] = {
+ { .compatible = "brcm,spi-brcmstb-qspi" },
+ { .compatible = "brcm,spi-brcmstb-mspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_qspi_of_match);
+
+static int brcmstb_qspi_probe(struct platform_device *pdev)
+{
+ return bcm_qspi_probe(pdev, NULL);
+}
+
+static int brcmstb_qspi_remove(struct platform_device *pdev)
+{
+ return bcm_qspi_remove(pdev);
+}
+
+static struct platform_driver brcmstb_qspi_driver = {
+ .probe = brcmstb_qspi_probe,
+ .remove = brcmstb_qspi_remove,
+ .driver = {
+ .name = "brcmstb_qspi",
+ .pm = &bcm_qspi_pm_ops,
+ .of_match_table = brcmstb_qspi_of_match,
+ }
+};
+module_platform_driver(brcmstb_qspi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("Broadcom SPI driver for settop SoC");
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
new file mode 100644
index 000000000000..877937706240
--- /dev/null
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -0,0 +1,120 @@
+/*
+ * Cavium ThunderX SPI driver.
+ *
+ * Copyright (C) 2016 Cavium Inc.
+ * Authors: Jan Glauber <jglauber@cavium.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spi/spi.h>
+
+#include "spi-cavium.h"
+
+#define DRV_NAME "spi-thunderx"
+
+#define SYS_FREQ_DEFAULT 700000000 /* 700 MHz */
+
+static int thunderx_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int ret;
+
+ master = spi_alloc_master(dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+
+ p = spi_master_get_devdata(master);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto error;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto error;
+
+ p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!p->register_base) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ p->regs.config = 0x1000;
+ p->regs.status = 0x1008;
+ p->regs.tx = 0x1010;
+ p->regs.data = 0x1080;
+
+ p->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(p->clk)) {
+ ret = PTR_ERR(p->clk);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(p->clk);
+ if (ret)
+ goto error;
+
+ p->sys_freq = clk_get_rate(p->clk);
+ if (!p->sys_freq)
+ p->sys_freq = SYS_FREQ_DEFAULT;
+ dev_info(dev, "Set system clock to %u\n", p->sys_freq);
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_3WIRE;
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ master->dev.of_node = pdev->dev.of_node;
+
+ pci_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ clk_disable_unprepare(p->clk);
+ spi_master_put(master);
+ return ret;
+}
+
+static void thunderx_spi_remove(struct pci_dev *pdev)
+{
+ struct spi_master *master = pci_get_drvdata(pdev);
+ struct octeon_spi *p;
+
+ p = spi_master_get_devdata(master);
+ if (!p)
+ return;
+
+ clk_disable_unprepare(p->clk);
+ /* Put everything in a known state. */
+ writeq(0, p->register_base + OCTEON_SPI_CFG(p));
+}
+
+static const struct pci_device_id thunderx_spi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, thunderx_spi_pci_id_table);
+
+static struct pci_driver thunderx_spi_driver = {
+ .name = DRV_NAME,
+ .id_table = thunderx_spi_pci_id_table,
+ .probe = thunderx_spi_probe,
+ .remove = thunderx_spi_remove,
+};
+
+module_pci_driver(thunderx_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. ThunderX SPI bus driver");
+MODULE_AUTHOR("Jan Glauber");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cavium.h b/drivers/spi/spi-cavium.h
index 88c5f36e7ea7..1f91d61b745b 100644
--- a/drivers/spi/spi-cavium.h
+++ b/drivers/spi/spi-cavium.h
@@ -1,6 +1,8 @@
#ifndef __SPI_CAVIUM_H
#define __SPI_CAVIUM_H
+#include <linux/clk.h>
+
#define OCTEON_SPI_MAX_BYTES 9
#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
@@ -17,6 +19,7 @@ struct octeon_spi {
u64 cs_enax;
int sys_freq;
struct octeon_spi_regs regs;
+ struct clk *clk;
};
#define OCTEON_SPI_CFG(x) (x->regs.config)
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index c09bb745693a..27960e46135d 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -283,7 +283,6 @@ static int dw_spi_transfer_one(struct spi_master *master,
struct chip_data *chip = spi_get_ctldata(spi);
u8 imask = 0;
u16 txlevel = 0;
- u16 clk_div;
u32 cr0;
int ret;
@@ -298,13 +297,13 @@ static int dw_spi_transfer_one(struct spi_master *master,
spi_enable_chip(dws, 0);
/* Handle per transfer options for bpw and speed */
- if (transfer->speed_hz != chip->speed_hz) {
- /* clk_div doesn't support odd number */
- clk_div = (dws->max_freq / transfer->speed_hz + 1) & 0xfffe;
-
- chip->speed_hz = transfer->speed_hz;
- chip->clk_div = clk_div;
-
+ if (transfer->speed_hz != dws->current_freq) {
+ if (transfer->speed_hz != chip->speed_hz) {
+ /* clk_div doesn't support odd number */
+ chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
+ chip->speed_hz = transfer->speed_hz;
+ }
+ dws->current_freq = transfer->speed_hz;
spi_set_clk(dws, chip->clk_div);
}
if (transfer->bits_per_word == 8) {
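/*
 * A minimal sketch (not part of the patch) of the new divider rounding,
 * assuming a hypothetical 100 MHz max_freq: the divider is rounded up to the
 * next even value, so the generated rate never exceeds the requested one.
 */
static u16 example_clk_div(u32 max_freq, u32 speed_hz)
{
	return (DIV_ROUND_UP(max_freq, speed_hz) + 1) & 0xfffe;
}
/*
 * example_clk_div(100000000, 30000000) == 4 -> 100 MHz / 4 = 25 MHz
 * example_clk_div(100000000, 24000000) == 6 -> 100 MHz / 6 = ~16.7 MHz
 */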
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 61bc3cbab38d..c21ca02f8ec5 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -123,6 +123,7 @@ struct dw_spi {
u8 n_bytes; /* current is a 1/2 bytes op */
u32 dma_width;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+ u32 current_freq; /* frequency in hz */
/* DMA info */
int dma_inited;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 9e9dadb52b3d..35c0dd945668 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -159,7 +159,7 @@ struct fsl_dspi {
u8 cs;
u16 void_write_data;
u32 cs_change;
- struct fsl_dspi_devtype_data *devtype_data;
+ const struct fsl_dspi_devtype_data *devtype_data;
wait_queue_head_t waitq;
u32 waitflags;
@@ -624,10 +624,13 @@ static int dspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ int ret;
pinctrl_pm_select_default_state(dev);
- clk_prepare_enable(dspi->clk);
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ return ret;
spi_master_resume(master);
return 0;
@@ -651,8 +654,6 @@ static int dspi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
int ret = 0, cs_num, bus_num;
- const struct of_device_id *of_id =
- of_match_device(fsl_dspi_dt_ids, &pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
if (!master)
@@ -686,7 +687,7 @@ static int dspi_probe(struct platform_device *pdev)
}
master->bus_num = bus_num;
- dspi->devtype_data = (struct fsl_dspi_devtype_data *)of_id->data;
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
ret = -EFAULT;
@@ -728,7 +729,9 @@ static int dspi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to get clock\n");
goto out_master_put;
}
- clk_prepare_enable(dspi->clk);
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto out_master_put;
master->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
@@ -760,7 +763,6 @@ static int dspi_remove(struct platform_device *pdev)
/* Disconnect from the SPI framework */
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
- spi_master_put(dspi->master);
return 0;
}
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 8d85a3c343da..7451585a080e 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
@@ -27,40 +26,29 @@
#include "spi-fsl-lib.h"
/* eSPI Controller registers */
-struct fsl_espi_reg {
- __be32 mode; /* 0x000 - eSPI mode register */
- __be32 event; /* 0x004 - eSPI event register */
- __be32 mask; /* 0x008 - eSPI mask register */
- __be32 command; /* 0x00c - eSPI command register */
- __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/
- __be32 receive; /* 0x014 - eSPI receive FIFO access register*/
- u8 res[8]; /* 0x018 - 0x01c reserved */
- __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */
-};
+#define ESPI_SPMODE 0x00 /* eSPI mode register */
+#define ESPI_SPIE 0x04 /* eSPI event register */
+#define ESPI_SPIM 0x08 /* eSPI mask register */
+#define ESPI_SPCOM 0x0c /* eSPI command register */
+#define ESPI_SPITF 0x10 /* eSPI transmit FIFO access register*/
+#define ESPI_SPIRF 0x14 /* eSPI receive FIFO access register*/
+#define ESPI_SPMODE0 0x20 /* eSPI cs0 mode register */
-struct fsl_espi_transfer {
- const void *tx_buf;
- void *rx_buf;
- unsigned len;
- unsigned n_tx;
- unsigned n_rx;
- unsigned actual_length;
- int status;
-};
+#define ESPI_SPMODEx(x) (ESPI_SPMODE0 + (x) * 4)
/* eSPI Controller mode register definitions */
-#define SPMODE_ENABLE (1 << 31)
-#define SPMODE_LOOP (1 << 30)
+#define SPMODE_ENABLE BIT(31)
+#define SPMODE_LOOP BIT(30)
#define SPMODE_TXTHR(x) ((x) << 8)
#define SPMODE_RXTHR(x) ((x) << 0)
/* eSPI Controller CS mode register definitions */
-#define CSMODE_CI_INACTIVEHIGH (1 << 31)
-#define CSMODE_CP_BEGIN_EDGECLK (1 << 30)
-#define CSMODE_REV (1 << 29)
-#define CSMODE_DIV16 (1 << 28)
+#define CSMODE_CI_INACTIVEHIGH BIT(31)
+#define CSMODE_CP_BEGIN_EDGECLK BIT(30)
+#define CSMODE_REV BIT(29)
+#define CSMODE_DIV16 BIT(28)
#define CSMODE_PM(x) ((x) << 24)
-#define CSMODE_POL_1 (1 << 20)
+#define CSMODE_POL_1 BIT(20)
#define CSMODE_LEN(x) ((x) << 16)
#define CSMODE_BEF(x) ((x) << 12)
#define CSMODE_AFT(x) ((x) << 8)
@@ -72,29 +60,114 @@ struct fsl_espi_transfer {
| CSMODE_AFT(0) | CSMODE_CG(1))
/* SPIE register values */
-#define SPIE_NE 0x00000200 /* Not empty */
-#define SPIE_NF 0x00000100 /* Not full */
-
-/* SPIM register values */
-#define SPIM_NE 0x00000200 /* Not empty */
-#define SPIM_NF 0x00000100 /* Not full */
#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F)
+#define SPIE_TXE BIT(15) /* TX FIFO empty */
+#define SPIE_DON BIT(14) /* TX done */
+#define SPIE_RXT BIT(13) /* RX FIFO threshold */
+#define SPIE_RXF BIT(12) /* RX FIFO full */
+#define SPIE_TXT BIT(11) /* TX FIFO threshold*/
+#define SPIE_RNE BIT(9) /* RX FIFO not empty */
+#define SPIE_TNF BIT(8) /* TX FIFO not full */
+
+/* SPIM register values */
+#define SPIM_TXE BIT(15) /* TX FIFO empty */
+#define SPIM_DON BIT(14) /* TX done */
+#define SPIM_RXT BIT(13) /* RX FIFO threshold */
+#define SPIM_RXF BIT(12) /* RX FIFO full */
+#define SPIM_TXT BIT(11) /* TX FIFO threshold*/
+#define SPIM_RNE BIT(9) /* RX FIFO not empty */
+#define SPIM_TNF BIT(8) /* TX FIFO not full */
/* SPCOM register values */
#define SPCOM_CS(x) ((x) << 30)
+#define SPCOM_DO BIT(28) /* Dual output */
+#define SPCOM_TO BIT(27) /* TX only */
+#define SPCOM_RXSKIP(x) ((x) << 16)
#define SPCOM_TRANLEN(x) ((x) << 0)
+
#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
#define AUTOSUSPEND_TIMEOUT 2000
+static inline u32 fsl_espi_read_reg(struct mpc8xxx_spi *mspi, int offset)
+{
+ return ioread32be(mspi->reg_base + offset);
+}
+
+static inline u8 fsl_espi_read_reg8(struct mpc8xxx_spi *mspi, int offset)
+{
+ return ioread8(mspi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg(struct mpc8xxx_spi *mspi, int offset,
+ u32 val)
+{
+ iowrite32be(val, mspi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg8(struct mpc8xxx_spi *mspi, int offset,
+ u8 val)
+{
+ iowrite8(val, mspi->reg_base + offset);
+}
+
+static void fsl_espi_copy_to_buf(struct spi_message *m,
+ struct mpc8xxx_spi *mspi)
+{
+ struct spi_transfer *t;
+ u8 *buf = mspi->local_buf;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf)
+ memcpy(buf, t->tx_buf, t->len);
+ else
+ memset(buf, 0, t->len);
+ buf += t->len;
+ }
+}
+
+static void fsl_espi_copy_from_buf(struct spi_message *m,
+ struct mpc8xxx_spi *mspi)
+{
+ struct spi_transfer *t;
+ u8 *buf = mspi->local_buf;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->rx_buf)
+ memcpy(t->rx_buf, buf, t->len);
+ buf += t->len;
+ }
+}
+
+static int fsl_espi_check_message(struct spi_message *m)
+{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ struct spi_transfer *t, *first;
+
+ if (m->frame_length > SPCOM_TRANLEN_MAX) {
+ dev_err(mspi->dev, "message too long, size is %u bytes\n",
+ m->frame_length);
+ return -EMSGSIZE;
+ }
+
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (first->bits_per_word != t->bits_per_word ||
+ first->speed_hz != t->speed_hz) {
+ dev_err(mspi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
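/*
 * A minimal sketch (not part of the patch) of a message that passes the check
 * above: every transfer shares bits_per_word and speed_hz, and the total
 * length stays below SPCOM_TRANLEN_MAX (64 KiB). The buffers and the 10 MHz
 * rate are illustrative only.
 */
static void example_build_message(struct spi_message *m,
				  struct spi_transfer *t,
				  void *cmd, void *buf)
{
	t[0].tx_buf = cmd;
	t[0].len = 4;
	t[0].bits_per_word = 8;
	t[0].speed_hz = 10000000;

	t[1].rx_buf = buf;
	t[1].len = 16;
	t[1].bits_per_word = 8;	/* mixing 8- and 16-bit here -> -EINVAL */
	t[1].speed_hz = 10000000;

	spi_message_init(m);
	spi_message_add_tail(&t[0], m);
	spi_message_add_tail(&t[1], m);
}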
+
static void fsl_espi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
- struct fsl_espi_reg *reg_base = mspi->reg_base;
- __be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
- __be32 __iomem *espi_mode = &reg_base->mode;
u32 tmp;
unsigned long flags;
@@ -102,10 +175,11 @@ static void fsl_espi_change_mode(struct spi_device *spi)
local_irq_save(flags);
/* Turn off SPI unit prior changing mode */
- tmp = mpc8xxx_spi_read_reg(espi_mode);
- mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
- mpc8xxx_spi_write_reg(mode, cs->hw_mode);
- mpc8xxx_spi_write_reg(espi_mode, tmp);
+ tmp = fsl_espi_read_reg(mspi, ESPI_SPMODE);
+ fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp & ~SPMODE_ENABLE);
+ fsl_espi_write_reg(mspi, ESPI_SPMODEx(spi->chip_select),
+ cs->hw_mode);
+ fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp);
local_irq_restore(flags);
}
@@ -131,27 +205,15 @@ static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
return data;
}
-static int fsl_espi_setup_transfer(struct spi_device *spi,
+static void fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- int bits_per_word = 0;
+ int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
+ u32 hz = t ? t->speed_hz : spi->max_speed_hz;
u8 pm;
- u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
- if (t) {
- bits_per_word = t->bits_per_word;
- hz = t->speed_hz;
- }
-
- /* spi_transfer level calls that work per-word */
- if (!bits_per_word)
- bits_per_word = spi->bits_per_word;
-
- if (!hz)
- hz = spi->max_speed_hz;
-
cs->rx_shift = 0;
cs->tx_shift = 0;
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
@@ -169,12 +231,10 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
- bits_per_word = bits_per_word - 1;
-
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
- cs->hw_mode |= CSMODE_LEN(bits_per_word);
+ cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= CSMODE_DIV16;
@@ -196,36 +256,16 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
cs->hw_mode |= CSMODE_PM(pm);
fsl_espi_change_mode(spi);
- return 0;
-}
-
-static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
- unsigned int len)
-{
- u32 word;
- struct fsl_espi_reg *reg_base = mspi->reg_base;
-
- mspi->count = len;
-
- /* enable rx ints */
- mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
-
- /* transmit word */
- word = mspi->get_tx(mspi);
- mpc8xxx_spi_write_reg(&reg_base->transmit, word);
-
- return 0;
}
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
- unsigned int len = t->len;
+ u32 word;
int ret;
mpc8xxx_spi->len = t->len;
- len = roundup(len, 4) / 4;
+ mpc8xxx_spi->count = roundup(t->len, 4) / 4;
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
@@ -233,17 +273,15 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
reinit_completion(&mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
- if (t->len > SPCOM_TRANLEN_MAX) {
- dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
- " beyond the SPCOM[TRANLEN] field\n", t->len);
- return -EINVAL;
- }
- mpc8xxx_spi_write_reg(&reg_base->command,
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM,
(SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
- ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
- if (ret)
- return ret;
+ /* enable rx ints */
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, SPIM_RNE);
+
+ /* transmit word */
+ word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPITF, word);
/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
@@ -253,230 +291,76 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
mpc8xxx_spi->count);
/* disable rx ints */
- mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
- return mpc8xxx_spi->count;
+ return mpc8xxx_spi->count > 0 ? -EMSGSIZE : 0;
}
-static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
-{
- if (cmd) {
- cmd[1] = (u8)(addr >> 16);
- cmd[2] = (u8)(addr >> 8);
- cmd[3] = (u8)(addr >> 0);
- }
-}
-
-static inline unsigned int fsl_espi_cmd2addr(u8 *cmd)
-{
- if (cmd)
- return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;
-
- return 0;
-}
-
-static void fsl_espi_do_trans(struct spi_message *m,
- struct fsl_espi_transfer *tr)
+static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
struct spi_device *spi = m->spi;
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
- struct fsl_espi_transfer *espi_trans = tr;
- struct spi_message message;
- struct spi_transfer *t, *first, trans;
- int status = 0;
-
- spi_message_init(&message);
- memset(&trans, 0, sizeof(trans));
-
- first = list_first_entry(&m->transfers, struct spi_transfer,
- transfer_list);
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if ((first->bits_per_word != t->bits_per_word) ||
- (first->speed_hz != t->speed_hz)) {
- espi_trans->status = -EINVAL;
- dev_err(mspi->dev,
- "bits_per_word/speed_hz should be same for the same SPI transfer\n");
- return;
- }
-
- trans.speed_hz = t->speed_hz;
- trans.bits_per_word = t->bits_per_word;
- trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
- }
-
- trans.len = espi_trans->len;
- trans.tx_buf = espi_trans->tx_buf;
- trans.rx_buf = espi_trans->rx_buf;
- spi_message_add_tail(&trans, &message);
-
- list_for_each_entry(t, &message.transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- status = -EINVAL;
-
- status = fsl_espi_setup_transfer(spi, t);
- if (status < 0)
- break;
- }
+ int ret;
- if (t->len)
- status = fsl_espi_bufs(spi, t);
+ fsl_espi_copy_to_buf(m, mspi);
+ fsl_espi_setup_transfer(spi, trans);
- if (status) {
- status = -EMSGSIZE;
- break;
- }
+ ret = fsl_espi_bufs(spi, trans);
- if (t->delay_usecs)
- udelay(t->delay_usecs);
- }
+ if (trans->delay_usecs)
+ udelay(trans->delay_usecs);
- espi_trans->status = status;
fsl_espi_setup_transfer(spi, NULL);
-}
-
-static void fsl_espi_cmd_trans(struct spi_message *m,
- struct fsl_espi_transfer *trans, u8 *rx_buff)
-{
- struct spi_transfer *t;
- u8 *local_buf;
- int i = 0;
- struct fsl_espi_transfer *espi_trans = trans;
-
- local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
- if (!local_buf) {
- espi_trans->status = -ENOMEM;
- return;
- }
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf) {
- memcpy(local_buf + i, t->tx_buf, t->len);
- i += t->len;
- }
- }
-
- espi_trans->tx_buf = local_buf;
- espi_trans->rx_buf = local_buf;
- fsl_espi_do_trans(m, espi_trans);
- espi_trans->actual_length = espi_trans->len;
- kfree(local_buf);
-}
-
-static void fsl_espi_rw_trans(struct spi_message *m,
- struct fsl_espi_transfer *trans, u8 *rx_buff)
-{
- struct fsl_espi_transfer *espi_trans = trans;
- unsigned int total_len = espi_trans->len;
- struct spi_transfer *t;
- u8 *local_buf;
- u8 *rx_buf = rx_buff;
- unsigned int trans_len;
- unsigned int addr;
- unsigned int tx_only;
- unsigned int rx_pos = 0;
- unsigned int pos;
- int i, loop;
-
- local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
- if (!local_buf) {
- espi_trans->status = -ENOMEM;
- return;
- }
-
- for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
- trans_len = total_len - pos;
-
- i = 0;
- tx_only = 0;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf) {
- memcpy(local_buf + i, t->tx_buf, t->len);
- i += t->len;
- if (!t->rx_buf)
- tx_only += t->len;
- }
- }
-
- /* Add additional TX bytes to compensate SPCOM_TRANLEN_MAX */
- if (loop > 0)
- trans_len += tx_only;
-
- if (trans_len > SPCOM_TRANLEN_MAX)
- trans_len = SPCOM_TRANLEN_MAX;
-
- /* Update device offset */
- if (pos > 0) {
- addr = fsl_espi_cmd2addr(local_buf);
- addr += rx_pos;
- fsl_espi_addr2cmd(addr, local_buf);
- }
-
- espi_trans->len = trans_len;
- espi_trans->tx_buf = local_buf;
- espi_trans->rx_buf = local_buf;
- fsl_espi_do_trans(m, espi_trans);
-
- /* If there is at least one RX byte then copy it to rx_buf */
- if (tx_only < SPCOM_TRANLEN_MAX)
- memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
- trans_len - tx_only);
-
- rx_pos += trans_len - tx_only;
-
- if (loop > 0)
- espi_trans->actual_length += espi_trans->len - tx_only;
- else
- espi_trans->actual_length += espi_trans->len;
- }
+ if (!ret)
+ fsl_espi_copy_from_buf(m, mspi);
- kfree(local_buf);
+ return ret;
}
static int fsl_espi_do_one_msg(struct spi_master *master,
struct spi_message *m)
{
- struct spi_transfer *t;
- u8 *rx_buf = NULL;
- unsigned int n_tx = 0;
- unsigned int n_rx = 0;
- unsigned int xfer_len = 0;
- struct fsl_espi_transfer espi_trans;
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+ unsigned int delay_usecs = 0;
+ struct spi_transfer *t, trans = {};
+ int ret;
+
+ ret = fsl_espi_check_message(m);
+ if (ret)
+ goto out;
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf)
- n_tx += t->len;
- if (t->rx_buf) {
- n_rx += t->len;
- rx_buf = t->rx_buf;
- }
- if ((t->tx_buf) || (t->rx_buf))
- xfer_len += t->len;
+ if (t->delay_usecs > delay_usecs)
+ delay_usecs = t->delay_usecs;
}
- espi_trans.n_tx = n_tx;
- espi_trans.n_rx = n_rx;
- espi_trans.len = xfer_len;
- espi_trans.actual_length = 0;
- espi_trans.status = 0;
+ t = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+
+ trans.len = m->frame_length;
+ trans.speed_hz = t->speed_hz;
+ trans.bits_per_word = t->bits_per_word;
+ trans.delay_usecs = delay_usecs;
+ trans.tx_buf = mspi->local_buf;
+ trans.rx_buf = mspi->local_buf;
+
+ if (trans.len)
+ ret = fsl_espi_trans(m, &trans);
- if (!rx_buf)
- fsl_espi_cmd_trans(m, &espi_trans, NULL);
- else
- fsl_espi_rw_trans(m, &espi_trans, rx_buf);
+ m->actual_length = ret ? 0 : trans.len;
+out:
+ if (m->status == -EINPROGRESS)
+ m->status = ret;
- m->actual_length = espi_trans.actual_length;
- m->status = espi_trans.status;
spi_finalize_current_message(master);
- return 0;
+
+ return ret;
}
static int fsl_espi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
- int retval;
- u32 hw_mode;
u32 loop_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -491,13 +375,11 @@ static int fsl_espi_setup(struct spi_device *spi)
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
- reg_base = mpc8xxx_spi->reg_base;
pm_runtime_get_sync(mpc8xxx_spi->dev);
- hw_mode = cs->hw_mode; /* Save original settings */
- cs->hw_mode = mpc8xxx_spi_read_reg(
- &reg_base->csmode[spi->chip_select]);
+ cs->hw_mode = fsl_espi_read_reg(mpc8xxx_spi,
+ ESPI_SPMODEx(spi->chip_select));
/* mask out bits we are going to set */
cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
| CSMODE_REV);
@@ -510,21 +392,17 @@ static int fsl_espi_setup(struct spi_device *spi)
cs->hw_mode |= CSMODE_REV;
/* Handle the loop mode */
- loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
+ loop_mode = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
loop_mode &= ~SPMODE_LOOP;
if (spi->mode & SPI_LOOP)
loop_mode |= SPMODE_LOOP;
- mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, loop_mode);
- retval = fsl_espi_setup_transfer(spi, NULL);
+ fsl_espi_setup_transfer(spi, NULL);
pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
- if (retval < 0) {
- cs->hw_mode = hw_mode; /* Restore settings */
- return retval;
- }
return 0;
}
@@ -536,12 +414,10 @@ static void fsl_espi_cleanup(struct spi_device *spi)
spi_set_ctldata(spi, NULL);
}
-void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
- struct fsl_espi_reg *reg_base = mspi->reg_base;
-
/* We need handle RX first */
- if (events & SPIE_NE) {
+ if (events & SPIE_RNE) {
u32 rx_data, tmp;
u8 rx_data_8;
int rx_nr_bytes = 4;
@@ -551,7 +427,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
if (SPIE_RXCNT(events) < min(4, mspi->len)) {
ret = spin_event_timeout(
!(SPIE_RXCNT(events =
- mpc8xxx_spi_read_reg(&reg_base->event)) <
+ fsl_espi_read_reg(mspi, ESPI_SPIE)) <
min(4, mspi->len)),
10000, 0); /* 10 msec */
if (!ret)
@@ -560,10 +436,10 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
}
if (mspi->len >= 4) {
- rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+ rx_data = fsl_espi_read_reg(mspi, ESPI_SPIRF);
} else if (mspi->len <= 0) {
dev_err(mspi->dev,
- "unexpected RX(SPIE_NE) interrupt occurred,\n"
+ "unexpected RX(SPIE_RNE) interrupt occurred,\n"
"(local rxlen %d bytes, reg rxlen %d bytes)\n",
min(4, mspi->len), SPIE_RXCNT(events));
rx_nr_bytes = 0;
@@ -572,7 +448,8 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
tmp = mspi->len;
rx_data = 0;
while (tmp--) {
- rx_data_8 = in_8((u8 *)&reg_base->receive);
+ rx_data_8 = fsl_espi_read_reg8(mspi,
+ ESPI_SPIRF);
rx_data |= (rx_data_8 << (tmp * 8));
}
@@ -585,30 +462,24 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
mspi->get_rx(rx_data, mspi);
}
- if (!(events & SPIE_NF)) {
+ if (!(events & SPIE_TNF)) {
int ret;
/* spin until TX is done */
- ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
- &reg_base->event)) & SPIE_NF), 1000, 0);
+ ret = spin_event_timeout(((events = fsl_espi_read_reg(
+ mspi, ESPI_SPIE)) & SPIE_TNF), 1000, 0);
if (!ret) {
- dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
-
- /* Clear the SPIE bits */
- mpc8xxx_spi_write_reg(&reg_base->event, events);
+ dev_err(mspi->dev, "tired waiting for SPIE_TNF\n");
complete(&mspi->done);
return;
}
}
- /* Clear the events */
- mpc8xxx_spi_write_reg(&reg_base->event, events);
-
mspi->count -= 1;
if (mspi->count) {
u32 word = mspi->get_tx(mspi);
- mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+ fsl_espi_write_reg(mspi, ESPI_SPITF, word);
} else {
complete(&mspi->done);
}
@@ -617,20 +488,21 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
struct mpc8xxx_spi *mspi = context_data;
- struct fsl_espi_reg *reg_base = mspi->reg_base;
- irqreturn_t ret = IRQ_NONE;
u32 events;
/* Get interrupt events(tx/rx) */
- events = mpc8xxx_spi_read_reg(&reg_base->event);
- if (events)
- ret = IRQ_HANDLED;
+ events = fsl_espi_read_reg(mspi, ESPI_SPIE);
+ if (!events)
+ return IRQ_NONE;
dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
fsl_espi_cpu_irq(mspi, events);
- return ret;
+ /* Clear the events */
+ fsl_espi_write_reg(mspi, ESPI_SPIE, events);
+
+ return IRQ_HANDLED;
}
#ifdef CONFIG_PM
@@ -638,12 +510,11 @@ static int fsl_espi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
u32 regval;
- regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
regval &= ~SPMODE_ENABLE;
- mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
return 0;
}
@@ -652,39 +523,35 @@ static int fsl_espi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
u32 regval;
- regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
regval |= SPMODE_ENABLE;
- mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
return 0;
}
#endif
-static size_t fsl_espi_max_transfer_size(struct spi_device *spi)
+static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
return SPCOM_TRANLEN_MAX;
}
-static struct spi_master * fsl_espi_probe(struct device *dev,
- struct resource *mem, unsigned int irq)
+static int fsl_espi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
struct device_node *nc;
const __be32 *prop;
u32 regval, csmode;
- int i, len, ret = 0;
+ int i, len, ret;
master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
- if (!master) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!master)
+ return -ENOMEM;
dev_set_drvdata(dev, master);
@@ -695,18 +562,23 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
master->cleanup = fsl_espi_cleanup;
master->transfer_one_message = fsl_espi_do_one_msg;
master->auto_runtime_pm = true;
- master->max_transfer_size = fsl_espi_max_transfer_size;
+ master->max_message_size = fsl_espi_max_message_size;
mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->local_buf =
+ devm_kmalloc(dev, SPCOM_TRANLEN_MAX, GFP_KERNEL);
+ if (!mpc8xxx_spi->local_buf) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
+
mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
if (IS_ERR(mpc8xxx_spi->reg_base)) {
ret = PTR_ERR(mpc8xxx_spi->reg_base);
goto err_probe;
}
- reg_base = mpc8xxx_spi->reg_base;
-
/* Register for SPI Interrupt */
ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
0, "fsl_espi", mpc8xxx_spi);
@@ -719,10 +591,10 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
}
/* SPI controller initializations */
- mpc8xxx_spi_write_reg(&reg_base->mode, 0);
- mpc8xxx_spi_write_reg(&reg_base->mask, 0);
- mpc8xxx_spi_write_reg(&reg_base->command, 0);
- mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
/* Init eSPI CS mode register */
for_each_available_child_of_node(master->dev.of_node, nc) {
@@ -747,7 +619,7 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
csmode &= ~(CSMODE_AFT(0xf));
csmode |= CSMODE_AFT(be32_to_cpup(prop));
}
- mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), csmode);
dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
}
@@ -755,7 +627,7 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
- mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
@@ -767,12 +639,13 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
if (ret < 0)
goto err_pm;
- dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
+ dev_info(dev, "at 0x%p (irq = %d)\n", mpc8xxx_spi->reg_base,
+ mpc8xxx_spi->irq);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
- return master;
+ return 0;
err_pm:
pm_runtime_put_noidle(dev);
@@ -780,8 +653,7 @@ err_pm:
pm_runtime_set_suspended(dev);
err_probe:
spi_master_put(master);
-err:
- return ERR_PTR(ret);
+ return ret;
}
static int of_fsl_espi_get_chipselects(struct device *dev)
@@ -807,10 +679,9 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
- struct spi_master *master;
struct resource mem;
unsigned int irq;
- int ret = -ENOMEM;
+ int ret;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
@@ -818,28 +689,17 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
ret = of_fsl_espi_get_chipselects(dev);
if (ret)
- goto err;
+ return ret;
ret = of_address_to_resource(np, 0, &mem);
if (ret)
- goto err;
+ return ret;
irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- goto err;
- }
-
- master = fsl_espi_probe(dev, &mem, irq);
- if (IS_ERR(master)) {
- ret = PTR_ERR(master);
- goto err;
- }
-
- return 0;
+ if (!irq)
+ return -EINVAL;
-err:
- return ret;
+ return fsl_espi_probe(dev, &mem, irq);
}
static int of_fsl_espi_remove(struct platform_device *dev)
@@ -873,27 +733,26 @@ static int of_fsl_espi_resume(struct device *dev)
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
u32 regval;
int i, ret;
mpc8xxx_spi = spi_master_get_devdata(master);
- reg_base = mpc8xxx_spi->reg_base;
/* SPI controller initializations */
- mpc8xxx_spi_write_reg(&reg_base->mode, 0);
- mpc8xxx_spi_write_reg(&reg_base->mask, 0);
- mpc8xxx_spi_write_reg(&reg_base->command, 0);
- mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
/* Init eSPI CS mode register */
for (i = 0; i < pdata->max_chipselect; i++)
- mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i),
+ CSMODE_INIT_VAL);
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
- mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+ fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
ret = pm_runtime_force_resume(dev);
if (ret < 0)
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 84f5dcb7a897..2925c8089fd9 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -23,13 +23,14 @@
/* SPI/eSPI Controller driver's private data. */
struct mpc8xxx_spi {
struct device *dev;
- void *reg_base;
+ void __iomem *reg_base;
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
int len;
+ u8 *local_buf;
#endif
int subblock;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f63cb30f9010..deb782f6556c 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -186,17 +186,19 @@ static unsigned int spi_imx_clkdiv_1(unsigned int fin,
/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
- unsigned int fspi)
+ unsigned int fspi, unsigned int *fres)
{
int i, div = 4;
for (i = 0; i < 7; i++) {
if (fspi * div >= fin)
- return i;
+ goto out;
div <<= 1;
}
- return 7;
+out:
+ *fres = fin / div;
+ return i;
}
static int spi_imx_bytes_per_word(const int bpw)
@@ -453,6 +455,9 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)
+#define MX31_CSPI_TESTREG 0x1C
+#define MX31_TEST_LBC (1 << 14)
+
/* These functions also work for the i.MX35, but be aware that
* the i.MX35 has a slightly different register layout for bits
* we do not use here.
@@ -482,9 +487,11 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+ unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
MX31_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
if (is_imx35_cspi(spi_imx)) {
reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
@@ -506,6 +513,13 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
writel(reg, spi_imx->base + MXC_CSPICTRL);
+ reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
+ if (spi->mode & SPI_LOOP)
+ reg |= MX31_TEST_LBC;
+ else
+ reg &= ~MX31_TEST_LBC;
+ writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
+
return 0;
}
@@ -625,9 +639,12 @@ static int mx1_config(struct spi_device *spi, struct spi_imx_config *config)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+ unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
MX1_CSPICTRL_DR_SHIFT;
+ spi_imx->spi_bus_clk = clk;
+
reg |= config->bpw - 1;
if (spi->mode & SPI_CPHA)
@@ -1179,7 +1196,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- if (is_imx51_ecspi(spi_imx))
+ if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx))
spi_imx->bitbang.master->mode_bits |= SPI_LOOP;
init_completion(&spi_imx->xfer_done);
@@ -1251,6 +1268,12 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_clk_put;
}
+ if (!master->cs_gpios) {
+ dev_err(&pdev->dev, "No CS GPIOs available\n");
+ ret = -EINVAL;
+ goto out_clk_put;
+ }
+
for (i = 0; i < master->num_chipselect; i++) {
if (!gpio_is_valid(master->cs_gpios[i]))
continue;
diff --git a/drivers/spi/spi-iproc-qspi.c b/drivers/spi/spi-iproc-qspi.c
new file mode 100644
index 000000000000..be6ccb204a66
--- /dev/null
+++ b/drivers/spi/spi-iproc-qspi.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "spi-bcm-qspi.h"
+
+#define INTR_BASE_BIT_SHIFT 0x02
+#define INTR_COUNT 0x07
+
+struct bcm_iproc_intc {
+ struct bcm_qspi_soc_intc soc_intc;
+ struct platform_device *pdev;
+ void __iomem *int_reg;
+ void __iomem *int_status_reg;
+ spinlock_t soclock;
+ bool big_endian;
+};
+
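+/* Gather the per-source L2 interrupt status registers into one status word. */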
+static u32 bcm_iproc_qspi_get_l2_int_status(struct bcm_qspi_soc_intc *soc_intc)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_status_reg;
+ int i;
+ u32 val = 0, sts = 0;
+
+ for (i = 0; i < INTR_COUNT; i++) {
+ if (bcm_qspi_readl(priv->big_endian, mmio + (i * 4)))
+ val |= 1UL << i;
+ }
+
+ if (val & INTR_MSPI_DONE_MASK)
+ sts |= MSPI_DONE;
+
+ if (val & BSPI_LR_INTERRUPTS_ALL)
+ sts |= BSPI_DONE;
+
+ if (val & BSPI_LR_INTERRUPTS_ERROR)
+ sts |= BSPI_ERR;
+
+ return sts;
+}
+
+static void bcm_iproc_qspi_int_ack(struct bcm_qspi_soc_intc *soc_intc, int type)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_status_reg;
+ u32 mask = get_qspi_mask(type);
+ int i;
+
+ for (i = 0; i < INTR_COUNT; i++) {
+ if (mask & (1UL << i))
+ bcm_qspi_writel(priv->big_endian, 1, mmio + (i * 4));
+ }
+}
+
+static void bcm_iproc_qspi_int_set(struct bcm_qspi_soc_intc *soc_intc, int type,
+ bool en)
+{
+ struct bcm_iproc_intc *priv =
+ container_of(soc_intc, struct bcm_iproc_intc, soc_intc);
+ void __iomem *mmio = priv->int_reg;
+ u32 mask = get_qspi_mask(type);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->soclock, flags);
+
+ val = bcm_qspi_readl(priv->big_endian, mmio);
+
+ if (en)
+ val = val | (mask << INTR_BASE_BIT_SHIFT);
+ else
+ val = val & ~(mask << INTR_BASE_BIT_SHIFT);
+
+ bcm_qspi_writel(priv->big_endian, val, mmio);
+
+ spin_unlock_irqrestore(&priv->soclock, flags);
+}
+
+static int bcm_iproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_iproc_intc *priv;
+ struct bcm_qspi_soc_intc *soc_intc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc_intc = &priv->soc_intc;
+ priv->pdev = pdev;
+
+ spin_lock_init(&priv->soclock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr_regs");
+ priv->int_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->int_reg))
+ return PTR_ERR(priv->int_reg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "intr_status_reg");
+ priv->int_status_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->int_status_reg))
+ return PTR_ERR(priv->int_status_reg);
+
+ priv->big_endian = of_device_is_big_endian(dev->of_node);
+
+ bcm_iproc_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
+ bcm_iproc_qspi_int_set(soc_intc, MSPI_BSPI_DONE, false);
+
+ soc_intc->bcm_qspi_int_ack = bcm_iproc_qspi_int_ack;
+ soc_intc->bcm_qspi_int_set = bcm_iproc_qspi_int_set;
+ soc_intc->bcm_qspi_get_int_status = bcm_iproc_qspi_get_l2_int_status;
+
+ return bcm_qspi_probe(pdev, soc_intc);
+}
+
+static int bcm_iproc_remove(struct platform_device *pdev)
+{
+ return bcm_qspi_remove(pdev);
+}
+
+static const struct of_device_id bcm_iproc_of_match[] = {
+ { .compatible = "brcm,spi-nsp-qspi" },
+ { .compatible = "brcm,spi-ns2-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_of_match);
+
+static struct platform_driver bcm_iproc_driver = {
+ .probe = bcm_iproc_probe,
+ .remove = bcm_iproc_remove,
+ .driver = {
+ .name = "bcm_iproc",
+ .pm = &bcm_qspi_pm_ops,
+ .of_match_table = bcm_iproc_of_match,
+ }
+};
+module_platform_driver(bcm_iproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kamal Dasu");
+MODULE_DESCRIPTION("SPI flash driver for Broadcom iProc SoCs");
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
new file mode 100644
index 000000000000..f8117b80fa22
--- /dev/null
+++ b/drivers/spi/spi-jcore.c
@@ -0,0 +1,231 @@
+/*
+ * J-Core SPI controller driver
+ *
+ * Copyright (C) 2012-2016 Smart Energy Instruments, Inc.
+ *
+ * Current version by Rich Felker
+ * Based loosely on initial version by Oleksandr G Zhadan
+ *
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+
+#define DRV_NAME "jcore_spi"
+
+#define CTRL_REG 0x0
+#define DATA_REG 0x4
+
+#define JCORE_SPI_CTRL_XMIT 0x02
+#define JCORE_SPI_STAT_BUSY 0x02
+#define JCORE_SPI_CTRL_LOOP 0x08
+#define JCORE_SPI_CTRL_CS_BITS 0x15
+
+#define JCORE_SPI_WAIT_RDY_MAX_LOOP 2000000
+
+struct jcore_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ unsigned int cs_reg;
+ unsigned int speed_reg;
+ unsigned int speed_hz;
+ unsigned int clock_freq;
+};
+
+static int jcore_spi_wait(void __iomem *ctrl_reg)
+{
+ unsigned timeout = JCORE_SPI_WAIT_RDY_MAX_LOOP;
+
+ do {
+ if (!(readl(ctrl_reg) & JCORE_SPI_STAT_BUSY))
+ return 0;
+ cpu_relax();
+ } while (--timeout);
+
+ return -EBUSY;
+}
+
+static void jcore_spi_program(struct jcore_spi *hw)
+{
+ void __iomem *ctrl_reg = hw->base + CTRL_REG;
+
+ if (jcore_spi_wait(ctrl_reg))
+ dev_err(hw->master->dev.parent,
+ "timeout waiting to program ctrl reg.\n");
+
+ writel(hw->cs_reg | hw->speed_reg, ctrl_reg);
+}
+
+static void jcore_spi_chipsel(struct spi_device *spi, bool value)
+{
+ struct jcore_spi *hw = spi_master_get_devdata(spi->master);
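+ /* Chip selects sit in every other bit (0, 2, 4) of the control word. */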
+ u32 csbit = 1U << (2 * spi->chip_select);
+
+ dev_dbg(hw->master->dev.parent, "chipselect %d\n", spi->chip_select);
+
+ if (value)
+ hw->cs_reg |= csbit;
+ else
+ hw->cs_reg &= ~csbit;
+
+ jcore_spi_program(hw);
+}
+
+static void jcore_spi_baudrate(struct jcore_spi *hw, int speed)
+{
+ if (speed == hw->speed_hz)
+ return;
+ hw->speed_hz = speed;
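+ /*
+ * The divider field occupies the top bits of the control word; a value
+ * of zero selects the maximum rate of clock_freq / 2.
+ */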
+ if (speed >= hw->clock_freq / 2)
+ hw->speed_reg = 0;
+ else
+ hw->speed_reg = ((hw->clock_freq / 2 / speed) - 1) << 27;
+ jcore_spi_program(hw);
+ dev_dbg(hw->master->dev.parent, "speed=%d reg=0x%x\n",
+ speed, hw->speed_reg);
+}
+
+static int jcore_spi_txrx(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct jcore_spi *hw = spi_master_get_devdata(master);
+
+ void __iomem *ctrl_reg = hw->base + CTRL_REG;
+ void __iomem *data_reg = hw->base + DATA_REG;
+ u32 xmit;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+ unsigned int len;
+ unsigned int count;
+
+ jcore_spi_baudrate(hw, t->speed_hz);
+
+ xmit = hw->cs_reg | hw->speed_reg | JCORE_SPI_CTRL_XMIT;
+ tx = t->tx_buf;
+ rx = t->rx_buf;
+ len = t->len;
+
+ for (count = 0; count < len; count++) {
+ if (jcore_spi_wait(ctrl_reg))
+ break;
+
+ writel(tx ? *tx++ : 0, data_reg);
+ writel(xmit, ctrl_reg);
+
+ if (jcore_spi_wait(ctrl_reg))
+ break;
+
+ if (rx)
+ *rx++ = readl(data_reg);
+ }
+
+ spi_finalize_current_transfer(master);
+
+ if (count < len)
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int jcore_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct jcore_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ u32 clock_freq;
+ struct clk *clk;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct jcore_spi));
+ if (!master)
+ return err;
+
+ /* Setup the master state. */
+ master->num_chipselect = 3;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->transfer_one = jcore_spi_txrx;
+ master->set_cs = jcore_spi_chipsel;
+ master->dev.of_node = node;
+ master->bus_num = pdev->id;
+
+ hw = spi_master_get_devdata(master);
+ hw->master = master;
+ platform_set_drvdata(pdev, hw);
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit_busy;
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ goto exit_busy;
+ hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!hw->base)
+ goto exit_busy;
+
+ /*
+ * The SPI clock rate is controlled via a configurable clock divider
+ * which is applied to the reference clock. A 50 MHz reference is
+ * most suitable for obtaining standard SPI clock rates, but some
+ * designs may have a different reference clock, and the DT must
+ * make the driver aware so that it can properly program the
+ * requested rate. If the clock is omitted, 50 MHz is assumed.
+ */
+ clock_freq = 50000000;
+ clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (!IS_ERR_OR_NULL(clk)) {
+ if (clk_enable(clk) == 0)
+ clock_freq = clk_get_rate(clk);
+ else
+ dev_warn(&pdev->dev, "could not enable ref_clk\n");
+ }
+ hw->clock_freq = clock_freq;
+
+ /* Initialize all CS bits to high. */
+ hw->cs_reg = JCORE_SPI_CTRL_CS_BITS;
+ jcore_spi_baudrate(hw, 400000);
+
+ /* Register our spi controller */
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err)
+ goto exit;
+
+ return 0;
+
+exit_busy:
+ err = -EBUSY;
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+static const struct of_device_id jcore_spi_of_match[] = {
+ { .compatible = "jcore,spi2" },
+ {},
+};
+
+static struct platform_driver jcore_spi_driver = {
+ .probe = jcore_spi_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = jcore_spi_of_match,
+ },
+};
+
+module_platform_driver(jcore_spi_driver);
+
+MODULE_DESCRIPTION("J-Core SPI driver");
+MODULE_AUTHOR("Rich Felker <dalias@libc.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index faa1b133b0f1..50e620f4e8fe 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -405,7 +405,7 @@ struct rx_ranges {
u8 *end;
};
-int rx_ranges_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int rx_ranges_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 2465259f6241..616566e793c6 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -442,6 +442,7 @@ static const struct dev_pm_ops meson_spifc_pm_ops = {
static const struct of_device_id meson_spifc_dt_match[] = {
{ .compatible = "amlogic,meson6-spifc", },
+ { .compatible = "amlogic,meson-gxbb-spifc", },
{ },
};
MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index c41abddab318..bd1c6b53283f 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -253,15 +253,13 @@ static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
return NULL;
rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
- list_del(&rdesc->list);
- list_add_tail(&rdesc->list, &sqi->bd_list_used);
+ list_move_tail(&rdesc->list, &sqi->bd_list_used);
return rdesc;
}
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
- list_del(&rdesc->list);
- list_add(&rdesc->list, &sqi->bd_list_free);
+ list_move(&rdesc->list, &sqi->bd_list_free);
}
static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index db3ae1dd829e..04f3eecf5cf3 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -23,7 +23,7 @@
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
- struct spi_message *msg = drv_data->cur_msg;
+ struct spi_message *msg = drv_data->master->cur_msg;
/*
* It is possible that one CPU is handling ROR interrupt and other
@@ -76,7 +76,8 @@ static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
enum dma_transfer_direction dir)
{
- struct chip_data *chip = drv_data->cur_chip;
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->master->cur_msg->spi);
struct spi_transfer *xfer = drv_data->cur_transfer;
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
@@ -146,7 +147,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
- int err = 0;
+ int err;
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
if (!tx_desc) {
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 87150a1049bd..dd7b5b47291d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -28,6 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
@@ -62,6 +63,13 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| QUARK_X1000_SSCR1_TFT \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+#define CE4100_SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+ | CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define LPSS_CS_CONTROL_SW_MODE BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
@@ -175,6 +183,8 @@ static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
return QUARK_X1000_SSCR1_CHANGE_MASK;
+ case CE4100_SSP:
+ return CE4100_SSCR1_CHANGE_MASK;
default:
return SSCR1_CHANGE_MASK;
}
@@ -186,6 +196,8 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
return RX_THRESH_QUARK_X1000_DFLT;
+ case CE4100_SSP:
+ return RX_THRESH_CE4100_DFLT;
default:
return RX_THRESH_DFLT;
}
@@ -199,6 +211,9 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
case QUARK_X1000_SSP:
mask = QUARK_X1000_SSSR_TFL_MASK;
break;
+ case CE4100_SSP:
+ mask = CE4100_SSSR_TFL_MASK;
+ break;
default:
mask = SSSR_TFL_MASK;
break;
@@ -216,6 +231,9 @@ static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
case QUARK_X1000_SSP:
mask = QUARK_X1000_SSCR1_RFT;
break;
+ case CE4100_SSP:
+ mask = CE4100_SSCR1_RFT;
+ break;
default:
mask = SSCR1_RFT;
break;
@@ -230,6 +248,9 @@ static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
case QUARK_X1000_SSP:
*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
break;
+ case CE4100_SSP:
+ *sccr1_reg |= CE4100_SSCR1_RxTresh(threshold);
+ break;
default:
*sccr1_reg |= SSCR1_RxTresh(threshold);
break;
@@ -316,7 +337,7 @@ static void lpss_ssp_select_cs(struct driver_data *drv_data,
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- cs = drv_data->cur_msg->spi->chip_select;
+ cs = drv_data->master->cur_msg->spi->chip_select;
cs <<= config->cs_sel_shift;
if (cs != (value & config->cs_sel_mask)) {
/*
@@ -355,10 +376,11 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
static void cs_assert(struct driver_data *drv_data)
{
- struct chip_data *chip = drv_data->cur_chip;
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->master->cur_msg->spi);
if (drv_data->ssp_type == CE4100_SSP) {
- pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
+ pxa2xx_spi_write(drv_data, SSSR, chip->frm);
return;
}
@@ -378,7 +400,8 @@ static void cs_assert(struct driver_data *drv_data)
static void cs_deassert(struct driver_data *drv_data)
{
- struct chip_data *chip = drv_data->cur_chip;
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->master->cur_msg->spi);
if (drv_data->ssp_type == CE4100_SSP)
return;
@@ -508,7 +531,7 @@ static int u32_reader(struct driver_data *drv_data)
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
- struct spi_message *msg = drv_data->cur_msg;
+ struct spi_message *msg = drv_data->master->cur_msg;
struct spi_transfer *trans = drv_data->cur_transfer;
/* Move to next transfer */
@@ -529,8 +552,7 @@ static void giveback(struct driver_data *drv_data)
struct spi_message *msg;
unsigned long timeout;
- msg = drv_data->cur_msg;
- drv_data->cur_msg = NULL;
+ msg = drv_data->master->cur_msg;
drv_data->cur_transfer = NULL;
last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
@@ -575,13 +597,13 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- drv_data->cur_chip = NULL;
spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
{
- struct chip_data *chip = drv_data->cur_chip;
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->master->cur_msg->spi);
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
@@ -589,6 +611,9 @@ static void reset_sccr1(struct driver_data *drv_data)
case QUARK_X1000_SSP:
sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
break;
+ case CE4100_SSP:
+ sccr1_reg &= ~CE4100_SSCR1_RFT;
+ break;
default:
sccr1_reg &= ~SSCR1_RFT;
break;
@@ -610,7 +635,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
dev_err(&drv_data->pdev->dev, "%s\n", msg);
- drv_data->cur_msg->state = ERROR_STATE;
+ drv_data->master->cur_msg->state = ERROR_STATE;
tasklet_schedule(&drv_data->pump_transfers);
}
@@ -623,7 +648,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
pxa2xx_spi_write(drv_data, SSTO, 0);
/* Update total byte transferred return count actual bytes read */
- drv_data->cur_msg->actual_length += drv_data->len -
+ drv_data->master->cur_msg->actual_length += drv_data->len -
(drv_data->rx_end - drv_data->rx);
/* Transfer delays and chip select release are
@@ -631,7 +656,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
*/
/* Move to next transfer */
- drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
+ drv_data->master->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
/* Schedule transfer tasklet */
tasklet_schedule(&drv_data->pump_transfers);
@@ -746,7 +771,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(status & mask))
return IRQ_NONE;
- if (!drv_data->cur_msg) {
+ if (!drv_data->master->cur_msg) {
pxa2xx_spi_write(drv_data, SSCR0,
pxa2xx_spi_read(drv_data, SSCR0)
@@ -905,7 +930,8 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
int rate)
{
- struct chip_data *chip = drv_data->cur_chip;
+ struct chip_data *chip =
+ spi_get_ctldata(drv_data->master->cur_msg->spi);
unsigned int clk_div;
switch (drv_data->ssp_type) {
@@ -934,25 +960,23 @@ static void pump_transfers(unsigned long data)
{
struct driver_data *drv_data = (struct driver_data *)data;
struct spi_master *master = drv_data->master;
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
- struct chip_data *chip = NULL;
- u32 clk_div = 0;
- u8 bits = 0;
- u32 speed = 0;
+ struct spi_message *message = master->cur_msg;
+ struct chip_data *chip = spi_get_ctldata(message->spi);
+ u32 dma_thresh = chip->dma_threshold;
+ u32 dma_burst = chip->dma_burst_size;
+ u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
+ struct spi_transfer *transfer;
+ struct spi_transfer *previous;
+ u32 clk_div;
+ u8 bits;
+ u32 speed;
u32 cr0;
u32 cr1;
- u32 dma_thresh = drv_data->cur_chip->dma_threshold;
- u32 dma_burst = drv_data->cur_chip->dma_burst_size;
- u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
int err;
int dma_mapped;
/* Get current state information */
- message = drv_data->cur_msg;
transfer = drv_data->cur_transfer;
- chip = drv_data->cur_chip;
/* Handle for abort */
if (message->state == ERROR_STATE) {
@@ -1146,17 +1170,12 @@ static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
{
struct driver_data *drv_data = spi_master_get_devdata(master);
- drv_data->cur_msg = msg;
/* Initial message state*/
- drv_data->cur_msg->state = START_STATE;
- drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(msg->transfers.next,
struct spi_transfer,
transfer_list);
- /* prepare to setup the SSP, in pump_transfers, using the per
- * chip configuration */
- drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
-
/* Mark as busy and launch transfers */
tasklet_schedule(&drv_data->pump_transfers);
return 0;
@@ -1176,9 +1195,26 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
struct pxa2xx_spi_chip *chip_info)
{
+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
int err = 0;
- if (chip == NULL || chip_info == NULL)
+ if (chip == NULL)
+ return 0;
+
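+ /* GPIO descriptors set up at probe time take precedence over legacy chip_info chip-select data. */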
+ if (drv_data->cs_gpiods) {
+ struct gpio_desc *gpiod;
+
+ gpiod = drv_data->cs_gpiods[spi->chip_select];
+ if (gpiod) {
+ chip->gpio_cs = desc_to_gpio(gpiod);
+ chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
+ gpiod_set_value(gpiod, chip->gpio_cs_inverted);
+ }
+
+ return 0;
+ }
+
+ if (chip_info == NULL)
return 0;
/* NOTE: setup() can be called multiple times, possibly with
@@ -1213,7 +1249,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
static int setup(struct spi_device *spi)
{
- struct pxa2xx_spi_chip *chip_info = NULL;
+ struct pxa2xx_spi_chip *chip_info;
struct chip_data *chip;
const struct lpss_config *config;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
@@ -1225,6 +1261,11 @@ static int setup(struct spi_device *spi)
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
+ case CE4100_SSP:
+ tx_thres = TX_THRESH_CE4100_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_CE4100_DFLT;
+ break;
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_BSW_SSP:
@@ -1309,6 +1350,10 @@ static int setup(struct spi_device *spi)
| (QUARK_X1000_SSCR1_TxTresh(tx_thres)
& QUARK_X1000_SSCR1_TFT);
break;
+ case CE4100_SSP:
+ chip->threshold = (CE4100_SSCR1_RxTresh(rx_thres) & CE4100_SSCR1_RFT) |
+ (CE4100_SSCR1_TxTresh(tx_thres) & CE4100_SSCR1_TFT);
+ break;
default:
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
@@ -1352,7 +1397,8 @@ static void cleanup(struct spi_device *spi)
if (!chip)
return;
- if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
+ if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
+ gpio_is_valid(chip->gpio_cs))
gpio_free(chip->gpio_cs);
kfree(chip);
@@ -1530,7 +1576,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
- int status;
+ int status, count;
u32 tmp;
platform_info = dev_get_platdata(dev);
@@ -1630,15 +1676,20 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_write(drv_data, SSCR0, 0);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
- tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
- | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+ tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
+ QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
 /* use the Motorola SPI protocol with 8-bit frames */
- pxa2xx_spi_write(drv_data, SSCR0,
- QUARK_X1000_SSCR0_Motorola
- | QUARK_X1000_SSCR0_DataSize(8));
+ tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
+ case CE4100_SSP:
+ tmp = CE4100_SSCR1_RxTresh(RX_THRESH_CE4100_DFLT) |
+ CE4100_SSCR1_TxTresh(TX_THRESH_CE4100_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
default:
tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT);
@@ -1669,6 +1720,39 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
master->num_chipselect = platform_info->num_chipselect;
+ count = gpiod_count(&pdev->dev, "cs");
+ if (count > 0) {
+ int i;
+
+ master->num_chipselect = max_t(int, count,
+ master->num_chipselect);
+
+ drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
+ master->num_chipselect, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!drv_data->cs_gpiods) {
+ status = -ENOMEM;
+ goto out_error_clock_enabled;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod)) {
+ /* Means use native chip select */
+ if (PTR_ERR(gpiod) == -ENOENT)
+ continue;
+
+ status = (int)PTR_ERR(gpiod);
+ goto out_error_clock_enabled;
+ } else {
+ drv_data->cs_gpiods[i] = gpiod;
+ }
+ }
+ }
+
tasklet_init(&drv_data->pump_transfers, pump_transfers,
(unsigned long)drv_data);
@@ -1742,7 +1826,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
- int status = 0;
+ int status;
status = spi_master_suspend(drv_data->master);
if (status != 0)
@@ -1759,7 +1843,7 @@ static int pxa2xx_spi_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
- int status = 0;
+ int status;
/* Enable the SSP clock */
if (!pm_runtime_suspended(dev))
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index d217ad55cc12..ce31b8199bb3 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -53,9 +53,7 @@ struct driver_data {
atomic_t dma_running;
/* Current message transfer state info */
- struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
- struct chip_data *cur_chip;
size_t len;
void *tx;
void *tx_end;
@@ -68,6 +66,9 @@ struct driver_data {
void (*cs_control)(u32 command);
void __iomem *lpss_base;
+
+ /* GPIOs for chip selects */
+ struct gpio_desc **cs_gpiods;
};
struct chip_data {
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 7f1555621f8e..1bfa889b8427 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -982,8 +982,10 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
+ if (!pm_runtime_suspended(device)) {
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ }
return 0;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 818843336932..a816f07e168e 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -295,14 +295,24 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
int spbr;
+ int div = 0;
+ unsigned long clksrc;
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+ clksrc = clk_get_rate(rspi->clk);
+ while (div < 3) {
+ if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+ break;
+ div++;
+ clksrc /= 2;
+ }
+
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
- 2 * rspi->max_speed_hz) - 1;
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+ rspi->spcmd |= div << 2;
/* Disable dummy transmission, set byte access */
rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 36af4d48a700..f63714ffb62f 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -23,6 +23,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
+#include <linux/gpio/consumer.h>
enum chips { sc18is602, sc18is602b, sc18is603 };
@@ -50,6 +51,8 @@ struct sc18is602 {
u8 buffer[SC18IS602_BUFSIZ + 1];
int tlen; /* Data queued for tx in buffer */
int rindex; /* Receive data index in buffer */
+
+ struct gpio_desc *reset;
};
static int sc18is602_wait_ready(struct sc18is602 *hw, int len)
@@ -257,6 +260,12 @@ static int sc18is602_probe(struct i2c_client *client,
hw = spi_master_get_devdata(master);
i2c_set_clientdata(client, hw);
+ /* assert reset and then release */
+ hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->reset))
+ return PTR_ERR(hw->reset);
+ gpiod_set_value_cansleep(hw->reset, 0);
+
hw->master = master;
hw->client = client;
hw->dev = dev;
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index a56eca0e95a6..e54b59638458 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -175,10 +175,7 @@ static int spi_st_transfer_one(struct spi_master *master,
static void spi_st_cleanup(struct spi_device *spi)
{
- int cs = spi->cs_gpio;
-
- if (gpio_is_valid(cs))
- devm_gpio_free(&spi->dev, cs);
+ gpio_free(spi->cs_gpio);
}
/* the spi->mode bits understood by this driver: */
@@ -201,14 +198,15 @@ static int spi_st_setup(struct spi_device *spi)
return -EINVAL;
}
- if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
+ ret = gpio_request(cs, dev_name(&spi->dev));
+ if (ret) {
dev_err(&spi->dev, "could not request gpio:%d\n", cs);
- return -EINVAL;
+ return ret;
}
ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
if (ret)
- return ret;
+ goto out_free_gpio;
spi_st_clk = clk_get_rate(spi_st->clk);
@@ -217,7 +215,8 @@ static int spi_st_setup(struct spi_device *spi)
if (sscbrg < 0x07 || sscbrg > BIT(16)) {
dev_err(&spi->dev,
"baudrate %d outside valid range %d\n", sscbrg, hz);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free_gpio;
}
spi_st->baud = spi_st_clk / (2 * sscbrg);
@@ -266,6 +265,10 @@ static int spi_st_setup(struct spi_device *spi)
readl_relaxed(spi_st->base + SSC_RBUF);
return 0;
+
+out_free_gpio:
+ gpio_free(cs);
+ return ret;
}
/* Interrupt fired when TX shift register becomes empty */
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index ac0b072815a3..caeac66a3977 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -41,6 +41,8 @@ struct ti_qspi_regs {
};
struct ti_qspi {
+ struct completion transfer_complete;
+
/* list synchronization */
struct mutex list_lock;
@@ -54,6 +56,9 @@ struct ti_qspi {
struct ti_qspi_regs ctx_reg;
+ dma_addr_t mmap_phys_base;
+ struct dma_chan *rx_chan;
+
u32 spi_max_frequency;
u32 cmd;
u32 dc;
@@ -379,6 +384,72 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
return 0;
}
+static void ti_qspi_dma_callback(void *param)
+{
+ struct ti_qspi *qspi = param;
+
+ complete(&qspi->transfer_complete);
+}
+
+static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len)
+{
+ struct dma_chan *chan = qspi->rx_chan;
+ struct dma_device *dma_dev = chan->device;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_async_tx_descriptor *tx;
+ int ret;
+
+ tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
+ return -EIO;
+ }
+
+ tx->callback = ti_qspi_dma_callback;
+ tx->callback_param = qspi;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ msecs_to_jiffies(len));
+ if (ret <= 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
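+/*
+ * Copy from the memory-mapped flash window into the rx scatterlist, one
+ * DMA memcpy per scatterlist segment.
+ */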
+static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
+ loff_t from)
+{
+ struct scatterlist *sg;
+ dma_addr_t dma_src = qspi->mmap_phys_base + from;
+ dma_addr_t dma_dst;
+ int i, len, ret;
+
+ for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
+ dma_dst = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
+ if (ret)
+ return ret;
+ dma_src += len;
+ }
+
+ return 0;
+}
+
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
@@ -426,7 +497,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi,
QSPI_SPI_SETUP_REG(spi->chip_select));
}
-static int ti_qspi_spi_flash_read(struct spi_device *spi,
+static int ti_qspi_spi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
@@ -437,9 +508,23 @@ static int ti_qspi_spi_flash_read(struct spi_device *spi,
if (!qspi->mmap_enabled)
ti_qspi_enable_memory_map(spi);
ti_qspi_setup_mmap_read(spi, msg);
- memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
+
+ if (qspi->rx_chan) {
+ if (msg->cur_msg_mapped) {
+ ret = ti_qspi_dma_xfer_sg(qspi, msg->rx_sg, msg->from);
+ if (ret)
+ goto err_unlock;
+ } else {
+ dev_err(qspi->dev, "Invalid address for DMA\n");
+ ret = -EIO;
+ goto err_unlock;
+ }
+ } else {
+ memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
+ }
msg->retlen = msg->len;
+err_unlock:
mutex_unlock(&qspi->list_lock);
return ret;
@@ -536,6 +621,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
+ dma_cap_mask_t mask;
master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
if (!master)
@@ -550,6 +636,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
+ master->spi_flash_read = ti_qspi_spi_flash_read;
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
@@ -592,17 +679,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
- if (res_mmap) {
- qspi->mmap_base = devm_ioremap_resource(&pdev->dev,
- res_mmap);
- master->spi_flash_read = ti_qspi_spi_flash_read;
- if (IS_ERR(qspi->mmap_base)) {
- dev_err(&pdev->dev,
- "falling back to PIO mode\n");
- master->spi_flash_read = NULL;
- }
- }
- qspi->mmap_enabled = false;
if (of_property_read_bool(np, "syscon-chipselects")) {
qspi->ctrl_base =
@@ -633,11 +709,37 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
qspi->spi_max_frequency = max_freq;
- ret = devm_spi_register_master(&pdev->dev, master);
- if (ret)
- goto free_master;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- return 0;
+	/* dma_request_chan_by_mask() returns an ERR_PTR on failure, never NULL */
+	qspi->rx_chan = dma_request_chan_by_mask(&mask);
+	if (IS_ERR(qspi->rx_chan)) {
+		dev_err(qspi->dev,
+			"No Rx DMA available, trying mmap mode\n");
+		qspi->rx_chan = NULL;
+		ret = 0;
+ goto no_dma;
+ }
+ master->dma_rx = qspi->rx_chan;
+ init_completion(&qspi->transfer_complete);
+ if (res_mmap)
+ qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
+
+no_dma:
+ if (!qspi->rx_chan && res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ dev_info(&pdev->dev,
+ "mmap failed with error %ld using PIO mode\n",
+ PTR_ERR(qspi->mmap_base));
+ qspi->mmap_base = NULL;
+ master->spi_flash_read = NULL;
+ }
+ }
+ qspi->mmap_enabled = false;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret)
+ return 0;
free_master:
spi_master_put(master);
@@ -656,6 +758,9 @@ static int ti_qspi_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (qspi->rx_chan)
+ dma_release_channel(qspi->rx_chan);
+
return 0;
}
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 7492ea346b43..51759d3fd45f 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -346,7 +346,7 @@ static int txx9spi_probe(struct platform_device *dev)
c->clk = NULL;
goto exit;
}
- ret = clk_enable(c->clk);
+ ret = clk_prepare_enable(c->clk);
if (ret) {
c->clk = NULL;
goto exit;
@@ -395,7 +395,7 @@ static int txx9spi_probe(struct platform_device *dev)
exit_busy:
ret = -EBUSY;
exit:
- clk_disable(c->clk);
+ clk_disable_unprepare(c->clk);
spi_master_put(master);
return ret;
}
@@ -406,7 +406,7 @@ static int txx9spi_remove(struct platform_device *dev)
struct txx9spi *c = spi_master_get_devdata(master);
flush_work(&c->work);
- clk_disable(c->clk);
+ clk_disable_unprepare(c->clk);
return 0;
}
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 8f04feca6ee3..4071a729eb2f 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -11,6 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -405,8 +406,9 @@ static int xlp_spi_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "could not get spi clock\n");
- return -ENODEV;
+ return PTR_ERR(clk);
}
+
xspi->spi_clk = clk_get_rate(clk);
master = spi_alloc_master(&pdev->dev, 0);
@@ -437,6 +439,14 @@ static int xlp_spi_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xlp_spi_acpi_match[] = {
+ { "BRCM900D", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
+#endif
+
static const struct of_device_id xlp_spi_dt_id[] = {
{ .compatible = "netlogic,xlp832-spi" },
{ },
@@ -447,6 +457,7 @@ static struct platform_driver xlp_spi_driver = {
.driver = {
.name = "xlp-spi",
.of_match_table = xlp_spi_dt_id,
+ .acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
},
};
module_platform_driver(xlp_spi_driver);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 200ca228d885..8146ccd35a1a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
+#include <linux/highmem.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
@@ -709,6 +710,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
+#ifdef CONFIG_HIGHMEM
+ const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
+ (unsigned long)buf < (PKMAP_BASE +
+ (LAST_PKMAP * PAGE_SIZE)));
+#else
+ const bool kmap_buf = false;
+#endif
int desc_len;
int sgs;
struct page *vm_page;
@@ -716,7 +724,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
size_t min;
int i, ret;
- if (vmalloced_buf) {
+ if (vmalloced_buf || kmap_buf) {
desc_len = min_t(int, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
@@ -732,10 +740,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
for (i = 0; i < sgs; i++) {
- if (vmalloced_buf) {
+ if (vmalloced_buf || kmap_buf) {
min = min_t(size_t,
len, desc_len - offset_in_page(buf));
- vm_page = vmalloc_to_page(buf);
+ if (vmalloced_buf)
+ vm_page = vmalloc_to_page(buf);
+ else
+ vm_page = kmap_to_page(buf);
if (!vm_page) {
sg_free_table(sgt);
return -ENOMEM;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index aca282d45421..5ec3a595dc7d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -954,6 +954,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
if (channel > 5) {
dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
channel);
+ err = -EINVAL;
goto err_put_ctrl;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index af9476460023..58a7b3504b82 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -40,8 +40,6 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
-source "drivers/staging/rtl8723au/Kconfig"
-
source "drivers/staging/rts5208/Kconfig"
source "drivers/staging/octeon/Kconfig"
@@ -104,4 +102,8 @@ source "drivers/staging/i4l/Kconfig"
source "drivers/staging/ks7010/Kconfig"
+source "drivers/staging/greybus/Kconfig"
+
+source "drivers/staging/vc04_services/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9f6009dcafa8..2fa9745db614 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
-obj-$(CONFIG_R8723AU) += rtl8723au/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
@@ -41,3 +40,5 @@ obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_KS7010) += ks7010/
+obj-$(CONFIG_GREYBUS) += greybus/
+obj-$(CONFIG_BCM2708_VCHIQ) += vc04_services/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 06e41d24ec62..6c00d6f765c6 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,19 +24,6 @@ config ANDROID_LOW_MEMORY_KILLER
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
-config SW_SYNC
- bool "Software synchronization framework"
- default n
- depends on SYNC_FILE
- depends on DEBUG_FS
- ---help---
- A sync object driver that uses a 32bit counter to coordinate
- synchronization. Useful when there is no hardware primitive backing
- the synchronization.
-
- WARNING: improper use of this can result in deadlocking kernel
- drivers from userspace. Intended for test and debug only.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7ca61b77a8d4..7ed1be798909 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,4 +4,3 @@ obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 19c1572f1525..c8fb4134c55d 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -36,7 +36,19 @@ config ION_TEGRA
config ION_HISI
tristate "Ion for Hisilicon"
depends on ARCH_HISI && ION
+ select ION_OF
help
Choose this option if you wish to use ion on Hisilicon Platform.
source "drivers/staging/android/ion/hisilicon/Kconfig"
+
+config ION_OF
+ bool "Devicetree support for Ion"
+ depends on ION && OF_ADDRESS
+ help
+	  Provides base support for defining Ion heaps in devicetree
+	  and setting them up. Also includes functions that platforms
+	  can use to parse the devicetree and extend it with their own
+	  custom extensions.
+
+	  If you are using Ion with devicetree, you should say Y here.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 18cc2aa593c2..5d630a088381 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,4 +1,5 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
+ ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
@@ -8,4 +9,5 @@ endif
obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_HISI) += hisilicon/
+obj-$(CONFIG_ION_OF) += ion_of.o
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
new file mode 100644
index 000000000000..168715271f06
--- /dev/null
+++ b/drivers/staging/android/ion/devicetree.txt
@@ -0,0 +1,51 @@
+Ion Memory Manager
+
+Ion is a memory manager that allows for sharing of buffers via dma-buf.
+Ion allows for different types of allocation via an abstraction called
+a 'heap'. A heap represents a specific type of memory, and there can
+be multiple instances of the same heap type.
+
+Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
+in the devicetree.
+
+Required properties for Ion
+
+- compatible: "linux,ion" PLUS a compatible property for the device
+
+All child nodes of a linux,ion node are interpreted as heaps.
+
+Required properties for heaps
+
+- compatible: compatible string for a heap type PLUS a compatible property
+for the specific instance of the heap. Current heap types:
+-- linux,ion-heap-system
+-- linux,ion-heap-system-contig
+-- linux,ion-heap-carveout
+-- linux,ion-heap-chunk
+-- linux,ion-heap-dma
+-- linux,ion-heap-custom
+
+Optional properties
+- memory-region: A phandle to a memory region. Required for the DMA heap type
+(see reserved-memory.txt for details on the reservation, and the sketch after
+the example below).
+
+Example:
+
+ ion {
+ compatbile = "hisilicon,ion", "linux,ion";
+
+ ion-system-heap {
+ compatbile = "hisilicon,system-heap", "linux,ion-heap-system"
+ };
+
+ ion-camera-region {
+ compatible = "hisilicon,camera-heap", "linux,ion-heap-dma"
+ memory-region = <&camera_region>;
+ };
+
+ ion-fb-region {
+ compatbile = "hisilicon,fb-heap", "linux,ion-heap-dma"
+ memory-region = <&fb_region>;
+ };
+	};
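+
+The memory-region phandles above are expected to reference nodes under
+/reserved-memory (see reserved-memory.txt). As a minimal sketch, a CMA
+region backing the camera heap in the example could be described as
+follows; the node name, address and size are illustrative only and are
+not part of this binding:
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		camera_region: camera@60000000 {
+			compatible = "shared-dma-pool";
+			reusable;
+			reg = <0x0 0x60000000 0x0 0x4000000>;
+		};
+	};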
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
index fe9f0fd210cd..0de7897fd4bf 100644
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
@@ -19,181 +19,74 @@
#include <linux/mm.h>
#include "../ion_priv.h"
#include "../ion.h"
+#include "../ion_of.h"
-struct hi6220_ion_type_table {
- const char *name;
- enum ion_heap_type type;
+struct hisi_ion_dev {
+ struct ion_heap **heaps;
+ struct ion_device *idev;
+ struct ion_platform_data *data;
};
-static struct hi6220_ion_type_table ion_type_table[] = {
- {"ion_system", ION_HEAP_TYPE_SYSTEM},
- {"ion_system_contig", ION_HEAP_TYPE_SYSTEM_CONTIG},
- {"ion_carveout", ION_HEAP_TYPE_CARVEOUT},
- {"ion_chunk", ION_HEAP_TYPE_CHUNK},
- {"ion_dma", ION_HEAP_TYPE_DMA},
- {"ion_custom", ION_HEAP_TYPE_CUSTOM},
+static struct ion_of_heap hisi_heaps[] = {
+ PLATFORM_HEAP("hisilicon,sys_user", 0,
+ ION_HEAP_TYPE_SYSTEM, "sys_user"),
+ PLATFORM_HEAP("hisilicon,sys_contig", 1,
+ ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
+ PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
+ "cma"),
+ {}
};
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-static struct ion_platform_heap **heaps_data;
-
-static int get_type_by_name(const char *name, enum ion_heap_type *type)
+static int hi6220_ion_probe(struct platform_device *pdev)
{
+ struct hisi_ion_dev *ipdev;
int i;
- for (i = 0; i < ARRAY_SIZE(ion_type_table); i++) {
- if (strncmp(name, ion_type_table[i].name, strlen(name)))
- continue;
-
- *type = ion_type_table[i].type;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int hi6220_set_platform_data(struct platform_device *pdev)
-{
- unsigned int base;
- unsigned int size;
- unsigned int id;
- const char *heap_name;
- const char *type_name;
- enum ion_heap_type type;
- int ret;
- struct device_node *np;
- struct ion_platform_heap *p_data;
- const struct device_node *dt_node = pdev->dev.of_node;
- int index = 0;
-
- for_each_child_of_node(dt_node, np)
- num_heaps++;
-
- heaps_data = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap *) *
- num_heaps,
- GFP_KERNEL);
- if (!heaps_data)
+ ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
+ if (!ipdev)
return -ENOMEM;
- for_each_child_of_node(dt_node, np) {
- ret = of_property_read_string(np, "heap-name", &heap_name);
- if (ret < 0) {
- pr_err("check the name of node %s\n", np->name);
- continue;
- }
+ platform_set_drvdata(pdev, ipdev);
- ret = of_property_read_u32(np, "heap-id", &id);
- if (ret < 0) {
- pr_err("check the id %s\n", np->name);
- continue;
- }
+ ipdev->idev = ion_device_create(NULL);
+ if (IS_ERR(ipdev->idev))
+ return PTR_ERR(ipdev->idev);
- ret = of_property_read_u32(np, "heap-base", &base);
- if (ret < 0) {
- pr_err("check the base of node %s\n", np->name);
- continue;
- }
-
- ret = of_property_read_u32(np, "heap-size", &size);
- if (ret < 0) {
- pr_err("check the size of node %s\n", np->name);
- continue;
- }
-
- ret = of_property_read_string(np, "heap-type", &type_name);
- if (ret < 0) {
- pr_err("check the type of node %s\n", np->name);
- continue;
- }
+ ipdev->data = ion_parse_dt(pdev, hisi_heaps);
+ if (IS_ERR(ipdev->data))
+ return PTR_ERR(ipdev->data);
- ret = get_type_by_name(type_name, &type);
- if (ret < 0) {
- pr_err("type name error %s!\n", type_name);
- continue;
- }
- pr_info("heap index %d : name %s base 0x%x size 0x%x id %d type %d\n",
- index, heap_name, base, size, id, type);
+ ipdev->heaps = devm_kzalloc(&pdev->dev,
+				    sizeof(struct ion_heap *) * ipdev->data->nr,
+ GFP_KERNEL);
+ if (!ipdev->heaps) {
+ ion_destroy_platform_data(ipdev->data);
+ return -ENOMEM;
+ }
- p_data = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap),
- GFP_KERNEL);
- if (!p_data)
+ for (i = 0; i < ipdev->data->nr; i++) {
+ ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
+		if (IS_ERR_OR_NULL(ipdev->heaps[i])) {
+ ion_destroy_platform_data(ipdev->data);
return -ENOMEM;
-
- p_data->name = heap_name;
- p_data->base = base;
- p_data->size = size;
- p_data->id = id;
- p_data->type = type;
-
- heaps_data[index] = p_data;
- index++;
+ }
+ ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
}
return 0;
}
-static int hi6220_ion_probe(struct platform_device *pdev)
+static int hi6220_ion_remove(struct platform_device *pdev)
{
+ struct hisi_ion_dev *ipdev;
int i;
- int err;
- static struct ion_platform_heap *p_heap;
-
- idev = ion_device_create(NULL);
- err = hi6220_set_platform_data(pdev);
- if (err) {
- pr_err("ion set platform data error!\n");
- goto err_free_idev;
- }
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap *) * num_heaps,
- GFP_KERNEL);
- if (!heaps) {
- err = -ENOMEM;
- goto err_free_idev;
- }
-
- /*
- * create the heaps as specified in the dts file
- */
- for (i = 0; i < num_heaps; i++) {
- p_heap = heaps_data[i];
- heaps[i] = ion_heap_create(p_heap);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err_free_heaps;
- }
-
- ion_device_add_heap(idev, heaps[i]);
- pr_info("%s: adding heap %s of type %d with %lx@%lx\n",
- __func__, p_heap->name, p_heap->type,
- p_heap->base, (unsigned long)p_heap->size);
- }
- return err;
+ ipdev = platform_get_drvdata(pdev);
-err_free_heaps:
- for (i = 0; i < num_heaps; ++i) {
- ion_heap_destroy(heaps[i]);
- heaps[i] = NULL;
- }
-err_free_idev:
- ion_device_destroy(idev);
+ for (i = 0; i < ipdev->data->nr; i++)
+ ion_heap_destroy(ipdev->heaps[i]);
- return err;
-}
-
-static int hi6220_ion_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < num_heaps; i++) {
- ion_heap_destroy(heaps[i]);
- heaps[i] = NULL;
- }
- ion_device_destroy(idev);
+ ion_destroy_platform_data(ipdev->data);
+ ion_device_destroy(ipdev->idev);
return 0;
}
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
new file mode 100644
index 000000000000..7e7431d8d49f
--- /dev/null
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+union ion_ioctl_arg {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ struct ion_heap_query query;
+};
+
+static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case ION_IOC_HEAP_QUERY:
+ ret = arg->query.reserved0 != 0;
+ ret |= arg->query.reserved1 != 0;
+ ret |= arg->query.reserved2 != 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret ? -EINVAL : 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+ union ion_ioctl_arg data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ /*
+	 * The copy_from_user() is done unconditionally here, for both read
+	 * and write directions, so that the argument can be validated. If
+	 * the ioctl has no write direction, the buffer is cleared afterwards.
+ */
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ ret = validate_ioctl_arg(cmd, &data);
+ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ if (!(dir & _IOC_WRITE))
+ memset(&data, 0, sizeof(data));
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+ }
+ ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_import_dma_buf_fd(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ case ION_IOC_HEAP_QUERY:
+ ret = ion_query_heaps(client, &data.query);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a2cf93b59016..396ded52ab70 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -41,80 +41,6 @@
#include "ion_priv.h"
#include "compat_ion.h"
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
@@ -174,10 +100,10 @@ static void ion_buffer_add(struct ion_device *dev,
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
@@ -205,19 +131,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err2;
}
- buffer->dev = dev;
- buffer->size = len;
-
- table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL,
- "heap->ops->map_dma should return ERR_PTR on error"))
- table = ERR_PTR(-EINVAL);
- if (IS_ERR(table)) {
+ if (buffer->sg_table == NULL) {
+ WARN_ONCE(1, "This heap needs to set the sgtable");
ret = -EINVAL;
goto err1;
}
- buffer->sg_table = table;
+ table = buffer->sg_table;
+ buffer->dev = dev;
+ buffer->size = len;
+
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
@@ -226,7 +149,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
- goto err;
+ goto err1;
}
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -260,8 +183,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
mutex_unlock(&dev->buffer_lock);
return buffer;
-err:
- heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
@@ -273,7 +194,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -337,7 +257,7 @@ static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
struct ion_handle *handle;
@@ -377,26 +297,17 @@ static void ion_handle_destroy(struct kref *kref)
kfree(handle);
}
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
- return handle->buffer;
-}
-
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
-static int ion_handle_put_nolock(struct ion_handle *handle)
+int ion_handle_put_nolock(struct ion_handle *handle)
{
- int ret;
-
- ret = kref_put(&handle->ref, ion_handle_destroy);
-
- return ret;
+ return kref_put(&handle->ref, ion_handle_destroy);
}
-static int ion_handle_put(struct ion_handle *handle)
+int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
@@ -426,8 +337,8 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -438,7 +349,7 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id)
{
struct ion_handle *handle;
@@ -551,15 +462,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
}
EXPORT_SYMBOL(ion_alloc);
-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client,
+ struct ion_handle *handle)
{
- bool valid_handle;
-
- BUG_ON(client != handle->client);
-
- valid_handle = ion_handle_validate(client, handle);
-
- if (!valid_handle) {
+ if (!ion_handle_validate(client, handle)) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
@@ -576,32 +482,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_free);
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_buffer *buffer;
- int ret;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
-
- if (!buffer->heap->ops->phys) {
- pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
- __func__, buffer->heap->name, buffer->heap->type);
- mutex_unlock(&client->lock);
- return -ENODEV;
- }
- mutex_unlock(&client->lock);
- ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
- return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -612,7 +492,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
- "heap->ops->map_kernel should return ERR_PTR on error"))
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -781,14 +661,14 @@ static const struct file_operations debug_client_fops = {
};
static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
+ const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
- node);
+ node);
if (strcmp(client->name, name))
continue;
@@ -863,14 +743,14 @@ struct ion_client *ion_client_create(struct ion_device *dev,
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
+ dev->clients_debug_root,
+ client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
+ path, client->display_name);
}
up_write(&dev->lock);
@@ -917,26 +797,6 @@ void ion_client_destroy(struct ion_client *client)
}
EXPORT_SYMBOL(ion_client_destroy);
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- table = buffer->sg_table;
- mutex_unlock(&client->lock);
- return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
@@ -958,7 +818,7 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
@@ -998,7 +858,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
+ PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
@@ -1076,7 +936,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
+ __func__);
return -EINVAL;
}
@@ -1167,7 +1027,7 @@ static struct dma_buf_ops dma_buf_ops = {
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+ struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
@@ -1275,7 +1135,7 @@ struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
-static int ion_sync_for_device(struct ion_client *client, int fd)
+int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
@@ -1299,124 +1159,45 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
return 0;
}
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
- switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
- default:
- return _IOC_DIR(cmd);
- }
-}
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
- struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
- int ret = 0;
- unsigned int dir;
-
- union {
- struct ion_fd_data fd;
- struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
- } data;
+ struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
+ int ret = -EINVAL, cnt = 0, max_cnt;
+ struct ion_heap *heap;
+ struct ion_heap_data hdata;
- dir = ion_ioctl_dir(cmd);
+ memset(&hdata, 0, sizeof(hdata));
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
+ down_read(&dev->lock);
+ if (!buffer) {
+ query->cnt = dev->heap_cnt;
+ ret = 0;
+ goto out;
+ }
- if (dir & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
+ if (query->cnt <= 0)
+ goto out;
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_handle *handle;
+ max_cnt = query->cnt;
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
- data.allocation.heap_id_mask,
- data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
+ hdata.name[sizeof(hdata.name) - 1] = '\0';
+ hdata.type = heap->type;
+ hdata.heap_id = heap->id;
- data.allocation.handle = handle->id;
+ ret = copy_to_user(&buffer[cnt],
+ &hdata, sizeof(hdata));
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf_fd(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
- break;
- }
- default:
- return -ENOTTY;
+ cnt++;
+ if (cnt >= max_cnt)
+ break;
}
- if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
- return -EFAULT;
- }
- }
+ query->cnt = cnt;
+out:
+ up_read(&dev->lock);
return ret;
}
@@ -1528,7 +1309,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
+ heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
@@ -1588,8 +1369,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
- if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
- !heap->ops->unmap_dma)
+ if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
@@ -1611,15 +1391,15 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
+ path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
@@ -1634,10 +1414,11 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
+ path, debug_name);
}
}
+ dev->heap_cnt++;
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
@@ -1702,38 +1483,3 @@ void ion_device_destroy(struct ion_device *dev)
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++) {
- if (data->heaps[i].size == 0)
- continue;
-
- if (data->heaps[i].base == 0) {
- phys_addr_t paddr;
-
- paddr = memblock_alloc_base(data->heaps[i].size,
- data->heaps[i].align,
- MEMBLOCK_ALLOC_ANYWHERE);
- if (!paddr) {
- pr_err("%s: error allocating memblock for heap %d\n",
- __func__, i);
- continue;
- }
- data->heaps[i].base = paddr;
- } else {
- int ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %zx@%lx failed\n",
- data->heaps[i].size,
- data->heaps[i].base);
- }
- pr_info("%s: %s reserved base %lx size %zu\n", __func__,
- data->heaps[i].name,
- data->heaps[i].base,
- data->heaps[i].size);
- }
-}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a1331fc169a1..93dafb4586e4 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -73,17 +73,6 @@ struct ion_platform_data {
};
/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data: platform data specifying starting physical address and
- * size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specific sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
-/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @name: used for debugging
@@ -130,36 +119,6 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_phys - returns the physical address and len of a handle
- * @client: the client
- * @handle: the handle
- * @addr: a pointer to put the address in
- * @len: a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address. It't output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead. Returns -EINVAL if the handle is invalid. This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_map_dma - return an sg_table describing a handle
- * @client: the client
- * @handle: the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 1fb0d81556da..a8ea97391c40 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,15 +25,17 @@
#include "ion.h"
#include "ion_priv.h"
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
};
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -45,8 +47,8 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
return offset;
}
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
+static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -56,19 +58,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
gen_pool_free(carveout_heap->pool, addr, size);
}
-static int ion_carveout_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
- *addr = paddr;
- *len = buffer->size;
- return 0;
-}
-
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
@@ -95,7 +84,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
}
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
- buffer->priv_virt = table;
+ buffer->sg_table = table;
return 0;
@@ -109,7 +98,7 @@ err_free:
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
@@ -124,23 +113,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
- .phys = ion_carveout_heap_phys,
- .map_dma = ion_carveout_heap_map_dma,
- .unmap_dma = ion_carveout_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index e0553fee9b8a..70495dc645ea 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -34,9 +34,9 @@ struct ion_chunk_heap {
};
static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
@@ -71,11 +71,11 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (!paddr)
goto err;
sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
- chunk_heap->chunk_size, 0);
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
- buffer->priv_virt = table;
+ buffer->sg_table = table;
chunk_heap->allocated += allocated_size;
return 0;
err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct scatterlist *sg;
int i;
unsigned long allocated_size;
@@ -106,7 +106,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops chunk_heap_ops = {
.allocate = ion_chunk_heap_allocate,
.free = ion_chunk_heap_free,
- .map_dma = ion_chunk_heap_map_dma,
- .unmap_dma = ion_chunk_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
@@ -174,7 +161,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ chunk_heap->base, heap_data->size, heap_data->align);
return &chunk_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index a3446da4fdc2..6c7de74bc7ab 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
+ buffer->sg_table = info->table;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
return 0;
@@ -105,36 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
kfree(info);
}
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
- &info->handle);
-
- *addr = info->handle;
- *len = buffer->size;
-
- return 0;
-}
-
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
@@ -155,16 +126,13 @@ static void *ion_cma_map_kernel(struct ion_heap *heap,
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
}
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
- .map_dma = ion_cma_heap_map_dma,
- .unmap_dma = ion_cma_heap_unmap_dma,
- .phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 814a3c92a56e..b23f2c76c753 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -99,7 +99,7 @@ static int __init ion_dummy_init(void)
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
+ !heap_data->base)
continue;
if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
@@ -120,12 +120,12 @@ err:
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
return err;
@@ -144,12 +144,12 @@ static void __exit ion_dummy_exit(void)
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index ca15a87f6fd3..4e5c0f17f579 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -93,7 +93,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
}
len = min(len, remainder);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
+ vma->vm_page_prot);
if (ret)
return ret;
addr += len;
@@ -116,7 +116,7 @@ static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
- pgprot_t pgprot)
+ pgprot_t pgprot)
{
int p = 0;
int ret = 0;
@@ -181,7 +181,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -266,7 +266,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
@@ -279,7 +279,7 @@ static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
new file mode 100644
index 000000000000..15bac92b7f04
--- /dev/null
+++ b/drivers/staging/android/ion/ion_of.c
@@ -0,0 +1,185 @@
+/*
+ * Based on work from:
+ * Andrew Andrianov <andrew@ncrmnt.org>
+ * Google
+ * The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include <linux/io.h>
+#include <linux/of_reserved_mem.h>
+#include "ion.h"
+#include "ion_priv.h"
+#include "ion_of.h"
+
+static int ion_parse_dt_heap_common(struct device_node *heap_node,
+ struct ion_platform_heap *heap,
+ struct ion_of_heap *compatible)
+{
+ int i;
+
+ for (i = 0; compatible[i].name; i++) {
+ if (of_device_is_compatible(heap_node, compatible[i].compat))
+ break;
+ }
+
+ if (!compatible[i].name)
+ return -ENODEV;
+
+ heap->id = compatible[i].heap_id;
+ heap->type = compatible[i].type;
+ heap->name = compatible[i].name;
+ heap->align = compatible[i].align;
+
+ /* Some kind of callback function pointer? */
+
+ pr_info("%s: id %d type %d name %s align %lx\n", __func__,
+ heap->id, heap->type, heap->name, heap->align);
+ return 0;
+}
+
+static int ion_setup_heap_common(struct platform_device *parent,
+ struct device_node *heap_node,
+ struct ion_platform_heap *heap)
+{
+ int ret = 0;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_CHUNK:
+ if (heap->base && heap->size)
+ return 0;
+
+ ret = of_reserved_mem_device_init(heap->priv);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+ struct ion_of_heap *compatible)
+{
+ int num_heaps, ret;
+ const struct device_node *dt_node = pdev->dev.of_node;
+ struct device_node *node;
+ struct ion_platform_heap *heaps;
+ struct ion_platform_data *data;
+ int i = 0;
+
+ num_heaps = of_get_available_child_count(dt_node);
+
+ if (!num_heaps)
+ return ERR_PTR(-EINVAL);
+
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_platform_heap) * num_heaps,
+ GFP_KERNEL);
+ if (!heaps)
+ return ERR_PTR(-ENOMEM);
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
+ GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_available_child_of_node(dt_node, node) {
+ struct platform_device *heap_pdev;
+
+ ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
+ if (ret)
+ return ERR_PTR(ret);
+
+ heap_pdev = of_platform_device_create(node, heaps[i].name,
+ &pdev->dev);
+		if (!heap_pdev)
+ return ERR_PTR(-ENOMEM);
+ heap_pdev->dev.platform_data = &heaps[i];
+
+ heaps[i].priv = &heap_pdev->dev;
+
+ ret = ion_setup_heap_common(pdev, node, &heaps[i]);
+ if (ret)
+ goto out_err;
+ i++;
+ }
+
+ data->heaps = heaps;
+ data->nr = num_heaps;
+ return data;
+
+out_err:
+ for ( ; i >= 0; i--)
+ if (heaps[i].priv)
+ of_device_unregister(to_platform_device(heaps[i].priv));
+
+ return ERR_PTR(ret);
+}
+
+void ion_destroy_platform_data(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++)
+ if (data->heaps[i].priv)
+ of_device_unregister(to_platform_device(
+ data->heaps[i].priv));
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ion_platform_heap *heap = pdev->dev.platform_data;
+
+ heap->base = rmem->base;
+	heap->size = rmem->size;
+ pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
+ heap->name, &rmem->base, &rmem->size, dev);
+ return 0;
+}
+
+static void rmem_ion_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ return;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+ .device_init = rmem_ion_device_init,
+ .device_release = rmem_ion_device_release,
+};
+
+static int __init rmem_ion_setup(struct reserved_mem *rmem)
+{
+ phys_addr_t size = rmem->size;
+
+ size = size / 1024;
+
+ pr_info("Ion memory setup at %pa size %pa MiB\n",
+ &rmem->base, &size);
+ rmem->ops = &rmem_dma_ops;
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
+#endif
diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h
new file mode 100644
index 000000000000..8241a1770f0a
--- /dev/null
+++ b/drivers/staging/android/ion/ion_of.h
@@ -0,0 +1,37 @@
+/*
+ * Based on work from:
+ * Andrew Andrianov <andrew@ncrmnt.org>
+ * Google
+ * The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ION_OF_H
+#define _ION_OF_H
+
+struct ion_of_heap {
+ const char *compat;
+ int heap_id;
+ int type;
+ const char *name;
+ int align;
+};
+
+#define PLATFORM_HEAP(_compat, _id, _type, _name) \
+{ \
+ .compat = _compat, \
+ .heap_id = _id, \
+ .type = _type, \
+ .name = _name, \
+ .align = PAGE_SIZE, \
+}
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+ struct ion_of_heap *compatible);
+
+void ion_destroy_platform_data(struct ion_platform_data *data);
+
+#endif
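
A minimal sketch of how a platform heap driver might consume this interface: the "vendor,..." compatible strings, heap ids, and vendor_ion_probe() below are hypothetical (not part of this patch), and creating the heaps from the returned data is omitted.

#include <linux/err.h>
#include <linux/platform_device.h>

#include "ion.h"
#include "ion_of.h"

/* Hypothetical table mapping DT compatibles to Ion heap ids/types. */
static struct ion_of_heap vendor_heaps[] = {
	PLATFORM_HEAP("vendor,sys-heap", 0, ION_HEAP_TYPE_SYSTEM, "sys"),
	PLATFORM_HEAP("vendor,carveout-heap", 1, ION_HEAP_TYPE_CARVEOUT,
		      "carveout"),
	{ }	/* sentinel: ion_parse_dt_heap_common() stops at a NULL name */
};

static int vendor_ion_probe(struct platform_device *pdev)
{
	struct ion_platform_data *pdata;

	/* Match each available child node of pdev against the table above. */
	pdata = ion_parse_dt(pdev, vendor_heaps);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/*
	 * Each pdata->heaps[i] now carries the id/type/name/align from the
	 * table; carveout/chunk heaps also have base/size resolved from
	 * their reserved-memory region. Heap creation and registration with
	 * an ion_device are left out of this sketch.
	 */
	platform_set_drvdata(pdev, pdata);
	return 0;
}
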
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 1fe80165a462..aea89c1ec345 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -30,8 +30,9 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+ if (!pool->cached)
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
@@ -114,7 +115,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
}
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
int freed = 0;
bool high;
@@ -147,7 +148,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
return freed;
}
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached)
{
struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
@@ -161,6 +163,8 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
pool->order = order;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
+ if (cached)
+ pool->cached = true;
return pool;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 0239883bffb7..3c3b3245275d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,11 +26,10 @@
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
+#include <linux/miscdevice.h>
#include "ion.h"
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
@@ -42,8 +41,6 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
- * @priv_phys: private data to the buffer representable as
- * an ion_phys_addr_t (and someday a phys_addr_t)
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
@@ -69,10 +66,7 @@ struct ion_buffer {
unsigned long flags;
unsigned long private_flags;
size_t size;
- union {
- void *priv_virt;
- ion_phys_addr_t priv_phys;
- };
+ void *priv_virt;
struct mutex lock;
int kmap_cnt;
void *vaddr;
@@ -88,13 +82,84 @@ struct ion_buffer {
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @clients: rb tree of all the clients attached to this device
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+ int heap_cnt;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node, kmap_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
+/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
* @free: free memory
- * @phys get physical address of a buffer (only define on
- * physically contiguous heaps)
- * @map_dma map the memory for dma to a scatterlist
- * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
@@ -111,11 +176,6 @@ struct ion_heap_ops {
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags);
void (*free)(struct ion_buffer *buffer);
- int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table * (*map_dma)(struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
@@ -328,20 +388,6 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
- unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
@@ -360,6 +406,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
+ * @cached: true if the pool holds cached pages
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -369,6 +416,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
struct ion_page_pool {
int high_count;
int low_count;
+ bool cached;
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
@@ -377,7 +425,8 @@ struct ion_page_pool {
struct plist_node list;
};
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
@@ -403,4 +452,22 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_sync_for_device(struct ion_client *client, int fd);
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id);
+
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+int ion_handle_put_nolock(struct ion_handle *handle);
+
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id);
+
+int ion_handle_put(struct ion_handle *handle);
+
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
+
#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index b69dfc706440..7e023d505af8 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,16 +26,18 @@
#include "ion.h"
#include "ion_priv.h"
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
+
static int order_to_index(unsigned int order)
{
int i;
- for (i = 0; i < num_orders; i++)
+ for (i = 0; i < NUM_ORDERS; i++)
if (order == orders[i])
return i;
BUG();
@@ -49,47 +51,55 @@ static inline unsigned int order_to_size(int order)
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool *pools[0];
+ struct ion_page_pool *uncached_pools[NUM_ORDERS];
+ struct ion_page_pool *cached_pools[NUM_ORDERS];
};
+/*
+ * Pages from the page pool were already zeroed before being returned to
+ * the pool. Cached buffers still need a cache clean after allocation;
+ * uncached buffers have been non-cached since allocation, so they need
+ * no further maintenance.
+ */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct ion_page_pool *pool;
struct page *page;
- if (!cached) {
- page = ion_page_pool_alloc(pool);
- } else {
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
- if (order > 4)
- gfp_flags = high_order_gfp_flags;
- page = alloc_pages(gfp_flags | __GFP_COMP, order);
- if (!page)
- return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- }
+ page = ion_page_pool_alloc(pool);
+ if (cached)
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page)
{
+ struct ion_page_pool *pool;
unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
- } else {
+ /* pages freed by the shrinker go straight back to the system */
+ if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
__free_pages(page, order);
+ return;
}
+
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
+
+ ion_page_pool_free(pool, page);
}
@@ -101,7 +111,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
struct page *page;
int i;
- for (i = 0; i < num_orders; i++) {
+ for (i = 0; i < NUM_ORDERS; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
@@ -118,9 +128,9 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
}
static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
@@ -142,7 +152,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
page = alloc_largest_available(sys_heap, buffer, size_remaining,
- max_order);
+ max_order);
if (!page)
goto free_pages;
list_add_tail(&page->lru, &pages);
@@ -164,7 +174,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
list_del(&page->lru);
}
- buffer->priv_virt = table;
+ buffer->sg_table = table;
return 0;
free_table:
@@ -181,16 +191,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
- bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
int i;
- /*
- * uncached pages come from the page pools, zero them before returning
- * for security purposes (other allocations are zerod at
- * alloc time
- */
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+ /* zero the buffer before returning its pages to the page pool */
+ if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -199,20 +204,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
+ struct ion_page_pool *uncached_pool;
+ struct ion_page_pool *cached_pool;
struct ion_system_heap *sys_heap;
int nr_total = 0;
int i, nr_freed;
@@ -223,28 +219,41 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
if (!nr_to_scan)
only_scan = 1;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
- nr_total += nr_freed;
-
- if (!only_scan) {
+ for (i = 0; i < NUM_ORDERS; i++) {
+ uncached_pool = sys_heap->uncached_pools[i];
+ cached_pool = sys_heap->cached_pools[i];
+
+ if (only_scan) {
+ nr_total += ion_page_pool_shrink(uncached_pool,
+ gfp_mask,
+ nr_to_scan);
+
+ nr_total += ion_page_pool_shrink(cached_pool,
+ gfp_mask,
+ nr_to_scan);
+ } else {
+ nr_freed = ion_page_pool_shrink(uncached_pool,
+ gfp_mask,
+ nr_to_scan);
+ nr_to_scan -= nr_freed;
+ nr_total += nr_freed;
+ if (nr_to_scan <= 0)
+ break;
+ nr_freed = ion_page_pool_shrink(cached_pool,
+ gfp_mask,
+ nr_to_scan);
nr_to_scan -= nr_freed;
- /* shrink completed */
+ nr_total += nr_freed;
if (nr_to_scan <= 0)
break;
}
}
-
return nr_total;
}
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
@@ -259,52 +268,89 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
struct ion_system_heap,
heap);
int i;
+ struct ion_page_pool *pool;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ pool = sys_heap->uncached_pools[i];
+
+ seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+ pool->high_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+ pool->low_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->low_count);
+ }
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
+ for (i = 0; i < NUM_ORDERS; i++) {
+ pool = sys_heap->cached_pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u highmem pages cached %lu total\n",
pool->high_count, pool->order,
(PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
pool->low_count, pool->order,
(PAGE_SIZE << pool->order) * pool->low_count);
}
return 0;
}
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++)
+ if (pools[i])
+ ion_page_pool_destroy(pools[i]);
+}
+
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+ bool cached)
+{
+ int i;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ struct ion_page_pool *pool;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+
+ pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+ if (!pool)
+ goto err_create_pool;
+ pools[i] = pool;
+ }
+ return 0;
+
+err_create_pool:
+ ion_system_heap_destroy_pools(pools);
+ return -ENOMEM;
+}
+
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
- int i;
- heap = kzalloc(sizeof(struct ion_system_heap) +
- sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->heap.ops = &system_heap_ops;
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (ion_system_heap_create_pools(heap->uncached_pools, false))
+ goto free_heap;
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto destroy_pools;
- heap->pools[i] = pool;
- }
+ if (ion_system_heap_create_pools(heap->cached_pools, true))
+ goto destroy_uncached_pools;
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-destroy_pools:
- while (i--)
- ion_page_pool_destroy(heap->pools[i]);
+destroy_uncached_pools:
+ ion_system_heap_destroy_pools(heap->uncached_pools);
+
+free_heap:
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -316,8 +362,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
heap);
int i;
- for (i = 0; i < num_orders; i++)
- ion_page_pool_destroy(sys_heap->pools[i]);
+ for (i = 0; i < NUM_ORDERS; i++) {
+ ion_page_pool_destroy(sys_heap->uncached_pools[i]);
+ ion_page_pool_destroy(sys_heap->cached_pools[i]);
+ }
kfree(sys_heap);
}
@@ -358,7 +406,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
sg_set_page(table->sgl, page, len, 0);
- buffer->priv_virt = table;
+ buffer->sg_table = table;
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
@@ -375,7 +423,7 @@ free_pages:
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
unsigned long i;
@@ -386,34 +434,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- *addr = page_to_phys(page);
- *len = buffer->size;
- return 0;
-}
-
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
- .phys = ion_system_contig_heap_phys,
- .map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 5a396a1a8238..5abf8320a96a 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -42,7 +42,8 @@ struct ion_test_data {
};
static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size, bool write)
+ void __user *ptr, size_t offset, size_t size,
+ bool write)
{
int ret = 0;
struct dma_buf_attachment *attach;
@@ -98,7 +99,7 @@ err:
}
static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
+ size_t offset, size_t size, bool write)
{
int ret;
unsigned long page_offset = offset >> PAGE_SHIFT;
@@ -144,7 +145,7 @@ err:
}
static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct ion_test_data *test_data = filp->private_data;
int ret = 0;
@@ -179,17 +180,19 @@ static long ion_test_ioctl(struct file *filp, unsigned int cmd,
case ION_IOC_TEST_DMA_MAPPING:
{
ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset,
+ data.test_rw.size,
+ data.test_rw.write);
break;
}
case ION_IOC_TEST_KERNEL_MAPPING:
{
ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset,
+ data.test_rw.size,
+ data.test_rw.write);
break;
}
default:
@@ -242,7 +245,7 @@ static int __init ion_test_probe(struct platform_device *pdev)
struct ion_test_device *testdev;
testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!testdev)
return -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 45a1b4ec4ca3..ec3b66561412 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -92,8 +92,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- total_swapcache_pages();
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -204,10 +204,9 @@ device_initcall(lowmem_init);
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
-module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
-module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
- S_IRUGO | S_IWUSR);
+module_param_named(cost, lowmem_shrinker.seeks, int, 0644);
+module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- S_IRUGO | S_IWUSR);
-module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+ 0644);
+module_param_named(debug_level, lowmem_debug_level, uint, 0644);
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 0a8e40f92cd7..14cd8738ecfc 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -44,32 +44,26 @@ enum ion_heap_type {
* must be last so device specific heaps always
* are at the end of this enum
*/
- ION_NUM_HEAPS = 16,
};
-#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
-#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
-#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
-
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
-#define ION_FLAG_CACHED 1 /*
- * mappings of this buffer should be
- * cached, ion will do cache
- * maintenance when the buffer is
- * mapped for dma
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
- * mappings of this buffer will created
- * at mmap time, if this is set
- * caches must be managed
- * manually
- */
+
+/*
+ * mappings of this buffer should be cached; ion will do cache maintenance
+ * when the buffer is mapped for dma
+ */
+#define ION_FLAG_CACHED 1
+
+/*
+ * mappings of this buffer will be created at mmap time; if this is set,
+ * caches must be managed manually
+ */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2
/**
* DOC: Ion Userspace API
@@ -134,6 +128,36 @@ struct ion_custom_data {
unsigned long arg;
};
+#define MAX_HEAP_NAME 32
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name: first 32 characters of the heap name
+ * @type: heap type
+ * @heap_id: heap id for the heap
+ */
+struct ion_heap_data {
+ char name[MAX_HEAP_NAME];
+ __u32 type;
+ __u32 heap_id;
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt: total number of heaps to be copied
+ * @heaps: buffer to copy heap data
+ */
+struct ion_heap_query {
+ __u32 cnt; /* Total number of heaps to be copied */
+ __u32 reserved0; /* align to 64bits */
+ __u64 heaps; /* buffer to be populated */
+ __u32 reserved1;
+ __u32 reserved2;
+};
+
#define ION_IOC_MAGIC 'I'
/**
@@ -200,4 +224,13 @@ struct ion_custom_data {
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
+ struct ion_heap_query)
+
#endif /* _UAPI_LINUX_ION_H */
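
The query ioctl lends itself to a two-pass pattern from userspace. The sketch below is a hypothetical example, assuming the common convention that a call with a zero heaps pointer only fills in cnt; the device path, helper name, and availability of the staging uapi header are assumptions, not part of this patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/ion.h>	/* assumes the staging uapi header is available */

static int list_ion_heaps(const char *dev_path)
{
	struct ion_heap_query query;
	struct ion_heap_data *heaps;
	uint32_t i;
	int fd, ret;

	fd = open(dev_path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	/* First pass: heaps == 0, so only cnt is expected to be filled in. */
	memset(&query, 0, sizeof(query));
	ret = ioctl(fd, ION_IOC_HEAP_QUERY, &query);
	if (ret < 0 || !query.cnt)
		goto out;

	heaps = calloc(query.cnt, sizeof(*heaps));
	if (!heaps) {
		ret = -1;
		goto out;
	}

	/* Second pass: hand the kernel a buffer sized for cnt entries. */
	query.heaps = (uint64_t)(uintptr_t)heaps;
	ret = ioctl(fd, ION_IOC_HEAP_QUERY, &query);
	if (!ret)
		for (i = 0; i < query.cnt; i++)
			printf("heap %u: %s (type %u)\n",
			       heaps[i].heap_id, heaps[i].name, heaps[i].type);

	free(heaps);
out:
	close(fd);
	return ret;
}

A caller would typically pass "/dev/ion" (or wherever the ion misc device is exposed) as dev_path.
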
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 1999eed4f4c5..64b3966c5f1f 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -81,20 +81,20 @@ struct comedi_file {
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
static int comedi_num_legacy_minors;
-module_param(comedi_num_legacy_minors, int, S_IRUGO);
+module_param(comedi_num_legacy_minors, int, 0444);
MODULE_PARM_DESC(comedi_num_legacy_minors,
"number of comedi minor devices to reserve for non-auto-configured devices (default 0)"
);
unsigned int comedi_default_buf_size_kb = CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB;
-module_param(comedi_default_buf_size_kb, uint, S_IRUGO | S_IWUSR);
+module_param(comedi_default_buf_size_kb, uint, 0644);
MODULE_PARM_DESC(comedi_default_buf_size_kb,
"default asynchronous buffer size in KiB (default "
__MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB) ")");
unsigned int comedi_default_buf_maxsize_kb
= CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB;
-module_param(comedi_default_buf_maxsize_kb, uint, S_IRUGO | S_IWUSR);
+module_param(comedi_default_buf_maxsize_kb, uint, 0644);
MODULE_PARM_DESC(comedi_default_buf_maxsize_kb,
"default maximum size of asynchronous buffer in KiB (default "
__MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB) ")");
@@ -2233,7 +2233,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
goto done;
}
- n_pages = size >> PAGE_SHIFT;
+ n_pages = vma_pages(vma);
/* get reference to current buf map (if any) */
bm = comedi_buf_map_from_subdev_get(s);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 44511d729450..a5bf2cc165c0 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -15,7 +15,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
-*/
+ */
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
deleted file mode 100644
index 375707497896..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Watchdog Related Defines */
-
-#define ADDIDATA_TIMER 0
-#define ADDIDATA_WATCHDOG 2
-
-/*
- * (*insn_config) for the timer subdevice
- *
- * Configures The Timer, Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Configure As Timer
- * 1 Configure As Counter
- * 2 Configure As Watchdog
- * data[1] : 1 Enable Interrupt
- * 0 Disable Interrupt
- * data[2] : Time Unit
- * data[3] : Reload Value
- */
-static int apci3501_config_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned int ctrl;
-
- if (data[0] != ADDIDATA_WATCHDOG &&
- data[0] != ADDIDATA_TIMER)
- return -EINVAL;
-
- devpriv->tsk_Current = current;
-
- devpriv->timer_mode = data[0];
-
- /* first, disable the watchdog or stop the timer */
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
- ctrl = 0;
- } else {
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- /* enable/disable the timer interrupt */
- ctrl = (data[1] == 1) ? ADDI_TCW_CTRL_IRQ_ENA : 0;
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- outl(data[2], devpriv->tcw + ADDI_TCW_TIMEBASE_REG);
- outl(data[3], devpriv->tcw + ADDI_TCW_RELOAD_REG);
-
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
- /* Set the mode (e2->e0) NOTE: this doesn't look correct */
- ctrl |= ~(ADDI_TCW_CTRL_CNT_UP | ADDI_TCW_CTRL_EXT_CLK_MASK |
- ADDI_TCW_CTRL_MODE_MASK | ADDI_TCW_CTRL_GATE |
- ADDI_TCW_CTRL_TRIG | ADDI_TCW_CTRL_TIMER_ENA |
- ADDI_TCW_CTRL_RESET_ENA | ADDI_TCW_CTRL_WARN_ENA |
- ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_ENA);
- } else {
- /* mode 2 */
- ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
- ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-/*
- * (*insn_write) for the timer subdevice
- *
- * Start / Stop The Selected Timer , Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Timer
- * 1 Counter
- * 2 Watchdog
- * data[1] : 1 Start
- * 0 Stop
- * 2 Trigger
- */
-static int apci3501_write_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
- unsigned int ctrl;
-
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG ||
- devpriv->timer_mode == ADDIDATA_TIMER) {
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
-
- if (data[1] == 1) { /* enable */
- ctrl |= ADDI_TCW_CTRL_ENA;
- } else if (data[1] == 0) { /* stop */
- if (devpriv->timer_mode == ADDIDATA_WATCHDOG)
- ctrl = 0;
- else
- ctrl &= ~ADDI_TCW_CTRL_ENA;
- } else if (data[1] == 2) { /* trigger */
- ctrl |= ADDI_TCW_CTRL_TRIG;
- }
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
- }
-
- inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
- return insn->n;
-}
-
-/*
- * (*insn_read) for the timer subdevice
- *
- * Read The Selected Timer, Counter or Watchdog
- * Data Pointer contains configuration parameters as below
- * data[0] : 0 Timer
- * 1 Counter
- * 2 Watchdog
- * data[1] : Timer Counter Watchdog Number
- */
-static int apci3501_read_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci3501_private *devpriv = dev->private;
-
- if (devpriv->timer_mode != ADDIDATA_TIMER &&
- devpriv->timer_mode != ADDIDATA_WATCHDOG)
- return -EINVAL;
-
- data[0] = inl(devpriv->tcw + ADDI_TCW_STATUS_REG) &
- ADDI_TCW_STATUS_OVERFLOW;
- data[1] = inl(devpriv->tcw + ADDI_TCW_VAL_REG);
-
- return insn->n;
-}
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 40ff91411139..57f0f46de0be 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -22,12 +22,36 @@
* more details.
*/
+/*
+ * Driver: addi_apci_3501
+ * Description: ADDI-DATA APCI-3501 Analog output board
+ * Devices: [ADDI-DATA] APCI-3501 (addi_apci_3501)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Mon, 20 Jun 2016 10:57:01 -0700
+ * Status: untested
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * This board has the following features:
+ * - 4 or 8 analog output channels
+ * - 2 optically isolated digital inputs
+ * - 2 optically isolated digital outputs
+ * - 1 12-bit watchdog/timer
+ *
+ * There are 2 versions of the APCI-3501:
+ * - APCI-3501-4 4 analog output channels
+ * - APCI-3501-8 8 analog output channels
+ *
+ * These boards use the same PCI Vendor/Device IDs. The number of output
+ * channels used by this driver is determined by reading the EEPROM on
+ * the board.
+ *
+ * The watchdog/timer subdevice is not currently supported.
+ */
+
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
#include "../comedi_pci.h"
-#include "addi_tcw.h"
#include "amcc_s5933.h"
/*
@@ -67,8 +91,6 @@
struct apci3501_private {
unsigned long amcc;
- unsigned long tcw;
- struct task_struct *tsk_Current;
unsigned char timer_mode;
};
@@ -139,8 +161,6 @@ static int apci3501_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-#include "addi-data/hwdrv_apci3501.c"
-
static int apci3501_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -253,37 +273,6 @@ static int apci3501_eeprom_insn_read(struct comedi_device *dev,
return insn->n;
}
-static irqreturn_t apci3501_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct apci3501_private *devpriv = dev->private;
- unsigned int status;
- unsigned int ctrl;
-
- /* Disable Interrupt */
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_IRQ_ENA);
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
-
- status = inl(devpriv->tcw + ADDI_TCW_IRQ_REG);
- if (!(status & ADDI_TCW_IRQ)) {
- dev_err(dev->class_dev, "IRQ from unknown source\n");
- return IRQ_NONE;
- }
-
- /* Enable Interrupt Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_IRQ_ENA);
- ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
- outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
- inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
-
- return IRQ_HANDLED;
-}
-
static int apci3501_reset(struct comedi_device *dev)
{
unsigned int val;
@@ -333,17 +322,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
devpriv->amcc = pci_resource_start(pcidev, 0);
dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->tcw = dev->iobase + APCI3501_TIMER_BASE;
ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, apci3501_interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
@@ -383,17 +364,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = apci3501_do_insn_bits;
- /* Initialize the timer/watchdog subdevice */
+ /* Timer/Watchdog subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
- s->insn_write = apci3501_write_insn_timer;
- s->insn_read = apci3501_read_insn_timer;
- s->insn_config = apci3501_config_insn_timer;
+ s->type = COMEDI_SUBD_UNUSED;
/* Initialize the eeprom subdevice */
s = &dev->subdevices[4];
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index be70bd333807..86450c08f291 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -1693,8 +1693,7 @@ static void pci9118_detach(struct comedi_device *dev)
pci9118_reset(dev);
comedi_pci_detach(dev);
pci9118_free_dma(dev);
- if (pcidev)
- pci_dev_put(pcidev);
+ pci_dev_put(pcidev);
}
static struct comedi_driver adl_pci9118_driver = {
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 1f9c08a845b6..cb9c2699277e 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1,34 +1,34 @@
/*
- comedi/drivers/cb_pcidas64.c
- This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS
- 64xx, 60xx, and 4020 cards.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2001, 2002 Frank Mori Hess
-
- Thanks also go to the following people:
-
- Steve Rosenbluth, for providing the source code for
- his pci-das6402 driver, and source code for working QNX pci-6402
- drivers by Greg Laird and Mariusz Bogacz. None of the code was
- used directly here, but it was useful as an additional source of
- documentation on how to program the boards.
-
- John Sims, for much testing and feedback on pcidas-4020 support.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/cb_pcidas64.c
+ * This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS
+ * 64xx, 60xx, and 4020 cards.
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2001, 2002 Frank Mori Hess
+ *
+ * Thanks also go to the following people:
+ *
+ * Steve Rosenbluth, for providing the source code for
+ * his pci-das6402 driver, and source code for working QNX pci-6402
+ * drivers by Greg Laird and Mariusz Bogacz. None of the code was
+ * used directly here, but it was useful as an additional source of
+ * documentation on how to program the boards.
+ *
+ * John Sims, for much testing and feedback on pcidas-4020 support.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: cb_pcidas64
@@ -66,19 +66,18 @@
*/
/*
-
-TODO:
- make it return error if user attempts an ai command that uses the
- external queue, and an ao command simultaneously user counter subdevice
- there are a number of boards this driver will support when they are
- fully released, but does not yet since the pci device id numbers
- are not yet available.
-
- support prescaled 100khz clock for slow pacing (not available on 6000
- series?)
-
- make ao fifo size adjustable like ai fifo
-*/
+ * TODO:
+ * make it return error if user attempts an ai command that uses the
+ * external queue, and an ao command simultaneously user counter subdevice
+ * there are a number of boards this driver will support when they are
+ * fully released, but does not yet since the pci device id numbers
+ * are not yet available.
+ *
+ * support prescaled 100khz clock for slow pacing (not available on 6000
+ * series?)
+ *
+ * make ao fifo size adjustable like ai fifo
+ */
#include <linux/module.h>
#include <linux/delay.h>
@@ -90,53 +89,56 @@ TODO:
#include "plx9080.h"
#define TIMER_BASE 25 /* 40MHz master clock */
-/* 100kHz 'prescaled' clock for slow acquisition,
- * maybe I'll support this someday */
+/*
+ * 100kHz 'prescaled' clock for slow acquisition,
+ * maybe I'll support this someday
+ */
#define PRESCALED_TIMER_BASE 10000
-#define DMA_BUFFER_SIZE 0x1000
+#define DMA_BUFFER_SIZE 0x1000
+#define DAC_FIFO_SIZE 0x2000
-/* maximum value that can be loaded into board's 24-bit counters*/
+/* maximum value that can be loaded into board's 24-bit counters */
static const int max_counter_value = 0xffffff;
/* PCI-DAS64xxx base addresses */
/* devpriv->main_iobase registers */
enum write_only_registers {
- INTR_ENABLE_REG = 0x0, /* interrupt enable register */
- HW_CONFIG_REG = 0x2, /* hardware config register */
+ INTR_ENABLE_REG = 0x0, /* interrupt enable register */
+ HW_CONFIG_REG = 0x2, /* hardware config register */
DAQ_SYNC_REG = 0xc,
DAQ_ATRIG_LOW_4020_REG = 0xc,
- ADC_CONTROL0_REG = 0x10, /* adc control register 0 */
- ADC_CONTROL1_REG = 0x12, /* adc control register 1 */
+ ADC_CONTROL0_REG = 0x10, /* adc control register 0 */
+ ADC_CONTROL1_REG = 0x12, /* adc control register 1 */
CALIBRATION_REG = 0x14,
- /* lower 16 bits of adc sample interval counter */
+ /* lower 16 bits of adc sample interval counter */
ADC_SAMPLE_INTERVAL_LOWER_REG = 0x16,
- /* upper 8 bits of adc sample interval counter */
+ /* upper 8 bits of adc sample interval counter */
ADC_SAMPLE_INTERVAL_UPPER_REG = 0x18,
- /* lower 16 bits of delay interval counter */
+ /* lower 16 bits of delay interval counter */
ADC_DELAY_INTERVAL_LOWER_REG = 0x1a,
- /* upper 8 bits of delay interval counter */
+ /* upper 8 bits of delay interval counter */
ADC_DELAY_INTERVAL_UPPER_REG = 0x1c,
- /* lower 16 bits of hardware conversion/scan counter */
+ /* lower 16 bits of hardware conversion/scan counter */
ADC_COUNT_LOWER_REG = 0x1e,
- /* upper 8 bits of hardware conversion/scan counter */
+ /* upper 8 bits of hardware conversion/scan counter */
ADC_COUNT_UPPER_REG = 0x20,
- ADC_START_REG = 0x22, /* software trigger to start acquisition */
- ADC_CONVERT_REG = 0x24, /* initiates single conversion */
- ADC_QUEUE_CLEAR_REG = 0x26, /* clears adc queue */
- ADC_QUEUE_LOAD_REG = 0x28, /* loads adc queue */
+ ADC_START_REG = 0x22, /* software trigger to start acquisition */
+ ADC_CONVERT_REG = 0x24, /* initiates single conversion */
+ ADC_QUEUE_CLEAR_REG = 0x26, /* clears adc queue */
+ ADC_QUEUE_LOAD_REG = 0x28, /* loads adc queue */
ADC_BUFFER_CLEAR_REG = 0x2a,
- /* high channel for internal queue, use adc_chan_bits() inline above */
+ /* high channel for internal queue, use adc_chan_bits() inline above */
ADC_QUEUE_HIGH_REG = 0x2c,
- DAC_CONTROL0_REG = 0x50, /* dac control register 0 */
- DAC_CONTROL1_REG = 0x52, /* dac control register 0 */
- /* lower 16 bits of dac sample interval counter */
+ DAC_CONTROL0_REG = 0x50, /* dac control register 0 */
+ DAC_CONTROL1_REG = 0x52, /* dac control register 0 */
+ /* lower 16 bits of dac sample interval counter */
DAC_SAMPLE_INTERVAL_LOWER_REG = 0x54,
- /* upper 8 bits of dac sample interval counter */
+ /* upper 8 bits of dac sample interval counter */
DAC_SAMPLE_INTERVAL_UPPER_REG = 0x56,
DAC_SELECT_REG = 0x60,
DAC_START_REG = 0x64,
- DAC_BUFFER_CLEAR_REG = 0x66, /* clear dac buffer */
+ DAC_BUFFER_CLEAR_REG = 0x66, /* clear dac buffer */
};
static inline unsigned int dac_convert_reg(unsigned int channel)
@@ -168,8 +170,8 @@ enum read_only_registers {
};
enum read_write_registers {
- I8255_4020_REG = 0x48, /* 8255 offset, for 4020 only */
- /* external channel/gain queue, uses same bits as ADC_QUEUE_LOAD_REG */
+ I8255_4020_REG = 0x48, /* 8255 offset, for 4020 only */
+ /* external channel/gain queue, uses same bits as ADC_QUEUE_LOAD_REG */
ADC_QUEUE_FIFO_REG = 0x100,
ADC_FIFO_REG = 0x200, /* adc data fifo */
/* dac data fifo, has weird interactions with external channel queue */
@@ -188,50 +190,51 @@ enum dio_counter_registers {
/* bit definitions for write-only registers */
enum intr_enable_contents {
- ADC_INTR_SRC_MASK = 0x3, /* adc interrupt source mask */
- ADC_INTR_QFULL_BITS = 0x0, /* interrupt fifo quarter full */
- ADC_INTR_EOC_BITS = 0x1, /* interrupt end of conversion */
- ADC_INTR_EOSCAN_BITS = 0x2, /* interrupt end of scan */
- ADC_INTR_EOSEQ_BITS = 0x3, /* interrupt end of sequence mask */
- EN_ADC_INTR_SRC_BIT = 0x4, /* enable adc interrupt source */
- EN_ADC_DONE_INTR_BIT = 0x8, /* enable adc acquisition done intr */
+ ADC_INTR_SRC_MASK = 0x3, /* adc interrupt source mask */
+ ADC_INTR_QFULL_BITS = 0x0, /* interrupt fifo quarter full */
+ ADC_INTR_EOC_BITS = 0x1, /* interrupt end of conversion */
+ ADC_INTR_EOSCAN_BITS = 0x2, /* interrupt end of scan */
+ ADC_INTR_EOSEQ_BITS = 0x3, /* interrupt end of sequence mask */
+ EN_ADC_INTR_SRC_BIT = 0x4, /* enable adc interrupt source */
+ EN_ADC_DONE_INTR_BIT = 0x8, /* enable adc acquisition done intr */
DAC_INTR_SRC_MASK = 0x30,
DAC_INTR_QEMPTY_BITS = 0x0,
DAC_INTR_HIGH_CHAN_BITS = 0x10,
- EN_DAC_INTR_SRC_BIT = 0x40, /* enable dac interrupt source */
+ EN_DAC_INTR_SRC_BIT = 0x40, /* enable dac interrupt source */
EN_DAC_DONE_INTR_BIT = 0x80,
- EN_ADC_ACTIVE_INTR_BIT = 0x200, /* enable adc active interrupt */
- EN_ADC_STOP_INTR_BIT = 0x400, /* enable adc stop trigger interrupt */
- EN_DAC_ACTIVE_INTR_BIT = 0x800, /* enable dac active interrupt */
- EN_DAC_UNDERRUN_BIT = 0x4000, /* enable dac underrun status bit */
- EN_ADC_OVERRUN_BIT = 0x8000, /* enable adc overrun status bit */
+ EN_ADC_ACTIVE_INTR_BIT = 0x200, /* enable adc active interrupt */
+ EN_ADC_STOP_INTR_BIT = 0x400, /* enable adc stop trigger interrupt */
+ EN_DAC_ACTIVE_INTR_BIT = 0x800, /* enable dac active interrupt */
+ EN_DAC_UNDERRUN_BIT = 0x4000, /* enable dac underrun status bit */
+ EN_ADC_OVERRUN_BIT = 0x8000, /* enable adc overrun status bit */
};
enum hw_config_contents {
- MASTER_CLOCK_4020_MASK = 0x3, /* master clock source mask for 4020 */
- INTERNAL_CLOCK_4020_BITS = 0x1, /* use 40 MHz internal master clock */
- BNC_CLOCK_4020_BITS = 0x2, /* use BNC input for master clock */
- EXT_CLOCK_4020_BITS = 0x3, /* use dio input for master clock */
- EXT_QUEUE_BIT = 0x200, /* use external channel/gain queue */
- /* use 225 nanosec strobe when loading dac instead of 50 nanosec */
+ MASTER_CLOCK_4020_MASK = 0x3, /* master clock source mask for 4020 */
+ INTERNAL_CLOCK_4020_BITS = 0x1, /* use 40 MHz internal master clock */
+ BNC_CLOCK_4020_BITS = 0x2, /* use BNC input for master clock */
+ EXT_CLOCK_4020_BITS = 0x3, /* use dio input for master clock */
+ EXT_QUEUE_BIT = 0x200, /* use external channel/gain queue */
+ /* use 225 nanosec strobe when loading dac instead of 50 nanosec */
SLOW_DAC_BIT = 0x400,
- /* bit with unknown function yet given as default value in pci-das64
- * manual */
+ /*
+ * bit with unknown function yet given as default value in pci-das64
+ * manual
+ */
HW_CONFIG_DUMMY_BITS = 0x2000,
- /* bit selects channels 1/0 for analog input/output, otherwise 0/1 */
+ /* bit selects channels 1/0 for analog input/output, otherwise 0/1 */
DMA_CH_SELECT_BIT = 0x8000,
- FIFO_SIZE_REG = 0x4, /* allows adjustment of fifo sizes */
- DAC_FIFO_SIZE_MASK = 0xff00, /* bits that set dac fifo size */
- DAC_FIFO_BITS = 0xf800, /* 8k sample ao fifo */
+ FIFO_SIZE_REG = 0x4, /* allows adjustment of fifo sizes */
+ DAC_FIFO_SIZE_MASK = 0xff00, /* bits that set dac fifo size */
+ DAC_FIFO_BITS = 0xf800, /* 8k sample ao fifo */
};
-#define DAC_FIFO_SIZE 0x2000
enum daq_atrig_low_4020_contents {
- /* use trig/ext clk bnc input for analog gate signal */
+ /* use trig/ext clk bnc input for analog gate signal */
EXT_AGATE_BNC_BIT = 0x8000,
- /* use trig/ext clk bnc input for external stop trigger signal */
+ /* use trig/ext clk bnc input for external stop trigger signal */
EXT_STOP_TRIG_BNC_BIT = 0x4000,
- /* use trig/ext clk bnc input for external start trigger signal */
+ /* use trig/ext clk bnc input for external start trigger signal */
EXT_START_TRIG_BNC_BIT = 0x2000,
};
@@ -241,38 +244,38 @@ static inline uint16_t analog_trig_low_threshold_bits(uint16_t threshold)
}
enum adc_control0_contents {
- ADC_GATE_SRC_MASK = 0x3, /* bits that select gate */
- ADC_SOFT_GATE_BITS = 0x1, /* software gate */
- ADC_EXT_GATE_BITS = 0x2, /* external digital gate */
- ADC_ANALOG_GATE_BITS = 0x3, /* analog level gate */
- /* level-sensitive gate (for digital) */
+ ADC_GATE_SRC_MASK = 0x3, /* bits that select gate */
+ ADC_SOFT_GATE_BITS = 0x1, /* software gate */
+ ADC_EXT_GATE_BITS = 0x2, /* external digital gate */
+ ADC_ANALOG_GATE_BITS = 0x3, /* analog level gate */
+ /* level-sensitive gate (for digital) */
ADC_GATE_LEVEL_BIT = 0x4,
- ADC_GATE_POLARITY_BIT = 0x8, /* gate active low */
+ ADC_GATE_POLARITY_BIT = 0x8, /* gate active low */
ADC_START_TRIG_SOFT_BITS = 0x10,
ADC_START_TRIG_EXT_BITS = 0x20,
ADC_START_TRIG_ANALOG_BITS = 0x30,
ADC_START_TRIG_MASK = 0x30,
- ADC_START_TRIG_FALLING_BIT = 0x40, /* trig 1 uses falling edge */
- /* external pacing uses falling edge */
+ ADC_START_TRIG_FALLING_BIT = 0x40, /* trig 1 uses falling edge */
+ /* external pacing uses falling edge */
ADC_EXT_CONV_FALLING_BIT = 0x800,
- /* enable hardware scan counter */
+ /* enable hardware scan counter */
ADC_SAMPLE_COUNTER_EN_BIT = 0x1000,
- ADC_DMA_DISABLE_BIT = 0x4000, /* disables dma */
- ADC_ENABLE_BIT = 0x8000, /* master adc enable */
+ ADC_DMA_DISABLE_BIT = 0x4000, /* disables dma */
+ ADC_ENABLE_BIT = 0x8000, /* master adc enable */
};
enum adc_control1_contents {
- /* should be set for boards with > 16 channels */
+ /* should be set for boards with > 16 channels */
ADC_QUEUE_CONFIG_BIT = 0x1,
CONVERT_POLARITY_BIT = 0x10,
EOC_POLARITY_BIT = 0x20,
- ADC_SW_GATE_BIT = 0x40, /* software gate of adc */
- ADC_DITHER_BIT = 0x200, /* turn on extra noise for dithering */
+ ADC_SW_GATE_BIT = 0x40, /* software gate of adc */
+ ADC_DITHER_BIT = 0x200, /* turn on extra noise for dithering */
RETRIGGER_BIT = 0x800,
ADC_LO_CHANNEL_4020_MASK = 0x300,
ADC_HI_CHANNEL_4020_MASK = 0xc00,
- TWO_CHANNEL_4020_BITS = 0x1000, /* two channel mode for 4020 */
- FOUR_CHANNEL_4020_BITS = 0x2000, /* four channel mode for 4020 */
+ TWO_CHANNEL_4020_BITS = 0x1000, /* two channel mode for 4020 */
+ FOUR_CHANNEL_4020_BITS = 0x2000, /* four channel mode for 4020 */
CHANNEL_MODE_4020_MASK = 0x3000,
ADC_MODE_MASK = 0xf000,
};
@@ -296,10 +299,10 @@ enum calibration_contents {
SELECT_8800_BIT = 0x1,
SELECT_8402_64XX_BIT = 0x2,
SELECT_1590_60XX_BIT = 0x2,
- CAL_EN_64XX_BIT = 0x40, /* calibration enable for 64xx series */
+ CAL_EN_64XX_BIT = 0x40, /* calibration enable for 64xx series */
SERIAL_DATA_IN_BIT = 0x80,
SERIAL_CLOCK_BIT = 0x100,
- CAL_EN_60XX_BIT = 0x200, /* calibration enable for 60xx series */
+ CAL_EN_60XX_BIT = 0x200, /* calibration enable for 60xx series */
CAL_GAIN_BIT = 0x800,
};
@@ -326,12 +329,12 @@ static inline uint16_t adc_convert_chan_4020_bits(unsigned int channel)
};
enum adc_queue_load_contents {
- UNIP_BIT = 0x800, /* unipolar/bipolar bit */
- ADC_SE_DIFF_BIT = 0x1000, /* single-ended/ differential bit */
- /* non-referenced single-ended (common-mode input) */
+ UNIP_BIT = 0x800, /* unipolar/bipolar bit */
+ ADC_SE_DIFF_BIT = 0x1000, /* single-ended/ differential bit */
+ /* non-referenced single-ended (common-mode input) */
ADC_COMMON_BIT = 0x2000,
- QUEUE_EOSEQ_BIT = 0x4000, /* queue end of sequence */
- QUEUE_EOSCAN_BIT = 0x8000, /* queue end of scan */
+ QUEUE_EOSEQ_BIT = 0x4000, /* queue end of sequence */
+ QUEUE_EOSCAN_BIT = 0x8000, /* queue end of scan */
};
static inline uint16_t adc_chan_bits(unsigned int channel)
@@ -340,7 +343,7 @@ static inline uint16_t adc_chan_bits(unsigned int channel)
};
enum dac_control0_contents {
- DAC_ENABLE_BIT = 0x8000, /* dac controller enable bit */
+ DAC_ENABLE_BIT = 0x8000, /* dac controller enable bit */
DAC_CYCLIC_STOP_BIT = 0x4000,
DAC_WAVEFORM_MODE_BIT = 0x100,
DAC_EXT_UPDATE_FALLING_BIT = 0x80,
@@ -360,7 +363,7 @@ enum dac_control1_contents {
DAC_WRITE_POLARITY_BIT = 0x800, /* board-dependent setting */
DAC1_EXT_REF_BIT = 0x200,
DAC0_EXT_REF_BIT = 0x100,
- DAC_OUTPUT_ENABLE_BIT = 0x80, /* dac output enable bit */
+ DAC_OUTPUT_ENABLE_BIT = 0x80, /* dac output enable bit */
DAC_UPDATE_POLARITY_BIT = 0x40, /* board-dependent setting */
DAC_SW_GATE_BIT = 0x20,
DAC1_UNIPOLAR_BIT = 0x8,
@@ -409,9 +412,9 @@ enum i2c_addresses {
};
enum range_cal_i2c_contents {
- /* bits that set what source the adc converter measures */
+ /* bits that set what source the adc converter measures */
ADC_SRC_4020_MASK = 0x70,
- /* make bnc trig/ext clock threshold 0V instead of 2.5V */
+ /* make bnc trig/ext clock threshold 0V instead of 2.5V */
BNC_TRIG_THRESHOLD_0V_BIT = 0x80,
};
@@ -422,7 +425,7 @@ static inline uint8_t adc_src_4020_bits(unsigned int source)
static inline uint8_t attenuate_bit(unsigned int channel)
{
- /* attenuate channel (+-5V input range) */
+ /* attenuate channel (+-5V input range) */
return 1 << (channel & 0x3);
};
@@ -627,18 +630,18 @@ enum pcidas64_boardid {
struct pcidas64_board {
const char *name;
- int ai_se_chans; /* number of ai inputs in single-ended mode */
- int ai_bits; /* analog input resolution */
- int ai_speed; /* fastest conversion period in ns */
+ int ai_se_chans; /* number of ai inputs in single-ended mode */
+ int ai_bits; /* analog input resolution */
+ int ai_speed; /* fastest conversion period in ns */
const struct comedi_lrange *ai_range_table;
const uint8_t *ai_range_code;
- int ao_nchan; /* number of analog out channels */
- int ao_bits; /* analog output resolution */
- int ao_scan_speed; /* analog output scan speed */
+ int ao_nchan; /* number of analog out channels */
+ int ao_bits; /* analog output resolution */
+ int ao_scan_speed; /* analog output scan speed */
const struct comedi_lrange *ao_range_table;
const int *ao_range_code;
const struct hw_fifo_info *const ai_fifo;
- /* different board families have slightly different registers */
+ /* different board families have slightly different registers */
enum register_layout layout;
unsigned has_8255:1;
};
@@ -699,7 +702,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.has_8255 = 1,
},
[BOARD_PCIDAS6402_12] = {
- .name = "pci-das6402/12", /* XXX check */
+ .name = "pci-das6402/12", /* XXX check */
.ai_se_chans = 64,
.ai_bits = 12,
.ai_speed = 5000,
@@ -996,7 +999,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.ai_speed = 50,
.ao_bits = 12,
.ao_nchan = 2,
- .ao_scan_speed = 0, /* no hardware pacing on ao */
+ .ao_scan_speed = 0, /* no hardware pacing on ao */
.layout = LAYOUT_4020,
.ai_range_table = &ai_ranges_4020,
.ao_range_table = &ao_ranges_4020,
@@ -1005,9 +1008,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
.has_8255 = 1,
},
#if 0
- /*
- * The device id for these boards is unknown
- */
+ /* The device id for these boards is unknown */
[BOARD_PCIDAS6402_16_JR] = {
.name = "pci-das6402/16/jr",
@@ -1116,62 +1117,66 @@ static inline unsigned short se_diff_bit_6xxx(struct comedi_device *dev,
}
struct ext_clock_info {
- /* master clock divisor to use for scans with external master clock */
+ /* master clock divisor to use for scans with external master clock */
unsigned int divisor;
- /* chanspec for master clock input when used as scan begin src */
+ /* chanspec for master clock input when used as scan begin src */
unsigned int chanspec;
};
/* this structure is for data unique to this hardware driver. */
struct pcidas64_private {
- /* base addresses (physical) */
+ /* base addresses (physical) */
resource_size_t main_phys_iobase;
resource_size_t dio_counter_phys_iobase;
- /* base addresses (ioremapped) */
+ /* base addresses (ioremapped) */
void __iomem *plx9080_iobase;
void __iomem *main_iobase;
- /* local address (used by dma controller) */
+ /* local address (used by dma controller) */
uint32_t local0_iobase;
uint32_t local1_iobase;
- /* dma buffers for analog input */
+ /* dma buffers for analog input */
uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT];
- /* physical addresses of ai dma buffers */
+ /* physical addresses of ai dma buffers */
dma_addr_t ai_buffer_bus_addr[MAX_AI_DMA_RING_COUNT];
- /* array of ai dma descriptors read by plx9080,
- * allocated to get proper alignment */
+ /*
+ * array of ai dma descriptors read by plx9080,
+ * allocated to get proper alignment
+ */
struct plx_dma_desc *ai_dma_desc;
- /* physical address of ai dma descriptor array */
+ /* physical address of ai dma descriptor array */
dma_addr_t ai_dma_desc_bus_addr;
- /* index of the ai dma descriptor/buffer
- * that is currently being used */
+ /*
+ * index of the ai dma descriptor/buffer
+ * that is currently being used
+ */
unsigned int ai_dma_index;
- /* dma buffers for analog output */
+ /* dma buffers for analog output */
uint16_t *ao_buffer[AO_DMA_RING_COUNT];
- /* physical addresses of ao dma buffers */
+ /* physical addresses of ao dma buffers */
dma_addr_t ao_buffer_bus_addr[AO_DMA_RING_COUNT];
struct plx_dma_desc *ao_dma_desc;
dma_addr_t ao_dma_desc_bus_addr;
- /* keeps track of buffer where the next ao sample should go */
+ /* keeps track of buffer where the next ao sample should go */
unsigned int ao_dma_index;
- unsigned int hw_revision; /* stc chip hardware revision number */
- /* last bits sent to INTR_ENABLE_REG register */
+ unsigned int hw_revision; /* stc chip hardware revision number */
+ /* last bits sent to INTR_ENABLE_REG register */
unsigned int intr_enable_bits;
- /* last bits sent to ADC_CONTROL1_REG register */
+ /* last bits sent to ADC_CONTROL1_REG register */
uint16_t adc_control1_bits;
- /* last bits sent to FIFO_SIZE_REG register */
+ /* last bits sent to FIFO_SIZE_REG register */
uint16_t fifo_size_bits;
- /* last bits sent to HW_CONFIG_REG register */
+ /* last bits sent to HW_CONFIG_REG register */
uint16_t hw_config_bits;
uint16_t dac_control1_bits;
- /* last bits written to plx9080 control register */
+ /* last bits written to plx9080 control register */
uint32_t plx_control_bits;
- /* last bits written to plx interrupt control and status register */
+ /* last bits written to plx interrupt control and status register */
uint32_t plx_intcsr_bits;
- /* index of calibration source readable through ai ch0 */
+ /* index of calibration source readable through ai ch0 */
int calibration_source;
- /* bits written to i2c calibration/range register */
+ /* bits written to i2c calibration/range register */
uint8_t i2c_cal_range_bits;
- /* configure digital triggers to trigger on falling edge */
+ /* configure digital triggers to trigger on falling edge */
unsigned int ext_trig_falling;
short ai_cmd_running;
unsigned int ai_fifo_segment_length;
@@ -1224,7 +1229,7 @@ static void abort_dma(struct comedi_device *dev, unsigned int channel)
struct pcidas64_private *devpriv = dev->private;
unsigned long flags;
- /* spinlock for plx dma control/status reg */
+ /* spinlock for plx dma control/status reg */
spin_lock_irqsave(&dev->spinlock, flags);
plx9080_abort_dma(devpriv->plx9080_iobase, channel);
@@ -1271,7 +1276,7 @@ static void enable_ai_interrupts(struct comedi_device *dev,
* if CMDF_WAKE_EOS flag is set.
*/
if (cmd->flags & CMDF_WAKE_EOS) {
- /* 4020 doesn't support pio transfers except for fifo dregs */
+ /* 4020 doesn't support pio transfers except for fifo dregs */
if (board->layout != LAYOUT_4020)
bits |= ADC_INTR_EOSCAN_BITS | EN_ADC_INTR_SRC_BIT;
}
@@ -1305,36 +1310,40 @@ static void init_plx9080(struct comedi_device *dev)
abort_dma(dev, 0);
abort_dma(dev, 1);
- /* configure dma0 mode */
+ /* configure dma0 mode */
bits = 0;
- /* enable ready input, not sure if this is necessary */
+ /* enable ready input, not sure if this is necessary */
bits |= PLX_DMAMODE_READYIEN;
- /* enable bterm, not sure if this is necessary */
+ /* enable bterm, not sure if this is necessary */
bits |= PLX_DMAMODE_BTERMIEN;
- /* enable dma chaining */
+ /* enable dma chaining */
bits |= PLX_DMAMODE_CHAINEN;
- /* enable interrupt on dma done
- * (probably don't need this, since chain never finishes) */
+ /*
+ * enable interrupt on dma done
+ * (probably don't need this, since chain never finishes)
+ */
bits |= PLX_DMAMODE_DONEIEN;
- /* don't increment local address during transfers
- * (we are transferring from a fixed fifo register) */
+ /*
+ * don't increment local address during transfers
+ * (we are transferring from a fixed fifo register)
+ */
bits |= PLX_DMAMODE_LACONST;
- /* route dma interrupt to pci bus */
+ /* route dma interrupt to pci bus */
bits |= PLX_DMAMODE_INTRPCI;
- /* enable demand mode */
+ /* enable demand mode */
bits |= PLX_DMAMODE_DEMAND;
- /* enable local burst mode */
+ /* enable local burst mode */
bits |= PLX_DMAMODE_BURSTEN;
- /* 4020 uses 32 bit dma */
+ /* 4020 uses 32 bit dma */
if (board->layout == LAYOUT_4020)
- bits |= PLX_DMAMODE_WIDTH32;
- else /* localspace0 bus is 16 bits wide */
- bits |= PLX_DMAMODE_WIDTH16;
+ bits |= PLX_DMAMODE_WIDTH_32;
+ else /* localspace0 bus is 16 bits wide */
+ bits |= PLX_DMAMODE_WIDTH_16;
writel(bits, plx_iobase + PLX_REG_DMAMODE1);
if (ao_cmd_is_supported(board))
writel(bits, plx_iobase + PLX_REG_DMAMODE0);
- /* enable interrupts on plx 9080 */
+ /* enable interrupts on plx 9080 */
devpriv->plx_intcsr_bits |=
PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
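/*
 * Condensed sketch of the DMA mode setup in the init_plx9080() hunk above:
 * the mode word is built by OR-ing PLX_DMAMODE_* flags and then picking the
 * local-bus width per board layout.  Assumes the plx9080.h definitions named
 * in the diff; the helper name itself is illustrative, not from the driver.
 */
static u32 pcidas64_dma_mode_bits(bool is_4020)
{
	/*
	 * ready input, bterm, chaining, done interrupt, constant local
	 * address, interrupt routed to PCI, demand mode, local bursts
	 */
	u32 mode = PLX_DMAMODE_READYIEN | PLX_DMAMODE_BTERMIEN |
		   PLX_DMAMODE_CHAINEN | PLX_DMAMODE_DONEIEN |
		   PLX_DMAMODE_LACONST | PLX_DMAMODE_INTRPCI |
		   PLX_DMAMODE_DEMAND | PLX_DMAMODE_BURSTEN;

	/* 4020 boards use 32-bit DMA; the rest have a 16-bit localspace0 bus */
	return mode | (is_4020 ? PLX_DMAMODE_WIDTH_32 : PLX_DMAMODE_WIDTH_16);
}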
@@ -1376,7 +1385,7 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev,
if (num_entries > fifo->max_segment_length)
num_entries = fifo->max_segment_length;
- /* 1 == 256 entries, 2 == 512 entries, etc */
+ /* 1 == 256 entries, 2 == 512 entries, etc */
num_increments = DIV_ROUND_CLOSEST(num_entries, increment_size);
bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask;
@@ -1442,7 +1451,7 @@ static void init_stc_registers(struct comedi_device *dev)
writew(devpriv->adc_control1_bits,
devpriv->main_iobase + ADC_CONTROL1_REG);
- /* 6402/16 manual says this register must be initialized to 0xff? */
+ /* 6402/16 manual says this register must be initialized to 0xff? */
writew(0xff, devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
bits = SLOW_DAC_BIT | DMA_CH_SELECT_BIT;
@@ -1457,7 +1466,7 @@ static void init_stc_registers(struct comedi_device *dev)
spin_unlock_irqrestore(&dev->spinlock, flags);
- /* set fifos to maximum size */
+ /* set fifos to maximum size */
devpriv->fifo_size_bits |= DAC_FIFO_BITS;
set_ai_fifo_segment_length(dev, board->ai_fifo->max_segment_length);
@@ -1478,7 +1487,7 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
struct pcidas64_private *devpriv = dev->private;
int i;
- /* allocate pci dma buffers */
+ /* allocate pci dma buffers */
for (i = 0; i < ai_dma_ring_count(board); i++) {
devpriv->ai_buffer[i] =
dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
@@ -1499,7 +1508,7 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
return -ENOMEM;
}
}
- /* allocate dma descriptors */
+ /* allocate dma descriptors */
devpriv->ai_dma_desc =
dma_alloc_coherent(&pcidev->dev, sizeof(struct plx_dma_desc) *
ai_dma_ring_count(board),
@@ -1517,7 +1526,7 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
if (!devpriv->ao_dma_desc)
return -ENOMEM;
}
- /* initialize dma descriptors */
+ /* initialize dma descriptors */
for (i = 0; i < ai_dma_ring_count(board); i++) {
devpriv->ai_dma_desc[i].pci_start_addr =
cpu_to_le32(devpriv->ai_buffer_bus_addr[i]);
@@ -1618,13 +1627,11 @@ static void i2c_set_sda(struct comedi_device *dev, int state)
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
PLX_REG_CNTRL;
- if (state) {
- /* set data line high */
+ if (state) { /* set data line high */
devpriv->plx_control_bits &= ~data_bit;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(i2c_high_udelay);
- } else { /* set data line low */
-
+ } else { /* set data line low */
devpriv->plx_control_bits |= data_bit;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(i2c_low_udelay);
@@ -1639,13 +1646,11 @@ static void i2c_set_scl(struct comedi_device *dev, int state)
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
PLX_REG_CNTRL;
- if (state) {
- /* set clock line high */
+ if (state) { /* set clock line high */
devpriv->plx_control_bits &= ~clock_bit;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(i2c_high_udelay);
- } else { /* set clock line low */
-
+ } else { /* set clock line low */
devpriv->plx_control_bits |= clock_bit;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(i2c_low_udelay);
@@ -1674,7 +1679,7 @@ static int i2c_read_ack(struct comedi_device *dev)
i2c_set_sda(dev, 1);
i2c_set_scl(dev, 1);
- return 0; /* return fake acknowledge bit */
+ return 0; /* return fake acknowledge bit */
}
/* send start bit */
@@ -1707,23 +1712,23 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
* eeprom and i2c bus
*/
- /* make sure we dont send anything to eeprom */
+	/* make sure we don't send anything to eeprom */
devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
i2c_stop(dev);
i2c_start(dev);
- /* send address and write bit */
+ /* send address and write bit */
bitstream = (address << 1) & ~read_bit;
i2c_write_byte(dev, bitstream);
- /* get acknowledge */
+ /* get acknowledge */
if (i2c_read_ack(dev) != 0) {
dev_err(dev->class_dev, "failed: no acknowledge\n");
i2c_stop(dev);
return;
}
- /* write data bytes */
+ /* write data bytes */
for (i = 0; i < length; i++) {
i2c_write_byte(dev, data[i]);
if (i2c_read_ack(dev) != 0) {
@@ -1770,8 +1775,8 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
range = CR_RANGE(insn->chanspec);
aref = CR_AREF(insn->chanspec);
- /* disable card's analog input interrupt sources and pacing */
- /* 4020 generates dac done interrupts even though they are disabled */
+ /* disable card's analog input interrupt sources and pacing */
+ /* 4020 generates dac done interrupts even though they are disabled */
disable_ai_pacing(dev);
spin_lock_irqsave(&dev->spinlock, flags);
@@ -1784,12 +1789,12 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
spin_unlock_irqrestore(&dev->spinlock, flags);
if (board->layout != LAYOUT_4020) {
- /* use internal queue */
+ /* use internal queue */
devpriv->hw_config_bits &= ~EXT_QUEUE_BIT;
writew(devpriv->hw_config_bits,
devpriv->main_iobase + HW_CONFIG_REG);
- /* ALT_SOURCE is internal calibration reference */
+ /* ALT_SOURCE is internal calibration reference */
if (insn->chanspec & CR_ALT_SOURCE) {
unsigned int cal_en_bit;
@@ -1811,19 +1816,19 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
*/
writew(0, devpriv->main_iobase + CALIBRATION_REG);
}
- /* load internal queue */
+ /* load internal queue */
bits = 0;
- /* set gain */
+ /* set gain */
bits |= ai_range_bits_6xxx(dev, CR_RANGE(insn->chanspec));
- /* set single-ended / differential */
+ /* set single-ended / differential */
bits |= se_diff_bit_6xxx(dev, aref == AREF_DIFF);
if (aref == AREF_COMMON)
bits |= ADC_COMMON_BIT;
bits |= adc_chan_bits(channel);
- /* set stop channel */
+ /* set stop channel */
writew(adc_chan_bits(channel),
devpriv->main_iobase + ADC_QUEUE_HIGH_REG);
- /* set start channel, and rest of settings */
+ /* set start channel, and rest of settings */
writew(bits, devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
} else {
uint8_t old_cal_range_bits = devpriv->i2c_cal_range_bits;
@@ -1835,7 +1840,7 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
} else { /* select BNC inputs */
devpriv->i2c_cal_range_bits |= adc_src_4020_bits(4);
}
- /* select range */
+ /* select range */
if (range == 0)
devpriv->i2c_cal_range_bits |= attenuate_bit(channel);
else
@@ -1862,14 +1867,14 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
}
for (n = 0; n < insn->n; n++) {
- /* clear adc buffer (inside loop for 4020 sake) */
+ /* clear adc buffer (inside loop for 4020 sake) */
writew(0, devpriv->main_iobase + ADC_BUFFER_CLEAR_REG);
/* trigger conversion, bits sent only matter for 4020 */
writew(adc_convert_chan_4020_bits(CR_CHAN(insn->chanspec)),
devpriv->main_iobase + ADC_CONVERT_REG);
- /* wait for data */
+ /* wait for data */
ret = comedi_timeout(dev, s, insn, cb_pcidas64_ai_eoc, 0);
if (ret)
return ret;
@@ -2249,7 +2254,7 @@ static void setup_sample_counters(struct comedi_device *dev,
{
struct pcidas64_private *devpriv = dev->private;
- /* load hardware conversion counter */
+ /* load hardware conversion counter */
if (use_hw_sample_counter(cmd)) {
writew(cmd->stop_arg & 0xffff,
devpriv->main_iobase + ADC_COUNT_LOWER_REG);
@@ -2277,7 +2282,7 @@ static inline unsigned int dma_transfer_size(struct comedi_device *dev)
static uint32_t ai_convert_counter_6xxx(const struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
- /* supposed to load counter with desired divisor minus 3 */
+ /* supposed to load counter with desired divisor minus 3 */
return cmd->convert_arg / TIMER_BASE - 3;
}
@@ -2286,7 +2291,7 @@ static uint32_t ai_scan_counter_6xxx(struct comedi_device *dev,
{
uint32_t count;
- /* figure out how long we need to delay at end of scan */
+ /* figure out how long we need to delay at end of scan */
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
count = (cmd->scan_begin_arg -
@@ -2315,13 +2320,13 @@ static uint32_t ai_convert_counter_4020(struct comedi_device *dev,
case TRIG_OTHER:
divisor = devpriv->ext_clock.divisor;
break;
- default: /* should never happen */
+ default: /* should never happen */
dev_err(dev->class_dev, "bug! failed to set ai pacing!\n");
divisor = 1000;
break;
}
- /* supposed to load counter with desired divisor minus 2 for 4020 */
+ /* supposed to load counter with desired divisor minus 2 for 4020 */
return divisor - 2;
}
@@ -2330,7 +2335,7 @@ static void select_master_clock_4020(struct comedi_device *dev,
{
struct pcidas64_private *devpriv = dev->private;
- /* select internal/external master clock */
+ /* select internal/external master clock */
devpriv->hw_config_bits &= ~MASTER_CLOCK_4020_MASK;
if (cmd->scan_begin_src == TRIG_OTHER) {
int chanspec = devpriv->ext_clock.chanspec;
@@ -2366,7 +2371,7 @@ static inline void dma_start_sync(struct comedi_device *dev,
struct pcidas64_private *devpriv = dev->private;
unsigned long flags;
- /* spinlock for plx dma control/status reg */
+ /* spinlock for plx dma control/status reg */
spin_lock_irqsave(&dev->spinlock, flags);
writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
devpriv->plx9080_iobase + PLX_REG_DMACSR(channel));
@@ -2390,16 +2395,16 @@ static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
scan_counter = ai_scan_counter_6xxx(dev, cmd);
}
- /* load lower 16 bits of convert interval */
+ /* load lower 16 bits of convert interval */
writew(convert_counter & 0xffff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
- /* load upper 8 bits of convert interval */
+ /* load upper 8 bits of convert interval */
writew((convert_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
- /* load lower 16 bits of scan delay */
+ /* load lower 16 bits of scan delay */
writew(scan_counter & 0xffff,
devpriv->main_iobase + ADC_DELAY_INTERVAL_LOWER_REG);
- /* load upper 8 bits of scan delay */
+ /* load upper 8 bits of scan delay */
writew((scan_counter >> 16) & 0xff,
devpriv->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG);
}
@@ -2435,26 +2440,26 @@ static int setup_channel_queue(struct comedi_device *dev,
writew(devpriv->hw_config_bits,
devpriv->main_iobase + HW_CONFIG_REG);
bits = 0;
- /* set channel */
+ /* set channel */
bits |= adc_chan_bits(CR_CHAN(cmd->chanlist[0]));
- /* set gain */
+ /* set gain */
bits |= ai_range_bits_6xxx(dev,
CR_RANGE(cmd->chanlist[0]));
- /* set single-ended / differential */
+ /* set single-ended / differential */
bits |= se_diff_bit_6xxx(dev,
CR_AREF(cmd->chanlist[0]) ==
AREF_DIFF);
if (CR_AREF(cmd->chanlist[0]) == AREF_COMMON)
bits |= ADC_COMMON_BIT;
- /* set stop channel */
+ /* set stop channel */
writew(adc_chan_bits
(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])),
devpriv->main_iobase + ADC_QUEUE_HIGH_REG);
- /* set start channel, and rest of settings */
+ /* set start channel, and rest of settings */
writew(bits,
devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
} else {
- /* use external queue */
+ /* use external queue */
if (dev->write_subdev && dev->write_subdev->busy) {
warn_external_queue(dev);
return -EBUSY;
@@ -2462,30 +2467,30 @@ static int setup_channel_queue(struct comedi_device *dev,
devpriv->hw_config_bits |= EXT_QUEUE_BIT;
writew(devpriv->hw_config_bits,
devpriv->main_iobase + HW_CONFIG_REG);
- /* clear DAC buffer to prevent weird interactions */
+ /* clear DAC buffer to prevent weird interactions */
writew(0,
devpriv->main_iobase + DAC_BUFFER_CLEAR_REG);
- /* clear queue pointer */
+ /* clear queue pointer */
writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
- /* load external queue */
+ /* load external queue */
for (i = 0; i < cmd->chanlist_len; i++) {
bits = 0;
- /* set channel */
+ /* set channel */
bits |= adc_chan_bits(CR_CHAN(cmd->
chanlist[i]));
- /* set gain */
+ /* set gain */
bits |= ai_range_bits_6xxx(dev,
CR_RANGE(cmd->
chanlist
[i]));
- /* set single-ended / differential */
+ /* set single-ended / differential */
bits |= se_diff_bit_6xxx(dev,
CR_AREF(cmd->
chanlist[i]) ==
AREF_DIFF);
if (CR_AREF(cmd->chanlist[i]) == AREF_COMMON)
bits |= ADC_COMMON_BIT;
- /* mark end of queue */
+ /* mark end of queue */
if (i == cmd->chanlist_len - 1)
bits |= QUEUE_EOSCAN_BIT |
QUEUE_EOSEQ_BIT;
@@ -2498,7 +2503,7 @@ static int setup_channel_queue(struct comedi_device *dev,
* but required for reliable operation
*/
writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
- /* prime queue holding register */
+ /* prime queue holding register */
writew(0, devpriv->main_iobase + ADC_QUEUE_LOAD_REG);
}
} else {
@@ -2507,7 +2512,7 @@ static int setup_channel_queue(struct comedi_device *dev,
devpriv->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
/* select BNC inputs */
devpriv->i2c_cal_range_bits |= adc_src_4020_bits(4);
- /* select ranges */
+ /* select ranges */
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int channel = CR_CHAN(cmd->chanlist[i]);
unsigned int range = CR_RANGE(cmd->chanlist[i]);
@@ -2579,7 +2584,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (retval < 0)
return retval;
- /* make sure internal calibration source is turned off */
+ /* make sure internal calibration source is turned off */
writew(0, devpriv->main_iobase + CALIBRATION_REG);
set_ai_pacing(dev, cmd);
@@ -2595,10 +2600,10 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (board->layout != LAYOUT_4020) {
devpriv->adc_control1_bits &= ~ADC_MODE_MASK;
if (cmd->convert_src == TRIG_EXT)
- /* good old mode 13 */
+ /* good old mode 13 */
devpriv->adc_control1_bits |= adc_mode_bits(13);
else
- /* mode 8. What else could you need? */
+ /* mode 8. What else could you need? */
devpriv->adc_control1_bits |= adc_mode_bits(8);
} else {
devpriv->adc_control1_bits &= ~CHANNEL_MODE_4020_MASK;
@@ -2618,20 +2623,20 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->main_iobase + ADC_CONTROL1_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
- /* clear adc buffer */
+ /* clear adc buffer */
writew(0, devpriv->main_iobase + ADC_BUFFER_CLEAR_REG);
if ((cmd->flags & CMDF_WAKE_EOS) == 0 ||
board->layout == LAYOUT_4020) {
devpriv->ai_dma_index = 0;
- /* set dma transfer size */
+ /* set dma transfer size */
for (i = 0; i < ai_dma_ring_count(board); i++)
devpriv->ai_dma_desc[i].transfer_size =
cpu_to_le32(dma_transfer_size(dev) *
sizeof(uint16_t));
- /* give location of first dma descriptor */
+ /* give location of first dma descriptor */
load_first_dma_descriptor(dev, 1,
devpriv->ai_dma_desc_bus_addr |
PLX_DMADPR_DESCPCI |
@@ -2657,7 +2662,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
bits = ADC_ENABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT;
if (cmd->flags & CMDF_WAKE_EOS)
bits |= ADC_DMA_DISABLE_BIT;
- /* set start trigger */
+ /* set start trigger */
if (cmd->start_src == TRIG_EXT) {
bits |= ADC_START_TRIG_EXT_BITS;
if (cmd->start_arg & CR_INVERT)
@@ -2673,7 +2678,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
spin_unlock_irqrestore(&dev->spinlock, flags);
- /* start acquisition */
+ /* start acquisition */
if (cmd->start_src == TRIG_NOW)
writew(0, devpriv->main_iobase + ADC_START_REG);
@@ -2691,7 +2696,7 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
int num_samples;
do {
- /* get least significant 15 bits */
+ /* get least significant 15 bits */
read_index = readw(devpriv->main_iobase + ADC_READ_PNTR_REG) &
0x7fff;
write_index = readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG) &
@@ -2796,14 +2801,14 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR(channel);
- /* loop until we have read all the full buffers */
+ /* loop until we have read all the full buffers */
for (j = 0, next_transfer_addr = readl(pci_addr_reg);
(next_transfer_addr <
devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] ||
next_transfer_addr >=
devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] +
DMA_BUFFER_SIZE) && j < ai_dma_ring_count(board); j++) {
- /* transfer data from dma buffer to comedi buffer */
+ /* transfer data from dma buffer to comedi buffer */
num_samples = comedi_nsamples_left(s, dma_transfer_size(dev));
comedi_buf_write_samples(s,
devpriv->ai_buffer[devpriv->ai_dma_index],
@@ -2829,15 +2834,15 @@ static void handle_ai_interrupt(struct comedi_device *dev,
uint8_t dma1_status;
unsigned long flags;
- /* check for fifo overrun */
+ /* check for fifo overrun */
if (status & ADC_OVERRUN_BIT) {
dev_err(dev->class_dev, "fifo overrun\n");
async->events |= COMEDI_CB_ERROR;
}
- /* spin lock makes sure no one else changes plx dma control reg */
+ /* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
dma1_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR1);
- if (plx_status & PLX_INTCSR_DMA1IA) { /* dma chan 1 interrupt */
+ if (plx_status & PLX_INTCSR_DMA1IA) { /* dma chan 1 interrupt */
writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
devpriv->plx9080_iobase + PLX_REG_DMACSR1);
@@ -2846,7 +2851,7 @@ static void handle_ai_interrupt(struct comedi_device *dev,
}
spin_unlock_irqrestore(&dev->spinlock, flags);
- /* drain fifo with pio */
+ /* drain fifo with pio */
if ((status & ADC_DONE_BIT) ||
((cmd->flags & CMDF_WAKE_EOS) &&
(status & ADC_INTR_PENDING_BIT) &&
@@ -2859,7 +2864,7 @@ static void handle_ai_interrupt(struct comedi_device *dev,
spin_unlock_irqrestore(&dev->spinlock, flags);
}
}
- /* if we are have all the data, then quit */
+	/* if we have all the data, then quit */
if ((cmd->stop_src == TRIG_COUNT &&
async->scans_done >= cmd->stop_arg) ||
(cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT)))
@@ -3012,7 +3017,7 @@ static void handle_ao_interrupt(struct comedi_device *dev,
async = s->async;
cmd = &async->cmd;
- /* spin lock makes sure no one else changes plx dma control reg */
+ /* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
dma0_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
if (plx_status & PLX_INTCSR_DMA0IA) { /* dma chan 0 interrupt */
@@ -3106,15 +3111,15 @@ static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
int chan = CR_CHAN(insn->chanspec);
int range = CR_RANGE(insn->chanspec);
- /* do some initializing */
+ /* do some initializing */
writew(0, devpriv->main_iobase + DAC_CONTROL0_REG);
- /* set range */
+ /* set range */
set_dac_range_bits(dev, &devpriv->dac_control1_bits, chan, range);
writew(devpriv->dac_control1_bits,
devpriv->main_iobase + DAC_CONTROL1_REG);
- /* write to channel */
+ /* write to channel */
if (board->layout == LAYOUT_4020) {
writew(data[0] & 0xff,
devpriv->main_iobase + dac_lsb_4020_reg(chan));
@@ -3124,7 +3129,7 @@ static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
writew(data[0], devpriv->main_iobase + dac_convert_reg(chan));
}
- /* remember output value */
+ /* remember output value */
s->readback[chan] = data[0];
return 1;
@@ -3556,7 +3561,7 @@ static int caldac_i2c_write(struct comedi_device *dev,
uint8_t serial_bytes[3];
uint8_t i2c_addr;
enum pointer_bits {
- /* manual has gain and offset bits switched */
+ /* manual has gain and offset bits switched */
OFFSET_0_2 = 0x1,
GAIN_0_2 = 0x2,
OFFSET_1_3 = 0x4,
@@ -3567,35 +3572,35 @@ static int caldac_i2c_write(struct comedi_device *dev,
};
switch (caldac_channel) {
- case 0: /* chan 0 offset */
+ case 0: /* chan 0 offset */
i2c_addr = CALDAC0_I2C_ADDR;
serial_bytes[0] = OFFSET_0_2;
break;
- case 1: /* chan 1 offset */
+ case 1: /* chan 1 offset */
i2c_addr = CALDAC0_I2C_ADDR;
serial_bytes[0] = OFFSET_1_3;
break;
- case 2: /* chan 2 offset */
+ case 2: /* chan 2 offset */
i2c_addr = CALDAC1_I2C_ADDR;
serial_bytes[0] = OFFSET_0_2;
break;
- case 3: /* chan 3 offset */
+ case 3: /* chan 3 offset */
i2c_addr = CALDAC1_I2C_ADDR;
serial_bytes[0] = OFFSET_1_3;
break;
- case 4: /* chan 0 gain */
+ case 4: /* chan 0 gain */
i2c_addr = CALDAC0_I2C_ADDR;
serial_bytes[0] = GAIN_0_2;
break;
- case 5: /* chan 1 gain */
+ case 5: /* chan 1 gain */
i2c_addr = CALDAC0_I2C_ADDR;
serial_bytes[0] = GAIN_1_3;
break;
- case 6: /* chan 2 gain */
+ case 6: /* chan 2 gain */
i2c_addr = CALDAC1_I2C_ADDR;
serial_bytes[0] = GAIN_0_2;
break;
- case 7: /* chan 3 gain */
+ case 7: /* chan 3 gain */
i2c_addr = CALDAC1_I2C_ADDR;
serial_bytes[0] = GAIN_1_3;
break;
@@ -3718,24 +3723,24 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
udelay(eeprom_udelay);
devpriv->plx_control_bits &= ~PLX_CNTRL_EESK & ~PLX_CNTRL_EECS;
- /* make sure we don't send anything to the i2c bus on 4020 */
+ /* make sure we don't send anything to the i2c bus on 4020 */
devpriv->plx_control_bits |= PLX_CNTRL_USERO;
writel(devpriv->plx_control_bits, plx_control_addr);
- /* activate serial eeprom */
+ /* activate serial eeprom */
udelay(eeprom_udelay);
devpriv->plx_control_bits |= PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
- /* write read command and desired memory address */
+ /* write read command and desired memory address */
for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
- /* set bit to be written */
+ /* set bit to be written */
udelay(eeprom_udelay);
if (bitstream & bit)
devpriv->plx_control_bits |= PLX_CNTRL_EEWB;
else
devpriv->plx_control_bits &= ~PLX_CNTRL_EEWB;
writel(devpriv->plx_control_bits, plx_control_addr);
- /* clock in bit */
+ /* clock in bit */
udelay(eeprom_udelay);
devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
@@ -3743,10 +3748,10 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
}
- /* read back value from eeprom memory location */
+ /* read back value from eeprom memory location */
value = 0;
for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
- /* clock out bit */
+ /* clock out bit */
udelay(eeprom_udelay);
devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
@@ -3758,7 +3763,7 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
value |= bit;
}
- /* deactivate eeprom serial input */
+ /* deactivate eeprom serial input */
udelay(eeprom_udelay);
devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
@@ -3775,9 +3780,7 @@ static int eeprom_read_insn(struct comedi_device *dev,
return 1;
}
-/*
- * Allocate and initialize the subdevice structures.
- */
+/* Allocate and initialize the subdevice structures. */
static int setup_subdevices(struct comedi_device *dev)
{
const struct pcidas64_board *board = dev->board_ptr;
@@ -3816,7 +3819,7 @@ static int setup_subdevices(struct comedi_device *dev)
* (not internal calibration sources)
*/
devpriv->i2c_cal_range_bits = adc_src_4020_bits(4);
- /* set channels to +-5 volt input ranges */
+ /* set channels to +-5 volt input ranges */
for (i = 0; i < s->n_chan; i++)
devpriv->i2c_cal_range_bits |= attenuate_bit(i);
data = devpriv->i2c_cal_range_bits;
@@ -3849,7 +3852,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- /* digital input */
+ /* digital input */
s = &dev->subdevices[2];
if (board->layout == LAYOUT_64XX) {
s->type = COMEDI_SUBD_DI;
@@ -3862,7 +3865,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- /* digital output */
+ /* digital output */
if (board->layout == LAYOUT_64XX) {
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
@@ -3891,7 +3894,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- /* 8 channel dio for 60xx */
+ /* 8 channel dio for 60xx */
s = &dev->subdevices[5];
if (board->layout == LAYOUT_60XX) {
s->type = COMEDI_SUBD_DIO;
@@ -3905,7 +3908,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- /* caldac */
+ /* caldac */
s = &dev->subdevices[6];
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
@@ -3925,7 +3928,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->readback[i] = s->maxdata / 2;
}
- /* 2 channel ad8402 potentiometer */
+ /* 2 channel ad8402 potentiometer */
s = &dev->subdevices[7];
if (board->layout == LAYOUT_64XX) {
s->type = COMEDI_SUBD_CALIB;
@@ -3959,7 +3962,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- /* user counter subd XXX */
+ /* user counter subd XXX */
s = &dev->subdevices[9];
s->type = COMEDI_SUBD_UNUSED;
@@ -4005,7 +4008,7 @@ static int auto_attach(struct comedi_device *dev,
return -ENOMEM;
}
- /* figure out what local addresses are */
+ /* figure out what local addresses are */
local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS0RR) &
PLX_LASRR_MEM_MASK;
local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS0BA) &
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9c02b17a2834..317a9b5e4a3b 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -1,43 +1,42 @@
/*
- comedi/drivers/das08_cs.c
- DAS08 driver
+ * Comedi driver for DAS-08 PCMCIA boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * PCMCIA support code for this driver is adapted from the dummy_cs.c
+ * driver of the Linux PCMCIA Card Services package.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ */
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- PCMCIA support code for this driver is adapted from the dummy_cs.c
- driver of the Linux PCMCIA Card Services package.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-*/
/*
-Driver: das08_cs
-Description: DAS-08 PCMCIA boards
-Author: Warren Jasper, ds, Frank Hess
-Devices: [ComputerBoards] PCM-DAS08 (pcm-das08)
-Status: works
-
-This is the PCMCIA-specific support split off from the
-das08 driver.
-
-Options (for pcm-das08):
- NONE
-
-Command support does not exist, but could be added for this board.
-*/
+ * Driver: das08_cs
+ * Description: DAS-08 PCMCIA boards
+ * Author: Warren Jasper, ds, Frank Hess
+ * Devices: [ComputerBoards] PCM-DAS08 (pcm-das08)
+ * Status: works
+ *
+ * This is the PCMCIA-specific support split off from the
+ * das08 driver.
+ *
+ * Configuration Options: none, uses PCMCIA auto config
+ *
+ * Command support does not exist, but could be added for this board.
+ */
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 8bbd93814340..fcd85475e429 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -96,11 +96,11 @@
* 6 6 100 kHz 6 1000000
* 7 12 50 kHz 7 10000000
*/
-const unsigned int dt2811_clk_dividers[] = {
+static const unsigned int dt2811_clk_dividers[] = {
1, 10, 2, 3, 4, 5, 6, 12
};
-const unsigned int dt2811_clk_multipliers[] = {
+static const unsigned int dt2811_clk_multipliers[] = {
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
};
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 3295bb4ac8c4..7ebca862ecaa 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -660,12 +660,12 @@ static int dt9812_find_endpoints(struct comedi_device *dev)
case 1:
dir = USB_DIR_OUT;
devpriv->cmd_wr.addr = ep->bEndpointAddress;
- devpriv->cmd_wr.size = le16_to_cpu(ep->wMaxPacketSize);
+ devpriv->cmd_wr.size = usb_endpoint_maxp(ep);
break;
case 2:
dir = USB_DIR_IN;
devpriv->cmd_rd.addr = ep->bEndpointAddress;
- devpriv->cmd_rd.size = le16_to_cpu(ep->wMaxPacketSize);
+ devpriv->cmd_rd.size = usb_endpoint_maxp(ep);
break;
case 3:
/* unused write stream */
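/*
 * For context on the dt9812.c change above: usb_endpoint_maxp() from
 * <linux/usb/ch9.h> does the byte-order conversion the removed lines
 * open-coded.  Roughly (a sketch of the helper as of this kernel series,
 * shown only for illustration):
 *
 *	static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
 *	{
 *		return __le16_to_cpu(epd->wMaxPacketSize);
 *	}
 */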
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index af4b4175af4d..e5b948405fd9 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -582,7 +582,7 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
bits |= PLX_DMAMODE_DEMAND;
/* enable local burst mode */
bits |= PLX_DMAMODE_BURSTEN;
- bits |= PLX_DMAMODE_WIDTH32;
+ bits |= PLX_DMAMODE_WIDTH_32;
writel(bits, plx_iobase + PLX_REG_DMAMODE0);
}
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 6c4ff023717f..70390de66e0e 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -141,7 +141,7 @@ static void set_transforms(struct jr3_channel __iomem *channel,
{
int i;
- num &= 0x000f; /* Make sure that 0 <= num <= 15 */
+ num &= 0x000f; /* Make sure that 0 <= num <= 15 */
for (i = 0; i < 8; i++) {
set_u16(&channel->transforms[num].link[i].link_type,
transf.link[i].link_type);
@@ -323,10 +323,10 @@ static int read_idm_word(const u8 *data, size_t size, int *pos,
int value;
if (pos && val) {
- /* Skip over non hex */
+ /* Skip over non hex */
for (; *pos < size && !isxdigit(data[*pos]); (*pos)++)
;
- /* Collect value */
+ /* Collect value */
*val = 0;
for (; *pos < size; (*pos)++) {
value = hex_to_bin(data[*pos]);
@@ -448,7 +448,8 @@ static int jr3_download_firmware(struct comedi_device *dev,
return 0;
}
-static struct jr3_pci_poll_delay jr3_pci_poll_subdevice(struct comedi_subdevice *s)
+static struct jr3_pci_poll_delay
+jr3_pci_poll_subdevice(struct comedi_subdevice *s)
{
struct jr3_pci_subdev_private *spriv = s->private;
struct jr3_pci_poll_delay result = poll_delay_min_max(1000, 2000);
@@ -733,13 +734,13 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
}
}
- /* Reset DSP card */
+ /* Reset DSP card */
writel(0, &devpriv->iobase->channel[0].reset);
ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
"comedi/jr3pci.idm",
jr3_download_firmware, 0);
- dev_dbg(dev->class_dev, "Firmare load %d\n", ret);
+ dev_dbg(dev->class_dev, "Firmware load %d\n", ret);
if (ret < 0)
return ret;
/*
@@ -763,7 +764,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
data.copyright[i]) >> 8);
}
- /* Start card timer */
+ /* Start card timer */
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
spriv = s->private;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index 356811defaf4..f10a84fb6c14 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -1,4 +1,5 @@
-/* Helper types to take care of the fact that the DSP card memory
+/*
+ * Helper types to take care of the fact that the DSP card memory
* is 16 bits, but aligned on a 32 bit PCI boundary
*/
@@ -22,7 +23,8 @@ static inline void set_s16(s32 __iomem *p, s16 val)
writel(val, p);
}
-/* The raw data is stored in a format which facilitates rapid
+/*
+ * The raw data is stored in a format which facilitates rapid
* processing by the JR3 DSP chip. The raw_channel structure shows the
* format for a single channel of data. Each channel takes four,
* two-byte words.
@@ -47,7 +49,8 @@ struct raw_channel {
s32 reserved[2];
};
-/* The force_array structure shows the layout for the decoupled and
+/*
+ * The force_array structure shows the layout for the decoupled and
* filtered force data.
*/
struct force_array {
@@ -61,7 +64,8 @@ struct force_array {
s32 v2;
};
-/* The six_axis_array structure shows the layout for the offsets and
+/*
+ * The six_axis_array structure shows the layout for the offsets and
* the full scales.
*/
struct six_axis_array {
@@ -74,7 +78,8 @@ struct six_axis_array {
};
/* VECT_BITS */
-/* The vect_bits structure shows the layout for indicating
+/*
+ * The vect_bits structure shows the layout for indicating
* which axes to use in computing the vectors. Each bit signifies
* selection of a single axis. The V1x axis bit corresponds to a hex
* value of 0x0001 and the V2z bit corresponds to a hex value of
@@ -100,12 +105,14 @@ enum {
};
/* WARNING_BITS */
-/* The warning_bits structure shows the bit pattern for the warning
+/*
+ * The warning_bits structure shows the bit pattern for the warning
* word. The bit fields are shown from bit 0 (lsb) to bit 15 (msb).
*/
-/* XX_NEAR_SET */
-/* The xx_near_sat bits signify that the indicated axis has reached or
+/* XX_NEAR_SET */
+/*
+ * The xx_near_sat bits signify that the indicated axis has reached or
* exceeded the near saturation value.
*/
@@ -118,12 +125,13 @@ enum {
mz_near_sat = 0x0020
};
-/* ERROR_BITS */
-/* XX_SAT */
-/* MEMORY_ERROR */
-/* SENSOR_CHANGE */
+/* ERROR_BITS */
+/* XX_SAT */
+/* MEMORY_ERROR */
+/* SENSOR_CHANGE */
-/* The error_bits structure shows the bit pattern for the error word.
+/*
+ * The error_bits structure shows the bit pattern for the error word.
* The bit fields are shown from bit 0 (lsb) to bit 15 (msb). The
* xx_sat bits signify that the indicated axis has reached or exceeded
* the saturation value. The memory_error bit indicates that a problem
@@ -134,9 +142,10 @@ enum {
*
*/
-/* SYSTEM_BUSY */
+/* SYSTEM_BUSY */
-/* The system_busy bit indicates that the JR3 DSP is currently busy
+/*
+ * The system_busy bit indicates that the JR3 DSP is currently busy
* and is not calculating force data. This occurs when a new
* coordinate transformation, or new sensor full scale is set by the
* user. A very fast system using the force data for feedback might
@@ -146,9 +155,10 @@ enum {
* calibration CRC.
*/
-/* CAL_CRC_BAD */
+/* CAL_CRC_BAD */
-/* The cal_crc_bad bit indicates that the calibration CRC has not
+/*
+ * The cal_crc_bad bit indicates that the calibration CRC has not
* calculated to zero. CRC is short for cyclic redundancy code. It is
* a method for determining the integrity of messages in data
* communication. The calibration data stored inside the sensor is
@@ -168,7 +178,8 @@ enum {
/* WATCH_DOG */
/* WATCH_DOG2 */
-/* The watch_dog and watch_dog2 bits are sensor, not processor, watch
+/*
+ * The watch_dog and watch_dog2 bits are sensor, not processor, watch
* dog bits. Watch_dog indicates that the sensor data line seems to be
* acting correctly, while watch_dog2 indicates that sensor data and
* clock are being received. It is possible for watch_dog2 to go off
@@ -192,9 +203,10 @@ enum error_bits_t {
watch_dog = 0x8000
};
-/* THRESH_STRUCT */
+/* THRESH_STRUCT */
-/* This structure shows the layout for a single threshold packet inside of a
+/*
+ * This structure shows the layout for a single threshold packet inside of a
* load envelope. Each load envelope can contain several threshold structures.
* 1. data_address contains the address of the data for that threshold. This
* includes filtered, unfiltered, raw, rate, counters, error and warning data
@@ -210,9 +222,10 @@ struct thresh_struct {
s32 bit_pattern;
};
-/* LE_STRUCT */
+/* LE_STRUCT */
-/* Layout of a load enveloped packet. Four thresholds are showed ... for more
+/*
+ * Layout of a load enveloped packet. Four thresholds are showed ... for more
* see manual (pag.25)
* 1. latch_bits is a bit pattern that show which bits the user wants to latch.
* The latched bits will not be reset once the threshold which set them is
@@ -228,8 +241,9 @@ struct le_struct {
s32 reserved;
};
-/* LINK_TYPES */
-/* Link types is an enumerated value showing the different possible transform
+/* LINK_TYPES */
+/*
+ * Link types is an enumerated value showing the different possible transform
* link types.
* 0 - end transform packet
* 1 - translate along X axis (TX)
@@ -252,8 +266,8 @@ enum link_types {
neg
};
-/* TRANSFORM */
-/* Structure used to describe a transform. */
+/* TRANSFORM */
+/* Structure used to describe a transform. */
struct intern_transform {
struct {
u32 link_type;
@@ -261,23 +275,29 @@ struct intern_transform {
} link[8];
};
-/* JR3 force/torque sensor data definition. For more information see sensor
- * and hardware manuals.
+/*
+ * JR3 force/torque sensor data definition. For more information see sensor
+ * and hardware manuals.
*/
struct jr3_channel {
- /* Raw_channels is the area used to store the raw data coming from */
- /* the sensor. */
+ /*
+ * Raw_channels is the area used to store the raw data coming from
+ * the sensor.
+ */
struct raw_channel raw_channels[16]; /* offset 0x0000 */
- /* Copyright is a null terminated ASCII string containing the JR3 */
- /* copyright notice. */
+ /*
+ * Copyright is a null terminated ASCII string containing the JR3
+ * copyright notice.
+ */
u32 copyright[0x0018]; /* offset 0x0040 */
s32 reserved1[0x0008]; /* offset 0x0058 */
- /* Shunts contains the sensor shunt readings. Some JR3 sensors have
+ /*
+ * Shunts contains the sensor shunt readings. Some JR3 sensors have
* the ability to have their gains adjusted. This allows the
* hardware full scales to be adjusted to potentially allow
* better resolution or dynamic range. For sensors that have
@@ -298,25 +318,29 @@ struct jr3_channel {
* command (10) set new full scales (pg. 38).
*/
- struct six_axis_array shunts; /* offset 0x0060 */
- s32 reserved2[2]; /* offset 0x0066 */
+ struct six_axis_array shunts; /* offset 0x0060 */
+ s32 reserved2[2]; /* offset 0x0066 */
- /* Default_FS contains the full scale that is used if the user does */
- /* not set a full scale. */
+ /*
+ * Default_FS contains the full scale that is used if the user does
+ * not set a full scale.
+ */
struct six_axis_array default_FS; /* offset 0x0068 */
- s32 reserved3; /* offset 0x006e */
+ s32 reserved3; /* offset 0x006e */
- /* Load_envelope_num is the load envelope number that is currently
+ /*
+ * Load_envelope_num is the load envelope number that is currently
* in use. This value is set by the user after one of the load
* envelopes has been initialized.
*/
- s32 load_envelope_num; /* offset 0x006f */
+ s32 load_envelope_num; /* offset 0x006f */
/* Min_full_scale is the recommend minimum full scale. */
- /* These values in conjunction with max_full_scale (pg. 9) helps
+ /*
+ * These values in conjunction with max_full_scale (pg. 9) helps
* determine the appropriate value for setting the full scales. The
* software allows the user to set the sensor full scale to an
* arbitrary value. But setting the full scales has some hazards. If
@@ -342,30 +366,35 @@ struct jr3_channel {
*/
struct six_axis_array min_full_scale; /* offset 0x0070 */
- s32 reserved4; /* offset 0x0076 */
+ s32 reserved4; /* offset 0x0076 */
- /* Transform_num is the transform number that is currently in use.
+ /*
+ * Transform_num is the transform number that is currently in use.
* This value is set by the JR3 DSP after the user has used command
* (5) use transform # (pg. 33).
*/
- s32 transform_num; /* offset 0x0077 */
+ s32 transform_num; /* offset 0x0077 */
- /* Max_full_scale is the recommended maximum full scale. See */
- /* min_full_scale (pg. 9) for more details. */
+ /*
+ * Max_full_scale is the recommended maximum full scale.
+ * See min_full_scale (pg. 9) for more details.
+ */
struct six_axis_array max_full_scale; /* offset 0x0078 */
- s32 reserved5; /* offset 0x007e */
+ s32 reserved5; /* offset 0x007e */
- /* Peak_address is the address of the data which will be monitored
+ /*
+ * Peak_address is the address of the data which will be monitored
* by the peak routine. This value is set by the user. The peak
* routine will monitor any 8 contiguous addresses for peak values.
* (ex. to watch filter3 data for peaks, set this value to 0x00a8).
*/
- s32 peak_address; /* offset 0x007f */
+ s32 peak_address; /* offset 0x007f */
- /* Full_scale is the sensor full scales which are currently in use.
+ /*
+ * Full_scale is the sensor full scales which are currently in use.
* Decoupled and filtered data is scaled so that +/- 16384 is equal
* to the full scales. The engineering units used are indicated by
* the units value discussed on page 16. The full scales for Fx, Fy,
@@ -377,9 +406,10 @@ struct jr3_channel {
* axes used for each vector respectively.
*/
- struct force_array full_scale; /* offset 0x0080 */
+ struct force_array full_scale; /* offset 0x0080 */
- /* Offsets contains the sensor offsets. These values are subtracted from
+ /*
+ * Offsets contains the sensor offsets. These values are subtracted from
* the sensor data to obtain the decoupled data. The offsets are set a
* few seconds (< 10) after the calibration data has been received.
* They are set so that the output data will be zero. These values
@@ -392,23 +422,26 @@ struct jr3_channel {
* about Z by 90 degrees, FY would be 5 and all others would be zero.
*/
- struct six_axis_array offsets; /* offset 0x0088 */
+ struct six_axis_array offsets; /* offset 0x0088 */
- /* Offset_num is the number of the offset currently in use. This
+ /*
+ * Offset_num is the number of the offset currently in use. This
* value is set by the JR3 DSP after the user has executed the use
* offset # command (pg. 34). It can vary between 0 and 15.
*/
- s32 offset_num; /* offset 0x008e */
+ s32 offset_num; /* offset 0x008e */
- /* Vect_axes is a bit map showing which of the axes are being used
+ /*
+ * Vect_axes is a bit map showing which of the axes are being used
* in the vector calculations. This value is set by the JR3 DSP
* after the user has executed the set vector axes command (pg. 37).
*/
- u32 vect_axes; /* offset 0x008f */
+ u32 vect_axes; /* offset 0x008f */
- /* Filter0 is the decoupled, unfiltered data from the JR3 sensor.
+ /*
+ * Filter0 is the decoupled, unfiltered data from the JR3 sensor.
* This data has had the offsets removed.
*
* These force_arrays hold the filtered data. The decoupled data is
@@ -420,23 +453,27 @@ struct jr3_channel {
* cutoff at 125 Hz, 31.25 Hz, 7.813 Hz, 1.953 Hz and 0.4883 Hz.
*/
- struct force_array filter[7]; /* offset 0x0090,
- offset 0x0098,
- offset 0x00a0,
- offset 0x00a8,
- offset 0x00b0,
- offset 0x00b8 ,
- offset 0x00c0 */
-
- /* Rate_data is the calculated rate data. It is a first derivative
+ struct force_array filter[7]; /*
+ * offset 0x0090,
+ * offset 0x0098,
+ * offset 0x00a0,
+ * offset 0x00a8,
+ * offset 0x00b0,
+ * offset 0x00b8,
+ * offset 0x00c0
+ */
+
+ /*
+ * Rate_data is the calculated rate data. It is a first derivative
* calculation. It is calculated at a frequency specified by the
* variable rate_divisor (pg. 12). The data on which the rate is
* calculated is specified by the variable rate_address (pg. 12).
*/
- struct force_array rate_data; /* offset 0x00c8 */
+ struct force_array rate_data; /* offset 0x00c8 */
- /* Minimum_data & maximum_data are the minimum and maximum (peak)
+ /*
+ * Minimum_data & maximum_data are the minimum and maximum (peak)
* data values. The JR3 DSP can monitor any 8 contiguous data items
* for minimums and maximums at full sensor bandwidth. This area is
* only updated at user request. This is done so that the user does
@@ -451,7 +488,8 @@ struct jr3_channel {
struct force_array minimum_data; /* offset 0x00d0 */
struct force_array maximum_data; /* offset 0x00d8 */
- /* Near_sat_value & sat_value contain the value used to determine if
+ /*
+ * Near_sat_value & sat_value contain the value used to determine if
* the raw sensor is saturated. Because of decoupling and offset
* removal, it is difficult to tell from the processed data if the
* sensor is saturated. These values, in conjunction with the error
@@ -465,10 +503,11 @@ struct jr3_channel {
* sat_value = 32768 - 2^(16 - ADC bits)
*/
- s32 near_sat_value; /* offset 0x00e0 */
- s32 sat_value; /* offset 0x00e1 */
+ s32 near_sat_value; /* offset 0x00e0 */
+ s32 sat_value; /* offset 0x00e1 */
- /* Rate_address, rate_divisor & rate_count contain the data used to
+ /*
+ * Rate_address, rate_divisor & rate_count contain the data used to
* control the calculations of the rates. Rate_address is the
* address of the data used for the rate calculation. The JR3 DSP
* will calculate rates for any 8 contiguous values (ex. to
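/*
 * Worked example of the sat_value formula quoted above,
 * sat_value = 32768 - 2^(16 - ADC bits); the helper name is illustrative:
 */
static inline s32 jr3_sat_value(unsigned int adc_bits)
{
	return 32768 - (1 << (16 - adc_bits));
}
/* e.g. a 16-bit ADC gives 32767, a 14-bit ADC 32764, a 12-bit ADC 32752 */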
@@ -485,11 +524,12 @@ struct jr3_channel {
* will minimize the time necessary to start the rate calculations.
*/
- s32 rate_address; /* offset 0x00e2 */
- u32 rate_divisor; /* offset 0x00e3 */
- u32 rate_count; /* offset 0x00e4 */
+ s32 rate_address; /* offset 0x00e2 */
+ u32 rate_divisor; /* offset 0x00e3 */
+ u32 rate_count; /* offset 0x00e4 */
- /* Command_word2 through command_word0 are the locations used to
+ /*
+ * Command_word2 through command_word0 are the locations used to
* send commands to the JR3 DSP. Their usage varies with the command
* and is detailed later in the Command Definitions section (pg.
* 29). In general the user places values into various memory
@@ -502,11 +542,12 @@ struct jr3_channel {
* command_word1).
*/
- s32 command_word2; /* offset 0x00e5 */
- s32 command_word1; /* offset 0x00e6 */
- s32 command_word0; /* offset 0x00e7 */
+ s32 command_word2; /* offset 0x00e5 */
+ s32 command_word1; /* offset 0x00e6 */
+ s32 command_word0; /* offset 0x00e7 */
- /* Count1 through count6 are unsigned counters which are incremented
+ /*
+ * Count1 through count6 are unsigned counters which are incremented
* every time the matching filters are calculated. Filter1 is
* calculated at the sensor data bandwidth. So this counter would
* increment at 8 kHz for a typical sensor. The rest of the counters
@@ -518,14 +559,15 @@ struct jr3_channel {
* once.
*/
- u32 count1; /* offset 0x00e8 */
- u32 count2; /* offset 0x00e9 */
- u32 count3; /* offset 0x00ea */
- u32 count4; /* offset 0x00eb */
- u32 count5; /* offset 0x00ec */
- u32 count6; /* offset 0x00ed */
+ u32 count1; /* offset 0x00e8 */
+ u32 count2; /* offset 0x00e9 */
+ u32 count3; /* offset 0x00ea */
+ u32 count4; /* offset 0x00eb */
+ u32 count5; /* offset 0x00ec */
+ u32 count6; /* offset 0x00ed */
- /* Error_count is a running count of data reception errors. If this
+ /*
+ * Error_count is a running count of data reception errors. If this
* counter is changing rapidly, it probably indicates a bad sensor
* cable connection or other hardware problem. In most installations
* error_count should not change at all. But it is possible in an
@@ -535,75 +577,84 @@ struct jr3_channel {
* where this counter counts a bad sample, that sample is ignored.
*/
- u32 error_count; /* offset 0x00ee */
+ u32 error_count; /* offset 0x00ee */
- /* Count_x is a counter which is incremented every time the JR3 DSP
+ /*
+ * Count_x is a counter which is incremented every time the JR3 DSP
* searches its job queues and finds nothing to do. It indicates the
* amount of idle time the JR3 DSP has available. It can also be
* used to determine if the JR3 DSP is alive. See the Performance
* Issues section on pg. 49 for more details.
*/
- u32 count_x; /* offset 0x00ef */
+ u32 count_x; /* offset 0x00ef */
- /* Warnings & errors contain the warning and error bits
+ /*
+ * Warnings & errors contain the warning and error bits
* respectively. The format of these two words is discussed on page
* 21 under the headings warnings_bits and error_bits.
*/
- u32 warnings; /* offset 0x00f0 */
- u32 errors; /* offset 0x00f1 */
+ u32 warnings; /* offset 0x00f0 */
+ u32 errors; /* offset 0x00f1 */
- /* Threshold_bits is a word containing the bits that are set by the
+ /*
+ * Threshold_bits is a word containing the bits that are set by the
* load envelopes. See load_envelopes (pg. 17) and thresh_struct
* (pg. 23) for more details.
*/
- s32 threshold_bits; /* offset 0x00f2 */
+ s32 threshold_bits; /* offset 0x00f2 */
- /* Last_crc is the value that shows the actual calculated CRC. CRC
+ /*
+ * Last_crc is the value that shows the actual calculated CRC. CRC
* is short for cyclic redundancy code. It should be zero. See the
* description for cal_crc_bad (pg. 21) for more information.
*/
- s32 last_CRC; /* offset 0x00f3 */
+ s32 last_CRC; /* offset 0x00f3 */
- /* EEProm_ver_no contains the version number of the sensor EEProm.
+ /*
+ * EEProm_ver_no contains the version number of the sensor EEProm.
* EEProm version numbers can vary between 0 and 255.
* Software_ver_no contains the software version number. Version
* 3.02 would be stored as 302.
*/
- s32 eeprom_ver_no; /* offset 0x00f4 */
- s32 software_ver_no; /* offset 0x00f5 */
+ s32 eeprom_ver_no; /* offset 0x00f4 */
+ s32 software_ver_no; /* offset 0x00f5 */
- /* Software_day & software_year are the release date of the software
+ /*
+ * Software_day & software_year are the release date of the software
* the JR3 DSP is currently running. Day is the day of the year,
* with January 1 being 1, and December 31, being 365 for non leap
* years.
*/
- s32 software_day; /* offset 0x00f6 */
- s32 software_year; /* offset 0x00f7 */
+ s32 software_day; /* offset 0x00f6 */
+ s32 software_year; /* offset 0x00f7 */
- /* Serial_no & model_no are the two values which uniquely identify a
+ /*
+ * Serial_no & model_no are the two values which uniquely identify a
* sensor. This model number does not directly correspond to the JR3
* model number, but it will provide a unique identifier for
* different sensor configurations.
*/
- u32 serial_no; /* offset 0x00f8 */
- u32 model_no; /* offset 0x00f9 */
+ u32 serial_no; /* offset 0x00f8 */
+ u32 model_no; /* offset 0x00f9 */
- /* Cal_day & cal_year are the sensor calibration date. Day is the
+ /*
+ * Cal_day & cal_year are the sensor calibration date. Day is the
* day of the year, with January 1 being 1, and December 31, being
* 366 for leap years.
*/
- s32 cal_day; /* offset 0x00fa */
- s32 cal_year; /* offset 0x00fb */
+ s32 cal_day; /* offset 0x00fa */
+ s32 cal_year; /* offset 0x00fb */
- /* Units is an enumerated read only value defining the engineering
+ /*
+ * Units is an enumerated read only value defining the engineering
* units used in the sensor full scale. The meanings of particular
* values are discussed in the section detailing the force_units
 * structure on page 22. The engineering units are set to customer
@@ -626,20 +677,22 @@ struct jr3_channel {
* received.
*/
- u32 units; /* offset 0x00fc */
- s32 bits; /* offset 0x00fd */
- s32 channels; /* offset 0x00fe */
+ u32 units; /* offset 0x00fc */
+ s32 bits; /* offset 0x00fd */
+ s32 channels; /* offset 0x00fe */
- /* Thickness specifies the overall thickness of the sensor from
+ /*
+ * Thickness specifies the overall thickness of the sensor from
* flange to flange. The engineering units for this value are
* contained in units (pg. 16). The sensor calibration is relative
* to the center of the sensor. This value allows easy coordinate
* transformation from the center of the sensor to either flange.
*/
- s32 thickness; /* offset 0x00ff */
+ s32 thickness; /* offset 0x00ff */
- /* Load_envelopes is a table containing the load envelope
+ /*
+ * Load_envelopes is a table containing the load envelope
* descriptions. There are 16 possible load envelope slots in the
* table. The slots are on 16 word boundaries and are numbered 0-15.
* Each load envelope needs to start at the beginning of a slot but
@@ -655,7 +708,8 @@ struct jr3_channel {
struct le_struct load_envelopes[0x10]; /* offset 0x0100 */
- /* Transforms is a table containing the transform descriptions.
+ /*
+ * Transforms is a table containing the transform descriptions.
* There are 16 possible transform slots in the table. The slots are
* on 16 word boundaries and are numbered 0-15. Each transform needs
* to start at the beginning of a slot but need not be fully
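
Note on the hunk above (not part of the patch): the offsets in these comments count 32-bit words from the start of the channel block, and every member shown is a plain u32/s32, so the documented fields can be read straight through the structure. A minimal sketch of pulling the identity fields out of an ioremapped channel; the helper name and the struct device argument are hypothetical:

#include <linux/device.h>
#include <linux/io.h>

/*
 * Hypothetical helper: report the sensor identity using the
 * serial_no/model_no fields documented above (word offsets 0x00f8 and
 * 0x00f9 within the channel block).  Assumes "ch" points at an
 * ioremapped struct jr3_channel.
 */
static void jr3_show_identity(struct device *dev,
			      struct jr3_channel __iomem *ch)
{
	u32 serial = readl(&ch->serial_no);
	u32 model = readl(&ch->model_no);

	dev_info(dev, "sensor serial %u, model %u\n", serial, model);
}
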
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 3e72718801a9..74911dbb2561 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -1,40 +1,34 @@
/*
- comedi/drivers/ni_670x.c
- Hardware driver for NI 670x devices
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_670x
-Description: National Instruments 670x
-Author: Bart Joris <bjoris@advalvas.be>
-Updated: Wed, 11 Dec 2002 18:25:35 -0800
-Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
-Status: unknown
-
-Commands are not supported.
-*/
+ * Comedi driver for NI 670x devices
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001
-
- Manuals:
-
- 322110a.pdf PCI/PXI-6704 User Manual
- 322110b.pdf PCI/PXI-6703/6704 User Manual
-
-*/
+ * Driver: ni_670x
+ * Description: National Instruments 670x
+ * Author: Bart Joris <bjoris@advalvas.be>
+ * Updated: Wed, 11 Dec 2002 18:25:35 -0800
+ * Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
+ * Status: unknown
+ *
+ * Commands are not supported.
+ *
+ * Manuals:
+ * 322110a.pdf PCI/PXI-6704 User Manual
+ * 322110b.pdf PCI/PXI-6703/6704 User Manual
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 9b444f8c4e33..5a4dcc6e61d8 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -1,62 +1,47 @@
/*
- comedi/drivers/ni_at_a2150.c
- Driver for National Instruments AT-A2150 boards
- Copyright (C) 2001, 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_at_a2150
-Description: National Instruments AT-A2150
-Author: Frank Mori Hess
-Status: works
-Devices: [National Instruments] AT-A2150C (at_a2150c), AT-2150S (at_a2150s)
-
-If you want to ac couple the board's inputs, use AREF_OTHER.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed conversions)
- [2] - DMA (optional, required for timed conversions)
+ * Comedi driver for National Instruments AT-A2150 boards
+ * Copyright (C) 2001, 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-*/
/*
-Yet another driver for obsolete hardware brought to you by Frank Hess.
-Testing and debugging help provided by Dave Andruczyk.
-
-This driver supports the boards:
-
-AT-A2150C
-AT-A2150S
-
-The only difference is their master clock frequencies.
-
-Options:
- [0] - base io address
- [1] - irq
- [2] - dma channel
-
-References (from ftp://ftp.natinst.com/support/manuals):
-
- 320360.pdf AT-A2150 User Manual
-
-TODO:
-
-analog level triggering
-TRIG_WAKE_EOS
-
-*/
+ * Driver: ni_at_a2150
+ * Description: National Instruments AT-A2150
+ * Author: Frank Mori Hess
+ * Status: works
+ * Devices: [National Instruments] AT-A2150C (at_a2150c), AT-2150S (at_a2150s)
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for timed conversions)
+ * [2] - DMA (optional, required for timed conversions)
+ *
+ * Yet another driver for obsolete hardware brought to you by Frank Hess.
+ * Testing and debugging help provided by Dave Andruczyk.
+ *
+ * If you want to ac couple the board's inputs, use AREF_OTHER.
+ *
+ * The only difference between the boards is their master clock frequencies.
+ *
+ * References (from ftp://ftp.natinst.com/support/manuals):
+ * 320360.pdf AT-A2150 User Manual
+ *
+ * TODO:
+ * - analog level triggering
+ * - TRIG_WAKE_EOS
+ */
#include <linux/module.h>
#include <linux/delay.h>
@@ -73,48 +58,52 @@ TRIG_WAKE_EOS
/* Registers and bits */
#define CONFIG_REG 0x0
-#define CHANNEL_BITS(x) ((x) & 0x7)
+#define CHANNEL_BITS(x) ((x) & 0x7)
#define CHANNEL_MASK 0x7
-#define CLOCK_SELECT_BITS(x) (((x) & 0x3) << 3)
-#define CLOCK_DIVISOR_BITS(x) (((x) & 0x3) << 5)
+#define CLOCK_SELECT_BITS(x) (((x) & 0x3) << 3)
+#define CLOCK_DIVISOR_BITS(x) (((x) & 0x3) << 5)
#define CLOCK_MASK (0xf << 3)
-#define ENABLE0_BIT 0x80 /* enable (don't internally ground) channels 0 and 1 */
-#define ENABLE1_BIT 0x100 /* enable (don't internally ground) channels 2 and 3 */
-#define AC0_BIT 0x200 /* ac couple channels 0,1 */
-#define AC1_BIT 0x400 /* ac couple channels 2,3 */
-#define APD_BIT 0x800 /* analog power down */
-#define DPD_BIT 0x1000 /* digital power down */
-#define TRIGGER_REG 0x2 /* trigger config register */
-#define POST_TRIGGER_BITS 0x2
-#define DELAY_TRIGGER_BITS 0x3
-#define HW_TRIG_EN 0x10 /* enable hardware trigger */
-#define FIFO_START_REG 0x6 /* software start aquistion trigger */
-#define FIFO_RESET_REG 0x8 /* clears fifo + fifo flags */
-#define FIFO_DATA_REG 0xa /* read data */
-#define DMA_TC_CLEAR_REG 0xe /* clear dma terminal count interrupt */
-#define STATUS_REG 0x12 /* read only */
-#define FNE_BIT 0x1 /* fifo not empty */
-#define OVFL_BIT 0x8 /* fifo overflow */
-#define EDAQ_BIT 0x10 /* end of acquisition interrupt */
-#define DCAL_BIT 0x20 /* offset calibration in progress */
-#define INTR_BIT 0x40 /* interrupt has occurred */
-#define DMA_TC_BIT 0x80 /* dma terminal count interrupt has occurred */
-#define ID_BITS(x) (((x) >> 8) & 0x3)
-#define IRQ_DMA_CNTRL_REG 0x12 /* write only */
-#define DMA_CHAN_BITS(x) ((x) & 0x7) /* sets dma channel */
-#define DMA_EN_BIT 0x8 /* enables dma */
-#define IRQ_LVL_BITS(x) (((x) & 0xf) << 4) /* sets irq level */
-#define FIFO_INTR_EN_BIT 0x100 /* enable fifo interrupts */
-#define FIFO_INTR_FHF_BIT 0x200 /* interrupt fifo half full */
-#define DMA_INTR_EN_BIT 0x800 /* enable interrupt on dma terminal count */
-#define DMA_DEM_EN_BIT 0x1000 /* enables demand mode dma */
+/* enable (don't internally ground) channels 0 and 1 */
+#define ENABLE0_BIT 0x80
+/* enable (don't internally ground) channels 2 and 3 */
+#define ENABLE1_BIT 0x100
+#define AC0_BIT 0x200 /* ac couple channels 0,1 */
+#define AC1_BIT 0x400 /* ac couple channels 2,3 */
+#define APD_BIT 0x800 /* analog power down */
+#define DPD_BIT 0x1000 /* digital power down */
+#define TRIGGER_REG 0x2 /* trigger config register */
+#define POST_TRIGGER_BITS 0x2
+#define DELAY_TRIGGER_BITS 0x3
+#define HW_TRIG_EN 0x10 /* enable hardware trigger */
+#define FIFO_START_REG 0x6 /* software start acquisition trigger */
+#define FIFO_RESET_REG 0x8 /* clears fifo + fifo flags */
+#define FIFO_DATA_REG 0xa /* read data */
+#define DMA_TC_CLEAR_REG 0xe /* clear dma terminal count interrupt */
+#define STATUS_REG 0x12 /* read only */
+#define FNE_BIT 0x1 /* fifo not empty */
+#define OVFL_BIT 0x8 /* fifo overflow */
+#define EDAQ_BIT 0x10 /* end of acquisition interrupt */
+#define DCAL_BIT 0x20 /* offset calibration in progress */
+#define INTR_BIT 0x40 /* interrupt has occurred */
+/* dma terminal count interrupt has occurred */
+#define DMA_TC_BIT 0x80
+#define ID_BITS(x) (((x) >> 8) & 0x3)
+#define IRQ_DMA_CNTRL_REG 0x12 /* write only */
+#define DMA_CHAN_BITS(x) ((x) & 0x7) /* sets dma channel */
+#define DMA_EN_BIT 0x8 /* enables dma */
+#define IRQ_LVL_BITS(x) (((x) & 0xf) << 4) /* sets irq level */
+#define FIFO_INTR_EN_BIT 0x100 /* enable fifo interrupts */
+#define FIFO_INTR_FHF_BIT 0x200 /* interrupt fifo half full */
+/* enable interrupt on dma terminal count */
+#define DMA_INTR_EN_BIT 0x800
+#define DMA_DEM_EN_BIT 0x1000 /* enables demand mode dma */
#define I8253_BASE_REG 0x14
struct a2150_board {
const char *name;
- int clock[4]; /* master clock periods, in nanoseconds */
- int num_clocks; /* number of available master clock speeds */
- int ai_speed; /* maximum conversion rate in nanoseconds */
+ int clock[4]; /* master clock periods, in nanoseconds */
+ int num_clocks; /* number of available master clock speeds */
+ int ai_speed; /* maximum conversion rate in nanoseconds */
};
/* analog input range */
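
Since the register map above packs several independent fields into CONFIG_REG, a configuration word is simply the OR of the pieces. A sketch of composing one (illustrative only, not driver code; the function name and the chosen field values are arbitrary, and dev->iobase is assumed to be already requested):

#include <linux/io.h>

#include "../comedidev.h"

/* Sketch: build a CONFIG_REG value from the fields defined above. */
static void a2150_example_config(struct comedi_device *dev)
{
	unsigned int cfg = 0;

	cfg |= CHANNEL_BITS(2);			/* channel select, bits 0-2 */
	cfg |= CLOCK_SELECT_BITS(1);		/* master clock select, bits 3-4 */
	cfg |= CLOCK_DIVISOR_BITS(2);		/* clock divisor, bits 5-6 */
	cfg |= ENABLE0_BIT | ENABLE1_BIT;	/* un-ground channels 0-3 */
	cfg |= AC0_BIT;				/* AC couple channels 0,1 */

	outw(cfg, dev->iobase + CONFIG_REG);
}
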
@@ -144,8 +133,8 @@ static const struct a2150_board a2150_boards[] = {
struct a2150_private {
struct comedi_isadma *dma;
unsigned int count; /* number of data points left to be taken */
- int irq_dma_bits; /* irq/dma register bits */
- int config_bits; /* config register bits */
+ int irq_dma_bits; /* irq/dma register bits */
+ int config_bits; /* config register bits */
};
/* interrupt service routine */
@@ -189,13 +178,13 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
*/
residue = comedi_isadma_disable(desc->chan);
- /* figure out how many points to read */
+ /* figure out how many points to read */
max_points = comedi_bytes_to_samples(s, desc->size);
num_points = max_points - comedi_bytes_to_samples(s, residue);
if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
num_points = devpriv->count;
- /* figure out how many points will be stored next time */
+ /* figure out how many points will be stored next time */
leftover = 0;
if (cmd->stop_src == TRIG_NONE) {
leftover = comedi_bytes_to_samples(s, desc->size);
@@ -204,7 +193,8 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
if (leftover > max_points)
leftover = max_points;
}
- /* there should only be a residue if collection was stopped by having
+ /*
+ * There should only be a residue if collection was stopped by having
* the stop_src set to an external trigger, in which case there
* will be no more data
*/
@@ -214,7 +204,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
for (i = 0; i < num_points; i++) {
/* write data point to comedi buffer */
dpnt = buf[i];
- /* convert from 2's complement to unsigned coding */
+ /* convert from 2's complement to unsigned coding */
dpnt ^= 0x8000;
comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT) {
@@ -244,14 +234,14 @@ static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_isadma *dma = devpriv->dma;
struct comedi_isadma_desc *desc = &dma->desc[0];
- /* disable dma on card */
+ /* disable dma on card */
devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
- /* disable computer's dma */
+ /* disable computer's dma */
comedi_isadma_disable(desc->chan);
- /* clear fifo and reset triggering circuitry */
+ /* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
return 0;
@@ -270,7 +260,7 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
int lub_divisor_shift, lub_index, glb_divisor_shift, glb_index;
int i, j;
- /* initialize greatest lower and least upper bounds */
+ /* initialize greatest lower and least upper bounds */
lub_divisor_shift = 3;
lub_index = 0;
lub = board->clock[lub_index] * (1 << lub_divisor_shift);
@@ -278,19 +268,19 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
glb_index = board->num_clocks - 1;
glb = board->clock[glb_index] * (1 << glb_divisor_shift);
- /* make sure period is in available range */
+ /* make sure period is in available range */
if (*period < glb)
*period = glb;
if (*period > lub)
*period = lub;
- /* we can multiply period by 1, 2, 4, or 8, using (1 << i) */
+ /* we can multiply period by 1, 2, 4, or 8, using (1 << i) */
for (i = 0; i < 4; i++) {
- /* there are a maximum of 4 master clocks */
+ /* there are a maximum of 4 master clocks */
for (j = 0; j < board->num_clocks; j++) {
- /* temp is the period in nanosec we are evaluating */
+ /* temp is the period in nanosec we are evaluating */
temp = board->clock[j] * (1 << i);
- /* if it is the best match yet */
+ /* if it is the best match yet */
if (temp < lub && temp >= *period) {
lub_divisor_shift = i;
lub_index = j;
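
For context on the search these hunks touch: the hardware can only realize periods of the form clock[j] * (1 << i) with i in 0..3, so a2150_get_timing() tracks the nearest achievable periods above (lub) and below (glb) the request and then rounds according to the CMDF_ROUND_* flags. A stand-alone illustration of the CMDF_ROUND_NEAREST case with made-up clock periods (the real values live in a2150_boards[].clock[], outside this hunk):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int clock[] = { 1000, 1250 };	/* hypothetical periods, ns */
	const int num_clocks = 2;
	const int wanted = 3000;		/* requested period, ns */
	int best = 0;
	int i, j;

	for (i = 0; i < 4; i++) {		/* divisors 1, 2, 4, 8 */
		for (j = 0; j < num_clocks; j++) {
			int period = clock[j] * (1 << i);

			printf("clock[%d] << %d = %5d ns\n", j, i, period);
			if (!best || abs(period - wanted) < abs(best - wanted))
				best = period;
		}
	}
	printf("nearest achievable to %d ns is %d ns\n", wanted, best);
	return 0;
}
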
@@ -306,7 +296,7 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- /* if least upper bound is better approximation */
+ /* if least upper bound is better approximation */
if (lub - *period < *period - glb)
*period = lub;
else
@@ -320,7 +310,7 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
break;
}
- /* set clock bits for config register appropriately */
+ /* set clock bits for config register appropriately */
devpriv->config_bits &= ~CLOCK_MASK;
if (*period == lub) {
devpriv->config_bits |=
@@ -495,7 +485,7 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
"dma incompatible with hard real-time interrupt (CMDF_PRIORITY), aborting\n");
return -1;
}
- /* clear fifo and reset triggering circuitry */
+ /* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
/* setup chanlist */
@@ -503,7 +493,7 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
cmd->chanlist_len) < 0)
return -1;
- /* setup ac/dc coupling */
+ /* setup ac/dc coupling */
if (CR_AREF(cmd->chanlist[0]) == AREF_OTHER)
devpriv->config_bits |= AC0_BIT;
else
@@ -513,18 +503,18 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
else
devpriv->config_bits &= ~AC1_BIT;
- /* setup timing */
+ /* setup timing */
a2150_get_timing(dev, &cmd->scan_begin_arg, cmd->flags);
- /* send timing, channel, config bits */
+ /* send timing, channel, config bits */
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
- /* initialize number of samples remaining */
+ /* initialize number of samples remaining */
devpriv->count = cmd->stop_arg * cmd->chanlist_len;
comedi_isadma_disable(desc->chan);
- /* set size of transfer to fill in 1/3 second */
+ /* set size of transfer to fill in 1/3 second */
#define ONE_THIRD_SECOND 333333333
desc->size = comedi_bytes_per_sample(s) * cmd->chanlist_len *
ONE_THIRD_SECOND / cmd->scan_begin_arg;
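
With ONE_THIRD_SECOND expressed in nanoseconds, the statement above sizes each DMA descriptor to hold roughly a third of a second of sampled data. A quick stand-alone check with hypothetical command parameters (2-byte samples, 4 channels, one scan every 20000 ns):

#include <stdio.h>

int main(void)
{
	unsigned int bytes_per_sample = 2;
	unsigned int chanlist_len = 4;
	unsigned int scan_begin_arg = 20000;	/* ns per scan */
	unsigned int size;

	size = bytes_per_sample * chanlist_len * 333333333 / scan_begin_arg;
	printf("%u bytes per descriptor (~%u scans, ~1/3 s of data)\n",
	       size, size / (bytes_per_sample * chanlist_len));
	return 0;
}
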
@@ -536,40 +526,45 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
comedi_isadma_program(desc);
- /* clear dma interrupt before enabling it, to try and get rid of that
- * one spurious interrupt that has been happening */
+ /*
+ * Clear dma interrupt before enabling it, to try and get rid of
+ * that one spurious interrupt that has been happening.
+ */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
- /* enable dma on card */
+ /* enable dma on card */
devpriv->irq_dma_bits |= DMA_INTR_EN_BIT | DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
- /* may need to wait 72 sampling periods if timing was changed */
+ /* may need to wait 72 sampling periods if timing was changed */
comedi_8254_load(dev->pacer, 2, 72, I8254_MODE0 | I8254_BINARY);
- /* setup start triggering */
+ /* setup start triggering */
trigger_bits = 0;
- /* decide if we need to wait 72 periods for valid data */
+ /* decide if we need to wait 72 periods for valid data */
if (cmd->start_src == TRIG_NOW &&
(old_config_bits & CLOCK_MASK) !=
(devpriv->config_bits & CLOCK_MASK)) {
- /* set trigger source to delay trigger */
+ /* set trigger source to delay trigger */
trigger_bits |= DELAY_TRIGGER_BITS;
} else {
- /* otherwise no delay */
+ /* otherwise no delay */
trigger_bits |= POST_TRIGGER_BITS;
}
- /* enable external hardware trigger */
+ /* enable external hardware trigger */
if (cmd->start_src == TRIG_EXT) {
trigger_bits |= HW_TRIG_EN;
} else if (cmd->start_src == TRIG_OTHER) {
- /* XXX add support for level/slope start trigger using TRIG_OTHER */
+ /*
+ * XXX add support for level/slope start trigger
+ * using TRIG_OTHER
+ */
dev_err(dev->class_dev, "you shouldn't see this?\n");
}
- /* send trigger config bits */
+ /* send trigger config bits */
outw(trigger_bits, dev->iobase + TRIGGER_REG);
- /* start acquisition for soft trigger */
+ /* start acquisition for soft trigger */
if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
@@ -596,28 +591,28 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int n;
int ret;
- /* clear fifo and reset triggering circuitry */
+ /* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
/* setup chanlist */
if (a2150_set_chanlist(dev, CR_CHAN(insn->chanspec), 1) < 0)
return -1;
- /* set dc coupling */
+ /* set dc coupling */
devpriv->config_bits &= ~AC0_BIT;
devpriv->config_bits &= ~AC1_BIT;
- /* send timing, channel, config bits */
+ /* send timing, channel, config bits */
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
- /* disable dma on card */
+ /* disable dma on card */
devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT;
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
- /* setup start triggering */
+ /* setup start triggering */
outw(0, dev->iobase + TRIGGER_REG);
- /* start acquisition for soft trigger */
+ /* start acquisition for soft trigger */
outw(0, dev->iobase + FIFO_START_REG);
/*
@@ -632,7 +627,7 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
inw(dev->iobase + FIFO_DATA_REG);
}
- /* read data */
+ /* read data */
for (n = 0; n < insn->n; n++) {
ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
if (ret)
@@ -642,7 +637,7 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
data[n] ^= 0x8000;
}
- /* clear fifo and reset triggering circuitry */
+ /* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
return n;
@@ -749,16 +744,16 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->cancel = a2150_cancel;
}
- /* set card's irq and dma levels */
+ /* set card's irq and dma levels */
outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG);
- /* reset and sync adc clock circuitry */
+ /* reset and sync adc clock circuitry */
outw_p(DPD_BIT | APD_BIT, dev->iobase + CONFIG_REG);
outw_p(DPD_BIT, dev->iobase + CONFIG_REG);
- /* initialize configuration register */
+ /* initialize configuration register */
devpriv->config_bits = 0;
outw(devpriv->config_bits, dev->iobase + CONFIG_REG);
- /* wait until offset calibration is done, then enable analog inputs */
+ /* wait until offset calibration is done, then enable analog inputs */
for (i = 0; i < timeout; i++) {
if ((DCAL_BIT & inw(dev->iobase + STATUS_REG)) == 0)
break;
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 95435b81aa55..ffcf7afce684 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -1,93 +1,84 @@
/*
- comedi/drivers/ni_atmio.c
- Hardware driver for NI AT-MIO E series cards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_atmio
-Description: National Instruments AT-MIO-E series
-Author: ds
-Devices: [National Instruments] AT-MIO-16E-1 (ni_atmio),
- AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
- AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
-Status: works
-Updated: Thu May 1 20:03:02 CDT 2003
-
-The driver has 2.6 kernel isapnp support, and
-will automatically probe for a supported board if the
-I/O base is left unspecified with comedi_config.
-However, many of
-the isapnp id numbers are unknown. If your board is not
-recognized, please send the output of 'cat /proc/isapnp'
-(you may need to modprobe the isa-pnp module for
-/proc/isapnp to exist) so the
-id numbers for your board can be added to the driver.
-
-Otherwise, you can use the isapnptools package to configure
-your board. Use isapnp to
-configure the I/O base and IRQ for the board, and then pass
-the same values as
-parameters in comedi_config. A sample isapnp.conf file is included
-in the etc/ directory of Comedilib.
-
-Comedilib includes a utility to autocalibrate these boards. The
-boards seem to boot into a state where the all calibration DACs
-are at one extreme of their range, thus the default calibration
-is terrible. Calibration at boot is strongly encouraged.
-
-To use the extended digital I/O on some of the boards, enable the
-8255 driver when configuring the Comedi source tree.
-
-External triggering is supported for some events. The channel index
-(scan_begin_arg, etc.) maps to PFI0 - PFI9.
-
-Some of the more esoteric triggering possibilities of these boards
-are not supported.
-*/
-/*
- The real guts of the driver is in ni_mio_common.c, which is included
- both here and in ni_pcimio.c
-
- Interrupt support added by Truxton Fulton <trux@truxton.com>
-
- References for specifications:
-
- 340747b.pdf Register Level Programmer Manual (obsolete)
- 340747c.pdf Register Level Programmer Manual (new)
- DAQ-STC reference manual
-
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- need to deal with external reference for DAC, and other DAC
- properties in board properties
+ * Comedi driver for NI AT-MIO E series cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- deal with at-mio-16de-10 revision D to N changes, etc.
+/*
+ * Driver: ni_atmio
+ * Description: National Instruments AT-MIO-E series
+ * Author: ds
+ * Devices: [National Instruments] AT-MIO-16E-1 (ni_atmio),
+ * AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
+ * AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
+ * Status: works
+ * Updated: Thu May 1 20:03:02 CDT 2003
+ *
+ * The driver has 2.6 kernel isapnp support, and will automatically probe for
+ * a supported board if the I/O base is left unspecified with comedi_config.
+ * However, many of the isapnp id numbers are unknown. If your board is not
+ * recognized, please send the output of 'cat /proc/isapnp' (you may need to
+ * modprobe the isa-pnp module for /proc/isapnp to exist) so the id numbers
+ * for your board can be added to the driver.
+ *
+ * Otherwise, you can use the isapnptools package to configure your board.
+ * Use isapnp to configure the I/O base and IRQ for the board, and then pass
+ * the same values as parameters in comedi_config. A sample isapnp.conf file
+ * is included in the etc/ directory of Comedilib.
+ *
+ * Comedilib includes a utility to autocalibrate these boards. The boards
+ * seem to boot into a state where all the calibration DACs are at one
+ * extreme of their range, thus the default calibration is terrible.
+ * Calibration at boot is strongly encouraged.
+ *
+ * To use the extended digital I/O on some of the boards, enable the
+ * 8255 driver when configuring the Comedi source tree.
+ *
+ * External triggering is supported for some events. The channel index
+ * (scan_begin_arg, etc.) maps to PFI0 - PFI9.
+ *
+ * Some of the more esoteric triggering possibilities of these boards are
+ * not supported.
+ */
-*/
+/*
+ * The real guts of the driver is in ni_mio_common.c, which is included
+ * both here and in ni_pcimio.c
+ *
+ * Interrupt support added by Truxton Fulton <trux@truxton.com>
+ *
+ * References for specifications:
+ * 340747b.pdf Register Level Programmer Manual (obsolete)
+ * 340747c.pdf Register Level Programmer Manual (new)
+ * DAQ-STC reference manual
+ *
+ * Other possibly relevant info:
+ * 320517c.pdf User manual (obsolete)
+ * 320517f.pdf User manual (new)
+ * 320889a.pdf delete
+ * 320906c.pdf maximum signal ratings
+ * 321066a.pdf about 16x
+ * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf about at-mio-16e-10 rev P
+ * 321837a.pdf discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - need to deal with external reference for DAC, and other DAC
+ * properties in board properties
+ * - deal with at-mio-16de-10 revision D to N changes, etc.
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -98,10 +89,7 @@ are not supported.
#include "ni_stc.h"
#include "8255.h"
-/*
- * AT specific setup
- */
-
+/* AT specific setup */
static const struct ni_board_struct ni_boards[] = {
{
.name = "at-mio-16e-1",
@@ -215,7 +203,7 @@ static const struct ni_board_struct ni_boards[] = {
.n_adchan = 16,
.ai_maxdata = 0xffff,
.ai_fifo_depth = 512,
- .alwaysdither = 1, /* unknown */
+ .alwaysdither = 1, /* unknown */
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.caldac = { dac8800, dac8043, ad8522 },
@@ -287,10 +275,10 @@ static const struct ni_board_struct *ni_atmio_probe(struct comedi_device *dev)
}
if (device_id == 255)
dev_err(dev->class_dev, "can't find board\n");
- else if (device_id == 0)
+ else if (device_id == 0)
dev_err(dev->class_dev,
"EEPROM read error (?) or device not found\n");
- else
+ else
dev_err(dev->class_dev,
"unknown device ID %d -- contact author\n", device_id);
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index c3eb54622bc3..fb59b0ffbba6 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -1,25 +1,41 @@
/*
- comedi/drivers/ni_atmio16d.c
- Hardware driver for National Instruments AT-MIO16D board
- Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * Comedi driver for National Instruments AT-MIO16D board
+ * Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: ni_atmio16d
-Description: National Instruments AT-MIO-16D
-Author: Chris R. Baugher <baugher@enteract.com>
-Status: unknown
-Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
-*/
+ * Driver: ni_atmio16d
+ * Description: National Instruments AT-MIO-16D
+ * Author: Chris R. Baugher <baugher@enteract.com>
+ * Status: unknown
+ * Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
+ *
+ * Configuration options:
+ * [0] - I/O port
+ * [1] - MIO irq (0 == no irq; or 3,4,5,6,7,9,10,11,12,14,15)
+ * [2] - DIO irq (0 == no irq; or 3,4,5,6,7,9)
+ * [3] - DMA1 channel (0 == no DMA; or 5,6,7)
+ * [4] - DMA2 channel (0 == no DMA; or 5,6,7)
+ * [5] - a/d mux (0=differential; 1=single)
+ * [6] - a/d range (0=bipolar10; 1=bipolar5; 2=unipolar10)
+ * [7] - dac0 range (0=bipolar; 1=unipolar)
+ * [8] - dac0 reference (0=internal; 1=external)
+ * [9] - dac0 coding (0=2's comp; 1=straight binary)
+ * [10] - dac1 range (same as dac0 options)
+ * [11] - dac1 reference (same as dac0 options)
+ * [12] - dac1 coding (same as dac0 options)
+ */
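
The option list above is what the attach routine later in this file reads out of it->options[]. A small sketch of checking the MIO irq entry against the values listed (illustrative only; the helper is hypothetical and the permitted values are copied from option [1] above):

#include <linux/types.h>

static bool atmio16d_mio_irq_valid(int irq)
{
	switch (irq) {
	case 0:		/* 0 == no irq */
	case 3: case 4: case 5: case 6: case 7:
	case 9: case 10: case 11: case 12: case 14: case 15:
		return true;
	default:
		return false;
	}
}
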
+
/*
* I must give credit here to Michal Dobes <dobes@tesnet.cz> who
* wrote the driver for Advantec's pcl812 boards. I used the interrupt
@@ -295,8 +311,10 @@ static int atmio16d_ai_cmd(struct comedi_device *dev,
unsigned int sample_count, tmp, chan, gain;
int i;
- /* This is slowly becoming a working command interface. *
- * It is still uber-experimental */
+ /*
+ * This is slowly becoming a working command interface.
+ * It is still uber-experimental
+ */
reset_counters(dev);
@@ -322,9 +340,10 @@ static int atmio16d_ai_cmd(struct comedi_device *dev,
outw(tmp, dev->iobase + MUX_GAIN_REG);
}
- /* Now program the sample interval timer */
- /* Figure out which clock to use then get an
- * appropriate timer value */
+ /*
+ * Now program the sample interval timer.
+ * Figure out which clock to use then get an appropriate timer value.
+ */
if (cmd->convert_arg < 65536000) {
base_clock = CLOCK_1_MHZ;
timer = cmd->convert_arg / 1000;
@@ -386,9 +405,10 @@ static int atmio16d_ai_cmd(struct comedi_device *dev,
outw(devpriv->com_reg_1_state, dev->iobase + COM_REG_1);
}
- /* Program the scan interval timer ONLY IF SCANNING IS ENABLED */
- /* Figure out which clock to use then get an
- * appropriate timer value */
+ /*
+ * Program the scan interval timer ONLY IF SCANNING IS ENABLED.
+ * Figure out which clock to use then get an appropriate timer value.
+ */
if (cmd->chanlist_len > 1) {
if (cmd->scan_begin_arg < 65536000) {
base_clock = CLOCK_1_MHZ;
@@ -566,38 +586,6 @@ static int atmio16d_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
-/*
- options[0] - I/O port
- options[1] - MIO irq
- 0 == no irq
- N == irq N {3,4,5,6,7,9,10,11,12,14,15}
- options[2] - DIO irq
- 0 == no irq
- N == irq N {3,4,5,6,7,9}
- options[3] - DMA1 channel
- 0 == no DMA
- N == DMA N {5,6,7}
- options[4] - DMA2 channel
- 0 == no DMA
- N == DMA N {5,6,7}
-
- options[5] - a/d mux
- 0=differential, 1=single
- options[6] - a/d range
- 0=bipolar10, 1=bipolar5, 2=unipolar10
-
- options[7] - dac0 range
- 0=bipolar, 1=unipolar
- options[8] - dac0 reference
- 0=internal, 1=external
- options[9] - dac0 coding
- 0=2's comp, 1=straight binary
-
- options[10] - dac1 range
- options[11] - dac1 reference
- options[12] - dac1 coding
- */
-
static int atmio16d_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index d9de83ab0267..733d3fbafa4d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -1,35 +1,35 @@
/*
- comedi/drivers/ni_daq_dio24.c
- Driver for National Instruments PCMCIA DAQ-Card DIO-24
- Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
+ * Comedi driver for National Instruments PCMCIA DAQ-Card DIO-24
+ * Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
+ *
+ * PCMCIA crap at end of file is adapted from dummy_cs.c 1.31
+ * 2001/08/24 12:13:13 from the pcmcia package.
+ * The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- PCMCIA crap at end of file is adapted from dummy_cs.c 1.31
- 2001/08/24 12:13:13 from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: ni_daq_dio24
-Description: National Instruments PCMCIA DAQ-Card DIO-24
-Author: Daniel Vecino Castel <dvecino@able.es>
-Devices: [National Instruments] PCMCIA DAQ-Card DIO-24 (ni_daq_dio24)
-Status: ?
-Updated: Thu, 07 Nov 2002 21:53:06 -0800
-
-This is just a wrapper around the 8255.o driver to properly handle
-the PCMCIA interface.
-*/
+ * Driver: ni_daq_dio24
+ * Description: National Instruments PCMCIA DAQ-Card DIO-24
+ * Author: Daniel Vecino Castel <dvecino@able.es>
+ * Devices: [National Instruments] PCMCIA DAQ-Card DIO-24 (ni_daq_dio24)
+ * Status: ?
+ * Updated: Thu, 07 Nov 2002 21:53:06 -0800
+ *
+ * This is just a wrapper around the 8255.o driver to properly handle
+ * the PCMCIA interface.
+ */
#include <linux/module.h>
#include "../comedi_pcmcia.h"
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index e3d821bf2d6a..21f823179356 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -1,40 +1,39 @@
/*
- comedi/drivers/ni_mio_cs.c
- Hardware driver for NI PCMCIA MIO E series cards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_mio_cs
-Description: National Instruments DAQCard E series
-Author: ds
-Status: works
-Devices: [National Instruments] DAQCard-AI-16XE-50 (ni_mio_cs),
- DAQCard-AI-16E-4, DAQCard-6062E, DAQCard-6024E, DAQCard-6036E
-Updated: Thu Oct 23 19:43:17 CDT 2003
-
-See the notes in the ni_atmio.o driver.
-*/
-/*
- The real guts of the driver is in ni_mio_common.c, which is
- included by all the E series drivers.
-
- References for specifications:
+ * Comedi driver for NI PCMCIA MIO E series cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- 341080a.pdf DAQCard E Series Register Level Programmer Manual
+/*
+ * Driver: ni_mio_cs
+ * Description: National Instruments DAQCard E series
+ * Author: ds
+ * Status: works
+ * Devices: [National Instruments] DAQCard-AI-16XE-50 (ni_mio_cs),
+ * DAQCard-AI-16E-4, DAQCard-6062E, DAQCard-6024E, DAQCard-6036E
+ * Updated: Thu Oct 23 19:43:17 CDT 2003
+ *
+ * See the notes in the ni_atmio.o driver.
+ */
-*/
+/*
+ * The real guts of the driver is in ni_mio_common.c, which is
+ * included by all the E series drivers.
+ *
+ * References for specifications:
+ * 341080a.pdf DAQCard E Series Register Level Programmer Manual
+ */
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 35ef1925703f..daeb4ad7a75f 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1,50 +1,49 @@
/*
- comedi/drivers/ni_pcidio.c
- driver for National Instruments PCI-DIO-32HS
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999,2002 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Comedi driver for National Instruments PCI-DIO-32HS
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999,2002 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: ni_pcidio
-Description: National Instruments PCI-DIO32HS, PCI-6533
-Author: ds
-Status: works
-Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio)
- [National Instruments] PXI-6533, PCI-6533 (pxi-6533)
- [National Instruments] PCI-6534 (pci-6534)
-Updated: Mon, 09 Jan 2012 14:27:23 +0000
-
-The DIO32HS board appears as one subdevice, with 32 channels.
-Each channel is individually I/O configurable. The channel order
-is 0=A0, 1=A1, 2=A2, ... 8=B0, 16=C0, 24=D0. The driver only
-supports simple digital I/O; no handshaking is supported.
-
-DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
-
-The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
-scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
-scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
-trailing edge.
-
-This driver could be easily modified to support AT-MIO32HS and
-AT-MIO96.
-
-The PCI-6534 requires a firmware upload after power-up to work, the
-firmware data and instructions for loading it with comedi_config
-it are contained in the
-comedi_nonfree_firmware tarball available from http://www.comedi.org
-*/
+ * Driver: ni_pcidio
+ * Description: National Instruments PCI-DIO32HS, PCI-6533
+ * Author: ds
+ * Status: works
+ * Devices: [National Instruments] PCI-DIO-32HS (ni_pcidio)
+ * [National Instruments] PXI-6533, PCI-6533 (pxi-6533)
+ * [National Instruments] PCI-6534 (pci-6534)
+ * Updated: Mon, 09 Jan 2012 14:27:23 +0000
+ *
+ * The DIO32HS board appears as one subdevice, with 32 channels. Each
+ * channel is individually I/O configurable. The channel order is 0=A0,
+ * 1=A1, 2=A2, ... 8=B0, 16=C0, 24=D0. The driver only supports simple
+ * digital I/O; no handshaking is supported.
+ *
+ * DMA mostly works for the PCI-DIO32HS, but only in timed input mode.
+ *
+ * The PCI-DIO-32HS/PCI-6533 has a configurable external trigger. Setting
+ * scan_begin_arg to 0 or CR_EDGE triggers on the leading edge. Setting
+ * scan_begin_arg to CR_INVERT or (CR_EDGE | CR_INVERT) triggers on the
+ * trailing edge.
+ *
+ * This driver could be easily modified to support AT-MIO32HS and AT-MIO96.
+ *
+ * The PCI-6534 requires a firmware upload after power-up to work; the
+ * firmware data and instructions for loading it with comedi_config
+ * are contained in the comedi_nonfree_firmware tarball available from
+ * http://www.comedi.org
+ */
#define USE_DMA
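
The external trigger described above is selected entirely through the command's scan_begin fields. A minimal user-space sketch with comedilib (illustrative only; the function is hypothetical, and the rest of the command setup, the chanlist and error handling are omitted):

#include <comedilib.h>

/* Request external scan triggering on the trailing edge. */
static void example_trailing_edge(comedi_cmd *cmd)
{
	cmd->scan_begin_src = TRIG_EXT;
	cmd->scan_begin_arg = CR_EDGE | CR_INVERT;	/* trailing edge */
}
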
@@ -61,36 +60,36 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define Window_Address 4 /* W */
#define Interrupt_And_Window_Status 4 /* R */
-#define IntStatus1 (1<<0)
-#define IntStatus2 (1<<1)
+#define IntStatus1 BIT(0)
+#define IntStatus2 BIT(1)
#define WindowAddressStatus_mask 0x7c
#define Master_DMA_And_Interrupt_Control 5 /* W */
#define InterruptLine(x) ((x)&3)
-#define OpenInt (1<<2)
+#define OpenInt BIT(2)
#define Group_Status 5 /* R */
-#define DataLeft (1<<0)
-#define Req (1<<2)
-#define StopTrig (1<<3)
+#define DataLeft BIT(0)
+#define Req BIT(2)
+#define StopTrig BIT(3)
#define Group_1_Flags 6 /* R */
#define Group_2_Flags 7 /* R */
-#define TransferReady (1<<0)
-#define CountExpired (1<<1)
-#define Waited (1<<5)
-#define PrimaryTC (1<<6)
-#define SecondaryTC (1<<7)
+#define TransferReady BIT(0)
+#define CountExpired BIT(1)
+#define Waited BIT(5)
+#define PrimaryTC BIT(6)
+#define SecondaryTC BIT(7)
/* #define SerialRose */
/* #define ReqRose */
/* #define Paused */
#define Group_1_First_Clear 6 /* W */
#define Group_2_First_Clear 7 /* W */
-#define ClearWaited (1<<3)
-#define ClearPrimaryTC (1<<4)
-#define ClearSecondaryTC (1<<5)
-#define DMAReset (1<<6)
-#define FIFOReset (1<<7)
+#define ClearWaited BIT(3)
+#define ClearPrimaryTC BIT(4)
+#define ClearSecondaryTC BIT(5)
+#define DMAReset BIT(6)
+#define FIFOReset BIT(7)
#define ClearAll 0xf8
#define Group_1_FIFO 8 /* W */
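
These BIT() conversions are cosmetic: in the kernel's bit headers BIT(n) expands to (1UL << (n)). One easy sanity check is that the individual clear bits defined above OR together to exactly the ClearAll value. A stand-alone check, with BIT() spelled out locally since the kernel header is not usable from user space:

#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define ClearWaited		BIT(3)
#define ClearPrimaryTC		BIT(4)
#define ClearSecondaryTC	BIT(5)
#define DMAReset		BIT(6)
#define FIFOReset		BIT(7)
#define ClearAll		0xf8

int main(void)
{
	unsigned long all = ClearWaited | ClearPrimaryTC | ClearSecondaryTC |
			    DMAReset | FIFOReset;

	printf("0x%02lx (ClearAll is 0x%02x)\n", all, ClearAll);
	return all == ClearAll ? 0 : 1;
}
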
@@ -111,27 +110,27 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define Group_1_Second_Clear 46 /* W */
#define Group_2_Second_Clear 47 /* W */
-#define ClearExpired (1<<0)
+#define ClearExpired BIT(0)
#define Port_Pattern(x) (48+(x))
#define Data_Path 64
-#define FIFOEnableA (1<<0)
-#define FIFOEnableB (1<<1)
-#define FIFOEnableC (1<<2)
-#define FIFOEnableD (1<<3)
+#define FIFOEnableA BIT(0)
+#define FIFOEnableB BIT(1)
+#define FIFOEnableC BIT(2)
+#define FIFOEnableD BIT(3)
#define Funneling(x) (((x)&3)<<4)
-#define GroupDirection (1<<7)
+#define GroupDirection BIT(7)
#define Protocol_Register_1 65
#define OpMode Protocol_Register_1
#define RunMode(x) ((x)&7)
-#define Numbered (1<<3)
+#define Numbered BIT(3)
#define Protocol_Register_2 66
#define ClockReg Protocol_Register_2
#define ClockLine(x) (((x)&3)<<5)
-#define InvertStopTrig (1<<7)
+#define InvertStopTrig BIT(7)
#define DataLatching(x) (((x)&3)<<5)
#define Protocol_Register_3 67
@@ -152,17 +151,17 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define Protocol_Register_6 73
#define LinePolarities Protocol_Register_6
-#define InvertAck (1<<0)
-#define InvertReq (1<<1)
-#define InvertClock (1<<2)
-#define InvertSerial (1<<3)
-#define OpenAck (1<<4)
-#define OpenClock (1<<5)
+#define InvertAck BIT(0)
+#define InvertReq BIT(1)
+#define InvertClock BIT(2)
+#define InvertSerial BIT(3)
+#define OpenAck BIT(4)
+#define OpenClock BIT(5)
#define Protocol_Register_7 74
#define AckSer Protocol_Register_7
#define AckLine(x) (((x)&3)<<2)
-#define ExchangePins (1<<7)
+#define ExchangePins BIT(7)
#define Interrupt_Control 75
/* bits same as flags */
@@ -183,20 +182,20 @@ static inline unsigned int secondary_DMAChannel_bits(unsigned int channel)
#define Transfer_Size_Control 77
#define TransferWidth(x) ((x)&3)
#define TransferLength(x) (((x)&3)<<3)
-#define RequireRLevel (1<<5)
+#define RequireRLevel BIT(5)
#define Protocol_Register_15 79
#define DAQOptions Protocol_Register_15
#define StartSource(x) ((x)&0x3)
-#define InvertStart (1<<2)
+#define InvertStart BIT(2)
#define StopSource(x) (((x)&0x3)<<3)
-#define ReqStart (1<<6)
-#define PreStart (1<<7)
+#define ReqStart BIT(6)
+#define PreStart BIT(7)
#define Pattern_Detection 81
-#define DetectionMethod (1<<0)
-#define InvertMatch (1<<1)
-#define IE_Pattern_Detection (1<<2)
+#define DetectionMethod BIT(0)
+#define InvertMatch BIT(1)
+#define IE_Pattern_Detection BIT(2)
#define Protocol_Register_9 82
#define ReqDelay Protocol_Register_9
@@ -649,8 +648,10 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writeb(1, dev->mmio + AckDelay);
writeb(0x0b, dev->mmio + AckNotDelay);
writeb(0x01, dev->mmio + Data1Delay);
- /* manual, page 4-5: ClockSpeed comment is incorrectly listed
- * on DAQOptions */
+ /*
+ * manual, page 4-5:
+ * ClockSpeed comment is incorrectly listed on DAQOptions
+ */
writew(0, dev->mmio + ClockSpeed);
writeb(0, dev->mmio + DAQOptions);
} else {
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index d8917392b9f9..f13a2f7360b3 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1,111 +1,106 @@
/*
- comedi/drivers/ni_pcimio.c
- Hardware driver for NI PCI-MIO E series cards
+ * Comedi driver for NI PCI-MIO E series cards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: ni_pcimio
-Description: National Instruments PCI-MIO-E series and M series (all boards)
-Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
- Herman Bruyninckx, Terry Barnaby
-Status: works
-Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
- PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
- PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
- PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
- PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
- PCI-6225, PXI-6225, PCI-6229, PCI-6250,
- PCI-6251, PXI-6251, PCIe-6251, PXIe-6251,
- PCI-6254, PCI-6259, PCIe-6259,
- PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
- PCI-6711, PXI-6711, PCI-6713, PXI-6713,
- PXI-6071E, PCI-6070E, PXI-6070E,
- PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
- PCI-6143, PXI-6143
-Updated: Mon, 09 Jan 2012 14:52:48 +0000
-
-These boards are almost identical to the AT-MIO E series, except that
-they use the PCI bus instead of ISA (i.e., AT). See the notes for
-the ni_atmio.o driver for additional information about these boards.
-
-Autocalibration is supported on many of the devices, using the
-comedi_calibrate (or comedi_soft_calibrate for m-series) utility.
-M-Series boards do analog input and analog output calibration entirely
-in software. The software calibration corrects
-the analog input for offset, gain and
-nonlinearity. The analog outputs are corrected for offset and gain.
-See the comedilib documentation on comedi_get_softcal_converter() for
-more information.
-
-By default, the driver uses DMA to transfer analog input data to
-memory. When DMA is enabled, not all triggering features are
-supported.
-
-Digital I/O may not work on 673x.
-
-Note that the PCI-6143 is a simultaineous sampling device with 8 convertors.
-With this board all of the convertors perform one simultaineous sample during
-a scan interval. The period for a scan is used for the convert time in a
-Comedi cmd. The convert trigger source is normally set to TRIG_NOW by default.
-
-The RTSI trigger bus is supported on these cards on
-subdevice 10. See the comedilib documentation for details.
-
-Information (number of channels, bits, etc.) for some devices may be
-incorrect. Please check this and submit a bug if there are problems
-for your device.
-
-SCXI is probably broken for m-series boards.
-
-Bugs:
- - When DMA is enabled, COMEDI_EV_CONVERT does
- not work correctly.
+ * Driver: ni_pcimio
+ * Description: National Instruments PCI-MIO-E series and M series (all boards)
+ * Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
+ * Herman Bruyninckx, Terry Barnaby
+ * Status: works
+ * Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
+ * PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014,
+ * PCI-6040E, PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E,
+ * PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E,
+ * PCI-6035E, PCI-6052E,
+ * PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
+ * PCI-6225, PXI-6225, PCI-6229, PCI-6250,
+ * PCI-6251, PXI-6251, PCIe-6251, PXIe-6251,
+ * PCI-6254, PCI-6259, PCIe-6259,
+ * PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
+ * PCI-6711, PXI-6711, PCI-6713, PXI-6713,
+ * PXI-6071E, PCI-6070E, PXI-6070E,
+ * PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
+ * PCI-6143, PXI-6143
+ * Updated: Mon, 09 Jan 2012 14:52:48 +0000
+ *
+ * These boards are almost identical to the AT-MIO E series, except that
+ * they use the PCI bus instead of ISA (i.e., AT). See the notes for the
+ * ni_atmio.o driver for additional information about these boards.
+ *
+ * Autocalibration is supported on many of the devices, using the
+ * comedi_calibrate (or comedi_soft_calibrate for m-series) utility.
+ * M-Series boards do analog input and analog output calibration entirely
+ * in software. The software calibration corrects the analog input for
+ * offset, gain and nonlinearity. The analog outputs are corrected for
+ * offset and gain. See the comedilib documentation on
+ * comedi_get_softcal_converter() for more information.
+ *
+ * By default, the driver uses DMA to transfer analog input data to
+ * memory. When DMA is enabled, not all triggering features are
+ * supported.
+ *
+ * Digital I/O may not work on 673x.
+ *
+ * Note that the PCI-6143 is a simultaneous sampling device with 8
+ * converters. With this board all of the converters perform one
+ * simultaneous sample during a scan interval. The period for a scan
+ * is used for the convert time in a Comedi cmd. The convert trigger
+ * source is set to TRIG_NOW by default.
+ *
+ * The RTSI trigger bus is supported on these cards on subdevice 10.
+ * See the comedilib documentation for details.
+ *
+ * Information (number of channels, bits, etc.) for some devices may be
+ * incorrect. Please check this and submit a bug if there are problems
+ * for your device.
+ *
+ * SCXI is probably broken for m-series boards.
+ *
+ * Bugs:
+ * - When DMA is enabled, COMEDI_EV_CONVERT does not work correctly.
+ */
-*/
/*
- The PCI-MIO E series driver was originally written by
- Tomasz Motylewski <...>, and ported to comedi by ds.
-
- References:
-
- 341079b.pdf PCI E Series Register-Level Programmer Manual
- 340934b.pdf DAQ-STC reference manual
-
- 322080b.pdf 6711/6713/6715 User Manual
-
- 320945c.pdf PCI E Series User Manual
- 322138a.pdf PCI-6052E and DAQPad-6052E User Manual
-
- ISSUES:
-
- need to deal with external reference for DAC, and other DAC
- properties in board properties
-
- deal with at-mio-16de-10 revision D to N changes, etc.
-
- need to add other CALDAC type
-
- need to slow down DAC loading. I don't trust NI's claim that
- two writes to the PCI bus slows IO enough. I would prefer to
- use udelay(). Timing specs: (clock)
- AD8522 30ns
- DAC8043 120ns
- DAC8800 60ns
- MB88341 ?
-
-*/
+ * The PCI-MIO E series driver was originally written by
+ * Tomasz Motylewski <...>, and ported to comedi by ds.
+ *
+ * References:
+ * 341079b.pdf PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf DAQ-STC reference manual
+ *
+ * 322080b.pdf 6711/6713/6715 User Manual
+ *
+ * 320945c.pdf PCI E Series User Manual
+ * 322138a.pdf PCI-6052E and DAQPad-6052E User Manual
+ *
+ * ISSUES:
+ * - need to deal with external reference for DAC, and other DAC
+ * properties in board properties
+ * - deal with at-mio-16de-10 revision D to N changes, etc.
+ * - need to add other CALDAC type
+ * - need to slow down DAC loading. I don't trust NI's claim that
+ * two writes to the PCI bus slows IO enough. I would prefer to
+ * use udelay().
+ * Timing specs: (clock)
+ * AD8522 30ns
+ * DAC8043 120ns
+ * DAC8800 60ns
+ * MB88341 ?
+ */
#include <linux/module.h>
#include <linux/delay.h>
@@ -119,13 +114,14 @@ Bugs:
#define PCIDMA
-/* These are not all the possible ao ranges for 628x boards.
- They can do OFFSET +- REFERENCE where OFFSET can be
- 0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can
- be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>. That's
- 63 different possibilities. An AO channel
- can not act as it's own OFFSET or REFERENCE.
-*/
+/*
+ * These are not all the possible ao ranges for 628x boards.
+ * They can do OFFSET +- REFERENCE where OFFSET can be
+ * 0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can
+ * be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>. That's
+ * 63 different possibilities. An AO channel
+ * cannot act as its own OFFSET or REFERENCE.
+ */
static const struct comedi_lrange range_ni_M_628x_ao = {
8, {
BIP_RANGE(10),
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 95b537a8ecdb..5036eebb9162 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -465,12 +465,12 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
struct ni6501_private *devpriv = dev->private;
size_t size;
- size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_rx);
devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf) {
kfree(devpriv->usb_rx_buf);
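The wMaxPacketSize conversions here (and the matching ones in vmk80xx.c further down) all follow the same pattern. A minimal sketch of that pattern, assuming only a USB endpoint descriptor pointer in place of the driver's devpriv->ep_rx/ep_tx:

    #include <linux/kernel.h>
    #include <linux/usb.h>

    /* Illustrative only: "ep" stands in for devpriv->ep_rx or devpriv->ep_tx. */
    static void show_maxp(const struct usb_endpoint_descriptor *ep)
    {
            /* Old style, as removed above: open-coded conversion of the field. */
            size_t open_coded = le16_to_cpu(ep->wMaxPacketSize);

            /* New style: the USB core helper performs the conversion for us. */
            size_t via_helper = usb_endpoint_maxp(ep);

            pr_info("maxp: %zu (open-coded) vs %zu (helper)\n",
                    open_coded, via_helper);
    }

With usb_endpoint_maxp() the driver no longer needs to open-code the __le16 conversion of the descriptor field.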
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 0e20cc5c9a69..e23e63a097b5 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -60,9 +60,9 @@ struct plx_dma_desc {
#define PLX_REG_LAS1RR 0x00f0
#define PLX_LASRR_IO BIT(0) /* Map to: 1=I/O, 0=Mem */
-#define PLX_LASRR_ANY32 (BIT(1) * 0) /* Locate anywhere in 32 bit */
-#define PLX_LASRR_LT1MB (BIT(1) * 1) /* Locate in 1st meg */
-#define PLX_LASRR_ANY64 (BIT(1) * 2) /* Locate anywhere in 64 bit */
+#define PLX_LASRR_MLOC_ANY32 (BIT(1) * 0) /* Locate anywhere in 32 bit */
+#define PLX_LASRR_MLOC_LT1MB (BIT(1) * 1) /* Locate in 1st meg */
+#define PLX_LASRR_MLOC_ANY64 (BIT(1) * 2) /* Locate anywhere in 64 bit */
#define PLX_LASRR_MLOC_MASK GENMASK(2, 1) /* Memory location bits */
#define PLX_LASRR_PREFETCH BIT(3) /* Memory is prefetchable */
/* bits that specify range for memory space decode bits */
@@ -89,11 +89,11 @@ struct plx_dma_desc {
/* Local Bus Latency Timer */
#define PLX_MARBR_LT(x) (BIT(0) * ((x) & 0xff))
#define PLX_MARBR_LT_MASK GENMASK(7, 0)
-#define PLX_MARBR_LT_SHIFT 0
+#define PLX_MARBR_TO_LT(r) ((r) & PLX_MARBR_LT_MASK)
/* Local Bus Pause Timer */
#define PLX_MARBR_PT(x) (BIT(8) * ((x) & 0xff))
#define PLX_MARBR_PT_MASK GENMASK(15, 8)
-#define PLX_MARBR_PT_SHIFT 8
+#define PLX_MARBR_TO_PT(r) (((r) & PLX_MARBR_PT_MASK) >> 8)
/* Local Bus Latency Timer Enable */
#define PLX_MARBR_LTEN BIT(16)
/* Local Bus Pause Timer Enable */
@@ -166,16 +166,15 @@ struct plx_dma_desc {
#define PLX_REG_LBRD1 0x00f8
/* Memory Space Local Bus Width */
-#define PLX_LBRD_MSWIDTH8 (BIT(0) * 0) /* 8 bits wide */
-#define PLX_LBRD_MSWIDTH16 (BIT(0) * 1) /* 16 bits wide */
-#define PLX_LBRD_MSWIDTH32 (BIT(0) * 2) /* 32 bits wide */
-#define PLX_LBRD_MSWIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH_8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_LBRD_MSWIDTH_16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_LBRD_MSWIDTH_32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH_32A (BIT(0) * 3) /* 32 bits wide */
#define PLX_LBRD_MSWIDTH_MASK GENMASK(1, 0)
-#define PLX_LBRD_MSWIDTH_SHIFT 0
/* Memory Space Internal Wait States */
#define PLX_LBRD_MSIWS(x) (BIT(2) * ((x) & 0xf))
#define PLX_LBRD_MSIWS_MASK GENMASK(5, 2)
-#define PLX_LBRD_MSIWS_SHIFT 2
+#define PLX_LBRD_TO_MSIWS(r) (((r) & PLX_LBRD_MSIWS_MASK) >> 2)
/* Memory Space Ready Input Enable */
#define PLX_LBRD_MSREADYIEN BIT(6)
/* Memory Space BTERM# Input Enable */
@@ -193,18 +192,17 @@ struct plx_dma_desc {
/* Prefetch Counter */
#define PLX_LBRD_PFCOUNT(x) (BIT(11) * ((x) & 0xf))
#define PLX_LBRD_PFCOUNT_MASK GENMASK(14, 11)
-#define PLX_LBRD_PFCOUNT_SHIFT 11
+#define PLX_LBRD_TO_PFCOUNT(r) (((r) & PLX_LBRD_PFCOUNT_MASK) >> 11)
/* Expansion ROM Space Local Bus Width (LBRD0 only) */
-#define PLX_LBRD0_EROMWIDTH8 (BIT(16) * 0) /* 8 bits wide */
-#define PLX_LBRD0_EROMWIDTH16 (BIT(16) * 1) /* 16 bits wide */
-#define PLX_LBRD0_EROMWIDTH32 (BIT(16) * 2) /* 32 bits wide */
-#define PLX_LBRD0_EROMWIDTH32A (BIT(16) * 3) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH_8 (BIT(16) * 0) /* 8 bits wide */
+#define PLX_LBRD0_EROMWIDTH_16 (BIT(16) * 1) /* 16 bits wide */
+#define PLX_LBRD0_EROMWIDTH_32 (BIT(16) * 2) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH_32A (BIT(16) * 3) /* 32 bits wide */
#define PLX_LBRD0_EROMWIDTH_MASK GENMASK(17, 16)
-#define PLX_LBRD0_EROMWIDTH_SHIFT 16
/* Expansion ROM Space Internal Wait States (LBRD0 only) */
#define PLX_LBRD0_EROMIWS(x) (BIT(18) * ((x) & 0xf))
#define PLX_LBRD0_EROMIWS_MASK GENMASK(21, 18)
-#define PLX_LBRD0_EROMIWS_SHIFT 18
+#define PLX_LBRD0_TO_EROMIWS(r) (((r) & PLX_LBRD0_EROMIWS_MASK) >> 18)
/* Expansion ROM Space Ready Input Enable (LBDR0 only) */
#define PLX_LBRD0_EROMREADYIEN BIT(22)
/* Expansion ROM Space BTERM# Input Enable (LBRD0 only) */
@@ -220,7 +218,7 @@ struct plx_dma_desc {
/* PCI Target Retry Delay Clocks / 8 (LBRD0 only) */
#define PLX_LBRD0_TRDELAY(x) (BIT(28) * ((x) & 0xF))
#define PLX_LBRD0_TRDELAY_MASK GENMASK(31, 28)
-#define PLX_LBRD0_TRDELAY_SHIFT 28
+#define PLX_LBRD0_TO_TRDELAY(r) (((r) & PLX_LBRD0_TRDELAY_MASK) >> 28)
/* Local Range Register for Direct Master to PCI */
#define PLX_REG_DMRR 0x001c
@@ -241,10 +239,10 @@ struct plx_dma_desc {
/* LLOCK# Input Enable */
#define PLX_DMPBAM_LLOCKIEN BIT(2)
/* Direct Master Read Prefetch Size Control (bits 12, 3) */
-#define PLX_DMPBAM_RPSIZECONT ((BIT(12) * 0) | (BIT(3) * 0))
-#define PLX_DMPBAM_RPSIZE4 ((BIT(12) * 0) | (BIT(3) * 1))
-#define PLX_DMPBAM_RPSIZE8 ((BIT(12) * 1) | (BIT(3) * 0))
-#define PLX_DMPBAM_RPSIZE16 ((BIT(12) * 1) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE_CONT ((BIT(12) * 0) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE_4 ((BIT(12) * 0) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE_8 ((BIT(12) * 1) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE_16 ((BIT(12) * 1) | (BIT(3) * 1))
#define PLX_DMPBAM_RPSIZE_MASK (BIT(12) | BIT(3))
/* Direct Master PCI Read Mode - deassert IRDY when FIFO full */
#define PLX_DMPBAM_RMIRDY BIT(4)
@@ -261,10 +259,10 @@ struct plx_dma_desc {
/* I/O Remap Select */
#define PLX_DMPBAM_IOREMAPSEL BIT(13)
/* Direct Master Write Delay */
-#define PLX_DMPBAM_WDELAYNONE (BIT(14) * 0)
-#define PLX_DMPBAM_WDELAY4 (BIT(14) * 1)
-#define PLX_DMPBAM_WDELAY8 (BIT(14) * 2)
-#define PLX_DMPBAM_WDELAY16 (BIT(14) * 3)
+#define PLX_DMPBAM_WDELAY_NONE (BIT(14) * 0)
+#define PLX_DMPBAM_WDELAY_4 (BIT(14) * 1)
+#define PLX_DMPBAM_WDELAY_8 (BIT(14) * 2)
+#define PLX_DMPBAM_WDELAY_16 (BIT(14) * 3)
#define PLX_DMPBAM_WDELAY_MASK GENMASK(15, 14)
/* Remap of Local-to-PCI Space Into PCI Address Space */
#define PLX_DMPBAM_REMAP_MASK GENMASK(31, 16)
@@ -279,19 +277,19 @@ struct plx_dma_desc {
/* Register Number */
#define PLX_DMCFGA_REGNUM(x) (BIT(2) * ((x) & 0x3f))
#define PLX_DMCFGA_REGNUM_MASK GENMASK(7, 2)
-#define PLX_DMCFGA_REGNUM_SHIFT 2
+#define PLX_DMCFGA_TO_REGNUM(r) (((r) & PLX_DMCFGA_REGNUM_MASK) >> 2)
/* Function Number */
#define PLX_DMCFGA_FUNCNUM(x) (BIT(8) * ((x) & 0x7))
#define PLX_DMCFGA_FUNCNUM_MASK GENMASK(10, 8)
-#define PLX_DMCFGA_FUNCNUM_SHIFT 8
+#define PLX_DMCFGA_TO_FUNCNUM(r) (((r) & PLX_DMCFGA_FUNCNUM_MASK) >> 8)
/* Device Number */
#define PLX_DMCFGA_DEVNUM(x) (BIT(11) * ((x) & 0x1f))
#define PLX_DMCFGA_DEVNUM_MASK GENMASK(15, 11)
-#define PLX_DMCFGA_DEVNUM_SHIFT 11
+#define PLX_DMCFGA_TO_DEVNUM(r) (((r) & PLX_DMCFGA_DEVNUM_MASK) >> 11)
/* Bus Number */
#define PLX_DMCFGA_BUSNUM(x) (BIT(16) * ((x) & 0xff))
#define PLX_DMCFGA_BUSNUM_MASK GENMASK(23, 16)
-#define PLX_DMCFGA_BUSNUM_SHIFT 16
+#define PLX_DMCFGA_TO_BUSNUM(r) (((r) & PLX_DMCFGA_BUSNUM_MASK) >> 16)
/* Configuration Enable */
#define PLX_DMCFGA_CONFIGEN BIT(31)
@@ -402,22 +400,22 @@ struct plx_dma_desc {
/* PCI Read Command Code For DMA */
#define PLX_CNTRL_CCRDMA(x) (BIT(0) * ((x) & 0xf))
#define PLX_CNTRL_CCRDMA_MASK GENMASK(3, 0)
-#define PLX_CNTRL_CCRDMA_SHIFT 0
+#define PLX_CNTRL_TO_CCRDMA(r) ((r) & PLX_CNTRL_CCRDMA_MASK)
#define PLX_CNTRL_CCRDMA_NORMAL PLX_CNTRL_CCRDMA(14) /* value after reset */
/* PCI Write Command Code For DMA 0 */
#define PLX_CNTRL_CCWDMA(x) (BIT(4) * ((x) & 0xf))
#define PLX_CNTRL_CCWDMA_MASK GENMASK(7, 4)
-#define PLX_CNTRL_CCWDMA_SHIFT 4
+#define PLX_CNTRL_TO_CCWDMA(r) (((r) & PLX_CNTRL_CCWDMA_MASK) >> 4)
#define PLX_CNTRL_CCWDMA_NORMAL PLX_CNTRL_CCWDMA(7) /* value after reset */
/* PCI Memory Read Command Code For Direct Master */
#define PLX_CNTRL_CCRDM(x) (BIT(8) * ((x) & 0xf))
#define PLX_CNTRL_CCRDM_MASK GENMASK(11, 8)
-#define PLX_CNTRL_CCRDM_SHIFT 8
+#define PLX_CNTRL_TO_CCRDM(r) (((r) & PLX_CNTRL_CCRDM_MASK) >> 8)
#define PLX_CNTRL_CCRDM_NORMAL PLX_CNTRL_CCRDM(6) /* value after reset */
/* PCI Memory Write Command Code For Direct Master */
#define PLX_CNTRL_CCWDM(x) (BIT(12) * ((x) & 0xf))
#define PLX_CNTRL_CCWDM_MASK GENMASK(15, 12)
-#define PLX_CNTRL_CCWDM_SHIFT 12
+#define PLX_CNTRL_TO_CCWDM(r) (((r) & PLX_CNTRL_CCWDM_MASK) >> 12)
#define PLX_CNTRL_CCWDM_NORMAL PLX_CNTRL_CCWDM(7) /* value after reset */
/* General Purpose Output (USERO) */
#define PLX_CNTRL_USERO BIT(16)
@@ -464,16 +462,15 @@ struct plx_dma_desc {
#define PLX_REG_DMAMODE1 0x0094
/* Local Bus Width */
-#define PLX_DMAMODE_WIDTH8 (BIT(0) * 0) /* 8 bits wide */
-#define PLX_DMAMODE_WIDTH16 (BIT(0) * 1) /* 16 bits wide */
-#define PLX_DMAMODE_WIDTH32 (BIT(0) * 2) /* 32 bits wide */
-#define PLX_DMAMODE_WIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH_8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_DMAMODE_WIDTH_16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_DMAMODE_WIDTH_32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH_32A (BIT(0) * 3) /* 32 bits wide */
#define PLX_DMAMODE_WIDTH_MASK GENMASK(1, 0)
-#define PLX_DMAMODE_WIDTH_SHIFT 0
/* Internal Wait States */
#define PLX_DMAMODE_IWS(x) (BIT(2) * ((x) & 0xf))
#define PLX_DMAMODE_IWS_MASK GENMASK(5, 2)
-#define PLX_DMAMODE_SHIFT 2
+#define PLX_DMAMODE_TO_IWS(r) (((r) & PLX_DMAMODE_IWS_MASK) >> 2)
/* Ready Input Enable */
#define PLX_DMAMODE_READYIEN BIT(6)
/* BTERM# Input Enable */
@@ -560,35 +557,35 @@ struct plx_dma_desc {
/* DMA Channel 0 PCI-to-Local Almost Full (divided by 2, minus 1) */
#define PLX_DMATHR_C0PLAF(x) (BIT(0) * ((x) & 0xf))
#define PLX_DMATHR_C0PLAF_MASK GENMASK(3, 0)
-#define PLX_DMATHR_C0PLAF_SHIFT 0
+#define PLX_DMATHR_TO_C0PLAF(r) ((r) & PLX_DMATHR_C0PLAF_MASK)
/* DMA Channel 0 Local-to-PCI Almost Empty (divided by 2, minus 1) */
#define PLX_DMATHR_C0LPAE(x) (BIT(4) * ((x) & 0xf))
#define PLX_DMATHR_C0LPAE_MASK GENMASK(7, 4)
-#define PLX_DMATHR_C0LPAE_SHIFT 4
+#define PLX_DMATHR_TO_C0LPAE(r) (((r) & PLX_DMATHR_C0LPAE_MASK) >> 4)
/* DMA Channel 0 Local-to-PCI Almost Full (divided by 2, minus 1) */
#define PLX_DMATHR_C0LPAF(x) (BIT(8) * ((x) & 0xf))
#define PLX_DMATHR_C0LPAF_MASK GENMASK(11, 8)
-#define PLX_DMATHR_C0LPAF_SHIFT 8
+#define PLX_DMATHR_TO_C0LPAF(r) (((r) & PLX_DMATHR_C0LPAF_MASK) >> 8)
/* DMA Channel 0 PCI-to-Local Almost Empty (divided by 2, minus 1) */
#define PLX_DMATHR_C0PLAE(x) (BIT(12) * ((x) & 0xf))
#define PLX_DMATHR_C0PLAE_MASK GENMASK(15, 12)
-#define PLX_DMATHR_C0PLAE_SHIFT 12
+#define PLX_DMATHR_TO_C0PLAE(r) (((r) & PLX_DMATHR_C0PLAE_MASK) >> 12)
/* DMA Channel 1 PCI-to-Local Almost Full (divided by 2, minus 1) */
#define PLX_DMATHR_C1PLAF(x) (BIT(16) * ((x) & 0xf))
#define PLX_DMATHR_C1PLAF_MASK GENMASK(19, 16)
-#define PLX_DMATHR_C1PLAF_SHIFT 16
+#define PLX_DMATHR_TO_C1PLAF(r) (((r) & PLX_DMATHR_C1PLAF_MASK) >> 16)
/* DMA Channel 1 Local-to-PCI Almost Empty (divided by 2, minus 1) */
#define PLX_DMATHR_C1LPAE(x) (BIT(20) * ((x) & 0xf))
#define PLX_DMATHR_C1LPAE_MASK GENMASK(23, 20)
-#define PLX_DMATHR_C1LPAE_SHIFT 20
+#define PLX_DMATHR_TO_C1LPAE(r) (((r) & PLX_DMATHR_C1LPAE_MASK) >> 20)
/* DMA Channel 1 Local-to-PCI Almost Full (divided by 2, minus 1) */
#define PLX_DMATHR_C1LPAF(x) (BIT(24) * ((x) & 0xf))
#define PLX_DMATHR_C1LPAF_MASK GENMASK(27, 24)
-#define PLX_DMATHR_C1LPAF_SHIFT 24
+#define PLX_DMATHR_TO_C1LPAF(r) (((r) & PLX_DMATHR_C1LPAF_MASK) >> 24)
/* DMA Channel 1 PCI-to-Local Almost Empty (divided by 2, minus 1) */
#define PLX_DMATHR_C1PLAE(x) (BIT(28) * ((x) & 0xf))
#define PLX_DMATHR_C1PLAE_MASK GENMASK(31, 28)
-#define PLX_DMATHR_C1PLAE_SHIFT 28
+#define PLX_DMATHR_TO_C1PLAE(r) (((r) & PLX_DMATHR_C1PLAE_MASK) >> 28)
/*
* Messaging Queue Registers OPLFIS, OPLFIM, IQP, OQP, MQCR, QBAR, IFHPR,
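The plx9080.h changes above consistently trade *_SHIFT constants for PLX_*_TO_*(r) extractor macros that mask and shift in one step. A hypothetical caller (the readl() and the PLX_REG_DMATHR offset are assumptions, not taken from this hunk) would look roughly like:

    #include <linux/io.h>
    #include "plx9080.h"

    /* Hypothetical helper; "base" is an ioremap()'d PLX register window. */
    static unsigned int plx_c0plaf(void __iomem *base)
    {
            u32 dmathr = readl(base + PLX_REG_DMATHR);

            /* Before: (dmathr & PLX_DMATHR_C0PLAF_MASK) >> PLX_DMATHR_C0PLAF_SHIFT */
            /* After: the extractor macro masks and shifts in one step. */
            return PLX_DMATHR_TO_C0PLAF(dmathr);
    }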
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 4a87b4b52400..c14a02564432 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -697,134 +697,6 @@ static void s626_reset_cap_flags(struct comedi_device *dev,
s626_debi_replace(dev, S626_LP_CRB(chan), ~S626_CRBMSK_INTCTRL, set);
}
-#ifdef unused
-/*
- * Return counter setup in a format (COUNTER_SETUP) that is consistent
- * for both A and B counters.
- */
-static uint16_t s626_get_mode_a(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t cra;
- uint16_t crb;
- uint16_t setup;
- unsigned int cntsrc, clkmult, clkpol, encmode;
-
- /* Fetch CRA and CRB register images. */
- cra = s626_debi_read(dev, S626_LP_CRA(chan));
- crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- /*
- * Populate the standardized counter setup bit fields.
- */
- setup =
- /* LoadSrc = LoadSrcA. */
- S626_SET_STD_LOADSRC(S626_GET_CRA_LOADSRC_A(cra)) |
- /* LatchSrc = LatchSrcA. */
- S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
- /* IntSrc = IntSrcA. */
- S626_SET_STD_INTSRC(S626_GET_CRA_INTSRC_A(cra)) |
- /* IndxSrc = IndxSrcA. */
- S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_A(cra)) |
- /* IndxPol = IndxPolA. */
- S626_SET_STD_INDXPOL(S626_GET_CRA_INDXPOL_A(cra)) |
- /* ClkEnab = ClkEnabA. */
- S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_A(crb));
-
- /* Adjust mode-dependent parameters. */
- cntsrc = S626_GET_CRA_CNTSRC_A(cra);
- if (cntsrc & S626_CNTSRC_SYSCLK) {
- /* Timer mode (CntSrcA<1> == 1): */
- encmode = S626_ENCMODE_TIMER;
- /* Set ClkPol to indicate count direction (CntSrcA<0>). */
- clkpol = cntsrc & 1;
- /* ClkMult must be 1x in Timer mode. */
- clkmult = S626_CLKMULT_1X;
- } else {
- /* Counter mode (CntSrcA<1> == 0): */
- encmode = S626_ENCMODE_COUNTER;
- /* Pass through ClkPol. */
- clkpol = S626_GET_CRA_CLKPOL_A(cra);
- /* Force ClkMult to 1x if not legal, else pass through. */
- clkmult = S626_GET_CRA_CLKMULT_A(cra);
- if (clkmult == S626_CLKMULT_SPECIAL)
- clkmult = S626_CLKMULT_1X;
- }
- setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
- S626_SET_STD_CLKPOL(clkpol);
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-static uint16_t s626_get_mode_b(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t cra;
- uint16_t crb;
- uint16_t setup;
- unsigned int cntsrc, clkmult, clkpol, encmode;
-
- /* Fetch CRA and CRB register images. */
- cra = s626_debi_read(dev, S626_LP_CRA(chan));
- crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- /*
- * Populate the standardized counter setup bit fields.
- */
- setup =
- /* IntSrc = IntSrcB. */
- S626_SET_STD_INTSRC(S626_GET_CRB_INTSRC_B(crb)) |
- /* LatchSrc = LatchSrcB. */
- S626_SET_STD_LATCHSRC(S626_GET_CRB_LATCHSRC(crb)) |
- /* LoadSrc = LoadSrcB. */
- S626_SET_STD_LOADSRC(S626_GET_CRB_LOADSRC_B(crb)) |
- /* IndxPol = IndxPolB. */
- S626_SET_STD_INDXPOL(S626_GET_CRB_INDXPOL_B(crb)) |
- /* ClkEnab = ClkEnabB. */
- S626_SET_STD_CLKENAB(S626_GET_CRB_CLKENAB_B(crb)) |
- /* IndxSrc = IndxSrcB. */
- S626_SET_STD_INDXSRC(S626_GET_CRA_INDXSRC_B(cra));
-
- /* Adjust mode-dependent parameters. */
- cntsrc = S626_GET_CRA_CNTSRC_B(cra);
- clkmult = S626_GET_CRB_CLKMULT_B(crb);
- if (clkmult == S626_CLKMULT_SPECIAL) {
- /* Extender mode (ClkMultB == S626_CLKMULT_SPECIAL): */
- encmode = S626_ENCMODE_EXTENDER;
- /* Indicate multiplier is 1x. */
- clkmult = S626_CLKMULT_1X;
- /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
- clkpol = cntsrc & 1;
- } else if (cntsrc & S626_CNTSRC_SYSCLK) {
- /* Timer mode (CntSrcB<1> == 1): */
- encmode = S626_ENCMODE_TIMER;
- /* Indicate multiplier is 1x. */
- clkmult = S626_CLKMULT_1X;
- /* Set ClkPol equal to Timer count direction (CntSrcB<0>). */
- clkpol = cntsrc & 1;
- } else {
- /* If Counter mode (CntSrcB<1> == 0): */
- encmode = S626_ENCMODE_COUNTER;
- /* Clock multiplier is passed through. */
- /* Clock polarity is passed through. */
- clkpol = S626_GET_CRB_CLKPOL_B(crb);
- }
- setup |= S626_SET_STD_ENCMODE(encmode) | S626_SET_STD_CLKMULT(clkmult) |
- S626_SET_STD_CLKPOL(clkpol);
-
- /* Return adjusted counter setup. */
- return setup;
-}
-
-static uint16_t s626_get_mode(struct comedi_device *dev,
- unsigned int chan)
-{
- return (chan < 3) ? s626_get_mode_a(dev, chan)
- : s626_get_mode_b(dev, chan);
-}
-#endif
-
/*
* Set the operating mode for the specified counter. The setup
* parameter is treated as a COUNTER_SETUP data type. The following
@@ -1023,25 +895,6 @@ static void s626_set_enable(struct comedi_device *dev,
s626_debi_replace(dev, S626_LP_CRB(chan), ~mask, set);
}
-#ifdef unused
-static uint16_t s626_get_enable(struct comedi_device *dev,
- unsigned int chan)
-{
- uint16_t crb = s626_debi_read(dev, S626_LP_CRB(chan));
-
- return (chan < 3) ? S626_GET_CRB_CLKENAB_A(crb)
- : S626_GET_CRB_CLKENAB_B(crb);
-}
-#endif
-
-#ifdef unused
-static uint16_t s626_get_latch_source(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_CRB_LATCHSRC(s626_debi_read(dev, S626_LP_CRB(chan)));
-}
-#endif
-
/*
* Return/set the event that will trigger transfer of the preload
* register into the counter. 0=ThisCntr_Index, 1=ThisCntr_Overflow,
@@ -1066,19 +919,6 @@ static void s626_set_load_trig(struct comedi_device *dev,
s626_debi_replace(dev, reg, ~mask, set);
}
-#ifdef unused
-static uint16_t s626_get_load_trig(struct comedi_device *dev,
- unsigned int chan)
-{
- if (chan < 3)
- return S626_GET_CRA_LOADSRC_A(s626_debi_read(dev,
- S626_LP_CRA(chan)));
- else
- return S626_GET_CRB_LOADSRC_B(s626_debi_read(dev,
- S626_LP_CRB(chan)));
-}
-#endif
-
/*
* Return/set counter interrupt source and clear any captured
* index/overflow events. int_source: 0=Disabled, 1=OverflowOnly,
@@ -1138,93 +978,6 @@ static void s626_set_int_src(struct comedi_device *dev,
}
}
-#ifdef unused
-static uint16_t s626_get_int_src(struct comedi_device *dev,
- unsigned int chan)
-{
- if (chan < 3)
- return S626_GET_CRA_INTSRC_A(s626_debi_read(dev,
- S626_LP_CRA(chan)));
- else
- return S626_GET_CRB_INTSRC_B(s626_debi_read(dev,
- S626_LP_CRB(chan)));
-}
-#endif
-
-#ifdef unused
-/*
- * Return/set the clock multiplier.
- */
-static void s626_set_clk_mult(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_CLKMULT;
- mode |= S626_SET_STD_CLKMULT(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-/*
- * Return/set the clock polarity.
- */
-static void s626_set_clk_pol(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_CLKPOL;
- mode |= S626_SET_STD_CLKPOL(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-/*
- * Return/set the encoder mode.
- */
-static void s626_set_enc_mode(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_ENCMODE;
- mode |= S626_SET_STD_ENCMODE(value);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_index_pol(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_INDXPOL(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the index source.
- */
-static void s626_set_index_src(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_INDXSRC;
- mode |= S626_SET_STD_INDXSRC(value != 0);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
-static uint16_t s626_get_index_src(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_INDXSRC(s626_get_mode(dev, chan));
-}
-#endif
-
/*
* Generate an index pulse.
*/
@@ -1717,43 +1470,6 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
/* End of RPS program build */
}
-#ifdef unused_code
-static int s626_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct s626_private *devpriv = dev->private;
- uint8_t i;
- int32_t *readaddr;
-
- /* Trigger ADC scan loop start */
- s626_mc_enable(dev, S626_MC2_ADC_RPS, S626_P_MC2);
-
- /* Wait until ADC scan loop is finished (RPS Signal 0 reset) */
- while (s626_mc_test(dev, S626_MC2_ADC_RPS, S626_P_MC2))
- ;
-
- /*
- * Init ptr to DMA buffer that holds new ADC data. We skip the
- * first uint16_t in the buffer because it contains junk data from
- * the final ADC of the previous poll list scan.
- */
- readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
-
- /*
- * Convert ADC data to 16-bit integer values and
- * copy to application buffer.
- */
- for (i = 0; i < devpriv->adc_items; i++) {
- *data = s626_ai_reg_to_uint(*readaddr++);
- data++;
- }
-
- return i;
-}
-#endif
-
static int s626_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -2500,7 +2216,8 @@ static int s626_initialize(struct comedi_device *dev)
for (i = 0; i < 2; i++) {
writel(S626_I2C_CLKSEL, dev->mmio + S626_P_I2CSTAT);
s626_mc_enable(dev, S626_MC2_UPLD_IIC, S626_P_MC2);
- ret = comedi_timeout(dev, NULL, NULL, s626_i2c_handshake_eoc, 0);
+ ret = comedi_timeout(dev, NULL,
+ NULL, s626_i2c_handshake_eoc, 0);
if (ret)
return ret;
}
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index 6a00a64c6f3a..4cef45263267 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -79,7 +79,7 @@
/* Address offsets, in DWORDS, from base of DMA buffer. */
#define S626_DAC_WDMABUF_OS S626_ADC_DMABUF_DWORDS
-/* Interrupt enable bit in ISR and IER. */
+/* Interrupt enable bit in ISR and IER. */
#define S626_IRQ_GPIO3 0x00000040 /* IRQ enable for GPIO3. */
#define S626_IRQ_RPS1 0x10000000
#define S626_ISR_AFOU 0x00000800
@@ -329,7 +329,7 @@
* WS1-WS4 = CS* outputs.
*/
-#if S626_PLATFORM == S626_INTEL /*
+#if (S626_PLATFORM == S626_INTEL) /*
* Base ACON1 config: always run
* A1 based on TSL1.
*/
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 10f94ec34536..608403c7586b 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -946,10 +946,8 @@ static int usbduxfast_auto_attach(struct comedi_device *dev,
}
devpriv->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!devpriv->urb) {
- dev_err(dev->class_dev, "Could not alloc. urb\n");
+ if (!devpriv->urb)
return -ENOMEM;
- }
devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL);
if (!devpriv->inbuf)
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 8c7393ef762d..a004aed0147a 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -177,7 +177,7 @@ static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
* The max packet size attributes of the K8061
* input/output endpoints are identical
*/
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_tx);
usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf,
size, NULL, devpriv->ep_tx->bInterval);
@@ -199,7 +199,7 @@ static int vmk80xx_read_packet(struct comedi_device *dev)
ep = devpriv->ep_rx;
pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
- le16_to_cpu(ep->wMaxPacketSize), NULL,
+ usb_endpoint_maxp(ep), NULL,
HZ * 10);
}
@@ -220,7 +220,7 @@ static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
ep = devpriv->ep_tx;
pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
- le16_to_cpu(ep->wMaxPacketSize), NULL,
+ usb_endpoint_maxp(ep), NULL,
HZ * 10);
}
@@ -230,7 +230,7 @@ static int vmk80xx_reset_device(struct comedi_device *dev)
size_t size;
int retval;
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_tx);
memset(devpriv->usb_tx_buf, 0, size);
retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST);
if (retval)
@@ -684,12 +684,12 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
size_t size;
- size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_rx);
devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
+ size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf) {
kfree(devpriv->usb_rx_buf);
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 46c050cc7dbe..aedca66cbe41 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -26,56 +26,6 @@
#include "dgnc_cls.h"
#include "dgnc_tty.h"
-static inline void cls_parse_isr(struct dgnc_board *brd, uint port);
-static inline void cls_clear_break(struct channel_t *ch, int force);
-static inline void cls_set_cts_flow_control(struct channel_t *ch);
-static inline void cls_set_rts_flow_control(struct channel_t *ch);
-static inline void cls_set_ixon_flow_control(struct channel_t *ch);
-static inline void cls_set_ixoff_flow_control(struct channel_t *ch);
-static inline void cls_set_no_output_flow_control(struct channel_t *ch);
-static inline void cls_set_no_input_flow_control(struct channel_t *ch);
-static void cls_parse_modem(struct channel_t *ch, unsigned char signals);
-static void cls_tasklet(unsigned long data);
-static void cls_vpd(struct dgnc_board *brd);
-static void cls_uart_init(struct channel_t *ch);
-static void cls_uart_off(struct channel_t *ch);
-static int cls_drain(struct tty_struct *tty, uint seconds);
-static void cls_param(struct tty_struct *tty);
-static void cls_assert_modem_signals(struct channel_t *ch);
-static void cls_flush_uart_write(struct channel_t *ch);
-static void cls_flush_uart_read(struct channel_t *ch);
-static void cls_disable_receiver(struct channel_t *ch);
-static void cls_enable_receiver(struct channel_t *ch);
-static void cls_send_break(struct channel_t *ch, int msecs);
-static void cls_send_start_character(struct channel_t *ch);
-static void cls_send_stop_character(struct channel_t *ch);
-static void cls_copy_data_from_uart_to_queue(struct channel_t *ch);
-static void cls_copy_data_from_queue_to_uart(struct channel_t *ch);
-static uint cls_get_uart_bytes_left(struct channel_t *ch);
-static void cls_send_immediate_char(struct channel_t *ch, unsigned char);
-static irqreturn_t cls_intr(int irq, void *voidbrd);
-
-struct board_ops dgnc_cls_ops = {
- .tasklet = cls_tasklet,
- .intr = cls_intr,
- .uart_init = cls_uart_init,
- .uart_off = cls_uart_off,
- .drain = cls_drain,
- .param = cls_param,
- .vpd = cls_vpd,
- .assert_modem_signals = cls_assert_modem_signals,
- .flush_uart_write = cls_flush_uart_write,
- .flush_uart_read = cls_flush_uart_read,
- .disable_receiver = cls_disable_receiver,
- .enable_receiver = cls_enable_receiver,
- .send_break = cls_send_break,
- .send_start_character = cls_send_start_character,
- .send_stop_character = cls_send_stop_character,
- .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = cls_get_uart_bytes_left,
- .send_immediate_char = cls_send_immediate_char
-};
-
static inline void cls_set_cts_flow_control(struct channel_t *ch)
{
unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
@@ -357,6 +307,253 @@ static inline void cls_clear_break(struct channel_t *ch, int force)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
+static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
+{
+ int qleft = 0;
+ unsigned char linestatus = 0;
+ unsigned char error_mask = 0;
+ ushort head;
+ ushort tail;
+ unsigned long flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ spin_lock_irqsave(&ch->ch_lock, flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head;
+ tail = ch->ch_r_tail;
+
+ /* Store how much space we have left in the queue */
+ qleft = tail - head - 1;
+ if (qleft < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ while (1) {
+ linestatus = readb(&ch->ch_cls_uart->lsr);
+
+ if (!(linestatus & (UART_LSR_DR)))
+ break;
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ linestatus = 0;
+ readb(&ch->ch_cls_uart->txrx);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some
+ * data. The assumption is that HWFLOW or SWFLOW should have
+ * stopped things way way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first,
+ * I hope thats okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_r_tail = tail;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
+ | UART_LSR_FE);
+ ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
+
+ qleft--;
+
+ if (ch->ch_equeue[head] & UART_LSR_PE)
+ ch->ch_err_parity++;
+ if (ch->ch_equeue[head] & UART_LSR_BI)
+ ch->ch_err_break++;
+ if (ch->ch_equeue[head] & UART_LSR_FE)
+ ch->ch_err_frame++;
+
+ /* Add to, and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+}
+
+/* Make the UART raise any of the output signals we want up */
+static void cls_assert_modem_signals(struct channel_t *ch)
+{
+ unsigned char out;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ out = ch->ch_mostat;
+
+ if (ch->ch_flags & CH_LOOPBACK)
+ out |= UART_MCR_LOOP;
+
+ writeb(out, &ch->ch_cls_uart->mcr);
+
+ /* Give time for the UART to actually drop the signals */
+ udelay(10);
+}
+
+static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
+{
+ ushort head;
+ ushort tail;
+ int n;
+ int qlen;
+ uint len_written = 0;
+ unsigned long flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ spin_lock_irqsave(&ch->ch_lock, flags);
+
+ /* No data to write to the UART */
+ if (ch->ch_w_tail == ch->ch_w_head)
+ goto exit_unlock;
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_FORCED_STOP) ||
+ (ch->ch_flags & CH_BREAK_SENDING))
+ goto exit_unlock;
+
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
+ goto exit_unlock;
+
+ n = 32;
+
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
+
+ /* Find minimum of the FIFO space, versus queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has
+ * completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has
+ * completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+ writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
+ ch->ch_w_tail++;
+ ch->ch_w_tail &= WQUEUEMASK;
+ ch->ch_txcount++;
+ len_written++;
+ n--;
+ }
+
+ if (len_written > 0)
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+exit_unlock:
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+}
+
+static void cls_parse_modem(struct channel_t *ch, unsigned char signals)
+{
+ unsigned char msignals = signals;
+ unsigned long flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Do altpin switching. Altpin switches DCD and DSR.
+ * This probably breaks DSRPACE, so we should be more clever here.
+ */
+ spin_lock_irqsave(&ch->ch_lock, flags);
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ unsigned char mswap = signals;
+
+ if (mswap & UART_MSR_DDCD) {
+ msignals &= ~UART_MSR_DDCD;
+ msignals |= UART_MSR_DDSR;
+ }
+ if (mswap & UART_MSR_DDSR) {
+ msignals &= ~UART_MSR_DDSR;
+ msignals |= UART_MSR_DDCD;
+ }
+ if (mswap & UART_MSR_DCD) {
+ msignals &= ~UART_MSR_DCD;
+ msignals |= UART_MSR_DSR;
+ }
+ if (mswap & UART_MSR_DSR) {
+ msignals &= ~UART_MSR_DSR;
+ msignals |= UART_MSR_DCD;
+ }
+ }
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+
+ /*
+ * Scrub off lower bits. They signify deltas, which I don't
+ * care about.
+ */
+ signals &= 0xf0;
+
+ spin_lock_irqsave(&ch->ch_lock, flags);
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+}
+
/* Parse the ISR register for the specific port */
static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
{
@@ -387,8 +584,6 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
/* Receive Interrupt pending */
if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
/* Read data from uart -> queue */
- brd->intr_rx++;
- ch->ch_intr_rx++;
cls_copy_data_from_uart_to_queue(ch);
dgnc_check_queue_flow_control(ch);
}
@@ -398,27 +593,48 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
/* Transfer data (if any) from Write Queue -> UART. */
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- brd->intr_tx++;
- ch->ch_intr_tx++;
spin_unlock_irqrestore(&ch->ch_lock, flags);
cls_copy_data_from_queue_to_uart(ch);
}
- /* CTS/RTS change of state */
- if (isr & UART_IIR_CTSRTS) {
- brd->intr_modem++;
- ch->ch_intr_modem++;
- /*
- * Don't need to do anything, the cls_parse_modem
- * below will grab the updated modem signals.
- */
- }
-
/* Parse any modem signal changes */
cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
}
}
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_write(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
+ usleep_range(10, 20);
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_read(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * For complete POSIX compatibility, we should be purging the
+ * read FIFO in the UART here.
+ *
+ * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also
+ * incorrectly flushes write data and effectively trashes the
+ * FIFO.
+ *
+ * Presumably, this is a bug in this UART.
+ */
+
+ udelay(10);
+}
+
/*
* cls_param()
* Send any/all changes to the line to the UART.
@@ -760,8 +976,6 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
spin_lock_irqsave(&brd->bd_intr_lock, flags);
- brd->intr_count++;
-
/*
* Check the board's global interrupt offset to see if we
* we actually do have an interrupt pending for us.
@@ -804,93 +1018,6 @@ static void cls_enable_receiver(struct channel_t *ch)
writeb(tmp, &ch->ch_cls_uart->ier);
}
-static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
-{
- int qleft = 0;
- unsigned char linestatus = 0;
- unsigned char error_mask = 0;
- ushort head;
- ushort tail;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* cache head and tail of queue */
- head = ch->ch_r_head;
- tail = ch->ch_r_tail;
-
- /* Store how much space we have left in the queue */
- qleft = tail - head - 1;
- if (qleft < 0)
- qleft += RQUEUEMASK + 1;
-
- /*
- * Create a mask to determine whether we should
- * insert the character (if any) into our queue.
- */
- if (ch->ch_c_iflag & IGNBRK)
- error_mask |= UART_LSR_BI;
-
- while (1) {
- linestatus = readb(&ch->ch_cls_uart->lsr);
-
- if (!(linestatus & (UART_LSR_DR)))
- break;
-
- /*
- * Discard character if we are ignoring the error mask.
- */
- if (linestatus & error_mask) {
- linestatus = 0;
- readb(&ch->ch_cls_uart->txrx);
- continue;
- }
-
- /*
- * If our queue is full, we have no choice but to drop some
- * data. The assumption is that HWFLOW or SWFLOW should have
- * stopped things way way before we got to this point.
- *
- * I decided that I wanted to ditch the oldest data first,
- * I hope thats okay with everyone? Yes? Good.
- */
- while (qleft < 1) {
- tail = (tail + 1) & RQUEUEMASK;
- ch->ch_r_tail = tail;
- ch->ch_err_overrun++;
- qleft++;
- }
-
- ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
- | UART_LSR_FE);
- ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
-
- qleft--;
-
- if (ch->ch_equeue[head] & UART_LSR_PE)
- ch->ch_err_parity++;
- if (ch->ch_equeue[head] & UART_LSR_BI)
- ch->ch_err_break++;
- if (ch->ch_equeue[head] & UART_LSR_FE)
- ch->ch_err_frame++;
-
- /* Add to, and flip head if needed */
- head = (head + 1) & RQUEUEMASK;
- ch->ch_rxcount++;
- }
-
- /*
- * Write new final heads to channel structure.
- */
- ch->ch_r_head = head & RQUEUEMASK;
- ch->ch_e_head = head & EQUEUEMASK;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
/*
* This function basically goes to sleep for secs, or until
* it gets signalled that the port has fully drained.
@@ -926,199 +1053,6 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
((un->un_flags & UN_EMPTY) == 0));
}
-/* Channel lock MUST be held before calling this function! */
-static void cls_flush_uart_write(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
- &ch->ch_cls_uart->isr_fcr);
- usleep_range(10, 20);
-
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-}
-
-/* Channel lock MUST be held before calling this function! */
-static void cls_flush_uart_read(struct channel_t *ch)
-{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * For complete POSIX compatibility, we should be purging the
- * read FIFO in the UART here.
- *
- * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also
- * incorrectly flushes write data as well as just basically trashing the
- * FIFO.
- *
- * Presumably, this is a bug in this UART.
- */
-
- udelay(10);
-}
-
-static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
-{
- ushort head;
- ushort tail;
- int n;
- int qlen;
- uint len_written = 0;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- /* No data to write to the UART */
- if (ch->ch_w_tail == ch->ch_w_head)
- goto exit_unlock;
-
- /* If port is "stopped", don't send any data to the UART */
- if ((ch->ch_flags & CH_FORCED_STOP) ||
- (ch->ch_flags & CH_BREAK_SENDING))
- goto exit_unlock;
-
- if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
- goto exit_unlock;
-
- n = 32;
-
- /* cache head and tail of queue */
- head = ch->ch_w_head & WQUEUEMASK;
- tail = ch->ch_w_tail & WQUEUEMASK;
- qlen = (head - tail) & WQUEUEMASK;
-
- /* Find minimum of the FIFO space, versus queue length */
- n = min(n, qlen);
-
- while (n > 0) {
- /*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_RTS)) {
- ch->ch_mostat |= (UART_MCR_RTS);
- cls_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
-
- /*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has
- * completed.
- */
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- if (!(ch->ch_mostat & UART_MCR_DTR)) {
- ch->ch_mostat |= (UART_MCR_DTR);
- cls_assert_modem_signals(ch);
- }
- ch->ch_tun.un_flags |= (UN_EMPTY);
- }
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- ch->ch_w_tail++;
- ch->ch_w_tail &= WQUEUEMASK;
- ch->ch_txcount++;
- len_written++;
- n--;
- }
-
- if (len_written > 0)
- ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-
-exit_unlock:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-static void cls_parse_modem(struct channel_t *ch, unsigned char signals)
-{
- unsigned char msignals = signals;
- unsigned long flags;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- /*
- * Do altpin switching. Altpin switches DCD and DSR.
- * This prolly breaks DSRPACE, so we should be more clever here.
- */
- spin_lock_irqsave(&ch->ch_lock, flags);
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- unsigned char mswap = signals;
-
- if (mswap & UART_MSR_DDCD) {
- msignals &= ~UART_MSR_DDCD;
- msignals |= UART_MSR_DDSR;
- }
- if (mswap & UART_MSR_DDSR) {
- msignals &= ~UART_MSR_DDSR;
- msignals |= UART_MSR_DDCD;
- }
- if (mswap & UART_MSR_DCD) {
- msignals &= ~UART_MSR_DCD;
- msignals |= UART_MSR_DSR;
- }
- if (mswap & UART_MSR_DSR) {
- msignals &= ~UART_MSR_DSR;
- msignals |= UART_MSR_DCD;
- }
- }
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- /*
- * Scrub off lower bits. They signify delta's, which I don't
- * care about
- */
- signals &= 0xf0;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- if (msignals & UART_MSR_DCD)
- ch->ch_mistat |= UART_MSR_DCD;
- else
- ch->ch_mistat &= ~UART_MSR_DCD;
-
- if (msignals & UART_MSR_DSR)
- ch->ch_mistat |= UART_MSR_DSR;
- else
- ch->ch_mistat &= ~UART_MSR_DSR;
-
- if (msignals & UART_MSR_RI)
- ch->ch_mistat |= UART_MSR_RI;
- else
- ch->ch_mistat &= ~UART_MSR_RI;
-
- if (msignals & UART_MSR_CTS)
- ch->ch_mistat |= UART_MSR_CTS;
- else
- ch->ch_mistat &= ~UART_MSR_CTS;
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-}
-
-/* Make the UART raise any of the output signals we want up */
-static void cls_assert_modem_signals(struct channel_t *ch)
-{
- unsigned char out;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return;
-
- out = ch->ch_mostat;
-
- if (ch->ch_flags & CH_LOOPBACK)
- out |= UART_MCR_LOOP;
-
- writeb(out, &ch->ch_cls_uart->mcr);
-
- /* Give time for the UART to actually drop the signals */
- udelay(10);
-}
-
static void cls_send_start_character(struct channel_t *ch)
{
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
@@ -1298,3 +1232,24 @@ static void cls_vpd(struct dgnc_board *brd)
iounmap(re_map_vpdbase);
}
+
+struct board_ops dgnc_cls_ops = {
+ .tasklet = cls_tasklet,
+ .intr = cls_intr,
+ .uart_init = cls_uart_init,
+ .uart_off = cls_uart_off,
+ .drain = cls_drain,
+ .param = cls_param,
+ .vpd = cls_vpd,
+ .assert_modem_signals = cls_assert_modem_signals,
+ .flush_uart_write = cls_flush_uart_write,
+ .flush_uart_read = cls_flush_uart_read,
+ .disable_receiver = cls_disable_receiver,
+ .enable_receiver = cls_enable_receiver,
+ .send_break = cls_send_break,
+ .send_start_character = cls_send_start_character,
+ .send_stop_character = cls_send_stop_character,
+ .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = cls_get_uart_bytes_left,
+ .send_immediate_char = cls_send_immediate_char
+};
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index af2e835efa1b..fd372d3afa46 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -37,13 +37,14 @@ MODULE_SUPPORTED_DEVICE("dgnc");
*
*/
static int dgnc_start(void);
-static int dgnc_finalize_board_init(struct dgnc_board *brd);
-static int dgnc_found_board(struct pci_dev *pdev, int id);
+static int dgnc_request_irq(struct dgnc_board *brd);
+static void dgnc_free_irq(struct dgnc_board *brd);
+static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id);
static void dgnc_cleanup_board(struct dgnc_board *brd);
static void dgnc_poll_handler(ulong dummy);
static int dgnc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static void dgnc_do_remap(struct dgnc_board *brd);
+static int dgnc_do_remap(struct dgnc_board *brd);
/*
* File operations permitted on Control/Management major.
@@ -146,7 +147,7 @@ static void cleanup(bool sysfiles)
for (i = 0; i < dgnc_num_boards; ++i) {
dgnc_remove_ports_sysfiles(dgnc_board[i]);
- dgnc_tty_uninit(dgnc_board[i]);
+ dgnc_cleanup_tty(dgnc_board[i]);
dgnc_cleanup_board(dgnc_board[i]);
}
@@ -158,7 +159,7 @@ static void cleanup(bool sysfiles)
*
* Module unload. This is where it all ends.
*/
-static void dgnc_cleanup_module(void)
+static void __exit dgnc_cleanup_module(void)
{
cleanup(true);
pci_unregister_driver(&dgnc_driver);
@@ -274,6 +275,7 @@ failed_class:
static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
+ struct dgnc_board *brd;
/* wake up and enable device */
rc = pci_enable_device(pdev);
@@ -281,9 +283,48 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return -EIO;
- rc = dgnc_found_board(pdev, ent->driver_data);
- if (rc == 0)
- dgnc_num_boards++;
+ brd = dgnc_found_board(pdev, ent->driver_data);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
+
+ /*
+ * Do tty device initialization.
+ */
+
+ rc = dgnc_tty_register(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
+ goto failed;
+ }
+
+ rc = dgnc_request_irq(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
+ goto unregister_tty;
+ }
+
+ rc = dgnc_tty_init(brd);
+ if (rc < 0) {
+ pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
+ goto free_irq;
+ }
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgnc_create_ports_sysfiles(brd);
+
+ dgnc_board[dgnc_num_boards++] = brd;
+
+ return 0;
+
+free_irq:
+ dgnc_free_irq(brd);
+unregister_tty:
+ dgnc_tty_unregister(brd);
+
+failed:
+ kfree(brd);
return rc;
}
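
dgnc_found_board() now hands the allocated board back through the kernel's ERR_PTR convention instead of filling a global slot and returning an int, which is what lets dgnc_init_one() above unwind with IS_ERR()/PTR_ERR(). A minimal, illustrative sketch of that convention (the names mirror the driver, but the bodies are hypothetical):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include "dgnc_driver.h"        /* for struct dgnc_board (assumed) */

    static struct dgnc_board *found_board_sketch(void)
    {
            struct dgnc_board *brd = kzalloc(sizeof(*brd), GFP_KERNEL);

            if (!brd)
                    return ERR_PTR(-ENOMEM);   /* encode errno in the pointer */
            return brd;
    }

    static int init_one_sketch(void)
    {
            struct dgnc_board *brd = found_board_sketch();

            if (IS_ERR(brd))
                    return PTR_ERR(brd);       /* decode it back to -ENOMEM */

            /* ... tty registration, IRQ request and tty init would follow ... */
            kfree(brd);
            return 0;
    }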
@@ -324,17 +365,6 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
brd->re_map_membase = NULL;
}
- if (brd->msgbuf_head) {
- unsigned long flags;
-
- spin_lock_irqsave(&dgnc_global_lock, flags);
- brd->msgbuf = NULL;
- dev_dbg(&brd->pdev->dev, "%s\n", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- }
-
/* Free all allocated channels structs */
for (i = 0; i < MAXPORTS ; i++) {
if (brd->channels[i]) {
@@ -356,29 +386,17 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
*
* A board has been found, init it.
*/
-static int dgnc_found_board(struct pci_dev *pdev, int id)
+static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
{
struct dgnc_board *brd;
unsigned int pci_irq;
int i = 0;
int rc = 0;
- unsigned long flags;
/* get the board structure and prep it */
- dgnc_board[dgnc_num_boards] = kzalloc(sizeof(*brd), GFP_KERNEL);
- brd = dgnc_board[dgnc_num_boards];
-
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd)
- return -ENOMEM;
-
- /* make a temporary message buffer for the boot messages */
- brd->msgbuf_head = kcalloc(8192, sizeof(u8), GFP_KERNEL);
- brd->msgbuf = brd->msgbuf_head;
-
- if (!brd->msgbuf) {
- kfree(brd);
- return -ENOMEM;
- }
+ return ERR_PTR(-ENOMEM);
/* store the info for the board we've found */
brd->magic = DGNC_BOARD_MAGIC;
@@ -400,9 +418,6 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
brd->state = BOARD_FOUND;
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
-
/* store which card & revision we have */
pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
@@ -435,7 +450,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
if (!brd->membase) {
dev_err(&brd->pdev->dev,
"Card has no PCI IO resources, failing.\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto failed;
}
brd->membase_end = pci_resource_end(pdev, 4);
@@ -455,7 +471,10 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
brd->bd_uart_offset = 0x8;
brd->bd_dividend = 921600;
- dgnc_do_remap(brd);
+ rc = dgnc_do_remap(brd);
+
+ if (rc < 0)
+ goto failed;
/* Get and store the board VPD, if it exists */
brd->bd_ops->vpd(brd);
@@ -507,81 +526,45 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
brd->bd_uart_offset = 0x200;
brd->bd_dividend = 921600;
- dgnc_do_remap(brd);
+ rc = dgnc_do_remap(brd);
- if (brd->re_map_membase) {
- /* Read and store the dvid after remapping */
- brd->dvid = readb(brd->re_map_membase + 0x8D);
+ if (rc < 0)
+ goto failed;
+
+ /* Read and store the dvid after remapping */
+ brd->dvid = readb(brd->re_map_membase + 0x8D);
+
+ /* Get and store the board VPD, if it exists */
+ brd->bd_ops->vpd(brd);
- /* Get and store the board VPD, if it exists */
- brd->bd_ops->vpd(brd);
- }
break;
default:
dev_err(&brd->pdev->dev,
"Didn't find any compatible Neo/Classic PCI boards.\n");
- return -ENXIO;
- }
-
- /*
- * Do tty device initialization.
- */
-
- rc = dgnc_tty_register(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_finalize_board_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
- goto failed;
- }
-
- rc = dgnc_tty_init(brd);
- if (rc < 0) {
- pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
+ rc = -ENXIO;
goto failed;
}
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
-
- dgnc_create_ports_sysfiles(brd);
-
/* init our poll helper tasklet */
tasklet_init(&brd->helper_tasklet,
brd->bd_ops->tasklet,
(unsigned long)brd);
- spin_lock_irqsave(&dgnc_global_lock, flags);
- brd->msgbuf = NULL;
- dev_dbg(&brd->pdev->dev, "%s\n", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
-
wake_up_interruptible(&brd->state_wait);
- return 0;
+ return brd;
failed:
- dgnc_tty_uninit(brd);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
+ kfree(brd);
- return -ENXIO;
+ return ERR_PTR(rc);
}
-static int dgnc_finalize_board_init(struct dgnc_board *brd)
+static int dgnc_request_irq(struct dgnc_board *brd)
{
int rc = 0;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return -ENODEV;
-
if (brd->irq) {
rc = request_irq(brd->irq, brd->bd_ops->intr,
IRQF_SHARED, "DGNC", brd);
@@ -597,42 +580,51 @@ static int dgnc_finalize_board_init(struct dgnc_board *brd)
return rc;
}
+static void dgnc_free_irq(struct dgnc_board *brd)
+{
+ if (brd->irq)
+ free_irq(brd->irq, brd);
+}
+
/*
* Remap PCI memory.
*/
-static void dgnc_do_remap(struct dgnc_board *brd)
+static int dgnc_do_remap(struct dgnc_board *brd)
{
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
+ int rc = 0;
brd->re_map_membase = ioremap(brd->membase, 0x1000);
+ if (!brd->re_map_membase)
+ rc = -ENOMEM;
+
+ return rc;
}
-/*****************************************************************************
-*
-* Function:
-*
-* dgnc_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
+/*
+ *
+ * Function:
+ *
+ * dgnc_poll_handler
+ *
+ * Author:
+ *
+ * Scott H Kilau
+ *
+ * Parameters:
+ *
+ * dummy -- ignored
+ *
+ * Return Values:
+ *
+ * none
+ *
+ * Description:
+ *
+ * As each timer expires, it determines (a) whether the "transmit"
+ * waiter needs to be woken up, and (b) whether the poller needs to
+ * be rescheduled.
+ *
+ */
static void dgnc_poll_handler(ulong dummy)
{
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 95ec729fae38..879202663a98 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -183,10 +183,6 @@ struct dgnc_board {
uint nasync; /* Number of ports on card */
uint irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
- ulong intr_modem; /* Count of interrupts */
- ulong intr_tx; /* Count of interrupts */
- ulong intr_rx; /* Count of interrupts */
ulong membase; /* Start of base memory of the card */
ulong membase_end; /* End of base memory of the card */
@@ -207,9 +203,6 @@ struct dgnc_board {
struct tty_driver *print_driver;
char print_name[200];
- bool dgnc_major_serial_registered;
- bool dgnc_major_transparent_print_registered;
-
u16 dpatype; /* The board "type",
* as defined by DPA
*/
@@ -217,12 +210,6 @@ struct dgnc_board {
* as defined by DPA
*/
- /*
- * Mgmt data.
- */
- char *msgbuf_head;
- char *msgbuf;
-
uint bd_dividend; /* Board/UARTs specific dividend */
struct board_ops *bd_ops;
@@ -381,10 +368,6 @@ struct channel_t {
ulong ch_xon_sends; /* Count of xons transmitted */
ulong ch_xoff_sends; /* Count of xoffs transmitted */
- ulong ch_intr_modem; /* Count of interrupts */
- ulong ch_intr_tx; /* Count of interrupts */
- ulong ch_intr_rx; /* Count of interrupts */
-
/* /proc/<board>/<channel> entries */
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_channel_table;
@@ -398,7 +381,7 @@ extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
-extern uint dgnc_num_boards; /* Total number of boards */
+extern uint dgnc_num_boards; /* Total number of boards */
extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
* structs
*/
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index ba57e9546f72..5becb3741b67 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -107,7 +107,9 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
/* Turn off auto Xon flow control */
efr &= ~UART_17158_EFR_IXON;
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero it
+ * out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -143,7 +145,9 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
ier &= ~UART_17158_IER_XOFF;
efr &= ~UART_17158_EFR_IXOFF;
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero it
+ * out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -181,7 +185,9 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on auto Xon flow control */
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero it
+ * out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -219,7 +225,9 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
ier |= UART_17158_IER_XOFF;
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero it
+ * out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -260,7 +268,9 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero
+ * it out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -298,7 +308,9 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
else
efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
- /* Why? Becuz Exar's spec says we have to zero it out before setting it */
+ /* Why? Because Exar's spec says we have to zero it
+ * out before setting it
+ */
writeb(0, &ch->ch_neo_uart->efr);
/* Turn on UART enhanced bits */
@@ -399,19 +411,17 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
/* Read data from uart -> queue */
- brd->intr_rx++;
- ch->ch_intr_rx++;
neo_copy_data_from_uart_to_queue(ch);
- /* Call our tty layer to enforce queue flow control if needed. */
+ /* Call our tty layer to enforce queue
+ * flow control if needed.
+ */
spin_lock_irqsave(&ch->ch_lock, flags);
dgnc_check_queue_flow_control(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
if (isr & UART_IIR_THRI) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
/* Transfer data (if any) from Write Queue -> UART. */
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -428,7 +438,9 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
* one it was, so we can suspend or resume data flow.
*/
if (cause == UART_17158_XON_DETECT) {
- /* Is output stopped right now, if so, resume it */
+ /* If output is stopped right now,
+ * resume it.
+ */
if (brd->channels[port]->ch_flags & CH_STOP) {
spin_lock_irqsave(&ch->ch_lock,
flags);
@@ -437,7 +449,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
flags);
}
} else if (cause == UART_17158_XOFF_DETECT) {
- if (!(brd->channels[port]->ch_flags & CH_STOP)) {
+ if (!(brd->channels[port]->ch_flags &
+ CH_STOP)) {
spin_lock_irqsave(&ch->ch_lock,
flags);
ch->ch_flags |= CH_STOP;
@@ -449,11 +462,10 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
/*
- * If we get here, this means the hardware is doing auto flow control.
- * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
+ * If we get here, this means the hardware is
+ * doing auto flow control. Check to see whether
+ * RTS/DTR or CTS/DSR caused this interrupt.
*/
- brd->intr_modem++;
- ch->ch_intr_modem++;
cause = readb(&ch->ch_neo_uart->mcr);
/* Which pin is doing auto flow? RTS or DTR? */
if ((cause & 0x4) == 0) {
@@ -517,8 +529,6 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
ch->ch_cached_lsr |= linestatus;
if (ch->ch_cached_lsr & UART_LSR_DR) {
- brd->intr_rx++;
- ch->ch_intr_rx++;
/* Read data from uart -> queue */
neo_copy_data_from_uart_to_queue(ch);
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -545,14 +555,13 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
* Rx Oruns. Exar says that an orun will NOT corrupt
* the FIFO. It will just replace the holding register
* with this new data byte. So basically just ignore this.
- * Probably we should eventually have an orun stat in our driver...
+ * Probably we should eventually have an orun stat in our
+ * driver...
*/
ch->ch_err_overrun++;
}
if (linestatus & UART_LSR_THRE) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -560,8 +569,6 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
/* Transfer data (if any) from Write Queue -> UART. */
neo_copy_data_from_queue_to_uart(ch);
} else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
- brd->intr_tx++;
- ch->ch_intr_tx++;
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -665,7 +672,9 @@ static void neo_param(struct tty_struct *tty)
4800, 9600, 19200, 38400 }
};
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ /* Only use the TXPrint baud rate if the terminal unit
+ * is NOT open
+ */
if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
(un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
@@ -788,7 +797,9 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
- /* If start/stop is set to disable, then we should disable flow control */
+ /* If start/stop is set to disable, then we should
+ * disable flow control
+ */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_output_flow_control(ch);
@@ -801,7 +812,9 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
- /* If start/stop is set to disable, then we should disable flow control */
+ /* If start/stop is set to disable, then we should
+ * disable flow control
+ */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_input_flow_control(ch);
@@ -926,8 +939,6 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return IRQ_NONE;
- brd->intr_count++;
-
/* Lock out the slow poller from running on this board. */
spin_lock_irqsave(&brd->bd_intr_lock, flags);
@@ -940,14 +951,18 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/*
* If 0, no interrupts pending.
- * This can happen if the IRQ is shared among a couple Neo/Classic boards.
+ * This can happen if the IRQ is shared among a couple Neo/Classic
+ * boards.
*/
if (!uart_poll) {
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
return IRQ_NONE;
}
- /* At this point, we have at least SOMETHING to service, dig further... */
+ /*
+ * At this point, we have at least SOMETHING to service, dig
+ * further...
+ */
/* Loop on each port */
while ((uart_poll & 0xff) != 0) {
@@ -971,7 +986,10 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
ch = brd->channels[port];
neo_copy_data_from_uart_to_queue(ch);
- /* Call our tty layer to enforce queue flow control if needed. */
+ /*
+ * Call our tty layer to enforce queue flow control if
+ * needed.
+ */
spin_lock_irqsave(&ch->ch_lock, flags2);
dgnc_check_queue_flow_control(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags2);
@@ -987,16 +1005,18 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
case UART_17158_TXRDY:
/*
- * TXRDY interrupt clears after reading ISR register for the UART channel.
+ * TXRDY interrupt clears after reading ISR register
+ * for the UART channel.
*/
/*
* Yes, this is odd...
* Why would I check EVERY possibility of type of
* interrupt, when we know its TXRDY???
- * Becuz for some reason, even tho we got triggered for TXRDY,
- * it seems to be occasionally wrong. Instead of TX, which
- * it should be, I was getting things like RXDY too. Weird.
+ * Because for some reason, even though we got triggered
+ * for TXRDY, it seems to be occasionally wrong. Instead
+ * of TX, which it should be, I was getting things like
+ * RXDY too. Weird.
*/
neo_parse_isr(brd, port);
break;
@@ -1011,8 +1031,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
default:
/*
* The UART triggered us with a bogus interrupt type.
- * It appears the Exar chip, when REALLY bogged down, will throw
- * these once and awhile.
+ * It appears the Exar chip, when REALLY bogged down,
+ * will throw these once in a while.
* Its harmless, just ignore it and move on.
*/
break;
@@ -1230,7 +1250,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
}
/*
- * If our queue is full, we have no choice but to drop some data.
+ * If our queue is full, we have no choice but to drop some
+ * data.
* The assumption is that HWFLOW or SWFLOW should have stopped
* things way way before we got to this point.
*
@@ -1323,7 +1344,10 @@ static void neo_flush_uart_write(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
- /* Check to see if the UART feels it completely flushed the FIFO. */
+ /*
+ * Check to see if the UART feels it completely flushed the
+ * FIFO.
+ */
tmp = readb(&ch->ch_neo_uart->isr_fcr);
if (tmp & 4)
udelay(10);
@@ -1352,7 +1376,10 @@ static void neo_flush_uart_read(struct channel_t *ch)
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
- /* Check to see if the UART feels it completely flushed the FIFO. */
+ /*
+ * Check to see if the UART feels it completely flushed the
+ * FIFO.
+ */
tmp = readb(&ch->ch_neo_uart->isr_fcr);
if (tmp & 2)
udelay(10);
@@ -1397,8 +1424,9 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_cached_lsr &= ~(UART_LSR_THRE);
/*
- * If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * If RTS Toggle mode is on, turn on RTS now if not
+ * already set, and make sure we get an event when the
+ * data transfer has completed.
*/
if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_RTS)) {
@@ -1408,8 +1436,9 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
/*
- * If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * If DTR Toggle mode is on, turn on DTR now if not
+ * already set, and make sure we get an event when the
+ * data transfer has completed.
*/
if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_DTR)) {
@@ -1465,7 +1494,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If RTS Toggle mode is on, turn on RTS now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_RTS)) {
@@ -1477,7 +1507,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
/*
* If DTR Toggle mode is on, turn on DTR now if not already set,
- * and make sure we get an event when the data transfer has completed.
+ * and make sure we get an event when the data transfer has
+ * completed.
*/
if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
if (!(ch->ch_mostat & UART_MCR_DTR)) {
@@ -1541,7 +1572,10 @@ static void neo_parse_modem(struct channel_t *ch, unsigned char signals)
}
}
- /* Scrub off lower bits. They signify delta's, which I don't care about */
+ /*
+ * Scrub off lower bits. They signify deltas, which I don't care
+ * about.
+ */
msignals &= 0xf0;
if (msignals & UART_MSR_DCD)
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index b8d41c5617e2..290bf6e226ac 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -25,31 +25,31 @@
#include "dgnc_driver.h"
#include "dgnc_mgmt.h"
-static ssize_t dgnc_driver_version_show(struct device_driver *ddp, char *buf)
+static ssize_t version_show(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
}
-static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
+static DRIVER_ATTR_RO(version);
-static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
+static ssize_t boards_show(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
}
-static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
+static DRIVER_ATTR_RO(boards);
-static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
+static ssize_t maxboards_show(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
+static DRIVER_ATTR_RO(maxboards);
-static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
+static ssize_t pollrate_show(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
}
-static ssize_t dgnc_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
+static ssize_t pollrate_store(struct device_driver *ddp,
+ const char *buf, size_t count)
{
unsigned long flags;
int tick;
@@ -65,8 +65,7 @@ static ssize_t dgnc_driver_pollrate_store(struct device_driver *ddp,
return count;
}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgnc_driver_pollrate_show,
- dgnc_driver_pollrate_store);
+static DRIVER_ATTR_RW(pollrate);
void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
{
@@ -103,8 +102,8 @@ void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
return 0; \
} while (0)
-static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr,
- char *buf)
+static ssize_t vpd_show(struct device *p, struct device_attribute *attr,
+ char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -123,10 +122,10 @@ static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(vpd, S_IRUSR, dgnc_vpd_show, NULL);
+static DEVICE_ATTR_RO(vpd);
-static ssize_t dgnc_serial_number_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_number_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -140,10 +139,10 @@ static ssize_t dgnc_serial_number_show(struct device *p,
return count;
}
-static DEVICE_ATTR(serial_number, S_IRUSR, dgnc_serial_number_show, NULL);
+static DEVICE_ATTR_RO(serial_number);
-static ssize_t dgnc_ports_state_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_state_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -158,10 +157,10 @@ static ssize_t dgnc_ports_state_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgnc_ports_state_show, NULL);
+static DEVICE_ATTR_RO(ports_state);
-static ssize_t dgnc_ports_baud_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_baud_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -176,11 +175,10 @@ static ssize_t dgnc_ports_baud_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgnc_ports_baud_show, NULL);
+static DEVICE_ATTR_RO(ports_baud);
-static ssize_t dgnc_ports_msignals_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ports_msignals_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -208,10 +206,10 @@ static ssize_t dgnc_ports_msignals_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgnc_ports_msignals_show, NULL);
+static DEVICE_ATTR_RO(ports_msignals);
-static ssize_t dgnc_ports_iflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_iflag_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -226,10 +224,10 @@ static ssize_t dgnc_ports_iflag_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgnc_ports_iflag_show, NULL);
+static DEVICE_ATTR_RO(ports_iflag);
-static ssize_t dgnc_ports_cflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_cflag_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -244,10 +242,10 @@ static ssize_t dgnc_ports_cflag_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgnc_ports_cflag_show, NULL);
+static DEVICE_ATTR_RO(ports_cflag);
-static ssize_t dgnc_ports_oflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_oflag_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -262,10 +260,10 @@ static ssize_t dgnc_ports_oflag_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgnc_ports_oflag_show, NULL);
+static DEVICE_ATTR_RO(ports_oflag);
-static ssize_t dgnc_ports_lflag_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_lflag_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -280,11 +278,10 @@ static ssize_t dgnc_ports_lflag_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgnc_ports_lflag_show, NULL);
+static DEVICE_ATTR_RO(ports_lflag);
-static ssize_t dgnc_ports_digi_flag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ports_digi_flag_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -299,10 +296,10 @@ static ssize_t dgnc_ports_digi_flag_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgnc_ports_digi_flag_show, NULL);
+static DEVICE_ATTR_RO(ports_digi_flag);
-static ssize_t dgnc_ports_rxcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_rxcount_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -317,10 +314,10 @@ static ssize_t dgnc_ports_rxcount_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgnc_ports_rxcount_show, NULL);
+static DEVICE_ATTR_RO(ports_rxcount);
-static ssize_t dgnc_ports_txcount_show(struct device *p,
- struct device_attribute *attr, char *buf)
+static ssize_t ports_txcount_show(struct device *p,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
int count = 0;
@@ -335,7 +332,7 @@ static ssize_t dgnc_ports_txcount_show(struct device *p,
}
return count;
}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgnc_ports_txcount_show, NULL);
+static DEVICE_ATTR_RO(ports_txcount);
/* this function creates the sys files that will export each signal status
* to sysfs each value will be put in a separate filename
@@ -378,8 +375,8 @@ void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
device_remove_file(&bd->pdev->dev, &dev_attr_serial_number);
}
-static ssize_t dgnc_tty_state_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_state_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -402,10 +399,10 @@ static ssize_t dgnc_tty_state_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%s",
un->un_open_count ? "Open" : "Closed");
}
-static DEVICE_ATTR(state, S_IRUSR, dgnc_tty_state_show, NULL);
+static DEVICE_ATTR_RO(tty_state);
-static ssize_t dgnc_tty_baud_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_baud_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -427,10 +424,10 @@ static ssize_t dgnc_tty_baud_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
}
-static DEVICE_ATTR(baud, S_IRUSR, dgnc_tty_baud_show, NULL);
+static DEVICE_ATTR_RO(tty_baud);
-static ssize_t dgnc_tty_msignals_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_msignals_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -461,10 +458,10 @@ static ssize_t dgnc_tty_msignals_show(struct device *d,
}
return 0;
}
-static DEVICE_ATTR(msignals, S_IRUSR, dgnc_tty_msignals_show, NULL);
+static DEVICE_ATTR_RO(tty_msignals);
-static ssize_t dgnc_tty_iflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_iflag_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -486,10 +483,10 @@ static ssize_t dgnc_tty_iflag_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
}
-static DEVICE_ATTR(iflag, S_IRUSR, dgnc_tty_iflag_show, NULL);
+static DEVICE_ATTR_RO(tty_iflag);
-static ssize_t dgnc_tty_cflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_cflag_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -511,10 +508,10 @@ static ssize_t dgnc_tty_cflag_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
}
-static DEVICE_ATTR(cflag, S_IRUSR, dgnc_tty_cflag_show, NULL);
+static DEVICE_ATTR_RO(tty_cflag);
-static ssize_t dgnc_tty_oflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_oflag_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -536,10 +533,10 @@ static ssize_t dgnc_tty_oflag_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
}
-static DEVICE_ATTR(oflag, S_IRUSR, dgnc_tty_oflag_show, NULL);
+static DEVICE_ATTR_RO(tty_oflag);
-static ssize_t dgnc_tty_lflag_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_lflag_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -561,10 +558,10 @@ static ssize_t dgnc_tty_lflag_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
}
-static DEVICE_ATTR(lflag, S_IRUSR, dgnc_tty_lflag_show, NULL);
+static DEVICE_ATTR_RO(tty_lflag);
-static ssize_t dgnc_tty_digi_flag_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_digi_flag_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -586,10 +583,10 @@ static ssize_t dgnc_tty_digi_flag_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgnc_tty_digi_flag_show, NULL);
+static DEVICE_ATTR_RO(tty_digi_flag);
-static ssize_t dgnc_tty_rxcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_rxcount_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -611,10 +608,10 @@ static ssize_t dgnc_tty_rxcount_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgnc_tty_rxcount_show, NULL);
+static DEVICE_ATTR_RO(tty_rxcount);
-static ssize_t dgnc_tty_txcount_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_txcount_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -636,10 +633,10 @@ static ssize_t dgnc_tty_txcount_show(struct device *d,
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
}
-static DEVICE_ATTR(txcount, S_IRUSR, dgnc_tty_txcount_show, NULL);
+static DEVICE_ATTR_RO(tty_txcount);
-static ssize_t dgnc_tty_name_show(struct device *d,
- struct device_attribute *attr, char *buf)
+static ssize_t tty_custom_name_show(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct dgnc_board *bd;
struct channel_t *ch;
@@ -663,24 +660,24 @@ static ssize_t dgnc_tty_name_show(struct device *d,
(un->un_type == DGNC_PRINT) ? "pr" : "tty",
bd->boardnum + 1, 'a' + ch->ch_portnum);
}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgnc_tty_name_show, NULL);
+static DEVICE_ATTR_RO(tty_custom_name);
static struct attribute *dgnc_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
+ &dev_attr_tty_state.attr,
+ &dev_attr_tty_baud.attr,
+ &dev_attr_tty_msignals.attr,
+ &dev_attr_tty_iflag.attr,
+ &dev_attr_tty_cflag.attr,
+ &dev_attr_tty_oflag.attr,
+ &dev_attr_tty_lflag.attr,
+ &dev_attr_tty_digi_flag.attr,
+ &dev_attr_tty_rxcount.attr,
+ &dev_attr_tty_txcount.attr,
+ &dev_attr_tty_custom_name.attr,
NULL
};
-static struct attribute_group dgnc_tty_attribute_group = {
+static const struct attribute_group dgnc_tty_attribute_group = {
.name = NULL,
.attrs = dgnc_sysfs_tty_entries,
};
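
The wholesale renames above follow from DRIVER_ATTR_RO()/DRIVER_ATTR_RW() and DEVICE_ATTR_RO(), which derive everything from the attribute name alone. A rough sketch of the expansions (simplified from include/linux/device.h and include/linux/sysfs.h; the real macros carry a few more details):

#define DRIVER_ATTR_RO(_name) \
	struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
#define DRIVER_ATTR_RW(_name) \
	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

/* __ATTR_RO(_name) wires in _name##_show with mode 0444, and __ATTR_RW()
 * wires in _name##_show/_name##_store with mode 0644.  That is why e.g.
 * dgnc_tty_state_show becomes tty_state_show, producing dev_attr_tty_state
 * for the attribute array, and why the mode widens from S_IRUSR to 0444.
 */
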
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 4eeecc992a02..953d9310fa74 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -45,7 +45,6 @@
/*
* internal variables
*/
-static struct dgnc_board *dgnc_BoardsByMajor[256];
static unsigned char *dgnc_TmpWriteBuf;
/*
@@ -100,7 +99,7 @@ static void dgnc_tty_unthrottle(struct tty_struct *tty);
static void dgnc_tty_flush_chars(struct tty_struct *tty);
static void dgnc_tty_flush_buffer(struct tty_struct *tty);
static void dgnc_tty_hangup(struct tty_struct *tty);
-static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command,
+static int dgnc_set_modem_info(struct channel_t *ch, unsigned int command,
unsigned int __user *value);
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value);
@@ -186,7 +185,8 @@ int dgnc_tty_register(struct dgnc_board *brd)
if (IS_ERR(brd->serial_driver))
return PTR_ERR(brd->serial_driver);
- snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_",
+ brd->boardnum);
brd->serial_driver->name = brd->serial_name;
brd->serial_driver->name_base = 0;
@@ -203,15 +203,11 @@ int dgnc_tty_register(struct dgnc_board *brd)
*/
tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
- if (!brd->dgnc_major_serial_registered) {
- /* Register tty devices */
- rc = tty_register_driver(brd->serial_driver);
- if (rc < 0) {
- dev_dbg(&brd->pdev->dev,
- "Can't register tty device (%d)\n", rc);
- goto free_serial_driver;
- }
- brd->dgnc_major_serial_registered = true;
+ rc = tty_register_driver(brd->serial_driver);
+ if (rc < 0) {
+ dev_dbg(&brd->pdev->dev,
+ "Can't register tty device (%d)\n", rc);
+ goto free_serial_driver;
}
/*
@@ -246,20 +242,14 @@ int dgnc_tty_register(struct dgnc_board *brd)
*/
tty_set_operations(brd->print_driver, &dgnc_tty_ops);
- if (!brd->dgnc_major_transparent_print_registered) {
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->print_driver);
- if (rc < 0) {
- dev_dbg(&brd->pdev->dev,
- "Can't register Transparent Print device(%d)\n",
- rc);
- goto free_print_driver;
- }
- brd->dgnc_major_transparent_print_registered = true;
+ rc = tty_register_driver(brd->print_driver);
+ if (rc < 0) {
+ dev_dbg(&brd->pdev->dev,
+ "Can't register Transparent Print device(%d)\n",
+ rc);
+ goto free_print_driver;
}
- dgnc_BoardsByMajor[brd->serial_driver->major] = brd;
-
return 0;
free_print_driver:
@@ -272,6 +262,14 @@ free_serial_driver:
return rc;
}
+void dgnc_tty_unregister(struct dgnc_board *brd)
+{
+ tty_unregister_driver(brd->print_driver);
+ tty_unregister_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
+ put_tty_driver(brd->serial_driver);
+}
+
/*
* dgnc_tty_init()
*
@@ -378,38 +376,30 @@ void dgnc_tty_post_uninit(void)
}
/*
- * dgnc_tty_uninit()
+ * dgnc_cleanup_tty()
*
* Uninitialize the TTY portion of this driver. Free all memory and
* resources.
*/
-void dgnc_tty_uninit(struct dgnc_board *brd)
+void dgnc_cleanup_tty(struct dgnc_board *brd)
{
int i = 0;
- if (brd->dgnc_major_serial_registered) {
- dgnc_BoardsByMajor[brd->serial_driver->major] = NULL;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_tun.un_sysfs);
- tty_unregister_device(brd->serial_driver, i);
- }
- tty_unregister_driver(brd->serial_driver);
- brd->dgnc_major_serial_registered = false;
+ for (i = 0; i < brd->nasync; i++) {
+ if (brd->channels[i])
+ dgnc_remove_tty_sysfs(brd->channels[i]->
+ ch_tun.un_sysfs);
+ tty_unregister_device(brd->serial_driver, i);
}
+ tty_unregister_driver(brd->serial_driver);
- if (brd->dgnc_major_transparent_print_registered) {
- dgnc_BoardsByMajor[brd->print_driver->major] = NULL;
- for (i = 0; i < brd->nasync; i++) {
- if (brd->channels[i])
- dgnc_remove_tty_sysfs(brd->channels[i]->
- ch_pun.un_sysfs);
- tty_unregister_device(brd->print_driver, i);
- }
- tty_unregister_driver(brd->print_driver);
- brd->dgnc_major_transparent_print_registered = false;
+ for (i = 0; i < brd->nasync; i++) {
+ if (brd->channels[i])
+ dgnc_remove_tty_sysfs(brd->channels[i]->
+ ch_pun.un_sysfs);
+ tty_unregister_device(brd->print_driver, i);
}
+ tty_unregister_driver(brd->print_driver);
put_tty_driver(brd->serial_driver);
put_tty_driver(brd->print_driver);
@@ -640,19 +630,12 @@ exit_unlock:
************************************************************************/
void dgnc_carrier(struct channel_t *ch)
{
- struct dgnc_board *bd;
-
int virt_carrier = 0;
int phys_carrier = 0;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return;
-
if (ch->ch_mistat & UART_MSR_DCD)
phys_carrier = 1;
@@ -947,6 +930,24 @@ void dgnc_wakeup_writes(struct channel_t *ch)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
+struct dgnc_board *find_board_by_major(unsigned int major)
+{
+ int i;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ struct dgnc_board *brd = dgnc_board[i];
+
+ if (!brd)
+ return NULL;
+
+ if (major == brd->serial_driver->major ||
+ major == brd->print_driver->major)
+ return brd;
+ }
+
+ return NULL;
+}
+
/************************************************************************
*
* TTY Entry points and helper functions
@@ -976,7 +977,7 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
return -ENXIO;
/* Get board pointer from our array of majors we have allocated */
- brd = dgnc_BoardsByMajor[major];
+ brd = find_board_by_major(major);
if (!brd)
return -ENXIO;
@@ -1172,17 +1173,12 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
struct channel_t *ch)
{
int retval = 0;
- struct un_t *un = NULL;
+ struct un_t *un = tty->driver_data;
unsigned long flags;
uint old_flags = 0;
int sleep_on_un_flags = 0;
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
- ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!file)
return -ENXIO;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1301,15 +1297,9 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
*/
static void dgnc_tty_hangup(struct tty_struct *tty)
{
- struct un_t *un;
-
if (!tty || tty->magic != TTY_MAGIC)
return;
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return;
-
/* flush the transmit queues */
dgnc_tty_flush_buffer(tty);
}
@@ -1510,18 +1500,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
* returns the new bytes_available. This only affects printer
* output.
*/
-static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
+static int dgnc_maxcps_room(struct channel_t *ch, int bytes_available)
{
- struct un_t *un = tty->driver_data;
- struct channel_t *ch = un->un_ch;
-
- /*
- * If its not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGNC_PRINT)
- return bytes_available;
-
if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
int cps_limit = 0;
unsigned long current_time = jiffies;
@@ -1585,7 +1565,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
ret += WQUEUESIZE;
/* Limit printer to maxcps */
- ret = dgnc_maxcps_room(tty, ret);
+ if (un->un_type != DGNC_PRINT)
+ ret = dgnc_maxcps_room(ch, ret);
/*
* If we are printer device, leave space for
@@ -1677,7 +1658,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
* Limit printer output to maxcps overall, with bursts allowed
* up to bufsize characters.
*/
- bufcount = dgnc_maxcps_room(tty, bufcount);
+ if (un->un_type != DGNC_PRINT)
+ bufcount = dgnc_maxcps_room(ch, bufcount);
/*
* Take minimum of what the user wants to send, and the
@@ -1984,7 +1966,7 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
static inline int dgnc_get_mstat(struct channel_t *ch)
{
unsigned char mstat;
- int result = -EIO;
+ int result = 0;
unsigned long flags;
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
@@ -1996,8 +1978,6 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
spin_unlock_irqrestore(&ch->ch_lock, flags);
- result = 0;
-
if (mstat & UART_MCR_DTR)
result |= TIOCM_DTR;
if (mstat & UART_MCR_RTS)
@@ -2028,32 +2008,14 @@ static int dgnc_get_modem_info(struct channel_t *ch,
*
* Set modem signals, called by ld.
*/
-static int dgnc_set_modem_info(struct tty_struct *tty,
+static int dgnc_set_modem_info(struct channel_t *ch,
unsigned int command,
unsigned int __user *value)
{
- struct dgnc_board *bd;
- struct channel_t *ch;
- struct un_t *un;
int ret = -ENXIO;
unsigned int arg = 0;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
-
ret = get_user(arg, value);
if (ret)
return ret;
@@ -2593,9 +2555,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = put_user(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long __user *)arg);
- return rc;
+ return put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long __user *)arg);
case TIOCSSOFTCAR:
@@ -2620,7 +2581,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TIOCMBIC:
case TIOCMSET:
spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_set_modem_info(tty, cmd, uarg);
+ return dgnc_set_modem_info(ch, cmd, uarg);
/*
* Here are any additional ioctl's that we want to implement
@@ -2766,8 +2727,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_GETCUSTOMBAUD:
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = put_user(ch->ch_custom_speed, (unsigned int __user *)arg);
- return rc;
+ return put_user(ch->ch_custom_speed,
+ (unsigned int __user *)arg);
case DIGI_SETCUSTOMBAUD:
{
@@ -2853,8 +2814,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
events |= (EV_IPU | EV_IPS);
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = put_user(events, (unsigned int __user *)arg);
- return rc;
+ return put_user(events, (unsigned int __user *)arg);
}
/*
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 21d3369b875c..24c9a412211e 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -19,12 +19,13 @@
#include "dgnc_driver.h"
int dgnc_tty_register(struct dgnc_board *brd);
+void dgnc_tty_unregister(struct dgnc_board *brd);
int dgnc_tty_preinit(void);
int dgnc_tty_init(struct dgnc_board *);
void dgnc_tty_post_uninit(void);
-void dgnc_tty_uninit(struct dgnc_board *);
+void dgnc_cleanup_tty(struct dgnc_board *);
void dgnc_input(struct channel_t *ch);
void dgnc_carrier(struct channel_t *ch);
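
With the registered-once flags gone from struct dgnc_board, registration and teardown are expected to pair up per board. A hedged sketch of that pairing, using illustrative caller names (example_board_bringup/example_board_teardown are not from this patch):

static int example_board_bringup(struct dgnc_board *brd)
{
	int rc;

	rc = dgnc_tty_register(brd);		/* serial + print drivers */
	if (rc)
		return rc;

	rc = dgnc_tty_init(brd);		/* per-port devices */
	if (rc) {
		/* drivers are registered but no port devices exist yet */
		dgnc_tty_unregister(brd);
		return rc;
	}

	return 0;
}

static void example_board_teardown(struct dgnc_board *brd)
{
	/* removes the per-port devices, then unregisters and drops
	 * both tty drivers (see dgnc_cleanup_tty above)
	 */
	dgnc_cleanup_tty(brd);
}
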
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 3b56b2826263..c3e298843b43 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -131,7 +131,6 @@ static void _nbu2ss_dump_register(struct nbu2ss_udc *udc)
reg_data = _nbu2ss_readl(
(u32 *)IO_ADDRESS(USB_BASE_ADDRESS + i + 12));
dev_dbg(&udc->dev, " %08x\n", (int)reg_data);
-
}
spin_lock(&udc->lock);
@@ -478,9 +477,9 @@ static void _nbu2ss_dma_map_single(
)
{
if (req->req.dma == DMA_ADDR_INVALID) {
- if (req->unaligned)
+ if (req->unaligned) {
req->req.dma = ep->phys_buf;
- else {
+ } else {
req->req.dma = dma_map_single(
udc->gadget.dev.parent,
req->req.buf,
@@ -1236,9 +1235,9 @@ static int _nbu2ss_start_transfer(
req->dma_flag = FALSE;
req->div_len = 0;
- if (req->req.length == 0)
+ if (req->req.length == 0) {
req->zero = false;
- else {
+ } else {
if ((req->req.length % ep->ep.maxpacket) == 0)
req->zero = req->req.zero;
else
@@ -1941,9 +1940,9 @@ static void _nbu2ss_ep_done(
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
- if (ep->stalled)
+ if (ep->stalled) {
_nbu2ss_epn_set_stall(udc, ep);
- else {
+ } else {
if (!list_empty(&ep->queue))
_nbu2ss_restert_transfer(ep);
}
@@ -2264,9 +2263,7 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
if (udc->udc_enabled)
return 0;
- /*
- Reset
- */
+ /* Reset */
_nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
udelay(EPC_RST_DISABLE_TIME); /* 1us wait */
@@ -2476,8 +2473,9 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
_nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
_nbu2ss_writel(&preg->USB_INT_ENA, 0);
status = 0;
- } else
+ } else {
status = _nbu2ss_readl(&preg->USB_INT_STA);
+ }
if (status == 0)
break;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 39769e3a801c..789bfb97143c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -586,7 +586,7 @@ struct nbu2ss_udc {
unsigned remote_wakeup:1;
unsigned udc_enabled:1;
- unsigned mA;
+ unsigned int mA;
u32 curr_config; /* Current Configuration Number */
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 82b46cd27ca7..7561385761e9 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -350,8 +350,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
}
- /* 1 string = 2 pages */
- for (y = addr_win.ys_page; y <= addr_win.ye_page; ++y) {
+ /* 1 string = 2 pages */
+ for (y = addr_win.ys_page; y <= addr_win.ye_page; ++y) {
/* left half of display */
if (addr_win.xs < par->info->var.xres / 2) {
construct_line_bitmap(par, buf, convert_buf,
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 6ff222d6d6d6..278e4c7e95e5 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -29,7 +29,7 @@
#define DEFAULT_GAMMA "07 07 6 0 0 0 5 5 4 0\n" \
"07 08 4 7 5 1 2 0 7 7"
-static unsigned read_devicecode(struct fbtft_par *par)
+static unsigned int read_devicecode(struct fbtft_par *par)
{
int ret;
u8 rxbuf[8] = {0, };
@@ -41,7 +41,7 @@ static unsigned read_devicecode(struct fbtft_par *par)
static int init_display(struct fbtft_par *par)
{
- unsigned devcode;
+ unsigned int devcode;
par->fbtftops.reset(par);
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index fdf98d37550e..c31e2e051d4a 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -32,26 +32,26 @@
#define DEFAULT_GAMMA "0F 00 7 2 0 0 6 5 4 1\n" \
"04 16 2 7 6 3 2 1 7 7"
-static unsigned bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
+static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
module_param(bt, uint, 0);
MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
-static unsigned vc = 0x03; /* Vci1=Vci*0.80 */
+static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
module_param(vc, uint, 0);
MODULE_PARM_DESC(vc,
"Sets the ratio factor of Vci to generate the reference voltages Vci1");
-static unsigned vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
+static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
module_param(vrh, uint, 0);
MODULE_PARM_DESC(vrh,
"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
-static unsigned vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
+static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
module_param(vdv, uint, 0);
MODULE_PARM_DESC(vdv,
"Select the factor of VREG1OUT to set the amplitude of Vcom");
-static unsigned vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
+static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
module_param(vcm, uint, 0);
MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
index a6b43323f29a..a4710dc067ef 100644
--- a/drivers/staging/fbtft/fb_pcd8544.c
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -32,11 +32,11 @@
#define TXBUFLEN (84 * 6)
#define DEFAULT_GAMMA "40" /* gamma controls the contrast in this driver */
-static unsigned tc;
+static unsigned int tc;
module_param(tc, uint, 0);
MODULE_PARM_DESC(tc, "TC[1:0] Temperature coefficient: 0-3 (default: 0)");
-static unsigned bs = 4;
+static unsigned int bs = 4;
module_param(bs, uint, 0);
MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 3113355062fc..774b0ff69e6d 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -113,12 +113,14 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define MV BIT(5)
static int set_var(struct fbtft_par *par)
{
- /* Memory data access control (0x36h)
- RGB/BGR:
- 1. Mode selection pin SRGB
- RGB H/W pin for color filter setting: 0=RGB, 1=BGR
- 2. MADCTL RGB bit
- RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ /*
+ * Memory data access control (0x36h)
+ * RGB/BGR:
+ * 1. Mode selection pin SRGB
+ * RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ * 2. MADCTL RGB bit
+ * RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR
+ */
switch (par->info->var.rotate) {
case 0:
write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index d6ae76b318ad..9b1d70b218df 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -125,10 +125,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
- PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
-*/
+ * Gamma string format:
+ * PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
+ * PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index 1162c08613fa..25f9fbe1e76f 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -29,7 +29,7 @@
#define DEFAULT_GAMMA "02 03 2 5 7 7 4 2 4 2\n" \
"02 03 2 5 7 5 4 2 4 2"
-static unsigned reg11 = 0x6040;
+static unsigned int reg11 = 0x6040;
module_param(reg11, uint, 0);
MODULE_PARM_DESC(reg11, "Register 11h value");
@@ -131,10 +131,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
- VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
-*/
+ * Gamma string format:
+ * VRP0 VRP1 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 PKP5
+ * VRN0 VRN1 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 PKN5
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index e0b34a42c9c6..80fc57029fee 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -27,15 +27,15 @@
#define HEIGHT 64
/*
- write_reg() caveat:
-
- This doesn't work because D/C has to be LOW for both values:
- write_reg(par, val1, val2);
-
- Do it like this:
- write_reg(par, val1);
- write_reg(par, val2);
-*/
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
/* Init sequence taken from the Adafruit SSD1306 Arduino library */
static int init_display(struct fbtft_par *par)
@@ -113,8 +113,9 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xA4);
/* Set Normal Display
- 0 in RAM: OFF in display panel
- 1 in RAM: ON in display panel */
+ * 0 in RAM: OFF in display panel
+ * 1 in RAM: ON in display panel
+ */
write_reg(par, 0xA6);
/* Set Display ON */
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index bd294f886c5f..1d74ac1343a8 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -102,26 +102,26 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
}
/*
- Grayscale Lookup Table
- GS1 - GS63
- The driver Gamma curve contains the relative values between the entries
- in the Lookup table.
-
- From datasheet:
- 8.8 Gray Scale Decoder
-
- there are total 180 Gamma Settings (Setting 0 to Setting 180)
- available for the Gray Scale table.
-
- The gray scale is defined in incremental way, with reference
- to the length of previous table entry:
- Setting of GS1 has to be >= 0
- Setting of GS2 has to be > Setting of GS1 +1
- Setting of GS3 has to be > Setting of GS2 +1
- :
- Setting of GS63 has to be > Setting of GS62 +1
-
-*/
+ * Grayscale Lookup Table
+ * GS1 - GS63
+ * The driver Gamma curve contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * From datasheet:
+ * 8.8 Gray Scale Decoder
+ *
+ * there are a total of 180 Gamma Settings (Setting 0 to Setting 180)
+ * available for the Gray Scale table.
+ *
+ * The gray scale is defined in an incremental way, with reference
+ * to the length of the previous table entry:
+ * Setting of GS1 has to be >= 0
+ * Setting of GS2 has to be > Setting of GS1 +1
+ * Setting of GS3 has to be > Setting of GS2 +1
+ * :
+ * Setting of GS63 has to be > Setting of GS62 +1
+ *
+ */
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
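
The datasheet constraint quoted above (each GS setting must exceed its predecessor by more than 1) amounts to a simple monotonicity check on the absolute settings before the table is programmed. A sketch only, not the driver's actual validation, with an illustrative helper name and assuming this driver's GAMMA_LEN constant:

static int ssd1331_check_grayscale(const unsigned long *settings)
{
	int i;

	/* GS1 >= 0 is trivially true for unsigned values; check the rest */
	for (i = 1; i < GAMMA_LEN; i++)
		if (settings[i] <= settings[i - 1] + 1)
			return -EINVAL;	/* not strictly incremental */

	return 0;
}
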
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index cef33e439f46..200aa9ba98f9 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -25,8 +25,8 @@ static void register_onboard_backlight(struct fbtft_par *par);
static int init_display(struct fbtft_par *par)
{
- if (par->pdata
- && par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
+ if (par->pdata &&
+ par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
/* module uses onboard GPIO for panel power */
par->fbtftops.register_backlight = register_onboard_backlight;
}
@@ -66,7 +66,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int set_var(struct fbtft_par *par)
{
- unsigned remap;
+ unsigned int remap;
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register A0h */
@@ -80,10 +80,10 @@ static int set_var(struct fbtft_par *par)
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0xA0, remap | 0x00 | 1<<4);
+ write_reg(par, 0xA0, remap | 0x00 | 1 << 4);
break;
case 270:
- write_reg(par, 0xA0, remap | 0x03 | 1<<4);
+ write_reg(par, 0xA0, remap | 0x03 | 1 << 4);
break;
case 180:
write_reg(par, 0xA0, remap | 0x02);
@@ -189,8 +189,8 @@ static int update_onboard_backlight(struct backlight_device *bd)
"%s: power=%d, fb_blank=%d\n",
__func__, bd->props.power, bd->props.fb_blank);
- on = (bd->props.power == FB_BLANK_UNBLANK)
- && (bd->props.fb_blank == FB_BLANK_UNBLANK);
+ on = (bd->props.power == FB_BLANK_UNBLANK) &&
+ (bd->props.fb_blank == FB_BLANK_UNBLANK);
/* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */
write_reg(par, 0xB5, on ? 0x03 : 0x02);
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index c5e51fe1aad5..6670f2bb62ec 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -33,35 +33,43 @@ static int default_init_sequence[] = {
-2, 500, /* delay */
/* FRMCTR1 - frame rate control: normal mode
- frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ * frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D)
+ */
-1, 0xB1, 0x01, 0x2C, 0x2D,
/* FRMCTR2 - frame rate control: idle mode
- frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D) */
+ * frame rate = fosc / (1 x 2 + 40) * (LINE + 2C + 2D)
+ */
-1, 0xB2, 0x01, 0x2C, 0x2D,
/* FRMCTR3 - frame rate control - partial mode
- dot inversion mode, line inversion mode */
+ * dot inversion mode, line inversion mode
+ */
-1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
/* INVCTR - display inversion control
- no inversion */
+ * no inversion
+ */
-1, 0xB4, 0x07,
/* PWCTR1 - Power Control
- -4.6V, AUTO mode */
+ * -4.6V, AUTO mode
+ */
-1, 0xC0, 0xA2, 0x02, 0x84,
/* PWCTR2 - Power Control
- VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD */
+ * VGH25 = 2.4C VGSEL = -10 VGH = 3 * AVDD
+ */
-1, 0xC1, 0xC5,
/* PWCTR3 - Power Control
- Opamp current small, Boost frequency */
+ * Opamp current small, Boost frequency
+ */
-1, 0xC2, 0x0A, 0x00,
/* PWCTR4 - Power Control
- BCLK/2, Opamp current small & Medium low */
+ * BCLK/2, Opamp current small & Medium low
+ */
-1, 0xC3, 0x8A, 0x2A,
/* PWCTR5 - Power Control */
@@ -101,11 +109,12 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int set_var(struct fbtft_par *par)
{
/* MADCTL - Memory data access control
- RGB/BGR:
- 1. Mode selection pin SRGB
- RGB H/W pin for color filter setting: 0=RGB, 1=BGR
- 2. MADCTL RGB bit
- RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
+ * RGB/BGR:
+ * 1. Mode selection pin SRGB
+ * RGB H/W pin for color filter setting: 0=RGB, 1=BGR
+ * 2. MADCTL RGB bit
+ * RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR
+ */
switch (par->info->var.rotate) {
case 0:
write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
@@ -129,10 +138,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
- VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
-*/
+ * Gamma string format:
+ * VRF0P VOS0P PK0P PK1P PK2P PK3P PK4P PK5P PK6P PK7P PK8P PK9P SELV0P SELV1P SELV62P SELV63P
+ * VRF0N VOS0N PK0N PK1N PK2N PK3N PK4N PK5N PK6N PK7N PK8N PK9N SELV0N SELV1N SELV62N SELV63N
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
diff --git a/drivers/staging/fbtft/fb_tls8204.c b/drivers/staging/fbtft/fb_tls8204.c
index 2183f98c8315..ea2ddacb9468 100644
--- a/drivers/staging/fbtft/fb_tls8204.c
+++ b/drivers/staging/fbtft/fb_tls8204.c
@@ -35,7 +35,7 @@
/* gamma is used to control contrast in this driver */
#define DEFAULT_GAMMA "40"
-static unsigned bs = 4;
+static unsigned int bs = 4;
module_param(bs, uint, 0);
MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
@@ -44,21 +44,21 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* Enter extended command mode */
- write_reg(par, 0x21); /* 5:1 1
- 2:0 PD - Powerdown control: chip is active
- 1:0 V - Entry mode: horizontal addressing
- 0:1 H - Extended instruction set control:
- extended
- */
+ write_reg(par, 0x21); /* 5:1 1
+ * 2:0 PD - Powerdown control: chip is active
+ * 1:0 V - Entry mode: horizontal addressing
+ * 0:1 H - Extended instruction set control:
+ * extended
+ */
/* H=1 Bias system */
- write_reg(par, 0x10 | (bs & 0x7)); /*
- 4:1 1
- 3:0 0
- 2:x BS2 - Bias System
- 1:x BS1
- 0:x BS0
- */
+ write_reg(par, 0x10 | (bs & 0x7));
+ /* 4:1 1
+ * 3:0 0
+ * 2:x BS2 - Bias System
+ * 1:x BS1
+ * 0:x BS0
+ */
/* Set the address of the first display line. */
write_reg(par, 0x04 | (64 >> 6));
@@ -68,12 +68,12 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x20);
/* H=0 Display control */
- write_reg(par, 0x08 | 4); /*
- 3:1 1
- 2:1 D - DE: 10=normal mode
- 1:0 0
- 0:0 E
- */
+ write_reg(par, 0x08 | 4);
+ /* 3:1 1
+ * 2:1 D - DE: 10=normal mode
+ * 1:0 0
+ * 0:0 E
+ */
return 0;
}
@@ -81,15 +81,15 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
/* H=0 Set X address of RAM */
- write_reg(par, 0x80); /* 7:1 1
- 6-0: X[6:0] - 0x00
- */
+ write_reg(par, 0x80); /* 7:1 1
+ * 6-0: X[6:0] - 0x00
+ */
/* H=0 Set Y address of RAM */
- write_reg(par, 0x40); /* 7:0 0
- 6:1 1
- 2-0: Y[2:0] - 0x0
- */
+ write_reg(par, 0x40); /* 7:0 0
+ * 6:1 1
+ * 2-0: Y[2:0] - 0x0
+ */
}
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
@@ -100,8 +100,9 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
for (y = 0; y < HEIGHT / 8; y++) {
u8 *buf = par->txbuf.buf;
- /* The display is 102x68 but the LCD is 84x48. Set
- the write pointer at the start of each row. */
+ /* The display is 102x68 but the LCD is 84x48.
+ * Set the write pointer at the start of each row.
+ */
gpio_set_value(par->gpio.dc, 0);
write_reg(par, 0x80 | 0);
write_reg(par, 0x40 | y);
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index e87401aacfb3..b33b73f17da4 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -41,30 +41,30 @@
*/
/* BR -> actual ratio: 0-3 -> 5, 10, 11, 13 */
-static unsigned ratio = 2;
+static unsigned int ratio = 2;
module_param(ratio, uint, 0);
MODULE_PARM_DESC(ratio, "BR[1:0] Bias voltage ratio: 0-3 (default: 2)");
-static unsigned gain = 3;
+static unsigned int gain = 3;
module_param(gain, uint, 0);
MODULE_PARM_DESC(gain, "GN[1:0] Bias voltage gain: 0-3 (default: 3)");
-static unsigned pot = 16;
+static unsigned int pot = 16;
module_param(pot, uint, 0);
MODULE_PARM_DESC(pot, "PM[6:0] Bias voltage pot.: 0-63 (default: 16)");
/* TC -> % compensation per deg C: 0-3 -> -.05, -.10, -.015, -.20 */
-static unsigned temp;
+static unsigned int temp;
module_param(temp, uint, 0);
MODULE_PARM_DESC(temp, "TC[1:0] Temperature compensation: 0-3 (default: 0)");
/* PC[1:0] -> LCD capacitance: 0-3 -> <20nF, 20-28 nF, 29-40 nF, 40-56 nF */
-static unsigned load = 1;
+static unsigned int load = 1;
module_param(load, uint, 0);
MODULE_PARM_DESC(load, "PC[1:0] Panel Loading: 0-3 (default: 1)");
/* PC[3:2] -> V_LCD: 0, 1, 3 -> ext., int. with ratio = 5, int. standard */
-static unsigned pump = 3;
+static unsigned int pump = 3;
module_param(pump, uint, 0);
MODULE_PARM_DESC(pump, "PC[3:2] Pump control: 0,1,3 (default: 3)");
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index f8cb610a7b69..a52e28a48825 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -67,7 +67,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
- unsigned start_line, end_line;
+ unsigned int start_line, end_line;
u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
u16 *pos = par->txbuf.buf + 1;
u16 *buf16 = par->txbuf.buf + 10;
@@ -104,7 +104,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
{
- unsigned start_line, end_line;
+ unsigned int start_line, end_line;
u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
u16 *pos = par->txbuf.buf + 1;
u8 *buf8 = par->txbuf.buf + 10;
@@ -137,7 +137,7 @@ static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
return 0;
}
-static unsigned firmware_version(struct fbtft_par *par)
+static unsigned int firmware_version(struct fbtft_par *par)
{
u8 rxbuf[4] = {0, };
@@ -152,7 +152,7 @@ static unsigned firmware_version(struct fbtft_par *par)
static int init_display(struct fbtft_par *par)
{
int ret;
- unsigned version;
+ unsigned int version;
u8 save_mode;
/* enable SPI interface by having CS and MOSI low during reset */
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 83505bce628a..ec45043c0830 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -92,7 +92,8 @@ void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
if (par->spi && (par->spi->bits_per_word == 8)) {
/* we're emulating 9-bit, pad start of buffer with no-ops
- (assuming here that zero is a no-op) */
+ * (assuming here that zero is a no-op)
+ */
pad = (len % 4) ? 4 - (len % 4) : 0;
for (i = 0; i < pad; i++)
*buf++ = 0x000;
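As an aside, the padding computed above simply rounds the transfer length up to a multiple of four words; a stand-alone restatement of that arithmetic (sketch only, names are illustrative):

#include <stdio.h>

/* Mirrors: pad = (len % 4) ? 4 - (len % 4) : 0; */
static int noop_pad(int len)
{
        return (len % 4) ? 4 - (len % 4) : 0;
}

int main(void)
{
        int len;

        for (len = 1; len <= 8; len++)
                printf("len=%d pad=%d total=%d\n",
                       len, noop_pad(len), len + noop_pad(len));
        return 0;
}

Every total printed is a multiple of 4, which is what the leading no-op (zero) words guarantee for the emulated 9-bit transfers.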
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 0c1a77cafe14..587f68aa466c 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -341,8 +341,8 @@ static void fbtft_reset(struct fbtft_par *par)
mdelay(120);
}
-static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
- unsigned end_line)
+static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
+ unsigned int end_line)
{
size_t offset, len;
ktime_t ts_start, ts_end;
@@ -391,11 +391,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
if (unlikely(timeit)) {
ts_end = ktime_get();
- if (ktime_to_ns(par->update_time))
+ if (!ktime_to_ns(par->update_time))
par->update_time = ts_start;
- par->update_time = ts_start;
fps = ktime_us_delta(ts_start, par->update_time);
+ par->update_time = ts_start;
fps = fps ? 1000000 / fps : 0;
throughput = ktime_us_delta(ts_end, ts_start);
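After this reordering, the previous frame's timestamp is consumed for the fps delta before it is overwritten with the current one, and a never-set (zero) timestamp is seeded on the first pass. A user-space sketch of the same bookkeeping, with clock_gettime() standing in for ktime_get() (illustrative only, not the driver code):

#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static long long last_update_us;        /* 0 until the first frame */

static void frame_timing(void)
{
        long long ts_start = now_us();
        long long delta_us, fps;

        if (!last_update_us)            /* first frame: seed the timestamp */
                last_update_us = ts_start;

        delta_us = ts_start - last_update_us;   /* consume the old value... */
        last_update_us = ts_start;              /* ...then overwrite it */
        fps = delta_us ? 1000000 / delta_us : 0;

        printf("fps=%lld\n", fps);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                frame_timing();
        return 0;
}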
@@ -435,10 +435,10 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
struct fbtft_par *par = info->par;
- unsigned dirty_lines_start, dirty_lines_end;
+ unsigned int dirty_lines_start, dirty_lines_end;
struct page *page;
unsigned long index;
- unsigned y_low = 0, y_high = 0;
+ unsigned int y_low = 0, y_high = 0;
int count = 0;
spin_lock(&par->dirty_lock);
@@ -526,18 +526,18 @@ static ssize_t fbtft_fb_write(struct fb_info *info, const char __user *buf,
}
/* from pxafb.c */
-static unsigned int chan_to_field(unsigned chan, struct fb_bitfield *bf)
+static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
{
chan &= 0xffff;
chan >>= 16 - bf->length;
return chan << bf->offset;
}
-static int fbtft_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
+static int fbtft_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp,
struct fb_info *info)
{
- unsigned val;
+ unsigned int val;
int ret = 1;
dev_dbg(info->dev,
@@ -654,11 +654,11 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
u8 *vmem = NULL;
void *txbuf = NULL;
void *buf = NULL;
- unsigned width;
- unsigned height;
+ unsigned int width;
+ unsigned int height;
int txbuflen = display->txbuflen;
- unsigned bpp = display->bpp;
- unsigned fps = display->fps;
+ unsigned int bpp = display->bpp;
+ unsigned int fps = display->fps;
int vmem_size, i;
int *init_sequence = display->init_sequence;
char *gamma = display->gamma;
@@ -820,6 +820,8 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
/* Transmit buffer */
if (txbuflen == -1)
txbuflen = vmem_size + 2; /* add in case startbyte is used */
+ if (txbuflen >= vmem_size + 2)
+ txbuflen = 0;
#ifdef __LITTLE_ENDIAN
if ((!txbuflen) && (bpp > 8))
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index d3bc3943a983..89c4b5b76ce6 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -38,7 +38,7 @@
*/
struct fbtft_gpio {
char name[FBTFT_GPIO_NAME_SIZE];
- unsigned gpio;
+ unsigned int gpio;
};
struct fbtft_par;
@@ -79,7 +79,7 @@ struct fbtft_ops {
void (*reset)(struct fbtft_par *par);
void (*mkdirty)(struct fb_info *info, int from, int to);
void (*update_display)(struct fbtft_par *par,
- unsigned start_line, unsigned end_line);
+ unsigned int start_line, unsigned int end_line);
int (*init_display)(struct fbtft_par *par);
int (*blank)(struct fbtft_par *par, bool on);
@@ -115,14 +115,14 @@ struct fbtft_ops {
* This structure is not stored by FBTFT except for init_sequence.
*/
struct fbtft_display {
- unsigned width;
- unsigned height;
- unsigned regwidth;
- unsigned buswidth;
- unsigned backlight;
+ unsigned int width;
+ unsigned int height;
+ unsigned int regwidth;
+ unsigned int buswidth;
+ unsigned int backlight;
struct fbtft_ops fbtftops;
- unsigned bpp;
- unsigned fps;
+ unsigned int bpp;
+ unsigned int fps;
int txbuflen;
int *init_sequence;
char *gamma;
@@ -146,9 +146,9 @@ struct fbtft_display {
struct fbtft_platform_data {
struct fbtft_display display;
const struct fbtft_gpio *gpios;
- unsigned rotate;
+ unsigned int rotate;
bool bgr;
- unsigned fps;
+ unsigned int fps;
int txbuflen;
u8 startbyte;
char *gamma;
@@ -216,8 +216,8 @@ struct fbtft_par {
u8 startbyte;
struct fbtft_ops fbtftops;
spinlock_t dirty_lock;
- unsigned dirty_lines_start;
- unsigned dirty_lines_end;
+ unsigned int dirty_lines_start;
+ unsigned int dirty_lines_end;
struct {
int reset;
int dc;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index e4a355aefb25..e9211831b6a1 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -32,20 +32,20 @@ static char *name;
module_param(name, charp, 0);
MODULE_PARM_DESC(name, "Devicename (required). name=list => list all supported devices.");
-static unsigned rotate;
+static unsigned int rotate;
module_param(rotate, uint, 0);
MODULE_PARM_DESC(rotate,
"Angle to rotate display counter clockwise: 0, 90, 180, 270");
-static unsigned busnum;
+static unsigned int busnum;
module_param(busnum, uint, 0);
MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-static unsigned cs;
+static unsigned int cs;
module_param(cs, uint, 0);
MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-static unsigned speed;
+static unsigned int speed;
module_param(speed, uint, 0);
MODULE_PARM_DESC(speed, "SPI speed (override device default)");
@@ -58,7 +58,7 @@ module_param(gpios, charp, 0);
MODULE_PARM_DESC(gpios,
"List of gpios. Comma separated with the form: reset:23,dc:24 (when overriding the default, all gpios must be specified)");
-static unsigned fps;
+static unsigned int fps;
module_param(fps, uint, 0);
MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
@@ -76,7 +76,7 @@ module_param(bgr, int, 0);
MODULE_PARM_DESC(bgr,
"BGR bit (supported by some drivers).");
-static unsigned startbyte;
+static unsigned int startbyte;
module_param(startbyte, uint, 0);
MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
@@ -84,15 +84,15 @@ static bool custom;
module_param(custom, bool, 0);
MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-static unsigned width;
+static unsigned int width;
module_param(width, uint, 0);
MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-static unsigned height;
+static unsigned int height;
module_param(height, uint, 0);
MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-static unsigned buswidth = 8;
+static unsigned int buswidth = 8;
module_param(buswidth, uint, 0);
MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
@@ -106,7 +106,7 @@ module_param(debug, ulong, 0);
MODULE_PARM_DESC(debug,
"level: 0-7 (the remaining 29 bits is for advanced usage)");
-static unsigned verbose = 3;
+static unsigned int verbose = 3;
module_param(verbose, uint, 0);
MODULE_PARM_DESC(verbose,
"0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
@@ -1215,7 +1215,8 @@ static struct fbtft_device_display displays[] = {
}
}, {
/* This should be the last item.
- Used with the custom argument */
+ * Used with the custom argument
+ */
.name = "",
.spi = &(struct spi_board_info) {
.modalias = "",
@@ -1306,8 +1307,9 @@ static struct fbtft_gpio fbtft_device_param_gpios[MAX_GPIOS + 1] = { };
static void fbtft_device_pdev_release(struct device *dev)
{
/* Needed to silence this message:
-Device 'xxx' does not have a release() function, it is broken and must be fixed
-*/
+ * Device 'xxx' does not have a release() function,
+ * it is broken and must be fixed
+ */
}
static int spi_device_found(struct device *dev, void *data)
@@ -1346,7 +1348,7 @@ static void pr_p_devices(void)
}
#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned cs)
+static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs)
{
struct device *dev;
char str[32];
@@ -1399,7 +1401,7 @@ static int __init fbtft_device_init(void)
long val;
int ret = 0;
- if (name == NULL) {
+ if (!name) {
#ifdef MODULE
pr_err("missing module parameter: 'name'\n");
return -EINVAL;
@@ -1416,14 +1418,14 @@ static int __init fbtft_device_init(void)
/* parse module parameter: gpios */
while ((p_gpio = strsep(&gpios, ","))) {
- if (strchr(p_gpio, ':') == NULL) {
+ if (!strchr(p_gpio, ':')) {
pr_err("error: missing ':' in gpios parameter: %s\n",
p_gpio);
return -EINVAL;
}
p_num = p_gpio;
p_name = strsep(&p_num, ":");
- if (p_name == NULL || p_num == NULL) {
+ if (!p_name || !p_num) {
pr_err("something bad happened parsing gpios parameter: %s\n",
p_gpio);
return -EINVAL;
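The gpios parameter parsed above uses nothing more exotic than strsep(): split on ',' into name:number items, then split each item on ':'. A stand-alone sketch of the same parsing (user-space C; error strings and the strtol() conversion are illustrative):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char buf[] = "reset:23,dc:24";
        char *gpios = buf;
        char *p_gpio;

        while ((p_gpio = strsep(&gpios, ","))) {
                char *p_num = p_gpio;
                char *p_name;

                if (!strchr(p_gpio, ':')) {
                        fprintf(stderr, "missing ':' in '%s'\n", p_gpio);
                        return 1;
                }
                p_name = strsep(&p_num, ":");
                if (!p_name || !p_num) {
                        fprintf(stderr, "could not parse '%s'\n", p_gpio);
                        return 1;
                }
                printf("gpio '%s' -> %ld\n", p_name, strtol(p_num, NULL, 10));
        }
        return 0;
}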
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index e7315170b7a3..38716fd5cb58 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -7,13 +7,14 @@
#
obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
-mc-bus-driver-objs := mc-bus.o \
+mc-bus-driver-objs := fsl-mc-bus.o \
mc-sys.o \
+ mc-io.o \
dprc.o \
dpmng.o \
dprc-driver.o \
- mc-allocator.o \
- mc-msi.o \
+ fsl-mc-allocator.o \
+ fsl-mc-msi.o \
irq-gic-v3-its-fsl-mc-msi.o \
dpmcp.o \
dpbp.o
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index fe271fbd629b..5d4cd812a400 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,34 +1,34 @@
/* Copyright 2013-2016 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dpbp.h"
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index 06440176243a..55766f78a528 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -31,6 +31,7 @@
*/
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
+
#include "dpmcp.h"
#include "dpmcp-cmd.h"
@@ -368,7 +369,6 @@ int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
struct mc_command cmd = { 0 };
struct dpmcp_cmd_set_irq_mask *cmd_params;
-
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
cmd_flags, token);
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index 779bf9c25bc0..a7b77d58c8cd 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,12 +31,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-/*************************************************************************//*
- dpmng-cmd.h
-
- defines portal commands
-
- *//**************************************************************************/
+/*
+ * dpmng-cmd.h
+ *
+ * defines portal commands
+ *
+ */
#ifndef __FSL_DPMNG_CMD_H
#define __FSL_DPMNG_CMD_H
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index 660bbe7ea899..96b1d67756fa 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,37 +1,38 @@
/* Copyright 2013-2016 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dpmng.h"
+
#include "dpmng-cmd.h"
/**
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index bb127f4a3ae7..009d65673155 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,12 +31,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-/*************************************************************************//*
- dprc-cmd.h
-
- defines dprc portal commands
-
- *//**************************************************************************/
+/*
+ * dprc-cmd.h
+ *
+ * defines dprc portal commands
+ *
+ */
#ifndef _FSL_DPRC_CMD_H
#define _FSL_DPRC_CMD_H
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index d2a71f14bf72..c5ee4639682b 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -9,13 +9,21 @@
* warranty of any kind, whether express or implied.
*/
-#include "../include/mc-private.h"
-#include "../include/mc-sys.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
+#include "../include/mc-bus.h"
+#include "../include/mc-sys.h"
+
#include "dprc-cmd.h"
+#include "fsl-mc-private.h"
+
+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
+#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
+ (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
+ (_mc_dev)->obj_desc.id == (_obj_desc)->id)
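The new FSL_MC_DEVICE_MATCH() helper is just a string compare on the object type paired with an integer compare on the object id; a self-contained illustration of the same predicate (simplified stand-in structs and a hypothetical DEVICE_MATCH name):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the type/id pair held in a dprc_obj_desc. */
struct obj_desc {
        char type[16];
        int id;
};

#define DEVICE_MATCH(dev_desc, obj_desc) \
        (strcmp((dev_desc)->type, (obj_desc)->type) == 0 && \
         (dev_desc)->id == (obj_desc)->id)

int main(void)
{
        struct obj_desc dev   = { "dpbp", 4 };
        struct obj_desc same  = { "dpbp", 4 };
        struct obj_desc other = { "dpmcp", 4 };

        printf("same:  %d\n", DEVICE_MATCH(&dev, &same));   /* 1 */
        printf("other: %d\n", DEVICE_MATCH(&dev, &other));  /* 0 */
        return 0;
}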
struct dprc_child_objs {
int child_count;
@@ -190,55 +198,6 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
}
}
-static void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- res_pool->type = pool_type;
- res_pool->max_count = 0;
- res_pool->free_count = 0;
- res_pool->mc_bus = mc_bus;
- INIT_LIST_HEAD(&res_pool->free_list);
- mutex_init(&res_pool->mutex);
- }
-}
-
-static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
- int free_count = 0;
-
- WARN_ON(res_pool->type != pool_type);
- WARN_ON(res_pool->free_count != res_pool->max_count);
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
- free_count++;
- WARN_ON(resource->type != res_pool->type);
- WARN_ON(resource->parent_pool != res_pool);
- devm_kfree(&mc_bus_dev->dev, resource);
- }
-
- WARN_ON(free_count != res_pool->free_count);
-}
-
-static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- dprc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/**
* dprc_scan_objects - Discover objects in a DPRC
*
@@ -363,7 +322,7 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
unsigned int irq_count;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- dprc_init_all_resource_pools(mc_bus_dev);
+ fsl_mc_init_all_resource_pools(mc_bus_dev);
/*
* Discover objects in the DPRC:
@@ -390,7 +349,7 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
return 0;
error:
- dprc_cleanup_all_resource_pools(mc_bus_dev);
+ fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
return error;
}
EXPORT_SYMBOL_GPL(dprc_scan_container);
@@ -649,7 +608,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
/*
* This is a child DPRC:
*/
- if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type))
+ if (WARN_ON(!dev_is_fsl_mc(parent_dev)))
return -EINVAL;
if (WARN_ON(mc_dev->obj_desc.region_count == 0))
@@ -681,7 +640,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
*/
struct irq_domain *mc_msi_domain;
- if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type))
+ if (WARN_ON(dev_is_fsl_mc(parent_dev)))
return -EINVAL;
error = fsl_mc_find_msi_domain(parent_dev,
@@ -802,7 +761,7 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- dprc_cleanup_all_resource_pools(mc_dev);
+ fsl_mc_cleanup_all_resource_pools(mc_dev);
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index c26054981333..9fea3def6041 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,37 +1,38 @@
/* Copyright 2013-2016 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dprc.h"
+
#include "dprc-cmd.h"
/**
@@ -1334,20 +1335,20 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
}
/**
-* dprc_get_connection() - Get connected endpoint and link status if connection
-* exists.
-* @mc_io: Pointer to MC portal's I/O object
-* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-* @token: Token of DPRC object
-* @endpoint1: Endpoint 1 configuration parameters
-* @endpoint2: Returned endpoint 2 configuration parameters
-* @state: Returned link state:
-* 1 - link is up;
-* 0 - link is down;
-* -1 - no connection (endpoint2 information is irrelevant)
-*
-* Return: '0' on Success; -ENAVAIL if connection does not exist.
-*/
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
int dprc_get_connection(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index e59d85060c7b..e93ab53bae67 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -8,14 +8,19 @@
* warranty of any kind, whether express or implied.
*/
-#include "../include/mc-private.h"
-#include "../include/mc-sys.h"
#include <linux/module.h>
+#include <linux/msi.h>
+#include "../include/mc-bus.h"
+#include "../include/mc-sys.h"
#include "../include/dpbp-cmd.h"
#include "../include/dpcon-cmd.h"
-#include "dpmcp-cmd.h"
-#include "dpmcp.h"
-#include <linux/msi.h>
+
+#include "fsl-mc-private.h"
+
+#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
+ (strcmp(_obj_type, "dpbp") == 0 || \
+ strcmp(_obj_type, "dpmcp") == 0 || \
+ strcmp(_obj_type, "dpcon") == 0)
/**
* fsl_mc_resource_pool_add_device - add allocatable device to a resource
@@ -137,8 +142,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
goto out_unlock;
}
- list_del(&resource->node);
- INIT_LIST_HEAD(&resource->node);
+ list_del_init(&resource->node);
res_pool->free_count--;
res_pool->max_count--;
@@ -215,8 +219,7 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
res_pool->free_count > res_pool->max_count))
goto out_unlock;
- list_del(&resource->node);
- INIT_LIST_HEAD(&resource->node);
+ list_del_init(&resource->node);
res_pool->free_count--;
error = 0;
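Both list_del_init() conversions in this file are behaviour-preserving: the helper unlinks the entry and then re-points it at itself, exactly the removed two-step sequence. A miniature user-space re-implementation (not the kernel's <linux/list.h>) that demonstrates the effect:

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

/* list_del() followed by INIT_LIST_HEAD(): unlink the entry, then make it
 * an empty list of its own so list_empty(entry) is true afterwards.
 */
static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

int main(void)
{
        struct list_head pool, res;

        INIT_LIST_HEAD(&pool);
        INIT_LIST_HEAD(&res);
        list_add(&res, &pool);
        printf("before: pool empty=%d, res empty=%d\n",
               list_empty(&pool), list_empty(&res));
        list_del_init(&res);
        printf("after:  pool empty=%d, res empty=%d\n",
               list_empty(&pool), list_empty(&res));
        return 0;
}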
@@ -252,144 +255,6 @@ out_unlock:
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
/**
- * fsl_mc_portal_allocate - Allocates an MC portal
- *
- * @mc_dev: MC device for which the MC portal is to be allocated
- * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
- * MC portal.
- * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
- * that wraps the allocated MC portal is to be returned
- *
- * This function allocates an MC portal from the device's parent DPRC,
- * from the corresponding MC bus' pool of MC portals and wraps
- * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
- * portal is allocated from its own MC bus.
- */
-int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
- u16 mc_io_flags,
- struct fsl_mc_io **new_mc_io)
-{
- struct fsl_mc_device *mc_bus_dev;
- struct fsl_mc_bus *mc_bus;
- phys_addr_t mc_portal_phys_addr;
- size_t mc_portal_size;
- struct fsl_mc_device *dpmcp_dev;
- int error = -EINVAL;
- struct fsl_mc_resource *resource = NULL;
- struct fsl_mc_io *mc_io = NULL;
-
- if (mc_dev->flags & FSL_MC_IS_DPRC) {
- mc_bus_dev = mc_dev;
- } else {
- if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type))
- return error;
-
- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- }
-
- mc_bus = to_fsl_mc_bus(mc_bus_dev);
- *new_mc_io = NULL;
- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
- if (error < 0)
- return error;
-
- error = -EINVAL;
- dpmcp_dev = resource->data;
- if (WARN_ON(!dpmcp_dev))
- goto error_cleanup_resource;
-
- if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
- (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
- dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
- dev_err(&dpmcp_dev->dev,
- "ERROR: Version %d.%d of DPMCP not supported.\n",
- dpmcp_dev->obj_desc.ver_major,
- dpmcp_dev->obj_desc.ver_minor);
- error = -ENOTSUPP;
- goto error_cleanup_resource;
- }
-
- if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
- goto error_cleanup_resource;
-
- mc_portal_phys_addr = dpmcp_dev->regions[0].start;
- mc_portal_size = dpmcp_dev->regions[0].end -
- dpmcp_dev->regions[0].start + 1;
-
- if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
- goto error_cleanup_resource;
-
- error = fsl_create_mc_io(&mc_bus_dev->dev,
- mc_portal_phys_addr,
- mc_portal_size, dpmcp_dev,
- mc_io_flags, &mc_io);
- if (error < 0)
- goto error_cleanup_resource;
-
- *new_mc_io = mc_io;
- return 0;
-
-error_cleanup_resource:
- fsl_mc_resource_free(resource);
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
-
-/**
- * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
- * of a given MC bus
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
-{
- struct fsl_mc_device *dpmcp_dev;
- struct fsl_mc_resource *resource;
-
- /*
- * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
- * to have a DPMCP object associated with.
- */
- dpmcp_dev = mc_io->dpmcp_dev;
- if (WARN_ON(!dpmcp_dev))
- return;
-
- resource = dpmcp_dev->resource;
- if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP))
- return;
-
- if (WARN_ON(resource->data != dpmcp_dev))
- return;
-
- fsl_destroy_mc_io(mc_io);
- fsl_mc_resource_free(resource);
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- if (WARN_ON(!dpmcp_dev))
- return -EINVAL;
-
- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
-
-/**
* fsl_mc_object_allocate - Allocates a MC object device of the given
* pool type from a given MC bus
*
@@ -420,7 +285,7 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC))
goto error;
- if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type))
+ if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
goto error;
if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP))
@@ -663,6 +528,55 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ res_pool->type = pool_type;
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ res_pool->mc_bus = mc_bus;
+ INIT_LIST_HEAD(&res_pool->free_list);
+ mutex_init(&res_pool->mutex);
+ }
+}
+
+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+ enum fsl_mc_pool_type pool_type)
+{
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_resource *next;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+ int free_count = 0;
+
+ WARN_ON(res_pool->type != pool_type);
+ WARN_ON(res_pool->free_count != res_pool->max_count);
+
+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
+ free_count++;
+ WARN_ON(resource->type != res_pool->type);
+ WARN_ON(resource->parent_pool != res_pool);
+ devm_kfree(&mc_bus_dev->dev, resource);
+ }
+
+ WARN_ON(free_count != res_pool->free_count);
+}
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
+
/**
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
@@ -678,7 +592,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
return -EINVAL;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
- if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type))
+ if (WARN_ON(!dev_is_fsl_mc(&mc_bus_dev->dev)))
return -EINVAL;
mc_bus = to_fsl_mc_bus(mc_bus_dev);
@@ -736,7 +650,6 @@ static const struct fsl_mc_device_id match_id_table[] = {
static struct fsl_mc_driver fsl_mc_allocator_driver = {
.driver = {
.name = "fsl_mc_allocator",
- .owner = THIS_MODULE,
.pm = NULL,
},
.match_id_table = match_id_table,
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index db3afdbdf4ae..44f64b6f0fc9 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -9,7 +9,6 @@
* warranty of any kind, whether express or implied.
*/
-#include "../include/mc-private.h"
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -18,13 +17,50 @@
#include <linux/limits.h>
#include <linux/bitops.h>
#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+#include "../include/mc-bus.h"
#include "../include/dpmng.h"
#include "../include/mc-sys.h"
+
+#include "fsl-mc-private.h"
#include "dprc-cmd.h"
static struct kmem_cache *mc_dev_cache;
/**
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
+/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: MC object device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ */
+struct fsl_mc {
+ struct fsl_mc_device *root_mc_bus_dev;
+ u8 num_translation_ranges;
+ struct fsl_mc_addr_translation_range *translation_ranges;
+};
+
+/**
+ * struct fsl_mc_addr_translation_range - bus to system address translation
+ * range
+ * @mc_region_type: Type of MC region for the range being translated
+ * @start_mc_offset: Start MC offset of the range being translated
+ * @end_mc_offset: MC offset of the first byte after the range (last MC
+ * offset of the range is end_mc_offset - 1)
+ * @start_phys_addr: system physical address corresponding to start_mc_addr
+ */
+struct fsl_mc_addr_translation_range {
+ enum dprc_region_type mc_region_type;
+ u64 start_mc_offset;
+ u64 end_mc_offset;
+ phys_addr_t start_phys_addr;
+};
+
+/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the MC object device structure to match against
* @drv: the device driver to search for matching MC object device id
@@ -101,14 +137,7 @@ static struct attribute *fsl_mc_dev_attrs[] = {
NULL,
};
-static const struct attribute_group fsl_mc_dev_group = {
- .attrs = fsl_mc_dev_attrs,
-};
-
-static const struct attribute_group *fsl_mc_dev_groups[] = {
- &fsl_mc_dev_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(fsl_mc_dev);
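ATTRIBUTE_GROUPS(fsl_mc_dev) is the stock helper from <linux/sysfs.h>; its expansion is, to a close approximation, the pair of definitions removed just above, so the conversion changes no behaviour (approximate expansion shown for reference, not a verbatim copy of the macro):

/* Approximate expansion of ATTRIBUTE_GROUPS(fsl_mc_dev): */
static const struct attribute_group fsl_mc_dev_group = {
        .attrs = fsl_mc_dev_attrs,
};

static const struct attribute_group *fsl_mc_dev_groups[] = {
        &fsl_mc_dev_group,
        NULL,
};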
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
@@ -229,21 +258,22 @@ bool fsl_mc_bus_exists(void)
EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
/**
-* fsl_mc_get_root_dprc - function to traverse to the root dprc
-*/
-static void fsl_mc_get_root_dprc(struct device *dev,
- struct device **root_dprc_dev)
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
{
if (WARN_ON(!dev)) {
*root_dprc_dev = NULL;
- } else if (WARN_ON(dev->bus != &fsl_mc_bus_type)) {
+ } else if (WARN_ON(!dev_is_fsl_mc(dev))) {
*root_dprc_dev = NULL;
} else {
*root_dprc_dev = dev;
- while ((*root_dprc_dev)->parent->bus == &fsl_mc_bus_type)
+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
*root_dprc_dev = (*root_dprc_dev)->parent;
}
}
+EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
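The helper exported above just walks dev->parent while the parent is still on the fsl-mc bus; the last device visited is the root DPRC. A stand-alone sketch of that parent walk (minimal hypothetical struct, not the driver's types):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct dev {
        const char *name;
        bool on_fsl_mc_bus;
        struct dev *parent;
};

/* Climb the parent chain until the next parent is no longer on the bus. */
static struct dev *get_root_dprc(struct dev *dev)
{
        if (!dev || !dev->on_fsl_mc_bus)
                return NULL;

        while (dev->parent && dev->parent->on_fsl_mc_bus)
                dev = dev->parent;
        return dev;
}

int main(void)
{
        struct dev platform   = { "platform-bus", false, NULL };
        struct dev root_dprc  = { "dprc.1", true, &platform };
        struct dev child_dprc = { "dprc.2", true, &root_dprc };
        struct dev dpbp       = { "dpbp.3", true, &child_dprc };

        printf("root of %s is %s\n", dpbp.name, get_root_dprc(&dpbp)->name);
        return 0;
}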
static int get_dprc_attr(struct fsl_mc_io *mc_io,
int container_id, struct dprc_attributes *attr)
@@ -434,7 +464,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
struct fsl_mc_bus *mc_bus = NULL;
struct fsl_mc_device *parent_mc_dev;
- if (parent_dev->bus == &fsl_mc_bus_type)
+ if (dev_is_fsl_mc(parent_dev))
parent_mc_dev = to_fsl_mc_device(parent_dev);
else
parent_mc_dev = NULL;
@@ -887,25 +917,4 @@ error_cleanup_cache:
kmem_cache_destroy(mc_dev_cache);
return error;
}
-
postcore_initcall(fsl_mc_bus_driver_init);
-
-static void __exit fsl_mc_bus_driver_exit(void)
-{
- if (WARN_ON(!mc_dev_cache))
- return;
-
- its_fsl_mc_msi_cleanup();
- fsl_mc_allocator_driver_exit();
- dprc_driver_exit();
- platform_driver_unregister(&fsl_mc_bus_driver);
- bus_unregister(&fsl_mc_bus_type);
- kmem_cache_destroy(mc_dev_cache);
- pr_info("MC bus unregistered\n");
-}
-
-module_exit(fsl_mc_bus_driver_exit);
-
-MODULE_AUTHOR("Freescale Semiconductor Inc.");
-MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 4fd8e41ef468..3d46b1b1fa18 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -9,7 +9,6 @@
* warranty of any kind, whether express or implied.
*/
-#include "../include/mc-private.h"
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -17,8 +16,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
-#include "../include/mc-sys.h"
-#include "dprc-cmd.h"
+#include "../include/mc-bus.h"
/*
* Generate a unique ID identifying the interrupt (only used within the MSI
@@ -52,7 +50,7 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
/*
* set_desc should not be set by the caller
*/
- if (ops->set_desc == NULL)
+ if (!ops->set_desc)
ops->set_desc = fsl_mc_msi_set_desc;
}
@@ -142,7 +140,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/*
* irq_write_msi_msg should not be set by the caller
*/
- if (chip->irq_write_msi_msg == NULL)
+ if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
new file mode 100644
index 000000000000..d459c2673f39
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -0,0 +1,52 @@
+/*
+ * Freescale Management Complex (MC) bus private declarations
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev);
+
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
+int __init dprc_driver_init(void);
+
+void dprc_driver_exit(void);
+
+int __init fsl_mc_allocator_driver_init(void);
+
+void fsl_mc_allocator_driver_exit(void);
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource
+ **new_resource);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+int __init its_fsl_mc_msi_init(void);
+
+void its_fsl_mc_msi_cleanup(void);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 720e2b018d00..7a6ac640752f 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -9,7 +9,6 @@
* warranty of any kind, whether express or implied.
*/
-#include "../include/mc-private.h"
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -17,8 +16,7 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include "../include/mc-sys.h"
-#include "dprc-cmd.h"
+#include "../include/mc-bus.h"
static struct irq_chip its_msi_irq_chip = {
.name = "fsl-mc-bus-msi",
@@ -35,7 +33,7 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
struct fsl_mc_device *mc_bus_dev;
struct msi_domain_info *msi_info;
- if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ if (WARN_ON(!dev_is_fsl_mc(dev)))
return -EINVAL;
mc_bus_dev = to_fsl_mc_device(dev);
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
new file mode 100644
index 000000000000..798c965fe203
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -0,0 +1,320 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/io.h>
+#include "../include/mc-bus.h"
+#include "../include/mc-sys.h"
+
+#include "fsl-mc-private.h"
+#include "dpmcp.h"
+#include "dpmcp-cmd.h"
+
+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+ struct fsl_mc_device *dpmcp_dev)
+{
+ int error;
+
+ if (WARN_ON(!dpmcp_dev))
+ return -EINVAL;
+
+ if (WARN_ON(mc_io->dpmcp_dev))
+ return -EINVAL;
+
+ if (WARN_ON(dpmcp_dev->mc_io))
+ return -EINVAL;
+
+ error = dpmcp_open(mc_io,
+ 0,
+ dpmcp_dev->obj_desc.id,
+ &dpmcp_dev->mc_handle);
+ if (error < 0)
+ return error;
+
+ mc_io->dpmcp_dev = dpmcp_dev;
+ dpmcp_dev->mc_io = mc_io;
+ return 0;
+}
+
+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (WARN_ON(!dpmcp_dev))
+ return;
+
+ if (WARN_ON(dpmcp_dev->mc_io != mc_io))
+ return;
+
+ error = dpmcp_close(mc_io,
+ 0,
+ dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+ error);
+ }
+
+ mc_io->dpmcp_dev = NULL;
+ dpmcp_dev->mc_io = NULL;
+}
+
+/**
+ * Creates an MC I/O object
+ *
+ * @dev: device to be associated with the MC I/O object
+ * @mc_portal_phys_addr: physical address of the MC portal to use
+ * @mc_portal_size: size in bytes of the MC portal
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
+ * object or NULL if none.
+ * @flags: flags for the new MC I/O object
+ * @new_mc_io: Area to return pointer to newly created MC I/O object
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io)
+{
+ int error;
+ struct fsl_mc_io *mc_io;
+ void __iomem *mc_portal_virt_addr;
+ struct resource *res;
+
+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+ if (!mc_io)
+ return -ENOMEM;
+
+ mc_io->dev = dev;
+ mc_io->flags = flags;
+ mc_io->portal_phys_addr = mc_portal_phys_addr;
+ mc_io->portal_size = mc_portal_size;
+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ spin_lock_init(&mc_io->spinlock);
+ else
+ mutex_init(&mc_io->mutex);
+
+ res = devm_request_mem_region(dev,
+ mc_portal_phys_addr,
+ mc_portal_size,
+ "mc_portal");
+ if (!res) {
+ dev_err(dev,
+ "devm_request_mem_region failed for MC portal %#llx\n",
+ mc_portal_phys_addr);
+ return -EBUSY;
+ }
+
+ mc_portal_virt_addr = devm_ioremap_nocache(dev,
+ mc_portal_phys_addr,
+ mc_portal_size);
+ if (!mc_portal_virt_addr) {
+ dev_err(dev,
+ "devm_ioremap_nocache failed for MC portal %#llx\n",
+ mc_portal_phys_addr);
+ return -ENXIO;
+ }
+
+ mc_io->portal_virt_addr = mc_portal_virt_addr;
+ if (dpmcp_dev) {
+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+ if (error < 0)
+ goto error_destroy_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_destroy_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * Destroys an MC I/O object
+ *
+ * @mc_io: MC I/O object to destroy
+ */
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+
+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+ devm_release_mem_region(mc_io->dev,
+ mc_io->portal_phys_addr,
+ mc_io->portal_size);
+
+ mc_io->portal_virt_addr = NULL;
+ devm_kfree(mc_io->dev, mc_io);
+}
+
+/**
+ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
+ * @mc_dev: MC device for which the MC portal is to be allocated
+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+ * MC portal.
+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+ * that wraps the allocated MC portal is to be returned
+ *
+ * This function allocates an MC portal from the device's parent DPRC,
+ * from the corresponding MC bus' pool of MC portals and wraps
+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+ * portal is allocated from its own MC bus.
+ */
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ phys_addr_t mc_portal_phys_addr;
+ size_t mc_portal_size;
+ struct fsl_mc_device *dpmcp_dev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
+ return error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ }
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ *new_mc_io = NULL;
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+ if (error < 0)
+ return error;
+
+ error = -EINVAL;
+ dpmcp_dev = resource->data;
+ if (WARN_ON(!dpmcp_dev))
+ goto error_cleanup_resource;
+
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
+ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
+ goto error_cleanup_resource;
+
+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+ mc_portal_size = dpmcp_dev->regions[0].end -
+ dpmcp_dev->regions[0].start + 1;
+
+ if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
+ goto error_cleanup_resource;
+
+ error = fsl_create_mc_io(&mc_bus_dev->dev,
+ mc_portal_phys_addr,
+ mc_portal_size, dpmcp_dev,
+ mc_io_flags, &mc_io);
+ if (error < 0)
+ goto error_cleanup_resource;
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
+
+/**
+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+ * of a given MC bus
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+ struct fsl_mc_resource *resource;
+
+ /*
+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+ * to have a DPMCP object associated with it.
+ */
+ dpmcp_dev = mc_io->dpmcp_dev;
+ if (WARN_ON(!dpmcp_dev))
+ return;
+
+ resource = dpmcp_dev->resource;
+ if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP))
+ return;
+
+ if (WARN_ON(resource->data != dpmcp_dev))
+ return;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+/**
+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (WARN_ON(!dpmcp_dev))
+ return -EINVAL;
+
+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
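Taken together, the relocated helpers give a driver the usual portal lifecycle: allocate a private MC portal from the parent DPRC's pool, reset its DPMCP, issue commands through the returned fsl_mc_io, then free it. A minimal usage sketch for a hypothetical child-object probe (error handling trimmed; only the fsl_mc_* calls are the real API, everything else is illustrative):

static int example_probe(struct fsl_mc_device *mc_dev)
{
        struct fsl_mc_io *mc_io;
        int error;

        error = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);
        if (error < 0)
                return error;

        error = fsl_mc_portal_reset(mc_io);
        if (error < 0) {
                fsl_mc_portal_free(mc_io);
                return error;
        }

        /* ... issue MC commands through mc_io ... */

        fsl_mc_portal_free(mc_io);
        return 0;
}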
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 0c185abe665e..285917c7c8e4 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -32,13 +32,15 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/mc.h"
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include "../include/mc-sys.h"
+#include "../include/mc-cmd.h"
+#include "../include/mc.h"
+
#include "dpmcp.h"
/**
@@ -68,153 +70,6 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
}
-/**
- * Creates an MC I/O object
- *
- * @dev: device to be associated with the MC I/O object
- * @mc_portal_phys_addr: physical address of the MC portal to use
- * @mc_portal_size: size in bytes of the MC portal
- * @dpmcp-dev: Pointer to the DPMCP object associated with this MC I/O
- * object or NULL if none.
- * @flags: flags for the new MC I/O object
- * @new_mc_io: Area to return pointer to newly created MC I/O object
- *
- * Returns '0' on Success; Error code otherwise.
- */
-int __must_check fsl_create_mc_io(struct device *dev,
- phys_addr_t mc_portal_phys_addr,
- u32 mc_portal_size,
- struct fsl_mc_device *dpmcp_dev,
- u32 flags, struct fsl_mc_io **new_mc_io)
-{
- int error;
- struct fsl_mc_io *mc_io;
- void __iomem *mc_portal_virt_addr;
- struct resource *res;
-
- mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
- if (!mc_io)
- return -ENOMEM;
-
- mc_io->dev = dev;
- mc_io->flags = flags;
- mc_io->portal_phys_addr = mc_portal_phys_addr;
- mc_io->portal_size = mc_portal_size;
- if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_init(&mc_io->spinlock);
- else
- mutex_init(&mc_io->mutex);
-
- res = devm_request_mem_region(dev,
- mc_portal_phys_addr,
- mc_portal_size,
- "mc_portal");
- if (!res) {
- dev_err(dev,
- "devm_request_mem_region failed for MC portal %#llx\n",
- mc_portal_phys_addr);
- return -EBUSY;
- }
-
- mc_portal_virt_addr = devm_ioremap_nocache(dev,
- mc_portal_phys_addr,
- mc_portal_size);
- if (!mc_portal_virt_addr) {
- dev_err(dev,
- "devm_ioremap_nocache failed for MC portal %#llx\n",
- mc_portal_phys_addr);
- return -ENXIO;
- }
-
- mc_io->portal_virt_addr = mc_portal_virt_addr;
- if (dpmcp_dev) {
- error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
- if (error < 0)
- goto error_destroy_mc_io;
- }
-
- *new_mc_io = mc_io;
- return 0;
-
-error_destroy_mc_io:
- fsl_destroy_mc_io(mc_io);
- return error;
-}
-EXPORT_SYMBOL_GPL(fsl_create_mc_io);
-
-/**
- * Destroys an MC I/O object
- *
- * @mc_io: MC I/O object to destroy
- */
-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
-{
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- if (dpmcp_dev)
- fsl_mc_io_unset_dpmcp(mc_io);
-
- devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
- devm_release_mem_region(mc_io->dev,
- mc_io->portal_phys_addr,
- mc_io->portal_size);
-
- mc_io->portal_virt_addr = NULL;
- devm_kfree(mc_io->dev, mc_io);
-}
-EXPORT_SYMBOL_GPL(fsl_destroy_mc_io);
-
-int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
- struct fsl_mc_device *dpmcp_dev)
-{
- int error;
-
- if (WARN_ON(!dpmcp_dev))
- return -EINVAL;
-
- if (WARN_ON(mc_io->dpmcp_dev))
- return -EINVAL;
-
- if (WARN_ON(dpmcp_dev->mc_io))
- return -EINVAL;
-
- error = dpmcp_open(mc_io,
- 0,
- dpmcp_dev->obj_desc.id,
- &dpmcp_dev->mc_handle);
- if (error < 0)
- return error;
-
- mc_io->dpmcp_dev = dpmcp_dev;
- dpmcp_dev->mc_io = mc_io;
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp);
-
-void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- if (WARN_ON(!dpmcp_dev))
- return;
-
- if (WARN_ON(dpmcp_dev->mc_io != mc_io))
- return;
-
- error = dpmcp_close(mc_io,
- 0,
- dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
- error);
- }
-
- mc_io->dpmcp_dev = NULL;
- dpmcp_dev->mc_io = NULL;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp);
-
static int mc_status_to_error(enum mc_cmd_status status)
{
static const int mc_status_to_error_map[] = {
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index 4828ccd0cffd..2860411ddb51 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -1,34 +1,34 @@
/* Copyright 2013-2016 Freescale Semiconductor Inc.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are met:
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-* * Neither the name of the above-listed copyright holders nor the
-* names of any contributors may be used to endorse or promote products
-* derived from this software without specific prior written permission.
-*
-*
-* ALTERNATIVELY, this software may be distributed under the terms of the
-* GNU General Public License ("GPL") as published by the Free Software
-* Foundation, either version 2 of that License or (at your option) any
-* later version.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
#ifndef _FSL_DPBP_CMD_H
#define _FSL_DPBP_CMD_H
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-bus.h
index cab1ae90f09e..170684a57ca2 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-bus.h
@@ -1,5 +1,5 @@
/*
- * Freescale Management Complex (MC) bus private declarations
+ * Freescale Management Complex (MC) bus declarations
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
* Author: German Rivera <German.Rivera@freescale.com>
@@ -8,23 +8,11 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#ifndef _FSL_MC_PRIVATE_H_
-#define _FSL_MC_PRIVATE_H_
+#ifndef _FSL_MC_MCBUS_H_
+#define _FSL_MC_MCBUS_H_
#include "../include/mc.h"
#include <linux/mutex.h>
-#include <linux/stringify.h>
-
-#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
-
-#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
- (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
- (_mc_dev)->obj_desc.id == (_obj_desc)->id)
-
-#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
- (strcmp(_obj_type, "dpbp") == 0 || \
- strcmp(_obj_type, "dpmcp") == 0 || \
- strcmp(_obj_type, "dpcon") == 0)
struct irq_domain;
struct msi_domain_info;
@@ -35,37 +23,12 @@ struct msi_domain_info;
*/
#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
-struct device_node;
-struct irq_domain;
-struct msi_domain_info;
-
-/**
- * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
- * @root_mc_bus_dev: MC object device representing the root DPRC
- * @num_translation_ranges: number of entries in addr_translation_ranges
- * @translation_ranges: array of bus to system address translation ranges
- */
-struct fsl_mc {
- struct fsl_mc_device *root_mc_bus_dev;
- u8 num_translation_ranges;
- struct fsl_mc_addr_translation_range *translation_ranges;
-};
-
-/**
- * struct fsl_mc_addr_translation_range - bus to system address translation
- * range
- * @mc_region_type: Type of MC region for the range being translated
- * @start_mc_offset: Start MC offset of the range being translated
- * @end_mc_offset: MC offset of the first byte after the range (last MC
- * offset of the range is end_mc_offset - 1)
- * @start_phys_addr: system physical address corresponding to start_mc_addr
- */
-struct fsl_mc_addr_translation_range {
- enum dprc_region_type mc_region_type;
- u64 start_mc_offset;
- u64 end_mc_offset;
- phys_addr_t start_phys_addr;
-};
+#ifdef CONFIG_FSL_MC_BUS
+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+#else
+/* If the fsl-mc bus is not present, the device cannot belong to the fsl-mc bus */
+#define dev_is_fsl_mc(_dev) (0)
+#endif
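+
+/*
+ * Illustrative usage sketch: bus-agnostic code can use this predicate to
+ * special-case fsl-mc devices, for example (handle_fsl_mc_device() is a
+ * hypothetical helper):
+ *
+ *	if (dev_is_fsl_mc(dev))
+ *		handle_fsl_mc_device(dev);
+ */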
/**
* struct fsl_mc_resource_pool - Pool of MC resources of a given
@@ -107,13 +70,6 @@ struct fsl_mc_bus {
#define to_fsl_mc_bus(_mc_dev) \
container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
-int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
- struct fsl_mc_io *mc_io,
- struct device *parent_dev,
- struct fsl_mc_device **new_mc_dev);
-
-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
-
int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
@@ -127,13 +83,6 @@ int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_allocator_driver_exit(void);
-int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
- enum fsl_mc_pool_type pool_type,
- struct fsl_mc_resource
- **new_resource);
-
-void fsl_mc_resource_free(struct fsl_mc_resource *resource);
-
struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
@@ -141,18 +90,22 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
struct irq_domain **mc_msi_domain);
-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
- unsigned int irq_count);
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
-void fsl_mc_msi_domain_free_irqs(struct device *dev);
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-int __init its_fsl_mc_msi_init(void);
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void its_fsl_mc_msi_cleanup(void);
+bool fsl_mc_bus_exists(void);
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
- unsigned int irq_count);
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+bool fsl_mc_is_root_dprc(struct device *dev);
+
+extern struct bus_type fsl_mc_bus_type;
-#endif /* _FSL_MC_PRIVATE_H_ */
+#endif /* _FSL_MC_MCBUS_H_ */
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
index c5038cc77240..89ad0cf54702 100644
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ b/drivers/staging/fsl-mc/include/mc-sys.h
@@ -37,8 +37,6 @@
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -95,19 +93,6 @@ struct fsl_mc_io {
};
};
-int __must_check fsl_create_mc_io(struct device *dev,
- phys_addr_t mc_portal_phys_addr,
- u32 mc_portal_size,
- struct fsl_mc_device *dpmcp_dev,
- u32 flags, struct fsl_mc_io **new_mc_io);
-
-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
-
-int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
- struct fsl_mc_device *dpmcp_dev);
-
-void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io);
-
int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
#endif /* _FSL_MC_SYS_H */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index 853cbf38a400..f6e720e84460 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -13,7 +13,6 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
-#include <linux/list.h>
#include <linux/interrupt.h>
#include "../include/dprc.h"
@@ -21,7 +20,6 @@
struct fsl_mc_device;
struct fsl_mc_io;
-struct fsl_mc_bus;
/**
* struct fsl_mc_driver - MC object device driver object
@@ -112,11 +110,6 @@ struct fsl_mc_device_irq {
#define FSL_MC_IS_DPRC 0x0001
/**
- * Default DMA mask for devices on a fsl-mc bus
- */
-#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
-
-/**
* struct fsl_mc_device - MC object device object
* @dev: Linux driver model device object
* @dma_mask: Default DMA mask
@@ -187,8 +180,6 @@ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
-bool fsl_mc_bus_exists(void);
-
int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
u16 mc_io_flags,
struct fsl_mc_io **new_mc_io);
@@ -207,8 +198,4 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-bool fsl_mc_is_root_dprc(struct device *dev);
-
-extern struct bus_type fsl_mc_bus_type;
-
#endif /* _FSL_MC_H_ */
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index c241c0ae3f20..49c718b91e55 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -39,9 +39,9 @@ static int num_ttys = 4; /* # of std ttys to create per fw_card */
static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
static bool create_loop_dev = true; /* create a loopback device for each card */
-module_param_named(ttys, num_ttys, int, S_IRUGO | S_IWUSR);
-module_param_named(auto, auto_connect, bool, S_IRUGO | S_IWUSR);
-module_param_named(loop, create_loop_dev, bool, S_IRUGO | S_IWUSR);
+module_param_named(ttys, num_ttys, int, 0644);
+module_param_named(auto, auto_connect, bool, 0644);
+module_param_named(loop, create_loop_dev, bool, 0644);
/*
* Threshold below which the tty is woken for writing
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index bb552193e4ba..e72dfa9699f3 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -828,7 +828,7 @@ void start_rx_proc(struct phy_dev *phy_dev)
rx_complete, phy_dev, USB_COMPLETE);
}
-static struct net_device_ops gdm_netdev_ops = {
+static const struct net_device_ops gdm_netdev_ops = {
.ndo_open = gdm_lte_open,
.ndo_stop = gdm_lte_close,
.ndo_set_config = gdm_lte_set_config,
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
index 3d50383c6ced..0871b8feec55 100644
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -27,8 +27,8 @@
#define START_FLAG 0xA512485A
#define MUX_HEADER_SIZE 14
-#define MUX_TX_MAX_SIZE (1024*10)
-#define MUX_RX_MAX_SIZE (1024*30)
+#define MUX_TX_MAX_SIZE (1024 * 10)
+#define MUX_RX_MAX_SIZE (1024 * 30)
#define AT_PKT_TYPE 0xF011
#define DM_PKT_TYPE 0xF010
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index eb7e2523c354..ae396638f897 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -225,7 +225,6 @@ int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
int j;
for (i = 0; i < TTY_MAX_COUNT; i++) {
-
gdm = kmalloc(sizeof(*gdm), GFP_KERNEL);
if (!gdm)
return -ENOMEM;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index d650d772095b..15a7e81ec2d2 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -415,10 +415,10 @@ static void do_rx(struct work_struct *work)
switch (cmd_evt) {
case LTE_GET_INFORMATION_RESULT:
if (set_mac_address(hci->data, r->cb_data) == 0) {
- ret = r->callback(r->cb_data,
- r->buf,
- r->urb->actual_length,
- KERNEL_THREAD);
+ r->callback(r->cb_data,
+ r->buf,
+ r->urb->actual_length,
+ KERNEL_THREAD);
}
break;
diff --git a/drivers/staging/gdm724x/gdm_usb.h b/drivers/staging/gdm724x/gdm_usb.h
index e6486e71a428..ffb3d995097d 100644
--- a/drivers/staging/gdm724x/gdm_usb.h
+++ b/drivers/staging/gdm724x/gdm_usb.h
@@ -26,10 +26,10 @@
#define PM_SUSPEND 1
#define AUTO_SUSPEND_TIMER 5000 /* ms */
-#define RX_BUF_SIZE (1024*32)
-#define TX_BUF_SIZE (1024*32)
+#define RX_BUF_SIZE (1024 * 32)
+#define TX_BUF_SIZE (1024 * 32)
#define SDU_BUF_SIZE 2048
-#define MAX_SDU_SIZE (1024*30)
+#define MAX_SDU_SIZE (1024 * 30)
#define MAX_PACKET_IN_MULTI_SDU 256
#define VID_GCT 0x1076
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index dbc4446cf78d..4644f84038c9 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -89,5 +89,4 @@ struct hci_connect_ind {
u32 connect;
} __packed;
-
#endif /* _HCI_PACKET_H_ */
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index a0232e8aec10..abe242505882 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/etherdevice.h>
#include <linux/netlink.h>
#include <asm/byteorder.h>
@@ -21,13 +22,7 @@
#include "netlink_k.h"
-#if defined(DEFINE_MUTEX)
static DEFINE_MUTEX(netlink_mutex);
-#else
-static struct semaphore netlink_mutex;
-#define mutex_lock(x) down(x)
-#define mutex_unlock(x) up(x)
-#endif
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
@@ -96,10 +91,6 @@ struct sock *netlink_init(int unit,
.input = netlink_rcv,
};
-#if !defined(DEFINE_MUTEX)
- init_MUTEX(&netlink_mutex);
-#endif
-
sock = netlink_kernel_create(&init_net, unit, &cfg);
if (sock)
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
new file mode 100644
index 000000000000..ab0688ad1e37
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -0,0 +1,139 @@
+/*
+ * Sample code to test CAP protocol
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_authentication.h"
+
+struct cap_ioc_get_endpoint_uid uid;
+struct cap_ioc_get_ims_certificate cert = {
+ .certificate_class = 0,
+ .certificate_id = 0,
+};
+
+struct cap_ioc_authenticate authenticate = {
+ .auth_type = 0,
+ .challenge = {0},
+};
+
+int main(int argc, char *argv[])
+{
+ unsigned int timeout = 10000;
+ char *capdev;
+ int fd, ret;
+
+ /* Make sure arguments are correct */
+ if (argc != 2) {
+ printf("\nUsage: ./firmware <Path of the gb-cap-X dev>\n");
+ return 0;
+ }
+
+ capdev = argv[1];
+
+ printf("Opening %s authentication device\n", capdev);
+
+ fd = open(capdev, O_RDWR);
+ if (fd < 0) {
+ printf("Failed to open: %s\n", capdev);
+ return -1;
+ }
+
+ /* Get UID */
+ printf("Get UID\n");
+
+ ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
+ if (ret < 0) {
+ printf("Failed to get UID: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));
+
+ /* Get certificate */
+ printf("Get IMS certificate\n");
+
+ ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
+ if (ret < 0) {
+ printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("IMS Certificate size: %d\n", cert.cert_size);
+
+ /* Authenticate */
+ printf("Authenticate module\n");
+
+ memcpy(authenticate.uid, uid.uid, 8);
+
+ ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
+ if (ret < 0) {
+ printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ printf("Authenticated, result (%02x), sig-size (%02x)\n",
+ authenticate.result_code, authenticate.signature_size);
+
+close_fd:
+ close(fd);
+
+ return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware-management b/drivers/staging/greybus/Documentation/firmware/firmware-management
new file mode 100644
index 000000000000..7918257e5b3b
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware-management
@@ -0,0 +1,333 @@
+
+Firmware Management
+-------------------
+ Copyright 2016 Google Inc.
+ Copyright 2016 Linaro Ltd.
+
+Interface-Manifest
+------------------
+
+All firmware packages on the Modules or Interfaces are managed by a special
+Firmware Management Protocol. To support Firmware Management by the AP, the
+Interface Manifest shall at least contain the Firmware Management Bundle and a
+Firmware Management Protocol CPort within it.
+
+The bundle may contain additional CPorts based on the extra functionality
+required to manage firmware packages.
+
+For example, the Firmware Management part of the Interface Manifest may look
+like this:
+
+ ; Firmware Management Bundle (Bundle 1):
+ [bundle-descriptor 1]
+ class = 0x16
+
+ ; (Mandatory) Firmware Management Protocol on CPort 1
+ [cport-descriptor 2]
+ bundle = 1
+ protocol = 0x18
+
+ ; (Optional) Firmware Download Protocol on CPort 2
+ [cport-descriptor 1]
+ bundle = 1
+ protocol = 0x17
+
+ ; (Optional) SPI protocol on CPort 3
+ [cport-descriptor 3]
+ bundle = 1
+ protocol = 0x0b
+
+ ; (Optional) Component Authentication Protocol (CAP) on CPort 4
+ [cport-descriptor 4]
+ bundle = 1
+ protocol = 0x19
+
+
+Sysfs Interfaces - Firmware Management
+--------------------------------------
+
+The Firmware Management Protocol interacts with Userspace using the character
+device interface. The character device will be present in /dev/ directory
+and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-fw-mgmt-N, and the user first needs to identify the character device used
+for firmware management of a particular interface.
+
+The Firmware Management core creates a device of class 'gb_fw_mgmt', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0
+
+The last component of this path, gb-fw-mgmt-0, is precisely the name of the
+char device, so the device node in this case will be:
+
+/dev/gb-fw-mgmt-0.
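+
+A userspace tool can resolve this mapping programmatically by listing the
+bundle's 'gb_fw_mgmt' class directory under sysfs. The following C sketch is
+only illustrative; the bundle path is the example used above and would
+normally be discovered at runtime:
+
+#include <dirent.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Print the /dev node name(s) found in the example bundle path above. */
+static void print_fw_mgmt_devs(void)
+{
+	const char *class_dir =
+		"/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt";
+	struct dirent *d;
+	DIR *dir = opendir(class_dir);
+
+	if (!dir)
+		return;
+
+	while ((d = readdir(dir))) {
+		if (!strncmp(d->d_name, "gb-fw-mgmt-", 11))
+			printf("/dev/%s\n", d->d_name);
+	}
+
+	closedir(dir);
+}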
+
+Operations on the Char device
+=============================
+
+The character device (gb-fw-mgmt-0 in this example) can be opened by the
+userspace application, which can then perform various 'ioctl' operations on the
+device. The
+device doesn't support any read/write operations.
+
+Following are the IOCTLs and their data structures available to the user:
+
+/* IOCTL support */
+#define GB_FW_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_LOAD_STATUS_FAILED 0x00
+#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+
+struct fw_mgmt_ioc_get_intf_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_get_backend_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+ __u8 status;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+ __u8 load_method;
+ __u8 status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct fw_mgmt_ioc_backend_fw_update {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+ __u8 status;
+} __packed;
+
+#define FW_MGMT_IOCTL_BASE 'S'
+#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
+
+1. FW_MGMT_IOC_GET_INTF_FW:
+
+ This ioctl shall be used by the user to get the version and firmware-tag of
+ the currently running Interface Firmware. All the fields of the 'struct
+ fw_mgmt_ioc_get_intf_version' are filled by the kernel.
+
+2. FW_MGMT_IOC_GET_BACKEND_FW:
+
+ This ioctl shall be used by the user to get the version of a currently
+ running Backend Interface Firmware identified by a firmware-tag. The user is
+ required to fill the 'firmware_tag' field of the 'struct
+ fw_mgmt_ioc_get_backend_version' in this case. The 'major' and 'minor' fields
+ are set by the kernel in
+ response.
+
+3. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+
+ This ioctl shall be used by the user to load an Interface Firmware package on
+ an Interface. The user needs to fill the 'firmware_tag' and 'load_method'
+ fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'. The 'status',
+ 'major' and 'minor' fields are set by the kernel in response.
+
+4. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+
+ This ioctl shall be used by the user to request an Interface to update a
+ Backend Interface Firmware. The user is required to fill the 'firmware_tag'
+ field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The
+ 'status' field is set by the kernel in response.
+
+5. FW_MGMT_IOC_SET_TIMEOUT_MS:
+
+ This ioctl shall be used by the user to increase the timeout interval within
+ which the firmware must get loaded by the Module. The default timeout is 1
+ second. The user needs to pass the timeout in milliseconds.
+
+6. FW_MGMT_IOC_MODE_SWITCH:
+
+ This ioctl shall be used by the user to mode-switch the module to the
+ previously loaded interface firmware. If no interface firmware has been loaded
+ previously, or if an unsuccessful FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE
+ operation is started after loading interface firmware, then the firmware core
+ won't allow a mode-switch.
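+
+A minimal call sequence for the interface-firmware ioctls (condensed from the
+firmware.c sample shipped in this directory; error handling and the usual
+includes are omitted, and the device path is only an example) may look like
+this:
+
+	struct fw_mgmt_ioc_get_intf_version info = { 0 };
+	unsigned int timeout = 10000;	/* ms */
+	int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
+
+	ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout);
+	ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &info);
+	printf("tag %s, version %u.%u\n", info.firmware_tag, info.major,
+	       info.minor);
+	close(fd);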
+
+
+Sysfs Interfaces - Authentication
+---------------------------------
+
+The Component Authentication Protocol interacts with Userspace using the
+character device interface. The character device will be present in /dev/
+directory and will be named gb-authenticate-<N>. The number <N> is assigned at
+runtime.
+
+Identifying the Character Device
+================================
+
+There can be multiple devices present in the /dev/ directory with the name
+gb-authenticate-N, and the user first needs to identify the character device
+used for authentication of a particular interface.
+
+The Authentication core creates a device of class 'gb_authenticate', which shall
+be used by the user to identify the right character device for it. The class
+device is created within the Bundle directory for a particular Interface.
+
+For example, this is how the class device may appear:
+
+/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0
+
+The last component of this path, gb-authenticate-0, is precisely the name of
+the char device, so the device node in this case will be:
+
+/dev/gb-authenticate-0.
+
+Operations on the Char device
+=============================
+
+The character device (/dev/gb-authenticate-0 in the above example) can be opened
+by the userspace application, which can then perform various 'ioctl' operations
+on the
+device. The device doesn't support any read/write operations.
+
+Following are the IOCTLs and their data structures available to the user:
+
+#define CAP_CERTIFICATE_MAX_SIZE 1600
+#define CAP_SIGNATURE_MAX_SIZE 320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC 0x00000001
+#define CAP_CERT_IMS_EASC 0x00000002
+#define CAP_CERT_IMS_EARC 0x00000003
+#define CAP_CERT_IMS_IAPC 0x00000004
+#define CAP_CERT_IMS_IASC 0x00000005
+#define CAP_CERT_IMS_IARC 0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND 0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI 0x00000001
+#define CAP_AUTH_IMS_SEC 0x00000002
+#define CAP_AUTH_IMS_RSA 0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+ __u8 uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+ __u32 certificate_class;
+ __u32 certificate_id;
+
+ __u8 result_code;
+ __u32 cert_size;
+ __u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+ __u32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+
+ __u8 result_code;
+ __u8 response[64];
+ __u32 signature_size;
+ __u8 signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE 'C'
+#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+
+1. CAP_IOC_GET_ENDPOINT_UID:
+
+ This ioctl shall be used by the user to get the endpoint UID associated with
+ the Interface. All the fields of the 'struct cap_ioc_get_endpoint_uid' are
+ filled by the kernel.
+
+2. CAP_IOC_GET_IMS_CERTIFICATE:
+
+ This ioctl shall be used by the user to retrieve one of the available
+ cryptographic certificates held by the Interface for use in Component
+ Authentication. The user is required to fill the 'certificate_class' and
+ 'certificate_id' field of the 'struct cap_ioc_get_ims_certificate' in this
+ case. The other fields will be set by the kernel in response. The first
+ 'cert_size' bytes of the 'certificate' shall be read by the user and others
+ must be discarded.
+
+3. CAP_IOC_AUTHENTICATE:
+
+ This ioctl shall be used by the user to authenticate the Module attached to
+ an Interface. The user needs to fill the 'auth_type', 'uid', and 'challenge'
+ fields of the 'struct cap_ioc_authenticate'. The other fields will be set by
+ the kernel in response. The first 'signature_size' bytes of the 'signature'
+ shall be read by the user and others must be discarded.
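+
+A minimal call sequence (condensed from the authenticate.c sample shipped in
+this directory; error handling and the usual includes are omitted, and the
+device path and auth_type are only examples) may look like this:
+
+	struct cap_ioc_get_endpoint_uid uid = { 0 };
+	struct cap_ioc_authenticate auth = { .auth_type = CAP_AUTH_IMS_PRI };
+	int fd = open("/dev/gb-authenticate-0", O_RDWR);
+
+	ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
+	memcpy(auth.uid, uid.uid, sizeof(auth.uid));
+	ioctl(fd, CAP_IOC_AUTHENTICATE, &auth);
+	printf("result %02x, signature size %u\n", auth.result_code,
+	       auth.signature_size);
+	close(fd);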
+
+
+Sysfs Interfaces - Firmware Download
+------------------------------------
+
+The Firmware Download Protocol uses the existing Linux kernel firmware class,
+and the interfaces it provides to userspace are described in:
+Documentation/firmware_class/.
+
+
+Sysfs Interfaces - SPI Flash
+----------------------------
+
+The SPI flash is exposed in userspace as an MTD device and is created
+within the Bundle directory. For example, this is what the path may look like:
+
+$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
+mtd0 mtd0ro
+
+
+Sample Applications
+-------------------
+
+The current directory also provides a firmware.c test application, which can be
+referenced while developing a userspace application that talks to the
+firmware-management protocol.
+
+The current directory also provides an authenticate.c test application, which
+can be referenced while developing a userspace application that talks to the
+component authentication protocol.
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
new file mode 100644
index 000000000000..ff9382401030
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -0,0 +1,262 @@
+/*
+ * Sample code to test firmware-management protocol
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "../../greybus_firmware.h"
+
+#define FW_DEV_DEFAULT "/dev/gb-fw-mgmt-0"
+#define FW_TAG_INT_DEFAULT "s3f"
+#define FW_TAG_BCND_DEFAULT "bf_01"
+#define FW_UPDATE_TYPE_DEFAULT 0
+#define FW_TIMEOUT_DEFAULT 10000
+
+static const char *firmware_tag;
+static const char *fwdev = FW_DEV_DEFAULT;
+static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
+static int fw_timeout = FW_TIMEOUT_DEFAULT;
+
+static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+static struct fw_mgmt_ioc_backend_fw_update backend_update;
+
+static void usage(void)
+{
+ printf("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n");
+}
+
+static int update_intf_firmware(int fd)
+{
+ int ret;
+
+ /* Get Interface Firmware Version */
+ printf("Get Interface Firmware Version\n");
+
+ ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
+ if (ret < 0) {
+ printf("Failed to get interface firmware version: %s (%d)\n",
+ fwdev, ret);
+ return -1;
+ }
+
+ printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
+ intf_fw_info.firmware_tag, intf_fw_info.major,
+ intf_fw_info.minor);
+
+ /* Try Interface Firmware load over Unipro */
+ printf("Loading Interface Firmware\n");
+
+ intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
+ intf_load.status = 0;
+ intf_load.major = 0;
+ intf_load.minor = 0;
+
+ strncpy((char *)&intf_load.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+ ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
+ if (ret < 0) {
+ printf("Failed to load interface firmware: %s (%d)\n", fwdev,
+ ret);
+ return -1;
+ }
+
+ if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
+ intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
+ printf("Load status says loading failed: %d\n",
+ intf_load.status);
+ return -1;
+ }
+
+ printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
+ firmware_tag, intf_load.major, intf_load.minor,
+ intf_load.status);
+
+ /* Initiate Mode-switch to the newly loaded firmware */
+ printf("Initiate Mode switch\n");
+
+ ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
+ if (ret < 0)
+ printf("Failed to initiate mode-switch (%d)\n", ret);
+
+ return ret;
+}
+
+static int update_backend_firmware(int fd)
+{
+ int ret;
+
+ /* Get Backend Firmware Version */
+ printf("Getting Backend Firmware Version\n");
+
+ strncpy((char *)&backend_fw_info.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_version:
+ ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
+ if (ret < 0) {
+ printf("Failed to get backend firmware version: %s (%d)\n",
+ fwdev, ret);
+ return -1;
+ }
+
+ printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
+ backend_fw_info.firmware_tag, backend_fw_info.major,
+ backend_fw_info.minor, backend_fw_info.status);
+
+ if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
+ goto retry_fw_version;
+
+ if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS)
+ && (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
+ printf("Failed to get backend firmware version: %s (%d)\n",
+ fwdev, backend_fw_info.status);
+ return -1;
+ }
+
+ /* Try Backend Firmware Update over Unipro */
+ printf("Updating Backend Firmware\n");
+
+ strncpy((char *)&backend_update.firmware_tag, firmware_tag,
+ GB_FIRMWARE_U_TAG_MAX_SIZE);
+
+retry_fw_update:
+ backend_update.status = 0;
+
+ ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
+ if (ret < 0) {
+ printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
+ return -1;
+ }
+
+ if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
+ printf("Retrying firmware update: %d\n", backend_update.status);
+ goto retry_fw_update;
+ }
+
+ if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
+ printf("Load status says loading failed: %d\n",
+ backend_update.status);
+ } else {
+ printf("Backend Firmware (%s) Load done: status: %d\n",
+ firmware_tag, backend_update.status);
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int fd, ret;
+
+ if (argc > 1 &&
+ (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
+ usage();
+ return -1;
+ }
+
+ if (argc > 1)
+ fwdev = argv[1];
+
+ if (argc > 2)
+ sscanf(argv[2], "%u", &fw_update_type);
+
+ if (argc > 3) {
+ firmware_tag = argv[3];
+ } else if (!fw_update_type) {
+ firmware_tag = FW_TAG_INT_DEFAULT;
+ } else {
+ firmware_tag = FW_TAG_BCND_DEFAULT;
+ }
+
+ if (argc > 4)
+ sscanf(argv[4], "%u", &fw_timeout);
+
+ printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
+ fwdev, fw_update_type == 0 ? "interface" : "backend",
+ firmware_tag, fw_timeout);
+
+ printf("Opening %s firmware management device\n", fwdev);
+
+ fd = open(fwdev, O_RDWR);
+ if (fd < 0) {
+ printf("Failed to open: %s\n", fwdev);
+ return -1;
+ }
+
+ /* Set Timeout */
+ printf("Setting timeout to %u ms\n", fw_timeout);
+
+ ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
+ if (ret < 0) {
+ printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
+ ret = -1;
+ goto close_fd;
+ }
+
+ if (!fw_update_type)
+ ret = update_intf_firmware(fd);
+ else
+ ret = update_backend_firmware(fd);
+
+close_fd:
+ close(fd);
+
+ return ret;
+}
diff --git a/drivers/staging/greybus/Documentation/sysfs-bus-greybus b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
new file mode 100644
index 000000000000..2e998966cbe1
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
@@ -0,0 +1,275 @@
+What: /sys/bus/greybus/devices/greybusN
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The "root" greybus device for the Greybus device tree, or bus,
+ where N is a dynamically assigned 1-based id.
+
+What: /sys/bus/greybus/devices/greybusN/bus_id
+Date: April 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of the "root" greybus device, or bus.
+
+What: /sys/bus/greybus/devices/N-M
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ A Module M on the bus N, where M is the 1-byte interface
+ ID of the module's primary interface.
+
+What: /sys/bus/greybus/devices/N-M/eject
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Writing a non-zero argument to this attribute disables the
+ module's interfaces before physically ejecting it.
+
+What: /sys/bus/greybus/devices/N-M/module_id
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of a Greybus module, corresponding to the ID of its
+ primary interface.
+
+What: /sys/bus/greybus/devices/N-M/num_interfaces
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The number of interfaces of a module.
+
+What: /sys/bus/greybus/devices/N-M.I
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ An Interface I on the bus N and module N-M, where I is the
+ 1-byte interface ID.
+
+What: /sys/bus/greybus/devices/N-M.I/current_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Current measurement of the interface in microamps (uA)
+
+What: /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Unipro Device Descriptor Block Level 1 manufacturer ID for the
+ greybus Interface.
+
+What: /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Unipro Device Descriptor Block Level 1 product ID for the
+ greybus Interface.
+
+What: /sys/bus/greybus/devices/N-M.I/interface_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/interface_type
+Date: June 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The type of a Greybus interface; "dummy", "unipro", "greybus",
+ or "unknown".
+
+What: /sys/bus/greybus/devices/N-M.I/power_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Power measurement of the interface in microwatts (uW)
+
+What: /sys/bus/greybus/devices/N-M.I/power_state
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ This file reflects the power state of a Greybus interface. If
+ the value read from it is "on", then power is currently
+ supplied to the interface. Otherwise it will read "off" and
+ power is currently not supplied to the interface.
+
+ If the value read is "off", then writing "on" (or '1', 'y',
+ 'Y') to this file will enable power to the interface and an
+ attempt to boot and possibly enumerate it will be made. Note
+ that on errors, the interface will again be powered down.
+
+ If the value read is "on", then writing "off" (or '0', 'n',
+ 'N') to this file will power down the interface.
+
+What: /sys/bus/greybus/devices/N-M.I/product_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Product ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/serial_number
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Serial Number of the Greybus interface, represented by a 64 bit
+ hexadecimal number.
+
+What: /sys/bus/greybus/devices/N-M.I/vendor_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Vendor ID of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I/voltage_now
+Date: March 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Voltage measurement of the interface in microvolts (uV)
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Abstract control device for interface I that represents the
+ current mode of an enumerated Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl/product_string
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Product ID string of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Vendor ID string of a Greybus interface.
+
+What: /sys/bus/greybus/devices/N-M.I.B
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ A bundle B on the Interface I, where B is a 1-byte
+ number representing the bundle.
+
+What: /sys/bus/greybus/devices/N-M.I.B/bundle_class
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The greybus class of the bundle B.
+
+What: /sys/bus/greybus/devices/N-M.I.B/bundle_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The interface-unique id of the bundle B.
+
+What: /sys/bus/greybus/devices/N-M.I.B/gpbX
+Date: April 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The General Purpose Bridged PHY device of the bundle B,
+ where X is a dynamically assigned 0-based id.
+
+What: /sys/bus/greybus/devices/N-M.I.B/state
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ A bundle has a state that is managed by the userspace
+ Endo process. This file allows that Endo to signal
+ other Android HALs that the state of the bundle has
+ changed to a specific value. When written to, any
+ process watching the file will be woken up, and the new
+ value can be read. It's a "poor-man's IPC", yes, but
+ simplifies the Android userspace code immensely.
+
+What: /sys/bus/greybus/devices/N-svc
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The singleton SVC device of bus N.
+
+What: /sys/bus/greybus/devices/N-svc/ap_intf_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The AP interface ID, a 1-byte non-zero integer which
+ defines the position of the AP module on the frame.
+ The interface positions are defined in the GMP
+ Module Developer Kit.
+
+What: /sys/bus/greybus/devices/N-svc/endo_id
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The Endo ID, which is a 2-byte hexadecimal value
+ defined by the Endo layout scheme, documented in
+ the GMP Module Developer Kit.
+
+What: /sys/bus/greybus/devices/N-svc/intf_eject
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Write the number of the interface that you wish to
+ forcibly eject from the system.
+
+What: /sys/bus/greybus/devices/N-svc/version
+Date: October 2015
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ The version number of the firmware in the SVC device.
+
+What: /sys/bus/greybus/devices/N-svc/watchdog
+Date: October 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ Whether the SVC watchdog is enabled or not. Writing 0 to this
+ file will disable the watchdog; writing 1 will enable it.
+
+What: /sys/bus/greybus/devices/N-svc/watchdog_action
+Date: July 2016
+KernelVersion: 4.XX
+Contact: Greg Kroah-Hartman <greg@kroah.com>
+Description:
+ This attribute indicates the action to be performed upon SVC
+ watchdog bite.
+
+ The action can be either "reset" or "panic". Writing one of
+ these values will change the behavior on an SVC watchdog bite.
+ The default value is "reset".
+
+ "reset" means the UniPro subsystem is to be reset.
+
+ "panic" means SVC watchdog bite will cause kernel to panic.
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
new file mode 100644
index 000000000000..50de2d72dde0
--- /dev/null
+++ b/drivers/staging/greybus/Kconfig
@@ -0,0 +1,219 @@
+menuconfig GREYBUS
+ tristate "Greybus support"
+ depends on SYSFS
+ ---help---
+ This option enables the Greybus driver core. Greybus is a
+ hardware protocol that was designed to provide Unipro with a
+ sane application layer. It was originally designed for the
+ ARA project, a modular phone system, but has shown up in other
+ phones, and can be tunneled over other buses in order to
+ control hardware devices.
+
+ Say Y here to enable support for these types of drivers.
+
+ To compile this code as a module, choose M here: the module
+ will be called greybus.ko
+
+if GREYBUS
+
+config GREYBUS_ES2
+ tristate "Greybus ES3 USB host controller"
+ depends on USB
+ ---help---
+ Select this option if you have a Toshiba ES3 USB device that
+ acts as a Greybus "host controller". This device is a bridge
+ from a USB device to a Unipro network.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-es2.ko
+
+config GREYBUS_AUDIO
+ tristate "Greybus Audio Class driver"
+ depends on SOUND
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Audio Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-audio.ko
+
+config GREYBUS_BOOTROM
+ tristate "Greybus Bootrom Class driver"
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Bootrom Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-bootrom.ko
+
+config GREYBUS_CAMERA
+ tristate "Greybus Camera Class driver"
+ depends on MEDIA_SUPPORT && LEDS_CLASS_FLASH && BROKEN
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Camera Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-camera.ko
+
+config GREYBUS_FIRMWARE
+ tristate "Greybus Firmware Download Class driver"
+ depends on SPI
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Firmware Download Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-firmware.ko
+
+config GREYBUS_HID
+ tristate "Greybus HID Class driver"
+ depends on HID && INPUT
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus HID Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-hid.ko
+
+config GREYBUS_LIGHT
+ tristate "Greybus LED Class driver"
+ depends on LEDS_CLASS
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus LED Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-light.ko
+
+config GREYBUS_LOG
+ tristate "Greybus Debug Log Class driver"
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Debug Log Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-log.ko
+
+config GREYBUS_LOOPBACK
+ tristate "Greybus Loopback Class driver"
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Loopback Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-loopback.ko
+
+config GREYBUS_POWER
+ tristate "Greybus Powersupply Class driver"
+ depends on POWER_SUPPLY
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Powersupply Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-power-supply.ko
+
+config GREYBUS_RAW
+ tristate "Greybus Raw Class driver"
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Raw Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-raw.ko
+
+config GREYBUS_VIBRATOR
+ tristate "Greybus Vibrator Motor Class driver"
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus Vibrator Motor Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-vibrator.ko
+
+menuconfig GREYBUS_BRIDGED_PHY
+ tristate "Greybus Bridged PHY Class drivers"
+ ---help---
+ Select this option to pick from a variety of Greybus Bridged
+ PHY class drivers. These drivers emulate a number of
+ different "traditional" busses by tunneling them over Greybus.
+ Examples of this include serial, SPI, USB, and others.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-phy.ko
+
+if GREYBUS_BRIDGED_PHY
+
+config GREYBUS_GPIO
+ tristate "Greybus GPIO Bridged PHY driver"
+ depends on GPIOLIB
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus GPIO Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-gpio.ko
+
+config GREYBUS_I2C
+ tristate "Greybus I2C Bridged PHY driver"
+ depends on I2C
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus I2C Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-i2c.ko
+
+config GREYBUS_PWM
+ tristate "Greybus PWM Bridged PHY driver"
+ depends on PWM
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus PWM Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-pwm.ko
+
+config GREYBUS_SDIO
+ tristate "Greybus SDIO Bridged PHY driver"
+ depends on MMC
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus SDIO Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-sdio.ko
+
+config GREYBUS_SPI
+ tristate "Greybus SPI Bridged PHY driver"
+ depends on SPI
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus SPI Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-spi.ko
+
+config GREYBUS_UART
+ tristate "Greybus UART Bridged PHY driver"
+ depends on TTY
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus UART Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-uart.ko
+
+config GREYBUS_USB
+ tristate "Greybus USB Host Bridged PHY driver"
+ depends on USB
+ ---help---
+ Select this option if you have a device that follows the
+ Greybus USB Host Bridged PHY Class specification.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-usb.ko
+
+endif # GREYBUS_BRIDGED_PHY
+endif # GREYBUS
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
new file mode 100644
index 000000000000..f337b7b70782
--- /dev/null
+++ b/drivers/staging/greybus/Makefile
@@ -0,0 +1,96 @@
+# Greybus core
+greybus-y := core.o \
+ debugfs.o \
+ hd.o \
+ manifest.o \
+ module.o \
+ interface.o \
+ bundle.o \
+ connection.o \
+ control.o \
+ svc.o \
+ svc_watchdog.o \
+ operation.o \
+ timesync.o \
+ timesync_platform.o
+
+obj-$(CONFIG_GREYBUS) += greybus.o
+
+# needed for trace events
+ccflags-y += -I$(src)
+
+
+# Greybus Host controller drivers
+gb-es2-y := es2.o
+
+obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
+
+# Greybus class drivers
+gb-bootrom-y := bootrom.o
+gb-camera-y := camera.o
+gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
+gb-spilib-y := spilib.o
+gb-hid-y := hid.o
+gb-light-y := light.o
+gb-log-y := log.o
+gb-loopback-y := loopback.o
+gb-power-supply-y := power_supply.o
+gb-raw-y := raw.o
+gb-vibrator-y := vibrator.o
+
+obj-$(CONFIG_GREYBUS_BOOTROM) += gb-bootrom.o
+obj-$(CONFIG_GREYBUS_CAMERA) += gb-camera.o
+obj-$(CONFIG_GREYBUS_FIRMWARE) += gb-firmware.o gb-spilib.o
+obj-$(CONFIG_GREYBUS_HID) += gb-hid.o
+obj-$(CONFIG_GREYBUS_LIGHT) += gb-light.o
+obj-$(CONFIG_GREYBUS_LOG) += gb-log.o
+obj-$(CONFIG_GREYBUS_LOOPBACK) += gb-loopback.o
+obj-$(CONFIG_GREYBUS_POWER) += gb-power-supply.o
+obj-$(CONFIG_GREYBUS_RAW) += gb-raw.o
+obj-$(CONFIG_GREYBUS_VIBRATOR) += gb-vibrator.o
+
+# Greybus Audio is a bunch of modules
+gb-audio-module-y := audio_module.o audio_topology.o
+gb-audio-codec-y := audio_codec.o
+gb-audio-gb-y := audio_gb.o
+gb-audio-apbridgea-y := audio_apbridgea.o
+gb-audio-manager-y := audio_manager.o audio_manager_module.o
+
+# Greybus Audio sysfs helpers can be useful when debugging
+#GB_AUDIO_MANAGER_SYSFS ?= true
+#ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
+#gb-audio-manager-y += audio_manager_sysfs.o
+#ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
+#endif
+
+obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-codec.o
+obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-module.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-gb.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-apbridgea.o
+obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-manager.o
+
+
+# Greybus Bridged PHY drivers
+gb-gbphy-y := gbphy.o
+gb-gpio-y := gpio.o
+gb-i2c-y := i2c.o
+gb-pwm-y := pwm.o
+gb-sdio-y := sdio.o
+gb-spi-y := spi.o
+gb-uart-y := uart.o
+gb-usb-y := usb.o
+
+obj-$(CONFIG_GREYBUS_BRIDGED_PHY) += gb-gbphy.o
+obj-$(CONFIG_GREYBUS_GPIO) += gb-gpio.o
+obj-$(CONFIG_GREYBUS_I2C) += gb-i2c.o
+obj-$(CONFIG_GREYBUS_PWM) += gb-pwm.o
+obj-$(CONFIG_GREYBUS_SDIO) += gb-sdio.o
+obj-$(CONFIG_GREYBUS_SPI) += gb-spi.o gb-spilib.o
+obj-$(CONFIG_GREYBUS_UART) += gb-uart.o
+obj-$(CONFIG_GREYBUS_USB) += gb-usb.o
+
+
+# Greybus Platform driver
+gb-arche-y := arche-platform.o arche-apb-ctrl.o
+
+obj-$(CONFIG_USB_HSIC_USB3613) += gb-arche.o
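+
+# Editorial note (illustrative, not part of the original Makefile): with the
+# relevant CONFIG_GREYBUS* options enabled as modules, these objects would
+# normally be built from the top of a configured kernel tree with something
+# like:
+#     make M=drivers/staging/greybus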
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
new file mode 100644
index 000000000000..70323aa11f24
--- /dev/null
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -0,0 +1,522 @@
+/*
+ * Arche Platform driver to control APB.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include "arche_platform.h"
+
+
+struct arche_apb_ctrl_drvdata {
+ /* Control GPIO signals to and from AP <=> AP Bridges */
+ int resetn_gpio;
+ int boot_ret_gpio;
+ int pwroff_gpio;
+ int wake_in_gpio;
+ int wake_out_gpio;
+ int pwrdn_gpio;
+
+ enum arche_platform_state state;
+ bool init_disabled;
+
+ struct regulator *vcore;
+ struct regulator *vio;
+
+ int clk_en_gpio;
+ struct clk *clk;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pin_default;
+
+ /* V2: SPI Bus control */
+ int spi_en_gpio;
+ bool spi_en_polarity_high;
+};
+
+/*
+ * Note that these low-level APIs are active-high
+ */
+static inline void deassert_reset(unsigned int gpio)
+{
+ gpio_set_value(gpio, 1);
+}
+
+static inline void assert_reset(unsigned int gpio)
+{
+ gpio_set_value(gpio, 0);
+}
+
+/*
+ * Note: Please do not modify the sequence below, as it follows the spec
+ */
+static int coldboot_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret;
+
+ if (apb->init_disabled ||
+ apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return 0;
+
+ /* Hold APB in reset state */
+ assert_reset(apb->resetn_gpio);
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+ gpio_is_valid(apb->spi_en_gpio))
+ devm_gpio_free(dev, apb->spi_en_gpio);
+
+ /* Enable power to APB */
+ if (!IS_ERR(apb->vcore)) {
+ ret = regulator_enable(apb->vcore);
+ if (ret) {
+ dev_err(dev, "failed to enable core regulator\n");
+ return ret;
+ }
+ }
+
+ if (!IS_ERR(apb->vio)) {
+ ret = regulator_enable(apb->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable IO regulator\n");
+ return ret;
+ }
+ }
+
+ apb_bootret_deassert(dev);
+
+ /* On DB3 the clock was not mandatory */
+ if (gpio_is_valid(apb->clk_en_gpio))
+ gpio_set_value(apb->clk_en_gpio, 1);
+
+ usleep_range(100, 200);
+
+ /* deassert reset to APB : Active-low signal */
+ deassert_reset(apb->resetn_gpio);
+
+ apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
+
+ return 0;
+}
+
+static int fw_flashing_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret;
+
+ if (apb->init_disabled ||
+ apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return 0;
+
+ ret = regulator_enable(apb->vcore);
+ if (ret) {
+ dev_err(dev, "failed to enable core regulator\n");
+ return ret;
+ }
+
+ ret = regulator_enable(apb->vio);
+ if (ret) {
+ dev_err(dev, "failed to enable IO regulator\n");
+ return ret;
+ }
+
+ if (gpio_is_valid(apb->spi_en_gpio)) {
+ unsigned long flags;
+
+ if (apb->spi_en_polarity_high)
+ flags = GPIOF_OUT_INIT_HIGH;
+ else
+ flags = GPIOF_OUT_INIT_LOW;
+
+ ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
+ flags, "apb_spi_en");
+ if (ret) {
+ dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
+ apb->spi_en_gpio);
+ return ret;
+ }
+ }
+
+ /* For flashing, the device should be held in the reset state */
+ assert_reset(apb->resetn_gpio);
+ apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
+
+ return 0;
+}
+
+static int standby_boot_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+ if (apb->init_disabled)
+ return 0;
+
+ /* Even if it is in the OFF state, we do not want to change the state */
+ if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
+ apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return 0;
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+ gpio_is_valid(apb->spi_en_gpio))
+ devm_gpio_free(dev, apb->spi_en_gpio);
+
+ /*
+ * As per the WDM spec, do nothing here.
+ *
+ * Quoting the WDM spec:
+ * - A falling edge on POWEROFF_L is detected (a)
+ * - WDM enters standby mode, but no output signals are changed
+ */
+
+ /* TODO: POWEROFF_L is input to WDM module */
+ apb->state = ARCHE_PLATFORM_STATE_STANDBY;
+ return 0;
+}
+
+static void poweroff_seq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+
+ if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return;
+
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
+ gpio_is_valid(apb->spi_en_gpio))
+ devm_gpio_free(dev, apb->spi_en_gpio);
+
+ /* disable the clock */
+ if (gpio_is_valid(apb->clk_en_gpio))
+ gpio_set_value(apb->clk_en_gpio, 0);
+
+ if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
+ regulator_disable(apb->vcore);
+
+ if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
+ regulator_disable(apb->vio);
+
+ /* As part of exit, put APB back in reset state */
+ assert_reset(apb->resetn_gpio);
+ apb->state = ARCHE_PLATFORM_STATE_OFF;
+
+ /* TODO: May have to send an event to SVC about this exit */
+}
+
+void apb_bootret_assert(struct device *dev)
+{
+ struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+ gpio_set_value(apb->boot_ret_gpio, 1);
+}
+
+void apb_bootret_deassert(struct device *dev)
+{
+ struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+ gpio_set_value(apb->boot_ret_gpio, 0);
+}
+
+int apb_ctrl_coldboot(struct device *dev)
+{
+ return coldboot_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_fw_flashing(struct device *dev)
+{
+ return fw_flashing_seq(to_platform_device(dev));
+}
+
+int apb_ctrl_standby_boot(struct device *dev)
+{
+ return standby_boot_seq(to_platform_device(dev));
+}
+
+void apb_ctrl_poweroff(struct device *dev)
+{
+ poweroff_seq(to_platform_device(dev));
+}
+
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
+ int ret = 0;
+ bool is_disabled;
+
+ if (sysfs_streq(buf, "off")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_OFF)
+ return count;
+
+ poweroff_seq(pdev);
+ } else if (sysfs_streq(buf, "active")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return count;
+
+ poweroff_seq(pdev);
+ is_disabled = apb->init_disabled;
+ apb->init_disabled = false;
+ ret = coldboot_seq(pdev);
+ if (ret)
+ apb->init_disabled = is_disabled;
+ } else if (sysfs_streq(buf, "standby")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
+ return count;
+
+ ret = standby_boot_seq(pdev);
+ } else if (sysfs_streq(buf, "fw_flashing")) {
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return count;
+
+ /* First we want to make sure we power off everything
+ * and then enter FW flashing state */
+ poweroff_seq(pdev);
+ ret = fw_flashing_seq(pdev);
+ } else {
+ dev_err(dev, "unknown state\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
+
+ switch (apb->state) {
+ case ARCHE_PLATFORM_STATE_OFF:
+ return sprintf(buf, "off%s\n",
+ apb->init_disabled ? ",disabled" : "");
+ case ARCHE_PLATFORM_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case ARCHE_PLATFORM_STATE_STANDBY:
+ return sprintf(buf, "standby\n");
+ case ARCHE_PLATFORM_STATE_FW_FLASHING:
+ return sprintf(buf, "fw_flashing\n");
+ default:
+ return sprintf(buf, "unknown state\n");
+ }
+}
+
+static DEVICE_ATTR_RW(state);
+
+static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
+ struct arche_apb_ctrl_drvdata *apb)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+ if (apb->resetn_gpio < 0) {
+ dev_err(dev, "failed to get reset gpio\n");
+ return apb->resetn_gpio;
+ }
+ ret = devm_gpio_request_one(dev, apb->resetn_gpio,
+ GPIOF_OUT_INIT_LOW, "apb-reset");
+ if (ret) {
+ dev_err(dev, "Failed requesting reset gpio %d\n",
+ apb->resetn_gpio);
+ return ret;
+ }
+
+ apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
+ if (apb->boot_ret_gpio < 0) {
+ dev_err(dev, "failed to get boot retention gpio\n");
+ return apb->boot_ret_gpio;
+ }
+ ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
+ GPIOF_OUT_INIT_LOW, "boot retention");
+ if (ret) {
+ dev_err(dev, "Failed requesting bootret gpio %d\n",
+ apb->boot_ret_gpio);
+ return ret;
+ }
+
+ /* It's not mandatory to support the power management interface */
+ apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
+ if (apb->pwroff_gpio < 0) {
+ dev_err(dev, "failed to get power off gpio\n");
+ return apb->pwroff_gpio;
+ }
+ ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
+ GPIOF_IN, "pwroff_n");
+ if (ret) {
+ dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
+ apb->pwroff_gpio);
+ return ret;
+ }
+
+ /* Do not make clock mandatory as of now (for DB3) */
+ apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
+ if (apb->clk_en_gpio < 0) {
+ dev_warn(dev, "failed to get clock en gpio\n");
+ } else if (gpio_is_valid(apb->clk_en_gpio)) {
+ ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
+ GPIOF_OUT_INIT_LOW, "apb_clk_en");
+ if (ret) {
+ dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
+ apb->clk_en_gpio);
+ return ret;
+ }
+ }
+
+ apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
+ if (apb->pwrdn_gpio < 0)
+ dev_warn(dev, "failed to get power down gpio\n");
+
+ /* Regulators are optional, as we may have fixed supply coming in */
+ apb->vcore = devm_regulator_get(dev, "vcore");
+ if (IS_ERR(apb->vcore))
+ dev_warn(dev, "no core regulator found\n");
+
+ apb->vio = devm_regulator_get(dev, "vio");
+ if (IS_ERR(apb->vio))
+ dev_warn(dev, "no IO regulator found\n");
+
+ apb->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(apb->pinctrl)) {
+ dev_err(&pdev->dev, "could not get pinctrl handle\n");
+ return PTR_ERR(apb->pinctrl);
+ }
+ apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
+ if (IS_ERR(apb->pin_default)) {
+ dev_err(&pdev->dev, "could not get default pin state\n");
+ return PTR_ERR(apb->pin_default);
+ }
+
+ /* Only applicable for platform >= V2 */
+ apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
+ if (apb->spi_en_gpio >= 0) {
+ if (of_property_read_bool(pdev->dev.of_node,
+ "spi-en-active-high"))
+ apb->spi_en_polarity_high = true;
+ }
+
+ return 0;
+}
+
+static int arche_apb_ctrl_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct arche_apb_ctrl_drvdata *apb;
+ struct device *dev = &pdev->dev;
+
+ apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return -ENOMEM;
+
+ ret = apb_ctrl_get_devtree_data(pdev, apb);
+ if (ret) {
+ dev_err(dev, "failed to get apb devicetree data %d\n", ret);
+ return ret;
+ }
+
+ /* Initially set APB to OFF state */
+ apb->state = ARCHE_PLATFORM_STATE_OFF;
+ /* Check whether device needs to be enabled on boot */
+ if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
+ apb->init_disabled = true;
+
+ platform_set_drvdata(pdev, apb);
+
+ /* Create sysfs interface to allow user to change state dynamically */
+ ret = device_create_file(dev, &dev_attr_state);
+ if (ret) {
+ dev_err(dev, "failed to create state file in sysfs\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Device registered successfully\n");
+ return 0;
+}
+
+static int arche_apb_ctrl_remove(struct platform_device *pdev)
+{
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ poweroff_seq(pdev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int arche_apb_ctrl_suspend(struct device *dev)
+{
+ /*
+ * If the timing profile permits, we may shut down the bridge
+ * completely.
+ *
+ * TODO: sequence ??
+ *
+ * Also, need to make sure we meet precondition for unipro suspend
+ * Precondition: Definition ???
+ */
+ return 0;
+}
+
+static int arche_apb_ctrl_resume(struct device *dev)
+{
+ /*
+ * At least for ES2 we have to meet the delay requirement between
+ * the UniPro switch and AP bridge init, depending on whether the bridge
+ * is in the OFF state or the standby state.
+ *
+ * Based on whether bridge is in standby or OFF state we may have to
+ * assert multiple signals. Please refer to WDM spec, for more info.
+ *
+ */
+ return 0;
+}
+
+static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
+{
+ apb_ctrl_poweroff(&pdev->dev);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
+ arche_apb_ctrl_resume);
+
+static const struct of_device_id arche_apb_ctrl_of_match[] = {
+ { .compatible = "usbffff,2", },
+ { },
+};
+
+static struct platform_driver arche_apb_ctrl_device_driver = {
+ .probe = arche_apb_ctrl_probe,
+ .remove = arche_apb_ctrl_remove,
+ .shutdown = arche_apb_ctrl_shutdown,
+ .driver = {
+ .name = "arche-apb-ctrl",
+ .pm = &arche_apb_ctrl_pm_ops,
+ .of_match_table = arche_apb_ctrl_of_match,
+ }
+};
+
+int __init arche_apb_init(void)
+{
+ return platform_driver_register(&arche_apb_ctrl_device_driver);
+}
+
+void __exit arche_apb_exit(void)
+{
+ platform_driver_unregister(&arche_apb_ctrl_device_driver);
+}
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
new file mode 100644
index 000000000000..e36ee984485b
--- /dev/null
+++ b/drivers/staging/greybus/arche-platform.c
@@ -0,0 +1,827 @@
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include "arche_platform.h"
+#include "greybus.h"
+
+#include <linux/usb/usb3613.h>
+
+#define WD_COLDBOOT_PULSE_WIDTH_MS 30
+
+enum svc_wakedetect_state {
+ WD_STATE_IDLE, /* Default state = pulled high/low */
+ WD_STATE_BOOT_INIT, /* WD = falling edge (low) */
+ WD_STATE_COLDBOOT_TRIG, /* WD = rising edge (high), > 30msec */
+ WD_STATE_STANDBYBOOT_TRIG, /* As of now not used ?? */
+ WD_STATE_COLDBOOT_START, /* Cold boot process started */
+ WD_STATE_STANDBYBOOT_START, /* Not used */
+ WD_STATE_TIMESYNC,
+};
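+
+/*
+ * Editorial note (illustrative, derived from the wake/detect IRQ handlers
+ * below): the expected cold-boot flow through these states is roughly
+ *
+ *     IDLE --falling edge--> BOOT_INIT --rising edge after >30 msec-->
+ *     COLDBOOT_TRIG --IRQ thread--> COLDBOOT_START --done--> IDLE
+ *
+ * A rising edge within 30 msec would indicate a standby boot, which is not
+ * implemented, so the handler simply returns the state to IDLE.
+ */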
+
+struct arche_platform_drvdata {
+ /* Control GPIO signals to and from AP <=> SVC */
+ int svc_reset_gpio;
+ bool is_reset_act_hi;
+ int svc_sysboot_gpio;
+ int wake_detect_gpio; /* bi-dir, maps to WAKE_MOD & WAKE_FRAME signals */
+
+ enum arche_platform_state state;
+
+ int svc_refclk_req;
+ struct clk *svc_ref_clk;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pin_default;
+
+ int num_apbs;
+
+ enum svc_wakedetect_state wake_detect_state;
+ int wake_detect_irq;
+ spinlock_t wake_lock; /* Protect wake_detect_state */
+ struct mutex platform_state_mutex; /* Protect state */
+ wait_queue_head_t wq; /* WQ for arche_pdata->state */
+ unsigned long wake_detect_start;
+ struct notifier_block pm_notifier;
+
+ struct device *dev;
+ struct gb_timesync_svc *timesync_svc_pdata;
+};
+
+static int arche_apb_bootret_assert(struct device *dev, void *data)
+{
+ apb_bootret_assert(dev);
+ return 0;
+}
+
+static int arche_apb_bootret_deassert(struct device *dev, void *data)
+{
+ apb_bootret_deassert(dev);
+ return 0;
+}
+
+/* Requires calling context to hold arche_pdata->platform_state_mutex */
+static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
+ enum arche_platform_state state)
+{
+ arche_pdata->state = state;
+}
+
+/*
+ * arche_platform_change_state: Change the operational state
+ *
+ * This exported function allows external drivers to change the state
+ * of the arche-platform driver.
+ * Note that this function only supports transitions between the two states
+ * described below, with limited functionality.
+ *
+ * - ARCHE_PLATFORM_STATE_TIME_SYNC:
+ * Once set, allows timesync operations between the SVC and the AP and makes
+ * sure that the arche-platform driver ignores any subsequent events/pulses
+ * from the SVC over wake/detect.
+ *
+ * - ARCHE_PLATFORM_STATE_ACTIVE:
+ * Puts the driver back into the active state, where any pulse from the SVC
+ * on the wake/detect line would trigger either a cold or standby boot.
+ * Note: A transition request from this function does not itself trigger a
+ * cold/standby boot. It only puts the driver's bookkeeping variable back
+ * into the ACTIVE state and restores the interrupt.
+ *
+ * Returns -ENODEV if the device is not found, -EAGAIN if the driver cannot
+ * currently satisfy the requested state transition, or -EINVAL for all other
+ * state-transition requests.
+ */
+int arche_platform_change_state(enum arche_platform_state state,
+ struct gb_timesync_svc *timesync_svc_pdata)
+{
+ struct arche_platform_drvdata *arche_pdata;
+ struct platform_device *pdev;
+ struct device_node *np;
+ int ret = -EAGAIN;
+ unsigned long flags;
+
+ np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
+ if (!np) {
+ pr_err("google,arche-platform device node not found\n");
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("arche-platform device not found\n");
+ return -ENODEV;
+ }
+
+ arche_pdata = platform_get_drvdata(pdev);
+
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+
+ if (arche_pdata->state == state) {
+ ret = 0;
+ goto exit;
+ }
+
+ switch (state) {
+ case ARCHE_PLATFORM_STATE_TIME_SYNC:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
+ dev_err(arche_pdata->dev,
+ "driver busy with wake/detect line ops\n");
+ goto exit;
+ }
+ device_for_each_child(arche_pdata->dev, NULL,
+ arche_apb_bootret_assert);
+ arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
+ break;
+ case ARCHE_PLATFORM_STATE_ACTIVE:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ device_for_each_child(arche_pdata->dev, NULL,
+ arche_apb_bootret_deassert);
+ arche_pdata->wake_detect_state = WD_STATE_IDLE;
+ break;
+ case ARCHE_PLATFORM_STATE_OFF:
+ case ARCHE_PLATFORM_STATE_STANDBY:
+ case ARCHE_PLATFORM_STATE_FW_FLASHING:
+ dev_err(arche_pdata->dev, "busy, request to retry later\n");
+ goto exit;
+ default:
+ ret = -EINVAL;
+ dev_err(arche_pdata->dev,
+ "invalid state transition request\n");
+ goto exit;
+ }
+ arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
+ arche_platform_set_state(arche_pdata, state);
+ if (state == ARCHE_PLATFORM_STATE_ACTIVE)
+ wake_up(&arche_pdata->wq);
+
+ ret = 0;
+exit:
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arche_platform_change_state);
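+
+/*
+ * Editorial note: a minimal, illustrative sketch (not part of the driver) of
+ * how a caller such as the timesync code might use the API above. The
+ * gb_timesync_svc pointer "tsvc" is whatever the caller already holds, and
+ * error handling is reduced to the bare minimum.
+ *
+ *	int ret;
+ *
+ *	ret = arche_platform_change_state(ARCHE_PLATFORM_STATE_TIME_SYNC, tsvc);
+ *	if (ret)
+ *		return ret;	(ret is -ENODEV, -EAGAIN or -EINVAL here)
+ *
+ *	... perform timesync operations ...
+ *
+ *	ret = arche_platform_change_state(ARCHE_PLATFORM_STATE_ACTIVE, tsvc);
+ */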
+
+/* Requires arche_pdata->wake_lock to be held by the calling context */
+static void arche_platform_set_wake_detect_state(
+ struct arche_platform_drvdata *arche_pdata,
+ enum svc_wakedetect_state state)
+{
+ arche_pdata->wake_detect_state = state;
+}
+
+static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
+{
+ gpio_set_value(gpio, onoff);
+}
+
+static int apb_cold_boot(struct device *dev, void *data)
+{
+ int ret;
+
+ ret = apb_ctrl_coldboot(dev);
+ if (ret)
+ dev_warn(dev, "failed to coldboot\n");
+
+ /* Child nodes are independent, so do not exit the coldboot operation */
+ return 0;
+}
+
+static int apb_poweroff(struct device *dev, void *data)
+{
+ apb_ctrl_poweroff(dev);
+
+ /* Enable HUB3613 into HUB mode. */
+ if (usb3613_hub_mode_ctrl(false))
+ dev_warn(dev, "failed to control hub device\n");
+
+ return 0;
+}
+
+static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
+{
+ /* Enable interrupt here, to read event back from SVC */
+ gpio_direction_input(arche_pdata->wake_detect_gpio);
+ enable_irq(arche_pdata->wake_detect_irq);
+}
+
+static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
+{
+ struct arche_platform_drvdata *arche_pdata = devid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
+ /* Something is wrong */
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_COLDBOOT_START);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+ /* It should complete a power cycle, so first make sure it is powered off */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+ /* Bring APB out of reset: cold boot sequence */
+ device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);
+
+ /* Enable HUB3613 into HUB mode. */
+ if (usb3613_hub_mode_ctrl(true))
+ dev_warn(arche_pdata->dev, "failed to control hub device\n");
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
+{
+ struct arche_platform_drvdata *arche_pdata = devid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+
+ if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
+ gb_timesync_irq(arche_pdata->timesync_svc_pdata);
+ goto exit;
+ }
+
+ if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
+ /* wake/detect rising */
+
+ /*
+ * If the wake/detect line goes high again less than 30 msec after
+ * going low, then the standby boot sequence is initiated, which is
+ * not supported/implemented as of now. So ignore it.
+ */
+ if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
+ if (time_before(jiffies,
+ arche_pdata->wake_detect_start +
+ msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_IDLE);
+ } else {
+ /* Check that we are not already in the middle of the IRQ thread */
+ if (arche_pdata->wake_detect_state !=
+ WD_STATE_COLDBOOT_START) {
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_COLDBOOT_TRIG);
+ spin_unlock_irqrestore(
+ &arche_pdata->wake_lock,
+ flags);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+ }
+ } else {
+ /* wake/detect falling */
+ if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
+ arche_pdata->wake_detect_start = jiffies;
+ /*
+ * In the beginning, when wake/detect goes low (the first time), we assume
+ * it is meant for a coldboot and set the flag. If the wake/detect line stays
+ * low beyond 30 msec, then it is a coldboot; otherwise fall back to standby boot.
+ */
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_BOOT_INIT);
+ }
+ }
+
+exit:
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ int ret;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ return 0;
+
+ dev_info(arche_pdata->dev, "Booting from cold boot state\n");
+
+ svc_reset_onoff(arche_pdata->svc_reset_gpio,
+ arche_pdata->is_reset_act_hi);
+
+ gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
+ usleep_range(100, 200);
+
+ ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+ if (ret) {
+ dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* bring SVC out of reset */
+ svc_reset_onoff(arche_pdata->svc_reset_gpio,
+ !arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);
+
+ return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static int arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ int ret;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ return 0;
+
+ dev_info(arche_pdata->dev, "Switching to FW flashing state\n");
+
+ svc_reset_onoff(arche_pdata->svc_reset_gpio,
+ arche_pdata->is_reset_act_hi);
+
+ gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);
+
+ usleep_range(100, 200);
+
+ ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
+ if (ret) {
+ dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
+ ret);
+ return ret;
+ }
+
+ svc_reset_onoff(arche_pdata->svc_reset_gpio,
+ !arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);
+
+ return 0;
+}
+
+/*
+ * Requires arche_pdata->platform_state_mutex to be held
+ */
+static void arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
+{
+ unsigned long flags;
+
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+ return;
+
+ /* If in fw_flashing mode, there is no need to repeat things */
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
+ disable_irq(arche_pdata->wake_detect_irq);
+
+ spin_lock_irqsave(&arche_pdata->wake_lock, flags);
+ arche_platform_set_wake_detect_state(arche_pdata,
+ WD_STATE_IDLE);
+ spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
+ }
+
+ clk_disable_unprepare(arche_pdata->svc_ref_clk);
+
+ /* As part of exit, put APB back in reset state */
+ svc_reset_onoff(arche_pdata->svc_reset_gpio,
+ arche_pdata->is_reset_act_hi);
+
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+}
+
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+retry:
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ ret = wait_event_interruptible(
+ arche_pdata->wq,
+ arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+
+ if (sysfs_streq(buf, "off")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
+ goto exit;
+
+ /* If SVC goes down, bring down the APBs as well */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+
+ arche_platform_poweroff_seq(arche_pdata);
+
+ } else if (sysfs_streq(buf, "active")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
+ goto exit;
+
+ /* First we want to make sure we power off everything
+ * and then activate it again */
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+ arche_platform_poweroff_seq(arche_pdata);
+
+ arche_platform_wd_irq_en(arche_pdata);
+ ret = arche_platform_coldboot_seq(arche_pdata);
+ if (ret)
+ goto exit;
+
+ } else if (sysfs_streq(buf, "standby")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
+ goto exit;
+
+ dev_warn(arche_pdata->dev, "standby state not supported\n");
+ } else if (sysfs_streq(buf, "fw_flashing")) {
+ if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
+ goto exit;
+
+ /*
+ * Here we only control SVC.
+ *
+ * In case of FW_FLASHING mode we do not want to control the
+ * APBs, as in the case of V2 the SPI bus is shared between both
+ * the APBs. So let the user choose which APB to flash.
+ */
+ arche_platform_poweroff_seq(arche_pdata);
+
+ ret = arche_platform_fw_flashing_seq(arche_pdata);
+ if (ret)
+ goto exit;
+ } else {
+ dev_err(arche_pdata->dev, "unknown state\n");
+ ret = -EINVAL;
+ }
+
+exit:
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ return ret ? ret : count;
+}
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
+
+ switch (arche_pdata->state) {
+ case ARCHE_PLATFORM_STATE_OFF:
+ return sprintf(buf, "off\n");
+ case ARCHE_PLATFORM_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case ARCHE_PLATFORM_STATE_STANDBY:
+ return sprintf(buf, "standby\n");
+ case ARCHE_PLATFORM_STATE_FW_FLASHING:
+ return sprintf(buf, "fw_flashing\n");
+ case ARCHE_PLATFORM_STATE_TIME_SYNC:
+ return sprintf(buf, "time_sync\n");
+ default:
+ return sprintf(buf, "unknown state\n");
+ }
+}
+
+static DEVICE_ATTR_RW(state);
+
+static int arche_platform_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct arche_platform_drvdata *arche_pdata =
+ container_of(notifier, struct arche_platform_drvdata,
+ pm_notifier);
+ int ret = NOTIFY_DONE;
+
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
+ ret = NOTIFY_STOP;
+ break;
+ }
+ device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
+ arche_platform_poweroff_seq(arche_pdata);
+ break;
+ case PM_POST_SUSPEND:
+ if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
+ break;
+
+ arche_platform_wd_irq_en(arche_pdata);
+ arche_platform_coldboot_seq(arche_pdata);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+
+ return ret;
+}
+
+static int arche_platform_probe(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata), GFP_KERNEL);
+ if (!arche_pdata)
+ return -ENOMEM;
+
+ /* setup svc reset gpio */
+ arche_pdata->is_reset_act_hi = of_property_read_bool(np,
+ "svc,reset-active-high");
+ arche_pdata->svc_reset_gpio = of_get_named_gpio(np, "svc,reset-gpio", 0);
+ if (arche_pdata->svc_reset_gpio < 0) {
+ dev_err(dev, "failed to get reset-gpio\n");
+ return arche_pdata->svc_reset_gpio;
+ }
+ ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
+ if (ret) {
+ dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
+ return ret;
+ }
+ ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
+ arche_pdata->is_reset_act_hi);
+ if (ret) {
+ dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
+ return ret;
+ }
+ arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
+
+ arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
+ "svc,sysboot-gpio", 0);
+ if (arche_pdata->svc_sysboot_gpio < 0) {
+ dev_err(dev, "failed to get sysboot gpio\n");
+ return arche_pdata->svc_sysboot_gpio;
+ }
+ ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
+ if (ret) {
+ dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
+ return ret;
+ }
+ ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
+ if (ret) {
+ dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
+ return ret;
+ }
+
+ /* setup the clock request gpio first */
+ arche_pdata->svc_refclk_req = of_get_named_gpio(np,
+ "svc,refclk-req-gpio", 0);
+ if (arche_pdata->svc_refclk_req < 0) {
+ dev_err(dev, "failed to get svc clock-req gpio\n");
+ return arche_pdata->svc_refclk_req;
+ }
+ ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req, "svc-clk-req");
+ if (ret) {
+ dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
+ return ret;
+ }
+ ret = gpio_direction_input(arche_pdata->svc_refclk_req);
+ if (ret) {
+ dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
+ return ret;
+ }
+
+ /* setup refclk2 to follow the pin */
+ arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
+ if (IS_ERR(arche_pdata->svc_ref_clk)) {
+ ret = PTR_ERR(arche_pdata->svc_ref_clk);
+ dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, arche_pdata);
+
+ arche_pdata->num_apbs = of_get_child_count(np);
+ dev_dbg(dev, "Number of APB's available - %d\n", arche_pdata->num_apbs);
+
+ arche_pdata->wake_detect_gpio = of_get_named_gpio(np, "svc,wake-detect-gpio", 0);
+ if (arche_pdata->wake_detect_gpio < 0) {
+ dev_err(dev, "failed to get wake detect gpio\n");
+ return arche_pdata->wake_detect_gpio;
+ }
+
+ ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio, "wake detect");
+ if (ret) {
+ dev_err(dev, "Failed requesting wake_detect gpio %d\n",
+ arche_pdata->wake_detect_gpio);
+ return ret;
+ }
+
+ arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
+
+ arche_pdata->dev = &pdev->dev;
+
+ spin_lock_init(&arche_pdata->wake_lock);
+ mutex_init(&arche_pdata->platform_state_mutex);
+ init_waitqueue_head(&arche_pdata->wq);
+ arche_pdata->wake_detect_irq =
+ gpio_to_irq(arche_pdata->wake_detect_gpio);
+
+ ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
+ arche_platform_wd_irq,
+ arche_platform_wd_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(dev), arche_pdata);
+ if (ret) {
+ dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
+ return ret;
+ }
+ disable_irq(arche_pdata->wake_detect_irq);
+
+ ret = device_create_file(dev, &dev_attr_state);
+ if (ret) {
+ dev_err(dev, "failed to create state file in sysfs\n");
+ return ret;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ goto err_device_remove;
+ }
+
+ arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
+ ret = register_pm_notifier(&arche_pdata->pm_notifier);
+
+ if (ret) {
+ dev_err(dev, "failed to register pm notifier %d\n", ret);
+ goto err_device_remove;
+ }
+
+ /* Register callback pointer */
+ arche_platform_change_state_cb = arche_platform_change_state;
+
+ /* Cold boot the SVC now, unless 'arche,init-off' requests it to stay off */
+ if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
+ mutex_lock(&arche_pdata->platform_state_mutex);
+ ret = arche_platform_coldboot_seq(arche_pdata);
+ if (ret) {
+ dev_err(dev, "Failed to cold boot svc %d\n", ret);
+ goto err_coldboot;
+ }
+ arche_platform_wd_irq_en(arche_pdata);
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+ }
+
+ dev_info(dev, "Device registered successfully\n");
+ return 0;
+
+err_coldboot:
+ mutex_unlock(&arche_pdata->platform_state_mutex);
+err_device_remove:
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ return ret;
+}
+
+static int arche_remove_child(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int arche_platform_remove(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&arche_pdata->pm_notifier);
+ device_remove_file(&pdev->dev, &dev_attr_state);
+ device_for_each_child(&pdev->dev, NULL, arche_remove_child);
+ arche_platform_poweroff_seq(arche_pdata);
+ platform_set_drvdata(pdev, NULL);
+
+ if (usb3613_hub_mode_ctrl(false))
+ dev_warn(arche_pdata->dev, "failed to control hub device\n");
+ /* TODO: Should we do anything more here ?? */
+ return 0;
+}
+
+static int arche_platform_suspend(struct device *dev)
+{
+ /*
+ * If the timing profile permits, we may shut down the bridge
+ * completely.
+ *
+ * TODO: sequence ??
+ *
+ * Also, need to make sure we meet precondition for unipro suspend
+ * Precondition: Definition ???
+ */
+ return 0;
+}
+
+static int arche_platform_resume(struct device *dev)
+{
+ /*
+ * At least for ES2 we have to meet the delay requirement between
+ * the UniPro switch and AP bridge init, depending on whether the bridge
+ * is in the OFF state or the standby state.
+ *
+ * Based on whether bridge is in standby or OFF state we may have to
+ * assert multiple signals. Please refer to WDM spec, for more info.
+ *
+ */
+ return 0;
+}
+
+static void arche_platform_shutdown(struct platform_device *pdev)
+{
+ struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
+
+ arche_platform_poweroff_seq(arche_pdata);
+
+ usb3613_hub_mode_ctrl(false);
+}
+
+static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
+ arche_platform_suspend,
+ arche_platform_resume);
+
+static const struct of_device_id arche_platform_of_match[] = {
+ { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+ { },
+};
+
+static const struct of_device_id arche_combined_id[] = {
+ { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
+ { .compatible = "usbffff,2", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arche_combined_id);
+
+static struct platform_driver arche_platform_device_driver = {
+ .probe = arche_platform_probe,
+ .remove = arche_platform_remove,
+ .shutdown = arche_platform_shutdown,
+ .driver = {
+ .name = "arche-platform-ctrl",
+ .pm = &arche_platform_pm_ops,
+ .of_match_table = arche_platform_of_match,
+ }
+};
+
+static int __init arche_init(void)
+{
+ int retval;
+
+ retval = platform_driver_register(&arche_platform_device_driver);
+ if (retval)
+ return retval;
+
+ retval = arche_apb_init();
+ if (retval)
+ platform_driver_unregister(&arche_platform_device_driver);
+
+ return retval;
+}
+module_init(arche_init);
+
+static void __exit arche_exit(void)
+{
+ arche_apb_exit();
+ platform_driver_unregister(&arche_platform_device_driver);
+}
+module_exit(arche_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
+MODULE_DESCRIPTION("Arche Platform Driver");
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
new file mode 100644
index 000000000000..bd12345b82a2
--- /dev/null
+++ b/drivers/staging/greybus/arche_platform.h
@@ -0,0 +1,39 @@
+/*
+ * Arche Platform driver to enable Unipro link.
+ *
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __ARCHE_PLATFORM_H
+#define __ARCHE_PLATFORM_H
+
+#include "timesync.h"
+
+enum arche_platform_state {
+ ARCHE_PLATFORM_STATE_OFF,
+ ARCHE_PLATFORM_STATE_ACTIVE,
+ ARCHE_PLATFORM_STATE_STANDBY,
+ ARCHE_PLATFORM_STATE_FW_FLASHING,
+ ARCHE_PLATFORM_STATE_TIME_SYNC,
+};
+
+int arche_platform_change_state(enum arche_platform_state state,
+ struct gb_timesync_svc *pdata);
+
+extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
+ struct gb_timesync_svc *pdata);
+int __init arche_apb_init(void);
+void __exit arche_apb_exit(void);
+
+/* Operational states for the APB device */
+int apb_ctrl_coldboot(struct device *dev);
+int apb_ctrl_fw_flashing(struct device *dev);
+int apb_ctrl_standby_boot(struct device *dev);
+void apb_ctrl_poweroff(struct device *dev);
+void apb_bootret_assert(struct device *dev);
+void apb_bootret_deassert(struct device *dev);
+
+#endif /* __ARCHE_PLATFORM_H */
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
new file mode 100644
index 000000000000..7fbddfc40d83
--- /dev/null
+++ b/drivers/staging/greybus/arpc.h
@@ -0,0 +1,109 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARPC_H
+#define __ARPC_H
+
+/* APBridgeA RPC (ARPC) */
+
+enum arpc_result {
+ ARPC_SUCCESS = 0x00,
+ ARPC_NO_MEMORY = 0x01,
+ ARPC_INVALID = 0x02,
+ ARPC_TIMEOUT = 0x03,
+ ARPC_UNKNOWN_ERROR = 0xff,
+};
+
+struct arpc_request_message {
+ __le16 id; /* RPC unique id */
+ __le16 size; /* Size in bytes of header + payload */
+ __u8 type; /* RPC type */
+ __u8 data[0]; /* ARPC data */
+} __packed;
+
+struct arpc_response_message {
+ __le16 id; /* RPC unique id */
+ __u8 result; /* Result of RPC */
+} __packed;
+
+
+/* ARPC requests */
+#define ARPC_TYPE_CPORT_CONNECTED 0x01
+#define ARPC_TYPE_CPORT_QUIESCE 0x02
+#define ARPC_TYPE_CPORT_CLEAR 0x03
+#define ARPC_TYPE_CPORT_FLUSH 0x04
+#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
+
+struct arpc_cport_connected_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_quiesce_req {
+ __le16 cport_id;
+ __le16 peer_space;
+ __le16 timeout;
+} __packed;
+
+struct arpc_cport_clear_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_flush_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_shutdown_req {
+ __le16 cport_id;
+ __le16 timeout;
+ __u8 phase;
+} __packed;
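+
+/*
+ * Editorial note: an illustrative sketch (not part of this header) of how an
+ * ARPC "cport connected" request could be assembled from the structures
+ * above; how the resulting buffers are handed to APBridgeA is host-driver
+ * specific and not shown here. The id value is just an example.
+ *
+ *	struct arpc_cport_connected_req req = {
+ *		.cport_id = cpu_to_le16(cport_id),
+ *	};
+ *	struct arpc_request_message hdr = {
+ *		.id   = cpu_to_le16(1),
+ *		.size = cpu_to_le16(sizeof(hdr) + sizeof(req)),
+ *		.type = ARPC_TYPE_CPORT_CONNECTED,
+ *	};
+ */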
+
+#endif /* __ARPC_H */
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
new file mode 100644
index 000000000000..1b4252d5d255
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -0,0 +1,207 @@
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+#include "audio_apbridgea.h"
+#include "audio_codec.h"
+
+int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+ __u16 i2s_port, __u32 format, __u32 rate,
+ __u32 mclk_freq)
+{
+ struct audio_apbridgea_set_config_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.format = cpu_to_le32(format);
+ req.rate = cpu_to_le32(rate);
+ req.mclk_freq = cpu_to_le32(mclk_freq);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
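+
+/*
+ * Editorial note: a minimal, illustrative use of the helper above (not part
+ * of the driver), assuming the caller already owns a gb_connection and uses
+ * I2S port 0 with 16-bit samples at 48 kHz. The 12.288 MHz MCLK value is
+ * only an example (256 * 48 kHz), not a requirement of this API.
+ *
+ *	ret = gb_audio_apbridgea_set_config(connection, 0,
+ *					    AUDIO_APBRIDGEA_PCM_FMT_16,
+ *					    AUDIO_APBRIDGEA_PCM_RATE_48000,
+ *					    12288000);
+ */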
+
+int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction)
+{
+ struct audio_apbridgea_register_cport_request req;
+ int ret;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.cport = cpu_to_le16(cportid);
+ req.direction = direction;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
+
+int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction)
+{
+ struct audio_apbridgea_unregister_cport_request req;
+ int ret;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.cport = cpu_to_le16(cportid);
+ req.direction = direction;
+
+ ret = gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
+
+int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size)
+{
+ struct audio_apbridgea_set_tx_data_size_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.size = cpu_to_le16(size);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
+
+int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_prepare_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
+
+int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+ __u16 i2s_port, __u64 timestamp)
+{
+ struct audio_apbridgea_start_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.timestamp = cpu_to_le64(timestamp);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
+
+int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
+{
+ struct audio_apbridgea_stop_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
+
+int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_shutdown_tx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
+
+int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size)
+{
+ struct audio_apbridgea_set_rx_data_size_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ req.size = cpu_to_le16(size);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
+
+int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_prepare_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
+
+int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_start_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
+
+int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
+{
+ struct audio_apbridgea_stop_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
+
+int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+ __u16 i2s_port)
+{
+ struct audio_apbridgea_shutdown_rx_request req;
+
+ req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
+ req.hdr.i2s_port = cpu_to_le16(i2s_port);
+
+ return gb_hd_output(connection->hd, &req, sizeof(req),
+ GB_APB_REQUEST_AUDIO_CONTROL, true);
+}
+EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-apbridgea");
+MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
new file mode 100644
index 000000000000..b94cb05c89e4
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -0,0 +1,156 @@
+/**
+ * Copyright (c) 2015-2016 Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * This is a special protocol for configuring communication over the
+ * I2S bus between the DSP on the MSM8994 and APBridgeA. Therefore,
+ * we can predefine several low-level attributes of the communication
+ * because we know that they are supported. In particular, the following
+ * assumptions are made:
+ * - there are two channels (i.e., stereo)
+ * - the low-level protocol is I2S as defined by Philips/NXP
+ * - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
+ * - WCLK changes on the falling edge of BCLK
+ * - WCLK low for left channel; high for right channel
+ * - TX data is sent on the falling edge of BCLK
+ * - RX data is received/latched on the rising edge of BCLK
+ */
+
+#ifndef __AUDIO_APBRIDGEA_H
+#define __AUDIO_APBRIDGEA_H
+
+#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG 0x01
+#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT 0x02
+#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT 0x03
+#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE 0x04
+ /* 0x05 unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX 0x06
+#define AUDIO_APBRIDGEA_TYPE_START_TX 0x07
+#define AUDIO_APBRIDGEA_TYPE_STOP_TX 0x08
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX 0x09
+#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE 0x0a
+ /* 0x0b unused */
+#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX 0x0c
+#define AUDIO_APBRIDGEA_TYPE_START_RX 0x0d
+#define AUDIO_APBRIDGEA_TYPE_STOP_RX 0x0e
+#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX 0x0f
+
+#define AUDIO_APBRIDGEA_PCM_FMT_8 BIT(0)
+#define AUDIO_APBRIDGEA_PCM_FMT_16 BIT(1)
+#define AUDIO_APBRIDGEA_PCM_FMT_24 BIT(2)
+#define AUDIO_APBRIDGEA_PCM_FMT_32 BIT(3)
+#define AUDIO_APBRIDGEA_PCM_FMT_64 BIT(4)
+
+#define AUDIO_APBRIDGEA_PCM_RATE_5512 BIT(0)
+#define AUDIO_APBRIDGEA_PCM_RATE_8000 BIT(1)
+#define AUDIO_APBRIDGEA_PCM_RATE_11025 BIT(2)
+#define AUDIO_APBRIDGEA_PCM_RATE_16000 BIT(3)
+#define AUDIO_APBRIDGEA_PCM_RATE_22050 BIT(4)
+#define AUDIO_APBRIDGEA_PCM_RATE_32000 BIT(5)
+#define AUDIO_APBRIDGEA_PCM_RATE_44100 BIT(6)
+#define AUDIO_APBRIDGEA_PCM_RATE_48000 BIT(7)
+#define AUDIO_APBRIDGEA_PCM_RATE_64000 BIT(8)
+#define AUDIO_APBRIDGEA_PCM_RATE_88200 BIT(9)
+#define AUDIO_APBRIDGEA_PCM_RATE_96000 BIT(10)
+#define AUDIO_APBRIDGEA_PCM_RATE_176400 BIT(11)
+#define AUDIO_APBRIDGEA_PCM_RATE_192000 BIT(12)
+
+#define AUDIO_APBRIDGEA_DIRECTION_TX BIT(0)
+#define AUDIO_APBRIDGEA_DIRECTION_RX BIT(1)
+
+/* The I2S port is passed in the 'index' parameter of the USB request */
+/* The CPort is passed in the 'value' parameter of the USB request */
+
+struct audio_apbridgea_hdr {
+ __u8 type;
+ __le16 i2s_port;
+ __u8 data[0];
+} __packed;
+
+struct audio_apbridgea_set_config_request {
+ struct audio_apbridgea_hdr hdr;
+ __le32 format; /* AUDIO_APBRIDGEA_PCM_FMT_* */
+ __le32 rate; /* AUDIO_APBRIDGEA_PCM_RATE_* */
+ __le32 mclk_freq; /* XXX Remove? */
+} __packed;
+
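+/*
+ * Illustrative sketch only (not part of the protocol definition): a
+ * SET_CONFIG request for 48 kHz, 16-bit stereo audio with a 6.144 MHz
+ * MCLK (the configuration the codec driver below actually uses) would be
+ * filled in roughly like this before being sent to APBridgeA:
+ *
+ *   struct audio_apbridgea_set_config_request req;
+ *
+ *   req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
+ *   req.hdr.i2s_port = cpu_to_le16(i2s_port);
+ *   req.format = cpu_to_le32(AUDIO_APBRIDGEA_PCM_FMT_16);
+ *   req.rate = cpu_to_le32(AUDIO_APBRIDGEA_PCM_RATE_48000);
+ *   req.mclk_freq = cpu_to_le32(6144000);
+ */
+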
+struct audio_apbridgea_register_cport_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 cport;
+ __u8 direction;
+} __packed;
+
+struct audio_apbridgea_unregister_cport_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 cport;
+ __u8 direction;
+} __packed;
+
+struct audio_apbridgea_set_tx_data_size_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 size;
+} __packed;
+
+struct audio_apbridgea_prepare_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_start_tx_request {
+ struct audio_apbridgea_hdr hdr;
+ __le64 timestamp;
+} __packed;
+
+struct audio_apbridgea_stop_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_tx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_set_rx_data_size_request {
+ struct audio_apbridgea_hdr hdr;
+ __le16 size;
+} __packed;
+
+struct audio_apbridgea_prepare_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_start_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_stop_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+struct audio_apbridgea_shutdown_rx_request {
+ struct audio_apbridgea_hdr hdr;
+} __packed;
+
+#endif /* __AUDIO_APBRIDGEA_H */
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
new file mode 100644
index 000000000000..8a0744b58a32
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.c
@@ -0,0 +1,1132 @@
+/*
+ * APBridge ALSA SoC dummy codec driver
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <uapi/linux/input.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+
+static struct gbaudio_codec_info *gbcodec;
+
+static struct gbaudio_data_connection *
+find_data(struct gbaudio_module_info *module, int id)
+{
+ struct gbaudio_data_connection *data;
+
+ list_for_each_entry(data, &module->data_list, list) {
+ if (id == data->id)
+ return data;
+ }
+ return NULL;
+}
+
+static struct gbaudio_stream_params *
+find_dai_stream_params(struct gbaudio_codec_info *codec, int id, int stream)
+{
+ struct gbaudio_codec_dai *dai;
+
+ list_for_each_entry(dai, &codec->dai_list, list) {
+ if (dai->id == id)
+ return &dai->params[stream];
+ }
+ return NULL;
+}
+
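+/*
+ * TX (playback) bring-up for a dynamically attached module, driven from
+ * gbaudio_module_update(): register the CPort on APBridgeA, push the PCM
+ * parameters to the module, set the per-message data size and activate TX.
+ * The per-stream state kept in gbaudio_data_connection records how far the
+ * sequence has progressed so that completed steps are not repeated.
+ */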
+static int gbaudio_module_enable_tx(struct gbaudio_codec_info *codec,
+ struct gbaudio_module_info *module, int id)
+{
+ int module_state, ret = 0;
+ u16 data_cport, i2s_port, cportid;
+ u8 sig_bits, channels;
+ uint32_t format, rate;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_stream_params *params;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+ params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_PLAYBACK);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ return -EINVAL;
+ }
+
+ /* register cport */
+ if (module_state < GBAUDIO_CODEC_STARTUP) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_register_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "reg_cport failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+ GBAUDIO_CODEC_STARTUP;
+ dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+ }
+
+ /* hw_params */
+ if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+ format = params->format;
+ channels = params->channels;
+ rate = params->rate;
+ sig_bits = params->sig_bits;
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+ format, rate, channels, sig_bits);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
+ ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+ GBAUDIO_CODEC_HWPARAMS;
+ dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+ }
+
+ /* prepare */
+ if (module_state < GBAUDIO_CODEC_PREPARE) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_tx_data_size(module->mgmt_connection,
+ data_cport, 192);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "set_tx_data_size failed:%d\n",
+ ret);
+ return ret;
+ }
+ ret = gb_audio_gb_activate_tx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "activate_tx failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+ GBAUDIO_CODEC_PREPARE;
+ dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_disable_tx(struct gbaudio_module_info *module, int id)
+{
+ int ret;
+ u16 data_cport, cportid, i2s_port;
+ int module_state;
+ struct gbaudio_data_connection *data;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];
+
+ if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_deactivate_tx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "deactivate_tx failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+ GBAUDIO_CODEC_HWPARAMS;
+ }
+
+ if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "unregister_cport failed:%d\n",
+ ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+ data->state[SNDRV_PCM_STREAM_PLAYBACK] =
+ GBAUDIO_CODEC_SHUTDOWN;
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_enable_rx(struct gbaudio_codec_info *codec,
+ struct gbaudio_module_info *module, int id)
+{
+ int module_state, ret = 0;
+ u16 data_cport, i2s_port, cportid;
+ u8 sig_bits, channels;
+ uint32_t format, rate;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_stream_params *params;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+ params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_CAPTURE);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ return -EINVAL;
+ }
+
+ /* register cport */
+ if (module_state < GBAUDIO_CODEC_STARTUP) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_register_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "reg_cport failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] =
+ GBAUDIO_CODEC_STARTUP;
+ dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
+ }
+
+ /* hw_params */
+ if (module_state < GBAUDIO_CODEC_HWPARAMS) {
+ format = params->format;
+ channels = params->channels;
+ rate = params->rate;
+ sig_bits = params->sig_bits;
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
+ format, rate, channels, sig_bits);
+ if (ret) {
+ dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
+ ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] =
+ GBAUDIO_CODEC_HWPARAMS;
+ dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
+ }
+
+ /* prepare */
+ if (module_state < GBAUDIO_CODEC_PREPARE) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_set_rx_data_size(module->mgmt_connection,
+ data_cport, 192);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "set_rx_data_size failed:%d\n",
+ ret);
+ return ret;
+ }
+ ret = gb_audio_gb_activate_rx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "activate_rx failed:%d\n", ret);
+ return ret;
+ }
+ data->state[SNDRV_PCM_STREAM_CAPTURE] =
+ GBAUDIO_CODEC_PREPARE;
+ dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
+ }
+
+ return 0;
+}
+
+static int gbaudio_module_disable_rx(struct gbaudio_module_info *module, int id)
+{
+ int ret;
+ u16 data_cport, cportid, i2s_port;
+ int module_state;
+ struct gbaudio_data_connection *data;
+
+ /* find the dai */
+ data = find_data(module, id);
+ if (!data) {
+ dev_err(module->dev, "%d:DATA connection missing\n", id);
+ return -ENODEV;
+ }
+ module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];
+
+ if (module_state > GBAUDIO_CODEC_HWPARAMS) {
+ data_cport = data->connection->intf_cport_id;
+ ret = gb_audio_gb_deactivate_rx(module->mgmt_connection,
+ data_cport);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "deactivate_rx failed:%d\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
+ data->state[SNDRV_PCM_STREAM_CAPTURE] =
+ GBAUDIO_CODEC_HWPARAMS;
+ }
+
+ if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ if (ret) {
+ dev_err_ratelimited(module->dev,
+ "unregister_cport failed:%d\n",
+ ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
+ data->state[SNDRV_PCM_STREAM_CAPTURE] =
+ GBAUDIO_CODEC_SHUTDOWN;
+ }
+
+ return 0;
+}
+
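+/*
+ * Entry point used by the widget event handlers built from the module
+ * topology: when an AIF widget belonging to a module is powered up or down,
+ * the DAPM event is translated into the enable/disable sequences above for
+ * the matching DAI.
+ */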
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+ struct snd_soc_dapm_widget *w,
+ struct gbaudio_module_info *module, int enable)
+{
+ int dai_id, ret;
+ char intf_name[NAME_SIZE], dir[NAME_SIZE];
+
+ dev_dbg(module->dev, "%s:Module update %s sequence\n", w->name,
+ enable ? "Enable":"Disable");
+
+ if ((w->id != snd_soc_dapm_aif_in) && (w->id != snd_soc_dapm_aif_out)) {
+ dev_dbg(codec->dev, "No action required for %s\n", w->name);
+ return 0;
+ }
+
+ /* parse dai_id from AIF widget's stream_name */
+ ret = sscanf(w->sname, "%s %d %s", intf_name, &dai_id, dir);
+ if (ret < 3) {
+ dev_err(codec->dev, "Error while parsing dai_id for %s\n",
+ w->name);
+ return -EINVAL;
+ }
+
+ mutex_lock(&codec->lock);
+ if (w->id == snd_soc_dapm_aif_in) {
+ if (enable)
+ ret = gbaudio_module_enable_tx(codec, module, dai_id);
+ else
+ ret = gbaudio_module_disable_tx(module, dai_id);
+ } else if (w->id == snd_soc_dapm_aif_out) {
+ if (enable)
+ ret = gbaudio_module_enable_rx(codec, module, dai_id);
+ else
+ ret = gbaudio_module_disable_rx(module, dai_id);
+ }
+
+ mutex_unlock(&codec->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(gbaudio_module_update);
+
+/*
+ * codec DAI ops
+ */
+static int gbcodec_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ params->state = GBAUDIO_CODEC_STARTUP;
+ mutex_unlock(&codec->lock);
+ /* to prevent suspend in case of active audio */
+ pm_stay_awake(dai->dev);
+
+ return 0;
+}
+
+static void gbcodec_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list))
+ dev_info(codec->dev, "No codec module available during shutdown\n");
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return;
+ }
+ params->state = GBAUDIO_CODEC_SHUTDOWN;
+ mutex_unlock(&codec->lock);
+ pm_relax(dai->dev);
+}
+
+static int gbcodec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hwparams,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+ u8 channels;
+ uint32_t format, rate;
+ struct gbaudio_module_info *module;
+ struct gbaudio_data_connection *data;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ /*
+ * assuming, currently only 48000 Hz, 16BIT_LE, stereo
+ * is supported, validate params before configuring codec
+ */
+ if (params_channels(hwparams) != 2) {
+ dev_err(dai->dev, "Invalid channel count:%d\n",
+ params_channels(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ channels = params_channels(hwparams);
+
+ if (params_rate(hwparams) != 48000) {
+ dev_err(dai->dev, "Invalid sampling rate:%d\n",
+ params_rate(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ rate = GB_AUDIO_PCM_RATE_48000;
+
+ if (params_format(hwparams) != SNDRV_PCM_FORMAT_S16_LE) {
+ dev_err(dai->dev, "Invalid format:%d\n",
+ params_format(hwparams));
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+ format = GB_AUDIO_PCM_FMT_S16_LE;
+
+ /* find the data connection */
+ list_for_each_entry(module, &codec->module_list, list) {
+ data = find_data(module, dai->id);
+ if (data)
+ break;
+ }
+
+ if (!data) {
+ dev_err(dai->dev, "DATA connection missing\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ ret = gb_audio_apbridgea_set_config(data->connection, 0,
+ AUDIO_APBRIDGEA_PCM_FMT_16,
+ AUDIO_APBRIDGEA_PCM_RATE_48000,
+ 6144000);
+ if (ret) {
+ dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
+ ret);
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ gb_pm_runtime_put_noidle(bundle);
+
+ params->state = GBAUDIO_CODEC_HWPARAMS;
+ params->format = format;
+ params->rate = rate;
+ params->channels = channels;
+ params->sig_bits = 16; /* S16_LE carries 16 significant bits */
+
+ mutex_unlock(&codec->lock);
+ return 0;
+}
+
+static int gbcodec_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int ret;
+ struct gbaudio_module_info *module;
+ struct gbaudio_data_connection *data;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+ mutex_lock(&codec->lock);
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(module, &codec->module_list, list) {
+ /* find the dai */
+ data = find_data(module, dai->id);
+ if (data)
+ break;
+ }
+ if (!data) {
+ dev_err(dai->dev, "DATA connection missing\n");
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ params = find_dai_stream_params(codec, dai->id, substream->stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = gb_audio_apbridgea_set_tx_data_size(data->connection, 0,
+ 192);
+ break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ ret = gb_audio_apbridgea_set_rx_data_size(data->connection, 0,
+ 192);
+ break;
+ }
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
+ ret);
+ return ret;
+ }
+
+ gb_pm_runtime_put_noidle(bundle);
+
+ params->state = GBAUDIO_CODEC_PREPARE;
+ mutex_unlock(&codec->lock);
+ return 0;
+}
+
+static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ int ret;
+ struct gbaudio_data_connection *data;
+ struct gbaudio_module_info *module;
+ struct gb_bundle *bundle;
+ struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
+ struct gbaudio_stream_params *params;
+
+
+ dev_dbg(dai->dev, "Mute:%d, Direction:%s\n", mute,
+ stream ? "CAPTURE":"PLAYBACK");
+
+ mutex_lock(&codec->lock);
+
+ params = find_dai_stream_params(codec, dai->id, stream);
+ if (!params) {
+ dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
+ mutex_unlock(&codec->lock);
+ return -EINVAL;
+ }
+
+ if (list_empty(&codec->module_list)) {
+ dev_err(codec->dev, "No codec module available\n");
+ if (mute) {
+ params->state = GBAUDIO_CODEC_STOP;
+ ret = 0;
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ list_for_each_entry(module, &codec->module_list, list) {
+ /* find the dai */
+ data = find_data(module, dai->id);
+ if (data)
+ break;
+ }
+ if (!data) {
+ /* 'module' is not a valid entry here, so only report the DAI */
+ dev_err(dai->dev, "%s: DATA connection missing\n",
+ dai->name);
+ mutex_unlock(&codec->lock);
+ return -ENODEV;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret) {
+ mutex_unlock(&codec->lock);
+ return ret;
+ }
+
+ if (!mute && !stream) {/* start playback */
+ ret = gb_audio_apbridgea_prepare_tx(data->connection,
+ 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_start_tx(data->connection,
+ 0, 0);
+ params->state = GBAUDIO_CODEC_START;
+ } else if (!mute && stream) {/* start capture */
+ ret = gb_audio_apbridgea_prepare_rx(data->connection,
+ 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_start_rx(data->connection,
+ 0);
+ params->state = GBAUDIO_CODEC_START;
+ } else if (mute && !stream) {/* stop playback */
+ ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_shutdown_tx(data->connection,
+ 0);
+ params->state = GBAUDIO_CODEC_STOP;
+ } else if (mute && stream) {/* stop capture */
+ ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+ if (!ret)
+ ret = gb_audio_apbridgea_shutdown_rx(data->connection,
+ 0);
+ params->state = GBAUDIO_CODEC_STOP;
+ } else
+ ret = -EINVAL;
+ if (ret)
+ dev_err_ratelimited(dai->dev,
+ "%s:Error during %s %s stream:%d\n",
+ module->name, mute ? "Mute" : "Unmute",
+ stream ? "Capture" : "Playback", ret);
+
+ gb_pm_runtime_put_noidle(bundle);
+ mutex_unlock(&codec->lock);
+ return ret;
+}
+
+static struct snd_soc_dai_ops gbcodec_dai_ops = {
+ .startup = gbcodec_startup,
+ .shutdown = gbcodec_shutdown,
+ .hw_params = gbcodec_hw_params,
+ .prepare = gbcodec_prepare,
+ .mute_stream = gbcodec_mute_stream,
+};
+
+static struct snd_soc_dai_driver gbaudio_dai[] = {
+ {
+ .name = "apb-i2s0",
+ .id = 0,
+ .playback = {
+ .stream_name = "I2S 0 Playback",
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_max = 48000,
+ .rate_min = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .capture = {
+ .stream_name = "I2S 0 Capture",
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_max = 48000,
+ .rate_min = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &gbcodec_dai_ops,
+ },
+};
+
+static int gbaudio_init_jack(struct gbaudio_module_info *module,
+ struct snd_soc_codec *codec)
+{
+ int ret;
+
+ if (!module->jack_mask)
+ return 0;
+
+ snprintf(module->jack_name, NAME_SIZE, "GB %d Headset Jack",
+ module->dev_id);
+ ret = snd_soc_jack_new(codec, module->jack_name, module->jack_mask,
+ &module->headset_jack);
+ if (ret) {
+ dev_err(module->dev, "Failed to create new jack\n");
+ return ret;
+ }
+
+ if (!module->button_mask)
+ return 0;
+
+ snprintf(module->button_name, NAME_SIZE, "GB %d Button Jack",
+ module->dev_id);
+ ret = snd_soc_jack_new(codec, module->button_name, module->button_mask,
+ &module->button_jack);
+ if (ret) {
+ dev_err(module->dev, "Failed to create button jack\n");
+ return ret;
+ }
+
+ /*
+ * Currently, a maximum of four buttons is supported, with the following
+ * key mapping:
+ * BTN_0 = KEY_MEDIA
+ * BTN_1 = KEY_VOICECOMMAND
+ * BTN_2 = KEY_VOLUMEUP
+ * BTN_3 = KEY_VOLUMEDOWN
+ */
+
+ if (module->button_mask & SND_JACK_BTN_0) {
+ ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_0,
+ KEY_MEDIA);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_0\n");
+ return ret;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_1) {
+ ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_1,
+ KEY_VOICECOMMAND);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_1\n");
+ return ret;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_2) {
+ ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_2,
+ KEY_VOLUMEUP);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_2\n");
+ return ret;
+ }
+ }
+
+ if (module->button_mask & SND_JACK_BTN_3) {
+ ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_3,
+ KEY_VOLUMEDOWN);
+ if (ret) {
+ dev_err(module->dev, "Failed to set BTN_0\n");
+ return ret;
+ }
+ }
+
+ /* FIXME
+ * verify if this is really required
+ set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
+ module->button_jack.jack->input_dev->propbit);
+ */
+
+ return 0;
+}
+
+int gbaudio_register_module(struct gbaudio_module_info *module)
+{
+ int ret;
+ struct snd_soc_codec *codec;
+ struct snd_card *card;
+ struct snd_soc_jack *jack = NULL;
+
+ if (!gbcodec) {
+ dev_err(module->dev, "GB Codec not yet probed\n");
+ return -EAGAIN;
+ }
+
+ codec = gbcodec->codec;
+ card = codec->card->snd_card;
+
+ down_write(&card->controls_rwsem);
+
+ if (module->num_dais) {
+ dev_err(gbcodec->dev,
+ "%d:DAIs not supported via gbcodec driver\n",
+ module->num_dais);
+ up_write(&card->controls_rwsem);
+ return -EINVAL;
+ }
+
+ ret = gbaudio_init_jack(module, codec);
+ if (ret) {
+ up_write(&card->controls_rwsem);
+ return ret;
+ }
+
+ if (module->dapm_widgets)
+ snd_soc_dapm_new_controls(&codec->dapm, module->dapm_widgets,
+ module->num_dapm_widgets);
+ if (module->controls)
+ snd_soc_add_codec_controls(codec, module->controls,
+ module->num_controls);
+ if (module->dapm_routes)
+ snd_soc_dapm_add_routes(&codec->dapm, module->dapm_routes,
+ module->num_dapm_routes);
+
+ /* if the card is already instantiated, the new widgets must be set up now */
+ if (codec->card->instantiated) {
+ snd_soc_dapm_link_component_dai_widgets(codec->card,
+ &codec->dapm);
+#ifdef CONFIG_SND_JACK
+ /* register jack devices for this module from codec->jack_list */
+ list_for_each_entry(jack, &codec->jack_list, list) {
+ if ((jack == &module->headset_jack)
+ || (jack == &module->button_jack))
+ snd_device_register(codec->card->snd_card,
+ jack->jack);
+ }
+#endif
+ }
+
+ mutex_lock(&gbcodec->lock);
+ list_add(&module->list, &gbcodec->module_list);
+ mutex_unlock(&gbcodec->lock);
+
+ if (codec->card->instantiated)
+ ret = snd_soc_dapm_new_widgets(&codec->dapm);
+ dev_dbg(codec->dev, "Registered %s module\n", module->name);
+
+ up_write(&card->controls_rwsem);
+ return ret;
+}
+EXPORT_SYMBOL(gbaudio_register_module);
+
+static void gbaudio_codec_clean_data_tx(struct gbaudio_data_connection *data)
+{
+ u16 i2s_port, cportid;
+ int ret;
+
+ if (list_is_singular(&gbcodec->module_list)) {
+ ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
+ if (ret)
+ return;
+ ret = gb_audio_apbridgea_shutdown_tx(data->connection,
+ 0);
+ if (ret)
+ return;
+ }
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_TX);
+ data->state[0] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+static void gbaudio_codec_clean_data_rx(struct gbaudio_data_connection *data)
+{
+ u16 i2s_port, cportid;
+ int ret;
+
+ if (list_is_singular(&gbcodec->module_list)) {
+ ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
+ if (ret)
+ return;
+ ret = gb_audio_apbridgea_shutdown_rx(data->connection,
+ 0);
+ if (ret)
+ return;
+ }
+ i2s_port = 0; /* fixed for now */
+ cportid = data->connection->hd_cport_id;
+ ret = gb_audio_apbridgea_unregister_cport(data->connection,
+ i2s_port, cportid,
+ AUDIO_APBRIDGEA_DIRECTION_RX);
+ data->state[1] = GBAUDIO_CODEC_SHUTDOWN;
+}
+
+
+static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
+{
+ struct gbaudio_data_connection *data;
+ int pb_state, cap_state;
+
+ dev_dbg(gbcodec->dev, "%s: removed, cleanup APBridge\n", module->name);
+ list_for_each_entry(data, &module->data_list, list) {
+ pb_state = data->state[0];
+ cap_state = data->state[1];
+
+ if (pb_state > GBAUDIO_CODEC_SHUTDOWN)
+ gbaudio_codec_clean_data_tx(data);
+
+ if (cap_state > GBAUDIO_CODEC_SHUTDOWN)
+ gbaudio_codec_clean_data_rx(data);
+
+ }
+}
+
+void gbaudio_unregister_module(struct gbaudio_module_info *module)
+{
+ struct snd_soc_codec *codec = gbcodec->codec;
+ struct snd_card *card = codec->card->snd_card;
+ struct snd_soc_jack *jack, *next_j;
+ int mask;
+
+ dev_dbg(codec->dev, "Unregister %s module\n", module->name);
+
+ down_write(&card->controls_rwsem);
+ mutex_lock(&gbcodec->lock);
+ gbaudio_codec_cleanup(module);
+ list_del(&module->list);
+ dev_dbg(codec->dev, "Process Unregister %s module\n", module->name);
+ mutex_unlock(&gbcodec->lock);
+
+#ifdef CONFIG_SND_JACK
+ /* free jack devices for this module from codec->jack_list */
+ list_for_each_entry_safe(jack, next_j, &codec->jack_list, list) {
+ if (jack == &module->headset_jack)
+ mask = GBCODEC_JACK_MASK;
+ else if (jack == &module->button_jack)
+ mask = GBCODEC_JACK_BUTTON_MASK;
+ else
+ mask = 0;
+ if (mask) {
+ dev_dbg(module->dev, "Report %s removal\n",
+ jack->jack->id);
+ snd_soc_jack_report(jack, 0, mask);
+ snd_device_free(codec->card->snd_card, jack->jack);
+ list_del(&jack->list);
+ }
+ }
+#endif
+
+ if (module->dapm_routes) {
+ dev_dbg(codec->dev, "Removing %d routes\n",
+ module->num_dapm_routes);
+ snd_soc_dapm_del_routes(&codec->dapm, module->dapm_routes,
+ module->num_dapm_routes);
+ }
+ if (module->controls) {
+ dev_dbg(codec->dev, "Removing %d controls\n",
+ module->num_controls);
+ snd_soc_remove_codec_controls(codec, module->controls,
+ module->num_controls);
+ }
+ if (module->dapm_widgets) {
+ dev_dbg(codec->dev, "Removing %d widgets\n",
+ module->num_dapm_widgets);
+ snd_soc_dapm_free_controls(&codec->dapm, module->dapm_widgets,
+ module->num_dapm_widgets);
+ }
+
+ dev_dbg(codec->dev, "Unregistered %s module\n", module->name);
+
+ up_write(&card->controls_rwsem);
+}
+EXPORT_SYMBOL(gbaudio_unregister_module);
+
+/*
+ * codec driver ops
+ */
+static int gbcodec_probe(struct snd_soc_codec *codec)
+{
+ int i;
+ struct gbaudio_codec_info *info;
+ struct gbaudio_codec_dai *dai;
+
+ info = devm_kzalloc(codec->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = codec->dev;
+ INIT_LIST_HEAD(&info->module_list);
+ mutex_init(&info->lock);
+ INIT_LIST_HEAD(&info->dai_list);
+
+ /* init dai_list used to maintain runtime stream info */
+ for (i = 0; i < ARRAY_SIZE(gbaudio_dai); i++) {
+ dai = devm_kzalloc(codec->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+ dai->id = gbaudio_dai[i].id;
+ list_add(&dai->list, &info->dai_list);
+ }
+
+ info->codec = codec;
+ snd_soc_codec_set_drvdata(codec, info);
+ gbcodec = info;
+
+ device_init_wakeup(codec->dev, 1);
+ return 0;
+}
+
+static int gbcodec_remove(struct snd_soc_codec *codec)
+{
+ /* Empty function for now */
+ return 0;
+}
+
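+/*
+ * The dummy codec has no real hardware registers; this array simply backs
+ * the register cache so that the read/write callbacks below have somewhere
+ * to store and report control state.
+ */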
+static u8 gbcodec_reg[GBCODEC_REG_COUNT] = {
+ [GBCODEC_CTL_REG] = GBCODEC_CTL_REG_DEFAULT,
+ [GBCODEC_MUTE_REG] = GBCODEC_MUTE_REG_DEFAULT,
+ [GBCODEC_PB_LVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
+ [GBCODEC_PB_RVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
+ [GBCODEC_CAP_LVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
+ [GBCODEC_CAP_RVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
+ [GBCODEC_APB1_MUX_REG] = GBCODEC_APB1_MUX_REG_DEFAULT,
+ [GBCODEC_APB2_MUX_REG] = GBCODEC_APB2_MUX_REG_DEFAULT,
+};
+
+static int gbcodec_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ int ret = 0;
+
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg >= GBCODEC_REG_COUNT);
+
+ gbcodec_reg[reg] = value;
+ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, value);
+
+ return ret;
+}
+
+static unsigned int gbcodec_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ unsigned int val = 0;
+
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg >= GBCODEC_REG_COUNT);
+
+ val = gbcodec_reg[reg];
+ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, val);
+
+ return val;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_gbaudio = {
+ .probe = gbcodec_probe,
+ .remove = gbcodec_remove,
+
+ .read = gbcodec_read,
+ .write = gbcodec_write,
+
+ .reg_cache_size = GBCODEC_REG_COUNT,
+ .reg_cache_default = gbcodec_reg_defaults,
+ .reg_word_size = 1,
+
+ .idle_bias_off = true,
+ .ignore_pmdown_time = 1,
+};
+
+#ifdef CONFIG_PM
+static int gbaudio_codec_suspend(struct device *dev)
+{
+ dev_dbg(dev, "%s: suspend\n", __func__);
+ return 0;
+}
+
+static int gbaudio_codec_resume(struct device *dev)
+{
+ dev_dbg(dev, "%s: resume\n", __func__);
+ return 0;
+}
+
+static const struct dev_pm_ops gbaudio_codec_pm_ops = {
+ .suspend = gbaudio_codec_suspend,
+ .resume = gbaudio_codec_resume,
+};
+#endif
+
+static int gbaudio_codec_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_gbaudio,
+ gbaudio_dai, ARRAY_SIZE(gbaudio_dai));
+}
+
+static int gbaudio_codec_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id greybus_asoc_machine_of_match[] = {
+ { .compatible = "toshiba,apb-dummy-codec", },
+ {},
+};
+
+static struct platform_driver gbaudio_codec_driver = {
+ .driver = {
+ .name = "apb-dummy-codec",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &gbaudio_codec_pm_ops,
+#endif
+ .of_match_table = greybus_asoc_machine_of_match,
+ },
+ .probe = gbaudio_codec_probe,
+ .remove = gbaudio_codec_remove,
+};
+module_platform_driver(gbaudio_codec_driver);
+
+MODULE_DESCRIPTION("APBridge ALSA SoC dummy codec driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:apb-dummy-codec");
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
new file mode 100644
index 000000000000..ca027bd99ad7
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.h
@@ -0,0 +1,283 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __LINUX_GBAUDIO_CODEC_H
+#define __LINUX_GBAUDIO_CODEC_H
+
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+#define NAME_SIZE 32
+#define MAX_DAIS 2 /* APB1, APB2 */
+
+enum {
+ APB1_PCM = 0,
+ APB2_PCM,
+ NUM_CODEC_DAIS,
+};
+
+enum gbcodec_reg_index {
+ GBCODEC_CTL_REG,
+ GBCODEC_MUTE_REG,
+ GBCODEC_PB_LVOL_REG,
+ GBCODEC_PB_RVOL_REG,
+ GBCODEC_CAP_LVOL_REG,
+ GBCODEC_CAP_RVOL_REG,
+ GBCODEC_APB1_MUX_REG,
+ GBCODEC_APB2_MUX_REG,
+ GBCODEC_REG_COUNT
+};
+
+/* device_type should be the same as defined in audio.h (Android media layer) */
+enum {
+ GBAUDIO_DEVICE_NONE = 0x0,
+ /* reserved bits */
+ GBAUDIO_DEVICE_BIT_IN = 0x80000000,
+ GBAUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
+ /* output devices */
+ GBAUDIO_DEVICE_OUT_SPEAKER = 0x2,
+ GBAUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
+ GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
+ /* input devices */
+ GBAUDIO_DEVICE_IN_BUILTIN_MIC = GBAUDIO_DEVICE_BIT_IN | 0x4,
+ GBAUDIO_DEVICE_IN_WIRED_HEADSET = GBAUDIO_DEVICE_BIT_IN | 0x10,
+};
+
+/* bit 0-SPK, 1-HP, 2-DAC,
+ * 4-MIC, 5-HSMIC, 6-MIC2
+ */
+#define GBCODEC_CTL_REG_DEFAULT 0x00
+
+/* bit 0,1 - APB1-PB-L/R
+ * bit 2,3 - APB2-PB-L/R
+ * bit 4,5 - APB1-Cap-L/R
+ * bit 6,7 - APB2-Cap-L/R
+ */
+#define GBCODEC_MUTE_REG_DEFAULT 0x00
+
+/* 0-127 steps */
+#define GBCODEC_PB_VOL_REG_DEFAULT 0x00
+#define GBCODEC_CAP_VOL_REG_DEFAULT 0x00
+
+/* bit 0,1,2 - PB stereo, left, right
+ * bit 8,9,10 - Cap stereo, left, right
+ */
+#define GBCODEC_APB1_MUX_REG_DEFAULT 0x00
+#define GBCODEC_APB2_MUX_REG_DEFAULT 0x00
+
+#define GBCODEC_JACK_MASK 0x0000FFFF
+#define GBCODEC_JACK_BUTTON_MASK 0xFFFF0000
+
+static const u8 gbcodec_reg_defaults[GBCODEC_REG_COUNT] = {
+ GBCODEC_CTL_REG_DEFAULT,
+ GBCODEC_MUTE_REG_DEFAULT,
+ GBCODEC_PB_VOL_REG_DEFAULT,
+ GBCODEC_PB_VOL_REG_DEFAULT,
+ GBCODEC_CAP_VOL_REG_DEFAULT,
+ GBCODEC_CAP_VOL_REG_DEFAULT,
+ GBCODEC_APB1_MUX_REG_DEFAULT,
+ GBCODEC_APB2_MUX_REG_DEFAULT,
+};
+
+enum gbaudio_codec_state {
+ GBAUDIO_CODEC_SHUTDOWN = 0,
+ GBAUDIO_CODEC_STARTUP,
+ GBAUDIO_CODEC_HWPARAMS,
+ GBAUDIO_CODEC_PREPARE,
+ GBAUDIO_CODEC_START,
+ GBAUDIO_CODEC_STOP,
+};
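+
+/*
+ * The states above are ordered to follow the stream life cycle; the codec
+ * driver relies on that ordering (comparisons like
+ * "state < GBAUDIO_CODEC_HWPARAMS") to decide which configuration steps
+ * still need to be performed for a dynamically attached module.
+ */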
+
+struct gbaudio_stream_params {
+ int state;
+ u8 sig_bits, channels;
+ uint32_t format, rate;
+};
+
+struct gbaudio_codec_dai {
+ int id;
+ /* runtime params for playback/capture streams */
+ struct gbaudio_stream_params params[2];
+ struct list_head list;
+};
+
+struct gbaudio_codec_info {
+ struct device *dev;
+ struct snd_soc_codec *codec;
+ struct list_head module_list;
+ /* to maintain runtime stream params for each DAI */
+ struct list_head dai_list;
+ struct mutex lock;
+ u8 reg[GBCODEC_REG_COUNT];
+};
+
+struct gbaudio_widget {
+ __u8 id;
+ const char *name;
+ struct list_head list;
+};
+
+struct gbaudio_control {
+ __u8 id;
+ char *name;
+ char *wname;
+ const char * const *texts;
+ int items;
+ struct list_head list;
+};
+
+struct gbaudio_data_connection {
+ int id;
+ __le16 data_cport;
+ struct gb_connection *connection;
+ struct list_head list;
+ /* maintain runtime state for playback/capture stream */
+ int state[2];
+};
+
+/* stream direction */
+#define GB_PLAYBACK BIT(0)
+#define GB_CAPTURE BIT(1)
+
+enum gbaudio_module_state {
+ GBAUDIO_MODULE_OFF = 0,
+ GBAUDIO_MODULE_ON,
+};
+
+struct gbaudio_module_info {
+ /* module info */
+ struct device *dev;
+ int dev_id; /* check if it should be bundle_id/hd_cport_id */
+ int vid;
+ int pid;
+ int slot;
+ int type;
+ int set_uevent;
+ char vstr[NAME_SIZE];
+ char pstr[NAME_SIZE];
+ struct list_head list;
+ /* info below needs to be shared with user space */
+ int manager_id;
+ char name[NAME_SIZE];
+ unsigned int ip_devices;
+ unsigned int op_devices;
+
+ /* jack related */
+ char jack_name[NAME_SIZE];
+ char button_name[NAME_SIZE];
+ int jack_type;
+ int jack_mask;
+ int button_mask;
+ int button_status;
+ struct snd_soc_jack headset_jack;
+ struct snd_soc_jack button_jack;
+
+ /* connection info */
+ struct gb_connection *mgmt_connection;
+ size_t num_data_connections;
+ struct list_head data_list;
+
+ /* topology related */
+ int num_dais;
+ int num_controls;
+ int num_dapm_widgets;
+ int num_dapm_routes;
+ unsigned long dai_offset;
+ unsigned long widget_offset;
+ unsigned long control_offset;
+ unsigned long route_offset;
+ struct snd_kcontrol_new *controls;
+ struct snd_soc_dapm_widget *dapm_widgets;
+ struct snd_soc_dapm_route *dapm_routes;
+ struct snd_soc_dai_driver *dais;
+
+ struct list_head widget_list;
+ struct list_head ctl_list;
+ struct list_head widget_ctl_list;
+
+ struct gb_audio_topology *topology;
+};
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data);
+void gbaudio_tplg_release(struct gbaudio_module_info *module);
+
+int gbaudio_module_update(struct gbaudio_codec_info *codec,
+ struct snd_soc_dapm_widget *w,
+ struct gbaudio_module_info *module,
+ int enable);
+int gbaudio_register_module(struct gbaudio_module_info *module);
+void gbaudio_unregister_module(struct gbaudio_module_info *module);
+
+/* protocol related */
+extern int gb_audio_gb_get_topology(struct gb_connection *connection,
+ struct gb_audio_topology **topology);
+extern int gb_audio_gb_get_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+extern int gb_audio_gb_set_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value);
+extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
+ u8 widget_id);
+extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
+ u8 widget_id);
+extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
+ u16 data_cport, uint32_t *format,
+ uint32_t *rate, u8 *channels,
+ u8 *sig_bits);
+extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
+ u16 data_cport, uint32_t format,
+ uint32_t rate, u8 channels,
+ u8 sig_bits);
+extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
+ u16 data_cport);
+extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+ u16 data_cport);
+extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size);
+extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
+ u16 data_cport);
+extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+ u16 data_cport);
+extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
+ __u16 i2s_port, __u32 format,
+ __u32 rate, __u32 mclk_freq);
+extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
+ __u16 i2s_port, __u16 cportid,
+ __u8 direction);
+extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
+ __u16 i2s_port, __u64 timestamp);
+extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
+ __u16 i2s_port, __u16 size);
+extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
+ __u16 i2s_port);
+
+#endif /* __LINUX_GBAUDIO_CODEC_H */
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
new file mode 100644
index 000000000000..42f287dd7b84
--- /dev/null
+++ b/drivers/staging/greybus/audio_gb.c
@@ -0,0 +1,228 @@
+/*
+ * Greybus Audio Device Class Protocol helpers
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+#include "operation.h"
+#include "audio_codec.h"
+
+/* TODO: Split into separate calls */
+int gb_audio_gb_get_topology(struct gb_connection *connection,
+ struct gb_audio_topology **topology)
+{
+ struct gb_audio_get_topology_size_response size_resp;
+ struct gb_audio_topology *topo;
+ u16 size;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
+ NULL, 0, &size_resp, sizeof(size_resp));
+ if (ret)
+ return ret;
+
+ size = le16_to_cpu(size_resp.size);
+ if (size < sizeof(*topo))
+ return -ENODATA;
+
+ topo = kzalloc(size, GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
+ topo, size);
+ if (ret) {
+ kfree(topo);
+ return ret;
+ }
+
+ *topology = topo;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
+
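+/*
+ * Note for callers: on success, *topology points to a kzalloc()ed buffer of
+ * the size reported by the module; ownership passes to the caller, who must
+ * release it with kfree() once the topology has been parsed.
+ */
+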
+int gb_audio_gb_get_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value)
+{
+ struct gb_audio_get_control_request req;
+ struct gb_audio_get_control_response resp;
+ int ret;
+
+ req.control_id = control_id;
+ req.index = index;
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret)
+ return ret;
+
+ memcpy(value, &resp.value, sizeof(*value));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
+
+int gb_audio_gb_set_control(struct gb_connection *connection,
+ u8 control_id, u8 index,
+ struct gb_audio_ctl_elem_value *value)
+{
+ struct gb_audio_set_control_request req;
+
+ req.control_id = control_id;
+ req.index = index;
+ memcpy(&req.value, value, sizeof(req.value));
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
+
+int gb_audio_gb_enable_widget(struct gb_connection *connection,
+ u8 widget_id)
+{
+ struct gb_audio_enable_widget_request req;
+
+ req.widget_id = widget_id;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
+
+int gb_audio_gb_disable_widget(struct gb_connection *connection,
+ u8 widget_id)
+{
+ struct gb_audio_disable_widget_request req;
+
+ req.widget_id = widget_id;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
+
+int gb_audio_gb_get_pcm(struct gb_connection *connection, u16 data_cport,
+ uint32_t *format, uint32_t *rate, u8 *channels,
+ u8 *sig_bits)
+{
+ struct gb_audio_get_pcm_request req;
+ struct gb_audio_get_pcm_response resp;
+ int ret;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret)
+ return ret;
+
+ *format = le32_to_cpu(resp.format);
+ *rate = le32_to_cpu(resp.rate);
+ *channels = resp.channels;
+ *sig_bits = resp.sig_bits;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
+
+int gb_audio_gb_set_pcm(struct gb_connection *connection, u16 data_cport,
+ uint32_t format, uint32_t rate, u8 channels,
+ u8 sig_bits)
+{
+ struct gb_audio_set_pcm_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.format = cpu_to_le32(format);
+ req.rate = cpu_to_le32(rate);
+ req.channels = channels;
+ req.sig_bits = sig_bits;
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
+
+int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size)
+{
+ struct gb_audio_set_tx_data_size_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.size = cpu_to_le16(size);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
+
+int gb_audio_gb_activate_tx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_activate_tx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
+
+int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_deactivate_tx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
+
+int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
+ u16 data_cport, u16 size)
+{
+ struct gb_audio_set_rx_data_size_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+ req.size = cpu_to_le16(size);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
+
+int gb_audio_gb_activate_rx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_activate_rx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
+
+int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
+ u16 data_cport)
+{
+ struct gb_audio_deactivate_rx_request req;
+
+ req.data_cport = cpu_to_le16(data_cport);
+
+ return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
+ &req, sizeof(req), NULL, 0);
+}
+EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("greybus:audio-gb");
+MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
+MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
new file mode 100644
index 000000000000..aa6508b44fab
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.c
@@ -0,0 +1,184 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rwlock.h>
+#include <linux/idr.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+static struct kset *manager_kset;
+
+static LIST_HEAD(modules_list);
+static DECLARE_RWSEM(modules_rwsem);
+static DEFINE_IDA(module_id);
+
+/* helpers */
+static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ if (id < 0)
+ return NULL;
+
+ list_for_each_entry(module, &modules_list, list) {
+ if (module->id == id)
+ return module;
+ }
+
+ return NULL;
+}
+
+/* public API */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
+{
+ struct gb_audio_manager_module *module;
+ int id;
+ int err;
+
+ id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
+ err = gb_audio_manager_module_create(&module, manager_kset,
+ id, desc);
+ if (err) {
+ ida_simple_remove(&module_id, id);
+ return err;
+ }
+
+ /* Add it to the list */
+ down_write(&modules_rwsem);
+ list_add_tail(&module->list, &modules_list);
+ up_write(&modules_rwsem);
+
+ return module->id;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_add);
+
+int gb_audio_manager_remove(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_write(&modules_rwsem);
+
+ module = gb_audio_manager_get_locked(id);
+ if (!module) {
+ up_write(&modules_rwsem);
+ return -EINVAL;
+ }
+ list_del(&module->list);
+ kobject_put(&module->kobj);
+ up_write(&modules_rwsem);
+ ida_simple_remove(&module_id, id);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
+
+void gb_audio_manager_remove_all(void)
+{
+ struct gb_audio_manager_module *module, *next;
+ int is_empty = 1;
+
+ down_write(&modules_rwsem);
+
+ list_for_each_entry_safe(module, next, &modules_list, list) {
+ list_del(&module->list);
+ kobject_put(&module->kobj);
+ ida_simple_remove(&module_id, module->id);
+ }
+
+ is_empty = list_empty(&modules_list);
+
+ up_write(&modules_rwsem);
+
+ if (!is_empty)
+ pr_warn("Not all nodes were deleted\n");
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
+
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_read(&modules_rwsem);
+ module = gb_audio_manager_get_locked(id);
+ if (module)
+ kobject_get(&module->kobj);
+ up_read(&modules_rwsem);
+ return module;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
+
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
+{
+ kobject_put(&module->kobj);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
+
+int gb_audio_manager_dump_module(int id)
+{
+ struct gb_audio_manager_module *module;
+
+ down_read(&modules_rwsem);
+ module = gb_audio_manager_get_locked(id);
+ up_read(&modules_rwsem);
+
+ if (!module)
+ return -EINVAL;
+
+ gb_audio_manager_module_dump(module);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
+
+void gb_audio_manager_dump_all(void)
+{
+ struct gb_audio_manager_module *module;
+ int count = 0;
+
+ down_read(&modules_rwsem);
+ list_for_each_entry(module, &modules_list, list) {
+ gb_audio_manager_module_dump(module);
+ count++;
+ }
+ up_read(&modules_rwsem);
+
+ pr_info("Number of connected modules: %d\n", count);
+}
+EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
+
+/*
+ * module init/deinit
+ */
+static int __init manager_init(void)
+{
+ manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
+ kernel_kobj);
+ if (!manager_kset)
+ return -ENOMEM;
+
+#ifdef GB_AUDIO_MANAGER_SYSFS
+ gb_audio_manager_sysfs_init(&manager_kset->kobj);
+#endif
+
+ return 0;
+}
+
+static void __exit manager_exit(void)
+{
+ gb_audio_manager_remove_all();
+ kset_unregister(manager_kset);
+ ida_destroy(&module_id);
+}
+
+module_init(manager_init);
+module_exit(manager_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
new file mode 100644
index 000000000000..c4ca09754a6a
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.h
@@ -0,0 +1,83 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_H_
+#define _GB_AUDIO_MANAGER_H_
+
+#include <linux/kobject.h>
+#include <linux/list.h>
+
+#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
+#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"
+
+struct gb_audio_manager_module_descriptor {
+ char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
+ int slot;
+ int vid;
+ int pid;
+ int cport;
+ unsigned int ip_devices;
+ unsigned int op_devices;
+};
+
+struct gb_audio_manager_module {
+ struct kobject kobj;
+ struct list_head list;
+ int id;
+ struct gb_audio_manager_module_descriptor desc;
+};
+
+/*
+ * Creates and registers a new gb_audio_manager_module from the specified
+ * descriptor.
+ *
+ * Returns the id of the newly created module, or a negative value on error.
+ */
+int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);
+
+/*
+ * Removes the connected gb_audio_manager_module with the specified id.
+ *
+ * Returns zero on success, or a negative value on error.
+ */
+int gb_audio_manager_remove(int id);
+
+/*
+ * Removes all connected gb_audio_modules; if any module could not be
+ * removed, a warning is logged.
+ */
+void gb_audio_manager_remove_all(void);
+
+/*
+ * Retrieves the gb_audio_manager_module with the specified id and takes a
+ * reference on it; returns NULL if there is no module with that id.  Drop
+ * the reference with gb_audio_manager_put_module() when done.
+ */
+struct gb_audio_manager_module *gb_audio_manager_get_module(int id);
+
+/*
+ * Drops the reference on a module obtained from
+ * gb_audio_manager_get_module().  Modules themselves are removed via
+ * gb_audio_manager_remove().
+ */
+void gb_audio_manager_put_module(struct gb_audio_manager_module *module);
+
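+/*
+ * Illustrative usage of the lookup API above (a sketch, not code taken from
+ * the driver): look a module up by id, use its descriptor, then drop the
+ * reference again:
+ *
+ *   struct gb_audio_manager_module *module;
+ *
+ *   module = gb_audio_manager_get_module(id);
+ *   if (!module)
+ *           return;
+ *   pr_info("module %s in slot %d\n", module->desc.name,
+ *           module->desc.slot);
+ *   gb_audio_manager_put_module(module);
+ */
+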
+/*
+ * Dumps the module with the specified id.
+ * Returns 0 on success, or -EINVAL if there is no module with that id.
+ */
+int gb_audio_manager_dump_module(int id);
+
+/*
+ * Dumps all connected modules
+ */
+void gb_audio_manager_dump_all(void);
+
+#endif /* _GB_AUDIO_MANAGER_H_ */
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
new file mode 100644
index 000000000000..a10e96ad79c1
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -0,0 +1,258 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/slab.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
+#define to_gb_audio_module_attr(x) \
+ container_of(x, struct gb_audio_manager_module_attribute, attr)
+#define to_gb_audio_module(x) \
+ container_of(x, struct gb_audio_manager_module, kobj)
+
+struct gb_audio_manager_module_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static ssize_t gb_audio_module_attr_show(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct gb_audio_manager_module_attribute *attribute;
+ struct gb_audio_manager_module *module;
+
+ attribute = to_gb_audio_module_attr(attr);
+ module = to_gb_audio_module(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(module, attribute, buf);
+}
+
+static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct gb_audio_manager_module_attribute *attribute;
+ struct gb_audio_manager_module *module;
+
+ attribute = to_gb_audio_module_attr(attr);
+ module = to_gb_audio_module(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(module, attribute, buf, len);
+}
+
+static const struct sysfs_ops gb_audio_module_sysfs_ops = {
+ .show = gb_audio_module_attr_show,
+ .store = gb_audio_module_attr_store,
+};
+
+static void gb_audio_module_release(struct kobject *kobj)
+{
+ struct gb_audio_manager_module *module = to_gb_audio_module(kobj);
+
+ pr_info("Destroying audio module #%d\n", module->id);
+ /* TODO -> delete from list */
+ kfree(module);
+}
+
+static ssize_t gb_audio_module_name_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", module->desc.name);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
+ __ATTR(name, 0664, gb_audio_module_name_show, NULL);
+
+static ssize_t gb_audio_module_slot_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.slot);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
+ __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
+
+static ssize_t gb_audio_module_vid_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.vid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
+ __ATTR(vid, 0664, gb_audio_module_vid_show, NULL);
+
+static ssize_t gb_audio_module_pid_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.pid);
+}
+
+static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
+ __ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
+
+static ssize_t gb_audio_module_cport_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", module->desc.cport);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_cport_attribute =
+ __ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
+
+static ssize_t gb_audio_module_ip_devices_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%X", module->desc.ip_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_ip_devices_attribute =
+ __ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);
+
+static ssize_t gb_audio_module_op_devices_show(
+ struct gb_audio_manager_module *module,
+ struct gb_audio_manager_module_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%X", module->desc.op_devices);
+}
+
+static struct gb_audio_manager_module_attribute
+ gb_audio_module_op_devices_attribute =
+ __ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
+
+static struct attribute *gb_audio_module_default_attrs[] = {
+ &gb_audio_module_name_attribute.attr,
+ &gb_audio_module_slot_attribute.attr,
+ &gb_audio_module_vid_attribute.attr,
+ &gb_audio_module_pid_attribute.attr,
+ &gb_audio_module_cport_attribute.attr,
+ &gb_audio_module_ip_devices_attribute.attr,
+ &gb_audio_module_op_devices_attribute.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
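+/*
+ * Each audio module is exposed as a kobject named after its id beneath the
+ * manager's kset, carrying the sysfs attributes listed above (name, slot,
+ * vid, pid, cport, ip_devices, op_devices).
+ */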
+static struct kobj_type gb_audio_module_type = {
+ .sysfs_ops = &gb_audio_module_sysfs_ops,
+ .release = gb_audio_module_release,
+ .default_attrs = gb_audio_module_default_attrs,
+};
+
+static void send_add_uevent(struct gb_audio_manager_module *module)
+{
+ char name_string[128];
+ char slot_string[64];
+ char vid_string[64];
+ char pid_string[64];
+ char cport_string[64];
+ char ip_devices_string[64];
+ char op_devices_string[64];
+
+ char *envp[] = {
+ name_string,
+ slot_string,
+ vid_string,
+ pid_string,
+ cport_string,
+ ip_devices_string,
+ op_devices_string,
+ NULL
+ };
+
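+ /*
+ * Illustrative uevent environment sent with KOBJ_ADD (values are examples
+ * only): NAME=mod SLOT=1 VID=2 PID=3 CPORT=4
+ * I/P DEVICES=0x10 O/P DEVICES=0x10
+ */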
+ snprintf(name_string, 128, "NAME=%s", module->desc.name);
+ snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
+ snprintf(vid_string, 64, "VID=%d", module->desc.vid);
+ snprintf(pid_string, 64, "PID=%d", module->desc.pid);
+ snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
+ snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
+ module->desc.ip_devices);
+ snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
+ module->desc.op_devices);
+
+ kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
+}
+
+int gb_audio_manager_module_create(
+ struct gb_audio_manager_module **module,
+ struct kset *manager_kset,
+ int id, struct gb_audio_manager_module_descriptor *desc)
+{
+ int err;
+ struct gb_audio_manager_module *m;
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ return -ENOMEM;
+
+ /* Initialize the node */
+ INIT_LIST_HEAD(&m->list);
+
+ /* Set the module id */
+ m->id = id;
+
+ /* Copy the provided descriptor */
+ memcpy(&m->desc, desc, sizeof(*desc));
+
+ /* set the kset */
+ m->kobj.kset = manager_kset;
+
+ /*
+ * Initialize and add the kobject to the kernel. All the default files
+ * will be created here. As we have already specified a kset for this
+ * kobject, we don't have to set a parent for the kobject, the kobject
+ * will be placed beneath that kset automatically.
+ */
+ err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
+ id);
+ if (err) {
+ pr_err("failed initializing kobject for audio module #%d\n",
+ id);
+ kobject_put(&m->kobj);
+ return err;
+ }
+
+ /*
+ * Notify the object was created
+ */
+ send_add_uevent(m);
+
+ *module = m;
+ pr_info("Created audio module #%d\n", id);
+ return 0;
+}
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
+{
+ pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
+ module->id,
+ module->desc.name,
+ module->desc.slot,
+ module->desc.vid,
+ module->desc.pid,
+ module->desc.cport,
+ module->desc.ip_devices,
+ module->desc.op_devices);
+}
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
new file mode 100644
index 000000000000..079ce953c256
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -0,0 +1,28 @@
+/*
+ * Greybus Audio Manager - private interface
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
+#define _GB_AUDIO_MANAGER_PRIVATE_H_
+
+#include <linux/kobject.h>
+
+#include "audio_manager.h"
+
+int gb_audio_manager_module_create(
+ struct gb_audio_manager_module **module,
+ struct kset *manager_kset,
+ int id, struct gb_audio_manager_module_descriptor *desc);
+
+/* module destroyed via kobject_put */
+
+void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);
+
+/* sysfs control */
+void gb_audio_manager_sysfs_init(struct kobject *kobj);
+
+#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
new file mode 100644
index 000000000000..d8bf8591ff9e
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -0,0 +1,102 @@
+/*
+ * Greybus Audio Manager - sysfs control
+ *
+ * Copyright 2015-2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#include "audio_manager.h"
+#include "audio_manager_private.h"
+
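+/*
+ * Parse a module descriptor written to the "add" control file.  The input
+ * mirrors the dump format, e.g. (illustrative values only):
+ *   name=mod slot=1 vid=2 pid=3 cport=4 i/p devices=0x3 o/p devices=0x3
+ */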
+static ssize_t manager_sysfs_add_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gb_audio_manager_module_descriptor desc = { {0} };
+
+ int num = sscanf(buf,
+ "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
+ "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
+ "o/p devices=0x%X",
+ desc.name, &desc.slot, &desc.vid, &desc.pid,
+ &desc.cport, &desc.ip_devices, &desc.op_devices);
+
+ if (num != 7)
+ return -EINVAL;
+
+ num = gb_audio_manager_add(&desc);
+ if (num < 0)
+ return -EINVAL;
+
+ return count;
+}
+
+static struct kobj_attribute manager_add_attribute =
+ __ATTR(add, 0664, NULL, manager_sysfs_add_store);
+
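+/*
+ * Remove a registered module by id, e.g. "echo 0 > remove"
+ * (the id shown is illustrative).
+ */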
+static ssize_t manager_sysfs_remove_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int id;
+
+ int num = sscanf(buf, "%d", &id);
+
+ if (num != 1)
+ return -EINVAL;
+
+ num = gb_audio_manager_remove(id);
+ if (num)
+ return num;
+
+ return count;
+}
+
+static struct kobj_attribute manager_remove_attribute =
+ __ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
+
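+/*
+ * Dump a single module ("echo <id> > dump") or every registered module
+ * ("echo all > dump") to the kernel log.
+ */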
+static ssize_t manager_sysfs_dump_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int id;
+
+ int num = sscanf(buf, "%d", &id);
+
+ if (num == 1) {
+ num = gb_audio_manager_dump_module(id);
+ if (num)
+ return num;
+ } else if (!strncmp("all", buf, 3))
+ gb_audio_manager_dump_all();
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static struct kobj_attribute manager_dump_attribute =
+ __ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
+
+static void manager_sysfs_init_attribute(
+ struct kobject *kobj, struct kobj_attribute *kattr)
+{
+ int err;
+
+ err = sysfs_create_file(kobj, &kattr->attr);
+ if (err) {
+ pr_warn("creating the sysfs entry for %s failed: %d\n",
+ kattr->attr.name, err);
+ }
+}
+
+void gb_audio_manager_sysfs_init(struct kobject *kobj)
+{
+ manager_sysfs_init_attribute(kobj, &manager_add_attribute);
+ manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
+ manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
+}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
new file mode 100644
index 000000000000..ae1c0fa85752
--- /dev/null
+++ b/drivers/staging/greybus/audio_module.c
@@ -0,0 +1,482 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "audio_codec.h"
+#include "audio_apbridgea.h"
+#include "audio_manager.h"
+
+/*
+ * gb_snd management functions
+ */
+
+static int gbaudio_request_jack(struct gbaudio_module_info *module,
+ struct gb_audio_jack_event_request *req)
+{
+ int report;
+ struct snd_jack *jack = module->headset_jack.jack;
+ struct snd_jack *btn_jack = module->button_jack.jack;
+
+ if (!jack) {
+ dev_err_ratelimited(module->dev,
+ "Invalid jack event received:type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+ return -EINVAL;
+ }
+
+ dev_warn_ratelimited(module->dev,
+ "Jack Event received: type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+
+ if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
+ module->jack_type = 0;
+ if (btn_jack && module->button_status) {
+ snd_soc_jack_report(&module->button_jack, 0,
+ module->button_mask);
+ module->button_status = 0;
+ }
+ snd_soc_jack_report(&module->headset_jack, 0,
+ module->jack_mask);
+ return 0;
+ }
+
+ report = req->jack_attribute & module->jack_mask;
+ if (!report) {
+ dev_err_ratelimited(module->dev,
+ "Invalid jack event received:type: %u, event: %u\n",
+ req->jack_attribute, req->event);
+ return -EINVAL;
+ }
+
+ if (module->jack_type)
+ dev_warn_ratelimited(module->dev,
+ "Modifying jack from %d to %d\n",
+ module->jack_type, report);
+
+ module->jack_type = report;
+ snd_soc_jack_report(&module->headset_jack, report, module->jack_mask);
+
+ return 0;
+}
+
+static int gbaudio_request_button(struct gbaudio_module_info *module,
+ struct gb_audio_button_event_request *req)
+{
+ int soc_button_id, report;
+ struct snd_jack *btn_jack = module->button_jack.jack;
+
+ if (!btn_jack) {
+ dev_err_ratelimited(module->dev,
+ "Invalid button event received:type: %u, event: %u\n",
+ req->button_id, req->event);
+ return -EINVAL;
+ }
+
+ dev_warn_ratelimited(module->dev,
+ "Button Event received: id: %u, event: %u\n",
+ req->button_id, req->event);
+
+ /* currently supports 4 buttons only */
+ if (!module->jack_type) {
+ dev_err_ratelimited(module->dev,
+ "Jack not present. Bogus event!!\n");
+ return -EINVAL;
+ }
+
+ report = module->button_status & module->button_mask;
+ soc_button_id = 0;
+
+ switch (req->button_id) {
+ case 1:
+ soc_button_id = SND_JACK_BTN_0 & module->button_mask;
+ break;
+
+ case 2:
+ soc_button_id = SND_JACK_BTN_1 & module->button_mask;
+ break;
+
+ case 3:
+ soc_button_id = SND_JACK_BTN_2 & module->button_mask;
+ break;
+
+ case 4:
+ soc_button_id = SND_JACK_BTN_3 & module->button_mask;
+ break;
+ }
+
+ if (!soc_button_id) {
+ dev_err_ratelimited(module->dev,
+ "Invalid button request received\n");
+ return -EINVAL;
+ }
+
+ if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
+ report = report | soc_button_id;
+ else
+ report = report & ~soc_button_id;
+
+ module->button_status = report;
+
+ snd_soc_jack_report(&module->button_jack, report, module->button_mask);
+
+ return 0;
+}
+
+static int gbaudio_request_stream(struct gbaudio_module_info *module,
+ struct gb_audio_streaming_event_request *req)
+{
+ dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
+ req->data_cport, req->event);
+
+ return 0;
+}
+
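+/*
+ * Request handler for the management connection: dispatch streaming, jack
+ * and button events reported by the module to the helpers above.
+ */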
+static int gbaudio_codec_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gbaudio_module_info *module =
+ greybus_get_drvdata(connection->bundle);
+ struct gb_operation_msg_hdr *header = op->request->header;
+ struct gb_audio_streaming_event_request *stream_req;
+ struct gb_audio_jack_event_request *jack_req;
+ struct gb_audio_button_event_request *button_req;
+ int ret;
+
+ switch (header->type) {
+ case GB_AUDIO_TYPE_STREAMING_EVENT:
+ stream_req = op->request->payload;
+ ret = gbaudio_request_stream(module, stream_req);
+ break;
+
+ case GB_AUDIO_TYPE_JACK_EVENT:
+ jack_req = op->request->payload;
+ ret = gbaudio_request_jack(module, jack_req);
+ break;
+
+ case GB_AUDIO_TYPE_BUTTON_EVENT:
+ button_req = op->request->payload;
+ ret = gbaudio_request_button(module, button_req);
+ break;
+
+ default:
+ dev_err_ratelimited(&connection->bundle->dev,
+ "Invalid Audio Event received\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
+ struct greybus_descriptor_cport *cport_desc,
+ struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+
+ /* Management Cport */
+ if (gbmodule->mgmt_connection) {
+ dev_err(&bundle->dev,
+ "Can't have multiple Management connections\n");
+ return -ENODEV;
+ }
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gbaudio_codec_request_handler);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ greybus_set_drvdata(bundle, gbmodule);
+ gbmodule->mgmt_connection = connection;
+
+ return 0;
+}
+
+static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
+ struct greybus_descriptor_cport *cport_desc,
+ struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+ struct gbaudio_data_connection *dai;
+
+ dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai) {
+ dev_err(gbmodule->dev, "DAI Malloc failure\n");
+ return -ENOMEM;
+ }
+
+ connection = gb_connection_create_offloaded(bundle,
+ le16_to_cpu(cport_desc->id),
+ GB_CONNECTION_FLAG_CSD);
+ if (IS_ERR(connection)) {
+ devm_kfree(gbmodule->dev, dai);
+ return PTR_ERR(connection);
+ }
+
+ greybus_set_drvdata(bundle, gbmodule);
+ dai->id = 0;
+ dai->data_cport = connection->intf_cport_id;
+ dai->connection = connection;
+ list_add(&dai->list, &gbmodule->data_list);
+
+ return 0;
+}
+
+/*
+ * This is the basic hook to get things initialized and registered with greybus
+ */
+
+static int gb_audio_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct device *dev = &bundle->dev;
+ struct gbaudio_module_info *gbmodule;
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_audio_manager_module_descriptor desc;
+ struct gbaudio_data_connection *dai, *_dai;
+ int ret, i;
+ struct gb_audio_topology *topology;
+
+ /* There should be at least one Management and one Data cport */
+ if (bundle->num_cports < 2)
+ return -ENODEV;
+
+ /*
+ * There can be only one Management connection and any number of data
+ * connections.
+ */
+ gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
+ if (!gbmodule)
+ return -ENOMEM;
+
+ gbmodule->num_data_connections = bundle->num_cports - 1;
+ INIT_LIST_HEAD(&gbmodule->data_list);
+ INIT_LIST_HEAD(&gbmodule->widget_list);
+ INIT_LIST_HEAD(&gbmodule->ctl_list);
+ INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
+ gbmodule->dev = dev;
+ snprintf(gbmodule->name, NAME_SIZE, "%s.%s", dev->driver->name,
+ dev_name(dev));
+ greybus_set_drvdata(bundle, gbmodule);
+
+ /* Create all connections */
+ for (i = 0; i < bundle->num_cports; i++) {
+ cport_desc = &bundle->cport_desc[i];
+
+ switch (cport_desc->protocol_id) {
+ case GREYBUS_PROTOCOL_AUDIO_MGMT:
+ ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
+ bundle);
+ if (ret)
+ goto destroy_connections;
+ break;
+ case GREYBUS_PROTOCOL_AUDIO_DATA:
+ ret = gb_audio_add_data_connection(gbmodule, cport_desc,
+ bundle);
+ if (ret)
+ goto destroy_connections;
+ break;
+ default:
+ dev_err(dev, "Unsupported protocol: 0x%02x\n",
+ cport_desc->protocol_id);
+ ret = -ENODEV;
+ goto destroy_connections;
+ }
+ }
+
+ /* There must be a management cport */
+ if (!gbmodule->mgmt_connection) {
+ ret = -EINVAL;
+ dev_err(dev, "Missing management connection\n");
+ goto destroy_connections;
+ }
+
+ /* Initialize management connection */
+ ret = gb_connection_enable(gbmodule->mgmt_connection);
+ if (ret) {
+ dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
+ goto destroy_connections;
+ }
+ gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;
+
+ /*
+ * FIXME: the topology buffer is currently allocated by the audio_gb
+ * helper; it should be allocated within the codec driver itself
+ */
+ ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
+ if (ret) {
+ dev_err(dev, "%d:Error while fetching topology\n", ret);
+ goto disable_connection;
+ }
+
+ /* process topology data */
+ ret = gbaudio_tplg_parse_data(gbmodule, topology);
+ if (ret) {
+ dev_err(dev, "%d:Error while parsing topology data\n",
+ ret);
+ goto free_topology;
+ }
+ gbmodule->topology = topology;
+
+ /* Initialize data connections */
+ list_for_each_entry(dai, &gbmodule->data_list, list) {
+ ret = gb_connection_enable(dai->connection);
+ if (ret) {
+ dev_err(dev,
+ "%d:Error while enabling %d:data connection\n",
+ ret, dai->data_cport);
+ goto disable_data_connection;
+ }
+ }
+
+ /* register module with gbcodec */
+ ret = gbaudio_register_module(gbmodule);
+ if (ret)
+ goto disable_data_connection;
+
+ /* inform above layer for uevent */
+ dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
+ /* prepare for the audio manager */
+ strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
+ desc.slot = 1; /* todo */
+ desc.vid = 2; /* todo */
+ desc.pid = 3; /* todo */
+ desc.cport = gbmodule->dev_id;
+ desc.op_devices = gbmodule->op_devices;
+ desc.ip_devices = gbmodule->ip_devices;
+ gbmodule->manager_id = gb_audio_manager_add(&desc);
+
+ dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+disable_data_connection:
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
+ gb_connection_disable(dai->connection);
+ gbaudio_tplg_release(gbmodule);
+ gbmodule->topology = NULL;
+
+free_topology:
+ kfree(topology);
+
+disable_connection:
+ gb_connection_disable(gbmodule->mgmt_connection);
+
+destroy_connections:
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+ gb_connection_destroy(dai->connection);
+ list_del(&dai->list);
+ devm_kfree(dev, dai);
+ }
+
+ if (gbmodule->mgmt_connection)
+ gb_connection_destroy(gbmodule->mgmt_connection);
+
+ devm_kfree(dev, gbmodule);
+
+ return ret;
+}
+
+static void gb_audio_disconnect(struct gb_bundle *bundle)
+{
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai, *_dai;
+
+ gb_pm_runtime_get_sync(bundle);
+
+ /* cleanup module related resources first */
+ gbaudio_unregister_module(gbmodule);
+
+ /* inform uevent to above layers */
+ gb_audio_manager_remove(gbmodule->manager_id);
+
+ gbaudio_tplg_release(gbmodule);
+ kfree(gbmodule->topology);
+ gbmodule->topology = NULL;
+ gb_connection_disable(gbmodule->mgmt_connection);
+ list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
+ gb_connection_disable(dai->connection);
+ gb_connection_destroy(dai->connection);
+ list_del(&dai->list);
+ devm_kfree(gbmodule->dev, dai);
+ }
+ gb_connection_destroy(gbmodule->mgmt_connection);
+ gbmodule->mgmt_connection = NULL;
+
+ devm_kfree(&bundle->dev, gbmodule);
+}
+
+static const struct greybus_bundle_id gb_audio_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
+
+#ifdef CONFIG_PM
+static int gb_audio_suspend(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai;
+
+ list_for_each_entry(dai, &gbmodule->data_list, list)
+ gb_connection_disable(dai->connection);
+
+ gb_connection_disable(gbmodule->mgmt_connection);
+
+ return 0;
+}
+
+static int gb_audio_resume(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
+ struct gbaudio_data_connection *dai;
+ int ret;
+
+ ret = gb_connection_enable(gbmodule->mgmt_connection);
+ if (ret) {
+ dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
+ return ret;
+ }
+
+ list_for_each_entry(dai, &gbmodule->data_list, list) {
+ ret = gb_connection_enable(dai->connection);
+ if (ret) {
+ dev_err(dev,
+ "%d:Error while enabling %d:data connection\n",
+ ret, dai->data_cport);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_audio_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
+};
+
+static struct greybus_driver gb_audio_driver = {
+ .name = "gb-audio",
+ .probe = gb_audio_probe,
+ .disconnect = gb_audio_disconnect,
+ .id_table = gb_audio_id_table,
+ .driver.pm = &gb_audio_pm_ops,
+};
+module_greybus_driver(gb_audio_driver);
+
+MODULE_DESCRIPTION("Greybus Audio module driver");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gbaudio-module");
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
new file mode 100644
index 000000000000..b6251691a33d
--- /dev/null
+++ b/drivers/staging/greybus/audio_topology.c
@@ -0,0 +1,1443 @@
+/*
+ * Greybus audio driver
+ * Copyright 2015-2016 Google Inc.
+ * Copyright 2015-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "audio_codec.h"
+#include "greybus_protocols.h"
+
+#define GBAUDIO_INVALID_ID 0xFF
+
+/* mixer control */
+struct gb_mixer_control {
+ int min, max;
+ unsigned int reg, rreg, shift, rshift, invert;
+};
+
+struct gbaudio_ctl_pvt {
+ unsigned int ctl_id;
+ unsigned int data_cport;
+ unsigned int access;
+ unsigned int vcount;
+ struct gb_audio_ctl_elem_info *info;
+};
+
+static struct gbaudio_module_info *find_gb_module(
+ struct gbaudio_codec_info *codec,
+ char const *name)
+{
+ int dev_id, ret;
+ char begin[NAME_SIZE];
+ struct gbaudio_module_info *module;
+
+ if (!name)
+ return NULL;
+
+ ret = sscanf(name, "%s %d", begin, &dev_id);
+ if (ret != 2)
+ return NULL;
+
+ dev_dbg(codec->dev, "%s:Find module#%d\n", __func__, dev_id);
+
+ mutex_lock(&codec->lock);
+ list_for_each_entry(module, &codec->module_list, list) {
+ if (module->dev_id == dev_id) {
+ mutex_unlock(&codec->lock);
+ return module;
+ }
+ }
+ mutex_unlock(&codec->lock);
+ dev_warn(codec->dev, "%s: module#%d missing in codec list\n", name,
+ dev_id);
+ return NULL;
+}
+
+static const char *gbaudio_map_controlid(struct gbaudio_module_info *module,
+ __u8 control_id, __u8 index)
+{
+ struct gbaudio_control *control;
+
+ if (control_id == GBAUDIO_INVALID_ID)
+ return NULL;
+
+ list_for_each_entry(control, &module->ctl_list, list) {
+ if (control->id == control_id) {
+ if (index == GBAUDIO_INVALID_ID)
+ return control->name;
+ if (index >= control->items)
+ return NULL;
+ return control->texts[index];
+ }
+ }
+ list_for_each_entry(control, &module->widget_ctl_list, list) {
+ if (control->id == control_id) {
+ if (index == GBAUDIO_INVALID_ID)
+ return control->name;
+ if (index >= control->items)
+ return NULL;
+ return control->texts[index];
+ }
+ }
+ return NULL;
+}
+
+static int gbaudio_map_controlname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_control *control;
+
+ list_for_each_entry(control, &module->ctl_list, list) {
+ if (!strncmp(control->name, name, NAME_SIZE))
+ return control->id;
+ }
+
+ dev_warn(module->dev, "%s: missing in modules controls list\n", name);
+
+ return -EINVAL;
+}
+
+static int gbaudio_map_wcontrolname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_control *control;
+
+ list_for_each_entry(control, &module->widget_ctl_list, list) {
+ if (!strncmp(control->wname, name, NAME_SIZE))
+ return control->id;
+ }
+ dev_warn(module->dev, "%s: missing in modules controls list\n", name);
+
+ return -EINVAL;
+}
+
+static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
+ const char *name)
+{
+ struct gbaudio_widget *widget;
+
+ list_for_each_entry(widget, &module->widget_list, list) {
+ if (!strncmp(widget->name, name, NAME_SIZE))
+ return widget->id;
+ }
+ dev_warn(module->dev, "%s: missing in modules widgets list\n", name);
+
+ return -EINVAL;
+}
+
+static const char *gbaudio_map_widgetid(struct gbaudio_module_info *module,
+ __u8 widget_id)
+{
+ struct gbaudio_widget *widget;
+
+ list_for_each_entry(widget, &module->widget_list, list) {
+ if (widget->id == widget_id)
+ return widget->name;
+ }
+ return NULL;
+}
+
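+/*
+ * Build an array of string pointers over the NUL-separated names packed in
+ * gb_audio_enumerated->names, one entry per enumeration item.
+ */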
+static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
+ struct gb_audio_enumerated *gbenum)
+{
+ const char **strings;
+ int i;
+ __u8 *data;
+
+ strings = devm_kzalloc(gb->dev, sizeof(char *) * gbenum->items,
+ GFP_KERNEL);
+ if (!strings)
+ return NULL;
+
+ data = gbenum->names;
+
+ for (i = 0; i < gbenum->items; i++) {
+ strings[i] = (const char *)data;
+ while (*data != '\0')
+ data++;
+ data++;
+ }
+
+ return strings;
+}
+
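+/*
+ * Mixer kcontrol callbacks: kcontrol->private_value carries a struct
+ * gbaudio_ctl_pvt describing the remote control, and each get/put is
+ * forwarded as a Greybus get/set-control operation over the module's
+ * management connection.
+ */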
+static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ unsigned int max;
+ const char *name;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_module_info *module;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+
+ if (!info) {
+ dev_err(codec->dev, "NULL info for %s\n", uinfo->id.name);
+ return -EINVAL;
+ }
+
+ /* update uinfo */
+ uinfo->access = data->access;
+ uinfo->count = data->vcount;
+ uinfo->type = (snd_ctl_elem_type_t)info->type;
+
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ uinfo->value.integer.min = info->value.integer.min;
+ uinfo->value.integer.max = info->value.integer.max;
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ max = info->value.enumerated.items;
+ uinfo->value.enumerated.items = max;
+ if (uinfo->value.enumerated.item > max - 1)
+ uinfo->value.enumerated.item = max - 1;
+ module = find_gb_module(gbcodec, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+ name = gbaudio_map_controlid(module, data->ctl_id,
+ uinfo->value.enumerated.item);
+ strlcpy(uinfo->value.enumerated.name, name, NAME_SIZE);
+ break;
+ default:
+ dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ break;
+ }
+ return 0;
+}
+
+static int gbcodec_mixer_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ /* update ucontrol */
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ ucontrol->value.integer.value[0] =
+ gbvalue.value.integer_value[0];
+ if (data->vcount == 2)
+ ucontrol->value.integer.value[1] =
+ gbvalue.value.integer_value[1];
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ucontrol->value.enumerated.item[0] =
+ gbvalue.value.enumerated_item[0];
+ if (data->vcount == 2)
+ ucontrol->value.enumerated.item[1] =
+ gbvalue.value.enumerated_item[1];
+ break;
+ default:
+ dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int gbcodec_mixer_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+ /* update ucontrol */
+ switch (info->type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
+ case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
+ gbvalue.value.integer_value[0] =
+ ucontrol->value.integer.value[0];
+ if (data->vcount == 2)
+ gbvalue.value.integer_value[1] =
+ ucontrol->value.integer.value[1];
+ break;
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ gbvalue.value.enumerated_item[0] =
+ ucontrol->value.enumerated.item[0];
+ if (data->vcount == 2)
+ gbvalue.value.enumerated_item[1] =
+ ucontrol->value.enumerated.item[1];
+ break;
+ default:
+ dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
+ info->type, kcontrol->id.name);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ }
+
+ return ret;
+}
+
+#define SOC_MIXER_GB(xname, kcount, data) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .count = kcount, .info = gbcodec_mixer_ctl_info, \
+ .get = gbcodec_mixer_ctl_get, .put = gbcodec_mixer_ctl_put, \
+ .private_value = (unsigned long)data }
+
+/*
+ * Although the callback functions below appear redundant with the ones
+ * above, they are kept separate to allow for different handling in case
+ * of DAPM-related sequencing, etc.
+ */
+static int gbcodec_mixer_dapm_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int platform_max, platform_min;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_info *info;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = widget->codec;
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+
+ /* update uinfo */
+ platform_max = info->value.integer.max;
+ platform_min = info->value.integer.min;
+
+ if (platform_max == 1 &&
+ !strnstr(kcontrol->id.name, " Volume", NAME_SIZE))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = data->vcount;
+ uinfo->value.integer.min = 0;
+ if (info->value.integer.min < 0 &&
+ (uinfo->type == SNDRV_CTL_ELEM_TYPE_INTEGER))
+ uinfo->value.integer.max = platform_max - platform_min;
+ else
+ uinfo->value.integer.max = platform_max;
+
+ return 0;
+}
+
+static int gbcodec_mixer_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = widget->codec;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+ if (data->vcount == 2)
+ dev_warn(widget->dapm->dev,
+ "GB: Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+ /* update ucontrol */
+ ucontrol->value.integer.value[0] = gbvalue.value.integer_value[0];
+
+ return ret;
+}
+
+static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, wi, max, connect;
+ unsigned int mask, val;
+ struct gb_audio_ctl_elem_info *info;
+ struct gbaudio_ctl_pvt *data;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = widget->codec;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
+ info = (struct gb_audio_ctl_elem_info *)data->info;
+ bundle = to_gb_bundle(module->dev);
+
+ if (data->vcount == 2)
+ dev_warn(widget->dapm->dev,
+ "GB: Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
+
+ max = info->value.integer.max;
+ mask = (1 << fls(max)) - 1;
+ val = ucontrol->value.integer.value[0] & mask;
+ connect = !!val;
+
+ /* update ucontrol */
+ if (gbvalue.value.integer_value[0] != val) {
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ widget = wlist->widgets[wi];
+
+ widget->value = val;
+ widget->dapm->update = NULL;
+ snd_soc_dapm_mixer_update_power(widget, kcontrol,
+ connect);
+ }
+ gbvalue.value.integer_value[0] =
+ ucontrol->value.integer.value[0];
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection,
+ data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev,
+ "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .count = kcount, .info = gbcodec_mixer_dapm_ctl_info, \
+ .get = gbcodec_mixer_dapm_ctl_get, .put = gbcodec_mixer_dapm_ctl_put, \
+ .private_value = (unsigned long)data}
+
+static int gbcodec_event_spk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB speaker is connected */
+
+ return 0;
+}
+
+static int gbcodec_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB module supports jack slot */
+
+ return 0;
+}
+
+static int gbcodec_event_int_mic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ /* Ensure GB module supports jack slot */
+
+ return 0;
+}
+
+static int gbaudio_validate_kcontrol_count(struct gb_audio_widget *w)
+{
+ int ret = 0;
+
+ switch (w->type) {
+ case snd_soc_dapm_spk:
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_mic:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_input:
+ if (w->ncontrols)
+ ret = -EINVAL;
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mux:
+ if (w->ncontrols != 1)
+ ret = -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int gbcodec_enum_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
+ if (e->shift_l != e->shift_r)
+ ucontrol->value.enumerated.item[1] =
+ gbvalue.value.enumerated_item[1];
+
+ return 0;
+}
+
+static int gbcodec_enum_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ if (ucontrol->value.enumerated.item[0] > e->max - 1)
+ return -EINVAL;
+ gbvalue.value.enumerated_item[0] = ucontrol->value.enumerated.item[0];
+
+ if (e->shift_l != e->shift_r) {
+ if (ucontrol->value.enumerated.item[1] > e->max - 1)
+ return -EINVAL;
+ gbvalue.value.enumerated_item[1] =
+ ucontrol->value.enumerated.item[1];
+ }
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ }
+
+ return ret;
+}
+
+static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct soc_enum *gbe;
+ struct gb_audio_enumerated *gb_enum;
+ int i;
+
+ gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+ if (!gbe)
+ return -ENOMEM;
+
+ gb_enum = &ctl->info.value.enumerated;
+
+ /* since count=1, and reg is dummy */
+ gbe->max = gb_enum->items;
+ gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
+
+ /* debug enum info */
+ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
+ gb_enum->names_length);
+ for (i = 0; i < gb_enum->items; i++)
+ dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+ *kctl = (struct snd_kcontrol_new)
+ SOC_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_ctl_get,
+ gbcodec_enum_ctl_put);
+ return 0;
+}
+
+static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ int ret = 0;
+ struct gbaudio_ctl_pvt *ctldata;
+
+ switch (ctl->iface) {
+ case SNDRV_CTL_ELEM_IFACE_MIXER:
+ switch (ctl->info.type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ret = gbaudio_tplg_create_enum_kctl(gb, kctl, ctl);
+ break;
+ default:
+ ctldata = devm_kzalloc(gb->dev,
+ sizeof(struct gbaudio_ctl_pvt),
+ GFP_KERNEL);
+ if (!ctldata)
+ return -ENOMEM;
+ ctldata->ctl_id = ctl->id;
+ ctldata->data_cport = ctl->data_cport;
+ ctldata->access = ctl->access;
+ ctldata->vcount = ctl->count_values;
+ ctldata->info = &ctl->info;
+ *kctl = (struct snd_kcontrol_new)
+ SOC_MIXER_GB(ctl->name, ctl->count, ctldata);
+ ctldata = NULL;
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(gb->dev, "%s:%d control created\n", ctl->name, ctl->id);
+ return ret;
+}
+
+static int gbcodec_enum_dapm_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, ctl_id;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct gbaudio_module_info *module;
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct snd_soc_codec *codec = widget->codec;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_bundle *bundle;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
+ if (e->shift_l != e->shift_r)
+ ucontrol->value.enumerated.item[1] =
+ gbvalue.value.enumerated_item[1];
+
+ return 0;
+}
+
+static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret, wi, ctl_id;
+ unsigned int val, mux, change;
+ unsigned int mask;
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct gb_audio_ctl_elem_value gbvalue;
+ struct gbaudio_module_info *module;
+ struct snd_soc_codec *codec = widget->codec;
+ struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ struct gb_bundle *bundle;
+
+ if (ucontrol->value.enumerated.item[0] > e->max - 1)
+ return -EINVAL;
+
+ module = find_gb_module(gb, kcontrol->id.name);
+ if (!module)
+ return -EINVAL;
+
+ ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
+ if (ctl_id < 0)
+ return -EINVAL;
+
+ change = 0;
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
+ }
+
+ mux = ucontrol->value.enumerated.item[0];
+ val = mux << e->shift_l;
+ mask = e->mask << e->shift_l;
+
+ if (gbvalue.value.enumerated_item[0] !=
+ ucontrol->value.enumerated.item[0]) {
+ change = 1;
+ gbvalue.value.enumerated_item[0] =
+ ucontrol->value.enumerated.item[0];
+ }
+
+ if (e->shift_l != e->shift_r) {
+ if (ucontrol->value.enumerated.item[1] > e->max - 1)
+ return -EINVAL;
+ val |= ucontrol->value.enumerated.item[1] << e->shift_r;
+ mask |= e->mask << e->shift_r;
+ if (gbvalue.value.enumerated_item[1] !=
+ ucontrol->value.enumerated.item[1]) {
+ change = 1;
+ gbvalue.value.enumerated_item[1] =
+ ucontrol->value.enumerated.item[1];
+ }
+ }
+
+ if (change) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ if (ret) {
+ dev_err_ratelimited(codec->dev,
+ "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ }
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ widget = wlist->widgets[wi];
+
+ widget->value = val;
+ widget->dapm->update = NULL;
+ snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+ }
+ }
+
+ return change;
+}
+
+static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct soc_enum *gbe;
+ struct gb_audio_enumerated *gb_enum;
+ int i;
+
+ gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
+ if (!gbe)
+ return -ENOMEM;
+
+ gb_enum = &ctl->info.value.enumerated;
+
+ /* since count=1, and reg is dummy */
+ gbe->max = gb_enum->items;
+ gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
+
+ /* debug enum info */
+ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
+ gb_enum->names_length);
+ for (i = 0; i < gb_enum->items; i++)
+ dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);
+
+ *kctl = (struct snd_kcontrol_new)
+ SOC_DAPM_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_dapm_ctl_get,
+ gbcodec_enum_dapm_ctl_put);
+ return 0;
+}
+
+static int gbaudio_tplg_create_mixer_ctl(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ struct gbaudio_ctl_pvt *ctldata;
+
+ ctldata = devm_kzalloc(gb->dev, sizeof(struct gbaudio_ctl_pvt),
+ GFP_KERNEL);
+ if (!ctldata)
+ return -ENOMEM;
+ ctldata->ctl_id = ctl->id;
+ ctldata->data_cport = ctl->data_cport;
+ ctldata->access = ctl->access;
+ ctldata->vcount = ctl->count_values;
+ ctldata->info = &ctl->info;
+ *kctl = (struct snd_kcontrol_new)
+ SOC_DAPM_MIXER_GB(ctl->name, ctl->count, ctldata);
+
+ return 0;
+}
+
+static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
+ struct snd_kcontrol_new *kctl,
+ struct gb_audio_control *ctl)
+{
+ int ret;
+
+ switch (ctl->iface) {
+ case SNDRV_CTL_ELEM_IFACE_MIXER:
+ switch (ctl->info.type) {
+ case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
+ ret = gbaudio_tplg_create_enum_ctl(gb, kctl, ctl);
+ break;
+ default:
+ ret = gbaudio_tplg_create_mixer_ctl(gb, kctl, ctl);
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+
+ }
+
+ dev_dbg(gb->dev, "%s:%d DAPM control created, ret:%d\n", ctl->name,
+ ctl->id, ret);
+ return ret;
+}
+
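+/*
+ * DAPM widget power event: map the widget name back to its Greybus widget
+ * id and enable/disable the remote widget on PRE_PMU/POST_PMD respectively.
+ */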
+static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ int wid;
+ int ret;
+ struct snd_soc_codec *codec = w->codec;
+ struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
+ struct gbaudio_module_info *module;
+ struct gb_bundle *bundle;
+
+ dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
+
+ /* Find relevant module */
+ module = find_gb_module(gbcodec, w->name);
+ if (!module)
+ return -EINVAL;
+
+ /* map name to widget id */
+ wid = gbaudio_map_widgetname(module, w->name);
+ if (wid < 0) {
+ dev_err(codec->dev, "Invalid widget name:%s\n", w->name);
+ return -EINVAL;
+ }
+
+ bundle = to_gb_bundle(module->dev);
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = gb_audio_gb_enable_widget(module->mgmt_connection, wid);
+ if (!ret)
+ ret = gbaudio_module_update(gbcodec, w, module, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = gb_audio_gb_disable_widget(module->mgmt_connection, wid);
+ if (!ret)
+ ret = gbaudio_module_update(gbcodec, w, module, 0);
+ break;
+ }
+ if (ret)
+ dev_err_ratelimited(codec->dev,
+ "%d: widget, event:%d failed:%d\n", wid,
+ event, ret);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
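+/*
+ * Translate one gb_audio_widget (together with its trailing, variable-sized
+ * control entries) into a DAPM widget.  *w_size returns the number of bytes
+ * consumed so the caller can walk the packed widget array.
+ */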
+static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
+ struct snd_soc_dapm_widget *dw,
+ struct gb_audio_widget *w, int *w_size)
+{
+ int i, ret, csize;
+ struct snd_kcontrol_new *widget_kctls;
+ struct gb_audio_control *curr;
+ struct gbaudio_control *control, *_control;
+ size_t size;
+ char temp_name[NAME_SIZE];
+
+ ret = gbaudio_validate_kcontrol_count(w);
+ if (ret) {
+ dev_err(module->dev, "Inavlid kcontrol count=%d for %s\n",
+ w->ncontrols, w->name);
+ return ret;
+ }
+
+ /* allocate memory for kcontrol */
+ if (w->ncontrols) {
+ size = sizeof(struct snd_kcontrol_new) * w->ncontrols;
+ widget_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!widget_kctls)
+ return -ENOMEM;
+ }
+
+ *w_size = sizeof(struct gb_audio_widget);
+
+ /* create relevant kcontrols */
+ curr = w->ctl;
+ for (i = 0; i < w->ncontrols; i++) {
+ ret = gbaudio_tplg_create_wcontrol(module, &widget_kctls[i],
+ curr);
+ if (ret) {
+ dev_err(module->dev,
+ "%s:%d type widget_ctl not supported\n",
+ curr->name, curr->iface);
+ goto error;
+ }
+ control = devm_kzalloc(module->dev,
+ sizeof(struct gbaudio_control),
+ GFP_KERNEL);
+ if (!control) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->id = curr->id;
+ control->name = curr->name;
+ control->wname = w->name;
+
+ if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+ struct gb_audio_enumerated *gbenum =
+ &curr->info.value.enumerated;
+
+ csize = offsetof(struct gb_audio_control, info);
+ csize += offsetof(struct gb_audio_ctl_elem_info, value);
+ csize += offsetof(struct gb_audio_enumerated, names);
+ csize += gbenum->names_length;
+ control->texts = (const char * const *)
+ gb_generate_enum_strings(module, gbenum);
+ control->items = gbenum->items;
+ } else
+ csize = sizeof(struct gb_audio_control);
+ *w_size += csize;
+ curr = (void *)curr + csize;
+ list_add(&control->list, &module->widget_ctl_list);
+ dev_dbg(module->dev, "%s: control of type %d created\n",
+ widget_kctls[i].name, widget_kctls[i].iface);
+ }
+
+ /* Prefix dev_id to the widget name */
+ strlcpy(temp_name, w->name, NAME_SIZE);
+ snprintf(w->name, NAME_SIZE, "GB %d %s", module->dev_id, temp_name);
+
+ switch (w->type) {
+ case snd_soc_dapm_spk:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk);
+ module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
+ break;
+ case snd_soc_dapm_hp:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_HP(w->name, gbcodec_event_hp);
+ module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
+ | GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
+ module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
+ break;
+ case snd_soc_dapm_mic:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic);
+ module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
+ break;
+ case snd_soc_dapm_output:
+ *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name);
+ break;
+ case snd_soc_dapm_input:
+ *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name);
+ break;
+ case snd_soc_dapm_switch:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0,
+ widget_kctls, gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_pga:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_mixer:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL,
+ 0, gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_mux:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0,
+ widget_kctls, gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_aif_in:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0,
+ SND_SOC_NOPM,
+ 0, 0, gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_aif_out:
+ *dw = (struct snd_soc_dapm_widget)
+ SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0,
+ SND_SOC_NOPM,
+ 0, 0, gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD);
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
+ dw->id);
+ return 0;
+error:
+ list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ return ret;
+}
+
+static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
+ struct gb_audio_control *controls)
+{
+ int i, csize, ret;
+ struct snd_kcontrol_new *dapm_kctls;
+ struct gb_audio_control *curr;
+ struct gbaudio_control *control, *_control;
+ size_t size;
+ char temp_name[NAME_SIZE];
+
+ size = sizeof(struct snd_kcontrol_new) * module->num_controls;
+ dapm_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_kctls)
+ return -ENOMEM;
+
+ curr = controls;
+ for (i = 0; i < module->num_controls; i++) {
+ ret = gbaudio_tplg_create_kcontrol(module, &dapm_kctls[i],
+ curr);
+ if (ret) {
+ dev_err(module->dev, "%s:%d type not supported\n",
+ curr->name, curr->iface);
+ goto error;
+ }
+ control = devm_kzalloc(module->dev, sizeof(struct gbaudio_control),
+ GFP_KERNEL);
+ if (!control) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ control->id = curr->id;
+ /* Prefix dev_id to the control name */
+ strlcpy(temp_name, curr->name, NAME_SIZE);
+ snprintf(curr->name, NAME_SIZE, "GB %d %s", module->dev_id,
+ temp_name);
+ control->name = curr->name;
+ if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
+ struct gb_audio_enumerated *gbenum =
+ &curr->info.value.enumerated;
+
+ csize = offsetof(struct gb_audio_control, info);
+ csize += offsetof(struct gb_audio_ctl_elem_info, value);
+ csize += offsetof(struct gb_audio_enumerated, names);
+ csize += gbenum->names_length;
+ control->texts = (const char * const *)
+ gb_generate_enum_strings(module, gbenum);
+ control->items = gbenum->items;
+ } else
+ csize = sizeof(struct gb_audio_control);
+
+ list_add(&control->list, &module->ctl_list);
+ dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
+ curr->name, curr->info.type);
+ curr = (void *)curr + csize;
+ }
+ module->controls = dapm_kctls;
+
+ return 0;
+error:
+ list_for_each_entry_safe(control, _control, &module->ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ devm_kfree(module->dev, dapm_kctls);
+ return ret;
+}
+
+static int gbaudio_tplg_process_widgets(struct gbaudio_module_info *module,
+ struct gb_audio_widget *widgets)
+{
+ int i, ret, w_size;
+ struct snd_soc_dapm_widget *dapm_widgets;
+ struct gb_audio_widget *curr;
+ struct gbaudio_widget *widget, *_widget;
+ size_t size;
+
+ size = sizeof(struct snd_soc_dapm_widget) * module->num_dapm_widgets;
+ dapm_widgets = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_widgets)
+ return -ENOMEM;
+
+ curr = widgets;
+ for (i = 0; i < module->num_dapm_widgets; i++) {
+ ret = gbaudio_tplg_create_widget(module, &dapm_widgets[i],
+ curr, &w_size);
+ if (ret) {
+ dev_err(module->dev, "%s:%d type not supported\n",
+ curr->name, curr->type);
+ goto error;
+ }
+ widget = devm_kzalloc(module->dev, sizeof(struct gbaudio_widget),
+ GFP_KERNEL);
+ if (!widget) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ widget->id = curr->id;
+ widget->name = curr->name;
+ list_add(&widget->list, &module->widget_list);
+ curr = (void *)curr + w_size;
+ }
+ module->dapm_widgets = dapm_widgets;
+
+ return 0;
+
+error:
+ list_for_each_entry_safe(widget, _widget, &module->widget_list,
+ list) {
+ list_del(&widget->list);
+ devm_kfree(module->dev, widget);
+ }
+ devm_kfree(module->dev, dapm_widgets);
+ return ret;
+}
+
+static int gbaudio_tplg_process_routes(struct gbaudio_module_info *module,
+ struct gb_audio_route *routes)
+{
+ int i, ret;
+ struct snd_soc_dapm_route *dapm_routes;
+ struct gb_audio_route *curr;
+ size_t size;
+
+ size = sizeof(struct snd_soc_dapm_route) * module->num_dapm_routes;
+ dapm_routes = devm_kzalloc(module->dev, size, GFP_KERNEL);
+ if (!dapm_routes)
+ return -ENOMEM;
+
+ module->dapm_routes = dapm_routes;
+ curr = routes;
+
+ for (i = 0; i < module->num_dapm_routes; i++) {
+ dapm_routes->sink =
+ gbaudio_map_widgetid(module, curr->destination_id);
+ if (!dapm_routes->sink) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid sink\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dapm_routes->source =
+ gbaudio_map_widgetid(module, curr->source_id);
+ if (!dapm_routes->source) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid source\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dapm_routes->control =
+ gbaudio_map_controlid(module,
+ curr->control_id,
+ curr->index);
+ if ((curr->control_id != GBAUDIO_INVALID_ID) &&
+ !dapm_routes->control) {
+ dev_err(module->dev, "%d:%d:%d:%d - Invalid control\n",
+ curr->source_id, curr->destination_id,
+ curr->control_id, curr->index);
+ ret = -EINVAL;
+ goto error;
+ }
+ dev_dbg(module->dev, "Route {%s, %s, %s}\n", dapm_routes->sink,
+ (dapm_routes->control) ? dapm_routes->control:"NULL",
+ dapm_routes->source);
+ dapm_routes++;
+ curr++;
+ }
+
+ return 0;
+
+error:
+ devm_kfree(module->dev, module->dapm_routes);
+ return ret;
+}
+
+static int gbaudio_tplg_process_header(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data)
+{
+ /* fetch no. of kcontrols, widgets & routes */
+ module->num_controls = tplg_data->num_controls;
+ module->num_dapm_widgets = tplg_data->num_widgets;
+ module->num_dapm_routes = tplg_data->num_routes;
+
+ /* update block offset */
+ module->dai_offset = (unsigned long)&tplg_data->data;
+ module->control_offset = module->dai_offset + tplg_data->size_dais;
+ module->widget_offset = module->control_offset +
+ tplg_data->size_controls;
+ module->route_offset = module->widget_offset +
+ tplg_data->size_widgets;
+
+ dev_dbg(module->dev, "DAI offset is 0x%lx\n", module->dai_offset);
+ dev_dbg(module->dev, "control offset is %lx\n",
+ module->control_offset);
+ dev_dbg(module->dev, "widget offset is %lx\n", module->widget_offset);
+ dev_dbg(module->dev, "route offset is %lx\n", module->route_offset);
+
+ return 0;
+}
+
+int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
+ struct gb_audio_topology *tplg_data)
+{
+ int ret;
+ struct gb_audio_control *controls;
+ struct gb_audio_widget *widgets;
+ struct gb_audio_route *routes;
+
+ if (!tplg_data)
+ return -EINVAL;
+
+ ret = gbaudio_tplg_process_header(module, tplg_data);
+ if (ret) {
+ dev_err(module->dev, "%d: Error in parsing topology header\n",
+ ret);
+ return ret;
+ }
+
+ /* process control */
+ controls = (struct gb_audio_control *)module->control_offset;
+ ret = gbaudio_tplg_process_kcontrols(module, controls);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing controls data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Control parsing finished\n");
+
+ /* process widgets */
+ widgets = (struct gb_audio_widget *)module->widget_offset;
+ ret = gbaudio_tplg_process_widgets(module, widgets);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing widgets data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Widget parsing finished\n");
+
+ /* process route */
+ routes = (struct gb_audio_route *)module->route_offset;
+ ret = gbaudio_tplg_process_routes(module, routes);
+ if (ret) {
+ dev_err(module->dev,
+ "%d: Error in parsing routes data\n", ret);
+ return ret;
+ }
+ dev_dbg(module->dev, "Route parsing finished\n");
+
+ /* parse jack capabilities */
+ if (tplg_data->jack_type) {
+ module->jack_mask = tplg_data->jack_type & GBCODEC_JACK_MASK;
+ module->button_mask = tplg_data->jack_type &
+ GBCODEC_JACK_BUTTON_MASK;
+ }
+
+ return ret;
+}
+
+void gbaudio_tplg_release(struct gbaudio_module_info *module)
+{
+ struct gbaudio_control *control, *_control;
+ struct gbaudio_widget *widget, *_widget;
+
+ if (!module->topology)
+ return;
+
+ /* release kcontrols */
+ list_for_each_entry_safe(control, _control, &module->ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+ if (module->controls)
+ devm_kfree(module->dev, module->controls);
+
+ /* release widget controls */
+ list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
+ list) {
+ list_del(&control->list);
+ devm_kfree(module->dev, control);
+ }
+
+ /* release widgets */
+ list_for_each_entry_safe(widget, _widget, &module->widget_list,
+ list) {
+ list_del(&widget->list);
+ devm_kfree(module->dev, widget);
+ }
+ if (module->dapm_widgets)
+ devm_kfree(module->dev, module->dapm_widgets);
+
+ /* release routes */
+ if (module->dapm_routes)
+ devm_kfree(module->dev, module->dapm_routes);
+}
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
new file mode 100644
index 000000000000..168626ba0c03
--- /dev/null
+++ b/drivers/staging/greybus/authentication.c
@@ -0,0 +1,429 @@
+/*
+ * Greybus Component Authentication Protocol (CAP) Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "greybus_authentication.h"
+#include "firmware.h"
+#include "greybus.h"
+
+#define CAP_TIMEOUT_MS 1000
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one minor is required per Interface.
+ */
+#define NUM_MINORS U8_MAX
+
+struct gb_cap {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct kref kref;
+ struct list_head node;
+ bool disabled; /* connection getting disabled */
+
+ struct mutex mutex;
+ struct cdev cdev;
+ struct device *class_device;
+ dev_t dev_num;
+};
+
+static struct class *cap_class;
+static dev_t cap_dev_num;
+static DEFINE_IDA(cap_minors_map);
+static LIST_HEAD(cap_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void cap_kref_release(struct kref *kref)
+{
+ struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
+
+ kfree(cap);
+}
+
+/*
+ * All users of cap take a reference (while holding list_mutex) before they
+ * get a pointer to play with. The structure is freed only after the last
+ * user has dropped its reference.
+ */
+static void put_cap(struct gb_cap *cap)
+{
+ kref_put(&cap->kref, cap_kref_release);
+}
+
+/* Caller must call put_cap() after using struct gb_cap */
+static struct gb_cap *get_cap(struct cdev *cdev)
+{
+ struct gb_cap *cap;
+
+ mutex_lock(&list_mutex);
+
+ list_for_each_entry(cap, &cap_list, node) {
+ if (&cap->cdev == cdev) {
+ kref_get(&cap->kref);
+ goto unlock;
+ }
+ }
+
+ cap = NULL;
+
+unlock:
+ mutex_unlock(&list_mutex);
+
+ return cap;
+}
+
+static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_get_endpoint_uid_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
+ 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(euid, response.uid, sizeof(response.uid));
+
+ return 0;
+}
+
+static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
+ u8 *certificate, u32 *size, u8 *result)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_get_ims_certificate_request *request;
+ struct gb_cap_get_ims_certificate_response *response;
+ size_t max_size = gb_operation_get_payload_size_max(connection);
+ struct gb_operation *op;
+ int ret;
+
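+ /*
+ * The certificate length isn't known in advance, so allocate the
+ * largest possible response and allow the module to send a shorter
+ * one; the actual size is computed from the received payload below.
+ */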
+ op = gb_operation_create_flags(connection,
+ GB_CAP_TYPE_GET_IMS_CERTIFICATE,
+ sizeof(*request), max_size,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ request = op->request->payload;
+ request->certificate_class = cpu_to_le32(class);
+ request->certificate_id = cpu_to_le32(id);
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret) {
+ dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
+ goto done;
+ }
+
+ response = op->response->payload;
+ *result = response->result_code;
+ *size = op->response->payload_size - sizeof(*response);
+ memcpy(certificate, response->certificate, *size);
+
+done:
+ gb_operation_put(op);
+ return ret;
+}
+
+static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
+ u8 *challenge, u8 *result, u8 *auth_response,
+ u32 *signature_size, u8 *signature)
+{
+ struct gb_connection *connection = cap->connection;
+ struct gb_cap_authenticate_request *request;
+ struct gb_cap_authenticate_response *response;
+ size_t max_size = gb_operation_get_payload_size_max(connection);
+ struct gb_operation *op;
+ int ret;
+
+ op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
+ sizeof(*request), max_size,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ request = op->request->payload;
+ request->auth_type = cpu_to_le32(auth_type);
+ memcpy(request->uid, uid, sizeof(request->uid));
+ memcpy(request->challenge, challenge, sizeof(request->challenge));
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret) {
+ dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
+ goto done;
+ }
+
+ response = op->response->payload;
+ *result = response->result_code;
+ *signature_size = op->response->payload_size - sizeof(*response);
+ memcpy(auth_response, response->response, sizeof(response->response));
+ memcpy(signature, response->signature, *signature_size);
+
+done:
+ gb_operation_put(op);
+ return ret;
+}
+
+/* Char device fops */
+
+static int cap_open(struct inode *inode, struct file *file)
+{
+ struct gb_cap *cap = get_cap(inode->i_cdev);
+
+ /* cap structure can't get freed until file descriptor is closed */
+ if (cap) {
+ file->private_data = cap;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int cap_release(struct inode *inode, struct file *file)
+{
+ struct gb_cap *cap = file->private_data;
+
+ put_cap(cap);
+ return 0;
+}
+
+static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
+ void __user *buf)
+{
+ struct cap_ioc_get_endpoint_uid endpoint_uid;
+ struct cap_ioc_get_ims_certificate *ims_cert;
+ struct cap_ioc_authenticate *authenticate;
+ size_t size;
+ int ret;
+
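+ /*
+ * GET_ENDPOINT_UID only copies its result back to userspace; the other
+ * commands duplicate the user buffer, update it in place and copy it
+ * back on success.
+ */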
+ switch (cmd) {
+ case CAP_IOC_GET_ENDPOINT_UID:
+ ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
+ return -EFAULT;
+
+ return 0;
+ case CAP_IOC_GET_IMS_CERTIFICATE:
+ size = sizeof(*ims_cert);
+ ims_cert = memdup_user(buf, size);
+ if (IS_ERR(ims_cert))
+ return PTR_ERR(ims_cert);
+
+ ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
+ ims_cert->certificate_id,
+ ims_cert->certificate,
+ &ims_cert->cert_size,
+ &ims_cert->result_code);
+ if (!ret && copy_to_user(buf, ims_cert, size))
+ ret = -EFAULT;
+ kfree(ims_cert);
+
+ return ret;
+ case CAP_IOC_AUTHENTICATE:
+ size = sizeof(*authenticate);
+ authenticate = memdup_user(buf, size);
+ if (IS_ERR(authenticate))
+ return PTR_ERR(authenticate);
+
+ ret = cap_authenticate(cap, authenticate->auth_type,
+ authenticate->uid,
+ authenticate->challenge,
+ &authenticate->result_code,
+ authenticate->response,
+ &authenticate->signature_size,
+ authenticate->signature);
+ if (!ret && copy_to_user(buf, authenticate, size))
+ ret = -EFAULT;
+ kfree(authenticate);
+
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gb_cap *cap = file->private_data;
+ struct gb_bundle *bundle = cap->connection->bundle;
+ int ret = -ENODEV;
+
+ /*
+ * Serialize ioctls.
+ *
+ * We don't want the user to do multiple authentication operations in
+ * parallel.
+ *
+ * This is also used to protect ->disabled, which is used to check if
+ * the connection is getting disconnected, so that we don't start any
+ * new operations.
+ */
+ mutex_lock(&cap->mutex);
+ if (!cap->disabled) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (!ret) {
+ ret = cap_ioctl(cap, cmd, (void __user *)arg);
+ gb_pm_runtime_put_autosuspend(bundle);
+ }
+ }
+ mutex_unlock(&cap->mutex);
+
+ return ret;
+}
+
+static const struct file_operations cap_fops = {
+ .owner = THIS_MODULE,
+ .open = cap_open,
+ .release = cap_release,
+ .unlocked_ioctl = cap_ioctl_unlocked,
+};
+
+int gb_cap_connection_init(struct gb_connection *connection)
+{
+ struct gb_cap *cap;
+ int ret, minor;
+
+ if (!connection)
+ return 0;
+
+ cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->parent = &connection->bundle->dev;
+ cap->connection = connection;
+ mutex_init(&cap->mutex);
+ gb_connection_set_data(connection, cap);
+ kref_init(&cap->kref);
+
+ mutex_lock(&list_mutex);
+ list_add(&cap->node, &cap_list);
+ mutex_unlock(&list_mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_list_del;
+
+ minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ if (minor < 0) {
+ ret = minor;
+ goto err_connection_disable;
+ }
+
+ /* Add a char device to allow userspace to interact with cap */
+ cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
+ cdev_init(&cap->cdev, &cap_fops);
+
+ ret = cdev_add(&cap->cdev, cap->dev_num, 1);
+ if (ret)
+ goto err_remove_ida;
+
+ /* Add a soft link to the previously added char-dev within the bundle */
+ cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
+ NULL, "gb-authenticate-%d", minor);
+ if (IS_ERR(cap->class_device)) {
+ ret = PTR_ERR(cap->class_device);
+ goto err_del_cdev;
+ }
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&cap->cdev);
+err_remove_ida:
+ ida_simple_remove(&cap_minors_map, minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_list_del:
+ mutex_lock(&list_mutex);
+ list_del(&cap->node);
+ mutex_unlock(&list_mutex);
+
+ put_cap(cap);
+
+ return ret;
+}
+
+void gb_cap_connection_exit(struct gb_connection *connection)
+{
+ struct gb_cap *cap;
+
+ if (!connection)
+ return;
+
+ cap = gb_connection_get_data(connection);
+
+ device_destroy(cap_class, cap->dev_num);
+ cdev_del(&cap->cdev);
+ ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
+
+ /*
+ * Disallow any new ioctl operations on the char device and wait for
+ * existing ones to finish.
+ */
+ mutex_lock(&cap->mutex);
+ cap->disabled = true;
+ mutex_unlock(&cap->mutex);
+
+ /* All pending greybus operations should have finished by now */
+ gb_connection_disable(cap->connection);
+
+ /* Disallow new users to get access to the cap structure */
+ mutex_lock(&list_mutex);
+ list_del(&cap->node);
+ mutex_unlock(&list_mutex);
+
+ /*
+ * All current users of cap have already taken a reference to it by now,
+ * so we can safely drop our own reference; cap will be freed once the
+ * last user drops theirs.
+ */
+ put_cap(cap);
+}
+
+int cap_init(void)
+{
+ int ret;
+
+ cap_class = class_create(THIS_MODULE, "gb_authenticate");
+ if (IS_ERR(cap_class))
+ return PTR_ERR(cap_class);
+
+ ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
+ "gb_authenticate");
+ if (ret)
+ goto err_remove_class;
+
+ return 0;
+
+err_remove_class:
+ class_destroy(cap_class);
+ return ret;
+}
+
+void cap_exit(void)
+{
+ unregister_chrdev_region(cap_dev_num, NUM_MINORS);
+ class_destroy(cap_class);
+ ida_destroy(&cap_minors_map);
+}
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
new file mode 100644
index 000000000000..5f90721bcc51
--- /dev/null
+++ b/drivers/staging/greybus/bootrom.c
@@ -0,0 +1,524 @@
+/*
+ * BOOTROM Greybus driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "firmware.h"
+
+/* Timeout, in milliseconds, within which the next request must be received */
+#define NEXT_REQ_TIMEOUT_MS 1000
+
+/*
+ * FIXME: Reduce this timeout once svc core handles parallel processing of
+ * events from the SVC, which are handled sequentially today.
+ */
+#define MODE_SWITCH_TIMEOUT_MS 10000
+
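+/*
+ * The bootrom protocol is driven by the module: after AP_READY it is expected
+ * to send, in order, a firmware size request, one or more get firmware
+ * requests and a ready to boot request, and finally to switch modes. A
+ * delayed work item fires if the next expected request doesn't arrive in
+ * time.
+ */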
+enum next_request_type {
+ NEXT_REQ_FIRMWARE_SIZE,
+ NEXT_REQ_GET_FIRMWARE,
+ NEXT_REQ_READY_TO_BOOT,
+ NEXT_REQ_MODE_SWITCH,
+};
+
+struct gb_bootrom {
+ struct gb_connection *connection;
+ const struct firmware *fw;
+ u8 protocol_major;
+ u8 protocol_minor;
+ enum next_request_type next_request;
+ struct delayed_work dwork;
+ struct mutex mutex; /* Protects bootrom->fw */
+};
+
+static void free_firmware(struct gb_bootrom *bootrom)
+{
+ if (!bootrom->fw)
+ return;
+
+ release_firmware(bootrom->fw);
+ bootrom->fw = NULL;
+}
+
+static void gb_bootrom_timedout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork);
+ struct device *dev = &bootrom->connection->bundle->dev;
+ const char *reason;
+
+ switch (bootrom->next_request) {
+ case NEXT_REQ_FIRMWARE_SIZE:
+ reason = "Firmware Size Request";
+ break;
+ case NEXT_REQ_GET_FIRMWARE:
+ reason = "Get Firmware Request";
+ break;
+ case NEXT_REQ_READY_TO_BOOT:
+ reason = "Ready to Boot Request";
+ break;
+ case NEXT_REQ_MODE_SWITCH:
+ reason = "Interface Mode Switch";
+ break;
+ default:
+ reason = NULL;
+ dev_err(dev, "Invalid next-request: %u", bootrom->next_request);
+ break;
+ }
+
+ dev_err(dev, "Timed out waiting for %s from the Module\n", reason);
+
+ mutex_lock(&bootrom->mutex);
+ free_firmware(bootrom);
+ mutex_unlock(&bootrom->mutex);
+
+ /* TODO: Power-off Module ? */
+}
+
+static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
+ enum next_request_type next, unsigned long timeout)
+{
+ bootrom->next_request = next;
+ schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
+}
+
+static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
+{
+ cancel_delayed_work_sync(&bootrom->dwork);
+}
+
+/*
+ * The es2 chip doesn't have VID/PID programmed into the hardware, so we need
+ * a workaround to distinguish different modules and their firmware blobs.
+ *
+ * This fetches the VID/PID (over the bootrom protocol) for the es2 chip only,
+ * when the VID/PID sent during hotplug are 0.
+ *
+ * Otherwise, intf->vendor_id/product_id are kept as passed during hotplug.
+ */
+static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
+{
+ struct gb_bootrom_get_vid_pid_response response;
+ struct gb_connection *connection = bootrom->connection;
+ struct gb_interface *intf = connection->bundle->intf;
+ int ret;
+
+ if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
+ return;
+
+ ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
+ NULL, 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "Bootrom get vid/pid operation failed (%d)\n", ret);
+ return;
+ }
+
+ /*
+ * NOTE: This is a hack, so that the same VID/PID values can be used by
+ * the next firmware stage as well. The uevent for bootrom will still
+ * report the VID/PID as 0, though after this point the sysfs files will
+ * show the updated values. That is a bit racy, as the same sysfs files
+ * would have shown 0 before this point.
+ */
+ intf->vendor_id = le32_to_cpu(response.vendor_id);
+ intf->product_id = le32_to_cpu(response.product_id);
+
+ dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
+ intf->vendor_id, intf->product_id);
+}
+
+/* This requests the firmware blob from disk and stores it in bootrom->fw */
+static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
+{
+ struct gb_connection *connection = bootrom->connection;
+ struct gb_interface *intf = connection->bundle->intf;
+ char firmware_name[49];
+ int rc;
+
+ /* Already have a firmware, free it */
+ free_firmware(bootrom);
+
+ /* Bootrom protocol is only supported for loading Stage 2 firmware */
+ if (stage != 2) {
+ dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
+ stage);
+ return -EINVAL;
+ }
+
+ /*
+ * Create firmware name
+ *
+ * XXX Name it properly..
+ */
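+ /*
+ * The name is built from the DDBL1 manufacturer/product ids and the
+ * interface vid/pid, e.g. a module reporting all-zero ids would request
+ * FW_NAME_PREFIX "00000000_00000000_00000000_00000000_s2l.tftf".
+ */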
+ snprintf(firmware_name, sizeof(firmware_name),
+ FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
+ intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+ intf->vendor_id, intf->product_id);
+
+ // FIXME:
+ // Turn to dev_dbg later after everyone has valid bootloaders with good
+ // ids, but leave this as dev_info for now to make it easier to track
+ // down "empty" vid/pid modules.
+ dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
+ firmware_name);
+
+ rc = request_firmware(&bootrom->fw, firmware_name,
+ &connection->bundle->dev);
+ if (rc) {
+ dev_err(&connection->bundle->dev,
+ "failed to find %s firmware (%d)\n", firmware_name, rc);
+ }
+
+ return rc;
+}
+
+static int gb_bootrom_firmware_size_request(struct gb_operation *op)
+{
+ struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+ struct gb_bootrom_firmware_size_request *size_request = op->request->payload;
+ struct gb_bootrom_firmware_size_response *size_response;
+ struct device *dev = &op->connection->bundle->dev;
+ int ret;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*size_request)) {
+ dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*size_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ mutex_lock(&bootrom->mutex);
+
+ ret = find_firmware(bootrom, size_request->stage);
+ if (ret)
+ goto unlock;
+
+ if (!gb_operation_response_alloc(op, sizeof(*size_response),
+ GFP_KERNEL)) {
+ dev_err(dev, "%s: error allocating response\n", __func__);
+ free_firmware(bootrom);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ size_response = op->response->payload;
+ size_response->size = cpu_to_le32(bootrom->fw->size);
+
+ dev_dbg(dev, "%s: firmware size %d bytes\n", __func__, size_response->size);
+
+unlock:
+ mutex_unlock(&bootrom->mutex);
+
+queue_work:
+ if (!ret) {
+ /* Refresh timeout */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
+ NEXT_REQ_TIMEOUT_MS);
+ }
+
+ return ret;
+}
+
+static int gb_bootrom_get_firmware(struct gb_operation *op)
+{
+ struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
+ const struct firmware *fw;
+ struct gb_bootrom_get_firmware_request *firmware_request;
+ struct gb_bootrom_get_firmware_response *firmware_response;
+ struct device *dev = &op->connection->bundle->dev;
+ unsigned int offset, size;
+ enum next_request_type next_request;
+ int ret = 0;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*firmware_request)) {
+ dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*firmware_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ mutex_lock(&bootrom->mutex);
+
+ fw = bootrom->fw;
+ if (!fw) {
+ dev_err(dev, "%s: firmware not available\n", __func__);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ firmware_request = op->request->payload;
+ offset = le32_to_cpu(firmware_request->offset);
+ size = le32_to_cpu(firmware_request->size);
+
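+ /*
+ * Reject requests that would read past the end of the firmware blob;
+ * the check is written this way to avoid an overflow of offset + size.
+ */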
+ if (offset >= fw->size || size > fw->size - offset) {
+ dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
+ offset, size);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
+ GFP_KERNEL)) {
+ dev_err(dev, "%s: error allocating response\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ firmware_response = op->response->payload;
+ memcpy(firmware_response->data, fw->data + offset, size);
+
+ dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n", offset,
+ size);
+
+unlock:
+ mutex_unlock(&bootrom->mutex);
+
+queue_work:
+ /* Refresh timeout */
+ if (!ret && (offset + size == fw->size))
+ next_request = NEXT_REQ_READY_TO_BOOT;
+ else
+ next_request = NEXT_REQ_GET_FIRMWARE;
+
+ gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int gb_bootrom_ready_to_boot(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_bootrom *bootrom = gb_connection_get_data(connection);
+ struct gb_bootrom_ready_to_boot_request *rtb_request;
+ struct device *dev = &connection->bundle->dev;
+ u8 status;
+ int ret = 0;
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ if (op->request->payload_size != sizeof(*rtb_request)) {
+ dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
+ __func__, op->request->payload_size,
+ sizeof(*rtb_request));
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ rtb_request = op->request->payload;
+ status = rtb_request->status;
+
+ /* Return error if the blob was invalid */
+ if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
+ ret = -EINVAL;
+ goto queue_work;
+ }
+
+ /*
+ * XXX Should we return error for insecure firmware?
+ */
+ dev_dbg(dev, "ready to boot: 0x%x, 0\n", status);
+
+queue_work:
+ /*
+ * Refresh timeout, the Interface shall load the new personality and
+ * send a new hotplug request, which shall get rid of the bootrom
+ * connection. As that can take some time, increase the timeout a bit.
+ */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
+ MODE_SWITCH_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int gb_bootrom_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
+ return gb_bootrom_firmware_size_request(op);
+ case GB_BOOTROM_TYPE_GET_FIRMWARE:
+ return gb_bootrom_get_firmware(op);
+ case GB_BOOTROM_TYPE_READY_TO_BOOT:
+ return gb_bootrom_ready_to_boot(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
+{
+ struct gb_bundle *bundle = bootrom->connection->bundle;
+ struct gb_bootrom_version_request request;
+ struct gb_bootrom_version_response response;
+ int ret;
+
+ request.major = GB_BOOTROM_VERSION_MAJOR;
+ request.minor = GB_BOOTROM_VERSION_MINOR;
+
+ ret = gb_operation_sync(bootrom->connection,
+ GB_BOOTROM_TYPE_VERSION,
+ &request, sizeof(request), &response,
+ sizeof(response));
+ if (ret) {
+ dev_err(&bundle->dev,
+ "failed to get protocol version: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (response.major > request.major) {
+ dev_err(&bundle->dev,
+ "unsupported major protocol version (%u > %u)\n",
+ response.major, request.major);
+ return -ENOTSUPP;
+ }
+
+ bootrom->protocol_major = response.major;
+ bootrom->protocol_minor = response.minor;
+
+ dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
+ response.minor);
+
+ return 0;
+}
+
+static int gb_bootrom_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_bootrom *bootrom;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
+ return -ENODEV;
+
+ bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
+ if (!bootrom)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle,
+ le16_to_cpu(cport_desc->id),
+ gb_bootrom_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto err_free_bootrom;
+ }
+
+ gb_connection_set_data(connection, bootrom);
+
+ bootrom->connection = connection;
+
+ mutex_init(&bootrom->mutex);
+ INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
+ greybus_set_drvdata(bundle, bootrom);
+
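+ /*
+ * Enable the connection for outgoing requests only at first, so that
+ * the protocol version can be negotiated and the VID/PID fixed up
+ * before incoming bootrom requests are accepted.
+ */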
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto err_connection_destroy;
+
+ ret = gb_bootrom_get_version(bootrom);
+ if (ret)
+ goto err_connection_disable;
+
+ bootrom_es2_fixup_vid_pid(bootrom);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_connection_disable;
+
+ /* Arm the initial request timeout before notifying the module */
+ gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
+ NEXT_REQ_TIMEOUT_MS);
+
+ /* Tell bootrom we're ready. */
+ ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
+ NULL, 0);
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "failed to send AP READY: %d\n", ret);
+ goto err_cancel_timeout;
+ }
+
+ dev_dbg(&bundle->dev, "AP_READY sent\n");
+
+ return 0;
+
+err_cancel_timeout:
+ gb_bootrom_cancel_timeout(bootrom);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_bootrom:
+ kfree(bootrom);
+
+ return ret;
+}
+
+static void gb_bootrom_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);
+
+ dev_dbg(&bundle->dev, "%s\n", __func__);
+
+ gb_connection_disable(bootrom->connection);
+
+ /* Disable timeouts */
+ gb_bootrom_cancel_timeout(bootrom);
+
+ /*
+ * Release firmware:
+ *
+ * As the connection and the delayed work are already disabled, we don't
+ * need to lock access to bootrom->fw here.
+ */
+ free_firmware(bootrom);
+
+ gb_connection_destroy(bootrom->connection);
+ kfree(bootrom);
+}
+
+static const struct greybus_bundle_id gb_bootrom_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
+ { }
+};
+
+static struct greybus_driver gb_bootrom_driver = {
+ .name = "bootrom",
+ .probe = gb_bootrom_probe,
+ .disconnect = gb_bootrom_disconnect,
+ .id_table = gb_bootrom_id_table,
+};
+
+module_greybus_driver(gb_bootrom_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
new file mode 100644
index 000000000000..d2ef57d090be
--- /dev/null
+++ b/drivers/staging/greybus/bundle.c
@@ -0,0 +1,253 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+static ssize_t bundle_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+
+ return sprintf(buf, "0x%02x\n", bundle->class);
+}
+static DEVICE_ATTR_RO(bundle_class);
+
+static ssize_t bundle_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+
+ return sprintf(buf, "%u\n", bundle->id);
+}
+static DEVICE_ATTR_RO(bundle_id);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+
+ if (bundle->state == NULL)
+ return sprintf(buf, "\n");
+
+ return sprintf(buf, "%s\n", bundle->state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+
+ kfree(bundle->state);
+ bundle->state = kstrdup(buf, GFP_KERNEL);
+ if (!bundle->state)
+ return -ENOMEM;
+
+ /* Tell userspace that the file contents changed */
+ sysfs_notify(&bundle->dev.kobj, NULL, "state");
+
+ return size;
+}
+static DEVICE_ATTR_RW(state);
+
+static struct attribute *bundle_attrs[] = {
+ &dev_attr_bundle_class.attr,
+ &dev_attr_bundle_id.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(bundle);
+
+static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
+ u8 bundle_id)
+{
+ struct gb_bundle *bundle;
+
+ list_for_each_entry(bundle, &intf->bundles, links) {
+ if (bundle->id == bundle_id)
+ return bundle;
+ }
+
+ return NULL;
+}
+
+static void gb_bundle_release(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+
+ trace_gb_bundle_release(bundle);
+
+ kfree(bundle->state);
+ kfree(bundle->cport_desc);
+ kfree(bundle);
+}
+
+#ifdef CONFIG_PM
+static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+
+ list_for_each_entry(connection, &bundle->connections, bundle_links)
+ gb_connection_disable(connection);
+}
+
+static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
+{
+ struct gb_connection *connection;
+
+ list_for_each_entry(connection, &bundle->connections, bundle_links)
+ gb_connection_enable(connection);
+}
+
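+/*
+ * Runtime suspend: quiesce the bundle driver (or, if it has no runtime PM
+ * callbacks, disable all of its connections) before asking the module to
+ * suspend the bundle; resume performs the same steps in reverse order.
+ */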
+static int gb_bundle_suspend(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ const struct dev_pm_ops *pm = dev->driver->pm;
+ int ret;
+
+ if (pm && pm->runtime_suspend) {
+ ret = pm->runtime_suspend(&bundle->dev);
+ if (ret)
+ return ret;
+ } else {
+ gb_bundle_disable_all_connections(bundle);
+ }
+
+ ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
+ if (ret) {
+ if (pm && pm->runtime_resume)
+ ret = pm->runtime_resume(dev);
+ else
+ gb_bundle_enable_all_connections(bundle);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_bundle_resume(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ const struct dev_pm_ops *pm = dev->driver->pm;
+ int ret;
+
+ ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
+ if (ret)
+ return ret;
+
+ if (pm && pm->runtime_resume) {
+ ret = pm->runtime_resume(dev);
+ if (ret)
+ return ret;
+ } else {
+ gb_bundle_enable_all_connections(bundle);
+ }
+
+ return 0;
+}
+
+static int gb_bundle_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_bundle_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
+};
+
+struct device_type greybus_bundle_type = {
+ .name = "greybus_bundle",
+ .release = gb_bundle_release,
+ .pm = &gb_bundle_pm_ops,
+};
+
+/*
+ * Create a gb_bundle structure to represent a discovered
+ * bundle. Returns a pointer to the new bundle or a null
+ * pointer if a failure occurs due to memory exhaustion.
+ */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+ u8 class)
+{
+ struct gb_bundle *bundle;
+
+ if (bundle_id == BUNDLE_ID_NONE) {
+ dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
+ return NULL;
+ }
+
+ /*
+ * Reject any attempt to reuse a bundle id. We initialize
+ * these serially, so there's no need to worry about keeping
+ * the interface bundle list locked here.
+ */
+ if (gb_bundle_find(intf, bundle_id)) {
+ dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
+ return NULL;
+ }
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+ if (!bundle)
+ return NULL;
+
+ bundle->intf = intf;
+ bundle->id = bundle_id;
+ bundle->class = class;
+ INIT_LIST_HEAD(&bundle->connections);
+
+ bundle->dev.parent = &intf->dev;
+ bundle->dev.bus = &greybus_bus_type;
+ bundle->dev.type = &greybus_bundle_type;
+ bundle->dev.groups = bundle_groups;
+ bundle->dev.dma_mask = intf->dev.dma_mask;
+ device_initialize(&bundle->dev);
+ dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
+
+ list_add(&bundle->links, &intf->bundles);
+
+ trace_gb_bundle_create(bundle);
+
+ return bundle;
+}
+
+int gb_bundle_add(struct gb_bundle *bundle)
+{
+ int ret;
+
+ ret = device_add(&bundle->dev);
+ if (ret) {
+ dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
+ return ret;
+ }
+
+ trace_gb_bundle_add(bundle);
+
+ return 0;
+}
+
+/*
+ * Tear down a previously set up bundle.
+ */
+void gb_bundle_destroy(struct gb_bundle *bundle)
+{
+ trace_gb_bundle_destroy(bundle);
+
+ if (device_is_registered(&bundle->dev))
+ device_del(&bundle->dev);
+
+ list_del(&bundle->links);
+
+ put_device(&bundle->dev);
+}
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
new file mode 100644
index 000000000000..0c3491def96c
--- /dev/null
+++ b/drivers/staging/greybus/bundle.h
@@ -0,0 +1,90 @@
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __BUNDLE_H
+#define __BUNDLE_H
+
+#include <linux/list.h>
+
+#define BUNDLE_ID_NONE U8_MAX
+
+/* Greybus "public" definitions" */
+struct gb_bundle {
+ struct device dev;
+ struct gb_interface *intf;
+
+ u8 id;
+ u8 class;
+ u8 class_major;
+ u8 class_minor;
+
+ size_t num_cports;
+ struct greybus_descriptor_cport *cport_desc;
+
+ struct list_head connections;
+ u8 *state;
+
+ struct list_head links; /* interface->bundles */
+};
+#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
+
+/* Greybus "private" definitions" */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+ u8 class);
+int gb_bundle_add(struct gb_bundle *bundle);
+void gb_bundle_destroy(struct gb_bundle *bundle);
+
+/* Bundle Runtime PM wrappers */
+#ifdef CONFIG_PM
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{
+ int retval;
+
+ retval = pm_runtime_get_sync(&bundle->dev);
+ if (retval < 0) {
+ dev_err(&bundle->dev,
+ "pm_runtime_get_sync failed: %d\n", retval);
+ pm_runtime_put_noidle(&bundle->dev);
+ return retval;
+ }
+
+ return 0;
+}
+
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{
+ int retval;
+
+ pm_runtime_mark_last_busy(&bundle->dev);
+ retval = pm_runtime_put_autosuspend(&bundle->dev);
+
+ return retval;
+}
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
+{
+ pm_runtime_get_noresume(&bundle->dev);
+}
+
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
+{
+ pm_runtime_put_noidle(&bundle->dev);
+}
+
+#else
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{ return 0; }
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{ return 0; }
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
+#endif
+
+#endif /* __BUNDLE_H */
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
new file mode 100644
index 000000000000..491bdd720c0c
--- /dev/null
+++ b/drivers/staging/greybus/camera.c
@@ -0,0 +1,1400 @@
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "gb-camera.h"
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+enum gb_camera_debugs_buffer_id {
+ GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+ GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+ GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+ GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+ GB_CAMERA_DEBUGFS_BUFFER_MAX,
+};
+
+struct gb_camera_debugfs_buffer {
+ char data[PAGE_SIZE];
+ size_t length;
+};
+
+enum gb_camera_state {
+ GB_CAMERA_STATE_UNCONFIGURED,
+ GB_CAMERA_STATE_CONFIGURED,
+};
+
+/**
+ * struct gb_camera - A Greybus Camera Device
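+ * @bundle: the greybus bundle this camera belongs to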
+ * @connection: the greybus connection for camera management
+ * @data_connection: the greybus connection for camera data
+ * @data_cport_id: the data CPort ID on the module side
+ * @mutex: protects the connection and state fields
+ * @state: the current module state
+ * @debugfs: debugfs entries for camera protocol operations testing
+ * @module: Greybus camera module registered to HOST processor.
+ */
+struct gb_camera {
+ struct gb_bundle *bundle;
+ struct gb_connection *connection;
+ struct gb_connection *data_connection;
+ u16 data_cport_id;
+
+ struct mutex mutex;
+ enum gb_camera_state state;
+
+ struct {
+ struct dentry *root;
+ struct gb_camera_debugfs_buffer *buffers;
+ } debugfs;
+
+ struct gb_camera_module module;
+};
+
+struct gb_camera_stream_config {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+ unsigned int vc;
+ unsigned int dt[2];
+ unsigned int max_size;
+};
+
+struct gb_camera_fmt_info {
+ enum v4l2_mbus_pixelcode mbus_code;
+ unsigned int gb_format;
+ unsigned int bpp;
+};
+
+/* GB format to media code map */
+static const struct gb_camera_fmt_info gb_fmt_info[] = {
+ {
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .gb_format = 0x01,
+ .bpp = 16,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_NV12_1x8,
+ .gb_format = 0x12,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_NV21_1x8,
+ .gb_format = 0x13,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_YU12_1x8,
+ .gb_format = 0x16,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_YV12_1x8,
+ .gb_format = 0x17,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+ .gb_format = 0x40,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_GB_CAM_METADATA_1X8,
+ .gb_format = 0x41,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_GB_CAM_DEBUG_DATA_1X8,
+ .gb_format = 0x42,
+ .bpp = 0,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .gb_format = 0x80,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGBRG10_1X10,
+ .gb_format = 0x81,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .gb_format = 0x82,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SRGGB10_1X10,
+ .gb_format = 0x83,
+ .bpp = 10,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SBGGR12_1X12,
+ .gb_format = 0x84,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGBRG12_1X12,
+ .gb_format = 0x85,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .gb_format = 0x86,
+ .bpp = 12,
+ },
+ {
+ .mbus_code = V4L2_MBUS_FMT_SRGGB12_1X12,
+ .gb_format = 0x87,
+ .bpp = 12,
+ },
+};
+
+static const struct gb_camera_fmt_info *gb_camera_get_format_info(u16 gb_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].gb_format == gb_fmt)
+ return &gb_fmt_info[i];
+ }
+
+ return NULL;
+}
+
+#define ES2_APB_CDSI0_CPORT 16
+#define ES2_APB_CDSI1_CPORT 17
+
+#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
+
+#define gcam_dbg(gcam, format...) dev_dbg(&gcam->bundle->dev, format)
+#define gcam_info(gcam, format...) dev_info(&gcam->bundle->dev, format)
+#define gcam_err(gcam, format...) dev_err(&gcam->bundle->dev, format)
+
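+/*
+ * Like gb_operation_sync(), but takes the operation creation flags and
+ * returns the actual response payload size through *response_size.
+ */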
+static int gb_camera_operation_sync_flags(struct gb_connection *connection,
+ int type, unsigned int flags,
+ void *request, size_t request_size,
+ void *response, size_t *response_size)
+{
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_operation_create_flags(connection, type, request_size,
+ *response_size, flags,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: synchronous operation of type 0x%02x failed: %d\n",
+ connection->name, type, ret);
+ } else {
+ *response_size = operation->response->payload_size;
+
+ if (operation->response->payload_size)
+ memcpy(response, operation->response->payload,
+ operation->response->payload_size);
+ }
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+
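+/*
+ * Compute the maximum CSI packet size across all configured streams: formats
+ * with an unknown depth (bpp == 0) report it directly, while for the other
+ * formats it must match width * bpp / 8.
+ */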
+static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
+ struct gb_camera_configure_streams_response *resp)
+{
+ unsigned int max_pkt_size = 0;
+ unsigned int i;
+
+ for (i = 0; i < resp->num_streams; i++) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+ const struct gb_camera_fmt_info *fmt_info;
+ unsigned int pkt_size;
+
+ fmt_info = gb_camera_get_format_info(cfg->format);
+ if (!fmt_info) {
+ gcam_err(gcam, "unsupported greybus image format: %d\n",
+ cfg->format);
+ return -EIO;
+ }
+
+ if (fmt_info->bpp == 0) {
+ pkt_size = le32_to_cpu(cfg->max_pkt_size);
+
+ if (pkt_size == 0) {
+ gcam_err(gcam,
+ "Stream %u: invalid zero maximum packet size\n",
+ i);
+ return -EIO;
+ }
+ } else {
+ pkt_size = le16_to_cpu(cfg->width) * fmt_info->bpp / 8;
+
+ if (pkt_size != le32_to_cpu(cfg->max_pkt_size)) {
+ gcam_err(gcam,
+ "Stream %u: maximum packet size mismatch (%u/%u)\n",
+ i, pkt_size, cfg->max_pkt_size);
+ return -EIO;
+ }
+ }
+
+ max_pkt_size = max(pkt_size, max_pkt_size);
+ }
+
+ return max_pkt_size;
+}
+
+/*
+ * Validate the stream configuration response verifying padding is correctly
+ * set and the returned number of streams is supported
+ */
+static int gb_camera_configure_streams_validate_response(
+ struct gb_camera *gcam,
+ struct gb_camera_configure_streams_response *resp,
+ unsigned int nstreams)
+{
+ unsigned int i;
+
+ /* Validate the returned response structure */
+ if (resp->padding[0] || resp->padding[1]) {
+ gcam_err(gcam, "response padding != 0\n");
+ return -EIO;
+ }
+
+ if (resp->num_streams > nstreams) {
+ gcam_err(gcam, "got #streams %u > request %u\n",
+ resp->num_streams, nstreams);
+ return -EIO;
+ }
+
+ for (i = 0; i < resp->num_streams; i++) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+ if (cfg->padding) {
+ gcam_err(gcam, "stream #%u padding != 0\n", i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+static int gb_camera_set_intf_power_mode(struct gb_camera *gcam, u8 intf_id,
+ bool hs)
+{
+ struct gb_svc *svc = gcam->connection->hd->svc;
+ int ret;
+
+ if (hs)
+ ret = gb_svc_intf_set_power_mode(svc, intf_id,
+ GB_SVC_UNIPRO_HS_SERIES_A,
+ GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+ GB_SVC_SMALL_AMPLITUDE,
+ GB_SVC_NO_DE_EMPHASIS,
+ GB_SVC_UNIPRO_FAST_MODE, 2, 2,
+ GB_SVC_PWRM_RXTERMINATION |
+ GB_SVC_PWRM_TXTERMINATION, 0,
+ NULL, NULL);
+ else
+ ret = gb_svc_intf_set_power_mode(svc, intf_id,
+ GB_SVC_UNIPRO_HS_SERIES_A,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ GB_SVC_SMALL_AMPLITUDE,
+ GB_SVC_NO_DE_EMPHASIS,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ 0, 0,
+ NULL, NULL);
+
+ return ret;
+}
+
+static int gb_camera_set_power_mode(struct gb_camera *gcam, bool hs)
+{
+ struct gb_interface *intf = gcam->connection->intf;
+ struct gb_svc *svc = gcam->connection->hd->svc;
+ int ret;
+
+ ret = gb_camera_set_intf_power_mode(gcam, intf->interface_id, hs);
+ if (ret < 0) {
+ gcam_err(gcam, "failed to set module interface to %s (%d)\n",
+ hs ? "HS" : "PWM", ret);
+ return ret;
+ }
+
+ ret = gb_camera_set_intf_power_mode(gcam, svc->ap_intf_id, hs);
+ if (ret < 0) {
+ gb_camera_set_intf_power_mode(gcam, intf->interface_id, !hs);
+ gcam_err(gcam, "failed to set AP interface to %s (%d)\n",
+ hs ? "HS" : "PWM", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct ap_csi_config_request {
+ __u8 csi_id;
+ __u8 flags;
+#define GB_CAMERA_CSI_FLAG_CLOCK_CONTINUOUS 0x01
+ __u8 num_lanes;
+ __u8 padding;
+ __le32 csi_clk_freq;
+ __le32 max_pkt_size;
+} __packed;
+
+/*
+ * TODO: Compute the number of lanes dynamically based on bandwidth
+ * requirements.
+ */
+#define GB_CAMERA_CSI_NUM_DATA_LANES 4
+
+#define GB_CAMERA_CSI_CLK_FREQ_MAX 999000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MIN 100000000U
+#define GB_CAMERA_CSI_CLK_FREQ_MARGIN 150000000U
+
+static int gb_camera_setup_data_connection(struct gb_camera *gcam,
+ struct gb_camera_configure_streams_response *resp,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct ap_csi_config_request csi_cfg;
+ struct gb_connection *conn;
+ unsigned int clk_freq;
+ int ret;
+
+ /*
+ * Create the data connection between the camera module data CPort and
+ * APB CDSI1. The CDSI1 CPort ID is hardcoded by the ES2 bridge.
+ */
+ conn = gb_connection_create_offloaded(gcam->bundle, gcam->data_cport_id,
+ GB_CONNECTION_FLAG_NO_FLOWCTRL |
+ GB_CONNECTION_FLAG_CDSI1);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
+
+ gcam->data_connection = conn;
+ gb_connection_set_data(conn, gcam);
+
+ ret = gb_connection_enable(conn);
+ if (ret)
+ goto error_conn_destroy;
+
+ /* Set the UniPro link to high speed mode. */
+ ret = gb_camera_set_power_mode(gcam, true);
+ if (ret < 0)
+ goto error_conn_disable;
+
+ /*
+ * Configure the APB-A CSI-2 transmitter.
+ *
+ * Hardcode the number of lanes to 4 and compute the bus clock frequency
+ * based on the module bandwidth requirements with a safety margin.
+ */
+ memset(&csi_cfg, 0, sizeof(csi_cfg));
+ csi_cfg.csi_id = 1;
+ csi_cfg.flags = 0;
+ csi_cfg.num_lanes = GB_CAMERA_CSI_NUM_DATA_LANES;
+
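+ /*
+ * For example, a module reporting a 2.4 Gbit/s data rate needs
+ * 2400000000 / 2 / 4 = 300 MHz per lane; adding the 150 MHz margin
+ * gives a 450 MHz CSI clock, well within the 100-999 MHz bounds.
+ */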
+ clk_freq = resp->data_rate / 2 / GB_CAMERA_CSI_NUM_DATA_LANES;
+ clk_freq = clamp(clk_freq + GB_CAMERA_CSI_CLK_FREQ_MARGIN,
+ GB_CAMERA_CSI_CLK_FREQ_MIN,
+ GB_CAMERA_CSI_CLK_FREQ_MAX);
+ csi_cfg.csi_clk_freq = clk_freq;
+
+ ret = gb_camera_get_max_pkt_size(gcam, resp);
+ if (ret < 0) {
+ ret = -EIO;
+ goto error_power;
+ }
+ csi_cfg.max_pkt_size = ret;
+
+ ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+ sizeof(csi_cfg),
+ GB_APB_REQUEST_CSI_TX_CONTROL, false);
+ if (ret < 0) {
+ gcam_err(gcam, "failed to start the CSI transmitter\n");
+ goto error_power;
+ }
+
+ if (csi_params) {
+ csi_params->clk_freq = csi_cfg.csi_clk_freq;
+ csi_params->num_lanes = csi_cfg.num_lanes;
+ }
+
+ return 0;
+
+error_power:
+ gb_camera_set_power_mode(gcam, false);
+error_conn_disable:
+ gb_connection_disable(gcam->data_connection);
+error_conn_destroy:
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+ return ret;
+}
+
+static void gb_camera_teardown_data_connection(struct gb_camera *gcam)
+{
+ struct ap_csi_config_request csi_cfg;
+ int ret;
+
+ /* Stop the APB1 CSI transmitter. */
+ memset(&csi_cfg, 0, sizeof(csi_cfg));
+ csi_cfg.csi_id = 1;
+
+ ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
+ sizeof(csi_cfg),
+ GB_APB_REQUEST_CSI_TX_CONTROL, false);
+
+ if (ret < 0)
+ gcam_err(gcam, "failed to stop the CSI transmitter\n");
+
+ /* Set the UniPro link to low speed mode. */
+ gb_camera_set_power_mode(gcam, false);
+
+ /* Destroy the data connection. */
+ gb_connection_disable(gcam->data_connection);
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Protocol Operations
+ */
+
+static int gb_camera_capabilities(struct gb_camera *gcam,
+ u8 *capabilities, size_t *size)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(gcam->bundle);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_camera_operation_sync_flags(gcam->connection,
+ GB_CAMERA_TYPE_CAPABILITIES,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ NULL, 0,
+ (void *)capabilities, size);
+ if (ret)
+ gcam_err(gcam, "failed to retrieve capabilities: %d\n", ret);
+
+done:
+ mutex_unlock(&gcam->mutex);
+
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+ return ret;
+}
+
+static int gb_camera_configure_streams(struct gb_camera *gcam,
+ unsigned int *num_streams,
+ unsigned int *flags,
+ struct gb_camera_stream_config *streams,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct gb_camera_configure_streams_request *req;
+ struct gb_camera_configure_streams_response *resp;
+ unsigned int nstreams = *num_streams;
+ unsigned int i;
+ size_t req_size;
+ size_t resp_size;
+ int ret;
+
+ if (nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ req_size = sizeof(*req) + nstreams * sizeof(req->config[0]);
+ resp_size = sizeof(*resp) + nstreams * sizeof(resp->config[0]);
+
+ req = kmalloc(req_size, GFP_KERNEL);
+ resp = kmalloc(resp_size, GFP_KERNEL);
+ if (!req || !resp) {
+ kfree(req);
+ kfree(resp);
+ return -ENOMEM;
+ }
+
+ req->num_streams = nstreams;
+ req->flags = *flags;
+ req->padding = 0;
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config_request *cfg = &req->config[i];
+
+ cfg->width = cpu_to_le16(streams[i].width);
+ cfg->height = cpu_to_le16(streams[i].height);
+ cfg->format = cpu_to_le16(streams[i].format);
+ cfg->padding = 0;
+ }
+
+ mutex_lock(&gcam->mutex);
+
+ ret = gb_pm_runtime_get_sync(gcam->bundle);
+ if (ret)
+ goto done_skip_pm_put;
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_camera_operation_sync_flags(gcam->connection,
+ GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+ GB_OPERATION_FLAG_SHORT_RESPONSE,
+ req, req_size,
+ resp, &resp_size);
+ if (ret < 0)
+ goto done;
+
+ ret = gb_camera_configure_streams_validate_response(gcam, resp,
+ nstreams);
+ if (ret < 0)
+ goto done;
+
+ *flags = resp->flags;
+ *num_streams = resp->num_streams;
+
+ for (i = 0; i < resp->num_streams; ++i) {
+ struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
+ streams[i].width = le16_to_cpu(cfg->width);
+ streams[i].height = le16_to_cpu(cfg->height);
+ streams[i].format = le16_to_cpu(cfg->format);
+ streams[i].vc = cfg->virtual_channel;
+ streams[i].dt[0] = cfg->data_type[0];
+ streams[i].dt[1] = cfg->data_type[1];
+ streams[i].max_size = le32_to_cpu(cfg->max_size);
+ }
+
+ if ((resp->flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED) ||
+ (req->flags & GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY))
+ goto done;
+
+ if (gcam->state == GB_CAMERA_STATE_CONFIGURED) {
+ gb_camera_teardown_data_connection(gcam);
+ gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+
+ /*
+ * When unconfiguring streams release the PM runtime reference
+ * that was acquired when streams were configured. The bundle
+ * won't be suspended until the PM runtime reference acquired at
+ * the beginning of this function gets released right before
+ * returning.
+ */
+ gb_pm_runtime_put_noidle(gcam->bundle);
+ }
+
+ if (resp->num_streams == 0)
+ goto done;
+
+ /*
+ * Make sure the bundle won't be suspended until streams get
+ * unconfigured after the stream is configured successfully
+ */
+ gb_pm_runtime_get_noresume(gcam->bundle);
+
+ /* Setup CSI-2 connection from APB-A to AP */
+ ret = gb_camera_setup_data_connection(gcam, resp, csi_params);
+ if (ret < 0) {
+ memset(req, 0, sizeof(*req));
+ gb_operation_sync(gcam->connection,
+ GB_CAMERA_TYPE_CONFIGURE_STREAMS,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ *flags = 0;
+ *num_streams = 0;
+ gb_pm_runtime_put_noidle(gcam->bundle);
+ goto done;
+ }
+
+ gcam->state = GB_CAMERA_STATE_CONFIGURED;
+
+done:
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+done_skip_pm_put:
+ mutex_unlock(&gcam->mutex);
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
+static int gb_camera_capture(struct gb_camera *gcam, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings)
+{
+ struct gb_camera_capture_request *req;
+ size_t req_size;
+ int ret;
+
+ if (settings_size > GB_CAMERA_MAX_SETTINGS_SIZE)
+ return -EINVAL;
+
+ req_size = sizeof(*req) + settings_size;
+ req = kmalloc(req_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->request_id = cpu_to_le32(request_id);
+ req->streams = streams;
+ req->padding = 0;
+ req->num_frames = cpu_to_le16(num_frames);
+ memcpy(req->settings, settings, settings_size);
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_CAPTURE,
+ req, req_size, NULL, 0);
+done:
+ mutex_unlock(&gcam->mutex);
+
+ kfree(req);
+
+ return ret;
+}
+
+static int gb_camera_flush(struct gb_camera *gcam, u32 *request_id)
+{
+ struct gb_camera_flush_response resp;
+ int ret;
+
+ mutex_lock(&gcam->mutex);
+
+ if (!gcam->connection) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_FLUSH, NULL, 0,
+ &resp, sizeof(resp));
+
+ if (ret < 0)
+ goto done;
+
+ if (request_id)
+ *request_id = le32_to_cpu(resp.request_id);
+
+done:
+ mutex_unlock(&gcam->mutex);
+
+ return ret;
+}
+
+static int gb_camera_request_handler(struct gb_operation *op)
+{
+ struct gb_camera *gcam = gb_connection_get_data(op->connection);
+ struct gb_camera_metadata_request *payload;
+ struct gb_message *request;
+
+ if (op->type != GB_CAMERA_TYPE_METADATA) {
+ gcam_err(gcam, "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ gcam_err(gcam, "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+
+ gcam_dbg(gcam, "received metadata for request %u, frame %u, stream %u\n",
+ payload->request_id, payload->frame_number, payload->stream);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interface with HOST gmp camera.
+ */
+static unsigned int gb_camera_mbus_to_gb(enum v4l2_mbus_pixelcode mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].mbus_code == mbus_code)
+ return gb_fmt_info[i].gb_format;
+ }
+ return gb_fmt_info[0].gb_format;
+}
+
+static enum v4l2_mbus_pixelcode gb_camera_gb_to_mbus(u16 gb_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
+ if (gb_fmt_info[i].gb_format == gb_fmt)
+ return gb_fmt_info[i].mbus_code;
+ }
+ return gb_fmt_info[0].mbus_code;
+}
+
+static ssize_t gb_camera_op_capabilities(void *priv, char *data, size_t len)
+{
+ struct gb_camera *gcam = priv;
+ size_t capabilities_len = len;
+ int ret;
+
+ ret = gb_camera_capabilities(gcam, data, &capabilities_len);
+ if (ret)
+ return ret;
+
+ return capabilities_len;
+}
+
+static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
+ unsigned int *flags, struct gb_camera_stream *streams,
+ struct gb_camera_csi_params *csi_params)
+{
+ struct gb_camera *gcam = priv;
+ struct gb_camera_stream_config *gb_streams;
+ unsigned int gb_flags = 0;
+ unsigned int gb_nstreams = *nstreams;
+ unsigned int i;
+ int ret;
+
+ if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
+ if (!gb_streams)
+ return -ENOMEM;
+
+ for (i = 0; i < gb_nstreams; i++) {
+ gb_streams[i].width = streams[i].width;
+ gb_streams[i].height = streams[i].height;
+ gb_streams[i].format =
+ gb_camera_mbus_to_gb(streams[i].pixel_code);
+ }
+
+ if (*flags & GB_CAMERA_IN_FLAG_TEST)
+ gb_flags |= GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
+
+ ret = gb_camera_configure_streams(gcam, &gb_nstreams,
+ &gb_flags, gb_streams, csi_params);
+ if (ret < 0)
+ goto done;
+ if (gb_nstreams > *nstreams) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ *flags = 0;
+ if (gb_flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED)
+ *flags |= GB_CAMERA_OUT_FLAG_ADJUSTED;
+
+ for (i = 0; i < gb_nstreams; i++) {
+ streams[i].width = gb_streams[i].width;
+ streams[i].height = gb_streams[i].height;
+ streams[i].vc = gb_streams[i].vc;
+ streams[i].dt[0] = gb_streams[i].dt[0];
+ streams[i].dt[1] = gb_streams[i].dt[1];
+ streams[i].max_size = gb_streams[i].max_size;
+ streams[i].pixel_code =
+ gb_camera_gb_to_mbus(gb_streams[i].format);
+ }
+ *nstreams = gb_nstreams;
+
+done:
+ kfree(gb_streams);
+ return ret;
+}
+
+static int gb_camera_op_capture(void *priv, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings)
+{
+ struct gb_camera *gcam = priv;
+
+ return gb_camera_capture(gcam, request_id, streams, num_frames,
+ settings_size, settings);
+}
+
+static int gb_camera_op_flush(void *priv, u32 *request_id)
+{
+ struct gb_camera *gcam = priv;
+
+ return gb_camera_flush(gcam, request_id);
+}
+
+static const struct gb_camera_ops gb_cam_ops = {
+ .capabilities = gb_camera_op_capabilities,
+ .configure_streams = gb_camera_op_configure_streams,
+ .capture = gb_camera_op_capture,
+ .flush = gb_camera_op_flush,
+};
+
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
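+/*
+ * Illustrative note (not part of the wire protocol): reading the
+ * "capabilities" entry returns the raw module capabilities as a hex dump,
+ * 16 bytes per line in "%*ph" form, e.g. "00 01 02 03 ... 0e 0f", with a
+ * possibly shorter final line.
+ */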
+static ssize_t gb_camera_debugfs_capabilities(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES];
+ size_t size = 1024;
+ unsigned int i;
+ u8 *caps;
+ int ret;
+
+ caps = kmalloc(size, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ ret = gb_camera_capabilities(gcam, caps, &size);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * hex_dump_to_buffer() doesn't return the number of bytes dumped prior
+ * to v4.0, we need our own implementation :-(
+ */
+ buffer->length = 0;
+
+ for (i = 0; i < size; i += 16) {
+ unsigned int nbytes = min_t(unsigned int, size - i, 16);
+
+ buffer->length += sprintf(buffer->data + buffer->length,
+ "%*ph\n", nbytes, caps + i);
+ }
+
+done:
+ kfree(caps);
+ return ret;
+}
+
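+/*
+ * Illustrative example of the "configure_streams" format (the numeric
+ * values below are made up): writing "1;0;640;480;2a;" requests one
+ * 640x480 stream with flags 0 and format code 0x2a (the format token is
+ * parsed as hexadecimal, the others as decimal). The subsequent read
+ * returns "nstreams;flags;" followed, for each stream, by
+ * "width;height;format;vc;dt0;dt1;max_size;" as configured by the module.
+ */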
+static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_STREAMS];
+ struct gb_camera_stream_config *streams;
+ unsigned int nstreams;
+ unsigned int flags;
+ unsigned int i;
+ char *token;
+ int ret;
+
+ /* Retrieve number of streams to configure */
+ token = strsep(&buf, ";");
+ if (token == NULL)
+ return -EINVAL;
+
+ ret = kstrtouint(token, 10, &nstreams);
+ if (ret < 0)
+ return ret;
+
+ if (nstreams > GB_CAMERA_MAX_STREAMS)
+ return -EINVAL;
+
+ token = strsep(&buf, ";");
+ if (token == NULL)
+ return -EINVAL;
+
+ ret = kstrtouint(token, 10, &flags);
+ if (ret < 0)
+ return ret;
+
+ /* For each stream to configure parse width, height and format */
+ streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
+ if (!streams)
+ return -ENOMEM;
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config *stream = &streams[i];
+
+ /* width */
+ token = strsep(&buf, ";");
+ if (token == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = kstrtouint(token, 10, &stream->width);
+ if (ret < 0)
+ goto done;
+
+ /* height */
+ token = strsep(&buf, ";");
+ if (token == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = kstrtouint(token, 10, &stream->height);
+ if (ret < 0)
+ goto done;
+
+ /* Image format code */
+ token = strsep(&buf, ";");
+ if (token == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = kstrtouint(token, 16, &stream->format);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = gb_camera_configure_streams(gcam, &nstreams, &flags, streams,
+ NULL);
+ if (ret < 0)
+ goto done;
+
+ buffer->length = sprintf(buffer->data, "%u;%u;", nstreams, flags);
+
+ for (i = 0; i < nstreams; ++i) {
+ struct gb_camera_stream_config *stream = &streams[i];
+
+ buffer->length += sprintf(buffer->data + buffer->length,
+ "%u;%u;%u;%u;%u;%u;%u;",
+ stream->width, stream->height,
+ stream->format, stream->vc,
+ stream->dt[0], stream->dt[1],
+ stream->max_size);
+ }
+
+ ret = len;
+
+done:
+ kfree(streams);
+ return ret;
+}
+
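+/*
+ * Illustrative example of the "capture" format (values made up): writing
+ * "5;3;1" queues capture request 5 on the streams selected by mask 0x3
+ * for one frame, with no capture settings. The request id and frame count
+ * are parsed as decimal, the stream mask as hexadecimal.
+ */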
+static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ unsigned int request_id;
+ unsigned int streams_mask;
+ unsigned int num_frames;
+ char *token;
+ int ret;
+
+ /* Request id */
+ token = strsep(&buf, ";");
+ if (token == NULL)
+ return -EINVAL;
+ ret = kstrtouint(token, 10, &request_id);
+ if (ret < 0)
+ return ret;
+
+ /* Stream mask */
+ token = strsep(&buf, ";");
+ if (token == NULL)
+ return -EINVAL;
+ ret = kstrtouint(token, 16, &streams_mask);
+ if (ret < 0)
+ return ret;
+
+ /* number of frames */
+ token = strsep(&buf, ";");
+ if (token == NULL)
+ return -EINVAL;
+ ret = kstrtouint(token, 10, &num_frames);
+ if (ret < 0)
+ return ret;
+
+ ret = gb_camera_capture(gcam, request_id, streams_mask, num_frames, 0,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t gb_camera_debugfs_flush(struct gb_camera *gcam,
+ char *buf, size_t len)
+{
+ struct gb_camera_debugfs_buffer *buffer =
+ &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_FLUSH];
+ unsigned int req_id;
+ int ret;
+
+ ret = gb_camera_flush(gcam, &req_id);
+ if (ret < 0)
+ return ret;
+
+ buffer->length = sprintf(buffer->data, "%u", req_id);
+
+ return len;
+}
+
+struct gb_camera_debugfs_entry {
+ const char *name;
+ unsigned int mask;
+ unsigned int buffer;
+ ssize_t (*execute)(struct gb_camera *gcam, char *buf, size_t len);
+};
+
+static const struct gb_camera_debugfs_entry gb_camera_debugfs_entries[] = {
+ {
+ .name = "capabilities",
+ .mask = S_IFREG | S_IRUGO,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
+ .execute = gb_camera_debugfs_capabilities,
+ }, {
+ .name = "configure_streams",
+ .mask = S_IFREG | S_IRUGO | S_IWUGO,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
+ .execute = gb_camera_debugfs_configure_streams,
+ }, {
+ .name = "capture",
+ .mask = S_IFREG | S_IRUGO | S_IWUGO,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
+ .execute = gb_camera_debugfs_capture,
+ }, {
+ .name = "flush",
+ .mask = S_IFREG | S_IRUGO | S_IWUGO,
+ .buffer = GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
+ .execute = gb_camera_debugfs_flush,
+ },
+};
+
+static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ const struct gb_camera_debugfs_entry *op = file->private_data;
+ struct gb_camera *gcam = file->f_inode->i_private;
+ struct gb_camera_debugfs_buffer *buffer;
+ ssize_t ret;
+
+ /* For read-only entries the operation is triggered by a read. */
+ if (!(op->mask & S_IWUGO)) {
+ ret = op->execute(gcam, NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ buffer = &gcam->debugfs.buffers[op->buffer];
+
+ return simple_read_from_buffer(buf, len, offset, buffer->data,
+ buffer->length);
+}
+
+static ssize_t gb_camera_debugfs_write(struct file *file,
+ const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ const struct gb_camera_debugfs_entry *op = file->private_data;
+ struct gb_camera *gcam = file->f_inode->i_private;
+ ssize_t ret;
+ char *kbuf;
+
+ if (len > 1024)
+ return -EINVAL;
+
+ kbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(kbuf, buf, len)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ kbuf[len] = '\0';
+
+ ret = op->execute(gcam, kbuf, len);
+
+done:
+ kfree(kbuf);
+ return ret;
+}
+
+static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
+ const struct gb_camera_debugfs_entry *entry =
+ &gb_camera_debugfs_entries[i];
+
+ if (!strcmp(file->f_path.dentry->d_iname, entry->name)) {
+ file->private_data = (void *)entry;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct file_operations gb_camera_debugfs_ops = {
+ .open = gb_camera_debugfs_open,
+ .read = gb_camera_debugfs_read,
+ .write = gb_camera_debugfs_write,
+};
+
+static int gb_camera_debugfs_init(struct gb_camera *gcam)
+{
+ struct gb_connection *connection = gcam->connection;
+ char dirname[27];
+ unsigned int i;
+
+ /*
+ * Create root debugfs entry and a file entry for each camera operation.
+ */
+ snprintf(dirname, sizeof(dirname), "camera-%u.%u", connection->intf->interface_id,
+ gcam->bundle->id);
+
+ gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
+ if (IS_ERR(gcam->debugfs.root)) {
+ gcam_err(gcam, "debugfs root create failed (%ld)\n",
+ PTR_ERR(gcam->debugfs.root));
+ return PTR_ERR(gcam->debugfs.root);
+ }
+
+ gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
+ GB_CAMERA_DEBUGFS_BUFFER_MAX);
+ if (!gcam->debugfs.buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
+ const struct gb_camera_debugfs_entry *entry =
+ &gb_camera_debugfs_entries[i];
+ struct dentry *dentry;
+
+ gcam->debugfs.buffers[i].length = 0;
+
+ dentry = debugfs_create_file(entry->name, entry->mask,
+ gcam->debugfs.root, gcam,
+ &gb_camera_debugfs_ops);
+ if (IS_ERR(dentry)) {
+ gcam_err(gcam,
+ "debugfs operation %s create failed (%ld)\n",
+ entry->name, PTR_ERR(dentry));
+ return PTR_ERR(dentry);
+ }
+ }
+
+ return 0;
+}
+
+static void gb_camera_debugfs_cleanup(struct gb_camera *gcam)
+{
+ debugfs_remove_recursive(gcam->debugfs.root);
+
+ vfree(gcam->debugfs.buffers);
+}
+
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+static void gb_camera_cleanup(struct gb_camera *gcam)
+{
+ gb_camera_debugfs_cleanup(gcam);
+
+ mutex_lock(&gcam->mutex);
+ if (gcam->data_connection) {
+ gb_connection_disable(gcam->data_connection);
+ gb_connection_destroy(gcam->data_connection);
+ gcam->data_connection = NULL;
+ }
+
+ if (gcam->connection) {
+ gb_connection_disable(gcam->connection);
+ gb_connection_destroy(gcam->connection);
+ gcam->connection = NULL;
+ }
+ mutex_unlock(&gcam->mutex);
+}
+
+static void gb_camera_release_module(struct kref *ref)
+{
+ struct gb_camera_module *cam_mod =
+ container_of(ref, struct gb_camera_module, refcount);
+ kfree(cam_mod->priv);
+}
+
+static int gb_camera_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct gb_connection *conn;
+ struct gb_camera *gcam;
+ u16 mgmt_cport_id = 0;
+ u16 data_cport_id = 0;
+ unsigned int i;
+ int ret;
+
+ /*
+ * The camera bundle must contain exactly two CPorts, one for the
+ * camera management protocol and one for the camera data protocol.
+ */
+ if (bundle->num_cports != 2)
+ return -ENODEV;
+
+ for (i = 0; i < bundle->num_cports; ++i) {
+ struct greybus_descriptor_cport *desc = &bundle->cport_desc[i];
+
+ switch (desc->protocol_id) {
+ case GREYBUS_PROTOCOL_CAMERA_MGMT:
+ mgmt_cport_id = le16_to_cpu(desc->id);
+ break;
+ case GREYBUS_PROTOCOL_CAMERA_DATA:
+ data_cport_id = le16_to_cpu(desc->id);
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ if (!mgmt_cport_id || !data_cport_id)
+ return -ENODEV;
+
+ gcam = kzalloc(sizeof(*gcam), GFP_KERNEL);
+ if (!gcam)
+ return -ENOMEM;
+
+ mutex_init(&gcam->mutex);
+
+ gcam->bundle = bundle;
+ gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
+ gcam->data_cport_id = data_cport_id;
+
+ conn = gb_connection_create(bundle, mgmt_cport_id,
+ gb_camera_request_handler);
+ if (IS_ERR(conn)) {
+ ret = PTR_ERR(conn);
+ goto error;
+ }
+
+ gcam->connection = conn;
+ gb_connection_set_data(conn, gcam);
+
+ ret = gb_connection_enable(conn);
+ if (ret)
+ goto error;
+
+ ret = gb_camera_debugfs_init(gcam);
+ if (ret < 0)
+ goto error;
+
+ gcam->module.priv = gcam;
+ gcam->module.ops = &gb_cam_ops;
+ gcam->module.interface_id = gcam->connection->intf->interface_id;
+ gcam->module.release = gb_camera_release_module;
+ ret = gb_camera_register(&gcam->module);
+ if (ret < 0)
+ goto error;
+
+ greybus_set_drvdata(bundle, gcam);
+
+ gb_pm_runtime_put_autosuspend(gcam->bundle);
+
+ return 0;
+
+error:
+ gb_camera_cleanup(gcam);
+ kfree(gcam);
+ return ret;
+}
+
+static void gb_camera_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_camera_cleanup(gcam);
+ gb_camera_unregister(&gcam->module);
+}
+
+static const struct greybus_bundle_id gb_camera_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_CAMERA) },
+ { },
+};
+
+#ifdef CONFIG_PM
+static int gb_camera_suspend(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+
+ if (gcam->data_connection)
+ gb_connection_disable(gcam->data_connection);
+
+ gb_connection_disable(gcam->connection);
+
+ return 0;
+}
+
+static int gb_camera_resume(struct device *dev)
+{
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gb_camera *gcam = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_connection_enable(gcam->connection);
+ if (ret) {
+ gcam_err(gcam, "failed to enable connection: %d\n", ret);
+ return ret;
+ }
+
+ if (gcam->data_connection) {
+ ret = gb_connection_enable(gcam->data_connection);
+ if (ret) {
+ gcam_err(gcam,
+ "failed to enable data connection: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_camera_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_camera_suspend, gb_camera_resume, NULL)
+};
+
+static struct greybus_driver gb_camera_driver = {
+ .name = "camera",
+ .probe = gb_camera_probe,
+ .disconnect = gb_camera_disconnect,
+ .id_table = gb_camera_id_table,
+ .driver.pm = &gb_camera_pm_ops,
+};
+
+module_greybus_driver(gb_camera_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
new file mode 100644
index 000000000000..557075147f2d
--- /dev/null
+++ b/drivers/staging/greybus/connection.c
@@ -0,0 +1,938 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+
+#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
+
+
+static void gb_connection_kref_release(struct kref *kref);
+
+
+static DEFINE_SPINLOCK(gb_connections_lock);
+static DEFINE_MUTEX(gb_connection_mutex);
+
+
+/* Caller holds gb_connection_mutex. */
+static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
+{
+ struct gb_host_device *hd = intf->hd;
+ struct gb_connection *connection;
+
+ list_for_each_entry(connection, &hd->connections, hd_links) {
+ if (connection->intf == intf &&
+ connection->intf_cport_id == cport_id)
+ return true;
+ }
+
+ return false;
+}
+
+static void gb_connection_get(struct gb_connection *connection)
+{
+ kref_get(&connection->kref);
+
+ trace_gb_connection_get(connection);
+}
+
+static void gb_connection_put(struct gb_connection *connection)
+{
+ trace_gb_connection_put(connection);
+
+ kref_put(&connection->kref, gb_connection_kref_release);
+}
+
+/*
+ * Returns a reference-counted pointer to the connection if found.
+ */
+static struct gb_connection *
+gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
+{
+ struct gb_connection *connection;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gb_connections_lock, flags);
+ list_for_each_entry(connection, &hd->connections, hd_links)
+ if (connection->hd_cport_id == cport_id) {
+ gb_connection_get(connection);
+ goto found;
+ }
+ connection = NULL;
+found:
+ spin_unlock_irqrestore(&gb_connections_lock, flags);
+
+ return connection;
+}
+
+/*
+ * Callback from the host driver to let us know that data has been
+ * received on the given CPort.
+ */
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+ u8 *data, size_t length)
+{
+ struct gb_connection *connection;
+
+ trace_gb_hd_in(hd);
+
+ connection = gb_connection_hd_find(hd, cport_id);
+ if (!connection) {
+ dev_err(&hd->dev,
+ "nonexistent connection (%zu bytes dropped)\n", length);
+ return;
+ }
+ gb_connection_recv(connection, data, length);
+ gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(greybus_data_rcvd);
+
+static void gb_connection_kref_release(struct kref *kref)
+{
+ struct gb_connection *connection;
+
+ connection = container_of(kref, struct gb_connection, kref);
+
+ trace_gb_connection_release(connection);
+
+ kfree(connection);
+}
+
+static void gb_connection_init_name(struct gb_connection *connection)
+{
+ u16 hd_cport_id = connection->hd_cport_id;
+ u16 cport_id = 0;
+ u8 intf_id = 0;
+
+ if (connection->intf) {
+ intf_id = connection->intf->interface_id;
+ cport_id = connection->intf_cport_id;
+ }
+
+ snprintf(connection->name, sizeof(connection->name),
+ "%u/%u:%u", hd_cport_id, intf_id, cport_id);
+}
+
+/*
+ * _gb_connection_create() - create a Greybus connection
+ * @hd: host device of the connection
+ * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
+ * @intf: remote interface, or NULL for static connections
+ * @bundle: remote-interface bundle (may be NULL)
+ * @cport_id: remote-interface cport id, or 0 for static connections
+ * @handler: request handler (may be NULL)
+ * @flags: connection flags
+ *
+ * Create a Greybus connection, representing the bidirectional link
+ * between a CPort on a (local) Greybus host device and a CPort on
+ * another Greybus interface.
+ *
+ * A connection also maintains the state of operations sent over the
+ * connection.
+ *
+ * Serialised against concurrent create and destroy using the
+ * gb_connection_mutex.
+ *
+ * Return: A pointer to the new connection if successful, or an ERR_PTR
+ * otherwise.
+ */
+static struct gb_connection *
+_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
+ struct gb_interface *intf,
+ struct gb_bundle *bundle, int cport_id,
+ gb_request_handler_t handler,
+ unsigned long flags)
+{
+ struct gb_connection *connection;
+ int ret;
+
+ mutex_lock(&gb_connection_mutex);
+
+ if (intf && gb_connection_cport_in_use(intf, cport_id)) {
+ dev_err(&intf->dev, "cport %u already in use\n", cport_id);
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
+ if (ret < 0) {
+ dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
+ goto err_unlock;
+ }
+ hd_cport_id = ret;
+
+ connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+ if (!connection) {
+ ret = -ENOMEM;
+ goto err_hd_cport_release;
+ }
+
+ connection->hd_cport_id = hd_cport_id;
+ connection->intf_cport_id = cport_id;
+ connection->hd = hd;
+ connection->intf = intf;
+ connection->bundle = bundle;
+ connection->handler = handler;
+ connection->flags = flags;
+ if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
+ connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
+ connection->state = GB_CONNECTION_STATE_DISABLED;
+
+ atomic_set(&connection->op_cycle, 0);
+ mutex_init(&connection->mutex);
+ spin_lock_init(&connection->lock);
+ INIT_LIST_HEAD(&connection->operations);
+
+ connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
+ dev_name(&hd->dev), hd_cport_id);
+ if (!connection->wq) {
+ ret = -ENOMEM;
+ goto err_free_connection;
+ }
+
+ kref_init(&connection->kref);
+
+ gb_connection_init_name(connection);
+
+ spin_lock_irq(&gb_connections_lock);
+ list_add(&connection->hd_links, &hd->connections);
+
+ if (bundle)
+ list_add(&connection->bundle_links, &bundle->connections);
+ else
+ INIT_LIST_HEAD(&connection->bundle_links);
+
+ spin_unlock_irq(&gb_connections_lock);
+
+ mutex_unlock(&gb_connection_mutex);
+
+ trace_gb_connection_create(connection);
+
+ return connection;
+
+err_free_connection:
+ kfree(connection);
+err_hd_cport_release:
+ gb_hd_cport_release(hd, hd_cport_id);
+err_unlock:
+ mutex_unlock(&gb_connection_mutex);
+
+ return ERR_PTR(ret);
+}
+
+struct gb_connection *
+gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
+ gb_request_handler_t handler)
+{
+ return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
+ GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create_control(struct gb_interface *intf)
+{
+ return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
+ GB_CONNECTION_FLAG_CONTROL |
+ GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
+ gb_request_handler_t handler)
+{
+ struct gb_interface *intf = bundle->intf;
+
+ return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+ handler, 0);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create);
+
+struct gb_connection *
+gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
+ gb_request_handler_t handler,
+ unsigned long flags)
+{
+ struct gb_interface *intf = bundle->intf;
+
+ if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
+ flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
+
+ return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+ handler, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_flags);
+
+struct gb_connection *
+gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
+ unsigned long flags)
+{
+ flags |= GB_CONNECTION_FLAG_OFFLOADED;
+
+ return gb_connection_create_flags(bundle, cport_id, NULL, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
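+
+/*
+ * Typical bundle-driver usage (sketch only; the handler name, priv pointer
+ * and error label below are hypothetical):
+ *
+ * connection = gb_connection_create(bundle, cport_id, gb_foo_handler);
+ * if (IS_ERR(connection))
+ * return PTR_ERR(connection);
+ *
+ * gb_connection_set_data(connection, priv);
+ *
+ * ret = gb_connection_enable(connection);
+ * if (ret)
+ * goto err_destroy_connection;
+ *
+ * ...and on disconnect:
+ *
+ * gb_connection_disable(connection);
+ * gb_connection_destroy(connection);
+ */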
+
+static int gb_connection_hd_cport_enable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_enable)
+ return 0;
+
+ ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
+ connection->flags);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gb_connection_hd_cport_disable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_disable)
+ return;
+
+ ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
+ connection->name, ret);
+ }
+}
+
+static int gb_connection_hd_cport_connected(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_connected)
+ return 0;
+
+ ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_connection_hd_cport_flush(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_flush)
+ return 0;
+
+ ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ size_t peer_space;
+ int ret;
+
+ if (!hd->driver->cport_quiesce)
+ return 0;
+
+ peer_space = sizeof(struct gb_operation_msg_hdr) +
+ sizeof(struct gb_cport_shutdown_request);
+
+ if (connection->mode_switch)
+ peer_space += sizeof(struct gb_operation_msg_hdr);
+
+ ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
+ peer_space,
+ GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_connection_hd_cport_clear(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->cport_clear)
+ return 0;
+
+ ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Request the SVC to create a connection from the AP's cport to the
+ * interface's cport.
+ */
+static int
+gb_connection_svc_connection_create(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ struct gb_interface *intf;
+ u8 cport_flags;
+ int ret;
+
+ if (gb_connection_is_static(connection))
+ return 0;
+
+ intf = connection->intf;
+
+ /*
+ * Enable either E2EFC or CSD, unless no flow control is requested.
+ */
+ cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
+ if (gb_connection_flow_control_disabled(connection)) {
+ cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
+ } else if (gb_connection_e2efc_enabled(connection)) {
+ cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
+ GB_SVC_CPORT_FLAG_E2EFC;
+ }
+
+ ret = gb_svc_connection_create(hd->svc,
+ hd->svc->ap_intf_id,
+ connection->hd_cport_id,
+ intf->interface_id,
+ connection->intf_cport_id,
+ cport_flags);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: failed to create svc connection: %d\n",
+ connection->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
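+
+/*
+ * For reference, the mapping implemented above (GB_SVC_CPORT_FLAG_
+ * prefixes dropped) is:
+ *
+ * default: CSV_N | CSD_N | E2EFC (E2E flow control)
+ * GB_CONNECTION_FLAG_CSD: CSV_N (CSD-based flow control)
+ * GB_CONNECTION_FLAG_NO_FLOWCTRL: CSV_N | CSD_N (no flow control)
+ */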
+
+static void
+gb_connection_svc_connection_destroy(struct gb_connection *connection)
+{
+ if (gb_connection_is_static(connection))
+ return;
+
+ gb_svc_connection_destroy(connection->hd->svc,
+ connection->hd->svc->ap_intf_id,
+ connection->hd_cport_id,
+ connection->intf->interface_id,
+ connection->intf_cport_id);
+}
+
+/* Inform the interface about active CPorts */
+static int gb_connection_control_connected(struct gb_connection *connection)
+{
+ struct gb_control *control;
+ u16 cport_id = connection->intf_cport_id;
+ int ret;
+
+ if (gb_connection_is_static(connection))
+ return 0;
+
+ if (gb_connection_is_control(connection))
+ return 0;
+
+ control = connection->intf->control;
+
+ ret = gb_control_connected_operation(control, cport_id);
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "failed to connect cport: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+gb_connection_control_disconnecting(struct gb_connection *connection)
+{
+ struct gb_control *control;
+ u16 cport_id = connection->intf_cport_id;
+ int ret;
+
+ if (gb_connection_is_static(connection))
+ return;
+
+ control = connection->intf->control;
+
+ ret = gb_control_disconnecting_operation(control, cport_id);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: failed to send disconnecting: %d\n",
+ connection->name, ret);
+ }
+}
+
+static void
+gb_connection_control_disconnected(struct gb_connection *connection)
+{
+ struct gb_control *control;
+ u16 cport_id = connection->intf_cport_id;
+ int ret;
+
+ if (gb_connection_is_static(connection))
+ return;
+
+ control = connection->intf->control;
+
+ if (gb_connection_is_control(connection)) {
+ if (connection->mode_switch) {
+ ret = gb_control_mode_switch_operation(control);
+ if (ret) {
+ /*
+ * Allow mode switch to time out waiting for
+ * mailbox event.
+ */
+ return;
+ }
+ }
+
+ return;
+ }
+
+ ret = gb_control_disconnected_operation(control, cport_id);
+ if (ret) {
+ dev_warn(&connection->bundle->dev,
+ "failed to disconnect cport: %d\n", ret);
+ }
+}
+
+static int gb_connection_shutdown_operation(struct gb_connection *connection,
+ u8 phase)
+{
+ struct gb_cport_shutdown_request *req;
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_operation_create_core(connection,
+ GB_REQUEST_TYPE_CPORT_SHUTDOWN,
+ sizeof(*req), 0, 0,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ req = operation->request->payload;
+ req->phase = phase;
+
+ ret = gb_operation_request_send_sync(operation);
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int gb_connection_cport_shutdown(struct gb_connection *connection,
+ u8 phase)
+{
+ struct gb_host_device *hd = connection->hd;
+ const struct gb_hd_driver *drv = hd->driver;
+ int ret;
+
+ if (gb_connection_is_static(connection))
+ return 0;
+
+ if (gb_connection_is_offloaded(connection)) {
+ if (!drv->cport_shutdown)
+ return 0;
+
+ ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+ } else {
+ ret = gb_connection_shutdown_operation(connection, phase);
+ }
+
+ if (ret) {
+ dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
+ connection->name, phase, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
+{
+ return gb_connection_cport_shutdown(connection, 1);
+}
+
+static int
+gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
+{
+ return gb_connection_cport_shutdown(connection, 2);
+}
+
+/*
+ * Cancel all active operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to DISABLED or
+ * DISCONNECTING.
+ */
+static void gb_connection_cancel_operations(struct gb_connection *connection,
+ int errno)
+ __must_hold(&connection->lock)
+{
+ struct gb_operation *operation;
+
+ while (!list_empty(&connection->operations)) {
+ operation = list_last_entry(&connection->operations,
+ struct gb_operation, links);
+ gb_operation_get(operation);
+ spin_unlock_irq(&connection->lock);
+
+ if (gb_operation_is_incoming(operation))
+ gb_operation_cancel_incoming(operation, errno);
+ else
+ gb_operation_cancel(operation, errno);
+
+ gb_operation_put(operation);
+
+ spin_lock_irq(&connection->lock);
+ }
+}
+
+/*
+ * Cancel all active incoming operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to ENABLED_TX.
+ */
+static void
+gb_connection_flush_incoming_operations(struct gb_connection *connection,
+ int errno)
+ __must_hold(&connection->lock)
+{
+ struct gb_operation *operation;
+ bool incoming;
+
+ while (!list_empty(&connection->operations)) {
+ incoming = false;
+ list_for_each_entry(operation, &connection->operations,
+ links) {
+ if (gb_operation_is_incoming(operation)) {
+ gb_operation_get(operation);
+ incoming = true;
+ break;
+ }
+ }
+
+ if (!incoming)
+ break;
+
+ spin_unlock_irq(&connection->lock);
+
+ /* FIXME: flush, not cancel? */
+ gb_operation_cancel_incoming(operation, errno);
+ gb_operation_put(operation);
+
+ spin_lock_irq(&connection->lock);
+ }
+}
+
+/*
+ * _gb_connection_enable() - enable a connection
+ * @connection: connection to enable
+ * @rx: whether to enable incoming requests
+ *
+ * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
+ * ENABLED_TX->ENABLED state transitions.
+ *
+ * Locking: Caller holds connection->mutex.
+ */
+static int _gb_connection_enable(struct gb_connection *connection, bool rx)
+{
+ int ret;
+
+ /* Handle ENABLED_TX -> ENABLED transitions. */
+ if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
+ if (!(connection->handler && rx))
+ return 0;
+
+ spin_lock_irq(&connection->lock);
+ connection->state = GB_CONNECTION_STATE_ENABLED;
+ spin_unlock_irq(&connection->lock);
+
+ return 0;
+ }
+
+ ret = gb_connection_hd_cport_enable(connection);
+ if (ret)
+ return ret;
+
+ ret = gb_connection_svc_connection_create(connection);
+ if (ret)
+ goto err_hd_cport_clear;
+
+ ret = gb_connection_hd_cport_connected(connection);
+ if (ret)
+ goto err_svc_connection_destroy;
+
+ spin_lock_irq(&connection->lock);
+ if (connection->handler && rx)
+ connection->state = GB_CONNECTION_STATE_ENABLED;
+ else
+ connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+ spin_unlock_irq(&connection->lock);
+
+ ret = gb_connection_control_connected(connection);
+ if (ret)
+ goto err_control_disconnecting;
+
+ return 0;
+
+err_control_disconnecting:
+ spin_lock_irq(&connection->lock);
+ connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
+
+ /* Transmit queue should already be empty. */
+ gb_connection_hd_cport_flush(connection);
+
+ gb_connection_control_disconnecting(connection);
+ gb_connection_cport_shutdown_phase_1(connection);
+ gb_connection_hd_cport_quiesce(connection);
+ gb_connection_cport_shutdown_phase_2(connection);
+ gb_connection_control_disconnected(connection);
+ connection->state = GB_CONNECTION_STATE_DISABLED;
+err_svc_connection_destroy:
+ gb_connection_svc_connection_destroy(connection);
+err_hd_cport_clear:
+ gb_connection_hd_cport_clear(connection);
+
+ gb_connection_hd_cport_disable(connection);
+
+ return ret;
+}
+
+int gb_connection_enable(struct gb_connection *connection)
+{
+ int ret = 0;
+
+ mutex_lock(&connection->mutex);
+
+ if (connection->state == GB_CONNECTION_STATE_ENABLED)
+ goto out_unlock;
+
+ ret = _gb_connection_enable(connection, true);
+ if (!ret)
+ trace_gb_connection_enable(connection);
+
+out_unlock:
+ mutex_unlock(&connection->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable);
+
+int gb_connection_enable_tx(struct gb_connection *connection)
+{
+ int ret = 0;
+
+ mutex_lock(&connection->mutex);
+
+ if (connection->state == GB_CONNECTION_STATE_ENABLED) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
+ goto out_unlock;
+
+ ret = _gb_connection_enable(connection, false);
+ if (!ret)
+ trace_gb_connection_enable(connection);
+
+out_unlock:
+ mutex_unlock(&connection->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
+
+void gb_connection_disable_rx(struct gb_connection *connection)
+{
+ mutex_lock(&connection->mutex);
+
+ spin_lock_irq(&connection->lock);
+ if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+ spin_unlock_irq(&connection->lock);
+ goto out_unlock;
+ }
+ connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+ gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
+
+ trace_gb_connection_disable(connection);
+
+out_unlock:
+ mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection)
+{
+ connection->mode_switch = true;
+}
+
+void gb_connection_mode_switch_complete(struct gb_connection *connection)
+{
+ gb_connection_svc_connection_destroy(connection);
+ gb_connection_hd_cport_clear(connection);
+
+ gb_connection_hd_cport_disable(connection);
+
+ connection->mode_switch = false;
+}
+
+void gb_connection_disable(struct gb_connection *connection)
+{
+ mutex_lock(&connection->mutex);
+
+ if (connection->state == GB_CONNECTION_STATE_DISABLED)
+ goto out_unlock;
+
+ trace_gb_connection_disable(connection);
+
+ spin_lock_irq(&connection->lock);
+ connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
+
+ gb_connection_hd_cport_flush(connection);
+
+ gb_connection_control_disconnecting(connection);
+ gb_connection_cport_shutdown_phase_1(connection);
+ gb_connection_hd_cport_quiesce(connection);
+ gb_connection_cport_shutdown_phase_2(connection);
+ gb_connection_control_disconnected(connection);
+
+ connection->state = GB_CONNECTION_STATE_DISABLED;
+
+ /* control-connection tear down is deferred when mode switching */
+ if (!connection->mode_switch) {
+ gb_connection_svc_connection_destroy(connection);
+ gb_connection_hd_cport_clear(connection);
+
+ gb_connection_hd_cport_disable(connection);
+ }
+
+out_unlock:
+ mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable);
+
+/* Disable a connection without communicating with the remote end. */
+void gb_connection_disable_forced(struct gb_connection *connection)
+{
+ mutex_lock(&connection->mutex);
+
+ if (connection->state == GB_CONNECTION_STATE_DISABLED)
+ goto out_unlock;
+
+ trace_gb_connection_disable(connection);
+
+ spin_lock_irq(&connection->lock);
+ connection->state = GB_CONNECTION_STATE_DISABLED;
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
+
+ gb_connection_hd_cport_flush(connection);
+
+ gb_connection_svc_connection_destroy(connection);
+ gb_connection_hd_cport_clear(connection);
+
+ gb_connection_hd_cport_disable(connection);
+out_unlock:
+ mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
+
+/* Caller must have disabled the connection before destroying it. */
+void gb_connection_destroy(struct gb_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
+ gb_connection_disable(connection);
+
+ mutex_lock(&gb_connection_mutex);
+
+ spin_lock_irq(&gb_connections_lock);
+ list_del(&connection->bundle_links);
+ list_del(&connection->hd_links);
+ spin_unlock_irq(&gb_connections_lock);
+
+ destroy_workqueue(connection->wq);
+
+ gb_hd_cport_release(connection->hd, connection->hd_cport_id);
+ connection->hd_cport_id = CPORT_ID_BAD;
+
+ mutex_unlock(&gb_connection_mutex);
+
+ gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(gb_connection_destroy);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->latency_tag_enable)
+ return;
+
+ ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: failed to enable latency tag: %d\n",
+ connection->name, ret);
+ }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
+
+void gb_connection_latency_tag_disable(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+ int ret;
+
+ if (!hd->driver->latency_tag_disable)
+ return;
+
+ ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: failed to disable latency tag: %d\n",
+ connection->name, ret);
+ }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
new file mode 100644
index 000000000000..4d9f4c64176c
--- /dev/null
+++ b/drivers/staging/greybus/connection.h
@@ -0,0 +1,129 @@
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONNECTION_H
+#define __CONNECTION_H
+
+#include <linux/list.h>
+#include <linux/kfifo.h>
+
+#define GB_CONNECTION_FLAG_CSD BIT(0)
+#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
+#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
+#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
+#define GB_CONNECTION_FLAG_CONTROL BIT(4)
+#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
+
+#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
+
+enum gb_connection_state {
+ GB_CONNECTION_STATE_DISABLED = 0,
+ GB_CONNECTION_STATE_ENABLED_TX = 1,
+ GB_CONNECTION_STATE_ENABLED = 2,
+ GB_CONNECTION_STATE_DISCONNECTING = 3,
+};
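+
+/*
+ * Summary of state transitions (derived from connection.c; a connection
+ * created without a request handler never enters ENABLED):
+ *
+ * DISABLED -> ENABLED_TX: gb_connection_enable_tx()
+ * DISABLED/ENABLED_TX -> ENABLED: gb_connection_enable()
+ * ENABLED -> ENABLED_TX: gb_connection_disable_rx()
+ * any enabled state -> DISABLED: gb_connection_disable() (via
+ * DISCONNECTING) or gb_connection_disable_forced()
+ */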
+
+struct gb_operation;
+
+typedef int (*gb_request_handler_t)(struct gb_operation *);
+
+struct gb_connection {
+ struct gb_host_device *hd;
+ struct gb_interface *intf;
+ struct gb_bundle *bundle;
+ struct kref kref;
+ u16 hd_cport_id;
+ u16 intf_cport_id;
+
+ struct list_head hd_links;
+ struct list_head bundle_links;
+
+ gb_request_handler_t handler;
+ unsigned long flags;
+
+ struct mutex mutex;
+ spinlock_t lock;
+ enum gb_connection_state state;
+ struct list_head operations;
+
+ char name[16];
+ struct workqueue_struct *wq;
+
+ atomic_t op_cycle;
+
+ void *private;
+
+ bool mode_switch;
+};
+
+struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
+ u16 hd_cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
+struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler,
+ unsigned long flags);
+struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
+ u16 cport_id, unsigned long flags);
+void gb_connection_destroy(struct gb_connection *connection);
+
+static inline bool gb_connection_is_static(struct gb_connection *connection)
+{
+ return !connection->intf;
+}
+
+int gb_connection_enable(struct gb_connection *connection);
+int gb_connection_enable_tx(struct gb_connection *connection);
+void gb_connection_disable_rx(struct gb_connection *connection);
+void gb_connection_disable(struct gb_connection *connection);
+void gb_connection_disable_forced(struct gb_connection *connection);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection);
+void gb_connection_mode_switch_complete(struct gb_connection *connection);
+
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+ u8 *data, size_t length);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection);
+void gb_connection_latency_tag_disable(struct gb_connection *connection);
+
+static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
+{
+ return !(connection->flags & GB_CONNECTION_FLAG_CSD);
+}
+
+static inline bool
+gb_connection_flow_control_disabled(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
+}
+
+static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
+}
+
+static inline bool gb_connection_is_control(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_CONTROL;
+}
+
+static inline void *gb_connection_get_data(struct gb_connection *connection)
+{
+ return connection->private;
+}
+
+static inline void gb_connection_set_data(struct gb_connection *connection,
+ void *data)
+{
+ connection->private = data;
+}
+
+#endif /* __CONNECTION_H */
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
new file mode 100644
index 000000000000..4716190e740a
--- /dev/null
+++ b/drivers/staging/greybus/control.c
@@ -0,0 +1,635 @@
+/*
+ * Greybus CPort control protocol.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "greybus.h"
+
+/* Highest control-protocol version supported */
+#define GB_CONTROL_VERSION_MAJOR 0
+#define GB_CONTROL_VERSION_MINOR 1
+
+
+static int gb_control_get_version(struct gb_control *control)
+{
+ struct gb_interface *intf = control->connection->intf;
+ struct gb_control_version_request request;
+ struct gb_control_version_response response;
+ int ret;
+
+ request.major = GB_CONTROL_VERSION_MAJOR;
+ request.minor = GB_CONTROL_VERSION_MINOR;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_VERSION,
+ &request, sizeof(request), &response,
+ sizeof(response));
+ if (ret) {
+ dev_err(&intf->dev,
+ "failed to get control-protocol version: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (response.major > request.major) {
+ dev_err(&intf->dev,
+ "unsupported major control-protocol version (%u > %u)\n",
+ response.major, request.major);
+ return -ENOTSUPP;
+ }
+
+ control->protocol_major = response.major;
+ control->protocol_minor = response.minor;
+
+ dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
+ response.minor);
+
+ return 0;
+}
+
+static int gb_control_get_bundle_version(struct gb_control *control,
+ struct gb_bundle *bundle)
+{
+ struct gb_interface *intf = control->connection->intf;
+ struct gb_control_bundle_version_request request;
+ struct gb_control_bundle_version_response response;
+ int ret;
+
+ request.bundle_id = bundle->id;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_BUNDLE_VERSION,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&intf->dev,
+ "failed to get bundle %u class version: %d\n",
+ bundle->id, ret);
+ return ret;
+ }
+
+ bundle->class_major = response.major;
+ bundle->class_minor = response.minor;
+
+ dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
+ response.major, response.minor);
+
+ return 0;
+}
+
+int gb_control_get_bundle_versions(struct gb_control *control)
+{
+ struct gb_interface *intf = control->connection->intf;
+ struct gb_bundle *bundle;
+ int ret;
+
+ if (!control->has_bundle_version)
+ return 0;
+
+ list_for_each_entry(bundle, &intf->bundles, links) {
+ ret = gb_control_get_bundle_version(control, bundle);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Get the manifest size from the interface */
+int gb_control_get_manifest_size_operation(struct gb_interface *intf)
+{
+ struct gb_control_get_manifest_size_response response;
+ struct gb_connection *connection = intf->control->connection;
+ int ret;
+
+ ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
+ NULL, 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(&connection->intf->dev,
+ "failed to get manifest size: %d\n", ret);
+ return ret;
+ }
+
+ return le16_to_cpu(response.size);
+}
+
+/* Read the manifest from the interface */
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+ size_t size)
+{
+ struct gb_connection *connection = intf->control->connection;
+
+ return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
+ NULL, 0, manifest, size);
+}
+
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
+{
+ struct gb_control_connected_request request;
+
+ request.cport_id = cpu_to_le16(cport_id);
+ return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
+ &request, sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
+{
+ struct gb_control_disconnected_request request;
+
+ request.cport_id = cpu_to_le16(cport_id);
+ return gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_DISCONNECTED, &request,
+ sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnecting_operation(struct gb_control *control,
+ u16 cport_id)
+{
+ struct gb_control_disconnecting_request *request;
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_operation_create_core(control->connection,
+ GB_CONTROL_TYPE_DISCONNECTING,
+ sizeof(*request), 0, 0,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->cport_id = cpu_to_le16(cport_id);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&control->dev, "failed to send disconnecting: %d\n",
+ ret);
+ }
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+int gb_control_mode_switch_operation(struct gb_control *control)
+{
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_operation_create_core(control->connection,
+ GB_CONTROL_TYPE_MODE_SWITCH,
+ 0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret)
+ dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+ struct gb_control_timesync_enable_request request;
+
+ request.count = count;
+ request.frame_time = cpu_to_le64(frame_time);
+ request.strobe_delay = cpu_to_le32(strobe_delay);
+ request.refclk = cpu_to_le32(refclk);
+ return gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
+ sizeof(request), NULL, 0);
+}
+
+int gb_control_timesync_disable(struct gb_control *control)
+{
+ return gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
+ NULL, 0);
+}
+
+int gb_control_timesync_get_last_event(struct gb_control *control,
+ u64 *frame_time)
+{
+ struct gb_control_timesync_get_last_event_response response;
+ int ret;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
+ NULL, 0, &response, sizeof(response));
+ if (!ret)
+ *frame_time = le64_to_cpu(response.frame_time);
+ return ret;
+}
+
+int gb_control_timesync_authoritative(struct gb_control *control,
+ u64 *frame_time)
+{
+ struct gb_control_timesync_authoritative_request request;
+ int i;
+
+ for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+ request.frame_time[i] = cpu_to_le64(frame_time[i]);
+
+ return gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
+ &request, sizeof(request),
+ NULL, 0);
+}
+
+static int gb_control_bundle_pm_status_map(u8 status)
+{
+ switch (status) {
+ case GB_CONTROL_BUNDLE_PM_INVAL:
+ return -EINVAL;
+ case GB_CONTROL_BUNDLE_PM_BUSY:
+ return -EBUSY;
+ case GB_CONTROL_BUNDLE_PM_NA:
+ return -ENOMSG;
+ case GB_CONTROL_BUNDLE_PM_FAIL:
+ default:
+ return -EREMOTEIO;
+ }
+}
+
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
+{
+ struct gb_control_bundle_pm_request request;
+ struct gb_control_bundle_pm_response response;
+ int ret;
+
+ request.bundle_id = bundle_id;
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
+ bundle_id, ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+ dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
+ bundle_id, response.status);
+ return gb_control_bundle_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
+{
+ struct gb_control_bundle_pm_request request;
+ struct gb_control_bundle_pm_response response;
+ int ret;
+
+ request.bundle_id = bundle_id;
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
+ bundle_id, ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+ dev_err(&control->dev, "failed to resume bundle %u: %d\n",
+ bundle_id, response.status);
+ return gb_control_bundle_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
+{
+ struct gb_control_bundle_pm_request request;
+ struct gb_control_bundle_pm_response response;
+ int ret;
+
+ request.bundle_id = bundle_id;
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev,
+ "failed to send bundle %u deactivate: %d\n", bundle_id,
+ ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+ dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
+ bundle_id, response.status);
+ return gb_control_bundle_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
+{
+ struct gb_control_bundle_pm_request request;
+ struct gb_control_bundle_pm_response response;
+ int ret;
+
+ if (!control->has_bundle_activate)
+ return 0;
+
+ request.bundle_id = bundle_id;
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev,
+ "failed to send bundle %u activate: %d\n", bundle_id,
+ ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+ dev_err(&control->dev, "failed to activate bundle %u: %d\n",
+ bundle_id, response.status);
+ return gb_control_bundle_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+static int gb_control_interface_pm_status_map(u8 status)
+{
+ switch (status) {
+ case GB_CONTROL_INTF_PM_BUSY:
+ return -EBUSY;
+ case GB_CONTROL_INTF_PM_NA:
+ return -ENOMSG;
+ default:
+ return -EREMOTEIO;
+ }
+}
+
+int gb_control_interface_suspend_prepare(struct gb_control *control)
+{
+ struct gb_control_intf_pm_response response;
+ int ret;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev,
+ "failed to send interface suspend prepare: %d\n", ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_INTF_PM_OK) {
+ dev_err(&control->dev, "interface error while preparing suspend: %d\n",
+ response.status);
+ return gb_control_interface_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+int gb_control_interface_deactivate_prepare(struct gb_control *control)
+{
+ struct gb_control_intf_pm_response response;
+ int ret;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
+ 0, &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_INTF_PM_OK) {
+ dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
+ response.status);
+ return gb_control_interface_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+int gb_control_interface_hibernate_abort(struct gb_control *control)
+{
+ struct gb_control_intf_pm_response response;
+ int ret;
+
+ ret = gb_operation_sync(control->connection,
+ GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&control->dev,
+ "failed to send interface aborting hibernate: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (response.status != GB_CONTROL_INTF_PM_OK) {
+ dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
+ response.status);
+ return gb_control_interface_pm_status_map(response.status);
+ }
+
+ return 0;
+}
+
+static ssize_t vendor_string_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_control *control = to_gb_control(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
+}
+static DEVICE_ATTR_RO(vendor_string);
+
+static ssize_t product_string_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_control *control = to_gb_control(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
+}
+static DEVICE_ATTR_RO(product_string);
+
+static struct attribute *control_attrs[] = {
+ &dev_attr_vendor_string.attr,
+ &dev_attr_product_string.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(control);
+
+static void gb_control_release(struct device *dev)
+{
+ struct gb_control *control = to_gb_control(dev);
+
+ gb_connection_destroy(control->connection);
+
+ kfree(control->vendor_string);
+ kfree(control->product_string);
+
+ kfree(control);
+}
+
+struct device_type greybus_control_type = {
+ .name = "greybus_control",
+ .release = gb_control_release,
+};
+
+struct gb_control *gb_control_create(struct gb_interface *intf)
+{
+ struct gb_connection *connection;
+ struct gb_control *control;
+
+ control = kzalloc(sizeof(*control), GFP_KERNEL);
+ if (!control)
+ return ERR_PTR(-ENOMEM);
+
+ control->intf = intf;
+
+ connection = gb_connection_create_control(intf);
+ if (IS_ERR(connection)) {
+ dev_err(&intf->dev,
+ "failed to create control connection: %ld\n",
+ PTR_ERR(connection));
+ kfree(control);
+ return ERR_CAST(connection);
+ }
+
+ control->connection = connection;
+
+ control->dev.parent = &intf->dev;
+ control->dev.bus = &greybus_bus_type;
+ control->dev.type = &greybus_control_type;
+ control->dev.groups = control_groups;
+ control->dev.dma_mask = intf->dev.dma_mask;
+ device_initialize(&control->dev);
+ dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
+
+ gb_connection_set_data(control->connection, control);
+
+ return control;
+}
+
+int gb_control_enable(struct gb_control *control)
+{
+ int ret;
+
+ dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+ ret = gb_connection_enable_tx(control->connection);
+ if (ret) {
+ dev_err(&control->connection->intf->dev,
+ "failed to enable control connection: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gb_control_get_version(control);
+ if (ret)
+ goto err_disable_connection;
+
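+ /* Control-protocol versions newer than 0.1 support the bundle-version operation. */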
+ if (control->protocol_major > 0 || control->protocol_minor > 1)
+ control->has_bundle_version = true;
+
+ /* FIXME: use protocol version instead */
+ if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
+ control->has_bundle_activate = true;
+
+ return 0;
+
+err_disable_connection:
+ gb_connection_disable(control->connection);
+
+ return ret;
+}
+
+void gb_control_disable(struct gb_control *control)
+{
+ dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+ if (control->intf->disconnected)
+ gb_connection_disable_forced(control->connection);
+ else
+ gb_connection_disable(control->connection);
+}
+
+int gb_control_suspend(struct gb_control *control)
+{
+ gb_connection_disable(control->connection);
+
+ return 0;
+}
+
+int gb_control_resume(struct gb_control *control)
+{
+ int ret;
+
+ ret = gb_connection_enable_tx(control->connection);
+ if (ret) {
+ dev_err(&control->connection->intf->dev,
+ "failed to enable control connection: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int gb_control_add(struct gb_control *control)
+{
+ int ret;
+
+ ret = device_add(&control->dev);
+ if (ret) {
+ dev_err(&control->dev,
+ "failed to register control device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void gb_control_del(struct gb_control *control)
+{
+ if (device_is_registered(&control->dev))
+ device_del(&control->dev);
+}
+
+struct gb_control *gb_control_get(struct gb_control *control)
+{
+ get_device(&control->dev);
+
+ return control;
+}
+
+void gb_control_put(struct gb_control *control)
+{
+ put_device(&control->dev);
+}
+
+void gb_control_mode_switch_prepare(struct gb_control *control)
+{
+ gb_connection_mode_switch_prepare(control->connection);
+}
+
+void gb_control_mode_switch_complete(struct gb_control *control)
+{
+ gb_connection_mode_switch_complete(control->connection);
+}
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
new file mode 100644
index 000000000000..f9a60daf9a72
--- /dev/null
+++ b/drivers/staging/greybus/control.h
@@ -0,0 +1,65 @@
+/*
+ * Greybus CPort control protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __CONTROL_H
+#define __CONTROL_H
+
+struct gb_control {
+ struct device dev;
+ struct gb_interface *intf;
+
+ struct gb_connection *connection;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ bool has_bundle_activate;
+ bool has_bundle_version;
+
+ char *vendor_string;
+ char *product_string;
+};
+#define to_gb_control(d) container_of(d, struct gb_control, dev)
+
+struct gb_control *gb_control_create(struct gb_interface *intf);
+int gb_control_enable(struct gb_control *control);
+void gb_control_disable(struct gb_control *control);
+int gb_control_suspend(struct gb_control *control);
+int gb_control_resume(struct gb_control *control);
+int gb_control_add(struct gb_control *control);
+void gb_control_del(struct gb_control *control);
+struct gb_control *gb_control_get(struct gb_control *control);
+void gb_control_put(struct gb_control *control);
+
+int gb_control_get_bundle_versions(struct gb_control *control);
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnecting_operation(struct gb_control *control,
+ u16 cport_id);
+int gb_control_mode_switch_operation(struct gb_control *control);
+void gb_control_mode_switch_prepare(struct gb_control *control);
+void gb_control_mode_switch_complete(struct gb_control *control);
+int gb_control_get_manifest_size_operation(struct gb_interface *intf);
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+ size_t size);
+int gb_control_timesync_enable(struct gb_control *control, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk);
+int gb_control_timesync_disable(struct gb_control *control);
+int gb_control_timesync_get_last_event(struct gb_control *control,
+ u64 *frame_time);
+int gb_control_timesync_authoritative(struct gb_control *control,
+ u64 *frame_time);
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
+int gb_control_interface_suspend_prepare(struct gb_control *control);
+int gb_control_interface_deactivate_prepare(struct gb_control *control);
+int gb_control_interface_hibernate_abort(struct gb_control *control);
+#endif /* __CONTROL_H */
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
new file mode 100644
index 000000000000..1049e9c0edb0
--- /dev/null
+++ b/drivers/staging/greybus/core.c
@@ -0,0 +1,361 @@
+/*
+ * Greybus "Core"
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CREATE_TRACE_POINTS
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_BUNDLE_AUTOSUSPEND_MS 3000
+
+/* Allow greybus to be disabled at boot if needed */
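+/*
+ * Typically set via "nogreybus" on the kernel command line when built in,
+ * or as a "nogreybus" module option when built as a module.
+ */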
+static bool nogreybus;
+#ifdef MODULE
+module_param(nogreybus, bool, 0444);
+#else
+core_param(nogreybus, nogreybus, bool, 0444);
+#endif
+int greybus_disabled(void)
+{
+ return nogreybus;
+}
+EXPORT_SYMBOL_GPL(greybus_disabled);
+
+static bool greybus_match_one_id(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
+ (id->vendor != bundle->intf->vendor_id))
+ return false;
+
+ if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
+ (id->product != bundle->intf->product_id))
+ return false;
+
+ if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
+ (id->class != bundle->class))
+ return false;
+
+ return true;
+}
+
+static const struct greybus_bundle_id *
+greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
+{
+ if (id == NULL)
+ return NULL;
+
+ for (; id->vendor || id->product || id->class || id->driver_info;
+ id++) {
+ if (greybus_match_one_id(bundle, id))
+ return id;
+ }
+
+ return NULL;
+}
+
+static int greybus_match_device(struct device *dev, struct device_driver *drv)
+{
+ struct greybus_driver *driver = to_greybus_driver(drv);
+ struct gb_bundle *bundle;
+ const struct greybus_bundle_id *id;
+
+ if (!is_gb_bundle(dev))
+ return 0;
+
+ bundle = to_gb_bundle(dev);
+
+ id = greybus_match_id(bundle, driver->id_table);
+ if (id)
+ return 1;
+ /* FIXME - Dynamic ids? */
+ return 0;
+}
+
+static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gb_host_device *hd;
+ struct gb_module *module = NULL;
+ struct gb_interface *intf = NULL;
+ struct gb_control *control = NULL;
+ struct gb_bundle *bundle = NULL;
+ struct gb_svc *svc = NULL;
+
+ if (is_gb_host_device(dev)) {
+ hd = to_gb_host_device(dev);
+ } else if (is_gb_module(dev)) {
+ module = to_gb_module(dev);
+ hd = module->hd;
+ } else if (is_gb_interface(dev)) {
+ intf = to_gb_interface(dev);
+ module = intf->module;
+ hd = intf->hd;
+ } else if (is_gb_control(dev)) {
+ control = to_gb_control(dev);
+ intf = control->intf;
+ module = intf->module;
+ hd = intf->hd;
+ } else if (is_gb_bundle(dev)) {
+ bundle = to_gb_bundle(dev);
+ intf = bundle->intf;
+ module = intf->module;
+ hd = intf->hd;
+ } else if (is_gb_svc(dev)) {
+ svc = to_gb_svc(dev);
+ hd = svc->hd;
+ } else {
+ dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
+ return -EINVAL;
+ }
+
+ if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+ return -ENOMEM;
+
+ if (module) {
+ if (add_uevent_var(env, "MODULE=%u", module->module_id))
+ return -ENOMEM;
+ }
+
+ if (intf) {
+ if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+ intf->vendor_id, intf->product_id))
+ return -ENOMEM;
+ }
+
+ if (bundle) {
+ // FIXME
+ // add a uevent that can "load" a bundle type
+ // This is what we need to bind a driver to so use the info
+ // in gmod here as well
+
+ if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void greybus_shutdown(struct device *dev)
+{
+ if (is_gb_host_device(dev)) {
+ struct gb_host_device *hd;
+
+ hd = to_gb_host_device(dev);
+ gb_hd_shutdown(hd);
+ }
+}
+
+struct bus_type greybus_bus_type = {
+ .name = "greybus",
+ .match = greybus_match_device,
+ .uevent = greybus_uevent,
+ .shutdown = greybus_shutdown,
+};
+
+static int greybus_probe(struct device *dev)
+{
+ struct greybus_driver *driver = to_greybus_driver(dev->driver);
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ const struct greybus_bundle_id *id;
+ int retval;
+
+ /* match id */
+ id = greybus_match_id(bundle, driver->id_table);
+ if (!id)
+ return -ENODEV;
+
+ retval = pm_runtime_get_sync(&bundle->intf->dev);
+ if (retval < 0) {
+ pm_runtime_put_noidle(&bundle->intf->dev);
+ return retval;
+ }
+
+ retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
+ if (retval) {
+ pm_runtime_put(&bundle->intf->dev);
+ return retval;
+ }
+
+ /*
+ * Unbound bundle devices are always deactivated. During probe, runtime
+ * PM is enabled and set active, and the usage count is incremented. If
+ * the driver supports runtime PM, it should call pm_runtime_put() in
+ * its probe routine and pm_runtime_get_sync() in its remove routine.
+ */
+ pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ retval = driver->probe(bundle, id);
+ if (retval) {
+ /*
+ * Catch buggy drivers that fail to destroy their connections.
+ */
+ WARN_ON(!list_empty(&bundle->connections));
+
+ gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put(&bundle->intf->dev);
+
+ return retval;
+ }
+
+ gb_timesync_schedule_synchronous(bundle->intf);
+
+ pm_runtime_put(&bundle->intf->dev);
+
+ return 0;
+}
+
+static int greybus_remove(struct device *dev)
+{
+ struct greybus_driver *driver = to_greybus_driver(dev->driver);
+ struct gb_bundle *bundle = to_gb_bundle(dev);
+ struct gb_connection *connection;
+ int retval;
+
+ retval = pm_runtime_get_sync(dev);
+ if (retval < 0)
+ dev_err(dev, "failed to resume bundle: %d\n", retval);
+
+ /*
+ * Disable (non-offloaded) connections early in case the interface is
+ * already gone to avoid unnecessary operation timeouts during
+ * driver disconnect. Otherwise, only disable incoming requests.
+ */
+ list_for_each_entry(connection, &bundle->connections, bundle_links) {
+ if (gb_connection_is_offloaded(connection))
+ continue;
+
+ if (bundle->intf->disconnected)
+ gb_connection_disable_forced(connection);
+ else
+ gb_connection_disable_rx(connection);
+ }
+
+ driver->disconnect(bundle);
+
+ /* Catch buggy drivers that fail to destroy their connections. */
+ WARN_ON(!list_empty(&bundle->connections));
+
+ if (!bundle->intf->disconnected)
+ gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+
+ return 0;
+}
+
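+/*
+ * A minimal sketch of a bundle driver registration (the "example_" names
+ * are placeholders; see gb_fw_core_driver later in this series for a real
+ * user, which registers itself via greybus_register()):
+ *
+ *	static const struct greybus_bundle_id example_id_table[] = {
+ *		{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
+ *		{ }
+ *	};
+ *
+ *	static struct greybus_driver example_driver = {
+ *		.name		= "example",
+ *		.probe		= example_probe,
+ *		.disconnect	= example_disconnect,
+ *		.id_table	= example_id_table,
+ *	};
+ */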
+int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ int retval;
+
+ if (greybus_disabled())
+ return -ENODEV;
+
+ driver->driver.bus = &greybus_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.probe = greybus_probe;
+ driver->driver.remove = greybus_remove;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ retval = driver_register(&driver->driver);
+ if (retval)
+ return retval;
+
+ pr_info("registered new driver %s\n", driver->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(greybus_register_driver);
+
+void greybus_deregister_driver(struct greybus_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(greybus_deregister_driver);
+
+static int __init gb_init(void)
+{
+ int retval;
+
+ if (greybus_disabled())
+ return -ENODEV;
+
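+ /* Make sure the CPORT_ID_BAD sentinel can never be a valid CPort id. */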
+ BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
+
+ gb_debugfs_init();
+
+ retval = bus_register(&greybus_bus_type);
+ if (retval) {
+ pr_err("bus_register failed (%d)\n", retval);
+ goto error_bus;
+ }
+
+ retval = gb_hd_init();
+ if (retval) {
+ pr_err("gb_hd_init failed (%d)\n", retval);
+ goto error_hd;
+ }
+
+ retval = gb_operation_init();
+ if (retval) {
+ pr_err("gb_operation_init failed (%d)\n", retval);
+ goto error_operation;
+ }
+
+ retval = gb_timesync_init();
+ if (retval) {
+ pr_err("gb_timesync_init failed\n");
+ goto error_timesync;
+ }
+ return 0; /* Success */
+
+error_timesync:
+ gb_operation_exit();
+error_operation:
+ gb_hd_exit();
+error_hd:
+ bus_unregister(&greybus_bus_type);
+error_bus:
+ gb_debugfs_cleanup();
+
+ return retval;
+}
+module_init(gb_init);
+
+static void __exit gb_exit(void)
+{
+ gb_timesync_exit();
+ gb_operation_exit();
+ gb_hd_exit();
+ bus_unregister(&greybus_bus_type);
+ gb_debugfs_cleanup();
+ tracepoint_synchronize_unregister();
+}
+module_exit(gb_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
new file mode 100644
index 000000000000..a9d4d3da99a0
--- /dev/null
+++ b/drivers/staging/greybus/debugfs.c
@@ -0,0 +1,31 @@
+/*
+ * Greybus debugfs code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+
+#include "greybus.h"
+
+static struct dentry *gb_debug_root;
+
+void __init gb_debugfs_init(void)
+{
+ gb_debug_root = debugfs_create_dir("greybus", NULL);
+}
+
+void gb_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(gb_debug_root);
+ gb_debug_root = NULL;
+}
+
+struct dentry *gb_debugfs_get(void)
+{
+ return gb_debug_root;
+}
+EXPORT_SYMBOL_GPL(gb_debugfs_get);
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
new file mode 100644
index 000000000000..071bb1cfd3ae
--- /dev/null
+++ b/drivers/staging/greybus/es2.c
@@ -0,0 +1,1597 @@
+/*
+ * Greybus "AP" USB driver for "ES2" controller chips
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kthread.h>
+#include <linux/sizes.h>
+#include <linux/usb.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <asm/unaligned.h>
+
+#include "arpc.h"
+#include "greybus.h"
+#include "greybus_trace.h"
+#include "connection.h"
+
+
+/* Default timeout for USB vendor requests. */
+#define ES2_USB_CTRL_TIMEOUT 500
+
+/* Default timeout for ARPC CPort requests */
+#define ES2_ARPC_CPORT_TIMEOUT 500
+
+/* Fixed CPort numbers */
+#define ES2_CPORT_CDSI0 16
+#define ES2_CPORT_CDSI1 17
+
+/* Memory sizes for the buffers sent to/from the ES2 controller */
+#define ES2_GBUF_MSG_SIZE_MAX 2048
+
+/* Memory sizes for the ARPC buffers */
+#define ARPC_OUT_SIZE_MAX U16_MAX
+#define ARPC_IN_SIZE_MAX 128
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x18d1, 0x1eaf) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+#define APB1_LOG_SIZE SZ_16K
+
+/*
+ * Number of CPort IN urbs in flight at any point in time.
+ * Adjust if we are having stalls in the USB buffer due to not enough urbs in
+ * flight.
+ */
+#define NUM_CPORT_IN_URB 4
+
+/*
+ * Number of CPort OUT urbs in flight at any point in time.
+ * Adjust if we get messages saying we are out of urbs in the system log.
+ */
+#define NUM_CPORT_OUT_URB 8
+
+/*
+ * Number of ARPC in urbs in flight at any point in time.
+ */
+#define NUM_ARPC_IN_URB 2
+
+/*
+ * @endpoint: bulk in endpoint for CPort data
+ * @urb: array of urbs for the CPort in messages
+ * @buffer: array of buffers for the @cport_in_urb urbs
+ */
+struct es2_cport_in {
+ __u8 endpoint;
+ struct urb *urb[NUM_CPORT_IN_URB];
+ u8 *buffer[NUM_CPORT_IN_URB];
+};
+
+/**
+ * es2_ap_dev - ES2 USB Bridge to AP structure
+ * @usb_dev: pointer to the USB device we are attached to.
+ * @usb_intf: pointer to the USB interface we are bound to.
+ * @hd: pointer to our gb_host_device structure
+ *
+ * @cport_in: endpoint, urbs and buffer for cport in messages
+ * @cport_out_endpoint: endpoint for cport out messages
+ * @cport_out_urb: array of urbs for the CPort out messages
+ * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
+ * not.
+ * @cport_out_urb_cancelled: array of flags indicating whether the
+ * corresponding @cport_out_urb is being cancelled
+ * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
+ *
+ * @apb_log_task: task pointer for logging thread
+ * @apb_log_dentry: file system entry for the log file interface
+ * @apb_log_enable_dentry: file system entry for enabling logging
+ * @apb_log_fifo: kernel FIFO to carry logged data
+ * @arpc_urb: array of urbs for the ARPC in messages
+ * @arpc_buffer: array of buffers for the @arpc_urb urbs
+ * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
+ * @arpc_id_cycle: assigns a unique id to each outgoing ARPC
+ * @arpc_lock: locks ARPC list
+ * @arpcs: list of in progress ARPCs
+ */
+struct es2_ap_dev {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_intf;
+ struct gb_host_device *hd;
+
+ struct es2_cport_in cport_in;
+ __u8 cport_out_endpoint;
+ struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
+ bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
+ bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
+ spinlock_t cport_out_urb_lock;
+
+ bool cdsi1_in_use;
+
+ struct task_struct *apb_log_task;
+ struct dentry *apb_log_dentry;
+ struct dentry *apb_log_enable_dentry;
+ DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
+
+ __u8 arpc_endpoint_in;
+ struct urb *arpc_urb[NUM_ARPC_IN_URB];
+ u8 *arpc_buffer[NUM_ARPC_IN_URB];
+
+ int arpc_id_cycle;
+ spinlock_t arpc_lock;
+ struct list_head arpcs;
+};
+
+/**
+ * timesync_enable_request - Enable timesync in an APBridge
+ * @count: number of TimeSync Pulses to expect
+ * @frame_time: the initial FrameTime at the first TimeSync Pulse
+ * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
+ * @refclk: The AP mandated reference clock to run FrameTime at
+ */
+struct timesync_enable_request {
+ __u8 count;
+ __le64 frame_time;
+ __le32 strobe_delay;
+ __le32 refclk;
+} __packed;
+
+/**
+ * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
+ * @frame_time: An array of authoritative FrameTimes provided by the SVC
+ * and relayed to the APBridge by the AP
+ */
+struct timesync_authoritative_request {
+ __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
+} __packed;
+
+struct arpc {
+ struct list_head list;
+ struct arpc_request_message *req;
+ struct arpc_response_message *resp;
+ struct completion response_received;
+ bool active;
+};
+
+static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
+{
+ return (struct es2_ap_dev *)&hd->hd_priv;
+}
+
+static void cport_out_callback(struct urb *urb);
+static void usb_log_enable(struct es2_ap_dev *es2);
+static void usb_log_disable(struct es2_ap_dev *es2);
+static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
+ size_t size, int *result, unsigned int timeout);
+
+static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
+{
+ struct usb_device *udev = es2->usb_dev;
+ u8 *data;
+ int retval;
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ memcpy(data, req, size);
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
+ if (retval < 0)
+ dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
+ else
+ retval = 0;
+
+ kfree(data);
+ return retval;
+}
+
+static void ap_urb_complete(struct urb *urb)
+{
+ struct usb_ctrlrequest *dr = urb->context;
+
+ kfree(dr);
+ usb_free_urb(urb);
+}
+
+static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
+{
+ struct usb_device *udev = es2->usb_dev;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ u8 *buf;
+ int retval;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
+ if (!dr) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ buf = (u8 *)dr + sizeof(*dr);
+ memcpy(buf, req, size);
+
+ dr->bRequest = cmd;
+ dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
+ dr->wValue = 0;
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(size);
+
+ usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
+ (unsigned char *)dr, buf, size,
+ ap_urb_complete, dr);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval) {
+ usb_free_urb(urb);
+ kfree(dr);
+ }
+ return retval;
+}
+
+static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool async)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+
+ if (async)
+ return output_async(es2, req, size, cmd);
+
+ return output_sync(es2, req, size, cmd);
+}
+
+static int es2_cport_in_enable(struct es2_ap_dev *es2,
+ struct es2_cport_in *cport_in)
+{
+ struct urb *urb;
+ int ret;
+ int i;
+
+ for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+ urb = cport_in->urb[i];
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&es2->usb_dev->dev,
+ "failed to submit in-urb: %d\n", ret);
+ goto err_kill_urbs;
+ }
+ }
+
+ return 0;
+
+err_kill_urbs:
+ for (--i; i >= 0; --i) {
+ urb = cport_in->urb[i];
+ usb_kill_urb(urb);
+ }
+
+ return ret;
+}
+
+static void es2_cport_in_disable(struct es2_ap_dev *es2,
+ struct es2_cport_in *cport_in)
+{
+ struct urb *urb;
+ int i;
+
+ for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+ urb = cport_in->urb[i];
+ usb_kill_urb(urb);
+ }
+}
+
+static int es2_arpc_in_enable(struct es2_ap_dev *es2)
+{
+ struct urb *urb;
+ int ret;
+ int i;
+
+ for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+ urb = es2->arpc_urb[i];
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&es2->usb_dev->dev,
+ "failed to submit arpc in-urb: %d\n", ret);
+ goto err_kill_urbs;
+ }
+ }
+
+ return 0;
+
+err_kill_urbs:
+ for (--i; i >= 0; --i) {
+ urb = es2->arpc_urb[i];
+ usb_kill_urb(urb);
+ }
+
+ return ret;
+}
+
+static void es2_arpc_in_disable(struct es2_ap_dev *es2)
+{
+ struct urb *urb;
+ int i;
+
+ for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+ urb = es2->arpc_urb[i];
+ usb_kill_urb(urb);
+ }
+}
+
+static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
+{
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+
+ /* Look in our pool of allocated urbs first, as that's the "fastest" */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ if (es2->cport_out_urb_busy[i] == false &&
+ es2->cport_out_urb_cancelled[i] == false) {
+ es2->cport_out_urb_busy[i] = true;
+ urb = es2->cport_out_urb[i];
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+ if (urb)
+ return urb;
+
+ /*
+ * Crap, pool is empty, complain to the syslog and go allocate one
+ * dynamically as we have to succeed.
+ */
+ dev_dbg(&es2->usb_dev->dev,
+ "No free CPort OUT urbs, having to dynamically allocate one!\n");
+ return usb_alloc_urb(0, gfp_mask);
+}
+
+static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
+{
+ unsigned long flags;
+ int i;
+ /*
+ * See if this was an urb in our pool, if so mark it "free", otherwise
+ * we need to free it ourselves.
+ */
+ spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ if (urb == es2->cport_out_urb[i]) {
+ es2->cport_out_urb_busy[i] = false;
+ urb = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+ /* If urb is not NULL, then we need to free this urb */
+ usb_free_urb(urb);
+}
+
+/*
+ * We (ab)use the operation-message header pad bytes to transfer the
+ * cport id in order to minimise overhead.
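+ *
+ * Note: only pad[0] is used, which is why apb_get_cport_count() caps the
+ * number of CPorts at U8_MAX.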
+ */
+static void
+gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
+{
+ header->pad[0] = cport_id;
+}
+
+/* Clear the pad bytes used for the CPort id */
+static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
+{
+ header->pad[0] = 0;
+}
+
+/* Extract the CPort id packed into the header, and clear it */
+static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
+{
+ u16 cport_id = header->pad[0];
+
+ gb_message_cport_clear(header);
+
+ return cport_id;
+}
+
+/*
+ * Returns zero if the message was successfully queued, or a negative errno
+ * otherwise.
+ */
+static int message_send(struct gb_host_device *hd, u16 cport_id,
+ struct gb_message *message, gfp_t gfp_mask)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+ size_t buffer_size;
+ int retval;
+ struct urb *urb;
+ unsigned long flags;
+
+ /*
+ * The data actually transferred will include an indication
+ * of where the data should be sent. Do one last check of
+ * the target CPort id before filling it in.
+ */
+ if (!cport_id_valid(hd, cport_id)) {
+ dev_err(&udev->dev, "invalid cport %u\n", cport_id);
+ return -EINVAL;
+ }
+
+ /* Find a free urb */
+ urb = next_free_urb(es2, gfp_mask);
+ if (!urb)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+ message->hcpriv = urb;
+ spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+ /* Pack the cport id into the message header */
+ gb_message_cport_pack(message->header, cport_id);
+
+ buffer_size = sizeof(*message->header) + message->payload_size;
+
+ usb_fill_bulk_urb(urb, udev,
+ usb_sndbulkpipe(udev,
+ es2->cport_out_endpoint),
+ message->buffer, buffer_size,
+ cport_out_callback, message);
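+ /*
+ * Terminate transfers that are an exact multiple of the endpoint
+ * size with a zero-length packet.
+ */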
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
+ trace_gb_message_submit(message);
+
+ retval = usb_submit_urb(urb, gfp_mask);
+ if (retval) {
+ dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
+
+ spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+ free_urb(es2, urb);
+ gb_message_cport_clear(message->header);
+
+ return retval;
+ }
+
+ return 0;
+}
+
+/*
+ * Can not be called in atomic context.
+ */
+static void message_cancel(struct gb_message *message)
+{
+ struct gb_host_device *hd = message->operation->connection->hd;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct urb *urb;
+ int i;
+
+ might_sleep();
+
+ spin_lock_irq(&es2->cport_out_urb_lock);
+ urb = message->hcpriv;
+
+ /* Prevent dynamically allocated urb from being deallocated. */
+ usb_get_urb(urb);
+
+ /* Prevent pre-allocated urb from being reused. */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ if (urb == es2->cport_out_urb[i]) {
+ es2->cport_out_urb_cancelled[i] = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&es2->cport_out_urb_lock);
+
+ usb_kill_urb(urb);
+
+ if (i < NUM_CPORT_OUT_URB) {
+ spin_lock_irq(&es2->cport_out_urb_lock);
+ es2->cport_out_urb_cancelled[i] = false;
+ spin_unlock_irq(&es2->cport_out_urb_lock);
+ }
+
+ usb_free_urb(urb);
+}
+
+static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
+ unsigned long flags)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct ida *id_map = &hd->cport_id_map;
+ int ida_start, ida_end;
+
+ switch (cport_id) {
+ case ES2_CPORT_CDSI0:
+ case ES2_CPORT_CDSI1:
+ dev_err(&hd->dev, "cport %d not available\n", cport_id);
+ return -EBUSY;
+ }
+
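+ /* Offloaded connections that request CDSI1 get the dedicated CDSI1 CPort. */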
+ if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
+ flags & GB_CONNECTION_FLAG_CDSI1) {
+ if (es2->cdsi1_in_use) {
+ dev_err(&hd->dev, "CDSI1 already in use\n");
+ return -EBUSY;
+ }
+
+ es2->cdsi1_in_use = true;
+
+ return ES2_CPORT_CDSI1;
+ }
+
+ if (cport_id < 0) {
+ ida_start = 0;
+ ida_end = hd->num_cports;
+ } else if (cport_id < hd->num_cports) {
+ ida_start = cport_id;
+ ida_end = cport_id + 1;
+ } else {
+ dev_err(&hd->dev, "cport %d not available\n", cport_id);
+ return -EINVAL;
+ }
+
+ return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+}
+
+static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+
+ switch (cport_id) {
+ case ES2_CPORT_CDSI1:
+ es2->cdsi1_in_use = false;
+ return;
+ }
+
+ ida_simple_remove(&hd->cport_id_map, cport_id);
+}
+
+static int cport_enable(struct gb_host_device *hd, u16 cport_id,
+ unsigned long flags)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+ struct gb_apb_request_cport_flags *req;
+ u32 connection_flags;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ connection_flags = 0;
+ if (flags & GB_CONNECTION_FLAG_CONTROL)
+ connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
+ if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
+ connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;
+
+ req->flags = cpu_to_le32(connection_flags);
+
+ dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
+ cport_id, connection_flags);
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_CPORT_FLAGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, cport_id, 0,
+ req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
+ if (ret != sizeof(*req)) {
+ dev_err(&udev->dev, "failed to set cport flags for port %d\n",
+ cport_id);
+ if (ret >= 0)
+ ret = -EIO;
+
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(req);
+
+ return ret;
+}
+
+static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct device *dev = &es2->usb_dev->dev;
+ struct arpc_cport_connected_req req;
+ int ret;
+
+ req.cport_id = cpu_to_le16(cport_id);
+ ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
+ NULL, ES2_ARPC_CPORT_TIMEOUT);
+ if (ret) {
+ dev_err(dev, "failed to set connected state for cport %u: %d\n",
+ cport_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct device *dev = &es2->usb_dev->dev;
+ struct arpc_cport_flush_req req;
+ int ret;
+
+ req.cport_id = cpu_to_le16(cport_id);
+ ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
+ NULL, ES2_ARPC_CPORT_TIMEOUT);
+ if (ret) {
+ dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
+ u8 phase, unsigned int timeout)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct device *dev = &es2->usb_dev->dev;
+ struct arpc_cport_shutdown_req req;
+ int result;
+ int ret;
+
+ if (timeout > U16_MAX)
+ return -EINVAL;
+
+ req.cport_id = cpu_to_le16(cport_id);
+ req.timeout = cpu_to_le16(timeout);
+ req.phase = phase;
+ ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
+ &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
+ if (ret) {
+ dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
+ cport_id, ret, result);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
+ size_t peer_space, unsigned int timeout)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct device *dev = &es2->usb_dev->dev;
+ struct arpc_cport_quiesce_req req;
+ int result;
+ int ret;
+
+ if (peer_space > U16_MAX)
+ return -EINVAL;
+
+ if (timeout > U16_MAX)
+ return -EINVAL;
+
+ req.cport_id = cpu_to_le16(cport_id);
+ req.peer_space = cpu_to_le16(peer_space);
+ req.timeout = cpu_to_le16(timeout);
+ ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
+ &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
+ if (ret) {
+ dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
+ cport_id, ret, result);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
+{
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct device *dev = &es2->usb_dev->dev;
+ struct arpc_cport_clear_req req;
+ int ret;
+
+ req.cport_id = cpu_to_le16(cport_id);
+ ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
+ NULL, ES2_ARPC_CPORT_TIMEOUT);
+ if (ret) {
+ dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
+{
+ int retval;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_LATENCY_TAG_EN,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, cport_id, 0, NULL,
+ 0, ES2_USB_CTRL_TIMEOUT);
+
+ if (retval < 0)
+ dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
+ cport_id);
+ return retval;
+}
+
+static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
+{
+ int retval;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_LATENCY_TAG_DIS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, cport_id, 0, NULL,
+ 0, ES2_USB_CTRL_TIMEOUT);
+
+ if (retval < 0)
+ dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
+ cport_id);
+ return retval;
+}
+
+static int timesync_enable(struct gb_host_device *hd, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+ int retval;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+ struct gb_control_timesync_enable_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->count = count;
+ request->frame_time = cpu_to_le64(frame_time);
+ request->strobe_delay = cpu_to_le32(strobe_delay);
+ request->refclk = cpu_to_le32(refclk);
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_TIMESYNC_ENABLE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, 0, 0, request,
+ sizeof(*request), ES2_USB_CTRL_TIMEOUT);
+ if (retval < 0)
+ dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
+
+ kfree(request);
+ return retval;
+}
+
+static int timesync_disable(struct gb_host_device *hd)
+{
+ int retval;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_TIMESYNC_DISABLE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, 0, 0, NULL,
+ 0, ES2_USB_CTRL_TIMEOUT);
+ if (retval < 0)
+ dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
+
+ return retval;
+}
+
+static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
+{
+ int retval, i;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+ struct timesync_authoritative_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+ request->frame_time[i] = cpu_to_le64(frame_time[i]);
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, 0, 0, request,
+ sizeof(*request), ES2_USB_CTRL_TIMEOUT);
+ if (retval < 0)
+ dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
+
+ kfree(request);
+ return retval;
+}
+
+static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
+{
+ int retval;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ struct usb_device *udev = es2->usb_dev;
+ __le64 *response_frame_time;
+
+ response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
+ if (!response_frame_time)
+ return -ENOMEM;
+
+ retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, 0, 0, response_frame_time,
+ sizeof(*response_frame_time),
+ ES2_USB_CTRL_TIMEOUT);
+
+ if (retval != sizeof(*response_frame_time)) {
+ dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
+ retval);
+
+ if (retval >= 0)
+ retval = -EIO;
+
+ goto out;
+ }
+ *frame_time = le64_to_cpu(*response_frame_time);
+ retval = 0;
+out:
+ kfree(response_frame_time);
+ return retval;
+}
+
+static struct gb_hd_driver es2_driver = {
+ .hd_priv_size = sizeof(struct es2_ap_dev),
+ .message_send = message_send,
+ .message_cancel = message_cancel,
+ .cport_allocate = es2_cport_allocate,
+ .cport_release = es2_cport_release,
+ .cport_enable = cport_enable,
+ .cport_connected = es2_cport_connected,
+ .cport_flush = es2_cport_flush,
+ .cport_shutdown = es2_cport_shutdown,
+ .cport_quiesce = es2_cport_quiesce,
+ .cport_clear = es2_cport_clear,
+ .latency_tag_enable = latency_tag_enable,
+ .latency_tag_disable = latency_tag_disable,
+ .output = output,
+ .timesync_enable = timesync_enable,
+ .timesync_disable = timesync_disable,
+ .timesync_authoritative = timesync_authoritative,
+ .timesync_get_last_event = timesync_get_last_event,
+};
+
+/* Common function to report consistent warnings based on URB status */
+static int check_urb_status(struct urb *urb)
+{
+ struct device *dev = &urb->dev->dev;
+ int status = urb->status;
+
+ switch (status) {
+ case 0:
+ return 0;
+
+ case -EOVERFLOW:
+ dev_err(dev, "%s: overflow actual length is %d\n",
+ __func__, urb->actual_length);
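+ /* fall through */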
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -EILSEQ:
+ case -EPROTO:
+ /* device is gone, stop sending */
+ return status;
+ }
+ dev_err(dev, "%s: unknown status %d\n", __func__, status);
+
+ return -EAGAIN;
+}
+
+static void es2_destroy(struct es2_ap_dev *es2)
+{
+ struct usb_device *udev;
+ struct urb *urb;
+ int i;
+
+ debugfs_remove(es2->apb_log_enable_dentry);
+ usb_log_disable(es2);
+
+ /* Tear down everything! */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ urb = es2->cport_out_urb[i];
+ usb_kill_urb(urb);
+ usb_free_urb(urb);
+ es2->cport_out_urb[i] = NULL;
+ es2->cport_out_urb_busy[i] = false; /* just to be anal */
+ }
+
+ for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+ usb_free_urb(es2->arpc_urb[i]);
+ kfree(es2->arpc_buffer[i]);
+ es2->arpc_buffer[i] = NULL;
+ }
+
+ for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+ usb_free_urb(es2->cport_in.urb[i]);
+ kfree(es2->cport_in.buffer[i]);
+ es2->cport_in.buffer[i] = NULL;
+ }
+
+ /* release reserved CDSI0 and CDSI1 cports */
+ gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
+ gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
+
+ udev = es2->usb_dev;
+ gb_hd_put(es2->hd);
+
+ usb_put_dev(udev);
+}
+
+static void cport_in_callback(struct urb *urb)
+{
+ struct gb_host_device *hd = urb->context;
+ struct device *dev = &urb->dev->dev;
+ struct gb_operation_msg_hdr *header;
+ int status = check_urb_status(urb);
+ int retval;
+ u16 cport_id;
+
+ if (status) {
+ if ((status == -EAGAIN) || (status == -EPROTO))
+ goto exit;
+
+ /* The urb is being unlinked */
+ if (status == -ENOENT || status == -ESHUTDOWN)
+ return;
+
+ dev_err(dev, "urb cport in error %d (dropped)\n", status);
+ return;
+ }
+
+ if (urb->actual_length < sizeof(*header)) {
+ dev_err(dev, "short message received\n");
+ goto exit;
+ }
+
+ /* Extract the CPort id, which is packed in the message header */
+ header = urb->transfer_buffer;
+ cport_id = gb_message_cport_unpack(header);
+
+ if (cport_id_valid(hd, cport_id)) {
+ greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
+ urb->actual_length);
+ } else {
+ dev_err(dev, "invalid cport id %u received\n", cport_id);
+ }
+exit:
+ /* put our urb back in the request pool */
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
+}
+
+static void cport_out_callback(struct urb *urb)
+{
+ struct gb_message *message = urb->context;
+ struct gb_host_device *hd = message->operation->connection->hd;
+ struct es2_ap_dev *es2 = hd_to_es2(hd);
+ int status = check_urb_status(urb);
+ unsigned long flags;
+
+ gb_message_cport_clear(message->header);
+
+ spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+
+ /*
+ * Tell the submitter that the message send (attempt) is
+ * complete, and report the status.
+ */
+ greybus_message_sent(hd, message, status);
+
+ free_urb(es2, urb);
+}
+
+static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
+{
+ struct arpc *rpc;
+
+ if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
+ return NULL;
+
+ rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return NULL;
+
+ INIT_LIST_HEAD(&rpc->list);
+ rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
+ if (!rpc->req)
+ goto err_free_rpc;
+
+ rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
+ if (!rpc->resp)
+ goto err_free_req;
+
+ rpc->req->type = type;
+ rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
+ memcpy(rpc->req->data, payload, size);
+
+ init_completion(&rpc->response_received);
+
+ return rpc;
+
+err_free_req:
+ kfree(rpc->req);
+err_free_rpc:
+ kfree(rpc);
+
+ return NULL;
+}
+
+static void arpc_free(struct arpc *rpc)
+{
+ kfree(rpc->req);
+ kfree(rpc->resp);
+ kfree(rpc);
+}
+
+static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
+{
+ struct arpc *rpc;
+
+ list_for_each_entry(rpc, &es2->arpcs, list) {
+ if (rpc->req->id == id)
+ return rpc;
+ }
+
+ return NULL;
+}
+
+static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
+{
+ rpc->active = true;
+ rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
+ list_add_tail(&rpc->list, &es2->arpcs);
+}
+
+static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
+{
+ if (rpc->active) {
+ rpc->active = false;
+ list_del(&rpc->list);
+ }
+}
+
+static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
+{
+ struct usb_device *udev = es2->usb_dev;
+ int retval;
+
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ GB_APB_REQUEST_ARPC_RUN,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ 0, 0,
+ rpc->req, le16_to_cpu(rpc->req->size),
+ ES2_USB_CTRL_TIMEOUT);
+ if (retval != le16_to_cpu(rpc->req->size)) {
+ dev_err(&udev->dev,
+ "failed to send ARPC request %d: %d\n",
+ rpc->req->type, retval);
+ if (retval > 0)
+ retval = -EIO;
+ return retval;
+ }
+
+ return 0;
+}
+
+static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
+ size_t size, int *result, unsigned int timeout)
+{
+ struct arpc *rpc;
+ unsigned long flags;
+ int retval;
+
+ if (result)
+ *result = 0;
+
+ rpc = arpc_alloc(payload, size, type);
+ if (!rpc)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&es2->arpc_lock, flags);
+ arpc_add(es2, rpc);
+ spin_unlock_irqrestore(&es2->arpc_lock, flags);
+
+ retval = arpc_send(es2, rpc, timeout);
+ if (retval)
+ goto out_arpc_del;
+
+ retval = wait_for_completion_interruptible_timeout(
+ &rpc->response_received,
+ msecs_to_jiffies(timeout));
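+ /* 0 means timed out, negative means interrupted, positive means completed. */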
+ if (retval <= 0) {
+ if (!retval)
+ retval = -ETIMEDOUT;
+ goto out_arpc_del;
+ }
+
+ if (rpc->resp->result) {
+ retval = -EREMOTEIO;
+ if (result)
+ *result = rpc->resp->result;
+ } else {
+ retval = 0;
+ }
+
+out_arpc_del:
+ spin_lock_irqsave(&es2->arpc_lock, flags);
+ arpc_del(es2, rpc);
+ spin_unlock_irqrestore(&es2->arpc_lock, flags);
+ arpc_free(rpc);
+
+ if (retval < 0 && retval != -EREMOTEIO) {
+ dev_err(&es2->usb_dev->dev,
+ "failed to execute ARPC: %d\n", retval);
+ }
+
+ return retval;
+}
+
+static void arpc_in_callback(struct urb *urb)
+{
+ struct es2_ap_dev *es2 = urb->context;
+ struct device *dev = &urb->dev->dev;
+ int status = check_urb_status(urb);
+ struct arpc *rpc;
+ struct arpc_response_message *resp;
+ unsigned long flags;
+ int retval;
+
+ if (status) {
+ if ((status == -EAGAIN) || (status == -EPROTO))
+ goto exit;
+
+ /* The urb is being unlinked */
+ if (status == -ENOENT || status == -ESHUTDOWN)
+ return;
+
+ dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
+ return;
+ }
+
+ if (urb->actual_length < sizeof(*resp)) {
+ dev_err(dev, "short aprc response received\n");
+ goto exit;
+ }
+
+ resp = urb->transfer_buffer;
+ spin_lock_irqsave(&es2->arpc_lock, flags);
+ rpc = arpc_find(es2, resp->id);
+ if (!rpc) {
+ dev_err(dev, "invalid arpc response id received: %u\n",
+ le16_to_cpu(resp->id));
+ spin_unlock_irqrestore(&es2->arpc_lock, flags);
+ goto exit;
+ }
+
+ arpc_del(es2, rpc);
+ memcpy(rpc->resp, resp, sizeof(*resp));
+ complete(&rpc->response_received);
+ spin_unlock_irqrestore(&es2->arpc_lock, flags);
+
+exit:
+ /* put our urb back in the request pool */
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
+}
+
+#define APB1_LOG_MSG_SIZE 64
+static void apb_log_get(struct es2_ap_dev *es2, char *buf)
+{
+ int retval;
+
+ do {
+ retval = usb_control_msg(es2->usb_dev,
+ usb_rcvctrlpipe(es2->usb_dev, 0),
+ GB_APB_REQUEST_LOG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 0x00, 0x00,
+ buf,
+ APB1_LOG_MSG_SIZE,
+ ES2_USB_CTRL_TIMEOUT);
+ if (retval > 0)
+ kfifo_in(&es2->apb_log_fifo, buf, retval);
+ } while (retval > 0);
+}
+
+static int apb_log_poll(void *data)
+{
+ struct es2_ap_dev *es2 = data;
+ char *buf;
+
+ buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (!kthread_should_stop()) {
+ msleep(1000);
+ apb_log_get(es2, buf);
+ }
+
+ kfree(buf);
+
+ return 0;
+}
+
+static ssize_t apb_log_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct es2_ap_dev *es2 = f->f_inode->i_private;
+ ssize_t ret;
+ size_t copied;
+ char *tmp_buf;
+
+ if (count > APB1_LOG_SIZE)
+ count = APB1_LOG_SIZE;
+
+ tmp_buf = kmalloc(count, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
+ ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
+
+ kfree(tmp_buf);
+
+ return ret;
+}
+
+static const struct file_operations apb_log_fops = {
+ .read = apb_log_read,
+};
+
+static void usb_log_enable(struct es2_ap_dev *es2)
+{
+ if (!IS_ERR_OR_NULL(es2->apb_log_task))
+ return;
+
+ /* get log from APB1 */
+ es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
+ if (IS_ERR(es2->apb_log_task))
+ return;
+ /* XXX We will need to rename this per APB */
+ es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
+ gb_debugfs_get(), es2,
+ &apb_log_fops);
+}
+
+static void usb_log_disable(struct es2_ap_dev *es2)
+{
+ if (IS_ERR_OR_NULL(es2->apb_log_task))
+ return;
+
+ debugfs_remove(es2->apb_log_dentry);
+ es2->apb_log_dentry = NULL;
+
+ kthread_stop(es2->apb_log_task);
+ es2->apb_log_task = NULL;
+}
+
+static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct es2_ap_dev *es2 = f->f_inode->i_private;
+ int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
+ char tmp_buf[3];
+
+ sprintf(tmp_buf, "%d\n", enable);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
+}
+
+static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int enable;
+ ssize_t retval;
+ struct es2_ap_dev *es2 = f->f_inode->i_private;
+
+ retval = kstrtoint_from_user(buf, count, 10, &enable);
+ if (retval)
+ return retval;
+
+ if (enable)
+ usb_log_enable(es2);
+ else
+ usb_log_disable(es2);
+
+ return count;
+}
+
+static const struct file_operations apb_log_enable_fops = {
+ .read = apb_log_enable_read,
+ .write = apb_log_enable_write,
+};
+
+static int apb_get_cport_count(struct usb_device *udev)
+{
+ int retval;
+ __le16 *cport_count;
+
+ cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
+ if (!cport_count)
+ return -ENOMEM;
+
+ retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ GB_APB_REQUEST_CPORT_COUNT,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE, 0, 0, cport_count,
+ sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
+ if (retval != sizeof(*cport_count)) {
+ dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
+ retval);
+
+ if (retval >= 0)
+ retval = -EIO;
+
+ goto out;
+ }
+
+ retval = le16_to_cpu(*cport_count);
+
+ /* We need to fit a CPort ID in one byte of a message header */
+ if (retval > U8_MAX) {
+ retval = U8_MAX;
+ dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
+ }
+
+out:
+ kfree(cport_count);
+ return retval;
+}
+
+/*
+ * The ES2 USB Bridge device has 15 endpoints
+ * 1 Control - usual USB stuff + AP -> APBridgeA messages
+ * 7 Bulk IN - CPort data in (the second bulk IN found is used for ARPC)
+ * 7 Bulk OUT - CPort data out
+ */
+static int ap_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct es2_ap_dev *es2;
+ struct gb_host_device *hd;
+ struct usb_device *udev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ __u8 ep_addr;
+ int retval;
+ int i;
+ int num_cports;
+ bool bulk_out_found = false;
+ bool bulk_in_found = false;
+ bool arpc_in_found = false;
+
+ udev = usb_get_dev(interface_to_usbdev(interface));
+
+ num_cports = apb_get_cport_count(udev);
+ if (num_cports < 0) {
+ usb_put_dev(udev);
+ dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
+ num_cports);
+ return num_cports;
+ }
+
+ hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
+ num_cports);
+ if (IS_ERR(hd)) {
+ usb_put_dev(udev);
+ return PTR_ERR(hd);
+ }
+
+ es2 = hd_to_es2(hd);
+ es2->hd = hd;
+ es2->usb_intf = interface;
+ es2->usb_dev = udev;
+ spin_lock_init(&es2->cport_out_urb_lock);
+ INIT_KFIFO(es2->apb_log_fifo);
+ usb_set_intfdata(interface, es2);
+
+ /*
+ * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
+ * dynamically.
+ */
+ retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
+ if (retval)
+ goto error;
+ retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
+ if (retval)
+ goto error;
+
+ /* find all bulk endpoints */
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+ ep_addr = endpoint->bEndpointAddress;
+
+ if (usb_endpoint_is_bulk_in(endpoint)) {
+ if (!bulk_in_found) {
+ es2->cport_in.endpoint = ep_addr;
+ bulk_in_found = true;
+ } else if (!arpc_in_found) {
+ es2->arpc_endpoint_in = ep_addr;
+ arpc_in_found = true;
+ } else {
+ dev_warn(&udev->dev,
+ "Unused bulk IN endpoint found: 0x%02x\n",
+ ep_addr);
+ }
+ continue;
+ }
+ if (usb_endpoint_is_bulk_out(endpoint)) {
+ if (!bulk_out_found) {
+ es2->cport_out_endpoint = ep_addr;
+ bulk_out_found = true;
+ } else {
+ dev_warn(&udev->dev,
+ "Unused bulk OUT endpoint found: 0x%02x\n",
+ ep_addr);
+ }
+ continue;
+ }
+ dev_warn(&udev->dev,
+ "Unknown endpoint type found, address 0x%02x\n",
+ ep_addr);
+ }
+ if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
+ dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /* Allocate buffers for our cport in messages */
+ for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
+ struct urb *urb;
+ u8 *buffer;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ es2->cport_in.urb[i] = urb;
+
+ buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
+ if (!buffer) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ usb_fill_bulk_urb(urb, udev,
+ usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
+ buffer, ES2_GBUF_MSG_SIZE_MAX,
+ cport_in_callback, hd);
+
+ es2->cport_in.buffer[i] = buffer;
+ }
+
+ /* Allocate buffers for ARPC in messages */
+ for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
+ struct urb *urb;
+ u8 *buffer;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ es2->arpc_urb[i] = urb;
+
+ buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
+ if (!buffer) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ usb_fill_bulk_urb(urb, udev,
+ usb_rcvbulkpipe(udev,
+ es2->arpc_endpoint_in),
+ buffer, ARPC_IN_SIZE_MAX,
+ arpc_in_callback, es2);
+
+ es2->arpc_buffer[i] = buffer;
+ }
+
+ /* Allocate urbs for our CPort OUT messages */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ struct urb *urb;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ es2->cport_out_urb[i] = urb;
+ es2->cport_out_urb_busy[i] = false; /* just to be anal */
+ }
+
+ /* XXX We will need to rename this per APB */
+ es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
+ (S_IWUSR | S_IRUGO),
+ gb_debugfs_get(), es2,
+ &apb_log_enable_fops);
+
+ INIT_LIST_HEAD(&es2->arpcs);
+ spin_lock_init(&es2->arpc_lock);
+
+ if (es2_arpc_in_enable(es2))
+ goto error;
+
+ retval = gb_hd_add(hd);
+ if (retval)
+ goto err_disable_arpc_in;
+
+ retval = es2_cport_in_enable(es2, &es2->cport_in);
+ if (retval)
+ goto err_hd_del;
+
+ return 0;
+
+err_hd_del:
+ gb_hd_del(hd);
+err_disable_arpc_in:
+ es2_arpc_in_disable(es2);
+error:
+ es2_destroy(es2);
+
+ return retval;
+}
+
+static void ap_disconnect(struct usb_interface *interface)
+{
+ struct es2_ap_dev *es2 = usb_get_intfdata(interface);
+
+ gb_hd_del(es2->hd);
+
+ es2_cport_in_disable(es2, &es2->cport_in);
+ es2_arpc_in_disable(es2);
+
+ es2_destroy(es2);
+}
+
+static struct usb_driver es2_ap_driver = {
+ .name = "es2_ap_driver",
+ .probe = ap_probe,
+ .disconnect = ap_disconnect,
+ .id_table = id_table,
+ .soft_unbind = 1,
+};
+
+module_usb_driver(es2_ap_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
new file mode 100644
index 000000000000..f4f0db1cefe8
--- /dev/null
+++ b/drivers/staging/greybus/firmware.h
@@ -0,0 +1,42 @@
+/*
+ * Greybus Firmware Management Header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __FIRMWARE_H
+#define __FIRMWARE_H
+
+#include "greybus.h"
+
+#define FW_NAME_PREFIX "gmp_"
+
+/*
+ * Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
+ * (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
+ */
+#define FW_NAME_SIZE 56
+
+/* Firmware Management Protocol specific functions */
+int fw_mgmt_init(void);
+void fw_mgmt_exit(void);
+struct gb_connection *to_fw_mgmt_connection(struct device *dev);
+int gb_fw_mgmt_request_handler(struct gb_operation *op);
+int gb_fw_mgmt_connection_init(struct gb_connection *connection);
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection);
+
+/* Firmware Download Protocol specific functions */
+int gb_fw_download_request_handler(struct gb_operation *op);
+int gb_fw_download_connection_init(struct gb_connection *connection);
+void gb_fw_download_connection_exit(struct gb_connection *connection);
+
+/* CAP Protocol specific functions */
+int cap_init(void);
+void cap_exit(void);
+int gb_cap_connection_init(struct gb_connection *connection);
+void gb_cap_connection_exit(struct gb_connection *connection);
+
+#endif /* __FIRMWARE_H */
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
new file mode 100644
index 000000000000..454a98957ba5
--- /dev/null
+++ b/drivers/staging/greybus/fw-core.c
@@ -0,0 +1,312 @@
+/*
+ * Greybus Firmware Core Bundle Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware.h>
+#include "firmware.h"
+#include "greybus.h"
+#include "spilib.h"
+
+struct gb_fw_core {
+ struct gb_connection *download_connection;
+ struct gb_connection *mgmt_connection;
+ struct gb_connection *spi_connection;
+ struct gb_connection *cap_connection;
+};
+
+static struct spilib_ops *spilib_ops;
+
+struct gb_connection *to_fw_mgmt_connection(struct device *dev)
+{
+ struct gb_fw_core *fw_core = dev_get_drvdata(dev);
+
+ return fw_core->mgmt_connection;
+}
+
+static int gb_fw_spi_connection_init(struct gb_connection *connection)
+{
+ int ret;
+
+ if (!connection)
+ return 0;
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ return ret;
+
+ ret = gb_spilib_master_init(connection, &connection->bundle->dev,
+ spilib_ops);
+ if (ret) {
+ gb_connection_disable(connection);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gb_fw_spi_connection_exit(struct gb_connection *connection)
+{
+ if (!connection)
+ return;
+
+ gb_spilib_master_exit(connection);
+ gb_connection_disable(connection);
+}
+
+static int gb_fw_core_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_fw_core *fw_core;
+ int ret, i;
+ u16 cport_id;
+ u8 protocol_id;
+
+ fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
+ if (!fw_core)
+ return -ENOMEM;
+
+ /* Parse CPorts and create connections */
+ for (i = 0; i < bundle->num_cports; i++) {
+ cport_desc = &bundle->cport_desc[i];
+ cport_id = le16_to_cpu(cport_desc->id);
+ protocol_id = cport_desc->protocol_id;
+
+ switch (protocol_id) {
+ case GREYBUS_PROTOCOL_FW_MANAGEMENT:
+ /* Disallow multiple Firmware Management CPorts */
+ if (fw_core->mgmt_connection) {
+ dev_err(&bundle->dev,
+ "multiple management CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ gb_fw_mgmt_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ dev_err(&bundle->dev,
+ "failed to create management connection (%d)\n",
+ ret);
+ goto err_destroy_connections;
+ }
+
+ fw_core->mgmt_connection = connection;
+ break;
+ case GREYBUS_PROTOCOL_FW_DOWNLOAD:
+ /* Disallow multiple Firmware Download CPorts */
+ if (fw_core->download_connection) {
+ dev_err(&bundle->dev,
+ "multiple download CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ gb_fw_download_request_handler);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->download_connection = connection;
+ }
+
+ break;
+ case GREYBUS_PROTOCOL_SPI:
+ /* Disallow multiple SPI CPorts */
+ if (fw_core->spi_connection) {
+ dev_err(&bundle->dev,
+ "multiple SPI CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ NULL);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->spi_connection = connection;
+ }
+
+ break;
+ case GREYBUS_PROTOCOL_AUTHENTICATION:
+ /* Disallow multiple CAP CPorts */
+ if (fw_core->cap_connection) {
+ dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+
+ connection = gb_connection_create(bundle, cport_id,
+ NULL);
+ if (IS_ERR(connection)) {
+ dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
+ PTR_ERR(connection));
+ } else {
+ fw_core->cap_connection = connection;
+ }
+
+ break;
+ default:
+ dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
+ protocol_id);
+ ret = -EINVAL;
+ goto err_destroy_connections;
+ }
+ }
+
+ /* Firmware Management connection is mandatory */
+ if (!fw_core->mgmt_connection) {
+ dev_err(&bundle->dev, "missing management connection\n");
+ ret = -ENODEV;
+ goto err_destroy_connections;
+ }
+
+ ret = gb_fw_download_connection_init(fw_core->download_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->download_connection);
+ fw_core->download_connection = NULL;
+ }
+
+ ret = gb_fw_spi_connection_init(fw_core->spi_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->spi_connection);
+ fw_core->spi_connection = NULL;
+ }
+
+ ret = gb_cap_connection_init(fw_core->cap_connection);
+ if (ret) {
+ /* We may still be able to work with the Interface */
+ dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
+ ret);
+ gb_connection_destroy(fw_core->cap_connection);
+ fw_core->cap_connection = NULL;
+ }
+
+ ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
+ if (ret) {
+ /* The management connection is mandatory; abort the probe */
+ dev_err(&bundle->dev, "failed to initialize firmware management connection (%d)\n",
+ ret);
+ goto err_exit_connections;
+ }
+
+ greybus_set_drvdata(bundle, fw_core);
+
+ /* FIXME: Remove this after S2 Loader gets runtime PM support */
+ if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
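+ /*
+ * The connection exit and destroy helpers below all accept a NULL
+ * connection, so it is safe to call them for optional connections that
+ * were never created or that have already been disabled and cleared
+ * above.
+ */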
+err_exit_connections:
+ gb_cap_connection_exit(fw_core->cap_connection);
+ gb_fw_spi_connection_exit(fw_core->spi_connection);
+ gb_fw_download_connection_exit(fw_core->download_connection);
+err_destroy_connections:
+ gb_connection_destroy(fw_core->mgmt_connection);
+ gb_connection_destroy(fw_core->cap_connection);
+ gb_connection_destroy(fw_core->spi_connection);
+ gb_connection_destroy(fw_core->download_connection);
+ kfree(fw_core);
+
+ return ret;
+}
+
+static void gb_fw_core_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
+ int ret;
+
+ /* FIXME: Remove this after S2 Loader gets runtime PM support */
+ if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+ }
+
+ gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
+ gb_cap_connection_exit(fw_core->cap_connection);
+ gb_fw_spi_connection_exit(fw_core->spi_connection);
+ gb_fw_download_connection_exit(fw_core->download_connection);
+
+ gb_connection_destroy(fw_core->mgmt_connection);
+ gb_connection_destroy(fw_core->cap_connection);
+ gb_connection_destroy(fw_core->spi_connection);
+ gb_connection_destroy(fw_core->download_connection);
+
+ kfree(fw_core);
+}
+
+static const struct greybus_bundle_id gb_fw_core_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
+ { }
+};
+
+static struct greybus_driver gb_fw_core_driver = {
+ .name = "gb-firmware",
+ .probe = gb_fw_core_probe,
+ .disconnect = gb_fw_core_disconnect,
+ .id_table = gb_fw_core_id_table,
+};
+
+static int __init fw_core_init(void)
+{
+ int ret;
+
+ ret = fw_mgmt_init();
+ if (ret) {
+ pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
+ return ret;
+ }
+
+ ret = cap_init();
+ if (ret) {
+ pr_err("Failed to initialize component authentication core (%d)\n",
+ ret);
+ goto fw_mgmt_exit;
+ }
+
+ ret = greybus_register(&gb_fw_core_driver);
+ if (ret)
+ goto cap_exit;
+
+ return 0;
+
+cap_exit:
+ cap_exit();
+fw_mgmt_exit:
+ fw_mgmt_exit();
+
+ return ret;
+}
+module_init(fw_core_init);
+
+static void __exit fw_core_exit(void)
+{
+ greybus_deregister(&gb_fw_core_driver);
+ cap_exit();
+ fw_mgmt_exit();
+}
+module_exit(fw_core_exit);
+
+MODULE_ALIAS("greybus:firmware");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
new file mode 100644
index 000000000000..2d7246887547
--- /dev/null
+++ b/drivers/staging/greybus/fw-download.c
@@ -0,0 +1,465 @@
+/*
+ * Greybus Firmware Download Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "firmware.h"
+#include "greybus.h"
+
+/* Estimated minimum fetch size; the module may actually use smaller chunks */
+#define MIN_FETCH_SIZE 512
+/* Timeout, in jiffies, within which fetch or release firmware must be called */
+#define NEXT_REQ_TIMEOUT_J msecs_to_jiffies(1000)
+
+struct fw_request {
+ u8 firmware_id;
+ bool disabled;
+ bool timedout;
+ char name[FW_NAME_SIZE];
+ const struct firmware *fw;
+ struct list_head node;
+
+ struct delayed_work dwork;
+ /* Timeout, in jiffies, within which the firmware shall download */
+ unsigned long release_timeout_j;
+ struct kref kref;
+ struct fw_download *fw_download;
+};
+
+struct fw_download {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct list_head fw_requests;
+ struct ida id_map;
+ struct mutex mutex;
+};
+
+static void fw_req_release(struct kref *kref)
+{
+ struct fw_request *fw_req = container_of(kref, struct fw_request, kref);
+
+ dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
+ fw_req->name);
+
+ release_firmware(fw_req->fw);
+
+ /*
+ * If the request timed out, the module may still send a fetch-fw or
+ * release-fw request for it later. Keep the id we allocated for this
+ * request blocked, so that a stale request for the old id can't be
+ * matched against a newly allocated fw-request with the same
+ * firmware_id.
+ *
+ * NOTE:
+ *
+ * This also means that after 255 timeouts we can no longer service new
+ * firmware downloads. There isn't much we can do about that; let's just
+ * hope it never happens.
+ */
+ if (!fw_req->timedout)
+ ida_simple_remove(&fw_req->fw_download->id_map,
+ fw_req->firmware_id);
+
+ kfree(fw_req);
+}
+
+/*
+ * Incoming requests are serialized for a connection, so the only possible race
+ * is between the timeout handler freeing a request and an incoming request
+ * looking it up.
+ *
+ * Operations on the fw-request list are protected by the mutex, and
+ * get_fw_req() increments the reference count before returning a fw_req
+ * pointer to its users.
+ *
+ * free_firmware() also takes the mutex while removing an entry from the list,
+ * which guarantees that every existing user of the fw_req holds a kref
+ * reference by then and that no new users can appear.
+ *
+ * Once the last user drops its reference, the fw_req structure is freed.
+ */
+static void put_fw_req(struct fw_request *fw_req)
+{
+ kref_put(&fw_req->kref, fw_req_release);
+}
+
+/* Caller must call put_fw_req() after using struct fw_request */
+static struct fw_request *get_fw_req(struct fw_download *fw_download,
+ u8 firmware_id)
+{
+ struct fw_request *fw_req;
+
+ mutex_lock(&fw_download->mutex);
+
+ list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
+ if (fw_req->firmware_id == firmware_id) {
+ kref_get(&fw_req->kref);
+ goto unlock;
+ }
+ }
+
+ fw_req = NULL;
+
+unlock:
+ mutex_unlock(&fw_download->mutex);
+
+ return fw_req;
+}
+
+static void free_firmware(struct fw_download *fw_download,
+ struct fw_request *fw_req)
+{
+ /* Already disabled from timeout handlers */
+ if (fw_req->disabled)
+ return;
+
+ mutex_lock(&fw_download->mutex);
+ list_del(&fw_req->node);
+ mutex_unlock(&fw_download->mutex);
+
+ fw_req->disabled = true;
+ put_fw_req(fw_req);
+}
+
+static void fw_request_timedout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fw_request *fw_req = container_of(dwork, struct fw_request, dwork);
+ struct fw_download *fw_download = fw_req->fw_download;
+
+ dev_err(fw_download->parent,
+ "Timed out waiting for fetch / release firmware requests: %u\n",
+ fw_req->firmware_id);
+
+ fw_req->timedout = true;
+ free_firmware(fw_download, fw_req);
+}
+
+static int exceeds_release_timeout(struct fw_request *fw_req)
+{
+ struct fw_download *fw_download = fw_req->fw_download;
+
+ if (time_before(jiffies, fw_req->release_timeout_j))
+ return 0;
+
+ dev_err(fw_download->parent,
+ "Firmware download didn't finish in time, abort: %d\n",
+ fw_req->firmware_id);
+
+ fw_req->timedout = true;
+ free_firmware(fw_download, fw_req);
+
+ return -ETIMEDOUT;
+}
+
+/* Look up and load the firmware blob for the given tag from disk */
+static struct fw_request *find_firmware(struct fw_download *fw_download,
+ const char *tag)
+{
+ struct gb_interface *intf = fw_download->connection->bundle->intf;
+ struct fw_request *fw_req;
+ int ret, req_count;
+
+ fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
+ if (!fw_req)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_download->parent,
+ "failed to allocate firmware id (%d)\n", ret);
+ goto err_free_req;
+ }
+ fw_req->firmware_id = ret;
+
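+ /*
+ * The firmware name is built from the Interface's DDBL1 manufacturer
+ * and product ids, its vendor and product ids and the requested tag:
+ * <FW_NAME_PREFIX><ddbl1-mfg>_<ddbl1-prod>_<vendor>_<product>_<tag>.tftf
+ */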
+ snprintf(fw_req->name, sizeof(fw_req->name),
+ FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
+ intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
+ intf->vendor_id, intf->product_id, tag);
+
+ dev_info(fw_download->parent, "Requested firmware package '%s'\n",
+ fw_req->name);
+
+ ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
+ if (ret) {
+ dev_err(fw_download->parent,
+ "firmware request failed for %s (%d)\n", fw_req->name,
+ ret);
+ goto err_free_id;
+ }
+
+ fw_req->fw_download = fw_download;
+ kref_init(&fw_req->kref);
+
+ mutex_lock(&fw_download->mutex);
+ list_add(&fw_req->node, &fw_download->fw_requests);
+ mutex_unlock(&fw_download->mutex);
+
+ /* Timeout, in jiffies, within which firmware should get loaded */
+ req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
+ fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;
+
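+ /*
+ * The delayed work is armed for one NEXT_REQ_TIMEOUT_J interval at a
+ * time and is refreshed on every fetch request, while release_timeout_j
+ * bounds the total time the module may take for the whole download.
+ */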
+ INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
+ schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+ return fw_req;
+
+err_free_id:
+ ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
+err_free_req:
+ kfree(fw_req);
+
+ return ERR_PTR(ret);
+}
+
+static int fw_download_find_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_find_firmware_request *request;
+ struct gb_fw_download_find_firmware_response *response;
+ struct fw_request *fw_req;
+ const char *tag;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "illegal size of find firmware request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ tag = (const char *)request->firmware_tag;
+
+ /* firmware_tag must be null-terminated */
+ if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) == GB_FIRMWARE_TAG_MAX_SIZE) {
+ dev_err(fw_download->parent,
+ "firmware-tag is not null-terminated\n");
+ return -EINVAL;
+ }
+
+ fw_req = find_firmware(fw_download, tag);
+ if (IS_ERR(fw_req))
+ return PTR_ERR(fw_req);
+
+ if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
+ dev_err(fw_download->parent, "error allocating response\n");
+ free_firmware(fw_download, fw_req);
+ return -ENOMEM;
+ }
+
+ response = op->response->payload;
+ response->firmware_id = fw_req->firmware_id;
+ response->size = cpu_to_le32(fw_req->fw->size);
+
+ dev_dbg(fw_download->parent,
+ "firmware size is %zu bytes\n", fw_req->fw->size);
+
+ return 0;
+}
+
+static int fw_download_fetch_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_fetch_firmware_request *request;
+ struct gb_fw_download_fetch_firmware_response *response;
+ struct fw_request *fw_req;
+ const struct firmware *fw;
+ unsigned int offset, size;
+ u8 firmware_id;
+ int ret = 0;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "Illegal size of fetch firmware request (%zu %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ offset = le32_to_cpu(request->offset);
+ size = le32_to_cpu(request->size);
+ firmware_id = request->firmware_id;
+
+ fw_req = get_fw_req(fw_download, firmware_id);
+ if (!fw_req) {
+ dev_err(fw_download->parent,
+ "firmware not available for id: %02u\n", firmware_id);
+ return -EINVAL;
+ }
+
+ /* Make sure work handler isn't running in parallel */
+ cancel_delayed_work_sync(&fw_req->dwork);
+
+ /* Did the request time out before we got here? */
+ if (fw_req->disabled) {
+ ret = -ETIMEDOUT;
+ goto put_fw;
+ }
+
+ /*
+ * Firmware download must finish within a limited time interval. If it
+ * doesn't, then we might have a buggy Module on the other side. Abort
+ * download.
+ */
+ ret = exceeds_release_timeout(fw_req);
+ if (ret)
+ goto put_fw;
+
+ fw = fw_req->fw;
+
+ if (offset >= fw->size || size > fw->size - offset) {
+ dev_err(fw_download->parent,
+ "bad fetch firmware request (offs = %u, size = %u)\n",
+ offset, size);
+ ret = -EINVAL;
+ goto put_fw;
+ }
+
+ if (!gb_operation_response_alloc(op, sizeof(*response) + size,
+ GFP_KERNEL)) {
+ dev_err(fw_download->parent,
+ "error allocating fetch firmware response\n");
+ ret = -ENOMEM;
+ goto put_fw;
+ }
+
+ response = op->response->payload;
+ memcpy(response->data, fw->data + offset, size);
+
+ dev_dbg(fw_download->parent,
+ "responding with firmware (offs = %u, size = %u)\n", offset,
+ size);
+
+ /* Refresh timeout */
+ schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);
+
+put_fw:
+ put_fw_req(fw_req);
+
+ return ret;
+}
+
+static int fw_download_release_firmware(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_download *fw_download = gb_connection_get_data(connection);
+ struct gb_fw_download_release_firmware_request *request;
+ struct fw_request *fw_req;
+ u8 firmware_id;
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_download->parent,
+ "Illegal size of release firmware request (%zu %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+ firmware_id = request->firmware_id;
+
+ fw_req = get_fw_req(fw_download, firmware_id);
+ if (!fw_req) {
+ dev_err(fw_download->parent,
+ "firmware not available for id: %02u\n", firmware_id);
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&fw_req->dwork);
+
+ free_firmware(fw_download, fw_req);
+ put_fw_req(fw_req);
+
+ dev_dbg(fw_download->parent, "release firmware\n");
+
+ return 0;
+}
+
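+/*
+ * Handler for requests initiated by the module on the firmware download
+ * CPort: find a matching firmware blob on the AP, hand out chunks of it,
+ * and release it when the module is done with it.
+ */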
+int gb_fw_download_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
+ return fw_download_find_firmware(op);
+ case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
+ return fw_download_fetch_firmware(op);
+ case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
+ return fw_download_release_firmware(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+int gb_fw_download_connection_init(struct gb_connection *connection)
+{
+ struct fw_download *fw_download;
+ int ret;
+
+ if (!connection)
+ return 0;
+
+ fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
+ if (!fw_download)
+ return -ENOMEM;
+
+ fw_download->parent = &connection->bundle->dev;
+ INIT_LIST_HEAD(&fw_download->fw_requests);
+ ida_init(&fw_download->id_map);
+ gb_connection_set_data(connection, fw_download);
+ fw_download->connection = connection;
+ mutex_init(&fw_download->mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_destroy_id_map;
+
+ return 0;
+
+err_destroy_id_map:
+ ida_destroy(&fw_download->id_map);
+ kfree(fw_download);
+
+ return ret;
+}
+
+void gb_fw_download_connection_exit(struct gb_connection *connection)
+{
+ struct fw_download *fw_download;
+ struct fw_request *fw_req, *tmp;
+
+ if (!connection)
+ return;
+
+ fw_download = gb_connection_get_data(connection);
+ gb_connection_disable(fw_download->connection);
+
+ /*
+ * Make sure we hold a reference to each pending request before it can
+ * be freed by the timeout handler.
+ */
+ mutex_lock(&fw_download->mutex);
+ list_for_each_entry(fw_req, &fw_download->fw_requests, node)
+ kref_get(&fw_req->kref);
+ mutex_unlock(&fw_download->mutex);
+
+ /* Release pending firmware packages */
+ list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
+ cancel_delayed_work_sync(&fw_req->dwork);
+ free_firmware(fw_download, fw_req);
+ put_fw_req(fw_req);
+ }
+
+ ida_destroy(&fw_download->id_map);
+ kfree(fw_download);
+}
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
new file mode 100644
index 000000000000..3cd6cf0a656b
--- /dev/null
+++ b/drivers/staging/greybus/fw-management.c
@@ -0,0 +1,721 @@
+/*
+ * Greybus Firmware Management Protocol Driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+
+#include "firmware.h"
+#include "greybus_firmware.h"
+#include "greybus.h"
+
+#define FW_MGMT_TIMEOUT_MS 1000
+
+struct fw_mgmt {
+ struct device *parent;
+ struct gb_connection *connection;
+ struct kref kref;
+ struct list_head node;
+
+ /* Common id-map for interface and backend firmware requests */
+ struct ida id_map;
+ struct mutex mutex;
+ struct completion completion;
+ struct cdev cdev;
+ struct device *class_device;
+ dev_t dev_num;
+ unsigned int timeout_jiffies;
+ bool disabled; /* connection getting disabled */
+
+ /* Interface Firmware specific fields */
+ bool mode_switch_started;
+ bool intf_fw_loaded;
+ u8 intf_fw_request_id;
+ u8 intf_fw_status;
+ u16 intf_fw_major;
+ u16 intf_fw_minor;
+
+ /* Backend Firmware specific fields */
+ u8 backend_fw_request_id;
+ u8 backend_fw_status;
+};
+
+/*
+ * Number of minor devices this driver supports.
+ * Exactly one is required per Interface.
+ */
+#define NUM_MINORS U8_MAX
+
+static struct class *fw_mgmt_class;
+static dev_t fw_mgmt_dev_num;
+static DEFINE_IDA(fw_mgmt_minors_map);
+static LIST_HEAD(fw_mgmt_list);
+static DEFINE_MUTEX(list_mutex);
+
+static void fw_mgmt_kref_release(struct kref *kref)
+{
+ struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);
+
+ ida_destroy(&fw_mgmt->id_map);
+ kfree(fw_mgmt);
+}
+
+/*
+ * All users of fw_mgmt take a reference (while holding list_mutex) before
+ * they get a pointer to work with. The structure is freed only after the
+ * last user has dropped its reference.
+ */
+static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
+{
+ kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
+}
+
+/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
+static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
+{
+ struct fw_mgmt *fw_mgmt;
+
+ mutex_lock(&list_mutex);
+
+ list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
+ if (&fw_mgmt->cdev == cdev) {
+ kref_get(&fw_mgmt->kref);
+ goto unlock;
+ }
+ }
+
+ fw_mgmt = NULL;
+
+unlock:
+ mutex_unlock(&list_mutex);
+
+ return fw_mgmt;
+}
+
+static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
+ struct fw_mgmt_ioc_get_intf_version *fw_info)
+{
+ struct gb_connection *connection = fw_mgmt->connection;
+ struct gb_fw_mgmt_interface_fw_version_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection,
+ GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(fw_mgmt->parent,
+ "failed to get interface firmware version (%d)\n", ret);
+ return ret;
+ }
+
+ fw_info->major = le16_to_cpu(response.major);
+ fw_info->minor = le16_to_cpu(response.minor);
+
+ strncpy(fw_info->firmware_tag, response.firmware_tag,
+ GB_FIRMWARE_TAG_MAX_SIZE);
+
+ /*
+ * The firmware-tag should be NUL-terminated; if it isn't, log an error
+ * but don't fail, just force-terminate the tag.
+ */
+ if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+ dev_err(fw_mgmt->parent,
+ "fw-version: firmware-tag is not NULL terminated\n");
+ fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
+ u8 load_method, const char *tag)
+{
+ struct gb_fw_mgmt_load_and_validate_fw_request request;
+ int ret;
+
+ if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
+ load_method != GB_FW_LOAD_METHOD_INTERNAL) {
+ dev_err(fw_mgmt->parent,
+ "invalid load-method (%d)\n", load_method);
+ return -EINVAL;
+ }
+
+ request.load_method = load_method;
+ strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
+
+ /*
+ * The firmware-tag must be NUL-terminated; if it isn't, log an error
+ * and fail.
+ */
+ if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+ dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
+ return -EINVAL;
+ }
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+ ret);
+ return ret;
+ }
+
+ fw_mgmt->intf_fw_request_id = ret;
+ fw_mgmt->intf_fw_loaded = false;
+ request.request_id = ret;
+
+ ret = gb_operation_sync(fw_mgmt->connection,
+ GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
+ sizeof(request), NULL, 0);
+ if (ret) {
+ ida_simple_remove(&fw_mgmt->id_map,
+ fw_mgmt->intf_fw_request_id);
+ fw_mgmt->intf_fw_request_id = 0;
+ dev_err(fw_mgmt->parent,
+ "load and validate firmware request failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+ struct gb_fw_mgmt_loaded_fw_request *request;
+
+ /* No pending load and validate request ? */
+ if (!fw_mgmt->intf_fw_request_id) {
+ dev_err(fw_mgmt->parent,
+ "unexpected firmware loaded request received\n");
+ return -ENODEV;
+ }
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ /* Invalid request-id ? */
+ if (request->request_id != fw_mgmt->intf_fw_request_id) {
+ dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
+ fw_mgmt->intf_fw_request_id, request->request_id);
+ return -ENODEV;
+ }
+
+ ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+ fw_mgmt->intf_fw_request_id = 0;
+ fw_mgmt->intf_fw_status = request->status;
+ fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
+ fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);
+
+ if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
+ dev_err(fw_mgmt->parent,
+ "failed to load interface firmware, status:%02x\n",
+ fw_mgmt->intf_fw_status);
+ else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
+ dev_err(fw_mgmt->parent,
+ "failed to validate interface firmware, status:%02x\n",
+ fw_mgmt->intf_fw_status);
+ else
+ fw_mgmt->intf_fw_loaded = true;
+
+ complete(&fw_mgmt->completion);
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
+ struct fw_mgmt_ioc_get_backend_version *fw_info)
+{
+ struct gb_connection *connection = fw_mgmt->connection;
+ struct gb_fw_mgmt_backend_fw_version_request request;
+ struct gb_fw_mgmt_backend_fw_version_response response;
+ int ret;
+
+ strncpy(request.firmware_tag, fw_info->firmware_tag,
+ GB_FIRMWARE_TAG_MAX_SIZE);
+
+ /*
+ * The firmware-tag must be NUL-terminated; if it isn't, log an error
+ * and fail.
+ */
+ if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+ dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
+ return -EINVAL;
+ }
+
+ ret = gb_operation_sync(connection,
+ GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
+ sizeof(request), &response, sizeof(response));
+ if (ret) {
+ dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
+ fw_info->firmware_tag, ret);
+ return ret;
+ }
+
+ fw_info->status = response.status;
+
+ /* Reset version as that should be non-zero only for success case */
+ fw_info->major = 0;
+ fw_info->minor = 0;
+
+ switch (fw_info->status) {
+ case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
+ fw_info->major = le16_to_cpu(response.major);
+ fw_info->minor = le16_to_cpu(response.minor);
+ break;
+ case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
+ case GB_FW_BACKEND_VERSION_STATUS_RETRY:
+ break;
+ case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
+ dev_err(fw_mgmt->parent,
+ "Firmware with tag %s is not supported by Interface\n",
+ fw_info->firmware_tag);
+ break;
+ default:
+ dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
+ fw_info->status);
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
+ char *tag)
+{
+ struct gb_fw_mgmt_backend_fw_update_request request;
+ int ret;
+
+ strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);
+
+ /*
+ * The firmware-tag must be NUL-terminated; if it isn't, log an error
+ * and fail.
+ */
+ if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+ dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
+ return -EINVAL;
+ }
+
+ /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
+ ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
+ ret);
+ return ret;
+ }
+
+ fw_mgmt->backend_fw_request_id = ret;
+ request.request_id = ret;
+
+ ret = gb_operation_sync(fw_mgmt->connection,
+ GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
+ sizeof(request), NULL, 0);
+ if (ret) {
+ ida_simple_remove(&fw_mgmt->id_map,
+ fw_mgmt->backend_fw_request_id);
+ fw_mgmt->backend_fw_request_id = 0;
+ dev_err(fw_mgmt->parent,
+ "backend %s firmware update request failed (%d)\n", tag,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
+ struct gb_fw_mgmt_backend_fw_updated_request *request;
+
+ /* No pending backend firmware update request? */
+ if (!fw_mgmt->backend_fw_request_id) {
+ dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
+ return -ENODEV;
+ }
+
+ if (op->request->payload_size != sizeof(*request)) {
+ dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ /* Invalid request-id ? */
+ if (request->request_id != fw_mgmt->backend_fw_request_id) {
+ dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
+ fw_mgmt->backend_fw_request_id, request->request_id);
+ return -ENODEV;
+ }
+
+ ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+ fw_mgmt->backend_fw_request_id = 0;
+ fw_mgmt->backend_fw_status = request->status;
+
+ if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
+ (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
+ dev_err(fw_mgmt->parent,
+ "failed to load backend firmware: %02x\n",
+ fw_mgmt->backend_fw_status);
+
+ complete(&fw_mgmt->completion);
+
+ return 0;
+}
+
+/* Char device fops */
+
+static int fw_mgmt_open(struct inode *inode, struct file *file)
+{
+ struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);
+
+ /* The fw_mgmt structure can't be freed until the file descriptor is closed */
+ if (fw_mgmt) {
+ file->private_data = fw_mgmt;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int fw_mgmt_release(struct inode *inode, struct file *file)
+{
+ struct fw_mgmt *fw_mgmt = file->private_data;
+
+ put_fw_mgmt(fw_mgmt);
+ return 0;
+}
+
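+/*
+ * Main ioctl handler for the gb-fw-mgmt-<N> character device. The
+ * load-and-validate and backend-update ioctls are synchronous from
+ * userspace's point of view: the request is sent to the module and we then
+ * block on 'completion' until the corresponding loaded-fw / backend-fw-updated
+ * request arrives, or until the (configurable) timeout expires.
+ */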
+static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
+ void __user *buf)
+{
+ struct fw_mgmt_ioc_get_intf_version intf_fw_info;
+ struct fw_mgmt_ioc_get_backend_version backend_fw_info;
+ struct fw_mgmt_ioc_intf_load_and_validate intf_load;
+ struct fw_mgmt_ioc_backend_fw_update backend_update;
+ unsigned int timeout;
+ int ret;
+
+ /* Reject any operations after mode-switch has started */
+ if (fw_mgmt->mode_switch_started)
+ return -EBUSY;
+
+ switch (cmd) {
+ case FW_MGMT_IOC_GET_INTF_FW:
+ ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
+ &intf_fw_info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_GET_BACKEND_FW:
+ if (copy_from_user(&backend_fw_info, buf,
+ sizeof(backend_fw_info)))
+ return -EFAULT;
+
+ ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
+ &backend_fw_info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buf, &backend_fw_info,
+ sizeof(backend_fw_info)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
+ if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
+ return -EFAULT;
+
+ ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
+ intf_load.load_method, intf_load.firmware_tag);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&fw_mgmt->completion,
+ fw_mgmt->timeout_jiffies)) {
+ dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ intf_load.status = fw_mgmt->intf_fw_status;
+ intf_load.major = fw_mgmt->intf_fw_major;
+ intf_load.minor = fw_mgmt->intf_fw_minor;
+
+ if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
+ if (copy_from_user(&backend_update, buf,
+ sizeof(backend_update)))
+ return -EFAULT;
+
+ ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
+ backend_update.firmware_tag);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&fw_mgmt->completion,
+ fw_mgmt->timeout_jiffies)) {
+ dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ backend_update.status = fw_mgmt->backend_fw_status;
+
+ if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
+ return -EFAULT;
+
+ return 0;
+ case FW_MGMT_IOC_SET_TIMEOUT_MS:
+ if (get_user(timeout, (unsigned int __user *)buf))
+ return -EFAULT;
+
+ if (!timeout) {
+ dev_err(fw_mgmt->parent, "timeout can't be zero\n");
+ return -EINVAL;
+ }
+
+ fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);
+
+ return 0;
+ case FW_MGMT_IOC_MODE_SWITCH:
+ if (!fw_mgmt->intf_fw_loaded) {
+ dev_err(fw_mgmt->parent,
+ "Firmware not loaded for mode-switch\n");
+ return -EPERM;
+ }
+
+ /*
+ * Disallow new ioctls as the fw-core bundle driver is going to
+ * get disconnected soon and the character device will get
+ * removed.
+ */
+ fw_mgmt->mode_switch_started = true;
+
+ ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
+ if (ret) {
+ dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
+ ret);
+ fw_mgmt->mode_switch_started = false;
+ return ret;
+ }
+
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fw_mgmt *fw_mgmt = file->private_data;
+ struct gb_bundle *bundle = fw_mgmt->connection->bundle;
+ int ret = -ENODEV;
+
+ /*
+ * Serialize ioctls.
+ *
+ * We don't want the user to run certain operations in parallel, for
+ * example updating the Interface firmware of the same Interface from
+ * two callers at once. There is no performance reason to allow that,
+ * and serializing keeps the code simple for now.
+ *
+ * The mutex also protects ->disabled, which is used to check whether
+ * the connection is being disconnected, so that we don't start any
+ * new operations.
+ */
+ mutex_lock(&fw_mgmt->mutex);
+ if (!fw_mgmt->disabled) {
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (!ret) {
+ ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
+ gb_pm_runtime_put_autosuspend(bundle);
+ }
+ }
+ mutex_unlock(&fw_mgmt->mutex);
+
+ return ret;
+}
+
+static const struct file_operations fw_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = fw_mgmt_open,
+ .release = fw_mgmt_release,
+ .unlocked_ioctl = fw_mgmt_ioctl_unlocked,
+};
+
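+/*
+ * Handler for requests initiated by the module on the firmware management
+ * CPort. Both request types just record the result reported by the module
+ * and wake up the ioctl that is waiting on 'completion'.
+ */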
+int gb_fw_mgmt_request_handler(struct gb_operation *op)
+{
+ u8 type = op->type;
+
+ switch (type) {
+ case GB_FW_MGMT_TYPE_LOADED_FW:
+ return fw_mgmt_interface_fw_loaded_operation(op);
+ case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
+ return fw_mgmt_backend_fw_updated_operation(op);
+ default:
+ dev_err(&op->connection->bundle->dev,
+ "unsupported request: %u\n", type);
+ return -EINVAL;
+ }
+}
+
+int gb_fw_mgmt_connection_init(struct gb_connection *connection)
+{
+ struct fw_mgmt *fw_mgmt;
+ int ret, minor;
+
+ if (!connection)
+ return 0;
+
+ fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
+ if (!fw_mgmt)
+ return -ENOMEM;
+
+ fw_mgmt->parent = &connection->bundle->dev;
+ fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
+ fw_mgmt->connection = connection;
+
+ gb_connection_set_data(connection, fw_mgmt);
+ init_completion(&fw_mgmt->completion);
+ ida_init(&fw_mgmt->id_map);
+ mutex_init(&fw_mgmt->mutex);
+ kref_init(&fw_mgmt->kref);
+
+ mutex_lock(&list_mutex);
+ list_add(&fw_mgmt->node, &fw_mgmt_list);
+ mutex_unlock(&list_mutex);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_list_del;
+
+ minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ if (minor < 0) {
+ ret = minor;
+ goto err_connection_disable;
+ }
+
+ /* Add a char device to allow userspace to interact with fw-mgmt */
+ fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
+ cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);
+
+ ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
+ if (ret)
+ goto err_remove_ida;
+
+ /* Create a class device under the bundle for the char dev just added */
+ fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
+ fw_mgmt->dev_num, NULL,
+ "gb-fw-mgmt-%d", minor);
+ if (IS_ERR(fw_mgmt->class_device)) {
+ ret = PTR_ERR(fw_mgmt->class_device);
+ goto err_del_cdev;
+ }
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&fw_mgmt->cdev);
+err_remove_ida:
+ ida_simple_remove(&fw_mgmt_minors_map, minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_list_del:
+ mutex_lock(&list_mutex);
+ list_del(&fw_mgmt->node);
+ mutex_unlock(&list_mutex);
+
+ put_fw_mgmt(fw_mgmt);
+
+ return ret;
+}
+
+void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
+{
+ struct fw_mgmt *fw_mgmt;
+
+ if (!connection)
+ return;
+
+ fw_mgmt = gb_connection_get_data(connection);
+
+ device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
+ cdev_del(&fw_mgmt->cdev);
+ ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
+
+ /*
+ * Disallow any new ioctl operations on the char device and wait for
+ * existing ones to finish.
+ */
+ mutex_lock(&fw_mgmt->mutex);
+ fw_mgmt->disabled = true;
+ mutex_unlock(&fw_mgmt->mutex);
+
+ /* All pending greybus operations should have finished by now */
+ gb_connection_disable(fw_mgmt->connection);
+
+ /* Prevent new users from getting access to the fw_mgmt structure */
+ mutex_lock(&list_mutex);
+ list_del(&fw_mgmt->node);
+ mutex_unlock(&list_mutex);
+
+ /*
+ * All current users of fw_mgmt have taken a reference to it by now, so
+ * we can drop our own reference; fw_mgmt will be freed once the last
+ * user drops theirs.
+ */
+ put_fw_mgmt(fw_mgmt);
+}
+
+int fw_mgmt_init(void)
+{
+ int ret;
+
+ fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
+ if (IS_ERR(fw_mgmt_class))
+ return PTR_ERR(fw_mgmt_class);
+
+ ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
+ "gb_fw_mgmt");
+ if (ret)
+ goto err_remove_class;
+
+ return 0;
+
+err_remove_class:
+ class_destroy(fw_mgmt_class);
+ return ret;
+}
+
+void fw_mgmt_exit(void)
+{
+ unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
+ class_destroy(fw_mgmt_class);
+ ida_destroy(&fw_mgmt_minors_map);
+}
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
new file mode 100644
index 000000000000..d45dabc5b367
--- /dev/null
+++ b/drivers/staging/greybus/gb-camera.h
@@ -0,0 +1,127 @@
+/*
+ * Greybus Camera protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+#ifndef __GB_CAMERA_H
+#define __GB_CAMERA_H
+
+#include <linux/v4l2-mediabus.h>
+
+/* Input flags need to be set from the caller */
+#define GB_CAMERA_IN_FLAG_TEST (1 << 0)
+/* Output flags returned */
+#define GB_CAMERA_OUT_FLAG_ADJUSTED (1 << 0)
+
+/**
+ * struct gb_camera_stream - Represents greybus camera stream.
+ * @width: Stream width in pixels.
+ * @height: Stream height in pixels.
+ * @pixel_code: Media bus pixel code.
+ * @vc: MIPI CSI virtual channel.
+ * @dt: MIPI CSI data types. Most formats use a single data type, in which case
+ * the second element will be ignored.
+ * @max_size: Maximum size of a frame in bytes. The camera module guarantees
+ * that all data between the Frame Start and Frame End packet for
+ * the associated virtual channel and data type(s) will not exceed
+ * this size.
+ */
+struct gb_camera_stream {
+ unsigned int width;
+ unsigned int height;
+ enum v4l2_mbus_pixelcode pixel_code;
+ unsigned int vc;
+ unsigned int dt[2];
+ unsigned int max_size;
+};
+
+/**
+ * struct gb_camera_csi_params - CSI configuration parameters
+ * @num_lanes: number of CSI data lanes
+ * @clk_freq: CSI clock frequency in Hz
+ */
+struct gb_camera_csi_params {
+ unsigned int num_lanes;
+ unsigned int clk_freq;
+};
+
+/**
+ * struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
+ * driver to expose operations to the host camera driver.
+ * @capabilities: Retrieve the camera capabilities and store them in the
+ * 'buf' buffer. The maximum buffer size is specified by the
+ * caller in the 'len' parameter, and the effective size of the
+ * capabilities is returned from the function. If the buffer is
+ * too small to hold the capabilities, an error is returned and
+ * the buffer is left untouched.
+ *
+ * @configure_streams: Negotiate configuration and prepare the module for video
+ * capture. The caller specifies the number of streams it
+ * requests in the 'nstreams' argument and the associated
+ * streams configurations in the 'streams' argument. The
+ * GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
+ * configuration without applying it, otherwise the
+ * configuration is applied by the module. The module can
+ * decide to modify the requested configuration, including
+ * using a different number of streams. In that case the
+ * modified configuration won't be applied, the
+ * GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
+ * return, and the modified configuration and number of
+ * streams stored in 'streams' and 'nstreams'. The module
+ * returns its CSI-2 bus parameters in the 'csi_params'
+ * structure in all cases.
+ *
+ * @capture: Submit a capture request. The supplied 'request_id' must be unique
+ * and higher than the IDs of all the previously submitted requests.
+ * The 'streams' argument specifies which streams are affected by the
+ * request in the form of a bitmask, with bits corresponding to the
+ * configured streams indexes. If the request contains settings, the
+ * 'settings' argument points to the settings buffer and its size is
+ * specified by the 'settings_size' argument. Otherwise the 'settings'
+ * argument should be set to NULL and 'settings_size' to 0.
+ *
+ * @flush: Flush the capture requests queue. Return the ID of the last request
+ * that will be processed by the device before it stops transmitting video
+ * frames. All queued capture requests with IDs higher than the returned
+ * ID will be dropped without being processed.
+ */
+struct gb_camera_ops {
+ ssize_t (*capabilities)(void *priv, char *buf, size_t len);
+ int (*configure_streams)(void *priv, unsigned int *nstreams,
+ unsigned int *flags, struct gb_camera_stream *streams,
+ struct gb_camera_csi_params *csi_params);
+ int (*capture)(void *priv, u32 request_id,
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings);
+ int (*flush)(void *priv, u32 *request_id);
+};
+
+/**
+ * struct gb_camera_module - Represents greybus camera module.
+ * @priv: Module private data, passed to all camera operations.
+ * @ops: Greybus camera operation callbacks.
+ * @interface_id: Interface id of the module.
+ * @refcount: Reference counting object.
+ * @release: Module release function.
+ * @list: List entry in the camera modules list.
+ */
+struct gb_camera_module {
+ void *priv;
+ const struct gb_camera_ops *ops;
+
+ unsigned int interface_id;
+ struct kref refcount;
+ void (*release)(struct kref *kref);
+ struct list_head list; /* Global list */
+};
+
+#define gb_camera_call(f, op, args...) \
+ (!(f) ? -ENODEV : (((f)->ops->op) ? \
+ (f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))
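+
+/*
+ * For example, a (hypothetical) consumer would invoke an operation as:
+ *
+ * ret = gb_camera_call(module, capabilities, buf, len);
+ *
+ * which evaluates to -ENODEV if 'module' is NULL and to -ENOIOCTLCMD if the
+ * module does not implement the requested operation.
+ */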
+
+int gb_camera_register(struct gb_camera_module *module);
+int gb_camera_unregister(struct gb_camera_module *module);
+
+#endif /* __GB_CAMERA_H */
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
new file mode 100644
index 000000000000..bcde7c9a0f17
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.c
@@ -0,0 +1,360 @@
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+#define GB_GBPHY_AUTOSUSPEND_MS 3000
+
+struct gbphy_host {
+ struct gb_bundle *bundle;
+ struct list_head devices;
+};
+
+static DEFINE_IDA(gbphy_id);
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static struct attribute *gbphy_dev_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(gbphy_dev);
+
+static void gbphy_dev_release(struct device *dev)
+{
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ ida_simple_remove(&gbphy_id, gbphy_dev->id);
+ kfree(gbphy_dev);
+}
+
+#ifdef CONFIG_PM
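+/*
+ * The runtime PM idle callback only requests a delayed autosuspend, so a
+ * gbphy device is suspended GB_GBPHY_AUTOSUSPEND_MS after its last use
+ * rather than immediately.
+ */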
+static int gb_gbphy_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_gbphy_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ gb_gbphy_idle)
+};
+
+static struct device_type greybus_gbphy_dev_type = {
+ .name = "gbphy_device",
+ .release = gbphy_dev_release,
+ .pm = &gb_gbphy_pm_ops,
+};
+
+static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
+ struct gb_bundle *bundle = gbphy_dev->bundle;
+ struct gb_interface *intf = bundle->intf;
+ struct gb_module *module = intf->module;
+ struct gb_host_device *hd = intf->hd;
+
+ if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "MODULE=%u", module->module_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+ intf->vendor_id, intf->product_id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "BUNDLE=%u", gbphy_dev->bundle->id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+ return -ENOMEM;
+ if (add_uevent_var(env, "GBPHY=%u", gbphy_dev->id))
+ return -ENOMEM;
+ if (add_uevent_var(env, "PROTOCOL_ID=%02x", cport_desc->protocol_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct gbphy_device_id *
+gbphy_dev_match_id(struct gbphy_device *gbphy_dev, struct gbphy_driver *gbphy_drv)
+{
+ const struct gbphy_device_id *id = gbphy_drv->id_table;
+
+ if (!id)
+ return NULL;
+
+ for (; id->protocol_id; id++)
+ if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
+ return id;
+
+ return NULL;
+}
+
+static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct gbphy_device_id *id;
+
+ id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+ if (id)
+ return 1;
+
+ return 0;
+}
+
+static int gbphy_dev_probe(struct device *dev)
+{
+ struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct gbphy_device_id *id;
+ int ret;
+
+ id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
+ if (!id)
+ return -ENODEV;
+
+ /* for old kernels we need get_sync to resume parent devices */
+ ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /*
+ * Drivers that support runtime PM should drop this reference, via
+ * gbphy_runtime_put_autosuspend(), before returning from probe.
+ */
+ ret = gbphy_drv->probe(gbphy_dev, id);
+ if (ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ }
+
+ gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);
+
+ return ret;
+}
+
+static int gbphy_dev_remove(struct device *dev)
+{
+ struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
+ struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+
+ gbphy_drv->remove(gbphy_dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return 0;
+}
+
+static struct bus_type gbphy_bus_type = {
+ .name = "gbphy",
+ .match = gbphy_dev_match,
+ .probe = gbphy_dev_probe,
+ .remove = gbphy_dev_remove,
+ .uevent = gbphy_dev_uevent,
+};
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+ struct module *owner, const char *mod_name)
+{
+ int retval;
+
+ if (greybus_disabled())
+ return -ENODEV;
+
+ driver->driver.bus = &gbphy_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ retval = driver_register(&driver->driver);
+ if (retval)
+ return retval;
+
+ pr_info("registered new driver %s\n", driver->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
+
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
+
+static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
+ struct greybus_descriptor_cport *cport_desc)
+{
+ struct gbphy_device *gbphy_dev;
+ int retval;
+ int id;
+
+ id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
+ if (!gbphy_dev) {
+ ida_simple_remove(&gbphy_id, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gbphy_dev->id = id;
+ gbphy_dev->bundle = bundle;
+ gbphy_dev->cport_desc = cport_desc;
+ gbphy_dev->dev.parent = &bundle->dev;
+ gbphy_dev->dev.bus = &gbphy_bus_type;
+ gbphy_dev->dev.type = &greybus_gbphy_dev_type;
+ gbphy_dev->dev.groups = gbphy_dev_groups;
+ gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
+ dev_set_name(&gbphy_dev->dev, "gbphy%d", id);
+
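+ /*
+ * If device_register() fails, the put_device() below drops the last
+ * reference and gbphy_dev_release() frees both the ida id and the
+ * gbphy_dev structure.
+ */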
+ retval = device_register(&gbphy_dev->dev);
+ if (retval) {
+ put_device(&gbphy_dev->dev);
+ return ERR_PTR(retval);
+ }
+
+ return gbphy_dev;
+}
+
+static void gb_gbphy_disconnect(struct gb_bundle *bundle)
+{
+ struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
+ struct gbphy_device *gbphy_dev, *temp;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ gb_pm_runtime_get_noresume(bundle);
+
+ list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
+ list_del(&gbphy_dev->list);
+ device_unregister(&gbphy_dev->dev);
+ }
+
+ kfree(gbphy_host);
+}
+
+static int gb_gbphy_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct gbphy_host *gbphy_host;
+ struct gbphy_device *gbphy_dev;
+ int i;
+
+ if (bundle->num_cports == 0)
+ return -ENODEV;
+
+ gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
+ if (!gbphy_host)
+ return -ENOMEM;
+
+ gbphy_host->bundle = bundle;
+ INIT_LIST_HEAD(&gbphy_host->devices);
+ greybus_set_drvdata(bundle, gbphy_host);
+
+ /*
+ * Create one child device per CPort and let the driver core bind the
+ * bridged-phy drivers to them.
+ */
+ for (i = 0; i < bundle->num_cports; ++i) {
+ gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
+ if (IS_ERR(gbphy_dev)) {
+ gb_gbphy_disconnect(bundle);
+ return PTR_ERR(gbphy_dev);
+ }
+ list_add(&gbphy_dev->list, &gbphy_host->devices);
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+}
+
+static const struct greybus_bundle_id gb_gbphy_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
+ { },
+};
+MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);
+
+static struct greybus_driver gb_gbphy_driver = {
+ .name = "gbphy",
+ .probe = gb_gbphy_probe,
+ .disconnect = gb_gbphy_disconnect,
+ .id_table = gb_gbphy_id_table,
+};
+
+static int __init gbphy_init(void)
+{
+ int retval;
+
+ retval = bus_register(&gbphy_bus_type);
+ if (retval) {
+ pr_err("gbphy bus register failed (%d)\n", retval);
+ return retval;
+ }
+
+ retval = greybus_register(&gb_gbphy_driver);
+ if (retval) {
+ pr_err("error registering greybus driver\n");
+ goto error_gbphy;
+ }
+
+ return 0;
+
+error_gbphy:
+ bus_unregister(&gbphy_bus_type);
+ ida_destroy(&gbphy_id);
+ return retval;
+}
+module_init(gbphy_init);
+
+static void __exit gbphy_exit(void)
+{
+ greybus_deregister(&gb_gbphy_driver);
+ bus_unregister(&gbphy_bus_type);
+ ida_destroy(&gbphy_id);
+}
+module_exit(gbphy_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
new file mode 100644
index 000000000000..8ee68055ccc4
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.h
@@ -0,0 +1,110 @@
+/*
+ * Greybus Bridged-Phy Bus driver
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __GBPHY_H
+#define __GBPHY_H
+
+struct gbphy_device {
+ u32 id;
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_bundle *bundle;
+ struct list_head list;
+ struct device dev;
+};
+#define to_gbphy_dev(d) container_of(d, struct gbphy_device, dev)
+
+static inline void *gb_gbphy_get_data(struct gbphy_device *gdev)
+{
+ return dev_get_drvdata(&gdev->dev);
+}
+
+static inline void gb_gbphy_set_data(struct gbphy_device *gdev, void *data)
+{
+ dev_set_drvdata(&gdev->dev, data);
+}
+
+struct gbphy_device_id {
+ __u8 protocol_id;
+};
+
+#define GBPHY_PROTOCOL(p) \
+ .protocol_id = (p),
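+
+/*
+ * A driver's id_table is a zero-terminated array of these entries, e.g.
+ * (for an illustrative driver handling the GPIO protocol):
+ *
+ * static const struct gbphy_device_id foo_id_table[] = {
+ * { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
+ * { },
+ * };
+ */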
+
+struct gbphy_driver {
+ const char *name;
+ int (*probe)(struct gbphy_device *,
+ const struct gbphy_device_id *id);
+ void (*remove)(struct gbphy_device *);
+ const struct gbphy_device_id *id_table;
+
+ struct device_driver driver;
+};
+#define to_gbphy_driver(d) container_of(d, struct gbphy_driver, driver)
+
+int gb_gbphy_register_driver(struct gbphy_driver *driver,
+ struct module *owner, const char *mod_name);
+void gb_gbphy_deregister_driver(struct gbphy_driver *driver);
+
+#define gb_gbphy_register(driver) \
+ gb_gbphy_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define gb_gbphy_deregister(driver) \
+ gb_gbphy_deregister_driver(driver)
+
+/**
+ * module_gbphy_driver() - Helper macro for registering a gbphy driver
+ * @__gbphy_driver: gbphy_driver structure
+ *
+ * Helper macro for gbphy drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_gbphy_driver(__gbphy_driver) \
+ module_driver(__gbphy_driver, gb_gbphy_register, gb_gbphy_deregister)
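+
+/*
+ * Example (illustrative driver, using the foo_id_table shown above):
+ *
+ * static struct gbphy_driver foo_driver = {
+ * .name = "foo",
+ * .probe = foo_probe,
+ * .remove = foo_remove,
+ * .id_table = foo_id_table,
+ * };
+ * module_gbphy_driver(foo_driver);
+ */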
+
+#ifdef CONFIG_PM
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev)
+{
+ struct device *dev = &gbphy_dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev)
+{
+ struct device *dev = &gbphy_dev->dev;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev)
+{
+ pm_runtime_get_noresume(&gbphy_dev->dev);
+}
+
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev)
+{
+ pm_runtime_put_noidle(&gbphy_dev->dev);
+}
+#else
+static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev) { return 0; }
+static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev) {}
+static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev) {}
+#endif
+
+#endif /* __GBPHY_H */
+
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
new file mode 100644
index 000000000000..5e06e4229e42
--- /dev/null
+++ b/drivers/staging/greybus/gpio.c
@@ -0,0 +1,766 @@
+/*
+ * GPIO Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_gpio_line {
+ /* One of these entries exists per line; see gb_gpio_controller->lines */
+ /* TODO: these bit-fields could be collapsed into a single flags field */
+ u8 active: 1,
+ direction: 1, /* 0 = output, 1 = input */
+ value: 1; /* 0 = low, 1 = high */
+ u16 debounce_usec;
+
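+ /*
+ * irq state is cached here; the *_pending flags mark configuration
+ * changes that still have to be sent to the module once the irqchip
+ * bus lock is released (the greybus operations can sleep, so they
+ * can't be issued from the irqchip callbacks directly).
+ */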
+ u8 irq_type;
+ bool irq_type_pending;
+ bool masked;
+ bool masked_pending;
+};
+
+struct gb_gpio_controller {
+ struct gbphy_device *gbphy_dev;
+ struct gb_connection *connection;
+ u8 line_max; /* max line number */
+ struct gb_gpio_line *lines;
+
+ struct gpio_chip chip;
+ struct irq_chip irqc;
+ struct irq_chip *irqchip;
+ struct irq_domain *irqdomain;
+ unsigned int irq_base;
+ irq_flow_handler_t irq_handler;
+ unsigned int irq_default_type;
+ struct mutex irq_lock;
+};
+#define gpio_chip_to_gb_gpio_controller(chip) \
+ container_of(chip, struct gb_gpio_controller, chip)
+#define irq_data_to_gpio_chip(d) (d->domain->host_data)
+
+static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
+{
+ struct gb_gpio_line_count_response response;
+ int ret;
+
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
+ NULL, 0, &response, sizeof(response));
+ if (!ret)
+ ggc->line_max = response.count;
+ return ret;
+}
+
+static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
+{
+ struct gb_gpio_activate_request request;
+ struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return ret;
+ }
+
+ ggc->lines[which].active = true;
+
+ return 0;
+}
+
+static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
+ struct device *dev = &gbphy_dev->dev;
+ struct gb_gpio_deactivate_request request;
+ int ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to deactivate gpio %u\n", which);
+ goto out_pm_put;
+ }
+
+ ggc->lines[which].active = false;
+
+out_pm_put:
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+}
+
+static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_get_direction_request request;
+ struct gb_gpio_get_direction_response response;
+ int ret;
+ u8 direction;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret)
+ return ret;
+
+ direction = response.direction;
+ if (direction && direction != 1) {
+ dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
+ which, direction);
+ }
+ ggc->lines[which].direction = direction ? 1 : 0;
+ return 0;
+}
+
+static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct gb_gpio_direction_in_request request;
+ int ret;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].direction = 1;
+ return ret;
+}
+
+static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
+ u8 which, bool value_high)
+{
+ struct gb_gpio_direction_out_request request;
+ int ret;
+
+ request.which = which;
+ request.value = value_high ? 1 : 0;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].direction = 0;
+ return ret;
+}
+
+static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
+ u8 which)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_get_value_request request;
+ struct gb_gpio_get_value_response response;
+ int ret;
+ u8 value;
+
+ request.which = which;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(dev, "failed to get value of gpio %u\n", which);
+ return ret;
+ }
+
+ value = response.value;
+ if (value && value != 1) {
+ dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
+ which, value);
+ }
+ ggc->lines[which].value = value ? 1 : 0;
+ return 0;
+}
+
+static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
+ u8 which, bool value_high)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_set_value_request request;
+ int ret;
+
+ if (ggc->lines[which].direction == 1) {
+ dev_warn(dev, "refusing to set value of input gpio %u\n",
+ which);
+ return;
+ }
+
+ request.which = which;
+ request.value = value_high ? 1 : 0;
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to set value of gpio %u\n", which);
+ return;
+ }
+
+ ggc->lines[which].value = request.value;
+}
+
+static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
+ u8 which, u16 debounce_usec)
+{
+ struct gb_gpio_set_debounce_request request;
+ int ret;
+
+ request.which = which;
+ request.usec = cpu_to_le16(debounce_usec);
+ ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
+ &request, sizeof(request), NULL, 0);
+ if (!ret)
+ ggc->lines[which].debounce_usec = debounce_usec;
+ return ret;
+}
+
+static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_mask_request request;
+ int ret;
+
+ request.which = hwirq;
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_MASK,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to mask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_unmask_request request;
+ int ret;
+
+ request.which = hwirq;
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_UNMASK,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to unmask irq: %d\n", ret);
+}
+
+static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
+ u8 hwirq, u8 type)
+{
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_gpio_irq_type_request request;
+ int ret;
+
+ request.which = hwirq;
+ request.type = type;
+
+ ret = gb_operation_sync(ggc->connection,
+ GB_GPIO_TYPE_IRQ_TYPE,
+ &request, sizeof(request), NULL, 0);
+ if (ret)
+ dev_err(dev, "failed to set irq type: %d\n", ret);
+}
+
+static void gb_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ line->masked = true;
+ line->masked_pending = true;
+}
+
+static void gb_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ line->masked = false;
+ line->masked_pending = true;
+}
+
+static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+ struct device *dev = &ggc->gbphy_dev->dev;
+ u8 irq_type;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ irq_type = GB_GPIO_IRQ_TYPE_NONE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ dev_err(dev, "unsupported irq type: %u\n", type);
+ return -EINVAL;
+ }
+
+ line->irq_type = irq_type;
+ line->irq_type_pending = true;
+
+ return 0;
+}
+
+static void gb_gpio_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ mutex_lock(&ggc->irq_lock);
+}
+
+static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_to_gpio_chip(d);
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ struct gb_gpio_line *line = &ggc->lines[d->hwirq];
+
+ if (line->irq_type_pending) {
+ _gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
+ line->irq_type_pending = false;
+ }
+
+ if (line->masked_pending) {
+ if (line->masked)
+ _gb_gpio_irq_mask(ggc, d->hwirq);
+ else
+ _gb_gpio_irq_unmask(ggc, d->hwirq);
+ line->masked_pending = false;
+ }
+
+ mutex_unlock(&ggc->irq_lock);
+}
+
+static int gb_gpio_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
+ struct device *dev = &ggc->gbphy_dev->dev;
+ struct gb_message *request;
+ struct gb_gpio_irq_event_request *event;
+ u8 type = op->type;
+ int irq;
+ struct irq_desc *desc;
+
+ if (type != GB_GPIO_TYPE_IRQ_EVENT) {
+ dev_err(dev, "unsupported unsolicited request: %u\n", type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*event)) {
+ dev_err(dev, "short event received (%zu < %zu)\n",
+ request->payload_size, sizeof(*event));
+ return -EINVAL;
+ }
+
+ event = request->payload;
+ if (event->which > ggc->line_max) {
+ dev_err(dev, "invalid hw irq: %d\n", event->which);
+ return -EINVAL;
+ }
+
+ irq = irq_find_mapping(ggc->irqdomain, event->which);
+ if (!irq) {
+ dev_err(dev, "failed to find IRQ\n");
+ return -EINVAL;
+ }
+ desc = irq_to_desc(irq);
+ if (!desc) {
+ dev_err(dev, "failed to look up irq\n");
+ return -EINVAL;
+ }
+
+ local_irq_disable();
+ generic_handle_irq_desc(desc);
+ local_irq_enable();
+
+ return 0;
+}
+
+static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ return gb_gpio_activate_operation(ggc, (u8)offset);
+}
+
+static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ gb_gpio_deactivate_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ u8 which;
+ int ret;
+
+ which = (u8)offset;
+ ret = gb_gpio_get_direction_operation(ggc, which);
+ if (ret)
+ return ret;
+
+ return ggc->lines[which].direction ? 1 : 0;
+}
+
+static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ return gb_gpio_direction_in_operation(ggc, (u8)offset);
+}
+
+static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ u8 which;
+ int ret;
+
+ which = (u8)offset;
+ ret = gb_gpio_get_value_operation(ggc, which);
+ if (ret)
+ return ret;
+
+ return ggc->lines[which].value;
+}
+
+static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
+}
+
+static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+ u16 usec;
+
+ if (debounce > U16_MAX)
+ return -EINVAL;
+ usec = (u16)debounce;
+
+ return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
+}
+
+static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
+{
+ int ret;
+
+ /* Now find out how many lines there are */
+ ret = gb_gpio_line_count_operation(ggc);
+ if (ret)
+ return ret;
+
+ ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
+ GFP_KERNEL);
+ if (!ggc->lines)
+ return -ENOMEM;
+
+ return ret;
+}
+
+/**
+ * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
+ * @domain: the irqdomain used by this irqchip
+ * @irq: the global irq number used by this GB gpio irqchip
+ * @hwirq: the local IRQ/GPIO line offset on this GB gpio
+ *
+ * This function will set up the mapping for a certain IRQ line on a
+ * GB gpio by assigning the GB gpio as chip data, and using the irqchip
+ * stored inside the GB gpio.
+ */
+static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct gpio_chip *chip = domain->host_data;
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ irq_set_chip_data(irq, ggc);
+ irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
+ irq_set_noprobe(irq);
+ /*
+ * No set-up of the hardware will happen if IRQ_TYPE_NONE
+ * is passed as default type.
+ */
+ if (ggc->irq_default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, ggc->irq_default_type);
+
+ return 0;
+}
+
+static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops gb_gpio_domain_ops = {
+ .map = gb_gpio_irq_map,
+ .unmap = gb_gpio_irq_unmap,
+};
+
+/**
+ * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
+ * @ggc: the gb_gpio_controller to remove the irqchip from
+ *
+ * This is called from gb_gpio_remove() and from the probe error path
+ */
+static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
+{
+ unsigned int offset;
+
+ /* Remove all IRQ mappings and delete the domain */
+ if (ggc->irqdomain) {
+ for (offset = 0; offset < (ggc->line_max + 1); offset++)
+ irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
+ irq_domain_remove(ggc->irqdomain);
+ }
+
+ if (ggc->irqchip)
+ ggc->irqchip = NULL;
+}
+
+/**
+ * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
+ * @chip: the gpio chip to add the irqchip to
+ * @irqchip: the irqchip to add to the adapter
+ * @first_irq: if not dynamically assigned, the base (first) IRQ to
+ * allocate gpio irqs from
+ * @handler: the irq handler to use (often a predefined irq core function)
+ * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
+ * to have the core avoid setting up any default type in the hardware.
+ *
+ * This function closely associates a certain irqchip with a certain
+ * gpio chip, providing an irq domain to translate the local IRQs to
+ * global irqs, and making sure that the gpio chip is stored as the
+ * irq domain's host data. Driver callbacks need to use container_of()
+ * to get their local state containers back from that gpio chip. An
+ * irq domain will be stored in the gb_gpio_controller and used by the
+ * driver to handle IRQ number
+ * translation. The gpio chip will need to be initialized and registered
+ * before calling this function.
+ */
+static int gb_gpio_irqchip_add(struct gpio_chip *chip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ struct gb_gpio_controller *ggc;
+ unsigned int offset;
+ unsigned irq_base;
+
+ if (!chip || !irqchip)
+ return -EINVAL;
+
+ ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ ggc->irqchip = irqchip;
+ ggc->irq_handler = handler;
+ ggc->irq_default_type = type;
+ ggc->irqdomain = irq_domain_add_simple(NULL,
+ ggc->line_max + 1, first_irq,
+ &gb_gpio_domain_ops, chip);
+ if (!ggc->irqdomain) {
+ ggc->irqchip = NULL;
+ return -EINVAL;
+ }
+
+ /*
+ * Prepare the mapping since the irqchip shall be orthogonal to
+ * any gpio calls. If the first_irq was zero, this is
+ * necessary to allocate descriptors for all IRQs.
+ */
+ for (offset = 0; offset < (ggc->line_max + 1); offset++) {
+ irq_base = irq_create_mapping(ggc->irqdomain, offset);
+ if (offset == 0)
+ ggc->irq_base = irq_base;
+ }
+
+ return 0;
+}
+
+static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
+
+ return irq_find_mapping(ggc->irqdomain, offset);
+}
+
+static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_gpio_controller *ggc;
+ struct gpio_chip *gpio;
+ struct irq_chip *irqc;
+ int ret;
+
+ ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
+ if (!ggc)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_gpio_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_ggc_free;
+ }
+
+ ggc->connection = connection;
+ gb_connection_set_data(connection, ggc);
+ ggc->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, ggc);
+
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_gpio_controller_setup(ggc);
+ if (ret)
+ goto exit_connection_disable;
+
+ irqc = &ggc->irqc;
+ irqc->irq_mask = gb_gpio_irq_mask;
+ irqc->irq_unmask = gb_gpio_irq_unmask;
+ irqc->irq_set_type = gb_gpio_irq_set_type;
+ irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
+ irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
+ irqc->name = "greybus_gpio";
+
+ mutex_init(&ggc->irq_lock);
+
+ gpio = &ggc->chip;
+
+ gpio->label = "greybus_gpio";
+ gpio->parent = &gbphy_dev->dev;
+ gpio->owner = THIS_MODULE;
+
+ gpio->request = gb_gpio_request;
+ gpio->free = gb_gpio_free;
+ gpio->get_direction = gb_gpio_get_direction;
+ gpio->direction_input = gb_gpio_direction_input;
+ gpio->direction_output = gb_gpio_direction_output;
+ gpio->get = gb_gpio_get;
+ gpio->set = gb_gpio_set;
+ gpio->set_debounce = gb_gpio_set_debounce;
+ gpio->to_irq = gb_gpio_to_irq;
+ gpio->base = -1; /* Allocate base dynamically */
+ gpio->ngpio = ggc->line_max + 1;
+ gpio->can_sleep = true;
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_line_free;
+
+ ret = gb_gpio_irqchip_add(gpio, irqc, 0,
+ handle_level_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "failed to add irq chip: %d\n", ret);
+ goto exit_line_free;
+ }
+
+ ret = gpiochip_add(gpio);
+ if (ret) {
+ dev_err(&connection->bundle->dev,
+ "failed to add gpio chip: %d\n", ret);
+ goto exit_gpio_irqchip_remove;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_gpio_irqchip_remove:
+ gb_gpio_irqchip_remove(ggc);
+exit_line_free:
+ kfree(ggc->lines);
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_ggc_free:
+ kfree(ggc);
+ return ret;
+}
+
+static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = ggc->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ gb_connection_disable_rx(connection);
+ gpiochip_remove(&ggc->chip);
+ gb_gpio_irqchip_remove(ggc);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ kfree(ggc->lines);
+ kfree(ggc);
+}
+
+static const struct gbphy_device_id gb_gpio_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
+
+static struct gbphy_driver gpio_driver = {
+ .name = "gpio",
+ .probe = gb_gpio_probe,
+ .remove = gb_gpio_remove,
+ .id_table = gb_gpio_id_table,
+};
+
+module_gbphy_driver(gpio_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
new file mode 100644
index 000000000000..12526887ae2e
--- /dev/null
+++ b/drivers/staging/greybus/greybus.h
@@ -0,0 +1,154 @@
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __LINUX_GREYBUS_H
+#define __LINUX_GREYBUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include "greybus_id.h"
+#include "greybus_manifest.h"
+#include "greybus_protocols.h"
+#include "manifest.h"
+#include "hd.h"
+#include "svc.h"
+#include "control.h"
+#include "module.h"
+#include "interface.h"
+#include "bundle.h"
+#include "connection.h"
+#include "operation.h"
+#include "timesync.h"
+
+/* Matches up with the Greybus Protocol specification document */
+#define GREYBUS_VERSION_MAJOR 0x00
+#define GREYBUS_VERSION_MINOR 0x01
+
+#define GREYBUS_ID_MATCH_DEVICE \
+ (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
+
+#define GREYBUS_DEVICE(v, p) \
+ .match_flags = GREYBUS_ID_MATCH_DEVICE, \
+ .vendor = (v), \
+ .product = (p),
+
+#define GREYBUS_DEVICE_CLASS(c) \
+ .match_flags = GREYBUS_ID_MATCH_CLASS, \
+ .class = (c),
+
+/* Maximum number of CPorts */
+#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
+#define CPORT_ID_BAD U16_MAX
+
+struct greybus_driver {
+ const char *name;
+
+ int (*probe)(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id);
+ void (*disconnect)(struct gb_bundle *bundle);
+
+ const struct greybus_bundle_id *id_table;
+
+ struct device_driver driver;
+};
+#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+
+static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
+{
+ dev_set_drvdata(&bundle->dev, data);
+}
+
+static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
+{
+ return dev_get_drvdata(&bundle->dev);
+}
+
+/* Don't call these directly, use the module_greybus_driver() macro instead */
+int greybus_register_driver(struct greybus_driver *driver,
+ struct module *module, const char *mod_name);
+void greybus_deregister_driver(struct greybus_driver *driver);
+
+/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
+#define greybus_register(driver) \
+ greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define greybus_deregister(driver) \
+ greybus_deregister_driver(driver)
+
+/**
+ * module_greybus_driver() - Helper macro for registering a Greybus driver
+ * @__greybus_driver: greybus_driver structure
+ *
+ * Helper macro for Greybus drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_greybus_driver(__greybus_driver) \
+ module_driver(__greybus_driver, greybus_register, greybus_deregister)
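A minimal usage sketch of the registration API above (editorial note, not part of this patch): the example_* names are hypothetical; only the greybus_driver fields, GREYBUS_DEVICE_CLASS() and module_greybus_driver() come from this header (GREYBUS_CLASS_VIBRATOR is from greybus_manifest.h).

    static int example_probe(struct gb_bundle *bundle,
                             const struct greybus_bundle_id *id)
    {
            return 0;               /* bind to this bundle */
    }

    static void example_disconnect(struct gb_bundle *bundle)
    {
    }

    static const struct greybus_bundle_id example_id_table[] = {
            { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
            { }
    };

    static struct greybus_driver example_driver = {
            .name           = "example",
            .probe          = example_probe,
            .disconnect     = example_disconnect,
            .id_table       = example_id_table,
    };
    module_greybus_driver(example_driver);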
+
+int greybus_disabled(void);
+
+void gb_debugfs_init(void);
+void gb_debugfs_cleanup(void);
+struct dentry *gb_debugfs_get(void);
+
+extern struct bus_type greybus_bus_type;
+
+extern struct device_type greybus_hd_type;
+extern struct device_type greybus_module_type;
+extern struct device_type greybus_interface_type;
+extern struct device_type greybus_control_type;
+extern struct device_type greybus_bundle_type;
+extern struct device_type greybus_svc_type;
+
+static inline int is_gb_host_device(const struct device *dev)
+{
+ return dev->type == &greybus_hd_type;
+}
+
+static inline int is_gb_module(const struct device *dev)
+{
+ return dev->type == &greybus_module_type;
+}
+
+static inline int is_gb_interface(const struct device *dev)
+{
+ return dev->type == &greybus_interface_type;
+}
+
+static inline int is_gb_control(const struct device *dev)
+{
+ return dev->type == &greybus_control_type;
+}
+
+static inline int is_gb_bundle(const struct device *dev)
+{
+ return dev->type == &greybus_bundle_type;
+}
+
+static inline int is_gb_svc(const struct device *dev)
+{
+ return dev->type == &greybus_svc_type;
+}
+
+static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
+{
+ return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_GREYBUS_H */
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
new file mode 100644
index 000000000000..4784ed98e8a3
--- /dev/null
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -0,0 +1,120 @@
+/*
+ * Greybus Component Authentication User Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_AUTHENTICATION_USER_H
+#define __GREYBUS_AUTHENTICATION_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define CAP_CERTIFICATE_MAX_SIZE 1600
+#define CAP_SIGNATURE_MAX_SIZE 320
+
+/* Certificate class types */
+#define CAP_CERT_IMS_EAPC 0x00000001
+#define CAP_CERT_IMS_EASC 0x00000002
+#define CAP_CERT_IMS_EARC 0x00000003
+#define CAP_CERT_IMS_IAPC 0x00000004
+#define CAP_CERT_IMS_IASC 0x00000005
+#define CAP_CERT_IMS_IARC 0x00000006
+
+/* IMS Certificate response result codes */
+#define CAP_IMS_RESULT_CERT_FOUND 0x00
+#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
+#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
+#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
+
+/* Authentication types */
+#define CAP_AUTH_IMS_PRI 0x00000001
+#define CAP_AUTH_IMS_SEC 0x00000002
+#define CAP_AUTH_IMS_RSA 0x00000003
+
+/* Authenticate response result codes */
+#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
+#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
+#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
+#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
+#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
+
+
+/* IOCTL support */
+struct cap_ioc_get_endpoint_uid {
+ __u8 uid[8];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_get_ims_certificate {
+ __u32 certificate_class;
+ __u32 certificate_id;
+
+ __u8 result_code;
+ __u32 cert_size;
+ __u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+struct cap_ioc_authenticate {
+ __u32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+
+ __u8 result_code;
+ __u8 response[64];
+ __u32 signature_size;
+ __u8 signature[CAP_SIGNATURE_MAX_SIZE];
+} __attribute__ ((__packed__));
+
+#define CAP_IOCTL_BASE 'C'
+#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
+#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
+#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
+
+#endif /* __GREYBUS_AUTHENTICATION_USER_H */
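A hedged userspace sketch of how the ioctls above are meant to be driven (editorial note, not part of this patch). The /dev node name is an assumption; only the ioctl numbers and structures come from this header.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "greybus_authentication.h"

    int main(void)
    {
            struct cap_ioc_get_endpoint_uid ep;
            int fd = open("/dev/gb-authenticate-0", O_RDWR); /* assumed node name */

            if (fd < 0)
                    return 1;

            if (ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &ep) == 0)
                    printf("endpoint uid byte 0: 0x%02x\n", ep.uid[0]);

            close(fd);
            return 0;
    }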
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
new file mode 100644
index 000000000000..277a2acce6fd
--- /dev/null
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -0,0 +1,120 @@
+/*
+ * Greybus Firmware Management User Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_FIRMWARE_USER_H
+#define __GREYBUS_FIRMWARE_USER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define GB_FIRMWARE_U_TAG_MAX_SIZE 10
+
+#define GB_FW_U_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_U_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_U_LOAD_STATUS_FAILED 0x00
+#define GB_FW_U_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_U_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_U_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_U_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+/* IOCTL support */
+struct fw_mgmt_ioc_get_intf_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_get_backend_version {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u16 major;
+ __u16 minor;
+ __u8 status;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_intf_load_and_validate {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 load_method;
+ __u8 status;
+ __u16 major;
+ __u16 minor;
+} __attribute__ ((__packed__));
+
+struct fw_mgmt_ioc_backend_fw_update {
+ __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
+ __u8 status;
+} __attribute__ ((__packed__));
+
+#define FW_MGMT_IOCTL_BASE 'F'
+#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
+#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
+#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
+#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
+#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
+#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
+
+#endif /* __GREYBUS_FIRMWARE_USER_H */
+
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
new file mode 100644
index 000000000000..4bb1fc1b811d
--- /dev/null
+++ b/drivers/staging/greybus/greybus_id.h
@@ -0,0 +1,26 @@
+/* FIXME
+ * move this to include/linux/mod_devicetable.h when merging
+ */
+
+#ifndef __LINUX_GREYBUS_ID_H
+#define __LINUX_GREYBUS_ID_H
+
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+
+struct greybus_bundle_id {
+ __u16 match_flags;
+ __u32 vendor;
+ __u32 product;
+ __u8 class;
+
+ kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
+};
+
+/* Used to match the greybus_bundle_id */
+#define GREYBUS_ID_MATCH_VENDOR BIT(0)
+#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
+#define GREYBUS_ID_MATCH_CLASS BIT(2)
+
+#endif /* __LINUX_GREYBUS_ID_H */
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
new file mode 100644
index 000000000000..d135945cefe1
--- /dev/null
+++ b/drivers/staging/greybus/greybus_manifest.h
@@ -0,0 +1,177 @@
+/*
+ * Greybus manifest definition
+ *
+ * See "Greybus Application Protocol" document (version 0.1) for
+ * details on these values and structures.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 and BSD licenses.
+ */
+
+#ifndef __GREYBUS_MANIFEST_H
+#define __GREYBUS_MANIFEST_H
+
+enum greybus_descriptor_type {
+ GREYBUS_TYPE_INVALID = 0x00,
+ GREYBUS_TYPE_INTERFACE = 0x01,
+ GREYBUS_TYPE_STRING = 0x02,
+ GREYBUS_TYPE_BUNDLE = 0x03,
+ GREYBUS_TYPE_CPORT = 0x04,
+};
+
+enum greybus_protocol {
+ GREYBUS_PROTOCOL_CONTROL = 0x00,
+ /* 0x01 is unused */
+ GREYBUS_PROTOCOL_GPIO = 0x02,
+ GREYBUS_PROTOCOL_I2C = 0x03,
+ GREYBUS_PROTOCOL_UART = 0x04,
+ GREYBUS_PROTOCOL_HID = 0x05,
+ GREYBUS_PROTOCOL_USB = 0x06,
+ GREYBUS_PROTOCOL_SDIO = 0x07,
+ GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
+ GREYBUS_PROTOCOL_PWM = 0x09,
+ /* 0x0a is unused */
+ GREYBUS_PROTOCOL_SPI = 0x0b,
+ GREYBUS_PROTOCOL_DISPLAY = 0x0c,
+ GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
+ GREYBUS_PROTOCOL_SENSOR = 0x0e,
+ GREYBUS_PROTOCOL_LIGHTS = 0x0f,
+ GREYBUS_PROTOCOL_VIBRATOR = 0x10,
+ GREYBUS_PROTOCOL_LOOPBACK = 0x11,
+ GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
+ GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
+ GREYBUS_PROTOCOL_SVC = 0x14,
+ GREYBUS_PROTOCOL_BOOTROM = 0x15,
+ GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
+ GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
+ GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
+ GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
+ GREYBUS_PROTOCOL_LOG = 0x1a,
+ /* ... */
+ GREYBUS_PROTOCOL_RAW = 0xfe,
+ GREYBUS_PROTOCOL_VENDOR = 0xff,
+};
+
+enum greybus_class_type {
+ GREYBUS_CLASS_CONTROL = 0x00,
+ /* 0x01 is unused */
+ /* 0x02 is unused */
+ /* 0x03 is unused */
+ /* 0x04 is unused */
+ GREYBUS_CLASS_HID = 0x05,
+ /* 0x06 is unused */
+ /* 0x07 is unused */
+ GREYBUS_CLASS_POWER_SUPPLY = 0x08,
+ /* 0x09 is unused */
+ GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
+ /* 0x0b is unused */
+ GREYBUS_CLASS_DISPLAY = 0x0c,
+ GREYBUS_CLASS_CAMERA = 0x0d,
+ GREYBUS_CLASS_SENSOR = 0x0e,
+ GREYBUS_CLASS_LIGHTS = 0x0f,
+ GREYBUS_CLASS_VIBRATOR = 0x10,
+ GREYBUS_CLASS_LOOPBACK = 0x11,
+ GREYBUS_CLASS_AUDIO = 0x12,
+ /* 0x13 is unused */
+ /* 0x14 is unused */
+ GREYBUS_CLASS_BOOTROM = 0x15,
+ GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
+ GREYBUS_CLASS_LOG = 0x17,
+ /* ... */
+ GREYBUS_CLASS_RAW = 0xfe,
+ GREYBUS_CLASS_VENDOR = 0xff,
+};
+
+enum {
+ GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
+};
+
+/*
+ * The string in a string descriptor is not NUL-terminated. The
+ * size of the descriptor will be rounded up to a multiple of 4
+ * bytes, by padding the string with 0x00 bytes if necessary.
+ */
+struct greybus_descriptor_string {
+ __u8 length;
+ __u8 id;
+ __u8 string[0];
+} __packed;
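A worked example of the padding rule above (editorial note): a 5-character string has length = 5, so its payload is 1 (length) + 1 (id) + 5 = 7 bytes; together with the 4-byte descriptor header defined below that is 11 bytes, and one 0x00 pad byte rounds the descriptor up to 12 bytes while the length field still reports 5.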
+
+/*
+ * An interface descriptor describes information about an interface as a whole,
+ * *not* the functions within it.
+ */
+struct greybus_descriptor_interface {
+ __u8 vendor_stringid;
+ __u8 product_stringid;
+ __u8 features;
+ __u8 pad;
+} __packed;
+
+/*
+ * A bundle descriptor defines an identification number and a class for
+ * each bundle.
+ *
+ * @id: Uniquely identifies a bundle within an interface; its sole purpose is
+ * to allow CPort descriptors to specify which bundle they are associated with.
+ * The first bundle will have id 0, the second id 1, and so on.
+ *
+ * The largest CPort id associated with a bundle (defined by a
+ * CPort descriptor in the manifest) is used to determine how to
+ * encode the device id and module number in UniPro packets
+ * that use the bundle.
+ *
+ * @class: Used by the kernel to know the functionality provided by the
+ * bundle; it is matched against a driver's declared functionality when probing
+ * the greybus driver. It should contain one of the values defined in
+ * 'enum greybus_class_type'.
+ *
+ */
+struct greybus_descriptor_bundle {
+ __u8 id; /* interface-relative id (0..) */
+ __u8 class;
+ __u8 pad[2];
+} __packed;
+
+/*
+ * A CPort descriptor indicates the id of the bundle within the
+ * module it's associated with, along with the CPort id used to
+ * address the CPort. The protocol id defines the format of messages
+ * exchanged using the CPort.
+ */
+struct greybus_descriptor_cport {
+ __le16 id;
+ __u8 bundle;
+ __u8 protocol_id; /* enum greybus_protocol */
+} __packed;
+
+struct greybus_descriptor_header {
+ __le16 size;
+ __u8 type; /* enum greybus_descriptor_type */
+ __u8 pad;
+} __packed;
+
+struct greybus_descriptor {
+ struct greybus_descriptor_header header;
+ union {
+ struct greybus_descriptor_string string;
+ struct greybus_descriptor_interface interface;
+ struct greybus_descriptor_bundle bundle;
+ struct greybus_descriptor_cport cport;
+ };
+} __packed;
+
+struct greybus_manifest_header {
+ __le16 size;
+ __u8 version_major;
+ __u8 version_minor;
+} __packed;
+
+struct greybus_manifest {
+ struct greybus_manifest_header header;
+ struct greybus_descriptor descriptors[0];
+} __packed;
+
+#endif /* __GREYBUS_MANIFEST_H */
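An editorial sketch (not part of this patch) of how the descriptors above compose into a minimal manifest with one interface, one bundle and one CPort. It assumes each descriptor's size field covers its header plus payload, and omits the vendor/product string descriptors for brevity.

    struct example_manifest {
            struct greybus_manifest_header header;       /* size of the whole blob */

            struct greybus_descriptor_header if_hdr;     /* type = GREYBUS_TYPE_INTERFACE */
            struct greybus_descriptor_interface intf;

            struct greybus_descriptor_header bundle_hdr; /* type = GREYBUS_TYPE_BUNDLE */
            struct greybus_descriptor_bundle bundle;     /* class = GREYBUS_CLASS_BRIDGED_PHY */

            struct greybus_descriptor_header cport_hdr;  /* type = GREYBUS_TYPE_CPORT */
            struct greybus_descriptor_cport cport;       /* protocol_id = GREYBUS_PROTOCOL_GPIO */
    } __packed;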
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
new file mode 100644
index 000000000000..639578309c2a
--- /dev/null
+++ b/drivers/staging/greybus/greybus_protocols.h
@@ -0,0 +1,2268 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
+ * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GREYBUS_PROTOCOLS_H
+#define __GREYBUS_PROTOCOLS_H
+
+/* Fixed IDs for control/svc protocols */
+
+/* SVC switch-port device ids */
+#define GB_SVC_DEVICE_ID_SVC 0
+#define GB_SVC_DEVICE_ID_AP 1
+#define GB_SVC_DEVICE_ID_MIN 2
+#define GB_SVC_DEVICE_ID_MAX 31
+
+#define GB_SVC_CPORT_ID 0
+#define GB_CONTROL_BUNDLE_ID 0
+#define GB_CONTROL_CPORT_ID 0
+
+
+/*
+ * All operation messages (both requests and responses) begin with
+ * a header that encodes the size of the message (header included).
+ * This header also contains a unique identifier, that associates a
+ * response message with its operation. The header contains an
+ * operation type field, whose interpretation is dependent on what
+ * type of protocol is used over the connection. The high bit
+ * (0x80) of the operation type field is used to indicate whether
+ * the message is a request (clear) or a response (set).
+ *
+ * Response messages include an additional result byte, which
+ * communicates the result of the corresponding request. A zero
+ * result value means the operation completed successfully. Any
+ * other value indicates an error; in this case, the payload of the
+ * response message (if any) is ignored. The result byte must be
+ * zero in the header for a request message.
+ *
+ * The wire format for all numeric fields in the header is little
+ * endian. Any operation-specific data begins immediately after the
+ * header.
+ */
+struct gb_operation_msg_hdr {
+ __le16 size; /* Size in bytes of header + payload */
+ __le16 operation_id; /* Operation unique id */
+ __u8 type; /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
+ __u8 result; /* Result of request (in responses only) */
+ __u8 pad[2]; /* must be zero (ignore when read) */
+} __packed;
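A hedged illustration of the header rules above (editorial note, not part of this patch), using GB_CONTROL_TYPE_GET_MANIFEST_SIZE (0x03, defined below), which has no request payload and a 2-byte response payload:

    struct gb_operation_msg_hdr hdr = {
            .size         = cpu_to_le16(sizeof(hdr)),    /* 0x0008: header only */
            .operation_id = cpu_to_le16(1),
            .type         = GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
            .result       = 0,                           /* must be zero in a request */
    };
    /*
     * The matching response then carries type 0x83 (0x80 | 0x03), the
     * same operation_id, a result byte, and size 0x000a to cover its
     * 2-byte little-endian payload.
     */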
+
+
+/* Generic request types */
+#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
+#define GB_REQUEST_TYPE_INVALID 0x7f
+
+struct gb_cport_shutdown_request {
+ __u8 phase;
+} __packed;
+
+
+/* Control Protocol */
+
+/* Greybus control request types */
+#define GB_CONTROL_TYPE_VERSION 0x01
+#define GB_CONTROL_TYPE_PROBE_AP 0x02
+#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
+#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
+#define GB_CONTROL_TYPE_CONNECTED 0x05
+#define GB_CONTROL_TYPE_DISCONNECTED 0x06
+#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
+#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
+#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
+/* Unused 0x0a */
+#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
+#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
+#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
+#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
+#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
+#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
+#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
+#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
+#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
+#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
+#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
+
+struct gb_control_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_bundle_version_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Control protocol manifest get size request has no payload */
+struct gb_control_get_manifest_size_response {
+ __le16 size;
+} __packed;
+
+/* Control protocol manifest get request has no payload */
+struct gb_control_get_manifest_response {
+ __u8 data[0];
+} __packed;
+
+/* Control protocol [dis]connected request */
+struct gb_control_connected_request {
+ __le16 cport_id;
+} __packed;
+
+struct gb_control_disconnecting_request {
+ __le16 cport_id;
+} __packed;
+/* disconnecting response has no payload */
+
+struct gb_control_disconnected_request {
+ __le16 cport_id;
+} __packed;
+/* Control protocol [dis]connected response has no payload */
+
+#define GB_TIMESYNC_MAX_STROBES 0x04
+
+struct gb_control_timesync_enable_request {
+ __u8 count;
+ __le64 frame_time;
+ __le32 strobe_delay;
+ __le32 refclk;
+} __packed;
+/* timesync enable response has no payload */
+
+struct gb_control_timesync_authoritative_request {
+ __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
+} __packed;
+/* timesync authoritative response has no payload */
+
+/* timesync get_last_event_request has no payload */
+struct gb_control_timesync_get_last_event_response {
+ __le64 frame_time;
+} __packed;
+
+/*
+ * All Bundle power management operations use the same request and response
+ * layout and status codes.
+ */
+
+#define GB_CONTROL_BUNDLE_PM_OK 0x00
+#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
+#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
+#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
+#define GB_CONTROL_BUNDLE_PM_NA 0x04
+
+struct gb_control_bundle_pm_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_pm_response {
+ __u8 status;
+} __packed;
+
+/*
+ * Interface Suspend Prepare and Deactivate Prepare operations use the same
+ * response layout and error codes. Define a single response structure and reuse
+ * it. Neither operation has a request payload.
+ */
+
+#define GB_CONTROL_INTF_PM_OK 0x00
+#define GB_CONTROL_INTF_PM_BUSY 0x01
+#define GB_CONTROL_INTF_PM_NA 0x02
+
+struct gb_control_intf_pm_response {
+ __u8 status;
+} __packed;
+
+/* APBridge protocol */
+
+/* request APB1 log */
+#define GB_APB_REQUEST_LOG 0x02
+
+/* request to map a cport to bulk in and bulk out endpoints */
+#define GB_APB_REQUEST_EP_MAPPING 0x03
+
+/* request to get the number of cports available */
+#define GB_APB_REQUEST_CPORT_COUNT 0x04
+
+/* request to reset a cport state */
+#define GB_APB_REQUEST_RESET_CPORT 0x05
+
+/* request to time the latency of messages on a given cport */
+#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
+#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
+
+/* request to control the CSI transmitter */
+#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
+
+/* request to control audio streaming */
+#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
+
+/* TimeSync requests */
+#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
+#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
+#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
+
+/* requests to set Greybus CPort flags */
+#define GB_APB_REQUEST_CPORT_FLAGS 0x11
+
+/* ARPC request */
+#define GB_APB_REQUEST_ARPC_RUN 0x12
+
+struct gb_apb_request_cport_flags {
+ __le32 flags;
+#define GB_APB_CPORT_FLAG_CONTROL 0x01
+#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
+} __packed;
+
+
+/* Firmware Download Protocol */
+
+/* Request Types */
+#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
+#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
+#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
+
+#define GB_FIRMWARE_TAG_MAX_SIZE 10
+
+/* firmware download find firmware request/response */
+struct gb_fw_download_find_firmware_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_download_find_firmware_response {
+ __u8 firmware_id;
+ __le32 size;
+} __packed;
+
+/* firmware download fetch firmware request/response */
+struct gb_fw_download_fetch_firmware_request {
+ __u8 firmware_id;
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+struct gb_fw_download_fetch_firmware_response {
+ __u8 data[0];
+} __packed;
+
+/* firmware download release firmware request */
+struct gb_fw_download_release_firmware_request {
+ __u8 firmware_id;
+} __packed;
+/* firmware download release firmware response has no payload */
+
+
+/* Firmware Management Protocol */
+
+/* Request Types */
+#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
+#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
+#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
+#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
+
+#define GB_FW_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_LOAD_STATUS_FAILED 0x00
+#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+/* firmware management interface firmware version request has no payload */
+struct gb_fw_mgmt_interface_fw_version_response {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+ __le16 major;
+ __le16 minor;
+} __packed;
+
+/* firmware management load and validate firmware request/response */
+struct gb_fw_mgmt_load_and_validate_fw_request {
+ __u8 request_id;
+ __u8 load_method;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management load and validate firmware response has no payload */
+
+/* firmware management loaded firmware request */
+struct gb_fw_mgmt_loaded_fw_request {
+ __u8 request_id;
+ __u8 status;
+ __le16 major;
+ __le16 minor;
+} __packed;
+/* firmware management loaded firmware response has no payload */
+
+/* firmware management backend firmware version request/response */
+struct gb_fw_mgmt_backend_fw_version_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_mgmt_backend_fw_version_response {
+ __le16 major;
+ __le16 minor;
+ __u8 status;
+} __packed;
+
+/* firmware management backend firmware update request */
+struct gb_fw_mgmt_backend_fw_update_request {
+ __u8 request_id;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management backend firmware update response has no payload */
+
+/* firmware management backend firmware updated request */
+struct gb_fw_mgmt_backend_fw_updated_request {
+ __u8 request_id;
+ __u8 status;
+} __packed;
+/* firmware management backend firmware updated response has no payload */
+
+
+/* Component Authentication Protocol (CAP) */
+
+/* Request Types */
+#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
+#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
+#define GB_CAP_TYPE_AUTHENTICATE 0x03
+
+/* CAP get endpoint uid request has no payload */
+struct gb_cap_get_endpoint_uid_response {
+ __u8 uid[8];
+} __packed;
+
+/* CAP get endpoint ims certificate request/response */
+struct gb_cap_get_ims_certificate_request {
+ __le32 certificate_class;
+ __le32 certificate_id;
+} __packed;
+
+struct gb_cap_get_ims_certificate_response {
+ __u8 result_code;
+ __u8 certificate[0];
+} __packed;
+
+/* CAP authenticate request/response */
+struct gb_cap_authenticate_request {
+ __le32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+} __packed;
+
+struct gb_cap_authenticate_response {
+ __u8 result_code;
+ __u8 response[64];
+ __u8 signature[0];
+} __packed;
+
+
+/* Bootrom Protocol */
+
+/* Version of the Greybus bootrom protocol we support */
+#define GB_BOOTROM_VERSION_MAJOR 0x00
+#define GB_BOOTROM_VERSION_MINOR 0x01
+
+/* Greybus bootrom request types */
+#define GB_BOOTROM_TYPE_VERSION 0x01
+#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
+#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
+#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
+#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */
+#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */
+
+/* Greybus bootrom boot stages */
+#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
+
+/* Greybus bootrom ready to boot status */
+#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
+#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
+#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
+
+/* Max bootrom data fetch size in bytes */
+#define GB_BOOTROM_FETCH_MAX 2000
+
+struct gb_bootrom_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_bootrom_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Bootrom protocol firmware size request/response */
+struct gb_bootrom_firmware_size_request {
+ __u8 stage;
+} __packed;
+
+struct gb_bootrom_firmware_size_response {
+ __le32 size;
+} __packed;
+
+/* Bootrom protocol get firmware request/response */
+struct gb_bootrom_get_firmware_request {
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+struct gb_bootrom_get_firmware_response {
+ __u8 data[0];
+} __packed;
+
+/* Bootrom protocol Ready to boot request */
+struct gb_bootrom_ready_to_boot_request {
+ __u8 status;
+} __packed;
+/* Bootrom protocol Ready to boot response has no payload */
+
+/* Bootrom protocol get VID/PID request has no payload */
+struct gb_bootrom_get_vid_pid_response {
+ __le32 vendor_id;
+ __le32 product_id;
+} __packed;
+
+
+/* Power Supply */
+
+/* Greybus power supply request types */
+#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
+#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
+#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
+#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
+#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
+#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
+
+/* Greybus power supply battery technologies types */
+#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
+#define GB_POWER_SUPPLY_TECH_LION 0x0002
+#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
+#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
+#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
+#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
+
+/* Greybus power supply types */
+#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
+#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
+#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
+#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
+#define GB_POWER_SUPPLY_USB_TYPE 0x0004
+#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
+#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
+#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
+
+/* Greybus power supply health values */
+#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
+#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
+#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
+#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
+#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
+#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
+#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
+#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
+
+/* Greybus power supply status values */
+#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
+#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
+#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
+#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
+
+/* Greybus power supply capacity level values */
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
+
+/* Greybus power supply scope values */
+#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
+#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
+
+struct gb_power_supply_get_supplies_response {
+ __u8 supplies_count;
+} __packed;
+
+struct gb_power_supply_get_description_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_description_response {
+ __u8 manufacturer[32];
+ __u8 model[32];
+ __u8 serial_number[32];
+ __le16 type;
+ __u8 properties_count;
+} __packed;
+
+struct gb_power_supply_props_desc {
+ __u8 property;
+#define GB_POWER_SUPPLY_PROP_STATUS 0x00
+#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
+#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
+#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
+#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
+#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
+#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
+#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
+#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
+#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
+#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
+#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
+#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
+#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
+#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
+#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
+#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
+#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
+#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
+#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
+#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
+#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
+#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
+#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
+#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
+#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
+#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
+#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
+#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
+ __u8 is_writeable;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_response {
+ __u8 properties_count;
+ struct gb_power_supply_props_desc props[];
+} __packed;
+
+struct gb_power_supply_get_property_request {
+ __u8 psy_id;
+ __u8 property;
+} __packed;
+
+struct gb_power_supply_get_property_response {
+ __le32 prop_val;
+} __packed;
+
+struct gb_power_supply_set_property_request {
+ __u8 psy_id;
+ __u8 property;
+ __le32 prop_val;
+} __packed;
+
+struct gb_power_supply_event_request {
+ __u8 psy_id;
+ __u8 event;
+#define GB_POWER_SUPPLY_UPDATE 0x01
+} __packed;
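+
+/*
+ * Illustrative sketch (not part of the protocol definition): reading one
+ * property from supply 0, assuming the Greybus core helper
+ * gb_operation_sync() from operation.h is in scope, as it would be in a
+ * protocol driver's .c file.
+ */
+static inline int
+gb_power_supply_example_get_status(struct gb_connection *connection, u32 *val)
+{
+	struct gb_power_supply_get_property_request request = {
+		.psy_id = 0,
+		.property = GB_POWER_SUPPLY_PROP_STATUS,
+	};
+	struct gb_power_supply_get_property_response response;
+	int ret;
+
+	ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_GET_PROPERTY,
+				&request, sizeof(request),
+				&response, sizeof(response));
+	if (ret)
+		return ret;
+
+	*val = le32_to_cpu(response.prop_val);
+	return 0;
+}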
+
+
+/* HID */
+
+/* Greybus HID operation types */
+#define GB_HID_TYPE_GET_DESC 0x02
+#define GB_HID_TYPE_GET_REPORT_DESC 0x03
+#define GB_HID_TYPE_PWR_ON 0x04
+#define GB_HID_TYPE_PWR_OFF 0x05
+#define GB_HID_TYPE_GET_REPORT 0x06
+#define GB_HID_TYPE_SET_REPORT 0x07
+#define GB_HID_TYPE_IRQ_EVENT 0x08
+
+/* Report type */
+#define GB_HID_INPUT_REPORT 0
+#define GB_HID_OUTPUT_REPORT 1
+#define GB_HID_FEATURE_REPORT 2
+
+/* Different request/response structures */
+/* HID get descriptor response */
+struct gb_hid_desc_response {
+ __u8 bLength;
+ __le16 wReportDescLength;
+ __le16 bcdHID;
+ __le16 wProductID;
+ __le16 wVendorID;
+ __u8 bCountryCode;
+} __packed;
+
+/* HID get report request/response */
+struct gb_hid_get_report_request {
+ __u8 report_type;
+ __u8 report_id;
+} __packed;
+
+/* HID set report request */
+struct gb_hid_set_report_request {
+ __u8 report_type;
+ __u8 report_id;
+ __u8 report[0];
+} __packed;
+
+/* HID input report request, via interrupt pipe */
+struct gb_hid_input_report_request {
+ __u8 report[0];
+} __packed;
+
+
+/* I2C */
+
+/* Greybus i2c request types */
+#define GB_I2C_TYPE_FUNCTIONALITY 0x02
+#define GB_I2C_TYPE_TRANSFER 0x05
+
+/* functionality request has no payload */
+struct gb_i2c_functionality_response {
+ __le32 functionality;
+} __packed;
+
+/*
+ * Outgoing data immediately follows the op count and ops array.
+ * The data for each write (master -> slave) op in the array is sent
+ * in order, with no (e.g. pad) bytes separating them.
+ *
+ * Short reads cause the entire transfer request to fail, so the
+ * response payload consists only of the bytes read, and the number of
+ * bytes is exactly what was specified in the corresponding op.  Like
+ * outgoing data, the incoming data is in order and contiguous.
+ */
+struct gb_i2c_transfer_op {
+ __le16 addr;
+ __le16 flags;
+ __le16 size;
+} __packed;
+
+struct gb_i2c_transfer_request {
+ __le16 op_count;
+ struct gb_i2c_transfer_op ops[0]; /* op_count of these */
+} __packed;
+struct gb_i2c_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
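+
+/*
+ * Illustrative sketch (not part of the protocol definition): packing a
+ * 2-byte register write followed by a 4-byte read into one transfer
+ * request.  Only write ops contribute outgoing data after the ops[]
+ * array; read data comes back in the response payload.  The op flags are
+ * assumed to mirror linux/i2c.h (I2C_M_RD == 0x0001), and the usual
+ * kernel helpers (memcpy(), cpu_to_le16()) are assumed to be in scope.
+ */
+static inline size_t gb_i2c_example_pack(void *buf, u16 addr, const u8 reg[2])
+{
+	struct gb_i2c_transfer_request *request = buf;
+	struct gb_i2c_transfer_op *op = request->ops;
+	u8 *write_data = (u8 *)&op[2];		/* data follows the two ops */
+
+	request->op_count = cpu_to_le16(2);
+
+	op[0].addr = cpu_to_le16(addr);		/* write register address */
+	op[0].flags = cpu_to_le16(0);
+	op[0].size = cpu_to_le16(2);
+
+	op[1].addr = cpu_to_le16(addr);		/* read back 4 bytes */
+	op[1].flags = cpu_to_le16(0x0001);	/* I2C_M_RD (assumed) */
+	op[1].size = cpu_to_le16(4);
+
+	memcpy(write_data, reg, 2);
+
+	return sizeof(*request) + 2 * sizeof(*op) + 2;
+}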
+
+
+/* GPIO */
+
+/* Greybus GPIO request types */
+#define GB_GPIO_TYPE_LINE_COUNT 0x02
+#define GB_GPIO_TYPE_ACTIVATE 0x03
+#define GB_GPIO_TYPE_DEACTIVATE 0x04
+#define GB_GPIO_TYPE_GET_DIRECTION 0x05
+#define GB_GPIO_TYPE_DIRECTION_IN 0x06
+#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
+#define GB_GPIO_TYPE_GET_VALUE 0x08
+#define GB_GPIO_TYPE_SET_VALUE 0x09
+#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
+#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
+#define GB_GPIO_TYPE_IRQ_MASK 0x0c
+#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
+#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
+
+#define GB_GPIO_IRQ_TYPE_NONE 0x00
+#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+/* line count request has no payload */
+struct gb_gpio_line_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_gpio_activate_request {
+ __u8 which;
+} __packed;
+/* activate response has no payload */
+
+struct gb_gpio_deactivate_request {
+ __u8 which;
+} __packed;
+/* deactivate response has no payload */
+
+struct gb_gpio_get_direction_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_direction_response {
+ __u8 direction;
+} __packed;
+
+struct gb_gpio_direction_in_request {
+ __u8 which;
+} __packed;
+/* direction in response has no payload */
+
+struct gb_gpio_direction_out_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* direction out response has no payload */
+
+struct gb_gpio_get_value_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_value_response {
+ __u8 value;
+} __packed;
+
+struct gb_gpio_set_value_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* set value response has no payload */
+
+struct gb_gpio_set_debounce_request {
+ __u8 which;
+ __le16 usec;
+} __packed;
+/* debounce response has no payload */
+
+struct gb_gpio_irq_type_request {
+ __u8 which;
+ __u8 type;
+} __packed;
+/* irq type response has no payload */
+
+struct gb_gpio_irq_mask_request {
+ __u8 which;
+} __packed;
+/* irq mask response has no payload */
+
+struct gb_gpio_irq_unmask_request {
+ __u8 which;
+} __packed;
+/* irq unmask response has no payload */
+
+/* irq event requests originate on another module and are handled on the AP */
+struct gb_gpio_irq_event_request {
+ __u8 which;
+} __packed;
+/* irq event has no response */
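+
+/*
+ * Illustrative sketch (not part of the protocol definition): configuring
+ * line 3 as an output driven high, assuming the Greybus core helper
+ * gb_operation_sync() from operation.h is in scope, as it would be in a
+ * protocol driver's .c file.
+ */
+static inline int gb_gpio_example_drive_high(struct gb_connection *connection)
+{
+	struct gb_gpio_activate_request activate = { .which = 3 };
+	struct gb_gpio_direction_out_request out = { .which = 3, .value = 1 };
+	int ret;
+
+	ret = gb_operation_sync(connection, GB_GPIO_TYPE_ACTIVATE,
+				&activate, sizeof(activate), NULL, 0);
+	if (ret)
+		return ret;
+
+	/* neither of these operations carries a response payload */
+	return gb_operation_sync(connection, GB_GPIO_TYPE_DIRECTION_OUT,
+				 &out, sizeof(out), NULL, 0);
+}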
+
+
+/* PWM */
+
+/* Greybus PWM operation types */
+#define GB_PWM_TYPE_PWM_COUNT 0x02
+#define GB_PWM_TYPE_ACTIVATE 0x03
+#define GB_PWM_TYPE_DEACTIVATE 0x04
+#define GB_PWM_TYPE_CONFIG 0x05
+#define GB_PWM_TYPE_POLARITY 0x06
+#define GB_PWM_TYPE_ENABLE 0x07
+#define GB_PWM_TYPE_DISABLE 0x08
+
+/* pwm count request has no payload */
+struct gb_pwm_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_pwm_activate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_deactivate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_config_request {
+ __u8 which;
+ __le32 duty;
+ __le32 period;
+} __packed;
+
+struct gb_pwm_polarity_request {
+ __u8 which;
+ __u8 polarity;
+} __packed;
+
+struct gb_pwm_enable_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_disable_request {
+ __u8 which;
+} __packed;
+
+/* SPI */
+
+/* Should match up with modes in linux/spi/spi.h */
+#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
+#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
+#define GB_SPI_MODE_MODE_0 (0|0) /* (original MicroWire) */
+#define GB_SPI_MODE_MODE_1 (0|GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL|0)
+#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL|GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
+#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
+#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
+#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
+#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
+
+/* Should match up with flags in linux/spi/spi.h */
+#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
+#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
+#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
+
+/* Greybus spi operation types */
+#define GB_SPI_TYPE_MASTER_CONFIG 0x02
+#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
+#define GB_SPI_TYPE_TRANSFER 0x04
+
+/* master config request has no payload */
+struct gb_spi_master_config_response {
+ __le32 bits_per_word_mask;
+ __le32 min_speed_hz;
+ __le32 max_speed_hz;
+ __le16 mode;
+ __le16 flags;
+ __u8 num_chipselect;
+} __packed;
+
+struct gb_spi_device_config_request {
+ __u8 chip_select;
+} __packed;
+
+struct gb_spi_device_config_response {
+ __le16 mode;
+ __u8 bits_per_word;
+ __le32 max_speed_hz;
+ __u8 device_type;
+#define GB_SPI_SPI_DEV 0x00
+#define GB_SPI_SPI_NOR 0x01
+#define GB_SPI_SPI_MODALIAS 0x02
+ __u8 name[32];
+} __packed;
+
+/**
+ * struct gb_spi_transfer - a read/write buffer pair
+ * @speed_hz: select a speed other than the device default for this transfer;
+ * if 0, the default from the device configuration is used
+ * @len: size of rx and tx buffers (in bytes)
+ * @delay_usecs: microseconds to delay after this transfer before (optionally)
+ * changing the chipselect status, then starting the next transfer or
+ * completing the transfer request
+ * @cs_change: affects chipselect after this transfer completes
+ * @bits_per_word: select a bits_per_word other than the device default for this
+ * transfer; if 0, the default from the device configuration is used
+ * @xfer_flags: GB_SPI_XFER_* flags selecting the direction(s) of this transfer
+ */
+struct gb_spi_transfer {
+ __le32 speed_hz;
+ __le32 len;
+ __le16 delay_usecs;
+ __u8 cs_change;
+ __u8 bits_per_word;
+ __u8 xfer_flags;
+#define GB_SPI_XFER_READ 0x01
+#define GB_SPI_XFER_WRITE 0x02
+#define GB_SPI_XFER_INPROGRESS 0x04
+} __packed;
+
+struct gb_spi_transfer_request {
+ __u8 chip_select; /* of the spi device */
+ __u8 mode; /* of the spi device */
+ __le16 count;
+ struct gb_spi_transfer transfers[0]; /* count of these */
+} __packed;
+
+struct gb_spi_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
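+
+/*
+ * Illustrative sketch (not part of the protocol definition): one
+ * write-only transfer on chip-select 0.  Write payloads are assumed to
+ * follow the transfers[] array, in the order of the transfers that set
+ * GB_SPI_XFER_WRITE, with read data returned in the response payload.
+ */
+static inline size_t gb_spi_example_pack(void *buf, const u8 *tx, u32 len)
+{
+	struct gb_spi_transfer_request *request = buf;
+	struct gb_spi_transfer *xfer = request->transfers;
+
+	request->chip_select = 0;
+	request->mode = GB_SPI_MODE_MODE_0;
+	request->count = cpu_to_le16(1);
+
+	xfer->speed_hz = 0;			/* 0: device default */
+	xfer->len = cpu_to_le32(len);
+	xfer->delay_usecs = 0;
+	xfer->cs_change = 0;
+	xfer->bits_per_word = 0;		/* 0: device default */
+	xfer->xfer_flags = GB_SPI_XFER_WRITE;
+
+	memcpy(xfer + 1, tx, len);		/* tx data follows the array */
+
+	return sizeof(*request) + sizeof(*xfer) + len;
+}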
+
+/* Version of the Greybus SVC protocol we support */
+#define GB_SVC_VERSION_MAJOR 0x00
+#define GB_SVC_VERSION_MINOR 0x01
+
+/* Greybus SVC request types */
+#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
+#define GB_SVC_TYPE_SVC_HELLO 0x02
+#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
+#define GB_SVC_TYPE_INTF_RESET 0x06
+#define GB_SVC_TYPE_CONN_CREATE 0x07
+#define GB_SVC_TYPE_CONN_DESTROY 0x08
+#define GB_SVC_TYPE_DME_PEER_GET 0x09
+#define GB_SVC_TYPE_DME_PEER_SET 0x0a
+#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
+#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
+#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
+#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
+#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
+#define GB_SVC_TYPE_INTF_EJECT 0x11
+#define GB_SVC_TYPE_PING 0x13
+#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
+#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
+#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
+#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
+#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
+#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
+#define GB_SVC_TYPE_MODULE_REMOVED 0x20
+#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
+#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
+#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
+#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
+#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
+#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
+#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
+#define GB_SVC_TYPE_INTF_RESUME 0x28
+#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
+#define GB_SVC_TYPE_INTF_OOPS 0x2a
+
+/* Greybus SVC protocol status values */
+#define GB_SVC_OP_SUCCESS 0x00
+#define GB_SVC_OP_UNKNOWN_ERROR 0x01
+#define GB_SVC_INTF_NOT_DETECTED 0x02
+#define GB_SVC_INTF_NO_UPRO_LINK 0x03
+#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
+#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
+#define GB_SVC_INTF_NO_V_SYS 0x06
+#define GB_SVC_INTF_V_CHG 0x07
+#define GB_SVC_INTF_WAKE_BUSY 0x08
+#define GB_SVC_INTF_NO_REFCLK 0x09
+#define GB_SVC_INTF_RELEASING 0x0a
+#define GB_SVC_INTF_NO_ORDER 0x0b
+#define GB_SVC_INTF_MBOX_SET 0x0c
+#define GB_SVC_INTF_BAD_MBOX 0x0d
+#define GB_SVC_INTF_OP_TIMEOUT 0x0e
+#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
+
+struct gb_svc_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_svc_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* SVC protocol hello request */
+struct gb_svc_hello_request {
+ __le16 endo_id;
+ __u8 interface_id;
+} __packed;
+/* hello response has no payload */
+
+struct gb_svc_intf_device_id_request {
+ __u8 intf_id;
+ __u8 device_id;
+} __packed;
+/* device id response has no payload */
+
+struct gb_svc_intf_reset_request {
+ __u8 intf_id;
+} __packed;
+/* interface reset response has no payload */
+
+struct gb_svc_intf_eject_request {
+ __u8 intf_id;
+} __packed;
+/* interface eject response has no payload */
+
+struct gb_svc_conn_create_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+ __u8 tc;
+ __u8 flags;
+} __packed;
+/* connection create response has no payload */
+
+struct gb_svc_conn_destroy_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+} __packed;
+/* connection destroy response has no payload */
+
+struct gb_svc_dme_peer_get_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+} __packed;
+
+struct gb_svc_dme_peer_get_response {
+ __le16 result_code;
+ __le32 attr_value;
+} __packed;
+
+struct gb_svc_dme_peer_set_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+ __le32 value;
+} __packed;
+
+struct gb_svc_dme_peer_set_response {
+ __le16 result_code;
+} __packed;
+
+/* Greybus init-status values, currently retrieved using DME peer gets. */
+#define GB_INIT_SPI_BOOT_STARTED 0x02
+#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
+#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
+#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
+#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
+#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
+
+struct gb_svc_route_create_request {
+ __u8 intf1_id;
+ __u8 dev1_id;
+ __u8 intf2_id;
+ __u8 dev2_id;
+} __packed;
+/* route create response has no payload */
+
+struct gb_svc_route_destroy_request {
+ __u8 intf1_id;
+ __u8 intf2_id;
+} __packed;
+/* route destroy response has no payload */
+
+/* used for svc_intf_vsys_{enable,disable} */
+struct gb_svc_intf_vsys_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_vsys_response {
+ __u8 result_code;
+#define GB_SVC_INTF_VSYS_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_VSYS_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_refclk_{enable,disable} */
+struct gb_svc_intf_refclk_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_refclk_response {
+ __u8 result_code;
+#define GB_SVC_INTF_REFCLK_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_REFCLK_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_unipro_{enable,disable} */
+struct gb_svc_intf_unipro_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_unipro_response {
+ __u8 result_code;
+#define GB_SVC_INTF_UNIPRO_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_UNIPRO_FAIL 0x02
+#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
+} __packed;
+
+struct gb_svc_timesync_enable_request {
+ __u8 count;
+ __le64 frame_time;
+ __le32 strobe_delay;
+ __le32 refclk;
+} __packed;
+/* timesync enable response has no payload */
+
+/* timesync authoritative request has no payload */
+struct gb_svc_timesync_authoritative_response {
+ __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
+} __packed;
+
+struct gb_svc_timesync_wake_pins_acquire_request {
+ __le32 strobe_mask;
+} __packed;
+
+/* timesync wake pins acquire response has no payload */
+
+/* timesync wake pins release request has no payload */
+/* timesync wake pins release response has no payload */
+
+/* timesync svc ping request has no payload */
+struct gb_svc_timesync_ping_response {
+ __le64 frame_time;
+} __packed;
+
+#define GB_SVC_UNIPRO_FAST_MODE 0x01
+#define GB_SVC_UNIPRO_SLOW_MODE 0x02
+#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
+#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
+#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
+#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
+#define GB_SVC_UNIPRO_OFF_MODE 0x12
+
+#define GB_SVC_SMALL_AMPLITUDE 0x01
+#define GB_SVC_LARGE_AMPLITUDE 0x02
+
+#define GB_SVC_NO_DE_EMPHASIS 0x00
+#define GB_SVC_SMALL_DE_EMPHASIS 0x01
+#define GB_SVC_LARGE_DE_EMPHASIS 0x02
+
+#define GB_SVC_PWRM_RXTERMINATION 0x01
+#define GB_SVC_PWRM_TXTERMINATION 0x02
+#define GB_SVC_PWRM_LINE_RESET 0x04
+#define GB_SVC_PWRM_SCRAMBLING 0x20
+
+#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
+
+#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
+#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
+
+#define GB_SVC_SETPWRM_PWR_OK 0x00
+#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
+#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
+#define GB_SVC_SETPWRM_PWR_BUSY 0x03
+#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
+#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
+
+struct gb_svc_l2_timer_cfg {
+ __le16 tsb_fc0_protection_timeout;
+ __le16 tsb_tc0_replay_timeout;
+ __le16 tsb_afc0_req_timeout;
+ __le16 tsb_fc1_protection_timeout;
+ __le16 tsb_tc1_replay_timeout;
+ __le16 tsb_afc1_req_timeout;
+ __le16 reserved_for_tc2[3];
+ __le16 reserved_for_tc3[3];
+} __packed;
+
+struct gb_svc_intf_set_pwrm_request {
+ __u8 intf_id;
+ __u8 hs_series;
+ __u8 tx_mode;
+ __u8 tx_gear;
+ __u8 tx_nlanes;
+ __u8 tx_amplitude;
+ __u8 tx_hs_equalizer;
+ __u8 rx_mode;
+ __u8 rx_gear;
+ __u8 rx_nlanes;
+ __u8 flags;
+ __le32 quirks;
+ struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
+} __packed;
+
+struct gb_svc_intf_set_pwrm_response {
+ __u8 result_code;
+} __packed;
+
+struct gb_svc_key_event_request {
+ __le16 key_code;
+#define GB_KEYCODE_ARA 0x00
+
+ __u8 key_event;
+#define GB_SVC_KEY_RELEASED 0x00
+#define GB_SVC_KEY_PRESSED 0x01
+} __packed;
+
+#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
+
+struct gb_svc_pwrmon_rail_count_get_response {
+ __u8 rail_count;
+} __packed;
+
+#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32
+
+struct gb_svc_pwrmon_rail_names_get_response {
+ __u8 status;
+ __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+} __packed;
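+
+/*
+ * Illustrative note: name[] above is a flexible array of fixed-width
+ * (possibly unterminated) entries, so the payload following 'status' is
+ * rail_count * GB_SVC_PWRMON_RAIL_NAME_BUFSIZE bytes and rail i's name
+ * can be indexed directly:
+ */
+static inline const __u8 *
+gb_svc_example_rail_name(const struct gb_svc_pwrmon_rail_names_get_response *resp,
+			 unsigned int rail)
+{
+	return resp->name[rail];	/* GB_SVC_PWRMON_RAIL_NAME_BUFSIZE bytes */
+}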
+
+#define GB_SVC_PWRMON_TYPE_CURR 0x01
+#define GB_SVC_PWRMON_TYPE_VOL 0x02
+#define GB_SVC_PWRMON_TYPE_PWR 0x03
+
+#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
+#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
+#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
+#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
+
+struct gb_svc_pwrmon_sample_get_request {
+ __u8 rail_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_request {
+ __u8 intf_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
+
+struct gb_svc_module_inserted_request {
+ __u8 primary_intf_id;
+ __u8 intf_count;
+ __le16 flags;
+} __packed;
+/* module_inserted response has no payload */
+
+struct gb_svc_module_removed_request {
+ __u8 primary_intf_id;
+} __packed;
+/* module_removed response has no payload */
+
+struct gb_svc_intf_activate_request {
+ __u8 intf_id;
+} __packed;
+
+#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
+#define GB_SVC_INTF_TYPE_DUMMY 0x01
+#define GB_SVC_INTF_TYPE_UNIPRO 0x02
+#define GB_SVC_INTF_TYPE_GREYBUS 0x03
+
+struct gb_svc_intf_activate_response {
+ __u8 status;
+ __u8 intf_type;
+} __packed;
+
+struct gb_svc_intf_resume_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_resume_response {
+ __u8 status;
+} __packed;
+
+#define GB_SVC_INTF_MAILBOX_NONE 0x00
+#define GB_SVC_INTF_MAILBOX_AP 0x01
+#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
+
+struct gb_svc_intf_mailbox_event_request {
+ __u8 intf_id;
+ __le16 result_code;
+ __le32 mailbox;
+} __packed;
+/* intf_mailbox_event response has no payload */
+
+struct gb_svc_intf_oops_request {
+ __u8 intf_id;
+ __u8 reason;
+} __packed;
+/* intf_oops response has no payload */
+
+
+/* RAW */
+
+/* Greybus raw request types */
+#define GB_RAW_TYPE_SEND 0x02
+
+struct gb_raw_send_request {
+ __le32 len;
+ __u8 data[0];
+} __packed;
+
+
+/* UART */
+
+/* Greybus UART operation types */
+#define GB_UART_TYPE_SEND_DATA 0x02
+#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
+#define GB_UART_TYPE_SET_LINE_CODING 0x04
+#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
+#define GB_UART_TYPE_SEND_BREAK 0x06
+#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
+#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
+#define GB_UART_TYPE_FLUSH_FIFOS 0x09
+
+/* Represents data from AP -> Module */
+struct gb_uart_send_data_request {
+ __le16 size;
+ __u8 data[0];
+} __packed;
+
+/* recv-data-request flags */
+#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
+#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
+#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
+#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
+
+/* Represents data from Module -> AP */
+struct gb_uart_recv_data_request {
+ __le16 size;
+ __u8 flags;
+ __u8 data[0];
+} __packed;
+
+struct gb_uart_receive_credits_request {
+ __le16 count;
+} __packed;
+
+struct gb_uart_set_line_coding_request {
+ __le32 rate;
+ __u8 format;
+#define GB_SERIAL_1_STOP_BITS 0
+#define GB_SERIAL_1_5_STOP_BITS 1
+#define GB_SERIAL_2_STOP_BITS 2
+
+ __u8 parity;
+#define GB_SERIAL_NO_PARITY 0
+#define GB_SERIAL_ODD_PARITY 1
+#define GB_SERIAL_EVEN_PARITY 2
+#define GB_SERIAL_MARK_PARITY 3
+#define GB_SERIAL_SPACE_PARITY 4
+
+ __u8 data_bits;
+
+ __u8 flow_control;
+#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
+} __packed;
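+
+/*
+ * Illustrative sketch (not part of the protocol definition): a classic
+ * 115200 8N1 configuration with no flow control.
+ */
+static inline void
+gb_uart_example_8n1(struct gb_uart_set_line_coding_request *req)
+{
+	req->rate = cpu_to_le32(115200);
+	req->format = GB_SERIAL_1_STOP_BITS;
+	req->parity = GB_SERIAL_NO_PARITY;
+	req->data_bits = 8;
+	req->flow_control = 0;
+}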
+
+/* output control lines */
+#define GB_UART_CTRL_DTR 0x01
+#define GB_UART_CTRL_RTS 0x02
+
+struct gb_uart_set_control_line_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_set_break_request {
+ __u8 state;
+} __packed;
+
+/* input control lines and line errors */
+#define GB_UART_CTRL_DCD 0x01
+#define GB_UART_CTRL_DSR 0x02
+#define GB_UART_CTRL_RI 0x04
+
+struct gb_uart_serial_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_serial_flush_request {
+ __u8 flags;
+#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
+#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
+} __packed;
+
+/* Loopback */
+
+/* Greybus loopback request types */
+#define GB_LOOPBACK_TYPE_PING 0x02
+#define GB_LOOPBACK_TYPE_TRANSFER 0x03
+#define GB_LOOPBACK_TYPE_SINK 0x04
+
+/*
+ * The loopback transfer request and response use an identical layout
+ * to simplify bandwidth and data-movement analysis.
+ */
+struct gb_loopback_transfer_request {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[0];
+} __packed;
+
+struct gb_loopback_transfer_response {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[0];
+} __packed;
+
+/* SDIO */
+/* Greybus SDIO operation types */
+#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
+#define GB_SDIO_TYPE_SET_IOS 0x03
+#define GB_SDIO_TYPE_COMMAND 0x04
+#define GB_SDIO_TYPE_TRANSFER 0x05
+#define GB_SDIO_TYPE_EVENT 0x06
+
+/* get caps response: request has no payload */
+struct gb_sdio_get_caps_response {
+ __le32 caps;
+#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
+#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
+#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
+#define GB_SDIO_CAP_MMC_HS 0x00000008
+#define GB_SDIO_CAP_SD_HS 0x00000010
+#define GB_SDIO_CAP_ERASE 0x00000020
+#define GB_SDIO_CAP_1_2V_DDR 0x00000040
+#define GB_SDIO_CAP_1_8V_DDR 0x00000080
+#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
+#define GB_SDIO_CAP_UHS_SDR12 0x00000200
+#define GB_SDIO_CAP_UHS_SDR25 0x00000400
+#define GB_SDIO_CAP_UHS_SDR50 0x00000800
+#define GB_SDIO_CAP_UHS_SDR104 0x00001000
+#define GB_SDIO_CAP_UHS_DDR50 0x00002000
+#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
+#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
+#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
+#define GB_SDIO_CAP_HS200_1_2V 0x00020000
+#define GB_SDIO_CAP_HS200_1_8V 0x00040000
+#define GB_SDIO_CAP_HS400_1_2V 0x00080000
+#define GB_SDIO_CAP_HS400_1_8V 0x00100000
+
+ /* see possible values below at vdd */
+ __le32 ocr;
+ __le32 f_min;
+ __le32 f_max;
+ __le16 max_blk_count;
+ __le16 max_blk_size;
+} __packed;
+
+/* set ios request: response has no payload */
+struct gb_sdio_set_ios_request {
+ __le32 clock;
+ __le32 vdd;
+#define GB_SDIO_VDD_165_195 0x00000001
+#define GB_SDIO_VDD_20_21 0x00000002
+#define GB_SDIO_VDD_21_22 0x00000004
+#define GB_SDIO_VDD_22_23 0x00000008
+#define GB_SDIO_VDD_23_24 0x00000010
+#define GB_SDIO_VDD_24_25 0x00000020
+#define GB_SDIO_VDD_25_26 0x00000040
+#define GB_SDIO_VDD_26_27 0x00000080
+#define GB_SDIO_VDD_27_28 0x00000100
+#define GB_SDIO_VDD_28_29 0x00000200
+#define GB_SDIO_VDD_29_30 0x00000400
+#define GB_SDIO_VDD_30_31 0x00000800
+#define GB_SDIO_VDD_31_32 0x00001000
+#define GB_SDIO_VDD_32_33 0x00002000
+#define GB_SDIO_VDD_33_34 0x00004000
+#define GB_SDIO_VDD_34_35 0x00008000
+#define GB_SDIO_VDD_35_36 0x00010000
+
+ __u8 bus_mode;
+#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
+#define GB_SDIO_BUSMODE_PUSHPULL 0x01
+
+ __u8 power_mode;
+#define GB_SDIO_POWER_OFF 0x00
+#define GB_SDIO_POWER_UP 0x01
+#define GB_SDIO_POWER_ON 0x02
+#define GB_SDIO_POWER_UNDEFINED 0x03
+
+ __u8 bus_width;
+#define GB_SDIO_BUS_WIDTH_1 0x00
+#define GB_SDIO_BUS_WIDTH_4 0x02
+#define GB_SDIO_BUS_WIDTH_8 0x03
+
+ __u8 timing;
+#define GB_SDIO_TIMING_LEGACY 0x00
+#define GB_SDIO_TIMING_MMC_HS 0x01
+#define GB_SDIO_TIMING_SD_HS 0x02
+#define GB_SDIO_TIMING_UHS_SDR12 0x03
+#define GB_SDIO_TIMING_UHS_SDR25 0x04
+#define GB_SDIO_TIMING_UHS_SDR50 0x05
+#define GB_SDIO_TIMING_UHS_SDR104 0x06
+#define GB_SDIO_TIMING_UHS_DDR50 0x07
+#define GB_SDIO_TIMING_MMC_DDR52 0x08
+#define GB_SDIO_TIMING_MMC_HS200 0x09
+#define GB_SDIO_TIMING_MMC_HS400 0x0A
+
+ __u8 signal_voltage;
+#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
+#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
+#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
+
+ __u8 drv_type;
+#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
+#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
+#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
+#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
+} __packed;
+
+/* command request */
+struct gb_sdio_command_request {
+ __u8 cmd;
+ __u8 cmd_flags;
+#define GB_SDIO_RSP_NONE 0x00
+#define GB_SDIO_RSP_PRESENT 0x01
+#define GB_SDIO_RSP_136 0x02
+#define GB_SDIO_RSP_CRC 0x04
+#define GB_SDIO_RSP_BUSY 0x08
+#define GB_SDIO_RSP_OPCODE 0x10
+
+ __u8 cmd_type;
+#define GB_SDIO_CMD_AC 0x00
+#define GB_SDIO_CMD_ADTC 0x01
+#define GB_SDIO_CMD_BC 0x02
+#define GB_SDIO_CMD_BCR 0x03
+
+ __le32 cmd_arg;
+ __le16 data_blocks;
+ __le16 data_blksz;
+} __packed;
+
+struct gb_sdio_command_response {
+ __le32 resp[4];
+} __packed;
+
+/* transfer request */
+struct gb_sdio_transfer_request {
+ __u8 data_flags;
+#define GB_SDIO_DATA_WRITE 0x01
+#define GB_SDIO_DATA_READ 0x02
+#define GB_SDIO_DATA_STREAM 0x04
+
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[0];
+} __packed;
+
+struct gb_sdio_transfer_response {
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[0];
+} __packed;
+
+/* event request: generated by module and is defined as unidirectional */
+struct gb_sdio_event_request {
+ __u8 event;
+#define GB_SDIO_CARD_INSERTED 0x01
+#define GB_SDIO_CARD_REMOVED 0x02
+#define GB_SDIO_WP 0x04
+} __packed;
+
+/* Camera */
+
+/* Greybus Camera request types */
+#define GB_CAMERA_TYPE_CAPABILITIES 0x02
+#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
+#define GB_CAMERA_TYPE_CAPTURE 0x04
+#define GB_CAMERA_TYPE_FLUSH 0x05
+#define GB_CAMERA_TYPE_METADATA 0x06
+
+#define GB_CAMERA_MAX_STREAMS 4
+#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
+
+/* Greybus Camera Configure Streams request payload */
+struct gb_camera_stream_config_request {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __le16 padding;
+} __packed;
+
+struct gb_camera_configure_streams_request {
+ __u8 num_streams;
+ __u8 flags;
+#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
+ __le16 padding;
+ struct gb_camera_stream_config_request config[0];
+} __packed;
+
+/* Greybus Camera Configure Streams response payload */
+struct gb_camera_stream_config_response {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __u8 virtual_channel;
+ __u8 data_type[2];
+ __le16 max_pkt_size;
+ __u8 padding;
+ __le32 max_size;
+} __packed;
+
+struct gb_camera_configure_streams_response {
+ __u8 num_streams;
+#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
+ __u8 flags;
+ __u8 padding[2];
+ __le32 data_rate;
+ struct gb_camera_stream_config_response config[0];
+} __packed;
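+
+/*
+ * Illustrative sketch (not part of the protocol definition): probing a
+ * single 1920x1080 stream without applying it, on the assumption that
+ * the TEST_ONLY request flag asks the module only to validate (and
+ * possibly adjust) the configuration, and that ADJUSTED in the response
+ * flags reports that the returned parameters differ from the request.
+ */
+static inline size_t gb_camera_example_probe(void *buf, __le16 format)
+{
+	struct gb_camera_configure_streams_request *req = buf;
+	struct gb_camera_stream_config_request *cfg = req->config;
+
+	req->num_streams = 1;
+	req->flags = GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
+	req->padding = 0;
+
+	cfg->width = cpu_to_le16(1920);
+	cfg->height = cpu_to_le16(1080);
+	cfg->format = format;
+	cfg->padding = 0;
+
+	return sizeof(*req) + sizeof(*cfg);
+}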
+
+/* Greybus Camera Capture request payload - response has no payload */
+struct gb_camera_capture_request {
+ __le32 request_id;
+ __u8 streams;
+ __u8 padding;
+ __le16 num_frames;
+ __u8 settings[0];
+} __packed;
+
+/* Greybus Camera Flush response payload - request has no payload */
+struct gb_camera_flush_response {
+ __le32 request_id;
+} __packed;
+
+/* Greybus Camera Metadata request payload - operation has no response */
+struct gb_camera_metadata_request {
+ __le32 request_id;
+ __le16 frame_number;
+ __u8 stream;
+ __u8 padding;
+ __u8 metadata[0];
+} __packed;
+
+/* Lights */
+
+/* Greybus Lights request types */
+#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
+#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
+#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
+#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
+#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
+#define GB_LIGHTS_TYPE_SET_BLINK 0x07
+#define GB_LIGHTS_TYPE_SET_COLOR 0x08
+#define GB_LIGHTS_TYPE_SET_FADE 0x09
+#define GB_LIGHTS_TYPE_EVENT 0x0A
+#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
+#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
+#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
+#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
+
+/* Greybus Light modes */
+
+/*
+ * If you add any specific mode below, also update the
+ * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly.
+ */
+#define GB_CHANNEL_MODE_NONE 0x00000000
+#define GB_CHANNEL_MODE_BATTERY 0x00000001
+#define GB_CHANNEL_MODE_POWER 0x00000002
+#define GB_CHANNEL_MODE_WIRELESS 0x00000004
+#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
+#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
+#define GB_CHANNEL_MODE_BUTTONS 0x00000020
+#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
+#define GB_CHANNEL_MODE_ATTENTION 0x00000080
+#define GB_CHANNEL_MODE_FLASH 0x00000100
+#define GB_CHANNEL_MODE_TORCH 0x00000200
+#define GB_CHANNEL_MODE_INDICATOR 0x00000400
+
+/* Lights Mode valid bit values */
+#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
+#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
+
+/* Greybus Light Channels Flags */
+#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
+#define GB_LIGHT_CHANNEL_FADER 0x00000002
+#define GB_LIGHT_CHANNEL_BLINK 0x00000004
+
+/* get count of lights in module */
+struct gb_lights_get_lights_response {
+ __u8 lights_count;
+} __packed;
+
+/* light config request payload */
+struct gb_lights_get_light_config_request {
+ __u8 id;
+} __packed;
+
+/* light config response payload */
+struct gb_lights_get_light_config_response {
+ __u8 channel_count;
+ __u8 name[32];
+} __packed;
+
+/* channel config request payload */
+struct gb_lights_get_channel_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel flash config request payload */
+struct gb_lights_get_channel_flash_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel config response payload */
+struct gb_lights_get_channel_config_response {
+ __u8 max_brightness;
+ __le32 flags;
+ __le32 color;
+ __u8 color_name[32];
+ __le32 mode;
+ __u8 mode_name[32];
+} __packed;
+
+/* channel flash config response payload */
+struct gb_lights_get_channel_flash_config_response {
+ __le32 intensity_min_uA;
+ __le32 intensity_max_uA;
+ __le32 intensity_step_uA;
+ __le32 timeout_min_us;
+ __le32 timeout_max_us;
+ __le32 timeout_step_us;
+} __packed;
+
+/* blink request payload: response has no payload */
+struct gb_lights_blink_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le16 time_on_ms;
+ __le16 time_off_ms;
+} __packed;
+
+/* set brightness request payload: response has no payload */
+struct gb_lights_set_brightness_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 brightness;
+} __packed;
+
+/* set color request payload: response has no payload */
+struct gb_lights_set_color_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 color;
+} __packed;
+
+/* set fade request payload: response has no payload */
+struct gb_lights_set_fade_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 fade_in;
+ __u8 fade_out;
+} __packed;
+
+/* event request: generated by module */
+struct gb_lights_event_request {
+ __u8 light_id;
+ __u8 event;
+#define GB_LIGHTS_LIGHT_CONFIG 0x01
+} __packed;
+
+/* set flash intensity request payload: response has no payload */
+struct gb_lights_set_flash_intensity_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 intensity_uA;
+} __packed;
+
+/* set flash strobe state request payload: response has no payload */
+struct gb_lights_set_flash_strobe_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 state;
+} __packed;
+
+/* set flash timeout request payload: response has no payload */
+struct gb_lights_set_flash_timeout_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 timeout_us;
+} __packed;
+
+/* get flash fault request payload */
+struct gb_lights_get_flash_fault_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* get flash fault response payload */
+struct gb_lights_get_flash_fault_response {
+ __le32 fault;
+#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
+#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
+#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
+#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
+#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
+#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
+#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
+#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
+#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
+} __packed;
+
+/* Audio */
+
+#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
+#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
+#define GB_AUDIO_TYPE_GET_CONTROL 0x04
+#define GB_AUDIO_TYPE_SET_CONTROL 0x05
+#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
+#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
+#define GB_AUDIO_TYPE_GET_PCM 0x08
+#define GB_AUDIO_TYPE_SET_PCM 0x09
+#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
+ /* 0x0b unused */
+#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
+#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
+#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
+ /* 0x0f unused */
+#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
+#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
+#define GB_AUDIO_TYPE_JACK_EVENT 0x12
+#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
+#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
+#define GB_AUDIO_TYPE_SEND_DATA 0x15
+
+/* Module must be able to buffer 10ms of audio data, minimum */
+#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
+
+#define GB_AUDIO_PCM_NAME_MAX 32
+#define AUDIO_DAI_NAME_MAX 32
+#define AUDIO_CONTROL_NAME_MAX 32
+#define AUDIO_CTL_ELEM_NAME_MAX 44
+#define AUDIO_ENUM_NAME_MAX 64
+#define AUDIO_WIDGET_NAME_MAX 32
+
+/* See SNDRV_PCM_FMTBIT_* in Linux source */
+#define GB_AUDIO_PCM_FMT_S8 BIT(0)
+#define GB_AUDIO_PCM_FMT_U8 BIT(1)
+#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
+#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
+#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
+#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
+#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
+#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
+#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
+#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
+#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
+#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
+#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
+#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
+
+/* See SNDRV_PCM_RATE_* in Linux source */
+#define GB_AUDIO_PCM_RATE_5512 BIT(0)
+#define GB_AUDIO_PCM_RATE_8000 BIT(1)
+#define GB_AUDIO_PCM_RATE_11025 BIT(2)
+#define GB_AUDIO_PCM_RATE_16000 BIT(3)
+#define GB_AUDIO_PCM_RATE_22050 BIT(4)
+#define GB_AUDIO_PCM_RATE_32000 BIT(5)
+#define GB_AUDIO_PCM_RATE_44100 BIT(6)
+#define GB_AUDIO_PCM_RATE_48000 BIT(7)
+#define GB_AUDIO_PCM_RATE_64000 BIT(8)
+#define GB_AUDIO_PCM_RATE_88200 BIT(9)
+#define GB_AUDIO_PCM_RATE_96000 BIT(10)
+#define GB_AUDIO_PCM_RATE_176400 BIT(11)
+#define GB_AUDIO_PCM_RATE_192000 BIT(12)
+
+#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
+#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
+
+#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
+#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
+
+/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
+#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
+
+/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
+#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
+#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
+#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
+#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
+#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
+#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
+
+/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
+#define GB_AUDIO_ACCESS_READ BIT(0)
+#define GB_AUDIO_ACCESS_WRITE BIT(1)
+#define GB_AUDIO_ACCESS_VOLATILE BIT(2)
+#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3)
+#define GB_AUDIO_ACCESS_TLV_READ BIT(4)
+#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5)
+#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6)
+#define GB_AUDIO_ACCESS_INACTIVE BIT(7)
+#define GB_AUDIO_ACCESS_LOCK BIT(8)
+#define GB_AUDIO_ACCESS_OWNER BIT(9)
+
+/* enum snd_soc_dapm_type */
+#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0
+#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1
+#define GB_AUDIO_WIDGET_TYPE_MUX 0x2
+#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3
+#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4
+#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5
+#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6
+#define GB_AUDIO_WIDGET_TYPE_PGA 0x7
+#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8
+#define GB_AUDIO_WIDGET_TYPE_ADC 0x9
+#define GB_AUDIO_WIDGET_TYPE_DAC 0xa
+#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb
+#define GB_AUDIO_WIDGET_TYPE_MIC 0xc
+#define GB_AUDIO_WIDGET_TYPE_HP 0xd
+#define GB_AUDIO_WIDGET_TYPE_SPK 0xe
+#define GB_AUDIO_WIDGET_TYPE_LINE 0xf
+#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10
+#define GB_AUDIO_WIDGET_TYPE_VMID 0x11
+#define GB_AUDIO_WIDGET_TYPE_PRE 0x12
+#define GB_AUDIO_WIDGET_TYPE_POST 0x13
+#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14
+#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15
+#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16
+#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17
+#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18
+#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19
+#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a
+#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b
+#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c
+
+#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01
+#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02
+
+#define GB_AUDIO_JACK_EVENT_INSERTION 0x1
+#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2
+
+#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1
+#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2
+
+#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1
+#define GB_AUDIO_STREAMING_EVENT_HALT 0x2
+#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3
+#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4
+#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5
+#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6
+#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7
+#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8
+#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9
+
+#define GB_AUDIO_INVALID_INDEX 0xff
+
+/* enum snd_jack_types */
+#define GB_AUDIO_JACK_HEADPHONE 0x0000001
+#define GB_AUDIO_JACK_MICROPHONE 0x0000002
+#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE)
+#define GB_AUDIO_JACK_LINEOUT 0x0000004
+#define GB_AUDIO_JACK_MECHANICAL 0x0000008
+#define GB_AUDIO_JACK_VIDEOOUT 0x0000010
+#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \
+ GB_AUDIO_JACK_VIDEOOUT)
+#define GB_AUDIO_JACK_LINEIN 0x0000020
+#define GB_AUDIO_JACK_OC_HPHL 0x0000040
+#define GB_AUDIO_JACK_OC_HPHR 0x0000080
+#define GB_AUDIO_JACK_MICROPHONE2 0x0000200
+#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE | \
+ GB_AUDIO_JACK_MICROPHONE2)
+/* Kept separate from switches to facilitate implementation */
+#define GB_AUDIO_JACK_BTN_0 0x4000000
+#define GB_AUDIO_JACK_BTN_1 0x2000000
+#define GB_AUDIO_JACK_BTN_2 0x1000000
+#define GB_AUDIO_JACK_BTN_3 0x0800000
+
+struct gb_audio_pcm {
+ __u8 stream_name[GB_AUDIO_PCM_NAME_MAX];
+ __le32 formats; /* GB_AUDIO_PCM_FMT_* */
+ __le32 rates; /* GB_AUDIO_PCM_RATE_* */
+ __u8 chan_min;
+ __u8 chan_max;
+ __u8 sig_bits; /* number of bits of content */
+} __packed;
+
+struct gb_audio_dai {
+ __u8 name[AUDIO_DAI_NAME_MAX];
+ __le16 data_cport;
+ struct gb_audio_pcm capture;
+ struct gb_audio_pcm playback;
+} __packed;
+
+struct gb_audio_integer {
+ __le32 min;
+ __le32 max;
+ __le32 step;
+} __packed;
+
+struct gb_audio_integer64 {
+ __le64 min;
+ __le64 max;
+ __le64 step;
+} __packed;
+
+struct gb_audio_enumerated {
+ __le32 items;
+ __le16 names_length;
+ __u8 names[0];
+} __packed;
+
+struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
+ __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */
+ __le16 dimen[4];
+ union {
+ struct gb_audio_integer integer;
+ struct gb_audio_integer64 integer64;
+ struct gb_audio_enumerated enumerated;
+ } value;
+} __packed;
+
+struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
+ __le64 timestamp; /* XXX needed? */
+ union {
+ __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */
+ __le64 integer64_value[2];
+ __le32 enumerated_item[2];
+ } value;
+} __packed;
+
+struct gb_audio_control {
+ __u8 name[AUDIO_CONTROL_NAME_MAX];
+ __u8 id; /* 0-63 */
+ __u8 iface; /* GB_AUDIO_IFACE_* */
+ __le16 data_cport;
+ __le32 access; /* GB_AUDIO_ACCESS_* */
+ __u8 count; /* count of same elements */
+ __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */
+ struct gb_audio_ctl_elem_info info;
+} __packed;
+
+struct gb_audio_widget {
+ __u8 name[AUDIO_WIDGET_NAME_MAX];
+ __u8 sname[AUDIO_WIDGET_NAME_MAX];
+ __u8 id;
+ __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
+ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */
+ __u8 ncontrols;
+ struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
+} __packed;
+
+struct gb_audio_route {
+ __u8 source_id; /* widget id */
+ __u8 destination_id; /* widget id */
+ __u8 control_id; /* 0-63 */
+ __u8 index; /* Selection within the control */
+} __packed;
+
+struct gb_audio_topology {
+ __u8 num_dais;
+ __u8 num_controls;
+ __u8 num_widgets;
+ __u8 num_routes;
+ __le32 size_dais;
+ __le32 size_controls;
+ __le32 size_widgets;
+ __le32 size_routes;
+ __le32 jack_type;
+ /*
+ * struct gb_audio_dai dai[num_dais];
+ * struct gb_audio_control controls[num_controls];
+ * struct gb_audio_widget widgets[num_widgets];
+ * struct gb_audio_route routes[num_routes];
+ */
+ __u8 data[0];
+} __packed;
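+
+/*
+ * Illustrative sketch (not part of the protocol definition): locating
+ * the route table at the end of the topology blob by skipping over the
+ * preceding sections with the size_* fields.  Note that widgets are
+ * variable-sized (each carries 'ncontrols' controls), so within their
+ * section they must be walked entry by entry rather than indexed.
+ */
+static inline struct gb_audio_route *
+gb_audio_example_routes(struct gb_audio_topology *topology)
+{
+	__u8 *data = topology->data;
+
+	data += le32_to_cpu(topology->size_dais);	/* skip DAIs */
+	data += le32_to_cpu(topology->size_controls);	/* skip controls */
+	data += le32_to_cpu(topology->size_widgets);	/* skip widgets */
+
+	return (struct gb_audio_route *)data;	/* num_routes entries */
+}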
+
+struct gb_audio_get_topology_size_response {
+ __le16 size;
+} __packed;
+
+struct gb_audio_get_topology_response {
+ struct gb_audio_topology topology;
+} __packed;
+
+struct gb_audio_get_control_request {
+ __u8 control_id;
+ __u8 index;
+} __packed;
+
+struct gb_audio_get_control_response {
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_set_control_request {
+ __u8 control_id;
+ __u8 index;
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_enable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_disable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_get_pcm_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_get_pcm_response {
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_pcm_request {
+ __le16 data_cport;
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_tx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_set_rx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_jack_event_request {
+ __u8 widget_id;
+ __u8 jack_attribute;
+ __u8 event;
+} __packed;
+
+struct gb_audio_button_event_request {
+ __u8 widget_id;
+ __u8 button_id;
+ __u8 event;
+} __packed;
+
+struct gb_audio_streaming_event_request {
+ __le16 data_cport;
+ __u8 event;
+} __packed;
+
+struct gb_audio_send_data_request {
+ __le64 timestamp;
+ __u8 data[0];
+} __packed;
+
+
+/* Log */
+
+/* operations */
+#define GB_LOG_TYPE_SEND_LOG 0x02
+
+/* length */
+#define GB_LOG_MAX_LEN 1024
+
+struct gb_log_send_log_request {
+ __le16 len;
+ __u8 msg[0];
+} __packed;
+
+#endif /* __GREYBUS_PROTOCOLS_H */
+
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
new file mode 100644
index 000000000000..6f8692da9ec8
--- /dev/null
+++ b/drivers/staging/greybus/greybus_trace.h
@@ -0,0 +1,531 @@
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM greybus
+
+#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GREYBUS_H
+
+#include <linux/tracepoint.h>
+
+struct gb_message;
+struct gb_operation;
+struct gb_connection;
+struct gb_bundle;
+struct gb_host_device;
+
+DECLARE_EVENT_CLASS(gb_message,
+
+ TP_PROTO(struct gb_message *message),
+
+ TP_ARGS(message),
+
+ TP_STRUCT__entry(
+ __field(u16, size)
+ __field(u16, operation_id)
+ __field(u8, type)
+ __field(u8, result)
+ ),
+
+ TP_fast_assign(
+ __entry->size = le16_to_cpu(message->header->size);
+ __entry->operation_id =
+ le16_to_cpu(message->header->operation_id);
+ __entry->type = message->header->type;
+ __entry->result = message->header->result;
+ ),
+
+ TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
+ __entry->size, __entry->operation_id,
+ __entry->type, __entry->result)
+);
+
+#define DEFINE_MESSAGE_EVENT(name) \
+ DEFINE_EVENT(gb_message, name, \
+ TP_PROTO(struct gb_message *message), \
+ TP_ARGS(message))
+
+/*
+ * Occurs immediately before calling a host device's message_send()
+ * method.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_send);
+
+/*
+ * Occurs after an incoming request message has been received
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_request);
+
+/*
+ * Occurs after an incoming response message has been received,
+ * after its matching request has been found.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_response);
+
+/*
+ * Occurs after an operation has been canceled, possibly before the
+ * cancellation is complete.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);
+
+/*
+ * Occurs when an incoming request is cancelled; if the response has
+ * been queued for sending, this occurs after it is sent.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);
+
+/*
+ * Occurs in the host driver message_send() function just prior to
+ * handing off the data to be processed by hardware.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_submit);
+
+#undef DEFINE_MESSAGE_EVENT
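+
+/*
+ * Illustrative note: each DEFINE_EVENT() above generates a
+ * trace_<name>() helper, so the Greybus core calls, for example,
+ * trace_gb_message_send(message) immediately before handing a message
+ * to the host driver.  Once registered, the events appear under
+ * /sys/kernel/debug/tracing/events/greybus/ and can be enabled
+ * individually at runtime.
+ */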
+
+DECLARE_EVENT_CLASS(gb_operation,
+
+ TP_PROTO(struct gb_operation *operation),
+
+ TP_ARGS(operation),
+
+ TP_STRUCT__entry(
+ __field(u16, cport_id) /* CPort of HD side of connection */
+ __field(u16, id) /* Operation ID */
+ __field(u8, type)
+ __field(unsigned long, flags)
+ __field(int, active)
+ __field(int, waiters)
+ __field(int, errno)
+ ),
+
+ TP_fast_assign(
+ __entry->cport_id = operation->connection->hd_cport_id;
+ __entry->id = operation->id;
+ __entry->type = operation->type;
+ __entry->flags = operation->flags;
+ __entry->active = operation->active;
+ __entry->waiters = atomic_read(&operation->waiters);
+ __entry->errno = operation->errno;
+ ),
+
+ TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
+ __entry->id, __entry->type, __entry->cport_id, __entry->flags,
+ __entry->active, __entry->waiters, __entry->errno)
+);
+
+#define DEFINE_OPERATION_EVENT(name) \
+ DEFINE_EVENT(gb_operation, name, \
+ TP_PROTO(struct gb_operation *operation), \
+ TP_ARGS(operation))
+
+/*
+ * Occurs after a new operation for an outgoing request has been
+ * successfully created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create);
+
+/*
+ * Occurs after a new core operation has been created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_core);
+
+/*
+ * Occurs after a new operation for an incoming request has been
+ * successfully created and initialized.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
+
+/*
+ * Occurs when the last reference to an operation has been dropped,
+ * prior to freeing resources.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_destroy);
+
+/*
+ * Occurs when an operation has been marked active, after updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_get_active);
+
+/*
+ * Occurs when an operation has been marked inactive, before updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_put_active);
+
+#undef DEFINE_OPERATION_EVENT
+
+DECLARE_EVENT_CLASS(gb_connection,
+
+ TP_PROTO(struct gb_connection *connection),
+
+ TP_ARGS(connection),
+
+ TP_STRUCT__entry(
+ __field(int, hd_bus_id)
+ __field(u8, bundle_id)
+ /* name contains "hd_cport_id/intf_id:cport_id" */
+ __dynamic_array(char, name, sizeof(connection->name))
+ __field(enum gb_connection_state, state)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->hd_bus_id = connection->hd->bus_id;
+ __entry->bundle_id = connection->bundle ?
+ connection->bundle->id : BUNDLE_ID_NONE;
+ memcpy(__get_str(name), connection->name,
+ sizeof(connection->name));
+ __entry->state = connection->state;
+ __entry->flags = connection->flags;
+ ),
+
+ TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
+ __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
+ (unsigned int)__entry->state, __entry->flags)
+);
+
+#define DEFINE_CONNECTION_EVENT(name) \
+ DEFINE_EVENT(gb_connection, name, \
+ TP_PROTO(struct gb_connection *connection), \
+ TP_ARGS(connection))
+
+/*
+ * Occurs after a new connection is successfully created.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_create);
+
+/*
+ * Occurs when the last reference to a connection has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_release);
+
+/*
+ * Occurs when a new reference to a connection is added, currently
+ * only when a message over the connection is received.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_get);
+
+/*
+ * Occurs when a reference to a connection is dropped, after a
+ * received message is handled, or when the connection is
+ * destroyed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_put);
+
+/*
+ * Occurs when a request to enable a connection is made, either for
+ * transmit only, or for both transmit and receive.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_enable);
+
+/*
+ * Occurs when a request to disable a connection is made, either for
+ * receive only, or for both transmit and receive. Also occurs when
+ * a request to forcefully disable a connection is made.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_disable);
+
+#undef DEFINE_CONNECTION_EVENT
+
+DECLARE_EVENT_CLASS(gb_bundle,
+
+ TP_PROTO(struct gb_bundle *bundle),
+
+ TP_ARGS(bundle),
+
+ TP_STRUCT__entry(
+ __field(u8, intf_id)
+ __field(u8, id)
+ __field(u8, class)
+ __field(size_t, num_cports)
+ ),
+
+ TP_fast_assign(
+ __entry->intf_id = bundle->intf->interface_id;
+ __entry->id = bundle->id;
+ __entry->class = bundle->class;
+ __entry->num_cports = bundle->num_cports;
+ ),
+
+ TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
+ __entry->intf_id, __entry->id, __entry->class,
+ __entry->num_cports)
+);
+
+#define DEFINE_BUNDLE_EVENT(name) \
+ DEFINE_EVENT(gb_bundle, name, \
+ TP_PROTO(struct gb_bundle *bundle), \
+ TP_ARGS(bundle))
+
+/*
+ * Occurs after a new bundle is successfully created.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_create);
+
+/*
+ * Occurs when the last reference to a bundle has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_release);
+
+/*
+ * Occurs when a bundle is added to an interface when the interface
+ * is enabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_add);
+
+/*
+ * Occurs when a registered bundle gets destroyed, normally at the
+ * time an interface is disabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_destroy);
+
+#undef DEFINE_BUNDLE_EVENT
+
+DECLARE_EVENT_CLASS(gb_interface,
+
+ TP_PROTO(struct gb_interface *intf),
+
+ TP_ARGS(intf),
+
+ TP_STRUCT__entry(
+ __field(u8, module_id)
+ __field(u8, id) /* Interface id */
+ __field(u8, device_id)
+ __field(int, disconnected) /* bool */
+ __field(int, ejected) /* bool */
+ __field(int, active) /* bool */
+ __field(int, enabled) /* bool */
+ __field(int, mode_switch) /* bool */
+ ),
+
+ TP_fast_assign(
+ __entry->module_id = intf->module->module_id;
+ __entry->id = intf->interface_id;
+ __entry->device_id = intf->device_id;
+ __entry->disconnected = intf->disconnected;
+ __entry->ejected = intf->ejected;
+ __entry->active = intf->active;
+ __entry->enabled = intf->enabled;
+ __entry->mode_switch = intf->mode_switch;
+ ),
+
+ TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
+ __entry->id, __entry->device_id, __entry->module_id,
+ __entry->disconnected, __entry->ejected, __entry->active,
+ __entry->enabled, __entry->mode_switch)
+);
+
+#define DEFINE_INTERFACE_EVENT(name) \
+ DEFINE_EVENT(gb_interface, name, \
+ TP_PROTO(struct gb_interface *intf), \
+ TP_ARGS(intf))
+
+/*
+ * Occurs after a new interface is successfully created.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_create);
+
+/*
+ * Occurs after the last reference to an interface has been dropped.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_release);
+
+/*
+ * Occurs after an interface has been registered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_add);
+
+/*
+ * Occurs when a registered interface gets deregistered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_del);
+
+/*
+ * Occurs when a registered interface has been successfully
+ * activated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_activate);
+
+/*
+ * Occurs when an activated interface is being deactivated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_deactivate);
+
+/*
+ * Occurs when an interface has been successfully enabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_enable);
+
+/*
+ * Occurs when an enabled interface is being disabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_disable);
+
+#undef DEFINE_INTERFACE_EVENT
+
+DECLARE_EVENT_CLASS(gb_module,
+
+ TP_PROTO(struct gb_module *module),
+
+ TP_ARGS(module),
+
+ TP_STRUCT__entry(
+ __field(int, hd_bus_id)
+ __field(u8, module_id)
+ __field(size_t, num_interfaces)
+ __field(int, disconnected) /* bool */
+ ),
+
+ TP_fast_assign(
+ __entry->hd_bus_id = module->hd->bus_id;
+ __entry->module_id = module->module_id;
+ __entry->num_interfaces = module->num_interfaces;
+ __entry->disconnected = module->disconnected;
+ ),
+
+ TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
+ __entry->hd_bus_id, __entry->module_id,
+ __entry->num_interfaces, __entry->disconnected)
+);
+
+#define DEFINE_MODULE_EVENT(name) \
+ DEFINE_EVENT(gb_module, name, \
+ TP_PROTO(struct gb_module *module), \
+ TP_ARGS(module))
+
+/*
+ * Occurs after a new module is successfully created, before
+ * creating any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_create);
+
+/*
+ * Occurs after the last reference to a module has been dropped.
+ */
+DEFINE_MODULE_EVENT(gb_module_release);
+
+/*
+ * Occurs after a module is successfully created, before registering
+ * any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_add);
+
+/*
+ * Occurs when a module is deleted, before deregistering its
+ * interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_del);
+
+#undef DEFINE_MODULE_EVENT
+
+DECLARE_EVENT_CLASS(gb_host_device,
+
+ TP_PROTO(struct gb_host_device *hd),
+
+ TP_ARGS(hd),
+
+ TP_STRUCT__entry(
+ __field(int, bus_id)
+ __field(size_t, num_cports)
+ __field(size_t, buffer_size_max)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_id = hd->bus_id;
+ __entry->num_cports = hd->num_cports;
+ __entry->buffer_size_max = hd->buffer_size_max;
+ ),
+
+ TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
+ __entry->bus_id, __entry->num_cports,
+ __entry->buffer_size_max)
+);
+
+#define DEFINE_HD_EVENT(name) \
+ DEFINE_EVENT(gb_host_device, name, \
+ TP_PROTO(struct gb_host_device *hd), \
+ TP_ARGS(hd))
+
+/*
+ * Occurs after a new host device is successfully created, before
+ * its SVC has been set up.
+ */
+DEFINE_HD_EVENT(gb_hd_create);
+
+/*
+ * Occurs after the last reference to a host device has been
+ * dropped.
+ */
+DEFINE_HD_EVENT(gb_hd_release);
+
+/*
+ * Occurs after a new host device has been added, after the
+ * connection to its SVC has been enabled.
+ */
+DEFINE_HD_EVENT(gb_hd_add);
+
+/*
+ * Occurs when a host device is being disconnected from the AP USB
+ * host controller.
+ */
+DEFINE_HD_EVENT(gb_hd_del);
+
+/*
+ * Occurs when a host device has passed received data to the Greybus
+ * core, after it has been determined that the data is destined for a
+ * valid CPort.
+ */
+DEFINE_HD_EVENT(gb_hd_in);
+
+#undef DEFINE_HD_EVENT
+
+/*
+ * Occurs on a TimeSync synchronization event or a TimeSync ping event.
+ */
+TRACE_EVENT(gb_timesync_irq,
+
+ TP_PROTO(u8 ping, u8 strobe, u8 count, u64 frame_time),
+
+ TP_ARGS(ping, strobe, count, frame_time),
+
+ TP_STRUCT__entry(
+ __field(u8, ping)
+ __field(u8, strobe)
+ __field(u8, count)
+ __field(u64, frame_time)
+ ),
+
+ TP_fast_assign(
+ __entry->ping = ping;
+ __entry->strobe = strobe;
+ __entry->count = count;
+ __entry->frame_time = frame_time;
+ ),
+
+ TP_printk("%s %d/%d frame-time %llu\n",
+ __entry->ping ? "ping" : "strobe", __entry->strobe,
+ __entry->count, __entry->frame_time)
+);
+
+#endif /* _TRACE_GREYBUS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE greybus_trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
new file mode 100644
index 000000000000..185ae3fa10fd
--- /dev/null
+++ b/drivers/staging/greybus/hd.c
@@ -0,0 +1,257 @@
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
+
+static struct ida gb_hd_bus_id_map;
+
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool async)
+{
+ if (!hd || !hd->driver || !hd->driver->output)
+ return -EINVAL;
+ return hd->driver->output(hd, req, size, cmd, async);
+}
+EXPORT_SYMBOL_GPL(gb_hd_output);
+
+static ssize_t bus_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_host_device *hd = to_gb_host_device(dev);
+
+ return sprintf(buf, "%d\n", hd->bus_id);
+}
+static DEVICE_ATTR_RO(bus_id);
+
+static struct attribute *bus_attrs[] = {
+ &dev_attr_bus_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bus);
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
+{
+ struct ida *id_map = &hd->cport_id_map;
+ int ret;
+
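+ /* Reserve exactly this cport id by requesting the one-element range [cport_id, cport_id + 1). */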
+ ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
+
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
+{
+ struct ida *id_map = &hd->cport_id_map;
+
+ ida_simple_remove(id_map, cport_id);
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
+
+/* Locking: Caller guarantees serialisation */
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+ unsigned long flags)
+{
+ struct ida *id_map = &hd->cport_id_map;
+ int ida_start, ida_end;
+
+ if (hd->driver->cport_allocate)
+ return hd->driver->cport_allocate(hd, cport_id, flags);
+
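+ /* A negative cport_id means any available cport may be allocated. */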
+ if (cport_id < 0) {
+ ida_start = 0;
+ ida_end = hd->num_cports;
+ } else if (cport_id < hd->num_cports) {
+ ida_start = cport_id;
+ ida_end = cport_id + 1;
+ } else {
+ dev_err(&hd->dev, "cport %d not available\n", cport_id);
+ return -EINVAL;
+ }
+
+ return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+}
+
+/* Locking: Caller guarantees serialisation */
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
+{
+ if (hd->driver->cport_release) {
+ hd->driver->cport_release(hd, cport_id);
+ return;
+ }
+
+ ida_simple_remove(&hd->cport_id_map, cport_id);
+}
+
+static void gb_hd_release(struct device *dev)
+{
+ struct gb_host_device *hd = to_gb_host_device(dev);
+
+ trace_gb_hd_release(hd);
+
+ if (hd->svc)
+ gb_svc_put(hd->svc);
+ ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
+ ida_destroy(&hd->cport_id_map);
+ kfree(hd);
+}
+
+struct device_type greybus_hd_type = {
+ .name = "greybus_host_device",
+ .release = gb_hd_release,
+};
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports)
+{
+ struct gb_host_device *hd;
+ int ret;
+
+ /*
+ * Validate that the driver implements all of the mandatory
+ * callbacks, so that we don't have to check for them every time
+ * we invoke one.
+ */
+ if ((!driver->message_send) || (!driver->message_cancel)) {
+ dev_err(parent, "mandatory hd-callbacks missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
+ dev_err(parent, "greybus host-device buffers too small\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
+ dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Make sure to never allocate messages larger than what the Greybus
+ * protocol supports.
+ */
+ if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
+ dev_warn(parent, "limiting buffer size to %u\n",
+ GB_OPERATION_MESSAGE_SIZE_MAX);
+ buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
+ }
+
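+ /* Allocate the host device along with room for the host driver's private data. */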
+ hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
+ if (!hd)
+ return ERR_PTR(-ENOMEM);
+
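+ /* Allocate a unique, non-zero bus id; an upper bound of 0 means no limit. */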
+ ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(hd);
+ return ERR_PTR(ret);
+ }
+ hd->bus_id = ret;
+
+ hd->driver = driver;
+ INIT_LIST_HEAD(&hd->modules);
+ INIT_LIST_HEAD(&hd->connections);
+ ida_init(&hd->cport_id_map);
+ hd->buffer_size_max = buffer_size_max;
+ hd->num_cports = num_cports;
+
+ hd->dev.parent = parent;
+ hd->dev.bus = &greybus_bus_type;
+ hd->dev.type = &greybus_hd_type;
+ hd->dev.groups = bus_groups;
+ hd->dev.dma_mask = hd->dev.parent->dma_mask;
+ device_initialize(&hd->dev);
+ dev_set_name(&hd->dev, "greybus%d", hd->bus_id);
+
+ trace_gb_hd_create(hd);
+
+ hd->svc = gb_svc_create(hd);
+ if (!hd->svc) {
+ dev_err(&hd->dev, "failed to create svc\n");
+ put_device(&hd->dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return hd;
+}
+EXPORT_SYMBOL_GPL(gb_hd_create);
+
+int gb_hd_add(struct gb_host_device *hd)
+{
+ int ret;
+
+ ret = device_add(&hd->dev);
+ if (ret)
+ return ret;
+
+ ret = gb_svc_add(hd->svc);
+ if (ret) {
+ device_del(&hd->dev);
+ return ret;
+ }
+
+ trace_gb_hd_add(hd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_add);
+
+void gb_hd_del(struct gb_host_device *hd)
+{
+ trace_gb_hd_del(hd);
+
+ /*
+ * Tear down the svc and flush any on-going hotplug processing before
+ * removing the remaining interfaces.
+ */
+ gb_svc_del(hd->svc);
+
+ device_del(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_del);
+
+void gb_hd_shutdown(struct gb_host_device *hd)
+{
+ gb_svc_del(hd->svc);
+}
+EXPORT_SYMBOL_GPL(gb_hd_shutdown);
+
+void gb_hd_put(struct gb_host_device *hd)
+{
+ put_device(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_put);
+
+int __init gb_hd_init(void)
+{
+ ida_init(&gb_hd_bus_id_map);
+
+ return 0;
+}
+
+void gb_hd_exit(void)
+{
+ ida_destroy(&gb_hd_bus_id_map);
+}
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
new file mode 100644
index 000000000000..c4250cfe595f
--- /dev/null
+++ b/drivers/staging/greybus/hd.h
@@ -0,0 +1,90 @@
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __HD_H
+#define __HD_H
+
+struct gb_host_device;
+struct gb_message;
+
+struct gb_hd_driver {
+ size_t hd_priv_size;
+
+ int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+ void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
+ unsigned long flags);
+ int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
+ u8 phase, unsigned int timeout);
+ int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
+ size_t peer_space, unsigned int timeout);
+ int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
+
+ int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
+ struct gb_message *message, gfp_t gfp_mask);
+ void (*message_cancel)(struct gb_message *message);
+ int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
+ int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool async);
+ int (*timesync_enable)(struct gb_host_device *hd, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk);
+ int (*timesync_disable)(struct gb_host_device *hd);
+ int (*timesync_authoritative)(struct gb_host_device *hd,
+ u64 *frame_time);
+ int (*timesync_get_last_event)(struct gb_host_device *hd,
+ u64 *frame_time);
+};
+
+struct gb_host_device {
+ struct device dev;
+ int bus_id;
+ const struct gb_hd_driver *driver;
+
+ struct list_head modules;
+ struct list_head connections;
+ struct ida cport_id_map;
+
+ /* Number of CPorts supported by the UniPro IP */
+ size_t num_cports;
+
+ /* Host device buffer constraints */
+ size_t buffer_size_max;
+
+ struct gb_svc *svc;
+ /* Private data for the host driver */
+ unsigned long hd_priv[0] __aligned(sizeof(s64));
+};
+#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports);
+int gb_hd_add(struct gb_host_device *hd);
+void gb_hd_del(struct gb_host_device *hd);
+void gb_hd_shutdown(struct gb_host_device *hd);
+void gb_hd_put(struct gb_host_device *hd);
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool in_irq);
+
+int gb_hd_init(void);
+void gb_hd_exit(void);
+
+#endif /* __HD_H */
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
new file mode 100644
index 000000000000..730d746fc4c2
--- /dev/null
+++ b/drivers/staging/greybus/hid.c
@@ -0,0 +1,536 @@
+/*
+ * HID class driver for Greybus.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+
+/* Greybus HID device structure */
+struct gb_hid {
+ struct gb_bundle *bundle;
+ struct gb_connection *connection;
+
+ struct hid_device *hid;
+ struct gb_hid_desc_response hdesc;
+
+ unsigned long flags;
+#define GB_HID_STARTED 0x01
+#define GB_HID_READ_PENDING 0x04
+
+ unsigned int bufsize;
+ char *inbuf;
+};
+
+static DEFINE_MUTEX(gb_hid_open_mutex);
+
+/* Routines to get controller's information over greybus */
+
+/* Operations performed on greybus */
+static int gb_hid_get_desc(struct gb_hid *ghid)
+{
+ return gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_DESC, NULL,
+ 0, &ghid->hdesc, sizeof(ghid->hdesc));
+}
+
+static int gb_hid_get_report_desc(struct gb_hid *ghid, char *rdesc)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT_DESC,
+ NULL, 0, rdesc,
+ le16_to_cpu(ghid->hdesc.wReportDescLength));
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_set_power(struct gb_hid *ghid, int type)
+{
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(ghid->connection, type, NULL, 0, NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_get_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+ unsigned char *buf, int len)
+{
+ struct gb_hid_get_report_request request;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ request.report_type = report_type;
+ request.report_id = report_id;
+
+ ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT,
+ &request, sizeof(request), buf, len);
+
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_set_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
+ unsigned char *buf, int len)
+{
+ struct gb_hid_set_report_request *request;
+ struct gb_operation *operation;
+ int ret, size = sizeof(*request) + len - 1;
+
+ ret = gb_pm_runtime_get_sync(ghid->bundle);
+ if (ret)
+ return ret;
+
+ operation = gb_operation_create(ghid->connection,
+ GB_HID_TYPE_SET_REPORT, size, 0,
+ GFP_KERNEL);
+ if (!operation) {
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+ return -ENOMEM;
+ }
+
+ request = operation->request->payload;
+ request->report_type = report_type;
+ request->report_id = report_id;
+ memcpy(request->report, buf, len);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&operation->connection->bundle->dev,
+ "failed to set report: %d\n", ret);
+ } else {
+ ret = len;
+ }
+
+ gb_operation_put(operation);
+ gb_pm_runtime_put_autosuspend(ghid->bundle);
+
+ return ret;
+}
+
+static int gb_hid_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_hid *ghid = gb_connection_get_data(connection);
+ struct gb_hid_input_report_request *request = op->request->payload;
+
+ if (op->type != GB_HID_TYPE_IRQ_EVENT) {
+ dev_err(&connection->bundle->dev,
+ "unsupported unsolicited request\n");
+ return -EINVAL;
+ }
+
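+ /* Forward input reports to the HID core only while the device is open (GB_HID_STARTED set). */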
+ if (test_bit(GB_HID_STARTED, &ghid->flags))
+ hid_input_report(ghid->hid, HID_INPUT_REPORT,
+ request->report, op->request->payload_size, 1);
+
+ return 0;
+}
+
+static int gb_hid_report_len(struct hid_report *report)
+{
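+ /* Report size in bits rounded up to bytes, plus one byte for the report id if reports of this type are numbered. */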
+ return ((report->size - 1) >> 3) + 1 +
+ report->device->report_enum[report->type].numbered;
+}
+
+static void gb_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ unsigned int *max)
+{
+ struct hid_report *report;
+ unsigned int size;
+
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+ size = gb_hid_report_len(report);
+ if (*max < size)
+ *max = size;
+ }
+}
+
+static void gb_hid_free_buffers(struct gb_hid *ghid)
+{
+ kfree(ghid->inbuf);
+ ghid->inbuf = NULL;
+ ghid->bufsize = 0;
+}
+
+static int gb_hid_alloc_buffers(struct gb_hid *ghid, size_t bufsize)
+{
+ ghid->inbuf = kzalloc(bufsize, GFP_KERNEL);
+ if (!ghid->inbuf)
+ return -ENOMEM;
+
+ ghid->bufsize = bufsize;
+
+ return 0;
+}
+
+/* Routines dealing with reports */
+static void gb_hid_init_report(struct gb_hid *ghid, struct hid_report *report)
+{
+ unsigned int size;
+
+ size = gb_hid_report_len(report);
+ if (gb_hid_get_report(ghid, report->type, report->id, ghid->inbuf,
+ size))
+ return;
+
+ /*
+ * hid->driver_lock is held as we are in probe function,
+ * we just need to setup the input fields, so using
+ * hid_report_raw_event is safe.
+ */
+ hid_report_raw_event(ghid->hid, report->type, ghid->inbuf, size, 1);
+}
+
+static void gb_hid_init_reports(struct gb_hid *ghid)
+{
+ struct hid_device *hid = ghid->hid;
+ struct hid_report *report;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_INPUT_REPORT].report_list, list)
+ gb_hid_init_report(ghid, report);
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
+ gb_hid_init_report(ghid, report);
+}
+
+static int __gb_hid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ ret = gb_hid_get_report(ghid, report_type, report_number, buf, count);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+ size_t len, unsigned char report_type)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int report_id = buf[0];
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT)
+ return -EINVAL;
+
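+ /* A non-zero report id is passed separately in the request, so strip it from the data buffer. */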
+ if (report_id) {
+ buf++;
+ len--;
+ }
+
+ ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
+ if (report_id && ret >= 0)
+ ret++; /* add report_id to the number of transferred bytes */
+
+ return ret;
+}
+
+static int gb_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return __gb_hid_get_raw_report(hid, reportnum, buf, len, rtype);
+ case HID_REQ_SET_REPORT:
+ if (buf[0] != reportnum)
+ return -EINVAL;
+ return __gb_hid_output_raw_report(hid, buf, len, rtype);
+ default:
+ return -EIO;
+ }
+}
+
+/* HID Callbacks */
+static int gb_hid_parse(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ unsigned int rsize;
+ char *rdesc;
+ int ret;
+
+ rsize = le16_to_cpu(ghid->hdesc.wReportDescLength);
+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ dbg_hid("weird size of report descriptor (%u)\n", rsize);
+ return -EINVAL;
+ }
+
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = gb_hid_get_report_desc(ghid, rdesc);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ goto free_rdesc;
+ }
+
+ ret = hid_parse_report(hid, rdesc, rsize);
+ if (ret)
+ dbg_hid("parsing report descriptor failed\n");
+
+free_rdesc:
+ kfree(rdesc);
+
+ return ret;
+}
+
+static int gb_hid_start(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+ int ret;
+
+ gb_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+ gb_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+ gb_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+ if (bufsize > HID_MAX_BUFFER_SIZE)
+ bufsize = HID_MAX_BUFFER_SIZE;
+
+ ret = gb_hid_alloc_buffers(ghid, bufsize);
+ if (ret)
+ return ret;
+
+ if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
+ gb_hid_init_reports(ghid);
+
+ return 0;
+}
+
+static void gb_hid_stop(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+
+ gb_hid_free_buffers(ghid);
+}
+
+static int gb_hid_open(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret = 0;
+
+ mutex_lock(&gb_hid_open_mutex);
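+ /* Power the device on only for the first opener; later opens just increment the count. */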
+ if (!hid->open++) {
+ ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+ if (ret < 0)
+ hid->open--;
+ else
+ set_bit(GB_HID_STARTED, &ghid->flags);
+ }
+ mutex_unlock(&gb_hid_open_mutex);
+
+ return ret;
+}
+
+static void gb_hid_close(struct hid_device *hid)
+{
+ struct gb_hid *ghid = hid->driver_data;
+ int ret;
+
+ /*
+ * Protect hid->open to make sure we don't restart data acquisition
+ * due to a resumption we no longer care about.
+ */
+ mutex_lock(&gb_hid_open_mutex);
+ if (!--hid->open) {
+ clear_bit(GB_HID_STARTED, &ghid->flags);
+
+ /* Save some power */
+ ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+ if (ret)
+ dev_err(&ghid->connection->bundle->dev,
+ "failed to power off (%d)\n", ret);
+ }
+ mutex_unlock(&gb_hid_open_mutex);
+}
+
+static int gb_hid_power(struct hid_device *hid, int lvl)
+{
+ struct gb_hid *ghid = hid->driver_data;
+
+ switch (lvl) {
+ case PM_HINT_FULLON:
+ return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
+ case PM_HINT_NORMAL:
+ return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
+ }
+
+ return 0;
+}
+
+/* HID structure to pass callbacks */
+static struct hid_ll_driver gb_hid_ll_driver = {
+ .parse = gb_hid_parse,
+ .start = gb_hid_start,
+ .stop = gb_hid_stop,
+ .open = gb_hid_open,
+ .close = gb_hid_close,
+ .power = gb_hid_power,
+ .raw_request = gb_hid_raw_request,
+};
+
+static int gb_hid_init(struct gb_hid *ghid)
+{
+ struct hid_device *hid = ghid->hid;
+ int ret;
+
+ ret = gb_hid_get_desc(ghid);
+ if (ret)
+ return ret;
+
+ hid->version = le16_to_cpu(ghid->hdesc.bcdHID);
+ hid->vendor = le16_to_cpu(ghid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ghid->hdesc.wProductID);
+ hid->country = ghid->hdesc.bCountryCode;
+
+ hid->driver_data = ghid;
+ hid->ll_driver = &gb_hid_ll_driver;
+ hid->dev.parent = &ghid->connection->bundle->dev;
+// hid->bus = BUS_GREYBUS; /* Need a bustype for GREYBUS in <linux/input.h> */
+
+ /* Set HID device's name */
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ dev_name(&ghid->connection->bundle->dev),
+ hid->vendor, hid->product);
+
+ return 0;
+}
+
+static int gb_hid_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct hid_device *hid;
+ struct gb_hid *ghid;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_HID)
+ return -ENODEV;
+
+ ghid = kzalloc(sizeof(*ghid), GFP_KERNEL);
+ if (!ghid)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_hid_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto err_free_ghid;
+ }
+
+ gb_connection_set_data(connection, ghid);
+ ghid->connection = connection;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_connection_destroy;
+ }
+
+ ghid->hid = hid;
+ ghid->bundle = bundle;
+
+ greybus_set_drvdata(bundle, ghid);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto err_destroy_hid;
+
+ ret = gb_hid_init(ghid);
+ if (ret)
+ goto err_connection_disable;
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_err(hid, "can't add hid device: %d\n", ret);
+ goto err_connection_disable;
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+err_connection_disable:
+ gb_connection_disable(connection);
+err_destroy_hid:
+ hid_destroy_device(hid);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_ghid:
+ kfree(ghid);
+
+ return ret;
+}
+
+static void gb_hid_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_hid *ghid = greybus_get_drvdata(bundle);
+
+ if (gb_pm_runtime_get_sync(bundle))
+ gb_pm_runtime_get_noresume(bundle);
+
+ hid_destroy_device(ghid->hid);
+ gb_connection_disable(ghid->connection);
+ gb_connection_destroy(ghid->connection);
+ kfree(ghid);
+}
+
+static const struct greybus_bundle_id gb_hid_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_hid_id_table);
+
+static struct greybus_driver gb_hid_driver = {
+ .name = "hid",
+ .probe = gb_hid_probe,
+ .disconnect = gb_hid_disconnect,
+ .id_table = gb_hid_id_table,
+};
+module_greybus_driver(gb_hid_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
new file mode 100644
index 000000000000..c2a50087000c
--- /dev/null
+++ b/drivers/staging/greybus/i2c.c
@@ -0,0 +1,343 @@
+/*
+ * I2C bridge driver for the Greybus "generic" I2C module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_i2c_device {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+
+ u32 functionality;
+
+ struct i2c_adapter adapter;
+};
+
+/*
+ * Map Greybus i2c functionality bits into Linux ones
+ */
+static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
+{
+ return gb_i2c_functionality; /* All bits the same for now */
+}
+
+static int gb_i2c_functionality_operation(struct gb_i2c_device *gb_i2c_dev)
+{
+ struct gb_i2c_functionality_response response;
+ u32 functionality;
+ int ret;
+
+ ret = gb_operation_sync(gb_i2c_dev->connection,
+ GB_I2C_TYPE_FUNCTIONALITY,
+ NULL, 0, &response, sizeof(response));
+ if (ret)
+ return ret;
+
+ functionality = le32_to_cpu(response.functionality);
+ gb_i2c_dev->functionality = gb_i2c_functionality_map(functionality);
+
+ return 0;
+}
+
+/*
+ * Map Linux i2c_msg flags into Greybus i2c transfer op flags.
+ */
+static u16 gb_i2c_transfer_op_flags_map(u16 flags)
+{
+ return flags; /* All flags the same for now */
+}
+
+static void
+gb_i2c_fill_transfer_op(struct gb_i2c_transfer_op *op, struct i2c_msg *msg)
+{
+ u16 flags = gb_i2c_transfer_op_flags_map(msg->flags);
+
+ op->addr = cpu_to_le16(msg->addr);
+ op->flags = cpu_to_le16(flags);
+ op->size = cpu_to_le16(msg->len);
+}
+
+static struct gb_operation *
+gb_i2c_operation_create(struct gb_connection *connection,
+ struct i2c_msg *msgs, u32 msg_count)
+{
+ struct gb_i2c_device *gb_i2c_dev = gb_connection_get_data(connection);
+ struct gb_i2c_transfer_request *request;
+ struct gb_operation *operation;
+ struct gb_i2c_transfer_op *op;
+ struct i2c_msg *msg;
+ u32 data_out_size = 0;
+ u32 data_in_size = 0;
+ size_t request_size;
+ void *data;
+ u16 op_count;
+ u32 i;
+
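+ /* The transfer request carries the op count in a 16-bit field, so reject anything larger. */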
+ if (msg_count > (u32)U16_MAX) {
+ dev_err(&gb_i2c_dev->gbphy_dev->dev, "msg_count (%u) too big\n",
+ msg_count);
+ return NULL;
+ }
+ op_count = (u16)msg_count;
+
+ /*
+ * In addition to space for all message descriptors we need
+ * to have enough to hold all outbound message data.
+ */
+ msg = msgs;
+ for (i = 0; i < msg_count; i++, msg++)
+ if (msg->flags & I2C_M_RD)
+ data_in_size += (u32)msg->len;
+ else
+ data_out_size += (u32)msg->len;
+
+ request_size = sizeof(*request);
+ request_size += msg_count * sizeof(*op);
+ request_size += data_out_size;
+
+ /* Response consists only of incoming data */
+ operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,
+ request_size, data_in_size, GFP_KERNEL);
+ if (!operation)
+ return NULL;
+
+ request = operation->request->payload;
+ request->op_count = cpu_to_le16(op_count);
+ /* Fill in the ops array */
+ op = &request->ops[0];
+ msg = msgs;
+ for (i = 0; i < msg_count; i++)
+ gb_i2c_fill_transfer_op(op++, msg++);
+
+ if (!data_out_size)
+ return operation;
+
+ /* Copy over the outgoing data; it starts after the last op */
+ data = op;
+ msg = msgs;
+ for (i = 0; i < msg_count; i++) {
+ if (!(msg->flags & I2C_M_RD)) {
+ memcpy(data, msg->buf, msg->len);
+ data += msg->len;
+ }
+ msg++;
+ }
+
+ return operation;
+}
+
+static void gb_i2c_decode_response(struct i2c_msg *msgs, u32 msg_count,
+ struct gb_i2c_transfer_response *response)
+{
+ struct i2c_msg *msg = msgs;
+ u8 *data;
+ u32 i;
+
+ if (!response)
+ return;
+ data = response->data;
+ for (i = 0; i < msg_count; i++) {
+ if (msg->flags & I2C_M_RD) {
+ memcpy(msg->buf, data, msg->len);
+ data += msg->len;
+ }
+ msg++;
+ }
+}
+
+/*
+ * Some i2c transfer failures (such as -EAGAIN and -ENODEV) are
+ * expected and are therefore not logged as errors.
+ */
+static bool gb_i2c_expected_transfer_error(int errno)
+{
+ return errno == -EAGAIN || errno == -ENODEV;
+}
+
+static int gb_i2c_transfer_operation(struct gb_i2c_device *gb_i2c_dev,
+ struct i2c_msg *msgs, u32 msg_count)
+{
+ struct gb_connection *connection = gb_i2c_dev->connection;
+ struct device *dev = &gb_i2c_dev->gbphy_dev->dev;
+ struct gb_operation *operation;
+ int ret;
+
+ operation = gb_i2c_operation_create(connection, msgs, msg_count);
+ if (!operation)
+ return -ENOMEM;
+
+ ret = gbphy_runtime_get_sync(gb_i2c_dev->gbphy_dev);
+ if (ret)
+ goto exit_operation_put;
+
+ ret = gb_operation_request_send_sync(operation);
+ if (!ret) {
+ struct gb_i2c_transfer_response *response;
+
+ response = operation->response->payload;
+ gb_i2c_decode_response(msgs, msg_count, response);
+ ret = msg_count;
+ } else if (!gb_i2c_expected_transfer_error(ret)) {
+ dev_err(dev, "transfer operation failed (%d)\n", ret);
+ }
+
+ gbphy_runtime_put_autosuspend(gb_i2c_dev->gbphy_dev);
+
+exit_operation_put:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int msg_count)
+{
+ struct gb_i2c_device *gb_i2c_dev;
+
+ gb_i2c_dev = i2c_get_adapdata(adap);
+
+ return gb_i2c_transfer_operation(gb_i2c_dev, msgs, msg_count);
+}
+
+#if 0
+/* Later */
+static int gb_i2c_smbus_xfer(struct i2c_adapter *adap,
+ u16 addr, unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ struct gb_i2c_device *gb_i2c_dev;
+
+ gb_i2c_dev = i2c_get_adapdata(adap);
+
+ return 0;
+}
+#endif
+
+static u32 gb_i2c_functionality(struct i2c_adapter *adap)
+{
+ struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
+
+ return gb_i2c_dev->functionality;
+}
+
+static const struct i2c_algorithm gb_i2c_algorithm = {
+ .master_xfer = gb_i2c_master_xfer,
+ /* .smbus_xfer = gb_i2c_smbus_xfer, */
+ .functionality = gb_i2c_functionality,
+};
+
+/*
+ * Do initial setup of the i2c device. This includes verifying we
+ * can support it (based on the protocol version it advertises).
+ * If that's OK, we get and cache its functionality bits.
+ *
+ * Note: gb_i2c_dev->connection is assumed to be valid.
+ */
+static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
+{
+ /* Assume the functionality never changes, just get it once */
+ return gb_i2c_functionality_operation(gb_i2c_dev);
+}
+
+static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_i2c_device *gb_i2c_dev;
+ struct i2c_adapter *adapter;
+ int ret;
+
+ gb_i2c_dev = kzalloc(sizeof(*gb_i2c_dev), GFP_KERNEL);
+ if (!gb_i2c_dev)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_i2cdev_free;
+ }
+
+ gb_i2c_dev->connection = connection;
+ gb_connection_set_data(connection, gb_i2c_dev);
+ gb_i2c_dev->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, gb_i2c_dev);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_i2c_device_setup(gb_i2c_dev);
+ if (ret)
+ goto exit_connection_disable;
+
+ /* Looks good; set up our i2c adapter */
+ adapter = &gb_i2c_dev->adapter;
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adapter->algo = &gb_i2c_algorithm;
+ /* adapter->algo_data = what? */
+
+ adapter->dev.parent = &gbphy_dev->dev;
+ snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
+ i2c_set_adapdata(adapter, gb_i2c_dev);
+
+ ret = i2c_add_adapter(adapter);
+ if (ret)
+ goto exit_connection_disable;
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_i2cdev_free:
+ kfree(gb_i2c_dev);
+
+ return ret;
+}
+
+static void gb_i2c_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_i2c_device *gb_i2c_dev = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_i2c_dev->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ i2c_del_adapter(&gb_i2c_dev->adapter);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ kfree(gb_i2c_dev);
+}
+
+static const struct gbphy_device_id gb_i2c_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_I2C) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_i2c_id_table);
+
+static struct gbphy_driver i2c_driver = {
+ .name = "i2c",
+ .probe = gb_i2c_probe,
+ .remove = gb_i2c_remove,
+ .id_table = gb_i2c_id_table,
+};
+
+module_gbphy_driver(i2c_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
new file mode 100644
index 000000000000..546b090e2d51
--- /dev/null
+++ b/drivers/staging/greybus/interface.c
@@ -0,0 +1,1316 @@
+/*
+ * Greybus interface code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/delay.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
+
+#define GB_INTERFACE_DEVICE_ID_BAD 0xff
+
+#define GB_INTERFACE_AUTOSUSPEND_MS 3000
+
+/* Time required for interface to enter standby before disabling REFCLK */
+#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20
+
+/* Don't-care selector index */
+#define DME_SELECTOR_INDEX_NULL 0
+
+/* DME attributes */
+/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
+#define DME_T_TST_SRC_INCREMENT 0x4083
+
+#define DME_DDBL1_MANUFACTURERID 0x5003
+#define DME_DDBL1_PRODUCTID 0x5004
+
+#define DME_TOSHIBA_GMP_VID 0x6000
+#define DME_TOSHIBA_GMP_PID 0x6001
+#define DME_TOSHIBA_GMP_SN0 0x6002
+#define DME_TOSHIBA_GMP_SN1 0x6003
+#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101
+
+/* DDBL1 Manufacturer and Product ids */
+#define TOSHIBA_DMID 0x0126
+#define TOSHIBA_ES2_BRIDGE_DPID 0x1000
+#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
+#define TOSHIBA_ES3_GBPHY_DPID 0x1002
+
+static int gb_interface_hibernate_link(struct gb_interface *intf);
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
+
+static int gb_interface_dme_attr_get(struct gb_interface *intf,
+ u16 attr, u32 *val)
+{
+ return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
+ attr, DME_SELECTOR_INDEX_NULL, val);
+}
+
+static int gb_interface_read_ara_dme(struct gb_interface *intf)
+{
+ u32 sn0, sn1;
+ int ret;
+
+ /*
+ * Unless this is a Toshiba bridge, bail out until we have defined
+ * standard GMP attributes.
+ */
+ if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
+ dev_err(&intf->dev, "unknown manufacturer %08x\n",
+ intf->ddbl1_manufacturer_id);
+ return -ENODEV;
+ }
+
+ ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
+ &intf->vendor_id);
+ if (ret)
+ return ret;
+
+ ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
+ &intf->product_id);
+ if (ret)
+ return ret;
+
+ ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
+ if (ret)
+ return ret;
+
+ ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
+ if (ret)
+ return ret;
+
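+ /* Combine the two 32-bit serial-number attributes into one 64-bit value. */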
+ intf->serial_number = (u64)sn1 << 32 | sn0;
+
+ return 0;
+}
+
+static int gb_interface_read_dme(struct gb_interface *intf)
+{
+ int ret;
+
+ /* DME attributes have already been read */
+ if (intf->dme_read)
+ return 0;
+
+ ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
+ &intf->ddbl1_manufacturer_id);
+ if (ret)
+ return ret;
+
+ ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
+ &intf->ddbl1_product_id);
+ if (ret)
+ return ret;
+
+ if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
+ intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
+ intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
+ intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
+ }
+
+ ret = gb_interface_read_ara_dme(intf);
+ if (ret)
+ return ret;
+
+ intf->dme_read = true;
+
+ return 0;
+}
+
+static int gb_interface_route_create(struct gb_interface *intf)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ u8 intf_id = intf->interface_id;
+ u8 device_id;
+ int ret;
+
+ /* Allocate an interface device id. */
+ ret = ida_simple_get(&svc->device_id_map,
+ GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
+ return ret;
+ }
+ device_id = ret;
+
+ ret = gb_svc_intf_device_id(svc, intf_id, device_id);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set device id %u: %d\n",
+ device_id, ret);
+ goto err_ida_remove;
+ }
+
+ /* FIXME: Hard-coded AP device id. */
+ ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
+ intf_id, device_id);
+ if (ret) {
+ dev_err(&intf->dev, "failed to create route: %d\n", ret);
+ goto err_svc_id_free;
+ }
+
+ intf->device_id = device_id;
+
+ return 0;
+
+err_svc_id_free:
+ /*
+ * XXX Should we tell the SVC that this device id no longer
+ * XXX belongs to the interface?
+ */
+err_ida_remove:
+ ida_simple_remove(&svc->device_id_map, device_id);
+
+ return ret;
+}
+
+static void gb_interface_route_destroy(struct gb_interface *intf)
+{
+ struct gb_svc *svc = intf->hd->svc;
+
+ if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
+ return;
+
+ gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
+ ida_simple_remove(&svc->device_id_map, intf->device_id);
+ intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+}
+
+/* Locking: Caller holds the interface mutex. */
+static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
+{
+ int ret;
+
+ dev_info(&intf->dev, "legacy mode switch detected\n");
+
+ /* Mark as disconnected to prevent I/O during disable. */
+ intf->disconnected = true;
+ gb_interface_disable(intf);
+ intf->disconnected = false;
+
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
+ gb_interface_deactivate(intf);
+ }
+
+ return ret;
+}
+
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+ u32 mailbox)
+{
+ mutex_lock(&intf->mutex);
+
+ if (result) {
+ dev_warn(&intf->dev,
+ "mailbox event with UniPro error: 0x%04x\n",
+ result);
+ goto err_disable;
+ }
+
+ if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
+ dev_warn(&intf->dev,
+ "mailbox event with unexpected value: 0x%08x\n",
+ mailbox);
+ goto err_disable;
+ }
+
+ if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
+ gb_interface_legacy_mode_switch(intf);
+ goto out_unlock;
+ }
+
+ if (!intf->mode_switch) {
+ dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
+ mailbox);
+ goto err_disable;
+ }
+
+ dev_info(&intf->dev, "mode switch detected\n");
+
+ complete(&intf->mode_switch_completion);
+
+out_unlock:
+ mutex_unlock(&intf->mutex);
+
+ return;
+
+err_disable:
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+}
+
+static void gb_interface_mode_switch_work(struct work_struct *work)
+{
+ struct gb_interface *intf;
+ struct gb_control *control;
+ unsigned long timeout;
+ int ret;
+
+ intf = container_of(work, struct gb_interface, mode_switch_work);
+
+ mutex_lock(&intf->mutex);
+ /* Make sure interface is still enabled. */
+ if (!intf->enabled) {
+ dev_dbg(&intf->dev, "mode switch aborted\n");
+ intf->mode_switch = false;
+ mutex_unlock(&intf->mutex);
+ goto out_interface_put;
+ }
+
+ /*
+ * Prepare the control device for mode switch and make sure to get an
+ * extra reference before it goes away during interface disable.
+ */
+ control = gb_control_get(intf->control);
+ gb_control_mode_switch_prepare(control);
+ gb_interface_disable(intf);
+ mutex_unlock(&intf->mutex);
+
+ timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
+ ret = wait_for_completion_interruptible_timeout(
+ &intf->mode_switch_completion, timeout);
+
+ /* Finalise control-connection mode switch. */
+ gb_control_mode_switch_complete(control);
+ gb_control_put(control);
+
+ if (ret < 0) {
+ dev_err(&intf->dev, "mode switch interrupted\n");
+ goto err_deactivate;
+ } else if (ret == 0) {
+ dev_err(&intf->dev, "mode switch timed out\n");
+ goto err_deactivate;
+ }
+
+ /* Re-enable (re-enumerate) interface if still active. */
+ mutex_lock(&intf->mutex);
+ intf->mode_switch = false;
+ if (intf->active) {
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to re-enable interface: %d\n",
+ ret);
+ gb_interface_deactivate(intf);
+ }
+ }
+ mutex_unlock(&intf->mutex);
+
+out_interface_put:
+ gb_interface_put(intf);
+
+ return;
+
+err_deactivate:
+ mutex_lock(&intf->mutex);
+ intf->mode_switch = false;
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+
+ gb_interface_put(intf);
+}
+
+int gb_interface_request_mode_switch(struct gb_interface *intf)
+{
+ int ret = 0;
+
+ mutex_lock(&intf->mutex);
+ if (intf->mode_switch) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ intf->mode_switch = true;
+ reinit_completion(&intf->mode_switch_completion);
+
+ /*
+ * Get a reference to the interface device, which will be put once the
+ * mode switch is complete.
+ */
+ get_device(&intf->dev);
+
+ if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
+ put_device(&intf->dev);
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&intf->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
+
+/*
+ * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
+ * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP reads the
+ * attribute and clears it once a non-zero value has been read from it.
+ *
+ * FIXME: This is module-hardware dependent and needs to be extended for every
+ * type of module we want to support.
+ */
+static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
+{
+ struct gb_host_device *hd = intf->hd;
+ unsigned long bootrom_quirks;
+ unsigned long s2l_quirks;
+ int ret;
+ u32 value;
+ u16 attr;
+ u8 init_status;
+
+ /*
+ * ES2 bridges use T_TstSrcIncrement for the init status.
+ *
+ * FIXME: Remove ES2 support
+ */
+ if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+ attr = DME_T_TST_SRC_INCREMENT;
+ else
+ attr = DME_TOSHIBA_GMP_INIT_STATUS;
+
+ ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
+ DME_SELECTOR_INDEX_NULL, &value);
+ if (ret)
+ return ret;
+
+ /*
+ * A nonzero init status indicates the module has finished
+ * initializing.
+ */
+ if (!value) {
+ dev_err(&intf->dev, "invalid init status\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Extract the init status.
+ *
+ * For ES2: We need to check lowest 8 bits of 'value'.
+ * For ES3: We need to check highest 8 bits out of 32 of 'value'.
+ *
+ * FIXME: Remove ES2 support
+ */
+ if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+ init_status = value & 0xff;
+ else
+ init_status = value >> 24;
+
+ /*
+ * Check if the interface is executing the quirky ES3 bootrom that,
+ * for example, requires E2EFC, CSD and CSV to be disabled.
+ */
+ bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
+ GB_INTERFACE_QUIRK_FORCED_DISABLE |
+ GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
+ GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
+
+ s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
+
+ switch (init_status) {
+ case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
+ case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
+ intf->quirks |= bootrom_quirks;
+ break;
+ case GB_INIT_S2_LOADER_BOOT_STARTED:
+ /* S2 Loader doesn't support runtime PM */
+ intf->quirks &= ~bootrom_quirks;
+ intf->quirks |= s2l_quirks;
+ break;
+ default:
+ intf->quirks &= ~bootrom_quirks;
+ intf->quirks &= ~s2l_quirks;
+ }
+
+ /* Clear the init status. */
+ return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
+ DME_SELECTOR_INDEX_NULL, 0);
+}
+
+/* interface sysfs attributes */
+#define gb_interface_attr(field, type) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_interface *intf = to_gb_interface(dev); \
+ return scnprintf(buf, PAGE_SIZE, type"\n", intf->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
+gb_interface_attr(ddbl1_product_id, "0x%08x");
+gb_interface_attr(interface_id, "%u");
+gb_interface_attr(vendor_id, "0x%08x");
+gb_interface_attr(product_id, "0x%08x");
+gb_interface_attr(serial_number, "0x%016llx");
+
+static ssize_t voltage_now_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ int ret;
+ u32 measurement;
+
+ ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+ GB_SVC_PWRMON_TYPE_VOL,
+ &measurement);
+ if (ret) {
+ dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
+ return ret;
+ }
+
+ return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(voltage_now);
+
+static ssize_t current_now_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ int ret;
+ u32 measurement;
+
+ ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+ GB_SVC_PWRMON_TYPE_CURR,
+ &measurement);
+ if (ret) {
+ dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
+ return ret;
+ }
+
+ return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(current_now);
+
+static ssize_t power_now_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ int ret;
+ u32 measurement;
+
+ ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+ GB_SVC_PWRMON_TYPE_PWR,
+ &measurement);
+ if (ret) {
+ dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
+ return ret;
+ }
+
+ return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(power_now);
+
+static ssize_t power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ if (intf->active)
+ return scnprintf(buf, PAGE_SIZE, "on\n");
+ else
+ return scnprintf(buf, PAGE_SIZE, "off\n");
+}
+
+static ssize_t power_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ bool activate;
+ int ret = 0;
+
+ if (kstrtobool(buf, &activate))
+ return -EINVAL;
+
+ mutex_lock(&intf->mutex);
+
+ if (activate == intf->active)
+ goto unlock;
+
+ if (activate) {
+ ret = gb_interface_activate(intf);
+ if (ret) {
+ dev_err(&intf->dev,
+ "failed to activate interface: %d\n", ret);
+ goto unlock;
+ }
+
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&intf->dev,
+ "failed to enable interface: %d\n", ret);
+ gb_interface_deactivate(intf);
+ goto unlock;
+ }
+ } else {
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ }
+
+unlock:
+ mutex_unlock(&intf->mutex);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+static DEVICE_ATTR_RW(power_state);
+
+static const char *gb_interface_type_string(struct gb_interface *intf)
+{
+ static const char * const types[] = {
+ [GB_INTERFACE_TYPE_INVALID] = "invalid",
+ [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
+ [GB_INTERFACE_TYPE_DUMMY] = "dummy",
+ [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
+ [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
+ };
+
+ return types[intf->type];
+}
+
+static ssize_t interface_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ return sprintf(buf, "%s\n", gb_interface_type_string(intf));
+}
+static DEVICE_ATTR_RO(interface_type);
+
+static struct attribute *interface_unipro_attrs[] = {
+ &dev_attr_ddbl1_manufacturer_id.attr,
+ &dev_attr_ddbl1_product_id.attr,
+ NULL
+};
+
+static struct attribute *interface_greybus_attrs[] = {
+ &dev_attr_vendor_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
+
+static struct attribute *interface_power_attrs[] = {
+ &dev_attr_voltage_now.attr,
+ &dev_attr_current_now.attr,
+ &dev_attr_power_now.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute *interface_common_attrs[] = {
+ &dev_attr_interface_id.attr,
+ &dev_attr_interface_type.attr,
+ NULL
+};
+
+static umode_t interface_unipro_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ switch (intf->type) {
+ case GB_INTERFACE_TYPE_UNIPRO:
+ case GB_INTERFACE_TYPE_GREYBUS:
+ return attr->mode;
+ default:
+ return 0;
+ }
+}
+
+static umode_t interface_greybus_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ switch (intf->type) {
+ case GB_INTERFACE_TYPE_GREYBUS:
+ return attr->mode;
+ default:
+ return 0;
+ }
+}
+
+static umode_t interface_power_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ switch (intf->type) {
+ case GB_INTERFACE_TYPE_UNIPRO:
+ case GB_INTERFACE_TYPE_GREYBUS:
+ return attr->mode;
+ default:
+ return 0;
+ }
+}
+
+static const struct attribute_group interface_unipro_group = {
+ .is_visible = interface_unipro_is_visible,
+ .attrs = interface_unipro_attrs,
+};
+
+static const struct attribute_group interface_greybus_group = {
+ .is_visible = interface_greybus_is_visible,
+ .attrs = interface_greybus_attrs,
+};
+
+static const struct attribute_group interface_power_group = {
+ .is_visible = interface_power_is_visible,
+ .attrs = interface_power_attrs,
+};
+
+static const struct attribute_group interface_common_group = {
+ .attrs = interface_common_attrs,
+};
+
+static const struct attribute_group *interface_groups[] = {
+ &interface_unipro_group,
+ &interface_greybus_group,
+ &interface_power_group,
+ &interface_common_group,
+ NULL
+};
+
+static void gb_interface_release(struct device *dev)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+
+ trace_gb_interface_release(intf);
+
+ kfree(intf);
+}
+
+#ifdef CONFIG_PM
+static int gb_interface_suspend(struct device *dev)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ int ret, timesync_ret;
+
+ ret = gb_control_interface_suspend_prepare(intf->control);
+ if (ret)
+ return ret;
+
+ gb_timesync_interface_remove(intf);
+
+ ret = gb_control_suspend(intf->control);
+ if (ret)
+ goto err_hibernate_abort;
+
+ ret = gb_interface_hibernate_link(intf);
+ if (ret)
+ return ret;
+
+ /* Delay to allow interface to enter standby before disabling refclk */
+ msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
+
+ ret = gb_interface_refclk_set(intf, false);
+ if (ret)
+ return ret;
+
+ return 0;
+
+err_hibernate_abort:
+ gb_control_interface_hibernate_abort(intf->control);
+
+ timesync_ret = gb_timesync_interface_add(intf);
+ if (timesync_ret) {
+ dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
+ return timesync_ret;
+ }
+
+ return ret;
+}
+
+static int gb_interface_resume(struct device *dev)
+{
+ struct gb_interface *intf = to_gb_interface(dev);
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ ret = gb_interface_refclk_set(intf, true);
+ if (ret)
+ return ret;
+
+ ret = gb_svc_intf_resume(svc, intf->interface_id);
+ if (ret)
+ return ret;
+
+ ret = gb_control_resume(intf->control);
+ if (ret)
+ return ret;
+
+ ret = gb_timesync_interface_add(intf);
+ if (ret) {
+ dev_err(dev, "failed to add to timesync: %d\n", ret);
+ return ret;
+ }
+
+ ret = gb_timesync_schedule_synchronous(intf);
+ if (ret) {
+ dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_interface_runtime_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_interface_pm_ops = {
+ SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
+ gb_interface_runtime_idle)
+};
+
+struct device_type greybus_interface_type = {
+ .name = "greybus_interface",
+ .release = gb_interface_release,
+ .pm = &gb_interface_pm_ops,
+};
+
+/*
+ * A Greybus module represents a user-replaceable component on a GMP
+ * phone. An interface is the physical connection on that module. A
+ * module may have more than one interface.
+ *
+ * Create a gb_interface structure to represent a discovered interface.
+ * The position of the interface within the Endo is encoded in the
+ * "interface_id" argument.
+ *
+ * Returns a pointer to the new interface or a null pointer if a
+ * failure occurs due to memory exhaustion.
+ */
+struct gb_interface *gb_interface_create(struct gb_module *module,
+ u8 interface_id)
+{
+ struct gb_host_device *hd = module->hd;
+ struct gb_interface *intf;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf)
+ return NULL;
+
+ intf->hd = hd; /* XXX refcount? */
+ intf->module = module;
+ intf->interface_id = interface_id;
+ INIT_LIST_HEAD(&intf->bundles);
+ INIT_LIST_HEAD(&intf->manifest_descs);
+ mutex_init(&intf->mutex);
+ INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
+ init_completion(&intf->mode_switch_completion);
+
+ /* Invalid device id to start with */
+ intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+
+ intf->dev.parent = &module->dev;
+ intf->dev.bus = &greybus_bus_type;
+ intf->dev.type = &greybus_interface_type;
+ intf->dev.groups = interface_groups;
+ intf->dev.dma_mask = module->dev.dma_mask;
+ device_initialize(&intf->dev);
+ dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
+ interface_id);
+
+ pm_runtime_set_autosuspend_delay(&intf->dev,
+ GB_INTERFACE_AUTOSUSPEND_MS);
+
+ trace_gb_interface_create(intf);
+
+ return intf;
+}
+
+static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+ ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
+ if (ret) {
+ dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_interface_activate_operation(struct gb_interface *intf,
+ enum gb_interface_type *intf_type)
+{
+ struct gb_svc *svc = intf->hd->svc;
+ u8 type;
+ int ret;
+
+ dev_dbg(&intf->dev, "%s\n", __func__);
+
+ ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
+ if (ret) {
+ dev_err(&intf->dev, "failed to activate: %d\n", ret);
+ return ret;
+ }
+
+ switch (type) {
+ case GB_SVC_INTF_TYPE_DUMMY:
+ *intf_type = GB_INTERFACE_TYPE_DUMMY;
+ /* FIXME: handle as an error for now */
+ return -ENODEV;
+ case GB_SVC_INTF_TYPE_UNIPRO:
+ *intf_type = GB_INTERFACE_TYPE_UNIPRO;
+ dev_err(&intf->dev, "interface type UniPro not supported\n");
+ /* FIXME: handle as an error for now */
+ return -ENODEV;
+ case GB_SVC_INTF_TYPE_GREYBUS:
+ *intf_type = GB_INTERFACE_TYPE_GREYBUS;
+ break;
+ default:
+ dev_err(&intf->dev, "unknown interface type: %u\n", type);
+ *intf_type = GB_INTERFACE_TYPE_UNKNOWN;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int gb_interface_hibernate_link(struct gb_interface *intf)
+{
+ struct gb_svc *svc = intf->hd->svc;
+
+ return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
+}
+
+static int _gb_interface_activate(struct gb_interface *intf,
+ enum gb_interface_type *type)
+{
+ int ret;
+
+ *type = GB_INTERFACE_TYPE_UNKNOWN;
+
+ if (intf->ejected || intf->removed)
+ return -ENODEV;
+
+ ret = gb_interface_vsys_set(intf, true);
+ if (ret)
+ return ret;
+
+ ret = gb_interface_refclk_set(intf, true);
+ if (ret)
+ goto err_vsys_disable;
+
+ ret = gb_interface_unipro_set(intf, true);
+ if (ret)
+ goto err_refclk_disable;
+
+ ret = gb_interface_activate_operation(intf, type);
+ if (ret) {
+ switch (*type) {
+ case GB_INTERFACE_TYPE_UNIPRO:
+ case GB_INTERFACE_TYPE_GREYBUS:
+ goto err_hibernate_link;
+ default:
+ goto err_unipro_disable;
+ }
+ }
+
+ ret = gb_interface_read_dme(intf);
+ if (ret)
+ goto err_hibernate_link;
+
+ ret = gb_interface_route_create(intf);
+ if (ret)
+ goto err_hibernate_link;
+
+ intf->active = true;
+
+ trace_gb_interface_activate(intf);
+
+ return 0;
+
+err_hibernate_link:
+ gb_interface_hibernate_link(intf);
+err_unipro_disable:
+ gb_interface_unipro_set(intf, false);
+err_refclk_disable:
+ gb_interface_refclk_set(intf, false);
+err_vsys_disable:
+ gb_interface_vsys_set(intf, false);
+
+ return ret;
+}
+
+/*
+ * At present, we assume a UniPro-only module to be a Greybus module that
+ * failed to send its mailbox poke. There is some reason to believe that this
+ * is because of a bug in the ES3 bootrom.
+ *
+ * FIXME: Check if this is a Toshiba bridge before retrying?
+ */
+static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
+ enum gb_interface_type *type)
+{
+ int retries = 3;
+ int ret;
+
+ while (retries--) {
+ ret = _gb_interface_activate(intf, type);
+ if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
+ continue;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Activate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_activate(struct gb_interface *intf)
+{
+ enum gb_interface_type type;
+ int ret;
+
+ switch (intf->type) {
+ case GB_INTERFACE_TYPE_INVALID:
+ case GB_INTERFACE_TYPE_GREYBUS:
+ ret = _gb_interface_activate_es3_hack(intf, &type);
+ break;
+ default:
+ ret = _gb_interface_activate(intf, &type);
+ }
+
+ /* Make sure type is detected correctly during reactivation. */
+ if (intf->type != GB_INTERFACE_TYPE_INVALID) {
+ if (type != intf->type) {
+ dev_err(&intf->dev, "failed to detect interface type\n");
+
+ if (!ret)
+ gb_interface_deactivate(intf);
+
+ return -EIO;
+ }
+ } else {
+ intf->type = type;
+ }
+
+ return ret;
+}
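+
+/*
+ * Illustrative caller sketch (not part of this patch; "intf" is assumed to
+ * be a gb_interface owned by the caller): activation and enabling are
+ * expected to be paired under the interface mutex, roughly as
+ *
+ *	mutex_lock(&intf->mutex);
+ *	ret = gb_interface_activate(intf);
+ *	if (!ret) {
+ *		ret = gb_interface_enable(intf);
+ *		if (ret)
+ *			gb_interface_deactivate(intf);
+ *	}
+ *	mutex_unlock(&intf->mutex);
+ */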
+
+/*
+ * Deactivate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_deactivate(struct gb_interface *intf)
+{
+ if (!intf->active)
+ return;
+
+ trace_gb_interface_deactivate(intf);
+
+ /* Abort any ongoing mode switch. */
+ if (intf->mode_switch)
+ complete(&intf->mode_switch_completion);
+
+ gb_interface_route_destroy(intf);
+ gb_interface_hibernate_link(intf);
+ gb_interface_unipro_set(intf, false);
+ gb_interface_refclk_set(intf, false);
+ gb_interface_vsys_set(intf, false);
+
+ intf->active = false;
+}
+
+/*
+ * Enable an interface by enabling its control connection, fetching the
+ * manifest and other information over it, and finally registering its child
+ * devices.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_enable(struct gb_interface *intf)
+{
+ struct gb_control *control;
+ struct gb_bundle *bundle, *tmp;
+ int ret, size;
+ void *manifest;
+
+ ret = gb_interface_read_and_clear_init_status(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
+ return ret;
+ }
+
+ /* Establish control connection */
+ control = gb_control_create(intf);
+ if (IS_ERR(control)) {
+ dev_err(&intf->dev, "failed to create control device: %ld\n",
+ PTR_ERR(control));
+ return PTR_ERR(control);
+ }
+ intf->control = control;
+
+ ret = gb_control_enable(intf->control);
+ if (ret)
+ goto err_put_control;
+
+ /* Get manifest size using control protocol on CPort */
+ size = gb_control_get_manifest_size_operation(intf);
+ if (size <= 0) {
+ dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
+
+ if (size)
+ ret = size;
+ else
+ ret = -EINVAL;
+
+ goto err_disable_control;
+ }
+
+ manifest = kmalloc(size, GFP_KERNEL);
+ if (!manifest) {
+ ret = -ENOMEM;
+ goto err_disable_control;
+ }
+
+ /* Get manifest using control protocol on CPort */
+ ret = gb_control_get_manifest_operation(intf, manifest, size);
+ if (ret) {
+ dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
+ goto err_free_manifest;
+ }
+
+ /*
+ * Parse the manifest and build up our data structures representing
+ * what's in it.
+ */
+ if (!gb_manifest_parse(intf, manifest, size)) {
+ dev_err(&intf->dev, "failed to parse manifest\n");
+ ret = -EINVAL;
+ goto err_destroy_bundles;
+ }
+
+ ret = gb_control_get_bundle_versions(intf->control);
+ if (ret)
+ goto err_destroy_bundles;
+
+ ret = gb_timesync_interface_add(intf);
+ if (ret) {
+ dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
+ goto err_destroy_bundles;
+ }
+
+ /* Register the control device and any bundles */
+ ret = gb_control_add(intf->control);
+ if (ret)
+ goto err_remove_timesync;
+
+ pm_runtime_use_autosuspend(&intf->dev);
+ pm_runtime_get_noresume(&intf->dev);
+ pm_runtime_set_active(&intf->dev);
+ pm_runtime_enable(&intf->dev);
+
+ list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
+ ret = gb_bundle_add(bundle);
+ if (ret) {
+ gb_bundle_destroy(bundle);
+ continue;
+ }
+ }
+
+ kfree(manifest);
+
+ intf->enabled = true;
+
+ pm_runtime_put(&intf->dev);
+
+ trace_gb_interface_enable(intf);
+
+ return 0;
+
+err_remove_timesync:
+ gb_timesync_interface_remove(intf);
+err_destroy_bundles:
+ list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
+ gb_bundle_destroy(bundle);
+err_free_manifest:
+ kfree(manifest);
+err_disable_control:
+ gb_control_disable(intf->control);
+err_put_control:
+ gb_control_put(intf->control);
+ intf->control = NULL;
+
+ return ret;
+}
+
+/*
+ * Disable an interface and destroy its bundles.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_disable(struct gb_interface *intf)
+{
+ struct gb_bundle *bundle;
+ struct gb_bundle *next;
+
+ if (!intf->enabled)
+ return;
+
+ trace_gb_interface_disable(intf);
+
+ pm_runtime_get_sync(&intf->dev);
+
+ /* Set disconnected flag to avoid I/O during connection tear down. */
+ if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
+ intf->disconnected = true;
+
+ list_for_each_entry_safe(bundle, next, &intf->bundles, links)
+ gb_bundle_destroy(bundle);
+
+ if (!intf->mode_switch && !intf->disconnected)
+ gb_control_interface_deactivate_prepare(intf->control);
+
+ gb_control_del(intf->control);
+ gb_timesync_interface_remove(intf);
+ gb_control_disable(intf->control);
+ gb_control_put(intf->control);
+ intf->control = NULL;
+
+ intf->enabled = false;
+
+ pm_runtime_disable(&intf->dev);
+ pm_runtime_set_suspended(&intf->dev);
+ pm_runtime_dont_use_autosuspend(&intf->dev);
+ pm_runtime_put_noidle(&intf->dev);
+}
+
+/* Enable TimeSync on an Interface control connection. */
+int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk)
+{
+ return gb_control_timesync_enable(intf->control, count,
+ frame_time, strobe_delay,
+ refclk);
+}
+
+/* Disable TimeSync on an Interface control connection. */
+int gb_interface_timesync_disable(struct gb_interface *intf)
+{
+ return gb_control_timesync_disable(intf->control);
+}
+
+/* Transmit the Authoritative FrameTime via an Interface control connection. */
+int gb_interface_timesync_authoritative(struct gb_interface *intf,
+ u64 *frame_time)
+{
+ return gb_control_timesync_authoritative(intf->control,
+ frame_time);
+}
+
+/* Register an interface. */
+int gb_interface_add(struct gb_interface *intf)
+{
+ int ret;
+
+ ret = device_add(&intf->dev);
+ if (ret) {
+ dev_err(&intf->dev, "failed to register interface: %d\n", ret);
+ return ret;
+ }
+
+ trace_gb_interface_add(intf);
+
+ dev_info(&intf->dev, "Interface added (%s)\n",
+ gb_interface_type_string(intf));
+
+ switch (intf->type) {
+ case GB_INTERFACE_TYPE_GREYBUS:
+ dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
+ intf->vendor_id, intf->product_id);
+ /* fall-through */
+ case GB_INTERFACE_TYPE_UNIPRO:
+ dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
+ intf->ddbl1_manufacturer_id,
+ intf->ddbl1_product_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Deregister an interface. */
+void gb_interface_del(struct gb_interface *intf)
+{
+ if (device_is_registered(&intf->dev)) {
+ trace_gb_interface_del(intf);
+
+ device_del(&intf->dev);
+ dev_info(&intf->dev, "Interface removed\n");
+ }
+}
+
+void gb_interface_put(struct gb_interface *intf)
+{
+ put_device(&intf->dev);
+}
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
new file mode 100644
index 000000000000..03299d2a8be5
--- /dev/null
+++ b/drivers/staging/greybus/interface.h
@@ -0,0 +1,88 @@
+/*
+ * Greybus Interface Block code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __INTERFACE_H
+#define __INTERFACE_H
+
+enum gb_interface_type {
+ GB_INTERFACE_TYPE_INVALID = 0,
+ GB_INTERFACE_TYPE_UNKNOWN,
+ GB_INTERFACE_TYPE_DUMMY,
+ GB_INTERFACE_TYPE_UNIPRO,
+ GB_INTERFACE_TYPE_GREYBUS,
+};
+
+#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
+#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
+#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
+#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
+#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
+#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
+#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
+
+struct gb_interface {
+ struct device dev;
+ struct gb_control *control;
+
+ struct list_head bundles;
+ struct list_head module_node;
+ struct list_head manifest_descs;
+ u8 interface_id; /* Physical location within the Endo */
+ u8 device_id;
+ u8 features; /* Feature flags set in the manifest */
+
+ enum gb_interface_type type;
+
+ u32 ddbl1_manufacturer_id;
+ u32 ddbl1_product_id;
+ u32 vendor_id;
+ u32 product_id;
+ u64 serial_number;
+
+ struct gb_host_device *hd;
+ struct gb_module *module;
+
+ unsigned long quirks;
+
+ struct mutex mutex;
+
+ bool disconnected;
+
+ bool ejected;
+ bool removed;
+ bool active;
+ bool enabled;
+ bool mode_switch;
+ bool dme_read;
+
+ struct work_struct mode_switch_work;
+ struct completion mode_switch_completion;
+};
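+
+/* Recover a gb_interface from the struct device embedded in it. */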
+#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
+
+struct gb_interface *gb_interface_create(struct gb_module *module,
+ u8 interface_id);
+int gb_interface_activate(struct gb_interface *intf);
+void gb_interface_deactivate(struct gb_interface *intf);
+int gb_interface_enable(struct gb_interface *intf);
+void gb_interface_disable(struct gb_interface *intf);
+int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
+ u64 frame_time, u32 strobe_delay, u32 refclk);
+int gb_interface_timesync_authoritative(struct gb_interface *intf,
+ u64 *frame_time);
+int gb_interface_timesync_disable(struct gb_interface *intf);
+int gb_interface_add(struct gb_interface *intf);
+void gb_interface_del(struct gb_interface *intf);
+void gb_interface_put(struct gb_interface *intf);
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+ u32 mailbox);
+
+int gb_interface_request_mode_switch(struct gb_interface *intf);
+
+#endif /* __INTERFACE_H */
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
new file mode 100644
index 000000000000..8dffd8a7e762
--- /dev/null
+++ b/drivers/staging/greybus/light.c
@@ -0,0 +1,1361 @@
+/*
+ * Greybus Lights protocol driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <media/v4l2-flash-led-class.h>
+
+#include "greybus.h"
+#include "greybus_protocols.h"
+
+#define NAMES_MAX 32
+
+struct gb_channel {
+ u8 id;
+ u32 flags;
+ u32 color;
+ char *color_name;
+ u8 fade_in;
+ u8 fade_out;
+ u32 mode;
+ char *mode_name;
+ struct attribute **attrs;
+ struct attribute_group *attr_group;
+ const struct attribute_group **attr_groups;
+ struct led_classdev *led;
+#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
+ struct led_classdev_flash fled;
+ struct led_flash_setting intensity_uA;
+ struct led_flash_setting timeout_us;
+#else
+ struct led_classdev cled;
+#endif
+ struct gb_light *light;
+ bool is_registered;
+ bool releasing;
+ bool strobe_state;
+ bool active;
+ struct mutex lock;
+};
+
+struct gb_light {
+ u8 id;
+ char *name;
+ struct gb_lights *glights;
+ u32 flags;
+ u8 channels_count;
+ struct gb_channel *channels;
+ bool has_flash;
+ bool ready;
+#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
+ struct v4l2_flash *v4l2_flash;
+#endif
+};
+
+struct gb_lights {
+ struct gb_connection *connection;
+ u8 lights_count;
+ struct gb_light *lights;
+ struct mutex lights_lock;
+};
+
+static void gb_lights_channel_free(struct gb_channel *channel);
+
+static struct gb_connection *get_conn_from_channel(struct gb_channel *channel)
+{
+ return channel->light->glights->connection;
+}
+
+static struct gb_connection *get_conn_from_light(struct gb_light *light)
+{
+ return light->glights->connection;
+}
+
+static bool is_channel_flash(struct gb_channel *channel)
+{
+ return !!(channel->mode & (GB_CHANNEL_MODE_FLASH | GB_CHANNEL_MODE_TORCH
+ | GB_CHANNEL_MODE_INDICATOR));
+}
+
+#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
+static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+
+ return container_of(fled_cdev, struct gb_channel, fled);
+}
+
+static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+{
+ return &channel->fled.led_cdev;
+}
+
+static struct gb_channel *get_channel_from_mode(struct gb_light *light,
+ u32 mode)
+{
+ struct gb_channel *channel = NULL;
+ int i;
+
+ for (i = 0; i < light->channels_count; i++) {
+ channel = &light->channels[i];
+ if (channel && channel->mode == mode)
+ break;
+ }
+ return channel;
+}
+
+static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
+ u32 intensity)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_intensity_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.intensity_uA = cpu_to_le32(intensity);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_INTENSITY,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+{
+ u32 intensity;
+
+ /* If the channel is a flash channel, we need the attached torch channel */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH)
+ channel = get_channel_from_mode(channel->light,
+ GB_CHANNEL_MODE_TORCH);
+
+ /* For non-flash modes we need to convert brightness to intensity */
+ intensity = channel->intensity_uA.min +
+ (channel->intensity_uA.step * channel->led->brightness);
+
+ return __gb_lights_flash_intensity_set(channel, intensity);
+}
+#else
+static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct gb_channel, cled);
+}
+
+static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+{
+ return &channel->cled;
+}
+
+static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+{
+ return 0;
+}
+#endif
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color);
+static int gb_lights_fade_set(struct gb_channel *channel);
+
+static void led_lock(struct led_classdev *cdev)
+{
+ mutex_lock(&cdev->led_access);
+}
+
+static void led_unlock(struct led_classdev *cdev)
+{
+ mutex_unlock(&cdev->led_access);
+}
+
+#define gb_lights_fade_attr(__dir) \
+static ssize_t fade_##__dir##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct led_classdev *cdev = dev_get_drvdata(dev); \
+ struct gb_channel *channel = get_channel_from_cdev(cdev); \
+ \
+ return sprintf(buf, "%u\n", channel->fade_##__dir); \
+} \
+ \
+static ssize_t fade_##__dir##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct led_classdev *cdev = dev_get_drvdata(dev); \
+ struct gb_channel *channel = get_channel_from_cdev(cdev); \
+ u8 fade; \
+ int ret; \
+ \
+ led_lock(cdev); \
+ if (led_sysfs_is_disabled(cdev)) { \
+ ret = -EBUSY; \
+ goto unlock; \
+ } \
+ \
+ ret = kstrtou8(buf, 0, &fade); \
+ if (ret < 0) { \
+ dev_err(dev, "could not parse fade value %d\n", ret); \
+ goto unlock; \
+ } \
+ if (channel->fade_##__dir == fade) \
+ goto unlock; \
+ channel->fade_##__dir = fade; \
+ \
+ ret = gb_lights_fade_set(channel); \
+ if (ret < 0) \
+ goto unlock; \
+ \
+ ret = size; \
+unlock: \
+ led_unlock(cdev); \
+ return ret; \
+} \
+static DEVICE_ATTR_RW(fade_##__dir)
+
+gb_lights_fade_attr(in);
+gb_lights_fade_attr(out);
+
+static ssize_t color_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ return sprintf(buf, "0x%08x\n", channel->color);
+}
+
+static ssize_t color_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+ u32 color;
+ int ret;
+
+ led_lock(cdev);
+ if (led_sysfs_is_disabled(cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ ret = kstrtou32(buf, 0, &color);
+ if (ret < 0) {
+ dev_err(dev, "could not parse color value %d\n", ret);
+ goto unlock;
+ }
+
+ ret = gb_lights_color_set(channel, color);
+ if (ret < 0)
+ goto unlock;
+
+ channel->color = color;
+ ret = size;
+unlock:
+ led_unlock(cdev);
+ return ret;
+}
+static DEVICE_ATTR_RW(color);
+
+static int channel_attr_groups_set(struct gb_channel *channel,
+ struct led_classdev *cdev)
+{
+ int attr = 0;
+ int size = 0;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+ size++;
+ if (channel->flags & GB_LIGHT_CHANNEL_FADER)
+ size += 2;
+
+ if (!size)
+ return 0;
+
+ /* Set attributes based on the channel flags */
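+ /* Allocate one extra entry so the arrays stay NULL terminated. */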
+ channel->attrs = kcalloc(size + 1, sizeof(*channel->attrs), GFP_KERNEL);
+ if (!channel->attrs)
+ return -ENOMEM;
+ channel->attr_group = kcalloc(1, sizeof(*channel->attr_group),
+ GFP_KERNEL);
+ if (!channel->attr_group)
+ return -ENOMEM;
+ channel->attr_groups = kcalloc(2, sizeof(*channel->attr_groups),
+ GFP_KERNEL);
+ if (!channel->attr_groups)
+ return -ENOMEM;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
+ channel->attrs[attr++] = &dev_attr_color.attr;
+ if (channel->flags & GB_LIGHT_CHANNEL_FADER) {
+ channel->attrs[attr++] = &dev_attr_fade_in.attr;
+ channel->attrs[attr++] = &dev_attr_fade_out.attr;
+ }
+
+ channel->attr_group->attrs = channel->attrs;
+
+ channel->attr_groups[0] = channel->attr_group;
+
+ cdev->groups = channel->attr_groups;
+
+ return 0;
+}
+
+static int gb_lights_fade_set(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_fade_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.fade_in = channel->fade_in;
+ req.fade_out = channel->fade_out;
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FADE,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_color_set(struct gb_channel *channel, u32 color)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_color_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.color = cpu_to_le32(color);
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_COLOR,
+ &req, sizeof(req), NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
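+/*
+ * Brightness updates double as runtime PM reference management: the
+ * reference taken below is kept for as long as the channel stays active,
+ * so the module cannot be suspended while the LED is lit, and is dropped
+ * again once the channel goes inactive.
+ */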
+static int __gb_lights_led_brightness_set(struct gb_channel *channel)
+{
+ struct gb_lights_set_brightness_request req;
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ bool old_active;
+ int ret;
+
+ mutex_lock(&channel->lock);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ goto out_unlock;
+
+ old_active = channel->active;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.brightness = (u8)channel->led->brightness;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out_pm_put;
+
+ if (channel->led->brightness)
+ channel->active = true;
+ else
+ channel->active = false;
+
+ /* We need to keep the module alive when switching to the active state */
+ if (!old_active && channel->active)
+ goto out_unlock;
+
+ /*
+ * On the other hand, when going inactive we still hold a reference and
+ * need to put it, so the module can go to suspend.
+ */
+ if (old_active && !channel->active)
+ gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+ gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+ mutex_unlock(&channel->lock);
+
+ return ret;
+}
+
+static int __gb_lights_brightness_set(struct gb_channel *channel)
+{
+ int ret;
+
+ if (channel->releasing)
+ return 0;
+
+ if (is_channel_flash(channel))
+ ret = __gb_lights_flash_brightness_set(channel);
+ else
+ ret = __gb_lights_led_brightness_set(channel);
+
+ return ret;
+}
+
+static int gb_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ channel->led->brightness = value;
+
+ return __gb_lights_brightness_set(channel);
+}
+
+static enum led_brightness gb_brightness_get(struct led_classdev *cdev)
+
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+
+ return channel->led->brightness;
+}
+
+static int gb_blink_set(struct led_classdev *cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct gb_channel *channel = get_channel_from_cdev(cdev);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_blink_request req;
+ bool old_active;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ if (!delay_on || !delay_off)
+ return -EINVAL;
+
+ mutex_lock(&channel->lock);
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ goto out_unlock;
+
+ old_active = channel->active;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.time_on_ms = cpu_to_le16(*delay_on);
+ req.time_off_ms = cpu_to_le16(*delay_off);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BLINK, &req,
+ sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out_pm_put;
+
+ if (*delay_on)
+ channel->active = true;
+ else
+ channel->active = false;
+
+ /* We need to keep the module alive when switching to the active state */
+ if (!old_active && channel->active)
+ goto out_unlock;
+
+ /*
+ * On the other hand, when going inactive we still hold a reference and
+ * need to put it, so the module can go to suspend.
+ */
+ if (old_active && !channel->active)
+ gb_pm_runtime_put_autosuspend(bundle);
+
+out_pm_put:
+ gb_pm_runtime_put_autosuspend(bundle);
+out_unlock:
+ mutex_unlock(&channel->lock);
+
+ return ret;
+}
+
+static void gb_lights_led_operations_set(struct gb_channel *channel,
+ struct led_classdev *cdev)
+{
+ cdev->brightness_get = gb_brightness_get;
+ cdev->brightness_set_blocking = gb_brightness_set;
+
+ if (channel->flags & GB_LIGHT_CHANNEL_BLINK)
+ cdev->blink_set = gb_blink_set;
+}
+
+#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
+/* V4L2 specific helpers */
+static const struct v4l2_flash_ops v4l2_flash_ops;
+
+static void __gb_lights_channel_v4l2_config(struct led_flash_setting *channel_s,
+ struct led_flash_setting *v4l2_s)
+{
+ v4l2_s->min = channel_s->min;
+ v4l2_s->max = channel_s->max;
+ v4l2_s->step = channel_s->step;
+ /* For v4l2 val is the default value */
+ v4l2_s->val = channel_s->max;
+}
+
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+ struct gb_connection *connection = get_conn_from_light(light);
+ struct device *dev = &connection->bundle->dev;
+ struct v4l2_flash_config *sd_cfg;
+ struct led_classdev_flash *fled;
+ struct led_classdev_flash *iled = NULL;
+ struct gb_channel *channel_torch, *channel_ind, *channel_flash;
+ int ret = 0;
+
+ sd_cfg = kcalloc(1, sizeof(*sd_cfg), GFP_KERNEL);
+ if (!sd_cfg)
+ return -ENOMEM;
+
+ channel_torch = get_channel_from_mode(light, GB_CHANNEL_MODE_TORCH);
+ if (channel_torch)
+ __gb_lights_channel_v4l2_config(&channel_torch->intensity_uA,
+ &sd_cfg->torch_intensity);
+
+ channel_ind = get_channel_from_mode(light, GB_CHANNEL_MODE_INDICATOR);
+ if (channel_ind) {
+ __gb_lights_channel_v4l2_config(&channel_ind->intensity_uA,
+ &sd_cfg->indicator_intensity);
+ iled = &channel_ind->fled;
+ }
+
+ channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+ WARN_ON(!channel_flash);
+
+ fled = &channel_flash->fled;
+
+ snprintf(sd_cfg->dev_name, sizeof(sd_cfg->dev_name), "%s", light->name);
+
+ /* Set the possible fault values; in our case, all faults */
+ sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT |
+ LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT |
+ LED_FAULT_OVER_CURRENT | LED_FAULT_INDICATOR |
+ LED_FAULT_UNDER_VOLTAGE | LED_FAULT_INPUT_VOLTAGE |
+ LED_FAULT_LED_OVER_TEMPERATURE;
+
+ light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, iled,
+ &v4l2_flash_ops, sd_cfg);
+ if (IS_ERR_OR_NULL(light->v4l2_flash)) {
+ ret = PTR_ERR(light->v4l2_flash);
+ goto out_free;
+ }
+
+ return ret;
+
+out_free:
+ kfree(sd_cfg);
+ return ret;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+ v4l2_flash_release(light->v4l2_flash);
+}
+#else
+static int gb_lights_light_v4l2_register(struct gb_light *light)
+{
+ struct gb_connection *connection = get_conn_from_light(light);
+
+ dev_err(&connection->bundle->dev, "no support for v4l2 subdevices\n");
+ return 0;
+}
+
+static void gb_lights_light_v4l2_unregister(struct gb_light *light)
+{
+}
+#endif
+
+#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
+/* Flash specific operations */
+static int gb_lights_flash_intensity_set(struct led_classdev_flash *fcdev,
+ u32 brightness)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ int ret;
+
+ ret = __gb_lights_flash_intensity_set(channel, brightness);
+ if (ret < 0)
+ return ret;
+
+ fcdev->brightness.val = brightness;
+
+ return 0;
+}
+
+static int gb_lights_flash_intensity_get(struct led_classdev_flash *fcdev,
+ u32 *brightness)
+{
+ *brightness = fcdev->brightness.val;
+
+ return 0;
+}
+
+static int gb_lights_flash_strobe_set(struct led_classdev_flash *fcdev,
+ bool state)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_strobe_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.state = state ? 1 : 0;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_STROBE,
+ &req, sizeof(req), NULL, 0);
+ if (!ret)
+ channel->strobe_state = state;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_flash_strobe_get(struct led_classdev_flash *fcdev,
+ bool *state)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+
+ *state = channel->strobe_state;
+ return 0;
+}
+
+static int gb_lights_flash_timeout_set(struct led_classdev_flash *fcdev,
+ u32 timeout)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_set_flash_timeout_request req;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+ req.timeout_us = cpu_to_le32(timeout);
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT,
+ &req, sizeof(req), NULL, 0);
+ if (!ret)
+ fcdev->timeout.val = timeout;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static int gb_lights_flash_fault_get(struct led_classdev_flash *fcdev,
+ u32 *fault)
+{
+ struct gb_channel *channel = container_of(fcdev, struct gb_channel,
+ fled);
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_bundle *bundle = connection->bundle;
+ struct gb_lights_get_flash_fault_request req;
+ struct gb_lights_get_flash_fault_response resp;
+ int ret;
+
+ if (channel->releasing)
+ return -ESHUTDOWN;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret < 0)
+ return ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_FLASH_FAULT,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (!ret)
+ *fault = le32_to_cpu(resp.fault);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
+static const struct led_flash_ops gb_lights_flash_ops = {
+ .flash_brightness_set = gb_lights_flash_intensity_set,
+ .flash_brightness_get = gb_lights_flash_intensity_get,
+ .strobe_set = gb_lights_flash_strobe_set,
+ .strobe_get = gb_lights_flash_strobe_get,
+ .timeout_set = gb_lights_flash_timeout_set,
+ .fault_get = gb_lights_flash_fault_get,
+};
+
+static int __gb_lights_channel_torch_attach(struct gb_channel *channel,
+ struct gb_channel *channel_torch)
+{
+ char *name;
+
+ /* we can only attach torch to a flash channel */
+ if (!(channel->mode & GB_CHANNEL_MODE_FLASH))
+ return 0;
+
+ /* Move torch brightness to the destination */
+ channel->led->max_brightness = channel_torch->led->max_brightness;
+
+ /* append mode name to flash name */
+ name = kasprintf(GFP_KERNEL, "%s_%s", channel->led->name,
+ channel_torch->mode_name);
+ if (!name)
+ return -ENOMEM;
+ kfree(channel->led->name);
+ channel->led->name = name;
+
+ channel_torch->led = channel->led;
+
+ return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct led_classdev_flash *fled = &channel->fled;
+ struct led_flash_setting *fset;
+ struct gb_channel *channel_torch;
+ int ret;
+
+ fled->ops = &gb_lights_flash_ops;
+
+ fled->led_cdev.flags |= LED_DEV_CAP_FLASH;
+
+ fset = &fled->brightness;
+ fset->min = channel->intensity_uA.min;
+ fset->max = channel->intensity_uA.max;
+ fset->step = channel->intensity_uA.step;
+ fset->val = channel->intensity_uA.max;
+
+ /* Only the flash mode has the timeout constraint settings */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+ fset = &fled->timeout;
+ fset->min = channel->timeout_us.min;
+ fset->max = channel->timeout_us.max;
+ fset->step = channel->timeout_us.step;
+ fset->val = channel->timeout_us.max;
+ }
+
+ /*
+ * If the light has a torch mode channel, that channel provides the led
+ * classdev of the flash classdev registered above.
+ */
+ channel_torch = get_channel_from_mode(channel->light,
+ GB_CHANNEL_MODE_TORCH);
+ if (channel_torch) {
+ ret = __gb_lights_channel_torch_attach(channel, channel_torch);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = led_classdev_flash_register(&connection->bundle->dev, fled);
+ if (ret < 0)
+ goto fail;
+
+ channel->is_registered = true;
+ return 0;
+fail:
+ channel->led = NULL;
+ return ret;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+ if (!channel->is_registered)
+ return;
+
+ led_classdev_flash_unregister(&channel->fled);
+}
+
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct gb_lights_get_channel_flash_config_request req;
+ struct gb_lights_get_channel_flash_config_response conf;
+ struct led_flash_setting *fset;
+ int ret;
+
+ req.light_id = channel->light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection,
+ GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Intensity constraints for flash related modes: flash, torch,
+ * indicator. They will be needed for v4l2 registration.
+ */
+ fset = &channel->intensity_uA;
+ fset->min = le32_to_cpu(conf.intensity_min_uA);
+ fset->max = le32_to_cpu(conf.intensity_max_uA);
+ fset->step = le32_to_cpu(conf.intensity_step_uA);
+
+ /*
+ * For flash channels, max brightness is the number of available
+ * intensity steps.
+ */
+ channel->led->max_brightness = (fset->max - fset->min) / fset->step;
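+ /*
+ * For example (hypothetical values): min = 10000 uA, max = 500000 uA and
+ * step = 10000 uA would give 49 brightness levels.
+ */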
+
+ /* Only the flash mode has the timeout constraint settings */
+ if (channel->mode & GB_CHANNEL_MODE_FLASH) {
+ fset = &channel->timeout_us;
+ fset->min = le32_to_cpu(conf.timeout_min_us);
+ fset->max = le32_to_cpu(conf.timeout_max_us);
+ fset->step = le32_to_cpu(conf.timeout_step_us);
+ }
+
+ return 0;
+}
+#else
+static int gb_lights_channel_flash_config(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+
+ dev_err(&connection->bundle->dev, "no support for flash devices\n");
+ return 0;
+}
+
+static int __gb_lights_flash_led_register(struct gb_channel *channel)
+{
+ return 0;
+}
+
+static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
+{
+}
+
+#endif
+
+static int __gb_lights_led_register(struct gb_channel *channel)
+{
+ struct gb_connection *connection = get_conn_from_channel(channel);
+ struct led_classdev *cdev = get_channel_cdev(channel);
+ int ret;
+
+ ret = led_classdev_register(&connection->bundle->dev, cdev);
+ if (ret < 0)
+ channel->led = NULL;
+ else
+ channel->is_registered = true;
+ return ret;
+}
+
+static int gb_lights_channel_register(struct gb_channel *channel)
+{
+ /* Normal LED channel: just register the led classdev and we are done */
+ if (!is_channel_flash(channel))
+ return __gb_lights_led_register(channel);
+
+ /*
+ * Flash-type channels need more work: flash and indicator channels are
+ * registered as flash classdevs, while the torch channel becomes the led
+ * classdev of the flash classdev.
+ */
+ if (!(channel->mode & GB_CHANNEL_MODE_TORCH))
+ return __gb_lights_flash_led_register(channel);
+
+ return 0;
+}
+
+static void __gb_lights_led_unregister(struct gb_channel *channel)
+{
+ struct led_classdev *cdev = get_channel_cdev(channel);
+
+ if (!channel->is_registered)
+ return;
+
+ led_classdev_unregister(cdev);
+ channel->led = NULL;
+}
+
+static void gb_lights_channel_unregister(struct gb_channel *channel)
+{
+ /* The same as register, handle channels differently */
+ if (!is_channel_flash(channel)) {
+ __gb_lights_led_unregister(channel);
+ return;
+ }
+
+ if (channel->mode & GB_CHANNEL_MODE_TORCH)
+ __gb_lights_led_unregister(channel);
+ else
+ __gb_lights_flash_led_unregister(channel);
+}
+
+static int gb_lights_channel_config(struct gb_light *light,
+ struct gb_channel *channel)
+{
+ struct gb_lights_get_channel_config_response conf;
+ struct gb_lights_get_channel_config_request req;
+ struct gb_connection *connection = get_conn_from_light(light);
+ struct led_classdev *cdev = get_channel_cdev(channel);
+ char *name;
+ int ret;
+
+ req.light_id = light->id;
+ req.channel_id = channel->id;
+
+ ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ channel->light = light;
+ channel->mode = le32_to_cpu(conf.mode);
+ channel->flags = le32_to_cpu(conf.flags);
+ channel->color = le32_to_cpu(conf.color);
+ channel->color_name = kstrndup(conf.color_name, NAMES_MAX, GFP_KERNEL);
+ if (!channel->color_name)
+ return -ENOMEM;
+ channel->mode_name = kstrndup(conf.mode_name, NAMES_MAX, GFP_KERNEL);
+ if (!channel->mode_name)
+ return -ENOMEM;
+
+ channel->led = cdev;
+
+ name = kasprintf(GFP_KERNEL, "%s:%s:%s", light->name,
+ channel->color_name, channel->mode_name);
+ if (!name)
+ return -ENOMEM;
+
+ cdev->name = name;
+
+ cdev->max_brightness = conf.max_brightness;
+
+ ret = channel_attr_groups_set(channel, cdev);
+ if (ret < 0)
+ return ret;
+
+ gb_lights_led_operations_set(channel, cdev);
+
+ /*
+ * If it is not a flash related channel (flash, torch or indicator) we
+ * are done here. Otherwise, continue and fetch the flash related
+ * configurations.
+ */
+ if (!is_channel_flash(channel))
+ return ret;
+
+ light->has_flash = true;
+
+ ret = gb_lights_channel_flash_config(channel);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int gb_lights_light_config(struct gb_lights *glights, u8 id)
+{
+ struct gb_light *light = &glights->lights[id];
+ struct gb_lights_get_light_config_request req;
+ struct gb_lights_get_light_config_response conf;
+ int ret;
+ int i;
+
+ light->glights = glights;
+ light->id = id;
+
+ req.id = id;
+
+ ret = gb_operation_sync(glights->connection,
+ GB_LIGHTS_TYPE_GET_LIGHT_CONFIG,
+ &req, sizeof(req), &conf, sizeof(conf));
+ if (ret < 0)
+ return ret;
+
+ if (!conf.channel_count)
+ return -EINVAL;
+ if (!strlen(conf.name))
+ return -EINVAL;
+
+ light->channels_count = conf.channel_count;
+ light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
+
+ light->channels = kzalloc(light->channels_count *
+ sizeof(struct gb_channel), GFP_KERNEL);
+ if (!light->channels)
+ return -ENOMEM;
+
+ /* First we collect all the configurations for all channels */
+ for (i = 0; i < light->channels_count; i++) {
+ light->channels[i].id = i;
+ ret = gb_lights_channel_config(light, &light->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gb_lights_light_register(struct gb_light *light)
+{
+ int ret;
+ int i;
+
+ /*
+ * Now that all configurations were fetched successfully, register the
+ * led classdev, the flash classdev and, if a flash device was found,
+ * the v4l2 subdevice.
+ */
+ for (i = 0; i < light->channels_count; i++) {
+ ret = gb_lights_channel_register(&light->channels[i]);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&light->channels[i].lock);
+ }
+
+ light->ready = true;
+
+ if (light->has_flash) {
+ ret = gb_lights_light_v4l2_register(light);
+ if (ret < 0) {
+ light->has_flash = false;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void gb_lights_channel_free(struct gb_channel *channel)
+{
+ kfree(channel->attrs);
+ kfree(channel->attr_group);
+ kfree(channel->attr_groups);
+ kfree(channel->color_name);
+ kfree(channel->mode_name);
+ mutex_destroy(&channel->lock);
+}
+
+static void gb_lights_channel_release(struct gb_channel *channel)
+{
+ channel->releasing = true;
+
+ gb_lights_channel_unregister(channel);
+
+ gb_lights_channel_free(channel);
+}
+
+static void gb_lights_light_release(struct gb_light *light)
+{
+ int i;
+ int count;
+
+ light->ready = false;
+
+ count = light->channels_count;
+
+ if (light->has_flash)
+ gb_lights_light_v4l2_unregister(light);
+
+ for (i = 0; i < count; i++) {
+ gb_lights_channel_release(&light->channels[i]);
+ light->channels_count--;
+ }
+ kfree(light->channels);
+ kfree(light->name);
+}
+
+static void gb_lights_release(struct gb_lights *glights)
+{
+ int i;
+
+ if (!glights)
+ return;
+
+ mutex_lock(&glights->lights_lock);
+ if (!glights->lights)
+ goto free_glights;
+
+ for (i = 0; i < glights->lights_count; i++)
+ gb_lights_light_release(&glights->lights[i]);
+
+ kfree(glights->lights);
+
+free_glights:
+ mutex_unlock(&glights->lights_lock);
+ mutex_destroy(&glights->lights_lock);
+ kfree(glights);
+}
+
+static int gb_lights_get_count(struct gb_lights *glights)
+{
+ struct gb_lights_get_lights_response resp;
+ int ret;
+
+ ret = gb_operation_sync(glights->connection, GB_LIGHTS_TYPE_GET_LIGHTS,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!resp.lights_count)
+ return -EINVAL;
+
+ glights->lights_count = resp.lights_count;
+
+ return 0;
+}
+
+static int gb_lights_create_all(struct gb_lights *glights)
+{
+ struct gb_connection *connection = glights->connection;
+ int ret;
+ int i;
+
+ mutex_lock(&glights->lights_lock);
+ ret = gb_lights_get_count(glights);
+ if (ret < 0)
+ goto out;
+
+ glights->lights = kzalloc(glights->lights_count *
+ sizeof(struct gb_light), GFP_KERNEL);
+ if (!glights->lights) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < glights->lights_count; i++) {
+ ret = gb_lights_light_config(glights, i);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+ "Fail to configure lights device\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&glights->lights_lock);
+ return ret;
+}
+
+static int gb_lights_register_all(struct gb_lights *glights)
+{
+ struct gb_connection *connection = glights->connection;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&glights->lights_lock);
+ for (i = 0; i < glights->lights_count; i++) {
+ ret = gb_lights_light_register(&glights->lights[i]);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+ "Fail to enable lights device\n");
+ break;
+ }
+ }
+
+ mutex_unlock(&glights->lights_lock);
+ return ret;
+}
+
+static int gb_lights_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_lights *glights = gb_connection_get_data(connection);
+ struct gb_light *light;
+ struct gb_message *request;
+ struct gb_lights_event_request *payload;
+ int ret = 0;
+ u8 light_id;
+ u8 event;
+
+ if (op->type != GB_LIGHTS_TYPE_EVENT) {
+ dev_err(dev, "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(dev, "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ light_id = payload->light_id;
+
+ if (light_id >= glights->lights_count ||
+ !glights->lights[light_id].ready) {
+ dev_err(dev, "Event received for unconfigured light id: %d\n",
+ light_id);
+ return -EINVAL;
+ }
+
+ event = payload->event;
+
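+ /*
+ * A CONFIG event means the light changed on the module side: tear the
+ * light down and rebuild it from a freshly fetched configuration.
+ */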
+ if (event & GB_LIGHTS_LIGHT_CONFIG) {
+ light = &glights->lights[light_id];
+
+ mutex_lock(&glights->lights_lock);
+ gb_lights_light_release(light);
+ ret = gb_lights_light_config(glights, light_id);
+ if (!ret)
+ ret = gb_lights_light_register(light);
+ if (ret < 0)
+ gb_lights_light_release(light);
+ mutex_unlock(&glights->lights_lock);
+ }
+
+ return ret;
+}
+
+static int gb_lights_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_lights *glights;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LIGHTS)
+ return -ENODEV;
+
+ glights = kzalloc(sizeof(*glights), GFP_KERNEL);
+ if (!glights)
+ return -ENOMEM;
+
+ mutex_init(&glights->lights_lock);
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_lights_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto out;
+ }
+
+ glights->connection = connection;
+ gb_connection_set_data(connection, glights);
+
+ greybus_set_drvdata(bundle, glights);
+
+ /* We aren't ready to receive an incoming request yet */
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto error_connection_destroy;
+
+ /*
+ * Set up all the lights devices over this connection; if anything goes
+ * wrong, tear down all lights.
+ */
+ ret = gb_lights_create_all(glights);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ /* We are ready to receive an incoming request now, enable RX as well */
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto error_connection_disable;
+
+ /* Enable & register lights */
+ ret = gb_lights_register_all(glights);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+error_connection_disable:
+ gb_connection_disable(connection);
+error_connection_destroy:
+ gb_connection_destroy(connection);
+out:
+ gb_lights_release(glights);
+ return ret;
+}
+
+static void gb_lights_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_lights *glights = greybus_get_drvdata(bundle);
+
+ if (gb_pm_runtime_get_sync(bundle))
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_connection_disable(glights->connection);
+ gb_connection_destroy(glights->connection);
+
+ gb_lights_release(glights);
+}
+
+static const struct greybus_bundle_id gb_lights_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LIGHTS) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_lights_id_table);
+
+static struct greybus_driver gb_lights_driver = {
+ .name = "lights",
+ .probe = gb_lights_probe,
+ .disconnect = gb_lights_disconnect,
+ .id_table = gb_lights_id_table,
+};
+module_greybus_driver(gb_lights_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
new file mode 100644
index 000000000000..70dd9e5a1cf2
--- /dev/null
+++ b/drivers/staging/greybus/log.c
@@ -0,0 +1,132 @@
+/*
+ * Greybus driver for the log protocol
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+
+#include "greybus.h"
+
+struct gb_log {
+ struct gb_connection *connection;
+};
+
+static int gb_log_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_log_send_log_request *receive;
+ u16 len;
+
+ if (op->type != GB_LOG_TYPE_SEND_LOG) {
+ dev_err(dev, "unknown request type 0x%02x\n", op->type);
+ return -EINVAL;
+ }
+
+ /* Verify size of payload */
+ if (op->request->payload_size < sizeof(*receive)) {
+ dev_err(dev, "log request too small (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*receive));
+ return -EINVAL;
+ }
+ receive = op->request->payload;
+ len = le16_to_cpu(receive->len);
+ if (len != (int)(op->request->payload_size - sizeof(*receive))) {
+ dev_err(dev, "log request wrong size %d vs %d\n", len,
+ (int)(op->request->payload_size - sizeof(*receive)));
+ return -EINVAL;
+ }
+ if (len == 0) {
+ dev_err(dev, "log request of 0 bytes?\n");
+ return -EINVAL;
+ }
+
+ if (len > GB_LOG_MAX_LEN) {
+ dev_err(dev, "log request too big: %d\n", len);
+ return -EINVAL;
+ }
+
+ /* Ensure the buffer is 0 terminated */
+ receive->msg[len - 1] = '\0';
+
+ /*
+ * Print with dev_dbg() so that it can be easily turned off using
+ * dynamic debugging (and prevent any DoS).
+ */
+ dev_dbg(dev, "%s", receive->msg);
+
+ return 0;
+}
+
+static int gb_log_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_log *log;
+ int retval;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOG)
+ return -ENODEV;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_log_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto error_free;
+ }
+
+ log->connection = connection;
+ greybus_set_drvdata(bundle, log);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto error_connection_destroy;
+
+ return 0;
+
+error_connection_destroy:
+ gb_connection_destroy(connection);
+error_free:
+ kfree(log);
+ return retval;
+}
+
+static void gb_log_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_log *log = greybus_get_drvdata(bundle);
+ struct gb_connection *connection = log->connection;
+
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+
+ kfree(log);
+}
+
+static const struct greybus_bundle_id gb_log_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOG) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_log_id_table);
+
+static struct greybus_driver gb_log_driver = {
+ .name = "log",
+ .probe = gb_log_probe,
+ .disconnect = gb_log_disconnect,
+ .id_table = gb_log_id_table,
+};
+module_greybus_driver(gb_log_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
new file mode 100644
index 000000000000..7882306adeca
--- /dev/null
+++ b/drivers/staging/greybus/loopback.c
@@ -0,0 +1,1364 @@
+/*
+ * Loopback bridge driver for the Greybus loopback module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/div64.h>
+
+#include "greybus.h"
+#include "connection.h"
+
+#define NSEC_PER_DAY 86400000000000ULL
+
+struct gb_loopback_stats {
+ u32 min;
+ u32 max;
+ u64 sum;
+ u32 count;
+};
+
+struct gb_loopback_device {
+ struct dentry *root;
+ u32 count;
+ size_t size_max;
+
+ /* We need to take a lock in atomic context */
+ spinlock_t lock;
+ struct list_head list;
+ struct list_head list_op_async;
+ wait_queue_head_t wq;
+};
+
+static struct gb_loopback_device gb_dev;
+
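+/*
+ * Book-keeping for one in-flight asynchronous loopback operation: the
+ * timer drives the per-request timeout while the kref keeps the structure
+ * alive until every path that may still reference it has dropped its hold.
+ */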
+struct gb_loopback_async_operation {
+ struct gb_loopback *gb;
+ struct gb_operation *operation;
+ struct timeval ts;
+ struct timer_list timer;
+ struct list_head entry;
+ struct work_struct work;
+ struct kref kref;
+ bool pending;
+ int (*completion)(struct gb_loopback_async_operation *op_async);
+};
+
+struct gb_loopback {
+ struct gb_connection *connection;
+
+ struct dentry *file;
+ struct kfifo kfifo_lat;
+ struct kfifo kfifo_ts;
+ struct mutex mutex;
+ struct task_struct *task;
+ struct list_head entry;
+ struct device *dev;
+ wait_queue_head_t wq;
+ wait_queue_head_t wq_completion;
+ atomic_t outstanding_operations;
+
+ /* Per connection stats */
+ struct timeval ts;
+ struct gb_loopback_stats latency;
+ struct gb_loopback_stats throughput;
+ struct gb_loopback_stats requests_per_second;
+ struct gb_loopback_stats apbridge_unipro_latency;
+ struct gb_loopback_stats gbphy_firmware_latency;
+
+ int type;
+ int async;
+ int id;
+ u32 size;
+ u32 iteration_max;
+ u32 iteration_count;
+ int us_wait;
+ u32 error;
+ u32 requests_completed;
+ u32 requests_timedout;
+ u32 timeout;
+ u32 jiffy_timeout;
+ u32 timeout_min;
+ u32 timeout_max;
+ u32 outstanding_operations_max;
+ u32 lbid;
+ u64 elapsed_nsecs;
+ u32 apbridge_latency_ts;
+ u32 gbphy_latency_ts;
+
+ u32 send_count;
+};
+
+static struct class loopback_class = {
+ .name = "gb_loopback",
+ .owner = THIS_MODULE,
+};
+static DEFINE_IDA(loopback_ida);
+
+/* Min/max values in jiffies */
+#define GB_LOOPBACK_TIMEOUT_MIN 1
+#define GB_LOOPBACK_TIMEOUT_MAX 10000
+
+#define GB_LOOPBACK_FIFO_DEFAULT 8192
+
+static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
+module_param(kfifo_depth, uint, 0444);
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
+
+#define GB_LOOPBACK_US_WAIT_MAX 1000000
+
+/* interface sysfs attributes */
+#define gb_loopback_ro_attr(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", gb->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+#define gb_loopback_ro_stats_attr(name, field, type) \
+static ssize_t name##_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ /* Report 0 for min and max if no transfer succeeded */ \
+ if (!gb->requests_completed) \
+ return sprintf(buf, "0\n"); \
+ return sprintf(buf, "%"#type"\n", gb->name.field); \
+} \
+static DEVICE_ATTR_RO(name##_##field)
+
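+/*
+ * The average is printed with six decimal places: the integer part is
+ * essentially sum / count, and the remainder is scaled by 1000000 before
+ * a second division by count to recover the fractional digits.
+ */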
+#define gb_loopback_ro_avg_attr(name) \
+static ssize_t name##_avg_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback_stats *stats; \
+ struct gb_loopback *gb; \
+ u64 avg, rem; \
+ u32 count; \
+ gb = dev_get_drvdata(dev); \
+ stats = &gb->name; \
+ count = stats->count ? stats->count : 1; \
+ avg = stats->sum + count / 2000000; /* round closest */ \
+ rem = do_div(avg, count); \
+ rem *= 1000000; \
+ do_div(rem, count); \
+ return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \
+} \
+static DEVICE_ATTR_RO(name##_avg)
+
+#define gb_loopback_stats_attrs(field) \
+ gb_loopback_ro_stats_attr(field, min, u); \
+ gb_loopback_ro_stats_attr(field, max, u); \
+ gb_loopback_ro_avg_attr(field)
+
+#define gb_loopback_attr(field, type) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%"#type"\n", gb->field); \
+} \
+static ssize_t field##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t len) \
+{ \
+ int ret; \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ mutex_lock(&gb->mutex); \
+ ret = sscanf(buf, "%"#type, &gb->field); \
+ if (ret != 1) \
+ len = -EINVAL; \
+ else \
+ gb_loopback_check_attr(gb); \
+ mutex_unlock(&gb->mutex); \
+ return len; \
+} \
+static DEVICE_ATTR_RW(field)
+
+#define gb_dev_loopback_ro_attr(field, conn) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", gb->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+#define gb_dev_loopback_rw_attr(field, type) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ return sprintf(buf, "%"#type"\n", gb->field); \
+} \
+static ssize_t field##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t len) \
+{ \
+ int ret; \
+ struct gb_loopback *gb = dev_get_drvdata(dev); \
+ mutex_lock(&gb->mutex); \
+ ret = sscanf(buf, "%"#type, &gb->field); \
+ if (ret != 1) \
+ len = -EINVAL; \
+ else \
+ gb_loopback_check_attr(gb); \
+ mutex_unlock(&gb->mutex); \
+ return len; \
+} \
+static DEVICE_ATTR_RW(field)
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb);
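+
+/*
+ * Called with gb->mutex held whenever a writable attribute changes: clamp
+ * the new values, reset the statistics and, for a valid test type, wake
+ * the worker so the new settings take effect.
+ */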
+static void gb_loopback_check_attr(struct gb_loopback *gb)
+{
+ if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
+ gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
+ if (gb->size > gb_dev.size_max)
+ gb->size = gb_dev.size_max;
+ gb->requests_timedout = 0;
+ gb->requests_completed = 0;
+ gb->iteration_count = 0;
+ gb->send_count = 0;
+ gb->error = 0;
+
+ if (kfifo_depth < gb->iteration_max) {
+ dev_warn(gb->dev,
+ "cannot log bytes %u kfifo_depth %u\n",
+ gb->iteration_max, kfifo_depth);
+ }
+ kfifo_reset_out(&gb->kfifo_lat);
+ kfifo_reset_out(&gb->kfifo_ts);
+
+ switch (gb->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ case GB_LOOPBACK_TYPE_SINK:
+ gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
+ if (!gb->jiffy_timeout)
+ gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
+ else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
+ gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
+ gb_loopback_reset_stats(gb);
+ wake_up(&gb->wq);
+ break;
+ default:
+ gb->type = 0;
+ break;
+ }
+}
+
+/* Time to send and receive one message */
+gb_loopback_stats_attrs(latency);
+/* Number of requests sent per second on this cport */
+gb_loopback_stats_attrs(requests_per_second);
+/* Quantity of data sent and received on this cport */
+gb_loopback_stats_attrs(throughput);
+/* Latency across the UniPro link from APBridge's perspective */
+gb_loopback_stats_attrs(apbridge_unipro_latency);
+/* Firmware induced overhead in the GPBridge */
+gb_loopback_stats_attrs(gbphy_firmware_latency);
+
+/* Number of errors encountered during loop */
+gb_loopback_ro_attr(error);
+/* Number of requests successfully completed async */
+gb_loopback_ro_attr(requests_completed);
+/* Number of requests timed out async */
+gb_loopback_ro_attr(requests_timedout);
+/* Timeout minimum in useconds */
+gb_loopback_ro_attr(timeout_min);
+/* Timeout maximum in useconds */
+gb_loopback_ro_attr(timeout_max);
+
+/*
+ * Type of loopback message to send based on protocol type definitions
+ * 0 => Don't send message
+ * 2 => Send ping message continuously (message without payload)
+ * 3 => Send transfer message continuously (message with payload,
+ * payload returned in response)
+ * 4 => Send a sink message (message with payload, no payload in response)
+ */
+gb_dev_loopback_rw_attr(type, d);
+/* Size of transfer message payload: 0-4096 bytes */
+gb_dev_loopback_rw_attr(size, u);
+/* Time to wait between two messages: 0-1000 ms */
+gb_dev_loopback_rw_attr(us_wait, d);
+/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
+gb_dev_loopback_rw_attr(iteration_max, u);
+/* The current index of the for (i = 0; i < iteration_max; i++) loop */
+gb_dev_loopback_ro_attr(iteration_count, false);
+/* A flag to indicate synchronous or asynchronous operations */
+gb_dev_loopback_rw_attr(async, u);
+/* Timeout of an individual asynchronous request */
+gb_dev_loopback_rw_attr(timeout, u);
+/* Maximum number of in-flight operations before back-off */
+gb_dev_loopback_rw_attr(outstanding_operations_max, u);
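These attributes are the driver's entire user-facing control surface, so a short user-space sketch may help. The /sys/class/gb_loopback/gb_loopback0 path is an assumption inferred from the device_create_with_groups() call in the probe routine further down; adjust it to the actual instance:

#include <stdio.h>
#include <unistd.h>

#define SYSFS "/sys/class/gb_loopback/gb_loopback0/"

static int wr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), SYSFS "%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

static int rd(const char *attr, char *buf, int len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), SYSFS "%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f))
		buf[0] = '\0';
	return fclose(f);
}

int main(void)
{
	char buf[64];

	wr("size", "512");		/* payload bytes per transfer */
	wr("us_wait", "1000");		/* 1 ms between messages */
	wr("iteration_max", "1000");	/* stop after 1000 operations */
	wr("type", "3");		/* 3 == transfer, per the comment above */

	do {				/* type drops back to 0 when the run ends */
		usleep(100 * 1000);
		rd("type", buf, sizeof(buf));
	} while (buf[0] != '0');

	rd("latency_avg", buf, sizeof(buf));
	printf("average latency (us): %s", buf);
	return 0;
}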
+
+static struct attribute *loopback_attrs[] = {
+ &dev_attr_latency_min.attr,
+ &dev_attr_latency_max.attr,
+ &dev_attr_latency_avg.attr,
+ &dev_attr_requests_per_second_min.attr,
+ &dev_attr_requests_per_second_max.attr,
+ &dev_attr_requests_per_second_avg.attr,
+ &dev_attr_throughput_min.attr,
+ &dev_attr_throughput_max.attr,
+ &dev_attr_throughput_avg.attr,
+ &dev_attr_apbridge_unipro_latency_min.attr,
+ &dev_attr_apbridge_unipro_latency_max.attr,
+ &dev_attr_apbridge_unipro_latency_avg.attr,
+ &dev_attr_gbphy_firmware_latency_min.attr,
+ &dev_attr_gbphy_firmware_latency_max.attr,
+ &dev_attr_gbphy_firmware_latency_avg.attr,
+ &dev_attr_type.attr,
+ &dev_attr_size.attr,
+ &dev_attr_us_wait.attr,
+ &dev_attr_iteration_count.attr,
+ &dev_attr_iteration_max.attr,
+ &dev_attr_async.attr,
+ &dev_attr_error.attr,
+ &dev_attr_requests_completed.attr,
+ &dev_attr_requests_timedout.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_outstanding_operations_max.attr,
+ &dev_attr_timeout_min.attr,
+ &dev_attr_timeout_max.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(loopback);
+
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
+
+static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
+{
+ u32 lat;
+
+ do_div(elapsed_nsecs, NSEC_PER_USEC);
+ lat = elapsed_nsecs;
+ return lat;
+}
+
+static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
+{
+ if (t2 > t1)
+ return t2 - t1;
+ else
+ return NSEC_PER_DAY - t2 + t1;
+}
+
+static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
+{
+ u64 t1, t2;
+
+ t1 = timeval_to_ns(ts);
+ t2 = timeval_to_ns(te);
+
+ return __gb_loopback_calc_latency(t1, t2);
+}
+
+static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
+ struct timeval *ts, struct timeval *te)
+{
+ kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
+ kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
+}
+
+static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
+ void *request, int request_size,
+ void *response, int response_size)
+{
+ struct gb_operation *operation;
+ struct timeval ts, te;
+ int ret;
+
+ do_gettimeofday(&ts);
+ operation = gb_operation_create(gb->connection, type, request_size,
+ response_size, GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret) {
+ dev_err(&gb->connection->bundle->dev,
+ "synchronous operation failed: %d\n", ret);
+ goto out_put_operation;
+ } else {
+ if (response_size == operation->response->payload_size) {
+ memcpy(response, operation->response->payload,
+ response_size);
+ } else {
+ dev_err(&gb->connection->bundle->dev,
+ "response size %zu expected %d\n",
+ operation->response->payload_size,
+ response_size);
+ ret = -EINVAL;
+ goto out_put_operation;
+ }
+ }
+
+ do_gettimeofday(&te);
+
+ /* Calculate the total time the message took */
+ gb_loopback_push_latency_ts(gb, &ts, &te);
+ gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
+
+out_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static void __gb_loopback_async_operation_destroy(struct kref *kref)
+{
+ struct gb_loopback_async_operation *op_async;
+
+ op_async = container_of(kref, struct gb_loopback_async_operation, kref);
+
+ list_del(&op_async->entry);
+ if (op_async->operation)
+ gb_operation_put(op_async->operation);
+ atomic_dec(&op_async->gb->outstanding_operations);
+ wake_up(&op_async->gb->wq_completion);
+ kfree(op_async);
+}
+
+static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
+ *op_async)
+{
+ kref_get(&op_async->kref);
+}
+
+static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
+ *op_async)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+}
+
+static struct gb_loopback_async_operation *
+ gb_loopback_operation_find(u16 id)
+{
+ struct gb_loopback_async_operation *op_async;
+ bool found = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
+ if (op_async->operation->id == id) {
+ gb_loopback_async_operation_get(op_async);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ return found ? op_async : NULL;
+}
+
+static void gb_loopback_async_wait_all(struct gb_loopback *gb)
+{
+ wait_event(gb->wq_completion,
+ !atomic_read(&gb->outstanding_operations));
+}
+
+static void gb_loopback_async_operation_callback(struct gb_operation *operation)
+{
+ struct gb_loopback_async_operation *op_async;
+ struct gb_loopback *gb;
+ struct timeval te;
+ bool err = false;
+
+ do_gettimeofday(&te);
+ op_async = gb_loopback_operation_find(operation->id);
+ if (!op_async)
+ return;
+
+ gb = op_async->gb;
+ mutex_lock(&gb->mutex);
+
+ if (!op_async->pending || gb_operation_result(operation)) {
+ err = true;
+ } else {
+ if (op_async->completion)
+ if (op_async->completion(op_async))
+ err = true;
+ }
+
+ if (!err) {
+ gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
+ gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
+ &te);
+ }
+
+ if (op_async->pending) {
+ if (err)
+ gb->error++;
+ gb->iteration_count++;
+ op_async->pending = false;
+ del_timer_sync(&op_async->timer);
+ gb_loopback_async_operation_put(op_async);
+ gb_loopback_calculate_stats(gb, err);
+ }
+ mutex_unlock(&gb->mutex);
+
+ dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
+ operation->id);
+
+ gb_loopback_async_operation_put(op_async);
+}
+
+static void gb_loopback_async_operation_work(struct work_struct *work)
+{
+ struct gb_loopback *gb;
+ struct gb_operation *operation;
+ struct gb_loopback_async_operation *op_async;
+
+ op_async = container_of(work, struct gb_loopback_async_operation, work);
+ gb = op_async->gb;
+ operation = op_async->operation;
+
+ mutex_lock(&gb->mutex);
+ if (op_async->pending) {
+ gb->requests_timedout++;
+ gb->error++;
+ gb->iteration_count++;
+ op_async->pending = false;
+ gb_loopback_async_operation_put(op_async);
+ gb_loopback_calculate_stats(gb, true);
+ }
+ mutex_unlock(&gb->mutex);
+
+ dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
+ operation->id);
+
+ gb_operation_cancel(operation, -ETIMEDOUT);
+ gb_loopback_async_operation_put(op_async);
+}
+
+static void gb_loopback_async_operation_timeout(unsigned long data)
+{
+ struct gb_loopback_async_operation *op_async;
+ u16 id = data;
+
+ op_async = gb_loopback_operation_find(id);
+ if (!op_async) {
+ pr_err("operation %d not found - time out ?\n", id);
+ return;
+ }
+ schedule_work(&op_async->work);
+}
+
+static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
+ void *request, int request_size,
+ int response_size,
+ void *completion)
+{
+ struct gb_loopback_async_operation *op_async;
+ struct gb_operation *operation;
+ int ret;
+ unsigned long flags;
+
+ op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
+ if (!op_async)
+ return -ENOMEM;
+
+ INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
+ kref_init(&op_async->kref);
+
+ operation = gb_operation_create(gb->connection, type, request_size,
+ response_size, GFP_KERNEL);
+ if (!operation) {
+ kfree(op_async);
+ return -ENOMEM;
+ }
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ op_async->gb = gb;
+ op_async->operation = operation;
+ op_async->completion = completion;
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ list_add_tail(&op_async->entry, &gb_dev.list_op_async);
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ do_gettimeofday(&op_async->ts);
+ op_async->pending = true;
+ atomic_inc(&gb->outstanding_operations);
+ mutex_lock(&gb->mutex);
+ ret = gb_operation_request_send(operation,
+ gb_loopback_async_operation_callback,
+ GFP_KERNEL);
+ if (ret)
+ goto error;
+
+ setup_timer(&op_async->timer, gb_loopback_async_operation_timeout,
+ (unsigned long)operation->id);
+ op_async->timer.expires = jiffies + gb->jiffy_timeout;
+ add_timer(&op_async->timer);
+
+ goto done;
+error:
+ gb_loopback_async_operation_put(op_async);
+done:
+ mutex_unlock(&gb->mutex);
+ return ret;
+}
+
+static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
+ request, len + sizeof(*request),
+ NULL, 0);
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ int retval;
+
+ gb->apbridge_latency_ts = 0;
+ gb->gbphy_latency_ts = 0;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+ response = kmalloc(len + sizeof(*response), GFP_KERNEL);
+ if (!response) {
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ memset(request->data, 0x5A, len);
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
+ request, len + sizeof(*request),
+ response, len + sizeof(*response));
+ if (retval)
+ goto gb_error;
+
+ if (memcmp(request->data, response->data, len)) {
+ dev_err(&gb->connection->bundle->dev,
+ "Loopback Data doesn't match\n");
+ retval = -EREMOTEIO;
+ }
+ gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
+ gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
+
+gb_error:
+ kfree(request);
+ kfree(response);
+
+ return retval;
+}
+
+static int gb_loopback_sync_ping(struct gb_loopback *gb)
+{
+ return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
+ NULL, 0, NULL, 0);
+}
+
+static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->len = cpu_to_le32(len);
+ retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
+ request, len + sizeof(*request),
+ 0, NULL);
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_async_transfer_complete(
+ struct gb_loopback_async_operation *op_async)
+{
+ struct gb_loopback *gb;
+ struct gb_operation *operation;
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ size_t len;
+ int retval = 0;
+
+ gb = op_async->gb;
+ operation = op_async->operation;
+ request = operation->request->payload;
+ response = operation->response->payload;
+ len = le32_to_cpu(request->len);
+
+ if (memcmp(request->data, response->data, len)) {
+ dev_err(&gb->connection->bundle->dev,
+ "Loopback Data doesn't match operation id %d\n",
+ operation->id);
+ retval = -EREMOTEIO;
+ } else {
+ gb->apbridge_latency_ts =
+ (u32)__le32_to_cpu(response->reserved0);
+ gb->gbphy_latency_ts =
+ (u32)__le32_to_cpu(response->reserved1);
+ }
+
+ return retval;
+}
+
+static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
+{
+ struct gb_loopback_transfer_request *request;
+ int retval, response_len;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ memset(request->data, 0x5A, len);
+
+ request->len = cpu_to_le32(len);
+ response_len = sizeof(struct gb_loopback_transfer_response);
+ retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
+ request, len + sizeof(*request),
+ len + response_len,
+ gb_loopback_async_transfer_complete);
+ if (retval)
+ goto gb_error;
+
+gb_error:
+ kfree(request);
+ return retval;
+}
+
+static int gb_loopback_async_ping(struct gb_loopback *gb)
+{
+ return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
+ NULL, 0, 0, NULL);
+}
+
+static int gb_loopback_request_handler(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ struct gb_loopback_transfer_request *request;
+ struct gb_loopback_transfer_response *response;
+ struct device *dev = &connection->bundle->dev;
+ size_t len;
+
+ /* By convention, the AP initiates the version operation */
+ switch (operation->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ case GB_LOOPBACK_TYPE_SINK:
+ return 0;
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ if (operation->request->payload_size < sizeof(*request)) {
+ dev_err(dev, "transfer request too small (%zu < %zu)\n",
+ operation->request->payload_size,
+ sizeof(*request));
+ return -EINVAL; /* -EMSGSIZE */
+ }
+ request = operation->request->payload;
+ len = le32_to_cpu(request->len);
+ if (len > gb_dev.size_max) {
+ dev_err(dev, "transfer request too large (%zu > %zu)\n",
+ len, gb_dev.size_max);
+ return -EINVAL;
+ }
+
+ if (!gb_operation_response_alloc(operation,
+ len + sizeof(*response), GFP_KERNEL)) {
+ dev_err(dev, "error allocating response\n");
+ return -ENOMEM;
+ }
+ response = operation->response->payload;
+ response->len = cpu_to_le32(len);
+ if (len)
+ memcpy(response->data, request->data, len);
+
+ return 0;
+ default:
+ dev_err(dev, "unsupported request: %u\n", operation->type);
+ return -EINVAL;
+ }
+}
+
+static void gb_loopback_reset_stats(struct gb_loopback *gb)
+{
+ struct gb_loopback_stats reset = {
+ .min = U32_MAX,
+ };
+
+ /* Reset per-connection stats */
+ memcpy(&gb->latency, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->throughput, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->requests_per_second, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->apbridge_unipro_latency, &reset,
+ sizeof(struct gb_loopback_stats));
+ memcpy(&gb->gbphy_firmware_latency, &reset,
+ sizeof(struct gb_loopback_stats));
+
+ /* Should be initialized at least once per transaction set */
+ gb->apbridge_latency_ts = 0;
+ gb->gbphy_latency_ts = 0;
+ memset(&gb->ts, 0, sizeof(struct timeval));
+}
+
+static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
+{
+ if (stats->min > val)
+ stats->min = val;
+ if (stats->max < val)
+ stats->max = val;
+ stats->sum += val;
+ stats->count++;
+}
+
+static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
+ u64 val, u32 count)
+{
+ stats->sum += val;
+ stats->count += count;
+
+ do_div(val, count);
+ if (stats->min > val)
+ stats->min = val;
+ if (stats->max < val)
+ stats->max = val;
+}
+
+static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
+{
+ u64 req = gb->requests_completed * USEC_PER_SEC;
+
+ gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
+}
+
+static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
+{
+ u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
+
+ switch (gb->type) {
+ case GB_LOOPBACK_TYPE_PING:
+ break;
+ case GB_LOOPBACK_TYPE_SINK:
+ aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+ gb->size;
+ break;
+ case GB_LOOPBACK_TYPE_TRANSFER:
+ aggregate_size += sizeof(struct gb_loopback_transfer_request) +
+ sizeof(struct gb_loopback_transfer_response) +
+ gb->size * 2;
+ break;
+ default:
+ return;
+ }
+
+ aggregate_size *= gb->requests_completed;
+ aggregate_size *= USEC_PER_SEC;
+ gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
+ latency);
+}
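The units in the two update helpers above are easy to lose track of: the per-window sums are scaled by USEC_PER_SEC before being divided by the window length in microseconds, so the resulting min/max/avg figures are per-second rates. A small standalone model of the same arithmetic (illustrative numbers only; the real driver also folds the Greybus operation headers into aggregate_size):

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
	uint64_t completed = 250;		/* requests finished in this window */
	uint64_t window_us = 500000;		/* ~0.5 s window, in microseconds */
	uint64_t bytes_per_op = 2 * 128;	/* request + response payload, say */

	uint64_t ops_per_sec = completed * USEC_PER_SEC / window_us;
	uint64_t bytes_per_sec = completed * bytes_per_op * USEC_PER_SEC / window_us;

	printf("%llu ops/s, %llu B/s\n",
	       (unsigned long long)ops_per_sec,
	       (unsigned long long)bytes_per_sec);	/* 500 ops/s, 128000 B/s */
	return 0;
}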
+
+static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
+{
+ u32 lat;
+
+ /* Express latency in terms of microseconds */
+ lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
+
+ /* Log latency statistic */
+ gb_loopback_update_stats(&gb->latency, lat);
+
+ /* Raw latency log on a per thread basis */
+ kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
+
+ /* Log the firmware supplied latency values */
+ gb_loopback_update_stats(&gb->apbridge_unipro_latency,
+ gb->apbridge_latency_ts);
+ gb_loopback_update_stats(&gb->gbphy_firmware_latency,
+ gb->gbphy_latency_ts);
+}
+
+static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
+{
+ u64 nlat;
+ u32 lat;
+ struct timeval te;
+
+ if (!error) {
+ gb->requests_completed++;
+ gb_loopback_calculate_latency_stats(gb);
+ }
+
+ do_gettimeofday(&te);
+ nlat = gb_loopback_calc_latency(&gb->ts, &te);
+ if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
+ lat = gb_loopback_nsec_to_usec_latency(nlat);
+
+ gb_loopback_throughput_update(gb, lat);
+ gb_loopback_requests_update(gb, lat);
+
+ if (gb->iteration_count != gb->iteration_max) {
+ gb->ts = te;
+ gb->requests_completed = 0;
+ }
+ }
+}
+
+static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
+{
+ if (!(gb->async && gb->outstanding_operations_max))
+ return;
+ wait_event_interruptible(gb->wq_completion,
+ (atomic_read(&gb->outstanding_operations) <
+ gb->outstanding_operations_max) ||
+ kthread_should_stop());
+}
+
+static int gb_loopback_fn(void *data)
+{
+ int error = 0;
+ int us_wait = 0;
+ int type;
+ int ret;
+ u32 size;
+
+ struct gb_loopback *gb = data;
+ struct gb_bundle *bundle = gb->connection->bundle;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ while (1) {
+ if (!gb->type) {
+ gb_pm_runtime_put_autosuspend(bundle);
+ wait_event_interruptible(gb->wq, gb->type ||
+ kthread_should_stop());
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+ }
+
+ if (kthread_should_stop())
+ break;
+
+ /* Limit the maximum number of in-flight async operations */
+ gb_loopback_async_wait_to_send(gb);
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&gb->mutex);
+
+ /* Optionally terminate */
+ if (gb->send_count == gb->iteration_max) {
+ if (gb->iteration_count == gb->iteration_max) {
+ gb->type = 0;
+ gb->send_count = 0;
+ sysfs_notify(&gb->dev->kobj, NULL,
+ "iteration_count");
+ }
+ mutex_unlock(&gb->mutex);
+ continue;
+ }
+ size = gb->size;
+ us_wait = gb->us_wait;
+ type = gb->type;
+ if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
+ do_gettimeofday(&gb->ts);
+ mutex_unlock(&gb->mutex);
+
+ /* Otherwise, there are operations to perform */
+ if (gb->async) {
+ if (type == GB_LOOPBACK_TYPE_PING) {
+ error = gb_loopback_async_ping(gb);
+ } else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
+ error = gb_loopback_async_transfer(gb, size);
+ } else if (type == GB_LOOPBACK_TYPE_SINK) {
+ error = gb_loopback_async_sink(gb, size);
+ }
+
+ if (error)
+ gb->error++;
+ } else {
+ /* We are effectively single threaded here */
+ if (type == GB_LOOPBACK_TYPE_PING)
+ error = gb_loopback_sync_ping(gb);
+ else if (type == GB_LOOPBACK_TYPE_TRANSFER)
+ error = gb_loopback_sync_transfer(gb, size);
+ else if (type == GB_LOOPBACK_TYPE_SINK)
+ error = gb_loopback_sync_sink(gb, size);
+
+ if (error)
+ gb->error++;
+ gb->iteration_count++;
+ gb_loopback_calculate_stats(gb, !!error);
+ }
+ gb->send_count++;
+ if (us_wait)
+ udelay(us_wait);
+ }
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+}
+
+static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
+ struct kfifo *kfifo,
+ struct mutex *mutex)
+{
+ u32 latency;
+ int retval;
+
+ if (kfifo_len(kfifo) == 0) {
+ retval = -EAGAIN;
+ goto done;
+ }
+
+ mutex_lock(mutex);
+ retval = kfifo_out(kfifo, &latency, sizeof(latency));
+ if (retval > 0) {
+ seq_printf(s, "%u", latency);
+ retval = 0;
+ }
+ mutex_unlock(mutex);
+done:
+ return retval;
+}
+
+static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
+{
+ struct gb_loopback *gb = s->private;
+
+ return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
+ &gb->mutex);
+}
+
+static int gb_loopback_latency_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gb_loopback_dbgfs_latency_show,
+ inode->i_private);
+}
+
+static const struct file_operations gb_loopback_debugfs_latency_ops = {
+ .open = gb_loopback_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
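Each open-and-read of this debugfs file pops one latency sample (in microseconds) from the per-connection kfifo. A user-space sketch, with the debugfs mount point and file name assumed (the name is built from the bundle device name in the probe routine below):

#include <stdio.h>

int main(void)
{
	/* Path is an assumption: <debugfs>/gb_loopback/raw_latency_<bundle name> */
	const char *path = "/sys/kernel/debug/gb_loopback/raw_latency_1-2.2";
	char buf[32];
	FILE *f;

	for (;;) {
		f = fopen(path, "r");
		if (!f)
			return 1;
		if (!fgets(buf, sizeof(buf), f)) {	/* read fails (-EAGAIN) once drained */
			fclose(f);
			break;
		}
		printf("latency: %s us\n", buf);
		fclose(f);
	}
	return 0;
}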
+
+static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
+ struct list_head *lhb)
+{
+ struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
+ struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
+ struct gb_connection *ca = a->connection;
+ struct gb_connection *cb = b->connection;
+
+ if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
+ return -1;
+ if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
+ return 1;
+ if (ca->bundle->id < cb->bundle->id)
+ return -1;
+ if (cb->bundle->id < ca->bundle->id)
+ return 1;
+ if (ca->intf_cport_id < cb->intf_cport_id)
+ return -1;
+ else if (cb->intf_cport_id < ca->intf_cport_id)
+ return 1;
+
+ return 0;
+}
+
+static void gb_loopback_insert_id(struct gb_loopback *gb)
+{
+ struct gb_loopback *gb_list;
+ u32 new_lbid = 0;
+
+ /* perform an insertion sort */
+ list_add_tail(&gb->entry, &gb_dev.list);
+ list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
+ list_for_each_entry(gb_list, &gb_dev.list, entry) {
+ gb_list->lbid = 1 << new_lbid;
+ new_lbid++;
+ }
+}
+
+#define DEBUGFS_NAMELEN 32
+
+static int gb_loopback_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_loopback *gb;
+ struct device *dev;
+ int retval;
+ char name[DEBUGFS_NAMELEN];
+ unsigned long flags;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
+ return -ENODEV;
+
+ gb = kzalloc(sizeof(*gb), GFP_KERNEL);
+ if (!gb)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_loopback_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto out_kzalloc;
+ }
+
+ gb->connection = connection;
+ greybus_set_drvdata(bundle, gb);
+
+ init_waitqueue_head(&gb->wq);
+ init_waitqueue_head(&gb->wq_completion);
+ atomic_set(&gb->outstanding_operations, 0);
+ gb_loopback_reset_stats(gb);
+
+ /* Reported values to user-space for min/max timeouts */
+ gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
+ gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
+
+ if (!gb_dev.count) {
+ /* Calculate maximum payload */
+ gb_dev.size_max = gb_operation_get_payload_size_max(connection);
+ if (gb_dev.size_max <=
+ sizeof(struct gb_loopback_transfer_request)) {
+ retval = -EINVAL;
+ goto out_connection_destroy;
+ }
+ gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
+ }
+
+ /* Create per-connection sysfs and debugfs data-points */
+ snprintf(name, sizeof(name), "raw_latency_%s",
+ dev_name(&connection->bundle->dev));
+ gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
+ &gb_loopback_debugfs_latency_ops);
+
+ gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
+ if (gb->id < 0) {
+ retval = gb->id;
+ goto out_debugfs_remove;
+ }
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto out_ida_remove;
+
+ dev = device_create_with_groups(&loopback_class,
+ &connection->bundle->dev,
+ MKDEV(0, 0), gb, loopback_groups,
+ "gb_loopback%d", gb->id);
+ if (IS_ERR(dev)) {
+ retval = PTR_ERR(dev);
+ goto out_connection_disable;
+ }
+ gb->dev = dev;
+
+ /* Allocate kfifo */
+ if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
+ GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_conn;
+ }
+ if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
+ GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_kfifo0;
+ }
+
+ /* Fork worker thread */
+ mutex_init(&gb->mutex);
+ gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
+ if (IS_ERR(gb->task)) {
+ retval = PTR_ERR(gb->task);
+ goto out_kfifo1;
+ }
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ gb_loopback_insert_id(gb);
+ gb_dev.count++;
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ gb_connection_latency_tag_enable(connection);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+out_kfifo1:
+ kfifo_free(&gb->kfifo_ts);
+out_kfifo0:
+ kfifo_free(&gb->kfifo_lat);
+out_conn:
+ device_unregister(dev);
+out_connection_disable:
+ gb_connection_disable(connection);
+out_ida_remove:
+ ida_simple_remove(&loopback_ida, gb->id);
+out_debugfs_remove:
+ debugfs_remove(gb->file);
+out_connection_destroy:
+ gb_connection_destroy(connection);
+out_kzalloc:
+ kfree(gb);
+
+ return retval;
+}
+
+static void gb_loopback_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_loopback *gb = greybus_get_drvdata(bundle);
+ unsigned long flags;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ gb_connection_disable(gb->connection);
+
+ if (!IS_ERR_OR_NULL(gb->task))
+ kthread_stop(gb->task);
+
+ kfifo_free(&gb->kfifo_lat);
+ kfifo_free(&gb->kfifo_ts);
+ gb_connection_latency_tag_disable(gb->connection);
+ debugfs_remove(gb->file);
+
+ /*
+ * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
+ * is disabled at the beginning and so we can't have any more
+ * incoming/outgoing requests.
+ */
+ gb_loopback_async_wait_all(gb);
+
+ spin_lock_irqsave(&gb_dev.lock, flags);
+ gb_dev.count--;
+ list_del(&gb->entry);
+ spin_unlock_irqrestore(&gb_dev.lock, flags);
+
+ device_unregister(gb->dev);
+ ida_simple_remove(&loopback_ida, gb->id);
+
+ gb_connection_destroy(gb->connection);
+ kfree(gb);
+}
+
+static const struct greybus_bundle_id gb_loopback_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
+
+static struct greybus_driver gb_loopback_driver = {
+ .name = "loopback",
+ .probe = gb_loopback_probe,
+ .disconnect = gb_loopback_disconnect,
+ .id_table = gb_loopback_id_table,
+};
+
+static int loopback_init(void)
+{
+ int retval;
+
+ INIT_LIST_HEAD(&gb_dev.list);
+ INIT_LIST_HEAD(&gb_dev.list_op_async);
+ spin_lock_init(&gb_dev.lock);
+ gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
+
+ retval = class_register(&loopback_class);
+ if (retval)
+ goto err;
+
+ retval = greybus_register(&gb_loopback_driver);
+ if (retval)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ class_unregister(&loopback_class);
+err:
+ debugfs_remove_recursive(gb_dev.root);
+ return retval;
+}
+module_init(loopback_init);
+
+static void __exit loopback_exit(void)
+{
+ debugfs_remove_recursive(gb_dev.root);
+ greybus_deregister(&gb_loopback_driver);
+ class_unregister(&loopback_class);
+ ida_destroy(&loopback_ida);
+}
+module_exit(loopback_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
new file mode 100644
index 000000000000..7b903770a684
--- /dev/null
+++ b/drivers/staging/greybus/manifest.c
@@ -0,0 +1,535 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+
+static const char *get_descriptor_type_string(u8 type)
+{
+ switch (type) {
+ case GREYBUS_TYPE_INVALID:
+ return "invalid";
+ case GREYBUS_TYPE_STRING:
+ return "string";
+ case GREYBUS_TYPE_INTERFACE:
+ return "interface";
+ case GREYBUS_TYPE_CPORT:
+ return "cport";
+ case GREYBUS_TYPE_BUNDLE:
+ return "bundle";
+ default:
+ WARN_ON(1);
+ return "unknown";
+ }
+}
+
+/*
+ * We scan the manifest once to identify where all the descriptors
+ * are. The result is a list of these manifest_desc structures. We
+ * then pick through them for what we're looking for (starting with
+ * the interface descriptor). As each is processed we remove it from
+ * the list. When we're done the list should (probably) be empty.
+ */
+struct manifest_desc {
+ struct list_head links;
+
+ size_t size;
+ void *data;
+ enum greybus_descriptor_type type;
+};
+
+static void release_manifest_descriptor(struct manifest_desc *descriptor)
+{
+ list_del(&descriptor->links);
+ kfree(descriptor);
+}
+
+static void release_manifest_descriptors(struct gb_interface *intf)
+{
+ struct manifest_desc *descriptor;
+ struct manifest_desc *next;
+
+ list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+ release_manifest_descriptor(descriptor);
+}
+
+static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
+{
+ struct manifest_desc *desc, *tmp;
+ struct greybus_descriptor_cport *desc_cport;
+
+ list_for_each_entry_safe(desc, tmp, head, links) {
+ desc_cport = desc->data;
+
+ if (desc->type != GREYBUS_TYPE_CPORT)
+ continue;
+
+ if (desc_cport->bundle == bundle_id)
+ release_manifest_descriptor(desc);
+ }
+}
+
+static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
+{
+ struct manifest_desc *descriptor;
+ struct manifest_desc *next;
+
+ list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+ if (descriptor->type == GREYBUS_TYPE_BUNDLE)
+ return descriptor;
+
+ return NULL;
+}
+
+/*
+ * Validate the given descriptor. Its reported size must fit within
+ * the number of bytes remaining, and it must have a recognized
+ * type. Check that the reported size is at least as big as what
+ * we expect to see. (It could be bigger, perhaps for a new version
+ * of the format.)
+ *
+ * Returns the (non-zero) number of bytes consumed by the descriptor,
+ * or a negative errno.
+ */
+static int identify_descriptor(struct gb_interface *intf,
+ struct greybus_descriptor *desc, size_t size)
+{
+ struct greybus_descriptor_header *desc_header = &desc->header;
+ struct manifest_desc *descriptor;
+ size_t desc_size;
+ size_t expected_size;
+
+ if (size < sizeof(*desc_header)) {
+ dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
+ size, sizeof(*desc_header));
+ return -EINVAL; /* Must at least have header */
+ }
+
+ desc_size = le16_to_cpu(desc_header->size);
+ if (desc_size > size) {
+ dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
+ desc_size, size);
+ return -EINVAL;
+ }
+
+ /* Descriptor needs to at least have a header */
+ expected_size = sizeof(*desc_header);
+
+ switch (desc_header->type) {
+ case GREYBUS_TYPE_STRING:
+ expected_size += sizeof(struct greybus_descriptor_string);
+ expected_size += desc->string.length;
+
+ /* String descriptors are padded to 4 byte boundaries */
+ expected_size = ALIGN(expected_size, 4);
+ break;
+ case GREYBUS_TYPE_INTERFACE:
+ expected_size += sizeof(struct greybus_descriptor_interface);
+ break;
+ case GREYBUS_TYPE_BUNDLE:
+ expected_size += sizeof(struct greybus_descriptor_bundle);
+ break;
+ case GREYBUS_TYPE_CPORT:
+ expected_size += sizeof(struct greybus_descriptor_cport);
+ break;
+ case GREYBUS_TYPE_INVALID:
+ default:
+ dev_err(&intf->dev, "invalid descriptor type (%u)\n",
+ desc_header->type);
+ return -EINVAL;
+ }
+
+ if (desc_size < expected_size) {
+ dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
+ get_descriptor_type_string(desc_header->type),
+ desc_size, expected_size);
+ return -EINVAL;
+ }
+
+ /* Descriptor bigger than what we expect */
+ if (desc_size > expected_size) {
+ dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
+ get_descriptor_type_string(desc_header->type),
+ expected_size, desc_size);
+ }
+
+ descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+ if (!descriptor)
+ return -ENOMEM;
+
+ descriptor->size = desc_size;
+ descriptor->data = (char *)desc + sizeof(*desc_header);
+ descriptor->type = desc_header->type;
+ list_add_tail(&descriptor->links, &intf->manifest_descs);
+
+ /* desc_size is positive and is known to fit in a signed int */
+
+ return desc_size;
+}
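For the string case the expected size is the descriptor header plus the string-descriptor body plus the string itself, rounded up to a 4-byte boundary. A compact restatement of that rule; the concrete sizes in main() are illustrative only, not taken from greybus_manifest.h:

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/*
 * Same check as identify_descriptor(): a descriptor must carry at least its
 * header plus the type-specific body, and string descriptors are padded out
 * to a 4-byte boundary. Sizes are parameters here, not real struct sizes.
 */
static int descriptor_size_ok(size_t desc_size, size_t hdr_size,
			      size_t body_size, size_t string_len,
			      int is_string)
{
	size_t expected = hdr_size + body_size;

	if (is_string)
		expected = ALIGN_UP(expected + string_len, 4);

	return desc_size >= expected;
}

int main(void)
{
	/* e.g. a string descriptor with a 5-byte string and a 2-byte body */
	printf("%d\n", descriptor_size_ok(12, 4, 2, 5, 1));	/* 1: 4+2+5=11 -> 12 */
	printf("%d\n", descriptor_size_ok(11, 4, 2, 5, 1));	/* 0: not padded */
	return 0;
}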
+
+/*
+ * Find the string descriptor having the given id, validate it, and
+ * allocate a duplicate copy of it. The duplicate has an extra byte
+ * which guarantees the returned string is NUL-terminated.
+ *
+ * String index 0 is valid (it represents "no string"), and for
+ * that a null pointer is returned.
+ *
+ * Otherwise returns a pointer to a newly-allocated copy of the
+ * descriptor string, or an error-coded pointer on failure.
+ */
+static char *gb_string_get(struct gb_interface *intf, u8 string_id)
+{
+ struct greybus_descriptor_string *desc_string;
+ struct manifest_desc *descriptor;
+ bool found = false;
+ char *string;
+
+ /* A zero string id means no string (but no error) */
+ if (!string_id)
+ return NULL;
+
+ list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+ if (descriptor->type != GREYBUS_TYPE_STRING)
+ continue;
+
+ desc_string = descriptor->data;
+ if (desc_string->id == string_id) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return ERR_PTR(-ENOENT);
+
+ /* Allocate an extra byte so we can guarantee it's NUL-terminated */
+ string = kmemdup(&desc_string->string, desc_string->length + 1,
+ GFP_KERNEL);
+ if (!string)
+ return ERR_PTR(-ENOMEM);
+ string[desc_string->length] = '\0';
+
+ /* Ok we've used this string, so we're done with it */
+ release_manifest_descriptor(descriptor);
+
+ return string;
+}
+
+/*
+ * Find cport descriptors in the manifest associated with the given
+ * bundle, and set up data structures for the functions that use
+ * them. Returns the number of cports set up for the bundle, or 0
+ * if there is an error.
+ */
+static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
+{
+ struct gb_interface *intf = bundle->intf;
+ struct greybus_descriptor_cport *desc_cport;
+ struct manifest_desc *desc, *next, *tmp;
+ LIST_HEAD(list);
+ u8 bundle_id = bundle->id;
+ u16 cport_id;
+ u32 count = 0;
+ int i;
+
+ /* Set up all cport descriptors associated with this bundle */
+ list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
+ if (desc->type != GREYBUS_TYPE_CPORT)
+ continue;
+
+ desc_cport = desc->data;
+ if (desc_cport->bundle != bundle_id)
+ continue;
+
+ cport_id = le16_to_cpu(desc_cport->id);
+ if (cport_id > CPORT_ID_MAX)
+ goto exit;
+
+ /* Nothing else should have its cport_id as control cport id */
+ if (cport_id == GB_CONTROL_CPORT_ID) {
+ dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
+ cport_id);
+ goto exit;
+ }
+
+ /*
+ * Found one, move it to our temporary list after checking for
+ * duplicates.
+ */
+ list_for_each_entry(tmp, &list, links) {
+ desc_cport = tmp->data;
+ if (cport_id == le16_to_cpu(desc_cport->id)) {
+ dev_err(&bundle->dev,
+ "duplicate CPort %u found\n",
+ cport_id);
+ goto exit;
+ }
+ }
+ list_move_tail(&desc->links, &list);
+ count++;
+ }
+
+ if (!count)
+ return 0;
+
+ bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
+ GFP_KERNEL);
+ if (!bundle->cport_desc)
+ goto exit;
+
+ bundle->num_cports = count;
+
+ i = 0;
+ list_for_each_entry_safe(desc, next, &list, links) {
+ desc_cport = desc->data;
+ memcpy(&bundle->cport_desc[i++], desc_cport,
+ sizeof(*desc_cport));
+
+ /* Release the cport descriptor */
+ release_manifest_descriptor(desc);
+ }
+
+ return count;
+exit:
+ release_cport_descriptors(&list, bundle_id);
+ /*
+ * Free all cports for this bundle to avoid 'excess descriptors'
+ * warnings.
+ */
+ release_cport_descriptors(&intf->manifest_descs, bundle_id);
+
+ return 0; /* Error; count should also be 0 */
+}
+
+/*
+ * Find bundle descriptors in the manifest and set up their data
+ * structures. Returns the number of bundles set up for the
+ * given interface.
+ */
+static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
+{
+ struct manifest_desc *desc;
+ struct gb_bundle *bundle;
+ struct gb_bundle *bundle_next;
+ u32 count = 0;
+ u8 bundle_id;
+ u8 class;
+
+ while ((desc = get_next_bundle_desc(intf))) {
+ struct greybus_descriptor_bundle *desc_bundle;
+
+ /* Found one. Set up its bundle structure */
+ desc_bundle = desc->data;
+ bundle_id = desc_bundle->id;
+ class = desc_bundle->class;
+
+ /* Done with this bundle descriptor */
+ release_manifest_descriptor(desc);
+
+ /* Ignore any legacy control bundles */
+ if (bundle_id == GB_CONTROL_BUNDLE_ID) {
+ dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
+ __func__);
+ release_cport_descriptors(&intf->manifest_descs,
+ bundle_id);
+ continue;
+ }
+
+ /* Nothing else should have its class set to control class */
+ if (class == GREYBUS_CLASS_CONTROL) {
+ dev_err(&intf->dev,
+ "bundle %u cannot use control class\n",
+ bundle_id);
+ goto cleanup;
+ }
+
+ bundle = gb_bundle_create(intf, bundle_id, class);
+ if (!bundle)
+ goto cleanup;
+
+ /*
+ * Now go set up this bundle's functions and cports.
+ *
+ * A 'bundle' represents a device in greybus. It may require
+ * multiple cports for its functioning. If we fail to set up any
+ * cport of a bundle, we had better reject the complete bundle, as
+ * the device may not be able to function properly then.
+ *
+ * But failing to set up a cport of bundle X doesn't mean that
+ * the device corresponding to bundle Y will not work properly.
+ * Bundles should be treated as separate, independent devices.
+ *
+ * While parsing the manifest for an interface, treat bundles as
+ * separate entities and don't reject the entire interface and its
+ * bundles on failure to initialize a cport. But make sure the
+ * bundle which needs the cport gets destroyed properly.
+ */
+ if (!gb_manifest_parse_cports(bundle)) {
+ gb_bundle_destroy(bundle);
+ continue;
+ }
+
+ count++;
+ }
+
+ return count;
+cleanup:
+ /* An error occurred; undo any changes we've made */
+ list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
+ gb_bundle_destroy(bundle);
+ count--;
+ }
+ return 0; /* Error; count should also be 0 */
+}
+
+static bool gb_manifest_parse_interface(struct gb_interface *intf,
+ struct manifest_desc *interface_desc)
+{
+ struct greybus_descriptor_interface *desc_intf = interface_desc->data;
+ struct gb_control *control = intf->control;
+ char *str;
+
+ /* Handle the strings first--they can fail */
+ str = gb_string_get(intf, desc_intf->vendor_stringid);
+ if (IS_ERR(str))
+ return false;
+ control->vendor_string = str;
+
+ str = gb_string_get(intf, desc_intf->product_stringid);
+ if (IS_ERR(str))
+ goto out_free_vendor_string;
+ control->product_string = str;
+
+ /* Assign feature flags communicated via manifest */
+ intf->features = desc_intf->features;
+
+ /* Release the interface descriptor, now that we're done with it */
+ release_manifest_descriptor(interface_desc);
+
+ /* An interface must have at least one bundle descriptor */
+ if (!gb_manifest_parse_bundles(intf)) {
+ dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
+ goto out_err;
+ }
+
+ return true;
+out_err:
+ kfree(control->product_string);
+ control->product_string = NULL;
+out_free_vendor_string:
+ kfree(control->vendor_string);
+ control->vendor_string = NULL;
+
+ return false;
+}
+
+/*
+ * Parse a buffer containing an interface manifest.
+ *
+ * If we find anything wrong with the content/format of the buffer
+ * we reject it.
+ *
+ * The first requirement is that the manifest's version is
+ * one we can parse.
+ *
+ * We make an initial pass through the buffer and identify all of
+ * the descriptors it contains, keeping track of each one's type
+ * and the location and size of its data in the buffer.
+ *
+ * Next we scan the descriptors, looking for an interface descriptor;
+ * there must be exactly one of those. When found, we record the
+ * information it contains, and then remove that descriptor (and any
+ * string descriptors it refers to) from further consideration.
+ *
+ * After that we look for the interface's bundles--there must be at
+ * least one of those.
+ *
+ * Returns true if parsing was successful, false otherwise.
+ */
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
+{
+ struct greybus_manifest *manifest;
+ struct greybus_manifest_header *header;
+ struct greybus_descriptor *desc;
+ struct manifest_desc *descriptor;
+ struct manifest_desc *interface_desc = NULL;
+ u16 manifest_size;
+ u32 found = 0;
+ bool result;
+
+ /* Manifest descriptor list should be empty here */
+ if (WARN_ON(!list_empty(&intf->manifest_descs)))
+ return false;
+
+ /* we have to have at _least_ the manifest header */
+ if (size < sizeof(*header)) {
+ dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
+ size, sizeof(*header));
+ return false;
+ }
+
+ /* Make sure the size is right */
+ manifest = data;
+ header = &manifest->header;
+ manifest_size = le16_to_cpu(header->size);
+ if (manifest_size != size) {
+ dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
+ size, manifest_size);
+ return false;
+ }
+
+ /* Validate major/minor number */
+ if (header->version_major > GREYBUS_VERSION_MAJOR) {
+ dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
+ header->version_major, header->version_minor,
+ GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+ return false;
+ }
+
+ /* OK, find all the descriptors */
+ desc = manifest->descriptors;
+ size -= sizeof(*header);
+ while (size) {
+ int desc_size;
+
+ desc_size = identify_descriptor(intf, desc, size);
+ if (desc_size < 0) {
+ result = false;
+ goto out;
+ }
+ desc = (struct greybus_descriptor *)((char *)desc + desc_size);
+ size -= desc_size;
+ }
+
+ /* There must be a single interface descriptor */
+ list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+ if (descriptor->type == GREYBUS_TYPE_INTERFACE)
+ if (!found++)
+ interface_desc = descriptor;
+ }
+ if (found != 1) {
+ dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
+ found);
+ result = false;
+ goto out;
+ }
+
+ /* Parse the manifest, starting with the interface descriptor */
+ result = gb_manifest_parse_interface(intf, interface_desc);
+
+ /*
+ * We really should have no remaining descriptors, but we
+ * don't know what newer format manifests might leave.
+ */
+ if (result && !list_empty(&intf->manifest_descs))
+ dev_info(&intf->dev, "excess descriptors in interface manifest\n");
+out:
+ release_manifest_descriptors(intf);
+
+ return result;
+}
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
new file mode 100644
index 000000000000..d96428407cd7
--- /dev/null
+++ b/drivers/staging/greybus/manifest.h
@@ -0,0 +1,16 @@
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MANIFEST_H
+#define __MANIFEST_H
+
+struct gb_interface;
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
+
+#endif /* __MANIFEST_H */
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
new file mode 100644
index 000000000000..69f67ddbd4a3
--- /dev/null
+++ b/drivers/staging/greybus/module.c
@@ -0,0 +1,238 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+
+static ssize_t eject_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct gb_module *module = to_gb_module(dev);
+ struct gb_interface *intf;
+ size_t i;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return len;
+
+ for (i = 0; i < module->num_interfaces; ++i) {
+ intf = module->interfaces[i];
+
+ mutex_lock(&intf->mutex);
+ /* Set flag to prevent concurrent activation. */
+ intf->ejected = true;
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+ }
+
+ /* Tell the SVC to eject the primary interface. */
+ ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
+ if (ret)
+ return ret;
+
+ return len;
+}
+static DEVICE_ATTR_WO(eject);
+
+static ssize_t module_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_module *module = to_gb_module(dev);
+
+ return sprintf(buf, "%u\n", module->module_id);
+}
+static DEVICE_ATTR_RO(module_id);
+
+static ssize_t num_interfaces_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_module *module = to_gb_module(dev);
+
+ return sprintf(buf, "%zu\n", module->num_interfaces);
+}
+static DEVICE_ATTR_RO(num_interfaces);
+
+static struct attribute *module_attrs[] = {
+ &dev_attr_eject.attr,
+ &dev_attr_module_id.attr,
+ &dev_attr_num_interfaces.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(module);
+
+static void gb_module_release(struct device *dev)
+{
+ struct gb_module *module = to_gb_module(dev);
+
+ trace_gb_module_release(module);
+
+ kfree(module);
+}
+
+struct device_type greybus_module_type = {
+ .name = "greybus_module",
+ .release = gb_module_release,
+};
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+ size_t num_interfaces)
+{
+ struct gb_interface *intf;
+ struct gb_module *module;
+ int i;
+
+ module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
+ GFP_KERNEL);
+ if (!module)
+ return NULL;
+
+ module->hd = hd;
+ module->module_id = module_id;
+ module->num_interfaces = num_interfaces;
+
+ module->dev.parent = &hd->dev;
+ module->dev.bus = &greybus_bus_type;
+ module->dev.type = &greybus_module_type;
+ module->dev.groups = module_groups;
+ module->dev.dma_mask = hd->dev.dma_mask;
+ device_initialize(&module->dev);
+ dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
+
+ trace_gb_module_create(module);
+
+ for (i = 0; i < num_interfaces; ++i) {
+ intf = gb_interface_create(module, module_id + i);
+ if (!intf) {
+ dev_err(&module->dev, "failed to create interface %u\n",
+ module_id + i);
+ goto err_put_interfaces;
+ }
+ module->interfaces[i] = intf;
+ }
+
+ return module;
+
+err_put_interfaces:
+ for (--i; i > 0; --i)
+ gb_interface_put(module->interfaces[i]);
+
+ put_device(&module->dev);
+
+ return NULL;
+}
+
+/*
+ * Register and enable an interface after first attempting to activate it.
+ */
+static void gb_module_register_interface(struct gb_interface *intf)
+{
+ struct gb_module *module = intf->module;
+ u8 intf_id = intf->interface_id;
+ int ret;
+
+ mutex_lock(&intf->mutex);
+
+ ret = gb_interface_activate(intf);
+ if (ret) {
+ if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
+ dev_err(&module->dev,
+ "failed to activate interface %u: %d\n",
+ intf_id, ret);
+ }
+
+ gb_interface_add(intf);
+ goto err_unlock;
+ }
+
+ ret = gb_interface_add(intf);
+ if (ret)
+ goto err_interface_deactivate;
+
+ ret = gb_interface_enable(intf);
+ if (ret) {
+ dev_err(&module->dev, "failed to enable interface %u: %d\n",
+ intf_id, ret);
+ goto err_interface_deactivate;
+ }
+
+ mutex_unlock(&intf->mutex);
+
+ return;
+
+err_interface_deactivate:
+ gb_interface_deactivate(intf);
+err_unlock:
+ mutex_unlock(&intf->mutex);
+}
+
+static void gb_module_deregister_interface(struct gb_interface *intf)
+{
+ /* Mark as disconnected to prevent I/O during disable. */
+ if (intf->module->disconnected)
+ intf->disconnected = true;
+
+ mutex_lock(&intf->mutex);
+ intf->removed = true;
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+
+ gb_interface_del(intf);
+}
+
+/* Register a module and its interfaces. */
+int gb_module_add(struct gb_module *module)
+{
+ size_t i;
+ int ret;
+
+ ret = device_add(&module->dev);
+ if (ret) {
+ dev_err(&module->dev, "failed to register module: %d\n", ret);
+ return ret;
+ }
+
+ trace_gb_module_add(module);
+
+ for (i = 0; i < module->num_interfaces; ++i)
+ gb_module_register_interface(module->interfaces[i]);
+
+ return 0;
+}
+
+/* Deregister a module and its interfaces. */
+void gb_module_del(struct gb_module *module)
+{
+ size_t i;
+
+ for (i = 0; i < module->num_interfaces; ++i)
+ gb_module_deregister_interface(module->interfaces[i]);
+
+ trace_gb_module_del(module);
+
+ device_del(&module->dev);
+}
+
+void gb_module_put(struct gb_module *module)
+{
+ size_t i;
+
+ for (i = 0; i < module->num_interfaces; ++i)
+ gb_interface_put(module->interfaces[i]);
+
+ put_device(&module->dev);
+}
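Taken together, these entry points imply a create → add → del → put lifecycle. A hedged sketch of a caller, assuming the usual greybus.h/module.h context; in the real patch this sequencing presumably lives in the SVC hotplug path, which is outside this file:

/* Illustrative only; error handling mirrors the functions above. */
static int example_module_hotplug(struct gb_host_device *hd, u8 module_id,
				  size_t num_interfaces)
{
	struct gb_module *module;
	int ret;

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module)
		return -ENOMEM;

	ret = gb_module_add(module);	/* registers the device and interfaces */
	if (ret) {
		gb_module_put(module);	/* drops the creation reference */
		return ret;
	}

	/* ... module in use ... */

	gb_module_del(module);		/* deregister interfaces, then the device */
	gb_module_put(module);		/* final reference drop frees the module */
	return 0;
}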
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
new file mode 100644
index 000000000000..88a97ce04243
--- /dev/null
+++ b/drivers/staging/greybus/module.h
@@ -0,0 +1,34 @@
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __MODULE_H
+#define __MODULE_H
+
+struct gb_module {
+ struct device dev;
+ struct gb_host_device *hd;
+
+ struct list_head hd_node;
+
+ u8 module_id;
+ size_t num_interfaces;
+
+ bool disconnected;
+
+ struct gb_interface *interfaces[0];
+};
+#define to_gb_module(d) container_of(d, struct gb_module, dev)
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+ size_t num_interfaces);
+int gb_module_add(struct gb_module *module);
+void gb_module_del(struct gb_module *module);
+void gb_module_put(struct gb_module *module);
+
+#endif /* __MODULE_H */
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
new file mode 100644
index 000000000000..0123109a1070
--- /dev/null
+++ b/drivers/staging/greybus/operation.c
@@ -0,0 +1,1239 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "greybus_trace.h"
+
+static struct kmem_cache *gb_operation_cache;
+static struct kmem_cache *gb_message_cache;
+
+/* Workqueue to handle Greybus operation completions. */
+static struct workqueue_struct *gb_operation_completion_wq;
+
+/* Wait queue for synchronous cancellations. */
+static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
+
+/*
+ * Protects updates to operation->errno.
+ */
+static DEFINE_SPINLOCK(gb_operations_lock);
+
+static int gb_operation_response_send(struct gb_operation *operation,
+ int errno);
+
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ unsigned long flags;
+
+ spin_lock_irqsave(&connection->lock, flags);
+ switch (connection->state) {
+ case GB_CONNECTION_STATE_ENABLED:
+ break;
+ case GB_CONNECTION_STATE_ENABLED_TX:
+ if (gb_operation_is_incoming(operation))
+ goto err_unlock;
+ break;
+ case GB_CONNECTION_STATE_DISCONNECTING:
+ if (!gb_operation_is_core(operation))
+ goto err_unlock;
+ break;
+ default:
+ goto err_unlock;
+ }
+
+ if (operation->active++ == 0)
+ list_add_tail(&operation->links, &connection->operations);
+
+ trace_gb_operation_get_active(operation);
+
+ spin_unlock_irqrestore(&connection->lock, flags);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&connection->lock, flags);
+
+ return -ENOTCONN;
+}
+
+/* Caller holds operation reference. */
+static void gb_operation_put_active(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ unsigned long flags;
+
+ spin_lock_irqsave(&connection->lock, flags);
+
+ trace_gb_operation_put_active(operation);
+
+ if (--operation->active == 0) {
+ list_del(&operation->links);
+ if (atomic_read(&operation->waiters))
+ wake_up(&gb_operation_cancellation_queue);
+ }
+ spin_unlock_irqrestore(&connection->lock, flags);
+}
+
+static bool gb_operation_is_active(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&connection->lock, flags);
+ ret = operation->active;
+ spin_unlock_irqrestore(&connection->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Set an operation's result.
+ *
+ * Initially an outgoing operation's errno value is -EBADR.
+ * If no error occurs before sending the request message the only
+ * valid value operation->errno can be set to is -EINPROGRESS,
+ * indicating the request has been (or rather is about to be) sent.
+ * At that point nobody should be looking at the result until the
+ * response arrives.
+ *
+ * The first time the result gets set after the request has been
+ * sent, that result "sticks." That is, if two concurrent threads
+ * race to set the result, the first one wins. The return value
+ * tells the caller whether its result was recorded; if not the
+ * caller has nothing more to do.
+ *
+ * The result value -EILSEQ is reserved to signal an implementation
+ * error; if it's ever observed, the code performing the request has
+ * done something fundamentally wrong. It is an error to try to set
+ * the result to -EBADR, and attempts to do so result in a warning,
+ * and -EILSEQ is used instead. Similarly, the only valid result
+ * value to set for an operation in initial state is -EINPROGRESS.
+ * Attempts to do otherwise will also record a (successful) -EILSEQ
+ * operation result.
+ */
+static bool gb_operation_result_set(struct gb_operation *operation, int result)
+{
+ unsigned long flags;
+ int prev;
+
+ if (result == -EINPROGRESS) {
+ /*
+ * -EINPROGRESS is used to indicate the request is
+ * in flight. It should be the first result value
+ * set after the initial -EBADR. Issue a warning
+ * and record an implementation error if it's
+ * set at any other time.
+ */
+ spin_lock_irqsave(&gb_operations_lock, flags);
+ prev = operation->errno;
+ if (prev == -EBADR)
+ operation->errno = result;
+ else
+ operation->errno = -EILSEQ;
+ spin_unlock_irqrestore(&gb_operations_lock, flags);
+ WARN_ON(prev != -EBADR);
+
+ return true;
+ }
+
+ /*
+ * The first result value set after a request has been sent
+ * will be the final result of the operation. Subsequent
+ * attempts to set the result are ignored.
+ *
+ * Note that -EBADR is a reserved "initial state" result
+ * value. Attempts to set this value result in a warning,
+ * and the result code is set to -EILSEQ instead.
+ */
+ if (WARN_ON(result == -EBADR))
+ result = -EILSEQ; /* Nobody should be setting -EBADR */
+
+ spin_lock_irqsave(&gb_operations_lock, flags);
+ prev = operation->errno;
+ if (prev == -EINPROGRESS)
+ operation->errno = result; /* First and final result */
+ spin_unlock_irqrestore(&gb_operations_lock, flags);
+
+ return prev == -EINPROGRESS;
+}
+
+int gb_operation_result(struct gb_operation *operation)
+{
+ int result = operation->errno;
+
+ WARN_ON(result == -EBADR);
+ WARN_ON(result == -EINPROGRESS);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(gb_operation_result);
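The "first result sticks" rule described in the comment above gb_operation_result_set() can be modelled in a few lines of plain C. This is a simplified user-space sketch only: op_errno stands in for operation->errno, and the spinlock and WARN_ON handling are omitted:

#include <stdio.h>
#include <errno.h>

static int op_errno = -EBADR;	/* initial state of an outgoing operation */

static int result_set(int result)
{
	if (result == -EINPROGRESS) {
		int ok = (op_errno == -EBADR);

		op_errno = ok ? -EINPROGRESS : -EILSEQ;
		return 1;			/* mirrors the unconditional true */
	}

	if (result == -EBADR)			/* reserved initial-state value */
		result = -EILSEQ;

	if (op_errno == -EINPROGRESS) {
		op_errno = result;		/* first and final result */
		return 1;
	}
	return 0;				/* a result was already recorded */
}

int main(void)
{
	result_set(-EINPROGRESS);			/* request sent */
	printf("%d\n", result_set(0));			/* 1: completion wins the race */
	printf("%d\n", result_set(-ETIMEDOUT));		/* 0: ignored, too late */
	printf("final errno: %d\n", op_errno);		/* 0 */
	return 0;
}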
+
+/*
+ * Looks up an outgoing operation on a connection and returns a refcounted
+ * pointer if found, or NULL otherwise.
+ */
+static struct gb_operation *
+gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
+{
+ struct gb_operation *operation;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&connection->lock, flags);
+ list_for_each_entry(operation, &connection->operations, links)
+ if (operation->id == operation_id &&
+ !gb_operation_is_incoming(operation)) {
+ gb_operation_get(operation);
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&connection->lock, flags);
+
+ return found ? operation : NULL;
+}
+
+static int gb_message_send(struct gb_message *message, gfp_t gfp)
+{
+ struct gb_connection *connection = message->operation->connection;
+
+ trace_gb_message_send(message);
+ return connection->hd->driver->message_send(connection->hd,
+ connection->hd_cport_id,
+ message,
+ gfp);
+}
+
+/*
+ * Cancel a message we have passed to the host device layer to be sent.
+ */
+static void gb_message_cancel(struct gb_message *message)
+{
+ struct gb_host_device *hd = message->operation->connection->hd;
+
+ hd->driver->message_cancel(message);
+}
+
+static void gb_operation_request_handle(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ int status;
+ int ret;
+
+ if (connection->handler) {
+ status = connection->handler(operation);
+ } else {
+ dev_err(&connection->hd->dev,
+ "%s: unexpected incoming request of type 0x%02x\n",
+ connection->name, operation->type);
+
+ status = -EPROTONOSUPPORT;
+ }
+
+ ret = gb_operation_response_send(operation, status);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: failed to send response %d for type 0x%02x: %d\n",
+ connection->name, status, operation->type, ret);
+ return;
+ }
+}
+
+/*
+ * Process operation work.
+ *
+ * For incoming requests, call the protocol request handler. The operation
+ * result should be -EINPROGRESS at this point.
+ *
+ * For outgoing requests, the operation result value should have
+ * been set before queueing this. The operation callback function
+ * allows the original requester to know the request has completed
+ * and its result is available.
+ */
+static void gb_operation_work(struct work_struct *work)
+{
+ struct gb_operation *operation;
+
+ operation = container_of(work, struct gb_operation, work);
+
+ if (gb_operation_is_incoming(operation))
+ gb_operation_request_handle(operation);
+ else
+ operation->callback(operation);
+
+ gb_operation_put_active(operation);
+ gb_operation_put(operation);
+}
+
+static void gb_operation_message_init(struct gb_host_device *hd,
+ struct gb_message *message, u16 operation_id,
+ size_t payload_size, u8 type)
+{
+ struct gb_operation_msg_hdr *header;
+
+ header = message->buffer;
+
+ message->header = header;
+ message->payload = payload_size ? header + 1 : NULL;
+ message->payload_size = payload_size;
+
+ /*
+ * The type supplied for incoming message buffers will be
+ * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
+ * arriving data so there's no need to initialize the message header.
+ */
+ if (type != GB_REQUEST_TYPE_INVALID) {
+ u16 message_size = (u16)(sizeof(*header) + payload_size);
+
+ /*
+ * For a request, the operation id gets filled in
+ * when the message is sent. For a response, it
+ * will be copied from the request by the caller.
+ *
+ * The result field in a request message must be
+ * zero. It will be set just prior to sending for
+ * a response.
+ */
+ header->size = cpu_to_le16(message_size);
+ header->operation_id = 0;
+ header->type = type;
+ header->result = 0;
+ }
+}
+
+/*
+ * Allocate a message to be used for an operation request or response.
+ * Both types of message contain a common header. The request message
+ * for an outgoing operation is outbound, as is the response message
+ * for an incoming operation. The message header for an outbound
+ * message is partially initialized here.
+ *
+ * The headers for inbound messages don't need to be initialized;
+ * they'll be filled in by arriving data.
+ *
+ * Our message buffers have the following layout:
+ * message header \_ these combined are
+ * message payload / the message size
+ */
+static struct gb_message *
+gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
+ size_t payload_size, gfp_t gfp_flags)
+{
+ struct gb_message *message;
+ struct gb_operation_msg_hdr *header;
+ size_t message_size = payload_size + sizeof(*header);
+
+ if (message_size > hd->buffer_size_max) {
+ dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
+ message_size, hd->buffer_size_max);
+ return NULL;
+ }
+
+ /* Allocate the message structure and buffer. */
+ message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
+ if (!message)
+ return NULL;
+
+ message->buffer = kzalloc(message_size, gfp_flags);
+ if (!message->buffer)
+ goto err_free_message;
+
+ /* Initialize the message. Operation id is filled in later. */
+ gb_operation_message_init(hd, message, 0, payload_size, type);
+
+ return message;
+
+err_free_message:
+ kmem_cache_free(gb_message_cache, message);
+
+ return NULL;
+}
+
+static void gb_operation_message_free(struct gb_message *message)
+{
+ kfree(message->buffer);
+ kmem_cache_free(gb_message_cache, message);
+}
+
+/*
+ * Map an enum gb_operation_status value (which is represented in a
+ * message as a single byte) to an appropriate Linux negative errno.
+ */
+static int gb_operation_status_map(u8 status)
+{
+ switch (status) {
+ case GB_OP_SUCCESS:
+ return 0;
+ case GB_OP_INTERRUPTED:
+ return -EINTR;
+ case GB_OP_TIMEOUT:
+ return -ETIMEDOUT;
+ case GB_OP_NO_MEMORY:
+ return -ENOMEM;
+ case GB_OP_PROTOCOL_BAD:
+ return -EPROTONOSUPPORT;
+ case GB_OP_OVERFLOW:
+ return -EMSGSIZE;
+ case GB_OP_INVALID:
+ return -EINVAL;
+ case GB_OP_RETRY:
+ return -EAGAIN;
+ case GB_OP_NONEXISTENT:
+ return -ENODEV;
+ case GB_OP_MALFUNCTION:
+ return -EILSEQ;
+ case GB_OP_UNKNOWN_ERROR:
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * Map a Linux errno value (from operation->errno) into the value
+ * that should represent it in a response message status sent
+ * over the wire. Returns an enum gb_operation_status value (which
+ * is represented in a message as a single byte).
+ */
+static u8 gb_operation_errno_map(int errno)
+{
+ switch (errno) {
+ case 0:
+ return GB_OP_SUCCESS;
+ case -EINTR:
+ return GB_OP_INTERRUPTED;
+ case -ETIMEDOUT:
+ return GB_OP_TIMEOUT;
+ case -ENOMEM:
+ return GB_OP_NO_MEMORY;
+ case -EPROTONOSUPPORT:
+ return GB_OP_PROTOCOL_BAD;
+ case -EMSGSIZE:
+ return GB_OP_OVERFLOW; /* Could be underflow too */
+ case -EINVAL:
+ return GB_OP_INVALID;
+ case -EAGAIN:
+ return GB_OP_RETRY;
+ case -EILSEQ:
+ return GB_OP_MALFUNCTION;
+ case -ENODEV:
+ return GB_OP_NONEXISTENT;
+ case -EIO:
+ default:
+ return GB_OP_UNKNOWN_ERROR;
+ }
+}
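+
+/*
+ * Illustrative sketch (not part of the driver): the two mapping
+ * helpers above act as inverses for the values they both cover,
+ * for example:
+ *
+ *	gb_operation_errno_map(-ETIMEDOUT) == GB_OP_TIMEOUT
+ *	gb_operation_status_map(GB_OP_TIMEOUT) == -ETIMEDOUT
+ */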
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+ size_t response_size, gfp_t gfp)
+{
+ struct gb_host_device *hd = operation->connection->hd;
+ struct gb_operation_msg_hdr *request_header;
+ struct gb_message *response;
+ u8 type;
+
+ type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
+ response = gb_operation_message_alloc(hd, type, response_size, gfp);
+ if (!response)
+ return false;
+ response->operation = operation;
+
+ /*
+ * Size and type get initialized when the message is
+ * allocated. The errno will be set before sending. All
+ * that's left is the operation id, which we copy from the
+ * request message header (as-is, in little-endian order).
+ */
+ request_header = operation->request->header;
+ response->header->operation_id = request_header->operation_id;
+ operation->response = response;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
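+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical
+ * incoming-request handler allocating a one-byte response payload
+ * before returning its status to the operation core. The handler
+ * name and payload layout are assumptions for illustration only.
+ *
+ *	static int example_request_handler(struct gb_operation *operation)
+ *	{
+ *		u8 *payload;
+ *
+ *		if (!gb_operation_response_alloc(operation, sizeof(*payload),
+ *						 GFP_KERNEL))
+ *			return -ENOMEM;
+ *
+ *		payload = operation->response->payload;
+ *		*payload = 0x01;
+ *
+ *		return 0;
+ *	}
+ */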
+
+/*
+ * Create a Greybus operation to be sent over the given connection.
+ * The request buffer will be big enough for a payload of the given
+ * size.
+ *
+ * For outgoing requests, the request message's header will be
+ * initialized with the type of the request and the message size.
+ * Outgoing operations must also specify the response buffer size,
+ * which must be sufficient to hold all expected response data. The
+ * response message header will eventually be overwritten, so there's
+ * no need to initialize it here.
+ *
+ * Request messages for incoming operations can arrive in interrupt
+ * context, so they must be allocated with GFP_ATOMIC. In this case
+ * the request buffer will be immediately overwritten, so there is
+ * no need to initialize the message header. Responsibility for
+ * allocating a response buffer lies with the incoming request
+ * handler for a protocol. So we don't allocate that here.
+ *
+ * Returns a pointer to the new operation or a null pointer if an
+ * error occurs.
+ */
+static struct gb_operation *
+gb_operation_create_common(struct gb_connection *connection, u8 type,
+ size_t request_size, size_t response_size,
+ unsigned long op_flags, gfp_t gfp_flags)
+{
+ struct gb_host_device *hd = connection->hd;
+ struct gb_operation *operation;
+
+ operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
+ if (!operation)
+ return NULL;
+ operation->connection = connection;
+
+ operation->request = gb_operation_message_alloc(hd, type, request_size,
+ gfp_flags);
+ if (!operation->request)
+ goto err_cache;
+ operation->request->operation = operation;
+
+ /* Allocate the response buffer for outgoing operations */
+ if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
+ if (!gb_operation_response_alloc(operation, response_size,
+ gfp_flags)) {
+ goto err_request;
+ }
+ }
+
+ operation->flags = op_flags;
+ operation->type = type;
+ operation->errno = -EBADR; /* Initial value--means "never set" */
+
+ INIT_WORK(&operation->work, gb_operation_work);
+ init_completion(&operation->completion);
+ kref_init(&operation->kref);
+ atomic_set(&operation->waiters, 0);
+
+ return operation;
+
+err_request:
+ gb_operation_message_free(operation->request);
+err_cache:
+ kmem_cache_free(gb_operation_cache, operation);
+
+ return NULL;
+}
+
+/*
+ * Create a new operation associated with the given connection. The
+ * request and response sizes provided are the number of bytes
+ * required to hold the request/response payload only. Both of
+ * these are allowed to be 0. Note that 0x00 is reserved as an
+ * invalid operation type for all protocols, and this is enforced
+ * here.
+ */
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp)
+{
+ struct gb_operation *operation;
+
+ if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
+ return NULL;
+ if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
+ type &= ~GB_MESSAGE_TYPE_RESPONSE;
+
+ if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
+ flags &= GB_OPERATION_FLAG_USER_MASK;
+
+ operation = gb_operation_create_common(connection, type,
+ request_size, response_size,
+ flags, gfp);
+ if (operation)
+ trace_gb_operation_create(operation);
+
+ return operation;
+}
+EXPORT_SYMBOL_GPL(gb_operation_create_flags);
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp)
+{
+ struct gb_operation *operation;
+
+ flags |= GB_OPERATION_FLAG_CORE;
+
+ operation = gb_operation_create_common(connection, type,
+ request_size, response_size,
+ flags, gfp);
+ if (operation)
+ trace_gb_operation_create_core(operation);
+
+ return operation;
+}
+/* Do not export this function. */
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
+{
+ struct gb_host_device *hd = connection->hd;
+
+ return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
+
+static struct gb_operation *
+gb_operation_create_incoming(struct gb_connection *connection, u16 id,
+ u8 type, void *data, size_t size)
+{
+ struct gb_operation *operation;
+ size_t request_size;
+ unsigned long flags = GB_OPERATION_FLAG_INCOMING;
+
+ /* Caller has made sure we at least have a message header. */
+ request_size = size - sizeof(struct gb_operation_msg_hdr);
+
+ if (!id)
+ flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
+
+ operation = gb_operation_create_common(connection, type,
+ request_size,
+ GB_REQUEST_TYPE_INVALID,
+ flags, GFP_ATOMIC);
+ if (!operation)
+ return NULL;
+
+ operation->id = id;
+ memcpy(operation->request->header, data, size);
+ trace_gb_operation_create_incoming(operation);
+
+ return operation;
+}
+
+/*
+ * Get an additional reference on an operation.
+ */
+void gb_operation_get(struct gb_operation *operation)
+{
+ kref_get(&operation->kref);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get);
+
+/*
+ * Destroy a previously created operation.
+ */
+static void _gb_operation_destroy(struct kref *kref)
+{
+ struct gb_operation *operation;
+
+ operation = container_of(kref, struct gb_operation, kref);
+
+ trace_gb_operation_destroy(operation);
+
+ if (operation->response)
+ gb_operation_message_free(operation->response);
+ gb_operation_message_free(operation->request);
+
+ kmem_cache_free(gb_operation_cache, operation);
+}
+
+/*
+ * Drop a reference on an operation, and destroy it when the last
+ * one is gone.
+ */
+void gb_operation_put(struct gb_operation *operation)
+{
+ if (WARN_ON(!operation))
+ return;
+
+ kref_put(&operation->kref, _gb_operation_destroy);
+}
+EXPORT_SYMBOL_GPL(gb_operation_put);
+
+/* Tell the requester we're done */
+static void gb_operation_sync_callback(struct gb_operation *operation)
+{
+ complete(&operation->completion);
+}
+
+/**
+ * gb_operation_request_send() - send an operation request message
+ * @operation: the operation to initiate
+ * @callback: the operation completion callback
+ * @gfp: the memory flags to use for any allocations
+ *
+ * The caller has filled in any payload so the request message is ready to go.
+ * The callback function supplied will be called when the response message has
+ * arrived, a unidirectional request has been sent, or the operation is
+ * cancelled, indicating that the operation is complete. The callback function
+ * can fetch the result of the operation using gb_operation_result() if
+ * desired.
+ *
+ * Return: 0 if the request was successfully queued in the host-driver queues,
+ * or a negative errno.
+ */
+int gb_operation_request_send(struct gb_operation *operation,
+ gb_operation_callback callback,
+ gfp_t gfp)
+{
+ struct gb_connection *connection = operation->connection;
+ struct gb_operation_msg_hdr *header;
+ unsigned int cycle;
+ int ret;
+
+ if (gb_connection_is_offloaded(connection))
+ return -EBUSY;
+
+ if (!callback)
+ return -EINVAL;
+
+ /*
+ * Record the callback function, which is executed in
+ * non-atomic (workqueue) context when the final result
+ * of an operation has been set.
+ */
+ operation->callback = callback;
+
+ /*
+ * Assign the operation's id, and store it in the request header.
+ * Zero is a reserved operation id for unidirectional operations.
+ */
+ if (gb_operation_is_unidirectional(operation)) {
+ operation->id = 0;
+ } else {
+ cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
+ operation->id = (u16)(cycle % U16_MAX + 1);
+ }
+
+ header = operation->request->header;
+ header->operation_id = cpu_to_le16(operation->id);
+
+ gb_operation_result_set(operation, -EINPROGRESS);
+
+ /*
+ * Get an extra reference on the operation. It'll be dropped when the
+ * operation completes.
+ */
+ gb_operation_get(operation);
+ ret = gb_operation_get_active(operation);
+ if (ret)
+ goto err_put;
+
+ ret = gb_message_send(operation->request, gfp);
+ if (ret)
+ goto err_put_active;
+
+ return 0;
+
+err_put_active:
+ gb_operation_put_active(operation);
+err_put:
+ gb_operation_put(operation);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send);
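+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical
+ * asynchronous request with no request payload and a four-byte
+ * response. The operation type (0x02) and the callback are
+ * assumptions for illustration only; the caller drops its creation
+ * reference with gb_operation_put() once it is done with the
+ * operation.
+ *
+ *	struct gb_operation *operation;
+ *	int ret;
+ *
+ *	operation = gb_operation_create(connection, 0x02, 0, sizeof(__le32),
+ *					GFP_KERNEL);
+ *	if (!operation)
+ *		return -ENOMEM;
+ *
+ *	ret = gb_operation_request_send(operation, example_operation_callback,
+ *					GFP_KERNEL);
+ *	if (ret)
+ *		gb_operation_put(operation);
+ */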
+
+/*
+ * Send a synchronous operation. This function is expected to
+ * block, returning only when the response has arrived or an
+ * error is detected. The return value is the result of the
+ * operation.
+ */
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+ unsigned int timeout)
+{
+ int ret;
+ unsigned long timeout_jiffies;
+
+ ret = gb_operation_request_send(operation, gb_operation_sync_callback,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (timeout)
+ timeout_jiffies = msecs_to_jiffies(timeout);
+ else
+ timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ ret = wait_for_completion_interruptible_timeout(&operation->completion,
+ timeout_jiffies);
+ if (ret < 0) {
+ /* Cancel the operation if interrupted */
+ gb_operation_cancel(operation, -ECANCELED);
+ } else if (ret == 0) {
+ /* Cancel the operation if op timed out */
+ gb_operation_cancel(operation, -ETIMEDOUT);
+ }
+
+ return gb_operation_result(operation);
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
+
+/*
+ * Send a response for an incoming operation request. A non-zero
+ * errno indicates a failed operation.
+ *
+ * If there is any response payload, the incoming request handler is
+ * responsible for allocating the response message. Otherwise it
+ * can simply supply the result errno; this function will
+ * allocate the response message if necessary.
+ */
+static int gb_operation_response_send(struct gb_operation *operation,
+ int errno)
+{
+ struct gb_connection *connection = operation->connection;
+ int ret;
+
+ if (!operation->response &&
+ !gb_operation_is_unidirectional(operation)) {
+ if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
+ return -ENOMEM;
+ }
+
+ /* Record the result */
+ if (!gb_operation_result_set(operation, errno)) {
+ dev_err(&connection->hd->dev, "request result already set\n");
+ return -EIO; /* Shouldn't happen */
+ }
+
+ /* Sender of request does not care about response. */
+ if (gb_operation_is_unidirectional(operation))
+ return 0;
+
+ /* Reference will be dropped when message has been sent. */
+ gb_operation_get(operation);
+ ret = gb_operation_get_active(operation);
+ if (ret)
+ goto err_put;
+
+ /* Fill in the response header and send it */
+ operation->response->header->result = gb_operation_errno_map(errno);
+
+ ret = gb_message_send(operation->response, GFP_KERNEL);
+ if (ret)
+ goto err_put_active;
+
+ return 0;
+
+err_put_active:
+ gb_operation_put_active(operation);
+err_put:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+/*
+ * This function is called when a message send request has completed.
+ */
+void greybus_message_sent(struct gb_host_device *hd,
+ struct gb_message *message, int status)
+{
+ struct gb_operation *operation = message->operation;
+ struct gb_connection *connection = operation->connection;
+
+ /*
+ * If the message was a response, we just need to drop our
+ * reference to the operation. If an error occurred, report
+ * it.
+ *
+ * For requests, if there's no error and the operation is not
+ * unidirectional, there's nothing more to do until the response
+ * arrives. If an error occurred attempting to send it, or if the
+ * operation is unidirectional, record the result of the operation and
+ * schedule its completion.
+ */
+ if (message == operation->response) {
+ if (status) {
+ dev_err(&connection->hd->dev,
+ "%s: error sending response 0x%02x: %d\n",
+ connection->name, operation->type, status);
+ }
+
+ gb_operation_put_active(operation);
+ gb_operation_put(operation);
+ } else if (status || gb_operation_is_unidirectional(operation)) {
+ if (gb_operation_result_set(operation, status)) {
+ queue_work(gb_operation_completion_wq,
+ &operation->work);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(greybus_message_sent);
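+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical host
+ * controller driver reporting completion of a message it was earlier
+ * asked to transmit through its ->message_send() callback. The
+ * function name is an assumption for illustration only.
+ *
+ *	static void example_hd_send_complete(struct gb_host_device *hd,
+ *					     struct gb_message *message,
+ *					     int status)
+ *	{
+ *		greybus_message_sent(hd, message, status);
+ *	}
+ */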
+
+/*
+ * We've received data on a connection, and it doesn't look like a
+ * response, so we assume it's a request.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the request buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_request(struct gb_connection *connection,
+ const struct gb_operation_msg_hdr *header,
+ void *data, size_t size)
+{
+ struct gb_operation *operation;
+ u16 operation_id;
+ u8 type;
+ int ret;
+
+ operation_id = le16_to_cpu(header->operation_id);
+ type = header->type;
+
+ operation = gb_operation_create_incoming(connection, operation_id,
+ type, data, size);
+ if (!operation) {
+ dev_err(&connection->hd->dev,
+ "%s: can't create incoming operation\n",
+ connection->name);
+ return;
+ }
+
+ ret = gb_operation_get_active(operation);
+ if (ret) {
+ gb_operation_put(operation);
+ return;
+ }
+ trace_gb_message_recv_request(operation->request);
+
+ /*
+ * The initial reference to the operation will be dropped when the
+ * request handler returns.
+ */
+ if (gb_operation_result_set(operation, -EINPROGRESS))
+ queue_work(connection->wq, &operation->work);
+}
+
+/*
+ * We've received data that appears to be an operation response
+ * message. Look up the operation, and record that we've received
+ * its response.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the response buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_response(struct gb_connection *connection,
+ const struct gb_operation_msg_hdr *header,
+ void *data, size_t size)
+{
+ struct gb_operation *operation;
+ struct gb_message *message;
+ size_t message_size;
+ u16 operation_id;
+ int errno;
+
+ operation_id = le16_to_cpu(header->operation_id);
+
+ if (!operation_id) {
+ dev_err_ratelimited(&connection->hd->dev,
+ "%s: invalid response id 0 received\n",
+ connection->name);
+ return;
+ }
+
+ operation = gb_operation_find_outgoing(connection, operation_id);
+ if (!operation) {
+ dev_err_ratelimited(&connection->hd->dev,
+ "%s: unexpected response id 0x%04x received\n",
+ connection->name, operation_id);
+ return;
+ }
+
+ errno = gb_operation_status_map(header->result);
+ message = operation->response;
+ message_size = sizeof(*header) + message->payload_size;
+ if (!errno && size > message_size) {
+ dev_err_ratelimited(&connection->hd->dev,
+ "%s: malformed response 0x%02x received (%zu > %zu)\n",
+ connection->name, header->type,
+ size, message_size);
+ errno = -EMSGSIZE;
+ } else if (!errno && size < message_size) {
+ if (gb_operation_short_response_allowed(operation)) {
+ message->payload_size = size - sizeof(*header);
+ } else {
+ dev_err_ratelimited(&connection->hd->dev,
+ "%s: short response 0x%02x received (%zu < %zu)\n",
+ connection->name, header->type,
+ size, message_size);
+ errno = -EMSGSIZE;
+ }
+ }
+
+ /* We must ignore the payload if a bad status is returned */
+ if (errno)
+ size = sizeof(*header);
+
+ /* The rest will be handled in work queue context */
+ if (gb_operation_result_set(operation, errno)) {
+ memcpy(message->buffer, data, size);
+
+ trace_gb_message_recv_response(message);
+
+ queue_work(gb_operation_completion_wq, &operation->work);
+ }
+
+ gb_operation_put(operation);
+}
+
+/*
+ * Handle data arriving on a connection. As soon as we return the
+ * supplied data buffer will be reused (so unless we do something
+ * with it, it's effectively dropped).
+ */
+void gb_connection_recv(struct gb_connection *connection,
+ void *data, size_t size)
+{
+ struct gb_operation_msg_hdr header;
+ struct device *dev = &connection->hd->dev;
+ size_t msg_size;
+
+ if (connection->state == GB_CONNECTION_STATE_DISABLED ||
+ gb_connection_is_offloaded(connection)) {
+ dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
+ connection->name, size);
+ return;
+ }
+
+ if (size < sizeof(header)) {
+ dev_err_ratelimited(dev, "%s: short message received\n",
+ connection->name);
+ return;
+ }
+
+ /* Use memcpy as data may be unaligned */
+ memcpy(&header, data, sizeof(header));
+ msg_size = le16_to_cpu(header.size);
+ if (size < msg_size) {
+ dev_err_ratelimited(dev,
+ "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
+ connection->name,
+ le16_to_cpu(header.operation_id),
+ header.type, size, msg_size);
+ return; /* XXX Should still complete operation */
+ }
+
+ if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
+ gb_connection_recv_response(connection, &header, data,
+ msg_size);
+ } else {
+ gb_connection_recv_request(connection, &header, data,
+ msg_size);
+ }
+}
+
+/*
+ * Cancel an outgoing operation synchronously, and record the given error to
+ * indicate why.
+ */
+void gb_operation_cancel(struct gb_operation *operation, int errno)
+{
+ if (WARN_ON(gb_operation_is_incoming(operation)))
+ return;
+
+ if (gb_operation_result_set(operation, errno)) {
+ gb_message_cancel(operation->request);
+ queue_work(gb_operation_completion_wq, &operation->work);
+ }
+ trace_gb_message_cancel_outgoing(operation->request);
+
+ atomic_inc(&operation->waiters);
+ wait_event(gb_operation_cancellation_queue,
+ !gb_operation_is_active(operation));
+ atomic_dec(&operation->waiters);
+}
+EXPORT_SYMBOL_GPL(gb_operation_cancel);
+
+/*
+ * Cancel an incoming operation synchronously. Called during connection tear
+ * down.
+ */
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
+{
+ if (WARN_ON(!gb_operation_is_incoming(operation)))
+ return;
+
+ if (!gb_operation_is_unidirectional(operation)) {
+ /*
+ * Make sure the request handler has submitted the response
+ * before cancelling it.
+ */
+ flush_work(&operation->work);
+ if (!gb_operation_result_set(operation, errno))
+ gb_message_cancel(operation->response);
+ }
+ trace_gb_message_cancel_incoming(operation->response);
+
+ atomic_inc(&operation->waiters);
+ wait_event(gb_operation_cancellation_queue,
+ !gb_operation_is_active(operation));
+ atomic_dec(&operation->waiters);
+}
+
+/**
+ * gb_operation_sync_timeout() - implement a "simple" synchronous operation
+ * @connection: the Greybus connection to send this to
+ * @type: the type of operation to send
+ * @request: pointer to a memory buffer to copy the request from
+ * @request_size: size of @request
+ * @response: pointer to a memory buffer to copy the response to
+ * @response_size: the size of @response.
+ * @timeout: operation timeout in milliseconds
+ *
+ * This function implements a simple synchronous Greybus operation. It sends
+ * the provided operation request and waits (sleeps) until the corresponding
+ * operation response message has been successfully received, or an error
+ * occurs. @request and @response are buffers to hold the request and response
+ * data respectively, and if they are not NULL, their size must be specified in
+ * @request_size and @response_size.
+ *
+ * If a response payload is to come back, and @response is not NULL,
+ * @response_size number of bytes will be copied into @response if the operation
+ * is successful.
+ *
+ * If there is an error, the response buffer is left alone.
+ */
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size,
+ unsigned int timeout)
+{
+ struct gb_operation *operation;
+ int ret;
+
+ if ((response_size && !response) ||
+ (request_size && !request))
+ return -EINVAL;
+
+ operation = gb_operation_create(connection, type,
+ request_size, response_size,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync_timeout(operation, timeout);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
+ connection->name, operation->id, type, ret);
+ } else {
+ if (response_size) {
+ memcpy(response, operation->response->payload,
+ response_size);
+ }
+ }
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
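+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical simple
+ * synchronous exchange with a 500 ms timeout. The operation type
+ * (0x02) and the request/response structures are assumptions for
+ * illustration only.
+ *
+ *	struct example_request request = { .id = cpu_to_le16(1) };
+ *	struct example_response response;
+ *	int ret;
+ *
+ *	ret = gb_operation_sync_timeout(connection, 0x02,
+ *					&request, sizeof(request),
+ *					&response, sizeof(response), 500);
+ *	if (ret)
+ *		return ret;
+ */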
+
+/**
+ * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
+ * @connection: connection to use
+ * @type: type of operation to send
+ * @request: memory buffer to copy the request from
+ * @request_size: size of @request
+ * @timeout: send timeout in milliseconds
+ *
+ * Initiate a unidirectional operation by sending a request message and
+ * waiting for it to be acknowledged as sent by the host device.
+ *
+ * Note that successful send of a unidirectional operation does not imply that
+ * the request has actually reached the remote end of the connection.
+ */
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+ int type, void *request, int request_size,
+ unsigned int timeout)
+{
+ struct gb_operation *operation;
+ int ret;
+
+ if (request_size && !request)
+ return -EINVAL;
+
+ operation = gb_operation_create_flags(connection, type,
+ request_size, 0,
+ GB_OPERATION_FLAG_UNIDIRECTIONAL,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ if (request_size)
+ memcpy(operation->request->payload, request, request_size);
+
+ ret = gb_operation_request_send_sync_timeout(operation, timeout);
+ if (ret) {
+ dev_err(&connection->hd->dev,
+ "%s: unidirectional operation of type 0x%02x failed: %d\n",
+ connection->name, type, ret);
+ }
+
+ gb_operation_put(operation);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
+
+int __init gb_operation_init(void)
+{
+ gb_message_cache = kmem_cache_create("gb_message_cache",
+ sizeof(struct gb_message), 0, 0, NULL);
+ if (!gb_message_cache)
+ return -ENOMEM;
+
+ gb_operation_cache = kmem_cache_create("gb_operation_cache",
+ sizeof(struct gb_operation), 0, 0, NULL);
+ if (!gb_operation_cache)
+ goto err_destroy_message_cache;
+
+ gb_operation_completion_wq = alloc_workqueue("greybus_completion",
+ 0, 0);
+ if (!gb_operation_completion_wq)
+ goto err_destroy_operation_cache;
+
+ return 0;
+
+err_destroy_operation_cache:
+ kmem_cache_destroy(gb_operation_cache);
+ gb_operation_cache = NULL;
+err_destroy_message_cache:
+ kmem_cache_destroy(gb_message_cache);
+ gb_message_cache = NULL;
+
+ return -ENOMEM;
+}
+
+void gb_operation_exit(void)
+{
+ destroy_workqueue(gb_operation_completion_wq);
+ gb_operation_completion_wq = NULL;
+ kmem_cache_destroy(gb_operation_cache);
+ gb_operation_cache = NULL;
+ kmem_cache_destroy(gb_message_cache);
+ gb_message_cache = NULL;
+}
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
new file mode 100644
index 000000000000..de09a2c7de54
--- /dev/null
+++ b/drivers/staging/greybus/operation.h
@@ -0,0 +1,210 @@
+/*
+ * Greybus operations
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __OPERATION_H
+#define __OPERATION_H
+
+#include <linux/completion.h>
+
+struct gb_operation;
+
+/* The default amount of time a request is given to complete */
+#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
+
+/*
+ * The top bit of the type in an operation message header indicates
+ * whether the message is a request (bit clear) or response (bit set)
+ */
+#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
+
+enum gb_operation_result {
+ GB_OP_SUCCESS = 0x00,
+ GB_OP_INTERRUPTED = 0x01,
+ GB_OP_TIMEOUT = 0x02,
+ GB_OP_NO_MEMORY = 0x03,
+ GB_OP_PROTOCOL_BAD = 0x04,
+ GB_OP_OVERFLOW = 0x05,
+ GB_OP_INVALID = 0x06,
+ GB_OP_RETRY = 0x07,
+ GB_OP_NONEXISTENT = 0x08,
+ GB_OP_UNKNOWN_ERROR = 0xfe,
+ GB_OP_MALFUNCTION = 0xff,
+};
+
+#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
+#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
+
+/*
+ * Protocol code should only examine the payload and payload_size fields, and
+ * host-controller drivers may use the hcpriv field. All other fields are
+ * intended to be private to the operations core code.
+ */
+struct gb_message {
+ struct gb_operation *operation;
+ struct gb_operation_msg_hdr *header;
+
+ void *payload;
+ size_t payload_size;
+
+ void *buffer;
+
+ void *hcpriv;
+};
+
+#define GB_OPERATION_FLAG_INCOMING BIT(0)
+#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
+#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
+#define GB_OPERATION_FLAG_CORE BIT(3)
+
+#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
+ GB_OPERATION_FLAG_UNIDIRECTIONAL)
+
+/*
+ * A Greybus operation is a remote procedure call performed over a
+ * connection between two UniPro interfaces.
+ *
+ * Every operation consists of a request message sent to the other
+ * end of the connection coupled with a reply message returned to
+ * the sender. Every operation has a type, whose interpretation is
+ * dependent on the protocol associated with the connection.
+ *
+ * Only four things in an operation structure are intended to be
+ * directly usable by protocol handlers: the operation's connection
+ * pointer; the operation type; the request message payload (and
+ * size); and the response message payload (and size). Note that a
+ * message with a 0-byte payload has a null message payload pointer.
+ *
+ * In addition, every operation has a result, which is an errno
+ * value. Protocol handlers access the operation result using
+ * gb_operation_result().
+ */
+typedef void (*gb_operation_callback)(struct gb_operation *);
+struct gb_operation {
+ struct gb_connection *connection;
+ struct gb_message *request;
+ struct gb_message *response;
+
+ unsigned long flags;
+ u8 type;
+ u16 id;
+ int errno; /* Operation result */
+
+ struct work_struct work;
+ gb_operation_callback callback;
+ struct completion completion;
+
+ struct kref kref;
+ atomic_t waiters;
+
+ int active;
+ struct list_head links; /* connection->operations */
+};
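+
+/*
+ * Illustrative sketch (not part of the driver): the request fields a
+ * protocol handler is expected to touch. The request structure name
+ * is an assumption for illustration only.
+ *
+ *	const struct example_request *req = operation->request->payload;
+ *
+ *	if (operation->request->payload_size < sizeof(*req))
+ *		return -EINVAL;
+ */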
+
+static inline bool
+gb_operation_is_incoming(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_INCOMING;
+}
+
+static inline bool
+gb_operation_is_unidirectional(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
+}
+
+static inline bool
+gb_operation_short_response_allowed(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
+}
+
+static inline bool gb_operation_is_core(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_CORE;
+}
+
+void gb_connection_recv(struct gb_connection *connection,
+ void *data, size_t size);
+
+int gb_operation_result(struct gb_operation *operation);
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+static inline struct gb_operation *
+gb_operation_create(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, gfp_t gfp)
+{
+ return gb_operation_create_flags(connection, type, request_size,
+ response_size, 0, gfp);
+}
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+void gb_operation_get(struct gb_operation *operation);
+void gb_operation_put(struct gb_operation *operation);
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+ size_t response_size, gfp_t gfp);
+
+int gb_operation_request_send(struct gb_operation *operation,
+ gb_operation_callback callback,
+ gfp_t gfp);
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+ unsigned int timeout);
+static inline int
+gb_operation_request_send_sync(struct gb_operation *operation)
+{
+ return gb_operation_request_send_sync_timeout(operation,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+void gb_operation_cancel(struct gb_operation *operation, int errno);
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
+
+void greybus_message_sent(struct gb_host_device *hd,
+ struct gb_message *message, int status);
+
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size,
+ unsigned int timeout);
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+ int type, void *request, int request_size,
+ unsigned int timeout);
+
+static inline int gb_operation_sync(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size)
+{
+ return gb_operation_sync_timeout(connection, type,
+ request, request_size, response, response_size,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+static inline int gb_operation_unidirectional(struct gb_connection *connection,
+ int type, void *request, int request_size)
+{
+ return gb_operation_unidirectional_timeout(connection, type,
+ request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+int gb_operation_init(void);
+void gb_operation_exit(void);
+
+#endif /* !__OPERATION_H */
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
new file mode 100644
index 000000000000..e85c988b7034
--- /dev/null
+++ b/drivers/staging/greybus/power_supply.c
@@ -0,0 +1,1141 @@
+/*
+ * Power Supply driver for a Greybus module.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include "greybus.h"
+
+#define PROP_MAX 32
+
+struct gb_power_supply_prop {
+ enum power_supply_property prop;
+ u8 gb_prop;
+ int val;
+ int previous_val;
+ bool is_writeable;
+};
+
+struct gb_power_supply {
+ u8 id;
+ bool registered;
+ struct power_supply *psy;
+ struct power_supply_desc desc;
+ char name[64];
+ struct gb_power_supplies *supplies;
+ struct delayed_work work;
+ char *manufacturer;
+ char *model_name;
+ char *serial_number;
+ u8 type;
+ u8 properties_count;
+ u8 properties_count_str;
+ unsigned long last_update;
+ u8 cache_invalid;
+ unsigned int update_interval;
+ bool changed;
+ struct gb_power_supply_prop *props;
+ enum power_supply_property *props_raw;
+ bool pm_acquired;
+ struct mutex supply_lock;
+};
+
+struct gb_power_supplies {
+ struct gb_connection *connection;
+ u8 supplies_count;
+ struct gb_power_supply *supply;
+ struct mutex supplies_lock;
+};
+
+#define to_gb_power_supply(x) power_supply_get_drvdata(x)
+
+/*
+ * General power supply properties that could be absent for various reasons,
+ * such as kernel version differences or vendor-specific variants
+ */
+#ifndef POWER_SUPPLY_PROP_VOLTAGE_BOOT
+ #define POWER_SUPPLY_PROP_VOLTAGE_BOOT -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CURRENT_BOOT
+ #define POWER_SUPPLY_PROP_CURRENT_BOOT -1
+#endif
+#ifndef POWER_SUPPLY_PROP_CALIBRATE
+ #define POWER_SUPPLY_PROP_CALIBRATE -1
+#endif
+
+/* cache time in milliseconds; if cache_time is set to 0 the cache is disabled */
+static unsigned int cache_time = 1000;
+/*
+ * initial and maximum update interval values; the interval backs off
+ * exponentially between the two
+ */
+static unsigned int update_interval_init = 1 * HZ;
+static unsigned int update_interval_max = 30 * HZ;
+
+struct gb_power_supply_changes {
+ enum power_supply_property prop;
+ u32 tolerance_change;
+ void (*prop_changed)(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop);
+};
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop);
+
+static const struct gb_power_supply_changes psy_props_changes[] = {
+ { .prop = GB_POWER_SUPPLY_PROP_STATUS,
+ .tolerance_change = 0,
+ .prop_changed = gb_power_supply_state_change,
+ },
+ { .prop = GB_POWER_SUPPLY_PROP_TEMP,
+ .tolerance_change = 500,
+ .prop_changed = NULL,
+ },
+ { .prop = GB_POWER_SUPPLY_PROP_ONLINE,
+ .tolerance_change = 0,
+ .prop_changed = NULL,
+ },
+};
+
+static int get_psp_from_gb_prop(int gb_prop, enum power_supply_property *psp)
+{
+ int prop;
+
+ switch (gb_prop) {
+ case GB_POWER_SUPPLY_PROP_STATUS:
+ prop = POWER_SUPPLY_PROP_STATUS;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_TYPE:
+ prop = POWER_SUPPLY_PROP_CHARGE_TYPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_HEALTH:
+ prop = POWER_SUPPLY_PROP_HEALTH;
+ break;
+ case GB_POWER_SUPPLY_PROP_PRESENT:
+ prop = POWER_SUPPLY_PROP_PRESENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_ONLINE:
+ prop = POWER_SUPPLY_PROP_ONLINE;
+ break;
+ case GB_POWER_SUPPLY_PROP_AUTHENTIC:
+ prop = POWER_SUPPLY_PROP_AUTHENTIC;
+ break;
+ case GB_POWER_SUPPLY_PROP_TECHNOLOGY:
+ prop = POWER_SUPPLY_PROP_TECHNOLOGY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CYCLE_COUNT:
+ prop = POWER_SUPPLY_PROP_CYCLE_COUNT;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_OCV;
+ break;
+ case GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+ prop = POWER_SUPPLY_PROP_VOLTAGE_BOOT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_MAX:
+ prop = POWER_SUPPLY_PROP_CURRENT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_NOW:
+ prop = POWER_SUPPLY_PROP_CURRENT_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_AVG:
+ prop = POWER_SUPPLY_PROP_CURRENT_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CURRENT_BOOT:
+ prop = POWER_SUPPLY_PROP_CURRENT_BOOT;
+ break;
+ case GB_POWER_SUPPLY_PROP_POWER_NOW:
+ prop = POWER_SUPPLY_PROP_POWER_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_POWER_AVG:
+ prop = POWER_SUPPLY_PROP_POWER_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_FULL:
+ prop = POWER_SUPPLY_PROP_CHARGE_FULL;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_NOW:
+ prop = POWER_SUPPLY_PROP_CHARGE_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_AVG:
+ prop = POWER_SUPPLY_PROP_CHARGE_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ prop = POWER_SUPPLY_PROP_CHARGE_COUNTER;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE;
+ break;
+ case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ prop = POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
+ prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_FULL:
+ prop = POWER_SUPPLY_PROP_ENERGY_FULL;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY:
+ prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_NOW:
+ prop = POWER_SUPPLY_PROP_ENERGY_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_ENERGY_AVG:
+ prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY:
+ prop = POWER_SUPPLY_PROP_CAPACITY;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ prop = POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP:
+ prop = POWER_SUPPLY_PROP_TEMP;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN;
+ break;
+ case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ prop = POWER_SUPPLY_PROP_TIME_TO_FULL_NOW;
+ break;
+ case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ prop = POWER_SUPPLY_PROP_TIME_TO_FULL_AVG;
+ break;
+ case GB_POWER_SUPPLY_PROP_TYPE:
+ prop = POWER_SUPPLY_PROP_TYPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_SCOPE:
+ prop = POWER_SUPPLY_PROP_SCOPE;
+ break;
+ case GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ prop = POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT;
+ break;
+ case GB_POWER_SUPPLY_PROP_CALIBRATE:
+ prop = POWER_SUPPLY_PROP_CALIBRATE;
+ break;
+ default:
+ prop = -1;
+ break;
+ }
+
+ if (prop < 0)
+ return prop;
+
+ *psp = (enum power_supply_property)prop;
+
+ return 0;
+}
+
+static struct gb_connection *get_conn_from_psy(struct gb_power_supply *gbpsy)
+{
+ return gbpsy->supplies->connection;
+}
+
+static struct gb_power_supply_prop *get_psy_prop(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ int i;
+
+ for (i = 0; i < gbpsy->properties_count; i++)
+ if (gbpsy->props[i].prop == psp)
+ return &gbpsy->props[i];
+ return NULL;
+}
+
+static int is_psy_prop_writeable(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ struct gb_power_supply_prop *prop;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -ENOENT;
+ return prop->is_writeable ? 1 : 0;
+}
+
+static int is_prop_valint(enum power_supply_property psp)
+{
+ return ((psp < POWER_SUPPLY_PROP_MODEL_NAME) ? 1 : 0);
+}
+
+static void next_interval(struct gb_power_supply *gbpsy)
+{
+ if (gbpsy->update_interval == update_interval_max)
+ return;
+
+ /* do some exponential back-off in the update interval */
+ gbpsy->update_interval *= 2;
+ if (gbpsy->update_interval > update_interval_max)
+ gbpsy->update_interval = update_interval_max;
+}
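+
+/*
+ * Illustrative sketch (not part of the driver): with the defaults
+ * above the polling interval grows as
+ *
+ *	1*HZ -> 2*HZ -> 4*HZ -> 8*HZ -> 16*HZ -> 30*HZ (capped)
+ *
+ * and is reset to update_interval_init whenever a change is reported.
+ */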
+
+static void __gb_power_supply_changed(struct gb_power_supply *gbpsy)
+{
+ power_supply_changed(gbpsy->psy);
+}
+
+static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret;
+
+ /*
+ * Check gbpsy->pm_acquired to make sure only one pair of 'get_sync'
+ * and 'put_autosuspend' runtime PM calls is made per state property change.
+ */
+ mutex_lock(&gbpsy->supply_lock);
+
+ if ((prop->val == GB_POWER_SUPPLY_STATUS_CHARGING) &&
+ !gbpsy->pm_acquired) {
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ dev_err(&connection->bundle->dev,
+ "failed to set wake lock for charging state\n");
+ else
+ gbpsy->pm_acquired = true;
+ } else {
+ if (gbpsy->pm_acquired) {
+ ret = gb_pm_runtime_put_autosuspend(connection->bundle);
+ if (ret)
+ dev_err(&connection->bundle->dev,
+ "failed to set wake unlock for non-charging state\n");
+ else
+ gbpsy->pm_acquired = false;
+ }
+ }
+
+ mutex_unlock(&gbpsy->supply_lock);
+}
+
+static void check_changed(struct gb_power_supply *gbpsy,
+ struct gb_power_supply_prop *prop)
+{
+ const struct gb_power_supply_changes *psyc;
+ int val = prop->val;
+ int prev_val = prop->previous_val;
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(psy_props_changes); i++) {
+ psyc = &psy_props_changes[i];
+ if (prop->prop == psyc->prop) {
+ if (!psyc->tolerance_change)
+ changed = true;
+ else if (val < prev_val &&
+ prev_val - val > psyc->tolerance_change)
+ changed = true;
+ else if (val > prev_val &&
+ val - prev_val > psyc->tolerance_change)
+ changed = true;
+
+ if (changed && psyc->prop_changed)
+ psyc->prop_changed(gbpsy, prop);
+
+ if (changed)
+ gbpsy->changed = true;
+ break;
+ }
+ }
+}
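+
+/*
+ * Illustrative sketch (not part of the driver): with the table above,
+ * a GB_POWER_SUPPLY_PROP_TEMP reading only counts as changed when it
+ * differs from the previous value by more than 500 units, while any
+ * change in STATUS or ONLINE (tolerance 0) is reported immediately.
+ */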
+
+static int total_props(struct gb_power_supply *gbpsy)
+{
+ /* this returns the number of intval plus strval properties */
+ return (gbpsy->properties_count + gbpsy->properties_count_str);
+}
+
+static void prop_append(struct gb_power_supply *gbpsy,
+ enum power_supply_property prop)
+{
+ enum power_supply_property *new_props_raw;
+
+ gbpsy->properties_count_str++;
+ new_props_raw = krealloc(gbpsy->props_raw, total_props(gbpsy) *
+ sizeof(enum power_supply_property),
+ GFP_KERNEL);
+ if (!new_props_raw) {
+ gbpsy->properties_count_str--;
+ return;
+ }
+ gbpsy->props_raw = new_props_raw;
+ gbpsy->props_raw[total_props(gbpsy) - 1] = prop;
+}
+
+static int __gb_power_supply_set_name(char *init_name, char *name, size_t len)
+{
+ unsigned int i = 0;
+ int ret = 0;
+ struct power_supply *psy;
+
+ if (!strlen(init_name))
+ init_name = "gb_power_supply";
+ strlcpy(name, init_name, len);
+
+ while ((ret < len) && (psy = power_supply_get_by_name(name))) {
+ power_supply_put(psy);
+
+ ret = snprintf(name, len, "%s_%u", init_name, ++i);
+ }
+ if (ret >= len)
+ return -ENOMEM;
+ return i;
+}
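+
+/*
+ * Illustrative sketch (not part of the driver): if a module reports
+ * the model name "battery" while a "battery" supply is already
+ * registered, the helper above settles on "battery_1"; a further
+ * duplicate would become "battery_2", and so on.
+ */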
+
+static void _gb_power_supply_append_props(struct gb_power_supply *gbpsy)
+{
+ if (strlen(gbpsy->manufacturer))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_MANUFACTURER);
+ if (strlen(gbpsy->model_name))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_MODEL_NAME);
+ if (strlen(gbpsy->serial_number))
+ prop_append(gbpsy, POWER_SUPPLY_PROP_SERIAL_NUMBER);
+}
+
+static int gb_power_supply_description_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_get_description_request req;
+ struct gb_power_supply_get_description_response resp;
+ int ret;
+
+ req.psy_id = gbpsy->id;
+
+ ret = gb_operation_sync(connection,
+ GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ gbpsy->manufacturer = kstrndup(resp.manufacturer, PROP_MAX, GFP_KERNEL);
+ if (!gbpsy->manufacturer)
+ return -ENOMEM;
+ gbpsy->model_name = kstrndup(resp.model, PROP_MAX, GFP_KERNEL);
+ if (!gbpsy->model_name)
+ return -ENOMEM;
+ gbpsy->serial_number = kstrndup(resp.serial_number, PROP_MAX,
+ GFP_KERNEL);
+ if (!gbpsy->serial_number)
+ return -ENOMEM;
+
+ gbpsy->type = le16_to_cpu(resp.type);
+ gbpsy->properties_count = resp.properties_count;
+
+ return 0;
+}
+
+static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_get_property_descriptors_request *req;
+ struct gb_power_supply_get_property_descriptors_response *resp;
+ struct gb_operation *op;
+ u8 props_count = gbpsy->properties_count;
+ enum power_supply_property psp;
+ int ret;
+ int i, r = 0;
+
+ if (props_count == 0)
+ return 0;
+
+ op = gb_operation_create(connection,
+ GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
+ sizeof(*req), sizeof(*resp) + props_count *
+ sizeof(struct gb_power_supply_props_desc),
+ GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ req = op->request->payload;
+ req->psy_id = gbpsy->id;
+
+ ret = gb_operation_request_send_sync(op);
+ if (ret < 0)
+ goto out_put_operation;
+
+ resp = op->response->payload;
+
+ /* validate received properties */
+ for (i = 0; i < props_count; i++) {
+ ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+ if (ret < 0) {
+ dev_warn(&connection->bundle->dev,
+ "greybus property %u is not supported by this kernel, dropped\n",
+ resp->props[i].property);
+ gbpsy->properties_count--;
+ }
+ }
+
+ gbpsy->props = kcalloc(gbpsy->properties_count, sizeof(*gbpsy->props),
+ GFP_KERNEL);
+ if (!gbpsy->props) {
+ ret = -ENOMEM;
+ goto out_put_operation;
+ }
+
+ gbpsy->props_raw = kcalloc(gbpsy->properties_count,
+ sizeof(*gbpsy->props_raw), GFP_KERNEL);
+ if (!gbpsy->props_raw) {
+ ret = -ENOMEM;
+ goto out_put_operation;
+ }
+
+ /* Store available properties, skip the ones we do not support */
+ for (i = 0; i < props_count; i++) {
+ ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
+ if (ret < 0) {
+ r++;
+ continue;
+ }
+ gbpsy->props[i - r].prop = psp;
+ gbpsy->props[i - r].gb_prop = resp->props[i].property;
+ gbpsy->props_raw[i - r] = psp;
+ if (resp->props[i].is_writeable)
+ gbpsy->props[i - r].is_writeable = true;
+ }
+
+ /*
+ * Now append the string (char *) properties whose values were already
+ * retrieved by the get_description operation.
+ */
+ _gb_power_supply_append_props(gbpsy);
+
+ ret = 0;
+out_put_operation:
+ gb_operation_put(op);
+
+ return ret;
+}
+
+static int __gb_power_supply_property_update(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_prop *prop;
+ struct gb_power_supply_get_property_request req;
+ struct gb_power_supply_get_property_response resp;
+ int val;
+ int ret;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -EINVAL;
+ req.psy_id = gbpsy->id;
+ req.property = prop->gb_prop;
+
+ ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_GET_PROPERTY,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ val = le32_to_cpu(resp.prop_val);
+ if (val == prop->val)
+ return 0;
+
+ prop->previous_val = prop->val;
+ prop->val = val;
+
+ check_changed(gbpsy, prop);
+
+ return 0;
+}
+
+static int __gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_power_supply_prop *prop;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop)
+ return -EINVAL;
+
+ val->intval = prop->val;
+ return 0;
+}
+
+static int __gb_power_supply_property_strval_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = gbpsy->model_name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = gbpsy->manufacturer;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = gbpsy->serial_number;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int _gb_power_supply_property_get(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret;
+
+ /*
+ * Properties of type const char * were already fetched by the
+ * get_description operation and are cached locally.
+ */
+ if (is_prop_valint(psp))
+ ret = __gb_power_supply_property_get(gbpsy, psp, val);
+ else
+ ret = __gb_power_supply_property_strval_get(gbpsy, psp, val);
+
+ if (ret < 0)
+ dev_err(&connection->bundle->dev, "failed to get property %u\n", psp);
+
+ return 0;
+}
+
+static int is_cache_valid(struct gb_power_supply *gbpsy)
+{
+ /* check whether the cache is still valid or has expired */
+ if (gbpsy->cache_invalid) {
+ gbpsy->cache_invalid = 0;
+ return 0;
+ }
+
+ if (gbpsy->last_update &&
+ time_is_after_jiffies(gbpsy->last_update +
+ msecs_to_jiffies(cache_time)))
+ return 1;
+
+ return 0;
+}
+
+static int gb_power_supply_status_get(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ int ret = 0;
+ int i;
+
+ if (is_cache_valid(gbpsy))
+ return 0;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < gbpsy->properties_count; i++) {
+ ret = __gb_power_supply_property_update(gbpsy,
+ gbpsy->props[i].prop);
+ if (ret < 0)
+ break;
+ }
+
+ if (ret == 0)
+ gbpsy->last_update = jiffies;
+
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+ return ret;
+}
+
+static void gb_power_supply_status_update(struct gb_power_supply *gbpsy)
+{
+ /* check if there is a change that needs to be reported */
+ gb_power_supply_status_get(gbpsy);
+
+ if (!gbpsy->changed)
+ return;
+
+ gbpsy->update_interval = update_interval_init;
+ __gb_power_supply_changed(gbpsy);
+ gbpsy->changed = false;
+}
+
+static void gb_power_supply_work(struct work_struct *work)
+{
+ struct gb_power_supply *gbpsy = container_of(work,
+ struct gb_power_supply,
+ work.work);
+
+ /*
+ * If the poll interval is not set, stop polling; this is especially
+ * helpful at unregister time.
+ */
+ if (!gbpsy->update_interval)
+ return;
+
+ gb_power_supply_status_update(gbpsy);
+ next_interval(gbpsy);
+ schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
+}
+
+static int get_property(struct power_supply *b,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ gb_power_supply_status_get(gbpsy);
+
+ return _gb_power_supply_property_get(gbpsy, psp, val);
+}
+
+static int gb_power_supply_property_set(struct gb_power_supply *gbpsy,
+ enum power_supply_property psp,
+ int val)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct gb_power_supply_prop *prop;
+ struct gb_power_supply_set_property_request req;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(connection->bundle);
+ if (ret)
+ return ret;
+
+ prop = get_psy_prop(gbpsy, psp);
+ if (!prop) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req.psy_id = gbpsy->id;
+ req.property = prop->gb_prop;
+ req.prop_val = cpu_to_le32((s32)val);
+
+ ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_SET_PROPERTY,
+ &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ goto out;
+
+ /* cache the new value immediately */
+ prop->val = val;
+
+out:
+ gb_pm_runtime_put_autosuspend(connection->bundle);
+ return ret;
+}
+
+static int set_property(struct power_supply *b,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ return gb_power_supply_property_set(gbpsy, psp, val->intval);
+}
+
+static int property_is_writeable(struct power_supply *b,
+ enum power_supply_property psp)
+{
+ struct gb_power_supply *gbpsy = to_gb_power_supply(b);
+
+ return is_psy_prop_writeable(gbpsy, psp);
+}
+
+static int gb_power_supply_register(struct gb_power_supply *gbpsy)
+{
+ struct gb_connection *connection = get_conn_from_psy(gbpsy);
+ struct power_supply_config cfg = {};
+
+ cfg.drv_data = gbpsy;
+
+ gbpsy->desc.name = gbpsy->name;
+ gbpsy->desc.type = gbpsy->type;
+ gbpsy->desc.properties = gbpsy->props_raw;
+ gbpsy->desc.num_properties = total_props(gbpsy);
+ gbpsy->desc.get_property = get_property;
+ gbpsy->desc.set_property = set_property;
+ gbpsy->desc.property_is_writeable = property_is_writeable;
+
+ gbpsy->psy = power_supply_register(&connection->bundle->dev,
+ &gbpsy->desc, &cfg);
+ return PTR_ERR_OR_ZERO(gbpsy->psy);
+}
+
+static void _gb_power_supply_free(struct gb_power_supply *gbpsy)
+{
+ kfree(gbpsy->serial_number);
+ kfree(gbpsy->model_name);
+ kfree(gbpsy->manufacturer);
+ kfree(gbpsy->props_raw);
+ kfree(gbpsy->props);
+}
+
+static void _gb_power_supply_release(struct gb_power_supply *gbpsy)
+{
+ gbpsy->update_interval = 0;
+
+ cancel_delayed_work_sync(&gbpsy->work);
+
+ if (gbpsy->registered)
+ power_supply_unregister(gbpsy->psy);
+
+ _gb_power_supply_free(gbpsy);
+}
+
+static void _gb_power_supplies_release(struct gb_power_supplies *supplies)
+{
+ int i;
+
+ if (!supplies->supply)
+ return;
+
+ mutex_lock(&supplies->supplies_lock);
+ for (i = 0; i < supplies->supplies_count; i++)
+ _gb_power_supply_release(&supplies->supply[i]);
+ kfree(supplies->supply);
+ mutex_unlock(&supplies->supplies_lock);
+ kfree(supplies);
+}
+
+static int gb_power_supplies_get_count(struct gb_power_supplies *supplies)
+{
+ struct gb_power_supply_get_supplies_response resp;
+ int ret;
+
+ ret = gb_operation_sync(supplies->connection,
+ GB_POWER_SUPPLY_TYPE_GET_SUPPLIES,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!resp.supplies_count)
+ return -EINVAL;
+
+ supplies->supplies_count = resp.supplies_count;
+
+ return ret;
+}
+
+static int gb_power_supply_config(struct gb_power_supplies *supplies, int id)
+{
+ struct gb_power_supply *gbpsy = &supplies->supply[id];
+ int ret;
+
+ gbpsy->supplies = supplies;
+ gbpsy->id = id;
+
+ ret = gb_power_supply_description_get(gbpsy);
+ if (ret < 0)
+ return ret;
+
+ return gb_power_supply_prop_descriptors_get(gbpsy);
+}
+
+static int gb_power_supply_enable(struct gb_power_supply *gbpsy)
+{
+ int ret;
+
+ /* guarantee that we have a unique name before registering */
+ ret = __gb_power_supply_set_name(gbpsy->model_name, gbpsy->name,
+ sizeof(gbpsy->name));
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&gbpsy->supply_lock);
+
+ ret = gb_power_supply_register(gbpsy);
+ if (ret < 0)
+ return ret;
+
+ gbpsy->update_interval = update_interval_init;
+ INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
+ schedule_delayed_work(&gbpsy->work, 0);
+
+ /* everything went fine; mark it so the release code knows */
+ gbpsy->registered = true;
+
+ return 0;
+}
+
+static int gb_power_supplies_setup(struct gb_power_supplies *supplies)
+{
+ struct gb_connection *connection = supplies->connection;
+ int ret;
+ int i;
+
+ mutex_lock(&supplies->supplies_lock);
+
+ ret = gb_power_supplies_get_count(supplies);
+ if (ret < 0)
+ goto out;
+
+ supplies->supply = kzalloc(supplies->supplies_count *
+ sizeof(struct gb_power_supply),
+ GFP_KERNEL);
+
+ if (!supplies->supply) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < supplies->supplies_count; i++) {
+ ret = gb_power_supply_config(supplies, i);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+ "Fail to configure supplies devices\n");
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_power_supplies_register(struct gb_power_supplies *supplies)
+{
+ struct gb_connection *connection = supplies->connection;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&supplies->supplies_lock);
+
+ for (i = 0; i < supplies->supplies_count; i++) {
+ ret = gb_power_supply_enable(&supplies->supply[i]);
+ if (ret < 0) {
+ dev_err(&connection->bundle->dev,
+ "Fail to enable supplies devices\n");
+ break;
+ }
+ }
+
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_supplies_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_power_supplies *supplies = gb_connection_get_data(connection);
+ struct gb_power_supply *gbpsy;
+ struct gb_message *request;
+ struct gb_power_supply_event_request *payload;
+ u8 psy_id;
+ u8 event;
+ int ret = 0;
+
+ if (op->type != GB_POWER_SUPPLY_TYPE_EVENT) {
+ dev_err(&connection->bundle->dev,
+ "Unsupported unsolicited event: %u\n", op->type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(&connection->bundle->dev,
+ "Wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ psy_id = payload->psy_id;
+ mutex_lock(&supplies->supplies_lock);
+ if (psy_id >= supplies->supplies_count ||
+ !supplies->supply[psy_id].registered) {
+ dev_err(&connection->bundle->dev,
+ "Event received for unconfigured power_supply id: %d\n",
+ psy_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ event = payload->event;
+ /*
+ * We only handle events after setup is done and before release is
+ * running; checking update_interval is enough for that.
+ */
+ gbpsy = &supplies->supply[psy_id];
+ if (!gbpsy->update_interval) {
+ ret = -ESHUTDOWN;
+ goto out_unlock;
+ }
+
+ if (event & GB_POWER_SUPPLY_UPDATE) {
+ /*
+ * We need to make sure the cache is invalidated; otherwise no new
+ * values for the properties will be fetched and the whole purpose
+ * of this event is missed.
+ */
+ gbpsy->cache_invalid = 1;
+ gb_power_supply_status_update(gbpsy);
+ }
+
+out_unlock:
+ mutex_unlock(&supplies->supplies_lock);
+ return ret;
+}
+
+static int gb_power_supply_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_power_supplies *supplies;
+ int ret;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_POWER_SUPPLY)
+ return -ENODEV;
+
+ supplies = kzalloc(sizeof(*supplies), GFP_KERNEL);
+ if (!supplies)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_supplies_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto out;
+ }
+
+ supplies->connection = connection;
+ gb_connection_set_data(connection, supplies);
+
+ mutex_init(&supplies->supplies_lock);
+
+ greybus_set_drvdata(bundle, supplies);
+
+ /* We aren't ready to receive an incoming request yet */
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto error_connection_destroy;
+
+ ret = gb_power_supplies_setup(supplies);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ /* We are ready to receive an incoming request now, enable RX as well */
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto error_connection_disable;
+
+ ret = gb_power_supplies_register(supplies);
+ if (ret < 0)
+ goto error_connection_disable;
+
+ gb_pm_runtime_put_autosuspend(bundle);
+ return 0;
+
+error_connection_disable:
+ gb_connection_disable(connection);
+error_connection_destroy:
+ gb_connection_destroy(connection);
+out:
+ _gb_power_supplies_release(supplies);
+ return ret;
+}
+
+static void gb_power_supply_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_power_supplies *supplies = greybus_get_drvdata(bundle);
+
+ gb_connection_disable(supplies->connection);
+ gb_connection_destroy(supplies->connection);
+
+ _gb_power_supplies_release(supplies);
+}
+
+static const struct greybus_bundle_id gb_power_supply_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_POWER_SUPPLY) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_power_supply_id_table);
+
+static struct greybus_driver gb_power_supply_driver = {
+ .name = "power_supply",
+ .probe = gb_power_supply_probe,
+ .disconnect = gb_power_supply_disconnect,
+ .id_table = gb_power_supply_id_table,
+};
+module_greybus_driver(gb_power_supply_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
new file mode 100644
index 000000000000..c4bf3298ba07
--- /dev/null
+++ b/drivers/staging/greybus/pwm.c
@@ -0,0 +1,338 @@
+/*
+ * PWM Greybus driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pwm.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_pwm_chip {
+ struct gb_connection *connection;
+ u8 pwm_max; /* max pwm number */
+
+ struct pwm_chip chip;
+ struct pwm_chip *pwm;
+};
+#define pwm_chip_to_gb_pwm_chip(chip) \
+ container_of(chip, struct gb_pwm_chip, chip)
+
+
+static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
+{
+ struct gb_pwm_count_response response;
+ int ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
+ NULL, 0, &response, sizeof(response));
+ if (ret)
+ return ret;
+ pwmc->pwm_max = response.count;
+ return 0;
+}
+
+static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
+ u8 which)
+{
+ struct gb_pwm_activate_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ACTIVATE,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
+ u8 which)
+{
+ struct gb_pwm_deactivate_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DEACTIVATE,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
+ u8 which, u32 duty, u32 period)
+{
+ struct gb_pwm_config_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+ request.duty = cpu_to_le32(duty);
+ request.period = cpu_to_le32(period);
+
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_CONFIG,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
+ u8 which, u8 polarity)
+{
+ struct gb_pwm_polarity_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+ request.polarity = polarity;
+
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_POLARITY,
+ &request, sizeof(request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
+ u8 which)
+{
+ struct gb_pwm_enable_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ENABLE,
+ &request, sizeof(request), NULL, 0);
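+ /* on success, keep the runtime PM reference until the PWM is disabled */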
+ if (ret)
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
+ u8 which)
+{
+ struct gb_pwm_disable_request request;
+ struct gbphy_device *gbphy_dev;
+ int ret;
+
+ if (which > pwmc->pwm_max)
+ return -EINVAL;
+
+ request.which = which;
+
+ ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
+ &request, sizeof(request), NULL, 0);
+
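+ /* drop the runtime PM reference taken when the PWM was enabled */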
+ gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+}
+
+static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
+};
+
+static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ if (pwm_is_enabled(pwm))
+ dev_warn(chip->dev, "freeing PWM device without disabling\n");
+
+ gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
+}
+
+static int gb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ return gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_ns, period_ns);
+};
+
+static int gb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ return gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, polarity);
+};
+
+static int gb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
+};
+
+static void gb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
+
+ gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+};
+
+static const struct pwm_ops gb_pwm_ops = {
+ .request = gb_pwm_request,
+ .free = gb_pwm_free,
+ .config = gb_pwm_config,
+ .set_polarity = gb_pwm_set_polarity,
+ .enable = gb_pwm_enable,
+ .disable = gb_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct gb_pwm_chip *pwmc;
+ struct pwm_chip *pwm;
+ int ret;
+
+ pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
+ if (!pwmc)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_pwmc_free;
+ }
+
+ pwmc->connection = connection;
+ gb_connection_set_data(connection, pwmc);
+ gb_gbphy_set_data(gbphy_dev, pwmc);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ /* Query number of pwms present */
+ ret = gb_pwm_count_operation(pwmc);
+ if (ret)
+ goto exit_connection_disable;
+
+ pwm = &pwmc->chip;
+
+ pwm->dev = &gbphy_dev->dev;
+ pwm->ops = &gb_pwm_ops;
+ pwm->base = -1; /* Allocate base dynamically */
+ pwm->npwm = pwmc->pwm_max + 1;
+ pwm->can_sleep = true; /* FIXME */
+
+ ret = pwmchip_add(pwm);
+ if (ret) {
+ dev_err(&gbphy_dev->dev,
+ "failed to register PWM: %d\n", ret);
+ goto exit_connection_disable;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_pwmc_free:
+ kfree(pwmc);
+ return ret;
+}
+
+static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = pwmc->connection;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ pwmchip_remove(&pwmc->chip);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ kfree(pwmc);
+}
+
+static const struct gbphy_device_id gb_pwm_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_PWM) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_pwm_id_table);
+
+static struct gbphy_driver pwm_driver = {
+ .name = "pwm",
+ .probe = gb_pwm_probe,
+ .remove = gb_pwm_remove,
+ .id_table = gb_pwm_id_table,
+};
+
+module_gbphy_driver(pwm_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
new file mode 100644
index 000000000000..729d25811568
--- /dev/null
+++ b/drivers/staging/greybus/raw.c
@@ -0,0 +1,381 @@
+/*
+ * Greybus driver for the Raw protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/uaccess.h>
+
+#include "greybus.h"
+
+struct gb_raw {
+ struct gb_connection *connection;
+
+ struct list_head list;
+ int list_data;
+ struct mutex list_lock;
+ dev_t dev;
+ struct cdev cdev;
+ struct device *device;
+};
+
+struct raw_data {
+ struct list_head entry;
+ u32 len;
+ u8 data[0];
+};
+
+static struct class *raw_class;
+static int raw_major;
+static const struct file_operations raw_fops;
+static DEFINE_IDA(minors);
+
+/* Number of minor devices this driver supports */
+#define NUM_MINORS 256
+
+/* Maximum size of any one send data buffer we support */
+#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
+
+/*
+ * Maximum size of the data in the receive buffer we allow before we start to
+ * drop messages on the floor
+ */
+#define MAX_DATA_SIZE (MAX_PACKET_SIZE * 8)
+
+/*
+ * Add the raw data message to the list of received messages.
+ */
+static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
+{
+ struct raw_data *raw_data;
+ struct device *dev = &raw->connection->bundle->dev;
+ int retval = 0;
+
+ if (len > MAX_PACKET_SIZE) {
+ dev_err(dev, "Too big of a data packet, rejected\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&raw->list_lock);
+ if ((raw->list_data + len) > MAX_DATA_SIZE) {
+ dev_err(dev, "Too much data in receive buffer, now dropping packets\n");
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ raw_data = kmalloc(sizeof(*raw_data) + len, GFP_KERNEL);
+ if (!raw_data) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ raw->list_data += len;
+ raw_data->len = len;
+ memcpy(&raw_data->data[0], data, len);
+
+ list_add_tail(&raw_data->entry, &raw->list);
+exit:
+ mutex_unlock(&raw->list_lock);
+ return retval;
+}
+
+static int gb_raw_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct device *dev = &connection->bundle->dev;
+ struct gb_raw *raw = greybus_get_drvdata(connection->bundle);
+ struct gb_raw_send_request *receive;
+ u32 len;
+
+ if (op->type != GB_RAW_TYPE_SEND) {
+ dev_err(dev, "unknown request type 0x%02x\n", op->type);
+ return -EINVAL;
+ }
+
+ /* Verify size of payload */
+ if (op->request->payload_size < sizeof(*receive)) {
+ dev_err(dev, "raw receive request too small (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*receive));
+ return -EINVAL;
+ }
+ receive = op->request->payload;
+ len = le32_to_cpu(receive->len);
+ if (len != (int)(op->request->payload_size - sizeof(__le32))) {
+ dev_err(dev, "raw receive request wrong size %d vs %d\n", len,
+ (int)(op->request->payload_size - sizeof(__le32)));
+ return -EINVAL;
+ }
+ if (len == 0) {
+ dev_err(dev, "raw receive request of 0 bytes?\n");
+ return -EINVAL;
+ }
+
+ return receive_data(raw, len, receive->data);
+}
+
+static int gb_raw_send(struct gb_raw *raw, u32 len, const char __user *data)
+{
+ struct gb_connection *connection = raw->connection;
+ struct gb_raw_send_request *request;
+ int retval;
+
+ request = kmalloc(len + sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ if (copy_from_user(&request->data[0], data, len)) {
+ kfree(request);
+ return -EFAULT;
+ }
+
+ request->len = cpu_to_le32(len);
+
+ retval = gb_operation_sync(connection, GB_RAW_TYPE_SEND,
+ request, len + sizeof(*request),
+ NULL, 0);
+
+ kfree(request);
+ return retval;
+}
+
+static int gb_raw_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_raw *raw;
+ int retval;
+ int minor;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_RAW)
+ return -ENODEV;
+
+ raw = kzalloc(sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ gb_raw_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto error_free;
+ }
+
+ INIT_LIST_HEAD(&raw->list);
+ mutex_init(&raw->list_lock);
+
+ raw->connection = connection;
+ greybus_set_drvdata(bundle, raw);
+
+ minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ if (minor < 0) {
+ retval = minor;
+ goto error_connection_destroy;
+ }
+
+ raw->dev = MKDEV(raw_major, minor);
+ cdev_init(&raw->cdev, &raw_fops);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto error_remove_ida;
+
+ retval = cdev_add(&raw->cdev, raw->dev, 1);
+ if (retval)
+ goto error_connection_disable;
+
+ raw->device = device_create(raw_class, &connection->bundle->dev,
+ raw->dev, raw, "gb!raw%d", minor);
+ if (IS_ERR(raw->device)) {
+ retval = PTR_ERR(raw->device);
+ goto error_del_cdev;
+ }
+
+ return 0;
+
+error_del_cdev:
+ cdev_del(&raw->cdev);
+
+error_connection_disable:
+ gb_connection_disable(connection);
+
+error_remove_ida:
+ ida_simple_remove(&minors, minor);
+
+error_connection_destroy:
+ gb_connection_destroy(connection);
+
+error_free:
+ kfree(raw);
+ return retval;
+}
+
+static void gb_raw_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_raw *raw = greybus_get_drvdata(bundle);
+ struct gb_connection *connection = raw->connection;
+ struct raw_data *raw_data;
+ struct raw_data *temp;
+
+ // FIXME - handle removing a connection when the char device node is open.
+ device_destroy(raw_class, raw->dev);
+ cdev_del(&raw->cdev);
+ gb_connection_disable(connection);
+ ida_simple_remove(&minors, MINOR(raw->dev));
+ gb_connection_destroy(connection);
+
+ mutex_lock(&raw->list_lock);
+ list_for_each_entry_safe(raw_data, temp, &raw->list, entry) {
+ list_del(&raw_data->entry);
+ kfree(raw_data);
+ }
+ mutex_unlock(&raw->list_lock);
+
+ kfree(raw);
+}
+
+/*
+ * Character device node interfaces.
+ *
+ * Note that each read()/write() call handles exactly one message.
+ * This means for read(), you have to provide a big enough buffer for the full
+ * message to be copied into. If the buffer isn't big enough, the read() will
+ * fail with -ENOSPC.
+ */
+
+static int raw_open(struct inode *inode, struct file *file)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct gb_raw *raw = container_of(cdev, struct gb_raw, cdev);
+
+ file->private_data = raw;
+ return 0;
+}
+
+static ssize_t raw_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct gb_raw *raw = file->private_data;
+ int retval;
+
+ if (!count)
+ return 0;
+
+ if (count > MAX_PACKET_SIZE)
+ return -E2BIG;
+
+ retval = gb_raw_send(raw, count, buf);
+ if (retval)
+ return retval;
+
+ return count;
+}
+
+static ssize_t raw_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct gb_raw *raw = file->private_data;
+ int retval = 0;
+ struct raw_data *raw_data;
+
+ mutex_lock(&raw->list_lock);
+ if (list_empty(&raw->list))
+ goto exit;
+
+ raw_data = list_first_entry(&raw->list, struct raw_data, entry);
+ if (raw_data->len > count) {
+ retval = -ENOSPC;
+ goto exit;
+ }
+
+ if (copy_to_user(buf, &raw_data->data[0], raw_data->len)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ list_del(&raw_data->entry);
+ raw->list_data -= raw_data->len;
+ retval = raw_data->len;
+ kfree(raw_data);
+
+exit:
+ mutex_unlock(&raw->list_lock);
+ return retval;
+}
+
+static const struct file_operations raw_fops = {
+ .owner = THIS_MODULE,
+ .write = raw_write,
+ .read = raw_read,
+ .open = raw_open,
+ .llseek = noop_llseek,
+};
+
+static const struct greybus_bundle_id gb_raw_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_RAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_raw_id_table);
+
+static struct greybus_driver gb_raw_driver = {
+ .name = "raw",
+ .probe = gb_raw_probe,
+ .disconnect = gb_raw_disconnect,
+ .id_table = gb_raw_id_table,
+};
+
+static int raw_init(void)
+{
+ dev_t dev;
+ int retval;
+
+ raw_class = class_create(THIS_MODULE, "gb_raw");
+ if (IS_ERR(raw_class)) {
+ retval = PTR_ERR(raw_class);
+ goto error_class;
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
+ if (retval < 0)
+ goto error_chrdev;
+
+ raw_major = MAJOR(dev);
+
+ retval = greybus_register(&gb_raw_driver);
+ if (retval)
+ goto error_gb;
+
+ return 0;
+
+error_gb:
+ unregister_chrdev_region(dev, NUM_MINORS);
+error_chrdev:
+ class_destroy(raw_class);
+error_class:
+ return retval;
+}
+module_init(raw_init);
+
+static void __exit raw_exit(void)
+{
+ greybus_deregister(&gb_raw_driver);
+ unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
+ class_destroy(raw_class);
+ ida_destroy(&minors);
+}
+module_exit(raw_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
new file mode 100644
index 000000000000..5649ef1e379d
--- /dev/null
+++ b/drivers/staging/greybus/sdio.c
@@ -0,0 +1,884 @@
+/*
+ * SD/MMC Greybus driver.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+struct gb_sdio_host {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mutex lock; /* lock for this host */
+ size_t data_max;
+ spinlock_t xfer; /* lock to cancel ongoing transfer */
+ bool xfer_stop;
+ struct workqueue_struct *mrq_workqueue;
+ struct work_struct mrqwork;
+ u8 queued_events;
+ bool removed;
+ bool card_present;
+ bool read_only;
+};
+
+
+#define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_OPCODE)
+#define GB_SDIO_RSP_R3_R4 (GB_SDIO_RSP_PRESENT)
+#define GB_SDIO_RSP_R2 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_136)
+#define GB_SDIO_RSP_R1B (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
+ GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
+
+/* the kernel vdd bits start at 0x80; translate them to the Greybus ones starting at 0x01 */
+#define GB_SDIO_VDD_SHIFT 8
+
+#ifndef MMC_CAP2_CORE_RUNTIME_PM
+#define MMC_CAP2_CORE_RUNTIME_PM 0
+#endif
+
+static inline bool single_op(struct mmc_command *cmd)
+{
+ uint32_t opcode = cmd->opcode;
+
+ return opcode == MMC_WRITE_BLOCK ||
+ opcode == MMC_READ_SINGLE_BLOCK;
+}
+
+static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
+{
+ u32 caps = 0;
+ u32 caps2 = 0;
+
+ caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
+ ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
+ ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
+ ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
+ ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
+ ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
+ ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
+ ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
+ ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
+ ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
+ ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
+ ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
+
+ caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
+ ((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
+ ((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
+ ((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
+
+ host->mmc->caps = caps;
+ host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;
+
+ if (caps & MMC_CAP_NONREMOVABLE)
+ host->card_present = true;
+}
+
+static u32 _gb_sdio_get_host_ocr(u32 ocr)
+{
+ return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
+ ((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
+ ((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
+ ((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
+ ((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
+ ((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
+ ((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
+ ((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
+ ((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
+ ((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
+ ((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
+ ((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
+ ((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
+ ((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
+ ((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
+ ((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
+ ((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
+ );
+}
+
+static int gb_sdio_get_caps(struct gb_sdio_host *host)
+{
+ struct gb_sdio_get_caps_response response;
+ struct mmc_host *mmc = host->mmc;
+ u16 data_max;
+ u32 blksz;
+ u32 ocr;
+ u32 r;
+ int ret;
+
+ ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
+ NULL, 0, &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+ r = le32_to_cpu(response.caps);
+
+ _gb_sdio_set_host_caps(host, r);
+
+ /* get the max block size that could fit our payload */
+ data_max = gb_operation_get_payload_size_max(host->connection);
+ data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
+ data_max - sizeof(struct gb_sdio_transfer_response));
+
+ blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
+ blksz = max_t(u32, 512, blksz);
+
+ mmc->max_blk_size = rounddown_pow_of_two(blksz);
+ mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
+ host->data_max = data_max;
+
+ /* get ocr supported values */
+ ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
+ mmc->ocr_avail = ocr;
+ mmc->ocr_avail_sdio = mmc->ocr_avail;
+ mmc->ocr_avail_sd = mmc->ocr_avail;
+ mmc->ocr_avail_mmc = mmc->ocr_avail;
+
+ /* get frequency range values */
+ mmc->f_min = le32_to_cpu(response.f_min);
+ mmc->f_max = le32_to_cpu(response.f_max);
+
+ return 0;
+}
+
+static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
+{
+ if (event & GB_SDIO_CARD_INSERTED)
+ host->queued_events &= ~GB_SDIO_CARD_REMOVED;
+ else if (event & GB_SDIO_CARD_REMOVED)
+ host->queued_events &= ~GB_SDIO_CARD_INSERTED;
+
+ host->queued_events |= event;
+}
+
+static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
+{
+ u8 state_changed = 0;
+
+ if (event & GB_SDIO_CARD_INSERTED) {
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ return 0;
+ if (host->card_present)
+ return 0;
+ host->card_present = true;
+ state_changed = 1;
+ }
+
+ if (event & GB_SDIO_CARD_REMOVED) {
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ return 0;
+ if (!(host->card_present))
+ return 0;
+ host->card_present = false;
+ state_changed = 1;
+ }
+
+ if (event & GB_SDIO_WP)
+ host->read_only = true;
+
+ if (state_changed) {
+ dev_info(mmc_dev(host->mmc), "card %s now event\n",
+ (host->card_present ? "inserted" : "removed"));
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ return 0;
+}
+
+static int gb_sdio_request_handler(struct gb_operation *op)
+{
+ struct gb_sdio_host *host = gb_connection_get_data(op->connection);
+ struct gb_message *request;
+ struct gb_sdio_event_request *payload;
+ u8 type = op->type;
+ int ret = 0;
+ u8 event;
+
+ if (type != GB_SDIO_TYPE_EVENT) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported unsolicited event: %u\n", type);
+ return -EINVAL;
+ }
+
+ request = op->request;
+
+ if (request->payload_size < sizeof(*payload)) {
+ dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
+ request->payload_size, sizeof(*payload));
+ return -EINVAL;
+ }
+
+ payload = request->payload;
+ event = payload->event;
+
+ if (host->removed)
+ _gb_queue_event(host, event);
+ else
+ ret = _gb_sdio_process_events(host, event);
+
+ return ret;
+}
+
+static int gb_sdio_set_ios(struct gb_sdio_host *host,
+ struct gb_sdio_set_ios_request *request)
+{
+ int ret;
+
+ ret = gbphy_runtime_get_sync(host->gbphy_dev);
+ if (ret)
+ return ret;
+
+ ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
+ sizeof(*request), NULL, 0);
+
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+
+ return ret;
+}
+
+static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
+ size_t len, u16 nblocks, off_t skip)
+{
+ struct gb_sdio_transfer_request *request;
+ struct gb_sdio_transfer_response *response;
+ struct gb_operation *operation;
+ struct scatterlist *sg = data->sg;
+ unsigned int sg_len = data->sg_len;
+ size_t copied;
+ u16 send_blksz;
+ u16 send_blocks;
+ int ret;
+
+ WARN_ON(len > host->data_max);
+
+ operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+ len + sizeof(*request),
+ sizeof(*response), GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->data_flags = (data->flags >> 8);
+ request->data_blocks = cpu_to_le16(nblocks);
+ request->data_blksz = cpu_to_le16(data->blksz);
+
+ copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
+
+ if (copied != len) {
+ ret = -EINVAL;
+ goto err_put_operation;
+ }
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret < 0)
+ goto err_put_operation;
+
+ response = operation->response->payload;
+
+ send_blocks = le16_to_cpu(response->data_blocks);
+ send_blksz = le16_to_cpu(response->data_blksz);
+
+ if (len != send_blksz * send_blocks) {
+ dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
+ len, send_blksz * send_blocks);
+ ret = -EINVAL;
+ }
+
+err_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
+ size_t len, u16 nblocks, off_t skip)
+{
+ struct gb_sdio_transfer_request *request;
+ struct gb_sdio_transfer_response *response;
+ struct gb_operation *operation;
+ struct scatterlist *sg = data->sg;
+ unsigned int sg_len = data->sg_len;
+ size_t copied;
+ u16 recv_blksz;
+ u16 recv_blocks;
+ int ret;
+
+ WARN_ON(len > host->data_max);
+
+ operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
+ sizeof(*request),
+ len + sizeof(*response), GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->data_flags = (data->flags >> 8);
+ request->data_blocks = cpu_to_le16(nblocks);
+ request->data_blksz = cpu_to_le16(data->blksz);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret < 0)
+ goto err_put_operation;
+
+ response = operation->response->payload;
+ recv_blocks = le16_to_cpu(response->data_blocks);
+ recv_blksz = le16_to_cpu(response->data_blksz);
+
+ if (len != recv_blksz * recv_blocks) {
+ dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
+ recv_blksz * recv_blocks, len);
+ ret = -EINVAL;
+ goto err_put_operation;
+ }
+
+ copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
+ skip);
+ if (copied != len)
+ ret = -EINVAL;
+
+err_put_operation:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
+{
+ size_t left, len;
+ off_t skip = 0;
+ int ret = 0;
+ u16 nblocks;
+
+ if (single_op(data->mrq->cmd) && data->blocks > 1) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ left = data->blksz * data->blocks;
+
+ while (left) {
+ /* check if a stop transmission is pending */
+ spin_lock(&host->xfer);
+ if (host->xfer_stop) {
+ host->xfer_stop = false;
+ spin_unlock(&host->xfer);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock(&host->xfer);
+ len = min(left, host->data_max);
+ nblocks = len / data->blksz;
+ len = nblocks * data->blksz;
+
+ if (data->flags & MMC_DATA_READ) {
+ ret = _gb_sdio_recv(host, data, len, nblocks, skip);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = _gb_sdio_send(host, data, len, nblocks, skip);
+ if (ret < 0)
+ goto out;
+ }
+ data->bytes_xfered += len;
+ left -= len;
+ skip += len;
+ }
+
+out:
+ data->error = ret;
+ return ret;
+}
+
+static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
+{
+ struct gb_sdio_command_request request = {0};
+ struct gb_sdio_command_response response;
+ struct mmc_data *data = host->mrq->data;
+ u8 cmd_flags;
+ u8 cmd_type;
+ int i;
+ int ret;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmd_flags = GB_SDIO_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
+ break;
+ case MMC_RSP_R1B:
+ cmd_flags = GB_SDIO_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ cmd_flags = GB_SDIO_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmd_flags = GB_SDIO_RSP_R3_R4;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
+ mmc_resp_type(cmd));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ cmd_type = GB_SDIO_CMD_BC;
+ break;
+ case MMC_CMD_BCR:
+ cmd_type = GB_SDIO_CMD_BCR;
+ break;
+ case MMC_CMD_AC:
+ cmd_type = GB_SDIO_CMD_AC;
+ break;
+ case MMC_CMD_ADTC:
+ cmd_type = GB_SDIO_CMD_ADTC;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
+ mmc_cmd_type(cmd));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ request.cmd = cmd->opcode;
+ request.cmd_flags = cmd_flags;
+ request.cmd_type = cmd_type;
+ request.cmd_arg = cpu_to_le32(cmd->arg);
+ /* some controllers need to know the data details at command time */
+ if (data) {
+ request.data_blocks = cpu_to_le16(data->blocks);
+ request.data_blksz = cpu_to_le16(data->blksz);
+ }
+
+ ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
+ &request, sizeof(request), &response,
+ sizeof(response));
+ if (ret < 0)
+ goto out;
+
+ /* no response expected */
+ if (cmd_flags == GB_SDIO_RSP_NONE)
+ goto out;
+
+ /* long response expected */
+ if (cmd_flags & GB_SDIO_RSP_R2)
+ for (i = 0; i < 4; i++)
+ cmd->resp[i] = le32_to_cpu(response.resp[i]);
+ else
+ cmd->resp[0] = le32_to_cpu(response.resp[0]);
+
+out:
+ cmd->error = ret;
+ return ret;
+}
+
+static void gb_sdio_mrq_work(struct work_struct *work)
+{
+ struct gb_sdio_host *host;
+ struct mmc_request *mrq;
+ int ret;
+
+ host = container_of(work, struct gb_sdio_host, mrqwork);
+
+ ret = gbphy_runtime_get_sync(host->gbphy_dev);
+ if (ret)
+ return;
+
+ mutex_lock(&host->lock);
+ mrq = host->mrq;
+ if (!mrq) {
+ mutex_unlock(&host->lock);
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+ dev_err(mmc_dev(host->mmc), "mmc request is NULL");
+ return;
+ }
+
+ if (host->removed) {
+ mrq->cmd->error = -ESHUTDOWN;
+ goto done;
+ }
+
+ if (mrq->sbc) {
+ ret = gb_sdio_command(host, mrq->sbc);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = gb_sdio_command(host, mrq->cmd);
+ if (ret < 0)
+ goto done;
+
+ if (mrq->data) {
+ ret = gb_sdio_transfer(host, mrq->data);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (mrq->stop) {
+ ret = gb_sdio_command(host, mrq->stop);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ host->mrq = NULL;
+ mutex_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ gbphy_runtime_put_autosuspend(host->gbphy_dev);
+}
+
+static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ /* Check if it is a cancel to ongoing transfer */
+ if (cmd->opcode == MMC_STOP_TRANSMISSION) {
+ spin_lock(&host->xfer);
+ host->xfer_stop = true;
+ spin_unlock(&host->xfer);
+ }
+
+ mutex_lock(&host->lock);
+
+ WARN_ON(host->mrq);
+ host->mrq = mrq;
+
+ if (host->removed) {
+ mrq->cmd->error = -ESHUTDOWN;
+ goto out;
+ }
+ if (!host->card_present) {
+ mrq->cmd->error = -ENOMEDIUM;
+ goto out;
+ }
+
+ queue_work(host->mrq_workqueue, &host->mrqwork);
+
+ mutex_unlock(&host->lock);
+ return;
+
+out:
+ host->mrq = NULL;
+ mutex_unlock(&host->lock);
+ mmc_request_done(mmc, mrq);
+}
+
+static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+ struct gb_sdio_set_ios_request request;
+ int ret;
+ u8 power_mode;
+ u8 bus_width;
+ u8 timing;
+ u8 signal_voltage;
+ u8 drv_type;
+ u32 vdd = 0;
+
+ mutex_lock(&host->lock);
+ request.clock = cpu_to_le32(ios->clock);
+
+ if (ios->vdd)
+ vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
+ request.vdd = cpu_to_le32(vdd);
+
+ request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
+ GB_SDIO_BUSMODE_OPENDRAIN :
+ GB_SDIO_BUSMODE_PUSHPULL);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ default:
+ power_mode = GB_SDIO_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ power_mode = GB_SDIO_POWER_UP;
+ break;
+ case MMC_POWER_ON:
+ power_mode = GB_SDIO_POWER_ON;
+ break;
+ case MMC_POWER_UNDEFINED:
+ power_mode = GB_SDIO_POWER_UNDEFINED;
+ break;
+ }
+ request.power_mode = power_mode;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ bus_width = GB_SDIO_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ default:
+ bus_width = GB_SDIO_BUS_WIDTH_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ bus_width = GB_SDIO_BUS_WIDTH_8;
+ break;
+ }
+ request.bus_width = bus_width;
+
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ default:
+ timing = GB_SDIO_TIMING_LEGACY;
+ break;
+ case MMC_TIMING_MMC_HS:
+ timing = GB_SDIO_TIMING_MMC_HS;
+ break;
+ case MMC_TIMING_SD_HS:
+ timing = GB_SDIO_TIMING_SD_HS;
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ timing = GB_SDIO_TIMING_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ timing = GB_SDIO_TIMING_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ timing = GB_SDIO_TIMING_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ timing = GB_SDIO_TIMING_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ timing = GB_SDIO_TIMING_UHS_DDR50;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ timing = GB_SDIO_TIMING_MMC_DDR52;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ timing = GB_SDIO_TIMING_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ timing = GB_SDIO_TIMING_MMC_HS400;
+ break;
+ }
+ request.timing = timing;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
+ break;
+ }
+ request.signal_voltage = signal_voltage;
+
+ switch (ios->drv_type) {
+ case MMC_SET_DRIVER_TYPE_A:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
+ break;
+ case MMC_SET_DRIVER_TYPE_C:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
+ break;
+ case MMC_SET_DRIVER_TYPE_D:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
+ break;
+ case MMC_SET_DRIVER_TYPE_B:
+ default:
+ drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
+ break;
+ }
+ request.drv_type = drv_type;
+
+ ret = gb_sdio_set_ios(host, &request);
+ if (ret < 0)
+ goto out;
+
+ memcpy(&mmc->ios, ios, sizeof(mmc->ios));
+
+out:
+ mutex_unlock(&host->lock);
+}
+
+static int gb_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+
+ mutex_lock(&host->lock);
+ if (host->removed) {
+ mutex_unlock(&host->lock);
+ return -ESHUTDOWN;
+ }
+ mutex_unlock(&host->lock);
+
+ return host->read_only;
+}
+
+static int gb_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct gb_sdio_host *host = mmc_priv(mmc);
+
+ mutex_lock(&host->lock);
+ if (host->removed) {
+ mutex_unlock(&host->lock);
+ return -ESHUTDOWN;
+ }
+ mutex_unlock(&host->lock);
+
+ return host->card_present;
+}
+
+static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ return 0;
+}
+
+static const struct mmc_host_ops gb_sdio_ops = {
+ .request = gb_mmc_request,
+ .set_ios = gb_mmc_set_ios,
+ .get_ro = gb_mmc_get_ro,
+ .get_cd = gb_mmc_get_cd,
+ .start_signal_voltage_switch = gb_mmc_switch_voltage,
+};
+
+static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct mmc_host *mmc;
+ struct gb_sdio_host *host;
+ int ret = 0;
+
+ mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_sdio_request_handler);
+ if (IS_ERR(connection)) {
+ ret = PTR_ERR(connection);
+ goto exit_mmc_free;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
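+ /* treat the host as removed until mmc_add_host(), so early events are only queued */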
+ host->removed = true;
+
+ host->connection = connection;
+ gb_connection_set_data(connection, host);
+ host->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, host);
+
+ ret = gb_connection_enable_tx(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_sdio_get_caps(host);
+ if (ret < 0)
+ goto exit_connection_disable;
+
+ mmc->ops = &gb_sdio_ops;
+
+ mmc->max_segs = host->mmc->max_blk_count;
+
+ /* for now, map the max request size 1:1 to the max segment size */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ mutex_init(&host->lock);
+ spin_lock_init(&host->xfer);
+ host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
+ dev_name(&gbphy_dev->dev));
+ if (!host->mrq_workqueue) {
+ ret = -ENOMEM;
+ goto exit_connection_disable;
+ }
+ INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_wq_destroy;
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto exit_wq_destroy;
+ host->removed = false;
+ ret = _gb_sdio_process_events(host, host->queued_events);
+ host->queued_events = 0;
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+
+ return ret;
+
+exit_wq_destroy:
+ destroy_workqueue(host->mrq_workqueue);
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_mmc_free:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = host->connection;
+ struct mmc_host *mmc;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ mutex_lock(&host->lock);
+ host->removed = true;
+ mmc = host->mmc;
+ gb_connection_set_data(connection, NULL);
+ mutex_unlock(&host->lock);
+
+ flush_workqueue(host->mrq_workqueue);
+ destroy_workqueue(host->mrq_workqueue);
+ gb_connection_disable_rx(connection);
+ mmc_remove_host(mmc);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ mmc_free_host(mmc);
+}
+
+static const struct gbphy_device_id gb_sdio_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
+
+static struct gbphy_driver sdio_driver = {
+ .name = "sdio",
+ .probe = gb_sdio_probe,
+ .remove = gb_sdio_remove,
+ .id_table = gb_sdio_id_table,
+};
+
+module_gbphy_driver(sdio_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
new file mode 100644
index 000000000000..c893552b5c0b
--- /dev/null
+++ b/drivers/staging/greybus/spi.c
@@ -0,0 +1,79 @@
+/*
+ * SPI bridge PHY driver.
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/module.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+#include "spilib.h"
+
+static struct spilib_ops *spilib_ops;
+
+static int gb_spi_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ int ret;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
+
+ ret = gb_connection_enable(connection);
+ if (ret)
+ goto exit_connection_destroy;
+
+ ret = gb_spilib_master_init(connection, &gbphy_dev->dev, spilib_ops);
+ if (ret)
+ goto exit_connection_disable;
+
+ gb_gbphy_set_data(gbphy_dev, connection);
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+
+ return ret;
+}
+
+static void gb_spi_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_connection *connection = gb_gbphy_get_data(gbphy_dev);
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ gb_spilib_master_exit(connection);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+}
+
+static const struct gbphy_device_id gb_spi_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SPI) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_spi_id_table);
+
+static struct gbphy_driver spi_driver = {
+ .name = "spi",
+ .probe = gb_spi_probe,
+ .remove = gb_spi_remove,
+ .id_table = gb_spi_id_table,
+};
+
+module_gbphy_driver(spi_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
new file mode 100644
index 000000000000..e97b19148497
--- /dev/null
+++ b/drivers/staging/greybus/spilib.c
@@ -0,0 +1,565 @@
+/*
+ * Greybus SPI library
+ *
+ * Copyright 2014-2016 Google Inc.
+ * Copyright 2014-2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include "greybus.h"
+#include "spilib.h"
+
+struct gb_spilib {
+ struct gb_connection *connection;
+ struct device *parent;
+ struct spi_transfer *first_xfer;
+ struct spi_transfer *last_xfer;
+ struct spilib_ops *ops;
+ u32 rx_xfer_offset;
+ u32 tx_xfer_offset;
+ u32 last_xfer_size;
+ unsigned int op_timeout;
+ u16 mode;
+ u16 flags;
+ u32 bits_per_word_mask;
+ u8 num_chipselect;
+ u32 min_speed_hz;
+ u32 max_speed_hz;
+};
+
+#define GB_SPI_STATE_MSG_DONE ((void *)0)
+#define GB_SPI_STATE_MSG_IDLE ((void *)1)
+#define GB_SPI_STATE_MSG_RUNNING ((void *)2)
+#define GB_SPI_STATE_OP_READY ((void *)3)
+#define GB_SPI_STATE_OP_DONE ((void *)4)
+#define GB_SPI_STATE_MSG_ERROR ((void *)-1)
+
+#define XFER_TIMEOUT_TOLERANCE 200
+
+static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
+{
+ return gb_connection_get_data(spi->connection);
+}
+
+static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
+{
+ size_t headers_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_request);
+ headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+ return tx_size + headers_size > data_max ? 0 : 1;
+}
+
+static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
+ size_t data_max)
+{
+ size_t rx_xfer_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_response);
+
+ if (rx_size + len > data_max)
+ rx_xfer_size = data_max - rx_size;
+ else
+ rx_xfer_size = len;
+
+ /* if this is a write_read, for symmetry read the same as write */
+ if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
+ rx_xfer_size = *tx_xfer_size;
+ if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
+ *tx_xfer_size = rx_xfer_size;
+
+ return rx_xfer_size;
+}
+
+static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
+ size_t data_max)
+{
+ size_t headers_size;
+
+ data_max -= sizeof(struct gb_spi_transfer_request);
+ headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
+
+ if (tx_size + headers_size + len > data_max)
+ return data_max - (tx_size + sizeof(struct gb_spi_transfer));
+
+ return len;
+}
+
+static void clean_xfer_state(struct gb_spilib *spi)
+{
+ spi->first_xfer = NULL;
+ spi->last_xfer = NULL;
+ spi->rx_xfer_offset = 0;
+ spi->tx_xfer_offset = 0;
+ spi->last_xfer_size = 0;
+ spi->op_timeout = 0;
+}
+
+static bool is_last_xfer_done(struct gb_spilib *spi)
+{
+ struct spi_transfer *last_xfer = spi->last_xfer;
+
+ if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
+ (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
+ return true;
+
+ return false;
+}
+
+static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
+{
+ struct spi_transfer *last_xfer = spi->last_xfer;
+
+ if (msg->state != GB_SPI_STATE_OP_DONE)
+ return 0;
+
+ /*
+ * If we transferred all the content of the last transfer, reset the
+ * per-transfer state and check whether it was the last transfer in the message.
+ */
+ if (is_last_xfer_done(spi)) {
+ spi->tx_xfer_offset = 0;
+ spi->rx_xfer_offset = 0;
+ spi->op_timeout = 0;
+ if (last_xfer == list_last_entry(&msg->transfers,
+ struct spi_transfer,
+ transfer_list))
+ msg->state = GB_SPI_STATE_MSG_DONE;
+ else
+ spi->first_xfer = list_next_entry(last_xfer,
+ transfer_list);
+ return 0;
+ }
+
+ spi->first_xfer = last_xfer;
+ if (last_xfer->tx_buf)
+ spi->tx_xfer_offset += spi->last_xfer_size;
+
+ if (last_xfer->rx_buf)
+ spi->rx_xfer_offset += spi->last_xfer_size;
+
+ return 0;
+}
+
+static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
+ struct spi_message *msg)
+{
+ if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
+ transfer_list))
+ return NULL;
+
+ return list_next_entry(xfer, transfer_list);
+}
+
+/* Routines to transfer data */
+static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
+ struct gb_connection *connection, struct spi_message *msg)
+{
+ struct gb_spi_transfer_request *request;
+ struct spi_device *dev = msg->spi;
+ struct spi_transfer *xfer;
+ struct gb_spi_transfer *gb_xfer;
+ struct gb_operation *operation;
+ u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
+ u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
+ u32 total_len = 0;
+ unsigned int xfer_timeout;
+ size_t data_max;
+ void *tx_data;
+
+ data_max = gb_operation_get_payload_size_max(connection);
+ xfer = spi->first_xfer;
+
+ /* Find number of transfers queued and tx/rx length in the message */
+
+ while (msg->state != GB_SPI_STATE_OP_READY) {
+ msg->state = GB_SPI_STATE_MSG_RUNNING;
+ spi->last_xfer = xfer;
+
+ if (!xfer->tx_buf && !xfer->rx_buf) {
+ dev_err(spi->parent,
+ "bufferless transfer, length %u\n", xfer->len);
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ return NULL;
+ }
+
+ tx_xfer_size = 0;
+ rx_xfer_size = 0;
+
+ if (xfer->tx_buf) {
+ len = xfer->len - spi->tx_xfer_offset;
+ if (!tx_header_fit_operation(tx_size, count, data_max))
+ break;
+ tx_xfer_size = calc_tx_xfer_size(tx_size, count,
+ len, data_max);
+ spi->last_xfer_size = tx_xfer_size;
+ }
+
+ if (xfer->rx_buf) {
+ len = xfer->len - spi->rx_xfer_offset;
+ rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
+ len, data_max);
+ spi->last_xfer_size = rx_xfer_size;
+ }
+
+ tx_size += tx_xfer_size;
+ rx_size += rx_xfer_size;
+
+ total_len += spi->last_xfer_size;
+ count++;
+
+ xfer = get_next_xfer(xfer, msg);
+ if (!xfer || total_len >= data_max)
+ msg->state = GB_SPI_STATE_OP_READY;
+ }
+
+ /*
+ * In addition to space for all message descriptors we need
+ * to have enough to hold all tx data.
+ */
+ request_size = sizeof(*request);
+ request_size += count * sizeof(*gb_xfer);
+ request_size += tx_size;
+
+ /* Response consists only of incoming data */
+ operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
+ request_size, rx_size, GFP_KERNEL);
+ if (!operation)
+ return NULL;
+
+ request = operation->request->payload;
+ request->count = cpu_to_le16(count);
+ request->mode = dev->mode;
+ request->chip_select = dev->chip_select;
+
+ gb_xfer = &request->transfers[0];
+ tx_data = gb_xfer + count; /* place tx data after last gb_xfer */
+
+ /* Fill in the transfers array */
+ xfer = spi->first_xfer;
+ while (msg->state != GB_SPI_STATE_OP_DONE) {
+ if (xfer == spi->last_xfer)
+ xfer_len = spi->last_xfer_size;
+ else
+ xfer_len = xfer->len;
+
+ /* make sure we do not time out on a slow transfer */
+ xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
+ xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
+
+ if (xfer_timeout > spi->op_timeout)
+ spi->op_timeout = xfer_timeout;
+
+ gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
+ gb_xfer->len = cpu_to_le32(xfer_len);
+ gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
+ gb_xfer->cs_change = xfer->cs_change;
+ gb_xfer->bits_per_word = xfer->bits_per_word;
+
+ /* Copy tx data */
+ if (xfer->tx_buf) {
+ gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
+ memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
+ xfer_len);
+ tx_data += xfer_len;
+ }
+
+ if (xfer->rx_buf)
+ gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
+
+ if (xfer == spi->last_xfer) {
+ if (!is_last_xfer_done(spi))
+ gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
+ msg->state = GB_SPI_STATE_OP_DONE;
+ continue;
+ }
+
+ gb_xfer++;
+ xfer = get_next_xfer(xfer, msg);
+ }
+
+ msg->actual_length += total_len;
+
+ return operation;
+}
+
+static void gb_spi_decode_response(struct gb_spilib *spi,
+ struct spi_message *msg,
+ struct gb_spi_transfer_response *response)
+{
+ struct spi_transfer *xfer = spi->first_xfer;
+ void *rx_data = response->data;
+ u32 xfer_len;
+
+ while (xfer) {
+ /* Copy rx data */
+ if (xfer->rx_buf) {
+ if (xfer == spi->first_xfer)
+ xfer_len = xfer->len - spi->rx_xfer_offset;
+ else if (xfer == spi->last_xfer)
+ xfer_len = spi->last_xfer_size;
+ else
+ xfer_len = xfer->len;
+
+ memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
+ xfer_len);
+ rx_data += xfer_len;
+ }
+
+ if (xfer == spi->last_xfer)
+ break;
+
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+}
+
+static int gb_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct gb_spilib *spi = spi_master_get_devdata(master);
+ struct gb_connection *connection = spi->connection;
+ struct gb_spi_transfer_response *response;
+ struct gb_operation *operation;
+ int ret = 0;
+
+ spi->first_xfer = list_first_entry_or_null(&msg->transfers,
+ struct spi_transfer,
+ transfer_list);
+ if (!spi->first_xfer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg->state = GB_SPI_STATE_MSG_IDLE;
+
+ while (msg->state != GB_SPI_STATE_MSG_DONE &&
+ msg->state != GB_SPI_STATE_MSG_ERROR) {
+ operation = gb_spi_operation_create(spi, connection, msg);
+ if (!operation) {
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ ret = -EINVAL;
+ continue;
+ }
+
+ ret = gb_operation_request_send_sync_timeout(operation,
+ spi->op_timeout);
+ if (!ret) {
+ response = operation->response->payload;
+ if (response)
+ gb_spi_decode_response(spi, msg, response);
+ } else {
+ dev_err(spi->parent,
+ "transfer operation failed: %d\n", ret);
+ msg->state = GB_SPI_STATE_MSG_ERROR;
+ }
+
+ gb_operation_put(operation);
+ setup_next_xfer(spi, msg);
+ }
+
+out:
+ msg->status = ret;
+ clean_xfer_state(spi);
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct gb_spilib *spi = spi_master_get_devdata(master);
+
+ return spi->ops->prepare_transfer_hardware(spi->parent);
+}
+
+static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct gb_spilib *spi = spi_master_get_devdata(master);
+
+ spi->ops->unprepare_transfer_hardware(spi->parent);
+
+ return 0;
+}
+
+static int gb_spi_setup(struct spi_device *spi)
+{
+ /* Nothing to do for now */
+ return 0;
+}
+
+static void gb_spi_cleanup(struct spi_device *spi)
+{
+ /* Nothing to do for now */
+}
+
+/* Routines to get controller information */
+
+/*
+ * Map Greybus spi mode bits/flags/bpw into Linux ones.
+ * All bits are the same for now, so these macros return the same values.
+ */
+#define gb_spi_mode_map(mode) mode
+#define gb_spi_flags_map(flags) flags
+
+static int gb_spi_get_master_config(struct gb_spilib *spi)
+{
+ struct gb_spi_master_config_response response;
+ u16 mode, flags;
+ int ret;
+
+ ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
+ NULL, 0, &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ mode = le16_to_cpu(response.mode);
+ spi->mode = gb_spi_mode_map(mode);
+
+ flags = le16_to_cpu(response.flags);
+ spi->flags = gb_spi_flags_map(flags);
+
+ spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
+ spi->num_chipselect = response.num_chipselect;
+
+ spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
+ spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
+
+ return 0;
+}
+
+static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
+{
+ struct spi_master *master = get_master_from_spi(spi);
+ struct gb_spi_device_config_request request;
+ struct gb_spi_device_config_response response;
+ struct spi_board_info spi_board = { {0} };
+ struct spi_device *spidev;
+ int ret;
+ u8 dev_type;
+
+ request.chip_select = cs;
+
+ ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ dev_type = response.device_type;
+
+ if (dev_type == GB_SPI_SPI_DEV)
+ strlcpy(spi_board.modalias, "spidev",
+ sizeof(spi_board.modalias));
+ else if (dev_type == GB_SPI_SPI_NOR)
+ strlcpy(spi_board.modalias, "spi-nor",
+ sizeof(spi_board.modalias));
+ else if (dev_type == GB_SPI_SPI_MODALIAS)
+ memcpy(spi_board.modalias, response.name,
+ sizeof(spi_board.modalias));
+ else
+ return -EINVAL;
+
+ spi_board.mode = le16_to_cpu(response.mode);
+ spi_board.bus_num = master->bus_num;
+ spi_board.chip_select = cs;
+ spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
+
+ spidev = spi_new_device(master, &spi_board);
+ if (!spidev)
+ return -EINVAL;
+
+ return 0;
+}
+
+int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
+ struct spilib_ops *ops)
+{
+ struct gb_spilib *spi;
+ struct spi_master *master;
+ int ret;
+ u8 i;
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(*spi));
+ if (!master) {
+ dev_err(dev, "cannot alloc SPI master\n");
+ return -ENOMEM;
+ }
+
+ spi = spi_master_get_devdata(master);
+ spi->connection = connection;
+ gb_connection_set_data(connection, master);
+ spi->parent = dev;
+ spi->ops = ops;
+
+ /* get master configuration */
+ ret = gb_spi_get_master_config(spi);
+ if (ret)
+ goto exit_spi_put;
+
+ master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
+ master->num_chipselect = spi->num_chipselect;
+ master->mode_bits = spi->mode;
+ master->flags = spi->flags;
+ master->bits_per_word_mask = spi->bits_per_word_mask;
+
+ /* Attach methods */
+ master->cleanup = gb_spi_cleanup;
+ master->setup = gb_spi_setup;
+ master->transfer_one_message = gb_spi_transfer_one_message;
+
+ if (ops && ops->prepare_transfer_hardware) {
+ master->prepare_transfer_hardware =
+ gb_spi_prepare_transfer_hardware;
+ }
+
+ if (ops && ops->unprepare_transfer_hardware) {
+ master->unprepare_transfer_hardware =
+ gb_spi_unprepare_transfer_hardware;
+ }
+
+ master->auto_runtime_pm = true;
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto exit_spi_put;
+
+ /* now, fetch the devices configuration */
+ for (i = 0; i < spi->num_chipselect; i++) {
+ ret = gb_spi_setup_device(spi, i);
+ if (ret < 0) {
+ dev_err(dev, "failed to allocate spi device %d: %d\n",
+ i, ret);
+ goto exit_spi_unregister;
+ }
+ }
+
+ return 0;
+
+exit_spi_unregister:
+ spi_unregister_master(master);
+exit_spi_put:
+ spi_master_put(master);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_init);
+
+void gb_spilib_master_exit(struct gb_connection *connection)
+{
+ struct spi_master *master = gb_connection_get_data(connection);
+
+ spi_unregister_master(master);
+ spi_master_put(master);
+}
+EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
new file mode 100644
index 000000000000..566d0dde7f79
--- /dev/null
+++ b/drivers/staging/greybus/spilib.h
@@ -0,0 +1,24 @@
+/*
+ * Greybus SPI library header
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __SPILIB_H
+#define __SPILIB_H
+
+struct device;
+struct gb_connection;
+
+struct spilib_ops {
+ int (*prepare_transfer_hardware)(struct device *dev);
+ void (*unprepare_transfer_hardware)(struct device *dev);
+};
+
+int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, struct spilib_ops *ops);
+void gb_spilib_master_exit(struct gb_connection *connection);
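+
+/*
+ * Both callbacks are optional; gb_spilib_master_init() only wires them up
+ * when they are provided.  A minimal usage sketch from a transport driver
+ * (callback and variable names below are hypothetical):
+ *
+ *   static struct spilib_ops my_spilib_ops = {
+ *       .prepare_transfer_hardware = my_prepare_hw,
+ *       .unprepare_transfer_hardware = my_unprepare_hw,
+ *   };
+ *
+ *   err = gb_spilib_master_init(connection, &my_dev->dev, &my_spilib_ops);
+ *   ...
+ *   gb_spilib_master_exit(connection);
+ */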
+
+#endif /* __SPILIB_H */
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
new file mode 100644
index 000000000000..550055ec27a5
--- /dev/null
+++ b/drivers/staging/greybus/svc.c
@@ -0,0 +1,1486 @@
+/*
+ * SVC Greybus driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+
+#include "greybus.h"
+
+#define SVC_INTF_EJECT_TIMEOUT 9000
+#define SVC_INTF_ACTIVATE_TIMEOUT 6000
+#define SVC_INTF_RESUME_TIMEOUT 3000
+
+struct gb_svc_deferred_request {
+ struct work_struct work;
+ struct gb_operation *operation;
+};
+
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation);
+
+static ssize_t endo_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ return sprintf(buf, "0x%04x\n", svc->endo_id);
+}
+static DEVICE_ATTR_RO(endo_id);
+
+static ssize_t ap_intf_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ return sprintf(buf, "%u\n", svc->ap_intf_id);
+}
+static DEVICE_ATTR_RO(ap_intf_id);
+
+// FIXME
+// This is a hack, we need to do this "right" and clean the interface up
+// properly, not just forcibly yank the thing out of the system and hope for the
+// best. But for now, people want their modules to come out without having to
+// throw the thing to the ground or get out a screwdriver.
+static ssize_t intf_eject_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+ unsigned short intf_id;
+ int ret;
+
+ ret = kstrtou16(buf, 10, &intf_id);
+ if (ret < 0)
+ return ret;
+
+ dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
+
+ ret = gb_svc_intf_eject(svc, intf_id);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+static DEVICE_ATTR_WO(intf_eject);
+
+static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ return sprintf(buf, "%s\n",
+ gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+}
+
+static ssize_t watchdog_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+ int retval;
+ bool user_request;
+
+ retval = strtobool(buf, &user_request);
+ if (retval)
+ return retval;
+
+ if (user_request)
+ retval = gb_svc_watchdog_enable(svc);
+ else
+ retval = gb_svc_watchdog_disable(svc);
+ if (retval)
+ return retval;
+ return len;
+}
+static DEVICE_ATTR_RW(watchdog);
+
+static ssize_t watchdog_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
+ return sprintf(buf, "panic\n");
+ else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
+ return sprintf(buf, "reset\n");
+
+ return -EINVAL;
+}
+
+static ssize_t watchdog_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ if (sysfs_streq(buf, "panic"))
+ svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
+ else if (sysfs_streq(buf, "reset"))
+ svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
+ else
+ return -EINVAL;
+
+ return len;
+}
+static DEVICE_ATTR_RW(watchdog_action);
+
+static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
+{
+ struct gb_svc_pwrmon_rail_count_get_response response;
+ int ret;
+
+ ret = gb_operation_sync(svc->connection,
+ GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
+ return ret;
+ }
+
+ *value = response.rail_count;
+
+ return 0;
+}
+
+static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
+ struct gb_svc_pwrmon_rail_names_get_response *response,
+ size_t bufsize)
+{
+ int ret;
+
+ ret = gb_operation_sync(svc->connection,
+ GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
+ response, bufsize);
+ if (ret) {
+ dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
+ return ret;
+ }
+
+ if (response->status != GB_SVC_OP_SUCCESS) {
+ dev_err(&svc->dev,
+ "SVC error while getting rail names: %u\n",
+ response->status);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
+ u8 measurement_type, u32 *value)
+{
+ struct gb_svc_pwrmon_sample_get_request request;
+ struct gb_svc_pwrmon_sample_get_response response;
+ int ret;
+
+ request.rail_id = rail_id;
+ request.measurement_type = measurement_type;
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
+ return ret;
+ }
+
+ if (response.result) {
+ dev_err(&svc->dev,
+ "UniPro error while getting rail power sample (%d %d): %d\n",
+ rail_id, measurement_type, response.result);
+ switch (response.result) {
+ case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+ return -EINVAL;
+ case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+ return -ENOMSG;
+ default:
+ return -EREMOTEIO;
+ }
+ }
+
+ *value = le32_to_cpu(response.measurement);
+
+ return 0;
+}
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+ u8 measurement_type, u32 *value)
+{
+ struct gb_svc_pwrmon_intf_sample_get_request request;
+ struct gb_svc_pwrmon_intf_sample_get_response response;
+ int ret;
+
+ request.intf_id = intf_id;
+ request.measurement_type = measurement_type;
+
+ ret = gb_operation_sync(svc->connection,
+ GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
+ return ret;
+ }
+
+ if (response.result) {
+ dev_err(&svc->dev,
+ "UniPro error while getting intf power sample (%d %d): %d\n",
+ intf_id, measurement_type, response.result);
+ switch (response.result) {
+ case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+ return -EINVAL;
+ case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+ return -ENOMSG;
+ default:
+ return -EREMOTEIO;
+ }
+ }
+
+ *value = le32_to_cpu(response.measurement);
+
+ return 0;
+}
+
+static struct attribute *svc_attrs[] = {
+ &dev_attr_endo_id.attr,
+ &dev_attr_ap_intf_id.attr,
+ &dev_attr_intf_eject.attr,
+ &dev_attr_watchdog.attr,
+ &dev_attr_watchdog_action.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(svc);
+
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
+{
+ struct gb_svc_intf_device_id_request request;
+
+ request.intf_id = intf_id;
+ request.device_id = device_id;
+
+ return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
+ &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
+{
+ struct gb_svc_intf_eject_request request;
+ int ret;
+
+ request.intf_id = intf_id;
+
+ /*
+ * The pulse width for module release in the SVC is long, so increase
+ * the timeout so that the operation does not return too soon.
+ */
+ ret = gb_operation_sync_timeout(svc->connection,
+ GB_SVC_TYPE_INTF_EJECT, &request,
+ sizeof(request), NULL, 0,
+ SVC_INTF_EJECT_TIMEOUT);
+ if (ret) {
+ dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+ struct gb_svc_intf_vsys_request request;
+ struct gb_svc_intf_vsys_response response;
+ int type, ret;
+
+ request.intf_id = intf_id;
+
+ if (enable)
+ type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
+ else
+ type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
+
+ ret = gb_operation_sync(svc->connection, type,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+ if (response.result_code != GB_SVC_INTF_VSYS_OK)
+ return -EREMOTEIO;
+ return 0;
+}
+
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+ struct gb_svc_intf_refclk_request request;
+ struct gb_svc_intf_refclk_response response;
+ int type, ret;
+
+ request.intf_id = intf_id;
+
+ if (enable)
+ type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
+ else
+ type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
+
+ ret = gb_operation_sync(svc->connection, type,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+ if (response.result_code != GB_SVC_INTF_REFCLK_OK)
+ return -EREMOTEIO;
+ return 0;
+}
+
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+ struct gb_svc_intf_unipro_request request;
+ struct gb_svc_intf_unipro_response response;
+ int type, ret;
+
+ request.intf_id = intf_id;
+
+ if (enable)
+ type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
+ else
+ type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
+
+ ret = gb_operation_sync(svc->connection, type,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+ if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
+ return -EREMOTEIO;
+ return 0;
+}
+
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
+{
+ struct gb_svc_intf_activate_request request;
+ struct gb_svc_intf_activate_response response;
+ int ret;
+
+ request.intf_id = intf_id;
+
+ ret = gb_operation_sync_timeout(svc->connection,
+ GB_SVC_TYPE_INTF_ACTIVATE,
+ &request, sizeof(request),
+ &response, sizeof(response),
+ SVC_INTF_ACTIVATE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (response.status != GB_SVC_OP_SUCCESS) {
+ dev_err(&svc->dev, "failed to activate interface %u: %u\n",
+ intf_id, response.status);
+ return -EREMOTEIO;
+ }
+
+ *intf_type = response.intf_type;
+
+ return 0;
+}
+
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
+{
+ struct gb_svc_intf_resume_request request;
+ struct gb_svc_intf_resume_response response;
+ int ret;
+
+ request.intf_id = intf_id;
+
+ ret = gb_operation_sync_timeout(svc->connection,
+ GB_SVC_TYPE_INTF_RESUME,
+ &request, sizeof(request),
+ &response, sizeof(response),
+ SVC_INTF_RESUME_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
+ intf_id, ret);
+ return ret;
+ }
+
+ if (response.status != GB_SVC_OP_SUCCESS) {
+ dev_err(&svc->dev, "failed to resume interface %u: %u\n",
+ intf_id, response.status);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 *value)
+{
+ struct gb_svc_dme_peer_get_request request;
+ struct gb_svc_dme_peer_get_response response;
+ u16 result;
+ int ret;
+
+ request.intf_id = intf_id;
+ request.attr = cpu_to_le16(attr);
+ request.selector = cpu_to_le16(selector);
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
+ intf_id, attr, selector, ret);
+ return ret;
+ }
+
+ result = le16_to_cpu(response.result_code);
+ if (result) {
+ dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
+ intf_id, attr, selector, result);
+ return -EREMOTEIO;
+ }
+
+ if (value)
+ *value = le32_to_cpu(response.attr_value);
+
+ return 0;
+}
+
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 value)
+{
+ struct gb_svc_dme_peer_set_request request;
+ struct gb_svc_dme_peer_set_response response;
+ u16 result;
+ int ret;
+
+ request.intf_id = intf_id;
+ request.attr = cpu_to_le16(attr);
+ request.selector = cpu_to_le16(selector);
+ request.value = cpu_to_le32(value);
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret) {
+ dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
+ intf_id, attr, selector, value, ret);
+ return ret;
+ }
+
+ result = le16_to_cpu(response.result_code);
+ if (result) {
+ dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
+ intf_id, attr, selector, value, result);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+int gb_svc_connection_create(struct gb_svc *svc,
+ u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id,
+ u8 cport_flags)
+{
+ struct gb_svc_conn_create_request request;
+
+ request.intf1_id = intf1_id;
+ request.cport1_id = cpu_to_le16(cport1_id);
+ request.intf2_id = intf2_id;
+ request.cport2_id = cpu_to_le16(cport2_id);
+ request.tc = 0; /* TC0 */
+ request.flags = cport_flags;
+
+ return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
+ &request, sizeof(request), NULL, 0);
+}
+
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id)
+{
+ struct gb_svc_conn_destroy_request request;
+ struct gb_connection *connection = svc->connection;
+ int ret;
+
+ request.intf1_id = intf1_id;
+ request.cport1_id = cpu_to_le16(cport1_id);
+ request.intf2_id = intf2_id;
+ request.cport2_id = cpu_to_le16(cport2_id);
+
+ ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
+ intf1_id, cport1_id, intf2_id, cport2_id, ret);
+ }
+}
+
+int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
+ u32 strobe_delay, u32 refclk)
+{
+ struct gb_connection *connection = svc->connection;
+ struct gb_svc_timesync_enable_request request;
+
+ request.count = count;
+ request.frame_time = cpu_to_le64(frame_time);
+ request.strobe_delay = cpu_to_le32(strobe_delay);
+ request.refclk = cpu_to_le32(refclk);
+ return gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_ENABLE,
+ &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_timesync_disable(struct gb_svc *svc)
+{
+ struct gb_connection *connection = svc->connection;
+
+ return gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_DISABLE,
+ NULL, 0, NULL, 0);
+}
+
+int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
+{
+ struct gb_connection *connection = svc->connection;
+ struct gb_svc_timesync_authoritative_response response;
+ int ret, i;
+
+ ret = gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
+ frame_time[i] = le64_to_cpu(response.frame_time[i]);
+ return 0;
+}
+
+int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
+{
+ struct gb_connection *connection = svc->connection;
+ struct gb_svc_timesync_ping_response response;
+ int ret;
+
+ ret = gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_PING,
+ NULL, 0,
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ *frame_time = le64_to_cpu(response.frame_time);
+ return 0;
+}
+
+int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
+{
+ struct gb_connection *connection = svc->connection;
+ struct gb_svc_timesync_wake_pins_acquire_request request;
+
+ request.strobe_mask = cpu_to_le32(strobe_mask);
+ return gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
+ &request, sizeof(request),
+ NULL, 0);
+}
+
+int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
+{
+ struct gb_connection *connection = svc->connection;
+
+ return gb_operation_sync(connection,
+ GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
+ NULL, 0, NULL, 0);
+}
+
+/* Creates bi-directional routes between the devices */
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+ u8 intf2_id, u8 dev2_id)
+{
+ struct gb_svc_route_create_request request;
+
+ request.intf1_id = intf1_id;
+ request.dev1_id = dev1_id;
+ request.intf2_id = intf2_id;
+ request.dev2_id = dev2_id;
+
+ return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
+ &request, sizeof(request), NULL, 0);
+}
+
+/* Destroys bi-directional routes between the devices */
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
+{
+ struct gb_svc_route_destroy_request request;
+ int ret;
+
+ request.intf1_id = intf1_id;
+ request.intf2_id = intf2_id;
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
+ &request, sizeof(request), NULL, 0);
+ if (ret) {
+ dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
+ intf1_id, intf2_id, ret);
+ }
+}
+
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+ u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+ u8 tx_amplitude, u8 tx_hs_equalizer,
+ u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+ u8 flags, u32 quirks,
+ struct gb_svc_l2_timer_cfg *local,
+ struct gb_svc_l2_timer_cfg *remote)
+{
+ struct gb_svc_intf_set_pwrm_request request;
+ struct gb_svc_intf_set_pwrm_response response;
+ int ret;
+ u16 result_code;
+
+ memset(&request, 0, sizeof(request));
+
+ request.intf_id = intf_id;
+ request.hs_series = hs_series;
+ request.tx_mode = tx_mode;
+ request.tx_gear = tx_gear;
+ request.tx_nlanes = tx_nlanes;
+ request.tx_amplitude = tx_amplitude;
+ request.tx_hs_equalizer = tx_hs_equalizer;
+ request.rx_mode = rx_mode;
+ request.rx_gear = rx_gear;
+ request.rx_nlanes = rx_nlanes;
+ request.flags = flags;
+ request.quirks = cpu_to_le32(quirks);
+ if (local)
+ request.local_l2timerdata = *local;
+ if (remote)
+ request.remote_l2timerdata = *remote;
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0)
+ return ret;
+
+ result_code = response.result_code;
+ if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
+ dev_err(&svc->dev, "set power mode = %d\n", result_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
+
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
+{
+ struct gb_svc_intf_set_pwrm_request request;
+ struct gb_svc_intf_set_pwrm_response response;
+ int ret;
+ u16 result_code;
+
+ memset(&request, 0, sizeof(request));
+
+ request.intf_id = intf_id;
+ request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
+ request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+ request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+
+ ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+ &request, sizeof(request),
+ &response, sizeof(response));
+ if (ret < 0) {
+ dev_err(&svc->dev,
+ "failed to send set power mode operation to interface %u: %d\n",
+ intf_id, ret);
+ return ret;
+ }
+
+ result_code = response.result_code;
+ if (result_code != GB_SVC_SETPWRM_PWR_OK) {
+ dev_err(&svc->dev,
+ "failed to hibernate the link for interface %u: %u\n",
+ intf_id, result_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int gb_svc_ping(struct gb_svc *svc)
+{
+ return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
+ NULL, 0, NULL, 0,
+ GB_OPERATION_TIMEOUT_DEFAULT * 2);
+}
+
+static int gb_svc_version_request(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_svc_version_request *request;
+ struct gb_svc_version_response *response;
+
+ if (op->request->payload_size < sizeof(*request)) {
+ dev_err(&svc->dev, "short version request (%zu < %zu)\n",
+ op->request->payload_size,
+ sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ if (request->major > GB_SVC_VERSION_MAJOR) {
+ dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
+ request->major, GB_SVC_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ svc->protocol_major = request->major;
+ svc->protocol_minor = request->minor;
+
+ if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
+ return -ENOMEM;
+
+ response = op->response->payload;
+ response->major = svc->protocol_major;
+ response->minor = svc->protocol_minor;
+
+ return 0;
+}
+
+static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct gb_svc *svc = pwrmon_rails->svc;
+ int ret, desc;
+ u32 value;
+ char buff[16];
+
+ ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+ GB_SVC_PWRMON_TYPE_VOL, &value);
+ if (ret) {
+ dev_err(&svc->dev,
+ "failed to get voltage sample %u: %d\n",
+ pwrmon_rails->id, ret);
+ return ret;
+ }
+
+ desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+ return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct gb_svc *svc = pwrmon_rails->svc;
+ int ret, desc;
+ u32 value;
+ char buff[16];
+
+ ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+ GB_SVC_PWRMON_TYPE_CURR, &value);
+ if (ret) {
+ dev_err(&svc->dev,
+ "failed to get current sample %u: %d\n",
+ pwrmon_rails->id, ret);
+ return ret;
+ }
+
+ desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+ return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct gb_svc *svc = pwrmon_rails->svc;
+ int ret, desc;
+ u32 value;
+ char buff[16];
+
+ ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+ GB_SVC_PWRMON_TYPE_PWR, &value);
+ if (ret) {
+ dev_err(&svc->dev, "failed to get power sample %u: %d\n",
+ pwrmon_rails->id, ret);
+ return ret;
+ }
+
+ desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+ return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static const struct file_operations pwrmon_debugfs_voltage_fops = {
+ .read = pwr_debugfs_voltage_read,
+};
+
+static const struct file_operations pwrmon_debugfs_current_fops = {
+ .read = pwr_debugfs_current_read,
+};
+
+static const struct file_operations pwrmon_debugfs_power_fops = {
+ .read = pwr_debugfs_power_read,
+};
+
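+/*
+ * Expose one debugfs directory per power rail under the SVC's "pwrmon"
+ * directory, each containing voltage_now, current_now and power_now files
+ * whose reads trigger a PWRMON sample request to the SVC.
+ */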
+static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
+{
+ int i;
+ size_t bufsize;
+ struct dentry *dent;
+ struct gb_svc_pwrmon_rail_names_get_response *rail_names;
+ u8 rail_count;
+
+ dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
+ if (IS_ERR_OR_NULL(dent))
+ return;
+
+ if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
+ goto err_pwrmon_debugfs;
+
+ if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
+ goto err_pwrmon_debugfs;
+
+ bufsize = sizeof(*rail_names) +
+ GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
+
+ rail_names = kzalloc(bufsize, GFP_KERNEL);
+ if (!rail_names)
+ goto err_pwrmon_debugfs;
+
+ svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
+ GFP_KERNEL);
+ if (!svc->pwrmon_rails)
+ goto err_pwrmon_debugfs_free;
+
+ if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
+ goto err_pwrmon_debugfs_free;
+
+ for (i = 0; i < rail_count; i++) {
+ struct dentry *dir;
+ struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
+ char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+
+ snprintf(fname, sizeof(fname), "%s",
+ (char *)&rail_names->name[i]);
+
+ rail->id = i;
+ rail->svc = svc;
+
+ dir = debugfs_create_dir(fname, dent);
+ debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
+ &pwrmon_debugfs_voltage_fops);
+ debugfs_create_file("current_now", S_IRUGO, dir, rail,
+ &pwrmon_debugfs_current_fops);
+ debugfs_create_file("power_now", S_IRUGO, dir, rail,
+ &pwrmon_debugfs_power_fops);
+ }
+
+ kfree(rail_names);
+ return;
+
+err_pwrmon_debugfs_free:
+ kfree(rail_names);
+ kfree(svc->pwrmon_rails);
+ svc->pwrmon_rails = NULL;
+
+err_pwrmon_debugfs:
+ debugfs_remove(dent);
+}
+
+static void gb_svc_debugfs_init(struct gb_svc *svc)
+{
+ svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
+ gb_debugfs_get());
+ gb_svc_pwrmon_debugfs_init(svc);
+}
+
+static void gb_svc_debugfs_exit(struct gb_svc *svc)
+{
+ debugfs_remove_recursive(svc->debugfs_dentry);
+ kfree(svc->pwrmon_rails);
+ svc->pwrmon_rails = NULL;
+}
+
+static int gb_svc_hello(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_svc_hello_request *hello_request;
+ int ret;
+
+ if (op->request->payload_size < sizeof(*hello_request)) {
+ dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
+ op->request->payload_size,
+ sizeof(*hello_request));
+ return -EINVAL;
+ }
+
+ hello_request = op->request->payload;
+ svc->endo_id = le16_to_cpu(hello_request->endo_id);
+ svc->ap_intf_id = hello_request->interface_id;
+
+ ret = device_add(&svc->dev);
+ if (ret) {
+ dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
+ return ret;
+ }
+
+ ret = gb_svc_watchdog_create(svc);
+ if (ret) {
+ dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ gb_svc_debugfs_init(svc);
+
+ ret = gb_timesync_svc_add(svc);
+ if (ret) {
+ dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
+ gb_svc_debugfs_exit(svc);
+ goto err_unregister_device;
+ }
+
+ return gb_svc_queue_deferred_request(op);
+
+err_unregister_device:
+ gb_svc_watchdog_destroy(svc);
+ device_del(&svc->dev);
+ return ret;
+}
+
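+/*
+ * Interface ids within a module are consecutive and start at the module id
+ * (the primary interface id), so the owning module is the one whose id
+ * range covers intf_id.
+ */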
+static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
+ u8 intf_id)
+{
+ struct gb_host_device *hd = svc->hd;
+ struct gb_module *module;
+ size_t num_interfaces;
+ u8 module_id;
+
+ list_for_each_entry(module, &hd->modules, hd_node) {
+ module_id = module->module_id;
+ num_interfaces = module->num_interfaces;
+
+ if (intf_id >= module_id &&
+ intf_id < module_id + num_interfaces) {
+ return module->interfaces[intf_id - module_id];
+ }
+ }
+
+ return NULL;
+}
+
+static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
+{
+ struct gb_host_device *hd = svc->hd;
+ struct gb_module *module;
+
+ list_for_each_entry(module, &hd->modules, hd_node) {
+ if (module->module_id == module_id)
+ return module;
+ }
+
+ return NULL;
+}
+
+static void gb_svc_process_hello_deferred(struct gb_operation *operation)
+{
+ struct gb_connection *connection = operation->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ int ret;
+
+ /*
+ * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
+ * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
+ * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
+ * module.
+ *
+ * The code should be removed once SW-2217, Heuristic for UniPro
+ * Power Mode Changes is resolved.
+ */
+ ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
+ GB_SVC_UNIPRO_HS_SERIES_A,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS,
+ GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+ 2, 1,
+ 0, 0,
+ NULL, NULL);
+
+ if (ret)
+ dev_warn(&svc->dev,
+ "power mode change failed on AP to switch link: %d\n",
+ ret);
+}
+
+static void gb_svc_process_module_inserted(struct gb_operation *operation)
+{
+ struct gb_svc_module_inserted_request *request;
+ struct gb_connection *connection = operation->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_host_device *hd = svc->hd;
+ struct gb_module *module;
+ size_t num_interfaces;
+ u8 module_id;
+ u16 flags;
+ int ret;
+
+ /* The request message size has already been verified. */
+ request = operation->request->payload;
+ module_id = request->primary_intf_id;
+ num_interfaces = request->intf_count;
+ flags = le16_to_cpu(request->flags);
+
+ dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
+ __func__, module_id, num_interfaces, flags);
+
+ if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
+ dev_warn(&svc->dev, "no primary interface detected on module %u\n",
+ module_id);
+ }
+
+ module = gb_svc_module_lookup(svc, module_id);
+ if (module) {
+ dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
+ module_id);
+ return;
+ }
+
+ module = gb_module_create(hd, module_id, num_interfaces);
+ if (!module) {
+ dev_err(&svc->dev, "failed to create module\n");
+ return;
+ }
+
+ ret = gb_module_add(module);
+ if (ret) {
+ gb_module_put(module);
+ return;
+ }
+
+ list_add(&module->hd_node, &hd->modules);
+}
+
+static void gb_svc_process_module_removed(struct gb_operation *operation)
+{
+ struct gb_svc_module_removed_request *request;
+ struct gb_connection *connection = operation->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_module *module;
+ u8 module_id;
+
+ /* The request message size has already been verified. */
+ request = operation->request->payload;
+ module_id = request->primary_intf_id;
+
+ dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
+
+ module = gb_svc_module_lookup(svc, module_id);
+ if (!module) {
+ dev_warn(&svc->dev, "unexpected module-removed event %u\n",
+ module_id);
+ return;
+ }
+
+ module->disconnected = true;
+
+ gb_module_del(module);
+ list_del(&module->hd_node);
+ gb_module_put(module);
+}
+
+static void gb_svc_process_intf_oops(struct gb_operation *operation)
+{
+ struct gb_svc_intf_oops_request *request;
+ struct gb_connection *connection = operation->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_interface *intf;
+ u8 intf_id;
+ u8 reason;
+
+ /* The request message size has already been verified. */
+ request = operation->request->payload;
+ intf_id = request->intf_id;
+ reason = request->reason;
+
+ intf = gb_svc_interface_lookup(svc, intf_id);
+ if (!intf) {
+ dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
+ intf_id);
+ return;
+ }
+
+ dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
+ intf_id, reason);
+
+ mutex_lock(&intf->mutex);
+ intf->disconnected = true;
+ gb_interface_disable(intf);
+ gb_interface_deactivate(intf);
+ mutex_unlock(&intf->mutex);
+}
+
+static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
+{
+ struct gb_svc_intf_mailbox_event_request *request;
+ struct gb_connection *connection = operation->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ struct gb_interface *intf;
+ u8 intf_id;
+ u16 result_code;
+ u32 mailbox;
+
+ /* The request message size has already been verified. */
+ request = operation->request->payload;
+ intf_id = request->intf_id;
+ result_code = le16_to_cpu(request->result_code);
+ mailbox = le32_to_cpu(request->mailbox);
+
+ dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
+ __func__, intf_id, result_code, mailbox);
+
+ intf = gb_svc_interface_lookup(svc, intf_id);
+ if (!intf) {
+ dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
+ return;
+ }
+
+ gb_interface_mailbox_event(intf, result_code, mailbox);
+}
+
+static void gb_svc_process_deferred_request(struct work_struct *work)
+{
+ struct gb_svc_deferred_request *dr;
+ struct gb_operation *operation;
+ struct gb_svc *svc;
+ u8 type;
+
+ dr = container_of(work, struct gb_svc_deferred_request, work);
+ operation = dr->operation;
+ svc = gb_connection_get_data(operation->connection);
+ type = operation->request->header->type;
+
+ switch (type) {
+ case GB_SVC_TYPE_SVC_HELLO:
+ gb_svc_process_hello_deferred(operation);
+ break;
+ case GB_SVC_TYPE_MODULE_INSERTED:
+ gb_svc_process_module_inserted(operation);
+ break;
+ case GB_SVC_TYPE_MODULE_REMOVED:
+ gb_svc_process_module_removed(operation);
+ break;
+ case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+ gb_svc_process_intf_mailbox_event(operation);
+ break;
+ case GB_SVC_TYPE_INTF_OOPS:
+ gb_svc_process_intf_oops(operation);
+ break;
+ default:
+ dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
+ }
+
+ gb_operation_put(operation);
+ kfree(dr);
+}
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation)
+{
+ struct gb_svc *svc = gb_connection_get_data(operation->connection);
+ struct gb_svc_deferred_request *dr;
+
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ gb_operation_get(operation);
+
+ dr->operation = operation;
+ INIT_WORK(&dr->work, gb_svc_process_deferred_request);
+
+ queue_work(svc->wq, &dr->work);
+
+ return 0;
+}
+
+static int gb_svc_intf_reset_recv(struct gb_operation *op)
+{
+ struct gb_svc *svc = gb_connection_get_data(op->connection);
+ struct gb_message *request = op->request;
+ struct gb_svc_intf_reset_request *reset;
+ u8 intf_id;
+
+ if (request->payload_size < sizeof(*reset)) {
+ dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
+ request->payload_size, sizeof(*reset));
+ return -EINVAL;
+ }
+ reset = request->payload;
+
+ intf_id = reset->intf_id;
+
+ /* FIXME Reset the interface here */
+
+ return 0;
+}
+
+static int gb_svc_module_inserted_recv(struct gb_operation *op)
+{
+ struct gb_svc *svc = gb_connection_get_data(op->connection);
+ struct gb_svc_module_inserted_request *request;
+
+ if (op->request->payload_size < sizeof(*request)) {
+ dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+ request->primary_intf_id);
+
+ return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_module_removed_recv(struct gb_operation *op)
+{
+ struct gb_svc *svc = gb_connection_get_data(op->connection);
+ struct gb_svc_module_removed_request *request;
+
+ if (op->request->payload_size < sizeof(*request)) {
+ dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+ request->primary_intf_id);
+
+ return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_oops_recv(struct gb_operation *op)
+{
+ struct gb_svc *svc = gb_connection_get_data(op->connection);
+ struct gb_svc_intf_oops_request *request;
+
+ if (op->request->payload_size < sizeof(*request)) {
+ dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
+{
+ struct gb_svc *svc = gb_connection_get_data(op->connection);
+ struct gb_svc_intf_mailbox_event_request *request;
+
+ if (op->request->payload_size < sizeof(*request)) {
+ dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
+ op->request->payload_size, sizeof(*request));
+ return -EINVAL;
+ }
+
+ request = op->request->payload;
+
+ dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
+
+ return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_svc *svc = gb_connection_get_data(connection);
+ u8 type = op->type;
+ int ret = 0;
+
+ /*
+ * SVC requests need to follow a specific order (at least initially), and
+ * the code below takes care of enforcing that. The expected order is:
+ * - PROTOCOL_VERSION
+ * - SVC_HELLO
+ * - Any other request, except the two above.
+ *
+ * Incoming requests are guaranteed to be serialized and so we don't
+ * need to protect 'state' against races.
+ */
+ switch (type) {
+ case GB_SVC_TYPE_PROTOCOL_VERSION:
+ if (svc->state != GB_SVC_STATE_RESET)
+ ret = -EINVAL;
+ break;
+ case GB_SVC_TYPE_SVC_HELLO:
+ if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
+ ret = -EINVAL;
+ break;
+ default:
+ if (svc->state != GB_SVC_STATE_SVC_HELLO)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
+ type, svc->state);
+ return ret;
+ }
+
+ switch (type) {
+ case GB_SVC_TYPE_PROTOCOL_VERSION:
+ ret = gb_svc_version_request(op);
+ if (!ret)
+ svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
+ return ret;
+ case GB_SVC_TYPE_SVC_HELLO:
+ ret = gb_svc_hello(op);
+ if (!ret)
+ svc->state = GB_SVC_STATE_SVC_HELLO;
+ return ret;
+ case GB_SVC_TYPE_INTF_RESET:
+ return gb_svc_intf_reset_recv(op);
+ case GB_SVC_TYPE_MODULE_INSERTED:
+ return gb_svc_module_inserted_recv(op);
+ case GB_SVC_TYPE_MODULE_REMOVED:
+ return gb_svc_module_removed_recv(op);
+ case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+ return gb_svc_intf_mailbox_event_recv(op);
+ case GB_SVC_TYPE_INTF_OOPS:
+ return gb_svc_intf_oops_recv(op);
+ default:
+ dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
+ return -EINVAL;
+ }
+}
+
+static void gb_svc_release(struct device *dev)
+{
+ struct gb_svc *svc = to_gb_svc(dev);
+
+ if (svc->connection)
+ gb_connection_destroy(svc->connection);
+ ida_destroy(&svc->device_id_map);
+ destroy_workqueue(svc->wq);
+ kfree(svc);
+}
+
+struct device_type greybus_svc_type = {
+ .name = "greybus_svc",
+ .release = gb_svc_release,
+};
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd)
+{
+ struct gb_svc *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return NULL;
+
+ svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
+ if (!svc->wq) {
+ kfree(svc);
+ return NULL;
+ }
+
+ svc->dev.parent = &hd->dev;
+ svc->dev.bus = &greybus_bus_type;
+ svc->dev.type = &greybus_svc_type;
+ svc->dev.groups = svc_groups;
+ svc->dev.dma_mask = svc->dev.parent->dma_mask;
+ device_initialize(&svc->dev);
+
+ dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
+
+ ida_init(&svc->device_id_map);
+ svc->state = GB_SVC_STATE_RESET;
+ svc->hd = hd;
+
+ svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
+ gb_svc_request_handler);
+ if (IS_ERR(svc->connection)) {
+ dev_err(&svc->dev, "failed to create connection: %ld\n",
+ PTR_ERR(svc->connection));
+ goto err_put_device;
+ }
+
+ gb_connection_set_data(svc->connection, svc);
+
+ return svc;
+
+err_put_device:
+ put_device(&svc->dev);
+ return NULL;
+}
+
+int gb_svc_add(struct gb_svc *svc)
+{
+ int ret;
+
+ /*
+ * The SVC protocol is currently driven by the SVC, so the SVC device
+ * is added from the connection request handler when enough
+ * information has been received.
+ */
+ ret = gb_connection_enable(svc->connection);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void gb_svc_remove_modules(struct gb_svc *svc)
+{
+ struct gb_host_device *hd = svc->hd;
+ struct gb_module *module, *tmp;
+
+ list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
+ gb_module_del(module);
+ list_del(&module->hd_node);
+ gb_module_put(module);
+ }
+}
+
+void gb_svc_del(struct gb_svc *svc)
+{
+ gb_connection_disable_rx(svc->connection);
+
+ /*
+ * The SVC device may have been registered from the request handler.
+ */
+ if (device_is_registered(&svc->dev)) {
+ gb_timesync_svc_remove(svc);
+ gb_svc_debugfs_exit(svc);
+ gb_svc_watchdog_destroy(svc);
+ device_del(&svc->dev);
+ }
+
+ flush_workqueue(svc->wq);
+
+ gb_svc_remove_modules(svc);
+
+ gb_connection_disable(svc->connection);
+}
+
+void gb_svc_put(struct gb_svc *svc)
+{
+ put_device(&svc->dev);
+}
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
new file mode 100644
index 000000000000..d1d7ef967385
--- /dev/null
+++ b/drivers/staging/greybus/svc.h
@@ -0,0 +1,109 @@
+/*
+ * Greybus SVC code
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __SVC_H
+#define __SVC_H
+
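+/* CPort flags passed to gb_svc_connection_create() */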
+#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
+#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
+#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
+
+enum gb_svc_state {
+ GB_SVC_STATE_RESET,
+ GB_SVC_STATE_PROTOCOL_VERSION,
+ GB_SVC_STATE_SVC_HELLO,
+};
+
+enum gb_svc_watchdog_bite {
+ GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
+ GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
+};
+
+struct gb_svc_watchdog;
+
+struct svc_debugfs_pwrmon_rail {
+ u8 id;
+ struct gb_svc *svc;
+};
+
+struct gb_svc {
+ struct device dev;
+
+ struct gb_host_device *hd;
+ struct gb_connection *connection;
+ enum gb_svc_state state;
+ struct ida device_id_map;
+ struct workqueue_struct *wq;
+
+ u16 endo_id;
+ u8 ap_intf_id;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ struct gb_svc_watchdog *watchdog;
+ enum gb_svc_watchdog_bite action;
+
+ struct dentry *debugfs_dentry;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails;
+};
+#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd);
+int gb_svc_add(struct gb_svc *svc);
+void gb_svc_del(struct gb_svc *svc);
+void gb_svc_put(struct gb_svc *svc);
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+ u8 measurement_type, u32 *value);
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+ u8 intf2_id, u8 dev2_id);
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
+int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id, u8 cport_flags);
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id);
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 *value);
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 value);
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+ u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+ u8 tx_amplitude, u8 tx_hs_equalizer,
+ u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+ u8 flags, u32 quirks,
+ struct gb_svc_l2_timer_cfg *local,
+ struct gb_svc_l2_timer_cfg *remote);
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
+int gb_svc_ping(struct gb_svc *svc);
+int gb_svc_watchdog_create(struct gb_svc *svc);
+void gb_svc_watchdog_destroy(struct gb_svc *svc);
+bool gb_svc_watchdog_enabled(struct gb_svc *svc);
+int gb_svc_watchdog_enable(struct gb_svc *svc);
+int gb_svc_watchdog_disable(struct gb_svc *svc);
+int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
+ u32 strobe_delay, u32 refclk);
+int gb_svc_timesync_disable(struct gb_svc *svc);
+int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time);
+int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time);
+int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask);
+int gb_svc_timesync_wake_pins_release(struct gb_svc *svc);
+
+int gb_svc_protocol_init(void);
+void gb_svc_protocol_exit(void);
+
+#endif /* __SVC_H */
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
new file mode 100644
index 000000000000..3729460fb954
--- /dev/null
+++ b/drivers/staging/greybus/svc_watchdog.c
@@ -0,0 +1,198 @@
+/*
+ * SVC Greybus "watchdog" driver.
+ *
+ * Copyright 2016 Google Inc.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include "greybus.h"
+
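+/* How often the SVC is pinged by the watchdog work (see do_work()) */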
+#define SVC_WATCHDOG_PERIOD (2*HZ)
+
+struct gb_svc_watchdog {
+ struct delayed_work work;
+ struct gb_svc *svc;
+ bool enabled;
+ struct notifier_block pm_notifier;
+};
+
+static struct delayed_work reset_work;
+
+static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct gb_svc_watchdog *watchdog =
+ container_of(notifier, struct gb_svc_watchdog, pm_notifier);
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ gb_svc_watchdog_disable(watchdog->svc);
+ break;
+ case PM_POST_SUSPEND:
+ gb_svc_watchdog_enable(watchdog->svc);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void greybus_reset(struct work_struct *work)
+{
+ static char start_path[256] = "/system/bin/start";
+ static char *envp[] = {
+ "HOME=/",
+ "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
+ NULL,
+ };
+ static char *argv[] = {
+ start_path,
+ "unipro_reset",
+ NULL,
+ };
+
+ printk(KERN_ERR "svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
+ argv[0], argv[1]);
+ call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
+}
+
+static void do_work(struct work_struct *work)
+{
+ struct gb_svc_watchdog *watchdog;
+ struct gb_svc *svc;
+ int retval;
+
+ watchdog = container_of(work, struct gb_svc_watchdog, work.work);
+ svc = watchdog->svc;
+
+ dev_dbg(&svc->dev, "%s: ping.\n", __func__);
+ retval = gb_svc_ping(svc);
+ if (retval) {
+ /*
+ * Something went really wrong; let's warn userspace and then
+ * pull the plug and reset the whole greybus network.
+ * We need to do this outside of this workqueue as we will be
+ * tearing down the svc device itself. So queue up
+ * yet-another-callback to do that.
+ */
+ dev_err(&svc->dev,
+ "SVC ping has returned %d, something is wrong!!!\n",
+ retval);
+
+ if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
+ panic("SVC is not responding\n");
+ } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
+ dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
+
+ INIT_DELAYED_WORK(&reset_work, greybus_reset);
+ schedule_delayed_work(&reset_work, HZ / 2);
+
+ /*
+ * Disable ourselves; we don't want to trip again unless
+ * userspace wants us to.
+ */
+ watchdog->enabled = false;
+ }
+ }
+
+ /* resubmit our work to happen again, if we are still "alive" */
+ if (watchdog->enabled)
+ schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+}
+
+int gb_svc_watchdog_create(struct gb_svc *svc)
+{
+ struct gb_svc_watchdog *watchdog;
+ int retval;
+
+ if (svc->watchdog)
+ return 0;
+
+ watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
+ if (!watchdog)
+ return -ENOMEM;
+
+ watchdog->enabled = false;
+ watchdog->svc = svc;
+ INIT_DELAYED_WORK(&watchdog->work, do_work);
+ svc->watchdog = watchdog;
+
+ watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
+ retval = register_pm_notifier(&watchdog->pm_notifier);
+ if (retval) {
+ dev_err(&svc->dev, "error registering pm notifier(%d)\n",
+ retval);
+ goto svc_watchdog_create_err;
+ }
+
+ retval = gb_svc_watchdog_enable(svc);
+ if (retval) {
+ dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
+ unregister_pm_notifier(&watchdog->pm_notifier);
+ goto svc_watchdog_create_err;
+ }
+ return retval;
+
+svc_watchdog_create_err:
+ svc->watchdog = NULL;
+ kfree(watchdog);
+
+ return retval;
+}
+
+void gb_svc_watchdog_destroy(struct gb_svc *svc)
+{
+ struct gb_svc_watchdog *watchdog = svc->watchdog;
+
+ if (!watchdog)
+ return;
+
+ unregister_pm_notifier(&watchdog->pm_notifier);
+ gb_svc_watchdog_disable(svc);
+ svc->watchdog = NULL;
+ kfree(watchdog);
+}
+
+bool gb_svc_watchdog_enabled(struct gb_svc *svc)
+{
+ if (!svc || !svc->watchdog)
+ return false;
+ return svc->watchdog->enabled;
+}
+
+int gb_svc_watchdog_enable(struct gb_svc *svc)
+{
+ struct gb_svc_watchdog *watchdog;
+
+ if (!svc->watchdog)
+ return -ENODEV;
+
+ watchdog = svc->watchdog;
+ if (watchdog->enabled)
+ return 0;
+
+ watchdog->enabled = true;
+ schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+ return 0;
+}
+
+int gb_svc_watchdog_disable(struct gb_svc *svc)
+{
+ struct gb_svc_watchdog *watchdog;
+
+ if (!svc->watchdog)
+ return -ENODEV;
+
+ watchdog = svc->watchdog;
+ if (!watchdog->enabled)
+ return 0;
+
+ watchdog->enabled = false;
+ cancel_delayed_work_sync(&watchdog->work);
+ return 0;
+}
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
new file mode 100644
index 000000000000..2e68af7dea6d
--- /dev/null
+++ b/drivers/staging/greybus/timesync.c
@@ -0,0 +1,1357 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include "greybus.h"
+#include "timesync.h"
+#include "greybus_trace.h"
+
+/*
+ * A minimum inter-strobe value of one millisecond is chosen because it
+ * just about fits the common definition of a jiffy.
+ *
+ * The maximum value, on the other hand, is constrained by the width of the
+ * 16-bit up-counter used by the SVC. The SVC configures the timer in
+ * microseconds, so the maximum allowable value is 65535 microseconds. We
+ * clip that value to 10000 microseconds for the sake of using nice round
+ * base-10 numbers, and because right now there is no imaginable use-case
+ * requiring anything other than a one millisecond inter-strobe time, let
+ * alone something higher than ten milliseconds.
+ */
+#define GB_TIMESYNC_STROBE_DELAY_US 1000
+#define GB_TIMESYNC_DEFAULT_OFFSET_US 1000
+
+/* Work queue timers: long, short and the SVC strobe timeout */
+#define GB_TIMESYNC_DELAYED_WORK_LONG msecs_to_jiffies(10)
+#define GB_TIMESYNC_DELAYED_WORK_SHORT msecs_to_jiffies(1)
+#define GB_TIMESYNC_MAX_WAIT_SVC msecs_to_jiffies(5000)
+#define GB_TIMESYNC_KTIME_UPDATE msecs_to_jiffies(1000)
+#define GB_TIMESYNC_MAX_KTIME_CONVERSION 15
+
+/* Maximum number of times we'll retry a failed synchronous TimeSync operation */
+#define GB_TIMESYNC_MAX_RETRIES 5
+
+/* Reported nanoseconds/femtoseconds per clock */
+static u64 gb_timesync_ns_per_clock;
+static u64 gb_timesync_fs_per_clock;
+
+/* Maximum difference we will accept converting FrameTime to ktime */
+static u32 gb_timesync_max_ktime_diff;
+
+/* Reported clock rate */
+static unsigned long gb_timesync_clock_rate;
+
+/* Workqueue */
+static void gb_timesync_worker(struct work_struct *work);
+
+/* List of SVCs with one FrameTime per SVC */
+static LIST_HEAD(gb_timesync_svc_list);
+
+/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
+static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
+
+/* Structure to convert from FrameTime to timespec/ktime */
+struct gb_timesync_frame_time_data {
+ u64 frame_time;
+ struct timespec ts;
+};
+
+struct gb_timesync_svc {
+ struct list_head list;
+ struct list_head interface_list;
+ struct gb_svc *svc;
+ struct gb_timesync_host_device *timesync_hd;
+
+ spinlock_t spinlock; /* Per SVC spinlock to sync with ISR */
+ struct mutex mutex; /* Per SVC mutex for regular synchronization */
+
+ struct dentry *frame_time_dentry;
+ struct dentry *frame_ktime_dentry;
+ struct workqueue_struct *work_queue;
+ wait_queue_head_t wait_queue;
+ struct delayed_work delayed_work;
+ struct timer_list ktime_timer;
+
+ /* The current local FrameTime */
+ u64 frame_time_offset;
+ struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
+ struct gb_timesync_frame_time_data ktime_data;
+
+ /* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
+ u64 svc_ping_frame_time;
+ u64 ap_ping_frame_time;
+
+ /* Transitory settings */
+ u32 strobe_mask;
+ bool offset_down;
+ bool print_ping;
+ bool capture_ping;
+ int strobe;
+
+ /* Current state */
+ int state;
+};
+
+struct gb_timesync_host_device {
+ struct list_head list;
+ struct gb_host_device *hd;
+ u64 ping_frame_time;
+};
+
+struct gb_timesync_interface {
+ struct list_head list;
+ struct gb_interface *interface;
+ u64 ping_frame_time;
+};
+
+enum gb_timesync_state {
+ GB_TIMESYNC_STATE_INVALID = 0,
+ GB_TIMESYNC_STATE_INACTIVE = 1,
+ GB_TIMESYNC_STATE_INIT = 2,
+ GB_TIMESYNC_STATE_WAIT_SVC = 3,
+ GB_TIMESYNC_STATE_AUTHORITATIVE = 4,
+ GB_TIMESYNC_STATE_PING = 5,
+ GB_TIMESYNC_STATE_ACTIVE = 6,
+};
+
+static void gb_timesync_ktime_timer_fn(unsigned long data);
+
+static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
+ u64 counts)
+{
+ if (timesync_svc->offset_down)
+ return counts - timesync_svc->frame_time_offset;
+ else
+ return counts + timesync_svc->frame_time_offset;
+}
+
+/*
+ * This function provides the authoritative FrameTime to a calling function. It
+ * is designed to be lockless and should remain that way; the caller is
+ * assumed to be state-aware.
+ */
+static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
+{
+ u64 clocks = gb_timesync_platform_get_counter();
+
+ return gb_timesync_adjust_count(timesync_svc, clocks);
+}
+
+static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
+ *timesync_svc)
+{
+ queue_delayed_work(timesync_svc->work_queue,
+ &timesync_svc->delayed_work,
+ GB_TIMESYNC_MAX_WAIT_SVC);
+}
+
+static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
+ int state)
+{
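+ /*
+ * Only a subset of transitions is legal: INVALID and INACTIVE may be
+ * entered from any state (both wake any waiters), INIT from any state
+ * except INVALID, WAIT_SVC only from INIT, AUTHORITATIVE only from
+ * WAIT_SVC, PING only from ACTIVE and ACTIVE only from AUTHORITATIVE
+ * or PING. Anything else trips the WARN_ON() below.
+ */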
+ switch (state) {
+ case GB_TIMESYNC_STATE_INVALID:
+ timesync_svc->state = state;
+ wake_up(&timesync_svc->wait_queue);
+ break;
+ case GB_TIMESYNC_STATE_INACTIVE:
+ timesync_svc->state = state;
+ wake_up(&timesync_svc->wait_queue);
+ break;
+ case GB_TIMESYNC_STATE_INIT:
+ if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
+ timesync_svc->strobe = 0;
+ timesync_svc->frame_time_offset = 0;
+ timesync_svc->state = state;
+ cancel_delayed_work(&timesync_svc->delayed_work);
+ queue_delayed_work(timesync_svc->work_queue,
+ &timesync_svc->delayed_work,
+ GB_TIMESYNC_DELAYED_WORK_LONG);
+ }
+ break;
+ case GB_TIMESYNC_STATE_WAIT_SVC:
+ if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
+ timesync_svc->state = state;
+ break;
+ case GB_TIMESYNC_STATE_AUTHORITATIVE:
+ if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
+ timesync_svc->state = state;
+ cancel_delayed_work(&timesync_svc->delayed_work);
+ queue_delayed_work(timesync_svc->work_queue,
+ &timesync_svc->delayed_work, 0);
+ }
+ break;
+ case GB_TIMESYNC_STATE_PING:
+ if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
+ timesync_svc->state = state;
+ queue_delayed_work(timesync_svc->work_queue,
+ &timesync_svc->delayed_work,
+ GB_TIMESYNC_DELAYED_WORK_SHORT);
+ }
+ break;
+ case GB_TIMESYNC_STATE_ACTIVE:
+ if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
+ timesync_svc->state == GB_TIMESYNC_STATE_PING) {
+ timesync_svc->state = state;
+ wake_up(&timesync_svc->wait_queue);
+ }
+ break;
+ }
+
+ if (WARN_ON(timesync_svc->state != state)) {
+ pr_err("Invalid state transition %d=>%d\n",
+ timesync_svc->state, state);
+ }
+}
+
+static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
+ int state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+ gb_timesync_set_state(timesync_svc, state);
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+}
+
+static u64 gb_timesync_diff(u64 x, u64 y)
+{
+ if (x > y)
+ return x - y;
+ else
+ return y - x;
+}
+
+static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
+ u64 svc_frame_time, u64 ap_frame_time)
+{
+ if (svc_frame_time > ap_frame_time) {
+ svc->frame_time_offset = svc_frame_time - ap_frame_time;
+ svc->offset_down = false;
+ } else {
+ svc->frame_time_offset = ap_frame_time - svc_frame_time;
+ svc->offset_down = true;
+ }
+}
+
+/*
+ * Associate a FrameTime with a ktime timestamp represented as struct timespec
+ * Requires the calling context to hold timesync_svc->mutex
+ */
+static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
+ struct timespec ts, u64 frame_time)
+{
+ timesync_svc->ktime_data.ts = ts;
+ timesync_svc->ktime_data.frame_time = frame_time;
+}
+
+/*
+ * Find the two pulses that best-match our expected inter-strobe gap and
+ * then calculate the difference between the SVC time at the second pulse
+ * to the local time at the second pulse.
+ */
+static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
+ u64 *frame_time)
+{
+ int i = 0;
+ u64 delta, ap_frame_time;
+ u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
+ u64 least = 0;
+
+ for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
+ delta = timesync_svc->strobe_data[i].frame_time -
+ timesync_svc->strobe_data[i - 1].frame_time;
+ delta *= gb_timesync_ns_per_clock;
+ delta = gb_timesync_diff(delta, strobe_delay_ns);
+
+ if (!least || delta < least) {
+ least = delta;
+ gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
+ timesync_svc->strobe_data[i].frame_time);
+
+ ap_frame_time = timesync_svc->strobe_data[i].frame_time;
+ ap_frame_time = gb_timesync_adjust_count(timesync_svc,
+ ap_frame_time);
+ gb_timesync_store_ktime(timesync_svc,
+ timesync_svc->strobe_data[i].ts,
+ ap_frame_time);
+
+ pr_debug("adjust %s local %llu svc %llu delta %llu\n",
+ timesync_svc->offset_down ? "down" : "up",
+ timesync_svc->strobe_data[i].frame_time,
+ frame_time[i], delta);
+ }
+ }
+}
+
+static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
+{
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_interface *interface;
+ struct gb_host_device *hd;
+ int ret;
+
+ list_for_each_entry(timesync_interface,
+ &timesync_svc->interface_list, list) {
+ interface = timesync_interface->interface;
+ ret = gb_interface_timesync_disable(interface);
+ if (ret) {
+ dev_err(&interface->dev,
+ "interface timesync_disable %d\n", ret);
+ }
+ }
+
+ hd = timesync_svc->timesync_hd->hd;
+ ret = hd->driver->timesync_disable(hd);
+ if (ret < 0) {
+ dev_err(&hd->dev, "host timesync_disable %d\n",
+ ret);
+ }
+
+ gb_svc_timesync_wake_pins_release(svc);
+ gb_svc_timesync_disable(svc);
+ gb_timesync_platform_unlock_bus();
+
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+}
+
+static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
+ *timesync_svc, int ret)
+{
+ if (ret == -EAGAIN) {
+ gb_timesync_set_state(timesync_svc, timesync_svc->state);
+ } else {
+ pr_err("Failed to lock timesync bus %d\n", ret);
+ gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+ }
+}
+
+static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
+{
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_host_device *hd;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_interface *interface;
+ u64 init_frame_time;
+ unsigned long clock_rate = gb_timesync_clock_rate;
+ int ret;
+
+ /*
+ * Get access to the wake pins in the AP and SVC
+ * Release these pins either in gb_timesync_teardown() or in
+ * gb_timesync_authoritative()
+ */
+ ret = gb_timesync_platform_lock_bus(timesync_svc);
+ if (ret < 0) {
+ gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
+ return;
+ }
+ ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
+ if (ret) {
+ dev_err(&svc->dev,
+ "gb_svc_timesync_wake_pins_acquire %d\n", ret);
+ gb_timesync_teardown(timesync_svc);
+ return;
+ }
+
+ /* Choose an initial time in the future */
+ init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;
+
+ /* Send enable command to all relevant participants */
+ list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
+ list) {
+ interface = timesync_interface->interface;
+ ret = gb_interface_timesync_enable(interface,
+ GB_TIMESYNC_MAX_STROBES,
+ init_frame_time,
+ GB_TIMESYNC_STROBE_DELAY_US,
+ clock_rate);
+ if (ret) {
+ dev_err(&interface->dev,
+ "interface timesync_enable %d\n", ret);
+ }
+ }
+
+ hd = timesync_svc->timesync_hd->hd;
+ ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
+ init_frame_time,
+ GB_TIMESYNC_STROBE_DELAY_US,
+ clock_rate);
+ if (ret < 0) {
+ dev_err(&hd->dev, "host timesync_enable %d\n",
+ ret);
+ }
+
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
+ ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
+ init_frame_time,
+ GB_TIMESYNC_STROBE_DELAY_US,
+ clock_rate);
+ if (ret) {
+ dev_err(&svc->dev,
+ "gb_svc_timesync_enable %d\n", ret);
+ gb_timesync_teardown(timesync_svc);
+ return;
+ }
+
+ /* Schedule a timeout waiting for SVC to complete strobing */
+ gb_timesync_schedule_svc_timeout(timesync_svc);
+}
+
+static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
+{
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_host_device *hd;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_interface *interface;
+ u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
+ int ret;
+
+ /* Get authoritative time from SVC and adjust local clock */
+ ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
+ if (ret) {
+ dev_err(&svc->dev,
+ "gb_svc_timesync_authoritative %d\n", ret);
+ gb_timesync_teardown(timesync_svc);
+ return;
+ }
+ gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);
+
+ /* Transmit authoritative time to downstream slaves */
+ hd = timesync_svc->timesync_hd->hd;
+ ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
+ if (ret < 0)
+ dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);
+
+ list_for_each_entry(timesync_interface,
+ &timesync_svc->interface_list, list) {
+ interface = timesync_interface->interface;
+ ret = gb_interface_timesync_authoritative(
+ interface,
+ svc_frame_time);
+ if (ret) {
+ dev_err(&interface->dev,
+ "interface timesync_authoritative %d\n", ret);
+ }
+ }
+
+ /* Release wake pins */
+ gb_svc_timesync_wake_pins_release(svc);
+ gb_timesync_platform_unlock_bus();
+
+ /* Transition to state ACTIVE */
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
+
+ /* Schedule a ping to verify the synchronized system time */
+ timesync_svc->print_ping = true;
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
+}
+
+static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
+{
+ int ret = -EINVAL;
+
+ switch (timesync_svc->state) {
+ case GB_TIMESYNC_STATE_INVALID:
+ case GB_TIMESYNC_STATE_INACTIVE:
+ ret = -ENODEV;
+ break;
+ case GB_TIMESYNC_STATE_INIT:
+ case GB_TIMESYNC_STATE_WAIT_SVC:
+ case GB_TIMESYNC_STATE_AUTHORITATIVE:
+ ret = -EAGAIN;
+ break;
+ case GB_TIMESYNC_STATE_PING:
+ case GB_TIMESYNC_STATE_ACTIVE:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This routine takes a FrameTime and derives the difference with respect
+ * to a reference FrameTime/ktime pair. It then returns the calculated
+ * ktime based on the difference between the supplied FrameTime and
+ * the reference FrameTime.
+ *
+ * The time difference is calculated to six decimal places. Taking 19.2MHz
+ * as an example this means we have 52.083333~ nanoseconds per clock or
+ * 52083333~ femtoseconds per clock.
+ *
+ * Naively taking the count difference and converting to
+ * seconds/nanoseconds would quickly see the 0.0833 component produce
+ * noticeable errors. For example a time difference of one second would
+ * lose 19200000 * 0.08333x nanoseconds, or about 1.6 milliseconds.
+ *
+ * In contrast, calculating in femtoseconds the same example of 19200000 *
+ * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
+ *
+ * Continuing the example of 19.2 MHz, we cap the maximum error difference
+ * at a worst-case 0.3 microseconds over a potential calculation window of
+ * about 15 seconds, meaning you can convert a FrameTime that is <= 15
+ * seconds older/younger than the reference time with a maximum error of
+ * 0.2385 microseconds. Note 19.2MHz is an example frequency, not a requirement.
+ */
+static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
+ u64 frame_time, struct timespec *ts)
+{
+ unsigned long flags;
+ u64 delta_fs, counts, sec, nsec;
+ bool add;
+ int ret = 0;
+
+ memset(ts, 0x00, sizeof(*ts));
+ mutex_lock(&timesync_svc->mutex);
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+ ret = __gb_timesync_get_status(timesync_svc);
+ if (ret)
+ goto done;
+
+ /* Support calculating ktime upwards or downwards from the reference */
+ if (frame_time < timesync_svc->ktime_data.frame_time) {
+ add = false;
+ counts = timesync_svc->ktime_data.frame_time - frame_time;
+ } else {
+ add = true;
+ counts = frame_time - timesync_svc->ktime_data.frame_time;
+ }
+
+ /* Enforce the 0.23 microsecond error boundary @ 19.2MHz */
+ if (counts > gb_timesync_max_ktime_diff) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Determine the time difference in femtoseconds */
+ delta_fs = counts * gb_timesync_fs_per_clock;
+
+ /* Convert to seconds */
+ sec = delta_fs;
+ do_div(sec, NSEC_PER_SEC);
+ do_div(sec, 1000000UL);
+
+ /* Get the nanosecond remainder */
+ nsec = do_div(delta_fs, sec);
+ do_div(nsec, 1000000UL);
+
+ if (add) {
+ /* Add the calculated offset - overflow nanoseconds upwards */
+ ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
+ ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
+ if (ts->tv_nsec >= NSEC_PER_SEC) {
+ ts->tv_sec++;
+ ts->tv_nsec -= NSEC_PER_SEC;
+ }
+ } else {
+ /* Subtract the difference over/underflow as necessary */
+ if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
+ sec++;
+ nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
+ nsec = do_div(nsec, NSEC_PER_SEC);
+ } else {
+ nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
+ }
+ /* Cannot return a negative second value */
+ if (sec > timesync_svc->ktime_data.ts.tv_sec) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
+ ts->tv_nsec = nsec;
+ }
+done:
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+ mutex_unlock(&timesync_svc->mutex);
+ return ret;
+}
+
+static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
+ char *buf, size_t buflen)
+{
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_host_device *hd;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_interface *interface;
+ unsigned int len;
+ size_t off;
+
+ /* AP/SVC */
+ off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
+ greybus_bus_type.name,
+ timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
+ timesync_svc->svc_ping_frame_time);
+ len = buflen - off;
+
+ /* APB/GPB */
+ if (len < buflen) {
+ hd = timesync_svc->timesync_hd->hd;
+ off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
+ timesync_svc->timesync_hd->ping_frame_time);
+ len = buflen - off;
+ }
+
+ list_for_each_entry(timesync_interface,
+ &timesync_svc->interface_list, list) {
+ if (len < buflen) {
+ interface = timesync_interface->interface;
+ off += snprintf(&buf[off], len, "%s=%llu ",
+ dev_name(&interface->dev),
+ timesync_interface->ping_frame_time);
+ len = buflen - off;
+ }
+ }
+ if (len < buflen)
+ off += snprintf(&buf[off], len, "\n");
+ return off;
+}
+
+static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
+ char *buf, size_t buflen)
+{
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_host_device *hd;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_interface *interface;
+ struct timespec ts;
+ unsigned int len;
+ size_t off;
+
+ /* AP */
+ gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
+ &ts);
+ off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
+ greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
+ len = buflen - off;
+ if (len >= buflen)
+ goto done;
+
+ /* SVC */
+ gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
+ &ts);
+ off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
+ ts.tv_sec, ts.tv_nsec);
+ len = buflen - off;
+ if (len >= buflen)
+ goto done;
+
+ /* APB/GPB */
+ hd = timesync_svc->timesync_hd->hd;
+ gb_timesync_to_timespec(timesync_svc,
+ timesync_svc->timesync_hd->ping_frame_time,
+ &ts);
+ off += snprintf(&buf[off], len, "%s=%lu.%lu ",
+ dev_name(&hd->dev),
+ ts.tv_sec, ts.tv_nsec);
+ len = buflen - off;
+ if (len >= buflen)
+ goto done;
+
+ list_for_each_entry(timesync_interface,
+ &timesync_svc->interface_list, list) {
+ interface = timesync_interface->interface;
+ gb_timesync_to_timespec(timesync_svc,
+ timesync_interface->ping_frame_time,
+ &ts);
+ off += snprintf(&buf[off], len, "%s=%lu.%lu ",
+ dev_name(&interface->dev),
+ ts.tv_sec, ts.tv_nsec);
+ len = buflen - off;
+ if (len >= buflen)
+ goto done;
+ }
+ off += snprintf(&buf[off], len, "\n");
+done:
+ return off;
+}
+
+/*
+ * Send an SVC initiated wake 'ping' to each TimeSync participant.
+ * Get the FrameTime from each participant associated with the wake
+ * ping.
+ */
+static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
+{
+ struct gb_svc *svc = timesync_svc->svc;
+ struct gb_host_device *hd;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_control *control;
+ u64 *ping_frame_time;
+ int ret;
+
+ /* Get access to the wake pins in the AP and SVC */
+ ret = gb_timesync_platform_lock_bus(timesync_svc);
+ if (ret < 0) {
+ gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
+ return;
+ }
+ ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
+ if (ret) {
+ dev_err(&svc->dev,
+ "gb_svc_timesync_wake_pins_acquire %d\n", ret);
+ gb_timesync_teardown(timesync_svc);
+ return;
+ }
+
+ /* Have SVC generate a timesync ping */
+ timesync_svc->capture_ping = true;
+ timesync_svc->svc_ping_frame_time = 0;
+ ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
+ timesync_svc->capture_ping = false;
+ if (ret) {
+ dev_err(&svc->dev,
+ "gb_svc_timesync_ping %d\n", ret);
+ gb_timesync_teardown(timesync_svc);
+ return;
+ }
+
+ /* Get the ping FrameTime from each APB/GPB */
+ hd = timesync_svc->timesync_hd->hd;
+ timesync_svc->timesync_hd->ping_frame_time = 0;
+ ret = hd->driver->timesync_get_last_event(hd,
+ &timesync_svc->timesync_hd->ping_frame_time);
+ if (ret)
+ dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);
+
+ list_for_each_entry(timesync_interface,
+ &timesync_svc->interface_list, list) {
+ control = timesync_interface->interface->control;
+ timesync_interface->ping_frame_time = 0;
+ ping_frame_time = &timesync_interface->ping_frame_time;
+ ret = gb_control_timesync_get_last_event(control,
+ ping_frame_time);
+ if (ret) {
+ dev_err(&timesync_interface->interface->dev,
+ "gb_control_timesync_get_last_event %d\n", ret);
+ }
+ }
+
+ /* Ping success - move to timesync active */
+ gb_svc_timesync_wake_pins_release(svc);
+ gb_timesync_platform_unlock_bus();
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
+}
+
+static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
+{
+ char *buf;
+
+ if (!timesync_svc->print_ping)
+ return;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
+ dev_dbg(&timesync_svc->svc->dev, "%s", buf);
+ kfree(buf);
+ }
+}
+
+/*
+ * Perform the actual work of scheduled TimeSync logic.
+ */
+static void gb_timesync_worker(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gb_timesync_svc *timesync_svc =
+ container_of(delayed_work, struct gb_timesync_svc, delayed_work);
+
+ mutex_lock(&timesync_svc->mutex);
+
+ switch (timesync_svc->state) {
+ case GB_TIMESYNC_STATE_INIT:
+ gb_timesync_enable(timesync_svc);
+ break;
+
+ case GB_TIMESYNC_STATE_WAIT_SVC:
+ dev_err(&timesync_svc->svc->dev,
+ "timeout SVC strobe completion %d/%d\n",
+ timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
+ gb_timesync_teardown(timesync_svc);
+ break;
+
+ case GB_TIMESYNC_STATE_AUTHORITATIVE:
+ gb_timesync_authoritative(timesync_svc);
+ break;
+
+ case GB_TIMESYNC_STATE_PING:
+ gb_timesync_ping(timesync_svc);
+ gb_timesync_log_ping_time(timesync_svc);
+ break;
+
+ default:
+ pr_err("Invalid state %d for delayed work\n",
+ timesync_svc->state);
+ break;
+ }
+
+ mutex_unlock(&timesync_svc->mutex);
+}
+
+/*
+ * Schedule a new TimeSync INIT or PING operation serialized with respect
+ * to gb_timesync_worker().
+ */
+static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
+{
+ int ret = 0;
+
+ if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
+ return -EINVAL;
+
+ mutex_lock(&timesync_svc->mutex);
+ if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
+ gb_timesync_set_state_atomic(timesync_svc, state);
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&timesync_svc->mutex);
+ return ret;
+}
+
+static int __gb_timesync_schedule_synchronous(
+ struct gb_timesync_svc *timesync_svc, int state)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = gb_timesync_schedule(timesync_svc, state);
+ if (ret)
+ return ret;
+
+ ret = wait_event_interruptible(timesync_svc->wait_queue,
+ (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
+ timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
+ timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
+ if (ret)
+ return ret;
+
+ mutex_lock(&timesync_svc->mutex);
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+ ret = __gb_timesync_get_status(timesync_svc);
+
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+ mutex_unlock(&timesync_svc->mutex);
+
+ return ret;
+}
+
+static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
+ struct gb_host_device *hd)
+{
+ struct gb_timesync_svc *timesync_svc;
+
+ list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
+ if (timesync_svc->svc == hd->svc)
+ return timesync_svc;
+ }
+ return NULL;
+}
+
+static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
+ struct gb_timesync_svc *timesync_svc,
+ struct gb_interface *interface)
+{
+ struct gb_timesync_interface *timesync_interface;
+
+ list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
+ if (timesync_interface->interface == interface)
+ return timesync_interface;
+ }
+ return NULL;
+}
+
+int gb_timesync_schedule_synchronous(struct gb_interface *interface)
+{
+ int ret;
+ struct gb_timesync_svc *timesync_svc;
+ int retries;
+
+ if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+ return 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ ret = __gb_timesync_schedule_synchronous(timesync_svc,
+ GB_TIMESYNC_STATE_INIT);
+ if (!ret)
+ break;
+ }
+ if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
+ ret = -ETIMEDOUT;
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
+
+void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
+{
+ struct gb_timesync_svc *timesync_svc;
+
+ if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+ return;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc)
+ goto done;
+
+ gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
+
+static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
+ size_t len, loff_t *offset, bool ktime)
+{
+ struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
+ char *buf;
+ ssize_t ret = 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ mutex_lock(&timesync_svc->mutex);
+ if (list_empty(&timesync_svc->interface_list))
+ ret = -ENODEV;
+ timesync_svc->print_ping = false;
+ mutex_unlock(&timesync_svc->mutex);
+ if (ret)
+ goto done;
+
+ ret = __gb_timesync_schedule_synchronous(timesync_svc,
+ GB_TIMESYNC_STATE_PING);
+ if (ret)
+ goto done;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (ktime)
+ ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
+ else
+ ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
+ kfree(buf);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+
+static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
+ char __user *buf,
+ size_t len, loff_t *offset)
+{
+ return gb_timesync_ping_read(file, buf, len, offset, false);
+}
+
+static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
+ char __user *buf,
+ size_t len, loff_t *offset)
+{
+ return gb_timesync_ping_read(file, buf, len, offset, true);
+}
+
+static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
+ .read = gb_timesync_ping_read_frame_time,
+};
+
+static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
+ .read = gb_timesync_ping_read_frame_ktime,
+};
+
+static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
+ struct gb_host_device *hd)
+{
+ struct gb_timesync_host_device *timesync_hd;
+
+ timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
+ if (!timesync_hd)
+ return -ENOMEM;
+
+ WARN_ON(timesync_svc->timesync_hd);
+ timesync_hd->hd = hd;
+ timesync_svc->timesync_hd = timesync_hd;
+
+ return 0;
+}
+
+static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
+ struct gb_host_device *hd)
+{
+ if (timesync_svc->timesync_hd->hd == hd) {
+ kfree(timesync_svc->timesync_hd);
+ timesync_svc->timesync_hd = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+
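+/*
+ * Create the per-SVC TimeSync context: work queue, synchronization
+ * primitives, debugfs "frame-time" and "frame-ktime" entries, the
+ * host-device tracking structure and the periodic ktime reference timer.
+ */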
+int gb_timesync_svc_add(struct gb_svc *svc)
+{
+ struct gb_timesync_svc *timesync_svc;
+ int ret;
+
+ timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
+ if (!timesync_svc)
+ return -ENOMEM;
+
+ timesync_svc->work_queue =
+ create_singlethread_workqueue("gb-timesync-work_queue");
+
+ if (!timesync_svc->work_queue) {
+ kfree(timesync_svc);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ INIT_LIST_HEAD(&timesync_svc->interface_list);
+ INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
+ mutex_init(&timesync_svc->mutex);
+ spin_lock_init(&timesync_svc->spinlock);
+ init_waitqueue_head(&timesync_svc->wait_queue);
+
+ timesync_svc->svc = svc;
+ timesync_svc->frame_time_offset = 0;
+ timesync_svc->capture_ping = false;
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
+
+ timesync_svc->frame_time_dentry =
+ debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
+ timesync_svc,
+ &gb_timesync_debugfs_frame_time_ops);
+ timesync_svc->frame_ktime_dentry =
+ debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
+ timesync_svc,
+ &gb_timesync_debugfs_frame_ktime_ops);
+
+ list_add(&timesync_svc->list, &gb_timesync_svc_list);
+ ret = gb_timesync_hd_add(timesync_svc, svc->hd);
+ if (ret) {
+ list_del(&timesync_svc->list);
+ debugfs_remove(timesync_svc->frame_ktime_dentry);
+ debugfs_remove(timesync_svc->frame_time_dentry);
+ destroy_workqueue(timesync_svc->work_queue);
+ kfree(timesync_svc);
+ goto done;
+ }
+
+ init_timer(&timesync_svc->ktime_timer);
+ timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
+ timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
+ timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
+ add_timer(&timesync_svc->ktime_timer);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
+
+void gb_timesync_svc_remove(struct gb_svc *svc)
+{
+ struct gb_timesync_svc *timesync_svc;
+ struct gb_timesync_interface *timesync_interface;
+ struct gb_timesync_interface *next;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+ if (!timesync_svc)
+ goto done;
+
+ cancel_delayed_work_sync(&timesync_svc->delayed_work);
+
+ mutex_lock(&timesync_svc->mutex);
+
+ gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
+ del_timer_sync(&timesync_svc->ktime_timer);
+ gb_timesync_teardown(timesync_svc);
+
+ gb_timesync_hd_remove(timesync_svc, svc->hd);
+ list_for_each_entry_safe(timesync_interface, next,
+ &timesync_svc->interface_list, list) {
+ list_del(&timesync_interface->list);
+ kfree(timesync_interface);
+ }
+ debugfs_remove(timesync_svc->frame_ktime_dentry);
+ debugfs_remove(timesync_svc->frame_time_dentry);
+ destroy_workqueue(timesync_svc->work_queue);
+ list_del(&timesync_svc->list);
+
+ mutex_unlock(&timesync_svc->mutex);
+
+ kfree(timesync_svc);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+}
+EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
+
+/*
+ * Add a Greybus Interface to the set of TimeSync Interfaces.
+ */
+int gb_timesync_interface_add(struct gb_interface *interface)
+{
+ struct gb_timesync_svc *timesync_svc;
+ struct gb_timesync_interface *timesync_interface;
+ int ret = 0;
+
+ if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+ return 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
+ if (!timesync_interface) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ mutex_lock(&timesync_svc->mutex);
+ timesync_interface->interface = interface;
+ list_add(&timesync_interface->list, &timesync_svc->interface_list);
+ timesync_svc->strobe_mask |= 1 << interface->interface_id;
+ mutex_unlock(&timesync_svc->mutex);
+
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
+
+/*
+ * Remove a Greybus Interface from the set of TimeSync Interfaces.
+ */
+void gb_timesync_interface_remove(struct gb_interface *interface)
+{
+ struct gb_timesync_svc *timesync_svc;
+ struct gb_timesync_interface *timesync_interface;
+
+ if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
+ return;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc)
+ goto done;
+
+ timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
+ interface);
+ if (!timesync_interface)
+ goto done;
+
+ mutex_lock(&timesync_svc->mutex);
+ timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
+ list_del(&timesync_interface->list);
+ kfree(timesync_interface);
+ mutex_unlock(&timesync_svc->mutex);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+}
+EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
+
+/*
+ * Give the authoritative FrameTime to the calling function. Returns zero if we
+ * are not in GB_TIMESYNC_STATE_ACTIVE.
+ */
+static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+ if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
+ ret = __gb_timesync_get_frame_time(timesync_svc);
+ else
+ ret = 0;
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+ return ret;
+}
+
+u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
+{
+ struct gb_timesync_svc *timesync_svc;
+ u64 ret = 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc)
+ goto done;
+
+ ret = gb_timesync_get_frame_time(timesync_svc);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
+
+u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
+{
+ struct gb_timesync_svc *timesync_svc;
+ u64 ret = 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+ if (!timesync_svc)
+ goto done;
+
+ ret = gb_timesync_get_frame_time(timesync_svc);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
+
+/* Incrementally updates the conversion base from FrameTime to ktime */
+static void gb_timesync_ktime_timer_fn(unsigned long data)
+{
+ struct gb_timesync_svc *timesync_svc =
+ (struct gb_timesync_svc *)data;
+ unsigned long flags;
+ u64 frame_time;
+ struct timespec ts;
+
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+ if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
+ goto done;
+
+ ktime_get_ts(&ts);
+ frame_time = __gb_timesync_get_frame_time(timesync_svc);
+ gb_timesync_store_ktime(timesync_svc, ts, frame_time);
+
+done:
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+ mod_timer(&timesync_svc->ktime_timer,
+ jiffies + GB_TIMESYNC_KTIME_UPDATE);
+}
+
+int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
+ struct timespec *ts)
+{
+ struct gb_timesync_svc *timesync_svc;
+ int ret = 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
+ if (!timesync_svc) {
+ ret = -ENODEV;
+ goto done;
+ }
+ ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
+
+int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
+ u64 frame_time, struct timespec *ts)
+{
+ struct gb_timesync_svc *timesync_svc;
+ int ret = 0;
+
+ mutex_lock(&gb_timesync_svc_list_mutex);
+ timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
+ if (!timesync_svc) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
+done:
+ mutex_unlock(&gb_timesync_svc_list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
+
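+/*
+ * Record a TimeSync event. While waiting on the SVC each strobe's local
+ * FrameTime and ktime are captured; once all strobes have been seen the
+ * state machine moves to AUTHORITATIVE. During a ping capture the AP
+ * ping FrameTime is recorded instead.
+ */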
+void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
+{
+ unsigned long flags;
+ u64 strobe_time;
+ bool strobe_is_ping = true;
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ strobe_time = __gb_timesync_get_frame_time(timesync_svc);
+
+ spin_lock_irqsave(&timesync_svc->spinlock, flags);
+
+ if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
+ if (!timesync_svc->capture_ping)
+ goto done_nolog;
+ timesync_svc->ap_ping_frame_time = strobe_time;
+ goto done_log;
+ } else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
+ goto done_nolog;
+ }
+
+ timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
+ timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;
+
+ if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
+ gb_timesync_set_state(timesync_svc,
+ GB_TIMESYNC_STATE_AUTHORITATIVE);
+ }
+ strobe_is_ping = false;
+done_log:
+ trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
+ GB_TIMESYNC_MAX_STROBES, strobe_time);
+done_nolog:
+ spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
+}
+EXPORT_SYMBOL(gb_timesync_irq);
+
+int __init gb_timesync_init(void)
+{
+ int ret = 0;
+
+ ret = gb_timesync_platform_init();
+ if (ret) {
+ pr_err("timesync platform init fail!\n");
+ return ret;
+ }
+
+ gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();
+
+ /* Calculate nanoseconds and femtoseconds per clock */
+ gb_timesync_fs_per_clock = FSEC_PER_SEC;
+ do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
+ gb_timesync_ns_per_clock = NSEC_PER_SEC;
+ do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
+
+ /* Calculate the maximum number of clocks we will convert to ktime */
+ gb_timesync_max_ktime_diff =
+ GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;
+
+ pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
+ gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
+ return 0;
+}
+
+void gb_timesync_exit(void)
+{
+ gb_timesync_platform_exit();
+}
diff --git a/drivers/staging/greybus/timesync.h b/drivers/staging/greybus/timesync.h
new file mode 100644
index 000000000000..72fc9a35a002
--- /dev/null
+++ b/drivers/staging/greybus/timesync.h
@@ -0,0 +1,45 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#ifndef __TIMESYNC_H
+#define __TIMESYNC_H
+
+struct gb_svc;
+struct gb_interface;
+struct gb_timesync_svc;
+
+/* Platform */
+u64 gb_timesync_platform_get_counter(void);
+u32 gb_timesync_platform_get_clock_rate(void);
+int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata);
+void gb_timesync_platform_unlock_bus(void);
+
+int gb_timesync_platform_init(void);
+void gb_timesync_platform_exit(void);
+
+/* Core API */
+int gb_timesync_interface_add(struct gb_interface *interface);
+void gb_timesync_interface_remove(struct gb_interface *interface);
+int gb_timesync_svc_add(struct gb_svc *svc);
+void gb_timesync_svc_remove(struct gb_svc *svc);
+
+u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface);
+u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc);
+int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
+ struct timespec *ts);
+int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
+ u64 frame_time, struct timespec *ts);
+
+int gb_timesync_schedule_synchronous(struct gb_interface *intf);
+void gb_timesync_schedule_asynchronous(struct gb_interface *intf);
+void gb_timesync_irq(struct gb_timesync_svc *timesync_svc);
+int gb_timesync_init(void);
+void gb_timesync_exit(void);
+
+#endif /* __TIMESYNC_H */
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
new file mode 100644
index 000000000000..113f3d6c4b3a
--- /dev/null
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -0,0 +1,82 @@
+/*
+ * TimeSync API driver.
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ * This code reads directly from an ARMv7 memory-mapped timer that lives in
+ * MMIO space. Since this counter lives inside of MMIO space it's shared
+ * between cores, which means we don't have to worry about issues like the
+ * TSC on x86, where each time-stamp counter is local to a particular core.
+ *
+ * Register-level access code is based on
+ * drivers/clocksource/arm_arch_timer.c
+ */
+#include <linux/cpufreq.h>
+#include <linux/of_platform.h>
+
+#include "greybus.h"
+#include "arche_platform.h"
+
+#define DEFAULT_FRAMETIME_CLOCK_HZ 19200000
+
+static u32 gb_timesync_clock_frequency;
+int (*arche_platform_change_state_cb)(enum arche_platform_state state,
+ struct gb_timesync_svc *pdata);
+EXPORT_SYMBOL_GPL(arche_platform_change_state_cb);
+
+u64 gb_timesync_platform_get_counter(void)
+{
+ return (u64)get_cycles();
+}
+
+u32 gb_timesync_platform_get_clock_rate(void)
+{
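+ /*
+ * Prefer the clock-frequency read from the device tree at init
+ * time; otherwise fall back to the current CPU frequency and,
+ * failing that, to the default 19.2MHz FrameTime clock.
+ */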
+ if (unlikely(!gb_timesync_clock_frequency)) {
+ gb_timesync_clock_frequency = cpufreq_get(0);
+ if (!gb_timesync_clock_frequency)
+ gb_timesync_clock_frequency = DEFAULT_FRAMETIME_CLOCK_HZ;
+ }
+
+ return gb_timesync_clock_frequency;
+}
+
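+/*
+ * Ask the Arche platform driver (via the registered callback) to hold the
+ * bus in the time-sync state while strobes are generated; unlocking
+ * restores the active state.
+ */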
+int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
+{
+ return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
+ pdata);
+}
+
+void gb_timesync_platform_unlock_bus(void)
+{
+ arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
+}
+
+static const struct of_device_id arch_timer_of_match[] = {
+ { .compatible = "google,greybus-frame-time-counter", },
+ {},
+};
+
+int __init gb_timesync_platform_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, arch_timer_of_match);
+ if (!np) {
+ /* Tolerate not finding the node, to allow BBB etc. to continue */
+ pr_warn("Unable to find a compatible ARMv7 timer\n");
+ return 0;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency",
+ &gb_timesync_clock_frequency)) {
+ pr_err("Unable to find timer clock-frequency\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void gb_timesync_platform_exit(void) {}
diff --git a/drivers/staging/greybus/tools/.gitignore b/drivers/staging/greybus/tools/.gitignore
new file mode 100644
index 000000000000..023654c83068
--- /dev/null
+++ b/drivers/staging/greybus/tools/.gitignore
@@ -0,0 +1 @@
+loopback_test
diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk
new file mode 100644
index 000000000000..fdadbf611757
--- /dev/null
+++ b/drivers/staging/greybus/tools/Android.mk
@@ -0,0 +1,10 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= loopback_test.c
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := gb_loopback_test
+
+include $(BUILD_EXECUTABLE)
+
diff --git a/drivers/staging/greybus/tools/Makefile b/drivers/staging/greybus/tools/Makefile
new file mode 100644
index 000000000000..852b12b71149
--- /dev/null
+++ b/drivers/staging/greybus/tools/Makefile
@@ -0,0 +1,31 @@
+ifeq ($(strip $(V)), 1)
+ Q =
+else
+ Q = @
+endif
+
+CFLAGS += -std=gnu99 -Wall -Wextra -g \
+ -D_GNU_SOURCE \
+ -Wno-unused-parameter \
+ -Wmaybe-uninitialized \
+ -Wredundant-decls \
+ -Wcast-align \
+ -Wsign-compare \
+ -Wno-missing-field-initializers
+
+CC := $(CROSS_COMPILE)gcc
+
+TOOLS = loopback_test
+
+all: $(TOOLS)
+
+%.o: %.c ../greybus_protocols.h
+ @echo ' TARGET_CC $@'
+ $(Q)$(CC) $(CFLAGS) -c $< -o $@
+
+loopback_%: loopback_%.o
+ @echo ' TARGET_LD $@'
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@
+
+clean::
+ rm -f *.o $(TOOLS)
diff --git a/drivers/staging/greybus/tools/README.loopback b/drivers/staging/greybus/tools/README.loopback
new file mode 100644
index 000000000000..845b08dc4696
--- /dev/null
+++ b/drivers/staging/greybus/tools/README.loopback
@@ -0,0 +1,198 @@
+
+
+ 1 - LOOPBACK DRIVER
+
+The driver implements the main logic of the loopback test and provides
+sysfs files to configure the test and retrieve the results.
+A user could run a test without the test application, provided they
+understand the sysfs interface of the loopback driver.
+
+The loopback kernel driver needs to be loaded and at least one module
+with the loopback feature enabled must be present for the sysfs files to be
+created and for the loopback test application to be able to run.
+
+To load the module:
+# modprobe gb-loopback
+
+
+When the module is probed, new files become available in the sysfs
+directory of the detected loopback device
+(typically under "/sys/bus/greybus/devices").
+
+Here is a short summary of the sysfs interface files that should be visible:
+
+* Loopback Configuration Files:
+ async - Use asynchronous operations.
+ iteration_max - Number of test iterations to perform.
+ size - Payload size of the transfer.
+ timeout - The number of microseconds to give an individual
+ asynchronous request before timing out.
+ us_wait - Time to wait between two messages, in microseconds.
+ type - By writing the test type to this file, the test starts.
+ Valid tests are:
+ 0 - stop the test
+ 2 - ping
+ 3 - transfer
+ 4 - sink
+
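+For example, a minimal ping test of 10 iterations could be started through
+sysfs alone (assuming a loopback device at 1-2.17, as in the examples of
+section 3.1 below):
+
+ echo 0 > /sys/bus/greybus/devices/1-2.17/type
+ echo 10 > /sys/bus/greybus/devices/1-2.17/iteration_max
+ echo 2 > /sys/bus/greybus/devices/1-2.17/type
+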
+* Loopback feedback files:
+ error - Number of errors that have occurred.
+ iteration_count - Number of iterations performed.
+ requests_completed - Number of requests successfully completed.
+ requests_timedout - Number of requests that have timed out.
+ timeout_max - Max allowed timeout.
+ timeout_min - Min allowed timeout.
+
+* Loopback result files:
+ apbridge_unipro_latency_avg
+ apbridge_unipro_latency_max
+ apbridge_unipro_latency_min
+ gpbridge_firmware_latency_avg
+ gpbridge_firmware_latency_max
+ gpbridge_firmware_latency_min
+ requests_per_second_avg
+ requests_per_second_max
+ requests_per_second_min
+ latency_avg
+ latency_max
+ latency_min
+ throughput_avg
+ throughput_max
+ throughput_min
+
+
+
+ 2 - LOOPBACK TEST APPLICATION
+
+The loopback test application manages and formats the results provided by
+the loopback kernel module. The purpose of this application
+is to:
+ - Start and manage multiple loopback device tests concurrently.
+ - Calculate the aggregate results for multiple devices.
+ - Gather and format test results (csv or human readable).
+
+The best way to get up-to-date usage information for the application is
+usually to pass the "-h" parameter.
+Here is the summary of the available options:
+
+ Mandatory arguments
+ -t must be one of the test names - sink, transfer or ping
+ -i iteration count - the number of iterations to run the test over
+ Optional arguments
+ -S sysfs location - location for greybus 'endo' entries, default /sys/bus/greybus/devices/
+ -D debugfs location - location for loopback debugfs entries, default /sys/kernel/debug/gb_loopback/
+ -s size of data packet to send during test - defaults to zero
+ -m mask - a bit mask of connections to include. For example: -m 8 = 4th connection, -m 9 = 1st and 4th connections, etc.
+ default is zero, which means broadcast to all connections
+ -v verbose output
+ -d debug output
+ -r raw data output - when specified the full list of latency values are included in the output CSV
+ -p porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file
+ -a aggregate - show aggregation of all enabled devices
+ -l list found loopback devices and exit.
+ -x Async - Enable async transfers.
+ -o Timeout - Timeout in microseconds for async operations.
+
+
+
+ 3 - REAL WORLD EXAMPLE USAGES
+
+ 3.1 - Using the driver sysfs files to run a test on a single device:
+
+* Run 1000 transfers of a 100-byte packet. Each transfer is started only
+after the previous one has finished successfully:
+ echo 0 > /sys/bus/greybus/devices/1-2.17/type
+ echo 0 > /sys/bus/greybus/devices/1-2.17/async
+ echo 2000 > /sys/bus/greybus/devices/1-2.17/us_wait
+ echo 100 > /sys/bus/greybus/devices/1-2.17/size
+ echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
+ echo 0 > /sys/bus/greybus/devices/1-2.17/mask
+ echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
+ echo 3 > /sys/bus/greybus/devices/1-2.17/type
+
+* Run 1000 transfers of a 100-byte packet. Transfers are started without
+waiting for the previous one to finish:
+ echo 0 > /sys/bus/greybus/devices/1-2.17/type
+ echo 3 > /sys/bus/greybus/devices/1-2.17/async
+ echo 0 > /sys/bus/greybus/devices/1-2.17/us_wait
+ echo 100 > /sys/bus/greybus/devices/1-2.17/size
+ echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
+ echo 0 > /sys/bus/greybus/devices/1-2.17/mask
+ echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
+ echo 3 > /sys/bus/greybus/devices/1-2.17/type
+
+* Read the results from sysfs:
+ cat /sys/bus/greybus/devices/1-2.17/requests_per_second_min
+ cat /sys/bus/greybus/devices/1-2.17/requests_per_second_max
+ cat /sys/bus/greybus/devices/1-2.17/requests_per_second_avg
+
+ cat /sys/bus/greybus/devices/1-2.17/latency_min
+ cat /sys/bus/greybus/devices/1-2.17/latency_max
+ cat /sys/bus/greybus/devices/1-2.17/latency_avg
+
+ cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_min
+ cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_max
+ cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_avg
+
+ cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_min
+ cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_max
+ cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_avg
+
+ cat /sys/bus/greybus/devices/1-2.17/error
+ cat /sys/bus/greybus/devices/1-2.17/requests_completed
+ cat /sys/bus/greybus/devices/1-2.17/requests_timedout
+
+
+3.2 - Using the test application:
+
+* Run a transfer test of 10 iterations with size 100 bytes on all available devices:
+ #/loopback_test -t transfer -i 10 -s 100
+ 1970-1-1 0:10:7,transfer,1-4.17,100,10,0,443,509,471.700012,66,1963,2256,2124.600098,293,102776,118088,109318.898438,15312,1620,1998,1894.099976,378,56,57,56.799999,1
+ 1970-1-1 0:10:7,transfer,1-5.17,100,10,0,399,542,463.399994,143,1845,2505,2175.800049,660,92568,125744,107393.296875,33176,1469,2305,1806.500000,836,56,57,56.799999,1
+
+
+* Show the aggregate results of both devices ("-a"):
+ #/loopback_test -t transfer -i 10 -s 100 -a
+ 1970-1-1 0:10:35,transfer,1-4.17,100,10,0,448,580,494.100006,132,1722,2230,2039.400024,508,103936,134560,114515.703125,30624,1513,1980,1806.900024,467,56,57,57.299999,1
+ 1970-1-1 0:10:35,transfer,1-5.17,100,10,0,383,558,478.600006,175,1791,2606,2115.199951,815,88856,129456,110919.703125,40600,1457,2246,1773.599976,789,56,57,57.099998,1
+ 1970-1-1 0:10:35,transfer,aggregate,100,10,0,383,580,486.000000,197,1722,2606,2077.000000,884,88856,134560,112717.000000,45704,1457,2246,1789.000000,789,56,57,57.000000,1
+
+* Example usage of the mask option to select which devices will
+ run the test (1st, 2nd, or both devices):
+ # /loopback_test -t transfer -i 10 -s 100 -m 1
+ 1970-1-1 0:11:56,transfer,1-4.17,100,10,0,514,558,544.900024,44,1791,1943,1836.599976,152,119248,129456,126301.296875,10208,1600,1001609,101613.601562,1000009,56,57,56.900002,1
+ # /loopback_test -t transfer -i 10 -s 100 -m 2
+ 1970-1-1 0:12:0,transfer,1-5.17,100,10,0,468,554,539.000000,86,1804,2134,1859.500000,330,108576,128528,124932.500000,19952,1606,1626,1619.300049,20,56,57,57.400002,1
+ # /loopback_test -t transfer -i 10 -s 100 -m 3
+ 1970-1-1 0:12:3,transfer,1-4.17,100,10,0,432,510,469.399994,78,1959,2313,2135.800049,354,100224,118320,108785.296875,18096,1610,2024,1893.500000,414,56,57,57.200001,1
+ 1970-1-1 0:12:3,transfer,1-5.17,100,10,0,404,542,468.799988,138,1843,2472,2152.500000,629,93728,125744,108646.101562,32016,1504,2247,1853.099976,743,56,57,57.099998,1
+
+* Show output in human-readable format ("-p"):
+ # /loopback_test -t transfer -i 10 -s 100 -m 3 -p
+
+ 1970-1-1 0:12:37
+ test: transfer
+ path: 1-4.17
+ size: 100
+ iterations: 10
+ errors: 0
+ async: Disabled
+ requests per-sec: min=390, max=547, average=469.299988, jitter=157
+ ap-throughput B/s: min=90480 max=126904 average=108762.101562 jitter=36424
+ ap-latency usec: min=1826 max=2560 average=2146.000000 jitter=734
+ apbridge-latency usec: min=1620 max=1982 average=1882.099976 jitter=362
+ gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1
+
+
+ 1970-1-1 0:12:37
+ test: transfer
+ path: 1-5.17
+ size: 100
+ iterations: 10
+ errors: 0
+ async: Disabled
+ requests per-sec: min=397, max=538, average=461.700012, jitter=141
+ ap-throughput B/s: min=92104 max=124816 average=106998.898438 jitter=32712
+ ap-latency usec: min=1856 max=2514 average=2185.699951 jitter=658
+ apbridge-latency usec: min=1460 max=2296 average=1828.599976 jitter=836
+ gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1
diff --git a/drivers/staging/greybus/tools/lbtest b/drivers/staging/greybus/tools/lbtest
new file mode 100755
index 000000000000..d7353f1a2a6f
--- /dev/null
+++ b/drivers/staging/greybus/tools/lbtest
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google, Inc.
+# Copyright (c) 2015 Linaro, Ltd.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import print_function
+import csv
+import datetime
+import sys
+import time
+
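+# Test names mapped to the values written to the loopback 'type' sysfs file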
+dict = {'ping': '2', 'transfer': '3', 'sink': '4'}
+verbose = 1
+
+def abort():
+ sys.exit(1)
+
+def usage():
+ print('Usage: looptest TEST SIZE ITERATIONS PATH\n\n'
+ ' Run TEST for a number of ITERATIONS with operation data SIZE bytes\n'
+ ' TEST may be \'ping\' \'transfer\' or \'sink\'\n'
+ ' SIZE indicates the size of transfer <= greybus max payload bytes\n'
+ ' ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n'
+ ' Note if ITERATIONS is set to zero then this utility will\n'
+ ' initiate an infinite (non terminating) test and exit\n'
+ ' without logging any metrics data\n'
+ ' PATH indicates the sysfs path for the loopback greybus entries e.g.\n'
+ ' /sys/bus/greybus/devices/endo0:1:1:1:1/\n'
+ 'Examples:\n'
+ ' looptest transfer 128 10000\n'
+ ' looptest ping 0 128\n'
+ ' looptest sink 2030 32768\n'
+ .format(sys.argv[0]), file=sys.stderr)
+
+ abort()
+
+def read_sysfs_int(path):
+ try:
+ f = open(path, "r")
+ val = f.read()
+ f.close()
+ return int(val)
+ except IOError as e:
+ print("I/O error({0}): {1}".format(e.errno, e.strerror))
+ print("Invalid path %s" % path)
+
+def write_sysfs_val(path, val):
+ try:
+ f = open(path, "r+")
+ f.write(val)
+ f.close()
+ except IOError as e:
+ print("I/O error({0}): {1}".format(e.errno, e.strerror))
+ print("Invalid path %s" % path)
+
+def log_csv(test_name, size, iteration_max, sys_pfx):
+ # file name will be test_name_size_iteration_max.csv
+ # every time the same test with the same parameters is run we will then
+ # append to the same CSV with datestamp - representing each test dataset
+ fname = test_name + '_' + size + '_' + str(iteration_max) + '.csv'
+
+ try:
+ # gather data set
+ date = str(datetime.datetime.now())
+ error = read_sysfs_int(sys_pfx + 'error')
+ request_min = read_sysfs_int(sys_pfx + 'requests_per_second_min')
+ request_max = read_sysfs_int(sys_pfx + 'requests_per_second_max')
+ request_avg = read_sysfs_int(sys_pfx + 'requests_per_second_avg')
+ latency_min = read_sysfs_int(sys_pfx + 'latency_min')
+ latency_max = read_sysfs_int(sys_pfx + 'latency_max')
+ latency_avg = read_sysfs_int(sys_pfx + 'latency_avg')
+ throughput_min = read_sysfs_int(sys_pfx + 'throughput_min')
+ throughput_max = read_sysfs_int(sys_pfx + 'throughput_max')
+ throughput_avg = read_sysfs_int(sys_pfx + 'throughput_avg')
+
+ # derive jitter
+ request_jitter = request_max - request_min
+ latency_jitter = latency_max - latency_min
+ throughput_jitter = throughput_max - throughput_min
+
+ # append data set to file
+ with open(fname, 'a') as csvf:
+ row = csv.writer(csvf, delimiter=",", quotechar="'",
+ quoting=csv.QUOTE_MINIMAL)
+ row.writerow([date, test_name, size, iteration_max, error,
+ request_min, request_max, request_avg, request_jitter,
+ latency_min, latency_max, latency_avg, latency_jitter,
+ throughput_min, throughput_max, throughput_avg, throughput_jitter])
+ except IOError as e:
+ print("I/O error({0}): {1}".format(e.errno, e.strerror))
+
+def loopback_run(test_name, size, iteration_max, sys_pfx):
+ test_id = test_map[test_name]
+ try:
+ # Terminate any currently running test
+ write_sysfs_val(sys_pfx + 'type', '0')
+ # Set parameter for no wait between messages
+ write_sysfs_val(sys_pfx + 'ms_wait', '0')
+ # Set operation size
+ write_sysfs_val(sys_pfx + 'size', size)
+ # Set iterations
+ write_sysfs_val(sys_pfx + 'iteration_max', str(iteration_max))
+ # Initiate by setting loopback operation type
+ write_sysfs_val(sys_pfx + 'type', test_id)
+ time.sleep(1)
+
+ if iteration_max == 0:
+ print ("Infinite test initiated CSV won't be logged\n")
+ return
+
+ previous = 0
+ err = 0
+ while True:
+ # get the current count; bail out if it hasn't changed
+ iteration_count = read_sysfs_int(sys_pfx + 'iteration_count')
+ if previous == iteration_count:
+ err = 1
+ break
+ elif iteration_count == iteration_max:
+ break
+ previous = iteration_count
+ if verbose:
+ print('%02d%% complete %d of %d ' %
+ (100 * iteration_count / iteration_max,
+ iteration_count, iteration_max))
+ time.sleep(1)
+ if err:
+ print('\nError executing test\n')
+ else:
+ log_csv(test_name, size, iteration_max, sys_pfx)
+ except ValueError as ve:
+ print("Error: %s" % ve, file=sys.stderr)
+ abort()
+
+def main():
+ if len(sys.argv) < 5:
+ usage()
+
+ if sys.argv[1] in test_map:
+ loopback_run(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4])
+ else:
+ usage()
+if __name__ == '__main__':
+ main()
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
new file mode 100644
index 000000000000..f7f4cd6fb55b
--- /dev/null
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -0,0 +1,1000 @@
+/*
+ * Loopback test application
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ *
+ * Provided under the three clause BSD license found in the LICENSE file.
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <signal.h>
+
+#define MAX_NUM_DEVICES 10
+#define MAX_SYSFS_PATH 0x200
+#define CSV_MAX_LINE 0x1000
+#define SYSFS_MAX_INT 0x20
+#define MAX_STR_LEN 255
+#define DEFAULT_ASYNC_TIMEOUT 200000
+
+struct dict {
+ char *name;
+ int type;
+};
+
+static struct dict dict[] = {
+ {"ping", 2},
+ {"transfer", 3},
+ {"sink", 4},
+ {NULL,} /* list termination */
+};
+
+struct loopback_results {
+ float latency_avg;
+ uint32_t latency_max;
+ uint32_t latency_min;
+ uint32_t latency_jitter;
+
+ float request_avg;
+ uint32_t request_max;
+ uint32_t request_min;
+ uint32_t request_jitter;
+
+ float throughput_avg;
+ uint32_t throughput_max;
+ uint32_t throughput_min;
+ uint32_t throughput_jitter;
+
+ float apbridge_unipro_latency_avg;
+ uint32_t apbridge_unipro_latency_max;
+ uint32_t apbridge_unipro_latency_min;
+ uint32_t apbridge_unipro_latency_jitter;
+
+ float gbphy_firmware_latency_avg;
+ uint32_t gbphy_firmware_latency_max;
+ uint32_t gbphy_firmware_latency_min;
+ uint32_t gbphy_firmware_latency_jitter;
+
+ uint32_t error;
+};
+
+struct loopback_device {
+ char name[MAX_SYSFS_PATH];
+ char sysfs_entry[MAX_SYSFS_PATH];
+ char debugfs_entry[MAX_SYSFS_PATH];
+ struct loopback_results results;
+};
+
+struct loopback_test {
+ int verbose;
+ int debug;
+ int raw_data_dump;
+ int porcelain;
+ int mask;
+ int size;
+ int iteration_max;
+ int aggregate_output;
+ int test_id;
+ int device_count;
+ int list_devices;
+ int use_async;
+ int async_timeout;
+ int async_outstanding_operations;
+ int us_wait;
+ int file_output;
+ int stop_all;
+ int poll_count;
+ char test_name[MAX_STR_LEN];
+ char sysfs_prefix[MAX_SYSFS_PATH];
+ char debugfs_prefix[MAX_SYSFS_PATH];
+ struct timespec poll_timeout;
+ struct loopback_device devices[MAX_NUM_DEVICES];
+ struct loopback_results aggregate_results;
+ struct pollfd fds[MAX_NUM_DEVICES];
+};
+
+struct loopback_test t;
+
+/* Helper macros to calculate the aggregate results for all devices */
+static inline int device_enabled(struct loopback_test *t, int dev_idx);
+
+#define GET_MAX(field) \
+static int get_##field##_aggregate(struct loopback_test *t) \
+{ \
+ uint32_t max = 0; \
+ int i; \
+ for (i = 0; i < t->device_count; i++) { \
+ if (!device_enabled(t, i)) \
+ continue; \
+ if (t->devices[i].results.field > max) \
+ max = t->devices[i].results.field; \
+ } \
+ return max; \
+} \
+
+#define GET_MIN(field) \
+static int get_##field##_aggregate(struct loopback_test *t) \
+{ \
+ uint32_t min = ~0; \
+ int i; \
+ for (i = 0; i < t->device_count; i++) { \
+ if (!device_enabled(t, i)) \
+ continue; \
+ if (t->devices[i].results.field < min) \
+ min = t->devices[i].results.field; \
+ } \
+ return min; \
+} \
+
+#define GET_AVG(field) \
+static int get_##field##_aggregate(struct loopback_test *t) \
+{ \
+ uint32_t val = 0; \
+ uint32_t count = 0; \
+ int i; \
+ for (i = 0; i < t->device_count; i++) { \
+ if (!device_enabled(t, i)) \
+ continue; \
+ count++; \
+ val += t->devices[i].results.field; \
+ } \
+ if (count) \
+ val /= count; \
+ return val; \
+} \
+
+GET_MAX(throughput_max);
+GET_MAX(request_max);
+GET_MAX(latency_max);
+GET_MAX(apbridge_unipro_latency_max);
+GET_MAX(gbphy_firmware_latency_max);
+GET_MIN(throughput_min);
+GET_MIN(request_min);
+GET_MIN(latency_min);
+GET_MIN(apbridge_unipro_latency_min);
+GET_MIN(gbphy_firmware_latency_min);
+GET_AVG(throughput_avg);
+GET_AVG(request_avg);
+GET_AVG(latency_avg);
+GET_AVG(apbridge_unipro_latency_avg);
+GET_AVG(gbphy_firmware_latency_avg);
+
+void abort()
+{
+ _exit(1);
+}
+
+void usage(void)
+{
+ fprintf(stderr, "Usage: loopback_test TEST [SIZE] ITERATIONS [SYSPATH] [DBGPATH]\n\n"
+ " Run TEST for a number of ITERATIONS with operation data SIZE bytes\n"
+ " TEST may be \'ping\' \'transfer\' or \'sink\'\n"
+ " SIZE indicates the size of transfer <= greybus max payload bytes\n"
+ " ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n"
+ " Note if ITERATIONS is set to zero then this utility will\n"
+ " initiate an infinite (non terminating) test and exit\n"
+ " without logging any metrics data\n"
+ " SYSPATH indicates the sysfs path for the loopback greybus entries e.g.\n"
+ " /sys/bus/greybus/devices\n"
+ " DBGPATH indicates the debugfs path for the loopback greybus entries e.g.\n"
+ " /sys/kernel/debug/gb_loopback/\n"
+ " Mandatory arguments\n"
+ " -t must be one of the test names - sink, transfer or ping\n"
+ " -i iteration count - the number of iterations to run the test over\n"
+ " Optional arguments\n"
+ " -S sysfs location - location for greybus 'endo' entires default /sys/bus/greybus/devices/\n"
+ " -D debugfs location - location for loopback debugfs entries default /sys/kernel/debug/gb_loopback/\n"
+ " -s size of data packet to send during test - defaults to zero\n"
+ " -m mask - a bit mask of connections to include example: -m 8 = 4th connection -m 9 = 1st and 4th connection etc\n"
+ " default is zero which means broadcast to all connections\n"
+ " -v verbose output\n"
+ " -d debug output\n"
+ " -r raw data output - when specified the full list of latency values are included in the output CSV\n"
+ " -p porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file\n"
+ " -a aggregate - show aggregation of all enabled devices\n"
+ " -l list found loopback devices and exit\n"
+ " -x Async - Enable async transfers\n"
+ " -o Async Timeout - Timeout in uSec for async operations\n"
+ " -O Poll loop time out in seconds(max time a test is expected to last, default: 30sec)\n"
+ " -c Max number of outstanding operations for async operations\n"
+ " -w Wait in uSec between operations\n"
+ " -z Enable output to a CSV file (incompatible with -p)\n"
+ " -f When starting new loopback test, stop currently running tests on all devices\n"
+ "Examples:\n"
+ " Send 10000 transfers with a packet size of 128 bytes to all active connections\n"
+ " loopback_test -t transfer -s 128 -i 10000 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
+ " loopback_test -t transfer -s 128 -i 10000 -m 0\n"
+ " Send 10000 transfers with a packet size of 128 bytes to connection 1 and 4\n"
+ " loopback_test -t transfer -s 128 -i 10000 -m 9\n"
+ " loopback_test -t ping -s 0 128 -i -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
+ " loopback_test -t sink -s 2030 -i 32768 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n");
+ abort();
+}
+
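+/*
+ * A mask of zero selects every discovered device; otherwise bit N of the
+ * mask enables device N.
+ */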
+static inline int device_enabled(struct loopback_test *t, int dev_idx)
+{
+ if (!t->mask || (t->mask & (1 << dev_idx)))
+ return 1;
+
+ return 0;
+}
+
+static void show_loopback_devices(struct loopback_test *t)
+{
+ int i;
+
+ if (t->device_count == 0) {
+ printf("No loopback devices.\n");
+ return;
+ }
+
+ for (i = 0; i < t->device_count; i++)
+ printf("device[%d] = %s\n", i, t->devices[i].name);
+
+}
+
+int open_sysfs(const char *sys_pfx, const char *node, int flags)
+{
+ int fd;
+ char path[MAX_SYSFS_PATH];
+
+ snprintf(path, sizeof(path), "%s%s", sys_pfx, node);
+ fd = open(path, flags);
+ if (fd < 0) {
+ fprintf(stderr, "unable to open %s\n", path);
+ abort();
+ }
+ return fd;
+}
+
+int read_sysfs_int_fd(int fd, const char *sys_pfx, const char *node)
+{
+ char buf[SYSFS_MAX_INT];
+
+ if (read(fd, buf, sizeof(buf)) < 0) {
+ fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
+ strerror(errno));
+ close(fd);
+ abort();
+ }
+ return atoi(buf);
+}
+
+float read_sysfs_float_fd(int fd, const char *sys_pfx, const char *node)
+{
+ char buf[SYSFS_MAX_INT];
+
+ if (read(fd, buf, sizeof(buf)) < 0) {
+
+ fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
+ strerror(errno));
+ close(fd);
+ abort();
+ }
+ return atof(buf);
+}
+
+int read_sysfs_int(const char *sys_pfx, const char *node)
+{
+ int fd, val;
+
+ fd = open_sysfs(sys_pfx, node, O_RDONLY);
+ val = read_sysfs_int_fd(fd, sys_pfx, node);
+ close(fd);
+ return val;
+}
+
+float read_sysfs_float(const char *sys_pfx, const char *node)
+{
+ int fd;
+ float val;
+
+ fd = open_sysfs(sys_pfx, node, O_RDONLY);
+ val = read_sysfs_float_fd(fd, sys_pfx, node);
+ close(fd);
+ return val;
+}
+
+void write_sysfs_val(const char *sys_pfx, const char *node, int val)
+{
+ int fd, len;
+ char buf[SYSFS_MAX_INT];
+
+ fd = open_sysfs(sys_pfx, node, O_RDWR);
+ len = snprintf(buf, sizeof(buf), "%d", val);
+ if (write(fd, buf, len) < 0) {
+ fprintf(stderr, "unable to write to %s%s %s\n", sys_pfx, node,
+ strerror(errno));
+ close(fd);
+ abort();
+ }
+ close(fd);
+}
+
+static int get_results(struct loopback_test *t)
+{
+ struct loopback_device *d;
+ struct loopback_results *r;
+ int i;
+
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ d = &t->devices[i];
+ r = &d->results;
+
+ r->error = read_sysfs_int(d->sysfs_entry, "error");
+ r->request_min = read_sysfs_int(d->sysfs_entry, "requests_per_second_min");
+ r->request_max = read_sysfs_int(d->sysfs_entry, "requests_per_second_max");
+ r->request_avg = read_sysfs_float(d->sysfs_entry, "requests_per_second_avg");
+
+ r->latency_min = read_sysfs_int(d->sysfs_entry, "latency_min");
+ r->latency_max = read_sysfs_int(d->sysfs_entry, "latency_max");
+ r->latency_avg = read_sysfs_float(d->sysfs_entry, "latency_avg");
+
+ r->throughput_min = read_sysfs_int(d->sysfs_entry, "throughput_min");
+ r->throughput_max = read_sysfs_int(d->sysfs_entry, "throughput_max");
+ r->throughput_avg = read_sysfs_float(d->sysfs_entry, "throughput_avg");
+
+ r->apbridge_unipro_latency_min =
+ read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_min");
+ r->apbridge_unipro_latency_max =
+ read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_max");
+ r->apbridge_unipro_latency_avg =
+ read_sysfs_float(d->sysfs_entry, "apbridge_unipro_latency_avg");
+
+ r->gbphy_firmware_latency_min =
+ read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_min");
+ r->gbphy_firmware_latency_max =
+ read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_max");
+ r->gbphy_firmware_latency_avg =
+ read_sysfs_float(d->sysfs_entry, "gbphy_firmware_latency_avg");
+
+ r->request_jitter = r->request_max - r->request_min;
+ r->latency_jitter = r->latency_max - r->latency_min;
+ r->throughput_jitter = r->throughput_max - r->throughput_min;
+ r->apbridge_unipro_latency_jitter =
+ r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
+ r->gbphy_firmware_latency_jitter =
+ r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;
+
+ }
+
+ /* calculate the aggregate results of all enabled devices */
+ if (t->aggregate_output) {
+ r = &t->aggregate_results;
+
+ r->request_min = get_request_min_aggregate(t);
+ r->request_max = get_request_max_aggregate(t);
+ r->request_avg = get_request_avg_aggregate(t);
+
+ r->latency_min = get_latency_min_aggregate(t);
+ r->latency_max = get_latency_max_aggregate(t);
+ r->latency_avg = get_latency_avg_aggregate(t);
+
+ r->throughput_min = get_throughput_min_aggregate(t);
+ r->throughput_max = get_throughput_max_aggregate(t);
+ r->throughput_avg = get_throughput_avg_aggregate(t);
+
+ r->apbridge_unipro_latency_min =
+ get_apbridge_unipro_latency_min_aggregate(t);
+ r->apbridge_unipro_latency_max =
+ get_apbridge_unipro_latency_max_aggregate(t);
+ r->apbridge_unipro_latency_avg =
+ get_apbridge_unipro_latency_avg_aggregate(t);
+
+ r->gbphy_firmware_latency_min =
+ get_gbphy_firmware_latency_min_aggregate(t);
+ r->gbphy_firmware_latency_max =
+ get_gbphy_firmware_latency_max_aggregate(t);
+ r->gbphy_firmware_latency_avg =
+ get_gbphy_firmware_latency_avg_aggregate(t);
+
+ r->request_jitter = r->request_max - r->request_min;
+ r->latency_jitter = r->latency_max - r->latency_min;
+ r->throughput_jitter = r->throughput_max - r->throughput_min;
+ r->apbridge_unipro_latency_jitter =
+ r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
+ r->gbphy_firmware_latency_jitter =
+ r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;
+
+ }
+
+ return 0;
+}
+
+void log_csv_error(int len, int err)
+{
+ fprintf(stderr, "unable to write %d bytes to csv %s\n", len,
+ strerror(err));
+}
+
+int format_output(struct loopback_test *t,
+ struct loopback_results *r,
+ const char *dev_name,
+ char *buf, int buf_len,
+ struct tm *tm)
+{
+ int len = 0;
+
+ memset(buf, 0x00, buf_len);
+ len = snprintf(buf, buf_len, "%u-%u-%u %u:%u:%u",
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
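+ /* Porcelain mode prints a human-readable block; otherwise emit one CSV row */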
+ if (t->porcelain) {
+ len += snprintf(&buf[len], buf_len - len,
+ "\n test:\t\t\t%s\n path:\t\t\t%s\n size:\t\t\t%u\n iterations:\t\t%u\n errors:\t\t%u\n async:\t\t\t%s\n",
+ t->test_name,
+ dev_name,
+ t->size,
+ t->iteration_max,
+ r->error,
+ t->use_async ? "Enabled" : "Disabled");
+
+ len += snprintf(&buf[len], buf_len - len,
+ " requests per-sec:\tmin=%u, max=%u, average=%f, jitter=%u\n",
+ r->request_min,
+ r->request_max,
+ r->request_avg,
+ r->request_jitter);
+
+ len += snprintf(&buf[len], buf_len - len,
+ " ap-throughput B/s:\tmin=%u max=%u average=%f jitter=%u\n",
+ r->throughput_min,
+ r->throughput_max,
+ r->throughput_avg,
+ r->throughput_jitter);
+ len += snprintf(&buf[len], buf_len - len,
+ " ap-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+ r->latency_min,
+ r->latency_max,
+ r->latency_avg,
+ r->latency_jitter);
+ len += snprintf(&buf[len], buf_len - len,
+ " apbridge-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+ r->apbridge_unipro_latency_min,
+ r->apbridge_unipro_latency_max,
+ r->apbridge_unipro_latency_avg,
+ r->apbridge_unipro_latency_jitter);
+
+ len += snprintf(&buf[len], buf_len - len,
+ " gbphy-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
+ r->gbphy_firmware_latency_min,
+ r->gbphy_firmware_latency_max,
+ r->gbphy_firmware_latency_avg,
+ r->gbphy_firmware_latency_jitter);
+
+ } else {
+ len += snprintf(&buf[len], buf_len- len, ",%s,%s,%u,%u,%u",
+ t->test_name, dev_name, t->size, t->iteration_max,
+ r->error);
+
+ len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+ r->request_min,
+ r->request_max,
+ r->request_avg,
+ r->request_jitter);
+
+ len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+ r->latency_min,
+ r->latency_max,
+ r->latency_avg,
+ r->latency_jitter);
+
+ len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+ r->throughput_min,
+ r->throughput_max,
+ r->throughput_avg,
+ r->throughput_jitter);
+
+ len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+ r->apbridge_unipro_latency_min,
+ r->apbridge_unipro_latency_max,
+ r->apbridge_unipro_latency_avg,
+ r->apbridge_unipro_latency_jitter);
+
+ len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
+ r->gbphy_firmware_latency_min,
+ r->gbphy_firmware_latency_max,
+ r->gbphy_firmware_latency_avg,
+ r->gbphy_firmware_latency_jitter);
+ }
+
+ printf("\n%s\n", buf);
+
+ return len;
+}
+
+static int log_results(struct loopback_test *t)
+{
+ int fd, i, len, ret;
+ struct tm tm;
+ time_t local_time;
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ char file_name[MAX_SYSFS_PATH];
+ char data[CSV_MAX_LINE];
+
+ local_time = time(NULL);
+ tm = *localtime(&local_time);
+
+ /*
+ * file name will be test_name_size_iteration_max.csv
+ * every time the same test with the same parameters is run we will then
+ * append to the same CSV with datestamp - representing each test
+ * dataset.
+ */
+ if (t->file_output && !t->porcelain) {
+ snprintf(file_name, sizeof(file_name), "%s_%d_%d.csv",
+ t->test_name, t->size, t->iteration_max);
+
+ fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, mode);
+ if (fd < 0) {
+ fprintf(stderr, "unable to open %s for appendation\n", file_name);
+ abort();
+ }
+
+ }
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ len = format_output(t, &t->devices[i].results,
+ t->devices[i].name,
+ data, sizeof(data), &tm);
+ if (t->file_output && !t->porcelain) {
+ ret = write(fd, data, len);
+ if (ret == -1)
+ fprintf(stderr, "unable to write %d bytes to csv.\n", len);
+ }
+
+ }
+
+
+ if (t->aggregate_output) {
+ len = format_output(t, &t->aggregate_results, "aggregate",
+ data, sizeof(data), &tm);
+ if (t->file_output && !t->porcelain) {
+ ret = write(fd, data, len);
+ if (ret == -1)
+ fprintf(stderr, "unable to write %d bytes to csv.\n", len);
+ }
+ }
+
+ if (t->file_output && !t->porcelain)
+ close(fd);
+
+ return 0;
+}
+
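+/* A sysfs entry is a loopback device if it exposes an iteration_count file. */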
+int is_loopback_device(const char *path, const char *node)
+{
+ char file[MAX_SYSFS_PATH];
+
+ snprintf(file, MAX_SYSFS_PATH, "%s%s/iteration_count", path, node);
+ if (access(file, F_OK) == 0)
+ return 1;
+ return 0;
+}
+
+int find_loopback_devices(struct loopback_test *t)
+{
+ struct dirent **namelist;
+ int i, n, ret;
+ unsigned int dev_id;
+ struct loopback_device *d;
+
+ n = scandir(t->sysfs_prefix, &namelist, NULL, alphasort);
+ if (n < 0) {
+ perror("scandir");
+ ret = -ENODEV;
+ goto baddir;
+ }
+
+ /* Don't include '.' and '..' */
+ if (n <= 2) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < n; i++) {
+ ret = sscanf(namelist[i]->d_name, "gb_loopback%u", &dev_id);
+ if (ret != 1)
+ continue;
+
+ if (!is_loopback_device(t->sysfs_prefix, namelist[i]->d_name))
+ continue;
+
+ if (t->device_count == MAX_NUM_DEVICES) {
+ fprintf(stderr, "max number of devices reached!\n");
+ break;
+ }
+
+ d = &t->devices[t->device_count++];
+ snprintf(d->name, MAX_STR_LEN, "gb_loopback%u", dev_id);
+
+ snprintf(d->sysfs_entry, MAX_SYSFS_PATH, "%s%s/",
+ t->sysfs_prefix, d->name);
+
+ snprintf(d->debugfs_entry, MAX_SYSFS_PATH, "%sraw_latency_%s",
+ t->debugfs_prefix, d->name);
+
+ if (t->debug)
+ printf("add %s %s\n", d->sysfs_entry,
+ d->debugfs_entry);
+ }
+
+ ret = 0;
+done:
+ for (i = 0; i < n; i++)
+ free(namelist[i]);
+ free(namelist);
+baddir:
+ return ret;
+}
+
+static int open_poll_files(struct loopback_test *t)
+{
+ struct loopback_device *dev;
+ char buf[MAX_STR_LEN];
+ char dummy;
+ int fds_idx = 0;
+ int i;
+
+ for (i = 0; i < t->device_count; i++) {
+ dev = &t->devices[i];
+
+ if (!device_enabled(t, i))
+ continue;
+
+ snprintf(buf, sizeof(buf), "%s%s", dev->sysfs_entry, "iteration_count");
+ t->fds[fds_idx].fd = open(buf, O_RDONLY);
+ if (t->fds[fds_idx].fd < 0) {
+ fprintf(stderr, "Error opening poll file!\n");
+ goto err;
+ }
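+ /* Dummy read so only subsequent iteration_count updates raise POLLPRI */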
+ read(t->fds[fds_idx].fd, &dummy, 1);
+ t->fds[fds_idx].events = POLLERR|POLLPRI;
+ t->fds[fds_idx].revents = 0;
+ fds_idx++;
+ }
+
+ t->poll_count = fds_idx;
+
+ return 0;
+
+err:
+ for (i = 0; i < fds_idx; i++)
+ close(t->fds[fds_idx].fd);
+
+ return -1;
+}
+
+static int close_poll_files(struct loopback_test *t)
+{
+ int i;
+ for (i = 0; i < t->poll_count; i++)
+ close(t->fds[i].fd);
+
+ return 0;
+}
+
+static int is_complete(struct loopback_test *t)
+{
+ int iteration_count;
+ int i;
+
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ iteration_count = read_sysfs_int(t->devices[i].sysfs_entry,
+ "iteration_count");
+
+ /* at least one device did not finish yet */
+ if (iteration_count != t->iteration_max)
+ return 0;
+ }
+
+ return 1;
+}
+
+static void stop_tests(struct loopback_test *t)
+{
+ int i;
+
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+ write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
+ }
+}
+
+static void handler(int sig) { /* do nothing */ }
+
+static int wait_for_complete(struct loopback_test *t)
+{
+ int number_of_events = 0;
+ char dummy;
+ int ret;
+ int i;
+ struct timespec *ts = NULL;
+ struct sigaction sa;
+ sigset_t mask_old, mask;
+
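+ /*
+ * Block SIGINT and hand the original mask to ppoll() so a Ctrl-C is
+ * only delivered while waiting; an interrupted poll then stops the
+ * running tests cleanly before returning.
+ */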
+ sigemptyset(&mask);
+ sigemptyset(&mask_old);
+ sigaddset(&mask, SIGINT);
+ sigprocmask(SIG_BLOCK, &mask, &mask_old);
+
+ sa.sa_handler = handler;
+ sa.sa_flags = 0;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(SIGINT, &sa, NULL) == -1) {
+ fprintf(stderr, "sigaction error\n");
+ return -1;
+ }
+
+ if (t->poll_timeout.tv_sec != 0)
+ ts = &t->poll_timeout;
+
+ while (1) {
+
+ ret = ppoll(t->fds, t->poll_count, ts, &mask_old);
+ if (ret <= 0) {
+ stop_tests(t);
+ fprintf(stderr, "Poll exit with errno %d\n", errno);
+ return -1;
+ }
+
+ for (i = 0; i < t->poll_count; i++) {
+ if (t->fds[i].revents & POLLPRI) {
+ /* Dummy read to clear the event */
+ read(t->fds[i].fd, &dummy, 1);
+ number_of_events++;
+ }
+ }
+
+ if (number_of_events == t->poll_count)
+ break;
+ }
+
+ if (!is_complete(t)) {
+ fprintf(stderr, "Iteration count did not finish!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void prepare_devices(struct loopback_test *t)
+{
+ int i;
+
+ /* Cancel any running tests on enabled devices. If
+ * stop_all option is given, stop test on all devices.
+ */
+ for (i = 0; i < t->device_count; i++)
+ if (t->stop_all || device_enabled(t, i))
+ write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
+
+
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ write_sysfs_val(t->devices[i].sysfs_entry, "us_wait",
+ t->us_wait);
+
+ /* Set operation size */
+ write_sysfs_val(t->devices[i].sysfs_entry, "size", t->size);
+
+ /* Set iterations */
+ write_sysfs_val(t->devices[i].sysfs_entry, "iteration_max",
+ t->iteration_max);
+
+ if (t->use_async) {
+ write_sysfs_val(t->devices[i].sysfs_entry,
+ "async", 1);
+ write_sysfs_val(t->devices[i].sysfs_entry,
+ "timeout", t->async_timeout);
+ write_sysfs_val(t->devices[i].sysfs_entry,
+ "outstanding_operations_max",
+ t->async_outstanding_operations);
+ } else
+ write_sysfs_val(t->devices[i].sysfs_entry,
+ "async", 0);
+ }
+}
+
+static int start(struct loopback_test *t)
+{
+ int i;
+
+ /* the test starts by writing test_id to the type file. */
+ for (i = 0; i < t->device_count; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ write_sysfs_val(t->devices[i].sysfs_entry, "type", t->test_id);
+ }
+
+ return 0;
+}
+
+
+void loopback_run(struct loopback_test *t)
+{
+ int i;
+ int ret;
+
+ for (i = 0; dict[i].name != NULL; i++) {
+ if (strstr(dict[i].name, t->test_name))
+ t->test_id = dict[i].type;
+ }
+ if (!t->test_id) {
+ fprintf(stderr, "invalid test %s\n", t->test_name);
+ usage();
+ return;
+ }
+
+ prepare_devices(t);
+
+ ret = open_poll_files(t);
+ if (ret)
+ goto err;
+
+ start(t);
+
+ ret = wait_for_complete(t);
+ close_poll_files(t);
+ if (ret)
+ goto err;
+
+
+ get_results(t);
+
+ log_results(t);
+
+ return;
+
+err:
+ printf("Error running test\n");
+ return;
+}
+
+static int sanity_check(struct loopback_test *t)
+{
+ int i;
+
+ if (t->device_count == 0) {
+ fprintf(stderr, "No loopback devices found\n");
+ return -1;
+ }
+
+ for (i = 0; i < MAX_NUM_DEVICES; i++) {
+ if (!device_enabled(t, i))
+ continue;
+
+ if (t->mask && !strcmp(t->devices[i].name, "")) {
+ fprintf(stderr, "Bad device mask %x\n", (1 << i));
+ return -1;
+ }
+
+ }
+
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int o, ret;
+ char *sysfs_prefix = "/sys/class/gb_loopback/";
+ char *debugfs_prefix = "/sys/kernel/debug/gb_loopback/";
+
+ memset(&t, 0, sizeof(t));
+
+ while ((o = getopt(argc, argv,
+ "t:s:i:S:D:m:v::d::r::p::a::l::x::o:O:c:w:z::f::")) != -1) {
+ switch (o) {
+ case 't':
+ snprintf(t.test_name, MAX_STR_LEN, "%s", optarg);
+ break;
+ case 's':
+ t.size = atoi(optarg);
+ break;
+ case 'i':
+ t.iteration_max = atoi(optarg);
+ break;
+ case 'S':
+ snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+ break;
+ case 'D':
+ snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+ break;
+ case 'm':
+ t.mask = atol(optarg);
+ break;
+ case 'v':
+ t.verbose = 1;
+ break;
+ case 'd':
+ t.debug = 1;
+ break;
+ case 'r':
+ t.raw_data_dump = 1;
+ break;
+ case 'p':
+ t.porcelain = 1;
+ break;
+ case 'a':
+ t.aggregate_output = 1;
+ break;
+ case 'l':
+ t.list_devices = 1;
+ break;
+ case 'x':
+ t.use_async = 1;
+ break;
+ case 'o':
+ t.async_timeout = atoi(optarg);
+ break;
+ case 'O':
+ t.poll_timeout.tv_sec = atoi(optarg);
+ break;
+ case 'c':
+ t.async_outstanding_operations = atoi(optarg);
+ break;
+ case 'w':
+ t.us_wait = atoi(optarg);
+ break;
+ case 'z':
+ t.file_output = 1;
+ break;
+ case 'f':
+ t.stop_all = 1;
+ break;
+ default:
+ usage();
+ return -EINVAL;
+ }
+ }
+
+ if (!strcmp(t.sysfs_prefix, ""))
+ snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
+
+ if (!strcmp(t.debugfs_prefix, ""))
+ snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
+
+ ret = find_loopback_devices(&t);
+ if (ret)
+ return ret;
+ ret = sanity_check(&t);
+ if (ret)
+ return ret;
+
+ if (t.list_devices) {
+ show_loopback_devices(&t);
+ return 0;
+ }
+
+ if (t.test_name[0] == '\0' || t.iteration_max == 0)
+ usage();
+
+ if (t.async_timeout == 0)
+ t.async_timeout = DEFAULT_ASYNC_TIMEOUT;
+
+ loopback_run(&t);
+
+ return 0;
+}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
new file mode 100644
index 000000000000..5ee7954bd9f9
--- /dev/null
+++ b/drivers/staging/greybus/uart.c
@@ -0,0 +1,1075 @@
+/*
+ * UART driver for the Greybus "generic" UART module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ * Heavily based on drivers/usb/class/cdc-acm.c and
+ * drivers/usb/serial/usb-serial.c.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+#define GB_NUM_MINORS 16 /* 16 is more than enough */
+#define GB_NAME "ttyGB"
+
+#define GB_UART_WRITE_FIFO_SIZE PAGE_SIZE
+#define GB_UART_WRITE_ROOM_MARGIN 1 /* leave some space in fifo */
+#define GB_UART_FIRMWARE_CREDITS 4096
+#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC 10000
+
+struct gb_tty_line_coding {
+ __le32 rate;
+ __u8 format;
+ __u8 parity;
+ __u8 data_bits;
+ __u8 flow_control;
+};
+
+struct gb_tty {
+ struct gbphy_device *gbphy_dev;
+ struct tty_port port;
+ void *buffer;
+ size_t buffer_payload_max;
+ struct gb_connection *connection;
+ u16 cport_id;
+ unsigned int minor;
+ unsigned char clocal;
+ bool disconnected;
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+ struct async_icount iocount;
+ struct async_icount oldcount;
+ wait_queue_head_t wioctl;
+ struct mutex mutex;
+ u8 ctrlin; /* input control lines */
+ u8 ctrlout; /* output control lines */
+ struct gb_tty_line_coding line_coding;
+ struct work_struct tx_work;
+ struct kfifo write_fifo;
+ bool close_pending;
+ unsigned int credits;
+ struct completion credits_complete;
+};
+
+static struct tty_driver *gb_tty_driver;
+static DEFINE_IDR(tty_minors);
+static DEFINE_MUTEX(table_lock);
+
+static int gb_uart_receive_data_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct tty_port *port = &gb_tty->port;
+ struct gb_message *request = op->request;
+ struct gb_uart_recv_data_request *receive_data;
+ u16 recv_data_size;
+ int count;
+ unsigned long tty_flags = TTY_NORMAL;
+
+ if (request->payload_size < sizeof(*receive_data)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short receive-data request received (%zu < %zu)\n",
+ request->payload_size, sizeof(*receive_data));
+ return -EINVAL;
+ }
+
+ receive_data = op->request->payload;
+ recv_data_size = le16_to_cpu(receive_data->size);
+
+ if (recv_data_size != request->payload_size - sizeof(*receive_data)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "malformed receive-data request received (%u != %zu)\n",
+ recv_data_size,
+ request->payload_size - sizeof(*receive_data));
+ return -EINVAL;
+ }
+
+ if (!recv_data_size)
+ return -EINVAL;
+
+ if (receive_data->flags) {
+ if (receive_data->flags & GB_UART_RECV_FLAG_BREAK)
+ tty_flags = TTY_BREAK;
+ else if (receive_data->flags & GB_UART_RECV_FLAG_PARITY)
+ tty_flags = TTY_PARITY;
+ else if (receive_data->flags & GB_UART_RECV_FLAG_FRAMING)
+ tty_flags = TTY_FRAME;
+
+ /* overrun is special, not associated with a char */
+ if (receive_data->flags & GB_UART_RECV_FLAG_OVERRUN)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ }
+ count = tty_insert_flip_string_fixed_flag(port, receive_data->data,
+ tty_flags, recv_data_size);
+ if (count != recv_data_size) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "UART: RX 0x%08x bytes only wrote 0x%08x\n",
+ recv_data_size, count);
+ }
+ if (count)
+ tty_flip_buffer_push(port);
+ return 0;
+}
+
+static int gb_uart_serial_state_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct gb_message *request = op->request;
+ struct gb_uart_serial_state_request *serial_state;
+
+ if (request->payload_size < sizeof(*serial_state)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short serial-state event received (%zu < %zu)\n",
+ request->payload_size, sizeof(*serial_state));
+ return -EINVAL;
+ }
+
+ serial_state = request->payload;
+ gb_tty->ctrlin = serial_state->control;
+
+ return 0;
+}
+
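+/*
+ * The remote end returns transmit credits as it consumes data; add them to
+ * the local budget, restart any pending writes and wake up blocked writers.
+ */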
+static int gb_uart_receive_credits_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ struct gb_message *request = op->request;
+ struct gb_uart_receive_credits_request *credit_request;
+ unsigned long flags;
+ unsigned int incoming_credits;
+ int ret = 0;
+
+ if (request->payload_size < sizeof(*credit_request)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "short receive_credits event received (%zu < %zu)\n",
+ request->payload_size,
+ sizeof(*credit_request));
+ return -EINVAL;
+ }
+
+ credit_request = request->payload;
+ incoming_credits = le16_to_cpu(credit_request->count);
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ gb_tty->credits += incoming_credits;
+ if (gb_tty->credits > GB_UART_FIRMWARE_CREDITS) {
+ gb_tty->credits -= incoming_credits;
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "invalid number of incoming credits: %d\n",
+ incoming_credits);
+ return ret;
+ }
+
+ if (!gb_tty->close_pending)
+ schedule_work(&gb_tty->tx_work);
+
+ /*
+ * The tty layer may be waiting for credits; wake up the port.
+ */
+ tty_port_tty_wakeup(&gb_tty->port);
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ complete(&gb_tty->credits_complete);
+
+ return ret;
+}
+
+static int gb_uart_request_handler(struct gb_operation *op)
+{
+ struct gb_connection *connection = op->connection;
+ struct gb_tty *gb_tty = gb_connection_get_data(connection);
+ int type = op->type;
+ int ret;
+
+ switch (type) {
+ case GB_UART_TYPE_RECEIVE_DATA:
+ ret = gb_uart_receive_data_handler(op);
+ break;
+ case GB_UART_TYPE_SERIAL_STATE:
+ ret = gb_uart_serial_state_handler(op);
+ break;
+ case GB_UART_TYPE_RECEIVE_CREDITS:
+ ret = gb_uart_receive_credits_handler(op);
+ break;
+ default:
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "unsupported unsolicited request: 0x%02x\n", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
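+/*
+ * Send data from the write fifo in chunks bounded by the available credits.
+ * Data is only consumed from the fifo once a send succeeds; on error the
+ * credits are returned and the work is rescheduled.
+ */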
+static void gb_uart_tx_write_work(struct work_struct *work)
+{
+ struct gb_uart_send_data_request *request;
+ struct gb_tty *gb_tty;
+ unsigned long flags;
+ unsigned int send_size;
+ int ret;
+
+ gb_tty = container_of(work, struct gb_tty, tx_work);
+ request = gb_tty->buffer;
+
+ while (1) {
+ if (gb_tty->close_pending)
+ break;
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ send_size = gb_tty->buffer_payload_max;
+ if (send_size > gb_tty->credits)
+ send_size = gb_tty->credits;
+
+ send_size = kfifo_out_peek(&gb_tty->write_fifo,
+ &request->data[0],
+ send_size);
+ if (!send_size) {
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+ break;
+ }
+
+ gb_tty->credits -= send_size;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ request->size = cpu_to_le16(send_size);
+ ret = gb_operation_sync(gb_tty->connection,
+ GB_UART_TYPE_SEND_DATA,
+ request, sizeof(*request) + send_size,
+ NULL, 0);
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "send data error: %d\n", ret);
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ gb_tty->credits += send_size;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+ if (!gb_tty->close_pending)
+ schedule_work(work);
+ return;
+ }
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ ret = kfifo_out(&gb_tty->write_fifo, &request->data[0],
+ send_size);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ tty_port_tty_wakeup(&gb_tty->port);
+ }
+}
+
+static int send_line_coding(struct gb_tty *tty)
+{
+ struct gb_uart_set_line_coding_request request;
+
+ memcpy(&request, &tty->line_coding,
+ sizeof(tty->line_coding));
+ return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
+ &request, sizeof(request), NULL, 0);
+}
+
+static int send_control(struct gb_tty *gb_tty, u8 control)
+{
+ struct gb_uart_set_control_line_state_request request;
+
+ request.control = control;
+ return gb_operation_sync(gb_tty->connection,
+ GB_UART_TYPE_SET_CONTROL_LINE_STATE,
+ &request, sizeof(request), NULL, 0);
+}
+
+static int send_break(struct gb_tty *gb_tty, u8 state)
+{
+ struct gb_uart_set_break_request request;
+
+ if ((state != 0) && (state != 1)) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "invalid break state of %d\n", state);
+ return -EINVAL;
+ }
+
+ request.state = state;
+ return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_SEND_BREAK,
+ &request, sizeof(request), NULL, 0);
+}
+
+static int gb_uart_wait_for_all_credits(struct gb_tty *gb_tty)
+{
+ int ret;
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ return 0;
+
+ ret = wait_for_completion_timeout(&gb_tty->credits_complete,
+ msecs_to_jiffies(GB_UART_CREDIT_WAIT_TIMEOUT_MSEC));
+ if (!ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "time out waiting for credits\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int gb_uart_flush(struct gb_tty *gb_tty, u8 flags)
+{
+ struct gb_uart_serial_flush_request request;
+
+ request.flags = flags;
+ return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_FLUSH_FIFOS,
+ &request, sizeof(request), NULL, 0);
+}
+
+static struct gb_tty *get_gb_by_minor(unsigned int minor)
+{
+ struct gb_tty *gb_tty;
+
+ mutex_lock(&table_lock);
+ gb_tty = idr_find(&tty_minors, minor);
+ if (gb_tty) {
+ mutex_lock(&gb_tty->mutex);
+ if (gb_tty->disconnected) {
+ mutex_unlock(&gb_tty->mutex);
+ gb_tty = NULL;
+ } else {
+ tty_port_get(&gb_tty->port);
+ mutex_unlock(&gb_tty->mutex);
+ }
+ }
+ mutex_unlock(&table_lock);
+ return gb_tty;
+}
+
+static int alloc_minor(struct gb_tty *gb_tty)
+{
+ int minor;
+
+ mutex_lock(&table_lock);
+ minor = idr_alloc(&tty_minors, gb_tty, 0, GB_NUM_MINORS, GFP_KERNEL);
+ mutex_unlock(&table_lock);
+ if (minor >= 0)
+ gb_tty->minor = minor;
+ return minor;
+}
+
+static void release_minor(struct gb_tty *gb_tty)
+{
+ int minor = gb_tty->minor;
+
+ gb_tty->minor = 0; /* Maybe should use an invalid value instead */
+ mutex_lock(&table_lock);
+ idr_remove(&tty_minors, minor);
+ mutex_unlock(&table_lock);
+}
+
+static int gb_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty;
+ int retval;
+
+ gb_tty = get_gb_by_minor(tty->index);
+ if (!gb_tty)
+ return -ENODEV;
+
+ retval = tty_standard_install(driver, tty);
+ if (retval)
+ goto error;
+
+ tty->driver_data = gb_tty;
+ return 0;
+error:
+ tty_port_put(&gb_tty->port);
+ return retval;
+}
+
+static int gb_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return tty_port_open(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_close(&gb_tty->port, tty, file);
+}
+
+static void gb_tty_cleanup(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_put(&gb_tty->port);
+}
+
+static void gb_tty_hangup(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ tty_port_hangup(&gb_tty->port);
+}
+
+static int gb_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ count = kfifo_in_spinlocked(&gb_tty->write_fifo, buf, count,
+ &gb_tty->write_lock);
+ if (count && !gb_tty->close_pending)
+ schedule_work(&gb_tty->tx_work);
+
+ return count;
+}
+
+static int gb_tty_write_room(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned long flags;
+ int room;
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ room = kfifo_avail(&gb_tty->write_fifo);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ room -= GB_UART_WRITE_ROOM_MARGIN;
+ if (room < 0)
+ return 0;
+
+ return room;
+}
+
+static int gb_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned long flags;
+ int chars;
+
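+ /*
+ * Report bytes still queued locally plus bytes sent but not yet
+ * acknowledged by the firmware via credits.
+ */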
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ chars = kfifo_len(&gb_tty->write_fifo);
+ if (gb_tty->credits < GB_UART_FIRMWARE_CREDITS)
+ chars += GB_UART_FIRMWARE_CREDITS - gb_tty->credits;
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ return chars;
+}
+
+static int gb_tty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return send_break(gb_tty, state ? 1 : 0);
+}
+
+static void gb_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *termios_old)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ struct ktermios *termios = &tty->termios;
+ struct gb_tty_line_coding newline;
+ u8 newctrl = gb_tty->ctrlout;
+
+ newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
+ newline.format = termios->c_cflag & CSTOPB ?
+ GB_SERIAL_2_STOP_BITS : GB_SERIAL_1_STOP_BITS;
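+ /* parity encoding (as in cdc-acm): 0 = none, 1 = odd, 2 = even, 3 = mark, 4 = space */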
+ newline.parity = termios->c_cflag & PARENB ?
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ newline.data_bits = 5;
+ break;
+ case CS6:
+ newline.data_bits = 6;
+ break;
+ case CS7:
+ newline.data_bits = 7;
+ break;
+ case CS8:
+ default:
+ newline.data_bits = 8;
+ break;
+ }
+
+ /* FIXME: needs to clear unsupported bits in the termios */
+ gb_tty->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+ if (C_BAUD(tty) == B0) {
+ newline.rate = gb_tty->line_coding.rate;
+ newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ }
+
+ if (newctrl != gb_tty->ctrlout) {
+ gb_tty->ctrlout = newctrl;
+ send_control(gb_tty, newctrl);
+ }
+
+ if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
+ newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
+ else
+ newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
+
+ if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
+ memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
+ send_line_coding(gb_tty);
+ }
+}
+
+static int gb_tty_tiocmget(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ return (gb_tty->ctrlout & GB_UART_CTRL_DTR ? TIOCM_DTR : 0) |
+ (gb_tty->ctrlout & GB_UART_CTRL_RTS ? TIOCM_RTS : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_DSR ? TIOCM_DSR : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_RI ? TIOCM_RI : 0) |
+ (gb_tty->ctrlin & GB_UART_CTRL_DCD ? TIOCM_CD : 0) |
+ TIOCM_CTS;
+}
+
+static int gb_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ u8 newctrl = gb_tty->ctrlout;
+
+ set = (set & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+ (set & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+ clear = (clear & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
+ (clear & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
+
+ newctrl = (newctrl & ~clear) | set;
+ if (gb_tty->ctrlout == newctrl)
+ return 0;
+
+ gb_tty->ctrlout = newctrl;
+ return send_control(gb_tty, newctrl);
+}
+
+static void gb_tty_throttle(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned char stop_char;
+ int retval;
+
+ if (I_IXOFF(tty)) {
+ stop_char = STOP_CHAR(tty);
+ retval = gb_tty_write(tty, &stop_char, 1);
+ if (retval <= 0)
+ return;
+ }
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ gb_tty->ctrlout &= ~GB_UART_CTRL_RTS;
+ retval = send_control(gb_tty, gb_tty->ctrlout);
+ }
+}
+
+static void gb_tty_unthrottle(struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+ unsigned char start_char;
+ int retval;
+
+ if (I_IXOFF(tty)) {
+ start_char = START_CHAR(tty);
+ retval = gb_tty_write(tty, &start_char, 1);
+ if (retval <= 0)
+ return;
+ }
+
+ if (tty->termios.c_cflag & CRTSCTS) {
+ gb_tty->ctrlout |= GB_UART_CTRL_RTS;
+ retval = send_control(gb_tty, gb_tty->ctrlout);
+ }
+}
+
+static int get_serial_info(struct gb_tty *gb_tty,
+ struct serial_struct __user *info)
+{
+ struct serial_struct tmp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
+ tmp.type = PORT_16550A;
+ tmp.line = gb_tty->minor;
+ tmp.xmit_fifo_size = 16;
+ tmp.baud_base = 9600;
+ tmp.close_delay = gb_tty->port.close_delay / 10;
+ tmp.closing_wait = gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
+
+ if (copy_to_user(info, &tmp, sizeof(tmp)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_serial_info(struct gb_tty *gb_tty,
+ struct serial_struct __user *newinfo)
+{
+ struct serial_struct new_serial;
+ unsigned int closing_wait;
+ unsigned int close_delay;
+ int retval = 0;
+
+ if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
+ return -EFAULT;
+
+ close_delay = new_serial.close_delay * 10;
+ closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+
+ mutex_lock(&gb_tty->port.mutex);
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((close_delay != gb_tty->port.close_delay) ||
+ (closing_wait != gb_tty->port.closing_wait))
+ retval = -EPERM;
+ else
+ retval = -EOPNOTSUPP;
+ } else {
+ gb_tty->port.close_delay = close_delay;
+ gb_tty->port.closing_wait = closing_wait;
+ }
+ mutex_unlock(&gb_tty->port.mutex);
+ return retval;
+}
+
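+/*
+ * Sleep until one of the requested modem-status lines (DSR, RI or DCD)
+ * changes state, the device is disconnected, or a signal is received.
+ */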
+static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
+{
+ int retval = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct async_icount old;
+ struct async_icount new;
+
+ if (!(arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)))
+ return -EINVAL;
+
+ do {
+ spin_lock_irq(&gb_tty->read_lock);
+ old = gb_tty->oldcount;
+ new = gb_tty->iocount;
+ gb_tty->oldcount = new;
+ spin_unlock_irq(&gb_tty->read_lock);
+
+ if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
+ break;
+ if ((arg & TIOCM_CD) && (old.dcd != new.dcd))
+ break;
+ if ((arg & TIOCM_RI) && (old.rng != new.rng))
+ break;
+
+ add_wait_queue(&gb_tty->wioctl, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ remove_wait_queue(&gb_tty->wioctl, &wait);
+ if (gb_tty->disconnected) {
+ if (arg & TIOCM_CD)
+ break;
+ retval = -ENODEV;
+ } else if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ }
+ } while (!retval);
+
+ return retval;
+}
+
+static int get_serial_usage(struct gb_tty *gb_tty,
+ struct serial_icounter_struct __user *count)
+{
+ struct serial_icounter_struct icount;
+ int retval = 0;
+
+ memset(&icount, 0, sizeof(icount));
+ icount.dsr = gb_tty->iocount.dsr;
+ icount.rng = gb_tty->iocount.rng;
+ icount.dcd = gb_tty->iocount.dcd;
+ icount.frame = gb_tty->iocount.frame;
+ icount.overrun = gb_tty->iocount.overrun;
+ icount.parity = gb_tty->iocount.parity;
+ icount.brk = gb_tty->iocount.brk;
+
+ if (copy_to_user(count, &icount, sizeof(icount)) > 0)
+ retval = -EFAULT;
+
+ return retval;
+}
+
+static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return get_serial_info(gb_tty,
+ (struct serial_struct __user *)arg);
+ case TIOCSSERIAL:
+ return set_serial_info(gb_tty,
+ (struct serial_struct __user *)arg);
+ case TIOCMIWAIT:
+ return wait_serial_change(gb_tty, arg);
+ case TIOCGICOUNT:
+ return get_serial_usage(gb_tty,
+ (struct serial_icounter_struct __user *)arg);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static void gb_tty_dtr_rts(struct tty_port *port, int on)
+{
+ struct gb_tty *gb_tty;
+ u8 newctrl;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+ newctrl = gb_tty->ctrlout;
+
+ if (on)
+ newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+ else
+ newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
+
+ gb_tty->ctrlout = newctrl;
+ send_control(gb_tty, newctrl);
+}
+
+static int gb_tty_port_activate(struct tty_port *port,
+ struct tty_struct *tty)
+{
+ struct gb_tty *gb_tty;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+
+ return gbphy_runtime_get_sync(gb_tty->gbphy_dev);
+}
+
+static void gb_tty_port_shutdown(struct tty_port *port)
+{
+ struct gb_tty *gb_tty;
+ unsigned long flags;
+ int ret;
+
+ gb_tty = container_of(port, struct gb_tty, port);
+
+ gb_tty->close_pending = true;
+
+ cancel_work_sync(&gb_tty->tx_work);
+
+ spin_lock_irqsave(&gb_tty->write_lock, flags);
+ kfifo_reset_out(&gb_tty->write_fifo);
+ spin_unlock_irqrestore(&gb_tty->write_lock, flags);
+
+ if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
+ goto out;
+
+ ret = gb_uart_flush(gb_tty, GB_SERIAL_FLAG_FLUSH_TRANSMITTER);
+ if (ret) {
+ dev_err(&gb_tty->gbphy_dev->dev,
+ "error flushing transmitter: %d\n", ret);
+ }
+
+ gb_uart_wait_for_all_credits(gb_tty);
+
+out:
+ gb_tty->close_pending = false;
+
+ gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
+}
+
+static const struct tty_operations gb_ops = {
+ .install = gb_tty_install,
+ .open = gb_tty_open,
+ .close = gb_tty_close,
+ .cleanup = gb_tty_cleanup,
+ .hangup = gb_tty_hangup,
+ .write = gb_tty_write,
+ .write_room = gb_tty_write_room,
+ .ioctl = gb_tty_ioctl,
+ .throttle = gb_tty_throttle,
+ .unthrottle = gb_tty_unthrottle,
+ .chars_in_buffer = gb_tty_chars_in_buffer,
+ .break_ctl = gb_tty_break_ctl,
+ .set_termios = gb_tty_set_termios,
+ .tiocmget = gb_tty_tiocmget,
+ .tiocmset = gb_tty_tiocmset,
+};
+
+static struct tty_port_operations gb_port_ops = {
+ .dtr_rts = gb_tty_dtr_rts,
+ .activate = gb_tty_port_activate,
+ .shutdown = gb_tty_port_shutdown,
+};
+
+static int gb_uart_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ size_t max_payload;
+ struct gb_tty *gb_tty;
+ struct device *tty_dev;
+ int retval;
+ int minor;
+
+ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+ if (!gb_tty)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ gb_uart_request_handler);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto exit_tty_free;
+ }
+
+ max_payload = gb_operation_get_payload_size_max(connection);
+ if (max_payload < sizeof(struct gb_uart_send_data_request)) {
+ retval = -EINVAL;
+ goto exit_connection_destroy;
+ }
+
+ gb_tty->buffer_payload_max = max_payload -
+ sizeof(struct gb_uart_send_data_request);
+
+ gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
+ if (!gb_tty->buffer) {
+ retval = -ENOMEM;
+ goto exit_connection_destroy;
+ }
+
+ INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
+
+ retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
+ GFP_KERNEL);
+ if (retval)
+ goto exit_buf_free;
+
+ gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
+ init_completion(&gb_tty->credits_complete);
+
+ minor = alloc_minor(gb_tty);
+ if (minor < 0) {
+ if (minor == -ENOSPC) {
+ dev_err(&connection->bundle->dev,
+ "no more free minor numbers\n");
+ retval = -ENODEV;
+ } else {
+ retval = minor;
+ }
+ goto exit_kfifo_free;
+ }
+
+ gb_tty->minor = minor;
+ spin_lock_init(&gb_tty->write_lock);
+ spin_lock_init(&gb_tty->read_lock);
+ init_waitqueue_head(&gb_tty->wioctl);
+ mutex_init(&gb_tty->mutex);
+
+ tty_port_init(&gb_tty->port);
+ gb_tty->port.ops = &gb_port_ops;
+
+ gb_tty->connection = connection;
+ gb_tty->gbphy_dev = gbphy_dev;
+ gb_connection_set_data(connection, gb_tty);
+ gb_gbphy_set_data(gbphy_dev, gb_tty);
+
+ retval = gb_connection_enable_tx(connection);
+ if (retval)
+ goto exit_release_minor;
+
+ send_control(gb_tty, gb_tty->ctrlout);
+
+ /* initialize the uart to be 9600n81 */
+ gb_tty->line_coding.rate = cpu_to_le32(9600);
+ gb_tty->line_coding.format = GB_SERIAL_1_STOP_BITS;
+ gb_tty->line_coding.parity = GB_SERIAL_NO_PARITY;
+ gb_tty->line_coding.data_bits = 8;
+ send_line_coding(gb_tty);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto exit_connection_disable;
+
+ tty_dev = tty_port_register_device(&gb_tty->port, gb_tty_driver, minor,
+ &gbphy_dev->dev);
+ if (IS_ERR(tty_dev)) {
+ retval = PTR_ERR(tty_dev);
+ goto exit_connection_disable;
+ }
+
+ gbphy_runtime_put_autosuspend(gbphy_dev);
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_release_minor:
+ release_minor(gb_tty);
+exit_kfifo_free:
+ kfifo_free(&gb_tty->write_fifo);
+exit_buf_free:
+ kfree(gb_tty->buffer);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_tty_free:
+ kfree(gb_tty);
+
+ return retval;
+}
+
+static void gb_uart_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_tty->connection;
+ struct tty_struct *tty;
+ int ret;
+
+ ret = gbphy_runtime_get_sync(gbphy_dev);
+ if (ret)
+ gbphy_runtime_get_noresume(gbphy_dev);
+
+ mutex_lock(&gb_tty->mutex);
+ gb_tty->disconnected = true;
+
+ wake_up_all(&gb_tty->wioctl);
+ mutex_unlock(&gb_tty->mutex);
+
+ tty = tty_port_tty_get(&gb_tty->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ gb_connection_disable_rx(connection);
+ tty_unregister_device(gb_tty_driver, gb_tty->minor);
+
+ /* FIXME - free transmit / receive buffers */
+
+ gb_connection_disable(connection);
+ tty_port_destroy(&gb_tty->port);
+ gb_connection_destroy(connection);
+ release_minor(gb_tty);
+ kfifo_free(&gb_tty->write_fifo);
+ kfree(gb_tty->buffer);
+ kfree(gb_tty);
+}
+
+static int gb_tty_init(void)
+{
+ int retval = 0;
+
+ gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, 0);
+ if (IS_ERR(gb_tty_driver)) {
+ pr_err("Can not allocate tty driver\n");
+ retval = -ENOMEM;
+ goto fail_unregister_dev;
+ }
+
+ gb_tty_driver->driver_name = "gb";
+ gb_tty_driver->name = GB_NAME;
+ gb_tty_driver->major = 0;
+ gb_tty_driver->minor_start = 0;
+ gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gb_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gb_tty_driver->init_termios = tty_std_termios;
+ gb_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_set_operations(gb_tty_driver, &gb_ops);
+
+ retval = tty_register_driver(gb_tty_driver);
+ if (retval) {
+ pr_err("Can not register tty driver: %d\n", retval);
+ goto fail_put_gb_tty;
+ }
+
+ return 0;
+
+fail_put_gb_tty:
+ put_tty_driver(gb_tty_driver);
+fail_unregister_dev:
+ return retval;
+}
+
+static void gb_tty_exit(void)
+{
+ tty_unregister_driver(gb_tty_driver);
+ put_tty_driver(gb_tty_driver);
+ idr_destroy(&tty_minors);
+}
+
+static const struct gbphy_device_id gb_uart_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_UART) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_uart_id_table);
+
+static struct gbphy_driver uart_driver = {
+ .name = "uart",
+ .probe = gb_uart_probe,
+ .remove = gb_uart_remove,
+ .id_table = gb_uart_id_table,
+};
+
+static int gb_uart_driver_init(void)
+{
+ int ret;
+
+ ret = gb_tty_init();
+ if (ret)
+ return ret;
+
+ ret = gb_gbphy_register(&uart_driver);
+ if (ret) {
+ gb_tty_exit();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gb_uart_driver_init);
+
+static void gb_uart_driver_exit(void)
+{
+ gb_gbphy_deregister(&uart_driver);
+ gb_tty_exit();
+}
+
+module_exit(gb_uart_driver_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
new file mode 100644
index 000000000000..ccadda084b76
--- /dev/null
+++ b/drivers/staging/greybus/usb.c
@@ -0,0 +1,247 @@
+/*
+ * USB host driver for the Greybus "generic" USB module.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "greybus.h"
+#include "gbphy.h"
+
+/* Greybus USB request types */
+#define GB_USB_TYPE_HCD_START 0x02
+#define GB_USB_TYPE_HCD_STOP 0x03
+#define GB_USB_TYPE_HUB_CONTROL 0x04
+
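+/* Mirrors the arguments of the hub_control() HCD callback; fields are little-endian on the wire. */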
+struct gb_usb_hub_control_request {
+ __le16 typeReq;
+ __le16 wValue;
+ __le16 wIndex;
+ __le16 wLength;
+};
+
+struct gb_usb_hub_control_response {
+ u8 buf[0];
+};
+
+struct gb_usb_device {
+ struct gb_connection *connection;
+ struct gbphy_device *gbphy_dev;
+};
+
+static inline struct gb_usb_device *to_gb_usb_device(struct usb_hcd *hcd)
+{
+ return (struct gb_usb_device *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *gb_usb_device_to_hcd(struct gb_usb_device *dev)
+{
+ return container_of((void *)dev, struct usb_hcd, hcd_priv);
+}
+
+static void hcd_stop(struct usb_hcd *hcd)
+{
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ int ret;
+
+ ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_STOP,
+ NULL, 0, NULL, 0);
+ if (ret)
+ dev_err(&dev->gbphy_dev->dev, "HCD stop failed '%d'\n", ret);
+}
+
+static int hcd_start(struct usb_hcd *hcd)
+{
+ struct usb_bus *bus = hcd_to_bus(hcd);
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ int ret;
+
+ ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_START,
+ NULL, 0, NULL, 0);
+ if (ret) {
+ dev_err(&dev->gbphy_dev->dev, "HCD start failed '%d'\n", ret);
+ return ret;
+ }
+
+ hcd->state = HC_STATE_RUNNING;
+ if (bus->root_hub)
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+
+static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ return -ENXIO;
+}
+
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ return -ENXIO;
+}
+
+static int get_frame_number(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
+static int hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ return 0;
+}
+
+static int hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength)
+{
+ struct gb_usb_device *dev = to_gb_usb_device(hcd);
+ struct gb_operation *operation;
+ struct gb_usb_hub_control_request *request;
+ struct gb_usb_hub_control_response *response;
+ size_t response_size;
+ int ret;
+
+ /* FIXME: handle unspecified lengths */
+ response_size = sizeof(*response) + wLength;
+
+ operation = gb_operation_create(dev->connection,
+ GB_USB_TYPE_HUB_CONTROL,
+ sizeof(*request),
+ response_size,
+ GFP_KERNEL);
+ if (!operation)
+ return -ENOMEM;
+
+ request = operation->request->payload;
+ request->typeReq = cpu_to_le16(typeReq);
+ request->wValue = cpu_to_le16(wValue);
+ request->wIndex = cpu_to_le16(wIndex);
+ request->wLength = cpu_to_le16(wLength);
+
+ ret = gb_operation_request_send_sync(operation);
+ if (ret)
+ goto out;
+
+ if (wLength) {
+ /* Greybus core has verified response size */
+ response = operation->response->payload;
+ memcpy(buf, response->buf, wLength);
+ }
+out:
+ gb_operation_put(operation);
+
+ return ret;
+}
+
+static struct hc_driver usb_gb_hc_driver = {
+ .description = "greybus-hcd",
+ .product_desc = "Greybus USB Host Controller",
+ .hcd_priv_size = sizeof(struct gb_usb_device),
+
+ .flags = HCD_USB2,
+
+ .start = hcd_start,
+ .stop = hcd_stop,
+
+ .urb_enqueue = urb_enqueue,
+ .urb_dequeue = urb_dequeue,
+
+ .get_frame_number = get_frame_number,
+ .hub_status_data = hub_status_data,
+ .hub_control = hub_control,
+};
+
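+/*
+ * Set up the HCD and the Greybus connection for the bundle's CPort. Note
+ * that the probe currently bails out before usb_add_hcd() (see the FIXME
+ * below).
+ */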
+static int gb_usb_probe(struct gbphy_device *gbphy_dev,
+ const struct gbphy_device_id *id)
+{
+ struct gb_connection *connection;
+ struct device *dev = &gbphy_dev->dev;
+ struct gb_usb_device *gb_usb_dev;
+ struct usb_hcd *hcd;
+ int retval;
+
+ hcd = usb_create_hcd(&usb_gb_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ connection = gb_connection_create(gbphy_dev->bundle,
+ le16_to_cpu(gbphy_dev->cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto exit_usb_put;
+ }
+
+ gb_usb_dev = to_gb_usb_device(hcd);
+ gb_usb_dev->connection = connection;
+ gb_connection_set_data(connection, gb_usb_dev);
+ gb_usb_dev->gbphy_dev = gbphy_dev;
+ gb_gbphy_set_data(gbphy_dev, gb_usb_dev);
+
+ hcd->has_tt = 1;
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto exit_connection_destroy;
+
+ /*
+ * FIXME: The USB bridged-PHY protocol driver depends on changes to
+ * USB core which are not yet upstream.
+ *
+ * Disable for now.
+ */
+ if (1) {
+ dev_warn(dev, "USB protocol disabled\n");
+ retval = -EPROTONOSUPPORT;
+ goto exit_connection_disable;
+ }
+
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval)
+ goto exit_connection_disable;
+
+ return 0;
+
+exit_connection_disable:
+ gb_connection_disable(connection);
+exit_connection_destroy:
+ gb_connection_destroy(connection);
+exit_usb_put:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static void gb_usb_remove(struct gbphy_device *gbphy_dev)
+{
+ struct gb_usb_device *gb_usb_dev = gb_gbphy_get_data(gbphy_dev);
+ struct gb_connection *connection = gb_usb_dev->connection;
+ struct usb_hcd *hcd = gb_usb_device_to_hcd(gb_usb_dev);
+
+ usb_remove_hcd(hcd);
+ gb_connection_disable(connection);
+ gb_connection_destroy(connection);
+ usb_put_hcd(hcd);
+}
+
+static const struct gbphy_device_id gb_usb_id_table[] = {
+ { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_USB) },
+ { },
+};
+MODULE_DEVICE_TABLE(gbphy, gb_usb_id_table);
+
+static struct gbphy_driver usb_driver = {
+ .name = "usb",
+ .probe = gb_usb_probe,
+ .remove = gb_usb_remove,
+ .id_table = gb_usb_id_table,
+};
+
+module_gbphy_driver(usb_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
new file mode 100644
index 000000000000..4ba0e168930f
--- /dev/null
+++ b/drivers/staging/greybus/vibrator.c
@@ -0,0 +1,249 @@
+/*
+ * Greybus Vibrator protocol driver.
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ *
+ * Released under the GPLv2 only.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+
+#include "greybus.h"
+
+struct gb_vibrator_device {
+ struct gb_connection *connection;
+ struct device *dev;
+ int minor; /* vibrator minor number */
+ struct delayed_work delayed_work;
+};
+
+/* Greybus Vibrator operation types */
+#define GB_VIBRATOR_TYPE_ON 0x02
+#define GB_VIBRATOR_TYPE_OFF 0x03
+
+static int turn_off(struct gb_vibrator_device *vib)
+{
+ struct gb_bundle *bundle = vib->connection->bundle;
+ int ret;
+
+ ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_OFF,
+ NULL, 0, NULL, 0);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return ret;
+}
+
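+/*
+ * Switch the vibrator on and schedule delayed work to switch it off again
+ * after timeout_ms; a previously scheduled switch-off is cancelled first.
+ */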
+static int turn_on(struct gb_vibrator_device *vib, u16 timeout_ms)
+{
+ struct gb_bundle *bundle = vib->connection->bundle;
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ /* Vibrator was switched ON earlier */
+ if (cancel_delayed_work_sync(&vib->delayed_work))
+ turn_off(vib);
+
+ ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_ON,
+ NULL, 0, NULL, 0);
+ if (ret) {
+ gb_pm_runtime_put_autosuspend(bundle);
+ return ret;
+ }
+
+ schedule_delayed_work(&vib->delayed_work, msecs_to_jiffies(timeout_ms));
+
+ return 0;
+}
+
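+/* Delayed work that switches the vibrator off when the timeout expires. */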
+static void gb_vibrator_worker(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gb_vibrator_device *vib =
+ container_of(delayed_work, struct gb_vibrator_device, delayed_work);
+
+ turn_off(vib);
+}
+
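+/*
+ * Writing a timeout in milliseconds turns the vibrator on for that long;
+ * writing zero turns it off immediately.
+ */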
+static ssize_t timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gb_vibrator_device *vib = dev_get_drvdata(dev);
+ unsigned long val;
+ int retval;
+
+ retval = kstrtoul(buf, 10, &val);
+ if (retval < 0) {
+ dev_err(dev, "could not parse timeout value %d\n", retval);
+ return retval;
+ }
+
+ if (val)
+ retval = turn_on(vib, (u16)val);
+ else
+ retval = turn_off(vib);
+ if (retval)
+ return retval;
+
+ return count;
+}
+static DEVICE_ATTR_WO(timeout);
+
+static struct attribute *vibrator_attrs[] = {
+ &dev_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vibrator);
+
+static struct class vibrator_class = {
+ .name = "vibrator",
+ .owner = THIS_MODULE,
+ .dev_groups = vibrator_groups,
+};
+
+static DEFINE_IDA(minors);
+
+static int gb_vibrator_probe(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id)
+{
+ struct greybus_descriptor_cport *cport_desc;
+ struct gb_connection *connection;
+ struct gb_vibrator_device *vib;
+ struct device *dev;
+ int retval;
+
+ if (bundle->num_cports != 1)
+ return -ENODEV;
+
+ cport_desc = &bundle->cport_desc[0];
+ if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
+ return -ENODEV;
+
+ vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+ if (!vib)
+ return -ENOMEM;
+
+ connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
+ NULL);
+ if (IS_ERR(connection)) {
+ retval = PTR_ERR(connection);
+ goto err_free_vib;
+ }
+ gb_connection_set_data(connection, vib);
+
+ vib->connection = connection;
+
+ greybus_set_drvdata(bundle, vib);
+
+ retval = gb_connection_enable(connection);
+ if (retval)
+ goto err_connection_destroy;
+
+ /*
+	 * For now we create a device in sysfs for the vibrator. Odds are
+ * there is a "real" device somewhere in the kernel for this, but I
+ * can't find it at the moment...
+ */
+ vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ if (vib->minor < 0) {
+ retval = vib->minor;
+ goto err_connection_disable;
+ }
+ dev = device_create(&vibrator_class, &bundle->dev,
+ MKDEV(0, 0), vib, "vibrator%d", vib->minor);
+ if (IS_ERR(dev)) {
+ retval = -EINVAL;
+ goto err_ida_remove;
+ }
+ vib->dev = dev;
+
+ INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);
+
+ gb_pm_runtime_put_autosuspend(bundle);
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&minors, vib->minor);
+err_connection_disable:
+ gb_connection_disable(connection);
+err_connection_destroy:
+ gb_connection_destroy(connection);
+err_free_vib:
+ kfree(vib);
+
+ return retval;
+}
+
+static void gb_vibrator_disconnect(struct gb_bundle *bundle)
+{
+ struct gb_vibrator_device *vib = greybus_get_drvdata(bundle);
+ int ret;
+
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ gb_pm_runtime_get_noresume(bundle);
+
+ if (cancel_delayed_work_sync(&vib->delayed_work))
+ turn_off(vib);
+
+ device_unregister(vib->dev);
+ ida_simple_remove(&minors, vib->minor);
+ gb_connection_disable(vib->connection);
+ gb_connection_destroy(vib->connection);
+ kfree(vib);
+}
+
+static const struct greybus_bundle_id gb_vibrator_id_table[] = {
+ { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
+ { }
+};
+MODULE_DEVICE_TABLE(greybus, gb_vibrator_id_table);
+
+static struct greybus_driver gb_vibrator_driver = {
+ .name = "vibrator",
+ .probe = gb_vibrator_probe,
+ .disconnect = gb_vibrator_disconnect,
+ .id_table = gb_vibrator_id_table,
+};
+
+static __init int gb_vibrator_init(void)
+{
+ int retval;
+
+ retval = class_register(&vibrator_class);
+ if (retval)
+ return retval;
+
+ retval = greybus_register(&gb_vibrator_driver);
+ if (retval)
+ goto err_class_unregister;
+
+ return 0;
+
+err_class_unregister:
+ class_unregister(&vibrator_class);
+
+ return retval;
+}
+module_init(gb_vibrator_init);
+
+static __exit void gb_vibrator_exit(void)
+{
+ greybus_deregister(&gb_vibrator_driver);
+ class_unregister(&vibrator_class);
+ ida_destroy(&minors);
+}
+module_exit(gb_vibrator_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index a221f261c3d3..8ed4d395be58 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -8,10 +8,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index 8cc32555dbf3..7b8cc3a25214 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -7,12 +7,7 @@
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/firmware.h>
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
index 7b46ac24b74e..5e839b1fc884 100644
--- a/drivers/staging/gs_fpgaboot/io.h
+++ b/drivers/staging/gs_fpgaboot/io.h
@@ -66,10 +66,8 @@ enum wbus {
bus_2byte = 2,
};
-
#define MAX_WAIT_DONE 10000
-
struct gpiobus {
int ngpio;
void __iomem *r[4];
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index f0eb8441deed..ad7a0391369f 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -134,9 +134,9 @@ act2000_isa_config_irq(act2000_card *card, short irq)
{
int old_irq;
- if (card->flags & ACT2000_FLAGS_IVALID) {
+ if (card->flags & ACT2000_FLAGS_IVALID)
free_irq(card->irq, card);
- }
+
card->flags &= ~ACT2000_FLAGS_IVALID;
outb(ISA_COR_IRQOFF, ISA_PORT_COR);
if (!irq)
@@ -166,7 +166,7 @@ act2000_isa_config_port(act2000_card *card, unsigned short portbase)
release_region(card->port, ISA_REGION);
card->flags &= ~ACT2000_FLAGS_PVALID;
}
- if (request_region(portbase, ACT2000_PORTLEN, card->regname) == NULL)
+ if (!request_region(portbase, ACT2000_PORTLEN, card->regname))
return -EBUSY;
else {
card->port = portbase;
@@ -176,7 +176,7 @@ act2000_isa_config_port(act2000_card *card, unsigned short portbase)
}
/*
- * Release ressources, used by an adaptor.
+ * Release resources used by an adaptor.
*/
void
act2000_isa_release(act2000_card *card)
@@ -244,7 +244,7 @@ act2000_isa_receive(act2000_card *card)
if (valid) {
card->idat.isa.rcvlen = ((actcapi_msghdr *)&card->idat.isa.rcvhdr)->len;
card->idat.isa.rcvskb = dev_alloc_skb(card->idat.isa.rcvlen);
- if (card->idat.isa.rcvskb == NULL) {
+ if (!card->idat.isa.rcvskb) {
card->idat.isa.rcvignore = 1;
printk(KERN_WARNING
"act2000_isa_receive: no memory\n");
@@ -399,7 +399,6 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
unsigned int length;
int l;
int c;
- long timeout;
u_char *b;
u_char __user *p;
u_char *buf;
@@ -417,7 +416,6 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
buf = kmalloc(1024, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- timeout = 0;
while (length) {
l = (length > 1024) ? 1024 : length;
c = 0;
diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 3f66ca20b5e5..62f56294853c 100644
--- a/drivers/staging/i4l/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
@@ -113,7 +113,9 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr)
m->hdr.cmd.cmd = c; \
m->hdr.cmd.subcmd = s; \
m->hdr.msgnum = actcapi_nextsmsg(card); \
- } else m = NULL; \
+ } else { \
+ m = NULL; \
+ } \
}
#define ACTCAPI_CHKSKB if (!skb) { \
@@ -547,12 +549,11 @@ static int
actcapi_data_b3_ind(act2000_card *card, struct sk_buff *skb) {
__u16 plci;
__u16 ncci;
- __u16 controller;
__u8 blocknr;
int chan;
actcapi_msg *msg = (actcapi_msg *)skb->data;
- EVAL_NCCI(msg->msg.data_b3_ind.fakencci, plci, controller, ncci);
+ EVAL_NCCI(msg->msg.data_b3_ind.fakencci, plci, ncci);
chan = find_ncci(card, ncci);
if (chan < 0)
return 0;
@@ -617,7 +618,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
spin_lock_irqsave(&card->lock, flags);
tmp = skb_peek((struct sk_buff_head *)tmp);
spin_unlock_irqrestore(&card->lock, flags);
- if ((tmp == skb) || (tmp == NULL)) {
+ if ((tmp == skb) || !tmp) {
/* reached end of queue */
printk(KERN_WARNING "act2000: handle_ack nothing found!\n");
return 0;
@@ -990,7 +991,8 @@ actcapi_debug_dlpd(actcapi_dlpd *dlpd)
}
#ifdef DEBUG_DUMP_SKB
-static void dump_skb(struct sk_buff *skb) {
+static void dump_skb(struct sk_buff *skb)
+{
char tmp[80];
char *p = skb->data;
char *t = tmp;
diff --git a/drivers/staging/i4l/act2000/capi.h b/drivers/staging/i4l/act2000/capi.h
index 01ccdecd43f7..34884a5efee5 100644
--- a/drivers/staging/i4l/act2000/capi.h
+++ b/drivers/staging/i4l/act2000/capi.h
@@ -114,9 +114,8 @@ typedef struct actcapi_ncpd {
#define MAKE_NCCI(plci, contr, ncci) \
((plci & 0x1f) | ((contr & 0x7) << 5) | ((ncci & 0xff) << 8))
-#define EVAL_NCCI(fakencci, plci, contr, ncci) { \
+#define EVAL_NCCI(fakencci, plci, ncci) { \
plci = fakencci & 0x1f; \
- contr = (fakencci >> 5) & 0x7; \
ncci = (fakencci >> 8) & 0xff; \
}
@@ -128,13 +127,6 @@ typedef struct actcapi_ncpd {
* Bit 5-7 = Controller
* Bit 8-15 = reserved (must be 0)
*/
-#define MAKE_PLCI(plci, contr) \
- ((plci & 0x1f) | ((contr & 0x7) << 5))
-
-#define EVAL_PLCI(fakeplci, plci, contr) { \
- plci = fakeplci & 0x1f; \
- contr = (fakeplci >> 5) & 0x7; \
- }
typedef struct actcapi_msg {
actcapi_msghdr hdr;
diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 68073d0da0e3..99c9c0a1c63e 100644
--- a/drivers/staging/i4l/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
@@ -28,7 +28,7 @@ static unsigned short act2000_isa_ports[] =
static act2000_card *cards = (act2000_card *) NULL;
/* Parameters to be set by insmod */
-static int act_bus = 0;
+static int act_bus;
static int act_port = -1; /* -1 = Autoprobe */
static int act_irq = -1;
static char *act_id = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
@@ -289,7 +289,8 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
if (copy_from_user(tmp, arg,
sizeof(tmp)))
return -EFAULT;
- if ((ret = act2000_set_msn(card, tmp)))
+ ret = act2000_set_msn(card, tmp);
+ if (ret)
return ret;
if (card->flags & ACT2000_FLAGS_RUNNING)
return (actcapi_manufacturer_req_msn(card));
diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 46d957c34be1..514bfc2c5b53 100644
--- a/drivers/staging/i4l/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -62,7 +62,8 @@ icn_free_queue(icn_card *card, int channel)
skb_queue_purge(queue);
card->xlen[channel] = 0;
card->sndcount[channel] = 0;
- if ((skb = card->xskb[channel])) {
+ skb = card->xskb[channel];
+ if (skb) {
card->xskb[channel] = NULL;
dev_kfree_skb(skb);
}
@@ -81,12 +82,11 @@ icn_shiftout(unsigned short port,
int firstbit,
int bitcount)
{
-
register u_char s;
register u_char c;
for (s = firstbit, c = bitcount; c > 0; s--, c--)
- OUTB_P((u_char) ((val >> s) & 1) ? 0xff : 0, port);
+ OUTB_P((u_char)((val >> s) & 1) ? 0xff : 0, port);
}
/*
@@ -272,8 +272,10 @@ icn_pollbchan_receive(int channel, icn_card *card)
rbnext;
icn_maprelease_channel(card, mch & 2);
if (!eflag) {
- if ((cnt = card->rcvidx[channel])) {
- if (!(skb = dev_alloc_skb(cnt))) {
+ cnt = card->rcvidx[channel];
+ if (cnt) {
+ skb = dev_alloc_skb(cnt);
+ if (!skb) {
printk(KERN_WARNING "icn: receive out of memory\n");
break;
}
@@ -382,7 +384,7 @@ icn_pollbchan_send(int channel, icn_card *card)
static void
icn_pollbchan(unsigned long data)
{
- icn_card *card = (icn_card *) data;
+ icn_card *card = (icn_card *)data;
unsigned long flags;
if (card->flags & ICN_FLAGS_B1ACTIVE) {
@@ -472,7 +474,6 @@ icn_parse_status(u_char *status, int channel, icn_card *card)
if (card->flags &
((channel) ? ICN_FLAGS_B2ACTIVE : ICN_FLAGS_B1ACTIVE)) {
-
isdn_ctrl ncmd;
card->flags &= ~((channel) ?
@@ -544,7 +545,7 @@ icn_parse_status(u_char *status, int channel, icn_card *card)
break;
case 6:
snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%d",
- (int) simple_strtoul(status + 7, NULL, 16));
+ (int)simple_strtoul(status + 7, NULL, 16));
break;
case 7:
status += 3;
@@ -604,7 +605,7 @@ icn_putmsg(icn_card *card, unsigned char c)
static void
icn_polldchan(unsigned long data)
{
- icn_card *card = (icn_card *) data;
+ icn_card *card = (icn_card *)data;
int mch = card->secondhalf ? 2 : 0;
int avail = 0;
int left;
@@ -656,9 +657,8 @@ icn_polldchan(unsigned long data)
*q = '\0';
strcat(vstr, "000");
vstr[3] = '\0';
- card->fw_rev = (int) simple_strtoul(vstr, NULL, 10);
+ card->fw_rev = (int)simple_strtoul(vstr, NULL, 10);
continue;
-
}
}
} else {
@@ -683,7 +683,7 @@ icn_polldchan(unsigned long data)
card->flags |= ICN_FLAGS_RBTIMER;
del_timer(&card->rb_timer);
card->rb_timer.function = icn_pollbchan;
- card->rb_timer.data = (unsigned long) card;
+ card->rb_timer.data = (unsigned long)card;
card->rb_timer.expires = jiffies + ICN_TIMER_BCREAD;
add_timer(&card->rb_timer);
}
@@ -805,17 +805,12 @@ icn_loadboot(u_char __user *buffer, icn_card *card)
unsigned long flags;
#ifdef BOOT_DEBUG
- printk(KERN_DEBUG "icn_loadboot called, buffaddr=%08lx\n", (ulong) buffer);
+ printk(KERN_DEBUG "icn_loadboot called, buffaddr=%08lx\n", (ulong)buffer);
#endif
- if (!(codebuf = kmalloc(ICN_CODE_STAGE1, GFP_KERNEL))) {
- printk(KERN_WARNING "icn: Could not allocate code buffer\n");
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(codebuf, buffer, ICN_CODE_STAGE1)) {
- ret = -EFAULT;
- goto out_kfree;
- }
+ codebuf = memdup_user(buffer, ICN_CODE_STAGE1);
+ if (IS_ERR(codebuf))
+ return PTR_ERR(codebuf);
+
if (!card->rvalid) {
if (!request_region(card->port, ICN_PORTLEN, card->regname)) {
printk(KERN_WARNING
@@ -878,9 +873,9 @@ icn_loadboot(u_char __user *buffer, icn_card *card)
}
SLEEP(1);
OUTB_P(0xff, ICN_RUN); /* Start Boot-Code */
- if ((ret = icn_check_loader(card->doubleS0 ? 2 : 1))) {
+ ret = icn_check_loader(card->doubleS0 ? 2 : 1);
+ if (ret)
goto out_kfree;
- }
if (!card->doubleS0) {
ret = 0;
goto out_kfree;
@@ -898,7 +893,6 @@ icn_loadboot(u_char __user *buffer, icn_card *card)
out_kfree:
kfree(codebuf);
-out:
return ret;
}
@@ -980,18 +974,17 @@ icn_loadproto(u_char __user *buffer, icn_card *card)
card->secondhalf);
#endif
spin_lock_irqsave(&card->lock, flags);
- init_timer(&card->st_timer);
- card->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
- card->st_timer.function = icn_polldchan;
- card->st_timer.data = (unsigned long) card;
- add_timer(&card->st_timer);
+ setup_timer(&card->st_timer, icn_polldchan,
+ (unsigned long)card);
+ mod_timer(&card->st_timer,
+ jiffies + ICN_TIMER_DCREAD);
card->flags |= ICN_FLAGS_RUNNING;
if (card->doubleS0) {
- init_timer(&card->other->st_timer);
- card->other->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
- card->other->st_timer.function = icn_polldchan;
- card->other->st_timer.data = (unsigned long) card->other;
- add_timer(&card->other->st_timer);
+ setup_timer(&card->other->st_timer,
+ icn_polldchan,
+ (unsigned long)card->other);
+ mod_timer(&card->other->st_timer,
+ jiffies + ICN_TIMER_DCREAD);
card->other->flags |= ICN_FLAGS_RUNNING;
}
spin_unlock_irqrestore(&card->lock, flags);
@@ -1022,7 +1015,8 @@ icn_readstatus(u_char __user *buf, int len, icn_card *card)
/* Put command-strings into the command-queue of the Interface */
static int
-icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
+icn_writecmd(const u_char __user *ubuf, const u_char *kbuf, int len,
+ int user, icn_card *card)
{
int mch = card->secondhalf ? 2 : 0;
int pp;
@@ -1045,10 +1039,10 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
if (count > len)
count = len;
if (user) {
- if (copy_from_user(msg, buf, count))
+ if (copy_from_user(msg, ubuf, count))
return -EFAULT;
} else
- memcpy(msg, buf, count);
+ memcpy(msg, kbuf, count);
spin_lock_irqsave(&dev.devlock, flags);
lastmap_card = dev.mcard;
@@ -1190,28 +1184,28 @@ icn_command(isdn_ctrl *c, icn_card *card)
}
break;
case ICN_IOCTL_GETMMIO:
- return (long) dev.memaddr;
+ return (long)dev.memaddr;
case ICN_IOCTL_SETPORT:
if (a == 0x300 || a == 0x310 || a == 0x320 || a == 0x330
|| a == 0x340 || a == 0x350 || a == 0x360 ||
a == 0x308 || a == 0x318 || a == 0x328 || a == 0x338
|| a == 0x348 || a == 0x358 || a == 0x368) {
- if (card->port != (unsigned short) a) {
- if (!request_region((unsigned short) a, ICN_PORTLEN, "icn-isdn")) {
+ if (card->port != (unsigned short)a) {
+ if (!request_region((unsigned short)a, ICN_PORTLEN, "icn-isdn")) {
printk(KERN_WARNING
"icn: (%s) ports 0x%03x-0x%03x in use.\n",
- CID, (int) a, (int) a + ICN_PORTLEN);
+ CID, (int)a, (int)a + ICN_PORTLEN);
return -EINVAL;
}
- release_region((unsigned short) a, ICN_PORTLEN);
+ release_region((unsigned short)a, ICN_PORTLEN);
icn_stopcard(card);
spin_lock_irqsave(&card->lock, flags);
if (card->rvalid)
release_region(card->port, ICN_PORTLEN);
- card->port = (unsigned short) a;
+ card->port = (unsigned short)a;
card->rvalid = 0;
if (card->doubleS0) {
- card->other->port = (unsigned short) a;
+ card->other->port = (unsigned short)a;
card->other->rvalid = 0;
}
spin_unlock_irqrestore(&card->lock, flags);
@@ -1223,9 +1217,9 @@ icn_command(isdn_ctrl *c, icn_card *card)
return -EINVAL;
break;
case ICN_IOCTL_GETPORT:
- return (int) card->port;
+ return (int)card->port;
case ICN_IOCTL_GETDOUBLE:
- return (int) card->doubleS0;
+ return (int)card->doubleS0;
case ICN_IOCTL_DEBUGVAR:
if (copy_to_user(arg,
&card,
@@ -1246,10 +1240,11 @@ icn_command(isdn_ctrl *c, icn_card *card)
dev.firstload = 0;
}
icn_stopcard(card);
- return (icn_loadboot(arg, card));
+ return icn_loadboot(arg, card);
case ICN_IOCTL_LOADPROTO:
icn_stopcard(card);
- if ((i = (icn_loadproto(arg, card))))
+ i = (icn_loadproto(arg, card));
+ if (i)
return i;
if (card->doubleS0)
i = icn_loadproto(arg + ICN_CODE_STAGE2, card->other);
@@ -1262,19 +1257,20 @@ icn_command(isdn_ctrl *c, icn_card *card)
arg,
sizeof(cdef)))
return -EFAULT;
- return (icn_addcard(cdef.port, cdef.id1, cdef.id2));
+ return icn_addcard(cdef.port, cdef.id1, cdef.id2);
break;
case ICN_IOCTL_LEASEDCFG:
if (a) {
if (!card->leased) {
card->leased = 1;
- while (card->ptype == ISDN_PTYPE_UNKNOWN) {
+ while (card->ptype == ISDN_PTYPE_UNKNOWN)
msleep_interruptible(ICN_BOOT_TIMEOUT1);
- }
msleep_interruptible(ICN_BOOT_TIMEOUT1);
sprintf(cbuf, "00;FV2ON\n01;EAZ%c\n02;EAZ%c\n",
(a & 1) ? '1' : 'C', (a & 2) ? '2' : 'C');
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf,
+ strlen(cbuf),
+ 0, card);
printk(KERN_INFO
"icn: (%s) Leased-line mode enabled\n",
CID);
@@ -1287,7 +1283,9 @@ icn_command(isdn_ctrl *c, icn_card *card)
if (card->leased) {
card->leased = 0;
sprintf(cbuf, "00;FV2OFF\n");
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf,
+ strlen(cbuf),
+ 0, card);
printk(KERN_INFO
"icn: (%s) Leased-line mode disabled\n",
CID);
@@ -1321,10 +1319,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
/* Normal Dial */
strcpy(dcode, "CAL");
snprintf(cbuf, sizeof(cbuf),
- "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int)(a + 1),
dcode, p, c->parm.setup.si1,
c->parm.setup.si2, c->parm.setup.eazmsn);
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_ACCEPTD:
@@ -1335,16 +1333,18 @@ icn_command(isdn_ctrl *c, icn_card *card)
if (card->fw_rev >= 300) {
switch (card->l2_proto[a - 1]) {
case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BX75\n", (int) a);
+ sprintf(cbuf, "%02d;BX75\n", (int)a);
break;
case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BTRA\n", (int) a);
+ sprintf(cbuf, "%02d;BTRA\n", (int)a);
break;
}
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf,
+ strlen(cbuf), 0,
+ card);
}
- sprintf(cbuf, "%02d;DCON_R\n", (int) a);
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ sprintf(cbuf, "%02d;DCON_R\n", (int)a);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_ACCEPTB:
@@ -1355,14 +1355,14 @@ icn_command(isdn_ctrl *c, icn_card *card)
if (card->fw_rev >= 300)
switch (card->l2_proto[a - 1]) {
case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BCON_R,BX75\n", (int) a);
+ sprintf(cbuf, "%02d;BCON_R,BX75\n", (int)a);
break;
case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int) a);
+ sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int)a);
break;
} else
- sprintf(cbuf, "%02d;BCON_R\n", (int) a);
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ sprintf(cbuf, "%02d;BCON_R\n", (int)a);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_HANGUP:
@@ -1370,8 +1370,8 @@ icn_command(isdn_ctrl *c, icn_card *card)
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
- sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int) a, (int) a);
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int)a, (int)a);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_SETEAZ:
@@ -1382,12 +1382,12 @@ icn_command(isdn_ctrl *c, icn_card *card)
if (c->arg < ICN_BCH) {
a = c->arg + 1;
if (card->ptype == ISDN_PTYPE_EURO) {
- sprintf(cbuf, "%02d;MS%s%s\n", (int) a,
+ sprintf(cbuf, "%02d;MS%s%s\n", (int)a,
c->parm.num[0] ? "N" : "ALL", c->parm.num);
} else
- sprintf(cbuf, "%02d;EAZ%s\n", (int) a,
+ sprintf(cbuf, "%02d;EAZ%s\n", (int)a,
c->parm.num[0] ? (char *)(c->parm.num) : "0123456789");
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_CLREAZ:
@@ -1398,10 +1398,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
if (c->arg < ICN_BCH) {
a = c->arg + 1;
if (card->ptype == ISDN_PTYPE_EURO)
- sprintf(cbuf, "%02d;MSNC\n", (int) a);
+ sprintf(cbuf, "%02d;MSNC\n", (int)a);
else
- sprintf(cbuf, "%02d;EAZC\n", (int) a);
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ sprintf(cbuf, "%02d;EAZC\n", (int)a);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_SETL2:
@@ -1411,15 +1411,15 @@ icn_command(isdn_ctrl *c, icn_card *card)
a = c->arg;
switch (a >> 8) {
case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BX75\n", (int) (a & 255) + 1);
+ sprintf(cbuf, "%02d;BX75\n", (int)(a & 255) + 1);
break;
case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
+ sprintf(cbuf, "%02d;BTRA\n", (int)(a & 255) + 1);
break;
default:
return -EINVAL;
}
- i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
+ i = icn_writecmd(NULL, cbuf, strlen(cbuf), 0, card);
card->l2_proto[a & 255] = (a >> 8);
}
break;
@@ -1446,7 +1446,7 @@ icn_findcard(int driverid)
return p;
p = p->next;
}
- return (icn_card *) 0;
+ return (icn_card *)0;
}
/*
@@ -1458,7 +1458,7 @@ if_command(isdn_ctrl *c)
icn_card *card = icn_findcard(c->driver);
if (card)
- return (icn_command(c, card));
+ return icn_command(c, card);
printk(KERN_ERR
"icn: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
@@ -1473,7 +1473,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
- return (icn_writecmd(buf, len, 1, card));
+ return icn_writecmd(buf, NULL, len, 1, card);
}
printk(KERN_ERR
"icn: if_writecmd called with invalid driverId!\n");
@@ -1488,7 +1488,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
- return (icn_readstatus(buf, len, card));
+ return icn_readstatus(buf, len, card);
}
printk(KERN_ERR
"icn: if_readstatus called with invalid driverId!\n");
@@ -1503,7 +1503,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
if (card) {
if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
- return (icn_sendbuf(channel, ack, skb, card));
+ return icn_sendbuf(channel, ack, skb, card);
}
printk(KERN_ERR
"icn: if_sendbuf called with invalid driverId!\n");
@@ -1520,10 +1520,11 @@ icn_initcard(int port, char *id)
icn_card *card;
int i;
- if (!(card = kzalloc(sizeof(icn_card), GFP_KERNEL))) {
+ card = kzalloc(sizeof(icn_card), GFP_KERNEL);
+ if (!card) {
printk(KERN_WARNING
"icn: (%s) Could not allocate card-struct.\n", id);
- return (icn_card *) 0;
+ return (icn_card *)0;
}
spin_lock_init(&card->lock);
card->port = port;
@@ -1555,7 +1556,7 @@ icn_initcard(int port, char *id)
printk(KERN_WARNING
"icn: Unable to register %s\n", id);
kfree(card);
- return (icn_card *) 0;
+ return (icn_card *)0;
}
card->myid = card->interface.channels;
sprintf(card->regname, "icn-isdn (%s)", card->interface.id);
@@ -1568,16 +1569,17 @@ icn_addcard(int port, char *id1, char *id2)
icn_card *card;
icn_card *card2;
- if (!(card = icn_initcard(port, id1))) {
+ card = icn_initcard(port, id1);
+ if (!card)
return -EIO;
- }
if (!strlen(id2)) {
printk(KERN_INFO
"icn: (%s) ICN-2B, port 0x%x added\n",
card->interface.id, port);
return 0;
}
- if (!(card2 = icn_initcard(port, id2))) {
+ card2 = icn_initcard(port, id2);
+ if (!card2) {
printk(KERN_INFO
"icn: (%s) half ICN-4B, port 0x%x added\n", id2, port);
return 0;
@@ -1611,13 +1613,14 @@ icn_setup(char *line)
if (str && *str) {
strlcpy(sid, str, sizeof(sid));
icn_id = sid;
- if ((p = strchr(sid, ','))) {
+ p = strchr(sid, ',');
+ if (p) {
*p++ = 0;
strcpy(sid2, p);
icn_id2 = sid2;
}
}
- return (1);
+ return 1;
}
__setup("icn=", icn_setup);
#endif /* MODULE */
@@ -1634,7 +1637,8 @@ static int __init icn_init(void)
dev.firstload = 1;
spin_lock_init(&dev.devlock);
- if ((p = strchr(revision, ':'))) {
+ p = strchr(revision, ':');
+ if (p) {
strncpy(rev, p + 1, 20);
rev[20] = '\0';
p = strchr(rev, '$');
@@ -1644,7 +1648,7 @@ static int __init icn_init(void)
strcpy(rev, " ??? ");
printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
dev.memaddr);
- return (icn_addcard(portbase, icn_id, icn_id2));
+ return icn_addcard(portbase, icn_id, icn_id2);
}
static void __exit icn_exit(void)
diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 4e3cbf857d60..373f90feda5a 100644
--- a/drivers/staging/i4l/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
@@ -92,9 +92,7 @@ int capi_conn_req(const char *calledPN, struct sk_buff **skb, int proto)
*(skb_put(*skb, 1)) = 0x80; /* Speech */
*(skb_put(*skb, 1)) = 0x10; /* Circuit Mode */
*(skb_put(*skb, 1)) = 0x23; /* A-law */
- }
- else
- {
+ } else {
/* Bearer Capability - Mandatory*/
*(skb_put(*skb, 1)) = 2; /* BC0.Length */
*(skb_put(*skb, 1)) = 0x88; /* Digital Information */
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index c5270e229efb..d417df5efb5f 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -359,11 +359,9 @@ static int pcbit_xmit(int driver, int chnum, int ack, struct sk_buff *skb)
*/
#ifdef BLOCK_TIMER
if (chan->block_timer.function == NULL) {
- init_timer(&chan->block_timer);
- chan->block_timer.function = &pcbit_block_timer;
- chan->block_timer.data = (long) chan;
- chan->block_timer.expires = jiffies + 1 * HZ;
- add_timer(&chan->block_timer);
+ setup_timer(&chan->block_timer, &pcbit_block_timer,
+ (long)chan);
+ mod_timer(&chan->block_timer, jiffies + 1 * HZ);
}
#endif
return 0;
@@ -804,11 +802,7 @@ static int set_protocol_running(struct pcbit_dev *dev)
{
isdn_ctrl ctl;
- init_timer(&dev->set_running_timer);
-
- dev->set_running_timer.function = &set_running_timeout;
- dev->set_running_timer.data = (ulong) dev;
- dev->set_running_timer.expires = jiffies + SET_RUN_TIMEOUT;
+ setup_timer(&dev->set_running_timer, &set_running_timeout, (ulong)dev);
/* kick it */
@@ -817,7 +811,7 @@ static int set_protocol_running(struct pcbit_dev *dev)
writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)),
dev->sh_mem + BANK4);
- add_timer(&dev->set_running_timer);
+ mod_timer(&dev->set_running_timer, jiffies + SET_RUN_TIMEOUT);
wait_event(dev->set_running_wq, dev->l2_state == L2_RUNNING ||
dev->l2_state == L2_DOWN);
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index e72c16420712..6d291d548423 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -298,11 +298,8 @@ void pcbit_fsm_event(struct pcbit_dev *dev, struct pcbit_chan *chan,
break;
if (tentry->init != 0xff) {
- init_timer(&chan->fsm_timer);
- chan->fsm_timer.function = &pcbit_fsm_timer;
- chan->fsm_timer.data = (ulong) chan;
- chan->fsm_timer.expires = jiffies + tentry->timeout * HZ;
- add_timer(&chan->fsm_timer);
+ setup_timer(&chan->fsm_timer, &pcbit_fsm_timer, (ulong)chan);
+ mod_timer(&chan->fsm_timer, jiffies + tentry->timeout * HZ);
}
spin_unlock_irqrestore(&dev->lock, flags);
diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index 46e1240ae074..a136c72547e5 100644
--- a/drivers/staging/i4l/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
@@ -645,11 +645,9 @@ pcbit_l2_error(struct pcbit_dev *dev)
dev->l2_state = L2_DOWN;
- init_timer(&dev->error_recover_timer);
- dev->error_recover_timer.function = &pcbit_l2_err_recover;
- dev->error_recover_timer.data = (ulong) dev;
- dev->error_recover_timer.expires = jiffies + ERRTIME;
- add_timer(&dev->error_recover_timer);
+ setup_timer(&dev->error_recover_timer, &pcbit_l2_err_recover,
+ (ulong)dev);
+ mod_timer(&dev->error_recover_timer, jiffies + ERRTIME);
}
}
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index 9c8a9587df7d..4dcc8575cbe3 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -113,6 +113,7 @@
#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
#define SCA3000_OUT_CTRL_BUF_Y_EN 0x08
#define SCA3000_OUT_CTRL_BUF_Z_EN 0x04
+#define SCA3000_OUT_CTRL_BUF_DIV_MASK 0x03
#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index b5625f5d5e0e..d626125d7af9 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -402,6 +402,7 @@ static const struct iio_event_spec sca3000_event = {
.channel2 = mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
.address = index, \
.scan_index = index, \
.scan_type = { \
@@ -412,7 +413,7 @@ static const struct iio_event_spec sca3000_event = {
}, \
.event_spec = &sca3000_event, \
.num_event_specs = 1, \
- }
+ }
static const struct iio_chan_spec sca3000_channels[] = {
SCA3000_CHAN(0, IIO_MOD_X),
@@ -443,6 +444,97 @@ static u8 sca3000_addresses[3][3] = {
SCA3000_MD_CTRL_OR_Z},
};
+/**
+ * __sca3000_get_base_freq() obtain mode specific base frequency
+ *
+ * lock must be held
+ **/
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+ const struct sca3000_chip_info *info,
+ int *base_freq)
+{
+ int ret;
+
+ ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
+ if (ret)
+ goto error_ret;
+ switch (0x03 & st->rx[0]) {
+ case SCA3000_MEAS_MODE_NORMAL:
+ *base_freq = info->measurement_mode_freq;
+ break;
+ case SCA3000_MEAS_MODE_OP_1:
+ *base_freq = info->option_mode_1_freq;
+ break;
+ case SCA3000_MEAS_MODE_OP_2:
+ *base_freq = info->option_mode_2_freq;
+ break;
+ }
+error_ret:
+ return ret;
+}
+
+/**
+ * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * lock must be held
+ **/
+static int read_raw_samp_freq(struct sca3000_state *st, int *val)
+{
+ int ret;
+
+ ret = __sca3000_get_base_freq(st, st->info, val);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ if (*val > 0) {
+ ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK;
+ switch (ret) {
+ case SCA3000_OUT_CTRL_BUF_DIV_2:
+ *val /= 2;
+ break;
+ case SCA3000_OUT_CTRL_BUF_DIV_4:
+ *val /= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * lock must be held
+ **/
+static int write_raw_samp_freq(struct sca3000_state *st, int val)
+{
+ int ret, base_freq, ctrlval;
+
+ ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+ if (ret)
+ return ret;
+
+ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK;
+
+ if (val == base_freq / 2)
+ ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
+	else if (val == base_freq / 4)
+ ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
+ else if (val != base_freq)
+ return -EINVAL;
+
+ return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+ ctrlval);
+}
+
static int sca3000_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -495,9 +587,36 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
*val = -214;
*val2 = 600000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&st->lock);
+ ret = read_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret ? ret : IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sca3000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct sca3000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ ret = write_raw_samp_freq(st, val);
+ mutex_unlock(&st->lock);
+ return ret;
default:
return -EINVAL;
}
+
+ return ret;
}
/**
@@ -548,133 +667,12 @@ error_ret:
return ret;
}
-/**
- * __sca3000_get_base_freq() obtain mode specific base frequency
- *
- * lock must be held
- **/
-static inline int __sca3000_get_base_freq(struct sca3000_state *st,
- const struct sca3000_chip_info *info,
- int *base_freq)
-{
- int ret;
-
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
- if (ret)
- goto error_ret;
- switch (0x03 & st->rx[0]) {
- case SCA3000_MEAS_MODE_NORMAL:
- *base_freq = info->measurement_mode_freq;
- break;
- case SCA3000_MEAS_MODE_OP_1:
- *base_freq = info->option_mode_1_freq;
- break;
- case SCA3000_MEAS_MODE_OP_2:
- *base_freq = info->option_mode_2_freq;
- break;
- }
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_read_frequency() sysfs interface to get the current frequency
- **/
-static ssize_t sca3000_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, len = 0, base_freq = 0, val;
-
- mutex_lock(&st->lock);
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- goto error_ret_mut;
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- mutex_unlock(&st->lock);
- if (ret < 0)
- goto error_ret;
- val = ret;
- if (base_freq > 0)
- switch (val & 0x03) {
- case 0x00:
- case 0x03:
- len = sprintf(buf, "%d\n", base_freq);
- break;
- case 0x01:
- len = sprintf(buf, "%d\n", base_freq / 2);
- break;
- case 0x02:
- len = sprintf(buf, "%d\n", base_freq / 4);
- break;
- }
-
- return len;
-error_ret_mut:
- mutex_unlock(&st->lock);
-error_ret:
- return ret;
-}
-
-/**
- * sca3000_set_frequency() sysfs interface to set the current frequency
- **/
-static ssize_t sca3000_set_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret, base_freq = 0;
- int ctrlval;
- int val;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
- /* What mode are we in? */
- ret = __sca3000_get_base_freq(st, st->info, &base_freq);
- if (ret)
- goto error_free_lock;
-
- ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
- if (ret < 0)
- goto error_free_lock;
- ctrlval = ret;
- /* clear the bits */
- ctrlval &= ~0x03;
-
- if (val == base_freq / 2) {
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
- } else if (val == base_freq / 4) {
- ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
- } else if (val != base_freq) {
- ret = -EINVAL;
- goto error_free_lock;
- }
- ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
- ctrlval);
-error_free_lock:
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
/*
* Should only really be registered if ring buffer support is compiled in.
* Does no harm however and doing it right would add a fair bit of complexity
*/
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- sca3000_read_frequency,
- sca3000_set_frequency);
-
/**
* sca3000_read_thresh() - query of a threshold
**/
@@ -751,7 +749,6 @@ static struct attribute *sca3000_attributes[] = {
&iio_dev_attr_measurement_mode_available.dev_attr.attr,
&iio_dev_attr_measurement_mode.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
NULL,
};
@@ -1086,6 +1083,7 @@ error_ret:
static const struct iio_info sca3000_info = {
.attrs = &sca3000_attribute_group,
.read_raw = &sca3000_read_raw,
+ .write_raw = &sca3000_write_raw,
.event_attrs = &sca3000_event_attribute_group,
.read_event_value = &sca3000_read_thresh,
.write_event_value = &sca3000_write_thresh,
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 2177f1dd2b5d..b460dda7eb65 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -478,7 +478,7 @@ static ssize_t ad7280_store_balance_timer(struct device *dev,
static struct attribute *ad7280_attributes[AD7280A_MAX_CHAIN *
AD7280A_CELLS_PER_DEV * 2 + 1];
-static struct attribute_group ad7280_attrs_group = {
+static const struct attribute_group ad7280_attrs_group = {
.attrs = ad7280_attributes,
};
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 24c348d2f5bb..5eecf1cb1028 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -156,8 +156,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
},
};
-static int ad5933_i2c_write(struct i2c_client *client,
- u8 reg, u8 len, u8 *data)
+static int ad5933_i2c_write(struct i2c_client *client, u8 reg, u8 len, u8 *data)
{
int ret;
@@ -171,8 +170,7 @@ static int ad5933_i2c_write(struct i2c_client *client,
return 0;
}
-static int ad5933_i2c_read(struct i2c_client *client,
- u8 reg, u8 len, u8 *data)
+static int ad5933_i2c_read(struct i2c_client *client, u8 reg, u8 len, u8 *data)
{
int ret;
@@ -269,7 +267,8 @@ static int ad5933_setup(struct ad5933_state *st)
dat = cpu_to_be16(st->settling_cycles);
ret = ad5933_i2c_write(st->client,
- AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ AD5933_REG_SETTLING_CYCLES,
+ 2, (u8 *)&dat);
if (ret < 0)
return ret;
@@ -294,8 +293,8 @@ static void ad5933_calc_out_ranges(struct ad5933_state *st)
*/
static ssize_t ad5933_show_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -322,9 +321,9 @@ static ssize_t ad5933_show_frequency(struct device *dev,
}
static ssize_t ad5933_store_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -357,8 +356,8 @@ static IIO_DEVICE_ATTR(out_voltage0_freq_increment, S_IRUGO | S_IWUSR,
AD5933_REG_FREQ_INC);
static ssize_t ad5933_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -399,9 +398,9 @@ static ssize_t ad5933_show(struct device *dev,
}
static ssize_t ad5933_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5933_state *st = iio_priv(indio_dev);
@@ -451,7 +450,8 @@ static ssize_t ad5933_store(struct device *dev,
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
- AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
+ AD5933_REG_SETTLING_CYCLES,
+ 2, (u8 *)&dat);
break;
case AD5933_FREQ_POINTS:
val = clamp(val, (u16)0, (u16)511);
@@ -545,8 +545,8 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
goto out;
ret = ad5933_i2c_read(st->client,
- AD5933_REG_TEMP_DATA, 2,
- (u8 *)&dat);
+ AD5933_REG_TEMP_DATA,
+ 2, (u8 *)&dat);
if (ret < 0)
goto out;
mutex_unlock(&indio_dev->mlock);
@@ -705,7 +705,7 @@ static void ad5933_work(struct work_struct *work)
}
static int ad5933_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
int ret, voltage_uv = 0;
struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 76d9f74e7dcb..a767a43c995c 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -15,10 +15,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
@@ -32,25 +28,25 @@
#include <linux/iio/sysfs.h>
#include <linux/acpi.h>
-#define CONVERSION_TIME_MS 100
+#define ISL29018_CONV_TIME_MS 100
#define ISL29018_REG_ADD_COMMAND1 0x00
-#define COMMMAND1_OPMODE_SHIFT 5
-#define COMMMAND1_OPMODE_MASK (7 << COMMMAND1_OPMODE_SHIFT)
-#define COMMMAND1_OPMODE_POWER_DOWN 0
-#define COMMMAND1_OPMODE_ALS_ONCE 1
-#define COMMMAND1_OPMODE_IR_ONCE 2
-#define COMMMAND1_OPMODE_PROX_ONCE 3
+#define ISL29018_CMD1_OPMODE_SHIFT 5
+#define ISL29018_CMD1_OPMODE_MASK (7 << ISL29018_CMD1_OPMODE_SHIFT)
+#define ISL29018_CMD1_OPMODE_POWER_DOWN 0
+#define ISL29018_CMD1_OPMODE_ALS_ONCE 1
+#define ISL29018_CMD1_OPMODE_IR_ONCE 2
+#define ISL29018_CMD1_OPMODE_PROX_ONCE 3
-#define ISL29018_REG_ADD_COMMANDII 0x01
-#define COMMANDII_RESOLUTION_SHIFT 2
-#define COMMANDII_RESOLUTION_MASK (0x3 << COMMANDII_RESOLUTION_SHIFT)
+#define ISL29018_REG_ADD_COMMAND2 0x01
+#define ISL29018_CMD2_RESOLUTION_SHIFT 2
+#define ISL29018_CMD2_RESOLUTION_MASK (0x3 << ISL29018_CMD2_RESOLUTION_SHIFT)
-#define COMMANDII_RANGE_SHIFT 0
-#define COMMANDII_RANGE_MASK (0x3 << COMMANDII_RANGE_SHIFT)
+#define ISL29018_CMD2_RANGE_SHIFT 0
+#define ISL29018_CMD2_RANGE_MASK (0x3 << ISL29018_CMD2_RANGE_SHIFT)
-#define COMMANDII_SCHEME_SHIFT 7
-#define COMMANDII_SCHEME_MASK (0x1 << COMMANDII_SCHEME_SHIFT)
+#define ISL29018_CMD2_SCHEME_SHIFT 7
+#define ISL29018_CMD2_SCHEME_MASK (0x1 << ISL29018_CMD2_SCHEME_SHIFT)
#define ISL29018_REG_ADD_DATA_LSB 0x02
#define ISL29018_REG_ADD_DATA_MSB 0x03
@@ -127,13 +123,13 @@ static int isl29018_set_integration_time(struct isl29018_chip *chip,
if (i >= ARRAY_SIZE(isl29018_int_utimes[chip->type]))
return -EINVAL;
- ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_RESOLUTION_MASK,
- i << COMMANDII_RESOLUTION_SHIFT);
+ ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+ ISL29018_CMD2_RESOLUTION_MASK,
+ i << ISL29018_CMD2_RESOLUTION_SHIFT);
if (ret < 0)
return ret;
- /* keep the same range when integration time changes */
+ /* Keep the same range when integration time changes */
int_time = chip->int_time;
for (i = 0; i < ARRAY_SIZE(isl29018_scales[int_time]); ++i) {
if (chip->scale.scale == isl29018_scales[int_time][i].scale &&
@@ -163,9 +159,9 @@ static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
if (i >= ARRAY_SIZE(isl29018_scales[chip->int_time]))
return -EINVAL;
- ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_RANGE_MASK,
- i << COMMANDII_RANGE_SHIFT);
+ ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+ ISL29018_CMD2_RANGE_MASK,
+ i << ISL29018_CMD2_RANGE_SHIFT);
if (ret < 0)
return ret;
@@ -183,13 +179,13 @@ static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode)
/* Set mode */
status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
- mode << COMMMAND1_OPMODE_SHIFT);
+ mode << ISL29018_CMD1_OPMODE_SHIFT);
if (status) {
dev_err(dev,
"Error in setting operating mode err %d\n", status);
return status;
}
- msleep(CONVERSION_TIME_MS);
+ msleep(ISL29018_CONV_TIME_MS);
status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb);
if (status < 0) {
dev_err(dev,
@@ -213,8 +209,8 @@ static int isl29018_read_lux(struct isl29018_chip *chip, int *lux)
int lux_data;
unsigned int data_x_range;
- lux_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_ALS_ONCE);
-
+ lux_data = isl29018_read_sensor_input(chip,
+ ISL29018_CMD1_OPMODE_ALS_ONCE);
if (lux_data < 0)
return lux_data;
@@ -230,8 +226,8 @@ static int isl29018_read_ir(struct isl29018_chip *chip, int *ir)
{
int ir_data;
- ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE);
-
+ ir_data = isl29018_read_sensor_input(chip,
+ ISL29018_CMD1_OPMODE_IR_ONCE);
if (ir_data < 0)
return ir_data;
@@ -249,16 +245,16 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
struct device *dev = regmap_get_device(chip->regmap);
/* Do proximity sensing with required scheme */
- status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_SCHEME_MASK,
- scheme << COMMANDII_SCHEME_SHIFT);
+ status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+ ISL29018_CMD2_SCHEME_MASK,
+ scheme << ISL29018_CMD2_SCHEME_SHIFT);
if (status) {
dev_err(dev, "Error in setting operating mode\n");
return status;
}
prox_data = isl29018_read_sensor_input(chip,
- COMMMAND1_OPMODE_PROX_ONCE);
+ ISL29018_CMD1_OPMODE_PROX_ONCE);
if (prox_data < 0)
return prox_data;
@@ -267,8 +263,8 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
return 0;
}
- ir_data = isl29018_read_sensor_input(chip, COMMMAND1_OPMODE_IR_ONCE);
-
+ ir_data = isl29018_read_sensor_input(chip,
+ ISL29018_CMD1_OPMODE_IR_ONCE);
if (ir_data < 0)
return ir_data;
@@ -280,7 +276,7 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
return 0;
}
-static ssize_t show_scale_available(struct device *dev,
+static ssize_t isl29018_show_scale_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -297,7 +293,7 @@ static ssize_t show_scale_available(struct device *dev,
return len;
}
-static ssize_t show_int_time_available(struct device *dev,
+static ssize_t isl29018_show_int_time_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -313,8 +309,7 @@ static ssize_t show_int_time_available(struct device *dev,
return len;
}
-/* proximity scheme */
-static ssize_t show_prox_infrared_suppression(struct device *dev,
+static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -322,13 +317,13 @@ static ssize_t show_prox_infrared_suppression(struct device *dev,
struct isl29018_chip *chip = iio_priv(indio_dev);
/*
- * return the "proximity scheme" i.e. if the chip does on chip
+ * Return the "proximity scheme" i.e. if the chip does on chip
* infrared suppression (1 means perform on chip suppression)
*/
return sprintf(buf, "%d\n", chip->prox_scheme);
}
-static ssize_t store_prox_infrared_suppression(struct device *dev,
+static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -338,13 +333,11 @@ static ssize_t store_prox_infrared_suppression(struct device *dev,
if (kstrtoint(buf, 10, &val))
return -EINVAL;
- if (!(val == 0 || val == 1)) {
- dev_err(dev, "The mode is not supported\n");
+ if (!(val == 0 || val == 1))
return -EINVAL;
- }
/*
- * get the "proximity scheme" i.e. if the chip does on chip
+ * Get the "proximity scheme" i.e. if the chip does on chip
* infrared suppression (1 means perform on chip suppression)
*/
mutex_lock(&chip->lock);
@@ -354,7 +347,6 @@ static ssize_t store_prox_infrared_suppression(struct device *dev,
return count;
}
-/* Channel IO */
static int isl29018_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
@@ -491,13 +483,13 @@ static const struct iio_chan_spec isl29023_channels[] = {
};
static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
- show_int_time_available, NULL, 0);
+ isl29018_show_int_time_available, NULL, 0);
static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
- show_scale_available, NULL, 0);
+ isl29018_show_scale_available, NULL, 0);
static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
S_IRUGO | S_IWUSR,
- show_prox_infrared_suppression,
- store_prox_infrared_suppression, 0);
+ isl29018_show_prox_infrared_suppression,
+ isl29018_store_prox_infrared_suppression, 0);
#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
@@ -541,7 +533,7 @@ static int isl29035_detect(struct isl29018_chip *chip)
if (id != ISL29035_DEVICE_ID)
return -ENODEV;
- /* clear out brownout bit */
+ /* Clear brownout bit */
return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
ISL29035_BOUT_MASK, 0);
}
@@ -574,7 +566,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
* conversions, clear the test registers, and then rewrite all
* registers to the desired values.
* ...
- * FOR ISL29011, ISL29018, ISL29021, ISL29023
+ * For ISL29011, ISL29018, ISL29021, ISL29023
* 1. Write 0x00 to register 0x08 (TEST)
* 2. Write 0x00 to register 0x00 (CMD1)
* 3. Rewrite all registers to the desired values
@@ -603,7 +595,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
usleep_range(1000, 2000); /* per data sheet, page 10 */
- /* set defaults */
+ /* Set defaults */
status = isl29018_set_scale(chip, chip->scale.scale,
chip->scale.uscale);
if (status < 0) {
@@ -635,7 +627,7 @@ static const struct iio_info isl29023_info = {
.write_raw = isl29018_write_raw,
};
-static bool is_volatile_reg(struct device *dev, unsigned int reg)
+static bool isl29018_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case ISL29018_REG_ADD_DATA_LSB:
@@ -649,37 +641,32 @@ static bool is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-/*
- * isl29018_regmap_config: regmap configuration.
- * Use RBTREE mechanism for caching.
- */
static const struct regmap_config isl29018_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_reg = is_volatile_reg,
+ .volatile_reg = isl29018_is_volatile_reg,
.max_register = ISL29018_REG_TEST,
.num_reg_defaults_raw = ISL29018_REG_TEST + 1,
.cache_type = REGCACHE_RBTREE,
};
-/* isl29035_regmap_config: regmap configuration for ISL29035 */
static const struct regmap_config isl29035_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_reg = is_volatile_reg,
+ .volatile_reg = isl29018_is_volatile_reg,
.max_register = ISL29035_REG_DEVICE_ID,
.num_reg_defaults_raw = ISL29035_REG_DEVICE_ID + 1,
.cache_type = REGCACHE_RBTREE,
};
-struct chip_info {
+struct isl29018_chip_info {
const struct iio_chan_spec *channels;
int num_channels;
const struct iio_info *indio_info;
const struct regmap_config *regmap_cfg;
};
-static const struct chip_info chip_info_tbl[] = {
+static const struct isl29018_chip_info isl29018_chip_info_tbl[] = {
[isl29018] = {
.channels = isl29018_channels,
.num_channels = ARRAY_SIZE(isl29018_channels),
@@ -724,10 +711,8 @@ static int isl29018_probe(struct i2c_client *client,
int dev_id = 0;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation fails\n");
+ if (!indio_dev)
return -ENOMEM;
- }
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
@@ -750,7 +735,7 @@ static int isl29018_probe(struct i2c_client *client,
chip->suspended = false;
chip->regmap = devm_regmap_init_i2c(client,
- chip_info_tbl[dev_id].regmap_cfg);
+ isl29018_chip_info_tbl[dev_id].regmap_cfg);
if (IS_ERR(chip->regmap)) {
err = PTR_ERR(chip->regmap);
dev_err(&client->dev, "regmap initialization fails: %d\n", err);
@@ -761,19 +746,13 @@ static int isl29018_probe(struct i2c_client *client,
if (err)
return err;
- indio_dev->info = chip_info_tbl[dev_id].indio_info;
- indio_dev->channels = chip_info_tbl[dev_id].channels;
- indio_dev->num_channels = chip_info_tbl[dev_id].num_channels;
+ indio_dev->info = isl29018_chip_info_tbl[dev_id].indio_info;
+ indio_dev->channels = isl29018_chip_info_tbl[dev_id].channels;
+ indio_dev->num_channels = isl29018_chip_info_tbl[dev_id].num_channels;
indio_dev->name = name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
- err = devm_iio_device_register(&client->dev, indio_dev);
- if (err) {
- dev_err(&client->dev, "iio registration fails\n");
- return err;
- }
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -840,7 +819,6 @@ static const struct of_device_id isl29018_of_match[] = {
MODULE_DEVICE_TABLE(of, isl29018_of_match);
static struct i2c_driver isl29018_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29018",
.acpi_match_table = ACPI_PTR(isl29018_acpi_match),
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 2e3b1d64e32a..aa413e5878b9 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -27,29 +27,27 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#define CONVERSION_TIME_MS 100
+#define ISL29028_CONV_TIME_MS 100
#define ISL29028_REG_CONFIGURE 0x01
-#define CONFIGURE_ALS_IR_MODE_ALS 0
-#define CONFIGURE_ALS_IR_MODE_IR BIT(0)
-#define CONFIGURE_ALS_IR_MODE_MASK BIT(0)
+#define ISL29028_CONF_ALS_IR_MODE_ALS 0
+#define ISL29028_CONF_ALS_IR_MODE_IR BIT(0)
+#define ISL29028_CONF_ALS_IR_MODE_MASK BIT(0)
-#define CONFIGURE_ALS_RANGE_LOW_LUX 0
-#define CONFIGURE_ALS_RANGE_HIGH_LUX BIT(1)
-#define CONFIGURE_ALS_RANGE_MASK BIT(1)
+#define ISL29028_CONF_ALS_RANGE_LOW_LUX 0
+#define ISL29028_CONF_ALS_RANGE_HIGH_LUX BIT(1)
+#define ISL29028_CONF_ALS_RANGE_MASK BIT(1)
-#define CONFIGURE_ALS_DIS 0
-#define CONFIGURE_ALS_EN BIT(2)
-#define CONFIGURE_ALS_EN_MASK BIT(2)
+#define ISL29028_CONF_ALS_DIS 0
+#define ISL29028_CONF_ALS_EN BIT(2)
+#define ISL29028_CONF_ALS_EN_MASK BIT(2)
-#define CONFIGURE_PROX_DRIVE BIT(3)
+#define ISL29028_CONF_PROX_SLP_SH 4
+#define ISL29028_CONF_PROX_SLP_MASK (7 << ISL29028_CONF_PROX_SLP_SH)
-#define CONFIGURE_PROX_SLP_SH 4
-#define CONFIGURE_PROX_SLP_MASK (7 << CONFIGURE_PROX_SLP_SH)
-
-#define CONFIGURE_PROX_EN BIT(7)
-#define CONFIGURE_PROX_EN_MASK BIT(7)
+#define ISL29028_CONF_PROX_EN BIT(7)
+#define ISL29028_CONF_PROX_EN_MASK BIT(7)
#define ISL29028_REG_INTERRUPT 0x02
@@ -62,10 +60,10 @@
#define ISL29028_NUM_REGS (ISL29028_REG_TEST2_MODE + 1)
-enum als_ir_mode {
- MODE_NONE = 0,
- MODE_ALS,
- MODE_IR
+enum isl29028_als_ir_mode {
+ ISL29028_MODE_NONE = 0,
+ ISL29028_MODE_ALS,
+ ISL29028_MODE_IR,
};
struct isl29028_chip {
@@ -76,7 +74,7 @@ struct isl29028_chip {
bool enable_prox;
int lux_scale;
- int als_ir_mode;
+ enum isl29028_als_ir_mode als_ir_mode;
};
static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
@@ -91,7 +89,8 @@ static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
break;
}
return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_PROX_SLP_MASK, sel << CONFIGURE_PROX_SLP_SH);
+ ISL29028_CONF_PROX_SLP_MASK,
+ sel << ISL29028_CONF_PROX_SLP_SH);
}
static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
@@ -100,9 +99,9 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
int val = 0;
if (enable)
- val = CONFIGURE_PROX_EN;
+ val = ISL29028_CONF_PROX_EN;
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_PROX_EN_MASK, val);
+ ISL29028_CONF_PROX_EN_MASK, val);
if (ret < 0)
return ret;
@@ -113,40 +112,40 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
static int isl29028_set_als_scale(struct isl29028_chip *chip, int lux_scale)
{
- int val = (lux_scale == 2000) ? CONFIGURE_ALS_RANGE_HIGH_LUX :
- CONFIGURE_ALS_RANGE_LOW_LUX;
+ int val = (lux_scale == 2000) ? ISL29028_CONF_ALS_RANGE_HIGH_LUX :
+ ISL29028_CONF_ALS_RANGE_LOW_LUX;
return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_RANGE_MASK, val);
+ ISL29028_CONF_ALS_RANGE_MASK, val);
}
static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
- enum als_ir_mode mode)
+ enum isl29028_als_ir_mode mode)
{
int ret = 0;
switch (mode) {
- case MODE_ALS:
+ case ISL29028_MODE_ALS:
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK,
- CONFIGURE_ALS_IR_MODE_ALS);
+ ISL29028_CONF_ALS_IR_MODE_MASK,
+ ISL29028_CONF_ALS_IR_MODE_ALS);
if (ret < 0)
return ret;
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_RANGE_MASK,
- CONFIGURE_ALS_RANGE_HIGH_LUX);
+ ISL29028_CONF_ALS_RANGE_MASK,
+ ISL29028_CONF_ALS_RANGE_HIGH_LUX);
break;
- case MODE_IR:
+ case ISL29028_MODE_IR:
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK,
- CONFIGURE_ALS_IR_MODE_IR);
+ ISL29028_CONF_ALS_IR_MODE_MASK,
+ ISL29028_CONF_ALS_IR_MODE_IR);
break;
- case MODE_NONE:
+ case ISL29028_MODE_NONE:
return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_DIS);
+ ISL29028_CONF_ALS_EN_MASK, ISL29028_CONF_ALS_DIS);
}
if (ret < 0)
@@ -154,12 +153,13 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
/* Enable the ALS/IR */
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
+ ISL29028_CONF_ALS_EN_MASK,
+ ISL29028_CONF_ALS_EN);
if (ret < 0)
return ret;
/* Need to wait for conversion time if ALS/IR mode enabled */
- mdelay(CONVERSION_TIME_MS);
+ mdelay(ISL29028_CONV_TIME_MS);
return 0;
}
@@ -223,14 +223,14 @@ static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
int ret;
int als_ir_data;
- if (chip->als_ir_mode != MODE_ALS) {
- ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
+ if (chip->als_ir_mode != ISL29028_MODE_ALS) {
+ ret = isl29028_set_als_ir_mode(chip, ISL29028_MODE_ALS);
if (ret < 0) {
dev_err(dev,
"Error in enabling ALS mode err %d\n", ret);
return ret;
}
- chip->als_ir_mode = MODE_ALS;
+ chip->als_ir_mode = ISL29028_MODE_ALS;
}
ret = isl29028_read_als_ir(chip, &als_ir_data);
@@ -256,14 +256,14 @@ static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
struct device *dev = regmap_get_device(chip->regmap);
int ret;
- if (chip->als_ir_mode != MODE_IR) {
- ret = isl29028_set_als_ir_mode(chip, MODE_IR);
+ if (chip->als_ir_mode != ISL29028_MODE_IR) {
+ ret = isl29028_set_als_ir_mode(chip, ISL29028_MODE_IR);
if (ret < 0) {
dev_err(dev,
"Error in enabling IR mode err %d\n", ret);
return ret;
}
- chip->als_ir_mode = MODE_IR;
+ chip->als_ir_mode = ISL29028_MODE_IR;
}
return isl29028_read_als_ir(chip, ir_data);
}
@@ -383,8 +383,8 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
}
static IIO_CONST_ATTR(in_proximity_sampling_frequency_available,
- "1, 3, 5, 10, 13, 20, 83, 100");
-static IIO_CONST_ATTR(in_illuminance_scale_available, "125, 2000");
+ "1 3 5 10 13 20 83 100");
+static IIO_CONST_ATTR(in_illuminance_scale_available, "125 2000");
#define ISL29028_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
#define ISL29028_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr)
@@ -428,7 +428,7 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
chip->enable_prox = false;
chip->prox_sampling = 20;
chip->lux_scale = 2000;
- chip->als_ir_mode = MODE_NONE;
+ chip->als_ir_mode = ISL29028_MODE_NONE;
ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
if (ret < 0) {
@@ -462,7 +462,7 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
return ret;
}
-static bool is_volatile_reg(struct device *dev, unsigned int reg)
+static bool isl29028_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case ISL29028_REG_INTERRUPT:
@@ -478,7 +478,7 @@ static bool is_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config isl29028_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_reg = is_volatile_reg,
+ .volatile_reg = isl29028_is_volatile_reg,
.max_register = ISL29028_NUM_REGS - 1,
.num_reg_defaults_raw = ISL29028_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
@@ -546,7 +546,6 @@ static const struct of_device_id isl29028_of_match[] = {
MODULE_DEVICE_TABLE(of, isl29028_of_match);
static struct i2c_driver isl29028_driver = {
- .class = I2C_CLASS_HWMON,
.driver = {
.name = "isl29028",
.of_match_table = isl29028_of_match,
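
The renamed ISL29028_CONF_* macros above encode single-bit flags with BIT() and the three-bit proximity sleep field with a shift/mask pair, which regmap_update_bits() then applies as a read-modify-write. A standalone sketch of that bit arithmetic, with an invented register value and selector and nothing assumed beyond standard C:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)             (1u << (n))

#define CONF_PROX_EN       BIT(7)
#define CONF_PROX_SLP_SH   4
#define CONF_PROX_SLP_MASK (7u << CONF_PROX_SLP_SH)

/* Read-modify-write of the selected bits, in the spirit of regmap_update_bits(). */
static uint8_t demo_update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
	return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
	uint8_t configure = 0x00;
	unsigned int sel = 5;	/* hypothetical sleep-time selector, 0..7 */

	configure = demo_update_bits(configure, CONF_PROX_SLP_MASK,
				     (uint8_t)(sel << CONF_PROX_SLP_SH));
	configure = demo_update_bits(configure, CONF_PROX_EN, CONF_PROX_EN);
	printf("CONFIGURE = 0x%02x\n", configure);	/* prints 0xd0 */
	return 0;
}
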
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 05b4ad4e941c..08f1583ee34e 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -803,7 +803,7 @@ static struct attribute *sysfs_attrs_ctrl[] = {
NULL
};
-static struct attribute_group tsl2583_attribute_group = {
+static const struct attribute_group tsl2583_attribute_group = {
.attrs = sysfs_attrs_ctrl,
};
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index c46bef641613..17309591ca57 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -23,9 +23,7 @@
#include "meter.h"
#include "ade7754.h"
-static int ade7754_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
+static int ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -42,8 +40,7 @@ static int ade7754_spi_write_reg_8(struct device *dev,
}
static int ade7754_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
+ u8 reg_address, u16 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -59,9 +56,7 @@ static int ade7754_spi_write_reg_16(struct device *dev,
return ret;
}
-static int ade7754_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
+static int ade7754_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
@@ -70,7 +65,7 @@ static int ade7754_spi_read_reg_8(struct device *dev,
ret = spi_w8r8(st->us, ADE7754_READ_REG(reg_address));
if (ret < 0) {
dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
+ reg_address);
return ret;
}
*val = ret;
@@ -79,8 +74,7 @@ static int ade7754_spi_read_reg_8(struct device *dev,
}
static int ade7754_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
+ u8 reg_address, u16 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
@@ -99,8 +93,7 @@ static int ade7754_spi_read_reg_16(struct device *dev,
}
static int ade7754_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
+ u8 reg_address, u32 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
@@ -123,7 +116,7 @@ static int ade7754_spi_read_reg_24(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
@@ -134,8 +127,8 @@ error_ret:
}
static ssize_t ade7754_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u8 val = 0;
@@ -149,8 +142,8 @@ static ssize_t ade7754_read_8bit(struct device *dev,
}
static ssize_t ade7754_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u16 val = 0;
@@ -164,8 +157,8 @@ static ssize_t ade7754_read_16bit(struct device *dev,
}
static ssize_t ade7754_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u32 val = 0;
@@ -179,9 +172,9 @@ static ssize_t ade7754_read_24bit(struct device *dev,
}
static ssize_t ade7754_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -197,9 +190,9 @@ error_ret:
}
static ssize_t ade7754_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -403,16 +396,14 @@ err_ret:
}
static ssize_t ade7754_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u8 t;
int sps;
- ret = ade7754_spi_read_reg_8(dev,
- ADE7754_WAVMODE,
- &t);
+ ret = ade7754_spi_read_reg_8(dev, ADE7754_WAVMODE, &t);
if (ret)
return ret;
@@ -423,9 +414,9 @@ static ssize_t ade7754_read_frequency(struct device *dev,
}
static ssize_t ade7754_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index a6b76d4b1c80..57c213dfadcc 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -38,18 +38,14 @@ static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
int ret;
u8 reg;
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
if (ret)
goto out;
reg &= ~0x1F;
reg |= type & 0x1F;
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
+ ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
return ret;
}
@@ -94,7 +90,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
indio_dev->masklength);
ade7758_write_waveform_type(&indio_dev->dev,
- indio_dev->channels[channel].address);
+ indio_dev->channels[channel].address);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 75e8685e6df2..24edbc39ab4e 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -23,8 +23,8 @@
#include "ade7854.h"
static ssize_t ade7854_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u8 val = 0;
@@ -40,8 +40,8 @@ static ssize_t ade7854_read_8bit(struct device *dev,
}
static ssize_t ade7854_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u16 val = 0;
@@ -57,8 +57,8 @@ static ssize_t ade7854_read_16bit(struct device *dev,
}
static ssize_t ade7854_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u32 val;
@@ -74,8 +74,8 @@ static ssize_t ade7854_read_24bit(struct device *dev,
}
static ssize_t ade7854_read_32bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
u32 val = 0;
@@ -91,9 +91,9 @@ static ssize_t ade7854_read_32bit(struct device *dev,
}
static ssize_t ade7854_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -112,9 +112,9 @@ error_ret:
}
static ssize_t ade7854_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -133,9 +133,9 @@ error_ret:
}
static ssize_t ade7854_write_24bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -154,9 +154,9 @@ error_ret:
}
static ssize_t ade7854_write_32bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
index 16a392abdaec..df7f760e4110 100644
--- a/drivers/staging/ks7010/eap_packet.h
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -1,6 +1,8 @@
#ifndef EAP_PACKET_H
#define EAP_PACKET_H
+#include <linux/compiler.h>
+
#define WBIT(n) (1 << (n))
#ifndef ETH_ALEN
@@ -19,14 +21,14 @@ struct ether_hdr {
#define ETHER_PROTOCOL_TYPE_IP 0x0800
#define ETHER_PROTOCOL_TYPE_ARP 0x0806
/* followed by length octets of data */
-} __attribute__ ((packed));
+} __packed;
struct ieee802_1x_hdr {
unsigned char version;
unsigned char type;
unsigned short length;
/* followed by length octets of data */
-} __attribute__ ((packed));
+} __packed;
#define EAPOL_VERSION 2
@@ -51,25 +53,33 @@ enum { EAPOL_KEY_TYPE_RC4 = 1, EAPOL_KEY_TYPE_RSN = 2,
struct ieee802_1x_eapol_key {
unsigned char type;
unsigned short key_length;
- /* does not repeat within the life of the keying material used to
- * encrypt the Key field; 64-bit NTP timestamp MAY be used here */
+ /*
+ * does not repeat within the life of the keying material used to
+ * encrypt the Key field; 64-bit NTP timestamp MAY be used here
+ */
unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random number */
- unsigned char key_index; /* key flag in the most significant bit:
+ unsigned char key_index; /*
+ * key flag in the most significant bit:
* 0 = broadcast (default key),
* 1 = unicast (key mapping key); key index is in the
- * 7 least significant bits */
- /* HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
- * the key */
+ * 7 least significant bits
+ */
+ /*
+ * HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
+ * the key
+ */
unsigned char key_signature[IEEE8021X_KEY_SIGN_LEN];
- /* followed by key: if packet body length = 44 + key length, then the
+ /*
+ * followed by key: if packet body length = 44 + key length, then the
* key field (of key_length bytes) contains the key in encrypted form;
* if packet body length = 44, key field is absent and key_length
* represents the number of least significant octets from
* MS-MPPE-Send-Key attribute to be used as the keying material;
- * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key */
-} __attribute__ ((packed));
+ * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key
+ */
+} __packed;
#define WPA_NONCE_LEN 32
#define WPA_REPLAY_COUNTER_LEN 8
@@ -86,7 +96,7 @@ struct wpa_eapol_key {
unsigned char key_mic[16];
unsigned short key_data_length;
/* followed by key_data_length bytes of key_data */
-} __attribute__ ((packed));
+} __packed;
#define WPA_KEY_INFO_TYPE_MASK (WBIT(0) | WBIT(1) | WBIT(2))
#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 WBIT(0)
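
The eap_packet.h changes above only swap open-coded __attribute__ ((packed)) annotations for the kernel's __packed shorthand; the struct layouts are unchanged. As a rough userspace illustration of what packing buys for wire-format headers (the field names here are invented and the exact sizes are ABI-dependent):

#include <stdio.h>

#define __packed __attribute__((packed))	/* mirrors the kernel shorthand */

struct demo_hdr_padded {
	unsigned char  type;
	unsigned short key_length;	/* normally aligned to 2 bytes, so a pad byte appears */
};

struct demo_hdr_packed {
	unsigned char  type;
	unsigned short key_length;	/* packed: follows immediately, matching the wire layout */
} __packed;

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct demo_hdr_padded),	/* typically 4 */
	       sizeof(struct demo_hdr_packed));	/* typically 3 */
	return 0;
}
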
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index b7337fd813d5..81c46f4d0935 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -14,7 +14,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "ks_wlan.h"
#include "ks_wlan_ioctl.h"
@@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
/* macro */
#define inc_txqhead(priv) \
- ( priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE )
+ (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
#define inc_txqtail(priv) \
- ( priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE )
+ (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
#define cnt_txqbody(priv) \
- (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE )
+ (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
#define inc_rxqhead(priv) \
- ( priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE )
+ (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
#define inc_rxqtail(priv) \
- ( priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE )
+ (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
#define cnt_rxqbody(priv) \
- (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE )
+ (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
unsigned char *buffer, int length)
@@ -87,7 +87,7 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
return rc;
}
-void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
+static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
{
unsigned char rw_data;
int retval;
@@ -115,10 +115,9 @@ void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
out:
priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
- return;
}
-void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
+static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
{
unsigned char rw_data;
int retval;
@@ -146,7 +145,6 @@ void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
out:
priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
- return;
}
void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
@@ -159,9 +157,9 @@ void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
rw_data = WAKEUP_REQ;
retval =
ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
- if (retval) {
+ if (retval)
DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
- }
+
DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
priv->last_wakeup = jiffies;
++priv->wakeup_count;
@@ -171,19 +169,16 @@ void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
}
}
-int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+static int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
- int rc = 0;
unsigned char rw_data;
int retval;
if (priv->reg.powermgt == POWMGT_ACTIVE_MODE)
- return rc;
+ return 0;
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
(priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
-
- //DPRINTK(1,"psstatus.status=%d\n",atomic_read(&priv->psstatus.status));
if (priv->dev_state == DEVICE_STATE_SLEEP) {
switch (atomic_read(&priv->psstatus.status)) {
case PS_SNOOZE: /* 4 */
@@ -246,10 +241,9 @@ int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
break;
}
}
-
}
- return rc;
+ return 0;
}
int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
@@ -268,7 +262,7 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
if (priv->dev_state < DEVICE_STATE_BOOT) {
kfree(p);
- if (complete_handler != NULL)
+ if (complete_handler)
(*complete_handler) (arg1, arg2);
return 1;
}
@@ -277,7 +271,7 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
/* in case of buffer overflow */
DPRINTK(1, "tx buffer overflow\n");
kfree(p);
- if (complete_handler != NULL)
+ if (complete_handler)
(*complete_handler) (arg1, arg2);
return 1;
}
@@ -297,11 +291,10 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
unsigned long size)
{
- int rc, retval;
+ int retval;
unsigned char rw_data;
struct hostif_hdr *hdr;
hdr = (struct hostif_hdr *)buffer;
- rc = 0;
DPRINTK(4, "size=%d\n", hdr->size);
if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -346,10 +339,9 @@ static void tx_device_task(void *dev)
&priv->ks_wlan_hw.rw_wq, 1);
return;
}
-
}
kfree(sp->sendp); /* allocated memory free */
- if (sp->complete_handler != NULL) /* TX Complete */
+ if (sp->complete_handler) /* TX Complete */
(*sp->complete_handler) (sp->arg1, sp->arg2);
inc_txqhead(priv);
@@ -358,7 +350,6 @@ static void tx_device_task(void *dev)
&priv->ks_wlan_hw.rw_wq, 0);
}
}
- return;
}
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
@@ -402,12 +393,9 @@ static void rx_event_task(unsigned long dev)
hostif_receive(priv, rp->data, rp->size);
inc_rxqhead(priv);
- if (cnt_rxqbody(priv) > 0) {
+ if (cnt_rxqbody(priv) > 0)
tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
- }
}
-
- return;
}
static void ks_wlan_hw_rx(void *dev, uint16_t size)
@@ -432,9 +420,8 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
retval =
ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
hif_align_size(size));
- if (retval) {
+ if (retval)
goto error_out;
- }
/* length check */
if (size > 2046 || size == 0) {
@@ -449,9 +436,9 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
retval =
ks7010_sdio_write(priv, READ_STATUS, &read_status,
sizeof(read_status));
- if (retval) {
+ if (retval)
DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
- }
+
goto error_out;
}
@@ -465,9 +452,9 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
retval =
ks7010_sdio_write(priv, READ_STATUS, &read_status,
sizeof(read_status));
- if (retval) {
+ if (retval)
DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
- }
+
DPRINTK(4, "READ_STATUS=%02X\n", read_status);
if (atomic_read(&priv->psstatus.confirm_wait)) {
@@ -498,7 +485,7 @@ static void ks7010_rw_function(struct work_struct *work)
	/* wait after DOZE */
if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
- DPRINTK(4, "wait after DOZE \n");
+ DPRINTK(4, "wait after DOZE\n");
queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
&priv->ks_wlan_hw.rw_wq, 1);
return;
@@ -506,11 +493,13 @@ static void ks7010_rw_function(struct work_struct *work)
	/* wait after WAKEUP */
while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
- DPRINTK(4, "wait after WAKEUP \n");
+ DPRINTK(4, "wait after WAKEUP\n");
/* queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,&priv->ks_wlan_hw.rw_wq,
(priv->last_wakeup + ((30*HZ)/1000) - jiffies));*/
- printk("wake: %lu %lu\n", priv->last_wakeup + (30 * HZ) / 1000,
- jiffies);
+ dev_info(&priv->ks_wlan_hw.sdio_card->func->dev,
+ "wake: %lu %lu\n",
+ priv->last_wakeup + (30 * HZ) / 1000,
+ jiffies);
msleep(30);
}
@@ -549,17 +538,15 @@ static void ks7010_rw_function(struct work_struct *work)
if (rw_data & RSIZE_MASK) { /* Read schedule */
ks_wlan_hw_rx((void *)priv,
- (uint16_t) (((rw_data & RSIZE_MASK) << 4)));
+ (uint16_t)((rw_data & RSIZE_MASK) << 4));
}
- if ((rw_data & WSTATUS_MASK)) {
+ if ((rw_data & WSTATUS_MASK))
tx_device_task((void *)priv);
- }
+
_ks_wlan_hw_power_save(priv);
err_out:
sdio_release_host(priv->ks_wlan_hw.sdio_card->func);
-
- return;
}
static void ks_sdio_interrupt(struct sdio_func *func)
@@ -607,7 +594,6 @@ static void ks_sdio_interrupt(struct sdio_func *func)
}
complete(&priv->psstatus.wakeup_wait);
}
-
}
do {
@@ -624,7 +610,7 @@ static void ks_sdio_interrupt(struct sdio_func *func)
rsize = rw_data & RSIZE_MASK;
if (rsize) { /* Read schedule */
ks_wlan_hw_rx((void *)priv,
- (uint16_t) (((rsize) << 4)));
+ (uint16_t)(rsize << 4));
}
if (rw_data & WSTATUS_MASK) {
#if 0
@@ -667,7 +653,6 @@ static void ks_sdio_interrupt(struct sdio_func *func)
intr_out:
queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
&priv->ks_wlan_hw.rw_wq, 0);
- return;
}
static int trx_device_init(struct ks_wlan_private *priv)
@@ -696,14 +681,12 @@ static void trx_device_exit(struct ks_wlan_private *priv)
while (cnt_txqbody(priv) > 0) {
sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
kfree(sp->sendp); /* allocated memory free */
- if (sp->complete_handler != NULL) /* TX Complete */
+ if (sp->complete_handler) /* TX Complete */
(*sp->complete_handler) (sp->arg1, sp->arg2);
inc_txqhead(priv);
}
tasklet_kill(&priv->ks_wlan_hw.rx_bh_task);
-
- return;
}
static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
@@ -711,7 +694,6 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
int rc = 0;
int retval;
unsigned char *data_buf;
- data_buf = NULL;
data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
if (!data_buf) {
@@ -732,8 +714,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
goto error_out;
}
error_out:
- if (data_buf)
- kfree(data_buf);
+ kfree(data_buf);
return rc;
}
@@ -744,7 +725,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
int rc = 0;
int retval;
unsigned char *read_buf;
- read_buf = NULL;
+
read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
if (!read_buf) {
rc = 1;
@@ -758,13 +739,12 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
retval = memcmp(data, read_buf, size);
if (retval) {
- DPRINTK(0, "data compare error (%d) \n", retval);
+ DPRINTK(0, "data compare error (%d)\n", retval);
rc = 3;
goto error_out;
}
error_out:
- if (read_buf)
- kfree(read_buf);
+ kfree(read_buf);
return rc;
}
@@ -778,14 +758,10 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
int length;
const struct firmware *fw_entry = NULL;
- rom_buf = NULL;
-
/* buffer allocate */
rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!rom_buf) {
- rc = 3;
- goto error_out0;
- }
+ if (!rom_buf)
+ return 3;
sdio_claim_host(card->func);
@@ -799,7 +775,7 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
retval = request_firmware(&fw_entry, ROM_FILE, &priv->ks_wlan_hw.sdio_card->func->dev);
if (retval)
- return retval;
+ goto error_out0;
length = fw_entry->size;
@@ -879,8 +855,7 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
release_firmware(fw_entry);
error_out0:
sdio_release_host(card->func);
- if (rom_buf)
- kfree(rom_buf);
+ kfree(rom_buf);
return rc;
}
@@ -903,9 +878,9 @@ static void ks7010_card_init(struct ks_wlan_private *priv)
DPRINTK(1, "wait time out!! SME_START\n");
}
- if (priv->mac_address_valid && priv->version_size) {
+ if (priv->mac_address_valid && priv->version_size)
priv->dev_state = DEVICE_STATE_PREINIT;
- }
+
hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
@@ -981,7 +956,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
netdev = NULL;
	/* initialize ks_sdio_card */
- card = kzalloc(sizeof(struct ks_sdio_card), GFP_KERNEL);
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -1029,12 +1004,13 @@ static int ks7010_sdio_probe(struct sdio_func *func,
/* private memory allocate */
netdev = alloc_etherdev(sizeof(*priv));
- if (netdev == NULL) {
- printk(KERN_ERR "ks7010 : Unable to alloc new net device\n");
+ if (!netdev) {
+ dev_err(&card->func->dev, "ks7010 : Unable to alloc new net device\n");
goto error_release_irq;
}
if (dev_alloc_name(netdev, "wlan%d") < 0) {
- printk(KERN_ERR "ks7010 : Couldn't get name!\n");
+ dev_err(&card->func->dev,
+ "ks7010 : Couldn't get name!\n");
goto error_free_netdev;
}
@@ -1048,9 +1024,9 @@ static int ks7010_sdio_probe(struct sdio_func *func,
init_completion(&priv->ks_wlan_hw.ks7010_sdio_wait);
priv->ks_wlan_hw.read_buf = NULL;
priv->ks_wlan_hw.read_buf = kmalloc(RX_DATA_SIZE, GFP_KERNEL);
- if (!priv->ks_wlan_hw.read_buf) {
+ if (!priv->ks_wlan_hw.read_buf)
goto error_free_netdev;
- }
+
priv->dev_state = DEVICE_STATE_PREBOOT;
priv->net_dev = netdev;
priv->firmware_version[0] = '\0';
@@ -1074,9 +1050,9 @@ static int ks7010_sdio_probe(struct sdio_func *func,
/* Upload firmware */
ret = ks7010_upload_firmware(priv, card); /* firmware load */
if (ret) {
- printk(KERN_ERR
- "ks7010: firmware load failed !! retern code = %d\n",
- ret);
+ dev_err(&card->func->dev,
+ "ks7010: firmware load failed !! return code = %d\n",
+ ret);
goto error_free_read_buf;
}
@@ -1086,9 +1062,9 @@ static int ks7010_sdio_probe(struct sdio_func *func,
sdio_claim_host(func);
ret = ks7010_sdio_write(priv, INT_PENDING, &rw_data, sizeof(rw_data));
sdio_release_host(func);
- if (ret) {
+ if (ret)
DPRINTK(1, " error : INT_PENDING=%02X\n", rw_data);
- }
+
DPRINTK(4, " clear Interrupt : INT_PENDING=%02X\n", rw_data);
/* enable ks7010sdio interrupt (INT_GCR_B|INT_READ_STATUS|INT_WRITE_STATUS) */
@@ -1096,9 +1072,9 @@ static int ks7010_sdio_probe(struct sdio_func *func,
sdio_claim_host(func);
ret = ks7010_sdio_write(priv, INT_ENABLE, &rw_data, sizeof(rw_data));
sdio_release_host(func);
- if (ret) {
+ if (ret)
DPRINTK(1, " error : INT_ENABLE=%02X\n", rw_data);
- }
+
DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", rw_data);
priv->dev_state = DEVICE_STATE_BOOT;
@@ -1141,18 +1117,18 @@ static void ks7010_sdio_remove(struct sdio_func *func)
int ret;
struct ks_sdio_card *card;
struct ks_wlan_private *priv;
- struct net_device *netdev;
DPRINTK(1, "ks7010_sdio_remove()\n");
card = sdio_get_drvdata(func);
- if (card == NULL)
+ if (!card)
return;
DPRINTK(1, "priv = card->priv\n");
priv = card->priv;
- netdev = priv->net_dev;
if (priv) {
+ struct net_device *netdev = priv->net_dev;
+
ks_wlan_net_stop(netdev);
DPRINTK(1, "ks_wlan_net_stop\n");
@@ -1166,9 +1142,8 @@ static void ks7010_sdio_remove(struct sdio_func *func)
/* send stop request to MAC */
{
struct hostif_stop_request_t *pp;
- pp = (struct hostif_stop_request_t *)
- kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
- if (pp == NULL) {
+ pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
			return;	/* TODO: turn this into a goto */
}
@@ -1176,7 +1151,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
cpu_to_le16((uint16_t)
(sizeof(*pp) -
sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+ pp->header.event = cpu_to_le16((uint16_t)HIF_STOP_REQ);
sdio_claim_host(func);
write_to_device(priv, (unsigned char *)pp,
@@ -1199,9 +1174,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
unregister_netdev(netdev);
trx_device_exit(priv);
- if (priv->ks_wlan_hw.read_buf) {
- kfree(priv->ks_wlan_hw.read_buf);
- }
+ kfree(priv->ks_wlan_hw.read_buf);
free_netdev(priv->net_dev);
card->priv = NULL;
}
@@ -1219,7 +1192,6 @@ static void ks7010_sdio_remove(struct sdio_func *func)
DPRINTK(1, "kfree()\n");
DPRINTK(5, " Bye !!\n");
- return;
}
static struct sdio_driver ks7010_sdio_driver = {
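
The queue macros cleaned up near the top of ks7010_sdio.c (inc_txqhead(), cnt_txqbody(), and friends) are plain modulo ring-buffer indexing; only their spacing changed. A self-contained sketch of the same index arithmetic, assuming a small buffer size and arbitrary payload values:

#include <stdio.h>

#define TX_DEVICE_BUFF_SIZE 8

struct demo_tx_dev {
	int qhead;
	int qtail;
	int buf[TX_DEVICE_BUFF_SIZE];
};

/* Number of queued entries; valid as long as the producer never laps the consumer. */
static int demo_cnt_txqbody(const struct demo_tx_dev *q)
{
	return ((q->qtail + TX_DEVICE_BUFF_SIZE) - q->qhead) % TX_DEVICE_BUFF_SIZE;
}

static void demo_enqueue(struct demo_tx_dev *q, int v)
{
	q->buf[q->qtail] = v;
	q->qtail = (q->qtail + 1) % TX_DEVICE_BUFF_SIZE;	/* like inc_txqtail() */
}

static int demo_dequeue(struct demo_tx_dev *q)
{
	int v = q->buf[q->qhead];

	q->qhead = (q->qhead + 1) % TX_DEVICE_BUFF_SIZE;	/* like inc_txqhead() */
	return v;
}

int main(void)
{
	struct demo_tx_dev q = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		demo_enqueue(&q, i);
	printf("queued: %d\n", demo_cnt_txqbody(&q));	/* 5 */
	demo_dequeue(&q);
	demo_dequeue(&q);
	printf("queued: %d\n", demo_cnt_txqbody(&q));	/* 3 */
	return 0;
}
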
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index a8822fe2bd60..c57ca581550a 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -14,21 +14,13 @@
#include "eap_packet.h"
#include "michael_mic.h"
+#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
/* Include Wireless Extension definition and check version */
#include <net/iw_handler.h> /* New driver API */
-extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
- unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
- void *arg1, void *arg2);
-extern void send_packet_complete(void *, void *);
-
-extern void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
-extern int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
-
/* macro */
#define inc_smeqhead(priv) \
( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
@@ -43,6 +35,7 @@ static
inline u8 get_BYTE(struct ks_wlan_private *priv)
{
u8 data;
+
data = *(priv->rxp)++;
/* length check in advance ! */
--(priv->rx_size);
@@ -53,6 +46,7 @@ static
inline u16 get_WORD(struct ks_wlan_private *priv)
{
u16 data;
+
data = (get_BYTE(priv) & 0xff);
data |= ((get_BYTE(priv) << 8) & 0xff00);
return data;
@@ -62,6 +56,7 @@ static
inline u32 get_DWORD(struct ks_wlan_private *priv)
{
u32 data;
+
data = (get_BYTE(priv) & 0xff);
data |= ((get_BYTE(priv) << 8) & 0x0000ff00);
data |= ((get_BYTE(priv) << 16) & 0x00ff0000);
@@ -69,16 +64,20 @@ inline u32 get_DWORD(struct ks_wlan_private *priv)
return data;
}
-void ks_wlan_hw_wakeup_task(struct work_struct *work)
+static void ks_wlan_hw_wakeup_task(struct work_struct *work)
{
struct ks_wlan_private *priv =
container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
int ps_status = atomic_read(&priv->psstatus.status);
+ long time_left;
if (ps_status == PS_SNOOZE) {
ks_wlan_hw_wakeup_request(priv);
- if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
- DPRINTK(1, "wake up timeout !!!\n");
+ time_left = wait_for_completion_interruptible_timeout(
+ &priv->psstatus.wakeup_wait,
+ msecs_to_jiffies(20));
+ if (time_left <= 0) {
+ DPRINTK(1, "wake up timeout or interrupted !!!\n");
schedule_work(&priv->ks_wlan_wakeup_task);
return;
}
@@ -96,8 +95,6 @@ void ks_wlan_hw_wakeup_task(struct work_struct *work)
static
int ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
- int rc = 0;
-
DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
@@ -105,7 +102,7 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
} else {
priv->dev_state = DEVICE_STATE_READY;
}
- return rc;
+ return 0;
}
static
@@ -217,7 +214,6 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
{
unsigned char *bp;
int bsize, offset;
- int rc = 0;
DPRINTK(3, "\n");
memset(ap, 0, sizeof(struct local_ap_t));
@@ -240,13 +236,13 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
offset = 0;
while (bsize > offset) {
- /* DPRINTK(4, "Element ID=%d \n",*bp); */
+ /* DPRINTK(4, "Element ID=%d\n",*bp); */
switch (*bp) {
case 0: /* ssid */
if (*(bp + 1) <= SSID_MAX_SIZE) {
ap->ssid.size = *(bp + 1);
} else {
- DPRINTK(1, "size over :: ssid size=%d \n",
+ DPRINTK(1, "size over :: ssid size=%d\n",
*(bp + 1));
ap->ssid.size = SSID_MAX_SIZE;
}
@@ -260,7 +256,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
bp + 2, *(bp + 1));
ap->rate_set.size += *(bp + 1);
} else {
- DPRINTK(1, "size over :: rate size=%d \n",
+ DPRINTK(1, "size over :: rate size=%d\n",
(*(bp + 1) + ap->rate_set.size));
memcpy(&(ap->rate_set.body[ap->rate_set.size]),
bp + 2,
@@ -276,7 +272,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
if (*(bp + 1) <= RSN_IE_BODY_MAX) {
ap->rsn_ie.size = *(bp + 1);
} else {
- DPRINTK(1, "size over :: rsn size=%d \n",
+ DPRINTK(1, "size over :: rsn size=%d\n",
*(bp + 1));
ap->rsn_ie.size = RSN_IE_BODY_MAX;
}
@@ -289,7 +285,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
ap->wpa_ie.size = *(bp + 1);
} else {
DPRINTK(1,
- "size over :: wpa size=%d \n",
+ "size over :: wpa size=%d\n",
*(bp + 1));
ap->wpa_ie.size = RSN_IE_BODY_MAX;
}
@@ -307,7 +303,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
case 47: /* Reserve ID 47 Broadcom AP */
break;
default:
- DPRINTK(4, "unknown Element ID=%d \n", *bp);
+ DPRINTK(4, "unknown Element ID=%d\n", *bp);
break;
}
offset += 2; /* id & size field */
@@ -315,7 +311,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
bp += (*(bp + 1) + 2); /* pointer update */
}
- return rc;
+ return 0;
}
static
@@ -404,7 +400,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
HZ >= 60) {
mic_failure->failure = 0;
}
- DPRINTK(4, "MIC FAILURE \n");
+ DPRINTK(4, "MIC FAILURE\n");
if (mic_failure->failure == 0) {
mic_failure->failure = 1;
mic_failure->counter = 0;
@@ -481,8 +477,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
netif_rx(skb);
} else {
printk(KERN_WARNING
- "%s: Memory squeeze, dropping packet.\n",
- skb->dev->name);
+ "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -517,8 +512,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
netif_rx(skb);
} else {
printk(KERN_WARNING
- "%s: Memory squeeze, dropping packet.\n",
- skb->dev->name);
+ "ks_wlan: Memory squeeze, dropping packet.\n");
priv->nstats.rx_dropped++;
}
break;
@@ -778,7 +772,7 @@ void hostif_start_confirm(struct ks_wlan_private *priv)
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
- memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+ eth_zero_addr(wrqu.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
@@ -836,7 +830,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS &&
(old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
- memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+ eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
priv->scan_ind_count);
@@ -908,7 +902,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
if ((priv->connect_status & CONNECT_STATUS_MASK) ==
DISCONNECT_STATUS
&& (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
- memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+ eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
printk("IWEVENT: disconnect\n");
DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
@@ -1148,7 +1142,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
packet_len = packet->len;
if (packet_len > ETH_FRAME_LEN) {
- DPRINTK(1, "bad length packet_len=%d \n", packet_len);
+ DPRINTK(1, "bad length packet_len=%d\n", packet_len);
dev_kfree_skb(packet);
return -1;
}
@@ -1172,11 +1166,10 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
}
DPRINTK(4, "skb_buff length=%d\n", packet_len);
- pp = (struct hostif_data_request_t *)
- kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
- KS_WLAN_MEM_FLAG);
+ pp = kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
+ KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
dev_kfree_skb(packet);
return -2;
@@ -1194,6 +1187,8 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
eth->h_source[0], eth->h_source[1], eth->h_source[2],
eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+ dev_kfree_skb(packet);
+ kfree(pp);
return -3;
}
@@ -1231,7 +1226,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
eth_hdr = (struct ether_hdr *)&pp->data[0];
eth_proto = ntohs(eth_hdr->h_proto);
- /* for MIC FAILUER REPORT check */
+ /* for MIC FAILURE REPORT check */
if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
&& priv->wpa.mic_failure.failure > 0) {
aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
@@ -1284,7 +1279,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
(void *)send_packet_complete, (void *)priv,
(void *)packet);
- /* MIC FAILUER REPORT check */
+ /* MIC FAILURE REPORT check */
if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
&& priv->wpa.mic_failure.failure > 0) {
if (keyinfo & WPA_KEY_INFO_ERROR
@@ -1313,9 +1308,8 @@ void hostif_mib_get_request(struct ks_wlan_private *priv,
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_mib_get_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1344,9 +1338,8 @@ void hostif_mib_set_request(struct ks_wlan_private *priv,
}
/* make primitive */
- pp = (struct hostif_mib_set_request_t *)
- kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1374,9 +1367,8 @@ void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_start_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1402,9 +1394,8 @@ void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_ps_adhoc_set_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1443,12 +1434,11 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
struct hostif_infrastructure_set_request_t *pp;
uint16_t capability;
- DPRINTK(3, "ssid.size=%d \n", priv->reg.ssid.size);
+ DPRINTK(3, "ssid.size=%d\n", priv->reg.ssid.size);
/* make primitive */
- pp = (struct hostif_infrastructure_set_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1505,17 +1495,16 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
}
-void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
+static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
{
struct hostif_infrastructure_set2_request_t *pp;
uint16_t capability;
- DPRINTK(2, "ssid.size=%d \n", priv->reg.ssid.size);
+ DPRINTK(2, "ssid.size=%d\n", priv->reg.ssid.size);
/* make primitive */
- pp = (struct hostif_infrastructure_set2_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1583,9 +1572,8 @@ void hostif_adhoc_set_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_adhoc_set_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1629,9 +1617,8 @@ void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_adhoc_set2_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1677,9 +1664,8 @@ void hostif_stop_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_stop_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1700,9 +1686,8 @@ void hostif_phy_information_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
/* make primitive */
- pp = (struct hostif_phy_information_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1732,9 +1717,8 @@ void hostif_power_mngmt_request(struct ks_wlan_private *priv,
DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
receiveDTIMs);
/* make primitive */
- pp = (struct hostif_power_mngmt_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1755,13 +1739,12 @@ void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
{
struct hostif_sleep_request_t *pp;
- DPRINTK(3, "mode=%lu \n", mode);
+ DPRINTK(3, "mode=%lu\n", mode);
if (mode == SLP_SLEEP) {
/* make primitive */
- pp = (struct hostif_sleep_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1779,23 +1762,22 @@ void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
&priv->ks_wlan_hw.rw_wq, 1);
} else {
- DPRINTK(3, "invalid mode %ld \n", mode);
+ DPRINTK(3, "invalid mode %ld\n", mode);
return;
}
}
static
void hostif_bss_scan_request(struct ks_wlan_private *priv,
- unsigned long scan_type, uint8_t * scan_ssid,
+ unsigned long scan_type, uint8_t *scan_ssid,
uint8_t scan_ssid_len)
{
struct hostif_bss_scan_request_t *pp;
DPRINTK(2, "\n");
/* make primitive */
- pp = (struct hostif_bss_scan_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1850,9 +1832,8 @@ void hostif_mic_failure_request(struct ks_wlan_private *priv,
DPRINTK(3, "count=%d :: timer=%d\n", failure_count, timer);
/* make primitive */
- pp = (struct hostif_mic_failure_request_t *)
- kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (pp == NULL) {
+ pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (!pp) {
DPRINTK(3, "allocate memory failed..\n");
return;
}
@@ -1867,7 +1848,7 @@ void hostif_mic_failure_request(struct ks_wlan_private *priv,
ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
}
-/* Device I/O Recieve indicate */
+/* Device I/O Receive indicate */
static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
unsigned int size)
{
@@ -2700,7 +2681,6 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
int hostif_init(struct ks_wlan_private *priv)
{
- int rc = 0;
int i;
DPRINTK(3, "\n");
@@ -2750,7 +2730,7 @@ int hostif_init(struct ks_wlan_private *priv)
tasklet_init(&priv->sme_task, hostif_sme_task, (unsigned long)priv);
- return rc;
+ return 0;
}
void hostif_exit(struct ks_wlan_private *priv)
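
The get_BYTE()/get_WORD()/get_DWORD() helpers touched above (they only gained a blank line after the declaration) assemble little-endian values byte by byte from the receive pointer. A standalone sketch of that extraction, with a made-up frame buffer and demo_* names:

#include <stdint.h>
#include <stdio.h>

static const uint8_t *rxp;	/* stand-in for priv->rxp */

static uint8_t demo_get_byte(void)
{
	return *rxp++;
}

static uint16_t demo_get_word(void)
{
	uint16_t data;

	data = demo_get_byte();				/* low byte first */
	data |= (uint16_t)demo_get_byte() << 8;
	return data;
}

static uint32_t demo_get_dword(void)
{
	uint32_t data;

	data = demo_get_byte();
	data |= (uint32_t)demo_get_byte() << 8;
	data |= (uint32_t)demo_get_byte() << 16;
	data |= (uint32_t)demo_get_byte() << 24;
	return data;
}

int main(void)
{
	static const uint8_t frame[] = { 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };

	rxp = frame;
	printf("word  = 0x%04x\n", (unsigned int)demo_get_word());	/* 0x1234 */
	printf("dword = 0x%08x\n", (unsigned int)demo_get_dword());	/* 0x12345678 */
	return 0;
}
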
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index dc806b5b47be..743f31ead56e 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -11,6 +11,9 @@
#ifndef _KS_HOSTIF_H_
#define _KS_HOSTIF_H_
+
+#include <linux/compiler.h>
+
/*
* HOST-MAC I/F events
*/
@@ -61,7 +64,7 @@
struct hostif_hdr {
uint16_t size;
uint16_t event;
-} __attribute__ ((packed));
+} __packed;
struct hostif_data_request_t {
struct hostif_hdr header;
@@ -70,7 +73,7 @@ struct hostif_data_request_t {
#define TYPE_AUTH 0x0001
uint16_t reserved;
uint8_t data[0];
-} __attribute__ ((packed));
+} __packed;
struct hostif_data_indication_t {
struct hostif_hdr header;
@@ -81,14 +84,14 @@ struct hostif_data_indication_t {
#define TYPE_GMK2 0x0003
uint16_t reserved;
uint8_t data[0];
-} __attribute__ ((packed));
+} __packed;
#define CHANNEL_LIST_MAX_SIZE 14
struct channel_list_t {
uint8_t size;
uint8_t body[CHANNEL_LIST_MAX_SIZE];
uint8_t pad;
-} __attribute__ ((packed));
+} __packed;
/* MIB Attribute */
#define DOT11_MAC_ADDRESS 0x21010100 /* MAC Address (R) */
@@ -141,7 +144,7 @@ struct channel_list_t {
struct hostif_mib_get_request_t {
struct hostif_hdr header;
uint32_t mib_attribute;
-} __attribute__ ((packed));
+} __packed;
struct hostif_mib_value_t {
uint16_t size;
@@ -152,7 +155,7 @@ struct hostif_mib_value_t {
#define MIB_VALUE_TYPE_COUNT32 3
#define MIB_VALUE_TYPE_OSTRING 4
uint8_t body[0];
-} __attribute__ ((packed));
+} __packed;
struct hostif_mib_get_confirm_t {
struct hostif_hdr header;
@@ -163,19 +166,19 @@ struct hostif_mib_get_confirm_t {
#define MIB_WRITE_ONLY 3
uint32_t mib_attribute;
struct hostif_mib_value_t mib_value;
-} __attribute__ ((packed));
+} __packed;
struct hostif_mib_set_request_t {
struct hostif_hdr header;
uint32_t mib_attribute;
struct hostif_mib_value_t mib_value;
-} __attribute__ ((packed));
+} __packed;
struct hostif_mib_set_confirm_t {
struct hostif_hdr header;
uint32_t mib_status;
uint32_t mib_attribute;
-} __attribute__ ((packed));
+} __packed;
struct hostif_power_mngmt_request_t {
struct hostif_hdr header;
@@ -188,7 +191,7 @@ struct hostif_power_mngmt_request_t {
uint32_t receiveDTIMs;
#define DTIM_FALSE 0
#define DTIM_TRUE 1
-} __attribute__ ((packed));
+} __packed;
/* power management mode */
enum {
@@ -206,7 +209,7 @@ enum {
struct hostif_power_mngmt_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct hostif_start_request_t {
struct hostif_hdr header;
@@ -215,64 +218,64 @@ struct hostif_start_request_t {
#define MODE_INFRASTRUCTURE 1
#define MODE_AP 2 /* not used */
#define MODE_ADHOC 3
-} __attribute__ ((packed));
+} __packed;
struct hostif_start_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
#define SSID_MAX_SIZE 32
struct ssid_t {
uint8_t size;
uint8_t body[SSID_MAX_SIZE];
uint8_t ssid_pad;
-} __attribute__ ((packed));
+} __packed;
#define RATE_SET_MAX_SIZE 16
struct rate_set8_t {
uint8_t size;
uint8_t body[8];
uint8_t rate_pad;
-} __attribute__ ((packed));
+} __packed;
struct FhParms_t {
uint16_t dwellTime;
uint8_t hopSet;
uint8_t hopPattern;
uint8_t hopIndex;
-} __attribute__ ((packed));
+} __packed;
struct DsParms_t {
uint8_t channel;
-} __attribute__ ((packed));
+} __packed;
struct CfParms_t {
uint8_t count;
uint8_t period;
uint16_t maxDuration;
uint16_t durRemaining;
-} __attribute__ ((packed));
+} __packed;
struct IbssParms_t {
uint16_t atimWindow;
-} __attribute__ ((packed));
+} __packed;
struct rsn_t {
uint8_t size;
#define RSN_BODY_SIZE 64
uint8_t body[RSN_BODY_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct ErpParams_t {
uint8_t erp_info;
-} __attribute__ ((packed));
+} __packed;
struct rate_set16_t {
uint8_t size;
uint8_t body[16];
uint8_t rate_pad;
-} __attribute__ ((packed));
+} __packed;
struct ap_info_t {
uint8_t bssid[6]; /* +00 */
@@ -299,7 +302,7 @@ struct ap_info_t {
uint16_t body_size; /* +16 */
uint8_t body[1024]; /* +18 */
/* +1032 */
-} __attribute__ ((packed));
+} __packed;
struct link_ap_info_t {
uint8_t bssid[6]; /* +00 */
@@ -325,8 +328,8 @@ struct link_ap_info_t {
struct {
uint8_t size; /* +52 */
uint8_t body[128]; /* +53 */
- } __attribute__ ((packed)) rsn;
-} __attribute__ ((packed));
+ } __packed rsn;
+} __packed;
struct hostif_connect_indication_t {
struct hostif_hdr header;
@@ -334,16 +337,16 @@ struct hostif_connect_indication_t {
#define RESULT_CONNECT 0
#define RESULT_DISCONNECT 1
struct link_ap_info_t link_ap_info;
-} __attribute__ ((packed));
+} __packed;
struct hostif_stop_request_t {
struct hostif_hdr header;
-} __attribute__ ((packed));
+} __packed;
struct hostif_stop_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct hostif_ps_adhoc_set_request_t {
struct hostif_hdr header;
@@ -360,12 +363,12 @@ struct hostif_ps_adhoc_set_request_t {
uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
* bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
uint16_t scan_type;
-} __attribute__ ((packed));
+} __packed;
struct hostif_ps_adhoc_set_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct hostif_infrastructure_set_request_t {
struct hostif_hdr header;
@@ -381,7 +384,7 @@ struct hostif_infrastructure_set_request_t {
#define AUTH_TYPE_SHARED_KEY 1
struct channel_list_t channel_list;
uint16_t scan_type;
-} __attribute__ ((packed));
+} __packed;
struct hostif_infrastructure_set2_request_t {
struct hostif_hdr header;
@@ -398,12 +401,12 @@ struct hostif_infrastructure_set2_request_t {
struct channel_list_t channel_list;
uint16_t scan_type;
uint8_t bssid[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct hostif_infrastructure_set_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct hostif_adhoc_set_request_t {
struct hostif_hdr header;
@@ -415,7 +418,7 @@ struct hostif_adhoc_set_request_t {
uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
* bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
uint16_t scan_type;
-} __attribute__ ((packed));
+} __packed;
struct hostif_adhoc_set2_request_t {
struct hostif_hdr header;
@@ -429,17 +432,17 @@ struct hostif_adhoc_set2_request_t {
uint16_t scan_type;
struct channel_list_t channel_list;
uint8_t bssid[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct hostif_adhoc_set_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct last_associate_t {
uint8_t type;
uint8_t status;
-} __attribute__ ((packed));
+} __packed;
struct association_request_t {
uint8_t type;
@@ -450,7 +453,7 @@ struct association_request_t {
uint16_t listen_interval;
uint8_t ap_address[6];
uint16_t reqIEs_size;
-} __attribute__ ((packed));
+} __packed;
struct association_response_t {
uint8_t type;
@@ -461,7 +464,7 @@ struct association_response_t {
uint16_t status;
uint16_t association_id;
uint16_t respIEs_size;
-} __attribute__ ((packed));
+} __packed;
struct hostif_associate_indication_t {
struct hostif_hdr header;
@@ -469,7 +472,7 @@ struct hostif_associate_indication_t {
struct association_response_t assoc_resp;
/* followed by (reqIEs_size + respIEs_size) octets of data */
/* reqIEs data *//* respIEs data */
-} __attribute__ ((packed));
+} __packed;
struct hostif_bss_scan_request_t {
struct hostif_hdr header;
@@ -481,13 +484,13 @@ struct hostif_bss_scan_request_t {
uint32_t ch_time_max;
struct channel_list_t channel_list;
struct ssid_t ssid;
-} __attribute__ ((packed));
+} __packed;
struct hostif_bss_scan_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
uint16_t reserved;
-} __attribute__ ((packed));
+} __packed;
struct hostif_phy_information_request_t {
struct hostif_hdr header;
@@ -495,7 +498,7 @@ struct hostif_phy_information_request_t {
#define NORMAL_TYPE 0
#define TIME_TYPE 1
uint16_t time; /* unit 100ms */
-} __attribute__ ((packed));
+} __packed;
struct hostif_phy_information_confirm_t {
struct hostif_hdr header;
@@ -507,30 +510,30 @@ struct hostif_phy_information_confirm_t {
uint32_t rx_frame;
uint32_t tx_error;
uint32_t rx_error;
-} __attribute__ ((packed));
+} __packed;
/* sleep mode */
#define SLP_ACTIVE 0
#define SLP_SLEEP 1
struct hostif_sleep_request_t {
struct hostif_hdr header;
-} __attribute__ ((packed));
+} __packed;
struct hostif_sleep_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
struct hostif_mic_failure_request_t {
struct hostif_hdr header;
uint16_t failure_count;
uint16_t timer;
-} __attribute__ ((packed));
+} __packed;
struct hostif_mic_failure_confirm_t {
struct hostif_hdr header;
uint16_t result_code;
-} __attribute__ ((packed));
+} __packed;
#define BASIC_RATE 0x80
#define RATE_MASK 0x7F
@@ -616,13 +619,21 @@ enum {
#include "ks_wlan.h"
/* function prototype */
-extern int hostif_data_request(struct ks_wlan_private *priv,
- struct sk_buff *packet);
-extern void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size);
-extern void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
-extern int hostif_init(struct ks_wlan_private *priv);
-extern void hostif_exit(struct ks_wlan_private *priv);
+int hostif_data_request(struct ks_wlan_private *priv,
+ struct sk_buff *packet);
+void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size);
+void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
+int hostif_init(struct ks_wlan_private *priv);
+void hostif_exit(struct ks_wlan_private *priv);
+int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2);
+void send_packet_complete(void *, void *);
+
+void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
+int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
static
inline int hif_align_size(int size)
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index f05dc0122fcb..c2cc288ae899 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -219,7 +219,7 @@ struct rsn_ie_t {
uint8_t id; /* 0xdd = WPA or 0x30 = RSN */
uint8_t size; /* max ? 255 ? */
uint8_t body[RSN_IE_BODY_MAX];
-} __attribute__ ((packed));
+} __packed;
#ifdef WPS
#define WPS_IE_BODY_MAX 255
@@ -227,7 +227,7 @@ struct wps_ie_t {
uint8_t id; /* 221 'dd <len> 00 50 F2 04' */
uint8_t size; /* max ? 255 ? */
uint8_t body[WPS_IE_BODY_MAX];
-} __attribute__ ((packed));
+} __packed;
#endif /* WPS */
struct local_ap_t {
@@ -499,7 +499,7 @@ struct ks_wlan_private {
uint wakeup_count; /* for detect wakeup loop */
};
-extern int ks_wlan_net_start(struct net_device *dev);
-extern int ks_wlan_net_stop(struct net_device *dev);
+int ks_wlan_net_start(struct net_device *dev);
+int ks_wlan_net_stop(struct net_device *dev);
#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
index 49369e497808..84554b6bb239 100644
--- a/drivers/staging/ks7010/ks_wlan_ioctl.h
+++ b/drivers/staging/ks7010/ks_wlan_ioctl.h
@@ -58,9 +58,9 @@
#include "ks_wlan.h"
#include <linux/netdevice.h>
-extern int ks_wlan_read_config_file(struct ks_wlan_private *priv);
-extern int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag);
+int ks_wlan_read_config_file(struct ks_wlan_private *priv);
+int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ unsigned int commit_flag);
#endif /* __KERNEL__ */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 1e21eb1c4667..b2b4fa4c3834 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
@@ -70,10 +69,6 @@ static const struct iw_handler_def ks_wlan_handler_def;
/*
* function prototypes
*/
-extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
- unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
- void *arg1, void *arg2);
static int ks_wlan_open(struct net_device *dev);
static void ks_wlan_tx_timeout(struct net_device *dev);
static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -238,9 +233,9 @@ static int ks_wlan_set_freq(struct net_device *dev,
/* We should do a better check than that,
* based on the card capability !!! */
if ((channel < 1) || (channel > 14)) {
- printk(KERN_DEBUG
- "%s: New channel value of %d is invalid!\n",
- dev->name, fwrq->m);
+ netdev_dbg(dev,
+ "%s: New channel value of %d is invalid!\n",
+ dev->name, fwrq->m);
rc = -EINVAL;
} else {
/* Yes ! We can set it !!! */
@@ -402,7 +397,7 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
priv->need_commit |= SME_MODE_SET;
}
} else {
- memset(priv->reg.bssid, 0x0, ETH_ALEN);
+ eth_zero_addr(priv->reg.bssid);
return -EOPNOTSUPP;
}
@@ -433,7 +428,7 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
} else {
- memset(awrq->sa_data, 0, ETH_ALEN);
+ eth_zero_addr(awrq->sa_data);
}
awrq->sa_family = ARPHRD_ETHER;
@@ -2092,7 +2087,7 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
list_for_each(ptr, &priv->pmklist.head) {
pmk = list_entry(ptr, struct pmk_t, list);
if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) { /* match address! list del. */
- memset(pmk->bssid, 0, ETH_ALEN);
+ eth_zero_addr(pmk->bssid);
memset(pmk->pmkid, 0, IW_PMKID_LEN);
list_del_init(&pmk->list);
break;
@@ -2676,17 +2671,17 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
if (*uwrq == SLP_SLEEP) {
priv->sleep_mode = *uwrq;
- printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+ netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
hostif_sme_enqueue(priv, SME_STOP_REQUEST);
hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
} else if (*uwrq == SLP_ACTIVE) {
priv->sleep_mode = *uwrq;
- printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+ netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
} else {
- printk("SET_SLEEP_MODE %d errror\n", *uwrq);
+ netdev_err(dev, "SET_SLEEP_MODE %d errror\n", *uwrq);
return -EINVAL;
}
@@ -2788,7 +2783,7 @@ static int ks_wlan_get_wps_enable(struct net_device *dev,
}
/* for SLEEP MODE */
*uwrq = priv->wps.wps_enabled;
- printk("return=%d\n", *uwrq);
+ netdev_info(dev, "return=%d\n", *uwrq);
return 0;
}
@@ -2978,117 +2973,117 @@ static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
return 0;
}
-static void print_hif_event(int event)
+static void print_hif_event(struct net_device *dev, int event)
{
switch (event) {
case HIF_DATA_REQ:
- printk("HIF_DATA_REQ\n");
+ netdev_info(dev, "HIF_DATA_REQ\n");
break;
case HIF_DATA_IND:
- printk("HIF_DATA_IND\n");
+ netdev_info(dev, "HIF_DATA_IND\n");
break;
case HIF_MIB_GET_REQ:
- printk("HIF_MIB_GET_REQ\n");
+ netdev_info(dev, "HIF_MIB_GET_REQ\n");
break;
case HIF_MIB_GET_CONF:
- printk("HIF_MIB_GET_CONF\n");
+ netdev_info(dev, "HIF_MIB_GET_CONF\n");
break;
case HIF_MIB_SET_REQ:
- printk("HIF_MIB_SET_REQ\n");
+ netdev_info(dev, "HIF_MIB_SET_REQ\n");
break;
case HIF_MIB_SET_CONF:
- printk("HIF_MIB_SET_CONF\n");
+ netdev_info(dev, "HIF_MIB_SET_CONF\n");
break;
case HIF_POWERMGT_REQ:
- printk("HIF_POWERMGT_REQ\n");
+ netdev_info(dev, "HIF_POWERMGT_REQ\n");
break;
case HIF_POWERMGT_CONF:
- printk("HIF_POWERMGT_CONF\n");
+ netdev_info(dev, "HIF_POWERMGT_CONF\n");
break;
case HIF_START_REQ:
- printk("HIF_START_REQ\n");
+ netdev_info(dev, "HIF_START_REQ\n");
break;
case HIF_START_CONF:
- printk("HIF_START_CONF\n");
+ netdev_info(dev, "HIF_START_CONF\n");
break;
case HIF_CONNECT_IND:
- printk("HIF_CONNECT_IND\n");
+ netdev_info(dev, "HIF_CONNECT_IND\n");
break;
case HIF_STOP_REQ:
- printk("HIF_STOP_REQ\n");
+ netdev_info(dev, "HIF_STOP_REQ\n");
break;
case HIF_STOP_CONF:
- printk("HIF_STOP_CONF\n");
+ netdev_info(dev, "HIF_STOP_CONF\n");
break;
case HIF_PS_ADH_SET_REQ:
- printk("HIF_PS_ADH_SET_REQ\n");
+ netdev_info(dev, "HIF_PS_ADH_SET_REQ\n");
break;
case HIF_PS_ADH_SET_CONF:
- printk("HIF_PS_ADH_SET_CONF\n");
+ netdev_info(dev, "HIF_PS_ADH_SET_CONF\n");
break;
case HIF_INFRA_SET_REQ:
- printk("HIF_INFRA_SET_REQ\n");
+ netdev_info(dev, "HIF_INFRA_SET_REQ\n");
break;
case HIF_INFRA_SET_CONF:
- printk("HIF_INFRA_SET_CONF\n");
+ netdev_info(dev, "HIF_INFRA_SET_CONF\n");
break;
case HIF_ADH_SET_REQ:
- printk("HIF_ADH_SET_REQ\n");
+ netdev_info(dev, "HIF_ADH_SET_REQ\n");
break;
case HIF_ADH_SET_CONF:
- printk("HIF_ADH_SET_CONF\n");
+ netdev_info(dev, "HIF_ADH_SET_CONF\n");
break;
case HIF_AP_SET_REQ:
- printk("HIF_AP_SET_REQ\n");
+ netdev_info(dev, "HIF_AP_SET_REQ\n");
break;
case HIF_AP_SET_CONF:
- printk("HIF_AP_SET_CONF\n");
+ netdev_info(dev, "HIF_AP_SET_CONF\n");
break;
case HIF_ASSOC_INFO_IND:
- printk("HIF_ASSOC_INFO_IND\n");
+ netdev_info(dev, "HIF_ASSOC_INFO_IND\n");
break;
case HIF_MIC_FAILURE_REQ:
- printk("HIF_MIC_FAILURE_REQ\n");
+ netdev_info(dev, "HIF_MIC_FAILURE_REQ\n");
break;
case HIF_MIC_FAILURE_CONF:
- printk("HIF_MIC_FAILURE_CONF\n");
+ netdev_info(dev, "HIF_MIC_FAILURE_CONF\n");
break;
case HIF_SCAN_REQ:
- printk("HIF_SCAN_REQ\n");
+ netdev_info(dev, "HIF_SCAN_REQ\n");
break;
case HIF_SCAN_CONF:
- printk("HIF_SCAN_CONF\n");
+ netdev_info(dev, "HIF_SCAN_CONF\n");
break;
case HIF_PHY_INFO_REQ:
- printk("HIF_PHY_INFO_REQ\n");
+ netdev_info(dev, "HIF_PHY_INFO_REQ\n");
break;
case HIF_PHY_INFO_CONF:
- printk("HIF_PHY_INFO_CONF\n");
+ netdev_info(dev, "HIF_PHY_INFO_CONF\n");
break;
case HIF_SLEEP_REQ:
- printk("HIF_SLEEP_REQ\n");
+ netdev_info(dev, "HIF_SLEEP_REQ\n");
break;
case HIF_SLEEP_CONF:
- printk("HIF_SLEEP_CONF\n");
+ netdev_info(dev, "HIF_SLEEP_CONF\n");
break;
case HIF_PHY_INFO_IND:
- printk("HIF_PHY_INFO_IND\n");
+ netdev_info(dev, "HIF_PHY_INFO_IND\n");
break;
case HIF_SCAN_IND:
- printk("HIF_SCAN_IND\n");
+ netdev_info(dev, "HIF_SCAN_IND\n");
break;
case HIF_INFRA_SET2_REQ:
- printk("HIF_INFRA_SET2_REQ\n");
+ netdev_info(dev, "HIF_INFRA_SET2_REQ\n");
break;
case HIF_INFRA_SET2_CONF:
- printk("HIF_INFRA_SET2_CONF\n");
+ netdev_info(dev, "HIF_INFRA_SET2_CONF\n");
break;
case HIF_ADH_SET2_REQ:
- printk("HIF_ADH_SET2_REQ\n");
+ netdev_info(dev, "HIF_ADH_SET2_REQ\n");
break;
case HIF_ADH_SET2_CONF:
- printk("HIF_ADH_SET2_CONF\n");
+ netdev_info(dev, "HIF_ADH_SET2_CONF\n");
}
}
@@ -3105,7 +3100,7 @@ static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
event =
priv->hostt.buff[(priv->hostt.qtail - 1 - i) %
SME_EVENT_BUFF_SIZE];
- print_hif_event(event);
+ print_hif_event(dev, event);
}
return 0;
}
@@ -3335,7 +3330,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
priv->mac_address_valid = 0;
hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
- printk(KERN_INFO
+ netdev_info(dev,
"ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
@@ -3354,8 +3349,6 @@ void ks_wlan_tx_timeout(struct net_device *dev)
}
priv->nstats.tx_errors++;
netif_wake_queue(dev);
-
- return;
}
static
@@ -3366,8 +3359,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
DPRINTK(3, "in_interrupt()=%ld\n", in_interrupt());
- if (skb == NULL) {
- printk(KERN_ERR "ks_wlan: skb == NULL!!!\n");
+ if (!skb) {
+ netdev_err(dev, "ks_wlan: skb == NULL!!!\n");
return 0;
}
if (priv->dev_state < DEVICE_STATE_READY) {
@@ -3396,13 +3389,13 @@ void send_packet_complete(void *arg1, void *arg2)
DPRINTK(3, "\n");
- priv->nstats.tx_bytes += packet->len;
priv->nstats.tx_packets++;
if (netif_queue_stopped(priv->net_dev))
netif_wake_queue(priv->net_dev);
if (packet) {
+ priv->nstats.tx_bytes += packet->len;
dev_kfree_skb(packet);
packet = NULL;
}
@@ -3421,8 +3414,6 @@ void ks_wlan_set_multicast_list(struct net_device *dev)
return; /* not finished initialize */
}
hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
-
- return;
}
static
@@ -3433,7 +3424,7 @@ int ks_wlan_open(struct net_device *dev)
priv->cur_rx = 0;
if (!priv->mac_address_valid) {
- printk(KERN_ERR "ks_wlan : %s Not READY !!\n", dev->name);
+ netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
return -EBUSY;
} else
netif_start_queue(dev);
@@ -3512,17 +3503,11 @@ int ks_wlan_net_stop(struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int ret = 0;
priv->device_open_status = 0;
del_timer_sync(&update_phyinfo_timer);
if (netif_running(dev))
netif_stop_queue(dev);
- return ret;
-}
-
-int ks_wlan_reset(struct net_device *dev)
-{
return 0;
}
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index e14c109b3cab..78ae2b8fb7f3 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -20,18 +20,24 @@
#define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)
// Convert from UInt32 to Byte[] in a portable way
-#define putUInt32( A, B, C ) A[B+0] = (uint8_t) (C & 0xff); \
- A[B+1] = (uint8_t) ((C>>8) & 0xff); \
- A[B+2] = (uint8_t) ((C>>16) & 0xff); \
- A[B+3] = (uint8_t) ((C>>24) & 0xff)
+#define putUInt32(A, B, C) \
+do { \
+ A[B + 0] = (uint8_t)(C & 0xff); \
+ A[B + 1] = (uint8_t)((C >> 8) & 0xff); \
+ A[B + 2] = (uint8_t)((C >> 16) & 0xff); \
+ A[B + 3] = (uint8_t)((C >> 24) & 0xff); \
+} while (0)
// Reset the state to the empty message.
-#define MichaelClear( A ) A->L = A->K0; \
- A->R = A->K1; \
- A->nBytesInM = 0;
+#define MichaelClear(A) \
+do { \
+ A->L = A->K0; \
+ A->R = A->K1; \
+ A->nBytesInM = 0; \
+} while (0)
static
-void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key)
+void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
{
// Set the key
Mic->K0 = getUInt32(key, 0);
@@ -54,7 +60,7 @@ do{ \
}while(0)
static
-void MichaelAppend(struct michel_mic_t *Mic, uint8_t * src, int nBytes)
+void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
{
int addlen;
if (Mic->nBytesInM) {
@@ -88,7 +94,7 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t * src, int nBytes)
}
static
-void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t * dst)
+void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
{
uint8_t *data = Mic->M;
switch (Mic->nBytesInM) {
@@ -116,9 +122,9 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t * dst)
MichaelClear(Mic);
}
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t * Key,
- uint8_t * Data, int Len, uint8_t priority,
- uint8_t * Result)
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
+ uint8_t *Data, int Len, uint8_t priority,
+ uint8_t *Result)
{
uint8_t pad_data[4] = { priority, 0, 0, 0 };
// Compute the MIC value
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index c7e4eb280961..efaa21788fc7 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -20,7 +20,6 @@ struct michel_mic_t {
uint8_t Result[8];
};
-extern
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t * Key,
- uint8_t * Data, int Len, uint8_t priority,
- uint8_t * Result);
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
+ uint8_t *Data, int Len, uint8_t priority,
+ uint8_t *Result);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 3f6447c65042..3b92d38d37e2 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -138,8 +138,8 @@ struct lnet_debugfs_symlink_def {
void lustre_insert_debugfs(struct ctl_table *table,
const struct lnet_debugfs_symlink_def *symlinks);
int lprocfs_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write,
- loff_t pos, void __user *buffer, int len));
+ void __user *buffer, size_t *lenp,
+ int (*handler)(void *data, int write, loff_t pos,
+ void __user *buffer, int len));
#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 25adab19fd86..b7bd6e8ab33f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -247,19 +247,19 @@ do { \
#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
- const char *format1, ...)
+ const char *format1, ...)
__printf(2, 3);
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
- const char *format1,
- va_list args, const char *format2, ...)
+ const char *format1,
+ va_list args, const char *format2, ...)
__printf(4, 5);
/* other external symbols that tracefile provides: */
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
- const char __user *usr_buffer, int usr_buffer_nob);
+ const char __user *usr_buffer, int usr_buffer_nob);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
- const char *knl_buffer, char *append);
+ const char *knl_buffer, char *append);
#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index d3f9a6020ee3..bdbbe934584c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -143,6 +143,9 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
+#define CFS_FAIL_TIMEOUT_RESET(id, value, secs) \
+ cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_RESET)
+
#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 4daa3823f60a..e0e1a5d0949d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -310,13 +310,13 @@ do { \
#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
-static inline int cfs_size_round4(int val)
+static inline size_t cfs_size_round4(int val)
{
return (val + 3) & (~0x3);
}
#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round(int val)
+static inline size_t cfs_size_round(int val)
{
return (val + 7) & (~0x7);
}
@@ -324,17 +324,17 @@ static inline int cfs_size_round(int val)
#define HAVE_CFS_SIZE_ROUND
#endif
-static inline int cfs_size_round16(int val)
+static inline size_t cfs_size_round16(int val)
{
return (val + 0xf) & (~0xf);
}
-static inline int cfs_size_round32(int val)
+static inline size_t cfs_size_round32(int val)
{
return (val + 0x1f) & (~0x1f);
}
-static inline int cfs_size_round0(int val)
+static inline size_t cfs_size_round0(int val)
{
if (!val)
return 0;
@@ -343,7 +343,7 @@ static inline int cfs_size_round0(int val)
static inline size_t cfs_round_strlen(char *fset)
{
- return (size_t)cfs_size_round((int)strlen(fset) + 1);
+ return cfs_size_round((int)strlen(fset) + 1);
}
#define LOGL(var, len, ptr) \
@@ -360,13 +360,4 @@ do { \
ptr += cfs_size_round(len); \
} while (0)
-#define LOGL0(var, len, ptr) \
-do { \
- if (!len) \
- break; \
- memcpy((char *)ptr, (const char *)var, len); \
- *((char *)(ptr) + len) = 0; \
- ptr += cfs_size_round(len + 1); \
-} while (0)
-
#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 513a8225f888..a59c5e99cbd3 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -605,73 +605,20 @@ void lnet_counters_reset(void);
unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
int lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
+ int src_niov, const struct kvec *src,
unsigned int offset, unsigned int len);
unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *iov);
int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
+ int src_niov, const lnet_kiov_t *src,
unsigned int offset, unsigned int len);
-void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
- unsigned int doffset,
- unsigned int nsiov, struct kvec *siov,
+void lnet_copy_iov2iter(struct iov_iter *to,
+ unsigned int nsiov, const struct kvec *siov,
unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
- unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
+void lnet_copy_kiov2iter(struct iov_iter *to,
+ unsigned int nkiov, const lnet_kiov_t *kiov,
unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset,
- unsigned int niov, struct kvec *iov,
- unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
- unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov,
- unsigned int soffset, unsigned int nob);
-
-static inline void
-lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
-{
- struct kvec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
-
- lnet_copy_iov2iov(1, &diov, doffset,
- nsiov, siov, soffset, nob);
-}
-
-static inline void
-lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *skiov,
- unsigned int soffset, unsigned int nob)
-{
- struct kvec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
-
- lnet_copy_kiov2iov(1, &diov, doffset,
- nsiov, skiov, soffset, nob);
-}
-
-static inline void
-lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- int slen, void *src, unsigned int soffset, unsigned int nob)
-{
- struct kvec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
-
- lnet_copy_iov2iov(ndiov, diov, doffset,
- 1, &siov, soffset, nob);
-}
-
-static inline void
-lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
- unsigned int doffset, int slen, void *src,
- unsigned int soffset, unsigned int nob)
-{
- struct kvec siov = {/* .iov_base = */ src, /* .iov_len = */ slen};
-
- lnet_copy_iov2kiov(ndiov, dkiov, doffset,
- 1, &siov, soffset, nob);
-}
void lnet_me_unlink(lnet_me_t *me);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 7967b013cbae..b84a5bb9186c 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -220,10 +220,7 @@ typedef struct lnet_lnd {
* credit if the LND does flow control.
*/
int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen,
- unsigned int rlen);
+ int delayed, struct iov_iter *to, unsigned int rlen);
/*
* lnet_parse() has had to delay processing of this message
@@ -278,6 +275,8 @@ typedef struct lnet_ni {
struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
/* equivalent interfaces to use */
char *ni_interfaces[LNET_MAX_INTERFACES];
+ /* original net namespace */
+ struct net *ni_net_ns;
} lnet_ni_t;
#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index e098b6c086e1..f8be0e2f7bf7 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -503,21 +503,7 @@ typedef struct {
/* NB lustre portals uses struct iovec internally! */
typedef struct iovec lnet_md_iovec_t;
-/**
- * A page-based fragment of a MD.
- */
-typedef struct {
- /** Pointer to the page where the fragment resides */
- struct page *kiov_page;
- /** Length in bytes of the fragment */
- unsigned int kiov_len;
- /**
- * Starting offset of the fragment within the page. Note that the
- * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_SIZE.
- */
- unsigned int kiov_offset;
-} lnet_kiov_t;
+typedef struct bio_vec lnet_kiov_t;
/** @} lnet_md */
/** \addtogroup lnet_eq
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 2b5930150cda..13b43278a38d 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -35,6 +35,7 @@ config LNET_SELFTEST
config LNET_XPRT_IB
tristate "LNET infiniband support"
depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+ depends on BROKEN
default LNET && INFINIBAND
help
This option allows the LNET users to use infiniband as an
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 4f5978b3767b..9e8802181452 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -128,6 +128,7 @@ static int kiblnd_msgtype2size(int type)
static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
struct kib_rdma_desc *rd;
+ int msg_size;
int nob;
int n;
int i;
@@ -146,12 +147,6 @@ static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
n = rd->rd_nfrags;
- if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
- CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
- n, IBLND_MAX_RDMA_FRAGS);
- return 1;
- }
-
nob = offsetof(struct kib_msg, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
@@ -161,6 +156,13 @@ static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
return 1;
}
+ msg_size = kiblnd_rd_size(rd);
+ if (msg_size <= 0 || msg_size > LNET_MAX_PAYLOAD) {
+ CERROR("Bad msg_size: %d, should be 0 < n <= %d\n",
+ msg_size, LNET_MAX_PAYLOAD);
+ return 1;
+ }
+
if (!flip)
return 0;
@@ -618,7 +620,7 @@ static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
}
struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
- int state, int version)
+ int state, int version)
{
/*
* CAVEAT EMPTOR:
@@ -2465,7 +2467,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
hdev->ibh_cmid = cmid;
hdev->ibh_ibdev = cmid->device;
- pd = ib_alloc_pd(cmid->device);
+ pd = ib_alloc_pd(cmid->device, 0);
if (IS_ERR(pd)) {
rc = PTR_ERR(pd);
CERROR("Can't allocate PD: %d\n", rc);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 078a0c3e8845..14576977200f 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -113,8 +113,9 @@ extern struct kib_tunables kiblnd_tunables;
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
-#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+#define IBLND_FRAG_SHIFT (PAGE_SHIFT - 12) /* frag size on wire is in 4K units */
+#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS (LNET_MAX_PAYLOAD >> 12)/* max # of fragments supported in 4K size */
/************************/
/* derived constants... */
@@ -133,8 +134,8 @@ extern struct kib_tunables kiblnd_tunables;
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c) \
- ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
- c->ibc_peer->ibp_ni))
+ (((c->ibc_max_frags + 1) << IBLND_FRAG_SHIFT) * \
+ kiblnd_concurrent_sends(c->ibc_version, c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
@@ -582,6 +583,8 @@ struct kib_peer {
unsigned short ibp_connecting;
/* reconnect this peer later */
unsigned short ibp_reconnecting:1;
+ /* counter of how many times we triggered a conn race */
+ unsigned char ibp_races;
/* # consecutive reconnection attempts to this peer */
unsigned int ibp_reconnected;
/* errno on closing this peer */
@@ -607,14 +610,14 @@ kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
mod = tunables->lnd_map_on_demand;
- return mod ? mod : IBLND_MAX_RDMA_FRAGS;
+ return mod ? mod : IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT;
}
static inline int
kiblnd_rdma_frags(int version, struct lnet_ni *ni)
{
return version == IBLND_MSG_VERSION_1 ?
- IBLND_MAX_RDMA_FRAGS :
+ (IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT) :
kiblnd_cfg_rdma_frags(ni);
}
@@ -1034,5 +1037,4 @@ int kiblnd_post_rx(struct kib_rx *rx, int credit);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
+ struct iov_iter *to, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 596a697b9d39..b27de8888149 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -36,16 +36,19 @@
#include "o2iblnd.h"
+#define MAX_CONN_RACES_BEFORE_ABORT 20
+
static void kiblnd_peer_alive(struct kib_peer *peer);
static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
-static void kiblnd_check_sends(struct kib_conn *conn);
static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
- int type, int body_nob);
+ int type, int body_nob);
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
- int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie);
+ int resid, struct kib_rdma_desc *dstrd,
+ __u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
+static void kiblnd_check_sends_locked(struct kib_conn *conn);
static void
kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
@@ -211,9 +214,9 @@ kiblnd_post_rx(struct kib_rx *rx, int credit)
conn->ibc_outstanding_credits++;
else
conn->ibc_reserved_credits++;
+ kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
out:
kiblnd_conn_decref(conn);
return rc;
@@ -344,8 +347,8 @@ kiblnd_handle_rx(struct kib_rx *rx)
!IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
conn->ibc_outstanding_credits++;
+ kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
}
switch (msg->ibm_type) {
@@ -648,7 +651,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
- unsigned int niov, struct kvec *iov, int offset, int nob)
+ unsigned int niov, const struct kvec *iov, int offset, int nob)
{
struct kib_net *net = ni->ni_data;
struct page *page;
@@ -705,7 +708,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
static int
kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+ int nkiov, const lnet_kiov_t *kiov, int offset, int nob)
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
@@ -717,8 +720,8 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
LASSERT(nkiov > 0);
LASSERT(net);
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
@@ -728,10 +731,10 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
do {
LASSERT(nkiov > 0);
- fragnob = min((int)(kiov->kiov_len - offset), nob);
+ fragnob = min((int)(kiov->bv_len - offset), nob);
- sg_set_page(sg, kiov->kiov_page, fragnob,
- kiov->kiov_offset + offset);
+ sg_set_page(sg, kiov->bv_page, fragnob,
+ kiov->bv_offset + offset);
sg = sg_next(sg);
if (!sg) {
CERROR("lacking enough sg entries to map tx\n");
@@ -761,7 +764,6 @@ kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
LASSERT(tx->tx_queued);
/* We rely on this for QP sizing */
LASSERT(tx->tx_nwrq > 0);
- LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
LASSERT(!credit || credit == 1);
LASSERT(conn->ibc_outstanding_credits >= 0);
@@ -800,7 +802,7 @@ kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
/*
* OK to drop when posted enough NOOPs, since
- * kiblnd_check_sends will queue NOOP again when
+ * kiblnd_check_sends_locked will queue NOOP again when
* posted NOOPs complete
*/
spin_unlock(&conn->ibc_lock);
@@ -905,7 +907,7 @@ kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
}
static void
-kiblnd_check_sends(struct kib_conn *conn)
+kiblnd_check_sends_locked(struct kib_conn *conn)
{
int ver = conn->ibc_version;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
@@ -918,8 +920,6 @@ kiblnd_check_sends(struct kib_conn *conn)
return;
}
- spin_lock(&conn->ibc_lock);
-
LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
@@ -969,8 +969,6 @@ kiblnd_check_sends(struct kib_conn *conn)
if (kiblnd_post_tx_locked(conn, tx, credit))
break;
}
-
- spin_unlock(&conn->ibc_lock);
}
static void
@@ -1016,16 +1014,11 @@ kiblnd_tx_complete(struct kib_tx *tx, int status)
if (idle)
list_del(&tx->tx_list);
- kiblnd_conn_addref(conn); /* 1 ref for me.... */
-
+ kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
if (idle)
kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
-
- kiblnd_check_sends(conn);
-
- kiblnd_conn_decref(conn); /* ...until here */
}
static void
@@ -1078,6 +1071,15 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
LASSERT(type == IBLND_MSG_GET_DONE ||
type == IBLND_MSG_PUT_DONE);
+ if (kiblnd_rd_size(srcrd) > conn->ibc_max_frags << PAGE_SHIFT) {
+ CERROR("RDMA is too large for peer %s (%d), src size: %d dst size: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ conn->ibc_max_frags << PAGE_SHIFT,
+ kiblnd_rd_size(srcrd), kiblnd_rd_size(dstrd));
+ rc = -EMSGSIZE;
+ goto too_big;
+ }
+
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
CERROR("Src buffer exhausted: %d frags\n", srcidx);
@@ -1091,10 +1093,10 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
break;
}
- if (tx->tx_nwrq >= conn->ibc_max_frags) {
+ if (tx->tx_nwrq >= IBLND_MAX_RDMA_FRAGS) {
CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- conn->ibc_max_frags,
+ IBLND_MAX_RDMA_FRAGS,
srcidx, srcrd->rd_nfrags,
dstidx, dstrd->rd_nfrags);
rc = -EMSGSIZE;
@@ -1132,7 +1134,7 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
wrq++;
sge++;
}
-
+too_big:
if (rc < 0) /* no RDMA if completing with failure */
tx->tx_nwrq = 0;
@@ -1204,9 +1206,8 @@ kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
+ kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
}
static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
@@ -1499,6 +1500,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
+ struct iov_iter from;
struct kib_msg *ibmsg;
struct kib_rdma_desc *rd;
struct kib_tx *tx;
@@ -1518,6 +1520,17 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* payload is either all vaddrs or all pages */
LASSERT(!(payload_kiov && payload_iov));
+ if (payload_kiov)
+ iov_iter_bvec(&from, ITER_BVEC | WRITE,
+ payload_kiov, payload_niov,
+ payload_nob + payload_offset);
+ else
+ iov_iter_kvec(&from, ITER_KVEC | WRITE,
+ payload_iov, payload_niov,
+ payload_nob + payload_offset);
+
+ iov_iter_advance(&from, payload_offset);
+
switch (type) {
default:
LBUG();
@@ -1637,17 +1650,8 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- if (payload_kiov)
- lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- else
- lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- payload_niov, payload_iov,
- payload_offset, payload_nob);
-
+ copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
+ &from);
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
@@ -1719,8 +1723,7 @@ kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
int
kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ struct iov_iter *to, unsigned int rlen)
{
struct kib_rx *rx = private;
struct kib_msg *rxmsg = rx->rx_msg;
@@ -1730,10 +1733,9 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
int post_credit = IBLND_POSTRX_PEER_CREDIT;
int rc = 0;
- LASSERT(mlen <= rlen);
+ LASSERT(iov_iter_count(to) <= rlen);
LASSERT(!in_interrupt());
/* Either all pages or all vaddrs */
- LASSERT(!(kiov && iov));
switch (rxmsg->ibm_type) {
default:
@@ -1749,16 +1751,8 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
break;
}
- if (kiov)
- lnet_copy_flat2kiov(niov, kiov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- mlen);
- else
- lnet_copy_flat2iov(niov, iov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
- mlen);
+ copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
+ IBLND_MSG_SIZE, to);
lnet_finalize(ni, lntmsg, 0);
break;
@@ -1766,7 +1760,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
struct kib_msg *txmsg;
struct kib_rdma_desc *rd;
- if (!mlen) {
+ if (!iov_iter_count(to)) {
lnet_finalize(ni, lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
rxmsg->ibm_u.putreq.ibprm_cookie);
@@ -1784,12 +1778,16 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (!kiov)
+ if (!(to->type & ITER_BVEC))
rc = kiblnd_setup_rd_iov(ni, tx, rd,
- niov, iov, offset, mlen);
+ to->nr_segs, to->kvec,
+ to->iov_offset,
+ iov_iter_count(to));
else
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- niov, kiov, offset, mlen);
+ to->nr_segs, to->bvec,
+ to->iov_offset,
+ iov_iter_count(to));
if (rc) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
@@ -2183,14 +2181,11 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
return;
}
- /**
- * refcount taken by cmid is not reliable after I released the glock
- * because this connection is visible to other threads now, another
- * thread can find and close this connection right after I released
- * the glock, if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED is
- * called, it can release the connection refcount taken by cmid.
- * It means the connection could be destroyed before I finish my
- * operations on it.
+ /*
+ * +1 ref for myself, this connection is visible to other threads
+ * now, refcount of peer:ibp_conns can be released by connection
+ * close from either a different thread, or the calling of
+ * kiblnd_check_sends_locked() below. See bz21911 for details.
*/
kiblnd_conn_addref(conn);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
@@ -2202,10 +2197,9 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
kiblnd_queue_tx_locked(tx, conn);
}
+ kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
-
/* schedule blocked rxs */
kiblnd_handle_early_rxs(conn);
@@ -2240,6 +2234,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
struct kib_rej rej;
int version = IBLND_MSG_VERSION;
unsigned long flags;
+ int max_frags;
int rc;
struct sockaddr_in *peer_addr;
@@ -2346,22 +2341,20 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_max_frags >
- kiblnd_rdma_frags(version, ni)) {
- CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
+ max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT;
+ if (max_frags > kiblnd_rdma_frags(version, ni)) {
+ CWARN("Can't accept conn from %s (version %x): max message size %d is too large (%d wanted)\n",
+ libcfs_nid2str(nid), version, max_frags,
kiblnd_rdma_frags(version, ni));
if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
- } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
- kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) {
- CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
+ } else if (max_frags < kiblnd_rdma_frags(version, ni) &&
+ !net->ibn_fmr_ps) {
+ CWARN("Can't accept conn from %s (version %x): max message size %d incompatible without FMR pool (%d wanted)\n",
+ libcfs_nid2str(nid), version, max_frags,
kiblnd_rdma_frags(version, ni));
if (version == IBLND_MSG_VERSION)
@@ -2387,7 +2380,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
/* We have validated the peer's parameters so use those */
- peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+ peer->ibp_max_frags = max_frags;
peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
write_lock_irqsave(g_lock, flags);
@@ -2419,23 +2412,37 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- /* tie-break connection race in favour of the higher NID */
+ /*
+ * Tie-break connection race in favour of the higher NID.
+ * If we keep running into a race condition multiple times,
+ * we have to assume that the connection attempt with the
+ * higher NID is stuck in a connecting state and will never
+ * recover. As such, we pass through this if-block and let
+ * the lower NID connection win so we can move forward.
+ */
if (peer2->ibp_connecting &&
- nid < ni->ni_nid) {
+ nid < ni->ni_nid && peer2->ibp_races <
+ MAX_CONN_RACES_BEFORE_ABORT) {
+ peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
- CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
+ CDEBUG(D_NET, "Conn race %s\n",
+ libcfs_nid2str(peer2->ibp_nid));
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_CONN_RACE;
goto failed;
}
-
+ if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
+ CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
+ libcfs_nid2str(peer2->ibp_nid),
+ MAX_CONN_RACES_BEFORE_ABORT);
/**
* passive connection is allowed even this peer is waiting for
* reconnection.
*/
peer2->ibp_reconnecting = 0;
+ peer2->ibp_races = 0;
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
@@ -2494,7 +2501,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
sizeof(ackmsg->ibm_u.connparams));
ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
- ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
+ ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT;
ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
@@ -2526,9 +2533,9 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
failed:
if (ni) {
- lnet_ni_decref(ni);
rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ lnet_ni_decref(ni);
}
rej.ibr_version = version;
@@ -2556,7 +2563,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
if (cp) {
msg_size = cp->ibcp_max_msg_size;
- frag_num = cp->ibcp_max_frags;
+ frag_num = cp->ibcp_max_frags << IBLND_FRAG_SHIFT;
queue_dep = cp->ibcp_queue_depth;
}
@@ -2821,11 +2828,11 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_max_frags >
+ if ((msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT) >
conn->ibc_max_frags) {
CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_frags,
+ msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT,
conn->ibc_max_frags);
rc = -EPROTO;
goto failed;
@@ -2859,7 +2866,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
- conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags;
+ conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags >> IBLND_FRAG_SHIFT;
LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
@@ -2916,7 +2923,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
memset(msg, 0, sizeof(*msg));
kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
- msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
+ msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags << IBLND_FRAG_SHIFT;
msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(peer->ibp_ni, msg, version,
@@ -3233,7 +3240,11 @@ kiblnd_check_conns(int idx)
*/
list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
- kiblnd_check_sends(conn);
+
+ spin_lock(&conn->ibc_lock);
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
+
kiblnd_conn_decref(conn);
}
}
@@ -3419,6 +3430,12 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
case IB_EVENT_COMM_EST:
CDEBUG(D_NET, "%s established\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ /*
+ * We received a packet but the connection isn't established yet;
+ * the handshake packet was probably lost, so it is safe to
+ * force the connection into the established state.
+ */
+ rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
return;
default:
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 07ec540946cd..cbc9a9c5385f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1468,11 +1468,6 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
conn->ksnc_route = NULL;
-#if 0 /* irrelevant with only eager routes */
- /* make route least favourite */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-#endif
ksocknal_route_decref(route); /* drop conn's ref on route */
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index a56632b4ee37..e6ca0cf52691 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -86,8 +86,6 @@ struct ksock_sched { /* per scheduler state */
int kss_nconns; /* # connections assigned to
* this scheduler */
struct ksock_sched_info *kss_info; /* owner of it */
- struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
- struct kvec kss_scratch_iov[LNET_MAX_IOV];
};
struct ksock_sched_info {
@@ -616,9 +614,7 @@ void ksocknal_shutdown(lnet_ni_t *ni);
int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
+ int delayed, struct iov_iter *to, unsigned int rlen);
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
@@ -635,7 +631,7 @@ int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
- struct ksock_tx *tx, int nonblk);
+ struct ksock_tx *tx, int nonblk);
int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
lnet_process_id_t id);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 303576d815c6..c1c6f604e6ad 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -35,8 +35,8 @@ ksocknal_alloc_tx(int type, int size)
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, struct ksock_tx, tx_list);
+ tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
@@ -164,13 +164,13 @@ ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
do {
LASSERT(tx->tx_nkiov > 0);
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int)kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return rc;
}
- nob -= (int)kiov->kiov_len;
+ nob -= (int)kiov->bv_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
} while (nob);
@@ -326,13 +326,13 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
do {
LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int)kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return -EAGAIN;
}
- nob -= kiov->kiov_len;
+ nob -= kiov->bv_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
} while (nob);
@@ -1325,39 +1325,36 @@ ksocknal_process_receive(struct ksock_conn *conn)
int
ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ struct iov_iter *to, unsigned int rlen)
{
struct ksock_conn *conn = private;
struct ksock_sched *sched = conn->ksnc_scheduler;
- LASSERT(mlen <= rlen);
- LASSERT(niov <= LNET_MAX_IOV);
+ LASSERT(iov_iter_count(to) <= rlen);
+ LASSERT(to->nr_segs <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = mlen;
+ conn->ksnc_rx_nob_wanted = iov_iter_count(to);
conn->ksnc_rx_nob_left = rlen;
- if (!mlen || iov) {
+ if (to->type & ITER_KVEC) {
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
conn->ksnc_rx_niov =
lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- niov, iov, offset, mlen);
+ to->nr_segs, to->kvec,
+ to->iov_offset, iov_iter_count(to));
} else {
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
conn->ksnc_rx_nkiov =
lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- niov, kiov, offset, mlen);
+ to->nr_segs, to->bvec,
+ to->iov_offset, iov_iter_count(to));
}
- LASSERT(mlen ==
- lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
- lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
-
LASSERT(conn->ksnc_rx_scheduled);
spin_lock_bh(&sched->kss_lock);
@@ -2008,13 +2005,6 @@ ksocknal_connect(struct ksock_route *route)
list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
-#if 0 /* irrelevant with only eager routes */
- if (!route->ksnr_deleted) {
- /* make this route least-favourite for re-selection */
- list_del(&route->ksnr_list);
- list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
- }
-#endif
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6a17757fce1e..6c95e989ca12 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -73,9 +73,9 @@ ksocknal_lib_zc_capable(struct ksock_conn *conn)
int
ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
+ struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
struct socket *sock = conn->ksnc_sock;
- int nob;
- int rc;
+ int nob, i;
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
@@ -83,34 +83,16 @@ ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
!tx->tx_msg.ksm_csum) /* not checksummed */
ksocknal_lib_csum_tx(tx);
- /*
- * NB we can't trust socket ops to either consume our iovs
- * or leave them alone.
- */
- {
-#if SOCKNAL_SINGLE_FRAG_TX
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- unsigned int niov = tx->tx_niov;
-#endif
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
- int i;
+ for (nob = i = 0; i < tx->tx_niov; i++)
+ nob += tx->tx_iov[i].iov_len;
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i] = tx->tx_iov[i];
- nob += scratchiov[i].iov_len;
- }
+ if (!list_empty(&conn->ksnc_tx_queue) ||
+ nob < tx->tx_resid)
+ msg.msg_flags |= MSG_MORE;
- if (!list_empty(&conn->ksnc_tx_queue) ||
- nob < tx->tx_resid)
- msg.msg_flags |= MSG_MORE;
-
- rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
- }
- return rc;
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
+ tx->tx_iov, tx->tx_niov, nob);
+ return sock_sendmsg(sock, &msg);
}
int
@@ -124,20 +106,16 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
/* Not NOOP message */
LASSERT(tx->tx_lnetmsg);
- /*
- * NB we can't trust socket ops to either consume our iovs
- * or leave them alone.
- */
if (tx->tx_msg.ksm_zc_cookies[0]) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int fragsize = kiov->kiov_len;
+ struct page *page = kiov->bv_page;
+ int offset = kiov->bv_offset;
+ int fragsize = kiov->bv_len;
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
+ page, offset, kiov->bv_len);
if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
@@ -150,34 +128,19 @@ ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
rc = tcp_sendpage(sk, page, offset, fragsize, msgflg);
}
} else {
-#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
-#ifdef CONFIG_HIGHMEM
-#warning "XXX risk of kmap deadlock on multiple frags..."
-#endif
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- unsigned int niov = tx->tx_nkiov;
-#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
- }
+ for (nob = i = 0; i < tx->tx_nkiov; i++)
+ nob += kiov[i].bv_len;
if (!list_empty(&conn->ksnc_tx_queue) ||
nob < tx->tx_resid)
msg.msg_flags |= MSG_MORE;
- rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
-
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
+ iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC,
+ kiov, tx->tx_nkiov, nob);
+ rc = sock_sendmsg(sock, &msg);
}
return rc;
}
@@ -201,14 +164,7 @@ ksocknal_lib_eager_ack(struct ksock_conn *conn)
int
ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
-#if SOCKNAL_SINGLE_FRAG_RX
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- unsigned int niov = 1;
-#else
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = conn->ksnc_rx_niov;
-#endif
struct kvec *iov = conn->ksnc_rx_iov;
struct msghdr msg = {
.msg_flags = 0
@@ -220,20 +176,15 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
int sum;
__u32 saved_csum;
- /*
- * NB we can't trust socket ops to either consume our iovs
- * or leave them alone.
- */
LASSERT(niov > 0);
- for (nob = i = 0; i < niov; i++) {
- scratchiov[i] = iov[i];
- nob += scratchiov[i].iov_len;
- }
+ for (nob = i = 0; i < niov; i++)
+ nob += iov[i].iov_len;
+
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
- MSG_DONTWAIT);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, niov, nob);
+ rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -259,67 +210,10 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
return rc;
}
-static void
-ksocknal_lib_kiov_vunmap(void *addr)
-{
- if (!addr)
- return;
-
- vunmap(addr);
-}
-
-static void *
-ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
- struct kvec *iov, struct page **pages)
-{
- void *addr;
- int nob;
- int i;
-
- if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
- return NULL;
-
- LASSERT(niov <= LNET_MAX_IOV);
-
- if (niov < 2 ||
- niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
- return NULL;
-
- for (nob = i = 0; i < niov; i++) {
- if ((kiov[i].kiov_offset && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
- return NULL;
-
- pages[i] = kiov[i].kiov_page;
- nob += kiov[i].kiov_len;
- }
-
- addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
- if (!addr)
- return NULL;
-
- iov->iov_base = addr + kiov[0].kiov_offset;
- iov->iov_len = nob;
-
- return addr;
-}
-
int
ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
-#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
- struct kvec scratch;
- struct kvec *scratchiov = &scratch;
- struct page **pages = NULL;
- unsigned int niov = 1;
-#else
-#ifdef CONFIG_HIGHMEM
-#warning "XXX risk of kmap deadlock on multiple frags..."
-#endif
- struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
- struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov;
-#endif
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
.msg_flags = 0
@@ -328,63 +222,32 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn)
int i;
int rc;
void *base;
- void *addr;
int sum;
int fragnob;
- int n;
-
- /*
- * NB we can't trust socket ops to either consume our iovs
- * or leave them alone.
- */
- addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
- if (addr) {
- nob = scratchiov[0].iov_len;
- n = 1;
- } else {
- for (nob = i = 0; i < niov; i++) {
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
- }
- n = niov;
- }
+ for (nob = i = 0; i < niov; i++)
+ nob += kiov[i].bv_len;
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
- n, nob, MSG_DONTWAIT);
+ iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
+ rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
if (conn->ksnc_msg.ksm_csum) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
- /*
- * Dang! have to kmap again because I have nowhere to
- * stash the mapped address. But by doing it while the
- * page is still mapped, the kernel just bumps the map
- * count and returns me the address it stashed.
- */
- base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
- fragnob = kiov[i].kiov_len;
+ base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
+ fragnob = kiov[i].bv_len;
if (fragnob > sum)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
base, fragnob);
- kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].bv_page);
}
}
-
- if (addr) {
- ksocknal_lib_kiov_vunmap(addr);
- } else {
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
- }
-
return rc;
}
@@ -406,12 +269,12 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
if (tx->tx_kiov) {
for (i = 0; i < tx->tx_nkiov; i++) {
- base = kmap(tx->tx_kiov[i].kiov_page) +
- tx->tx_kiov[i].kiov_offset;
+ base = kmap(tx->tx_kiov[i].bv_page) +
+ tx->tx_kiov[i].bv_offset;
- csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+ csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);
- kunmap(tx->tx_kiov[i].kiov_page);
+ kunmap(tx->tx_kiov[i].bv_page);
}
} else {
for (i = 1; i < tx->tx_niov; i++)
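
The hunks above replace the scratch-kvec/kmap copies with iterator-based sends: the fragments are handed to the socket layer once via iov_iter_kvec()/iov_iter_bvec(), and sock_sendmsg() tracks progress in msg.msg_iter. As a point of reference only, a minimal sketch of that send idiom against the 4.9-era API (the helper name and the "more" flag handling are illustrative, not part of the patch):

	/*
	 * Sketch: send a kvec array through an iterator instead of copying
	 * it into a per-scheduler scratch array first.
	 */
	static int example_send_kvec(struct socket *sock, struct kvec *iov,
				     unsigned int niov, size_t nob, bool more)
	{
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

		if (more)
			msg.msg_flags |= MSG_MORE;

		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, niov, nob);
		return sock_sendmsg(sock, &msg);	/* bytes sent or -errno */
	}
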
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 42b15a769183..23b36b890964 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -328,15 +328,20 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
*/
void libcfs_debug_dumplog_internal(void *arg)
{
+ static time64_t last_dump_time;
+ time64_t current_time;
void *journal_info;
journal_info = current->journal_info;
current->journal_info = NULL;
+ current_time = ktime_get_real_seconds();
- if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
+ if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) &&
+ current_time > last_dump_time) {
+ last_dump_time = current_time;
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
"%s.%lld.%ld", libcfs_debug_file_path_arr,
- (s64)ktime_get_real_seconds(), (long_ptr_t)arg);
+ (s64)current_time, (long_ptr_t)arg);
pr_alert("LustreError: dumping log to %s\n", debug_file_name);
cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
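
The change above throttles log dumps to at most one per wall-clock second by remembering the last dump time in a static variable. A minimal sketch of that throttling idiom, with generic names and, as in the hunk, no explicit locking:

	static void example_maybe_dump(void)
	{
		static time64_t last_dump_time;
		time64_t now = ktime_get_real_seconds();

		if (now <= last_dump_time)
			return;		/* a dump already ran this second */
		last_dump_time = now;
		/* ... expensive dump work goes here ... */
	}
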
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 9288ee08d1f7..e4b1a0a86eae 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -90,8 +90,10 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
}
}
- if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
- (value & CFS_FAIL_ONCE))
+ /* Take into account the current call for FAIL_ONCE for ORSET only;
+ * RESET installs a new fail_loc, so the current call does not count.
+ */
+ if ((set == CFS_FAIL_LOC_ORSET) && (value & CFS_FAIL_ONCE))
set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
/* Lost race to set CFS_FAILED_BIT. */
if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index fc697cdfcdaf..56a614d7713b 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -229,8 +229,6 @@ cfs_str2num_check(char *str, int nob, unsigned *num,
char *endp, cache;
int rc;
- str = cfs_trimwhite(str);
-
/**
* kstrouint can only handle strings composed
* of only numbers. We need to scan the string
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index b52518c54efe..e8b1a61420de 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -74,6 +74,17 @@ struct cfs_cpt_data {
static struct cfs_cpt_data cpt_data;
+static void
+cfs_node_to_cpumask(int node, cpumask_t *mask)
+{
+ const cpumask_t *tmp = cpumask_of_node(node);
+
+ if (tmp)
+ cpumask_copy(mask, tmp);
+ else
+ cpumask_clear(mask);
+}
+
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
@@ -403,7 +414,7 @@ cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
mutex_lock(&cpt_data.cpt_mutex);
mask = cpt_data.cpt_cpumask;
- cpumask_copy(mask, cpumask_of_node(node));
+ cfs_node_to_cpumask(node, mask);
rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
@@ -427,7 +438,7 @@ cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
mutex_lock(&cpt_data.cpt_mutex);
mask = cpt_data.cpt_cpumask;
- cpumask_copy(mask, cpumask_of_node(node));
+ cfs_node_to_cpumask(node, mask);
cfs_cpt_unset_cpumask(cptab, cpt, mask);
@@ -749,7 +760,7 @@ cfs_cpt_table_create(int ncpt)
}
for_each_online_node(i) {
- cpumask_copy(mask, cpumask_of_node(i));
+ cfs_node_to_cpumask(i, mask);
while (!cpumask_empty(mask)) {
struct cfs_cpu_partition *part;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 5c0116ade909..7f56d2c9dd00 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -95,8 +95,8 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
err = crypto_ahash_setkey(tfm, key, key_len);
else if ((*type)->cht_key != 0)
err = crypto_ahash_setkey(tfm,
- (unsigned char *)&((*type)->cht_key),
- (*type)->cht_size);
+ (unsigned char *)&((*type)->cht_key),
+ (*type)->cht_size);
if (err != 0) {
ahash_request_free(*req);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 346db892f275..4daf828198c3 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1286,6 +1286,25 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
sizeof(*ni->ni_lnd_tunables));
}
+ /*
+ * If given some LND tunable parameters, parse those now to
+ * override the values in the NI structure.
+ */
+ if (conf) {
+ if (conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0)
+ ni->ni_peerrtrcredits =
+ conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
+ if (conf->cfg_config_u.cfg_net.net_peer_timeout >= 0)
+ ni->ni_peertimeout =
+ conf->cfg_config_u.cfg_net.net_peer_timeout;
+ if (conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
+ ni->ni_peertxcredits =
+ conf->cfg_config_u.cfg_net.net_peer_tx_credits;
+ if (conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0)
+ ni->ni_maxtxcredits =
+ conf->cfg_config_u.cfg_net.net_max_tx_credits;
+ }
+
rc = lnd->lnd_startup(ni);
mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1299,33 +1318,6 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
goto failed0;
}
- /*
- * If given some LND tunable parameters, parse those now to
- * override the values in the NI structure.
- */
- if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) {
- ni->ni_peerrtrcredits =
- conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
- }
- if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) {
- ni->ni_peertimeout =
- conf->cfg_config_u.cfg_net.net_peer_timeout;
- }
- /*
- * TODO
- * Note: For now, don't allow the user to change
- * peertxcredits as this number is used in the
- * IB LND to control queue depth.
- *
- * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
- * ni->ni_peertxcredits =
- * conf->cfg_config_u.cfg_net.net_peer_tx_credits;
- */
- if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) {
- ni->ni_maxtxcredits =
- conf->cfg_config_u.cfg_net.net_max_tx_credits;
- }
-
LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
lnet_net_lock(LNET_LOCK_EX);
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index a72afdf68bb2..9e2183ff847e 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -31,6 +31,8 @@
*/
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
#include "../../include/linux/lnet/lib-lnet.h"
struct lnet_text_buf { /* tmp struct for parsing routes */
@@ -110,6 +112,11 @@ lnet_ni_free(struct lnet_ni *ni)
LIBCFS_FREE(ni->ni_interfaces[i],
strlen(ni->ni_interfaces[i]) + 1);
}
+
+ /* release reference to net namespace */
+ if (ni->ni_net_ns)
+ put_net(ni->ni_net_ns);
+
LIBCFS_FREE(ni, sizeof(*ni));
}
@@ -171,6 +178,13 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
/* LND will fill in the address part of the NID */
ni->ni_nid = LNET_MKNID(net, 0);
+
+ /* Store net namespace in which current ni is being created */
+ if (current->nsproxy->net_ns)
+ ni->ni_net_ns = get_net(current->nsproxy->net_ns);
+ else
+ ni->ni_net_ns = NULL;
+
ni->ni_last_alive = ktime_get_real_seconds();
list_add_tail(&ni->ni_list, nilist);
return ni;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 1834bf7a27ef..eab53cd57296 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -134,11 +134,11 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
- if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
+ if (lmd->md_iov.kiov[i].bv_offset +
+ lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
- total_length += lmd->md_iov.kiov[i].kiov_len;
+ total_length += lmd->md_iov.kiov[i].bv_len;
}
lmd->md_length = total_length;
@@ -292,11 +292,12 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
return -ENOMEM;
rc = lnet_md_build(md, &umd, unlink);
+ if (rc)
+ goto out_free;
+
cpt = lnet_cpt_of_cookie(meh.cookie);
lnet_res_lock(cpt);
- if (rc)
- goto failed;
me = lnet_handle2me(&meh);
if (!me)
@@ -307,7 +308,7 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
rc = lnet_md_link(md, umd.eq_handle, cpt);
if (rc)
- goto failed;
+ goto out_unlock;
/*
* attach this MD to portal of ME and check if it matches any
@@ -324,10 +325,10 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
return 0;
- failed:
- lnet_md_free(md);
-
+out_unlock:
lnet_res_unlock(cpt);
+out_free:
+ lnet_md_free(md);
return rc;
}
EXPORT_SYMBOL(LNetMDAttach);
@@ -370,24 +371,25 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
return -ENOMEM;
rc = lnet_md_build(md, &umd, unlink);
+ if (rc)
+ goto out_free;
cpt = lnet_res_lock_current();
- if (rc)
- goto failed;
rc = lnet_md_link(md, umd.eq_handle, cpt);
if (rc)
- goto failed;
+ goto out_unlock;
lnet_md2handle(handle, md);
lnet_res_unlock(cpt);
return 0;
- failed:
+out_unlock:
+ lnet_res_unlock(cpt);
+out_free:
lnet_md_free(md);
- lnet_res_unlock(cpt);
return rc;
}
EXPORT_SYMBOL(LNetMDBind);
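
The LNetMDAttach()/LNetMDBind() hunks reorder the error paths so that lnet_md_build() failures are handled before the resource lock is taken, and the out_unlock/out_free labels release resources in the reverse order they were acquired. A generic sketch of that unwind ordering (all names here are illustrative, not from the patch):

	static LIST_HEAD(example_list);
	static DEFINE_MUTEX(example_lock);

	struct example_item {
		struct list_head link;
		int id;
	};

	static int example_register(int id)
	{
		struct example_item *item;
		int rc = 0;

		item = kzalloc(sizeof(*item), GFP_KERNEL);
		if (!item)
			return -ENOMEM;	/* nothing locked yet, nothing to undo */
		item->id = id;

		mutex_lock(&example_lock);
		if (id < 0) {		/* stand-in for a step that can fail */
			rc = -EINVAL;
			goto out_unlock;
		}
		list_add_tail(&item->link, &example_list);
		mutex_unlock(&example_lock);
		return 0;

	out_unlock:
		mutex_unlock(&example_lock);
		kfree(item);
		return rc;
	}
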
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index e6d3b801d87d..48e6f8f2392f 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -37,6 +37,8 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
@@ -166,25 +168,17 @@ lnet_iov_nob(unsigned int niov, struct kvec *iov)
EXPORT_SYMBOL(lnet_iov_nob);
void
-lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
+lnet_copy_iov2iter(struct iov_iter *to,
+ unsigned int nsiov, const struct kvec *siov,
+ unsigned int soffset, unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
+ const char *s;
+ size_t left;
if (!nob)
return;
- /* skip complete frags before 'doffset' */
- LASSERT(ndiov > 0);
- while (doffset >= diov->iov_len) {
- doffset -= diov->iov_len;
- diov++;
- ndiov--;
- LASSERT(ndiov > 0);
- }
-
/* skip complete frags before 'soffset' */
LASSERT(nsiov > 0);
while (soffset >= siov->iov_len) {
@@ -194,39 +188,68 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
LASSERT(nsiov > 0);
}
+ s = (char *)siov->iov_base + soffset;
+ left = siov->iov_len - soffset;
do {
- LASSERT(ndiov > 0);
+ size_t n, copy = left;
LASSERT(nsiov > 0);
- this_nob = min(diov->iov_len - doffset,
- siov->iov_len - soffset);
- this_nob = min(this_nob, nob);
- memcpy((char *)diov->iov_base + doffset,
- (char *)siov->iov_base + soffset, this_nob);
- nob -= this_nob;
+ if (copy > nob)
+ copy = nob;
+ n = copy_to_iter(s, copy, to);
+ if (n != copy)
+ return;
+ nob -= n;
- if (diov->iov_len > doffset + this_nob) {
- doffset += this_nob;
- } else {
- diov++;
- ndiov--;
- doffset = 0;
- }
+ siov++;
+ s = (char *)siov->iov_base;
+ left = siov->iov_len;
+ nsiov--;
+ } while (nob > 0);
+}
+EXPORT_SYMBOL(lnet_copy_iov2iter);
- if (siov->iov_len > soffset + this_nob) {
- soffset += this_nob;
- } else {
- siov++;
- nsiov--;
- soffset = 0;
- }
+void
+lnet_copy_kiov2iter(struct iov_iter *to,
+ unsigned int nsiov, const lnet_kiov_t *siov,
+ unsigned int soffset, unsigned int nob)
+{
+ if (!nob)
+ return;
+
+ LASSERT(!in_interrupt());
+
+ LASSERT(nsiov > 0);
+ while (soffset >= siov->bv_len) {
+ soffset -= siov->bv_len;
+ siov++;
+ nsiov--;
+ LASSERT(nsiov > 0);
+ }
+
+ do {
+ size_t copy = siov->bv_len - soffset, n;
+
+ LASSERT(nsiov > 0);
+
+ if (copy > nob)
+ copy = nob;
+ n = copy_page_to_iter(siov->bv_page,
+ siov->bv_offset + soffset,
+ copy, to);
+ if (n != copy)
+ return;
+ nob -= n;
+ siov++;
+ nsiov--;
+ soffset = 0;
} while (nob > 0);
}
-EXPORT_SYMBOL(lnet_copy_iov2iov);
+EXPORT_SYMBOL(lnet_copy_kiov2iter);
int
lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
+ int src_niov, const struct kvec *src,
unsigned int offset, unsigned int len)
{
/*
@@ -280,238 +303,15 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
LASSERT(!niov || kiov);
while (niov-- > 0)
- nob += (kiov++)->kiov_len;
+ nob += (kiov++)->bv_len;
return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);
-void
-lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
- unsigned int nob)
-{
- /* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
- char *daddr = NULL;
- char *saddr = NULL;
-
- if (!nob)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(ndiov > 0);
- while (doffset >= diov->kiov_len) {
- doffset -= diov->kiov_len;
- diov++;
- ndiov--;
- LASSERT(ndiov > 0);
- }
-
- LASSERT(nsiov > 0);
- while (soffset >= siov->kiov_len) {
- soffset -= siov->kiov_len;
- siov++;
- nsiov--;
- LASSERT(nsiov > 0);
- }
-
- do {
- LASSERT(ndiov > 0);
- LASSERT(nsiov > 0);
- this_nob = min(diov->kiov_len - doffset,
- siov->kiov_len - soffset);
- this_nob = min(this_nob, nob);
-
- if (!daddr)
- daddr = ((char *)kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
- if (!saddr)
- saddr = ((char *)kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
-
- /*
- * Vanishing risk of kmap deadlock when mapping 2 pages.
- * However in practice at least one of the kiovs will be mapped
- * kernel pages and the map/unmap will be NOOPs
- */
- memcpy(daddr, saddr, this_nob);
- nob -= this_nob;
-
- if (diov->kiov_len > doffset + this_nob) {
- daddr += this_nob;
- doffset += this_nob;
- } else {
- kunmap(diov->kiov_page);
- daddr = NULL;
- diov++;
- ndiov--;
- doffset = 0;
- }
-
- if (siov->kiov_len > soffset + this_nob) {
- saddr += this_nob;
- soffset += this_nob;
- } else {
- kunmap(siov->kiov_page);
- saddr = NULL;
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
-
- if (daddr)
- kunmap(diov->kiov_page);
- if (saddr)
- kunmap(siov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_kiov2kiov);
-
-void
-lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int nob)
-{
- /* NB iov, kiov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
-
- if (!nob)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(niov > 0);
- while (iovoffset >= iov->iov_len) {
- iovoffset -= iov->iov_len;
- iov++;
- niov--;
- LASSERT(niov > 0);
- }
-
- LASSERT(nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
- kiov++;
- nkiov--;
- LASSERT(nkiov > 0);
- }
-
- do {
- LASSERT(niov > 0);
- LASSERT(nkiov > 0);
- this_nob = min(iov->iov_len - iovoffset,
- (__kernel_size_t)kiov->kiov_len - kiovoffset);
- this_nob = min(this_nob, nob);
-
- if (!addr)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
-
- memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
- nob -= this_nob;
-
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
-
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
-
- } while (nob > 0);
-
- if (addr)
- kunmap(kiov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_kiov2iov);
-
-void
-lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int niov,
- struct kvec *iov, unsigned int iovoffset,
- unsigned int nob)
-{
- /* NB kiov, iov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
-
- if (!nob)
- return;
-
- LASSERT(!in_interrupt());
-
- LASSERT(nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
- kiov++;
- nkiov--;
- LASSERT(nkiov > 0);
- }
-
- LASSERT(niov > 0);
- while (iovoffset >= iov->iov_len) {
- iovoffset -= iov->iov_len;
- iov++;
- niov--;
- LASSERT(niov > 0);
- }
-
- do {
- LASSERT(nkiov > 0);
- LASSERT(niov > 0);
- this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
- iov->iov_len - iovoffset);
- this_nob = min(this_nob, nob);
-
- if (!addr)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
-
- memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
- nob -= this_nob;
-
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
-
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
- } while (nob > 0);
-
- if (addr)
- kunmap(kiov->kiov_page);
-}
-EXPORT_SYMBOL(lnet_copy_iov2kiov);
-
int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
+ int src_niov, const lnet_kiov_t *src,
unsigned int offset, unsigned int len)
{
/*
@@ -526,8 +326,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
return 0; /* no frags */
LASSERT(src_niov > 0);
- while (offset >= src->kiov_len) { /* skip initial frags */
- offset -= src->kiov_len;
+ while (offset >= src->bv_len) { /* skip initial frags */
+ offset -= src->bv_len;
src_niov--;
src++;
LASSERT(src_niov > 0);
@@ -538,19 +338,19 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
LASSERT(src_niov > 0);
LASSERT((int)niov <= dst_niov);
- frag_len = src->kiov_len - offset;
- dst->kiov_page = src->kiov_page;
- dst->kiov_offset = src->kiov_offset + offset;
+ frag_len = src->bv_len - offset;
+ dst->bv_page = src->bv_page;
+ dst->bv_offset = src->bv_offset + offset;
if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT(dst->kiov_offset + dst->kiov_len
+ dst->bv_len = len;
+ LASSERT(dst->bv_offset + dst->bv_len
<= PAGE_SIZE);
return niov;
}
- dst->kiov_len = frag_len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
+ dst->bv_len = frag_len;
+ LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
len -= frag_len;
dst++;
@@ -569,6 +369,7 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int niov = 0;
struct kvec *iov = NULL;
lnet_kiov_t *kiov = NULL;
+ struct iov_iter to;
int rc;
LASSERT(!in_interrupt());
@@ -594,8 +395,14 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
}
}
- rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
- niov, iov, kiov, offset, mlen, rlen);
+ if (iov) {
+ iov_iter_kvec(&to, ITER_KVEC | READ, iov, niov, mlen + offset);
+ iov_iter_advance(&to, offset);
+ } else {
+ iov_iter_bvec(&to, ITER_BVEC | READ, kiov, niov, mlen + offset);
+ iov_iter_advance(&to, offset);
+ }
+ rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, &to, rlen);
if (rc < 0)
lnet_finalize(ni, msg, rc);
}
@@ -2002,6 +1809,9 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type), rc);
lnet_msg_free(msg);
+ if (rc == -ESHUTDOWN)
+ /* We are shutting down. Don't do anything more */
+ return 0;
goto drop;
}
@@ -2512,6 +2322,15 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
}
if (LNET_NIDNET(ni->ni_nid) == dstnet) {
+ /*
+ * Check if ni was originally created in the
+ * current net namespace.
+ * If not, assign an order above 0xffff0000 so
+ * that this ni is not preferred.
+ */
+ if (!net_eq(ni->ni_net_ns, current->nsproxy->net_ns))
+ order += 0xffff0000;
+
if (srcnidp)
*srcnidp = ni->ni_nid;
if (orderp)
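
In lnet_ni_recv() above, the (iov, kiov, offset, mlen) tuple is replaced by a single iov_iter that is built over the whole fragment array and then advanced past the receive offset, and lnet_copy_kiov2iter() fills it with copy_page_to_iter(). As illustration only, a sketch of that construction against the 4.9-era iterator API (generic names; error handling trimmed):

	#include <linux/kernel.h>
	#include <linux/uio.h>

	/* Copy up to one page of data into a bvec-backed iterator. */
	static size_t example_fill_from_page(struct bio_vec *bvec,
					     unsigned int nbvec,
					     size_t mlen, size_t offset,
					     struct page *page)
	{
		struct iov_iter to;

		iov_iter_bvec(&to, ITER_BVEC | READ, bvec, nbvec, mlen + offset);
		iov_iter_advance(&to, offset);	/* skip the receive offset */

		/* returns how many bytes were actually copied */
		return copy_page_to_iter(page, 0,
					 min_t(size_t, PAGE_SIZE, mlen), &to);
	}
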
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 910e106e221d..0897e588bd54 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -449,23 +449,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
if (!msg)
return;
-#if 0
- CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
- lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
- msg->msg_target_is_router ? "t" : "",
- msg->msg_routing ? "X" : "",
- msg->msg_ack ? "A" : "",
- msg->msg_sending ? "S" : "",
- msg->msg_receiving ? "R" : "",
- msg->msg_delayed ? "d" : "",
- msg->msg_txcredit ? "C" : "",
- msg->msg_peertxcredit ? "c" : "",
- msg->msg_rtrcredit ? "F" : "",
- msg->msg_peerrtrcredit ? "f" : "",
- msg->msg_onactivelist ? "!" : "",
- !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
- !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
-#endif
+
msg->msg_ev.status = status;
if (msg->msg_md) {
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 891fd59401d7..4e6dd5149b4f 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -265,21 +265,17 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
+ struct kvec iov = { .iov_base = buffer, .iov_len = nob };
+ struct msghdr msg = {NULL,};
LASSERT(nob > 0);
/*
* Caller may pass a zero timeout if she thinks the socket buffer is
* empty enough to take the whole message immediately
*/
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);
for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = !timeout ? MSG_DONTWAIT : 0
- };
-
+ msg.msg_flags = !timeout ? MSG_DONTWAIT : 0;
if (timeout) {
/* Set send timeout to remaining time */
jiffies_to_timeval(jiffies_left, &tv);
@@ -296,9 +292,6 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
jiffies_left -= jiffies - then;
- if (rc == nob)
- return 0;
-
if (rc < 0)
return rc;
@@ -307,11 +300,11 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
return -ECONNABORTED;
}
+ if (!msg_data_left(&msg))
+ break;
+
if (jiffies_left <= 0)
return -EAGAIN;
-
- buffer = ((char *)buffer) + rc;
- nob -= rc;
}
return 0;
}
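
lnet_sock_write() above now relies on the msghdr iterator to track how much of the buffer is still unsent, checking msg_data_left() instead of adjusting the buffer pointer and byte count by hand. A minimal sketch of that progress-tracking idiom, shown with sock_sendmsg() and generic names (the hunk itself keeps kernel_sendmsg() and its timeout handling):

	static int example_write_all(struct socket *sock, void *buf, size_t nob)
	{
		struct kvec iov = { .iov_base = buf, .iov_len = nob };
		struct msghdr msg = { .msg_flags = 0 };
		int rc;

		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);

		while (msg_data_left(&msg)) {
			rc = sock_sendmsg(sock, &msg); /* advances msg.msg_iter */
			if (rc < 0)
				return rc;
			if (rc == 0)
				return -ECONNABORTED;	/* unexpected close */
		}
		return 0;
	}
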
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 08402712a452..cb213b8f51cf 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -42,36 +42,23 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
static int
lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ int delayed, struct iov_iter *to, unsigned int rlen)
{
lnet_msg_t *sendmsg = private;
if (lntmsg) { /* not discarding */
- if (sendmsg->msg_iov) {
- if (iov)
- lnet_copy_iov2iov(niov, iov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_iov,
- sendmsg->msg_offset, mlen);
- else
- lnet_copy_iov2kiov(niov, kiov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_iov,
- sendmsg->msg_offset, mlen);
- } else {
- if (iov)
- lnet_copy_kiov2iov(niov, iov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
- else
- lnet_copy_kiov2kiov(niov, kiov, offset,
- sendmsg->msg_niov,
- sendmsg->msg_kiov,
- sendmsg->msg_offset, mlen);
- }
+ if (sendmsg->msg_iov)
+ lnet_copy_iov2iter(to,
+ sendmsg->msg_niov,
+ sendmsg->msg_iov,
+ sendmsg->msg_offset,
+ iov_iter_count(to));
+ else
+ lnet_copy_kiov2iter(to,
+ sendmsg->msg_niov,
+ sendmsg->msg_kiov,
+ sendmsg->msg_offset,
+ iov_iter_count(to));
lnet_finalize(ni, lntmsg, 0);
}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 063543233035..063ad55ec950 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -1307,7 +1307,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
while (--npages >= 0)
- __free_page(rb->rb_kiov[npages].kiov_page);
+ __free_page(rb->rb_kiov[npages].bv_page);
LIBCFS_FREE(rb, sz);
}
@@ -1333,15 +1333,15 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
GFP_KERNEL | __GFP_ZERO, 0);
if (!page) {
while (--i >= 0)
- __free_page(rb->rb_kiov[i].kiov_page);
+ __free_page(rb->rb_kiov[i].bv_page);
LIBCFS_FREE(rb, sz);
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_SIZE;
- rb->rb_kiov[i].kiov_offset = 0;
- rb->rb_kiov[i].kiov_page = page;
+ rb->rb_kiov[i].bv_len = PAGE_SIZE;
+ rb->rb_kiov[i].bv_offset = 0;
+ rb->rb_kiov[i].bv_page = page;
}
return rb;
@@ -1693,7 +1693,7 @@ lnet_rtrpools_adjust(int tiny, int small, int large)
int
lnet_rtrpools_enable(void)
{
- int rc;
+ int rc = 0;
if (the_lnet.ln_routing)
return 0;
@@ -1706,9 +1706,9 @@ lnet_rtrpools_enable(void)
* if we are just configuring this for the first
* time.
*/
- return lnet_rtrpools_alloc(1);
-
- rc = lnet_rtrpools_adjust_helper(0, 0, 0);
+ rc = lnet_rtrpools_alloc(1);
+ else
+ rc = lnet_rtrpools_adjust_helper(0, 0, 0);
if (rc)
return rc;
@@ -1718,7 +1718,7 @@ lnet_rtrpools_enable(void)
the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
lnet_net_unlock(LNET_LOCK_EX);
- return 0;
+ return rc;
}
void
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 13d0454e7fcb..b20c5d394e3b 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -226,7 +226,7 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
+ pg = bk->bk_iovs[i].bv_page;
brw_fill_page(pg, pattern, magic);
}
}
@@ -238,7 +238,7 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
+ pg = bk->bk_iovs[i].bv_page;
if (brw_check_page(pg, pattern, magic)) {
CERROR("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 1be3cad727ae..55afb53b0743 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -152,10 +152,10 @@ lstcon_rpc_put(struct lstcon_rpc *crpc)
LASSERT(list_empty(&crpc->crp_link));
for (i = 0; i < bulk->bk_niov; i++) {
- if (!bulk->bk_iovs[i].kiov_page)
+ if (!bulk->bk_iovs[i].bv_page)
continue;
- __free_page(bulk->bk_iovs[i].kiov_page);
+ __free_page(bulk->bk_iovs[i].bv_page);
}
srpc_client_rpc_decref(crpc->crp_rpc);
@@ -705,7 +705,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
LASSERT(i < nkiov);
- pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
+ pid = (lnet_process_id_packed_t *)page_address(kiov[i].bv_page);
return &pid[idx % SFW_ID_PER_PAGE];
}
@@ -849,12 +849,11 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
min_t(int, nob, PAGE_SIZE);
nob -= len;
- bulk->bk_iovs[i].kiov_offset = 0;
- bulk->bk_iovs[i].kiov_len = len;
- bulk->bk_iovs[i].kiov_page =
- alloc_page(GFP_KERNEL);
+ bulk->bk_iovs[i].bv_offset = 0;
+ bulk->bk_iovs[i].bv_len = len;
+ bulk->bk_iovs[i].bv_page = alloc_page(GFP_KERNEL);
- if (!bulk->bk_iovs[i].kiov_page) {
+ if (!bulk->bk_iovs[i].bv_page) {
lstcon_rpc_put(*crpc);
return -ENOMEM;
}
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 4c33621f06da..a0fcbf3bcc95 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1993,8 +1993,6 @@ static void lstcon_init_acceptor_service(void)
lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
}
-extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-
static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
/* initialize console */
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 78b147732615..78388a611c22 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -184,6 +184,7 @@ lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
return &hash[idx];
}
+int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
int lstcon_console_init(void);
int lstcon_console_fini(void);
int lstcon_session_match(lst_sid_t sid);
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index c2f121f44d33..abbd6287b4bd 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -784,8 +784,8 @@ sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
lnet_process_id_packed_t id;
int j;
- dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests); /* my pages are within KVM always */
+ dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
+ LASSERT(dests); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 3b26d6eb4240..f5619d8744ef 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -91,9 +91,9 @@ srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
- bk->bk_iovs[i].kiov_offset = 0;
- bk->bk_iovs[i].kiov_page = pg;
- bk->bk_iovs[i].kiov_len = nob;
+ bk->bk_iovs[i].bv_offset = 0;
+ bk->bk_iovs[i].bv_page = pg;
+ bk->bk_iovs[i].bv_len = nob;
return nob;
}
@@ -106,7 +106,7 @@ srpc_free_bulk(struct srpc_bulk *bk)
LASSERT(bk);
for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
+ pg = bk->bk_iovs[i].bv_page;
if (!pg)
break;
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index 99ae7eb6720e..4e49cb356d64 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -63,14 +63,12 @@ const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE = {
FID_SEQ_NORMAL,
(__u64)~0ULL
};
-EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
/* Zero range, used for init and other purposes. */
const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE = {
0,
0
};
-EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
/* Lustre Big Fs Lock fid. */
const struct lu_fid LUSTRE_BFL_FID = { .f_seq = FID_SEQ_SPECIAL,
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 454744d25956..edd72b926f81 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -125,19 +125,19 @@ static int seq_client_rpc(struct lu_client_seq *seq,
if (!range_is_sane(output)) {
CERROR("%s: Invalid range received from server: "
- DRANGE"\n", seq->lcs_name, PRANGE(output));
+ DRANGE "\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
goto out_req;
}
if (range_is_exhausted(output)) {
CERROR("%s: Range received from server is exhausted: "
- DRANGE"]\n", seq->lcs_name, PRANGE(output));
+ DRANGE "]\n", seq->lcs_name, PRANGE(output));
rc = -EINVAL;
goto out_req;
}
- CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
+ CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence " DRANGE "]\n",
seq->lcs_name, opcname, PRANGE(output));
out_req:
@@ -179,7 +179,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
seq->lcs_name, rc);
return rc;
}
- CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
+ CDEBUG(D_INFO, "%s: New range - " DRANGE "\n",
seq->lcs_name, PRANGE(&seq->lcs_space));
} else {
rc = 0;
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 81b7ca9ea2fd..3ed32d77f38b 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -105,7 +105,7 @@ ldebugfs_fid_space_seq_write(struct file *file,
rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space);
if (rc == 0) {
- CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
+ CDEBUG(D_INFO, "%s: Space: " DRANGE "\n",
seq->lcs_name, PRANGE(&seq->lcs_space));
}
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index f0efe5b9fbec..08eaec735d6f 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -31,6 +31,25 @@
*
* lustre/fld/fld_internal.h
*
+ * Subsystem Description:
+ * FLD is the FID Location Database, which stores where (i.e. on which MDT)
+ * FIDs are located.
+ * The database is basically a record file; each record consists of a FID
+ * sequence range, an MDT/OST index, and flags. The FLD for the whole FS
+ * is currently stored only on the sequence controller (MDT0), but each target
+ * also has a local FLD, which stores only the local sequences.
+ *
+ * The FLD subsystem has two tasks:
+ * 1. Maintain the database: when the sequence controller allocates
+ * new sequence ranges to some nodes, it calls the FLD API to insert the
+ * location information <sequence_range, node_index> into the FLDB.
+ *
+ * 2. Handle requests from other nodes: when a client needs to know where
+ * a FID is located and cannot find the information in its local cache,
+ * it sends a FLD lookup RPC to the FLD service, which looks up the
+ * FLDB entry and returns the location information to the client.
+ *
+ *
* Author: Yury Umanets <umka@clusterfs.com>
* Author: Tom WangDi <wangdi@clusterfs.com>
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index e59d626a1548..0de72b717ce5 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -53,57 +53,6 @@
#include "../include/lustre_mdc.h"
#include "fld_internal.h"
-/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
- * It should be common thing. The same about mdc RPC lock
- */
-static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
-{
- int rc;
-
- spin_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
- spin_unlock(&cli->cl_loi_list_lock);
- return rc;
-};
-
-static void fld_enter_request(struct client_obd *cli)
-{
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = { 0 };
-
- spin_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- init_waitqueue_head(&mcw.mcw_waitq);
- spin_unlock(&cli->cl_loi_list_lock);
- l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
- } else {
- cli->cl_r_in_flight++;
- spin_unlock(&cli->cl_loi_list_lock);
- }
-}
-
-static void fld_exit_request(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- wake_up(&mcw->mcw_waitq);
- }
- spin_unlock(&cli->cl_loi_list_lock);
-}
-
static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
{
LASSERT(fld->lcf_count > 0);
@@ -270,7 +219,6 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
spin_unlock(&fld->lcf_lock);
return -ENOENT;
}
-EXPORT_SYMBOL(fld_client_del_target);
static struct dentry *fld_debugfs_dir;
@@ -439,9 +387,9 @@ int fld_client_rpc(struct obd_export *exp,
req->rq_reply_portal = MDC_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- fld_enter_request(&exp->exp_obd->u.cli);
+ obd_get_request_slot(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
- fld_exit_request(&exp->exp_obd->u.cli);
+ obd_put_request_slot(&exp->exp_obd->u.cli);
if (rc)
goto out_req;
@@ -505,7 +453,6 @@ void fld_client_flush(struct lu_client_fld *fld)
{
fld_cache_flush(fld->lcf_cache);
}
-EXPORT_SYMBOL(fld_client_flush);
static int __init fld_init(void)
{
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 3cd4a2577d90..89292c93dcd5 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -93,8 +93,8 @@
* super-class definitions.
*/
#include "lu_object.h"
+#include "lustre_compat.h"
#include <linux/atomic.h>
-#include "linux/lustre_compat25.h"
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
@@ -191,6 +191,9 @@ struct cl_attr {
* Group identifier for quota purposes.
*/
gid_t cat_gid;
+
+ /* nlink of the directory */
+ __u64 cat_nlink;
};
/**
@@ -320,7 +323,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index);
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -366,8 +369,8 @@ struct cl_object_operations {
* \return the same convention as for
* cl_object_operations::coo_attr_get() is used.
*/
- int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid);
/**
* Update object configuration. Called top-to-bottom to modify object
* configuration.
@@ -392,6 +395,11 @@ struct cl_object_operations {
* mainly pages and locks.
*/
int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
+ /**
+ * Object getstripe method.
+ */
+ int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum);
};
/**
@@ -687,17 +695,6 @@ enum cl_page_type {
};
/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
- /**
- * Set when pagein completes. Used for debugging (read completes at
- * most once for a page).
- */
- CPF_READ_COMPLETED = 1 << 0
-};
-
-/**
* Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
@@ -711,24 +708,19 @@ struct cl_page {
atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
- /** List of slices. Immutable after creation. */
- struct list_head cp_layers;
/** vmpage */
struct page *cp_vmpage;
+ /** Linkage of pages within group. Pages must be owned */
+ struct list_head cp_batch;
+ /** List of slices. Immutable after creation. */
+ struct list_head cp_layers;
+ /** Linkage of pages within cl_req. */
+ struct list_head cp_flight;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
*/
const enum cl_page_state cp_state;
- /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
- struct list_head cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- struct mutex cp_mutex;
- /** Linkage of pages within cl_req. */
- struct list_head cp_flight;
- /** Transfer error. */
- int cp_error;
-
/**
* Page type. Only CPT_TRANSIENT is used so far. Immutable after
* creation.
@@ -741,10 +733,6 @@ struct cl_page {
*/
struct cl_io *cp_owner;
/**
- * Debug information, the task is owning the page.
- */
- struct task_struct *cp_task;
- /**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
* the top-level pages. Protected by a VM lock.
@@ -756,8 +744,6 @@ struct cl_page {
struct lu_ref_link cp_obj_ref;
/** Link to a queue, for debugging. */
struct lu_ref_link cp_queue_ref;
- /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
};
@@ -1056,23 +1042,32 @@ do { \
} \
} while (0)
-static inline int __page_in_use(const struct cl_page *page, int refc)
-{
- if (page->cp_type == CPT_CACHEABLE)
- ++refc;
- LASSERT(atomic_read(&page->cp_ref) > 0);
- return (atomic_read(&page->cp_ref) > refc);
-}
-
-#define cl_page_in_use(pg) __page_in_use(pg, 1)
-#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
-
static inline struct page *cl_page_vmpage(struct cl_page *page)
{
LASSERT(page->cp_vmpage);
return page->cp_vmpage;
}
+/**
+ * Check if a cl_page is in use.
+ *
+ * The client cache holds a refcount; this refcount is dropped when
+ * the page is taken out of the cache, see vvp_page_delete().
+ */
+static inline bool __page_in_use(const struct cl_page *page, int refc)
+{
+ return (atomic_read(&page->cp_ref) > refc + 1);
+}
+
+/**
+ * Caller itself holds a refcount of cl_page.
+ */
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+/**
+ * Caller doesn't hold a refcount.
+ */
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
@@ -1771,12 +1766,14 @@ struct cl_io {
struct cl_setattr_io {
struct ost_lvb sa_attr;
unsigned int sa_valid;
+ int sa_stripe_index;
+ struct lu_fid *sa_parent_fid;
} ci_setattr;
struct cl_fault_io {
/** page index within file. */
pgoff_t ft_index;
/** bytes valid byte on a faulted page. */
- int ft_nob;
+ size_t ft_nob;
/** writable page? for nopage() only */
int ft_writable;
/** page of an executable? */
@@ -1909,7 +1906,7 @@ struct cl_req_attr {
/** Generic attributes for the server consumption. */
struct obdo *cra_oa;
/** Jobid */
- char cra_jobid[JOBSTATS_JOBID_SIZE];
+ char cra_jobid[LUSTRE_JOBID_SIZE];
};
/**
@@ -2176,14 +2173,16 @@ void cl_object_attr_lock(struct cl_object *o);
void cl_object_attr_unlock(struct cl_object *o);
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
-int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid);
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2197,6 +2196,7 @@ static inline void cl_object_page_init(struct cl_object *clob, int size)
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
+ WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2263,6 +2263,8 @@ void cl_page_unassume(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg);
void cl_page_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
+void cl_page_disown0(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
/** @} ownership */
@@ -2304,7 +2306,7 @@ int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
-int cl_page_size(const struct cl_object *obj);
+size_t cl_page_size(const struct cl_object *obj);
int cl_pages_prune(const struct lu_env *env, struct cl_object *obj);
void cl_lock_print(const struct lu_env *env, void *cookie,
@@ -2333,7 +2335,7 @@ struct cl_client_cache {
/**
* # of LRU entries available
*/
- atomic_t ccc_lru_left;
+ atomic_long_t ccc_lru_left;
/**
* List of entities(OSCs) for this LRU cache
*/
@@ -2347,14 +2349,18 @@ struct cl_client_cache {
*/
spinlock_t ccc_lru_lock;
/**
+ * Set if unstable check is enabled
+ */
+ unsigned int ccc_unstable_check:1;
+ /**
* # of unstable pages for this mount point
*/
- atomic_t ccc_unstable_nr;
+ atomic_long_t ccc_unstable_nr;
/**
* Waitq for awaiting unstable pages to reach zero.
* Used at umounting time and signaled on BRW commit
*/
- wait_queue_head_t ccc_unstable_waitq;
+ wait_queue_head_t ccc_unstable_waitq;
};
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
index 4a15228b5570..5d387d372547 100644
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -63,6 +63,11 @@ static inline int interval_is_intree(struct interval_node *node)
return node->in_intree == 1;
}
+static inline __u64 interval_low(struct interval_node *node)
+{
+ return node->in_extent.start;
+}
+
static inline __u64 interval_high(struct interval_node *node)
{
return node->in_extent.end;
@@ -77,8 +82,29 @@ static inline void interval_set(struct interval_node *node,
node->in_max_high = end;
}
+/*
+ * Rules for writing an interval callback:
+ * - the callback returns INTERVAL_ITER_STOP when it decides the iteration
+ * should stop; the iteration function then returns immediately with
+ * return value INTERVAL_ITER_STOP.
+ * - callbacks for interval_iterate and interval_iterate_reverse: every
+ * node in the tree is passed as @node before the callback is called.
+ * - callback for interval_search: only overlapping nodes are passed as
+ * @node before the callback is called.
+ */
+typedef enum interval_iter (*interval_callback_t)(struct interval_node *node,
+ void *args);
+
struct interval_node *interval_insert(struct interval_node *node,
struct interval_node **root);
void interval_erase(struct interval_node *node, struct interval_node **root);
+/*
+ * Search the extents in the tree and call @func for each overlapping
+ * extent.
+ */
+enum interval_iter interval_search(struct interval_node *root,
+ struct interval_node_extent *ex,
+ interval_callback_t func, void *data);
+
#endif
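
Per the callback rules documented above, an iteration callback returns INTERVAL_ITER_STOP to terminate the walk early. A small sketch of such a callback (names are illustrative; it assumes enum interval_iter provides INTERVAL_ITER_CONT and INTERVAL_ITER_STOP, as declared elsewhere in this header's tree):

	struct example_count_arg {
		int remaining;	/* stop after this many overlapping nodes */
	};

	static enum interval_iter example_count_cb(struct interval_node *node,
						   void *args)
	{
		struct example_count_arg *arg = args;

		if (--arg->remaining <= 0)
			return INTERVAL_ITER_STOP;
		return INTERVAL_ITER_CONT;
	}

A caller would pass it to interval_search() along with an extent and a pointer to the argument structure, e.g. interval_search(root, &ext, example_count_cb, &arg).
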
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
deleted file mode 100644
index d18e8a76bb25..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LINUX_LL_H
-#define _LINUX_LL_H
-
-#ifndef _LL_H
-#error Do not #include this file directly. #include <lustre_lite.h> instead
-#endif
-
-#include <linux/statfs.h>
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-
-#include "../obd_class.h"
-#include "../lustre_net.h"
-#include "../lustre_ha.h"
-
-#include <linux/rbtree.h>
-#include "../../include/linux/lustre_compat25.h"
-#include <linux/pagemap.h>
-
-/* lprocfs.c */
-enum {
- LPROC_LL_DIRTY_HITS = 0,
- LPROC_LL_DIRTY_MISSES,
- LPROC_LL_READ_BYTES,
- LPROC_LL_WRITE_BYTES,
- LPROC_LL_BRW_READ,
- LPROC_LL_BRW_WRITE,
- LPROC_LL_OSC_READ,
- LPROC_LL_OSC_WRITE,
- LPROC_LL_IOCTL,
- LPROC_LL_OPEN,
- LPROC_LL_RELEASE,
- LPROC_LL_MAP,
- LPROC_LL_LLSEEK,
- LPROC_LL_FSYNC,
- LPROC_LL_READDIR,
- LPROC_LL_SETATTR,
- LPROC_LL_TRUNC,
- LPROC_LL_FLOCK,
- LPROC_LL_GETATTR,
- LPROC_LL_CREATE,
- LPROC_LL_LINK,
- LPROC_LL_UNLINK,
- LPROC_LL_SYMLINK,
- LPROC_LL_MKDIR,
- LPROC_LL_RMDIR,
- LPROC_LL_MKNOD,
- LPROC_LL_RENAME,
- LPROC_LL_STAFS,
- LPROC_LL_ALLOC_INODE,
- LPROC_LL_SETXATTR,
- LPROC_LL_GETXATTR,
- LPROC_LL_GETXATTR_HITS,
- LPROC_LL_LISTXATTR,
- LPROC_LL_REMOVEXATTR,
- LPROC_LL_INODE_PERM,
- LPROC_LL_FILE_OPCODES
-};
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
deleted file mode 100644
index e967950e8536..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/lustre_user.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/include/linux/lustre_user.h
- *
- * Lustre public user-space interface definitions.
- */
-
-#ifndef _LINUX_LUSTRE_USER_H
-#define _LINUX_LUSTRE_USER_H
-
-# include <linux/quota.h>
-
-/*
- * asm-x86_64/processor.h on some SLES 9 distros seems to use
- * kernel-only typedefs. fortunately skipping it altogether is ok
- * (for now).
- */
-#define __ASM_X86_64_PROCESSOR_H
-
-#include <linux/string.h>
-
-/*
- * We need to always use 64bit version because the structure
- * is shared across entire cluster where 32bit and 64bit machines
- * are co-existing.
- */
-#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
-typedef struct stat64 lstat_t;
-#define lstat_f lstat64
-#else
-typedef struct stat lstat_t;
-#define lstat_f lstat
-#endif
-
-#define HAVE_LOV_USER_MDS_DATA
-
-#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index d68e60e7fef7..cc0713ef8ae5 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -165,8 +165,10 @@ struct lprocfs_percpu {
struct lprocfs_counter lp_cntr[0];
};
-#define LPROCFS_GET_NUM_CPU 0x0001
-#define LPROCFS_GET_SMP_ID 0x0002
+enum lprocfs_stats_lock_ops {
+ LPROCFS_GET_NUM_CPU = 0x0001, /* number of allocated per-CPU stats */
+ LPROCFS_GET_SMP_ID = 0x0002, /* current stat to be updated */
+};
enum lprocfs_stats_flags {
LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
@@ -363,82 +365,99 @@ static inline void s2dhms(struct dhms *ts, time64_t secs64)
#define JOBSTATS_PROCNAME_UID "procname_uid"
#define JOBSTATS_NODELOCAL "nodelocal"
+/* obd_config.c */
+void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
+
int lprocfs_write_frac_helper(const char __user *buffer,
unsigned long count, int *val, int mult);
int lprocfs_read_frac_helper(char *buffer, unsigned long count,
long val, int mult);
int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
-/*
- * \return value
- * < 0 : on error (only possible for opc as LPROCFS_GET_SMP_ID)
+
+/**
+ * Lock statistics structure for access, possibly only on this CPU.
+ *
+ * The statistics struct may be allocated with per-CPU structures for
+ * efficient concurrent update (usually only on server-wide stats), or
+ * as a single global struct (e.g. for per-client or per-job statistics),
+ * so the required locking depends on the type of structure allocated.
+ *
+ * For per-CPU statistics, pin the thread to the current cpuid so that it
+ * will only access the statistics for that CPU. If the stats structure
+ * for the current CPU has not been allocated (or previously freed),
+ * allocate it now. The per-CPU statistics do not need locking since
+ * the thread is pinned to the CPU during update.
+ *
+ * For global statistics, lock the stats structure to prevent concurrent update.
+ *
+ * \param[in] stats statistics structure to lock
+ * \param[in] opc type of operation:
+ * LPROCFS_GET_SMP_ID: "lock" and return current CPU index
+ * for incrementing statistics for that CPU
+ * LPROCFS_GET_NUM_CPU: "lock" and return number of used
+ * CPU indices to iterate over all indices
+ * \param[out] flags CPU interrupt saved state for IRQ-safe locking
+ *
+ * \retval cpuid of current thread or number of allocated structs
+ * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
*/
-static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
+static inline int lprocfs_stats_lock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
unsigned long *flags)
{
- int rc = 0;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
+ }
switch (opc) {
- default:
- LBUG();
+ case LPROCFS_GET_SMP_ID: {
+ unsigned int cpuid = get_cpu();
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 0;
- } else {
- unsigned int cpuid = get_cpu();
-
- if (unlikely(!stats->ls_percpu[cpuid])) {
- rc = lprocfs_stats_alloc_one(stats, cpuid);
- if (rc < 0) {
- put_cpu();
- return rc;
- }
+ if (unlikely(!stats->ls_percpu[cpuid])) {
+ int rc = lprocfs_stats_alloc_one(stats, cpuid);
+
+ if (rc < 0) {
+ put_cpu();
+ return rc;
}
- return cpuid;
}
-
+ return cpuid;
+ }
case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return 1;
- }
return stats->ls_biggest_alloc_num;
+ default:
+ LBUG();
}
}
-static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
+/**
+ * Unlock statistics structure after access.
+ *
+ * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
+ * or unpin this thread from the current cpuid for per-CPU statistics.
+ *
+ * This function must be called using the same arguments as used when calling
+ * lprocfs_stats_lock() so that the correct operation can be performed.
+ *
+ * \param[in] stats statistics structure to unlock
+ * \param[in] opc type of operation (current cpuid or number of structs)
+ * \param[in] flags CPU interrupt saved state for IRQ-safe locking
+ */
+static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
unsigned long *flags)
{
- switch (opc) {
- default:
- LBUG();
-
- case LPROCFS_GET_SMP_ID:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, *flags);
- else
- spin_unlock(&stats->ls_lock);
- } else {
- put_cpu();
- }
- return;
-
- case LPROCFS_GET_NUM_CPU:
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, *flags);
- else
- spin_unlock(&stats->ls_lock);
- }
- return;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_unlock_irqrestore(&stats->ls_lock, *flags);
+ else
+ spin_unlock(&stats->ls_lock);
+ } else if (opc == LPROCFS_GET_SMP_ID) {
+ put_cpu();
}
}
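A minimal sketch of the lock/unlock pairing described in the comments above; the counter-update step is elided because it depends on the lprocfs counter layout, and the function name is hypothetical.

/* Hypothetical caller: bump one counter slot, honouring per-CPU vs. global stats. */
static inline void example_stats_update(struct lprocfs_stats *stats)
{
	unsigned long flags = 0;
	int idx;

	idx = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
	if (idx < 0)
		return;		/* per-CPU slot allocation failed */

	/* ... update the counter for slot @idx here ... */

	lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
}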
@@ -496,7 +515,7 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
int idx,
enum lprocfs_fields_flags field)
{
- int i;
+ unsigned int i;
unsigned int num_cpu;
unsigned long flags = 0;
__u64 ret = 0;
@@ -681,6 +700,12 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
extern const struct sysfs_ops lustre_sysfs_ops;
+struct root_squash_info;
+int lprocfs_wr_root_squash(const char *buffer, unsigned long count,
+ struct root_squash_info *squash, char *name);
+int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count,
+ struct root_squash_info *squash, char *name);
+
/* all quota proc functions */
int lprocfs_quota_rd_bunit(char *page, char **start,
loff_t off, int count,
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 6e25c1bb6aa3..260643ee0d48 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -327,7 +327,7 @@ struct lu_device_type {
/**
* Number of existing device type instances.
*/
- unsigned ldt_device_nr;
+ atomic_t ldt_device_nr;
/**
* Linkage into a global list of all device types.
*
@@ -602,7 +602,7 @@ struct lu_site {
/**
* index of bucket on hash table while purging
*/
- int ls_purge_start;
+ unsigned int ls_purge_start;
/**
* Top-level device for this stack.
*/
@@ -623,6 +623,11 @@ struct lu_site {
spinlock_t ls_ld_lock;
/**
+ * Lock to serialize site purge.
+ */
+ struct mutex ls_purge_mutex;
+
+ /**
* lu_site stats
*/
struct lprocfs_stats *ls_stats;
@@ -673,7 +678,6 @@ void lu_object_add(struct lu_object *before, struct lu_object *o);
int lu_device_type_init(struct lu_device_type *ldt);
void lu_device_type_fini(struct lu_device_type *ldt);
-void lu_types_stop(void);
/** @} ctors */
@@ -1025,7 +1029,8 @@ enum lu_context_tag {
/**
* Contexts usable in cache shrinker thread.
*/
- LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
+ LCT_SHRINKER = LCT_MD_THREAD | LCT_DT_THREAD | LCT_CL_THREAD |
+ LCT_NOREF
};
/**
@@ -1264,12 +1269,28 @@ struct lu_name {
};
/**
+ * Validate names (path components)
+ *
+ * To be valid, \a name must be non-empty, '\0'-terminated, of length \a
+ * name_len, and must not contain '/'. The maximum length of a name
+ * (before -ENAMETOOLONG is returned) is really controlled by llite and
+ * the server. Here we only check for something insane coming from bad
+ * integer handling.
+ */
+static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
+{
+ return name && name_len > 0 && name_len < INT_MAX &&
+ name[name_len] == '\0' && strlen(name) == name_len &&
+ !memchr(name, '/', name_len);
+}
+
+/**
* Common buffer structure to be passed around for various xattr_{s,g}et()
* methods.
*/
struct lu_buf {
void *lb_buf;
- ssize_t lb_len;
+ size_t lb_len;
};
#define DLUBUF "(%p %zu)"
@@ -1298,5 +1319,12 @@ struct lu_kmem_descr {
int lu_kmem_init(struct lu_kmem_descr *caches);
void lu_kmem_fini(struct lu_kmem_descr *caches);
+void lu_buf_free(struct lu_buf *buf);
+void lu_buf_alloc(struct lu_buf *buf, size_t size);
+void lu_buf_realloc(struct lu_buf *buf, size_t size);
+
+int lu_buf_check_and_grow(struct lu_buf *buf, size_t len);
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len);
+
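For reference, a rough usage sketch of the lu_buf helpers declared above (illustrative only; the function name and sizes are arbitrary, and it assumes lu_buf_check_and_grow() returns 0 on success):

/* Hypothetical sketch of lu_buf lifetime management. */
static void example_lu_buf_use(void)
{
	struct lu_buf buf = { NULL, 0 };

	lu_buf_alloc(&buf, 4096);
	if (!buf.lb_buf)
		return;

	/* ... fill buf.lb_buf with up to buf.lb_len bytes ... */

	/* grow in place if a larger value turns out to be needed */
	if (lu_buf_check_and_grow(&buf, 8192) == 0) {
		/* ... use the enlarged buffer ... */
	}

	lu_buf_free(&buf);
}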
/** @} lu */
#endif /* __LUSTRE_LU_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 051864c23b5b..72eaee95c6b8 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -93,6 +93,7 @@
/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"
+#include "../lustre_ver.h"
/*
* GENERAL STUFF
@@ -196,12 +197,12 @@ static inline unsigned fld_range_type(const struct lu_seq_range *range)
return range->lsr_flags & LU_SEQ_RANGE_MASK;
}
-static inline int fld_range_is_ost(const struct lu_seq_range *range)
+static inline bool fld_range_is_ost(const struct lu_seq_range *range)
{
return fld_range_type(range) == LU_SEQ_RANGE_OST;
}
-static inline int fld_range_is_mdt(const struct lu_seq_range *range)
+static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
{
return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}
@@ -260,23 +261,23 @@ static inline void range_init(struct lu_seq_range *range)
* check if given seq id \a s is within given range \a r
*/
-static inline int range_within(const struct lu_seq_range *range,
- __u64 s)
+static inline bool range_within(const struct lu_seq_range *range,
+ __u64 s)
{
return s >= range->lsr_start && s < range->lsr_end;
}
-static inline int range_is_sane(const struct lu_seq_range *range)
+static inline bool range_is_sane(const struct lu_seq_range *range)
{
return (range->lsr_end >= range->lsr_start);
}
-static inline int range_is_zero(const struct lu_seq_range *range)
+static inline bool range_is_zero(const struct lu_seq_range *range)
{
return (range->lsr_start == 0 && range->lsr_end == 0);
}
-static inline int range_is_exhausted(const struct lu_seq_range *range)
+static inline bool range_is_exhausted(const struct lu_seq_range *range)
{
return range_space(range) == 0;
@@ -437,69 +438,69 @@ enum dot_lustre_oid {
FID_OID_DOT_LUSTRE_OBF = 2UL,
};
-static inline int fid_seq_is_mdt0(__u64 seq)
+static inline bool fid_seq_is_mdt0(__u64 seq)
{
return (seq == FID_SEQ_OST_MDT0);
}
-static inline int fid_seq_is_mdt(const __u64 seq)
+static inline bool fid_seq_is_mdt(__u64 seq)
{
return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
};
-static inline int fid_seq_is_echo(__u64 seq)
+static inline bool fid_seq_is_echo(__u64 seq)
{
return (seq == FID_SEQ_ECHO);
}
-static inline int fid_is_echo(const struct lu_fid *fid)
+static inline bool fid_is_echo(const struct lu_fid *fid)
{
return fid_seq_is_echo(fid_seq(fid));
}
-static inline int fid_seq_is_llog(__u64 seq)
+static inline bool fid_seq_is_llog(__u64 seq)
{
return (seq == FID_SEQ_LLOG);
}
-static inline int fid_is_llog(const struct lu_fid *fid)
+static inline bool fid_is_llog(const struct lu_fid *fid)
{
/* file with OID == 0 is not llog but contains last oid */
return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}
-static inline int fid_seq_is_rsvd(const __u64 seq)
+static inline bool fid_seq_is_rsvd(__u64 seq)
{
return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
};
-static inline int fid_seq_is_special(const __u64 seq)
+static inline bool fid_seq_is_special(__u64 seq)
{
return seq == FID_SEQ_SPECIAL;
};
-static inline int fid_seq_is_local_file(const __u64 seq)
+static inline bool fid_seq_is_local_file(__u64 seq)
{
return seq == FID_SEQ_LOCAL_FILE ||
seq == FID_SEQ_LOCAL_NAME;
};
-static inline int fid_seq_is_root(const __u64 seq)
+static inline bool fid_seq_is_root(__u64 seq)
{
return seq == FID_SEQ_ROOT;
}
-static inline int fid_seq_is_dot(const __u64 seq)
+static inline bool fid_seq_is_dot(__u64 seq)
{
return seq == FID_SEQ_DOT_LUSTRE;
}
-static inline int fid_seq_is_default(const __u64 seq)
+static inline bool fid_seq_is_default(__u64 seq)
{
return seq == FID_SEQ_LOV_DEFAULT;
}
-static inline int fid_is_mdt0(const struct lu_fid *fid)
+static inline bool fid_is_mdt0(const struct lu_fid *fid)
{
return fid_seq_is_mdt0(fid_seq(fid));
}
@@ -516,12 +517,12 @@ static inline void lu_root_fid(struct lu_fid *fid)
* \param fid the fid to be tested.
* \return true if the fid is a igif; otherwise false.
*/
-static inline int fid_seq_is_igif(const __u64 seq)
+static inline bool fid_seq_is_igif(__u64 seq)
{
return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}
-static inline int fid_is_igif(const struct lu_fid *fid)
+static inline bool fid_is_igif(const struct lu_fid *fid)
{
return fid_seq_is_igif(fid_seq(fid));
}
@@ -531,27 +532,27 @@ static inline int fid_is_igif(const struct lu_fid *fid)
* \param fid the fid to be tested.
* \return true if the fid is a idif; otherwise false.
*/
-static inline int fid_seq_is_idif(const __u64 seq)
+static inline bool fid_seq_is_idif(__u64 seq)
{
return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}
-static inline int fid_is_idif(const struct lu_fid *fid)
+static inline bool fid_is_idif(const struct lu_fid *fid)
{
return fid_seq_is_idif(fid_seq(fid));
}
-static inline int fid_is_local_file(const struct lu_fid *fid)
+static inline bool fid_is_local_file(const struct lu_fid *fid)
{
return fid_seq_is_local_file(fid_seq(fid));
}
-static inline int fid_seq_is_norm(const __u64 seq)
+static inline bool fid_seq_is_norm(__u64 seq)
{
return (seq >= FID_SEQ_NORMAL);
}
-static inline int fid_is_norm(const struct lu_fid *fid)
+static inline bool fid_is_norm(const struct lu_fid *fid)
{
return fid_seq_is_norm(fid_seq(fid));
}
@@ -658,7 +659,7 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
oi->oi_fid.f_oid = oid;
oi->oi_fid.f_ver = oid >> 48;
} else {
- if (oid > OBIF_MAX_OID) {
+ if (oid >= OBIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
@@ -683,7 +684,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
fid->f_oid = oid;
fid->f_ver = oid >> 48;
} else {
- if (oid > OBIF_MAX_OID) {
+ if (oid >= OBIF_MAX_OID) {
CERROR("Too large OID %#llx to set REG "DFID"\n",
(unsigned long long)oid, PFID(fid));
return -EBADF;
@@ -769,7 +770,7 @@ static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
}
/* Check whether the fid is for LAST_ID */
-static inline int fid_is_last_id(const struct lu_fid *fid)
+static inline bool fid_is_last_id(const struct lu_fid *fid)
{
return (fid_oid(fid) == 0);
}
@@ -838,7 +839,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
dst->f_ver = be32_to_cpu(fid_ver(src));
}
-static inline int fid_is_sane(const struct lu_fid *fid)
+static inline bool fid_is_sane(const struct lu_fid *fid)
{
return fid &&
((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
@@ -846,15 +847,10 @@ static inline int fid_is_sane(const struct lu_fid *fid)
fid_seq_is_rsvd(fid_seq(fid)));
}
-static inline int fid_is_zero(const struct lu_fid *fid)
-{
- return fid_seq(fid) == 0 && fid_oid(fid) == 0;
-}
-
void lustre_swab_lu_fid(struct lu_fid *fid);
void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
+static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
return memcmp(f0, f1, sizeof(*f0)) == 0;
}
@@ -1017,12 +1013,12 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
return next;
}
-static inline int lu_dirent_calc_size(int namelen, __u16 attr)
+static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
{
- int size;
+ size_t size;
if (attr & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
+ const size_t align = sizeof(struct luda_type) - 1;
size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
size += sizeof(struct luda_type);
@@ -1033,15 +1029,6 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
return (size + 7) & ~7;
}
-static inline int lu_dirent_size(struct lu_dirent *ent)
-{
- if (le16_to_cpu(ent->lde_reclen) == 0) {
- return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
- le32_to_cpu(ent->lde_attrs));
- }
- return le16_to_cpu(ent->lde_reclen);
-}
-
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
/**
@@ -1067,19 +1054,19 @@ struct lustre_handle {
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
-static inline int lustre_handle_is_used(struct lustre_handle *lh)
+static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
{
return lh->cookie != 0ull;
}
-static inline int lustre_handle_equal(const struct lustre_handle *lh1,
- const struct lustre_handle *lh2)
+static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
+ const struct lustre_handle *lh2)
{
return lh1->cookie == lh2->cookie;
}
static inline void lustre_handle_copy(struct lustre_handle *tgt,
- struct lustre_handle *src)
+ const struct lustre_handle *src)
{
tgt->cookie = src->cookie;
}
@@ -1105,7 +1092,7 @@ struct lustre_msg_v2 {
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS 4
-#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */
+
struct ptlrpc_body_v3 {
struct lustre_handle pb_handle;
__u32 pb_type;
@@ -1127,7 +1114,7 @@ struct ptlrpc_body_v3 {
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
/* padding for future needs */
__u64 pb_padding[4];
- char pb_jobid[JOBSTATS_JOBID_SIZE];
+ char pb_jobid[LUSTRE_JOBID_SIZE];
};
#define ptlrpc_body ptlrpc_body_v3
@@ -1293,6 +1280,9 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
* name in request
*/
+#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
+#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
+#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1318,14 +1308,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
OBD_CONNECT_FULL20)
-#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
- ((minor)<<16) + \
- ((patch)<<8) + (fix))
-#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
-#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
-#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
-#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255)
-
/* This structure is used for both request and reply.
*
* If we eventually have separate connect data for different types, which we
@@ -1478,10 +1460,23 @@ enum obdo_flags {
OBD_FL_LOCAL_MASK = 0xF0000000,
};
-#define LOV_MAGIC_V1 0x0BD10BD0
-#define LOV_MAGIC LOV_MAGIC_V1
-#define LOV_MAGIC_JOIN_V1 0x0BD20BD0
-#define LOV_MAGIC_V3 0x0BD30BD0
+/*
+ * All LOV EA magics should share the same postfix: if a newer Lustre
+ * version introduces a new LOV EA magic, then after a downgrade to an
+ * older Lustre, even though the old version does not recognize the new
+ * magic, it can still distinguish corrupted cases by checking the
+ * magic's postfix.
+ */
+#define LOV_MAGIC_MAGIC 0x0BD0
+#define LOV_MAGIC_MASK 0xFFFF
+
+#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC)
+#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
+#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
+#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
+/* reserved for specifying OSTs */
+#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
+#define LOV_MAGIC LOV_MAGIC_V1
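The point of the shared postfix above is that even an unknown magic can be sanity-checked; a minimal illustrative helper (the name is hypothetical) would be:

/* Hypothetical check: does @magic carry the common LOV EA postfix? */
static inline bool lov_magic_postfix_ok(__u32 magic)
{
	return (magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC;
}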
/*
* magic for fully defined striping
@@ -1498,14 +1493,6 @@ enum obdo_flags {
#define LOV_MAGIC_V1_DEF 0x0CD10BD0
#define LOV_MAGIC_V3_DEF 0x0CD30BD0
-#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
-#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
-#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
-#define LOV_PATTERN_CMOBD 0x200
-
-#define LOV_PATTERN_F_MASK 0xffff0000
-#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
-
#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
@@ -1569,25 +1556,25 @@ static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
oi->oi.oi_id = oid;
}
-static inline __u64 lmm_oi_id(struct ost_id *oi)
+static inline __u64 lmm_oi_id(const struct ost_id *oi)
{
return oi->oi.oi_id;
}
-static inline __u64 lmm_oi_seq(struct ost_id *oi)
+static inline __u64 lmm_oi_seq(const struct ost_id *oi)
{
return oi->oi.oi_seq;
}
static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
- struct ost_id *src_oi)
+ const struct ost_id *src_oi)
{
dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}
static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
- struct ost_id *src_oi)
+ const struct ost_id *src_oi)
{
dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
@@ -1610,6 +1597,7 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
#define XATTR_NAME_LOV "trusted.lov"
#define XATTR_NAME_LMA "trusted.lma"
#define XATTR_NAME_LMV "trusted.lmv"
+#define XATTR_NAME_DEFAULT_LMV "trusted.dmv"
#define XATTR_NAME_LINK "trusted.link"
#define XATTR_NAME_FID "trusted.fid"
#define XATTR_NAME_VERSION "trusted.version"
@@ -1625,7 +1613,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
/* lmm_stripe_count used to be __u32 */
__u16 lmm_stripe_count; /* num stripes in use for this object */
__u16 lmm_layout_gen; /* layout generation number */
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
+ char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
@@ -1727,6 +1715,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
+#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
+
#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
@@ -1782,7 +1772,7 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
* it to sync quickly
*/
-#define OBD_OBJECT_EOF 0xffffffffffffffffULL
+#define OBD_OBJECT_EOF LUSTRE_EOF
#define OST_MIN_PRECREATE 32
#define OST_MAX_PRECREATE 20000
@@ -1806,9 +1796,9 @@ void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
/* multiple of 8 bytes => can array */
struct niobuf_remote {
- __u64 offset;
- __u32 len;
- __u32 flags;
+ __u64 rnb_offset;
+ __u32 rnb_len;
+ __u32 rnb_flags;
};
void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
@@ -1878,12 +1868,6 @@ struct obd_quotactl {
void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
-#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
-#define Q_GETOINFO 0x800102 /* get obd quota info */
-#define Q_GETOQUOTA 0x800103 /* get obd quotas */
-#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
-
#define Q_COPY(out, in, member) (out)->member = (in)->member
#define QCTL_COPY(out, in) \
@@ -1946,8 +1930,8 @@ enum mds_cmd {
MDS_DISCONNECT = 39,
MDS_GETSTATUS = 40,
MDS_STATFS = 41,
- MDS_PIN = 42,
- MDS_UNPIN = 43,
+ MDS_PIN = 42, /* obsolete, never used in a release */
+ MDS_UNPIN = 43, /* obsolete, never used in a release */
MDS_SYNC = 44,
MDS_DONE_WRITING = 45,
MDS_SET_INFO = 46,
@@ -1956,7 +1940,7 @@ enum mds_cmd {
MDS_GETXATTR = 49,
MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
MDS_WRITEPAGE = 51,
- MDS_IS_SUBDIR = 52,
+ MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
MDS_GET_INFO = 53,
MDS_HSM_STATE_GET = 54,
MDS_HSM_STATE_SET = 55,
@@ -1984,7 +1968,7 @@ enum mdt_reint_cmd {
REINT_OPEN = 6,
REINT_SETXATTR = 7,
REINT_RMENTRY = 8,
-/* REINT_WRITE = 9, */
+ REINT_MIGRATE = 9,
REINT_MAX
};
@@ -2003,6 +1987,7 @@ void lustre_swab_generic_32s(__u32 *val);
#define DISP_OPEN_LOCK 0x02000000
#define DISP_OPEN_LEASE 0x04000000
#define DISP_OPEN_STRIPE 0x08000000
+#define DISP_OPEN_DENY 0x10000000
/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
@@ -2028,7 +2013,7 @@ void lustre_swab_generic_32s(__u32 *val);
#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
-#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
+#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
@@ -2108,43 +2093,43 @@ enum md_transient_state {
};
struct mdt_body {
- struct lu_fid fid1;
- struct lu_fid fid2;
- struct lustre_handle handle;
- __u64 valid;
- __u64 size; /* Offset, in the case of MDS_READPAGE */
- __s64 mtime;
- __s64 atime;
- __s64 ctime;
- __u64 blocks; /* XID, in the case of MDS_READPAGE */
- __u64 ioepoch;
- __u64 t_state; /* transient file state defined in
- * enum md_transient_state
- * was "ino" until 2.4.0
- */
- __u32 fsuid;
- __u32 fsgid;
- __u32 capability;
- __u32 mode;
- __u32 uid;
- __u32 gid;
- __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
- __u32 rdev;
- __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
- __u32 unused2; /* was "generation" until 2.4.0 */
- __u32 suppgid;
- __u32 eadatasize;
- __u32 aclsize;
- __u32 max_mdsize;
- __u32 max_cookiesize;
- __u32 uid_h; /* high 32-bits of uid, for FUID */
- __u32 gid_h; /* high 32-bits of gid, for FUID */
- __u32 padding_5; /* also fix lustre_swab_mdt_body */
- __u64 padding_6;
- __u64 padding_7;
- __u64 padding_8;
- __u64 padding_9;
- __u64 padding_10;
+ struct lu_fid mbo_fid1;
+ struct lu_fid mbo_fid2;
+ struct lustre_handle mbo_handle;
+ __u64 mbo_valid;
+ __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
+ __s64 mbo_mtime;
+ __s64 mbo_atime;
+ __s64 mbo_ctime;
+ __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
+ __u64 mbo_ioepoch;
+ __u64 mbo_t_state; /* transient file state defined in
+ * enum md_transient_state
+ * was "ino" until 2.4.0
+ */
+ __u32 mbo_fsuid;
+ __u32 mbo_fsgid;
+ __u32 mbo_capability;
+ __u32 mbo_mode;
+ __u32 mbo_uid;
+ __u32 mbo_gid;
+ __u32 mbo_flags;
+ __u32 mbo_rdev;
+ __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
+ __u32 mbo_unused2; /* was "generation" until 2.4.0 */
+ __u32 mbo_suppgid;
+ __u32 mbo_eadatasize;
+ __u32 mbo_aclsize;
+ __u32 mbo_max_mdsize;
+ __u32 mbo_max_cookiesize;
+ __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
+ __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
+ __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
+ __u64 mbo_padding_6;
+ __u64 mbo_padding_7;
+ __u64 mbo_padding_8;
+ __u64 mbo_padding_9;
+ __u64 mbo_padding_10;
}; /* 216 */
void lustre_swab_mdt_body(struct mdt_body *b);
@@ -2263,6 +2248,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
*/
#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
+#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
+ MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
+ MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
+ MDS_OPEN_RELEASE)
+
enum mds_op_bias {
MDS_CHECK_SPLIT = 1 << 0,
MDS_CROSS_REF = 1 << 1,
@@ -2277,6 +2267,7 @@ enum mds_op_bias {
MDS_CREATE_VOLATILE = 1 << 10,
MDS_OWNEROVERRIDE = 1 << 11,
MDS_HSM_RELEASE = 1 << 12,
+ MDS_RENAME_MIGRATE = BIT(13),
};
/* instance of mdt_reint_rec */
@@ -2472,7 +2463,7 @@ struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
__u32 ld_active_tgt_count; /* how many active */
__u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default MEA_MAGIC_* */
+ __u32 ld_pattern; /* default hash pattern */
__u64 ld_default_hash_size;
__u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
__u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
@@ -2482,23 +2473,129 @@ struct lmv_desc {
struct obd_uuid ld_uuid;
};
-/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
-struct lmv_stripe_md {
- __u32 mea_magic;
- __u32 mea_count;
- __u32 mea_master;
- __u32 mea_padding;
- char mea_pool_name[LOV_MAXPOOLNAME];
- struct lu_fid mea_ids[0];
+/* LMV layout EA; it is stored on both the master and the slave objects */
+struct lmv_mds_md_v1 {
+ __u32 lmv_magic;
+ __u32 lmv_stripe_count;
+ __u32 lmv_master_mdt_index; /* On the master object this is the
+ * master MDT index; on a slave
+ * object it is the stripe index
+ * of that slave object
+ */
+ __u32 lmv_hash_type; /* dir stripe policy, i.e. which
+ * hash function to use.
+ * Note: only the lower 16 bits are
+ * used for now; the higher 16 bits
+ * will be used to mark the object
+ * status, e.g. migrating or dead.
+ */
+ __u32 lmv_layout_version; /* Used for directory restriping */
+ __u32 lmv_padding1;
+ __u64 lmv_padding2;
+ __u64 lmv_padding3;
+ char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
+ struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
};
-#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
-#define MEA_MAGIC_ALL_CHARS 0xb222a11c
-#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
+#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
+#define LMV_MAGIC LMV_MAGIC_V1
+
+/* #define LMV_USER_MAGIC 0x0CD30CD0 */
+#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
+
+/*
+ * Right now only the lower part (bits 0-15) of lmv_hash_type is used,
+ * and the higher part holds flags indicating the status of the object,
+ * for example that the object is being migrated. The hash function
+ * might be interpreted differently depending on the flags.
+ */
+#define LMV_HASH_TYPE_MASK 0x0000ffff
+
+#define LMV_HASH_FLAG_MIGRATION 0x80000000
+#define LMV_HASH_FLAG_DEAD 0x40000000
-#define MAX_HASH_SIZE_32 0x7fffffffUL
-#define MAX_HASH_SIZE 0x7fffffffffffffffULL
-#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
+/**
+ * The FNV-1a hash algorithm is as follows:
+ * hash = FNV_offset_basis
+ * for each octet_of_data to be hashed
+ * hash = hash XOR octet_of_data
+ * hash = hash × FNV_prime
+ * return hash
+ * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
+ *
+ * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
+ * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
+ **/
+#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
+#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
+static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
+{
+ __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
+ const unsigned char *p = buf;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ hash ^= p[i];
+ hash *= LUSTRE_FNV_1A_64_PRIME;
+ }
+
+ return hash;
+}
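As a rough illustration of how the hash above relates to the stripe fields of lmv_mds_md_v1 (the real stripe-selection policy lives in the LMV layer; the helper name and the modulo mapping are assumptions made for this sketch):

/* Hypothetical sketch: map a directory entry name to a stripe index. */
static inline unsigned int example_name_to_stripe(const char *name, size_t namelen,
						  __u32 stripe_count)
{
	if (!stripe_count)
		return 0;

	return (unsigned int)(lustre_hash_fnv_1a_64(name, namelen) % stripe_count);
}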
+
+union lmv_mds_md {
+ __u32 lmv_magic;
+ struct lmv_mds_md_v1 lmv_md_v1;
+ struct lmv_user_md lmv_user_md;
+};
+
+void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
+
+static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
+{
+ ssize_t len = -EINVAL;
+
+ switch (lmm_magic) {
+ case LMV_MAGIC_V1: {
+ struct lmv_mds_md_v1 *lmm1;
+
+ len = sizeof(*lmm1);
+ len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
+ break; }
+ default:
+ break;
+ }
+ return len;
+}
+
+static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
+{
+ switch (le32_to_cpu(lmm->lmv_magic)) {
+ case LMV_MAGIC_V1:
+ return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
+ case LMV_USER_MAGIC:
+ return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
+ unsigned int stripe_count)
+{
+ int rc = 0;
+
+ switch (le32_to_cpu(lmm->lmv_magic)) {
+ case LMV_MAGIC_V1:
+ lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
+ break;
+ case LMV_USER_MAGIC:
+ lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
enum fld_rpc_opc {
FLD_QUERY = 900,
@@ -2582,8 +2679,8 @@ struct ldlm_res_id {
#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
(res)->lr_name.name[2], (res)->lr_name.name[3]
-static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
- const struct ldlm_res_id *res1)
+static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
+ const struct ldlm_res_id *res1)
{
return !memcmp(res0, res1, sizeof(*res0));
}
@@ -2620,17 +2717,15 @@ struct ldlm_extent {
__u64 gid;
};
-#define LDLM_GID_ANY ((__u64)-1)
-
-static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
- struct ldlm_extent *ex2)
+static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
+ const struct ldlm_extent *ex2)
{
return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
}
/* check if @ex1 contains @ex2 */
-static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
- struct ldlm_extent *ex2)
+static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
+ const struct ldlm_extent *ex2)
{
return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
}
@@ -2833,7 +2928,29 @@ enum obd_cmd {
};
#define OBD_FIRST_OPC OBD_PING
-/* catalog of log objects */
+/**
+ * llog contexts indices.
+ *
+ * There is a compatibility problem with the indices below: they are not
+ * contiguous and must keep their numbers for compatibility reasons.
+ * See LU-5218 for details.
+ */
+enum llog_ctxt_id {
+ LLOG_CONFIG_ORIG_CTXT = 0,
+ LLOG_CONFIG_REPL_CTXT = 1,
+ LLOG_MDS_OST_ORIG_CTXT = 2,
+ LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
+ LLOG_SIZE_ORIG_CTXT = 4,
+ LLOG_SIZE_REPL_CTXT = 5,
+ LLOG_TEST_ORIG_CTXT = 8,
+ LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
+ LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
+ LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
+ /* for multiple changelog consumers */
+ LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
+ LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
+ LLOG_MAX_CTXTS
+};
/** Identifier for a single log object */
struct llog_logid {
@@ -2939,7 +3056,7 @@ struct llog_setattr64_rec {
__u32 lsr_uid_h;
__u32 lsr_gid;
__u32 lsr_gid_h;
- __u64 lsr_padding;
+ __u64 lsr_valid;
struct llog_rec_tail lsr_tail;
} __packed;
@@ -2963,15 +3080,9 @@ struct changelog_setinfo {
/** changelog record */
struct llog_changelog_rec {
- struct llog_rec_hdr cr_hdr;
- struct changelog_rec cr;
- struct llog_rec_tail cr_tail; /**< for_sizezof_only */
-} __packed;
-
-struct llog_changelog_ext_rec {
- struct llog_rec_hdr cr_hdr;
- struct changelog_ext_rec cr;
- struct llog_rec_tail cr_tail; /**< for_sizezof_only */
+ struct llog_rec_hdr cr_hdr;
+ struct changelog_rec cr; /**< Variable length field */
+ struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */
} __packed;
struct llog_changelog_user_rec {
@@ -2990,7 +3101,7 @@ enum agent_req_status {
ARS_SUCCEED,
};
-static inline char *agent_req_status2name(enum agent_req_status ars)
+static inline const char *agent_req_status2name(const enum agent_req_status ars)
{
switch (ars) {
case ARS_WAITING:
@@ -3056,6 +3167,9 @@ enum llog_flag {
LLOG_F_ZAP_WHEN_EMPTY = 0x1,
LLOG_F_IS_CAT = 0x2,
LLOG_F_IS_PLAIN = 0x4,
+ LLOG_F_EXT_JOBID = BIT(3),
+
+ LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};
struct llog_log_hdr {
@@ -3068,8 +3182,8 @@ struct llog_log_hdr {
__u32 llh_cat_idx;
/* for a catalog the first plain slot is next to it */
struct obd_uuid llh_tgtuuid;
- __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
- __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
+ __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
+ __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
struct llog_rec_tail llh_tail;
} __packed;
@@ -3166,7 +3280,7 @@ struct obdo {
#define o_cksum o_nlink
#define o_grant_used o_data_version
-static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
+static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
struct obdo *wobdo,
const struct obdo *lobdo)
{
@@ -3185,7 +3299,7 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
}
}
-static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
+static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
struct obdo *lobdo,
const struct obdo *wobdo)
{
@@ -3284,17 +3398,17 @@ void lustre_swab_lustre_capa(struct lustre_capa *c);
/** lustre_capa::lc_opc */
enum {
- CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
- CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
- CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
- CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
- CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
- CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
- CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
- CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
- CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
- CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
- CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
+ CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
+ CAPA_OPC_BODY_READ = 1 << 1, /**< read object data */
+ CAPA_OPC_INDEX_LOOKUP = 1 << 2, /**< lookup object fid */
+ CAPA_OPC_INDEX_INSERT = 1 << 3, /**< insert object fid */
+ CAPA_OPC_INDEX_DELETE = 1 << 4, /**< delete object fid */
+ CAPA_OPC_OSS_WRITE = 1 << 5, /**< write oss object data */
+ CAPA_OPC_OSS_READ = 1 << 6, /**< read oss object data */
+ CAPA_OPC_OSS_TRUNC = 1 << 7, /**< truncate oss object */
+ CAPA_OPC_OSS_DESTROY = 1 << 8, /**< destroy oss object */
+ CAPA_OPC_META_WRITE = 1 << 9, /**< write object meta data */
+ CAPA_OPC_META_READ = 1 << 10, /**< read object meta data */
};
#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
@@ -3346,6 +3460,14 @@ struct getinfo_fid2path {
void lustre_swab_fid2path(struct getinfo_fid2path *gf);
+/** path2parent request/reply structures */
+struct getparent {
+ struct lu_fid gp_fid; /**< parent FID */
+ __u32 gp_linkno; /**< hardlink number */
+ __u32 gp_name_size; /**< size of the name field */
+ char gp_name[0]; /**< zero-terminated link name */
+} __packed;
+
enum {
LAYOUT_INTENT_ACCESS = 0,
LAYOUT_INTENT_READ = 1,
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
new file mode 100644
index 000000000000..f3d7c94c3b50
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
@@ -0,0 +1,412 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+#ifndef LUSTRE_IOCTL_H_
+#define LUSTRE_IOCTL_H_
+
+#include <linux/types.h>
+#include "../../../include/linux/libcfs/libcfs.h"
+#include "lustre_idl.h"
+
+#ifdef __KERNEL__
+# include <linux/ioctl.h>
+# include <linux/string.h>
+# include "../obd_support.h"
+#else /* __KERNEL__ */
+# include <malloc.h>
+# include <string.h>
+# include <libcfs/util/ioctl.h>
+#endif /* !__KERNEL__ */
+
+#if !defined(__KERNEL__) && !defined(LUSTRE_UTILS)
+# error This file is for Lustre internal use only.
+#endif
+
+enum md_echo_cmd {
+ ECHO_MD_CREATE = 1, /* Open/Create file on MDT */
+ ECHO_MD_MKDIR = 2, /* Mkdir on MDT */
+ ECHO_MD_DESTROY = 3, /* Unlink file on MDT */
+ ECHO_MD_RMDIR = 4, /* Rmdir on MDT */
+ ECHO_MD_LOOKUP = 5, /* Lookup on MDT */
+ ECHO_MD_GETATTR = 6, /* Getattr on MDT */
+ ECHO_MD_SETATTR = 7, /* Setattr on MDT */
+ ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */
+};
+
+#define OBD_DEV_ID 1
+#define OBD_DEV_NAME "obd"
+#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME
+#define OBD_DEV_MAJOR 10
+#define OBD_DEV_MINOR 241
+
+#define OBD_IOCTL_VERSION 0x00010004
+#define OBD_DEV_BY_DEVNAME 0xffffd0de
+#define OBD_MAX_IOCTL_BUFFER CONFIG_LUSTRE_OBD_MAX_IOCTL_BUFFER
+
+struct obd_ioctl_data {
+ __u32 ioc_len;
+ __u32 ioc_version;
+
+ union {
+ __u64 ioc_cookie;
+ __u64 ioc_u64_1;
+ };
+ union {
+ __u32 ioc_conn1;
+ __u32 ioc_u32_1;
+ };
+ union {
+ __u32 ioc_conn2;
+ __u32 ioc_u32_2;
+ };
+
+ struct obdo ioc_obdo1;
+ struct obdo ioc_obdo2;
+
+ __u64 ioc_count;
+ __u64 ioc_offset;
+ __u32 ioc_dev;
+ __u32 ioc_command;
+
+ __u64 ioc_nid;
+ __u32 ioc_nal;
+ __u32 ioc_type;
+
+ /* buffers the kernel will treat as user pointers */
+ __u32 ioc_plen1;
+ char __user *ioc_pbuf1;
+ __u32 ioc_plen2;
+ char __user *ioc_pbuf2;
+
+ /* inline buffers for various arguments */
+ __u32 ioc_inllen1;
+ char *ioc_inlbuf1;
+ __u32 ioc_inllen2;
+ char *ioc_inlbuf2;
+ __u32 ioc_inllen3;
+ char *ioc_inlbuf3;
+ __u32 ioc_inllen4;
+ char *ioc_inlbuf4;
+
+ char ioc_bulk[0];
+};
+
+struct obd_ioctl_hdr {
+ __u32 ioc_len;
+ __u32 ioc_version;
+};
+
+static inline __u32 obd_ioctl_packlen(struct obd_ioctl_data *data)
+{
+ __u32 len = cfs_size_round(sizeof(*data));
+
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ len += cfs_size_round(data->ioc_inllen3);
+ len += cfs_size_round(data->ioc_inllen4);
+
+ return len;
+}
+
+static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
+{
+ if (data->ioc_len > (1 << 30)) {
+ CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
+ return 1;
+ }
+
+ if (data->ioc_inllen1 > (1 << 30)) {
+ CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
+ return 1;
+ }
+
+ if (data->ioc_inllen2 > (1 << 30)) {
+ CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
+ return 1;
+ }
+
+ if (data->ioc_inllen3 > (1 << 30)) {
+ CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
+ return 1;
+ }
+
+ if (data->ioc_inllen4 > (1 << 30)) {
+ CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
+ return 1;
+ }
+
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
+ CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
+ CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
+ return 1;
+ }
+
+ if (!data->ioc_pbuf1 && data->ioc_plen1) {
+ CERROR("OBD ioctl: plen1 set but NULL pointer\n");
+ return 1;
+ }
+
+ if (!data->ioc_pbuf2 && data->ioc_plen2) {
+ CERROR("OBD ioctl: plen2 set but NULL pointer\n");
+ return 1;
+ }
+
+ if (obd_ioctl_packlen(data) > data->ioc_len) {
+ CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
+ obd_ioctl_packlen(data), data->ioc_len);
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef __KERNEL__
+
+int obd_ioctl_getdata(char **buf, int *len, void __user *arg);
+int obd_ioctl_popdata(void __user *arg, void *data, int len);
+
+static inline void obd_ioctl_freedata(char *buf, size_t len)
+{
+ kvfree(buf);
+}
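A sketch of how the kernel-side helpers above are expected to be paired (the handler name is hypothetical and the command dispatch is elided):

/* Hypothetical handler: unpack, act on, and return an obd_ioctl_data blob. */
static int example_obd_ioctl(unsigned int cmd, void __user *uarg)
{
	struct obd_ioctl_data *data;
	char *buf = NULL;
	int len = 0;
	int rc;

	rc = obd_ioctl_getdata(&buf, &len, uarg);
	if (rc)
		return rc;

	data = (struct obd_ioctl_data *)buf;
	/* ... dispatch on cmd / data->ioc_command, use data->ioc_inlbuf1 ... */

	rc = obd_ioctl_popdata(uarg, data, len);
	obd_ioctl_freedata(buf, len);
	return rc;
}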
+
+#else /* __KERNEL__ */
+
+static inline int obd_ioctl_pack(struct obd_ioctl_data *data, char **pbuf,
+ int max_len)
+{
+ char *ptr;
+ struct obd_ioctl_data *overlay;
+
+ data->ioc_len = obd_ioctl_packlen(data);
+ data->ioc_version = OBD_IOCTL_VERSION;
+
+ if (*pbuf && data->ioc_len > max_len) {
+ fprintf(stderr, "pbuf = %p, ioc_len = %u, max_len = %d\n",
+ *pbuf, data->ioc_len, max_len);
+ return -EINVAL;
+ }
+
+ if (!*pbuf)
+ *pbuf = malloc(data->ioc_len);
+
+ if (!*pbuf)
+ return -ENOMEM;
+
+ overlay = (struct obd_ioctl_data *)*pbuf;
+ memcpy(*pbuf, data, sizeof(*data));
+
+ ptr = overlay->ioc_bulk;
+ if (data->ioc_inlbuf1)
+ LOGL(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
+
+ if (data->ioc_inlbuf2)
+ LOGL(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
+
+ if (data->ioc_inlbuf3)
+ LOGL(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
+
+ if (data->ioc_inlbuf4)
+ LOGL(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
+
+ if (obd_ioctl_is_invalid(overlay)) {
+ fprintf(stderr, "invalid ioctl data: ioc_len = %u, max_len = %d\n",
+ data->ioc_len, max_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len)
+{
+ char *ptr;
+ struct obd_ioctl_data *overlay;
+
+ if (!pbuf)
+ return 1;
+
+ overlay = (struct obd_ioctl_data *)pbuf;
+
+ /* Preserve the caller's buffer pointers */
+ overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
+ overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
+ overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
+ overlay->ioc_inlbuf4 = data->ioc_inlbuf4;
+
+ memcpy(data, pbuf, sizeof(*data));
+
+ ptr = overlay->ioc_bulk;
+ if (data->ioc_inlbuf1)
+ LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
+
+ if (data->ioc_inlbuf2)
+ LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
+
+ if (data->ioc_inlbuf3)
+ LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
+
+ if (data->ioc_inlbuf4)
+ LOGU(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
+
+ return 0;
+}
+
+#endif /* !__KERNEL__ */
+
+/*
+ * OBD_IOC_DATA_TYPE is only for compatibility reasons with older
+ * Linux Lustre user tools. New ioctls should NOT use this macro as
+ * the ioctl "size". Instead the ioctl should get a "size" argument
+ * which is the actual data type used by the ioctl, to ensure the
+ * ioctl interface is versioned correctly.
+ */
+#define OBD_IOC_DATA_TYPE long
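Following the advice above, a newly added ioctl would encode its real payload type rather than OBD_IOC_DATA_TYPE; a purely hypothetical example (the name and number are not real and are shown commented out like the other reserved entries):

/* Hypothetical: size the ioctl by its actual argument structure. */
/* #define OBD_IOC_EXAMPLE_NEW _IOWR('f', 255, struct obd_ioctl_data) */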
+
+/* IOC_LDLM_TEST _IOWR('f', 40, long) */
+/* IOC_LDLM_DUMP _IOWR('f', 41, long) */
+/* IOC_LDLM_REGRESS_START _IOWR('f', 42, long) */
+/* IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long) */
+
+#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE) */
+
+#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETATTR _IOWR('f', 108, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE) */
+
+/* OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE) */
+#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME])
+#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
+#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
+
+/* OBD_IOC_DEC_FS_USE_COUNT _IO('f', 139) */
+#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE) */
+#define OBD_GET_VERSION _IOWR('f', 144, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_CLOSE_UUID _IOWR('f', 147, OBD_IOC_DATA_TYPE) */
+#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETDEVICE _IOWR('f', 149, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_FID2PATH _IOWR('f', 150, OBD_IOC_DATA_TYPE)
+/* lustre/lustre_user.h 151-153 */
+/* OBD_IOC_LOV_SETSTRIPE 154 LL_IOC_LOV_SETSTRIPE */
+/* OBD_IOC_LOV_GETSTRIPE 155 LL_IOC_LOV_GETSTRIPE */
+/* OBD_IOC_LOV_SETEA 156 LL_IOC_LOV_SETEA */
+/* lustre/lustre_user.h 157-159 */
+#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
+#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
+#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
+/* lustre/lustre_user.h 163-176 */
+#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
+/* OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE) */
+#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE) */
+/* OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE) */
+#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) */
+#define OBD_IOC_NODEMAP _IOWR('f', 197, OBD_IOC_DATA_TYPE)
+
+/* ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */
+/* ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */
+/* ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */
+/* ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */
+
+#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
+
+/* lustre/lustre_user.h 212-217 */
+#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t)
+#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data)
+#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data)
+#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_QUERY_LFSCK _IOR('f', 232, struct obd_ioctl_data)
+/* lustre/lustre_user.h 240-249 */
+/* LIBCFS_IOC_DEBUG_MASK 250 */
+
+#define IOC_OSC_SET_ACTIVE _IOWR('h', 21, void *)
+
+#endif /* LUSTRE_IOCTL_H_ */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index ef6f38ff359e..6fc985571cba 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -42,8 +42,35 @@
* @{
*/
+#ifdef __KERNEL__
+# include <linux/quota.h>
+# include <linux/string.h> /* snprintf() */
+# include <linux/version.h>
+#else /* !__KERNEL__ */
+# define NEED_QUOTA_DEFS
+# include <stdio.h> /* snprintf() */
+# include <string.h>
+# include <sys/quota.h>
+# include <sys/stat.h>
+#endif /* __KERNEL__ */
#include "ll_fiemap.h"
-#include "../linux/lustre_user.h"
+
+/*
+ * We always need to use the 64-bit version because the structure
+ * is shared across the entire cluster, where 32-bit and 64-bit
+ * machines coexist.
+ */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
+typedef struct stat64 lstat_t;
+#define lstat_f lstat64
+#else
+typedef struct stat lstat_t;
+#define lstat_f lstat
+#endif
+
+#define HAVE_LOV_USER_MDS_DATA
+
+#define LUSTRE_EOF 0xffffffffffffffffULL
/* for statfs() */
#define LL_SUPER_MAGIC 0x0BD00BD0
@@ -117,6 +144,11 @@ struct lu_fid {
__u32 f_ver;
};
+static inline bool fid_is_zero(const struct lu_fid *fid)
+{
+ return !fid->f_seq && !fid->f_oid;
+}
+
struct filter_fid {
struct lu_fid ff_parent; /* ff_parent.f_ver == file stripe number */
};
@@ -167,7 +199,7 @@ struct lustre_mdt_attrs {
*/
struct ost_id {
union {
- struct ostid {
+ struct {
__u64 oi_id;
__u64 oi_seq;
} oi;
@@ -188,26 +220,20 @@ struct ost_id {
* *STRIPE* - set/get lov_user_md
* *INFO - set/get lov_user_mds_data
*/
-/* see <lustre_lib.h> for ioctl numberss 101-150 */
+/* lustre_ioctl.h 101-150 */
#define LL_IOC_GETFLAGS _IOR('f', 151, long)
#define LL_IOC_SETFLAGS _IOW('f', 152, long)
#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
-/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
-/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
-/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
-#define LL_IOC_RECREATE_OBJ _IOW('f', 157, long)
-#define LL_IOC_RECREATE_FID _IOW('f', 157, struct lu_fid)
+/* LL_IOC_RECREATE_OBJ 157 obsolete */
+/* LL_IOC_RECREATE_FID 158 obsolete */
#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
-/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
-#define LL_IOC_QUOTACHECK _IOW('f', 160, int)
-/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
-#define LL_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
-/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
-#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
+/* #define LL_IOC_QUOTACHECK 160 OBD_IOC_QUOTACHECK */
+/* #define LL_IOC_POLL_QUOTACHECK 161 OBD_IOC_POLL_QUOTACHECK */
+/* #define LL_IOC_QUOTACTL 162 OBD_IOC_QUOTACTL */
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
@@ -221,8 +247,7 @@ struct ost_id {
#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
-/* see <lustre_lib.h> for ioctl numbers 177-210 */
-
+/* lustre_ioctl.h 177-210 */
#define LL_IOC_HSM_STATE_GET _IOR('f', 211, struct hsm_user_state)
#define LL_IOC_HSM_STATE_SET _IOW('f', 212, struct hsm_state_set)
#define LL_IOC_HSM_CT_START _IOW('f', 213, struct lustre_kernelcomm)
@@ -242,6 +267,17 @@ struct ost_id {
#define LL_IOC_SET_LEASE _IOWR('f', 243, long)
#define LL_IOC_GET_LEASE _IO('f', 244)
#define LL_IOC_HSM_IMPORT _IOWR('f', 245, struct hsm_user_import)
+#define LL_IOC_LMV_SET_DEFAULT_STRIPE _IOWR('f', 246, struct lmv_user_md)
+#define LL_IOC_MIGRATE _IOR('f', 247, int)
+#define LL_IOC_FID2MDTIDX _IOWR('f', 248, struct lu_fid)
+#define LL_IOC_GETPARENT _IOWR('f', 249, struct getparent)
+
+/* Lease types for use as arg and return of LL_IOC_{GET,SET}_LEASE ioctl. */
+enum ll_lease_type {
+ LL_LEASE_RDLCK = 0x1,
+ LL_LEASE_WRLCK = 0x2,
+ LL_LEASE_UNLCK = 0x4,
+};
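
For illustration only (not part of the patch): a minimal userspace sketch of driving the lease ioctls above, assuming this header is installed as <lustre/lustre_user.h>; the install path and the error-handling style are assumptions.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <lustre/lustre_user.h>	/* assumed install path for this header */

int main(int argc, char **argv)
{
	int fd, rc;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask for a read lease; the argument is one of enum ll_lease_type. */
	rc = ioctl(fd, LL_IOC_SET_LEASE, LL_LEASE_RDLCK);
	if (rc < 0)
		perror("LL_IOC_SET_LEASE");

	/* Query which lease, if any, is currently held on this fd. */
	rc = ioctl(fd, LL_IOC_GET_LEASE);
	if (rc >= 0)
		printf("lease type: %d\n", rc);

	close(fd);
	return 0;
}
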
#define LL_STATFS_LMV 1
#define LL_STATFS_LOV 2
@@ -253,10 +289,6 @@ struct ost_id {
#define IOC_MDC_GETFILEINFO _IOWR(IOC_MDC_TYPE, 22, struct lov_user_mds_data *)
#define LL_IOC_MDC_GETINFO _IOWR(IOC_MDC_TYPE, 23, struct lov_user_mds_data *)
-/* Keep these for backward compartability. */
-#define LL_IOC_OBD_STATFS IOC_OBD_STATFS
-#define IOC_MDC_GETSTRIPE IOC_MDC_GETFILESTRIPE
-
#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
@@ -273,20 +305,26 @@ struct ost_id {
#define LL_FILE_LOCKLESS_IO 0x00000010 /* server-side locks with cio */
#define LL_FILE_RMTACL 0x00000020
-#define LOV_USER_MAGIC_V1 0x0BD10BD0
-#define LOV_USER_MAGIC LOV_USER_MAGIC_V1
-#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
-#define LOV_USER_MAGIC_V3 0x0BD30BD0
+#define LOV_USER_MAGIC_V1 0x0BD10BD0
+#define LOV_USER_MAGIC LOV_USER_MAGIC_V1
+#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
+#define LOV_USER_MAGIC_V3 0x0BD30BD0
+/* 0x0BD40BD0 is occupied by LOV_MAGIC_MIGRATE */
+#define LOV_USER_MAGIC_SPECIFIC 0x0BD50BD0 /* for specific OSTs */
+
+#define LMV_USER_MAGIC 0x0CD30CD0 /*default lmv magic*/
-#define LMV_MAGIC_V1 0x0CD10CD0 /*normal stripe lmv magic */
-#define LMV_USER_MAGIC 0x0CD20CD0 /*default lmv magic*/
+#define LOV_PATTERN_RAID0 0x001
+#define LOV_PATTERN_RAID1 0x002
+#define LOV_PATTERN_FIRST 0x100
+#define LOV_PATTERN_CMOBD 0x200
-#define LOV_PATTERN_RAID0 0x001
-#define LOV_PATTERN_RAID1 0x002
-#define LOV_PATTERN_FIRST 0x100
+#define LOV_PATTERN_F_MASK 0xffff0000
+#define LOV_PATTERN_F_HOLE 0x40000000 /* there is hole in LOV EA */
+#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
-#define LOV_MAXPOOLNAME 16
-#define LOV_POOLNAMEF "%.16s"
+#define LOV_MAXPOOLNAME 15
+#define LOV_POOLNAMEF "%.15s"
#define LOV_MIN_STRIPE_BITS 16 /* maximum PAGE_SIZE (ia64), power of 2 */
#define LOV_MIN_STRIPE_SIZE (1 << LOV_MIN_STRIPE_BITS)
@@ -344,18 +382,17 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
* used when reading
*/
};
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */
+ char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
} __packed;
static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic)
{
- if (lmm_magic == LOV_USER_MAGIC_V3)
- return sizeof(struct lov_user_md_v3) +
- stripes * sizeof(struct lov_user_ost_data_v1);
- else
+ if (lmm_magic == LOV_USER_MAGIC_V1)
return sizeof(struct lov_user_md_v1) +
stripes * sizeof(struct lov_user_ost_data_v1);
+ return sizeof(struct lov_user_md_v3) +
+ stripes * sizeof(struct lov_user_ost_data_v1);
}
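
A hedged kernel-side sketch of how this size helper is typically consumed; the allocator flags and the helper name are assumptions, not taken from this patch.

/* Allocate a zeroed V3 user LOV buffer large enough for 'stripes' stripes. */
static struct lov_user_md_v3 *alloc_lum_v3(__u16 stripes)
{
	struct lov_user_md_v3 *lum;
	__u32 size = lov_user_md_size(stripes, LOV_USER_MAGIC_V3);

	lum = kzalloc(size, GFP_NOFS);	/* <linux/slab.h> assumed included */
	if (!lum)
		return NULL;

	lum->lmm_magic = LOV_USER_MAGIC_V3;
	lum->lmm_stripe_count = stripes;
	return lum;			/* caller frees with kfree() */
}
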
/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to
@@ -374,19 +411,26 @@ struct lov_user_mds_data_v3 {
} __packed;
#endif
-/* keep this to be the same size as lov_user_ost_data_v1 */
struct lmv_user_mds_data {
struct lu_fid lum_fid;
__u32 lum_padding;
__u32 lum_mds;
};
-/* lum_type */
-enum {
- LMV_STRIPE_TYPE = 0,
- LMV_DEFAULT_TYPE = 1,
+enum lmv_hash_type {
+ LMV_HASH_TYPE_UNKNOWN = 0, /* 0 is reserved for testing purpose */
+ LMV_HASH_TYPE_ALL_CHARS = 1,
+ LMV_HASH_TYPE_FNV_1A_64 = 2,
};
+#define LMV_HASH_NAME_ALL_CHARS "all_char"
+#define LMV_HASH_NAME_FNV_1A_64 "fnv_1a_64"
+
+/*
+ * Derived the same way as LOV_MAX_STRIPE_COUNT (see above):
+ * (max buffer size - lmv+rpc header) / sizeof(struct lmv_user_mds_data)
+ */
+#define LMV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
#define lmv_user_md lmv_user_md_v1
struct lmv_user_md_v1 {
__u32 lum_magic; /* must be the first field */
@@ -397,9 +441,9 @@ struct lmv_user_md_v1 {
__u32 lum_padding1;
__u32 lum_padding2;
__u32 lum_padding3;
- char lum_pool_name[LOV_MAXPOOLNAME];
+ char lum_pool_name[LOV_MAXPOOLNAME + 1];
struct lmv_user_mds_data lum_objects[0];
-};
+} __packed;
static inline int lmv_user_md_size(int stripes, int lmm_magic)
{
@@ -407,6 +451,8 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
stripes * sizeof(struct lmv_user_mds_data);
}
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+
struct ll_recreate_obj {
__u64 lrc_id;
__u32 lrc_ost_idx;
@@ -498,6 +544,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
/********* Quotas **********/
+#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
+#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
+#define Q_GETOINFO 0x800102 /* get obd quota info */
+#define Q_GETOQUOTA 0x800103 /* get obd quotas */
+#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
+
/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */
#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */
@@ -648,11 +700,16 @@ static inline const char *changelog_type2str(int type)
}
/* per-record flags */
-#define CLF_VERSION 0x1000
-#define CLF_EXT_VERSION 0x2000
#define CLF_FLAGSHIFT 12
#define CLF_FLAGMASK ((1U << CLF_FLAGSHIFT) - 1)
#define CLF_VERMASK (~CLF_FLAGMASK)
+enum changelog_rec_flags {
+ CLF_VERSION = 0x1000,
+ CLF_RENAME = 0x2000,
+ CLF_JOBID = 0x4000,
+ CLF_SUPPORTED = CLF_VERSION | CLF_RENAME | CLF_JOBID
+};
+
/* Anything under the flagmask may be per-type (if desired) */
/* Flags for unlink */
#define CLF_UNLINK_LAST 0x0001 /* Unlink of last hardlink */
@@ -736,12 +793,35 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
- sizeof(struct changelog_ext_rec))
+enum changelog_send_flag {
+ /* Not yet implemented */
+ CHANGELOG_FLAG_FOLLOW = BIT(0),
+ /*
+ * Blocking IO makes sense in case of slow user parsing of the records,
+ * but it also prevents us from cleaning up if the records are not
+ * consumed.
+ */
+ CHANGELOG_FLAG_BLOCK = BIT(1),
+ /* Pack jobid into the changelog records if available. */
+ CHANGELOG_FLAG_JOBID = BIT(2),
+};
+
+#define CR_MAXSIZE cfs_size_round(2 * NAME_MAX + 2 + \
+ changelog_rec_offset(CLF_SUPPORTED))
+
+/* 31 usable string bytes + null terminator. */
+#define LUSTRE_JOBID_SIZE 32
+/*
+ * This is the minimal changelog record. It can contain extensions
+ * such as rename fields or process jobid. Its exact content is described
+ * by the cr_flags.
+ *
+ * Extensions are packed in the same order as their corresponding flags.
+ */
struct changelog_rec {
__u16 cr_namelen;
- __u16 cr_flags; /**< (flags&CLF_FLAGMASK)|CLF_VERSION */
+ __u16 cr_flags; /**< \a changelog_rec_flags */
__u32 cr_type; /**< \a changelog_rec_type */
__u64 cr_index; /**< changelog record number */
__u64 cr_prev; /**< last index for this target fid */
@@ -751,55 +831,138 @@ struct changelog_rec {
__u32 cr_markerflags; /**< CL_MARK flags */
};
struct lu_fid cr_pfid; /**< parent fid */
- char cr_name[0]; /**< last element */
} __packed;
-/* changelog_ext_rec is 2*sizeof(lu_fid) bigger than changelog_rec, to save
- * space, only rename uses changelog_ext_rec, while others use changelog_rec to
- * store records.
- */
-struct changelog_ext_rec {
- __u16 cr_namelen;
- __u16 cr_flags; /**< (flags & CLF_FLAGMASK) |
- * CLF_EXT_VERSION
- */
- __u32 cr_type; /**< \a changelog_rec_type */
- __u64 cr_index; /**< changelog record number */
- __u64 cr_prev; /**< last index for this target fid */
- __u64 cr_time;
- union {
- struct lu_fid cr_tfid; /**< target fid */
- __u32 cr_markerflags; /**< CL_MARK flags */
- };
- struct lu_fid cr_pfid; /**< target parent fid */
- struct lu_fid cr_sfid; /**< source fid, or zero */
- struct lu_fid cr_spfid; /**< source parent fid, or zero */
- char cr_name[0]; /**< last element */
-} __packed;
+/* Changelog extension for RENAME. */
+struct changelog_ext_rename {
+ struct lu_fid cr_sfid; /**< source fid, or zero */
+ struct lu_fid cr_spfid; /**< source parent fid, or zero */
+};
-#define CHANGELOG_REC_EXTENDED(rec) \
- (((rec)->cr_flags & CLF_VERMASK) == CLF_EXT_VERSION)
+/* Changelog extension to include JOBID. */
+struct changelog_ext_jobid {
+ char cr_jobid[LUSTRE_JOBID_SIZE]; /**< zero-terminated string. */
+};
+
+static inline size_t changelog_rec_offset(enum changelog_rec_flags crf)
+{
+ size_t size = sizeof(struct changelog_rec);
+
+ if (crf & CLF_RENAME)
+ size += sizeof(struct changelog_ext_rename);
+
+ if (crf & CLF_JOBID)
+ size += sizeof(struct changelog_ext_jobid);
-static inline int changelog_rec_size(struct changelog_rec *rec)
+ return size;
+}
+
+static inline size_t changelog_rec_size(struct changelog_rec *rec)
{
- return CHANGELOG_REC_EXTENDED(rec) ? sizeof(struct changelog_ext_rec) :
- sizeof(*rec);
+ return changelog_rec_offset(rec->cr_flags);
+}
+
+static inline size_t changelog_rec_varsize(struct changelog_rec *rec)
+{
+ return changelog_rec_size(rec) - sizeof(*rec) + rec->cr_namelen;
+}
+
+static inline
+struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec)
+{
+ enum changelog_rec_flags crf = rec->cr_flags & CLF_VERSION;
+
+ return (struct changelog_ext_rename *)((char *)rec +
+ changelog_rec_offset(crf));
+}
+
+/* The jobid follows the rename extension, if present */
+static inline
+struct changelog_ext_jobid *changelog_rec_jobid(struct changelog_rec *rec)
+{
+ enum changelog_rec_flags crf = rec->cr_flags &
+ (CLF_VERSION | CLF_RENAME);
+
+ return (struct changelog_ext_jobid *)((char *)rec +
+ changelog_rec_offset(crf));
}
+/* The name follows the rename and jobid extensions, if present */
static inline char *changelog_rec_name(struct changelog_rec *rec)
{
- return CHANGELOG_REC_EXTENDED(rec) ?
- ((struct changelog_ext_rec *)rec)->cr_name : rec->cr_name;
+ return (char *)rec + changelog_rec_offset(rec->cr_flags &
+ CLF_SUPPORTED);
}
-static inline int changelog_rec_snamelen(struct changelog_ext_rec *rec)
+static inline size_t changelog_rec_snamelen(struct changelog_rec *rec)
{
- return rec->cr_namelen - strlen(rec->cr_name) - 1;
+ return rec->cr_namelen - strlen(changelog_rec_name(rec)) - 1;
}
-static inline char *changelog_rec_sname(struct changelog_ext_rec *rec)
+static inline char *changelog_rec_sname(struct changelog_rec *rec)
{
- return rec->cr_name + strlen(rec->cr_name) + 1;
+ char *cr_name = changelog_rec_name(rec);
+
+ return cr_name + strlen(cr_name) + 1;
+}
+
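
A hedged sketch of how a userspace consumer (the header pulls in stdio.h when !__KERNEL__) might inspect one record with the helpers above; the printf-based reporting and the function name are illustrative assumptions.

/* Print the type, name and (optional) jobid of a single changelog record. */
static void print_rec(struct changelog_rec *rec)
{
	const char *jobid = "";

	if (rec->cr_flags & CLF_JOBID)
		jobid = changelog_rec_jobid(rec)->cr_jobid;

	printf("type %u name %.*s jobid '%s'\n",
	       rec->cr_type, rec->cr_namelen,
	       changelog_rec_name(rec), jobid);
}
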
+/**
+ * Remap a record to the desired format as specified by the crf flags.
+ * The record must be big enough to contain the final remapped version.
+ * Superfluous extension fields are removed and missing ones are added
+ * and zeroed. The flags of the record are updated accordingly.
+ *
+ * The jobid and rename extensions can be added to a record, to match the
+ * format an application expects, typically. In this case, the newly added
+ * fields will be zeroed.
+ * The Jobid field can be removed, to guarantee compatibility with older
+ * clients that don't expect this field in the records they process.
+ *
+ * The following assumptions are being made:
+ * - CLF_RENAME will not be removed
+ * - CLF_JOBID will not be added without CLF_RENAME being added too
+ *
+ * @param[in,out] rec The record to remap.
+ * @param[in] crf_wanted Flags describing the desired extensions.
+ */
+static inline void changelog_remap_rec(struct changelog_rec *rec,
+ enum changelog_rec_flags crf_wanted)
+{
+ char *jid_mov, *rnm_mov;
+
+ crf_wanted &= CLF_SUPPORTED;
+
+ if ((rec->cr_flags & CLF_SUPPORTED) == crf_wanted)
+ return;
+
+ /* First move the variable-length name field */
+ memmove((char *)rec + changelog_rec_offset(crf_wanted),
+ changelog_rec_name(rec), rec->cr_namelen);
+
+ /* Locations of jobid and rename extensions in the remapped record */
+ jid_mov = (char *)rec +
+ changelog_rec_offset(crf_wanted & ~CLF_JOBID);
+ rnm_mov = (char *)rec +
+ changelog_rec_offset(crf_wanted & ~(CLF_JOBID | CLF_RENAME));
+
+ /* Move the extension fields to the desired positions */
+ if ((crf_wanted & CLF_JOBID) && (rec->cr_flags & CLF_JOBID))
+ memmove(jid_mov, changelog_rec_jobid(rec),
+ sizeof(struct changelog_ext_jobid));
+
+ if ((crf_wanted & CLF_RENAME) && (rec->cr_flags & CLF_RENAME))
+ memmove(rnm_mov, changelog_rec_rename(rec),
+ sizeof(struct changelog_ext_rename));
+
+ /* Clear newly added fields */
+ if ((crf_wanted & CLF_JOBID) && !(rec->cr_flags & CLF_JOBID))
+ memset(jid_mov, 0, sizeof(struct changelog_ext_jobid));
+
+ if ((crf_wanted & CLF_RENAME) && !(rec->cr_flags & CLF_RENAME))
+ memset(rnm_mov, 0, sizeof(struct changelog_ext_rename));
+
+ /* Update the record's flags accordingly */
+ rec->cr_flags = (rec->cr_flags & CLF_FLAGMASK) | crf_wanted;
}
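
A hedged usage sketch for the remap helper: stripping the jobid extension before handing a record to a consumer that only understands rename records. The record buffer is assumed to already be CR_MAXSIZE bytes, as the remap requires; the function name is illustrative.

static void rec_drop_jobid(struct changelog_rec *rec)
{
	/* Keep the version and rename extensions, drop CLF_JOBID. */
	changelog_remap_rec(rec, CLF_VERSION | CLF_RENAME);

	/*
	 * rec->cr_flags no longer carries CLF_JOBID; the rename fields
	 * and the name have been moved up to their new offsets.
	 */
}
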
struct ioc_changelog {
@@ -978,7 +1141,7 @@ struct hsm_user_request {
/** Return pointer to data field in a hsm user request */
static inline void *hur_data(struct hsm_user_request *hur)
{
- return &(hur->hur_user_item[hur->hur_request.hr_itemcount]);
+ return &hur->hur_user_item[hur->hur_request.hr_itemcount];
}
/**
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index 95a0be13c0fb..8eb394e64b25 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -151,13 +151,11 @@ static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, char *nam
lustre_cfg_bufs_set_string(bufs, 0, name);
}
-static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
+static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, __u32 index)
{
- int i;
- int offset;
- int bufcount;
-
- LASSERT(index >= 0);
+ __u32 i;
+ size_t offset;
+ __u32 bufcount;
bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount)
@@ -172,7 +170,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
struct lustre_cfg *lcfg)
{
- int i;
+ __u32 i;
bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
for (i = 0; i < bufs->lcfg_bufcount; i++) {
@@ -181,7 +179,7 @@ static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
}
}
-static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
+static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, __u32 index)
{
char *s;
@@ -197,8 +195,8 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
* of data. Try to use the padding first though.
*/
if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
- int last = min((int)lcfg->lcfg_buflens[index],
- cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
+ size_t last = min((size_t)lcfg->lcfg_buflens[index],
+ cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
char lost = s[last];
s[last] = '\0';
@@ -210,10 +208,10 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
return s;
}
-static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
+static inline __u32 lustre_cfg_len(__u32 bufcount, __u32 *buflens)
{
- int i;
- int len;
+ __u32 i;
+ __u32 len;
len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++)
@@ -254,7 +252,7 @@ static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
return;
}
-static inline int lustre_cfg_sanity_check(void *buf, int len)
+static inline int lustre_cfg_sanity_check(void *buf, size_t len)
{
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 1eb64ec4bed4..567c438e93cb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -30,8 +30,8 @@
* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef _LINUX_COMPAT25_H
-#define _LINUX_COMPAT25_H
+#ifndef _LUSTRE_COMPAT_H
+#define _LUSTRE_COMPAT_H
#include <linux/fs_struct.h>
#include <linux/namei.h>
@@ -74,4 +74,4 @@
# define ext2_find_next_zero_bit find_next_zero_bit_le
#endif
-#endif /* _COMPAT25_H */
+#endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 60051a5cfe20..d03534432624 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -573,6 +573,11 @@ enum lvb_type {
};
/**
+ * LDLM_GID_ANY is used to match any group id in ldlm_lock_match().
+ */
+#define LDLM_GID_ANY ((__u64)-1)
+
+/**
* LDLM lock structure
*
* Represents a single LDLM lock and its state in memory. Each lock is
@@ -968,6 +973,7 @@ struct ldlm_enqueue_info {
void *ei_cb_cp; /** lock completion callback */
void *ei_cb_gl; /** lock glimpse callback */
void *ei_cbdata; /** Data to be passed into callbacks. */
+ unsigned int ei_enq_slave:1; /* whether enqueue slave stripes */
};
extern struct obd_ops ldlm_obd_ops;
@@ -1281,16 +1287,6 @@ int ldlm_cli_cancel_list(struct list_head *head, int count,
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
-/* ioctls for trying requests */
-#define IOC_LDLM_TYPE 'f'
-#define IOC_LDLM_MIN_NR 40
-
-#define IOC_LDLM_TEST _IOWR('f', 40, long)
-#define IOC_LDLM_DUMP _IOWR('f', 41, long)
-#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
-#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
-#define IOC_LDLM_MAX_NR 43
-
/**
* "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
* than one lock_res is dead-lock safe.
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index e7e0c21a9b40..a0f064d237c9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -28,21 +28,6 @@
/** l_flags bits marked as "all_flags" bits */
#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
-/** l_flags bits marked as "ast" bits */
-#define LDLM_FL_AST_MASK 0x0000000080008000ULL
-
-/** l_flags bits marked as "blocked" bits */
-#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
-
-/** l_flags bits marked as "gone" bits */
-#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-
-/** l_flags bits marked as "inherit" bits */
-#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-
-/** l_flags bits marked as "off_wire" bits */
-#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
-
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG((_l), 1ULL << 0)
@@ -372,6 +357,27 @@
#define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
+/** l_flags bits marked as "ast" bits */
+#define LDLM_FL_AST_MASK (LDLM_FL_FLOCK_DEADLOCK |\
+ LDLM_FL_AST_DISCARD_DATA)
+
+/** l_flags bits marked as "blocked" bits */
+#define LDLM_FL_BLOCKED_MASK (LDLM_FL_BLOCK_GRANTED |\
+ LDLM_FL_BLOCK_CONV |\
+ LDLM_FL_BLOCK_WAIT)
+
+/** l_flags bits marked as "gone" bits */
+#define LDLM_FL_GONE_MASK (LDLM_FL_DESTROYED |\
+ LDLM_FL_FAILED)
+
+/** l_flags bits marked as "inherit" bits */
+/* Flags inherited from wire on enqueue/reply between client/server. */
+/* NO_TIMEOUT flag to force ldlm_lock_match() to wait with no timeout. */
+/* TEST_LOCK flag so that a TEST lock is not granted. */
+#define LDLM_FL_INHERIT_MASK (LDLM_FL_CANCEL_ON_BLOCK |\
+ LDLM_FL_NO_TIMEOUT |\
+ LDLM_FL_TEST_LOCK)
+
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
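
A hedged sketch of how the rebuilt composite masks are meant to be consumed together with LDLM_TEST_FLAG(); the helper name is made up for this illustration.

/* True if the lock is already destroyed or failed ("gone"). */
static inline bool ldlm_lock_is_gone(struct ldlm_lock *lock)
{
	return LDLM_TEST_FLAG(lock, LDLM_FL_GONE_MASK);
}
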
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
index d1039e1ff70d..1e71a8638186 100644
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -46,6 +46,7 @@
#ifdef CONFIG_FS_POSIX_ACL
+#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
typedef struct {
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 743671a547ef..316780693193 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -229,6 +229,7 @@ enum local_oid {
MDD_LOV_OBJ_OSEQ = 4121UL,
LFSCK_NAMESPACE_OID = 4122UL,
REMOTE_PARENT_DIR_OID = 4123UL,
+ SLAVE_LLOG_CATALOGS_OID = 4124UL,
};
static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
@@ -392,21 +393,19 @@ struct ldlm_namespace;
* but was moved into name[1] along with the OID to avoid consuming the
* renaming name[2,3] fields that need to be used for the quota identifier.
*/
-static inline struct ldlm_res_id *
+static inline void
fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
{
memset(res, 0, sizeof(*res));
res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
-
- return res;
}
/*
* Return true if resource is for object identified by FID.
*/
-static inline int fid_res_name_eq(const struct lu_fid *fid,
- const struct ldlm_res_id *res)
+static inline bool fid_res_name_eq(const struct lu_fid *fid,
+ const struct ldlm_res_id *res)
{
return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
@@ -415,29 +414,25 @@ static inline int fid_res_name_eq(const struct lu_fid *fid,
/*
* Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
*/
-static inline struct lu_fid *
+static inline void
fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
{
fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
LASSERT(fid_res_name_eq(fid, res));
-
- return fid;
}
/*
* Build (DLM) resource identifier from global quota FID and quota ID.
*/
-static inline struct ldlm_res_id *
+static inline void
fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(glb_fid, res);
res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
-
- return res;
}
/*
@@ -454,14 +449,12 @@ static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
(__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
}
-static inline struct ldlm_res_id *
+static inline void
fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(fid, res);
res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
-
- return res;
}
/**
@@ -482,7 +475,7 @@ fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
* res will be built from normal FID directly, i.e. res[0] = f_seq,
* res[1] = f_oid + f_ver.
*/
-static inline void ostid_build_res_name(struct ost_id *oi,
+static inline void ostid_build_res_name(const struct ost_id *oi,
struct ldlm_res_id *name)
{
memset(name, 0, sizeof(*name));
@@ -497,8 +490,8 @@ static inline void ostid_build_res_name(struct ost_id *oi,
/**
* Return true if the resource is for the object identified by this id & group.
*/
-static inline int ostid_res_name_eq(struct ost_id *oi,
- struct ldlm_res_id *name)
+static inline int ostid_res_name_eq(const struct ost_id *oi,
+ const struct ldlm_res_id *name)
{
/* Note: it is just a trick here to save some effort, probably the
* correct way would be turn them into the FID and compare
@@ -603,13 +596,14 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
* (from OID), or up to 128M inodes without collisions for new files.
*/
ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
+ (seq >> (64 - (40 - 8)) & 0xffffff00) +
(fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
return ino ? ino : fid_oid(fid);
}
-static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
+static inline int lu_fid_diff(const struct lu_fid *fid1,
+ const struct lu_fid *fid2)
{
LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
PFID(fid1), PFID(fid2));
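
With the builders now returning void, the caller owns the ldlm_res_id storage explicitly; a hedged sketch (the function name is assumed):

static void demo_fid_res(const struct lu_fid *fid)
{
	struct ldlm_res_id res;

	/* Build the DLM resource name from the FID ... */
	fid_build_reg_res_name(fid, &res);
	/* ... and verify the round trip with the boolean helper. */
	LASSERT(fid_res_name_eq(fid, &res));
}
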
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index 1a63a6b9e116..e071bac9df57 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -66,6 +66,7 @@ struct portals_handle_ops {
struct portals_handle {
struct list_head h_link;
__u64 h_cookie;
+ const void *h_owner;
struct portals_handle_ops *h_ops;
/* newly added fields to handle the RCU issue. -jxiong */
@@ -75,15 +76,13 @@ struct portals_handle {
unsigned int h_in:1;
};
-#define RCU2HANDLE(rcu) container_of(rcu, struct portals_handle, h_rcu)
-
/* handles.c */
/* Add a handle to the hash table */
void class_handle_hash(struct portals_handle *,
struct portals_handle_ops *ops);
void class_handle_unhash(struct portals_handle *);
-void *class_handle2object(__u64 cookie);
+void *class_handle2object(__u64 cookie, const void *owner);
void class_handle_free_cb(struct rcu_head *rcu);
int class_handle_init(void);
void class_handle_cleanup(void);
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 4445be7a59dd..5461ba33d90c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -285,8 +285,10 @@ struct obd_import {
imp_resend_replay:1,
/* disable normal recovery, for test only. */
imp_no_pinger_recover:1,
+#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
/* need IR MNE swab */
imp_need_mne_swab:1,
+#endif
/* import must be reconnected instead of
* chosing new connection
*/
@@ -305,28 +307,6 @@ struct obd_import {
time64_t imp_last_reply_time; /* for health check */
};
-typedef void (*obd_import_callback)(struct obd_import *imp, void *closure,
- int event, void *event_arg, void *cb_data);
-
-/**
- * Structure for import observer.
- * It is possible to register "observer" on an import and every time
- * something happens to an import (like connect/evict/disconnect)
- * obderver will get its callback called with event type
- */
-struct obd_import_observer {
- struct list_head oio_chain;
- obd_import_callback oio_cb;
- void *oio_cb_data;
-};
-
-void class_observe_import(struct obd_import *imp, obd_import_callback cb,
- void *cb_data);
-void class_unobserve_import(struct obd_import *imp, obd_import_callback cb,
- void *cb_data);
-void class_notify_import_observers(struct obd_import *imp, int event,
- void *event_arg);
-
/* import.c */
static inline unsigned int at_est2timeout(unsigned int val)
{
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 06958f217fc8..6b231913ba2e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -51,7 +51,6 @@
#include "lustre_cfg.h"
/* target.c */
-struct kstatfs;
struct ptlrpc_request;
struct obd_export;
struct lu_target;
@@ -74,325 +73,8 @@ int do_set_info_async(struct obd_import *imp,
u32 vallen, void *val,
struct ptlrpc_request_set *set);
-#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */
-#define OBD_MAX_IOCTL_BUFFER CONFIG_LUSTRE_OBD_MAX_IOCTL_BUFFER
-
void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
-/* client.c */
-
-int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
-struct client_obd *client_conn2cli(struct lustre_handle *conn);
-
-struct md_open_data;
-struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- struct lustre_handle och_lease_handle; /* open lock for lease */
- __u32 och_magic;
- fmode_t och_flags;
-};
-
-#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
-
-/* statfs_pack.c */
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
-
-/*
- * For md echo client
- */
-enum md_echo_cmd {
- ECHO_MD_CREATE = 1, /* Open/Create file on MDT */
- ECHO_MD_MKDIR = 2, /* Mkdir on MDT */
- ECHO_MD_DESTROY = 3, /* Unlink file on MDT */
- ECHO_MD_RMDIR = 4, /* Rmdir on MDT */
- ECHO_MD_LOOKUP = 5, /* Lookup on MDT */
- ECHO_MD_GETATTR = 6, /* Getattr on MDT */
- ECHO_MD_SETATTR = 7, /* Setattr on MDT */
- ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */
-};
-
-/*
- * OBD IOCTLS
- */
-#define OBD_IOCTL_VERSION 0x00010004
-
-struct obd_ioctl_data {
- __u32 ioc_len;
- __u32 ioc_version;
-
- union {
- __u64 ioc_cookie;
- __u64 ioc_u64_1;
- };
- union {
- __u32 ioc_conn1;
- __u32 ioc_u32_1;
- };
- union {
- __u32 ioc_conn2;
- __u32 ioc_u32_2;
- };
-
- struct obdo ioc_obdo1;
- struct obdo ioc_obdo2;
-
- u64 ioc_count;
- u64 ioc_offset;
- __u32 ioc_dev;
- __u32 ioc_command;
-
- __u64 ioc_nid;
- __u32 ioc_nal;
- __u32 ioc_type;
-
- /* buffers the kernel will treat as user pointers */
- __u32 ioc_plen1;
- void __user *ioc_pbuf1;
- __u32 ioc_plen2;
- void __user *ioc_pbuf2;
-
- /* inline buffers for various arguments */
- __u32 ioc_inllen1;
- char *ioc_inlbuf1;
- __u32 ioc_inllen2;
- char *ioc_inlbuf2;
- __u32 ioc_inllen3;
- char *ioc_inlbuf3;
- __u32 ioc_inllen4;
- char *ioc_inlbuf4;
-
- char ioc_bulk[0];
-};
-
-struct obd_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
-static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
-{
- int len = cfs_size_round(sizeof(struct obd_ioctl_data));
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- len += cfs_size_round(data->ioc_inllen3);
- len += cfs_size_round(data->ioc_inllen4);
- return len;
-}
-
-static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
-{
- if (data->ioc_len > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_len larger than %d\n",
- OBD_MAX_IOCTL_BUFFER);
- return 1;
- }
- if (data->ioc_inllen1 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen1 larger than ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen2 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen2 larger than ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen3 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen3 larger than ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen4 > OBD_MAX_IOCTL_BUFFER) {
- CERROR("OBD ioctl: ioc_inllen4 larger than ioc_len\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
- CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
- CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("OBD ioctl: plen1 set but NULL pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("OBD ioctl: plen2 set but NULL pointer\n");
- return 1;
- }
- if (obd_ioctl_packlen(data) > data->ioc_len) {
- CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
- obd_ioctl_packlen(data), data->ioc_len);
- return 1;
- }
- return 0;
-}
-
-#include "obd_support.h"
-
-/* function defined in lustre/obdclass/<platform>/<platform>-module.c */
-int obd_ioctl_getdata(char **buf, int *len, void __user *arg);
-int obd_ioctl_popdata(void __user *arg, void *data, int len);
-
-static inline void obd_ioctl_freedata(char *buf, int len)
-{
- kvfree(buf);
- return;
-}
-
-/*
- * BSD ioctl description:
- * #define IOC_V1 _IOR(g, n1, long)
- * #define IOC_V2 _IOW(g, n2, long)
- *
- * ioctl(f, IOC_V1, arg);
- * arg will be treated as a long value,
- *
- * ioctl(f, IOC_V2, arg)
- * arg will be treated as a pointer, bsd will call
- * copyin(buf, arg, sizeof(long))
- *
- * To make BSD ioctl handles argument correctly and simplely,
- * we change _IOR to _IOWR so BSD will copyin obd_ioctl_data
- * for us. Does this change affect Linux? (XXX Liang)
- */
-#define OBD_IOC_DATA_TYPE long
-
-#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME])
-#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
-
-#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
-#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
-
-#define OBD_GET_VERSION _IOWR ('f', 144, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
-/* see also <lustre/lustre_user.h> for ioctls 151-153 */
-/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
-#define OBD_IOC_LOV_SETSTRIPE _IOW('f', 154, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
-#define OBD_IOC_LOV_GETSTRIPE _IOW('f', 155, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
-#define OBD_IOC_LOV_SETEA _IOW('f', 156, OBD_IOC_DATA_TYPE)
-/* see <lustre/lustre_user.h> for ioctls 157-159 */
-/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
-#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
-/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
-#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
-/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
-#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
-/* see also <lustre/lustre_user.h> for ioctls 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
-#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
-
-#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
-/* OBD_IOC_LLOG_CATINFO is deprecated */
-#define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE)
-
-/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */
-/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */
-/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */
-/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */
-
-#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
-
-/* <lustre/lustre_user.h> defines ioctl number 218-219 */
-#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t)
-
-#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data)
-#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data)
-
-#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PAUSE_LFSCK _IOW('f', 232, OBD_IOC_DATA_TYPE)
-
-/* XXX _IOWR('f', 250, long) has been defined in
- * libcfs/include/libcfs/libcfs_private.h for debug, don't use it
- */
-
-/* Until such time as we get_info the per-stripe maximum from the OST,
- * we define this to be 2T - 4k, which is the ext3 maxbytes.
- */
-#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-/* Special values for remove LOV EA from disk */
-#define LOVEA_DELETE_VALUES(size, count, offset) (size == 0 && count == 0 && \
- offset == (typeof(offset))(-1))
-
-/* #define POISON_BULK 0 */
-
/*
* l_wait_event is a flexible sleeping function, permitting simple caller
* configuration of interrupt and timeout sensitivity along with actions to
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
new file mode 100644
index 000000000000..249e8bf4fa22
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -0,0 +1,79 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, 2014, Intel Corporation.
+ * Use is subject to license terms.
+ *
+ * Author: di wang <di.wang@intel.com>
+ */
+
+#define DEFAULT_LINKEA_SIZE 4096
+
+struct linkea_data {
+ /**
+ * Buffer to keep link EA body.
+ */
+ struct lu_buf *ld_buf;
+ /**
+ * The matched header, entry and its length in the EA
+ */
+ struct link_ea_header *ld_leh;
+ struct link_ea_entry *ld_lee;
+ int ld_reclen;
+};
+
+int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf);
+int linkea_init(struct linkea_data *ldata);
+void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
+ struct lu_name *lname, struct lu_fid *pfid);
+int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname,
+ const struct lu_fid *pfid);
+int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid);
+void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname);
+int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid);
+
+static inline void linkea_first_entry(struct linkea_data *ldata)
+{
+ LASSERT(ldata);
+ LASSERT(ldata->ld_leh);
+
+ if (ldata->ld_leh->leh_reccount == 0)
+ ldata->ld_lee = NULL;
+ else
+ ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1);
+}
+
+static inline void linkea_next_entry(struct linkea_data *ldata)
+{
+ LASSERT(ldata);
+ LASSERT(ldata->ld_leh);
+
+ if (ldata->ld_lee) {
+ ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee +
+ ldata->ld_reclen);
+ if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh +
+ ldata->ld_leh->leh_len))
+ ldata->ld_lee = NULL;
+ }
+}
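
A hedged sketch of iterating a link EA with the helpers above; the debug printout and the function name are assumptions.

/* Dump every (name, parent FID) pair recorded in the link EA. */
static void linkea_dump(struct linkea_data *ldata)
{
	struct lu_name lname;
	struct lu_fid pfid;

	for (linkea_first_entry(ldata); ldata->ld_lee;
	     linkea_next_entry(ldata)) {
		linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen,
				    &lname, &pfid);
		CDEBUG(D_INFO, "name %.*s parent "DFID"\n",
		       lname.ln_namelen, lname.ln_name, PFID(&pfid));
	}
}
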
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
deleted file mode 100644
index b16897702559..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef _LL_H
-#define _LL_H
-
-/** \defgroup lite lite
- *
- * @{
- */
-
-#include "linux/lustre_lite.h"
-
-#include "obd_class.h"
-#include "lustre_net.h"
-#include "lustre_mds.h"
-#include "lustre_ha.h"
-
-/* 4UL * 1024 * 1024 */
-#define LL_MAX_BLKSIZE_BITS (22)
-#define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS)
-
-/*
- * This is embedded into llite super-blocks to keep track of
- * connect flags (capabilities) supported by all imports given mount is
- * connected to.
- */
-struct lustre_client_ocd {
- /*
- * This is conjunction of connect_flags across all imports (LOVs) this
- * mount is connected to. This field is updated by cl_ocd_update()
- * under ->lco_lock.
- */
- __u64 lco_flags;
- struct mutex lco_lock;
- struct obd_export *lco_md_exp;
- struct obd_export *lco_dt_exp;
-};
-
-/*
- * Chain of hash overflow pages.
- */
-struct ll_dir_chain {
- /* XXX something. Later */
-};
-
-static inline void ll_dir_chain_init(struct ll_dir_chain *chain)
-{
-}
-
-static inline void ll_dir_chain_fini(struct ll_dir_chain *chain)
-{
-}
-
-static inline unsigned long hash_x_index(__u64 hash, int hash64)
-{
- if (BITS_PER_LONG == 32 && hash64)
- hash >>= 32;
- /* save hash 0 as index 0 because otherwise we'll save it at
- * page index end (~0UL) and it causes truncate_inode_pages_range()
- * to loop forever.
- */
- return ~0UL - (hash + !hash);
-}
-
-/** @} lite */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
new file mode 100644
index 000000000000..d7f7afa8dfa7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -0,0 +1,184 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, Intel Corporation.
+ */
+/*
+ * lustre/include/lustre_lmv.h
+ *
+ * Lustre LMV structures and functions.
+ *
+ * Author: Di Wang <di.wang@intel.com>
+ */
+
+#ifndef _LUSTRE_LMV_H
+#define _LUSTRE_LMV_H
+#include "lustre/lustre_idl.h"
+
+struct lmv_oinfo {
+ struct lu_fid lmo_fid;
+ u32 lmo_mds;
+ struct inode *lmo_root;
+};
+
+struct lmv_stripe_md {
+ __u32 lsm_md_magic;
+ __u32 lsm_md_stripe_count;
+ __u32 lsm_md_master_mdt_index;
+ __u32 lsm_md_hash_type;
+ __u32 lsm_md_layout_version;
+ __u32 lsm_md_default_count;
+ __u32 lsm_md_default_index;
+ char lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
+ struct lmv_oinfo lsm_md_oinfo[0];
+};
+
+static inline bool
+lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
+{
+ __u32 idx;
+
+ if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
+ lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
+ lsm1->lsm_md_master_mdt_index != lsm2->lsm_md_master_mdt_index ||
+ lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
+ lsm1->lsm_md_layout_version != lsm2->lsm_md_layout_version ||
+ strcmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name) != 0)
+ return false;
+
+ for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
+ if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
+ &lsm2->lsm_md_oinfo[idx].lmo_fid))
+ return false;
+ }
+
+ return true;
+}
+
+union lmv_mds_md;
+
+int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ const union lmv_mds_md *lmm, int stripe_count);
+
+static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count)
+{
+ return lmv_unpack_md(NULL, lsmp, NULL, stripe_count);
+}
+
+static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
+{
+ lmv_unpack_md(NULL, &lsm, NULL, 0);
+}
+
+static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
+ const struct lmv_mds_md_v1 *lmv_src)
+{
+ __u32 i;
+
+ lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
+ lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
+ lmv_dst->lmv_master_mdt_index =
+ le32_to_cpu(lmv_src->lmv_master_mdt_index);
+ lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type);
+ lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version);
+
+ for (i = 0; i < lmv_src->lmv_stripe_count; i++)
+ fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i],
+ &lmv_src->lmv_stripe_fids[i]);
+}
+
+static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst,
+ const union lmv_mds_md *lmv_src)
+{
+ switch (le32_to_cpu(lmv_src->lmv_magic)) {
+ case LMV_MAGIC_V1:
+ lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
+ break;
+ default:
+ break;
+ }
+}
+
+/* This hash is only for testing purpose */
+static inline unsigned int
+lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
+{
+ const unsigned char *p = (const unsigned char *)name;
+ unsigned int c = 0;
+
+ while (--namelen >= 0)
+ c += p[namelen];
+
+ c = c % count;
+
+ return c;
+}
+
+static inline unsigned int
+lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
+{
+ __u64 hash;
+
+ hash = lustre_hash_fnv_1a_64(name, namelen);
+
+ return do_div(hash, count);
+}
+
+static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type,
+ unsigned int stripe_count,
+ const char *name, int namelen)
+{
+ __u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK;
+ int idx;
+
+ LASSERT(namelen > 0);
+ if (stripe_count <= 1)
+ return 0;
+
+ /* for migrating object, always start from 0 stripe */
+ if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION)
+ return 0;
+
+ switch (hash_type) {
+ case LMV_HASH_TYPE_ALL_CHARS:
+ idx = lmv_hash_all_chars(stripe_count, name, namelen);
+ break;
+ case LMV_HASH_TYPE_FNV_1A_64:
+ idx = lmv_hash_fnv1a(stripe_count, name, namelen);
+ break;
+ default:
+ idx = -EBADFD;
+ break;
+ }
+ CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name,
+ hash_type, idx);
+
+ return idx;
+}
+
+static inline bool lmv_is_known_hash_type(__u32 type)
+{
+ return (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_FNV_1A_64 ||
+ (type & LMV_HASH_TYPE_MASK) == LMV_HASH_TYPE_ALL_CHARS;
+}
+
+#endif
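
A hedged sketch tying the hash helpers together: resolve a name under a striped directory to the MDT index serving it. The wrapper name is an assumption; the stripe data would normally come from the directory's lmv_stripe_md.

static int name_to_mdt(const struct lmv_stripe_md *lsm,
		       const char *name, int namelen)
{
	int idx;

	idx = lmv_name_to_stripe_index(lsm->lsm_md_hash_type,
				       lsm->lsm_md_stripe_count,
				       name, namelen);
	if (idx < 0)
		return idx;	/* -EBADFD for an unknown hash type */

	return lsm->lsm_md_oinfo[idx].lmo_mds;
}
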
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index b96e02317bfc..995b266932e3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -277,12 +277,11 @@ static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
__llog_ctxt_put(NULL, ctxt);
}
-static inline void llog_group_init(struct obd_llog_group *olg, int group)
+static inline void llog_group_init(struct obd_llog_group *olg)
{
init_waitqueue_head(&olg->olg_waitq);
spin_lock_init(&olg->olg_lock);
mutex_init(&olg->olg_cat_processing);
- olg->olg_seq = group;
}
static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index fa62b95d351f..8fc2d3f2dfd6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -96,7 +96,7 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
return;
/* This would normally block until the existing request finishes.
@@ -136,7 +136,7 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
+ it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
return;
if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
@@ -156,34 +156,44 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
mutex_unlock(&lck->rpcl_mutex);
}
-/* Update the maximum observed easize and cookiesize. The default easize
- * and cookiesize is initialized to the minimum value but allowed to grow
- * up to a single page in size if required to handle the common case.
+/**
+ * Update the maximum possible easize and cookiesize.
+ *
+ * The values are learned from ptlrpc replies sent by the MDT. The
+ * default easize and cookiesize is initialized to the minimum value but
+ * allowed to grow up to a single page in size if required to handle the
+ * common case.
+ *
+ * \see client_obd::cl_default_mds_easize and
+ * client_obd::cl_default_mds_cookiesize
+ *
+ * \param[in] exp export for MDC device
+ * \param[in] body body of ptlrpc reply from MDT
+ *
*/
static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
struct mdt_body *body)
{
- if (body->valid & OBD_MD_FLMODEASIZE) {
+ if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
struct client_obd *cli = &exp->exp_obd->u.cli;
+ u32 def_cookiesize, def_easize;
- if (cli->cl_max_mds_easize < body->max_mdsize) {
- cli->cl_max_mds_easize = body->max_mdsize;
- cli->cl_default_mds_easize =
- min_t(__u32, body->max_mdsize, PAGE_SIZE);
- }
- if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
- cli->cl_max_mds_cookiesize = body->max_cookiesize;
- cli->cl_default_mds_cookiesize =
- min_t(__u32, body->max_cookiesize, PAGE_SIZE);
- }
+ if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
+ cli->cl_max_mds_easize = body->mbo_max_mdsize;
+
+ def_easize = min_t(__u32, body->mbo_max_mdsize,
+ OBD_MAX_DEFAULT_EA_SIZE);
+ cli->cl_default_mds_easize = def_easize;
+
+ if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize)
+ cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize;
+
+ def_cookiesize = min_t(__u32, body->mbo_max_cookiesize,
+ OBD_MAX_DEFAULT_COOKIE_SIZE);
+ cli->cl_default_mds_cookiesize = def_cookiesize;
}
}
-struct mdc_cache_waiter {
- struct list_head mcw_entry;
- wait_queue_head_t mcw_waitq;
-};
-
/* mdc/mdc_locks.c */
int it_open_error(int phase, struct lookup_intent *it);
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
index 4104bd9bd5c4..23a7e4f78e9a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -58,9 +58,6 @@ struct mds_group_info {
#define MDD_OBD_NAME "mdd_obd"
#define MDD_OBD_UUID "mdd_obd_uuid"
-/* these are local flags, used only on the client, private */
-#define M_CHECK_STALE 0200000000
-
/** @} mds */
#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index d5debd615fdf..e9aba99ee52a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -261,7 +261,10 @@
#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
-#define OST_MAXREQSIZE (5 * 1024)
+/**
+ * FIEMAP request can be 4K+ for now
+ */
+#define OST_MAXREQSIZE (16 * 1024)
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
@@ -570,13 +573,13 @@ struct ptlrpc_nrs_pol_ops {
*
* \param[in,out] policy The policy being initialized
*/
- int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
+ int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
/**
* Called during policy unregistration; this operation is optional.
*
* \param[in,out] policy The policy being unregistered/finalized
*/
- void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
+ void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
/**
* Called when activating a policy via lprocfs; policies allocate and
* initialize their resources here; this operation is optional.
@@ -585,7 +588,7 @@ struct ptlrpc_nrs_pol_ops {
*
* \see nrs_policy_start_locked()
*/
- int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+ int (*op_policy_start)(struct ptlrpc_nrs_policy *policy);
/**
* Called when deactivating a policy via lprocfs; policies deallocate
* their resources here; this operation is optional
@@ -594,7 +597,7 @@ struct ptlrpc_nrs_pol_ops {
*
* \see nrs_policy_stop0()
*/
- void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
+ void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
/**
* Used for policy-specific operations; i.e. not generic ones like
* \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
@@ -610,8 +613,8 @@ struct ptlrpc_nrs_pol_ops {
*
* \see ptlrpc_nrs_policy_control()
*/
- int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
+ int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg);
/**
* Called when obtaining references to the resources of the resource
@@ -648,11 +651,11 @@ struct ptlrpc_nrs_pol_ops {
* \see ptlrpc_nrs_req_initialize()
* \see ptlrpc_nrs_hpreq_add_nolock()
*/
- int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
+ int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req);
/**
* Called when releasing references taken for resources in the resource
* hierarchy for the request; this operation is optional.
@@ -663,8 +666,8 @@ struct ptlrpc_nrs_pol_ops {
* \see ptlrpc_nrs_req_finalize()
* \see ptlrpc_nrs_hpreq_add_nolock()
*/
- void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
+ void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
+ const struct ptlrpc_nrs_resource *res);
/**
* Obtains a request for handling from the policy, and optionally
@@ -683,8 +686,8 @@ struct ptlrpc_nrs_pol_ops {
* \see ptlrpc_nrs_req_get_nolock()
*/
struct ptlrpc_nrs_request *
- (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
+ (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
/**
* Called when attempting to add a request to a policy for later
* handling; this operation is mandatory.
@@ -697,8 +700,8 @@ struct ptlrpc_nrs_pol_ops {
*
* \see ptlrpc_nrs_req_add_nolock()
*/
- int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Removes a request from the policy's set of pending requests. Normally
* called after a request has been polled successfully from the policy
@@ -707,8 +710,8 @@ struct ptlrpc_nrs_pol_ops {
* \param[in,out] policy The policy the request \a nrq belongs to
* \param[in,out] nrq The request to dequeue
*/
- void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Called after the request has been carried out. Could be used for
* job/resource control; this operation is optional.
@@ -721,8 +724,8 @@ struct ptlrpc_nrs_pol_ops {
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
- void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Registers the policy's lprocfs interface with a PTLRPC service.
*
@@ -731,7 +734,7 @@ struct ptlrpc_nrs_pol_ops {
* \retval 0 success
* \retval != 0 error
*/
- int (*op_lprocfs_init) (struct ptlrpc_service *svc);
+ int (*op_lprocfs_init)(struct ptlrpc_service *svc);
/**
* Unregisters the policy's lprocfs interface from a PTLRPC service.
*
@@ -743,7 +746,7 @@ struct ptlrpc_nrs_pol_ops {
*
* \param[in] svc The service
*/
- void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
+ void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
};
/**
@@ -1628,7 +1631,7 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
/**
* Returns 1 if request buffer at offset \a index was already swabbed
*/
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
return req->rq_req_swab_mask & (1 << index);
@@ -1637,7 +1640,7 @@ static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
/**
* Returns 1 if request reply buffer at offset \a index was already swabbed
*/
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
return req->rq_rep_swab_mask & (1 << index);
@@ -1662,7 +1665,8 @@ static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
/**
* Mark request buffer at offset \a index that it was already swabbed
*/
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
@@ -1672,7 +1676,8 @@ static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
/**
* Mark request reply buffer at offset \a index that it was already swabbed
*/
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
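The swab helpers in the hunks above keep one bit per message buffer in rq_req_swab_mask / rq_rep_swab_mask so that each buffer is byte-swapped at most once. A self-contained sketch of the same mark/test pattern on a plain 32-bit mask; swab_mask, set_swabbed() and swabbed() are illustrative stand-ins, not the Lustre API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the rq_*_swab_mask fields: one bit per request buffer. */
static uint32_t swab_mask;

static void set_swabbed(size_t index)
{
	assert(index < sizeof(swab_mask) * 8);
	assert((swab_mask & (1U << index)) == 0);	/* swab only once */
	swab_mask |= 1U << index;
}

static int swabbed(size_t index)
{
	assert(index < sizeof(swab_mask) * 8);
	return swab_mask & (1U << index);
}

int main(void)
{
	set_swabbed(3);
	printf("buffer 3 swabbed: %d, buffer 4 swabbed: %d\n",
	       !!swabbed(3), !!swabbed(4));
	return 0;
}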
@@ -2403,7 +2408,6 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
int ptlrpc_reply(struct ptlrpc_request *req);
int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
int ptlrpc_error(struct ptlrpc_request *req);
-void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
@@ -2423,23 +2427,17 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
int ptlrpc_queue_wait(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
-int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
void ptlrpc_abort_inflight(struct obd_import *imp);
void ptlrpc_abort_set(struct ptlrpc_request_set *set);
struct ptlrpc_request_set *ptlrpc_prep_set(void);
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
void *arg);
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
-int ptlrpc_expired_set(void *data);
-void ptlrpc_interrupted_set(void *data);
void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
void ptlrpc_set_destroy(struct ptlrpc_request_set *);
void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req);
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
@@ -2611,9 +2609,9 @@ int ptlrpc_reconnect_import(struct obd_import *imp);
* @{
*/
int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- int index);
+ u32 index);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index);
+ u32 index);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
@@ -2632,27 +2630,27 @@ int lustre_shrink_msg(struct lustre_msg *msg, int segment,
unsigned int newlen, int move_data);
void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
int __lustre_unpack_msg(struct lustre_msg *m, int len);
-int lustre_msg_hdr_size(__u32 magic, int count);
-int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
-int lustre_msg_size_v2(int count, __u32 *lengths);
-int lustre_packed_msg_size(struct lustre_msg *msg);
-int lustre_msg_early_size(void);
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
-void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
-int lustre_msg_buflen(struct lustre_msg *m, int n);
-int lustre_msg_bufcount(struct lustre_msg *m);
-char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
+u32 lustre_msg_hdr_size(__u32 magic, u32 count);
+u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths);
+u32 lustre_msg_size_v2(int count, __u32 *lengths);
+u32 lustre_packed_msg_size(struct lustre_msg *msg);
+u32 lustre_msg_early_size(void);
+void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size);
+void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 minlen);
+u32 lustre_msg_buflen(struct lustre_msg *m, u32 n);
+u32 lustre_msg_bufcount(struct lustre_msg *m);
+char *lustre_msg_string(struct lustre_msg *m, u32 n, u32 max_len);
__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_flags(struct lustre_msg *msg);
-void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags);
+void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags);
+void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags);
__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
-void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags);
struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
__u32 lustre_msg_get_type(struct lustre_msg *msg);
-void lustre_msg_add_version(struct lustre_msg *msg, int version);
+void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index 82aadd32c2b8..8061a04ee806 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -39,6 +39,9 @@
#ifndef _LUSTRE_PARAM_H
#define _LUSTRE_PARAM_H
+#include "../../include/linux/libcfs/libcfs.h"
+#include "../../include/linux/lnet/types.h"
+
/** \defgroup param param
*
* @{
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
index 5842cb18b49e..5842cb18b49e 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index 544a43c862b9..a13558e53274 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -76,7 +76,8 @@ void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req,
void req_capsule_fini(struct req_capsule *pill);
void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt);
-int req_capsule_filled_sizes(struct req_capsule *pill, enum req_location loc);
+size_t req_capsule_filled_sizes(struct req_capsule *pill,
+ enum req_location loc);
int req_capsule_server_pack(struct req_capsule *pill);
void *req_capsule_client_get(struct req_capsule *pill,
@@ -86,27 +87,27 @@ void *req_capsule_client_swab_get(struct req_capsule *pill,
void *swabber);
void *req_capsule_client_sized_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len);
+ u32 len);
void *req_capsule_server_get(struct req_capsule *pill,
const struct req_msg_field *field);
void *req_capsule_server_sized_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len);
+ u32 len);
void *req_capsule_server_swab_get(struct req_capsule *pill,
const struct req_msg_field *field,
void *swabber);
void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len, void *swabber);
+ u32 len, void *swabber);
void req_capsule_set_size(struct req_capsule *pill,
const struct req_msg_field *field,
- enum req_location loc, int size);
-int req_capsule_get_size(const struct req_capsule *pill,
+ enum req_location loc, u32 size);
+u32 req_capsule_get_size(const struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc);
-int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
-int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
+u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
+u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
enum req_location loc);
void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt);
@@ -115,8 +116,7 @@ int req_capsule_has_field(const struct req_capsule *pill,
enum req_location loc);
void req_capsule_shrink(struct req_capsule *pill,
const struct req_msg_field *field,
- unsigned int newlen,
- enum req_location loc);
+ u32 newlen, enum req_location loc);
int req_layout_init(void);
void req_layout_fini(void);
@@ -149,14 +149,11 @@ extern struct req_format RQF_MDS_GETATTR;
extern struct req_format RQF_MDS_GETATTR_NAME;
extern struct req_format RQF_MDS_CLOSE;
extern struct req_format RQF_MDS_RELEASE_CLOSE;
-extern struct req_format RQF_MDS_PIN;
-extern struct req_format RQF_MDS_UNPIN;
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
extern struct req_format RQF_MDS_GET_INFO;
extern struct req_format RQF_MDS_READPAGE;
extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_IS_SUBDIR;
extern struct req_format RQF_MDS_DONE_WRITING;
extern struct req_format RQF_MDS_REINT;
extern struct req_format RQF_MDS_REINT_CREATE;
diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h
index 64559a16f4de..19c9135e2273 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ver.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ver.h
@@ -2,14 +2,21 @@
#define _LUSTRE_VER_H_
#define LUSTRE_MAJOR 2
-#define LUSTRE_MINOR 4
-#define LUSTRE_PATCH 60
+#define LUSTRE_MINOR 6
+#define LUSTRE_PATCH 99
#define LUSTRE_FIX 0
-#define LUSTRE_VERSION_STRING "2.4.60"
+#define LUSTRE_VERSION_STRING "2.6.99"
-#define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \
- LUSTRE_MINOR, LUSTRE_PATCH, \
- LUSTRE_FIX)
+#define OBD_OCD_VERSION(major, minor, patch, fix) \
+ (((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))
+
+#define OBD_OCD_VERSION_MAJOR(version) ((int)((version) >> 24) & 255)
+#define OBD_OCD_VERSION_MINOR(version) ((int)((version) >> 16) & 255)
+#define OBD_OCD_VERSION_PATCH(version) ((int)((version) >> 8) & 255)
+#define OBD_OCD_VERSION_FIX(version) ((int)((version) >> 0) & 255)
+
+#define LUSTRE_VERSION_CODE \
+ OBD_OCD_VERSION(LUSTRE_MAJOR, LUSTRE_MINOR, LUSTRE_PATCH, LUSTRE_FIX)
/*
* If lustre version of client and servers it connects to differs by more
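The OBD_OCD_VERSION() helpers added above pack major/minor/patch/fix into one byte each of a 32-bit code. A short worked example, copying the macro definitions from this hunk; only main() is added for illustration:

#include <stdio.h>

#define OBD_OCD_VERSION(major, minor, patch, fix)			\
	(((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))

#define OBD_OCD_VERSION_MAJOR(version)	((int)((version) >> 24) & 255)
#define OBD_OCD_VERSION_MINOR(version)	((int)((version) >> 16) & 255)
#define OBD_OCD_VERSION_PATCH(version)	((int)((version) >> 8) & 255)
#define OBD_OCD_VERSION_FIX(version)	((int)((version) >> 0) & 255)

int main(void)
{
	unsigned int code = OBD_OCD_VERSION(2, 6, 99, 0);

	/* 2.6.99.0 encodes to 0x02066300 and decodes back the same way. */
	printf("code=0x%08x -> %d.%d.%d.%d\n", code,
	       OBD_OCD_VERSION_MAJOR(code), OBD_OCD_VERSION_MINOR(code),
	       OBD_OCD_VERSION_PATCH(code), OBD_OCD_VERSION_FIX(code));
	return 0;
}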
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index a1bc2c478ff9..f6fc4dd05bd6 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -35,21 +35,13 @@
#include <linux/spinlock.h>
-#define IOC_OSC_TYPE 'h'
-#define IOC_OSC_MIN_NR 20
-#define IOC_OSC_SET_ACTIVE _IOWR(IOC_OSC_TYPE, 21, struct obd_device *)
-#define IOC_OSC_MAX_NR 50
-
-#define IOC_MDC_TYPE 'i'
-#define IOC_MDC_MIN_NR 20
-#define IOC_MDC_MAX_NR 50
-
#include "lustre/lustre_idl.h"
#include "lustre_lib.h"
#include "lu_ref.h"
#include "lustre_export.h"
#include "lustre_fid.h"
#include "lustre_fld.h"
+#include "lustre_handles.h"
#include "lustre_intent.h"
#define MAX_OBD_DEVICES 8192
@@ -81,6 +73,13 @@ static inline void loi_init(struct lov_oinfo *loi)
{
}
+/*
+ * If we are unable to get the maximum object size from the OST in
+ * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
+ * the old maximum object size from ext3.
+ */
+#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
+
struct lov_stripe_md {
atomic_t lsm_refc;
spinlock_t lsm_lock;
@@ -89,31 +88,17 @@ struct lov_stripe_md {
/* maximum possible file size, might change as OSTs status changes,
* e.g. disconnected, deactivated
*/
- __u64 lsm_maxbytes;
- struct {
- /* Public members. */
- struct ost_id lw_object_oi; /* lov object id/seq */
-
- /* LOV-private members start here -- only for use in lov/. */
- __u32 lw_magic;
- __u32 lw_stripe_size; /* size of the stripe */
- __u32 lw_pattern; /* striping pattern (RAID0, RAID1) */
- __u16 lw_stripe_count; /* number of objects being striped over */
- __u16 lw_layout_gen; /* generation of the layout */
- char lw_pool_name[LOV_MAXPOOLNAME]; /* pool name */
- } lsm_wire;
-
+ __u64 lsm_maxbytes;
+ struct ost_id lsm_oi;
+ __u32 lsm_magic;
+ __u32 lsm_stripe_size;
+ __u32 lsm_pattern; /* striping pattern (RAID0, RAID1) */
+ __u16 lsm_stripe_count;
+ __u16 lsm_layout_gen;
+ char lsm_pool_name[LOV_MAXPOOLNAME + 1];
struct lov_oinfo *lsm_oinfo[0];
};
-#define lsm_oi lsm_wire.lw_object_oi
-#define lsm_magic lsm_wire.lw_magic
-#define lsm_layout_gen lsm_wire.lw_layout_gen
-#define lsm_stripe_size lsm_wire.lw_stripe_size
-#define lsm_pattern lsm_wire.lw_pattern
-#define lsm_stripe_count lsm_wire.lw_stripe_count
-#define lsm_pool_name lsm_wire.lw_pool_name
-
static inline bool lsm_is_released(struct lov_stripe_md *lsm)
{
return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
@@ -177,31 +162,10 @@ struct obd_type {
struct brw_page {
u64 off;
struct page *pg;
- int count;
+ unsigned int count;
u32 flag;
};
-/* llog contexts */
-enum llog_ctxt_id {
- LLOG_CONFIG_ORIG_CTXT = 0,
- LLOG_CONFIG_REPL_CTXT,
- LLOG_MDS_OST_ORIG_CTXT,
- LLOG_MDS_OST_REPL_CTXT,
- LLOG_SIZE_ORIG_CTXT,
- LLOG_SIZE_REPL_CTXT,
- LLOG_RD1_ORIG_CTXT,
- LLOG_RD1_REPL_CTXT,
- LLOG_TEST_ORIG_CTXT,
- LLOG_TEST_REPL_CTXT,
- LLOG_LOVEA_ORIG_CTXT,
- LLOG_LOVEA_REPL_CTXT,
- LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
- LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
- LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
- LLOG_AGENT_ORIG_CTXT, /**< agent requests generation on cdt */
- LLOG_MAX_CTXTS
-};
-
struct timeout_item {
enum timeout_event ti_event;
unsigned long ti_timeout;
@@ -211,11 +175,12 @@ struct timeout_item {
struct list_head ti_chain;
};
-#define OSC_MAX_RIF_DEFAULT 8
-#define OSC_MAX_RIF_MAX 256
-#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
-#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
-#define OSC_DEFAULT_RESENDS 10
+#define OBD_MAX_RIF_DEFAULT 8
+#define OBD_MAX_RIF_MAX 512
+#define OSC_MAX_RIF_MAX 256
+#define OSC_MAX_DIRTY_DEFAULT (OBD_MAX_RIF_DEFAULT * 4)
+#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
+#define OSC_DEFAULT_RESENDS 10
/* possible values for fo_sync_lock_cancel */
enum {
@@ -225,40 +190,74 @@ enum {
NUM_SYNC_ON_CANCEL_STATES
};
-#define MDC_MAX_RIF_DEFAULT 8
-#define MDC_MAX_RIF_MAX 512
-
enum obd_cl_sem_lock_class {
OBD_CLI_SEM_NORMAL,
OBD_CLI_SEM_MGC,
OBD_CLI_SEM_MDCOSC,
};
+/*
+ * Limit reply buffer size for striping data to one x86_64 page. This
+ * value is chosen to fit the striping data for common use cases while
+ * staying well below the limit at which the buffer must be backed by
+ * vmalloc(). Excessive use of vmalloc() may cause spinlock contention
+ * on the MDS.
+ */
+#define OBD_MAX_DEFAULT_EA_SIZE 4096
+#define OBD_MAX_DEFAULT_COOKIE_SIZE 4096
+
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
struct rw_semaphore cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
- int cl_conn_count;
- /* max_mds_easize is purely a performance thing so we don't have to
- * call obd_size_diskmd() all the time.
+ size_t cl_conn_count;
+ /*
+ * Cache maximum and default values for easize and cookiesize. This is
+ * strictly a performance optimization to minimize calls to
+ * obd_size_diskmd(). The default values are used to calculate the
+ * initial size of a request buffer. The ptlrpc layer will resize the
+ * buffer as needed to accommodate a larger reply from the
+ * server. The default values should be small enough to avoid wasted
+ * memory and excessive use of vmalloc(), yet large enough to avoid
+ * reallocating the buffer in the common use case.
*/
- int cl_default_mds_easize;
- int cl_max_mds_easize;
- int cl_default_mds_cookiesize;
- int cl_max_mds_cookiesize;
+ /*
+ * Default EA size for striping attributes. It is initialized at
+ * mount-time based on the default stripe width of the filesystem,
+ * then it tracks the largest observed EA size advertised by
+ * the MDT, up to a maximum value of OBD_MAX_DEFAULT_EA_SIZE.
+ */
+ u32 cl_default_mds_easize;
+ /* Maximum possible EA size computed at mount-time based on
+ * the number of OSTs in the filesystem. May be increased at
+ * run-time if a larger observed size is advertised by the MDT.
+ */
+ u32 cl_max_mds_easize;
+ /* Default cookie size for llog cookies (see struct llog_cookie). It is
+ * initialized to zero at mount-time, then it tracks the largest
+ * observed cookie size advertised by the MDT, up to a maximum value of
+ * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not
+ * used by clients communicating with MDS versions 2.4.0 and later.
+ */
+ u32 cl_default_mds_cookiesize;
+ /* Maximum possible cookie size computed at mount-time based on
+ * the number of OSTs in the filesystem. May be increased at
+ * run-time if a larger observed size is advertised by the MDT.
+ */
+ u32 cl_max_mds_cookiesize;
enum lustre_sec_part cl_sp_me;
enum lustre_sec_part cl_sp_to;
struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
/* the grant values are protected by loi_list_lock below */
- long cl_dirty; /* all _dirty_ in bytes */
- long cl_dirty_max; /* allowed w/o rpc */
- long cl_dirty_transit; /* dirty synchronous */
- long cl_avail_grant; /* bytes of credit for ost */
- long cl_lost_grant; /* lost credits (trunc) */
+ unsigned long cl_dirty_pages; /* all _dirty_ in pages */
+ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
+ unsigned long cl_dirty_transit; /* dirty synchronous */
+ unsigned long cl_avail_grant; /* bytes of credit for ost */
+ unsigned long cl_lost_grant; /* lost credits (trunc) */
/* since we allocate grant by blocks, we don't know how many grant will
* be used to add a page into cache. As a solution, we reserve maximum
@@ -275,8 +274,7 @@ struct client_obd {
* the extent size. A chunk is max(PAGE_SIZE, OST block size)
*/
int cl_chunkbits;
- int cl_chunk;
- int cl_extent_tax; /* extent overhead, by bytes */
+ unsigned int cl_extent_tax; /* extent overhead, by bytes */
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
@@ -301,13 +299,13 @@ struct client_obd {
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
struct list_head cl_loi_read_list;
- int cl_r_in_flight;
- int cl_w_in_flight;
+ __u32 cl_r_in_flight;
+ __u32 cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by sysfs */
atomic_t cl_pending_w_pages;
atomic_t cl_pending_r_pages;
__u32 cl_max_pages_per_rpc;
- int cl_max_rpcs_in_flight;
+ __u32 cl_max_rpcs_in_flight;
struct obd_histogram cl_read_rpc_hist;
struct obd_histogram cl_write_rpc_hist;
struct obd_histogram cl_read_page_hist;
@@ -318,13 +316,13 @@ struct client_obd {
/* lru for osc caching pages */
struct cl_client_cache *cl_cache;
struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
- atomic_t *cl_lru_left;
- atomic_t cl_lru_busy;
+ atomic_long_t *cl_lru_left;
+ atomic_long_t cl_lru_busy;
+ atomic_long_t cl_lru_in_list;
atomic_t cl_lru_shrinkers;
- atomic_t cl_lru_in_list;
struct list_head cl_lru_list; /* lru page list */
spinlock_t cl_lru_list_lock; /* page list protector */
- atomic_t cl_unstable_count;
+ atomic_long_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
@@ -350,7 +348,7 @@ struct client_obd {
/* used by quotacheck when the servers are older than 2.4 */
int cl_qchk_stat; /* quotacheck stat of the peer */
#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
-#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 7, 50, 0)
+#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE
#warning "please consider removing quotacheck compatibility code"
#endif
@@ -431,7 +429,7 @@ struct lov_obd {
struct lmv_tgt_desc {
struct obd_uuid ltd_uuid;
struct obd_export *ltd_exp;
- int ltd_idx;
+ u32 ltd_idx;
struct mutex ltd_fid_mutex;
unsigned long ltd_active:1; /* target up for requests */
};
@@ -458,9 +456,8 @@ struct lmv_obd {
int max_def_easize;
int max_cookiesize;
int max_def_cookiesize;
- int server_timeout;
- int tgts_size; /* size of tgts array */
+ u32 tgts_size; /* size of tgts array */
struct lmv_tgt_desc **tgts;
struct obd_connect_data conn_data;
@@ -470,12 +467,11 @@ struct lmv_obd {
struct niobuf_local {
__u64 lnb_file_offset;
__u32 lnb_page_offset;
- __u32 len;
- __u32 flags;
- struct page *page;
- struct dentry *dentry;
- int lnb_grant_used;
- int rc;
+ __u32 lnb_len;
+ __u32 lnb_flags;
+ struct page *lnb_page;
+ void *lnb_data;
+ int lnb_rc;
};
#define LUSTRE_FLD_NAME "fld"
@@ -517,7 +513,6 @@ struct niobuf_local {
#define N_LOCAL_TEMP_PAGE 0x10000000
struct obd_trans_info {
- __u64 oti_transno;
__u64 oti_xid;
/* Only used on the server side for tracking acks. */
struct oti_req_ack_lock {
@@ -527,50 +522,11 @@ struct obd_trans_info {
void *oti_handle;
struct llog_cookie oti_onecookie;
struct llog_cookie *oti_logcookies;
- int oti_numcookies;
- /** synchronous write is needed */
- unsigned long oti_sync_write:1;
- /* initial thread handling transaction */
- struct ptlrpc_thread *oti_thread;
- __u32 oti_conn_cnt;
/** VBR: versions */
__u64 oti_pre_version;
- /** JobID */
- char *oti_jobid;
-
- struct obd_uuid *oti_ost_uuid;
};
-static inline void oti_alloc_cookies(struct obd_trans_info *oti,
- int num_cookies)
-{
- if (!oti)
- return;
-
- if (num_cookies == 1)
- oti->oti_logcookies = &oti->oti_onecookie;
- else
- oti->oti_logcookies = libcfs_kvzalloc(num_cookies * sizeof(oti->oti_onecookie),
- GFP_NOFS);
-
- oti->oti_numcookies = num_cookies;
-}
-
-static inline void oti_free_cookies(struct obd_trans_info *oti)
-{
- if (!oti || !oti->oti_logcookies)
- return;
-
- if (oti->oti_logcookies == &oti->oti_onecookie)
- LASSERT(oti->oti_numcookies == 1);
- else
- kvfree(oti->oti_logcookies);
-
- oti->oti_logcookies = NULL;
- oti->oti_numcookies = 0;
-}
-
/*
* Events signalled through obd_notify() upcall-chain.
*/
@@ -616,7 +572,6 @@ struct target_recovery_data {
};
struct obd_llog_group {
- int olg_seq;
struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
wait_queue_head_t olg_waitq;
spinlock_t olg_lock;
@@ -625,7 +580,6 @@ struct obd_llog_group {
/* corresponds to one of the obd's */
#define OBD_DEVICE_MAGIC 0XAB5CD6EF
-#define OBD_DEV_BY_DEVNAME 0xffffd0de
struct lvfs_run_ctxt {
struct dt_device *dt;
@@ -653,7 +607,6 @@ struct obd_device {
obd_starting:1, /* started setup */
obd_force:1, /* cleanup with > 0 obd refcount */
obd_fail:1, /* cleanup with failover */
- obd_async_recov:1, /* allow asynchronous orphan cleanup */
obd_no_conn:1, /* deny new connections */
obd_inactive:1, /* device active/inactive
* (for sysfs status only!!)
@@ -728,9 +681,6 @@ struct obd_device {
struct completion obd_kobj_unregister;
};
-#define OBD_LLOG_FL_SENDNOW 0x0001
-#define OBD_LLOG_FL_EXIT 0x0002
-
enum obd_cleanup_stage {
/* Special case hack for MDS LOVs */
OBD_CLEANUP_EARLY,
@@ -740,8 +690,6 @@ enum obd_cleanup_stage {
/* get/set_info keys */
#define KEY_ASYNC "async"
-#define KEY_BLOCKSIZE_BITS "blocksize_bits"
-#define KEY_BLOCKSIZE "blocksize"
#define KEY_CHANGELOG_CLEAR "changelog_clear"
#define KEY_FID2PATH "fid2path"
#define KEY_CHECKSUM "checksum"
@@ -753,30 +701,22 @@ enum obd_cleanup_stage {
#define KEY_GRANT_SHRINK "grant_shrink"
#define KEY_HSM_COPYTOOL_SEND "hsm_send"
#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
-#define KEY_INIT_RECOV "initial_recov"
#define KEY_INTERMDS "inter_mds"
#define KEY_LAST_ID "last_id"
#define KEY_LAST_FID "last_fid"
-#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
#define KEY_LOVDESC "lovdesc"
-#define KEY_LOV_IDX "lov_idx"
#define KEY_MAX_EASIZE "max_easize"
#define KEY_DEFAULT_EASIZE "default_easize"
-#define KEY_MDS_CONN "mds_conn"
#define KEY_MGSSEC "mgssec"
-#define KEY_NEXT_ID "next_id"
#define KEY_READ_ONLY "read-only"
#define KEY_REGISTER_TARGET "register_target"
#define KEY_SET_FS "set_fs"
#define KEY_TGT_COUNT "tgt_count"
/* KEY_SET_INFO in lustre_idl.h */
#define KEY_SPTLRPC_CONF "sptlrpc_conf"
-#define KEY_CONNECT_FLAG "connect_flags"
-#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
#define KEY_CACHE_SET "cache_set"
#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
-#define KEY_CHANGELOG_INDEX "changelog_index"
struct lu_context;
@@ -801,9 +741,11 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
/* CREAT needs to be tested before open (both could be set) */
if (it->it_op & IT_CREAT)
return LCK_CW;
- else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
+ else if (it->it_op & (IT_GETATTR | IT_OPEN | IT_LOOKUP |
IT_LAYOUT))
return LCK_CR;
+ else if (it->it_op & IT_READDIR)
+ return LCK_PR;
else if (it->it_op & IT_GETXATTR)
return LCK_PR;
else if (it->it_op & IT_SETXATTR)
@@ -813,6 +755,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
return -EINVAL;
}
+enum md_cli_flags {
+ CLI_SET_MEA = BIT(0),
+ CLI_RM_ENTRY = BIT(1),
+ CLI_HASH64 = BIT(2),
+ CLI_API32 = BIT(3),
+ CLI_MIGRATE = BIT(4),
+};
+
struct md_op_data {
struct lu_fid op_fid1; /* operation fid1 (usually parent) */
struct lu_fid op_fid2; /* operation fid2 (usually child) */
@@ -822,7 +772,7 @@ struct md_op_data {
struct lustre_handle op_handle;
s64 op_mod_time;
const char *op_name;
- int op_namelen;
+ size_t op_namelen;
__u32 op_mode;
struct lmv_stripe_md *op_mea1;
struct lmv_stripe_md *op_mea2;
@@ -831,6 +781,7 @@ struct md_op_data {
__u32 op_fsgid;
cfs_cap_t op_cap;
void *op_data;
+ size_t op_data_size;
/* iattr fields and blocks. */
struct iattr op_attr;
@@ -845,28 +796,29 @@ struct md_op_data {
/* Various operation flags. */
enum mds_op_bias op_bias;
- /* Operation type */
- __u32 op_opc;
-
/* Used by readdir */
__u64 op_offset;
/* Used by readdir */
- __u32 op_npages;
+ __u32 op_max_pages;
/* used to transfer info between the stacks of MD client
* see enum op_cli_flags
*/
- __u32 op_cli_flags;
+ enum md_cli_flags op_cli_flags;
/* File object data version for HSM release, on client */
__u64 op_data_version;
struct lustre_handle op_lease_handle;
+
+ /* default stripe offset */
+ __u32 op_default_stripe_offset;
};
-enum op_cli_flags {
- CLI_SET_MEA = 1 << 0,
- CLI_RM_ENTRY = 1 << 1,
+struct md_callback {
+ int (*md_blocking_ast)(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc,
+ void *data, int flag);
};
struct md_enqueue_info;
@@ -879,8 +831,7 @@ struct md_enqueue_info {
struct inode *mi_dir;
int (*mi_cb)(struct ptlrpc_request *req,
struct md_enqueue_info *minfo, int rc);
- __u64 mi_cbdata;
- unsigned int mi_generation;
+ void *mi_cbdata;
};
struct obd_ops {
@@ -894,8 +845,6 @@ struct obd_ops {
__u32 keylen, void *key,
__u32 vallen, void *val,
struct ptlrpc_request_set *set);
- int (*attach)(struct obd_device *dev, u32 len, void *data);
- int (*detach)(struct obd_device *dev);
int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
int (*precleanup)(struct obd_device *dev,
enum obd_cleanup_stage cleanup_stage);
@@ -927,8 +876,8 @@ struct obd_ops {
int (*fid_fini)(struct obd_device *obd);
/* Allocate new fid according to passed @hint. */
- int (*fid_alloc)(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
+ int (*fid_alloc)(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data);
/*
* Object with @fid is getting deleted, we may want to do something
@@ -943,13 +892,10 @@ struct obd_ops {
int (*unpackmd)(struct obd_export *exp,
struct lov_stripe_md **mem_tgt,
struct lov_mds_md *disk_src, int disk_len);
- int (*preallocate)(struct lustre_handle *, u32 *req, u64 *ids);
int (*create)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti);
+ struct obdo *oa, struct obd_trans_info *oti);
int (*destroy)(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *ea,
- struct obd_trans_info *oti, struct obd_export *md_exp);
+ struct obdo *oa, struct obd_trans_info *oti);
int (*setattr)(const struct lu_env *, struct obd_export *exp,
struct obd_info *oinfo, struct obd_trans_info *oti);
int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
@@ -959,8 +905,6 @@ struct obd_ops {
struct obd_info *oinfo);
int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
struct ptlrpc_request_set *set);
- int (*adjust_kms)(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink);
int (*preprw)(const struct lu_env *env, int cmd,
struct obd_export *exp, struct obdo *oa, int objcount,
struct obd_ioobj *obj, struct niobuf_remote *remote,
@@ -972,8 +916,6 @@ struct obd_ops {
struct niobuf_remote *remote, int pages,
struct niobuf_local *local,
struct obd_trans_info *oti, int rc);
- int (*find_cbdata)(struct obd_export *, struct lov_stripe_md *,
- ldlm_iterator_t it, void *data);
int (*init_export)(struct obd_export *exp);
int (*destroy_export)(struct obd_export *exp);
@@ -1009,27 +951,11 @@ struct obd_ops {
*/
};
-enum {
- LUSTRE_OPC_MKDIR = (1 << 0),
- LUSTRE_OPC_SYMLINK = (1 << 1),
- LUSTRE_OPC_MKNOD = (1 << 2),
- LUSTRE_OPC_CREATE = (1 << 3),
- LUSTRE_OPC_ANY = (1 << 4)
-};
-
/* lmv structures */
-#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
-#define MEA_MAGIC_ALL_CHARS 0xb222a11c
-#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
-
-#define MAX_HASH_SIZE_32 0x7fffffffUL
-#define MAX_HASH_SIZE 0x7fffffffffffffffULL
-#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
-
struct lustre_md {
struct mdt_body *body;
struct lov_stripe_md *lsm;
- struct lmv_stripe_md *mea;
+ struct lmv_stripe_md *lmv;
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *posix_acl;
#endif
@@ -1044,48 +970,55 @@ struct md_open_data {
bool mod_is_create;
};
+struct obd_client_handle {
+ struct lustre_handle och_fh;
+ struct lu_fid och_fid;
+ struct md_open_data *och_mod;
+ struct lustre_handle och_lease_handle; /* open lock for lease */
+ __u32 och_magic;
+ int och_flags;
+};
+
+#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
+
struct lookup_intent;
+struct cl_attr;
struct md_ops {
int (*getstatus)(struct obd_export *, struct lu_fid *);
int (*null_inode)(struct obd_export *, const struct lu_fid *);
- int (*find_cbdata)(struct obd_export *, const struct lu_fid *,
- ldlm_iterator_t, void *);
int (*close)(struct obd_export *, struct md_op_data *,
struct md_open_data *, struct ptlrpc_request **);
int (*create)(struct obd_export *, struct md_op_data *,
- const void *, int, int, __u32, __u32, cfs_cap_t,
- __u64, struct ptlrpc_request **);
+ const void *, size_t, umode_t, uid_t, gid_t,
+ cfs_cap_t, __u64, struct ptlrpc_request **);
int (*done_writing)(struct obd_export *, struct md_op_data *,
struct md_open_data *);
int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
+ const ldlm_policy_data_t *,
struct lookup_intent *, struct md_op_data *,
- struct lustre_handle *, void *, int,
- struct ptlrpc_request **, __u64);
+ struct lustre_handle *, __u64);
int (*getattr)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*getattr_name)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*intent_lock)(struct obd_export *, struct md_op_data *,
- void *, int, struct lookup_intent *, int,
+ struct lookup_intent *,
struct ptlrpc_request **,
ldlm_blocking_callback, __u64);
int (*link)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*rename)(struct obd_export *, struct md_op_data *,
- const char *, int, const char *, int,
+ const char *, size_t, const char *, size_t,
struct ptlrpc_request **);
- int (*is_subdir)(struct obd_export *, const struct lu_fid *,
- const struct lu_fid *,
- struct ptlrpc_request **);
int (*setattr)(struct obd_export *, struct md_op_data *, void *,
- int, void *, int, struct ptlrpc_request **,
+ size_t, void *, size_t, struct ptlrpc_request **,
struct md_open_data **mod);
int (*sync)(struct obd_export *, const struct lu_fid *,
struct ptlrpc_request **);
- int (*readpage)(struct obd_export *, struct md_op_data *,
- struct page **, struct ptlrpc_request **);
-
+ int (*read_page)(struct obd_export *, struct md_op_data *,
+ struct md_callback *cb_op, __u64 hash_offset,
+ struct page **ppage);
int (*unlink)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
@@ -1097,7 +1030,7 @@ struct md_ops {
u64, const char *, const char *, int, int, int,
struct ptlrpc_request **);
- int (*init_ea_size)(struct obd_export *, int, int, int, int);
+ int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32);
int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
struct obd_export *, struct obd_export *,
@@ -1105,12 +1038,17 @@ struct md_ops {
int (*free_lustre_md)(struct obd_export *, struct lustre_md *);
+ int (*merge_attr)(struct obd_export *,
+ const struct lmv_stripe_md *lsm,
+ struct cl_attr *attr, ldlm_blocking_callback);
+
int (*set_open_replay_data)(struct obd_export *,
struct obd_client_handle *,
struct lookup_intent *);
int (*clear_open_replay_data)(struct obd_export *,
struct obd_client_handle *);
- int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
+ int (*set_lock_data)(struct obd_export *, const struct lustre_handle *,
+ void *, __u64 *);
enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
const struct lu_fid *, enum ldlm_type,
@@ -1121,6 +1059,11 @@ struct md_ops {
ldlm_policy_data_t *, enum ldlm_mode,
enum ldlm_cancel_flags flags, void *opaque);
+ int (*get_fid_from_lsm)(struct obd_export *,
+ const struct lmv_stripe_md *,
+ const char *name, int namelen,
+ struct lu_fid *fid);
+
int (*intent_getattr_async)(struct obd_export *,
struct md_enqueue_info *,
struct ldlm_enqueue_info *);
@@ -1137,8 +1080,6 @@ struct md_ops {
struct lsm_operations {
void (*lsm_free)(struct lov_stripe_md *);
- int (*lsm_destroy)(struct lov_stripe_md *, struct obdo *oa,
- struct obd_export *md_exp);
void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
u64 *);
void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
@@ -1164,10 +1105,6 @@ static inline const struct lsm_operations *lsm_op_find(int magic)
}
}
-/* Requests for obd_extent_calc() */
-#define OBD_CALC_STRIPE_START 1
-#define OBD_CALC_STRIPE_END 2
-
static inline struct md_open_data *obd_mod_alloc(void)
{
struct md_open_data *mod;
@@ -1211,7 +1148,8 @@ static inline const char *lu_dev_name(const struct lu_device *lu_dev)
return lu_dev->ld_obd->obd_name;
}
-static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
+static inline bool filename_is_volatile(const char *name, size_t namelen,
+ int *idx)
{
const char *start;
char *end;
@@ -1259,4 +1197,28 @@ static inline int cli_brw_size(struct obd_device *obd)
return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
+/*
+ * When the RPC size or the max RPCs in flight is increased, the max dirty
+ * pages of the client should be increased accordingly, so that the client
+ * does not run out of dirty space and send fragmented RPCs over the network
+ * while that many RPCs are being generated.
+ */
+static inline void client_adjust_max_dirty(struct client_obd *cli)
+{
+ /* initializing */
+ if (cli->cl_dirty_max_pages <= 0)
+ cli->cl_dirty_max_pages =
+ (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT;
+ else {
+ unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
+ cli->cl_max_pages_per_rpc;
+
+ if (dirty_max > cli->cl_dirty_max_pages)
+ cli->cl_dirty_max_pages = dirty_max;
+ }
+
+ if (cli->cl_dirty_max_pages > totalram_pages / 8)
+ cli->cl_dirty_max_pages = totalram_pages / 8;
+}
+
#endif /* __OBD_H */
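To make the client_adjust_max_dirty() arithmetic above concrete: with the defaults introduced in this patch (OSC_MAX_DIRTY_DEFAULT = OBD_MAX_RIF_DEFAULT * 4 = 32 MB) and 4 KB pages, the initial value is (32 << 20) >> 12 = 8192 pages; it is then raised to cl_max_rpcs_in_flight * cl_max_pages_per_rpc if that product is larger, and finally capped at totalram_pages / 8. A standalone sketch of the same arithmetic; the in-flight, per-RPC and RAM figures are assumed values for illustration only:

#include <stdio.h>

#define OBD_MAX_RIF_DEFAULT	8
#define OSC_MAX_DIRTY_DEFAULT	(OBD_MAX_RIF_DEFAULT * 4)	/* in MB */
#define PAGE_SHIFT_ASSUMED	12				/* 4 KB pages */

int main(void)
{
	unsigned long dirty_max =
		(OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT_ASSUMED;
	/* hypothetical client tunables, just for the worked example */
	unsigned long max_rpcs_in_flight = 8, max_pages_per_rpc = 1024;
	unsigned long rpc_dirty = max_rpcs_in_flight * max_pages_per_rpc;
	unsigned long totalram_pages_assumed = 4UL << 20;	/* 16 GB of RAM */

	if (rpc_dirty > dirty_max)
		dirty_max = rpc_dirty;
	if (dirty_max > totalram_pages_assumed / 8)
		dirty_max = totalram_pages_assumed / 8;

	printf("cl_dirty_max_pages would be %lu pages (%lu MB)\n",
	       dirty_max, dirty_max >> (20 - PAGE_SHIFT_ASSUMED));
	return 0;
}

With these numbers both limits agree at 8192 pages (32 MB), so the totalram_pages / 8 cap (524288 pages here) never triggers.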
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 6482a937000b..16094dbec08b 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -56,7 +56,6 @@
#define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving
* information from MDT0.
*/
-#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
@@ -97,6 +96,11 @@ int obd_zombie_impexp_init(void);
void obd_zombie_impexp_stop(void);
void obd_zombie_barrier(void);
+int obd_get_request_slot(struct client_obd *cli);
+void obd_put_request_slot(struct client_obd *cli);
+__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
+int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
+
struct llog_handle;
struct llog_rec_hdr;
typedef int (*llog_cb_t)(const struct lu_env *, struct llog_handle *,
@@ -265,10 +269,10 @@ static inline int lprocfs_climp_check(struct obd_device *obd)
struct inode;
struct lu_attr;
struct obdo;
-void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid);
+void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid);
-void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj);
-void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid);
+void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
+void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid);
#define OBT(dev) (dev)->obd_type
#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->op
@@ -673,15 +677,6 @@ static inline int obd_unpackmd(struct obd_export *exp,
return rc;
}
-/* helper functions */
-static inline int obd_alloc_memmd(struct obd_export *exp,
- struct lov_stripe_md **mem_tgt)
-{
- LASSERT(mem_tgt);
- LASSERT(!*mem_tgt);
- return obd_unpackmd(exp, mem_tgt, NULL, 0);
-}
-
static inline int obd_free_memmd(struct obd_export *exp,
struct lov_stripe_md **mem_tgt)
{
@@ -695,29 +690,26 @@ static inline int obd_free_memmd(struct obd_export *exp,
}
static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
+ struct obdo *obdo, struct obd_trans_info *oti)
{
int rc;
EXP_CHECK_DT_OP(exp, create);
EXP_COUNTER_INCREMENT(exp, create);
- rc = OBP(exp->exp_obd, create)(env, exp, obdo, ea, oti);
+ rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti);
return rc;
}
static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *obdo, struct lov_stripe_md *ea,
- struct obd_trans_info *oti,
- struct obd_export *md_exp)
+ struct obdo *obdo, struct obd_trans_info *oti)
{
int rc;
EXP_CHECK_DT_OP(exp, destroy);
EXP_COUNTER_INCREMENT(exp, destroy);
- rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, ea, oti, md_exp);
+ rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti);
return rc;
}
@@ -925,7 +917,8 @@ static inline int obd_fid_fini(struct obd_device *obd)
return rc;
}
-static inline int obd_fid_alloc(struct obd_export *exp,
+static inline int obd_fid_alloc(const struct lu_env *env,
+ struct obd_export *exp,
struct lu_fid *fid,
struct md_op_data *op_data)
{
@@ -934,7 +927,7 @@ static inline int obd_fid_alloc(struct obd_export *exp,
EXP_CHECK_DT_OP(exp, fid_alloc);
EXP_COUNTER_INCREMENT(exp, fid_alloc);
- rc = OBP(exp->exp_obd, fid_alloc)(exp, fid, op_data);
+ rc = OBP(exp->exp_obd, fid_alloc)(env, exp, fid, op_data);
return rc;
}
@@ -1147,19 +1140,6 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
return rc;
}
-static inline int obd_adjust_kms(struct obd_export *exp,
- struct lov_stripe_md *lsm, u64 size,
- int shrink)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, adjust_kms);
- EXP_COUNTER_INCREMENT(exp, adjust_kms);
-
- rc = OBP(exp->exp_obd, adjust_kms)(exp, lsm, size, shrink);
- return rc;
-}
-
static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
int len, void *karg, void __user *uarg)
{
@@ -1172,19 +1152,6 @@ static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
return rc;
}
-static inline int obd_find_cbdata(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- ldlm_iterator_t it, void *data)
-{
- int rc;
-
- EXP_CHECK_DT_OP(exp, find_cbdata);
- EXP_COUNTER_INCREMENT(exp, find_cbdata);
-
- rc = OBP(exp->exp_obd, find_cbdata)(exp, lsm, it, data);
- return rc;
-}
-
static inline void obd_import_event(struct obd_device *obd,
struct obd_import *imp,
enum obd_import_event event)
@@ -1210,12 +1177,7 @@ static inline int obd_notify(struct obd_device *obd,
if (rc)
return rc;
- /* the check for async_recov is a complete hack - I'm hereby
- * overloading the meaning to also mean "this was called from
- * mds_postsetup". I know that my mds is able to handle notifies
- * by this point, and it needs to get them to execute mds_postrecov.
- */
- if (!obd->obd_set_up && !obd->obd_async_recov) {
+ if (!obd->obd_set_up) {
CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
return -EINVAL;
}
@@ -1358,18 +1320,6 @@ static inline int md_null_inode(struct obd_export *exp,
return rc;
}
-static inline int md_find_cbdata(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, find_cbdata);
- EXP_MD_COUNTER_INCREMENT(exp, find_cbdata);
- rc = MDP(exp->exp_obd, find_cbdata)(exp, fid, it, data);
- return rc;
-}
-
static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod,
struct ptlrpc_request **request)
@@ -1383,9 +1333,9 @@ static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
}
static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid,
- __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
+ const void *data, size_t datalen, umode_t mode,
+ uid_t uid, gid_t gid, cfs_cap_t cap_effective,
+ __u64 rdev, struct ptlrpc_request **request)
{
int rc;
@@ -1410,19 +1360,18 @@ static inline int md_done_writing(struct obd_export *exp,
static inline int md_enqueue(struct obd_export *exp,
struct ldlm_enqueue_info *einfo,
+ const ldlm_policy_data_t *policy,
struct lookup_intent *it,
struct md_op_data *op_data,
struct lustre_handle *lockh,
- void *lmm, int lmmsize,
- struct ptlrpc_request **req,
__u64 extra_lock_flags)
{
int rc;
EXP_CHECK_MD_OP(exp, enqueue);
EXP_MD_COUNTER_INCREMENT(exp, enqueue);
- rc = MDP(exp->exp_obd, enqueue)(exp, einfo, it, op_data, lockh,
- lmm, lmmsize, req, extra_lock_flags);
+ rc = MDP(exp->exp_obd, enqueue)(exp, einfo, policy, it, op_data, lockh,
+ extra_lock_flags);
return rc;
}
@@ -1439,9 +1388,9 @@ static inline int md_getattr_name(struct obd_export *exp,
}
static inline int md_intent_lock(struct obd_export *exp,
- struct md_op_data *op_data, void *lmm,
- int lmmsize, struct lookup_intent *it,
- int lookup_flags, struct ptlrpc_request **reqp,
+ struct md_op_data *op_data,
+ struct lookup_intent *it,
+ struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
@@ -1449,9 +1398,8 @@ static inline int md_intent_lock(struct obd_export *exp,
EXP_CHECK_MD_OP(exp, intent_lock);
EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
- rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
- it, lookup_flags, reqp, cb_blocking,
- extra_lock_flags);
+ rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, it, reqp,
+ cb_blocking, extra_lock_flags);
return rc;
}
@@ -1467,8 +1415,8 @@ static inline int md_link(struct obd_export *exp, struct md_op_data *op_data,
}
static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new,
- int newlen, struct ptlrpc_request **request)
+ const char *old, size_t oldlen, const char *new,
+ size_t newlen, struct ptlrpc_request **request)
{
int rc;
@@ -1479,21 +1427,8 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static inline int md_is_subdir(struct obd_export *exp,
- const struct lu_fid *pfid,
- const struct lu_fid *cfid,
- struct ptlrpc_request **request)
-{
- int rc;
-
- EXP_CHECK_MD_OP(exp, is_subdir);
- EXP_MD_COUNTER_INCREMENT(exp, is_subdir);
- rc = MDP(exp->exp_obd, is_subdir)(exp, pfid, cfid, request);
- return rc;
-}
-
static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
+ void *ea, size_t ealen, void *ea2, size_t ea2len,
struct ptlrpc_request **request,
struct md_open_data **mod)
{
@@ -1517,15 +1452,18 @@ static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid,
return rc;
}
-static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
- struct page **pages,
- struct ptlrpc_request **request)
+static inline int md_read_page(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_callback *cb_op,
+ __u64 hash_offset,
+ struct page **ppage)
{
int rc;
- EXP_CHECK_MD_OP(exp, readpage);
- EXP_MD_COUNTER_INCREMENT(exp, readpage);
- rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
+ EXP_CHECK_MD_OP(exp, read_page);
+ EXP_MD_COUNTER_INCREMENT(exp, read_page);
+ rc = MDP(exp->exp_obd, read_page)(exp, op_data, cb_op, hash_offset,
+ ppage);
return rc;
}
@@ -1559,6 +1497,16 @@ static inline int md_free_lustre_md(struct obd_export *exp,
return MDP(exp->exp_obd, free_lustre_md)(exp, md);
}
+static inline int md_merge_attr(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ struct cl_attr *attr,
+ ldlm_blocking_callback cb)
+{
+ EXP_CHECK_MD_OP(exp, merge_attr);
+ EXP_MD_COUNTER_INCREMENT(exp, merge_attr);
+ return MDP(exp->exp_obd, merge_attr)(exp, lsm, attr, cb);
+}
+
static inline int md_setxattr(struct obd_export *exp, const struct lu_fid *fid,
u64 valid, const char *name,
const char *input, int input_size,
@@ -1603,7 +1551,8 @@ static inline int md_clear_open_replay_data(struct obd_export *exp,
}
static inline int md_set_lock_data(struct obd_export *exp,
- __u64 *lockh, void *data, __u64 *bits)
+ const struct lustre_handle *lockh,
+ void *data, __u64 *bits)
{
EXP_CHECK_MD_OP(exp, set_lock_data);
EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
@@ -1674,6 +1623,19 @@ static inline int md_revalidate_lock(struct obd_export *exp,
return rc;
}
+static inline int md_get_fid_from_lsm(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ const char *name, int namelen,
+ struct lu_fid *fid)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, get_fid_from_lsm);
+ EXP_MD_COUNTER_INCREMENT(exp, get_fid_from_lsm);
+ rc = MDP(exp->exp_obd, get_fid_from_lsm)(exp, lsm, name, namelen, fid);
+ return rc;
+}
+
/* OBD Metadata Support */
int obd_init_caches(void);
@@ -1682,16 +1644,6 @@ void obd_cleanup_caches(void);
/* support routines */
extern struct kmem_cache *obdo_cachep;
-static inline void obdo2fid(struct obdo *oa, struct lu_fid *fid)
-{
- /* something here */
-}
-
-static inline void fid2obdo(struct lu_fid *fid, struct obdo *oa)
-{
- /* something here */
-}
-
typedef int (*register_lwp_cb)(void *data);
struct lwp_register_item {
@@ -1710,6 +1662,9 @@ struct lwp_register_item {
extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
/* obd_mount.c */
+int lustre_unregister_fs(void);
+int lustre_register_fs(void);
+int lustre_check_exclusion(struct super_block *sb, char *svname);
/* sysctl.c */
int obd_sysctl_init(void);
@@ -1730,8 +1685,24 @@ void class_exit_uuidlist(void);
extern char obd_jobid_node[];
extern struct miscdevice obd_psdev;
extern spinlock_t obd_types_lock;
+int class_procfs_init(void);
+int class_procfs_clean(void);
/* prng.c */
#define ll_generate_random_uuid(uuid_out) cfs_get_random_bytes(uuid_out, sizeof(class_uuid_t))
+/* statfs_pack.c */
+struct kstatfs;
+void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
+void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
+
+/* root squash info */
+struct rw_semaphore;
+struct root_squash_info {
+ uid_t rsi_uid;
+ gid_t rsi_gid;
+ struct list_head rsi_nosquash_nids;
+ struct rw_semaphore rsi_sem;
+};
+
#endif /* __LINUX_OBD_CLASS_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 845e64a56c21..b346a7f10aa4 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -35,7 +35,7 @@
#include <linux/slab.h>
#include "../../include/linux/libcfs/libcfs.h"
-#include "linux/lustre_compat25.h"
+#include "lustre_compat.h"
#include "lprocfs_status.h"
/* global variables */
@@ -52,11 +52,9 @@ extern unsigned int at_max;
extern unsigned int at_history;
extern int at_early_margin;
extern int at_extra;
-extern unsigned int obd_sync_filter;
-extern unsigned int obd_max_dirty_pages;
-extern atomic_t obd_unstable_pages;
-extern atomic_t obd_dirty_pages;
-extern atomic_t obd_dirty_transit_pages;
+extern unsigned long obd_max_dirty_pages;
+extern atomic_long_t obd_dirty_pages;
+extern atomic_long_t obd_dirty_transit_pages;
extern char obd_jobid_var[];
/* Some hash init argument constants */
@@ -117,17 +115,17 @@ extern char obd_jobid_var[];
* running on a backup server. (If it's too low, import_select_connection
* will increase the timeout anyhow.)
*/
-#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
+#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20)
/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
INITIAL_CONNECT_TIMEOUT)
/* The min time a target should wait for clients to reconnect in recovery */
-#define OBD_RECOVERY_TIME_MIN (2*RECONNECT_DELAY_MAX)
+#define OBD_RECOVERY_TIME_MIN (2 * RECONNECT_DELAY_MAX)
#define OBD_IR_FACTOR_MIN 1
#define OBD_IR_FACTOR_MAX 10
-#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX/2)
+#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX / 2)
/* default timeout for the MGS to become IR_FULL */
-#define OBD_IR_MGS_TIMEOUT (4*obd_timeout)
+#define OBD_IR_MGS_TIMEOUT (4 * obd_timeout)
#define LONG_UNLINK 300 /* Unlink should happen before now */
/**
@@ -318,6 +316,10 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
#define OBD_FAIL_LDLM_OST_LVB 0x31c
#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
+#define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320
+#define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321
+#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
+#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -400,6 +402,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_MDC_GETATTR_ENQUEUE 0x803
#define OBD_FAIL_MDC_RPCS_SEM 0x804
#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
+#define OBD_FAIL_MDC_CLOSE 0x806
#define OBD_FAIL_MGS 0x900
#define OBD_FAIL_MGS_ALL_REQUEST_NET 0x901
@@ -455,6 +458,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LOV_INIT 0x1403
#define OBD_FAIL_GLIMPSE_DELAY 0x1404
#define OBD_FAIL_LLITE_XATTR_ENOMEM 0x1405
+#define OBD_FAIL_GETATTR_DELAY 0x1409
#define OBD_FAIL_FID_INDIR 0x1501
#define OBD_FAIL_FID_INLMA 0x1502
@@ -474,11 +478,16 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LFSCK_CRASH 0x160a
#define OBD_FAIL_LFSCK_NO_AUTO 0x160b
#define OBD_FAIL_LFSCK_NO_DOUBLESCAN 0x160c
+#define OBD_FAIL_LFSCK_INVALID_PFID 0x1619
+#define OBD_FAIL_LFSCK_BAD_NAME_HASH 0x1628
/* UPDATE */
#define OBD_FAIL_UPDATE_OBJ_NET 0x1700
#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+/* LMV */
+#define OBD_FAIL_UNKNOWN_LMV_STRIPE 0x1901
+
/* Assign references to moved code to reduce code changes */
#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id)
#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id)
@@ -520,7 +529,8 @@ do { \
POISON_PTR(ptr); \
} while (0)
-#define KEY_IS(str) \
- (keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
+#define KEY_IS(str) \
+ (keylen >= (sizeof(str) - 1) && \
+ memcmp(key, str, (sizeof(str) - 1)) == 0)
#endif
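The reformatted KEY_IS() macro above assumes local variables named key and keylen, as found in the obd get_info/set_info paths. A hedged sketch of how a handler typically uses it; demo_get_info() is an illustrative stand-in, not a real Lustre entry point:

#include <stdio.h>
#include <string.h>

#define KEY_MAX_EASIZE	"max_easize"

#define KEY_IS(str)						\
	(keylen >= (sizeof(str) - 1) &&				\
	 memcmp(key, str, (sizeof(str) - 1)) == 0)

/* Illustrative handler: pick a branch based on the requested key. */
static int demo_get_info(const char *key, size_t keylen)
{
	if (KEY_IS(KEY_MAX_EASIZE)) {
		printf("would return the cached max EA size\n");
		return 0;
	}
	return -22;	/* -EINVAL: unknown key */
}

int main(void)
{
	demo_get_info(KEY_MAX_EASIZE, strlen(KEY_MAX_EASIZE));
	return 0;
}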
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index f4a70ebddeaf..e134ecd21bb2 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -90,6 +90,17 @@ static inline int extent_equal(struct interval_node_extent *e1,
return (e1->start == e2->start) && (e1->end == e2->end);
}
+static inline int extent_overlapped(struct interval_node_extent *e1,
+ struct interval_node_extent *e2)
+{
+ return (e1->start <= e2->end) && (e2->start <= e1->end);
+}
+
+static inline int node_equal(struct interval_node *n1, struct interval_node *n2)
+{
+ return extent_equal(&n1->in_extent, &n2->in_extent);
+}
+
static inline __u64 max_u64(__u64 x, __u64 y)
{
return x > y ? x : y;
@@ -262,7 +273,7 @@ struct interval_node *interval_insert(struct interval_node *node,
p = root;
while (*p) {
parent = *p;
- if (extent_equal(&parent->in_extent, &node->in_extent))
+ if (node_equal(parent, node))
return parent;
/* max_high field must be updated after each iteration */
@@ -463,3 +474,90 @@ color:
interval_erase_color(child, parent, root);
}
EXPORT_SYMBOL(interval_erase);
+
+static inline int interval_may_overlap(struct interval_node *node,
+ struct interval_node_extent *ext)
+{
+ return (ext->start <= node->in_max_high &&
+ ext->end >= interval_low(node));
+}
+
+/*
+ * This function finds all intervals that overlap the interval ext
+ * and calls func on the resulting intervals one by one.
+ * In Lustre, it is used to find all conflicting locks in the
+ * granted queue and to add them to the AST work list.
+ * An equivalent recursive implementation, for reference:
+ * {
+ * if (!node)
+ * return 0;
+ * if (ext->end < interval_low(node)) {
+ * interval_search(node->in_left, ext, func, data);
+ * } else if (interval_may_overlap(node, ext)) {
+ * if (extent_overlapped(ext, &node->in_extent))
+ * func(node, data);
+ * interval_search(node->in_left, ext, func, data);
+ * interval_search(node->in_right, ext, func, data);
+ * }
+ * return 0;
+ * }
+ *
+ */
+enum interval_iter interval_search(struct interval_node *node,
+ struct interval_node_extent *ext,
+ interval_callback_t func,
+ void *data)
+{
+ enum interval_iter rc = INTERVAL_ITER_CONT;
+ struct interval_node *parent;
+
+ LASSERT(ext);
+ LASSERT(func);
+
+ while (node) {
+ if (ext->end < interval_low(node)) {
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ } else if (interval_may_overlap(node, ext)) {
+ if (extent_overlapped(ext, &node->in_extent)) {
+ rc = func(node, data);
+ if (rc == INTERVAL_ITER_STOP)
+ break;
+ }
+
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ if (node->in_right) {
+ node = node->in_right;
+ continue;
+ }
+ }
+
+ parent = node->in_parent;
+ while (parent) {
+ if (node_is_left_child(node) &&
+ parent->in_right) {
+ /*
+				 * If we ever went to the left, the parent
+				 * satisfied ext->end < interval_low(parent) or
+				 * may_overlap(parent). In the former case we
+				 * need not go back, so stop early and check
+				 * may_overlap(parent) after this loop.
+ */
+ node = parent->in_right;
+ break;
+ }
+ node = parent;
+ parent = parent->in_parent;
+ }
+ if (!parent || !interval_may_overlap(parent, ext))
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(interval_search);
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index ea8840cb9056..3845f386f1db 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -45,6 +45,8 @@
* being an atomic operation.
*/
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
+ __acquires(&lock->l_lock)
+ __acquires(&lock->l_resource->lr_lock)
{
spin_lock(&lock->l_lock);
@@ -59,6 +61,8 @@ EXPORT_SYMBOL(lock_res_and_lock);
* Unlock a lock and its resource previously locked with lock_res_and_lock
*/
void unlock_res_and_lock(struct ldlm_lock *lock)
+ __releases(&lock->l_resource->lr_lock)
+ __releases(&lock->l_lock)
{
/* on server-side resource of lock doesn't change */
ldlm_clear_res_locked(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index f5023d9b78f5..ecf472e4813d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -221,7 +221,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
}
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+ ldlm_policy_data_t *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -230,7 +230,7 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
}
void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+ ldlm_wire_policy_data_t *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_extent.start = lpolicy->l_extent.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index d6b61bc39135..861f36f039b5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -97,7 +97,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -166,7 +166,7 @@ reprocess:
*/
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = tmp;
break;
@@ -182,7 +182,7 @@ reprocess:
*/
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
if (!ownlocks)
@@ -339,10 +339,10 @@ reprocess:
lock->l_granted_mode, &null_cbs,
NULL, 0, LVB_T_NONE);
lock_res_and_lock(req);
- if (!new2) {
+ if (IS_ERR(new2)) {
ldlm_flock_destroy(req, lock->l_granted_mode,
*flags);
- *err = -ENOLCK;
+ *err = PTR_ERR(new2);
return LDLM_ITER_STOP;
}
goto reprocess;
@@ -455,29 +455,22 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
enum ldlm_error err;
int rc = 0;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
+ }
CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
flags, data, getlk);
- /* Import invalidation. We need to actually release the lock
- * references being held, so that it can go away. No point in
- * holding the lock even if app still believes it has it, since
- * server already dropped it anyway. Only for granted locks too.
- */
- if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
- (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
- if (lock->l_req_mode == lock->l_granted_mode &&
- lock->l_granted_mode != LCK_NL && !data)
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
- /* Need to wake up the waiter if we were evicted */
- wake_up(&lock->l_waitq);
- return 0;
- }
-
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (flags & LDLM_FL_FAILED)
+ goto granted;
+
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
if (!data)
/* mds granted the lock in the reply */
goto granted;
@@ -514,12 +507,21 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
- return -EIO;
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
+ lock_res_and_lock(lock);
+ /* DEADLOCK is always set with CBPENDING */
+ lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
+ }
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
+ lock_res_and_lock(lock);
+ /* DEADLOCK is always set with CBPENDING */
+ lock->l_flags |= LDLM_FL_FAIL_LOC |
+ LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
}
-
- LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res_and_lock(lock);
@@ -530,20 +532,59 @@ granted:
if (ldlm_is_destroyed(lock)) {
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
+ /*
+		 * An error still needs to be returned, to propagate it up
+		 * to the ldlm_cli_enqueue_fini() caller.
+ */
+ return -EIO;
}
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
- list_del_init(&lock->l_res_link);
+ ldlm_resource_unlink_lock(lock);
+
+ /*
+ * Import invalidation. We need to actually release the lock
+ * references being held, so that it can go away. No point in
+ * holding the lock even if app still believes it has it, since
+ * server already dropped it anyway. Only for granted locks too.
+ */
+ /* Do the same for DEADLOCK'ed locks. */
+ if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
+ int mode;
+
+ if (flags & LDLM_FL_TEST_LOCK)
+ LASSERT(ldlm_is_test_lock(lock));
+
+ if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
+ mode = getlk->fl_type;
+ else
+ mode = lock->l_granted_mode;
+
+ if (ldlm_is_flock_deadlock(lock)) {
+ LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ rc = -EDEADLK;
+ }
+ ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
+ unlock_res_and_lock(lock);
+
+ /* Need to wake up the waiter if we were evicted */
+ wake_up(&lock->l_waitq);
+
+ /*
+	 * An error still needs to be returned, to propagate it up
+	 * to the ldlm_cli_enqueue_fini() caller.
+ */
+ return rc ? : -EIO;
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue granted");
- if (ldlm_is_flock_deadlock(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue deadlock received");
- rc = -EDEADLK;
- } else if (flags & LDLM_FL_TEST_LOCK) {
+ if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.
*/
+ LASSERT(ldlm_is_test_lock(lock));
ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e4cf65d2d3b1..5e82cfc245b2 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -100,9 +100,10 @@ enum {
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
enum ldlm_cancel_flags sync, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- enum ldlm_cancel_flags cancel_flags, int flags);
-extern int ldlm_enqueue_min;
+ struct list_head *cancels, int count, int max,
+ enum ldlm_cancel_flags cancel_flags, int flags);
+extern unsigned int ldlm_enqueue_min;
+extern unsigned int ldlm_cancel_unused_locks_before_replay;
/* ldlm_resource.c */
int ldlm_resource_putref_locked(struct ldlm_resource *res);
@@ -200,8 +201,7 @@ ldlm_interval_extent(struct ldlm_interval *node)
LASSERT(!list_empty(&node->li_group));
- lock = list_entry(node->li_group.next, struct ldlm_lock,
- l_sl_policy);
+ lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
return &lock->l_policy_data.l_extent;
}
@@ -302,7 +302,7 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
lock_res_and_lock(lock);
if ((lock->l_req_mode == lock->l_granted_mode) &&
- !ldlm_is_cp_reqd(lock))
+ !ldlm_is_cp_reqd(lock))
ret = 1;
else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
@@ -326,13 +326,13 @@ void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
ldlm_wire_policy_data_t *wpolicy);
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
+ ldlm_wire_policy_data_t *wpolicy);
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
ldlm_wire_policy_data_t *wpolicy);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7c832aae7d5e..153e990c494e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -82,7 +82,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
if (priority) {
list_del(&item->oic_item);
list_add(&item->oic_item,
- &imp->imp_conn_list);
+ &imp->imp_conn_list);
item->oic_last_attempt = 0;
}
CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
@@ -102,7 +102,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
list_add(&imp_conn->oic_item, &imp->imp_conn_list);
else
list_add_tail(&imp_conn->oic_item,
- &imp->imp_conn_list);
+ &imp->imp_conn_list);
CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? "head" : "tail"));
@@ -299,12 +299,14 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
sizeof(server_uuid)));
- cli->cl_dirty = 0;
+ cli->cl_dirty_pages = 0;
cli->cl_avail_grant = 0;
- /* FIXME: Should limit this for the sum of all cl_dirty_max. */
- cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
- cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
+ /* FIXME: Should limit this for the sum of all cl_dirty_max_pages. */
+ /*
+ * cl_dirty_max_pages may be changed at connect time in
+ * ptlrpc_connect_interpret().
+ */
+ client_adjust_max_dirty(cli);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -326,11 +328,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
/* lru for osc. */
INIT_LIST_HEAD(&cli->cl_lru_osc);
atomic_set(&cli->cl_lru_shrinkers, 0);
- atomic_set(&cli->cl_lru_busy, 0);
- atomic_set(&cli->cl_lru_in_list, 0);
+ atomic_long_set(&cli->cl_lru_busy, 0);
+ atomic_long_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
spin_lock_init(&cli->cl_lru_list_lock);
- atomic_set(&cli->cl_unstable_count, 0);
+ atomic_long_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -360,7 +362,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_chunkbits = PAGE_SHIFT;
if (!strcmp(name, LUSTRE_MDC_NAME)) {
- cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
+ cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
@@ -368,7 +370,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
- cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
+ cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
}
rc = ldlm_get_ref();
if (rc) {
@@ -534,7 +536,7 @@ int client_disconnect_export(struct obd_export *exp)
imp = cli->cl_import;
down_write(&cli->cl_sem);
- CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
+ CDEBUG(D_INFO, "disconnect %s - %zu\n", obd->obd_name,
cli->cl_conn_count);
if (!cli->cl_conn_count) {
@@ -690,7 +692,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
if (rs->rs_transno > exp->exp_last_committed) {
/* not committed already */
list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
+ &exp->exp_uncommitted_replies);
}
spin_unlock(&exp->exp_uncommitted_replies_lock);
@@ -795,7 +797,7 @@ void ldlm_dump_export_locks(struct obd_export *exp)
CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
exp);
list_for_each_entry(lock, &exp->exp_locks_list,
- l_exp_refs_link)
+ l_exp_refs_link)
LDLM_ERROR(lock, "lock:");
}
spin_unlock(&exp->exp_locks_list_guard);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index a5993f745ebe..3c48b4fb96f1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -481,8 +481,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
unlock_res_and_lock(lock);
newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (!newres)
- return -ENOMEM;
+ if (IS_ERR(newres))
+ return PTR_ERR(newres);
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
@@ -542,7 +542,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(handle);
- lock = class_handle2object(handle->cookie);
+ lock = class_handle2object(handle->cookie, NULL);
if (!lock)
return NULL;
@@ -937,7 +937,7 @@ static void search_granted_lock(struct list_head *queue,
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
@@ -1028,15 +1028,28 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
check_res_locked(res);
lock->l_granted_mode = lock->l_req_mode;
+
+ if (work_list && lock->l_completion_ast)
+ ldlm_add_ast_work_item(lock, NULL, work_list);
+
if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
ldlm_grant_lock_with_skiplist(lock);
else if (res->lr_type == LDLM_EXTENT)
ldlm_extent_add_lock(res, lock);
- else
+ else if (res->lr_type == LDLM_FLOCK) {
+ /*
+ * We should not add locks to granted list in the following cases:
+		 * We should not add locks to the granted list in these cases:
+		 * - this is an UNLOCK but not a real lock;
+		 * - this is a TEST lock;
+		 * - this is an F_CANCELLK lock (async flock has req_mode == 0);
+		 * - this is a deadlock (the flock cannot be granted).
+ if (!lock->l_req_mode || lock->l_req_mode == LCK_NL ||
+ ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
+ return;
ldlm_resource_add_lock(res, &res->lr_granted, lock);
-
- if (work_list && lock->l_completion_ast)
- ldlm_add_ast_work_item(lock, NULL, work_list);
+ } else
+ LBUG();
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
}
@@ -1103,7 +1116,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* of bits.
*/
if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
+ ((lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits) !=
policy->l_inodebits.bits))
continue;
@@ -1214,7 +1227,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (!res) {
+ if (IS_ERR(res)) {
LASSERT(!old_lock);
return 0;
}
@@ -1363,12 +1376,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (size == sizeof(struct ost_lvb)) {
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
else
lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
@@ -1380,8 +1393,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb_v1);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb_v1);
else
lvb = req_capsule_server_sized_swab_get(pill,
&RMF_DLM_LVB, size,
@@ -1405,12 +1418,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (size == sizeof(struct lquota_lvb)) {
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
else
lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
@@ -1462,15 +1475,15 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
{
struct ldlm_lock *lock;
struct ldlm_resource *res;
+ int rc;
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (!res)
- return NULL;
+ if (IS_ERR(res))
+ return ERR_CAST(res);
lock = ldlm_lock_new(res);
-
if (!lock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
lock->l_req_mode = mode;
lock->l_ast_data = data;
@@ -1484,27 +1497,33 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
lock->l_tree_node = NULL;
/* if this is the extent lock, allocate the interval tree node */
if (type == LDLM_EXTENT) {
- if (!ldlm_interval_alloc(lock))
+ if (!ldlm_interval_alloc(lock)) {
+ rc = -ENOMEM;
goto out;
+ }
}
if (lvb_len) {
lock->l_lvb_len = lvb_len;
lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lock->l_lvb_data)
+ if (!lock->l_lvb_data) {
+ rc = -ENOMEM;
goto out;
+ }
}
lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) {
+ rc = -ENOENT;
goto out;
+ }
return lock;
out:
ldlm_lock_destroy(lock);
LDLM_LOCK_RELEASE(lock);
- return NULL;
+ return ERR_PTR(rc);
}
/**
@@ -1522,16 +1541,13 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
struct ldlm_lock *lock = *lockp;
struct ldlm_resource *res = lock->l_resource;
- lock->l_last_activity = ktime_get_real_seconds();
-
lock_res_and_lock(lock);
if (lock->l_req_mode == lock->l_granted_mode) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
* need to do anything else.
*/
- *flags &= ~(LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
goto out;
}
@@ -1546,6 +1562,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
*/
if (*flags & LDLM_FL_AST_DISCARD_DATA)
ldlm_set_ast_discard_data(lock);
+ if (*flags & LDLM_FL_TEST_LOCK)
+ ldlm_set_test_lock(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1688,7 +1706,7 @@ static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
return -ENOENT;
gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
- gl_list);
+ gl_list);
list_del_init(&gl_work->gl_list);
lock = gl_work->gl_lock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 821939ff2e6b..fde697ebaadc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -559,8 +559,11 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
+ if (cfs_fail_err)
+ ldlm_callback_reply(req, -(int)cfs_fail_err);
return 0;
+ }
break;
case LDLM_CP_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
@@ -706,12 +709,12 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
if (!list_empty(&blp->blp_list) &&
(list_empty(&blp->blp_prio_list) || num_bl == 0))
blwi = list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ struct ldlm_bl_work_item, blwi_entry);
else
if (!list_empty(&blp->blp_prio_list))
blwi = list_entry(blp->blp_prio_list.next,
- struct ldlm_bl_work_item,
- blwi_entry);
+ struct ldlm_bl_work_item,
+ blwi_entry);
if (blwi) {
if (++num_bl >= atomic_read(&blp->blp_num_threads))
@@ -741,7 +744,7 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
init_completion(&bltd.bltd_comp);
bltd.bltd_num = atomic_read(&blp->blp_num_threads);
snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
- "ldlm_bl_%02d", bltd.bltd_num);
+ "ldlm_bl_%02d", bltd.bltd_num);
task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
if (IS_ERR(task)) {
CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
@@ -786,8 +789,8 @@ static int ldlm_bl_thread_main(void *arg)
if (!blwi) {
atomic_dec(&blp->blp_busy_threads);
l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)),
- &lwi);
+ (blwi = ldlm_bl_get_work(blp)),
+ &lwi);
busy = atomic_inc_return(&blp->blp_busy_threads);
} else {
busy = atomic_read(&blp->blp_busy_threads);
@@ -874,8 +877,6 @@ void ldlm_put_ref(void)
}
EXPORT_SYMBOL(ldlm_put_ref);
-extern unsigned int ldlm_cancel_unused_locks_before_replay;
-
static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
@@ -1094,16 +1095,17 @@ int ldlm_init(void)
return -ENOMEM;
ldlm_lock_slab = kmem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
+ sizeof(struct ldlm_lock), 0,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_DESTROY_BY_RCU, NULL);
if (!ldlm_lock_slab) {
kmem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
}
ldlm_interval_slab = kmem_cache_create("interval_node",
- sizeof(struct ldlm_interval),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ sizeof(struct ldlm_interval),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!ldlm_interval_slab) {
kmem_cache_destroy(ldlm_resource_slab);
kmem_cache_destroy(ldlm_lock_slab);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 657ed4012776..9a1136e32dfc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -357,38 +357,40 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
int count;
recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec <= 0)
- goto recalc;
-
- spin_lock(&pl->pl_lock);
if (recalc_interval_sec > 0) {
- /*
- * Update pool statistics every 1s.
- */
- ldlm_pool_recalc_stats(pl);
-
- /*
- * Zero out all rates and speed for the last period.
- */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+
+ if (recalc_interval_sec > 0) {
+ /*
+ * Update pool statistics every 1s.
+ */
+ ldlm_pool_recalc_stats(pl);
+
+ /*
+ * Zero out all rates and speed for the last period.
+ */
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ }
+ spin_unlock(&pl->pl_lock);
}
- spin_unlock(&pl->pl_lock);
- recalc:
if (pl->pl_ops->po_recalc) {
count = pl->pl_ops->po_recalc(pl);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
count);
}
+
recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
+ /* DEBUG: should be re-removed after LU-4536 is fixed */
+ CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), too short period(%ld)\n",
+ pl->pl_name, (long)recalc_interval_sec,
+ (long)pl->pl_recalc_period);
+
/* Prevent too frequent recalculation. */
- CDEBUG(D_DLMTRACE,
- "Negative interval(%d), too short period(%lld)",
- recalc_interval_sec,
- (s64)pl->pl_recalc_period);
recalc_interval_sec = 1;
}
@@ -792,7 +794,8 @@ static struct completion ldlm_pools_comp;
*/
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
- int total = 0, nr_ns;
+ unsigned long total = 0;
+ int nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
@@ -995,7 +998,7 @@ static int ldlm_pools_thread_main(void *arg)
wake_up(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- "ldlm_poold", current_pid());
+ "ldlm_poold", current_pid());
while (1) {
struct l_wait_info lwi;
@@ -1025,7 +1028,7 @@ static int ldlm_pools_thread_main(void *arg)
wake_up(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- "ldlm_poold", current_pid());
+ "ldlm_poold", current_pid());
complete_and_exit(&ldlm_pools_comp, 0);
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index af487f9937f4..35ba6f14d95f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -63,8 +63,8 @@
#include "ldlm_internal.h"
-int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-module_param(ldlm_enqueue_min, int, 0644);
+unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
+module_param(ldlm_enqueue_min, uint, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
/* in client side, whether the cached locks will be canceled before replay */
@@ -123,44 +123,56 @@ static int ldlm_expired_completion_wait(void *data)
return 0;
}
+/**
+ * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
+ * lock cancel, and their replies). Used for lock completion timeout on the
+ * client side.
+ *
+ * \param[in] lock lock which is waiting for the completion callback
+ *
+ * \retval timeout in seconds to wait for the server reply
+ */
/* We use the same basis for both server side and client side functions
* from a single node.
*/
-static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
+static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
{
- int timeout = at_get(ldlm_lock_to_ns_at(lock));
+ unsigned int timeout;
if (AT_OFF)
- return obd_timeout / 2;
- /* Since these are non-updating timeouts, we should be conservative.
- * It would be nice to have some kind of "early reply" mechanism for
- * lock callbacks too...
+ return obd_timeout;
+
+ /*
+ * Wait a long time for enqueue - server may have to callback a
+ * lock from another client. Server will evict the other client if it
+ * doesn't respond reasonably, and then give us the lock.
*/
- timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
- return max(timeout, ldlm_enqueue_min);
+ timeout = at_get(ldlm_lock_to_ns_at(lock));
+ return max(3 * timeout, ldlm_enqueue_min);
}
/**
* Helper function for ldlm_completion_ast(), updating timings when lock is
* actually granted.
*/
-static int ldlm_completion_tail(struct ldlm_lock *lock)
+static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
{
long delay;
- int result;
+ int result = 0;
if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
+ } else if (!data) {
+ LDLM_DEBUG(lock, "client-side enqueue: granted");
} else {
+		/* Count only CP RPCs in AT, not immediately granted locks */
delay = ktime_get_real_seconds() - lock->l_last_activity;
LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
delay);
/* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock),
- delay);
- result = 0;
+ at_measured(ldlm_lock_to_ns_at(lock), delay);
}
return result;
}
@@ -177,10 +189,9 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
- return ldlm_completion_tail(lock);
+ return ldlm_completion_tail(lock, data);
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
@@ -224,8 +235,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
goto noreproc;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
return 0;
}
@@ -240,13 +250,10 @@ noreproc:
if (obd)
imp = obd->u.cli.cl_import;
- /* Wait a long time for enqueue - server may have to callback a
- * lock from another client. Server will evict the other client if it
- * doesn't respond reasonably, and then give us the lock.
- */
- timeout = ldlm_get_enq_timeout(lock) * 2;
+ timeout = ldlm_cp_timeout(lock);
lwd.lwd_lock = lock;
+ lock->l_last_activity = ktime_get_real_seconds();
if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
@@ -279,7 +286,7 @@ noreproc:
return rc;
}
- return ldlm_completion_tail(lock);
+ return ldlm_completion_tail(lock, data);
}
EXPORT_SYMBOL(ldlm_completion_ast);
@@ -309,8 +316,6 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
else
LDLM_DEBUG(lock, "lock was granted or failed in race");
- ldlm_lock_decref_internal(lock, mode);
-
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock().
*/
@@ -321,9 +326,14 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
*/
if (lock->l_resource->lr_type == LDLM_FLOCK) {
lock_res_and_lock(lock);
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
+ if (!ldlm_is_destroyed(lock)) {
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_decref_internal_nolock(lock, mode);
+ ldlm_lock_destroy_nolock(lock);
+ }
unlock_res_and_lock(lock);
+ } else {
+ ldlm_lock_decref_internal(lock, mode);
}
}
@@ -418,11 +428,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_FL_INHERIT_MASK);
- /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well
- */
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_FL_NO_TIMEOUT);
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
@@ -556,7 +561,7 @@ static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
enum req_location loc,
int off)
{
- int size = req_capsule_msg_size(pill, loc);
+ u32 size = req_capsule_msg_size(pill, loc);
return ldlm_req_handles_avail(size, off);
}
@@ -565,7 +570,7 @@ static inline int ldlm_format_handles_avail(struct obd_import *imp,
const struct req_format *fmt,
enum req_location loc, int off)
{
- int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+ u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
return ldlm_req_handles_avail(size, off);
}
@@ -696,8 +701,8 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (!lock)
- return -ENOMEM;
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
@@ -719,6 +724,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
+ lock->l_last_activity = ktime_get_real_seconds();
/* lock not sent to server yet */
@@ -819,7 +825,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
lock_res_and_lock(lock);
ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
- (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
+ (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
@@ -1180,8 +1186,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
- lock->l_last_used));
+ la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
lv = lvf * la * unused;
/* Inform pool about current CLV to see it via debugfs. */
@@ -1193,9 +1198,6 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
if (slv == 0 || lv < slv)
return LDLM_POLICY_KEEP_LOCK;
- if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
- return LDLM_POLICY_KEEP_LOCK;
-
return LDLM_POLICY_CANCEL_LOCK;
}
@@ -1239,9 +1241,6 @@ static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_KEEP_LOCK;
- if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
- return LDLM_POLICY_KEEP_LOCK;
-
return LDLM_POLICY_CANCEL_LOCK;
}
@@ -1374,7 +1373,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
break;
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
- l_lru) {
+ l_lru) {
/* No locks which got blocking requests. */
LASSERT(!ldlm_is_bl_ast(lock));
@@ -1413,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* That is, for shrinker policy we drop only
* old locks, but additionally choose them by
* their weight. Big extent locks will stay in
- * the cache. */
+ * the cache.
+ */
result = pf(ns, lock, unused, added, count);
if (result == LDLM_POLICY_KEEP_LOCK) {
lu_ref_del(&lock->l_reference,
@@ -1610,8 +1610,7 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
*/
while (count > 0) {
LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock,
- l_bl_ast);
+ lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
LASSERT(lock->l_conn_export);
if (exp_connect_cancelset(lock->l_conn_export)) {
@@ -1660,7 +1659,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
int rc;
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (!res) {
+ if (IS_ERR(res)) {
/* This is not a problem. */
CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
return 0;
@@ -1704,7 +1703,8 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
* that have 0 readers/writers.
*
* If flags & LCF_LOCAL, throw the locks away without trying
- * to notify the server. */
+ * to notify the server.
+ */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
enum ldlm_cancel_flags flags, void *opaque)
@@ -1811,13 +1811,10 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
struct ldlm_resource *res;
int rc;
- if (!ns) {
- CERROR("must pass in namespace\n");
- LBUG();
- }
+ LASSERTF(ns, "must pass in namespace\n");
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (!res)
+ if (IS_ERR(res))
return 0;
LDLM_RESOURCE_ADDREF(res);
@@ -1843,7 +1840,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
- if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
+ if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
@@ -2013,7 +2010,7 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
+ canceled, ldlm_ns_name(ns));
}
int ldlm_replay_locks(struct obd_import *imp)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 51a28d96af39..a09c25aea698 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -449,8 +449,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
const void *key, unsigned mask)
{
const struct ldlm_res_id *id = key;
- unsigned val = 0;
- unsigned i;
+ unsigned int val = 0;
+ unsigned int i;
for (i = 0; i < RES_NAME_SIZE; i++)
val += id->name[i];
@@ -561,9 +561,9 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
struct ldlm_ns_hash_def {
enum ldlm_ns_type nsd_type;
/** hash bucket bits */
- unsigned nsd_bkt_bits;
+ unsigned int nsd_bkt_bits;
/** hash bits */
- unsigned nsd_all_bits;
+ unsigned int nsd_all_bits;
/** hash operations */
struct cfs_hash_ops *nsd_hops;
};
@@ -758,8 +758,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
*/
lock_res(res);
list_for_each(tmp, q) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
@@ -793,8 +792,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
*/
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
+ if (lock->l_flags & LDLM_FL_FAIL_LOC) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(4));
+ set_current_state(TASK_RUNNING);
+ }
if (lock->l_completion_ast)
- lock->l_completion_ast(lock, 0, NULL);
+ lock->l_completion_ast(lock, LDLM_FL_FAILED,
+ NULL);
LDLM_LOCK_RELEASE(lock);
continue;
}
@@ -875,7 +880,8 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
if (force)
- lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+ lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
+ MSEC_PER_SEC) / 4, NULL, NULL);
rc = l_wait_event(ns->ns_waitq,
atomic_read(&ns->ns_bref) == 0, &lwi);
@@ -1082,10 +1088,11 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
int create)
{
struct hlist_node *hnode;
- struct ldlm_resource *res;
+ struct ldlm_resource *res = NULL;
struct cfs_hash_bd bd;
__u64 version;
int ns_refcount = 0;
+ int rc;
LASSERT(!parent);
LASSERT(ns->ns_rs_hash);
@@ -1095,31 +1102,20 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
if (hnode) {
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- /* Synchronize with regard to resource creation. */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- mutex_lock(&res->lr_lvb_mutex);
- mutex_unlock(&res->lr_lvb_mutex);
- }
-
- if (unlikely(res->lr_lvb_len < 0)) {
- ldlm_resource_putref(res);
- res = NULL;
- }
- return res;
+ goto lvbo_init;
}
version = cfs_hash_bd_version_get(&bd);
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
if (create == 0)
- return NULL;
+ return ERR_PTR(-ENOENT);
LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
"type: %d\n", type);
res = ldlm_resource_new();
if (!res)
- return NULL;
+ return ERR_PTR(-ENOMEM);
res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
res->lr_name = *name;
@@ -1137,7 +1133,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
/* We have taken lr_lvb_mutex. Drop it. */
mutex_unlock(&res->lr_lvb_mutex);
kmem_cache_free(ldlm_resource_slab, res);
-
+lvbo_init:
res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* Synchronize with regard to resource creation. */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
@@ -1146,8 +1142,9 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
}
if (unlikely(res->lr_lvb_len < 0)) {
+ rc = res->lr_lvb_len;
ldlm_resource_putref(res);
- res = NULL;
+ res = ERR_PTR(rc);
}
return res;
}
@@ -1158,8 +1155,6 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- int rc;
-
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
rc = ns->ns_lvbo->lvbo_init(res);
if (rc < 0) {
@@ -1169,7 +1164,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
res->lr_lvb_len = rc;
mutex_unlock(&res->lr_lvb_mutex);
ldlm_resource_putref(res);
- return NULL;
+ return ERR_PTR(rc);
}
}
@@ -1386,7 +1381,7 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
list_for_each_entry_reverse(lock, &res->lr_granted,
- l_res_link) {
+ l_res_link) {
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 2cbb1b80bd41..1ac0940bd8df 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
- rw.o namei.o symlink.o llite_mmap.o \
+ rw.o namei.o symlink.o llite_mmap.o range_lock.o \
xattr.o xattr_cache.o rw26.o super25.o statahead.o \
glimpse.o lcommon_cl.o lcommon_misc.o \
vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 463b1a360733..0e45d8fc4d7c 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_dlm.h"
@@ -102,39 +101,6 @@ static int ll_dcompare(const struct dentry *dentry,
return 0;
}
-static inline int return_if_equal(struct ldlm_lock *lock, void *data)
-{
- return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
- LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
-}
-
-/* find any ldlm lock of the inode in mdc and lov
- * return 0 not find
- * 1 find one
- * < 0 error
- */
-static int find_cbdata(struct inode *inode)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lov_stripe_md *lsm;
- int rc = 0;
-
- LASSERT(inode);
- rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
- return_if_equal, NULL);
- if (rc != 0)
- return rc;
-
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm)
- return rc;
-
- rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
- ccc_inode_lsm_put(inode, lsm);
-
- return rc;
-}
-
/**
* Called when last reference to a dentry is dropped and dcache wants to know
* whether or not it should cache it:
@@ -155,19 +121,6 @@ static int ll_ddelete(const struct dentry *de)
/* kernel >= 2.6.38 last refcount is decreased after this function. */
LASSERT(d_count(de) == 1);
- /* Disable this piece of code temporarily because this is called
- * inside dcache_lock so it's not appropriate to do lots of work
- * here. ATTENTION: Before this piece of code enabling, LU-2487 must be
- * resolved.
- */
-#if 0
- /* if not ldlm lock for this inode, set i_nlink to 0 so that
- * this inode can be recycled later b=20433
- */
- if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
- clear_nlink(d_inode(de));
-#endif
-
if (d_lustre_invalid((struct dentry *)de))
return 1;
return 0;
@@ -325,14 +278,13 @@ static int ll_revalidate_dentry(struct dentry *dentry,
if (lookup_flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
return 1;
- if (d_need_statahead(dir, dentry) <= 0)
+ if (!dentry_may_statahead(dir, dentry))
return 1;
if (lookup_flags & LOOKUP_RCU)
return -ECHILD;
- do_statahead_enter(dir, &dentry, !d_inode(dentry));
- ll_statahead_mark(dir, dentry);
+ ll_statahead(dir, &dentry, !d_inode(dentry));
return 1;
}
@@ -347,18 +299,9 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
return ll_revalidate_dentry(dentry, flags);
}
-static void ll_d_iput(struct dentry *de, struct inode *inode)
-{
- LASSERT(inode);
- if (!find_cbdata(inode))
- clear_nlink(inode);
- iput(inode);
-}
-
const struct dentry_operations ll_d_ops = {
.d_revalidate = ll_revalidate_nd,
.d_release = ll_release,
.d_delete = ll_ddelete,
- .d_iput = ll_d_iput,
.d_compare = ll_dcompare,
};
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b381779c827..7f32a539d260 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -46,9 +46,8 @@
#include "../include/obd_support.h"
#include "../include/obd_class.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_lib.h"
-#include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_kernelcomm.h"
@@ -134,111 +133,35 @@
* for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
-
-/* returns the page unlocked, but with a reference */
-static int ll_dir_filler(void *_hash, struct page *page0)
+struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
+ __u64 offset)
{
- struct inode *inode = page0->mapping->host;
- int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
- struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
- struct ptlrpc_request *request;
- struct mdt_body *body;
- struct md_op_data *op_data;
- __u64 hash = *((__u64 *)_hash);
- struct page **page_pool;
+ struct md_callback cb_op;
struct page *page;
- struct lu_dirpage *dp;
- int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
- int nrdpgs = 0; /* number of pages read actually */
- int npages;
- int i;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
- PFID(ll_inode2fid(inode)), inode, hash);
-
- LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
-
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
- if (page_pool) {
- page_pool[0] = page0;
- } else {
- page_pool = &page0;
- max_pages = 1;
- }
- for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc_cold(inode->i_mapping);
- if (!page)
- break;
- page_pool[npages] = page;
- }
-
- op_data->op_npages = npages;
- op_data->op_offset = hash;
- rc = md_readpage(exp, op_data, page_pool, &request);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- /* page0 is special, which was added into page cache early */
- delete_from_page_cache(page0);
- } else if (rc == 0) {
- body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- /* Checked by mdc_readpage() */
- if (body->valid & OBD_MD_FLSIZE)
- i_size_write(inode, body->size);
-
- nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
- >> PAGE_SHIFT;
- SetPageUptodate(page0);
- }
- unlock_page(page0);
- ptlrpc_req_finished(request);
-
- CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
-
- for (i = 1; i < npages; i++) {
- unsigned long offset;
- int ret;
-
- page = page_pool[i];
-
- if (rc < 0 || i >= nrdpgs) {
- put_page(page);
- continue;
- }
-
- SetPageUptodate(page);
-
- dp = kmap(page);
- hash = le64_to_cpu(dp->ldp_hash_start);
- kunmap(page);
-
- offset = hash_x_index(hash, hash64);
-
- prefetchw(&page->flags);
- ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
- GFP_NOFS);
- if (ret == 0) {
- unlock_page(page);
- } else {
- CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
- offset, ret);
- }
- put_page(page);
- }
+ cb_op.md_blocking_ast = ll_md_blocking_ast;
+ rc = md_read_page(ll_i2mdexp(dir), op_data, &cb_op, offset, &page);
+ if (rc)
+ return ERR_PTR(rc);
- if (page_pool != &page0)
- kfree(page_pool);
- return rc;
+ return page;
}
-void ll_release_page(struct page *page, int remove)
+void ll_release_page(struct inode *inode, struct page *page, bool remove)
{
kunmap(page);
+
+ /*
+	 * Always remove the page for a striped dir, because the page is
+	 * built temporarily in the LMV layer.
+ */
+ if (inode && S_ISDIR(inode->i_mode) &&
+ ll_i2info(inode)->lli_lsm_md) {
+ __free_page(page);
+ return;
+ }
+
if (remove) {
lock_page(page);
if (likely(page->mapping))
@@ -248,225 +171,6 @@ void ll_release_page(struct page *page, int remove)
put_page(page);
}
-/*
- * Find, kmap and return page that contains given hash.
- */
-static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
- __u64 *start, __u64 *end)
-{
- int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
- struct address_space *mapping = dir->i_mapping;
- /*
- * Complement of hash is used as an index so that
- * radix_tree_gang_lookup() can be used to find a page with starting
- * hash _smaller_ than one we are looking for.
- */
- unsigned long offset = hash_x_index(*hash, hash64);
- struct page *page;
- int found;
-
- spin_lock_irq(&mapping->tree_lock);
- found = radix_tree_gang_lookup(&mapping->page_tree,
- (void **)&page, offset, 1);
- if (found > 0 && !radix_tree_exceptional_entry(page)) {
- struct lu_dirpage *dp;
-
- get_page(page);
- spin_unlock_irq(&mapping->tree_lock);
- /*
- * In contrast to find_lock_page() we are sure that directory
- * page cannot be truncated (while DLM lock is held) and,
- * hence, can avoid restart.
- *
- * In fact, page cannot be locked here at all, because
- * ll_dir_filler() does synchronous io.
- */
- wait_on_page_locked(page);
- if (PageUptodate(page)) {
- dp = kmap(page);
- if (BITS_PER_LONG == 32 && hash64) {
- *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- *hash = *hash >> 32;
- } else {
- *start = le64_to_cpu(dp->ldp_hash_start);
- *end = le64_to_cpu(dp->ldp_hash_end);
- }
- LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
- *start, *end, *hash);
- CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
- offset, *start, *end, *hash);
- if (*hash > *end) {
- ll_release_page(page, 0);
- page = NULL;
- } else if (*end != *start && *hash == *end) {
- /*
- * upon hash collision, remove this page,
- * otherwise put page reference, and
- * ll_get_dir_page() will issue RPC to fetch
- * the page we want.
- */
- ll_release_page(page,
- le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- page = NULL;
- }
- } else {
- put_page(page);
- page = ERR_PTR(-EIO);
- }
-
- } else {
- spin_unlock_irq(&mapping->tree_lock);
- page = NULL;
- }
- return page;
-}
-
-struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
- struct ll_dir_chain *chain)
-{
- ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
- struct address_space *mapping = dir->i_mapping;
- struct lustre_handle lockh;
- struct lu_dirpage *dp;
- struct page *page;
- enum ldlm_mode mode;
- int rc;
- __u64 start = 0;
- __u64 end = 0;
- __u64 lhash = hash;
- struct ll_inode_info *lli = ll_i2info(dir);
- int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
-
- mode = LCK_PR;
- rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
- ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
- if (!rc) {
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = mode,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- };
- struct lookup_intent it = { .it_op = IT_READDIR };
- struct ptlrpc_request *request;
- struct md_op_data *op_data;
-
- op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return (void *)op_data;
-
- rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
- op_data, &lockh, NULL, 0, NULL, 0);
-
- ll_finish_md_op_data(op_data);
-
- request = (struct ptlrpc_request *)it.it_request;
- if (request)
- ptlrpc_req_finished(request);
- if (rc < 0) {
- CERROR("lock enqueue: " DFID " at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, rc);
- return ERR_PTR(rc);
- }
-
- CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
- PFID(ll_inode2fid(dir)), dir);
- md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
- &it.it_lock_handle, dir, NULL);
- } else {
- /* for cross-ref object, l_ast_data of the lock may not be set,
- * we reset it here
- */
- md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
- dir, NULL);
- }
- ldlm_lock_dump_handle(D_OTHER, &lockh);
-
- mutex_lock(&lli->lli_readdir_mutex);
- page = ll_dir_page_locate(dir, &lhash, &start, &end);
- if (IS_ERR(page)) {
- CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
- PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
- goto out_unlock;
- } else if (page) {
- /*
- * XXX nikita: not entirely correct handling of a corner case:
- * suppose hash chain of entries with hash value HASH crosses
- * border between pages P0 and P1. First both P0 and P1 are
- * cached, seekdir() is called for some entry from the P0 part
- * of the chain. Later P0 goes out of cache. telldir(HASH)
- * happens and finds P1, as it starts with matching hash
- * value. Remaining entries from P0 part of the chain are
- * skipped. (Is that really a bug?)
- *
- * Possible solutions: 0. don't cache P1 is such case, handle
- * it as an "overflow" page. 1. invalidate all pages at
- * once. 2. use HASH|1 as an index for P1.
- */
- goto hash_collision;
- }
-
- page = read_cache_page(mapping, hash_x_index(hash, hash64),
- ll_dir_filler, &lhash);
- if (IS_ERR(page)) {
- CERROR("read cache page: "DFID" at %llu: rc %ld\n",
- PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
- goto out_unlock;
- }
-
- wait_on_page_locked(page);
- (void)kmap(page);
- if (!PageUptodate(page)) {
- CERROR("page not updated: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, -5);
- goto fail;
- }
- if (!PageChecked(page))
- /* XXX: check page format later */
- SetPageChecked(page);
- if (PageError(page)) {
- CERROR("page error: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, -5);
- goto fail;
- }
-hash_collision:
- dp = page_address(page);
- if (BITS_PER_LONG == 32 && hash64) {
- start = le64_to_cpu(dp->ldp_hash_start) >> 32;
- end = le64_to_cpu(dp->ldp_hash_end) >> 32;
- lhash = hash >> 32;
- } else {
- start = le64_to_cpu(dp->ldp_hash_start);
- end = le64_to_cpu(dp->ldp_hash_end);
- lhash = hash;
- }
- if (end == start) {
- LASSERT(start == lhash);
- CWARN("Page-wide hash collision: %llu\n", end);
- if (BITS_PER_LONG == 32 && hash64)
- CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
- le64_to_cpu(dp->ldp_hash_start),
- le64_to_cpu(dp->ldp_hash_end), hash);
- /*
- * Fetch whole overflow chain...
- *
- * XXX not yet.
- */
- goto fail;
- }
-out_unlock:
- mutex_unlock(&lli->lli_readdir_mutex);
- ldlm_lock_decref(&lockh, mode);
- return page;
-
-fail:
- ll_release_page(page, 1);
- page = ERR_PTR(-EIO);
- goto out_unlock;
-}
-
/**
* return IF_* type for given lu_dirent entry.
* IF_* flag should be converted to a particular OS file type in
@@ -489,119 +193,100 @@ static __u16 ll_dirent_type_get(struct lu_dirent *ent)
return type;
}
-int ll_dir_read(struct inode *inode, struct dir_context *ctx)
+int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
+ struct dir_context *ctx)
{
- struct ll_inode_info *info = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = ctx->pos;
- int api32 = ll_need_32bit_api(sbi);
- int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
+ __u64 pos = *ppos;
+ int is_api32 = ll_need_32bit_api(sbi);
+ int is_hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
struct page *page;
- struct ll_dir_chain chain;
- int done = 0;
+ bool done = false;
int rc = 0;
- ll_dir_chain_init(&chain);
-
- page = ll_get_dir_page(inode, pos, &chain);
+ page = ll_get_dir_page(inode, op_data, pos);
while (rc == 0 && !done) {
struct lu_dirpage *dp;
struct lu_dirent *ent;
+ __u64 hash;
+ __u64 next;
- if (!IS_ERR(page)) {
- /*
- * If page is empty (end of directory is reached),
- * use this value.
- */
- __u64 hash = MDS_DIR_END_OFF;
- __u64 next;
-
- dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent && !done;
- ent = lu_dirent_next(ent)) {
- __u16 type;
- int namelen;
- struct lu_fid fid;
- __u64 lhash;
- __u64 ino;
+ if (IS_ERR(page)) {
+ rc = PTR_ERR(page);
+ break;
+ }
+ hash = MDS_DIR_END_OFF;
+ dp = page_address(page);
+ for (ent = lu_dirent_start(dp); ent && !done;
+ ent = lu_dirent_next(ent)) {
+ __u16 type;
+ int namelen;
+ struct lu_fid fid;
+ __u64 lhash;
+ __u64 ino;
+
+ hash = le64_to_cpu(ent->lde_hash);
+ if (hash < pos)
/*
- * XXX: implement correct swabbing here.
+ * Skip until we find target hash
+ * value.
*/
+ continue;
- hash = le64_to_cpu(ent->lde_hash);
- if (hash < pos)
- /*
- * Skip until we find target hash
- * value.
- */
- continue;
-
- namelen = le16_to_cpu(ent->lde_namelen);
- if (namelen == 0)
- /*
- * Skip dummy record.
- */
- continue;
-
- if (api32 && hash64)
- lhash = hash >> 32;
- else
- lhash = hash;
- fid_le_to_cpu(&fid, &ent->lde_fid);
- ino = cl_fid_build_ino(&fid, api32);
- type = ll_dirent_type_get(ent);
- ctx->pos = lhash;
- /* For 'll_nfs_get_name_filldir()', it will try
- * to access the 'ent' through its 'lde_name',
- * so the parameter 'name' for 'ctx->actor()'
- * must be part of the 'ent'.
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (namelen == 0)
+ /*
+ * Skip dummy record.
*/
- done = !dir_emit(ctx, ent->lde_name,
- namelen, ino, type);
- }
- next = le64_to_cpu(dp->ldp_hash_end);
- if (!done) {
- pos = next;
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- done = 1;
- ll_release_page(page, 0);
- } else if (1 /* chain is exhausted*/) {
- /*
- * Normal case: continue to the next
- * page.
- */
- ll_release_page(page,
- le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- next = pos;
- page = ll_get_dir_page(inode, pos,
- &chain);
- } else {
- /*
- * go into overflow page.
- */
- LASSERT(le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- ll_release_page(page, 1);
- }
- } else {
- pos = hash;
- ll_release_page(page, 0);
- }
+ continue;
+
+ if (is_api32 && is_hash64)
+ lhash = hash >> 32;
+ else
+ lhash = hash;
+ fid_le_to_cpu(&fid, &ent->lde_fid);
+ ino = cl_fid_build_ino(&fid, is_api32);
+ type = ll_dirent_type_get(ent);
+ ctx->pos = lhash;
+ /* 'll_nfs_get_name_filldir()' will try to access
+ * 'ent' through its 'lde_name', so the 'name'
+ * parameter passed to 'ctx->actor()' must point
+ * into 'ent'.
+ */
+ done = !dir_emit(ctx, ent->lde_name,
+ namelen, ino, type);
+ }
+
+ if (done) {
+ pos = hash;
+ ll_release_page(inode, page, false);
+ break;
+ }
+
+ next = le64_to_cpu(dp->ldp_hash_end);
+ pos = next;
+ if (pos == MDS_DIR_END_OFF) {
+ /*
+ * End of directory reached.
+ */
+ done = 1;
+ ll_release_page(inode, page, false);
} else {
- rc = PTR_ERR(page);
- CERROR("error reading dir "DFID" at %lu: rc %d\n",
- PFID(&info->lli_fid), (unsigned long)pos, rc);
+ /*
+ * Normal case: continue to the next
+ * page.
+ */
+ ll_release_page(inode, page,
+ le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ next = pos;
+ page = ll_get_dir_page(inode, op_data, pos);
}
}
ctx->pos = pos;
- ll_dir_chain_fini(&chain);
return rc;
}
@@ -613,9 +298,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
__u64 pos = lfd ? lfd->lfd_pos : 0;
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
+ struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos/size %lu/%llu 32bit_api %d\n",
PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
i_size_read(inode), api32);
@@ -627,19 +313,58 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
goto out;
}
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
+ LUSTRE_OPC_ANY, inode);
+ if (IS_ERR(op_data)) {
+ rc = PTR_ERR(op_data);
+ goto out;
+ }
+
+ if (unlikely(op_data->op_mea1)) {
+ /*
+ * This is only needed for a striped dir to fill in the ".." entry;
+ * see lmv_read_page().
+ */
+ if (file_dentry(filp)->d_parent &&
+ file_dentry(filp)->d_parent->d_inode) {
+ __u64 ibits = MDS_INODELOCK_UPDATE;
+ struct inode *parent;
+
+ parent = file_dentry(filp)->d_parent->d_inode;
+ if (ll_have_md_lock(parent, &ibits, LCK_MINMODE))
+ op_data->op_fid3 = *ll_inode2fid(parent);
+ }
+
+ /*
+ * If the parent fid cannot be found in the cache, look up ".."
+ * on the master object.
+ */
+ if (fid_is_zero(&op_data->op_fid3)) {
+ rc = ll_dir_get_parent_fid(inode, &op_data->op_fid3);
+ if (rc) {
+ ll_finish_md_op_data(op_data);
+ return rc;
+ }
+ }
+ }
+ op_data->op_max_pages = sbi->ll_md_brw_pages;
ctx->pos = pos;
- rc = ll_dir_read(inode, ctx);
+ rc = ll_dir_read(inode, &pos, op_data, ctx);
+ pos = ctx->pos;
if (lfd)
- lfd->lfd_pos = ctx->pos;
- if (ctx->pos == MDS_DIR_END_OFF) {
+ lfd->lfd_pos = pos;
+
+ if (pos == MDS_DIR_END_OFF) {
if (api32)
- ctx->pos = LL_DIR_END_OFF_32BIT;
+ pos = LL_DIR_END_OFF_32BIT;
else
- ctx->pos = LL_DIR_END_OFF;
+ pos = LL_DIR_END_OFF;
} else {
if (api32 && hash64)
- ctx->pos >>= 32;
+ pos >>= 32;
}
+ ctx->pos = pos;
+ ll_finish_md_op_data(op_data);
filp->f_version = inode->i_version;
out:
@@ -668,18 +393,40 @@ static int ll_send_mgc_param(struct obd_export *mgc, char *string)
return rc;
}
-static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
- char *filename)
+/**
+ * Create a striped directory with the specified stripe layout (@lump).
+ *
+ * param[in] parent the parent of the directory.
+ * param[in] lump the specified stripes.
+ * param[in] dirname the name of the directory.
+ * param[in] mode the specified mode of the directory.
+ *
+ * retval =0 if the striped directory was created successfully.
+ *        <0 if the creation failed.
+ */
+static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
+ const char *dirname, umode_t mode)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int mode;
+ struct ll_sb_info *sbi = ll_i2sbi(parent);
int err;
- mode = (~current_umask() & 0755) | S_IFDIR;
- op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
- strlen(filename), mode, LUSTRE_OPC_MKDIR,
+ if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
+ return -EINVAL;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) name %s stripe_offset %d, stripe_count: %u\n",
+ PFID(ll_inode2fid(parent)), parent, dirname,
+ (int)lump->lum_stripe_offset, lump->lum_stripe_count);
+
+ if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
+ lustre_swab_lmv_user_md(lump);
+
+ if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
+ mode &= ~current_umask();
+ mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, dirname,
+ strlen(dirname), mode, LUSTRE_OPC_MKDIR,
lump);
if (IS_ERR(op_data)) {
err = PTR_ERR(op_data);
@@ -730,6 +477,13 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
lum_size = sizeof(struct lov_user_md_v3);
break;
}
+ case LMV_USER_MAGIC: {
+ if (lump->lmm_magic != cpu_to_le32(LMV_USER_MAGIC))
+ lustre_swab_lmv_user_md(
+ (struct lmv_user_md *)lump);
+ lum_size = sizeof(struct lmv_user_md);
+ break;
+ }
default: {
CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
lump->lmm_magic, LOV_USER_MAGIC_V1,
@@ -746,9 +500,6 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
- op_data->op_cli_flags |= CLI_SET_MEA;
-
/* swabbing is done in lov_setstripe() on server side */
rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
NULL, 0, &req, NULL);
@@ -803,8 +554,16 @@ end:
return rc;
}
-int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
- int *lmm_size, struct ptlrpc_request **request)
+/**
+ * Retrieve the LMV, default LMV or default LOV stripe EA of a directory.
+ * @valid indicates which stripe EA to retrieve:
+ *   OBD_MD_MEA          LMV stripe EA
+ *   OBD_MD_DEFAULT_MEA  default LMV stripe EA
+ *   otherwise           default LOV EA.
+ * Only one stripe EA can be retrieved per call.
+ */
+int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
+ struct ptlrpc_request **request, u64 valid)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct mdt_body *body;
@@ -813,7 +572,7 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
int rc, lmmsize;
struct md_op_data *op_data;
- rc = ll_get_default_mdsize(sbi, &lmmsize);
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
return rc;
@@ -834,9 +593,9 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- lmmsize = body->eadatasize;
+ lmmsize = body->mbo_eadatasize;
- if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
+ if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
lmmsize == 0) {
rc = -ENODATA;
goto out;
@@ -844,6 +603,7 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
lmm = req_capsule_server_sized_get(&req->rq_pill,
&RMF_MDT_MD, lmmsize);
+ LASSERT(lmm);
/*
* This is coming from the MDS, so is probably in
@@ -860,40 +620,51 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
break;
+ case LMV_MAGIC_V1:
+ if (cpu_to_le32(LMV_MAGIC) != LMV_MAGIC)
+ lustre_swab_lmv_mds_md((union lmv_mds_md *)lmm);
+ break;
+ case LMV_USER_MAGIC:
+ if (cpu_to_le32(LMV_USER_MAGIC) != LMV_USER_MAGIC)
+ lustre_swab_lmv_user_md((struct lmv_user_md *)lmm);
+ break;
default:
CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
rc = -EPROTO;
}
out:
- *lmmp = lmm;
- *lmm_size = lmmsize;
+ *plmm = lmm;
+ *plmm_size = lmmsize;
*request = req;
return rc;
}
-/*
- * Get MDT index for the inode.
- */
-int ll_get_mdt_idx(struct inode *inode)
+int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
- int rc, mdtidx;
+ int mdt_index, rc;
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
+ op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
+ if (!op_data)
+ return -ENOMEM;
op_data->op_flags |= MF_GET_MDT_IDX;
+ op_data->op_fid1 = *fid;
rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
- mdtidx = op_data->op_mds;
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
+ mdt_index = op_data->op_mds;
+ kvfree(op_data);
+ if (rc < 0)
return rc;
- }
- return mdtidx;
+
+ return mdt_index;
+}
+
+/*
+ * Get MDT index for the inode.
+ */
+int ll_get_mdt_idx(struct inode *inode)
+{
+ return ll_get_mdt_idx_by_fid(ll_i2sbi(inode), ll_inode2fid(inode));
}
/**
@@ -1288,11 +1059,9 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
case IOC_MDC_LOOKUP: {
- struct ptlrpc_request *request = NULL;
int namelen, len = 0;
char *buf = NULL;
char *filename;
- struct md_op_data *op_data;
rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
if (rc)
@@ -1308,21 +1077,13 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto out_free;
}
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data)) {
- rc = PTR_ERR(op_data);
- goto out_free;
- }
-
- op_data->op_valid = OBD_MD_FLID;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
+ rc = ll_get_fid_by_name(inode, filename, namelen, NULL);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
+ CERROR("%s: lookup %.*s failed: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), namelen,
+ filename, rc);
goto out_free;
}
- ptlrpc_req_finished(request);
out_free:
obd_ioctl_freedata(buf, len);
return rc;
@@ -1333,6 +1094,7 @@ out_free:
char *filename;
int namelen = 0;
int lumlen = 0;
+ umode_t mode;
int len;
int rc;
@@ -1366,15 +1128,32 @@ out_free:
goto lmv_out_free;
}
- /**
- * ll_dir_setdirstripe will be used to set dir stripe
- * mdc_create--->mdt_reint_create (with dirstripe)
- */
- rc = ll_dir_setdirstripe(inode, lum, filename);
+#if OBD_OCD_VERSION(2, 9, 50, 0) > LUSTRE_VERSION_CODE
+ mode = data->ioc_type != 0 ? data->ioc_type : S_IRWXUGO;
+#else
+ mode = data->ioc_type;
+#endif
+ rc = ll_dir_setdirstripe(inode, lum, filename, mode);
lmv_out_free:
obd_ioctl_freedata(buf, len);
return rc;
}
+ case LL_IOC_LMV_SET_DEFAULT_STRIPE: {
+ struct lmv_user_md __user *ulump;
+ struct lmv_user_md lum;
+ int rc;
+
+ ulump = (struct lmv_user_md __user *)arg;
+ if (copy_from_user(&lum, ulump, sizeof(lum)))
+ return -EFAULT;
+
+ if (lum.lum_magic != LMV_USER_MAGIC)
+ return -EINVAL;
+
+ rc = ll_dir_setstripe(inode, (struct lov_user_md *)&lum, 0);
+
+ return rc;
+ }
case LL_IOC_LOV_SETSTRIPE: {
struct lov_user_md_v3 lumv3;
struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
@@ -1404,50 +1183,100 @@ lmv_out_free:
return rc;
}
case LL_IOC_LMV_GETSTRIPE: {
- struct lmv_user_md __user *lump = (void __user *)arg;
+ struct lmv_user_md __user *ulmv;
struct lmv_user_md lum;
- struct lmv_user_md *tmp;
+ struct ptlrpc_request *request = NULL;
+ struct lmv_user_md *tmp = NULL;
+ union lmv_mds_md *lmm = NULL;
+ u64 valid = 0;
+ int stripe_count;
+ int mdt_index;
int lum_size;
- int rc = 0;
- int mdtindex;
+ int lmmsize;
+ int rc;
+ int i;
- if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
+ ulmv = (struct lmv_user_md __user *)arg;
+ if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
return -EFAULT;
- if (lum.lum_magic != LMV_MAGIC_V1)
+ /*
+ * lum_magic indicates which stripe EA the ioctl would like to
+ * get: LMV_MAGIC_V1 is for a normal LMV stripe, LMV_USER_MAGIC
+ * is for the default LMV stripe.
+ */
+ if (lum.lum_magic == LMV_MAGIC_V1)
+ valid |= OBD_MD_MEA;
+ else if (lum.lum_magic == LMV_USER_MAGIC)
+ valid |= OBD_MD_DEFAULT_MEA;
+ else
return -EINVAL;
- lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
+ rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, &request,
+ valid);
+ if (rc)
+ goto finish_req;
+
+ /* Get default LMV EA */
+ if (lum.lum_magic == LMV_USER_MAGIC) {
+ if (rc)
+ goto finish_req;
+
+ if (lmmsize > sizeof(*ulmv)) {
+ rc = -EINVAL;
+ goto finish_req;
+ }
+
+ if (copy_to_user(ulmv, lmm, lmmsize))
+ rc = -EFAULT;
+
+ goto finish_req;
+ }
+
+ stripe_count = lmv_mds_md_stripe_count_get(lmm);
+ lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
tmp = kzalloc(lum_size, GFP_NOFS);
if (!tmp) {
rc = -ENOMEM;
- goto free_lmv;
+ goto finish_req;
}
- *tmp = lum;
- tmp->lum_type = LMV_STRIPE_TYPE;
- tmp->lum_stripe_count = 1;
- mdtindex = ll_get_mdt_idx(inode);
- if (mdtindex < 0) {
+ mdt_index = ll_get_mdt_idx(inode);
+ if (mdt_index < 0) {
rc = -ENOMEM;
- goto free_lmv;
+ goto out_tmp;
+ }
+ tmp->lum_magic = LMV_MAGIC_V1;
+ tmp->lum_stripe_count = 0;
+ tmp->lum_stripe_offset = mdt_index;
+ for (i = 0; i < stripe_count; i++) {
+ struct lu_fid fid;
+
+ fid_le_to_cpu(&fid, &lmm->lmv_md_v1.lmv_stripe_fids[i]);
+ mdt_index = ll_get_mdt_idx_by_fid(sbi, &fid);
+ if (mdt_index < 0) {
+ rc = mdt_index;
+ goto out_tmp;
+ }
+ tmp->lum_objects[i].lum_mds = mdt_index;
+ tmp->lum_objects[i].lum_fid = fid;
+ tmp->lum_stripe_count++;
}
- tmp->lum_stripe_offset = mdtindex;
- tmp->lum_objects[0].lum_mds = mdtindex;
- memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
- sizeof(struct lu_fid));
- if (copy_to_user((void __user *)arg, tmp, lum_size)) {
+ if (copy_to_user(ulmv, tmp, lum_size)) {
rc = -EFAULT;
- goto free_lmv;
+ goto out_tmp;
}
-free_lmv:
+out_tmp:
kfree(tmp);
+finish_req:
+ ptlrpc_req_finished(request);
return rc;
}
+
case LL_IOC_LOV_SWAP_LAYOUTS:
return -EPERM;
- case LL_IOC_OBD_STATFS:
+ case IOC_OBD_STATFS:
return ll_obd_statfs(inode, (void __user *)arg);
case LL_IOC_LOV_GETSTRIPE:
case LL_IOC_MDC_GETINFO:
@@ -1469,7 +1298,8 @@ free_lmv:
rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
&lmmsize, &request);
} else {
- rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
+ rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
+ &request, 0);
}
if (request) {
@@ -1512,18 +1342,18 @@ skip_lmm:
lstat_t st = { 0 };
st.st_dev = inode->i_sb->s_dev;
- st.st_mode = body->mode;
- st.st_nlink = body->nlink;
- st.st_uid = body->uid;
- st.st_gid = body->gid;
- st.st_rdev = body->rdev;
- st.st_size = body->size;
+ st.st_mode = body->mbo_mode;
+ st.st_nlink = body->mbo_nlink;
+ st.st_uid = body->mbo_uid;
+ st.st_gid = body->mbo_gid;
+ st.st_rdev = body->mbo_rdev;
+ st.st_size = body->mbo_size;
st.st_blksize = PAGE_SIZE;
- st.st_blocks = body->blocks;
- st.st_atime = body->atime;
- st.st_mtime = body->mtime;
- st.st_ctime = body->ctime;
- st.st_ino = cl_fid_build_ino(&body->fid1,
+ st.st_blocks = body->mbo_blocks;
+ st.st_atime = body->mbo_atime;
+ st.st_mtime = body->mbo_mtime;
+ st.st_ctime = body->mbo_ctime;
+ st.st_ino = cl_fid_build_ino(&body->mbo_fid1,
sbi->ll_flags &
LL_SBI_32BIT_API);
@@ -1611,9 +1441,6 @@ free_lmm:
kvfree(lmm);
return rc;
}
- case OBD_IOC_LLOG_CATINFO: {
- return -EOPNOTSUPP;
- }
case OBD_IOC_QUOTACHECK: {
struct obd_quotactl *oqctl;
int error = 0;
@@ -1671,7 +1498,7 @@ out_poll:
kfree(check);
return rc;
}
- case LL_IOC_QUOTACTL: {
+ case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl;
qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
@@ -1739,6 +1566,25 @@ out_quotactl:
return rc;
case OBD_IOC_FID2PATH:
return ll_fid2path(inode, (void __user *)arg);
+ case LL_IOC_GETPARENT:
+ return ll_getparent(file, (void __user *)arg);
+ case LL_IOC_FID2MDTIDX: {
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct lu_fid fid;
+ __u32 index;
+
+ if (copy_from_user(&fid, (const struct lu_fid __user *)arg,
+ sizeof(fid)))
+ return -EFAULT;
+
+ /* Call mdc_iocontrol */
+ rc = obd_iocontrol(LL_IOC_FID2MDTIDX, exp, sizeof(fid), &fid,
+ &index);
+ if (rc)
+ return rc;
+
+ return index;
+ }
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur;
ssize_t totalsize;
@@ -1853,6 +1699,45 @@ out_quotactl:
kfree(copy);
return rc;
}
+ case LL_IOC_MIGRATE: {
+ char *buf = NULL;
+ const char *filename;
+ int namelen = 0;
+ int len;
+ int rc;
+ int mdtidx;
+
+ rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
+ if (rc < 0)
+ return rc;
+
+ data = (struct obd_ioctl_data *)buf;
+ if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
+ !data->ioc_inllen1 || !data->ioc_inllen2) {
+ rc = -EINVAL;
+ goto migrate_free;
+ }
+
+ filename = data->ioc_inlbuf1;
+ namelen = data->ioc_inllen1;
+ if (namelen < 1 || namelen != strlen(filename) + 1) {
+ rc = -EINVAL;
+ goto migrate_free;
+ }
+
+ if (data->ioc_inllen2 != sizeof(mdtidx)) {
+ rc = -EINVAL;
+ goto migrate_free;
+ }
+ mdtidx = *(int *)data->ioc_inlbuf2;
+
+ rc = ll_migrate(inode, file, mdtidx, filename, namelen - 1);
+migrate_free:
+ obd_ioctl_freedata(buf, len);
+
+ return rc;
+ }
+
default:
return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
(void __user *)arg);
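
The dir.c hunks above add an LL_IOC_LMV_SET_DEFAULT_STRIPE handler that copies a struct lmv_user_md from user space, checks that lum_magic is LMV_USER_MAGIC and passes it on to ll_dir_setstripe(). A minimal user-space sketch of driving that handler is shown below; it is illustrative only and not part of the patch, the <lustre/lustre_user.h> include path and the open flags are assumptions, and error handling is reduced to the bare minimum.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <lustre/lustre_user.h>	/* assumed: struct lmv_user_md, LL_IOC_LMV_SET_DEFAULT_STRIPE */

/* Ask the client to store a default LMV stripe pattern on a directory. */
static int set_default_dir_stripe(const char *dir, unsigned int stripe_count,
				  unsigned int stripe_offset)
{
	struct lmv_user_md lum = {
		.lum_magic         = LMV_USER_MAGIC,
		.lum_stripe_count  = stripe_count,
		.lum_stripe_offset = stripe_offset,
	};
	int fd, rc;

	fd = open(dir, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -errno;

	rc = ioctl(fd, LL_IOC_LMV_SET_DEFAULT_STRIPE, &lum);
	close(fd);
	return rc < 0 ? -errno : 0;
}

A later LL_IOC_LMV_GETSTRIPE call with lum_magic set to LMV_USER_MAGIC would, per the handler above, map to OBD_MD_DEFAULT_MEA and read the same default EA back.
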
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 57281b9e31ff..d56863ff5866 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -38,14 +38,15 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/lustre_dlm.h"
-#include "../include/lustre_lite.h"
#include <linux/pagemap.h>
#include <linux/file.h>
+#include <linux/sched.h>
#include <linux/mount.h>
-#include "llite_internal.h"
#include "../include/lustre/ll_fiemap.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/cl_object.h"
+#include "llite_internal.h"
static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
@@ -188,17 +189,11 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
spin_unlock(&lli->lli_lock);
}
- if (rc == 0) {
- rc = ll_objects_destroy(req, inode);
- if (rc)
- CERROR("inode %lu ll_objects destroy: rc = %d\n",
- inode->i_ino, rc);
- }
if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
struct mdt_body *body;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (!(body->valid & OBD_MD_FLRELEASED))
+ if (!(body->mbo_valid & OBD_MD_FLRELEASED))
rc = -EBUSY;
}
@@ -349,13 +344,11 @@ int ll_file_release(struct inode *inode, struct file *file)
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd);
- /* The last ref on @file, maybe not be the owner pid of statahead.
- * Different processes can open the same dir, "ll_opendir_key" means:
- * it is me that should stop the statahead thread.
+ /* The last ref on @file may not belong to the pid that started statahead,
+ * because parent and child processes can share the same file handle.
*/
- if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
- lli->lli_opendir_pid != 0)
- ll_stop_statahead(inode, lli->lli_opendir_key);
+ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
+ ll_deauthorize_statahead(inode, fd);
if (is_root_inode(inode)) {
LUSTRE_FPRIVATE(file) = NULL;
@@ -364,7 +357,8 @@ int ll_file_release(struct inode *inode, struct file *file)
}
if (!S_ISDIR(inode->i_mode)) {
- lov_read_and_clear_async_rc(lli->lli_clob);
+ if (lli->lli_clob)
+ lov_read_and_clear_async_rc(lli->lli_clob);
lli->lli_async_rc = 0;
}
@@ -376,55 +370,39 @@ int ll_file_release(struct inode *inode, struct file *file)
return rc;
}
-static int ll_intent_file_open(struct dentry *dentry, void *lmm,
- int lmmsize, struct lookup_intent *itp)
+static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
+ struct lookup_intent *itp)
{
- struct inode *inode = d_inode(dentry);
+ struct inode *inode = d_inode(de);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct dentry *parent = dentry->d_parent;
- const char *name = dentry->d_name.name;
- const int len = dentry->d_name.len;
+ struct dentry *parent = de->d_parent;
+ const char *name = NULL;
struct md_op_data *op_data;
- struct ptlrpc_request *req;
- __u32 opc = LUSTRE_OPC_ANY;
- int rc;
+ struct ptlrpc_request *req = NULL;
+ int len = 0, rc;
- /* Usually we come here only for NFSD, and we want open lock. */
- /* We can also get here if there was cached open handle in revalidate_it
- * but it disappeared while we were getting from there to ll_file_open.
- * But this means this file was closed and immediately opened which
- * makes a good candidate for using OPEN lock
- */
- /* If lmmsize & lmm are not 0, we are just setting stripe info
- * parameters. No need for the open lock
+ LASSERT(parent);
+ LASSERT(itp->it_flags & MDS_OPEN_BY_FID);
+
+ /*
+ * If the server supports open-by-fid, or the file name is invalid,
+ * don't pack the name in the open request.
*/
- if (!lmm && lmmsize == 0) {
- struct ll_dentry_data *ldd = ll_d2d(dentry);
- /*
- * If we came via ll_iget_for_nfs, then we need to request
- * struct ll_dentry_data *ldd = ll_d2d(file->f_dentry);
- *
- * NB: when ldd is NULL, it must have come via normal
- * lookup path only, since ll_iget_for_nfs always calls
- * ll_d_init().
- */
- if (ldd && ldd->lld_nfs_dentry) {
- ldd->lld_nfs_dentry = 0;
- itp->it_flags |= MDS_OPEN_LOCK;
- }
- if (itp->it_flags & FMODE_WRITE)
- opc = LUSTRE_OPC_CREATE;
+ if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID) &&
+ lu_name_is_valid_2(de->d_name.name, de->d_name.len)) {
+ name = de->d_name.name;
+ len = de->d_name.len;
}
- op_data = ll_prep_md_op_data(NULL, d_inode(parent),
- inode, name, len,
- O_RDWR, opc, NULL);
+ op_data = ll_prep_md_op_data(NULL, d_inode(parent), inode, name, len,
+ O_RDWR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
+ op_data->op_data = lmm;
+ op_data->op_data_size = lmmsize;
- itp->it_flags |= MDS_OPEN_BY_FID;
- rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp,
- 0 /*unused */, &req, ll_md_blocking_ast, 0);
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
+ &ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
if (rc == -ESTALE) {
/* reason for keep own exit path - don`t flood log
@@ -479,8 +457,8 @@ static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
struct mdt_body *body;
body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
- och->och_fh = body->handle;
- och->och_fid = body->fid1;
+ och->och_fh = body->mbo_handle;
+ och->och_fid = body->mbo_fid1;
och->och_lease_handle.cookie = it->it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_flags = it->it_flags;
@@ -508,7 +486,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
body = req_capsule_server_get(&it->it_request->rq_pill,
&RMF_MDT_BODY);
- ll_ioepoch_open(lli, body->ioepoch);
+ ll_ioepoch_open(lli, body->mbo_ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
@@ -543,7 +521,7 @@ int ll_file_open(struct inode *inode, struct file *file)
struct obd_client_handle **och_p = NULL;
__u64 *och_usecount = NULL;
struct ll_file_data *fd;
- int rc = 0, opendir_set = 0;
+ int rc = 0;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
PFID(ll_inode2fid(inode)), inode, file->f_flags);
@@ -558,16 +536,8 @@ int ll_file_open(struct inode *inode, struct file *file)
}
fd->fd_file = file;
- if (S_ISDIR(inode->i_mode)) {
- spin_lock(&lli->lli_sa_lock);
- if (!lli->lli_opendir_key && !lli->lli_sai &&
- lli->lli_opendir_pid == 0) {
- lli->lli_opendir_key = fd;
- lli->lli_opendir_pid = current_pid();
- opendir_set = 1;
- }
- spin_unlock(&lli->lli_sa_lock);
- }
+ if (S_ISDIR(inode->i_mode))
+ ll_authorize_statahead(inode, fd);
if (is_root_inode(inode)) {
LUSTRE_FPRIVATE(file) = fd;
@@ -615,7 +585,7 @@ restart:
} else if (it->it_flags & FMODE_EXEC) {
och_p = &lli->lli_mds_exec_och;
och_usecount = &lli->lli_open_fd_exec_count;
- } else {
+ } else {
och_p = &lli->lli_mds_read_och;
och_usecount = &lli->lli_open_fd_read_count;
}
@@ -652,9 +622,19 @@ restart:
* result in a deadlock
*/
mutex_unlock(&lli->lli_och_mutex);
- it->it_create_mode |= M_CHECK_STALE;
+ /*
+ * Normally called under two situations:
+ * 1. NFS export.
+ * 2. revalidate with IT_OPEN (revalidate doesn't
+ * execute this intent any more).
+ *
+ * Always fetch MDS_OPEN_LOCK if this is not setstripe.
+ *
+ * Always specify MDS_OPEN_BY_FID because we don't want
+ * to get a file with a different fid.
+ */
+ it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
- it->it_create_mode &= ~M_CHECK_STALE;
if (rc)
goto out_openerr;
@@ -716,9 +696,10 @@ out_och_free:
mutex_unlock(&lli->lli_och_mutex);
out_openerr:
- if (opendir_set != 0)
- ll_stop_statahead(inode, lli->lli_opendir_key);
- ll_file_data_put(fd);
+ if (lli->lli_opendir_key == fd)
+ ll_deauthorize_statahead(inode, fd);
+ if (fd)
+ ll_file_data_put(fd);
} else {
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
@@ -764,7 +745,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
struct lookup_intent it = { .it_op = IT_OPEN };
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct lustre_handle old_handle = { 0 };
struct obd_client_handle *och = NULL;
int rc;
@@ -831,8 +812,8 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
it.it_flags = fmode | open_flags;
it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
- rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req,
- ll_md_blocking_lease_ast,
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
+ &ll_md_blocking_lease_ast,
/* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise
* it can be cancelled which may mislead applications that the lease is
* broken;
@@ -840,7 +821,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
* open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
* doesn't deal with openhandle, so normal openhandle will be leaked.
*/
- LDLM_FL_NO_LRU | LDLM_FL_EXCL);
+ LDLM_FL_NO_LRU | LDLM_FL_EXCL);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc < 0)
@@ -908,7 +889,6 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
{
struct ldlm_lock *lock;
bool cancelled = true;
- int rc;
lock = ldlm_handle2lock(&och->och_lease_handle);
if (lock) {
@@ -926,9 +906,8 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
if (lease_broken)
*lease_broken = cancelled;
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
- NULL);
- return rc;
+ return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
+ inode, och, NULL);
}
/* Fills the obdo with the attributes for the lsm */
@@ -1138,10 +1117,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
{
struct ll_inode_info *lli = ll_i2info(file_inode(file));
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct range_lock range;
struct cl_io *io;
ssize_t result;
- CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zd\n",
+ CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zu\n",
file->f_path.dentry->d_name.name, iot, *ppos, count);
restart:
@@ -1150,41 +1130,45 @@ restart:
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
- int write_mutex_locked = 0;
+ bool range_locked = false;
+
+ if (file->f_flags & O_APPEND)
+ range_lock_init(&range, 0, LUSTRE_EOF);
+ else
+ range_lock_init(&range, *ppos, *ppos + count - 1);
vio->vui_fd = LUSTRE_FPRIVATE(file);
- vio->vui_io_subtype = args->via_io_subtype;
-
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
- if ((iot == CIT_WRITE) &&
- !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- if (mutex_lock_interruptible(&lli->
- lli_write_mutex)) {
- result = -ERESTARTSYS;
- goto out;
- }
- write_mutex_locked = 1;
- }
- down_read(&lli->lli_trunc_sem);
- break;
- case IO_SPLICE:
- vio->u.splice.vui_pipe = args->u.splice.via_pipe;
- vio->u.splice.vui_flags = args->u.splice.via_flags;
- break;
- default:
- CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
- LBUG();
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
+ /*
+ * Direct IO reads must also take the range lock, or multiple
+ * reads will try to work on the same pages; see LU-6227 for
+ * details.
+ */
+ if (((iot == CIT_WRITE) ||
+ (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
+ range.rl_node.in_extent.start,
+ range.rl_node.in_extent.end);
+ result = range_lock(&lli->lli_write_tree,
+ &range);
+ if (result < 0)
+ goto out;
+
+ range_locked = true;
}
+ down_read(&lli->lli_trunc_sem);
ll_cl_add(file, env, io);
result = cl_io_loop(env, io);
ll_cl_remove(file, env);
- if (args->via_io_subtype == IO_NORMAL)
- up_read(&lli->lli_trunc_sem);
- if (write_mutex_locked)
- mutex_unlock(&lli->lli_write_mutex);
+ up_read(&lli->lli_trunc_sem);
+ if (range_locked) {
+ CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
+ range.rl_node.in_extent.start,
+ range.rl_node.in_extent.end);
+ range_unlock(&lli->lli_write_tree, &range);
+ }
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
@@ -1201,7 +1185,7 @@ out:
* short read/write instead of restart io.
*/
if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n",
+ CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n",
iot == CIT_READ ? "read" : "write",
file, *ppos, count);
LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
@@ -1237,7 +1221,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1261,7 +1245,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
@@ -1271,119 +1255,15 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}
-/*
- * Send file content (through pagecache) somewhere with helper
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = ll_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
-
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
- return result;
-}
-
-static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx)
-{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct obd_trans_info oti = { 0 };
- struct obdo *oa = NULL;
- int lsm_size;
- int rc = 0;
- struct lov_stripe_md *lsm = NULL, *lsm2;
-
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa)
- return -ENOMEM;
-
- lsm = ccc_inode_lsm_get(inode);
- if (!lsm_has_objects(lsm)) {
- rc = -ENOENT;
- goto out;
- }
-
- lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
- (lsm->lsm_stripe_count));
-
- lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS);
- if (!lsm2) {
- rc = -ENOMEM;
- goto out;
- }
-
- oa->o_oi = *oi;
- oa->o_nlink = ost_idx;
- oa->o_flags |= OBD_FL_RECREATE_OBJS;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- memcpy(lsm2, lsm, lsm_size);
- ll_inode_size_lock(inode);
- rc = obd_create(NULL, exp, oa, &lsm2, &oti);
- ll_inode_size_unlock(inode);
-
- kvfree(lsm2);
- goto out;
-out:
- ccc_inode_lsm_put(inode, lsm);
- kmem_cache_free(obdo_cachep, oa);
- return rc;
-}
-
-static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
-{
- struct ll_recreate_obj ucreat;
- struct ost_id oi;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg,
- sizeof(ucreat)))
- return -EFAULT;
-
- ostid_set_seq_mdt0(&oi);
- ostid_set_id(&oi, ucreat.lrc_id);
- return ll_lov_recreate(inode, &oi, ucreat.lrc_ost_idx);
-}
-
-static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
-{
- struct lu_fid fid;
- struct ost_id oi;
- u32 ost_idx;
-
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid)))
- return -EFAULT;
-
- fid_to_ostid(&fid, &oi);
- ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
- return ll_lov_recreate(inode, &oi, ost_idx);
-}
-
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum,
int lum_size)
{
struct lov_stripe_md *lsm = NULL;
- struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
+ struct lookup_intent oit = {
+ .it_op = IT_OPEN,
+ .it_flags = flags | MDS_OPEN_BY_FID,
+ };
int rc = 0;
lsm = ccc_inode_lsm_get(inode);
@@ -1397,11 +1277,11 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
ll_inode_size_lock(inode);
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
- if (rc)
+ if (rc < 0)
goto out_unlock;
rc = oit.it_status;
if (rc < 0)
- goto out_req_free;
+ goto out_unlock;
ll_release_openhandle(inode, &oit);
@@ -1411,9 +1291,6 @@ out_unlock:
ccc_inode_lsm_put(inode, lsm);
out:
return rc;
-out_req_free:
- ptlrpc_req_finished((struct ptlrpc_request *)oit.it_request);
- goto out;
}
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
@@ -1448,9 +1325,9 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- lmmsize = body->eadatasize;
+ lmmsize = body->mbo_eadatasize;
- if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
+ if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
lmmsize == 0) {
rc = -ENODATA;
goto out;
@@ -1481,13 +1358,13 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
*/
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
- if (S_ISREG(body->mode))
+ if (S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v1 *)lmm)->lmm_objects,
stripe_count);
} else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
- if (S_ISREG(body->mode))
+ if (S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v3 *)lmm)->lmm_objects,
stripe_count);
@@ -1530,55 +1407,48 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
return rc;
}
+static int ll_file_getstripe(struct inode *inode,
+ struct lov_user_md __user *lum)
+{
+ struct lu_env *env;
+ int refcheck;
+ int rc;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum);
+ cl_env_put(env, &refcheck);
+ return rc;
+}
+
static int ll_lov_setstripe(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
- struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
+ struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
+ struct lov_user_md *klum;
int lum_size, rc;
__u64 flags = FMODE_WRITE;
- /* first try with v1 which is smaller than v3 */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(lumv1, lumv1p, lum_size))
- return -EFAULT;
-
- if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- lum_size = sizeof(struct lov_user_md_v3);
- if (copy_from_user(&lumv3, lumv3p, lum_size))
- return -EFAULT;
- }
+ rc = ll_copy_user_md(lum, &klum);
+ if (rc < 0)
+ return rc;
- rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lumv1,
+ lum_size = rc;
+ rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, klum,
lum_size);
cl_lov_delay_create_clear(&file->f_flags);
if (rc == 0) {
- struct lov_stripe_md *lsm;
__u32 gen;
- put_user(0, &lumv1p->lmm_stripe_count);
+ put_user(0, &lum->lmm_stripe_count);
ll_layout_refresh(inode, &gen);
- lsm = ccc_inode_lsm_get(inode);
- rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, lsm, (void __user *)arg);
- ccc_inode_lsm_put(inode, lsm);
+ rc = ll_file_getstripe(inode, (struct lov_user_md __user *)arg);
}
- return rc;
-}
-static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
-{
- struct lov_stripe_md *lsm;
- int rc = -ENODATA;
-
- lsm = ccc_inode_lsm_get(inode);
- if (lsm)
- rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
- lsm, (void __user *)arg);
- ccc_inode_lsm_put(inode, lsm);
+ kfree(klum);
return rc;
}
@@ -2247,6 +2117,12 @@ free_hss:
return rc;
}
+static inline long ll_lease_type_from_fmode(fmode_t fmode)
+{
+ return ((fmode & FMODE_READ) ? LL_LEASE_RDLCK : 0) |
+ ((fmode & FMODE_WRITE) ? LL_LEASE_WRLCK : 0);
+}
+
static long
ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -2314,11 +2190,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return rc;
}
case LL_IOC_LOV_GETSTRIPE:
- return ll_lov_getstripe(inode, arg);
- case LL_IOC_RECREATE_OBJ:
- return ll_lov_recreate_obj(inode, arg);
- case LL_IOC_RECREATE_FID:
- return ll_lov_recreate_fid(inode, arg);
+ return ll_file_getstripe(inode,
+ (struct lov_user_md __user *)arg);
case FSFILT_IOC_FIEMAP:
return ll_ioctl_fiemap(inode, arg);
case FSFILT_IOC_GETFLAGS:
@@ -2349,6 +2222,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+ case LL_IOC_GETPARENT:
+ return ll_getparent(file, (struct getparent __user *)arg);
case OBD_IOC_FID2PATH:
return ll_fid2path(inode, (void __user *)arg);
case LL_IOC_DATA_VERSION: {
@@ -2451,20 +2326,20 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle *och = NULL;
bool lease_broken;
- fmode_t mode = 0;
+ fmode_t fmode;
switch (arg) {
- case F_WRLCK:
+ case LL_LEASE_WRLCK:
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
- mode = FMODE_WRITE;
+ fmode = FMODE_WRITE;
break;
- case F_RDLCK:
+ case LL_LEASE_RDLCK:
if (!(file->f_mode & FMODE_READ))
return -EPERM;
- mode = FMODE_READ;
+ fmode = FMODE_READ;
break;
- case F_UNLCK:
+ case LL_LEASE_UNLCK:
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och) {
och = fd->fd_lease_och;
@@ -2472,26 +2347,26 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
mutex_unlock(&lli->lli_och_mutex);
- if (och) {
- mode = och->och_flags &
- (FMODE_READ|FMODE_WRITE);
- rc = ll_lease_close(och, inode, &lease_broken);
- if (rc == 0 && lease_broken)
- mode = 0;
- } else {
- rc = -ENOLCK;
- }
+ if (!och)
+ return -ENOLCK;
+
+ fmode = och->och_flags;
+ rc = ll_lease_close(och, inode, &lease_broken);
+ if (rc < 0)
+ return rc;
+
+ if (lease_broken)
+ fmode = 0;
- /* return the type of lease or error */
- return rc < 0 ? rc : (int)mode;
+ return ll_lease_type_from_fmode(fmode);
default:
return -EINVAL;
}
- CDEBUG(D_INODE, "Set lease with mode %d\n", mode);
+ CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
/* apply for lease */
- och = ll_lease_open(inode, file, mode, 0);
+ och = ll_lease_open(inode, file, fmode, 0);
if (IS_ERR(och))
return PTR_ERR(och);
@@ -2512,8 +2387,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LL_IOC_GET_LEASE: {
struct ll_inode_info *lli = ll_i2info(inode);
struct ldlm_lock *lock = NULL;
+ fmode_t fmode = 0;
- rc = 0;
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och) {
struct obd_client_handle *och = fd->fd_lease_och;
@@ -2522,14 +2397,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (lock) {
lock_res_and_lock(lock);
if (!ldlm_is_cancel(lock))
- rc = och->och_flags &
- (FMODE_READ | FMODE_WRITE);
+ fmode = och->och_flags;
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
}
}
mutex_unlock(&lli->lli_och_mutex);
- return rc;
+ return ll_lease_type_from_fmode(fmode);
}
case LL_IOC_HSM_IMPORT: {
struct hsm_user_import *hui;
@@ -2574,9 +2448,8 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
eof = i_size_read(inode);
}
- retval = generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
- return retval;
+ return generic_file_llseek_size(file, offset, origin,
+ ll_file_maxbytes(inode), eof);
}
static int ll_flush(struct file *file, fl_owner_t id)
@@ -2593,9 +2466,11 @@ static int ll_flush(struct file *file, fl_owner_t id)
*/
rc = lli->lli_async_rc;
lli->lli_async_rc = 0;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
+ if (lli->lli_clob) {
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (!rc)
+ rc = err;
+ }
/* The application has been told about write failure already.
* Do not report failure again.
@@ -2714,6 +2589,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
struct md_op_data *op_data;
struct lustre_handle lockh = {0};
ldlm_policy_data_t flock = { {0} };
+ int fl_type = file_lock->fl_type;
__u64 flags = 0;
int rc;
int rc2 = 0;
@@ -2744,7 +2620,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
- switch (file_lock->fl_type) {
+ switch (fl_type) {
case F_RDLCK:
einfo.ei_mode = LCK_PR;
break;
@@ -2764,8 +2640,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
einfo.ei_mode = LCK_PW;
break;
default:
- CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
- file_lock->fl_type);
+ CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", fl_type);
return -ENOTSUPP;
}
@@ -2787,16 +2662,18 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
case F_GETLK64:
#endif
flags = LDLM_FL_TEST_LOCK;
- /* Save the old mode so that if the mode in the lock changes we
- * can decrement the appropriate reader or writer refcount.
- */
- file_lock->fl_type = einfo.ei_mode;
break;
default:
CERROR("unknown fcntl lock command: %d\n", cmd);
return -EINVAL;
}
+ /*
+ * Save the old mode so that if the mode in the lock changes we
+ * can decrement the appropriate reader or writer refcount.
+ */
+ file_lock->fl_type = einfo.ei_mode;
+
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
@@ -2806,8 +2683,12 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
- op_data, &lockh, &flock, 0, NULL /* req */, flags);
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data, &lockh,
+ flags);
+
+ /* Restore the file lock type if not TEST lock. */
+ if (!(flags & LDLM_FL_TEST_LOCK))
+ file_lock->fl_type = fl_type;
if ((rc == 0 || file_lock->fl_type == F_UNLCK) &&
!(flags & LDLM_FL_TEST_LOCK))
@@ -2815,8 +2696,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (rc2 && file_lock->fl_type != F_UNLCK) {
einfo.ei_mode = LCK_NL;
- md_enqueue(sbi->ll_md_exp, &einfo, NULL,
- op_data, &lockh, &flock, 0, NULL /* req */, flags);
+ md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data,
+ &lockh, flags);
rc = rc2;
}
@@ -2825,6 +2706,117 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
return rc;
}
+int ll_get_fid_by_name(struct inode *parent, const char *name,
+ int namelen, struct lu_fid *fid)
+{
+ struct md_op_data *op_data = NULL;
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ int rc;
+
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = OBD_MD_FLID;
+ rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0)
+ return rc;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (!body) {
+ rc = -EFAULT;
+ goto out_req;
+ }
+ if (fid)
+ *fid = body->mbo_fid1;
+out_req:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
+ const char *name, int namelen)
+{
+ struct ptlrpc_request *request = NULL;
+ struct inode *child_inode = NULL;
+ struct dentry *dchild = NULL;
+ struct md_op_data *op_data;
+ struct qstr qstr;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "migrate %s under "DFID" to MDT%d\n",
+ name, PFID(ll_inode2fid(parent)), mdtidx);
+
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ /* Get child FID first */
+ qstr.hash = full_name_hash(parent, name, namelen);
+ qstr.name = name;
+ qstr.len = namelen;
+ dchild = d_lookup(file_dentry(file), &qstr);
+ if (dchild) {
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+ if (dchild->d_inode) {
+ child_inode = igrab(dchild->d_inode);
+ if (child_inode) {
+ inode_lock(child_inode);
+ op_data->op_fid3 = *ll_inode2fid(child_inode);
+ ll_invalidate_aliases(child_inode);
+ }
+ }
+ dput(dchild);
+ } else {
+ rc = ll_get_fid_by_name(parent, name, namelen,
+ &op_data->op_fid3);
+ if (rc)
+ goto out_free;
+ }
+
+ if (!fid_is_sane(&op_data->op_fid3)) {
+ CERROR("%s: migrate %s, but fid "DFID" is insane\n",
+ ll_get_fsname(parent->i_sb, NULL, 0), name,
+ PFID(&op_data->op_fid3));
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ rc = ll_get_mdt_idx_by_fid(ll_i2sbi(parent), &op_data->op_fid3);
+ if (rc < 0)
+ goto out_free;
+
+ if (rc == mdtidx) {
+ CDEBUG(D_INFO, "%s:"DFID" is already on MDT%d.\n", name,
+ PFID(&op_data->op_fid3), mdtidx);
+ rc = 0;
+ goto out_free;
+ }
+
+ op_data->op_mds = mdtidx;
+ op_data->op_cli_flags = CLI_MIGRATE;
+ rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name,
+ namelen, name, namelen, &request);
+ if (!rc)
+ ll_update_times(request, parent);
+
+ ptlrpc_req_finished(request);
+
+out_free:
+ if (child_inode) {
+ clear_nlink(child_inode);
+ inode_unlock(child_inode);
+ iput(child_inode);
+ }
+
+ ll_finish_md_op_data(op_data);
+ return rc;
+}
+
static int
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
@@ -2847,7 +2839,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
struct lustre_handle lockh;
ldlm_policy_data_t policy;
enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
- (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
+ (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
struct lu_fid *fid;
__u64 flags;
int i;
@@ -2888,15 +2880,12 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
{
ldlm_policy_data_t policy = { .l_inodebits = {bits} };
struct lu_fid *fid;
- enum ldlm_mode rc;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
- rc = md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED,
- fid, LDLM_IBITS, &policy, mode, lockh);
-
- return rc;
+ return md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED,
+ fid, LDLM_IBITS, &policy, mode, lockh);
}
static int ll_inode_revalidate_fini(struct inode *inode, int rc)
@@ -2949,15 +2938,9 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- oit.it_create_mode |= M_CHECK_STALE;
- rc = md_intent_lock(exp, op_data, NULL, 0,
- /* we are not interested in name
- * based lookup
- */
- &oit, 0, &req,
- ll_md_blocking_ast, 0);
+ rc = md_intent_lock(exp, op_data, &oit, &req,
+ &ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
- oit.it_create_mode &= ~M_CHECK_STALE;
if (rc < 0) {
rc = ll_inode_revalidate_fini(inode, rc);
goto out;
@@ -3003,10 +2986,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
op_data->op_valid = valid;
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
- if (rc) {
- rc = ll_inode_revalidate_fini(inode, rc);
- return rc;
- }
+ if (rc)
+ return ll_inode_revalidate_fini(inode, rc);
rc = ll_prep_inode(&inode, req, NULL, NULL);
}
@@ -3015,6 +2996,28 @@ out:
return rc;
}
+static int ll_merge_md_attr(struct inode *inode)
+{
+ struct cl_attr attr = { 0 };
+ int rc;
+
+ LASSERT(ll_i2info(inode)->lli_lsm_md);
+ rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
+ &attr, ll_md_blocking_ast);
+ if (rc)
+ return rc;
+
+ set_nlink(inode, attr.cat_nlink);
+ inode->i_blocks = attr.cat_blocks;
+ i_size_write(inode, attr.cat_size);
+
+ ll_i2info(inode)->lli_atime = attr.cat_atime;
+ ll_i2info(inode)->lli_mtime = attr.cat_mtime;
+ ll_i2info(inode)->lli_ctime = attr.cat_ctime;
+
+ return 0;
+}
+
static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
{
struct inode *inode = d_inode(dentry);
@@ -3026,6 +3029,13 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
/* if object isn't regular file, don't validate size */
if (!S_ISREG(inode->i_mode)) {
+ if (S_ISDIR(inode->i_mode) &&
+ ll_i2info(inode)->lli_lsm_md) {
+ rc = ll_merge_md_attr(inode);
+ if (rc)
+ return rc;
+ }
+
LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
@@ -3057,13 +3067,14 @@ int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
if (res)
return res;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
+
stat->dev = inode->i_sb->s_dev;
if (ll_need_32bit_api(sbi))
stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
else
stat->ino = inode->i_ino;
stat->mode = inode->i_mode;
- stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->rdev = inode->i_rdev;
@@ -3072,6 +3083,7 @@ int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
stat->ctime = inode->i_ctime;
stat->blksize = 1 << inode->i_blkbits;
+ stat->nlink = inode->i_nlink;
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
@@ -3139,6 +3151,12 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type)
int ll_inode_permission(struct inode *inode, int mask)
{
+ struct ll_sb_info *sbi;
+ struct root_squash_info *squash;
+ const struct cred *old_cred = NULL;
+ struct cred *cred = NULL;
+ bool squash_id = false;
+ cfs_cap_t cap;
int rc = 0;
if (mask & MAY_NOT_BLOCK)
@@ -3158,9 +3176,46 @@ int ll_inode_permission(struct inode *inode, int mask)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
+ /* squash fsuid/fsgid if needed */
+ sbi = ll_i2sbi(inode);
+ squash = &sbi->ll_squash;
+ if (unlikely(squash->rsi_uid &&
+ uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
+ !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) {
+ squash_id = true;
+ }
+
+ if (squash_id) {
+ CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
+ __kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
+ squash->rsi_uid, squash->rsi_gid);
+
+ /*
+ * update current process's credentials
+ * and FS capability
+ */
+ cred = prepare_creds();
+ if (!cred)
+ return -ENOMEM;
+
+ cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
+ cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
+ for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) {
+ if ((1 << cap) & CFS_CAP_FS_MASK)
+ cap_lower(cred->cap_effective, cap);
+ }
+ old_cred = override_creds(cred);
+ }
+
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
rc = generic_permission(inode, mask);
+ /* restore current process's credentials and FS capability */
+ if (squash_id) {
+ revert_creds(old_cred);
+ put_cred(cred);
+ }
+
return rc;
}
@@ -3173,7 +3228,7 @@ struct file_operations ll_file_operations = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush
};
@@ -3186,7 +3241,7 @@ struct file_operations ll_file_operations_flock = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_flock,
@@ -3202,7 +3257,7 @@ struct file_operations ll_file_operations_noflock = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_noflock,
@@ -3213,10 +3268,10 @@ const struct inode_operations ll_file_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
+ .removexattr = generic_removexattr,
.fiemap = ll_fiemap,
.get_acl = ll_get_acl,
};
@@ -3251,7 +3306,6 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
if (!in_data)
return NULL;
- memset(in_data, 0, sizeof(*in_data));
in_data->iocd_size = size;
in_data->iocd_cb = cb;
in_data->iocd_count = count;
@@ -3389,7 +3443,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
goto out;
}
- lmmsize = body->eadatasize;
+ lmmsize = body->mbo_eadatasize;
if (lmmsize == 0) /* empty layout */ {
rc = 0;
goto out;
@@ -3447,7 +3501,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
PFID(&lli->lli_fid), inode, reconf);
/* in case this is a caching lock and reinstate with new inode */
- md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
+ md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
lock_res_and_lock(lock);
lvb_ready = ldlm_is_lvb_ready(lock);
@@ -3557,8 +3611,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_IBITS,
.ei_mode = LCK_CR,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_bl = &ll_md_blocking_ast,
+ .ei_cb_cp = &ldlm_completion_ast,
};
int rc;
@@ -3604,8 +3658,7 @@ again:
ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), inode);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
- NULL, 0, NULL, 0);
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL, &it, op_data, &lockh, 0);
ptlrpc_req_finished(it.it_request);
it.it_request = NULL;
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 92004a05f9ee..22507b9c6d69 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -42,7 +42,6 @@
#include "../include/obd.h"
#include "../include/lustre_dlm.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_mdc.h"
#include <linux/pagemap.h>
#include <linux/file.h>
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 396e4e4f0715..084330d08f7a 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -49,7 +49,6 @@
#include "../include/obd.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_mdc.h"
@@ -100,6 +99,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
io->u.ci_setattr.sa_valid = attr->ia_valid;
+ io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode);
again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
@@ -154,7 +154,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
int result = 0;
int refcheck;
- LASSERT(md->body->valid & OBD_MD_FLID);
+ LASSERT(md->body->mbo_valid & OBD_MD_FLID);
LASSERT(S_ISREG(inode->i_mode));
env = cl_env_get(&refcheck);
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index f6be105eeef7..fb346c12dad2 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -38,7 +38,6 @@
#include "../include/obd.h"
#include "../include/cl_object.h"
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 2326b40a0870..8644631bf2ba 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -38,7 +38,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
/** records that a write is in flight */
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4d6d589a1677..4bc551279aa4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -36,14 +36,21 @@
#include "../include/lustre_ver.h"
#include "../include/lustre_disk.h" /* for s2sbi */
#include "../include/lustre_eacl.h"
+#include "../include/lustre_linkea.h"
/* for struct cl_lock_descr and struct cl_io */
+#include "../include/lustre_patchless_compat.h"
+#include "../include/lustre_compat.h"
#include "../include/cl_object.h"
+#include "../include/lustre_lmv.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_intent.h"
#include <linux/compat.h>
+#include <linux/namei.h>
+#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include "vvp_internal.h"
+#include "range_lock.h"
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -57,6 +64,9 @@
#define LL_DIR_END_OFF 0x7fffffffffffffffULL
#define LL_DIR_END_OFF_32BIT 0x7fffffffUL
+/* 4UL * 1024 * 1024 */
+#define LL_MAX_BLKSIZE_BITS 22
+
#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
#define LUSTRE_FPRIVATE(file) ((file)->private_data)
@@ -116,9 +126,7 @@ struct ll_inode_info {
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
- /* Parent fid for accessing default stripe data on parent directory
- * for allocating OST objects after a mknod() and later open-by-FID.
- */
+ /* master inode fid for stripe directory */
struct lu_fid lli_pfid;
struct list_head lli_close_list;
@@ -156,7 +164,7 @@ struct ll_inode_info {
/* for directory */
struct {
/* serialize normal readdir and statahead-readdir. */
- struct mutex d_readdir_mutex;
+ struct mutex lli_readdir_mutex;
/* metadata statahead */
/* since parent-child threads can share the same @file
@@ -164,27 +172,39 @@ struct ll_inode_info {
* case of parent exit before child -- it is me should
* cleanup the dir readahead.
*/
- void *d_opendir_key;
- struct ll_statahead_info *d_sai;
+ void *lli_opendir_key;
+ struct ll_statahead_info *lli_sai;
/* protect statahead stuff. */
- spinlock_t d_sa_lock;
+ spinlock_t lli_sa_lock;
/* "opendir_pid" is the token when lookup/revalidate
* -- I am the owner of dir statahead.
*/
- pid_t d_opendir_pid;
- } d;
-
-#define lli_readdir_mutex u.d.d_readdir_mutex
-#define lli_opendir_key u.d.d_opendir_key
-#define lli_sai u.d.d_sai
-#define lli_sa_lock u.d.d_sa_lock
-#define lli_opendir_pid u.d.d_opendir_pid
+ pid_t lli_opendir_pid;
+ /* stat will try to access statahead entries or start
+ * statahead if this flag is set. The flag is set upon
+ * dir open, and cleared when the dir is closed, when the
+ * statahead hit ratio is too low, or when starting the
+ * statahead thread fails.
+ */
+ unsigned int lli_sa_enabled:1;
+ /* generation for statahead */
+ unsigned int lli_sa_generation;
+ /* directory stripe information */
+ struct lmv_stripe_md *lli_lsm_md;
+ /* default directory stripe offset. This is extracted
+ * from the "dmv" xattr in order to decide which MDT to
+ * create a subdirectory on. The MDS itself fetches
+ * "dmv" and gets the rest of the default layout itself
+ * (count, hash, etc).
+ */
+ __u32 lli_def_stripe_offset;
+ };
/* for non-directory */
struct {
- struct mutex f_size_mutex;
- char *f_symlink_name;
- __u64 f_maxbytes;
+ struct mutex lli_size_mutex;
+ char *lli_symlink_name;
+ __u64 lli_maxbytes;
/*
* struct rw_semaphore {
* signed long count; // align d.d_def_acl
@@ -192,16 +212,16 @@ struct ll_inode_info {
* struct list_head wait_list;
* }
*/
- struct rw_semaphore f_trunc_sem;
- struct mutex f_write_mutex;
+ struct rw_semaphore lli_trunc_sem;
+ struct range_lock_tree lli_write_tree;
- struct rw_semaphore f_glimpse_sem;
- unsigned long f_glimpse_time;
- struct list_head f_agl_list;
- __u64 f_agl_index;
+ struct rw_semaphore lli_glimpse_sem;
+ unsigned long lli_glimpse_time;
+ struct list_head lli_agl_list;
+ __u64 lli_agl_index;
/* for writepage() only to communicate to fsync */
- int f_async_rc;
+ int lli_async_rc;
/*
* whenever a process try to read/write the file, the
@@ -211,22 +231,9 @@ struct ll_inode_info {
* so the read/write statistics for jobid will not be
* accurate if the file is shared by different jobs.
*/
- char f_jobid[JOBSTATS_JOBID_SIZE];
- } f;
-
-#define lli_size_mutex u.f.f_size_mutex
-#define lli_symlink_name u.f.f_symlink_name
-#define lli_maxbytes u.f.f_maxbytes
-#define lli_trunc_sem u.f.f_trunc_sem
-#define lli_write_mutex u.f.f_write_mutex
-#define lli_glimpse_sem u.f.f_glimpse_sem
-#define lli_glimpse_time u.f.f_glimpse_time
-#define lli_agl_list u.f.f_agl_list
-#define lli_agl_index u.f.f_agl_index
-#define lli_async_rc u.f.f_async_rc
-#define lli_jobid u.f.f_jobid
-
- } u;
+ char lli_jobid[LUSTRE_JOBID_SIZE];
+ };
+ };
/* XXX: For following frequent used members, although they maybe special
* used for non-directory object, it is some time-wasting to check
@@ -401,12 +408,13 @@ enum stats_track_type {
#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
+#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
#define LL_SBI_FLAGS { \
"nolck", \
"checksum", \
"flock", \
- "xattr", \
+ "user_xattr", \
"acl", \
"???", \
"???", \
@@ -422,9 +430,27 @@ enum stats_track_type {
"verbose", \
"layout", \
"user_fid2path",\
- "xattr", \
+ "xattr_cache", \
+ "norootsquash", \
}
+/*
+ * This is embedded into llite super-blocks to keep track of connect
+ * flags (capabilities) supported by all imports the given mount is
+ * connected to.
+ */
+struct lustre_client_ocd {
+ /*
+ * This is the conjunction of connect_flags across all imports
+ * (LOVs) this mount is connected to. This field is updated by
+ * cl_ocd_update() under ->lco_lock.
+ */
+ __u64 lco_flags;
+ struct mutex lco_lock;
+ struct obd_export *lco_md_exp;
+ struct obd_export *lco_dt_exp;
+};
+
struct ll_sb_info {
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts
@@ -461,7 +487,7 @@ struct ll_sb_info {
unsigned int ll_namelen;
struct file_operations *ll_fop;
- unsigned int ll_md_brw_size; /* used by readdir */
+ unsigned int ll_md_brw_pages; /* readdir pages per RPC */
struct lu_site *ll_site;
struct cl_device *ll_cl;
@@ -484,11 +510,17 @@ struct ll_sb_info {
atomic_t ll_sa_wrong; /* statahead thread stopped for
* low hit ratio
*/
+ atomic_t ll_sa_running; /* running statahead thread
+ * count
+ */
atomic_t ll_agl_total; /* AGL thread started count */
dev_t ll_sdev_orig; /* save s_dev before assign for
* clustered nfs
*/
+ /* root squash */
+ struct root_squash_info ll_squash;
+
__kernel_fsid_t ll_fsid;
struct kobject ll_kobj; /* sysfs object */
struct super_block *ll_sb; /* struct super_block (for sysfs code)*/
@@ -643,25 +675,66 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
struct ll_file_data *file, loff_t pos,
size_t count, int rw);
+enum {
+ LPROC_LL_DIRTY_HITS,
+ LPROC_LL_DIRTY_MISSES,
+ LPROC_LL_READ_BYTES,
+ LPROC_LL_WRITE_BYTES,
+ LPROC_LL_BRW_READ,
+ LPROC_LL_BRW_WRITE,
+ LPROC_LL_OSC_READ,
+ LPROC_LL_OSC_WRITE,
+ LPROC_LL_IOCTL,
+ LPROC_LL_OPEN,
+ LPROC_LL_RELEASE,
+ LPROC_LL_MAP,
+ LPROC_LL_LLSEEK,
+ LPROC_LL_FSYNC,
+ LPROC_LL_READDIR,
+ LPROC_LL_SETATTR,
+ LPROC_LL_TRUNC,
+ LPROC_LL_FLOCK,
+ LPROC_LL_GETATTR,
+ LPROC_LL_CREATE,
+ LPROC_LL_LINK,
+ LPROC_LL_UNLINK,
+ LPROC_LL_SYMLINK,
+ LPROC_LL_MKDIR,
+ LPROC_LL_RMDIR,
+ LPROC_LL_MKNOD,
+ LPROC_LL_RENAME,
+ LPROC_LL_STAFS,
+ LPROC_LL_ALLOC_INODE,
+ LPROC_LL_SETXATTR,
+ LPROC_LL_GETXATTR,
+ LPROC_LL_GETXATTR_HITS,
+ LPROC_LL_LISTXATTR,
+ LPROC_LL_REMOVEXATTR,
+ LPROC_LL_INODE_PERM,
+ LPROC_LL_FILE_OPCODES
+};
+
/* llite/dir.c */
-void ll_release_page(struct page *page, int remove);
extern const struct file_operations ll_dir_operations;
extern const struct inode_operations ll_dir_inode_operations;
-struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
- struct ll_dir_chain *chain);
-int ll_dir_read(struct inode *inode, struct dir_context *ctx);
-
+int ll_dir_read(struct inode *inode, __u64 *ppos, struct md_op_data *op_data,
+ struct dir_context *ctx);
int ll_get_mdt_idx(struct inode *inode);
+int ll_get_mdt_idx_by_fid(struct ll_sb_info *sbi, const struct lu_fid *fid);
+struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
+ __u64 offset);
+void ll_release_page(struct inode *inode, struct page *page, bool remove);
+
/* llite/namei.c */
extern const struct inode_operations ll_special_inode_operations;
-int ll_objects_destroy(struct ptlrpc_request *request,
- struct inode *dir);
struct inode *ll_iget(struct super_block *sb, ino_t hash,
struct lustre_md *lic);
+int ll_test_inode_by_fid(struct inode *inode, void *opaque);
int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
void *data, int flag);
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
+void ll_update_times(struct ptlrpc_request *request, struct inode *inode);
/* llite/rw.c */
int ll_writepage(struct page *page, struct writeback_control *wbc);
@@ -704,7 +777,10 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
struct lustre_handle *fh);
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
struct posix_acl *ll_get_acl(struct inode *inode, int type);
-
+int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
+ const char *name, int namelen);
+int ll_get_fid_by_name(struct inode *parent, const char *name,
+ int namelen, struct lu_fid *fid);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
@@ -715,8 +791,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
struct ptlrpc_request **request);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int set_default);
-int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
- int *lmm_size, struct ptlrpc_request **request);
+int ll_dir_getstripe(struct inode *inode, void **lmmp, int *lmm_size,
+ struct ptlrpc_request **request, u64 valid);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
@@ -748,8 +824,8 @@ int ll_setattr(struct dentry *de, struct iattr *attr);
int ll_statfs(struct dentry *de, struct kstatfs *sfs);
int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
__u64 max_age, __u32 flags);
-void ll_update_inode(struct inode *inode, struct lustre_md *md);
-void ll_read_inode2(struct inode *inode, void *opaque);
+int ll_update_inode(struct inode *inode, struct lustre_md *md);
+int ll_read_inode2(struct inode *inode, void *opaque);
void ll_delete_inode(struct inode *inode);
int ll_iocontrol(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
@@ -763,15 +839,46 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
int ll_obd_statfs(struct inode *inode, void __user *arg);
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
+int ll_set_default_mdsize(struct ll_sb_info *sbi, int default_mdsize);
int ll_process_config(struct lustre_cfg *lcfg);
+
+enum {
+ LUSTRE_OPC_MKDIR = 0,
+ LUSTRE_OPC_SYMLINK = 1,
+ LUSTRE_OPC_MKNOD = 2,
+ LUSTRE_OPC_CREATE = 3,
+ LUSTRE_OPC_ANY = 5,
+};
+
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data);
+ const char *name, size_t namelen,
+ u32 mode, __u32 opc, void *data);
void ll_finish_md_op_data(struct md_op_data *op_data);
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
+void ll_compute_rootsquash_state(struct ll_sb_info *sbi);
void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
+ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
+ struct lov_user_md **kbuf);
+
+/* Compute expected user md size when passing in an md from user space */
+static inline ssize_t ll_lov_user_md_size(const struct lov_user_md *lum)
+{
+ switch (lum->lmm_magic) {
+ case LOV_USER_MAGIC_V1:
+ return sizeof(struct lov_user_md_v1);
+ case LOV_USER_MAGIC_V3:
+ return sizeof(struct lov_user_md_v3);
+ case LOV_USER_MAGIC_SPECIFIC:
+ if (lum->lmm_stripe_count > LOV_MAX_STRIPE_COUNT)
+ return -EINVAL;
+
+ return lov_user_md_size(lum->lmm_stripe_count,
+ LOV_USER_MAGIC_SPECIFIC);
+ }
+ return -EINVAL;
+}
/* llite/llite_nfs.c */
extern const struct export_operations lustre_export_operations;
@@ -779,6 +886,7 @@ __u32 get_uuid2int(const char *name, int len);
void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
struct inode *search_inode_for_lustre(struct super_block *sb,
const struct lu_fid *fid);
+int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
/* llite/symlink.c */
extern const struct inode_operations ll_fast_symlink_inode_operations;
@@ -800,17 +908,11 @@ void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
*/
struct vvp_io_args {
/** normal/splice */
- enum vvp_io_subtype via_io_subtype;
-
union {
struct {
struct kiocb *via_iocb;
struct iov_iter *via_iter;
} normal;
- struct {
- struct pipe_inode_info *via_pipe;
- unsigned int via_flags;
- } splice;
} u;
};
@@ -838,14 +940,9 @@ static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
return lti;
}
-static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
{
- struct vvp_io_args *via = &ll_env_info(env)->lti_args;
-
- via->via_io_subtype = type;
-
- return via;
+ return &ll_env_info(env)->lti_args;
}
void ll_queue_done_writing(struct inode *inode, unsigned long flags);
@@ -933,12 +1030,19 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
}
/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, struct inode *inode,
- const char *name, const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size);
+extern const struct xattr_handler *ll_xattr_handlers[];
+
+#define XATTR_USER_T 1
+#define XATTR_TRUSTED_T 2
+#define XATTR_SECURITY_T 3
+#define XATTR_ACL_ACCESS_T 4
+#define XATTR_ACL_DEFAULT_T 5
+#define XATTR_LUSTRE_T 6
+#define XATTR_OTHER_T 7
+
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int ll_removexattr(struct dentry *dentry, const char *name);
+int ll_xattr_list(struct inode *inode, const char *name, int type,
+ void *buffer, size_t size, __u64 valid);
/**
* Common IO arguments for various VFS I/O interfaces.
@@ -964,11 +1068,10 @@ void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
/* per inode struct, for dir only */
struct ll_statahead_info {
- struct inode *sai_inode;
+ struct dentry *sai_dentry;
atomic_t sai_refcount; /* when access this struct, hold
* refcount
*/
- unsigned int sai_generation; /* generation for statahead */
unsigned int sai_max; /* max ahead of lookup */
__u64 sai_sent; /* stat requests sent count */
__u64 sai_replied; /* stat requests which received
@@ -995,22 +1098,25 @@ struct ll_statahead_info {
unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
* hidden entries
*/
- sai_agl_valid:1;/* AGL is valid for the dir */
+ sai_agl_valid:1,/* AGL is valid for the dir */
+ sai_in_readpage:1;/* statahead is in readdir() */
wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
struct ptlrpc_thread sai_agl_thread; /* AGL thread */
- struct list_head sai_entries; /* entry list */
- struct list_head sai_entries_received; /* entries returned */
- struct list_head sai_entries_stated; /* entries stated */
- struct list_head sai_entries_agl; /* AGL entries to be sent */
+ struct list_head sai_interim_entries; /* entries which got async
+ * stat reply, but not
+ * instantiated
+ */
+ struct list_head sai_entries; /* completed entries */
+ struct list_head sai_agls; /* AGLs to be sent */
struct list_head sai_cache[LL_SA_CACHE_SIZE];
spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
atomic_t sai_cache_count; /* entry count in cache */
};
-int do_statahead_enter(struct inode *dir, struct dentry **dentry,
- int only_unplug);
-void ll_stop_statahead(struct inode *dir, void *key);
+int ll_statahead(struct inode *dir, struct dentry **dentry, bool unplug);
+void ll_authorize_statahead(struct inode *dir, void *key);
+void ll_deauthorize_statahead(struct inode *dir, void *key);
blkcnt_t dirty_cnt(struct inode *inode);
@@ -1040,73 +1146,53 @@ static inline int ll_glimpse_size(struct inode *inode)
return rc;
}
-static inline void
-ll_statahead_mark(struct inode *dir, struct dentry *dentry)
-{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct ll_dentry_data *ldd = ll_d2d(dentry);
-
- /* not the same process, don't mark */
- if (lli->lli_opendir_pid != current_pid())
- return;
-
- LASSERT(ldd);
- if (sai)
- ldd->lld_sa_generation = sai->sai_generation;
-}
-
-static inline int
-d_need_statahead(struct inode *dir, struct dentry *dentryp)
+/*
+ * A dentry may use statahead when statahead is enabled, the current
+ * process has opened the parent directory, and this dentry hasn't
+ * accessed the statahead cache before.
+ */
+static inline bool
+dentry_may_statahead(struct inode *dir, struct dentry *dentry)
{
struct ll_inode_info *lli;
struct ll_dentry_data *ldd;
if (ll_i2sbi(dir)->ll_sa_max == 0)
- return -EAGAIN;
+ return false;
lli = ll_i2info(dir);
+
+ /*
+ * statahead is not allowed for this dir; there are three possible causes:
+ * 1. dir is not opened.
+ * 2. statahead hit ratio is too low.
+ * 3. a previous stat failed to start the statahead thread.
+ */
+ if (!lli->lli_sa_enabled)
+ return false;
+
/* not the same process, don't statahead */
if (lli->lli_opendir_pid != current_pid())
- return -EAGAIN;
-
- /* statahead has been stopped */
- if (!lli->lli_opendir_key)
- return -EAGAIN;
+ return false;
- ldd = ll_d2d(dentryp);
/*
- * When stats a dentry, the system trigger more than once "revalidate"
- * or "lookup", for "getattr", for "getxattr", and maybe for others.
- * Under patchless client mode, the operation intent is not accurate,
- * which maybe misguide the statahead thread. For example:
- * The "revalidate" call for "getattr" and "getxattr" of a dentry maybe
- * have the same operation intent -- "IT_GETATTR".
- * In fact, one dentry should has only one chance to interact with the
- * statahead thread, otherwise the statahead windows will be confused.
+ * When stating a dentry, the kernel may trigger 'revalidate' or
+ * 'lookup' multiple times, e.g. for 'getattr' and 'getxattr'.
+ * For a patchless client, the lookup intent is not accurate, which may
+ * misguide statahead. For example:
+ * The 'revalidate' call for 'getattr' and 'getxattr' of a dentry will
+ * have the same intent -- IT_GETATTR, while a dentry should access the
+ * statahead cache only once, otherwise the statahead window is messed up.
* The solution is as following:
- * Assign "lld_sa_generation" with "sai_generation" when a dentry
- * "IT_GETATTR" for the first time, and the subsequent "IT_GETATTR"
- * will bypass interacting with statahead thread for checking:
- * "lld_sa_generation == lli_sai->sai_generation"
+ * Assign 'lld_sa_generation' from 'lli_sa_generation' the first time a
+ * dentry is stated with IT_GETATTR; subsequent IT_GETATTR calls will
+ * bypass interacting with the statahead cache by checking
+ * 'lld_sa_generation == lli->lli_sa_generation'.
*/
- if (ldd && lli->lli_sai &&
- ldd->lld_sa_generation == lli->lli_sai->sai_generation)
- return -EAGAIN;
+ ldd = ll_d2d(dentry);
+ if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation)
+ return false;
- return 1;
-}
-
-static inline int
-ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
-{
- int ret;
-
- ret = d_need_statahead(dir, *dentryp);
- if (ret <= 0)
- return ret;
-
- return do_statahead_enter(dir, dentryp, only_unplug);
+ return true;
}
/* llite ioctl register support routine */
@@ -1213,7 +1299,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
PFID(ll_inode2fid(inode)), inode,
handle.cookie);
- md_set_lock_data(exp, &handle.cookie, inode, NULL);
+ md_set_lock_data(exp, &handle, inode, NULL);
}
handle.cookie = it->it_lock_handle;
@@ -1221,8 +1307,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
- md_set_lock_data(exp, &handle.cookie, inode,
- &it->it_lock_bits);
+ md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
it->it_lock_set = 1;
}
@@ -1295,6 +1380,8 @@ void ll_xattr_fini(void);
int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
struct cl_page *page, enum cl_req_type crt);
+int ll_getparent(struct file *file, struct getparent __user *arg);
+
/* lcommon_cl.c */
int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 546063e728db..6bb41b09172e 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -41,7 +41,7 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include "../include/lustre_lite.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_dlm.h"
#include "../include/lprocfs_status.h"
@@ -115,9 +115,16 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
sbi->ll_sa_max = LL_SA_RPC_DEF;
atomic_set(&sbi->ll_sa_total, 0);
atomic_set(&sbi->ll_sa_wrong, 0);
+ atomic_set(&sbi->ll_sa_running, 0);
atomic_set(&sbi->ll_agl_total, 0);
sbi->ll_flags |= LL_SBI_AGL_ENABLED;
+ /* root squash */
+ sbi->ll_squash.rsi_uid = 0;
+ sbi->ll_squash.rsi_gid = 0;
+ INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
+ init_rwsem(&sbi->ll_squash.rsi_sem);
+
sbi->ll_sb = sb;
return sbi;
@@ -128,6 +135,8 @@ static void ll_free_sbi(struct super_block *sb)
struct ll_sb_info *sbi = ll_s2sbi(sb);
if (sbi->ll_cache) {
+ if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
+ cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
cl_cache_decref(sbi->ll_cache);
sbi->ll_cache = NULL;
}
@@ -180,7 +189,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_PINGLESS |
OBD_CONNECT_MAX_EASIZE |
OBD_CONNECT_FLOCK_DEAD |
- OBD_CONNECT_DISP_STRIPE;
+ OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
+ OBD_CONNECT_OPEN_BY_FID |
+ OBD_CONNECT_DIR_STRIPE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
@@ -310,9 +321,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
sbi->ll_flags |= LL_SBI_64BIT_HASH;
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- sbi->ll_md_brw_size = data->ocd_brw_size;
+ sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
else
- sbi->ll_md_brw_size = PAGE_SIZE;
+ sbi->ll_md_brw_pages = 1;
if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
@@ -418,6 +429,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
sb->s_op = &lustre_super_operations;
+ sb->s_xattr = ll_xattr_handlers;
#if THREAD_SIZE >= 8192 /*b=17630*/
sb->s_export_op = &lustre_export_operations;
#endif
@@ -462,7 +474,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
md_free_lustre_md(sbi->ll_md_exp, &lmd);
ptlrpc_req_finished(request);
- if (!(root)) {
+ if (IS_ERR(root)) {
if (lmd.lsm)
obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
@@ -486,11 +498,21 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
KEY_CHECKSUM, sizeof(checksum), &checksum,
NULL);
+ if (err) {
+ CERROR("%s: Set checksum failed: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, err);
+ goto out_root;
+ }
cl_sb_init(sb);
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
KEY_CACHE_SET, sizeof(*sbi->ll_cache),
sbi->ll_cache, NULL);
+ if (err) {
+ CERROR("%s: Set cache_set failed: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, err);
+ goto out_root;
+ }
sb->s_root = d_make_root(root);
if (!sb->s_root) {
@@ -560,6 +582,17 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
return rc;
}
+/**
+ * Get the value of the default_easize parameter.
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] sbi superblock info for this filesystem
+ * \param[out] lmmsize pointer to storage location for value
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on failure
+ */
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
int size, rc;
@@ -573,6 +606,29 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
return rc;
}
+/**
+ * Set the default_easize parameter to the given value.
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] sbi superblock info for this filesystem
+ * \param[in] lmmsize the size to set
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on failure
+ */
+int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
+{
+ if (lmmsize < sizeof(struct lov_mds_md) ||
+ lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
+ return -EINVAL;
+
+ return obd_set_info_async(NULL, sbi->ll_md_exp,
+ sizeof(KEY_DEFAULT_EASIZE),
+ KEY_DEFAULT_EASIZE,
+ sizeof(int), &lmmsize, NULL);
+}
+
static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -608,6 +664,12 @@ void ll_kill_super(struct super_block *sb)
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
+
+ /* wait for running statahead threads to quit */
+ while (atomic_read(&sbi->ll_sa_running) > 0) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
+ }
}
}
@@ -647,7 +709,8 @@ static int ll_options(char *options, int *flags)
*flags |= tmp;
goto next;
}
- tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
+ tmp = ll_set_opt("noflock", s1,
+ LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
if (tmp) {
*flags &= ~tmp;
goto next;
@@ -772,11 +835,13 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_sai = NULL;
spin_lock_init(&lli->lli_sa_lock);
lli->lli_opendir_pid = 0;
+ lli->lli_sa_enabled = 0;
+ lli->lli_def_stripe_offset = -1;
} else {
mutex_init(&lli->lli_size_mutex);
lli->lli_symlink_name = NULL;
init_rwsem(&lli->lli_trunc_sem);
- mutex_init(&lli->lli_write_mutex);
+ range_lock_tree_init(&lli->lli_write_tree);
init_rwsem(&lli->lli_glimpse_sem);
lli->lli_glimpse_time = 0;
INIT_LIST_HEAD(&lli->lli_agl_list);
@@ -896,7 +961,8 @@ void ll_put_super(struct super_block *sb)
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int ccc_count, next, force = 1, rc = 0;
+ int next, force = 1, rc = 0;
+ long ccc_count;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
@@ -917,13 +983,13 @@ void ll_put_super(struct super_block *sb)
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
- !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
+ !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
&lwi);
}
- ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
+ ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
if (!force && rc != -EINTR)
- LASSERTF(!ccc_count, "count: %i\n", ccc_count);
+ LASSERTF(!ccc_count, "count: %li\n", ccc_count);
/* We need to set force before the lov_disconnect in
* lustre_common_put_super, since l_d cleans up osc's as well.
@@ -991,6 +1057,206 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
return inode;
}
+static void ll_dir_clear_lsm_md(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ LASSERT(S_ISDIR(inode->i_mode));
+
+ if (lli->lli_lsm_md) {
+ lmv_free_memmd(lli->lli_lsm_md);
+ lli->lli_lsm_md = NULL;
+ }
+}
+
+static struct inode *ll_iget_anon_dir(struct super_block *sb,
+ const struct lu_fid *fid,
+ struct lustre_md *md)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct mdt_body *body = md->body;
+ struct inode *inode;
+ ino_t ino;
+
+ ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
+ inode = iget_locked(sb, ino);
+ if (!inode) {
+ CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
+ ll_get_fsname(sb, NULL, 0), PFID(fid));
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (inode->i_state & I_NEW) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lmv_stripe_md *lsm = md->lmv;
+
+ inode->i_mode = (inode->i_mode & ~S_IFMT) |
+ (body->mbo_mode & S_IFMT);
+ LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
+ PFID(fid));
+
+ LTIME_S(inode->i_mtime) = 0;
+ LTIME_S(inode->i_atime) = 0;
+ LTIME_S(inode->i_ctime) = 0;
+ inode->i_rdev = 0;
+
+ inode->i_op = &ll_dir_inode_operations;
+ inode->i_fop = &ll_dir_operations;
+ lli->lli_fid = *fid;
+ ll_lli_init(lli);
+
+ LASSERT(lsm);
+ /* master object FID */
+ lli->lli_pfid = body->mbo_fid1;
+ CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
+ lli, PFID(fid), PFID(&lli->lli_pfid));
+ unlock_new_inode(inode);
+ }
+
+ return inode;
+}
+
+static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
+{
+ struct lmv_stripe_md *lsm = md->lmv;
+ struct lu_fid *fid;
+ int i;
+
+ LASSERT(lsm);
+ /*
+ * XXX sigh, this lsm_root initialization should be in the
+ * LMV layer, but it needs ll_iget right now, so we
+ * keep it here for now.
+ */
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ fid = &lsm->lsm_md_oinfo[i].lmo_fid;
+ LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
+ /* Unfortunately ll_iget will call ll_update_inode,
+ * where the initialization of a slave inode is slightly
+ * different, so it resets lsm_md to NULL to avoid
+ * initializing lsm for the slave inode.
+ */
+ /* For a migrating inode, the master stripe and master object will
+ * be the same, so we only need to assign this inode
+ */
+ if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
+ lsm->lsm_md_oinfo[i].lmo_root = inode;
+ else
+ lsm->lsm_md_oinfo[i].lmo_root =
+ ll_iget_anon_dir(inode->i_sb, fid, md);
+ if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
+ int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
+
+ lsm->lsm_md_oinfo[i].lmo_root = NULL;
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
+ const struct lmv_stripe_md *lsm_md2)
+{
+ return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
+ lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
+ lsm_md1->lsm_md_master_mdt_index ==
+ lsm_md2->lsm_md_master_mdt_index &&
+ lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
+ lsm_md1->lsm_md_layout_version ==
+ lsm_md2->lsm_md_layout_version &&
+ !strcmp(lsm_md1->lsm_md_pool_name,
+ lsm_md2->lsm_md_pool_name);
+}
+
+static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lmv_stripe_md *lsm = md->lmv;
+ int rc;
+
+ LASSERT(S_ISDIR(inode->i_mode));
+ CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
+ PFID(ll_inode2fid(inode)));
+
+ /* no stripe information from the request. */
+ if (!lsm) {
+ if (!lli->lli_lsm_md) {
+ return 0;
+ } else if (lli->lli_lsm_md->lsm_md_hash_type &
+ LMV_HASH_FLAG_MIGRATION) {
+ /*
+ * migration is done, the temporary MIGRATE layout has
+ * been removed
+ */
+ CDEBUG(D_INODE, DFID" finish migration.\n",
+ PFID(ll_inode2fid(inode)));
+ lmv_free_memmd(lli->lli_lsm_md);
+ lli->lli_lsm_md = NULL;
+ return 0;
+ } else {
+ /*
+ * The lustre_md from req does not include stripeEA,
+ * see ll_md_setattr
+ */
+ return 0;
+ }
+ }
+
+ /* set the directory layout */
+ if (!lli->lli_lsm_md) {
+ rc = ll_init_lsm_md(inode, md);
+ if (rc)
+ return rc;
+
+ lli->lli_lsm_md = lsm;
+ /*
+ * set md->lmv to NULL, so that the later freeing of the
+ * lustre_md will not free this lsm
+ */
+ md->lmv = NULL;
+ CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
+ lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
+ return 0;
+ }
+
+ /* Compare the old and new stripe information */
+ if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
+ struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
+ int idx;
+
+ CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
+ inode, lsm, old_lsm,
+ lsm->lsm_md_magic, old_lsm->lsm_md_magic,
+ lsm->lsm_md_stripe_count,
+ old_lsm->lsm_md_stripe_count,
+ lsm->lsm_md_master_mdt_index,
+ old_lsm->lsm_md_master_mdt_index,
+ lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
+ lsm->lsm_md_layout_version,
+ old_lsm->lsm_md_layout_version,
+ lsm->lsm_md_pool_name,
+ old_lsm->lsm_md_pool_name);
+
+ for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
+ CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), idx,
+ PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
+ }
+
+ for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
+ CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), idx,
+ PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
+ }
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
void ll_clear_inode(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1031,14 +1297,15 @@ void ll_clear_inode(struct inode *inode)
#ifdef CONFIG_FS_POSIX_ACL
if (lli->lli_posix_acl) {
- LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = NULL;
}
#endif
lli->lli_inode_magic = LLI_INODE_DEAD;
- if (!S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode))
+ ll_dir_clear_lsm_md(inode);
+ if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
LASSERT(list_empty(&lli->lli_agl_list));
/*
@@ -1103,10 +1370,10 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
op_data->op_attr.ia_valid = ia_valid;
/* Extract epoch data if obtained. */
- op_data->op_handle = md.body->handle;
- op_data->op_ioepoch = md.body->ioepoch;
+ op_data->op_handle = md.body->mbo_handle;
+ op_data->op_ioepoch = md.body->mbo_ioepoch;
- ll_update_inode(inode, &md);
+ rc = ll_update_inode(inode, &md);
ptlrpc_req_finished(request);
return rc;
@@ -1138,8 +1405,8 @@ static int ll_setattr_done_writing(struct inode *inode,
rc = ll_som_update(inode, op_data);
else if (rc) {
CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
- ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
- PFID(ll_inode2fid(inode)), rc);
+ ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
}
return rc;
}
@@ -1331,14 +1598,14 @@ int ll_setattr(struct dentry *de, struct iattr *attr)
{
int mode = d_inode(de)->i_mode;
- if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
- (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
+ if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
+ (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
- (ATTR_SIZE|ATTR_MODE)) &&
+ if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
+ (ATTR_SIZE | ATTR_MODE)) &&
(((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
- (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ (((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
!(attr->ia_mode & S_ISGID))))
attr->ia_valid |= ATTR_FORCE;
@@ -1349,7 +1616,7 @@ int ll_setattr(struct dentry *de, struct iattr *attr)
attr->ia_valid |= ATTR_KILL_SUID;
if ((attr->ia_valid & ATTR_MODE) &&
- ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
!(attr->ia_mode & S_ISGID) &&
!(attr->ia_valid & ATTR_KILL_SGID))
attr->ia_valid |= ATTR_KILL_SGID;
@@ -1465,14 +1732,14 @@ void ll_inode_size_unlock(struct inode *inode)
mutex_unlock(&lli->lli_size_mutex);
}
-void ll_update_inode(struct inode *inode, struct lustre_md *md)
+int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct mdt_body *body = md->body;
struct lov_stripe_md *lsm = md->lsm;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
+ LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
if (lsm) {
if (!lli->lli_has_smd &&
!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
@@ -1483,8 +1750,16 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
lli->lli_maxbytes = MAX_LFS_FILESIZE;
}
+ if (S_ISDIR(inode->i_mode)) {
+ int rc;
+
+ rc = ll_update_lsm_md(inode, md);
+ if (rc)
+ return rc;
+ }
+
#ifdef CONFIG_FS_POSIX_ACL
- if (body->valid & OBD_MD_FLACL) {
+ if (body->mbo_valid & OBD_MD_FLACL) {
spin_lock(&lli->lli_lock);
if (lli->lli_posix_acl)
posix_acl_release(lli->lli_posix_acl);
@@ -1492,65 +1767,67 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
spin_unlock(&lli->lli_lock);
}
#endif
- inode->i_ino = cl_fid_build_ino(&body->fid1,
+ inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
sbi->ll_flags & LL_SBI_32BIT_API);
- inode->i_generation = cl_fid_build_gen(&body->fid1);
+ inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
- if (body->valid & OBD_MD_FLATIME) {
- if (body->atime > LTIME_S(inode->i_atime))
- LTIME_S(inode->i_atime) = body->atime;
- lli->lli_atime = body->atime;
+ if (body->mbo_valid & OBD_MD_FLATIME) {
+ if (body->mbo_atime > LTIME_S(inode->i_atime))
+ LTIME_S(inode->i_atime) = body->mbo_atime;
+ lli->lli_atime = body->mbo_atime;
}
- if (body->valid & OBD_MD_FLMTIME) {
- if (body->mtime > LTIME_S(inode->i_mtime)) {
+ if (body->mbo_valid & OBD_MD_FLMTIME) {
+ if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
inode->i_ino, LTIME_S(inode->i_mtime),
- body->mtime);
- LTIME_S(inode->i_mtime) = body->mtime;
+ body->mbo_mtime);
+ LTIME_S(inode->i_mtime) = body->mbo_mtime;
}
- lli->lli_mtime = body->mtime;
- }
- if (body->valid & OBD_MD_FLCTIME) {
- if (body->ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->ctime;
- lli->lli_ctime = body->ctime;
- }
- if (body->valid & OBD_MD_FLMODE)
- inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
- if (body->valid & OBD_MD_FLTYPE)
- inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
+ lli->lli_mtime = body->mbo_mtime;
+ }
+ if (body->mbo_valid & OBD_MD_FLCTIME) {
+ if (body->mbo_ctime > LTIME_S(inode->i_ctime))
+ LTIME_S(inode->i_ctime) = body->mbo_ctime;
+ lli->lli_ctime = body->mbo_ctime;
+ }
+ if (body->mbo_valid & OBD_MD_FLMODE)
+ inode->i_mode = (inode->i_mode & S_IFMT) |
+ (body->mbo_mode & ~S_IFMT);
+ if (body->mbo_valid & OBD_MD_FLTYPE)
+ inode->i_mode = (inode->i_mode & ~S_IFMT) |
+ (body->mbo_mode & S_IFMT);
LASSERT(inode->i_mode != 0);
if (S_ISREG(inode->i_mode))
inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
LL_MAX_BLKSIZE_BITS);
else
inode->i_blkbits = inode->i_sb->s_blocksize_bits;
- if (body->valid & OBD_MD_FLUID)
- inode->i_uid = make_kuid(&init_user_ns, body->uid);
- if (body->valid & OBD_MD_FLGID)
- inode->i_gid = make_kgid(&init_user_ns, body->gid);
- if (body->valid & OBD_MD_FLFLAGS)
- inode->i_flags = ll_ext_to_inode_flags(body->flags);
- if (body->valid & OBD_MD_FLNLINK)
- set_nlink(inode, body->nlink);
- if (body->valid & OBD_MD_FLRDEV)
- inode->i_rdev = old_decode_dev(body->rdev);
-
- if (body->valid & OBD_MD_FLID) {
+ if (body->mbo_valid & OBD_MD_FLUID)
+ inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
+ if (body->mbo_valid & OBD_MD_FLGID)
+ inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
+ if (body->mbo_valid & OBD_MD_FLFLAGS)
+ inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
+ if (body->mbo_valid & OBD_MD_FLNLINK)
+ set_nlink(inode, body->mbo_nlink);
+ if (body->mbo_valid & OBD_MD_FLRDEV)
+ inode->i_rdev = old_decode_dev(body->mbo_rdev);
+
+ if (body->mbo_valid & OBD_MD_FLID) {
/* FID shouldn't be changed! */
if (fid_is_sane(&lli->lli_fid)) {
- LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
+ LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
"Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
- PFID(&lli->lli_fid), PFID(&body->fid1),
+ PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
PFID(ll_inode2fid(inode)), inode);
} else {
- lli->lli_fid = body->fid1;
+ lli->lli_fid = body->mbo_fid1;
}
}
LASSERT(fid_seq(&lli->lli_fid) != 0);
- if (body->valid & OBD_MD_FLSIZE) {
+ if (body->mbo_valid & OBD_MD_FLSIZE) {
if (exp_connect_som(ll_i2mdexp(inode)) &&
S_ISREG(inode->i_mode)) {
struct lustre_handle lockh;
@@ -1577,7 +1854,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326
*/
- i_size_write(inode, body->size);
+ i_size_write(inode, body->mbo_size);
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
spin_unlock(&lli->lli_lock);
@@ -1588,26 +1865,29 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326
*/
- i_size_write(inode, body->size);
+ i_size_write(inode, body->mbo_size);
CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, (unsigned long long)body->size);
+ inode->i_ino, (unsigned long long)body->mbo_size);
}
- if (body->valid & OBD_MD_FLBLOCKS)
- inode->i_blocks = body->blocks;
+ if (body->mbo_valid & OBD_MD_FLBLOCKS)
+ inode->i_blocks = body->mbo_blocks;
}
- if (body->valid & OBD_MD_TSTATE) {
- if (body->t_state & MS_RESTORE)
+ if (body->mbo_valid & OBD_MD_TSTATE) {
+ if (body->mbo_t_state & MS_RESTORE)
lli->lli_flags |= LLIF_FILE_RESTORING;
}
+
+ return 0;
}
-void ll_read_inode2(struct inode *inode, void *opaque)
+int ll_read_inode2(struct inode *inode, void *opaque)
{
struct lustre_md *md = opaque;
struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(&lli->lli_fid), inode);
@@ -1623,7 +1903,9 @@ void ll_read_inode2(struct inode *inode, void *opaque)
LTIME_S(inode->i_atime) = 0;
LTIME_S(inode->i_ctime) = 0;
inode->i_rdev = 0;
- ll_update_inode(inode, md);
+ rc = ll_update_inode(inode, md);
+ if (rc)
+ return rc;
/* OIDEBUG(inode); */
@@ -1644,6 +1926,8 @@ void ll_read_inode2(struct inode *inode, void *opaque)
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
}
+
+ return 0;
}
void ll_delete_inode(struct inode *inode)
@@ -1655,20 +1939,13 @@ void ll_delete_inode(struct inode *inode)
* osc_extent implementation at LU-1030.
*/
cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
- CL_FSYNC_DISCARD, 1);
+ CL_FSYNC_LOCAL, 1);
truncate_inode_pages_final(&inode->i_data);
- /* Workaround for LU-118 */
- if (inode->i_data.nrpages) {
- spin_lock_irq(&inode->i_data.tree_lock);
- spin_unlock_irq(&inode->i_data.tree_lock);
- LASSERTF(inode->i_data.nrpages == 0,
- "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- PFID(ll_inode2fid(inode)), inode,
- inode->i_data.nrpages);
- }
- /* Workaround end */
+ LASSERTF(!inode->i_data.nrpages,
+ "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
+ PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
ll_clear_inode(inode);
clear_inode(inode);
@@ -1704,7 +1981,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- flags = body->flags;
+ flags = body->mbo_flags;
ptlrpc_req_finished(req);
@@ -1886,9 +2163,9 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
if (!op_data)
return;
- op_data->op_fid1 = body->fid1;
- op_data->op_ioepoch = body->ioepoch;
- op_data->op_handle = body->handle;
+ op_data->op_fid1 = body->mbo_fid1;
+ op_data->op_ioepoch = body->mbo_ioepoch;
+ op_data->op_handle = body->mbo_handle;
op_data->op_mod_time = get_seconds();
md_close(exp, op_data, NULL, &close_req);
ptlrpc_req_finished(close_req);
@@ -1910,7 +2187,9 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
goto cleanup;
if (*inode) {
- ll_update_inode(*inode, &md);
+ rc = ll_update_inode(*inode, &md);
+ if (rc)
+ goto out;
} else {
LASSERT(sb);
@@ -1918,18 +2197,18 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* At this point server returns to client's same fid as client
* generated for creating. So using ->fid1 is okay here.
*/
- if (!fid_is_sane(&md.body->fid1)) {
+ if (!fid_is_sane(&md.body->mbo_fid1)) {
CERROR("%s: Fid is insane " DFID "\n",
ll_get_fsname(sb, NULL, 0),
- PFID(&md.body->fid1));
+ PFID(&md.body->mbo_fid1));
rc = -EINVAL;
goto out;
}
- *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
+ *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
sbi->ll_flags & LL_SBI_32BIT_API),
&md);
- if (!*inode) {
+ if (IS_ERR(*inode)) {
#ifdef CONFIG_FS_POSIX_ACL
if (md.posix_acl) {
posix_acl_release(md.posix_acl);
@@ -2075,11 +2354,20 @@ int ll_process_config(struct lustre_cfg *lcfg)
/* this function prepares md_op_data hint for passing it down to the MD stack. */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data)
+ const char *name, size_t namelen,
+ u32 mode, __u32 opc, void *data)
{
- if (namelen > ll_i2sbi(i1)->ll_namelen)
- return ERR_PTR(-ENAMETOOLONG);
+ if (!name) {
+ /* Do not reuse namelen for something else. */
+ if (namelen)
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (namelen > ll_i2sbi(i1)->ll_namelen)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ if (!lu_name_is_valid_2(name, namelen))
+ return ERR_PTR(-EINVAL);
+ }
if (!op_data)
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
@@ -2089,11 +2377,26 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
ll_i2gids(op_data->op_suppgids, i1, i2);
op_data->op_fid1 = *ll_inode2fid(i1);
+ op_data->op_default_stripe_offset = -1;
+ if (S_ISDIR(i1->i_mode)) {
+ op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
+ op_data->op_default_stripe_offset =
+ ll_i2info(i1)->lli_def_stripe_offset;
+ }
- if (i2)
+ if (i2) {
op_data->op_fid2 = *ll_inode2fid(i2);
- else
+ if (S_ISDIR(i2->i_mode))
+ op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
+ } else {
fid_zero(&op_data->op_fid2);
+ }
+
+ if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
+ op_data->op_cli_flags |= CLI_HASH64;
+
+ if (ll_need_32bit_api(ll_i2sbi(i1)))
+ op_data->op_cli_flags |= CLI_API32;
op_data->op_name = name;
op_data->op_namelen = namelen;
@@ -2105,26 +2408,12 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_bias = 0;
op_data->op_cli_flags = 0;
if ((opc == LUSTRE_OPC_CREATE) && name &&
- filename_is_volatile(name, namelen, NULL))
+ filename_is_volatile(name, namelen, &op_data->op_mds))
op_data->op_bias |= MDS_CREATE_VOLATILE;
- op_data->op_opc = opc;
- op_data->op_mds = 0;
+ else
+ op_data->op_mds = 0;
op_data->op_data = data;
- /* If the file is being opened after mknod() (normally due to NFS)
- * try to use the default stripe data from parent directory for
- * allocating OST objects. Try to pass the parent FID to MDS.
- */
- if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
- !ll_i2info(i2)->lli_has_smd) {
- struct ll_inode_info *lli = ll_i2info(i2);
-
- spin_lock(&lli->lli_lock);
- if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
- op_data->op_fid1 = lli->lli_pfid;
- spin_unlock(&lli->lli_lock);
- }
-
/* When called by ll_setattr_raw, file is i1. */
if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
op_data->op_bias |= MDS_DATA_MODIFIED;
@@ -2251,3 +2540,197 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
if (buf)
free_page((unsigned long)buf);
}
+
+ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
+ struct lov_user_md **kbuf)
+{
+ struct lov_user_md lum;
+ ssize_t lum_size;
+
+ if (copy_from_user(&lum, md, sizeof(lum))) {
+ lum_size = -EFAULT;
+ goto no_kbuf;
+ }
+
+ lum_size = ll_lov_user_md_size(&lum);
+ if (lum_size < 0)
+ goto no_kbuf;
+
+ *kbuf = kzalloc(lum_size, GFP_NOFS);
+ if (!*kbuf) {
+ lum_size = -ENOMEM;
+ goto no_kbuf;
+ }
+
+ if (copy_from_user(*kbuf, md, lum_size) != 0) {
+ kfree(*kbuf);
+ *kbuf = NULL;
+ lum_size = -EFAULT;
+ }
+no_kbuf:
+ return lum_size;
+}
+
+/*
+ * Compute llite root squash state after a change of root squash
+ * configuration setting or the addition/removal of an LNet NID
+ */
+void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
+{
+ struct root_squash_info *squash = &sbi->ll_squash;
+ lnet_process_id_t id;
+ bool matched;
+ int i;
+
+ /* Update norootsquash flag */
+ down_write(&squash->rsi_sem);
+ if (list_empty(&squash->rsi_nosquash_nids)) {
+ sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
+ } else {
+ /*
+ * Do not apply root squash if any one of our NIDs is
+ * in the nosquash_nids list
+ */
+ matched = false;
+ i = 0;
+
+ while (LNetGetId(i++, &id) != -ENOENT) {
+ if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
+ continue;
+ if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
+ matched = true;
+ break;
+ }
+ }
+ if (matched)
+ sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
+ else
+ sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
+ }
+ up_write(&squash->rsi_sem);
+}
+
+/**
+ * Parse linkea content to extract information about a given hardlink
+ *
+ * \param[in] ldata - Initialized linkea data
+ * \param[in] linkno - Link identifier
+ * \param[out] parent_fid - The entry's parent FID
+ * \param[out] ln - Entry name destination buffer
+ *
+ * \retval 0 on success
+ * \retval Appropriate negative error code on failure
+ */
+static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
+ struct lu_fid *parent_fid, struct lu_name *ln)
+{
+ unsigned int idx;
+ int rc;
+
+ rc = linkea_init(ldata);
+ if (rc < 0)
+ return rc;
+
+ if (linkno >= ldata->ld_leh->leh_reccount)
+ /* beyond last link */
+ return -ENODATA;
+
+ linkea_first_entry(ldata);
+ for (idx = 0; ldata->ld_lee; idx++) {
+ linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
+ parent_fid);
+ if (idx == linkno)
+ break;
+
+ linkea_next_entry(ldata);
+ }
+
+ if (idx < linkno)
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * Get parent FID and name of an identified link. Operation is performed for
+ * a given link number, letting the caller iterate over linkno to list one or
+ * all links of an entry.
+ *
+ * \param[in] file - File descriptor against which to perform the operation
+ * \param[in,out] arg - User-filled structure containing the linkno to operate
+ * on and the available size. It is eventually filled with
+ * the requested information or left untouched on error
+ *
+ * \retval - 0 on success
+ * \retval - Appropriate negative error code on failure
+ */
+int ll_getparent(struct file *file, struct getparent __user *arg)
+{
+ struct inode *inode = file_inode(file);
+ struct linkea_data *ldata;
+ struct lu_fid parent_fid;
+ struct lu_buf buf = {
+ .lb_buf = NULL,
+ .lb_len = 0
+ };
+ struct lu_name ln;
+ u32 name_size;
+ u32 linkno;
+ int rc;
+
+ if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
+ !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
+ return -EPERM;
+
+ if (get_user(name_size, &arg->gp_name_size))
+ return -EFAULT;
+
+ if (get_user(linkno, &arg->gp_linkno))
+ return -EFAULT;
+
+ if (name_size > PATH_MAX)
+ return -EINVAL;
+
+ ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
+ if (!ldata)
+ return -ENOMEM;
+
+ rc = linkea_data_new(ldata, &buf);
+ if (rc < 0)
+ goto ldata_free;
+
+ rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
+ buf.lb_len, OBD_MD_FLXATTR);
+ if (rc < 0)
+ goto lb_free;
+
+ rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
+ if (rc < 0)
+ goto lb_free;
+
+ if (ln.ln_namelen >= name_size) {
+ rc = -EOVERFLOW;
+ goto lb_free;
+ }
+
+ if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
+ rc = -EFAULT;
+ goto lb_free;
+ }
+
+ if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
+ rc = -EFAULT;
+ goto lb_free;
+ }
+
+ if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
+ rc = -EFAULT;
+ goto lb_free;
+ }
+
+lb_free:
+ lu_buf_free(&buf);
+ldata_free:
+ kfree(ldata);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 66ee5db5fce8..436691814a5e 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -43,9 +43,7 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
static const struct vm_operations_struct ll_file_vm_ops;
@@ -126,7 +124,7 @@ restart:
fio = &io->u.ci_fault;
fio->ft_index = index;
- fio->ft_executable = vma->vm_flags&VM_EXEC;
+ fio->ft_executable = vma->vm_flags & VM_EXEC;
/*
* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
@@ -134,7 +132,7 @@ restart:
* filemap_nopage. we do our readahead in ll_readpage.
*/
if (ra_flags)
- *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+ *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
vma->vm_flags &= ~VM_SEQ_READ;
vma->vm_flags |= VM_RAND_READ;
@@ -429,7 +427,6 @@ static void ll_vm_open(struct vm_area_struct *vma)
struct inode *inode = file_inode(vma->vm_file);
struct vvp_object *vob = cl_inode2vvp(inode);
- LASSERT(vma->vm_file);
LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
atomic_inc(&vob->vob_mmap_cnt);
}
@@ -442,7 +439,6 @@ static void ll_vm_close(struct vm_area_struct *vma)
struct inode *inode = file_inode(vma->vm_file);
struct vvp_object *vob = cl_inode2vvp(inode);
- LASSERT(vma->vm_file);
atomic_dec(&vob->vob_mmap_cnt);
LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 65972c892731..709230571b4b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -38,7 +38,6 @@
*/
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include <linux/exportfs.h>
@@ -73,11 +72,6 @@ void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid)
fsid->val[1] = key >> 32;
}
-static int ll_nfs_test_inode(struct inode *inode, void *opaque)
-{
- return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque);
-}
-
struct inode *search_inode_for_lustre(struct super_block *sb,
const struct lu_fid *fid)
{
@@ -92,7 +86,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
- inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid);
+ inode = ilookup5(sb, hash, ll_test_inode_by_fid, (void *)fid);
if (inode)
return inode;
@@ -153,12 +147,18 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
return ERR_PTR(-ESTALE);
}
+ result = d_obtain_alias(inode);
+ if (IS_ERR(result)) {
+ iput(inode);
+ return result;
+ }
+
/**
- * It is an anonymous dentry without OST objects created yet.
- * We have to find the parent to tell MDS how to init lov objects.
+ * In case d_obtain_alias() found a disconnected dentry, always update
+ * lli_pfid so that a later operation (normally open) has the parent
+ * FID available, which may be used by the MDS to create data.
*/
- if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
- parent && !fid_is_zero(parent)) {
+ if (parent) {
struct ll_inode_info *lli = ll_i2info(inode);
spin_lock(&lli->lli_lock);
@@ -255,6 +255,8 @@ static int ll_get_name(struct dentry *dentry, char *name,
.lgd_fid = ll_i2info(d_inode(child))->lli_fid,
.ctx.actor = ll_nfs_get_name_filldir,
};
+ struct md_op_data *op_data;
+ __u64 pos = 0;
if (!dir || !S_ISDIR(dir->i_mode)) {
rc = -ENOTDIR;
@@ -266,9 +268,18 @@ static int ll_get_name(struct dentry *dentry, char *name,
goto out;
}
+ op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
+ LUSTRE_OPC_ANY, dir);
+ if (IS_ERR(op_data)) {
+ rc = PTR_ERR(op_data);
+ goto out;
+ }
+
+ op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
inode_lock(dir);
- rc = ll_dir_read(dir, &lgd.ctx);
+ rc = ll_dir_read(dir, &pos, op_data, &lgd.ctx);
inode_unlock(dir);
+ ll_finish_md_op_data(op_data);
if (!rc && !lgd.lgd_found)
rc = -ENOENT;
out:
@@ -297,14 +308,12 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
}
-static struct dentry *ll_get_parent(struct dentry *dchild)
+int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid)
{
struct ptlrpc_request *req = NULL;
- struct inode *dir = d_inode(dchild);
struct ll_sb_info *sbi;
- struct dentry *result = NULL;
struct mdt_body *body;
- static char dotdot[] = "..";
+ static const char dotdot[] = "..";
struct md_op_data *op_data;
int rc;
int lmmsize;
@@ -319,13 +328,13 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc != 0)
- return ERR_PTR(rc);
+ return rc;
op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
strlen(dotdot), lmmsize,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- return (void *)op_data;
+ return PTR_ERR(op_data);
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
@@ -333,21 +342,36 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
ll_get_fsname(dir->i_sb, NULL, 0),
PFID(ll_inode2fid(dir)), rc);
- return ERR_PTR(rc);
+ return rc;
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
/*
* LU-3952: MDT may lost the FID of its parent, we should not crash
* the NFS server, ll_iget_for_nfs() will handle the error.
*/
- if (body->valid & OBD_MD_FLID) {
+ if (body->mbo_valid & OBD_MD_FLID) {
CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+ PFID(ll_inode2fid(dir)), PFID(&body->mbo_fid1));
+ *parent_fid = body->mbo_fid1;
}
- result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
ptlrpc_req_finished(req);
- return result;
+ return 0;
+}
+
+static struct dentry *ll_get_parent(struct dentry *dchild)
+{
+ struct lu_fid parent_fid = { 0 };
+ struct dentry *dentry;
+ int rc;
+
+ rc = ll_dir_get_parent_fid(dchild->d_inode, &parent_fid);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dentry = ll_iget_for_nfs(dchild->d_inode->i_sb, &parent_fid, NULL);
+
+ return dentry;
}
const struct export_operations lustre_export_operations = {
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index e86bf3c53be3..6eae60595905 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -31,7 +31,6 @@
*/
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "../include/lprocfs_status.h"
#include <linux/seq_file.h>
#include "../include/obd_support.h"
@@ -358,16 +357,16 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
int shift = 20 - PAGE_SHIFT;
- int max_cached_mb;
- int unused_mb;
+ long max_cached_mb;
+ long unused_mb;
max_cached_mb = cache->ccc_lru_max >> shift;
- unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
+ unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
seq_printf(m,
"users: %d\n"
- "max_cached_mb: %d\n"
- "used_mb: %d\n"
- "unused_mb: %d\n"
+ "max_cached_mb: %ld\n"
+ "used_mb: %ld\n"
+ "unused_mb: %ld\n"
"reclaim_count: %u\n",
atomic_read(&cache->ccc_users),
max_cached_mb,
@@ -385,10 +384,13 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
struct lu_env *env;
+ long diff = 0;
+ long nrpages = 0;
int refcheck;
- int mult, rc, pages_number;
- int diff = 0;
- int nrpages = 0;
+ long pages_number;
+ int mult;
+ long rc;
+ u64 val;
char kernbuf[128];
if (count >= sizeof(kernbuf))
@@ -401,10 +403,14 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
if (rc)
return rc;
+ if (val > LONG_MAX)
+ return -ERANGE;
+ pages_number = (long)val;
+
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
@@ -418,7 +424,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
/* easy - add more LRU slots. */
if (diff >= 0) {
- atomic_add(diff, &cache->ccc_lru_left);
+ atomic_long_add(diff, &cache->ccc_lru_left);
rc = 0;
goto out;
}
@@ -429,18 +435,18 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
diff = -diff;
while (diff > 0) {
- int tmp;
+ long tmp;
/* reduce LRU budget from free slots. */
do {
- int ov, nv;
+ long ov, nv;
- ov = atomic_read(&cache->ccc_lru_left);
+ ov = atomic_long_read(&cache->ccc_lru_left);
if (ov == 0)
break;
nv = ov > diff ? ov - diff : 0;
- rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+ rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
if (likely(ov == rc)) {
diff -= ov - nv;
nrpages += ov - nv;
@@ -474,7 +480,7 @@ out:
spin_unlock(&sbi->ll_lock);
rc = count;
} else {
- atomic_add(nrpages, &cache->ccc_lru_left);
+ atomic_long_add(nrpages, &cache->ccc_lru_left);
}
return rc;
}
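
The shrink branch above takes budget back from ccc_lru_left with a lock-free compare-and-swap loop. The fragment below is a minimal stand-alone sketch of that pattern on a generic atomic_long_t counter; the function and variable names are illustrative only and do not exist in the driver.

#include <linux/atomic.h>

/*
 * Reduce "budget" by up to "want" pages without taking a lock, retrying
 * whenever another thread races on the counter.  Returns the number of
 * pages actually reclaimed; mirrors the ccc_lru_left loop above.
 */
static long reclaim_from_budget(atomic_long_t *budget, long want)
{
	long reclaimed = 0;

	while (want > 0) {
		long ov, nv, old;

		ov = atomic_long_read(budget);
		if (ov == 0)
			break;			/* nothing left to take */

		nv = ov > want ? ov - want : 0;	/* never go below zero */
		old = atomic_long_cmpxchg(budget, ov, nv);
		if (old == ov) {		/* our update won the race */
			want -= ov - nv;
			reclaimed += ov - nv;
		}
		/* otherwise someone else moved the counter; re-read and retry */
	}

	return reclaimed;
}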
@@ -738,6 +744,18 @@ static ssize_t max_easize_show(struct kobject *kobj,
}
LUSTRE_RO_ATTR(max_easize);
+/**
+ * Get default_easize.
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] kobj kernel object for sysfs tree
+ * \param[in] attr attribute of this kernel object
+ * \param[in] buf buffer to write data into
+ *
+ * \retval positive \a count on success
+ * \retval negative negated errno on failure
+ */
static ssize_t default_easize_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
@@ -753,7 +771,44 @@ static ssize_t default_easize_show(struct kobject *kobj,
return sprintf(buf, "%u\n", ealen);
}
-LUSTRE_RO_ATTR(default_easize);
+
+/**
+ * Set default_easize.
+ *
+ * Range checking on the passed value is handled by
+ * ll_set_default_mdsize().
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] kobj kernel object for sysfs tree
+ * \param[in] attr attribute of this kernel object
+ * \param[in] buffer string passed from user space
+ * \param[in] count \a buffer length
+ *
+ * \retval positive \a count on success
+ * \retval negative negated errno on failure
+ */
+static ssize_t default_easize_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t count)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ rc = ll_set_default_mdsize(sbi, val);
+ if (rc)
+ return rc;
+
+ return count;
+}
+LUSTRE_RW_ATTR(default_easize);
static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
{
@@ -774,7 +829,7 @@ static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
flags >>= 1;
++i;
}
- seq_printf(m, "\b\n");
+ seq_puts(m, "\b\n");
return 0;
}
@@ -823,15 +878,116 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kobj);
struct cl_client_cache *cache = sbi->ll_cache;
- int pages, mb;
+ long pages;
+ int mb;
- pages = atomic_read(&cache->ccc_unstable_nr);
+ pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_pages: %8d\n"
- "unstable_mb: %8d\n", pages, mb);
+ return sprintf(buf, "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
+ cache->ccc_unstable_check, pages, mb);
}
-LUSTRE_RO_ATTR(unstable_stats);
+
+static ssize_t unstable_stats_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t count)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ char kernbuf[128];
+ int val, rc;
+
+ if (!count)
+ return 0;
+ if (count >= sizeof(kernbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+ kernbuf[count] = 0;
+
+ buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
+ kernbuf;
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ /* borrow lru lock to set the value */
+ spin_lock(&sbi->ll_cache->ccc_lru_lock);
+ sbi->ll_cache->ccc_unstable_check = !!val;
+ spin_unlock(&sbi->ll_cache->ccc_lru_lock);
+
+ return count;
+}
+LUSTRE_RW_ATTR(unstable_stats);
+
+static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ struct root_squash_info *squash = &sbi->ll_squash;
+
+ return sprintf(buf, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
+}
+
+static ssize_t root_squash_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ struct root_squash_info *squash = &sbi->ll_squash;
+
+ return lprocfs_wr_root_squash(buffer, count, squash,
+ ll_get_fsname(sbi->ll_sb, NULL, 0));
+}
+LUSTRE_RW_ATTR(root_squash);
+
+static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct root_squash_info *squash = &sbi->ll_squash;
+ int len;
+
+ down_read(&squash->rsi_sem);
+ if (!list_empty(&squash->rsi_nosquash_nids)) {
+ len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
+ &squash->rsi_nosquash_nids);
+ m->count += len;
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "NONE\n");
+ }
+ up_read(&squash->rsi_sem);
+
+ return 0;
+}
+
+static ssize_t ll_nosquash_nids_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *m = file->private_data;
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct root_squash_info *squash = &sbi->ll_squash;
+ int rc;
+
+ rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
+ ll_get_fsname(sb, NULL, 0));
+ if (rc < 0)
+ return rc;
+
+ ll_compute_rootsquash_state(sbi);
+
+ return rc;
+}
+
+LPROC_SEQ_FOPS(ll_nosquash_nids);
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "mntpt_path", ll_rd_path, 0, 0 }, */
@@ -840,6 +996,8 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "max_cached_mb", &ll_max_cached_mb_fops, NULL },
{ "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
{ "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
+ { .name = "nosquash_nids",
+ .fops = &ll_nosquash_nids_fops },
{ NULL }
};
@@ -869,6 +1027,7 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
&lustre_attr_unstable_stats.attr,
+ &lustre_attr_root_squash.attr,
NULL,
};
@@ -893,17 +1052,17 @@ static const struct llite_file_opcode {
/* file operation */
{ LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
{ LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
- { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"read_bytes" },
- { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"write_bytes" },
- { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_read" },
- { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_write" },
- { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"osc_read" },
- { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"osc_write" },
{ LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
{ LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
@@ -1150,7 +1309,7 @@ static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
r, pct(r, read_tot), pct(read_cum, read_tot),
w, pct(w, write_tot), pct(write_cum, write_tot));
start = end;
- if (start == 1<<10) {
+ if (start == 1 << 10) {
start = 1;
units += 10;
unitp++;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 2c4dc69731e8..dfa36d34c645 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -42,13 +42,12 @@
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "llite_internal.h"
-static int ll_create_it(struct inode *, struct dentry *,
- int, struct lookup_intent *);
+static int ll_create_it(struct inode *dir, struct dentry *dentry,
+ struct lookup_intent *it);
/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
static int ll_test_inode(struct inode *inode, void *opaque)
@@ -56,12 +55,12 @@ static int ll_test_inode(struct inode *inode, void *opaque)
struct ll_inode_info *lli = ll_i2info(inode);
struct lustre_md *md = opaque;
- if (unlikely(!(md->body->valid & OBD_MD_FLID))) {
+ if (unlikely(!(md->body->mbo_valid & OBD_MD_FLID))) {
CERROR("MDS body missing FID\n");
return 0;
}
- if (!lu_fid_eq(&lli->lli_fid, &md->body->fid1))
+ if (!lu_fid_eq(&lli->lli_fid, &md->body->mbo_fid1))
return 0;
return 1;
@@ -72,20 +71,20 @@ static int ll_set_inode(struct inode *inode, void *opaque)
struct ll_inode_info *lli = ll_i2info(inode);
struct mdt_body *body = ((struct lustre_md *)opaque)->body;
- if (unlikely(!(body->valid & OBD_MD_FLID))) {
+ if (unlikely(!(body->mbo_valid & OBD_MD_FLID))) {
CERROR("MDS body missing FID\n");
return -EINVAL;
}
- lli->lli_fid = body->fid1;
- if (unlikely(!(body->valid & OBD_MD_FLTYPE))) {
+ lli->lli_fid = body->mbo_fid1;
+ if (unlikely(!(body->mbo_valid & OBD_MD_FLTYPE))) {
CERROR("Can not initialize inode " DFID
" without object type: valid = %#llx\n",
- PFID(&lli->lli_fid), body->valid);
+ PFID(&lli->lli_fid), body->mbo_valid);
return -EINVAL;
}
- inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mode & S_IFMT);
+ inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT);
if (unlikely(inode->i_mode == 0)) {
CERROR("Invalid inode "DFID" type\n", PFID(&lli->lli_fid));
return -EINVAL;
@@ -96,41 +95,45 @@ static int ll_set_inode(struct inode *inode, void *opaque)
return 0;
}
-/*
- * Get an inode by inode number (already instantiated by the intent lookup).
- * Returns inode or NULL
+/**
+ * Get an inode by inode number (@hash), which has already been
+ * instantiated by the intent lookup.
*/
struct inode *ll_iget(struct super_block *sb, ino_t hash,
struct lustre_md *md)
{
struct inode *inode;
+ int rc = 0;
LASSERT(hash != 0);
inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
-
- if (inode) {
- if (inode->i_state & I_NEW) {
- int rc = 0;
-
- ll_read_inode2(inode, md);
- if (S_ISREG(inode->i_mode) &&
- !ll_i2info(inode)->lli_clob) {
- CDEBUG(D_INODE,
- "%s: apply lsm %p to inode " DFID ".\n",
- ll_get_fsname(sb, NULL, 0), md->lsm,
- PFID(ll_inode2fid(inode)));
- rc = cl_file_inode_init(inode, md);
- }
- if (rc != 0) {
- iget_failed(inode);
- inode = NULL;
- } else {
- unlock_new_inode(inode);
- }
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
- ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p)\n",
- PFID(&md->body->fid1), inode);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+ rc = ll_read_inode2(inode, md);
+ if (!rc && S_ISREG(inode->i_mode) &&
+ !ll_i2info(inode)->lli_clob) {
+ CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n",
+ ll_get_fsname(sb, NULL, 0), md->lsm,
+ PFID(ll_inode2fid(inode)));
+ rc = cl_file_inode_init(inode, md);
+ }
+ if (rc) {
+ make_bad_inode(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ inode = ERR_PTR(rc);
+ } else {
+ unlock_new_inode(inode);
+ }
+ } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
+ rc = ll_update_inode(inode, md);
+ CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
+ PFID(&md->body->mbo_fid1), inode, rc);
+ if (rc) {
+ iput(inode);
+ inode = ERR_PTR(rc);
}
}
return inode;
@@ -158,6 +161,11 @@ static void ll_invalidate_negative_children(struct inode *dir)
spin_unlock(&dir->i_lock);
}
+int ll_test_inode_by_fid(struct inode *inode, void *opaque)
+{
+ return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque);
+}
+
int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
@@ -196,6 +204,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if (bits & MDS_INODELOCK_XATTR) {
+ if (S_ISDIR(inode->i_mode))
+ ll_i2info(inode)->lli_def_stripe_offset = -1;
ll_xattr_cache_destroy(inode);
bits &= ~MDS_INODELOCK_XATTR;
}
@@ -253,10 +263,41 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode "DFID"\n",
- PFID(ll_inode2fid(inode)));
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ CDEBUG(D_INODE, "invalidating inode "DFID" lli = %p, pfid = "DFID"\n",
+ PFID(ll_inode2fid(inode)), lli,
+ PFID(&lli->lli_pfid));
+
truncate_inode_pages(inode->i_mapping, 0);
- ll_invalidate_negative_children(inode);
+
+ if (unlikely(!fid_is_zero(&lli->lli_pfid))) {
+ struct inode *master_inode = NULL;
+ unsigned long hash;
+
+ /*
+ * This is a slave inode; since all of the child
+ * dentries are attached to the master inode, we
+ * have to invalidate the negative children on
+ * the master inode.
+ */
+ CDEBUG(D_INODE, "Invalidate s"DFID" m"DFID"\n",
+ PFID(ll_inode2fid(inode)),
+ PFID(&lli->lli_pfid));
+
+ hash = cl_fid_build_ino(&lli->lli_pfid,
+ ll_need_32bit_api(ll_i2sbi(inode)));
+
+ master_inode = ilookup5(inode->i_sb, hash,
+ ll_test_inode_by_fid,
+ (void *)&lli->lli_pfid);
+ if (master_inode && !IS_ERR(master_inode)) {
+ ll_invalidate_negative_children(master_inode);
+ iput(master_inode);
+ }
+ } else {
+ ll_invalidate_negative_children(inode);
+ }
}
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
@@ -322,7 +363,8 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
LASSERT(alias != dentry);
spin_lock(&alias->d_lock);
- if (alias->d_flags & DCACHE_DISCONNECTED)
+ if ((alias->d_flags & DCACHE_DISCONNECTED) &&
+ S_ISDIR(inode->i_mode))
/* LASSERT(last_discon == NULL); LU-405, bz 20055 */
discon_alias = alias;
else if (alias->d_parent == dentry->d_parent &&
@@ -433,9 +475,20 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
struct lookup_intent parent_it = {
.it_op = IT_GETATTR,
.it_lock_handle = 0 };
+ struct lu_fid fid = ll_i2info(parent)->lli_fid;
+
+ /* If it is striped directory, get the real stripe parent */
+ if (unlikely(ll_i2info(parent)->lli_lsm_md)) {
+ rc = md_get_fid_from_lsm(ll_i2mdexp(parent),
+ ll_i2info(parent)->lli_lsm_md,
+ (*de)->d_name.name,
+ (*de)->d_name.len, &fid);
+ if (rc)
+ return rc;
+ }
- if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
- &ll_i2info(parent)->lli_fid, NULL)) {
+ if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it, &fid,
+ NULL)) {
d_lustre_revalidate(*de);
ll_intent_release(&parent_it);
}
@@ -449,13 +502,13 @@ out:
}
static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
- struct lookup_intent *it, int lookup_flags)
+ struct lookup_intent *it)
{
struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
struct dentry *save = dentry, *retval;
struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data = NULL;
struct inode *inode;
- struct md_op_data *op_data;
__u32 opc;
int rc;
@@ -471,8 +524,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (!it || it->it_op == IT_GETXATTR)
it = &lookup_it;
- if (it->it_op == IT_GETATTR) {
- rc = ll_statahead_enter(parent, &dentry, 0);
+ if (it->it_op == IT_GETATTR && dentry_may_statahead(parent, dentry)) {
+ rc = ll_statahead(parent, &dentry, 0);
if (rc == 1) {
if (dentry == save)
retval = NULL;
@@ -488,8 +541,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
opc = LUSTRE_OPC_ANY;
op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name,
- dentry->d_name.len, lookup_flags, opc,
- NULL);
+ dentry->d_name.len, 0, opc, NULL);
if (IS_ERR(op_data))
return (void *)op_data;
@@ -497,9 +549,38 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
it->it_create_mode &= ~current_umask();
- rc = md_intent_lock(ll_i2mdexp(parent), op_data, NULL, 0, it,
- lookup_flags, &req, ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
+ rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
+ &ll_md_blocking_ast, 0);
+ /*
+ * If the MDS allows the client to chgrp (CFS_SETGRP_PERM), but the
+ * client does not know which suppgid should be sent to the MDS, or
+ * someone else changed the target file's GID after this RPC was sent
+ * to the MDS with the original GID as the suppgid, then we should
+ * try again with the right suppgid.
+ */
+ if (rc == -EACCES && it->it_op & IT_OPEN &&
+ it_disposition(it, DISP_OPEN_DENY)) {
+ struct mdt_body *body;
+
+ LASSERT(req);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (op_data->op_suppgids[0] == body->mbo_gid ||
+ op_data->op_suppgids[1] == body->mbo_gid ||
+ !in_group_p(make_kgid(&init_user_ns, body->mbo_gid))) {
+ retval = ERR_PTR(-EACCES);
+ goto out;
+ }
+
+ fid_zero(&op_data->op_fid2);
+ op_data->op_suppgids[1] = body->mbo_gid;
+ ptlrpc_req_finished(req);
+ req = NULL;
+ ll_intent_release(it);
+ rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
+ ll_md_blocking_ast, 0);
+ }
+
if (rc < 0) {
retval = ERR_PTR(rc);
goto out;
@@ -524,11 +605,11 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
retval = NULL;
else
retval = dentry;
- out:
- if (req)
- ptlrpc_req_finished(req);
- if (it->it_op == IT_GETATTR && (!retval || retval == dentry))
- ll_statahead_mark(parent, dentry);
+out:
+ if (op_data && !IS_ERR(op_data))
+ ll_finish_md_op_data(op_data);
+
+ ptlrpc_req_finished(req);
return retval;
}
@@ -541,15 +622,19 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
dentry, PFID(ll_inode2fid(parent)), parent, flags);
- /* Optimize away (CREATE && !OPEN). Let .create handle the race. */
- if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
+ /* Optimize away (CREATE && !OPEN) and let .create handle the race,
+ * but only if we have write permission there; otherwise we need
+ * to proceed with the lookup. LU-4185
+ */
+ if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN) &&
+ (inode_permission(parent, MAY_WRITE | MAY_EXEC) == 0))
return NULL;
- if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
+ if (flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
itp = NULL;
else
itp = &it;
- de = ll_lookup_it(parent, dentry, itp, 0);
+ de = ll_lookup_it(parent, dentry, itp);
if (itp)
ll_intent_release(itp);
@@ -567,7 +652,6 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
{
struct lookup_intent *it;
struct dentry *de;
- long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
@@ -597,15 +681,14 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
return -ENOMEM;
it->it_op = IT_OPEN;
- if (open_flags & O_CREAT) {
+ if (open_flags & O_CREAT)
it->it_op |= IT_CREAT;
- lookup_flags |= LOOKUP_CREATE;
- }
it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
+ it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
/* Dentry added to dcache tree in ll_lookup_it */
- de = ll_lookup_it(dir, dentry, it, lookup_flags);
+ de = ll_lookup_it(dir, dentry, it);
if (IS_ERR(de))
rc = PTR_ERR(de);
else if (de)
@@ -614,7 +697,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
if (!rc) {
if (it_disposition(it, DISP_OPEN_CREATE)) {
/* Dentry instantiated in ll_create_it. */
- rc = ll_create_it(dir, dentry, mode, it);
+ rc = ll_create_it(dir, dentry, it);
if (rc) {
/* We dget in ll_splice_alias. */
if (de)
@@ -700,7 +783,7 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
+static int ll_create_it(struct inode *dir, struct dentry *dentry,
struct lookup_intent *it)
{
struct inode *inode;
@@ -721,27 +804,26 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
return 0;
}
-static void ll_update_times(struct ptlrpc_request *request,
- struct inode *inode)
+void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
{
struct mdt_body *body = req_capsule_server_get(&request->rq_pill,
&RMF_MDT_BODY);
LASSERT(body);
- if (body->valid & OBD_MD_FLMTIME &&
- body->mtime > LTIME_S(inode->i_mtime)) {
+ if (body->mbo_valid & OBD_MD_FLMTIME &&
+ body->mbo_mtime > LTIME_S(inode->i_mtime)) {
CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
- body->mtime);
- LTIME_S(inode->i_mtime) = body->mtime;
+ body->mbo_mtime);
+ LTIME_S(inode->i_mtime) = body->mbo_mtime;
}
- if (body->valid & OBD_MD_FLCTIME &&
- body->ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->ctime;
+ if (body->mbo_valid & OBD_MD_FLCTIME &&
+ body->mbo_ctime > LTIME_S(inode->i_ctime))
+ LTIME_S(inode->i_ctime) = body->mbo_ctime;
}
static int ll_new_node(struct inode *dir, struct dentry *dentry,
- const char *tgt, int mode, int rdev,
+ const char *tgt, umode_t mode, int rdev,
__u32 opc)
{
struct ptlrpc_request *request = NULL;
@@ -753,7 +835,7 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry,
if (unlikely(tgt))
tgt_len = strlen(tgt) + 1;
-
+again:
op_data = ll_prep_md_op_data(NULL, dir, NULL,
dentry->d_name.name,
dentry->d_name.len,
@@ -768,9 +850,45 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry,
from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), rdev, &request);
ll_finish_md_op_data(op_data);
- if (err)
+ if (err < 0 && err != -EREMOTE)
goto err_exit;
+ /*
+ * If the client doesn't know where to create a subdirectory (or
+ * in case of a race that sends the RPC to the wrong MDS), the
+ * MDS will return -EREMOTE and the client will fetch the layout
+ * of the directory, then create the directory on the right MDT.
+ */
+ if (unlikely(err == -EREMOTE)) {
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct lmv_user_md *lum;
+ int lumsize, err2;
+
+ ptlrpc_req_finished(request);
+ request = NULL;
+
+ err2 = ll_dir_getstripe(dir, (void **)&lum, &lumsize, &request,
+ OBD_MD_DEFAULT_MEA);
+ if (!err2) {
+ /* Update stripe_offset and retry */
+ lli->lli_def_stripe_offset = lum->lum_stripe_offset;
+ } else if (err2 == -ENODATA &&
+ lli->lli_def_stripe_offset != -1) {
+ /*
+ * If there is no default stripe EA on the MDT but the
+ * client has one, it probably means the default stripe
+ * EA has just been deleted.
+ */
+ lli->lli_def_stripe_offset = -1;
+ } else {
+ goto err_exit;
+ }
+
+ ptlrpc_req_finished(request);
+ request = NULL;
+ goto again;
+ }
+
ll_update_times(request, dir);
err = ll_prep_inode(&inode, request, dir->i_sb, NULL);
@@ -779,7 +897,8 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
err_exit:
- ptlrpc_req_finished(request);
+ if (request)
+ ptlrpc_req_finished(request);
return err;
}
@@ -842,77 +961,6 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
return rc;
}
-int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
-{
- struct mdt_body *body;
- struct lov_mds_md *eadata;
- struct lov_stripe_md *lsm = NULL;
- struct obd_trans_info oti = { 0 };
- struct obdo *oa;
- int rc;
-
- /* req is swabbed so this is safe */
- body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- if (!(body->valid & OBD_MD_FLEASIZE))
- return 0;
-
- if (body->eadatasize == 0) {
- CERROR("OBD_MD_FLEASIZE set but eadatasize zero\n");
- rc = -EPROTO;
- goto out;
- }
-
- /* The MDS sent back the EA because we unlinked the last reference
- * to this file. Use this EA to unlink the objects on the OST.
- * It's opaque so we don't swab here; we leave it to obd_unpackmd() to
- * check it is complete and sensible.
- */
- eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD,
- body->eadatasize);
- LASSERT(eadata);
-
- rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize);
- if (rc < 0) {
- CERROR("obd_unpackmd: %d\n", rc);
- goto out;
- }
- LASSERT(rc >= sizeof(*lsm));
-
- oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!oa) {
- rc = -ENOMEM;
- goto out_free_memmd;
- }
-
- oa->o_oi = lsm->lsm_oi;
- oa->o_mode = body->mode & S_IFMT;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLGROUP;
-
- if (body->valid & OBD_MD_FLCOOKIE) {
- oa->o_valid |= OBD_MD_FLCOOKIE;
- oti.oti_logcookies =
- req_capsule_server_sized_get(&request->rq_pill,
- &RMF_LOGCOOKIES,
- sizeof(struct llog_cookie) *
- lsm->lsm_stripe_count);
- if (!oti.oti_logcookies) {
- oa->o_valid &= ~OBD_MD_FLCOOKIE;
- body->valid &= ~OBD_MD_FLCOOKIE;
- }
- }
-
- rc = obd_destroy(NULL, ll_i2dtexp(dir), oa, lsm, &oti,
- ll_i2mdexp(dir));
- if (rc)
- CERROR("obd destroy objid "DOSTID" error %d\n",
- POSTID(&lsm->lsm_oi), rc);
-out_free_memmd:
- obd_free_memmd(ll_i2dtexp(dir), &lsm);
- kmem_cache_free(obdo_cachep, oa);
-out:
- return rc;
-}
-
/* ll_unlink() doesn't update the inode with the new link count.
* Instead, ll_ddelete() and ll_d_iput() will update it based upon if there
* is any lock existing. They will recycle dentries and inodes based upon locks
@@ -934,7 +982,7 @@ static int ll_unlink(struct inode *dir, struct dentry *dchild)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- if (dchild && dchild->d_inode)
+ if (dchild->d_inode)
op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
op_data->op_fid2 = op_data->op_fid3;
@@ -946,7 +994,6 @@ static int ll_unlink(struct inode *dir, struct dentry *dchild)
ll_update_times(request, dir);
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
- rc = ll_objects_destroy(request, dir);
out:
ptlrpc_req_finished(request);
return rc;
@@ -961,9 +1008,9 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
- mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
- err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
+ mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
+ err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
@@ -986,7 +1033,7 @@ static int ll_rmdir(struct inode *dir, struct dentry *dchild)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- if (dchild && dchild->d_inode)
+ if (dchild->d_inode)
op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
op_data->op_fid2 = op_data->op_fid3;
@@ -1067,9 +1114,9 @@ static int ll_rename(struct inode *src, struct dentry *src_dchild,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- if (src_dchild && src_dchild->d_inode)
+ if (src_dchild->d_inode)
op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
- if (tgt_dchild && tgt_dchild->d_inode)
+ if (tgt_dchild->d_inode)
op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
err = md_rename(sbi->ll_md_exp, op_data,
@@ -1082,7 +1129,6 @@ static int ll_rename(struct inode *src, struct dentry *src_dchild,
ll_update_times(request, src);
ll_update_times(request, tgt);
ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- err = ll_objects_destroy(request, src);
}
ptlrpc_req_finished(request);
@@ -1106,10 +1152,10 @@ const struct inode_operations ll_dir_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
+ .removexattr = generic_removexattr,
.get_acl = ll_get_acl,
};
@@ -1117,9 +1163,9 @@ const struct inode_operations ll_special_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
+ .removexattr = generic_removexattr,
.get_acl = ll_get_acl,
};
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c
new file mode 100644
index 000000000000..94c818f1478b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/range_lock.c
@@ -0,0 +1,233 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Range lock is used to allow multiple threads writing a single shared
+ * file given each thread is writing to a non-overlapping portion of the
+ * file.
+ *
+ * Refer to the possible upstream kernel version of range lock by
+ * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
+ *
+ * This file could later replaced by the upstream kernel version.
+ */
+/*
+ * Author: Prakash Surya <surya1@llnl.gov>
+ * Author: Bobi Jam <bobijam.xu@intel.com>
+ */
+#include "range_lock.h"
+#include "../include/lustre/lustre_user.h"
+
+/**
+ * Initialize a range lock tree
+ *
+ * \param tree [in] an empty range lock tree
+ *
+ * Pre: Caller should have allocated the range lock tree.
+ * Post: The range lock tree is ready to function.
+ */
+void range_lock_tree_init(struct range_lock_tree *tree)
+{
+ tree->rlt_root = NULL;
+ tree->rlt_sequence = 0;
+ spin_lock_init(&tree->rlt_lock);
+}
+
+/**
+ * Initialize a range lock node
+ *
+ * \param lock [in] an empty range lock node
+ * \param start [in] start of the covering region
+ * \param end [in] end of the covering region
+ *
+ * Pre: Caller should have allocated the range lock node.
+ * Post: The range lock node is meant to cover [start, end] region
+ */
+void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
+{
+ memset(&lock->rl_node, 0, sizeof(lock->rl_node));
+ if (end != LUSTRE_EOF)
+ end >>= PAGE_SHIFT;
+ interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+ INIT_LIST_HEAD(&lock->rl_next_lock);
+ lock->rl_task = NULL;
+ lock->rl_lock_count = 0;
+ lock->rl_blocking_ranges = 0;
+ lock->rl_sequence = 0;
+}
+
+static inline struct range_lock *next_lock(struct range_lock *lock)
+{
+ return list_entry(lock->rl_next_lock.next, typeof(*lock), rl_next_lock);
+}
+
+/**
+ * Helper function of range_unlock()
+ *
+ * \param node [in] a range lock found to overlap during the interval
+ * node search
+ * \param arg [in] the range lock to be tested
+ *
+ * \retval INTERVAL_ITER_CONT indicates the search should continue to the
+ * next overlapping range node
+ * \retval INTERVAL_ITER_STOP indicates the search should stop
+ */
+static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
+{
+ struct range_lock *lock = arg;
+ struct range_lock *overlap = node2rangelock(node);
+ struct range_lock *iter;
+
+ list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) {
+ if (iter->rl_sequence > lock->rl_sequence) {
+ --iter->rl_blocking_ranges;
+ LASSERT(iter->rl_blocking_ranges > 0);
+ }
+ }
+ if (overlap->rl_sequence > lock->rl_sequence) {
+ --overlap->rl_blocking_ranges;
+ if (overlap->rl_blocking_ranges == 0)
+ wake_up_process(overlap->rl_task);
+ }
+ return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Unlock a range lock, wake up locks blocked by this lock.
+ *
+ * \param tree [in] range lock tree
+ * \param lock [in] range lock to be deleted
+ *
+ * If this lock has been granted, release it; if not, just delete it from
+ * the tree or from the same-region lock list. Wake up those locks that were
+ * blocked only by this lock, via range_unlock_cb().
+ */
+void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
+{
+ spin_lock(&tree->rlt_lock);
+ if (!list_empty(&lock->rl_next_lock)) {
+ struct range_lock *next;
+
+ if (interval_is_intree(&lock->rl_node)) { /* first lock */
+ /* Insert the next same range lock into the tree */
+ next = next_lock(lock);
+ next->rl_lock_count = lock->rl_lock_count - 1;
+ interval_erase(&lock->rl_node, &tree->rlt_root);
+ interval_insert(&next->rl_node, &tree->rlt_root);
+ } else {
+ /* find the first lock in tree */
+ list_for_each_entry(next, &lock->rl_next_lock,
+ rl_next_lock) {
+ if (!interval_is_intree(&next->rl_node))
+ continue;
+
+ LASSERT(next->rl_lock_count > 0);
+ next->rl_lock_count--;
+ break;
+ }
+ }
+ list_del_init(&lock->rl_next_lock);
+ } else {
+ LASSERT(interval_is_intree(&lock->rl_node));
+ interval_erase(&lock->rl_node, &tree->rlt_root);
+ }
+
+ interval_search(tree->rlt_root, &lock->rl_node.in_extent,
+ range_unlock_cb, lock);
+ spin_unlock(&tree->rlt_lock);
+}
+
+/**
+ * Helper function of range_lock()
+ *
+ * \param node [in] a range lock found to overlap during the interval
+ * node search
+ * \param arg [in] the range lock to be tested
+ *
+ * \retval INTERVAL_ITER_CONT indicates the search should continue to the
+ * next overlapping range node
+ * \retval INTERVAL_ITER_STOP indicates the search should stop
+ */
+static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
+{
+ struct range_lock *lock = (struct range_lock *)arg;
+ struct range_lock *overlap = node2rangelock(node);
+
+ lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
+ return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Lock a region
+ *
+ * \param tree [in] range lock tree
+ * \param lock [in] range lock node containing the region span
+ *
+ * \retval 0 the range lock was granted
+ * \retval <0 error code if the range lock could not be granted
+ *
+ * If an overlapping range lock already exists, the new lock will wait
+ * and retry; if it later finds that it is not the one chosen to wake up,
+ * it waits again.
+ */
+int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
+{
+ struct interval_node *node;
+ int rc = 0;
+
+ spin_lock(&tree->rlt_lock);
+ /*
+ * We need to check for all conflicting intervals
+ * already in the tree.
+ */
+ interval_search(tree->rlt_root, &lock->rl_node.in_extent,
+ range_lock_cb, lock);
+ /*
+ * Insert into the tree if this range is unique; otherwise the lock is
+ * linked below to the rl_next_lock list of the existing lock that
+ * covers the same range.
+ */
+ node = interval_insert(&lock->rl_node, &tree->rlt_root);
+ if (node) {
+ struct range_lock *tmp = node2rangelock(node);
+
+ list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock);
+ tmp->rl_lock_count++;
+ }
+ lock->rl_sequence = ++tree->rlt_sequence;
+
+ while (lock->rl_blocking_ranges > 0) {
+ lock->rl_task = current;
+ __set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&tree->rlt_lock);
+ schedule();
+
+ if (signal_pending(current)) {
+ range_unlock(tree, lock);
+ rc = -EINTR;
+ goto out;
+ }
+ spin_lock(&tree->rlt_lock);
+ }
+ spin_unlock(&tree->rlt_lock);
+out:
+ return rc;
+}
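
A hedged sketch of how a caller might use this API (the real Lustre consumer sits elsewhere in llite and is not part of this hunk): one range_lock_tree per shared file, a range_lock taken around each non-overlapping write, and range_unlock afterwards. Apart from the range_lock API itself, the names below are made up for illustration.

#include "range_lock.h"

/* Illustrative per-file state: one tree serializing overlapping writers. */
struct demo_file_state {
	struct range_lock_tree dfs_tree;
};

static void demo_file_state_init(struct demo_file_state *dfs)
{
	range_lock_tree_init(&dfs->dfs_tree);
}

/* Serialize a write covering bytes [start, end]; LUSTRE_EOF means "to EOF". */
static int demo_ranged_write(struct demo_file_state *dfs, __u64 start, __u64 end)
{
	struct range_lock range;
	int rc;

	range_lock_init(&range, start, end);

	rc = range_lock(&dfs->dfs_tree, &range);
	if (rc < 0)
		return rc;	/* -EINTR: interrupted while waiting */

	/* ... perform the actual write to [start, end] here ... */

	range_unlock(&dfs->dfs_tree, &range);
	return 0;
}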
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.h b/drivers/staging/lustre/lustre/llite/range_lock.h
new file mode 100644
index 000000000000..c6d04a6f99fd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/range_lock.h
@@ -0,0 +1,82 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * A range lock allows multiple threads to write to a single shared
+ * file concurrently, provided each thread writes to a non-overlapping
+ * portion of the file.
+ *
+ * Refer to the possible upstream kernel version of range lock by
+ * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
+ *
+ * This file could later be replaced by the upstream kernel version.
+ */
+/*
+ * Author: Prakash Surya <surya1@llnl.gov>
+ * Author: Bobi Jam <bobijam.xu@intel.com>
+ */
+#ifndef _RANGE_LOCK_H
+#define _RANGE_LOCK_H
+
+#include "../../include/linux/libcfs/libcfs.h"
+#include "../include/interval_tree.h"
+
+struct range_lock {
+ struct interval_node rl_node;
+ /**
+ * Process to enqueue this lock.
+ */
+ struct task_struct *rl_task;
+ /**
+ * List of locks with the same range.
+ */
+ struct list_head rl_next_lock;
+ /**
+ * Number of locks in the list rl_next_lock
+ */
+ unsigned int rl_lock_count;
+ /**
+ * Number of ranges which are blocking acquisition of the lock
+ */
+ unsigned int rl_blocking_ranges;
+ /**
+ * Sequence number of the range lock. This number is used to determine
+ * the order in which locks are queued; this is required for range_cancel().
+ */
+ __u64 rl_sequence;
+};
+
+static inline struct range_lock *node2rangelock(const struct interval_node *n)
+{
+ return container_of(n, struct range_lock, rl_node);
+}
+
+struct range_lock_tree {
+ struct interval_node *rlt_root;
+ spinlock_t rlt_lock; /* protect range lock tree */
+ __u64 rlt_sequence;
+};
+
+void range_lock_tree_init(struct range_lock_tree *tree);
+void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
+int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
+void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
+#endif
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 87393c4bd51e..50c0152ba022 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -50,10 +50,8 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
@@ -413,7 +411,7 @@ static int ll_read_ahead_pages(const struct lu_env *env,
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented
*/
- LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
+ LASSERTF(page_idx >= ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
page_idx,
ria->ria_start, ria->ria_end, ria->ria_stoff,
ria->ria_length, ria->ria_pages);
@@ -474,10 +472,22 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
}
/* Reserve a part of the read-ahead window that we'll be issuing */
- if (ras->ras_window_len) {
- start = ras->ras_next_readahead;
+ if (ras->ras_window_len > 0) {
+ /*
+ * Note: another thread might roll back ras_next_readahead
+ * if it cannot get the full number of prepared pages; see the
+ * end of this function. For stride read-ahead, we need to
+ * make sure the offset is no less than ras_stride_offset,
+ * so that stride read-ahead can work correctly.
+ */
+ if (stride_io_mode(ras))
+ start = max(ras->ras_next_readahead,
+ ras->ras_stride_offset);
+ else
+ start = ras->ras_next_readahead;
end = ras->ras_window_start + ras->ras_window_len - 1;
}
+
if (end != 0) {
unsigned long rpc_boundary;
/*
@@ -648,10 +658,11 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras,
{
unsigned long stride_gap = index - ras->ras_last_readpage - 1;
- if (!stride_io_mode(ras) && (stride_gap != 0 ||
- ras->ras_consecutive_stride_requests == 0)) {
+ if ((stride_gap != 0 || ras->ras_consecutive_stride_requests == 0) &&
+ !stride_io_mode(ras)) {
ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
+ ras->ras_stride_length = ras->ras_consecutive_pages +
+ stride_gap;
}
LASSERT(ras->ras_request_index == 0);
LASSERT(ras->ras_consecutive_stride_requests == 0);
@@ -663,10 +674,9 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras,
}
ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
RAS_CDEBUG(ras);
- return;
}
/* Stride Read-ahead window will be increased inc_len according to
@@ -882,7 +892,6 @@ out_unlock:
RAS_CDEBUG(ras);
ras->ras_request_index++;
spin_unlock(&ras->ras_lock);
- return;
}
int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
@@ -1015,11 +1024,15 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
* is called later on.
*/
ignore_layout = 1;
+
+ if (!ll_i2info(inode)->lli_clob)
+ return 0;
+
result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
if (result > 0) {
wbc->nr_to_write -= result;
result = 0;
- }
+ }
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index d98c7acc0832..26f3a37873a7 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -51,9 +51,7 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
-#include "../include/linux/lustre_compat25.h"
/**
* Implements Linux VM address_space::invalidatepage() method. This method is
@@ -161,7 +159,7 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
return result;
}
-#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
+#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
size_t size, struct page ***pages,
@@ -214,10 +212,10 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
int i;
ssize_t rc = 0;
loff_t file_offset = pv->ldp_start_offset;
- long size = pv->ldp_size;
+ size_t size = pv->ldp_size;
int page_count = pv->ldp_nr;
struct page **pages = pv->ldp_pages;
- long page_size = cl_page_size(obj);
+ size_t page_size = cl_page_size(obj);
bool do_io;
int io_pages = 0;
@@ -346,7 +344,6 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct vvp_object *obj = cl_inode2vvp(inode);
loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
@@ -375,14 +372,6 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
io = vvp_env_io(env)->vui_cl.cis_io;
LASSERT(io);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode mutex to operate transient pages.
- */
- if (iov_iter_rw(iter) == READ)
- inode_lock(inode);
-
- LASSERT(obj->vob_transient_pages == 0);
while (iov_iter_count(iter)) {
struct page **pages;
size_t offs;
@@ -430,10 +419,6 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
file_offset += result;
}
out:
- LASSERT(obj->vob_transient_pages == 0);
- if (iov_iter_rw(iter) == READ)
- inode_unlock(inode);
-
if (tot_bytes > 0) {
struct vvp_io *vio = vvp_env_io(env);
@@ -616,6 +601,13 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
LASSERT(from == 0);
vio->u.write.vui_to = from + copied;
+ /*
+ * Unplug early to avoid the deadlock in balance_dirty_pages(),
+ * where this dirty page may be written back in the same thread.
+ */
+ if (PageDirty(vmpage))
+ unplug = true;
+
/* We may have one full RPC, commit it soon */
if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
unplug = true;
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index c1cb6b19e724..0677513476ec 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -39,7 +39,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "llite_internal.h"
@@ -50,24 +49,26 @@ enum se_stat {
SA_ENTRY_INIT = 0, /** init entry */
SA_ENTRY_SUCC = 1, /** stat succeed */
SA_ENTRY_INVA = 2, /** invalid entry */
- SA_ENTRY_DEST = 3, /** entry to be destroyed */
};
-struct ll_sa_entry {
- /* link into sai->sai_entries */
- struct list_head se_link;
- /* link into sai->sai_entries_{received,stated} */
+/*
+ * sa_entry is not refcounted: the statahead thread allocates it and issues the
+ * async stat; the async stat callback ll_statahead_interpret() adds it to
+ * sai_interim_entries; later the statahead thread calls sa_handle_callback() to
+ * instantiate the entry and move it into sai_entries, after which only the
+ * scanner process can access and free it.
+ */
+struct sa_entry {
+ /* link into sai_interim_entries or sai_entries */
struct list_head se_list;
/* link into sai hash table locally */
struct list_head se_hash;
- /* entry reference count */
- atomic_t se_refcount;
/* entry index in the sai */
__u64 se_index;
/* low layer ldlm lock handle */
__u64 se_handle;
/* entry status */
- enum se_stat se_stat;
+ enum se_stat se_state;
/* entry size, contains name */
int se_size;
/* pointer to async getattr enqueue info */
@@ -83,27 +84,24 @@ struct ll_sa_entry {
static unsigned int sai_generation;
static DEFINE_SPINLOCK(sai_generation_lock);
-/*
- * The entry only can be released by the caller, it is necessary to hold lock.
- */
-static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
+/* sa_entry is ready to use */
+static inline int sa_ready(struct sa_entry *entry)
{
smp_rmb();
- return (entry->se_stat != SA_ENTRY_INIT);
+ return (entry->se_state != SA_ENTRY_INIT);
}
-static inline int ll_sa_entry_hash(int val)
+/* hash value to put in sai_cache */
+static inline int sa_hash(int val)
{
return val & LL_SA_CACHE_MASK;
}
-/*
- * Insert entry to hash SA table.
- */
+/* hash entry into sai_cache */
static inline void
-ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
+ int i = sa_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
@@ -114,9 +112,9 @@ ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
* Remove entry from SA table.
*/
static inline void
-ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
+ int i = sa_hash(entry->se_qstr.hash);
spin_lock(&sai->sai_cache_lock[i]);
list_del_init(&entry->se_hash);
@@ -129,19 +127,21 @@ static inline int agl_should_run(struct ll_statahead_info *sai,
return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
}
+/* statahead window is full */
static inline int sa_sent_full(struct ll_statahead_info *sai)
{
return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}
-static inline int sa_received_empty(struct ll_statahead_info *sai)
+/* got async stat replies */
+static inline int sa_has_callback(struct ll_statahead_info *sai)
{
- return list_empty(&sai->sai_entries_received);
+ return !list_empty(&sai->sai_interim_entries);
}
static inline int agl_list_empty(struct ll_statahead_info *sai)
{
- return list_empty(&sai->sai_entries_agl);
+ return list_empty(&sai->sai_agls);
}
/**
@@ -157,7 +157,7 @@ static inline int sa_low_hit(struct ll_statahead_info *sai)
}
/*
- * If the given index is behind of statahead window more than
+ * if the given index is behind the statahead window by more than
* SA_OMITTED_ENTRY_MAX, then it is old.
*/
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
@@ -166,20 +166,17 @@ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
sai->sai_index);
}
-/*
- * Insert it into sai_entries tail when init.
- */
-static struct ll_sa_entry *
-ll_sa_entry_alloc(struct dentry *parent,
- struct ll_statahead_info *sai, __u64 index,
- const char *name, int len)
+/* allocate sa_entry and hash it to allow scanner process to find it */
+static struct sa_entry *
+sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
+ const char *name, int len)
{
struct ll_inode_info *lli;
- struct ll_sa_entry *entry;
+ struct sa_entry *entry;
int entry_size;
char *dname;
- entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
+ entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
entry = kzalloc(entry_size, GFP_NOFS);
if (unlikely(!entry))
return ERR_PTR(-ENOMEM);
@@ -188,34 +185,9 @@ ll_sa_entry_alloc(struct dentry *parent,
len, name, entry, index);
entry->se_index = index;
-
- /*
- * Statahead entry reference rules:
- *
- * 1) When statahead entry is initialized, its reference is set as 2.
- * One reference is used by the directory scanner. When the scanner
- * searches the statahead cache for the given name, it can perform
- * lockless hash lookup (only the scanner can remove entry from hash
- * list), and once found, it needn't to call "atomic_inc()" for the
- * entry reference. So the performance is improved. After using the
- * statahead entry, the scanner will call "atomic_dec()" to drop the
- * reference held when initialization. If it is the last reference,
- * the statahead entry will be freed.
- *
- * 2) All other threads, including statahead thread and ptlrpcd thread,
- * when they process the statahead entry, the reference for target
- * should be held to guarantee the entry will not be released by the
- * directory scanner. After processing the entry, these threads will
- * drop the entry reference. If it is the last reference, the entry
- * will be freed.
- *
- * The second reference when initializes the statahead entry is used
- * by the statahead thread, following the rule 2).
- */
- atomic_set(&entry->se_refcount, 2);
- entry->se_stat = SA_ENTRY_INIT;
+ entry->se_state = SA_ENTRY_INIT;
entry->se_size = entry_size;
- dname = (char *)entry + sizeof(struct ll_sa_entry);
+ dname = (char *)entry + sizeof(struct sa_entry);
memcpy(dname, name, len);
dname[len] = 0;
@@ -223,11 +195,10 @@ ll_sa_entry_alloc(struct dentry *parent,
entry->se_qstr.len = len;
entry->se_qstr.name = dname;
- lli = ll_i2info(sai->sai_inode);
+ lli = ll_i2info(sai->sai_dentry->d_inode);
spin_lock(&lli->lli_sa_lock);
- list_add_tail(&entry->se_link, &sai->sai_entries);
INIT_LIST_HEAD(&entry->se_list);
- ll_sa_entry_enhash(sai, entry);
+ sa_rehash(sai, entry);
spin_unlock(&lli->lli_sa_lock);
atomic_inc(&sai->sai_cache_count);
@@ -235,18 +206,29 @@ ll_sa_entry_alloc(struct dentry *parent,
return entry;
}
+/* free sa_entry, which should have been unhashed and not in any list */
+static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
+{
+ CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
+ entry->se_qstr.len, entry->se_qstr.name, entry,
+ entry->se_index);
+
+ LASSERT(list_empty(&entry->se_list));
+ LASSERT(list_empty(&entry->se_hash));
+
+ kfree(entry);
+ atomic_dec(&sai->sai_cache_count);
+}
+
/*
- * Used by the directory scanner to search entry with name.
- *
- * Only the caller can remove the entry from hash, so it is unnecessary to hold
- * hash lock. It is caller's duty to release the init refcount on the entry, so
- * it is also unnecessary to increase refcount on the entry.
+ * find an sa_entry by name; used by the directory scanner. No lock is needed
+ * because only the scanner can remove entries from the cache.
*/
-static struct ll_sa_entry *
-ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
+static struct sa_entry *
+sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
{
- struct ll_sa_entry *entry;
- int i = ll_sa_entry_hash(qstr->hash);
+ struct sa_entry *entry;
+ int i = sa_hash(qstr->hash);
list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
if (entry->se_qstr.hash == qstr->hash &&
@@ -257,164 +239,126 @@ ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
return NULL;
}
-/*
- * Used by the async getattr request callback to find entry with index.
- *
- * Inside lli_sa_lock to prevent others to change the list during the search.
- * It needs to increase entry refcount before returning to guarantee that the
- * entry cannot be freed by others.
- */
-static struct ll_sa_entry *
-ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
-{
- struct ll_sa_entry *entry;
-
- list_for_each_entry(entry, &sai->sai_entries, se_link) {
- if (entry->se_index == index) {
- LASSERT(atomic_read(&entry->se_refcount) > 0);
- atomic_inc(&entry->se_refcount);
- return entry;
- }
- if (entry->se_index > index)
- break;
- }
- return NULL;
-}
-
-static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
-{
- struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
-
- if (minfo) {
- entry->se_minfo = NULL;
- ll_intent_release(&minfo->mi_it);
- iput(minfo->mi_dir);
- kfree(minfo);
- }
-
- if (req) {
- entry->se_req = NULL;
- ptlrpc_req_finished(req);
- }
-}
-
-static void ll_sa_entry_put(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
-{
- if (atomic_dec_and_test(&entry->se_refcount)) {
- CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
- entry->se_qstr.len, entry->se_qstr.name, entry,
- entry->se_index);
-
- LASSERT(list_empty(&entry->se_link));
- LASSERT(list_empty(&entry->se_list));
- LASSERT(list_empty(&entry->se_hash));
-
- ll_sa_entry_cleanup(sai, entry);
- iput(entry->se_inode);
-
- kfree(entry);
- atomic_dec(&sai->sai_cache_count);
- }
-}
-
+/* unhash and unlink sa_entry, and then free it */
static inline void
-do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+ struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
LASSERT(!list_empty(&entry->se_hash));
- LASSERT(!list_empty(&entry->se_link));
+ LASSERT(!list_empty(&entry->se_list));
+ LASSERT(sa_ready(entry));
- ll_sa_entry_unhash(sai, entry);
+ sa_unhash(sai, entry);
spin_lock(&lli->lli_sa_lock);
- entry->se_stat = SA_ENTRY_DEST;
- list_del_init(&entry->se_link);
- if (likely(!list_empty(&entry->se_list)))
- list_del_init(&entry->se_list);
+ list_del_init(&entry->se_list);
spin_unlock(&lli->lli_sa_lock);
- ll_sa_entry_put(sai, entry);
+ if (entry->se_inode)
+ iput(entry->se_inode);
+
+ sa_free(sai, entry);
}
-/*
- * Delete it from sai_entries_stated list when fini.
- */
+/* called by the scanner after use; the sa_entry will be killed */
static void
-ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
{
- struct ll_sa_entry *pos, *next;
+ struct sa_entry *tmp, *next;
+
+ if (entry && entry->se_state == SA_ENTRY_SUCC) {
+ struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
+
+ sai->sai_hit++;
+ sai->sai_consecutive_miss = 0;
+ sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
+ } else {
+ sai->sai_miss++;
+ sai->sai_consecutive_miss++;
+ }
if (entry)
- do_sa_entry_fini(sai, entry);
+ sa_kill(sai, entry);
- /* drop old entry, only 'scanner' process does this, no need to lock */
- list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
- if (!is_omitted_entry(sai, pos->se_index))
+ /*
+ * kill old completed entries; only the scanner process does this, so no
+ * lock is needed
+ */
+ list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
+ if (!is_omitted_entry(sai, tmp->se_index))
break;
- do_sa_entry_fini(sai, pos);
+ sa_kill(sai, tmp);
}
+
+ wake_up(&sai->sai_thread.t_ctl_waitq);
}
/*
- * Inside lli_sa_lock.
+ * update the entry state and insert it into sai_entries sorted by index;
+ * return true if the scanner is waiting on this entry.
*/
-static void
-do_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, enum se_stat stat)
+static bool
+__sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
{
- struct ll_sa_entry *se;
- struct list_head *pos = &sai->sai_entries_stated;
+ struct list_head *pos = &sai->sai_entries;
+ __u64 index = entry->se_index;
+ struct sa_entry *se;
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ LASSERT(!sa_ready(entry));
+ LASSERT(list_empty(&entry->se_list));
- list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
if (se->se_index < entry->se_index) {
pos = &se->se_list;
break;
}
}
-
list_add(&entry->se_list, pos);
- entry->se_stat = stat;
+ entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
+
+ return (index == sai->sai_index_wait);
}
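/*
 * A worked illustration of the sorted insert done in __sa_make_ready()
 * above: the list is kept ordered by index, so walking it in reverse and
 * stopping at the first element with a smaller index preserves the order.
 * For indexes [3, 5, 9], inserting 7 stops at 5 and links 7 after it,
 * giving [3, 5, 7, 9]. The sketch below uses a plain integer node instead
 * of struct sa_entry; idx_node and idx_insert_sorted() are invented names.
 */
struct idx_node {
	struct list_head link;
	__u64 index;
};

static void idx_insert_sorted(struct list_head *head, struct idx_node *node)
{
	struct list_head *pos = head;	/* default: smallest index so far */
	struct idx_node *it;

	list_for_each_entry_reverse(it, head, link) {
		if (it->index < node->index) {
			pos = &it->link;
			break;
		}
	}
	/* list_add() links the new node right after pos */
	list_add(&node->link, pos);
}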
/*
- * Move entry to sai_entries_stated and sort with the index.
- * \retval 1 -- entry to be destroyed.
- * \retval 0 -- entry is inserted into stated list.
+ * release resources used in the async stat RPC, update the entry state, and
+ * wake up the scanner process if it is waiting on this entry.
*/
-static int
-ll_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, enum se_stat stat)
+static void
+sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
- int ret = 1;
+ struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
+ struct md_enqueue_info *minfo = entry->se_minfo;
+ struct ptlrpc_request *req = entry->se_req;
+ bool wakeup;
+
+ /* release resources used in RPC */
+ if (minfo) {
+ entry->se_minfo = NULL;
+ ll_intent_release(&minfo->mi_it);
+ iput(minfo->mi_dir);
+ kfree(minfo);
+ }
- ll_sa_entry_cleanup(sai, entry);
+ if (req) {
+ entry->se_req = NULL;
+ ptlrpc_req_finished(req);
+ }
spin_lock(&lli->lli_sa_lock);
- if (likely(entry->se_stat != SA_ENTRY_DEST)) {
- do_sa_entry_to_stated(sai, entry, stat);
- ret = 0;
- }
+ wakeup = __sa_make_ready(sai, entry, ret);
spin_unlock(&lli->lli_sa_lock);
- return ret;
+ if (wakeup)
+ wake_up(&sai->sai_waitq);
}
-/*
- * Insert inode into the list of sai_entries_agl.
- */
+/* Insert inode into the list of sai_agls. */
static void ll_agl_add(struct ll_statahead_info *sai,
struct inode *inode, int index)
{
struct ll_inode_info *child = ll_i2info(inode);
- struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
+ struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
int added = 0;
spin_lock(&child->lli_agl_lock);
@@ -426,9 +370,9 @@ static void ll_agl_add(struct ll_statahead_info *sai,
igrab(inode);
spin_lock(&parent->lli_agl_lock);
- if (list_empty(&sai->sai_entries_agl))
+ if (list_empty(&sai->sai_agls))
added = 1;
- list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+ list_add_tail(&child->lli_agl_list, &sai->sai_agls);
spin_unlock(&parent->lli_agl_lock);
} else {
spin_unlock(&child->lli_agl_lock);
@@ -438,8 +382,10 @@ static void ll_agl_add(struct ll_statahead_info *sai,
wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}
-static struct ll_statahead_info *ll_sai_alloc(void)
+/* allocate sai */
+static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
{
+ struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
struct ll_statahead_info *sai;
int i;
@@ -447,24 +393,18 @@ static struct ll_statahead_info *ll_sai_alloc(void)
if (!sai)
return NULL;
+ sai->sai_dentry = dget(dentry);
atomic_set(&sai->sai_refcount, 1);
- spin_lock(&sai_generation_lock);
- sai->sai_generation = ++sai_generation;
- if (unlikely(sai_generation == 0))
- sai->sai_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
-
sai->sai_max = LL_SA_RPC_MIN;
sai->sai_index = 1;
init_waitqueue_head(&sai->sai_waitq);
init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
+ INIT_LIST_HEAD(&sai->sai_interim_entries);
INIT_LIST_HEAD(&sai->sai_entries);
- INIT_LIST_HEAD(&sai->sai_entries_received);
- INIT_LIST_HEAD(&sai->sai_entries_stated);
- INIT_LIST_HEAD(&sai->sai_entries_agl);
+ INIT_LIST_HEAD(&sai->sai_agls);
for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
INIT_LIST_HEAD(&sai->sai_cache[i]);
@@ -472,63 +412,74 @@ static struct ll_statahead_info *ll_sai_alloc(void)
}
atomic_set(&sai->sai_cache_count, 0);
+ spin_lock(&sai_generation_lock);
+ lli->lli_sa_generation = ++sai_generation;
+ if (unlikely(!sai_generation))
+ lli->lli_sa_generation = ++sai_generation;
+ spin_unlock(&sai_generation_lock);
+
return sai;
}
-static inline struct ll_statahead_info *
-ll_sai_get(struct ll_statahead_info *sai)
+/* free sai */
+static inline void ll_sai_free(struct ll_statahead_info *sai)
{
- atomic_inc(&sai->sai_refcount);
+ LASSERT(sai->sai_dentry);
+ dput(sai->sai_dentry);
+ kfree(sai);
+}
+
+/*
+ * take a refcount on the sai if one exists for @dir, which means statahead is
+ * running for this directory.
+ */
+static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = NULL;
+
+ spin_lock(&lli->lli_sa_lock);
+ sai = lli->lli_sai;
+ if (sai)
+ atomic_inc(&sai->sai_refcount);
+ spin_unlock(&lli->lli_sa_lock);
+
return sai;
}
+/*
+ * drop the sai refcount after use; if it reaches zero, free the sai and the
+ * sa_entries attached to it.
+ */
static void ll_sai_put(struct ll_statahead_info *sai)
{
- struct inode *inode = sai->sai_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
- struct ll_sa_entry *entry, *next;
-
- if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
- /* It is race case, the interpret callback just hold
- * a reference count
- */
- spin_unlock(&lli->lli_sa_lock);
- return;
- }
-
- LASSERT(!lli->lli_opendir_key);
- LASSERT(thread_is_stopped(&sai->sai_thread));
- LASSERT(thread_is_stopped(&sai->sai_agl_thread));
+ struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
+ struct sa_entry *entry, *next;
lli->lli_sai = NULL;
- lli->lli_opendir_pid = 0;
spin_unlock(&lli->lli_sa_lock);
- if (sai->sai_sent > sai->sai_replied)
- CDEBUG(D_READA, "statahead for dir "DFID
- " does not finish: [sent:%llu] [replied:%llu]\n",
- PFID(&lli->lli_fid),
- sai->sai_sent, sai->sai_replied);
+ LASSERT(thread_is_stopped(&sai->sai_thread));
+ LASSERT(thread_is_stopped(&sai->sai_agl_thread));
+ LASSERT(sai->sai_sent == sai->sai_replied);
+ LASSERT(!sa_has_callback(sai));
list_for_each_entry_safe(entry, next, &sai->sai_entries,
- se_link)
- do_sa_entry_fini(sai, entry);
-
- LASSERT(list_empty(&sai->sai_entries));
- LASSERT(list_empty(&sai->sai_entries_received));
- LASSERT(list_empty(&sai->sai_entries_stated));
+ se_list)
+ sa_kill(sai, entry);
LASSERT(atomic_read(&sai->sai_cache_count) == 0);
- LASSERT(list_empty(&sai->sai_entries_agl));
+ LASSERT(list_empty(&sai->sai_agls));
- iput(inode);
- kfree(sai);
+ ll_sai_free(sai);
+ atomic_dec(&sbi->ll_sa_running);
}
}
-/* Do NOT forget to drop inode refcount when into sai_entries_agl. */
+/* Do NOT forget to drop the inode refcount once it is put into sai_agls. */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -588,29 +539,21 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
iput(inode);
}
-static void ll_post_statahead(struct ll_statahead_info *sai)
+/*
+ * prepare the inode for the sa_entry and add it to the agl list; after this
+ * the sa_entry is ready to be used by the scanner process.
+ */
+static void sa_instantiate(struct ll_statahead_info *sai,
+ struct sa_entry *entry)
{
- struct inode *dir = sai->sai_inode;
+ struct inode *dir = sai->sai_dentry->d_inode;
struct inode *child;
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_sa_entry *entry;
struct md_enqueue_info *minfo;
struct lookup_intent *it;
struct ptlrpc_request *req;
struct mdt_body *body;
int rc = 0;
- spin_lock(&lli->lli_sa_lock);
- if (unlikely(list_empty(&sai->sai_entries_received))) {
- spin_unlock(&lli->lli_sa_lock);
- return;
- }
- entry = list_entry(sai->sai_entries_received.next,
- struct ll_sa_entry, se_list);
- atomic_inc(&entry->se_refcount);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_sa_lock);
-
LASSERT(entry->se_handle != 0);
minfo = entry->se_minfo;
@@ -632,7 +575,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
/* XXX: No fid in reply, this is probably cross-ref case.
* SA can't handle it yet.
*/
- if (body->valid & OBD_MD_MDS) {
+ if (body->mbo_valid & OBD_MD_MDS) {
rc = -EAGAIN;
goto out;
}
@@ -641,7 +584,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
* revalidate.
*/
/* unlinked and re-created with the same name */
- if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
+ if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) {
entry->se_inode = NULL;
iput(child);
child = NULL;
@@ -659,8 +602,9 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
if (rc)
goto out;
- CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
+ CDEBUG(D_READA, "%s: setting %.*s" DFID " l_data to inode %p\n",
ll_get_fsname(child->i_sb, NULL, 0),
+ entry->se_qstr.len, entry->se_qstr.name,
PFID(ll_inode2fid(child)), child);
ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
@@ -670,34 +614,75 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
ll_agl_add(sai, child, entry->se_index);
out:
- /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
- * reference count by calling "ll_intent_drop_lock()" in spite of the
- * above operations failed or not. Do not worry about calling
- * "ll_intent_drop_lock()" more than once.
+ /*
+ * sa_make_ready() will drop ldlm ibits lock refcount by calling
+ * ll_intent_drop_lock() in spite of failures. Do not worry about
+ * calling ll_intent_drop_lock() more than once.
*/
- rc = ll_sa_entry_to_stated(sai, entry,
- rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
- if (rc == 0 && entry->se_index == sai->sai_index_wait)
- wake_up(&sai->sai_waitq);
- ll_sa_entry_put(sai, entry);
+ sa_make_ready(sai, entry, rc);
}
+/* once there are async stat replies, instantiate sa_entry from replies */
+static void sa_handle_callback(struct ll_statahead_info *sai)
+{
+ struct ll_inode_info *lli;
+
+ lli = ll_i2info(sai->sai_dentry->d_inode);
+
+ while (sa_has_callback(sai)) {
+ struct sa_entry *entry;
+
+ spin_lock(&lli->lli_sa_lock);
+ if (unlikely(!sa_has_callback(sai))) {
+ spin_unlock(&lli->lli_sa_lock);
+ break;
+ }
+ entry = list_entry(sai->sai_interim_entries.next,
+ struct sa_entry, se_list);
+ list_del_init(&entry->se_list);
+ spin_unlock(&lli->lli_sa_lock);
+
+ sa_instantiate(sai, entry);
+ }
+}
+
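/*
 * The handoff implemented by sa_handle_callback() and the RPC callback below
 * is a simple producer/consumer split: the callback, running in ptlrpcd
 * context, only queues work under a spinlock and wakes the statahead thread,
 * which drains the list later and does the blocking work. A stripped-down
 * sketch of that pattern; worker_ctx, defer_item() and drain_items() are
 * invented names, not Lustre structures.
 */
struct worker_ctx {
	spinlock_t lock;
	struct list_head pending;	/* filled from callback context */
	wait_queue_head_t waitq;	/* the worker thread sleeps here */
};

/* callback side: cheap, never blocks */
static void defer_item(struct worker_ctx *ctx, struct list_head *item)
{
	bool was_empty;

	spin_lock(&ctx->lock);
	was_empty = list_empty(&ctx->pending);
	list_add_tail(item, &ctx->pending);
	spin_unlock(&ctx->lock);

	if (was_empty)
		wake_up(&ctx->waitq);
}

/* worker side: drain and process outside the callback path */
static void drain_items(struct worker_ctx *ctx,
			void (*process)(struct list_head *item))
{
	struct list_head *item;

	for (;;) {
		spin_lock(&ctx->lock);
		if (list_empty(&ctx->pending)) {
			spin_unlock(&ctx->lock);
			break;
		}
		item = ctx->pending.next;
		list_del_init(item);
		spin_unlock(&ctx->lock);

		process(item);
	}
}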
+/*
+ * callback for async stat RPC: because this is called in ptlrpcd context, we
+ * only put the sa_entry on the sai_interim_entries list, and let
+ * sa_handle_callback() prepare the inode and instantiate the sa_entry later.
+ */
static int ll_statahead_interpret(struct ptlrpc_request *req,
struct md_enqueue_info *minfo, int rc)
{
struct lookup_intent *it = &minfo->mi_it;
struct inode *dir = minfo->mi_dir;
struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = NULL;
- struct ll_sa_entry *entry;
- __u64 handle = 0;
- int wakeup;
+ struct ll_statahead_info *sai = lli->lli_sai;
+ struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
+ __u64 handle = 0;
+ bool wakeup;
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
- if (rc == 0) {
- /* release ibits lock ASAP to avoid deadlock when statahead
+ /*
+ * because the statahead thread waits for all inflight RPCs to finish,
+ * sai is always valid here; no refcount is needed
+ */
+ LASSERT(sai);
+ LASSERT(!thread_is_stopped(&sai->sai_thread));
+ LASSERT(entry);
+
+ CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
+ entry->se_qstr.len, entry->se_qstr.name, rc);
+
+ if (rc) {
+ ll_intent_release(it);
+ iput(dir);
+ kfree(minfo);
+ } else {
+ /*
+ * release ibits lock ASAP to avoid deadlock when statahead
* thread enqueues lock on parent in readdir and another
* process enqueues lock on child with parent lock held, eg.
* unlink.
@@ -707,65 +692,32 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
}
spin_lock(&lli->lli_sa_lock);
- /* stale entry */
- if (unlikely(!lli->lli_sai ||
- lli->lli_sai->sai_generation != minfo->mi_generation)) {
- spin_unlock(&lli->lli_sa_lock);
- rc = -ESTALE;
- goto out;
+ if (rc) {
+ wakeup = __sa_make_ready(sai, entry, rc);
} else {
- sai = ll_sai_get(lli->lli_sai);
- if (unlikely(!thread_is_running(&sai->sai_thread))) {
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
- rc = -EBADFD;
- goto out;
- }
-
- entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
- if (!entry) {
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
- rc = -EIDRM;
- goto out;
- }
-
- if (rc != 0) {
- do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
- wakeup = (entry->se_index == sai->sai_index_wait);
- } else {
- entry->se_minfo = minfo;
- entry->se_req = ptlrpc_request_addref(req);
- /* Release the async ibits lock ASAP to avoid deadlock
- * when statahead thread tries to enqueue lock on parent
- * for readpage and other tries to enqueue lock on child
- * with parent's lock held, for example: unlink.
- */
- entry->se_handle = handle;
- wakeup = list_empty(&sai->sai_entries_received);
- list_add_tail(&entry->se_list,
- &sai->sai_entries_received);
- }
- sai->sai_replied++;
- spin_unlock(&lli->lli_sa_lock);
-
- ll_sa_entry_put(sai, entry);
- if (wakeup)
- wake_up(&sai->sai_thread.t_ctl_waitq);
+ entry->se_minfo = minfo;
+ entry->se_req = ptlrpc_request_addref(req);
+ /*
+ * Release the async ibits lock ASAP to avoid deadlock
+ * when statahead thread tries to enqueue lock on parent
+ * for readpage and other tries to enqueue lock on child
+ * with parent's lock held, for example: unlink.
+ */
+ entry->se_handle = handle;
+ wakeup = !sa_has_callback(sai);
+ list_add_tail(&entry->se_list, &sai->sai_interim_entries);
}
+ sai->sai_replied++;
+
+ if (wakeup)
+ wake_up(&sai->sai_thread.t_ctl_waitq);
+ spin_unlock(&lli->lli_sa_lock);
-out:
- if (rc != 0) {
- ll_intent_release(it);
- iput(dir);
- kfree(minfo);
- }
- if (sai)
- ll_sai_put(sai);
return rc;
}
-static void sa_args_fini(struct md_enqueue_info *minfo,
+/* finish async stat RPC arguments */
+static void sa_fini_data(struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo)
{
LASSERT(minfo && einfo);
@@ -777,12 +729,11 @@ static void sa_args_fini(struct md_enqueue_info *minfo,
/**
* prepare arguments for async stat RPC.
*/
-static int sa_args_init(struct inode *dir, struct inode *child,
- struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
+static int sa_prep_data(struct inode *dir, struct inode *child,
+ struct sa_entry *entry, struct md_enqueue_info **pmi,
struct ldlm_enqueue_info **pei)
{
const struct qstr *qstr = &entry->se_qstr;
- struct ll_inode_info *lli = ll_i2info(dir);
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
struct md_op_data *op_data;
@@ -808,8 +759,7 @@ static int sa_args_init(struct inode *dir, struct inode *child,
minfo->mi_it.it_op = IT_GETATTR;
minfo->mi_dir = igrab(dir);
minfo->mi_cb = ll_statahead_interpret;
- minfo->mi_generation = lli->lli_sai->sai_generation;
- minfo->mi_cbdata = entry->se_index;
+ minfo->mi_cbdata = entry;
einfo->ei_type = LDLM_IBITS;
einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
@@ -824,31 +774,33 @@ static int sa_args_init(struct inode *dir, struct inode *child,
return 0;
}
-static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
+/* async stat for file not found in dcache */
+static int sa_lookup(struct inode *dir, struct sa_entry *entry)
{
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
int rc;
- rc = sa_args_init(dir, NULL, entry, &minfo, &einfo);
+ rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo);
if (rc)
return rc;
rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
- if (rc < 0)
- sa_args_fini(minfo, einfo);
+ if (rc)
+ sa_fini_data(minfo, einfo);
return rc;
}
/**
- * similar to ll_revalidate_it().
- * \retval 1 -- dentry valid
- * \retval 0 -- will send stat-ahead request
- * \retval others -- prepare stat-ahead request failed
+ * async stat for file found in dcache, similar to .revalidate
+ *
+ * \retval 1 dentry valid, no RPC sent
+ * \retval 0 dentry invalid, will send async stat RPC
+ * \retval negative number upon error
*/
-static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
- struct dentry *dentry)
+static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
+ struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct lookup_intent it = { .it_op = IT_GETATTR,
@@ -872,7 +824,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
return 1;
}
- rc = sa_args_init(dir, inode, entry, &minfo, &einfo);
+ rc = sa_prep_data(dir, inode, entry, &minfo, &einfo);
if (rc) {
entry->se_inode = NULL;
iput(inode);
@@ -880,56 +832,50 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
}
rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
- if (rc < 0) {
+ if (rc) {
entry->se_inode = NULL;
iput(inode);
- sa_args_fini(minfo, einfo);
+ sa_fini_data(minfo, einfo);
}
return rc;
}
-static void ll_statahead_one(struct dentry *parent, const char *entry_name,
- int entry_name_len)
+/* async stat for file with @name */
+static void sa_statahead(struct dentry *parent, const char *name, int len)
{
struct inode *dir = d_inode(parent);
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct dentry *dentry = NULL;
- struct ll_sa_entry *entry;
+ struct sa_entry *entry;
int rc;
- int rc1;
- entry = ll_sa_entry_alloc(parent, sai, sai->sai_index, entry_name,
- entry_name_len);
+ entry = sa_alloc(parent, sai, sai->sai_index, name, len);
if (IS_ERR(entry))
return;
dentry = d_lookup(parent, &entry->se_qstr);
if (!dentry) {
- rc = do_sa_lookup(dir, entry);
+ rc = sa_lookup(dir, entry);
} else {
- rc = do_sa_revalidate(dir, entry, dentry);
+ rc = sa_revalidate(dir, entry, dentry);
if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
ll_agl_add(sai, d_inode(dentry), entry->se_index);
+ }
+ if (dentry)
dput(dentry);
- }
- if (rc) {
- rc1 = ll_sa_entry_to_stated(sai, entry,
- rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
- if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
- wake_up(&sai->sai_waitq);
- } else {
+ if (rc)
+ sa_make_ready(sai, entry, rc);
+ else
sai->sai_sent++;
- }
sai->sai_index++;
- /* drop one refcount on entry by ll_sa_entry_alloc */
- ll_sa_entry_put(sai, entry);
}
+/* async glimpse (agl) thread main function */
static int ll_agl_thread(void *arg)
{
struct dentry *parent = arg;
@@ -937,10 +883,12 @@ static int ll_agl_thread(void *arg)
struct ll_inode_info *plli = ll_i2info(dir);
struct ll_inode_info *clli;
struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
- struct ptlrpc_thread *thread = &sai->sai_agl_thread;
+ struct ll_statahead_info *sai;
+ struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
+ sai = ll_sai_get(dir);
+ thread = &sai->sai_agl_thread;
thread->t_pid = current_pid();
CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
sai, parent);
@@ -959,7 +907,7 @@ static int ll_agl_thread(void *arg)
while (1) {
l_wait_event(thread->t_ctl_waitq,
- !list_empty(&sai->sai_entries_agl) ||
+ !list_empty(&sai->sai_agls) ||
!thread_is_running(thread),
&lwi);
@@ -970,8 +918,8 @@ static int ll_agl_thread(void *arg)
/* The statahead thread maybe help to process AGL entries,
* so check whether list empty again.
*/
- if (!list_empty(&sai->sai_entries_agl)) {
- clli = list_entry(sai->sai_entries_agl.next,
+ if (!list_empty(&sai->sai_agls)) {
+ clli = list_entry(sai->sai_agls.next,
struct ll_inode_info, lli_agl_list);
list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
@@ -983,8 +931,8 @@ static int ll_agl_thread(void *arg)
spin_lock(&plli->lli_agl_lock);
sai->sai_agl_valid = 0;
- while (!list_empty(&sai->sai_entries_agl)) {
- clli = list_entry(sai->sai_entries_agl.next,
+ while (!list_empty(&sai->sai_agls)) {
+ clli = list_entry(sai->sai_agls.next,
struct ll_inode_info, lli_agl_list);
list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
@@ -1001,6 +949,7 @@ static int ll_agl_thread(void *arg)
return 0;
}
+/* start agl thread */
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
@@ -1025,58 +974,71 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
&lwi);
}
+/* statahead thread main function */
static int ll_statahead_thread(void *arg)
{
struct dentry *parent = arg;
struct inode *dir = d_inode(parent);
- struct ll_inode_info *plli = ll_i2info(dir);
- struct ll_inode_info *clli;
+ struct ll_inode_info *lli = ll_i2info(dir);
struct ll_sb_info *sbi = ll_i2sbi(dir);
- struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
- struct ptlrpc_thread *thread = &sai->sai_thread;
- struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
- struct page *page;
+ struct ll_statahead_info *sai;
+ struct ptlrpc_thread *sa_thread;
+ struct ptlrpc_thread *agl_thread;
+ struct page *page = NULL;
__u64 pos = 0;
int first = 0;
int rc = 0;
- struct ll_dir_chain chain;
+ struct md_op_data *op_data;
struct l_wait_info lwi = { 0 };
- thread->t_pid = current_pid();
+ sai = ll_sai_get(dir);
+ sa_thread = &sai->sai_thread;
+ agl_thread = &sai->sai_agl_thread;
+ sa_thread->t_pid = current_pid();
CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
sai, parent);
+ op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
+ LUSTRE_OPC_ANY, dir);
+ if (IS_ERR(op_data)) {
+ rc = PTR_ERR(op_data);
+ goto out;
+ }
+
+ op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
+
if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
ll_start_agl(parent, sai);
atomic_inc(&sbi->ll_sa_total);
- spin_lock(&plli->lli_sa_lock);
- if (thread_is_init(thread))
+ spin_lock(&lli->lli_sa_lock);
+ if (thread_is_init(sa_thread))
/* If someone else has changed the thread state
* (e.g. already changed to SVC_STOPPING), we can't just
* blindly overwrite that setting.
*/
- thread_set_flags(thread, SVC_RUNNING);
- spin_unlock(&plli->lli_sa_lock);
- wake_up(&thread->t_ctl_waitq);
-
- ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, &chain);
+ thread_set_flags(sa_thread, SVC_RUNNING);
+ spin_unlock(&lli->lli_sa_lock);
+ wake_up(&sa_thread->t_ctl_waitq);
- while (1) {
+ while (pos != MDS_DIR_END_OFF && thread_is_running(sa_thread)) {
struct lu_dirpage *dp;
struct lu_dirent *ent;
+ sai->sai_in_readpage = 1;
+ page = ll_get_dir_page(dir, op_data, pos);
+ sai->sai_in_readpage = 0;
if (IS_ERR(page)) {
rc = PTR_ERR(page);
- CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
+ CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: opendir_pid = %u: rc = %d\n",
PFID(ll_inode2fid(dir)), pos, sai->sai_index,
- rc, plli->lli_opendir_pid);
- goto out;
+ lli->lli_opendir_pid, rc);
+ break;
}
dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent;
+ for (ent = lu_dirent_start(dp);
+ ent && thread_is_running(sa_thread) && !sa_low_hit(sai);
ent = lu_dirent_next(ent)) {
__u64 hash;
int namelen;
@@ -1123,123 +1085,79 @@ static int ll_statahead_thread(void *arg)
if (unlikely(++first == 1))
continue;
-keep_it:
- l_wait_event(thread->t_ctl_waitq,
- !sa_sent_full(sai) ||
- !list_empty(&sai->sai_entries_received) ||
- !list_empty(&sai->sai_entries_agl) ||
- !thread_is_running(thread),
- &lwi);
-
-interpret_it:
- while (!list_empty(&sai->sai_entries_received))
- ll_post_statahead(sai);
-
- if (unlikely(!thread_is_running(thread))) {
- ll_release_page(page, 0);
- rc = 0;
- goto out;
- }
+ /* wait for spare statahead window */
+ do {
+ l_wait_event(sa_thread->t_ctl_waitq,
+ !sa_sent_full(sai) ||
+ sa_has_callback(sai) ||
+ !list_empty(&sai->sai_agls) ||
+ !thread_is_running(sa_thread),
+ &lwi);
+ sa_handle_callback(sai);
- /* If no window for metadata statahead, but there are
- * some AGL entries to be triggered, then try to help
- * to process the AGL entries.
- */
- if (sa_sent_full(sai)) {
- spin_lock(&plli->lli_agl_lock);
- while (!list_empty(&sai->sai_entries_agl)) {
- clli = list_entry(sai->sai_entries_agl.next,
+ spin_lock(&lli->lli_agl_lock);
+ while (sa_sent_full(sai) &&
+ !agl_list_empty(sai)) {
+ struct ll_inode_info *clli;
+
+ clli = list_entry(sai->sai_agls.next,
struct ll_inode_info, lli_agl_list);
list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
+ spin_unlock(&lli->lli_agl_lock);
+
ll_agl_trigger(&clli->lli_vfs_inode,
sai);
- if (!list_empty(&sai->sai_entries_received))
- goto interpret_it;
-
- if (unlikely(
- !thread_is_running(thread))) {
- ll_release_page(page, 0);
- rc = 0;
- goto out;
- }
-
- if (!sa_sent_full(sai))
- goto do_it;
-
- spin_lock(&plli->lli_agl_lock);
+ spin_lock(&lli->lli_agl_lock);
}
- spin_unlock(&plli->lli_agl_lock);
+ spin_unlock(&lli->lli_agl_lock);
+ } while (sa_sent_full(sai) &&
+ thread_is_running(sa_thread));
- goto keep_it;
- }
-
-do_it:
- ll_statahead_one(parent, name, namelen);
+ sa_statahead(parent, name, namelen);
}
- pos = le64_to_cpu(dp->ldp_hash_end);
- if (pos == MDS_DIR_END_OFF) {
- /*
- * End of directory reached.
- */
- ll_release_page(page, 0);
- while (1) {
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&sai->sai_entries_received) ||
- sai->sai_sent == sai->sai_replied ||
- !thread_is_running(thread),
- &lwi);
- while (!list_empty(&sai->sai_entries_received))
- ll_post_statahead(sai);
+ pos = le64_to_cpu(dp->ldp_hash_end);
+ ll_release_page(dir, page,
+ le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- if (unlikely(!thread_is_running(thread))) {
- rc = 0;
- goto out;
- }
+ if (sa_low_hit(sai)) {
+ rc = -EFAULT;
+ atomic_inc(&sbi->ll_sa_wrong);
+ CDEBUG(D_READA, "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
+ PFID(&lli->lli_fid), sai->sai_hit,
+ sai->sai_miss, sai->sai_sent,
+ sai->sai_replied, current_pid());
+ break;
+ }
+ }
+ ll_finish_md_op_data(op_data);
- if (sai->sai_sent == sai->sai_replied &&
- list_empty(&sai->sai_entries_received))
- break;
- }
+ if (rc < 0) {
+ spin_lock(&lli->lli_sa_lock);
+ thread_set_flags(sa_thread, SVC_STOPPING);
+ lli->lli_sa_enabled = 0;
+ spin_unlock(&lli->lli_sa_lock);
+ }
- spin_lock(&plli->lli_agl_lock);
- while (!list_empty(&sai->sai_entries_agl) &&
- thread_is_running(thread)) {
- clli = list_entry(sai->sai_entries_agl.next,
- struct ll_inode_info, lli_agl_list);
- list_del_init(&clli->lli_agl_list);
- spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- spin_lock(&plli->lli_agl_lock);
- }
- spin_unlock(&plli->lli_agl_lock);
+ /*
+ * statahead is finished, but the statahead entries still need to be
+ * cached; wait for file release to stop this thread.
+ */
+ while (thread_is_running(sa_thread)) {
+ l_wait_event(sa_thread->t_ctl_waitq,
+ sa_has_callback(sai) ||
+ !agl_list_empty(sai) ||
+ !thread_is_running(sa_thread),
+ &lwi);
- rc = 0;
- goto out;
- } else if (1) {
- /*
- * chain is exhausted.
- * Normal case: continue to the next page.
- */
- ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- page = ll_get_dir_page(dir, pos, &chain);
- } else {
- LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- ll_release_page(page, 1);
- /*
- * go into overflow page.
- */
- }
+ sa_handle_callback(sai);
}
-
out:
if (sai->sai_agl_valid) {
- spin_lock(&plli->lli_agl_lock);
+ spin_lock(&lli->lli_agl_lock);
thread_set_flags(agl_thread, SVC_STOPPING);
- spin_unlock(&plli->lli_agl_lock);
+ spin_unlock(&lli->lli_agl_lock);
wake_up(&agl_thread->t_ctl_waitq);
CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
@@ -1249,84 +1167,93 @@ out:
&lwi);
} else {
/* Set agl_thread flags anyway. */
- thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
+ thread_set_flags(agl_thread, SVC_STOPPED);
}
- ll_dir_chain_fini(&chain);
- spin_lock(&plli->lli_sa_lock);
- if (!list_empty(&sai->sai_entries_received)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&plli->lli_sa_lock);
-
- /* To release the resources held by received entries. */
- while (!list_empty(&sai->sai_entries_received))
- ll_post_statahead(sai);
- spin_lock(&plli->lli_sa_lock);
+ /*
+ * wait for inflight statahead RPCs to finish; only then can sai be
+ * freed safely, because the RPC callbacks access sai data
+ */
+ while (sai->sai_sent != sai->sai_replied) {
+ /* in case we're not woken up, timeout wait */
+ lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
+ NULL, NULL);
+ l_wait_event(sa_thread->t_ctl_waitq,
+ sai->sai_sent == sai->sai_replied, &lwi);
}
- thread_set_flags(thread, SVC_STOPPED);
- spin_unlock(&plli->lli_sa_lock);
- wake_up(&sai->sai_waitq);
- wake_up(&thread->t_ctl_waitq);
- ll_sai_put(sai);
- dput(parent);
+
+ /* release resources held by statahead RPCs */
+ sa_handle_callback(sai);
+
+ spin_lock(&lli->lli_sa_lock);
+ thread_set_flags(sa_thread, SVC_STOPPED);
+ spin_unlock(&lli->lli_sa_lock);
+
CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
sai, parent);
+
+ wake_up(&sai->sai_waitq);
+ wake_up(&sa_thread->t_ctl_waitq);
+ ll_sai_put(sai);
+
return rc;
}
-/**
- * called in ll_file_release().
- */
-void ll_stop_statahead(struct inode *dir, void *key)
+/* authorize opened dir handle @key to statahead */
+void ll_authorize_statahead(struct inode *dir, void *key)
{
struct ll_inode_info *lli = ll_i2info(dir);
- if (unlikely(!key))
- return;
-
spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- spin_unlock(&lli->lli_sa_lock);
- return;
+ if (!lli->lli_opendir_key && !lli->lli_sai) {
+ /*
+ * if lli_sai is not NULL, the previous statahead has not finished
+ * yet; better not to start a new statahead for now.
+ */
+ LASSERT(!lli->lli_opendir_pid);
+ lli->lli_opendir_key = key;
+ lli->lli_opendir_pid = current_pid();
+ lli->lli_sa_enabled = 1;
}
+ spin_unlock(&lli->lli_sa_lock);
+}
- lli->lli_opendir_key = NULL;
-
- if (lli->lli_sai) {
- struct l_wait_info lwi = { 0 };
- struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
+/*
+ * deauthorize the opened dir handle @key for statahead; the statahead thread
+ * may still be running, so notify it to quit.
+ */
+void ll_deauthorize_statahead(struct inode *dir, void *key)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai;
- if (!thread_is_stopped(thread)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&lli->lli_sa_lock);
- wake_up(&thread->t_ctl_waitq);
+ LASSERT(lli->lli_opendir_key == key);
+ LASSERT(lli->lli_opendir_pid);
- CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
- lli->lli_sai, (unsigned int)thread->t_pid);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
- } else {
- spin_unlock(&lli->lli_sa_lock);
- }
+ CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
+ PFID(&lli->lli_fid));
+ spin_lock(&lli->lli_sa_lock);
+ lli->lli_opendir_key = NULL;
+ lli->lli_opendir_pid = 0;
+ lli->lli_sa_enabled = 0;
+ sai = lli->lli_sai;
+ if (sai && thread_is_running(&sai->sai_thread)) {
/*
- * Put the ref which was held when first statahead_enter.
- * It maybe not the last ref for some statahead requests
- * maybe inflight.
+ * the statahead thread may not have quit yet because it needs to
+ * cache entries; now it is time to tell it to quit.
*/
- ll_sai_put(lli->lli_sai);
- } else {
- lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_sa_lock);
+ thread_set_flags(&sai->sai_thread, SVC_STOPPING);
+ wake_up(&sai->sai_thread.t_ctl_waitq);
}
+ spin_unlock(&lli->lli_sa_lock);
}
enum {
/**
* not first dirent, or is "."
*/
- LS_NONE_FIRST_DE = 0,
+ LS_NOT_FIRST_DE = 0,
/**
* the first non-hidden dirent
*/
@@ -1337,17 +1264,26 @@ enum {
LS_FIRST_DOT_DE
};
+/* file is first dirent under @dir */
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
- struct ll_dir_chain chain;
const struct qstr *target = &dentry->d_name;
+ struct md_op_data *op_data;
struct page *page;
__u64 pos = 0;
int dot_de;
- int rc = LS_NONE_FIRST_DE;
+ int rc = LS_NOT_FIRST_DE;
- ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, &chain);
+ op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
+ LUSTRE_OPC_ANY, dir);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+ /**
+ * FIXME choose the start offset of the readdir
+ */
+ op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
+
+ page = ll_get_dir_page(dir, op_data, pos);
while (1) {
struct lu_dirpage *dp;
@@ -1357,9 +1293,10 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
struct ll_inode_info *lli = ll_i2info(dir);
rc = PTR_ERR(page);
- CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
+ CERROR("%s: error reading dir "DFID" at %llu: opendir_pid = %u : rc = %d\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
PFID(ll_inode2fid(dir)), pos,
- rc, lli->lli_opendir_pid);
+ lli->lli_opendir_pid, rc);
break;
}
@@ -1411,13 +1348,13 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
if (target->len != namelen ||
memcmp(target->name, name, namelen) != 0)
- rc = LS_NONE_FIRST_DE;
+ rc = LS_NOT_FIRST_DE;
else if (!dot_de)
rc = LS_FIRST_DE;
else
rc = LS_FIRST_DOT_DE;
- ll_release_page(page, 0);
+ ll_release_page(dir, page, false);
goto out;
}
pos = le64_to_cpu(dp->ldp_hash_end);
@@ -1425,261 +1362,228 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
/*
* End of directory reached.
*/
- ll_release_page(page, 0);
- break;
- } else if (1) {
+ ll_release_page(dir, page, false);
+ goto out;
+ } else {
/*
* chain is exhausted
* Normal case: continue to the next page.
*/
- ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
- LDF_COLLIDE);
- page = ll_get_dir_page(dir, pos, &chain);
- } else {
- /*
- * go into overflow page.
- */
- LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
- ll_release_page(page, 1);
+ ll_release_page(dir, page,
+ le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ page = ll_get_dir_page(dir, op_data, pos);
}
}
-
out:
- ll_dir_chain_fini(&chain);
+ ll_finish_md_op_data(op_data);
return rc;
}
-static void
-ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
-{
- struct ptlrpc_thread *thread = &sai->sai_thread;
- struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
- int hit;
-
- if (entry && entry->se_stat == SA_ENTRY_SUCC)
- hit = 1;
- else
- hit = 0;
-
- ll_sa_entry_fini(sai, entry);
- if (hit) {
- sai->sai_hit++;
- sai->sai_consecutive_miss = 0;
- sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
- } else {
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
-
- sai->sai_miss++;
- sai->sai_consecutive_miss++;
- if (sa_low_hit(sai) && thread_is_running(thread)) {
- atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
- PFID(&lli->lli_fid), sai->sai_hit,
- sai->sai_miss, sai->sai_sent,
- sai->sai_replied);
- spin_lock(&lli->lli_sa_lock);
- if (!thread_is_stopped(thread))
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&lli->lli_sa_lock);
- }
- }
-
- if (!thread_is_stopped(thread))
- wake_up(&thread->t_ctl_waitq);
-}
-
/**
- * Start statahead thread if this is the first dir entry.
- * Otherwise if a thread is started already, wait it until it is ahead of me.
- * \retval 1 -- find entry with lock in cache, the caller needs to do
- * nothing.
- * \retval 0 -- find entry in cache, but without lock, the caller needs
- * refresh from MDS.
- * \retval others -- the caller need to process as non-statahead.
+ * revalidate @dentryp from statahead cache
+ *
+ * \param[in] dir parent directory
+ * \param[in] sai sai structure
+ * \param[out] dentryp pointer to dentry which will be revalidated
+ * \param[in] unplug unplug statahead window only (normally for negative
+ * dentry)
+ * \retval 1 on success, dentry is saved in @dentryp
+ * \retval 0 if revalidation failed (no proper lock on client)
+ * \retval negative number upon error
*/
-int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
- int only_unplug)
+static int revalidate_statahead_dentry(struct inode *dir,
+ struct ll_statahead_info *sai,
+ struct dentry **dentryp,
+ bool unplug)
{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct dentry *parent;
- struct ll_sa_entry *entry;
- struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
- struct task_struct *task;
- int rc = 0;
- struct ll_inode_info *plli;
+ struct sa_entry *entry = NULL;
+ struct l_wait_info lwi = { 0 };
+ struct ll_dentry_data *ldd;
+ struct ll_inode_info *lli;
+ int rc = 0;
- LASSERT(lli->lli_opendir_pid == current_pid());
+ if ((*dentryp)->d_name.name[0] == '.') {
+ if (sai->sai_ls_all ||
+ sai->sai_miss_hidden >= sai->sai_skip_hidden) {
+ /*
+ * Hidden dentry is the first one, or statahead
+ * thread does not skip so many hidden dentries
+ * before "sai_ls_all" enabled as below.
+ */
+ } else {
+ if (!sai->sai_ls_all)
+ /*
+ * It may be that the hidden dentry is not
+ * the first one and "sai_ls_all" was not
+ * set, so "ls -al" missed it. Enable
+ * "sai_ls_all" for such a case.
+ */
+ sai->sai_ls_all = 1;
- if (sai) {
- thread = &sai->sai_thread;
- if (unlikely(thread_is_stopped(thread) &&
- list_empty(&sai->sai_entries_stated))) {
- /* to release resource */
- ll_stop_statahead(dir, lli->lli_opendir_key);
+ /*
+ * Such "getattr" has been skipped before
+ * "sai_ls_all" enabled as above.
+ */
+ sai->sai_miss_hidden++;
return -EAGAIN;
}
+ }
- if ((*dentryp)->d_name.name[0] == '.') {
- if (sai->sai_ls_all ||
- sai->sai_miss_hidden >= sai->sai_skip_hidden) {
- /*
- * Hidden dentry is the first one, or statahead
- * thread does not skip so many hidden dentries
- * before "sai_ls_all" enabled as below.
- */
- } else {
- if (!sai->sai_ls_all)
- /*
- * It maybe because hidden dentry is not
- * the first one, "sai_ls_all" was not
- * set, then "ls -al" missed. Enable
- * "sai_ls_all" for such case.
- */
- sai->sai_ls_all = 1;
+ if (unplug) {
+ rc = 1;
+ goto out_unplug;
+ }
- /*
- * Such "getattr" has been skipped before
- * "sai_ls_all" enabled as above.
- */
- sai->sai_miss_hidden++;
- return -EAGAIN;
- }
- }
+ entry = sa_get(sai, &(*dentryp)->d_name);
+ if (!entry) {
+ rc = -EAGAIN;
+ goto out_unplug;
+ }
- entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
- if (!entry || only_unplug) {
- ll_sai_unplug(sai, entry);
- return entry ? 1 : -EAGAIN;
- }
+ /* if statahead is busy in readdir, help it do post-work */
+ if (!sa_ready(entry) && sai->sai_in_readpage)
+ sa_handle_callback(sai);
- if (!ll_sa_entry_stated(entry)) {
- sai->sai_index_wait = entry->se_index;
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sai->sai_waitq,
- ll_sa_entry_stated(entry) ||
- thread_is_stopped(thread),
- &lwi);
- if (rc < 0) {
- ll_sai_unplug(sai, entry);
- return -EAGAIN;
- }
+ if (!sa_ready(entry)) {
+ sai->sai_index_wait = entry->se_index;
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
+ if (rc < 0) {
+ /*
+ * the entry may not be ready, so it may still be in use by an
+ * inflight statahead RPC; don't free it.
+ */
+ entry = NULL;
+ rc = -EAGAIN;
+ goto out_unplug;
}
+ }
- if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
- struct inode *inode = entry->se_inode;
- struct lookup_intent it = { .it_op = IT_GETATTR,
- .it_lock_handle =
- entry->se_handle };
- __u64 bits;
-
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
- ll_inode2fid(inode), &bits);
- if (rc == 1) {
- if (!d_inode(*dentryp)) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode,
- *dentryp);
- if (IS_ERR(alias)) {
- ll_sai_unplug(sai, entry);
- return PTR_ERR(alias);
- }
- *dentryp = alias;
- } else if (d_inode(*dentryp) != inode) {
- /* revalidate, but inode is recreated */
- CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
- ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
- *dentryp,
- PFID(ll_inode2fid(d_inode(*dentryp))),
- PFID(ll_inode2fid(inode)));
- ll_sai_unplug(sai, entry);
- return -ESTALE;
- } else {
- iput(inode);
+ if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode) {
+ struct inode *inode = entry->se_inode;
+ struct lookup_intent it = { .it_op = IT_GETATTR,
+ .it_lock_handle = entry->se_handle };
+ __u64 bits;
+
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
+ ll_inode2fid(inode), &bits);
+ if (rc == 1) {
+ if (!(*dentryp)->d_inode) {
+ struct dentry *alias;
+
+ alias = ll_splice_alias(inode, *dentryp);
+ if (IS_ERR(alias)) {
+ rc = PTR_ERR(alias);
+ goto out_unplug;
}
+ *dentryp = alias;
+ /**
+ * statahead prepared this inode, transfer inode
+ * refcount from sa_entry to dentry
+ */
entry->se_inode = NULL;
-
- if ((bits & MDS_INODELOCK_LOOKUP) &&
- d_lustre_invalid(*dentryp))
- d_lustre_revalidate(*dentryp);
- ll_intent_release(&it);
+ } else if ((*dentryp)->d_inode != inode) {
+ /* revalidate, but inode is recreated */
+ CDEBUG(D_READA,
+ "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
+ ll_get_fsname((*dentryp)->d_inode->i_sb,
+ NULL, 0),
+ *dentryp,
+ PFID(ll_inode2fid((*dentryp)->d_inode)),
+ PFID(ll_inode2fid(inode)));
+ rc = -ESTALE;
+ goto out_unplug;
}
- }
- ll_sai_unplug(sai, entry);
- return rc;
+ if ((bits & MDS_INODELOCK_LOOKUP) &&
+ d_lustre_invalid(*dentryp))
+ d_lustre_revalidate(*dentryp);
+ ll_intent_release(&it);
+ }
}
+out_unplug:
+ /*
+ * a statahead-cached sa_entry can be used only once and is killed
+ * right after use, so if lookup/revalidate accessed the statahead
+ * cache, set the dentry ldd_sa_generation to the parent
+ * lli_sa_generation; if we stat this file again later, we know that
+ * statahead has already been done, see dentry_may_statahead().
+ */
+ ldd = ll_d2d(*dentryp);
+ lli = ll_i2info(dir);
+ /* ldd can be NULL if llite lookup failed. */
+ if (ldd)
+ ldd->lld_sa_generation = lli->lli_sa_generation;
+ sa_put(sai, entry);
+ return rc;
+}
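/*
 * The ldd_sa_generation marker set in out_unplug above is what lets a later
 * stat of the same file tell "already served from statahead" apart from
 * "never tried". A sketch of the comparison the comment refers to; this is a
 * hypothetical helper, not the actual dentry_may_statahead() body.
 */
static bool sketch_dentry_used_statahead(struct ll_inode_info *lli,
					 struct ll_dentry_data *ldd)
{
	/* generations match: this dentry already went through the current sai */
	return ldd && ldd->lld_sa_generation == lli->lli_sa_generation;
}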
+
+/**
+ * start statahead thread
+ *
+ * \param[in] dir parent directory
+ * \param[in] dentry dentry that triggers statahead, normally the first
+ * dirent under @dir
+ * \retval -EAGAIN on success, because this function is called from
+ * within a lookup, so the client should do the getattr
+ * itself instead of waiting for the statahead thread to do
+ * it asynchronously.
+ * \retval negative number upon error
+ */
+static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = NULL;
+ struct l_wait_info lwi = { 0 };
+ struct ptlrpc_thread *thread;
+ struct task_struct *task;
+ struct dentry *parent = dentry->d_parent;
+ int rc;
/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
- rc = is_first_dirent(dir, *dentryp);
- if (rc == LS_NONE_FIRST_DE) {
+ rc = is_first_dirent(dir, dentry);
+ if (rc == LS_NOT_FIRST_DE) {
/* It is not "ls -{a}l" operation, no need statahead for it. */
- rc = -EAGAIN;
+ rc = -EFAULT;
goto out;
}
- sai = ll_sai_alloc();
+ sai = ll_sai_alloc(parent);
if (!sai) {
rc = -ENOMEM;
goto out;
}
sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
- sai->sai_inode = igrab(dir);
- if (unlikely(!sai->sai_inode)) {
- CWARN("Do not start stat ahead on dying inode "DFID"\n",
- PFID(&lli->lli_fid));
- rc = -ESTALE;
- goto out;
- }
-
- /* get parent reference count here, and put it in ll_statahead_thread */
- parent = dget((*dentryp)->d_parent);
- if (unlikely(sai->sai_inode != d_inode(parent))) {
- struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
-
- CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
- *dentryp,
- PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
- dput(parent);
- iput(sai->sai_inode);
- rc = -EAGAIN;
+ /*
+ * if the current lli_opendir_key was deauthorized, or the dir was
+ * re-opened by another process, don't start statahead; otherwise the
+ * newly spawned statahead thread would never be notified to quit.
+ */
+ spin_lock(&lli->lli_sa_lock);
+ if (unlikely(lli->lli_sai || lli->lli_opendir_key ||
+ lli->lli_opendir_pid != current->pid)) {
+ spin_unlock(&lli->lli_sa_lock);
+ rc = -EPERM;
goto out;
}
+ lli->lli_sai = sai;
+ spin_unlock(&lli->lli_sa_lock);
- CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
- sai, parent);
+ atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
- /* The sai buffer already has one reference taken at allocation time,
- * but as soon as we expose the sai by attaching it to the lli that
- * default reference can be dropped by another thread calling
- * ll_stop_statahead. We need to take a local reference to protect
- * the sai buffer while we intend to access it.
- */
- ll_sai_get(sai);
- lli->lli_sai = sai;
+ CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
+ current_pid(), parent);
- plli = ll_i2info(d_inode(parent));
task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
- plli->lli_opendir_pid);
+ lli->lli_opendir_pid);
thread = &sai->sai_thread;
if (IS_ERR(task)) {
rc = PTR_ERR(task);
- CERROR("can't start ll_sa thread, rc: %d\n", rc);
- dput(parent);
- lli->lli_opendir_key = NULL;
- thread_set_flags(thread, SVC_STOPPED);
- thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
- /* Drop both our own local reference and the default
- * reference from allocation time.
- */
- ll_sai_put(sai);
- ll_sai_put(sai);
- LASSERT(!lli->lli_sai);
- return -EAGAIN;
+ CERROR("can't start ll_sa thread, rc : %d\n", rc);
+ goto out;
}
l_wait_event(thread->t_ctl_waitq,
@@ -1694,10 +1598,47 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
return -EAGAIN;
out:
- kfree(sai);
+ /*
+ * once starting the statahead thread has failed, disable statahead so
+ * that subsequent stats won't waste time trying it.
+ */
spin_lock(&lli->lli_sa_lock);
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
+ lli->lli_sa_enabled = 0;
+ lli->lli_sai = NULL;
spin_unlock(&lli->lli_sa_lock);
+ if (sai)
+ ll_sai_free(sai);
return rc;
}
+
+/**
+ * statahead entry function: called when the client does a getattr on a file;
+ * it starts the statahead thread if this is the first dir entry, otherwise it
+ * revalidates the dentry from the statahead cache.
+ *
+ * \param[in] dir parent directory
+ * \param[out] dentryp dentry to getattr
+ * \param[in] unplug unplug statahead window only (normally for negative
+ * dentry)
+ * \retval 1 on success
+ * \retval 0 revalidation from the statahead cache failed, the caller
+ * needs to do a getattr from the server directly
+ * \retval negative number on error; the caller often ignores this and
+ * then does a getattr from the server
+ */
+int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
+{
+ struct ll_statahead_info *sai;
+
+ sai = ll_sai_get(dir);
+ if (sai) {
+ int rc;
+
+ rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
+ CDEBUG(D_READA, "revalidate statahead %pd: %d.\n",
+ *dentryp, rc);
+ ll_sai_put(sai);
+ return rc;
+ }
+ return start_statahead_thread(dir, *dentryp);
+}
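/*
 * How a lookup/getattr path is expected to consume the return values listed
 * above: 1 means the dentry was revalidated from the statahead cache, 0 or a
 * negative value means the caller falls back to a synchronous getattr. A
 * hedged sketch; sketch_getattr_path() and getattr_from_server() are
 * placeholders, not llite functions.
 */
static int sketch_getattr_path(struct inode *dir, struct dentry **dentryp)
{
	int rc = ll_statahead(dir, dentryp, false);

	if (rc == 1)
		return 0;	/* served from statahead cache */

	/* 0 or negative: ask the MDS directly */
	return getattr_from_server(dir, *dentryp);
}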
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 3dd7e0eb0b54..106cd00910a7 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include "../include/lustre_lite.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_dlm.h"
#include <linux/init.h>
@@ -83,8 +82,6 @@ struct super_operations lustre_super_operations = {
};
MODULE_ALIAS_FS("lustre");
-void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
-
static int __init lustre_init(void)
{
lnet_process_id_t lnet_id;
@@ -102,8 +99,8 @@ static int __init lustre_init(void)
rc = -ENOMEM;
ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
+ sizeof(struct ll_inode_info), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
NULL);
if (!ll_inode_cachep)
goto out_cache;
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 8c8bdfe1ad71..f8bc7ed59646 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -35,7 +35,6 @@
#include <linux/stat.h>
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
static int ll_readlink_internal(struct inode *inode,
@@ -80,17 +79,17 @@ static int ll_readlink_internal(struct inode *inode,
}
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if ((body->valid & OBD_MD_LINKNAME) == 0) {
+ if ((body->mbo_valid & OBD_MD_LINKNAME) == 0) {
CERROR("OBD_MD_LINKNAME not set on reply\n");
rc = -EPROTO;
goto failed;
}
LASSERT(symlen != 0);
- if (body->eadatasize != symlen) {
+ if (body->mbo_eadatasize != symlen) {
CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(ll_inode2fid(inode)), body->eadatasize - 1,
+ PFID(ll_inode2fid(inode)), body->mbo_eadatasize - 1,
symlen - 1);
rc = -EPROTO;
goto failed;
@@ -155,8 +154,8 @@ const struct inode_operations ll_fast_symlink_inode_operations = {
.get_link = ll_get_link,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
+ .removexattr = generic_removexattr,
};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index e623216e962d..8aa8ecc09a48 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -38,7 +38,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd.h"
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"
@@ -368,12 +367,6 @@ int cl_sb_fini(struct super_block *sb)
CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
result = PTR_ERR(env);
}
- /*
- * If mount failed (sbi->ll_cl == NULL), and this there are no other
- * mounts, stop device types manually (this usually happens
- * automatically when last device is destroyed).
- */
- lu_types_stop();
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 79fc428461ed..4464ad258387 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -49,14 +49,6 @@ struct obd_device;
struct obd_export;
struct page;
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
/**
 * IO state private to the VVP layer.
*/
@@ -99,10 +91,6 @@ struct vvp_io {
bool ft_flags_valid;
} fault;
struct {
- struct pipe_inode_info *vui_pipe;
- unsigned int vui_flags;
- } splice;
- struct {
struct cl_page_list vui_queue;
unsigned long vui_written;
int vui_from;
@@ -110,8 +98,6 @@ struct vvp_io {
} write;
} u;
- enum vvp_io_subtype vui_io_subtype;
-
/**
* Layout version when this IO is initialized
*/
@@ -217,11 +203,12 @@ struct vvp_object {
struct list_head vob_pending_list;
/**
- * Access this counter is protected by inode->i_sem. Now that
- * the lifetime of transient pages must be covered by inode sem,
- * we don't need to hold any lock..
+ * Number of transient pages. This is no longer protected by i_sem,
+ * and needs to be atomic. This is not actually used for anything,
+ * and can probably be removed.
*/
- int vob_transient_pages;
+ atomic_t vob_transient_pages;
+
/**
* Number of outstanding mmaps on this file.
*
@@ -247,9 +234,9 @@ struct vvp_object {
*/
struct vvp_page {
struct cl_page_slice vpg_cl;
- int vpg_defer_uptodate;
- int vpg_ra_used;
- int vpg_write_queued;
+ unsigned int vpg_defer_uptodate:1,
+ vpg_ra_used:1,
+ vpg_write_queued:1;
/**
* Non-empty iff this page is already counted in
* vvp_object::vob_pending_list. This list is only used as a flag,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 94916dcc6caa..2b7f182a15e2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -38,7 +38,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd.h"
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"
@@ -55,18 +54,6 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
}
/**
- * True, if \a io is a normal io, False for splice_{read,write}
- */
-static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- return vio->vui_io_subtype == IO_NORMAL;
-}
-
-/**
* For swapping layout. The file's layout may have changed.
* To avoid populating pages to a wrong stripe, we have to verify the
* correctness of layout. It works because swapping layout processes
@@ -391,9 +378,6 @@ static int vvp_mmap_locks(const struct lu_env *env,
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- if (!cl_is_normalio(env, io))
- return 0;
-
if (!vio->vui_iter) /* nfs or loop back device write */
return 0;
@@ -462,15 +446,10 @@ static void vvp_io_advance(const struct lu_env *env,
const struct cl_io_slice *ios,
size_t nob)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
struct cl_object *obj = ios->cis_io->ci_obj;
-
+ struct vvp_io *vio = cl2vvp_io(env, ios);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- if (!cl_is_normalio(env, io))
- return;
-
iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
}
@@ -479,7 +458,7 @@ static void vvp_io_update_iov(const struct lu_env *env,
{
size_t size = io->u.ci_rw.crw_count;
- if (!cl_is_normalio(env, io) || !vio->vui_iter)
+ if (!vio->vui_iter)
return;
iov_iter_truncate(vio->vui_iter, size);
@@ -628,7 +607,7 @@ static int vvp_io_setattr_time(const struct lu_env *env,
attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
valid |= CAT_MTIME;
}
- result = cl_object_attr_set(env, obj, attr, valid);
+ result = cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
return result;
@@ -716,25 +695,8 @@ static int vvp_io_read_start(const struct lu_env *env,
/* BUG: 5972 */
file_accessed(file);
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- LASSERT(vio->vui_iocb->ki_pos == pos);
- result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
- break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.vui_pipe, cnt,
- vio->u.splice.vui_flags);
- /* LU-1109: do splice read stripe by stripe otherwise if it
- * may make nfsd stuck if this read occupied all internal pipe
- * buffers.
- */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
- LBUG();
- }
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
out:
if (result >= 0) {
@@ -821,7 +783,7 @@ static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
cl_page_disown(env, io, page);
/* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", io);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
cl_page_put(env, page);
}
@@ -959,10 +921,30 @@ static int vvp_io_write_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (!vio->vui_iter) /* from a temp io in ll_cl_init(). */
+ if (!vio->vui_iter) {
+ /* from a temp io in ll_cl_init(). */
result = 0;
- else
- result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter);
+ } else {
+ /*
+ * When using the locked AIO function (generic_file_aio_write())
+ * testing has shown the inode mutex to be a limiting factor
+ * with multi-threaded single shared file performance. To get
+ * around this, we now use the lockless version. To maintain
+ * consistency, proper locking to protect against writes,
+ * truncates, etc. is handled in the higher layers of lustre.
+ */
+ bool lock_node = !IS_NOSEC(inode);
+
+ if (lock_node)
+ inode_lock(inode);
+ result = __generic_file_write_iter(vio->vui_iocb,
+ vio->vui_iter);
+ if (lock_node)
+ inode_unlock(inode);
+
+ if (result > 0 || result == -EIOCBQUEUED)
+ result = generic_write_sync(vio->vui_iocb, result);
+ }
if (result > 0) {
result = vvp_io_write_commit(env, io);
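For readers of the write hunk above: generic_file_write_iter() is, roughly, generic_write_checks() plus __generic_file_write_iter() under i_rwsem, followed by generic_write_sync(). Using the double-underscore variant therefore makes the caller responsible for both the lock (needed when privileges such as suid/sgid may have to be stripped, i.e. !IS_NOSEC(inode)) and the sync. A condensed, standalone restatement of that pattern, with an illustrative function name and no Lustre-specific state:

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_lockless_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	bool lock_inode = !IS_NOSEC(inode);	/* privileges may need stripping */
	ssize_t ret;

	if (lock_inode)
		inode_lock(inode);
	ret = __generic_file_write_iter(iocb, from);
	if (lock_inode)
		inode_unlock(inode);

	/* the unlocked helper does not sync; mirror generic_file_write_iter() */
	if (ret > 0 || ret == -EIOCBQUEUED)
		ret = generic_write_sync(iocb, ret);
	return ret;
}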
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index 64be0c9df35b..07eb26cc43f5 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -37,7 +37,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "vvp_internal.h"
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 2c520b0bf6ca..b57195d15674 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -39,7 +39,6 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd.h"
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"
@@ -68,8 +67,8 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
(*p)(env, cookie, "(%s %d %d) inode: %p ",
list_empty(&obj->vob_pending_list) ? "-" : "+",
- obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
- inode);
+ atomic_read(&obj->vob_transient_pages),
+ atomic_read(&obj->vob_mmap_cnt), inode);
if (inode) {
lli = ll_i2info(inode);
(*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
@@ -102,8 +101,8 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
return 0; /* layers below have to fill in the rest */
}
-static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
+static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid)
{
struct inode *inode = vvp_object_inode(obj);
@@ -120,7 +119,7 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
if (0 && valid & CAT_SIZE)
i_size_write(inode, attr->cat_size);
/* not currently necessary */
- if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
+ if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
mark_inode_dirty(inode);
return 0;
}
@@ -210,7 +209,7 @@ static const struct cl_object_operations vvp_ops = {
.coo_lock_init = vvp_lock_init,
.coo_io_init = vvp_io_init,
.coo_attr_get = vvp_attr_get,
- .coo_attr_set = vvp_attr_set,
+ .coo_attr_update = vvp_attr_update,
.coo_conf_set = vvp_conf_set,
.coo_prune = vvp_prune,
.coo_glimpse = vvp_object_glimpse
@@ -221,7 +220,7 @@ static int vvp_object_init0(const struct lu_env *env,
const struct cl_object_conf *conf)
{
vob->vob_inode = conf->coc_inode;
- vob->vob_transient_pages = 0;
+ atomic_set(&vob->vob_transient_pages, 0);
cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
return 0;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 2e566d90bb94..5d79efc1aafe 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -44,8 +44,6 @@
#include <linux/page-flags.h>
#include <linux/pagemap.h>
-#include "../include/lustre_lite.h"
-
#include "llite_internal.h"
#include "vvp_internal.h"
@@ -249,7 +247,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
set_bit(AS_EIO, &inode->i_mapping->flags);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->vob_discard_page_warned == 0) {
+ obj->vob_discard_page_warned == 0) {
obj->vob_discard_page_warned = 1;
ll_dirty_page_discard_warn(vmpage, ioret);
}
@@ -444,18 +442,10 @@ static int vvp_transient_page_prep(const struct lu_env *env,
return 0;
}
-static void vvp_transient_page_verify(const struct cl_page *page)
-{
- struct inode *inode = vvp_object_inode(page->cp_obj);
-
- LASSERT(!inode_trylock(inode));
-}
-
static int vvp_transient_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused, int nonblock)
{
- vvp_transient_page_verify(slice->cpl_page);
return 0;
}
@@ -463,21 +453,18 @@ static void vvp_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_discard(const struct lu_env *env,
@@ -486,8 +473,6 @@ static void vvp_transient_page_discard(const struct lu_env *env,
{
struct cl_page *page = slice->cpl_page;
- vvp_transient_page_verify(slice->cpl_page);
-
/*
* For transient pages, remove it from the radix tree.
*/
@@ -511,7 +496,6 @@ vvp_transient_page_completion(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_fini(const struct lu_env *env,
@@ -522,8 +506,7 @@ static void vvp_transient_page_fini(const struct lu_env *env,
struct vvp_object *clobj = cl2vvp(clp->cp_obj);
vvp_page_fini_common(vpg);
- LASSERT(!inode_trylock(clobj->vob_inode));
- clobj->vob_transient_pages--;
+ atomic_dec(&clobj->vob_transient_pages);
}
static const struct cl_page_operations vvp_transient_page_ops = {
@@ -549,7 +532,7 @@ static const struct cl_page_operations vvp_transient_page_ops = {
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
+ struct cl_page *page, pgoff_t index)
{
struct vvp_page *vpg = cl_object_page_slice(obj, page);
struct page *vmpage = page->cp_vmpage;
@@ -570,10 +553,9 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
} else {
struct vvp_object *clobj = cl2vvp(obj);
- LASSERT(!inode_trylock(clobj->vob_inode));
cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_transient_page_ops);
- clobj->vob_transient_pages++;
+ atomic_inc(&clobj->vob_transient_pages);
}
return 0;
}
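The vob_transient_pages changes above are the standard conversion of a lock-protected counter into an atomic_t: once the updates are atomic, the inode_trylock() assertions lose their purpose and can be dropped. A minimal sketch of the pattern, with purely illustrative names, not part of the patch:

#include <linux/atomic.h>

struct demo_object {
	atomic_t	demo_transient_pages;	/* was: int guarded by i_sem */
};

static void demo_transient_page_add(struct demo_object *obj)
{
	atomic_inc(&obj->demo_transient_pages);	/* no inode lock required */
}

static void demo_transient_page_del(struct demo_object *obj)
{
	atomic_dec(&obj->demo_transient_pages);
}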
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
index 9fe9d6c0a7d1..e3f4c790d646 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -32,7 +32,6 @@
#include "../include/cl_object.h"
#include "../include/obd.h"
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"
@@ -83,8 +82,10 @@ static void vvp_req_attr_set(const struct lu_env *env,
}
obdo_from_inode(oa, inode, valid_flags & flags);
obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
+ oa->o_parent_oid++;
memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
- JOBSTATS_JOBID_SIZE);
+ LUSTRE_JOBID_SIZE);
}
static void vvp_req_completion(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 98303cf85815..e070adb7a3cc 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -38,21 +38,12 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_eacl.h"
#include "llite_internal.h"
-#define XATTR_USER_T (1)
-#define XATTR_TRUSTED_T (2)
-#define XATTR_SECURITY_T (3)
-#define XATTR_ACL_ACCESS_T (4)
-#define XATTR_ACL_DEFAULT_T (5)
-#define XATTR_LUSTRE_T (6)
-#define XATTR_OTHER_T (7)
-
static
int get_xattr_type(const char *name)
{
@@ -99,46 +90,57 @@ int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
return 0;
}
-static
-int ll_setxattr_common(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags, __u64 valid)
+static int
+ll_xattr_set_common(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
{
+ char fullname[strlen(handler->prefix) + strlen(name) + 1];
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
- int xattr_type, rc;
const char *pv = value;
+ __u64 valid;
+ int rc;
+
+ if (flags == XATTR_REPLACE) {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
+ valid = OBD_MD_FLXATTRRM;
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
+ valid = OBD_MD_FLXATTR;
+ }
- xattr_type = get_xattr_type(name);
- rc = xattr_type_filter(sbi, xattr_type);
+ rc = xattr_type_filter(sbi, handler->flags);
if (rc)
return rc;
- if ((xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T) &&
+ if ((handler->flags == XATTR_ACL_ACCESS_T ||
+ handler->flags == XATTR_ACL_DEFAULT_T) &&
!inode_owner_or_capable(inode))
return -EPERM;
/* b10667: ignore lustre special xattr for now */
- if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
- (xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
+ if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
+ (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))
return 0;
/* b15587: ignore security.capability xattr for now */
- if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
+ if ((handler->flags == XATTR_SECURITY_T &&
+ !strcmp(name, "capability")))
return 0;
/* LU-549: Disable security.selinux when selinux is disabled */
- if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
- strcmp(name, "security.selinux") == 0)
+ if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
+ strcmp(name, "selinux") == 0)
return -EOPNOTSUPP;
+ sprintf(fullname, "%s%s\n", handler->prefix, name);
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid, name, pv, size, 0, flags,
+ valid, fullname, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
if (rc) {
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ if (rc == -EOPNOTSUPP && handler->flags == XATTR_USER_T) {
LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
@@ -149,8 +151,10 @@ int ll_setxattr_common(struct inode *inode, const char *name,
return 0;
}
-int ll_setxattr(struct dentry *dentry, struct inode *inode,
- const char *name, const void *value, size_t size, int flags)
+static int ll_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
{
LASSERT(inode);
LASSERT(name);
@@ -158,20 +162,24 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode,
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
PFID(ll_inode2fid(inode)), inode, name);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
-
- if ((strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
- (strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
+ if (!strcmp(name, "lov")) {
struct lov_user_md *lump = (struct lov_user_md *)value;
+ int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR :
+ LPROC_LL_SETXATTR;
int rc = 0;
+ ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1);
+
if (size != 0 && size < sizeof(struct lov_user_md))
return -EINVAL;
+ /*
+ * It is possible to set an xattr to a "" value of zero size.
+ * For this case we are going to treat it as a removal.
+ */
+ if (!size && lump)
+ lump = NULL;
+
/* Attributes that are saved via getxattr will always have
* the stripe_offset as 0. Instead, the MDS should be
* allowed to pick the starting OST index. b=17846
@@ -181,12 +189,15 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode,
if (lump && S_ISREG(inode->i_mode)) {
__u64 it_flags = FMODE_WRITE;
- int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
- sizeof(*lump) : sizeof(struct lov_user_md_v3);
+ int lum_size;
+
+ lum_size = ll_lov_user_md_size(lump);
+ if (lum_size < 0 || size < lum_size)
+ return 0; /* b=10667: ignore error */
rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
lump, lum_size);
- /* b10667: rc always be 0 here for now */
+ /* b=10667: rc always be 0 here for now */
rc = 0;
} else if (S_ISDIR(inode->i_mode)) {
rc = ll_dir_setstripe(inode, lump, 0);
@@ -194,92 +205,27 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode,
return rc;
- } else if (strcmp(name, XATTR_NAME_LMA) == 0 ||
- strcmp(name, XATTR_NAME_LINK) == 0)
+ } else if (!strcmp(name, "lma") || !strcmp(name, "link")) {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
return 0;
+ }
- return ll_setxattr_common(inode, name, value, size, flags,
- OBD_MD_FLXATTR);
-}
-
-int ll_removexattr(struct dentry *dentry, const char *name)
-{
- struct inode *inode = d_inode(dentry);
-
- LASSERT(inode);
- LASSERT(name);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
- PFID(ll_inode2fid(inode)), inode, name);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
- return ll_setxattr_common(inode, name, NULL, 0, 0,
- OBD_MD_FLXATTRRM);
+ return ll_xattr_set_common(handler, dentry, inode, name, value, size,
+ flags);
}
-static
-int ll_getxattr_common(struct inode *inode, const char *name,
- void *buffer, size_t size, __u64 valid)
+int
+ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
+ size_t size, __u64 valid)
{
+ struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
struct mdt_body *body;
- int xattr_type, rc;
void *xdata;
- struct ll_inode_info *lli = ll_i2info(inode);
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
-
- /* listxattr have slightly different behavior from of ext3:
- * without 'user_xattr' ext3 will list all xattr names but
- * filtered out "^user..*"; we list them all for simplicity.
- */
- if (!name) {
- xattr_type = XATTR_OTHER_T;
- goto do_getxattr;
- }
+ int rc;
- xattr_type = get_xattr_type(name);
- rc = xattr_type_filter(sbi, xattr_type);
- if (rc)
- return rc;
-
- /* b15587: ignore security.capability xattr for now */
- if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
- return -ENODATA;
-
- /* LU-549: Disable security.selinux when selinux is disabled */
- if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
- strcmp(name, "security.selinux") == 0)
- return -EOPNOTSUPP;
-
-#ifdef CONFIG_FS_POSIX_ACL
- /* posix acl is under protection of LOOKUP lock. when calling to this,
- * we just have path resolution to the target inode, so we have great
- * chance that cached ACL is uptodate.
- */
- if (xattr_type == XATTR_ACL_ACCESS_T) {
- struct posix_acl *acl;
-
- spin_lock(&lli->lli_lock);
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- if (!acl)
- return -ENODATA;
-
- rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- posix_acl_release(acl);
- return rc;
- }
- if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
- return -ENODATA;
-#endif
-
-do_getxattr:
- if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
+ if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
if (rc == -EAGAIN)
goto getxattr_nocache;
@@ -311,36 +257,36 @@ getxattr_nocache:
/* only detect the xattr size */
if (size == 0) {
- rc = body->eadatasize;
+ rc = body->mbo_eadatasize;
goto out;
}
- if (size < body->eadatasize) {
+ if (size < body->mbo_eadatasize) {
CERROR("server bug: replied size %u > %u\n",
- body->eadatasize, (int)size);
+ body->mbo_eadatasize, (int)size);
rc = -ERANGE;
goto out;
}
- if (body->eadatasize == 0) {
+ if (body->mbo_eadatasize == 0) {
rc = -ENODATA;
goto out;
}
/* do not need swab xattr data */
xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
+ body->mbo_eadatasize);
if (!xdata) {
rc = -EFAULT;
goto out;
}
- memcpy(buffer, xdata, body->eadatasize);
- rc = body->eadatasize;
+ memcpy(buffer, xdata, body->mbo_eadatasize);
+ rc = body->mbo_eadatasize;
}
out_xattr:
- if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ if (rc == -EOPNOTSUPP && type == XATTR_USER_T) {
LCONSOLE_INFO(
"%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
ll_get_fsname(inode->i_sb, NULL, 0), rc);
@@ -351,8 +297,65 @@ out:
return rc;
}
-ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
- const char *name, void *buffer, size_t size)
+static int ll_xattr_get_common(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ char fullname[strlen(handler->prefix) + strlen(name) + 1];
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+#ifdef CONFIG_FS_POSIX_ACL
+ struct ll_inode_info *lli = ll_i2info(inode);
+#endif
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
+ rc = xattr_type_filter(sbi, handler->flags);
+ if (rc)
+ return rc;
+
+ /* b15587: ignore security.capability xattr for now */
+ if ((handler->flags == XATTR_SECURITY_T && !strcmp(name, "capability")))
+ return -ENODATA;
+
+ /* LU-549: Disable security.selinux when selinux is disabled */
+ if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
+ !strcmp(name, "selinux"))
+ return -EOPNOTSUPP;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ /* The posix acl is under the protection of the LOOKUP lock. When we get
+ * here we have only done path resolution to the target inode, so there
+ * is a good chance that the cached ACL is up to date.
+ */
+ if (handler->flags == XATTR_ACL_ACCESS_T) {
+ struct posix_acl *acl;
+
+ spin_lock(&lli->lli_lock);
+ acl = posix_acl_dup(lli->lli_posix_acl);
+ spin_unlock(&lli->lli_lock);
+
+ if (!acl)
+ return -ENODATA;
+
+ rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ posix_acl_release(acl);
+ return rc;
+ }
+ if (handler->flags == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
+ return -ENODATA;
+#endif
+ sprintf(fullname, "%s%s\n", handler->prefix, name);
+ return ll_xattr_list(inode, fullname, handler->flags, buffer, size,
+ OBD_MD_FLXATTR);
+}
+
+static int ll_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
LASSERT(inode);
LASSERT(name);
@@ -360,36 +363,23 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
PFID(ll_inode2fid(inode)), inode, name);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
- if ((strncmp(name, XATTR_TRUSTED_PREFIX,
- sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
- (strncmp(name, XATTR_LUSTRE_PREFIX,
- sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
- strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
+ if (!strcmp(name, "lov")) {
struct lov_stripe_md *lsm;
struct lov_user_md *lump;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *request = NULL;
int rc = 0, lmmsize = 0;
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -ENODATA;
- if (size == 0 && S_ISDIR(inode->i_mode)) {
- /* XXX directory EA is fix for now, optimize to save
- * RPC transfer
- */
- rc = sizeof(struct lov_user_md);
- goto out;
- }
-
lsm = ccc_inode_lsm_get(inode);
if (!lsm) {
if (S_ISDIR(inode->i_mode)) {
- rc = ll_dir_getstripe(inode, &lmm,
- &lmmsize, &request);
+ rc = ll_dir_getstripe(inode, (void **)&lmm,
+ &lmmsize, &request, 0);
} else {
rc = -ENODATA;
}
@@ -439,7 +429,7 @@ out:
return rc;
}
- return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR);
+ return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
}
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -457,7 +447,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
- rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS);
+ rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
+ OBD_MD_FLXATTRLS);
if (rc < 0)
goto out;
@@ -488,7 +479,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
if (!ll_i2info(inode)->lli_has_smd)
rc2 = -1;
} else if (S_ISDIR(inode->i_mode)) {
- rc2 = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
+ rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
+ &request, 0);
}
if (rc2 < 0) {
@@ -518,3 +510,57 @@ out:
return rc;
}
+
+static const struct xattr_handler ll_user_xattr_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = XATTR_USER_T,
+ .get = ll_xattr_get_common,
+ .set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_trusted_xattr_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = XATTR_TRUSTED_T,
+ .get = ll_xattr_get,
+ .set = ll_xattr_set,
+};
+
+static const struct xattr_handler ll_security_xattr_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .flags = XATTR_SECURITY_T,
+ .get = ll_xattr_get_common,
+ .set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_acl_access_xattr_handler = {
+ .prefix = XATTR_NAME_POSIX_ACL_ACCESS,
+ .flags = XATTR_ACL_ACCESS_T,
+ .get = ll_xattr_get_common,
+ .set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_acl_default_xattr_handler = {
+ .prefix = XATTR_NAME_POSIX_ACL_DEFAULT,
+ .flags = XATTR_ACL_DEFAULT_T,
+ .get = ll_xattr_get_common,
+ .set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_lustre_xattr_handler = {
+ .prefix = XATTR_LUSTRE_PREFIX,
+ .flags = XATTR_LUSTRE_T,
+ .get = ll_xattr_get,
+ .set = ll_xattr_set,
+};
+
+const struct xattr_handler *ll_xattr_handlers[] = {
+ &ll_user_xattr_handler,
+ &ll_trusted_xattr_handler,
+ &ll_security_xattr_handler,
+#ifdef CONFIG_FS_POSIX_ACL
+ &ll_acl_access_xattr_handler,
+ &ll_acl_default_xattr_handler,
+#endif
+ &ll_lustre_xattr_handler,
+ NULL,
+};
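The handler table above only takes effect once it is attached to the superblock, which this hunk does not show; the fill_super wiring below is therefore an assumption for illustration (and assumes ll_xattr_handlers is exported through llite_internal.h). With sb->s_xattr set, the generic_*xattr() entry points installed in symlink.c let the VFS pick a handler by prefix and hand it the name with the prefix already stripped, which is why the handlers compare against "lov", "capability" or "selinux" and rebuild the full name from handler->prefix.

#include <linux/fs.h>
#include "llite_internal.h"

static int demo_fill_super(struct super_block *sb)
{
	/* enable prefix-based xattr dispatch for this filesystem */
	sb->s_xattr = ll_xattr_handlers;
	return 0;
}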
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 8089da8143d9..50a19a40bd4e 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include "../include/obd_support.h"
-#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "llite_internal.h"
@@ -270,10 +269,12 @@ static int ll_xattr_find_get_lock(struct inode *inode,
struct lustre_handle lockh = { 0 };
struct md_op_data *op_data;
struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(oit),
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(oit),
+ .ei_cb_bl = &ll_md_blocking_ast,
+ .ei_cb_cp = &ldlm_completion_ast,
+ };
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_export *exp = sbi->ll_md_exp;
int rc;
@@ -304,7 +305,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
- rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
+ rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
ll_finish_md_op_data(op_data);
if (rc < 0) {
@@ -380,25 +381,25 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
}
/* do not need swab xattr data */
xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
+ body->mbo_eadatasize);
xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
- body->aclsize);
+ body->mbo_aclsize);
xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
- body->max_mdsize * sizeof(__u32));
+ body->mbo_max_mdsize * sizeof(__u32));
if (!xdata || !xval || !xsizes) {
CERROR("wrong setxattr reply\n");
rc = -EPROTO;
goto out_destroy;
}
- xtail = xdata + body->eadatasize;
- xvtail = xval + body->aclsize;
+ xtail = xdata + body->mbo_eadatasize;
+ xvtail = xval + body->mbo_aclsize;
CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
ll_xattr_cache_init(lli);
- for (i = 0; i < body->max_mdsize; i++) {
+ for (i = 0; i < body->mbo_max_mdsize; i++) {
CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
/* Perform consistency checks: attr names and vals in pill */
if (!memchr(xdata, 0, xtail - xdata)) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index a3d170aa6fd2..a5265f9b5797 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -47,18 +47,20 @@
#include "../include/lprocfs_status.h"
#include "lmv_internal.h"
-int lmv_fld_lookup(struct lmv_obd *lmv,
- const struct lu_fid *fid,
- u32 *mds)
+int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds)
{
+ struct obd_device *obd = lmv2obd_dev(lmv);
int rc;
- /* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and
+ /*
+ * FIXME: Currently ZFS still uses local seq for ROOT unfortunately, and
* this fid_is_local check should be removed once LU-2240 is fixed
*/
- LASSERTF((fid_seq_in_fldb(fid_seq(fid)) ||
- fid_seq_is_local_file(fid_seq(fid))) &&
- fid_is_sane(fid), DFID" is insane!\n", PFID(fid));
+ if (!fid_is_sane(fid) || !(fid_seq_in_fldb(fid_seq(fid)) ||
+ fid_seq_is_local_file(fid_seq(fid)))) {
+ CERROR("%s: invalid FID " DFID "\n", obd->obd_name, PFID(fid));
+ return -EINVAL;
+ }
rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(fid), mds,
LU_SEQ_RANGE_MDT, NULL);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 2f58fdab8d1e..9f4e826bb0af 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -43,13 +43,13 @@
#include "../include/lustre_lib.h"
#include "../include/lustre_net.h"
#include "../include/lustre_dlm.h"
+#include "../include/lustre_mdc.h"
#include "../include/obd_class.h"
#include "../include/lprocfs_status.h"
#include "lmv_internal.h"
-static int lmv_intent_remote(struct obd_export *exp, void *lmm,
- int lmmsize, struct lookup_intent *it,
- const struct lu_fid *parent_fid, int flags,
+static int lmv_intent_remote(struct obd_export *exp, struct lookup_intent *it,
+ const struct lu_fid *parent_fid,
struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
@@ -68,7 +68,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
if (!body)
return -EPROTO;
- LASSERT((body->valid & OBD_MD_MDS));
+ LASSERT((body->mbo_valid & OBD_MD_MDS));
/*
* Unfortunately, we have to lie to MDC/MDS to retrieve
@@ -87,9 +87,9 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
it->it_request = NULL;
}
- LASSERT(fid_is_sane(&body->fid1));
+ LASSERT(fid_is_sane(&body->mbo_fid1));
- tgt = lmv_find_target(lmv, &body->fid1);
+ tgt = lmv_find_target(lmv, &body->mbo_fid1);
if (IS_ERR(tgt)) {
rc = PTR_ERR(tgt);
goto out;
@@ -101,7 +101,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
goto out;
}
- op_data->op_fid1 = body->fid1;
+ op_data->op_fid1 = body->mbo_fid1;
/* Sent the parent FID to the remote MDT */
if (parent_fid) {
/* The parent fid is only for remote open to
@@ -110,18 +110,14 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
*/
LASSERT(it->it_op & IT_OPEN);
op_data->op_fid2 = *parent_fid;
- /* Add object FID to op_fid3, in case it needs to check stale
- * (M_CHECK_STALE), see mdc_finish_intent_lock
- */
- op_data->op_fid3 = body->fid1;
}
op_data->op_bias = MDS_CROSS_REF;
- CDEBUG(D_INODE, "REMOTE_INTENT with fid="DFID" -> mds #%d\n",
- PFID(&body->fid1), tgt->ltd_idx);
+ CDEBUG(D_INODE, "REMOTE_INTENT with fid=" DFID " -> mds #%u\n",
+ PFID(&body->mbo_fid1), tgt->ltd_idx);
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
- flags, &req, cb_blocking, extra_lock_flags);
+ rc = md_intent_lock(tgt->ltd_exp, op_data, it, &req, cb_blocking,
+ extra_lock_flags);
if (rc)
goto out_free_op_data;
@@ -136,8 +132,10 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
it->it_remote_lock_mode = it->it_lock_mode;
}
- it->it_lock_handle = plock.cookie;
- it->it_lock_mode = pmode;
+ if (pmode) {
+ it->it_lock_handle = plock.cookie;
+ it->it_lock_mode = pmode;
+ }
out_free_op_data:
kfree(op_data);
@@ -150,13 +148,126 @@ out:
return rc;
}
+int lmv_revalidate_slaves(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ ldlm_blocking_callback cb_blocking,
+ int extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct ptlrpc_request *req = NULL;
+ struct mdt_body *body;
+ struct md_op_data *op_data;
+ int rc = 0, i;
+
+ /**
+ * revalidate slaves has some problems, temporarily return,
+ * we may not need that
+ */
+ op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
+ if (!op_data)
+ return -ENOMEM;
+
+ /**
+ * Loop over the stripe information, check validity and update them
+ * from MDS if needed.
+ */
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ struct lookup_intent it = { .it_op = IT_GETATTR };
+ struct lustre_handle *lockh = NULL;
+ struct lmv_tgt_desc *tgt = NULL;
+ struct inode *inode;
+ struct lu_fid fid;
+
+ fid = lsm->lsm_md_oinfo[i].lmo_fid;
+ inode = lsm->lsm_md_oinfo[i].lmo_root;
+
+ /*
+ * Prepare op_data for revalidating. Note that @fid2 should be
+ * defined, otherwise it will go to the server and take a new lock,
+ * which is not needed here.
+ */
+ memset(op_data, 0, sizeof(*op_data));
+ op_data->op_fid1 = fid;
+ op_data->op_fid2 = fid;
+
+ tgt = lmv_locate_mds(lmv, op_data, &fid);
+ if (IS_ERR(tgt)) {
+ rc = PTR_ERR(tgt);
+ goto cleanup;
+ }
+
+ CDEBUG(D_INODE, "Revalidate slave " DFID " -> mds #%u\n",
+ PFID(&fid), tgt->ltd_idx);
+
+ if (req) {
+ ptlrpc_req_finished(req);
+ req = NULL;
+ }
+
+ rc = md_intent_lock(tgt->ltd_exp, op_data, &it, &req,
+ cb_blocking, extra_lock_flags);
+ if (rc < 0)
+ goto cleanup;
+
+ lockh = (struct lustre_handle *)&it.it_lock_handle;
+ if (rc > 0 && !req) {
+ /* slave inode is still valid */
+ CDEBUG(D_INODE, "slave "DFID" is still valid.\n",
+ PFID(&fid));
+ rc = 0;
+ } else {
+ /* refresh slave from server */
+ body = req_capsule_server_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+ LASSERT(body);
+
+ if (unlikely(body->mbo_nlink < 2)) {
+ CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
+ obd->obd_name, body->mbo_nlink, i,
+ PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
+ PFID(&lsm->lsm_md_oinfo[0].lmo_fid));
+
+ if (it.it_lock_mode && lockh) {
+ ldlm_lock_decref(lockh, it.it_lock_mode);
+ it.it_lock_mode = 0;
+ }
+
+ rc = -EIO;
+ goto cleanup;
+ }
+
+ i_size_write(inode, body->mbo_size);
+ inode->i_blocks = body->mbo_blocks;
+ set_nlink(inode, body->mbo_nlink);
+ LTIME_S(inode->i_atime) = body->mbo_atime;
+ LTIME_S(inode->i_ctime) = body->mbo_ctime;
+ LTIME_S(inode->i_mtime) = body->mbo_mtime;
+ }
+
+ md_set_lock_data(tgt->ltd_exp, lockh, inode, NULL);
+
+ if (it.it_lock_mode && lockh) {
+ ldlm_lock_decref(lockh, it.it_lock_mode);
+ it.it_lock_mode = 0;
+ }
+ }
+
+cleanup:
+ if (req)
+ ptlrpc_req_finished(req);
+
+ kfree(op_data);
+ return rc;
+}
+
/*
* IT_OPEN is intended to open (and create, possible) an object. Parent (pid)
* may be split dir.
*/
static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
+ struct lookup_intent *it,
+ struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
@@ -166,35 +277,55 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
struct mdt_body *body;
int rc;
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
+ if (it->it_flags & MDS_OPEN_BY_FID) {
+ LASSERT(fid_is_sane(&op_data->op_fid2));
+
+ /*
+ * For a striped directory we can't know the parent stripe fid
+ * without the name, but we can set it to the child fid, and the
+ * MDT will obtain it from the linkea during open in that case.
+ */
+ if (op_data->op_mea1)
+ op_data->op_fid1 = op_data->op_fid2;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid2);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ op_data->op_mds = tgt->ltd_idx;
+ } else {
+ LASSERT(fid_is_sane(&op_data->op_fid1));
+ LASSERT(fid_is_zero(&op_data->op_fid2));
+ LASSERT(op_data->op_name);
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ }
/* If it is ready to open the file by FID, do not need
* allocate FID at all, otherwise it will confuse MDT
*/
- if ((it->it_op & IT_CREAT) &&
- !(it->it_flags & MDS_OPEN_BY_FID)) {
+ if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) {
/*
- * For open with IT_CREATE and for IT_CREATE cases allocate new
- * fid and setup FLD for it.
+ * For lookup(IT_CREATE) cases allocate new fid and setup FLD
+ * for it.
*/
- op_data->op_fid3 = op_data->op_fid2;
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc != 0)
return rc;
}
- CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%d\n",
+ CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u\n",
PFID(&op_data->op_fid1),
PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
- reqp, cb_blocking, extra_lock_flags);
+ rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking,
+ extra_lock_flags);
if (rc != 0)
return rc;
/*
- * Nothing is found, do not access body->fid1 as it is zero and thus
+ * Nothing is found, do not access body->mbo_fid1 as it is zero and thus
* pointless.
*/
if ((it->it_disposition & DISP_LOOKUP_NEG) &&
@@ -205,31 +336,17 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (!body)
return -EPROTO;
- /*
- * Not cross-ref case, just get out of here.
- */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
- /*
- * Okay, MDS has returned success. Probably name has been resolved in
- * remote inode.
- */
- rc = lmv_intent_remote(exp, lmm, lmmsize, it, &op_data->op_fid1, flags,
- reqp, cb_blocking, extra_lock_flags);
- if (rc != 0) {
- LASSERT(rc < 0);
- /*
- * This is possible, that some userspace application will try to
- * open file as directory and we will have -ENOTDIR here. As
- * this is normal situation, we should not print error here,
- * only debug info.
- */
- CDEBUG(D_INODE, "Can't handle remote %s: dir " DFID "(" DFID "):%*s: %d\n",
- LL_IT2STR(it), PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid1), op_data->op_namelen,
- op_data->op_name, rc);
- return rc;
+ /* If this is not a cross-ref case we are done; otherwise chase the remote object. */
+ if (unlikely((body->mbo_valid & OBD_MD_MDS))) {
+ rc = lmv_intent_remote(exp, it, &op_data->op_fid1, reqp,
+ cb_blocking, extra_lock_flags);
+ if (rc != 0)
+ return rc;
+
+ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
+ if (!body)
+ return -EPROTO;
}
return rc;
@@ -240,37 +357,102 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
*/
static int lmv_intent_lookup(struct obd_export *exp,
struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
+ struct lookup_intent *it,
+ struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
int rc = 0;
+ /*
+ * If it returns ERR_PTR(-EBADFD) then it is an unknown hash type
+ * it will try all stripes to locate the object
+ */
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
- if (IS_ERR(tgt))
+ if (IS_ERR(tgt) && (PTR_ERR(tgt) != -EBADFD))
return PTR_ERR(tgt);
+ /*
+ * Both migrating dir and unknown hash dir need to try
+ * all of sub-stripes
+ */
+ if (lsm && !lmv_is_known_hash_type(lsm->lsm_md_hash_type)) {
+ struct lmv_oinfo *oinfo = &lsm->lsm_md_oinfo[0];
+
+ op_data->op_fid1 = oinfo->lmo_fid;
+ op_data->op_mds = oinfo->lmo_mds;
+ tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ }
+
if (!fid_is_sane(&op_data->op_fid2))
fid_zero(&op_data->op_fid2);
- CDEBUG(D_INODE, "LOOKUP_INTENT with fid1="DFID", fid2="DFID
- ", name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
- PFID(&op_data->op_fid2),
+ CDEBUG(D_INODE, "LOOKUP_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%u lsm=%p lsm_magic=%x\n",
+ PFID(&op_data->op_fid1), PFID(&op_data->op_fid2),
op_data->op_name ? op_data->op_name : "<NULL>",
- tgt->ltd_idx);
+ tgt->ltd_idx, lsm, !lsm ? -1 : lsm->lsm_md_magic);
op_data->op_bias &= ~MDS_CROSS_REF;
- rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking, extra_lock_flags);
+ rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp, cb_blocking,
+ extra_lock_flags);
+ if (rc < 0)
+ return rc;
- if (rc < 0 || !*reqp)
+ if (!*reqp) {
+ /*
+ * If RPC happens, lsm information will be revalidated
+ * during update_inode process (see ll_update_lsm_md)
+ */
+ if (op_data->op_mea2) {
+ rc = lmv_revalidate_slaves(exp, op_data->op_mea2,
+ cb_blocking,
+ extra_lock_flags);
+ if (rc != 0)
+ return rc;
+ }
return rc;
+ } else if (it_disposition(it, DISP_LOOKUP_NEG) && lsm &&
+ lmv_need_try_all_stripes(lsm)) {
+ /*
+ * For a migrating or unknown-hash-type directory, try to
+ * find the entry on the other stripes.
+ */
+ int stripe_index;
+
+ for (stripe_index = 1;
+ stripe_index < lsm->lsm_md_stripe_count &&
+ it_disposition(it, DISP_LOOKUP_NEG); stripe_index++) {
+ struct lmv_oinfo *oinfo;
+
+ /* release the previous request */
+ ptlrpc_req_finished(*reqp);
+ it->it_request = NULL;
+ *reqp = NULL;
+
+ oinfo = &lsm->lsm_md_oinfo[stripe_index];
+ tgt = lmv_find_target(lmv, &oinfo->lmo_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ CDEBUG(D_INODE, "Try other stripes " DFID"\n",
+ PFID(&oinfo->lmo_fid));
+
+ op_data->op_fid1 = oinfo->lmo_fid;
+ it->it_disposition &= ~DISP_ENQ_COMPLETE;
+ rc = md_intent_lock(tgt->ltd_exp, op_data, it, reqp,
+ cb_blocking, extra_lock_flags);
+ if (rc)
+ return rc;
+ }
+ }
/*
* MDS has returned success. Probably name has been resolved in
@@ -279,19 +461,23 @@ static int lmv_intent_lookup(struct obd_export *exp,
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (!body)
return -EPROTO;
- /* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
- rc = lmv_intent_remote(exp, lmm, lmmsize, it, NULL, flags, reqp,
- cb_blocking, extra_lock_flags);
+ /* If this is not a cross-ref case we are done; otherwise chase the remote object. */
+ if (unlikely((body->mbo_valid & OBD_MD_MDS))) {
+ rc = lmv_intent_remote(exp, it, NULL, reqp, cb_blocking,
+ extra_lock_flags);
+ if (rc != 0)
+ return rc;
+ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
+ if (!body)
+ return -EPROTO;
+ }
return rc;
}
int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
+ struct lookup_intent *it, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
@@ -300,8 +486,9 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(fid_is_sane(&op_data->op_fid1));
- CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
- LL_IT2STR(it), op_data->op_namelen, op_data->op_name,
+ CDEBUG(D_INODE, "INTENT LOCK '%s' for "DFID" '%*s' on "DFID"\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid2),
+ (int)op_data->op_namelen, op_data->op_name,
PFID(&op_data->op_fid1));
rc = lmv_check_connect(obd);
@@ -309,14 +496,34 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
return rc;
if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
- rc = lmv_intent_lookup(exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking,
+ rc = lmv_intent_lookup(exp, op_data, it, reqp, cb_blocking,
extra_lock_flags);
else if (it->it_op & IT_OPEN)
- rc = lmv_intent_open(exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking,
+ rc = lmv_intent_open(exp, op_data, it, reqp, cb_blocking,
extra_lock_flags);
else
LBUG();
+
+ if (rc < 0) {
+ struct lustre_handle lock_handle;
+
+ if (it->it_lock_mode) {
+ lock_handle.cookie = it->it_lock_handle;
+ ldlm_lock_decref(&lock_handle, it->it_lock_mode);
+ }
+
+ it->it_lock_handle = 0;
+ it->it_lock_mode = 0;
+
+ if (it->it_remote_lock_mode) {
+ lock_handle.cookie = it->it_remote_lock_handle;
+ ldlm_lock_decref(&lock_handle,
+ it->it_remote_lock_mode);
+ }
+
+ it->it_remote_lock_handle = 0;
+ it->it_remote_lock_mode = 0;
+ }
+
return rc;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 0beafc49b8d2..52b03745ac19 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -35,6 +35,7 @@
#include "../include/lustre/lustre_idl.h"
#include "../include/obd.h"
+#include "../include/lustre_lmv.h"
#define LMV_MAX_TGT_COUNT 128
@@ -44,77 +45,116 @@
int lmv_check_connect(struct obd_device *obd);
int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
+ struct lookup_intent *it, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags);
int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds);
int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
+int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data);
-static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req)
-{
- struct mdt_body *body;
- struct lmv_stripe_md *mea;
-
- LASSERT(req);
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- if (!body || !S_ISDIR(body->mode) || !body->eadatasize)
- return NULL;
+int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ const union lmv_mds_md *lmm, int stripe_count);
- mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD,
- body->eadatasize);
- if (mea->mea_count == 0)
- return NULL;
- if (mea->mea_magic != MEA_MAGIC_LAST_CHAR &&
- mea->mea_magic != MEA_MAGIC_ALL_CHARS &&
- mea->mea_magic != MEA_MAGIC_HASH_SEGMENT)
- return NULL;
+int lmv_revalidate_slaves(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ ldlm_blocking_callback cb_blocking,
+ int extra_lock_flags);
- return mea;
-}
-
-static inline int lmv_get_easize(struct lmv_obd *lmv)
+static inline struct obd_device *lmv2obd_dev(struct lmv_obd *lmv)
{
- return sizeof(struct lmv_stripe_md) +
- lmv->desc.ld_tgt_count *
- sizeof(struct lu_fid);
+ return container_of0(lmv, struct obd_device, u.lmv);
}
static inline struct lmv_tgt_desc *
-lmv_get_target(struct lmv_obd *lmv, u32 mds)
+lmv_get_target(struct lmv_obd *lmv, u32 mdt_idx, int *index)
{
- int count = lmv->desc.ld_tgt_count;
int i;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
if (!lmv->tgts[i])
continue;
- if (lmv->tgts[i]->ltd_idx == mds)
+ if (lmv->tgts[i]->ltd_idx == mdt_idx) {
+ if (index)
+ *index = i;
return lmv->tgts[i];
+ }
}
return ERR_PTR(-ENODEV);
}
-static inline struct lmv_tgt_desc *
-lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid)
+static inline int
+lmv_find_target_index(struct lmv_obd *lmv, const struct lu_fid *fid)
{
- u32 mds = 0;
- int rc;
+ struct lmv_tgt_desc *ltd;
+ u32 mdt_idx = 0;
+ int index = 0;
if (lmv->desc.ld_tgt_count > 1) {
- rc = lmv_fld_lookup(lmv, fid, &mds);
- if (rc)
- return ERR_PTR(rc);
+ int rc;
+
+ rc = lmv_fld_lookup(lmv, fid, &mdt_idx);
+ if (rc < 0)
+ return rc;
}
- return lmv_get_target(lmv, mds);
+ ltd = lmv_get_target(lmv, mdt_idx, &index);
+ if (IS_ERR(ltd))
+ return PTR_ERR(ltd);
+
+ return index;
+}
+
+static inline struct lmv_tgt_desc *
+lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid)
+{
+ int index;
+
+ index = lmv_find_target_index(lmv, fid);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ return lmv->tgts[index];
+}
+
+static inline int lmv_stripe_md_size(int stripe_count)
+{
+ struct lmv_stripe_md *lsm;
+
+ return sizeof(*lsm) + stripe_count * sizeof(lsm->lsm_md_oinfo[0]);
+}
+
+int lmv_name_to_stripe_index(enum lmv_hash_type hashtype,
+ unsigned int max_mdt_index,
+ const char *name, int namelen);
+
+static inline const struct lmv_oinfo *
+lsm_name_to_stripe_info(const struct lmv_stripe_md *lsm, const char *name,
+ int namelen)
+{
+ int stripe_index;
+
+ stripe_index = lmv_name_to_stripe_index(lsm->lsm_md_hash_type,
+ lsm->lsm_md_stripe_count,
+ name, namelen);
+ if (stripe_index < 0)
+ return ERR_PTR(stripe_index);
+
+ LASSERTF(stripe_index < lsm->lsm_md_stripe_count,
+ "stripe_index = %d, stripe_count = %d hash_type = %x name = %.*s\n",
+ stripe_index, lsm->lsm_md_stripe_count,
+ lsm->lsm_md_hash_type, namelen, name);
+
+ return &lsm->lsm_md_oinfo[stripe_index];
+}
+
+static inline bool lmv_need_try_all_stripes(const struct lmv_stripe_md *lsm)
+{
+ return !lmv_is_known_hash_type(lsm->lsm_md_hash_type) ||
+ lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION;
}
struct lmv_tgt_desc
@@ -123,6 +163,6 @@ struct lmv_tgt_desc
/* lproc_lmv.c */
void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars);
-extern struct file_operations lmv_proc_target_fops;
+extern const struct file_operations lmv_proc_target_fops;
#endif
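The helpers declared above are meant to be combined: hash the name to a stripe, then resolve that stripe's MDT. The sketch below is illustrative only and not part of the patch; the wrapper name is hypothetical, while lsm_name_to_stripe_info(), lmv_get_target() and the lmo_fid/lmo_mds fields come from this hunk.

#include "lmv_internal.h"

static struct lmv_tgt_desc *
demo_locate_stripe(struct lmv_obd *lmv, const struct lmv_stripe_md *lsm,
		   const char *name, int namelen, struct lu_fid *fid)
{
	const struct lmv_oinfo *oinfo;

	/* hash the name to a stripe and pick up that stripe's FID/MDT index */
	oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
	if (IS_ERR(oinfo))
		return ERR_CAST(oinfo);

	*fid = oinfo->lmo_fid;
	/* map the MDT index to the connected target descriptor */
	return lmv_get_target(lmv, oinfo->lmo_mds, NULL);
}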
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0e1588a43187..7dbb2b946acf 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -43,12 +43,13 @@
#include "../include/lustre/lustre_idl.h"
#include "../include/obd_support.h"
-#include "../include/lustre_lib.h"
#include "../include/lustre_net.h"
#include "../include/obd_class.h"
+#include "../include/lustre_lmv.h"
#include "../include/lprocfs_status.h"
-#include "../include/lustre_lite.h"
+#include "../include/cl_object.h"
#include "../include/lustre_fid.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_kernelcomm.h"
#include "lmv_internal.h"
@@ -70,12 +71,12 @@ static void lmv_activate_target(struct lmv_obd *lmv,
* -ENOTCONN: The UUID is found, but the target connection is bad (!)
* -EBADF : The UUID is found, but the OBD of the wrong type (!)
*/
-static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
+static int lmv_set_mdc_active(struct lmv_obd *lmv, const struct obd_uuid *uuid,
int activate)
{
struct lmv_tgt_desc *uninitialized_var(tgt);
struct obd_device *obd;
- int i;
+ u32 i;
int rc = 0;
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
@@ -244,36 +245,12 @@ static int lmv_connect(const struct lu_env *env,
return rc;
}
-static void lmv_set_timeouts(struct obd_device *obd)
-{
- struct lmv_obd *lmv;
- int i;
-
- lmv = &obd->u.lmv;
- if (lmv->server_timeout == 0)
- return;
-
- if (lmv->connected == 0)
- return;
-
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
-
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
- continue;
-
- obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
- KEY_INTERMDS, 0, NULL, NULL);
- }
-}
-
-static int lmv_init_ea_size(struct obd_export *exp, int easize,
- int def_easize, int cookiesize, int def_cookiesize)
+static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
+ u32 cookiesize, u32 def_cookiesize)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- int i;
+ u32 i;
int rc = 0;
int change = 0;
@@ -420,6 +397,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
+ int orig_tgt_count = 0;
int rc = 0;
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
@@ -489,14 +467,17 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt->ltd_uuid = *uuidp;
tgt->ltd_active = 0;
lmv->tgts[index] = tgt;
- if (index >= lmv->desc.ld_tgt_count)
+ if (index >= lmv->desc.ld_tgt_count) {
+ orig_tgt_count = lmv->desc.ld_tgt_count;
lmv->desc.ld_tgt_count = index + 1;
+ }
if (lmv->connected) {
rc = lmv_connect_mdc(obd, tgt);
if (rc) {
spin_lock(&lmv->lmv_lock);
- lmv->desc.ld_tgt_count--;
+ if (lmv->desc.ld_tgt_count == index + 1)
+ lmv->desc.ld_tgt_count = orig_tgt_count;
memset(tgt, 0, sizeof(*tgt));
spin_unlock(&lmv->lmv_lock);
} else {
@@ -514,7 +495,7 @@ int lmv_check_connect(struct obd_device *obd)
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- int i;
+ u32 i;
int rc;
int easize;
@@ -554,10 +535,9 @@ int lmv_check_connect(struct obd_device *obd)
goto out_disc;
}
- lmv_set_timeouts(obd);
class_export_put(lmv->exp);
lmv->connected = 1;
- easize = lmv_get_easize(lmv);
+ easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
mutex_unlock(&lmv->lmv_init_mutex);
return 0;
@@ -629,7 +609,7 @@ static int lmv_disconnect(struct obd_export *exp)
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
int rc;
- int i;
+ u32 i;
if (!lmv->tgts)
goto out_local;
@@ -758,7 +738,7 @@ static int lmv_hsm_req_count(struct lmv_obd *lmv,
const struct hsm_user_request *hur,
const struct lmv_tgt_desc *tgt_mds)
{
- int i, nr = 0;
+ u32 i, nr = 0;
struct lmv_tgt_desc *curr_tgt;
/* count how many requests must be sent to the given target */
@@ -885,10 +865,8 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group,
&kcd, sizeof(kcd));
- if (rc) {
- if (filp)
- fput(filp);
- }
+ if (rc)
+ fput(filp);
return rc;
}
@@ -899,10 +877,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
struct lmv_tgt_desc *tgt = NULL;
- int i = 0;
+ u32 i = 0;
int rc = 0;
int set = 0;
- int count = lmv->desc.ld_tgt_count;
+ u32 count = lmv->desc.ld_tgt_count;
if (count == 0)
return -ENOTTY;
@@ -1011,6 +989,21 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
break;
}
+ case LL_IOC_FID2MDTIDX: {
+ struct lu_fid *fid = karg;
+ int mdt_index;
+
+ rc = lmv_fld_lookup(lmv, fid, &mdt_index);
+ if (rc)
+ return rc;
+
+ /*
+ * Note: this is from llite (see ll_dir_ioctl()); @uarg does not
+ * point to user space memory for FID2MDTIDX.
+ */
+ *(__u32 *)uarg = mdt_index;
+ break;
+ }
case OBD_IOC_FID2PATH: {
rc = lmv_fid2path(exp, len, karg, uarg);
break;
@@ -1169,32 +1162,37 @@ static int lmv_placement_policy(struct obd_device *obd,
return 0;
}
+ if (op_data->op_default_stripe_offset != -1) {
+ *mds = op_data->op_default_stripe_offset;
+ return 0;
+ }
+
/**
* If stripe_offset is provided during setdirstripe
* (setdirstripe -i xx), xx MDS will be chosen.
*/
- if (op_data->op_cli_flags & CLI_SET_MEA) {
+ if (op_data->op_cli_flags & CLI_SET_MEA && op_data->op_data) {
struct lmv_user_md *lum;
- lum = (struct lmv_user_md *)op_data->op_data;
- if (lum->lum_type == LMV_STRIPE_TYPE &&
- lum->lum_stripe_offset != -1) {
- if (lum->lum_stripe_offset >= lmv->desc.ld_tgt_count) {
- CERROR("%s: Stripe_offset %d > MDT count %d: rc = %d\n",
- obd->obd_name,
- lum->lum_stripe_offset,
- lmv->desc.ld_tgt_count, -ERANGE);
- return -ERANGE;
- }
- *mds = lum->lum_stripe_offset;
- return 0;
+ lum = op_data->op_data;
+ if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) {
+ *mds = le32_to_cpu(lum->lum_stripe_offset);
+ } else {
+ /*
+ * -1 means default, which will be on the same MDT as
+ * the stripe
+ */
+ *mds = op_data->op_mds;
+ lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds);
}
+ } else {
+ /*
+ * Allocate new fid on target according to operation type and
+ * parent home mds.
+ */
+ *mds = op_data->op_mds;
}
- /* Allocate new fid on target according to operation type and parent
- * home mds.
- */
- *mds = op_data->op_mds;
return 0;
}
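A minimal userspace sketch of the MDT selection precedence implemented above (explicit default stripe offset first, then a setdirstripe hint carried in the user md, then the parent's MDT). The structures here are simplified stand-ins for the Lustre types, not the real definitions:

#include <stdint.h>

/* Simplified stand-ins for the Lustre structures referenced above. */
struct user_md_hint {
	uint32_t stripe_offset;		/* ~ lum_stripe_offset */
};

struct op_hint {
	int default_stripe_offset;	/* -1 means "not set" */
	int set_mea;			/* ~ CLI_SET_MEA flag */
	struct user_md_hint *lum;	/* ~ op_data->op_data */
	uint32_t parent_mds;		/* ~ op_data->op_mds */
};

/* Mirror of the precedence used by lmv_placement_policy(). */
static uint32_t pick_mds(const struct op_hint *op)
{
	if (op->default_stripe_offset != -1)
		return (uint32_t)op->default_stripe_offset;
	if (op->set_mea && op->lum && op->lum->stripe_offset != (uint32_t)-1)
		return op->lum->stripe_offset;
	return op->parent_mds;
}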
@@ -1203,7 +1201,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
struct lmv_tgt_desc *tgt;
int rc;
- tgt = lmv_get_target(lmv, mds);
+ tgt = lmv_get_target(lmv, mds, NULL);
if (IS_ERR(tgt))
return PTR_ERR(tgt);
@@ -1221,7 +1219,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
/*
* Asking underlaying tgt layer to allocate new fid.
*/
- rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
+ rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
if (rc > 0) {
LASSERT(fid_is_sane(fid));
rc = 0;
@@ -1232,8 +1230,8 @@ out:
return rc;
}
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
+int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
{
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
@@ -1278,10 +1276,10 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
return -EINVAL;
}
- lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS);
+ lmv->tgts_size = 32U;
+ lmv->tgts = kcalloc(lmv->tgts_size, sizeof(*lmv->tgts), GFP_NOFS);
if (!lmv->tgts)
return -ENOMEM;
- lmv->tgts_size = 32;
obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
lmv->desc.ld_tgt_count = 0;
@@ -1354,7 +1352,7 @@ static int lmv_process_config(struct obd_device *obd, u32 len, void *buf)
obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
- if (sscanf(lustre_cfg_buf(lcfg, 2), "%d", &index) != 1) {
+ if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1) {
rc = -EINVAL;
goto out;
}
@@ -1380,7 +1378,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_statfs *temp;
int rc = 0;
- int i;
+ u32 i;
rc = lmv_check_connect(obd);
if (rc)
@@ -1522,7 +1520,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- int i;
+ u32 i;
int rc;
rc = lmv_check_connect(obd);
@@ -1545,36 +1543,6 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
return 0;
}
-static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int i;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
-
- /*
- * With DNE every object can have two locks in different namespaces:
- * lookup lock in space of MDT storing direntry and update/open lock in
- * space of MDT storing inode.
- */
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
- continue;
- rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
- if (rc)
- return rc;
- }
-
- return rc;
-}
-
static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data *mod, struct ptlrpc_request **request)
{
@@ -1596,25 +1564,116 @@ static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-struct lmv_tgt_desc
-*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- struct lu_fid *fid)
+/**
+ * Choose the MDT by name or FID in @op_data.
+ * For a non-striped directory, it locates the MDT by FID.
+ * For a striped directory, it locates the MDT by name, and it also
+ * resets op_fid1 to the FID of the chosen stripe.
+ */
+static struct lmv_tgt_desc *
+lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm,
+ const char *name, int namelen, struct lu_fid *fid,
+ u32 *mds)
+{
+ const struct lmv_oinfo *oinfo;
+ struct lmv_tgt_desc *tgt;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NAME_HASH)) {
+ if (cfs_fail_val >= lsm->lsm_md_stripe_count)
+ return ERR_PTR(-EBADF);
+ oinfo = &lsm->lsm_md_oinfo[cfs_fail_val];
+ } else {
+ oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
+ if (IS_ERR(oinfo))
+ return ERR_CAST(oinfo);
+ }
+
+ if (fid)
+ *fid = oinfo->lmo_fid;
+ if (mds)
+ *mds = oinfo->lmo_mds;
+
+ tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
+
+ CDEBUG(D_INFO, "locate on mds %u " DFID "\n", oinfo->lmo_mds,
+ PFID(&oinfo->lmo_fid));
+ return tgt;
+}
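lsm_name_to_stripe_info() itself is not shown in this hunk; as a rough illustration of the "hash the name, reduce it modulo the stripe count" idea it is based on, here is a self-contained sketch. FNV-1a is used purely as a placeholder hash; the real hash function is selected by lsm_md_hash_type:

#include <stddef.h>
#include <stdint.h>

/* Toy stand-in for lsm_name_to_stripe_info(): hash the entry name and
 * reduce it modulo the stripe count.  Illustrative only.
 */
static unsigned int name_to_stripe(const char *name, size_t namelen,
				   unsigned int stripe_count)
{
	uint64_t h = 14695981039346656037ULL;	/* FNV-1a offset basis */
	size_t i;

	for (i = 0; i < namelen; i++) {
		h ^= (unsigned char)name[i];
		h *= 1099511628211ULL;		/* FNV-1a prime */
	}
	return (unsigned int)(h % stripe_count);
}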
+
+/**
+ * Locate the MDS by FID or name
+ *
+ * For a striped directory (lsm != NULL), it locates the stripe
+ * by name hash (see lsm_name_to_stripe_info()). Note: if the hash_type
+ * is unknown, it will return -EBADFD, and lmv_intent_lookup might need
+ * to walk through all stripes to locate the entry.
+ *
+ * For a normal directory, it locates the MDS by FID directly.
+ *
+ * \param[in] lmv LMV device
+ * \param[in] op_data client MD stack parameters: name, namelen,
+ * mds_num, etc.
+ * \param[in] fid object FID used to locate the MDS.
+ *
+ * \retval pointer to the lmv_tgt_desc on success.
+ * ERR_PTR(errno) on failure.
+ */
+struct lmv_tgt_desc*
+lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
+ struct lu_fid *fid)
{
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
struct lmv_tgt_desc *tgt;
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
+ /*
+ * When creating a VOLATILE file, honor the mdt
+ * index if the file under the striped dir is being restored, see
+ * ct_restore().
+ */
+ if (op_data->op_bias & MDS_CREATE_VOLATILE &&
+ (int)op_data->op_mds != -1 && lsm) {
+ int i;
+
+ tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
+ if (IS_ERR(tgt))
+ return tgt;
+
+ /* refill the right parent fid */
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ struct lmv_oinfo *oinfo;
+
+ oinfo = &lsm->lsm_md_oinfo[i];
+ if (oinfo->lmo_mds == op_data->op_mds) {
+ *fid = oinfo->lmo_fid;
+ break;
+ }
+ }
+
+ /* Cannot find the stripe by mdt_index (op_mds) */
+ if (i == lsm->lsm_md_stripe_count)
+ tgt = ERR_PTR(-EINVAL);
+
return tgt;
+ }
- op_data->op_mds = tgt->ltd_idx;
+ if (!lsm || !op_data->op_namelen) {
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return tgt;
- return tgt;
+ op_data->op_mds = tgt->ltd_idx;
+
+ return tgt;
+ }
+
+ return lmv_locate_target_for_name(lmv, lsm, op_data->op_name,
+ op_data->op_namelen, fid,
+ &op_data->op_mds);
}
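The routing decision made by lmv_locate_mds() can be summarised as a small classifier; this is only an illustrative reduction, with boolean parameters standing in for the op_data fields the function actually inspects:

/* ROUTE_BY_OP_MDS: volatile create under a striped dir honours op_mds;
 * ROUTE_BY_FID:    plain dir, or no name supplied;
 * ROUTE_BY_NAME:   striped dir with a name, routed by name hash.
 */
enum route { ROUTE_BY_OP_MDS, ROUTE_BY_FID, ROUTE_BY_NAME };

static enum route classify(int striped, int has_name, int volatile_create)
{
	if (volatile_create && striped)
		return ROUTE_BY_OP_MDS;
	if (!striped || !has_name)
		return ROUTE_BY_FID;
	return ROUTE_BY_NAME;
}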
static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid,
- __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
+ const void *data, size_t datalen, umode_t mode,
+ uid_t uid, gid_t gid, cfs_cap_t cap_effective,
+ __u64 rdev, struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -1632,13 +1691,30 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ CDEBUG(D_INODE, "CREATE name '%.*s' on "DFID" -> mds #%x\n",
+ (int)op_data->op_namelen, op_data->op_name,
+ PFID(&op_data->op_fid1), op_data->op_mds);
+
+ rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc)
return rc;
- CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- op_data->op_mds);
+ if (exp_connect_flags(exp) & OBD_CONNECT_DIR_STRIPE) {
+ /*
+ * Send the create request to the MDT where the object
+ * will be located
+ */
+ tgt = lmv_find_target(lmv, &op_data->op_fid2);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ op_data->op_mds = tgt->ltd_idx;
+ } else {
+ CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n");
+ }
+
+ CDEBUG(D_INODE, "CREATE obj "DFID" -> mds #%x\n",
+ PFID(&op_data->op_fid1), op_data->op_mds);
op_data->op_flags |= MF_MDC_CANCEL_FID1;
rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
@@ -1674,70 +1750,10 @@ static int lmv_done_writing(struct obd_export *exp,
}
static int
-lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
- struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- __u64 extra_lock_flags)
-{
- struct ptlrpc_request *req = it->it_request;
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lustre_handle plock;
- struct lmv_tgt_desc *tgt;
- struct md_op_data *rdata;
- struct lu_fid fid1;
- struct mdt_body *body;
- int rc = 0;
- int pmode;
-
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- if (!(body->valid & OBD_MD_MDS))
- return 0;
-
- CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
- LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
-
- /*
- * We got LOOKUP lock, but we really need attrs.
- */
- pmode = it->it_lock_mode;
- LASSERT(pmode != 0);
- memcpy(&plock, lockh, sizeof(plock));
- it->it_lock_mode = 0;
- it->it_request = NULL;
- fid1 = body->fid1;
-
- ptlrpc_req_finished(req);
-
- tgt = lmv_find_target(lmv, &fid1);
- if (IS_ERR(tgt)) {
- rc = PTR_ERR(tgt);
- goto out;
- }
-
- rdata = kzalloc(sizeof(*rdata), GFP_NOFS);
- if (!rdata) {
- rc = -ENOMEM;
- goto out;
- }
-
- rdata->op_fid1 = fid1;
- rdata->op_bias = MDS_CROSS_REF;
-
- rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
- lmm, lmmsize, NULL, extra_lock_flags);
- kfree(rdata);
-out:
- ldlm_lock_decref(&plock, pmode);
- return rc;
-}
-
-static int
lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ const ldlm_policy_data_t *policy,
struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, __u64 extra_lock_flags)
+ struct lustre_handle *lockh, __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -1755,22 +1771,18 @@ lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
+ CDEBUG(D_INODE, "ENQUEUE '%s' on " DFID " -> mds #%u\n",
LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
- rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
- lmm, lmmsize, req, extra_lock_flags);
+ rc = md_enqueue(tgt->ltd_exp, einfo, policy, it, op_data, lockh,
+ extra_lock_flags);
- if (rc == 0 && it && it->it_op == IT_OPEN) {
- rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
- lmm, lmmsize, extra_lock_flags);
- }
return rc;
}
static int
lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
+ struct ptlrpc_request **preq)
{
struct ptlrpc_request *req = NULL;
struct obd_device *obd = exp->exp_obd;
@@ -1787,26 +1799,25 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- tgt->ltd_idx);
+ CDEBUG(D_INODE, "GETATTR_NAME for %*s on " DFID " -> mds #%u\n",
+ (int)op_data->op_namelen, op_data->op_name,
+ PFID(&op_data->op_fid1), tgt->ltd_idx);
- rc = md_getattr_name(tgt->ltd_exp, op_data, request);
+ rc = md_getattr_name(tgt->ltd_exp, op_data, preq);
if (rc != 0)
return rc;
- body = req_capsule_server_get(&(*request)->rq_pill,
- &RMF_MDT_BODY);
-
- if (body->valid & OBD_MD_MDS) {
- struct lu_fid rid = body->fid1;
+ body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
+ if (body->mbo_valid & OBD_MD_MDS) {
+ struct lu_fid rid = body->mbo_fid1;
CDEBUG(D_INODE, "Request attrs for "DFID"\n",
PFID(&rid));
tgt = lmv_find_target(lmv, &rid);
if (IS_ERR(tgt)) {
- ptlrpc_req_finished(*request);
+ ptlrpc_req_finished(*preq);
+ *preq = NULL;
return PTR_ERR(tgt);
}
@@ -1815,8 +1826,8 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
op_data->op_namelen = 0;
op_data->op_name = NULL;
rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
- ptlrpc_req_finished(*request);
- *request = req;
+ ptlrpc_req_finished(*preq);
+ *preq = req;
}
return rc;
@@ -1829,23 +1840,24 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
NULL)
-static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
- int op_tgt, enum ldlm_mode mode, int bits,
- int flag)
+static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
+ struct md_op_data *op_data, int op_tgt,
+ enum ldlm_mode mode, int bits, int flag)
{
struct lu_fid *fid = md_op_data_fid(op_data, flag);
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
ldlm_policy_data_t policy = { {0} };
int rc = 0;
if (!fid_is_sane(fid))
return 0;
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
+ if (!tgt) {
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ }
if (tgt->ltd_idx != op_tgt) {
CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
@@ -1882,12 +1894,24 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(op_data->op_namelen != 0);
CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
- PFID(&op_data->op_fid2), op_data->op_namelen,
+ PFID(&op_data->op_fid2), (int)op_data->op_namelen,
op_data->op_name, PFID(&op_data->op_fid1));
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
+ if (op_data->op_mea2) {
+ struct lmv_stripe_md *lsm = op_data->op_mea2;
+ const struct lmv_oinfo *oinfo;
+
+ oinfo = lsm_name_to_stripe_info(lsm, op_data->op_name,
+ op_data->op_namelen);
+ if (IS_ERR(oinfo))
+ return PTR_ERR(oinfo);
+
+ op_data->op_fid2 = oinfo->lmo_fid;
+ }
+
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
if (IS_ERR(tgt))
return PTR_ERR(tgt);
@@ -1896,7 +1920,7 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
* Cancel UPDATE lock on child (fid1).
*/
op_data->op_flags |= MF_MDC_CANCEL_FID2;
- rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
+ rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
if (rc != 0)
return rc;
@@ -1907,20 +1931,22 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
}
static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
+ const char *old, size_t oldlen,
+ const char *new, size_t newlen,
struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *src_tgt;
- struct lmv_tgt_desc *tgt_tgt;
int rc;
LASSERT(oldlen != 0);
- CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n",
- oldlen, old, PFID(&op_data->op_fid1),
- newlen, new, PFID(&op_data->op_fid2));
+ CDEBUG(D_INODE, "RENAME %.*s in "DFID":%d to %.*s in "DFID":%d\n",
+ (int)oldlen, old, PFID(&op_data->op_fid1),
+ op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0,
+ (int)newlen, new, PFID(&op_data->op_fid2),
+ op_data->op_mea2 ? op_data->op_mea2->lsm_md_stripe_count : 0);
rc = lmv_check_connect(obd);
if (rc)
@@ -1929,13 +1955,60 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
- src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+
+ if (op_data->op_cli_flags & CLI_MIGRATE) {
+ LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n",
+ PFID(&op_data->op_fid3));
+
+ if (op_data->op_mea1) {
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct lmv_tgt_desc *tmp;
+
+ /* Fix the parent fid for striped dir */
+ tmp = lmv_locate_target_for_name(lmv, lsm, old,
+ oldlen,
+ &op_data->op_fid1,
+ NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ }
+
+ rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
+ if (rc)
+ return rc;
+ src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
+ } else {
+ if (op_data->op_mea1) {
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+
+ src_tgt = lmv_locate_target_for_name(lmv, lsm, old,
+ oldlen,
+ &op_data->op_fid1,
+ &op_data->op_mds);
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
+ } else {
+ src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
+
+ op_data->op_mds = src_tgt->ltd_idx;
+ }
+
+ if (op_data->op_mea2) {
+ struct lmv_stripe_md *lsm = op_data->op_mea2;
+ const struct lmv_oinfo *oinfo;
+
+ oinfo = lsm_name_to_stripe_info(lsm, new, newlen);
+ if (IS_ERR(oinfo))
+ return PTR_ERR(oinfo);
+
+ op_data->op_fid2 = oinfo->lmo_fid;
+ }
+ }
if (IS_ERR(src_tgt))
return PTR_ERR(src_tgt);
- tgt_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
- if (IS_ERR(tgt_tgt))
- return PTR_ERR(tgt_tgt);
/*
* LOOKUP lock on src child (fid3) should also be cancelled for
* src_tgt in mdc_rename.
@@ -1946,35 +2019,53 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
* Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
* own target.
*/
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
LCK_EX, MDS_INODELOCK_UPDATE,
MF_MDC_CANCEL_FID2);
-
+ if (rc)
+ return rc;
/*
- * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
+ * Cancel LOOKUP locks on source child (fid3) for parent tgt_tgt.
*/
- if (rc == 0) {
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ if (fid_is_sane(&op_data->op_fid3)) {
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ /* Cancel LOOKUP lock on its parent */
+ rc = lmv_early_cancel(exp, tgt, op_data, src_tgt->ltd_idx,
LCK_EX, MDS_INODELOCK_LOOKUP,
- MF_MDC_CANCEL_FID4);
+ MF_MDC_CANCEL_FID3);
+ if (rc)
+ return rc;
+
+ rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
+ LCK_EX, MDS_INODELOCK_FULL,
+ MF_MDC_CANCEL_FID3);
+ if (rc)
+ return rc;
}
/*
* Cancel all the locks on tgt child (fid4).
*/
- if (rc == 0)
- rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ if (fid_is_sane(&op_data->op_fid4))
+ rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
LCK_EX, MDS_INODELOCK_FULL,
MF_MDC_CANCEL_FID4);
- if (rc == 0)
- rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
- new, newlen, request);
+ CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1),
+ op_data->op_mds, PFID(&op_data->op_fid2));
+
+ rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
+ new, newlen, request);
return rc;
}
static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
+ void *ea, size_t ealen, void *ea2, size_t ea2len,
struct ptlrpc_request **request,
struct md_open_data **mod)
{
@@ -2021,169 +2112,419 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
return rc;
}
-/*
- * Adjust a set of pages, each page containing an array of lu_dirpages,
- * so that each page can be used as a single logical lu_dirpage.
+/**
+ * Get current minimum entry from striped directory
*
- * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
- * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
- * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
- * value is used as a cookie to request the next lu_dirpage in a
- * directory listing that spans multiple pages (two in this example):
- * ________
- * | |
- * .|--------v------- -----.
- * |s|e|f|p|ent|ent| ... |ent|
- * '--|-------------- -----' Each CFS_PAGE contains a single
- * '------. lu_dirpage.
- * .---------v------- -----.
- * |s|e|f|p|ent| 0 | ... | 0 |
- * '----------------- -----'
+ * This function searches for the dir entry whose hash value is the
+ * closest (>=) to @hash_offset across all sub-stripes; it is only
+ * called for striped directories.
*
- * However, on hosts where the native VM page size (PAGE_SIZE) is
- * larger than LU_PAGE_SIZE, a single host page may contain multiple
- * lu_dirpages. After reading the lu_dirpages from the MDS, the
- * ldp_hash_end of the first lu_dirpage refers to the one immediately
- * after it in the same CFS_PAGE (arrows simplified for brevity, but
- * in general e0==s1, e1==s2, etc.):
+ * \param[in] exp export of LMV
+ * \param[in] op_data parameters transferred between the client MD
+ * stack and LMV; the stripe information is
+ * included in this parameter
+ * \param[in] cb_op ldlm callback used in enqueue in
+ * mdc_read_page
+ * \param[in] hash_offset the hash value used to locate the
+ * minimum (closest) dir entry
+ * \param[in,out] stripe_offset the caller uses this to indicate the stripe
+ * index of the last entry, so as to avoid hash
+ * conflicts between stripes. It is also used to
+ * return the stripe index of the current entry.
+ * \param[in,out] entp the minimum entry; it is also used to pass
+ * in the last dir entry when resolving a hash
+ * conflict
*
- * .-------------------- -----.
- * |s0|e0|f0|p|ent|ent| ... |ent|
- * |---v---------------- -----|
- * |s1|e1|f1|p|ent|ent| ... |ent|
- * |---v---------------- -----| Here, each CFS_PAGE contains
- * ... multiple lu_dirpages.
- * |---v---------------- -----|
- * |s'|e'|f'|p|ent|ent| ... |ent|
- * '---|---------------- -----'
- * v
- * .----------------------------.
- * | next CFS_PAGE |
+ * \param[out] ppage the page which holds the minimum entry
*
- * This structure is transformed into a single logical lu_dirpage as follows:
+ * \retval = 0 the entry was found successfully
+ * negative errno (< 0) the entry could not be found
+ */
+static int lmv_get_min_striped_entry(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_callback *cb_op,
+ __u64 hash_offset, int *stripe_offset,
+ struct lu_dirent **entp,
+ struct page **ppage)
+{
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lu_dirent *min_ent = NULL;
+ struct page *min_page = NULL;
+ struct lmv_tgt_desc *tgt;
+ int stripe_count;
+ int min_idx = 0;
+ int rc = 0;
+ int i;
+
+ stripe_count = lsm->lsm_md_stripe_count;
+ for (i = 0; i < stripe_count; i++) {
+ __u64 stripe_hash = hash_offset;
+ struct lu_dirent *ent = NULL;
+ struct page *page = NULL;
+ struct lu_dirpage *dp;
+
+ tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL);
+ if (IS_ERR(tgt)) {
+ rc = PTR_ERR(tgt);
+ goto out;
+ }
+
+ /*
+ * op_data is shared by each stripe, so we need to
+ * reset these values for each stripe
+ */
+ op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
+ op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root;
+next:
+ rc = md_read_page(tgt->ltd_exp, op_data, cb_op, stripe_hash,
+ &page);
+ if (rc)
+ goto out;
+
+ dp = page_address(page);
+ for (ent = lu_dirent_start(dp); ent;
+ ent = lu_dirent_next(ent)) {
+ /* Skip dummy entry */
+ if (!le16_to_cpu(ent->lde_namelen))
+ continue;
+
+ if (le64_to_cpu(ent->lde_hash) < hash_offset)
+ continue;
+
+ if (le64_to_cpu(ent->lde_hash) == hash_offset &&
+ (*entp == ent || i < *stripe_offset))
+ continue;
+
+ /* skip . and .. for other stripes */
+ if (i && (!strncmp(ent->lde_name, ".",
+ le16_to_cpu(ent->lde_namelen)) ||
+ !strncmp(ent->lde_name, "..",
+ le16_to_cpu(ent->lde_namelen))))
+ continue;
+ break;
+ }
+
+ if (!ent) {
+ stripe_hash = le64_to_cpu(dp->ldp_hash_end);
+
+ kunmap(page);
+ put_page(page);
+ page = NULL;
+
+ /*
+ * reached the end of the current stripe, go to the next stripe
+ */
+ if (stripe_hash == MDS_DIR_END_OFF)
+ continue;
+ else
+ goto next;
+ }
+
+ if (min_ent) {
+ if (le64_to_cpu(min_ent->lde_hash) >
+ le64_to_cpu(ent->lde_hash)) {
+ min_ent = ent;
+ kunmap(min_page);
+ put_page(min_page);
+ min_idx = i;
+ min_page = page;
+ } else {
+ kunmap(page);
+ put_page(page);
+ page = NULL;
+ }
+ } else {
+ min_ent = ent;
+ min_page = page;
+ min_idx = i;
+ }
+ }
+
+out:
+ if (*ppage) {
+ kunmap(*ppage);
+ put_page(*ppage);
+ }
+ *stripe_offset = min_idx;
+ *entp = min_ent;
+ *ppage = min_page;
+ return rc;
+}
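The per-stripe scan above is effectively one step of a k-way merge ordered by hash. A self-contained model of that selection step over plain arrays; toy_ent and the pos/cnt arrays are invented stand-ins for the per-stripe directory pages, assumed sorted by hash:

#include <stddef.h>
#include <stdint.h>

struct toy_ent {
	uint64_t hash;
	const char *name;
};

/* Pick, across all stripes, the entry with the smallest hash >= offset.
 * Returns the stripe index, or -1 when every stripe is exhausted.
 */
static int min_entry(struct toy_ent *const ents[], const size_t pos[],
		     const size_t cnt[], int nstripes, uint64_t offset,
		     const struct toy_ent **out)
{
	int i, min_idx = -1;

	for (i = 0; i < nstripes; i++) {
		size_t p = pos[i];

		while (p < cnt[i] && ents[i][p].hash < offset)
			p++;
		if (p == cnt[i])
			continue;	/* this stripe is exhausted */
		if (min_idx < 0 || ents[i][p].hash < (*out)->hash) {
			*out = &ents[i][p];
			min_idx = i;
		}
	}
	return min_idx;
}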
+
+/**
+ * Build dir entry page from a striped directory
*
- * - Replace e0 with e' so the request for the next lu_dirpage gets the page
- * labeled 'next CFS_PAGE'.
+ * This function gets one entry by @offset from a striped directory. It
+ * reads entries from all stripes and chooses the one closest to the
+ * required offset (@offset). A few notes:
+ * 1. skip . and .. for non-zero stripes, because a directory can only
+ * have one . and one .. entry.
+ * 2. op_data is shared by all stripes instead of allocating a new
+ * one, so it needs to be restored before reuse.
+ * 3. release the entry page if it is not chosen.
*
- * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
- * a hash collision with the next page exists.
+ * \param[in] exp obd export referring to LMV
+ * \param[in] op_data holds the MD parameters of read_entry
+ * \param[in] cb_op ldlm callback used in enqueue in mdc_read_entry
+ * \param[out] ldp the entry being read
+ * \param[out] ppage the page holding the entry. Note: because the entry
+ * will be accessed in the upper layer, the page must be
+ * held until the entry is no longer in use, see
+ * ll_dir_entry_next.
*
- * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
- * to the first entry of the next lu_dirpage.
+ * \retval =0 if the entry was read successfully
+ * <0 if the entry could not be read
*/
-#if PAGE_SIZE > LU_PAGE_SIZE
-static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
-{
- int i;
+static int lmv_read_striped_page(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_callback *cb_op,
+ __u64 offset, struct page **ppage)
+{
+ struct inode *master_inode = op_data->op_data;
+ struct lu_fid master_fid = op_data->op_fid1;
+ struct obd_device *obd = exp->exp_obd;
+ __u64 hash_offset = offset;
+ struct page *min_ent_page = NULL;
+ struct page *ent_page = NULL;
+ struct lu_dirent *min_ent = NULL;
+ struct lu_dirent *last_ent;
+ struct lu_dirent *ent;
+ struct lu_dirpage *dp;
+ size_t left_bytes;
+ int ent_idx = 0;
+ void *area;
+ int rc;
- for (i = 0; i < ncfspgs; i++) {
- struct lu_dirpage *dp = kmap(pages[i]);
- struct lu_dirpage *first = dp;
- struct lu_dirent *end_dirent = NULL;
- struct lu_dirent *ent;
- __u64 hash_end = dp->ldp_hash_end;
- __u32 flags = dp->ldp_flags;
-
- while (--nlupgs > 0) {
- ent = lu_dirent_start(dp);
- for (end_dirent = ent; ent;
- end_dirent = ent, ent = lu_dirent_next(ent))
- ;
-
- /* Advance dp to next lu_dirpage. */
- dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
-
- /* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~PAGE_MASK))
- break;
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
- /* Save the hash and flags of this lu_dirpage. */
- hash_end = dp->ldp_hash_end;
- flags = dp->ldp_flags;
+ /*
+ * Allocate a page, read entries from all stripes and fill
+ * the page in hash order
+ */
+ ent_page = alloc_page(GFP_KERNEL);
+ if (!ent_page)
+ return -ENOMEM;
- /* Check if lu_dirpage contains no entries. */
- if (!end_dirent)
- break;
+ /* Initialize the entry page */
+ dp = kmap(ent_page);
+ memset(dp, 0, sizeof(*dp));
+ dp->ldp_hash_start = cpu_to_le64(offset);
+ dp->ldp_flags |= LDF_COLLIDE;
+
+ area = dp + 1;
+ left_bytes = PAGE_SIZE - sizeof(*dp);
+ ent = area;
+ last_ent = ent;
+ do {
+ __u16 ent_size;
+
+ /* Find the minimum entry from all sub-stripes */
+ rc = lmv_get_min_striped_entry(exp, op_data, cb_op, hash_offset,
+ &ent_idx, &min_ent,
+ &min_ent_page);
+ if (rc)
+ goto out;
- /* Enlarge the end entry lde_reclen from 0 to
- * first entry of next lu_dirpage.
- */
- LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
- end_dirent->lde_reclen =
- cpu_to_le16((char *)(dp->ldp_entries) -
- (char *)end_dirent);
+ /*
+ * If no minimum entry can be found, the end of this
+ * directory has already been reached
+ */
+ if (!min_ent) {
+ last_ent->lde_reclen = 0;
+ hash_offset = MDS_DIR_END_OFF;
+ goto out;
+ }
+
+ ent_size = le16_to_cpu(min_ent->lde_reclen);
+
+ /*
+ * A stripe page's last entry has lde_reclen == 0, but it
+ * might not be the last entry of this temporary page, so
+ * compute its real size
+ */
+ if (!ent_size)
+ ent_size = lu_dirent_calc_size(
+ le16_to_cpu(min_ent->lde_namelen),
+ le32_to_cpu(min_ent->lde_attrs));
+ if (ent_size > left_bytes) {
+ last_ent->lde_reclen = cpu_to_le16(0);
+ hash_offset = le64_to_cpu(min_ent->lde_hash);
+ goto out;
}
- first->ldp_hash_end = hash_end;
- first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
- first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
+ memcpy(ent, min_ent, ent_size);
+
+ /*
+ * Replace . with the master FID and .. with the parent FID
+ * of the master object
+ */
+ if (!strncmp(ent->lde_name, ".",
+ le16_to_cpu(ent->lde_namelen)) &&
+ le16_to_cpu(ent->lde_namelen) == 1)
+ fid_cpu_to_le(&ent->lde_fid, &master_fid);
+ else if (!strncmp(ent->lde_name, "..",
+ le16_to_cpu(ent->lde_namelen)) &&
+ le16_to_cpu(ent->lde_namelen) == 2)
+ fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3);
+
+ left_bytes -= ent_size;
+ ent->lde_reclen = cpu_to_le16(ent_size);
+ last_ent = ent;
+ ent = (void *)ent + ent_size;
+ hash_offset = le64_to_cpu(min_ent->lde_hash);
+ if (hash_offset == MDS_DIR_END_OFF) {
+ last_ent->lde_reclen = 0;
+ break;
+ }
+ } while (1);
+out:
+ if (min_ent_page) {
+ kunmap(min_ent_page);
+ put_page(min_ent_page);
+ }
- kunmap(pages[i]);
+ if (unlikely(rc)) {
+ __free_page(ent_page);
+ ent_page = NULL;
+ } else {
+ if (ent == area)
+ dp->ldp_flags |= LDF_EMPTY;
+ dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
+ dp->ldp_hash_end = cpu_to_le64(hash_offset);
}
- LASSERTF(nlupgs == 0, "left = %d", nlupgs);
+
+ /*
+ * We do not want to allocate md_op_data for each
+ * dir entry read, so op_data is shared by every stripe and
+ * needs to be restored to its original values before
+ * returning to the upper layer
+ */
+ op_data->op_fid1 = master_fid;
+ op_data->op_fid2 = master_fid;
+ op_data->op_data = master_inode;
+
+ *ppage = ent_page;
+
+ return rc;
}
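A reduced model of the fill loop above: merged entries are appended in hash order until the next one no longer fits, or the directory ends. next_min is a hypothetical callback standing in for lmv_get_min_striped_entry(); it is assumed to return the entry size, copy the entry only when it fits in the remaining room, and return 0 at end of directory:

#include <stddef.h>

static size_t fill_page(char *buf, size_t size,
			size_t (*next_min)(void *cookie, char *dst,
					   size_t room),
			void *cookie)
{
	size_t used = 0;

	for (;;) {
		/* next_min: hypothetical stand-in, see lead-in above */
		size_t n = next_min(cookie, buf + used, size - used);

		if (n == 0 || n > size - used)
			break;	/* end of directory, or entry did not fit */
		used += n;
	}
	return used;
}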
-#else
-#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_SIZE > LU_PAGE_SIZE */
-static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
+static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_callback *cb_op, __u64 offset,
+ struct page **ppage)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- __u64 offset = op_data->op_offset;
- int rc;
- int ncfspgs; /* pages read in PAGE_SIZE */
- int nlupgs; /* pages read in LU_PAGE_SIZE */
- struct lmv_tgt_desc *tgt;
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
rc = lmv_check_connect(obd);
if (rc)
return rc;
- CDEBUG(D_INODE, "READPAGE at %#llx from "DFID"\n",
- offset, PFID(&op_data->op_fid1));
+ if (unlikely(lsm)) {
+ rc = lmv_read_striped_page(exp, op_data, cb_op, offset, ppage);
+ return rc;
+ }
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
- if (rc != 0)
- return rc;
-
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
- LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
- LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
-
- CDEBUG(D_INODE, "read %d(%d)/%d pages\n", ncfspgs, nlupgs,
- op_data->op_npages);
-
- lmv_adjust_dirpages(pages, ncfspgs, nlupgs);
+ rc = md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage);
return rc;
}
+/**
+ * Unlink a file/directory
+ *
+ * Unlink a file or directory under the parent dir. The unlink request
+ * is usually sent to the MDT where the child is located, but if
+ * the client does not have the child FID then the request is sent to the
+ * MDT where the parent is located.
+ *
+ * If the parent is a striped directory then it also needs to locate the
+ * stripe in which the name of the child is located, and replace the
+ * parent FID (@op->op_fid1) with the stripe FID. Note: if the stripe is
+ * unknown, it walks through all sub-stripes until the child is finally
+ * unlinked.
+ *
+ * \param[in] exp export referring to LMV
+ * \param[in] op_data different parameters transferred between client
+ * MD stacks: name, namelen, FIDs etc.
+ * op_fid1 is the parent FID, op_fid2 is the child
+ * FID.
+ * \param[out] request points to the unlink request.
+ *
+ * \retval 0 on success
+ * negative errno on failure.
+ */
static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
- struct obd_device *obd = exp->exp_obd;
+ struct lmv_stripe_md *lsm = op_data->op_mea1;
+ struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *parent_tgt = NULL;
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
+ int stripe_index = 0;
int rc;
rc = lmv_check_connect(obd);
if (rc)
return rc;
-retry:
+retry_unlink:
+ /* For striped dir, we need to locate the parent as well */
+ if (lsm) {
+ struct lmv_tgt_desc *tmp;
+
+ LASSERT(op_data->op_name && op_data->op_namelen);
+
+ tmp = lmv_locate_target_for_name(lmv, lsm,
+ op_data->op_name,
+ op_data->op_namelen,
+ &op_data->op_fid1,
+ &op_data->op_mds);
+
+ /*
+ * a return of -EBADFD means an unknown hash type; we might
+ * need to try all sub-stripes here
+ */
+ if (IS_ERR(tmp) && PTR_ERR(tmp) != -EBADFD)
+ return PTR_ERR(tmp);
+
+ /*
+ * Note: both migrating dirs and unknown hash dirs need to
+ * try all sub-stripes, so the name search must start from
+ * stripe 0; migrating dirs are already handled inside
+ * lmv_locate_target_for_name(), so only unknown hash type
+ * directories are checked here
+ */
+ if (!lmv_is_known_hash_type(lsm->lsm_md_hash_type)) {
+ struct lmv_oinfo *oinfo;
+
+ oinfo = &lsm->lsm_md_oinfo[stripe_index];
+
+ op_data->op_fid1 = oinfo->lmo_fid;
+ op_data->op_mds = oinfo->lmo_mds;
+ }
+ }
+
+try_next_stripe:
/* Send unlink requests to the MDT where the child is located */
if (likely(!fid_is_zero(&op_data->op_fid2)))
- tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
+ tgt = lmv_find_target(lmv, &op_data->op_fid2);
+ else if (lsm)
+ tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
else
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+
if (IS_ERR(tgt))
return PTR_ERR(tgt);
@@ -2203,29 +2544,57 @@ retry:
/*
* Cancel FULL locks on child (fid3).
*/
- rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
- MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
+ parent_tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(parent_tgt))
+ return PTR_ERR(parent_tgt);
+ if (parent_tgt != tgt) {
+ rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx,
+ LCK_EX, MDS_INODELOCK_LOOKUP,
+ MF_MDC_CANCEL_FID3);
+ }
+
+ rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
+ MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
if (rc != 0)
return rc;
- CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%d\n",
+ CDEBUG(D_INODE, "unlink with fid=" DFID "/" DFID " -> mds #%u\n",
PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
rc = md_unlink(tgt->ltd_exp, op_data, request);
- if (rc != 0 && rc != -EREMOTE)
+ if (rc != 0 && rc != -EREMOTE && rc != -ENOENT)
return rc;
+ /* Try next stripe if it is needed. */
+ if (rc == -ENOENT && lsm && lmv_need_try_all_stripes(lsm)) {
+ struct lmv_oinfo *oinfo;
+
+ stripe_index++;
+ if (stripe_index >= lsm->lsm_md_stripe_count)
+ return rc;
+
+ oinfo = &lsm->lsm_md_oinfo[stripe_index];
+
+ op_data->op_fid1 = oinfo->lmo_fid;
+ op_data->op_mds = oinfo->lmo_mds;
+
+ ptlrpc_req_finished(*request);
+ *request = NULL;
+
+ goto try_next_stripe;
+ }
+
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
if (!body)
return -EPROTO;
/* Not cross-ref case, just get out of here. */
- if (likely(!(body->valid & OBD_MD_MDS)))
- return 0;
+ if (likely(!(body->mbo_valid & OBD_MD_MDS)))
+ return rc;
CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
- exp->exp_obd->obd_name, PFID(&body->fid1));
+ exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
/* This is a remote object, try remote MDT, Note: it may
* try more than 1 time here, Considering following case
@@ -2247,11 +2616,11 @@ retry:
* In theory, it might try unlimited time here, but it should
* be very rare case.
*/
- op_data->op_fid2 = body->fid1;
+ op_data->op_fid2 = body->mbo_fid1;
ptlrpc_req_finished(*request);
*request = NULL;
- goto retry;
+ goto retry_unlink;
}
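For a directory whose hash type is unknown, the code above retries the unlink stripe by stripe until one target stops returning -ENOENT. A minimal model of that loop; try_unlink_on_stripe is a hypothetical helper, not a Lustre symbol:

#include <errno.h>

static int unlink_all_stripes(int stripe_count,
			      int (*try_unlink_on_stripe)(int idx))
{
	int idx, rc = -ENOENT;

	/* walk stripes in order, stopping at the first non-ENOENT result */
	for (idx = 0; idx < stripe_count && rc == -ENOENT; idx++)
		rc = try_unlink_on_stripe(idx);
	return rc;
}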
static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
@@ -2274,6 +2643,22 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
return 0;
}
+/**
+ * Get by key a value associated with an LMV device.
+ *
+ * Dispatch request to lower-layer devices as needed.
+ *
+ * \param[in] env execution environment for this thread
+ * \param[in] exp export for the LMV device
+ * \param[in] keylen length of key identifier
+ * \param[in] key identifier of key to get value for
+ * \param[in] vallen size of \a val
+ * \param[out] val pointer to storage location for value
+ * \param[in] lsm optional striping metadata of object
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on failure
+ */
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
__u32 keylen, void *key, __u32 *vallen, void *val,
struct lov_stripe_md *lsm)
@@ -2337,6 +2722,22 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
+/**
+ * Asynchronously set by key a value associated with a LMV device.
+ *
+ * Dispatch request to lower-layer devices as needed.
+ *
+ * \param[in] env execution environment for this thread
+ * \param[in] exp export for the LMV device
+ * \param[in] keylen length of key identifier
+ * \param[in] key identifier of key to store value for
+ * \param[in] vallen size of value to store
+ * \param[in] val pointer to data to be stored
+ * \param[in] set optional list of related ptlrpc requests
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on failure
+ */
static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 keylen, void *key, u32 vallen,
void *val, struct ptlrpc_request_set *set)
@@ -2354,7 +2755,8 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
}
lmv = &obd->u.lmv;
- if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
+ if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) ||
+ KEY_IS(KEY_DEFAULT_EASIZE)) {
int i, err = 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
@@ -2375,105 +2777,247 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
}
-static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
+static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm,
+ struct lmv_mds_md_v1 *lmm1)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_stripe_md *meap;
- struct lmv_stripe_md *lsmp;
- int mea_size;
- int i;
+ int cplen;
+ int i;
+
+ lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic);
+ lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count);
+ lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index);
+ lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type);
+ cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name,
+ sizeof(lmm1->lmv_pool_name));
+ if (cplen >= sizeof(lmm1->lmv_pool_name))
+ return -E2BIG;
+
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++)
+ fid_cpu_to_le(&lmm1->lmv_stripe_fids[i],
+ &lsm->lsm_md_oinfo[i].lmo_fid);
+ return 0;
+}
- mea_size = lmv_get_easize(lmv);
- if (!lmmp)
- return mea_size;
+static int
+lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm,
+ int stripe_count)
+{
+ int lmm_size = 0, rc = 0;
+ bool allocated = false;
+ LASSERT(lmmp);
+
+ /* Free lmm */
if (*lmmp && !lsm) {
+ int stripe_cnt;
+
+ stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp);
+ lmm_size = lmv_mds_md_size(stripe_cnt,
+ le32_to_cpu((*lmmp)->lmv_magic));
+ if (!lmm_size)
+ return -EINVAL;
kvfree(*lmmp);
*lmmp = NULL;
return 0;
}
+ /* Alloc lmm */
+ if (!*lmmp && !lsm) {
+ lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC);
+ LASSERT(lmm_size > 0);
+ *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
+ if (!*lmmp)
+ return -ENOMEM;
+ lmv_mds_md_stripe_count_set(*lmmp, stripe_count);
+ (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC);
+ return lmm_size;
+ }
+
+ /* pack lmm */
+ LASSERT(lsm);
+ lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count,
+ lsm->lsm_md_magic);
if (!*lmmp) {
- *lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS);
+ *lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
if (!*lmmp)
return -ENOMEM;
+ allocated = true;
}
- if (!lsm)
- return mea_size;
+ switch (lsm->lsm_md_magic) {
+ case LMV_MAGIC_V1:
+ rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
- lsmp = (struct lmv_stripe_md *)lsm;
- meap = (struct lmv_stripe_md *)*lmmp;
+ if (rc && allocated) {
+ kvfree(*lmmp);
+ *lmmp = NULL;
+ }
- if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
- lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
- return -EINVAL;
+ return lmm_size;
+}
- meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
- meap->mea_count = cpu_to_le32(lsmp->mea_count);
- meap->mea_master = cpu_to_le32(lsmp->mea_master);
+static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
+ const struct lmv_mds_md_v1 *lmm1)
+{
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ int stripe_count;
+ int rc = 0;
+ int cplen;
+ int i;
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- meap->mea_ids[i] = lsmp->mea_ids[i];
- fid_cpu_to_le(&meap->mea_ids[i], &lsmp->mea_ids[i]);
+ lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic);
+ lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
+ lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
+ if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE))
+ lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN;
+ else
+ lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
+ lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
+ cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
+ sizeof(lsm->lsm_md_pool_name));
+
+ if (cplen >= sizeof(lsm->lsm_md_pool_name))
+ return -E2BIG;
+
+ CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %d layout_version %d\n",
+ lsm->lsm_md_stripe_count, lsm->lsm_md_master_mdt_index,
+ lsm->lsm_md_hash_type, lsm->lsm_md_layout_version);
+
+ stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
+ for (i = 0; i < stripe_count; i++) {
+ fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid,
+ &lmm1->lmv_stripe_fids[i]);
+ rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid,
+ &lsm->lsm_md_oinfo[i].lmo_mds);
+ if (rc)
+ return rc;
+ CDEBUG(D_INFO, "unpack fid #%d "DFID"\n", i,
+ PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
}
- return mea_size;
+ return rc;
}
-static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmm, int lmm_size)
+int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ const union lmv_mds_md *lmm, int stripe_count)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
- struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm;
- struct lmv_obd *lmv = &obd->u.lmv;
- int mea_size;
- int i;
- __u32 magic;
+ struct lmv_stripe_md *lsm;
+ bool allocated = false;
+ int lsm_size, rc;
- mea_size = lmv_get_easize(lmv);
- if (!lsmp)
- return mea_size;
+ LASSERT(lsmp);
- if (*lsmp && !lmm) {
- kvfree(*tmea);
+ lsm = *lsmp;
+ /* Free memmd */
+ if (lsm && !lmm) {
+ int i;
+
+ for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
+ /*
+ * For a migrating inode, the master stripe and master
+ * object are the same, so iput is not needed, see
+ * ll_update_lsm_md()
+ */
+ if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION &&
+ !i) && lsm->lsm_md_oinfo[i].lmo_root)
+ iput(lsm->lsm_md_oinfo[i].lmo_root);
+ }
+
+ kvfree(lsm);
*lsmp = NULL;
return 0;
}
- LASSERT(mea_size == lmm_size);
+ /* Alloc memmd */
+ if (!lsm && !lmm) {
+ lsm_size = lmv_stripe_md_size(stripe_count);
+ lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
+ if (!lsm)
+ return -ENOMEM;
+ lsm->lsm_md_stripe_count = stripe_count;
+ *lsmp = lsm;
+ return 0;
+ }
- *tmea = libcfs_kvzalloc(mea_size, GFP_NOFS);
- if (!*tmea)
- return -ENOMEM;
+ if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
+ return -EPERM;
- if (!lmm)
- return mea_size;
+ /* Unpack memmd */
+ if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
+ le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
+ CERROR("%s: invalid lmv magic %x: rc = %d\n",
+ exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),
+ -EIO);
+ return -EIO;
+ }
- if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
- mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
- mea->mea_magic == MEA_MAGIC_HASH_SEGMENT) {
- magic = le32_to_cpu(mea->mea_magic);
- } else {
- /*
- * Old mea is not handled here.
+ if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
+ lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
+ else
+ /**
+ * Unpack the default dirstripe (lmv_user_md) to lmv_stripe_md;
+ * the stripe count should then be 0.
*/
- CERROR("Old not supportable EA is found\n");
- LBUG();
+ lsm_size = lmv_stripe_md_size(0);
+
+ if (!lsm) {
+ lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
+ if (!lsm)
+ return -ENOMEM;
+ allocated = true;
+ *lsmp = lsm;
+ }
+
+ switch (le32_to_cpu(lmm->lmv_magic)) {
+ case LMV_MAGIC_V1:
+ rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
+ break;
+ default:
+ CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name,
+ le32_to_cpu(lmm->lmv_magic));
+ rc = -EINVAL;
+ break;
}
- (*tmea)->mea_magic = magic;
- (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
- (*tmea)->mea_master = le32_to_cpu(mea->mea_master);
+ if (rc && allocated) {
+ kvfree(lsm);
+ *lsmp = NULL;
+ lsm_size = rc;
+ }
+ return lsm_size;
+}
+EXPORT_SYMBOL(lmv_unpack_md);
+
+static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int disk_len)
+{
+ return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp,
+ (union lmv_mds_md *)lmm, disk_len);
+}
+
+static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
+{
+ const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv_obd = &obd->u.lmv;
+ int stripe_count;
- for (i = 0; i < (*tmea)->mea_count; i++) {
- (*tmea)->mea_ids[i] = mea->mea_ids[i];
- fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
+ if (!lmmp) {
+ if (lsm)
+ stripe_count = lmv->lsm_md_stripe_count;
+ else
+ stripe_count = lmv_obd->desc.ld_tgt_count;
+
+ return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1);
}
- return mea_size;
+
+ return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0);
}
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
@@ -2484,7 +3028,7 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
int rc = 0;
int err;
- int i;
+ u32 i;
LASSERT(fid);
@@ -2502,8 +3046,9 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
return rc;
}
-static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u64 *bits)
+static int lmv_set_lock_data(struct obd_export *exp,
+ const struct lustre_handle *lockh,
+ void *data, __u64 *bits)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
struct lmv_tgt_desc *tgt = lmv->tgts[0];
@@ -2526,24 +3071,32 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
enum ldlm_mode rc;
- int i;
+ int tgt;
+ u32 i;
CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
/*
- * With CMD every object can have two locks in different namespaces:
- * lookup lock in space of mds storing direntry and update/open lock in
- * space of mds storing inode. Thus we check all targets, not only that
- * one fid was created in.
+ * With DNE every object can have two locks in different namespaces:
+ * lookup lock in space of MDT storing direntry and update/open lock in
+ * space of MDT storing inode. Try the MDT that the FID maps to first,
+ * since this can be easily found, and only try others if that fails.
*/
- for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- struct lmv_tgt_desc *tgt = lmv->tgts[i];
+ for (i = 0, tgt = lmv_find_target_index(lmv, fid);
+ i < lmv->desc.ld_tgt_count;
+ i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
+ if (tgt < 0) {
+ CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+ obd->obd_name, PFID(fid), tgt);
+ tgt = 0;
+ }
- if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
+ if (!lmv->tgts[tgt] || !lmv->tgts[tgt]->ltd_exp ||
+ !lmv->tgts[tgt]->ltd_active)
continue;
- rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
- lockh);
+ rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid,
+ type, policy, mode, lockh);
if (rc)
return rc;
}
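The loop above probes the MDT that the FID maps to first and then wraps around the remaining targets. A tiny standalone illustration of that probe order:

#include <stdio.h>

/* Print the order in which "count" targets would be probed, starting
 * from the one the FID maps to ("start").
 */
static void print_probe_order(int start, int count)
{
	int i, tgt;

	for (i = 0, tgt = start; i < count; i++, tgt = (tgt + 1) % count)
		printf("try target %d\n", tgt);
}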
@@ -2571,8 +3124,10 @@ static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = lmv->tgts[0];
- if (md->mea)
- obd_free_memmd(exp, (void *)&md->mea);
+ if (md->lmv) {
+ lmv_free_memmd(md->lmv);
+ md->lmv = NULL;
+ }
if (!tgt || !tgt->ltd_exp)
return -EINVAL;
return md_free_lustre_md(tgt->ltd_exp, md);
@@ -2621,7 +3176,7 @@ static int lmv_intent_getattr_async(struct obd_export *exp,
if (rc)
return rc;
- tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
return PTR_ERR(tgt);
@@ -2649,6 +3204,23 @@ static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
return rc;
}
+static int
+lmv_get_fid_from_lsm(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ const char *name, int namelen, struct lu_fid *fid)
+{
+ const struct lmv_oinfo *oinfo;
+
+ LASSERT(lsm);
+ oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
+ if (IS_ERR(oinfo))
+ return PTR_ERR(oinfo);
+
+ *fid = oinfo->lmo_fid;
+
+ return 0;
+}
+
/**
* For lmv, only need to send request to master MDT, and the master MDT will
* process with other slave MDTs. The only exception is Q_GETOQUOTA for which
@@ -2660,8 +3232,9 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = lmv->tgts[0];
- int rc = 0, i;
+ int rc = 0;
__u64 curspace = 0, curinodes = 0;
+ u32 i;
if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
!lmv->desc.ld_tgt_count) {
@@ -2704,7 +3277,8 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct obd_device *obd = class_exp2obd(exp);
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- int i, rc = 0;
+ int rc = 0;
+ u32 i;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int err;
@@ -2723,6 +3297,47 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
return rc;
}
+static int lmv_merge_attr(struct obd_export *exp,
+ const struct lmv_stripe_md *lsm,
+ struct cl_attr *attr,
+ ldlm_blocking_callback cb_blocking)
+{
+ int rc, i;
+
+ rc = lmv_revalidate_slaves(exp, lsm, cb_blocking, 0);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root;
+
+ CDEBUG(D_INFO, ""DFID" size %llu, blocks %llu nlink %u, atime %lu ctime %lu, mtime %lu.\n",
+ PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
+ i_size_read(inode), (unsigned long long)inode->i_blocks,
+ inode->i_nlink, LTIME_S(inode->i_atime),
+ LTIME_S(inode->i_ctime), LTIME_S(inode->i_mtime));
+
+ /* for a slave stripe, subtract the nlink for its own . and .. */
+ if (i)
+ attr->cat_nlink += inode->i_nlink - 2;
+ else
+ attr->cat_nlink = inode->i_nlink;
+
+ attr->cat_size += i_size_read(inode);
+ attr->cat_blocks += inode->i_blocks;
+
+ if (attr->cat_atime < LTIME_S(inode->i_atime))
+ attr->cat_atime = LTIME_S(inode->i_atime);
+
+ if (attr->cat_ctime < LTIME_S(inode->i_ctime))
+ attr->cat_ctime = LTIME_S(inode->i_ctime);
+
+ if (attr->cat_mtime < LTIME_S(inode->i_mtime))
+ attr->cat_mtime = LTIME_S(inode->i_mtime);
+ }
+ return 0;
+}
+
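The nlink accounting in lmv_merge_attr() can be checked with a small worked example: the master stripe contributes its full nlink and every other stripe contributes nlink - 2 (dropping its own "." and ".."), so three stripes with nlink 5, 4 and 4 merge to 5 + 2 + 2 = 9. A standalone sketch of just that accounting:

/* Merge per-stripe nlink counts the way lmv_merge_attr() does. */
static unsigned int merge_nlink(const unsigned int nlink[], int stripes)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < stripes; i++)
		total += i ? nlink[i] - 2 : nlink[i];
	return total;
}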
static struct obd_ops lmv_obd_ops = {
.owner = THIS_MODULE,
.setup = lmv_setup,
@@ -2746,7 +3361,6 @@ static struct obd_ops lmv_obd_ops = {
static struct md_ops lmv_md_ops = {
.getstatus = lmv_getstatus,
.null_inode = lmv_null_inode,
- .find_cbdata = lmv_find_cbdata,
.close = lmv_close,
.create = lmv_create,
.done_writing = lmv_done_writing,
@@ -2760,7 +3374,7 @@ static struct md_ops lmv_md_ops = {
.setattr = lmv_setattr,
.setxattr = lmv_setxattr,
.sync = lmv_sync,
- .readpage = lmv_readpage,
+ .read_page = lmv_read_page,
.unlink = lmv_unlink,
.init_ea_size = lmv_init_ea_size,
.cancel_unused = lmv_cancel_unused,
@@ -2768,10 +3382,12 @@ static struct md_ops lmv_md_ops = {
.lock_match = lmv_lock_match,
.get_lustre_md = lmv_get_lustre_md,
.free_lustre_md = lmv_free_lustre_md,
+ .merge_attr = lmv_merge_attr,
.set_open_replay_data = lmv_set_open_replay_data,
.clear_open_replay_data = lmv_clear_open_replay_data,
.intent_getattr_async = lmv_intent_getattr_async,
- .revalidate_lock = lmv_revalidate_lock
+ .revalidate_lock = lmv_revalidate_lock,
+ .get_fid_from_lsm = lmv_get_fid_from_lsm,
};
static int __init lmv_init(void)
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index c29c361eb0cc..20bbdfc21d15 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -169,7 +169,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v)
if (!tgt)
return 0;
- seq_printf(p, "%d: %s %sACTIVE\n",
+ seq_printf(p, "%u: %s %sACTIVE\n",
tgt->ltd_idx, tgt->ltd_uuid.uuid,
tgt->ltd_active ? "" : "IN");
return 0;
@@ -202,7 +202,7 @@ static struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
{ NULL }
};
-struct file_operations lmv_proc_target_fops = {
+const struct file_operations lmv_proc_target_fops = {
.owner = THIS_MODULE,
.open = lmv_target_seq_open,
.read = seq_read,
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 9740568d9521..4d2b7d303fea 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -289,8 +289,8 @@ struct lov_lock {
};
struct lov_page {
- struct cl_page_slice lps_cl;
- int lps_invalid;
+ struct cl_page_slice lps_cl;
+ unsigned int lps_stripe; /* stripe index */
};
/*
@@ -556,6 +556,8 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
const struct cl_page_slice *slice);
+
+struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
int lov_page_stripe(const struct cl_page *page);
#define lov_foreach_target(lov, var) \
@@ -742,11 +744,15 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
{
LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
- lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
+ LASSERT(lov->lo_lsm->lsm_magic == LOV_MAGIC ||
+ lov->lo_lsm->lsm_magic == LOV_MAGIC_V3);
return &lov->u.raid0;
}
+/* lov_pack.c */
+int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
+ struct lov_user_md __user *lump);
+
/** @} lov */
#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index b1f260d43bc7..056ae2ed88e8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -516,6 +516,5 @@ struct lu_device_type lov_device_type = {
.ldt_ops = &lov_device_type_ops,
.ldt_ctx_tags = LCT_CL_THREAD
};
-EXPORT_SYMBOL(lov_device_type);
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 5053dead17bb..214c561767e0 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -66,7 +66,8 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
}
if (lmm->lmm_stripe_size == 0 ||
- (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
+ (le32_to_cpu(lmm->lmm_stripe_size) &
+ (LOV_MIN_STRIPE_SIZE - 1)) != 0) {
CERROR("bad stripe size %u\n",
le32_to_cpu(lmm->lmm_stripe_size));
lov_dump_lmm_common(D_WARNING, lmm);
@@ -146,21 +147,15 @@ lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
}
-static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa,
- struct obd_export *md_exp)
-{
- return 0;
-}
-
/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
+ * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
*/
static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
{
struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
if (!imp || !tgt->ltd_active) {
- *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
return;
}
@@ -171,7 +166,7 @@ static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
*stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
} else {
- *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ *stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
}
spin_unlock(&imp->imp_lock);
}
@@ -245,7 +240,6 @@ static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
const struct lsm_operations lsm_v1_ops = {
.lsm_free = lsm_free_plain,
- .lsm_destroy = lsm_destroy_plain,
.lsm_stripe_by_index = lsm_stripe_by_index_plain,
.lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
.lsm_lmm_verify = lsm_lmm_verify_v1,
@@ -335,7 +329,6 @@ static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
const struct lsm_operations lsm_v3_ops = {
.lsm_free = lsm_free_plain,
- .lsm_destroy = lsm_destroy_plain,
.lsm_stripe_by_index = lsm_stripe_by_index_plain,
.lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
.lsm_lmm_verify = lsm_lmm_verify_v3,
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 12bd511e8988..07e5ede3e952 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -134,8 +134,6 @@ static inline void lov_put_reqset(struct lov_request_set *set)
/* lov_merge.c */
void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
struct lov_stripe_md *lsm, int stripeno, int *set);
-int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink);
int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
struct ost_lvb *lvb, __u64 *kms_place);
@@ -157,11 +155,6 @@ int lov_update_common_set(struct lov_request_set *set,
int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set **reqset);
int lov_fini_getattr_set(struct lov_request_set *set);
-int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obdo *src_oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset);
-int lov_fini_destroy_set(struct lov_request_set *set);
int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti,
struct lov_request_set **reqset);
@@ -197,8 +190,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
struct lov_stripe_md *lsm);
int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
struct lov_mds_md *lmm, int lmm_bytes);
-int lov_getstripe(struct obd_export *exp,
- struct lov_stripe_md *lsm, struct lov_user_md __user *lump);
int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
int pattern, int magic);
int lov_free_memmd(struct lov_stripe_md **lsmp);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 84032a510254..d10157985ed9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -87,6 +87,9 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
case CIT_SETATTR: {
io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
+ io->u.ci_setattr.sa_stripe_index = stripe;
+ io->u.ci_setattr.sa_parent_fid =
+ parent->u.ci_setattr.sa_parent_fid;
if (cl_io_is_trunc(io)) {
loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
@@ -244,14 +247,12 @@ void lov_sub_put(struct lov_io_sub *sub)
int lov_page_stripe(const struct cl_page *page)
{
- struct lovsub_object *subobj;
const struct cl_page_slice *slice;
- slice = cl_page_at(page, &lovsub_device_type);
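+ /*
+ * The stripe index is recorded in the lov page slice when the
+ * page is initialized (see lov_page_init_raid0()).
+ */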
+ slice = cl_page_at(page, &lov_device_type);
LASSERT(slice->cpl_obj);
- subobj = cl2lovsub(slice->cpl_obj);
- return subobj->lso_index;
+ return cl2lov_page(slice)->lps_stripe;
}
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
@@ -298,8 +299,8 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
return result;
}
-static void lov_io_slice_init(struct lov_io *lio,
- struct lov_object *obj, struct cl_io *io)
+static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
+ struct cl_io *io)
{
io->ci_result = 0;
lio->lis_object = obj;
@@ -314,6 +315,15 @@ static void lov_io_slice_init(struct lov_io *lio,
lio->lis_io_endpos = lio->lis_endpos;
if (cl_io_is_append(io)) {
LASSERT(io->ci_type == CIT_WRITE);
+
+ /*
+ * If there is a hole in the LOV EA, we may not be able to
+ * locate the current end of the file exactly.
+ */
+ if (unlikely(obj->lo_lsm->lsm_pattern &
+ LOV_PATTERN_F_HOLE))
+ return -EIO;
+
lio->lis_pos = 0;
lio->lis_endpos = OBD_OBJECT_EOF;
}
@@ -349,6 +359,7 @@ static void lov_io_slice_init(struct lov_io *lio,
default:
LBUG();
}
+ return 0;
}
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
@@ -870,7 +881,7 @@ int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct lov_object *lov = cl2lov(obj);
INIT_LIST_HEAD(&lio->lis_active);
- lov_io_slice_init(lio, lov, io);
+ io->ci_result = lov_io_slice_init(lio, lov, io);
if (io->ci_result == 0) {
io->ci_result = lov_io_subio_init(env, lio, io);
if (io->ci_result == 0) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index b9c90865fdfc..674af106b50b 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -105,45 +105,6 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
return rc;
}
-/* Must be called under the lov_stripe_lock() */
-int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
- u64 size, int shrink)
-{
- struct lov_oinfo *loi;
- int stripe = 0;
- __u64 kms;
-
- assert_spin_locked(&lsm->lsm_lock);
- LASSERT(lsm->lsm_lock_owner == current_pid());
-
- if (shrink) {
- for (; stripe < lsm->lsm_stripe_count; stripe++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
-
- kms = lov_size_to_stripe(lsm, size, stripe);
- CDEBUG(D_INODE,
- "stripe %d KMS %sing %llu->%llu\n",
- stripe, kms > loi->loi_kms ? "increase":"shrink",
- loi->loi_kms, kms);
- loi->loi_lvb.lvb_size = kms;
- loi_kms_set(loi, loi->loi_lvb.lvb_size);
- }
- return 0;
- }
-
- if (size > 0)
- stripe = lov_stripe_number(lsm, size - 1);
- kms = lov_size_to_stripe(lsm, size, stripe);
- loi = lsm->lsm_oinfo[stripe];
-
- CDEBUG(D_INODE, "stripe %d KMS %sincreasing %llu->%llu\n",
- stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
- if (kms > loi->loi_kms)
- loi_kms_set(loi, kms);
-
- return 0;
-}
-
void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
struct lov_stripe_md *lsm, int stripeno, int *set)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 9b92d5522edb..b23016f7ec26 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -41,6 +41,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/obd_support.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_net.h"
#include "../include/lustre/lustre_idl.h"
@@ -940,7 +941,7 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
}
case LCFG_PARAM: {
struct lprocfs_static_vars lvars = { NULL };
- struct lov_desc *desc = &(obd->u.lov.desc);
+ struct lov_desc *desc = &obd->u.lov.desc;
if (!desc) {
rc = -EINVAL;
@@ -971,92 +972,6 @@ out:
return rc;
}
-static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti)
-{
- struct lov_stripe_md *obj_mdp, *lsm;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- unsigned ost_idx;
- int rc, i;
-
- LASSERT(src_oa->o_valid & OBD_MD_FLFLAGS &&
- src_oa->o_flags & OBD_FL_RECREATE_OBJS);
-
- obj_mdp = kzalloc(sizeof(*obj_mdp), GFP_NOFS);
- if (!obj_mdp)
- return -ENOMEM;
-
- ost_idx = src_oa->o_nlink;
- lsm = *ea;
- if (!lsm) {
- rc = -EINVAL;
- goto out;
- }
- if (ost_idx >= lov->desc.ld_tgt_count ||
- !lov->lov_tgts[ost_idx]) {
- rc = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (loi->loi_ost_idx == ost_idx) {
- if (ostid_id(&loi->loi_oi) != ostid_id(&src_oa->o_oi)) {
- rc = -EINVAL;
- goto out;
- }
- break;
- }
- }
- if (i == lsm->lsm_stripe_count) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = obd_create(NULL, lov->lov_tgts[ost_idx]->ltd_exp,
- src_oa, &obj_mdp, oti);
-out:
- kfree(obj_mdp);
- return rc;
-}
-
-/* the LOV expects oa->o_id to be set to the LOV object id */
-static int lov_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *src_oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
-{
- struct lov_obd *lov;
- int rc = 0;
-
- LASSERT(ea);
- if (!exp)
- return -EINVAL;
-
- if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
- src_oa->o_flags == OBD_FL_DELORPHAN) {
- /* should be used with LOV anymore */
- LBUG();
- }
-
- lov = &exp->exp_obd->u.lov;
- if (!lov->desc.ld_active_tgt_count)
- return -EIO;
-
- obd_getref(exp->exp_obd);
- /* Recreate a specific object id at the given OST index */
- if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
- (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) {
- rc = lov_recreate(exp, src_oa, ea, oti);
- }
-
- obd_putref(exp->exp_obd);
- return rc;
-}
-
#define ASSERT_LSM_MAGIC(lsmp) \
do { \
LASSERT((lsmp)); \
@@ -1065,59 +980,6 @@ do { \
"%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
} while (0)
-static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti, struct obd_export *md_exp)
-{
- struct lov_request_set *set;
- struct obd_info oinfo;
- struct lov_request *req;
- struct lov_obd *lov;
- int rc = 0, err = 0;
-
- ASSERT_LSM_MAGIC(lsm);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- if (oa->o_valid & OBD_MD_FLCOOKIE) {
- LASSERT(oti);
- LASSERT(oti->oti_logcookies);
- }
-
- lov = &exp->exp_obd->u.lov;
- obd_getref(exp->exp_obd);
- rc = lov_prep_destroy_set(exp, &oinfo, oa, lsm, oti, &set);
- if (rc)
- goto out;
-
- list_for_each_entry(req, &set->set_list, rq_link) {
- if (oa->o_valid & OBD_MD_FLCOOKIE)
- oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
- err = obd_destroy(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
- req->rq_oi.oi_oa, NULL, oti, NULL);
- err = lov_update_common_set(set, req, err);
- if (err) {
- CERROR("%s: destroying objid "DOSTID" subobj "
- DOSTID" on OST idx %d: rc = %d\n",
- exp->exp_obd->obd_name, POSTID(&oa->o_oi),
- POSTID(&req->rq_oi.oi_oa->o_oi),
- req->rq_idx, err);
- if (!rc)
- rc = err;
- }
- }
-
- if (rc == 0)
- rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp);
-
- err = lov_fini_destroy_set(set);
-out:
- obd_putref(exp->exp_obd);
- return rc ? rc : err;
-}
-
static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
void *data, int rc)
{
@@ -1267,46 +1129,6 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
return 0;
}
-/* find any ldlm lock of the inode in lov
- * return 0 not find
- * 1 find one
- * < 0 error
- */
-static int lov_find_cbdata(struct obd_export *exp,
- struct lov_stripe_md *lsm, ldlm_iterator_t it,
- void *data)
-{
- struct lov_obd *lov;
- int rc = 0, i;
-
- ASSERT_LSM_MAGIC(lsm);
-
- if (!exp || !exp->exp_obd)
- return -ENODEV;
-
- lov = &exp->exp_obd->u.lov;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_stripe_md submd;
- struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov->lov_tgts[loi->loi_ost_idx]) {
- CDEBUG(D_HA, "lov idx %d NULL\n", loi->loi_ost_idx);
- continue;
- }
-
- submd.lsm_oi = loi->loi_oi;
- submd.lsm_stripe_count = 0;
- rc = obd_find_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
- &submd, it, data);
- if (rc != 0)
- return rc;
- }
- return rc;
-}
-
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
@@ -1460,7 +1282,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
}
desc = (struct lov_desc *)data->ioc_inlbuf1;
- memcpy(desc, &(lov->desc), sizeof(*desc));
+ memcpy(desc, &lov->desc, sizeof(*desc));
uuidp = (struct obd_uuid *)data->ioc_inlbuf2;
genp = (__u32 *)data->ioc_inlbuf3;
@@ -1477,9 +1299,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
obd_ioctl_freedata(buf, len);
break;
}
- case LL_IOC_LOV_GETSTRIPE:
- rc = lov_getstripe(exp, karg, uarg);
- break;
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl = karg;
struct lov_tgt_desc *tgt = NULL;
@@ -1726,6 +1545,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
u64 fm_start, fm_end, fm_length, fm_end_offset;
u64 curr_loc;
int current_extent = 0, rc = 0, i;
+ /* whether we have collected enough extents */
+ bool enough = false;
int ost_eof = 0; /* EOF for object */
int ost_done = 0; /* done with required mapping for this OST? */
int last_stripe;
@@ -1860,7 +1681,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
lun_start += len_mapped_single_call;
fm_local->fm_length = req_fm_len - len_mapped_single_call;
req_fm_len = fm_local->fm_length;
- fm_local->fm_extent_count = count_local;
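+ /*
+ * Once enough extents have been collected, request a single
+ * extent per call just to learn whether more extents follow.
+ */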
+ fm_local->fm_extent_count = enough ? 1 : count_local;
fm_local->fm_mapped_extents = 0;
fm_local->fm_flags = fiemap->fm_flags;
@@ -1908,6 +1729,12 @@ inactive_tgt:
goto finish;
}
break;
+ } else if (enough) {
+ /*
+ * We have collected enough extents, and there are
+ * more extents beyond this one.
+ */
+ goto finish;
}
/* If we just need num of extents then go to next device */
@@ -1916,8 +1743,9 @@ inactive_tgt:
break;
}
- len_mapped_single_call = lcl_fm_ext[ext_count-1].fe_logical -
- lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+ len_mapped_single_call =
+ lcl_fm_ext[ext_count - 1].fe_logical -
+ lun_start + lcl_fm_ext[ext_count - 1].fe_length;
/* Have we finished mapping on this device? */
if (req_fm_len <= len_mapped_single_call)
@@ -1926,14 +1754,15 @@ inactive_tgt:
/* Clear the EXTENT_LAST flag which can be present on
* last extent
*/
- if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
+ if (lcl_fm_ext[ext_count - 1].fe_flags &
+ FIEMAP_EXTENT_LAST)
lcl_fm_ext[ext_count - 1].fe_flags &=
~FIEMAP_EXTENT_LAST;
curr_loc = lov_stripe_size(lsm,
- lcl_fm_ext[ext_count - 1].fe_logical+
- lcl_fm_ext[ext_count - 1].fe_length,
- cur_stripe);
+ lcl_fm_ext[ext_count - 1].fe_logical +
+ lcl_fm_ext[ext_count - 1].fe_length,
+ cur_stripe);
if (curr_loc >= fm_key->oa.o_size)
ost_eof = 1;
@@ -1945,7 +1774,7 @@ inactive_tgt:
/* Ran out of available extents? */
if (current_extent >= fiemap->fm_extent_count)
- goto finish;
+ enough = true;
} while (ost_done == 0 && ost_eof == 0);
if (cur_stripe_wrap == last_stripe)
@@ -1985,73 +1814,14 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
{
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
- int i, rc;
+ int rc;
if (!vallen || !val)
return -EFAULT;
obd_getref(obddev);
- if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
- struct {
- char name[16];
- struct ldlm_lock *lock;
- } *data = key;
- struct ldlm_res_id *res_id = &data->lock->l_resource->lr_name;
- struct lov_oinfo *loi;
- __u32 *stripe = val;
-
- if (*vallen < sizeof(*stripe)) {
- rc = -EFAULT;
- goto out;
- }
- *vallen = sizeof(*stripe);
-
- /* XXX This is another one of those bits that will need to
- * change if we ever actually support nested LOVs. It uses
- * the lock's export to find out which stripe it is.
- */
- /* XXX - it's assumed all the locks for deleted OSTs have
- * been cancelled. Also, the export for deleted OSTs will
- * be NULL and won't match the lock's export.
- */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- loi = lsm->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov->lov_tgts[loi->loi_ost_idx])
- continue;
- if (lov->lov_tgts[loi->loi_ost_idx]->ltd_exp ==
- data->lock->l_conn_export &&
- ostid_res_name_eq(&loi->loi_oi, res_id)) {
- *stripe = i;
- rc = 0;
- goto out;
- }
- }
- LDLM_ERROR(data->lock, "lock on inode without such object");
- dump_lsm(D_ERROR, lsm);
- rc = -ENXIO;
- goto out;
- } else if (KEY_IS(KEY_LAST_ID)) {
- struct obd_id_info *info = val;
- __u32 size = sizeof(u64);
- struct lov_tgt_desc *tgt;
-
- LASSERT(*vallen == sizeof(struct obd_id_info));
- tgt = lov->lov_tgts[info->idx];
-
- if (!tgt || !tgt->ltd_active) {
- rc = -ESRCH;
- goto out;
- }
-
- rc = obd_get_info(env, tgt->ltd_exp, keylen, key,
- &size, info->data, NULL);
- rc = 0;
- goto out;
- } else if (KEY_IS(KEY_LOVDESC)) {
+ if (KEY_IS(KEY_LOVDESC)) {
struct lov_desc *desc_ret = val;
*desc_ret = lov->desc;
@@ -2060,22 +1830,6 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
} else if (KEY_IS(KEY_FIEMAP)) {
rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
goto out;
- } else if (KEY_IS(KEY_CONNECT_FLAG)) {
- struct lov_tgt_desc *tgt;
- __u64 ost_idx = *((__u64 *)val);
-
- LASSERT(*vallen == sizeof(__u64));
- LASSERT(ost_idx < lov->desc.ld_tgt_count);
- tgt = lov->lov_tgts[ost_idx];
-
- if (!tgt || !tgt->ltd_exp) {
- rc = -ESRCH;
- goto out;
- }
-
- *((__u64 *)val) = exp_connect_flags(tgt->ltd_exp);
- rc = 0;
- goto out;
} else if (KEY_IS(KEY_TGT_COUNT)) {
*((int *)val) = lov->desc.ld_tgt_count;
rc = 0;
@@ -2098,8 +1852,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 count;
int i, rc = 0, err;
struct lov_tgt_desc *tgt;
- unsigned int incr = 0, check_uuid = 0, do_inactive = 0, no_set = 0;
- unsigned int next_id = 0, mds_con = 0;
+ int do_inactive = 0, no_set = 0;
if (!set) {
no_set = 1;
@@ -2111,18 +1864,8 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
obd_getref(obddev);
count = lov->desc.ld_tgt_count;
- if (KEY_IS(KEY_NEXT_ID)) {
- count = vallen / sizeof(struct obd_id_info);
- vallen = sizeof(u64);
- incr = sizeof(struct obd_id_info);
- do_inactive = 1;
- next_id = 1;
- } else if (KEY_IS(KEY_CHECKSUM)) {
+ if (KEY_IS(KEY_CHECKSUM)) {
do_inactive = 1;
- } else if (KEY_IS(KEY_EVICT_BY_NID)) {
- /* use defaults: do_inactive = incr = 0; */
- } else if (KEY_IS(KEY_MDS_CONN)) {
- mds_con = 1;
} else if (KEY_IS(KEY_CACHE_SET)) {
LASSERT(!lov->lov_cache);
lov->lov_cache = val;
@@ -2130,11 +1873,9 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
cl_cache_incref(lov->lov_cache);
}
- for (i = 0; i < count; i++, val = (char *)val + incr) {
- if (next_id)
- tgt = lov->lov_tgts[((struct obd_id_info *)val)->idx];
- else
- tgt = lov->lov_tgts[i];
+ for (i = 0; i < count; i++) {
+ tgt = lov->lov_tgts[i];
+
/* OST was disconnected */
if (!tgt || !tgt->ltd_exp)
continue;
@@ -2143,34 +1884,8 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
if (!tgt->ltd_active && !do_inactive)
continue;
- if (mds_con) {
- struct mds_group_info *mgi;
-
- LASSERT(vallen == sizeof(*mgi));
- mgi = (struct mds_group_info *)val;
-
- /* Only want a specific OSC */
- if (mgi->uuid && !obd_uuid_equals(mgi->uuid,
- &tgt->ltd_uuid))
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, sizeof(int),
- &mgi->group, set);
- } else if (next_id) {
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen,
- ((struct obd_id_info *)val)->data, set);
- } else {
- /* Only want a specific OSC */
- if (check_uuid &&
- !obd_uuid_equals(val, &tgt->ltd_uuid))
- continue;
-
- err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen, val, set);
- }
-
+ err = obd_set_info_async(env, tgt->ltd_exp, keylen, key,
+ vallen, val, set);
if (!rc)
rc = err;
}
@@ -2318,12 +2033,8 @@ static struct obd_ops lov_obd_ops = {
.statfs_async = lov_statfs_async,
.packmd = lov_packmd,
.unpackmd = lov_unpackmd,
- .create = lov_create,
- .destroy = lov_destroy,
.getattr_async = lov_getattr_async,
.setattr_async = lov_setattr_async,
- .adjust_kms = lov_adjust_kms,
- .find_cbdata = lov_find_cbdata,
.iocontrol = lov_iocontrol,
.get_info = lov_get_info,
.set_info_async = lov_set_info_async,
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index f9621b0fd469..52f736338887 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -75,6 +75,13 @@ struct lov_layout_operations {
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
+void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
+{
+ if (lsm)
+ lov_free_memmd(&lsm);
+}
+EXPORT_SYMBOL(lov_lsm_put);
+
/*****************************************************************************
*
* Lov object layout operations.
@@ -195,6 +202,10 @@ static int lov_page_slice_fixup(struct lov_object *lov,
struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
struct cl_object *o;
+ if (!stripe)
+ return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
+ cfs_size_round(sizeof(struct lov_page));
+
cl_object_for_each(o, stripe)
o->co_slice_off += hdr->coh_page_bufsize;
@@ -224,6 +235,7 @@ static int lov_init_raid0(const struct lu_env *env,
LASSERT(!lov->lo_lsm);
lov->lo_lsm = lsm_addref(lsm);
+ lov->lo_layout_invalid = true;
r0->lo_nr = lsm->lsm_stripe_count;
LASSERT(r0->lo_nr <= lov_targets_nr(dev));
@@ -719,6 +731,10 @@ static int lov_layout_change(const struct lu_env *unused,
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
lov->lo_type = LLT_EMPTY;
+ /* page bufsize fixup */
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
+ lov_page_slice_fixup(lov, NULL);
+
result = new_ops->llo_init(env,
lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
lov, conf, state);
@@ -878,8 +894,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}
-static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
+static int lov_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid)
{
/*
* No dispatch is required here, as no layout implements this.
@@ -895,13 +911,30 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
io);
}
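+/*
+ * cl_object_operations::coo_getstripe() handler for LOV: take a
+ * reference on the layout and let lov_getstripe() copy the striping
+ * information to user space.
+ */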
+static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *lum)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_stripe_md *lsm;
+ int rc = 0;
+
+ lsm = lov_lsm_addref(lov);
+ if (!lsm)
+ return -ENODATA;
+
+ rc = lov_getstripe(cl2lov(obj), lsm, lum);
+ lov_lsm_put(obj, lsm);
+ return rc;
+}
+
static const struct cl_object_operations lov_ops = {
.coo_page_init = lov_page_init,
.coo_lock_init = lov_lock_init,
.coo_io_init = lov_io_init,
.coo_attr_get = lov_attr_get,
- .coo_attr_set = lov_attr_set,
- .coo_conf_set = lov_conf_set
+ .coo_attr_update = lov_attr_update,
+ .coo_conf_set = lov_conf_set,
+ .coo_getstripe = lov_object_getstripe
};
static const struct lu_object_operations lov_lu_obj_ops = {
@@ -938,7 +971,7 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
return obj;
}
-static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
+struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
{
struct lov_stripe_md *lsm = NULL;
@@ -969,13 +1002,6 @@ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
}
EXPORT_SYMBOL(lov_lsm_get);
-void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
-{
- if (lsm)
- lov_free_memmd(&lsm);
-}
-EXPORT_SYMBOL(lov_lsm_put);
-
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
struct lu_object *luobj;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 869ef41b13ca..be6e9857ce2a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -45,6 +45,7 @@
#include "../include/lustre/lustre_user.h"
#include "lov_internal.h"
+#include "lov_cl_internal.h"
void lov_dump_lmm_common(int level, void *lmmp)
{
@@ -104,11 +105,9 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
* LOVs properly. For now lov_mds_md_size() just assumes one u64
* per stripe.
*/
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
+int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
{
- struct obd_device *obd = class_exp2obd(exp);
- struct lov_obd *lov = &obd->u.lov;
struct lov_mds_md_v1 *lmmv1;
struct lov_mds_md_v3 *lmmv3;
__u16 stripe_count;
@@ -148,16 +147,11 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
stripe_count = 0;
}
} else {
- /* No need to allocate more than maximum supported stripes.
- * Anyway, this is pretty inaccurate since ld_tgt_count now
- * represents max index and we should rely on the actual number
- * of OSTs instead
+ /*
+ * Calculate the maximum easize from the currently active
+ * targets; this is exactly the maximum easize LOV will see.
*/
- stripe_count = lov_mds_md_max_stripe_count(
- lov->lov_ocd.ocd_max_easize, lmm_magic);
-
- if (stripe_count > lov->desc.ld_tgt_count)
- stripe_count = lov->desc.ld_tgt_count;
+ stripe_count = lov->desc.ld_active_tgt_count;
}
/* XXX LOV STACKING call into osc for sizes */
@@ -225,6 +219,15 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
return lmm_size;
}
+int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_obd *lov = &obd->u.lov;
+
+ return lov_obd_packmd(lov, lmmp, lsm);
+}
+
/* Find the max stripecount we should use */
__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
@@ -284,7 +287,7 @@ int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
spin_lock_init(&(*lsmp)->lsm_lock);
(*lsmp)->lsm_magic = magic;
(*lsmp)->lsm_stripe_count = stripe_count;
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+ (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
(*lsmp)->lsm_pattern = pattern;
(*lsmp)->lsm_pool_name[0] = '\0';
(*lsmp)->lsm_layout_gen = 0;
@@ -372,16 +375,17 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
* the maximum number of OST indices which will fit in the user buffer.
* lmm_magic must be LOV_USER_MAGIC.
*/
-int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
+int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
struct lov_user_md __user *lump)
{
/*
* XXX huge struct allocated on stack.
*/
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_obd *lov;
struct lov_user_md_v3 lum;
struct lov_mds_md *lmmk = NULL;
- int rc, lmm_size;
+ int rc, lmmk_size, lmm_size;
int lum_size;
mm_segment_t seg;
@@ -401,12 +405,13 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
- goto out_set;
+ goto out;
}
- if ((lum.lmm_magic != LOV_USER_MAGIC) &&
- (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
+ if (lum.lmm_magic != LOV_USER_MAGIC_V1 &&
+ lum.lmm_magic != LOV_USER_MAGIC_V3 &&
+ lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) {
rc = -EINVAL;
- goto out_set;
+ goto out;
}
if (lum.lmm_stripe_count &&
@@ -415,11 +420,13 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
lum.lmm_stripe_count = lsm->lsm_stripe_count;
rc = copy_to_user(lump, &lum, lum_size);
rc = -EOVERFLOW;
- goto out_set;
+ goto out;
}
- rc = lov_packmd(exp, &lmmk, lsm);
+ lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov;
+ rc = lov_obd_packmd(lov, &lmmk, lsm);
if (rc < 0)
- goto out_set;
+ goto out;
+ lmmk_size = rc;
lmm_size = rc;
rc = 0;
@@ -455,7 +462,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
lmm_size = lum_size;
} else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
- goto out_set;
+ goto out_free;
}
/*
* Have a difference between lov_mds_md & lov_user_md.
@@ -468,8 +475,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
- obd_free_diskmd(exp, &lmmk);
-out_set:
+out_free:
+ kfree(lmmk);
+out:
set_fs(seg);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index c17026f14896..00bfabad78eb 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -65,7 +65,9 @@ static int lov_raid0_page_is_under_lock(const struct lu_env *env,
pgoff_t index = *max_index;
unsigned int pps; /* pages per stripe */
- CDEBUG(D_READA, "*max_index = %lu, nr = %d\n", index, r0->lo_nr);
+ CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
+ PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
+
if (index == 0) /* the page is not covered by any lock */
return 0;
@@ -80,7 +82,12 @@ static int lov_raid0_page_is_under_lock(const struct lu_env *env,
/* calculate the end of current stripe */
pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
- index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
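+ /*
+ * Round cpl_index up to the last page of its stripe without
+ * assuming pps is a power of two.
+ */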
+ index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
+
+ CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n",
+ PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
+ loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
+ slice->cpl_index);
/* never exceed the end of the stripe */
*max_index = min_t(pgoff_t, *max_index, index);
@@ -122,6 +129,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
LASSERT(rc == 0);
+ lpg->lps_stripe = stripe;
cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
sub = lov_sub_get(env, lio, stripe);
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 4c2d21729589..f8c8a361ef79 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -61,7 +61,7 @@ void lov_pool_putref(struct pool_desc *pool)
LASSERT(hlist_unhashed(&pool->pool_hash));
LASSERT(list_empty(&pool->pool_list));
LASSERT(!pool->pool_debugfs_entry);
- lov_ost_pool_free(&(pool->pool_obds));
+ lov_ost_pool_free(&pool->pool_obds);
kfree(pool);
}
}
@@ -92,7 +92,7 @@ static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned m
for (i = 0; i < LOV_MAXPOOLNAME; i++) {
if (poolname[i] == '\0')
break;
- result = (result << 4)^(result >> 28) ^ poolname[i];
+ result = (result << 4) ^ (result >> 28) ^ poolname[i];
}
return (result % mask);
}
@@ -260,7 +260,7 @@ static int pool_proc_show(struct seq_file *s, void *v)
tgt = pool_tgt(iter->pool, iter->idx);
up_read(&pool_tgt_rw_sem(iter->pool));
if (tgt)
- seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
+ seq_printf(s, "%s\n", obd_uuid2str(&tgt->ltd_uuid));
return 0;
}
@@ -400,7 +400,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
struct pool_desc *new_pool;
int rc;
- lov = &(obd->u.lov);
+ lov = &obd->u.lov;
if (strlen(poolname) > LOV_MAXPOOLNAME)
return -ENAMETOOLONG;
@@ -471,7 +471,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
struct lov_obd *lov;
struct pool_desc *pool;
- lov = &(obd->u.lov);
+ lov = &obd->u.lov;
/* lookup and kill hash reference */
pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
@@ -503,7 +503,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
unsigned int lov_idx;
int rc;
- lov = &(obd->u.lov);
+ lov = &obd->u.lov;
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (!pool)
@@ -517,7 +517,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
if (!lov->lov_tgts[lov_idx])
continue;
if (obd_uuid_equals(&ost_uuid,
- &(lov->lov_tgts[lov_idx]->ltd_uuid)))
+ &lov->lov_tgts[lov_idx]->ltd_uuid))
break;
}
/* test if ost found in lov */
@@ -547,7 +547,7 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
unsigned int lov_idx;
int rc = 0;
- lov = &(obd->u.lov);
+ lov = &obd->u.lov;
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (!pool)
@@ -562,7 +562,7 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
continue;
if (obd_uuid_equals(&ost_uuid,
- &(lov->lov_tgts[lov_idx]->ltd_uuid)))
+ &lov->lov_tgts[lov_idx]->ltd_uuid))
break;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 4099b51f826e..09dcaf484c89 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -325,84 +325,6 @@ out_set:
return rc;
}
-int lov_fini_destroy_set(struct lov_request_set *set)
-{
- if (!set)
- return 0;
- LASSERT(set->set_exp);
- if (atomic_read(&set->set_completes)) {
- /* FIXME update qos data here */
- }
-
- lov_put_reqset(set);
-
- return 0;
-}
-
-int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
- struct obdo *src_oa, struct lov_stripe_md *lsm,
- struct obd_trans_info *oti,
- struct lov_request_set **reqset)
-{
- struct lov_request_set *set;
- struct lov_obd *lov = &exp->exp_obd->u.lov;
- int rc = 0, i;
-
- set = kzalloc(sizeof(*set), GFP_NOFS);
- if (!set)
- return -ENOMEM;
- lov_init_set(set);
-
- set->set_exp = exp;
- set->set_oi = oinfo;
- set->set_oi->oi_md = lsm;
- set->set_oi->oi_oa = src_oa;
- if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
- set->set_cookies = oti->oti_logcookies;
-
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_oinfo *loi;
- struct lov_request *req;
-
- loi = lsm->lsm_oinfo[i];
- if (lov_oinfo_is_dummy(loi))
- continue;
-
- if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
- CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
- continue;
- }
-
- req = kzalloc(sizeof(*req), GFP_NOFS);
- if (!req) {
- rc = -ENOMEM;
- goto out_set;
- }
-
- req->rq_stripe = i;
- req->rq_idx = loi->loi_ost_idx;
-
- req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
- if (!req->rq_oi.oi_oa) {
- kfree(req);
- rc = -ENOMEM;
- goto out_set;
- }
- memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
- req->rq_oi.oi_oa->o_oi = loi->loi_oi;
- lov_set_add_req(req, set);
- }
- if (!set->set_count) {
- rc = -EIO;
- goto out_set;
- }
- *reqset = set;
- return rc;
-out_set:
- lov_fini_destroy_set(set);
- return rc;
-}
-
int lov_fini_setattr_set(struct lov_request_set *set)
{
int rc = 0;
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index fb2f2660b3e9..a2bac7a3b71b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -98,8 +98,8 @@ static int lovsub_object_print(const struct lu_env *env, void *cookie,
return (*p)(env, cookie, "[%d]", los->lso_index);
}
-static int lovsub_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
+static int lovsub_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid)
{
struct lov_object *lov = cl2lovsub(obj)->lso_super;
@@ -119,7 +119,7 @@ static int lovsub_object_glimpse(const struct lu_env *env,
static const struct cl_object_operations lovsub_ops = {
.coo_page_init = lovsub_page_init,
.coo_lock_init = lovsub_lock_init,
- .coo_attr_set = lovsub_attr_set,
+ .coo_attr_update = lovsub_attr_update,
.coo_glimpse = lovsub_object_glimpse
};
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 98d15fb247bc..fca9450de57c 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -43,11 +43,10 @@ static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
int len;
struct obd_device *dev = container_of(kobj, struct obd_device,
obd_kobj);
- struct client_obd *cli = &dev->u.cli;
+ __u32 max;
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- spin_unlock(&cli->cl_loi_list_lock);
+ max = obd_get_max_rpcs_in_flight(&dev->u.cli);
+ len = sprintf(buf, "%u\n", max);
return len;
}
@@ -59,7 +58,6 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
{
struct obd_device *dev = container_of(kobj, struct obd_device,
obd_kobj);
- struct client_obd *cli = &dev->u.cli;
int rc;
unsigned long val;
@@ -67,12 +65,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
if (rc)
return rc;
- if (val < 1 || val > MDC_MAX_RIF_MAX)
- return -ERANGE;
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_max_rpcs_in_flight = val;
- spin_unlock(&cli->cl_loi_list_lock);
+ rc = obd_set_max_rpcs_in_flight(&dev->u.cli, val);
+ if (rc)
+ count = rc;
return count;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 58f2841cabe4..f446c1c2584b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -34,63 +34,57 @@
#define _MDC_INTERNAL_H
#include "../include/lustre_mdc.h"
-#include "../include/lustre_mds.h"
void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars);
void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, int ea_size, __u32 suppgid, int flags);
-void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
- const struct lu_fid *cfid, int flags);
+ __u64 valid, size_t ea_size, __u32 suppgid, u32 flags);
void mdc_swap_layouts_pack(struct ptlrpc_request *req,
struct md_op_data *op_data);
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
+void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
const struct lu_fid *fid);
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
- struct md_op_data *data, int ea_size);
+void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
+ struct md_op_data *data, size_t ea_size);
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len);
+ void *ea, size_t ealen, void *ea2, size_t ea2len);
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, int datalen, __u32 mode, __u32 uid,
- __u32 gid, cfs_cap_t capability, __u64 rdev);
+ const void *data, size_t datalen, umode_t mode, uid_t uid,
+ gid_t gid, cfs_cap_t capability, __u64 rdev);
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u64 flags, const void *data,
- int datalen);
+ umode_t mode, __u64 rdev, __u64 flags, const void *data,
+ size_t datalen);
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen);
+ const char *old, size_t oldlen,
+ const char *new, size_t newlen);
void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
-int mdc_enter_request(struct client_obd *cli);
-void mdc_exit_request(struct client_obd *cli);
/* mdc/mdc_locks.c */
int mdc_set_lock_data(struct obd_export *exp,
- __u64 *lockh, void *data, __u64 *bits);
+ const struct lustre_handle *lockh,
+ void *data, __u64 *bits);
int mdc_null_inode(struct obd_export *exp, const struct lu_fid *fid);
-int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_iterator_t it, void *data);
-
int mdc_intent_lock(struct obd_export *exp,
- struct md_op_data *,
- void *lmm, int lmmsize,
- struct lookup_intent *, int,
+ struct md_op_data *op_data,
+ struct lookup_intent *it,
struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags);
+
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ const ldlm_policy_data_t *policy,
struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, __u64 extra_lock_flags);
+ struct lustre_handle *lockh, __u64 extra_lock_flags);
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
struct list_head *cancels, enum ldlm_mode mode,
__u64 bits);
/* mdc/mdc_request.c */
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data);
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data);
struct obd_client_handle;
int mdc_set_open_replay_data(struct obd_export *exp,
@@ -101,16 +95,17 @@ void mdc_commit_open(struct ptlrpc_request *req);
void mdc_replay_open(struct ptlrpc_request *req);
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid, __u32 gid,
- cfs_cap_t capability, __u64 rdev,
+ const void *data, size_t datalen, umode_t mode, uid_t uid,
+ gid_t gid, cfs_cap_t capability, __u64 rdev,
struct ptlrpc_request **request);
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request);
int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
+ const char *old, size_t oldlen,
+ const char *new, size_t newlen,
struct ptlrpc_request **request);
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
+ void *ea, size_t ealen, void *ea2, size_t ea2len,
struct ptlrpc_request **request, struct md_open_data **mod);
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request);
@@ -138,4 +133,12 @@ static inline int mdc_prep_elc_req(struct obd_export *exp,
count);
}
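+/*
+ * Convert a 64-bit directory hash into a page-cache index for readdir
+ * pages; on 32-bit kernels only the high word of a 64-bit hash is kept.
+ */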
+static inline unsigned long hash_x_index(__u64 hash, int hash64)
+{
+ if (BITS_PER_LONG == 32 && hash64)
+ hash >>= 32;
+ /* save hash 0 with hash 1 */
+ return ~0UL - (hash + !hash);
+}
+
#endif
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 143bd7628572..aac7e04873e2 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -37,27 +37,12 @@
static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
{
- b->suppgid = suppgid;
- b->uid = from_kuid(&init_user_ns, current_uid());
- b->gid = from_kgid(&init_user_ns, current_gid());
- b->fsuid = from_kuid(&init_user_ns, current_fsuid());
- b->fsgid = from_kgid(&init_user_ns, current_fsgid());
- b->capability = cfs_curproc_cap_pack();
-}
-
-void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
- const struct lu_fid *cfid, int flags)
-{
- struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
- &RMF_MDT_BODY);
-
- if (pfid) {
- b->fid1 = *pfid;
- b->valid = OBD_MD_FLID;
- }
- if (cfid)
- b->fid2 = *cfid;
- b->flags = flags;
+ b->mbo_suppgid = suppgid;
+ b->mbo_uid = from_kuid(&init_user_ns, current_uid());
+ b->mbo_gid = from_kgid(&init_user_ns, current_gid());
+ b->mbo_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ b->mbo_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ b->mbo_capability = cfs_curproc_cap_pack();
}
void mdc_swap_layouts_pack(struct ptlrpc_request *req,
@@ -67,43 +52,74 @@ void mdc_swap_layouts_pack(struct ptlrpc_request *req,
&RMF_MDT_BODY);
__mdc_pack_body(b, op_data->op_suppgids[0]);
- b->fid1 = op_data->op_fid1;
- b->fid2 = op_data->op_fid2;
- b->valid |= OBD_MD_FLID;
+ b->mbo_fid1 = op_data->op_fid1;
+ b->mbo_fid2 = op_data->op_fid2;
+ b->mbo_valid |= OBD_MD_FLID;
}
void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
- __u64 valid, int ea_size, __u32 suppgid, int flags)
+ __u64 valid, size_t ea_size, __u32 suppgid, u32 flags)
{
struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
&RMF_MDT_BODY);
- b->valid = valid;
- b->eadatasize = ea_size;
- b->flags = flags;
+ b->mbo_valid = valid;
+ b->mbo_eadatasize = ea_size;
+ b->mbo_flags = flags;
__mdc_pack_body(b, suppgid);
if (fid) {
- b->fid1 = *fid;
- b->valid |= OBD_MD_FLID;
+ b->mbo_fid1 = *fid;
+ b->mbo_valid |= OBD_MD_FLID;
}
}
-void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff,
- __u32 size, const struct lu_fid *fid)
+/**
+ * Pack a name (path component) into a request
+ *
+ * \param[in] req request
+ * \param[in] field request field (usually RMF_NAME)
+ * \param[in] name path component
+ * \param[in] name_len length of path component
+ *
+ * \a field must be present in \a req and of size \a name_len + 1.
+ *
+ * \a name must be '\0' terminated, have length \a name_len, and be a
+ * single path component (it must not contain '/').
+ */
+static void mdc_pack_name(struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ const char *name, size_t name_len)
+{
+ size_t buf_size;
+ size_t cpy_len;
+ char *buf;
+
+ buf = req_capsule_client_get(&req->rq_pill, field);
+ buf_size = req_capsule_get_size(&req->rq_pill, field, RCL_CLIENT);
+
+ LASSERT(name && name_len && buf && buf_size == name_len + 1);
+
+ cpy_len = strlcpy(buf, name, buf_size);
+
+ LASSERT(cpy_len == name_len && lu_name_is_valid_2(buf, cpy_len));
+}
+
+void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
+ const struct lu_fid *fid)
{
struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
&RMF_MDT_BODY);
- b->fid1 = *fid;
- b->valid |= OBD_MD_FLID;
- b->size = pgoff; /* !! */
- b->nlink = size; /* !! */
+ b->mbo_fid1 = *fid;
+ b->mbo_valid |= OBD_MD_FLID;
+ b->mbo_size = pgoff; /* !! */
+ b->mbo_nlink = size; /* !! */
__mdc_pack_body(b, -1);
- b->mode = LUDA_FID | LUDA_TYPE;
+ b->mbo_mode = LUDA_FID | LUDA_TYPE;
}
/* packing of MDS records */
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const void *data, int datalen, __u32 mode,
- __u32 uid, __u32 gid, cfs_cap_t cap_effective, __u64 rdev)
+ const void *data, size_t datalen, umode_t mode,
+ uid_t uid, gid_t gid, cfs_cap_t cap_effective, __u64 rdev)
{
struct mdt_rec_create *rec;
char *tmp;
@@ -130,22 +146,17 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_bias = op_data->op_bias;
rec->cr_umask = current_umask();
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
+ mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
if (data) {
tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
memcpy(tmp, data, datalen);
}
}
-static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
+static inline __u64 mds_pack_open_flags(__u64 flags)
{
__u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
- MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |
- MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |
- MDS_OPEN_BY_FID | MDS_OPEN_LEASE |
- MDS_OPEN_RELEASE));
+ MDS_OPEN_FL_INTERNAL));
if (flags & O_CREAT)
cr_flags |= MDS_OPEN_CREAT;
if (flags & O_EXCL)
@@ -171,8 +182,8 @@ static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
/* packing of MDS records */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- __u32 mode, __u64 rdev, __u64 flags, const void *lmm,
- int lmmlen)
+ umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
+ size_t lmmlen)
{
struct mdt_rec_create *rec;
char *tmp;
@@ -190,7 +201,7 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_fid2 = op_data->op_fid2;
rec->cr_mode = mode;
- cr_flags = mds_pack_open_flags(flags, mode);
+ cr_flags = mds_pack_open_flags(flags);
rec->cr_rdev = rdev;
rec->cr_time = op_data->op_mod_time;
rec->cr_suppgid1 = op_data->op_suppgids[0];
@@ -200,8 +211,9 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->cr_old_handle = op_data->op_handle;
if (op_data->op_name) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+ mdc_pack_name(req, &RMF_NAME, op_data->op_name,
+ op_data->op_namelen);
+
if (op_data->op_bias & MDS_CREATE_VOLATILE)
cr_flags |= MDS_OPEN_VOLATILE;
}
@@ -295,7 +307,7 @@ static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
}
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len)
+ void *ea, size_t ealen, void *ea2, size_t ea2len)
{
struct mdt_rec_setattr *rec;
struct mdt_ioepoch *epoch;
@@ -316,7 +328,7 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
if (!ea) { /* Remove LOV EA */
- lum->lmm_magic = LOV_USER_MAGIC_V1;
+ lum->lmm_magic = cpu_to_le32(LOV_USER_MAGIC_V1);
lum->lmm_stripe_size = 0;
lum->lmm_stripe_count = 0;
lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1);
@@ -334,7 +346,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_rec_unlink *rec;
- char *tmp;
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
@@ -352,15 +363,12 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec->ul_time = op_data->op_mod_time;
rec->ul_bias = op_data->op_bias;
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LASSERT(tmp);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+ mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
{
struct mdt_rec_link *rec;
- char *tmp;
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
@@ -376,20 +384,21 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec->lk_time = op_data->op_mod_time;
rec->lk_bias = op_data->op_bias;
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+ mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen)
+ const char *old, size_t oldlen,
+ const char *new, size_t newlen)
{
struct mdt_rec_rename *rec;
- char *tmp;
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
/* XXX do something about time, uid, gid */
+ rec->rn_opcode = op_data->op_cli_flags & CLI_MIGRATE ?
+ REINT_MIGRATE : REINT_RENAME;
- rec->rn_opcode = REINT_RENAME;
rec->rn_fsuid = op_data->op_fsuid;
rec->rn_fsgid = op_data->op_fsgid;
@@ -402,39 +411,34 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
rec->rn_mode = op_data->op_mode;
rec->rn_bias = op_data->op_bias;
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LOGL0(old, oldlen, tmp);
+ mdc_pack_name(req, &RMF_NAME, old, oldlen);
- if (new) {
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SYMTGT);
- LOGL0(new, newlen, tmp);
- }
+ if (new)
+ mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
}
-void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
- struct md_op_data *op_data, int ea_size)
+void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
+ struct md_op_data *op_data, size_t ea_size)
{
struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
&RMF_MDT_BODY);
- b->valid = valid;
+ b->mbo_valid = valid;
if (op_data->op_bias & MDS_CHECK_SPLIT)
- b->valid |= OBD_MD_FLCKSPLIT;
+ b->mbo_valid |= OBD_MD_FLCKSPLIT;
if (op_data->op_bias & MDS_CROSS_REF)
- b->valid |= OBD_MD_FLCROSSREF;
- b->eadatasize = ea_size;
- b->flags = flags;
+ b->mbo_valid |= OBD_MD_FLCROSSREF;
+ b->mbo_eadatasize = ea_size;
+ b->mbo_flags = flags;
__mdc_pack_body(b, op_data->op_suppgids[0]);
- b->fid1 = op_data->op_fid1;
- b->fid2 = op_data->op_fid2;
- b->valid |= OBD_MD_FLID;
-
- if (op_data->op_name) {
- char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ b->mbo_fid1 = op_data->op_fid1;
+ b->mbo_fid2 = op_data->op_fid2;
+ b->mbo_valid |= OBD_MD_FLID;
- LOGL0(op_data->op_name, op_data->op_namelen, tmp);
- }
+ if (op_data->op_name)
+ mdc_pack_name(req, &RMF_NAME, op_data->op_name,
+ op_data->op_namelen);
}
static void mdc_hsm_release_pack(struct ptlrpc_request *req,
@@ -482,67 +486,3 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
mdc_ioepoch_pack(epoch, op_data);
mdc_hsm_release_pack(req, op_data);
}
-
-static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
-{
- int rc;
-
- spin_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&mcw->mcw_entry);
- spin_unlock(&cli->cl_loi_list_lock);
- return rc;
-};
-
-/* We record requests in flight in cli->cl_r_in_flight here.
- * There is only one write rpc possible in mdc anyway. If this to change
- * in the future - the code may need to be revisited.
- */
-int mdc_enter_request(struct client_obd *cli)
-{
- int rc = 0;
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- spin_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- init_waitqueue_head(&mcw.mcw_waitq);
- spin_unlock(&cli->cl_loi_list_lock);
- rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
- &lwi);
- if (rc) {
- spin_lock(&cli->cl_loi_list_lock);
- if (list_empty(&mcw.mcw_entry))
- cli->cl_r_in_flight--;
- list_del_init(&mcw.mcw_entry);
- spin_unlock(&cli->cl_loi_list_lock);
- }
- } else {
- cli->cl_r_in_flight++;
- spin_unlock(&cli->cl_loi_list_lock);
- }
- return rc;
-}
-
-void mdc_exit_request(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- spin_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
- list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- wake_up(&mcw->mcw_waitq);
- }
- /* Empty waiting list? Decrease reqs in-flight number */
-
- spin_unlock(&cli->cl_loi_list_lock);
-}
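Editor's note: the mdc_enter_request()/mdc_exit_request() pair removed above is a simple slot limiter. A caller either claims one of cl_max_rpcs_in_flight slots or parks on the waiter list until an exiting request wakes it, and the new obd_get_request_slot()/obd_put_request_slot() helpers take over that role. A minimal userspace sketch of the same idea follows; the struct and function names are hypothetical, and the interruptible-wait path of the kernel version is deliberately omitted.

/* Hypothetical userspace sketch of the "max RPCs in flight" throttle that
 * mdc_enter_request()/mdc_exit_request() used to implement; the client now
 * uses obd_get_request_slot()/obd_put_request_slot() instead.
 */
#include <pthread.h>

struct request_slots {
	pthread_mutex_t lock;
	pthread_cond_t  freed;
	unsigned int    in_flight;
	unsigned int    max_in_flight;   /* cl_max_rpcs_in_flight analogue */
};

static void slots_init(struct request_slots *s, unsigned int max)
{
	pthread_mutex_init(&s->lock, NULL);
	pthread_cond_init(&s->freed, NULL);
	s->in_flight = 0;
	s->max_in_flight = max;
}

/* Block until a slot is available, then claim it. */
static void slot_get(struct request_slots *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->in_flight >= s->max_in_flight)
		pthread_cond_wait(&s->freed, &s->lock);
	s->in_flight++;
	pthread_mutex_unlock(&s->lock);
}

/* Release a slot and wake one waiter, mirroring mdc_exit_request(). */
static void slot_put(struct request_slots *s)
{
	pthread_mutex_lock(&s->lock);
	s->in_flight--;
	pthread_cond_signal(&s->freed);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct request_slots slots;

	slots_init(&slots, 8);      /* e.g. cl_max_rpcs_in_flight = 8 */
	slot_get(&slots);           /* ...issue one RPC here... */
	slot_put(&slots);
	return 0;
}

A caller pairs slot_get() with slot_put() around each RPC, exactly as mdc_enqueue() now brackets the enqueue with the new helpers.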
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index f48b58423307..f1f6c082fa42 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -93,8 +93,8 @@ int it_open_error(int phase, struct lookup_intent *it)
EXPORT_SYMBOL(it_open_error);
/* this must be called on a lockh that is known to have a referenced lock */
-int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
- __u64 *bits)
+int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
+ void *data, __u64 *bits)
{
struct ldlm_lock *lock;
struct inode *new_inode = data;
@@ -102,10 +102,10 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
if (bits)
*bits = 0;
- if (!*lockh)
+ if (!lustre_handle_is_used(lockh))
return 0;
- lock = ldlm_handle2lock((struct lustre_handle *)lockh);
+ lock = ldlm_handle2lock(lockh);
LASSERT(lock);
lock_res_and_lock(lock);
@@ -174,7 +174,7 @@ int mdc_null_inode(struct obd_export *exp,
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (!res)
+ if (IS_ERR(res))
return 0;
lock_res(res);
@@ -185,28 +185,6 @@ int mdc_null_inode(struct obd_export *exp,
return 0;
}
-/* find any ldlm lock of the inode in mdc
- * return 0 not find
- * 1 find one
- * < 0 error
- */
-int mdc_find_cbdata(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_iterator_t it, void *data)
-{
- struct ldlm_res_id res_id;
- int rc = 0;
-
- fid_build_reg_res_name((struct lu_fid *)fid, &res_id);
- rc = ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, &res_id,
- it, data);
- if (rc == LDLM_ITER_STOP)
- return 1;
- else if (rc == LDLM_ITER_CONTINUE)
- return 0;
- return rc;
-}
-
static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
{
/* Don't hold error requests for replay. */
@@ -240,24 +218,24 @@ static void mdc_realloc_openmsg(struct ptlrpc_request *req,
/* FIXME: remove this explicit offset. */
rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
- body->eadatasize);
+ body->mbo_eadatasize);
if (rc) {
CERROR("Can't enlarge segment %d size to %d\n",
- DLM_INTENT_REC_OFF + 4, body->eadatasize);
- body->valid &= ~OBD_MD_FLEASIZE;
- body->eadatasize = 0;
+ DLM_INTENT_REC_OFF + 4, body->mbo_eadatasize);
+ body->mbo_valid &= ~OBD_MD_FLEASIZE;
+ body->mbo_eadatasize = 0;
}
}
-static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data,
- void *lmm, int lmmsize,
- void *cb_data)
+static struct ptlrpc_request *
+mdc_intent_open_pack(struct obd_export *exp, struct lookup_intent *it,
+ struct md_op_data *op_data)
{
struct ptlrpc_request *req;
struct obd_device *obddev = class_exp2obd(exp);
struct ldlm_intent *lit;
+ const void *lmm = op_data->op_data;
+ u32 lmmsize = op_data->op_data_size;
LIST_HEAD(cancels);
int count = 0;
int mode;
@@ -274,7 +252,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
else
mode = LCK_PR;
} else {
- if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
+ if (it->it_flags & (FMODE_WRITE | MDS_OPEN_TRUNC))
mode = LCK_CW;
else if (it->it_flags & __FMODE_EXEC)
mode = LCK_PR;
@@ -325,6 +303,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
lmmsize);
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obddev->u.cli.cl_max_mds_easize);
+
ptlrpc_request_set_replen(req);
return req;
}
@@ -336,7 +317,8 @@ mdc_intent_getxattr_pack(struct obd_export *exp,
{
struct ptlrpc_request *req;
struct ldlm_intent *lit;
- int rc, count = 0, maxdata;
+ int rc, count = 0;
+ u32 maxdata;
LIST_HEAD(cancels);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
@@ -421,7 +403,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
OBD_MD_MEA | OBD_MD_FLACL;
struct ldlm_intent *lit;
int rc;
- int easize;
+ u32 easize;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_GETATTR);
@@ -526,7 +508,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
struct ldlm_reply *lockrep;
struct ldlm_lock *lock;
void *lvb_data = NULL;
- int lvb_len = 0;
+ u32 lvb_len = 0;
LASSERT(rc >= 0);
/* Similarly, if we're going to replay this request, we don't want to
@@ -605,7 +587,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
mdc_set_open_replay_data(NULL, NULL, it);
}
- if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
+ if ((body->mbo_valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
void *eadata;
mdc_update_max_ea_from_body(exp, body);
@@ -615,7 +597,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* Eventually, obd_unpackmd() will check the contents.
*/
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->eadatasize);
+ body->mbo_eadatasize);
if (!eadata)
return -EPROTO;
@@ -623,7 +605,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* lock
*/
lvb_data = eadata;
- lvb_len = body->eadatasize;
+ lvb_len = body->mbo_eadatasize;
/*
* We save the reply LOV EA in case we have to replay a
@@ -639,20 +621,20 @@ static int mdc_finish_enqueue(struct obd_export *exp,
if (req_capsule_get_size(pill, &RMF_EADATA,
RCL_CLIENT) <
- body->eadatasize)
+ body->mbo_eadatasize)
mdc_realloc_openmsg(req, body);
else
req_capsule_shrink(pill, &RMF_EADATA,
- body->eadatasize,
+ body->mbo_eadatasize,
RCL_CLIENT);
req_capsule_set_size(pill, &RMF_EADATA,
RCL_CLIENT,
- body->eadatasize);
+ body->mbo_eadatasize);
lmm = req_capsule_client_get(pill, &RMF_EADATA);
if (lmm)
- memcpy(lmm, eadata, body->eadatasize);
+ memcpy(lmm, eadata, body->mbo_eadatasize);
}
}
} else if (it->it_op & IT_LAYOUT) {
@@ -662,7 +644,8 @@ static int mdc_finish_enqueue(struct obd_export *exp,
lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
if (lvb_len > 0) {
lvb_data = req_capsule_server_sized_get(pill,
- &RMF_DLM_LVB, lvb_len);
+ &RMF_DLM_LVB,
+ lvb_len);
if (!lvb_data)
return -EPROTO;
}
@@ -705,9 +688,9 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* we don't know in advance the file type.
*/
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ const ldlm_policy_data_t *policy,
struct lookup_intent *it, struct md_op_data *op_data,
- struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **reqp, u64 extra_lock_flags)
+ struct lustre_handle *lockh, u64 extra_lock_flags)
{
static const ldlm_policy_data_t lookup_policy = {
.l_inodebits = { MDS_INODELOCK_LOOKUP }
@@ -721,9 +704,8 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
static const ldlm_policy_data_t getxattr_policy = {
.l_inodebits = { MDS_INODELOCK_XATTR }
};
- ldlm_policy_data_t const *policy = &lookup_policy;
struct obd_device *obddev = class_exp2obd(exp);
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
u64 flags, saved_flags = extra_lock_flags;
struct ldlm_res_id res_id;
int generation, resends = 0;
@@ -733,40 +715,32 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
einfo->ei_type);
-
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
if (it) {
+ LASSERT(!policy);
+
saved_flags |= LDLM_FL_HAS_INTENT;
- if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
+ if (it->it_op & (IT_OPEN | IT_UNLINK | IT_GETATTR | IT_READDIR))
policy = &update_policy;
else if (it->it_op & IT_LAYOUT)
policy = &layout_policy;
else if (it->it_op & (IT_GETXATTR | IT_SETXATTR))
policy = &getxattr_policy;
+ else
+ policy = &lookup_policy;
}
- LASSERT(!reqp);
-
generation = obddev->u.cli.cl_import->imp_generation;
resend:
flags = saved_flags;
if (!it) {
- /* The only way right now is FLOCK, in this case we hide flock
- * policy as lmm, but lmmsize is 0
- */
- LASSERT(lmm && lmmsize == 0);
+ /* The only way right now is FLOCK. */
LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
einfo->ei_type);
- policy = lmm;
res_id.name[3] = LDLM_FLOCK;
- req = NULL;
} else if (it->it_op & IT_OPEN) {
- req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
- einfo->ei_cbdata);
- policy = &update_policy;
- einfo->ei_cbdata = NULL;
- lmm = NULL;
+ req = mdc_intent_open_pack(exp, it, op_data);
} else if (it->it_op & IT_UNLINK) {
req = mdc_intent_unlink_pack(exp, it, op_data);
} else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
@@ -806,7 +780,7 @@ resend:
*/
if (it) {
mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
- rc = mdc_enter_request(&obddev->u.cli);
+ rc = obd_get_request_slot(&obddev->u.cli);
if (rc != 0) {
mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
mdc_clear_replay_flag(req, 0);
@@ -834,13 +808,12 @@ resend:
return rc;
}
- mdc_exit_request(&obddev->u.cli);
+ obd_put_request_slot(&obddev->u.cli);
mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
if (rc < 0) {
- CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
- "%s: ldlm_cli_enqueue failed: rc = %d\n",
- obddev->obd_name, rc);
+ CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
+ obddev->obd_name, rc);
mdc_clear_replay_flag(req, rc);
ptlrpc_req_finished(req);
@@ -903,6 +876,9 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LASSERT(request != LP_POISON);
LASSERT(request->rq_repmsg != LP_POISON);
+ if (it->it_op & IT_READDIR)
+ return 0;
+
if (!it_disposition(it, DISP_IT_EXECD)) {
/* The server failed before it even started executing the
* intent, i.e. because it couldn't unpack the request.
@@ -917,27 +893,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
LASSERT(mdt_body); /* mdc_enqueue checked */
- /* If we were revalidating a fid/name pair, mark the intent in
- * case we fail and get called again from lookup
- */
- if (fid_is_sane(&op_data->op_fid2) &&
- it->it_create_mode & M_CHECK_STALE &&
- it->it_op != IT_GETATTR) {
- /* Also: did we find the same inode? */
- /* sever can return one of two fids:
- * op_fid2 - new allocated fid - if file is created.
- * op_fid3 - existent fid - if file only open.
- * op_fid3 is saved in lmv_intent_open
- */
- if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
- (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
- CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
- "\n", PFID(&op_data->op_fid2),
- PFID(&op_data->op_fid2), PFID(&mdt_body->fid1));
- return -ESTALE;
- }
- }
-
rc = it_open_error(DISP_LOOKUP_EXECD, it);
if (rc)
return rc;
@@ -980,10 +935,10 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LDLM_DEBUG(lock, "matching against this");
- LASSERTF(fid_res_name_eq(&mdt_body->fid1,
+ LASSERTF(fid_res_name_eq(&mdt_body->mbo_fid1,
&lock->l_resource->lr_name),
"Lock res_id: "DLDLMRES", fid: "DFID"\n",
- PLDLMRES(lock->l_resource), PFID(&mdt_body->fid1));
+ PLDLMRES(lock->l_resource), PFID(&mdt_body->mbo_fid1));
LDLM_LOCK_PUT(lock);
memcpy(&old_lock, lockh, sizeof(*lockh));
@@ -998,8 +953,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
}
CDEBUG(D_DENTRY,
"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
- op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
- it->it_status, it->it_disposition, rc);
+ (int)op_data->op_namelen, op_data->op_name,
+ ldlm_it2str(it->it_op), it->it_status, it->it_disposition, rc);
return rc;
}
@@ -1042,6 +997,9 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
MDS_INODELOCK_LOOKUP |
MDS_INODELOCK_PERM;
break;
+ case IT_READDIR:
+ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
+ break;
case IT_LAYOUT:
policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
break;
@@ -1095,10 +1053,8 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
* child lookup.
*/
int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
- void *lmm, int lmmsize, struct lookup_intent *it,
- int lookup_flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- __u64 extra_lock_flags)
+ struct lookup_intent *it, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking, __u64 extra_lock_flags)
{
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_IBITS,
@@ -1112,14 +1068,14 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(it);
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
- ", intent: %s flags %#Lo\n", op_data->op_namelen,
+ ", intent: %s flags %#Lo\n", (int)op_data->op_namelen,
op_data->op_name, PFID(&op_data->op_fid2),
PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
it->it_flags);
lockh.cookie = 0;
if (fid_is_sane(&op_data->op_fid2) &&
- (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
+ (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_READDIR))) {
/* We could just return 1 immediately, but since we should only
* be called in revalidate_it if we already have a lock, let's
* verify that.
@@ -1135,13 +1091,13 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
/* For case if upper layer did not alloc fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc < 0) {
CERROR("Can't alloc new fid, rc %d\n", rc);
return rc;
}
}
- rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, lmm, lmmsize, NULL,
+ rc = mdc_enqueue(exp, &einfo, NULL, it, op_data, &lockh,
extra_lock_flags);
if (rc < 0)
return rc;
@@ -1170,7 +1126,7 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
obddev = class_exp2obd(exp);
- mdc_exit_request(&obddev->u.cli);
+ obd_put_request_slot(&obddev->u.cli);
if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE))
rc = -ETIMEDOUT;
@@ -1222,15 +1178,15 @@ int mdc_intent_getattr_async(struct obd_export *exp,
CDEBUG(D_DLMTRACE,
"name: %.*s in inode " DFID ", intent: %s flags %#Lo\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- ldlm_it2str(it->it_op), it->it_flags);
+ (int)op_data->op_namelen, op_data->op_name,
+ PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), it->it_flags);
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
if (IS_ERR(req))
return PTR_ERR(req);
- rc = mdc_enter_request(&obddev->u.cli);
+ rc = obd_get_request_slot(&obddev->u.cli);
if (rc != 0) {
ptlrpc_req_finished(req);
return rc;
@@ -1239,7 +1195,7 @@ int mdc_intent_getattr_async(struct obd_export *exp,
rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
0, LVB_T_NONE, &minfo->mi_lockh, 1);
if (rc < 0) {
- mdc_exit_request(&obddev->u.cli);
+ obd_put_request_slot(&obddev->u.cli);
ptlrpc_req_finished(req);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 5dba2c813857..c921e471fa27 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -86,7 +86,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(exp->exp_obd->obd_namespace,
NULL, &res_id, 0, 0);
- if (!res)
+ if (IS_ERR(res))
return 0;
LDLM_RESOURCE_ADDREF(res);
/* Initialize ibits lock policy. */
@@ -99,7 +99,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
}
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len,
+ void *ea, size_t ealen, void *ea2, size_t ea2len,
struct ptlrpc_request **request, struct md_open_data **mod)
{
LIST_HEAD(cancels);
@@ -110,11 +110,10 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
__u64 bits;
bits = MDS_INODELOCK_UPDATE;
- if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
+ if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
bits |= MDS_INODELOCK_LOOKUP;
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ (fid_is_sane(&op_data->op_fid1)))
count = mdc_resource_get_unused(exp, &op_data->op_fid1,
&cancels, LCK_EX, bits);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
@@ -177,8 +176,8 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- epoch->handle = body->handle;
- epoch->ioepoch = body->ioepoch;
+ epoch->handle = body->mbo_handle;
+ epoch->ioepoch = body->mbo_ioepoch;
req->rq_replay_cb = mdc_replay_open;
/** bug 3633, open may be committed and estale answer is not error */
} else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
@@ -197,9 +196,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
}
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
- const void *data, int datalen, int mode, __u32 uid, __u32 gid,
- cfs_cap_t cap_effective, __u64 rdev,
- struct ptlrpc_request **request)
+ const void *data, size_t datalen, umode_t mode,
+ uid_t uid, gid_t gid, cfs_cap_t cap_effective,
+ __u64 rdev, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
int level, rc;
@@ -214,11 +213,9 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
* mdc_fid_alloc() may return errno 1 in case of switch to new
* sequence, handle this.
*/
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
+ rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
+ if (rc < 0)
return rc;
- }
}
rebuild:
@@ -307,14 +304,12 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
LASSERT(!req);
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
- (fid_is_sane(&op_data->op_fid1)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ (fid_is_sane(&op_data->op_fid1)))
count = mdc_resource_get_unused(exp, &op_data->op_fid1,
&cancels, LCK_EX,
MDS_INODELOCK_UPDATE);
if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
- (fid_is_sane(&op_data->op_fid3)) &&
- !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ (fid_is_sane(&op_data->op_fid3)))
count += mdc_resource_get_unused(exp, &op_data->op_fid3,
&cancels, LCK_EX,
MDS_INODELOCK_FULL);
@@ -394,7 +389,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
}
int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
- const char *old, int oldlen, const char *new, int newlen,
+ const char *old, size_t oldlen, const char *new, size_t newlen,
struct ptlrpc_request **request)
{
LIST_HEAD(cancels);
@@ -431,7 +426,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
}
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, newlen+1);
+ req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT,
+ newlen + 1);
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 542801f04b0d..f56ea643f9bf 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -39,7 +39,9 @@
# include <linux/utsname.h>
#include "../include/lustre_acl.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/obd_class.h"
+#include "../include/lustre_lmv.h"
#include "../include/lustre_fid.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
@@ -57,16 +59,16 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req)
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
int rc;
- /* mdc_enter_request() ensures that this client has no more
+ /* obd_get_request_slot() ensures that this client has no more
* than cl_max_rpcs_in_flight RPCs simultaneously in flight
* against an MDT.
*/
- rc = mdc_enter_request(cli);
+ rc = obd_get_request_slot(cli);
if (rc != 0)
return rc;
rc = ptlrpc_queue_wait(req);
- mdc_exit_request(cli);
+ obd_put_request_slot(cli);
return rc;
}
@@ -98,7 +100,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
goto out;
}
- *rootfid = body->fid1;
+ *rootfid = body->mbo_fid1;
CDEBUG(D_NET,
"root fid="DFID", last_committed=%llu\n",
PFID(rootfid),
@@ -136,12 +138,12 @@ static int mdc_getattr_common(struct obd_export *exp,
if (!body)
return -EPROTO;
- CDEBUG(D_NET, "mode: %o\n", body->mode);
+ CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);
mdc_update_max_ea_from_body(exp, body);
- if (body->eadatasize != 0) {
+ if (body->mbo_eadatasize != 0) {
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
- body->eadatasize);
+ body->mbo_eadatasize);
if (!eadata)
return -EPROTO;
}
@@ -230,32 +232,6 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static int mdc_is_subdir(struct obd_export *exp,
- const struct lu_fid *pfid,
- const struct lu_fid *cfid,
- struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- *request = NULL;
- req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
- &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
- MDS_IS_SUBDIR);
- if (!req)
- return -ENOMEM;
-
- mdc_is_subdir_pack(req, pfid, cfid, 0);
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc && rc != -EREMOTE)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
static int mdc_xattr_common(struct obd_export *exp,
const struct req_format *fmt,
const struct lu_fid *fid,
@@ -397,15 +373,15 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
void *buf;
int rc;
- if (!body->aclsize)
+ if (!body->mbo_aclsize)
return 0;
- buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
+ buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize);
if (!buf)
return -EPROTO;
- acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
+ acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize);
if (!acl)
return 0;
@@ -443,24 +419,24 @@ static int mdc_get_lustre_md(struct obd_export *exp,
md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (md->body->valid & OBD_MD_FLEASIZE) {
+ if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
int lmmsize;
struct lov_mds_md *lmm;
- if (!S_ISREG(md->body->mode)) {
+ if (!S_ISREG(md->body->mbo_mode)) {
CDEBUG(D_INFO,
"OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
rc = -EPROTO;
goto out;
}
- if (md->body->eadatasize == 0) {
+ if (md->body->mbo_eadatasize == 0) {
CDEBUG(D_INFO,
"OBD_MD_FLEASIZE set, but eadatasize 0\n");
rc = -EPROTO;
goto out;
}
- lmmsize = md->body->eadatasize;
+ lmmsize = md->body->mbo_eadatasize;
lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
if (!lmm) {
rc = -EPROTO;
@@ -471,7 +447,7 @@ static int mdc_get_lustre_md(struct obd_export *exp,
if (rc < 0)
goto out;
- if (rc < sizeof(*md->lsm)) {
+ if (rc < (typeof(rc))sizeof(*md->lsm)) {
CDEBUG(D_INFO,
"lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
rc, (int)sizeof(*md->lsm));
@@ -479,24 +455,24 @@ static int mdc_get_lustre_md(struct obd_export *exp,
goto out;
}
- } else if (md->body->valid & OBD_MD_FLDIREA) {
+ } else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
int lmvsize;
struct lov_mds_md *lmv;
- if (!S_ISDIR(md->body->mode)) {
+ if (!S_ISDIR(md->body->mbo_mode)) {
CDEBUG(D_INFO,
"OBD_MD_FLDIREA set, should be a directory, but is not\n");
rc = -EPROTO;
goto out;
}
- if (md->body->eadatasize == 0) {
+ if (md->body->mbo_eadatasize == 0) {
CDEBUG(D_INFO,
"OBD_MD_FLDIREA is set, but eadatasize 0\n");
return -EPROTO;
}
- if (md->body->valid & OBD_MD_MEA) {
- lmvsize = md->body->eadatasize;
+ if (md->body->mbo_valid & OBD_MD_MEA) {
+ lmvsize = md->body->mbo_eadatasize;
lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
lmvsize);
if (!lmv) {
@@ -504,15 +480,15 @@ static int mdc_get_lustre_md(struct obd_export *exp,
goto out;
}
- rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv,
+ rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv,
lmvsize);
if (rc < 0)
goto out;
- if (rc < sizeof(*md->mea)) {
+ if (rc < (typeof(rc))sizeof(*md->lmv)) {
CDEBUG(D_INFO,
- "size too small: rc < sizeof(*md->mea) (%d < %d)\n",
- rc, (int)sizeof(*md->mea));
+ "size too small: rc < sizeof(*md->lmv) (%d < %d)\n",
+ rc, (int)sizeof(*md->lmv));
rc = -EPROTO;
goto out;
}
@@ -520,12 +496,12 @@ static int mdc_get_lustre_md(struct obd_export *exp,
}
rc = 0;
- if (md->body->valid & OBD_MD_FLACL) {
+ if (md->body->mbo_valid & OBD_MD_FLACL) {
/* for ACL, it's possible that FLACL is set but aclsize is zero.
* only when aclsize != 0 there's an actual segment for ACL
* in reply buffer.
*/
- if (md->body->aclsize) {
+ if (md->body->mbo_aclsize) {
rc = mdc_unpack_acl(req, md);
if (rc)
goto out;
@@ -580,9 +556,9 @@ void mdc_replay_open(struct ptlrpc_request *req)
file_fh = &och->och_fh;
CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
- file_fh->cookie, body->handle.cookie);
+ file_fh->cookie, body->mbo_handle.cookie);
old = *file_fh;
- *file_fh = body->handle;
+ *file_fh = body->mbo_handle;
}
close_req = mod->mod_close_req;
if (close_req) {
@@ -597,7 +573,7 @@ void mdc_replay_open(struct ptlrpc_request *req)
if (och)
LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
- epoch->handle = body->handle;
+ epoch->handle = body->mbo_handle;
}
}
@@ -679,11 +655,11 @@ int mdc_set_open_replay_data(struct obd_export *exp,
spin_unlock(&open_req->rq_lock);
}
- rec->cr_fid2 = body->fid1;
- rec->cr_ioepoch = body->ioepoch;
- rec->cr_old_handle.cookie = body->handle.cookie;
+ rec->cr_fid2 = body->mbo_fid1;
+ rec->cr_ioepoch = body->mbo_ioepoch;
+ rec->cr_old_handle.cookie = body->mbo_handle.cookie;
open_req->rq_replay_cb = mdc_replay_open;
- if (!fid_is_sane(&body->fid1)) {
+ if (!fid_is_sane(&body->mbo_fid1)) {
DEBUG_REQ(D_ERROR, open_req,
"Saving replay request with insane fid");
LBUG();
@@ -701,9 +677,15 @@ static void mdc_free_open(struct md_open_data *mod)
imp_connect_disp_stripe(mod->mod_open_req->rq_import))
committed = 1;
- LASSERT(mod->mod_open_req->rq_replay == 0);
-
- DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
+ /*
+ * No reason to assert here if the open request has
+ * rq_replay == 1. It means that mdc_close failed, and the
+ * close request wasn't sent. It is not fatal to the client.
+ * The worst thing is eviction if the client gets an open lock.
+ */
+ DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
+ "free open request rq_replay = %d\n",
+ mod->mod_open_req->rq_replay);
ptlrpc_request_committed(mod->mod_open_req, committed);
if (mod->mod_close_req)
@@ -744,7 +726,7 @@ static void mdc_close_handle_reply(struct ptlrpc_request *req,
epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
epoch->flags |= MF_SOM_AU;
- if (repbody->valid & OBD_MD_FLGETATTRLOCK)
+ if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
op_data->op_flags |= MF_GETATTR_LOCK;
}
}
@@ -763,7 +745,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
req_fmt = &RQF_MDS_RELEASE_CLOSE;
/* allocate a FID for volatile file */
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc < 0) {
CERROR("%s: "DFID" failed to allocate FID: %d\n",
obd->obd_name, PFID(&op_data->op_fid1), rc);
@@ -773,22 +755,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
}
*request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
- * portal whose threads are not taking any DLM locks and are therefore
- * always progressing
- */
- req->rq_request_portal = MDS_READPAGE_PORTAL;
- ptlrpc_at_set_req_timeout(req);
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))
+ req = NULL;
+ else
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
/* Ensure that this close's handle is fixed up during replay. */
if (likely(mod)) {
@@ -809,6 +779,29 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
CDEBUG(D_HA,
"couldn't find open req; expecting close error\n");
}
+ if (!req) {
+ /*
+ * TODO: repeat close after errors
+ */
+ CWARN("%s: close of FID "DFID" failed, file reference will be dropped when this client unmounts or is evicted\n",
+ obd->obd_name, PFID(&op_data->op_fid1));
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ goto out;
+ }
+
+ /*
+ * To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
+ * portal whose threads are not taking any DLM locks and are therefore
+ * always progressing
+ */
+ req->rq_request_portal = MDS_READPAGE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
mdc_close_pack(req, op_data);
@@ -854,6 +847,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
}
}
+out:
if (mod) {
if (rc != 0)
mod->mod_close_req = NULL;
@@ -936,16 +930,17 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
- struct page **pages, struct ptlrpc_request **request)
+static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
+ u64 offset, struct page **pages, int npages,
+ struct ptlrpc_request **request)
{
- struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
- int i;
- wait_queue_head_t waitq;
- int resends = 0;
- struct l_wait_info lwi;
- int rc;
+ struct ptlrpc_request *req;
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi;
+ int resends = 0;
+ int rc;
+ int i;
*request = NULL;
init_waitqueue_head(&waitq);
@@ -964,7 +959,7 @@ restart_bulk:
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
+ desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
MDS_BULK_PORTAL);
if (!desc) {
ptlrpc_request_free(req);
@@ -972,12 +967,10 @@ restart_bulk:
}
/* NB req now owns desc and will free it when it gets freed */
- for (i = 0; i < op_data->op_npages; i++)
+ for (i = 0; i < npages; i++)
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
- mdc_readdir_pack(req, op_data->op_offset,
- PAGE_SIZE * op_data->op_npages,
- &op_data->op_fid1);
+ mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@@ -988,11 +981,12 @@ restart_bulk:
resends++;
if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
- CERROR("too many resend retries, returning error\n");
+ CERROR("%s: too many resend retries: rc = %d\n",
+ exp->exp_obd->obd_name, -EIO);
return -EIO;
}
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends),
- NULL, NULL, NULL);
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
+ NULL);
l_wait_event(waitq, 0, &lwi);
goto restart_bulk;
@@ -1006,9 +1000,9 @@ restart_bulk:
}
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
- CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
- req->rq_bulk->bd_nob_transferred,
- PAGE_SIZE * op_data->op_npages);
+ CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
+ exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
+ PAGE_SIZE * npages);
ptlrpc_req_finished(req);
return -EPROTO;
}
@@ -1017,6 +1011,453 @@ restart_bulk:
return 0;
}
+static void mdc_release_page(struct page *page, int remove)
+{
+ if (remove) {
+ lock_page(page);
+ if (likely(page->mapping))
+ truncate_complete_page(page->mapping, page);
+ unlock_page(page);
+ }
+ put_page(page);
+}
+
+static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
+ __u64 *start, __u64 *end, int hash64)
+{
+ /*
+ * Complement of hash is used as an index so that
+ * radix_tree_gang_lookup() can be used to find a page with starting
+ * hash _smaller_ than one we are looking for.
+ */
+ unsigned long offset = hash_x_index(*hash, hash64);
+ struct page *page;
+ int found;
+
+ spin_lock_irq(&mapping->tree_lock);
+ found = radix_tree_gang_lookup(&mapping->page_tree,
+ (void **)&page, offset, 1);
+ if (found > 0 && !radix_tree_exceptional_entry(page)) {
+ struct lu_dirpage *dp;
+
+ get_page(page);
+ spin_unlock_irq(&mapping->tree_lock);
+ /*
+ * In contrast to find_lock_page() we are sure that directory
+ * page cannot be truncated (while DLM lock is held) and,
+ * hence, can avoid restart.
+ *
+ * In fact, page cannot be locked here at all, because
+ * mdc_read_page_remote does synchronous io.
+ */
+ wait_on_page_locked(page);
+ if (PageUptodate(page)) {
+ dp = kmap(page);
+ if (BITS_PER_LONG == 32 && hash64) {
+ *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+ *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
+ *hash = *hash >> 32;
+ } else {
+ *start = le64_to_cpu(dp->ldp_hash_start);
+ *end = le64_to_cpu(dp->ldp_hash_end);
+ }
+ if (unlikely(*start == 1 && *hash == 0))
+ *hash = *start;
+ else
+ LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
+ *start, *end, *hash);
+ CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx], hash %#llx\n",
+ offset, *start, *end, *hash);
+ if (*hash > *end) {
+ kunmap(page);
+ mdc_release_page(page, 0);
+ page = NULL;
+ } else if (*end != *start && *hash == *end) {
+ /*
+ * upon hash collision, remove this page,
+ * otherwise put page reference, and
+ * mdc_read_page_remote() will issue RPC to
+ * fetch the page we want.
+ */
+ kunmap(page);
+ mdc_release_page(page,
+ le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+ page = NULL;
+ }
+ } else {
+ put_page(page);
+ page = ERR_PTR(-EIO);
+ }
+ } else {
+ spin_unlock_irq(&mapping->tree_lock);
+ page = NULL;
+ }
+ return page;
+}
+
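Editor's note: mdc_page_locate() above relies on directory pages being indexed by the bitwise complement of their starting hash, so an ascending lookup starting at ~wanted returns the cached page whose hash range starts at or below the wanted hash. The standalone sketch below illustrates that trick over a plain array standing in for the radix tree; every name in it is hypothetical.

/* Illustrative only: pages indexed by ~start_hash, so the smallest index
 * that is >= ~wanted corresponds to the largest start_hash <= wanted.
 */
#include <stdint.h>
#include <stdio.h>

struct dir_page {
	uint64_t start;   /* ldp_hash_start analogue */
	uint64_t end;     /* ldp_hash_end analogue */
};

static uint64_t page_index(uint64_t start_hash)
{
	return ~0ULL - start_hash;      /* larger hash -> smaller index */
}

/* Find the cached page covering 'wanted': largest start <= wanted. */
static const struct dir_page *locate(const struct dir_page *pages, int n,
				     uint64_t wanted)
{
	uint64_t first_index = page_index(wanted);
	const struct dir_page *best = NULL;

	for (int i = 0; i < n; i++) {
		/* gang-lookup analogue: smallest index >= first_index */
		if (page_index(pages[i].start) >= first_index &&
		    (!best ||
		     page_index(pages[i].start) < page_index(best->start)))
			best = &pages[i];
	}
	return best && wanted <= best->end ? best : NULL;
}

int main(void)
{
	const struct dir_page cache[] = {
		{ .start = 0,   .end = 99  },
		{ .start = 100, .end = 199 },
		{ .start = 200, .end = 299 },
	};
	const struct dir_page *p = locate(cache, 3, 150);

	if (p)
		printf("hash 150 served by page [%llu, %llu]\n",
		       (unsigned long long)p->start,
		       (unsigned long long)p->end);
	return 0;
}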
+/*
+ * Adjust a set of pages, each page containing an array of lu_dirpages,
+ * so that each page can be used as a single logical lu_dirpage.
+ *
+ * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
+ * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
+ * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
+ * value is used as a cookie to request the next lu_dirpage in a
+ * directory listing that spans multiple pages (two in this example):
+ * ________
+ * | |
+ * .|--------v------- -----.
+ * |s|e|f|p|ent|ent| ... |ent|
+ * '--|-------------- -----' Each PAGE contains a single
+ * '------. lu_dirpage.
+ * .---------v------- -----.
+ * |s|e|f|p|ent| 0 | ... | 0 |
+ * '----------------- -----'
+ *
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
+ * larger than LU_PAGE_SIZE, a single host page may contain multiple
+ * lu_dirpages. After reading the lu_dirpages from the MDS, the
+ * ldp_hash_end of the first lu_dirpage refers to the one immediately
+ * after it in the same PAGE (arrows simplified for brevity, but
+ * in general e0==s1, e1==s2, etc.):
+ *
+ * .-------------------- -----.
+ * |s0|e0|f0|p|ent|ent| ... |ent|
+ * |---v---------------- -----|
+ * |s1|e1|f1|p|ent|ent| ... |ent|
+ * |---v---------------- -----| Here, each PAGE contains
+ * ... multiple lu_dirpages.
+ * |---v---------------- -----|
+ * |s'|e'|f'|p|ent|ent| ... |ent|
+ * '---|---------------- -----'
+ * v
+ * .----------------------------.
+ * | next PAGE |
+ *
+ * This structure is transformed into a single logical lu_dirpage as follows:
+ *
+ * - Replace e0 with e' so the request for the next lu_dirpage gets the page
+ * labeled 'next PAGE'.
+ *
+ * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
+ * a hash collision with the next page exists.
+ *
+ * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
+ * to the first entry of the next lu_dirpage.
+ */
+#if PAGE_SIZE > LU_PAGE_SIZE
+static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
+{
+ int i;
+
+ for (i = 0; i < cfs_pgs; i++) {
+ struct lu_dirpage *dp = kmap(pages[i]);
+ __u64 hash_end = le64_to_cpu(dp->ldp_hash_end);
+ __u32 flags = le32_to_cpu(dp->ldp_flags);
+ struct lu_dirpage *first = dp;
+ struct lu_dirent *end_dirent = NULL;
+ struct lu_dirent *ent;
+
+ while (--lu_pgs > 0) {
+ ent = lu_dirent_start(dp);
+ for (end_dirent = ent; ent;
+ end_dirent = ent, ent = lu_dirent_next(ent));
+
+ /* Advance dp to next lu_dirpage. */
+ dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
+
+ /* Check if we've reached the end of the CFS_PAGE. */
+ if (!((unsigned long)dp & ~PAGE_MASK))
+ break;
+
+ /* Save the hash and flags of this lu_dirpage. */
+ hash_end = le64_to_cpu(dp->ldp_hash_end);
+ flags = le32_to_cpu(dp->ldp_flags);
+
+ /* Check if lu_dirpage contains no entries. */
+ if (!end_dirent)
+ break;
+
+ /*
+ * Enlarge the end entry lde_reclen from 0 to
+ * first entry of next lu_dirpage.
+ */
+ LASSERT(!le16_to_cpu(end_dirent->lde_reclen));
+ end_dirent->lde_reclen =
+ cpu_to_le16((char *)(dp->ldp_entries) -
+ (char *)end_dirent);
+ }
+
+ first->ldp_hash_end = hash_end;
+ first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
+ first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
+
+ kunmap(pages[i]);
+ }
+ LASSERTF(lu_pgs == 0, "left = %d", lu_pgs);
+}
+#else
+#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
+
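Editor's note: the diagram above mdc_adjust_dirpages() describes collapsing several on-wire lu_dirpages that share one host page into a single logical one whose end hash and LDF_COLLIDE flag come from the last sub-page. The reduced model below shows just that transformation; the struct, field and macro names are invented for the sketch, and the lde_reclen stitching of directory entries is left out.

/* Reduced model of the e0 <- e' adjustment described above. */
#include <stdint.h>
#include <stdio.h>

#define EX_LDF_COLLIDE 0x1u

struct mini_dirpage {
	uint64_t hash_start;
	uint64_t hash_end;
	uint32_t flags;
};

/* Collapse nr sub-pages packed into one host page into one logical page:
 * keep the first start hash, take the end hash and collision flag from
 * the last sub-page. The real code also extends each sub-page's last
 * lde_reclen so entry iteration runs straight into the next sub-page.
 */
static void merge_subpages(struct mini_dirpage *sub, int nr)
{
	struct mini_dirpage *first = &sub[0];
	const struct mini_dirpage *last = &sub[nr - 1];

	first->hash_end = last->hash_end;
	first->flags &= ~EX_LDF_COLLIDE;
	first->flags |= last->flags & EX_LDF_COLLIDE;
}

int main(void)
{
	struct mini_dirpage sub[4] = {
		{ .hash_start = 0,   .hash_end = 99,  .flags = 0 },
		{ .hash_start = 100, .hash_end = 199, .flags = 0 },
		{ .hash_start = 200, .hash_end = 299, .flags = 0 },
		{ .hash_start = 300, .hash_end = 399, .flags = EX_LDF_COLLIDE },
	};

	merge_subpages(sub, 4);
	printf("logical page covers [%llu, %llu], collide flag %u\n",
	       (unsigned long long)sub[0].hash_start,
	       (unsigned long long)sub[0].hash_end, sub[0].flags);
	return 0;
}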
+/* parameters for readdir page */
+struct readpage_param {
+ struct md_op_data *rp_mod;
+ __u64 rp_off;
+ int rp_hash64;
+ struct obd_export *rp_exp;
+ struct md_callback *rp_cb;
+};
+
+/**
+ * Read pages from server.
+ *
+ * Pages in the MDS_READPAGE RPC are packed in LU_PAGE_SIZE units, and each
+ * carries a lu_dirpage header describing its start/end hash and whether the
+ * page is empty (contains no dir entry) or its hash collides with the next
+ * page. After the client receives the reply, several such pages are
+ * integrated into one dir page of PAGE_SIZE (if PAGE_SIZE is greater than
+ * LU_PAGE_SIZE), and the lu_dirpage for that integrated page is adjusted.
+ **/
+static int mdc_read_page_remote(void *data, struct page *page0)
+{
+ struct readpage_param *rp = data;
+ struct page **page_pool;
+ struct page *page;
+ struct lu_dirpage *dp;
+ int rd_pgs = 0; /* number of pages read actually */
+ int npages;
+ struct md_op_data *op_data = rp->rp_mod;
+ struct ptlrpc_request *req;
+ int max_pages = op_data->op_max_pages;
+ struct inode *inode;
+ struct lu_fid *fid;
+ int i;
+ int rc;
+
+ LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);
+ inode = op_data->op_data;
+ fid = &op_data->op_fid1;
+ LASSERT(inode);
+
+ page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
+ if (page_pool) {
+ page_pool[0] = page0;
+ } else {
+ page_pool = &page0;
+ max_pages = 1;
+ }
+
+ for (npages = 1; npages < max_pages; npages++) {
+ page = page_cache_alloc_cold(inode->i_mapping);
+ if (!page)
+ break;
+ page_pool[npages] = page;
+ }
+
+ rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
+ if (!rc) {
+ int lu_pgs = req->rq_bulk->bd_nob_transferred;
+
+ rd_pgs = (req->rq_bulk->bd_nob_transferred +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+ lu_pgs >>= LU_PAGE_SHIFT;
+ LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
+
+ CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs);
+
+ mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);
+
+ SetPageUptodate(page0);
+ }
+
+ unlock_page(page0);
+ ptlrpc_req_finished(req);
+ CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
+ for (i = 1; i < npages; i++) {
+ unsigned long offset;
+ __u64 hash;
+ int ret;
+
+ page = page_pool[i];
+
+ if (rc < 0 || i >= rd_pgs) {
+ put_page(page);
+ continue;
+ }
+
+ SetPageUptodate(page);
+
+ dp = kmap(page);
+ hash = le64_to_cpu(dp->ldp_hash_start);
+ kunmap(page);
+
+ offset = hash_x_index(hash, rp->rp_hash64);
+
+ prefetchw(&page->flags);
+ ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
+ GFP_KERNEL);
+ if (!ret)
+ unlock_page(page);
+ else
+ CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: rc = %d\n",
+ offset, ret);
+ put_page(page);
+ }
+
+ if (page_pool != &page0)
+ kfree(page_pool);
+
+ return rc;
+}
+
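Editor's note: the bulk reply size drives two different page counts in mdc_read_page_remote() above: host pages, rounded up to PAGE_SIZE, and on-wire lu_dirpages, exact multiples of LU_PAGE_SIZE. A small arithmetic sketch follows; the shifts are illustrative assumptions for a 64K-page host, not values taken from a particular build.

/* Sketch of the rd_pgs / lu_pgs arithmetic with assumed page sizes. */
#include <stdio.h>

#define EX_PAGE_SHIFT    16                    /* e.g. a 64K-page host */
#define EX_PAGE_SIZE     (1UL << EX_PAGE_SHIFT)
#define EX_LU_PAGE_SHIFT 12                    /* LU_PAGE_SIZE is 4K */
#define EX_LU_PAGE_SIZE  (1UL << EX_LU_PAGE_SHIFT)

int main(void)
{
	unsigned long nob = 5 * EX_LU_PAGE_SIZE; /* bytes transferred */
	unsigned long rd_pgs = (nob + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
	unsigned long lu_pgs = nob >> EX_LU_PAGE_SHIFT;

	printf("%lu bytes -> %lu host page(s), %lu lu_dirpage(s)\n",
	       nob, rd_pgs, lu_pgs);
	return 0;
}

With these numbers, 20480 transferred bytes land in one host page but carry five lu_dirpages, which is exactly the case mdc_adjust_dirpages() has to repair.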
+/**
+ * Read a dir page from the cache first; if it is not there, read it from
+ * the server and add it to the cache.
+ *
+ * \param[in] exp MDC export
+ * \param[in] op_data client MD stack parameters, transferring parameters
+ * between different layers of the client MD stack.
+ * \param[in] cb_op callback required for ldlm lock enqueue during
+ * read page
+ * \param[in] hash_offset the hash offset of the page to be read
+ * \param[out] ppage the page read in, or NULL on error
+ *
+ * \retval 0 the page was read successfully
+ * \retval negative errno the page could not be read
+ */
+static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_callback *cb_op, __u64 hash_offset,
+ struct page **ppage)
+{
+ struct lookup_intent it = { .it_op = IT_READDIR };
+ struct page *page;
+ struct inode *dir = op_data->op_data;
+ struct address_space *mapping;
+ struct lu_dirpage *dp;
+ __u64 start = 0;
+ __u64 end = 0;
+ struct lustre_handle lockh;
+ struct ptlrpc_request *enq_req = NULL;
+ struct readpage_param rp_param;
+ int rc;
+
+ *ppage = NULL;
+
+ LASSERT(dir);
+ mapping = dir->i_mapping;
+
+ rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
+ cb_op->md_blocking_ast, 0);
+ if (enq_req)
+ ptlrpc_req_finished(enq_req);
+
+ if (rc < 0) {
+ CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
+ exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
+ return rc;
+ }
+
+ rc = 0;
+ lockh.cookie = it.it_lock_handle;
+ mdc_set_lock_data(exp, &lockh, dir, NULL);
+
+ rp_param.rp_off = hash_offset;
+ rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
+ page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
+ rp_param.rp_hash64);
+ if (IS_ERR(page)) {
+ CDEBUG(D_INFO, "%s: dir page locate: " DFID " at %llu: rc %ld\n",
+ exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+ rp_param.rp_off, PTR_ERR(page));
+ rc = PTR_ERR(page);
+ goto out_unlock;
+ } else if (page) {
+ /*
+ * XXX nikita: not entirely correct handling of a corner case:
+ * suppose hash chain of entries with hash value HASH crosses
+ * border between pages P0 and P1. First both P0 and P1 are
+ * cached, seekdir() is called for some entry from the P0 part
+ * of the chain. Later P0 goes out of cache. telldir(HASH)
+ * happens and finds P1, as it starts with matching hash
+ * value. Remaining entries from P0 part of the chain are
+ * skipped. (Is that really a bug?)
+ *
+ * Possible solutions: 0. don't cache P1 in such a case, handle
+ * it as an "overflow" page. 1. invalidate all pages at
+ * once. 2. use HASH|1 as an index for P1.
+ */
+ goto hash_collision;
+ }
+
+ rp_param.rp_exp = exp;
+ rp_param.rp_mod = op_data;
+ page = read_cache_page(mapping,
+ hash_x_index(rp_param.rp_off,
+ rp_param.rp_hash64),
+ mdc_read_page_remote, &rp_param);
+ if (IS_ERR(page)) {
+ CERROR("%s: read cache page: "DFID" at %llu: rc %ld\n",
+ exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+ rp_param.rp_off, PTR_ERR(page));
+ rc = PTR_ERR(page);
+ goto out_unlock;
+ }
+
+ wait_on_page_locked(page);
+ (void)kmap(page);
+ if (!PageUptodate(page)) {
+ CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
+ exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+ rp_param.rp_off, -5);
+ goto fail;
+ }
+ if (!PageChecked(page))
+ SetPageChecked(page);
+ if (PageError(page)) {
+ CERROR("%s: page error: "DFID" at %llu: rc %d\n",
+ exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+ rp_param.rp_off, -5);
+ goto fail;
+ }
+
+hash_collision:
+ dp = page_address(page);
+ if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
+ start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+ end = le64_to_cpu(dp->ldp_hash_end) >> 32;
+ rp_param.rp_off = hash_offset >> 32;
+ } else {
+ start = le64_to_cpu(dp->ldp_hash_start);
+ end = le64_to_cpu(dp->ldp_hash_end);
+ rp_param.rp_off = hash_offset;
+ }
+ if (end == start) {
+ LASSERT(start == rp_param.rp_off);
+ CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
+#if BITS_PER_LONG == 32
+ CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
+ le64_to_cpu(dp->ldp_hash_start),
+ le64_to_cpu(dp->ldp_hash_end), hash_offset);
+#endif
+ /*
+ * Fetch whole overflow chain...
+ *
+ * XXX not yet.
+ */
+ goto fail;
+ }
+ *ppage = page;
+out_unlock:
+ ldlm_lock_decref(&lockh, it.it_lock_mode);
+ return rc;
+fail:
+ kunmap(page);
+ mdc_release_page(page, 1);
+ rc = -EIO;
+ goto out_unlock;
+}
+
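Editor's note: mdc_read_page() above is a read-through cache. It takes the DLM lock, tries mdc_page_locate() against the page cache, and only falls back to read_cache_page()/mdc_read_page_remote() on a miss. Stripped of locking and hashing, the control flow reduces to the sketch below; the helpers and the array-backed store are illustrative, not Lustre APIs.

/* Minimal read-through cache in the shape of mdc_read_page(): look in a
 * local cache first, fetch on a miss, insert, return.
 */
#include <stdio.h>

#define CACHE_SLOTS 8
#define EMPTY (-1L)

static long cache_key[CACHE_SLOTS];
static int  cache_val[CACHE_SLOTS];

static int *lookup_cached(long key)
{
	for (int i = 0; i < CACHE_SLOTS; i++)
		if (cache_key[i] == key)
			return &cache_val[i];
	return NULL;
}

static int fetch_remote(long key)
{
	printf("miss: fetching page %ld from the server\n", key);
	return (int)(key * 100);               /* stand-in for page contents */
}

static void insert_cache(long key, int val)
{
	static int next;

	cache_key[next] = key;
	cache_val[next] = val;
	next = (next + 1) % CACHE_SLOTS;
}

static int read_page(long key)
{
	int *cached = lookup_cached(key);

	if (cached)
		return *cached;               /* mdc_page_locate() hit */
	insert_cache(key, fetch_remote(key)); /* read_cache_page() path */
	return *lookup_cached(key);
}

int main(void)
{
	for (int i = 0; i < CACHE_SLOTS; i++)
		cache_key[i] = EMPTY;
	printf("first read: %d\n", read_page(3));
	printf("second read: %d\n", read_page(3));   /* served from cache */
	return 0;
}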
static int mdc_statfs(const struct lu_env *env,
struct obd_export *exp, struct obd_statfs *osfs,
__u64 max_age, __u32 flags)
@@ -1401,7 +1842,7 @@ out:
return rc;
}
-static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
+static struct kuc_hdr *changelog_kuc_hdr(char *buf, size_t len, u32 flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
@@ -1415,40 +1856,44 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
return lh;
}
-#define D_CHANGELOG 0
-
struct changelog_show {
__u64 cs_startrec;
- __u32 cs_flags;
+ enum changelog_send_flag cs_flags;
struct file *cs_fp;
char *cs_buf;
struct obd_device *cs_obd;
};
+static inline char *cs_obd_name(struct changelog_show *cs)
+{
+ return cs->cs_obd->obd_name;
+}
+
static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
struct llog_rec_hdr *hdr, void *data)
{
struct changelog_show *cs = data;
struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
struct kuc_hdr *lh;
- int len, rc;
+ size_t len;
+ int rc;
if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
rc = -EINVAL;
CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
- cs->cs_obd->obd_name, rec->cr_hdr.lrh_type,
+ cs_obd_name(cs), rec->cr_hdr.lrh_type,
rec->cr.cr_type, rc);
return rc;
}
if (rec->cr.cr_index < cs->cs_startrec) {
/* Skip entries earlier than what we are interested in */
- CDEBUG(D_CHANGELOG, "rec=%llu start=%llu\n",
+ CDEBUG(D_HSM, "rec=%llu start=%llu\n",
rec->cr.cr_index, cs->cs_startrec);
return 0;
}
- CDEBUG(D_CHANGELOG, "%llu %02d%-5s %llu 0x%x t="DFID" p="DFID
+ CDEBUG(D_HSM, "%llu %02d%-5s %llu 0x%x t=" DFID " p=" DFID
" %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
rec->cr.cr_flags & CLF_FLAGMASK,
@@ -1462,20 +1907,21 @@ static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
- CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len, rc);
+ CDEBUG(D_HSM, "kucmsg fp %p len %zu rc %d\n", cs->cs_fp, len, rc);
return rc;
}
static int mdc_changelog_send_thread(void *csdata)
{
+ enum llog_flag flags = LLOG_F_IS_CAT;
struct changelog_show *cs = csdata;
struct llog_ctxt *ctxt = NULL;
struct llog_handle *llh = NULL;
struct kuc_hdr *kuch;
int rc;
- CDEBUG(D_CHANGELOG, "changelog to fp=%p start %llu\n",
+ CDEBUG(D_HSM, "changelog to fp=%p start %llu\n",
cs->cs_fp, cs->cs_startrec);
cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS);
@@ -1494,10 +1940,14 @@ static int mdc_changelog_send_thread(void *csdata)
LLOG_OPEN_EXISTS);
if (rc) {
CERROR("%s: fail to open changelog catalog: rc = %d\n",
- cs->cs_obd->obd_name, rc);
+ cs_obd_name(cs), rc);
goto out;
}
- rc = llog_init_handle(NULL, llh, LLOG_F_IS_CAT, NULL);
+
+ if (cs->cs_flags & CHANGELOG_FLAG_JOBID)
+ flags |= LLOG_F_EXT_JOBID;
+
+ rc = llog_init_handle(NULL, llh, flags, NULL);
if (rc) {
CERROR("llog_init_handle failed %d\n", rc);
goto out;
@@ -1550,12 +2000,12 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("%s: can't start changelog thread: rc = %d\n",
- obd->obd_name, rc);
+ cs_obd_name(cs), rc);
kfree(cs);
} else {
rc = 0;
- CDEBUG(D_CHANGELOG, "%s: started changelog thread\n",
- obd->obd_name);
+ CDEBUG(D_HSM, "%s: started changelog thread\n",
+ cs_obd_name(cs));
}
CERROR("Failed to start changelog thread: %d\n", rc);
@@ -1669,9 +2119,11 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
* with the request RPC to avoid extra RPC round trips
*/
count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT);
+ LCK_CR, MDS_INODELOCK_LAYOUT |
+ MDS_INODELOCK_XATTR);
count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
- LCK_CR, MDS_INODELOCK_LAYOUT);
+ LCK_CR, MDS_INODELOCK_LAYOUT |
+ MDS_INODELOCK_XATTR);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_SWAP_LAYOUTS);
@@ -1917,7 +2369,7 @@ static void lustre_swab_hai(struct hsm_action_item *h)
static void lustre_swab_hal(struct hsm_action_list *h)
{
struct hsm_action_item *hai;
- int i;
+ u32 i;
__swab32s(&h->hal_version);
__swab32s(&h->hal_count);
@@ -1966,14 +2418,14 @@ static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
* @param val KUC message (kuc_hdr + hsm_action_list)
* @param len total length of message
*/
-static int mdc_hsm_copytool_send(int len, void *val)
+static int mdc_hsm_copytool_send(size_t len, void *val)
{
struct kuc_hdr *lh = (struct kuc_hdr *)val;
struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
if (len < sizeof(*lh) + sizeof(*hal)) {
- CERROR("Short HSM message %d < %d\n", len,
- (int)(sizeof(*lh) + sizeof(*hal)));
+ CERROR("Short HSM message %zu < %zu\n", len,
+ sizeof(*lh) + sizeof(*hal));
return -EPROTO;
}
if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
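Editor's note: the length check at the top of mdc_hsm_copytool_send() only accepts a buffer large enough to hold the kuc_hdr and the hsm_action_list that immediately follows it. The sketch below repeats that check with simplified stand-in structures; none of the ex_* names or field layouts are the Lustre definitions.

/* Hypothetical sketch of validating a header-plus-payload KUC message. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ex_kuc_hdr {
	uint16_t kuc_magic;
	uint8_t  kuc_transport;
	uint8_t  kuc_flags;
	uint16_t kuc_msgtype;
	uint16_t kuc_msglen;
};

struct ex_hsm_action_list {
	uint32_t hal_version;
	uint32_t hal_count;
	/* hsm_action_item records follow */
};

static int check_copytool_msg(const void *val, size_t len)
{
	const struct ex_kuc_hdr *lh = val;

	if (len < sizeof(*lh) + sizeof(struct ex_hsm_action_list)) {
		fprintf(stderr, "short HSM message %zu < %zu\n", len,
			sizeof(*lh) + sizeof(struct ex_hsm_action_list));
		return -1;                     /* -EPROTO in the driver */
	}
	if (lh->kuc_msglen > len) {
		fprintf(stderr, "header claims %u bytes, only %zu given\n",
			(unsigned int)lh->kuc_msglen, len);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0 };

	printf("full buffer: %d\n", check_copytool_msg(buf, sizeof(buf)));
	printf("truncated:   %d\n", check_copytool_msg(buf, 4));
	return 0;
}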
@@ -2044,9 +2496,8 @@ static int mdc_set_info_async(const struct lu_env *env,
}
spin_unlock(&imp->imp_lock);
- rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
- keylen, key, vallen, val, set);
- return rc;
+ return do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
+ keylen, key, vallen, val, set);
}
if (KEY_IS(KEY_SPTLRPC_CONF)) {
sptlrpc_conf_client_adapt(exp->exp_obd);
@@ -2065,6 +2516,12 @@ static int mdc_set_info_async(const struct lu_env *env,
rc = mdc_hsm_copytool_send(vallen, val);
return rc;
}
+ if (KEY_IS(KEY_DEFAULT_EASIZE)) {
+ u32 *default_easize = val;
+
+ exp->exp_obd->u.cli.cl_default_mds_easize = *default_easize;
+ return 0;
+ }
CERROR("Unknown key %s\n", (char *)key);
return -EINVAL;
@@ -2077,18 +2534,18 @@ static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
int rc = -EINVAL;
if (KEY_IS(KEY_MAX_EASIZE)) {
- int mdsize, *max_easize;
+ u32 mdsize, *max_easize;
if (*vallen != sizeof(int))
return -EINVAL;
- mdsize = *(int *)val;
+ mdsize = *(u32 *)val;
if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
max_easize = val;
*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
return 0;
} else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
- int *default_easize;
+ u32 *default_easize;
if (*vallen != sizeof(int))
return -EINVAL;
@@ -2105,7 +2562,7 @@ static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
*data = imp->imp_connect_data;
return 0;
} else if (KEY_IS(KEY_TGT_COUNT)) {
- *((int *)val) = 1;
+ *((u32 *)val) = 1;
return 0;
}
@@ -2199,13 +2656,13 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
return rc;
}
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct lu_client_seq *seq = cli->cl_seq;
- return seq_client_alloc_fid(NULL, seq, fid);
+ return seq_client_alloc_fid(env, seq, fid);
}
static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
@@ -2333,8 +2790,8 @@ err_rpc_lock:
* a large number of stripes is possible. If a larger reply buffer is
* required it will be reallocated in the ptlrpc layer due to overflow.
*/
-static int mdc_init_ea_size(struct obd_export *exp, int easize,
- int def_easize, int cookiesize, int def_cookiesize)
+static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
+ u32 cookiesize, u32 def_cookiesize)
{
struct obd_device *obd = exp->exp_obd;
struct client_obd *cli = &obd->u.cli;
@@ -2430,7 +2887,6 @@ static struct obd_ops mdc_obd_ops = {
static struct md_ops mdc_md_ops = {
.getstatus = mdc_getstatus,
.null_inode = mdc_null_inode,
- .find_cbdata = mdc_find_cbdata,
.close = mdc_close,
.create = mdc_create,
.done_writing = mdc_done_writing,
@@ -2439,13 +2895,12 @@ static struct md_ops mdc_md_ops = {
.getattr_name = mdc_getattr_name,
.intent_lock = mdc_intent_lock,
.link = mdc_link,
- .is_subdir = mdc_is_subdir,
.rename = mdc_rename,
.setattr = mdc_setattr,
.setxattr = mdc_setxattr,
.getxattr = mdc_getxattr,
.sync = mdc_sync,
- .readpage = mdc_readpage,
+ .read_page = mdc_read_page,
.unlink = mdc_unlink,
.cancel_unused = mdc_cancel_unused,
.init_ea_size = mdc_init_ea_size,
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 9d0bd4745865..23374cae5133 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -549,8 +549,9 @@ static int mgc_requeue_thread(void *data)
* caused the lock revocation to finish its setup, plus some
* random so everyone doesn't try to reconnect at once.
*/
- to = MGC_TIMEOUT_MIN_SECONDS * HZ;
- to += rand * HZ / 100; /* rand is centi-seconds */
+ to = msecs_to_jiffies(MGC_TIMEOUT_MIN_SECONDS * MSEC_PER_SEC);
+ /* rand is centi-seconds */
+ to += msecs_to_jiffies(rand * MSEC_PER_SEC / 100);
lwi = LWI_TIMEOUT(to, NULL, NULL);
l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
&lwi);
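Editor's note: the requeue thread above converts its delay to jiffies explicitly and adds a random centi-second component so that clients whose locks were revoked together do not all reconnect at the same instant. The same jittered-delay calculation in plain C, with constants chosen only for illustration:

/* Jittered requeue delay in the spirit of mgc_requeue_thread(). */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define EX_TIMEOUT_MIN_SECONDS   5
#define EX_TIMEOUT_RAND_CENTISEC 500   /* up to 5 extra seconds */

static long requeue_delay_ms(void)
{
	long rand_cs = rand() % EX_TIMEOUT_RAND_CENTISEC;  /* centi-seconds */

	return EX_TIMEOUT_MIN_SECONDS * 1000L + rand_cs * 1000L / 100L;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	for (int i = 0; i < 3; i++)
		printf("client %d sleeps %ld ms before requeueing\n",
		       i, requeue_delay_ms());
	return 0;
}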
@@ -1158,7 +1159,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
while (datalen > 0) {
int entry_len = sizeof(*entry);
- int is_ost;
+ int is_ost, i;
struct obd_device *obd;
char *obdname;
char *cname;
@@ -1264,11 +1265,17 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
continue;
}
- /* TODO: iterate all nids to find one */
+ /* iterate all nids to find one */
/* find uuid by nid */
- rc = client_import_find_conn(obd->u.cli.cl_import,
- entry->u.nids[0],
- (struct obd_uuid *)uuid);
+ rc = -ENOENT;
+ for (i = 0; i < entry->mne_nid_count; i++) {
+ rc = client_import_find_conn(obd->u.cli.cl_import,
+ entry->u.nids[i],
+ (struct obd_uuid *)uuid);
+ if (!rc)
+ break;
+ }
+
up_read(&obd->u.cli.cl_sem);
if (rc < 0) {
CERROR("mgc: cannot find uuid by nid %s\n",
@@ -1428,14 +1435,12 @@ again:
}
mne_swab = !!ptlrpc_rep_need_swab(req);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
/* This import flag means the server did an extra swab of IR MNE
* records (fixed in LU-1252), reverse it here if needed. LU-1644
*/
if (unlikely(req->rq_import->imp_need_mne_swab))
mne_swab = !mne_swab;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
#endif
for (i = 0; i < nrpages && ealen > 0; i++) {
@@ -1740,8 +1745,6 @@ static struct obd_ops mgc_obd_ops = {
.del_conn = client_import_del_conn,
.connect = client_connect_import,
.disconnect = client_disconnect_export,
- /* .enqueue = mgc_enqueue, */
- /* .iocontrol = mgc_iocontrol, */
.set_info_async = mgc_set_info_async,
.get_info = mgc_get_info,
.import_event = mgc_import_event,
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index df7e47f35a66..b42e109b30e0 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o
obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
- lustre_handles.o lustre_peer.o statfs_pack.o \
+ lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index e72f1fc00a13..bc4b7b6b9a20 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -73,7 +73,6 @@ int cl_io_is_going(const struct lu_env *env)
{
return cl_env_info(env)->clt_current_io != NULL;
}
-EXPORT_SYMBOL(cl_io_is_going);
/**
* cl_io invariant that holds at all times when exported cl_io_*() functions
@@ -859,9 +858,6 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
LASSERT(page->cp_owner);
LINVRNT(plist->pl_owner == current);
- lockdep_off();
- mutex_lock(&page->cp_mutex);
- lockdep_on();
LASSERT(list_empty(&page->cp_batch));
list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
@@ -877,12 +873,10 @@ void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
+ LASSERT(cl_page_is_vmlocked(env, page));
LINVRNT(plist->pl_owner == current);
list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
@@ -941,8 +935,6 @@ void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
}
EXPORT_SYMBOL(cl_page_list_splice);
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
/**
* Disowns pages in a queue.
@@ -959,9 +951,6 @@ void cl_page_list_disown(const struct lu_env *env,
LASSERT(plist->pl_nr > 0);
list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
@@ -1221,7 +1210,7 @@ void cl_req_page_add(const struct lu_env *env,
{
struct cl_object *obj;
struct cl_req_obj *rqo;
- int i;
+ unsigned int i;
LASSERT(list_empty(&page->cp_flight));
LASSERT(!page->cp_req);
@@ -1268,7 +1257,7 @@ EXPORT_SYMBOL(cl_req_page_done);
*/
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
- int i;
+ unsigned int i;
int result;
const struct cl_req_slice *slice;
@@ -1301,7 +1290,7 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
{
const struct cl_req_slice *slice;
struct cl_page *page;
- int i;
+ unsigned int i;
LASSERT(!list_empty(&req->crq_pages));
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 91a5806d0239..3199dd4a3b72 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -163,7 +163,7 @@ static spinlock_t *cl_object_attr_guard(struct cl_object *o)
*
* Prevents data-attributes from changing, until lock is released by
* cl_object_attr_unlock(). This has to be called before calls to
- * cl_object_attr_get(), cl_object_attr_set().
+ * cl_object_attr_get(), cl_object_attr_update().
*/
void cl_object_attr_lock(struct cl_object *o)
__acquires(cl_object_attr_guard(o))
@@ -217,11 +217,11 @@ EXPORT_SYMBOL(cl_object_attr_get);
* Updates data-attributes of an object \a obj.
*
* Only attributes, mentioned in a validness bit-mask \a v are
- * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
- * to top.
+ * updated. Calls cl_object_operations::coo_attr_update() on every layer,
+ * bottom to top.
*/
-int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned v)
+int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int v)
{
struct lu_object_header *top;
int result;
@@ -231,8 +231,9 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set) {
- result = obj->co_ops->coo_attr_set(env, obj, attr, v);
+ if (obj->co_ops->coo_attr_update) {
+ result = obj->co_ops->coo_attr_update(env, obj, attr,
+ v);
if (result != 0) {
if (result > 0)
result = 0;
@@ -242,7 +243,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
}
return result;
}
-EXPORT_SYMBOL(cl_object_attr_set);
+EXPORT_SYMBOL(cl_object_attr_update);
/**
* Notifies layers (bottom-to-top) that glimpse AST was received.
@@ -321,6 +322,27 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
EXPORT_SYMBOL(cl_object_prune);
/**
+ * Get stripe information of this object.
+ */
+int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
+ struct lov_user_md __user *uarg)
+{
+ struct lu_object_header *top;
+ int result = 0;
+
+ top = obj->co_lu.lo_header;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_getstripe) {
+ result = obj->co_ops->coo_getstripe(env, obj, uarg);
+ if (result)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_getstripe);
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -377,7 +399,7 @@ static void cl_env_percpu_refill(void);
*/
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
- int i;
+ size_t i;
int result;
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
@@ -411,7 +433,7 @@ static struct cache_stats cl_env_stats = {
*/
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
- int i;
+ size_t i;
static const char *pstate[] = {
[CPS_CACHED] = "c",
[CPS_OWNED] = "o",
@@ -1000,7 +1022,7 @@ static int cl_env_percpu_init(void)
* thus we must uninitialize up to i, the rest are undefined.
*/
for (j = 0; j < i; j++) {
- cle = &cl_env_percpu[i];
+ cle = &cl_env_percpu[j];
lu_context_exit(&cle->ce_ses);
lu_context_fini(&cle->ce_ses);
lu_env_fini(&cle->ce_lu);
@@ -1126,7 +1148,7 @@ static void *cl_key_init(const struct lu_context *ctx,
info = cl0_key_init(ctx, key);
if (!IS_ERR(info)) {
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
@@ -1138,7 +1160,7 @@ static void cl_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
struct cl_thread_info *info;
- int i;
+ size_t i;
info = data;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
@@ -1150,7 +1172,7 @@ static void cl_key_exit(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
struct cl_thread_info *info = data;
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
LASSERT(info->clt_counters[i].ctc_nr_held == 0);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index db2dc6b39073..63973ba096da 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -151,7 +151,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
INIT_LIST_HEAD(&page->cp_layers);
INIT_LIST_HEAD(&page->cp_batch);
INIT_LIST_HEAD(&page->cp_flight);
- mutex_init(&page->cp_mutex);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
@@ -171,7 +170,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
}
return page;
}
-EXPORT_SYMBOL(cl_page_alloc);
/**
* Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -229,11 +227,6 @@ EXPORT_SYMBOL(cl_page_find);
static inline int cl_page_invariant(const struct cl_page *pg)
{
- /*
- * Page invariant is protected by a VM lock.
- */
- LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
return cl_page_in_use_noref(pg);
}
@@ -478,7 +471,6 @@ static void cl_page_owner_clear(struct cl_page *page)
LASSERT(page->cp_owner->ci_owned_nr > 0);
page->cp_owner->ci_owned_nr--;
page->cp_owner = NULL;
- page->cp_task = NULL;
}
}
@@ -562,7 +554,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
PASSERT(env, pg, !pg->cp_owner);
PASSERT(env, pg, !pg->cp_req);
pg->cp_owner = cl_io_top(io);
- pg->cp_task = current;
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
cl_page_state_set(env, pg, CPS_OWNED);
@@ -619,7 +610,6 @@ void cl_page_assume(const struct lu_env *env,
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
PASSERT(env, pg, !pg->cp_owner);
pg->cp_owner = cl_io_top(io);
- pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
}
@@ -860,10 +850,6 @@ void cl_page_completion(const struct lu_env *env,
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
- if (crt == CRT_READ && ioret == 0) {
- PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
- pg->cp_flags |= CPF_READ_COMPLETED;
- }
cl_page_state_set(env, pg, CPS_CACHED);
if (crt >= CRT_NR)
@@ -872,7 +858,6 @@ void cl_page_completion(const struct lu_env *env,
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
if (anchor) {
- LASSERT(cl_page_is_vmlocked(env, pg));
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
}
@@ -989,10 +974,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p %d %d %d %p %p %#x]\n",
+ "page@%p[%d %p %d %d %p %p]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_state, pg->cp_error, pg->cp_type,
- pg->cp_owner, pg->cp_req, pg->cp_flags);
+ pg->cp_state, pg->cp_type,
+ pg->cp_owner, pg->cp_req);
}
EXPORT_SYMBOL(cl_page_header_print);
@@ -1020,7 +1005,6 @@ int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
(const struct lu_env *,
const struct cl_page_slice *));
}
-EXPORT_SYMBOL(cl_page_cancel);
/**
* Converts a byte offset within object \a obj into a page index.
@@ -1046,9 +1030,9 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
}
EXPORT_SYMBOL(cl_index);
-int cl_page_size(const struct cl_object *obj)
+size_t cl_page_size(const struct cl_object *obj)
{
- return 1 << PAGE_SHIFT;
+ return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
@@ -1087,11 +1071,11 @@ struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
/* Initialize cache data */
atomic_set(&cache->ccc_users, 1);
cache->ccc_lru_max = lru_page_max;
- atomic_set(&cache->ccc_lru_left, lru_page_max);
+ atomic_long_set(&cache->ccc_lru_left, lru_page_max);
spin_lock_init(&cache->ccc_lru_lock);
INIT_LIST_HEAD(&cache->ccc_lru);
- atomic_set(&cache->ccc_unstable_nr, 0);
+ atomic_long_set(&cache->ccc_unstable_nr, 0);
init_waitqueue_head(&cache->ccc_unstable_waitq);
return cache;
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index d9d2a1952b8b..76e1ee83a723 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -40,10 +40,10 @@
#include "../include/lprocfs_status.h"
#include <linux/list.h>
#include "../include/cl_object.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "llog_internal.h"
struct obd_device *obd_devs[MAX_OBD_DEVICES];
-EXPORT_SYMBOL(obd_devs);
struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
@@ -54,11 +54,9 @@ unsigned int obd_dump_on_timeout;
EXPORT_SYMBOL(obd_dump_on_timeout);
unsigned int obd_dump_on_eviction;
EXPORT_SYMBOL(obd_dump_on_eviction);
-unsigned int obd_max_dirty_pages = 256;
+unsigned long obd_max_dirty_pages;
EXPORT_SYMBOL(obd_max_dirty_pages);
-atomic_t obd_unstable_pages;
-EXPORT_SYMBOL(obd_unstable_pages);
-atomic_t obd_dirty_pages;
+atomic_long_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
EXPORT_SYMBOL(obd_timeout);
@@ -76,13 +74,11 @@ EXPORT_SYMBOL(at_early_margin);
int at_extra = 30;
EXPORT_SYMBOL(at_extra);
-atomic_t obd_dirty_transit_pages;
+atomic_long_t obd_dirty_transit_pages;
EXPORT_SYMBOL(obd_dirty_transit_pages);
char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
-EXPORT_SYMBOL(obd_jobid_var);
-
-char obd_jobid_node[JOBSTATS_JOBID_SIZE + 1];
+char obd_jobid_node[LUSTRE_JOBID_SIZE + 1];
/* Get jobid of current process from stored variable or calculate
* it from pid and user_id.
@@ -93,14 +89,14 @@ char obd_jobid_node[JOBSTATS_JOBID_SIZE + 1];
*/
int lustre_get_jobid(char *jobid)
{
- memset(jobid, 0, JOBSTATS_JOBID_SIZE);
+ memset(jobid, 0, LUSTRE_JOBID_SIZE);
/* Jobstats isn't enabled */
if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0)
return 0;
/* Use process name + fsuid as jobid */
if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
- snprintf(jobid, JOBSTATS_JOBID_SIZE, "%s.%u",
+ snprintf(jobid, LUSTRE_JOBID_SIZE, "%s.%u",
current_comm(),
from_kuid(&init_user_ns, current_fsuid()));
return 0;
@@ -116,19 +112,6 @@ int lustre_get_jobid(char *jobid)
}
EXPORT_SYMBOL(lustre_get_jobid);
-static inline void obd_data2conn(struct lustre_handle *conn,
- struct obd_ioctl_data *data)
-{
- memset(conn, 0, sizeof(*conn));
- conn->cookie = data->ioc_cookie;
-}
-
-static inline void obd_conn2data(struct obd_ioctl_data *data,
- struct lustre_handle *conn)
-{
- data->ioc_cookie = conn->cookie;
-}
-
static int class_resolve_dev_name(__u32 len, const char *name)
{
int rc;
@@ -287,13 +270,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
- case OBD_IOC_CLOSE_UUID: {
- CDEBUG(D_IOCTL, "closing all connections to uuid %s (NOOP)\n",
- data->ioc_inlbuf1);
- err = 0;
- goto out;
- }
-
case OBD_IOC_GETDEVICE: {
int index = data->ioc_count;
char *status, *str;
@@ -467,15 +443,10 @@ static int obd_init_checks(void)
return ret;
}
-extern int class_procfs_init(void);
-extern int class_procfs_clean(void);
-
static int __init obdclass_init(void)
{
int i, err;
- int lustre_register_fs(void);
-
LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n");
spin_lock_init(&obd_types_lock);
@@ -542,23 +513,9 @@ static int __init obdclass_init(void)
static void obdclass_exit(void)
{
- int i;
-
- int lustre_unregister_fs(void);
-
lustre_unregister_fs();
misc_deregister(&obd_psdev);
- for (i = 0; i < class_devno_max(); i++) {
- struct obd_device *obd = class_num2obd(i);
-
- if (obd && obd->obd_set_up &&
- OBT(obd) && OBP(obd, detach)) {
- /* XXX should this call generic detach otherwise? */
- LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- OBP(obd, detach)(obd);
- }
- }
llog_info_fini();
cl_global_fini();
lu_global_fini();
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 8acf67239fa8..0bd4ad20aba7 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -48,10 +48,10 @@ int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
LASSERT(addr);
put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
+ put_unaligned_le64(id, addr + LPDS);
addr += len - LPDS - LPDS;
put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
+ put_unaligned_le64(id, addr + LPDS);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 99c2da632b51..cf8bb2a2f40b 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -133,7 +133,6 @@ void class_put_type(struct obd_type *type)
module_put(type->typ_dt_ops->owner);
spin_unlock(&type->obd_type_lock);
}
-EXPORT_SYMBOL(class_put_type);
#define CLASS_MAX_NAME 1024
@@ -166,10 +165,10 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
!type->typ_name)
goto failed;
- *(type->typ_dt_ops) = *dt_ops;
+ *type->typ_dt_ops = *dt_ops;
/* md_ops is optional */
if (md_ops)
- *(type->typ_md_ops) = *md_ops;
+ *type->typ_md_ops = *md_ops;
strcpy(type->typ_name, name);
spin_lock_init(&type->obd_type_lock);
@@ -391,7 +390,6 @@ int class_name2dev(const char *name)
return -1;
}
-EXPORT_SYMBOL(class_name2dev);
struct obd_device *class_name2obd(const char *name)
{
@@ -421,7 +419,6 @@ int class_uuid2dev(struct obd_uuid *uuid)
return -1;
}
-EXPORT_SYMBOL(class_uuid2dev);
/**
* Get obd device from ::obd_devs[]
@@ -450,7 +447,6 @@ struct obd_device *class_num2obd(int num)
return obd;
}
-EXPORT_SYMBOL(class_num2obd);
/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
* specified, then only the client with that uuid is returned,
@@ -509,7 +505,7 @@ struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
continue;
if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
if (next)
- *next = i+1;
+ *next = i + 1;
read_unlock(&obd_dev_lock);
return obd;
}
@@ -618,7 +614,7 @@ struct obd_export *class_conn2export(struct lustre_handle *conn)
}
CDEBUG(D_INFO, "looking for export cookie %#llx\n", conn->cookie);
- export = class_handle2object(conn->cookie);
+ export = class_handle2object(conn->cookie, NULL);
return export;
}
EXPORT_SYMBOL(class_conn2export);
@@ -817,7 +813,6 @@ void class_unlink_export(struct obd_export *exp)
spin_unlock(&exp->exp_obd->obd_dev_lock);
class_export_put(exp);
}
-EXPORT_SYMBOL(class_unlink_export);
/* Import management functions */
static void class_import_destroy(struct obd_import *imp)
@@ -973,7 +968,6 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
lock, exp, lock->l_exp_refs_nr);
spin_unlock(&exp->exp_locks_list_guard);
}
-EXPORT_SYMBOL(__class_export_add_lock_ref);
void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
{
@@ -991,7 +985,6 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
lock, exp, lock->l_exp_refs_nr);
spin_unlock(&exp->exp_locks_list_guard);
}
-EXPORT_SYMBOL(__class_export_del_lock_ref);
#endif
/* A connection defines an export context in which preallocation can
@@ -1100,7 +1093,6 @@ EXPORT_SYMBOL(class_fail_export);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void (*class_export_dump_hook)(struct obd_export *) = NULL;
-EXPORT_SYMBOL(class_export_dump_hook);
#endif
/* Total amount of zombies to be destroyed */
@@ -1312,3 +1304,135 @@ void obd_zombie_impexp_stop(void)
obd_zombie_impexp_notify();
wait_for_completion(&obd_zombie_stop);
}
+
+struct obd_request_slot_waiter {
+ struct list_head orsw_entry;
+ wait_queue_head_t orsw_waitq;
+ bool orsw_signaled;
+};
+
+static bool obd_request_slot_avail(struct client_obd *cli,
+ struct obd_request_slot_waiter *orsw)
+{
+ bool avail;
+
+ spin_lock(&cli->cl_loi_list_lock);
+ avail = !!list_empty(&orsw->orsw_entry);
+ spin_unlock(&cli->cl_loi_list_lock);
+
+ return avail;
+};
+
+/*
+ * For network flow control, the RPC sponsor needs to acquire a credit
+ * before sending the RPC. The credit count for a connection is defined
+ * by "cl_max_rpcs_in_flight". If all the credits are occupied, then
+ * subsequent RPC sponsors need to wait until others release their
+ * credits or the administrator increases "cl_max_rpcs_in_flight".
+ */
+int obd_get_request_slot(struct client_obd *cli)
+{
+ struct obd_request_slot_waiter orsw;
+ struct l_wait_info lwi;
+ int rc;
+
+ spin_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight) {
+ cli->cl_r_in_flight++;
+ spin_unlock(&cli->cl_loi_list_lock);
+ return 0;
+ }
+
+ init_waitqueue_head(&orsw.orsw_waitq);
+ list_add_tail(&orsw.orsw_entry, &cli->cl_loi_read_list);
+ orsw.orsw_signaled = false;
+ spin_unlock(&cli->cl_loi_list_lock);
+
+ lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(orsw.orsw_waitq,
+ obd_request_slot_avail(cli, &orsw) ||
+ orsw.orsw_signaled,
+ &lwi);
+
+ /*
+ * Here, we must take the lock to avoid the on-stack 'orsw' being
+ * freed while another user (such as obd_put_request_slot) is still
+ * using it.
+ */
+ spin_lock(&cli->cl_loi_list_lock);
+ if (rc) {
+ if (!orsw.orsw_signaled) {
+ if (list_empty(&orsw.orsw_entry))
+ cli->cl_r_in_flight--;
+ else
+ list_del(&orsw.orsw_entry);
+ }
+ }
+
+ if (orsw.orsw_signaled) {
+ LASSERT(list_empty(&orsw.orsw_entry));
+
+ rc = -EINTR;
+ }
+ spin_unlock(&cli->cl_loi_list_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(obd_get_request_slot);
+
+void obd_put_request_slot(struct client_obd *cli)
+{
+ struct obd_request_slot_waiter *orsw;
+
+ spin_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+
+ /* If there is a free slot, wake up the first waiter. */
+ if (!list_empty(&cli->cl_loi_read_list) &&
+ likely(cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight)) {
+ orsw = list_entry(cli->cl_loi_read_list.next,
+ struct obd_request_slot_waiter, orsw_entry);
+ list_del_init(&orsw->orsw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&orsw->orsw_waitq);
+ }
+ spin_unlock(&cli->cl_loi_list_lock);
+}
+EXPORT_SYMBOL(obd_put_request_slot);
+
+__u32 obd_get_max_rpcs_in_flight(struct client_obd *cli)
+{
+ return cli->cl_max_rpcs_in_flight;
+}
+EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
+
+int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
+{
+ struct obd_request_slot_waiter *orsw;
+ __u32 old;
+ int diff;
+ int i;
+
+ if (max > OBD_MAX_RIF_MAX || max < 1)
+ return -ERANGE;
+
+ spin_lock(&cli->cl_loi_list_lock);
+ old = cli->cl_max_rpcs_in_flight;
+ cli->cl_max_rpcs_in_flight = max;
+ diff = max - old;
+
+ /* We increased max_rpcs_in_flight, so wake up some of the waiters. */
+ for (i = 0; i < diff; i++) {
+ if (list_empty(&cli->cl_loi_read_list))
+ break;
+
+ orsw = list_entry(cli->cl_loi_read_list.next,
+ struct obd_request_slot_waiter, orsw_entry);
+ list_del_init(&orsw->orsw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&orsw->orsw_waitq);
+ }
+ spin_unlock(&cli->cl_loi_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
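Taken together, obd_get_request_slot()/obd_put_request_slot() implement a simple credit scheme: an RPC sender takes a slot before sending and returns it when done, queueing on cl_loi_read_list whenever all cl_max_rpcs_in_flight credits are in use, and obd_set_max_rpcs_in_flight() wakes queued waiters when the limit grows. The following is a toy, single-threaded userspace model of just the bookkeeping (invented names, no locking or real blocking), to show the invariant the kernel code keeps:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the credit bookkeeping only: single-threaded, no locking,
 * no real sleeping.  Names are simplifications, not the kernel API.
 */
struct waiter {
	int id;
	struct waiter *next;
};

struct toy_client {
	unsigned int in_flight;		/* models cl_r_in_flight */
	unsigned int max;		/* models cl_max_rpcs_in_flight */
	struct waiter *head, *tail;	/* models cl_loi_read_list */
};

/* Take a credit if one is free, otherwise queue the caller (returns -1). */
static int get_slot(struct toy_client *cli, int id)
{
	struct waiter *w;

	if (cli->in_flight < cli->max) {
		cli->in_flight++;
		return 0;
	}
	w = calloc(1, sizeof(*w));
	if (!w)
		abort();
	w->id = id;
	if (cli->tail)
		cli->tail->next = w;
	else
		cli->head = w;
	cli->tail = w;
	return -1;
}

/* Return a credit and hand it straight to the oldest waiter, if any. */
static void put_slot(struct toy_client *cli)
{
	struct waiter *w = cli->head;

	cli->in_flight--;
	if (w && cli->in_flight < cli->max) {
		cli->head = w->next;
		if (!cli->head)
			cli->tail = NULL;
		cli->in_flight++;	/* the waiter inherits the credit */
		printf("waiter %d granted a slot\n", w->id);
		free(w);
	}
}

int main(void)
{
	struct toy_client cli = { .in_flight = 0, .max = 2 };
	int i;

	for (i = 1; i <= 4; i++)
		printf("rpc %d: %s\n", i,
		       get_slot(&cli, i) ? "queued" : "sent");
	put_slot(&cli);		/* an RPC completes: rpc 3 gets its slot */
	put_slot(&cli);		/* another completes: rpc 4 gets its slot */
	return 0;
}

In the kernel the same hand-off happens under cl_loi_list_lock, with l_wait_event() doing the actual sleeping.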
diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c
new file mode 100644
index 000000000000..0b1d2f0a422c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/linkea.c
@@ -0,0 +1,201 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, 2014, Intel Corporation.
+ * Use is subject to license terms.
+ *
+ * Author: Di Wang <di.wang@intel.com>
+ */
+
+#include "../include/lustre/lustre_idl.h"
+#include "../include/obd.h"
+#include "../include/lustre_linkea.h"
+
+int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf)
+{
+ ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_SIZE);
+ if (!ldata->ld_buf->lb_buf)
+ return -ENOMEM;
+ ldata->ld_leh = ldata->ld_buf->lb_buf;
+ ldata->ld_leh->leh_magic = LINK_EA_MAGIC;
+ ldata->ld_leh->leh_len = sizeof(struct link_ea_header);
+ ldata->ld_leh->leh_reccount = 0;
+ return 0;
+}
+EXPORT_SYMBOL(linkea_data_new);
+
+int linkea_init(struct linkea_data *ldata)
+{
+ struct link_ea_header *leh;
+
+ LASSERT(ldata->ld_buf);
+ leh = ldata->ld_buf->lb_buf;
+ if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
+ leh->leh_magic = LINK_EA_MAGIC;
+ leh->leh_reccount = __swab32(leh->leh_reccount);
+ leh->leh_len = __swab64(leh->leh_len);
+ /* entries are swabbed by linkea_entry_unpack */
+ }
+ if (leh->leh_magic != LINK_EA_MAGIC)
+ return -EINVAL;
+ if (leh->leh_reccount == 0)
+ return -ENODATA;
+
+ ldata->ld_leh = leh;
+ return 0;
+}
+EXPORT_SYMBOL(linkea_init);
+
+/**
+ * Pack a link_ea_entry.
+ * All elements are stored as chars to avoid alignment issues.
+ * Numbers are always stored big-endian.
+ * \retval record length
+ */
+int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ struct lu_fid tmpfid;
+ int reclen;
+
+ tmpfid = *pfid;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_CRASH))
+ tmpfid.f_ver = ~0;
+ fid_cpu_to_be(&tmpfid, &tmpfid);
+ memcpy(&lee->lee_parent_fid, &tmpfid, sizeof(tmpfid));
+ memcpy(lee->lee_name, lname->ln_name, lname->ln_namelen);
+ reclen = sizeof(struct link_ea_entry) + lname->ln_namelen;
+
+ lee->lee_reclen[0] = (reclen >> 8) & 0xff;
+ lee->lee_reclen[1] = reclen & 0xff;
+ return reclen;
+}
+EXPORT_SYMBOL(linkea_entry_pack);
+
+void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
+ struct lu_name *lname, struct lu_fid *pfid)
+{
+ *reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
+ memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
+ fid_be_to_cpu(pfid, pfid);
+ if (lname) {
+ lname->ln_name = lee->lee_name;
+ lname->ln_namelen = *reclen - sizeof(struct link_ea_entry);
+ }
+}
+EXPORT_SYMBOL(linkea_entry_unpack);
+
+/**
+ * Add a record to the end of the link EA buffer.
+ **/
+int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ LASSERT(ldata->ld_leh);
+
+ if (!lname || !pfid)
+ return -EINVAL;
+
+ ldata->ld_reclen = lname->ln_namelen + sizeof(struct link_ea_entry);
+ if (ldata->ld_leh->leh_len + ldata->ld_reclen >
+ ldata->ld_buf->lb_len) {
+ if (lu_buf_check_and_grow(ldata->ld_buf,
+ ldata->ld_leh->leh_len +
+ ldata->ld_reclen) < 0)
+ return -ENOMEM;
+ }
+
+ ldata->ld_leh = ldata->ld_buf->lb_buf;
+ ldata->ld_lee = ldata->ld_buf->lb_buf + ldata->ld_leh->leh_len;
+ ldata->ld_reclen = linkea_entry_pack(ldata->ld_lee, lname, pfid);
+ ldata->ld_leh->leh_len += ldata->ld_reclen;
+ ldata->ld_leh->leh_reccount++;
+ CDEBUG(D_INODE, "New link_ea name '" DFID ":%.*s' is added\n",
+ PFID(pfid), lname->ln_namelen, lname->ln_name);
+ return 0;
+}
+EXPORT_SYMBOL(linkea_add_buf);
+
+/** Del the current record from the link ea buf */
+void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname)
+{
+ LASSERT(ldata->ld_leh && ldata->ld_lee);
+
+ ldata->ld_leh->leh_reccount--;
+ ldata->ld_leh->leh_len -= ldata->ld_reclen;
+ memmove(ldata->ld_lee, (char *)ldata->ld_lee + ldata->ld_reclen,
+ (char *)ldata->ld_leh + ldata->ld_leh->leh_len -
+ (char *)ldata->ld_lee);
+ CDEBUG(D_INODE, "Old link_ea name '%.*s' is removed\n",
+ lname->ln_namelen, lname->ln_name);
+
+ if ((char *)ldata->ld_lee >= ((char *)ldata->ld_leh +
+ ldata->ld_leh->leh_len))
+ ldata->ld_lee = NULL;
+}
+EXPORT_SYMBOL(linkea_del_buf);
+
+/**
+ * Check if such a link exists in linkEA.
+ *
+ * \param ldata link data to search in
+ * \param lname name in the parent's directory entry pointing to this object
+ * \param pfid parent fid the link is to be found for
+ *
+ * \retval 0 success
+ * \retval -ENOENT link does not exist
+ * \retval -ve on error
+ */
+int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ struct lu_name tmpname;
+ struct lu_fid tmpfid;
+ int count;
+
+ LASSERT(ldata->ld_leh);
+
+ /* link #0 */
+ ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1);
+
+ for (count = 0; count < ldata->ld_leh->leh_reccount; count++) {
+ linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen,
+ &tmpname, &tmpfid);
+ if (tmpname.ln_namelen == lname->ln_namelen &&
+ lu_fid_eq(&tmpfid, pfid) &&
+ (strncmp(tmpname.ln_name, lname->ln_name,
+ tmpname.ln_namelen) == 0))
+ break;
+ ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee +
+ ldata->ld_reclen);
+ }
+
+ if (count == ldata->ld_leh->leh_reccount) {
+ CDEBUG(D_INODE, "Old link_ea name '%.*s' not found\n",
+ lname->ln_namelen, lname->ln_name);
+ ldata->ld_lee = NULL;
+ ldata->ld_reclen = 0;
+ return -ENOENT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(linkea_links_find);
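The new linkea helpers store each link as a variable-length record: a two-byte big-endian record length, the parent FID converted to big-endian, then the raw name bytes, all packed without padding. A rough userspace round-trip of that layout, using a simplified three-word stand-in for struct lu_fid (field sizes are illustrative, not the real FID encoding):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htonl()/ntohl() for the big-endian step */

/* Simplified stand-in for struct lu_fid (the real one is 64+32+32 bits). */
struct toy_fid {
	uint32_t seq, oid, ver;
};

/* Pack one record: [reclen: 2 bytes BE][fid BE][name bytes, no NUL]. */
static int toy_pack(unsigned char *buf, const struct toy_fid *fid,
		    const char *name)
{
	size_t namelen = strlen(name);
	int reclen = 2 + (int)sizeof(*fid) + (int)namelen;
	uint32_t be[3] = { htonl(fid->seq), htonl(fid->oid), htonl(fid->ver) };

	buf[0] = (reclen >> 8) & 0xff;	/* high byte first, as in the patch */
	buf[1] = reclen & 0xff;
	memcpy(buf + 2, be, sizeof(be));
	memcpy(buf + 2 + sizeof(be), name, namelen);
	return reclen;
}

/* Unpack the same layout back into host byte order. */
static int toy_unpack(const unsigned char *buf, struct toy_fid *fid,
		      char *name, size_t namesz)
{
	int reclen = (buf[0] << 8) | buf[1];
	uint32_t be[3];
	int namelen = reclen - 2 - (int)sizeof(be);

	memcpy(be, buf + 2, sizeof(be));
	fid->seq = ntohl(be[0]);
	fid->oid = ntohl(be[1]);
	fid->ver = ntohl(be[2]);
	snprintf(name, namesz, "%.*s", namelen,
		 (const char *)buf + 2 + sizeof(be));
	return reclen;
}

int main(void)
{
	unsigned char buf[128];
	struct toy_fid in = { 0x11, 0x22, 0 }, out;
	char name[64];
	int reclen = toy_pack(buf, &in, "foo");

	toy_unpack(buf, &out, name, sizeof(name));
	printf("reclen=%d fid=%x:%x:%x name=%s\n",
	       reclen, out.seq, out.oid, out.ver, name);
	return 0;
}

The real entries additionally sit behind a link_ea_header carrying a magic, total length and record count, which is what linkea_data_new()/linkea_init() above set up and validate.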
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 33342bfcc90e..be09e04b042f 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -65,6 +65,7 @@
#include "../../include/obd_support.h"
#include "../../include/obd_class.h"
#include "../../include/lprocfs_status.h"
+#include "../../include/lustre/lustre_ioctl.h"
#include "../../include/lustre_ver.h"
/* buffer MUST be at least the size of obd_ioctl_hdr */
@@ -157,7 +158,6 @@ int obd_ioctl_popdata(void __user *arg, void *data, int len)
err = copy_to_user(arg, data, len) ? -EFAULT : 0;
return err;
}
-EXPORT_SYMBOL(obd_ioctl_popdata);
/* opening /dev/obd */
static int obd_class_open(struct inode *inode, struct file *file)
@@ -191,7 +191,7 @@ static long obd_class_ioctl(struct file *filp, unsigned int cmd,
}
/* declare character device */
-static struct file_operations obd_psdev_fops = {
+static const struct file_operations obd_psdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = obd_class_ioctl, /* unlocked_ioctl */
.open = obd_class_open, /* open */
@@ -291,7 +291,7 @@ static ssize_t jobid_name_store(struct kobject *kobj, struct attribute *attr,
const char *buffer,
size_t count)
{
- if (!count || count > JOBSTATS_JOBID_SIZE)
+ if (!count || count > LUSTRE_JOBID_SIZE)
return -EINVAL;
memcpy(obd_jobid_node, buffer, count);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index c6cc6a7666e3..41b77a30feb3 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -44,7 +44,7 @@
#include <linux/fs.h>
-void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
+void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid)
{
valid &= src->o_valid;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 8f70dd2686f9..e6c785afceba 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -45,6 +45,7 @@
#include "../../include/obd_support.h"
#include "../../include/lprocfs_status.h"
+#include "../../include/obd_class.h"
struct static_lustre_uintvalue_attr {
struct {
@@ -95,8 +96,8 @@ LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout);
static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
- return sprintf(buf, "%ul\n",
- obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
+ return sprintf(buf, "%lu\n",
+ obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
}
static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 1784ca063428..43797f106745 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -80,7 +80,7 @@ static void llog_free_handle(struct llog_handle *loghandle)
LASSERT(list_empty(&loghandle->u.phd.phd_entry));
else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
LASSERT(list_empty(&loghandle->u.chd.chd_head));
- LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE);
+ LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE);
kfree(loghandle->lgh_hdr);
out:
kfree(loghandle);
@@ -137,6 +137,7 @@ static int llog_read_header(const struct lu_env *env,
int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
int flags, struct obd_uuid *uuid)
{
+ enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
struct llog_log_hdr *llh;
int rc;
@@ -194,6 +195,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
rc = -EINVAL;
}
+ llh->llh_flags |= fmt;
out:
if (rc) {
kfree(llh);
@@ -233,6 +235,10 @@ static int llog_process_thread(void *arg)
else
last_index = LLOG_BITMAP_BYTES * 8 - 1;
+ /* Record is not in this buffer. */
+ if (index > last_index)
+ goto out;
+
while (rc == 0) {
struct llog_rec_hdr *rec;
@@ -262,7 +268,7 @@ repeat:
*/
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
- rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
+ rec = llog_rec_hdr_next(rec)) {
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index a82a2950295a..ce8e2f6f002a 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -63,11 +63,13 @@ static int llog_cat_id2handle(const struct lu_env *env,
struct llog_logid *logid)
{
struct llog_handle *loghandle;
+ enum llog_flag fmt;
int rc = 0;
if (!cathandle)
return -EBADF;
+ fmt = cathandle->lgh_hdr->llh_flags & LLOG_F_EXT_MASK;
down_write(&cathandle->lgh_lock);
list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
u.phd.phd_entry) {
@@ -99,7 +101,7 @@ static int llog_cat_id2handle(const struct lu_env *env,
return rc;
}
- rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, NULL);
+ rc = llog_init_handle(env, loghandle, fmt | LLOG_F_IS_PLAIN, NULL);
if (rc < 0) {
llog_close(env, loghandle);
loghandle = NULL;
@@ -107,7 +109,7 @@ static int llog_cat_id2handle(const struct lu_env *env,
}
down_write(&cathandle->lgh_lock);
- list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+ list_add_tail(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
up_write(&cathandle->lgh_lock);
loghandle->u.phd.phd_cat_handle = cathandle;
@@ -123,7 +125,6 @@ out:
int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
{
struct llog_handle *loghandle, *n;
- int rc;
list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
u.phd.phd_entry) {
@@ -134,8 +135,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
/* if handle was stored in ctxt, remove it too */
if (cathandle->lgh_ctxt->loc_handle == cathandle)
cathandle->lgh_ctxt->loc_handle = NULL;
- rc = llog_close(env, cathandle);
- return rc;
+ return llog_close(env, cathandle);
}
EXPORT_SYMBOL(llog_cat_close);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
index f7949525d952..21a93c73756a 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -70,4 +70,9 @@ int llog_process_or_fork(const struct lu_env *env,
llog_cb_t cb, void *data, void *catdata, bool fork);
int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
struct llog_handle *loghandle, int index);
+
+static inline struct llog_rec_hdr *llog_rec_hdr_next(struct llog_rec_hdr *rec)
+{
+ return (struct llog_rec_hdr *)((char *)rec + rec->lrh_len);
+}
#endif
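llog_rec_hdr_next() simply advances by the record's self-describing lrh_len, which is how the rewritten loop in llog_process_thread() walks a chunk. A compact userspace model of walking back-to-back variable-length records this way (the header layout and sizes here are invented for the illustration):

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for struct llog_rec_hdr: total length, then a type. */
struct rec_hdr {
	uint32_t lrh_len;	/* whole record length, header included */
	uint32_t lrh_type;
};

/* Same idea as llog_rec_hdr_next(): step by the record's own length. */
static struct rec_hdr *rec_next(struct rec_hdr *rec)
{
	return (struct rec_hdr *)((char *)rec + rec->lrh_len);
}

int main(void)
{
	/* Union keeps the byte buffer suitably aligned for rec_hdr. */
	union {
		unsigned char bytes[64];
		struct rec_hdr align;
	} chunk;
	uint32_t lens[] = { 16, 24, 12 };
	unsigned char *p = chunk.bytes;
	struct rec_hdr *rec;
	size_t used = 0, i;

	/* Build three back-to-back records of different sizes. */
	for (i = 0; i < 3; i++) {
		rec = (struct rec_hdr *)p;
		rec->lrh_len = lens[i];
		rec->lrh_type = 0x1060 + (uint32_t)i;	/* arbitrary codes */
		p += lens[i];
		used += lens[i];
	}

	/* Walk them the way the llog loop does: advance by lrh_len. */
	for (rec = (struct rec_hdr *)chunk.bytes;
	     (unsigned char *)rec < chunk.bytes + used;
	     rec = rec_next(rec))
		printf("rec type %#x len %u\n", rec->lrh_type, rec->lrh_len);

	return 0;
}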
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 6ace7e097859..a4277d684614 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -210,7 +210,6 @@ LU_KEY_INIT_FINI(llog, struct llog_thread_info);
/* context key: llog_thread_key */
LU_CONTEXT_KEY_DEFINE(llog, LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL);
LU_KEY_INIT_GENERIC(llog);
-EXPORT_SYMBOL(llog_thread_key);
int llog_info_init(void)
{
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index f7b9b190350c..8c4c1b3f1b45 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -172,20 +172,23 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
__swab64s(&cr->cr.cr_time);
lustre_swab_lu_fid(&cr->cr.cr_tfid);
lustre_swab_lu_fid(&cr->cr.cr_pfid);
- if (CHANGELOG_REC_EXTENDED(&cr->cr)) {
- struct llog_changelog_ext_rec *ext =
- (struct llog_changelog_ext_rec *)rec;
-
- lustre_swab_lu_fid(&ext->cr.cr_sfid);
- lustre_swab_lu_fid(&ext->cr.cr_spfid);
- tail = &ext->cr_tail;
- } else {
- tail = &cr->cr_tail;
+ if (cr->cr.cr_flags & CLF_RENAME) {
+ struct changelog_ext_rename *rnm =
+ changelog_rec_rename(&cr->cr);
+
+ lustre_swab_lu_fid(&rnm->cr_sfid);
+ lustre_swab_lu_fid(&rnm->cr_spfid);
}
- tail = (struct llog_rec_tail *)((char *)tail +
+ /*
+ * Because the tail follows a variable-length structure, we need
+ * to compute its location at runtime.
+ */
+ tail = (struct llog_rec_tail *)((char *)&cr->cr +
+ changelog_rec_size(&cr->cr) +
cr->cr.cr_namelen);
break;
}
+
case CHANGELOG_USER_REC:
{
struct llog_changelog_user_rec *cur =
@@ -224,6 +227,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
__swab32s(&lsr->lsr_uid_h);
__swab32s(&lsr->lsr_gid);
__swab32s(&lsr->lsr_gid_h);
+ __swab64s(&lsr->lsr_valid);
tail = &lsr->lsr_tail;
break;
}
@@ -343,7 +347,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
print_lustre_cfg(lcfg);
}
-EXPORT_SYMBOL(lustre_swab_lustre_cfg);
/* used only for compatibility with old on-disk cfg_marker data */
struct cfg_marker32 {
@@ -403,4 +406,3 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
__swab64s(&marker->cm_canceltime);
}
}
-EXPORT_SYMBOL(lustre_swab_cfg_marker);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 279b625f1afe..852a5acfefab 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -96,6 +96,12 @@ static const char * const obd_connect_names[] = {
"pingless",
"flock_deadlock",
"disp_stripe",
+ "open_by_fid",
+ "lfsck",
+ "unknown",
+ "unlink_close",
+ "unknown",
+ "dir_stripe",
"unknown",
NULL
};
@@ -309,7 +315,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root,
}
EXPORT_SYMBOL_GPL(ldebugfs_add_simple);
-static struct file_operations lprocfs_generic_fops = { };
+static const struct file_operations lprocfs_generic_fops = { };
int ldebugfs_add_vars(struct dentry *parent,
struct lprocfs_vars *list,
@@ -615,7 +621,6 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
}
-EXPORT_SYMBOL(lprocfs_stats_collect);
/**
* Append a space separated list of current set flags to str.
@@ -1043,7 +1048,6 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
}
return rc;
}
-EXPORT_SYMBOL(lprocfs_stats_alloc_one);
struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
enum lprocfs_stats_flags flags)
@@ -1547,6 +1551,146 @@ void lprocfs_oh_clear(struct obd_histogram *oh)
}
EXPORT_SYMBOL(lprocfs_oh_clear);
+int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
+ struct root_squash_info *squash, char *name)
+{
+ char kernbuf[64], *tmp, *errmsg;
+ unsigned long uid, gid;
+ int rc;
+
+ if (count >= sizeof(kernbuf)) {
+ errmsg = "string too long";
+ rc = -EINVAL;
+ goto failed_noprint;
+ }
+ if (copy_from_user(kernbuf, buffer, count)) {
+ errmsg = "bad address";
+ rc = -EFAULT;
+ goto failed_noprint;
+ }
+ kernbuf[count] = '\0';
+
+ /* look for uid gid separator */
+ tmp = strchr(kernbuf, ':');
+ if (!tmp) {
+ errmsg = "needs uid:gid format";
+ rc = -EINVAL;
+ goto failed;
+ }
+ *tmp = '\0';
+ tmp++;
+
+ /* parse uid */
+ if (kstrtoul(kernbuf, 0, &uid) != 0) {
+ errmsg = "bad uid";
+ rc = -EINVAL;
+ goto failed;
+ }
+ /* parse gid */
+ if (kstrtoul(tmp, 0, &gid) != 0) {
+ errmsg = "bad gid";
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ squash->rsi_uid = uid;
+ squash->rsi_gid = gid;
+
+ LCONSOLE_INFO("%s: root_squash is set to %u:%u\n",
+ name, squash->rsi_uid, squash->rsi_gid);
+ return count;
+
+failed:
+ if (tmp) {
+ tmp--;
+ *tmp = ':';
+ }
+ CWARN("%s: failed to set root_squash to \"%s\", %s, rc = %d\n",
+ name, kernbuf, errmsg, rc);
+ return rc;
+failed_noprint:
+ CWARN("%s: failed to set root_squash due to %s, rc = %d\n",
+ name, errmsg, rc);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_wr_root_squash);
+
+int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
+ struct root_squash_info *squash, char *name)
+{
+ char *kernbuf = NULL, *errmsg;
+ struct list_head tmp;
+ int len = count;
+ int rc;
+
+ if (count > 4096) {
+ errmsg = "string too long";
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ kernbuf = kzalloc(count + 1, GFP_NOFS);
+ if (!kernbuf) {
+ errmsg = "no memory";
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ if (copy_from_user(kernbuf, buffer, count)) {
+ errmsg = "bad address";
+ rc = -EFAULT;
+ goto failed;
+ }
+ kernbuf[count] = '\0';
+
+ if (count > 0 && kernbuf[count - 1] == '\n')
+ len = count - 1;
+
+ if ((len == 4 && !strncmp(kernbuf, "NONE", len)) ||
+ (len == 5 && !strncmp(kernbuf, "clear", len))) {
+ /* empty string is special case */
+ down_write(&squash->rsi_sem);
+ if (!list_empty(&squash->rsi_nosquash_nids))
+ cfs_free_nidlist(&squash->rsi_nosquash_nids);
+ up_write(&squash->rsi_sem);
+ LCONSOLE_INFO("%s: nosquash_nids is cleared\n", name);
+ kfree(kernbuf);
+ return count;
+ }
+
+ INIT_LIST_HEAD(&tmp);
+ if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) {
+ errmsg = "can't parse";
+ rc = -EINVAL;
+ goto failed;
+ }
+ LCONSOLE_INFO("%s: nosquash_nids set to %s\n",
+ name, kernbuf);
+ kfree(kernbuf);
+ kernbuf = NULL;
+
+ down_write(&squash->rsi_sem);
+ if (!list_empty(&squash->rsi_nosquash_nids))
+ cfs_free_nidlist(&squash->rsi_nosquash_nids);
+ list_splice(&tmp, &squash->rsi_nosquash_nids);
+ up_write(&squash->rsi_sem);
+
+ return count;
+
+failed:
+ if (kernbuf) {
+ CWARN("%s: failed to set nosquash_nids to \"%s\", %s rc = %d\n",
+ name, kernbuf, errmsg, rc);
+ kfree(kernbuf);
+ kernbuf = NULL;
+ } else {
+ CWARN("%s: failed to set nosquash_nids due to %s rc = %d\n",
+ name, errmsg, rc);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_wr_nosquash_nids);
+
static ssize_t lustre_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
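lprocfs_wr_root_squash() above expects a "uid:gid" string and rejects anything else, while lprocfs_wr_nosquash_nids() accepts a NID list or the special values "NONE"/"clear". A minimal userspace sketch of the same uid:gid parsing, with strtoul standing in for kstrtoul and ad-hoc error returns:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "uid:gid" the same way the handler above does; 0 on success. */
static int parse_root_squash(const char *in, unsigned long *uid,
			     unsigned long *gid)
{
	char buf[64], *sep, *end;

	if (strlen(in) >= sizeof(buf))
		return -1;			/* "string too long" */
	strcpy(buf, in);

	sep = strchr(buf, ':');			/* uid/gid separator */
	if (!sep)
		return -1;			/* "needs uid:gid format" */
	*sep++ = '\0';

	*uid = strtoul(buf, &end, 0);
	if (end == buf || *end)
		return -1;			/* "bad uid" */
	*gid = strtoul(sep, &end, 0);
	if (end == sep || *end)
		return -1;			/* "bad gid" */
	return 0;
}

int main(void)
{
	unsigned long uid, gid;

	if (!parse_root_squash("500:500", &uid, &gid))
		printf("root_squash set to %lu:%lu\n", uid, gid);
	if (parse_root_squash("500", &uid, &gid))
		printf("rejected: needs uid:gid format\n");
	return 0;
}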
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 9b03059f34d6..054e567e6c8d 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -55,6 +55,34 @@
#include "../include/lu_ref.h"
#include <linux/list.h>
+enum {
+ LU_CACHE_PERCENT_MAX = 50,
+ LU_CACHE_PERCENT_DEFAULT = 20
+};
+
+#define LU_CACHE_NR_MAX_ADJUST 128
+#define LU_CACHE_NR_UNLIMITED -1
+#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED
+#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED
+#define LU_CACHE_NR_ZFS_LIMIT 256
+
+#define LU_SITE_BITS_MIN 12
+#define LU_SITE_BITS_MAX 24
+/**
+ * 256 buckets in total; we don't want too many buckets because:
+ * - they consume too much memory
+ * - they lead to an unbalanced LRU list
+ */
+#define LU_SITE_BKT_BITS 8
+
+static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+module_param(lu_cache_percent, int, 0644);
+MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
+
+static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
+module_param(lu_cache_nr, long, 0644);
+MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
+
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
@@ -310,10 +338,10 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
struct cfs_hash_bd bd2;
struct list_head dispose;
int did_sth;
- int start;
+ unsigned int start;
int count;
int bnr;
- int i;
+ unsigned int i;
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
return 0;
@@ -324,8 +352,13 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
* the dispose list, removing them from LRU and hash table.
*/
start = s->ls_purge_start;
- bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
+ bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
+ /*
+ * It doesn't make sense to run purge threads in parallel; that can
+ * only cause trouble. See LU-5331.
+ */
+ mutex_lock(&s->ls_purge_mutex);
did_sth = 0;
cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
if (i < start)
@@ -371,6 +404,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
if (nr == 0)
break;
}
+ mutex_unlock(&s->ls_purge_mutex);
if (nr != 0 && did_sth && start != 0) {
start = 0; /* restart from the first bucket */
@@ -573,6 +607,27 @@ static struct lu_object *lu_object_find(const struct lu_env *env,
return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
+/*
+ * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because
+ * the calculation of the number of objects to reclaim is not covered by
+ * a lock, the amount purged in one call is capped by LU_CACHE_NR_MAX_ADJUST.
+ * This ensures that many concurrent threads will not accidentally purge
+ * the entire cache.
+ */
+static void lu_object_limit(const struct lu_env *env, struct lu_device *dev)
+{
+ __u64 size, nr;
+
+ if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
+ return;
+
+ size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
+ nr = (__u64)lu_cache_nr;
+ if (size > nr)
+ lu_site_purge(env, dev->ld_site,
+ min_t(__u64, size - nr, LU_CACHE_NR_MAX_ADJUST));
+}
+
static struct lu_object *lu_object_new(const struct lu_env *env,
struct lu_device *dev,
const struct lu_fid *f,
@@ -590,6 +645,9 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
cfs_hash_bd_unlock(hs, &bd, 1);
+
+ lu_object_limit(env, dev);
+
return o;
}
@@ -656,6 +714,9 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
if (likely(PTR_ERR(shadow) == -ENOENT)) {
cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
cfs_hash_bd_unlock(hs, &bd, 1);
+
+ lu_object_limit(env, dev);
+
return o;
}
@@ -706,13 +767,15 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
struct lu_object *obj;
top = lu_object_find(env, dev, f, conf);
- if (!IS_ERR(top)) {
- obj = lu_object_locate(top->lo_header, dev->ld_type);
- if (!obj)
- lu_object_put(env, top);
- } else {
- obj = top;
+ if (IS_ERR(top))
+ return top;
+
+ obj = lu_object_locate(top->lo_header, dev->ld_type);
+ if (unlikely(!obj)) {
+ lu_object_put(env, top);
+ obj = ERR_PTR(-ENOENT);
}
+
return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
@@ -726,34 +789,31 @@ int lu_device_type_init(struct lu_device_type *ldt)
{
int result = 0;
+ atomic_set(&ldt->ldt_device_nr, 0);
INIT_LIST_HEAD(&ldt->ldt_linkage);
if (ldt->ldt_ops->ldto_init)
result = ldt->ldt_ops->ldto_init(ldt);
- if (result == 0)
+
+ if (!result) {
+ spin_lock(&obd_types_lock);
list_add(&ldt->ldt_linkage, &lu_device_types);
+ spin_unlock(&obd_types_lock);
+ }
+
return result;
}
EXPORT_SYMBOL(lu_device_type_init);
void lu_device_type_fini(struct lu_device_type *ldt)
{
+ spin_lock(&obd_types_lock);
list_del_init(&ldt->ldt_linkage);
+ spin_unlock(&obd_types_lock);
if (ldt->ldt_ops->ldto_fini)
ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
-void lu_types_stop(void)
-{
- struct lu_device_type *ldt;
-
- list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
- if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
- ldt->ldt_ops->ldto_stop(ldt);
- }
-}
-EXPORT_SYMBOL(lu_types_stop);
-
/**
* Global list of all sites on this node
*/
@@ -808,22 +868,14 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
}
EXPORT_SYMBOL(lu_site_print);
-enum {
- LU_CACHE_PERCENT_MAX = 50,
- LU_CACHE_PERCENT_DEFAULT = 20
-};
-
-static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-module_param(lu_cache_percent, int, 0644);
-MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
-
/**
* Return desired hash table order.
*/
-static int lu_htable_order(void)
+static unsigned long lu_htable_order(struct lu_device *top)
{
+ unsigned long bits_max = LU_SITE_BITS_MAX;
unsigned long cache_size;
- int bits;
+ unsigned long bits;
/*
* Calculate hash table size, assuming that we want reasonable
@@ -854,7 +906,7 @@ static int lu_htable_order(void)
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
}
- return bits;
+ return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
}
static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
@@ -930,28 +982,18 @@ static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
/**
* Initialize site \a s, with \a d as the top level device.
*/
-#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 19
-/**
- * total 256 buckets, we don't want too many buckets because:
- * - consume too much memory
- * - avoid unbalanced LRU list
- */
-#define LU_SITE_BKT_BITS 8
-
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
struct lu_site_bkt_data *bkt;
struct cfs_hash_bd bd;
+ unsigned long bits;
+ unsigned long i;
char name[16];
- int bits;
- int i;
memset(s, 0, sizeof(*s));
- bits = lu_htable_order();
- snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
- for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
- bits >= LU_SITE_BITS_MIN; bits--) {
+ mutex_init(&s->ls_purge_mutex);
+ snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
+ for (bits = lu_htable_order(top); bits >= LU_SITE_BITS_MIN; bits--) {
s->ls_obj_hash = cfs_hash_create(name, bits, bits,
bits - LU_SITE_BKT_BITS,
sizeof(*bkt), 0, 0,
@@ -959,13 +1001,14 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
CFS_HASH_SPIN_BKTLOCK |
CFS_HASH_NO_ITEMREF |
CFS_HASH_DEPTH |
- CFS_HASH_ASSERT_EMPTY);
+ CFS_HASH_ASSERT_EMPTY |
+ CFS_HASH_COUNTER);
if (s->ls_obj_hash)
break;
}
if (!s->ls_obj_hash) {
- CERROR("failed to create lu_site hash with bits: %d\n", bits);
+ CERROR("failed to create lu_site hash with bits: %lu\n", bits);
return -ENOMEM;
}
@@ -1082,8 +1125,10 @@ EXPORT_SYMBOL(lu_device_put);
*/
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
- if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start)
+ if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
+ t->ldt_ops->ldto_start)
t->ldt_ops->ldto_start(t);
+
memset(d, 0, sizeof(*d));
atomic_set(&d->ld_ref, 0);
d->ld_type = t;
@@ -1098,9 +1143,8 @@ EXPORT_SYMBOL(lu_device_init);
*/
void lu_device_fini(struct lu_device *d)
{
- struct lu_device_type *t;
+ struct lu_device_type *t = d->ld_type;
- t = d->ld_type;
if (d->ld_obd) {
d->ld_obd->obd_lu_dev = NULL;
d->ld_obd = NULL;
@@ -1109,8 +1153,10 @@ void lu_device_fini(struct lu_device *d)
lu_ref_fini(&d->ld_reference);
LASSERTF(atomic_read(&d->ld_ref) == 0,
"Refcount is %u\n", atomic_read(&d->ld_ref));
- LASSERT(t->ldt_device_nr > 0);
- if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop)
+ LASSERT(atomic_read(&t->ldt_device_nr) > 0);
+
+ if (atomic_dec_and_test(&t->ldt_device_nr) &&
+ t->ldt_ops->ldto_stop)
t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
@@ -1254,7 +1300,6 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
}
}
}
-EXPORT_SYMBOL(lu_stack_fini);
enum {
/**
@@ -1281,7 +1326,7 @@ static unsigned key_set_version;
int lu_context_key_register(struct lu_context_key *key)
{
int result;
- int i;
+ unsigned int i;
LASSERT(key->lct_init);
LASSERT(key->lct_fini);
@@ -1476,18 +1521,16 @@ void lu_context_key_quiesce(struct lu_context_key *key)
++key_set_version;
}
}
-EXPORT_SYMBOL(lu_context_key_quiesce);
void lu_context_key_revive(struct lu_context_key *key)
{
key->lct_tags &= ~LCT_QUIESCENT;
++key_set_version;
}
-EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
- int i;
+ unsigned int i;
if (!ctx->lc_value)
return;
@@ -1501,7 +1544,7 @@ static void keys_fini(struct lu_context *ctx)
static int keys_fill(struct lu_context *ctx)
{
- int i;
+ unsigned int i;
LINVRNT(ctx->lc_value);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
@@ -1614,7 +1657,7 @@ EXPORT_SYMBOL(lu_context_enter);
*/
void lu_context_exit(struct lu_context *ctx)
{
- int i;
+ unsigned int i;
LINVRNT(ctx->lc_state == LCS_ENTERED);
ctx->lc_state = LCS_LEFT;
@@ -1642,7 +1685,6 @@ int lu_context_refill(struct lu_context *ctx)
{
return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
-EXPORT_SYMBOL(lu_context_refill);
/**
* lu_ctx_tags/lu_ses_tags will be updated if there are new types of
@@ -1696,7 +1738,7 @@ static void lu_site_stats_get(struct cfs_hash *hs,
struct lu_site_stats *stats, int populated)
{
struct cfs_hash_bd bd;
- int i;
+ unsigned int i;
cfs_hash_for_each_bucket(hs, &bd, i) {
struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
@@ -1940,3 +1982,73 @@ void lu_kmem_fini(struct lu_kmem_descr *caches)
}
}
EXPORT_SYMBOL(lu_kmem_fini);
+
+void lu_buf_free(struct lu_buf *buf)
+{
+ LASSERT(buf);
+ if (buf->lb_buf) {
+ LASSERT(buf->lb_len > 0);
+ kvfree(buf->lb_buf);
+ buf->lb_buf = NULL;
+ buf->lb_len = 0;
+ }
+}
+EXPORT_SYMBOL(lu_buf_free);
+
+void lu_buf_alloc(struct lu_buf *buf, size_t size)
+{
+ LASSERT(buf);
+ LASSERT(!buf->lb_buf);
+ LASSERT(!buf->lb_len);
+ buf->lb_buf = libcfs_kvzalloc(size, GFP_NOFS);
+ if (likely(buf->lb_buf))
+ buf->lb_len = size;
+}
+EXPORT_SYMBOL(lu_buf_alloc);
+
+void lu_buf_realloc(struct lu_buf *buf, size_t size)
+{
+ lu_buf_free(buf);
+ lu_buf_alloc(buf, size);
+}
+EXPORT_SYMBOL(lu_buf_realloc);
+
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
+{
+ if (!buf->lb_buf && !buf->lb_len)
+ lu_buf_alloc(buf, len);
+
+ if ((len > buf->lb_len) && buf->lb_buf)
+ lu_buf_realloc(buf, len);
+
+ return buf;
+}
+EXPORT_SYMBOL(lu_buf_check_and_alloc);
+
+/**
+ * Increase the size of the \a buf.
+ * Preserves the old data in the buffer;
+ * the old buffer remains unchanged on error.
+ * \retval 0 or -ENOMEM
+ */
+int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
+{
+ char *ptr;
+
+ if (len <= buf->lb_len)
+ return 0;
+
+ ptr = libcfs_kvzalloc(len, GFP_NOFS);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Copy the old data over, then free the old buffer */
+ if (buf->lb_buf) {
+ memcpy(ptr, buf->lb_buf, buf->lb_len);
+ kvfree(buf->lb_buf);
+ }
+
+ buf->lb_buf = ptr;
+ buf->lb_len = len;
+ return 0;
+}
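lu_buf_check_and_grow() only ever enlarges a buffer, copies the existing contents into the new allocation, and leaves the old buffer untouched if the allocation fails. A tiny userspace equivalent of that contract using calloc/memcpy (names are ad hoc):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	void *b_buf;
	size_t b_len;
};

/* Grow buf to at least len bytes, keeping old data; 0 or -1 on ENOMEM. */
static int buf_check_and_grow(struct buf *buf, size_t len)
{
	void *ptr;

	if (len <= buf->b_len)
		return 0;		/* already big enough */

	ptr = calloc(1, len);
	if (!ptr)
		return -1;		/* old buffer stays valid */

	if (buf->b_buf) {
		memcpy(ptr, buf->b_buf, buf->b_len);
		free(buf->b_buf);
	}
	buf->b_buf = ptr;
	buf->b_len = len;
	return 0;
}

int main(void)
{
	struct buf b = { NULL, 0 };

	buf_check_and_grow(&b, 8);
	strcpy(b.b_buf, "linkea");	/* pretend this is a link EA header */
	buf_check_and_grow(&b, 64);	/* grow; the string must survive */
	printf("len=%zu data=%s\n", b.b_len, (char *)b.b_buf);
	free(b.b_buf);
	return 0;
}

Growth always goes through a fresh allocation rather than an in-place resize, presumably because the kernel buffers may be vmalloc-backed (they come from libcfs_kvzalloc() and are released with kvfree()).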
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index 082f530c527c..c9445e5ec271 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -130,7 +130,7 @@ void class_handle_unhash(struct portals_handle *h)
}
EXPORT_SYMBOL(class_handle_unhash);
-void *class_handle2object(__u64 cookie)
+void *class_handle2object(__u64 cookie, const void *owner)
{
struct handle_bucket *bucket;
struct portals_handle *h;
@@ -145,7 +145,7 @@ void *class_handle2object(__u64 cookie)
rcu_read_lock();
list_for_each_entry_rcu(h, &bucket->head, h_link) {
- if (h->h_cookie != cookie)
+ if (h->h_cookie != cookie || h->h_owner != owner)
continue;
spin_lock(&h->h_lock);
@@ -164,8 +164,11 @@ EXPORT_SYMBOL(class_handle2object);
void class_handle_free_cb(struct rcu_head *rcu)
{
- struct portals_handle *h = RCU2HANDLE(rcu);
- void *ptr = (void *)(unsigned long)h->h_cookie;
+ struct portals_handle *h;
+ void *ptr;
+
+ h = container_of(rcu, struct portals_handle, h_rcu);
+ ptr = (void *)(unsigned long)h->h_cookie;
if (h->h_ops->hop_free)
h->h_ops->hop_free(ptr, h->h_size);
@@ -214,7 +217,7 @@ static int cleanup_all_handles(void)
struct portals_handle *h;
spin_lock(&handle_hash[i].lock);
- list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+ list_for_each_entry_rcu(h, &handle_hash[i].head, h_link) {
CERROR("force clean handle %#llx addr %p ops %p\n",
h->h_cookie, h, h->h_ops);
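With the extra owner argument, class_handle2object() matches a handle only when both the cookie and the owner token agree, so different users of the handle table cannot resolve each other's cookies. A toy lookup over an array showing the cookie-plus-owner match (structures and names invented for the illustration):

#include <stdio.h>
#include <stdint.h>

/* Toy handle: a cookie plus an opaque owner token, as in the new lookup. */
struct toy_handle {
	uint64_t cookie;
	const void *owner;
	const char *payload;
};

/* Return the payload only when both the cookie and the owner match. */
static const char *toy_handle2object(const struct toy_handle *tbl, size_t n,
				     uint64_t cookie, const void *owner)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].cookie == cookie && tbl[i].owner == owner)
			return tbl[i].payload;
	return NULL;
}

static const char mdc_tag[]  = "mdc";	/* distinct owner tokens */
static const char ldlm_tag[] = "ldlm";

int main(void)
{
	struct toy_handle tbl[] = {
		{ 0x1234, mdc_tag,  "mdc export" },
		{ 0x1234, ldlm_tag, "ldlm lock"  },
	};
	const char *obj;

	/* Same cookie, different owners: each caller finds only its own. */
	obj = toy_handle2object(tbl, 2, 0x1234, ldlm_tag);
	printf("ldlm lookup -> %s\n", obj ? obj : "(none)");
	obj = toy_handle2object(tbl, 2, 0x1234, mdc_tag);
	printf("mdc lookup  -> %s\n", obj ? obj : "(none)");
	return 0;
}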
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5974a9bf77c0..ffa740aa861c 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -139,7 +139,6 @@ int class_add_uuid(const char *uuid, __u64 nid)
}
return 0;
}
-EXPORT_SYMBOL(class_add_uuid);
/* Delete the nids for one uuid if specified, otherwise delete all */
int class_del_uuid(const char *uuid)
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 0eab1236501b..bbed1b72d52e 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_CLASS
#include "../include/obd_class.h"
#include <linux/string.h>
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_log.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
@@ -237,7 +238,7 @@ static int class_attach(struct lustre_cfg *lcfg)
/* recovery data */
init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
- llog_group_init(&obd->obd_olg, FID_SEQ_LLOG);
+ llog_group_init(&obd->obd_olg);
obd->obd_conn_inprogress = 0;
@@ -250,15 +251,6 @@ static int class_attach(struct lustre_cfg *lcfg)
}
memcpy(obd->obd_uuid.uuid, uuid, len);
- /* do the attach */
- if (OBP(obd, attach)) {
- rc = OBP(obd, attach)(obd, sizeof(*lcfg), lcfg);
- if (rc) {
- rc = -EINVAL;
- goto out;
- }
- }
-
/* Detach drops this */
spin_lock(&obd->obd_dev_lock);
atomic_set(&obd->obd_refcount, 1);
@@ -422,17 +414,12 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
/* Leave this on forever */
obd->obd_stopping = 1;
-
- /* wait for already-arrived-connections to finish. */
- while (obd->obd_conn_inprogress > 0) {
- spin_unlock(&obd->obd_dev_lock);
-
- cond_resched();
-
- spin_lock(&obd->obd_dev_lock);
- }
spin_unlock(&obd->obd_dev_lock);
+ while (obd->obd_conn_inprogress > 0)
+ yield();
+ smp_rmb();
+
if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
switch (*flag) {
@@ -526,11 +513,6 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source)
CERROR("Cleanup %s returned %d\n",
obd->obd_name, err);
}
- if (OBP(obd, detach)) {
- err = OBP(obd, detach)(obd);
- if (err)
- CERROR("Detach returned %d\n", err);
- }
class_release_dev(obd);
}
}
@@ -756,7 +738,7 @@ static int process_param2_config(struct lustre_cfg *lcfg)
}
start = ktime_get();
- rc = call_usermodehelper(argv[0], argv, NULL, 1);
+ rc = call_usermodehelper(argv[0], argv, NULL, UMH_WAIT_PROC);
end = ktime_get();
if (rc < 0) {
@@ -1026,7 +1008,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
oldfs = get_fs();
set_fs(KERNEL_DS);
- rc = (var->fops->write)(&fakefile, sval,
+ rc = var->fops->write(&fakefile, sval,
vallen, NULL);
set_fs(oldfs);
}
@@ -1060,8 +1042,6 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
}
EXPORT_SYMBOL(class_process_proc_param);
-extern int lustre_check_exclusion(struct super_block *sb, char *svname);
-
/** Parse a configuration llog, doing various manipulations on them
* for various reasons, (modifications for compatibility, skip obsolete
* records, change uuids, etc), then class_process_config() resulting
@@ -1317,33 +1297,33 @@ static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
if (rc < 0)
return rc;
- ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command);
+ ptr += snprintf(ptr, end - ptr, "cmd=%05x ", lcfg->lcfg_command);
if (lcfg->lcfg_flags)
- ptr += snprintf(ptr, end-ptr, "flags=%#08x ",
+ ptr += snprintf(ptr, end - ptr, "flags=%#08x ",
lcfg->lcfg_flags);
if (lcfg->lcfg_num)
- ptr += snprintf(ptr, end-ptr, "num=%#08x ", lcfg->lcfg_num);
+ ptr += snprintf(ptr, end - ptr, "num=%#08x ", lcfg->lcfg_num);
if (lcfg->lcfg_nid) {
char nidstr[LNET_NIDSTR_SIZE];
libcfs_nid2str_r(lcfg->lcfg_nid, nidstr, sizeof(nidstr));
- ptr += snprintf(ptr, end-ptr, "nid=%s(%#llx)\n ",
+ ptr += snprintf(ptr, end - ptr, "nid=%s(%#llx)\n ",
nidstr, lcfg->lcfg_nid);
}
if (lcfg->lcfg_command == LCFG_MARKER) {
struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
- ptr += snprintf(ptr, end-ptr, "marker=%d(%#x)%s '%s'",
+ ptr += snprintf(ptr, end - ptr, "marker=%d(%#x)%s '%s'",
marker->cm_step, marker->cm_flags,
marker->cm_tgtname, marker->cm_comment);
} else {
int i;
for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- ptr += snprintf(ptr, end-ptr, "%d:%s ", i,
+ ptr += snprintf(ptr, end - ptr, "%d:%s ", i,
lustre_cfg_string(lcfg, i));
}
}
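Note: class_config_parse_rec() above builds its debug line with repeated "ptr += snprintf(ptr, end - ptr, ...)" calls. Because snprintf() returns the length that *would* have been written, a bounded append helper usually clamps the advance so the cursor never runs past the buffer. The sketch below illustrates that general pattern; buf_appendf is a hypothetical helper, not something introduced by this patch (whose hunks only adjust spacing).

#include <stdarg.h>
#include <stdio.h>

/* Append formatted text to the window [*pos, end); never advance past end.
 * Returns the number of characters actually stored. */
static int buf_appendf(char **pos, char *end, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (*pos >= end)
		return 0;

	va_start(ap, fmt);
	n = vsnprintf(*pos, end - *pos, fmt, ap);
	va_end(ap);

	if (n < 0)
		return 0;
	if (n > end - *pos - 1)
		n = end - *pos - 1;     /* output was truncated */
	*pos += n;
	return n;
}

int main(void)
{
	char line[64], *p = line, *end = line + sizeof(line);

	buf_appendf(&p, end, "cmd=%05x ", 0xcf001);
	buf_appendf(&p, end, "num=%#08x ", 7);
	puts(line);
	return 0;
}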
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index aa84a50e9904..0d3a3b05a637 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -37,11 +37,11 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#define D_MOUNT (D_SUPER|D_CONFIG/*|D_WARNING */)
+#define D_MOUNT (D_SUPER | D_CONFIG/*|D_WARNING */)
#define PRINT_CMD CDEBUG
#include "../include/obd.h"
-#include "../include/linux/lustre_compat25.h"
+#include "../include/lustre_compat.h"
#include "../include/obd_class.h"
#include "../include/lustre/lustre_user.h"
#include "../include/lustre_log.h"
@@ -68,7 +68,7 @@ static void (*kill_super_cb)(struct super_block *sb);
* this log, and is added to the mgc's list of logs to follow.
*/
int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
+ struct config_llog_instance *cfg)
{
struct lustre_cfg *lcfg;
struct lustre_cfg_bufs *bufs;
@@ -384,17 +384,15 @@ int lustre_start_mgc(struct super_block *sb)
OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
OBD_CONNECT_LVB_TYPE;
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+#if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
#endif
if (lmd_is_client(lsi->lsi_lmd) &&
lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)
data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV;
data->ocd_version = LUSTRE_VERSION_CODE;
- rc = obd_connect(NULL, &exp, obd, &(obd->obd_uuid), data, NULL);
+ rc = obd_connect(NULL, &exp, obd, &obd->obd_uuid, data, NULL);
if (rc) {
CERROR("connect failed %d\n", rc);
goto out;
@@ -670,7 +668,6 @@ int lustre_common_put_super(struct super_block *sb)
}
/* Drop a ref to the mounted disk */
lustre_put_lsi(sb);
- lu_types_stop();
return rc;
}
EXPORT_SYMBOL(lustre_common_put_super);
@@ -731,7 +728,7 @@ int lustre_check_exclusion(struct super_block *sb, char *svname)
static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
{
const char *s1 = ptr, *s2;
- __u32 index, *exclude_list;
+ __u32 index = 0, *exclude_list;
int rc = 0, devmax;
/* The shortest an ost name can be is 8 chars: -OST0000.
@@ -758,7 +755,7 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
exclude_list[lmd->lmd_exclude_count++] = index;
else
CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
- (uint)(s2-s1), s1, rc);
+ (uint)(s2 - s1), s1, rc);
s1 = s2;
/* now we are pointing at ':' (next exclude)
* or ',' (end of excludes)
@@ -880,7 +877,7 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
*/
static int lmd_parse(char *options, struct lustre_mount_data *lmd)
{
- char *s1, *s2, *devname = NULL;
+ char *s1, *s2, *s3, *devname = NULL;
struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
int rc = 0;
@@ -913,6 +910,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
/* Skip whitespace and extra commas */
while (*s1 == ' ' || *s1 == ',')
s1++;
+ s3 = s1;
/* Client options are parsed in ll_options: eg. flock,
* user_xattr, acl
@@ -970,6 +968,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
rc = lmd_parse_mgssec(lmd, s1 + 7);
if (rc)
goto invalid;
+ s3 = s2;
clear++;
/* ost exclusion list */
} else if (strncmp(s1, "exclude=", 8) == 0) {
@@ -990,10 +989,19 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
size_t length, params_length;
char *tail = strchr(s1 + 6, ',');
- if (!tail)
+ if (!tail) {
length = strlen(s1);
- else
- length = tail - s1;
+ } else {
+ lnet_nid_t nid;
+ char *param_str = tail + 1;
+ int supplementary = 1;
+
+ while (!class_parse_nid_quiet(param_str, &nid,
+ &param_str)) {
+ supplementary = 0;
+ }
+ length = param_str - s1 - supplementary;
+ }
length -= 6;
params_length = strlen(lmd->lmd_params);
if (params_length + length + 1 >= LMD_PARAMS_MAXLEN)
@@ -1001,6 +1009,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
strncat(lmd->lmd_params, s1 + 6, length);
lmd->lmd_params[params_length + length] = '\0';
strlcat(lmd->lmd_params, " ", LMD_PARAMS_MAXLEN);
+ s3 = s1 + 6 + length;
clear++;
} else if (strncmp(s1, "osd=", 4) == 0) {
rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4);
@@ -1097,7 +1106,7 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
struct lustre_sb_info *lsi;
int rc;
- CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
+ CDEBUG(D_MOUNT | D_VFSTRACE, "VFS Op: sb %p\n", sb);
lsi = lustre_init_lsi(sb);
if (!lsi)
@@ -1133,7 +1142,7 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
} else {
rc = lustre_start_mgc(sb);
if (rc) {
- lustre_put_lsi(sb);
+ lustre_common_put_super(sb);
goto out;
}
/* Connect and start */
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 8583a4a8c206..79104a66da96 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -112,7 +112,7 @@ void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
}
EXPORT_SYMBOL(obdo_from_inode);
-void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj)
+void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
{
ioobj->ioo_oid = oa->o_oi;
if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP)))
@@ -125,7 +125,8 @@ void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj)
}
EXPORT_SYMBOL(obdo_to_ioobj);
-static void iattr_from_obdo(struct iattr *attr, struct obdo *oa, u32 valid)
+static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
+ u32 valid)
{
valid &= oa->o_valid;
@@ -152,12 +153,14 @@ static void iattr_from_obdo(struct iattr *attr, struct obdo *oa, u32 valid)
}
#if 0 /* you shouldn't be able to change a file's type with setattr */
if (valid & OBD_MD_FLTYPE) {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT)|(oa->o_mode & S_IFMT);
+ attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
+ (oa->o_mode & S_IFMT);
attr->ia_valid |= ATTR_MODE;
}
#endif
if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
+ attr->ia_mode = (attr->ia_mode & S_IFMT) |
+ (oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
!capable(CFS_CAP_FSETID))
@@ -173,7 +176,7 @@ static void iattr_from_obdo(struct iattr *attr, struct obdo *oa, u32 valid)
}
}
-void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
+void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid)
{
iattr_from_obdo(&op_data->op_attr, oa, valid);
if (valid & OBD_MD_FLBLOCKS) {
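Note: the iattr_from_obdo() hunks above merge mode bits by masking with S_IFMT: the file-type bits live in the S_IFMT field and the permission bits in its complement, so (old & ~S_IFMT) | (new & S_IFMT) replaces only the type, while the mirrored expression replaces only the permissions. A tiny standalone check of that arithmetic:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	mode_t old_mode = S_IFREG | 0644;   /* regular file, rw-r--r-- */
	mode_t new_mode = S_IFDIR | 0755;   /* directory,   rwxr-xr-x */

	/* Keep the old permissions, take the new file type. */
	mode_t typed = (old_mode & ~S_IFMT) | (new_mode & S_IFMT);
	/* Keep the old file type, take the new permissions. */
	mode_t permed = (old_mode & S_IFMT) | (new_mode & ~S_IFMT);

	assert(typed == (S_IFDIR | 0644));
	assert(permed == (S_IFREG | 0755));
	return 0;
}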
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 5b29c4a44fe5..505582ff4d1e 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -41,6 +41,7 @@
#include "../include/cl_object.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_acl.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_net.h"
#include "echo_internal.h"
@@ -64,14 +65,14 @@ struct echo_object {
struct echo_device *eo_dev;
struct list_head eo_obj_chain;
- struct lov_stripe_md *eo_lsm;
+ struct lov_oinfo *eo_oinfo;
atomic_t eo_npages;
int eo_deleted;
};
struct echo_object_conf {
struct cl_object_conf eoc_cl;
- struct lov_stripe_md **eoc_md;
+ struct lov_oinfo **eoc_oinfo;
};
struct echo_page {
@@ -152,9 +153,6 @@ struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
}
/** @} echo_helpers */
-
-static struct echo_object *cl_echo_object_find(struct echo_device *d,
- struct lov_stripe_md **lsm);
static int cl_echo_object_put(struct echo_object *eco);
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
@@ -413,10 +411,13 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
cconf = lu2cl_conf(conf);
econf = cl2echo_conf(cconf);
- LASSERT(econf->eoc_md);
- eco->eo_lsm = *econf->eoc_md;
- /* clear the lsm pointer so that it won't get freed. */
- *econf->eoc_md = NULL;
+ LASSERT(econf->eoc_oinfo);
+ /*
+ * Transfer the oinfo pointer to eco so that it won't be
+ * freed.
+ */
+ eco->eo_oinfo = *econf->eoc_oinfo;
+ *econf->eoc_oinfo = NULL;
eco->eo_dev = ed;
atomic_set(&eco->eo_npages, 0);
@@ -429,52 +430,6 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
return 0;
}
-/* taken from osc_unpackmd() */
-static int echo_alloc_memmd(struct echo_device *ed,
- struct lov_stripe_md **lsmp)
-{
- int lsm_size;
-
- /* If export is lov/osc then use their obd method */
- if (ed->ed_next)
- return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
- /* OFD has no unpackmd method, do everything here */
- lsm_size = lov_stripe_md_size(1);
-
- LASSERT(!*lsmp);
- *lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (!*lsmp)
- return -ENOMEM;
-
- (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS);
- if (!(*lsmp)->lsm_oinfo[0]) {
- kfree(*lsmp);
- return -ENOMEM;
- }
-
- loi_init((*lsmp)->lsm_oinfo[0]);
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- ostid_set_seq_echo(&(*lsmp)->lsm_oi);
-
- return lsm_size;
-}
-
-static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
-{
- int lsm_size;
-
- /* If export is lov/osc then use their obd method */
- if (ed->ed_next)
- return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
- /* OFD has no unpackmd method, do everything here */
- lsm_size = lov_stripe_md_size(1);
-
- kfree((*lsmp)->lsm_oinfo[0]);
- kfree(*lsmp);
- *lsmp = NULL;
- return 0;
-}
-
static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
@@ -489,8 +444,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
- if (eco->eo_lsm)
- echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
+ kfree(eco->eo_oinfo);
kmem_cache_free(echo_object_kmem, eco);
}
@@ -864,25 +818,21 @@ static struct lu_device_type echo_device_type = {
*/
/* Interfaces to echo client obd device */
-static struct echo_object *cl_echo_object_find(struct echo_device *d,
- struct lov_stripe_md **lsmp)
+static struct echo_object *
+cl_echo_object_find(struct echo_device *d, const struct ost_id *oi)
{
struct lu_env *env;
struct echo_thread_info *info;
struct echo_object_conf *conf;
- struct lov_stripe_md *lsm;
+ struct lov_oinfo *oinfo = NULL;
struct echo_object *eco;
struct cl_object *obj;
struct lu_fid *fid;
int refcheck;
int rc;
- LASSERT(lsmp);
- lsm = *lsmp;
- LASSERT(lsm);
- LASSERTF(ostid_id(&lsm->lsm_oi) != 0, DOSTID"\n", POSTID(&lsm->lsm_oi));
- LASSERTF(ostid_seq(&lsm->lsm_oi) == FID_SEQ_ECHO, DOSTID"\n",
- POSTID(&lsm->lsm_oi));
+ LASSERTF(ostid_id(oi), DOSTID "\n", POSTID(oi));
+ LASSERTF(ostid_seq(oi) == FID_SEQ_ECHO, DOSTID "\n", POSTID(oi));
/* Never return an object if the obd is to be freed. */
if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
@@ -895,16 +845,24 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
info = echo_env_info(env);
conf = &info->eti_conf;
if (d->ed_next) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
+ oinfo = kzalloc(sizeof(*oinfo), GFP_NOFS);
+ if (!oinfo) {
+ eco = ERR_PTR(-ENOMEM);
+ goto out;
+ }
- LASSERT(oinfo);
- oinfo->loi_oi = lsm->lsm_oi;
+ oinfo->loi_oi = *oi;
conf->eoc_cl.u.coc_oinfo = oinfo;
}
- conf->eoc_md = lsmp;
+
+ /*
+ * If echo_object_init() is successful then ownership of oinfo
+ * is transferred to the object.
+ */
+ conf->eoc_oinfo = &oinfo;
fid = &info->eti_fid;
- rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
+ rc = ostid_to_fid(fid, (struct ost_id *)oi, 0);
if (rc != 0) {
eco = ERR_PTR(rc);
goto out;
@@ -927,6 +885,7 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
}
out:
+ kfree(oinfo);
cl_env_put(env, &refcheck);
return eco;
}
@@ -1051,7 +1010,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct cl_io *io;
struct cl_page *clp;
struct lustre_handle lh = { 0 };
- int page_size = cl_page_size(obj);
+ size_t page_size = cl_page_size(obj);
int refcheck;
int rc;
int i;
@@ -1145,7 +1104,6 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
{
struct echo_object *eco;
struct echo_client_obd *ec = ed->ed_ec;
- struct lov_stripe_md *lsm = NULL;
int rc;
int created = 0;
@@ -1156,30 +1114,19 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
return -EINVAL;
}
- rc = echo_alloc_memmd(ed, &lsm);
- if (rc < 0) {
- CERROR("Cannot allocate md: rc = %d\n", rc);
- goto failed;
- }
-
- /* setup object ID here */
- lsm->lsm_oi = oa->o_oi;
+ if (!ostid_id(&oa->o_oi))
+ ostid_set_id(&oa->o_oi, ++last_object_id);
- if (ostid_id(&lsm->lsm_oi) == 0)
- ostid_set_id(&lsm->lsm_oi, ++last_object_id);
-
- rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
+ rc = obd_create(env, ec->ec_exp, oa, oti);
if (rc != 0) {
CERROR("Cannot create objects: rc = %d\n", rc);
goto failed;
}
created = 1;
- /* See what object ID we were given */
- oa->o_oi = lsm->lsm_oi;
oa->o_valid |= OBD_MD_FLID;
- eco = cl_echo_object_find(ed, &lsm);
+ eco = cl_echo_object_find(ed, &oa->o_oi);
if (IS_ERR(eco)) {
rc = PTR_ERR(eco);
goto failed;
@@ -1190,9 +1137,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
failed:
if (created && rc)
- obd_destroy(env, ec->ec_exp, oa, lsm, oti, NULL);
- if (lsm)
- echo_free_memmd(ed, &lsm);
+ obd_destroy(env, ec->ec_exp, oa, oti);
if (rc)
CERROR("create object failed with: rc = %d\n", rc);
return rc;
@@ -1201,32 +1146,21 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
struct obdo *oa)
{
- struct lov_stripe_md *lsm = NULL;
struct echo_object *eco;
int rc;
- if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
- /* disallow use of object id 0 */
- CERROR("No valid oid\n");
+ if (!(oa->o_valid & OBD_MD_FLID) || !(oa->o_valid & OBD_MD_FLGROUP) ||
+ !ostid_id(&oa->o_oi)) {
+ CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi));
return -EINVAL;
}
- rc = echo_alloc_memmd(ed, &lsm);
- if (rc < 0)
- return rc;
-
- lsm->lsm_oi = oa->o_oi;
- if (!(oa->o_valid & OBD_MD_FLGROUP))
- ostid_set_seq_echo(&lsm->lsm_oi);
-
rc = 0;
- eco = cl_echo_object_find(ed, &lsm);
+ eco = cl_echo_object_find(ed, &oa->o_oi);
if (!IS_ERR(eco))
*ecop = eco;
else
rc = PTR_ERR(eco);
- if (lsm)
- echo_free_memmd(ed, &lsm);
return rc;
}
@@ -1436,13 +1370,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
npages = tot_pages;
for (i = 0; i < npages; i++, off += PAGE_SIZE) {
- rnb[i].offset = off;
- rnb[i].len = PAGE_SIZE;
- rnb[i].flags = brw_flags;
+ rnb[i].rnb_offset = off;
+ rnb[i].rnb_len = PAGE_SIZE;
+ rnb[i].rnb_flags = brw_flags;
}
ioo.ioo_bufcnt = npages;
- oti->oti_transno = 0;
lpages = npages;
ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
@@ -1452,14 +1385,14 @@ static int echo_client_prep_commit(const struct lu_env *env,
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
- struct page *page = lnb[i].page;
+ struct page *page = lnb[i].lnb_page;
/* read past eof? */
- if (!page && lnb[i].rc == 0)
+ if (!page && lnb[i].lnb_rc == 0)
continue;
if (async)
- lnb[i].flags |= OBD_BRW_ASYNC;
+ lnb[i].lnb_flags |= OBD_BRW_ASYNC;
if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
(oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
@@ -1469,13 +1402,13 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (rw == OBD_BRW_WRITE)
echo_client_page_debug_setup(page, rw,
ostid_id(&oa->o_oi),
- rnb[i].offset,
- rnb[i].len);
+ rnb[i].rnb_offset,
+ rnb[i].rnb_len);
else
echo_client_page_debug_check(page,
ostid_id(&oa->o_oi),
- rnb[i].offset,
- rnb[i].len);
+ rnb[i].rnb_offset,
+ rnb[i].rnb_len);
}
ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
@@ -1613,8 +1546,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa, NULL,
- &dummy_oti, NULL);
+ rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti);
if (rc == 0)
eco->eo_deleted = 1;
echo_put_object(eco);
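Note: the echo_client changes above hand a heap-allocated lov_oinfo to the object through the configuration struct: cl_echo_object_find() stores the pointer in *eoc_oinfo, echo_object_init() takes it and clears the slot, and the unconditional kfree(oinfo) at the out: label therefore frees only on failure paths, because a successful transfer leaves the local pointer NULL. The sketch below shows that ownership-transfer pattern with hypothetical names (payload, conf, object_init), not the Lustre types.

#include <stdio.h>
#include <stdlib.h>

struct payload { int value; };

struct conf {
	struct payload **slot;   /* points at the caller's local pointer */
};

struct object { struct payload *owned; };

/* On success the object takes ownership and clears the caller's pointer
 * through conf->slot, so the caller's unconditional free() is a no-op. */
static int object_init(struct object *obj, struct conf *conf, int fail)
{
	if (fail)
		return -1;              /* ownership stays with the caller */

	obj->owned = *conf->slot;
	*conf->slot = NULL;             /* transfer complete */
	return 0;
}

int main(void)
{
	struct object obj = { NULL };
	struct payload *p = malloc(sizeof(*p));
	struct conf conf = { .slot = &p };

	if (!p)
		return 1;
	p->value = 42;

	if (object_init(&obj, &conf, 0) == 0)
		printf("owned value %d\n", obj.owned->value);

	free(p);            /* NULL on success, the real payload on failure */
	free(obj.owned);    /* frees the payload exactly once */
	return 0;
}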
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
index f5034a253f6d..966414fd5424 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -33,9 +33,9 @@
/* The persistent object (i.e. actually stores stuff!) */
#define ECHO_PERSISTENT_OBJID 1ULL
-#define ECHO_PERSISTENT_SIZE ((__u64)(1<<20))
+#define ECHO_PERSISTENT_SIZE ((__u64)(1 << 20))
/* block size to use for data verification */
-#define OBD_ECHO_BLOCK_SIZE (4<<10)
+#define OBD_ECHO_BLOCK_SIZE (4 << 10)
#endif
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 7e83d395b998..f0062d44ee03 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -119,6 +119,7 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
+ client_adjust_max_dirty(cli);
spin_unlock(&cli->cl_loi_list_lock);
return count;
@@ -136,10 +137,10 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj,
int mult;
spin_lock(&cli->cl_loi_list_lock);
- val = cli->cl_dirty_max;
+ val = cli->cl_dirty_max_pages;
spin_unlock(&cli->cl_loi_list_lock);
- mult = 1 << 20;
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
}
@@ -166,7 +167,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
return -ERANGE;
spin_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
+ cli->cl_dirty_max_pages = pages_number;
osc_wake_cache_waiters(cli);
spin_unlock(&cli->cl_loi_list_lock);
@@ -181,11 +182,11 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
int shift = 20 - PAGE_SHIFT;
seq_printf(m,
- "used_mb: %d\n"
- "busy_cnt: %d\n",
- (atomic_read(&cli->cl_lru_in_list) +
- atomic_read(&cli->cl_lru_busy)) >> shift,
- atomic_read(&cli->cl_lru_busy));
+ "used_mb: %ld\n"
+ "busy_cnt: %ld\n",
+ (atomic_long_read(&cli->cl_lru_in_list) +
+ atomic_long_read(&cli->cl_lru_busy)) >> shift,
+ atomic_long_read(&cli->cl_lru_busy));
return 0;
}
@@ -197,8 +198,10 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &dev->u.cli;
- int pages_number, mult, rc;
+ long pages_number, rc;
char kernbuf[128];
+ int mult;
+ u64 val;
if (count >= sizeof(kernbuf))
return -EINVAL;
@@ -210,14 +213,18 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
if (rc)
return rc;
+ if (val > LONG_MAX)
+ return -ERANGE;
+ pages_number = (long)val;
+
if (pages_number < 0)
return -ERANGE;
- rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
+ rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
if (rc > 0) {
struct lu_env *env;
int refcheck;
@@ -244,7 +251,7 @@ static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
int len;
spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_dirty);
+ len = sprintf(buf, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT);
spin_unlock(&cli->cl_loi_list_lock);
return len;
@@ -583,6 +590,7 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
}
spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_pages_per_rpc = val;
+ client_adjust_max_dirty(cli);
spin_unlock(&cli->cl_loi_list_lock);
return count;
@@ -596,13 +604,14 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
struct obd_device *dev = container_of(kobj, struct obd_device,
obd_kobj);
struct client_obd *cli = &dev->u.cli;
- int pages, mb;
+ long pages;
+ int mb;
- pages = atomic_read(&cli->cl_unstable_count);
+ pages = atomic_long_read(&cli->cl_unstable_count);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_pages: %8d\n"
- "unstable_mb: %8d\n", pages, mb);
+ return sprintf(buf, "unstable_pages: %20ld\n"
+ "unstable_mb: %10d\n", pages, mb);
}
LUSTRE_RO_ATTR(unstable_stats);
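Note: the lproc_osc hunks above switch the dirty limit from bytes (cl_dirty_max) to pages (cl_dirty_max_pages), so the megabyte conversion factor becomes 1 << (20 - PAGE_SHIFT) pages per MB instead of 1 << 20 bytes. A small arithmetic sketch, assuming 4 KiB pages (PAGE_SHIFT = 12) for the example:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages for this illustration */

int main(void)
{
	unsigned long pages_per_mb = 1UL << (20 - PAGE_SHIFT); /* 256 */
	unsigned long dirty_max_mb = 32;
	unsigned long dirty_max_pages = dirty_max_mb * pages_per_mb;

	/* pages and bytes describe the same limit */
	printf("%lu MB = %lu pages = %lu bytes\n",
	       dirty_max_mb, dirty_max_pages,
	       dirty_max_pages << PAGE_SHIFT);
	return 0;
}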
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index d011135802d5..4bbe219add98 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -44,7 +44,7 @@ static int extent_debug; /* set it to be true for more debug */
static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state);
+ enum osc_extent_state state);
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct osc_async_page *oap, int sent, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
@@ -177,7 +177,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
{
struct osc_object *obj = ext->oe_obj;
struct osc_async_page *oap;
- int page_count;
+ size_t page_count;
int rc = 0;
if (!osc_object_is_locked(obj)) {
@@ -632,7 +632,7 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
*/
static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
- int *grants)
+ unsigned int *grants)
{
struct client_obd *cli = osc_cli(obj);
struct osc_lock *olck;
@@ -643,10 +643,10 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_extent *found = NULL;
pgoff_t chunk;
pgoff_t max_end;
- int max_pages; /* max_pages_per_rpc */
- int chunksize;
+ unsigned int max_pages; /* max_pages_per_rpc */
+ unsigned int chunksize;
int ppc_bits; /* pages per chunk bits */
- int chunk_mask;
+ pgoff_t chunk_mask;
int rc;
cur = osc_extent_alloc(obj);
@@ -700,8 +700,8 @@ restart:
if (!ext)
ext = first_extent(obj);
while (ext) {
- loff_t ext_chk_start = ext->oe_start >> ppc_bits;
- loff_t ext_chk_end = ext->oe_end >> ppc_bits;
+ pgoff_t ext_chk_start = ext->oe_start >> ppc_bits;
+ pgoff_t ext_chk_end = ext->oe_end >> ppc_bits;
LASSERT(sanity_check_nolock(ext) == 0);
if (chunk > ext_chk_end + 1)
@@ -913,7 +913,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
return 0;
}
-static int extent_wait_cb(struct osc_extent *ext, int state)
+static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state)
{
int ret;
@@ -928,7 +928,7 @@ static int extent_wait_cb(struct osc_extent *ext, int state)
* Wait for the extent's state to become @state.
*/
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
- int state)
+ enum osc_extent_state state)
{
struct osc_object *obj = ext->oe_obj;
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
@@ -958,8 +958,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s: wait ext to %d timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ "%s: wait ext to %u timedout, recovery in progress?\n",
+ osc_export(obj)->exp_obd->obd_name, state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -1099,7 +1099,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
struct osc_async_page *oap;
struct osc_async_page *last = NULL;
struct osc_object *obj = ext->oe_obj;
- int page_count = 0;
+ unsigned int page_count = 0;
int rc;
/* we're going to grab page lock, so object lock must not be taken. */
@@ -1140,9 +1140,11 @@ static int osc_extent_make_ready(const struct lu_env *env,
* the size of file.
*/
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
- last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
- LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
+ int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
+
+ LASSERT(last_oap_count > 0);
+ LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
+ last->oap_count = last_oap_count;
spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&last->oap_lock);
@@ -1174,7 +1176,8 @@ static int osc_extent_make_ready(const struct lu_env *env,
* called to expand the extent for the same IO. To expand the extent, the
* page index must be in the same or next chunk of ext->oe_end.
*/
-static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
+static int osc_extent_expand(struct osc_extent *ext, pgoff_t index,
+ unsigned int *grants)
{
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
@@ -1183,7 +1186,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
- int chunksize = 1 << cli->cl_chunkbits;
+ unsigned int chunksize = 1 << cli->cl_chunkbits;
int rc = 0;
LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
@@ -1361,7 +1364,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
if (rc == 0 && srvlock) {
struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
- int bytes = oap->oap_count;
+ size_t bytes = oap->oap_count;
if (crt == CRT_READ)
stats->os_lockless_reads += bytes;
@@ -1383,18 +1386,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
- "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
- "reserved: %ld, flight: %d } lru {in list: %d, " \
- "left: %d, waiters: %d }" fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
+ "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n", \
__tmp->cl_import->imp_obd->obd_name, \
- __tmp->cl_dirty, __tmp->cl_dirty_max, \
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
- atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
+ __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
+ atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
__tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- atomic_read(&__tmp->cl_lru_in_list), \
- atomic_read(&__tmp->cl_lru_busy), \
+ atomic_long_read(&__tmp->cl_lru_in_list), \
+ atomic_long_read(&__tmp->cl_lru_busy), \
atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
@@ -1404,8 +1405,8 @@ static void osc_consume_write_grant(struct client_obd *cli,
{
assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_SIZE;
+ atomic_long_inc(&obd_dirty_pages);
+ cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
PAGE_SIZE, pga, pga->pg);
@@ -1424,12 +1425,12 @@ static void osc_release_write_grant(struct client_obd *cli,
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_SIZE;
+ atomic_long_dec(&obd_dirty_pages);
+ cli->cl_dirty_pages--;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_SIZE;
+ atomic_long_dec(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit--;
}
}
@@ -1494,11 +1495,11 @@ static void osc_unreserve_grant(struct client_obd *cli,
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
unsigned int lost_grant)
{
- int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ unsigned long grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
spin_lock(&cli->cl_loi_list_lock);
- atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_SHIFT;
+ atomic_long_sub(nr_pages, &obd_dirty_pages);
+ cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
@@ -1511,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty);
+ cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT);
}
/**
@@ -1535,19 +1536,18 @@ static int osc_enter_cache_try(struct client_obd *cli,
{
int rc;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
- if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
+ if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
+ atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += PAGE_SIZE;
- atomic_inc(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit++;
+ atomic_long_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
rc = 1;
@@ -1581,11 +1581,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi;
int rc = -EDQUOT;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(AT_OFF ? obd_timeout : at_max),
+ NULL, LWI_ON_SIGNAL_NOOP, NULL);
+
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
spin_lock(&cli->cl_loi_list_lock);
@@ -1593,14 +1595,16 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream
*/
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
+ !cli->cl_dirty_max_pages || cli->cl_ar.ar_force_sync ||
+ loi->loi_ar.ar_force_sync) {
+ OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n");
rc = -EDQUOT;
goto out;
}
/* Hopefully normal case - cache space and write credits available */
if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n");
rc = 0;
goto out;
}
@@ -1615,7 +1619,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
init_waitqueue_head(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_grant = bytes;
- while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
+ while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
spin_unlock(&cli->cl_loi_list_lock);
@@ -1629,32 +1633,49 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
spin_lock(&cli->cl_loi_list_lock);
- /* l_wait_event is interrupted by signal, or timed out */
if (rc < 0) {
- if (rc == -ETIMEDOUT) {
- OSC_DUMP_GRANT(D_ERROR, cli,
- "try to reserve %d.\n", bytes);
- osc_extent_tree_dump(D_ERROR, osc);
- rc = -EDQUOT;
- }
-
+ /* l_wait_event is interrupted by signal, or timed out */
list_del_init(&ocw.ocw_entry);
- goto out;
+ break;
}
-
LASSERT(list_empty(&ocw.ocw_entry));
rc = ocw.ocw_rc;
if (rc != -EDQUOT)
- goto out;
+ break;
if (osc_enter_cache_try(cli, oap, bytes, 0)) {
rc = 0;
- goto out;
+ break;
}
}
+
+ switch (rc) {
+ case 0:
+ OSC_DUMP_GRANT(D_CACHE, cli, "finally got grant space\n");
+ break;
+ case -ETIMEDOUT:
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "timeout, fall back to sync i/o\n");
+ osc_extent_tree_dump(D_CACHE, osc);
+ /* fall back to synchronous I/O */
+ rc = -EDQUOT;
+ break;
+ case -EINTR:
+ /* Ensures restartability - LU-3581 */
+ OSC_DUMP_GRANT(D_CACHE, cli, "interrupted\n");
+ rc = -ERESTARTSYS;
+ break;
+ case -EDQUOT:
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "no grant space, fall back to sync i/o\n");
+ break;
+ default:
+ CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
+ cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ break;
+ }
out:
spin_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
return rc;
}
@@ -1670,19 +1691,17 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
- cli->cl_dirty,
- cli->cl_dirty_max, obd_max_dirty_pages);
+ if ((cli->cl_dirty_pages > cli->cl_dirty_max_pages) ||
+ (atomic_long_read(&obd_dirty_pages) + 1 >
+ obd_max_dirty_pages)) {
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %ld\n",
+ cli->cl_dirty_pages, cli->cl_dirty_max_pages,
+ obd_max_dirty_pages);
goto wakeup;
}
- ocw->ocw_rc = 0;
- if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
- ocw->ocw_rc = -EDQUOT;
-
+ if (osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
+ ocw->ocw_rc = 0;
wakeup:
CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
@@ -1843,97 +1862,6 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
ar->ar_force_sync = 0;
}
-/**
- * Performs "unstable" page accounting. This function balances the
- * increment operations performed in osc_inc_unstable_pages. It is
- * registered as the RPC request callback, and is executed when the
- * bulk RPC is committed on the server. Thus at this point, the pages
- * involved in the bulk transfer are no longer considered unstable.
- */
-void osc_dec_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- int page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (!cli->cl_cache)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- dec_node_page_state(desc->bd_iov[i].kiov_page,
- NR_UNSTABLE_NFS);
-
- atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
-
- atomic_sub(page_count, &cli->cl_unstable_count);
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
-
- atomic_sub(page_count, &obd_unstable_pages);
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
-
- spin_lock(&req->rq_lock);
- req->rq_committed = 1;
- req->rq_unstable = 0;
- spin_unlock(&req->rq_lock);
-
- wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
-}
-
-/* "unstable" page accounting. See: osc_dec_unstable_pages. */
-void osc_inc_unstable_pages(struct ptlrpc_request *req)
-{
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- long page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (!cli->cl_cache)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- inc_node_page_state(desc->bd_iov[i].kiov_page,
- NR_UNSTABLE_NFS);
-
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
- atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
-
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
- atomic_add(page_count, &cli->cl_unstable_count);
-
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
- atomic_add(page_count, &obd_unstable_pages);
-
- spin_lock(&req->rq_lock);
-
- /*
- * If the request has already been committed (i.e. brw_commit
- * called via rq_commit_cb), we need to undo the unstable page
- * increments we just performed because rq_commit_cb wont be
- * called again. Otherwise, just set the commit callback so the
- * unstable page accounting is properly updated when the request
- * is committed
- */
- if (req->rq_committed) {
- /* Drop lock before calling osc_dec_unstable_pages */
- spin_unlock(&req->rq_lock);
- osc_dec_unstable_pages(req);
- spin_lock(&req->rq_lock);
- } else {
- req->rq_unstable = 1;
- req->rq_commit_cb = osc_dec_unstable_pages;
- }
-
- spin_unlock(&req->rq_lock);
-}
-
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request
*/
@@ -1945,9 +1873,6 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
__u64 xid = 0;
if (oap->oap_request) {
- if (!rc)
- osc_inc_unstable_pages(oap->oap_request);
-
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1979,7 +1904,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
*/
static int try_to_add_extent_for_io(struct client_obd *cli,
struct osc_extent *ext, struct list_head *rpclist,
- int *pc, unsigned int *max_pages)
+ unsigned int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
@@ -2032,12 +1957,13 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
* 5. Traverse the extent tree from the 1st extent;
* 6. Above steps exit if there is no space in this RPC.
*/
-static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
+static unsigned int get_write_extents(struct osc_object *obj,
+ struct list_head *rpclist)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
struct osc_extent *temp;
- int page_count = 0;
+ unsigned int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
LASSERT(osc_object_is_locked(obj));
@@ -2175,7 +2101,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_extent *ext;
struct osc_extent *next;
LIST_HEAD(rpclist);
- int page_count = 0;
+ unsigned int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
int rc = 0;
@@ -2390,7 +2316,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct client_obd *cli = oap->oap_cli;
struct osc_object *osc = oap->oap_obj;
pgoff_t index;
- int grants = 0;
+ unsigned int grants = 0, tmp;
int brw_flags = OBD_BRW_ASYNC;
int cmd = OBD_BRW_WRITE;
int need_release = 0;
@@ -2434,9 +2360,6 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
return rc;
}
- if (osc_over_unstable_soft_limit(cli))
- brw_flags |= OBD_BRW_SOFT_SYNC;
-
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
@@ -2476,7 +2399,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
grants = 0;
need_release = 1;
} else if (ext->oe_end < index) {
- int tmp = grants;
+ tmp = grants;
/* try to expand this extent */
rc = osc_extent_expand(ext, index, &tmp);
if (rc < 0) {
@@ -2501,7 +2424,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
}
if (!ext) {
- int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
/* try to find new extent to cover this page */
LASSERT(!oio->oi_active);
@@ -2645,7 +2568,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
goto out;
spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
+ oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT;
spin_unlock(&oap->oap_lock);
if (memory_pressure_get())
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index c8c3f1ca77be..9c8de15c309c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -64,7 +64,7 @@ struct osc_io {
/** true if this io is lockless. */
unsigned int oi_lockless;
/** how many LRU pages are reserved for this IO */
- int oi_lru_reserved;
+ unsigned long oi_lru_reserved;
/** active extents, we know how many bytes is going to be written,
* so having an active extent will prevent it from being fragmented
@@ -389,7 +389,7 @@ extern struct lu_device_type osc_device_type;
extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;
-#define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
+#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
@@ -608,7 +608,7 @@ struct osc_extent {
/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
struct list_head oe_link;
/** state of this extent */
- unsigned int oe_state;
+ enum osc_extent_state oe_state;
/** flags for this extent. */
unsigned int oe_intree:1,
/** 0 is write, 1 is read */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 7a27f0961955..67fe0a254991 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -71,7 +71,6 @@ struct osc_async_page {
struct client_obd *oap_cli;
struct osc_object *oap_obj;
- struct ldlm_lock *oap_ldlm_lock;
spinlock_t oap_lock;
};
@@ -134,9 +133,9 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force);
-int osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force);
+long osc_lru_reclaim(struct client_obd *cli);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
@@ -198,7 +197,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
void osc_inc_unstable_pages(struct ptlrpc_request *req);
void osc_dec_unstable_pages(struct ptlrpc_request *req);
-int osc_over_unstable_soft_limit(struct client_obd *cli);
+bool osc_over_unstable_soft_limit(struct client_obd *cli);
struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 6e3dcd38913f..8a559cbcdd0c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -109,11 +109,11 @@ static int osc_io_submit(const struct lu_env *env,
struct cl_page_list *qin = &queue->c2_qin;
struct cl_page_list *qout = &queue->c2_qout;
- int queued = 0;
+ unsigned int queued = 0;
int result = 0;
int cmd;
int brw_flags;
- int max_pages;
+ unsigned int max_pages;
LASSERT(qin->pl_nr > 0);
@@ -163,14 +163,19 @@ static int osc_io_submit(const struct lu_env *env,
continue;
}
- cl_page_list_move(qout, qin, page);
spin_lock(&oap->oap_lock);
- oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
+ oap->oap_async_flags = ASYNC_URGENT | ASYNC_READY;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
osc_page_submit(env, opg, crt, brw_flags);
list_add_tail(&oap->oap_pending_item, &list);
+
+ if (page->cp_sync_io)
+ cl_page_list_move(qout, qin, page);
+ else /* async IO */
+ cl_page_list_del(env, qin, page);
+
if (++queued == max_pages) {
queued = 0;
result = osc_queue_sync_pages(env, osc, &list, cmd,
@@ -195,7 +200,7 @@ static int osc_io_submit(const struct lu_env *env,
* Expand stripe KMS if necessary.
*/
static void osc_page_touch_at(const struct lu_env *env,
- struct cl_object *obj, pgoff_t idx, unsigned to)
+ struct cl_object *obj, pgoff_t idx, size_t to)
{
struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
@@ -228,7 +233,7 @@ static void osc_page_touch_at(const struct lu_env *env,
attr->cat_size = kms;
valid |= CAT_SIZE;
}
- cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
@@ -314,8 +319,8 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
struct osc_object *osc = cl2osc(ios->cis_obj);
struct client_obd *cli = osc_cli(osc);
unsigned long c;
- unsigned int npages;
- unsigned int max_pages;
+ unsigned long npages;
+ unsigned long max_pages;
if (cl_io_is_append(io))
return 0;
@@ -328,15 +333,15 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
if (npages > max_pages)
npages = max_pages;
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
if (c < npages && osc_lru_reclaim(cli) > 0)
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
while (c >= npages) {
- if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
oio->oi_lru_reserved = npages;
break;
}
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
}
return 0;
@@ -350,7 +355,7 @@ static void osc_io_rw_iter_fini(const struct lu_env *env,
struct client_obd *cli = osc_cli(osc);
if (oio->oi_lru_reserved > 0) {
- atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ atomic_long_add(oio->oi_lru_reserved, cli->cl_lru_left);
oio->oi_lru_reserved = 0;
}
oio->oi_write_osclock = NULL;
@@ -364,7 +369,7 @@ static int osc_io_fault_start(const struct lu_env *env,
io = ios->cis_io;
fio = &io->u.ci_fault;
- CDEBUG(D_INFO, "%lu %d %d\n",
+ CDEBUG(D_INFO, "%lu %d %zu\n",
fio->ft_index, fio->ft_writable, fio->ft_nob);
/*
* If mapping is writeable, adjust kms to cover this page,
@@ -471,18 +476,21 @@ static int osc_io_setattr_start(const struct lu_env *env,
attr->cat_ctime = lvb->lvb_ctime;
cl_valid |= CAT_CTIME;
}
- result = cl_object_attr_set(env, obj, attr, cl_valid);
+ result = cl_object_attr_update(env, obj, attr,
+ cl_valid);
}
cl_object_attr_unlock(obj);
}
memset(oa, 0, sizeof(*oa));
if (result == 0) {
oa->o_oi = loi->loi_oi;
+ obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
+ oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
oa->o_mtime = attr->cat_mtime;
oa->o_atime = attr->cat_atime;
oa->o_ctime = attr->cat_ctime;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
- OBD_MD_FLCTIME | OBD_MD_FLMTIME;
+ oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
+ OBD_MD_FLCTIME | OBD_MD_FLMTIME;
if (ia_valid & ATTR_SIZE) {
oa->o_size = size;
oa->o_blocks = OBD_OBJECT_EOF;
@@ -559,7 +567,7 @@ static int osc_io_read_start(const struct lu_env *env,
if (!slice->cis_io->ci_noatime) {
cl_object_attr_lock(obj);
attr->cat_atime = ktime_get_real_seconds();
- rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
+ rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
cl_object_attr_unlock(obj);
}
return rc;
@@ -576,7 +584,7 @@ static int osc_io_write_start(const struct lu_env *env,
cl_object_attr_lock(obj);
attr->cat_ctime = ktime_get_real_seconds();
attr->cat_mtime = attr->cat_ctime;
- rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
+ rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
cl_object_attr_unlock(obj);
return rc;
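Note: osc_io_rw_iter_init() above reserves LRU slots with a read/compare-exchange loop on cl_lru_left, so concurrent IOs can never drive the counter negative; a failed exchange simply rereads the counter and retries. The kernel code uses atomic_long_cmpxchg(); the sketch below mirrors the same loop with C11 atomics in userspace, with hypothetical names (reserve_lru_slots, release_lru_slots).

#include <stdatomic.h>
#include <stdio.h>

static atomic_long lru_left = 1024;

/* Try to reserve @npages slots; returns the number actually reserved
 * (0 if the pool is too small), mirroring the cmpxchg retry loop. */
static long reserve_lru_slots(long npages)
{
	long c = atomic_load(&lru_left);

	while (c >= npages) {
		if (atomic_compare_exchange_weak(&lru_left, &c, c - npages))
			return npages;
		/* c now holds the current value; recheck and retry */
	}
	return 0;
}

static void release_lru_slots(long npages)
{
	atomic_fetch_add(&lru_left, npages);
}

int main(void)
{
	long got = reserve_lru_slots(100);

	printf("reserved %ld, %ld left\n", got, atomic_load(&lru_left));
	release_lru_slots(got);
	return 0;
}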
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 717d3ffb6789..39a8a5851603 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -222,7 +222,7 @@ static void osc_lock_lvb_update(const struct lu_env *env,
ldlm_lock_allow_match_locked(dlmlock);
}
- cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
@@ -467,7 +467,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
*/
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- cl_object_attr_set(env, obj, attr, CAT_KMS);
+ cl_object_attr_update(env, obj, attr, CAT_KMS);
cl_object_attr_unlock(obj);
unlock_res_and_lock(dlmlock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index d211d1905e83..aae3a2d4243f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -159,8 +159,8 @@ static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
return 0;
}
-static int osc_attr_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
+static int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned int valid)
{
struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
struct ost_lvb *lvb = &oinfo->loi_lvb;
@@ -195,7 +195,6 @@ static int osc_object_glimpse(const struct lu_env *env,
static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
{
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
if (lock->l_ast_data == data)
lock->l_ast_data = NULL;
return LDLM_ITER_CONTINUE;
@@ -262,7 +261,7 @@ static const struct cl_object_operations osc_ops = {
.coo_lock_init = osc_lock_init,
.coo_io_init = osc_io_init,
.coo_attr_get = osc_attr_get,
- .coo_attr_set = osc_attr_set,
+ .coo_attr_update = osc_attr_update,
.coo_glimpse = osc_object_glimpse,
.coo_prune = osc_object_prune
};
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 355f496a2093..2a7a70aa9e80 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -323,32 +323,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
return result;
}
-int osc_over_unstable_soft_limit(struct client_obd *cli)
-{
- long obd_upages, obd_dpages, osc_upages;
-
- /* Can't check cli->cl_unstable_count, therefore, no soft limit */
- if (!cli)
- return 0;
-
- obd_upages = atomic_read(&obd_unstable_pages);
- obd_dpages = atomic_read(&obd_dirty_pages);
-
- osc_upages = atomic_read(&cli->cl_unstable_count);
-
- /*
- * obd_max_dirty_pages is the max number of (dirty + unstable)
- * pages allowed at any given time. To simulate an unstable page
- * only limit, we subtract the current number of dirty pages
- * from this max. This difference is roughly the amount of pages
- * currently available for unstable pages. Thus, the soft limit
- * is half of that difference. Check osc_upages to ensure we don't
- * set SOFT_SYNC for OSCs without any outstanding unstable pages.
- */
- return osc_upages &&
- obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
-}
-
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
@@ -368,9 +342,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
- if (osc_over_unstable_soft_limit(oap->oap_cli))
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-
if (capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
@@ -409,7 +380,7 @@ static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list);
+ long pages = atomic_long_read(&cli->cl_lru_in_list);
unsigned long budget;
budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
@@ -417,7 +388,7 @@ static int osc_cache_too_much(struct client_obd *cli)
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
- if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
if (pages >= budget)
return lru_shrink_max;
else if (pages >= budget / 2)
@@ -444,7 +415,7 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
{
LIST_HEAD(lru);
struct osc_async_page *oap;
- int npages = 0;
+ long npages = 0;
list_for_each_entry(oap, plist, oap_pending_item) {
struct osc_page *opg = oap2osc_page(oap);
@@ -460,8 +431,8 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
if (npages > 0) {
spin_lock(&cli->cl_lru_list_lock);
list_splice_tail(&lru, &cli->cl_lru_list);
- atomic_sub(npages, &cli->cl_lru_busy);
- atomic_add(npages, &cli->cl_lru_in_list);
+ atomic_long_sub(npages, &cli->cl_lru_busy);
+ atomic_long_add(npages, &cli->cl_lru_in_list);
spin_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
@@ -472,9 +443,9 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
+ atomic_long_dec(&cli->cl_lru_in_list);
}
/**
@@ -488,12 +459,12 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
if (!list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
+ LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+ atomic_long_dec(&cli->cl_lru_busy);
}
spin_unlock(&cli->cl_lru_list_lock);
- atomic_inc(cli->cl_lru_left);
+ atomic_long_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
@@ -518,7 +489,7 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
spin_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
spin_unlock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
}
}
@@ -540,10 +511,32 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
}
/**
+ * Check if a cl_page can be released, i.e., it's not being used.
+ *
+ * If unstable accounting is turned on, a bulk transfer may hold one
+ * refcount for recovery, so we need to check the vmpage refcount as well;
+ * otherwise, even if we can destroy the cl_page, the corresponding vmpage
+ * can't be reused.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+ if (cl_page_in_use_noref(page))
+ return true;
+
+ if (cli->cl_cache->ccc_unstable_check) {
+ struct page *vmpage = cl_page_vmpage(page);
+
+ /* vmpage has two known users: cl_page and VM page cache */
+ if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+ return true;
+ }
+ return false;
+}
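+/*
+ * A minimal worked example of the check above, with illustrative counts
+ * only (nothing here is taken from the patch itself): a vmpage referenced
+ * by its cl_page and by the VM page cache, and mapped into one process,
+ * has page_count() == 3 and page_mapcount() == 1, so 3 - 1 == 2 and the
+ * page is not reported busy.  One extra reference held by an in-flight
+ * bulk transfer raises page_count() to 4, so 4 - 1 > 2 and lru_page_busy()
+ * keeps the page on the LRU as busy.
+ */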
+
+/**
* Drop @target of pages from LRU at most.
*/
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force)
{
struct cl_io *io;
struct cl_object *clobj = NULL;
@@ -551,12 +544,12 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
struct osc_page *opg;
struct osc_page *temp;
int maxscan = 0;
- int count = 0;
+ long count = 0;
int index = 0;
int rc = 0;
- LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
if (!force) {
@@ -575,7 +568,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
io = &osc_env_info(env)->oti_io;
spin_lock(&cli->cl_lru_list_lock);
- maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
+ maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
bool will_free = false;
@@ -584,7 +577,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
break;
page = opg->ops_cl.cpl_page;
- if (cl_page_in_use_noref(page)) {
+ if (lru_page_busy(cli, page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
@@ -620,7 +613,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
}
if (cl_page_own_try(env, io, page) == 0) {
- if (!cl_page_in_use_noref(page)) {
+ if (!lru_page_busy(cli, page)) {
/* remove it from lru list earlier to avoid
* lock contention
*/
@@ -663,24 +656,19 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
- atomic_add(count, cli->cl_lru_left);
+ atomic_long_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
return count > 0 ? count : rc;
}
-static inline int max_to_shrink(struct client_obd *cli)
-{
- return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-int osc_lru_reclaim(struct client_obd *cli)
+long osc_lru_reclaim(struct client_obd *cli)
{
struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int rc = 0;
+ long rc = 0;
LASSERT(cache);
@@ -693,15 +681,15 @@ int osc_lru_reclaim(struct client_obd *cli)
if (rc == -EBUSY)
rc = 0;
- CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
goto out;
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen.
@@ -717,10 +705,10 @@ int osc_lru_reclaim(struct client_obd *cli)
cli = list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (osc_cache_too_much(cli) > 0) {
@@ -737,11 +725,18 @@ int osc_lru_reclaim(struct client_obd *cli)
out:
cl_env_nested_put(&nest, env);
- CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+ CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
+/**
+ * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which
+ * should already have reserved enough slots for the whole IO; only when
+ * the LRU slots are in extreme shortage does the reservation fall
+ * through to here.
+ */
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
@@ -758,8 +753,8 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
goto out;
}
- LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+ while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
if (rc < 0)
@@ -770,7 +765,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0,
+ atomic_long_read(cli->cl_lru_left) > 0,
&lwi);
if (rc < 0)
@@ -779,7 +774,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
out:
if (rc >= 0) {
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
rc = 0;
}
@@ -787,4 +782,151 @@ out:
return rc;
}
+/**
+ * Atomic operations are expensive. We accumulate the accounting for
+ * pages on the same pgdat to get better performance.
+ * In practice this works pretty well because the pages in the same RPC
+ * are likely to come from the same NUMA node.
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+ int factor)
+{
+ int page_count = desc->bd_iov_count;
+ pg_data_t *last = NULL;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < page_count; i++) {
+ pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+
+ if (likely(pgdat == last)) {
+ ++count;
+ continue;
+ }
+
+ if (count > 0) {
+ mod_node_page_state(pgdat, NR_UNSTABLE_NFS,
+ factor * count);
+ count = 0;
+ }
+ last = pgdat;
+ ++count;
+ }
+ if (count > 0)
+ mod_node_page_state(last, NR_UNSTABLE_NFS, factor * count);
+}
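+/*
+ * A small illustrative trace of the batching above (the page layout is
+ * hypothetical, not derived from this patch): for a 256-page bulk whose
+ * pages all sit on one node, the loop only accumulates `count`, and the
+ * single flush after the loop issues one
+ * mod_node_page_state(last, NR_UNSTABLE_NFS, factor * 256) call instead
+ * of 256 per-page updates, which is where the saving in atomic
+ * operations comes from.
+ */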
+
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+ unstable_page_accounting(desc, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request should have been committed
+ * or req->rq_unstable must have been set; this implies that the unstable
+ * page statistics have already been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ int page_count = desc->bd_iov_count;
+ long unstable_count;
+
+ LASSERT(page_count >= 0);
+ dec_unstable_page_accounting(desc);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_unstable_count);
+ LASSERT(unstable_count >= 0);
+
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(unstable_count >= 0);
+ if (!unstable_count)
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ long page_count = desc->bd_iov_count;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+ return;
+
+ add_unstable_page_accounting(desc);
+ atomic_long_add(page_count, &cli->cl_unstable_count);
+ atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ /*
+ * If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again.
+ */
+ spin_lock(&req->rq_lock);
+ if (unlikely(req->rq_committed)) {
+ spin_unlock(&req->rq_lock);
+
+ osc_dec_unstable_pages(req);
+ } else {
+ req->rq_unstable = 1;
+ spin_unlock(&req->rq_lock);
+ }
+}
+
+/**
+ * Check whether this OSC should piggyback the SOFT_SYNC flag to the OST.
+ * This function is called for every BRW RPC, so it's critical
+ * to keep it fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long unstable_nr, osc_unstable_count;
+
+ /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+ if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+ return false;
+
+ osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+ unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
+
+ CDEBUG(D_CACHE,
+ "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ unstable_nr, osc_unstable_count);
+
+ /*
+ * If the LRU slots are in shortage (25% remaining) AND this OSC
+ * holds one full RPC window of unstable pages, it's a good chance
+ * to piggyback a SOFT_SYNC flag.
+ * Note that the OST won't respond to the SOFT_SYNC request
+ * immediately, so active OSCs will have more chances to carry
+ * the flag; this is reasonable.
+ */
+ return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+ osc_unstable_count > cli->cl_max_pages_per_rpc *
+ cli->cl_max_rpcs_in_flight;
+}
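+/*
+ * A worked example of the two thresholds above, using made-up numbers
+ * (none of these values come from the patch): with ccc_lru_max = 400000
+ * slots the first test fires once ccc_unstable_nr exceeds
+ * 400000 >> 2 = 100000 pages, and with cl_max_pages_per_rpc = 256 and
+ * cl_max_rpcs_in_flight = 8 this OSC must also hold more than
+ * 256 * 8 = 2048 unstable pages (one full RPC window) before
+ * OBD_BRW_SOFT_SYNC is piggybacked on its BRW RPCs.
+ */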
+
/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 536b868ff776..749781f022e2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -41,6 +41,7 @@
#include "../include/lustre_ha.h"
#include "../include/lprocfs_status.h"
+#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_debug.h"
#include "../include/lustre_param.h"
#include "../include/lustre_fid.h"
@@ -102,36 +103,6 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc);
-/* Pack OSC object metadata for disk storage (LE byte order). */
-static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
- struct lov_stripe_md *lsm)
-{
- int lmm_size;
-
- lmm_size = sizeof(**lmmp);
- if (!lmmp)
- return lmm_size;
-
- if (*lmmp && !lsm) {
- kfree(*lmmp);
- *lmmp = NULL;
- return 0;
- } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) {
- return -EBADF;
- }
-
- if (!*lmmp) {
- *lmmp = kzalloc(lmm_size, GFP_NOFS);
- if (!*lmmp)
- return -ENOMEM;
- }
-
- if (lsm)
- ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);
-
- return lmm_size;
-}
-
/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
struct lov_mds_md *lmm, int lmm_bytes)
@@ -189,7 +160,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
(imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
(*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
else
- (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ (*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
return lsm_size;
}
@@ -427,24 +398,16 @@ static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
oinfo->oi_cb_up, oinfo, rqset);
}
-static int osc_real_create(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
+static int osc_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct obd_trans_info *oti)
{
struct ptlrpc_request *req;
struct ost_body *body;
- struct lov_stripe_md *lsm;
int rc;
LASSERT(oa);
- LASSERT(ea);
-
- lsm = *ea;
- if (!lsm) {
- rc = obd_alloc_memmd(exp, &lsm);
- if (rc < 0)
- return rc;
- }
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
if (!req) {
@@ -490,21 +453,10 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
oa->o_blksize = cli_brw_size(exp->exp_obd);
oa->o_valid |= OBD_MD_FLBLKSZ;
- /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
- * have valid lsm_oinfo data structs, so don't go touching that.
- * This needs to be fixed in a big way.
- */
- lsm->lsm_oi = oa->o_oi;
- *ea = lsm;
-
- if (oti) {
- oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
-
- if (oa->o_valid & OBD_MD_FLCOOKIE) {
- if (!oti->oti_logcookies)
- oti_alloc_cookies(oti, 1);
- *oti->oti_logcookies = oa->o_lcookie;
- }
+ if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
+ if (!oti->oti_logcookies)
+ oti->oti_logcookies = &oti->oti_onecookie;
+ *oti->oti_logcookies = oa->o_lcookie;
}
CDEBUG(D_HA, "transno: %lld\n",
@@ -512,8 +464,6 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
out_req:
ptlrpc_req_finished(req);
out:
- if (rc && !*ea)
- obd_free_memmd(exp, &lsm);
return rc;
}
@@ -649,7 +599,7 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
ostid_build_res_name(&oa->o_oi, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (!res)
+ if (IS_ERR(res))
return 0;
LDLM_RESOURCE_ADDREF(res);
@@ -689,30 +639,6 @@ static int osc_can_send_destroy(struct client_obd *cli)
return 0;
}
-static int osc_create(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md **ea,
- struct obd_trans_info *oti)
-{
- int rc = 0;
-
- LASSERT(oa);
- LASSERT(ea);
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- if ((oa->o_valid & OBD_MD_FLFLAGS) &&
- oa->o_flags == OBD_FL_RECREATE_OBJS) {
- return osc_real_create(exp, oa, ea, oti);
- }
-
- if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
- return osc_real_create(exp, oa, ea, oti);
-
- /* we should not get here anymore */
- LBUG();
-
- return rc;
-}
-
/* Destroy requests can be async always on the client, and we don't even really
* care about the return code since the client cannot do anything at all about
* a destroy failure.
@@ -725,8 +651,7 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
* cookies to the MDS after committing destroy transactions.
*/
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
- struct obdo *oa, struct lov_stripe_md *ea,
- struct obd_trans_info *oti, struct obd_export *md_export)
+ struct obdo *oa, struct obd_trans_info *oti)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct ptlrpc_request *req;
@@ -794,42 +719,44 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
long writing_bytes)
{
- u32 bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
+ u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;
LASSERT(!(oa->o_valid & bits));
oa->o_valid |= bits;
spin_lock(&cli->cl_loi_list_lock);
- oa->o_dirty = cli->cl_dirty;
- if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
- cli->cl_dirty_max)) {
+ oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
+ if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
+ cli->cl_dirty_max_pages)) {
CERROR("dirty %lu - %lu > dirty_max %lu\n",
- cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
+ cli->cl_dirty_pages, cli->cl_dirty_transit,
+ cli->cl_dirty_max_pages);
oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_unstable_pages) +
- atomic_read(&obd_dirty_pages) -
- atomic_read(&obd_dirty_transit_pages) >
- (long)(obd_max_dirty_pages + 1))) {
+ } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
+ atomic_long_read(&obd_dirty_transit_pages) >
+ (obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
+ CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n",
cli->cl_import->imp_obd->obd_name,
- atomic_read(&obd_unstable_pages),
- atomic_read(&obd_dirty_pages),
- atomic_read(&obd_dirty_transit_pages),
+ atomic_long_read(&obd_dirty_pages),
+ atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
- } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
+ } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
+ 0x7fffffff)) {
CERROR("dirty %lu - dirty_max %lu too big???\n",
- cli->cl_dirty, cli->cl_dirty_max);
+ cli->cl_dirty_pages, cli->cl_dirty_max_pages);
oa->o_undirty = 0;
} else {
- long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_SHIFT)*
- (cli->cl_max_rpcs_in_flight + 1);
- oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
+ unsigned long max_in_flight;
+
+ max_in_flight = (cli->cl_max_pages_per_rpc << PAGE_SHIFT) *
+ (cli->cl_max_rpcs_in_flight + 1);
+ oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT,
+ max_in_flight);
}
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
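/*
 * A worked o_undirty figure for the branch above (illustrative values,
 * not taken from this patch): with cl_max_pages_per_rpc = 256,
 * PAGE_SHIFT = 12 and cl_max_rpcs_in_flight = 8, max_in_flight is
 * (256 << 12) * (8 + 1) = 9437184 bytes (9 MiB), and o_undirty becomes
 * the larger of that and cl_dirty_max_pages << PAGE_SHIFT.
 */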
@@ -1029,22 +956,24 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
/*
* ocd_grant is the total grant amount we're expect to hold: if we've
- * been evicted, it's the new avail_grant amount, cl_dirty will drop
- * to 0 as inflight RPCs fail out; otherwise, it's avail_grant + dirty.
+ * been evicted, it's the new avail_grant amount, cl_dirty_pages will
+ * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
+ * dirty.
*
* race is tolerable here: if we're evicted, but imp_state already
- * left EVICTED state, then cl_dirty must be 0 already.
+ * left EVICTED state, then cl_dirty_pages must be 0 already.
*/
spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
cli->cl_avail_grant = ocd->ocd_grant;
else
- cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+ cli->cl_avail_grant = ocd->ocd_grant -
+ (cli->cl_dirty_pages << PAGE_SHIFT);
if (cli->cl_avail_grant < 0) {
CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
- ocd->ocd_grant, cli->cl_dirty);
+ ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
/* workaround for servers which do not have the patch from
* LU-2679
*/
@@ -1181,7 +1110,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
}
while (nob > 0 && pg_count > 0) {
- int count = pga[i]->count > nob ? nob : pga[i]->count;
+ unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
/* corrupt the data before we compute the checksum, to
* simulate an OST->client data error
@@ -1191,7 +1120,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
unsigned char *ptr = kmap(pga[i]->pg);
int off = pga[i]->off & ~PAGE_MASK;
- memcpy(ptr + off, "bad1", min(4, nob));
+ memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
@@ -1335,11 +1264,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
if (i > 0 && can_merge_pages(pg_prev, pg)) {
niobuf--;
- niobuf->len += pg->count;
+ niobuf->rnb_len += pg->count;
} else {
- niobuf->offset = pg->off;
- niobuf->len = pg->count;
- niobuf->flags = pg->flag;
+ niobuf->rnb_offset = pg->off;
+ niobuf->rnb_len = pg->count;
+ niobuf->rnb_flags = pg->flag;
}
pg_prev = pg;
}
@@ -1418,6 +1347,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
INIT_LIST_HEAD(&aa->aa_oaps);
*reqp = req;
+ niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
+ CDEBUG(D_RPCTRACE, "brw rpc %p - object " DOSTID " offset %lld<>%lld\n",
+ req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
+ niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
+
return 0;
out:
@@ -1463,7 +1397,8 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
POSTID(&oa->o_oi), pga[0]->off,
- pga[page_count-1]->off + pga[page_count-1]->count - 1);
+ pga[page_count - 1]->off +
+ pga[page_count - 1]->count - 1);
CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
client_cksum, client_cksum_type,
server_cksum, cksum_type, new_cksum);
@@ -1565,7 +1500,8 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
char *router = "";
enum cksum_type cksum_type;
- cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
+ cksum_type = cksum_type_unpack(body->oa.o_valid &
+ OBD_MD_FLFLAGS ?
body->oa.o_flags : 0);
client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
aa->aa_ppga, OST_READ,
@@ -1794,7 +1730,8 @@ static int brw_interpret(const struct lu_env *env,
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
- loff_t last_off = last->oap_count + last->oap_obj_off;
+ loff_t last_off = last->oap_count + last->oap_obj_off +
+ last->oap_page_off;
/* Change file size if this is an out of quota or
* direct IO write and it extends the file size
@@ -1812,11 +1749,14 @@ static int brw_interpret(const struct lu_env *env,
}
if (valid != 0)
- cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
kmem_cache_free(obdo_cachep, aa->aa_oa);
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
+ osc_inc_unstable_pages(req);
+
list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 1, rc);
@@ -1847,21 +1787,21 @@ static int brw_interpret(const struct lu_env *env,
static void brw_commit(struct ptlrpc_request *req)
{
- spin_lock(&req->rq_lock);
/*
* If osc_inc_unstable_pages (via osc_extent_finish) races with
* this called via the rq_commit_cb, I need to ensure
* osc_dec_unstable_pages is still called. Otherwise unstable
* pages may be leaked.
*/
- if (req->rq_unstable) {
+ spin_lock(&req->rq_lock);
+ if (unlikely(req->rq_unstable)) {
+ req->rq_unstable = 0;
spin_unlock(&req->rq_lock);
osc_dec_unstable_pages(req);
- spin_lock(&req->rq_lock);
} else {
req->rq_committed = 1;
+ spin_unlock(&req->rq_lock);
}
- spin_unlock(&req->rq_lock);
}
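/*
 * A minimal sketch of the two interleavings the rq_unstable/rq_committed
 * flags above are meant to cover (pseudo-trace only, not code from the
 * patch):
 *
 *   (a) osc_inc_unstable_pages() runs first: it sets rq_unstable, and
 *       brw_commit() later clears the flag and calls
 *       osc_dec_unstable_pages().
 *   (b) brw_commit() runs first: it sets rq_committed, and
 *       osc_inc_unstable_pages() then sees the flag and undoes its own
 *       increment by calling osc_dec_unstable_pages() directly.
 *
 * Either way each increment is balanced by exactly one decrement, so no
 * unstable pages are leaked.
 */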
/**
@@ -1881,13 +1821,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_async_page *tmp;
struct cl_req *clerq = NULL;
enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
- struct ldlm_lock *lock = NULL;
struct cl_req_attr *crattr = NULL;
u64 starting_offset = OBD_OBJECT_EOF;
u64 ending_offset = 0;
int mpflag = 0;
int mem_tight = 0;
int page_count = 0;
+ bool soft_sync = false;
int i;
int rc;
struct ost_body *body;
@@ -1915,6 +1855,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
}
+ soft_sync = osc_over_unstable_soft_limit(cli);
if (mem_tight)
mpflag = cfs_memory_pressure_get_and_set();
@@ -1947,10 +1888,11 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
rc = PTR_ERR(clerq);
goto out;
}
- lock = oap->oap_ldlm_lock;
}
if (mem_tight)
oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ if (soft_sync)
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
@@ -1964,10 +1906,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
LASSERT(clerq);
crattr->cra_oa = oa;
cl_req_attr_set(env, clerq, crattr, ~0ULL);
- if (lock) {
- oa->o_handle = lock->l_remote_handle;
- oa->o_valid |= OBD_MD_FLHANDLE;
- }
rc = cl_req_prep(env, clerq);
if (rc != 0) {
@@ -1998,7 +1936,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
cl_req_attr_set(env, clerq, crattr,
- OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
@@ -2044,7 +1982,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
spin_unlock(&cli->cl_loi_list_lock);
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
+ DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight);
@@ -2116,27 +2054,6 @@ static int osc_set_data_with_check(struct lustre_handle *lockh,
return set;
}
-/* find any ldlm lock of the inode in osc
- * return 0 not find
- * 1 find one
- * < 0 error
- */
-static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
- ldlm_iterator_t replace, void *data)
-{
- struct ldlm_res_id res_id;
- struct obd_device *obd = class_exp2obd(exp);
- int rc = 0;
-
- ostid_build_res_name(&lsm->lsm_oi, &res_id);
- rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
- if (rc == LDLM_ITER_STOP)
- return 1;
- if (rc == LDLM_ITER_CONTINUE)
- return 0;
- return rc;
-}
-
static int osc_enqueue_fini(struct ptlrpc_request *req,
osc_enqueue_upcall_f upcall, void *cookie,
struct lustre_handle *lockh, enum ldlm_mode mode,
@@ -2586,71 +2503,6 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
return rc;
}
-/* Retrieve object striping information.
- *
- * @lmmu is a pointer to an in-core struct with lmm_ost_count indicating
- * the maximum number of OST indices which will fit in the user buffer.
- * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
- */
-static int osc_getstripe(struct lov_stripe_md *lsm,
- struct lov_user_md __user *lump)
-{
- /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_user_md_v3 lum, *lumk;
- struct lov_user_ost_data_v1 *lmm_objects;
- int rc = 0, lum_size;
-
- if (!lsm)
- return -ENODATA;
-
- /* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3)
- */
- lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(&lum, lump, lum_size))
- return -EFAULT;
-
- if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
- (lum.lmm_magic != LOV_USER_MAGIC_V3))
- return -EINVAL;
-
- /* lov_user_md_vX and lov_mds_md_vX must have the same size */
- LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
- LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
- LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
-
- /* we can use lov_mds_md_size() to compute lum_size
- * because lov_user_md_vX and lov_mds_md_vX have the same size
- */
- if (lum.lmm_stripe_count > 0) {
- lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
- lumk = kzalloc(lum_size, GFP_NOFS);
- if (!lumk)
- return -ENOMEM;
-
- if (lum.lmm_magic == LOV_USER_MAGIC_V1)
- lmm_objects =
- &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
- else
- lmm_objects = &(lumk->lmm_objects[0]);
- lmm_objects->l_ost_oi = lsm->lsm_oi;
- } else {
- lum_size = lov_mds_md_size(0, lum.lmm_magic);
- lumk = &lum;
- }
-
- lumk->lmm_oi = lsm->lsm_oi;
- lumk->lmm_stripe_count = 1;
-
- if (copy_to_user(lump, lumk, lum_size))
- rc = -EFAULT;
-
- if (lumk != &lum)
- kfree(lumk);
-
- return rc;
-}
-
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void __user *uarg)
{
@@ -2664,57 +2516,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return -EINVAL;
}
switch (cmd) {
- case OBD_IOC_LOV_GET_CONFIG: {
- char *buf;
- struct lov_desc *desc;
- struct obd_uuid uuid;
-
- buf = NULL;
- len = 0;
- if (obd_ioctl_getdata(&buf, &len, uarg)) {
- err = -EINVAL;
- goto out;
- }
-
- data = (struct obd_ioctl_data *)buf;
-
- if (sizeof(*desc) > data->ioc_inllen1) {
- obd_ioctl_freedata(buf, len);
- err = -EINVAL;
- goto out;
- }
-
- if (data->ioc_inllen2 < sizeof(uuid)) {
- obd_ioctl_freedata(buf, len);
- err = -EINVAL;
- goto out;
- }
-
- desc = (struct lov_desc *)data->ioc_inlbuf1;
- desc->ld_tgt_count = 1;
- desc->ld_active_tgt_count = 1;
- desc->ld_default_stripe_count = 1;
- desc->ld_default_stripe_size = 0;
- desc->ld_default_stripe_offset = 0;
- desc->ld_pattern = 0;
- memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
-
- memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
-
- err = copy_to_user(uarg, buf, len);
- if (err)
- err = -EFAULT;
- obd_ioctl_freedata(buf, len);
- goto out;
- }
- case LL_IOC_LOV_SETSTRIPE:
- err = obd_alloc_memmd(exp, karg);
- if (err > 0)
- err = 0;
- goto out;
- case LL_IOC_LOV_GETSTRIPE:
- err = osc_getstripe(karg, uarg);
- goto out;
case OBD_IOC_CLIENT_RECOVER:
err = ptlrpc_recover_import(obd->u.cli.cl_import,
data->ioc_inlbuf1, 0);
@@ -2749,51 +2550,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
if (!vallen || !val)
return -EFAULT;
- if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
- __u32 *stripe = val;
- *vallen = sizeof(*stripe);
- *stripe = 0;
- return 0;
- } else if (KEY_IS(KEY_LAST_ID)) {
- struct ptlrpc_request *req;
- u64 *reply;
- char *tmp;
- int rc;
-
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_OST_GET_INFO_LAST_ID);
- if (!req)
- return -ENOMEM;
-
- req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
- RCL_CLIENT, keylen);
- rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- memcpy(tmp, key, keylen);
-
- req->rq_no_delay = 1;
- req->rq_no_resend = 1;
- ptlrpc_request_set_replen(req);
- rc = ptlrpc_queue_wait(req);
- if (rc)
- goto out;
-
- reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
- if (!reply) {
- rc = -EPROTO;
- goto out;
- }
-
- *((u64 *)val) = *reply;
-out:
- ptlrpc_req_finished(req);
- return rc;
- } else if (KEY_IS(KEY_FIEMAP)) {
+ if (KEY_IS(KEY_FIEMAP)) {
struct ll_fiemap_info_key *fm_key = key;
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
@@ -2931,11 +2688,11 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
- int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
- int target = *(int *)val;
+ long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
+ long target = *(long *)val;
nr = osc_lru_shrink(env, cli, min(nr, target), true);
- *(int *)val -= nr;
+ *(long *)val -= nr;
return 0;
}
@@ -3014,8 +2771,9 @@ static int osc_reconnect(const struct lu_env *env,
long lost_grant;
spin_lock(&cli->cl_loi_list_lock);
- data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
- 2 * cli_brw_size(obd);
+ data->ocd_grant = (cli->cl_avail_grant +
+ (cli->cl_dirty_pages << PAGE_SHIFT)) ?:
+ 2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
spin_unlock(&cli->cl_loi_list_lock);
@@ -3346,7 +3104,6 @@ static struct obd_ops osc_obd_ops = {
.disconnect = osc_disconnect,
.statfs = osc_statfs,
.statfs_async = osc_statfs_async,
- .packmd = osc_packmd,
.unpackmd = osc_unpackmd,
.create = osc_create,
.destroy = osc_destroy,
@@ -3354,7 +3111,6 @@ static struct obd_ops osc_obd_ops = {
.getattr_async = osc_getattr_async,
.setattr = osc_setattr,
.setattr_async = osc_setattr_async,
- .find_cbdata = osc_find_cbdata,
.iocontrol = osc_iocontrol,
.get_info = osc_get_info,
.set_info_async = osc_set_info_async,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index d4463d7c81d2..8c51d51a678b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -45,6 +45,7 @@
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
/**
* Initialize passed in client structure \a cl.
@@ -89,7 +90,6 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
return c;
}
-EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
/**
* Allocate and initialize new bulk descriptor on the sender.
@@ -202,7 +202,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
if (unpin) {
for (i = 0; i < desc->bd_iov_count; i++)
- put_page(desc->bd_iov[i].kiov_page);
+ put_page(desc->bd_iov[i].bv_page);
}
kfree(desc);
@@ -283,8 +283,8 @@ int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
}
/* Adjust expected network latency */
-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
- unsigned int service_time)
+void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+ unsigned int service_time)
{
unsigned int nl, oldnl;
struct imp_at *at;
@@ -364,31 +364,37 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
}
rc = unpack_reply(early_req);
- if (rc == 0) {
- /* Expecting to increase the service time estimate here */
- ptlrpc_at_adj_service(req,
- lustre_msg_get_timeout(early_req->rq_repmsg));
- ptlrpc_at_adj_net_latency(req,
- lustre_msg_get_service_time(early_req->rq_repmsg));
- }
-
- sptlrpc_cli_finish_early_reply(early_req);
-
- if (rc != 0) {
+ if (rc) {
+ sptlrpc_cli_finish_early_reply(early_req);
spin_lock(&req->rq_lock);
return rc;
}
- /* Adjust the local timeout for this req */
- ptlrpc_at_set_req_timeout(req);
+ /*
+ * Use new timeout value just to adjust the local value for this
+ * request, and don't include it in at_history. It is unclear yet why
+ * the service time increased and whether it should be counted or
+ * skipped, e.g. it could be a recovery case or some error on the
+ * server; the real reply will add all the new data if it is worth
+ * adding.
+ */
+ req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
+ lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+
+ /* Network latency can be adjusted, it is pure network delays */
+ ptlrpc_at_adj_net_latency(req,
+ lustre_msg_get_service_time(early_req->rq_repmsg));
+
+ sptlrpc_cli_finish_early_reply(early_req);
spin_lock(&req->rq_lock);
olddl = req->rq_deadline;
/*
- * server assumes it now has rq_timeout from when it sent the
- * early reply, so client should give it at least that long.
+ * server assumes it now has rq_timeout from when the request
+ * arrived, so the client should give it at least that long.
+ * Since we don't know the arrival time, we'll use the original
+ * sent time.
*/
- req->rq_deadline = ktime_get_real_seconds() + req->rq_timeout +
+ req->rq_deadline = req->rq_sent + req->rq_timeout +
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
@@ -884,7 +890,6 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
return set;
}
-EXPORT_SYMBOL(ptlrpc_prep_fcset);
/**
* Wind down and free request set structure previously allocated with
@@ -1004,7 +1009,6 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
}
}
-EXPORT_SYMBOL(ptlrpc_set_add_new_req);
/**
* Based on the current state of the import, determine if the request
@@ -1035,8 +1039,8 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
*status = -EIO;
} else if (ptlrpc_send_limit_expired(req)) {
/* probably doesn't need to be a D_ERROR after initial testing */
- DEBUG_REQ(D_ERROR, req, "send limit expired ");
- *status = -EIO;
+ DEBUG_REQ(D_HA, req, "send limit expired ");
+ *status = -ETIMEDOUT;
} else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
imp->imp_state == LUSTRE_IMP_CONNECTING) {
/* allow CONNECT even if import is invalid */
@@ -1073,36 +1077,42 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
}
/**
- * Decide if the error message regarding provided request \a req
- * should be printed to the console or not.
- * Makes it's decision on request status and other properties.
- * Returns 1 to print error on the system console or 0 if not.
+ * Decide if the error message should be printed to the console or not.
+ * Makes its decision based on request type, status, and failure frequency.
+ *
+ * \param[in] req request that failed and may need a console message
+ *
+ * \retval false if no message should be printed
+ * \retval true if console message should be printed
*/
-static int ptlrpc_console_allow(struct ptlrpc_request *req)
+static bool ptlrpc_console_allow(struct ptlrpc_request *req)
{
__u32 opc;
- int err;
LASSERT(req->rq_reqmsg);
opc = lustre_msg_get_opc(req->rq_reqmsg);
- /*
- * Suppress particular reconnect errors which are to be expected. No
- * errors are suppressed for the initial connection on an import
- */
- if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
- (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
+ /* Suppress particular reconnect errors which are to be expected. */
+ if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
+ int err;
+
/* Suppress timed out reconnect requests */
- if (req->rq_timedout)
- return 0;
+ if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
+ req->rq_timedout)
+ return false;
- /* Suppress unavailable/again reconnect requests */
+ /*
+ * Suppress most unavailable/again reconnect requests, but
+ * print occasionally so it is clear client is trying to
+ * connect to a server where no target is running.
+ */
err = lustre_msg_get_status(req->rq_repmsg);
- if (err == -ENODEV || err == -EAGAIN)
- return 0;
+ if ((err == -ENODEV || err == -EAGAIN) &&
+ req->rq_import->imp_conn_cnt % 30 != 20)
+ return false;
}
- return 1;
+ return true;
}
/**
@@ -1116,14 +1126,14 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
err = lustre_msg_get_status(req->rq_repmsg);
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
struct obd_import *imp = req->rq_import;
+ lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
+ LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
imp->imp_obd->obd_name,
- libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
- ll_opcode2str(opc), err);
+ ll_opcode2str(opc),
+ libcfs_nid2str(nid), err);
return err < 0 ? err : -EINVAL;
}
@@ -1280,7 +1290,7 @@ static int after_reply(struct ptlrpc_request *req)
* some reason. Try to reconnect, and if that fails, punt to
* the upcall.
*/
- if (ll_rpc_recoverable_error(rc)) {
+ if (ptlrpc_recoverable_error(rc)) {
if (req->rq_send_state != LUSTRE_IMP_FULL ||
imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
return rc;
@@ -1628,8 +1638,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
req->rq_waiting || req->rq_wait_ctx) {
int status;
- if (!ptlrpc_unregister_reply(req, 1))
+ if (!ptlrpc_unregister_reply(req, 1)) {
+ ptlrpc_unregister_bulk(req, 1);
continue;
+ }
spin_lock(&imp->imp_lock);
if (ptlrpc_import_delay_req(imp, req,
@@ -1995,7 +2007,6 @@ int ptlrpc_expired_set(void *data)
*/
return 1;
}
-EXPORT_SYMBOL(ptlrpc_expired_set);
/**
* Sets rq_intr flag in \a req under spinlock.
@@ -2012,7 +2023,7 @@ EXPORT_SYMBOL(ptlrpc_mark_interrupted);
* Interrupts (sets interrupted flag) all uncompleted requests in
* a set \a data. Callback for l_wait_event for interruptible waits.
*/
-void ptlrpc_interrupted_set(void *data)
+static void ptlrpc_interrupted_set(void *data)
{
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
@@ -2030,7 +2041,6 @@ void ptlrpc_interrupted_set(void *data)
ptlrpc_mark_interrupted(req);
}
}
-EXPORT_SYMBOL(ptlrpc_interrupted_set);
/**
* Get the smallest timeout in the set; this does NOT set a timeout.
@@ -2074,7 +2084,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
}
return timeout;
}
-EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
* Send all unset request from the set and then wait until all
@@ -2325,7 +2334,7 @@ EXPORT_SYMBOL(ptlrpc_req_xid);
* The request owner (i.e. the thread doing the I/O) must call...
* Returns 0 on success or 1 if unregistering cannot be made.
*/
-int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
+static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
int rc;
wait_queue_head_t *wq;
@@ -2390,7 +2399,6 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
}
return 0;
}
-EXPORT_SYMBOL(ptlrpc_unregister_reply);
static void ptlrpc_free_request(struct ptlrpc_request *req)
{
@@ -2451,7 +2459,8 @@ void ptlrpc_free_committed(struct obd_import *imp)
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
- if (imp->imp_generation != imp->imp_last_generation_checked)
+ if (imp->imp_generation != imp->imp_last_generation_checked ||
+ !imp->imp_last_transno_checked)
skip_committed_list = false;
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
@@ -2499,6 +2508,9 @@ free_req:
if (req->rq_import_generation < imp->imp_generation) {
DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
ptlrpc_free_request(req);
+ } else if (!req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "free closed open request");
+ ptlrpc_free_request(req);
}
}
}
@@ -2541,7 +2553,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
-EXPORT_SYMBOL(ptlrpc_resend_req);
/**
* Grab additional reference on a request \a req
@@ -2610,7 +2621,6 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
-EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
/**
* Send request and wait until it completes.
@@ -2783,7 +2793,6 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
ptlrpcd_add_req(req);
return 0;
}
-EXPORT_SYMBOL(ptlrpc_replay_req);
/**
* Aborts all in-flight request on import \a imp sending and delayed lists
@@ -2843,7 +2852,6 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
spin_unlock(&imp->imp_lock);
}
-EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
* Abort all uncompleted requests in request set \a set
@@ -2929,7 +2937,6 @@ __u64 ptlrpc_next_xid(void)
return next;
}
-EXPORT_SYMBOL(ptlrpc_next_xid);
/**
* Get a glimpse at what next xid value might have been.
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 177a379da9fa..7b020d60c9e5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -82,7 +82,6 @@ out:
libcfs_nid2str(conn->c_peer.nid));
return conn;
}
-EXPORT_SYMBOL(ptlrpc_connection_get);
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
@@ -118,7 +117,6 @@ int ptlrpc_connection_put(struct ptlrpc_connection *conn)
return rc;
}
-EXPORT_SYMBOL(ptlrpc_connection_put);
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
@@ -130,7 +128,6 @@ ptlrpc_connection_addref(struct ptlrpc_connection *conn)
return conn;
}
-EXPORT_SYMBOL(ptlrpc_connection_addref);
int ptlrpc_connection_init(void)
{
@@ -146,13 +143,11 @@ int ptlrpc_connection_init(void)
return 0;
}
-EXPORT_SYMBOL(ptlrpc_connection_init);
void ptlrpc_connection_fini(void)
{
cfs_hash_putref(conn_hash);
}
-EXPORT_SYMBOL(ptlrpc_connection_fini);
/*
* Hash operations for net_peer<->connection
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index b1ce72511509..283dfb296d35 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -543,7 +543,7 @@ static int ptlrpc_ni_init(void)
rc = LNetNIInit(pid);
if (rc < 0) {
CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
- return -ENOENT;
+ return rc;
}
/* CAVEAT EMPTOR: how we process portals events is _radically_
@@ -561,7 +561,7 @@ static int ptlrpc_ni_init(void)
CERROR("Failed to allocate event queue: %d\n", rc);
LNetNIFini();
- return -ENOMEM;
+ return rc;
}
int ptlrpc_init_portals(void)
@@ -570,7 +570,7 @@ int ptlrpc_init_portals(void)
if (rc != 0) {
CERROR("network initialisation failed\n");
- return -EIO;
+ return rc;
}
rc = ptlrpcd_addref();
if (rc == 0)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 3292e6ea0102..a23d0a05b574 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -307,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
*/
lwi = LWI_TIMEOUT_INTERVAL(
cfs_timeout_cap(cfs_time_seconds(timeout)),
- (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
+ (timeout > 1) ? cfs_time_seconds(1) :
+ cfs_time_seconds(1) / 2,
NULL, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
(atomic_read(&imp->imp_inflight) == 0),
@@ -424,7 +425,6 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
ptlrpc_pinger_force(imp);
}
}
-EXPORT_SYMBOL(ptlrpc_fail_import);
int ptlrpc_reconnect_import(struct obd_import *imp)
{
@@ -698,7 +698,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
- sizeof(struct obd_connect_data)+16*sizeof(__u64));
+ sizeof(struct obd_connect_data) +
+ 16 * sizeof(__u64));
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
@@ -750,6 +751,153 @@ static int ptlrpc_busy_reconnect(int rc)
return (rc == -EBUSY) || (rc == -EAGAIN);
}
+static int ptlrpc_connect_set_flags(struct obd_import *imp,
+ struct obd_connect_data *ocd,
+ u64 old_connect_flags,
+ struct obd_export *exp, int init_connect)
+{
+ struct client_obd *cli = &imp->imp_obd->u.cli;
+ static bool warned;
+
+ if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
+ !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
+ LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %#llx, replied %#llx\n",
+ imp->imp_obd->obd_name,
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_connect_flags_orig,
+ ocd->ocd_connect_flags);
+ return -EPROTO;
+ }
+
+ spin_lock(&imp->imp_lock);
+ list_del(&imp->imp_conn_current->oic_item);
+ list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
+ imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt;
+
+ spin_unlock(&imp->imp_lock);
+
+ if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ (ocd->ocd_version > LUSTRE_VERSION_CODE +
+ LUSTRE_VERSION_OFFSET_WARN ||
+ ocd->ocd_version < LUSTRE_VERSION_CODE -
+ LUSTRE_VERSION_OFFSET_WARN)) {
+ /*
+ * Sigh, some compilers do not like #ifdef in the middle
+ * of macro arguments
+ */
+ const char *older = "older than client. Consider upgrading server";
+ const char *newer = "newer than client. Consider recompiling application";
+
+ LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
+ obd2cli_tgt(imp->imp_obd),
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version),
+ ocd->ocd_version > LUSTRE_VERSION_CODE ?
+ newer : older, LUSTRE_VERSION_STRING);
+ warned = true;
+ }
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
+ /*
+ * Check if server has LU-1252 fix applied to not always swab
+ * the IR MNE entries. Do this only once per connection. This
+ * fixup is version-limited, because we don't want to carry the
+ * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
+ * need interop with unpatched 2.2 servers. For newer servers,
+ * the client will do MNE swabbing only as needed. LU-1644
+ */
+ if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
+ !strcmp(imp->imp_obd->obd_type->typ_name,
+ LUSTRE_MGC_NAME)))
+ imp->imp_need_mne_swab = 1;
+ else /* clear if server was upgraded since last connect */
+ imp->imp_need_mne_swab = 0;
+#endif
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
+ /*
+ * We sent to the server ocd_cksum_types with bits set
+ * for algorithms we understand. The server masked off
+ * the checksum types it doesn't support
+ */
+ if (!(ocd->ocd_cksum_types & cksum_types_supported_client())) {
+ LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
+ obd2cli_tgt(imp->imp_obd),
+ ocd->ocd_cksum_types,
+ cksum_types_supported_client());
+ cli->cl_checksum = 0;
+ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
+ } else {
+ cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
+ }
+ } else {
+ /*
+ * The server does not support OBD_CONNECT_CKSUM.
+ * Enforce ADLER for backward compatibility
+ */
+ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
+ }
+ cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
+ cli->cl_max_pages_per_rpc =
+ min(ocd->ocd_brw_size >> PAGE_SHIFT,
+ cli->cl_max_pages_per_rpc);
+ else if (imp->imp_connect_op == MDS_CONNECT ||
+ imp->imp_connect_op == MGS_CONNECT)
+ cli->cl_max_pages_per_rpc = 1;
+
+ LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
+ (cli->cl_max_pages_per_rpc > 0));
+
+ client_adjust_max_dirty(cli);
+
+ /*
+ * Reset ns_connect_flags only for the initial connect. It might be
+ * changed while the FS is in use, and if we reset it on reconnect
+ * this leads to losing user settings applied earlier, such as
+ * disabling lru_resize, etc.
+ */
+ if (old_connect_flags != exp_connect_flags(exp) || init_connect) {
+ CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
+ imp->imp_obd->obd_name, ocd->ocd_connect_flags);
+ imp->imp_obd->obd_namespace->ns_connect_flags =
+ ocd->ocd_connect_flags;
+ imp->imp_obd->obd_namespace->ns_orig_connect_flags =
+ ocd->ocd_connect_flags;
+ }
+
+ if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
+ (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ /*
+ * We need a per-message support flag, because
+ * a. we don't know if the incoming connect reply
+ * supports AT or not (in reply_in_callback)
+ * until we unpack it.
+ * b. failovered server means export and flags are gone
+ * (in ptlrpc_send_reply).
+ * Can only be set when we know AT is supported at
+ * both ends
+ */
+ imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
+ else
+ imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
+
+ if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
+ (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
+ else
+ imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+
+ return 0;
+}
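+/*
+ * A short example of the checksum negotiation handled above (the masks
+ * are hypothetical, not taken from any real connect reply): if the client
+ * advertised OBD_CKSUM_ADLER | OBD_CKSUM_CRC32 in ocd_cksum_types and the
+ * server masked the reply down to OBD_CKSUM_CRC32 only, then
+ * cl_supp_cksum_types becomes OBD_CKSUM_CRC32 and cksum_type_select()
+ * picks it; if the masked reply shares no bit with
+ * cksum_types_supported_client(), checksums are disabled (cl_checksum = 0)
+ * and the client falls back to OBD_CKSUM_ADLER.
+ */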
+
/**
* interpret_reply callback for connect RPCs.
* Looks into returned status of connect operation and decides
@@ -762,7 +910,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
{
struct ptlrpc_connect_async_args *aa = data;
struct obd_import *imp = request->rq_import;
- struct client_obd *cli = &imp->imp_obd->u.cli;
struct lustre_handle old_hdl;
__u64 old_connect_flags;
int msg_flags;
@@ -842,7 +989,21 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
old_connect_flags = exp_connect_flags(exp);
exp->exp_connect_data = *ocd;
imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
+
+ /*
+ * The network statistics after a (re-)connect are not valid anymore,
+ * because they may reflect different routing, etc.
+ */
+ at_init(&imp->imp_at.iat_net_latency, 0, 0);
+ ptlrpc_at_adj_net_latency(request,
+ lustre_msg_get_service_time(request->rq_repmsg));
+
+ /* Import flags should be updated before waking import at FULL state */
+ rc = ptlrpc_connect_set_flags(imp, ocd, old_connect_flags, exp,
+ aa->pcaa_initial_connect);
class_export_put(exp);
+ if (rc)
+ goto out;
obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
@@ -987,151 +1148,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
finish:
rc = ptlrpc_import_recovery_state_machine(imp);
- if (rc != 0) {
- if (rc == -ENOTCONN) {
- CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
- obd2cli_tgt(imp->imp_obd),
- imp->imp_connection->c_remote_uuid.uuid);
- ptlrpc_connect_import(imp);
- imp->imp_connect_tried = 1;
- return 0;
- }
- } else {
- static bool warned;
-
- spin_lock(&imp->imp_lock);
- list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
- imp->imp_last_success_conn =
- imp->imp_conn_current->oic_last_attempt;
-
- spin_unlock(&imp->imp_lock);
-
- if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
- LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %llx, replied %llx\n",
- imp->imp_obd->obd_name,
- imp->imp_connection->c_remote_uuid.uuid,
- imp->imp_connect_flags_orig,
- ocd->ocd_connect_flags);
- rc = -EPROTO;
- goto out;
- }
-
- if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- (ocd->ocd_version > LUSTRE_VERSION_CODE +
- LUSTRE_VERSION_OFFSET_WARN ||
- ocd->ocd_version < LUSTRE_VERSION_CODE -
- LUSTRE_VERSION_OFFSET_WARN)) {
- /* Sigh, some compilers do not like #ifdef in the middle
- * of macro arguments
- */
- const char *older = "older than client. Consider upgrading server";
- const char *newer = "newer than client. Consider recompiling application";
-
- LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- ocd->ocd_version > LUSTRE_VERSION_CODE ?
- newer : older, LUSTRE_VERSION_STRING);
- warned = true;
- }
-
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
- /* Check if server has LU-1252 fix applied to not always swab
- * the IR MNE entries. Do this only once per connection. This
- * fixup is version-limited, because we don't want to carry the
- * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
- * need interop with unpatched 2.2 servers. For newer servers,
- * the client will do MNE swabbing only as needed. LU-1644
- */
- if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
- !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
- OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
- strcmp(imp->imp_obd->obd_type->typ_name,
- LUSTRE_MGC_NAME) == 0))
- imp->imp_need_mne_swab = 1;
- else /* clear if server was upgraded since last connect */
- imp->imp_need_mne_swab = 0;
-#else
-#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
-#endif
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
- /* We sent to the server ocd_cksum_types with bits set
- * for algorithms we understand. The server masked off
- * the checksum types it doesn't support
- */
- if ((ocd->ocd_cksum_types &
- cksum_types_supported_client()) == 0) {
- LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
- obd2cli_tgt(imp->imp_obd),
- ocd->ocd_cksum_types,
- cksum_types_supported_client());
- cli->cl_checksum = 0;
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- } else {
- cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
- }
- } else {
- /* The server does not support OBD_CONNECT_CKSUM.
- * Enforce ADLER for backward compatibility
- */
- cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
- }
- cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
-
- if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_SHIFT,
- cli->cl_max_pages_per_rpc);
- else if (imp->imp_connect_op == MDS_CONNECT ||
- imp->imp_connect_op == MGS_CONNECT)
- cli->cl_max_pages_per_rpc = 1;
-
- /* Reset ns_connect_flags only for initial connect. It might be
- * changed in while using FS and if we reset it in reconnect
- * this leads to losing user settings done before such as
- * disable lru_resize, etc.
- */
- if (old_connect_flags != exp_connect_flags(exp) ||
- aa->pcaa_initial_connect) {
- CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
- imp->imp_obd->obd_name, ocd->ocd_connect_flags);
- imp->imp_obd->obd_namespace->ns_connect_flags =
- ocd->ocd_connect_flags;
- imp->imp_obd->obd_namespace->ns_orig_connect_flags =
- ocd->ocd_connect_flags;
- }
-
- if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- /* We need a per-message support flag, because
- * a. we don't know if the incoming connect reply
- * supports AT or not (in reply_in_callback)
- * until we unpack it.
- * b. failovered server means export and flags are gone
- * (in ptlrpc_send_reply).
- * Can only be set when we know AT is supported at
- * both ends
- */
- imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
-
- if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
- (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
- imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
- else
- imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
-
- LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
- (cli->cl_max_pages_per_rpc > 0));
+ if (rc == -ENOTCONN) {
+ CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+ ptlrpc_connect_import(imp);
+ imp->imp_connect_tried = 1;
+ return 0;
}
out:
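Editor's aside: the block removed above documents how checksum support is negotiated during connect — the client sends ocd_cksum_types with a bit set for every algorithm it understands, the server masks off the types it cannot handle, and the client falls back to ADLER when the two sides share nothing. The following is a minimal, illustrative sketch of that intersect-with-fallback rule only; the helper name and parameters are invented for this example and are not part of the tree, which also disables checksumming entirely in the no-overlap case.

/* Illustrative only: intersect client and server checksum bitmasks,
 * falling back to ADLER when the two sides share no algorithm.
 */
static u32 negotiate_cksum_types(u32 client_supported, u32 server_reply)
{
	u32 common = client_supported & server_reply;

	return common ? common : OBD_CKSUM_ADLER;
}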
@@ -1497,10 +1520,13 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import);
/* Adaptive Timeout utils */
extern unsigned int at_min, at_max, at_history;
-/* Bin into timeslices using AT_BINS bins.
- * This gives us a max of the last binlimit*AT_BINS secs without the storage,
- * but still smoothing out a return to normalcy from a slow response.
- * (E.g. remember the maximum latency in each minute of the last 4 minutes.)
+/*
+ * Update at_current with the specified value (bounded by at_min and at_max),
+ * as well as the AT history "bins".
+ * - Bin into timeslices using AT_BINS bins.
+ * - This gives the max of the last at_history seconds without storing every
+ * sample, while still smoothing out a return to normal after a slow response.
+ * - (E.g. remember the maximum latency in each minute of the last 4 minutes.)
*/
int at_measured(struct adaptive_timeout *at, unsigned int val)
{
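Editor's aside: the rewritten comment above says at_measured() folds each latency sample into AT_BINS timeslices and keeps the per-slice maximum, so at_current roughly tracks the worst latency of the last at_history seconds without storing every sample. Below is a rough sketch of such a binned sliding maximum under simplified assumptions (flat struct, no locking, only one slice rotated when a boundary is crossed); the real struct adaptive_timeout and at_measured() differ.

#define AT_BINS 4

struct at_sketch {
	unsigned int	at_hist[AT_BINS];	/* per-timeslice maxima */
	time64_t	at_binstart;		/* start of the current slice */
	unsigned int	at_current;		/* max over all slices */
};

static void at_sketch_measured(struct at_sketch *at, unsigned int val,
			       time64_t now, time64_t binlimit)
{
	unsigned int i, max = 0;

	if (now - at->at_binstart >= binlimit) {
		/* Start a new timeslice: age the history, drop the oldest. */
		for (i = AT_BINS - 1; i > 0; i--)
			at->at_hist[i] = at->at_hist[i - 1];
		at->at_hist[0] = 0;
		at->at_binstart = now;
	}

	if (val > at->at_hist[0])
		at->at_hist[0] = val;

	/* Smooth the return to normal: report the max over all slices. */
	for (i = 0; i < AT_BINS; i++)
		if (at->at_hist[i] > max)
			max = at->at_hist[i];
	at->at_current = max;
}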
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index ab5d85174245..839ef3e80c1a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -667,11 +667,8 @@ static struct req_format *req_formats[] = {
&RQF_MDS_SYNC,
&RQF_MDS_CLOSE,
&RQF_MDS_RELEASE_CLOSE,
- &RQF_MDS_PIN,
- &RQF_MDS_UNPIN,
&RQF_MDS_READPAGE,
&RQF_MDS_WRITEPAGE,
- &RQF_MDS_IS_SUBDIR,
&RQF_MDS_DONE_WRITING,
&RQF_MDS_REINT,
&RQF_MDS_REINT_CREATE,
@@ -1116,9 +1113,9 @@ EXPORT_SYMBOL(RMF_SWAP_LAYOUTS);
struct req_format {
const char *rf_name;
- int rf_idx;
+ size_t rf_idx;
struct {
- int nr;
+ size_t nr;
const struct req_msg_field **d;
} rf_fields[RCL_NR];
};
@@ -1389,15 +1386,6 @@ struct req_format RQF_MDS_RELEASE_CLOSE =
mdt_release_close_client, mds_last_unlink_server);
EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
-struct req_format RQF_MDS_PIN =
- DEFINE_REQ_FMT0("MDS_PIN",
- mdt_body_capa, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_PIN);
-
-struct req_format RQF_MDS_UNPIN =
- DEFINE_REQ_FMT0("MDS_UNPIN", mdt_body_only, empty);
-EXPORT_SYMBOL(RQF_MDS_UNPIN);
-
struct req_format RQF_MDS_DONE_WRITING =
DEFINE_REQ_FMT0("MDS_DONE_WRITING",
mdt_close_client, mdt_body_only);
@@ -1448,11 +1436,6 @@ struct req_format RQF_MDS_WRITEPAGE =
mdt_body_capa, mdt_body_only);
EXPORT_SYMBOL(RQF_MDS_WRITEPAGE);
-struct req_format RQF_MDS_IS_SUBDIR =
- DEFINE_REQ_FMT0("MDS_IS_SUBDIR",
- mdt_body_only, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_IS_SUBDIR);
-
struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE =
DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_CREATE",
llog_origin_handle_create_client, llogd_body_only);
@@ -1572,9 +1555,9 @@ EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
*/
int req_layout_init(void)
{
- int i;
- int j;
- int k;
+ size_t i;
+ size_t j;
+ size_t k;
struct req_format *rf = NULL;
for (i = 0; i < ARRAY_SIZE(req_formats); ++i) {
@@ -1616,7 +1599,7 @@ EXPORT_SYMBOL(req_layout_fini);
*/
static void req_capsule_init_area(struct req_capsule *pill)
{
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(pill->rc_area[RCL_CLIENT]); i++) {
pill->rc_area[RCL_CLIENT][i] = -1;
@@ -1667,8 +1650,7 @@ EXPORT_SYMBOL(req_capsule_fini);
static int __req_format_is_sane(const struct req_format *fmt)
{
- return
- 0 <= fmt->rf_idx && fmt->rf_idx < ARRAY_SIZE(req_formats) &&
+ return fmt->rf_idx < ARRAY_SIZE(req_formats) &&
req_formats[fmt->rf_idx] == fmt;
}
@@ -1702,11 +1684,11 @@ EXPORT_SYMBOL(req_capsule_set);
* variable-sized fields. The field sizes come from the declared \a rmf_size
* field of a \a pill's \a rc_fmt's RMF's.
*/
-int req_capsule_filled_sizes(struct req_capsule *pill,
- enum req_location loc)
+size_t req_capsule_filled_sizes(struct req_capsule *pill,
+ enum req_location loc)
{
const struct req_format *fmt = pill->rc_fmt;
- int i;
+ size_t i;
for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
if (pill->rc_area[loc][i] == -1) {
@@ -1761,11 +1743,11 @@ EXPORT_SYMBOL(req_capsule_server_pack);
* Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill
* corresponding to the given RMF (\a field).
*/
-static int __req_capsule_offset(const struct req_capsule *pill,
+static u32 __req_capsule_offset(const struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc)
{
- int offset;
+ u32 offset;
offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name,
@@ -1869,10 +1851,10 @@ static void *__req_capsule_get(struct req_capsule *pill,
const struct req_format *fmt;
struct lustre_msg *msg;
void *value;
- int len;
- int offset;
+ u32 len;
+ u32 offset;
- void *(*getter)(struct lustre_msg *m, int n, int minlen);
+ void *(*getter)(struct lustre_msg *m, u32 n, u32 minlen);
static const char *rcl_names[RCL_NR] = {
[RCL_CLIENT] = "client",
@@ -1899,20 +1881,20 @@ static void *__req_capsule_get(struct req_capsule *pill,
*/
len = lustre_msg_buflen(msg, offset);
if ((len % field->rmf_size) != 0) {
- CERROR("%s: array field size mismatch %d modulo %d != 0 (%d)\n",
+ CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
field->rmf_name, len, field->rmf_size, loc);
return NULL;
}
} else if (pill->rc_area[loc][offset] != -1) {
len = pill->rc_area[loc][offset];
} else {
- len = max(field->rmf_size, 0);
+ len = max_t(typeof(field->rmf_size), field->rmf_size, 0);
}
value = getter(msg, offset, len);
if (!value) {
DEBUG_REQ(D_ERROR, pill->rc_req,
- "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
+ "Wrong buffer for field `%s' (%u of %u) in format `%s': %u vs. %u (%s)\n",
field->rmf_name, offset, lustre_msg_bufcount(msg),
fmt->rf_name, lustre_msg_buflen(msg, offset), len,
rcl_names[loc]);
@@ -1958,7 +1940,7 @@ EXPORT_SYMBOL(req_capsule_client_swab_get);
*/
void *req_capsule_client_sized_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len)
+ u32 len)
{
req_capsule_set_size(pill, field, RCL_CLIENT, len);
return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
@@ -1999,7 +1981,7 @@ EXPORT_SYMBOL(req_capsule_server_swab_get);
*/
void *req_capsule_server_sized_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len)
+ u32 len)
{
req_capsule_set_size(pill, field, RCL_SERVER, len);
return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
@@ -2008,7 +1990,7 @@ EXPORT_SYMBOL(req_capsule_server_sized_get);
void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
const struct req_msg_field *field,
- int len, void *swabber)
+ u32 len, void *swabber)
{
req_capsule_set_size(pill, field, RCL_SERVER, len);
return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
@@ -2024,23 +2006,25 @@ EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
*/
void req_capsule_set_size(struct req_capsule *pill,
const struct req_msg_field *field,
- enum req_location loc, int size)
+ enum req_location loc, u32 size)
{
LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
- if ((size != field->rmf_size) &&
+ if ((size != (u32)field->rmf_size) &&
(field->rmf_size != -1) &&
!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
(size > 0)) {
+ u32 rmf_size = (u32)field->rmf_size;
+
if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- (size % field->rmf_size != 0)) {
- CERROR("%s: array field size mismatch %d %% %d != 0 (%d)\n",
- field->rmf_name, size, field->rmf_size, loc);
+ (size % rmf_size != 0)) {
+ CERROR("%s: array field size mismatch %u %% %u != 0 (%d)\n",
+ field->rmf_name, size, rmf_size, loc);
LBUG();
} else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
- size < field->rmf_size) {
- CERROR("%s: field size mismatch %d != %d (%d)\n",
- field->rmf_name, size, field->rmf_size, loc);
+ size < rmf_size) {
+ CERROR("%s: field size mismatch %u != %u (%d)\n",
+ field->rmf_name, size, rmf_size, loc);
LBUG();
}
}
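Editor's aside: req_capsule_set_size() above validates a caller-supplied buffer size against the field's declared rmf_size — array fields must be a whole multiple of the element size, scalar fields must be at least as large as declared. A hedged sketch of that rule follows, with simplified types and without the LBUG()/RMF_F_NO_SIZE_CHECK handling; the helper is illustrative, not in-tree code.

/* Illustrative sketch: is "size" acceptable for a field whose declared
 * size is rmf_size?
 */
static bool field_size_ok(u32 size, u32 rmf_size, bool is_struct_array)
{
	if (is_struct_array)
		return size % rmf_size == 0;	/* whole number of elements */
	return size >= rmf_size;		/* at least the declared size */
}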
@@ -2057,7 +2041,7 @@ EXPORT_SYMBOL(req_capsule_set_size);
* actually sets the size in pill.rc_area[loc][offset], but this function
* returns the message buflen[offset], maybe we should use another name.
*/
-int req_capsule_get_size(const struct req_capsule *pill,
+u32 req_capsule_get_size(const struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc)
{
@@ -2075,7 +2059,7 @@ EXPORT_SYMBOL(req_capsule_get_size);
*
* See also req_capsule_set_size().
*/
-int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
+u32 req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
{
return lustre_msg_size(pill->rc_req->rq_import->imp_msg_magic,
pill->rc_fmt->rf_fields[loc].nr,
@@ -2090,10 +2074,11 @@ int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
* This function should not be used for formats which contain variable size
* fields.
*/
-int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
+u32 req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
enum req_location loc)
{
- int size, i = 0;
+ size_t i = 0;
+ u32 size;
/*
* This function should probably LASSERT() that fmt has no fields with
@@ -2103,7 +2088,7 @@ int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
* we do.
*/
size = lustre_msg_hdr_size(magic, fmt->rf_fields[loc].nr);
- if (size < 0)
+ if (!size)
return size;
for (; i < fmt->rf_fields[loc].nr; ++i)
@@ -2135,7 +2120,7 @@ int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
{
int i;
- int j;
+ size_t j;
const struct req_format *old;
@@ -2193,7 +2178,7 @@ static int req_capsule_field_present(const struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc)
{
- int offset;
+ u32 offset;
LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
LASSERT(req_capsule_has_field(pill, field, loc));
@@ -2210,12 +2195,11 @@ static int req_capsule_field_present(const struct req_capsule *pill,
*/
void req_capsule_shrink(struct req_capsule *pill,
const struct req_msg_field *field,
- unsigned int newlen,
- enum req_location loc)
+ u32 newlen, enum req_location loc)
{
const struct req_format *fmt;
struct lustre_msg *msg;
- int len;
+ u32 len;
int offset;
fmt = pill->rc_fmt;
@@ -2228,7 +2212,7 @@ void req_capsule_shrink(struct req_capsule *pill,
msg = __req_msg(pill, loc);
len = lustre_msg_buflen(msg, offset);
- LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n",
+ LASSERTF(newlen <= len, "%s:%s, oldlen=%u, newlen=%u\n",
fmt->rf_name, field->rmf_name, len, newlen);
if (loc == RCL_CLIENT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index bc93b75744e1..9bad57d65db4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -191,7 +191,7 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
LASSERT(!*debugfs_root_ret);
LASSERT(!*stats_ret);
- svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
+ svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES,
0);
if (!svc_stats)
return;
@@ -937,7 +937,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
- static struct seq_operations sops = {
+ static const struct seq_operations sops = {
.start = ptlrpc_lprocfs_svc_req_history_start,
.stop = ptlrpc_lprocfs_svc_req_history_stop,
.next = ptlrpc_lprocfs_svc_req_history_next,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 11ec82545347..9c937398a085 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -295,7 +295,6 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
}
return 0;
}
-EXPORT_SYMBOL(ptlrpc_unregister_bulk);
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
@@ -398,7 +397,8 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
lustre_msg_set_status(req->rq_repmsg,
ptlrpc_status_hton(req->rq_status));
lustre_msg_set_opc(req->rq_repmsg,
- req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
+ req->rq_reqmsg ?
+ lustre_msg_get_opc(req->rq_reqmsg) : 0);
target_pack_pool_reply(req);
@@ -433,7 +433,6 @@ out:
ptlrpc_connection_put(conn);
return rc;
}
-EXPORT_SYMBOL(ptlrpc_send_reply);
int ptlrpc_reply(struct ptlrpc_request *req)
{
@@ -441,7 +440,6 @@ int ptlrpc_reply(struct ptlrpc_request *req)
return 0;
return ptlrpc_send_reply(req, 0);
}
-EXPORT_SYMBOL(ptlrpc_reply);
/**
* For request \a req send an error reply back. Create empty
@@ -468,13 +466,11 @@ int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
rc = ptlrpc_send_reply(req, may_be_difficult);
return rc;
}
-EXPORT_SYMBOL(ptlrpc_send_error);
int ptlrpc_error(struct ptlrpc_request *req)
{
return ptlrpc_send_error(req, 0);
}
-EXPORT_SYMBOL(ptlrpc_error);
/**
* Send request \a request.
@@ -490,7 +486,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
struct ptlrpc_connection *connection;
lnet_handle_me_t reply_me_h;
lnet_md_t reply_md;
- struct obd_device *obd = request->rq_import->imp_obd;
+ struct obd_import *imp = request->rq_import;
+ struct obd_device *obd = imp->imp_obd;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
return 0;
@@ -503,7 +500,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
*/
LASSERT(!request->rq_receiving_reply);
LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
+ (imp->imp_state == LUSTRE_IMP_FULL)));
if (unlikely(obd && obd->obd_fail)) {
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
@@ -516,15 +513,22 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
return -ENODEV;
}
- connection = request->rq_import->imp_connection;
+ connection = imp->imp_connection;
lustre_msg_set_handle(request->rq_reqmsg,
- &request->rq_import->imp_remote_handle);
+ &imp->imp_remote_handle);
lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
- lustre_msg_set_conn_cnt(request->rq_reqmsg,
- request->rq_import->imp_conn_cnt);
- lustre_msghdr_set_flags(request->rq_reqmsg,
- request->rq_import->imp_msghdr_flags);
+ lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
+ lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
+
+ /**
+ * With AT enabled, all requests should have AT_SUPPORT in the
+ * FULL import state when OBD_CONNECT_AT is set.
+ */
+ LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
+ (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
+ !(imp->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_AT));
if (request->rq_resend)
lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
@@ -628,7 +632,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
ptlrpc_request_addref(request);
if (obd && obd->obd_svc_stats)
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&request->rq_import->imp_inflight));
+ atomic_read(&imp->imp_inflight));
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
@@ -640,7 +644,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_at_get_net_latency(request);
- ptlrpc_pinger_sending_on_import(request->rq_import);
+ ptlrpc_pinger_sending_on_import(imp);
DEBUG_REQ(D_INFO, request, "send flg=%x",
lustre_msg_get_flags(request->rq_reqmsg));
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index b514f18fae50..871768511e8c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -50,36 +50,34 @@
#include "ptlrpc_internal.h"
-static inline int lustre_msg_hdr_size_v2(int count)
+static inline u32 lustre_msg_hdr_size_v2(u32 count)
{
return cfs_size_round(offsetof(struct lustre_msg_v2,
lm_buflens[count]));
}
-int lustre_msg_hdr_size(__u32 magic, int count)
+u32 lustre_msg_hdr_size(__u32 magic, u32 count)
{
switch (magic) {
case LUSTRE_MSG_MAGIC_V2:
return lustre_msg_hdr_size_v2(count);
default:
LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
+ return 0;
}
}
-EXPORT_SYMBOL(lustre_msg_hdr_size);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index)
+ u32 index)
{
if (inout)
lustre_set_req_swabbed(req, index);
else
lustre_set_rep_swabbed(req, index);
}
-EXPORT_SYMBOL(ptlrpc_buf_set_swabbed);
int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- int index)
+ u32 index)
{
if (inout)
return (ptlrpc_req_need_swab(req) &&
@@ -88,12 +86,11 @@ int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
return (ptlrpc_rep_need_swab(req) &&
!lustre_rep_swabbed(req, index));
}
-EXPORT_SYMBOL(ptlrpc_buf_need_swab);
/* early reply size */
-int lustre_msg_early_size(void)
+u32 lustre_msg_early_size(void)
{
- static int size;
+ static u32 size;
if (!size) {
/* Always reply old ptlrpc_body_v2 to keep interoperability
@@ -111,9 +108,9 @@ int lustre_msg_early_size(void)
}
EXPORT_SYMBOL(lustre_msg_early_size);
-int lustre_msg_size_v2(int count, __u32 *lengths)
+u32 lustre_msg_size_v2(int count, __u32 *lengths)
{
- int size;
+ u32 size;
int i;
size = lustre_msg_hdr_size_v2(count);
@@ -131,7 +128,7 @@ EXPORT_SYMBOL(lustre_msg_size_v2);
* target then the first buffer will be stripped because the ptlrpc
* data is part of the lustre_msg_v1 header. b=14043
*/
-int lustre_msg_size(__u32 magic, int count, __u32 *lens)
+u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
{
__u32 size[] = { sizeof(struct ptlrpc_body) };
@@ -148,15 +145,14 @@ int lustre_msg_size(__u32 magic, int count, __u32 *lens)
return lustre_msg_size_v2(count, lens);
default:
LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
+ return 0;
}
}
-EXPORT_SYMBOL(lustre_msg_size);
/* This is used to determine the size of a buffer that was already packed
* and will correctly handle the different message formats.
*/
-int lustre_packed_msg_size(struct lustre_msg *msg)
+u32 lustre_packed_msg_size(struct lustre_msg *msg)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
@@ -166,7 +162,6 @@ int lustre_packed_msg_size(struct lustre_msg *msg)
return 0;
}
}
-EXPORT_SYMBOL(lustre_packed_msg_size);
void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
char **bufs)
@@ -227,7 +222,6 @@ int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
/* only use new format, we don't need to be compatible with 1.4 */
return lustre_pack_request_v2(req, count, lens, bufs);
}
-EXPORT_SYMBOL(lustre_pack_request);
#if RS_DEBUG
LIST_HEAD(ptlrpc_rs_debug_lru);
@@ -369,7 +363,6 @@ int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
return rc;
}
-EXPORT_SYMBOL(lustre_pack_reply_flags);
int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
char **bufs)
@@ -378,11 +371,9 @@ int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
}
EXPORT_SYMBOL(lustre_pack_reply);
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
+void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, u32 n, u32 min_size)
{
- int i, offset, buflen, bufcount;
-
- LASSERT(n >= 0);
+ u32 i, offset, buflen, bufcount;
bufcount = m->lm_bufcount;
if (unlikely(n >= bufcount)) {
@@ -406,7 +397,7 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
return (char *)m + offset;
}
-void *lustre_msg_buf(struct lustre_msg *m, int n, int min_size)
+void *lustre_msg_buf(struct lustre_msg *m, u32 n, u32 min_size)
{
switch (m->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
@@ -419,7 +410,7 @@ void *lustre_msg_buf(struct lustre_msg *m, int n, int min_size)
}
EXPORT_SYMBOL(lustre_msg_buf);
-static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
+static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, u32 segment,
unsigned int newlen, int move_data)
{
char *tail = NULL, *newpos;
@@ -493,7 +484,6 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
sptlrpc_svc_free_rs(rs);
}
-EXPORT_SYMBOL(lustre_free_reply_state);
static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
{
@@ -581,7 +571,6 @@ int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
}
return rc;
}
-EXPORT_SYMBOL(ptlrpc_unpack_req_msg);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
{
@@ -594,7 +583,6 @@ int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
}
return rc;
}
-EXPORT_SYMBOL(ptlrpc_unpack_rep_msg);
static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
const int inout, int offset)
@@ -647,7 +635,7 @@ int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
}
}
-static inline int lustre_msg_buflen_v2(struct lustre_msg_v2 *m, int n)
+static inline u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, u32 n)
{
if (n >= m->lm_bufcount)
return 0;
@@ -662,14 +650,14 @@ static inline int lustre_msg_buflen_v2(struct lustre_msg_v2 *m, int n)
*
* returns zero for non-existent message indices
*/
-int lustre_msg_buflen(struct lustre_msg *m, int n)
+u32 lustre_msg_buflen(struct lustre_msg *m, u32 n)
{
switch (m->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
return lustre_msg_buflen_v2(m, n);
default:
CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return -EINVAL;
+ return 0;
}
}
EXPORT_SYMBOL(lustre_msg_buflen);
@@ -677,23 +665,22 @@ EXPORT_SYMBOL(lustre_msg_buflen);
/* NB return the bufcount for lustre_msg_v2 format, so if message is packed
* in V1 format, the result is one bigger. (add struct ptlrpc_body).
*/
-int lustre_msg_bufcount(struct lustre_msg *m)
+u32 lustre_msg_bufcount(struct lustre_msg *m)
{
switch (m->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
return m->lm_bufcount;
default:
CERROR("incorrect message magic: %08x\n", m->lm_magic);
- return -EINVAL;
+ return 0;
}
}
-EXPORT_SYMBOL(lustre_msg_bufcount);
-char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
+char *lustre_msg_string(struct lustre_msg *m, u32 index, u32 max_len)
{
/* max_len == 0 means the string should fill the buffer */
char *str;
- int slen, blen;
+ u32 slen, blen;
switch (m->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
@@ -731,11 +718,10 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
return str;
}
-EXPORT_SYMBOL(lustre_msg_string);
/* Wrap up the normal fixed length cases */
-static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index,
- int min_size, void *swabber)
+static inline void *__lustre_swab_buf(struct lustre_msg *msg, u32 index,
+ u32 min_size, void *swabber)
{
void *ptr = NULL;
@@ -804,7 +790,7 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg)
}
EXPORT_SYMBOL(lustre_msg_get_flags);
-void lustre_msg_add_flags(struct lustre_msg *msg, int flags)
+void lustre_msg_add_flags(struct lustre_msg *msg, u32 flags)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
@@ -820,7 +806,7 @@ void lustre_msg_add_flags(struct lustre_msg *msg, int flags)
}
EXPORT_SYMBOL(lustre_msg_add_flags);
-void lustre_msg_set_flags(struct lustre_msg *msg, int flags)
+void lustre_msg_set_flags(struct lustre_msg *msg, u32 flags)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
@@ -834,9 +820,8 @@ void lustre_msg_set_flags(struct lustre_msg *msg, int flags)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_set_flags);
-void lustre_msg_clear_flags(struct lustre_msg *msg, int flags)
+void lustre_msg_clear_flags(struct lustre_msg *msg, u32 flags)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
@@ -868,9 +853,8 @@ __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
return 0;
}
}
-EXPORT_SYMBOL(lustre_msg_get_op_flags);
-void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags)
+void lustre_msg_add_op_flags(struct lustre_msg *msg, u32 flags)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
@@ -903,7 +887,6 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
return NULL;
}
}
-EXPORT_SYMBOL(lustre_msg_get_handle);
__u32 lustre_msg_get_type(struct lustre_msg *msg)
{
@@ -924,7 +907,7 @@ __u32 lustre_msg_get_type(struct lustre_msg *msg)
}
EXPORT_SYMBOL(lustre_msg_get_type);
-void lustre_msg_add_version(struct lustre_msg *msg, int version)
+void lustre_msg_add_version(struct lustre_msg *msg, u32 version)
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
@@ -938,7 +921,6 @@ void lustre_msg_add_version(struct lustre_msg *msg, int version)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_add_version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg)
{
@@ -1055,7 +1037,6 @@ __u64 lustre_msg_get_slv(struct lustre_msg *msg)
return -EINVAL;
}
}
-EXPORT_SYMBOL(lustre_msg_get_slv);
void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
{
@@ -1075,7 +1056,6 @@ void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
return;
}
}
-EXPORT_SYMBOL(lustre_msg_set_slv);
__u32 lustre_msg_get_limit(struct lustre_msg *msg)
{
@@ -1094,7 +1074,6 @@ __u32 lustre_msg_get_limit(struct lustre_msg *msg)
return -EINVAL;
}
}
-EXPORT_SYMBOL(lustre_msg_get_limit);
void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
{
@@ -1114,7 +1093,6 @@ void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
return;
}
}
-EXPORT_SYMBOL(lustre_msg_set_limit);
__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
{
@@ -1145,7 +1123,6 @@ __u32 lustre_msg_get_magic(struct lustre_msg *msg)
return 0;
}
}
-EXPORT_SYMBOL(lustre_msg_get_magic);
__u32 lustre_msg_get_timeout(struct lustre_msg *msg)
{
@@ -1203,8 +1180,9 @@ __u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
unsigned int hsize = 4;
cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
- lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
- NULL, 0, (unsigned char *)&crc, &hsize);
+ lustre_msg_buflen(msg,
+ MSG_PTLRPC_BODY_OFF),
+ NULL, 0, (unsigned char *)&crc, &hsize);
return crc;
}
default:
@@ -1227,7 +1205,6 @@ void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_set_handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
{
@@ -1243,7 +1220,6 @@ void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_set_type);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
{
@@ -1259,7 +1235,6 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_set_opc);
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
{
@@ -1326,7 +1301,6 @@ void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
}
}
-EXPORT_SYMBOL(lustre_msg_set_conn_cnt);
void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
{
@@ -1377,7 +1351,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
if (jobid)
- memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE);
+ memcpy(pb->pb_jobid, jobid, LUSTRE_JOBID_SIZE);
else if (pb->pb_jobid[0] == '\0')
lustre_get_jobid(pb->pb_jobid);
return;
@@ -1491,7 +1465,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
*/
CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
}
-EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
void lustre_swab_connect(struct obd_connect_data *ocd)
{
@@ -1591,7 +1564,6 @@ void lustre_swab_obd_statfs(struct obd_statfs *os)
CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
}
-EXPORT_SYMBOL(lustre_swab_obd_statfs);
void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
{
@@ -1599,33 +1571,28 @@ void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
__swab32s(&ioo->ioo_max_brw);
__swab32s(&ioo->ioo_bufcnt);
}
-EXPORT_SYMBOL(lustre_swab_obd_ioobj);
void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
{
- __swab64s(&nbr->offset);
- __swab32s(&nbr->len);
- __swab32s(&nbr->flags);
+ __swab64s(&nbr->rnb_offset);
+ __swab32s(&nbr->rnb_len);
+ __swab32s(&nbr->rnb_flags);
}
-EXPORT_SYMBOL(lustre_swab_niobuf_remote);
void lustre_swab_ost_body(struct ost_body *b)
{
lustre_swab_obdo(&b->oa);
}
-EXPORT_SYMBOL(lustre_swab_ost_body);
void lustre_swab_ost_last_id(u64 *id)
{
__swab64s(id);
}
-EXPORT_SYMBOL(lustre_swab_ost_last_id);
void lustre_swab_generic_32s(__u32 *val)
{
__swab32s(val);
}
-EXPORT_SYMBOL(lustre_swab_generic_32s);
void lustre_swab_gl_desc(union ldlm_gl_desc *desc)
{
@@ -1674,37 +1641,36 @@ EXPORT_SYMBOL(lustre_swab_lquota_lvb);
void lustre_swab_mdt_body(struct mdt_body *b)
{
- lustre_swab_lu_fid(&b->fid1);
- lustre_swab_lu_fid(&b->fid2);
+ lustre_swab_lu_fid(&b->mbo_fid1);
+ lustre_swab_lu_fid(&b->mbo_fid2);
/* handle is opaque */
- __swab64s(&b->valid);
- __swab64s(&b->size);
- __swab64s(&b->mtime);
- __swab64s(&b->atime);
- __swab64s(&b->ctime);
- __swab64s(&b->blocks);
- __swab64s(&b->ioepoch);
- __swab64s(&b->t_state);
- __swab32s(&b->fsuid);
- __swab32s(&b->fsgid);
- __swab32s(&b->capability);
- __swab32s(&b->mode);
- __swab32s(&b->uid);
- __swab32s(&b->gid);
- __swab32s(&b->flags);
- __swab32s(&b->rdev);
- __swab32s(&b->nlink);
- CLASSERT(offsetof(typeof(*b), unused2) != 0);
- __swab32s(&b->suppgid);
- __swab32s(&b->eadatasize);
- __swab32s(&b->aclsize);
- __swab32s(&b->max_mdsize);
- __swab32s(&b->max_cookiesize);
- __swab32s(&b->uid_h);
- __swab32s(&b->gid_h);
- CLASSERT(offsetof(typeof(*b), padding_5) != 0);
-}
-EXPORT_SYMBOL(lustre_swab_mdt_body);
+ __swab64s(&b->mbo_valid);
+ __swab64s(&b->mbo_size);
+ __swab64s(&b->mbo_mtime);
+ __swab64s(&b->mbo_atime);
+ __swab64s(&b->mbo_ctime);
+ __swab64s(&b->mbo_blocks);
+ __swab64s(&b->mbo_ioepoch);
+ __swab64s(&b->mbo_t_state);
+ __swab32s(&b->mbo_fsuid);
+ __swab32s(&b->mbo_fsgid);
+ __swab32s(&b->mbo_capability);
+ __swab32s(&b->mbo_mode);
+ __swab32s(&b->mbo_uid);
+ __swab32s(&b->mbo_gid);
+ __swab32s(&b->mbo_flags);
+ __swab32s(&b->mbo_rdev);
+ __swab32s(&b->mbo_nlink);
+ CLASSERT(offsetof(typeof(*b), mbo_unused2) != 0);
+ __swab32s(&b->mbo_suppgid);
+ __swab32s(&b->mbo_eadatasize);
+ __swab32s(&b->mbo_aclsize);
+ __swab32s(&b->mbo_max_mdsize);
+ __swab32s(&b->mbo_max_cookiesize);
+ __swab32s(&b->mbo_uid_h);
+ __swab32s(&b->mbo_gid_h);
+ CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
+}
void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
{
@@ -1713,7 +1679,6 @@ void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
__swab32s(&b->flags);
CLASSERT(offsetof(typeof(*b), padding) != 0);
}
-EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
{
@@ -1729,11 +1694,10 @@ void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
for (i = 0; i < MTI_NIDS_MAX; i++)
__swab64s(&mti->mti_nids[i]);
}
-EXPORT_SYMBOL(lustre_swab_mgs_target_info);
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
{
- int i;
+ __u8 i;
__swab64s(&entry->mne_version);
__swab32s(&entry->mne_instance);
@@ -1760,14 +1724,12 @@ void lustre_swab_mgs_config_body(struct mgs_config_body *body)
__swab32s(&body->mcb_units);
__swab16s(&body->mcb_type);
}
-EXPORT_SYMBOL(lustre_swab_mgs_config_body);
void lustre_swab_mgs_config_res(struct mgs_config_res *body)
{
__swab64s(&body->mcr_offset);
__swab64s(&body->mcr_size);
}
-EXPORT_SYMBOL(lustre_swab_mgs_config_res);
static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
{
@@ -1800,7 +1762,6 @@ void lustre_swab_obd_quotactl(struct obd_quotactl *q)
lustre_swab_obd_dqinfo(&q->qc_dqinfo);
lustre_swab_obd_dqblk(&q->qc_dqblk);
}
-EXPORT_SYMBOL(lustre_swab_obd_quotactl);
void lustre_swab_fid2path(struct getinfo_fid2path *gf)
{
@@ -1822,7 +1783,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
{
- int i;
+ __u32 i;
__swab64s(&fiemap->fm_start);
__swab64s(&fiemap->fm_length);
@@ -1834,7 +1795,6 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
for (i = 0; i < fiemap->fm_mapped_extents; i++)
lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
}
-EXPORT_SYMBOL(lustre_swab_fiemap);
void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
{
@@ -1863,7 +1823,6 @@ void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
};
-EXPORT_SYMBOL(lustre_swab_mdt_rec_reint);
void lustre_swab_lov_desc(struct lov_desc *ld)
{
@@ -1878,18 +1837,42 @@ void lustre_swab_lov_desc(struct lov_desc *ld)
}
EXPORT_SYMBOL(lustre_swab_lov_desc);
-static void print_lum(struct lov_user_md *lum)
+/* This structure is always in little-endian */
+static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
+{
+ int i;
+
+ __swab32s(&lmm1->lmv_magic);
+ __swab32s(&lmm1->lmv_stripe_count);
+ __swab32s(&lmm1->lmv_master_mdt_index);
+ __swab32s(&lmm1->lmv_hash_type);
+ __swab32s(&lmm1->lmv_layout_version);
+ for (i = 0; i < lmm1->lmv_stripe_count; i++)
+ lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
+}
+
+void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
+{
+ switch (lmm->lmv_magic) {
+ case LMV_MAGIC_V1:
+ lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
+
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
{
- CDEBUG(D_OTHER, "lov_user_md %p:\n", lum);
- CDEBUG(D_OTHER, "\tlmm_magic: %#x\n", lum->lmm_magic);
- CDEBUG(D_OTHER, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
- CDEBUG(D_OTHER, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
- CDEBUG(D_OTHER, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
- CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
- CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
- CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
- lum->lmm_stripe_offset);
+ __swab32s(&lum->lum_magic);
+ __swab32s(&lum->lum_stripe_count);
+ __swab32s(&lum->lum_stripe_offset);
+ __swab32s(&lum->lum_hash_type);
+ __swab32s(&lum->lum_type);
+ CLASSERT(offsetof(typeof(*lum), lum_padding1));
}
+EXPORT_SYMBOL(lustre_swab_lmv_user_md);
static void lustre_swab_lmm_oi(struct ost_id *oi)
{
@@ -1905,7 +1888,6 @@ static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
__swab32s(&lum->lmm_stripe_size);
__swab16s(&lum->lmm_stripe_count);
__swab16s(&lum->lmm_stripe_offset);
- print_lum(lum);
}
void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
@@ -1941,9 +1923,9 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
int i;
for (i = 0; i < stripe_count; i++) {
- lustre_swab_ost_id(&(lod[i].l_ost_oi));
- __swab32s(&(lod[i].l_ost_gen));
- __swab32s(&(lod[i].l_ost_idx));
+ lustre_swab_ost_id(&lod[i].l_ost_oi);
+ __swab32s(&lod[i].l_ost_gen);
+ __swab32s(&lod[i].l_ost_idx);
}
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
@@ -1973,7 +1955,6 @@ void lustre_swab_ldlm_intent(struct ldlm_intent *i)
{
__swab64s(&i->opc);
}
-EXPORT_SYMBOL(lustre_swab_ldlm_intent);
static void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
{
@@ -1997,7 +1978,6 @@ void lustre_swab_ldlm_request(struct ldlm_request *rq)
__swab32s(&rq->lock_count);
/* lock_handle[] opaque */
}
-EXPORT_SYMBOL(lustre_swab_ldlm_request);
void lustre_swab_ldlm_reply(struct ldlm_reply *r)
{
@@ -2008,7 +1988,6 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r)
__swab64s(&r->lock_policy_res1);
__swab64s(&r->lock_policy_res2);
}
-EXPORT_SYMBOL(lustre_swab_ldlm_reply);
/* Dump functions */
void dump_ioo(struct obd_ioobj *ioo)
@@ -2018,14 +1997,12 @@ void dump_ioo(struct obd_ioobj *ioo)
POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
ioo->ioo_bufcnt);
}
-EXPORT_SYMBOL(dump_ioo);
void dump_rniobuf(struct niobuf_remote *nb)
{
CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
- nb->offset, nb->len, nb->flags);
+ nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
}
-EXPORT_SYMBOL(dump_rniobuf);
static void dump_obdo(struct obdo *oa)
{
@@ -2093,13 +2070,11 @@ void dump_ost_body(struct ost_body *ob)
{
dump_obdo(&ob->oa);
}
-EXPORT_SYMBOL(dump_ost_body);
void dump_rcs(__u32 *rc)
{
CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
}
-EXPORT_SYMBOL(dump_rcs);
static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
{
@@ -2184,14 +2159,12 @@ void lustre_swab_lustre_capa(struct lustre_capa *c)
__swab32s(&c->lc_timeout);
__swab32s(&c->lc_expiry);
}
-EXPORT_SYMBOL(lustre_swab_lustre_capa);
void lustre_swab_hsm_user_state(struct hsm_user_state *state)
{
__swab32s(&state->hus_states);
__swab32s(&state->hus_archive_id);
}
-EXPORT_SYMBOL(lustre_swab_hsm_user_state);
void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
{
@@ -2214,14 +2187,12 @@ void lustre_swab_hsm_current_action(struct hsm_current_action *action)
__swab32s(&action->hca_action);
lustre_swab_hsm_extent(&action->hca_location);
}
-EXPORT_SYMBOL(lustre_swab_hsm_current_action);
void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
{
lustre_swab_lu_fid(&hui->hui_fid);
lustre_swab_hsm_extent(&hui->hui_extent);
}
-EXPORT_SYMBOL(lustre_swab_hsm_user_item);
void lustre_swab_layout_intent(struct layout_intent *li)
{
@@ -2230,7 +2201,6 @@ void lustre_swab_layout_intent(struct layout_intent *li)
__swab64s(&li->li_start);
__swab64s(&li->li_end);
}
-EXPORT_SYMBOL(lustre_swab_layout_intent);
void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
{
@@ -2241,7 +2211,6 @@ void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
__swab16s(&hpk->hpk_flags);
__swab16s(&hpk->hpk_errval);
}
-EXPORT_SYMBOL(lustre_swab_hsm_progress_kernel);
void lustre_swab_hsm_request(struct hsm_request *hr)
{
@@ -2251,7 +2220,6 @@ void lustre_swab_hsm_request(struct hsm_request *hr)
__swab32s(&hr->hr_itemcount);
__swab32s(&hr->hr_data_len);
}
-EXPORT_SYMBOL(lustre_swab_hsm_request);
void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
{
@@ -2264,4 +2232,3 @@ void lustre_swab_close_data(struct close_data *cd)
lustre_swab_lu_fid(&cd->cd_fid);
__swab64s(&cd->cd_data_version);
}
-EXPORT_SYMBOL(lustre_swab_close_data);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 6c820e944171..5b9fb11c0b6b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -64,9 +64,9 @@ void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
{
lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
- kiov->kiov_page = page;
- kiov->kiov_offset = pageoffset;
- kiov->kiov_len = len;
+ kiov->bv_page = page;
+ kiov->bv_offset = pageoffset;
+ kiov->bv_len = len;
desc->bd_iov_count++;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index c0529d808d81..5504fc2363ac 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -340,7 +340,6 @@ void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
{
ptlrpc_update_next_ping(imp, 0);
}
-EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index a9831fab80f3..f14d193287da 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -53,6 +53,8 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
int ptlrpcd_start(struct ptlrpcd_ctl *pc);
/* client.c */
+void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+ unsigned int service_time);
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
unsigned type, unsigned portal);
int ptlrpc_request_cache_init(void);
@@ -60,6 +62,11 @@ void ptlrpc_request_cache_fini(void);
struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
void ptlrpc_request_cache_free(struct ptlrpc_request *req);
void ptlrpc_init_xid(void);
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+ struct ptlrpc_request *req);
+int ptlrpc_expired_set(void *data);
+int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
+void ptlrpc_resend_req(struct ptlrpc_request *request);
/* events.c */
int ptlrpc_init_portals(void);
@@ -268,7 +275,7 @@ void sptlrpc_conf_fini(void);
int sptlrpc_init(void);
void sptlrpc_fini(void);
-static inline int ll_rpc_recoverable_error(int rc)
+static inline bool ptlrpc_recoverable_error(int rc)
{
return (rc == -ENOTCONN || rc == -ENODEV);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 0a374b6c2f71..1f55d642aa75 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -412,7 +412,7 @@ static int ptlrpcd(void *arg)
* an argument, describing its "scope".
*/
rc = lu_context_init(&env.le_ctx,
- LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF);
if (rc == 0) {
rc = lu_context_init(env.le_ses,
LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
@@ -567,7 +567,7 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc)
* ptlrpcd thread (or a thread-set) has to be given an argument,
* describing its "scope".
*/
- rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
+ rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD | LCT_REMEMBER);
if (rc != 0)
goto out;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 718b3a8d61c6..405faf0dc9fc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -201,7 +201,6 @@ int ptlrpc_resend(struct obd_import *imp)
return 0;
}
-EXPORT_SYMBOL(ptlrpc_resend);
/**
* Go through all requests in delayed list and wake their threads
@@ -221,7 +220,6 @@ void ptlrpc_wake_delayed(struct obd_import *imp)
}
spin_unlock(&imp->imp_lock);
}
-EXPORT_SYMBOL(ptlrpc_wake_delayed);
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index dbd819fa6b75..a7416cd9ac71 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -311,6 +311,19 @@ static int import_sec_check_expire(struct obd_import *imp)
return sptlrpc_import_sec_adapt(imp, NULL, NULL);
}
+/**
+ * Get and validate the client side ptlrpc security facilities from
+ * \a imp. There is a race condition on client reconnect when the import is
+ * being destroyed while there are outstanding client-bound requests. In
+ * this case do not output any error messages if import security is not
+ * found.
+ *
+ * \param[in] imp obd import associated with client
+ * \param[out] sec client side ptlrpc security
+ *
+ * \retval 0 if security retrieved successfully
+ * \retval -ve errno if there was a problem
+ */
static int import_sec_validate_get(struct obd_import *imp,
struct ptlrpc_sec **sec)
{
@@ -323,9 +336,11 @@ static int import_sec_validate_get(struct obd_import *imp,
}
*sec = sptlrpc_import_sec_ref(imp);
+ /* Only output an error when the import is still active */
if (!*sec) {
- CERROR("import %p (%s) with no sec\n",
- imp, ptlrpc_import_state_name(imp->imp_state));
+ if (list_empty(&imp->imp_zombie_chain))
+ CERROR("import %p (%s) with no sec\n",
+ imp, ptlrpc_import_state_name(imp->imp_state));
return -EACCES;
}
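Editor's aside: the kernel-doc added above explains the reconnect race — an import can be torn down while requests are still bound to it, in which case a missing security context is expected and should not be logged. A hedged sketch of that guard, reusing only calls visible in the hunk; the wrapper name and simplified error handling are assumptions for illustration.

/* Illustrative only: report a missing sec context just for live imports. */
static int sec_ref_or_eacces(struct obd_import *imp, struct ptlrpc_sec **sec)
{
	*sec = sptlrpc_import_sec_ref(imp);
	if (*sec)
		return 0;

	if (list_empty(&imp->imp_zombie_chain))	/* import still active */
		CERROR("import %p (%s) with no sec\n",
		       imp, ptlrpc_import_state_name(imp->imp_state));
	return -EACCES;
}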
@@ -499,7 +514,7 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
newctx, newctx->cc_flags);
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
} else {
/*
* it's possible newctx == oldctx if we're switching
@@ -718,8 +733,9 @@ again:
req->rq_restart = 0;
spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
- ctx_refresh_interrupt, req);
+ lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
+ ctx_refresh_timeout, ctx_refresh_interrupt,
+ req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
/*
@@ -2204,7 +2220,7 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
task_lock(current);
if (pud->pud_ngroups > current_ngroups)
pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
+ memcpy(pud->pud_groups, current_cred()->group_info->gid,
pud->pud_ngroups * sizeof(__u32));
task_unlock(current);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 5f4d79718589..b2cc5ea6cb93 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -139,7 +139,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
- "max wait time: %ld/%u\n",
+ "max wait time: %ld/%lu\n",
totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
@@ -158,7 +158,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- HZ);
+ msecs_to_jiffies(MSEC_PER_SEC));
spin_unlock(&page_pools.epp_lock);
@@ -326,12 +326,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].kiov_page);
+ LASSERT(desc->bd_enc_iov[i].bv_page);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_iov[i].kiov_page;
+ desc->bd_enc_iov[i].bv_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
@@ -348,7 +348,6 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
kfree(desc->bd_enc_iov);
desc->bd_enc_iov = NULL;
}
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
static inline void enc_pools_alloc(void)
{
@@ -432,12 +431,13 @@ void sptlrpc_enc_pool_fini(void)
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%d\n",
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ);
+ page_pools.epp_st_max_wait,
+ msecs_to_jiffies(MSEC_PER_SEC));
}
}
@@ -456,13 +456,11 @@ const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}
-EXPORT_SYMBOL(sptlrpc_get_hash_name);
__u8 sptlrpc_get_hash_alg(const char *algname)
{
return cfs_crypto_hash_alg(algname);
}
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
@@ -522,9 +520,10 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
for (i = 0; i < desc->bd_iov_count; i++) {
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
- desc->bd_iov[i].kiov_len);
+ cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
+ desc->bd_iov[i].bv_offset &
+ ~PAGE_MASK,
+ desc->bd_iov[i].bv_len);
}
if (hashsize > buflen) {
@@ -542,4 +541,3 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
return err;
}
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index c14035479c5f..2181a85efd49 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -58,7 +58,6 @@ enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd)
CERROR("unknown target %p(%s)\n", obd, type);
return LUSTRE_SP_ANY;
}
-EXPORT_SYMBOL(sptlrpc_target_sec_part);
/****************************************
* user supplied flavor string parsing *
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 9b9801ece582..8ffd000eafac 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -71,7 +71,6 @@ void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
-EXPORT_SYMBOL(sptlrpc_gc_add_sec);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
{
@@ -95,7 +94,6 @@ void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
-EXPORT_SYMBOL(sptlrpc_gc_del_sec);
static void sec_process_ctx_list(void)
{
@@ -182,7 +180,8 @@ again:
/* check ctx list again before sleep */
sec_process_ctx_list();
- lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+ lwi = LWI_TIMEOUT(msecs_to_jiffies(SEC_GC_INTERVAL * MSEC_PER_SEC),
+ NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopping(thread) ||
thread_is_signal(thread),
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 5c4590b0c521..cd305bcb334a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -154,13 +154,13 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
unsigned int off, i;
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len == 0)
+ if (desc->bd_iov[i].bv_len == 0)
continue;
- ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
+ ptr = kmap(desc->bd_iov[i].bv_page);
+ off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_iov[i].kiov_page);
+ kunmap(desc->bd_iov[i].bv_page);
return;
}
}
@@ -249,9 +249,12 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
unsigned int hsize = 4;
cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&cksum, &hsize);
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
+ 0),
+ lustre_msg_buflen(msg,
+ PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&cksum,
+ &hsize);
if (cksum != msg->lm_cksum) {
CDEBUG(D_SEC,
"early reply checksum mismatch: %08x != %08x\n",
@@ -349,11 +352,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
- desc->bd_iov[i].kiov_len =
+ if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
+ desc->bd_iov[i].bv_len =
desc->bd_nob_transferred - nob;
}
- nob += desc->bd_iov[i].kiov_len;
+ nob += desc->bd_iov[i].bv_len;
}
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
@@ -869,9 +872,12 @@ int plain_authorize(struct ptlrpc_request *req)
unsigned int hsize = 4;
cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
- NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
+ 0),
+ lustre_msg_buflen(msg,
+ PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&msg->lm_cksum,
+ &hsize);
req->rq_reply_off = 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 4788c4940c2a..72f39308eebb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1005,6 +1005,10 @@ ptlrpc_at_remove_timed(struct ptlrpc_request *req)
array->paa_count--;
}
+/*
+ * Attempt to extend the request deadline by sending an early reply to the
+ * client.
+ */
static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
@@ -1039,24 +1043,26 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
return -ENOSYS;
}
- /* Fake our processing time into the future to ask the clients
- * for some extra amount of time
+ /*
+ * We want to extend the request deadline by at_extra seconds,
+ * so we set our service estimate to reflect how much time has
+ * passed since this request arrived plus an additional
+ * at_extra seconds. The client will calculate the new deadline
+ * based on this service estimate (plus some additional time to
+ * account for network latency). See ptlrpc_at_recv_early_reply().
*/
at_measured(&svcpt->scp_at_estimate, at_extra +
ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
+ newdl = req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate);
/* Check to see if we've actually increased the deadline -
* we may be past adaptive_max
*/
- if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate)) {
+ if (req->rq_deadline >= newdl) {
DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
- olddl, req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate) -
- ktime_get_real_seconds());
+ olddl, newdl - ktime_get_real_seconds());
return -ETIMEDOUT;
}
- newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (!reqcopy)
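Editor's aside: the comment above describes the new early-reply flow — the service estimate is grown to cover the time already spent on the request plus at_extra, and the new deadline is the arrival time plus that estimate, so the early reply is only worth sending if the deadline actually moves forward. A compact sketch of the arithmetic, reusing at_measured()/at_get() as they appear in the hunk, with simplified types and none of the request bookkeeping.

/* Illustrative only: the deadline an early reply would advertise. */
static time64_t early_reply_deadline(time64_t arrival, time64_t now,
				     unsigned int at_extra,
				     struct adaptive_timeout *estimate)
{
	/* Grow the estimate to "time already spent + at_extra". */
	at_measured(estimate, at_extra + now - arrival);

	/* The client recomputes its deadline from arrival + estimate. */
	return arrival + at_get(estimate);
}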
@@ -1982,11 +1988,12 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
cond_resched();
l_wait_event_exclusive_head(svcpt->scp_waitq,
- ptlrpc_thread_stopping(thread) ||
- ptlrpc_server_request_incoming(svcpt) ||
- ptlrpc_server_request_pending(svcpt, false) ||
- ptlrpc_rqbd_pending(svcpt) ||
- ptlrpc_at_check(svcpt), &lwi);
+ ptlrpc_thread_stopping(thread) ||
+ ptlrpc_server_request_incoming(svcpt) ||
+ ptlrpc_server_request_pending(svcpt,
+ false) ||
+ ptlrpc_rqbd_pending(svcpt) ||
+ ptlrpc_at_check(svcpt), &lwi);
if (ptlrpc_thread_stopping(thread))
return -EINTR;
@@ -2049,7 +2056,7 @@ static int ptlrpc_main(void *arg)
}
rc = lu_context_init(&env->le_ctx,
- svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
+ svc->srv_ctx_tags | LCT_REMEMBER | LCT_NOREF);
if (rc)
goto out_srv_fini;
@@ -2349,7 +2356,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
while (!list_empty(&zombie)) {
thread = list_entry(zombie.next,
- struct ptlrpc_thread, t_link);
+ struct ptlrpc_thread, t_link);
list_del(&thread->t_link);
kfree(thread);
}
@@ -2398,7 +2405,6 @@ int ptlrpc_start_threads(struct ptlrpc_service *svc)
ptlrpc_stop_all_threads(svc);
return rc;
}
-EXPORT_SYMBOL(ptlrpc_start_threads);
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
@@ -2539,8 +2545,8 @@ int ptlrpc_hr_init(void)
LASSERT(hrp->hrp_nthrs > 0);
hrp->hrp_thrs =
kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
- cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
- i));
+ cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
+ i));
if (!hrp->hrp_thrs) {
rc = -ENOMEM;
goto out;
@@ -2593,7 +2599,8 @@ static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
NULL, NULL);
rc = l_wait_event(svcpt->scp_waitq,
- atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
+ atomic_read(&svcpt->scp_nreps_difficult) == 0,
+ &lwi);
if (rc == 0)
break;
CWARN("Unexpectedly long timeout %s %p\n",
@@ -2639,7 +2646,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
* event with its 'unlink' flag set for each posted rqbd
*/
list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
- rqbd_list) {
+ rqbd_list) {
rc = LNetMDUnlink(rqbd->rqbd_md_h);
LASSERT(rc == 0 || rc == -ENOENT);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 6cc2b2edf3fc..e5945e2ccc49 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -190,28 +190,30 @@ void lustre_assert_wire_constants(void)
(long long)REINT_SETXATTR);
LASSERTF(REINT_RMENTRY == 8, "found %lld\n",
(long long)REINT_RMENTRY);
- LASSERTF(REINT_MAX == 9, "found %lld\n",
+ LASSERTF(REINT_MIGRATE == 9, "found %lld\n",
+ (long long)REINT_MIGRATE);
+ LASSERTF(REINT_MAX == 10, "found %lld\n",
(long long)REINT_MAX);
LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)DISP_IT_EXECD);
+ (unsigned)DISP_IT_EXECD);
LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_EXECD);
+ (unsigned)DISP_LOOKUP_EXECD);
LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_NEG);
+ (unsigned)DISP_LOOKUP_NEG);
LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)DISP_LOOKUP_POS);
+ (unsigned)DISP_LOOKUP_POS);
LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_CREATE);
+ (unsigned)DISP_OPEN_CREATE);
LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_OPEN);
+ (unsigned)DISP_OPEN_OPEN);
LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_COMPLETE);
+ (unsigned)DISP_ENQ_COMPLETE);
LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_OPEN_REF);
+ (unsigned)DISP_ENQ_OPEN_REF);
LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_ENQ_CREATE_REF);
+ (unsigned)DISP_ENQ_CREATE_REF);
LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
- (unsigned)DISP_OPEN_LOCK);
+ (unsigned)DISP_OPEN_LOCK);
LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
(long long)MDS_STATUS_CONN);
LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
@@ -219,55 +221,55 @@ void lustre_assert_wire_constants(void)
LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
(long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_CHANGE);
+ (unsigned)MF_SOM_CHANGE);
LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_OPEN);
+ (unsigned)MF_EPOCH_OPEN);
LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MF_EPOCH_CLOSE);
+ (unsigned)MF_EPOCH_CLOSE);
LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID1);
+ (unsigned)MF_MDC_CANCEL_FID1);
LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID2);
+ (unsigned)MF_MDC_CANCEL_FID2);
LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID3);
+ (unsigned)MF_MDC_CANCEL_FID3);
LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MF_MDC_CANCEL_FID4);
+ (unsigned)MF_MDC_CANCEL_FID4);
LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MF_SOM_AU);
+ (unsigned)MF_SOM_AU);
LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MF_GETATTR_LOCK);
+ (unsigned)MF_GETATTR_LOCK);
LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MODE);
+ (long long)MDS_ATTR_MODE);
LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_UID);
+ (long long)MDS_ATTR_UID);
LASSERTF(MDS_ATTR_GID == 0x0000000000000004ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_GID);
+ (long long)MDS_ATTR_GID);
LASSERTF(MDS_ATTR_SIZE == 0x0000000000000008ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_SIZE);
+ (long long)MDS_ATTR_SIZE);
LASSERTF(MDS_ATTR_ATIME == 0x0000000000000010ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME);
+ (long long)MDS_ATTR_ATIME);
LASSERTF(MDS_ATTR_MTIME == 0x0000000000000020ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME);
+ (long long)MDS_ATTR_MTIME);
LASSERTF(MDS_ATTR_CTIME == 0x0000000000000040ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME);
+ (long long)MDS_ATTR_CTIME);
LASSERTF(MDS_ATTR_ATIME_SET == 0x0000000000000080ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATIME_SET);
+ (long long)MDS_ATTR_ATIME_SET);
LASSERTF(MDS_ATTR_MTIME_SET == 0x0000000000000100ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_MTIME_SET);
+ (long long)MDS_ATTR_MTIME_SET);
LASSERTF(MDS_ATTR_FORCE == 0x0000000000000200ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FORCE);
+ (long long)MDS_ATTR_FORCE);
LASSERTF(MDS_ATTR_ATTR_FLAG == 0x0000000000000400ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_ATTR_FLAG);
+ (long long)MDS_ATTR_ATTR_FLAG);
LASSERTF(MDS_ATTR_KILL_SUID == 0x0000000000000800ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SUID);
+ (long long)MDS_ATTR_KILL_SUID);
LASSERTF(MDS_ATTR_KILL_SGID == 0x0000000000001000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_KILL_SGID);
+ (long long)MDS_ATTR_KILL_SGID);
LASSERTF(MDS_ATTR_CTIME_SET == 0x0000000000002000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_CTIME_SET);
+ (long long)MDS_ATTR_CTIME_SET);
LASSERTF(MDS_ATTR_FROM_OPEN == 0x0000000000004000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_FROM_OPEN);
+ (long long)MDS_ATTR_FROM_OPEN);
LASSERTF(MDS_ATTR_BLOCKS == 0x0000000000008000ULL, "found 0x%.16llxULL\n",
- (long long)MDS_ATTR_BLOCKS);
+ (long long)MDS_ATTR_BLOCKS);
LASSERTF(FLD_QUERY == 900, "found %lld\n",
(long long)FLD_QUERY);
LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
@@ -418,15 +420,15 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
(long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAI_RELEASED);
+ (unsigned)LMAI_RELEASED);
LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_HSM);
+ (unsigned)LMAC_HSM);
LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_SOM);
+ (unsigned)LMAC_SOM);
LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_NOT_IN_OI);
+ (unsigned)LMAC_NOT_IN_OI);
LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)LMAC_FID_ON_OST);
+ (unsigned)LMAC_FID_ON_OST);
/* Checks for struct ost_id */
LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -452,35 +454,35 @@ void lustre_assert_wire_constants(void)
LASSERTF(FID_SEQ_IGIF == 12, "found %lld\n",
(long long)FID_SEQ_IGIF);
LASSERTF(FID_SEQ_IGIF_MAX == 0x00000000ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IGIF_MAX);
+ (long long)FID_SEQ_IGIF_MAX);
LASSERTF(FID_SEQ_IDIF == 0x0000000100000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF);
+ (long long)FID_SEQ_IDIF);
LASSERTF(FID_SEQ_IDIF_MAX == 0x00000001ffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_IDIF_MAX);
+ (long long)FID_SEQ_IDIF_MAX);
LASSERTF(FID_SEQ_START == 0x0000000200000000ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_START);
+ (long long)FID_SEQ_START);
LASSERTF(FID_SEQ_LOCAL_FILE == 0x0000000200000001ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOCAL_FILE);
+ (long long)FID_SEQ_LOCAL_FILE);
LASSERTF(FID_SEQ_DOT_LUSTRE == 0x0000000200000002ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_DOT_LUSTRE);
+ (long long)FID_SEQ_DOT_LUSTRE);
LASSERTF(FID_SEQ_SPECIAL == 0x0000000200000004ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_SPECIAL);
+ (long long)FID_SEQ_SPECIAL);
LASSERTF(FID_SEQ_QUOTA == 0x0000000200000005ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA);
+ (long long)FID_SEQ_QUOTA);
LASSERTF(FID_SEQ_QUOTA_GLB == 0x0000000200000006ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_QUOTA_GLB);
+ (long long)FID_SEQ_QUOTA_GLB);
LASSERTF(FID_SEQ_ROOT == 0x0000000200000007ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_ROOT);
+ (long long)FID_SEQ_ROOT);
LASSERTF(FID_SEQ_NORMAL == 0x0000000200000400ULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_NORMAL);
+ (long long)FID_SEQ_NORMAL);
LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
- (long long)FID_SEQ_LOV_DEFAULT);
+ (long long)FID_SEQ_LOV_DEFAULT);
LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_SPECIAL_BFL);
+ (unsigned)FID_OID_SPECIAL_BFL);
LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE);
+ (unsigned)FID_OID_DOT_LUSTRE);
LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)FID_OID_DOT_LUSTRE_OBF);
+ (unsigned)FID_OID_DOT_LUSTRE_OBF);
/* Checks for struct lu_dirent */
LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
@@ -510,11 +512,11 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
(long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_FID);
+ (unsigned)LUDA_FID);
LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_TYPE);
+ (unsigned)LUDA_TYPE);
LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)LUDA_64BITHASH);
+ (unsigned)LUDA_64BITHASH);
/* Checks for struct luda_type */
LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
@@ -602,9 +604,9 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]) == 4, "found %lld\n",
(long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]));
LASSERTF(LUSTRE_MSG_MAGIC_V2 == 0x0BD00BD3, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2);
+ LUSTRE_MSG_MAGIC_V2);
LASSERTF(LUSTRE_MSG_MAGIC_V2_SWABBED == 0xD30BD00B, "found 0x%.8x\n",
- LUSTRE_MSG_MAGIC_V2_SWABBED);
+ LUSTRE_MSG_MAGIC_V2_SWABBED);
/* Checks for struct ptlrpc_body */
LASSERTF((int)sizeof(struct ptlrpc_body_v3) == 184, "found %lld\n",
@@ -682,7 +684,7 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
(long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
- CLASSERT(JOBSTATS_JOBID_SIZE == 32);
+ CLASSERT(LUSTRE_JOBID_SIZE == 32);
LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
(long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid) == 32, "found %lld\n",
@@ -780,61 +782,61 @@ void lustre_assert_wire_constants(void)
LASSERTF(MSG_PTLRPC_HEADER_OFF == 31, "found %lld\n",
(long long)MSG_PTLRPC_HEADER_OFF);
LASSERTF(PTLRPC_MSG_VERSION == 0x00000003, "found 0x%.8x\n",
- PTLRPC_MSG_VERSION);
+ PTLRPC_MSG_VERSION);
LASSERTF(LUSTRE_VERSION_MASK == 0xffff0000, "found 0x%.8x\n",
- LUSTRE_VERSION_MASK);
+ LUSTRE_VERSION_MASK);
LASSERTF(LUSTRE_OBD_VERSION == 0x00010000, "found 0x%.8x\n",
- LUSTRE_OBD_VERSION);
+ LUSTRE_OBD_VERSION);
LASSERTF(LUSTRE_MDS_VERSION == 0x00020000, "found 0x%.8x\n",
- LUSTRE_MDS_VERSION);
+ LUSTRE_MDS_VERSION);
LASSERTF(LUSTRE_OST_VERSION == 0x00030000, "found 0x%.8x\n",
- LUSTRE_OST_VERSION);
+ LUSTRE_OST_VERSION);
LASSERTF(LUSTRE_DLM_VERSION == 0x00040000, "found 0x%.8x\n",
- LUSTRE_DLM_VERSION);
+ LUSTRE_DLM_VERSION);
LASSERTF(LUSTRE_LOG_VERSION == 0x00050000, "found 0x%.8x\n",
- LUSTRE_LOG_VERSION);
+ LUSTRE_LOG_VERSION);
LASSERTF(LUSTRE_MGS_VERSION == 0x00060000, "found 0x%.8x\n",
- LUSTRE_MGS_VERSION);
+ LUSTRE_MGS_VERSION);
LASSERTF(MSGHDR_AT_SUPPORT == 1, "found %lld\n",
(long long)MSGHDR_AT_SUPPORT);
LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
(long long)MSGHDR_CKSUM_INCOMPAT18);
LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
- (unsigned)MSG_OP_FLAG_MASK);
+ (unsigned)MSG_OP_FLAG_MASK);
LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
(long long)MSG_OP_FLAG_SHIFT);
LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
- (unsigned)MSG_GEN_FLAG_MASK);
+ (unsigned)MSG_GEN_FLAG_MASK);
LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LAST_REPLAY);
+ (unsigned)MSG_LAST_REPLAY);
LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_RESENT);
+ (unsigned)MSG_RESENT);
LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REPLAY);
+ (unsigned)MSG_REPLAY);
LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_DELAY_REPLAY);
+ (unsigned)MSG_DELAY_REPLAY);
LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_VERSION_REPLAY);
+ (unsigned)MSG_VERSION_REPLAY);
LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_REQ_REPLAY_DONE);
+ (unsigned)MSG_REQ_REPLAY_DONE);
LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_LOCK_REPLAY_DONE);
+ (unsigned)MSG_LOCK_REPLAY_DONE);
LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECOVERING);
+ (unsigned)MSG_CONNECT_RECOVERING);
LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_RECONNECT);
+ (unsigned)MSG_CONNECT_RECONNECT);
LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_REPLAYABLE);
+ (unsigned)MSG_CONNECT_REPLAYABLE);
LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_LIBCLIENT);
+ (unsigned)MSG_CONNECT_LIBCLIENT);
LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_INITIAL);
+ (unsigned)MSG_CONNECT_INITIAL);
LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_ASYNC);
+ (unsigned)MSG_CONNECT_ASYNC);
LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_NEXT_VER);
+ (unsigned)MSG_CONNECT_NEXT_VER);
LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)MSG_CONNECT_TRANSNO);
+ (unsigned)MSG_CONNECT_TRANSNO);
/* Checks for struct obd_connect_data */
LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
@@ -1069,12 +1071,18 @@ void lustre_assert_wire_constants(void)
"found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
"found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
+ LASSERTF(OBD_CONNECT_LFSCK == 0x40000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LFSCK);
+ LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_UNLINK_CLOSE);
+ LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_DIR_STRIPE);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32);
+ (unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_ADLER);
+ (unsigned)OBD_CKSUM_ADLER);
LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)OBD_CKSUM_CRC32C);
+ (unsigned)OBD_CKSUM_CRC32C);
/* Checks for struct obdo */
LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
@@ -1346,7 +1354,7 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct lov_mds_md_v1, lmm_objects[0]));
LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]) == 24, "found %lld\n",
(long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]));
- CLASSERT(LOV_MAGIC_V1 == 0x0BD10BD0);
+ CLASSERT(LOV_MAGIC_V1 == (0x0BD10000 | 0x0BD0));
/* Checks for struct lov_mds_md_v3 */
LASSERTF((int)sizeof(struct lov_mds_md_v3) == 48, "found %lld\n",
@@ -1375,7 +1383,7 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct lov_mds_md_v3, lmm_layout_gen));
LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen) == 2, "found %lld\n",
(long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen));
- CLASSERT(LOV_MAXPOOLNAME == 16);
+ CLASSERT(LOV_MAXPOOLNAME == 15);
LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]) == 48, "found %lld\n",
(long long)(int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]));
LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]) == 1, "found %lld\n",
@@ -1384,15 +1392,64 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct lov_mds_md_v3, lmm_objects[0]));
LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]) == 24, "found %lld\n",
(long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
- CLASSERT(LOV_MAGIC_V3 == 0x0BD30BD0);
+ CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0));
LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID0);
+ (unsigned)LOV_PATTERN_RAID0);
LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_RAID1);
+ (unsigned)LOV_PATTERN_RAID1);
LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_FIRST);
+ (unsigned)LOV_PATTERN_FIRST);
LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
- (unsigned)LOV_PATTERN_CMOBD);
+ (unsigned)LOV_PATTERN_CMOBD);
+
+ /* Checks for struct lmv_mds_md_v1 */
+ LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct lmv_mds_md_v1));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_magic));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_magic));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_count));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_count));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_master_mdt_index));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_master_mdt_index));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_hash_type) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_hash_type));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_hash_type));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_layout_version) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_layout_version));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_layout_version));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding1) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding1));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding1));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding2) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding2));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding2));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_padding3) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_padding3));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_padding3));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16]) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_pool_name[16]));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_pool_name[16]));
+ LASSERTF((int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0]) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_mds_md_v1, lmv_stripe_fids[0]));
+ LASSERTF((int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_fids[0]) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_mds_md_v1 *)0)->lmv_stripe_fids[0]));
+ CLASSERT(LMV_MAGIC_V1 == 0x0CD20CD0);
+ CLASSERT(LMV_MAGIC_STRIPE == 0x0CD40CD0);
+ CLASSERT(LMV_HASH_TYPE_MASK == 0x0000ffff);
+ CLASSERT(LMV_HASH_FLAG_MIGRATION == 0x80000000);
+ CLASSERT(LMV_HASH_FLAG_DEAD == 0x40000000);
/* Checks for struct obd_statfs */
LASSERTF((int)sizeof(struct obd_statfs) == 144, "found %lld\n",
@@ -1582,53 +1639,53 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_padding) == 4, "found %lld\n",
(long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_padding));
LASSERTF(Q_QUOTACHECK == 0x800100, "found 0x%.8x\n",
- Q_QUOTACHECK);
+ Q_QUOTACHECK);
LASSERTF(Q_INITQUOTA == 0x800101, "found 0x%.8x\n",
- Q_INITQUOTA);
+ Q_INITQUOTA);
LASSERTF(Q_GETOINFO == 0x800102, "found 0x%.8x\n",
- Q_GETOINFO);
+ Q_GETOINFO);
LASSERTF(Q_GETOQUOTA == 0x800103, "found 0x%.8x\n",
- Q_GETOQUOTA);
+ Q_GETOQUOTA);
LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
- Q_FINVALIDATE);
+ Q_FINVALIDATE);
/* Checks for struct niobuf_remote */
LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
(long long)(int)sizeof(struct niobuf_remote));
- LASSERTF((int)offsetof(struct niobuf_remote, offset) == 0, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, offset));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->offset) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->offset));
- LASSERTF((int)offsetof(struct niobuf_remote, len) == 8, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, len));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->len) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->len));
- LASSERTF((int)offsetof(struct niobuf_remote, flags) == 12, "found %lld\n",
- (long long)(int)offsetof(struct niobuf_remote, flags));
- LASSERTF((int)sizeof(((struct niobuf_remote *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct niobuf_remote *)0)->flags));
+ LASSERTF((int)offsetof(struct niobuf_remote, rnb_offset) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, rnb_offset));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_offset) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_offset));
+ LASSERTF((int)offsetof(struct niobuf_remote, rnb_len) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, rnb_len));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_len));
+ LASSERTF((int)offsetof(struct niobuf_remote, rnb_flags) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, rnb_flags));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->rnb_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->rnb_flags));
LASSERTF(OBD_BRW_READ == 0x01, "found 0x%.8x\n",
- OBD_BRW_READ);
+ OBD_BRW_READ);
LASSERTF(OBD_BRW_WRITE == 0x02, "found 0x%.8x\n",
- OBD_BRW_WRITE);
+ OBD_BRW_WRITE);
LASSERTF(OBD_BRW_SYNC == 0x08, "found 0x%.8x\n",
- OBD_BRW_SYNC);
+ OBD_BRW_SYNC);
LASSERTF(OBD_BRW_CHECK == 0x10, "found 0x%.8x\n",
- OBD_BRW_CHECK);
+ OBD_BRW_CHECK);
LASSERTF(OBD_BRW_FROM_GRANT == 0x20, "found 0x%.8x\n",
- OBD_BRW_FROM_GRANT);
+ OBD_BRW_FROM_GRANT);
LASSERTF(OBD_BRW_GRANTED == 0x40, "found 0x%.8x\n",
- OBD_BRW_GRANTED);
+ OBD_BRW_GRANTED);
LASSERTF(OBD_BRW_NOCACHE == 0x80, "found 0x%.8x\n",
- OBD_BRW_NOCACHE);
+ OBD_BRW_NOCACHE);
LASSERTF(OBD_BRW_NOQUOTA == 0x100, "found 0x%.8x\n",
- OBD_BRW_NOQUOTA);
+ OBD_BRW_NOQUOTA);
LASSERTF(OBD_BRW_SRVLOCK == 0x200, "found 0x%.8x\n",
- OBD_BRW_SRVLOCK);
+ OBD_BRW_SRVLOCK);
LASSERTF(OBD_BRW_ASYNC == 0x400, "found 0x%.8x\n",
- OBD_BRW_ASYNC);
+ OBD_BRW_ASYNC);
LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
- OBD_BRW_MEMALLOC);
+ OBD_BRW_MEMALLOC);
LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
OBD_BRW_OVER_USRQUOTA);
LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
@@ -1663,203 +1720,203 @@ void lustre_assert_wire_constants(void)
/* Checks for struct mdt_body */
LASSERTF((int)sizeof(struct mdt_body) == 216, "found %lld\n",
(long long)(int)sizeof(struct mdt_body));
- LASSERTF((int)offsetof(struct mdt_body, fid1) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fid1));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fid1) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fid1));
- LASSERTF((int)offsetof(struct mdt_body, fid2) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fid2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fid2) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fid2));
- LASSERTF((int)offsetof(struct mdt_body, handle) == 32, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, handle));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->handle) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->handle));
- LASSERTF((int)offsetof(struct mdt_body, valid) == 40, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, valid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->valid) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->valid));
- LASSERTF((int)offsetof(struct mdt_body, size) == 48, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, size));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->size) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->size));
- LASSERTF((int)offsetof(struct mdt_body, mtime) == 56, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mtime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mtime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mtime));
- LASSERTF((int)offsetof(struct mdt_body, atime) == 64, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, atime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->atime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->atime));
- LASSERTF((int)offsetof(struct mdt_body, ctime) == 72, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, ctime));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->ctime) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->ctime));
- LASSERTF((int)offsetof(struct mdt_body, blocks) == 80, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, blocks));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->blocks) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->blocks));
- LASSERTF((int)offsetof(struct mdt_body, t_state) == 96, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, t_state));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->t_state) == 8,
+ LASSERTF((int)offsetof(struct mdt_body, mbo_fid1) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_fid1));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid1));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_fid2) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_fid2));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fid2));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_handle) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_handle));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_handle));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_valid) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_valid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_valid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_size) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_size));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_size));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_mtime) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_mtime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mtime));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_atime) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_atime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_atime));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_ctime) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_ctime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_ctime));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_blocks) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_blocks));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_blocks));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_t_state) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_t_state));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_t_state) == 8,
"found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->t_state));
- LASSERTF((int)offsetof(struct mdt_body, fsuid) == 104, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fsuid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fsuid));
- LASSERTF((int)offsetof(struct mdt_body, fsgid) == 108, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, fsgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->fsgid));
- LASSERTF((int)offsetof(struct mdt_body, capability) == 112, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, capability));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->capability) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->capability));
- LASSERTF((int)offsetof(struct mdt_body, mode) == 116, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, mode));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->mode) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->mode));
- LASSERTF((int)offsetof(struct mdt_body, uid) == 120, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, uid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->uid));
- LASSERTF((int)offsetof(struct mdt_body, gid) == 124, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, gid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->gid));
- LASSERTF((int)offsetof(struct mdt_body, flags) == 128, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, flags));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->flags));
- LASSERTF((int)offsetof(struct mdt_body, rdev) == 132, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, rdev));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->rdev) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->rdev));
- LASSERTF((int)offsetof(struct mdt_body, nlink) == 136, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, nlink));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->nlink) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->nlink));
- LASSERTF((int)offsetof(struct mdt_body, unused2) == 140, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, unused2));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->unused2) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->unused2));
- LASSERTF((int)offsetof(struct mdt_body, suppgid) == 144, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, suppgid));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->suppgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->suppgid));
- LASSERTF((int)offsetof(struct mdt_body, eadatasize) == 148, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, eadatasize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->eadatasize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->eadatasize));
- LASSERTF((int)offsetof(struct mdt_body, aclsize) == 152, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, aclsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->aclsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->aclsize));
- LASSERTF((int)offsetof(struct mdt_body, max_mdsize) == 156, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, max_mdsize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->max_mdsize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->max_mdsize));
- LASSERTF((int)offsetof(struct mdt_body, max_cookiesize) == 160, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, max_cookiesize));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->max_cookiesize) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->max_cookiesize));
- LASSERTF((int)offsetof(struct mdt_body, uid_h) == 164, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, uid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->uid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->uid_h));
- LASSERTF((int)offsetof(struct mdt_body, gid_h) == 168, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, gid_h));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->gid_h) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->gid_h));
- LASSERTF((int)offsetof(struct mdt_body, padding_5) == 172, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_5));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_5) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_5));
- LASSERTF((int)offsetof(struct mdt_body, padding_6) == 176, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_6));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_6) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_6));
- LASSERTF((int)offsetof(struct mdt_body, padding_7) == 184, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_7));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_7) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_7));
- LASSERTF((int)offsetof(struct mdt_body, padding_8) == 192, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_8));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_8) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_8));
- LASSERTF((int)offsetof(struct mdt_body, padding_9) == 200, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_9));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_9) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_9));
- LASSERTF((int)offsetof(struct mdt_body, padding_10) == 208, "found %lld\n",
- (long long)(int)offsetof(struct mdt_body, padding_10));
- LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_10) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_body *)0)->padding_10));
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_t_state));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_fsuid) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fsuid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_fsgid) == 108, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_fsgid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_capability) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_capability));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_capability) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_capability));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_mode));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_mode));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_uid) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_uid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_gid) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_gid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_flags) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_flags));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_flags));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_rdev) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_rdev));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_rdev) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_rdev));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_nlink) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_nlink));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_nlink) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_nlink));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_unused2) == 140, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_unused2));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused2));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_suppgid) == 144, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_suppgid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_suppgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_suppgid));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_eadatasize) == 148, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_eadatasize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_eadatasize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_eadatasize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_aclsize) == 152, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_aclsize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_aclsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_aclsize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_max_mdsize) == 156, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_uid_h));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_uid_h));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_gid_h) == 168, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_gid_h));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_gid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_gid_h));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_5) == 172, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_5));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_6) == 176, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_6));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_6) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_6));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_7) == 184, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_7));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_7) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_7));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_8) == 192, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_8));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_8) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_8));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_9) == 200, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_9));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_9) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_9));
+ LASSERTF((int)offsetof(struct mdt_body, mbo_padding_10) == 208, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mbo_padding_10));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_padding_10) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mbo_padding_10));
LASSERTF(MDS_FMODE_CLOSED == 000000000000UL, "found 0%.11oUL\n",
- MDS_FMODE_CLOSED);
+ MDS_FMODE_CLOSED);
LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
- MDS_FMODE_EXEC);
+ MDS_FMODE_EXEC);
LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
- MDS_FMODE_EPOCH);
+ MDS_FMODE_EPOCH);
LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
- MDS_FMODE_TRUNC);
+ MDS_FMODE_TRUNC);
LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
- MDS_FMODE_SOM);
+ MDS_FMODE_SOM);
LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
- MDS_OPEN_CREATED);
+ MDS_OPEN_CREATED);
LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
- MDS_OPEN_CROSS);
+ MDS_OPEN_CROSS);
LASSERTF(MDS_OPEN_CREAT == 000000000100UL, "found 0%.11oUL\n",
- MDS_OPEN_CREAT);
+ MDS_OPEN_CREAT);
LASSERTF(MDS_OPEN_EXCL == 000000000200UL, "found 0%.11oUL\n",
- MDS_OPEN_EXCL);
+ MDS_OPEN_EXCL);
LASSERTF(MDS_OPEN_TRUNC == 000000001000UL, "found 0%.11oUL\n",
- MDS_OPEN_TRUNC);
+ MDS_OPEN_TRUNC);
LASSERTF(MDS_OPEN_APPEND == 000000002000UL, "found 0%.11oUL\n",
- MDS_OPEN_APPEND);
+ MDS_OPEN_APPEND);
LASSERTF(MDS_OPEN_SYNC == 000000010000UL, "found 0%.11oUL\n",
- MDS_OPEN_SYNC);
+ MDS_OPEN_SYNC);
LASSERTF(MDS_OPEN_DIRECTORY == 000000200000UL, "found 0%.11oUL\n",
- MDS_OPEN_DIRECTORY);
+ MDS_OPEN_DIRECTORY);
LASSERTF(MDS_OPEN_BY_FID == 000040000000UL, "found 0%.11oUL\n",
- MDS_OPEN_BY_FID);
+ MDS_OPEN_BY_FID);
LASSERTF(MDS_OPEN_DELAY_CREATE == 000100000000UL, "found 0%.11oUL\n",
- MDS_OPEN_DELAY_CREATE);
+ MDS_OPEN_DELAY_CREATE);
LASSERTF(MDS_OPEN_OWNEROVERRIDE == 000200000000UL, "found 0%.11oUL\n",
- MDS_OPEN_OWNEROVERRIDE);
+ MDS_OPEN_OWNEROVERRIDE);
LASSERTF(MDS_OPEN_JOIN_FILE == 000400000000UL, "found 0%.11oUL\n",
- MDS_OPEN_JOIN_FILE);
+ MDS_OPEN_JOIN_FILE);
LASSERTF(MDS_OPEN_LOCK == 004000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_LOCK);
+ MDS_OPEN_LOCK);
LASSERTF(MDS_OPEN_HAS_EA == 010000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_EA);
+ MDS_OPEN_HAS_EA);
LASSERTF(MDS_OPEN_HAS_OBJS == 020000000000UL, "found 0%.11oUL\n",
- MDS_OPEN_HAS_OBJS);
+ MDS_OPEN_HAS_OBJS);
LASSERTF(MDS_OPEN_NORESTORE == 00000000000100000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NORESTORE);
+ (long long)MDS_OPEN_NORESTORE);
LASSERTF(MDS_OPEN_NEWSTRIPE == 00000000000200000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_NEWSTRIPE);
+ (long long)MDS_OPEN_NEWSTRIPE);
LASSERTF(MDS_OPEN_VOLATILE == 00000000000400000000000ULL, "found 0%.22lloULL\n",
- (long long)MDS_OPEN_VOLATILE);
+ (long long)MDS_OPEN_VOLATILE);
LASSERTF(LUSTRE_SYNC_FL == 0x00000008, "found 0x%.8x\n",
- LUSTRE_SYNC_FL);
+ LUSTRE_SYNC_FL);
LASSERTF(LUSTRE_IMMUTABLE_FL == 0x00000010, "found 0x%.8x\n",
- LUSTRE_IMMUTABLE_FL);
+ LUSTRE_IMMUTABLE_FL);
LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
- LUSTRE_APPEND_FL);
+ LUSTRE_APPEND_FL);
LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
- LUSTRE_NOATIME_FL);
+ LUSTRE_NOATIME_FL);
LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
- LUSTRE_DIRSYNC_FL);
+ LUSTRE_DIRSYNC_FL);
LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
- MDS_INODELOCK_LOOKUP);
+ MDS_INODELOCK_LOOKUP);
LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
- MDS_INODELOCK_UPDATE);
+ MDS_INODELOCK_UPDATE);
LASSERTF(MDS_INODELOCK_OPEN == 0x000004, "found 0x%.8x\n",
- MDS_INODELOCK_OPEN);
+ MDS_INODELOCK_OPEN);
LASSERTF(MDS_INODELOCK_LAYOUT == 0x000008, "found 0x%.8x\n",
- MDS_INODELOCK_LAYOUT);
+ MDS_INODELOCK_LAYOUT);
/* Checks for struct mdt_ioepoch */
LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
@@ -2617,35 +2674,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_uuid) == 40, "found %lld\n",
(long long)(int)sizeof(((struct lmv_desc *)0)->ld_uuid));
- /* Checks for struct lmv_stripe_md */
- LASSERTF((int)sizeof(struct lmv_stripe_md) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lmv_stripe_md));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_magic));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_magic));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_count));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_count));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_master) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_master));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_master) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_master));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_padding) == 12, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_padding));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_padding));
- CLASSERT(LOV_MAXPOOLNAME == 16);
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_pool_name[16]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_pool_name[16]));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]) == 1, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]));
- LASSERTF((int)offsetof(struct lmv_stripe_md, mea_ids[0]) == 32, "found %lld\n",
- (long long)(int)offsetof(struct lmv_stripe_md, mea_ids[0]));
- LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]));
-
/* Checks for struct lov_desc */
LASSERTF((int)sizeof(struct lov_desc) == 88, "found %lld\n",
(long long)(int)sizeof(struct lov_desc));
@@ -3195,10 +3223,10 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid_h));
LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h) == 4, "found %lld\n",
(long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h));
- LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_padding) == 48, "found %lld\n",
- (long long)(int)offsetof(struct llog_setattr64_rec, lsr_padding));
- LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_valid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_valid));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_valid));
LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_tail) == 56, "found %lld\n",
(long long)(int)offsetof(struct llog_setattr64_rec, lsr_tail));
LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail) == 8, "found %lld\n",
@@ -3272,50 +3300,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_pfid) == 16, "found %lld\n",
(long long)(int)sizeof(((struct changelog_rec *)0)->cr_pfid));
- /* Checks for struct changelog_ext_rec */
- LASSERTF((int)sizeof(struct changelog_ext_rec) == 96, "found %lld\n",
- (long long)(int)sizeof(struct changelog_ext_rec));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_namelen) == 0, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_namelen));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_flags) == 2, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_flags));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_flags));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_type) == 4, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_type));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_type));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_index) == 8, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_index));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_index) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_index));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_prev) == 16, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_prev));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_prev) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_prev));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_time) == 24, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_time));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_time));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_tfid) == 32, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_tfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_pfid) == 48, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_pfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_sfid) == 64, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_sfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid));
- LASSERTF((int)offsetof(struct changelog_ext_rec, cr_spfid) == 80, "found %lld\n",
- (long long)(int)offsetof(struct changelog_ext_rec, cr_spfid));
- LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid));
-
/* Checks for struct changelog_setinfo */
LASSERTF((int)sizeof(struct changelog_setinfo) == 12, "found %lld\n",
(long long)(int)sizeof(struct changelog_setinfo));
@@ -3339,10 +3323,10 @@ void lustre_assert_wire_constants(void)
(long long)(int)offsetof(struct llog_changelog_rec, cr));
LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr) == 64, "found %lld\n",
(long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr));
- LASSERTF((int)offsetof(struct llog_changelog_rec, cr_tail) == 80, "found %lld\n",
- (long long)(int)offsetof(struct llog_changelog_rec, cr_tail));
- LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_tail) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_tail));
+ LASSERTF((int)offsetof(struct llog_changelog_rec, cr_do_not_use) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_rec, cr_do_not_use));
+ LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_do_not_use));
/* Checks for struct llog_changelog_user_rec */
LASSERTF((int)sizeof(struct llog_changelog_user_rec) == 40, "found %lld\n",
@@ -3506,6 +3490,19 @@ void lustre_assert_wire_constants(void)
CLASSERT(LLOG_ORIGIN_HANDLE_DESTROY == 509);
CLASSERT(LLOG_FIRST_OPC == 501);
CLASSERT(LLOG_LAST_OPC == 510);
+ CLASSERT(LLOG_CONFIG_ORIG_CTXT == 0);
+ CLASSERT(LLOG_CONFIG_REPL_CTXT == 1);
+ CLASSERT(LLOG_MDS_OST_ORIG_CTXT == 2);
+ CLASSERT(LLOG_MDS_OST_REPL_CTXT == 3);
+ CLASSERT(LLOG_SIZE_ORIG_CTXT == 4);
+ CLASSERT(LLOG_SIZE_REPL_CTXT == 5);
+ CLASSERT(LLOG_TEST_ORIG_CTXT == 8);
+ CLASSERT(LLOG_TEST_REPL_CTXT == 9);
+ CLASSERT(LLOG_CHANGELOG_ORIG_CTXT == 12);
+ CLASSERT(LLOG_CHANGELOG_REPL_CTXT == 13);
+ CLASSERT(LLOG_CHANGELOG_USER_ORIG_CTXT == 14);
+ CLASSERT(LLOG_AGENT_ORIG_CTXT == 15);
+ CLASSERT(LLOG_MAX_CTXTS == 16);
/* Checks for struct llogd_conn_body */
LASSERTF((int)sizeof(struct llogd_conn_body) == 40, "found %lld\n",
@@ -3943,9 +3940,9 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_progress *)0)->padding) == 4, "found %lld\n",
(long long)(int)sizeof(((struct hsm_progress *)0)->padding));
LASSERTF(HP_FLAG_COMPLETED == 0x01, "found 0x%.8x\n",
- HP_FLAG_COMPLETED);
+ HP_FLAG_COMPLETED);
LASSERTF(HP_FLAG_RETRY == 0x02, "found 0x%.8x\n",
- HP_FLAG_RETRY);
+ HP_FLAG_RETRY);
LASSERTF((int)offsetof(struct hsm_copy, hc_data_version) == 0, "found %lld\n",
(long long)(int)offsetof(struct hsm_copy, hc_data_version));
@@ -4100,9 +4097,9 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
(long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)HSM_FORCE_ACTION);
+ (unsigned)HSM_FORCE_ACTION);
LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)HSM_GHOST_COPY);
+ (unsigned)HSM_GHOST_COPY);
/* Checks for struct hsm_user_request */
LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
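The wiretest.c changes are the wire-format compatibility net catching up with the header churn elsewhere in the series: every renamed field (fid1 -> mbo_fid1, offset -> rnb_offset), every new constant (REINT_MIGRATE, OBD_CONNECT_DIR_STRIPE, the LMV magics and llog contexts) and every removed structure (lmv_stripe_md, changelog_ext_rec) has its size, offset or value re-asserted so an accidental layout change trips this self-test rather than breaking interoperability on the wire. A standalone illustration of the technique, using standard C11 assertions rather than the Lustre CLASSERT/LASSERTF macros and a hypothetical wire_hdr structure (only the 0x0BD00BD3 magic is borrowed from the diff):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct wire_hdr {		/* hypothetical on-wire structure */
	uint32_t magic;
	uint32_t flags;
	uint64_t xid;
};

#define WIRE_MAGIC 0x0BD00BD3u	/* value borrowed from LUSTRE_MSG_MAGIC_V2 */

/* compile-time checks, analogous to CLASSERT */
_Static_assert(sizeof(struct wire_hdr) == 16, "wire_hdr size changed");
_Static_assert(offsetof(struct wire_hdr, xid) == 8, "wire_hdr.xid moved");

int main(void)
{
	/* run-time check with a readable report, analogous to LASSERTF */
	assert(WIRE_MAGIC == 0x0BD00BD3u);
	return 0;
}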
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 8dade197f053..ea15cc638097 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -483,10 +483,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
}
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
- bdev->cache_fm_rds_system);
-
- return err;
+ return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+ bdev->cache_fm_rds_system);
}
static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
@@ -1834,9 +1832,7 @@ static int bcm2048_deinit(struct bcm2048_device *bdev)
if (err < 0)
return err;
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-
- return err;
+ return bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
}
/*
@@ -1995,9 +1991,7 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
\
value = bcm2048_get_##prop(bdev); \
\
- value = sprintf(buf, mask "\n", value); \
- \
- return value; \
+ return sprintf(buf, mask "\n", value); \
}
#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 692ba3e63e14..fedeb3c3549e 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -660,7 +660,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
- printk(kern_INFO "write_data %d\n", ecount);
+ dev_info(&ci->i2c->dev, "write_data %d\n", ecount);
write_reg(ci, 0x0d, ecount>>8);
write_reg(ci, 0x0e, ecount&0xff);
write_block(ci, 0x11, ebuf, ecount);
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
index f4f35c9ad1ab..d3f34f9bf712 100644
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -544,41 +544,41 @@ struct vpfe_isif_raw_config {
/* IPIPE module configurations */
/* IPIPE input configuration */
-#define VPFE_IPIPE_INPUT_CONFIG (1 << 0)
+#define VPFE_IPIPE_INPUT_CONFIG BIT(0)
/* LUT based Defect Pixel Correction */
-#define VPFE_IPIPE_LUTDPC (1 << 1)
+#define VPFE_IPIPE_LUTDPC BIT(1)
/* On the fly (OTF) Defect Pixel Correction */
-#define VPFE_IPIPE_OTFDPC (1 << 2)
+#define VPFE_IPIPE_OTFDPC BIT(2)
/* Noise Filter - 1 */
-#define VPFE_IPIPE_NF1 (1 << 3)
+#define VPFE_IPIPE_NF1 BIT(3)
/* Noise Filter - 2 */
-#define VPFE_IPIPE_NF2 (1 << 4)
+#define VPFE_IPIPE_NF2 BIT(4)
/* White Balance. Also a control ID */
-#define VPFE_IPIPE_WB (1 << 5)
+#define VPFE_IPIPE_WB BIT(5)
/* 1st RGB to RBG Blend module */
-#define VPFE_IPIPE_RGB2RGB_1 (1 << 6)
+#define VPFE_IPIPE_RGB2RGB_1 BIT(6)
/* 2nd RGB to RBG Blend module */
-#define VPFE_IPIPE_RGB2RGB_2 (1 << 7)
+#define VPFE_IPIPE_RGB2RGB_2 BIT(7)
/* Gamma Correction */
-#define VPFE_IPIPE_GAMMA (1 << 8)
+#define VPFE_IPIPE_GAMMA BIT(8)
/* 3D LUT color conversion */
-#define VPFE_IPIPE_3D_LUT (1 << 9)
+#define VPFE_IPIPE_3D_LUT BIT(9)
/* RGB to YCbCr module */
-#define VPFE_IPIPE_RGB2YUV (1 << 10)
+#define VPFE_IPIPE_RGB2YUV BIT(10)
/* YUV 422 conversion module */
-#define VPFE_IPIPE_YUV422_CONV (1 << 11)
+#define VPFE_IPIPE_YUV422_CONV BIT(11)
/* Edge Enhancement */
-#define VPFE_IPIPE_YEE (1 << 12)
+#define VPFE_IPIPE_YEE BIT(12)
/* Green Imbalance Correction */
-#define VPFE_IPIPE_GIC (1 << 13)
+#define VPFE_IPIPE_GIC BIT(13)
/* CFA Interpolation */
-#define VPFE_IPIPE_CFA (1 << 14)
+#define VPFE_IPIPE_CFA BIT(14)
/* Chroma Artifact Reduction */
-#define VPFE_IPIPE_CAR (1 << 15)
+#define VPFE_IPIPE_CAR BIT(15)
/* Chroma Gain Suppression */
-#define VPFE_IPIPE_CGS (1 << 16)
+#define VPFE_IPIPE_CGS BIT(16)
/* Global brightness and contrast control */
-#define VPFE_IPIPE_GBCE (1 << 17)
+#define VPFE_IPIPE_GBCE BIT(17)
#define VPFE_IPIPE_MAX_MODULES 18
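
The conversions above replace open-coded (1 << n) shifts with the kernel's BIT() helper, which performs the shift on an unsigned long and so stays well defined even for high bit positions. A minimal user-space check of the equivalence; the local #define below mirrors the kernel macro for illustration and is not the kernel header itself:

	#include <assert.h>

	#define BIT(nr) (1UL << (nr))	/* stand-in for the kernel's BIT() */

	int main(void)
	{
		/* BIT(16) matches the old (1 << 16) value used for VPFE_IPIPE_CGS */
		assert(BIT(16) == (1UL << 16));
		/* and remains well defined for bit 31, unlike a signed int shift */
		assert(BIT(31) == 0x80000000UL);
		return 0;
	}
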
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index ae9202ded59f..569bcdc9ce2f 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -146,9 +146,8 @@ enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
u32 field_status;
field_status = isif_read(isif->isif_cfg.base_addr, MODESET);
- field_status = (field_status >> DM365_ISIF_MDFS_OFFSET) &
- DM365_ISIF_MDFS_MASK;
- return field_status;
+ return (field_status >> DM365_ISIF_MDFS_OFFSET) &
+ DM365_ISIF_MDFS_MASK;
}
static int
@@ -594,8 +593,7 @@ isif_validate_raw_params(struct vpfe_isif_raw_config *params)
ret = isif_validate_dfc_params(&params->dfc);
if (ret)
return ret;
- ret = isif_validate_bclamp_params(&params->bclamp);
- return ret;
+ return isif_validate_bclamp_params(&params->bclamp);
}
static int isif_set_params(struct v4l2_subdev *sd, void *params)
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 3cd56cc132c7..128662623ea8 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -128,7 +128,7 @@ resizer_configure_passthru(struct vpfe_resizer_device *resizer, int bypass)
static void
configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
void *output_spec, unsigned char partial,
- unsigned flag)
+ unsigned int flag)
{
struct resizer_params *param = &resizer->config;
struct v4l2_mbus_framefmt *outformat;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 3319fb8f7d01..8be9f854510f 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -37,7 +37,7 @@ static struct media_entity *vpfe_get_input_entity
struct media_pad *remote;
remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (remote == NULL) {
+ if (!remote) {
pr_err("Invalid media connection to isif/ccdc\n");
return NULL;
}
@@ -54,7 +54,7 @@ static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
int i;
remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (remote == NULL) {
+ if (!remote) {
pr_err("Invalid media connection to isif/ccdc\n");
return -EINVAL;
}
@@ -107,7 +107,7 @@ __vpfe_video_get_format(struct vpfe_video_device *video,
int ret;
subdev = vpfe_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -236,7 +236,7 @@ static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
* format of the connected pad.
*/
subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
- if (subdev == NULL)
+ if (!subdev)
return -EPIPE;
while (1) {
@@ -413,7 +413,7 @@ static int vpfe_open(struct file *file)
/* Allocate memory for the file handle object */
handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
- if (handle == NULL)
+ if (!handle)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, &video->video_dev);
@@ -683,14 +683,14 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
}
/* get the remote pad */
remote = media_entity_remote_pad(&video->pad);
- if (remote == NULL) {
+ if (!remote) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote pad for video node\n");
return -EINVAL;
}
/* get the remote subdev */
subdev = vpfe_video_remote_subdev(video, NULL);
- if (subdev == NULL) {
+ if (!subdev) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote subdev for video node\n");
return -EINVAL;
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 44f565547179..04d881b391c7 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -120,7 +120,7 @@ int init_module(void)
int rc;
pdev = do_pci_probe();
- if (pdev == NULL)
+ if (!pdev)
return -ENODEV;
rc = pci_enable_device(pdev);
@@ -163,7 +163,6 @@ err_put_dev:
return rc;
}
-
void cleanup_module(void)
{
struct pci_dev *pdev = to_pci_dev(atir_driver.dev);
@@ -174,7 +173,6 @@ void cleanup_module(void)
pci_dev_put(pdev);
}
-
static int atir_init_start(void)
{
pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400);
@@ -187,10 +185,9 @@ static int atir_init_start(void)
static void cycle_delay(int cycle)
{
- udelay(WAIT_CYCLE*cycle);
+ udelay(WAIT_CYCLE * cycle);
}
-
static int poll_main(void)
{
unsigned char status_high, status_low;
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index ff1926ca1f96..198a8057f2f1 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -32,7 +32,6 @@
#include <media/lirc.h>
#include <media/lirc_dev.h>
-
#define MOD_AUTHOR "Venky Raju <dev@venky.ws>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "lirc_imon"
@@ -212,7 +211,6 @@ static void deregister_from_lirc(struct imon_context *context)
else
dev_info(&context->usbdev->dev,
"Deregistered iMON driver (minor:%d)\n", minor);
-
}
/**
@@ -429,7 +427,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
do {
memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7);
- context->usb_tx_buf[7] = (unsigned char) seq;
+ context->usb_tx_buf[7] = (unsigned char)seq;
retval = send_packet(context);
if (retval) {
@@ -447,7 +445,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
if (context->vfd_proto_6p) {
/* Send packet #6 */
memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
- context->usb_tx_buf[7] = (unsigned char) seq;
+ context->usb_tx_buf[7] = (unsigned char)seq;
retval = send_packet(context);
if (retval)
dev_err(&context->usbdev->dev,
@@ -563,7 +561,7 @@ static void submit_data(struct imon_context *context)
value |= PULSE_BIT;
for (i = 0; i < 4; ++i)
- buf[i] = value>>(i*8);
+ buf[i] = value >> (i * 8);
lirc_buffer_write(context->driver->rbuf, buf);
wake_up(&context->driver->rbuf->wait_poll);
@@ -589,7 +587,7 @@ static void imon_incoming_packet(struct imon_context *context,
if (len != 8) {
dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
- __func__, len, intf);
+ __func__, len, intf);
return;
}
@@ -704,7 +702,7 @@ static int imon_probe(struct usb_interface *interface,
/* prevent races probing devices w/multiple interfaces */
mutex_lock(&driver_lock);
- context = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
goto driver_unlock;
@@ -784,11 +782,11 @@ static int imon_probe(struct usb_interface *interface,
__func__, vfd_proto_6p);
}
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
goto free_context;
- rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
+ rbuf = kmalloc(sizeof(*rbuf), GFP_KERNEL);
if (!rbuf)
goto free_driver;
@@ -797,16 +795,11 @@ static int imon_probe(struct usb_interface *interface,
goto free_rbuf;
}
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__);
+ if (!rx_urb)
goto free_lirc_buf;
- }
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb) {
- dev_err(dev, "%s: usb_alloc_urb failed for display urb\n",
- __func__);
+ if (!tx_urb)
goto free_rx_urb;
- }
mutex_init(&context->ctx_lock);
context->vfd_proto_6p = vfd_proto_6p;
@@ -835,7 +828,7 @@ static int imon_probe(struct usb_interface *interface,
}
dev_info(dev, "Registered iMON driver (lirc minor: %d)\n",
- lirc_minor);
+ lirc_minor);
/* Needed while unregistering! */
driver->minor = lirc_minor;
@@ -856,8 +849,8 @@ static int imon_probe(struct usb_interface *interface,
context->display = 1;
usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
+ usb_rcvintpipe(context->usbdev,
+ context->rx_endpoint->bEndpointAddress),
context->usb_rx_buf, sizeof(context->usb_rx_buf),
usb_rx_callback, context,
context->rx_endpoint->bInterval);
@@ -882,7 +875,7 @@ static int imon_probe(struct usb_interface *interface,
}
dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
- vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
+ vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);
	/* Everything went fine. Just unlock and return retval (which is 0) */
mutex_unlock(&context->ctx_lock);
@@ -973,8 +966,8 @@ static int imon_resume(struct usb_interface *intf)
struct imon_context *context = usb_get_intfdata(intf);
usb_fill_int_urb(context->rx_urb, context->usbdev,
- usb_rcvintpipe(context->usbdev,
- context->rx_endpoint->bEndpointAddress),
+ usb_rcvintpipe(context->usbdev,
+ context->rx_endpoint->bEndpointAddress),
context->usb_rx_buf, sizeof(context->usb_rx_buf),
usb_rx_callback, context,
context->rx_endpoint->bInterval);
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 3906ac6e686d..64d99ec292e5 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -163,12 +163,12 @@ static unsigned int init_lirc_timer(void)
if (count >= 1000 && timeelapsed > 0) {
if (default_timer == 0) {
/* autodetect timer */
- newtimer = (1000000*count)/timeelapsed;
+ newtimer = (1000000 * count) / timeelapsed;
pr_info("%u Hz timer detected\n", newtimer);
return newtimer;
}
- newtimer = (1000000*count)/timeelapsed;
- if (abs(newtimer - default_timer) > default_timer/10) {
+ newtimer = (1000000 * count) / timeelapsed;
+ if (abs(newtimer - default_timer) > default_timer / 10) {
/* bad timer */
pr_notice("bad timer: %u Hz\n", newtimer);
pr_notice("using default timer: %u Hz\n",
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 2218d0042030..4678ae10b030 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -47,7 +47,6 @@
#include <media/lirc.h>
#include <media/lirc_dev.h>
-
#define MOD_AUTHOR "Oliver Stabel <oliver.stabel@gmx.de>, " \
"Tim Davies <tim@opensystems.net.au>"
#define MOD_DESC "USB Driver for Sasem Remote Controller V1.1"
@@ -73,7 +72,7 @@ static void usb_tx_callback(struct urb *urb);
/* VFD file_operations function prototypes */
static int vfd_open(struct inode *inode, struct file *file);
-static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg);
+static long vfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int vfd_close(struct inode *inode, struct file *file);
static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
@@ -86,7 +85,6 @@ static void ir_close(void *data);
#define SASEM_DATA_BUF_SZ 32
struct sasem_context {
-
struct usb_device *dev;
int vfd_isopen; /* VFD port has been opened */
unsigned int vfd_contrast; /* VFD contrast */
@@ -156,7 +154,6 @@ static int debug;
/*** M O D U L E C O D E ***/
-
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);
MODULE_LICENSE("GPL");
@@ -186,7 +183,6 @@ static void deregister_from_lirc(struct sasem_context *context)
else
dev_info(&context->dev->dev,
"Deregistered Sasem driver (minor:%d)\n", minor);
-
}
/**
@@ -243,7 +239,7 @@ exit:
* Called when the VFD device (e.g. /dev/usb/lcd)
* is closed by the application.
*/
-static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+static long vfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sasem_context *context;
@@ -297,7 +293,6 @@ static int vfd_close(struct inode *inode, struct file *file)
context->vfd_isopen = 0;
dev_info(&context->dev->dev, "VFD port closed\n");
if (!context->dev_present && !context->ir_isopen) {
-
/* Device disconnected before close and IR port is
* not open. If IR port is open, context will be
* deleted by ir_close. */
@@ -546,9 +541,7 @@ static void ir_close(void *data)
* at disconnect time, so do it now.
*/
deregister_from_lirc(context);
-
if (!context->vfd_isopen) {
-
mutex_unlock(&context->ctx_lock);
delete_context(context);
return;
@@ -633,7 +626,6 @@ static void usb_rx_callback(struct urb *urb)
return;
switch (urb->status) {
-
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -651,8 +643,6 @@ static void usb_rx_callback(struct urb *urb)
usb_submit_urb(context->rx_urb, GFP_ATOMIC);
}
-
-
/**
* Callback function for USB core API: Probe
*/
@@ -709,7 +699,6 @@ static int sasem_probe(struct usb_interface *interface,
} else if (!vfd_ep_found &&
usb_endpoint_is_int_out(ep)) {
-
tx_endpoint = ep;
vfd_ep_found = 1;
if (debug)
@@ -735,17 +724,17 @@ static int sasem_probe(struct usb_interface *interface,
/* Allocate memory */
alloc_status = 0;
- context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
alloc_status = 1;
goto alloc_status_switch;
}
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver) {
alloc_status = 2;
goto alloc_status_switch;
}
- rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
+ rbuf = kmalloc(sizeof(*rbuf), GFP_KERNEL);
if (!rbuf) {
alloc_status = 3;
goto alloc_status_switch;
@@ -758,17 +747,12 @@ static int sasem_probe(struct usb_interface *interface,
}
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb) {
- dev_err(&interface->dev,
- "%s: usb_alloc_urb failed for IR urb\n", __func__);
alloc_status = 5;
goto alloc_status_switch;
}
if (vfd_ep_found) {
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tx_urb) {
- dev_err(&interface->dev,
- "%s: usb_alloc_urb failed for VFD urb",
- __func__);
alloc_status = 6;
goto alloc_status_switch;
}
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
index 42ff0d8b2ead..48aa45acc953 100644
--- a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
+++ b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
@@ -51,6 +51,140 @@ Description:
uses.
Users:
+What: /sys/class/most/mostcore/devices/<mdev>/dci
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ If the network interface controller is attached via USB, a dci
+ directory is created that allows applications to use the
+ controller's direct communication interface (DCI) to exchange
+ information.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/arb_address
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to set an arbitrary DCI register address an
+ application wants to read from or write to.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/arb_value
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to read from or write to the arbitrary DCI register
+ whose address is stored in arb_address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_hi
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_lo
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_eui48_mi
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MAC address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_filter
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP filter address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash0
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash1
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash2
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/mep_hash3
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ This is used to check and configure the MEP hash table.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/ni_state
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current network interface state.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/node_address
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node address.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/node_position
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the current node position.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/packet_bandwidth
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Indicates the configured packet bandwidth.
+Users:
+
+What: /sys/class/most/mostcore/devices/<mdev>/dci/sync_ep
+Date: June 2016
+KernelVersion: 4.9
+Contact: Christian Gromm <christian.gromm@microchip.com>
+Description:
+ Triggers the controller's synchronization process for a certain
+ endpoint.
+Users:
+
What: /sys/class/most/mostcore/devices/<mdev>/<channel>/
Date: June 2015
KernelVersion: 4.3
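
The new dci attributes documented above are ordinary sysfs files, so an application can drive the direct communication interface with plain reads and writes: store a register address in arb_address, then read or write arb_value. A minimal user-space sketch of one such access; the device name "mdev0", the register address 0x1000 and the text formats written to the attributes are illustrative assumptions, since the exact formats are not specified here:

	#include <stdio.h>

	int main(void)
	{
		/* "mdev0" stands in for the actual <mdev> device name */
		const char *addr = "/sys/class/most/mostcore/devices/mdev0/dci/arb_address";
		const char *val  = "/sys/class/most/mostcore/devices/mdev0/dci/arb_value";
		char buf[32];
		FILE *f;

		f = fopen(addr, "w");		/* select the DCI register */
		if (!f)
			return 1;
		fprintf(f, "0x1000\n");		/* example register address */
		fclose(f);

		f = fopen(val, "r");		/* read it back through arb_value */
		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("register value: %s", buf);
		fclose(f);
		return 0;
	}
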
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index de4f76abfb47..7f51024dc5eb 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -57,7 +57,11 @@ static inline bool ch_has_mbo(struct aim_channel *c)
static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
{
- *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ if (!kfifo_peek(&c->fifo, mbo)) {
+ *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ if (*mbo)
+ kfifo_in(&c->fifo, mbo, 1);
+ }
return *mbo;
}
@@ -130,7 +134,7 @@ static int aim_open(struct inode *inode, struct file *filp)
if (!c->dev) {
pr_info("WARN: Device is destroyed\n");
mutex_unlock(&c->io_mutex);
- return -EBUSY;
+ return -ENODEV;
}
if (c->access_ref) {
@@ -184,8 +188,7 @@ static ssize_t aim_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
int ret;
- size_t actual_len;
- size_t max_len;
+ size_t to_copy, left;
struct mbo *mbo = NULL;
struct aim_channel *c = filp->private_data;
@@ -201,27 +204,28 @@ static ssize_t aim_write(struct file *filp, const char __user *buf,
}
if (unlikely(!c->dev)) {
- ret = -EPIPE;
+ ret = -ENODEV;
goto unlock;
}
- max_len = c->cfg->buffer_size;
- actual_len = min(count, max_len);
- mbo->buffer_length = actual_len;
-
- if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) {
+ to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
+ left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
+ if (left == to_copy) {
ret = -EFAULT;
- goto put_mbo;
+ goto unlock;
}
- ret = most_submit_mbo(mbo);
- if (ret)
- goto put_mbo;
+ c->mbo_offs += to_copy - left;
+ if (c->mbo_offs >= c->cfg->buffer_size ||
+ c->cfg->data_type == MOST_CH_CONTROL ||
+ c->cfg->data_type == MOST_CH_ASYNC) {
+ kfifo_skip(&c->fifo);
+ mbo->buffer_length = c->mbo_offs;
+ c->mbo_offs = 0;
+ most_submit_mbo(mbo);
+ }
- mutex_unlock(&c->io_mutex);
- return actual_len;
-put_mbo:
- most_put_mbo(mbo);
+ ret = to_copy - left;
unlock:
mutex_unlock(&c->io_mutex);
return ret;
@@ -256,7 +260,7 @@ aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
/* make sure we don't submit to gone devices */
if (unlikely(!c->dev)) {
mutex_unlock(&c->io_mutex);
- return -EIO;
+ return -ENODEV;
}
to_copy = min_t(size_t,
@@ -290,7 +294,7 @@ static unsigned int aim_poll(struct file *filp, poll_table *wait)
if (!kfifo_is_empty(&c->fifo))
mask |= POLLIN | POLLRDNORM;
} else {
- if (ch_has_mbo(c))
+ if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
mask |= POLLOUT | POLLWRNORM;
}
return mask;
@@ -366,7 +370,7 @@ static int aim_rx_completion(struct mbo *mbo)
spin_lock(&c->unlink);
if (!c->access_ref || !c->dev) {
spin_unlock(&c->unlink);
- return -EFAULT;
+ return -ENODEV;
}
kfifo_in(&c->fifo, &mbo, 1);
spin_unlock(&c->unlink);
@@ -499,23 +503,27 @@ static struct most_aim cdev_aim = {
static int __init mod_init(void)
{
+ int err;
+
pr_info("init()\n");
INIT_LIST_HEAD(&channel_list);
spin_lock_init(&ch_list_lock);
ida_init(&minor_id);
- if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
- return -EIO;
+ err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
+ if (err < 0)
+ goto dest_ida;
major = MAJOR(aim_devno);
aim_class = class_create(THIS_MODULE, "most_cdev_aim");
if (IS_ERR(aim_class)) {
pr_err("no udev support\n");
+ err = PTR_ERR(aim_class);
goto free_cdev;
}
-
- if (most_register_aim(&cdev_aim))
+ err = most_register_aim(&cdev_aim);
+ if (err)
goto dest_class;
return 0;
@@ -523,7 +531,9 @@ dest_class:
class_destroy(aim_class);
free_cdev:
unregister_chrdev_region(aim_devno, 1);
- return -EIO;
+dest_ida:
+ ida_destroy(&minor_id);
+ return err;
}
static void __exit mod_exit(void)
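
The reworked aim_write() above no longer submits one MBO per write() call: it copies user data into the current MBO at c->mbo_offs and only submits once the buffer is full, or immediately for control and async channels, so short writes on streaming channels are coalesced. A stripped-down user-space model of that accumulate-then-flush logic; memcpy stands in for copy_from_user and the submit callback for most_submit_mbo(), with all names here being illustrative:

	#include <stdio.h>
	#include <string.h>

	struct chan {
		char	*buf;		/* plays the role of mbo->virt_address */
		size_t	buf_size;	/* c->cfg->buffer_size */
		size_t	offs;		/* c->mbo_offs */
		int	packet_type;	/* non-zero: submit each write immediately */
		void	(*submit)(struct chan *c, size_t len);
	};

	static size_t chan_write(struct chan *c, const char *data, size_t count)
	{
		size_t to_copy = count < c->buf_size - c->offs ?
				 count : c->buf_size - c->offs;

		memcpy(c->buf + c->offs, data, to_copy);
		c->offs += to_copy;

		if (c->offs >= c->buf_size || c->packet_type) {
			c->submit(c, c->offs);	/* flush the filled buffer */
			c->offs = 0;
		}
		return to_copy;			/* bytes accepted from the caller */
	}

	static void submit_stub(struct chan *c, size_t len)
	{
		printf("submit %zu bytes\n", len);
	}

	int main(void)
	{
		char storage[8];
		struct chan c = { storage, sizeof(storage), 0, 0, submit_stub };

		chan_write(&c, "abc", 3);	/* buffered, nothing submitted yet */
		chan_write(&c, "defgh", 5);	/* buffer now full: submit 8 bytes */
		return 0;
	}
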
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 2f42de44d051..4659a6450c04 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -298,15 +298,16 @@ static struct net_dev_context *get_net_dev_context(
struct most_interface *iface)
{
struct net_dev_context *nd, *tmp;
+ unsigned long flags;
- spin_lock(&list_lock);
+ spin_lock_irqsave(&list_lock, flags);
list_for_each_entry_safe(nd, tmp, &net_devices, list) {
if (nd->iface == iface) {
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
return nd;
}
}
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
return NULL;
}
@@ -316,6 +317,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
{
struct net_dev_context *nd;
struct net_dev_channel *ch;
+ unsigned long flags;
if (!iface)
return -EINVAL;
@@ -332,9 +334,9 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
nd->iface = iface;
- spin_lock(&list_lock);
+ spin_lock_irqsave(&list_lock, flags);
list_add(&nd->list, &net_devices);
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
}
ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
@@ -377,6 +379,7 @@ static int aim_disconnect_channel(struct most_interface *iface,
{
struct net_dev_context *nd;
struct net_dev_channel *ch;
+ unsigned long flags;
nd = get_net_dev_context(iface);
if (!nd)
@@ -398,9 +401,9 @@ static int aim_disconnect_channel(struct most_interface *iface,
most_net_rm_netdev_safe(nd);
if (!nd->rx.linked && !nd->tx.linked) {
- spin_lock(&list_lock);
+ spin_lock_irqsave(&list_lock, flags);
list_del(&nd->list);
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
kfree(nd);
}
@@ -514,20 +517,21 @@ static int __init most_net_init(void)
static void __exit most_net_exit(void)
{
struct net_dev_context *nd, *tmp;
+ unsigned long flags;
- spin_lock(&list_lock);
+ spin_lock_irqsave(&list_lock, flags);
list_for_each_entry_safe(nd, tmp, &net_devices, list) {
list_del(&nd->list);
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
/*
* do not call most_stop_channel() here, because channels are
* going to be closed in ndo_stop() after unregister_netdev()
*/
most_net_rm_netdev_safe(nd);
kfree(nd);
- spin_lock(&list_lock);
+ spin_lock_irqsave(&list_lock, flags);
}
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
most_deregister_aim(&aim);
pr_info("most_net_exit()\n");
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
index 9c645801cff4..e4198e5e064b 100644
--- a/drivers/staging/most/aim-sound/sound.c
+++ b/drivers/staging/most/aim-sound/sound.c
@@ -234,7 +234,6 @@ static int playback_thread(void *data)
while (!kthread_should_stop()) {
struct mbo *mbo = NULL;
bool period_elapsed = false;
- int ret;
wait_event_interruptible(
channel->playback_waitq,
@@ -250,10 +249,7 @@ static int playback_thread(void *data)
else
memset(mbo->virt_address, 0, mbo->buffer_length);
- ret = most_submit_mbo(mbo);
- if (ret)
- channel->is_stream_running = false;
-
+ most_submit_mbo(mbo);
if (period_elapsed)
snd_pcm_period_elapsed(channel->substream);
}
@@ -457,7 +453,7 @@ static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
/**
* Initialization of struct snd_pcm_ops
*/
-static struct snd_pcm_ops pcm_ops = {
+static const struct snd_pcm_ops pcm_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -611,7 +607,8 @@ static int audio_probe_channel(struct most_interface *iface, int channel_id,
channel->id = channel_id;
init_waitqueue_head(&channel->playback_waitq);
- if (audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg))
+ ret = audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg);
+ if (ret)
goto err_free_card;
snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
index 13abf7c275a4..e0748416aee5 100644
--- a/drivers/staging/most/aim-v4l2/video.c
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -79,7 +79,7 @@ static int aim_vdev_open(struct file *filp)
struct most_video_dev *mdev = video_drvdata(filp);
struct aim_fh *fh;
- pr_info("aim_vdev_open()\n");
+ v4l2_info(&mdev->v4l2_dev, "aim_vdev_open()\n");
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
@@ -93,7 +93,7 @@ static int aim_vdev_open(struct file *filp)
return -ENOMEM;
if (!atomic_inc_and_test(&mdev->access_ref)) {
- pr_err("too many clients\n");
+ v4l2_err(&mdev->v4l2_dev, "too many clients\n");
ret = -EBUSY;
goto err_dec;
}
@@ -106,7 +106,7 @@ static int aim_vdev_open(struct file *filp)
ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
if (ret) {
- pr_err("most_start_channel() failed\n");
+ v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
goto err_rm;
}
@@ -128,7 +128,7 @@ static int aim_vdev_close(struct file *filp)
struct most_video_dev *mdev = fh->mdev;
struct mbo *mbo, *tmp;
- pr_info("aim_vdev_close()\n");
+ v4l2_info(&mdev->v4l2_dev, "aim_vdev_close()\n");
/*
* We need to put MBOs back before we call most_stop_channel()
@@ -139,15 +139,15 @@ static int aim_vdev_close(struct file *filp)
* This must be implemented in core.
*/
- spin_lock(&mdev->list_lock);
+ spin_lock_irq(&mdev->list_lock);
mdev->mute = true;
list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
list_del(&mbo->list);
- spin_unlock(&mdev->list_lock);
+ spin_unlock_irq(&mdev->list_lock);
most_put_mbo(mbo);
- spin_lock(&mdev->list_lock);
+ spin_lock_irq(&mdev->list_lock);
}
- spin_unlock(&mdev->list_lock);
+ spin_unlock_irq(&mdev->list_lock);
most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
mdev->mute = false;
@@ -187,7 +187,7 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
int const cnt = rem < count ? rem : count;
if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
- pr_err("read: copy_to_user failed\n");
+ v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
if (!ret)
ret = -EFAULT;
return ret;
@@ -200,9 +200,9 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
if (cnt >= rem) {
fh->offs = 0;
- spin_lock(&mdev->list_lock);
+ spin_lock_irq(&mdev->list_lock);
list_del(&mbo->list);
- spin_unlock(&mdev->list_lock);
+ spin_unlock_irq(&mdev->list_lock);
most_put_mbo(mbo);
}
}
@@ -250,16 +250,16 @@ static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
return 0;
}
-static int vidioc_querycap(struct file *file, void *priv,
+static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct aim_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- pr_info("vidioc_querycap()\n");
+ v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
- strlcpy(cap->card, "my_card", sizeof(cap->card));
+ strlcpy(cap->card, "MOST", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"%s", mdev->iface->description);
@@ -270,10 +270,13 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- pr_info("vidioc_enum_fmt_vid_cap() %d\n", f->index);
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
if (f->index)
return -EINVAL;
@@ -289,7 +292,10 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- pr_info("vidioc_g_fmt_vid_cap()\n");
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
aim_set_format_struct(f);
return 0;
@@ -298,7 +304,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct aim_fh *fh = priv;
+ struct aim_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
@@ -307,7 +313,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct aim_fh *fh = priv;
+ struct aim_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
return aim_set_format(mdev, VIDIOC_S_FMT, f);
@@ -315,7 +321,10 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- pr_info("vidioc_g_std()\n");
+ struct aim_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
*norm = V4L2_STD_UNKNOWN;
return 0;
@@ -352,7 +361,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
struct aim_fh *fh = priv;
struct most_video_dev *mdev = fh->mdev;
- pr_info("vidioc_s_input(%d)\n", index);
+ v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
if (index >= V4L2_AIM_MAX_INPUT)
return -EINVAL;
@@ -360,7 +369,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
return 0;
}
-static struct v4l2_file_operations aim_fops = {
+static const struct v4l2_file_operations aim_fops = {
.owner = THIS_MODULE,
.open = aim_vdev_open,
.release = aim_vdev_close,
@@ -393,45 +402,46 @@ static const struct video_device aim_videodev_template = {
static struct most_video_dev *get_aim_dev(
struct most_interface *iface, int channel_idx)
{
- struct most_video_dev *mdev, *tmp;
+ struct most_video_dev *mdev;
+ unsigned long flags;
- spin_lock(&list_lock);
- list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(mdev, &video_devices, list) {
if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
return mdev;
}
}
- spin_unlock(&list_lock);
+ spin_unlock_irqrestore(&list_lock, flags);
return NULL;
}
static int aim_rx_data(struct mbo *mbo)
{
+ unsigned long flags;
struct most_video_dev *mdev =
get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
if (!mdev)
return -EIO;
- spin_lock(&mdev->list_lock);
+ spin_lock_irqsave(&mdev->list_lock, flags);
if (unlikely(mdev->mute)) {
- spin_unlock(&mdev->list_lock);
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
return -EIO;
}
list_add_tail(&mbo->list, &mdev->pending_mbos);
- spin_unlock(&mdev->list_lock);
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
wake_up_interruptible(&mdev->wait_data);
return 0;
}
static int aim_register_videodev(struct most_video_dev *mdev)
{
- int retval = -ENOMEM;
int ret;
- pr_info("aim_register_videodev()\n");
+ v4l2_info(&mdev->v4l2_dev, "aim_register_videodev()\n");
init_waitqueue_head(&mdev->wait_data);
@@ -444,27 +454,24 @@ static int aim_register_videodev(struct most_video_dev *mdev)
*mdev->vdev = aim_videodev_template;
mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
mdev->vdev->lock = &mdev->lock;
- strcpy(mdev->vdev->name, "most v4l2 aim video");
+ snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
+ mdev->v4l2_dev.name);
/* Register the v4l2 device */
video_set_drvdata(mdev->vdev, mdev);
- retval = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
- if (retval != 0) {
- pr_err("video_register_device failed (%d)\n", retval);
- ret = -ENODEV;
- goto err_vbi_dev;
+ ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
+ ret);
+ video_device_release(mdev->vdev);
}
- return 0;
-
-err_vbi_dev:
- video_device_release(mdev->vdev);
return ret;
}
static void aim_unregister_videodev(struct most_video_dev *mdev)
{
- pr_info("aim_unregister_videodev()\n");
+ v4l2_info(&mdev->v4l2_dev, "aim_unregister_videodev()\n");
video_unregister_device(mdev->vdev);
}
@@ -485,7 +492,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
int ret;
struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
- pr_info("aim_probe_channel()\n");
+ pr_info("aim_probe_channel(%s)\n", name);
if (mdev) {
pr_err("channel already linked\n");
@@ -498,8 +505,8 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
}
if (ccfg->data_type != MOST_CH_SYNC &&
- ccfg->data_type != MOST_CH_ISOC_AVP) {
- pr_err("wrong channel type, expect sync or isoc_avp\n");
+ ccfg->data_type != MOST_CH_ISOC) {
+ pr_err("wrong channel type, expect sync or isoc\n");
return -EINVAL;
}
@@ -516,8 +523,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
mdev->v4l2_dev.release = aim_v4l2_dev_release;
/* Create the v4l2_device */
- strlcpy(mdev->v4l2_dev.name, "most_video_device",
- sizeof(mdev->v4l2_dev.name));
+ strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
if (ret) {
pr_err("v4l2_device_register() failed\n");
@@ -529,9 +535,10 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
if (ret)
goto err_unreg;
- spin_lock(&list_lock);
+ spin_lock_irq(&list_lock);
list_add(&mdev->list, &video_devices);
- spin_unlock(&list_lock);
+ spin_unlock_irq(&list_lock);
+ v4l2_info(&mdev->v4l2_dev, "aim_probe_channel() done\n");
return 0;
err_unreg:
@@ -545,16 +552,16 @@ static int aim_disconnect_channel(struct most_interface *iface,
{
struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
- pr_info("aim_disconnect_channel()\n");
-
if (!mdev) {
pr_err("no such channel is linked\n");
return -ENOENT;
}
- spin_lock(&list_lock);
+ v4l2_info(&mdev->v4l2_dev, "aim_disconnect_channel()\n");
+
+ spin_lock_irq(&list_lock);
list_del(&mdev->list);
- spin_unlock(&list_lock);
+ spin_unlock_irq(&list_lock);
aim_unregister_videodev(mdev);
v4l2_device_disconnect(&mdev->v4l2_dev);
@@ -585,17 +592,17 @@ static void __exit aim_exit(void)
* we simulate this call here.
* This must be fixed in core.
*/
- spin_lock(&list_lock);
+ spin_lock_irq(&list_lock);
list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
list_del(&mdev->list);
- spin_unlock(&list_lock);
+ spin_unlock_irq(&list_lock);
aim_unregister_videodev(mdev);
v4l2_device_disconnect(&mdev->v4l2_dev);
v4l2_device_put(&mdev->v4l2_dev);
- spin_lock(&list_lock);
+ spin_lock_irq(&list_lock);
}
- spin_unlock(&list_lock);
+ spin_unlock_irq(&list_lock);
most_deregister_aim(&aim_info);
BUG_ON(!list_empty(&video_devices));
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index 3c524506ee22..0b9816ce1761 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -2,7 +2,7 @@
* dim2_hal.c - DIM2 HAL implementation
* (MediaLB, Device Interface Macro IP, OS62420)
*
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,18 +20,6 @@
#include <linux/stddef.h>
/*
- * The number of frames per sub-buffer for synchronous channels.
- * Allowed values: 1, 2, 4, 8, 16, 32, 64.
- */
-#define FRAMES_PER_SUBBUFF 16
-
-/*
- * Size factor for synchronous DBR buffer.
- * Minimal value is 4*FRAMES_PER_SUBBUFF.
- */
-#define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)
-
-/*
* Size factor for isochronous DBR buffer.
* Minimal value is 3.
*/
@@ -61,12 +49,11 @@
#define DBR_SIZE (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
+#define ROUND_UP_TO(x, d) (((x) + (d) - 1) / (d) * (d))
+
/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */
-#define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
-#define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)
-
static inline u32 bit_mask(u8 position)
{
return (u32)1 << position;
@@ -81,10 +68,20 @@ static inline bool dim_on_error(u8 error_id, const char *error_message)
/* -------------------------------------------------------------------------- */
/* types and local variables */
+struct async_tx_dbr {
+ u8 ch_addr;
+ u16 rpc;
+ u16 wpc;
+ u16 rest_size;
+ u16 sz_queue[CDT0_RPC_MASK + 1];
+};
+
struct lld_global_vars_t {
bool dim_is_initialized;
bool mcm_is_initialized;
struct dim2_regs __iomem *dim2; /* DIM2 core base address */
+ struct async_tx_dbr atx_dbr;
+ u32 fcnt;
u32 dbr_map[DBR_MAP_SIZE];
};
@@ -149,15 +146,34 @@ static void free_dbr(int offs, int size)
/* -------------------------------------------------------------------------- */
-static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+static void dim2_transfer_madr(u32 val)
{
- dimcb_io_write(&g.dim2->MADR, ctr_addr);
+ dimcb_io_write(&g.dim2->MADR, val);
- /* wait till transfer is completed */
+ /* wait for transfer completion */
while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
continue;
dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+}
+
+static void dim2_clear_dbr(u16 addr, u16 size)
+{
+ enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };
+
+ u16 const end_addr = addr + size;
+ u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
+
+ dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+ dimcb_io_write(&g.dim2->MDAT0, 0);
+
+ for (; addr < end_addr; addr++)
+ dim2_transfer_madr(cmd | addr);
+}
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+ dim2_transfer_madr(ctr_addr);
return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
}
@@ -182,13 +198,7 @@ static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
dimcb_io_write(&g.dim2->MDWE2, mask[2]);
dimcb_io_write(&g.dim2->MDWE3, mask[3]);
- dimcb_io_write(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);
-
- /* wait till transfer is completed */
- while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
- continue;
-
- dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+ dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}
static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
@@ -252,6 +262,13 @@ static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
dim2_write_ctr(CDT + ch_addr, cdt);
}
+static u16 dim2_rpc(u8 ch_addr)
+{
+ u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
+
+ return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
+}
+
static void dim2_clear_cdt(u8 ch_addr)
{
u32 cdt[4] = { 0, 0, 0, 0 };
@@ -356,6 +373,52 @@ static void dim2_clear_channel(u8 ch_addr)
dim2_clear_cat(MLB_CAT, ch_addr);
dim2_clear_cdt(ch_addr);
+
+ /* clear channel status bit */
+ dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+}
+
+/* -------------------------------------------------------------------------- */
+/* trace async tx dbr fill state */
+
+static inline u16 norm_pc(u16 pc)
+{
+ return pc & CDT0_RPC_MASK;
+}
+
+static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
+{
+ g.atx_dbr.rest_size = dbr_size;
+ g.atx_dbr.rpc = dim2_rpc(ch_addr);
+ g.atx_dbr.wpc = g.atx_dbr.rpc;
+}
+
+static void dbrcnt_enq(int buf_sz)
+{
+ g.atx_dbr.rest_size -= buf_sz;
+ g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
+ g.atx_dbr.wpc++;
+}
+
+u16 dim_dbr_space(struct dim_channel *ch)
+{
+ u16 cur_rpc;
+ struct async_tx_dbr *dbr = &g.atx_dbr;
+
+ if (ch->addr != dbr->ch_addr)
+ return 0xFFFF;
+
+ cur_rpc = dim2_rpc(ch->addr);
+
+ while (norm_pc(dbr->rpc) != cur_rpc) {
+ dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
+ dbr->rpc++;
+ }
+
+ if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
+ return 0;
+
+ return dbr->rest_size;
}
/* -------------------------------------------------------------------------- */
@@ -398,7 +461,8 @@ static inline bool check_packet_length(u32 packet_length)
static inline bool check_bytes_per_frame(u32 bytes_per_frame)
{
- u16 const max_size = ((u16)CDT3_BD_MASK + 1u) / SYNC_DBR_FACTOR;
+ u16 const bd_factor = g.fcnt + 2;
+ u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
if (bytes_per_frame <= 0)
return false; /* too small */
@@ -439,7 +503,7 @@ static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
u16 n;
u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
- u32 const unit = bytes_per_frame * (u16)FRAMES_PER_SUBBUFF;
+ u32 const unit = bytes_per_frame << g.fcnt;
if (buf_size > max_size)
buf_size = max_size;
@@ -479,7 +543,7 @@ static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
dimcb_io_write(&g.dim2->MLBC0,
enable_6pin << MLBC0_MLBPEN_BIT |
mlb_clock << MLBC0_MLBCLK_SHIFT |
- MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF) << MLBC0_FCNT_SHIFT |
+ g.fcnt << MLBC0_FCNT_SHIFT |
true << MLBC0_MLBEN_BIT);
/* activate all HBI channels */
@@ -515,20 +579,17 @@ static inline bool service_channel(u8 ch_addr, u8 idx)
{
u8 const shift = idx * 16;
u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+ u32 mask[4] = { 0, 0, 0, 0 };
+ u32 adt_w[4] = { 0, 0, 0, 0 };
if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
return false;
- {
- u32 mask[4] = { 0, 0, 0, 0 };
- u32 adt_w[4] = { 0, 0, 0, 0 };
-
- mask[1] =
- bit_mask(ADT1_DNE_BIT + shift) |
- bit_mask(ADT1_ERR_BIT + shift) |
- bit_mask(ADT1_RDY_BIT + shift);
- dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
- }
+ mask[1] =
+ bit_mask(ADT1_DNE_BIT + shift) |
+ bit_mask(ADT1_ERR_BIT + shift) |
+ bit_mask(ADT1_RDY_BIT + shift);
+ dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
/* clear channel status bit */
dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
@@ -612,6 +673,9 @@ static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
++state->level;
+ if (ch->addr == g.atx_dbr.ch_addr)
+ dbrcnt_enq(buf_size);
+
if (ch->packet_length || ch->bytes_per_frame)
dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
else
@@ -650,7 +714,8 @@ static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
/* -------------------------------------------------------------------------- */
/* API */
-u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock)
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+ u32 fcnt)
{
g.dim_is_initialized = false;
@@ -662,7 +727,11 @@ u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock)
if (mlb_clock >= 8)
return DIM_INIT_ERR_MLB_CLOCK;
+ if (fcnt > MLBC0_FCNT_MAX_VAL)
+ return DIM_INIT_ERR_MLB_CLOCK;
+
g.dim2 = dim_base_address;
+ g.fcnt = fcnt;
g.dbr_map[0] = 0;
g.dbr_map[1] = 0;
@@ -693,7 +762,7 @@ static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
if (!check_channel_address(ch_address))
return DIM_INIT_ERR_CHANNEL_ADDRESS;
- ch->dbr_size = hw_buffer_size;
+ ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
ch->dbr_addr = alloc_dbr(ch->dbr_size);
if (ch->dbr_addr >= DBR_SIZE)
return DIM_INIT_ERR_OUT_OF_MEMORY;
@@ -706,6 +775,12 @@ static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
return DIM_NO_ERROR;
}
+void dim_service_mlb_int_irq(void)
+{
+ dimcb_io_write(&g.dim2->MS0, 0);
+ dimcb_io_write(&g.dim2->MS1, 0);
+}
+
u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
return norm_ctrl_async_buffer_size(buf_size);
@@ -749,8 +824,16 @@ u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
u16 max_buffer_size)
{
- return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
- max_buffer_size);
+ u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+ max_buffer_size);
+
+ if (is_tx && !g.atx_dbr.ch_addr) {
+ g.atx_dbr.ch_addr = ch->addr;
+ dbrcnt_init(ch->addr, ch->dbr_size);
+ dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
+ }
+
+ return ret;
}
u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
@@ -781,6 +864,8 @@ u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
u16 bytes_per_frame)
{
+ u16 bd_factor = g.fcnt + 2;
+
if (!g.dim_is_initialized || !ch)
return DIM_ERR_DRIVER_NOT_INITIALIZED;
@@ -790,13 +875,14 @@ u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
if (!check_bytes_per_frame(bytes_per_frame))
return DIM_ERR_BAD_CONFIG;
- ch->dbr_size = bytes_per_frame * SYNC_DBR_FACTOR;
+ ch->dbr_size = bytes_per_frame << bd_factor;
ch->dbr_addr = alloc_dbr(ch->dbr_size);
if (ch->dbr_addr >= DBR_SIZE)
return DIM_INIT_ERR_OUT_OF_MEMORY;
sync_init(ch, ch_address / 2, bytes_per_frame);
+ dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
ch->dbr_addr, ch->dbr_size, 0, true);
@@ -808,6 +894,11 @@ u8 dim_destroy_channel(struct dim_channel *ch)
if (!g.dim_is_initialized || !ch)
return DIM_ERR_DRIVER_NOT_INITIALIZED;
+ if (ch->addr == g.atx_dbr.ch_addr) {
+ dimcb_io_write(&g.dim2->MIEN, 0);
+ g.atx_dbr.ch_addr = 0;
+ }
+
dim2_clear_channel(ch->addr);
if (ch->dbr_addr < DBR_SIZE)
free_dbr(ch->dbr_addr, ch->dbr_size);
@@ -816,7 +907,7 @@ u8 dim_destroy_channel(struct dim_channel *ch)
return DIM_NO_ERROR;
}
-void dim_service_irq(struct dim_channel *const *channels)
+void dim_service_ahb_int_irq(struct dim_channel *const *channels)
{
bool state_changed;
@@ -848,10 +939,6 @@ void dim_service_irq(struct dim_channel *const *channels)
++ch;
}
} while (state_changed);
-
- /* clear pending Interrupts */
- dimcb_io_write(&g.dim2->MS0, 0);
- dimcb_io_write(&g.dim2->MS1, 0);
}
u8 dim_service_channel(struct dim_channel *ch)
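
The new async-TX accounting above keeps track of how much DBR memory the channel still has free: every enqueued buffer size is recorded in sz_queue at the software write-pointer position, and dim_dbr_space() advances the software read pointer until it matches the hardware RPC value, returning each completed size to rest_size. A small user-space model of that bookkeeping; the PC_MASK value of 15 is an illustrative stand-in for CDT0_RPC_MASK, whose real value comes from dim2_reg.h:

	#include <stdio.h>

	#define PC_MASK 15		/* stand-in for CDT0_RPC_MASK */

	struct dbr_acct {
		unsigned short rpc, wpc;	/* software read/write counters */
		unsigned short rest_size;	/* free DBR bytes */
		unsigned short sz_queue[PC_MASK + 1];
	};

	static void enq(struct dbr_acct *a, unsigned short sz)
	{
		a->rest_size -= sz;
		a->sz_queue[a->wpc & PC_MASK] = sz;
		a->wpc++;
	}

	/* hw_rpc is what the hardware reports as its read-pointer counter */
	static unsigned short space(struct dbr_acct *a, unsigned short hw_rpc)
	{
		while ((a->rpc & PC_MASK) != hw_rpc) {
			a->rest_size += a->sz_queue[a->rpc & PC_MASK];
			a->rpc++;
		}
		return a->rest_size;
	}

	int main(void)
	{
		struct dbr_acct a = { 0, 0, 1024, { 0 } };

		enq(&a, 256);
		enq(&a, 256);
		printf("free before completion: %u\n", space(&a, 0));	/* 512 */
		printf("free after one done:    %u\n", space(&a, 1));	/* 768 */
		return 0;
	}
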
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index 1c924e869de7..6df6ea5f7da4 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -60,7 +60,8 @@ struct dim_channel {
u16 done_sw_buffers_number; /*< Done software buffers number. */
};
-u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock);
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+ u32 fcnt);
void dim_shutdown(void);
@@ -86,13 +87,17 @@ u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
u8 dim_destroy_channel(struct dim_channel *ch);
-void dim_service_irq(struct dim_channel *const *channels);
+void dim_service_mlb_int_irq(void);
+
+void dim_service_ahb_int_irq(struct dim_channel *const *channels);
u8 dim_service_channel(struct dim_channel *ch);
struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
struct dim_ch_state_t *state_ptr);
+u16 dim_dbr_space(struct dim_channel *ch);
+
bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
u16 buffer_size);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index a36449551513..78b2c3dd9bb3 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -1,7 +1,7 @@
/*
* dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
*
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -45,15 +45,15 @@ module_param(clock_speed, charp, 0);
MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
/*
- * #############################################################################
+ * The parameter representing the number of frames per sub-buffer for
+ * synchronous channels. Valid values: [0 .. 6].
*
- * The define below activates an utility function used by HAL-simu
- * for calling DIM interrupt handler.
- * It is used only for TEST PURPOSE and shall be commented before release.
- *
- * #############################################################################
+ * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
+ * sub-buffer 1, 2, 4, 8, 16, 32, 64.
*/
-/* #define ENABLE_HDM_TEST */
+static u8 fcnt = 4; /* (1 << fcnt) frames per subbuffer */
+module_param(fcnt, byte, 0);
+MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
static DEFINE_SPINLOCK(dim_lock);
@@ -85,7 +85,6 @@ struct hdm_channel {
* @most_iface: most interface structure
* @capabilities: an array of channel capability data
* @io_base: I/O register base address
- * @irq_ahb0: dim2 AHB0 irq number
* @clk_speed: user selectable (through command line parameter) clock speed
* @netinfo_task: thread to deliver network status
* @netinfo_waitq: waitq for the thread to sleep
@@ -100,7 +99,6 @@ struct dim2_hdm {
struct most_interface most_iface;
char name[16 + sizeof "dim2-"];
void __iomem *io_base;
- unsigned int irq_ahb0;
int clk_speed;
struct task_struct *netinfo_task;
wait_queue_head_t netinfo_waitq;
@@ -118,10 +116,6 @@ struct dim2_hdm {
(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
-#if defined(ENABLE_HDM_TEST)
-static struct dim2_hdm *test_dev;
-#endif
-
bool dim2_sysfs_get_state_cb(void)
{
bool state;
@@ -212,7 +206,8 @@ static int startup_dim(struct platform_device *pdev)
return ret;
}
- hal_ret = dim_startup(dev->io_base, dev->clk_speed);
+ pr_info("sync: num of frames per sub-buffer: %u\n", fcnt);
+ hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
if (hal_ret != DIM_NO_ERROR) {
pr_err("dim_startup failed: %d\n", hal_ret);
if (pdata && pdata->destroy)
@@ -254,6 +249,11 @@ static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
mbo = list_first_entry(head, struct mbo, list);
buf_size = mbo->buffer_length;
+ if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
+ spin_unlock_irqrestore(&dim_lock, flags);
+ return -EAGAIN;
+ }
+
BUG_ON(mbo->bus_address == 0);
if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
list_del(head->next);
@@ -411,6 +411,22 @@ static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
return buffer;
}
+static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
+{
+ struct dim2_hdm *dev = _dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_service_mlb_int_irq();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
+ while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
+ continue;
+
+ return IRQ_HANDLED;
+}
+
/**
* dim2_tasklet_fn - tasklet function
* @data: private data
@@ -452,30 +468,14 @@ static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
unsigned long flags;
spin_lock_irqsave(&dim_lock, flags);
- dim_service_irq(get_active_channels(dev, buffer));
+ dim_service_ahb_int_irq(get_active_channels(dev, buffer));
spin_unlock_irqrestore(&dim_lock, flags);
-#if !defined(ENABLE_HDM_TEST)
dim2_tasklet.data = (unsigned long)dev;
tasklet_schedule(&dim2_tasklet);
-#else
- dim2_tasklet_fn((unsigned long)dev);
-#endif
return IRQ_HANDLED;
}
-#if defined(ENABLE_HDM_TEST)
-
-/*
- * Utility function used by HAL-simu for calling DIM interrupt handler.
- * It is used only for TEST PURPOSE.
- */
-void raise_dim_interrupt(void)
-{
- (void)dim2_ahb_isr(0, test_dev);
-}
-#endif
-
/**
* complete_all_mbos - complete MBO's in a list
* @head: list head
@@ -545,7 +545,7 @@ static int configure_channel(struct most_interface *most_iface, int ch_idx,
hdm_ch->name, buf_size, new_size);
spin_lock_irqsave(&dim_lock, flags);
hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
- buf_size);
+ is_tx ? new_size * 2 : new_size);
break;
case MOST_CH_ASYNC:
new_size = dim_norm_ctrl_async_buffer_size(buf_size);
@@ -558,9 +558,10 @@ static int configure_channel(struct most_interface *most_iface, int ch_idx,
pr_warn("%s: fixed buffer size (%d -> %d)\n",
hdm_ch->name, buf_size, new_size);
spin_lock_irqsave(&dim_lock, flags);
- hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr, buf_size);
+ hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
+ is_tx ? new_size * 2 : new_size);
break;
- case MOST_CH_ISOC_AVP:
+ case MOST_CH_ISOC:
new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
if (new_size == 0) {
pr_err("%s: invalid sub-buffer size or too small buffer size\n",
@@ -705,12 +706,14 @@ static int poison_channel(struct most_interface *most_iface, int ch_idx)
if (!hdm_ch->is_initialized)
return -EPERM;
+ tasklet_disable(&dim2_tasklet);
spin_lock_irqsave(&dim_lock, flags);
hal_ret = dim_destroy_channel(&hdm_ch->ch);
hdm_ch->is_initialized = false;
if (ch_idx == dev->atx_idx)
dev->atx_idx = -1;
spin_unlock_irqrestore(&dim_lock, flags);
+ tasklet_enable(&dim2_tasklet);
if (hal_ret != DIM_NO_ERROR) {
pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
ret = -EFAULT;
@@ -735,6 +738,7 @@ static int dim2_probe(struct platform_device *pdev)
struct resource *res;
int ret, i;
struct kobject *kobj;
+ int irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -743,29 +747,37 @@ static int dim2_probe(struct platform_device *pdev)
dev->atx_idx = -1;
platform_set_drvdata(pdev, dev);
-#if defined(ENABLE_HDM_TEST)
- test_dev = dev;
-#else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->io_base))
return PTR_ERR(dev->io_base);
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get ahb0_int irq\n");
return -ENODEV;
}
- dev->irq_ahb0 = ret;
- ret = devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0,
- "mlb_ahb0", dev);
+ ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
+ "dim2_ahb0_int", dev);
if (ret) {
- dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n",
- dev->irq_ahb0, ret);
+ dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
return ret;
}
-#endif
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get mlb_int irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
+ "dim2_mlb_int", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
+ return ret;
+ }
+
init_waitqueue_head(&dev->netinfo_waitq);
dev->deliver_netinfo = 0;
dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
@@ -785,7 +797,7 @@ static int dim2_probe(struct platform_device *pdev)
cap->name_suffix = hdm_ch->name;
cap->direction = MOST_CH_RX | MOST_CH_TX;
cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
- MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ MOST_CH_ISOC | MOST_CH_SYNC;
cap->num_buffers_packet = MAX_BUFFERS_PACKET;
cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index e0837b6b9ae1..01fe499411ff 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -77,13 +77,7 @@ enum {
MLBC0_FCNT_SHIFT = 15,
MLBC0_FCNT_MASK = 7,
- MLBC0_FCNT_VAL_1FPSB = 0,
- MLBC0_FCNT_VAL_2FPSB = 1,
- MLBC0_FCNT_VAL_4FPSB = 2,
- MLBC0_FCNT_VAL_8FPSB = 3,
- MLBC0_FCNT_VAL_16FPSB = 4,
- MLBC0_FCNT_VAL_32FPSB = 5,
- MLBC0_FCNT_VAL_64FPSB = 6,
+ MLBC0_FCNT_MAX_VAL = 6,
MLBC0_MLBEN_BIT = 0,
@@ -123,6 +117,9 @@ enum {
};
enum {
+ CDT0_RPC_SHIFT = 16 + 11,
+ CDT0_RPC_MASK = DIM2_MASK(5),
+
CDT1_BS_ISOC_SHIFT = 0,
CDT1_BS_ISOC_MASK = DIM2_MASK(9),
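The header now exposes only MLBC0_FCNT_MAX_VAL; the per-value constants are gone because the HAL (not part of this excerpt) derives the register field directly from the fcnt parameter. A hedged sketch of that encoding, using only the shift/mask/max constants declared above (the helper itself is an assumption, not the HAL code):

    /* Sketch only: encode the fcnt exponent into the FCNT field of MLBC0. */
    static u32 mlbc0_set_fcnt(u32 mlbc0, u8 fcnt)
    {
            u8 val = min_t(u8, fcnt, MLBC0_FCNT_MAX_VAL);

            mlbc0 &= ~((u32)MLBC0_FCNT_MASK << MLBC0_FCNT_SHIFT);
            return mlbc0 | ((u32)val << MLBC0_FCNT_SHIFT);
    }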
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
index 2b28e4a51131..d8b22f91d2c3 100644
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -39,7 +39,7 @@ static struct attribute *bus_default_attrs[] = {
NULL,
};
-static struct attribute_group bus_attr_group = {
+static const struct attribute_group bus_attr_group = {
.attrs = bus_default_attrs,
};
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index aeae071f2823..26c9adb29308 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -43,8 +43,9 @@
#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
-#define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
-#define HW_RESYNC 0x0000
+#define USB_DEV_ID_OS81118 0xCF18 /* PID: USB OS81118 */
+#define USB_DEV_ID_OS81119 0xCF19 /* PID: USB OS81119 */
+#define USB_DEV_ID_OS81210 0xCF30 /* PID: USB OS81210 */
/* DRCI Addresses */
#define DRCI_REG_NI_STATE 0x0100
#define DRCI_REG_PACKET_BW 0x0101
@@ -64,33 +65,30 @@
#define DRCI_WRITE_REQ 0xA1
/**
- * struct buf_anchor - used to create a list of pending URBs
- * @urb: pointer to USB request block
- * @clear_work_obj:
- * @list: linked list
- * @urb_completion:
- */
-struct buf_anchor {
- struct urb *urb;
- struct work_struct clear_work_obj;
- struct list_head list;
- struct completion urb_compl;
-};
-
-#define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
-
-/**
* struct most_dci_obj - Direct Communication Interface
* @kobj: position in sysfs
* @usb_device: pointer to the usb device
+ * @reg_addr: register address for arbitrary DCI access
*/
struct most_dci_obj {
struct kobject kobj;
struct usb_device *usb_device;
+ u16 reg_addr;
};
#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+struct most_dev;
+
+struct clear_hold_work {
+ struct work_struct ws;
+ struct most_dev *mdev;
+ unsigned int channel;
+ int pipe;
+};
+
+#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
+
/**
* struct most_dev - holds all usb interface specific stuff
* @parent: parent object in sysfs
@@ -104,10 +102,10 @@ struct most_dci_obj {
* @link_stat: link status of hardware
* @description: device description
* @suffix: suffix for channel name
- * @anchor_list_lock: locks list access
+ * @channel_lock: synchronize channel access
* @padding_active: indicates channel uses padding
* @is_channel_healthy: health status table of each channel
- * @anchor_list: list of anchored items
+ * @busy_urbs: list of anchored items
* @io_mutex: synchronize I/O with disconnect
* @link_stat_timer: timer for link status reports
* @poll_work_obj: work for polling link status
@@ -124,10 +122,11 @@ struct most_dev {
u16 link_stat;
char description[MAX_STRING_LEN];
char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
- spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
+ spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
bool padding_active[MAX_NUM_ENDPOINTS];
bool is_channel_healthy[MAX_NUM_ENDPOINTS];
- struct list_head *anchor_list;
+ struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
+ struct usb_anchor *busy_urbs;
struct mutex io_mutex;
struct timer_list link_stat_timer;
struct work_struct poll_work_obj;
@@ -191,40 +190,24 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
* free_anchored_buffers - free device's anchored items
* @mdev: the device
* @channel: channel ID
+ * @status: status of MBO termination
*/
-static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
+static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
+ enum mbo_status_flags status)
{
struct mbo *mbo;
- struct buf_anchor *anchor, *tmp;
- unsigned long flags;
+ struct urb *urb;
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel],
- list) {
- struct urb *urb = anchor->urb;
-
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- if (likely(urb)) {
- mbo = urb->context;
- if (!irqs_disabled()) {
- usb_kill_urb(urb);
- } else {
- usb_unlink_urb(urb);
- wait_for_completion(&anchor->urb_compl);
- }
- if ((mbo) && (mbo->complete)) {
- mbo->status = MBO_E_CLOSE;
- mbo->processed_length = 0;
- mbo->complete(mbo);
- }
- usb_free_urb(urb);
+ while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
+ mbo = urb->context;
+ usb_kill_urb(urb);
+ if (mbo && mbo->complete) {
+ mbo->status = status;
+ mbo->processed_length = 0;
+ mbo->complete(mbo);
}
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- cancel_work_sync(&anchor->clear_work_obj);
- kfree(anchor);
+ usb_free_urb(urb);
}
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
}
/**
@@ -241,7 +224,7 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
return frame_size;
}
switch (cfg->data_type) {
- case MOST_CH_ISOC_AVP:
+ case MOST_CH_ISOC:
frame_size = AV_PACKETS_PER_XACT * sub_size;
break;
case MOST_CH_SYNC:
@@ -274,22 +257,28 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
*/
static int hdm_poison_channel(struct most_interface *iface, int channel)
{
- struct most_dev *mdev;
+ struct most_dev *mdev = to_mdev(iface);
+ unsigned long flags;
+ spinlock_t *lock; /* temp. lock */
- mdev = to_mdev(iface);
if (unlikely(!iface)) {
dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
return -EIO;
}
- if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+ if (unlikely(channel < 0 || channel >= iface->num_channels)) {
dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
return -ECHRNG;
}
+ lock = mdev->channel_lock + channel;
+ spin_lock_irqsave(lock, flags);
mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+
+ cancel_work_sync(&mdev->clear_work[channel].ws);
mutex_lock(&mdev->io_mutex);
- free_anchored_buffers(mdev, channel);
+ free_anchored_buffers(mdev, channel, MBO_E_CLOSE);
if (mdev->padding_active[channel])
mdev->padding_active[channel] = false;
@@ -313,10 +302,10 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
struct most_channel_config *conf = &mdev->conf[channel];
- unsigned int j, num_frames, frame_size;
+ unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int j, num_frames;
u16 rd_addr, wr_addr;
- frame_size = get_stream_frame_size(conf);
if (!frame_size)
return -EIO;
num_frames = mbo->buffer_length / frame_size;
@@ -350,10 +339,10 @@ static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
static int hdm_remove_padding(struct most_dev *mdev, int channel,
struct mbo *mbo)
{
- unsigned int j, num_frames, frame_size;
struct most_channel_config *const conf = &mdev->conf[channel];
+ unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int j, num_frames;
- frame_size = get_stream_frame_size(conf);
if (!frame_size)
return -EIO;
num_frames = mbo->processed_length / USB_MTU;
@@ -380,37 +369,29 @@ static int hdm_remove_padding(struct most_dev *mdev, int channel,
*/
static void hdm_write_completion(struct urb *urb)
{
- struct mbo *mbo;
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct device *dev;
- unsigned int channel;
+ struct mbo *mbo = urb->context;
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+ unsigned int channel = mbo->hdm_channel_id;
+ struct device *dev = &mdev->usb_device->dev;
+ spinlock_t *lock = mdev->channel_lock + channel;
unsigned long flags;
- mbo = urb->context;
- anchor = mbo->priv;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
- dev = &mdev->usb_device->dev;
-
- if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
- (!mdev->is_channel_healthy[channel])) {
- complete(&anchor->urb_compl);
+ spin_lock_irqsave(lock, flags);
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ !mdev->is_channel_healthy[channel]) {
+ spin_unlock_irqrestore(lock, flags);
return;
}
- if (unlikely(urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN))) {
+ if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
mbo->processed_length = 0;
switch (urb->status) {
case -EPIPE:
dev_warn(dev, "Broken OUT pipe detected\n");
- most_stop_enqueue(&mdev->iface, channel);
- mbo->status = MBO_E_INVAL;
- usb_unlink_urb(urb);
- INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- schedule_work(&anchor->clear_work_obj);
+ mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+ mdev->clear_work[channel].pipe = urb->pipe;
+ schedule_work(&mdev->clear_work[channel].ws);
return;
case -ENODEV:
case -EPROTO:
@@ -425,10 +406,7 @@ static void hdm_write_completion(struct urb *urb)
mbo->processed_length = urb->actual_length;
}
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
+ spin_unlock_irqrestore(lock, flags);
if (likely(mbo->complete))
mbo->complete(mbo);
@@ -545,36 +523,29 @@ static void hdm_write_completion(struct urb *urb)
*/
static void hdm_read_completion(struct urb *urb)
{
- struct mbo *mbo;
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct device *dev;
+ struct mbo *mbo = urb->context;
+ struct most_dev *mdev = to_mdev(mbo->ifp);
+ unsigned int channel = mbo->hdm_channel_id;
+ struct device *dev = &mdev->usb_device->dev;
+ spinlock_t *lock = mdev->channel_lock + channel;
unsigned long flags;
- unsigned int channel;
-
- mbo = urb->context;
- anchor = mbo->priv;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
- dev = &mdev->usb_device->dev;
- if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
- (!mdev->is_channel_healthy[channel])) {
- complete(&anchor->urb_compl);
+ spin_lock_irqsave(lock, flags);
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ !mdev->is_channel_healthy[channel]) {
+ spin_unlock_irqrestore(lock, flags);
return;
}
- if (unlikely(urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN))) {
+ if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
mbo->processed_length = 0;
switch (urb->status) {
case -EPIPE:
dev_warn(dev, "Broken IN pipe detected\n");
- mbo->status = MBO_E_INVAL;
- usb_unlink_urb(urb);
- INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- schedule_work(&anchor->clear_work_obj);
+ mdev->is_channel_healthy[channel] = false;
+ spin_unlock_irqrestore(lock, flags);
+ mdev->clear_work[channel].pipe = urb->pipe;
+ schedule_work(&mdev->clear_work[channel].ws);
return;
case -ENODEV:
case -EPROTO:
@@ -588,21 +559,15 @@ static void hdm_read_completion(struct urb *urb)
}
} else {
mbo->processed_length = urb->actual_length;
- if (!mdev->padding_active[channel]) {
- mbo->status = MBO_SUCCESS;
- } else {
- if (hdm_remove_padding(mdev, channel, mbo)) {
- mbo->processed_length = 0;
- mbo->status = MBO_E_INVAL;
- } else {
- mbo->status = MBO_SUCCESS;
- }
+ mbo->status = MBO_SUCCESS;
+ if (mdev->padding_active[channel] &&
+ hdm_remove_padding(mdev, channel, mbo)) {
+ mbo->processed_length = 0;
+ mbo->status = MBO_E_INVAL;
}
}
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
+
+ spin_unlock_irqrestore(lock, flags);
if (likely(mbo->complete))
mbo->complete(mbo);
@@ -628,18 +593,16 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
struct mbo *mbo)
{
struct most_dev *mdev;
- struct buf_anchor *anchor;
struct most_channel_config *conf;
struct device *dev;
int retval = 0;
struct urb *urb;
- unsigned long flags;
unsigned long length;
void *virt_address;
if (unlikely(!iface || !mbo))
return -EIO;
- if (unlikely(iface->num_channels <= channel) || (channel < 0))
+ if (unlikely(iface->num_channels <= channel || channel < 0))
return -ECHRNG;
mdev = to_mdev(iface);
@@ -650,32 +613,15 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
return -ENODEV;
urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
- if (!urb) {
- dev_err(dev, "Failed to allocate URB\n");
+ if (!urb)
return -ENOMEM;
- }
- anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
- if (!anchor) {
- retval = -ENOMEM;
+ if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
+ hdm_add_padding(mdev, channel, mbo)) {
+ retval = -EIO;
goto _error;
}
- anchor->urb = urb;
- init_completion(&anchor->urb_compl);
- mbo->priv = anchor;
-
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
-
- if ((mdev->padding_active[channel]) &&
- (conf->direction & MOST_CH_TX))
- if (hdm_add_padding(mdev, channel, mbo)) {
- retval = -EIO;
- goto _error_1;
- }
-
urb->transfer_dma = mbo->bus_address;
virt_address = mbo->virt_address;
length = mbo->buffer_length;
@@ -688,7 +634,7 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
length,
hdm_write_completion,
mbo);
- if (conf->data_type != MOST_CH_ISOC_AVP)
+ if (conf->data_type != MOST_CH_ISOC)
urb->transfer_flags |= URB_ZERO_PACKET;
} else {
usb_fill_bulk_urb(urb, mdev->usb_device,
@@ -701,6 +647,8 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &mdev->busy_urbs[channel]);
+
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
dev_err(dev, "URB submit failed with error %d.\n", retval);
@@ -709,10 +657,7 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
return 0;
_error_1:
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
- kfree(anchor);
+ usb_unanchor_urb(urb);
_error:
usb_free_urb(urb);
return retval;
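The hunk above, together with the earlier free_anchored_buffers() rework, replaces the driver's home-grown buf_anchor list with the USB core's anchor API. A condensed sketch of the resulting URB lifecycle, using only kernel calls that appear in this diff (the two function names are illustrative):

    static int submit_anchored(struct most_dev *mdev, int channel, struct urb *urb)
    {
            int ret;

            usb_anchor_urb(urb, &mdev->busy_urbs[channel]); /* track while in flight */
            ret = usb_submit_urb(urb, GFP_KERNEL);
            if (ret)
                    usb_unanchor_urb(urb);                  /* submit failed: untrack */
            return ret;
    }

    static void drain_channel(struct most_dev *mdev, int channel)
    {
            struct urb *urb;

            /* usb_get_from_anchor() removes and returns each pending URB */
            while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
                    usb_kill_urb(urb);      /* cancel synchronously */
                    usb_free_urb(urb);      /* drop the reference */
            }
    }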
@@ -731,29 +676,30 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
unsigned int frame_size;
unsigned int temp_size;
unsigned int tail_space;
- struct most_dev *mdev;
- struct device *dev;
+ struct most_dev *mdev = to_mdev(iface);
+ struct device *dev = &mdev->usb_device->dev;
- mdev = to_mdev(iface);
mdev->is_channel_healthy[channel] = true;
- dev = &mdev->usb_device->dev;
+ mdev->clear_work[channel].channel = channel;
+ mdev->clear_work[channel].mdev = mdev;
+ INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
if (unlikely(!iface || !conf)) {
dev_err(dev, "Bad interface or config pointer.\n");
return -EINVAL;
}
- if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+ if (unlikely(channel < 0 || channel >= iface->num_channels)) {
dev_err(dev, "Channel ID out of range.\n");
return -EINVAL;
}
- if ((!conf->num_buffers) || (!conf->buffer_size)) {
+ if (!conf->num_buffers || !conf->buffer_size) {
dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
return -EINVAL;
}
- if (!(conf->data_type == MOST_CH_SYNC) &&
- !((conf->data_type == MOST_CH_ISOC_AVP) &&
- (conf->packets_per_xact != 0xFF))) {
+ if (conf->data_type != MOST_CH_SYNC &&
+ !(conf->data_type == MOST_CH_ISOC &&
+ conf->packets_per_xact != 0xFF)) {
mdev->padding_active[channel] = false;
goto exit;
}
@@ -762,7 +708,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
temp_size = conf->buffer_size;
frame_size = get_stream_frame_size(conf);
- if ((frame_size == 0) || (frame_size > USB_MTU)) {
+ if (frame_size == 0 || frame_size > USB_MTU) {
dev_warn(dev, "Misconfig: frame size wrong\n");
return -EINVAL;
}
@@ -807,17 +753,17 @@ static int hdm_update_netinfo(struct most_dev *mdev)
if (!is_valid_ether_addr(mdev->hw_addr)) {
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
- return -1;
+ return -EFAULT;
}
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
- return -1;
+ return -EFAULT;
}
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
- return -1;
+ return -EFAULT;
}
mutex_lock(&mdev->io_mutex);
@@ -832,7 +778,7 @@ static int hdm_update_netinfo(struct most_dev *mdev)
if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
dev_err(dev, "Vendor request \"link status\" failed\n");
- return -1;
+ return -EFAULT;
}
mutex_lock(&mdev->io_mutex);
@@ -886,25 +832,22 @@ static void link_stat_timer_handler(unsigned long data)
*/
static void wq_netinfo(struct work_struct *wq_obj)
{
- struct most_dev *mdev;
- int i, prev_link_stat;
+ struct most_dev *mdev = to_mdev_from_work(wq_obj);
+ int i, prev_link_stat = mdev->link_stat;
u8 prev_hw_addr[6];
- mdev = to_mdev_from_work(wq_obj);
- prev_link_stat = mdev->link_stat;
-
for (i = 0; i < 6; i++)
prev_hw_addr[i] = mdev->hw_addr[i];
if (hdm_update_netinfo(mdev) < 0)
return;
- if ((prev_link_stat != mdev->link_stat) ||
- (prev_hw_addr[0] != mdev->hw_addr[0]) ||
- (prev_hw_addr[1] != mdev->hw_addr[1]) ||
- (prev_hw_addr[2] != mdev->hw_addr[2]) ||
- (prev_hw_addr[3] != mdev->hw_addr[3]) ||
- (prev_hw_addr[4] != mdev->hw_addr[4]) ||
- (prev_hw_addr[5] != mdev->hw_addr[5]))
+ if (prev_link_stat != mdev->link_stat ||
+ prev_hw_addr[0] != mdev->hw_addr[0] ||
+ prev_hw_addr[1] != mdev->hw_addr[1] ||
+ prev_hw_addr[2] != mdev->hw_addr[2] ||
+ prev_hw_addr[3] != mdev->hw_addr[3] ||
+ prev_hw_addr[4] != mdev->hw_addr[4] ||
+ prev_hw_addr[5] != mdev->hw_addr[5])
most_deliver_netinfo(&mdev->iface, mdev->link_stat,
&mdev->hw_addr[0]);
}
@@ -917,33 +860,20 @@ static void wq_netinfo(struct work_struct *wq_obj)
*/
static void wq_clear_halt(struct work_struct *wq_obj)
{
- struct buf_anchor *anchor;
- struct most_dev *mdev;
- struct mbo *mbo;
- struct urb *urb;
- unsigned int channel;
- unsigned long flags;
+ struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
+ struct most_dev *mdev = clear_work->mdev;
+ unsigned int channel = clear_work->channel;
+ int pipe = clear_work->pipe;
- anchor = to_buf_anchor(wq_obj);
- urb = anchor->urb;
- mbo = urb->context;
- mdev = to_mdev(mbo->ifp);
- channel = mbo->hdm_channel_id;
-
- if (usb_clear_halt(urb->dev, urb->pipe))
+ mutex_lock(&mdev->io_mutex);
+ most_stop_enqueue(&mdev->iface, channel);
+ free_anchored_buffers(mdev, channel, MBO_E_INVAL);
+ if (usb_clear_halt(mdev->usb_device, pipe))
dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
- usb_free_urb(urb);
- spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
- list_del(&anchor->list);
- spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
-
- if (likely(mbo->complete))
- mbo->complete(mbo);
- if (mdev->conf[channel].direction & MOST_CH_TX)
- most_resume_enqueue(&mdev->iface, channel);
-
- kfree(anchor);
+ mdev->is_channel_healthy[channel] = true;
+ most_resume_enqueue(&mdev->iface, channel);
+ mutex_unlock(&mdev->io_mutex);
}
/**
@@ -958,7 +888,9 @@ static const struct file_operations hdm_usb_fops = {
*/
static struct usb_device_id usbid[] = {
{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
- { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
+ { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
{ } /* Terminating entry */
};
@@ -970,6 +902,10 @@ static struct usb_device_id usbid[] = {
struct most_dci_attribute most_dci_attr_##_name = \
__ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
+#define MOST_DCI_WO_ATTR(_name) \
+ struct most_dci_attribute most_dci_attr_##_name = \
+ __ATTR(_name, S_IWUSR, NULL, store_value)
+
/**
* struct most_dci_attribute - to access the attributes of a dci object
* @attr: attributes of a dci object
@@ -1046,45 +982,68 @@ static void most_dci_release(struct kobject *kobj)
kfree(dci_obj);
}
+struct regs {
+ const char *name;
+ u16 reg;
+};
+
+static const struct regs ro_regs[] = {
+ { "ni_state", DRCI_REG_NI_STATE },
+ { "packet_bandwidth", DRCI_REG_PACKET_BW },
+ { "node_address", DRCI_REG_NODE_ADDR },
+ { "node_position", DRCI_REG_NODE_POS },
+};
+
+static const struct regs rw_regs[] = {
+ { "mep_filter", DRCI_REG_MEP_FILTER },
+ { "mep_hash0", DRCI_REG_HASH_TBL0 },
+ { "mep_hash1", DRCI_REG_HASH_TBL1 },
+ { "mep_hash2", DRCI_REG_HASH_TBL2 },
+ { "mep_hash3", DRCI_REG_HASH_TBL3 },
+ { "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
+ { "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
+ { "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
+};
+
+static int get_stat_reg_addr(const struct regs *regs, int size,
+ const char *name, u16 *reg_addr)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (!strcmp(name, regs[i].name)) {
+ *reg_addr = regs[i].reg;
+ return 0;
+ }
+ }
+ return -EFAULT;
+}
+
+#define get_static_reg_addr(regs, name, reg_addr) \
+ get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)
+
static ssize_t show_value(struct most_dci_obj *dci_obj,
struct most_dci_attribute *attr, char *buf)
{
- u16 tmp_val;
+ const char *name = attr->attr.name;
+ u16 val;
u16 reg_addr;
int err;
- if (!strcmp(attr->attr.name, "ni_state"))
- reg_addr = DRCI_REG_NI_STATE;
- else if (!strcmp(attr->attr.name, "packet_bandwidth"))
- reg_addr = DRCI_REG_PACKET_BW;
- else if (!strcmp(attr->attr.name, "node_address"))
- reg_addr = DRCI_REG_NODE_ADDR;
- else if (!strcmp(attr->attr.name, "node_position"))
- reg_addr = DRCI_REG_NODE_POS;
- else if (!strcmp(attr->attr.name, "mep_filter"))
- reg_addr = DRCI_REG_MEP_FILTER;
- else if (!strcmp(attr->attr.name, "mep_hash0"))
- reg_addr = DRCI_REG_HASH_TBL0;
- else if (!strcmp(attr->attr.name, "mep_hash1"))
- reg_addr = DRCI_REG_HASH_TBL1;
- else if (!strcmp(attr->attr.name, "mep_hash2"))
- reg_addr = DRCI_REG_HASH_TBL2;
- else if (!strcmp(attr->attr.name, "mep_hash3"))
- reg_addr = DRCI_REG_HASH_TBL3;
- else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
- reg_addr = DRCI_REG_HW_ADDR_HI;
- else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
- reg_addr = DRCI_REG_HW_ADDR_MI;
- else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
- reg_addr = DRCI_REG_HW_ADDR_LO;
- else
- return -EIO;
+ if (!strcmp(name, "arb_address"))
+ return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
+
+ if (!strcmp(name, "arb_value"))
+ reg_addr = dci_obj->reg_addr;
+ else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
+ get_static_reg_addr(rw_regs, name, &reg_addr))
+ return -EFAULT;
- err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
+ err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
if (err < 0)
return err;
- return snprintf(buf, PAGE_SIZE, "%04x\n", tmp_val);
+ return snprintf(buf, PAGE_SIZE, "%04x\n", val);
}
static ssize_t store_value(struct most_dci_obj *dci_obj,
@@ -1093,31 +1052,28 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
{
u16 val;
u16 reg_addr;
- int err;
-
- if (!strcmp(attr->attr.name, "mep_filter"))
- reg_addr = DRCI_REG_MEP_FILTER;
- else if (!strcmp(attr->attr.name, "mep_hash0"))
- reg_addr = DRCI_REG_HASH_TBL0;
- else if (!strcmp(attr->attr.name, "mep_hash1"))
- reg_addr = DRCI_REG_HASH_TBL1;
- else if (!strcmp(attr->attr.name, "mep_hash2"))
- reg_addr = DRCI_REG_HASH_TBL2;
- else if (!strcmp(attr->attr.name, "mep_hash3"))
- reg_addr = DRCI_REG_HASH_TBL3;
- else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
- reg_addr = DRCI_REG_HW_ADDR_HI;
- else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
- reg_addr = DRCI_REG_HW_ADDR_MI;
- else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
- reg_addr = DRCI_REG_HW_ADDR_LO;
- else
- return -EIO;
+ const char *name = attr->attr.name;
+ int err = kstrtou16(buf, 16, &val);
- err = kstrtou16(buf, 16, &val);
if (err)
return err;
+ if (!strcmp(name, "arb_address")) {
+ dci_obj->reg_addr = val;
+ return count;
+ }
+
+ if (!strcmp(name, "arb_value")) {
+ reg_addr = dci_obj->reg_addr;
+ } else if (!strcmp(name, "sync_ep")) {
+ u16 ep = val;
+
+ reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16;
+ val = 1;
+ } else if (get_static_reg_addr(ro_regs, name, &reg_addr)) {
+ return -EFAULT;
+ }
+
err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
if (err < 0)
return err;
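Together, the two handlers above add indirect register access: userspace stores a DRCI address in arb_address and then reads or writes arb_value, which show_value()/store_value() redirect to that address via drci_rd_reg()/drci_wr_reg(). A small sketch of the kernel-side name resolution this relies on (dci_obj, err and the chosen attribute name are assumed for illustration):

    u16 reg_addr, val;
    int err;

    /* Named attribute: resolve "node_address" to its DRCI register address. */
    if (!get_static_reg_addr(ro_regs, "node_address", &reg_addr))
            err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);

    /* Arbitrary register: the address is whatever was stored via arb_address. */
    err = drci_rd_reg(dci_obj->usb_device, dci_obj->reg_addr, &val);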
@@ -1129,6 +1085,7 @@ static MOST_DCI_RO_ATTR(ni_state);
static MOST_DCI_RO_ATTR(packet_bandwidth);
static MOST_DCI_RO_ATTR(node_address);
static MOST_DCI_RO_ATTR(node_position);
+static MOST_DCI_WO_ATTR(sync_ep);
static MOST_DCI_ATTR(mep_filter);
static MOST_DCI_ATTR(mep_hash0);
static MOST_DCI_ATTR(mep_hash1);
@@ -1137,6 +1094,8 @@ static MOST_DCI_ATTR(mep_hash3);
static MOST_DCI_ATTR(mep_eui48_hi);
static MOST_DCI_ATTR(mep_eui48_mi);
static MOST_DCI_ATTR(mep_eui48_lo);
+static MOST_DCI_ATTR(arb_address);
+static MOST_DCI_ATTR(arb_value);
/**
* most_dci_def_attrs - array of default attribute files of the dci object
@@ -1146,6 +1105,7 @@ static struct attribute *most_dci_def_attrs[] = {
&most_dci_attr_packet_bandwidth.attr,
&most_dci_attr_node_address.attr,
&most_dci_attr_node_position.attr,
+ &most_dci_attr_sync_ep.attr,
&most_dci_attr_mep_filter.attr,
&most_dci_attr_mep_hash0.attr,
&most_dci_attr_mep_hash1.attr,
@@ -1154,6 +1114,8 @@ static struct attribute *most_dci_def_attrs[] = {
&most_dci_attr_mep_eui48_hi.attr,
&most_dci_attr_mep_eui48_mi.attr,
&most_dci_attr_mep_eui48_lo.attr,
+ &most_dci_attr_arb_address.attr,
+ &most_dci_attr_arb_value.attr,
NULL,
};
@@ -1176,10 +1138,9 @@ static struct kobj_type most_dci_ktype = {
static struct
most_dci_obj *create_most_dci_obj(struct kobject *parent)
{
- struct most_dci_obj *most_dci;
+ struct most_dci_obj *most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
int retval;
- most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
if (!most_dci)
return NULL;
@@ -1216,21 +1177,17 @@ static void destroy_most_dci_obj(struct most_dci_obj *p)
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
+ struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct device *dev = &usb_dev->dev;
+ struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
unsigned int i;
unsigned int num_endpoints;
struct most_channel_capability *tmp_cap;
- struct most_dev *mdev;
- struct usb_device *usb_dev;
- struct device *dev;
- struct usb_host_interface *usb_iface_desc;
struct usb_endpoint_descriptor *ep_desc;
int ret = 0;
int err;
- usb_iface_desc = interface->cur_altsetting;
- usb_dev = interface_to_usbdev(interface);
- dev = &usb_dev->dev;
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
goto exit_ENOMEM;
@@ -1276,9 +1233,9 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
if (!mdev->ep_address)
goto exit_free2;
- mdev->anchor_list =
- kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
- if (!mdev->anchor_list)
+ mdev->busy_urbs =
+ kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
+ if (!mdev->busy_urbs)
goto exit_free3;
tmp_cap = mdev->cap;
@@ -1297,21 +1254,21 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
- MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+ MOST_CH_ISOC | MOST_CH_SYNC;
if (usb_endpoint_dir_in(ep_desc))
tmp_cap->direction = MOST_CH_RX;
else
tmp_cap->direction = MOST_CH_TX;
tmp_cap++;
- INIT_LIST_HEAD(&mdev->anchor_list[i]);
- spin_lock_init(&mdev->anchor_list_lock[i]);
+ init_usb_anchor(&mdev->busy_urbs[i]);
+ spin_lock_init(&mdev->channel_lock[i]);
err = drci_wr_reg(usb_dev,
DRCI_REG_BASE + DRCI_COMMAND +
ep_desc->bEndpointAddress * 16,
1);
if (err < 0)
- pr_warn("DCI Sync for EP %02x failed",
- ep_desc->bEndpointAddress);
+ dev_warn(dev, "DCI Sync for EP %02x failed",
+ ep_desc->bEndpointAddress);
}
dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
le16_to_cpu(usb_dev->descriptor.idVendor),
@@ -1332,7 +1289,9 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
}
mutex_lock(&mdev->io_mutex);
- if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
+ if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
+ le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
+ le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
/* this increments the reference count of the instance
* object of the core
*/
@@ -1351,7 +1310,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
return 0;
exit_free4:
- kfree(mdev->anchor_list);
+ kfree(mdev->busy_urbs);
exit_free3:
kfree(mdev->ep_address);
exit_free2:
@@ -1379,9 +1338,8 @@ exit_ENOMEM:
*/
static void hdm_disconnect(struct usb_interface *interface)
{
- struct most_dev *mdev;
+ struct most_dev *mdev = usb_get_intfdata(interface);
- mdev = usb_get_intfdata(interface);
mutex_lock(&mdev->io_mutex);
usb_set_intfdata(interface, NULL);
mdev->usb_device = NULL;
@@ -1393,7 +1351,7 @@ static void hdm_disconnect(struct usb_interface *interface)
destroy_most_dci_obj(mdev->dci);
most_deregister_interface(&mdev->iface);
- kfree(mdev->anchor_list);
+ kfree(mdev->busy_urbs);
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 7c619feb12d3..329109c0024f 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -33,7 +33,7 @@
#define STRING_SIZE 80
static struct class *most_class;
-static struct device *class_glue_dir;
+static struct device *core_dev;
static struct ida mdev_id;
static int dummy_num_buffers;
@@ -51,6 +51,7 @@ struct most_c_obj {
u16 channel_id;
bool is_poisoned;
struct mutex start_mutex;
+ struct mutex nq_mutex; /* nq thread synchronization */
int is_starving;
struct most_interface *iface;
struct most_inst_obj *inst;
@@ -82,10 +83,13 @@ struct most_inst_obj {
static const struct {
int most_ch_data_type;
char *name;
-} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
+} ch_data_type[] = {
+ { MOST_CH_CONTROL, "control\n" },
{ MOST_CH_ASYNC, "async\n" },
{ MOST_CH_SYNC, "sync\n" },
- { MOST_CH_ISOC_AVP, "isoc_avp\n"} };
+ { MOST_CH_ISOC, "isoc\n"},
+ { MOST_CH_ISOC, "isoc_avp\n"},
+};
#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
@@ -260,11 +264,11 @@ static ssize_t show_available_directions(struct most_c_obj *c,
strcpy(buf, "");
if (c->iface->channel_vector[i].direction & MOST_CH_RX)
- strcat(buf, "dir_rx ");
+ strcat(buf, "rx ");
if (c->iface->channel_vector[i].direction & MOST_CH_TX)
- strcat(buf, "dir_tx ");
+ strcat(buf, "tx ");
strcat(buf, "\n");
- return strlen(buf) + 1;
+ return strlen(buf);
}
static ssize_t show_available_datatypes(struct most_c_obj *c,
@@ -280,10 +284,10 @@ static ssize_t show_available_datatypes(struct most_c_obj *c,
strcat(buf, "async ");
if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
strcat(buf, "sync ");
- if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
- strcat(buf, "isoc_avp ");
+ if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
+ strcat(buf, "isoc ");
strcat(buf, "\n");
- return strlen(buf) + 1;
+ return strlen(buf);
}
static
@@ -391,9 +395,9 @@ static ssize_t show_set_direction(struct most_c_obj *c,
char *buf)
{
if (c->cfg.direction & MOST_CH_TX)
- return snprintf(buf, PAGE_SIZE, "dir_tx\n");
+ return snprintf(buf, PAGE_SIZE, "tx\n");
else if (c->cfg.direction & MOST_CH_RX)
- return snprintf(buf, PAGE_SIZE, "dir_rx\n");
+ return snprintf(buf, PAGE_SIZE, "rx\n");
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
@@ -404,8 +408,12 @@ static ssize_t store_set_direction(struct most_c_obj *c,
{
if (!strcmp(buf, "dir_rx\n")) {
c->cfg.direction = MOST_CH_RX;
+ } else if (!strcmp(buf, "rx\n")) {
+ c->cfg.direction = MOST_CH_RX;
} else if (!strcmp(buf, "dir_tx\n")) {
c->cfg.direction = MOST_CH_TX;
+ } else if (!strcmp(buf, "tx\n")) {
+ c->cfg.direction = MOST_CH_TX;
} else {
pr_info("WARN: invalid attribute settings\n");
return -EINVAL;
@@ -847,7 +855,23 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj,
struct most_aim_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
+ struct most_c_obj *c;
+ struct most_inst_obj *i;
+ int offs = 0;
+
+ list_for_each_entry(i, &instance_list, list) {
+ list_for_each_entry(c, &i->channel_list, list) {
+ if (c->aim0.ptr == aim_obj->driver ||
+ c->aim1.ptr == aim_obj->driver) {
+ offs += snprintf(buf + offs, PAGE_SIZE - offs,
+ "%s:%s\n",
+ kobject_name(&i->kobj),
+ kobject_name(&c->kobj));
+ }
+ }
+ }
+
+ return offs;
}
/**
@@ -1131,18 +1155,18 @@ static inline void trash_mbo(struct mbo *mbo)
spin_unlock_irqrestore(&c->fifo_lock, flags);
}
-static struct mbo *get_hdm_mbo(struct most_c_obj *c)
+static bool hdm_mbo_ready(struct most_c_obj *c)
{
- unsigned long flags;
- struct mbo *mbo;
+ bool empty;
- spin_lock_irqsave(&c->fifo_lock, flags);
- if (c->enqueue_halt || list_empty(&c->halt_fifo))
- mbo = NULL;
- else
- mbo = list_pop_mbo(&c->halt_fifo);
- spin_unlock_irqrestore(&c->fifo_lock, flags);
- return mbo;
+ if (c->enqueue_halt)
+ return false;
+
+ spin_lock_irq(&c->fifo_lock);
+ empty = list_empty(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
+
+ return !empty;
}
static void nq_hdm_mbo(struct mbo *mbo)
@@ -1160,20 +1184,32 @@ static int hdm_enqueue_thread(void *data)
{
struct most_c_obj *c = data;
struct mbo *mbo;
+ int ret;
typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
while (likely(!kthread_should_stop())) {
wait_event_interruptible(c->hdm_fifo_wq,
- (mbo = get_hdm_mbo(c)) ||
+ hdm_mbo_ready(c) ||
kthread_should_stop());
- if (unlikely(!mbo))
+ mutex_lock(&c->nq_mutex);
+ spin_lock_irq(&c->fifo_lock);
+ if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+ spin_unlock_irq(&c->fifo_lock);
+ mutex_unlock(&c->nq_mutex);
continue;
+ }
+
+ mbo = list_pop_mbo(&c->halt_fifo);
+ spin_unlock_irq(&c->fifo_lock);
if (c->cfg.direction == MOST_CH_RX)
mbo->buffer_length = c->cfg.buffer_size;
- if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
+ ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+ mutex_unlock(&c->nq_mutex);
+
+ if (unlikely(ret)) {
pr_err("hdm enqueue failed\n");
nq_hdm_mbo(mbo);
c->hdm_enqueue_task = NULL;
@@ -1294,17 +1330,14 @@ _exit:
/**
* most_submit_mbo - submits an MBO to fifo
* @mbo: pointer to the MBO
- *
*/
-int most_submit_mbo(struct mbo *mbo)
+void most_submit_mbo(struct mbo *mbo)
{
- if (unlikely((!mbo) || (!mbo->context))) {
- pr_err("Bad MBO or missing channel reference\n");
- return -EINVAL;
- }
+ if (WARN_ONCE(!mbo || !mbo->context,
+ "bad mbo or missing channel reference\n"))
+ return;
nq_hdm_mbo(mbo);
- return 0;
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
@@ -1468,10 +1501,8 @@ static void most_read_completion(struct mbo *mbo)
return;
}
- if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
- pr_info("WARN: rx device out of buffers\n");
+ if (atomic_sub_and_test(1, &c->mbo_nq_level))
c->is_starving = 1;
- }
if (c->aim0.refs && c->aim0.ptr->rx_completion &&
c->aim0.ptr->rx_completion(mbo) == 0)
@@ -1761,6 +1792,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
init_completion(&c->cleanup);
atomic_set(&c->mbo_ref, 0);
mutex_init(&c->start_mutex);
+ mutex_init(&c->nq_mutex);
list_add_tail(&c->list, &inst->channel_list);
}
pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1826,8 +1858,12 @@ void most_stop_enqueue(struct most_interface *iface, int id)
{
struct most_c_obj *c = get_channel_by_iface(iface, id);
- if (likely(c))
- c->enqueue_halt = true;
+ if (!c)
+ return;
+
+ mutex_lock(&c->nq_mutex);
+ c->enqueue_halt = true;
+ mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);
@@ -1843,9 +1879,12 @@ void most_resume_enqueue(struct most_interface *iface, int id)
{
struct most_c_obj *c = get_channel_by_iface(iface, id);
- if (unlikely(!c))
+ if (!c)
return;
+
+ mutex_lock(&c->nq_mutex);
c->enqueue_halt = false;
+ mutex_unlock(&c->nq_mutex);
wake_up_interruptible(&c->hdm_fifo_wq);
}
@@ -1879,22 +1918,19 @@ static int __init most_init(void)
goto exit_class;
}
- class_glue_dir =
- device_create(most_class, NULL, 0, NULL, "mostcore");
- if (IS_ERR(class_glue_dir)) {
- err = PTR_ERR(class_glue_dir);
+ core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
+ if (IS_ERR(core_dev)) {
+ err = PTR_ERR(core_dev);
goto exit_driver;
}
- most_aim_kset =
- kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
+ most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
if (!most_aim_kset) {
err = -ENOMEM;
goto exit_class_container;
}
- most_inst_kset =
- kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
+ most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
if (!most_inst_kset) {
err = -ENOMEM;
goto exit_driver_kset;
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
index 60e018e499ef..5f8339bd046f 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -56,7 +56,7 @@ enum most_channel_direction {
enum most_channel_data_type {
MOST_CH_CONTROL = 1 << 0,
MOST_CH_ASYNC = 1 << 1,
- MOST_CH_ISOC_AVP = 1 << 2,
+ MOST_CH_ISOC = 1 << 2,
MOST_CH_SYNC = 1 << 5,
};
@@ -112,7 +112,7 @@ struct most_channel_capability {
u16 buffer_size_packet;
u16 num_buffers_streaming;
u16 buffer_size_streaming;
- char *name_suffix;
+ const char *name_suffix;
};
/**
@@ -287,7 +287,7 @@ struct kobject *most_register_interface(struct most_interface *iface);
* @intf_instance Pointer to the interface instance description.
*/
void most_deregister_interface(struct most_interface *iface);
-int most_submit_mbo(struct mbo *mbo);
+void most_submit_mbo(struct mbo *mbo);
/**
* most_stop_enqueue - prevents core from enqueueing MBOs
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 99445d0fcf9c..552a7dcbf50b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -192,7 +192,7 @@ static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
return phy_ethtool_sset(phydev, ecmd);
}
-static struct ethtool_ops xlr_ethtool_ops = {
+static const struct ethtool_ops xlr_ethtool_ops = {
.get_settings = xlr_get_settings,
.set_settings = xlr_set_settings,
};
@@ -269,16 +269,6 @@ static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
msg->msg3 = 0;
}
-static void __maybe_unused xlr_wakeup_queue(unsigned long dev)
-{
- struct net_device *ndev = (struct net_device *)dev;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (phydev->link)
- netif_tx_wake_queue(netdev_get_tx_queue(ndev, priv->wakeup_q));
-}
-
static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -413,7 +403,7 @@ static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
return stats;
}
-static struct net_device_ops xlr_netdev_ops = {
+static const struct net_device_ops xlr_netdev_ops = {
.ndo_open = xlr_net_open,
.ndo_stop = xlr_net_stop,
.ndo_start_xmit = xlr_net_start_xmit,
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
index 16ea17ff3fd2..0b8f1d9c7056 100644
--- a/drivers/staging/octeon-usb/Kconfig
+++ b/drivers/staging/octeon-usb/Kconfig
@@ -6,5 +6,5 @@ config OCTEON_USB
Networks' products in the Octeon family.
To compile this driver as a module, choose M here. The module
- will be called octeon-usb.
+ will be called octeon-hcd.
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 17442b3ed849..9a7858a300fd 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -3292,7 +3292,6 @@ static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
spin_lock_irqsave(&usb->lock, flags);
port_status = cvmx_usb_get_status(usb);
spin_unlock_irqrestore(&usb->lock, flags);
- buf[0] = 0;
buf[0] = port_status.connect_change << 1;
return buf[0] != 0;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index e13a4ab46977..691e4a51ace4 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -34,48 +34,23 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
-static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, cmd);
-
- return -EINVAL;
-}
-
-static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, cmd);
-
- return -EINVAL;
-}
-
static int cvm_oct_nway_reset(struct net_device *dev)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (priv->phydev)
- return phy_start_aneg(priv->phydev);
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
return -EINVAL;
}
const struct ethtool_ops cvm_oct_ethtool_ops = {
.get_drvinfo = cvm_oct_get_drvinfo,
- .get_settings = cvm_oct_get_settings,
- .set_settings = cvm_oct_set_settings,
.nway_reset = cvm_oct_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -88,15 +63,13 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
*/
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
@@ -119,9 +92,9 @@ void cvm_oct_adjust_link(struct net_device *dev)
cvmx_helper_link_info_t link_info;
link_info.u64 = 0;
- link_info.s.link_up = priv->phydev->link ? 1 : 0;
- link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
- link_info.s.speed = priv->phydev->speed;
+ link_info.s.link_up = dev->phydev->link ? 1 : 0;
+ link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
+ link_info.s.speed = dev->phydev->speed;
priv->link_info = link_info.u64;
/*
@@ -130,8 +103,8 @@ void cvm_oct_adjust_link(struct net_device *dev)
if (priv->poll)
priv->poll(dev);
- if (priv->last_link != priv->phydev->link) {
- priv->last_link = priv->phydev->link;
+ if (priv->last_link != dev->phydev->link) {
+ priv->last_link = dev->phydev->link;
cvmx_helper_link_set(priv->port, link_info);
cvm_oct_note_carrier(priv, link_info);
}
@@ -151,9 +124,8 @@ int cvm_oct_common_stop(struct net_device *dev)
priv->poll = NULL;
- if (priv->phydev)
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
if (priv->last_link) {
link_info.u64 = 0;
@@ -176,6 +148,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
if (!priv->of_node)
goto no_phy;
@@ -193,14 +166,15 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
if (!phy_node)
goto no_phy;
- priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+ of_node_put(phy_node);
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
priv->last_link = 0;
- phy_start_aneg(priv->phydev);
+ phy_start_aneg(phydev);
return 0;
no_phy:
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 91b148cfcbdb..4e7304210bb9 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -117,7 +117,10 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
cvmx_helper_link_info_t link_info;
bool status_change;
- link_info = cvmx_helper_link_autoconf(priv->port);
+ link_info = cvmx_helper_link_get(priv->port);
+ if (priv->link_info != link_info.u64 &&
+ cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
status_change = priv->link_info != link_info.u64;
priv->link_info = link_info.u64;
@@ -145,7 +148,7 @@ int cvm_oct_rgmii_open(struct net_device *dev)
if (ret)
return ret;
- if (priv->phydev) {
+ if (dev->phydev) {
/*
* In phydev mode, we need still periodic polling for the
* preamble error checking, and we also need to call this
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index a10fe3af9a9c..f0900d1c4d7b 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -43,21 +43,27 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
-static struct napi_struct cvm_oct_napi;
+static atomic_t oct_rx_ready = ATOMIC_INIT(0);
+
+static struct oct_rx_group {
+ int irq;
+ int group;
+ struct napi_struct napi;
+} oct_rx_group[16];
/**
* cvm_oct_do_interrupt - interrupt handler.
- * @cpl: Interrupt number. Unused
- * @dev_id: Cookie to identify the device. Unused
+ * @irq: Interrupt number.
+ * @napi_id: Cookie to identify the NAPI instance.
*
* The interrupt occurs whenever the POW has packets in our group.
*
*/
-static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
/* Disable the IRQ and start napi_poll. */
- disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- napi_schedule(&cvm_oct_napi);
+ disable_irq_nosync(irq);
+ napi_schedule(napi_id);
return IRQ_HANDLED;
}
@@ -143,14 +149,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
return 0;
}
-/**
- * cvm_oct_napi_poll - the NAPI poll function.
- * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
- * @budget: Maximum number of packets to receive.
- *
- * Returns the number of packets processed.
- */
-static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
const int coreid = cvmx_get_core_num();
u64 old_group_mask;
@@ -172,13 +171,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
} else {
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
(old_group_mask & ~0xFFFFull) |
- 1 << pow_receive_group);
+ BIT(rx_group->group));
}
if (USE_ASYNC_IOBDMA) {
@@ -203,15 +202,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (!work) {
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
cvmx_write_csr(CVMX_SSO_WQ_INT,
- 1ull << pow_receive_group);
+ BIT(rx_group->group));
} else {
union cvmx_pow_wq_int wq_int;
wq_int.u64 = 0;
- wq_int.s.iq_dis = 1 << pow_receive_group;
- wq_int.s.wq_int = 1 << pow_receive_group;
+ wq_int.s.iq_dis = BIT(rx_group->group);
+ wq_int.s.wq_int = BIT(rx_group->group);
cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
}
break;
@@ -410,10 +409,28 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
cvm_oct_rx_refill_pool(0);
- if (rx_count < budget && napi) {
+ return rx_count;
+}
+
+/**
+ * cvm_oct_napi_poll - the NAPI poll function.
+ * @napi: The NAPI instance.
+ * @budget: Maximum number of packets to receive.
+ *
+ * Returns the number of packets processed.
+ */
+static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
+ napi);
+ int rx_count;
+
+ rx_count = cvm_oct_poll(rx_group, budget);
+
+ if (rx_count < budget) {
/* No more work */
napi_complete(napi);
- enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ enable_irq(rx_group->irq);
}
return rx_count;
}
@@ -427,7 +444,17 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
void cvm_oct_poll_controller(struct net_device *dev)
{
- cvm_oct_napi_poll(NULL, 16);
+ int i;
+
+ if (!atomic_read(&oct_rx_ready))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ cvm_oct_poll(&oct_rx_group[i], 16);
+ }
}
#endif
@@ -446,54 +473,80 @@ void cvm_oct_rx_initialize(void)
if (!dev_for_napi)
panic("No net_devices were allocated.");
- netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
- rx_napi_weight);
- napi_enable(&cvm_oct_napi);
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ int ret;
- /* Register an IRQ handler to receive POW interrupts */
- i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
- cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
- if (i)
- panic("Could not acquire Ethernet IRQ %d\n",
- OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
+ napi_enable(&oct_rx_group[i].napi);
- disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+ oct_rx_group[i].group = i;
- /* Enable POW interrupt when our port has at least one packet */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- union cvmx_sso_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
- int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
- } else {
- union cvmx_pow_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
- int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
- }
+ /* Register an IRQ handler to receive POW interrupts */
+ ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+ "Ethernet", &oct_rx_group[i].napi);
+ if (ret)
+ panic("Could not acquire Ethernet IRQ %d\n",
+ oct_rx_group[i].irq);
+
+ disable_irq_nosync(oct_rx_group[i].irq);
+
+ /* Enable POW interrupt when our port has at least one packet */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_sso_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+ } else {
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
- /* Schedule NAPI now. This will indirectly enable the interrupt. */
- napi_schedule(&cvm_oct_napi);
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+ }
+
+ /* Schedule NAPI now. This will indirectly enable the
+ * interrupt.
+ */
+ napi_schedule(&oct_rx_group[i].napi);
+ }
+ atomic_inc(&oct_rx_ready);
}
void cvm_oct_rx_shutdown(void)
{
- netif_napi_del(&cvm_oct_napi);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+ if (!(pow_receive_groups & BIT(i)))
+ continue;
+
+ /* Disable POW interrupt */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+ else
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+ /* Free the interrupt handler */
+ free_irq(oct_rx_group[i].irq, cvm_oct_device);
+
+ netif_napi_del(&oct_rx_group[i].napi);
+ }
}
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 45f024bc5e33..617da8037a4d 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -32,12 +32,13 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
*/
static inline int INTERFACE(int ipd_port)
{
- int interface = cvmx_helper_get_interface_num(ipd_port);
+ int interface;
+ if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+ return 10;
+ interface = cvmx_helper_get_interface_num(ipd_port);
if (interface >= 0)
return interface;
- else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
- return 10;
panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index e9cd5f242921..d02e3e31ed29 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <net/dst.h>
@@ -35,17 +37,19 @@
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
-
+#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
+#define OCTEON_MAX_MTU 65392
+
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
"\tFPA. By default, 1024 packet buffers are used.\n");
-int pow_receive_group = 15;
+static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
@@ -53,6 +57,15 @@ MODULE_PARM_DESC(pow_receive_group, "\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");
+static int receive_group_order;
+module_param(receive_group_order, int, 0444);
+MODULE_PARM_DESC(receive_group_order, "\n"
+ "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
+ "\twill be configured to send incoming packets to multiple POW\n"
+ "\tgroups. pow_receive_group parameter is ignored when multiple\n"
+ "\tgroups are taken into use and groups are allocated starting\n"
+ "\tfrom 0. By default, a single group is used.\n");
+
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
@@ -86,6 +99,8 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
/*
* cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -237,21 +252,22 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- int vlan_bytes = 4;
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ int vlan_bytes = VLAN_HLEN;
#else
int vlan_bytes = 0;
#endif
+ int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
/*
* Limit the MTU to make sure the ethernet packets are between
* 64 bytes and 65535 bytes.
*/
- if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
- (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) ||
+ (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) {
pr_err("MTU must be between %d and %d.\n",
- 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
+ VLAN_ETH_ZLEN - mtu_overhead,
+ OCTEON_MAX_MTU - mtu_overhead);
return -EINVAL;
}
dev->mtu = new_mtu;
@@ -259,8 +275,9 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
if ((interface < 2) &&
(cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int index = INDEX(priv->port);
/* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+ int max_packet = new_mtu + mtu_overhead;
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX)) {
@@ -275,7 +292,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
union cvmx_pip_frm_len_chkx frm_len_chk;
frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
frm_len_chk.s.maxlen = max_packet;
cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
frm_len_chk.u64);
@@ -300,12 +317,12 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
if ((interface < 2) &&
(cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
union cvmx_gmxx_rxx_adr_ctl control;
+ int index = INDEX(priv->port);
control.u64 = 0;
control.s.bcst = 1; /* Allow broadcast MAC addresses */
@@ -352,7 +369,6 @@ static int cvm_oct_set_mac_filter(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_gmxx_prtx_cfg gmx_cfg;
int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
if ((interface < 2) &&
(cvmx_helper_interface_get_mode(interface) !=
@@ -360,6 +376,7 @@ static int cvm_oct_set_mac_filter(struct net_device *dev)
int i;
u8 *ptr = dev->dev_addr;
u64 mac = 0;
+ int index = INDEX(priv->port);
for (i = 0; i < 6; i++)
mac = (mac << 8) | (u64)ptr[i];
@@ -457,10 +474,8 @@ int cvm_oct_common_init(struct net_device *dev)
void cvm_oct_common_uninit(struct net_device *dev)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
}
int cvm_oct_common_open(struct net_device *dev,
@@ -479,15 +494,17 @@ int cvm_oct_common_open(struct net_device *dev,
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 1;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ gmx_cfg.s.pknd = priv->port;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (octeon_is_simulation())
return 0;
- if (priv->phydev) {
- int r = phy_read_status(priv->phydev);
+ if (dev->phydev) {
+ int r = phy_read_status(dev->phydev);
- if (r == 0 && priv->phydev->link == 0)
+ if (r == 0 && dev->phydev->link == 0)
netif_carrier_off(dev);
cvm_oct_adjust_link(dev);
} else {
@@ -510,8 +527,10 @@ void cvm_oct_link_poll(struct net_device *dev)
if (link_info.u64 == priv->link_info)
return;
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
+ if (cvmx_helper_link_set(priv->port, link_info))
+ link_info.u64 = priv->link_info;
+ else
+ priv->link_info = link_info.u64;
if (link_info.s.link_up) {
if (!netif_carrier_ok(dev))
@@ -649,6 +668,16 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
return np;
}
+static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+{
+ u32 delay_value;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value))
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
+ if (!of_property_read_u32(np, "tx-delay", &delay_value))
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+}
+
static int cvm_oct_probe(struct platform_device *pdev)
{
int num_interfaces;
@@ -665,11 +694,18 @@ static int cvm_oct_probe(struct platform_device *pdev)
return -EINVAL;
}
-
cvm_oct_configure_common_hw();
cvmx_helper_initialize_packet_io_global();
+ if (receive_group_order) {
+ if (receive_group_order > 4)
+ receive_group_order = 4;
+ pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
+ } else {
+ pow_receive_groups = BIT(pow_receive_group);
+ }
+
/* Change the input group for all ports before input is enabled */
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface = 0; interface < num_interfaces; interface++) {
@@ -683,7 +719,37 @@ static int cvm_oct_probe(struct platform_device *pdev)
pip_prt_tagx.u64 =
cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
- pip_prt_tagx.s.grp = pow_receive_group;
+
+ if (receive_group_order) {
+ int tag_mask;
+
+ /* We support only 16 groups at the moment, so
+ * always disable the two additional "hidden"
+ * tag_mask bits on CN68XX.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ pip_prt_tagx.u64 |= 0x3ull << 44;
+
+ tag_mask = ~((1 << receive_group_order) - 1);
+ pip_prt_tagx.s.grptagbase = 0;
+ pip_prt_tagx.s.grptagmask = tag_mask;
+ pip_prt_tagx.s.grptag = 1;
+ pip_prt_tagx.s.tag_mode = 0;
+ pip_prt_tagx.s.inc_prt_flag = 1;
+ pip_prt_tagx.s.ip6_dprt_flag = 1;
+ pip_prt_tagx.s.ip4_dprt_flag = 1;
+ pip_prt_tagx.s.ip6_sprt_flag = 1;
+ pip_prt_tagx.s.ip4_sprt_flag = 1;
+ pip_prt_tagx.s.ip6_dst_flag = 1;
+ pip_prt_tagx.s.ip4_dst_flag = 1;
+ pip_prt_tagx.s.ip6_src_flag = 1;
+ pip_prt_tagx.s.ip4_src_flag = 1;
+ pip_prt_tagx.s.grp = 0;
+ } else {
+ pip_prt_tagx.s.grptag = 0;
+ pip_prt_tagx.s.grp = pow_receive_group;
+ }
+
cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
pip_prt_tagx.u64);
}
@@ -705,7 +771,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
if ((pow_send_group != -1)) {
struct net_device *dev;
- pr_info("\tConfiguring device for POW only access\n");
dev = alloc_etherdev(sizeof(struct octeon_ethernet));
if (dev) {
/* Initialize the device private structure. */
@@ -808,6 +873,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
case CVMX_HELPER_INTERFACE_MODE_GMII:
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
+ cvm_set_rgmii_delay(priv->of_node, interface,
+ port_index);
break;
}
@@ -844,17 +911,8 @@ static int cvm_oct_remove(struct platform_device *pdev)
{
int port;
- /* Disable POW interrupt */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
- else
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
-
cvmx_ipd_disable();
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
-
atomic_inc_return(&cvm_oct_poll_queue_stopping);
cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
@@ -878,7 +936,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
}
}
-
cvmx_pko_shutdown();
cvmx_ipd_free_ptr();
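
The probe path above derives the pow_receive_groups mask from the new receive_group_order parameter: the order is clamped to 4 and the mask covers 2^order consecutive groups starting at 0, falling back to BIT(pow_receive_group) when the order is 0. A small stand-alone sketch of just that arithmetic, assuming the same clamp:

#include <stdio.h>

/* Same clamp and mask arithmetic as in cvm_oct_probe() above. */
static unsigned int group_mask(int receive_group_order)
{
        if (receive_group_order > 4)
                receive_group_order = 4;
        return (1u << (1u << receive_group_order)) - 1;
}

int main(void)
{
        int order;

        for (order = 1; order <= 4; order++)
                printf("order %d -> mask 0x%x\n", order, group_mask(order));
        return 0;
}

Order 2, for example, yields 0xf (groups 0-3), and order 4 yields 0xffff, i.e. all 16 supported groups.
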
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 6275c15e0035..9c6852d61c0d 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -40,7 +40,6 @@ struct octeon_ethernet {
struct sk_buff_head tx_free_list[16];
/* Device statistics */
struct net_device_stats stats;
- struct phy_device *phydev;
unsigned int last_speed;
unsigned int last_link;
/* Last negotiated link state */
@@ -73,7 +72,7 @@ void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
-extern int pow_receive_group;
+extern int pow_receive_groups;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
extern atomic_t cvm_oct_poll_queue_stopping;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index 6a4d379c16a3..1e23ef15b263 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -49,7 +49,7 @@ static void dcon_clear_irq(void)
static int dcon_was_irq(void)
{
- u_int8_t tmp;
+ u8 tmp;
/* irq status will appear in PMIO_Rx50[6] on gpio12 */
tmp = inb(VX855_GPI_STATUS_CHG);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index a5755358cc5d..553e8d50352f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -78,9 +78,9 @@ static void update_BCNTIM(struct adapter *padapter)
/* update TIM IE */
p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
- if (p != NULL && tim_ielen > 0) {
+ if (p && tim_ielen > 0) {
tim_ielen += 2;
- premainder_ie = p+tim_ielen;
+ premainder_ie = p + tim_ielen;
tim_ie_offset = (int)(p - pie);
remainder_ielen = pnetwork_mlmeext->IELength -
tim_ie_offset - tim_ielen;
@@ -98,7 +98,7 @@ static void update_BCNTIM(struct adapter *padapter)
_SUPPORTEDRATES_IE_, &tmp_len,
(pnetwork_mlmeext->IELength -
_BEACON_IE_OFFSET_));
- if (p != NULL)
+ if (p)
offset += tmp_len+2;
/* DS Parameter Set IE, len = 3 */
@@ -183,10 +183,10 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
i += (pIE->Length + 2);
}
- if (p != NULL && ielen > 0) {
+ if (p && ielen > 0) {
ielen += 2;
- premainder_ie = p+ielen;
+ premainder_ie = p + ielen;
ie_offset = (int)(p - pie);
@@ -195,7 +195,7 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
if (bmatch)
dst_ie = p;
else
- dst_ie = p+ielen;
+ dst_ie = p + ielen;
}
if (remainder_ielen > 0) {
@@ -232,10 +232,10 @@ void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, index, &ielen,
pnetwork->IELength - _FIXED_IE_LENGTH_);
- if (p != NULL && ielen > 0) {
+ if (p && ielen > 0) {
ielen += 2;
- premainder_ie = p+ielen;
+ premainder_ie = p + ielen;
ie_offset = (int)(p - pie);
@@ -385,8 +385,8 @@ void expire_timeout_chk(struct adapter *padapter)
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
} else {
/* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
- if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt) &&
- padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME/pstapriv->asoc_list_cnt/2)) {
+ if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt) &&
+ padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME / pstapriv->asoc_list_cnt / 2)) {
DBG_88E("%s sta:%pM, sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n", __func__,
(psta->hwaddr), psta->sleepq_len,
padapter->xmitpriv.free_xmitframe_cnt,
@@ -470,7 +470,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
/* b/g mode ra_bitmap */
for (i = 0; i < sizeof(psta->bssrateset); i++) {
if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
}
/* n mode ra_bitmap */
if (psta_ht->ht_option) {
@@ -481,8 +481,8 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
limit = 8;/* 1R */
for (i = 0; i < limit; i++) {
- if (psta_ht->ht_cap.supp_mcs_set[i/8] & BIT(i%8))
- tx_ra_bitmap |= BIT(i+12);
+ if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
+ tx_ra_bitmap |= BIT(i + 12);
}
/* max short GI rate */
@@ -507,19 +507,19 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
psta->wireless_mode = sta_band;
raid = networktype_to_raid(sta_band);
- init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+ init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
if (psta->aid < NUM_STA) {
u8 arg = 0;
- arg = psta->mac_id&0x1f;
+ arg = psta->mac_id & 0x1f;
arg |= BIT(7);/* support entry 2~31 */
if (shortGIrate)
arg |= BIT(5);
- tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+ tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
DBG_88E("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = 0x%x\n",
__func__, psta->mac_id, raid, tx_ra_bitmap, arg);
@@ -573,7 +573,7 @@ static void update_bmc_sta(struct adapter *padapter)
/* b/g mode ra_bitmap */
for (i = 0; i < supportRateNum; i++) {
if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
}
if (pcur_network->Configuration.DSConfig > 14) {
@@ -587,7 +587,7 @@ static void update_bmc_sta(struct adapter *padapter)
}
raid = networktype_to_raid(network_type);
- init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+ init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
/* ap mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -597,7 +597,7 @@ static void update_bmc_sta(struct adapter *padapter)
arg = psta->mac_id&0x1f;
arg |= BIT(7);
- tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+ tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
DBG_88E("update_bmc_sta, mask = 0x%x, arg = 0x%x\n", tx_ra_bitmap, arg);
/* bitmap[0:27] = tx_rate_bitmap */
@@ -636,7 +636,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
struct ht_priv *phtpriv_sta = &psta->htpriv;
- psta->mac_id = psta->aid+1;
+ psta->mac_id = psta->aid + 1;
DBG_88E("%s\n", __func__);
/* ap mode */
@@ -658,11 +658,15 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
/* check if sta supports Short GI */
- if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
+ if (le16_to_cpu(phtpriv_sta->ht_cap.cap_info &
+ phtpriv_ap->ht_cap.cap_info) &
+ (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
phtpriv_sta->sgi = true;
/* bwmode */
- if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) {
+ if (le16_to_cpu(phtpriv_sta->ht_cap.cap_info &
+ phtpriv_ap->ht_cap.cap_info) &
+ IEEE80211_HT_CAP_SUP_WIDTH) {
phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
}
@@ -702,12 +706,12 @@ static void update_hw_ht_param(struct adapter *padapter)
/* handle A-MPDU parameter field */
/*
- AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- AMPDU_para [4:2]:Min MPDU Start Spacing
+ ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ ampdu_params_info [4:2]:Min MPDU Start Spacing
*/
- max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+ max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
@@ -716,7 +720,7 @@ static void update_hw_ht_param(struct adapter *padapter)
/* */
/* Config SM Power Save setting */
/* */
- pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
}
@@ -746,7 +750,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* check if there is wps ie, */
/* if there is wpsie in beacon, the hostapd will update beacon twice when starting hostapd, */
/* and at first time the security ie (RSN/WPA IE) will not include in beacon. */
- if (!rtw_get_wps_ie(pnetwork->IEs+_FIXED_IE_LENGTH_, pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
+ if (!rtw_get_wps_ie(pnetwork->IEs + _FIXED_IE_LENGTH_, pnetwork->IELength - _FIXED_IE_LENGTH_, NULL, NULL))
pmlmeext->bstart_bss = true;
/* todo: update wmm, ht cap */
@@ -799,7 +803,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* set channel, bwmode */
p = rtw_get_ie((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - sizeof(struct ndis_802_11_fixed_ie)));
if (p && ie_len) {
- pht_info = (struct HT_info_element *)(p+2);
+ pht_info = (struct HT_info_element *)(p + 2);
if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
/* switch to the 40M Hz mode */
@@ -930,15 +934,15 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
/* get supported rates */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p != NULL) {
- memcpy(supportRate, p+2, ie_len);
+ if (p) {
+ memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
/* get ext_supported rates */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
- if (p != NULL) {
- memcpy(supportRate+supportRateNum, p+2, ie_len);
+ if (p) {
+ memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
}
@@ -966,7 +970,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
- if (rtw_parse_wpa2_ie(p, ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
@@ -985,8 +989,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && (!memcmp(p+2, OUI1, 4))) {
- if (rtw_parse_wpa_ie(p, ie_len+2, &group_cipher,
+ if ((p) && (!memcmp(p + 2, OUI1, 4))) {
+ if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -1010,10 +1014,10 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
+ if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
- *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+ *(p + 8) |= BIT(7);/* QoS Info, support U-APSD */
/* disable all ACM bits since the WMM admission control is not supported */
*(p + 10) &= ~BIT(4); /* BE */
@@ -1032,7 +1036,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
(pbss_network->IELength - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
u8 rf_type;
- struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p+2);
+ struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
pHT_caps_ie = p;
ht_cap = true;
@@ -1042,7 +1046,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
(psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
else
pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
@@ -1050,8 +1054,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03);
if (rf_type == RF_1T1R) {
- pht_cap->supp_mcs_set[0] = 0xff;
- pht_cap->supp_mcs_set[1] = 0x0;
+ pht_cap->mcs.rx_mask[0] = 0xff;
+ pht_cap->mcs.rx_mask[1] = 0x0;
}
memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
}
@@ -1143,7 +1147,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
- if ((NUM_ACL-1) < pacl_list->num)
+ if ((NUM_ACL - 1) < pacl_list->num)
return -1;
spin_lock_bh(&(pacl_node_q->lock));
@@ -1422,7 +1426,8 @@ static int rtw_ht_operation_update(struct adapter *padapter)
if (pmlmepriv->num_sta_no_ht ||
(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
new_op_mode = OP_MODE_MIXED;
- else if ((phtpriv_ap->ht_cap.cap_info & IEEE80211_HT_CAP_SUP_WIDTH) &&
+ else if ((le16_to_cpu(phtpriv_ap->ht_cap.cap_info) &
+ IEEE80211_HT_CAP_SUP_WIDTH) &&
pmlmepriv->num_sta_ht_20mhz)
new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
else if (pmlmepriv->olbc_ht)
@@ -1552,7 +1557,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
}
if (psta->flags & WLAN_STA_HT) {
- u16 ht_capab = psta->htpriv.ht_cap.cap_info;
+ u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
DBG_88E("HT: STA %pM HT Capabilities Info: 0x%04x\n",
(psta->hwaddr), ht_capab);
@@ -1710,40 +1715,6 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
return beacon_updated;
}
-int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
-{
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
- return 0;
-
- DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* for each sta in asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = plist->next;
-
- issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
- psta->expire_to = min_t(unsigned int, pstapriv->expire_to * 2, 5);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
-
- return 0;
-}
-
int rtw_sta_flush(struct adapter *padapter)
{
struct list_head *phead, *plist;
@@ -1856,9 +1827,6 @@ void start_ap_mode(struct adapter *padapter)
pmlmepriv->wps_probe_resp_ie = NULL;
pmlmepriv->wps_assoc_resp_ie = NULL;
- pmlmepriv->p2p_beacon_ie = NULL;
- pmlmepriv->p2p_probe_resp_ie = NULL;
-
/* for ACL */
INIT_LIST_HEAD(&(pacl_list->acl_node_q.queue));
pacl_list->num = 0;
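
Several of the rtw_ap.c changes above wrap ht_cap.cap_info in le16_to_cpu() before testing IEEE80211_HT_CAP_* bits, because the capability field is carried little-endian on the air while the bit masks are host-order constants. A stand-alone illustration of that pattern, using le16toh()/htole16() from <endian.h> as user-space stand-ins for the kernel helpers (the HT_CAP_SGI_20 value here is just the standard bit, not taken from this driver's headers):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define HT_CAP_SGI_20 0x0020    /* host-order capability bit */

int main(void)
{
        /* cap_info as it appears in the frame: little-endian on the wire */
        uint16_t cap_info_le = htole16(0x0020);

        /* convert to host order before applying the host-order mask */
        if (le16toh(cap_info_le) & HT_CAP_SGI_20)
                printf("short GI for 20 MHz supported\n");
        return 0;
}

On little-endian hosts the conversion is a no-op; on big-endian hosts it swaps the bytes so the mask test still hits the intended bits.
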
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 77485235c615..f1f4788dbd86 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -27,8 +27,8 @@ No irqsave is necessary.
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- sema_init(&(pcmdpriv->cmd_queue_sema), 0);
- sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0);
+ init_completion(&pcmdpriv->cmd_queue_comp);
+ init_completion(&pcmdpriv->terminate_cmdthread_comp);
_rtw_init_queue(&(pcmdpriv->cmd_queue));
return _SUCCESS;
@@ -85,7 +85,7 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
/* To decide allow or not */
if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) &&
(!pcmdpriv->padapter->registrypriv.usbss_enable)) {
- if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ if (cmd_obj->cmdcode == _Set_Drv_Extra_CMD_) {
struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
if (pdrvextra_cmd_parm->ec_id == POWER_SAVING_CTRL_WK_CID)
@@ -93,7 +93,7 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
}
}
- if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
+ if (cmd_obj->cmdcode == _SetChannelPlan_CMD_)
bAllow = true;
if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
@@ -122,7 +122,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
if (res == _SUCCESS)
- up(&pcmdpriv->cmd_queue_sema);
+ complete(&pcmdpriv->cmd_queue_comp);
exit:
@@ -162,12 +162,12 @@ int rtw_cmd_thread(void *context)
allow_signal(SIGTERM);
pcmdpriv->cmdthd_running = true;
- up(&pcmdpriv->terminate_cmdthread_sema);
+ complete(&pcmdpriv->terminate_cmdthread_comp);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
while (1) {
- if (_rtw_down_sema(&pcmdpriv->cmd_queue_sema) == _FAIL)
+ if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp))
break;
if (padapter->bDriverStopped ||
@@ -234,7 +234,7 @@ _next:
rtw_free_cmd_obj(pcmd);
}
- up(&pcmdpriv->terminate_cmdthread_sema);
+ complete(&pcmdpriv->terminate_cmdthread_comp);
complete_and_exit(NULL, 0);
@@ -271,7 +271,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("%s: flush network queue\n", __func__));
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, _SiteSurvey_CMD_);
/* psurveyPara->bsslimit = 48; */
psurveyPara->scan_mode = pmlmepriv->scan_mode;
@@ -305,8 +305,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
if (res == _SUCCESS) {
- pmlmepriv->scan_start_time = jiffies;
-
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
@@ -491,7 +489,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
INIT_LIST_HEAD(&pcmd->list);
- pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
+ pcmd->cmdcode = _JoinBss_CMD_;
pcmd->parmbuf = (unsigned char *)psecnetwork;
pcmd->rsp = NULL;
pcmd->rspsz = 0;
@@ -670,13 +668,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
+ paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
@@ -686,7 +684,7 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
paddbareq_parm->tid = tid;
memcpy(paddbareq_parm->addr, addr, ETH_ALEN);
- init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, _AddBAReq_CMD_);
/* DBG_88E("rtw_addbareq_cmd, tid =%d\n", tid); */
@@ -724,7 +722,7 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
pdrvextra_cmd_parm->type_size = 0;
pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
@@ -767,7 +765,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
goto exit;
}
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, _SetChannelPlan_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
} else {
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
@@ -936,7 +934,7 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
pdrvextra_cmd_parm->type_size = lps_ctrl_type;
pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else {
@@ -978,7 +976,7 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
pdrvextra_cmd_parm->ec_id = RTP_TIMER_CFG_WK_CID;
pdrvextra_cmd_parm->type_size = min_time;
pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
@@ -1020,7 +1018,7 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
pdrvextra_cmd_parm->ec_id = ANT_SELECT_WK_CID;
pdrvextra_cmd_parm->type_size = antenna;
pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else {
@@ -1048,7 +1046,7 @@ u8 rtw_ps_cmd(struct adapter *padapter)
pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
return rtw_enqueue_cmd(pcmdpriv, ppscmd);
}
@@ -1119,7 +1117,7 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
pdrvextra_cmd_parm->type_size = 0;
pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
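
The rtw_cmd.c hunks above convert the command-thread wakeup from a counting semaphore to a completion (init_completion()/complete()/wait_for_completion_interruptible()). A rough user-space analogue of that signalling pattern is sketched below, with pthread primitives standing in for the kernel API; it is only an illustration of the idea, not the driver code.

#include <pthread.h>
#include <stdio.h>

/* A completion is essentially a "done" flag guarded by a lock plus a waiter. */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        int done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->wait, NULL);
        c->done = 0;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->wait);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->wait, &c->lock);
        c->done = 0;    /* consume the event, like the queue wakeup */
        pthread_mutex_unlock(&c->lock);
}

static struct completion cmd_queued;

static void *cmd_thread(void *arg)
{
        wait_for_completion(&cmd_queued);
        printf("command dequeued\n");
        return NULL;
}

int main(void)
{
        pthread_t thr;

        init_completion(&cmd_queued);
        pthread_create(&thr, NULL, cmd_thread, NULL);
        complete(&cmd_queued);  /* what the enqueue path does on success */
        pthread_join(&thr, NULL);
        return 0;
}
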
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index db5c952ac852..60d8c7b9f458 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -138,144 +138,6 @@ int proc_set_read_reg(struct file *file, const char __user *buffer,
return count;
}
-int proc_get_fwstate(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv));
-
- *eof = 1;
- return len;
-}
-
-int proc_get_sec_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n",
- psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
- psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mlmext_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "pmlmeinfo->state=0x%x\n", pmlmeinfo->state);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_qos_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "qos_option=%d\n", pmlmepriv->qospriv.qos_option);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_ht_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int len = 0;
-
- len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offset=%d\n",
- pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
- *eof = 1;
- return len;
-}
-
-int proc_get_ap_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct sta_info *psta;
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct sta_priv *pstapriv = &padapter->stapriv;
- int len = 0;
-
- psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
- if (psta) {
- int i;
- struct recv_reorder_ctrl *preorder_ctrl;
-
- len += snprintf(page + len, count - len, "SSID=%s\n", cur_network->network.Ssid.Ssid);
- len += snprintf(page + len, count - len, "sta's macaddr:%pM\n", psta->hwaddr);
- len += snprintf(page + len, count - len, "cur_channel=%d, cur_bwmode=%d, cur_ch_offset=%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
- len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
- len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
- len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
- len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
- len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
- len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
-
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- if (preorder_ctrl->enable)
- len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", i, preorder_ctrl->indicate_seq);
- }
- } else {
- len += snprintf(page + len, count - len, "can't get sta's macaddr, cur_network's macaddr: %pM\n", cur_network->network.MacAddress);
- }
-
- *eof = 1;
- return len;
-}
-
int proc_get_adapter_state(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
@@ -291,599 +153,6 @@ int proc_get_adapter_state(char *page, char **start,
return len;
}
-int proc_get_trx_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int len = 0;
-
- len += snprintf(page + len, count - len, "free_xmitbuf_cnt=%d, free_xmitframe_cnt=%d, free_ext_xmitbuf_cnt=%d, free_recvframe_cnt=%d\n",
- pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt, precvpriv->free_recvframe_cnt);
- len += snprintf(page + len, count - len, "rx_urb_pending_cn=%d\n", precvpriv->rx_pending_cnt);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
-
- for (i = 0x0; i < 0x300; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
- memset(page, 0, count);
- for (i = 0x300; i < 0x600; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
-
- for (i = 0x600; i < 0x800; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0x800; i < 0xB00; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0xB00; i < 0xE00; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0xE00; i < 0x1000; i += 4) {
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", usb_read32(padapter, i));
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 1;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0; i < 0xC0; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 1;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0xC0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 2;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0; i < 0xC0; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-
-int proc_get_rf_reg_dump4(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 2;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0xC0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j%4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++)%4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-
-
-int proc_get_rx_signal(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
-
- len = snprintf(page + len, count,
- "rssi:%d\n"
- "rxpwdb:%d\n"
- "signal_strength:%u\n"
- "signal_qual:%u\n"
- "noise:%u\n",
- padapter->recvpriv.rssi,
- padapter->recvpriv.rxpwdb,
- padapter->recvpriv.signal_strength,
- padapter->recvpriv.signal_qual,
- padapter->recvpriv.noise
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_rx_signal(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 is_signal_dbg;
- s32 signal_strength;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
-
- is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
- if (is_signal_dbg && num != 2)
- return count;
-
- signal_strength = clamp(signal_strength, 0, 100);
-
- padapter->recvpriv.is_signal_dbg = is_signal_dbg;
- padapter->recvpriv.signal_strength_dbg = signal_strength;
-
- if (is_signal_dbg)
- DBG_88E("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
- else
- DBG_88E("set %s\n", "HW_SIGNAL_STRENGTH");
- }
- return count;
-}
-
-int proc_get_ht_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->ht_enable
- );
- *eof = 1;
- return len;
-}
-
-int proc_set_ht_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->ht_enable = mode;
- pr_info("ht_enable=%d\n", pregpriv->ht_enable);
- }
- }
-
- return count;
-}
-
-int proc_get_cbw40_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->cbw40_enable
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->cbw40_enable = mode;
- pr_info("cbw40_enable=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_ampdu_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->ampdu_enable
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->ampdu_enable = mode;
- pr_info("ampdu_enable=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_two_path_rssi(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- int len = 0;
-
- if (padapter)
- len += snprintf(page + len, count - len,
- "%d %d\n",
- padapter->recvpriv.RxRssi[0],
- padapter->recvpriv.RxRssi[1]
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_get_rx_stbc(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->rx_stbc
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_rx_stbc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->rx_stbc = mode;
- netdev_info(dev, "rx_stbc=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_rssi_disp(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- *eof = 1;
- return 0;
-}
-
-int proc_set_rssi_disp(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 enable = 0;
-
- if (count < 1) {
- DBG_88E("argument size is less than 1\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x", &enable);
-
- if (num != 1) {
- DBG_88E("invalid set_rssi_disp parameter!\n");
- return count;
- }
-
- if (enable) {
- DBG_88E("Turn On Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = enable;
- } else {
- DBG_88E("Turn Off Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = 0;
- }
- }
- return count;
-}
-
-#ifdef CONFIG_88EU_AP_MODE
-
-int proc_get_all_sta_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct sta_info *psta;
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct sta_priv *pstapriv = &padapter->stapriv;
- int i, j;
- struct list_head *plist, *phead;
- struct recv_reorder_ctrl *preorder_ctrl;
- int len = 0;
-
-
- len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- for (i = 0; i < NUM_STA; i++) {
- phead = &pstapriv->sta_hash[i];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- plist = plist->next;
-
- len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
- len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
- len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
- len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
- len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
- len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
- len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
- len += snprintf(page + len, count - len, "sleepq_len=%d\n", psta->sleepq_len);
- len += snprintf(page + len, count - len, "capability=0x%x\n", psta->capability);
- len += snprintf(page + len, count - len, "flags=0x%x\n", psta->flags);
- len += snprintf(page + len, count - len, "wpa_psk=0x%x\n", psta->wpa_psk);
- len += snprintf(page + len, count - len, "wpa2_group_cipher=0x%x\n", psta->wpa2_group_cipher);
- len += snprintf(page + len, count - len, "wpa2_pairwise_cipher=0x%x\n", psta->wpa2_pairwise_cipher);
- len += snprintf(page + len, count - len, "qos_info=0x%x\n", psta->qos_info);
- len += snprintf(page + len, count - len, "dot118021XPrivacy=0x%x\n", psta->dot118021XPrivacy);
-
- for (j = 0; j < 16; j++) {
- preorder_ctrl = &psta->recvreorder_ctrl[j];
- if (preorder_ctrl->enable)
- len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", j, preorder_ctrl->indicate_seq);
- }
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- *eof = 1;
- return len;
-}
-#endif
-
int proc_get_best_channel(char *page, char **start,
off_t offset, int count,
int *eof, void *data)
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index fbce1f7e68ca..16cc7706a1e6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -317,69 +317,6 @@ void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _si
}
}
-/* Do not support BT */
-void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
-{
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- {
- u8 *pMax_section;
- pMax_section = pOut;
- *pMax_section = EFUSE_MAX_SECTION_88E;
- }
- break;
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- {
- u16 *pu2Tmp;
- pu2Tmp = pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_EFUSE_MAP_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = pOut;
- *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
- }
- break;
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- {
- u8 *pu1Tmp;
- pu1Tmp = pOut;
- *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- default:
- {
- u8 *pu1Tmp;
- pu1Tmp = pOut;
- *pu1Tmp = 0;
- }
- break;
- }
-}
-
u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data)
{
u16 tmpaddr = 0;
@@ -483,14 +420,11 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
u8 hoffset = 0, hworden = 0;
u8 tmpidx = 0;
u8 tmpdata[8];
- u8 max_section = 0;
u8 tmp_header = 0;
- EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section);
-
if (!data)
return false;
- if (offset > max_section)
+ if (offset > EFUSE_MAX_SECTION_88E)
return false;
memset(data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
@@ -591,12 +525,12 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
{
bool bRet = false;
- u16 efuse_addr = *pAddr, efuse_max_available_len = 0;
+ u16 efuse_addr = *pAddr;
+ u16 efuse_max_available_len =
+ EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E;
u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
u8 repeatcnt = 0;
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len);
-
while (efuse_addr < efuse_max_available_len) {
pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
efuse_OneByteWrite(pAdapter, efuse_addr, pg_header);
@@ -769,12 +703,11 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
bool bRet = false;
u8 i, efuse_data = 0, cur_header = 0;
u8 matched_wden = 0, badworden = 0;
- u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
+ u16 startAddr = 0;
+ u16 efuse_max_available_len =
+ EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E;
struct pgpkt curPkt;
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len);
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max);
-
rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
startAddr %= EFUSE_REAL_CONTENT_LEN;
@@ -846,12 +779,7 @@ hal_EfusePgCheckAvailableAddr(
u8 efuseType
)
{
- u16 efuse_max_available_len = 0;
-
- /* Change to check TYPE_EFUSE_MAP_LEN , because 8188E raw 256, logic map over 256. */
- EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len);
-
- if (Efuse_GetCurrentSize(pAdapter) >= efuse_max_available_len)
+ if (Efuse_GetCurrentSize(pAdapter) >= EFUSE_MAP_LEN_88E)
return false;
return true;
}
@@ -977,13 +905,9 @@ void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
*/
static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse)
{
- u16 mapLen = 0;
-
Efuse_PowerSwitch(pAdapter, false, true);
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
-
- efuse_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse);
+ efuse_ReadEFuse(pAdapter, efuseType, 0, EFUSE_MAP_LEN_88E, Efuse);
Efuse_PowerSwitch(pAdapter, false, false);
}
@@ -996,12 +920,9 @@ void EFUSE_ShadowMapUpdate(
u8 efuseType)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
if (pEEPROM->bautoload_fail_flag)
- memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
+ memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E);
else
Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data);
}
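
A minimal sketch of the simplification the rtw_efuse.c hunks above apply: a type-dispatched getter that only ever returned constants for the one supported chip is removed, and callers compare against the constant directly. All names prefixed example_ and the constant value are illustrative, not the driver's.

#include <linux/types.h>

#define EXAMPLE_MAX_SECTION	64	/* illustrative stand-in for EFUSE_MAX_SECTION_88E */

/* Before: an out-parameter switch fetched a compile-time value. */
static void example_get_definition(u8 type, void *out)
{
	switch (type) {
	case 0:
		*(u8 *)out = EXAMPLE_MAX_SECTION;
		break;
	default:
		*(u8 *)out = 0;
		break;
	}
}

/* After: the bound is used where it is needed. */
static bool example_offset_valid(u8 offset)
{
	return offset <= EXAMPLE_MAX_SECTION;
}
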
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 0b0d78fe83ed..914c4923421b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -155,59 +155,6 @@ u8 *rtw_set_ie
return pbuf + len + 2;
}
-inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
- u8 new_ch, u8 ch_switch_cnt)
-{
- u8 ie_data[3];
-
- ie_data[0] = ch_switch_mode;
- ie_data[1] = new_ch;
- ie_data[2] = ch_switch_cnt;
- return rtw_set_ie(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
-}
-
-inline u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset)
-{
- if (ch_offset == SCN)
- return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- else if (ch_offset == SCA)
- return HAL_PRIME_CHNL_OFFSET_UPPER;
- else if (ch_offset == SCB)
- return HAL_PRIME_CHNL_OFFSET_LOWER;
-
- return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-}
-
-inline u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset)
-{
- if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- return SCN;
- else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- return SCB;
- else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- return SCA;
-
- return SCN;
-}
-
-inline u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset)
-{
- return rtw_set_ie(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET, 1, &secondary_ch_offset, buf_len);
-}
-
-inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
- u8 flags, u16 reason, u16 precedence)
-{
- u8 ie_data[6];
-
- ie_data[0] = ttl;
- ie_data[1] = flags;
- *(u16 *)(ie_data + 2) = cpu_to_le16(reason);
- *(u16 *)(ie_data + 4) = cpu_to_le16(precedence);
-
- return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len);
-}
-
/*----------------------------------------------------------------------------
index: the information element id index, limit is the limit for search
-----------------------------------------------------------------------------*/
@@ -236,97 +183,6 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
return NULL;
}
-/**
- * rtw_get_ie_ex - Search specific IE from a series of IEs
- * @in_ie: Address of IEs to search
- * @in_len: Length limit from in_ie
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
- * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
- *
- * Returns: The address of the specific IE found, or NULL
- */
-u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
-{
- uint cnt;
- u8 *target_ie = NULL;
-
-
- if (ielen)
- *ielen = 0;
-
- if (!in_ie || in_len <= 0)
- return target_ie;
-
- cnt = 0;
-
- while (cnt < in_len) {
- if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt + 2], oui, oui_len))) {
- target_ie = &in_ie[cnt];
-
- if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
-
- if (ielen)
- *ielen = in_ie[cnt + 1] + 2;
-
- break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
- }
- }
- return target_ie;
-}
-
-/**
- * rtw_ies_remove_ie - Find matching IEs and remove
- * @ies: Address of IEs to search
- * @ies_len: Pointer of length of ies, will update to new length
- * @offset: The offset to start scarch
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- *
- * Returns: _SUCCESS: ies is updated, _FAIL: not updated
- */
-int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
-{
- int ret = _FAIL;
- u8 *target_ie;
- u32 target_ielen;
- u8 *start;
- uint search_len;
-
- if (!ies || !ies_len || *ies_len <= offset)
- goto exit;
-
- start = ies + offset;
- search_len = *ies_len - offset;
-
- while (1) {
- target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
- if (target_ie && target_ielen) {
- u8 buf[MAX_IE_SZ] = {0};
- u8 *remain_ies = target_ie + target_ielen;
- uint remain_len = search_len - (remain_ies - start);
-
- memcpy(buf, remain_ies, remain_len);
- memcpy(target_ie, buf, remain_len);
- *ies_len = *ies_len - target_ielen;
- ret = _SUCCESS;
-
- start = target_ie;
- search_len = remain_len;
- } else {
- break;
- }
- }
-exit:
- return ret;
-}
-
void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
{
@@ -1096,43 +952,6 @@ void rtw_macaddr_cfg(u8 *mac_addr)
DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", (mac_addr));
}
-void dump_ies(u8 *buf, u32 buf_len)
-{
- u8 *pos = buf;
- u8 id, len;
-
- while (pos - buf <= buf_len) {
- id = *pos;
- len = *(pos + 1);
-
- DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
- dump_wps_ie(pos, len);
-
- pos += (2 + len);
- }
-}
-
-void dump_wps_ie(u8 *ie, u32 ie_len)
-{
- u8 *pos = ie;
- u16 id;
- u16 len;
- u8 *wps_ie;
- uint wps_ielen;
-
- wps_ie = rtw_get_wps_ie(ie, ie_len, NULL, &wps_ielen);
- if (wps_ie != ie || wps_ielen == 0)
- return;
-
- pos += 6;
- while (pos - ie < ie_len) {
- id = get_unaligned_be16(pos);
- len = get_unaligned_be16(pos + 2);
- DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
- pos += (4 + len);
- }
-}
-
/* Baron adds to avoid FreeBSD warning */
int ieee80211_is_empty_essid(const char *essid, int essid_len)
{
@@ -1223,7 +1042,6 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
__le16 le_tmp;
u16 wpa_len = 0, rsn_len = 0;
struct HT_info_element *pht_info = NULL;
- struct rtw_ieee80211_ht_cap *pht_cap = NULL;
unsigned int len;
unsigned char *p;
@@ -1259,10 +1077,12 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
/* parsing HT_CAP_IE */
p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
- pnetwork->BcnInfo.ht_cap_info = pht_cap->cap_info;
+ struct ieee80211_ht_cap *ht_cap =
+ (struct ieee80211_ht_cap *)(p + 2);
+
+ pnetwork->BcnInfo.ht_cap_info = le16_to_cpu(ht_cap->cap_info);
} else {
- pnetwork->BcnInfo.ht_cap_info = 0;
+ pnetwork->BcnInfo.ht_cap_info = 0;
}
/* parsing HT_INFO_IE */
p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
@@ -1335,58 +1155,3 @@ u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsign
}
return max_rate;
}
-
-int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
-{
- const u8 *frame_body = frame + sizeof(struct rtw_ieee80211_hdr_3addr);
- u16 fc;
- u8 c, a = 0;
-
- fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl);
-
- if ((fc & (RTW_IEEE80211_FCTL_FTYPE | RTW_IEEE80211_FCTL_STYPE)) !=
- (RTW_IEEE80211_FTYPE_MGMT | RTW_IEEE80211_STYPE_ACTION))
- return false;
-
- c = frame_body[0];
-
- switch (c) {
- case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
- break;
- default:
- a = frame_body[1];
- }
-
- if (category)
- *category = c;
- if (action)
- *action = a;
-
- return true;
-}
-
-static const char *_action_public_str[] = {
- "ACT_PUB_BSSCOEXIST",
- "ACT_PUB_DSE_ENABLE",
- "ACT_PUB_DSE_DEENABLE",
- "ACT_PUB_DSE_REG_LOCATION",
- "ACT_PUB_EXT_CHL_SWITCH",
- "ACT_PUB_DSE_MSR_REQ",
- "ACT_PUB_DSE_MSR_RPRT",
- "ACT_PUB_MP",
- "ACT_PUB_DSE_PWR_CONSTRAINT",
- "ACT_PUB_VENDOR",
- "ACT_PUB_GAS_INITIAL_REQ",
- "ACT_PUB_GAS_INITIAL_RSP",
- "ACT_PUB_GAS_COMEBACK_REQ",
- "ACT_PUB_GAS_COMEBACK_RSP",
- "ACT_PUB_TDLS_DISCOVERY_RSP",
- "ACT_PUB_LOCATION_TRACK",
- "ACT_PUB_RSVD",
-};
-
-const char *action_public_str(u8 action)
-{
- action = min_t(u8, action, ACT_PUBLIC_MAX);
- return _action_public_str[action];
-}
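
The rtw_get_bcn_info() hunk above switches from a driver-private HT capability struct to the shared struct ieee80211_ht_cap and adds le16_to_cpu() on cap_info. A sketch of that read path, assuming only what <linux/ieee80211.h> provides; the ie_body buffer and the helper name are hypothetical.

#include <linux/kernel.h>
#include <linux/ieee80211.h>

/* ie_body points at the HT Capabilities element payload (hypothetical caller). */
static u16 example_ht_cap_info(const u8 *ie_body)
{
	const struct ieee80211_ht_cap *ht_cap =
		(const struct ieee80211_ht_cap *)ie_body;

	/* cap_info is __le16, so a raw read would be wrong on big-endian. */
	return le16_to_cpu(ht_cap->cap_info);
}
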
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index f85a6abec3a3..6ed23f4db38c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -14,7 +14,6 @@
******************************************************************************/
#define _RTW_IOCTL_SET_C_
-
#include <osdep_service.h>
#include <drv_types.h>
#include <rtw_ioctl_set.h>
@@ -570,10 +569,8 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
struct registry_priv *pregistrypriv = &adapter->registrypriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- struct rtw_ieee80211_ht_cap *pht_capie;
u8 rf_type = 0;
u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
- u16 mcs_rate = 0;
u32 ht_ielen = 0;
if (adapter->registrypriv.mp_mode == 1) {
@@ -588,15 +585,11 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
if (p && ht_ielen > 0) {
- pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
-
- memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
-
/* cur_bwmod is updated by beacon, pmlmeinfo is updated by association response */
bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0;
- short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
- short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
+ short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
+ short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
max_rate = rtw_mcs_rate(
@@ -604,7 +597,7 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
bw_40MHz & (pregistrypriv->cbw40_enable),
short_GI_20,
short_GI_40,
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
+ pmlmeinfo->HT_caps.mcs.rx_mask
);
}
} else {
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 1456499b84bf..ee2dcd05010f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -52,8 +52,6 @@ int rtw_init_mlme_priv(struct adapter *padapter)
_rtw_init_queue(&(pmlmepriv->free_bss_pool));
_rtw_init_queue(&(pmlmepriv->scanned_queue));
- set_scanned_network_val(pmlmepriv, 0);
-
memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
@@ -100,12 +98,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
-
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
- rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
}
#else
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
@@ -143,8 +135,6 @@ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
pnetwork->aid = 0;
pnetwork->join_res = 0;
- pmlmepriv->num_of_scanned++;
-
exit:
spin_unlock_bh(&free_queue->lock);
@@ -175,7 +165,6 @@ static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *
spin_lock_bh(&free_queue->lock);
list_del_init(&(pnetwork->list));
list_add_tail(&(pnetwork->list), &(free_queue->queue));
- pmlmepriv->num_of_scanned--;
spin_unlock_bh(&free_queue->lock);
}
@@ -189,7 +178,6 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
return;
list_del_init(&(pnetwork->list));
list_add_tail(&(pnetwork->list), get_list_head(free_queue));
- pmlmepriv->num_of_scanned--;
}
/*
@@ -271,7 +259,6 @@ void rtw_generate_random_ibss(u8 *pibss)
pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
- return;
}
u8 *rtw_get_capability_from_ie(u8 *ie)
@@ -569,7 +556,6 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
{
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
- return;
}
@@ -732,7 +718,6 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
list_del_init(plist);
list_add_tail(plist, &free_queue->queue);
plist = ptemp;
- pmlmepriv->num_of_scanned--;
}
spin_unlock_bh(&free_queue->lock);
@@ -1935,7 +1920,6 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
u32 ielen, out_len;
enum ht_cap_ampdu_factor max_rx_ampdu_factor;
unsigned char *p;
- struct rtw_ieee80211_ht_cap ht_capie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
@@ -1948,6 +1932,8 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
if (p && ielen > 0) {
+ struct ieee80211_ht_cap ht_cap;
+
if (pqospriv->qos_option == 0) {
out_len = *pout_len;
rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
@@ -1958,33 +1944,33 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
out_len = *pout_len;
- memset(&ht_capie, 0, sizeof(struct rtw_ieee80211_ht_cap));
+ memset(&ht_cap, 0, sizeof(struct ieee80211_ht_cap));
- ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ IEEE80211_HT_CAP_DSSSCCK40);
rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
/*
- AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- AMPDU_para [4:2]:Min MPDU Start Spacing
+ ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ ampdu_params_info [4:2]:Min MPDU Start Spacing
*/
rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
- ht_capie.ampdu_params_info = (max_rx_ampdu_factor&0x03);
+ ht_cap.ampdu_params_info = max_rx_ampdu_factor & 0x03;
if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
- ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ ht_cap.ampdu_params_info |= IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2);
else
- ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
-
+ ht_cap.ampdu_params_info |= IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00;
rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
- sizeof(struct rtw_ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
+ sizeof(struct ieee80211_ht_cap),
+ (unsigned char *)&ht_cap, pout_len);
phtpriv->ht_option = true;
@@ -2000,9 +1986,6 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
/* the function is > passive_level (in critical_section) */
void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
{
- u8 *p, max_ampdu_sz;
- int len;
- struct rtw_ieee80211_ht_cap *pht_capie;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
@@ -2027,34 +2010,21 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
phtpriv->ampdu_enable = true;
}
-
- /* check Max Rx A-MPDU Size */
- len = 0;
- p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
- if (p && len > 0) {
- pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
- max_ampdu_sz = pht_capie->ampdu_params_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
- max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */
- phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
- }
- len = 0;
- p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
-
/* update cur_bwmode & cur_ch_offset */
if ((pregistrypriv->cbw40_enable) &&
- (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
+ (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & BIT(1)) &&
(pmlmeinfo->HT_info.infos[0] & BIT(2))) {
int i;
u8 rf_type;
- padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
for (i = 0; i < 16; i++) {
if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_2R[i];
}
/* switch to the 40M Hz mode according to the AP */
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
@@ -2072,7 +2042,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
}
/* Config SM Power Save setting */
- pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 7f32b39e5869..fb13df586441 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -320,7 +320,7 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int rate_len;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -347,22 +347,22 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
/* pmlmeext->mgnt_seq++; */
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
int len_diff;
@@ -377,8 +377,8 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
);
pframe += (cur_network->IELength+len_diff);
pattrib->pktlen += (cur_network->IELength+len_diff);
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
- pattrib->pktlen-sizeof(struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -461,7 +461,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -488,22 +488,22 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&(padapter->eeprompriv));
bssid = cur_network->MacAddress;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, mac);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen = pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -609,7 +609,7 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned char *mac;
unsigned char bssrate[NumRates];
@@ -633,31 +633,31 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&(padapter->eeprompriv));
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (da) {
/* unicast probe request frame */
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr3, da);
} else {
/* broadcast probe request frame */
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr3, bc_addr);
}
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr2, mac);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (pssid)
pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
@@ -745,7 +745,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int val32;
u16 val16;
@@ -769,25 +769,27 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_AUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (psta) {/* for AP mode */
#ifdef CONFIG_88EU_AP_MODE
- memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, psta->hwaddr);
+ ether_addr_copy(pwlanhdr->addr2,
+ myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3,
+ myid(&(padapter->eeprompriv)));
/* setting auth algo number */
@@ -825,9 +827,9 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
} else {
__le32 le_tmp32;
__le16 le_tmp16;
- memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
+ ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
/* setting auth algo number */
val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
@@ -866,7 +868,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
SetPrivacy(fctrl);
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->encrypt = _WEP40_;
@@ -889,7 +891,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
struct sta_info *pstat, int pkt_type)
{
struct xmit_frame *pmgntframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct pkt_attrib *pattrib;
unsigned char *pbuf, *pframe;
unsigned short val;
@@ -916,14 +918,15 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
- memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy((void *)GetAddr3Ptr(pwlanhdr), pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr);
+ ether_addr_copy((void *)GetAddr2Ptr(pwlanhdr),
+ myid(&(padapter->eeprompriv)));
+ ether_addr_copy((void *)GetAddr3Ptr(pwlanhdr), pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
@@ -933,7 +936,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
else
return;
- pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->pktlen += pattrib->hdrlen;
pframe += pattrib->hdrlen;
@@ -1017,7 +1020,7 @@ static void issue_assocreq(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe, *p;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int i, j, ie_len, index = 0;
unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
@@ -1040,20 +1043,20 @@ static void issue_assocreq(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ASSOCREQ);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
/* caps */
@@ -1132,23 +1135,23 @@ static void issue_assocreq(struct adapter *padapter)
if (padapter->mlmepriv.htpriv.ht_option) {
p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
- memcpy(&(pmlmeinfo->HT_caps), (p + 2), sizeof(struct HT_caps_element));
+ memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
/* to disable 40M Hz support while gd_bw_40MHz_en = 0 */
if (pregpriv->cbw40_enable == 0)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info &= cpu_to_le16(~(BIT(6) | BIT(1)));
+ pmlmeinfo->HT_caps.cap_info &= cpu_to_le16(~(BIT(6) | BIT(1)));
else
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(BIT(1));
+ pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(BIT(1));
/* todo: disable SM power save mode */
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
+ pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x000c);
rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
switch (rf_type) {
case RF_1T1R:
if (pregpriv->rx_stbc)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
+ pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy((u8 *)&pmlmeinfo->HT_caps.mcs, MCS_rate_1R, 16);
break;
case RF_2T2R:
case RF_1T2R:
@@ -1157,9 +1160,9 @@ static void issue_assocreq(struct adapter *padapter)
((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
(pregpriv->wifi_spec == 1)) {
DBG_88E("declare supporting RX STBC\n");
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
+ pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
}
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R, 16);
+ memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
break;
}
pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
@@ -1212,7 +1215,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
@@ -1239,9 +1242,9 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
@@ -1252,16 +1255,16 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned
if (power_mode)
SetPwrMgt(fctrl);
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -1332,7 +1335,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned short *qc;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -1359,9 +1362,9 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
@@ -1380,16 +1383,16 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
SetAckpolicy(qc, pattrib->ack_policy);
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pframe += sizeof(struct ieee80211_qos_hdr);
+ pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -1457,7 +1460,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -1478,21 +1481,21 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, da, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, da);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_DEAUTH);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
le_tmp = cpu_to_le16(reason);
pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, &le_tmp,
@@ -1559,66 +1562,6 @@ exit:
return ret;
}
-void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
-
-
- DBG_88E(FUNC_NDEV_FMT" ra =%pM, ch:%u, offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev), ra, new_ch, ch_offset);
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
-
- fctrl = &(pwlanhdr->frame_ctl);
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, ra, ETH_ALEN); /* RA */
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN); /* TA */
- memcpy(pwlanhdr->addr3, ra, ETH_ALEN); /* DA = RA */
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
-
- /* category, action */
- {
- u8 category, action;
- category = RTW_WLAN_CATEGORY_SPECTRUM_MGMT;
- action = RTW_WLAN_ACTION_SPCT_CHL_SWITCH;
-
- pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
- pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
- }
-
- pframe = rtw_set_ie_ch_switch(pframe, &(pattrib->pktlen), 0, new_ch, 0);
- pframe = rtw_set_ie_secondary_ch_offset(pframe, &(pattrib->pktlen),
- hal_ch_offset_to_secondary_ch_offset(ch_offset));
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
unsigned char action, unsigned short status)
{
@@ -1633,7 +1576,7 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
u8 *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
@@ -1656,21 +1599,21 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, raddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
@@ -1779,7 +1722,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
struct wlan_network *pnetwork = NULL;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -1815,21 +1758,21 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, cur_network->MacAddress, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, cur_network->MacAddress);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
@@ -2112,7 +2055,7 @@ static u8 collect_bss_info(struct adapter *padapter,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ)
return _FAIL;
@@ -2142,7 +2085,7 @@ static u8 collect_bss_info(struct adapter *padapter,
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* get the signal strength in dBM.raw data */
bssid->Rssi = precv_frame->attrib.phy_info.recvpower;
@@ -2219,7 +2162,7 @@ static u8 collect_bss_info(struct adapter *padapter,
if (subtype == WIFI_PROBEREQ) {
/* FIXME */
bssid->InfrastructureMode = Ndis802_11Infrastructure;
- memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ ether_addr_copy(bssid->MacAddress, GetAddr2Ptr(pframe));
bssid->Privacy = 1;
return _SUCCESS;
}
@@ -2231,10 +2174,10 @@ static u8 collect_bss_info(struct adapter *padapter,
if (val16 & BIT(0)) {
bssid->InfrastructureMode = Ndis802_11Infrastructure;
- memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ ether_addr_copy(bssid->MacAddress, GetAddr2Ptr(pframe));
} else {
bssid->InfrastructureMode = Ndis802_11IBSS;
- memcpy(bssid->MacAddress, GetAddr3Ptr(pframe), ETH_ALEN);
+ ether_addr_copy(bssid->MacAddress, GetAddr3Ptr(pframe));
}
if (val16 & BIT(4))
@@ -2249,10 +2192,10 @@ static u8 collect_bss_info(struct adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
if (p && len > 0) {
- struct HT_caps_element *pHT_caps;
- pHT_caps = (struct HT_caps_element *)(p + 2);
+ struct ieee80211_ht_cap *pHT_caps =
+ (struct ieee80211_ht_cap *)(p + 2);
- if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info)&BIT(14))
+ if (le16_to_cpu(pHT_caps->cap_info) & BIT(14))
pmlmepriv->num_FortyMHzIntolerant++;
} else {
pmlmepriv->num_sta_no_ht++;
@@ -2684,7 +2627,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
}
/* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct rtw_ieee80211_hdr_3addr), len-sizeof(struct rtw_ieee80211_hdr_3addr));
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
update_TSF(pmlmeext, pframe, len);
@@ -3296,13 +3239,15 @@ static unsigned int OnAssocReq(struct adapter *padapter,
}
/* save HT capabilities in the sta object */
- memset(&pstat->htpriv.ht_cap, 0, sizeof(struct rtw_ieee80211_ht_cap));
- if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct rtw_ieee80211_ht_cap)) {
+ memset(&pstat->htpriv.ht_cap, 0, sizeof(struct ieee80211_ht_cap));
+ if (elems.ht_capabilities &&
+ elems.ht_capabilities_len >= sizeof(struct ieee80211_ht_cap)) {
pstat->flags |= WLAN_STA_HT;
pstat->flags |= WLAN_STA_WME;
- memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct rtw_ieee80211_ht_cap));
+ memcpy(&pstat->htpriv.ht_cap,
+ elems.ht_capabilities, sizeof(struct ieee80211_ht_cap));
} else {
pstat->flags &= ~WLAN_STA_HT;
}
@@ -3639,7 +3584,7 @@ static unsigned int on_action_spct(struct adapter *padapter,
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 category;
u8 action;
@@ -3713,7 +3658,7 @@ static unsigned int OnAction_back(struct adapter *padapter,
if (psta == NULL)
return _SUCCESS;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
category = frame_body[0];
if (category == RTW_WLAN_CATEGORY_BACK) { /* representing Block Ack */
@@ -3800,7 +3745,7 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
u8 *pframe = precv_frame->rx_data;
u8 *frame_body;
u8 dialogToken = 0;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
dialogToken = frame_body[7];
@@ -3814,7 +3759,7 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
if (!memcmp(frame_body + 2, P2P_OUI, 4))
ret = on_action_public_p2p(precv_frame);
@@ -3826,7 +3771,7 @@ static unsigned int on_action_public_default(struct recv_frame *precv_frame, u8
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 token;
token = frame_body[2];
@@ -3845,7 +3790,7 @@ static unsigned int on_action_public(struct adapter *padapter,
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
u8 category, action;
/* check RA matches or not */
@@ -3917,7 +3862,7 @@ static unsigned int OnAction(struct adapter *padapter,
unsigned char *frame_body;
u8 *pframe = precv_frame->rx_data;
- frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
category = frame_body[0];
@@ -4295,7 +4240,7 @@ void report_survey_event(struct adapter *padapter,
INIT_LIST_HEAD(&pcmd_obj->list);
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdcode = _Set_MLME_EVT_CMD_;
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
@@ -4304,7 +4249,7 @@ void report_survey_event(struct adapter *padapter,
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct survey_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+ pc2h_evt_hdr->ID = _Survey_EVT_;
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
@@ -4345,7 +4290,7 @@ void report_surveydone_event(struct adapter *padapter)
INIT_LIST_HEAD(&pcmd_obj->list);
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdcode = _Set_MLME_EVT_CMD_;
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
@@ -4354,7 +4299,7 @@ void report_surveydone_event(struct adapter *padapter)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct surveydone_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+ pc2h_evt_hdr->ID = _SurveyDone_EVT_;
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
@@ -4389,7 +4334,7 @@ void report_join_res(struct adapter *padapter, int res)
INIT_LIST_HEAD(&pcmd_obj->list);
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdcode = _Set_MLME_EVT_CMD_;
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
@@ -4398,7 +4343,7 @@ void report_join_res(struct adapter *padapter, int res)
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct joinbss_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+ pc2h_evt_hdr->ID = _JoinBss_EVT_;
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
@@ -4440,7 +4385,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
INIT_LIST_HEAD(&pcmd_obj->list);
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdcode = _Set_MLME_EVT_CMD_;
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
@@ -4449,11 +4394,11 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stadel_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+ pc2h_evt_hdr->ID = _DelSTA_EVT_;
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ ether_addr_copy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr);
memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
@@ -4493,7 +4438,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
INIT_LIST_HEAD(&pcmd_obj->list);
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdcode = _Set_MLME_EVT_CMD_;
pcmd_obj->cmdsz = cmdsz;
pcmd_obj->parmbuf = pevtcmd;
@@ -4502,11 +4447,11 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
pc2h_evt_hdr->len = sizeof(struct stassoc_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+ pc2h_evt_hdr->ID = _AddSTA_EVT_;
pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ ether_addr_copy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr);
padd_sta_evt->cam_id = cam_idx;
DBG_88E("report_add_sta_event: add STA\n");
@@ -4537,7 +4482,7 @@ void update_sta_info(struct adapter *padapter, struct sta_info *psta)
psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
- if (support_short_GI(padapter, &(pmlmeinfo->HT_caps)))
+ if (support_short_GI(padapter, &pmlmeinfo->HT_caps))
psta->htpriv.sgi = true;
psta->qos_option = true;
@@ -4904,7 +4849,7 @@ void survey_timer_hdl(unsigned long data)
goto exit_survey_timer_hdl;
}
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, _SiteSurvey_CMD_);
rtw_enqueue_cmd(pcmdpriv, ph2c);
}
@@ -5480,7 +5425,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
pmlmeinfo->hidden_ssid_mode);
ptxBeacon_parm->IELength += len_diff;
- init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, _TX_Beacon_CMD_);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
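
The frame-building hunks in rtw_mlme_ext.c above replace memcpy(..., ETH_ALEN) with ether_addr_copy(). A sketch of that substitution; the struct and addresses are illustrative, and ether_addr_copy() from <linux/etherdevice.h> requires both buffers to be 2-byte aligned, which the 802.11 header layout used here satisfies.

#include <linux/etherdevice.h>

struct example_hdr {		/* illustrative, not the driver's header */
	u8 addr1[ETH_ALEN];
	u8 addr2[ETH_ALEN];
};

static void example_fill_addrs(struct example_hdr *hdr,
			       const u8 *da, const u8 *sa)
{
	/* Replaces memcpy(hdr->addr1, da, ETH_ALEN) and friends. */
	ether_addr_copy(hdr->addr1, da);
	ether_addr_copy(hdr->addr2, sa);
}
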
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 59c6d8ab60f6..0b70fe7d3b72 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -38,7 +38,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
LeaveAllPowerSaveMode(padapter);
DBG_88E("==> rtw_hw_suspend\n");
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
/* s1. */
if (pnetdev) {
@@ -73,7 +73,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
pwrpriv->rf_pwrstate = rf_off;
pwrpriv->bips_processing = false;
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
return 0;
@@ -90,12 +90,12 @@ static int rtw_hw_resume(struct adapter *padapter)
/* system resume */
DBG_88E("==> rtw_hw_resume\n");
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
if (pm_netdev_open(pnetdev, false) != 0) {
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
goto error_exit;
}
@@ -113,7 +113,7 @@ static int rtw_hw_resume(struct adapter *padapter)
pwrpriv->rf_pwrstate = rf_on;
pwrpriv->bips_processing = false;
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
return 0;
@@ -138,7 +138,7 @@ void ips_enter(struct adapter *padapter)
return;
}
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
@@ -159,7 +159,7 @@ void ips_enter(struct adapter *padapter)
}
pwrpriv->bips_processing = false;
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
}
int ips_leave(struct adapter *padapter)
@@ -171,7 +171,7 @@ int ips_leave(struct adapter *padapter)
int keyid;
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
pwrpriv->bips_processing = true;
@@ -205,7 +205,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->bpower_saving = false;
}
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
return result;
}
@@ -504,7 +504,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
{
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
- _init_pwrlock(&pwrctrlpriv->lock);
+ mutex_init(&pwrctrlpriv->mutex_lock);
pwrctrlpriv->rf_pwrstate = rf_on;
pwrctrlpriv->ips_enter_cnts = 0;
pwrctrlpriv->ips_leave_cnts = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 977bb2532c3e..b87cbbbee054 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -39,7 +39,7 @@ static u8 rtw_rfc1042_header[] = {
0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
-void rtw_signal_stat_timer_hdl(unsigned long data);
+static void rtw_signal_stat_timer_hdl(unsigned long data);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -73,7 +73,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
if (!precvpriv->pallocated_frame_buf)
return _FAIL;
- precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
+ precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
@@ -2088,7 +2088,7 @@ _recv_entry_drop:
return ret;
}
-void rtw_signal_stat_timer_hdl(unsigned long data)
+static void rtw_signal_stat_timer_hdl(unsigned long data)
{
struct adapter *adapter = (struct adapter *)data;
struct recv_priv *recvpriv = &adapter->recvpriv;
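
The rtw_recv.c hunk above replaces a hand-rolled integer-cast alignment macro with the kernel's PTR_ALIGN() helper. A sketch of that idiom; the buffer name, alignment value, and helper are illustrative.

#include <linux/kernel.h>
#include <linux/slab.h>

#define EXAMPLE_ALIGN	8	/* illustrative; the driver uses RXFRAME_ALIGN_SZ */

static u8 *example_aligned_buf(u8 **raw, size_t len, gfp_t gfp)
{
	*raw = kzalloc(len + EXAMPLE_ALIGN, gfp);
	if (!*raw)
		return NULL;

	/* Rounds *raw up to the next EXAMPLE_ALIGN boundary. */
	return PTR_ALIGN(*raw, EXAMPLE_ALIGN);
}
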
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 3fc1a8fd367c..e47be87fb402 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -19,7 +19,6 @@
#include <recv_osdep.h>
#include <xmit_osdep.h>
-
struct ch_freq {
u32 channel;
u32 frequency;
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 442a614a3726..85bb441a7214 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -233,7 +233,6 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
&crc, &payload[length-4]));
}
}
- return;
}
/* 3 ===== TKIP related ===== */
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index 13a5bf4730ab..a198c5779d50 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -16,18 +16,16 @@
#include <rtw_sreset.h>
#include <usb_ops_linux.h>
-void sreset_init_value(struct adapter *padapter)
+void rtw_hal_sreset_init(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+ struct sreset_priv *psrtpriv = &padapter->HalData->srestpriv;
psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
}
u8 sreset_get_wifi_status(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+ struct sreset_priv *psrtpriv = &padapter->HalData->srestpriv;
u8 status = WIFI_STATUS_SUCCESS;
u32 val32 = 0;
@@ -54,6 +52,5 @@ u8 sreset_get_wifi_status(struct adapter *padapter)
void sreset_set_wifi_error_status(struct adapter *padapter, u32 status)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- pHalData->srestpriv.Wifi_Error_Status = status;
+ padapter->HalData->srestpriv.Wifi_Error_Status = status;
}
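Here and throughout the HAL files below, the series drops the GET_HAL_DATA() accessor in favour of dereferencing adapter->HalData directly. A trivial sketch of the pattern with hypothetical stand-in types (the real struct adapter and struct hal_data_8188e carry many more fields):

struct hal_data_sketch {
        unsigned int Wifi_Error_Status;         /* mirrors srestpriv.Wifi_Error_Status above */
};

struct adapter_sketch {
        struct hal_data_sketch *HalData;        /* previously reached via GET_HAL_DATA(adapter) */
};

static void set_wifi_error_status_sketch(struct adapter_sketch *adapter,
                                         unsigned int status)
{
        adapter->HalData->Wifi_Error_Status = status;
}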
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 4410fe8d7c68..2a65ac702129 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -341,9 +341,6 @@ void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigne
{
u8 center_ch;
- if (padapter->bNotifyChannelChange)
- DBG_88E("[%s] ch = %d, offset = %d, bwmode = %d\n", __func__, channel, channel_offset, bwmode);
-
if ((bwmode == HT_CHANNEL_WIDTH_20) ||
(channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)) {
/* SelectChannel(padapter, channel); */
@@ -716,6 +713,7 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ u8 *HT_cap = (u8 *)(&pmlmeinfo->HT_caps);
if (pIE == NULL)
return;
@@ -728,20 +726,20 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
for (i = 0; i < (pIE->Length); i++) {
if (i != 2) {
/* Got the endian issue here. */
- pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
+ HT_cap[i] &= (pIE->data[i]);
} else {
/* modify from fw by Thomas 2010/11/17 */
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
+ if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x3) > (pIE->data[i] & 0x3))
max_AMPDU_len = pIE->data[i] & 0x3;
else
- max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3;
+ max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c;
+ if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) > (pIE->data[i] & 0x1c))
+ min_MPDU_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
else
min_MPDU_spacing = pIE->data[i] & 0x1c;
- pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
+ pmlmeinfo->HT_caps.ampdu_params_info = max_AMPDU_len | min_MPDU_spacing;
}
}
@@ -750,9 +748,9 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
/* update the MCS rates */
for (i = 0; i < 16; i++) {
if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_2R[i];
}
}
@@ -798,9 +796,9 @@ void HTOnAssocRsp(struct adapter *padapter)
AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
AMPDU_para [4:2]:Min MPDU Start Spacing
*/
- max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+ max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
@@ -872,7 +870,6 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
struct HT_info_element *pht_info = NULL;
- struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
@@ -881,7 +878,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
if (is_client_associated_to_ap(Adapter) == false)
return true;
- len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
if (len > MAX_IE_SZ) {
DBG_88E("%s IE too long for survey event\n", __func__);
@@ -907,14 +904,16 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* below is to copy the information element */
bssid->IELength = len;
- memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
/* check bw and channel offset */
/* parsing HT_CAP_IE */
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
- ht_cap_info = pht_cap->cap_info;
+ struct ieee80211_ht_cap *ht_cap =
+ (struct ieee80211_ht_cap *)(p + 2);
+
+ ht_cap_info = le16_to_cpu(ht_cap->cap_info);
} else {
ht_cap_info = 0;
}
@@ -1243,16 +1242,17 @@ unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz)
return mask;
}
-unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps)
+unsigned int update_MSC_rate(struct ieee80211_ht_cap *pHT_caps)
{
unsigned int mask = 0;
- mask = (pHT_caps->u.HT_cap_element.MCS_rate[0] << 12) | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 20);
+ mask = (pHT_caps->mcs.rx_mask[0] << 12) |
+ (pHT_caps->mcs.rx_mask[1] << 20);
return mask;
}
-int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps)
+int support_short_GI(struct adapter *padapter, struct ieee80211_ht_cap *pHT_caps)
{
unsigned char bit_offset;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1266,7 +1266,7 @@ int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps)
bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40) ? 6 : 5;
- if (__le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
+ if (__le16_to_cpu(pHT_caps->cap_info) & (0x1 << bit_offset))
return _SUCCESS;
else
return _FAIL;
@@ -1508,7 +1508,7 @@ void update_wireless_mode(struct adapter *padapter)
SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
/* change this value if having IOT issues. */
- padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+ rtw_hal_set_hwreg(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1584,7 +1584,7 @@ void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
u8 *pIE;
__le32 *pbuf;
- pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
pbuf = (__le32 *)pIE;
pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
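HT_caps_handler() and its helpers now operate on the shared struct ieee80211_ht_cap from <linux/ieee80211.h> instead of the driver's HT_caps_element union, with cap_info converted from little-endian before any bit tests. A small sketch of reading that structure, not part of the patch, using the standard ieee80211.h field names and flag macros:

#include <linux/kernel.h>
#include <linux/ieee80211.h>

/* cap_info is __le16 on the wire, so convert before testing flag bits. */
static bool ht_cap_short_gi_20(const struct ieee80211_ht_cap *ht_cap)
{
        return le16_to_cpu(ht_cap->cap_info) & IEEE80211_HT_CAP_SGI_20;
}

/* ampdu_params_info is a plain u8: bits [1:0] carry the max A-MPDU length
 * exponent, bits [4:2] the minimum MPDU start spacing, matching the comment
 * quoted in HTOnAssocRsp() above. */
static u8 ht_cap_max_ampdu_factor(const struct ieee80211_ht_cap *ht_cap)
{
        return ht_cap->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
}

static u8 ht_cap_min_mpdu_density(const struct ieee80211_ht_cap *ht_cap)
{
        return (ht_cap->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
}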
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index e0a5567f5942..0f8b8e0bffdf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -57,8 +57,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->xmit_sema, 0);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
Please insert all the queue initializaiton using _rtw_init_queue below
@@ -88,7 +86,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
res = _FAIL;
goto exit;
}
- pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4);
+ pxmitpriv->pxmit_frame_buf = PTR_ALIGN(pxmitpriv->pallocated_frame_buf, 4);
/* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
/* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */
@@ -126,7 +124,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
+ pxmitpriv->pxmitbuf = PTR_ALIGN(pxmitpriv->pallocated_xmitbuf, 4);
/* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
/* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */
@@ -144,9 +142,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (res == _FAIL) {
msleep(10);
res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
- if (res == _FAIL) {
+ if (res == _FAIL)
goto exit;
- }
}
pxmitbuf->flags = XMIT_VO_QUEUE;
@@ -168,7 +165,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+ pxmitpriv->pxmit_extbuf = PTR_ALIGN(pxmitpriv->pallocated_xmit_extbuf, 4);
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -199,8 +196,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->txirp_cnt = 1;
- sema_init(&(pxmitpriv->tx_retevt), 0);
-
/* per AC pending irp */
pxmitpriv->beq_cnt = 0;
pxmitpriv->bkq_cnt = 0;
@@ -252,9 +247,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf++;
}
- if (pxmitpriv->pallocated_xmit_extbuf) {
+ if (pxmitpriv->pallocated_xmit_extbuf)
vfree(pxmitpriv->pallocated_xmit_extbuf);
- }
rtw_free_hwxmits(padapter);
@@ -408,7 +402,7 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
_rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
user_prio = ip_hdr.tos >> 5;
- } else if (pattrib->ether_type == 0x888e) {
+ } else if (pattrib->ether_type == ETH_P_PAE) {
/* "When priority processing of data frames is supported, */
/* a STA's SME should send EAPOL-Key frames at the highest priority." */
user_prio = 7;
@@ -457,14 +451,14 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->pktlen = pktfile.pkt_len;
- if (ETH_P_IP == pattrib->ether_type) {
+ if (pattrib->ether_type == ETH_P_IP) {
/* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */
/* to prevent DHCP protocol fail */
u8 tmp[24];
_rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
- if (ETH_P_IP == pattrib->ether_type) {/* IP header */
+ if (pattrib->ether_type == ETH_P_IP) {/* IP header */
if (((tmp[21] == 68) && (tmp[23] == 67)) ||
((tmp[21] == 67) && (tmp[23] == 68))) {
/* 68 : UDP BOOTP client */
@@ -475,15 +469,15 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
}
}
- } else if (0x888e == pattrib->ether_type) {
+ } else if (pattrib->ether_type == ETH_P_PAE) {
DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
}
- if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ if ((pattrib->ether_type == ETH_P_PAE) || (pattrib->dhcp_pkt == 1))
rtw_set_scan_deny(padapter, 3000);
/* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
- if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ if ((pattrib->ether_type == ETH_P_ARP) || (pattrib->ether_type == ETH_P_PAE) || (pattrib->dhcp_pkt == 1))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
bmcast = IS_MCAST(pattrib->ra);
@@ -515,8 +509,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
pattrib->ack_policy = 0;
- /* get ether_hdr_len */
- pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
pattrib->hdrlen = WLAN_HDR_A3_LEN;
pattrib->subtype = WIFI_DATA_TYPE;
@@ -539,8 +531,8 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
pattrib->encrypt = 0;
- if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n", pattrib->ether_type));
+ if ((pattrib->ether_type != ETH_P_PAE) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != ETH_P_PAE\n", pattrib->ether_type));
res = _FAIL;
goto exit;
}
@@ -769,13 +761,13 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
{
u16 *qc;
- struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
u8 qos_option = false;
int res = _SUCCESS;
- __le16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_control;
struct sta_info *psta;
@@ -785,11 +777,10 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (pattrib->psta) {
psta = pattrib->psta;
} else {
- if (bmcst) {
+ if (bmcst)
psta = rtw_get_bcmc_stainfo(padapter);
- } else {
+ else
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
- }
}
memset(hdr, 0, WLANHDR_OFFSET);
@@ -999,7 +990,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
}
_rtw_open_pktfile(pkt, &pktfile);
- _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+ _rtw_pktfile_read(&pktfile, NULL, ETH_HLEN);
frg_inx = 0;
frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
@@ -1054,9 +1045,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
mpdu_len -= llc_sz;
}
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
mpdu_len -= pattrib->icv_len;
- }
if (bmcst) {
/* don't do fragment to broadcat/multicast packets */
@@ -1560,11 +1550,10 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
int res = _SUCCESS;
- if (pattrib->psta) {
+ if (pattrib->psta)
psta = pattrib->psta;
- } else {
+ else
psta = rtw_get_stainfo(pstapriv, pattrib->ra);
- }
if (psta == NULL) {
res = _FAIL;
@@ -1658,16 +1647,6 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
return addr;
}
-static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("### do_queue_select priority=%d , qsel = %d\n", pattrib->priority, qsel));
-
- pattrib->qsel = qsel;
-}
-
/*
* The main transmit(tx) entry
*
@@ -1700,7 +1679,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
rtw_led_control(padapter, LED_CTL_TX);
- do_queue_select(padapter, &pxmitframe->attrib);
+ pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
#ifdef CONFIG_88EU_AP_MODE
spin_lock_bh(&pxmitpriv->lock);
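update_attrib() above now compares pattrib->ether_type against the named EtherType constants from <linux/if_ether.h> rather than raw numbers; the existing comparisons against ETH_P_IP imply the field is already in host byte order. A short sketch of the same classification, not part of the patch:

#include <linux/types.h>
#include <linux/if_ether.h>

/* ether_type in host byte order (i.e. after be16_to_cpu()/ntohs()). */
static bool needs_active_mode(u16 ether_type, bool is_dhcp)
{
        return ether_type == ETH_P_ARP ||       /* 0x0806 */
               ether_type == ETH_P_PAE ||       /* 0x888E, EAPOL */
               is_dhcp;                         /* DHCP detected from the UDP ports */
}

ETH_P_IP (0x0800) is still checked separately in the hunk above so the DHCP port test only runs on IPv4 frames.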
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index cce1ea259b76..134fa6c595a8 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -498,7 +498,7 @@ static u32 array_phy_reg_pg_8188e[] = {
static void store_pwrindex_offset(struct adapter *adapter,
u32 regaddr, u32 bitmask, u32 data)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapter);
+ struct hal_data_8188e *hal_data = adapter->HalData;
u32 * const power_level_offset =
hal_data->MCSTxPowerLevelOriginalOffset[hal_data->pwrGroupCnt];
@@ -518,8 +518,7 @@ static void store_pwrindex_offset(struct adapter *adapter,
power_level_offset[4] = data;
if (regaddr == rTxAGC_A_Mcs15_Mcs12) {
power_level_offset[5] = data;
- if (hal_data->rf_type == RF_1T1R)
- hal_data->pwrGroupCnt++;
+ hal_data->pwrGroupCnt++;
}
if (regaddr == rTxAGC_B_Rate18_06)
power_level_offset[8] = data;
@@ -537,8 +536,6 @@ static void store_pwrindex_offset(struct adapter *adapter,
power_level_offset[12] = data;
if (regaddr == rTxAGC_B_Mcs15_Mcs12) {
power_level_offset[13] = data;
- if (hal_data->rf_type != RF_1T1R)
- hal_data->pwrGroupCnt++;
}
}
@@ -588,11 +585,10 @@ static bool config_bb_with_pgheader(struct adapter *adapt)
static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapter);
struct bb_reg_def *reg[4];
- reg[RF_PATH_A] = &hal_data->PHYRegDef[RF_PATH_A];
- reg[RF_PATH_B] = &hal_data->PHYRegDef[RF_PATH_B];
+ reg[RF_PATH_A] = &adapter->HalData->PHYRegDef[RF_PATH_A];
+ reg[RF_PATH_B] = &adapter->HalData->PHYRegDef[RF_PATH_B];
reg[RF_PATH_A]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
reg[RF_PATH_B]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
@@ -652,13 +648,12 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter)
static bool config_parafile(struct adapter *adapt)
{
struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
set_baseband_phy_config(adapt);
/* If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
if (!eeprom->bautoload_fail_flag) {
- hal_data->pwrGroupCnt = 0;
+ adapt->HalData->pwrGroupCnt = 0;
config_bb_with_pgheader(adapt);
}
set_baseband_agc_config(adapt);
@@ -668,7 +663,6 @@ static bool config_parafile(struct adapter *adapt)
bool rtl88eu_phy_bb_config(struct adapter *adapt)
{
int rtstatus = true;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
u32 regval;
u8 crystal_cap;
@@ -688,7 +682,7 @@ bool rtl88eu_phy_bb_config(struct adapter *adapt)
rtstatus = config_parafile(adapt);
/* write 0x24[16:11] = 0x24[22:17] = crystal_cap */
- crystal_cap = hal_data->CrystalCap & 0x3F;
+ crystal_cap = adapt->HalData->CrystalCap & 0x3F;
phy_set_bb_reg(adapt, REG_AFE_XTAL_CTRL, 0x7ff800,
(crystal_cap | (crystal_cap << 6)));
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 085f0fbd0c43..a11c7b4254f6 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -17,62 +17,6 @@
#include <osdep_service.h>
#include <drv_types.h>
#include <hal_intf.h>
-#include <usb_hal.h>
-
-void rtw_hal_chip_configure(struct adapter *adapt)
-{
- if (adapt->HalFunc.intf_chip_configure)
- adapt->HalFunc.intf_chip_configure(adapt);
-}
-
-void rtw_hal_read_chip_info(struct adapter *adapt)
-{
- if (adapt->HalFunc.read_adapter_info)
- adapt->HalFunc.read_adapter_info(adapt);
-}
-
-void rtw_hal_read_chip_version(struct adapter *adapt)
-{
- if (adapt->HalFunc.read_chip_version)
- adapt->HalFunc.read_chip_version(adapt);
-}
-
-void rtw_hal_def_value_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_default_value)
- adapt->HalFunc.init_default_value(adapt);
-}
-
-void rtw_hal_free_data(struct adapter *adapt)
-{
- if (adapt->HalFunc.free_hal_data)
- adapt->HalFunc.free_hal_data(adapt);
-}
-
-void rtw_hal_dm_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.dm_init)
- adapt->HalFunc.dm_init(adapt);
-}
-
-void rtw_hal_sw_led_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.InitSwLeds)
- adapt->HalFunc.InitSwLeds(adapt);
-}
-
-void rtw_hal_sw_led_deinit(struct adapter *adapt)
-{
- if (adapt->HalFunc.DeInitSwLeds)
- adapt->HalFunc.DeInitSwLeds(adapt);
-}
-
-u32 rtw_hal_power_on(struct adapter *adapt)
-{
- if (adapt->HalFunc.hal_power_on)
- return adapt->HalFunc.hal_power_on(adapt);
- return _FAIL;
-}
uint rtw_hal_init(struct adapter *adapt)
{
@@ -80,15 +24,13 @@ uint rtw_hal_init(struct adapter *adapt)
adapt->hw_init_completed = false;
- status = adapt->HalFunc.hal_init(adapt);
+ status = rtl8188eu_hal_init(adapt);
if (status == _SUCCESS) {
adapt->hw_init_completed = true;
if (adapt->registrypriv.notch_filter == 1)
rtw_hal_notch_filter(adapt, 1);
-
- rtw_hal_reset_security_engine(adapt);
} else {
adapt->hw_init_completed = false;
DBG_88E("rtw_hal_init: hal__init fail\n");
@@ -104,7 +46,7 @@ uint rtw_hal_deinit(struct adapter *adapt)
{
uint status = _SUCCESS;
- status = adapt->HalFunc.hal_deinit(adapt);
+ status = rtl8188eu_hal_deinit(adapt);
if (status == _SUCCESS)
adapt->hw_init_completed = false;
@@ -114,92 +56,6 @@ uint rtw_hal_deinit(struct adapter *adapt)
return status;
}
-void rtw_hal_set_hwreg(struct adapter *adapt, u8 variable, u8 *val)
-{
- if (adapt->HalFunc.SetHwRegHandler)
- adapt->HalFunc.SetHwRegHandler(adapt, variable, val);
-}
-
-void rtw_hal_get_hwreg(struct adapter *adapt, u8 variable, u8 *val)
-{
- if (adapt->HalFunc.GetHwRegHandler)
- adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
-}
-
-u8 rtw_hal_get_def_var(struct adapter *adapt,
- enum hal_def_variable var, void *val)
-{
- if (adapt->HalFunc.GetHalDefVarHandler)
- return adapt->HalFunc.GetHalDefVarHandler(adapt, var, val);
- return _FAIL;
-}
-
-void rtw_hal_set_odm_var(struct adapter *adapt,
- enum hal_odm_variable var, void *val1,
- bool set)
-{
- if (adapt->HalFunc.SetHalODMVarHandler)
- adapt->HalFunc.SetHalODMVarHandler(adapt, var,
- val1, set);
-}
-
-u32 rtw_hal_inirp_init(struct adapter *adapt)
-{
- u32 rst = _FAIL;
-
- if (adapt->HalFunc.inirp_init)
- rst = adapt->HalFunc.inirp_init(adapt);
- else
- DBG_88E(" %s HalFunc.inirp_init is NULL!!!\n", __func__);
- return rst;
-}
-
-u32 rtw_hal_inirp_deinit(struct adapter *adapt)
-{
- if (adapt->HalFunc.inirp_deinit)
- return adapt->HalFunc.inirp_deinit(adapt);
-
- return _FAIL;
-}
-
-s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- if (adapt->HalFunc.hal_xmit)
- return adapt->HalFunc.hal_xmit(adapt, pxmitframe);
-
- return false;
-}
-
-s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
-{
- s32 ret = _FAIL;
-
- if (adapt->HalFunc.mgnt_xmit)
- ret = adapt->HalFunc.mgnt_xmit(adapt, pmgntframe);
- return ret;
-}
-
-s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_xmit_priv)
- return adapt->HalFunc.init_xmit_priv(adapt);
- return _FAIL;
-}
-
-s32 rtw_hal_init_recv_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_recv_priv)
- return adapt->HalFunc.init_recv_priv(adapt);
-
- return _FAIL;
-}
-
-void rtw_hal_free_recv_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.free_recv_priv)
- adapt->HalFunc.free_recv_priv(adapt);
-}
-
void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
struct mlme_priv *pmlmepriv = &(adapt->mlmepriv);
@@ -215,86 +71,6 @@ void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
#endif
} else {
- if (adapt->HalFunc.UpdateRAMaskHandler)
- adapt->HalFunc.UpdateRAMaskHandler(adapt, mac_id,
- rssi_level);
+ UpdateHalRAMask8188EUsb(adapt, mac_id, rssi_level);
}
}
-
-void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg,
- u8 rssi_level)
-{
- if (adapt->HalFunc.Add_RateATid)
- adapt->HalFunc.Add_RateATid(adapt, bitmap, arg,
- rssi_level);
-}
-
-u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
- u32 regaddr, u32 bitmask)
-{
- u32 data = 0;
-
- if (adapt->HalFunc.read_rfreg)
- data = adapt->HalFunc.read_rfreg(adapt, rfpath, regaddr,
- bitmask);
- return data;
-}
-
-void rtw_hal_set_bwmode(struct adapter *adapt,
- enum ht_channel_width bandwidth, u8 offset)
-{
- if (adapt->HalFunc.set_bwmode_handler)
- adapt->HalFunc.set_bwmode_handler(adapt, bandwidth,
- offset);
-}
-
-void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
-{
- if (adapt->HalFunc.set_channel_handler)
- adapt->HalFunc.set_channel_handler(adapt, channel);
-}
-
-void rtw_hal_dm_watchdog(struct adapter *adapt)
-{
- if (adapt->HalFunc.hal_dm_watchdog)
- adapt->HalFunc.hal_dm_watchdog(adapt);
-}
-
-void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
-{
- if (adapt->HalFunc.SetBeaconRelatedRegistersHandler)
- adapt->HalFunc.SetBeaconRelatedRegistersHandler(adapt);
-}
-
-u8 rtw_hal_antdiv_before_linked(struct adapter *adapt)
-{
- if (adapt->HalFunc.AntDivBeforeLinkHandler)
- return adapt->HalFunc.AntDivBeforeLinkHandler(adapt);
- return false;
-}
-
-void rtw_hal_antdiv_rssi_compared(struct adapter *adapt,
- struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src)
-{
- if (adapt->HalFunc.AntDivCompareHandler)
- adapt->HalFunc.AntDivCompareHandler(adapt, dst, src);
-}
-
-void rtw_hal_sreset_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.sreset_init_value)
- adapt->HalFunc.sreset_init_value(adapt);
-}
-
-void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
-{
- if (adapter->HalFunc.hal_notch_filter)
- adapter->HalFunc.hal_notch_filter(adapter, enable);
-}
-
-void rtw_hal_reset_security_engine(struct adapter *adapter)
-{
- if (adapter->HalFunc.hal_reset_security_engine)
- adapter->HalFunc.hal_reset_security_engine(adapter);
-}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 57a127501694..d983a8029f4c 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -226,150 +226,6 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
odm_EdcaTurboCheck(pDM_Odm);
}
-/* Init /.. Fixed HW value. Only init time. */
-void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
-{
- /* This section is used for init value */
- switch (CmnInfo) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_PLATFORM:
- pDM_Odm->SupportPlatform = (u8)Value;
- break;
- case ODM_CMNINFO_INTERFACE:
- pDM_Odm->SupportInterface = (u8)Value;
- break;
- case ODM_CMNINFO_MP_TEST_CHIP:
- pDM_Odm->bIsMPChip = (u8)Value;
- break;
- case ODM_CMNINFO_IC_TYPE:
- pDM_Odm->SupportICType = Value;
- break;
- case ODM_CMNINFO_CUT_VER:
- pDM_Odm->CutVersion = (u8)Value;
- break;
- case ODM_CMNINFO_RF_TYPE:
- pDM_Odm->RFType = (u8)Value;
- break;
- case ODM_CMNINFO_RF_ANTENNA_TYPE:
- pDM_Odm->AntDivType = (u8)Value;
- break;
- case ODM_CMNINFO_BOARD_TYPE:
- pDM_Odm->BoardType = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_LNA:
- pDM_Odm->ExtLNA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_PA:
- pDM_Odm->ExtPA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_TRSW:
- pDM_Odm->ExtTRSW = (u8)Value;
- break;
- case ODM_CMNINFO_PATCH_ID:
- pDM_Odm->PatchID = (u8)Value;
- break;
- case ODM_CMNINFO_BINHCT_TEST:
- pDM_Odm->bInHctTest = (bool)Value;
- break;
- case ODM_CMNINFO_BWIFI_TEST:
- pDM_Odm->bWIFITest = (bool)Value;
- break;
- case ODM_CMNINFO_SMART_CONCURRENT:
- pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
- pDM_Odm->BbSwingIdxOfdm = 12; /* Set defalut value as index 12. */
- pDM_Odm->BbSwingIdxOfdmCurrent = 12;
- pDM_Odm->BbSwingFlagOfdm = false;
-}
-
-void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
-{
- /* */
- /* Hook call by reference pointer. */
- /* */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_MAC_PHY_MODE:
- pDM_Odm->pMacPhyMode = (u8 *)pValue;
- break;
- case ODM_CMNINFO_TX_UNI:
- pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_RX_UNI:
- pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_WM_MODE:
- pDM_Odm->pWirelessMode = (u8 *)pValue;
- break;
- case ODM_CMNINFO_BAND:
- pDM_Odm->pBandType = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_CHNL_OFFSET:
- pDM_Odm->pSecChOffset = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_MODE:
- pDM_Odm->pSecurity = (u8 *)pValue;
- break;
- case ODM_CMNINFO_BW:
- pDM_Odm->pBandWidth = (u8 *)pValue;
- break;
- case ODM_CMNINFO_CHNL:
- pDM_Odm->pChannel = (u8 *)pValue;
- break;
- case ODM_CMNINFO_DMSP_GET_VALUE:
- pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
- break;
- case ODM_CMNINFO_BUDDY_ADAPTOR:
- pDM_Odm->pBuddyAdapter = (struct adapter **)pValue;
- break;
- case ODM_CMNINFO_DMSP_IS_MASTER:
- pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
- break;
- case ODM_CMNINFO_SCAN:
- pDM_Odm->pbScanInProcess = (bool *)pValue;
- break;
- case ODM_CMNINFO_POWER_SAVING:
- pDM_Odm->pbPowerSaving = (bool *)pValue;
- break;
- case ODM_CMNINFO_ONE_PATH_CCA:
- pDM_Odm->pOnePathCCA = (u8 *)pValue;
- break;
- case ODM_CMNINFO_DRV_STOP:
- pDM_Odm->pbDriverStopped = (bool *)pValue;
- break;
- case ODM_CMNINFO_PNP_IN:
- pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
- break;
- case ODM_CMNINFO_INIT_ON:
- pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
- break;
- case ODM_CMNINFO_ANT_TEST:
- pDM_Odm->pAntennaTest = (u8 *)pValue;
- break;
- case ODM_CMNINFO_NET_CLOSED:
- pDM_Odm->pbNet_closed = (bool *)pValue;
- break;
- case ODM_CMNINFO_MP_MODE:
- pDM_Odm->mp_mode = (u8 *)pValue;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
-
void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u16 Index, void *pValue)
{
/* Hook call by reference pointer. */
@@ -385,46 +241,6 @@ void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info
}
}
-/* Update Band/CHannel/.. The values are dynamic but non-per-packet. */
-void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
-{
- /* */
- /* This init variable may be changed in run time. */
- /* */
- switch (CmnInfo) {
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_RF_TYPE:
- pDM_Odm->RFType = (u8)Value;
- break;
- case ODM_CMNINFO_WIFI_DIRECT:
- pDM_Odm->bWIFI_Direct = (bool)Value;
- break;
- case ODM_CMNINFO_WIFI_DISPLAY:
- pDM_Odm->bWIFI_Display = (bool)Value;
- break;
- case ODM_CMNINFO_LINK:
- pDM_Odm->bLinked = (bool)Value;
- break;
- case ODM_CMNINFO_RSSI_MIN:
- pDM_Odm->RSSI_Min = (u8)Value;
- break;
- case ODM_CMNINFO_DBG_COMP:
- pDM_Odm->DebugComponents = Value;
- break;
- case ODM_CMNINFO_DBG_LEVEL:
- pDM_Odm->DebugLevel = (u32)Value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_HIGH:
- pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_LOW:
- pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
- break;
- }
-}
-
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *adapter = pDM_Odm->Adapter;
@@ -469,7 +285,6 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType=%d\n", pDM_Odm->RFType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA=%d\n", pDM_Odm->ExtPA));
@@ -947,37 +762,21 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
break;
case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
- if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
} else {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x0f8f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x0f8ff000;
- } else {
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
- rate_bitmap = 0x0f8ff015;
- else
- rate_bitmap = 0x0f8ff005;
- }
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
}
break;
default:
/* case WIRELESS_11_24N: */
/* case WIRELESS_11_5N: */
- if (pDM_Odm->RFType == RF_1T2R)
- rate_bitmap = 0x000fffff;
- else
- rate_bitmap = 0x0fffffff;
+ rate_bitmap = 0x0fffffff;
break;
}
@@ -1096,8 +895,7 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
pdmpriv->bDynamicTxPowerEnable = false;
pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
@@ -1122,8 +920,7 @@ void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
static void FindMinimumRSSI(struct adapter *pAdapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_priv *pdmpriv = &pAdapter->HalData->dmpriv;
/* 1 1.Unconditionally set RSSI */
pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
@@ -1132,8 +929,7 @@ static void FindMinimumRSSI(struct adapter *pAdapter)
void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
int i;
int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
u8 sta_cnt = 0;
@@ -1162,7 +958,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
for (i = 0; i < sta_cnt; i++) {
if (PWDB_rssi[i] != 0) {
- ODM_RA_SetRSSI_8188E(&pHalData->odmpriv,
+ ODM_RA_SetRSSI_8188E(&Adapter->HalData->odmpriv,
PWDB_rssi[i] & 0xFF,
(PWDB_rssi[i] >> 16) & 0xFF);
}
@@ -1179,8 +975,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
FindMinimumRSSI(Adapter);
- ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN,
- pdmpriv->MinUndecoratedPWDBForDM);
+ Adapter->HalData->odmpriv.RSSI_Min = pdmpriv->MinUndecoratedPWDBForDM;
}
/* 3============================================================ */
@@ -1290,7 +1085,6 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
u64 cur_tx_bytes = 0;
u64 cur_rx_bytes = 0;
u8 bbtchange = false;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
struct xmit_priv *pxmitpriv = &(Adapter->xmitpriv);
struct recv_priv *precvpriv = &(Adapter->recvpriv);
struct registry_priv *pregpriv = &Adapter->registrypriv;
@@ -1344,7 +1138,8 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
/* Turn Off EDCA turbo here. */
/* Restore original EDCA according to the declaration of AP. */
if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
- usb_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
+ usb_write32(Adapter, REG_EDCA_BE_PARAM,
+ Adapter->HalData->AcParam_BE);
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
}
}
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index a83bbea9be93..5192ef70bcfc 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -65,8 +65,7 @@ static u32 rf_serial_read(struct adapter *adapt,
enum rf_radio_path rfpath, u32 offset)
{
u32 ret = 0;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct bb_reg_def *phyreg = &hal_data->PHYRegDef[rfpath];
+ struct bb_reg_def *phyreg = &adapt->HalData->PHYRegDef[rfpath];
u32 tmplong, tmplong2;
u8 rfpi_enable = 0;
@@ -110,15 +109,14 @@ static void rf_serial_write(struct adapter *adapt,
u32 data)
{
u32 data_and_addr = 0;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct bb_reg_def *phyreg = &hal_data->PHYRegDef[rfpath];
+ struct bb_reg_def *phyreg = &adapt->HalData->PHYRegDef[rfpath];
offset &= 0xff;
data_and_addr = ((offset<<20) | (data&0x000fffff)) & 0x0fffffff;
phy_set_bb_reg(adapt, phyreg->rf3wireOffset, bMaskDWord, data_and_addr);
}
-u32 phy_query_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
+u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
u32 reg_addr, u32 bit_mask)
{
u32 original_value, readback_value, bit_shift;
@@ -147,14 +145,11 @@ void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr,
u8 *ofdm_pwr, u8 *bw20_pwr, u8 *bw40_pwr)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
u8 index = (channel - 1);
u8 TxCount = 0, path_nums;
- if ((RF_1T2R == hal_data->rf_type) || (RF_1T1R == hal_data->rf_type))
- path_nums = 1;
- else
- path_nums = 2;
+ path_nums = 1;
for (TxCount = 0; TxCount < path_nums; TxCount++) {
if (TxCount == RF_PATH_A) {
@@ -183,7 +178,7 @@ static void phy_power_index_check(struct adapter *adapt, u8 channel,
u8 *cck_pwr, u8 *ofdm_pwr, u8 *bw20_pwr,
u8 *bw40_pwr)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
hal_data->CurrentCckTxPwrIdx = cck_pwr[0];
hal_data->CurrentOfdm24GTxPwrIdx = ofdm_pwr[0];
@@ -211,7 +206,7 @@ void phy_set_tx_power_level(struct adapter *adapt, u8 channel)
static void phy_set_bw_mode_callback(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
@@ -274,10 +269,10 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
}
-void phy_set_bw_mode(struct adapter *adapt, enum ht_channel_width bandwidth,
+void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
unsigned char offset)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
enum ht_channel_width tmp_bw = hal_data->CurrentChannelBW;
hal_data->CurrentChannelBW = bandwidth;
@@ -293,10 +288,7 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
{
u8 rf_path;
u32 param1, param2;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
-
- if (adapt->bNotifyChannelChange)
- DBG_88E("[%s] ch = %d\n", __func__, channel);
+ struct hal_data_8188e *hal_data = adapt->HalData;
phy_set_tx_power_level(adapt, channel);
@@ -310,9 +302,9 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
}
}
-void phy_sw_chnl(struct adapter *adapt, u8 channel)
+void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
u8 tmpchannel = hal_data->CurrentChannel;
if (hal_data->rf_chip == RF_PSEUDO_11N)
@@ -407,7 +399,7 @@ static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
u8 thermal_val = 0, delta, delta_lck, delta_iqk, offset;
u8 thermal_avg_count = 0;
u32 thermal_avg = 0;
@@ -439,7 +431,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
- thermal_val = (u8)phy_query_rf_reg(adapt, RF_PATH_A,
+ thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A,
RF_T_METER_88E, 0xfc00);
if (is2t)
@@ -632,8 +624,7 @@ static u8 phy_path_a_rx_iqk(struct adapter *adapt, bool configPathB)
{
u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u4tmp;
u8 result = 0x00;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
@@ -737,8 +728,7 @@ static u8 phy_path_b_iqk(struct adapter *adapt)
{
u32 regeac, regeb4, regebc, regec4, regecc;
u8 result = 0x00;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
/* One shot, path B LOK & IQK */
phy_set_bb_reg(adapt, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
@@ -954,17 +944,11 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
u8 c1, u8 c2)
{
u32 i, j, diff, sim_bitmap = 0, bound;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
bool result = true;
s32 tmp1 = 0, tmp2 = 0;
- if ((dm_odm->RFType == ODM_2T2R) || (dm_odm->RFType == ODM_2T3R) ||
- (dm_odm->RFType == ODM_2T4R))
- bound = 8;
- else
- bound = 4;
+ bound = 4;
for (i = 0; i < bound; i++) {
if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) {
@@ -1033,8 +1017,7 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
u8 t, bool is2t)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
u32 i;
u8 path_a_ok, path_b_ok;
u32 adda_reg[IQK_ADDA_REG_NUM] = {
@@ -1232,12 +1215,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
if ((tmpreg&0x70) != 0) {
/* 1. Read original RF mode */
/* Path-A */
- rf_a_mode = phy_query_rf_reg(adapt, RF_PATH_A, RF_AC,
+ rf_a_mode = rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_AC,
bMask12Bits);
/* Path-B */
if (is2t)
- rf_b_mode = phy_query_rf_reg(adapt, RF_PATH_B, RF_AC,
+ rf_b_mode = rtw_hal_read_rfreg(adapt, RF_PATH_B, RF_AC,
bMask12Bits);
/* 2. Set RF mode = standby mode */
@@ -1252,7 +1235,7 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
}
/* 3. Read RF reg18 */
- lc_cal = phy_query_rf_reg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+ lc_cal = rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
/* 4. Set LC calibration begin bit15 */
phy_set_rf_reg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits,
@@ -1279,8 +1262,7 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
void rtl88eu_phy_iq_calibrate(struct adapter *adapt, bool recovery)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
s32 result[4][8];
u8 i, final, chn_index;
bool pathaok, pathbok;
@@ -1295,7 +1277,7 @@ void rtl88eu_phy_iq_calibrate(struct adapter *adapt, bool recovery)
rOFDM0_RxIQExtAnta};
bool is2t;
- is2t = (dm_odm->RFType == ODM_2T2R) ? true : false;
+ is2t = false;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
return;
@@ -1391,7 +1373,7 @@ void rtl88eu_phy_iq_calibrate(struct adapter *adapt, bool recovery)
(reg_ec4 == 0));
}
- chn_index = get_right_chnl_for_iqk(hal_data->CurrentChannel);
+ chn_index = get_right_chnl_for_iqk(adapt->HalData->CurrentChannel);
if (final < 4) {
for (i = 0; i < IQK_Matrix_REG_NUM; i++)
@@ -1407,8 +1389,7 @@ void rtl88eu_phy_lc_calibrate(struct adapter *adapt)
{
bool singletone = false, carrier_sup = false;
u32 timeout = 2000, timecount = 0;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
return;
@@ -1422,12 +1403,7 @@ void rtl88eu_phy_lc_calibrate(struct adapter *adapt)
dm_odm->RFCalibrateInfo.bLCKInProgress = true;
- if (dm_odm->RFType == ODM_2T2R) {
- phy_lc_calibrate(adapt, true);
- } else {
- /* For 88C 1T1R */
- phy_lc_calibrate(adapt, false);
- }
+ phy_lc_calibrate(adapt, false);
dm_odm->RFCalibrateInfo.bLCKInProgress = false;
}
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 1596274eefc5..2f3edf0f850a 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -22,7 +22,7 @@
void rtl88eu_phy_rf6052_set_bandwidth(struct adapter *adapt,
enum ht_channel_width bandwidth)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
switch (bandwidth) {
case HT_CHANNEL_WIDTH_20:
@@ -44,7 +44,7 @@ void rtl88eu_phy_rf6052_set_bandwidth(struct adapter *adapt,
void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
u32 tx_agc[2] = {0, 0}, tmpval = 0, pwrtrac_value;
@@ -129,7 +129,6 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
u8 *pwr_level_bw20, u8 *pwr_level_bw40,
u8 channel, u32 *ofdmbase, u32 *mcs_base)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
u32 powerbase0, powerbase1;
u8 i, powerlevel[2];
@@ -140,9 +139,9 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
(powerbase0<<8) | powerbase0;
*(ofdmbase+i) = powerbase0;
}
- for (i = 0; i < hal_data->NumTotalRFPath; i++) {
+ for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) {
/* Check HT20 to HT40 diff */
- if (hal_data->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
powerlevel[i] = pwr_level_bw20[i];
else
powerlevel[i] = pwr_level_bw40[i];
@@ -156,7 +155,7 @@ static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
u8 index, u32 *powerbase0, u32 *powerbase1,
u32 *out_val)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
s8 pwr_diff = 0;
@@ -286,7 +285,6 @@ void rtl88eu_phy_rf6052_set_ofdm_txpower(struct adapter *adapt,
u8 *pwr_level_bw20,
u8 *pwr_level_bw40, u8 channel)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
u32 write_val[2], powerbase0[2], powerbase1[2], pwrtrac_value;
u8 direction;
u8 index = 0;
@@ -294,8 +292,8 @@ void rtl88eu_phy_rf6052_set_ofdm_txpower(struct adapter *adapt,
getpowerbase88e(adapt, pwr_level_ofdm, pwr_level_bw20, pwr_level_bw40,
channel, &powerbase0[0], &powerbase1[0]);
- rtl88eu_dm_txpower_track_adjust(&hal_data->odmpriv, 0, &direction,
- &pwrtrac_value);
+ rtl88eu_dm_txpower_track_adjust(&adapt->HalData->odmpriv, 0,
+ &direction, &pwrtrac_value);
for (index = 0; index < 6; index++) {
get_rx_power_val_by_reg(adapt, channel, index,
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 453f9e729067..dde64417e66a 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -19,7 +19,7 @@
static bool check_condition(struct adapter *adapt, const u32 condition)
{
- struct odm_dm_struct *odm = &GET_HAL_DATA(adapt)->odmpriv;
+ struct odm_dm_struct *odm = &adapt->HalData->odmpriv;
u32 _board = odm->BoardType;
u32 _platform = odm->SupportPlatform;
u32 _interface = odm->SupportInterface;
@@ -228,7 +228,7 @@ static bool rtl88e_phy_config_rf_with_headerfile(struct adapter *adapt)
static bool rf6052_conf_para(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
u32 u4val = 0;
u8 rfpath;
bool rtstatus = true;
@@ -299,12 +299,9 @@ static bool rf6052_conf_para(struct adapter *adapt)
static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *hal_data = adapt->HalData;
- if (hal_data->rf_type == RF_1T1R)
- hal_data->NumTotalRFPath = 1;
- else
- hal_data->NumTotalRFPath = 2;
+ hal_data->NumTotalRFPath = 1;
return rf6052_conf_para(adapt);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 2422c0297a50..d0f59b7836f1 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -60,7 +60,6 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
u8 cmd_idx, ext_cmd_len;
u32 h2c_cmd = 0;
u32 h2c_cmd_ex = 0;
@@ -81,7 +80,7 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
/* pay attention to if race condition happened in H2C cmd setting. */
do {
- h2c_box_num = haldata->LastHMEBoxNum;
+ h2c_box_num = adapt->HalData->LastHMEBoxNum;
if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
DBG_88E(" fw read cmd failed...\n");
@@ -110,7 +109,8 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
}
bcmd_down = true;
- haldata->LastHMEBoxNum = (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+ adapt->HalData->LastHMEBoxNum =
+ (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
} while ((!bcmd_down) && (retry_cnts--));
@@ -126,9 +126,9 @@ exit:
/* bitmap[28:31]= Rate Adaptive id */
/* arg[0:4] = macid */
/* arg[5] = Short GI */
-void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
+void rtw_hal_add_ra_tid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(pAdapter);
+ struct odm_dm_struct *odmpriv = &pAdapter->HalData->odmpriv;
u8 macid, init_rate, raid, shortGIrate = false;
@@ -138,7 +138,7 @@ void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi
bitmap &= 0x0fffffff;
if (rssi_level != DM_RATR_STA_INIT)
- bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, macid, bitmap, rssi_level);
+ bitmap = ODM_Get_Rate_Bitmap(odmpriv, macid, bitmap, rssi_level);
bitmap |= ((raid<<28)&0xf0000000);
@@ -156,7 +156,7 @@ void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi
DBG_88E("%s=> mac_id:%d, raid:%d, ra_bitmap=0x%x, shortGIrate=0x%02x\n",
__func__, macid, raid, bitmap, shortGIrate);
- ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv), macid, raid, bitmap, shortGIrate);
+ ODM_RA_UpdateRateInfo_8188E(odmpriv, macid, raid, bitmap, shortGIrate);
}
void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
@@ -219,7 +219,7 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 rate_len, pktlen;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
@@ -227,20 +227,20 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, cur_network->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, bc_addr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
SetFrameSubType(pframe, WIFI_BEACON);
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
/* timestamp will be inserted by hardware */
pframe += 8;
@@ -304,16 +304,16 @@ _ConstructBeacon:
static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
__le16 *fctrl;
struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
/* Frame control. */
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
SetPwrMgt(fctrl);
SetFrameSubType(pframe, WIFI_PSPOLL);
@@ -322,10 +322,10 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
/* BSSID. */
- memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
/* TA. */
- memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
*pLength = 16;
}
@@ -338,7 +338,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
u8 bEosp,
u8 bForcePowerSave)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
@@ -347,9 +347,9 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
- fctrl = &pwlanhdr->frame_ctl;
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
if (bForcePowerSave)
SetPwrMgt(fctrl);
@@ -357,40 +357,40 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
switch (cur_network->network.InfrastructureMode) {
case Ndis802_11Infrastructure:
SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, pnetwork->MacAddress, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, StaAddr);
break;
case Ndis802_11APMode:
SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pnetwork->MacAddress, ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, StaAddr);
+ ether_addr_copy(pwlanhdr->addr2, pnetwork->MacAddress);
+ ether_addr_copy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)));
break;
case Ndis802_11IBSS:
default:
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, StaAddr);
+ ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
break;
}
SetSeqNum(pwlanhdr, 0);
if (bQoS) {
- struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+ struct ieee80211_qos_hdr *pwlanqoshdr;
SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
- pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
- SetPriority(&pwlanqoshdr->qc, AC);
- SetEOSP(&pwlanqoshdr->qc, bEosp);
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pktlen = sizeof(struct ieee80211_qos_hdr);
} else {
SetFrameSubType(pframe, WIFI_DATA_NULL);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
}
*pLength = pktlen;
@@ -398,7 +398,7 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
{
- struct rtw_ieee80211_hdr *pwlanhdr;
+ struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u8 *mac, *bssid;
u32 pktlen;
@@ -406,21 +406,21 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
mac = myid(&(adapt->eeprompriv));
bssid = cur_network->MacAddress;
- fctrl = &(pwlanhdr->frame_ctl);
+ fctrl = &pwlanhdr->frame_control;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+ ether_addr_copy(pwlanhdr->addr1, StaAddr);
+ ether_addr_copy(pwlanhdr->addr2, mac);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
SetSeqNum(pwlanhdr, 0);
SetFrameSubType(fctrl, WIFI_PROBERSP);
- pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
pframe += pktlen;
if (cur_network->IELength > MAX_IE_SZ)
@@ -445,7 +445,6 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
/* 2009.10.15 by tynli. */
static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
{
- struct hal_data_8188e *haldata;
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
@@ -467,7 +466,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
return;
}
- haldata = GET_HAL_DATA(adapt);
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
@@ -487,7 +485,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
if (PageNeed == 1)
PageNeed += 1;
PageNum += PageNeed;
- haldata->FwRsvdPageStartOffset = PageNum;
+ adapt->HalData->FwRsvdPageStartOffset = PageNum;
BufIndex += PageNeed*128;
@@ -554,7 +552,7 @@ exit:
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
bool bSendBeacon = false;
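The recurring pattern in this file is the switch from the driver-private struct rtw_ieee80211_hdr (field frame_ctl) to the mainline struct ieee80211_hdr, ieee80211_hdr_3addr and ieee80211_qos_hdr from <linux/ieee80211.h> (fields frame_control and qos_ctrl), alongside the memcpy()-to-ether_addr_copy() and GET_HAL_DATA()-to-HalData conversions. The header lengths stored in pktlen (24 bytes for a 3-address header, 26 with the QoS control word) should be unchanged by the switch; a simplified, packed userspace sketch that checks the arithmetic (field names follow the mainline structs, but the QoS variant is modelled here by nesting the 3-address header, which the kernel struct does not do):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    struct hdr_3addr {
        uint16_t frame_control;
        uint16_t duration_id;
        uint8_t  addr1[ETH_ALEN];
        uint8_t  addr2[ETH_ALEN];
        uint8_t  addr3[ETH_ALEN];
        uint16_t seq_ctrl;
    } __attribute__((packed));

    struct qos_hdr {
        struct hdr_3addr hdr;
        uint16_t qos_ctrl;
    } __attribute__((packed));

    int main(void)
    {
        /* Expected: 24 and 26, matching the pktlen values set above. */
        printf("3addr=%zu qos=%zu\n", sizeof(struct hdr_3addr),
               sizeof(struct qos_hdr));
        return 0;
    }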
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 81f2931876f8..d04b7fbb71e1 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -42,43 +42,33 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
/* */
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = Adapter->HalData;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
- u8 cut_ver;
/* Init Value */
memset(dm_odm, 0, sizeof(*dm_odm));
dm_odm->Adapter = Adapter;
+ dm_odm->SupportPlatform = ODM_CE;
+ dm_odm->SupportICType = ODM_RTL8188E;
+ dm_odm->CutVersion = ODM_CUT_A;
+ dm_odm->bIsMPChip = hal_data->VersionID.ChipType == NORMAL_CHIP;
+ dm_odm->PatchID = hal_data->CustomerID;
+ dm_odm->bWIFITest = Adapter->registrypriv.wifi_spec;
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
+ dm_odm->AntDivType = hal_data->TRxAntDivType;
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
-
- cut_ver = ODM_CUT_A;
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver);
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, hal_data->VersionID.ChipType == NORMAL_CHIP ? true : false);
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PATCH_ID, hal_data->CustomerID);
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
-
-
- if (hal_data->rf_type == RF_1T1R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
- else if (hal_data->rf_type == RF_2T2R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
- else if (hal_data->rf_type == RF_1T2R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+ /* Tx power tracking BB swing table. */
+ /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */

+ dm_odm->BbSwingIdxOfdmCurrent = 12;
+ dm_odm->BbSwingFlagOfdm = false;
pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
ODM_RF_TX_PWR_TRACK;
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+ dm_odm->SupportAbility = pdmpriv->InitODMFlag;
}
static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
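Dropping ODM_CmnInfoInit()/ODM_CmnInfoUpdate() in favour of plain assignments removes an enum-dispatched setter that only ever stored the value into the corresponding struct odm_dm_struct member; since the driver owns that structure, writing the fields directly is equivalent and easier to follow. A generic sketch of the pattern being retired (names here are illustrative, not the vendor ODM code):

    #include <stdbool.h>
    #include <stdio.h>

    struct dm_state {
        unsigned int support_platform;
        bool wifi_test;
    };

    enum dm_info { DM_INFO_PLATFORM, DM_INFO_WIFI_TEST };

    /* Enum-dispatched setter, in the style of the old ODM_CmnInfoInit(). */
    static void dm_info_init(struct dm_state *dm, enum dm_info what,
                             unsigned long val)
    {
        switch (what) {
        case DM_INFO_PLATFORM:
            dm->support_platform = val;
            break;
        case DM_INFO_WIFI_TEST:
            dm->wifi_test = val;
            break;
        }
    }

    int main(void)
    {
        struct dm_state dm = { 0 };

        dm_info_init(&dm, DM_INFO_PLATFORM, 0x04);  /* old style */
        dm.wifi_test = true;                        /* new style: direct write */
        printf("%u %d\n", dm.support_platform, dm.wifi_test);
        return 0;
    }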
@@ -86,7 +76,7 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = Adapter->HalData;
struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
struct dm_priv *pdmpriv = &hal_data->dmpriv;
int i;
@@ -109,20 +99,26 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_RF_TX_PWR_TRACK;
}
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
-
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &(Adapter->xmitpriv.tx_bytes));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_RX_UNI, &(Adapter->recvpriv.rx_bytes));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_WM_MODE, &(pmlmeext->cur_wireless_mode));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &(hal_data->nCur40MhzPrimeSC));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_MODE, &(Adapter->securitypriv.dot11PrivacyAlgrthm));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &(hal_data->CurrentChannelBW));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &(hal_data->CurrentChannel));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &(Adapter->net_closed));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_MP_MODE, &(Adapter->registrypriv.mp_mode));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &(pmlmepriv->bScanInProcess));
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &(pwrctrlpriv->bpower_saving));
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+ dm_odm->SupportAbility = pdmpriv->InitODMFlag;
+
+ dm_odm->pNumTxBytesUnicast = &Adapter->xmitpriv.tx_bytes;
+ dm_odm->pNumRxBytesUnicast = &Adapter->recvpriv.rx_bytes;
+ dm_odm->pWirelessMode = &pmlmeext->cur_wireless_mode;
+ dm_odm->pSecChOffset = &hal_data->nCur40MhzPrimeSC;
+ dm_odm->pSecurity = (u8 *)&Adapter->securitypriv.dot11PrivacyAlgrthm;
+ dm_odm->pBandWidth = (u8 *)&hal_data->CurrentChannelBW;
+ dm_odm->pChannel = &hal_data->CurrentChannel;
+ dm_odm->pbNet_closed = (bool *)&Adapter->net_closed;
+ dm_odm->mp_mode = &Adapter->registrypriv.mp_mode;
+ dm_odm->pbScanInProcess = (bool *)&pmlmepriv->bScanInProcess;
+ dm_odm->pbPowerSaving = (bool *)&pwrctrlpriv->bpower_saving;
+ dm_odm->AntDivType = hal_data->TRxAntDivType;
+
+ /* Tx power tracking BB swing table. */
+ /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
+ dm_odm->BbSwingIdxOfdmCurrent = 12;
+ dm_odm->BbSwingFlagOfdm = false;
for (i = 0; i < NUM_STA; i++)
ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
@@ -130,23 +126,19 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct odm_dm_struct *dm_odm = &(Adapter->HalData->odmpriv);
dm_InitGPIOSetting(Adapter);
pdmpriv->DM_Type = DM_Type_ByDriver;
pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
Update_ODM_ComInfo_88E(Adapter);
ODM_DMInit(dm_odm);
- Adapter->fix_rate = 0xFF;
}
-void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
+void rtw_hal_dm_watchdog(struct adapter *Adapter)
{
- bool fw_ps_awake = true;
u8 hw_init_completed = false;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
struct mlme_priv *pmlmepriv = NULL;
u8 bLinked = false;
@@ -155,13 +147,6 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
if (!hw_init_completed)
goto skip_dm;
- rtw_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&fw_ps_awake));
-
- /* Fw is under p2p powersaving mode, driver should stop dynamic mechanism. */
- /* modifed by thomas. 2011.06.11. */
- if (Adapter->wdinfo.p2p_ps_mode)
- fw_ps_awake = false;
-
/* ODM */
pmlmepriv = &Adapter->mlmepriv;
@@ -175,19 +160,18 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
bLinked = true;
}
- ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
- ODM_DMWatchdog(&hal_data->odmpriv);
+ Adapter->HalData->odmpriv.bLinked = bLinked;
+ ODM_DMWatchdog(&Adapter->HalData->odmpriv);
skip_dm:
/* Check GPIO to determine current RF on/off and Pbc status. */
/* Check Hardware Radio ON/OFF or not */
return;
}
-void rtl8188e_init_dm_priv(struct adapter *Adapter)
+void rtw_hal_dm_init(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- struct odm_dm_struct *podmpriv = &hal_data->odmpriv;
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
Init_ODM_ComInfo_88E(Adapter);
@@ -196,11 +180,9 @@ void rtl8188e_init_dm_priv(struct adapter *Adapter)
/* Add new function to reset the state of antenna diversity before link. */
/* Compare RSSI for deciding antenna */
-void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
-
- if (0 != hal_data->AntDivCfg) {
+ if (0 != Adapter->HalData->AntDivCfg) {
/* select optimum_antenna for before linked =>For antenna diversity */
if (dst->Rssi >= src->Rssi) {/* keep org parameter */
src->Rssi = dst->Rssi;
@@ -210,15 +192,14 @@ void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, stru
}
/* Add new function to reset the state of antenna diversity before link. */
-u8 AntDivBeforeLink8188E(struct adapter *Adapter)
+u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
/* Condition that does not need to use antenna diversity. */
- if (hal_data->AntDivCfg == 0)
+ if (Adapter->HalData->AntDivCfg == 0)
return false;
if (check_fwstate(pmlmepriv, _FW_LINKED))
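Throughout this series GET_HAL_DATA(adapter) becomes adapter->HalData. The macro only existed to cast a void pointer; the drv_types.h hunk further down gives HalData the type struct hal_data_8188e *, so the cast disappears and the compiler can type-check every access. A minimal before/after sketch with placeholder types:

    #include <stdio.h>

    struct hal_data { int ant_div_cfg; };

    /* Old style: opaque pointer plus a casting macro. */
    struct adapter_old { void *HalData; };
    #define GET_HAL_DATA(a) ((struct hal_data *)((a)->HalData))

    /* New style: typed pointer, no macro needed. */
    struct adapter_new { struct hal_data *HalData; };

    int main(void)
    {
        struct hal_data hal = { .ant_div_cfg = 1 };
        struct adapter_old o = { .HalData = &hal };
        struct adapter_new n = { .HalData = &hal };

        printf("%d %d\n", GET_HAL_DATA(&o)->ant_div_cfg,
               n.HalData->ant_div_cfg);
        return 0;
    }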
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 0b444fd3e550..385bc2f56f2f 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -108,28 +108,24 @@ void _8051Reset88E(struct adapter *padapter)
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
/* Init Fw LPS related. */
padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
/* Init H2C counter. by tynli. 2009.12.09. */
- pHalData->LastHMEBoxNum = 0;
+ padapter->HalData->LastHMEBoxNum = 0;
}
-static void rtl8188e_free_hal_data(struct adapter *padapter)
+void rtw_hal_free_data(struct adapter *padapter)
{
kfree(padapter->HalData);
padapter->HalData = NULL;
}
-static void ReadChipVersion8188E(struct adapter *padapter)
+void rtw_hal_read_chip_version(struct adapter *padapter)
{
u32 value32;
struct HAL_VERSION ChipVersion;
- struct hal_data_8188e *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = padapter->HalData;
value32 = usb_read32(padapter, REG_SYS_CFG);
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
@@ -139,16 +135,13 @@ static void ReadChipVersion8188E(struct adapter *padapter)
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
- pHalData->rf_type = RF_1T1R;
pHalData->NumTotalRFPath = 1;
-
- MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
}
-static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
+ struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
+
switch (eVariable) {
case HAL_ODM_STA_INFO:
{
@@ -165,17 +158,17 @@ static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable
}
break;
case HAL_ODM_P2P_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
+ podmpriv->bWIFI_Direct = bSet;
break;
case HAL_ODM_WIFI_DISPLAY_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
+ podmpriv->bWIFI_Display = bSet;
break;
default:
break;
}
}
-static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
{
if (enable) {
DBG_88E("Enable notch filter\n");
@@ -185,32 +178,6 @@ static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
usb_write8(adapter, rOFDM0_RxDSP+1, usb_read8(adapter, rOFDM0_RxDSP+1) & ~BIT(1));
}
}
-void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
-{
- pHalFunc->free_hal_data = &rtl8188e_free_hal_data;
-
- pHalFunc->dm_init = &rtl8188e_init_dm_priv;
-
- pHalFunc->read_chip_version = &ReadChipVersion8188E;
-
- pHalFunc->set_bwmode_handler = &phy_set_bw_mode;
- pHalFunc->set_channel_handler = &phy_sw_chnl;
-
- pHalFunc->hal_dm_watchdog = &rtl8188e_HalDmWatchDog;
-
- pHalFunc->Add_RateATid = &rtl8188e_Add_RateATid;
-
- pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
- pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
- pHalFunc->read_rfreg = &phy_query_rf_reg;
-
- pHalFunc->sreset_init_value = &sreset_init_value;
- pHalFunc->sreset_get_wifi_status = &sreset_get_wifi_status;
-
- pHalFunc->SetHalODMVarHandler = &rtl8188e_SetHalODMVar;
-
- pHalFunc->hal_notch_filter = &hal_notch_filter_8188e;
-}
/* */
/* */
@@ -501,7 +468,7 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = padapter->HalData;
struct txpowerinfo24g pwrInfo24G;
u8 rfPath, ch, group;
u8 bIn24G, TxCount;
@@ -553,7 +520,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = pAdapter->HalData;
if (!AutoLoadFail) {
pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
@@ -567,7 +534,7 @@ void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoa
void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = pAdapter->HalData;
if (!AutoLoadFail)
pHalData->BoardType = (hwinfo[EEPROM_RF_BOARD_OPTION_88E]
@@ -579,7 +546,7 @@ void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoL
void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = padapter->HalData;
if (!AutoLoadFail) {
pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_88E];
@@ -606,7 +573,7 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool Auto
void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = padapter->HalData;
if (!AutoLoadFail) {
pHalData->EEPROMCustomerID = hwinfo[EEPROM_CUSTOMERID_88E];
@@ -619,7 +586,7 @@ void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool Auto
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = pAdapter->HalData;
struct registry_priv *registry_par = &pAdapter->registrypriv;
if (!AutoLoadFail) {
@@ -652,7 +619,7 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = Adapter->HalData;
/* ThermalMeter from EEPROM */
if (!AutoloadFail)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index f110c961df70..fa2cfd5768de 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -57,10 +57,9 @@ static void process_link_qual(struct adapter *padapter,
signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
}
-void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
+void rtl8188e_process_phy_info(struct adapter *padapter,
+ struct recv_frame *precvframe)
{
- struct recv_frame *precvframe = prframe;
-
/* Check RSSI */
process_rssi(padapter, precvframe);
/* Check EVM */
@@ -140,7 +139,6 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
{
struct adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
u8 *wlanhdr;
struct odm_per_pkt_info pkt_info;
@@ -181,7 +179,8 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
pkt_info.StationID = psta->mac_id;
pkt_info.Rate = pattrib->mcs_rate;
- ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
+ ODM_PhyStatusQuery(&padapter->HalData->odmpriv, pPHYInfo,
+ (u8 *)pphy_status, &(pkt_info));
precvframe->psta = NULL;
if (pkt_info.bPacketMatchBSSID &&
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index d9e677ef8f84..780666a755ee 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -40,14 +40,13 @@ void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
{
u8 LedCfg;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
goto exit;
LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */
- if (pHalData->bLedOpenDrain) {
+ if (padapter->HalData->bLedOpenDrain) {
/* Open-drain arrangement for controlling the LED) */
LedCfg &= 0x90; /* Set to software control. */
usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
@@ -66,21 +65,20 @@ exit:
/* Description: */
/* Initialize all LED_871x objects. */
-void rtl8188eu_InitSwLeds(struct adapter *padapter)
+void rtw_hal_sw_led_init(struct adapter *padapter)
{
struct led_priv *pledpriv = &(padapter->ledpriv);
- struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
pledpriv->bRegUseLed = true;
pledpriv->LedControlHandler = LedControl8188eu;
- haldata->bLedOpenDrain = true;
+ padapter->HalData->bLedOpenDrain = true;
InitLed871x(padapter, &(pledpriv->SwLed0));
}
/* Description: */
/* DeInitialize all LED_819xUsb objects. */
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
+void rtw_hal_sw_led_deinit(struct adapter *padapter)
{
struct led_priv *ledpriv = &(padapter->ledpriv);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 255d6f215091..d0495a16ff79 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -13,6 +13,7 @@
*
******************************************************************************/
#define _RTL8188EU_RECV_C_
+#include <linux/kmemleak.h>
#include <osdep_service.h>
#include <drv_types.h>
#include <recv_osdep.h>
@@ -23,7 +24,7 @@
#include <rtl8188e_hal.h>
-int rtl8188eu_init_recv_priv(struct adapter *padapter)
+int rtw_hal_init_recv_priv(struct adapter *padapter)
{
struct recv_priv *precvpriv = &padapter->recvpriv;
int i, res = _SUCCESS;
@@ -72,6 +73,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter)
MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ,
GFP_KERNEL);
if (pskb) {
+ kmemleak_not_leak(pskb);
pskb->dev = padapter->pnetdev;
tmpaddr = (size_t)pskb->data;
alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
@@ -87,7 +89,7 @@ exit:
return res;
}
-void rtl8188eu_free_recv_priv(struct adapter *padapter)
+void rtw_hal_free_recv_priv(struct adapter *padapter)
{
int i;
struct recv_buf *precvbuf;
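The added kmemleak_not_leak(pskb) (from <linux/kmemleak.h>) tells the kernel's leak detector to ignore these receive skbs, presumably to silence a false positive for buffers the driver keeps alive for the lifetime of the interface while holding an offset-adjusted data pointer, as the tmpaddr/alignm arithmetic above suggests. That alignment arithmetic on its own, as a userspace sketch (RECVBUFF_ALIGN_SZ is assumed to be 8 here purely for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RECVBUFF_ALIGN_SZ 8 /* assumed value, for illustration only */

    int main(void)
    {
        unsigned char *buf = malloc(1024 + RECVBUFF_ALIGN_SZ);
        uintptr_t tmpaddr, alignm;

        if (!buf)
            return 1;

        /* Same arithmetic as in rtw_hal_init_recv_priv(): reserve enough
         * bytes at the front so the payload starts on an aligned boundary. */
        tmpaddr = (uintptr_t)buf;
        alignm = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
        printf("reserve %lu bytes\n",
               (unsigned long)(RECVBUFF_ALIGN_SZ - alignm));
        free(buf);
        return 0;
    }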
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index ec21d8c82eba..85650b2663ec 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -21,7 +21,7 @@
#include <usb_ops_linux.h>
#include <rtl8188e_hal.h>
-s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
+s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
@@ -33,11 +33,7 @@ s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
- u8 set_tx_desc_offset;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;
-
- return set_tx_desc_offset;
+ return !((sz + TXDESC_SIZE) % adapt->HalData->UsbBulkOutSize);
}
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
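The rewritten urb_zero_packet_chk() keeps the old behaviour in one expression: it reports true exactly when descriptor plus payload is a whole multiple of the USB bulk-out size, the case that needs special end-of-transfer handling. A standalone equivalence check (TXDESC_SIZE is assumed to be 32 here for illustration; the real value comes from the rtl8188e headers):

    #include <stdio.h>

    #define TXDESC_SIZE 32 /* assumed for illustration */

    static int old_chk(int sz, unsigned int bulk_out_size)
    {
        return (((sz + TXDESC_SIZE) % bulk_out_size) == 0) ? 1 : 0;
    }

    static int new_chk(int sz, unsigned int bulk_out_size)
    {
        return !((sz + TXDESC_SIZE) % bulk_out_size);
    }

    int main(void)
    {
        unsigned int bulk = 512;
        int sz;

        /* Both forms must agree for every transfer size. */
        for (sz = 0; sz < 4096; sz++)
            if (old_chk(sz, bulk) != new_chk(sz, bulk))
                printf("mismatch at %d\n", sz);
        printf("checked 4096 sizes\n");
        return 0;
    }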
@@ -175,7 +171,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
u8 data_rate, pwr_status, offset;
struct adapter *adapt = pxmitframe->padapter;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -259,12 +255,12 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */
if (pattrib->ht_en) {
- if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
+ if (ODM_RA_GetShortGI_8188E(odmpriv, pattrib->mac_id))
ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
}
- data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
+ data_rate = ODM_RA_GetDecisionRate_8188E(odmpriv, pattrib->mac_id);
ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
- pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
+ pwr_status = ODM_RA_GetHwPwrStatus_8188E(odmpriv, pattrib->mac_id);
ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
} else {
/* EAP data packet and ARP packet and DHCP. */
@@ -332,8 +328,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw4 |= cpu_to_le32(HW_SSN); /* Hw set sequence number */
}
- rtl88eu_dm_set_tx_ant_by_tx_info(&haldata->odmpriv, pmem,
- pattrib->mac_id);
+ rtl88eu_dm_set_tx_ant_by_tx_info(odmpriv, pmem, pattrib->mac_id);
rtl8188eu_cal_txdesc_chksum(ptxdesc);
_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
@@ -387,7 +382,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
}
ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
- inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);
+ inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, pxmitbuf);
rtw_count_tx_stats(adapt, pxmitframe, sz);
@@ -424,11 +419,11 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
return len;
}
-s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct xmit_frame *pxmitframe = NULL;
struct xmit_frame *pfirstframe = NULL;
+ struct xmit_buf *pxmitbuf;
/* aggregate variable */
struct hw_xmit *phwxmit;
@@ -441,7 +436,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
u32 pbuf_tail; /* last pkt tail */
u32 len; /* packet length, except TXDESC_SIZE and PKT_OFFSET */
- u32 bulksize = haldata->UsbBulkOutSize;
+ u32 bulksize = adapt->HalData->UsbBulkOutSize;
u8 desc_cnt;
u32 bulkptr;
@@ -450,12 +445,9 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));
- /* check xmitbuffer is ok */
- if (pxmitbuf == NULL) {
- pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (pxmitbuf == NULL)
- return false;
- }
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL)
+ return false;
/* 3 1. pick up first frame */
rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -565,7 +557,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
if (pbuf < bulkptr) {
desc_cnt++;
- if (desc_cnt == haldata->UsbTxAggDescNum)
+ if (desc_cnt == adapt->HalData->UsbTxAggDescNum)
break;
} else {
desc_cnt = 0;
@@ -594,7 +586,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
/* 3 4. write xmit buffer to USB FIFO */
ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
- usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);
+ usb_write_port(adapt, ff_hwaddr, pbuf_tail, pxmitbuf);
/* 3 5. update statisitc */
pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
@@ -607,24 +599,12 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
return true;
}
-static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- s32 res;
-
- res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
- if (res == _SUCCESS)
- rtw_dump_xframe(adapt, pxmitframe);
- else
- DBG_88E("==> %s xmitframe_coalsece failed\n", __func__);
- return res;
-}
-
/*
* Return
* true dump packet directly
* false enqueue packet
*/
-static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
+s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
s32 res;
struct xmit_buf *pxmitbuf = NULL;
@@ -650,7 +630,12 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
pxmitframe->buf_addr = pxmitbuf->pbuf;
pxmitbuf->priv_data = pxmitframe;
- if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
+ res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+
+ if (res == _SUCCESS) {
+ rtw_dump_xframe(adapt, pxmitframe);
+ } else {
+ DBG_88E("==> %s xmitframe_coalsece failed\n", __func__);

rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
@@ -674,20 +659,10 @@ enqueue:
return false;
}
-s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
+s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
struct xmit_priv *xmitpriv = &adapt->xmitpriv;
rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
return rtw_dump_xframe(adapt, pmgntframe);
}
-
-/*
- * Return
- * true dump packet directly ok
- * false temporary can't transmit packets to hardware
- */
-s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- return pre_xmitframe(adapt, pxmitframe);
-}
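Two related simplifications in this file: rtl8188eu_xmitframe_complete() now always allocates its own xmit_buf instead of optionally taking one from the caller, and the one-shot xmitframe_direct() wrapper is folded into rtw_hal_xmit() (formerly pre_xmitframe()), so the coalesce-then-dump decision is visible at the call site. A toy sketch of that control flow, with hypothetical stand-ins for the driver helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's coalesce/dump/free helpers,
     * only to show the control flow rtw_hal_xmit() now spells out inline. */
    static bool coalesce(void)      { return true; }
    static void dump_frame(void)    { printf("frame written to USB FIFO\n"); }
    static void free_buffers(void)  { printf("frame and xmit buffer released\n"); }

    int main(void)
    {
        if (coalesce())
            dump_frame();   /* success: hand the frame to hardware */
        else
            free_buffers(); /* failure: drop frame and xmit buffer */
        return 0;
    }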
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 363f3a34ddce..7692ca495ee5 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -21,14 +21,13 @@
#include <rtl8188e_hal.h>
#include <rtl8188e_led.h>
#include <rtw_iol.h>
-#include <usb_hal.h>
#include <phy.h>
#define HAL_BB_ENABLE 1
static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
switch (NumOutPipe) {
case 3:
@@ -51,13 +50,12 @@ static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
bool result = false;
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (haldata->OutEpNumber == 1) {
+ if (adapt->HalData->OutEpNumber == 1) {
if (NumInPipe != 1)
return result;
}
@@ -69,9 +67,9 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
return result;
}
-static void rtl8188eu_interface_configure(struct adapter *adapt)
+void rtw_hal_chip_configure(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
if (pdvobjpriv->ishighspeed)
@@ -94,12 +92,11 @@ static void rtl8188eu_interface_configure(struct adapter *adapt)
pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
}
-static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
+u32 rtw_hal_power_on(struct adapter *adapt)
{
u16 value16;
/* HW Power on sequence */
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- if (haldata->bMacPwrCtrlOn)
+ if (adapt->HalData->bMacPwrCtrlOn)
return _SUCCESS;
if (!rtl88eu_pwrseqcmdparsing(adapt, PWR_CUT_ALL_MSK,
@@ -119,7 +116,7 @@ static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
/* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
usb_write16(adapt, REG_CR, value16);
- haldata->bMacPwrCtrlOn = true;
+ adapt->HalData->bMacPwrCtrlOn = true;
return _SUCCESS;
}
@@ -129,18 +126,17 @@ static void _InitInterrupt(struct adapter *Adapter)
{
u32 imr, imr_ex;
u8 usb_opt;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
/* HISR write one to clear */
usb_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
/* HIMR - */
imr = IMR_PSTIMEOUT_88E | IMR_TBDER_88E | IMR_CPWM_88E | IMR_CPWM2_88E;
usb_write32(Adapter, REG_HIMR_88E, imr);
- haldata->IntrMask[0] = imr;
+ Adapter->HalData->IntrMask[0] = imr;
imr_ex = IMR_TXERR_88E | IMR_RXERR_88E | IMR_TXFOVW_88E | IMR_RXFOVW_88E;
usb_write32(Adapter, REG_HIMRE_88E, imr_ex);
- haldata->IntrMask[1] = imr_ex;
+ Adapter->HalData->IntrMask[1] = imr_ex;
/* REG_USB_SPECIAL_OPTION - BIT(4) */
/* 0; Use interrupt endpoint to upload interrupt pkt */
@@ -157,7 +153,6 @@ static void _InitInterrupt(struct adapter *Adapter)
static void _InitQueueReservedPage(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u32 numHQ = 0;
u32 numLQ = 0;
@@ -168,14 +163,14 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
bool bWiFiConfig = pregistrypriv->wifi_spec;
if (bWiFiConfig) {
- if (haldata->OutEpQueueSel & TX_SELE_HQ)
+ if (Adapter->HalData->OutEpQueueSel & TX_SELE_HQ)
numHQ = 0x29;
- if (haldata->OutEpQueueSel & TX_SELE_LQ)
+ if (Adapter->HalData->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
/* NOTE: This step shall be proceed before writing REG_RQPN. */
- if (haldata->OutEpQueueSel & TX_SELE_NQ)
+ if (Adapter->HalData->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
usb_write8(Adapter, REG_RQPN_NPQ, value8);
@@ -225,10 +220,9 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
u16 value = 0;
- switch (haldata->OutEpQueueSel) {
+
+ switch (Adapter->HalData->OutEpQueueSel) {
case TX_SELE_HQ:
value = QUEUE_HIGH;
break;
@@ -247,13 +241,12 @@ static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
u16 valueHi = 0;
u16 valueLow = 0;
- switch (haldata->OutEpQueueSel) {
+ switch (Adapter->HalData->OutEpQueueSel) {
case (TX_SELE_HQ | TX_SELE_LQ):
valueHi = QUEUE_HIGH;
valueLow = QUEUE_LOW;
@@ -313,9 +306,7 @@ static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
static void _InitQueuePriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
- switch (haldata->OutEpNumber) {
+ switch (Adapter->HalData->OutEpNumber) {
case 1:
_InitNormalChipOneOutEpPriority(Adapter);
break;
@@ -357,7 +348,7 @@ static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
static void _InitWMACSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
haldata->ReceiveConfig = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
RCR_CBSSID_DATA | RCR_CBSSID_BCN |
@@ -456,7 +447,7 @@ static void _InitRetryFunction(struct adapter *Adapter)
*/
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
u32 value32;
if (Adapter->registrypriv.wifi_spec)
@@ -492,7 +483,7 @@ usb_AggSettingRxUpdate(
struct adapter *Adapter
)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
u8 valueDMA;
u8 valueUSB;
@@ -566,8 +557,6 @@ usb_AggSettingRxUpdate(
static void InitUsbAggregationSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
/* Tx aggregation setting */
usb_AggSettingTxUpdate(Adapter);
@@ -575,12 +564,12 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
usb_AggSettingRxUpdate(Adapter);
/* 201/12/10 MH Add for USB agg mode dynamic switch. */
- haldata->UsbRxHighSpeedMode = false;
+ Adapter->HalData->UsbRxHighSpeedMode = false;
}
static void _InitBeaconParameters(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
usb_write16(Adapter, REG_BCN_CTRL, 0x1010);
@@ -622,7 +611,7 @@ enum {
static void _InitAntenna_Selection(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
if (haldata->AntDivCfg == 0)
return;
@@ -672,13 +661,13 @@ enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
return rfpowerstate;
} /* HalDetectPwrDownMode */
-static u32 rtl8188eu_hal_init(struct adapter *Adapter)
+u32 rtl8188eu_hal_init(struct adapter *Adapter)
{
u8 value8 = 0;
u16 value16;
u8 txpktbuf_bndy;
u32 status = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
unsigned long init_start_time = jiffies;
@@ -702,7 +691,7 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
- status = rtl8188eu_InitPowerOn(Adapter);
+ status = rtw_hal_power_on(Adapter);
if (status == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init power on!\n"));
goto exit;
@@ -810,8 +799,8 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
usb_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
/* Keep RfRegChnlVal for later use. */
- haldata->RfRegChnlVal[0] = phy_query_rf_reg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
- haldata->RfRegChnlVal[1] = phy_query_rf_reg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[0] = rtw_hal_read_rfreg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[1] = rtw_hal_read_rfreg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
_BBTurnOnBlock(Adapter);
@@ -905,7 +894,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("CardDisableRTL8188EU\n"));
@@ -958,7 +946,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
val8 = usb_read8(Adapter, REG_GPIO_IO_SEL+1);
usb_write8(Adapter, REG_GPIO_IO_SEL+1, val8|0x0F);/* Reg0x43 */
usb_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808);/* set LNA ,TRSW,EX_PA Pin to output mode */
- haldata->bMacPwrCtrlOn = false;
+ Adapter->HalData->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
}
@@ -972,7 +960,7 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
usb_write16(adapt, REG_APS_FSMCO, 0x8812);
}
-static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
+u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
DBG_88E("==> %s\n", __func__);
@@ -994,7 +982,7 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
return _SUCCESS;
}
-static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
+u32 rtw_hal_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
@@ -1029,17 +1017,6 @@ exit:
return status;
}
-static unsigned int rtl8188eu_inirp_deinit(struct adapter *Adapter)
-{
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n ===> usb_rx_deinit\n"));
-
- usb_read_port_cancel(Adapter);
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n <=== usb_rx_deinit\n"));
-
- return _SUCCESS;
-}
-
/* */
/* */
/* EEPROM/EFUSE Content Parsing */
@@ -1047,7 +1024,7 @@ static unsigned int rtl8188eu_inirp_deinit(struct adapter *Adapter)
/* */
static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
if (!AutoLoadFail) {
/* VID, PID */
@@ -1132,12 +1109,10 @@ static void _ReadPROMContent(
static void _ReadRFType(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
- haldata->rf_chip = RF_6052;
+ Adapter->HalData->rf_chip = RF_6052;
}
-static void _ReadAdapterInfo8188EU(struct adapter *Adapter)
+void rtw_hal_read_chip_info(struct adapter *Adapter)
{
unsigned long start = jiffies;
@@ -1157,7 +1132,7 @@ static void rtl8192cu_trigger_gpio_0(struct adapter *adapt)
static void ResumeTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
/* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
/* which should be read from register to a global variable. */
@@ -1171,7 +1146,7 @@ static void ResumeTxBeacon(struct adapter *adapt)
static void StopTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
/* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
/* which should be read from register to a global variable. */
@@ -1276,9 +1251,9 @@ static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
usb_write8(Adapter, bcn_ctrl_reg, usb_read8(Adapter, bcn_ctrl_reg)&(~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
}
-static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
struct dm_priv *pdmpriv = &haldata->dmpriv;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
@@ -1426,17 +1401,8 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* enable update TSF */
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
}
- if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
- } else {
- if (Adapter->in_cta_test) {
- u32 v = usb_read32(Adapter, REG_RCR);
- v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
- usb_write32(Adapter, REG_RCR, v);
- } else {
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
- }
- }
+
+ usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
}
break;
case HW_VAR_MLME_JOIN:
@@ -1449,13 +1415,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* enable to rx data frame.Accept all data frame */
usb_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
- if (Adapter->in_cta_test) {
- u32 v = usb_read32(Adapter, REG_RCR);
- v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
- usb_write32(Adapter, REG_RCR, v);
- } else {
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
- }
+ usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
RetryLimit = (haldata->CustomerID == RT_CID_CCX) ? 7 : 48;
@@ -1525,9 +1485,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_SEC_CFG:
usb_write8(Adapter, REG_SECCFG, *((u8 *)val));
break;
- case HW_VAR_DM_FLAG:
- podmpriv->SupportAbility = *((u8 *)val);
- break;
case HW_VAR_DM_FUNC_OP:
if (val[0])
podmpriv->BK_SupportAbility = podmpriv->SupportAbility;
@@ -1793,14 +1750,11 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
-static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
- struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
switch (variable) {
case HW_VAR_BASIC_RATE:
- *((u16 *)(val)) = haldata->BasicRateSet;
+ *((u16 *)(val)) = Adapter->HalData->BasicRateSet;
case HW_VAR_TXPAUSE:
val[0] = usb_read8(Adapter, REG_TXPAUSE);
break;
@@ -1808,11 +1762,8 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
val[0] = (BIT(0) & usb_read8(Adapter, REG_TDECTRL+2)) ? true : false;
break;
- case HW_VAR_DM_FLAG:
- val[0] = podmpriv->SupportAbility;
- break;
case HW_VAR_RF_TYPE:
- val[0] = haldata->rf_type;
+ val[0] = RF_1T1R;
break;
case HW_VAR_FWLPS_RF_ON:
{
@@ -1833,13 +1784,13 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
break;
case HW_VAR_CURRENT_ANTENNA:
- val[0] = haldata->CurAntenna;
+ val[0] = Adapter->HalData->CurAntenna;
break;
case HW_VAR_EFUSE_BYTES: /* To get EFUE total used bytes, added by Roger, 2008.12.22. */
- *((u16 *)(val)) = haldata->EfuseUsedBytes;
+ *((u16 *)(val)) = Adapter->HalData->EfuseUsedBytes;
break;
case HW_VAR_APFM_ON_MAC:
- *val = haldata->bMacPwrCtrlOn;
+ *val = Adapter->HalData->bMacPwrCtrlOn;
break;
case HW_VAR_CHK_HI_QUEUE_EMPTY:
*val = ((usb_read32(Adapter, REG_HGQ_INFORMATION)&0x0000ff00) == 0) ? true : false;
@@ -1853,14 +1804,13 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* Description: */
/* Query setting of specified variable. */
/* */
-static u8
-GetHalDefVar8188EUsb(
+u8 rtw_hal_get_def_var(
struct adapter *Adapter,
enum hal_def_variable eVariable,
void *pValue
)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = Adapter->HalData;
u8 bResult = _SUCCESS;
switch (eVariable) {
@@ -1948,7 +1898,7 @@ GetHalDefVar8188EUsb(
return bResult;
}
-static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
u8 init_rate = 0;
u8 networkType, raid;
@@ -1956,7 +1906,7 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
u8 shortGIrate = false;
int supportRateNum = 0;
struct sta_info *psta;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
@@ -1995,7 +1945,7 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
break;
}
- rate_bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, mac_id, mask, rssi_level);
+ rate_bitmap = ODM_Get_Rate_Bitmap(odmpriv, mac_id, mask, rssi_level);
DBG_88E("%s => mac_id:%d, networkType:0x%02x, mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
__func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
@@ -2003,15 +1953,14 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
init_rate = get_highest_rate_idx(mask)&0x3f;
- ODM_RA_UpdateRateInfo_8188E(&haldata->odmpriv, mac_id,
- raid, mask, shortGIrate);
+ ODM_RA_UpdateRateInfo_8188E(odmpriv, mac_id, raid, mask, shortGIrate);
/* set ra_id */
psta->raid = raid;
psta->init_rate = init_rate;
}
-static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
+void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
{
u32 value32;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
@@ -2045,13 +1994,12 @@ static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
usb_write8(adapt, bcn_ctrl_reg, usb_read8(adapt, bcn_ctrl_reg) | BIT(1));
}
-static void rtl8188eu_init_default_value(struct adapter *adapt)
+void rtw_hal_def_value_init(struct adapter *adapt)
{
- struct hal_data_8188e *haldata;
+ struct hal_data_8188e *haldata = adapt->HalData;
struct pwrctrl_priv *pwrctrlpriv;
u8 i;
- haldata = GET_HAL_DATA(adapt);
pwrctrlpriv = &adapt->pwrctrlpriv;
/* init default value */
@@ -2067,43 +2015,3 @@ static void rtl8188eu_init_default_value(struct adapter *adapt)
for (i = 0; i < HP_THERMAL_NUM; i++)
haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
}
-
-void rtl8188eu_set_hal_ops(struct adapter *adapt)
-{
- struct hal_ops *halfunc = &adapt->HalFunc;
-
-
- adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!adapt->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
-
- halfunc->hal_power_on = rtl8188eu_InitPowerOn;
- halfunc->hal_init = &rtl8188eu_hal_init;
- halfunc->hal_deinit = &rtl8188eu_hal_deinit;
-
- halfunc->inirp_init = &rtl8188eu_inirp_init;
- halfunc->inirp_deinit = &rtl8188eu_inirp_deinit;
-
- halfunc->init_xmit_priv = &rtl8188eu_init_xmit_priv;
-
- halfunc->init_recv_priv = &rtl8188eu_init_recv_priv;
- halfunc->free_recv_priv = &rtl8188eu_free_recv_priv;
- halfunc->InitSwLeds = &rtl8188eu_InitSwLeds;
- halfunc->DeInitSwLeds = &rtl8188eu_DeInitSwLeds;
-
- halfunc->init_default_value = &rtl8188eu_init_default_value;
- halfunc->intf_chip_configure = &rtl8188eu_interface_configure;
- halfunc->read_adapter_info = &_ReadAdapterInfo8188EU;
-
- halfunc->SetHwRegHandler = &SetHwReg8188EU;
- halfunc->GetHwRegHandler = &GetHwReg8188EU;
- halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
-
- halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
- halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
-
- halfunc->hal_xmit = &rtl8188eu_hal_xmit;
- halfunc->mgnt_xmit = &rtl8188eu_mgnt_xmit;
-
- rtl8188e_set_hal_ops(halfunc);
-}
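The deleted rtl8188eu_set_hal_ops() was the registration side of the hal_ops function-pointer table that the hal_intf.h hunk below removes. With a single supported chip the vtable only added an indirect call and weaker type checking, so each handler becomes one directly-called rtw_hal_*() function (see the renames earlier in this series). A generic sketch contrasting the two shapes:

    #include <stdio.h>

    struct adapter { int id; };

    static void chip_dm_init(struct adapter *a)
    {
        printf("dm_init for adapter %d\n", a->id);
    }

    /* Old shape: an ops table filled in at setup time. */
    struct hal_ops { void (*dm_init)(struct adapter *a); };

    int main(void)
    {
        struct adapter a = { .id = 0 };
        struct hal_ops ops = { .dm_init = chip_dm_init };

        ops.dm_init(&a);    /* indirect call through the vtable */
        chip_dm_init(&a);   /* direct call, as the driver now does */
        return 0;
    }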
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 8990748a1919..0976a761b280 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -158,24 +158,6 @@ struct bb_reg_def {
* Path A and B */
};
-struct ant_sel_ofdm {
- u32 r_tx_antenna:4;
- u32 r_ant_l:4;
- u32 r_ant_non_ht:4;
- u32 r_ant_ht1:4;
- u32 r_ant_ht2:4;
- u32 r_ant_ht_s1:4;
- u32 r_ant_non_ht_s1:4;
- u32 OFDM_TXSC:2;
- u32 reserved:2;
-};
-
-struct ant_sel_cck {
- u8 r_cckrx_enable_2:2;
- u8 r_cckrx_enable:2;
- u8 r_ccktx_enable:4;
-};
-
/*------------------------------Define structure----------------------------*/
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
deleted file mode 100644
index dbb55247b0c6..000000000000
--- a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
-
-#ifndef __INC_FW_8188E_HW_IMG_H
-#define __INC_FW_8188E_HW_IMG_H
-
-
-/******************************************************************************
-* FW_AP.TXT
-******************************************************************************/
-/******************************************************************************
-* FW_WoWLAN.TXT
-******************************************************************************/
-#define ArrayLength_8188E_FW_WoWLAN 15764
-extern const u8 Array_8188E_FW_WoWLAN[ArrayLength_8188E_FW_WoWLAN];
-
-#endif
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 2c1676d2ac6e..69c4d49f43ab 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -137,8 +137,4 @@ value to host byte ordering.*/
((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
)
-/* Get the N-bytes aligment offset from the current length */
-#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
- (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
-
#endif /* __BASIC_TYPES_H__ */
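The removed (and misspelled) N_BYTE_ALIGMENT() macro rounded a length up to the next multiple of the given alignment, presumably with no remaining users. Equivalent helpers already exist in the kernel: ALIGN()/round_up() for power-of-two alignments and roundup() for the general case, the latter using the same divide-and-multiply arithmetic. A quick standalone check of that formula:

    #include <stdio.h>

    /* Same arithmetic as the removed macro and the kernel's roundup(). */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        /* Expected output: 0 8 24 */
        printf("%d %d %d\n", ROUNDUP(0, 8), ROUNDUP(1, 8), ROUNDUP(17, 8));
        return 0;
    }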
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 55506a7da1a4..32326fd1dd24 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -53,30 +53,17 @@
#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
struct registry_priv {
- u8 chip_version;
- u8 rfintfs;
- u8 lbkmode;
- u8 hci;
struct ndis_802_11_ssid ssid;
- u8 network_mode; /* infra, ad-hoc, auto */
u8 channel;/* ad-hoc support requirement */
u8 wireless_mode;/* A, B, G, auto */
- u8 scan_mode;/* active, passive */
- u8 radio_enable;
u8 preamble;/* long, short, auto */
u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
u8 vcs_type;/* RTS/CTS, CTS-to-self */
u16 rts_thresh;
u16 frag_thresh;
- u8 adhoc_tx_pwr;
- u8 soft_ap;
u8 power_mgnt;
u8 ips_mode;
u8 smart_ps;
- u8 long_retry_lmt;
- u8 short_retry_lmt;
- u16 busy_thresh;
- u8 ack_policy;
u8 mp_mode;
u8 software_encrypt;
u8 software_decrypt;
@@ -84,11 +71,6 @@ struct registry_priv {
/* UAPSD */
u8 wmm_enable;
u8 uapsd_enable;
- u8 uapsd_max_sp;
- u8 uapsd_acbk_en;
- u8 uapsd_acbe_en;
- u8 uapsd_acvi_en;
- u8 uapsd_acvo_en;
struct wlan_bssid_ex dev_network;
@@ -97,10 +79,6 @@ struct registry_priv {
u8 ampdu_enable;/* for tx */
u8 rx_stbc;
u8 ampdu_amsdu;/* A-MPDU Supports A-MSDU is permitted */
- u8 lowrate_two_xmit;
-
- u8 rf_config;
- u8 low_power;
u8 wifi_spec;/* !turbo_mode */
@@ -112,9 +90,6 @@ struct registry_priv {
u8 usbss_enable;/* 0:disable,1:enable */
u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
- u8 hwpwrp_detect;/* 0:disable,1:enable */
-
- u8 hw_wps_pbc;/* 0:disable,1:enable */
u8 max_roaming_times; /* the max number driver will try */
@@ -129,12 +104,6 @@ struct registry_priv {
bool monitor_enable;
};
-/* For registry parameters */
-#define RGTRY_OFT(field) ((u32)offsetof(struct registry_priv, field))
-#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
-#define BSSID_OFT(field) ((u32)offsetofT(struct wlan_bssid_ex, field))
-#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
-
#define MAX_CONTINUAL_URB_ERR 4
struct dvobj_priv {
@@ -149,16 +118,11 @@ struct dvobj_priv {
u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
/*-------- below is for USB INTERFACE --------*/
-
- u8 nr_endpoint;
u8 ishighspeed;
u8 RtNumInPipes;
u8 RtNumOutPipes;
- int ep_num[5]; /* endpoint number */
struct mutex usb_vendor_req_mutex;
- u8 *usb_vendor_req_buf;
-
struct usb_interface *pusbintf;
struct usb_device *pusbdev;
};
@@ -184,14 +148,7 @@ struct adapter {
struct eeprom_priv eeprompriv;
struct led_priv ledpriv;
-#ifdef CONFIG_88EU_AP_MODE
- struct hostapd_priv *phostapdpriv;
-#endif
-
- struct wifidirect_info wdinfo;
-
- void *HalData;
- struct hal_ops HalFunc;
+ struct hal_data_8188e *HalData;
s32 bDriverStopped;
s32 bSurpriseRemoved;
@@ -199,20 +156,11 @@ struct adapter {
u8 hw_init_completed;
void *cmdThread;
- void *evtThread;
void (*intf_start)(struct adapter *adapter);
void (*intf_stop)(struct adapter *adapter);
struct net_device *pnetdev;
struct net_device *pmondev;
- /* used by rtw_rereg_nd_name related function */
- struct rereg_nd_name_data {
- struct net_device *old_pnetdev;
- char old_ifname[IFNAMSIZ];
- u8 old_ips_mode;
- u8 old_bRegUseLed;
- } rereg_nd_name_priv;
-
int bup;
struct net_device_stats stats;
struct iw_statistics iwstats;
@@ -223,23 +171,12 @@ struct adapter {
u8 bReadPortCancel;
u8 bWritePortCancel;
u8 bRxRSSIDisplay;
- /* The driver will show up the desired channel number
- * when this flag is 1. */
- u8 bNotifyChannelChange;
struct mutex hw_init_mutex;
-
- spinlock_t br_ext_lock;
-
- u8 fix_rate;
-
- unsigned char in_cta_test;
};
#define adapter_to_dvobj(adapter) (adapter->dvobj)
-int rtw_handle_dualmac(struct adapter *adapter, bool init);
-
static inline u8 *myid(struct eeprom_priv *peepriv)
{
return peepriv->mac_addr;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index eaf939bd4103..fa032b0c12ff 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -58,7 +58,6 @@ enum hw_variables {
HW_VAR_SEC_CFG,
HW_VAR_BCN_VALID,
HW_VAR_RF_TYPE,
- HW_VAR_DM_FLAG,
HW_VAR_DM_FUNC_OP,
HW_VAR_DM_FUNC_SET,
HW_VAR_DM_FUNC_CLR,
@@ -139,81 +138,6 @@ enum hal_intf_ps_func {
HAL_MAX_ID,
};
-struct hal_ops {
- u32 (*hal_power_on)(struct adapter *padapter);
- u32 (*hal_init)(struct adapter *padapter);
- u32 (*hal_deinit)(struct adapter *padapter);
-
- void (*free_hal_data)(struct adapter *padapter);
-
- u32 (*inirp_init)(struct adapter *padapter);
- u32 (*inirp_deinit)(struct adapter *padapter);
-
- s32 (*init_xmit_priv)(struct adapter *padapter);
-
- s32 (*init_recv_priv)(struct adapter *padapter);
- void (*free_recv_priv)(struct adapter *padapter);
-
- void (*InitSwLeds)(struct adapter *padapter);
- void (*DeInitSwLeds)(struct adapter *padapter);
-
- void (*dm_init)(struct adapter *padapter);
- void (*read_chip_version)(struct adapter *padapter);
-
- void (*init_default_value)(struct adapter *padapter);
-
- void (*intf_chip_configure)(struct adapter *padapter);
-
- void (*read_adapter_info)(struct adapter *padapter);
-
- s32 (*interrupt_handler)(struct adapter *padapter);
-
- void (*set_bwmode_handler)(struct adapter *padapter,
- enum ht_channel_width Bandwidth,
- u8 Offset);
- void (*set_channel_handler)(struct adapter *padapter, u8 channel);
-
- void (*hal_dm_watchdog)(struct adapter *padapter);
-
- void (*SetHwRegHandler)(struct adapter *padapter, u8 variable,
- u8 *val);
- void (*GetHwRegHandler)(struct adapter *padapter, u8 variable,
- u8 *val);
-
- u8 (*GetHalDefVarHandler)(struct adapter *padapter,
- enum hal_def_variable eVariable,
- void *pValue);
-
- void (*SetHalODMVarHandler)(struct adapter *padapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet);
-
- void (*UpdateRAMaskHandler)(struct adapter *padapter,
- u32 mac_id, u8 rssi_level);
- void (*SetBeaconRelatedRegistersHandler)(struct adapter *padapter);
-
- void (*Add_RateATid)(struct adapter *adapter, u32 bitmap, u8 arg,
- u8 rssi_level);
-
- u8 (*AntDivBeforeLinkHandler)(struct adapter *adapter);
- void (*AntDivCompareHandler)(struct adapter *adapter,
- struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src);
- s32 (*hal_xmit)(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
- s32 (*mgnt_xmit)(struct adapter *padapter,
- struct xmit_frame *pmgntframe);
- u32 (*read_rfreg)(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask);
-
- void (*sreset_init_value)(struct adapter *padapter);
- u8 (*sreset_get_wifi_status)(struct adapter *padapter);
-
- void (*hal_notch_filter)(struct adapter *adapter, bool enable);
- void (*hal_reset_security_engine)(struct adapter *adapter);
-};
-
enum rt_eeprom_type {
EEPROM_93C46,
EEPROM_93C56,
@@ -235,6 +159,9 @@ enum hardware_type {
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
+void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
+u32 rtl8188eu_hal_deinit(struct adapter *Adapter);
+u32 rtl8188eu_hal_init(struct adapter *Adapter);
void rtw_hal_def_value_init(struct adapter *padapter);
void rtw_hal_free_data(struct adapter *padapter);
@@ -262,7 +189,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
bool bSet);
u32 rtw_hal_inirp_init(struct adapter *padapter);
-u32 rtw_hal_inirp_deinit(struct adapter *padapter);
+void rtw_hal_inirp_deinit(struct adapter *padapter);
s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
@@ -270,7 +197,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
s32 rtw_hal_init_xmit_priv(struct adapter *padapter);
-s32 rtw_hal_init_recv_priv(struct adapter *padapter);
+int rtw_hal_init_recv_priv(struct adapter *padapter);
void rtw_hal_free_recv_priv(struct adapter *padapter);
void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
@@ -296,7 +223,6 @@ void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
void rtw_hal_sreset_init(struct adapter *padapter);
void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
-void rtw_hal_reset_security_engine(struct adapter *adapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
u8 rtw_do_join(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index d8284c84f09c..fc58621368c1 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -239,7 +239,7 @@ struct ieee_param {
u16 capability;
int flags;
u8 tx_supp_rates[16];
- struct rtw_ieee80211_ht_cap ht_cap;
+ struct ieee80211_ht_cap ht_cap;
} add_sta;
struct {
u8 reserved[2];/* for set max_num_sta */
@@ -264,7 +264,7 @@ struct sta_data {
u32 sta_set;
u8 tx_supp_rates[16];
u32 tx_supp_rates_len;
- struct rtw_ieee80211_ht_cap ht_cap;
+ struct ieee80211_ht_cap ht_cap;
u64 rx_pkts;
u64 rx_bytes;
u64 rx_drops;
@@ -291,62 +291,6 @@ struct sta_data {
/* this is stolen from ipw2200 driver */
#define IEEE_IBSS_MAC_HASH_SIZE 31
-struct ieee_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-struct rtw_ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u8 addr4[ETH_ALEN];
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
-
-struct rtw_ieee80211_hdr_qos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u8 addr4[ETH_ALEN];
- u16 qc;
-} __packed;
-
-struct rtw_ieee80211_hdr_3addr_qos {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
-} __packed;
-
-struct eapol {
- u8 snap[6];
- u16 ethertype;
- u8 version;
- u8 type;
- u16 length;
-} __packed;
-
enum eap_type {
EAP_PACKET = 0,
EAPOL_START,
@@ -552,83 +496,12 @@ struct ieee80211_snap_hdr {
#define IEEE80211_NUM_CCK_RATES 4
#define IEEE80211_OFDM_SHIFT_MASK_A 4
-/* NOTE: This data is for statistical purposes; not all hardware provides this
- * information for frames received. Not setting these will not cause
- * any adverse affects. */
-struct ieee80211_rx_stats {
- /* u32 mac_time[2]; */
- s8 rssi;
- u8 signal;
- u8 noise;
- u8 received_channel;
- u16 rate; /* in 100 kbps */
- /* u8 control; */
- u8 mask;
- u8 freq;
- u16 len;
-};
-
/* IEEE 802.11 requires that STA supports concurrent reception of at least
* three fragmented frames. This define can be increased to support more
* concurrent frames, but it should be noted that each entry can consume about
* 2 kB of RAM and increasing cache size will slow down frame reassembly. */
#define IEEE80211_FRAG_CACHE_LEN 4
-struct ieee80211_frag_entry {
- u32 first_frag_time;
- uint seq;
- uint last_frag;
- uint qos; /* jackson */
- uint tid; /* jackson */
- struct sk_buff *skb;
- u8 src_addr[ETH_ALEN];
- u8 dst_addr[ETH_ALEN];
-};
-
-struct ieee80211_stats {
- uint tx_unicast_frames;
- uint tx_multicast_frames;
- uint tx_fragments;
- uint tx_unicast_octets;
- uint tx_multicast_octets;
- uint tx_deferred_transmissions;
- uint tx_single_retry_frames;
- uint tx_multiple_retry_frames;
- uint tx_retry_limit_exceeded;
- uint tx_discards;
- uint rx_unicast_frames;
- uint rx_multicast_frames;
- uint rx_fragments;
- uint rx_unicast_octets;
- uint rx_multicast_octets;
- uint rx_fcs_errors;
- uint rx_discards_no_buffer;
- uint tx_discards_wrong_sa;
- uint rx_discards_undecryptable;
- uint rx_message_in_msg_fragments;
- uint rx_message_in_bad_msg_fragments;
-};
-
-struct ieee80211_softmac_stats {
- uint rx_ass_ok;
- uint rx_ass_err;
- uint rx_probe_rq;
- uint tx_probe_rs;
- uint tx_beacons;
- uint rx_auth_rq;
- uint rx_auth_rs_ok;
- uint rx_auth_rs_err;
- uint tx_auth_rq;
- uint no_auth_rs;
- uint no_ass_rs;
- uint tx_ass_rq;
- uint rx_ass_rq;
- uint tx_probe_rq;
- uint reassoc;
- uint swtxstop;
- uint swtxawake;
-};
-
#define SEC_KEY_1 (1<<0)
#define SEC_KEY_2 (1<<1)
#define SEC_KEY_3 (1<<2)
@@ -648,42 +521,6 @@ struct ieee80211_softmac_stats {
#define WEP_KEYS 4
#define WEP_KEY_LEN 13
-struct ieee80211_security {
- u16 active_key:2,
- enabled:1,
- auth_mode:2,
- auth_algo:4,
- unicast_uses_group:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][WEP_KEY_LEN];
- u8 level;
- u16 flags;
-} __packed;
-
-/*
-
- 802.11 data frame from AP
-
- ,-------------------------------------------------------------------.
-Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
- |------|------|---------|---------|---------|------|---------|------|
-Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
- | | tion | (BSSID) | | | ence | data | |
- `-------------------------------------------------------------------'
-
-Total: 28-2340 bytes
-
-*/
-
-struct ieee80211_header_data {
- u16 frame_ctl;
- u16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
- u16 seq_ctrl;
-};
-
#define BEACON_PROBE_SSID_ID_POSITION 12
/* Management Frame Information Element Types */
@@ -700,81 +537,9 @@ struct ieee80211_header_data {
#define MFIE_TYPE_RATES_EX 50
#define MFIE_TYPE_GENERIC 221
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
-} __packed;
-
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[0];
-} __packed;
-
-/*
- * These are the data types that can make up management packets
- *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
- u8 current_ap[ETH_ALEN];
- u16 listen_interval;
- struct {
- u16 association_id:14, reserved:2;
- } __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
-*/
-
#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
#define IEEE80211_DEFAULT_BASIC_RATE 10
-struct ieee80211_authentication {
- struct ieee80211_header_data header;
- u16 algorithm;
- u16 transaction;
- u16 status;
- /* struct ieee80211_info_element_hdr info_element; */
-} __packed;
-
-struct ieee80211_probe_response {
- struct ieee80211_header_data header;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 capability;
- struct ieee80211_info_element info_element;
-} __packed;
-
-struct ieee80211_probe_request {
- struct ieee80211_header_data header;
-} __packed;
-
-struct ieee80211_assoc_request_frame {
- struct rtw_ieee80211_hdr_3addr header;
- u16 capability;
- u16 listen_interval;
- struct ieee80211_info_element_hdr info_element;
-} __packed;
-
-struct ieee80211_assoc_response_frame {
- struct rtw_ieee80211_hdr_3addr header;
- u16 capability;
- u16 status;
- u16 aid;
-} __packed;
-
-struct ieee80211_txb {
- u8 nr_frags;
- u8 encrypted;
- u16 reserved;
- u16 frag_size;
- u16 payload_size;
- struct sk_buff *fragments[0];
-};
-
-
/* SWEEP TABLE ENTRIES NUMBER*/
#define MAX_SWEEP_TAB_ENTRIES 42
#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
@@ -872,11 +637,6 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
#define CFG_IEEE80211_RESERVE_FCS (1<<0)
#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
-struct tx_pending {
- int frag;
- struct ieee80211_txb *txb;
-};
-
#define MAXTID 16
#define IEEE_A (1<<0)
@@ -1096,20 +856,8 @@ enum secondary_ch_offset {
SCA = 1, /* secondary channel above */
SCB = 3, /* secondary channel below */
};
-u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset);
-u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset);
-u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
- u8 new_ch, u8 ch_switch_cnt);
-u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len,
- u8 secondary_ch_offset);
-u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
- u8 flags, u16 reason, u16 precedence);
u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
-u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui,
- u8 oui_len, u8 *ie, uint *ielen);
-int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset,
- u8 eid, u8 *oui, u8 oui_len);
void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
@@ -1133,19 +881,6 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
u8 *buf_content, uint *len_content);
-/**
- * for_each_ie - iterate over continuous IEs
- * @ie:
- * @buf:
- * @buf_len:
- */
-#define for_each_ie(ie, buf, buf_len) \
- for (ie = (void *)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; \
- ie = (void *)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
-
-void dump_ies(u8 *buf, u32 buf_len);
-void dump_wps_ie(u8 *ie, u32 ie_len);
-
uint rtw_get_rateset_len(u8 *rateset);
struct registry_priv;
@@ -1167,8 +902,4 @@ void rtw_macaddr_cfg(u8 *mac_addr);
u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
unsigned char *MCS_rate);
-int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category,
- u8 *action);
-const char *action_public_str(u8 action);
-
#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index dbebf17f36d3..805f52e108b2 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -315,22 +315,6 @@ enum odm_ability {
ODM_PSD2AFH = 0x00000800
};
-/* 2011/20/20 MH For MP driver RT_WLAN_STA = struct sta_info */
-/* Please declare below ODM relative info in your STA info structure. */
-
-struct odm_sta_info {
- /* Driver Write */
- bool bUsed; /* record the sta status link or not? */
- u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
-
- /* ODM Write */
- /* 1 PHY_STATUS_INFO */
- u8 RSSI_Path[4]; /* */
- u8 RSSI_Ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-};
-
/* 2011/10/20 MH Define Common info enum for all team. */
enum odm_common_info_def {
@@ -740,8 +724,6 @@ struct odm_dm_struct {
u32 SupportICType;
/* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
u8 CutVersion;
- /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
- u8 RFType;
/* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */
u8 BoardType;
/* with external LNA NO/Yes = 0/1 */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 54fca79827e3..dbd7dc4f87dd 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -34,7 +34,6 @@ int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
u16 rtw_recv_select_queue(struct sk_buff *skb);
-void rtw_proc_remove_one(struct net_device *dev);
int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
void rtw_ips_dev_unload(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 5475956c5ee5..9047b6dec9ed 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -35,7 +35,7 @@
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/io.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <linux/sem.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
@@ -44,7 +44,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
-#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
#include <linux/interrupt.h> /* for struct tasklet_struct */
#include <linux/ip.h>
#include <linux/kthread.h>
@@ -78,16 +77,12 @@ u8 *_rtw_malloc(u32 sz);
void *rtw_malloc2d(int h, int w, int size);
-u32 _rtw_down_sema(struct semaphore *sema);
-
void _rtw_init_queue(struct __queue *pqueue);
struct rtw_netdev_priv_indicator {
void *priv;
- u32 sizeof_priv;
};
-struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
- void *old_priv);
+struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv);
#define rtw_netdev_priv(netdev) \
(((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
diff --git a/drivers/staging/rtl8188eu/include/phy.h b/drivers/staging/rtl8188eu/include/phy.h
index 9a9ab82a8ed3..cd387e998574 100644
--- a/drivers/staging/rtl8188eu/include/phy.h
+++ b/drivers/staging/rtl8188eu/include/phy.h
@@ -11,17 +11,13 @@ bool rtl88eu_phy_bb_config(struct adapter *adapt);
u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask);
void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data);
-u32 phy_query_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
+u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
u32 reg_addr, u32 bit_mask);
void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
u32 reg_addr, u32 bit_mask, u32 data);
void phy_set_tx_power_level(struct adapter *adapt, u8 channel);
-void phy_set_bw_mode(struct adapter *adapt, enum ht_channel_width bandwidth,
- unsigned char offset);
-void phy_sw_chnl(struct adapter *adapt, u8 channel);
-
void rtl88eu_dm_txpower_track_adjust(struct odm_dm_struct *dm_odm,
u8 type, u8 *dir, u32 *out_write);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index cad31587c30a..7550d58f6b5b 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -26,13 +26,9 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv);
s32 rtw_recv_entry(struct recv_frame *precv_frame);
int rtw_recv_indicatepkt(struct adapter *adapter,
struct recv_frame *recv_frame);
-void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
-int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void rtw_free_recv_priv(struct recv_priv *precvpriv);
-
void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index 4d7d804658c2..042b4ec656c8 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -49,12 +49,6 @@ enum RTL8188E_H2C_CMD_ID {
H2C_RESET_TSF = 0xc0,
};
-struct cmd_msg_parm {
- u8 eid; /* element id */
- u8 sz; /* sz */
- u8 buf[6];
-};
-
enum {
PWRS
};
@@ -67,15 +61,6 @@ struct setpwrmode_parm {
u8 PwrState;/* AllON(0x0c),RFON(0x04),RFOFF(0x00) */
};
-struct H2C_SS_RFOFF_PARAM {
- u8 ROFOn; /* 1: on, 0:off */
- u16 gpio_period; /* unit: 1024 us */
-} __packed;
-
-struct joinbssrpt_parm {
- u8 OpMode; /* RT_MEDIA_STATUS */
-};
-
struct rsvdpage_loc {
u8 LocProbeRsp;
u8 LocPsPoll;
@@ -84,26 +69,9 @@ struct rsvdpage_loc {
u8 LocBTQosNull;
};
-struct P2P_PS_Offload_t {
- u8 Offload_En:1;
- u8 role:1; /* 1: Owner, 0: Client */
- u8 CTWindow_En:1;
- u8 NoA0_En:1;
- u8 NoA1_En:1;
- u8 AllStaSleep:1; /* Only valid in Owner */
- u8 discovery:1;
- u8 rsvd:1;
-};
-
-struct P2P_PS_CTWPeriod_t {
- u8 CTWPeriod; /* TU */
-};
-
/* host message to firmware cmd */
void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
- u8 rssi_level);
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 4190112a50bf..c0ffd98d7617 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -45,12 +45,9 @@ struct dm_priv {
u8 PowerIndex_backup[6];
};
-void rtl8188e_init_dm_priv(struct adapter *adapt);
void rtl8188e_InitHalDm(struct adapter *adapt);
-void rtl8188e_HalDmWatchDog(struct adapter *adapt);
void AntDivCompare8188E(struct adapter *adapt, struct wlan_bssid_ex *dst,
struct wlan_bssid_ex *src);
-u8 AntDivBeforeLink8188E(struct adapter *adapt);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 9dd5c293a54b..7c81e3f3d12e 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -202,7 +202,6 @@ struct hal_data_8188e {
/* rf_ctrl */
u8 rf_chip;
- u8 rf_type;
u8 NumTotalRFPath;
u8 BoardType;
@@ -351,10 +350,6 @@ struct hal_data_8188e {
u8 UsbRxAggPageTimeout;
};
-#define GET_HAL_DATA(__pAdapter) \
- ((struct hal_data_8188e *)((__pAdapter)->HalData))
-#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
-
/* rtl8188e_hal_init.c */
void _8051Reset88E(struct adapter *padapter);
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter);
@@ -385,8 +380,6 @@ void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
-void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc);
-
/* register */
void rtl8188e_start_thread(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
index fca6d8c81e90..d1ad6aa8c1e0 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -22,8 +22,6 @@
/* */
/* Interface to manipulate LED objects. */
/* */
-void rtl8188eu_InitSwLeds(struct adapter *padapter);
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 54048bc826e5..80832a5f0732 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -51,12 +51,11 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
-void rtl8188eu_free_recv_priv(struct adapter *padapter);
void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
-void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
+void rtl8188e_process_phy_info(struct adapter *padapter,
+ struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
void update_recvframe_attrib_88e(struct recv_frame *fra,
struct recv_stat *stat);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 65a63df2077f..66205b782721 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -154,14 +154,11 @@ struct txrpt_ccx_88e {
void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
-s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
-s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
#define hal_xmit_handler rtl8188eu_xmit_buf_handler
void rtl8188eu_xmit_tasklet(void *priv);
s32 rtl8188eu_xmitframe_complete(struct adapter *padapter,
- struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
+ struct xmit_priv *pxmitpriv);
void dump_txrpt_ccx_88e(void *buf);
void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index b820684bc3fe..e8dd6d4407aa 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -50,7 +50,6 @@ void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta);
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason);
int rtw_sta_flush(struct adapter *padapter);
-int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset);
void start_ap_mode(struct adapter *padapter);
void stop_ap_mode(struct adapter *padapter);
#endif /* end of CONFIG_88EU_AP_MODE */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 08ca59217cb7..18a6530c9dde 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -39,8 +39,8 @@ struct cmd_obj {
};
struct cmd_priv {
- struct semaphore cmd_queue_sema;
- struct semaphore terminate_cmdthread_sema;
+ struct completion cmd_queue_comp;
+ struct completion terminate_cmdthread_comp;
struct __queue cmd_queue;
u8 cmdthd_running;
struct adapter *padapter;
@@ -210,34 +210,6 @@ struct set_assocsta_rsp {
};
/*
- Caller Ad-Hoc/AP
-
- Command mode
-
- This is to force fw to del an sta_data entry per driver's request
-
- FW will invalidate the cam entry associated with it.
-
-*/
-struct del_assocsta_parm {
- u8 addr[ETH_ALEN];
-};
-
-/*
-Caller Mode: AP/Ad-HoC(M)
-
-Notes: To notify fw that given staid has changed its power state
-
-Command Mode
-
-*/
-struct setstapwrstate_parm {
- u8 staid;
- u8 status;
- u8 hwaddr[6];
-};
-
-/*
Notes: This command is used for H2C/C2H loopback testing
mac[0] == 0
@@ -312,8 +284,6 @@ struct SetChannelPlan_param {
u8 channel_plan;
};
-#define GEN_CMD_CODE(cmd) cmd ## _CMD_
-
/*
Result:
@@ -376,42 +346,42 @@ struct _cmd_callback {
};
enum rtw_h2c_cmd {
- GEN_CMD_CODE(_JoinBss),
- GEN_CMD_CODE(_DisConnect),
- GEN_CMD_CODE(_CreateBss),
- GEN_CMD_CODE(_SetOpMode),
- GEN_CMD_CODE(_SiteSurvey),
- GEN_CMD_CODE(_SetAuth),
- GEN_CMD_CODE(_SetKey),
- GEN_CMD_CODE(_SetStaKey),
- GEN_CMD_CODE(_SetAssocSta),
- GEN_CMD_CODE(_AddBAReq),
- GEN_CMD_CODE(_SetChannel),
- GEN_CMD_CODE(_TX_Beacon),
- GEN_CMD_CODE(_Set_MLME_EVT),
- GEN_CMD_CODE(_Set_Drv_Extra),
- GEN_CMD_CODE(_SetChannelPlan),
+ _JoinBss_CMD_,
+ _DisConnect_CMD_,
+ _CreateBss_CMD_,
+ _SetOpMode_CMD_,
+ _SiteSurvey_CMD_,
+ _SetAuth_CMD_,
+ _SetKey_CMD_,
+ _SetStaKey_CMD_,
+ _SetAssocSta_CMD_,
+ _AddBAReq_CMD_,
+ _SetChannel_CMD_,
+ _TX_Beacon_CMD_,
+ _Set_MLME_EVT_CMD_,
+ _Set_Drv_Extra_CMD_,
+ _SetChannelPlan_CMD_,
MAX_H2CCMD
};
#ifdef _RTW_CMD_C_
static struct _cmd_callback rtw_cmd_callback[] = {
- {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback},
- {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback},
- {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
- {GEN_CMD_CODE(_SetOpMode), NULL},
- {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback},
- {GEN_CMD_CODE(_SetAuth), NULL},
- {GEN_CMD_CODE(_SetKey), NULL},
- {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
- {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
- {GEN_CMD_CODE(_AddBAReq), NULL},
- {GEN_CMD_CODE(_SetChannel), NULL},
- {GEN_CMD_CODE(_TX_Beacon), NULL},
- {GEN_CMD_CODE(_Set_MLME_EVT), NULL},
- {GEN_CMD_CODE(_Set_Drv_Extra), NULL},
- {GEN_CMD_CODE(_SetChannelPlan), NULL},
+ {_JoinBss_CMD_, &rtw_joinbss_cmd_callback},
+ {_DisConnect_CMD_, &rtw_disassoc_cmd_callback},
+ {_CreateBss_CMD_, &rtw_createbss_cmd_callback},
+ {_SetOpMode_CMD_, NULL},
+ {_SiteSurvey_CMD_, &rtw_survey_cmd_callback},
+ {_SetAuth_CMD_, NULL},
+ {_SetKey_CMD_, NULL},
+ {_SetStaKey_CMD_, &rtw_setstaKey_cmdrsp_callback},
+ {_SetAssocSta_CMD_, &rtw_setassocsta_cmdrsp_callback},
+ {_AddBAReq_CMD_, NULL},
+ {_SetChannel_CMD_, NULL},
+ {_TX_Beacon_CMD_, NULL},
+ {_Set_MLME_EVT_CMD_, NULL},
+ {_Set_Drv_Extra_CMD_, NULL},
+ {_SetChannelPlan_CMD_, NULL},
};
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 7ed4cada7efa..95590a1a7b1b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -129,133 +129,12 @@ int proc_get_read_reg(char *page, char **start,
int proc_set_read_reg(struct file *file, const char __user *buffer,
unsigned long count, void *data);
-int proc_get_fwstate(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_sec_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_mlmext_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_qos_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_ht_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_rf_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_ap_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
int proc_get_adapter_state(char *page, char **start,
off_t offset, int count,
int *eof, void *data);
-int proc_get_trx_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump4(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-#ifdef CONFIG_88EU_AP_MODE
-
-int proc_get_all_sta_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-#endif
-
int proc_get_best_channel(char *page, char **start,
off_t offset, int count,
int *eof, void *data);
-int proc_get_rx_signal(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rx_signal(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_ht_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_ht_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_cbw40_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_ampdu_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_rx_stbc(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rx_stbc(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_two_path_rssi(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rssi_disp(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rssi_disp(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 9bfb10c302b5..168c12d3c0b4 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -34,16 +34,6 @@
#define EFUSE_WIFI 0
#define EFUSE_BT 1
-enum _EFUSE_DEF_TYPE {
- TYPE_EFUSE_MAX_SECTION = 0,
- TYPE_EFUSE_REAL_CONTENT_LEN = 1,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
- TYPE_EFUSE_MAP_LEN = 4,
- TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
- TYPE_EFUSE_CONTENT_LEN_BANK = 6,
-};
-
/* E-Fuse */
#define EFUSE_MAP_SIZE 512
#define EFUSE_MAX_SIZE 256
@@ -95,8 +85,6 @@ struct efuse_hal {
};
u8 Efuse_CalculateWordCnts(u8 word_en);
-void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
- void *out);
u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data);
u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 5c34e567d341..1c5ebde97091 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -18,7 +18,7 @@
#include <osdep_service.h>
#include <wlan_bssdef.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <linux/sem.h>
/*
@@ -71,12 +71,6 @@ struct stadel_event {
int mac_id;
};
-struct addba_event {
- unsigned int tid;
-};
-
-#define GEN_EVT_CODE(event) event ## _EVT_
-
struct fwevent {
u32 parmsize;
void (*event_callback)(struct adapter *dev, u8 *pbuf);
@@ -84,21 +78,6 @@ struct fwevent {
#define C2HEVENT_SZ 32
-struct event_node {
- unsigned char *node;
- unsigned char evt_code;
- unsigned short evt_sz;
- int *caller_ff_tail;
- int caller_ff_sz;
-};
-
-struct c2hevent_queue {
- int head;
- int tail;
- struct event_node nodes[C2HEVENT_SZ];
- unsigned char seq;
-};
-
#define NETWORK_QUEUE_SZ 4
struct network_queue {
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index b45483fd069f..d842eade7f57 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -15,16 +15,11 @@
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
-#include <osdep_service.h>
-#include "wifi.h"
+#include <linux/ieee80211.h>
struct ht_priv {
u32 ht_option;
u32 ampdu_enable;/* for enable Tx A-MPDU */
- u32 tx_amsdu_enable;/* for enable Tx A-MSDU */
- u32 tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
- u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz,
- * updated when join_callback. */
u8 bwmode;/* */
u8 ch_offset;/* PRIME_CHNL_OFFSET */
u8 sgi;/* short GI */
@@ -33,7 +28,7 @@ struct ht_priv {
u8 agg_enable_bitmap;
u8 candidate_tid_bitmap;
- struct rtw_ieee80211_ht_cap ht_cap;
+ struct ieee80211_ht_cap ht_cap;
};
#endif /* _RTL871X_HT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index 3a652df4b26c..a6b1c854a061 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -69,15 +69,6 @@ enum oid_type {
SET_OID
};
-struct oid_funs_node {
- unsigned int oid_start; /* the starting number for OID */
- unsigned int oid_end; /* the ending number for OID */
- struct oid_obj_priv *node_array;
- unsigned int array_sz; /* the size of node_array */
- int query_counter; /* count the number of query hits for this segment */
- int set_counter; /* count the number of set hits for this segment */
-};
-
struct oid_par_priv {
void *adapter_context;
NDIS_OID oid;
@@ -89,12 +80,6 @@ struct oid_par_priv {
u32 dbg;
};
-struct oid_obj_priv {
- unsigned char dbg; /* 0: without OID debug message
- * 1: with OID debug message */
- int (*oidfuns)(struct oid_par_priv *poid_par_priv);
-};
-
#if defined(_RTW_MP_IOCTL_C_)
static int oid_null_function(struct oid_par_priv *poid_par_priv) {
return NDIS_STATUS_SUCCESS;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 5d8bce0f58db..9434b869c5e9 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -301,12 +301,10 @@ struct mlme_priv {
u8 *nic_hdl;
- u8 not_indic_disco;
struct list_head *pscanned;
struct __queue free_bss_pool;
struct __queue scanned_queue;
u8 *free_bss_buf;
- u32 num_of_scanned;
struct ndis_802_11_ssid assoc_ssid;
u8 assoc_bssid[6];
@@ -318,10 +316,8 @@ struct mlme_priv {
struct timer_list assoc_timer;
uint assoc_by_bssid;
- uint assoc_by_rssi;
struct timer_list scan_to_timer; /* driver itself handles scan_timeout status. */
- u32 scan_start_time; /* used to evaluate the time spent in scanning */
struct qos_priv qospriv;
@@ -387,17 +383,6 @@ struct mlme_priv {
u32 wps_probe_resp_ie_len;
u32 wps_assoc_resp_ie_len;
- u8 *p2p_beacon_ie;
- u8 *p2p_probe_req_ie;
- u8 *p2p_probe_resp_ie;
- u8 *p2p_go_probe_resp_ie; /* for GO */
- u8 *p2p_assoc_req_ie;
-
- u32 p2p_beacon_ie_len;
- u32 p2p_probe_req_ie_len;
- u32 p2p_probe_resp_ie_len;
- u32 p2p_go_probe_resp_ie_len; /* for GO */
- u32 p2p_assoc_req_ie_len;
spinlock_t bcn_update_lock;
u8 update_bcn;
#endif /* if defined (CONFIG_88EU_AP_MODE) */
@@ -500,27 +485,6 @@ static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
spin_unlock_bh(&pmlmepriv->lock);
}
-static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned++;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned--;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
-{
- spin_lock_bh(&pmlmepriv->lock);
- pmlmepriv->num_of_scanned = val;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
u16 rtw_get_capability(struct wlan_bssid_ex *bss);
void rtw_update_scanned_network(struct adapter *adapter,
struct wlan_bssid_ex *target);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 27382ff24a84..1b1caaf583c9 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -349,7 +349,7 @@ struct mlme_ext_info {
struct ADDBA_request ADDBA_req;
struct WMM_para_element WMM_param;
- struct HT_caps_element HT_caps;
+ struct ieee80211_ht_cap HT_caps;
struct HT_info_element HT_info;
struct wlan_bssid_ex network;/* join network or bss_network,
* if in ap mode, it is the same
@@ -529,12 +529,12 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie,
void update_sta_info(struct adapter *padapter, struct sta_info *psta);
unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz);
unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz);
-unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps);
+unsigned int update_MSC_rate(struct ieee80211_ht_cap *pHT_caps);
void Update_RA_Entry(struct adapter *padapter, u32 mac_id);
void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
unsigned char get_highest_rate_idx(u32 mask);
-int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
+int support_short_GI(struct adapter *padapter, struct ieee80211_ht_cap *caps);
unsigned int is_ap_in_tkip(struct adapter *padapter);
unsigned int is_ap_in_wep(struct adapter *padapter);
unsigned int should_forbid_n_rate(struct adapter *padapter);
@@ -562,8 +562,6 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
u16 tid, int try_cnt, int wait_ms);
int issue_deauth(struct adapter *padapter, unsigned char *da,
unsigned short reason);
-void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch,
- u8 ch_offset);
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
unsigned int send_beacon(struct adapter *padapter);
@@ -627,27 +625,24 @@ u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf);
u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf);
u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
-#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl},
-#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
-
#ifdef _RTW_CMD_C_
static struct cmd_hdl wlancmds[] = {
- GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), tx_beacon_hdl)
- GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl)
- GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl)
+ {sizeof(struct wlan_bssid_ex), join_cmd_hdl},
+ {sizeof(struct disconnect_parm), disconnect_hdl},
+ {sizeof(struct wlan_bssid_ex), createbss_hdl},
+ {sizeof(struct setopmode_parm), setopmode_hdl},
+ {sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl},
+ {sizeof(struct setauth_parm), setauth_hdl},
+ {sizeof(struct setkey_parm), setkey_hdl},
+ {sizeof(struct set_stakey_parm), set_stakey_hdl},
+ {sizeof(struct set_assocsta_parm), NULL},
+ {sizeof(struct addBaReq_parm), add_ba_hdl},
+ {sizeof(struct set_ch_parm), set_ch_hdl},
+ {sizeof(struct wlan_bssid_ex), tx_beacon_hdl},
+ {0, mlme_evt_hdl},
+ {0, rtw_drvextra_cmd_hdl},
+ {sizeof(struct SetChannelPlan_param), set_chplan_hdl}
};
#endif
@@ -669,32 +664,32 @@ void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
enum rtw_c2h_event {
- GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
- GEN_EVT_CODE(_Read_BBREG),
- GEN_EVT_CODE(_Read_RFREG),
- GEN_EVT_CODE(_Read_EEPROM),
- GEN_EVT_CODE(_Read_EFUSE),
- GEN_EVT_CODE(_Read_CAM), /*5*/
- GEN_EVT_CODE(_Get_BasicRate),
- GEN_EVT_CODE(_Get_DataRate),
- GEN_EVT_CODE(_Survey), /*8*/
- GEN_EVT_CODE(_SurveyDone), /*9*/
-
- GEN_EVT_CODE(_JoinBss), /*10*/
- GEN_EVT_CODE(_AddSTA),
- GEN_EVT_CODE(_DelSTA),
- GEN_EVT_CODE(_AtimDone),
- GEN_EVT_CODE(_TX_Report),
- GEN_EVT_CODE(_CCX_Report), /*15*/
- GEN_EVT_CODE(_DTM_Report),
- GEN_EVT_CODE(_TX_Rate_Statistics),
- GEN_EVT_CODE(_C2HLBK),
- GEN_EVT_CODE(_FWDBG),
- GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
- GEN_EVT_CODE(_ADDBA),
- GEN_EVT_CODE(_C2HBCN),
- GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
- GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE,
+ _Read_MACREG_EVT_ = 0, /*0*/
+ _Read_BBREG_EVT_,
+ _Read_RFREG_EVT_,
+ _Read_EEPROM_EVT_,
+ _Read_EFUSE_EVT_,
+ _Read_CAM_EVT_, /*5*/
+ _Get_BasicRate_EVT_,
+ _Get_DataRate_EVT_,
+ _Survey_EVT_, /*8*/
+ _SurveyDone_EVT_, /*9*/
+
+ _JoinBss_EVT_, /*10*/
+ _AddSTA_EVT_,
+ _DelSTA_EVT_,
+ _AtimDone_EVT_,
+ _TX_Report_EVT_,
+ _CCX_Report_EVT_, /*15*/
+ _DTM_Report_EVT_,
+ _TX_Rate_Statistics_EVT_,
+ _C2HLBK_EVT_,
+ _FWDBG_EVT_,
+ _C2HFEEDBACK_EVT_, /*20*/
+ _ADDBA_EVT_,
+ _C2HBCN_EVT_,
+ _ReportPwrState_EVT_, /* filen: only for PCIE, USB */
+ _CloseRF_EVT_, /* filen: only for PCIE,
* work around ASPM */
MAX_C2HEVT
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index 9680e2eab62f..18a9e744fcbe 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -92,21 +92,6 @@ struct reportpwrstate_parm {
unsigned short rsvd;
};
-static inline void _init_pwrlock(struct semaphore *plock)
-{
- sema_init(plock, 1);
-}
-
-static inline void _enter_pwrlock(struct semaphore *plock)
-{
- _rtw_down_sema(plock);
-}
-
-static inline void _exit_pwrlock(struct semaphore *plock)
-{
- up(plock);
-}
-
#define LPS_DELAY_TIME 1*HZ /* 1 sec */
#define EXE_PWR_NONE 0x01
@@ -157,7 +142,7 @@ enum { /* for ips_mode */
};
struct pwrctrl_priv {
- struct semaphore lock;
+ struct mutex mutex_lock;
volatile u8 rpwm; /* requested power state for fw */
volatile u8 cpwm; /* fw current power state. updated when
* 1. read from HCPWM 2. driver lowers power level */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index b0373b6216d6..49d973881a04 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -65,13 +65,6 @@ struct stainfo_rxcache {
*/
};
-struct smooth_rssi_data {
- u32 elements[100]; /* array to store values */
- u32 index; /* index to current array to store */
- u32 total_num; /* num of valid elements */
- u32 total_val; /* sum of valid elements */
-};
-
struct signal_stat {
u8 update_req; /* used to indicate */
u8 avg_val; /* avg of valid elements */
@@ -246,7 +239,6 @@ struct recv_buf {
struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
- struct sk_buff *pkt_newalloc;
struct adapter *adapter;
struct rx_pkt_attrib attrib;
uint len;
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index ca1247bce6e3..2663e60bb6c6 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -164,12 +164,6 @@ struct security_priv {
u8 bWepDefaultKeyIdxSet;
};
-struct sha256_state {
- u64 length;
- u32 state[8], curlen;
- u8 buf[64];
-};
-
#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst) \
do { \
switch (psecuritypriv->dot11AuthAlgrthm) { \
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index ce027dfdecc5..4c4ccd564863 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -33,7 +33,6 @@ struct sreset_priv {
#define WIFI_RX_HANG BIT(5)
#define WIFI_IF_NOT_EXIST BIT(6)
-void sreset_init_value(struct adapter *padapter);
u8 sreset_get_wifi_status(struct adapter *padapter);
void sreset_set_wifi_error_status(struct adapter *padapter, u32 status);
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index a0853bab3edb..dd6b7a9a8d4a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -113,7 +113,6 @@ struct pkt_attrib {
u8 dhcp_pkt;
u16 ether_type;
u16 seqnum;
- u16 pkt_hdrlen; /* the original 802.3 pkt header len */
u16 hdrlen; /* the WLAN Header Len */
u32 pktlen; /* the original 802.3 pkt raw_data len (not include
* ether_hdr data) */
@@ -256,15 +255,8 @@ struct hw_txqueue {
int ac_tag;
};
-struct agg_pkt_info {
- u16 offset;
- u16 pkt_len;
-};
-
struct xmit_priv {
spinlock_t lock;
- struct semaphore xmit_sema;
- struct semaphore terminate_xmitthread_sema;
struct __queue be_pending;
struct __queue bk_pending;
struct __queue vi_pending;
@@ -289,7 +281,6 @@ struct xmit_priv {
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength
* from large to small. it's value is 0->vo,
* 1->vi, 2->be, 3->bk. */
- struct semaphore tx_retevt;/* all tx return event; */
u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
/* per AC pending irp */
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
deleted file mode 100644
index b1bf07a9013e..000000000000
--- a/drivers/staging/rtl8188eu/include/usb_hal.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __USB_HAL_H__
-#define __USB_HAL_H__
-
-void rtl8188eu_set_hal_ops(struct adapter *padapter);
-#define hal_set_hal_ops rtl8188eu_set_hal_ops
-
-#endif /* __USB_HAL_H__ */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 220733314f8b..78d9b6e035bf 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -47,21 +47,6 @@
#define usb_read_interrupt_complete(purb, regs) \
usb_read_interrupt_complete(purb)
-static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
- int buf_len)
-{
- u8 rst = true;
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
-
- if (pdvobjpriv->ishighspeed)
- rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
- true : false;
- else
- rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
- true : false;
- return rst;
-}
-
unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr);
u8 usb_read8(struct adapter *adapter, u32 addr);
@@ -75,7 +60,7 @@ int usb_write8(struct adapter *adapter, u32 addr, u8 val);
int usb_write16(struct adapter *adapter, u32 addr, u16 val);
int usb_write32(struct adapter *adapter, u32 addr, u32 val);
-u32 usb_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 usb_write_port(struct adapter *adapter, u32 addr, u32 cnt, struct xmit_buf *pmem);
void usb_write_port_cancel(struct adapter *adapter);
#endif
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index e7c512183619..9e08e6842eca 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -508,22 +508,6 @@ struct rtw_ieee80211_bar {
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
- /**
- * struct rtw_ieee80211_ht_cap - HT capabilities
- *
- * This structure refers to "HT capabilities element" as
- * described in 802.11n draft section 7.3.2.52
- */
-
-struct rtw_ieee80211_ht_cap {
- unsigned short cap_info;
- unsigned char ampdu_params_info;
- unsigned char supp_mcs_set[16];
- unsigned short extended_ht_cap_info;
- unsigned int tx_BF_cap_info;
- unsigned char antenna_selection_info;
-} __packed;
-
/**
* struct rtw_ieee80211_ht_cap - HT additional information
*
@@ -538,20 +522,6 @@ struct ieee80211_ht_addt_info {
unsigned char basic_set[16];
} __packed;
-struct HT_caps_element {
- union {
- struct {
- __le16 HT_caps_info;
- unsigned char AMPDU_para;
- unsigned char MCS_rate[16];
- unsigned short HT_ext_caps;
- unsigned int Beamforming_caps;
- unsigned char ASEL_caps;
- } HT_cap_element;
- unsigned char HT_cap[26];
- } u;
-} __packed;
-
struct HT_info_element {
unsigned char primary_channel;
unsigned char infos[5];
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 560966cd7dfe..e1931dd04da0 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -123,40 +123,10 @@ enum ndis_802_11_wep_status {
#define NDIS_802_11_AI_RESFI_STATUSCODE 2
#define NDIS_802_11_AI_RESFI_ASSOCIATIONID 4
-struct ndis_802_11_ai_reqfi {
- u16 Capabilities;
- u16 ListenInterval;
- unsigned char CurrentAPAddress[ETH_ALEN];
-};
-
-struct ndis_802_11_ai_resfi {
- u16 Capabilities;
- u16 StatusCode;
- u16 AssociationId;
-};
-
-struct ndis_802_11_assoc_info {
- u32 Length;
- u16 AvailableRequestFixedIEs;
- struct ndis_802_11_ai_reqfi RequestFixedIEs;
- u32 RequestIELength;
- u32 OffsetRequestIEs;
- u16 AvailableResponseFixedIEs;
- struct ndis_802_11_ai_resfi ResponseFixedIEs;
- u32 ResponseIELength;
- u32 OffsetResponseIEs;
-};
-
enum ndis_802_11_reload_def {
Ndis802_11ReloadWEPKeys
};
-struct ndis_802_11_remove_key {
- u32 Length; /* Length */
- u32 KeyIndex;
- unsigned char BSSID[ETH_ALEN];
-};
-
struct ndis_802_11_wep {
u32 Length; /* Length of this structure */
u32 KeyIndex; /* 0 is the per-client key,
@@ -165,12 +135,6 @@ struct ndis_802_11_wep {
u8 KeyMaterial[16];/* variable len depending on above field */
};
-struct ndis_802_11_auth_req {
- u32 Length; /* Length of structure */
- unsigned char Bssid[ETH_ALEN];
- u32 Flags;
-};
-
enum ndis_802_11_status_type {
Ndis802_11StatusType_Authentication,
Ndis802_11StatusType_MediaStreamMode,
@@ -179,10 +143,6 @@ enum ndis_802_11_status_type {
* an upper bound */
};
-struct ndis_802_11_status_ind {
- enum ndis_802_11_status_type StatusType;
-};
-
/* mask for authentication/integrity fields */
#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
@@ -193,21 +153,6 @@ struct ndis_802_11_status_ind {
/* MIC check time, 60 seconds. */
#define MIC_CHECK_TIME 60000000
-struct ndis_802_11_auth_evt {
- struct ndis_802_11_status_ind Status;
- struct ndis_802_11_auth_req Request[1];
-};
-
-struct ndis_802_11_test {
- u32 Length;
- u32 Type;
- union {
- struct ndis_802_11_auth_evt AuthenticationEvent;
- NDIS_802_11_RSSI RssiTrigger;
- } tt;
-};
-
-
#ifndef Ndis802_11APMode
#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
#endif
@@ -297,32 +242,4 @@ enum UAPSD_MAX_SP {
#define NUM_PRE_AUTH_KEY 16
#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
-/*
-* WPA2
-*/
-
-struct pmkid_candidate {
- unsigned char BSSID[ETH_ALEN];
- u32 Flags;
-};
-
-struct ndis_802_11_pmkid_list {
- u32 Version; /* Version of the structure */
- u32 NumCandidates; /* No. of pmkid candidates */
- struct pmkid_candidate CandidateList[1];
-};
-
-struct ndis_802_11_auth_encrypt {
- enum ndis_802_11_auth_mode AuthModeSupported;
- enum ndis_802_11_wep_status EncryptStatusSupported;
-};
-
-struct ndis_802_11_cap {
- u32 Length;
- u32 Version;
- u32 NoOfPMKIDs;
- u32 NoOfAuthEncryptPairsSupported;
- struct ndis_802_11_auth_encrypt AuthenticationEncryptionSupported[1];
-};
-
#endif /* ifndef WLAN_BSSDEF_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 5672f014cc46..4de9dbc93380 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -132,12 +132,15 @@ static char *translate_scan(struct adapter *padapter,
p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
if (p && ht_ielen > 0) {
- struct rtw_ieee80211_ht_cap *pht_capie;
+ struct ieee80211_ht_cap *pht_capie;
ht_cap = true;
- pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
- memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
- bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
- short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
+ pht_capie = (struct ieee80211_ht_cap *)(p + 2);
+ memcpy(&mcs_rate, pht_capie->mcs.rx_mask, 2);
+ bw_40MHz = !!(le16_to_cpu(pht_capie->cap_info) &
+ IEEE80211_HT_CAP_SUP_WIDTH);
+ short_GI = !!(le16_to_cpu(pht_capie->cap_info) &
+ (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40));
}
/* Add the protocol name */
@@ -400,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
- if (pwep == NULL) {
+ if (!pwep) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
goto exit;
}
@@ -441,7 +444,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE)) { /* sta mode */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta == NULL) {
+ if (!psta) {
;
} else {
if (strcmp(param->u.crypt.alg, "none") != 0)
@@ -476,7 +479,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
}
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta == NULL) {
+ if (!pbcmc_sta) {
;
} else {
/* Jeff: don't disable ieee8021x_blocked while clearing key */
@@ -502,9 +505,9 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
- if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
+ if ((ielen > MAX_WPA_IE_LEN) || (!pie)) {
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (pie == NULL)
+ if (!pie)
return ret;
else
return -EINVAL;
@@ -512,7 +515,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
if (ielen) {
buf = kmemdup(pie, ielen, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
ret = -ENOMEM;
goto exit;
}
@@ -1049,7 +1052,7 @@ static int rtw_wx_set_mlme(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- if (mlme == NULL)
+ if (!mlme)
return -1;
DBG_88E("%s\n", __func__);
@@ -1896,7 +1899,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
param_len = sizeof(struct ieee_param) + pext->key_len;
param = (struct ieee_param *)rtw_malloc(param_len);
- if (param == NULL)
+ if (!param)
return -1;
memset(param, 0, param_len);
@@ -2061,7 +2064,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
}
param = (struct ieee_param *)rtw_malloc(p->length);
- if (param == NULL) {
+ if (!param) {
ret = -ENOMEM;
goto out;
}
@@ -2254,13 +2257,13 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
}
}
- if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL)) {
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (!psta)) {
/* todo:clear default encryption keys */
DBG_88E("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
goto exit;
}
- if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL)) {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (!psta)) {
DBG_88E("r871x_set_encryption, crypt.alg = WEP\n");
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
@@ -2274,7 +2277,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
- if (pwep == NULL) {
+ if (!pwep) {
DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n");
goto exit;
}
@@ -2528,7 +2531,8 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
if (WLAN_STA_HT&flags) {
psta->htpriv.ht_option = true;
psta->qos_option = 1;
- memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ memcpy(&psta->htpriv.ht_cap, &param->u.add_sta.ht_cap,
+ sizeof(struct ieee80211_ht_cap));
} else {
psta->htpriv.ht_option = false;
}
@@ -2624,7 +2628,8 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
(psta->ht_20mhz_set << 5));
psta_data->tx_supp_rates_len = psta->bssratelen;
memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
- memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ memcpy(&psta_data->ht_cap,
+ &psta->htpriv.ht_cap, sizeof(struct ieee80211_ht_cap));
psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
psta_data->rx_bytes = psta->sta_stats.rx_bytes;
psta_data->rx_drops = psta->sta_stats.rx_drops;
@@ -2699,7 +2704,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
if (ie_len > 0) {
pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
pmlmepriv->wps_beacon_ie_len = ie_len;
- if (pmlmepriv->wps_beacon_ie == NULL) {
+ if (!pmlmepriv->wps_beacon_ie) {
DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
return -EINVAL;
}
@@ -2734,7 +2739,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
if (ie_len > 0) {
pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_probe_resp_ie_len = ie_len;
- if (pmlmepriv->wps_probe_resp_ie == NULL) {
+ if (!pmlmepriv->wps_probe_resp_ie) {
DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
return -EINVAL;
}
@@ -2764,7 +2769,7 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
if (ie_len > 0) {
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
pmlmepriv->wps_assoc_resp_ie_len = ie_len;
- if (pmlmepriv->wps_assoc_resp_ie == NULL) {
+ if (!pmlmepriv->wps_assoc_resp_ie) {
DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
return -EINVAL;
}
@@ -2866,7 +2871,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
}
param = (struct ieee_param *)rtw_malloc(p->length);
- if (param == NULL) {
+ if (!param) {
ret = -ENOMEM;
goto out;
}
@@ -2976,7 +2981,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
pmlmepriv->wps_probe_req_ie = NULL;
pmlmepriv->wps_probe_req_ie = rtw_malloc(cp_sz);
- if (pmlmepriv->wps_probe_req_ie == NULL) {
+ if (!pmlmepriv->wps_probe_req_ie) {
pr_info("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
ret = -EINVAL;
goto FREE_EXT;
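The ioctl_linux.c hunks above are mostly a checkpatch-style cleanup: each "ptr == NULL" test becomes the shorter "!ptr" form while the surrounding allocation and copy logic stays the same. A minimal sketch of the idiom under the same assumptions (kernel context; the helper name copy_ie is made up for illustration):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper mirroring the rtw_set_wpa_ie() shape above:
 * duplicate a caller-supplied information element, using the
 * "!ptr" style of null test rather than "ptr == NULL".
 */
static u8 *copy_ie(const u8 *pie, unsigned short ielen)
{
	u8 *buf;

	if (!pie || !ielen)
		return NULL;

	buf = kmemdup(pie, ielen, GFP_KERNEL);
	if (!buf)		/* was: if (buf == NULL) */
		return NULL;

	return buf;
}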
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index ae2caff030f1..40691f1ec507 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -23,8 +23,6 @@
#include <rtw_ioctl.h>
#include <rtl8188e_hal.h>
-#include <usb_hal.h>
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
@@ -33,11 +31,7 @@ MODULE_VERSION(DRIVERVERSION);
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
/* module param defaults */
-static int rtw_chip_version;
-static int rtw_rfintfs = HWPI;
-static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
-static int rtw_network_mode = Ndis802_11IBSS;
static int rtw_channel = 1;/* ad-hoc support requirement */
static int rtw_wireless_mode = WIRELESS_11BG_24N;
static int rtw_vrtl_carrier_sense = AUTO_VCS;
@@ -45,9 +39,6 @@ static int rtw_vcs_type = RTS_CTS;/* */
static int rtw_rts_thresh = 2347;/* */
static int rtw_frag_thresh = 2346;/* */
static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
-static int rtw_scan_mode = 1;/* active, passive */
-static int rtw_adhoc_tx_pwr = 1;
-static int rtw_soft_ap;
static int rtw_power_mgnt = 1;
static int rtw_ips_mode = IPS_NORMAL;
@@ -57,11 +48,6 @@ module_param(rtw_ips_mode, int, 0644);
MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
static int rtw_debug = 1;
-static int rtw_radio_enable = 1;
-static int rtw_long_retry_lmt = 7;
-static int rtw_short_retry_lmt = 7;
-static int rtw_busy_thresh = 40;
-static int rtw_ack_policy = NORMAL_ACK;
static int rtw_software_encrypt;
static int rtw_software_decrypt;
@@ -70,11 +56,6 @@ static int rtw_acm_method;/* 0:By SW 1:By HW. */
static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
static int rtw_uapsd_enable;
-static int rtw_uapsd_max_sp = NO_LIMIT;
-static int rtw_uapsd_acbk_en;
-static int rtw_uapsd_acbe_en;
-static int rtw_uapsd_acvi_en;
-static int rtw_uapsd_acvo_en;
static int rtw_ht_enable = 1;
/* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
@@ -89,11 +70,6 @@ static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
static int rtw_rx_stbc = 1;
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
-/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
-static int rtw_lowrate_two_xmit = 1;
-
-static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
-static int rtw_low_power;
static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
@@ -111,10 +87,6 @@ static int rtw_enusbss;/* 0:disable, 1:enable */
static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
-static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */
-
-static int rtw_hw_wps_pbc = 1;
-
int rtw_mc2u_disable;
static int rtw_80211d;
@@ -132,32 +104,22 @@ char *rtw_initmac;
module_param(rtw_initmac, charp, 0644);
module_param(rtw_channel_plan, int, 0644);
-module_param(rtw_chip_version, int, 0644);
-module_param(rtw_rfintfs, int, 0644);
-module_param(rtw_lbkmode, int, 0644);
-module_param(rtw_network_mode, int, 0644);
module_param(rtw_channel, int, 0644);
module_param(rtw_wmm_enable, int, 0644);
module_param(rtw_vrtl_carrier_sense, int, 0644);
module_param(rtw_vcs_type, int, 0644);
-module_param(rtw_busy_thresh, int, 0644);
module_param(rtw_ht_enable, int, 0644);
module_param(rtw_cbw40_enable, int, 0644);
module_param(rtw_ampdu_enable, int, 0644);
module_param(rtw_rx_stbc, int, 0644);
module_param(rtw_ampdu_amsdu, int, 0644);
-module_param(rtw_lowrate_two_xmit, int, 0644);
-module_param(rtw_rf_config, int, 0644);
module_param(rtw_power_mgnt, int, 0644);
module_param(rtw_smart_ps, int, 0644);
-module_param(rtw_low_power, int, 0644);
module_param(rtw_wifi_spec, int, 0644);
module_param(rtw_antdiv_cfg, int, 0644);
module_param(rtw_antdiv_type, int, 0644);
module_param(rtw_enusbss, int, 0644);
module_param(rtw_hwpdn_mode, int, 0644);
-module_param(rtw_hwpwrp_detect, int, 0644);
-module_param(rtw_hw_wps_pbc, int, 0644);
static uint rtw_max_roaming_times = 2;
module_param(rtw_max_roaming_times, uint, 0644);
@@ -185,361 +147,11 @@ MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)");
static int netdev_open(struct net_device *pnetdev);
static int netdev_close(struct net_device *pnetdev);
-/* dummy routines */
-void rtw_proc_remove_one(struct net_device *dev)
-{
-}
-
-static void rtw_proc_init_one(struct net_device *dev)
-{
-}
-
-#if 0 /* TODO: Convert these to /sys */
-static void rtw_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir_dev = NULL;
- struct proc_dir_entry *entry = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- u8 rf_type;
-
- if (rtw_proc == NULL) {
- memcpy(rtw_proc_name, DRV_NAME, sizeof(DRV_NAME));
-
- rtw_proc = create_proc_entry(rtw_proc_name, S_IFDIR,
- init_net.proc_net);
- if (rtw_proc == NULL) {
- DBG_88E(KERN_ERR "Unable to create rtw_proc directory\n");
- return;
- }
-
- entry = create_proc_read_entry("ver_info", S_IFREG | S_IRUGO,
- rtw_proc, proc_get_drv_version,
- dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- }
-
- if (padapter->dir_dev == NULL) {
- padapter->dir_dev = create_proc_entry(dev->name,
- S_IFDIR | S_IRUGO | S_IXUGO,
- rtw_proc);
- dir_dev = padapter->dir_dev;
- if (dir_dev == NULL) {
- if (rtw_proc_cnt == 0 && rtw_proc) {
- remove_proc_entry(rtw_proc_name, init_net.proc_net);
- rtw_proc = NULL;
- }
-
- pr_info("Unable to create dir_dev directory\n");
- return;
- }
- } else {
- return;
- }
-
- rtw_proc_cnt++;
-
- entry = create_proc_read_entry("write_reg", S_IFREG | S_IRUGO,
- dir_dev, proc_get_write_reg, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_write_reg;
-
- entry = create_proc_read_entry("read_reg", S_IFREG | S_IRUGO,
- dir_dev, proc_get_read_reg, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_read_reg;
-
-
- entry = create_proc_read_entry("fwstate", S_IFREG | S_IRUGO,
- dir_dev, proc_get_fwstate, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("sec_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_sec_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mlmext_state", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mlmext_state, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("qos_option", S_IFREG | S_IRUGO,
- dir_dev, proc_get_qos_option, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("ht_option", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ht_option, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("ap_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ap_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("adapter_state", S_IFREG | S_IRUGO,
- dir_dev, proc_getstruct adapter_state, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("trx_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_trx_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump3", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump3", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
- entry = create_proc_read_entry("rf_reg_dump3",
- S_IFREG | S_IRUGO, dir_dev,
- proc_get_rf_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump4",
- S_IFREG | S_IRUGO, dir_dev,
- proc_get_rf_reg_dump4, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- }
-
-#ifdef CONFIG_88EU_AP_MODE
-
- entry = create_proc_read_entry("all_sta_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_all_sta_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-#endif
-
- entry = create_proc_read_entry("best_channel", S_IFREG | S_IRUGO,
- dir_dev, proc_get_best_channel, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rx_signal", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rx_signal, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rx_signal;
- entry = create_proc_read_entry("ht_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ht_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_ht_enable;
-
- entry = create_proc_read_entry("cbw40_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_cbw40_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_cbw40_enable;
-
- entry = create_proc_read_entry("ampdu_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ampdu_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_ampdu_enable;
-
- entry = create_proc_read_entry("rx_stbc", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rx_stbc, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rx_stbc;
-
- entry = create_proc_read_entry("path_rssi", S_IFREG | S_IRUGO,
- dir_dev, proc_get_two_path_rssi, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry = create_proc_read_entry("rssi_disp", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rssi_disp, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rssi_disp;
-}
-
-void rtw_proc_remove_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir_dev = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- u8 rf_type;
-
- dir_dev = padapter->dir_dev;
- padapter->dir_dev = NULL;
-
- if (dir_dev) {
- remove_proc_entry("write_reg", dir_dev);
- remove_proc_entry("read_reg", dir_dev);
- remove_proc_entry("fwstate", dir_dev);
- remove_proc_entry("sec_info", dir_dev);
- remove_proc_entry("mlmext_state", dir_dev);
- remove_proc_entry("qos_option", dir_dev);
- remove_proc_entry("ht_option", dir_dev);
- remove_proc_entry("rf_info", dir_dev);
- remove_proc_entry("ap_info", dir_dev);
- remove_proc_entry("adapter_state", dir_dev);
- remove_proc_entry("trx_info", dir_dev);
- remove_proc_entry("mac_reg_dump1", dir_dev);
- remove_proc_entry("mac_reg_dump2", dir_dev);
- remove_proc_entry("mac_reg_dump3", dir_dev);
- remove_proc_entry("bb_reg_dump1", dir_dev);
- remove_proc_entry("bb_reg_dump2", dir_dev);
- remove_proc_entry("bb_reg_dump3", dir_dev);
- remove_proc_entry("rf_reg_dump1", dir_dev);
- remove_proc_entry("rf_reg_dump2", dir_dev);
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
- remove_proc_entry("rf_reg_dump3", dir_dev);
- remove_proc_entry("rf_reg_dump4", dir_dev);
- }
-#ifdef CONFIG_88EU_AP_MODE
- remove_proc_entry("all_sta_info", dir_dev);
-#endif
-
- remove_proc_entry("best_channel", dir_dev);
- remove_proc_entry("rx_signal", dir_dev);
- remove_proc_entry("cbw40_enable", dir_dev);
- remove_proc_entry("ht_enable", dir_dev);
- remove_proc_entry("ampdu_enable", dir_dev);
- remove_proc_entry("rx_stbc", dir_dev);
- remove_proc_entry("path_rssi", dir_dev);
- remove_proc_entry("rssi_disp", dir_dev);
- remove_proc_entry(dev->name, rtw_proc);
- dir_dev = NULL;
- } else {
- return;
- }
- rtw_proc_cnt--;
-
- if (rtw_proc_cnt == 0) {
- if (rtw_proc) {
- remove_proc_entry("ver_info", rtw_proc);
-
- remove_proc_entry(rtw_proc_name, init_net.proc_net);
- rtw_proc = NULL;
- }
- }
-}
-#endif
-
static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
{
struct registry_priv *registry_par = &padapter->registrypriv;
GlobalDebugLevel = rtw_debug;
- registry_par->chip_version = (u8)rtw_chip_version;
- registry_par->rfintfs = (u8)rtw_rfintfs;
- registry_par->lbkmode = (u8)rtw_lbkmode;
- registry_par->network_mode = (u8)rtw_network_mode;
memcpy(registry_par->ssid.Ssid, "ANY", 3);
registry_par->ssid.SsidLength = 3;
@@ -551,17 +163,9 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->rts_thresh = (u16)rtw_rts_thresh;
registry_par->frag_thresh = (u16)rtw_frag_thresh;
registry_par->preamble = (u8)rtw_preamble;
- registry_par->scan_mode = (u8)rtw_scan_mode;
- registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
- registry_par->soft_ap = (u8)rtw_soft_ap;
registry_par->smart_ps = (u8)rtw_smart_ps;
registry_par->power_mgnt = (u8)rtw_power_mgnt;
registry_par->ips_mode = (u8)rtw_ips_mode;
- registry_par->radio_enable = (u8)rtw_radio_enable;
- registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
- registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
- registry_par->busy_thresh = (u16)rtw_busy_thresh;
- registry_par->ack_policy = (u8)rtw_ack_policy;
registry_par->mp_mode = 0;
registry_par->software_encrypt = (u8)rtw_software_encrypt;
registry_par->software_decrypt = (u8)rtw_software_decrypt;
@@ -570,28 +174,18 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
/* UAPSD */
registry_par->wmm_enable = (u8)rtw_wmm_enable;
registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
- registry_par->uapsd_max_sp = (u8)rtw_uapsd_max_sp;
- registry_par->uapsd_acbk_en = (u8)rtw_uapsd_acbk_en;
- registry_par->uapsd_acbe_en = (u8)rtw_uapsd_acbe_en;
- registry_par->uapsd_acvi_en = (u8)rtw_uapsd_acvi_en;
- registry_par->uapsd_acvo_en = (u8)rtw_uapsd_acvo_en;
registry_par->ht_enable = (u8)rtw_ht_enable;
registry_par->cbw40_enable = (u8)rtw_cbw40_enable;
registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
registry_par->rx_stbc = (u8)rtw_rx_stbc;
registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
- registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
- registry_par->rf_config = (u8)rtw_rf_config;
- registry_par->low_power = (u8)rtw_low_power;
registry_par->wifi_spec = (u8)rtw_wifi_spec;
registry_par->channel_plan = (u8)rtw_channel_plan;
registry_par->accept_addba_req = true;
registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
registry_par->antdiv_type = (u8)rtw_antdiv_type;
registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;
- registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;
- registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
@@ -732,7 +326,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
if (old_padapter != NULL)
- pnetdev = rtw_alloc_etherdev_with_old_priv(sizeof(struct adapter), (void *)old_padapter);
+ pnetdev = rtw_alloc_etherdev_with_old_priv((void *)old_padapter);
if (!pnetdev)
return NULL;
@@ -762,7 +356,7 @@ static int rtw_start_drv_threads(struct adapter *padapter)
err = PTR_ERR(padapter->cmdThread);
else
/* wait for cmd_thread to run */
- _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
+ wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
return err;
}
@@ -772,9 +366,9 @@ void rtw_stop_drv_threads(struct adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
/* Below is to terminate rtw_cmd_thread & event_thread... */
- up(&padapter->cmdpriv.cmd_queue_sema);
+ complete(&padapter->cmdpriv.cmd_queue_comp);
if (padapter->cmdThread)
- _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
+ wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
}
@@ -821,7 +415,6 @@ static u8 rtw_init_default_value(struct adapter *padapter)
padapter->bReadPortCancel = false;
padapter->bWritePortCancel = false;
padapter->bRxRSSIDisplay = 0;
- padapter->bNotifyChannelChange = 0;
return _SUCCESS;
}
@@ -912,8 +505,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_sreset_init(padapter);
- spin_lock_init(&padapter->br_ext_lock);
-
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
@@ -961,12 +552,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
- /* free the old_pnetdev */
- if (padapter->rereg_nd_name_priv.old_pnetdev) {
- free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
- padapter->rereg_nd_name_priv.old_pnetdev = NULL;
- }
-
mutex_destroy(&padapter->hw_init_mutex);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw\n"));
@@ -1013,7 +598,6 @@ static int _netdev_open(struct net_device *pnetdev)
}
if (padapter->intf_start)
padapter->intf_start(padapter);
- rtw_proc_init_one(pnetdev);
rtw_led_control(padapter, LED_CTL_NO_LINK);
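The os_intfs.c hunk removes the unused module parameters and switches the command-thread handshake from the driver's _rtw_down_sema() wrapper to a struct completion: the thread calls complete() once it is running (or exiting) and the starter waits with wait_for_completion_interruptible(). A small sketch of that handshake, assuming a kthread-based worker; the names demo_thread and thread_started are illustrative, not from the driver:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static DECLARE_COMPLETION(thread_started);

/* Illustrative worker: signal the starter once the thread is running. */
static int demo_thread(void *data)
{
	complete(&thread_started);		/* replaces up(&sema) */

	while (!kthread_should_stop())
		msleep(100);

	return 0;
}

static int start_demo_thread(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(demo_thread, NULL, "demo_thread");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* replaces _rtw_down_sema(&...terminate_cmdthread_sema) */
	wait_for_completion_interruptible(&thread_started);
	return 0;
}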
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 764250b4ba86..7cd2655f27fe 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -55,21 +55,13 @@ void *rtw_malloc2d(int h, int w, int size)
return a;
}
-u32 _rtw_down_sema(struct semaphore *sema)
-{
- if (down_interruptible(sema))
- return _FAIL;
- return _SUCCESS;
-}
-
void _rtw_init_queue(struct __queue *pqueue)
{
INIT_LIST_HEAD(&(pqueue->queue));
spin_lock_init(&(pqueue->lock));
}
-struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
- void *old_priv)
+struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
{
struct net_device *pnetdev;
struct rtw_netdev_priv_indicator *pnpi;
@@ -80,7 +72,6 @@ struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
pnpi = netdev_priv(pnetdev);
pnpi->priv = old_priv;
- pnpi->sizeof_priv = sizeof_priv;
RETURN:
return pnetdev;
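osdep_service.c deletes the now-unused _rtw_down_sema() wrapper and can drop the sizeof_priv argument because the wrapper never allocates the adapter itself; the netdev's private area only holds a small indicator struct pointing at the adapter that already exists. A sketch of that indirection, with hypothetical names (struct priv_indicator, demo_alloc_netdev) standing in for the driver's own types:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical mirror of rtw_netdev_priv_indicator: the private area
 * carries only a pointer to a separately allocated adapter.
 */
struct priv_indicator {
	void *priv;
};

static struct net_device *demo_alloc_netdev(void *old_priv)
{
	struct net_device *ndev;
	struct priv_indicator *ind;

	ndev = alloc_etherdev(sizeof(struct priv_indicator));
	if (!ndev)
		return NULL;

	ind = netdev_priv(ndev);
	ind->priv = old_priv;	/* no sizeof_priv bookkeeping needed */
	return ndev;
}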
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 0c44914ea3e6..103cdb4ed073 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -24,7 +24,6 @@
/* alloc os related resource in struct recv_frame */
void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
{
- precvframe->pkt_newalloc = NULL;
precvframe->pkt = NULL;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 11d51a30170f..68e1e6bbe87f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -25,9 +25,10 @@
#include <osdep_intf.h>
#include <usb_ops_linux.h>
-#include <usb_hal.h>
#include <rtw_ioctl.h>
+#include "rtl8188e_hal.h"
+
#define USB_VENDER_ID_REALTEK 0x0bda
/* DID_USB_v916_20130116 */
@@ -79,9 +80,8 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
- pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
- for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ for (i = 0; i < piface_desc->bNumEndpoints; i++) {
int ep_num;
pendp_desc = &phost_iface->endpoint[i].desc;
@@ -98,7 +98,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
ep_num;
pdvobjpriv->RtNumOutPipes++;
}
- pdvobjpriv->ep_num[i] = ep_num;
}
if (pusbd->speed == USB_SPEED_HIGH)
@@ -107,13 +106,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv->ishighspeed = false;
mutex_init(&pdvobjpriv->usb_vendor_req_mutex);
- pdvobjpriv->usb_vendor_req_buf = kzalloc(MAX_USB_IO_CTL_SIZE, GFP_KERNEL);
-
- if (!pdvobjpriv->usb_vendor_req_buf) {
- usb_set_intfdata(usb_intf, NULL);
- kfree(pdvobjpriv);
- return NULL;
- }
usb_get_dev(pusbd);
return pdvobjpriv;
@@ -141,7 +133,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
}
- kfree(dvobj->usb_vendor_req_buf);
mutex_destroy(&dvobj->usb_vendor_req_mutex);
kfree(dvobj);
}
@@ -238,7 +229,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_cancel_all_timer(padapter);
LeaveAllPowerSaveMode(padapter);
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
/* s1. */
if (pnetdev) {
netif_carrier_off(pnetdev);
@@ -267,7 +258,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_free_network_queue(padapter, true);
rtw_dev_unload(padapter);
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
rtw_indicate_scan_done(padapter, 1);
@@ -298,18 +289,20 @@ static int rtw_resume_process(struct adapter *padapter)
goto exit;
}
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
rtw_reset_drv_sw(padapter);
pwrpriv->bkeepfwalive = false;
pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0)
+ if (pm_netdev_open(pnetdev, true) != 0) {
+ mutex_unlock(&pwrpriv->mutex_lock);
goto exit;
+ }
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
rtw_roaming(padapter, NULL);
@@ -369,8 +362,9 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->pmondev = pmondev;
}
- /* step 2. hook HalFunc, allocate HalData */
- hal_set_hal_ops(padapter);
+ padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
+ if (!padapter->HalData)
+ DBG_88E("cant not alloc memory for HAL DATA\n");
padapter->intf_start = &usb_intf_start;
padapter->intf_stop = &usb_intf_stop;
@@ -456,11 +450,9 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
free_mlme_ap_info(if1);
#endif
- if (pnetdev) {
- /* will call netdev_close() */
- unregister_netdev(pnetdev);
- rtw_proc_remove_one(pnetdev);
- }
+ if (pnetdev)
+ unregister_netdev(pnetdev); /* will call netdev_close() */
+
rtl88eu_mon_deinit(if1->pmondev);
rtw_cancel_all_timer(if1);
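In usb_intf.c the _enter_pwrlock()/_exit_pwrlock() pair becomes a plain struct mutex, and the resume path now unlocks explicitly before the error goto so the lock is not leaked when pm_netdev_open() fails. A compressed sketch of that shape; demo_resume and do_open() are stand-ins, not driver functions:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(pwr_mutex);		/* replaces the pwrlock wrapper */

static int do_open(void)		/* stand-in for pm_netdev_open() */
{
	return 0;
}

static int demo_resume(void)
{
	int ret = -EINVAL;

	mutex_lock(&pwr_mutex);

	if (do_open() != 0) {
		/* new in the hunk above: drop the lock before bailing out */
		mutex_unlock(&pwr_mutex);
		goto exit;
	}

	mutex_unlock(&pwr_mutex);
	ret = 0;
exit:
	return ret;
}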
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index ce1e1a135f1b..d0d591501b73 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -20,7 +20,7 @@
static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
@@ -48,7 +48,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
struct sk_buff *pkt_copy = NULL;
struct recv_frame *precvframe = NULL;
struct rx_pkt_attrib *pattrib = NULL;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = adapt->HalData;
struct recv_priv *precvpriv = &adapt->recvpriv;
struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
@@ -251,7 +251,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
}
/* Acquire IO memory for vendorreq */
- pIo_buf = dvobjpriv->usb_vendor_req_buf;
+ pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
if (pIo_buf == NULL) {
DBG_88E("[%s] pIo_buf == NULL\n", __func__);
@@ -285,8 +285,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
if (status == (-ESHUTDOWN) || status == -ENODEV) {
adapt->bSurpriseRemoved = true;
} else {
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- haldata->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL;
+ adapt->HalData->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL;
}
} else { /* status != len && status >= 0 */
if (status > 0) {
@@ -303,6 +302,8 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
if ((value >= FW_8188E_START_ADDRESS && value <= FW_8188E_END_ADDRESS) || status == len)
break;
}
+ kfree(pIo_buf);
+
release_mutex:
mutex_unlock(&dvobjpriv->usb_vendor_req_mutex);
exit:
@@ -434,10 +435,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
break;
case -EPROTO:
case -EOVERFLOW:
- {
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- haldata->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
- }
+ adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
precvbuf->reuse = true;
usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
break;
@@ -525,7 +523,7 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
return ret;
}
-void usb_read_port_cancel(struct adapter *padapter)
+void rtw_hal_inirp_deinit(struct adapter *padapter)
{
int i;
struct recv_buf *precvbuf;
@@ -691,7 +689,7 @@ check_completion:
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
}
-u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
+u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, struct xmit_buf *xmitbuf)
{
unsigned long irqL;
unsigned int pipe;
@@ -700,8 +698,7 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
struct urb *purb = NULL;
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem;
- struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)xmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
@@ -711,7 +708,7 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
(padapter->pwrctrlpriv.pnp_bstop_trx)) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
("usb_write_port:( padapter->bDriverStopped ||padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
+ rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
goto exit;
}
@@ -720,44 +717,44 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
switch (addr) {
case VO_QUEUE_INX:
pxmitpriv->voq_cnt++;
- pxmitbuf->flags = VO_QUEUE_INX;
+ xmitbuf->flags = VO_QUEUE_INX;
break;
case VI_QUEUE_INX:
pxmitpriv->viq_cnt++;
- pxmitbuf->flags = VI_QUEUE_INX;
+ xmitbuf->flags = VI_QUEUE_INX;
break;
case BE_QUEUE_INX:
pxmitpriv->beq_cnt++;
- pxmitbuf->flags = BE_QUEUE_INX;
+ xmitbuf->flags = BE_QUEUE_INX;
break;
case BK_QUEUE_INX:
pxmitpriv->bkq_cnt++;
- pxmitbuf->flags = BK_QUEUE_INX;
+ xmitbuf->flags = BK_QUEUE_INX;
break;
case HIGH_QUEUE_INX:
- pxmitbuf->flags = HIGH_QUEUE_INX;
+ xmitbuf->flags = HIGH_QUEUE_INX;
break;
default:
- pxmitbuf->flags = MGT_QUEUE_INX;
+ xmitbuf->flags = MGT_QUEUE_INX;
break;
}
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
- purb = pxmitbuf->pxmit_urb[0];
+ purb = xmitbuf->pxmit_urb[0];
/* translate DMA FIFO addr to pipehandle */
pipe = ffaddr2pipehdl(pdvobj, addr);
usb_fill_bulk_urb(purb, pusbd, pipe,
- pxmitframe->buf_addr, /* pxmitbuf->pbuf */
+ pxmitframe->buf_addr, /* xmitbuf->pbuf */
cnt,
usb_write_port_complete,
- pxmitbuf);/* context is pxmitbuf */
+ xmitbuf);/* context is xmitbuf */
status = usb_submit_urb(purb, GFP_ATOMIC);
if (status) {
- rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
+ rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
DBG_88E("usb_write_port, status =%d\n", status);
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port(): usb_submit_urb, status =%x\n", status));
@@ -779,7 +776,7 @@ u32 usb_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
exit:
if (ret != _SUCCESS)
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ rtw_free_xmitbuf(pxmitpriv, xmitbuf);
return ret;
}
@@ -846,7 +843,7 @@ void rtl8188eu_xmit_tasklet(void *priv)
break;
}
- ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv, NULL);
+ ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv);
if (!ret)
break;
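usb_ops_linux.c stops relying on a vendor-request buffer allocated once at probe time and instead allocates the bounce buffer per call, freeing it before the vendor-request mutex is released. A trimmed sketch of that allocate/use/free shape; vendor_req, do_transfer() and IO_CTL_SIZE are illustrative, and GFP_ATOMIC simply mirrors the hunk above:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/errno.h>

#define IO_CTL_SIZE	512	/* stand-in for MAX_USB_IO_CTL_SIZE */

static DEFINE_MUTEX(vendor_req_mutex);

static int do_transfer(u8 *buf, u16 len)	/* stand-in for the usb_control_msg() path */
{
	return len;
}

static int vendor_req(u16 len)
{
	u8 *io_buf;
	int status;

	mutex_lock(&vendor_req_mutex);

	io_buf = kmalloc(IO_CTL_SIZE, GFP_ATOMIC);	/* per-call buffer */
	if (!io_buf) {
		status = -ENOMEM;
		goto release_mutex;
	}

	status = do_transfer(io_buf, len);

	kfree(io_buf);			/* freed before the unlock, as in the diff */
release_mutex:
	mutex_unlock(&vendor_req_mutex);
	return status;
}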
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 221e2750652e..4b1b04e00715 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -72,7 +72,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
if (pxmitbuf->pallocated_buf == NULL)
return _FAIL;
- pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->pbuf = PTR_ALIGN(pxmitbuf->pallocated_buf, XMITBUF_ALIGN_SZ);
pxmitbuf->dma_transfer_addr = 0;
for (i = 0; i < 8; i++) {
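xmit_linux.c swaps the driver's N_BYTE_ALIGMENT() cast through size_t for the generic PTR_ALIGN() helper, which rounds a pointer up to a power-of-two boundary directly. A minimal sketch; the buffer size and the XMIT_ALIGN constant are illustrative:

#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/slab.h>
#include <linux/types.h>

#define XMIT_ALIGN	512	/* stand-in for XMITBUF_ALIGN_SZ; must be a power of two */

static u8 *alloc_aligned_xmit_buf(u8 **pallocated)
{
	/* over-allocate so the aligned pointer still fits in the buffer */
	*pallocated = kzalloc(1024 + XMIT_ALIGN, GFP_KERNEL);
	if (!*pallocated)
		return NULL;

	/* was: (u8 *)N_BYTE_ALIGMENT((size_t)(pallocated_buf), XMITBUF_ALIGN_SZ) */
	return PTR_ALIGN(*pallocated, XMIT_ALIGN);
}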
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 4d8fb4158f6b..25725b158eca 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -51,9 +51,8 @@ void dot11d_init(struct rtllib_device *ieee)
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
RESET_CIE_WATCHDOG(ieee);
-
}
EXPORT_SYMBOL(dot11d_init);
@@ -99,14 +98,13 @@ void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
}
EXPORT_SYMBOL(Dot11d_Channelmap);
-
void Dot11d_Reset(struct rtllib_device *ieee)
{
struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
u32 i;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
for (i = 12; i <= 14; i++)
@@ -123,8 +121,8 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
u8 i, j, NumTriples, MaxChnlNum;
struct chnl_txpow_triple *pTriple;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
MaxChnlNum = 0;
NumTriples = (CoutryIeLen - 3) / 3;
pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 735a199ebdcf..aac395f26652 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -47,8 +47,8 @@ struct rt_dot11d_info {
u8 CountryIeSrcAddr[6];
u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1];
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
+ u8 channel_map[MAX_CHANNEL_NUMBER + 1];
+ u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER + 1];
enum dot11d_state State;
};
@@ -78,6 +78,7 @@ static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
{
GET_CIE_WATCHDOG(__pIeeeDev) = 0;
}
+
#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
void dot11d_init(struct rtllib_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index ba64a4f1b3a8..8d6bca61e7aa 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1487,8 +1487,8 @@ static void _rtl92e_query_rxphystatus(
struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
- char rx_pwr[4], rx_pwr_all = 0;
- char rx_snrX, rx_evmX;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ s8 rx_snrX, rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi = 0;
u8 is_cck_rate = 0;
@@ -1613,7 +1613,7 @@ static void _rtl92e_query_rxphystatus(
2) - 110;
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
- rx_snrX = (char)(tmp_rxsnr);
+ rx_snrX = (s8)(tmp_rxsnr);
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
@@ -1643,7 +1643,7 @@ static void _rtl92e_query_rxphystatus(
for (i = 0; i < max_spatial_stream; i++) {
tmp_rxevm = pofdm_buf->rxevm_X[i];
- rx_evmX = (char)(tmp_rxevm);
+ rx_evmX = (s8)(tmp_rxevm);
rx_evmX /= 2;
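The rtl8192e hunks change RSSI/EVM locals and fields from plain char to s8 because the signedness of bare char is implementation-defined (it is unsigned on ARM and s390, for instance) and these are genuinely negative dB values. A tiny illustration of the trap; snr_in_db and the divide-by-two scaling only mirror the pattern in the hunk:

#include <linux/types.h>

static long snr_in_db(u8 raw)
{
	/* "char v = raw;" may be unsigned on some architectures, turning
	 * a register value meant as -6 into 250; s8 keeps the sign.
	 */
	s8 v = (s8)raw;

	return (long)(v / 2);
}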
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
index 29cefb599bab..d437a8efe933 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
/*Created on 2008/11/18, 3: 7*/
#include "r8192E_hwimg.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 5e3bbe5c3ca4..dde492261451 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -256,7 +256,7 @@ u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
return 0;
if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
return 0;
- down(&priv->rf_sem);
+ mutex_lock(&priv->rf_mutex);
if (priv->Rf_Mode == RF_OP_By_FW) {
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath, RegAddr);
udelay(200);
@@ -265,7 +265,7 @@ u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
}
BitShift = _rtl92e_calculate_bit_shift(BitMask);
Readback_Value = (Original_Value & BitMask) >> BitShift;
- up(&priv->rf_sem);
+ mutex_unlock(&priv->rf_mutex);
return Readback_Value;
}
@@ -630,7 +630,7 @@ void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 powerlevel = 0, powerlevelOFDM24G = 0;
- char ant_pwr_diff;
+ s8 ant_pwr_diff;
u32 u4RegValue;
if (priv->epromtype == EEPROM_93C46) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 803c8b02a0c8..30f65af4d614 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -107,9 +107,9 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
__func__);
return;
}
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
}
}
priv->rtllib->is_set_key = true;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 13a5ddc2bea5..4c30eea45f89 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -38,7 +38,7 @@ static int channels = 0x3fff;
static char *ifname = "wlan%d";
-static struct rtl819x_ops rtl819xp_ops = {
+static const struct rtl819x_ops rtl819xp_ops = {
.nic_type = NIC_8192E,
.get_eeprom_size = rtl92e_get_eeprom_size,
.init_adapter_variable = rtl92e_init_variables,
@@ -993,8 +993,8 @@ static void _rtl92e_init_priv_lock(struct r8192_priv *priv)
spin_lock_init(&priv->irq_th_lock);
spin_lock_init(&priv->rf_ps_lock);
spin_lock_init(&priv->ps_lock);
- sema_init(&priv->wx_sem, 1);
- sema_init(&priv->rf_sem, 1);
+ mutex_init(&priv->wx_mutex);
+ mutex_init(&priv->rf_mutex);
mutex_init(&priv->mutex);
}
@@ -1247,7 +1247,7 @@ static void _rtl92e_if_silent_reset(struct net_device *dev)
RESET_START:
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->rtllib->state == RTLLIB_LINKED)
rtl92e_leisure_ps_leave(dev);
@@ -1255,7 +1255,7 @@ RESET_START:
if (priv->up) {
netdev_info(dev, "%s():the driver is not up.\n",
__func__);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return;
}
priv->up = 0;
@@ -1277,14 +1277,14 @@ RESET_START:
rtllib_stop_scan_syncro(ieee);
if (ieee->state == RTLLIB_LINKED) {
- SEM_DOWN_IEEE_WX(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
netdev_info(dev, "ieee->state is RTLLIB_LINKED\n");
rtllib_stop_send_beacons(priv->rtllib);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
rtllib_stop_scan(ieee);
netif_carrier_off(dev);
- SEM_UP_IEEE_WX(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
} else {
netdev_info(dev, "ieee->state is NOT LINKED\n");
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
@@ -1292,7 +1292,7 @@ RESET_START:
rtl92e_dm_backup_state(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
RT_TRACE(COMP_RESET,
"%s():<==========down process is finished\n",
__func__);
@@ -1982,7 +1982,7 @@ void rtl92e_update_rx_statistics(struct r8192_priv *priv,
weighting) / 6;
}
-u8 rtl92e_rx_db_to_percent(char antpower)
+u8 rtl92e_rx_db_to_percent(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -1993,9 +1993,9 @@ u8 rtl92e_rx_db_to_percent(char antpower)
} /* QueryRxPwrPercentage */
-u8 rtl92e_evm_db_to_percent(char value)
+u8 rtl92e_evm_db_to_percent(s8 value)
{
- char ret_val;
+ s8 ret_val;
ret_val = value;
@@ -2179,9 +2179,9 @@ static int _rtl92e_open(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = _rtl92e_try_up(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -2206,11 +2206,11 @@ static int _rtl92e_close(struct net_device *dev)
rtllib_stop_scan(priv->rtllib);
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = _rtl92e_down(dev, true);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -2242,11 +2242,11 @@ static void _rtl92e_restart(void *data)
reset_wq);
struct net_device *dev = priv->rtllib->dev;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
rtl92e_commit(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
}
static void _rtl92e_set_multicast(struct net_device *dev)
@@ -2265,12 +2265,12 @@ static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
struct r8192_priv *priv = rtllib_priv(dev);
struct sockaddr *addr = mac;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ether_addr_copy(dev->dev_addr, addr->sa_data);
schedule_work(&priv->reset_wq);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -2287,7 +2287,7 @@ static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct iw_point *p = &wrq->u.data;
struct ieee_param *ipw = NULL;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
@@ -2393,7 +2393,7 @@ static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
out:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
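rtl_core.c converts wx_sem and rf_sem, which were only ever used as binary locks (sema_init(..., 1) paired with down()/up()), into struct mutex, the preferred primitive for sleeping mutual exclusion, which also brings lockdep and owner tracking. A sketch of the before/after shape with illustrative names:

#include <linux/mutex.h>

struct demo_priv {
	struct mutex wx_mutex;		/* was: struct semaphore wx_sem */
};

static void demo_init_locks(struct demo_priv *priv)
{
	mutex_init(&priv->wx_mutex);	/* was: sema_init(&priv->wx_sem, 1) */
}

static void demo_ioctl(struct demo_priv *priv)
{
	mutex_lock(&priv->wx_mutex);	/* was: down(&priv->wx_sem) */
	/* ... critical section ... */
	mutex_unlock(&priv->wx_mutex);	/* was: up(&priv->wx_sem) */
}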
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index f627fdc15a58..babc0b3bce95 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -375,8 +375,8 @@ struct r8192_priv {
struct tasklet_struct irq_tx_tasklet;
struct tasklet_struct irq_prepare_beacon_tasklet;
- struct semaphore wx_sem;
- struct semaphore rf_sem;
+ struct mutex wx_mutex;
+ struct mutex rf_mutex;
struct mutex mutex;
struct rt_stats stats;
@@ -503,8 +503,8 @@ struct r8192_priv {
u32 Pwr_Track;
u8 CCKPresentAttentuation_20Mdefault;
u8 CCKPresentAttentuation_40Mdefault;
- char CCKPresentAttentuation_difference;
- char CCKPresentAttentuation;
+ s8 CCKPresentAttentuation_difference;
+ s8 CCKPresentAttentuation;
long undecorated_smoothed_pwdb;
u32 MCSTxPowerLevelOriginalOffset[6];
@@ -604,8 +604,8 @@ void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index);
void rtl92e_update_rx_statistics(struct r8192_priv *priv,
struct rtllib_rx_stats *pprevious_stats);
-u8 rtl92e_evm_db_to_percent(char value);
-u8 rtl92e_rx_db_to_percent(char antpower);
+u8 rtl92e_evm_db_to_percent(s8 value);
+u8 rtl92e_rx_db_to_percent(s8 antpower);
void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
struct rtllib_rx_stats *ptarget_stats);
bool rtl92e_enable_nic(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 98e4d88d0e73..aa4b015c3cc7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -179,9 +179,9 @@ void rtl92e_ips_leave_wq(void *data)
struct net_device *dev = ieee->dev;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
}
void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
@@ -209,9 +209,9 @@ void rtl92e_rtllib_ips_leave(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
}
static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 70df6a1485d6..7413a100ca19 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -65,11 +65,11 @@ static int _rtl92e_wx_set_rate(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_rate(priv->rtllib, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -84,11 +84,11 @@ static int _rtl92e_wx_set_rts(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_rts(priv->rtllib, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -114,11 +114,11 @@ static int _rtl92e_wx_set_power(struct net_device *dev,
__func__);
return 0;
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_power(priv->rtllib, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -142,11 +142,11 @@ static int _rtl92e_wx_set_rawtx(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_rawtx(priv->rtllib, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -158,12 +158,12 @@ static int _rtl92e_wx_force_reset(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
RT_TRACE(COMP_DBG, "%s(): force reset ! extra is %d\n",
__func__, *extra);
priv->force_reset = *extra;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -177,7 +177,7 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
(&(priv->rtllib->PowerSaveControl));
struct rtllib_device *ieee = priv->rtllib;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ?
"DC power" : "AC power");
@@ -193,7 +193,7 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
ieee->ps = *extra;
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -207,13 +207,13 @@ static int _rtl92e_wx_set_lps_awake_interval(struct net_device *dev,
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
(&(priv->rtllib->PowerSaveControl));
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
netdev_info(dev, "%s(): set lps awake interval ! extra is %d\n",
__func__, *extra);
pPSC->RegMaxLPSAwakeIntvl = *extra;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -223,13 +223,13 @@ static int _rtl92e_wx_set_force_lps(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
netdev_info(dev,
"%s(): force LPS ! extra is %d (1 is open 0 is close)\n",
__func__, *extra);
priv->force_lps = *extra;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -266,7 +266,7 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
rtState = priv->rtllib->eRFPowerState;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
ieee->bNetPromiscuousMode) {
if (priv->rtllib->PowerSaveControl.bInactivePs) {
@@ -275,21 +275,21 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return -1;
}
netdev_info(dev,
"=========>%s(): rtl92e_ips_leave\n",
__func__);
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
}
}
}
ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -425,7 +425,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
}
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
priv->rtllib->FirstIe_InScan = true;
@@ -436,15 +436,15 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return -1;
}
RT_TRACE(COMP_PS,
"=========>%s(): rtl92e_ips_leave\n",
__func__);
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
}
}
rtllib_stop_scan(priv->rtllib);
@@ -471,7 +471,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
ret = rtllib_wx_set_scan(priv->rtllib, a, wrqu, b);
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -491,11 +491,11 @@ static int _rtl92e_wx_get_scan(struct net_device *dev,
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_get_scan(priv->rtllib, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -513,10 +513,10 @@ static int _rtl92e_wx_set_essid(struct net_device *dev,
__func__);
return 0;
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_essid(priv->rtllib, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -528,11 +528,11 @@ static int _rtl92e_wx_get_essid(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_get_essid(priv->rtllib, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -545,12 +545,12 @@ static int _rtl92e_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
wrqu->data.length = min_t(size_t, wrqu->data.length,
sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -561,11 +561,11 @@ static int _rtl92e_wx_get_nick(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
wrqu->data.length = strlen(priv->nick);
memcpy(extra, priv->nick, wrqu->data.length);
wrqu->data.flags = 1; /* active */
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -579,11 +579,11 @@ static int _rtl92e_wx_set_freq(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_freq(priv->rtllib, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -644,11 +644,11 @@ static int _rtl92e_wx_set_wap(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_wap(priv->rtllib, info, awrq, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -698,14 +698,14 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
return -ENETDOWN;
priv->rtllib->wx_set_enc = 1;
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
- down(&priv->wx_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
+ mutex_lock(&priv->wx_mutex);
RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
@@ -799,7 +799,7 @@ static int _rtl92e_wx_set_retry(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
wrqu->retry.disabled) {
@@ -822,7 +822,7 @@ static int _rtl92e_wx_set_retry(struct net_device *dev,
rtl92e_commit(dev);
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -875,7 +875,7 @@ static int _rtl92e_wx_set_sens(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->rf_set_sens == NULL) {
err = -1; /* we have not this support for this radio */
goto exit;
@@ -886,7 +886,7 @@ static int _rtl92e_wx_set_sens(struct net_device *dev,
err = -EINVAL;
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -902,12 +902,12 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
priv->rtllib->wx_set_enc = 1;
- down(&priv->rtllib->ips_sem);
+ mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
- up(&priv->rtllib->ips_sem);
+ mutex_unlock(&priv->rtllib->ips_mutex);
ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
{
@@ -969,7 +969,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
end_hw_sec:
priv->rtllib->wx_set_enc = 0;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -985,9 +985,9 @@ static int _rtl92e_wx_set_auth(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_auth(priv->rtllib, info, &(data->param), extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -1003,9 +1003,9 @@ static int _rtl92e_wx_set_mlme(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_mlme(priv->rtllib, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -1020,9 +1020,9 @@ static int _rtl92e_wx_set_gen_ie(struct net_device *dev,
if (priv->bHwRadioOff)
return 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtllib_wx_set_gen_ie(priv->rtllib, extra, data->data.length);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -1097,14 +1097,14 @@ static int _rtl92e_wx_get_promisc_mode(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d",
ieee->IntelPromiscuousModeInfo.bPromiscuousOn,
ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame);
wrqu->data.length = strlen(extra) + 1;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 463122db6d29..61da8f7475bb 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -169,9 +169,6 @@ union qos_tclas {
} TYPE2_8021Q;
};
-#define IsACValid(ac) ((ac >= 0 && ac <= 7) ? true : false)
-
-
union aci_aifsn {
u8 charData;
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 2c8a526773ed..a966a8e490ab 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,6 +306,11 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
pTsCommonInfo->TClasNum = TCLAS_Num;
}
+static bool IsACValid(unsigned int tid)
+{
+ return tid < 7;
+}
+
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
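rtl819x_TSProc.c replaces the removed IsACValid() macro with a typed static helper. Note that the old macro accepted values up to and including 7 (ac <= 7) while the new body tests tid < 7; the patch does not say whether that tightening is intentional, so the sketch below only shows the macro-to-function pattern itself, with a lower-case name chosen for illustration:

#include <linux/types.h>

/* was: #define IsACValid(ac) ((ac >= 0 && ac <= 7) ? true : false) */
static bool is_ac_valid(unsigned int tid)
{
	/* the unsigned parameter makes the old "ac >= 0" half redundant */
	return tid < 7;
}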
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 776e179d5bfd..b895a537d3e4 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -30,7 +30,7 @@
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/sched.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/wireless.h>
@@ -521,7 +521,7 @@ enum wireless_mode {
};
#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
#endif /* ETH_P_PAE */
@@ -1651,9 +1651,9 @@ struct rtllib_device {
short proto_started;
short proto_stoppping;
- struct semaphore wx_sem;
- struct semaphore scan_sem;
- struct semaphore ips_sem;
+ struct mutex wx_mutex;
+ struct mutex scan_mutex;
+ struct mutex ips_mutex;
spinlock_t mgmt_tx_lock;
spinlock_t beacon_lock;
@@ -2212,7 +2212,5 @@ void rtllib_indicate_packets(struct rtllib_device *ieee,
void HTUseDefaultSetting(struct rtllib_device *ieee);
#define RT_ASOC_RETRY_LIMIT 5
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
-#define SEM_DOWN_IEEE_WX(psem) down(psem)
-#define SEM_UP_IEEE_WX(psem) up(psem)
#endif /* RTLLIB_H */
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index f4f318abb299..9d5788e04dd5 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -138,7 +138,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
rtllib_softmac_init(ieee);
ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
- if (ieee->pHTInfo == NULL)
+ if (!ieee->pHTInfo)
return NULL;
HTUpdateDefaultSetting(ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 62154e3f4463..da74dc49b95e 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -276,8 +276,9 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
}
}
-inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
- struct rtllib_device *ieee)
+static inline void
+softmac_ps_mgmt_xmit(struct sk_buff *skb,
+ struct rtllib_device *ieee)
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct rtllib_hdr_3addr *header =
@@ -513,7 +514,7 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
ieee->be_scan_inprogress = true;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
while (1) {
do {
@@ -566,7 +567,7 @@ out:
if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
}
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
ieee->be_scan_inprogress = false;
@@ -587,7 +588,7 @@ static void rtllib_softmac_scan_wq(void *data)
if (rtllib_act_scanning(ieee, true))
return;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
if (ieee->eRFPowerState == eRfOff) {
netdev_info(ieee->dev,
@@ -618,7 +619,7 @@ static void rtllib_softmac_scan_wq(void *data)
schedule_delayed_work(&ieee->softmac_scan_wq,
msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME));
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
return;
out:
@@ -630,7 +631,7 @@ out1:
ieee->actscanning = false;
ieee->scan_watch_dog = 0;
ieee->scanning_continue = 0;
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
@@ -683,7 +684,7 @@ EXPORT_SYMBOL(rtllib_start_send_beacons);
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
{
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
ieee->scan_watch_dog = 0;
if (ieee->scanning_continue == 1) {
ieee->scanning_continue = 0;
@@ -692,7 +693,7 @@ static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
cancel_delayed_work_sync(&ieee->softmac_scan_wq);
}
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
void rtllib_stop_scan(struct rtllib_device *ieee)
@@ -753,7 +754,7 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
}
}
-/* called with wx_sem held */
+/* called with wx_mutex held */
void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
{
if (IS_DOT11D_ENABLE(ieee)) {
@@ -770,8 +771,10 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
}
EXPORT_SYMBOL(rtllib_start_scan_syncro);
-inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
- struct rtllib_device *ieee, int challengelen, u8 *daddr)
+static inline struct sk_buff *
+rtllib_authentication_req(struct rtllib_network *beacon,
+ struct rtllib_device *ieee,
+ int challengelen, u8 *daddr)
{
struct sk_buff *skb;
struct rtllib_authentication *auth;
@@ -1130,7 +1133,7 @@ static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
}
-inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
+static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
{
int i = 0;
@@ -1146,8 +1149,9 @@ inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
return i;
}
-inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
- struct rtllib_device *ieee)
+static inline struct sk_buff *
+rtllib_association_req(struct rtllib_network *beacon,
+ struct rtllib_device *ieee)
{
struct sk_buff *skb;
struct rtllib_assoc_request_frame *hdr;
@@ -1590,7 +1594,7 @@ static void rtllib_associate_procedure_wq(void *data)
rtllib_stop_scan_syncro(ieee);
if (ieee->rtllib_ips_leave != NULL)
ieee->rtllib_ips_leave(ieee->dev);
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
@@ -1605,14 +1609,14 @@ static void rtllib_associate_procedure_wq(void *data)
__func__);
if (ieee->rtllib_ips_leave_wq != NULL)
ieee->rtllib_ips_leave_wq(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return;
}
ieee->associate_seq = 1;
rtllib_associate_step1(ieee, ieee->current_network.bssid);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
@@ -2209,8 +2213,9 @@ static void rtllib_process_action(struct rtllib_device *ieee,
}
}
-inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats)
+static inline int
+rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
{
u16 errcode;
int aid;
@@ -2344,8 +2349,9 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
}
}
-inline int rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats)
+static inline int
+rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
+ struct rtllib_rx_stats *rx_stats)
{
if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
@@ -2361,7 +2367,8 @@ inline int rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
return 0;
}
-inline int rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
+static inline int
+rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct rtllib_hdr_3addr *header = (struct rtllib_hdr_3addr *) skb->data;
u16 frame_ctl;
@@ -2582,16 +2589,16 @@ static void rtllib_start_ibss_wq(void *data)
struct rtllib_device, start_ibss_wq);
/* iwconfig mode ad-hoc will schedule this and return
* on the other hand this will block further iwconfig SET
- * operations because of the wx_sem hold.
+ * operations because of the wx_mutex hold.
* Anyway some most set operations set a flag to speed-up
* (abort) this wq (when syncro scanning) before sleeping
- * on the semaphore
+ * on the mutex
*/
if (!ieee->proto_started) {
netdev_info(ieee->dev, "==========oh driver down return\n");
return;
}
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->current_network.ssid_len == 0) {
strcpy(ieee->current_network.ssid, RTLLIB_DEFAULT_TX_ESSID);
@@ -2703,7 +2710,7 @@ static void rtllib_start_ibss_wq(void *data)
netif_carrier_on(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void rtllib_start_ibss(struct rtllib_device *ieee)
@@ -2711,7 +2718,7 @@ inline void rtllib_start_ibss(struct rtllib_device *ieee)
schedule_delayed_work(&ieee->start_ibss_wq, msecs_to_jiffies(150));
}
-/* this is called only in user context, with wx_sem held */
+/* this is called only in user context, with wx_mutex held */
static void rtllib_start_bss(struct rtllib_device *ieee)
{
unsigned long flags;
@@ -2773,7 +2780,7 @@ static void rtllib_associate_retry_wq(void *data)
struct rtllib_device, associate_retry_wq);
unsigned long flags;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (!ieee->proto_started)
goto exit;
@@ -2806,7 +2813,7 @@ static void rtllib_associate_retry_wq(void *data)
ieee->beinretry = false;
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
@@ -2853,9 +2860,9 @@ void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
u8 shutdown)
{
rtllib_stop_scan_syncro(ieee);
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
rtllib_stop_protocol(ieee, shutdown);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
@@ -2902,9 +2909,9 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
rtllib_start_protocol(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(rtllib_softmac_start_protocol);
@@ -3034,9 +3041,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
INIT_WORK_RSL(&ieee->wx_sync_scan_wq, (void *)rtllib_wx_sync_scan_wq,
ieee);
- sema_init(&ieee->wx_sem, 1);
- sema_init(&ieee->scan_sem, 1);
- sema_init(&ieee->ips_sem, 1);
+ mutex_init(&ieee->wx_mutex);
+ mutex_init(&ieee->scan_mutex);
+ mutex_init(&ieee->ips_mutex);
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
@@ -3049,7 +3056,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
void rtllib_softmac_free(struct rtllib_device *ieee)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
kfree(ieee->pDot11dInfo);
ieee->pDot11dInfo = NULL;
del_timer_sync(&ieee->associate_timer);
@@ -3064,7 +3071,7 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_work_sync(&ieee->associate_complete_wq);
cancel_work_sync(&ieee->ips_leave_wq);
cancel_work_sync(&ieee->wx_sync_scan_wq);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
tasklet_kill(&ieee->ps_task);
}
@@ -3397,8 +3404,9 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
return ret;
}
-inline struct sk_buff *rtllib_disauth_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+static inline struct sk_buff *
+rtllib_disauth_skb(struct rtllib_network *beacon,
+ struct rtllib_device *ieee, u16 asRsn)
{
struct sk_buff *skb;
struct rtllib_disauth *disauth;
@@ -3423,8 +3431,9 @@ inline struct sk_buff *rtllib_disauth_skb(struct rtllib_network *beacon,
return skb;
}
-inline struct sk_buff *rtllib_disassociate_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+static inline struct sk_buff *
+rtllib_disassociate_skb(struct rtllib_network *beacon,
+ struct rtllib_device *ieee, u16 asRsn)
{
struct sk_buff *skb;
struct rtllib_disassoc *disass;
@@ -3499,7 +3508,7 @@ int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee, struct iw_point *p,
struct ieee_param *param;
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
@@ -3543,7 +3552,7 @@ int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee, struct iw_point *p,
kfree(param);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
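
The other recurring change in this file is marking helpers such as softmac_ps_mgmt_xmit() and rtllib_authentication_req() static: they are defined and used only in this translation unit, and a bare inline definition still has external linkage, so the symbol leaks into the global namespace and sparse/gcc warn about the missing prototype. Sketch with a hypothetical helper:

#include <linux/skbuff.h>

/* File-local helper: 'static' limits linkage to this .c file;
 * 'inline' remains only a hint.  demo_alloc_frame() is made up.
 */
static inline struct sk_buff *demo_alloc_frame(unsigned int len)
{
        return dev_alloc_skb(len);      /* caller must check for NULL */
}
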
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 61ed8b0413e4..5f1412fc410d 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -35,7 +35,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
int ret;
struct iw_freq *fwrq = &wrqu->freq;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_INFRA) {
ret = 0;
@@ -81,7 +81,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
ret = 0;
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_freq);
@@ -146,7 +146,7 @@ int rtllib_wx_set_wap(struct rtllib_device *ieee,
rtllib_stop_scan_syncro(ieee);
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
/* use ifconfig hw ether */
if (ieee->iw_mode == IW_MODE_MASTER) {
ret = -1;
@@ -185,7 +185,7 @@ int rtllib_wx_set_wap(struct rtllib_device *ieee,
if (ifup)
rtllib_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_wap);
@@ -287,7 +287,7 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
int set_mode_status = 0;
rtllib_stop_scan_syncro(ieee);
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
switch (wrqu->mode) {
case IW_MODE_MONITOR:
case IW_MODE_ADHOC:
@@ -322,7 +322,7 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return set_mode_status;
}
EXPORT_SYMBOL(rtllib_wx_set_mode);
@@ -412,7 +412,7 @@ void rtllib_wx_sync_scan_wq(void *data)
rtllib_wake_all_queues(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
@@ -421,7 +421,7 @@ int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
{
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
ret = -1;
@@ -435,7 +435,7 @@ int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_scan);
@@ -450,7 +450,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
unsigned long flags;
rtllib_stop_scan_syncro(ieee);
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
proto_started = ieee->proto_started;
@@ -492,7 +492,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
if (proto_started)
rtllib_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(rtllib_wx_set_essid);
@@ -514,7 +514,7 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
int enable = (parms[0] > 0);
short prev = ieee->raw_tx;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (enable)
ieee->raw_tx = 1;
@@ -536,7 +536,7 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
netif_carrier_off(ieee->dev);
}
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
@@ -575,7 +575,7 @@ int rtllib_wx_set_power(struct rtllib_device *ieee,
return -1;
}
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (wrqu->power.disabled) {
RT_TRACE(COMP_DBG, "===>%s(): power disable\n", __func__);
@@ -611,7 +611,7 @@ int rtllib_wx_set_power(struct rtllib_device *ieee,
}
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
@@ -622,7 +622,7 @@ int rtllib_wx_get_power(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->ps == RTLLIB_PS_DISABLED) {
wrqu->power.disabled = 1;
@@ -648,7 +648,7 @@ int rtllib_wx_get_power(struct rtllib_device *ieee,
wrqu->power.flags |= IW_POWER_UNICAST_R;
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 58fc70ec5f2f..78a3ad5b231f 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -1,31 +1,31 @@
/******************************************************************************
-
- Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************
-
- Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andrea.merello@gmail.com>
-
- A special thanks goes to Realtek for their support !
-
-******************************************************************************/
+ *
+ * Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************
+ *
+ * Few modifications for Realtek's Wi-Fi drivers by
+ * Andrea Merello <andrea.merello@gmail.com>
+ *
+ * A special thanks goes to Realtek for their support !
+ *
+ *****************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
@@ -731,17 +731,19 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (qos_actived) {
hdr_len = RTLLIB_3ADDR_LEN + 2;
- /* in case we are a client verify acm is not set for this ac */
- while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
- netdev_info(ieee->dev, "skb->priority = %x\n",
- skb->priority);
- if (wme_downgrade_ac(skb))
- break;
- netdev_info(ieee->dev, "converted skb->priority = %x\n",
- skb->priority);
- }
+ /* in case we are a client verify acm is not set for this ac */
+ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
+ netdev_info(ieee->dev, "skb->priority = %x\n",
+ skb->priority);
+ if (wme_downgrade_ac(skb))
+ break;
+ netdev_info(ieee->dev, "converted skb->priority = %x\n",
+ skb->priority);
+ }
+
qos_ctl |= skb->priority;
header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
+
} else {
hdr_len = RTLLIB_3ADDR_LEN;
}
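
The re-indented block above is the WMM admission-control check: while the AP's ACM mask flags the frame's current user priority as requiring admission, the priority is downgraded until an unrestricted class is found or nothing lower exists. A rough stand-in for the idea; demo_downgrade() is hypothetical and far simpler than the driver's wme_downgrade_ac():

#include <linux/bits.h>
#include <linux/skbuff.h>

static int demo_downgrade(struct sk_buff *skb)
{
        if (!skb->priority)
                return -1;              /* nothing lower to fall back to */
        skb->priority--;                /* naive one-step downgrade */
        return 0;
}

static void demo_check_acm(struct sk_buff *skb, u8 acm_mask)
{
        while (acm_mask & BIT(skb->priority))
                if (demo_downgrade(skb))
                        break;
}
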
@@ -981,6 +983,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
return 1;
}
+
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
memset(skb->cb, 0, sizeof(skb->cb));
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 84e6272f28cd..b1500ee9a5cf 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -263,7 +263,7 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee,
int err = 0;
netdev_dbg(ieee->dev, "Getting scan\n");
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
@@ -287,7 +287,7 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee,
}
spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
@@ -689,7 +689,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
if (ieee->state != RTLLIB_LINKED)
return -ENOLINK;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
@@ -716,11 +716,11 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
ieee->current_network.ssid_len = 0;
break;
default:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return -EOPNOTSUPP;
}
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index b5d0c2eb045b..9e3f432e5355 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -1,7 +1,6 @@
NIC_SELECT = RTL8192U
-ccflags-y := -I$(TOPDIR)/drivers/net/wireless
-ccflags-y += -O2
+ccflags-y := -O2
ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
ieee80211-rsl-objs := ieee80211_rx.o \
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 09e9499b7f9d..077ea13eb1e7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -746,7 +746,7 @@ struct ieee80211_rx_stats {
bool bisrxaggrsubframe;
bool bPacketBeacon; //cosa add for rssi
bool bToSelfBA; //cosa add for rssi
- char cck_adc_pwdb[4]; //cosa add for rx path selection
+ s8 cck_adc_pwdb[4]; //cosa add for rx path selection
u16 Seq_Num;
};
@@ -1814,7 +1814,7 @@ struct ieee80211_device {
u32 wpax_type_notify; //{added by David, 2006.9.26}
/* QoS related flag */
- char init_wmmparam_flag;
+ s8 init_wmmparam_flag;
/* set on initialization */
u8 qos_support;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 051c2be842d0..89cbc077a48d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1027,7 +1027,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee,
(PTS_COMMON_INFO *) &pRxTS,
hdr->addr2,
- (u8)Frame_QoSTID((u8 *)(skb->data)),
+ Frame_QoSTID((u8 *)(skb->data)),
RX_DIR,
true))
{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 49db1b75cd05..d7d85b3f19c4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -284,7 +284,8 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
}
}
-inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
+static inline void
+softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee)
{
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
@@ -320,7 +321,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
//dev_kfree_skb_any(skb);//edit by thomas
}
-inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
+static inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
{
unsigned int len, rate_len;
u8 *tag;
@@ -640,8 +641,9 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
}
EXPORT_SYMBOL(ieee80211_start_scan_syncro);
-inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon,
- struct ieee80211_device *ieee, int challengelen)
+static inline struct sk_buff *
+ieee80211_authentication_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee, int challengelen)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -806,7 +808,7 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
*(tag++) = 2;
put_unaligned_le16(ieee->current_network.atim_window,
- (u8 *)tag);
+ tag);
tag+=2;
}
@@ -978,7 +980,9 @@ static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
}
-inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
+static inline struct sk_buff *
+ieee80211_association_req(struct ieee80211_network *beacon,
+ struct ieee80211_device *ieee)
{
struct sk_buff *skb;
//unsigned long flags;
@@ -3091,7 +3095,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
return ret;
}
-inline struct sk_buff *ieee80211_disassociate_skb(
+static inline struct sk_buff *ieee80211_disassociate_skb(
struct ieee80211_network *beacon,
struct ieee80211_device *ieee,
u8 asRsn)
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 28737ec65186..98fbb6ef484d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -354,7 +354,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
req = (struct rtl_80211_hdr_3addr *) skb->data;
tag = (u8 *)req;
- dst = (u8 *)(&req->addr2[0]);
+ dst = &req->addr2[0];
tag += sizeof(struct rtl_80211_hdr_3addr);
pDialogToken = tag + 2; //category+action
pBaParamSet = (PBA_PARAM_SET)(tag + 3); //+DialogToken
@@ -452,7 +452,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
}
rsp = (struct rtl_80211_hdr_3addr *)skb->data;
tag = (u8 *)rsp;
- dst = (u8 *)(&rsp->addr2[0]);
+ dst = &rsp->addr2[0];
tag += sizeof(struct rtl_80211_hdr_3addr);
pDialogToken = tag + 2;
pStatusCode = (u16 *)(tag + 3);
@@ -590,7 +590,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
delba = (struct rtl_80211_hdr_3addr *)skb->data;
- dst = (u8 *)(&delba->addr2[0]);
+ dst = &delba->addr2[0];
pDelBaParamSet = (PDELBA_PARAM_SET)&delba->payload[2];
if(pDelBaParamSet->field.Initiator == 1)
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 821afc0ddac5..0b7b04ea0910 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -533,7 +533,7 @@ typedef struct _rt_9x_tx_rate_history {
u32 ht_mcs[4][16];
} rt_tx_rahis_t, *prt_tx_rahis_t;
typedef struct _RT_SMOOTH_DATA_4RF {
- char elements[4][100]; /* array to store values */
+ s8 elements[4][100]; /* array to store values */
u32 index; /* index to current array to store */
u32 TotalNum; /* num of valid elements */
u32 TotalVal[4]; /* sum of valid elements */
@@ -1031,7 +1031,7 @@ typedef struct r8192_priv {
s8 cck_present_attentuation;
u8 cck_present_attentuation_20Mdefault;
u8 cck_present_attentuation_40Mdefault;
- char cck_present_attentuation_difference;
+ s8 cck_present_attentuation_difference;
bool btxpower_tracking;
bool bcck_in_ch14;
bool btxpowerdata_readfromEEPORM;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index dd0970facdf5..457eeb5f5239 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -114,9 +114,9 @@ static int channels = 0x3fff;
-module_param(ifname, charp, S_IRUGO | S_IWUSR);
-module_param(hwwep, int, S_IRUGO | S_IWUSR);
-module_param(channels, int, S_IRUGO | S_IWUSR);
+module_param(ifname, charp, 0644);
+module_param(hwwep, int, 0644);
+module_param(channels, int, 0644);
MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
MODULE_PARM_DESC(hwwep, " Try to use hardware security support. ");
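
checkpatch flags chains of S_I* macros on module_param() as harder to read than plain octal, and 0644 is exactly S_IRUGO | S_IWUSR (owner read/write, group and other read-only). Hypothetical example:

#include <linux/moduleparam.h>

static int demo_level = 1;                      /* made-up parameter */
module_param(demo_level, int, 0644);            /* was: S_IRUGO | S_IWUSR */
MODULE_PARM_DESC(demo_level, "Demo verbosity level");
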
@@ -922,47 +922,6 @@ void rtl8192_rtx_disable(struct net_device *dev)
skb_queue_purge(&priv->skb_queue);
}
-inline u16 ieeerate2rtlrate(int rate)
-{
- switch (rate) {
- case 10:
- return 0;
- case 20:
- return 1;
- case 55:
- return 2;
- case 110:
- return 3;
- case 60:
- return 4;
- case 90:
- return 5;
- case 120:
- return 6;
- case 180:
- return 7;
- case 240:
- return 8;
- case 360:
- return 9;
- case 480:
- return 10;
- case 540:
- return 11;
- default:
- return 3;
- }
-}
-
-static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
-inline u16 rtl8192_rate2rate(short rate)
-{
- if (rate > 11)
- return 0;
- return rtl_rate[rate];
-}
-
-
/* The prototype of rx_isr has changed since one version of Linux Kernel */
static void rtl8192_rx_isr(struct urb *urb)
{
@@ -1319,14 +1278,6 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
}
-inline u8 rtl8192_IsWirelessBMode(u16 rate)
-{
- if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
- return 1;
- else
- return 0;
-}
-
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1702,11 +1653,8 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero) {
- RT_TRACE(COMP_ERR,
- "can't alloc urb for zero byte\n");
+ if (!tx_urb_zero)
return -ENOMEM;
- }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
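
Dropping the RT_TRACE print on the usb_alloc_urb() failure path follows the usual rule that out-of-memory messages are redundant: the allocator already warns (with a backtrace) unless __GFP_NOWARN is passed, so the caller only needs to return -ENOMEM. Hypothetical sketch:

#include <linux/usb.h>

static int demo_alloc_zero_byte_urb(struct urb **out)
{
        *out = usb_alloc_urb(0, GFP_ATOMIC);
        if (!*out)
                return -ENOMEM;         /* no extra message needed */
        return 0;
}
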
@@ -4209,7 +4157,7 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
*
* Return: 0-100 percentage
*---------------------------------------------------------------------------*/
-static u8 rtl819x_query_rxpwrpercentage(char antpower)
+static u8 rtl819x_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -4220,9 +4168,9 @@ static u8 rtl819x_query_rxpwrpercentage(char antpower)
} /* QueryRxPwrPercentage */
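
The char-to-s8 conversions here (and in r8192U.h and ieee80211.h above) matter because the signedness of plain char is implementation-defined and is unsigned on several architectures the kernel supports, which would break comparisons against negative dBm values such as the antpower <= -100 test. A minimal sketch loosely modelled on the helper above; the body below is an assumption, only the s8 signature comes from the diff:

#include <linux/types.h>

static u8 demo_rxpwr_percentage(s8 antpower)
{
        if (antpower <= -100 || antpower >= 20)
                return 0;               /* outside the expected dBm window */
        if (antpower >= 0)
                return 100;
        return 100 + antpower;          /* e.g. -30 dBm -> 70% */
}
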
-static u8 rtl819x_evm_dbtopercentage(char value)
+static u8 rtl819x_evm_dbtopercentage(s8 value)
{
- char ret_val;
+ s8 ret_val;
ret_val = value;
@@ -4297,8 +4245,8 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
- char rx_pwr[4], rx_pwr_all = 0;
- char rx_snrX, rx_evmX;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ s8 rx_snrX, rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi = 0;
u8 is_cck_rate = 0;
@@ -4423,7 +4371,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
/* Get Rx snr value in DB */
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
- rx_snrX = (char)(tmp_rxsnr);
+ rx_snrX = (s8)(tmp_rxsnr);
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
@@ -4457,7 +4405,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
for (i = 0; i < max_spatial_stream; i++) {
tmp_rxevm = pofdm_buf->rxevm_X[i];
- rx_evmX = (char)(tmp_rxevm);
+ rx_evmX = (s8)(tmp_rxevm);
/* Do not use shift operation like "rx_evmX >>= 1"
* because the compiler of free build environment will
@@ -4475,10 +4423,10 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
*/
pstats->SignalQuality =
precord_stats->SignalQuality =
- (u8)(evm & 0xff);
+ evm & 0xff;
pstats->RxMIMOSignalQuality[i] =
precord_stats->RxMIMOSignalQuality[i] =
- (u8)(evm & 0xff);
+ evm & 0xff;
}
@@ -5013,8 +4961,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
dev->netdev_ops = &rtl8192_netdev_ops;
- dev->wireless_handlers =
- (struct iw_handler_def *)&r8192_wx_handlers_def;
+ dev->wireless_handlers = &r8192_wx_handlers_def;
dev->type = ARPHRD_ETHER;
@@ -5222,7 +5169,8 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
} else {
/* Key Material */
if (KeyContent) {
- write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2)));
+ write_nic_dword(dev, WCAMI,
+ *(KeyContent + i - 2));
write_nic_dword(dev, RWCAM, TargetCommand);
}
}
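
Several hunks above simply drop casts the language already guarantees: &rsp->addr2[0] is a u8 * by itself, an integer argument is converted implicitly when passed to write_nic_dword(), and the (struct iw_handler_def *) cast only hid the const qualifier that net_device's wireless_handlers pointer carries anyway. Hypothetical illustration:

#include <linux/if_ether.h>
#include <linux/types.h>

static void demo_cast_cleanups(void)
{
        u8 addr[ETH_ALEN];
        const u8 *dst;

        dst = &addr[0];         /* already u8 *, no cast needed */
        dst = addr;             /* arrays decay to pointers implicitly */
        (void)dst;
}
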
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 1e0e53c9c314..9209aad0515e 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -150,7 +150,7 @@ void deinit_hal_dm(struct net_device *dev)
#ifdef USB_RX_AGGREGATION_SUPPORT
void dm_CheckRxAggregation(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
static unsigned long lastTxOkCnt;
static unsigned long lastRxOkCnt;
@@ -2346,7 +2346,7 @@ dm_CheckEdcaTurbo_EXIT:
static void dm_init_ctstoself(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
priv->ieee80211->bCTSToSelfEnable = true;
priv->ieee80211->CTSToSelfTH = CTSToSelfTHVal;
@@ -2354,7 +2354,7 @@ static void dm_init_ctstoself(struct net_device *dev)
static void dm_ctstoself(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
static unsigned long lastTxOkCnt;
static unsigned long lastRxOkCnt;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 8918654b44ed..5dc3b5b9bfff 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -145,7 +145,7 @@ static void set_supported_rate(u8 *rates, uint mode)
case WIRELESS_11BG:
memcpy(rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
memcpy(rates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
- IEEE80211_NUM_OFDM_RATESLEN);
+ IEEE80211_NUM_OFDM_RATESLEN);
break;
}
}
@@ -188,24 +188,24 @@ int r8712_generate_ie(struct registry_priv *pregistrypriv)
ie += 2;
/*SSID*/
ie = r8712_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength,
- pdev_network->Ssid.Ssid, &sz);
+ pdev_network->Ssid.Ssid, &sz);
/*supported rates*/
set_supported_rate(pdev_network->rates, pregistrypriv->wireless_mode);
rateLen = r8712_get_rateset_len(pdev_network->rates);
if (rateLen > 8) {
ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8,
- pdev_network->rates, &sz);
+ pdev_network->rates, &sz);
ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8),
- (pdev_network->rates + 8), &sz);
+ (pdev_network->rates + 8), &sz);
} else
ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_,
- rateLen, pdev_network->rates, &sz);
+ rateLen, pdev_network->rates, &sz);
/*DS parameter set*/
ie = r8712_set_ie(ie, _DSSET_IE_, 1,
- (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+ (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
/*IBSS Parameter Set*/
ie = r8712_set_ie(ie, _IBSS_PARA_IE_, 2,
- (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+ (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
return sz;
}
@@ -220,8 +220,7 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
pbuf = r8712_get_ie(pbuf, _WPA_IE_ID_, &len, limit);
if (pbuf) {
/*check if oui matches...*/
- if (memcmp((pbuf + 2), wpa_oui_type,
- sizeof(wpa_oui_type)))
+ if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)))
goto check_next_ie;
/*check version...*/
memcpy((u8 *)&val16, (pbuf + 6), sizeof(val16));
@@ -279,7 +278,7 @@ static int r8712_get_wpa2_cipher_suite(u8 *s)
}
int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
- int *pairwise_cipher)
+ int *pairwise_cipher)
{
int i;
int left, count;
@@ -322,7 +321,7 @@ int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
}
int r8712_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher,
- int *pairwise_cipher)
+ int *pairwise_cipher)
{
int i;
int left, count;
@@ -365,7 +364,7 @@ int r8712_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher,
}
int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
- u8 *wpa_ie, u16 *wpa_len)
+ u8 *wpa_ie, u16 *wpa_len)
{
u8 authmode;
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
@@ -383,7 +382,7 @@ int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
} else {
if (authmode == _WPA2_IE_ID_) {
memcpy(rsn_ie, &in_ie[cnt],
- in_ie[cnt + 1] + 2);
+ in_ie[cnt + 1] + 2);
*rsn_len = in_ie[cnt + 1] + 2;
cnt += in_ie[cnt + 1] + 2; /*get next*/
} else {
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 57211f7e68a5..cbe4de05d26b 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -243,9 +243,9 @@ static u32 start_drv_threads(struct _adapter *padapter)
void r8712_stop_drv_threads(struct _adapter *padapter)
{
/*Below is to terminate r8712_cmd_thread & event_thread...*/
- up(&padapter->cmdpriv.cmd_queue_sema);
+ complete(&padapter->cmdpriv.cmd_queue_comp);
if (padapter->cmdThread)
- _down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
+ wait_for_completion_interruptible(&padapter->cmdpriv.terminate_cmdthread_comp);
padapter->cmdpriv.cmd_seq = 1;
}
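
In rtl8712 the semaphores that were used purely for signalling, kicking the command thread and waiting for it to exit, become struct completion, the primitive meant for that wakeup/one-shot pattern. A reduced sketch with hypothetical names (demo_cmdpriv, demo_cmd_thread); the real thread additionally checks bDriverStopped / bSurpriseRemoved before looping:

#include <linux/completion.h>

struct demo_cmdpriv {
        struct completion cmd_queue_comp;               /* work is queued    */
        struct completion terminate_cmdthread_comp;     /* thread has exited */
};

static int demo_cmd_thread(void *ctx)
{
        struct demo_cmdpriv *p = ctx;

        /* both members are set up with init_completion() at probe time */
        while (!wait_for_completion_interruptible(&p->cmd_queue_comp)) {
                /* ... drain and execute queued commands ... */
        }
        complete(&p->terminate_cmdthread_comp);         /* was: up(&...sema) */
        return 0;
}
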
@@ -425,7 +425,7 @@ static int netdev_open(struct net_device *pnetdev)
else
netif_wake_queue(pnetdev);
- if (video_mode)
+ if (video_mode)
enable_video_mode(padapter, cbw40_enable);
/* start driver mlme relation timer */
start_drv_timers(padapter);
diff --git a/drivers/staging/rtl8712/osdep_intf.h b/drivers/staging/rtl8712/osdep_intf.h
index aa0ec74af511..5d37e1f951cf 100644
--- a/drivers/staging/rtl8712/osdep_intf.h
+++ b/drivers/staging/rtl8712/osdep_intf.h
@@ -36,7 +36,7 @@ struct intf_priv {
/* when in USB, IO is through interrupt in/out endpoints */
struct usb_device *udev;
struct urb *piorw_urb;
- struct semaphore io_retevt;
+ struct completion io_retevt_comp;
};
int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index ad041c96fdb8..c9ea50daffff 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -57,13 +57,6 @@ struct __queue {
spin_lock_init(&((pqueue)->lock)); \
} while (0)
-static inline u32 _down_sema(struct semaphore *sema)
-{
- if (down_interruptible(sema))
- return _FAIL;
- return _SUCCESS;
-}
-
static inline u32 end_of_queue_search(struct list_head *head,
struct list_head *plist)
{
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 735a0eadd98c..576c15d25a0f 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -60,7 +60,6 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
if (!precvbuf->purb)
res = _FAIL;
precvbuf->pskb = NULL;
- precvbuf->reuse = false;
precvbuf->pallocated_buf = NULL;
precvbuf->pbuf = NULL;
precvbuf->pdata = NULL;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 13c018340ff2..9f61583af150 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -264,9 +264,9 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
*/
if (padapter->pwrctrlpriv.pwr_mode > PS_MODE_ACTIVE) {
padapter->pwrctrlpriv.pwr_mode = PS_MODE_ACTIVE;
- _enter_pwrlock(&(padapter->pwrctrlpriv.lock));
+ mutex_lock(&padapter->pwrctrlpriv.mutex_lock);
r8712_set_rpwm(padapter, PS_STATE_S4);
- up(&(padapter->pwrctrlpriv.lock));
+ mutex_unlock(&padapter->pwrctrlpriv.mutex_lock);
}
pcmd_r = pcmd;
break;
@@ -322,7 +322,7 @@ int r8712_cmd_thread(void *context)
allow_signal(SIGTERM);
while (1) {
- if ((_down_sema(&(pcmdpriv->cmd_queue_sema))) == _FAIL)
+ if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp))
break;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
break;
@@ -395,10 +395,10 @@ _next:
}
if (pcmd->cmdcode == GEN_CMD_CODE(_SetPwrMode)) {
if (padapter->pwrctrlpriv.bSleep) {
- _enter_pwrlock(&(padapter->
- pwrctrlpriv.lock));
+ mutex_lock(&padapter->
+ pwrctrlpriv.mutex_lock);
r8712_set_rpwm(padapter, PS_STATE_S2);
- up(&padapter->pwrctrlpriv.lock);
+ mutex_unlock(&padapter->pwrctrlpriv.mutex_lock);
}
}
r8712_free_cmd_obj(pcmd);
@@ -420,7 +420,7 @@ _next:
break;
r8712_free_cmd_obj(pcmd);
} while (1);
- up(&pcmdpriv->terminate_cmdthread_sema);
+ complete(&pcmdpriv->terminate_cmdthread_comp);
thread_exit();
}
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 76f60ba5ee9b..205298e23656 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -248,7 +248,7 @@ u8 r8712_efuse_pg_packet_read(struct _adapter *padapter, u8 offset, u8 *data)
u8 tmpdata[PGPKT_DATA_SIZE];
u8 ret = true;
- if (data == NULL)
+ if (!data)
return false;
if (offset > 0x0f)
return false;
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index 9055827cccf8..a8e237e480c9 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -43,7 +43,7 @@
#define LED_BLINK_LINK_INTERVAL_ALPHA 500
#define LED_BLINK_SCAN_INTERVAL_ALPHA 180
#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+#define LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA 5000
/*===========================================================================
* LED object.
@@ -51,17 +51,19 @@
*/
enum _LED_STATE_871x {
LED_UNKNOWN = 0,
- LED_ON = 1,
- LED_OFF = 2,
+ LED_STATE_ON = 1,
+ LED_STATE_OFF = 2,
LED_BLINK_NORMAL = 3,
LED_BLINK_SLOWLY = 4,
LED_POWER_ON_BLINK = 5,
LED_SCAN_BLINK = 6, /* LED is blinking during scanning period,
* the # of times to blink is depend on time
- * for scanning. */
+ * for scanning.
+ */
LED_NO_LINK_BLINK = 7, /* LED is blinking during no link state. */
LED_BLINK_StartToBlink = 8,/* Customized for Sercomm Printer
- * Server case */
+ * Server case
+ */
LED_BLINK_WPS = 9, /* LED is blinkg during WPS communication */
LED_TXRX_BLINK = 10,
LED_BLINK_WPS_STOP = 11, /*for ALPHA */
@@ -92,7 +94,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
nic = padapter->pnetdev;
pLed->padapter = padapter;
pLed->LedPin = LedPin;
- pLed->CurrLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
pLed->bLedOn = false;
pLed->bLedBlinkInProgress = false;
pLed->BlinkTimes = 0;
@@ -110,7 +112,8 @@ static void DeInitLed871x(struct LED_871x *pLed)
{
del_timer_sync(&pLed->BlinkTimer);
/* We should reset bLedBlinkInProgress if we cancel
- * the LedControlTimer, */
+ * the LedControlTimer,
+ */
pLed->bLedBlinkInProgress = false;
}
@@ -208,7 +211,7 @@ static void SwLedBlink(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
@@ -248,10 +251,10 @@ static void SwLedBlink(struct LED_871x *pLed)
pLed->bLedBlinkInProgress = false;
} else {
/* Assign LED state to toggle. */
- if (pLed->BlinkingLedState == LED_ON)
- pLed->BlinkingLedState = LED_OFF;
+ if (pLed->BlinkingLedState == LED_STATE_ON)
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
/* Schedule a timer to toggle LED state. */
switch (pLed->CurrLedState) {
@@ -288,7 +291,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
if (peeprompriv->CustomerID == RT_CID_819x_CAMEO)
pLed = &(ledpriv->SwLed1);
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
@@ -312,17 +315,17 @@ static void SwLedBlink1(struct LED_871x *pLed)
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
break;
case LED_BLINK_NORMAL:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
break;
@@ -335,27 +338,27 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
}
pLed->bLedScanBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -369,18 +372,18 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
}
@@ -388,26 +391,26 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_BLINK_WPS:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == LED_ON) {
- pLed->BlinkingLedState = LED_OFF;
+ if (pLed->BlinkingLedState == LED_STATE_ON) {
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ msecs_to_jiffies(LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA));
bStopBlinking = false;
} else {
bStopBlinking = true;
@@ -416,9 +419,9 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
}
@@ -436,7 +439,7 @@ static void SwLedBlink2(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
@@ -447,20 +450,20 @@ static void SwLedBlink2(struct LED_871x *pLed)
bStopBlinking = true;
if (bStopBlinking) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
SwLedOn(padapter, pLed);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
SwLedOff(padapter, pLed);
}
pLed->bLedScanBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -471,20 +474,20 @@ static void SwLedBlink2(struct LED_871x *pLed)
bStopBlinking = true;
if (bStopBlinking) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
SwLedOn(padapter, pLed);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
SwLedOff(padapter, pLed);
}
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -501,7 +504,7 @@ static void SwLedBlink3(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
@@ -513,22 +516,22 @@ static void SwLedBlink3(struct LED_871x *pLed)
bStopBlinking = true;
if (bStopBlinking) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (!pLed->bLedOn)
SwLedOn(padapter, pLed);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedOn)
SwLedOff(padapter, pLed);
}
pLed->bLedScanBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -539,46 +542,46 @@ static void SwLedBlink3(struct LED_871x *pLed)
bStopBlinking = true;
if (bStopBlinking) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (!pLed->bLedOn)
SwLedOn(padapter, pLed);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedOn)
SwLedOff(padapter, pLed);
}
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_BLINK_WPS:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
break;
case LED_BLINK_WPS_STOP: /*WPS success*/
- if (pLed->BlinkingLedState == LED_ON) {
- pLed->BlinkingLedState = LED_OFF;
+ if (pLed->BlinkingLedState == LED_STATE_ON) {
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ msecs_to_jiffies(LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA));
bStopBlinking = false;
} else {
bStopBlinking = true;
}
if (bStopBlinking) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
SwLedOn(padapter, pLed);
pLed->bLedWPSBlinkInProgress = false;
}
@@ -596,32 +599,32 @@ static void SwLedBlink4(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
if (!pLed1->bLedWPSBlinkInProgress &&
pLed1->BlinkingLedState == LED_UNKNOWN) {
- pLed1->BlinkingLedState = LED_OFF;
- pLed1->CurrLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
+ pLed1->CurrLedState = LED_STATE_OFF;
SwLedOff(padapter, pLed1);
}
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
break;
case LED_BLINK_StartToBlink:
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
}
@@ -634,17 +637,17 @@ static void SwLedBlink4(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
pLed->bLedScanBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -657,37 +660,37 @@ static void SwLedBlink4(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_BLINK_WPS:
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
}
break;
case LED_BLINK_WPS_STOP: /*WPS authentication fail*/
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
break;
@@ -701,14 +704,14 @@ static void SwLedBlink4(struct LED_871x *pLed)
}
if (bStopBlinking) {
pLed->BlinkTimes = 10;
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
}
@@ -724,7 +727,7 @@ static void SwLedBlink5(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
@@ -734,17 +737,17 @@ static void SwLedBlink5(struct LED_871x *pLed)
if (pLed->BlinkTimes == 0)
bStopBlinking = true;
if (bStopBlinking) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (!pLed->bLedOn)
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
pLed->bLedScanBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -754,17 +757,17 @@ static void SwLedBlink5(struct LED_871x *pLed)
if (pLed->BlinkTimes == 0)
bStopBlinking = true;
if (bStopBlinking) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (!pLed->bLedOn)
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -780,7 +783,7 @@ static void SwLedBlink6(struct LED_871x *pLed)
u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == LED_ON)
+ if (pLed->BlinkingLedState == LED_STATE_ON)
SwLedOn(padapter, pLed);
else
SwLedOff(padapter, pLed);
@@ -790,25 +793,25 @@ static void SwLedBlink6(struct LED_871x *pLed)
if (pLed->BlinkTimes == 0)
bStopBlinking = true;
if (bStopBlinking) {
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (!pLed->bLedOn)
SwLedOn(padapter, pLed);
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_BLINK_WPS:
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
break;
@@ -827,7 +830,8 @@ static void BlinkTimerCallback(unsigned long data)
struct LED_871x *pLed = (struct LED_871x *)data;
/* This fixed the crash problem on Fedora 12 when trying to do the
- * insmod;ifconfig up;rmmod commands. */
+ * insmod;ifconfig up;rmmod commands.
+ */
if (pLed->padapter->bSurpriseRemoved || pLed->padapter->bDriverStopped)
return;
schedule_work(&pLed->BlinkWorkItem);
@@ -908,9 +912,9 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
}
@@ -931,9 +935,9 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_LINK_INTERVAL_ALPHA));
}
@@ -961,9 +965,9 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->CurrLedState = LED_SCAN_BLINK;
pLed->BlinkTimes = 24;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -986,9 +990,9 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -1016,9 +1020,9 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1046,11 +1050,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ msecs_to_jiffies(LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
}
@@ -1063,15 +1067,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -1123,9 +1127,9 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_SCAN_BLINK;
pLed->BlinkTimes = 24;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1142,17 +1146,17 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_CTL_LINK:
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
@@ -1178,8 +1182,8 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
}
@@ -1187,16 +1191,16 @@ static void SwLedControlMode2(struct _adapter *padapter,
case LED_CTL_STOP_WPS:
pLed->bLedWPSBlinkInProgress = false;
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
break;
case LED_CTL_STOP_WPS_FAIL:
pLed->bLedWPSBlinkInProgress = false;
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
break;
@@ -1204,15 +1208,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
if (!IS_LED_BLINKING(pLed)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
}
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
@@ -1255,9 +1259,9 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_SCAN_BLINK;
pLed->BlinkTimes = 24;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1273,9 +1277,9 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -1283,8 +1287,8 @@ static void SwLedControlMode3(struct _adapter *padapter,
case LED_CTL_LINK:
if (IS_LED_WPS_BLINKING(pLed))
return;
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
@@ -1310,9 +1314,9 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1326,11 +1330,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
}
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ msecs_to_jiffies(LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
}
@@ -1340,23 +1344,23 @@ static void SwLedControlMode3(struct _adapter *padapter,
del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
break;
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
if (!IS_LED_BLINKING(pLed)) {
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
}
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
@@ -1390,8 +1394,8 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
del_timer(&pLed1->BlinkTimer);
- pLed1->BlinkingLedState = LED_OFF;
- pLed1->CurrLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
+ pLed1->CurrLedState = LED_STATE_OFF;
if (pLed1->bLedOn)
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
@@ -1411,11 +1415,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedStartToLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_StartToBlink;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
}
@@ -1428,8 +1432,8 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
del_timer(&pLed1->BlinkTimer);
- pLed1->BlinkingLedState = LED_OFF;
- pLed1->CurrLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
+ pLed1->CurrLedState = LED_STATE_OFF;
if (pLed1->bLedOn)
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
@@ -1446,9 +1450,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
}
@@ -1472,9 +1476,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->CurrLedState = LED_SCAN_BLINK;
pLed->BlinkTimes = 24;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1493,9 +1497,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -1505,8 +1509,8 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false;
del_timer(&pLed1->BlinkTimer);
- pLed1->BlinkingLedState = LED_OFF;
- pLed1->CurrLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
+ pLed1->CurrLedState = LED_STATE_OFF;
if (pLed1->bLedOn)
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
@@ -1527,11 +1531,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
} else {
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
}
@@ -1545,9 +1549,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
break;
@@ -1559,9 +1563,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/
@@ -1571,9 +1575,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed1->bLedWPSBlinkInProgress = true;
pLed1->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed1->bLedOn)
- pLed1->BlinkingLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
else
- pLed1->BlinkingLedState = LED_ON;
+ pLed1->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
break;
@@ -1585,9 +1589,9 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/
@@ -1598,15 +1602,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
pLed1->BlinkTimes = 10;
if (pLed1->bLedOn)
- pLed1->BlinkingLedState = LED_OFF;
+ pLed1->BlinkingLedState = LED_STATE_OFF;
else
- pLed1->BlinkingLedState = LED_ON;
+ pLed1->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false;
@@ -1660,8 +1664,8 @@ static void SwLedControlMode5(struct _adapter *padapter,
case LED_CTL_LINK: /* solid blue */
if (pLed->CurrLedState == LED_SCAN_BLINK)
return;
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
pLed->bLedBlinkInProgress = false;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
@@ -1679,9 +1683,9 @@ static void SwLedControlMode5(struct _adapter *padapter,
pLed->CurrLedState = LED_SCAN_BLINK;
pLed->BlinkTimes = 24;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1695,16 +1699,16 @@ static void SwLedControlMode5(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
@@ -1731,8 +1735,8 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_SITE_SURVEY:
if (IS_LED_WPS_BLINKING(pLed))
return;
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
pLed->bLedBlinkInProgress = false;
mod_timer(&(pLed->BlinkTimer), jiffies + msecs_to_jiffies(0));
break;
@@ -1746,9 +1750,9 @@ static void SwLedControlMode6(struct _adapter *padapter,
pLed->CurrLedState = LED_TXRX_BLINK;
pLed->BlinkTimes = 2;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
}
@@ -1763,9 +1767,9 @@ static void SwLedControlMode6(struct _adapter *padapter,
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS;
if (pLed->bLedOn)
- pLed->BlinkingLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
else
- pLed->BlinkingLedState = LED_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer, jiffies +
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
}
@@ -1776,14 +1780,14 @@ static void SwLedControlMode6(struct _adapter *padapter,
del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false;
}
- pLed->CurrLedState = LED_ON;
- pLed->BlinkingLedState = LED_ON;
+ pLed->CurrLedState = LED_STATE_ON;
+ pLed->BlinkingLedState = LED_STATE_ON;
mod_timer(&pLed->BlinkTimer,
jiffies + msecs_to_jiffies(0));
break;
case LED_CTL_POWER_OFF:
- pLed->CurrLedState = LED_OFF;
- pLed->BlinkingLedState = LED_OFF;
+ pLed->CurrLedState = LED_STATE_OFF;
+ pLed->BlinkingLedState = LED_STATE_OFF;
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false;
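Every blink handler above repeats the same toggle-and-rearm idiom with a different interval. A minimal sketch of that idiom, using a hypothetical helper name and interval parameter (not part of the patch):

/* Hypothetical helper distilling the pattern used throughout SwLedBlink*()
 * and SwLedControlMode*(): schedule the opposite of the LED's current
 * physical state, then re-arm the blink timer.
 */
static void led_schedule_toggle(struct LED_871x *pLed, unsigned long interval_ms)
{
	if (pLed->bLedOn)
		pLed->BlinkingLedState = LED_STATE_OFF;
	else
		pLed->BlinkingLedState = LED_STATE_ON;
	mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(interval_ms));
}

With such a helper, each case above would reduce to a single call such as led_schedule_toggle(pLed, LED_BLINK_SCAN_INTERVAL_ALPHA).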
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index f25b34c7d115..66f0e0a35167 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -60,7 +60,7 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
_init_queue(&precvpriv->free_recv_buf_queue);
precvpriv->pallocated_recv_buf =
kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC);
- if (precvpriv->pallocated_recv_buf == NULL)
+ if (!precvpriv->pallocated_recv_buf)
return _FAIL;
precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
((addr_t) (precvpriv->pallocated_recv_buf) & 3);
@@ -163,7 +163,8 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
drvinfo_sz <<= 3;
/*TODO:
- * Offset 0 */
+ * Offset 0
+ */
pattrib->bdecrypted = ((le32_to_cpu(prxstat->rxdw0) & BIT(27)) >> 27)
? 0 : 1;
pattrib->crc_err = (le32_to_cpu(prxstat->rxdw0) & BIT(14)) >> 14;
@@ -210,7 +211,8 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
curfragnum = 0;
if (curfragnum != pfhdr->attrib.frag_num) {
/*the first fragment number must be 0
- *free the whole queue*/
+ *free the whole queue
+ */
r8712_free_recvframe(prframe, pfree_recv_queue);
r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
return NULL;
@@ -224,18 +226,21 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
/*check the fragment sequence (2nd ~n fragment frame) */
if (curfragnum != pnfhdr->attrib.frag_num) {
/* the fragment number must increase (after decache)
- * release the defrag_q & prframe */
+ * release the defrag_q & prframe
+ */
r8712_free_recvframe(prframe, pfree_recv_queue);
r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
return NULL;
}
curfragnum++;
/* copy the 2nd~n fragment frame's payload to the first fragment
- * get the 2nd~last fragment frame's payload */
+ * get the 2nd~last fragment frame's payload
+ */
wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
recvframe_pull(pnextrframe, wlanhdr_offset);
/* append to first fragment frame's tail (if privacy frame,
- * pull the ICV) */
+ * pull the ICV)
+ */
recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
recvframe_put(prframe, pnfhdr->len);
@@ -269,7 +274,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
fragnum = pfhdr->attrib.frag_num;
psta_addr = pfhdr->attrib.ta;
psta = r8712_get_stainfo(pstapriv, psta_addr);
- if (psta == NULL)
+ if (!psta)
pdefrag_q = NULL;
else
pdefrag_q = &psta->sta_recvpriv.defrag_q;
@@ -278,7 +283,8 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
prtnframe = precv_frame;/*isn't a fragment frame*/
if (ismfrag == 1) {
/* 0~(n-1) fragment frame
- * enqueue to defraf_g */
+ * enqueue to defrag_q
+ */
if (pdefrag_q != NULL) {
if (fragnum == 0) {
/*the first fragment*/
@@ -294,7 +300,8 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
prtnframe = NULL;
} else {
/* can't find this ta's defrag_queue, so free this
- * recv_frame */
+ * recv_frame
+ */
r8712_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
}
@@ -302,7 +309,8 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
}
if ((ismfrag == 0) && (fragnum != 0)) {
/* the last fragment frame
- * enqueue the last fragment */
+ * enqueue the last fragment
+ */
if (pdefrag_q != NULL) {
phead = &pdefrag_q->queue;
list_add_tail(&pfhdr->list, phead);
@@ -311,7 +319,8 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
prtnframe = precv_frame;
} else {
/* can't find this ta's defrag_queue, so free this
- * recv_frame */
+ * recv_frame
+ */
r8712_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
}
@@ -391,7 +400,8 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
!memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
+ * replace EtherType
+ */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
ETH_ALEN);
@@ -530,7 +540,8 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
- * Check if there is any packet need indicate. */
+ * Check if there is any packet that needs to be indicated.
+ */
while (!list_empty(phead)) {
prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
@@ -757,7 +768,8 @@ static void query_rx_phy_status(struct _adapter *padapter,
/* Modify the RF RNA gain value to -40, -20,
* -2, 14 by Jenyu's suggestion
* Note: different RF with the different
- * RNA gain. */
+ * RNA gain.
+ */
case 0x3:
rx_pwr_all = -40 - (pcck_buf->cck_agc_rpt &
0x3e);
@@ -842,7 +854,8 @@ static void query_rx_phy_status(struct _adapter *padapter,
total_rssi += rssi;
}
/* (2)PWDB, Average PWDB cacluated by hardware (for
- * rate adaptive) */
+ * rate adaptive)
+ */
rx_pwr_all = (((pphy_head[PHY_STAT_PWDB_ALL_SHT]) >> 1) & 0x7f)
- 106;
pwdb_all = query_rx_pwr_percentage(rx_pwr_all);
@@ -870,7 +883,8 @@ static void query_rx_phy_status(struct _adapter *padapter,
}
/* UI BSS List signal strength(in percentage), make it good looking,
* from 0~100. It is assigned to the BSS List in
- * GetValueFromBeaconOrProbeRsp(). */
+ * GetValueFromBeaconOrProbeRsp().
+ */
if (bcck_rate)
prframe->u.hdr.attrib.signal_strength =
(u8)r8712_signal_scale_mapping(pwdb_all);
@@ -985,15 +999,15 @@ int recv_func(struct _adapter *padapter, void *pcontext)
}
process_phy_info(padapter, prframe);
prframe = r8712_decryptor(padapter, prframe);
- if (prframe == NULL) {
+ if (!prframe) {
retval = _FAIL;
goto _exit_recv_func;
}
prframe = r8712_recvframe_chk_defrag(padapter, prframe);
- if (prframe == NULL)
+ if (!prframe)
goto _exit_recv_func;
prframe = r8712_portctrl(padapter, prframe);
- if (prframe == NULL) {
+ if (!prframe) {
retval = _FAIL;
goto _exit_recv_func;
}
@@ -1027,10 +1041,12 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
transfer_len = pskb->len;
/* Test throughput with Netgear 3700 (No security) with Chariot 3T3R
* pairs. The packet count will be a big number so that the containing
- * packet will effect the Rx reordering. */
+ * packet will affect the Rx reordering.
+ */
if (transfer_len < pkt_len) {
/* In this case, it means the MAX_RECVBUF_SZ is too small to
- * get the data from 8712u. */
+ * get the data from 8712u.
+ */
return _FAIL;
}
do {
@@ -1049,7 +1065,7 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
if ((le32_to_cpu(prxstat->rxdw0) >> 23) & 0x01)
shift_sz = 2;
precvframe = r8712_alloc_recvframe(pfree_recv_queue);
- if (precvframe == NULL)
+ if (!precvframe)
goto _exit_recvbuf2recvframe;
INIT_LIST_HEAD(&precvframe->u.hdr.list);
precvframe->u.hdr.precvbuf = NULL; /*can't access the precvbuf*/
@@ -1057,14 +1073,16 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
tmp_len = pkt_len + drvinfo_sz + RXDESC_SIZE;
pkt_offset = (u16)round_up(tmp_len, 128);
/* for first fragment packet, driver need allocate 1536 +
- * drvinfo_sz + RXDESC_SIZE to defrag packet. */
+ * drvinfo_sz + RXDESC_SIZE to defrag packet.
+ */
if ((mf == 1) && (frag == 0))
/*1658+6=1664, 1664 is 128 alignment.*/
alloc_sz = max_t(u16, tmp_len, 1658);
else
alloc_sz = tmp_len;
/* 2 is for IP header 4 bytes alignment in QoS packet case.
- * 4 is for skb->data 4 bytes alignment. */
+ * 4 is for skb->data 4 bytes alignment.
+ */
alloc_sz += 6;
pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
if (pkt_copy) {
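The allocation sizing in the hunk above rounds each receive packet up to a 128-byte boundary and pads the copy buffer for alignment. A worked example of that arithmetic with hypothetical sizes (the payload and header lengths below are illustrative, not values taken from the driver):

/* Illustrative arithmetic only; the sizes below are examples. */
static u16 recvbuf_alloc_sz_example(void)
{
	u16 pkt_len    = 1500;                             /* example payload      */
	u16 drvinfo_sz = 32, rxdesc_sz = 24;               /* example header sizes */
	u16 tmp_len    = pkt_len + drvinfo_sz + rxdesc_sz; /* 1556                 */
	u16 pkt_offset = round_up(tmp_len, 128);           /* 1664 = 13 * 128      */

	(void)pkt_offset;            /* distance to the next packet in the buffer */
	return max_t(u16, tmp_len, 1658) + 6;              /* 1658 + 6 = 1664      */
}

The first-fragment copy buffer therefore comes out 128-byte aligned, matching the "1658+6=1664, 1664 is 128 alignment" remark in the hunk.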
diff --git a/drivers/staging/rtl8712/rtl8712_recv.h b/drivers/staging/rtl8712/rtl8712_recv.h
index fd9e3fc4c226..0b0c2730aac5 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.h
+++ b/drivers/staging/rtl8712/rtl8712_recv.h
@@ -61,7 +61,8 @@ struct recv_stat {
struct phy_cck_rx_status {
/* For CCK rate descriptor. This is a unsigned 8:1 variable.
* LSB bit present 0.5. And MSB 7 bts present a signed value.
- * Range from -64~+63.5. */
+ * Range from -64~+63.5.
+ */
u8 adc_pwdb_X[4];
u8 sq_rpt;
u8 cck_agc_rpt;
@@ -103,7 +104,6 @@ struct recv_buf {
struct _adapter *adapter;
struct urb *purb;
_pkt *pskb;
- u8 reuse;
u8 irp_pending;
u32 transfer_len;
uint len;
@@ -116,13 +116,13 @@ struct recv_buf {
};
/*
- head ----->
- data ----->
- payload
- tail ----->
- end ----->
- len = (unsigned int )(tail - data);
-*/
+ * head ----->
+ * data ----->
+ * payload
+ * tail ----->
+ * end ----->
+ * len = (unsigned int )(tail - data);
+ */
struct recv_frame_hdr {
struct list_head list;
_pkt *pkt;
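The reflowed block comment above describes the head/data/tail/end pointer layout of a receive frame, with len defined as the distance from data to tail. A minimal illustrative helper (hypothetical, not present in the driver) stating the same relation in code:

/* Hypothetical helper mirroring the layout comment: the payload runs from
 * rx_data up to rx_tail, so its length is the pointer difference.
 */
static inline unsigned int recvframe_payload_len(const struct recv_frame_hdr *hdr)
{
	return (unsigned int)(hdr->rx_tail - hdr->rx_data);
}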
diff --git a/drivers/staging/rtl8712/rtl8712_spec.h b/drivers/staging/rtl8712/rtl8712_spec.h
index af11b44b1140..51e042815cc9 100644
--- a/drivers/staging/rtl8712/rtl8712_spec.h
+++ b/drivers/staging/rtl8712/rtl8712_spec.h
@@ -83,7 +83,8 @@
#define CMD_ADDR_MAPPING_SHIFT 2 /*SDIO CMD ADDR MAPPING,
*shift 2 bit for match
- * offset[14:2]*/
+ * offset[14:2]
+ */
/*Offset for SDIO LOCAL*/
#define OFFSET_SDIO_LOCAL 0x0FFF
diff --git a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
index eed09c872fc9..2e66d28d6918 100644
--- a/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
+++ b/drivers/staging/rtl8712/rtl8712_syscfg_bitdef.h
@@ -68,11 +68,13 @@
#define SYS_CLKSEL BIT(SYS_CLKSEL_SHT) /* System Clock 80MHz*/
#define PS_CLKSEL_SHT 1
#define PS_CLKSEL BIT(PS_CLKSEL_SHT) /*System power save
- * clock select.*/
+ * clock select.
+ */
#define CPU_CLKSEL_SHT 2
#define CPU_CLKSEL BIT(CPU_CLKSEL_SHT) /* System Clock select,
* 1: AFE source,
- * 0: System clock(L-Bus)*/
+ * 0: System clock(L-Bus)
+ */
#define INT32K_EN_SHT 3
#define INT32K_EN BIT(INT32K_EN_SHT)
#define MACSLP_SHT 4
@@ -85,10 +87,12 @@
#define RING_CLK_EN BIT(RING_CLK_EN_SHT)
#define SWHW_SEL_SHT 14
#define SWHW_SEL BIT(SWHW_SEL_SHT) /* Load done,
- * control path switch.*/
+ * control path switch.
+ */
#define FWHW_SEL_SHT 15
#define FWHW_SEL BIT(FWHW_SEL_SHT) /* Sleep exit,
- * control path switch.*/
+ * control path switch.
+ */
/*9346CR*/
#define _VPDIDX_MSK 0xFF00
@@ -118,10 +122,12 @@
#define AFE_MISC_E32_EN BIT(AFE_MISC_E32_EN_SHT)
#define AFE_MISC_MBEN_SHT 1
#define AFE_MISC_MBEN BIT(AFE_MISC_MBEN_SHT)/* Enable AFE Macro
- * Block's Mbias.*/
+ * Block's Mbias.
+ */
#define AFE_MISC_BGEN_SHT 0
#define AFE_MISC_BGEN BIT(AFE_MISC_BGEN_SHT)/* Enable AFE Macro
- * Block's Bandgap.*/
+ * Block's Bandgap.
+ */
/*--------------------------------------------------------------------------*/
@@ -149,10 +155,12 @@
/* EFUSE_CTRL*/
#define EF_FLAG BIT(31) /* Access Flag, Write:1;
- * Read:0*/
+ * Read:0
+ */
#define EF_PGPD 0x70000000 /* E-fuse Program time*/
#define EF_RDT 0x0F000000 /* E-fuse read time: in the
- * unit of cycle time*/
+ * unit of cycle time
+ */
#define EF_PDN_EN BIT(19) /* EFuse Power down enable*/
#define ALD_EN BIT(18) /* Autoload Enable*/
#define EF_ADDR 0x0003FF00 /* Access Address*/
@@ -164,7 +172,8 @@
/* EFUSE_CLK_CTRL*/
#define EFUSE_CLK_EN BIT(1) /* E-Fuse Clock Enable*/
#define EFUSE_CLK_SEL BIT(0) /* E-Fuse Clock Select,
- * 0:500K, 1:40M*/
+ * 0:500K, 1:40M
+ */
#endif /*__RTL8712_SYSCFG_BITDEF_H__*/
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 7e0b94503dfc..c4f03a602a2e 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -535,7 +535,8 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
* seqnum per tid. about usb using 4-endpoint, qsel points out
* the correct mapping between AC&Endpoint,
* the purpose is that correct mapping lets the MAC release
- * the AC Queue list correctly. */
+ * the AC Queue list correctly.
+ */
ptxdesc->txdw3 = cpu_to_le32((pattrib->priority << SEQ_SHT) &
0x0fff0000);
if ((pattrib->ether_type != 0x888e) &&
@@ -586,7 +587,8 @@ static void update_txdesc(struct xmit_frame *pxmitframe, uint *pmem, int sz)
* per tid. about usb using 4-endpoint, qsel points out the
* correct mapping between AC&Endpoint,
* the purpose is that correct mapping let the MAC releases
- * the AC Queue list correctly. */
+ * the AC Queue list correctly.
+ */
ptxdesc->txdw3 = cpu_to_le32((pattrib->priority << SEQ_SHT) &
0x0fff0000);
/* offset 16 */
@@ -627,7 +629,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
phwxmits = pxmitpriv->hwxmits;
hwentry = pxmitpriv->hwxmit_entry;
- if (pxmitbuf == NULL) {
+ if (!pxmitbuf) {
pxmitbuf = r8712_alloc_xmitbuf(pxmitpriv);
if (!pxmitbuf)
return false;
@@ -686,7 +688,8 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
res = r8712_xmitframe_coalesce(padapter,
pxmitframe->pkt, pxmitframe);
/* always return ndis_packet after
- * r8712_xmitframe_coalesce */
+ * r8712_xmitframe_coalesce
+ */
r8712_xmit_complete(padapter, pxmitframe);
}
if (res == _SUCCESS)
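The two comments above about per-TID sequence numbers refer to packing the frame priority into the sequence field of txdw3. A sketch of that packing; the 0x0fff0000 mask comes from the hunk, while treating SEQ_SHT as 16 is an assumption made here for illustration:

/* Sketch only: pack the QoS priority into the 12-bit sequence field of
 * txdw3, assuming SEQ_SHT == 16 so the field spans bits 16..27.
 */
static u32 txdw3_seq_field_sketch(u32 priority)
{
	return (priority << SEQ_SHT) & 0x0fff0000;
}

The driver then converts the result with cpu_to_le32() before storing it in the descriptor, as shown in the hunk.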
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index aed03cfbb1ba..b7ee5e63af33 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -51,14 +51,14 @@
#include "mlme_osdep.h"
/*
-Caller and the r8712_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+ * Caller and the r8712_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- sema_init(&(pcmdpriv->cmd_queue_sema), 0);
- sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0);
+ init_completion(&pcmdpriv->cmd_queue_comp);
+ init_completion(&pcmdpriv->terminate_cmdthread_comp);
_init_queue(&(pcmdpriv->cmd_queue));
@@ -66,13 +66,13 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_seq = 1;
pcmdpriv->cmd_allocated_buf = kmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
GFP_ATOMIC);
- if (pcmdpriv->cmd_allocated_buf == NULL)
+ if (!pcmdpriv->cmd_allocated_buf)
return _FAIL;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ -
((addr_t)(pcmdpriv->cmd_allocated_buf) &
(CMDBUFF_ALIGN_SZ - 1));
pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
- if (pcmdpriv->rsp_allocated_buf == NULL)
+ if (!pcmdpriv->rsp_allocated_buf)
return _FAIL;
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 -
((addr_t)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -88,7 +88,7 @@ static sint _init_evt_priv(struct evt_priv *pevtpriv)
pevtpriv->event_seq = 0;
pevtpriv->evt_allocated_buf = kmalloc(MAX_EVTSZ + 4, GFP_ATOMIC);
- if (pevtpriv->evt_allocated_buf == NULL)
+ if (!pevtpriv->evt_allocated_buf)
return _FAIL;
pevtpriv->evt_buf = pevtpriv->evt_allocated_buf + 4 -
((addr_t)(pevtpriv->evt_allocated_buf) & 3);
@@ -110,20 +110,20 @@ static void _free_cmd_priv(struct cmd_priv *pcmdpriv)
}
/*
-Calling Context:
-
-_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
+ * Calling Context:
+ *
+ * _enqueue_cmd can only be called between kernel thread,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ *
+ */
static sint _enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
unsigned long irqL;
- if (obj == NULL)
+ if (!obj)
return _SUCCESS;
spin_lock_irqsave(&queue->lock, irqL);
list_add_tail(&obj->list, &queue->queue);
@@ -172,7 +172,7 @@ u32 r8712_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
if (pcmdpriv->padapter->eeprompriv.bautoload_fail_flag)
return _FAIL;
res = _enqueue_cmd(&pcmdpriv->cmd_queue, obj);
- up(&pcmdpriv->cmd_queue_sema);
+ complete(&pcmdpriv->cmd_queue_comp);
return res;
}
@@ -181,7 +181,7 @@ u32 r8712_enqueue_cmd_ex(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
unsigned long irqL;
struct __queue *queue;
- if (obj == NULL)
+ if (!obj)
return _SUCCESS;
if (pcmdpriv->padapter->eeprompriv.bautoload_fail_flag)
return _FAIL;
@@ -189,7 +189,7 @@ u32 r8712_enqueue_cmd_ex(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
spin_lock_irqsave(&queue->lock, irqL);
list_add_tail(&obj->list, &queue->queue);
spin_unlock_irqrestore(&queue->lock, irqL);
- up(&pcmdpriv->cmd_queue_sema);
+ complete(&pcmdpriv->cmd_queue_comp);
return _SUCCESS;
}
@@ -211,11 +211,11 @@ void r8712_free_cmd_obj(struct cmd_obj *pcmd)
}
/*
-r8712_sitesurvey_cmd(~)
- ### NOTE:#### (!!!!)
- MUST TAKE CARE THAT BEFORE CALLING THIS FUNC,
- YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+ * r8712_sitesurvey_cmd(~)
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC,
+ * YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
struct ndis_802_11_ssid *pssid)
{
@@ -477,7 +477,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
}
}
psecnetwork = &psecuritypriv->sec_bss;
- if (psecnetwork == NULL) {
+ if (!psecnetwork) {
kfree(pcmd);
return _FAIL;
}
@@ -491,8 +491,9 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
memcpy(&psecuritypriv->authenticator_ie[1],
&psecnetwork->IEs[12], (256 - 1));
psecnetwork->IELength = 0;
- /* If the driver wants to use the bssid to create the connection.
- * If not, we copy the connecting AP's MAC address to it so that
+ /*
+ * If the driver wants to use the bssid to create the connection.
+ * If not, we copy the connecting AP's MAC address to it so that
* the driver just has the bssid information for PMKIDList searching.
*/
if (!pmlmepriv->assoc_by_bssid)
@@ -519,7 +520,8 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
}
}
if (pregistrypriv->ht_enable) {
- /* For WEP mode, we will use the bg mode to do the connection
+ /*
+ * For WEP mode, we will use the bg mode to do the connection
* to avoid some IOT issues, especially for Realtek 8192u
* SoftAP.
*/
@@ -882,16 +884,16 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
if (!psta) {
psta = r8712_alloc_stainfo(&padapter->stapriv,
pnetwork->MacAddress);
- if (psta == NULL)
+ if (!psta)
goto createbss_cmd_fail;
}
r8712_indicate_connect(padapter);
} else {
pwlan = _r8712_alloc_network(pmlmepriv);
- if (pwlan == NULL) {
+ if (!pwlan) {
pwlan = r8712_get_oldest_wlan_network(
&pmlmepriv->scanned_queue);
- if (pwlan == NULL)
+ if (!pwlan)
goto createbss_cmd_fail;
pwlan->last_scanned = jiffies;
} else
@@ -904,8 +906,10 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
(r8712_get_wlan_bssid_ex_sz(pnetwork)));
if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
- /* we will set _FW_LINKED when there is one more sat to
- * join us (stassoc_event_callback) */
+ /*
+ * we will set _FW_LINKED when there is one more sta to
+ * join us (stassoc_event_callback)
+ */
}
createbss_cmd_fail:
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
@@ -921,7 +925,7 @@ void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter,
struct sta_info *psta = r8712_get_stainfo(pstapriv,
psetstakey_rsp->addr);
- if (psta == NULL)
+ if (!psta)
goto exit;
psta->aid = psta->mac_id = psetstakey_rsp->keyid; /*CAM_ID(CAM_ENTRY)*/
exit:
@@ -941,7 +945,7 @@ void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
struct sta_info *psta = r8712_get_stainfo(pstapriv,
passocsta_parm->addr);
- if (psta == NULL)
+ if (!psta)
return;
psta->aid = psta->mac_id = passocsta_rsp->cam_id;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
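The conversion above swaps the command queue's wake-up primitive from a semaphore to a completion; the consumer side would then block in wait_for_completion() where it previously called down(). A minimal, hypothetical sketch of that consumer loop (heavily simplified; the real r8712_cmd_thread() does much more per iteration):

#include <linux/completion.h>
#include <linux/kthread.h>

/* Hypothetical, simplified consumer for the completions initialised in
 * _init_cmd_priv() above: sleep until a producer calls complete() on
 * cmd_queue_comp, handle one command, and signal termination on exit.
 */
static int cmd_thread_sketch(void *context)
{
	struct cmd_priv *pcmdpriv = context;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp))
			break;	/* interrupted: stop the loop */
		/* ...dequeue one cmd_obj from pcmdpriv->cmd_queue and run it... */
	}
	complete(&pcmdpriv->terminate_cmdthread_comp);	/* let the stopper proceed */
	return 0;
}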
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index e4a2a50c85de..3284dcf2f1a9 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -50,8 +50,8 @@ struct cmd_obj {
};
struct cmd_priv {
- struct semaphore cmd_queue_sema;
- struct semaphore terminate_cmdthread_sema;
+ struct completion cmd_queue_comp;
+ struct completion terminate_cmdthread_comp;
struct __queue cmd_queue;
u8 cmd_seq;
u8 *cmd_buf; /*shall be non-paged, and 4 bytes aligned*/
@@ -185,10 +185,12 @@ struct setauth_parm {
*/
struct setkey_parm {
u8 algorithm; /* encryption algorithm, could be none, wep40,
- * TKIP, CCMP, wep104 */
+ * TKIP, CCMP, wep104
+ */
u8 keyid;
u8 grpkey; /* 1: this is the grpkey for 802.1x.
- * 0: this is the unicast key for 802.1x */
+ * 0: this is the unicast key for 802.1x
+ */
u8 key[16]; /* this could be 40 or 104 */
};
@@ -215,15 +217,15 @@ struct SetMacAddr_param {
};
/*
-Caller Ad-Hoc/AP
-
-Command -Rsp(AID == CAMID) mode
-
-This is to force fw to add an sta_data entry per driver's request.
-
-FW will write an cam entry associated with it.
-
-*/
+ * Caller Ad-Hoc/AP
+ *
+ * Command -Rsp(AID == CAMID) mode
+ *
+ * This is to force fw to add an sta_data entry per driver's request.
+ *
+ * FW will write an cam entry associated with it.
+ *
+ */
struct set_assocsta_parm {
u8 addr[ETH_ALEN];
};
@@ -234,27 +236,27 @@ struct set_assocsta_rsp {
};
/*
- Caller Ad-Hoc/AP
-
- Command mode
-
- This is to force fw to del an sta_data entry per driver's request
-
- FW will invalidate the cam entry associated with it.
-
-*/
+ * Caller Ad-Hoc/AP
+ *
+ * Command mode
+ *
+ * This is to force fw to del an sta_data entry per driver's request
+ *
+ * FW will invalidate the cam entry associated with it.
+ *
+ */
struct del_assocsta_parm {
u8 addr[ETH_ALEN];
};
/*
-Caller Mode: AP/Ad-HoC(M)
-
-Notes: To notify fw that given staid has changed its power state
-
-Command Mode
-
-*/
+ * Caller Mode: AP/Ad-HoC(M)
+ *
+ * Notes: To notify fw that given staid has changed its power state
+ *
+ * Command Mode
+ *
+ */
struct setstapwrstate_parm {
u8 staid;
u8 status;
@@ -262,25 +264,25 @@ struct setstapwrstate_parm {
};
/*
-Caller Mode: Any
-
-Notes: To setup the basic rate of RTL8711
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To setup the basic rate of RTL8711
+ *
+ * Command Mode
+ *
+ */
struct setbasicrate_parm {
u8 basicrates[NumRates];
};
/*
-Caller Mode: Any
-
-Notes: To read the current basic rate
-
-Command-Rsp Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To read the current basic rate
+ *
+ * Command-Rsp Mode
+ *
+ */
struct getbasicrate_parm {
u32 rsvd;
};
@@ -290,13 +292,13 @@ struct getbasicrate_rsp {
};
/*
-Caller Mode: Any
-
-Notes: To setup the data rate of RTL8711
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To setup the data rate of RTL8711
+ *
+ * Command Mode
+ *
+ */
struct setdatarate_parm {
u8 mac_id;
u8 datarates[NumRates];
@@ -332,13 +334,13 @@ struct SetChannelPlan_param {
};
/*
-Caller Mode: Any
-
-Notes: To read the current data rate
-
-Command-Rsp Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To read the current data rate
+ *
+ * Command-Rsp Mode
+ *
+ */
struct getdatarate_parm {
u32 rsvd;
@@ -349,36 +351,36 @@ struct getdatarate_rsp {
/*
-Caller Mode: Any
-AP: AP can use the info for the contents of beacon frame
-Infra: STA can use the info when sitesurveying
-Ad-HoC(M): Like AP
-Ad-HoC(C): Like STA
-
-
-Notes: To set the phy capability of the NIC
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ * AP: AP can use the info for the contents of beacon frame
+ * Infra: STA can use the info when sitesurveying
+ * Ad-HoC(M): Like AP
+ * Ad-HoC(C): Like STA
+ *
+ *
+ * Notes: To set the phy capability of the NIC
+ *
+ * Command Mode
+ *
+ */
/*
-Caller Mode: Any
-
-Notes: To set the channel/modem/band
-This command will be used when channel/modem/band is changed.
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To set the channel/modem/band
+ * This command will be used when channel/modem/band is changed.
+ *
+ * Command Mode
+ *
+ */
/*
-Caller Mode: Any
-
-Notes: To get the current setting of channel/modem/band
-
-Command-Rsp Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To get the current setting of channel/modem/band
+ *
+ * Command-Rsp Mode
+ *
+ */
struct getphy_rsp {
u8 rfchannel;
u8 modem;
@@ -428,58 +430,58 @@ struct getrfintfs_parm {
};
/*
- Notes: This command is used for H2C/C2H loopback testing
-
- mac[0] == 0
- ==> CMD mode, return H2C_SUCCESS.
- The following condition must be ture under CMD mode
- mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
- s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
- s2 == (b1 << 8 | b0);
-
- mac[0] == 1
- ==> CMD_RSP mode, return H2C_SUCCESS_RSP
-
- The rsp layout shall be:
- rsp: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = mac[3];
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = s1;
- s1 = swap16(s0);
- w0 = swap32(w1);
- b0 = b1
- s2 = s0 + s1
- b1 = b0
- w1 = w0
-
- mac[0] == 2
- ==> CMD_EVENT mode, return H2C_SUCCESS
- The event layout shall be:
- event: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = event's sequence number, starting from 1 to parm's marc[3]
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = swap16(s0) - event.mac[2];
- s1 = s1 + event.mac[2];
- w0 = swap32(w0);
- b0 = b1
- s2 = s0 + event.mac[2]
- b1 = b0
- w1 = swap32(w1) - event.mac[2];
-
- parm->mac[3] is the total event counts that host requested.
-
-
- event will be the same with the cmd's param.
-
-*/
+ * Notes: This command is used for H2C/C2H loopback testing
+ *
+ * mac[0] == 0
+ * ==> CMD mode, return H2C_SUCCESS.
+ * The following condition must be true under CMD mode
+ * mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ * s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ * s2 == (b1 << 8 | b0);
+ *
+ * mac[0] == 1
+ * ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+ *
+ * The rsp layout shall be:
+ * rsp: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = mac[3];
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = s1;
+ * s1 = swap16(s0);
+ * w0 = swap32(w1);
+ * b0 = b1
+ * s2 = s0 + s1
+ * b1 = b0
+ * w1 = w0
+ *
+ * mac[0] == 2
+ * ==> CMD_EVENT mode, return H2C_SUCCESS
+ * The event layout shall be:
+ * event: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = event's sequence number, starting from 1 to parm's mac[3]
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = swap16(s0) - event.mac[2];
+ * s1 = s1 + event.mac[2];
+ * w0 = swap32(w0);
+ * b0 = b1
+ * s2 = s0 + event.mac[2]
+ * b1 = b0
+ * w1 = swap32(w1) - event.mac[2];
+ *
+ * parm->mac[3] is the total event count that the host requested.
+ *
+ *
+ * event will be the same with the cmd's param.
+ *
+ */
/* CMD param Formart for DRV INTERNAL CMD HDL*/
struct drvint_cmd_parm {
@@ -570,7 +572,8 @@ struct setpwrmode_parm {
u8 bcn_rx_en;
u8 bcn_pass_cnt; /* fw report one beacon information to
* driver when it receives bcn_pass_cnt
- * beacons. */
+ * beacons.
+ */
u8 bcn_to; /* beacon TO (ms). "=0" no limit.*/
u16 bcn_itv;
u8 app_itv; /* only for VOIP mode. */
diff --git a/drivers/staging/rtl8712/rtl871x_ht.h b/drivers/staging/rtl8712/rtl871x_ht.h
index 41872d937408..513f458ea07c 100644
--- a/drivers/staging/rtl8712/rtl871x_ht.h
+++ b/drivers/staging/rtl8712/rtl871x_ht.h
@@ -36,7 +36,8 @@ struct ht_priv {
unsigned int tx_amsdu_enable;/*for enable Tx A-MSDU */
unsigned int tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
unsigned int rx_ampdu_maxlen; /* for rx reordering ctrl win_sz,
- * updated when join_callback. */
+ * updated when join_callback.
+ */
struct ieee80211_ht_cap ht_cap;
};
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index c9218be5bb4f..08bcb3b41bbd 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -68,7 +68,8 @@ struct oid_par_priv {
struct oid_obj_priv {
unsigned char dbg; /* 0: without OID debug message
- * 1: with OID debug message */
+ * 1: with OID debug message
+ */
uint (*oidfuns)(struct oid_par_priv *poid_par_priv);
};
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index e205adf24da2..475e7904fe45 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1976,9 +1976,9 @@ static int r871x_get_ap_info(struct net_device *dev,
if (pdata->length >= 32) {
if (copy_from_user(data, pdata->pointer, 32))
return -EINVAL;
- data[32] = 0;
+ data[32] = 0;
} else {
- return -EINVAL;
+ return -EINVAL;
}
spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
phead = &queue->queue;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 56760cda8e89..0aaf2aab6dd0 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -140,7 +140,8 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
ETH_ALEN)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
goto _Abort_Set_BSSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE */
+ * WIFI_ADHOC_MASTER_STATE
+ */
} else {
r8712_disassoc_cmd(padapter);
if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -203,7 +204,8 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
}
} else {
goto _Abort_Set_SSID; /* driver is in
- * WIFI_ADHOC_MASTER_STATE */
+ * WIFI_ADHOC_MASTER_STATE
+ */
}
}
} else {
@@ -254,12 +256,14 @@ void r8712_set_802_11_infrastructure_mode(struct _adapter *padapter,
(*pold_state == Ndis802_11IBSS)) {
/* will clr Linked_state before this function,
* we must have checked whether issue dis-assoc_cmd or
- * not */
+ * not
+ */
r8712_ind_disconnect(padapter);
}
*pold_state = networktype;
/* clear WIFI_STATION_STATE; WIFI_AP_STATE; WIFI_ADHOC_STATE;
- * WIFI_ADHOC_MASTER_STATE */
+ * WIFI_ADHOC_MASTER_STATE
+ */
_clr_fwstate_(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE |
WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE);
switch (networktype) {
diff --git a/drivers/staging/rtl8712/rtl871x_led.h b/drivers/staging/rtl8712/rtl871x_led.h
index eb612053a3dd..adfbc400a18d 100644
--- a/drivers/staging/rtl8712/rtl871x_led.h
+++ b/drivers/staging/rtl8712/rtl871x_led.h
@@ -72,14 +72,17 @@ enum LED_STRATEGY_871x {
SW_LED_MODE0, /* SW control 1 LED via GPIO0. It is default option. */
SW_LED_MODE1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
SW_LED_MODE2, /* SW control 1 LED via GPIO0,
- * custom for AzWave 8187 minicard. */
+ * custom for AzWave 8187 minicard.
+ */
SW_LED_MODE3, /* SW control 1 LED via GPIO0,
- * customized for Sercomm Printer Server case.*/
+ * customized for Sercomm Printer Server case.
+ */
SW_LED_MODE4, /*for Edimax / Belkin*/
SW_LED_MODE5, /*for Sercomm / Belkin*/
SW_LED_MODE6, /*for WNC / Corega*/
HW_LED, /* HW control 2 LEDs, LED0 and LED1 (there are 4 different
- * control modes, see MAC.CONFIG1 for details.)*/
+ * control modes, see MAC.CONFIG1 for details.)
+ */
};
struct LED_871x {
@@ -96,7 +99,8 @@ struct LED_871x {
u8 bLedWPSBlinkInProgress;
u32 BlinkTimes; /* No. times to toggle for blink.*/
u32 BlinkingLedState; /* Next state for blinking,
- * either LED_ON or OFF.*/
+ * either LED_ON or OFF.
+ */
struct timer_list BlinkTimer; /* Timer object for led blinking.*/
struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer */
@@ -115,7 +119,8 @@ struct led_priv {
/*===========================================================================
* Interface to manipulate LED objects.
- *===========================================================================*/
+ *===========================================================================
+ */
void r8712_InitSwLeds(struct _adapter *padapter);
void r8712_DeInitSwLeds(struct _adapter *padapter);
void LedControl871x(struct _adapter *padapter, enum LED_CTL_MODE LedAction);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 772bf9fa9592..c1feef3da26c 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -403,7 +403,8 @@ static void update_scanned_network(struct _adapter *adapter,
/* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
+ * with this beacon's information
+ */
if (end_of_queue_search(phead, plist)) {
if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
/* If there are no more slots, expire the oldest */
@@ -926,7 +927,8 @@ void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf)
if (psta != NULL) {
/*the sta have been in sta_info_queue => do nothing
*(between drv has received this event before and
- * fw have not yet to set key to CAM_ENTRY) */
+ * fw has not yet set the key to CAM_ENTRY)
+ */
return;
}
@@ -1171,7 +1173,8 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
pmlmepriv->assoc_ssid.SsidLength))) {
if (pmlmepriv->assoc_by_rssi) {
/* if the ssid is the same, select the bss
- * which has the max rssi*/
+ * which has the max rssi
+ */
if (pnetwork_max_rssi) {
if (pnetwork->network.Rssi >
pnetwork_max_rssi->network.Rssi)
@@ -1352,7 +1355,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
i = -1; /* Could not find. */
} else {
; /* There is one Pre-Authentication Key for the
- * specific BSSID. */
+ * specific BSSID.
+ */
}
return i;
}
@@ -1430,7 +1434,8 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
if (match) {
if (sec_ie[0] == _WPA_IE_ID_) {
/* parsing SSN IE to select required encryption
- * algorithm, and set the bc/mc encryption algorithm */
+ * algorithm, and set the bc/mc encryption algorithm
+ */
while (true) {
/*check wpa_oui tag*/
if (memcmp(&sec_ie[2], &wpa_oui[0], 4)) {
@@ -1444,7 +1449,8 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
}
if (!memcmp(&sec_ie[8], &wpa_oui[0], 3)) {
/* get bc/mc encryption type (group
- * key type)*/
+ * key type)
+ */
switch (sec_ie[11]) {
case 0x0: /*none*/
psecuritypriv->XGrpPrivacy =
@@ -1482,7 +1488,8 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
} /*else the uncst_oui is match*/
} else { /*mixed mode, unicast_enc_type > 1*/
/*select the uncst_oui and remove
- * the other uncst_oui*/
+ * the other uncst_oui
+ */
cnt = sec_ie[12];
remove_cnt = (cnt - 1) * 4;
sec_ie[12] = 0x01;
@@ -1499,7 +1506,8 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
}
if (authmode == _WPA2_IE_ID_) {
/* parsing RSN IE to select required encryption
- * algorithm, and set the bc/mc encryption algorithm */
+ * algorithm, and set the bc/mc encryption algorithm
+ */
while (true) {
if ((sec_ie[2] != 0x01) || (sec_ie[3] != 0x0)) {
/*IE Ver error*/
@@ -1543,7 +1551,8 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
} /*else the uncst_oui is match*/
} else { /*mixed mode, unicast_enc_type > 1*/
/*select the uncst_oui and remove the
- * other uncst_oui*/
+ * other uncst_oui
+ */
cnt = sec_ie[8];
remove_cnt = (cnt - 1) * 4;
sec_ie[8] = 0x01;
@@ -1667,7 +1676,8 @@ void r8712_joinbss_reset(struct _adapter *padapter)
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
/* todo: if you want to do something io/reg/hw setting before join_bss,
- * please add code here */
+ * please add code here
+ */
phtpriv->ampdu_enable = false;/*reset to disabled*/
for (i = 0; i < 16; i++)
phtpriv->baddbareq_issued[i] = false;/*reset it*/
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index 61e0eb745d21..ddaaab058b2f 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -47,16 +47,20 @@
#define WIFI_ADHOC_MASTER_STATE 0x00000040
#define WIFI_UNDER_LINKING 0x00000080
#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station
- * is under site surveying*/
+ * is under site surveying
+ */
#define WIFI_MP_STATE 0x00010000
#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in cont. tx background*/
#define WIFI_MP_CTX_ST 0x00040000 /* in cont. tx with
- * single-tone*/
+ * single-tone
+ */
#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in cont, tx
- * background due to out of skb*/
+ * background due to out of skb
+ */
#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx*/
#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in cont, tx with carrier
- * suppression*/
+ * suppression
+ */
#define WIFI_MP_LPBK_STATE 0x00400000
#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 5e4fda1890f5..3c10a2c848c8 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -376,7 +376,8 @@ void r8712_SwitchBandwidth(struct _adapter *pAdapter)
/* Use PHY_REG.txt default value. Do not need to change.
* Correct the tx power for CCK rate in 40M.
* Set Control channel to upper or lower. These settings are
- * required only for 40MHz */
+ * required only for 40MHz
+ */
set_bb_reg(pAdapter, rCCK0_System, bCCKSideBand,
(HAL_PRIME_CHNL_OFFSET_DONT_CARE >> 1));
set_bb_reg(pAdapter, rOFDM1_LSTF, 0xC00,
diff --git a/drivers/staging/rtl8712/rtl871x_mp.h b/drivers/staging/rtl8712/rtl871x_mp.h
index 3f3b2e73b7d2..8df452e3e3ce 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.h
+++ b/drivers/staging/rtl8712/rtl871x_mp.h
@@ -108,7 +108,8 @@ struct mp_priv {
unsigned char network_macaddr[6];
/*Testing Flag*/
u32 mode;/*0 for normal type packet,
- * 1 for loopback packet (16bytes TXCMD)*/
+ * 1 for loopback packet (16bytes TXCMD)
+ */
sint prev_fw_state;
u8 *pallocated_mp_xmitframe_buf;
u8 *pmp_xmtframe_buf;
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 8dc898024e07..1102451a733d 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -158,28 +158,37 @@ static const struct oid_obj_priv oid_rtl_seg_81_80_00[] = {
{1, oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/
{1, oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/
{1, oid_null_function}, /*0x07
- * OID_RT_PRO_SET_MANUAL_DIVERS_BB*/
+ * OID_RT_PRO_SET_MANUAL_DIVERS_BB
+ */
{1, oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/
{1, oid_null_function}, /*0x09
- * OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL*/
+ * OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL
+ */
{1, oid_null_function}, /*0x0A
- * OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL*/
+ * OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL
+ */
{1, oid_rt_pro_set_continuous_tx_hdl}, /*0x0B
- * OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL*/
+ * OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL
+ */
{1, oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C
- * OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS*/
+ * OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS
+ */
{1, oid_null_function}, /*0x0D
- * OID_RT_PRO_SET_TX_ANTENNA_BB*/
+ * OID_RT_PRO_SET_TX_ANTENNA_BB
+ */
{1, oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/
{1, oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/
{1, oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/
{1, oid_rt_pro_set_tx_power_control_hdl}, /*0x11
- * OID_RT_PRO_SET_TX_POWER_CONTROL*/
+ * OID_RT_PRO_SET_TX_POWER_CONTROL
+ */
{1, oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/
{1, oid_null_function}, /*0x13
- * OID_RT_PRO_GET_TX_POWER_CONTROL*/
+ * OID_RT_PRO_GET_TX_POWER_CONTROL
+ */
{1, oid_null_function}, /*0x14
- * OID_RT_PRO_GET_CR_SIGNAL_QUALITY*/
+ * OID_RT_PRO_GET_CR_SIGNAL_QUALITY
+ */
{1, oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/
{1, oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/
{1, oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/
@@ -203,13 +212,17 @@ static const struct oid_obj_priv oid_rtl_seg_81_80_20[] = {
{1, oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/
{1, oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/
{1, oid_null_function}, /*0x28
- *OID_RT_PRO_QUERY_CURRENT_ADDRESS*/
+ *OID_RT_PRO_QUERY_CURRENT_ADDRESS
+ */
{1, oid_null_function}, /*0x29
- *OID_RT_PRO_QUERY_PERMANENT_ADDRESS*/
+ *OID_RT_PRO_QUERY_PERMANENT_ADDRESS
+ */
{1, oid_null_function}, /*0x2A
- *OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS*/
+ *OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS
+ */
{1, oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B
- *OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX*/
+ *OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX
+ */
{1, oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/
{1, oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/
{1, oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index 2e9120a21a0b..11bcfb7bf77c 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -82,7 +82,8 @@
* 3. Page8(0x800)
*/
#define rFPGA0_RFMOD 0x800 /*RF mode & CCK TxSC RF
- * BW Setting?? */
+ * BW Setting??
+ */
#define rFPGA0_TxInfo 0x804 /* Status report?? */
#define rFPGA0_PSDFunction 0x808
#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
@@ -119,7 +120,8 @@
#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting
* RF-R/W protection
- * for parameter4?? */
+ * for parameter4??
+ */
#define rFPGA0_AnalogParameter2 0x884
#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
#define rFPGA0_AnalogParameter4 0x88c
@@ -146,7 +148,8 @@
* 5. PageA(0xA00)
*
* Set Control channel to upper or lower.
- * These settings are required only for 40MHz */
+ * These settings are required only for 40MHz
+ */
#define rCCK0_System 0xa00
#define rCCK0_AFESetting 0xa04 /* Disable init gain now */
@@ -155,20 +158,23 @@
#define rCCK0_RxAGC1 0xa0c
/* AGC default value, saturation level
* Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now.
- * Not the same as 90 series */
+ * Not the same as 90 series
+ */
#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
#define rCCK0_RxHP 0xa14
#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel
- * estimation threshold */
+ * estimation threshold
+ */
#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
#define rCCK0_TxFilter1 0xa20
#define rCCK0_TxFilter2 0xa24
#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f
- * channel report */
+ * channel report
+ */
#define rCCK0_TRSSIReport 0xa50
#define rCCK0_RxReport 0xa54 /* 0xa57 */
#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
@@ -193,11 +199,13 @@
#define rOFDM0_XDRxIQImbalance 0xc2c
#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune
- * init gain */
+ * init gain
+ */
#define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync. */
#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync &
- * Short-GI */
+ * Short-GI
+ */
#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
@@ -283,7 +291,8 @@
#define rTxAGC_Mcs15_Mcs12 0xe1c
/* Analog- control in RX_WAIT_CCA : REG: EE0
- * [Analog- Power & Control Register] */
+ * [Analog- Power & Control Register]
+ */
#define rRx_Wait_CCCA 0xe70
#define rAnapar_Ctrl_BB 0xee0
@@ -371,7 +380,8 @@
/*
* Bit Mask
*
- * 1. Page1(0x100) */
+ * 1. Page1(0x100)
+ */
#define bBBResetB 0x100 /* Useless now? */
#define bGlobalResetB 0x200
#define bOFDMTxStart 0x4
@@ -918,7 +928,8 @@
#define bPesudoNoiseState_D 0xffff0000
/* 7. RF Register
- * Zebra1 */
+ * Zebra1
+ */
#define bZebra1_HSSIEnable 0x8 /* Useless */
#define bZebra1_TRxControl 0xc00
#define bZebra1_TRxGainSetting 0x07f
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index bf10d6db50e8..d464c136dd98 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -52,7 +52,8 @@ void r8712_set_rpwm(struct _adapter *padapter, u8 val8)
pwrpriv->cpwm = val8;
break;
case PS_STATE_S2:/* only for USB normal powersave mode use,
- * temp mark some code. */
+ * temp mark some code.
+ */
case PS_STATE_S3:
case PS_STATE_S4:
pwrpriv->cpwm = val8;
@@ -103,14 +104,14 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return;
del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
if (pwrpriv->cpwm >= PS_STATE_S2) {
if (pwrpriv->alives & CMD_ALIVE)
- up(&(pcmdpriv->cmd_queue_sema));
+ complete(&(pcmdpriv->cmd_queue_comp));
}
pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
- up(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
}
static inline void register_task_alive(struct pwrctrl_priv *pwrctrl, uint tag)
@@ -141,10 +142,10 @@ static void SetPSModeWorkItemCallback(struct work_struct *work)
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
if (!pwrpriv->bSleep) {
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
r8712_set_rpwm(padapter, PS_STATE_S4);
- up(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
}
}
@@ -155,11 +156,11 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
if (pwrpriv->cpwm != pwrpriv->rpwm) {
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->mutex_lock);
r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
- up(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->mutex_lock);
}
}
@@ -175,7 +176,7 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
memset((unsigned char *)pwrctrlpriv, 0, sizeof(struct pwrctrl_priv));
- sema_init(&pwrctrlpriv->lock, 1);
+ mutex_init(&pwrctrlpriv->mutex_lock);
pwrctrlpriv->cpwm = PS_STATE_S4;
pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
pwrctrlpriv->smart_ps = 0;
@@ -207,13 +208,13 @@ sint r8712_register_cmd_alive(struct _adapter *padapter)
uint res = _SUCCESS;
struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
- _enter_pwrlock(&pwrctrl->lock);
+ mutex_lock(&pwrctrl->mutex_lock);
register_task_alive(pwrctrl, CMD_ALIVE);
if (pwrctrl->cpwm < PS_STATE_S2) {
r8712_set_rpwm(padapter, PS_STATE_S3);
res = _FAIL;
}
- up(&pwrctrl->lock);
+ mutex_unlock(&pwrctrl->mutex_lock);
return res;
}
@@ -229,7 +230,7 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
{
struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
- _enter_pwrlock(&pwrctrl->lock);
+ mutex_lock(&pwrctrl->mutex_lock);
unregister_task_alive(pwrctrl, CMD_ALIVE);
if ((pwrctrl->cpwm > PS_STATE_S2) &&
(pwrctrl->pwr_mode > PS_MODE_ACTIVE)) {
@@ -239,5 +240,5 @@ void r8712_unregister_cmd_alive(struct _adapter *padapter)
r8712_set_rpwm(padapter, PS_STATE_S0);
}
}
- up(&pwrctrl->lock);
+ mutex_unlock(&pwrctrl->mutex_lock);
}
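Beyond the comment reflow, the rtl871x_pwrctrl.c hunks above convert the driver's binary-semaphore power lock (previously taken through the _enter_pwrlock() wrapper) into a struct mutex. A minimal sketch of that conversion pattern, using hypothetical foo_* names rather than the driver's structures:

	#include <linux/mutex.h>

	struct foo_priv {
		struct mutex lock;		/* was: struct semaphore lock */
		int state;
	};

	static void foo_init(struct foo_priv *p)
	{
		mutex_init(&p->lock);		/* was: sema_init(&p->lock, 1) */
	}

	static void foo_update(struct foo_priv *p, int v)
	{
		mutex_lock(&p->lock);		/* was: _enter_pwrlock()/down() */
		p->state = v;
		mutex_unlock(&p->lock);		/* was: up() */
	}

Unlike a semaphore, a mutex has a single owner and must be released by the task that locked it, which is why the wait-for-event uses elsewhere in this series are moved to completions instead of mutexes.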
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index dbfb55523545..c82fdf85d474 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -87,16 +87,12 @@ struct reportpwrstate_parm {
unsigned short rsvd;
};
-static inline void _enter_pwrlock(struct semaphore *plock)
-{
- _down_sema(plock);
-}
-
struct pwrctrl_priv {
- struct semaphore lock;
+ struct mutex mutex_lock;
/*volatile*/ u8 rpwm; /* requested power state for fw */
/* fw current power state. updated when 1. read from HCPWM or
- * 2. driver lowers power level */
+ * 2. driver lowers power level
+ */
/*volatile*/ u8 cpwm;
/*volatile*/ u8 tog; /* toggling */
/*volatile*/ u8 cpwm_tog; /* toggling */
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 23c143890252..cbd2e51ba42b 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -265,7 +265,8 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
if ((psta != NULL) && (psta->ieee8021x_blocked)) {
/* blocked
- * only accept EAPOL frame */
+ * only accept EAPOL frame
+ */
if (ether_type == 0x888e) {
prtnframe = precv_frame;
} else {
@@ -277,7 +278,8 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
} else {
/* allowed
* check decryption status, and decrypt the
- * frame if needed */
+ * frame if needed
+ */
prtnframe = precv_frame;
/* check is the EAPOL frame or not (Rekey) */
if (ether_type == 0x888e) {
@@ -334,19 +336,22 @@ static sint sta2sta_data_frame(struct _adapter *adapter,
sta_addr = pattrib->src;
} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* For Station mode, sa and bssid should always be BSSID,
- * and DA is my mac-address */
+ * and DA is my mac-address
+ */
if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN))
return _FAIL;
sta_addr = pattrib->bssid;
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
if (bmcast) {
/* For AP mode, if DA == MCAST, then BSSID should
- * be also MCAST */
+ * be also MCAST
+ */
if (!IS_MCAST(pattrib->bssid))
return _FAIL;
} else { /* not mc-frame */
/* For AP mode, if DA is non-MCAST, then it must be
- * BSSID, and bssid == BSSID */
+ * BSSID, and bssid == BSSID
+ */
if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN))
return _FAIL;
sta_addr = pattrib->src;
@@ -391,7 +396,8 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
if ((GetFrameSubType(ptr)) == WIFI_DATA_NULL)
return _FAIL;
/* drop QoS-SubType Data, including QoS NULL,
- * excluding QoS-Data */
+ * excluding QoS-Data
+ */
if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) ==
WIFI_QOS_DATA_TYPE) {
if (GetFrameSubType(ptr) & (BIT(4) | BIT(5) | BIT(6)))
@@ -445,7 +451,8 @@ static sint sta2ap_data_frame(struct _adapter *adapter,
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
/* For AP mode, if DA is non-MCAST, then it must be BSSID,
* and bssid == BSSID
- * For AP mode, RA=BSSID, TX=STA(SRC_ADDR), A3=DST_ADDR */
+ * For AP mode, RA=BSSID, TX=STA(SRC_ADDR), A3=DST_ADDR
+ */
if (memcmp(pattrib->bssid, mybssid, ETH_ALEN))
return _FAIL;
*psta = r8712_get_stainfo(pstapriv, pattrib->src);
@@ -619,7 +626,8 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
(memcmp(psnap_type, (void *)SNAP_ETH_TYPE_APPLETALK_AARP, 2))) ||
!memcmp(psnap, (void *)bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
+ * replace EtherType
+ */
bsnaphdr = true;
} else {
/* Leave Ethernet header part of hdr and full payload */
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index 77487bb9d3c0..f419943ad75d 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -153,7 +153,8 @@ static inline u8 *get_recvframe_data(union recv_frame *precvframe)
static inline u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
{
/* used for extract sz bytes from rx_data, update rx_data and return
- * the updated rx_data to the caller */
+ * the updated rx_data to the caller
+ */
if (precvframe == NULL)
return NULL;
precvframe->u.hdr.rx_data += sz;
@@ -169,7 +170,8 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
{
/* used for append sz bytes from ptr to rx_tail, update rx_tail and
* return the updated rx_tail to the caller
- * after putting, rx_tail must be still larger than rx_end. */
+ * after putting, rx_tail must be still larger than rx_end.
+ */
if (precvframe == NULL)
return NULL;
precvframe->u.hdr.rx_tail += sz;
@@ -186,7 +188,8 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
/* rmv data from rx_tail (by yitsen)
* used for extract sz bytes from rx_end, update rx_end and return the
* updated rx_end to the caller
- * after pulling, rx_end must be still larger than rx_data. */
+ * after pulling, rx_end must be still larger than rx_data.
+ */
if (precvframe == NULL)
return NULL;
precvframe->u.hdr.rx_tail -= sz;
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index 2295f0e64dc2..fa952e17975b 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -90,18 +90,23 @@ struct RT_PMKID_LIST {
struct security_priv {
u32 AuthAlgrthm; /* 802.11 auth, could be open, shared,
- * 8021x and authswitch */
+ * 8021x and authswitch
+ */
u32 PrivacyAlgrthm; /* This specify the privacy for shared
- * auth. algorithm. */
+ * auth. algorithm.
+ */
u32 PrivacyKeyIndex; /* this is only valid for legendary
- * wep, 0~3 for key id. */
+ * wep, 0~3 for key id.
+ */
union Keytype DefKey[4]; /* this is only valid for def. key */
u32 DefKeylen[4];
u32 XGrpPrivacy; /* This specify the privacy algthm.
- * used for Grp key */
+ * used for Grp key
+ */
u32 XGrpKeyid; /* key id used for Grp Key */
union Keytype XGrpKey[2]; /* 802.1x Group Key, for
- * inx0 and inx1 */
+ * inx0 and inx1
+ */
union Keytype XGrptxmickey[2];
union Keytype XGrprxmickey[2];
union pn48 Grptxpn; /* PN48 used for Grp Key xmit. */
@@ -118,9 +123,11 @@ struct security_priv {
s32 sw_encrypt; /* from registry_priv */
s32 sw_decrypt; /* from registry_priv */
s32 hw_decrypted; /* if the rx packets is hw_decrypted==false,
- * it means the hw has not been ready. */
+ * it means the hw has not been ready.
+ */
u32 ndisauthtype; /* keeps the auth_type & enc_status from upper
- * layer ioctl(wpa_supplicant or wzc) */
+ * layer ioctl(wpa_supplicant or wzc)
+ */
u32 ndisencryptstatus;
struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
struct NDIS_802_11_WEP ndiswep;
@@ -136,7 +143,8 @@ struct security_priv {
u32 btkip_countermeasure_time;
/*-------------------------------------------------------------------
* For WPA2 Pre-Authentication.
- *------------------------------------------------------------------ */
+ *------------------------------------------------------------------
+ **/
struct RT_PMKID_LIST PMKIDList[NUM_PMKID_CACHE];
u8 PMKIDIndex;
};
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e11ce2896893..e2d75e4c473f 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -188,7 +188,8 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
/* for A-MPDU Rx reordering buffer control,
- * cancel reordering_ctrl_timer */
+ * cancel reordering_ctrl_timer
+ */
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
del_timer(&preorder_ctrl->reordering_ctrl_timer);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 99256baafd38..be38364c8a7c 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -204,7 +204,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
{
/*If driver xmit ARP packet, driver can set ps mode to initial
- * setting. It stands for getting DHCP or fix IP.*/
+ * setting. It stands for getting DHCP or fix IP.
+ */
if (pattrib->ether_type == 0x0806) {
if (padapter->pwrctrlpriv.pwr_mode !=
padapter->registrypriv.power_mgnt) {
@@ -232,7 +233,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
if (pattrib->ether_type != 0x8712)
return _FAIL;
/* for mp storing the txcmd per packet,
- * according to the info of txcmd to update pattrib */
+ * according to the info of txcmd to update pattrib
+ */
/*get MP_TXDESC_SIZE bytes txcmd per packet*/
_r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
@@ -244,7 +246,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
if (pattrib->ether_type == ETH_P_IP) {
/* The following is for DHCP and ARP packet, we use cck1M to
* tx these packets and let LPS awake some time
- * to prevent DHCP protocol fail */
+ * to prevent DHCP protocol fail
+ */
u8 tmp[24];
_r8712_pktfile_read(&pktfile, &tmp[0], 24);
@@ -255,7 +258,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
((tmp[21] == 67) && (tmp[23] == 68))) {
/* 68 : UDP BOOTP client
* 67 : UDP BOOTP server
- * Use low rate to send DHCP packet.*/
+ * Use low rate to send DHCP packet.
+ */
pattrib->dhcp_pkt = 1;
}
}
@@ -337,7 +341,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
else
pattrib->bswenc = false;
/* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite
- * some settings above.*/
+ * some settings above.
+ */
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
return _SUCCESS;
@@ -438,7 +443,8 @@ static sint xmitframe_addmic(struct _adapter *padapter,
}
r8712_secgetmic(&micdata, &(mic[0]));
/* add mic code and add the mic code length in
- * last_txcmdsz */
+ * last_txcmdsz
+ */
memcpy(payload, &(mic[0]), 8);
pattrib->last_txcmdsz += 8;
payload = payload - pattrib->last_txcmdsz + 8;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index a9633c3f73d0..d899d0c6d3a6 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -58,7 +58,8 @@ do { \
} while (0)
/* Fixed the Big Endian bug when doing the Tx.
- * The Linksys WRH54G will check this.*/
+ * The Linksys WRH54G will check this.
+ */
#define TKIP_IV(pattrib_iv, txpn, keyidx)\
do { \
pattrib_iv[0] = txpn._byte_.TSC1;\
@@ -105,7 +106,8 @@ struct pkt_attrib {
u16 seqnum;
u16 ether_type;
u16 pktlen; /* the original 802.3 pkt raw_data len
- * (not include ether_hdr data) */
+ * (not include ether_hdr data)
+ */
u16 last_txcmdsz;
u8 pkt_hdrlen; /*the original 802.3 pkt header len*/
@@ -119,7 +121,8 @@ struct pkt_attrib {
u8 priority;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorithm*/
+ * indicate the encrypt algorithm
+ */
u8 iv_len;
u8 icv_len;
unsigned char iv[8];
@@ -176,7 +179,8 @@ struct sta_xmit_priv {
spinlock_t lock;
sint option;
sint apsd_setting; /* When bit mask is on, the associated edca
- * queue supports APSD.*/
+ * queue supports APSD.
+ */
struct tx_servq be_q; /* priority == 0,3 */
struct tx_servq bk_q; /* priority == 1,2*/
struct tx_servq vi_q; /*priority == 4,5*/
diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c
index ad21df16c2bd..0b159850f5a2 100644
--- a/drivers/staging/rtl8712/usb_halinit.c
+++ b/drivers/staging/rtl8712/usb_halinit.c
@@ -196,7 +196,8 @@ u8 r8712_usb_hal_bus_init(struct _adapter *padapter)
msleep(20);
/* Revised POS, */
/* Enable AFE Macro Block's Bandgap and Enable AFE Macro
- * Block's Mbias */
+ * Block's Mbias
+ */
r8712_write8(padapter, SPS0_CTRL + 1, 0x53);
r8712_write8(padapter, SPS0_CTRL, 0x57);
val8 = r8712_read8(padapter, AFE_MISC);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index c1a0ca490546..897d4621a5ce 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -301,7 +301,8 @@ void rtl871x_intf_stop(struct _adapter *padapter)
/*disable_hw_interrupt*/
if (!padapter->bSurpriseRemoved) {
/*device still exists, so driver can do i/o operation
- * TODO: */
+ * TODO:
+ */
}
/* cancel in irp */
@@ -368,7 +369,7 @@ static const struct device_type wlan_type = {
*
* notes: drv_init() is called when the bus driver has located a card for us
* to support. We accept the new device by returning 0.
-*/
+ */
static int r871xu_drv_init(struct usb_interface *pusb_intf,
const struct usb_device_id *pdid)
{
@@ -611,7 +612,8 @@ error:
}
/* rmmod module & unplug(SurpriseRemoved) will call r871xu_dev_remove()
- * => how to recognize both */
+ * => how to recognize both
+ */
static void r871xu_dev_remove(struct usb_interface *pusb_intf)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
@@ -635,12 +637,14 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
r8712_free_drv_sw(padapter);
/* decrease the reference count of the usb device structure
- * when disconnect */
+ * when disconnect
+ */
usb_put_dev(udev);
}
/* If we didn't unplug usb dongle and remove/insert module, driver
* fails on sitesurvey for the first time when device is up.
- * Reset usb port for sitesurvey fail issue. */
+ * Reset usb port for sitesurvey fail issue.
+ */
if (udev->state != USB_STATE_NOTATTACHED)
usb_reset_device(udev);
}
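The usb_intf.c hunks above touch the probe/disconnect lifecycle: r871xu_drv_init() is the probe callback, and r871xu_dev_remove() drops the usb_device reference with usb_put_dev() and, if the dongle is still attached, resets the port to work around the first-site-survey failure. A minimal, hypothetical skeleton of that reference pairing (placeholder names and IDs, not the driver's actual tables):

	#include <linux/module.h>
	#include <linux/usb.h>

	static int demo_probe(struct usb_interface *intf,
			      const struct usb_device_id *id)
	{
		/* take a reference on the usb_device for the life of the bind */
		struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));

		usb_set_intfdata(intf, udev);	/* stash state for disconnect */
		return 0;
	}

	static void demo_disconnect(struct usb_interface *intf)
	{
		struct usb_device *udev = usb_get_intfdata(intf);

		usb_set_intfdata(intf, NULL);
		usb_put_dev(udev);		/* drop the reference taken in probe */
	}

	static const struct usb_device_id demo_ids[] = {
		{ USB_DEVICE(0x0bda, 0x8712) },	/* placeholder VID/PID */
		{ }
	};
	MODULE_DEVICE_TABLE(usb, demo_ids);

	static struct usb_driver demo_driver = {
		.name		= "demo",
		.probe		= demo_probe,
		.disconnect	= demo_disconnect,
		.id_table	= demo_ids,
	};
	module_usb_driver(demo_driver);

	MODULE_LICENSE("GPL");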
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 6f12345709c2..fc6bb0be2a28 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -50,7 +50,7 @@ uint r8712_usb_init_intf_priv(struct intf_priv *pintfpriv)
pintfpriv->piorw_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!pintfpriv->piorw_urb)
return _FAIL;
- sema_init(&(pintfpriv->io_retevt), 0);
+ init_completion(&pintfpriv->io_retevt_comp);
return _SUCCESS;
}
@@ -163,7 +163,7 @@ static void usb_write_mem_complete(struct urb *purb)
else
padapter->bSurpriseRemoved = true;
}
- up(&pintfpriv->io_retevt);
+ complete(&pintfpriv->io_retevt_comp);
}
void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
@@ -187,7 +187,7 @@ void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
wmem, cnt, usb_write_mem_complete,
pio_queue);
usb_submit_urb(piorw_urb, GFP_ATOMIC);
- _down_sema(&pintfpriv->io_retevt);
+ wait_for_completion_interruptible(&pintfpriv->io_retevt_comp);
}
static void r8712_usb_read_port_complete(struct urb *purb)
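The two usb_ops_linux.c hunks above replace the io_retevt semaphore with a struct completion: the submitting thread waits (interruptibly, in the driver's case) and the URB completion callback signals it. A minimal sketch of the pattern with placeholder names; the sketch uses the plain wait_for_completion() for brevity:

	#include <linux/completion.h>

	struct io_ctx {
		struct completion done;		/* was: struct semaphore io_retevt */
	};

	static void io_ctx_init(struct io_ctx *ctx)
	{
		init_completion(&ctx->done);	/* was: sema_init(&sem, 0) */
	}

	/* runs in the URB completion callback */
	static void io_finished(struct io_ctx *ctx)
	{
		complete(&ctx->done);		/* was: up(&sem) */
	}

	/* runs in the submitting thread */
	static void io_wait(struct io_ctx *ctx)
	{
		wait_for_completion(&ctx->done);	/* was: _down_sema(&sem) */
	}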
@@ -202,26 +202,23 @@ static void r8712_usb_read_port_complete(struct urb *purb)
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > (MAX_RECVBUF_SZ)) ||
(purb->actual_length < RXDESC_SIZE)) {
- precvbuf->reuse = true;
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
(unsigned char *)precvbuf);
} else {
+ _pkt *pskb = precvbuf->pskb;
+
precvbuf->transfer_len = purb->actual_length;
pbuf = (uint *)precvbuf->pbuf;
isevt = le32_to_cpu(*(pbuf + 1)) & 0x1ff;
if ((isevt & 0x1ff) == 0x1ff) {
r8712_rxcmd_event_hdl(padapter, pbuf);
- precvbuf->reuse = true;
+ skb_queue_tail(&precvpriv->rx_skb_queue, pskb);
r8712_read_port(padapter, precvpriv->ff_hwaddr,
0, (unsigned char *)precvbuf);
} else {
- _pkt *pskb = precvbuf->pskb;
-
skb_put(pskb, purb->actual_length);
skb_queue_tail(&precvpriv->rx_skb_queue, pskb);
tasklet_hi_schedule(&precvpriv->recv_tasklet);
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
r8712_read_port(padapter, precvpriv->ff_hwaddr,
0, (unsigned char *)precvbuf);
}
@@ -241,7 +238,6 @@ static void r8712_usb_read_port_complete(struct urb *purb)
}
/* Fall through. */
case -EPROTO:
- precvbuf->reuse = true;
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
(unsigned char *)precvbuf);
break;
@@ -270,51 +266,43 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
struct usb_device *pusbd = pdvobj->pusbdev;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
- adapter->pwrctrlpriv.pnp_bstop_trx)
+ adapter->pwrctrlpriv.pnp_bstop_trx || !precvbuf)
return _FAIL;
- if (precvbuf->reuse || !precvbuf->pskb) {
- precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
- if (precvbuf->pskb != NULL)
- precvbuf->reuse = true;
+ r8712_init_recvbuf(adapter, precvbuf);
+ /* Try to use skb from the free queue */
+ precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
+
+ if (!precvbuf->pskb) {
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev,
+ MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ if (!precvbuf->pskb)
+ return _FAIL;
+ tmpaddr = (addr_t)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+ skb_reserve(precvbuf->pskb,
+ (RECVBUFF_ALIGN_SZ - alignment));
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+ } else { /* skb is reused */
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
}
- if (precvbuf != NULL) {
- r8712_init_recvbuf(adapter, precvbuf);
- /* re-assign for linux based on skb */
- if (!precvbuf->reuse || !precvbuf->pskb) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev,
- MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (!precvbuf->pskb)
- return _FAIL;
- tmpaddr = (addr_t)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(precvbuf->pskb,
- (RECVBUFF_ALIGN_SZ - alignment));
- precvbuf->phead = precvbuf->pskb->head;
- precvbuf->pdata = precvbuf->pskb->data;
- precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
- precvbuf->pend = skb_end_pointer(precvbuf->pskb);
- precvbuf->pbuf = precvbuf->pskb->data;
- } else { /* reuse skb */
- precvbuf->phead = precvbuf->pskb->head;
- precvbuf->pdata = precvbuf->pskb->data;
- precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
- precvbuf->pend = skb_end_pointer(precvbuf->pskb);
- precvbuf->pbuf = precvbuf->pskb->data;
- precvbuf->reuse = false;
- }
- purb = precvbuf->purb;
- /* translate DMA FIFO addr to pipehandle */
- pipe = ffaddr2pipehdl(pdvobj, addr);
- usb_fill_bulk_urb(purb, pusbd, pipe,
- precvbuf->pbuf, MAX_RECVBUF_SZ,
- r8712_usb_read_port_complete,
- precvbuf);
- err = usb_submit_urb(purb, GFP_ATOMIC);
- if ((err) && (err != (-EPERM)))
- ret = _FAIL;
- } else {
+ purb = precvbuf->purb;
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ precvbuf->pbuf, MAX_RECVBUF_SZ,
+ r8712_usb_read_port_complete,
+ precvbuf);
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if ((err) && (err != (-EPERM)))
ret = _FAIL;
- }
return ret;
}
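The rewritten r8712_usb_read_port() above drops the precvbuf->reuse bookkeeping: it always pulls an skb from the free queue, or allocates a fresh one aligned to RECVBUFF_ALIGN_SZ, and then rearms the bulk-in URB. A condensed sketch of that allocate-align-submit flow with placeholder sizes and names; the context pointer is assumed to remember the skb so the completion handler can queue it or return it to the free list:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/usb.h>

	#define DEMO_BUF_SZ	30720	/* stand-in for MAX_RECVBUF_SZ */
	#define DEMO_ALIGN_SZ	8	/* stand-in for RECVBUFF_ALIGN_SZ */

	static int demo_read_port(struct usb_device *udev, struct net_device *ndev,
				  struct sk_buff_head *free_q, struct urb *urb,
				  unsigned int pipe, usb_complete_t done, void *ctx)
	{
		struct sk_buff *skb = skb_dequeue(free_q);	/* try to reuse */

		if (!skb) {
			unsigned long align;

			skb = netdev_alloc_skb(ndev, DEMO_BUF_SZ + DEMO_ALIGN_SZ);
			if (!skb)
				return -ENOMEM;
			/* same alignment arithmetic as the driver uses */
			align = (unsigned long)skb->data & (DEMO_ALIGN_SZ - 1);
			skb_reserve(skb, DEMO_ALIGN_SZ - align);
		}

		/* ctx is assumed to keep the skb pointer so the completion
		 * handler can hand it to the stack or push it back onto free_q */
		usb_fill_bulk_urb(urb, udev, pipe, skb->data, DEMO_BUF_SZ,
				  done, ctx);
		return usb_submit_urb(urb, GFP_ATOMIC);
	}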
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 7a352c45344f..b8af9656e6da 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -376,7 +376,8 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/*-----------------------------------------------------------------------------
Below is for the security related definition
-------------------------------------------------------------------------------*/
+ *-----------------------------------------------------------------------------
+ */
#define _RESERVED_FRAME_TYPE_ 0
#define _SKB_FRAME_TYPE_ 2
#define _PRE_ALLOCMEM_ 1
@@ -420,7 +421,8 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/* ---------------------------------------------------------------------------
Below is the fixed elements...
------------------------------------------------------------------------------*/
+ * ---------------------------------------------------------------------------
+ */
#define _AUTH_ALGM_NUM_ 2
#define _AUTH_SEQ_NUM_ 2
#define _BEACON_ITERVAL_ 2
@@ -448,20 +450,23 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/*-----------------------------------------------------------------------------
Below is the definition for 802.11i / 802.1x
-------------------------------------------------------------------------------*/
+ *------------------------------------------------------------------------------
+ */
#define _IEEE8021X_MGT_ 1 /*WPA */
#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
/*-----------------------------------------------------------------------------
Below is the definition for WMM
-------------------------------------------------------------------------------*/
+ *------------------------------------------------------------------------------
+ */
#define _WMM_IE_Length_ 7 /* for WMM STA */
#define _WMM_Para_Element_Length_ 24
/*-----------------------------------------------------------------------------
Below is the definition for 802.11n
-------------------------------------------------------------------------------*/
+ *------------------------------------------------------------------------------
+ */
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
diff --git a/drivers/staging/rtl8712/wlan_bssdef.h b/drivers/staging/rtl8712/wlan_bssdef.h
index fda5707c4acd..86a88b493a43 100644
--- a/drivers/staging/rtl8712/wlan_bssdef.h
+++ b/drivers/staging/rtl8712/wlan_bssdef.h
@@ -171,7 +171,8 @@ struct NDIS_802_11_REMOVE_KEY {
struct NDIS_802_11_WEP {
u32 Length; /* Length of this structure */
u32 KeyIndex; /* 0 is the per-client key,
- * 1-N are the global keys */
+ * 1-N are the global keys
+ */
u32 KeyLength; /* length of key in bytes */
u8 KeyMaterial[16]; /* variable length depending on above field */
};
@@ -194,7 +195,8 @@ struct wlan_network {
struct list_head list;
int network_type; /*refer to ieee80211.h for WIRELESS_11A/B/G */
int fixed; /* set to fixed when not to be removed asi
- * site-surveying */
+ * site-surveying
+ */
unsigned int last_scanned; /*timestamp for the network */
int aid; /*will only be valid when a BSS is joined. */
int join_res;
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 695f9b9fc749..4ee4136b5c28 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -31,6 +31,7 @@
#include <linux/usb.h>
#include <linux/ip.h>
#include <linux/if_ether.h>
+#include <linux/kmemleak.h>
#include "osdep_service.h"
#include "drv_types.h"
@@ -91,7 +92,8 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
} else {
/* "When priority processing of data frames is supported,
* a STA's SME should send EAPOL-Key frames at the highest
- * priority." */
+ * priority."
+ */
if (pattrib->ether_type == 0x888e)
UserPriority = 7;
@@ -132,6 +134,7 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter,
netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
return _FAIL;
}
+ kmemleak_not_leak(pxmitbuf->pxmit_urb[i]);
}
return _SUCCESS;
}
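The hunk above adds kmemleak_not_leak() right after each TX URB allocation, presumably because the only remaining references to those URBs live in memory kmemleak does not scan, so they would otherwise be reported as false-positive leaks. A minimal sketch of the annotation (placeholder names):

	#include <linux/kmemleak.h>
	#include <linux/usb.h>

	static int demo_alloc_tx_urbs(struct urb **urbs, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (!urbs[i])
				return -ENOMEM;
			/* object stays reachable through driver-private state
			 * that kmemleak does not track, so suppress the report */
			kmemleak_not_leak(urbs[i]);
		}
		return 0;
	}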
@@ -162,16 +165,16 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev)
struct _adapter *padapter = netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
- if (!r8712_if_up(padapter)) {
+ if (!r8712_if_up(padapter))
goto _xmit_entry_drop;
- }
+
pxmitframe = r8712_alloc_xmitframe(pxmitpriv);
- if (!pxmitframe) {
+ if (!pxmitframe)
goto _xmit_entry_drop;
- }
- if ((!r8712_update_attrib(padapter, pkt, &pxmitframe->attrib))) {
+
+ if ((!r8712_update_attrib(padapter, pkt, &pxmitframe->attrib)))
goto _xmit_entry_drop;
- }
+
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_TX);
pxmitframe->pkt = pkt;
if (r8712_pre_xmit(padapter, pxmitframe)) {
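The final xmit_linux.c hunk is another coding-style cleanup: braces are dropped from single-statement if bodies. A trivial, self-contained illustration with made-up names:

	#include <stdbool.h>

	static int demo_send(bool ready)
	{
		if (!ready)		/* single statement: no braces */
			return -1;
		return 0;
	}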
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
deleted file mode 100644
index 277c1ab69317..000000000000
--- a/drivers/staging/rtl8723au/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
-config R8723AU
- tristate "Realtek RTL8723AU Wireless LAN NIC driver (deprecated)"
- depends on USB && WLAN && RFKILL
- select WIRELESS_EXT
- select WEXT_PRIV
- select CFG80211
- default n
- ---help---
- This option adds the Realtek RTL8723AU USB device such as found in
- the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
-
- Note: This driver is deprecated and scheduled to be removed in a
- future kernel release. Please use rtl8xxxu instead.
-
-if R8723AU
-
-config 8723AU_AP_MODE
- bool "Realtek RTL8723AU AP mode"
- default y
- ---help---
- This option enables Access Point mode. Unless you know that your system
- will never be used as an AP, or the target system has limited memory,
- "Y" should be selected.
-
-config 8723AU_BT_COEXIST
- bool "Realtek RTL8723AU BlueTooth Coexistence"
- default y
- ---help---
- This option enables icoexistence with BlueTooth communications for the r8723au driver.
- Unless you know that this driver will never by used with BT, or the target system has
- limited memory, "Y" should be selected.
-
-endif
diff --git a/drivers/staging/rtl8723au/Makefile b/drivers/staging/rtl8723au/Makefile
deleted file mode 100644
index 3e8989018a88..000000000000
--- a/drivers/staging/rtl8723au/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-r8723au-y := \
- core/rtw_cmd.o \
- core/rtw_efuse.o \
- core/rtw_ieee80211.o \
- core/rtw_mlme.o \
- core/rtw_mlme_ext.o \
- core/rtw_pwrctrl.o \
- core/rtw_recv.o \
- core/rtw_security.o \
- core/rtw_sreset.o \
- core/rtw_sta_mgt.o \
- core/rtw_xmit.o \
- core/rtw_wlan_util.o \
- hal/hal_com.o \
- hal/hal_intf.o \
- hal/Hal8723PwrSeq.o \
- hal/Hal8723UHWImg_CE.o \
- hal/HalDMOutSrc8723A_CE.o \
- hal/HalHWImg8723A_BB.o \
- hal/HalHWImg8723A_MAC.o \
- hal/HalHWImg8723A_RF.o \
- hal/HalPwrSeqCmd.o \
- hal/odm_RegConfig8723A.o \
- hal/odm_debug.o \
- hal/odm_interface.o \
- hal/odm_HWConfig.o \
- hal/odm.o \
- hal/rtl8723a_cmd.o \
- hal/rtl8723a_dm.o \
- hal/rtl8723a_hal_init.o \
- hal/rtl8723a_phycfg.o \
- hal/rtl8723a_rf6052.o \
- hal/rtl8723a_rxdesc.o \
- hal/rtl8723a_sreset.o \
- hal/rtl8723au_recv.o \
- hal/rtl8723au_xmit.o \
- hal/usb_halinit.o \
- hal/usb_ops_linux.o \
- os_dep/ioctl_cfg80211.o \
- os_dep/mlme_linux.o \
- os_dep/os_intfs.o \
- os_dep/recv_linux.o \
- os_dep/usb_intf.o \
- os_dep/usb_ops_linux.o \
- os_dep/xmit_linux.o
-
-r8723au-$(CONFIG_8723AU_BT_COEXIST) += hal/rtl8723a_bt-coexist.o
-r8723au-$(CONFIG_8723AU_AP_MODE) += core/rtw_ap.o
-
-obj-$(CONFIG_R8723AU) := r8723au.o
-
-ccflags-y += $(call cc-option,-Wtype-limits,)
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
diff --git a/drivers/staging/rtl8723au/TODO b/drivers/staging/rtl8723au/TODO
deleted file mode 100644
index 42b86e478df8..000000000000
--- a/drivers/staging/rtl8723au/TODO
+++ /dev/null
@@ -1,16 +0,0 @@
-TODO:
-- find and remove code valid only for 5 HGz. Many of the obvious
- ones have been removed, but things like channel > 14 still exist.
-- find and remove any code for other chips that is left over
-- convert any remaining unusual variable types
-- find codes that can use %pM and %Nph formatting
-- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
- of them will require refactoring
-- merge Realtek's bugfixes and new features into the driver
-- switch to use MAC80211
-
-A mac80211 driver for this hardware already was integrated at
-drivers/net/wireless/realtek/rtl8xxxu/
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
-Jes Sorensen <Jes.Sorensen@redhat.com>, and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
deleted file mode 100644
index aad686da3cf0..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ /dev/null
@@ -1,1738 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_AP_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <linux/ieee80211.h>
-#include <wifi.h>
-#include <rtl8723a_cmd.h>
-#include <rtl8723a_hal.h>
-#include <asm/unaligned.h>
-#include <rtw_mlme_ext.h>
-
-void init_mlme_ap_info23a(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- spin_lock_init(&pmlmepriv->bcn_update_lock);
-
- /* for ACL */
- _rtw_init_queue23a(&pacl_list->acl_node_q);
-
- start_ap_mode23a(padapter);
-}
-
-void free_mlme_ap_info23a(struct rtw_adapter *padapter)
-{
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmlmepriv->update_bcn = false;
- pmlmeext->bstart_bss = false;
-
- rtw_sta_flush23a(padapter);
-
- pmlmeinfo->state = MSR_NOLINK;
-
- /* free_assoc_sta_resources */
- rtw_free_all_stainfo23a(padapter);
-
- /* free bc/mc sta_info */
- psta = rtw_get_bcmc_stainfo23a(padapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-}
-
-static void update_BCNTIM(struct rtw_adapter *padapter)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
- unsigned char *pie = pnetwork_mlmeext->IEs;
- u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
- uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
-
- p = rtw_get_ie23a(pie, WLAN_EID_TIM, &tim_ielen,
- pnetwork_mlmeext->IELength);
- if (p != NULL && tim_ielen > 0) {
- tim_ielen += 2;
-
- premainder_ie = p+tim_ielen;
-
- tim_ie_offset = (int)(p - pie);
-
- remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
-
- /* append TIM IE from dst_ie offset */
- dst_ie = p;
- } else {
- tim_ielen = 0;
-
- /* calculate head_len */
- offset = 0;
-
- /* get ssid_ie len */
- p = rtw_get_ie23a(pie, WLAN_EID_SSID,
- &tmp_len, pnetwork_mlmeext->IELength);
- if (p != NULL)
- offset += tmp_len+2;
-
- /* get supported rates len */
- p = rtw_get_ie23a(pie, WLAN_EID_SUPP_RATES,
- &tmp_len, pnetwork_mlmeext->IELength);
- if (p != NULL)
- offset += tmp_len+2;
-
- /* DS Parameter Set IE, len = 3 */
- offset += 3;
-
- premainder_ie = pie + offset;
-
- remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
-
- /* append TIM IE from offset */
- dst_ie = pie + offset;
- }
-
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = kmalloc(remainder_ielen, GFP_ATOMIC);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
- }
-
- *dst_ie++ = WLAN_EID_TIM;
-
- if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fc))
- tim_ielen = 5;
- else
- tim_ielen = 4;
-
- *dst_ie++ = tim_ielen;
-
- *dst_ie++ = 0; /* DTIM count */
- *dst_ie++ = 1; /* DTIM period */
-
- if (pstapriv->tim_bitmap & BIT(0)) /* for bc/mc frames */
- *dst_ie++ = BIT(0); /* bitmap ctrl */
- else
- *dst_ie++ = 0;
-
- if (tim_ielen == 4) {
- *dst_ie++ = pstapriv->tim_bitmap & 0xff;
- } else if (tim_ielen == 5) {
- put_unaligned_le16(pstapriv->tim_bitmap, dst_ie);
- dst_ie += 2;
- }
-
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
-
- kfree(pbackup_remainder_ie);
- }
-
- offset = (uint)(dst_ie - pie);
- pnetwork_mlmeext->IELength = offset + remainder_ielen;
-
- set_tx_beacon_cmd23a(padapter);
-}
-
-static u8 chk_sta_is_alive(struct sta_info *psta)
-{
- u8 ret = false;
-
- if ((psta->sta_stats.last_rx_data_pkts +
- psta->sta_stats.last_rx_ctrl_pkts) !=
- (psta->sta_stats.rx_data_pkts + psta->sta_stats.rx_ctrl_pkts))
- ret = true;
-
- sta_update_last_rx_pkts(psta);
-
- return ret;
-}
-
-void expire_timeout_chk23a(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- u8 updated = 0;
- struct sta_info *psta, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 chk_alive_num = 0;
- struct sta_info *chk_alive_list[NUM_STA];
- int i;
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- phead = &pstapriv->auth_list;
- /* check auth_queue */
- list_for_each_entry_safe(psta, ptmp, phead, auth_list) {
- if (psta->expire_to > 0) {
- psta->expire_to--;
- if (psta->expire_to == 0) {
- list_del_init(&psta->auth_list);
- pstapriv->auth_list_cnt--;
-
- DBG_8723A("auth expire %pM\n", psta->hwaddr);
-
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- }
- }
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- /* check asoc_queue */
- list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
- if (chk_sta_is_alive(psta) || !psta->expire_to) {
- psta->expire_to = pstapriv->expire_to;
- psta->keep_alive_trycnt = 0;
- } else {
- psta->expire_to--;
- }
-
- if (psta->expire_to <= 0) {
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (padapter->registrypriv.wifi_spec == 1) {
- psta->expire_to = pstapriv->expire_to;
- continue;
- }
-
- if (psta->state & WIFI_SLEEP_STATE) {
- if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
- /*
- * check if alive by another method
- * if station is at ps mode.
- */
- psta->expire_to = pstapriv->expire_to;
- psta->state |= WIFI_STA_ALIVE_CHK_STATE;
- /*
- * update bcn with tim_bitmap
- * for this station
- */
- pstapriv->tim_bitmap |= CHKBIT(psta->aid);
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
-
- if (!pmlmeext->active_keep_alive_check)
- continue;
- }
- }
-
- if (pmlmeext->active_keep_alive_check) {
- chk_alive_list[chk_alive_num++] = psta;
- continue;
- }
-
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
-
- DBG_8723A("asoc expire %pM, state = 0x%x\n",
- psta->hwaddr, psta->state);
- updated = ap_free_sta23a(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
- } else {
- /*
- * TODO: Aging mechanism to digest frames in
- * sleep_q to avoid running out of xmitframe
- */
- if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt)
- && padapter->xmitpriv.free_xmitframe_cnt < ((NR_XMITFRAME/pstapriv->asoc_list_cnt)/2)
- ) {
- DBG_8723A("%s sta:%pM, sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n",
- __func__,
- psta->hwaddr,
- psta->sleepq_len,
- padapter->xmitpriv.free_xmitframe_cnt,
- pstapriv->asoc_list_cnt);
- wakeup_sta_to_xmit23a(padapter, psta);
- }
- }
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- if (chk_alive_num) {
-
- u8 backup_oper_channel = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- /*
- * switch to correct channel of current
- * network before issue keep-alive frames
- */
- if (rtw_get_oper_ch23a(padapter) != pmlmeext->cur_channel) {
- backup_oper_channel = rtw_get_oper_ch23a(padapter);
- SelectChannel23a(padapter, pmlmeext->cur_channel);
- }
-
- /* issue null data to check sta alive */
- for (i = 0; i < chk_alive_num; i++) {
-
- int ret = _FAIL;
-
- psta = chk_alive_list[i];
- if (!(psta->state & _FW_LINKED))
- continue;
-
- if (psta->state & WIFI_SLEEP_STATE)
- ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 1, 50);
- else
- ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 3, 50);
-
- psta->keep_alive_trycnt++;
- if (ret == _SUCCESS) {
- DBG_8723A("asoc check, sta(%pM) is alive\n",
- psta->hwaddr);
- psta->expire_to = pstapriv->expire_to;
- psta->keep_alive_trycnt = 0;
- continue;
- } else if (psta->keep_alive_trycnt <= 3) {
- DBG_8723A("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
- psta->expire_to = 1;
- continue;
- }
-
- psta->keep_alive_trycnt = 0;
-
- DBG_8723A("asoc expire %pM, state = 0x%x\n",
- psta->hwaddr, psta->state);
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&psta->asoc_list)) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta23a(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- }
-
- if (backup_oper_channel > 0) /* back to original operation channel */
- SelectChannel23a(padapter, backup_oper_channel);
-}
-
- associated_clients_update23a(padapter, updated);
-}
-
-void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_level)
-{
- int i;
- u8 rf_type;
- u32 init_rate = 0;
- unsigned char sta_band = 0, raid, shortGIrate = false;
- unsigned char limit;
- unsigned int tx_ra_bitmap = 0;
- struct ht_priv *psta_ht = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_network = &pmlmepriv->cur_network.network;
-
- if (psta)
- psta_ht = &psta->htpriv;
- else
- return;
-
- if (!(psta->state & _FW_LINKED))
- return;
-
- /* b/g mode ra_bitmap */
- for (i = 0; i < sizeof(psta->bssrateset); i++) {
- if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
- }
- /* n mode ra_bitmap */
- if (psta_ht->ht_option) {
- rf_type = rtl8723a_get_rf_type(padapter);
-
- if (rf_type == RF_2T2R)
- limit = 16; /* 2R */
- else
- limit = 8; /* 1R */
-
- for (i = 0; i < limit; i++) {
- if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
- tx_ra_bitmap |= BIT(i + 12);
- }
-
- /* max short GI rate */
- shortGIrate = psta_ht->sgi;
- }
-
- if (pcur_network->DSConfig > 14) {
- /* 5G band */
- if (tx_ra_bitmap & 0xffff000)
- sta_band |= WIRELESS_11_5N | WIRELESS_11A;
- else
- sta_band |= WIRELESS_11A;
- } else {
- if (tx_ra_bitmap & 0xffff000)
- sta_band |= WIRELESS_11_24N | WIRELESS_11G | WIRELESS_11B;
- else if (tx_ra_bitmap & 0xff0)
- sta_band |= WIRELESS_11G | WIRELESS_11B;
- else
- sta_band |= WIRELESS_11B;
- }
-
- psta->wireless_mode = sta_band;
-
- raid = networktype_to_raid23a(sta_band);
- init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
-
- if (psta->aid < NUM_STA) {
- u8 arg;
-
- arg = psta->mac_id&0x1f;
-
- arg |= BIT(7); /* support entry 2~31 */
-
- if (shortGIrate == true)
- arg |= BIT(5);
-
- tx_ra_bitmap |= ((raid<<28)&0xf0000000);
-
- DBG_8723A("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = "
- "0x%x\n",
- __func__, psta->mac_id, raid, tx_ra_bitmap, arg);
-
- /* bitmap[0:27] = tx_rate_bitmap */
- /* bitmap[28:31]= Rate Adaptive id */
- /* arg[0:4] = macid */
- /* arg[5] = Short GI */
- rtl8723a_add_rateatid(padapter, tx_ra_bitmap, arg, rssi_level);
-
- if (shortGIrate == true)
- init_rate |= BIT(6);
-
- /* set ra_id, init_rate */
- psta->raid = raid;
- psta->init_rate = init_rate;
-
- } else
- DBG_8723A("station aid %d exceed the max number\n", psta->aid);
-}
-
-static void update_bmc_sta(struct rtw_adapter *padapter)
-{
- u32 init_rate = 0;
- unsigned char network_type, raid;
- int i, supportRateNum = 0;
- unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_network = &pmlmepriv->cur_network.network;
- struct sta_info *psta = rtw_get_bcmc_stainfo23a(padapter);
-
- if (psta) {
- psta->aid = 0; /* default set to 0 */
- psta->mac_id = psta->aid + 1;
-
- psta->qos_option = 0;
- psta->htpriv.ht_option = false;
-
- psta->ieee8021x_blocked = 0;
-
- memset((void *)&psta->sta_stats, 0,
- sizeof(struct stainfo_stats));
-
- /* prepare for add_RATid23a */
- supportRateNum = rtw_get_rateset_len23a((u8 *)&pcur_network->SupportedRates);
- network_type = rtw_check_network_type23a((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
-
- memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
- psta->bssratelen = supportRateNum;
-
- /* b/g mode ra_bitmap */
- for (i = 0; i < supportRateNum; i++) {
- if (psta->bssrateset[i])
- tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
- }
-
- if (pcur_network->DSConfig > 14) {
- /* force to A mode. 5G doesn't support CCK rates */
- network_type = WIRELESS_11A;
- tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
- } else {
- /* force to b mode */
- network_type = WIRELESS_11B;
- tx_ra_bitmap = 0xf;
- }
-
- raid = networktype_to_raid23a(network_type);
- init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
-
- /* ap mode */
- rtl8723a_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
-
- {
- u8 arg;
-
- arg = psta->mac_id&0x1f;
-
- arg |= BIT(7);
-
- tx_ra_bitmap |= ((raid<<28)&0xf0000000);
-
- DBG_8723A("update_bmc_sta, mask = 0x%x, arg = 0x%x\n", tx_ra_bitmap, arg);
-
- /* bitmap[0:27] = tx_rate_bitmap */
- /* bitmap[28:31]= Rate Adaptive id */
- /* arg[0:4] = macid */
- /* arg[5] = Short GI */
- rtl8723a_add_rateatid(padapter, tx_ra_bitmap, arg, 0);
- }
-
- /* set ra_id, init_rate */
- psta->raid = raid;
- psta->init_rate = init_rate;
-
- spin_lock_bh(&psta->lock);
- psta->state = _FW_LINKED;
- spin_unlock_bh(&psta->lock);
-
- } else
- DBG_8723A("add_RATid23a_bmc_sta error!\n");
-}
-
-/*
- * AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode
- * MAC_ID = AID+1 for sta in ap/adhoc mode
- * MAC_ID = 1 for bc/mc for sta/ap/adhoc
- * MAC_ID = 0 for bssid for sta/ap/adhoc
- * CAM_ID = 0~3 for default key, cmd_id = macid + 3, macid = aid + 1;
- */
-void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
- struct ht_priv *phtpriv_sta = &psta->htpriv;
- /* set intf_tag to if1 */
-
- psta->mac_id = psta->aid+1;
- DBG_8723A("%s\n", __func__);
-
- /* ap mode */
- rtl8723a_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- psta->ieee8021x_blocked = true;
- else
- psta->ieee8021x_blocked = false;
-
- /* update sta's cap */
-
- /* ERP */
- VCS_update23a(padapter, psta);
- /* HT related cap */
- if (phtpriv_sta->ht_option) {
- /* check if sta supports rx ampdu */
- phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
-
- /* check if sta support s Short GI */
- if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40))
- phtpriv_sta->sgi = true;
-
- /* bwmode */
- if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40)) {
- /* phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_40; */
- phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
- phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
-
- }
-
- psta->qos_option = true;
-
- } else {
- phtpriv_sta->ampdu_enable = false;
-
- phtpriv_sta->sgi = false;
- phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
- phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
-
- /* Rx AMPDU */
- send_delba23a(padapter, 0, psta->hwaddr); /* recipient */
-
- /* TX AMPDU */
- send_delba23a(padapter, 1, psta->hwaddr); /* originator */
- phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
- phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
-
- /* todo: init other variables */
-
- memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
-
- spin_lock_bh(&psta->lock);
- psta->state |= _FW_LINKED;
- spin_unlock_bh(&psta->lock);
-}
-
-static void update_hw_ht_param(struct rtw_adapter *padapter)
-{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_8723A("%s\n", __func__);
- /*
- * handle A-MPDU parameter field
- * AMPDU_para [1:0]:Max AMPDU Len => 0:8k, 1:16k, 2:32k, 3:64k
- * AMPDU_para [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->ht_cap.ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_FACTOR;
-
- min_MPDU_spacing = (pmlmeinfo->ht_cap.ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
-
- rtl8723a_set_ampdu_min_space(padapter, min_MPDU_spacing);
- rtl8723a_set_ampdu_factor(padapter, max_AMPDU_len);
-
- /* Config SM Power Save setting */
- pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->ht_cap.cap_info) &
- IEEE80211_HT_CAP_SM_PS) >> 2;
- if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
- DBG_8723A("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
-}
-
-static void start_bss_network(struct rtw_adapter *padapter, u8 *pbuf)
-{
- const u8 *p;
- u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
- u16 bcn_interval;
- u32 acparm;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct wlan_bssid_ex *pnetwork = &pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
- struct ieee80211_ht_operation *pht_info = NULL;
-
- bcn_interval = (u16)pnetwork->beacon_interval;
- cur_channel = pnetwork->DSConfig;
- cur_bwmode = HT_CHANNEL_WIDTH_20;
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- /*
- * check if there is wps ie
- * if there is wpsie in beacon the hostapd will
- * update beacon twice when stating hostapd
- * and at first time the security
- * ie (RSN/WPA IE) will not include in beacon
- */
- if (!cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- pnetwork->IEs,
- pnetwork->IELength))
- pmlmeext->bstart_bss = true;
-
- /* todo: update wmm, ht cap */
- /* pmlmeinfo->WMM_enable; */
- /* pmlmeinfo->HT_enable; */
- if (pmlmepriv->qos_option)
- pmlmeinfo->WMM_enable = true;
- if (pmlmepriv->htpriv.ht_option) {
- pmlmeinfo->WMM_enable = true;
- pmlmeinfo->HT_enable = true;
-
- update_hw_ht_param(padapter);
- }
-
- if (pmlmepriv->cur_network.join_res != true) {
-		/*
-		 * set only the first time;
-		 * the WEP key is set before this
-		 * function, so do not clear the CAM.
-		 */
- if (psecuritypriv->dot11PrivacyAlgrthm !=
- WLAN_CIPHER_SUITE_WEP40 &&
- psecuritypriv->dot11PrivacyAlgrthm !=
- WLAN_CIPHER_SUITE_WEP104)
- flush_all_cam_entry23a(padapter); /* clear CAM */
- }
-
- /* set MSR to AP_Mode */
- rtl8723a_set_media_status(padapter, MSR_AP);
-
- /* Set BSSID REG */
- hw_var_set_bssid(padapter, pnetwork->MacAddress);
-
- /* Set EDCA param reg */
- acparm = 0x002F3217; /* VO */
- rtl8723a_set_ac_param_vo(padapter, acparm);
- acparm = 0x005E4317; /* VI */
- rtl8723a_set_ac_param_vi(padapter, acparm);
- acparm = 0x005ea42b;
- rtl8723a_set_ac_param_be(padapter, acparm);
- acparm = 0x0000A444; /* BK */
- rtl8723a_set_ac_param_bk(padapter, acparm);
-
- /* Set Security */
- val8 = (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) ?
- 0xcc : 0xcf;
- rtl8723a_set_sec_cfg(padapter, val8);
-
- /* Beacon Control related register */
- rtl8723a_set_beacon_interval(padapter, bcn_interval);
-
- UpdateBrateTbl23a(padapter, pnetwork->SupportedRates);
- HalSetBrateCfg23a(padapter, pnetwork->SupportedRates);
-
- if (!pmlmepriv->cur_network.join_res) {
- /* setting only at first time */
-
- /* disable dynamic functions, such as high power, DIG */
-
- /* turn on all dynamic functions */
- rtl8723a_odm_support_ability_set(padapter,
- DYNAMIC_ALL_FUNC_ENABLE);
- }
- /* set channel, bwmode */
-
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, pnetwork->IEs,
- pnetwork->IELength);
- if (p && p[1]) {
- pht_info = (struct ieee80211_ht_operation *)(p + 2);
-
- if (pregpriv->cbw40_enable && pht_info->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
- /* switch to the 40M Hz mode */
- cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch (pht_info->ht_param &
- IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- /*
- * pmlmeext->cur_ch_offset =
- * HAL_PRIME_CHNL_OFFSET_LOWER;
- */
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- }
- }
-	/*
-	 * TODO: decide how to choose the PHY parameters
-	 * in concurrent mode with a single PHY
-	 */
- set_channel_bwmode23a(padapter, cur_channel, cur_ch_offset, cur_bwmode);
-
- DBG_8723A("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode,
- cur_ch_offset);
-
- pmlmeext->cur_channel = cur_channel;
- pmlmeext->cur_bwmode = cur_bwmode;
- pmlmeext->cur_ch_offset = cur_ch_offset;
- pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
-
- /* update cur_wireless_mode */
- update_wireless_mode23a(padapter);
-
- /* update capability after cur_wireless_mode updated */
- update_capinfo23a(padapter, pnetwork->capability);
-
- /* let pnetwork_mlmeext == pnetwork_mlme. */
- memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
-
- if (pmlmeext->bstart_bss) {
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
-
- /* issue beacon frame */
- if (send_beacon23a(padapter) == _FAIL)
- DBG_8723A("issue_beacon23a, fail!\n");
- }
-
- /* update bc/mc sta_info */
- update_bmc_sta(padapter);
-}
-
-int rtw_check_beacon_data23a(struct rtw_adapter *padapter,
- struct ieee80211_mgmt *mgmt, unsigned int len)
-{
- int ret = _SUCCESS;
- u8 *p;
- u8 *pHT_caps_ie = NULL;
- u8 *pHT_info_ie = NULL;
- struct sta_info *psta = NULL;
- u16 ht_cap = false;
- uint ie_len = 0;
- int group_cipher, pairwise_cipher;
- u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
- int supportRateNum = 0;
- u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pbss_network = &pmlmepriv->cur_network.network;
- u8 *ie = pbss_network->IEs;
- u8 *pbuf = mgmt->u.beacon.variable;
-
- len -= offsetof(struct ieee80211_mgmt, u.beacon.variable);
- /* SSID */
- /* Supported rates */
- /* DS Params */
- /* WLAN_EID_COUNTRY */
- /* ERP Information element */
- /* Extended supported rates */
- /* WPA/WPA2 */
- /* Wi-Fi Wireless Multimedia Extensions */
- /* ht_capab, ht_oper */
- /* WPS IE */
-
- DBG_8723A("%s, len =%d\n", __func__, len);
-
- if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return _FAIL;
-
- if (len > MAX_IE_SZ)
- return _FAIL;
-
- pbss_network->IELength = len;
-
- memset(ie, 0, MAX_IE_SZ);
-
- memcpy(ie, pbuf, pbss_network->IELength);
-
- if (pbss_network->ifmode != NL80211_IFTYPE_AP &&
- pbss_network->ifmode != NL80211_IFTYPE_P2P_GO)
- return _FAIL;
-
- pbss_network->Rssi = 0;
-
- memcpy(pbss_network->MacAddress, myid(&padapter->eeprompriv), ETH_ALEN);
-
- /* SSID */
- p = rtw_get_ie23a(ie, WLAN_EID_SSID, &ie_len, pbss_network->IELength);
- if (p && ie_len > 0) {
- memset(&pbss_network->Ssid, 0, sizeof(struct cfg80211_ssid));
- memcpy(pbss_network->Ssid.ssid, (p + 2), ie_len);
- pbss_network->Ssid.ssid_len = ie_len;
- }
-
- /* channel */
- channel = 0;
- p = rtw_get_ie23a(ie, WLAN_EID_DS_PARAMS, &ie_len,
- pbss_network->IELength);
- if (p && ie_len > 0)
- channel = *(p + 2);
-
- pbss_network->DSConfig = channel;
-
- memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
- /* get supported rates */
- p = rtw_get_ie23a(ie, WLAN_EID_SUPP_RATES, &ie_len,
- pbss_network->IELength);
- if (p) {
- memcpy(supportRate, p+2, ie_len);
- supportRateNum = ie_len;
- }
-
- /* get ext_supported rates */
- p = rtw_get_ie23a(ie, WLAN_EID_EXT_SUPP_RATES,
- &ie_len, pbss_network->IELength);
- if (p) {
- memcpy(supportRate+supportRateNum, p+2, ie_len);
- supportRateNum += ie_len;
- }
-
- network_type = rtw_check_network_type23a(supportRate,
- supportRateNum, channel);
-
- rtw_set_supported_rate23a(pbss_network->SupportedRates, network_type);
-
- /* parsing ERP_IE */
- p = rtw_get_ie23a(ie, WLAN_EID_ERP_INFO, &ie_len,
- pbss_network->IELength);
- if (p && ie_len > 0)
- ERP_IE_handler23a(padapter, p);
-
- /* update privacy/security */
- if (pbss_network->capability & BIT(4))
- pbss_network->Privacy = 1;
- else
- pbss_network->Privacy = 0;
-
- psecuritypriv->wpa_psk = 0;
-
- /* wpa2 */
- group_cipher = 0; pairwise_cipher = 0;
- psecuritypriv->wpa2_group_cipher = 0;
- psecuritypriv->wpa2_pairwise_cipher = 0;
- p = rtw_get_ie23a(ie, WLAN_EID_RSN, &ie_len,
- pbss_network->IELength);
- if (p && ie_len > 0) {
- if (rtw_parse_wpa2_ie23a(p, ie_len+2, &group_cipher,
- &pairwise_cipher, NULL) == _SUCCESS) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- psecuritypriv->dot8021xalg = 1; /* psk, todo:802.1x */
- psecuritypriv->wpa_psk |= BIT(1);
-
- psecuritypriv->wpa2_group_cipher = group_cipher;
- psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
- }
- }
-
- /* wpa */
- ie_len = 0;
- group_cipher = 0;
- pairwise_cipher = 0;
- psecuritypriv->wpa_group_cipher = 0;
- psecuritypriv->wpa_pairwise_cipher = 0;
- for (p = ie; ; p += (ie_len + 2)) {
- p = rtw_get_ie23a(p, WLAN_EID_VENDOR_SPECIFIC, &ie_len,
- pbss_network->IELength - (ie_len + 2));
- if ((p) && (!memcmp(p+2, RTW_WPA_OUI23A_TYPE, 4))) {
- if (rtw_parse_wpa_ie23a(p, ie_len+2, &group_cipher,
- &pairwise_cipher, NULL) == _SUCCESS) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- /* psk, todo:802.1x */
- psecuritypriv->dot8021xalg = 1;
-
- psecuritypriv->wpa_psk |= BIT(0);
-
- psecuritypriv->wpa_group_cipher = group_cipher;
- psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
- }
- break;
- }
-
- if (!p || !ie_len)
- break;
- }
-
- /* wmm */
- ie_len = 0;
- pmlmepriv->qos_option = 0;
- if (pregistrypriv->wmm_enable) {
- for (p = ie; ; p += (ie_len + 2)) {
- p = rtw_get_ie23a(p, WLAN_EID_VENDOR_SPECIFIC, &ie_len,
- (pbss_network->IELength -
- (ie_len + 2)));
- if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
- pmlmepriv->qos_option = 1;
-
- *(p + 8) |= BIT(7);/* QoS Info:support U-APSD */
-
- /* disable all ACM bits since the WMM admission
- * control is not supported
- */
- *(p + 10) &= ~BIT(4); /* BE */
- *(p + 14) &= ~BIT(4); /* BK */
- *(p + 18) &= ~BIT(4); /* VI */
- *(p + 22) &= ~BIT(4); /* VO */
- break;
- }
- if ((p == NULL) || (ie_len == 0))
- break;
- }
- }
- /* parsing HT_CAP_IE */
- p = rtw_get_ie23a(ie, WLAN_EID_HT_CAPABILITY, &ie_len,
- pbss_network->IELength);
- if (p && ie_len > 0) {
- u8 rf_type;
-
- struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p+2);
-
- pHT_caps_ie = p;
-
- ht_cap = true;
- network_type |= WIRELESS_11_24N;
-
- rf_type = rtl8723a_get_rf_type(padapter);
-
- if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
- (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & (0x07<<2));
- else
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY&0x00);
-
- /* set Max Rx AMPDU size to 64K */
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_FACTOR & 0x03);
-
- if (rf_type == RF_1T1R) {
- pht_cap->mcs.rx_mask[0] = 0xff;
- pht_cap->mcs.rx_mask[1] = 0x0;
- }
-
- memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
- }
-
- /* parsing HT_INFO_IE */
- p = rtw_get_ie23a(ie, WLAN_EID_HT_OPERATION, &ie_len,
- pbss_network->IELength);
- if (p && ie_len > 0)
- pHT_info_ie = p;
-
- pmlmepriv->cur_network.network_type = network_type;
-
- pmlmepriv->htpriv.ht_option = false;
-
- /* ht_cap */
- if (pregistrypriv->ht_enable && ht_cap) {
- pmlmepriv->htpriv.ht_option = true;
- pmlmepriv->qos_option = 1;
-
- if (pregistrypriv->ampdu_enable == 1)
- pmlmepriv->htpriv.ampdu_enable = true;
-
- HT_caps_handler23a(padapter, pHT_caps_ie);
-
- HT_info_handler23a(padapter, pHT_info_ie);
- }
-
- pbss_network->Length = get_wlan_bssid_ex_sz(pbss_network);
-
- /* issue beacon to start bss network */
- start_bss_network(padapter, (u8 *)pbss_network);
-
- /* alloc sta_info for ap itself */
- psta = rtw_get_stainfo23a(&padapter->stapriv, pbss_network->MacAddress);
- if (!psta) {
- psta = rtw_alloc_stainfo23a(&padapter->stapriv,
- pbss_network->MacAddress,
- GFP_KERNEL);
- if (!psta)
- return _FAIL;
- }
- /* fix bug of flush_cam_entry at STOP AP mode */
- psta->state |= WIFI_AP_STATE;
- rtw_indicate_connect23a(padapter);
-
- /* for check if already set beacon */
- pmlmepriv->cur_network.join_res = true;
-
- return ret;
-}
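
rtw_check_beacon_data23a() above walks the beacon tail as a flat sequence of 802.11 information elements (one byte id, one byte length, then payload) through the driver's rtw_get_ie23a() helper. A generic sketch of that TLV walk, shown here only to illustrate the layout being parsed (find_ie is a hypothetical name, not the driver's helper), could be:

#include <linux/kernel.h>
#include <linux/types.h>

/* illustrative only: find an element in a buffer of 802.11 IEs laid
 * out as | id (1 byte) | len (1 byte) | len bytes of payload | ... */
static const u8 *find_ie(const u8 *ies, size_t ies_len, u8 eid)
{
	size_t pos = 0;

	while (pos + 2 <= ies_len) {
		u8 id = ies[pos];
		u8 len = ies[pos + 1];

		if (pos + 2 + len > ies_len)
			break;			/* truncated element */
		if (id == eid)
			return &ies[pos];
		pos += 2 + len;
	}
	return NULL;
}
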
-
-void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- DBG_8723A("%s, mode =%d\n", __func__, mode);
-
- pacl_list->mode = mode;
-}
-
-static void update_bcn_erpinfo_ie(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- unsigned char *p, *ie = pnetwork->IEs;
- u32 len = 0;
-
- DBG_8723A("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
-
- if (!pmlmeinfo->ERP_enable)
- return;
-
- /* parsing ERP_IE */
- p = rtw_get_ie23a(ie, WLAN_EID_ERP_INFO, &len, pnetwork->IELength);
- if (p && len > 0) {
- if (pmlmepriv->num_sta_non_erp == 1)
- p[2] |= WLAN_ERP_NON_ERP_PRESENT |
- WLAN_ERP_USE_PROTECTION;
- else
- p[2] &= ~(WLAN_ERP_NON_ERP_PRESENT |
- WLAN_ERP_USE_PROTECTION);
-
- if (pmlmepriv->num_sta_no_short_preamble > 0)
- p[2] |= WLAN_ERP_BARKER_PREAMBLE;
- else
- p[2] &= ~(WLAN_ERP_BARKER_PREAMBLE);
-
- ERP_IE_handler23a(padapter, p);
- }
-}
-
-static void update_bcn_wpa_ie(struct rtw_adapter *padapter)
-{
- DBG_8723A("%s\n", __func__);
-}
-
-static void update_bcn_wmm_ie(struct rtw_adapter *padapter)
-{
- DBG_8723A("%s\n", __func__);
-}
-
-static void update_bcn_wps_ie(struct rtw_adapter *padapter)
-{
- DBG_8723A("%s\n", __func__);
-}
-
-static void update_bcn_p2p_ie(struct rtw_adapter *padapter)
-{
-}
-
-static void update_bcn_vendor_spec_ie(struct rtw_adapter *padapter, u8 *oui)
-{
- DBG_8723A("%s\n", __func__);
-
- if (!memcmp(RTW_WPA_OUI23A_TYPE, oui, 4))
- update_bcn_wpa_ie(padapter);
- else if (!memcmp(WMM_OUI23A, oui, 4))
- update_bcn_wmm_ie(padapter);
- else if (!memcmp(WPS_OUI23A, oui, 4))
- update_bcn_wps_ie(padapter);
- else if (!memcmp(P2P_OUI23A, oui, 4))
- update_bcn_p2p_ie(padapter);
- else
- DBG_8723A("unknown OUI type!\n");
-}
-
-void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
-{
- struct mlme_priv *pmlmepriv;
- struct mlme_ext_priv *pmlmeext;
- /* struct mlme_ext_info *pmlmeinfo; */
-
- /* DBG_8723A("%s\n", __func__); */
-
- if (!padapter)
- return;
-
- pmlmepriv = &padapter->mlmepriv;
- pmlmeext = &padapter->mlmeextpriv;
- /* pmlmeinfo = &pmlmeext->mlmext_info; */
-
- if (false == pmlmeext->bstart_bss)
- return;
-
- spin_lock_bh(&pmlmepriv->bcn_update_lock);
-
- switch (ie_id) {
- case WLAN_EID_TIM:
- update_BCNTIM(padapter);
- break;
-
- case WLAN_EID_ERP_INFO:
- update_bcn_erpinfo_ie(padapter);
- break;
-
- case WLAN_EID_VENDOR_SPECIFIC:
- update_bcn_vendor_spec_ie(padapter, oui);
- break;
-
- default:
- break;
- }
-
- pmlmepriv->update_bcn = true;
-
- spin_unlock_bh(&pmlmepriv->bcn_update_lock);
-
- if (tx)
- set_tx_beacon_cmd23a(padapter);
-}
-
-/*
- * op_mode
- * Set to 0 (HT pure) under the following conditions
- * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
- * Set to 1 (HT non-member protection) if there may be non-HT STAs
- * in both the primary and the secondary channel
- * Set to 2 (20 MHz protection) if only HT STAs are associated
- * in the BSS and at least one 20 MHz HT STA is associated
- * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
- * (currently a non-GF HT station is also treated as a non-HT STA)
- */
-static int rtw_ht_operation_update(struct rtw_adapter *padapter)
-{
- u16 cur_op_mode, new_op_mode;
- int op_mode_changes = 0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
-
- if (pmlmepriv->htpriv.ht_option)
- return 0;
-
- /* if (!iface->conf->ieee80211n || iface->conf->ht_op_mode_fixed) */
- /* return 0; */
-
- DBG_8723A("%s current operation mode = 0x%X\n",
- __func__, pmlmepriv->ht_op_mode);
-
- if (!(pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
- && pmlmepriv->num_sta_ht_no_gf) {
- pmlmepriv->ht_op_mode |=
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT;
- op_mode_changes++;
- } else if ((pmlmepriv->ht_op_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
- pmlmepriv->num_sta_ht_no_gf == 0) {
- pmlmepriv->ht_op_mode &=
- ~IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT;
- op_mode_changes++;
- }
-
- if (!(pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) &&
- (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
- pmlmepriv->ht_op_mode |= IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
- op_mode_changes++;
- } else if ((pmlmepriv->ht_op_mode &
- IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) &&
- (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
- pmlmepriv->ht_op_mode &=
- ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
- op_mode_changes++;
- }
-
- /*
- * Note: currently we switch to the MIXED op mode if HT non-greenfield
- * station is associated. Probably it's a theoretical case, since
- * it looks like all known HT STAs support greenfield.
- */
- if (pmlmepriv->num_sta_no_ht ||
- (pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
- new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
- else if ((le16_to_cpu(phtpriv_ap->ht_cap.cap_info) &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- pmlmepriv->num_sta_ht_20mhz)
- new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
- else if (pmlmepriv->olbc_ht)
- new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER;
- else
- new_op_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- cur_op_mode = pmlmepriv->ht_op_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- if (cur_op_mode != new_op_mode) {
- pmlmepriv->ht_op_mode &= ~IEEE80211_HT_OP_MODE_PROTECTION;
- pmlmepriv->ht_op_mode |= new_op_mode;
- op_mode_changes++;
- }
-
- DBG_8723A("%s new operation mode = 0x%X changes =%d\n",
- __func__, pmlmepriv->ht_op_mode, op_mode_changes);
-
- return op_mode_changes;
-}
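
rtw_ht_operation_update() above only computes the protection sub-field; writing it back into the HT Operation element happens later in the beacon update path. A hedged sketch of what such a write-back could look like with the mainline struct ieee80211_ht_operation (assumed usage for illustration, not the driver's actual update_beacon code):

#include <linux/ieee80211.h>

/* assumed usage, not the driver's code: push a protection mode
 * computed by something like rtw_ht_operation_update() into the
 * HT Operation element before the beacon is rebuilt */
static void set_ht_oper_protection(struct ieee80211_ht_operation *ht_oper,
				   u16 ht_op_mode)
{
	u16 op = le16_to_cpu(ht_oper->operation_mode);

	op &= ~IEEE80211_HT_OP_MODE_PROTECTION;
	op |= ht_op_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ht_oper->operation_mode = cpu_to_le16(op);
}
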
-
-void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated)
-{
- /* update associated stations cap. */
- if (updated == true) {
- struct list_head *phead;
- struct sta_info *psta, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
- VCS_update23a(padapter, psta);
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- }
-}
-
-/* called > TSR LEVEL for USB or SDIO Interface */
-void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
- if (!psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 1;
-
- pmlmepriv->num_sta_no_short_preamble++;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 1)) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
-
- }
- } else {
- if (psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 0;
-
- pmlmepriv->num_sta_no_short_preamble--;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0)) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
-
- }
- }
-
- if (psta->flags & WLAN_STA_NONERP) {
- if (!psta->nonerp_set) {
- psta->nonerp_set = 1;
-
- pmlmepriv->num_sta_non_erp++;
-
- if (pmlmepriv->num_sta_non_erp == 1) {
- beacon_updated = true;
- update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
- }
- }
-
- } else {
- if (psta->nonerp_set) {
- psta->nonerp_set = 0;
-
- pmlmepriv->num_sta_non_erp--;
-
- if (pmlmepriv->num_sta_non_erp == 0) {
- beacon_updated = true;
- update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
- }
- }
-
- }
-
- if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
- if (!psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 1;
-
- pmlmepriv->num_sta_no_short_slot_time++;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1)) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
-
- }
- } else {
- if (psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 0;
-
- pmlmepriv->num_sta_no_short_slot_time--;
-
- if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0)) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
- }
- }
-
- if (psta->flags & WLAN_STA_HT) {
- u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
-
- DBG_8723A("HT: STA %pM HT Capabilities Info: 0x%04x\n",
- psta->hwaddr, ht_capab);
-
- if (psta->no_ht_set) {
- psta->no_ht_set = 0;
- pmlmepriv->num_sta_no_ht--;
- }
-
- if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
- if (!psta->no_ht_gf_set) {
- psta->no_ht_gf_set = 1;
- pmlmepriv->num_sta_ht_no_gf++;
- }
- DBG_8723A("%s STA %pM - no greenfield, num of non-gf stations %d\n",
- __func__, psta->hwaddr,
- pmlmepriv->num_sta_ht_no_gf);
- }
-
- if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH_20_40) == 0) {
- if (!psta->ht_20mhz_set) {
- psta->ht_20mhz_set = 1;
- pmlmepriv->num_sta_ht_20mhz++;
- }
- DBG_8723A("%s STA %pM - 20 MHz HT, num of 20MHz HT STAs %d\n",
- __func__, psta->hwaddr,
- pmlmepriv->num_sta_ht_20mhz);
- }
-
- } else {
- if (!psta->no_ht_set) {
- psta->no_ht_set = 1;
- pmlmepriv->num_sta_no_ht++;
- }
- if (pmlmepriv->htpriv.ht_option) {
- DBG_8723A("%s STA %pM - no HT, num of non-HT stations %d\n",
- __func__, psta->hwaddr,
- pmlmepriv->num_sta_no_ht);
- }
- }
-
- if (rtw_ht_operation_update(padapter) > 0) {
- update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
- update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
- }
-
- /* update associated stations cap. */
- associated_clients_update23a(padapter, beacon_updated);
-
- DBG_8723A("%s, updated =%d\n", __func__, beacon_updated);
-}
-
-u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!psta)
- return beacon_updated;
-
- if (psta->no_short_preamble_set) {
- psta->no_short_preamble_set = 0;
- pmlmepriv->num_sta_no_short_preamble--;
- if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_preamble == 0) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
- }
-
- if (psta->nonerp_set) {
- psta->nonerp_set = 0;
- pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0) {
- beacon_updated = true;
- update_beacon23a(padapter, WLAN_EID_ERP_INFO,
- NULL, true);
- }
- }
-
- if (psta->no_short_slot_time_set) {
- psta->no_short_slot_time_set = 0;
- pmlmepriv->num_sta_no_short_slot_time--;
- if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_slot_time == 0) {
- beacon_updated = true;
- update_beacon23a(padapter, 0xFF, NULL, true);
- }
- }
-
- if (psta->no_ht_gf_set) {
- psta->no_ht_gf_set = 0;
- pmlmepriv->num_sta_ht_no_gf--;
- }
-
- if (psta->no_ht_set) {
- psta->no_ht_set = 0;
- pmlmepriv->num_sta_no_ht--;
- }
-
- if (psta->ht_20mhz_set) {
- psta->ht_20mhz_set = 0;
- pmlmepriv->num_sta_ht_20mhz--;
- }
-
- if (rtw_ht_operation_update(padapter) > 0) {
- update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
- update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
- }
-
- /* update associated stations cap. */
-
- DBG_8723A("%s, updated =%d\n", __func__, beacon_updated);
-
- return beacon_updated;
-}
-
-u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- u8 beacon_updated = false;
-
- if (!psta)
- return beacon_updated;
-
- if (active) {
- /* tear down Rx AMPDU */
- send_delba23a(padapter, 0, psta->hwaddr); /* recipient */
-
- /* tear down TX AMPDU */
- send_delba23a(padapter, 1, psta->hwaddr); /* originator */
-
- issue_deauth23a(padapter, psta->hwaddr, reason);
- }
-
- psta->htpriv.agg_enable_bitmap = 0x0; /* reset */
- psta->htpriv.candidate_tid_bitmap = 0x0; /* reset */
-
- /* report_del_sta_event23a(padapter, psta->hwaddr, reason); */
-
- /* clear cam entry / key */
- /* clear_cam_entry23a(padapter, (psta->mac_id + 3)); */
- rtw_clearstakey_cmd23a(padapter, (u8 *)psta, (u8)(psta->mac_id + 3),
- true);
-
- spin_lock_bh(&psta->lock);
- psta->state &= ~_FW_LINKED;
- spin_unlock_bh(&psta->lock);
-
- rtw_cfg80211_indicate_sta_disassoc(padapter, psta->hwaddr, reason);
-
- report_del_sta_event23a(padapter, psta->hwaddr, reason);
-
- beacon_updated = bss_cap_update_on_sta_leave23a(padapter, psta);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- return beacon_updated;
-}
-
-int rtw_sta_flush23a(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- struct sta_info *psta, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 chk_alive_num = 0;
- struct sta_info *chk_alive_list[NUM_STA];
- int i;
-
- DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
-
- if ((pmlmeinfo->state&0x03) != MSR_AP)
- return 0;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
- /* Remove sta from asoc_list */
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
-
- /* Keep sta for ap_free_sta23a() beyond this asoc_list loop */
- chk_alive_list[chk_alive_num++] = psta;
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- /* For each sta in chk_alive_list, call ap_free_sta23a */
- for (i = 0; i < chk_alive_num; i++)
- ap_free_sta23a(padapter, chk_alive_list[i], true,
- WLAN_REASON_DEAUTH_LEAVING);
-
- issue_deauth23a(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
-
- associated_clients_update23a(padapter, true);
-
- return 0;
-}
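
rtw_sta_flush23a() above uses a common locking pattern: entries are unlinked from asoc_list while holding asoc_list_lock and parked in a local array, and the heavyweight teardown (ap_free_sta23a(), which issues frames and takes other locks) runs only after the lock is dropped. A minimal generic sketch of that "collect under the lock, process outside" pattern, with hypothetical names, is:

#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical item type, just for the sketch */
struct item {
	struct list_head node;
};

/* illustrative pattern only: detach entries under the lock, then run
 * the heavyweight teardown on each of them after dropping it */
static void flush_items(spinlock_t *lock, struct list_head *head,
			struct item **scratch, int max,
			void (*teardown)(struct item *))
{
	struct item *it, *tmp;
	int n = 0, i;

	spin_lock_bh(lock);
	list_for_each_entry_safe(it, tmp, head, node) {
		if (n == max)
			break;
		list_del_init(&it->node);
		scratch[n++] = it;
	}
	spin_unlock_bh(lock);

	for (i = 0; i < n; i++)
		teardown(scratch[i]);
}
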
-
-/* called > TSR LEVEL for USB or SDIO Interface */
-void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- int flags = psta->flags;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- /* update wmm cap. */
- if (WLAN_STA_WME&flags)
- psta->qos_option = 1;
- else
- psta->qos_option = 0;
-
- if (pmlmepriv->qos_option == 0)
- psta->qos_option = 0;
-
- /* update 802.11n ht cap. */
- if (WLAN_STA_HT&flags) {
- psta->htpriv.ht_option = true;
- psta->qos_option = 1;
- } else {
- psta->htpriv.ht_option = false;
- }
-
- if (!pmlmepriv->htpriv.ht_option)
- psta->htpriv.ht_option = false;
-
- update_sta_info23a_apmode23a(padapter, psta);
-}
-
-/* called >= TSR LEVEL for USB or SDIO Interface */
-void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- if (psta->state & _FW_LINKED) {
- /* add ratid */
- add_RATid23a(padapter, psta, 0);/* DM_RATR_STA_INIT */
- }
-}
-
-/* restore hw setting from sw data structures */
-void rtw_ap_restore_network(struct rtw_adapter *padapter)
-{
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta, *ptmp;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct list_head *phead;
- u8 chk_alive_num = 0;
- struct sta_info *chk_alive_list[NUM_STA];
- int i;
-
- rtw_setopmode_cmd23a(padapter, NL80211_IFTYPE_AP);
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- start_bss_network(padapter, (u8 *)&mlmepriv->cur_network.network);
-
- if (padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_TKIP ||
- padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_CCMP) {
- /* restore group key, WEP keys is restored in ips_leave23a() */
- rtw_set_key23a(padapter, psecuritypriv,
- psecuritypriv->dot118021XGrpKeyid, 0);
- }
-
- /* per sta pairwise key and settings */
- if (padapter->securitypriv.dot11PrivacyAlgrthm !=
- WLAN_CIPHER_SUITE_TKIP &&
- padapter->securitypriv.dot11PrivacyAlgrthm !=
- WLAN_CIPHER_SUITE_CCMP) {
- return;
- }
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
- chk_alive_list[chk_alive_num++] = psta;
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- for (i = 0; i < chk_alive_num; i++) {
- psta = chk_alive_list[i];
-
- if (psta->state & _FW_LINKED) {
- Update_RA_Entry23a(padapter, psta);
- /* pairwise key */
- rtw_setstakey_cmd23a(padapter, (unsigned char *)psta, true);
- }
- }
-}
-
-void start_ap_mode23a(struct rtw_adapter *padapter)
-{
- int i;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- pmlmepriv->update_bcn = false;
-
- /* init_mlme_ap_info23a(padapter); */
- pmlmeext->bstart_bss = false;
-
- pmlmepriv->num_sta_non_erp = 0;
-
- pmlmepriv->num_sta_no_short_slot_time = 0;
-
- pmlmepriv->num_sta_no_short_preamble = 0;
-
- pmlmepriv->num_sta_ht_no_gf = 0;
- pmlmepriv->num_sta_no_ht = 0;
- pmlmepriv->num_sta_ht_20mhz = 0;
-
- pmlmepriv->olbc = false;
-
- pmlmepriv->olbc_ht = false;
-
- pmlmepriv->ht_op_mode = 0;
-
- for (i = 0; i < NUM_STA; i++)
- pstapriv->sta_aid[i] = NULL;
-
- /* for ACL */
- INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
- pacl_list->num = 0;
- pacl_list->mode = 0;
- for (i = 0; i < NUM_ACL; i++) {
- INIT_LIST_HEAD(&pacl_list->aclnode[i].list);
- pacl_list->aclnode[i].valid = false;
- }
-}
-
-void stop_ap_mode23a(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- struct rtw_wlan_acl_node *paclnode, *ptmp;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
-
- pmlmepriv->update_bcn = false;
- pmlmeext->bstart_bss = false;
-
-	/*
-	 * reset and init the security priv; this could be
-	 * refined with rtw_reset_securitypriv23a
-	 */
- memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
- padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
-
- /* for ACL */
- spin_lock_bh(&pacl_node_q->lock);
- phead = get_list_head(pacl_node_q);
- list_for_each_entry_safe(paclnode, ptmp, phead, list) {
- if (paclnode->valid == true) {
- paclnode->valid = false;
- list_del_init(&paclnode->list);
- pacl_list->num--;
- }
- }
- spin_unlock_bh(&pacl_node_q->lock);
-
- DBG_8723A("%s, free acl_node_queue, num =%d\n",
- __func__, pacl_list->num);
-
- rtw_sta_flush23a(padapter);
-
- /* free_assoc_sta_resources */
- rtw_free_all_stainfo23a(padapter);
-
- psta = rtw_get_bcmc_stainfo23a(padapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(padapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- rtw_init_bcmc_stainfo23a(padapter);
-
- rtw23a_free_mlme_priv_ie_data(pmlmepriv);
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
deleted file mode 100644
index cd4e0f05d82f..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ /dev/null
@@ -1,1470 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_CMD_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <mlme_osdep.h>
-#include <rtl8723a_cmd.h>
-#include <rtw_sreset.h>
-
-static struct cmd_hdl wlancmds[] = {
- GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_DRV_CMD_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
- GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
- GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl23a) /*20*/
- GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
- GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl23a) /* 46 */
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param), tx_beacon_hdl23a) /*55*/
-
- GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl23a) /*56*/
- GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl23a) /*57*/
-
- GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl23a) /*58*/
- GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl23a) /*59*/
- GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param), led_blink_hdl23a) /*60*/
-
- GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param), set_csa_hdl23a) /*61*/
- GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param), tdls_hdl23a) /*62*/
-};
-
-struct _cmd_callback rtw_cmd_callback[] = {
- {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
- {GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback23a},
- {GEN_CMD_CODE(_Write_BBREG), NULL},
- {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback23a},
- {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
- {GEN_CMD_CODE(_Read_EEPROM), NULL},
- {GEN_CMD_CODE(_Write_EEPROM), NULL},
- {GEN_CMD_CODE(_Read_EFUSE), NULL},
- {GEN_CMD_CODE(_Write_EFUSE), NULL},
-
- {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
- {GEN_CMD_CODE(_Write_CAM), NULL},
- {GEN_CMD_CODE(_setBCNITV), NULL},
- {GEN_CMD_CODE(_setMBIDCFG), NULL},
- {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd23a_callback}, /*14*/
- {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd23a_callback}, /*15*/
- {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd23a_callback},
- {GEN_CMD_CODE(_SetOpMode), NULL},
- {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback23a}, /*18*/
- {GEN_CMD_CODE(_SetAuth), NULL},
-
- {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
- {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback23a},
- {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback23a},
- {GEN_CMD_CODE(_DelAssocSta), NULL},
- {GEN_CMD_CODE(_SetStaPwrState), NULL},
- {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
- {GEN_CMD_CODE(_GetBasicRate), NULL},
- {GEN_CMD_CODE(_SetDataRate), NULL},
- {GEN_CMD_CODE(_GetDataRate), NULL},
- {GEN_CMD_CODE(_SetPhyInfo), NULL},
-
- {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
- {GEN_CMD_CODE(_SetPhy), NULL},
- {GEN_CMD_CODE(_GetPhy), NULL},
- {GEN_CMD_CODE(_readRssi), NULL},
- {GEN_CMD_CODE(_readGain), NULL},
- {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
- {GEN_CMD_CODE(_SetPwrMode), NULL},
- {GEN_CMD_CODE(_JoinbssRpt), NULL},
- {GEN_CMD_CODE(_SetRaTable), NULL},
- {GEN_CMD_CODE(_GetRaTable), NULL},
-
- {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
- {GEN_CMD_CODE(_GetDTMReport), NULL},
- {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
- {GEN_CMD_CODE(_SetUsbSuspend), NULL},
- {GEN_CMD_CODE(_SetH2cLbk), NULL},
- {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
- {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
- {GEN_CMD_CODE(_SetTxPower), NULL},
- {GEN_CMD_CODE(_SwitchAntenna), NULL},
- {GEN_CMD_CODE(_SetCrystalCap), NULL},
- {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
-
- {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
- {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
- {GEN_CMD_CODE(_SetContinuousTx), NULL},
- {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
- {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
-
- {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
- {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
- {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
- {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
- {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
-
- {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
- {GEN_CMD_CODE(_TDLS), NULL},/*62*/
-};
-
-/*
-The caller and rtw_cmd_thread23a can protect cmd_q with a spinlock,
-so no irqsave variant is necessary.
-*/
-
-int rtw_init_cmd_priv23a(struct cmd_priv *pcmdpriv)
-{
- int res = _SUCCESS;
-
- pcmdpriv->cmd_issued_cnt = 0;
- pcmdpriv->cmd_done_cnt = 0;
- pcmdpriv->rsp_cnt = 0;
-
- pcmdpriv->wq = alloc_workqueue("rtl8723au_cmd", 0, 1);
- if (!pcmdpriv->wq)
- res = _FAIL;
-
- return res;
-}
-
-/* forward definition */
-
-static void rtw_irq_work(struct work_struct *work);
-
-u32 rtw_init_evt_priv23a(struct evt_priv *pevtpriv)
-{
- pevtpriv->wq = alloc_workqueue("rtl8723au_evt", 0, 1);
-
- INIT_WORK(&pevtpriv->irq_wk, rtw_irq_work);
-
- return _SUCCESS;
-}
-
-void rtw_free_evt_priv23a(struct evt_priv *pevtpriv)
-{
- cancel_work_sync(&pevtpriv->irq_wk);
-}
-
-static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
-{
- /* set to true to allow enqueuing cmd when hw_init_completed is false */
- u8 bAllow = false;
-
- if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
- bAllow = true;
-
- if (pcmdpriv->padapter->hw_init_completed == false && bAllow == false)
- return _FAIL;
- return _SUCCESS;
-}
-
-static void rtw_cmd_work(struct work_struct *work);
-
-int rtw_enqueue_cmd23a(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
-{
- int res = _FAIL;
-
- if (!cmd_obj)
- goto exit;
-
- cmd_obj->padapter = pcmdpriv->padapter;
-
- res = rtw_cmd_filter(pcmdpriv, cmd_obj);
- if (res == _FAIL) {
- rtw_free_cmd_obj23a(cmd_obj);
- goto exit;
- }
-
- INIT_WORK(&cmd_obj->work, rtw_cmd_work);
-
- res = queue_work(pcmdpriv->wq, &cmd_obj->work);
-
- if (!res) {
- netdev_err(pcmdpriv->padapter->pnetdev,
- "%s: Call to queue_work() failed\n", __func__);
- res = _FAIL;
- } else
- res = _SUCCESS;
-exit:
-
- return res;
-}
-
-void rtw_free_cmd_obj23a(struct cmd_obj *pcmd)
-{
-
- if (pcmd->cmdcode != _JoinBss_CMD_ &&
- pcmd->cmdcode != _CreateBss_CMD_) {
- /* free parmbuf in cmd_obj */
- kfree(pcmd->parmbuf);
- }
-
- if (pcmd->rsp) {
- if (pcmd->rspsz != 0) {
- /* free rsp in cmd_obj */
- kfree(pcmd->rsp);
- }
- }
-
- kfree(pcmd);
-}
-
-static void rtw_cmd_work(struct work_struct *work)
-{
- int (*cmd_hdl)(struct rtw_adapter *padapter, const u8 *pbuf);
- void (*pcmd_callback)(struct rtw_adapter *dev, struct cmd_obj *pcmd);
- struct cmd_priv *pcmdpriv;
- struct cmd_obj *pcmd = container_of(work, struct cmd_obj, work);
-
- pcmdpriv = &pcmd->padapter->cmdpriv;
-
- if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) {
- pcmd->res = H2C_DROPPED;
- goto post_process;
- }
-
- pcmdpriv->cmd_issued_cnt++;
-
- pcmd->cmdsz = ALIGN(pcmd->cmdsz, 4);
-
- if (pcmd->cmdcode < (sizeof(wlancmds)/sizeof(struct cmd_hdl))) {
- cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
-
- if (cmd_hdl)
- pcmd->res = cmd_hdl(pcmd->padapter, pcmd->parmbuf);
- else
- pcmd->res = H2C_DROPPED;
- } else
- pcmd->res = H2C_PARAMETERS_ERROR;
-
-post_process:
-	/* call the callback function for post-processing */
- if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
- pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- "mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n",
- pcmd_callback, pcmd->cmdcode);
- rtw_free_cmd_obj23a(pcmd);
- } else {
-			/* note that the cmd_obj is freed inside
-			   the rtw_cmd_callback */
- pcmd_callback(pcmd->padapter, pcmd);
- }
- } else {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "%s: cmdcode = 0x%x callback not defined!\n",
- __func__, pcmd->cmdcode);
- rtw_free_cmd_obj23a(pcmd);
- }
-}
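
The command machinery above embeds a work_struct in each cmd_obj: rtw_enqueue_cmd23a() queues the work on the driver's workqueue and rtw_cmd_work() recovers the enclosing cmd_obj with container_of() before dispatching through the wlancmds table. A stripped-down sketch of that embed-and-recover pattern, using hypothetical names rather than the driver's types, is:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* hypothetical command type, just for the sketch */
struct my_cmd {
	struct work_struct work;
	int code;
};

/* the work handler recovers the enclosing command from the
 * embedded work_struct, handles it and frees it */
static void my_cmd_work(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	pr_info("handling command %d\n", cmd->code);
	kfree(cmd);
}

/* queue a command on wq, which is assumed to have been created
 * elsewhere with alloc_workqueue() as in rtw_init_cmd_priv23a() */
static int my_enqueue_cmd(struct workqueue_struct *wq, int code)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	cmd->code = code;
	INIT_WORK(&cmd->work, my_cmd_work);
	queue_work(wq, &cmd->work);
	return 0;
}
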
-
-
-int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
- struct cfg80211_ssid *ssid, int ssid_num,
- struct rtw_ieee80211_channel *ch, int ch_num)
-{
- int res = _FAIL;
- struct cmd_obj *ph2c;
- struct sitesurvey_parm *psurveyPara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_SCAN, 1);
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c)
- return _FAIL;
-
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (!psurveyPara) {
- kfree(ph2c);
- return _FAIL;
- }
-
- rtw_free_network_queue23a(padapter);
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- "%s: flush network queue\n", __func__);
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
- GEN_CMD_CODE(_SiteSurvey));
-
- /* psurveyPara->bsslimit = 48; */
- psurveyPara->scan_mode = pmlmepriv->scan_mode;
-
- /* prepare ssid list */
- if (ssid) {
- int i;
-
- for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (ssid[i].ssid_len) {
- memcpy(&psurveyPara->ssid[i], &ssid[i],
- sizeof(struct cfg80211_ssid));
- psurveyPara->ssid_num++;
- }
- }
- }
-
- /* prepare channel list */
- if (ch) {
- int i;
-
- for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
- if (ch[i].hw_value &&
- !(ch[i].flags & IEEE80211_CHAN_DISABLED)) {
- memcpy(&psurveyPara->ch[i], &ch[i],
- sizeof(struct rtw_ieee80211_channel));
- psurveyPara->ch_num++;
- }
- }
- }
-
- set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-
- if (res == _SUCCESS) {
- mod_timer(&pmlmepriv->scan_to_timer, jiffies +
- msecs_to_jiffies(SCANNING_TIMEOUT));
-
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
- } else
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
-
- return res;
-}
-
-void rtw_getbbrfreg_cmdrsp_callback23a(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- kfree(pcmd->parmbuf);
- kfree(pcmd);
-}
-
-int rtw_createbss_cmd23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *pcmd;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pdev_network;
- u8 res = _SUCCESS;
-
- pdev_network = &padapter->registrypriv.dev_network;
-
- if (pmlmepriv->assoc_ssid.ssid_len == 0) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- "createbss for Any SSid:%s\n",
- pmlmepriv->assoc_ssid.ssid);
- } else {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- "createbss for SSid:%s\n",
- pmlmepriv->assoc_ssid.ssid);
- }
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd) {
- res = _FAIL;
- goto exit;
- }
-
- pcmd->cmdcode = _CreateBss_CMD_;
- pcmd->parmbuf = (unsigned char *)pdev_network;
- pcmd->cmdsz = get_wlan_bssid_ex_sz(pdev_network);
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- pdev_network->Length = pcmd->cmdsz;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
-
-exit:
-
- return res;
-}
-
-int rtw_joinbss_cmd23a(struct rtw_adapter *padapter,
- struct wlan_network *pnetwork)
-{
- int res = _SUCCESS;
- struct wlan_bssid_ex *psecnetwork;
- struct cmd_obj *pcmd;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- enum nl80211_iftype ifmode;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- ifmode = pnetwork->network.ifmode;
-
- if (pmlmepriv->assoc_ssid.ssid_len == 0) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
- "+Join cmd: Any SSid\n");
- } else {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
- "+Join cmd: SSid =[%s]\n",
- pmlmepriv->assoc_ssid.ssid);
- }
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "rtw_joinbss_cmd23a: memory allocate for cmd_obj fail!!!\n");
- goto exit;
- }
-
- /* for hidden ap to set fw_state here */
- if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
- switch (ifmode) {
- case NL80211_IFTYPE_ADHOC:
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- set_fwstate(pmlmepriv, WIFI_STATION_STATE);
- break;
- default:
- break;
- }
- }
-
- psecnetwork = &psecuritypriv->sec_bss;
- if (!psecnetwork) {
- kfree(pcmd);
- res = _FAIL;
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "rtw_joinbss_cmd23a :psecnetwork == NULL!!!\n");
-
- goto exit;
- }
-
- memset(psecnetwork, 0, sizeof(struct wlan_bssid_ex));
-
- memcpy(psecnetwork, &pnetwork->network,
- get_wlan_bssid_ex_sz(&pnetwork->network));
-
- psecnetwork->IELength = 0;
- /* Added by Albert 2009/02/18 */
-	/* If the driver does not use the bssid to create the
-	 * connection, copy the connecting AP's MAC address
-	 * into assoc_bssid so that the driver still has the bssid
-	 * information for PMKIDList searching. */
-
- if (pmlmepriv->assoc_by_bssid == false)
- ether_addr_copy(&pmlmepriv->assoc_bssid[0],
- &pnetwork->network.MacAddress[0]);
-
- psecnetwork->IELength =
- rtw_restruct_sec_ie23a(padapter, &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength);
-
- pmlmepriv->qos_option = 0;
-
- if (pregistrypriv->wmm_enable) {
- u32 tmp_len;
-
- tmp_len = rtw_restruct_wmm_ie23a(padapter,
- &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength,
- psecnetwork->IELength);
-
- if (psecnetwork->IELength != tmp_len) {
- psecnetwork->IELength = tmp_len;
- /* There is WMM IE in this corresp. beacon */
- pmlmepriv->qos_option = 1;
- } else {
- /* There is no WMM IE in this corresp. beacon */
- pmlmepriv->qos_option = 0;
- }
- }
-
- phtpriv->ht_option = false;
- if (pregistrypriv->ht_enable) {
- u32 algo = padapter->securitypriv.dot11PrivacyAlgrthm;
- /* Added by Albert 2010/06/23 */
-		/* For WEP mode, use b/g mode for the
-		   connection to avoid some IOT issues. */
- /* Especially for Realtek 8192u SoftAP. */
- if (algo != WLAN_CIPHER_SUITE_WEP40 &&
- algo != WLAN_CIPHER_SUITE_WEP104 &&
- algo != WLAN_CIPHER_SUITE_TKIP) {
- /* rtw_restructure_ht_ie23a */
- rtw_restructure_ht_ie23a(padapter,
- &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength,
- &psecnetwork->IELength);
- }
- }
-
- pmlmeinfo->assoc_AP_vendor =
- check_assoc_AP23a(pnetwork->network.IEs,
- pnetwork->network.IELength);
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
- padapter->pwrctrlpriv.smart_ps = 0;
- else
- padapter->pwrctrlpriv.smart_ps =
- padapter->registrypriv.smart_ps;
-
- DBG_8723A("%s: smart_ps =%d\n", __func__,
- padapter->pwrctrlpriv.smart_ps);
-
- /* get cmdsz before endian conversion */
- pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);
-
- pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
- pcmd->parmbuf = (unsigned char *)psecnetwork;
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
-exit:
-
- return res;
-}
-
-int rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms,
- bool enqueue)
-{
- struct cmd_obj *cmdobj = NULL;
- struct disconnect_parm *param = NULL;
- struct cmd_priv *cmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
- "+rtw_disassoc_cmd23a\n");
-
- /* prepare cmd parameter */
- param = kzalloc(sizeof(*param), GFP_ATOMIC);
- if (param == NULL) {
- res = _FAIL;
- goto exit;
- }
- param->deauth_timeout_ms = deauth_timeout_ms;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!cmdobj) {
- res = _FAIL;
- kfree(param);
- goto exit;
- }
- init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
- res = rtw_enqueue_cmd23a(cmdpriv, cmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and
- free cmd parameter */
- if (disconnect_hdl23a(padapter, (u8 *)param) != H2C_SUCCESS)
- res = _FAIL;
- kfree(param);
- }
-
-exit:
- return res;
-}
-
-int rtw_setopmode_cmd23a(struct rtw_adapter *padapter,
- enum nl80211_iftype ifmode)
-{
- struct cmd_obj *ph2c;
- struct setopmode_parm *psetop;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!ph2c) {
- res = false;
- goto exit;
- }
- psetop = kzalloc(sizeof(struct setopmode_parm), GFP_KERNEL);
-
- if (!psetop) {
- kfree(ph2c);
- res = false;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
- psetop->mode = ifmode;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-exit:
- return res;
-}
-
-int rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key)
-{
- struct cmd_obj *ph2c;
- struct set_stakey_parm *psetstakey_para;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct set_stakey_rsp *psetstakey_rsp = NULL;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sta_info *sta = (struct sta_info *)psta;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
- if (!psetstakey_para) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
- if (!psetstakey_rsp) {
- kfree(ph2c);
- kfree(psetstakey_para);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
- ph2c->rspsz = sizeof(struct set_stakey_rsp);
-
- ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- psetstakey_para->algorithm =
- (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
- } else {
- GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm,
- false);
- }
-
- if (unicast_key == true) {
- memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
- } else {
- int idx = psecuritypriv->dot118021XGrpKeyid;
-
- memcpy(&psetstakey_para->key,
- &psecuritypriv->dot118021XGrpKey[idx].skey, 16);
- }
-
- /* jeff: set this because at least sw key is ready */
- padapter->securitypriv.busetkipkey = 1;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-int rtw_clearstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 entry,
- u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct set_stakey_parm *psetstakey_para;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct set_stakey_rsp *psetstakey_rsp = NULL;
- struct sta_info *sta = (struct sta_info *)psta;
- int res = _SUCCESS;
-
- if (!enqueue) {
- clear_cam_entry23a(padapter, entry);
- } else {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm),
- GFP_KERNEL);
- if (!psetstakey_para) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp),
- GFP_KERNEL);
- if (!psetstakey_rsp) {
- kfree(ph2c);
- kfree(psetstakey_para);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para,
- _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
- ph2c->rspsz = sizeof(struct set_stakey_rsp);
-
- ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
-
- psetstakey_para->algorithm = 0;
-
- psetstakey_para->id = entry;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
- }
-exit:
- return res;
-}
-
-int rtw_addbareq_cmd23a(struct rtw_adapter *padapter, u8 tid, u8 *addr)
-{
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct cmd_obj *ph2c;
- struct addBaReq_parm *paddbareq_parm;
- int res = _SUCCESS;
-
- if (tid >= MAXTID) {
- res = _FAIL;
- goto exit;
- }
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
- if (!paddbareq_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- paddbareq_parm->tid = tid;
- ether_addr_copy(paddbareq_parm->addr, addr);
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm,
- GEN_CMD_CODE(_AddBAReq));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-exit:
- return res;
-}
-
-int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
- pdrvextra_cmd_parm->type_size = 0;
- pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
- GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-static void traffic_status_watchdog(struct rtw_adapter *padapter)
-{
- u8 bEnterPS;
- u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
- u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false;
- u8 bHigherBusyTxTraffic = false;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int BusyThreshold = 100;
- struct rt_link_detect *ldi = &pmlmepriv->LinkDetectInfo;
-
-	/* Determine if our traffic is busy now */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (rtl8723a_BT_coexist(padapter))
- BusyThreshold = 50;
- else if (ldi->bBusyTraffic)
- BusyThreshold = 75;
-		/* if we raised bBusyTraffic in the last watchdog,
-		   use a lower threshold. */
- if (ldi->NumRxOkInPeriod > BusyThreshold ||
- ldi->NumTxOkInPeriod > BusyThreshold) {
- bBusyTraffic = true;
-
- if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
- bRxBusyTraffic = true;
- else
- bTxBusyTraffic = true;
- }
-
- /* Higher Tx/Rx data. */
- if (ldi->NumRxOkInPeriod > 4000 ||
- ldi->NumTxOkInPeriod > 4000) {
- bHigherBusyTraffic = true;
-
- if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
- bHigherBusyRxTraffic = true;
- else
- bHigherBusyTxTraffic = true;
- }
-
- if (!rtl8723a_BT_coexist(padapter) ||
- !rtl8723a_BT_using_antenna_1(padapter)) {
- /* check traffic for powersaving. */
- if (((ldi->NumRxUnicastOkInPeriod +
- ldi->NumTxOkInPeriod) > 8) ||
- ldi->NumRxUnicastOkInPeriod > 2)
- bEnterPS = false;
- else
- bEnterPS = true;
-
- /* LeisurePS only work in infra mode. */
- if (bEnterPS)
- LPS_Enter23a(padapter);
- else
- LPS_Leave23a(padapter);
- }
- } else
- LPS_Leave23a(padapter);
-
- ldi->NumRxOkInPeriod = 0;
- ldi->NumTxOkInPeriod = 0;
- ldi->NumRxUnicastOkInPeriod = 0;
- ldi->bBusyTraffic = bBusyTraffic;
- ldi->bTxBusyTraffic = bTxBusyTraffic;
- ldi->bRxBusyTraffic = bRxBusyTraffic;
- ldi->bHigherBusyTraffic = bHigherBusyTraffic;
- ldi->bHigherBusyRxTraffic = bHigherBusyRxTraffic;
- ldi->bHigherBusyTxTraffic = bHigherBusyTxTraffic;
-}
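
traffic_status_watchdog() above classifies the last period as busy when either the Tx or the Rx OK count exceeds a threshold, and the threshold is tightened when BT coexistence is active (50) or the previous period was already busy (75), giving a simple hysteresis. The core decision, rewritten as a small standalone sketch with the same constants (period_is_busy is a hypothetical name), is:

#include <linux/types.h>

/* the busy/idle decision with the same thresholds as
 * traffic_status_watchdog() above; standalone sketch only */
static bool period_is_busy(u32 tx_ok, u32 rx_ok,
			   bool bt_active, bool was_busy)
{
	u32 threshold = 100;

	if (bt_active)
		threshold = 50;		/* BT coexist: be more conservative */
	else if (was_busy)
		threshold = 75;		/* hysteresis: busy in the last period */

	return tx_ok > threshold || rx_ok > threshold;
}
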
-
-static void dynamic_chk_wk_hdl(struct rtw_adapter *padapter, u8 *pbuf, int sz)
-{
- struct mlme_priv *pmlmepriv;
-
- padapter = (struct rtw_adapter *)pbuf;
- pmlmepriv = &padapter->mlmepriv;
-
-#ifdef CONFIG_8723AU_AP_MODE
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- expire_timeout_chk23a(padapter);
-#endif
-
- rtl8723a_sreset_xmit_status_check(padapter);
-
- linked_status_chk23a(padapter);
- traffic_status_watchdog(padapter);
-
- rtl8723a_HalDmWatchDog(padapter);
-
-	/* BT-Coexist */
- rtl8723a_BT_do_coexist(padapter);
-}
-
-static void lps_ctrl_wk_hdl(struct rtw_adapter *padapter, u8 lps_ctrl_type)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 mstatus;
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
- return;
-
- switch (lps_ctrl_type) {
- case LPS_CTRL_SCAN:
- rtl8723a_BT_wifiscan_notify(padapter, true);
- if (!rtl8723a_BT_using_antenna_1(padapter)) {
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- LPS_Leave23a(padapter);
- }
- break;
- case LPS_CTRL_JOINBSS:
- LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_CONNECT:
- mstatus = 1;/* connect */
- /* Reset LPS Setting */
- padapter->pwrctrlpriv.LpsIdleCount = 0;
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 1);
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- break;
- case LPS_CTRL_DISCONNECT:
- mstatus = 0;/* disconnect */
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 0);
- break;
- case LPS_CTRL_SPECIAL_PACKET:
- pwrpriv->DelayLPSLastTimeStamp = jiffies;
- rtl8723a_BT_specialpacket_notify(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_LEAVE:
- rtl8723a_BT_lps_leave(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
-
- default:
- break;
- }
-}
-
-int rtw_lps_ctrl_wk_cmd23a(struct rtw_adapter *padapter,
- u8 lps_ctrl_type, u8 enqueue)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- if (enqueue) {
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
- pdrvextra_cmd_parm->type_size = lps_ctrl_type;
- pdrvextra_cmd_parm->pbuf = NULL;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
- GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
- } else
- lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
-exit:
-
- return res;
-}
-
-int rtw_ps_cmd23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *ppscmd;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ppscmd) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ppscmd);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
- pdrvextra_cmd_parm->pbuf = NULL;
- init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm,
- GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ppscmd);
-exit:
-
- return res;
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-
-static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
-{
- int cnt = 0;
- struct sta_info *psta_bmc;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (!psta_bmc)
- return;
-
- if (psta_bmc->sleepq_len == 0) {
- bool val;
-
- val = rtl8723a_chk_hi_queue_empty(padapter);
-
- while (!val) {
- msleep(100);
-
- cnt++;
-
- if (cnt > 10)
- break;
-
- val = rtl8723a_chk_hi_queue_empty(padapter);
- }
-
- if (cnt <= 10) {
- pstapriv->tim_bitmap &= ~BIT(0);
- pstapriv->sta_dz_bitmap &= ~BIT(0);
-
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
- } else /* re-check again */
- rtw_chk_hi_queue_cmd23a(padapter);
- }
-}
-
-int rtw_chk_hi_queue_cmd23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
- pdrvextra_cmd_parm->type_size = 0;
- pdrvextra_cmd_parm->pbuf = NULL;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
- GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-#endif
-
-int rtw_c2h_wk_cmd23a(struct rtw_adapter *padapter, u8 *c2h_evt)
-{
- struct cmd_obj *ph2c;
- struct drvextra_cmd_parm *pdrvextra_cmd_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm),
- GFP_ATOMIC);
- if (!pdrvextra_cmd_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
- pdrvextra_cmd_parm->type_size = c2h_evt ? 16 : 0;
- pdrvextra_cmd_parm->pbuf = c2h_evt;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm,
- GEN_CMD_CODE(_Set_Drv_Extra));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
-static int c2h_evt_hdl(struct rtw_adapter *adapter, struct c2h_evt_hdr *c2h_evt)
-{
- int ret = _FAIL;
- u8 buf[16];
-
- if (!c2h_evt) {
- /* No c2h event in cmd_obj, read the c2h event before handling */
- if (c2h_evt_read23a(adapter, buf) == _SUCCESS) {
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- ret = c2h_handler_8723a(adapter, c2h_evt);
- }
- } else
- ret = c2h_handler_8723a(adapter, c2h_evt);
-
- return ret;
-}
-
-static void rtw_irq_work(struct work_struct *work)
-{
- struct evt_priv *evtpriv;
- struct rtw_adapter *adapter;
-
- evtpriv = container_of(work, struct evt_priv, irq_wk);
- adapter = container_of(evtpriv, struct rtw_adapter, evtpriv);
-
- c2h_evt_clear23a(adapter);
-}
-
-void rtw_evt_work(struct work_struct *work)
-{
- struct evt_work *ework;
- struct rtw_adapter *adapter;
-
- ework = container_of(work, struct evt_work, work);
- adapter = ework->adapter;
-
- c2h_evt_clear23a(adapter);
-
- if (!c2h_evt_exist(&ework->u.c2h_evt)) {
- kfree(ework);
- return;
- }
-
- if (c2h_id_filter_ccx_8723a(ework->u.c2h_evt.id) == true) {
- /* Handle CCX report here */
- c2h_handler_8723a(adapter, &ework->u.c2h_evt);
- kfree(ework);
- } else {
- /*
- * Enqueue into cmd_thread for others.
- * ework will be turned into a c2h_evt and freed once it
- * has been consumed.
- */
- rtw_c2h_wk_cmd23a(adapter, (u8 *)&ework->u.c2h_evt);
- }
-}
-
-int rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct drvextra_cmd_parm *pdrvextra_cmd;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
-
- switch (pdrvextra_cmd->ec_id) {
- case DYNAMIC_CHK_WK_CID:
- dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf,
- pdrvextra_cmd->type_size);
- break;
- case POWER_SAVING_CTRL_WK_CID:
- rtw_ps_processor23a(padapter);
- break;
- case LPS_CTRL_WK_CID:
- lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
- break;
-#ifdef CONFIG_8723AU_AP_MODE
- case CHECK_HIQ_WK_CID:
- rtw_chk_hi_queue_hdl(padapter);
- break;
-#endif /* CONFIG_8723AU_AP_MODE */
- case C2H_WK_CID:
- c2h_evt_hdl(padapter,
- (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf);
- break;
-
- default:
- break;
- }
-
- if (pdrvextra_cmd->pbuf && (pdrvextra_cmd->type_size > 0)) {
- kfree(pdrvextra_cmd->pbuf);
- /*
- * No need to set pdrvextra_cmd->pbuf = NULL as we were
- * operating on a copy of the original pcmd->parmbuf
- * created in rtw_cmd_work().
- */
- }
-
- return H2C_SUCCESS;
-}
-
-void rtw_survey_cmd_callback23a(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res == H2C_DROPPED) {
- /* TODO: cancel the timer and run the timeout handler directly... */
- /* need to make the timeout handler OS independent */
- mod_timer(&pmlmepriv->scan_to_timer,
- jiffies + msecs_to_jiffies(1));
- } else if (pcmd->res != H2C_SUCCESS) {
- mod_timer(&pmlmepriv->scan_to_timer,
- jiffies + msecs_to_jiffies(1));
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "********Error: MgntActrtw_set_802_11_bssid23a_LIST_SCAN Fail ************\n");
- }
-
- /* free cmd */
- rtw_free_cmd_obj23a(pcmd);
-}
-
-void rtw_disassoc_cmd23a_callback(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res != H2C_SUCCESS) {
- spin_lock_bh(&pmlmepriv->lock);
- set_fwstate(pmlmepriv, _FW_LINKED);
- spin_unlock_bh(&pmlmepriv->lock);
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "***Error: disconnect_cmd_callback Fail ***\n");
- return;
- }
-
- /* free cmd */
- rtw_free_cmd_obj23a(pcmd);
-}
-
-void rtw_joinbss_cmd23a_callback(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pcmd->res == H2C_DROPPED) {
- /* TODO: cancel the timer and run the timeout handler directly... */
- /* need to make the timeout handler OS independent */
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
- } else if (pcmd->res != H2C_SUCCESS) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n");
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
- }
-
- rtw_free_cmd_obj23a(pcmd);
-}
-
-void rtw_createbss_cmd23a_callback(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct sta_info *psta;
- struct wlan_network *pwlan;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue;
-
- if (pcmd->res != H2C_SUCCESS) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "********Error: rtw_createbss_cmd23a_callback Fail ************\n");
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
- }
-
- del_timer_sync(&pmlmepriv->assoc_timer);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- psta = rtw_get_stainfo23a(&padapter->stapriv,
- pnetwork->MacAddress);
- if (!psta) {
- psta = rtw_alloc_stainfo23a(&padapter->stapriv,
- pnetwork->MacAddress,
- GFP_KERNEL);
- if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "Can't alloc sta_info when createbss_cmd_callback\n");
- goto createbss_cmd_fail;
- }
- }
-
- spin_lock_bh(&pmlmepriv->lock);
- rtw_indicate_connect23a(padapter);
- spin_unlock_bh(&pmlmepriv->lock);
- } else {
- pwlan = rtw_alloc_network(pmlmepriv, GFP_KERNEL);
- spin_lock_bh(&scanned_queue->lock);
- if (!pwlan) {
- pwlan = rtw_get_oldest_wlan_network23a(scanned_queue);
- if (!pwlan) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "Error: can't get pwlan in rtw23a_joinbss_event_cb\n");
- spin_unlock_bh(&scanned_queue->lock);
- goto createbss_cmd_fail;
- }
- pwlan->last_scanned = jiffies;
- } else {
- list_add_tail(&pwlan->list,
- &scanned_queue->queue);
- }
-
- pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
- memcpy(&pwlan->network, pnetwork, pnetwork->Length);
- /* pwlan->fixed = true; */
-
- /* list_add_tail(&pwlan->list,
- &pmlmepriv->scanned_queue.queue); */
-
- /* copy pdev_network information to
- pmlmepriv->cur_network */
- memcpy(&tgt_network->network, pnetwork,
- get_wlan_bssid_ex_sz(pnetwork));
-
- /* reset DSConfig */
-
- clr_fwstate(pmlmepriv, _FW_UNDER_LINKING);
-
- /* we will set _FW_LINKED when one more sta joins
- us (rtw_stassoc_event_callback23a) */
- spin_unlock_bh(&scanned_queue->lock);
- }
-
-createbss_cmd_fail:
-
- rtw_free_cmd_obj23a(pcmd);
-}
-
-void rtw_setstaKey_cmdrsp_callback23a(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct sta_priv *pstapriv;
- struct set_stakey_rsp *psetstakey_rsp;
- struct sta_info *psta;
-
- pstapriv = &padapter->stapriv;
- psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp);
- psta = rtw_get_stainfo23a(pstapriv, psetstakey_rsp->addr);
-
- if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "ERROR: rtw_setstaKey_cmdrsp_callback23a => can't get sta_info\n");
- goto exit;
- }
-
-exit:
-
- rtw_free_cmd_obj23a(pcmd);
-}
-
-void rtw_setassocsta_cmdrsp_callback23a(struct rtw_adapter *padapter,
- struct cmd_obj *pcmd)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct set_assocsta_parm *passocsta_parm;
- struct set_assocsta_rsp *passocsta_rsp;
- struct sta_info *psta;
-
- passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
- passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp);
- psta = rtw_get_stainfo23a(pstapriv, passocsta_parm->addr);
-
- if (psta == NULL) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "ERROR: setassocsta_cmdrsp_callbac => can't get sta_info\n");
- goto exit;
- }
-
- psta->aid = psta->mac_id = passocsta_rsp->cam_id;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
- check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
- set_fwstate(pmlmepriv, _FW_LINKED);
- spin_unlock_bh(&pmlmepriv->lock);
-
-exit:
- rtw_free_cmd_obj23a(pcmd);
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
deleted file mode 100644
index 359ef4197e94..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_efuse.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_EFUSE_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <rtw_efuse.h>
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-#define REG_EFUSE_CTRL 0x0030
-#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control */
-
-#define VOLTAGE_V25 0x03
-#define LDOE25_SHIFT 28
-
-/*
- * When we want to enable write operations, we should switch to the
- * power-on state. When we stop writing, we should switch back to 500k
- * mode and disable the LDO 2.5V.
- */
-static void Efuse_PowerSwitch(struct rtw_adapter *padapter,
- u8 bWrite, u8 PwrState)
-{
- u8 tempval;
- u16 tmpV16;
-
- if (PwrState == true) {
- rtl8723au_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
-
- /*
- * 1.2V Power: From VDDON with Power
- * Cut(0x0000h[15]), default valid
- */
- tmpV16 = rtl8723au_read16(padapter, REG_SYS_ISO_CTRL);
- if (!(tmpV16 & PWC_EV12V)) {
- tmpV16 |= PWC_EV12V;
- rtl8723au_write16(padapter, REG_SYS_ISO_CTRL, tmpV16);
- }
- /* Reset: 0x0000h[28], default valid */
- tmpV16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
- if (!(tmpV16 & FEN_ELDR)) {
- tmpV16 |= FEN_ELDR;
- rtl8723au_write16(padapter, REG_SYS_FUNC_EN, tmpV16);
- }
-
- /*
- * Clock: Gated(0x0008h[5]) 8M(0x0008h[1])
- * clock from ANA, default valid
- */
- tmpV16 = rtl8723au_read16(padapter, REG_SYS_CLKR);
- if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
- tmpV16 |= (LOADER_CLK_EN | ANA8M);
- rtl8723au_write16(padapter, REG_SYS_CLKR, tmpV16);
- }
-
- if (bWrite == true) {
- /* Enable LDO 2.5V before read/write action */
- tempval = rtl8723au_read8(padapter, EFUSE_TEST + 3);
- tempval &= 0x0F;
- tempval |= (VOLTAGE_V25 << 4);
- rtl8723au_write8(padapter, EFUSE_TEST + 3,
- tempval | 0x80);
- }
- } else {
- rtl8723au_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
-
- if (bWrite == true) {
- /* Disable LDO 2.5V after read/write action */
- tempval = rtl8723au_read8(padapter, EFUSE_TEST + 3);
- rtl8723au_write8(padapter, EFUSE_TEST + 3,
- tempval & 0x7F);
- }
- }
-}
-
-u16 Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType)
-{
- u16 ret = 0;
-
- if (efuseType == EFUSE_WIFI)
- ret = rtl8723a_EfuseGetCurrentSize_WiFi(pAdapter);
- else
- ret = rtl8723a_EfuseGetCurrentSize_BT(pAdapter);
-
- return ret;
-}
-
-/* Get current efuse area enabled word */
-u8 Efuse_CalculateWordCnts23a(u8 word_en)
-{
- return hweight8((~word_en) & 0xf);
-}
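As a worked aside: word_en is an active-low, 4-bit mask, so a cleared bit means the corresponding 2-byte word of the section is present. A minimal standalone sketch of the same arithmetic, using the compiler builtin in place of the kernel's hweight8():

#include <stdio.h>

/* Count enabled words: bits are active-low and only the low nibble matters. */
static unsigned int word_cnts(unsigned char word_en)
{
        return (unsigned int)__builtin_popcount((~word_en) & 0xf);
}

int main(void)
{
        /* 0x9 = 0b1001: bits 1 and 2 are clear, so words 1 and 2 are enabled */
        printf("word_en 0x9 -> %u words\n", word_cnts(0x9)); /* prints 2 */
        return 0;
}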
-
-/*
- * Description: Execute E-Fuse read byte operation.
- *
- * Assumptions: 1. Boot from E-Fuse and successfully auto-load.
- * 2. PASSIVE_LEVEL (USB interface)
- */
-void ReadEFuseByte23a(struct rtw_adapter *Adapter, u16 _offset, u8 *pbuf)
-{
- u32 value32;
- u8 readbyte;
- u16 retry;
-
- /* Write Address */
- rtl8723au_write8(Adapter, EFUSE_CTRL+1, (_offset & 0xff));
- readbyte = rtl8723au_read8(Adapter, EFUSE_CTRL+2);
- rtl8723au_write8(Adapter, EFUSE_CTRL+2,
- ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
-
- /* Write bit 32 0 */
- readbyte = rtl8723au_read8(Adapter, EFUSE_CTRL+3);
- rtl8723au_write8(Adapter, EFUSE_CTRL+3, readbyte & 0x7f);
-
- /* Check bit 32 read-ready */
- retry = 0;
- value32 = rtl8723au_read32(Adapter, EFUSE_CTRL);
- while (!((value32 >> 24) & 0x80) && retry < 10000) {
- value32 = rtl8723au_read32(Adapter, EFUSE_CTRL);
- retry++;
- }
-
- /*
- * Added suggested delay. This fixes Efuse read errors
- * in high temperature conditions. The designer says
- * there must be some delay after the ready bit is set,
- * or the result will always be the last data we read.
- */
- udelay(50);
- value32 = rtl8723au_read32(Adapter, EFUSE_CTRL);
-
- *pbuf = (u8)(value32 & 0xff);
-}
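A hedged usage sketch, assuming the usual driver context (driver headers, struct rtw_adapter) and that the caller has already switched efuse power on via Efuse_PowerSwitch(); dump_raw_efuse_head() is a hypothetical helper, not part of this driver:

/* Illustrative only: read the first 8 raw efuse bytes and dump them. */
static void dump_raw_efuse_head(struct rtw_adapter *adapter)
{
        u8 buf[8];
        u16 addr;

        for (addr = 0; addr < sizeof(buf); addr++)
                ReadEFuseByte23a(adapter, addr, &buf[addr]);

        print_hex_dump_bytes("efuse: ", DUMP_PREFIX_OFFSET, buf, sizeof(buf));
}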
-
-void EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
- u8 type, void *pOut)
-{
- u8 *pu1Tmp;
- u16 *pu2Tmp;
- u8 *pMax_section;
-
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- pMax_section = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pMax_section = EFUSE_MAX_SECTION_8723A;
- else
- *pMax_section = EFUSE_BT_MAX_SECTION;
- break;
-
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- pu2Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
- else
- *pu2Tmp = EFUSE_BT_REAL_CONTENT_LEN;
- break;
-
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- pu2Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
- EFUSE_OOB_PROTECT_BYTES);
- else
- *pu2Tmp = (EFUSE_BT_REAL_BANK_CONTENT_LEN -
- EFUSE_PROTECT_BYTES_BANK);
- break;
-
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- pu2Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
- EFUSE_OOB_PROTECT_BYTES);
- else
- *pu2Tmp = (EFUSE_BT_REAL_CONTENT_LEN -
- (EFUSE_PROTECT_BYTES_BANK * 3));
- break;
-
- case TYPE_EFUSE_MAP_LEN:
- pu2Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu2Tmp = EFUSE_MAP_LEN_8723A;
- else
- *pu2Tmp = EFUSE_BT_MAP_LEN;
- break;
-
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- pu1Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
- else
- *pu1Tmp = EFUSE_PROTECT_BYTES_BANK;
- break;
-
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- pu2Tmp = pOut;
-
- if (efuseType == EFUSE_WIFI)
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
- else
- *pu2Tmp = EFUSE_BT_REAL_BANK_CONTENT_LEN;
- break;
-
- default:
- pu1Tmp = pOut;
- *pu1Tmp = 0;
- break;
- }
-}
-
-/* Copied from WMAC: read one byte from the EFUSE. */
-u8 EFUSE_Read1Byte23a(struct rtw_adapter *Adapter, u16 Address)
-{
- u8 data;
- u8 Bytetemp = {0x00};
- u8 temp = {0x00};
- u32 k = 0;
- u16 contentLen = 0;
-
- EFUSE_GetEfuseDefinition23a(Adapter, EFUSE_WIFI,
- TYPE_EFUSE_REAL_CONTENT_LEN,
- (void *)&contentLen);
-
- if (Address < contentLen) { /* E-fuse 512Byte */
- /* Write E-fuse Register address bit0~7 */
- temp = Address & 0xFF;
- rtl8723au_write8(Adapter, EFUSE_CTRL+1, temp);
- Bytetemp = rtl8723au_read8(Adapter, EFUSE_CTRL+2);
- /* Write E-fuse Register address bit8~9 */
- temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
- rtl8723au_write8(Adapter, EFUSE_CTRL+2, temp);
-
- /* Write 0x30[31]= 0 */
- Bytetemp = rtl8723au_read8(Adapter, EFUSE_CTRL+3);
- temp = Bytetemp & 0x7F;
- rtl8723au_write8(Adapter, EFUSE_CTRL+3, temp);
-
- /* Wait Write-ready (0x30[31]= 1) */
- Bytetemp = rtl8723au_read8(Adapter, EFUSE_CTRL+3);
- while (!(Bytetemp & 0x80)) {
- Bytetemp = rtl8723au_read8(Adapter, EFUSE_CTRL+3);
- k++;
- if (k == 1000) {
- k = 0;
- break;
- }
- }
- data = rtl8723au_read8(Adapter, EFUSE_CTRL);
- return data;
- }
- return 0xFF;
-}
-
-/* Read one byte from real Efuse. */
-int efuse_OneByteRead23a(struct rtw_adapter *pAdapter, u16 addr, u8 *data)
-{
- u8 tmpidx = 0;
- int bResult;
-
- /* -----------------e-fuse reg ctrl ---------------------------- */
- /* address */
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 2,
- ((u8)((addr >> 8) & 0x03)) |
- (rtl8723au_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC));
-
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 3, 0x72); /* read cmd */
-
- while (!(0x80 & rtl8723au_read8(pAdapter, EFUSE_CTRL + 3)) &&
- (tmpidx < 100))
- tmpidx++;
- if (tmpidx < 100) {
- *data = rtl8723au_read8(pAdapter, EFUSE_CTRL);
- bResult = _SUCCESS;
- } else {
- *data = 0xff;
- bResult = _FAIL;
- }
- return bResult;
-}
-
-/* Write one byte to the real Efuse. */
-int efuse_OneByteWrite23a(struct rtw_adapter *pAdapter, u16 addr, u8 data)
-{
- u8 tmpidx = 0;
- int bResult;
-
- /* return 0; */
-
- /* -----------------e-fuse reg ctrl ------------------------- */
- /* address */
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 2,
- (rtl8723au_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC) |
- (u8)((addr >> 8) & 0x03));
- rtl8723au_write8(pAdapter, EFUSE_CTRL, data); /* data */
-
- rtl8723au_write8(pAdapter, EFUSE_CTRL + 3, 0xF2); /* write cmd */
-
- while ((0x80 & rtl8723au_read8(pAdapter, EFUSE_CTRL + 3)) &&
- (tmpidx < 100)) {
- tmpidx++;
- }
-
- if (tmpidx < 100)
- bResult = _SUCCESS;
- else
- bResult = _FAIL;
-
- return bResult;
-}
-
-/* Read the allowed words in the current efuse section data. */
-void efuse_WordEnableDataRead23a(u8 word_en, u8 *sourdata, u8 *targetdata)
-{
- if (!(word_en&BIT(0))) {
- targetdata[0] = sourdata[0];
- targetdata[1] = sourdata[1];
- }
- if (!(word_en&BIT(1))) {
- targetdata[2] = sourdata[2];
- targetdata[3] = sourdata[3];
- }
- if (!(word_en&BIT(2))) {
- targetdata[4] = sourdata[4];
- targetdata[5] = sourdata[5];
- }
- if (!(word_en&BIT(3))) {
- targetdata[6] = sourdata[6];
- targetdata[7] = sourdata[7];
- }
-}
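A standalone illustration of the active-low word_en mapping above, with hypothetical data: word_en = 0x6 leaves bits 0 and 3 clear, so only words 0 and 3 (bytes 0-1 and 6-7) are copied:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char dst[8];
        unsigned char word_en = 0x6; /* bits 1 and 2 set: words 1 and 2 skipped */
        int w;

        memset(dst, 0xff, sizeof(dst));
        for (w = 0; w < 4; w++)
                if (!(word_en & (1 << w)))
                        memcpy(&dst[2 * w], &src[2 * w], 2);

        for (w = 0; w < 8; w++)
                printf("%02x ", dst[w]); /* 01 02 ff ff ff ff 07 08 */
        printf("\n");
        return 0;
}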
-
-static int efuse_read8(struct rtw_adapter *padapter, u16 address, u8 *value)
-{
- return efuse_OneByteRead23a(padapter, address, value);
-}
-
-static int efuse_write8(struct rtw_adapter *padapter, u16 address, u8 *value)
-{
- return efuse_OneByteWrite23a(padapter, address, *value);
-}
-
-/* read/write raw efuse data */
-int rtw_efuse_access23a(struct rtw_adapter *padapter, u8 bWrite, u16 start_addr,
- u16 cnts, u8 *data)
-{
- int i = 0;
- u16 real_content_len = 0, max_available_size = 0;
- int res = _FAIL;
- int (*rw8)(struct rtw_adapter *, u16, u8*);
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
- TYPE_EFUSE_REAL_CONTENT_LEN,
- (void *)&real_content_len);
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
- (void *)&max_available_size);
-
- if (start_addr > real_content_len)
- return _FAIL;
-
- if (true == bWrite) {
- if ((start_addr + cnts) > max_available_size)
- return _FAIL;
- rw8 = &efuse_write8;
- } else
- rw8 = &efuse_read8;
-
- Efuse_PowerSwitch(padapter, bWrite, true);
-
- /* e-fuse one byte read/write */
- for (i = 0; i < cnts; i++) {
- if (start_addr >= real_content_len) {
- res = _FAIL;
- break;
- }
-
- res = rw8(padapter, start_addr++, data++);
- if (res == _FAIL)
- break;
- }
-
- Efuse_PowerSwitch(padapter, bWrite, false);
-
- return res;
-}
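A hedged usage sketch, assuming the driver headers; read_raw_efuse_block() is a hypothetical wrapper showing the read side of the API (bWrite = false selects efuse_read8, and power switching is handled inside rtw_efuse_access23a itself):

/* Illustrative only: read 16 raw efuse bytes starting at offset 0. */
static int read_raw_efuse_block(struct rtw_adapter *adapter, u8 *out)
{
        /* returns _SUCCESS or _FAIL, like the helper it wraps */
        return rtw_efuse_access23a(adapter, false, 0, 16, out);
}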
-
-u16 efuse_GetMaxSize23a(struct rtw_adapter *padapter)
-{
- u16 max_size;
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL,
- (void *)&max_size);
- return max_size;
-}
-
-int rtw_efuse_map_read23a(struct rtw_adapter *padapter,
- u16 addr, u16 cnts, u8 *data)
-{
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
- TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- Efuse_PowerSwitch(padapter, false, true);
-
- rtl8723a_readefuse(padapter, EFUSE_WIFI, addr, cnts, data);
-
- Efuse_PowerSwitch(padapter, false, false);
-
- return _SUCCESS;
-}
-
-int rtw_BT_efuse_map_read23a(struct rtw_adapter *padapter,
- u16 addr, u16 cnts, u8 *data)
-{
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
- TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- Efuse_PowerSwitch(padapter, false, true);
-
- rtl8723a_readefuse(padapter, EFUSE_BT, addr, cnts, data);
-
- Efuse_PowerSwitch(padapter, false, false);
-
- return _SUCCESS;
-}
-
-/* Read All Efuse content */
-static void Efuse_ReadAllMap(struct rtw_adapter *pAdapter, u8 efuseType,
- u8 *Efuse)
-{
- u16 mapLen = 0;
-
- Efuse_PowerSwitch(pAdapter, false, true);
-
- EFUSE_GetEfuseDefinition23a(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN,
- (void *)&mapLen);
-
- rtl8723a_readefuse(pAdapter, efuseType, 0, mapLen, Efuse);
-
- Efuse_PowerSwitch(pAdapter, false, false);
-}
-
-/*
- * Functions: efuse_ShadowRead1Byte
- * efuse_ShadowRead2Byte
- * efuse_ShadowRead4Byte
- *
- * Read from efuse init map by one/two/four bytes
- */
-static void efuse_ShadowRead1Byte(struct rtw_adapter *pAdapter, u16 Offset,
- u8 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
-}
-
-static void efuse_ShadowRead2Byte(struct rtw_adapter *pAdapter, u16 Offset,
- u16 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
-}
-
-static void efuse_ShadowRead4Byte(struct rtw_adapter *pAdapter, u16 Offset,
- u32 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
-}
-
-/* Transfer current EFUSE content to shadow init and modify map. */
-void EFUSE_ShadowMapUpdate23a(struct rtw_adapter *pAdapter, u8 efuseType)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition23a(pAdapter, efuseType,
- TYPE_EFUSE_MAP_LEN, (void *)&mapLen);
-
- if (pEEPROM->bautoload_fail_flag == true)
- memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
- else
- Efuse_ReadAllMap(pAdapter, efuseType,
- pEEPROM->efuse_eeprom_data);
-}
-
-/* Read from efuse init map */
-void EFUSE_ShadowRead23a(struct rtw_adapter *pAdapter, u8 Type,
- u16 Offset, u32 *Value)
-{
- if (Type == 1)
- efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
- else if (Type == 2)
- efuse_ShadowRead2Byte(pAdapter, Offset, (u16 *)Value);
- else if (Type == 4)
- efuse_ShadowRead4Byte(pAdapter, Offset, (u32 *)Value);
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_ieee80211.c b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
deleted file mode 100644
index 07a6490a83d6..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_ieee80211.c
+++ /dev/null
@@ -1,855 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _IEEE80211_C
-
-#include <drv_types.h>
-#include <linux/ieee80211.h>
-#include <ieee80211.h>
-#include <wifi.h>
-#include <osdep_service.h>
-#include <wlan_bssdef.h>
-
-u8 RTW_WPA_OUI23A_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
-u16 RTW_WPA_VERSION23A = 1;
-u8 WPA_AUTH_KEY_MGMT_NONE23A[] = { 0x00, 0x50, 0xf2, 0 };
-u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X23A[] = { 0x00, 0x50, 0xf2, 1 };
-u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[] = { 0x00, 0x50, 0xf2, 2 };
-u8 WPA_CIPHER_SUITE_NONE23A[] = { 0x00, 0x50, 0xf2, 0 };
-u8 WPA_CIPHER_SUITE_WEP4023A[] = { 0x00, 0x50, 0xf2, 1 };
-u8 WPA_CIPHER_SUITE_TKIP23A[] = { 0x00, 0x50, 0xf2, 2 };
-u8 WPA_CIPHER_SUITE_WRAP23A[] = { 0x00, 0x50, 0xf2, 3 };
-u8 WPA_CIPHER_SUITE_CCMP23A[] = { 0x00, 0x50, 0xf2, 4 };
-u8 WPA_CIPHER_SUITE_WEP10423A[] = { 0x00, 0x50, 0xf2, 5 };
-
-u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X23A[] = { 0x00, 0x0f, 0xac, 1 };
-u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[] = { 0x00, 0x0f, 0xac, 2 };
-u8 RSN_CIPHER_SUITE_NONE23A[] = { 0x00, 0x0f, 0xac, 0 };
-u8 RSN_CIPHER_SUITE_WEP4023A[] = { 0x00, 0x0f, 0xac, 1 };
-u8 RSN_CIPHER_SUITE_TKIP23A[] = { 0x00, 0x0f, 0xac, 2 };
-u8 RSN_CIPHER_SUITE_WRAP23A[] = { 0x00, 0x0f, 0xac, 3 };
-u8 RSN_CIPHER_SUITE_CCMP23A[] = { 0x00, 0x0f, 0xac, 4 };
-u8 RSN_CIPHER_SUITE_WEP10423A[] = { 0x00, 0x0f, 0xac, 5 };
-/* */
-/* for adhoc-master to generate ie and provide supported-rate to fw */
-/* */
-
-static u8 WIFI_CCKRATES[] = {
- IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
-};
-
-static u8 WIFI_OFDMRATES[] = {
- IEEE80211_OFDM_RATE_6MB,
- IEEE80211_OFDM_RATE_9MB,
- IEEE80211_OFDM_RATE_12MB,
- IEEE80211_OFDM_RATE_18MB,
- IEEE80211_OFDM_RATE_24MB,
- IEEE80211_OFDM_RATE_36MB,
- IEEE80211_OFDM_RATE_48MB,
- IEEE80211_OFDM_RATE_54MB
-};
-
-int rtw_get_bit_value_from_ieee_value23a(u8 val)
-{
- unsigned char dot11_rate_table[] =
- {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
-
- int i = 0;
-
- while (dot11_rate_table[i] != 0) {
- if (dot11_rate_table[i] == val)
- return BIT(i);
- i++;
- }
- return 0;
-}
-
-static bool rtw_is_cckrates_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i]) {
- if ((rate[i] & 0x7f) == 2 || (rate[i] & 0x7f) == 4 ||
- (rate[i] & 0x7f) == 11 || (rate[i] & 0x7f) == 22)
- return true;
- i++;
- }
-
- return false;
-}
-
-static bool rtw_is_cckratesonly_included(u8 *rate)
-{
- u32 i = 0;
-
- while (rate[i]) {
- if ((rate[i] & 0x7f) != 2 && (rate[i] & 0x7f) != 4 &&
- (rate[i] & 0x7f) != 11 && (rate[i] & 0x7f) != 22)
- return false;
-
- i++;
- }
-
- return true;
-}
-
-int rtw_check_network_type23a(unsigned char *rate, int ratelen, int channel)
-{
- if (channel > 14) {
- if (rtw_is_cckrates_included(rate))
- return WIRELESS_INVALID;
- else
- return WIRELESS_11A;
- } else { /* could be pure B, pure G, or B/G */
- if (rtw_is_cckratesonly_included(rate))
- return WIRELESS_11B;
- else if (rtw_is_cckrates_included(rate))
- return WIRELESS_11BG;
- else
- return WIRELESS_11G;
- }
-}
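A hedged example of the classification above (classify_cck_only() is hypothetical): a pure CCK rate set on a 2.4 GHz channel comes back as WIRELESS_11B, while any CCK rate on a channel above 14 would be WIRELESS_INVALID:

/* Illustrative only: 1/2/5.5/11 Mbps in IEEE half-Mbps units, channel 6. */
static int classify_cck_only(void)
{
        unsigned char rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x00 };

        return rtw_check_network_type23a(rates, 4, 6); /* WIRELESS_11B */
}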
-
-/* rtw_set_ie23a will update frame length */
-u8 *rtw_set_ie23a(u8 *pbuf, int index, uint len, const u8 *source, uint *frlen)
-{
- *pbuf = (u8)index;
-
- *(pbuf + 1) = (u8)len;
-
- if (len > 0)
- memcpy((void *)(pbuf + 2), (void *)source, len);
-
- *frlen = *frlen + (len + 2);
-
- return pbuf + len + 2;
-}
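A hedged sketch of how the helper above is typically chained (build_two_ies() is hypothetical): the return value points at the next free byte and *frlen accumulates the total IE length:

/* Illustrative only: append an SSID IE and a DS Parameter Set IE. */
static uint build_two_ies(u8 *buf, const u8 *ssid, u8 ssid_len, u8 channel)
{
        uint frlen = 0;
        u8 *p = buf;

        p = rtw_set_ie23a(p, WLAN_EID_SSID, ssid_len, ssid, &frlen);
        p = rtw_set_ie23a(p, WLAN_EID_DS_PARAMS, 1, &channel, &frlen);

        return frlen; /* 2 + ssid_len + 2 + 1 bytes written */
}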
-
-inline u8 *rtw_set_ie23a_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
- u8 new_ch, u8 ch_switch_cnt)
-{
- u8 ie_data[3];
-
- ie_data[0] = ch_switch_mode;
- ie_data[1] = new_ch;
- ie_data[2] = ch_switch_cnt;
- return rtw_set_ie23a(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
-}
-
-inline u8 hal_ch_offset_to_secondary_ch_offset23a(u8 ch_offset)
-{
- if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- return IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- return IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-
- return IEEE80211_HT_PARAM_CHA_SEC_NONE;
-}
-
-inline u8 *rtw_set_ie23a_secondary_ch_offset(u8 *buf, u32 *buf_len,
- u8 secondary_ch_offset)
-{
- return rtw_set_ie23a(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET,
- 1, &secondary_ch_offset, buf_len);
-}
-
-/*----------------------------------------------------------------------------
-index: the information element ID to search for; limit is the number of bytes to search
------------------------------------------------------------------------------*/
-u8 *rtw_get_ie23a(u8 *pbuf, int index, int *len, int limit)
-{
- int tmp, i;
- u8 *p;
-
- if (limit < 1) {
- return NULL;
- }
-
- p = pbuf;
- i = 0;
- *len = 0;
- while (1) {
- if (*p == index) {
- *len = *(p + 1);
- return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
- }
- if (i >= limit)
- break;
- }
-
- return NULL;
-}
-
-/**
- * rtw_get_ie23a_ex - Search specific IE from a series of IEs
- * @in_ie: Address of IEs to search
- * @in_len: Length limit from in_ie
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- * @ie: If not NULL and the specific IE is found, the IE will be copied
- * to the buf starting from the specific IE
- * @ielen: If not NULL and the specific IE is found, will set to the length
- * of the entire IE
- *
- * Returns: The address of the specific IE found, or NULL
- */
-u8 *rtw_get_ie23a_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len,
- u8 *ie, uint *ielen)
-{
- uint cnt;
- u8 *target_ie = NULL;
-
- if (ielen)
- *ielen = 0;
-
- if (!in_ie || in_len <= 0)
- return target_ie;
-
- cnt = 0;
-
- while (cnt < in_len) {
- if (eid == in_ie[cnt] &&
- (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
- target_ie = &in_ie[cnt];
-
- if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
-
- if (ielen)
- *ielen = in_ie[cnt+1]+2;
- break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
- }
- }
-
- return target_ie;
-}
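A hedged usage sketch (find_wpa_ie() is hypothetical): since the helper matches oui_len bytes at offset 2 of each IE, passing the 4-byte 00:50:f2,1 selector (the same bytes as RTW_WPA_OUI23A_TYPE at the top of this file) locates the WPA vendor IE in a raw IE blob:

/* Illustrative only: return a pointer to the whole WPA IE, or NULL. */
static u8 *find_wpa_ie(u8 *ies, uint ies_len, uint *wpa_ie_len)
{
        u8 wpa_oui_type[] = { 0x00, 0x50, 0xf2, 0x01 };

        return rtw_get_ie23a_ex(ies, ies_len, WLAN_EID_VENDOR_SPECIFIC,
                                wpa_oui_type, sizeof(wpa_oui_type),
                                NULL, wpa_ie_len);
}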
-
-/**
- * rtw_ies_remove_ie23a - Find matching IEs and remove
- * @ies: Address of IEs to search
- * @ies_len: Pointer of length of ies, will update to new length
- * @offset: The offset to start search
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- *
- * Returns: _SUCCESS: ies is updated, _FAIL: not updated
- */
-int rtw_ies_remove_ie23a(u8 *ies, uint *ies_len, uint offset, u8 eid,
- u8 *oui, u8 oui_len)
-{
- int ret = _FAIL;
- u8 *target_ie;
- u32 target_ielen;
- u8 *start;
- uint search_len;
-
- if (!ies || !ies_len || *ies_len <= offset)
- goto exit;
-
- start = ies + offset;
- search_len = *ies_len - offset;
-
- while (1) {
- target_ie = rtw_get_ie23a_ex(start, search_len, eid, oui, oui_len,
- NULL, &target_ielen);
- if (target_ie && target_ielen) {
- u8 buf[MAX_IE_SZ] = {0};
- u8 *remain_ies = target_ie + target_ielen;
- uint remain_len = search_len - (remain_ies - start);
-
- memcpy(buf, remain_ies, remain_len);
- memcpy(target_ie, buf, remain_len);
- *ies_len = *ies_len - target_ielen;
- ret = _SUCCESS;
-
- start = target_ie;
- search_len = remain_len;
- } else {
- break;
- }
- }
-exit:
- return ret;
-}
-
-void rtw_set_supported_rate23a(u8 *SupportedRates, uint mode)
-{
- memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
-
- switch (mode) {
- case WIRELESS_11B:
- memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
- break;
-
- case WIRELESS_11G:
- case WIRELESS_11A:
- case WIRELESS_11_5N:
- case WIRELESS_11A_5N: /* TODO: no basic rate for OFDM? */
- memcpy(SupportedRates, WIFI_OFDMRATES,
- IEEE80211_NUM_OFDM_RATESLEN);
- break;
-
- case WIRELESS_11BG:
- case WIRELESS_11G_24N:
- case WIRELESS_11_24N:
- case WIRELESS_11BG_24N:
- memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
- memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
- IEEE80211_NUM_OFDM_RATESLEN);
- break;
- }
-
-}
-
-uint rtw_get_rateset_len23a(u8 *rateset)
-{
- uint i = 0;
-
- while (1) {
- if (rateset[i] == 0)
- break;
-
- if (i > 12)
- break;
-
- i++;
- }
-
- return i;
-}
-
-int rtw_generate_ie23a(struct registry_priv *pregistrypriv)
-{
- u8 wireless_mode;
- int sz = 0, rateLen;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- u8 *ie = pdev_network->IEs;
- u16 cap;
-
- pdev_network->tsf = 0;
-
- cap = WLAN_CAPABILITY_IBSS;
-
- if (pregistrypriv->preamble == PREAMBLE_SHORT)
- cap |= WLAN_CAPABILITY_SHORT_PREAMBLE;
-
- if (pdev_network->Privacy)
- cap |= WLAN_CAPABILITY_PRIVACY;
-
- pdev_network->capability = cap;
-
- /* SSID */
- ie = rtw_set_ie23a(ie, WLAN_EID_SSID, pdev_network->Ssid.ssid_len,
- pdev_network->Ssid.ssid, &sz);
-
- /* supported rates */
- if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
- if (pdev_network->DSConfig > 14)
- wireless_mode = WIRELESS_11A_5N;
- else
- wireless_mode = WIRELESS_11BG_24N;
- } else {
- wireless_mode = pregistrypriv->wireless_mode;
- }
-
- rtw_set_supported_rate23a(pdev_network->SupportedRates, wireless_mode);
-
- rateLen = rtw_get_rateset_len23a(pdev_network->SupportedRates);
-
- if (rateLen > 8) {
- ie = rtw_set_ie23a(ie, WLAN_EID_SUPP_RATES, 8,
- pdev_network->SupportedRates, &sz);
- /* ie = rtw_set_ie23a(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
- } else {
- ie = rtw_set_ie23a(ie, WLAN_EID_SUPP_RATES, rateLen,
- pdev_network->SupportedRates, &sz);
- }
-
- /* DS parameter set */
- ie = rtw_set_ie23a(ie, WLAN_EID_DS_PARAMS, 1,
- (u8 *)&pdev_network->DSConfig, &sz);
-
- /* IBSS Parameter Set */
-
- ie = rtw_set_ie23a(ie, WLAN_EID_IBSS_PARAMS, 2,
- (u8 *)&pdev_network->ATIMWindow, &sz);
-
- if (rateLen > 8) {
- ie = rtw_set_ie23a(ie, WLAN_EID_EXT_SUPP_RATES, (rateLen - 8),
- (pdev_network->SupportedRates + 8), &sz);
- }
-
-
-
- /* return _SUCCESS; */
-
- return sz;
-}
-
-static int rtw_get_wpa_cipher_suite(const u8 *s)
-{
- if (!memcmp(s, WPA_CIPHER_SUITE_NONE23A, WPA_SELECTOR_LEN))
- return WPA_CIPHER_NONE;
- if (!memcmp(s, WPA_CIPHER_SUITE_WEP4023A, WPA_SELECTOR_LEN))
- return WPA_CIPHER_WEP40;
- if (!memcmp(s, WPA_CIPHER_SUITE_TKIP23A, WPA_SELECTOR_LEN))
- return WPA_CIPHER_TKIP;
- if (!memcmp(s, WPA_CIPHER_SUITE_CCMP23A, WPA_SELECTOR_LEN))
- return WPA_CIPHER_CCMP;
- if (!memcmp(s, WPA_CIPHER_SUITE_WEP10423A, WPA_SELECTOR_LEN))
- return WPA_CIPHER_WEP104;
-
- return 0;
-}
-
-static int rtw_get_wpa2_cipher_suite(const u8 *s)
-{
- if (!memcmp(s, RSN_CIPHER_SUITE_NONE23A, RSN_SELECTOR_LEN))
- return WPA_CIPHER_NONE;
- if (!memcmp(s, RSN_CIPHER_SUITE_WEP4023A, RSN_SELECTOR_LEN))
- return WPA_CIPHER_WEP40;
- if (!memcmp(s, RSN_CIPHER_SUITE_TKIP23A, RSN_SELECTOR_LEN))
- return WPA_CIPHER_TKIP;
- if (!memcmp(s, RSN_CIPHER_SUITE_CCMP23A, RSN_SELECTOR_LEN))
- return WPA_CIPHER_CCMP;
- if (!memcmp(s, RSN_CIPHER_SUITE_WEP10423A, RSN_SELECTOR_LEN))
- return WPA_CIPHER_WEP104;
-
- return 0;
-}
-
-int rtw_parse_wpa_ie23a(const u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
- int *pairwise_cipher, int *is_8021x)
-{
- int i, ret = _SUCCESS;
- int left, count;
- const u8 *pos;
-
- if (wpa_ie_len <= 0) {
- /* No WPA IE - fail silently */
- return _FAIL;
- }
-
- if (wpa_ie[1] != (u8)(wpa_ie_len - 2))
- return _FAIL;
-
- pos = wpa_ie;
-
- pos += 8;
- left = wpa_ie_len - 8;
-
- /* group_cipher */
- if (left >= WPA_SELECTOR_LEN) {
-
- *group_cipher = rtw_get_wpa_cipher_suite(pos);
-
- pos += WPA_SELECTOR_LEN;
- left -= WPA_SELECTOR_LEN;
- } else if (left > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie length mismatch, %u too much\n",
- __func__, left);
-
- return _FAIL;
- }
-
- /* pairwise_cipher */
- if (left >= 2) {
- /* count = le16_to_cpu(*(u16*)pos); */
- count = get_unaligned_le16(pos);
- pos += 2;
- left -= 2;
-
- if (count == 0 || left < count * WPA_SELECTOR_LEN) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie count botch (pairwise), count %u left %u\n",
- __func__, count, left);
- return _FAIL;
- }
-
- for (i = 0; i < count; i++) {
- *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
-
- pos += WPA_SELECTOR_LEN;
- left -= WPA_SELECTOR_LEN;
- }
- } else if (left == 1) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie too short (for key mgmt)\n", __func__);
- return _FAIL;
- }
-
- if (is_8021x) {
- if (left >= 6) {
- pos += 2;
- if (!memcmp(pos, RTW_WPA_OUI23A_TYPE, 4)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s : there has 802.1x auth\n",
- __func__);
- *is_8021x = 1;
- }
- }
- }
-
- return ret;
-}
-
-int rtw_parse_wpa2_ie23a(const u8 *rsn_ie, int rsn_ie_len, int *group_cipher,
- int *pairwise_cipher, int *is_8021x)
-{
- int i, ret = _SUCCESS;
- int left, count;
- const u8 *pos;
- u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
-
- if (rsn_ie_len <= 0) {
- /* No RSN IE - fail silently */
- return _FAIL;
- }
-
- if (*rsn_ie != WLAN_EID_RSN || *(rsn_ie+1) != (u8)(rsn_ie_len - 2)) {
- return _FAIL;
- }
-
- pos = rsn_ie;
- pos += 4;
- left = rsn_ie_len - 4;
-
- /* group_cipher */
- if (left >= RSN_SELECTOR_LEN) {
- *group_cipher = rtw_get_wpa2_cipher_suite(pos);
-
- pos += RSN_SELECTOR_LEN;
- left -= RSN_SELECTOR_LEN;
- } else if (left > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie length mismatch, %u too much\n",
- __func__, left);
- return _FAIL;
- }
-
- /* pairwise_cipher */
- if (left >= 2) {
- /* count = le16_to_cpu(*(u16*)pos); */
- count = get_unaligned_le16(pos);
- pos += 2;
- left -= 2;
-
- if (count == 0 || left < count * RSN_SELECTOR_LEN) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie count botch (pairwise), count %u left %u\n",
- __func__, count, left);
- return _FAIL;
- }
-
- for (i = 0; i < count; i++) {
- *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
-
- pos += RSN_SELECTOR_LEN;
- left -= RSN_SELECTOR_LEN;
- }
- } else if (left == 1) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: ie too short (for key mgmt)\n", __func__);
-
- return _FAIL;
- }
-
- if (is_8021x) {
- if (left >= 6) {
- pos += 2;
- if (!memcmp(pos, SUITE_1X, 4)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s (): there has 802.1x auth\n",
- __func__);
- *is_8021x = 1;
- }
- }
- }
-
- return ret;
-}
-
-/**
- * rtw_get_wps_attr23a - Search a specific WPS attribute from a given WPS IE
- * @wps_ie: Address of WPS IE to search
- * @wps_ielen: Length limit from wps_ie
- * @target_attr_id: The attribute ID of WPS attribute to search
- * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute
- * will be copied to the buf starting from buf_attr
- * @len_attr: If not NULL and the WPS attribute is found, will set to the
- * length of the entire WPS attribute
- *
- * Returns: the address of the specific WPS attribute found, or NULL
- */
-const u8 *rtw_get_wps_attr23a(const u8 *wps_ie, uint wps_ielen,
- u16 target_attr_id, u8 *buf_attr, u32 *len_attr)
-{
- const u8 *attr_ptr = NULL;
- const u8 *target_attr_ptr = NULL;
- u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
-
- if (len_attr)
- *len_attr = 0;
-
- if (wps_ie[0] != WLAN_EID_VENDOR_SPECIFIC ||
- memcmp(wps_ie + 2, wps_oui, 4)) {
- return attr_ptr;
- }
-
- /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
- attr_ptr = wps_ie + 6; /* goto first attr */
-
- while (attr_ptr - wps_ie < wps_ielen) {
- /* 4 = 2(Attribute ID) + 2(Length) */
- u16 attr_id = get_unaligned_be16(attr_ptr);
- u16 attr_data_len = get_unaligned_be16(attr_ptr + 2);
- u16 attr_len = attr_data_len + 4;
-
- /* DBG_8723A("%s attr_ptr:%p, id:%u, length:%u\n", __func__, attr_ptr, attr_id, attr_data_len); */
- if (attr_id == target_attr_id) {
- target_attr_ptr = attr_ptr;
-
- if (buf_attr)
- memcpy(buf_attr, attr_ptr, attr_len);
-
- if (len_attr)
- *len_attr = attr_len;
-
- break;
- } else {
- attr_ptr += attr_len; /* goto next */
- }
- }
-
- return target_attr_ptr;
-}
-
-/**
- * rtw_get_wps_attr_content23a - Search a specific WPS attribute content
- * from a given WPS IE
- * @wps_ie: Address of WPS IE to search
- * @wps_ielen: Length limit from wps_ie
- * @target_attr_id: The attribute ID of WPS attribute to search
- * @buf_content: If not NULL and the WPS attribute is found, WPS attribute
- * content will be copied to the buf starting from buf_content
- * @len_content: If not NULL and the WPS attribute is found, will set to the
- * length of the WPS attribute content
- *
- * Returns: the address of the specific WPS attribute content found, or NULL
- */
-const u8 *rtw_get_wps_attr_content23a(const u8 *wps_ie, uint wps_ielen,
- u16 target_attr_id, u8 *buf_content)
-{
- const u8 *attr_ptr;
- u32 attr_len;
-
- attr_ptr = rtw_get_wps_attr23a(wps_ie, wps_ielen, target_attr_id,
- NULL, &attr_len);
-
- if (attr_ptr && attr_len) {
- if (buf_content)
- memcpy(buf_content, attr_ptr + 4, attr_len - 4);
-
- return attr_ptr + 4;
- }
-
- return NULL;
-}
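A hedged usage sketch (get_wps_device_name() is hypothetical, and the 0x1011 Device Name attribute ID is taken from the WPS specification rather than from anything defined in this file):

/* Illustrative only: copy the WPS Device Name content into name_buf. */
static const u8 *get_wps_device_name(const u8 *wps_ie, uint wps_ielen,
                                     u8 *name_buf)
{
        return rtw_get_wps_attr_content23a(wps_ie, wps_ielen, 0x1011,
                                           name_buf);
}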
-
-static int rtw_get_cipher_info(struct wlan_network *pnetwork)
-{
- const u8 *pbuf;
- int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
- int ret = _FAIL;
- int r, plen;
- char *pie;
-
- pie = pnetwork->network.IEs;
- plen = pnetwork->network.IELength;
-
- pbuf = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA, pie, plen);
-
- if (pbuf && pbuf[1] > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "rtw_get_cipher_info: wpa_ielen: %d\n", pbuf[1]);
- r = rtw_parse_wpa_ie23a(pbuf, pbuf[1] + 2, &group_cipher,
- &pairwise_cipher, &is8021x);
- if (r == _SUCCESS) {
- pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
- pnetwork->BcnInfo.group_cipher = group_cipher;
- pnetwork->BcnInfo.is_8021x = is8021x;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s: pnetwork->pairwise_cipher: %d, is_8021x is %d\n",
- __func__, pnetwork->BcnInfo.pairwise_cipher,
- pnetwork->BcnInfo.is_8021x);
- ret = _SUCCESS;
- }
- } else {
- pbuf = cfg80211_find_ie(WLAN_EID_RSN, pie, plen);
-
- if (pbuf && pbuf[1] > 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "get RSN IE\n");
- r = rtw_parse_wpa2_ie23a(pbuf, pbuf[1] + 2,
- &group_cipher, &pairwise_cipher,
- &is8021x);
- if (r == _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "get RSN IE OK!!!\n");
- pnetwork->BcnInfo.pairwise_cipher =
- pairwise_cipher;
- pnetwork->BcnInfo.group_cipher = group_cipher;
- pnetwork->BcnInfo.is_8021x = is8021x;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s: pnetwork->pairwise_cipher: %d,pnetwork->group_cipher is %d, is_8021x is %d\n",
- __func__,
- pnetwork->BcnInfo.pairwise_cipher,
- pnetwork->BcnInfo.group_cipher,
- pnetwork->BcnInfo.is_8021x);
- ret = _SUCCESS;
- }
- }
- }
-
- return ret;
-}
-
-void rtw_get_bcn_info23a(struct wlan_network *pnetwork)
-{
- u8 bencrypt = 0;
- int pie_len;
- u8 *pie;
- const u8 *p;
-
- if (pnetwork->network.capability & WLAN_CAPABILITY_PRIVACY) {
- bencrypt = 1;
- pnetwork->network.Privacy = 1;
- } else
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s: ssid =%s\n", __func__, pnetwork->network.Ssid.ssid);
-
- pie = pnetwork->network.IEs;
- pie_len = pnetwork->network.IELength;
-
- p = cfg80211_find_ie(WLAN_EID_RSN, pie, pie_len);
- if (p && p[1]) {
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
- } else if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- pie, pie_len)) {
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
- } else {
- if (bencrypt)
- pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
- }
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s: pnetwork->encryp_protocol is %x\n", __func__,
- pnetwork->BcnInfo.encryp_protocol);
- rtw_get_cipher_info(pnetwork);
-
- /* get bwmode and ch_offset */
-}
-
-/* show MCS rate, unit: 100Kbps */
-u16 rtw_mcs_rate23a(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
- struct ieee80211_mcs_info *mcs)
-{
- u16 max_rate = 0;
-
- if (rf_type == RF_1T1R) {
- if (mcs->rx_mask[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1500:1350):
- ((short_GI_20)?722:650);
- else if (mcs->rx_mask[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1350:1215):
- ((short_GI_20)?650:585);
- else if (mcs->rx_mask[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):
- ((short_GI_20)?578:520);
- else if (mcs->rx_mask[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):
- ((short_GI_20)?433:390);
- else if (mcs->rx_mask[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):
- ((short_GI_20)?289:260);
- else if (mcs->rx_mask[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40)?450:405):
- ((short_GI_20)?217:195);
- else if (mcs->rx_mask[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):
- ((short_GI_20)?144:130);
- else if (mcs->rx_mask[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40)?150:135):
- ((short_GI_20)?72:65);
- } else {
- if (mcs->rx_mask[1]) {
- if (mcs->rx_mask[1] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40)?3000:2700):((short_GI_20)?1444:1300);
- else if (mcs->rx_mask[1] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40)?2700:2430):((short_GI_20)?1300:1170);
- else if (mcs->rx_mask[1] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40)?2400:2160):((short_GI_20)?1156:1040);
- else if (mcs->rx_mask[1] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1800:1620):((short_GI_20)?867:780);
- else if (mcs->rx_mask[1] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):((short_GI_20)?578:520);
- else if (mcs->rx_mask[1] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):((short_GI_20)?433:390);
- else if (mcs->rx_mask[1] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):((short_GI_20)?289:260);
- else if (mcs->rx_mask[1] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):((short_GI_20)?144:130);
- } else {
- if (mcs->rx_mask[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1500:1350):((short_GI_20)?722:650);
- else if (mcs->rx_mask[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1350:1215):((short_GI_20)?650:585);
- else if (mcs->rx_mask[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40)?1200:1080):((short_GI_20)?578:520);
- else if (mcs->rx_mask[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40)?900:810):((short_GI_20)?433:390);
- else if (mcs->rx_mask[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40)?600:540):((short_GI_20)?289:260);
- else if (mcs->rx_mask[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40)?450:405):((short_GI_20)?217:195);
- else if (mcs->rx_mask[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40)?300:270):((short_GI_20)?144:130);
- else if (mcs->rx_mask[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40)?150:135):((short_GI_20)?72:65);
- }
- }
- return max_rate;
-}
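A hedged worked example of the lookup above (example_max_rate() is hypothetical): a 1T1R device advertising MCS 0-7 on a 40 MHz channel with short GI maps to 1500 in the 100 kbps unit noted above, i.e. 150 Mbps:

/* Illustrative only: highest HT rate for 1T1R, MCS 0-7, HT40, short GI. */
static u16 example_max_rate(struct ieee80211_mcs_info *mcs)
{
        mcs->rx_mask[0] = 0xff; /* MCS 0-7 supported */
        mcs->rx_mask[1] = 0x00; /* single spatial stream */

        return rtw_mcs_rate23a(RF_1T1R, 1, 1, 1, mcs); /* 1500 -> 150 Mbps */
}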
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
deleted file mode 100644
index a786fc4bdb53..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_mlme.c
+++ /dev/null
@@ -1,2314 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_MLME_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <xmit_osdep.h>
-#include <hal_intf.h>
-#include <mlme_osdep.h>
-#include <sta_info.h>
-#include <linux/ieee80211.h>
-#include <wifi.h>
-#include <wlan_bssdef.h>
-#include <rtw_sreset.h>
-
-static struct wlan_network *
-rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv);
-static int rtw_do_join(struct rtw_adapter *padapter);
-
-static void rtw_init_mlme_timer(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- setup_timer(&pmlmepriv->assoc_timer, rtw23a_join_to_handler,
- (unsigned long)padapter);
-
- setup_timer(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler23a,
- (unsigned long)padapter);
-
- setup_timer(&pmlmepriv->dynamic_chk_timer,
- rtw_dynamic_check_timer_handler, (unsigned long)padapter);
-
- setup_timer(&pmlmepriv->set_scan_deny_timer,
- rtw_set_scan_deny_timer_hdl, (unsigned long)padapter);
-}
-
-int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- pmlmepriv->nic_hdl = padapter;
-
- pmlmepriv->fw_state = 0;
- pmlmepriv->cur_network.network.ifmode = NL80211_IFTYPE_UNSPECIFIED;
- /* 1: active, 0: passive. Maybe someday we should rename this
- variable to "active_mode" (Jeff) */
- pmlmepriv->scan_mode = SCAN_ACTIVE;
-
- spin_lock_init(&pmlmepriv->lock);
- _rtw_init_queue23a(&pmlmepriv->scanned_queue);
-
- memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct cfg80211_ssid));
-
- rtw_clear_scan_deny(padapter);
-
- rtw_init_mlme_timer(padapter);
- return _SUCCESS;
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
-{
- if (*ppie) {
- kfree(*ppie);
- *plen = 0;
- *ppie = NULL;
- }
-}
-#endif
-
-void rtw23a_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
-{
-#ifdef CONFIG_8723AU_AP_MODE
- kfree(pmlmepriv->assoc_req);
- kfree(pmlmepriv->assoc_rsp);
- rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie,
- &pmlmepriv->wps_probe_req_ie_len);
-#endif
-}
-
-void rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv)
-{
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_free_mlme_priv23a\n");
-
- rtw23a_free_mlme_priv_ie_data(pmlmepriv);
-}
-
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv, gfp_t gfp)
-{
- struct wlan_network *pnetwork;
-
- pnetwork = kzalloc(sizeof(struct wlan_network), gfp);
- if (pnetwork) {
- INIT_LIST_HEAD(&pnetwork->list);
- pnetwork->network_type = 0;
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
- pnetwork->join_res = 0;
- }
-
- return pnetwork;
-}
-
-static void _rtw_free_network23a(struct mlme_priv *pmlmepriv,
- struct wlan_network *pnetwork)
-{
- if (!pnetwork)
- return;
-
- if (pnetwork->fixed == true)
- return;
-
- list_del_init(&pnetwork->list);
-
- kfree(pnetwork);
-}
-
-/*
- return the wlan_network with the matching addr
-
- Must be called in atomic context to avoid possible race conditions.
-*/
-struct wlan_network *
-rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr)
-{
- struct list_head *phead, *plist;
- struct wlan_network *pnetwork = NULL;
-
- if (is_zero_ether_addr(addr)) {
- pnetwork = NULL;
- goto exit;
- }
-
- /* spin_lock_bh(&scanned_queue->lock); */
-
- phead = get_list_head(scanned_queue);
- plist = phead->next;
-
- while (plist != phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- if (ether_addr_equal(addr, pnetwork->network.MacAddress))
- break;
-
- plist = plist->next;
- }
-
- if (plist == phead)
- pnetwork = NULL;
-
- /* spin_unlock_bh(&scanned_queue->lock); */
-
-exit:
-
- return pnetwork;
-}
-
-void rtw_free_network_queue23a(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- struct wlan_network *pnetwork, *ptmp;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue;
-
- spin_lock_bh(&scanned_queue->lock);
- phead = get_list_head(scanned_queue);
- list_for_each_entry_safe(pnetwork, ptmp, phead, list)
- _rtw_free_network23a(pmlmepriv, pnetwork);
- spin_unlock_bh(&scanned_queue->lock);
-}
-
-int rtw_if_up23a(struct rtw_adapter *padapter)
-{
- int res;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
- !check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "rtw_if_up23a:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
- padapter->bDriverStopped, padapter->bSurpriseRemoved);
- res = false;
- } else
- res = true;
-
- return res;
-}
-
-void rtw_generate_random_ibss23a(u8 *pibss)
-{
- unsigned long curtime = jiffies;
-
- pibss[0] = 0x02; /* in ad-hoc mode bit 1 must be set to 1 */
- pibss[1] = 0x11;
- pibss[2] = 0x87;
- pibss[3] = curtime & 0xff;/* p[0]; */
- pibss[4] = (curtime >> 8) & 0xff;/* p[1]; */
- pibss[5] = (curtime >> 16) & 0xff;/* p[2]; */
-}
-
-void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming)
-{
- if (to_roaming == 0)
- adapter->mlmepriv.to_join = false;
- adapter->mlmepriv.to_roaming = to_roaming;
-}
-
-static void _rtw_roaming(struct rtw_adapter *padapter,
- struct wlan_network *tgt_network)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *pnetwork;
- int do_join_r;
-
- if (tgt_network)
- pnetwork = tgt_network;
- else
- pnetwork = &pmlmepriv->cur_network;
-
- if (padapter->mlmepriv.to_roaming > 0) {
- DBG_8723A("roaming from %s(%pM), length:%d\n",
- pnetwork->network.Ssid.ssid,
- pnetwork->network.MacAddress,
- pnetwork->network.Ssid.ssid_len);
- memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid,
- sizeof(struct cfg80211_ssid));
-
- pmlmepriv->assoc_by_bssid = false;
-
- while (1) {
- do_join_r = rtw_do_join(padapter);
- if (do_join_r == _SUCCESS)
- break;
- else {
- DBG_8723A("roaming do_join return %d\n",
- do_join_r);
- pmlmepriv->to_roaming--;
-
- if (padapter->mlmepriv.to_roaming > 0)
- continue;
- else {
- DBG_8723A("%s(%d) -to roaming fail, "
- "indicate_disconnect\n",
- __func__, __LINE__);
- rtw_indicate_disconnect23a(padapter);
- break;
- }
- }
- }
- }
-}
-
-void rtw23a_roaming(struct rtw_adapter *padapter,
- struct wlan_network *tgt_network)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- spin_lock_bh(&pmlmepriv->lock);
- _rtw_roaming(padapter, tgt_network);
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
- struct wlan_network *pnetwork)
-{
- _rtw_free_network23a(pmlmepriv, pnetwork);
-}
-
-bool rtw_is_same_ibss23a(struct rtw_adapter *adapter,
- struct wlan_network *pnetwork)
-{
- int ret;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
- if (psecuritypriv->dot11PrivacyAlgrthm != 0 &&
- pnetwork->network.Privacy == 0)
- ret = false;
- else if (psecuritypriv->dot11PrivacyAlgrthm == 0 &&
- pnetwork->network.Privacy == 1)
- ret = false;
- else
- ret = true;
-
- return ret;
-}
-
-inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b);
-inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
-{
- return (a->Ssid.ssid_len == b->Ssid.ssid_len) &&
- !memcmp(a->Ssid.ssid, b->Ssid.ssid, a->Ssid.ssid_len);
-}
-
-int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
-{
- u16 s_cap, d_cap;
-
- s_cap = src->capability;
- d_cap = dst->capability;
-
- return ((src->Ssid.ssid_len == dst->Ssid.ssid_len) &&
- /* (src->DSConfig == dst->DSConfig) && */
- ether_addr_equal(src->MacAddress, dst->MacAddress) &&
- !memcmp(src->Ssid.ssid, dst->Ssid.ssid, src->Ssid.ssid_len) &&
- (s_cap & WLAN_CAPABILITY_IBSS) ==
- (d_cap & WLAN_CAPABILITY_IBSS) &&
- (s_cap & WLAN_CAPABILITY_ESS) == (d_cap & WLAN_CAPABILITY_ESS));
-}
-
-struct wlan_network *
-rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
-{
- struct list_head *phead;
- struct wlan_network *pwlan;
- struct wlan_network *oldest = NULL;
-
- phead = get_list_head(scanned_queue);
- list_for_each_entry(pwlan, phead, list) {
- if (pwlan->fixed != true) {
- if (!oldest || time_after(oldest->last_scanned,
- pwlan->last_scanned))
- oldest = pwlan;
- }
- }
-
- return oldest;
-}
-
-void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
- struct rtw_adapter *padapter, bool update_ie)
-{
- u8 ss_ori = dst->SignalStrength;
- u8 sq_ori = dst->SignalQuality;
- long rssi_ori = dst->Rssi;
-
- u8 ss_smp = src->SignalStrength;
- u8 sq_smp = src->SignalQuality;
- long rssi_smp = src->Rssi;
-
- u8 ss_final;
- u8 sq_final;
- long rssi_final;
-
- DBG_8723A("%s %s(%pM, ch%u) ss_ori:%3u, sq_ori:%3u, rssi_ori:%3ld, "
- "ss_smp:%3u, sq_smp:%3u, rssi_smp:%3ld\n",
- __func__, src->Ssid.ssid, src->MacAddress,
- src->DSConfig, ss_ori, sq_ori, rssi_ori,
- ss_smp, sq_smp, rssi_smp
- );
-
- /* The rule below is 1/5 for sample value, 4/5 for history value */
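- /* e.g. history (dst) 80 and sample (src) 60 give (60 + 80 * 4) / 5 = 76 */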
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) &&
- is_same_network23a(&padapter->mlmepriv.cur_network.network, src)) {
- /* Take the recvpriv's value for the connected AP */
- ss_final = padapter->recvpriv.signal_strength;
- sq_final = padapter->recvpriv.signal_qual;
- /* the rssi value here is undecorated, and will be
- used for antenna diversity */
- if (sq_smp != 101) /* from the right channel */
- rssi_final = (src->Rssi + dst->Rssi * 4) / 5;
- else
- rssi_final = rssi_ori;
- } else {
- if (sq_smp != 101) { /* from the right channel */
- ss_final = ((u32)src->SignalStrength +
- (u32)dst->SignalStrength * 4) / 5;
- sq_final = ((u32)src->SignalQuality +
- (u32)dst->SignalQuality * 4) / 5;
- rssi_final = (src->Rssi + dst->Rssi * 4) / 5;
- } else {
- /* bss info not received from the right channel; use
- the original RX signal info */
- ss_final = dst->SignalStrength;
- sq_final = dst->SignalQuality;
- rssi_final = dst->Rssi;
- }
-
- }
-
- if (update_ie)
- memcpy(dst, src, get_wlan_bssid_ex_sz(src));
-
- dst->SignalStrength = ss_final;
- dst->SignalQuality = sq_final;
- dst->Rssi = rssi_final;
-
- DBG_8723A("%s %s(%pM), SignalStrength:%u, SignalQuality:%u, "
- "RawRSSI:%ld\n", __func__, dst->Ssid.ssid, dst->MacAddress,
- dst->SignalStrength, dst->SignalQuality, dst->Rssi);
-}
-
-static void update_current_network(struct rtw_adapter *adapter,
- struct wlan_bssid_ex *pnetwork)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- is_same_network23a(&pmlmepriv->cur_network.network, pnetwork)) {
- update_network23a(&pmlmepriv->cur_network.network,
- pnetwork, adapter, true);
-
- rtw_update_protection23a(adapter,
- pmlmepriv->cur_network.network.IEs,
- pmlmepriv->cur_network.network.IELength);
- }
-}
-
-/*
-
-Caller must hold pmlmepriv->lock first.
-
-*/
-static void rtw_update_scanned_network(struct rtw_adapter *adapter,
- struct wlan_bssid_ex *target)
-{
- struct list_head *plist, *phead;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_network *pnetwork = NULL;
- struct wlan_network *oldest = NULL;
- struct rtw_queue *queue = &pmlmepriv->scanned_queue;
- u32 bssid_ex_sz;
- int found = 0;
-
- spin_lock_bh(&queue->lock);
- phead = get_list_head(queue);
- list_for_each(plist, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- if (is_same_network23a(&pnetwork->network, target)) {
- found = 1;
- break;
- }
- if (!oldest || time_after(oldest->last_scanned,
- pnetwork->last_scanned))
- oldest = pnetwork;
- }
-
- /* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
- if (!found) {
- pnetwork = rtw_alloc_network(pmlmepriv, GFP_ATOMIC);
- if (!pnetwork) {
- if (!oldest) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "something wrong here\n");
- goto exit;
- }
- pnetwork = oldest;
- } else
- list_add_tail(&pnetwork->list, &queue->queue);
-
- bssid_ex_sz = get_wlan_bssid_ex_sz(target);
- target->Length = bssid_ex_sz;
- memcpy(&pnetwork->network, target, bssid_ex_sz);
-
- /* variable initialize */
- pnetwork->fixed = false;
- pnetwork->last_scanned = jiffies;
-
- pnetwork->network_type = 0;
- pnetwork->join_res = 0;
-
- /* bss info not received from the right channel */
- if (pnetwork->network.SignalQuality == 101)
- pnetwork->network.SignalQuality = 0;
- } else {
- /*
- * we have an entry and we are going to update it. But
- * this entry may be already expired. In this case we
- * do the same as we found a new net and call the
- * new_net handler
- */
- bool update_ie = true;
-
- pnetwork->last_scanned = jiffies;
-
- /* target->reserved == 1 means that the scanned network
- * comes from a beacon frame. */
- if (pnetwork->network.IELength > target->IELength &&
- target->reserved == 1)
- update_ie = false;
-
- update_network23a(&pnetwork->network, target, adapter,
- update_ie);
- }
-
-exit:
- spin_unlock_bh(&queue->lock);
-}
-
-static void rtw_add_network(struct rtw_adapter *adapter,
- struct wlan_bssid_ex *pnetwork)
-{
- update_current_network(adapter, pnetwork);
- rtw_update_scanned_network(adapter, pnetwork);
-}
-
-/* select the desired network based on the capability of the (i)bss. */
-/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
-/* (4) HT */
-/* (5) others */
-static int rtw_is_desired_network(struct rtw_adapter *adapter,
- struct wlan_network *pnetwork)
-{
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u32 desired_encmode;
- u32 privacy;
- int bselected = true;
-
- desired_encmode = psecuritypriv->ndisencryptstatus;
- privacy = pnetwork->network.Privacy;
-
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- pnetwork->network.IEs,
- pnetwork->network.IELength))
- return true;
- else
- return false;
- }
- if (adapter->registrypriv.wifi_spec == 1) {
- /* for correct flow of 802.1X, to do.... */
- if (desired_encmode == Ndis802_11EncryptionDisabled &&
- privacy != 0)
- bselected = false;
- }
-
- if (desired_encmode != Ndis802_11EncryptionDisabled && privacy == 0) {
- DBG_8723A("desired_encmode: %d, privacy: %d\n",
- desired_encmode, privacy);
- bselected = false;
- }
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- if (pnetwork->network.ifmode !=
- pmlmepriv->cur_network.network.ifmode)
- bselected = false;
- }
-
- return bselected;
-}
-
-void rtw_survey_event_cb23a(struct rtw_adapter *adapter, const u8 *pbuf)
-{
- u32 len;
- struct wlan_bssid_ex *pnetwork;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct survey_event *survey = (struct survey_event *)pbuf;
-
- pnetwork = survey->bss;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "rtw_survey_event_cb23a, ssid=%s\n", pnetwork->Ssid.ssid);
-
- len = get_wlan_bssid_ex_sz(pnetwork);
- if (len > (sizeof(struct wlan_bssid_ex))) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "****rtw_survey_event_cb23a: return a wrong bss ***\n");
- return;
- }
-
- spin_lock_bh(&pmlmepriv->lock);
-
- /* update the IBSS network's timestamp */
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- if (ether_addr_equal(pmlmepriv->cur_network.network.MacAddress,
- pnetwork->MacAddress)) {
- struct wlan_network *ibss_wlan;
-
- pmlmepriv->cur_network.network.beacon_interval =
- pnetwork->beacon_interval;
- pmlmepriv->cur_network.network.capability =
- pnetwork->capability;
- pmlmepriv->cur_network.network.tsf = pnetwork->tsf;
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- ibss_wlan = rtw_find_network23a(
- &pmlmepriv->scanned_queue,
- pnetwork->MacAddress);
- if (ibss_wlan) {
- pmlmepriv->cur_network.network.beacon_interval =
- ibss_wlan->network.beacon_interval;
- pmlmepriv->cur_network.network.capability =
- ibss_wlan->network.capability;
- pmlmepriv->cur_network.network.tsf =
- ibss_wlan->network.tsf;
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto exit;
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- }
- }
-
- /* lock pmlmepriv->lock when accessing network_q */
- if (!check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- if (pnetwork->Ssid.ssid[0] == 0)
- pnetwork->Ssid.ssid_len = 0;
-
- rtw_add_network(adapter, pnetwork);
- }
-
-exit:
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- kfree(survey->bss);
- survey->bss = NULL;
-}
-
-void
-rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- int ret;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (pmlmepriv->wps_probe_req_ie) {
- pmlmepriv->wps_probe_req_ie_len = 0;
- kfree(pmlmepriv->wps_probe_req_ie);
- pmlmepriv->wps_probe_req_ie = NULL;
- }
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "rtw_surveydone_event_callback23a: fw_state:%x\n",
- get_fwstate(pmlmepriv));
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- del_timer_sync(&pmlmepriv->scan_to_timer);
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
- } else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "nic status =%x, survey done event comes too late!\n",
- get_fwstate(pmlmepriv));
- }
-
- rtw_set_signal_stat_timer(&adapter->recvpriv);
-
- if (pmlmepriv->to_join == true) {
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- ret = rtw_select_and_join_from_scanned_queue23a(
- pmlmepriv);
- if (ret != _SUCCESS)
- rtw_do_join_adhoc(adapter);
- } else {
- pmlmepriv->to_join = false;
- ret = rtw_select_and_join_from_scanned_queue23a(
- pmlmepriv);
- if (ret != _SUCCESS) {
- DBG_8723A("try_to_join, but select scanning "
- "queue fail, to_roaming:%d\n",
- adapter->mlmepriv.to_roaming);
- if (adapter->mlmepriv.to_roaming) {
- if (--pmlmepriv->to_roaming == 0 ||
- rtw_sitesurvey_cmd23a(
- adapter,
- &pmlmepriv->assoc_ssid, 1,
- NULL, 0) != _SUCCESS) {
- rtw_set_roaming(adapter, 0);
- rtw_free_assoc_resources23a(
- adapter, 1);
- rtw_indicate_disconnect23a(
- adapter);
- } else
- pmlmepriv->to_join = true;
- }
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
- }
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- rtw_os_xmit_schedule23a(adapter);
-
- if (pmlmeext->sitesurvey_res.bss_cnt == 0)
- rtw_sreset_reset(adapter);
-
- rtw_cfg80211_surveydone_event_callback(adapter);
-}
-
-static void free_scanqueue(struct mlme_priv *pmlmepriv)
-{
- struct wlan_network *pnetwork, *ptemp;
- struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue;
- struct list_head *phead;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+free_scanqueue\n");
- spin_lock_bh(&scan_queue->lock);
- phead = get_list_head(scan_queue);
- list_for_each_entry_safe(pnetwork, ptemp, phead, list) {
- pnetwork->fixed = false;
- _rtw_free_network23a(pmlmepriv, pnetwork);
- }
- spin_unlock_bh(&scan_queue->lock);
-}
-
-/*
- *rtw_free_assoc_resources23a: the caller has to lock pmlmepriv->lock
- */
-void rtw_free_assoc_resources23a(struct rtw_adapter *adapter,
- int lock_scanned_queue)
-{
- struct wlan_network *pwlan;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct sta_info *psta;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "+rtw_free_assoc_resources23a\n");
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "tgt_network->network.MacAddress=%pM ssid=%s\n",
- tgt_network->network.MacAddress,
- tgt_network->network.Ssid.ssid);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) {
- psta = rtw_get_stainfo23a(&adapter->stapriv,
- tgt_network->network.MacAddress);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- }
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE |
- WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) {
- rtw_free_all_stainfo23a(adapter);
-
- psta = rtw_get_bcmc_stainfo23a(adapter);
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- rtw_init_bcmc_stainfo23a(adapter);
- }
-
- if (lock_scanned_queue)
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
- pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue,
- tgt_network->network.MacAddress);
- if (pwlan)
- pwlan->fixed = false;
- else
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_free_assoc_resources23a : pwlan== NULL\n");
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) &&
- adapter->stapriv.asoc_sta_count == 1)
- rtw_free_network_nolock(pmlmepriv, pwlan);
-
- if (lock_scanned_queue)
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- pmlmepriv->key_mask = 0;
-}
-
-/*
-*rtw_indicate_connect23a: the caller has to lock pmlmepriv->lock
-*/
-void rtw_indicate_connect23a(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "+rtw_indicate_connect23a\n");
-
- pmlmepriv->to_join = false;
-
- if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- set_fwstate(pmlmepriv, _FW_LINKED);
-
- rtw_cfg80211_indicate_connect(padapter);
-
- netif_carrier_on(padapter->pnetdev);
-
- if (padapter->pid[2] != 0)
- kill_pid(find_vpid(padapter->pid[2]), SIGALRM, 1);
- }
-
- rtw_set_roaming(padapter, 0);
-
- rtw_set_scan_deny(padapter, 3000);
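- /* new scan requests are denied for 3000 ms right after connecting */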
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "-rtw_indicate_connect23a: fw_state=0x%08x\n",
- get_fwstate(pmlmepriv));
-}
-
-/*
- *rtw_indicate_disconnect23a: the caller has to lock pmlmepriv->lock
- */
-void rtw_indicate_disconnect23a(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "+rtw_indicate_disconnect23a\n");
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING|WIFI_UNDER_WPS);
-
- /* DBG_8723A("clear wps when %s\n", __func__); */
-
- if (padapter->mlmepriv.to_roaming > 0)
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
-
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
- padapter->mlmepriv.to_roaming <= 0) {
- rtw_os_indicate_disconnect23a(padapter);
-
- /* set ips_deny_time to avoid enter IPS before LPS leave */
- padapter->pwrctrlpriv.ips_deny_time =
- jiffies + msecs_to_jiffies(3000);
-
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
-
- rtw_clear_scan_deny(padapter);
- }
-
- rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 1);
-}
-
-void rtw_scan_abort23a(struct rtw_adapter *adapter)
-{
- unsigned long start;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-
- start = jiffies;
- pmlmeext->scan_abort = true;
- while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
- jiffies_to_msecs(jiffies - start) <= 200) {
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- break;
-
- DBG_8723A("%s(%s): fw_state = _FW_UNDER_SURVEY!\n",
- __func__, adapter->pnetdev->name);
- msleep(20);
- }
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
- DBG_8723A("%s(%s): waiting for scan_abort time out!\n",
- __func__, adapter->pnetdev->name);
- rtw_cfg80211_indicate_scan_done(wdev_to_priv(adapter->rtw_wdev),
- true);
- }
- pmlmeext->scan_abort = false;
-}
-
-static struct sta_info *
-rtw_joinbss_update_stainfo(struct rtw_adapter *padapter,
- struct wlan_network *pnetwork)
-{
- int i;
- struct sta_info *bmc_sta, *psta;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta = rtw_get_stainfo23a(pstapriv, pnetwork->network.MacAddress);
- if (!psta)
- psta = rtw_alloc_stainfo23a(pstapriv,
- pnetwork->network.MacAddress,
- GFP_ATOMIC);
-
- if (psta) { /* update ptarget_sta */
- DBG_8723A("%s\n", __func__);
-
- psta->aid = pnetwork->join_res;
- psta->mac_id = 0;
-
- /* sta mode */
- rtl8723a_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
-
- /* security related */
- if (padapter->securitypriv.dot11AuthAlgrthm ==
- dot11AuthAlgrthm_8021X) {
- padapter->securitypriv.binstallGrpkey = 0;
- padapter->securitypriv.busetkipkey = 0;
-
- psta->ieee8021x_blocked = true;
- psta->dot118021XPrivacy =
- padapter->securitypriv.dot11PrivacyAlgrthm;
-
- memset(&psta->dot118021x_UncstKey, 0,
- sizeof(union Keytype));
-
- memset(&psta->dot11tkiprxmickey, 0,
- sizeof(union Keytype));
- memset(&psta->dot11tkiptxmickey, 0,
- sizeof(union Keytype));
-
- memset(&psta->dot11txpn, 0, sizeof(union pn48));
- memset(&psta->dot11rxpn, 0, sizeof(union pn48));
- }
-
- /* Commented by Albert 2012/07/21 */
- /* When doing WPS, wps_ie_len won't be equal to 0, and the
- Wi-Fi driver shouldn't allow data packets to be
- transmitted. */
- if (padapter->securitypriv.wps_ie_len != 0) {
- psta->ieee8021x_blocked = true;
- padapter->securitypriv.wps_ie_len = 0;
- }
-
- /* A-MPDU Rx reordering buffer control for bmc_sta &
- * sta_info */
- /* if A-MPDU Rx is enabled, reset
- rx_ordering_ctrl wstart_b (indicate_seq) to the default
- value 0xffff */
- /* todo: check if AP can send A-MPDU packets */
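- /* one reorder control block per TID (0..15) */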
- for (i = 0; i < 16 ; i++) {
- /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- /* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
- preorder_ctrl->wsize_b = 64;
- }
-
- bmc_sta = rtw_get_bcmc_stainfo23a(padapter);
- if (bmc_sta) {
- for (i = 0; i < 16 ; i++) {
- preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- /* max_ampdu_sz; ex. 32(kbytes) ->
- wsize_b = 32 */
- preorder_ctrl->wsize_b = 64;
- }
- }
-
- /* misc. */
- update_sta_info23a(padapter, psta);
-
- }
-
- return psta;
-}
-
-/* pnetwork : returns from rtw23a_joinbss_event_cb */
-/* ptarget_wlan: found from scanned_queue */
-static void
-rtw_joinbss_update_network23a(struct rtw_adapter *padapter,
- struct wlan_network *ptarget_wlan,
- struct wlan_network *pnetwork)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
-
- DBG_8723A("%s\n", __func__);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "fw_state:%x, BSSID:%pM\n",
- get_fwstate(pmlmepriv),
- pnetwork->network.MacAddress);
-
- /* why not use ptarget_wlan?? */
- memcpy(&cur_network->network, &pnetwork->network,
- pnetwork->network.Length);
- /* some IEs in pnetwork are wrong, so we should use the ptarget_wlan IEs */
- cur_network->network.IELength = ptarget_wlan->network.IELength;
- memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0],
- MAX_IE_SZ);
-
- cur_network->network.capability = ptarget_wlan->network.capability;
- cur_network->network.beacon_interval =
- ptarget_wlan->network.beacon_interval;
- cur_network->network.tsf = ptarget_wlan->network.tsf;
-
- rtw_set_signal_stat_timer(&padapter->recvpriv);
- padapter->recvpriv.signal_strength =
- ptarget_wlan->network.SignalStrength;
- padapter->recvpriv.signal_qual = ptarget_wlan->network.SignalQuality;
- /*
- * ptarget_wlan->network.Rssi is raw data; we use
- * ptarget_wlan->network.SignalStrength instead (already scaled)
- */
- DBG_8723A("%s signal_strength:%3u, signal_qual:%3u\n",
- __func__, padapter->recvpriv.signal_strength,
- padapter->recvpriv.signal_qual);
- rtw_set_signal_stat_timer(&padapter->recvpriv);
-
- /* update fw_state will clr _FW_UNDER_LINKING here indirectly */
- switch (pnetwork->network.ifmode) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- if (pmlmepriv->fw_state & WIFI_UNDER_WPS)
- pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
- else
- pmlmepriv->fw_state = WIFI_STATION_STATE;
- break;
- case NL80211_IFTYPE_ADHOC:
- pmlmepriv->fw_state = WIFI_ADHOC_STATE;
- break;
- default:
- pmlmepriv->fw_state = WIFI_NULL_STATE;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Invalid network_mode\n");
- break;
- }
-
- rtw_update_protection23a(padapter, cur_network->network.IEs,
- cur_network->network.IELength);
-
- rtw_update_ht_cap23a(padapter, cur_network->network.IEs,
- cur_network->network.IELength);
-}
-
-/*
- * Notes:
- * the function could be > passive_level (the same context as Rx tasklet)
- * pnetwork : returns from rtw23a_joinbss_event_cb
- * ptarget_wlan: found from scanned_queue
- * if join_res > 0, for (fw_state==WIFI_STATION_STATE),
- * we check if "ptarget_sta" & "ptarget_wlan" exist.
- * if join_res > 0, for (fw_state==WIFI_ADHOC_STATE),
- * we only check if "ptarget_wlan" exist.
- * if join_res > 0, update "cur_network->network" from "pnetwork->network"
- * if (ptarget_wlan !=NULL).
- */
-
-void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf)
-{
- struct sta_info *ptarget_sta, *pcur_sta;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct wlan_network *pcur_wlan, *ptarget_wlan = NULL;
- bool the_same_macaddr;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "joinbss event call back received with res=%d\n",
- pnetwork->join_res);
-
- if (pmlmepriv->assoc_ssid.ssid_len == 0) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "@@@@@ joinbss event call back for Any SSid\n");
- } else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "@@@@@ rtw23a_joinbss_event_cb for SSid:%s\n",
- pmlmepriv->assoc_ssid.ssid);
- }
-
- if (ether_addr_equal(pnetwork->network.MacAddress,
- cur_network->network.MacAddress))
- the_same_macaddr = true;
- else
- the_same_macaddr = false;
-
- pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
- if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "***joinbss_evt_callback return a wrong bss ***\n");
- return;
- }
-
- spin_lock_bh(&pmlmepriv->lock);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "rtw23a_joinbss_event_cb !! _enter_critical\n");
-
- if (pnetwork->join_res > 0) {
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- /* s1. find ptarget_wlan */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr) {
- ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
- } else {
- pcur_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
- if (pcur_wlan)
- pcur_wlan->fixed = false;
-
- pcur_sta = rtw_get_stainfo23a(pstapriv, cur_network->network.MacAddress);
- if (pcur_sta) {
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(adapter,
- pcur_sta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- }
-
- ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
- if (check_fwstate(pmlmepriv,
- WIFI_STATION_STATE)) {
- if (ptarget_wlan)
- ptarget_wlan->fixed =
- true;
- }
- }
-
- } else {
- ptarget_wlan = rtw_find_network23a(
- &pmlmepriv->scanned_queue,
- pnetwork->network.MacAddress);
- if (check_fwstate(pmlmepriv,
- WIFI_STATION_STATE)) {
- if (ptarget_wlan)
- ptarget_wlan->fixed = true;
- }
- }
-
- /* s2. update cur_network */
- if (ptarget_wlan)
- rtw_joinbss_update_network23a(adapter,
- ptarget_wlan,
- pnetwork);
- else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Can't find ptarget_wlan when joinbss_event callback\n");
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
-
- /* s3. find ptarget_sta & update ptarget_sta after
- update cur_network only for station mode */
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- ptarget_sta = rtw_joinbss_update_stainfo(
- adapter, pnetwork);
- if (!ptarget_sta) {
- RT_TRACE(_module_rtl871x_mlme_c_,
- _drv_err_,
- "Can't update stainfo when joinbss_event callback\n");
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
- }
-
- /* s4. indicate connect */
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- rtw_indicate_connect23a(adapter);
- else {
- /* adhoc mode will call rtw_indicate_connect23a
- from rtw_stassoc_event_callback23a */
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "adhoc mode, fw_state:%x\n",
- get_fwstate(pmlmepriv));
- }
-
- /* s5. Cancel assoc_timer */
- del_timer_sync(&pmlmepriv->assoc_timer);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "Cancle assoc_timer\n");
- } else {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw23a_joinbss_event_cb err: fw_state:%x\n",
- get_fwstate(pmlmepriv));
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- goto ignore_joinbss_callback;
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- } else if (pnetwork->join_res == -4) {
- rtw_reset_securitypriv23a(adapter);
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
-
- /* rtw_free_assoc_resources23a(adapter, 1); */
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n",
- get_fwstate(pmlmepriv));
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
- } else {
- /* if join_res < 0 (join fails), then try again */
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- }
-
-ignore_joinbss_callback:
-
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-void rtw23a_joinbss_event_cb(struct rtw_adapter *adapter, const u8 *pbuf)
-{
- struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
-
- mlmeext_joinbss_event_callback23a(adapter, pnetwork->join_res);
-
- rtw_os_xmit_schedule23a(adapter);
-}
-
-void rtw_stassoc_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
-{
- struct sta_info *psta;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct wlan_network *ptarget_wlan;
-
- if (rtw_access_ctrl23a(adapter, pstassoc->macaddr) == false)
- return;
-
-#ifdef CONFIG_8723AU_AP_MODE
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr);
- if (psta) {
- /* bss_cap_update_on_sta_join23a(adapter, psta); */
- /* sta_info_update23a(adapter, psta); */
- ap_sta_info_defer_update23a(adapter, psta);
- }
- return;
- }
-#endif
- /* for AD-HOC mode */
- psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
- /* the sta has already been added to sta_info_queue => do nothing */
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Error: rtw_stassoc_event_callback23a: sta has been in sta_hash_queue\n");
- /* (this can happen between the drv receiving this event
- earlier and the fw not yet having set the key in CAM_ENTRY) */
- return;
- }
-
- psta = rtw_alloc_stainfo23a(&adapter->stapriv, pstassoc->macaddr,
- GFP_KERNEL);
- if (!psta) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Can't alloc sta_info when rtw_stassoc_event_callback23a\n");
- return;
- }
-
- /* to do : init sta_info variable */
- psta->qos_option = 0;
- psta->mac_id = (uint)pstassoc->cam_id;
- /* psta->aid = (uint)pstassoc->cam_id; */
- DBG_8723A("%s\n", __func__);
- /* for ad-hoc mode */
- rtl8723a_SetHalODMVar(adapter, HAL_ODM_STA_INFO, psta, true);
-
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- psta->dot118021XPrivacy =
- adapter->securitypriv.dot11PrivacyAlgrthm;
-
- psta->ieee8021x_blocked = false;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- if (adapter->stapriv.asoc_sta_count == 2) {
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- ptarget_wlan =
- rtw_find_network23a(&pmlmepriv->scanned_queue,
- cur_network->network.MacAddress);
- if (ptarget_wlan)
- ptarget_wlan->fixed = true;
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- rtw_indicate_connect23a(adapter);
- }
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- mlmeext_sta_add_event_callback23a(adapter, psta);
-}
-
-void rtw_stadel_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
-{
- int mac_id;
- struct sta_info *psta;
- struct wlan_network *pwlan;
- struct wlan_bssid_ex *pdev_network;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct stadel_event *pstadel = (struct stadel_event *)pbuf;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-
- psta = rtw_get_stainfo23a(&adapter->stapriv, pstadel->macaddr);
- if (psta)
- mac_id = psta->mac_id;
- else
- mac_id = pstadel->mac_id;
-
- DBG_8723A("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return;
-
- mlmeext_sta_del_event_callback23a(adapter);
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- if (adapter->mlmepriv.to_roaming > 0) {
- /* this stadel_event is caused by roaming,
- decrease to_roaming */
- pmlmepriv->to_roaming--;
- } else if (adapter->mlmepriv.to_roaming == 0)
- rtw_set_roaming(adapter, adapter->registrypriv.max_roaming_times);
- if (*((u16 *)pstadel->rsvd) != WLAN_REASON_EXPIRATION_CHK)
- rtw_set_roaming(adapter, 0); /* don't roam */
-
- rtw_free_uc_swdec_pending_queue23a(adapter);
-
- rtw_free_assoc_resources23a(adapter, 1);
- rtw_indicate_disconnect23a(adapter);
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- /* remove the network entry in scanned_queue */
- pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue,
- tgt_network->network.MacAddress);
- if (pwlan) {
- pwlan->fixed = false;
- rtw_free_network_nolock(pmlmepriv, pwlan);
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- _rtw_roaming(adapter, tgt_network);
- }
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- rtw_free_stainfo23a(adapter, psta);
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
- if (adapter->stapriv.asoc_sta_count == 1) {
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- /* free old ibss network */
- /* pwlan = rtw_find_network23a(
- &pmlmepriv->scanned_queue, pstadel->macaddr); */
- pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue,
- tgt_network->network.MacAddress);
- if (pwlan) {
- pwlan->fixed = false;
- rtw_free_network_nolock(pmlmepriv, pwlan);
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- /* re-create ibss */
- pdev_network = &adapter->registrypriv.dev_network;
-
- memcpy(pdev_network, &tgt_network->network,
- get_wlan_bssid_ex_sz(&tgt_network->network));
-
- rtw_do_join_adhoc(adapter);
- }
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-/*
-* rtw23a_join_to_handler - Timeout/failure handler for CMD JoinBss
-* @data: address of the rtw_adapter structure
-*/
-void rtw23a_join_to_handler(unsigned long data)
-{
- struct rtw_adapter *adapter = (struct rtw_adapter *)data;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- int do_join_r;
-
- DBG_8723A("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (adapter->mlmepriv.to_roaming > 0) {
- /* join timeout caused by roaming */
- while (1) {
- pmlmepriv->to_roaming--;
- if (adapter->mlmepriv.to_roaming != 0) {
- /* try another */
- DBG_8723A("%s try another roaming\n", __func__);
- do_join_r = rtw_do_join(adapter);
- if (do_join_r != _SUCCESS) {
- DBG_8723A("%s roaming do_join return "
- "%d\n", __func__ , do_join_r);
- continue;
- }
- break;
- } else {
- DBG_8723A("%s We've try roaming but fail\n",
- __func__);
- rtw_indicate_disconnect23a(adapter);
- break;
- }
- }
- } else {
- rtw_indicate_disconnect23a(adapter);
- free_scanqueue(pmlmepriv);
-
- /* indicate disconnect for the case where the join timed out and
- check_fwstate != _FW_LINKED */
- rtw_cfg80211_indicate_disconnect(adapter);
- }
-
- spin_unlock_bh(&pmlmepriv->lock);
-
-}
-
-/*
-* rtw_scan_timeout_handler23a - Timeout/Failure handler for CMD SiteSurvey
-* @data: address of the rtw_adapter structure
-*/
-void rtw_scan_timeout_handler23a(unsigned long data)
-{
- struct rtw_adapter *adapter = (struct rtw_adapter *)data;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- DBG_8723A("%s(%s): fw_state =%x\n", __func__, adapter->pnetdev->name,
- get_fwstate(pmlmepriv));
-
- spin_lock_bh(&pmlmepriv->lock);
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- rtw_cfg80211_indicate_scan_done(wdev_to_priv(adapter->rtw_wdev), true);
-}
-
-void rtw_dynamic_check_timer_handler(unsigned long data)
-{
- struct rtw_adapter *adapter = (struct rtw_adapter *)data;
-
- if (adapter->hw_init_completed == false)
- goto out;
-
- if (adapter->bDriverStopped == true ||
- adapter->bSurpriseRemoved == true)
- goto out;
-
- if (adapter->net_closed == true)
- goto out;
-
- rtw_dynamic_chk_wk_cmd23a(adapter);
-
-out:
- mod_timer(&adapter->mlmepriv.dynamic_chk_timer,
- jiffies + msecs_to_jiffies(2000));
-}
-
-inline bool rtw_is_scan_deny(struct rtw_adapter *adapter)
-{
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
-
- return atomic_read(&mlmepriv->set_scan_deny) != 0;
-}
-
-void rtw_clear_scan_deny(struct rtw_adapter *adapter)
-{
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
-
- atomic_set(&mlmepriv->set_scan_deny, 0);
-}
-
-void rtw_set_scan_deny_timer_hdl(unsigned long data)
-{
- struct rtw_adapter *adapter = (struct rtw_adapter *)data;
-
- rtw_clear_scan_deny(adapter);
-}
-
-void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms)
-{
- struct mlme_priv *mlmepriv = &adapter->mlmepriv;
-
- atomic_set(&mlmepriv->set_scan_deny, 1);
- mod_timer(&mlmepriv->set_scan_deny_timer,
- jiffies + msecs_to_jiffies(ms));
-}
-
-#if defined(IEEE80211_SCAN_RESULT_EXPIRE)
-#define RTW_SCAN_RESULT_EXPIRE \
- ((IEEE80211_SCAN_RESULT_EXPIRE / (HZ*1000)) - 1000) /* 3000 -1000 */
-#else
-#define RTW_SCAN_RESULT_EXPIRE 2000
-#endif
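-/* RTW_SCAN_RESULT_EXPIRE (in ms) bounds how stale a scanned entry may be
-   before rtw_check_join_candidate() below ignores it as a roaming candidate */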
-
-/*
-* Select a new join candidate from the original @param candidate and
-* @param competitor
-* @return true: candidate is updated
-* @return false: candidate is not updated
-*/
-static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv,
- struct wlan_network **candidate,
- struct wlan_network *competitor)
-{
- int updated = false;
- struct rtw_adapter *adapter;
-
- adapter = container_of(pmlmepriv, struct rtw_adapter, mlmepriv);
-
- /* check bssid, if needed */
- if (pmlmepriv->assoc_by_bssid == true) {
- if (!ether_addr_equal(competitor->network.MacAddress,
- pmlmepriv->assoc_bssid))
- goto exit;
- }
-
- /* check ssid, if needed */
- if (pmlmepriv->assoc_ssid.ssid_len) {
- if (competitor->network.Ssid.ssid_len !=
- pmlmepriv->assoc_ssid.ssid_len ||
- memcmp(competitor->network.Ssid.ssid,
- pmlmepriv->assoc_ssid.ssid,
- pmlmepriv->assoc_ssid.ssid_len))
- goto exit;
- }
-
- if (rtw_is_desired_network(adapter, competitor) == false)
- goto exit;
-
- if (adapter->mlmepriv.to_roaming > 0) {
- unsigned int passed;
-
- passed = jiffies_to_msecs(jiffies - competitor->last_scanned);
- if (passed >= RTW_SCAN_RESULT_EXPIRE ||
- is_same_ess(&competitor->network,
- &pmlmepriv->cur_network.network) == false)
- goto exit;
- }
-
- if (!*candidate ||
- (*candidate)->network.Rssi < competitor->network.Rssi) {
- *candidate = competitor;
- updated = true;
- }
-
- if (updated) {
- DBG_8723A("[by_bssid:%u][assoc_ssid:%s][to_roaming:%u] new candidate: %s(%pM) rssi:%d\n",
- pmlmepriv->assoc_by_bssid,
- pmlmepriv->assoc_ssid.ssid,
- adapter->mlmepriv.to_roaming,
- (*candidate)->network.Ssid.ssid,
- (*candidate)->network.MacAddress,
- (int)(*candidate)->network.Rssi);
- }
-
-exit:
- return updated;
-}
-
-/*
- * Calling context:
- * The caller of this sub-routine will be in a critical section
- * and must hold the pmlmepriv->lock spinlock.
- */
-
-static int rtw_do_join(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ret;
-
- pmlmepriv->cur_network.join_res = -2;
-
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
-
- pmlmepriv->to_join = true;
-
- ret = rtw_select_and_join_from_scanned_queue23a(pmlmepriv);
- if (ret == _SUCCESS) {
- pmlmepriv->to_join = false;
- } else {
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- /* switch to ADHOC_MASTER */
- ret = rtw_do_join_adhoc(padapter);
- if (ret != _SUCCESS)
- goto exit;
- } else {
- /* can't associate; reset under-linking */
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
- ret = _FAIL;
- pmlmepriv->to_join = false;
- }
- }
-
-exit:
- return ret;
-}
-
-static struct wlan_network *
-rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv)
-{
- struct wlan_network *pnetwork, *ptmp, *candidate = NULL;
- struct rtw_queue *queue = &pmlmepriv->scanned_queue;
- struct list_head *phead;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- phead = get_list_head(queue);
- list_for_each_entry_safe(pnetwork, ptmp, phead, list)
- rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- return candidate;
-}
-
-
-int rtw_do_join_adhoc(struct rtw_adapter *adapter)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_bssid_ex *pdev_network;
- u8 *ibss;
- int ret;
-
- pdev_network = &adapter->registrypriv.dev_network;
- ibss = adapter->registrypriv.dev_network.MacAddress;
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "switching to adhoc master\n");
-
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid,
- sizeof(struct cfg80211_ssid));
-
- rtw_update_registrypriv_dev_network23a(adapter);
- rtw_generate_random_ibss23a(ibss);
-
- pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
-
- ret = rtw_createbss_cmd23a(adapter);
- if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Error =>rtw_createbss_cmd23a status FAIL\n");
- } else {
- pmlmepriv->to_join = false;
- }
-
- return ret;
-}
-
-int rtw_do_join_network(struct rtw_adapter *adapter,
- struct wlan_network *candidate)
-{
- int ret;
-
- /* check for situation of _FW_LINKED */
- if (check_fwstate(&adapter->mlmepriv, _FW_LINKED)) {
- DBG_8723A("%s: _FW_LINKED while ask_for_joinbss!\n", __func__);
-
- rtw_disassoc_cmd23a(adapter, 0, true);
- rtw_indicate_disconnect23a(adapter);
- rtw_free_assoc_resources23a(adapter, 0);
- }
- set_fwstate(&adapter->mlmepriv, _FW_UNDER_LINKING);
-
- ret = rtw_joinbss_cmd23a(adapter, candidate);
-
- if (ret == _SUCCESS)
- mod_timer(&adapter->mlmepriv.assoc_timer,
- jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
-
- return ret;
-}
-
-int rtw_select_and_join_from_scanned_queue23a(struct mlme_priv *pmlmepriv)
-{
- struct rtw_adapter *adapter;
- struct wlan_network *candidate = NULL;
- int ret;
-
- adapter = pmlmepriv->nic_hdl;
-
- candidate = rtw_select_candidate_from_queue(pmlmepriv);
- if (!candidate) {
- DBG_8723A("%s: return _FAIL(candidate == NULL)\n", __func__);
- ret = _FAIL;
- goto exit;
- } else {
- DBG_8723A("%s: candidate: %s(%pM, ch:%u)\n",
- __func__,
- candidate->network.Ssid.ssid,
- candidate->network.MacAddress,
- candidate->network.DSConfig);
- }
-
- ret = rtw_do_join_network(adapter, candidate);
-
-exit:
- return ret;
-}
-
-int rtw_set_auth23a(struct rtw_adapter *adapter,
- struct security_priv *psecuritypriv)
-{
- struct cmd_obj *pcmd;
- struct setauth_parm *psetauthparm;
- struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
- int res = _SUCCESS;
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!pcmd) {
- res = _FAIL; /* try again */
- goto exit;
- }
-
- psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
- if (!psetauthparm) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
-
- psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
-
- pcmd->cmdcode = _SetAuth_CMD_;
- pcmd->parmbuf = (unsigned char *)psetauthparm;
- pcmd->cmdsz = (sizeof(struct setauth_parm));
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "after enqueue set_auth_cmd, auth_mode=%x\n",
- psecuritypriv->dot11AuthAlgrthm);
-
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
-
-exit:
-
- return res;
-}
-
-int rtw_set_key23a(struct rtw_adapter *adapter,
- struct security_priv *psecuritypriv, int keyid, u8 set_tx)
-{
- u8 keylen;
- struct cmd_obj *pcmd;
- struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- int res = _SUCCESS;
-
- if (keyid >= 4) {
- res = _FAIL;
- goto exit;
- }
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!pcmd) {
- res = _FAIL; /* try again */
- goto exit;
- }
- psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (!psetkeyparm) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- psetkeyparm->algorithm = (unsigned char)
- psecuritypriv->dot118021XGrpPrivacy;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_set_key23a: psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy =%d\n",
- psetkeyparm->algorithm);
- } else {
- psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_set_key23a: psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm =%d\n",
- psetkeyparm->algorithm);
- }
- psetkeyparm->keyid = keyid;/* 0~3 */
- psetkeyparm->set_tx = set_tx;
- if (is_wep_enc(psetkeyparm->algorithm))
- pmlmepriv->key_mask |= BIT(psetkeyparm->keyid);
-
- DBG_8723A("==> rtw_set_key23a algorithm(%x), keyid(%x), key_mask(%x)\n",
- psetkeyparm->algorithm, psetkeyparm->keyid,
- pmlmepriv->key_mask);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_set_key23a: psetkeyparm->algorithm =%d psetkeyparm->keyid = (u8)keyid =%d\n",
- psetkeyparm->algorithm, keyid);
-
- switch (psetkeyparm->algorithm) {
- case WLAN_CIPHER_SUITE_WEP40:
- keylen = 5;
- memcpy(&psetkeyparm->key[0],
- &psecuritypriv->wep_key[keyid].key, keylen);
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- keylen = 13;
- memcpy(&psetkeyparm->key[0],
- &psecuritypriv->wep_key[keyid].key, keylen);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- keylen = 16;
- memcpy(&psetkeyparm->key,
- &psecuritypriv->dot118021XGrpKey[keyid], keylen);
- psetkeyparm->grpkey = 1;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- keylen = 16;
- memcpy(&psetkeyparm->key,
- &psecuritypriv->dot118021XGrpKey[keyid], keylen);
- psetkeyparm->grpkey = 1;
- break;
- default:
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "rtw_set_key23a:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n",
- psecuritypriv->dot11PrivacyAlgrthm);
- res = _FAIL;
- kfree(pcmd);
- kfree(psetkeyparm);
- goto exit;
- }
-
- pcmd->cmdcode = _SetKey_CMD_;
- pcmd->parmbuf = (u8 *)psetkeyparm;
- pcmd->cmdsz = (sizeof(struct setkey_parm));
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- /* sema_init(&pcmd->cmd_sem, 0); */
-
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
-
-exit:
-
- return res;
-}
-
-/* adjust IEs for rtw_joinbss_cmd23a in WMM */
-int rtw_restruct_wmm_ie23a(struct rtw_adapter *adapter, u8 *in_ie,
- u8 *out_ie, uint in_len, uint initial_out_len)
-{
- int ielength;
- const u8 *p;
-
- ielength = initial_out_len;
-
- p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WMM,
- in_ie, in_len);
-
- if (p && p[1]) {
- memcpy(out_ie + initial_out_len, p, 9);
-
- out_ie[initial_out_len + 1] = 7;
- out_ie[initial_out_len + 6] = 0;
- out_ie[initial_out_len + 8] = 0;
-
- ielength += 9;
- }
-
- return ielength;
-}
-
-/*
- * Ported from 8185: IsInPreAuthKeyList().
- * (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.)
- * Added by Annie, 2006-05-07.
- *
- * Search by BSSID.
- * Return value:
- *   -1   if there is no pre-auth key in the table
- *   >= 0 the entry id of the pre-auth key
- */
-
-static int SecIsInPMKIDList(struct rtw_adapter *Adapter, u8 *bssid)
-{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
- int i = 0;
-
- do {
- if (psecuritypriv->PMKIDList[i].bUsed &&
- ether_addr_equal(psecuritypriv->PMKIDList[i].Bssid, bssid)) {
- break;
- } else {
- i++;
- /* continue; */
- }
- } while (i < NUM_PMKID_CACHE);
-
- if (i == NUM_PMKID_CACHE)
- i = -1; /* Could not find. */
- else {
- /* There is one Pre-Authentication Key for
- the specific BSSID. */
- }
-
- return i;
-}
-
-/*
- * Check the RSN IE length.
- * If the RSN IE length <= 20, the RSN IE does not include
- * the PMKID information.
- * The 0th-11th elements of the array are the fixed IE,
- * the 12th element is the IE and the 13th element is the IE length.
- */
-
-static int rtw_append_pmkid(struct rtw_adapter *Adapter, int iEntry,
- u8 *ie, uint ie_len)
-{
- struct security_priv *psecuritypriv = &Adapter->securitypriv;
-
- if (ie[1] <= 20) {
- /* The RSN IE does not include the PMKID;
- append the PMKID information */
- ie[ie_len] = 1;
- ie_len++;
- ie[ie_len] = 0; /* PMKID count = 1 (bytes 0x01 0x00, little endian) */
- ie_len++;
- memcpy(&ie[ie_len],
- &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
-
- ie_len += 16;
- ie[1] += 18;/* PMKID length = 2+16 */
- }
- return ie_len;
-}
-
-int rtw_restruct_sec_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len)
-{
- u8 authmode = 0;
- uint ielength;
- int iEntry;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- uint ndisauthmode = psecuritypriv->ndisauthtype;
- uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "+rtw_restruct_sec_ie23a: ndisauthmode=%d ndissecuritytype=%d\n",
- ndisauthmode, ndissecuritytype);
-
- ielength = 0;
- if (ndisauthmode == Ndis802_11AuthModeWPA ||
- ndisauthmode == Ndis802_11AuthModeWPAPSK)
- authmode = WLAN_EID_VENDOR_SPECIFIC;
- if (ndisauthmode == Ndis802_11AuthModeWPA2 ||
- ndisauthmode == Ndis802_11AuthModeWPA2PSK)
- authmode = WLAN_EID_RSN;
-
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- memcpy(out_ie + ielength, psecuritypriv->wps_ie,
- psecuritypriv->wps_ie_len);
-
- ielength += psecuritypriv->wps_ie_len;
- } else if (authmode == WLAN_EID_VENDOR_SPECIFIC ||
- authmode == WLAN_EID_RSN) {
- /* copy RSN or SSN */
- memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0],
- psecuritypriv->supplicant_ie[1] + 2);
- ielength += psecuritypriv->supplicant_ie[1] + 2;
- }
-
- iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
- if (iEntry < 0)
- return ielength;
- else {
- if (authmode == WLAN_EID_RSN)
- ielength = rtw_append_pmkid(adapter, iEntry,
- out_ie, ielength);
- }
-
- return ielength;
-}
-
-void rtw_init_registrypriv_dev_network23a(struct rtw_adapter *adapter)
-{
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct eeprom_priv *peepriv = &adapter->eeprompriv;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- u8 *myhwaddr = myid(peepriv);
-
- ether_addr_copy(pdev_network->MacAddress, myhwaddr);
-
- memcpy(&pdev_network->Ssid, &pregistrypriv->ssid,
- sizeof(struct cfg80211_ssid));
-
- pdev_network->beacon_interval = 100;
-}
-
-void rtw_update_registrypriv_dev_network23a(struct rtw_adapter *adapter)
-{
- int sz = 0;
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
- /* struct xmit_priv *pxmitpriv = &adapter->xmitpriv; */
-
- pdev_network->Privacy =
- (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0);
-
- pdev_network->Rssi = 0;
-
- pdev_network->DSConfig = pregistrypriv->channel;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "pregistrypriv->channel =%d, pdev_network->DSConfig = 0x%x\n",
- pregistrypriv->channel, pdev_network->DSConfig);
-
- if (cur_network->network.ifmode == NL80211_IFTYPE_ADHOC)
- pdev_network->ATIMWindow = 0;
-
- pdev_network->ifmode = cur_network->network.ifmode;
-
- /* 1. Supported rates */
- /* 2. IE */
-
- sz = rtw_generate_ie23a(pregistrypriv);
-
- pdev_network->IELength = sz;
-
- pdev_network->Length =
- get_wlan_bssid_ex_sz(pdev_network);
-
- /* note: translate IELength & Length after assigning
- Length to cmdsz in createbss_cmd(); */
- /* pdev_network->IELength = cpu_to_le32(sz); */
-}
-
-/* the function is at passive_level */
-void rtw_joinbss_reset23a(struct rtw_adapter *padapter)
-{
- u8 threshold;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- /* todo: if you want to do any io/reg/hw setting
- before join_bss, add code here */
-
- pmlmepriv->num_FortyMHzIntolerant = 0;
-
- pmlmepriv->num_sta_no_ht = 0;
-
- phtpriv->ampdu_enable = false;/* reset to disabled */
-
- /* TH = 1 => invalidate USB Rx aggregation */
- /* TH = 0 => validate USB Rx aggregation, use the init value. */
- if (phtpriv->ht_option) {
- if (padapter->registrypriv.wifi_spec == 1)
- threshold = 1;
- else
- threshold = 0;
- } else
- threshold = 1;
-
- rtl8723a_set_rxdma_agg_pg_th(padapter, threshold);
-}
-
-/* the function is >= passive_level */
-bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
- u8 *out_ie, uint in_len, uint *pout_len)
-{
- u32 out_len;
- int max_rx_ampdu_factor;
- unsigned char *pframe;
- const u8 *p;
- struct ieee80211_ht_cap ht_capie;
- u8 WMM_IE[7] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
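- /* WMM_IE above: Microsoft OUI 00:50:f2, OUI type 2 (WMM),
- subtype 0 (Information Element), version 1, QoS info 0 */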
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- phtpriv->ht_option = false;
-
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, in_ie, in_len);
-
- if (p && p[1] > 0) {
- u32 rx_packet_offset, max_recvbuf_sz;
-
- if (pmlmepriv->qos_option == 0) {
- out_len = *pout_len;
- pframe = rtw_set_ie23a(out_ie + out_len,
- WLAN_EID_VENDOR_SPECIFIC,
- sizeof(WMM_IE), WMM_IE,
- pout_len);
-
- pmlmepriv->qos_option = 1;
- }
-
- out_len = *pout_len;
-
- memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
-
- ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40);
-
- GetHalDefVar8192CUsb(padapter, HAL_DEF_RX_PACKET_OFFSET,
- &rx_packet_offset);
- GetHalDefVar8192CUsb(padapter, HAL_DEF_MAX_RECVBUF_SZ,
- &max_recvbuf_sz);
-
- GetHalDefVar8192CUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR,
- &max_rx_ampdu_factor);
- ht_capie.ampdu_params_info = max_rx_ampdu_factor & 0x03;
-
- if (padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_CCMP)
- ht_capie.ampdu_params_info |=
- (IEEE80211_HT_AMPDU_PARM_DENSITY & (0x07 << 2));
- else
- ht_capie.ampdu_params_info |=
- (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00);
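- /* i.e. MPDU density 7 (16 us) with CCMP, 0 (no restriction) otherwise */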
-
- pframe = rtw_set_ie23a(out_ie + out_len, WLAN_EID_HT_CAPABILITY,
- sizeof(struct ieee80211_ht_cap),
- (unsigned char *)&ht_capie, pout_len);
-
- phtpriv->ht_option = true;
-
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, in_ie, in_len);
- if (p && (p[1] == sizeof(struct ieee80211_ht_operation))) {
- out_len = *pout_len;
- pframe = rtw_set_ie23a(out_ie + out_len,
- WLAN_EID_HT_OPERATION,
- p[1], p + 2, pout_len);
- }
- }
-
- return phtpriv->ht_option;
-}
-
-/* the function is > passive_level (in critical_section) */
-void rtw_update_ht_cap23a(struct rtw_adapter *padapter, u8 *pie, uint ie_len)
-{
- u8 max_ampdu_sz;
- const u8 *p;
- struct ieee80211_ht_cap *pht_capie;
- struct ieee80211_ht_operation *pht_addtinfo;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (!phtpriv->ht_option)
- return;
-
- if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
- return;
-
- DBG_8723A("+rtw_update_ht_cap23a()\n");
-
- /* maybe need to check if the AP supports Rx A-MPDU. */
- if (!phtpriv->ampdu_enable && pregistrypriv->ampdu_enable == 1) {
- if (pregistrypriv->wifi_spec == 1)
- phtpriv->ampdu_enable = false;
- else
- phtpriv->ampdu_enable = true;
- } else if (pregistrypriv->ampdu_enable == 2)
- phtpriv->ampdu_enable = true;
-
- /* check Max Rx A-MPDU Size */
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, pie, ie_len);
-
- if (p && p[1] > 0) {
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- max_ampdu_sz = pht_capie->ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_FACTOR;
- /* max_ampdu_sz (kbytes); */
- max_ampdu_sz = 1 << (max_ampdu_sz + 3);
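- /* A-MPDU factor n => max A-MPDU length 2^(13 + n) - 1 bytes,
- i.e. roughly (1 << (n + 3)) kbytes */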
-
- phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
- }
-
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, pie, ie_len);
- if (p && p[1] > 0) {
- pht_addtinfo = (struct ieee80211_ht_operation *)(p + 2);
- /* todo: */
- }
-
- /* update cur_bwmode & cur_ch_offset */
- if (pregistrypriv->cbw40_enable &&
- pmlmeinfo->ht_cap.cap_info &
- cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
- pmlmeinfo->HT_info.ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
- int i;
- u8 rf_type;
-
- rf_type = rtl8723a_get_rf_type(padapter);
-
- /* update the MCS rates */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (rf_type == RF_1T1R || rf_type == RF_1T2R)
- pmlmeinfo->ht_cap.mcs.rx_mask[i] &=
- MCS_rate_1R23A[i];
- else
- pmlmeinfo->ht_cap.mcs.rx_mask[i] &=
- MCS_rate_2R23A[i];
- }
- /* switch to the 40M Hz mode according to the AP */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch (pmlmeinfo->HT_info.ht_param &
- IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
-
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
-
- default:
- pmlmeext->cur_ch_offset =
- HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- }
-
- /* Config SM Power Save setting */
- pmlmeinfo->SM_PS =
- (le16_to_cpu(pmlmeinfo->ht_cap.cap_info) &
- IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT;
- if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
- DBG_8723A("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
-
- /* Config current HT Protection mode. */
- pmlmeinfo->HT_protection =
- le16_to_cpu(pmlmeinfo->HT_info.operation_mode) &
- IEEE80211_HT_OP_MODE_PROTECTION;
-}
-
-void rtw_issue_addbareq_cmd23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- u8 issued;
- int priority;
- struct sta_info *psta;
- struct ht_priv *phtpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- s32 bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (bmcst || padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100)
- return;
-
- priority = pattrib->priority;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
- }
-
- if (!psta) {
- DBG_8723A("%s, psta == NUL\n", __func__);
- return;
- }
-
- if (!(psta->state & _FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
- __func__, psta->state);
- return;
- }
-
- phtpriv = &psta->htpriv;
-
- if (phtpriv->ht_option && phtpriv->ampdu_enable) {
- issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
- issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
-
- if (issued == 0) {
- DBG_8723A("rtw_issue_addbareq_cmd23a, p =%d\n",
- priority);
- psta->htpriv.candidate_tid_bitmap |= BIT(priority);
- rtw_addbareq_cmd23a(padapter, (u8) priority,
- pattrib->ra);
- }
- }
-}
-
-int rtw_linked_check(struct rtw_adapter *padapter)
-{
- if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) ||
- check_fwstate(&padapter->mlmepriv,
- WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
- if (padapter->stapriv.asoc_sta_count > 2)
- return true;
- } else { /* Station mode */
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED))
- return true;
- }
- return false;
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
deleted file mode 100644
index 7dd1540ebedd..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ /dev/null
@@ -1,6187 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_MLME_EXT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wifi.h>
-#include <rtw_mlme_ext.h>
-#include <wlan_bssdef.h>
-#include <mlme_osdep.h>
-#include <recv_osdep.h>
-#include <linux/ieee80211.h>
-#include <rtl8723a_hal.h>
-
-static int OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAssocRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnProbeReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnProbeRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int DoReserved23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnBeacon23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAtim23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnDisassoc23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAuth23aClient23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnDeAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-
-static int on_action_spct23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_qos(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_dls(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_back23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int on_action_public23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_ht(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_wmm(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static int OnAction23a_p2p(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-
-static void issue_assocreq(struct rtw_adapter *padapter);
-static void issue_probereq(struct rtw_adapter *padapter,
- struct cfg80211_ssid *pssid, u8 *da);
-static int issue_probereq_ex(struct rtw_adapter *padapter,
- struct cfg80211_ssid *pssid,
- u8 *da, int try_cnt, int wait_ms);
-static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da);
-static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta,
- unsigned short status);
-static int issue_deauth_ex(struct rtw_adapter *padapter, u8 *da,
- unsigned short reason, int try_cnt, int wait_ms);
-static void start_clnt_assoc(struct rtw_adapter *padapter);
-static void start_clnt_auth(struct rtw_adapter *padapter);
-static void start_clnt_join(struct rtw_adapter *padapter);
-static void start_create_ibss(struct rtw_adapter *padapter);
-static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-
-#ifdef CONFIG_8723AU_AP_MODE
-static int OnAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-static void issue_assocrsp(struct rtw_adapter *padapter, unsigned short status,
- struct sta_info *pstat, u16 pkt_type);
-#endif
-
-static struct mlme_handler mlme_sta_tbl[] = {
- {"OnAssocReq23a", &OnAssocReq23a},
- {"OnAssocRsp23a", &OnAssocRsp23a},
- {"OnReAssocReq", &OnAssocReq23a},
- {"OnReAssocRsp", &OnAssocRsp23a},
- {"OnProbeReq23a", &OnProbeReq23a},
- {"OnProbeRsp23a", &OnProbeRsp23a},
-
- /*----------------------------------------------------------
- below 2 are reserved
- -----------------------------------------------------------*/
- {"DoReserved23a", &DoReserved23a},
- {"DoReserved23a", &DoReserved23a},
- {"OnBeacon23a", &OnBeacon23a},
- {"OnATIM", &OnAtim23a},
- {"OnDisassoc23a", &OnDisassoc23a},
- {"OnAuth23a", &OnAuth23aClient23a},
- {"OnDeAuth23a", &OnDeAuth23a},
- {"OnAction23a", &OnAction23a},
-};
-
-static struct action_handler OnAction23a_tbl[] = {
- {WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct23a},
- {WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction23a_qos},
- {WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction23a_dls},
- {WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction23a_back23a},
- {WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public23a},
- {WLAN_CATEGORY_HT, "ACTION_HT", &OnAction23a_ht},
- {WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved23a},
- {WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction23a_wmm},
- {WLAN_CATEGORY_VENDOR_SPECIFIC, "ACTION_P2P", &OnAction23a_p2p},
-};
-
-static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-
-/**************************************************
-OUI definitions for the vendor specific IE
-***************************************************/
-unsigned char WMM_OUI23A[] = {0x00, 0x50, 0xf2, 0x02};
-unsigned char WPS_OUI23A[] = {0x00, 0x50, 0xf2, 0x04};
-unsigned char P2P_OUI23A[] = {0x50, 0x6F, 0x9A, 0x09};
-unsigned char WFD_OUI23A[] = {0x50, 0x6F, 0x9A, 0x0A};
-
-unsigned char WMM_INFO_OUI23A[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
-unsigned char WMM_PARA_OUI23A[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
-
-static unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
-
-/********************************************************
-MCS rate definitions
-*********************************************************/
-unsigned char MCS_rate_2R23A[16] = {
- 0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-unsigned char MCS_rate_1R23A[16] = {
- 0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
-
-/********************************************************
-ChannelPlan definitions
-*********************************************************/
-
-static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
- /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
- /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
- /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
- {{10, 11, 12, 13}, 4},
- /* 0x05, RT_CHANNEL_DOMAIN_2G_NULL */
- {{}, 0},
-};
-
-static struct rt_channel_plan_5g RTW_ChannelPlan5G[RT_CHANNEL_DOMAIN_5G_MAX] = {
- /* 0x00, RT_CHANNEL_DOMAIN_5G_NULL */
- {{}, 0},
- /* 0x01, RT_CHANNEL_DOMAIN_5G_ETSI1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 128, 132, 136, 140}, 19},
- /* 0x02, RT_CHANNEL_DOMAIN_5G_ETSI2 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24},
- /* 0x03, RT_CHANNEL_DOMAIN_5G_ETSI3 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 128, 132, 149, 153, 157, 161, 165}, 22},
- /* 0x04, RT_CHANNEL_DOMAIN_5G_FCC1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24},
- /* 0x05, RT_CHANNEL_DOMAIN_5G_FCC2 */
- {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9},
- /* 0x06, RT_CHANNEL_DOMAIN_5G_FCC3 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13},
- /* 0x07, RT_CHANNEL_DOMAIN_5G_FCC4 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161}, 12},
- /* 0x08, RT_CHANNEL_DOMAIN_5G_FCC5 */
- {{149, 153, 157, 161, 165}, 5},
- /* 0x09, RT_CHANNEL_DOMAIN_5G_FCC6 */
- {{36, 40, 44, 48, 52, 56, 60, 64}, 8},
- /* 0x0A, RT_CHANNEL_DOMAIN_5G_FCC7_IC1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 136, 140, 149, 153, 157, 161, 165}, 20},
- /* 0x0B, RT_CHANNEL_DOMAIN_5G_KCC1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 149, 153, 157, 161, 165}, 20},
- /* 0x0C, RT_CHANNEL_DOMAIN_5G_MKK1 */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 120, 124, 128, 132, 136, 140}, 19},
- /* 0x0D, RT_CHANNEL_DOMAIN_5G_MKK2 */
- {{36, 40, 44, 48, 52, 56, 60, 64}, 8},
- /* 0x0E, RT_CHANNEL_DOMAIN_5G_MKK3 */
- {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11},
- /* 0x0F, RT_CHANNEL_DOMAIN_5G_NCC1 */
- {{56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149,
- 153, 157, 161, 165}, 15},
- /* 0x10, RT_CHANNEL_DOMAIN_5G_NCC2 */
- {{56, 60, 64, 149, 153, 157, 161, 165}, 8},
-
- /* Driver self-defined entries for old channel plan compatibility;
- remember to update these if a new channel plan is defined ===== */
- /* 0x11, RT_CHANNEL_DOMAIN_5G_FCC */
- {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
- 116, 132, 136, 140, 149, 153, 157, 161, 165}, 21},
- /* 0x12, RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS */
- {{36, 40, 44, 48}, 4},
- /* 0x13, RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS */
- {{36, 40, 44, 48, 149, 153, 157, 161}, 8},
-};
-
-static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
- /* 0x00 ~ 0x1F , Old Define ===== */
- {0x02, 0x11}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
- {0x02, 0x0A}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
- {0x01, 0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
- {0x01, 0x00}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
- {0x01, 0x00}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
- {0x03, 0x00}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
- {0x03, 0x00}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
- {0x01, 0x09}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
- {0x03, 0x09}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
- {0x03, 0x00}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
- {0x00, 0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
- {0x02, 0x0F}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
- {0x01, 0x08}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
- {0x02, 0x06}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
- {0x02, 0x0B}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
- {0x02, 0x09}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
- {0x01, 0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
- {0x02, 0x05}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
- {0x01, 0x12}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x00, 0x04}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
- {0x02, 0x10}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
- {0x00, 0x12}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
- {0x00, 0x13}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
- {0x03, 0x12}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
- {0x05, 0x08}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
- {0x02, 0x08}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
- {0x00, 0x00}, /* 0x1A, */
- {0x00, 0x00}, /* 0x1B, */
- {0x00, 0x00}, /* 0x1C, */
- {0x00, 0x00}, /* 0x1D, */
- {0x00, 0x00}, /* 0x1E, */
- {0x05, 0x04}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
- /* 0x20 ~ 0x7F , New Define ===== */
- {0x00, 0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
- {0x01, 0x00}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
- {0x02, 0x00}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
- {0x03, 0x00}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
- {0x04, 0x00}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
- {0x02, 0x04}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
- {0x00, 0x01}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
- {0x03, 0x0C}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
- {0x00, 0x0B}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
- {0x00, 0x05}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
- {0x00, 0x00}, /* 0x2A, */
- {0x00, 0x00}, /* 0x2B, */
- {0x00, 0x00}, /* 0x2C, */
- {0x00, 0x00}, /* 0x2D, */
- {0x00, 0x00}, /* 0x2E, */
- {0x00, 0x00}, /* 0x2F, */
- {0x00, 0x06}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
- {0x00, 0x07}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
- {0x00, 0x08}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
- {0x00, 0x09}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
- {0x02, 0x0A}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
- {0x00, 0x02}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
- {0x00, 0x03}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
- {0x03, 0x0D}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
- {0x03, 0x0E}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
- {0x02, 0x0F}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
- {0x00, 0x00}, /* 0x3A, */
- {0x00, 0x00}, /* 0x3B, */
- {0x00, 0x00}, /* 0x3C, */
- {0x00, 0x00}, /* 0x3D, */
- {0x00, 0x00}, /* 0x3E, */
- {0x00, 0x00}, /* 0x3F, */
- {0x02, 0x10}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
- {0x03, 0x00}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
-};
-
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE =
-{0x03, 0x02}; /* use the combination for max channel numbers */
-
-static void dummy_event_callback(struct rtw_adapter *adapter, const u8 *pbuf)
-{
-}
-
-static struct fwevent wlanevents[] =
-{
- {0, &dummy_event_callback}, /*0*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, &rtw_survey_event_cb23a}, /*8*/
- {sizeof(struct surveydone_event), &rtw_surveydone_event_callback23a},
- {0, &rtw23a_joinbss_event_cb}, /*10*/
- {sizeof(struct stassoc_event), &rtw_stassoc_event_callback23a},
- {sizeof(struct stadel_event), &rtw_stadel_event_callback23a},
- {0, &dummy_event_callback},
- {0, &dummy_event_callback},
- {0, NULL}, /*15*/
- {0, NULL},
- {0, NULL},
- {0, NULL},
- {0, &dummy_event_callback},
- {0, NULL}, /*20*/
- {0, NULL},
- {0, NULL},
- {0, &dummy_event_callback},
- {0, NULL},
-};
-
-
-static void rtw_correct_TSF(struct rtw_adapter *padapter)
-{
- hw_var_set_correct_tsf(padapter);
-}
-
-static void
-rtw_update_TSF(struct mlme_ext_priv *pmlmeext, struct ieee80211_mgmt *mgmt)
-{
- pmlmeext->TSFValue = get_unaligned_le64(&mgmt->u.beacon.timestamp);
-}
-
-/*
- * Search for channel @ch in the given channel set @ch_set
- * @ch_set: the channel set to search (terminated by ChannelNum == 0)
- * @ch: the channel number to look for
- *
- * Returns the index of @ch in @ch_set, or -1 if not found
- */
-int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch)
-{
- int i;
-
- for (i = 0; ch_set[i].ChannelNum != 0; i++) {
- if (ch == ch_set[i].ChannelNum)
- break;
- }
-
- if (ch_set[i].ChannelNum == 0) /* hit the terminator: not found */
- return -1;
- return i;
-}
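A stand-alone sketch (minimal stand-in types, not the driver's own structures; search_channel is an illustrative name, not a driver API) of the sentinel-terminated lookup performed above, mirroring the fixed termination check: the set ends with a ChannelNum of 0, and the search returns the matching index or -1.

#include <stdio.h>

struct chan_entry {
	unsigned int ChannelNum;	/* 0 terminates the set */
};

static int search_channel(const struct chan_entry *set, unsigned int ch)
{
	int i;

	for (i = 0; set[i].ChannelNum != 0; i++) {
		if (set[i].ChannelNum == ch)
			return i;
	}
	return -1;			/* hit the terminator without a match */
}

int main(void)
{
	const struct chan_entry set[] = { {1}, {6}, {11}, {36}, {0} };

	printf("ch 11 -> index %d\n", search_channel(set, 11));	/* 2 */
	printf("ch 13 -> index %d\n", search_channel(set, 13));	/* -1 */
	return 0;
}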
-
-/****************************************************************************
-
-Following are the initialization functions for WiFi MLME
-
-*****************************************************************************/
-
-int init_hw_mlme_ext23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
- return _SUCCESS;
-}
-
-static void init_mlme_ext_priv23a_value(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- unsigned char mixed_datarate[NumRates] = {
- _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
- _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_,
- _48M_RATE_, _54M_RATE_, 0xff};
- unsigned char mixed_basicrate[NumRates] = {
- _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
- _12M_RATE_, _24M_RATE_, 0xff,};
-
- atomic_set(&pmlmeext->event_seq, 0);
- /* reset to zero when disconnect at client mode */
- pmlmeext->mgnt_seq = 0;
-
- pmlmeext->cur_channel = padapter->registrypriv.channel;
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- pmlmeext->retry = 0;
-
- pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
-
- memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
- memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
-
- if (pmlmeext->cur_channel > 14)
- pmlmeext->tx_rate = IEEE80211_OFDM_RATE_6MB;
- else
- pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
-
- pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
- pmlmeext->sitesurvey_res.channel_idx = 0;
- pmlmeext->sitesurvey_res.bss_cnt = 0;
- pmlmeext->scan_abort = false;
-
- pmlmeinfo->state = MSR_NOLINK;
- pmlmeinfo->reauth_count = 0;
- pmlmeinfo->reassoc_count = 0;
- pmlmeinfo->link_count = 0;
- pmlmeinfo->auth_seq = 0;
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
- pmlmeinfo->key_index = 0;
- pmlmeinfo->iv = 0;
-
- pmlmeinfo->enc_algo = 0;
- pmlmeinfo->authModeToggle = 0;
-
- memset(pmlmeinfo->chg_txt, 0, 128);
-
- pmlmeinfo->slotTime = SHORT_SLOT_TIME;
- pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
-
- pmlmeinfo->dialogToken = 0;
-
- pmlmeext->action_public_rxseq = 0xffff;
- pmlmeext->action_public_dialog_token = 0xff;
-}
-
-static int has_channel(struct rt_channel_info *channel_set,
- u8 chanset_size, u8 chan)
-{
- int i;
-
- for (i = 0; i < chanset_size; i++) {
- if (channel_set[i].ChannelNum == chan)
- return 1;
- }
-
- return 0;
-}
-
-static void init_channel_list(struct rtw_adapter *padapter,
- struct rt_channel_info *channel_set,
- u8 chanset_size,
- struct p2p_channels *channel_list)
-{
- struct p2p_oper_class_map op_class[] = {
- { IEEE80211G, 81, 1, 13, 1, BW20 },
- { IEEE80211G, 82, 14, 14, 1, BW20 },
- { IEEE80211A, 115, 36, 48, 4, BW20 },
- { IEEE80211A, 116, 36, 44, 8, BW40PLUS },
- { IEEE80211A, 117, 40, 48, 8, BW40MINUS },
- { IEEE80211A, 124, 149, 161, 4, BW20 },
- { IEEE80211A, 125, 149, 169, 4, BW20 },
- { IEEE80211A, 126, 149, 157, 8, BW40PLUS },
- { IEEE80211A, 127, 153, 161, 8, BW40MINUS },
- { -1, 0, 0, 0, 0, BW20 }
- };
-
- int cla, op;
-
- cla = 0;
-
- for (op = 0; op_class[op].op_class; op++) {
- u8 ch;
- struct p2p_oper_class_map *o = &op_class[op];
- struct p2p_reg_class *reg = NULL;
-
- for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
- if (!has_channel(channel_set, chanset_size, ch))
- continue;
-
- if ((0 == padapter->registrypriv.ht_enable) &&
- (o->inc == 8))
- continue;
-
- if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
- continue;
-
- if (reg == NULL) {
- reg = &channel_list->reg_class[cla];
- cla++;
- reg->reg_class = o->op_class;
- reg->channels = 0;
- }
- reg->channel[reg->channels] = ch;
- reg->channels++;
- }
- }
- channel_list->reg_classes = cla;
-}
-
-static u8 init_channel_set(struct rtw_adapter *padapter, u8 cplan,
- struct rt_channel_info *c_set)
-{
- u8 i, ch_size = 0;
- u8 b5GBand = false, b2_4GBand = false;
- u8 Index2G = 0, Index5G = 0;
-
- memset(c_set, 0, sizeof(struct rt_channel_info) * MAX_CHANNEL_NUM);
-
- if (cplan >= RT_CHANNEL_DOMAIN_MAX &&
- cplan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
- DBG_8723A("ChannelPlan ID %x error !!!!!\n", cplan);
- return ch_size;
- }
-
- if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
- b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == cplan)
- Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
- else
- Index2G = RTW_ChannelPlanMap[cplan].Index2G;
- }
-
- if (padapter->registrypriv.wireless_mode & WIRELESS_11A) {
- b5GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == cplan)
- Index5G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index5G;
- else
- Index5G = RTW_ChannelPlanMap[cplan].Index5G;
- }
-
- if (b2_4GBand) {
- for (i = 0; i < RTW_ChannelPlan2G[Index2G].Len; i++) {
- c_set[ch_size].ChannelNum =
- RTW_ChannelPlan2G[Index2G].Channel[i];
-
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == cplan) ||
- /* Channels 1~11 are active scan, 12~14 are passive */
- RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == cplan) {
- if (c_set[ch_size].ChannelNum >= 1 &&
- c_set[ch_size].ChannelNum <= 11)
- c_set[ch_size].ScanType = SCAN_ACTIVE;
- else if (c_set[ch_size].ChannelNum >= 12 &&
- c_set[ch_size].ChannelNum <= 14)
- c_set[ch_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == cplan ||
- RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == cplan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {
- /* channel 12~13, passive scan */
- if (c_set[ch_size].ChannelNum <= 11)
- c_set[ch_size].ScanType = SCAN_ACTIVE;
- else
- c_set[ch_size].ScanType = SCAN_PASSIVE;
- } else
- c_set[ch_size].ScanType = SCAN_ACTIVE;
-
- ch_size++;
- }
- }
-
- if (b5GBand) {
- for (i = 0; i < RTW_ChannelPlan5G[Index5G].Len; i++) {
- if (RTW_ChannelPlan5G[Index5G].Channel[i] <= 48 ||
- RTW_ChannelPlan5G[Index5G].Channel[i] >= 149) {
- c_set[ch_size].ChannelNum =
- RTW_ChannelPlan5G[Index5G].Channel[i];
- if (RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == cplan) {
- /* passive scan for all 5G channels */
- c_set[ch_size].ScanType =
- SCAN_PASSIVE;
- } else
- c_set[ch_size].ScanType =
- SCAN_ACTIVE;
- DBG_8723A("%s(): channel_set[%d].ChannelNum = "
- "%d\n", __func__, ch_size,
- c_set[ch_size].ChannelNum);
- ch_size++;
- }
- }
- }
-
- return ch_size;
-}
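A stand-alone sketch (not driver code; the function and enumerator names are illustrative only) of the 2.4 GHz scan-type rule applied above for the world-wide channel plans: channels 1-11 are scanned actively, channels 12-14 only passively.

#include <stdio.h>

enum sketch_scan_type { SKETCH_SCAN_ACTIVE, SKETCH_SCAN_PASSIVE };

static enum sketch_scan_type classify_2g_channel(unsigned int ch)
{
	/* active scan is only allowed on channels 1..11 in these plans */
	return (ch >= 1 && ch <= 11) ? SKETCH_SCAN_ACTIVE : SKETCH_SCAN_PASSIVE;
}

int main(void)
{
	unsigned int ch;

	for (ch = 1; ch <= 14; ch++)
		printf("channel %2u -> %s scan\n", ch,
		       classify_2g_channel(ch) == SKETCH_SCAN_ACTIVE ?
		       "active" : "passive");
	return 0;
}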
-
-int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
-{
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmlmeext->padapter = padapter;
-
- init_mlme_ext_priv23a_value(padapter);
- pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
-
- init_mlme_ext_timer23a(padapter);
-
-#ifdef CONFIG_8723AU_AP_MODE
- init_mlme_ap_info23a(padapter);
-#endif
-
- pmlmeext->max_chan_nums = init_channel_set(padapter,
- pmlmepriv->ChannelPlan,
- pmlmeext->channel_set);
- init_channel_list(padapter, pmlmeext->channel_set,
- pmlmeext->max_chan_nums, &pmlmeext->channel_list);
-
- pmlmeext->chan_scan_time = SURVEY_TO;
- pmlmeext->mlmeext_init = true;
-
- pmlmeext->active_keep_alive_check = true;
- return _SUCCESS;
-}
-
-void free_mlme_ext_priv23a(struct mlme_ext_priv *pmlmeext)
-{
- struct rtw_adapter *padapter = pmlmeext->padapter;
-
- if (!padapter)
- return;
-
- if (padapter->bDriverStopped == true) {
- del_timer_sync(&pmlmeext->survey_timer);
- del_timer_sync(&pmlmeext->link_timer);
- /* del_timer_sync(&pmlmeext->ADDBA_timer); */
- }
-}
-
-static void
-_mgt_dispatcher23a(struct rtw_adapter *padapter, struct mlme_handler *ptable,
- struct recv_frame *precv_frame)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
- if (ptable->func) {
- /* only accept frames whose RA (A1) is our own address
- or the broadcast address. */
- if (!ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv))&&
- !is_broadcast_ether_addr(hdr->addr1))
- return;
-
- ptable->func(padapter, precv_frame);
- }
-}
-
-void mgt_dispatcher23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct mlme_handler *ptable;
-#ifdef CONFIG_8723AU_AP_MODE
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-#endif /* CONFIG_8723AU_AP_MODE */
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- struct sta_info *psta;
- u16 stype;
- int index;
-
- if (!ieee80211_is_mgmt(mgmt->frame_control))
- return;
-
- /* only accept frames whose RA (A1) is our own address or the
- broadcast address. */
- if (!ether_addr_equal(mgmt->da, myid(&padapter->eeprompriv)) &&
- !is_broadcast_ether_addr(mgmt->da))
- return;
-
- ptable = mlme_sta_tbl;
-
- stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
- index = stype >> 4;
-
- if (index > 13) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "Currently we do not support reserved sub-fr-type =%d\n",
- index);
- return;
- }
- ptable += index;
-
- psta = rtw_get_stainfo23a(&padapter->stapriv, mgmt->sa);
-
- if (psta) {
- if (ieee80211_has_retry(mgmt->frame_control)) {
- if (precv_frame->attrib.seq_num ==
- psta->RxMgmtFrameSeqNum) {
- /* drop the duplicate management frame */
- DBG_8723A("Drop duplicate management frame "
- "with seq_num = %d.\n",
- precv_frame->attrib.seq_num);
- return;
- }
- }
- psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
- }
-
-#ifdef CONFIG_8723AU_AP_MODE
- switch (stype) {
- case IEEE80211_STYPE_AUTH:
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- ptable->func = &OnAuth23a;
- else
- ptable->func = &OnAuth23aClient23a;
- /* fall through */
- case IEEE80211_STYPE_ASSOC_REQ:
- case IEEE80211_STYPE_REASSOC_REQ:
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
- break;
- case IEEE80211_STYPE_PROBE_REQ:
- /* AP and non-AP cases take the same path here */
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
- break;
- case IEEE80211_STYPE_BEACON:
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
- break;
- case IEEE80211_STYPE_ACTION:
- /* if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) */
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
- break;
- default:
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
- break;
- }
-#else
- _mgt_dispatcher23a(padapter, ptable, precv_frame);
-#endif
-}
-
-/****************************************************************************
-
-Following are the callback functions for each subtype of the management frames
-
-*****************************************************************************/
-
-static int
-OnProbeReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- const u8 *ie;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur = &pmlmeinfo->network;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- int len = skb->len;
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- return _SUCCESS;
-
- if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
- !check_fwstate(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
- return _SUCCESS;
-
- if (unlikely(!ieee80211_is_probe_req(mgmt->frame_control))) {
- printk(KERN_WARNING "%s: Received non probe request frame\n",
- __func__);
- return _FAIL;
- }
-
- len -= offsetof(struct ieee80211_mgmt, u.probe_req.variable);
-
- ie = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.probe_req.variable, len);
-
- /* check (wildcard) SSID */
- if (!ie)
- goto out;
-
- if ((ie[1] && memcmp(ie + 2, cur->Ssid.ssid, cur->Ssid.ssid_len)) ||
- (ie[1] == 0 && pmlmeinfo->hidden_ssid_mode)) {
- return _SUCCESS;
- }
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- pmlmepriv->cur_network.join_res)
- issue_probersp(padapter, mgmt->sa);
-
-out:
- return _SUCCESS;
-}
-
-static int
-OnProbeRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- report_survey_event23a(padapter, precv_frame);
- return _SUCCESS;
- }
-
- return _SUCCESS;
-}
-
-static int
-OnBeacon23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- int cam_idx;
- struct sta_info *psta;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- int pkt_len = skb->len;
- struct wlan_bssid_ex *pbss;
- int ret = _SUCCESS;
- u8 *p, *pie;
- int pie_len;
- u32 ielen = 0;
-
- pie = mgmt->u.beacon.variable;
- pie_len = pkt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
- p = rtw_get_ie23a(pie, WLAN_EID_EXT_SUPP_RATES, &ielen, pie_len);
- if (p && ielen > 0) {
- if (p[1 + ielen] == 0x2D && p[2 + ielen] != 0x2D) {
- /* Invalid value 0x2D is detected in Extended Supported
- * Rates (ESR) IE. Try to fix the IE length to avoid
- * failed Beacon parsing.
- */
- DBG_8723A("[WIFIDBG] Error in ESR IE is detected in "
- "Beacon of BSSID: %pM. Fix the length of "
- "ESR IE to avoid failed Beacon parsing.\n",
- mgmt->bssid);
- p[1] = ielen - 1;
- }
- }
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- report_survey_event23a(padapter, precv_frame);
- return _SUCCESS;
- }
-
- if (!ether_addr_equal(mgmt->bssid,
- get_my_bssid23a(&pmlmeinfo->network)))
- goto out;
-
- if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
- /* update the current network before auth,
- otherwise some IEs may be stale */
- pbss = collect_bss_info(padapter, precv_frame);
- if (pbss) {
- update_network23a(&pmlmepriv->cur_network.network, pbss,
- padapter, true);
- rtw_get_bcn_info23a(&pmlmepriv->cur_network);
- kfree(pbss);
- }
-
- /* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor =
- check_assoc_AP23a((u8 *)&mgmt->u.beacon, pkt_len -
- offsetof(struct ieee80211_mgmt, u));
-
- /* update TSF Value */
- rtw_update_TSF(pmlmeext, mgmt);
-
- /* start auth */
- start_clnt_auth(padapter);
-
- return _SUCCESS;
- }
-
- if (((pmlmeinfo->state & 0x03) == MSR_AP) &&
- (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
- psta = rtw_get_stainfo23a(pstapriv, mgmt->sa);
- if (psta) {
- ret = rtw_check_bcn_info23a(padapter, mgmt, pkt_len);
- if (ret != _SUCCESS) {
- DBG_8723A_LEVEL(_drv_always_, "ap has changed, "
- "disconnect now\n");
- receive_disconnect23a(padapter, pmlmeinfo->network.MacAddress, 65535);
- return _SUCCESS;
- }
- /* update WMM, ERP in the beacon */
- /* todo: use a timer instead of counting
- the number of received beacons */
- if ((sta_rx_pkts(psta) & 0xf) == 0) {
- /* DBG_8723A("update_bcn_info\n"); */
- update_beacon23a_info(padapter, mgmt,
- pkt_len, psta);
- }
- }
- } else if ((pmlmeinfo->state&0x03) == MSR_ADHOC) {
- psta = rtw_get_stainfo23a(pstapriv, mgmt->sa);
- if (psta) {
- /* update WMM, ERP in the beacon */
- /* todo: use a timer instead of counting the
- number of received beacons */
- if ((sta_rx_pkts(psta) & 0xf) == 0) {
- /* DBG_8723A("update_bcn_info\n"); */
- update_beacon23a_info(padapter, mgmt,
- pkt_len, psta);
- }
- } else {
- /* allocate a new CAM entry for IBSS station */
- cam_idx = allocate_fw_sta_entry23a(padapter);
- if (cam_idx == NUM_STA)
- goto out;
-
- /* get supported rate */
- if (update_sta_support_rate23a(padapter, pie, pie_len,
- cam_idx) == _FAIL) {
- pmlmeinfo->FW_sta_info[cam_idx].status = 0;
- goto out;
- }
-
- /* update TSF Value */
- rtw_update_TSF(pmlmeext, mgmt);
-
- /* report sta add event */
- report_add_sta_event23a(padapter, mgmt->sa,
- cam_idx);
- }
- }
-
-out:
-
- return _SUCCESS;
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-static int
-OnAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- static struct sta_info stat;
- struct sta_info *pstat = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- u8 *pframe;
- const u8 *p;
- unsigned char *sa;
- u16 auth_mode, seq, algorithm;
- int status, len = skb->len;
-
- if ((pmlmeinfo->state & 0x03) != MSR_AP)
- return _FAIL;
-
- DBG_8723A("+OnAuth23a\n");
-
- sa = mgmt->sa;
-
- auth_mode = psecuritypriv->dot11AuthAlgrthm;
-
- pframe = mgmt->u.auth.variable;
- len = skb->len - offsetof(struct ieee80211_mgmt, u.auth.variable);
-
- seq = le16_to_cpu(mgmt->u.auth.auth_transaction);
- algorithm = le16_to_cpu(mgmt->u.auth.auth_alg);
-
- DBG_8723A("auth alg =%x, seq =%X\n", algorithm, seq);
-
- if (auth_mode == 2 &&
- psecuritypriv->dot11PrivacyAlgrthm != WLAN_CIPHER_SUITE_WEP40 &&
- psecuritypriv->dot11PrivacyAlgrthm != WLAN_CIPHER_SUITE_WEP104)
- auth_mode = 0;
-
- /* rx a shared-key auth but shared not enabled, or */
- /* rx a open-system auth but shared-key is enabled */
- if ((algorithm != WLAN_AUTH_OPEN && auth_mode == 0) ||
- (algorithm == WLAN_AUTH_OPEN && auth_mode == 1)) {
- DBG_8723A("auth rejected due to bad alg [alg =%d, auth_mib "
- "=%d] %02X%02X%02X%02X%02X%02X\n",
- algorithm, auth_mode,
- sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
-
- status = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
-
- goto auth_fail;
- }
-
- if (rtw_access_ctrl23a(padapter, sa) == false) {
- status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
- goto auth_fail;
- }
-
- pstat = rtw_get_stainfo23a(pstapriv, sa);
- if (!pstat) {
- /* allocate a new one */
- DBG_8723A("going to alloc stainfo for sa =%pM\n", sa);
- pstat = rtw_alloc_stainfo23a(pstapriv, sa, GFP_ATOMIC);
- if (!pstat) {
- DBG_8723A(" Exceed the upper limit of supported "
- "clients...\n");
- status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
- goto auth_fail;
- }
-
- pstat->state = WIFI_FW_AUTH_NULL;
- pstat->auth_seq = 0;
-
- /* pstat->flags = 0; */
- /* pstat->capability = 0; */
- } else {
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&pstat->asoc_list)) {
- list_del_init(&pstat->asoc_list);
- pstapriv->asoc_list_cnt--;
- if (pstat->expire_to > 0) {
- /* TODO: STA re_auth within expire_to */
- }
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- if (seq == 1) {
- /* TODO: STA re_auth and auth timeout */
- }
- }
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (list_empty(&pstat->auth_list)) {
- list_add_tail(&pstat->auth_list, &pstapriv->auth_list);
- pstapriv->auth_list_cnt++;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- if (pstat->auth_seq == 0)
- pstat->expire_to = pstapriv->auth_to;
-
- if ((pstat->auth_seq + 1) != seq) {
- DBG_8723A("(1)auth rejected because out of seq [rx_seq =%d, "
- "exp_seq =%d]!\n", seq, pstat->auth_seq+1);
- status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
- goto auth_fail;
- }
-
- if (algorithm == WLAN_AUTH_OPEN && (auth_mode == 0 || auth_mode == 2)) {
- if (seq == 1) {
- pstat->state &= ~WIFI_FW_AUTH_NULL;
- pstat->state |= WIFI_FW_AUTH_SUCCESS;
- pstat->expire_to = pstapriv->assoc_to;
- pstat->authalg = algorithm;
- } else {
- DBG_8723A("(2)auth rejected because out of seq "
- "[rx_seq =%d, exp_seq =%d]!\n",
- seq, pstat->auth_seq+1);
- status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
- goto auth_fail;
- }
- } else { /* shared system or auto authentication */
- if (seq == 1) {
- /* prepare the challenge text... */
- pstat->state &= ~WIFI_FW_AUTH_NULL;
- pstat->state |= WIFI_FW_AUTH_STATE;
- pstat->authalg = algorithm;
- pstat->auth_seq = 2;
- } else if (seq == 3) {
- /* check the challenge text... */
- DBG_8723A("checking the challenge text...\n");
-
- p = cfg80211_find_ie(WLAN_EID_CHALLENGE, pframe, len);
- if (!p || p[1] <= 0) {
- DBG_8723A("auth rejected because challenge "
- "failure!(1)\n");
- status = WLAN_STATUS_CHALLENGE_FAIL;
- goto auth_fail;
- }
-
- if (!memcmp(p + 2, pstat->chg_txt, 128)) {
- pstat->state &= ~WIFI_FW_AUTH_STATE;
- pstat->state |= WIFI_FW_AUTH_SUCCESS;
- /* challenge text is correct... */
- pstat->expire_to = pstapriv->assoc_to;
- } else {
- DBG_8723A("auth rejected because challenge "
- "failure!\n");
- status = WLAN_STATUS_CHALLENGE_FAIL;
- goto auth_fail;
- }
- } else {
- DBG_8723A("(3)auth rejected because out of seq "
- "[rx_seq =%d, exp_seq =%d]!\n",
- seq, pstat->auth_seq+1);
- status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
- goto auth_fail;
- }
- }
-
- /* Now, we are going to issue_auth... */
- pstat->auth_seq = seq + 1;
-
- issue_auth(padapter, pstat, WLAN_STATUS_SUCCESS);
-
- if (pstat->state & WIFI_FW_AUTH_SUCCESS)
- pstat->auth_seq = 0;
-
- return _SUCCESS;
-
-auth_fail:
-
- if (pstat)
- rtw_free_stainfo23a(padapter, pstat);
-
- pstat = &stat;
- memset(pstat, 0, sizeof(stat));
- pstat->auth_seq = 2;
- ether_addr_copy(pstat->hwaddr, sa);
-
- issue_auth(padapter, pstat, (unsigned short)status);
-
- return _FAIL;
-}
-#endif
-
-static int
-OnAuth23aClient23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned int seq, status, algthm;
- unsigned int go2asoc = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- const u8 *p;
- u8 *pie;
- int plen = skb->len;
-
- DBG_8723A("%s\n", __func__);
-
- /* check A1 matches or not */
- if (!ether_addr_equal(myid(&padapter->eeprompriv), mgmt->da))
- return _SUCCESS;
-
- if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
- return _SUCCESS;
-
- pie = mgmt->u.auth.variable;
- plen -= offsetof(struct ieee80211_mgmt, u.auth.variable);
-
- algthm = le16_to_cpu(mgmt->u.auth.auth_alg);
- seq = le16_to_cpu(mgmt->u.auth.auth_transaction);
- status = le16_to_cpu(mgmt->u.auth.status_code);
-
- if (status) {
- DBG_8723A("clnt auth fail, status: %d\n", status);
- /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
- if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
- else
- pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
- /* pmlmeinfo->reauth_count = 0; */
- }
-
- set_link_timer(pmlmeext, 1);
- goto authclnt_fail;
- }
-
- if (seq == 2) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
- /* legacy shared key system */
- p = cfg80211_find_ie(WLAN_EID_CHALLENGE, pie, plen);
-
- if (!p) {
- /* DBG_8723A("marc: no challenge text?\n"); */
- goto authclnt_fail;
- }
-
- memcpy((void *)(pmlmeinfo->chg_txt), p + 2, p[1]);
- pmlmeinfo->auth_seq = 3;
- issue_auth(padapter, NULL, 0);
- set_link_timer(pmlmeext, REAUTH_TO);
-
- return _SUCCESS;
- } else {
- /* open system */
- go2asoc = 1;
- }
- } else if (seq == 4) {
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
- go2asoc = 1;
- else
- goto authclnt_fail;
- } else {
- /* this is also illegal */
- /* DBG_8723A("marc: clnt auth failed due to illegal seq =%x\n",
- seq); */
- goto authclnt_fail;
- }
-
- if (go2asoc) {
- DBG_8723A_LEVEL(_drv_always_, "auth success, start assoc\n");
- start_clnt_assoc(padapter);
- return _SUCCESS;
- }
-
-authclnt_fail:
-
- /* pmlmeinfo->state &= ~(WIFI_FW_AUTH_STATE); */
-
- return _FAIL;
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-static int rtw_validate_vendor_specific_ies(const u8 *pos, int elen)
-{
- unsigned int oui;
-
- /* The first 3 bytes of a vendor specific information element are the
- * vendor's IEEE OUI. The following byte is used as a vendor specific
- * sub-type. */
- if (elen < 4) {
- DBG_8723A("short vendor specific information element "
- "ignored (len =%i)\n", elen);
- return -EINVAL;
- }
-
- oui = RTW_GET_BE24(pos);
- switch (oui) {
- case WLAN_OUI_MICROSOFT:
- /* Microsoft/Wi-Fi information elements are further typed and
- * subtyped */
- switch (pos[3]) {
- case WLAN_OUI_TYPE_MICROSOFT_WPA:
- /* Microsoft OUI (00:50:F2) with OUI Type 1:
- * real WPA information element */
- break;
- case WLAN_OUI_TYPE_MICROSOFT_WMM:
- if (elen < 5) {
- DBG_8723A("short WME information element "
- "ignored (len =%i)\n", elen);
- return -EINVAL;
- }
- switch (pos[4]) {
- case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
- case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
- break;
- case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
- break;
- default:
- DBG_8723A("unknown WME information element "
- "ignored (subtype =%d len =%i)\n",
- pos[4], elen);
- return -EINVAL;
- }
- break;
- case WLAN_OUI_TYPE_MICROSOFT_WPS:
- /* Wi-Fi Protected Setup (WPS) IE */
- break;
- default:
- DBG_8723A("Unknown Microsoft information element "
- "ignored (type =%d len =%i)\n",
- pos[3], elen);
- return -EINVAL;
- }
- break;
-
- case OUI_BROADCOM:
- switch (pos[3]) {
- case VENDOR_HT_CAPAB_OUI_TYPE:
- break;
- default:
- DBG_8723A("Unknown Broadcom information element "
- "ignored (type =%d len =%i)\n", pos[3], elen);
- return -EINVAL;
- }
- break;
-
- default:
- DBG_8723A("unknown vendor specific information element "
- "ignored (vendor OUI %02x:%02x:%02x len =%i)\n",
- pos[0], pos[1], pos[2], elen);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rtw_validate_frame_ies(const u8 *start, uint len)
-{
- const u8 *pos = start;
- int left = len;
- int unknown = 0;
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left) {
- DBG_8723A("%s: IEEE 802.11 failed (id =%d elen =%d "
- "left =%i)\n", __func__, id, elen, left);
- return -EINVAL;
- }
-
- switch (id) {
- case WLAN_EID_SSID:
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_CHALLENGE:
- case WLAN_EID_ERP_INFO:
- case WLAN_EID_EXT_SUPP_RATES:
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (rtw_validate_vendor_specific_ies(pos, elen))
- unknown++;
- break;
- case WLAN_EID_RSN:
- case WLAN_EID_PWR_CAPABILITY:
- case WLAN_EID_SUPPORTED_CHANNELS:
- case WLAN_EID_MOBILITY_DOMAIN:
- case WLAN_EID_FAST_BSS_TRANSITION:
- case WLAN_EID_TIMEOUT_INTERVAL:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_HT_OPERATION:
- default:
- unknown++;
- DBG_8723A("%s IEEE 802.11 ignored unknown element "
- "(id =%d elen =%d)\n", __func__, id, elen);
- break;
- }
-
- left -= elen;
- pos += elen;
- }
-
- if (left)
- return -EINVAL;
-
- return 0;
-}
-#endif
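A stand-alone sketch (hypothetical buffer and function name walk_ies, not driver data or a driver API) of the id/length/value walk that rtw_validate_frame_ies() performs above: each element starts with a one-byte ID and a one-byte length, and the length must not run past the remaining buffer.

#include <stdio.h>

static int walk_ies(const unsigned char *pos, int left)
{
	while (left >= 2) {
		unsigned char id = pos[0];
		unsigned char elen = pos[1];

		pos += 2;
		left -= 2;

		if (elen > left)
			return -1;		/* truncated element */

		printf("IE id %u, len %u\n", id, elen);

		pos += elen;
		left -= elen;
	}
	return left ? -1 : 0;			/* a lone trailing byte is invalid */
}

int main(void)
{
	/* SSID "ab" (id 0) followed by Supported Rates (id 1) with 2 rates */
	const unsigned char ies[] = { 0x00, 0x02, 'a', 'b', 0x01, 0x02, 0x82, 0x84 };

	return walk_ies(ies, sizeof(ies)) ? 1 : 0;
}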
-
-static int
-OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
-#ifdef CONFIG_8723AU_AP_MODE
- u16 capab_info, listen_interval;
- struct sta_info *pstat;
- unsigned char reassoc;
- int i, wpa_ie_len, left;
- unsigned char supportRate[16];
- int supportRateNum;
- unsigned short status = WLAN_STATUS_SUCCESS;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- const u8 *pos, *p, *wpa_ie, *wps_ie;
- u8 *pframe = skb->data;
- uint pkt_len = skb->len;
- int r;
-
- if ((pmlmeinfo->state & 0x03) != MSR_AP)
- return _FAIL;
-
- left = pkt_len - sizeof(struct ieee80211_hdr_3addr);
- if (ieee80211_is_assoc_req(mgmt->frame_control)) {
- reassoc = 0;
- pos = mgmt->u.assoc_req.variable;
- left -= offsetof(struct ieee80211_mgmt, u.assoc_req.variable);
- } else { /* WIFI_REASSOCREQ */
- reassoc = 1;
- pos = mgmt->u.reassoc_req.variable;
- left -= offsetof(struct ieee80211_mgmt, u.reassoc_req.variable);
- }
-
- if (left < 0) {
- DBG_8723A("handle_assoc(reassoc =%d) - too short payload "
- "(len =%lu)\n", reassoc, (unsigned long)pkt_len);
- return _FAIL;
- }
-
- pstat = rtw_get_stainfo23a(pstapriv, mgmt->sa);
- if (!pstat) {
- status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
- goto asoc_class2_error;
- }
-
- /* These two are located at the same offsets whether it's an
- * assoc_req or a reassoc_req */
- capab_info = get_unaligned_le16(&mgmt->u.assoc_req.capab_info);
- listen_interval =
- get_unaligned_le16(&mgmt->u.assoc_req.listen_interval);
-
- DBG_8723A("%s\n", __func__);
-
- /* check if this station has been successfully authenticated/associated */
- if (!(pstat->state & WIFI_FW_AUTH_SUCCESS)) {
- if (!(pstat->state & WIFI_FW_ASSOC_SUCCESS)) {
- status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
- goto asoc_class2_error;
- } else {
- pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
- pstat->state |= WIFI_FW_ASSOC_STATE;
- }
- } else {
- pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
- pstat->state |= WIFI_FW_ASSOC_STATE;
- }
-
- pstat->capability = capab_info;
-
- /* now validate all IEEE 802.11 IEs in the frame */
-
- if (rtw_validate_frame_ies(pos, left)) {
- DBG_8723A("STA %pM sent invalid association request\n",
- pstat->hwaddr);
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto OnAssocReq23aFail;
- }
-
- /* now we should check all the fields... */
- /* checking SSID */
- p = cfg80211_find_ie(WLAN_EID_SSID, pos, left);
- if (!p || p[1] == 0) {
- /* broadcast SSID, which is not allowed in an assoc request */
- DBG_8723A("STA %pM sent invalid association request lacking an SSID\n",
- pstat->hwaddr);
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto OnAssocReq23aFail;
- } else {
- /* check if ssid match */
- if (memcmp(p + 2, cur->Ssid.ssid, cur->Ssid.ssid_len))
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
-
- if (p[1] != cur->Ssid.ssid_len)
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- }
-
- if (status != WLAN_STATUS_SUCCESS)
- goto OnAssocReq23aFail;
-
- /* check if the supported rate is ok */
- p = cfg80211_find_ie(WLAN_EID_SUPP_RATES, pos, left);
- if (!p) {
- DBG_8723A("Rx a sta assoc-req which supported rate is "
- "empty!\n");
- /* use our own rate set as statoin used */
- /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
- /* supportRateNum = AP_BSSRATE_LEN; */
-
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto OnAssocReq23aFail;
- } else {
- memcpy(supportRate, p + 2, p[1]);
- supportRateNum = p[1];
-
- p = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, pos, left);
- if (p) {
- if (supportRateNum + p[1] <= sizeof(supportRate)) {
- memcpy(supportRate + supportRateNum, p + 2, p[1]);
- supportRateNum += p[1];
- }
- }
- }
-
- /* todo: mask supportRate between AP & STA -> move to update raid */
- /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
-
- /* update station supportRate */
- pstat->bssratelen = supportRateNum;
- memcpy(pstat->bssrateset, supportRate, supportRateNum);
- Update23aTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
-
- /* check RSN/WPA/WPS */
- pstat->dot8021xalg = 0;
- pstat->wpa_psk = 0;
- pstat->wpa_group_cipher = 0;
- pstat->wpa2_group_cipher = 0;
- pstat->wpa_pairwise_cipher = 0;
- pstat->wpa2_pairwise_cipher = 0;
- memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
-
- wpa_ie = cfg80211_find_ie(WLAN_EID_RSN, pos, left);
- if (!wpa_ie)
- wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- pos, left);
- if (wpa_ie) {
- int group_cipher = 0, pairwise_cipher = 0;
-
- wpa_ie_len = wpa_ie[1];
- if (psecuritypriv->wpa_psk & BIT(1)) {
- r = rtw_parse_wpa2_ie23a(wpa_ie, wpa_ie_len + 2,
- &group_cipher,
- &pairwise_cipher, NULL);
- if (r == _SUCCESS) {
- pstat->dot8021xalg = 1;/* psk, todo:802.1x */
- pstat->wpa_psk |= BIT(1);
-
- pstat->wpa2_group_cipher = group_cipher &
- psecuritypriv->wpa2_group_cipher;
- pstat->wpa2_pairwise_cipher = pairwise_cipher &
- psecuritypriv->wpa2_pairwise_cipher;
- } else
- status = WLAN_STATUS_INVALID_IE;
- } else if (psecuritypriv->wpa_psk & BIT(0)) {
- r = rtw_parse_wpa_ie23a(wpa_ie, wpa_ie_len + 2,
- &group_cipher, &pairwise_cipher,
- NULL);
- if (r == _SUCCESS) {
- pstat->dot8021xalg = 1;/* psk, todo:802.1x */
- pstat->wpa_psk |= BIT(0);
-
- pstat->wpa_group_cipher = group_cipher &
- psecuritypriv->wpa_group_cipher;
- pstat->wpa_pairwise_cipher = pairwise_cipher &
- psecuritypriv->wpa_pairwise_cipher;
- } else
- status = WLAN_STATUS_INVALID_IE;
- } else {
- wpa_ie = NULL;
- wpa_ie_len = 0;
- }
- if (wpa_ie && status == WLAN_STATUS_SUCCESS) {
- if (!pstat->wpa_group_cipher)
- status = WLAN_STATUS_INVALID_GROUP_CIPHER;
-
- if (!pstat->wpa_pairwise_cipher)
- status = WLAN_STATUS_INVALID_PAIRWISE_CIPHER;
- }
- }
-
- if (status != WLAN_STATUS_SUCCESS)
- goto OnAssocReq23aFail;
-
- pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
-
- wps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- pos, left);
-
- if (!wpa_ie) {
- if (wps_ie) {
- DBG_8723A("STA included WPS IE in (Re)Association "
- "Request - assume WPS is used\n");
- pstat->flags |= WLAN_STA_WPS;
- } else {
- DBG_8723A("STA did not include WPA/RSN IE in (Re)"
- "Association Request - possible WPS use\n");
- pstat->flags |= WLAN_STA_MAYBE_WPS;
- }
- } else {
- int copy_len;
-
- if (psecuritypriv->wpa_psk == 0) {
- DBG_8723A("STA %pM: WPA/RSN IE in association request, but AP don't support WPA/RSN\n",
- pstat->hwaddr);
-
- status = WLAN_STATUS_INVALID_IE;
-
- goto OnAssocReq23aFail;
- }
-
- if (wps_ie) {
- DBG_8723A("STA included WPS IE in (Re)Association "
- "Request - WPS is used\n");
- pstat->flags |= WLAN_STA_WPS;
- copy_len = 0;
- } else {
- copy_len = ((wpa_ie_len + 2) > sizeof(pstat->wpa_ie)) ?
- sizeof(pstat->wpa_ie) : (wpa_ie_len + 2);
- }
-
- if (copy_len > 0)
- memcpy(pstat->wpa_ie, wpa_ie - 2, copy_len);
- }
-
- /* check if there is a WMM IE & support for WMM-PS */
- pstat->flags &= ~WLAN_STA_WME;
- pstat->qos_option = 0;
- pstat->qos_info = 0;
- pstat->has_legacy_ac = true;
- pstat->uapsd_vo = 0;
- pstat->uapsd_vi = 0;
- pstat->uapsd_be = 0;
- pstat->uapsd_bk = 0;
- if (pmlmepriv->qos_option) {
- const u8 *end = pos + left;
-
- p = pos;
-
- for (;;) {
- left = end - p;
- p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WMM,
- p, left);
- if (p) {
- pstat->flags |= WLAN_STA_WME;
-
- pstat->qos_option = 1;
- pstat->qos_info = *(p + 8);
-
- pstat->max_sp_len =
- (pstat->qos_info >> 5) & 0x3;
-
- if ((pstat->qos_info & 0xf) != 0xf)
- pstat->has_legacy_ac = true;
- else
- pstat->has_legacy_ac = false;
-
- if (pstat->qos_info & 0xf) {
- if (pstat->qos_info & BIT(0))
- pstat->uapsd_vo = BIT(0)|BIT(1);
- else
- pstat->uapsd_vo = 0;
-
- if (pstat->qos_info & BIT(1))
- pstat->uapsd_vi = BIT(0)|BIT(1);
- else
- pstat->uapsd_vi = 0;
-
- if (pstat->qos_info & BIT(2))
- pstat->uapsd_bk = BIT(0)|BIT(1);
- else
- pstat->uapsd_bk = 0;
-
- if (pstat->qos_info & BIT(3))
- pstat->uapsd_be = BIT(0)|BIT(1);
- else
- pstat->uapsd_be = 0;
-
- break;
- }
- } else {
- break;
- }
- p = p + p[1] + 2;
- }
- }
-
- /* save HT capabilities in the sta object */
- memset(&pstat->htpriv.ht_cap, 0, sizeof(struct ieee80211_ht_cap));
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, pos, left);
-
- if (p && p[1] >= sizeof(struct ieee80211_ht_cap)) {
- pstat->flags |= WLAN_STA_HT;
-
- pstat->flags |= WLAN_STA_WME;
-
- memcpy(&pstat->htpriv.ht_cap, p + 2,
- sizeof(struct ieee80211_ht_cap));
- } else
- pstat->flags &= ~WLAN_STA_HT;
-
- if (!pmlmepriv->htpriv.ht_option && (pstat->flags & WLAN_STA_HT)) {
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto OnAssocReq23aFail;
- }
-
- if (pstat->flags & WLAN_STA_HT &&
- (pstat->wpa2_pairwise_cipher & WPA_CIPHER_TKIP ||
- pstat->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
- DBG_8723A("HT: %pM tried to use TKIP with HT association\n",
- pstat->hwaddr);
-
- /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
- /* goto OnAssocReq23aFail; */
- }
-
- pstat->flags |= WLAN_STA_NONERP;
- for (i = 0; i < pstat->bssratelen; i++) {
- if ((pstat->bssrateset[i] & 0x7f) > 22) {
- pstat->flags &= ~WLAN_STA_NONERP;
- break;
- }
- }
-
- if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
- else
- pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
-
- if (status != WLAN_STATUS_SUCCESS)
- goto OnAssocReq23aFail;
-
- /* TODO: identify_proprietary_vendor_ie(); */
- /* Realtek proprietary IE */
- /* identify if this is Broadcom sta */
- /* identify if this is ralink sta */
- /* Customer proprietary IE */
-
- /* get a unique AID */
- if (pstat->aid > 0) {
- DBG_8723A(" old AID %d\n", pstat->aid);
- } else {
- for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
- if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
- break;
-
- if (pstat->aid > NUM_STA)
- pstat->aid = NUM_STA;
- if (pstat->aid > pstapriv->max_num_sta) {
-
- pstat->aid = 0;
-
- DBG_8723A(" no room for more AIDs\n");
-
- status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
-
- goto OnAssocReq23aFail;
- } else {
- pstapriv->sta_aid[pstat->aid - 1] = pstat;
- DBG_8723A("allocate new AID = (%d)\n", pstat->aid);
- }
- }
-
- pstat->state &= ~WIFI_FW_ASSOC_STATE;
- pstat->state |= WIFI_FW_ASSOC_SUCCESS;
-
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (!list_empty(&pstat->auth_list)) {
- list_del_init(&pstat->auth_list);
- pstapriv->auth_list_cnt--;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (list_empty(&pstat->asoc_list)) {
- pstat->expire_to = pstapriv->expire_to;
- list_add_tail(&pstat->asoc_list, &pstapriv->asoc_list);
- pstapriv->asoc_list_cnt++;
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- /* now the station is qualified to join our BSS... */
- if (pstat->state & WIFI_FW_ASSOC_SUCCESS &&
- status == WLAN_STATUS_SUCCESS) {
- /* 1 bss_cap_update & sta_info_update23a */
- bss_cap_update_on_sta_join23a(padapter, pstat);
- sta_info_update23a(padapter, pstat);
-
- /* issue assoc rsp before notify station join event. */
- if (ieee80211_is_assoc_req(mgmt->frame_control))
- issue_assocrsp(padapter, status, pstat,
- IEEE80211_STYPE_ASSOC_RESP);
- else
- issue_assocrsp(padapter, status, pstat,
- IEEE80211_STYPE_REASSOC_RESP);
-
- /* 2 - report to upper layer */
- DBG_8723A("indicate_sta_join_event to upper layer - hostapd\n");
- rtw_cfg80211_indicate_sta_assoc(padapter, pframe, pkt_len);
-
- /* 3-(1) report sta add event */
- report_add_sta_event23a(padapter, pstat->hwaddr, pstat->aid);
- }
-
- return _SUCCESS;
-
-asoc_class2_error:
-
- issue_deauth23a(padapter, mgmt->sa, status);
- return _FAIL;
-
-OnAssocReq23aFail:
-
- pstat->aid = 0;
- if (ieee80211_is_assoc_req(mgmt->frame_control))
- issue_assocrsp(padapter, status, pstat,
- IEEE80211_STYPE_ASSOC_RESP);
- else
- issue_assocrsp(padapter, status, pstat,
- IEEE80211_STYPE_REASSOC_RESP);
-
-#endif /* CONFIG_8723AU_AP_MODE */
-
- return _FAIL;
-}
-
-static int
-OnAssocRsp23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *pmgmt = (struct ieee80211_mgmt *) skb->data;
- int res;
- unsigned short status;
- const u8 *p, *pie;
- u8 *pframe = skb->data;
- int pkt_len = skb->len;
- int pielen;
-
- DBG_8723A("%s\n", __func__);
-
- /* check A1 matches or not */
- if (!ether_addr_equal(myid(&padapter->eeprompriv), pmgmt->da))
- return _SUCCESS;
-
- if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
- return _SUCCESS;
-
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- return _SUCCESS;
-
- del_timer_sync(&pmlmeext->link_timer);
-
- /* status */
- status = le16_to_cpu(pmgmt->u.assoc_resp.status_code);
- if (status > 0) {
- DBG_8723A("assoc reject, status code: %d\n", status);
- pmlmeinfo->state = MSR_NOLINK;
- res = -4;
- goto report_assoc_result;
- }
-
- /* get capabilities */
- pmlmeinfo->capability = le16_to_cpu(pmgmt->u.assoc_resp.capab_info);
-
- /* set slot time */
- pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
-
- /* AID */
- res = pmlmeinfo->aid = le16_to_cpu(pmgmt->u.assoc_resp.aid) & 0x3fff;
-
- pie = pframe + offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
- pielen = pkt_len -
- offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
-
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY,
- pmgmt->u.assoc_resp.variable, pielen);
- if (p && p[1])
- HT_caps_handler23a(padapter, p);
-
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
- pmgmt->u.assoc_resp.variable, pielen);
- if (p && p[1])
- HT_info_handler23a(padapter, p);
-
- p = cfg80211_find_ie(WLAN_EID_ERP_INFO,
- pmgmt->u.assoc_resp.variable, pielen);
- if (p && p[1])
- ERP_IE_handler23a(padapter, p);
-
- pie = pframe + offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
- while (true) {
- p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WMM,
- pie, pframe + pkt_len - pie);
- if (!p)
- break;
-
- pie = p + p[1] + 2;
- /* if this IE is too short, try the next */
- if (p[1] <= 4)
- continue;
- /* if this IE is WMM params, we found what we wanted */
- if (p[6] == 1)
- break;
- }
-
- if (p && p[1])
- WMM_param_handler23a(padapter, p);
-
- pmlmeinfo->state &= ~WIFI_FW_ASSOC_STATE;
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
-
- /* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
- UpdateBrateTbl23a(padapter, pmlmeinfo->network.SupportedRates);
-
-report_assoc_result:
- pmlmepriv->assoc_rsp_len = 0;
- if (res > 0) {
- kfree(pmlmepriv->assoc_rsp);
- pmlmepriv->assoc_rsp = kmalloc(pkt_len, GFP_ATOMIC);
- if (pmlmepriv->assoc_rsp) {
- memcpy(pmlmepriv->assoc_rsp, pframe, pkt_len);
- pmlmepriv->assoc_rsp_len = pkt_len;
- }
- } else
- kfree(pmlmepriv->assoc_rsp);
-
- report_join_res23a(padapter, res);
-
- return _SUCCESS;
-}
-
-static int
-OnDeAuth23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned short reason;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
-
- if (!ether_addr_equal(mgmt->bssid,
- get_my_bssid23a(&pmlmeinfo->network)))
- return _SUCCESS;
-
- reason = le16_to_cpu(mgmt->u.deauth.reason_code);
-
- DBG_8723A("%s Reason code(%d)\n", __func__, reason);
-
-#ifdef CONFIG_8723AU_AP_MODE
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- DBG_8723A_LEVEL(_drv_always_, "ap recv deauth reason code(%d) "
- "sta:%pM\n", reason, mgmt->sa);
-
- psta = rtw_get_stainfo23a(pstapriv, mgmt->sa);
- if (psta) {
- u8 updated = 0;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&psta->asoc_list)) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta23a(padapter, psta,
- false, reason);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update23a(padapter, updated);
- }
-
- return _SUCCESS;
- } else
-#endif
- {
- DBG_8723A_LEVEL(_drv_always_, "sta recv deauth reason code(%d) "
- "sta:%pM\n", reason, mgmt->bssid);
-
- receive_disconnect23a(padapter, mgmt->bssid, reason);
- }
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
-
- return _SUCCESS;
-}
-
-static int
-OnDisassoc23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- unsigned short reason;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
-
- if (!ether_addr_equal(mgmt->bssid,
- get_my_bssid23a(&pmlmeinfo->network)))
- return _SUCCESS;
-
- reason = le16_to_cpu(mgmt->u.disassoc.reason_code);
-
- DBG_8723A("%s Reason code(%d)\n", __func__, reason);
-
-#ifdef CONFIG_8723AU_AP_MODE
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- DBG_8723A_LEVEL(_drv_always_, "ap recv disassoc reason code(%d)"
- " sta:%pM\n", reason, mgmt->sa);
-
- psta = rtw_get_stainfo23a(pstapriv, mgmt->sa);
- if (psta) {
- u8 updated = 0;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- if (!list_empty(&psta->asoc_list)) {
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
- updated = ap_free_sta23a(padapter, psta,
- false, reason);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update23a(padapter, updated);
- }
-
- return _SUCCESS;
- } else
-#endif
- {
-		DBG_8723A_LEVEL(_drv_always_, "sta recv disassoc reason "
-				"code(%d) sta:%pM\n", reason, mgmt->bssid);
-
- receive_disconnect23a(padapter, mgmt->bssid, reason);
- }
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
- return _SUCCESS;
-}
-
-static int
-OnAtim23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- DBG_8723A("%s\n", __func__);
- return _SUCCESS;
-}
-
-static int
-on_action_spct23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _FAIL;
-}
-
-static int
-OnAction23a_qos(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-static int
-OnAction23a_dls(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-static int OnAction23a_back23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- u8 *addr;
- struct sta_info *psta = NULL;
- struct recv_reorder_ctrl *preorder_ctrl;
- unsigned char category, action;
- unsigned short tid, status, capab, params, reason_code = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- /* check RA matches or not */
- if (!ether_addr_equal(myid(&padapter->eeprompriv), mgmt->da))
- return _SUCCESS;
-
- DBG_8723A("%s\n", __func__);
-
- if ((pmlmeinfo->state&0x03) != MSR_AP)
- if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
- return _SUCCESS;
-
- addr = mgmt->sa;
- psta = rtw_get_stainfo23a(pstapriv, addr);
-
- if (!psta)
- return _SUCCESS;
-
- category = mgmt->u.action.category;
- if (category == WLAN_CATEGORY_BACK) { /* representing Block Ack */
- if (!pmlmeinfo->HT_enable)
- return _SUCCESS;
- /* action_code is located in the same place for all
- action events, so pick any */
- action = mgmt->u.action.u.wme_action.action_code;
- DBG_8723A("%s, action =%d\n", __func__, action);
- switch (action) {
- case WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
- memcpy(&pmlmeinfo->ADDBA_req,
- &mgmt->u.action.u.addba_req.dialog_token,
- sizeof(struct ADDBA_request));
- process_addba_req23a(padapter,
- (u8 *)&pmlmeinfo->ADDBA_req, addr);
- if (pmlmeinfo->bAcceptAddbaReq == true)
- issue_action_BA23a(padapter, addr,
- WLAN_ACTION_ADDBA_RESP, 0);
- else {
- /* reject ADDBA Req */
- issue_action_BA23a(padapter, addr,
- WLAN_ACTION_ADDBA_RESP, 37);
- }
- break;
- case WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
- status = get_unaligned_le16(
- &mgmt->u.action.u.addba_resp.status);
- capab = get_unaligned_le16(
- &mgmt->u.action.u.addba_resp.capab);
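-			/* the TID sits in bits 2-5 of the BA parameter set */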
- tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- if (status == 0) { /* successful */
- DBG_8723A("agg_enable for TID =%d\n", tid);
- psta->htpriv.agg_enable_bitmap |= BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- } else
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- break;
-
- case WLAN_ACTION_DELBA: /* DELBA */
- params = get_unaligned_le16(
- &mgmt->u.action.u.delba.params);
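-			/* the TID occupies the top four bits (12-15) of the
-			   DELBA parameter set */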
- tid = params >> 12;
-
- if (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) {
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
- preorder_ctrl->enable = false;
- preorder_ctrl->indicate_seq = 0xffff;
- } else {
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
- }
- reason_code = get_unaligned_le16(
- &mgmt->u.action.u.delba.reason_code);
- /* todo: how to notify the host while receiving
- DELETE BA */
- break;
- default:
- break;
- }
- }
- return _SUCCESS;
-}
-
-static int on_action_public23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u8 *pframe = skb->data;
- int freq, channel;
-
- /* check RA matches or not */
- if (!ether_addr_equal(myid(&padapter->eeprompriv), hdr->addr1))
- return _FAIL;
-
- channel = rtw_get_oper_ch23a(padapter);
-
- if (channel <= RTW_CH_MAX_2G_CHANNEL)
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_2GHZ);
- else
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_5GHZ);
-
- if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe,
- skb->len, 0))
- return _SUCCESS;
-
- return _FAIL;
-}
-
-static int
-OnAction23a_ht(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-static int
-OnAction23a_wmm(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-static int
-OnAction23a_p2p(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-static int
-OnAction23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
-{
- int i;
- u8 category;
- struct action_handler *ptable;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
-
- category = mgmt->u.action.category;
-
- for (i = 0; i < ARRAY_SIZE(OnAction23a_tbl); i++) {
- ptable = &OnAction23a_tbl[i];
-
- if (category == ptable->num)
- ptable->func(padapter, precv_frame);
- }
-
- return _SUCCESS;
-}
-
-static int DoReserved23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- return _SUCCESS;
-}
-
-struct xmit_frame *alloc_mgtxmitframe23a(struct xmit_priv *pxmitpriv)
-{
- struct xmit_frame *pmgntframe;
- struct xmit_buf *pxmitbuf;
-
- pmgntframe = rtw_alloc_xmitframe23a_ext(pxmitpriv);
-
- if (!pmgntframe) {
- DBG_8723A("%s(%s): alloc xmitframe fail\n", __func__,
- pxmitpriv->adapter->pnetdev->name);
- goto exit;
- }
-
- pxmitbuf = rtw_alloc_xmitbuf23a_ext(pxmitpriv);
- if (!pxmitbuf) {
- DBG_8723A("%s(%s): alloc xmitbuf fail\n", __func__,
- pxmitpriv->adapter->pnetdev->name);
- rtw_free_xmitframe23a(pxmitpriv, pmgntframe);
- pmgntframe = NULL;
- goto exit;
- }
-
- pmgntframe->frame_tag = MGNT_FRAMETAG;
- pmgntframe->pxmitbuf = pxmitbuf;
- pmgntframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pmgntframe;
-
-exit:
- return pmgntframe;
-}
-
-/****************************************************************************
-
-Following are some TX functions for WiFi MLME
-
-*****************************************************************************/
-
-void update_mgnt_tx_rate23a(struct rtw_adapter *padapter, u8 rate)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- pmlmeext->tx_rate = rate;
- DBG_8723A("%s(): rate = %x\n", __func__, rate);
-}
-
-void update_mgntframe_attrib23a(struct rtw_adapter *padapter,
- struct pkt_attrib *pattrib)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- memset((u8 *)pattrib, 0, sizeof(struct pkt_attrib));
-
- pattrib->hdrlen = 24;
- pattrib->nr_frags = 1;
- pattrib->priority = 7;
- pattrib->mac_id = 0;
- pattrib->qsel = 0x12;
-
- pattrib->pktlen = 0;
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- pattrib->raid = 6;/* b mode */
- else
- pattrib->raid = 5;/* a/g mode */
-
- pattrib->encrypt = 0;
- pattrib->bswenc = false;
-
- pattrib->qos_en = false;
- pattrib->ht_en = false;
- pattrib->bwmode = HT_CHANNEL_WIDTH_20;
- pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pattrib->sgi = false;
-
- pattrib->seqnum = pmlmeext->mgnt_seq;
-
- pattrib->retry_ctrl = true;
-}
-
-void dump_mgntframe23a(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe)
-{
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true)
- return;
-
- rtl8723au_mgnt_xmit(padapter, pmgntframe);
-}
-
-int dump_mgntframe23a_and_wait(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe, int timeout_ms)
-{
- int ret = _FAIL;
- unsigned long irqL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
- struct submit_ctx sctx;
-
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true)
- return ret;
-
- rtw_sctx_init23a(&sctx, timeout_ms);
- pxmitbuf->sctx = &sctx;
-
- ret = rtl8723au_mgnt_xmit(padapter, pmgntframe);
-
- if (ret == _SUCCESS)
- ret = rtw_sctx_wait23a(&sctx);
-
- spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
- pxmitbuf->sctx = NULL;
- spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);
-
- return ret;
-}
-
-int dump_mgntframe23a_and_wait_ack23a(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe)
-{
- int ret = _FAIL;
- u32 timeout_ms = 500;/* 500ms */
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true)
- return _FAIL;
-
- mutex_lock(&pxmitpriv->ack_tx_mutex);
- pxmitpriv->ack_tx = true;
-
- pmgntframe->ack_report = 1;
- if (rtl8723au_mgnt_xmit(padapter, pmgntframe) == _SUCCESS)
- ret = rtw_ack_tx_wait23a(pxmitpriv, timeout_ms);
-
- pxmitpriv->ack_tx = false;
- mutex_unlock(&pxmitpriv->ack_tx_mutex);
-
- return ret;
-}
-
-static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
-{
- u8 *ssid_ie;
- int ssid_len_ori;
- int len_diff = 0;
- u8 *next_ie;
- u32 remain_len;
-
- ssid_ie = rtw_get_ie23a(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
-
- /* DBG_8723A("%s hidden_ssid_mode:%u, ssid_ie:%p, ssid_len_ori:%d\n",
- __func__, hidden_ssid_mode, ssid_ie, ssid_len_ori); */
-
- if (ssid_ie && ssid_len_ori > 0) {
- switch (hidden_ssid_mode) {
- case 1:
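-			/* mode 1: advertise a zero-length SSID and shift the
-			   remaining IEs forward */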
- next_ie = ssid_ie + 2 + ssid_len_ori;
-			remain_len = ies_len - (next_ie - ies);
-
- ssid_ie[1] = 0;
-			memcpy(ssid_ie + 2, next_ie, remain_len);
- len_diff -= ssid_len_ori;
-
- break;
- case 2:
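-			/* mode 2: keep the SSID IE length but blank out its
-			   contents */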
- memset(&ssid_ie[2], 0, ssid_len_ori);
- break;
- default:
- break;
- }
- }
-
- return len_diff;
-}
-
-void issue_beacon23a(struct rtw_adapter *padapter, int timeout_ms)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_mgmt *mgmt;
- unsigned int rate_len;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- const u8 *wps_ie;
- u8 sr = 0;
- int len_diff;
-
- /* DBG_8723A("%s\n", __func__); */
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe) {
- DBG_8723A("%s, alloc mgnt frame fail\n", __func__);
- return;
- }
-#ifdef CONFIG_8723AU_AP_MODE
- spin_lock_bh(&pmlmepriv->bcn_update_lock);
-#endif
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->qsel = 0x10;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
- mgmt->seq_ctrl = 0;
-
- ether_addr_copy(mgmt->da, bc_addr);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(cur_network));
-
- /* timestamp will be inserted by hardware */
-
- put_unaligned_le16(cur_network->beacon_interval,
- &mgmt->u.beacon.beacon_int);
-
- put_unaligned_le16(cur_network->capability,
- &mgmt->u.beacon.capab_info);
-
- pframe = mgmt->u.beacon.variable;
- pattrib->pktlen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
-
- if ((pmlmeinfo->state & 0x03) == MSR_AP) {
- u8 *iebuf;
- int buflen;
- /* DBG_8723A("ie len =%d\n", cur_network->IELength); */
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- len_diff = update_hidden_ssid(pframe, cur_network->IELength,
- pmlmeinfo->hidden_ssid_mode);
- pframe += (cur_network->IELength+len_diff);
- pattrib->pktlen += (cur_network->IELength+len_diff);
-
- iebuf = mgmt->u.beacon.variable;
- buflen = pattrib->pktlen -
- offsetof(struct ieee80211_mgmt, u.beacon.variable);
- wps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- iebuf, buflen);
-
- if (wps_ie && wps_ie[1] > 0) {
- rtw_get_wps_attr_content23a(wps_ie, wps_ie[1],
- WPS_ATTR_SELECTED_REGISTRAR,
- (u8 *)&sr);
- }
- if (sr != 0)
- set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
- else
- _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
-
- goto _issue_bcn;
- }
-
- /* SSID */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID,
- cur_network->Ssid.ssid_len,
- cur_network->Ssid.ssid, &pattrib->pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES,
- ((rate_len > 8)? 8: rate_len),
- cur_network->SupportedRates, &pattrib->pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_DS_PARAMS, 1, (unsigned char *)
- &cur_network->DSConfig, &pattrib->pktlen);
-
- /* if ((pmlmeinfo->state&0x03) == MSR_ADHOC) */
- {
- u8 erpinfo = 0;
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- /* ATIMWindow = cur->ATIMWindow; */
- ATIMWindow = 0;
- pframe = rtw_set_ie23a(pframe, WLAN_EID_IBSS_PARAMS, 2,
- (unsigned char *)&ATIMWindow,
- &pattrib->pktlen);
-
- /* ERP IE */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_ERP_INFO, 1,
- &erpinfo, &pattrib->pktlen);
- }
-
-	/* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- rate_len - 8,
- cur_network->SupportedRates + 8,
- &pattrib->pktlen);
-
- /* todo:HT for adhoc */
-
-_issue_bcn:
-
-#ifdef CONFIG_8723AU_AP_MODE
- pmlmepriv->update_bcn = false;
-
- spin_unlock_bh(&pmlmepriv->bcn_update_lock);
-#endif
-
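-	/* the beacon plus its TX descriptor must fit within 512 bytes */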
-	if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
-		DBG_8723A("beacon frame too large\n");
-		rtw_free_xmitbuf23a(pxmitpriv, pmgntframe->pxmitbuf);
-		rtw_free_xmitframe23a(pxmitpriv, pmgntframe);
-		return;
-	}
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- /* DBG_8723A("issue bcn_sz =%d\n", pattrib->last_txcmdsz); */
- if (timeout_ms > 0)
- dump_mgntframe23a_and_wait(padapter, pmgntframe, timeout_ms);
- else
- dump_mgntframe23a(padapter, pmgntframe);
-}
-
-static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_mgmt *mgmt;
- unsigned char *mac, *bssid;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-#ifdef CONFIG_8723AU_AP_MODE
- const u8 *pwps_ie;
- u8 *ssid_ie;
- int ssid_ielen;
- int ssid_ielen_diff;
- u8 buf[MAX_IE_SZ];
-#endif
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- unsigned int rate_len;
-
- /* DBG_8723A("%s\n", __func__); */
-
- if (cur_network->IELength > MAX_IE_SZ)
- return;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe) {
- DBG_8723A("%s, alloc mgnt frame fail\n", __func__);
- return;
- }
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)pmgntframe->buf_addr + TXDESC_OFFSET;
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mac = myid(&padapter->eeprompriv);
- bssid = cur_network->MacAddress;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
-
- ether_addr_copy(mgmt->da, da);
- ether_addr_copy(mgmt->sa, mac);
- ether_addr_copy(mgmt->bssid, bssid);
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
-
- /* timestamp will be inserted by hardware */
- put_unaligned_le16(cur_network->beacon_interval,
- &mgmt->u.probe_resp.beacon_int);
-
- put_unaligned_le16(cur_network->capability,
- &mgmt->u.probe_resp.capab_info);
-
- pframe = mgmt->u.probe_resp.variable;
- pattrib->pktlen =
- offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-
- /* below for ad-hoc mode */
-
-#ifdef CONFIG_8723AU_AP_MODE
- if ((pmlmeinfo->state & 0x03) == MSR_AP) {
- pwps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- cur_network->IEs,
- cur_network->IELength);
-
- memcpy(pframe, cur_network->IEs, cur_network->IELength);
- pframe += cur_network->IELength;
- pattrib->pktlen += cur_network->IELength;
-
- /* retrieve SSID IE from cur_network->Ssid */
-
- ssid_ie = rtw_get_ie23a(mgmt->u.probe_resp.variable,
- WLAN_EID_SSID, &ssid_ielen,
- pframe - mgmt->u.probe_resp.variable);
-
- ssid_ielen_diff = cur_network->Ssid.ssid_len - ssid_ielen;
-
- if (ssid_ie && cur_network->Ssid.ssid_len) {
- uint remainder_ielen;
- u8 *remainder_ie;
-
- remainder_ie = ssid_ie + 2;
-
- remainder_ielen = pframe - remainder_ie;
-
-			if (remainder_ielen > MAX_IE_SZ) {
-				DBG_8723A_LEVEL(_drv_warning_, "%s(%s): "
-						"remainder_ielen > MAX_IE_SZ\n",
-						__func__,
-						padapter->pnetdev->name);
-				remainder_ielen = MAX_IE_SZ;
-			}
-
- memcpy(buf, remainder_ie, remainder_ielen);
- memcpy(remainder_ie + ssid_ielen_diff, buf,
- remainder_ielen);
- *(ssid_ie + 1) = cur_network->Ssid.ssid_len;
- memcpy(ssid_ie + 2, cur_network->Ssid.ssid,
- cur_network->Ssid.ssid_len);
-
- pframe += ssid_ielen_diff;
- pattrib->pktlen += ssid_ielen_diff;
- }
- } else
-#endif
- {
- /* SSID */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID,
- cur_network->Ssid.ssid_len,
- cur_network->Ssid.ssid,
- &pattrib->pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES,
- ((rate_len > 8)? 8: rate_len),
- cur_network->SupportedRates,
- &pattrib->pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_DS_PARAMS, 1,
- (unsigned char *)&cur_network->DSConfig,
- &pattrib->pktlen);
-
- if ((pmlmeinfo->state & 0x03) == MSR_ADHOC) {
- u8 erpinfo = 0;
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- /* ATIMWindow = cur->ATIMWindow; */
- ATIMWindow = 0;
- pframe = rtw_set_ie23a(pframe, WLAN_EID_IBSS_PARAMS, 2,
- (unsigned char *)&ATIMWindow,
- &pattrib->pktlen);
-
- /* ERP IE */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_ERP_INFO, 1,
- &erpinfo, &pattrib->pktlen);
- }
-
-		/* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- rate_len - 8,
- cur_network->SupportedRates + 8,
- &pattrib->pktlen);
-
- /* todo:HT for adhoc */
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe23a(padapter, pmgntframe);
-}
-
-static int _issue_probereq(struct rtw_adapter *padapter,
- struct cfg80211_ssid *pssid, u8 *da, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- unsigned char *mac;
- unsigned char bssrate[NumRates];
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int bssrate_len = 0;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "+%s\n", __func__);
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- mac = myid(&padapter->eeprompriv);
-
- pwlanhdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_REQ);
-
- if (da) {
- /* unicast probe request frame */
- ether_addr_copy(pwlanhdr->addr1, da);
- ether_addr_copy(pwlanhdr->addr3, da);
- } else {
- /* broadcast probe request frame */
- ether_addr_copy(pwlanhdr->addr1, bc_addr);
- ether_addr_copy(pwlanhdr->addr3, bc_addr);
- }
-
- ether_addr_copy(pwlanhdr->addr2, mac);
-
- pwlanhdr->seq_ctrl =
- cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
-
- pmlmeext->mgnt_seq++;
-
- pframe += sizeof (struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof (struct ieee80211_hdr_3addr);
-
- if (pssid)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID, pssid->ssid_len,
- pssid->ssid, &pattrib->pktlen);
- else
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID, 0, NULL,
- &pattrib->pktlen);
-
- get_rate_set23a(padapter, bssrate, &bssrate_len);
-
- if (bssrate_len > 8) {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES, 8,
- bssrate, &pattrib->pktlen);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- (bssrate_len - 8), (bssrate + 8),
- &pattrib->pktlen);
- } else {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES,
- bssrate_len, bssrate, &pattrib->pktlen);
- }
-
- /* add wps_ie for wps2.0 */
- if (pmlmepriv->wps_probe_req_ie_len>0 && pmlmepriv->wps_probe_req_ie) {
- memcpy(pframe, pmlmepriv->wps_probe_req_ie,
- pmlmepriv->wps_probe_req_ie_len);
- pframe += pmlmepriv->wps_probe_req_ie_len;
- pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "issuing probe_req, tx_len =%d\n", pattrib->last_txcmdsz);
-
- if (wait_ack) {
- ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
- } else {
- dump_mgntframe23a(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-static inline void issue_probereq(struct rtw_adapter *padapter,
- struct cfg80211_ssid *pssid, u8 *da)
-{
- _issue_probereq(padapter, pssid, da, false);
-}
-
-static int issue_probereq_ex(struct rtw_adapter *padapter,
- struct cfg80211_ssid *pssid, u8 *da,
- int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- unsigned long start = jiffies;
-
- do {
- ret = _issue_probereq(padapter, pssid, da,
- wait_ms > 0 ? true : false);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
-
- } while((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_8723A("%s(%s): to %pM, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- da, rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_8723A("%s(%s):, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
-exit:
- return ret;
-}
-
-/* if psta == NULL, indicate we are a station (client) now... */
-static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta,
- unsigned short status)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_mgmt *mgmt;
- unsigned int val32;
- u16 auth_algo;
- int use_shared_key = 0;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- pattrib->pktlen = offsetof(struct ieee80211_mgmt, u.auth.variable);
-
- if (psta) { /* for AP mode */
-#ifdef CONFIG_8723AU_AP_MODE
- unsigned short val16;
-
- ether_addr_copy(mgmt->da, psta->hwaddr);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, myid(&padapter->eeprompriv));
-
- /* setting auth algo number */
- val16 = (u16)psta->authalg;
-
- if (status != WLAN_STATUS_SUCCESS)
- val16 = 0;
-
- if (val16)
- use_shared_key = 1;
-
- mgmt->u.auth.auth_alg = cpu_to_le16(val16);
-
- /* setting auth seq number */
- mgmt->u.auth.auth_transaction =
- cpu_to_le16((u16)psta->auth_seq);
-
- /* setting status code... */
- mgmt->u.auth.status_code = cpu_to_le16(status);
-
- pframe = mgmt->u.auth.variable;
- /* added challenging text... */
- if ((psta->auth_seq == 2) &&
- (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
- pframe = rtw_set_ie23a(pframe, WLAN_EID_CHALLENGE, 128,
- psta->chg_txt, &pattrib->pktlen);
-#endif
- } else {
- struct ieee80211_mgmt *iv_mgmt;
-
- ether_addr_copy(mgmt->da, get_my_bssid23a(&pmlmeinfo->network));
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid,
- get_my_bssid23a(&pmlmeinfo->network));
-
- /* setting auth algo number */
- /* 0:OPEN System, 1:Shared key */
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
- use_shared_key = 1;
- auth_algo = WLAN_AUTH_SHARED_KEY;
- } else
- auth_algo = WLAN_AUTH_OPEN;
-
- /* DBG_8723A("%s auth_algo = %s auth_seq =%d\n", __func__,
- (pmlmeinfo->auth_algo == 0)?"OPEN":"SHARED",
- pmlmeinfo->auth_seq); */
-
- /* setting IV for auth seq #3 */
- if ((pmlmeinfo->auth_seq == 3) &&
- (pmlmeinfo->state & WIFI_FW_AUTH_STATE) &&
- (use_shared_key == 1)) {
- u32 *piv = (u32 *)&mgmt->u.auth;
-
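-			/* reserve 4 bytes at the start of the frame body for
-			   the WEP IV; the auth fixed fields are written after
-			   it */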
- iv_mgmt = (struct ieee80211_mgmt *)(pframe + 4);
- /* DBG_8723A("==> iv(%d), key_index(%d)\n",
- pmlmeinfo->iv, pmlmeinfo->key_index); */
- val32 = (pmlmeinfo->iv & 0x3fffffff) |
- (pmlmeinfo->key_index << 30);
- pmlmeinfo->iv++;
- put_unaligned_le32(val32, piv);
-
- pattrib->pktlen += 4;
-
- pattrib->iv_len = IEEE80211_WEP_IV_LEN;
- } else
- iv_mgmt = mgmt;
-
- iv_mgmt->u.auth.auth_alg = cpu_to_le16(auth_algo);
-
- /* setting auth seq number */
- iv_mgmt->u.auth.auth_transaction =
- cpu_to_le16(pmlmeinfo->auth_seq);
-
- /* setting status code... */
- iv_mgmt->u.auth.status_code = cpu_to_le16(status);
-
- pframe = iv_mgmt->u.auth.variable;
-
- /* then checking to see if sending challenging text... */
- if ((pmlmeinfo->auth_seq == 3) &&
- (pmlmeinfo->state & WIFI_FW_AUTH_STATE) &&
- (use_shared_key == 1)) {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_CHALLENGE, 128,
- pmlmeinfo->chg_txt,
- &pattrib->pktlen);
-
- mgmt->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
-
- pattrib->encrypt = WLAN_CIPHER_SUITE_WEP40;
-
- pattrib->icv_len = IEEE80211_WEP_ICV_LEN;
-
- pattrib->pktlen += pattrib->icv_len;
- }
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- rtw_wep_encrypt23a(padapter, pmgntframe);
- DBG_8723A("%s\n", __func__);
- dump_mgntframe23a(padapter, pmgntframe);
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-static void issue_assocrsp(struct rtw_adapter *padapter, unsigned short status,
- struct sta_info *pstat, u16 pkt_type)
-{
- struct xmit_frame *pmgntframe;
- struct ieee80211_mgmt *mgmt;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- const u8 *p;
- u8 *ie = pnetwork->IEs;
-
- DBG_8723A("%s\n", __func__);
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | pkt_type);
-
- ether_addr_copy(mgmt->da, pstat->hwaddr);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(&pmlmeinfo->network));
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
-
- pmlmeext->mgnt_seq++;
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen =
- offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
-
- mgmt->u.assoc_resp.capab_info = cpu_to_le16(pnetwork->capability);
- mgmt->u.assoc_resp.status_code = cpu_to_le16(status);
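-	/* the two most significant bits of the AID field are always set */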
- mgmt->u.assoc_resp.aid = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
-
- pframe = mgmt->u.assoc_resp.variable;
-
- if (pstat->bssratelen <= 8) {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES,
- pstat->bssratelen, pstat->bssrateset,
- &pattrib->pktlen);
- } else {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES, 8,
- pstat->bssrateset, &pattrib->pktlen);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- pstat->bssratelen - 8,
- pstat->bssrateset + 8, &pattrib->pktlen);
- }
-
- if (pstat->flags & WLAN_STA_HT && pmlmepriv->htpriv.ht_option) {
- /* FILL HT CAP INFO IE */
- /* p = hostapd_eid_ht_capabilities_info(hapd, p); */
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ie,
- pnetwork->IELength);
- if (p && p[1]) {
- memcpy(pframe, p, p[1] + 2);
- pframe += (p[1] + 2);
- pattrib->pktlen += (p[1] + 2);
- }
-
- /* FILL HT ADD INFO IE */
- /* p = hostapd_eid_ht_operation(hapd, p); */
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie,
- pnetwork->IELength);
- if (p && p[1] > 0) {
- memcpy(pframe, p, p[1] + 2);
- pframe += (p[1] + 2);
- pattrib->pktlen += (p[1] + 2);
- }
- }
-
- /* FILL WMM IE */
- if (pstat->flags & WLAN_STA_WME && pmlmepriv->qos_option) {
- unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02,
- 0x01, 0x01};
- int ie_len = 0;
-
- for (p = ie; ; p += (ie_len + 2)) {
- p = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, p,
- pnetwork->IELength - (ie_len + 2));
- if (p)
- ie_len = p[1];
- else
- ie_len = 0;
- if (p && !memcmp(p + 2, WMM_PARA_IE, 6)) {
- memcpy(pframe, p, ie_len + 2);
- pframe += (ie_len + 2);
- pattrib->pktlen += (ie_len + 2);
-
- break;
- }
-
- if (!p || ie_len == 0)
- break;
- }
- }
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_VENDOR_SPECIFIC, 6,
- REALTEK_96B_IE, &pattrib->pktlen);
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe23a(padapter, pmgntframe);
-}
-#endif
-
-static void issue_assocreq(struct rtw_adapter *padapter)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- const u8 *p;
- struct ieee80211_mgmt *mgmt;
- unsigned int i, j, index = 0;
- unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- int bssrate_len = 0, sta_bssrate_len = 0, pie_len;
- u8 *pie;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)pmgntframe->buf_addr + TXDESC_OFFSET;
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
-
- ether_addr_copy(mgmt->da, get_my_bssid23a(&pmlmeinfo->network));
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(&pmlmeinfo->network));
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- /* caps */
- put_unaligned_le16(pmlmeinfo->network.capability,
- &mgmt->u.assoc_req.capab_info);
- /* todo: listen interval for power saving */
- put_unaligned_le16(3, &mgmt->u.assoc_req.listen_interval);
-
- pframe = mgmt->u.assoc_req.variable;
- pattrib->pktlen = offsetof(struct ieee80211_mgmt, u.assoc_req.variable);
-
- /* SSID */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID,
- pmlmeinfo->network.Ssid.ssid_len,
- pmlmeinfo->network.Ssid.ssid, &pattrib->pktlen);
-
- /* supported rate & extended supported rate */
-
- get_rate_set23a(padapter, sta_bssrate, &sta_bssrate_len);
- /* DBG_8723A("sta_bssrate_len =%d\n", sta_bssrate_len); */
-
-	/* for Japan, channel 14 can only use B mode (CCK) */
- if (pmlmeext->cur_channel == 14)
- sta_bssrate_len = 4;
-
- /* for (i = 0; i < sta_bssrate_len; i++) { */
- /* DBG_8723A("sta_bssrate[%d]=%02X\n", i, sta_bssrate[i]); */
-	/* } */
-
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- if (pmlmeinfo->network.SupportedRates[i] == 0)
- break;
- DBG_8723A("network.SupportedRates[%d]=%02X\n", i,
- pmlmeinfo->network.SupportedRates[i]);
- }
-
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- if (pmlmeinfo->network.SupportedRates[i] == 0)
- break;
-
- /* Check if the AP's supported rates are also
- supported by STA. */
- for (j = 0; j < sta_bssrate_len; j++) {
- /* Avoid the proprietary data rate (22Mbps) of
- Handlink WSG-4000 AP */
- if ((pmlmeinfo->network.SupportedRates[i] |
- IEEE80211_BASIC_RATE_MASK) ==
- (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK)) {
- /* DBG_8723A("match i = %d, j =%d\n", i, j); */
- break;
- }
- }
-
- if (j == sta_bssrate_len) {
- /* the rate is not supported by STA */
- DBG_8723A("%s(): the rate[%d]=%02X is not supported by "
- "STA!\n", __func__, i,
- pmlmeinfo->network.SupportedRates[i]);
- } else {
- /* the rate is supported by STA */
- bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
- }
- }
-
- bssrate_len = index;
- DBG_8723A("bssrate_len = %d\n", bssrate_len);
-
- if (bssrate_len == 0) {
- rtw_free_xmitbuf23a(pxmitpriv, pmgntframe->pxmitbuf);
- rtw_free_xmitframe23a(pxmitpriv, pmgntframe);
-		goto exit; /* don't connect to the AP if there is no common supported rate */
- }
-
- if (bssrate_len > 8) {
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES, 8,
- bssrate, &pattrib->pktlen);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- (bssrate_len - 8), (bssrate + 8),
- &pattrib->pktlen);
- } else
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES,
- bssrate_len, bssrate, &pattrib->pktlen);
-
- /* RSN */
-
- pie = pmlmeinfo->network.IEs;
- pie_len = pmlmeinfo->network.IELength;
-
- p = cfg80211_find_ie(WLAN_EID_RSN, pie, pie_len);
- if (p)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_RSN, p[1], p + 2,
- &pattrib->pktlen);
-
- /* HT caps */
- if (padapter->mlmepriv.htpriv.ht_option) {
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, pie, pie_len);
-
- if (p && !is_ap_in_tkip23a(padapter)) {
- struct ieee80211_ht_cap *cap = &pmlmeinfo->ht_cap;
-
- memcpy(cap, p + 2, sizeof(struct ieee80211_ht_cap));
-
-			/* disable 40 MHz support when cbw40_enable == 0 */
- if (pregpriv->cbw40_enable == 0) {
- cap->cap_info &= ~cpu_to_le16(
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- } else {
- cap->cap_info |= cpu_to_le16(
- IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- }
-
- /* todo: disable SM power save mode */
- cap->cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SM_PS);
-
- rf_type = rtl8723a_get_rf_type(padapter);
- /* switch (pregpriv->rf_config) */
- switch (rf_type) {
- case RF_1T1R:
- /* RX STBC One spatial stream */
- if (pregpriv->rx_stbc)
- cap->cap_info |= cpu_to_le16(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- memcpy(&cap->mcs, MCS_rate_1R23A, 16);
- break;
-
- case RF_2T2R:
- case RF_1T2R:
- default:
- /* enable for 2.4/5 GHz */
- if (pregpriv->rx_stbc == 0x3 ||
- (pmlmeext->cur_wireless_mode &
- WIRELESS_11_24N &&
- /* enable for 2.4GHz */
- pregpriv->rx_stbc == 0x1) ||
- (pmlmeext->cur_wireless_mode &
- WIRELESS_11_5N &&
- pregpriv->rx_stbc == 0x2) ||
- /* enable for 5GHz */
- pregpriv->wifi_spec == 1) {
- DBG_8723A("declare supporting RX "
- "STBC\n");
- /* RX STBC two spatial stream */
- cap->cap_info |= cpu_to_le16(2 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
- }
- memcpy(&cap->mcs, MCS_rate_2R23A, 16);
- break;
- }
-
- if (rtl8723a_BT_coexist(padapter) &&
- rtl8723a_BT_using_antenna_1(padapter)) {
- /* set to 8K */
- cap->ampdu_params_info &=
- ~IEEE80211_HT_AMPDU_PARM_FACTOR;
-/* cap->ampdu_params_info |= MAX_AMPDU_FACTOR_8K */
- }
-
- pframe = rtw_set_ie23a(pframe, WLAN_EID_HT_CAPABILITY,
- p[1], (u8 *)&pmlmeinfo->ht_cap,
- &pattrib->pktlen);
- }
- }
-
- /* vendor specific IE, such as WPA, WMM, WPS */
- for (i = 0; i < pmlmeinfo->network.IELength;) {
- p = pmlmeinfo->network.IEs + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:
- if (!memcmp(p + 2, RTW_WPA_OUI23A_TYPE, 4) ||
- !memcmp(p + 2, WMM_OUI23A, 4) ||
- !memcmp(p + 2, WPS_OUI23A, 4)) {
- u8 plen = p[1];
-
- if (!padapter->registrypriv.wifi_spec) {
- /* Commented by Kurt 20110629 */
-					/* In some older APs, the WPS handshake
-					   would fail if we appended vendor
-					   extension information to the AP */
- if (!memcmp(p + 2, WPS_OUI23A, 4))
- plen = 14;
- }
- pframe = rtw_set_ie23a(pframe,
- WLAN_EID_VENDOR_SPECIFIC,
- plen, p + 2,
- &pattrib->pktlen);
- }
- break;
-
- default:
- break;
- }
-
- i += p[1] + 2;
- }
-
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_VENDOR_SPECIFIC, 6,
- REALTEK_96B_IE, &pattrib->pktlen);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
- dump_mgntframe23a(padapter, pmgntframe);
-
- ret = _SUCCESS;
-
-exit:
- pmlmepriv->assoc_req_len = 0;
- if (ret == _SUCCESS) {
- kfree(pmlmepriv->assoc_req);
- pmlmepriv->assoc_req = kmalloc(pattrib->pktlen, GFP_ATOMIC);
- if (pmlmepriv->assoc_req) {
- memcpy(pmlmepriv->assoc_req, mgmt, pattrib->pktlen);
- pmlmepriv->assoc_req_len = pattrib->pktlen;
- }
- } else
- kfree(pmlmepriv->assoc_req);
-}
-
-/* when wait_ack is true, this function should be called in process context */
-static int _issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
- unsigned int power_mode, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_hdr *pwlanhdr;
- struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
-
- /* DBG_8723A("%s:%d\n", __func__, power_mode); */
-
- if (!padapter)
- goto exit;
-
- pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- pwlanhdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC);
-
- if ((pmlmeinfo->state&0x03) == MSR_AP)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
- else if ((pmlmeinfo->state&0x03) == MSR_INFRA)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_TODS);
-
- if (power_mode)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
-
- ether_addr_copy(pwlanhdr->addr1, da);
- ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
- ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
-
- pwlanhdr->seq_ctrl =
- cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- pframe += sizeof(struct ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack)
- ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
- else {
- dump_mgntframe23a(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-/* when wait_ms > 0, this function should be called in process context */
-/* da == NULL for station mode */
-int issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
- unsigned int power_mode, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- unsigned long start = jiffies;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
-	/* da == NULL: assume it's a null data frame from the STA to the AP */
- if (da == NULL)
- da = get_my_bssid23a(&pmlmeinfo->network);
-
- do {
- ret = _issue_nulldata23a(padapter, da, power_mode,
- wait_ms > 0 ? true : false);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
-
- } while((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_8723A("%s(%s): to %pM, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- da, rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_8723A("%s(%s):, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
-exit:
- return ret;
-}
-
-/* when wait_ack is true, this function should be called in process context */
-static int _issue_qos_nulldata23a(struct rtw_adapter *padapter,
- unsigned char *da, u16 tid, int wait_ack)
-{
- int ret = _FAIL;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_qos_hdr *pwlanhdr;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_8723A("%s\n", __func__);
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- pattrib->hdrlen += 2;
- pattrib->qos_en = true;
- pattrib->eosp = 1;
- pattrib->ack_policy = 0;
- pattrib->mdata = 0;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct ieee80211_qos_hdr *)pframe;
-
- pwlanhdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_QOS_NULLFUNC);
-
- if ((pmlmeinfo->state&0x03) == MSR_AP)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
- else if ((pmlmeinfo->state&0x03) == MSR_INFRA)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_TODS);
-
- if (pattrib->mdata)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-
- pwlanhdr->qos_ctrl = cpu_to_le16(tid & IEEE80211_QOS_CTL_TID_MASK);
- pwlanhdr->qos_ctrl |= cpu_to_le16((pattrib->ack_policy << 5) &
- IEEE80211_QOS_CTL_ACK_POLICY_MASK);
- if (pattrib->eosp)
- pwlanhdr->qos_ctrl |= cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
-
- ether_addr_copy(pwlanhdr->addr1, da);
- ether_addr_copy(pwlanhdr->addr2, myid(&padapter->eeprompriv));
- ether_addr_copy(pwlanhdr->addr3, get_my_bssid23a(&pmlmeinfo->network));
-
- pwlanhdr->seq_ctrl =
- cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- pframe += sizeof(struct ieee80211_qos_hdr);
- pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack)
- ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
- else {
- dump_mgntframe23a(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-/* when wait_ms > 0, this function should be called in process context */
-/* da == NULL for station mode */
-int issue_qos_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
- u16 tid, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- unsigned long start = jiffies;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
-	/* da == NULL: assume it's a null data frame from the STA to the AP */
- if (da == NULL)
- da = get_my_bssid23a(&pmlmeinfo->network);
-
- do {
- ret = _issue_qos_nulldata23a(padapter, da, tid,
- wait_ms > 0 ? true : false);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
- } while((i < try_cnt) && ((ret == _FAIL)||(wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_8723A("%s(%s): to %pM, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- da, rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_8723A("%s(%s):, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
-exit:
- return ret;
-}
-
-static int _issue_deauth(struct rtw_adapter *padapter, unsigned char *da,
- unsigned short reason, u8 wait_ack)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct ieee80211_mgmt *mgmt;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- int ret = _FAIL;
-
- /* DBG_8723A("%s to %pM\n", __func__, da); */
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET);
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
-
- ether_addr_copy(mgmt->da, da);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(&pmlmeinfo->network));
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
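-	/* 3-address header plus the 2-byte reason code */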
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr) + 2;
-
- mgmt->u.deauth.reason_code = cpu_to_le16(reason);
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- if (wait_ack)
- ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
- else {
- dump_mgntframe23a(padapter, pmgntframe);
- ret = _SUCCESS;
- }
-
-exit:
- return ret;
-}
-
-int issue_deauth23a(struct rtw_adapter *padapter, unsigned char *da,
- unsigned short reason)
-{
- DBG_8723A("%s to %pM\n", __func__, da);
- return _issue_deauth(padapter, da, reason, false);
-}
-
-static int issue_deauth_ex(struct rtw_adapter *padapter, u8 *da,
- unsigned short reason, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- unsigned long start = jiffies;
-
- do {
- ret = _issue_deauth(padapter, da, reason,
- wait_ms >0 ? true : false);
-
- i++;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
-
- } while((i < try_cnt) && ((ret == _FAIL)||(wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_8723A("%s(%s): to %pM, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- da, rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- else
- DBG_8723A("%s(%s):, ch:%u%s, %d/%d in %u ms\n",
- __func__, padapter->pnetdev->name,
- rtw_get_oper_ch23a(padapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt,
- jiffies_to_msecs(jiffies - start));
- }
-exit:
- return ret;
-}
-
-void issue_action_spct_ch_switch23a(struct rtw_adapter *padapter,
- u8 *ra, u8 new_ch, u8 ch_offset)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct ieee80211_mgmt *mgmt;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- DBG_8723A("%s(%s): ra=%pM, ch:%u, offset:%u\n",
- __func__, padapter->pnetdev->name, ra, new_ch, ch_offset);
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET);
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
-
- ether_addr_copy(mgmt->da, ra); /* RA */
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv)); /* TA */
- ether_addr_copy(mgmt->bssid, ra); /* DA = RA */
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
- mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH;
-
- pframe = mgmt->u.action.u.chan_switch.variable;
- pattrib->pktlen = offsetof(struct ieee80211_mgmt,
- u.action.u.chan_switch.variable);
-
- pframe = rtw_set_ie23a_ch_switch (pframe, &pattrib->pktlen, 0,
- new_ch, 0);
- pframe = rtw_set_ie23a_secondary_ch_offset(pframe, &pattrib->pktlen,
- hal_ch_offset_to_secondary_ch_offset23a(ch_offset));
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe23a(padapter, pmgntframe);
-}
-
-void issue_action_BA23a(struct rtw_adapter *padapter,
- const unsigned char *raddr,
- unsigned char action, unsigned short status)
-{
- u16 start_seq;
- u16 BA_para_set;
- u16 BA_starting_seqctrl;
- u16 BA_para;
- int max_rx_ampdu_factor;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct ieee80211_mgmt *mgmt;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct registry_priv *pregpriv = &padapter->registrypriv;
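-	/* OUI prefix used to recognize Tenda APs; only the first three bytes
-	   of the peer address are compared */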
- u8 tendaAPMac[] = {0xC8, 0x3A, 0x35};
-
- DBG_8723A("%s, action =%d, status =%d\n", __func__, action, status);
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- mgmt = (struct ieee80211_mgmt *)(pmgntframe->buf_addr + TXDESC_OFFSET);
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
-
- ether_addr_copy(mgmt->da, raddr);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(&pmlmeinfo->network));
-
- mgmt->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- mgmt->u.action.category = WLAN_CATEGORY_BACK;
-
- pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr) + 1;
-
- switch (action) {
- case WLAN_ACTION_ADDBA_REQ:
- pattrib->pktlen += sizeof(mgmt->u.action.u.addba_req);
-
- mgmt->u.action.u.addba_req.action_code = action;
-
- do {
- pmlmeinfo->dialogToken++;
- } while (pmlmeinfo->dialogToken == 0);
-
- mgmt->u.action.u.addba_req.dialog_token =
- pmlmeinfo->dialogToken;
-
- if (rtl8723a_BT_coexist(padapter) &&
- rtl8723a_BT_using_antenna_1(padapter) &&
- (pmlmeinfo->assoc_AP_vendor != broadcomAP ||
- memcmp(raddr, tendaAPMac, 3))) {
- /* A-MSDU NOT Supported */
- BA_para_set = 0;
- /* immediate Block Ack */
- BA_para_set |= (1 << 1) &
- IEEE80211_ADDBA_PARAM_POLICY_MASK;
- /* TID */
- BA_para_set |= (status << 2) &
- IEEE80211_ADDBA_PARAM_TID_MASK;
- /* max buffer size is 8 MSDU */
- BA_para_set |= (8 << 6) &
- IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
- } else {
- /* immediate ack & 64 buffer size */
- BA_para_set = 0x1002 | ((status & 0xf) << 2);
- }
-
- put_unaligned_le16(BA_para_set,
- &mgmt->u.action.u.addba_req.capab);
-
-		/* block ack timeout value (TUs) */
- put_unaligned_le16(5000, &mgmt->u.action.u.addba_req.timeout);
-
- psta = rtw_get_stainfo23a(pstapriv, raddr);
- if (psta) {
- int idx;
-
- idx = status & 0x07;
- start_seq =
- (psta->sta_xmitpriv.txseq_tid[idx] & 0xfff) + 1;
-
- DBG_8723A("BA_starting_seqctrl = %d for TID =%d\n",
- start_seq, idx);
-
- psta->BA_starting_seqctrl[idx] = start_seq;
-
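-			/* the starting sequence number goes in bits 4-15; the
-			   fragment number (bits 0-3) stays zero */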
- BA_starting_seqctrl = start_seq << 4;
- } else
- BA_starting_seqctrl = 0;
-
- put_unaligned_le16(BA_starting_seqctrl,
- &mgmt->u.action.u.addba_req.start_seq_num);
-
- break;
-
- case WLAN_ACTION_ADDBA_RESP:
- pattrib->pktlen += sizeof(mgmt->u.action.u.addba_resp);
-
- mgmt->u.action.u.addba_resp.action_code = action;
- mgmt->u.action.u.addba_resp.dialog_token =
- pmlmeinfo->ADDBA_req.dialog_token;
- put_unaligned_le16(status,
- &mgmt->u.action.u.addba_resp.status);
-
- GetHalDefVar8192CUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR,
- &max_rx_ampdu_factor);
-
- BA_para = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
- if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_64K)
- BA_para_set = BA_para | 0x1000; /* 64 buffer size */
- else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_32K)
- BA_para_set = BA_para | 0x0800; /* 32 buffer size */
- else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_16K)
- BA_para_set = BA_para | 0x0400; /* 16 buffer size */
- else if (max_rx_ampdu_factor == IEEE80211_HT_MAX_AMPDU_8K)
- BA_para_set = BA_para | 0x0200; /* 8 buffer size */
- else
- BA_para_set = BA_para | 0x1000; /* 64 buffer size */
-
- if (rtl8723a_BT_coexist(padapter) &&
- rtl8723a_BT_using_antenna_1(padapter) &&
- (pmlmeinfo->assoc_AP_vendor != broadcomAP ||
- memcmp(raddr, tendaAPMac, 3))) {
- /* max buffer size is 8 MSDU */
- BA_para_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
- BA_para_set |= (8 << 6) &
- IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
- }
-
- if (pregpriv->ampdu_amsdu == 0)/* disabled */
- BA_para_set &= ~BIT(0);
- else if (pregpriv->ampdu_amsdu == 1)/* enabled */
- BA_para_set |= BIT(0);
-
- put_unaligned_le16(BA_para_set,
- &mgmt->u.action.u.addba_resp.capab);
-
- mgmt->u.action.u.addba_resp.timeout
- = pmlmeinfo->ADDBA_req.BA_timeout_value;
-
- pattrib->pktlen += 8;
- break;
- case WLAN_ACTION_DELBA:
- pattrib->pktlen += sizeof(mgmt->u.action.u.delba);
-
- mgmt->u.action.u.delba.action_code = action;
- BA_para_set = (status & 0x1F) << 3;
- mgmt->u.action.u.delba.params = cpu_to_le16(BA_para_set);
- mgmt->u.action.u.delba.reason_code =
- cpu_to_le16(WLAN_REASON_QSTA_NOT_USE);
-
- pattrib->pktlen += 5;
- break;
- default:
- break;
- }
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe23a(padapter, pmgntframe);
-}
-
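-/*
- * Tear down existing block-ack agreements with the peer at @addr.
- * @initiator selects the direction: 0 walks the RX reorder controls
- * (recipient side), 1 walks the TX aggregation bitmap (originator
- * side).  A DELBA action frame is issued for every TID that still has
- * an agreement.
- */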
-int send_delba23a(struct rtw_adapter *padapter, u8 initiator, u8 *addr)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta = NULL;
- /* struct recv_reorder_ctrl *preorder_ctrl; */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u16 tid;
-
-	if ((pmlmeinfo->state & 0x03) != MSR_AP &&
-	    !(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
-		return _SUCCESS;
-
- psta = rtw_get_stainfo23a(pstapriv, addr);
- if (psta == NULL)
- return _SUCCESS;
-
- if (initiator == 0) { /* recipient */
- for (tid = 0; tid < MAXTID; tid++) {
-			if (psta->recvreorder_ctrl[tid].enable) {
-				DBG_8723A("rx agg disable tid(%d)\n", tid);
-				issue_action_BA23a(padapter, addr, WLAN_ACTION_DELBA,
-						   ((tid << 1) | initiator) & 0x1F);
- psta->recvreorder_ctrl[tid].enable = false;
- psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
- }
- }
- } else if (initiator == 1) { /* originator */
- for (tid = 0; tid < MAXTID; tid++) {
- if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
- DBG_8723A("tx agg disable tid(%d)\n", tid);
-				issue_action_BA23a(padapter, addr, WLAN_ACTION_DELBA,
-						   ((tid << 1) | initiator) & 0x1F);
- psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
- psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
-
- }
- }
- }
- return _SUCCESS;
-}
-
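-/*
- * Download a beacon to the hardware and poll the "beacon valid" bit
- * until the frame is accepted, re-issuing the beacon up to 100 times.
- * Returns _FAIL on surprise removal, driver stop or if every attempt
- * times out, _SUCCESS otherwise.
- */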
-int send_beacon23a(struct rtw_adapter *padapter)
-{
- bool bxmitok;
- int issue = 0;
- int poll = 0;
- unsigned long start = jiffies;
- unsigned int passing_time;
-
- rtl8723a_bcn_valid(padapter);
- do {
- issue_beacon23a(padapter, 100);
- issue++;
- do {
- yield();
- bxmitok = rtl8723a_get_bcn_valid(padapter);
- poll++;
- } while ((poll % 10) != 0 && !bxmitok &&
- !padapter->bSurpriseRemoved &&
- !padapter->bDriverStopped);
-
-	} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved &&
- !padapter->bDriverStopped);
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return _FAIL;
-
- passing_time = jiffies_to_msecs(jiffies - start);
-
- if (!bxmitok) {
- DBG_8723A("%s fail! %u ms\n", __func__, passing_time);
- return _FAIL;
- } else {
-
- if (passing_time > 100 || issue > 3)
- DBG_8723A("%s success, issue:%d, poll:%d, %u ms\n",
- __func__, issue, poll, passing_time);
- return _SUCCESS;
- }
-}
-
-/****************************************************************************
-
-Following are some utility functions for WiFi MLME
-
-*****************************************************************************/
-
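-/* Return true if @channel is in the driver's static list of valid 5 GHz channels. */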
-bool IsLegal5GChannel(struct rtw_adapter *Adapter, u8 channel)
-{
-	int i;
- u8 Channel_5G[45] = {36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165};
- for (i = 0; i < sizeof(Channel_5G); i++)
- if (channel == Channel_5G[i])
- return true;
- return false;
-}
-
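-/*
- * One step of the scan state machine: while channels remain in
- * sitesurvey_res, hop to the next one (sending probe requests for the
- * configured SSIDs on active channels) and re-arm the survey timer.
- * Once the channel list is exhausted, restore the original channel,
- * initial gain and dynamic functions and report the survey as done.
- */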
-static void rtw_site_survey(struct rtw_adapter *padapter)
-{
- unsigned char survey_channel = 0;
- enum rt_scan_type ScanType = SCAN_PASSIVE;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct rtw_ieee80211_channel *ch;
-
- if (pmlmeext->sitesurvey_res.channel_idx <
- pmlmeext->sitesurvey_res.ch_num) {
- ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
- survey_channel = ch->hw_value;
- ScanType = (ch->flags & IEEE80211_CHAN_NO_IR) ?
- SCAN_PASSIVE : SCAN_ACTIVE;
- }
-
- if (survey_channel != 0) {
- /* PAUSE 4-AC Queue when site_survey */
- if (pmlmeext->sitesurvey_res.channel_idx == 0)
- set_channel_bwmode23a(padapter, survey_channel,
- HAL_PRIME_CHNL_OFFSET_DONT_CARE,
- HT_CHANNEL_WIDTH_20);
- else
- SelectChannel23a(padapter, survey_channel);
-
-		/* obey the channel plan setting... */
-		if (ScanType == SCAN_ACTIVE) {
-			int i;
-
-			for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pmlmeext->sitesurvey_res.ssid[i].ssid_len) {
- /* todo: to issue two probe req??? */
- issue_probereq(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
- /* msleep(SURVEY_TO>>1); */
- issue_probereq(padapter, &pmlmeext->sitesurvey_res.ssid[i], NULL);
- }
- }
-
- if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
- /* todo: to issue two probe req??? */
- issue_probereq(padapter, NULL, NULL);
- /* msleep(SURVEY_TO>>1); */
- issue_probereq(padapter, NULL, NULL);
- }
- }
-
- set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
- } else {
- /* channel number is 0 or this channel is not valid. */
- pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
-
- /* switch back to the original channel */
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset,
- pmlmeext->cur_bwmode);
-
- /* flush 4-AC Queue after rtw_site_survey */
- /* val8 = 0; */
-
- /* config MSR */
- rtl8723a_set_media_status(padapter, pmlmeinfo->state & 0x3);
-
- /* restore RX GAIN */
- rtl8723a_set_initial_gain(padapter, 0xff);
- /* turn on dynamic functions */
- rtl8723a_odm_support_ability_restore(padapter);
-
-		if (is_client_associated_to_ap23a(padapter))
- issue_nulldata23a(padapter, NULL, 0, 3, 500);
-
- rtl8723a_mlme_sitesurvey(padapter, 0);
-
- report_surveydone_event23a(padapter);
-
- pmlmeext->chan_scan_time = SURVEY_TO;
- pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
- }
-}
-
-/* collect bss info from Beacon and Probe request/response frames. */
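-/* Returns a freshly allocated wlan_bssid_ex (kzalloc), or NULL if allocation
- * fails or the frame cannot be parsed; the caller owns the returned buffer. */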
-static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *bssid;
- const u8 *p;
- u8 *pie;
- unsigned int length;
- int i;
-
- length = skb->len;
-
- bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
- if (!bssid)
- return NULL;
-
- if (ieee80211_is_beacon(mgmt->frame_control)) {
- length -= offsetof(struct ieee80211_mgmt, u.beacon.variable);
- pie = mgmt->u.beacon.variable;
- bssid->reserved = 1;
- bssid->capability =
- get_unaligned_le16(&mgmt->u.beacon.capab_info);
- bssid->beacon_interval =
- get_unaligned_le16(&mgmt->u.beacon.beacon_int);
- bssid->tsf = get_unaligned_le64(&mgmt->u.beacon.timestamp);
- } else if (ieee80211_is_probe_req(mgmt->frame_control)) {
- length -= offsetof(struct ieee80211_mgmt, u.probe_req.variable);
- pie = mgmt->u.probe_req.variable;
- bssid->reserved = 2;
- bssid->capability = 0;
- bssid->beacon_interval =
- padapter->registrypriv.dev_network.beacon_interval;
- bssid->tsf = 0;
- } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
- length -=
- offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- pie = mgmt->u.probe_resp.variable;
- bssid->reserved = 3;
- bssid->capability =
- get_unaligned_le16(&mgmt->u.probe_resp.capab_info);
- bssid->beacon_interval =
- get_unaligned_le16(&mgmt->u.probe_resp.beacon_int);
- bssid->tsf = get_unaligned_le64(&mgmt->u.probe_resp.timestamp);
- } else {
- length -= offsetof(struct ieee80211_mgmt, u.beacon.variable);
- pie = mgmt->u.beacon.variable;
- bssid->reserved = 0;
- bssid->capability =
- get_unaligned_le16(&mgmt->u.beacon.capab_info);
- bssid->beacon_interval =
- padapter->registrypriv.dev_network.beacon_interval;
- bssid->tsf = 0;
- }
-
- if (length > MAX_IE_SZ) {
- /* DBG_8723A("IE too long for survey event\n"); */
- kfree(bssid);
- return NULL;
- }
-
- bssid->Length = offsetof(struct wlan_bssid_ex, IEs) + length;
-
- /* below is to copy the information element */
- bssid->IELength = length;
- memcpy(bssid->IEs, pie, bssid->IELength);
-
-	/* get the signal strength, in dBm (raw data) */
- bssid->Rssi = precv_frame->attrib.phy_info.RecvSignalPower;
- bssid->SignalQuality =
- precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
- bssid->SignalStrength =
- precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
-
- /* checking SSID */
- p = cfg80211_find_ie(WLAN_EID_SSID, bssid->IEs, bssid->IELength);
-
- if (!p) {
- DBG_8723A("marc: cannot find SSID for survey event\n");
- goto fail;
- }
-
- if (p[1] > IEEE80211_MAX_SSID_LEN) {
- DBG_8723A("%s()-%d: IE too long (%d) for survey "
- "event\n", __func__, __LINE__, p[1]);
- goto fail;
- }
- memcpy(bssid->Ssid.ssid, p + 2, p[1]);
- bssid->Ssid.ssid_len = p[1];
-
- /* checking rate info... */
- i = 0;
- p = cfg80211_find_ie(WLAN_EID_SUPP_RATES, bssid->IEs, bssid->IELength);
- if (p) {
- if (p[1] > NDIS_802_11_LENGTH_RATES_EX) {
- DBG_8723A("%s()-%d: IE too long (%d) for survey "
- "event\n", __func__, __LINE__, p[1]);
- goto fail;
- }
- memcpy(bssid->SupportedRates, p + 2, p[1]);
- i = p[1];
- }
-
- p = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, bssid->IEs,
- bssid->IELength);
- if (p) {
- if (p[1] > (NDIS_802_11_LENGTH_RATES_EX-i)) {
- DBG_8723A("%s()-%d: IE too long (%d) for survey "
- "event\n", __func__, __LINE__, p[1]);
- goto fail;
- }
- memcpy(bssid->SupportedRates + i, p + 2, p[1]);
- }
-
- /* Checking for DSConfig */
- p = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bssid->IEs, bssid->IELength);
-
- bssid->DSConfig = 0;
-
- if (p) {
- bssid->DSConfig = p[2];
-	} else { /* in 5G, some APs do not carry a DS Parameter Set IE */
- /* checking HT info for channel */
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, bssid->IEs,
- bssid->IELength);
- if (p) {
- struct ieee80211_ht_operation *HT_info =
- (struct ieee80211_ht_operation *)(p + 2);
- bssid->DSConfig = HT_info->primary_chan;
- } else /* use current channel */
- bssid->DSConfig = rtw_get_oper_ch23a(padapter);
- }
-
- if (ieee80211_is_probe_req(mgmt->frame_control)) {
- /* FIXME */
- bssid->ifmode = NL80211_IFTYPE_STATION;
- ether_addr_copy(bssid->MacAddress, mgmt->sa);
- bssid->Privacy = 1;
- return bssid;
- }
-
- if (bssid->capability & WLAN_CAPABILITY_ESS) {
- bssid->ifmode = NL80211_IFTYPE_STATION;
- ether_addr_copy(bssid->MacAddress, mgmt->sa);
- } else {
- bssid->ifmode = NL80211_IFTYPE_ADHOC;
- ether_addr_copy(bssid->MacAddress, mgmt->bssid);
- }
-
- if (bssid->capability & WLAN_CAPABILITY_PRIVACY)
- bssid->Privacy = 1;
- else
- bssid->Privacy = 0;
-
- bssid->ATIMWindow = 0;
-
- /* 20/40 BSS Coexistence check */
- if (pregistrypriv->wifi_spec == 1 &&
- pmlmeinfo->bwmode_updated == false) {
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, bssid->IEs,
- bssid->IELength);
- if (p && p[1] > 0) {
- struct ieee80211_ht_cap *pHT_caps;
-
- pHT_caps = (struct ieee80211_ht_cap *)(p + 2);
-
- if (pHT_caps->cap_info &
- cpu_to_le16(IEEE80211_HT_CAP_40MHZ_INTOLERANT))
- pmlmepriv->num_FortyMHzIntolerant++;
- } else
- pmlmepriv->num_sta_no_ht++;
- }
-
-
-	/* mark bss info received on a nearby channel with SignalQuality 101 */
- if (bssid->DSConfig != rtw_get_oper_ch23a(padapter))
- bssid->SignalQuality = 101;
-
- return bssid;
-fail:
-	kfree(bssid);
- return NULL;
-}
-
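-/*
- * Bring up an ad-hoc (IBSS) master: switch to the target channel,
- * program the beacon related registers, set MSR to ad-hoc and start
- * beaconing.  The join result is reported via report_join_res23a().
- */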
-static void start_create_ibss(struct rtw_adapter *padapter)
-{
- unsigned short caps;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
-
- pmlmeext->cur_channel = (u8)pnetwork->DSConfig;
- pmlmeinfo->bcn_interval = pnetwork->beacon_interval;
-
- /* update wireless mode */
- update_wireless_mode23a(padapter);
-
- /* update capability */
- caps = pnetwork->capability;
- update_capinfo23a(padapter, caps);
- if (caps & WLAN_CAPABILITY_IBSS) { /* adhoc master */
- rtl8723a_set_sec_cfg(padapter, 0xcf);
-
- /* switch channel */
- /* SelectChannel23a(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
-
- rtl8723a_SetBeaconRelatedRegisters(padapter);
-
- /* set msr to MSR_ADHOC */
- pmlmeinfo->state = MSR_ADHOC;
- rtl8723a_set_media_status(padapter, pmlmeinfo->state & 0x3);
-
- /* issue beacon */
- if (send_beacon23a(padapter) == _FAIL) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "issuing beacon frame fail....\n");
-
- report_join_res23a(padapter, -1);
- pmlmeinfo->state = MSR_NOLINK;
- } else {
- hw_var_set_bssid(padapter, padapter->registrypriv.dev_network.MacAddress);
- hw_var_set_mlme_join(padapter, 0);
-
- report_join_res23a(padapter, 1);
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
- }
- } else {
- DBG_8723A("%s: invalid cap:%x\n", __func__, caps);
- return;
- }
-}
-
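-/*
- * Start joining the selected BSS as a client.  For infrastructure
- * networks this arms the link/assoc timers and waits for a beacon
- * before authenticating; for IBSS it configures beaconing and reports
- * the join result immediately.
- */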
-static void start_clnt_join(struct rtw_adapter *padapter)
-{
- unsigned short caps;
- u8 val8;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- int beacon_timeout;
-
- pmlmeext->cur_channel = (u8)pnetwork->DSConfig;
- pmlmeinfo->bcn_interval = pnetwork->beacon_interval;
-
- /* update wireless mode */
- update_wireless_mode23a(padapter);
-
- /* update capability */
- caps = pnetwork->capability;
- update_capinfo23a(padapter, caps);
- if (caps & WLAN_CAPABILITY_ESS) {
- /* switch channel */
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- rtl8723a_set_media_status(padapter, MSR_INFRA);
-
- val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ?
- 0xcc: 0xcf;
-
- rtl8723a_set_sec_cfg(padapter, val8);
-
- /* switch channel */
- /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
-
- /* here wait for receiving the beacon to start auth */
- /* and enable a timer */
- beacon_timeout = decide_wait_for_beacon_timeout23a(pmlmeinfo->bcn_interval);
- set_link_timer(pmlmeext, beacon_timeout);
- mod_timer(&padapter->mlmepriv.assoc_timer, jiffies +
- msecs_to_jiffies((REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout));
- pmlmeinfo->state = WIFI_FW_AUTH_NULL | MSR_INFRA;
- } else if (caps & WLAN_CAPABILITY_IBSS) { /* adhoc client */
- rtl8723a_set_media_status(padapter, MSR_ADHOC);
-
- rtl8723a_set_sec_cfg(padapter, 0xcf);
-
- /* switch channel */
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- rtl8723a_SetBeaconRelatedRegisters(padapter);
-
- pmlmeinfo->state = MSR_ADHOC;
-
- report_join_res23a(padapter, 1);
- } else {
- /* DBG_8723A("marc: invalid cap:%x\n", caps); */
- return;
- }
-}
-
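-/*
- * Begin 802.11 authentication: reset the retry counters, send a
- * pre-emptive deauth (in case the AP missed our earlier one), then
- * issue the first auth frame and arm the re-auth timer.
- */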
-static void start_clnt_auth(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- del_timer_sync(&pmlmeext->link_timer);
-
- pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
- pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
-
- pmlmeinfo->auth_seq = 1;
- pmlmeinfo->reauth_count = 0;
- pmlmeinfo->reassoc_count = 0;
- pmlmeinfo->link_count = 0;
- pmlmeext->retry = 0;
-
-	/*
-	 * The AP may not have received our earlier deauth; it may then either
-	 * 1) not respond to our auth or 2) deauth us after the link completes.
-	 * Issue a deauth before the auth to handle this situation.
-	 * Commented by Albert 2012/07/21: for a Win8 P2P connection it will be
-	 * hard to connect successfully if this Wi-Fi device does not connect
-	 * to the peer first.
-	 */
- issue_deauth23a(padapter, (&pmlmeinfo->network)->MacAddress,
- WLAN_REASON_DEAUTH_LEAVING);
-
- DBG_8723A_LEVEL(_drv_always_, "start auth\n");
- issue_auth(padapter, NULL, 0);
-
- set_link_timer(pmlmeext, REAUTH_TO);
-}
-
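-/* Move from authenticated to associating: send an assoc request and arm the reassoc timer. */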
-static void start_clnt_assoc(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- del_timer_sync(&pmlmeext->link_timer);
-
- pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
- pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
-
- issue_assocreq(padapter);
-
- set_link_timer(pmlmeext, REASSOC_TO);
-}
-
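-/*
- * Handle a deauth/disassoc addressed to our BSSID while in
- * infrastructure client mode: drop back to MSR_NOLINK and report
- * either a deleted station (if associated) or a failed join (if still
- * linking).
- */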
-int receive_disconnect23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, unsigned short reason)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* check A3 */
- if (!ether_addr_equal(MacAddr, get_my_bssid23a(&pmlmeinfo->network)))
- return _SUCCESS;
-
- DBG_8723A("%s\n", __func__);
-
-	if ((pmlmeinfo->state & 0x03) == MSR_INFRA) {
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- pmlmeinfo->state = MSR_NOLINK;
- report_del_sta_event23a(padapter, MacAddr, reason);
-
- } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
- pmlmeinfo->state = MSR_NOLINK;
- report_join_res23a(padapter, -2);
- }
- }
-
- return _SUCCESS;
-}
-
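-/*
- * 802.11d support: merge the channel list advertised in the AP's
- * Country IE with the local channel plan, scanning actively on
- * channels the AP supports and passively elsewhere, and finally mark
- * the AP's own channel for active scan (except DFS channels 52-144).
- */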
-static void process_80211d(struct rtw_adapter *padapter,
- struct wlan_bssid_ex *bssid)
-{
- struct registry_priv *pregistrypriv;
- struct mlme_ext_priv *pmlmeext;
- struct rt_channel_info *chplan_new;
- u8 channel;
- u8 i;
-
- pregistrypriv = &padapter->registrypriv;
- pmlmeext = &padapter->mlmeextpriv;
-
- /* Adjust channel plan by AP Country IE */
- if (pregistrypriv->enable80211d &&
- !pmlmeext->update_channel_plan_by_ap_done) {
- const u8 *ie, *p;
- struct rt_channel_plan chplan_ap;
- struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
- u8 country[4];
- u8 fcn; /* first channel number */
-		u8 noc; /* number of channels */
- u8 j, k;
-
- ie = cfg80211_find_ie(WLAN_EID_COUNTRY, bssid->IEs,
- bssid->IELength);
- if (!ie || ie[1] < IEEE80211_COUNTRY_IE_MIN_LEN)
- return;
-
- p = ie + 2;
- ie += ie[1];
- ie += 2;
-
- memcpy(country, p, 3);
- country[3] = '\0';
-
- p += 3;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "%s: 802.11d country =%s\n", __func__, country);
-
- i = 0;
- while ((ie - p) >= 3) {
- fcn = *(p++);
- noc = *(p++);
- p++;
-
- for (j = 0; j < noc; j++) {
- if (fcn <= 14)
- channel = fcn + j; /* 2.4 GHz */
- else
- channel = fcn + j * 4; /* 5 GHz */
-
- chplan_ap.Channel[i++] = channel;
- }
- }
- chplan_ap.Len = i;
-
- memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
- memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
- chplan_new = pmlmeext->channel_set;
-
- i = j = k = 0;
- if (pregistrypriv->wireless_mode & WIRELESS_11G) {
- do {
- if (i == MAX_CHANNEL_NUM ||
- chplan_sta[i].ChannelNum == 0 ||
- chplan_sta[i].ChannelNum > 14)
- break;
-
- if (j == chplan_ap.Len ||
- chplan_ap.Channel[j] > 14)
- break;
-
- if (chplan_sta[i].ChannelNum ==
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- i++;
- j++;
- k++;
- } else if (chplan_sta[i].ChannelNum <
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType =
- SCAN_PASSIVE;
- i++;
- k++;
- } else if (chplan_sta[i].ChannelNum >
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_ap.Channel[j];
- chplan_new[k].ScanType =
- SCAN_ACTIVE;
- j++;
- k++;
- }
- } while (1);
-
-			/* switch channels the AP does not support to passive scan */
- while (i < MAX_CHANNEL_NUM &&
- chplan_sta[i].ChannelNum != 0 &&
- chplan_sta[i].ChannelNum <= 14) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- }
-
-			/* add channels supported by the AP */
-			while (j < chplan_ap.Len && chplan_ap.Channel[j] <= 14) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } else {
- /* keep original STA 2.4G channel plan */
- while (i < MAX_CHANNEL_NUM &&
- chplan_sta[i].ChannelNum != 0 &&
- chplan_sta[i].ChannelNum <= 14) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = chplan_sta[i].ScanType;
- i++;
- k++;
- }
-
- /* skip AP 2.4G channel plan */
- while (j < chplan_ap.Len && chplan_ap.Channel[j] <= 14)
- j++;
- }
-
- if (pregistrypriv->wireless_mode & WIRELESS_11A) {
- do {
- if (i == MAX_CHANNEL_NUM ||
- chplan_sta[i].ChannelNum == 0)
- break;
-
- if (j == chplan_ap.Len ||
- chplan_ap.Channel[j] == 0)
- break;
-
- if (chplan_sta[i].ChannelNum ==
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- i++;
- j++;
- k++;
- } else if (chplan_sta[i].ChannelNum <
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- } else if (chplan_sta[i].ChannelNum >
- chplan_ap.Channel[j]) {
- chplan_new[k].ChannelNum =
- chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } while (1);
-
-			/* switch channels the AP does not support to passive scan */
- while (i < MAX_CHANNEL_NUM &&
- chplan_sta[i].ChannelNum != 0) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = SCAN_PASSIVE;
- i++;
- k++;
- }
-
-			/* add channels supported by the AP */
- while (j < chplan_ap.Len && chplan_ap.Channel[j] != 0) {
- chplan_new[k].ChannelNum = chplan_ap.Channel[j];
- chplan_new[k].ScanType = SCAN_ACTIVE;
- j++;
- k++;
- }
- } else {
- /* keep original STA 5G channel plan */
- while (i < MAX_CHANNEL_NUM &&
- chplan_sta[i].ChannelNum != 0) {
- chplan_new[k].ChannelNum =
- chplan_sta[i].ChannelNum;
- chplan_new[k].ScanType = chplan_sta[i].ScanType;
- i++;
- k++;
- }
- }
- pmlmeext->update_channel_plan_by_ap_done = 1;
- }
-
- /* If channel is used by AP, set channel scan type to active */
- channel = bssid->DSConfig;
- chplan_new = pmlmeext->channel_set;
- i = 0;
- while (i < MAX_CHANNEL_NUM && chplan_new[i].ChannelNum != 0) {
- if (chplan_new[i].ChannelNum == channel) {
- if (chplan_new[i].ScanType == SCAN_PASSIVE) {
-				/* 5G Band 2, 3 (DFS) channels are not
-				   switched to active scan */
- if (channel >= 52 && channel <= 144)
- break;
-
- chplan_new[i].ScanType = SCAN_ACTIVE;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
- "%s: change channel %d scan type from passive to active\n",
- __func__, channel);
- }
- break;
- }
- i++;
- }
-}
-
-/****************************************************************************
-
-Following are the functions to report events
-
-*****************************************************************************/
-
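-/*
- * Package the bss info collected from a received management frame
- * into a _Survey C2H event and queue it to the command thread.
- */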
-void report_survey_event23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct survey_event *psurvey_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext;
- struct cmd_priv *pcmdpriv;
-
- if (!padapter)
- return;
-
- pmlmeext = &padapter->mlmeextpriv;
- pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct survey_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- psurvey_evt = (struct survey_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
-
- psurvey_evt->bss = collect_bss_info(padapter, precv_frame);
- if (!psurvey_evt->bss) {
- kfree(pcmd_obj);
- kfree(pevtcmd);
- return;
- }
-
- process_80211d(padapter, psurvey_evt->bss);
-
- rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
- pmlmeext->sitesurvey_res.bss_cnt++;
-}
-
-void report_surveydone_event23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct surveydone_event *psurveydone_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct surveydone_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- psurveydone_evt = (struct surveydone_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
- psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
-
- DBG_8723A("survey done event(%x)\n", psurveydone_evt->bss_cnt);
-
- rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-}
-
-void report_join_res23a(struct rtw_adapter *padapter, int res)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct joinbss_event *pjoinbss_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header);
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct joinbss_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
- memcpy((unsigned char *)&pjoinbss_evt->network.network,
- &pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
- pjoinbss_evt->network.join_res = res;
-
- DBG_8723A("report_join_res23a(%d)\n", res);
-
- rtw_joinbss_event_prehandle23a(padapter, (u8 *)&pjoinbss_evt->network);
-
- rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-}
-
-void report_del_sta_event23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, unsigned short reason)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct sta_info *psta;
- int mac_id;
- struct stadel_event *pdel_sta_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct stadel_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- pdel_sta_evt = (struct stadel_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
- ether_addr_copy((unsigned char *)&pdel_sta_evt->macaddr, MacAddr);
- memcpy((unsigned char *)pdel_sta_evt->rsvd, (unsigned char *)&reason,
- 2);
-
- psta = rtw_get_stainfo23a(&padapter->stapriv, MacAddr);
- if (psta)
- mac_id = (int)psta->mac_id;
- else
- mac_id = -1;
-
- pdel_sta_evt->mac_id = mac_id;
-
- DBG_8723A("report_del_sta_event23a: delete STA, mac_id =%d\n", mac_id);
-
- rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-}
-
-void report_add_sta_event23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, int cam_idx)
-{
- struct cmd_obj *pcmd_obj;
- u8 *pevtcmd;
- u32 cmdsz;
- struct stassoc_event *padd_sta_evt;
- struct C2HEvent_Header *pc2h_evt_hdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd_obj)
- return;
-
- cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
- pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (!pevtcmd) {
- kfree(pcmd_obj);
- return;
- }
-
- pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
- pcmd_obj->cmdsz = cmdsz;
- pcmd_obj->parmbuf = pevtcmd;
-
- pcmd_obj->rsp = NULL;
- pcmd_obj->rspsz = 0;
-
- pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
- pc2h_evt_hdr->len = sizeof(struct stassoc_event);
- pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
-
- padd_sta_evt = (struct stassoc_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
- ether_addr_copy((unsigned char *)&padd_sta_evt->macaddr, MacAddr);
- padd_sta_evt->cam_id = cam_idx;
-
- DBG_8723A("report_add_sta_event23a: add STA\n");
-
- rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-}
-
-/****************************************************************************
-
-Following are the event callback functions
-
-*****************************************************************************/
-
-/* for sta/adhoc mode */
-void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* ERP */
- VCS_update23a(padapter, psta);
-
- /* HT */
- if (pmlmepriv->htpriv.ht_option) {
- psta->htpriv.ht_option = true;
-
- psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
-
- if (support_short_GI23a(padapter, &pmlmeinfo->ht_cap))
- psta->htpriv.sgi = true;
-
- psta->qos_option = true;
-
- } else {
- psta->htpriv.ht_option = false;
-
- psta->htpriv.ampdu_enable = false;
-
- psta->htpriv.sgi = false;
- psta->qos_option = false;
-
- }
- psta->htpriv.bwmode = pmlmeext->cur_bwmode;
- psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
-
- psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
- psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
-
- /* QoS */
- if (pmlmepriv->qos_option)
- psta->qos_option = true;
-
- psta->state = _FW_LINKED;
-}
-
-void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter,
- int join_res)
-{
- struct sta_info *psta, *psta_bmc;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (join_res < 0) {
- hw_var_set_mlme_join(padapter, 1);
- hw_var_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate23a(padapter,
- padapter->registrypriv.wireless_mode);
-
- goto exit_mlmeext_joinbss_event_callback23a;
- }
-
-	if ((pmlmeinfo->state & 0x03) == MSR_ADHOC) {
- /* for bc/mc */
- psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (psta_bmc) {
- pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
- update_bmc_sta_support_rate23a(padapter, psta_bmc->mac_id);
- Update_RA_Entry23a(padapter, psta_bmc);
- }
- }
-
- /* turn on dynamic functions */
- rtl8723a_odm_support_ability_set(padapter, DYNAMIC_ALL_FUNC_ENABLE);
-
-	/* update IOT-related settings */
- update_IOT_info23a(padapter);
-
- HalSetBrateCfg23a(padapter, cur_network->SupportedRates);
-
- /* BCN interval */
- rtl8723a_set_beacon_interval(padapter, pmlmeinfo->bcn_interval);
-
- /* update capability */
- update_capinfo23a(padapter, pmlmeinfo->capability);
-
- /* WMM, Update EDCA param */
- WMMOnAssocRsp23a(padapter);
-
- /* HT */
- HTOnAssocRsp23a(padapter);
-
- /* Set cur_channel&cur_bwmode&cur_ch_offset */
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
- if (psta) { /* only for infra. mode */
- pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
-
- /* DBG_8723A("set_sta_rate23a\n"); */
-
- psta->wireless_mode = pmlmeext->cur_wireless_mode;
-
- /* set per sta rate after updating HT cap. */
- set_sta_rate23a(padapter, psta);
- }
-
- hw_var_set_mlme_join(padapter, 2);
-
-	if ((pmlmeinfo->state & 0x03) == MSR_INFRA) {
- /* correcting TSF */
- rtw_correct_TSF(padapter);
-
- /* set_link_timer(pmlmeext, DISCONNECT_TO); */
- }
-
- rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_CONNECT, 0);
-
-exit_mlmeext_joinbss_event_callback23a:
- DBG_8723A("=>%s\n", __func__);
-}
-
-void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter,
- struct sta_info *psta)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_8723A("%s\n", __func__);
-
- if ((pmlmeinfo->state & 0x03) == MSR_ADHOC) {
- /* adhoc master or sta_count>1 */
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- /* nothing to do */
- } else { /* adhoc client */
- /* correcting TSF */
- rtw_correct_TSF(padapter);
-
- /* start beacon */
- if (send_beacon23a(padapter) != _SUCCESS) {
- pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
-
- pmlmeinfo->state ^= MSR_ADHOC;
-
- return;
- }
-
- pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
- }
- hw_var_set_mlme_join(padapter, 2);
- }
-
- pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
-
-	/* rate adaptive */
- Update_RA_Entry23a(padapter, psta);
-
- /* update adhoc sta_info */
- update_sta_info23a(padapter, psta);
-}
-
-void mlmeext_sta_del_event_callback23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (is_client_associated_to_ap23a(padapter) ||
- is_IBSS_empty23a(padapter)) {
- /* set_opmode_cmd(padapter, infra_client_with_mlme); */
-
- hw_var_set_mlme_disconnect(padapter);
- hw_var_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate23a(padapter,
- padapter->registrypriv.wireless_mode);
-
-		/* switch to the 20 MHz mode after disconnect */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset,
- pmlmeext->cur_bwmode);
-
- flush_all_cam_entry23a(padapter);
-
- pmlmeinfo->state = MSR_NOLINK;
-
- /* set MSR to no link state -> infra. mode */
- rtl8723a_set_media_status(padapter, MSR_INFRA);
-
- del_timer_sync(&pmlmeext->link_timer);
- }
-}
-
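-/*
- * Return true if any data, beacon or probe response frames have been
- * received from @psta since the last check; also snapshot the counters
- * for the next call.
- */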
-static u8 chk_ap_is_alive(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- u8 ret = false;
-
- if (sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta) &&
- sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta) &&
- sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta))
- ret = false;
- else
- ret = true;
-
- sta_update_last_rx_pkts(psta);
- return ret;
-}
-
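-/*
- * Periodic link watchdog.  In infrastructure mode it probes the AP or
- * sends null-data keep-alives when no traffic has been seen and
- * triggers a disconnect after rx_chk_limit consecutive failures; in
- * IBSS mode it expires peers whose rx counters stop increasing.
- */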
-void linked_status_chk23a(struct rtw_adapter *padapter)
-{
- u32 i;
- struct sta_info *psta;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (is_client_associated_to_ap23a(padapter)) {
- /* linked infrastructure client mode */
-
- int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
- int rx_chk_limit;
-
- rx_chk_limit = 4;
-
- psta = rtw_get_stainfo23a(pstapriv,
- pmlmeinfo->network.MacAddress);
- if (psta) {
- bool is_p2p_enable = false;
-
-			if (!chk_ap_is_alive(padapter, psta))
- rx_chk = _FAIL;
-
- if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
- tx_chk = _FAIL;
-
- if (pmlmeext->active_keep_alive_check &&
- (rx_chk == _FAIL || tx_chk == _FAIL)) {
- u8 backup_oper_channel = 0;
-
-				/* switch to the correct channel of the current
-				   network before issuing keep-alive frames */
- if (rtw_get_oper_ch23a(padapter) !=
- pmlmeext->cur_channel) {
- backup_oper_channel =
- rtw_get_oper_ch23a(padapter);
- SelectChannel23a(padapter,
- pmlmeext->cur_channel);
- }
-
- if (rx_chk != _SUCCESS)
- issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1);
-
- if ((tx_chk != _SUCCESS &&
- pmlmeinfo->link_count++ == 0xf) ||
- rx_chk != _SUCCESS) {
- tx_chk = issue_nulldata23a(padapter,
- psta->hwaddr,
- 0, 3, 1);
- /* if tx acked and p2p disabled,
- set rx_chk _SUCCESS to reset retry
- count */
- if (tx_chk == _SUCCESS &&
- !is_p2p_enable)
- rx_chk = _SUCCESS;
- }
-
- /* back to the original operation channel */
-				if (backup_oper_channel > 0)
- SelectChannel23a(padapter,
- backup_oper_channel);
- } else {
- if (rx_chk != _SUCCESS) {
- if (pmlmeext->retry == 0) {
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
- }
- }
-
- if (tx_chk != _SUCCESS &&
- pmlmeinfo->link_count++ == 0xf)
- tx_chk = issue_nulldata23a(padapter,
- NULL, 0, 1,
- 0);
- }
-
- if (rx_chk == _FAIL) {
- pmlmeext->retry++;
- if (pmlmeext->retry > rx_chk_limit) {
- DBG_8723A_LEVEL(_drv_always_,
- "%s(%s): disconnect or "
- "roaming\n", __func__,
- padapter->pnetdev->name);
- receive_disconnect23a(padapter, pmlmeinfo->network.MacAddress,
- WLAN_REASON_EXPIRATION_CHK);
- return;
- }
- } else
- pmlmeext->retry = 0;
-
- if (tx_chk == _FAIL)
- pmlmeinfo->link_count &= 0xf;
- else {
- pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
- pmlmeinfo->link_count = 0;
- }
-
- }
- } else if (is_client_associated_to_ibss23a(padapter)) {
- /* linked IBSS mode */
- /* for each assoc list entry to check the rx pkt counter */
- for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
- if (pmlmeinfo->FW_sta_info[i].status == 1) {
- psta = pmlmeinfo->FW_sta_info[i].psta;
-
- if (!psta)
- continue;
-
- if (pmlmeinfo->FW_sta_info[i].rx_pkt ==
- sta_rx_pkts(psta)) {
-
-					if (pmlmeinfo->FW_sta_info[i].retry < 3) {
- pmlmeinfo->FW_sta_info[i].retry++;
- } else {
- pmlmeinfo->FW_sta_info[i].retry = 0;
- pmlmeinfo->FW_sta_info[i].status = 0;
- report_del_sta_event23a(padapter, psta->hwaddr,
- 65535/* indicate disconnect caused by no rx */
- );
- }
- } else {
- pmlmeinfo->FW_sta_info[i].retry = 0;
- pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
- }
- }
- }
- /* set_link_timer(pmlmeext, DISCONNECT_TO); */
- }
-}
-
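-/*
- * Survey timer callback: advance to the next channel (or jump to the
- * end of the list on scan abort) and queue a _SiteSurvey command so
- * the scan continues in the command thread.
- */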
-static void survey_timer_hdl(unsigned long data)
-{
- struct rtw_adapter *padapter = (struct rtw_adapter *)data;
- struct cmd_obj *ph2c;
- struct sitesurvey_parm *psurveyPara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- /* issue rtw_sitesurvey_cmd23a */
- if (pmlmeext->sitesurvey_res.state > SCAN_START) {
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
- pmlmeext->sitesurvey_res.channel_idx++;
-
-		if (pmlmeext->scan_abort) {
- pmlmeext->sitesurvey_res.channel_idx =
- pmlmeext->sitesurvey_res.ch_num;
- DBG_8723A("%s idx:%d\n", __func__,
- pmlmeext->sitesurvey_res.channel_idx);
-
- pmlmeext->scan_abort = false;/* reset */
- }
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c)
- goto exit_survey_timer_hdl;
-
- psurveyPara = kzalloc(sizeof(struct sitesurvey_parm),
- GFP_ATOMIC);
- if (!psurveyPara) {
- kfree(ph2c);
- goto exit_survey_timer_hdl;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
- GEN_CMD_CODE(_SiteSurvey));
- rtw_enqueue_cmd23a(pcmdpriv, ph2c);
- }
-
-exit_survey_timer_hdl:
- return;
-}
-
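-/*
- * Link timer callback: handles the no-beacon, re-auth and re-assoc
- * timeouts during connection setup, reporting a failed join once the
- * retry limits are exceeded.
- */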
-static void link_timer_hdl(unsigned long data)
-{
- struct rtw_adapter *padapter = (struct rtw_adapter *)data;
- /* static unsigned int rx_pkt = 0; */
- /* static u64 tx_cnt = 0; */
- /* struct xmit_priv *pxmitpriv = &padapter->xmitpriv; */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
-
- if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
- DBG_8723A("link_timer_hdl:no beacon while connecting\n");
- pmlmeinfo->state = MSR_NOLINK;
- report_join_res23a(padapter, -3);
- } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
- /* re-auth timer */
- if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
- /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
- /* */
- pmlmeinfo->state = 0;
- report_join_res23a(padapter, -1);
- return;
- /* */
- /* else */
- /* */
- /* pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
- /* pmlmeinfo->reauth_count = 0; */
- /* */
- }
-
- DBG_8723A("link_timer_hdl: auth timeout and try again\n");
- pmlmeinfo->auth_seq = 1;
- issue_auth(padapter, NULL, 0);
- set_link_timer(pmlmeext, REAUTH_TO);
- } else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
- /* re-assoc timer */
- if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
- pmlmeinfo->state = MSR_NOLINK;
- report_join_res23a(padapter, -2);
- return;
- }
-
- DBG_8723A("link_timer_hdl: assoc timeout and try again\n");
- issue_assocreq(padapter);
- set_link_timer(pmlmeext, REASSOC_TO);
- }
-}
-
-static void addba_timer_hdl(unsigned long data)
-{
- struct sta_info *psta = (struct sta_info *)data;
- struct ht_priv *phtpriv;
-
- if (!psta)
- return;
-
- phtpriv = &psta->htpriv;
-
- if (phtpriv->ht_option && phtpriv->ampdu_enable) {
- if (phtpriv->candidate_tid_bitmap)
- phtpriv->candidate_tid_bitmap = 0x0;
- }
-}
-
-void init_addba_retry_timer23a(struct sta_info *psta)
-{
- setup_timer(&psta->addba_retry_timer, addba_timer_hdl,
- (unsigned long)psta);
-}
-
-void init_mlme_ext_timer23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- setup_timer(&pmlmeext->survey_timer, survey_timer_hdl,
- (unsigned long)padapter);
-
- setup_timer(&pmlmeext->link_timer, link_timer_hdl,
- (unsigned long)padapter);
-}
-
-int NULL_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- return H2C_SUCCESS;
-}
-
-int setopmode_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- enum nl80211_iftype type;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- const struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
-
- switch (psetop->mode) {
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_AP:
- pmlmeinfo->state = MSR_AP;
- type = MSR_AP;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- /* clear state */
- pmlmeinfo->state &= ~(BIT(0)|BIT(1));
- /* set to STATION_STATE */
- pmlmeinfo->state |= MSR_INFRA;
- type = MSR_INFRA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = MSR_ADHOC;
- break;
- default:
- type = MSR_NOLINK;
- break;
- }
-
- hw_var_set_opmode(padapter, type);
- /* Set_NETYPE0_MSR(padapter, type); */
-
- return H2C_SUCCESS;
-}
-
-int createbss_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- const struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
- /* u32 initialgain; */
-
- if (pparm->ifmode == NL80211_IFTYPE_AP ||
- pparm->ifmode == NL80211_IFTYPE_P2P_GO) {
-#ifdef CONFIG_8723AU_AP_MODE
- if (pmlmeinfo->state == MSR_AP) {
- /* todo: */
- return H2C_SUCCESS;
- }
-#endif
- }
-
- /* below is for ad-hoc master */
- if (pparm->ifmode == NL80211_IFTYPE_ADHOC) {
- rtw_joinbss_reset23a(padapter);
-
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeinfo->ERP_enable = 0;
- pmlmeinfo->WMM_enable = 0;
- pmlmeinfo->HT_enable = 0;
- pmlmeinfo->HT_caps_enable = 0;
- pmlmeinfo->HT_info_enable = 0;
-
- /* disable dynamic functions, such as high power, DIG */
- rtl8723a_odm_support_ability_backup(padapter);
-
- rtl8723a_odm_support_ability_clr(padapter,
- DYNAMIC_FUNC_DISABLE);
-
- /* cancel link timer */
- del_timer_sync(&pmlmeext->link_timer);
-
- /* clear CAM */
- flush_all_cam_entry23a(padapter);
-
- if (pparm->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
- return H2C_PARAMETERS_ERROR;
-
- memcpy(pnetwork, pparm, sizeof(struct wlan_bssid_ex));
-
- start_create_ibss(padapter);
- }
-
- return H2C_SUCCESS;
-}
-
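-/*
- * _JoinBss command handler: leave any currently associated AP, reset
- * the join state, parse the target network's WMM/HT IEs to pick the
- * bandwidth and channel offset, then kick off start_clnt_join().
- */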
-int join_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
- const struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
- struct ieee80211_ht_operation *pht_info;
- u32 i;
- u8 *p;
- /* u32 initialgain; */
- /* u32 acparm; */
-
-	/* if already associated to an AP, leave it first */
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
- if (pmlmeinfo->state & MSR_INFRA)
- issue_deauth_ex(padapter, pnetwork->MacAddress,
- WLAN_REASON_DEAUTH_LEAVING, 5, 100);
-
- pmlmeinfo->state = MSR_NOLINK;
-
- /* clear CAM */
- flush_all_cam_entry23a(padapter);
-
- del_timer_sync(&pmlmeext->link_timer);
-
- /* set MSR to nolink -> infra. mode */
- rtl8723a_set_media_status(padapter, MSR_INFRA);
-
- hw_var_set_mlme_disconnect(padapter);
- }
-
- rtw_joinbss_reset23a(padapter);
-
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeinfo->ERP_enable = 0;
- pmlmeinfo->WMM_enable = 0;
- pmlmeinfo->HT_enable = 0;
- pmlmeinfo->HT_caps_enable = 0;
- pmlmeinfo->HT_info_enable = 0;
- pmlmeinfo->bwmode_updated = false;
- /* pmlmeinfo->assoc_AP_vendor = HT_IOT_PEER_MAX; */
-
- if (pparm->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
- return H2C_PARAMETERS_ERROR;
-
- memcpy(pnetwork, pbuf, sizeof(struct wlan_bssid_ex));
-
- /* Check AP vendor to move rtw_joinbss_cmd23a() */
- /* pmlmeinfo->assoc_AP_vendor = check_assoc_AP23a(pnetwork->IEs,
- pnetwork->IELength); */
-
- for (i = 0; i < pnetwork->IELength;) {
- p = pnetwork->IEs + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:/* Get WMM IE. */
- if (!memcmp(p + 2, WMM_OUI23A, 4))
- pmlmeinfo->WMM_enable = 1;
- break;
-
- case WLAN_EID_HT_CAPABILITY: /* Get HT Cap IE. */
- pmlmeinfo->HT_caps_enable = 1;
- break;
-
- case WLAN_EID_HT_OPERATION: /* Get HT Info IE. */
- pmlmeinfo->HT_info_enable = 1;
-
-			/* special case only for Cisco APs, because Cisco APs
-			 * may issue the assoc rsp using an MCS rate at 40MHz or 20MHz */
- pht_info = (struct ieee80211_ht_operation *)(p + 2);
-
- if (pregpriv->cbw40_enable &&
- (pht_info->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
-				/* switch to the 40 MHz mode according to the AP */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch (pht_info->ht_param &
- IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- pmlmeext->cur_ch_offset =
- HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
-
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- pmlmeext->cur_ch_offset =
- HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
-
- default:
- pmlmeext->cur_ch_offset =
- HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
-
- DBG_8723A("set ch/bw before connected\n");
- }
- break;
-
- default:
- break;
- }
-
- i += (p[1] + 2);
- }
-
- hw_var_set_bssid(padapter, pmlmeinfo->network.MacAddress);
- hw_var_set_mlme_join(padapter, 0);
-
- /* cancel link timer */
- del_timer_sync(&pmlmeext->link_timer);
-
- start_clnt_join(padapter);
-
- return H2C_SUCCESS;
-}
-
-int disconnect_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
-
- if (is_client_associated_to_ap23a(padapter)) {
- issue_deauth_ex(padapter, pnetwork->MacAddress,
- WLAN_REASON_DEAUTH_LEAVING,
- param->deauth_timeout_ms/100, 100);
- }
-
- /* set_opmode_cmd(padapter, infra_client_with_mlme); */
-
- /* pmlmeinfo->state = MSR_NOLINK; */
-
- hw_var_set_mlme_disconnect(padapter);
- hw_var_set_bssid(padapter, null_addr);
-
- /* restore to initial setting. */
- update_tx_basic_rate23a(padapter, padapter->registrypriv.wireless_mode);
-
- if ((pmlmeinfo->state & 0x03) == MSR_ADHOC ||
- (pmlmeinfo->state & 0x03) == MSR_AP)
- rtl8723a_set_bcn_func(padapter, 0); /* Stop BCN */
-
- /* set MSR to no link state -> infra. mode */
- rtl8723a_set_media_status(padapter, MSR_INFRA);
-
- pmlmeinfo->state = MSR_NOLINK;
-
-	/* switch to the 20 MHz mode after disconnect */
- pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
- pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- flush_all_cam_entry23a(padapter);
-
- del_timer_sync(&pmlmeext->link_timer);
-
- rtw_free_uc_swdec_pending_queue23a(padapter);
-
- return H2C_SUCCESS;
-}
-
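-/*
- * Build the list of channels to scan in @out from the requested
- * channels in @in, keeping only channels present in the local channel
- * set (falling back to the full set if none match) and honouring the
- * 2.4G/5G band restriction in padapter->setband.  Returns the number
- * of channels written.
- */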
-static int
-rtw_scan_ch_decision(struct rtw_adapter *padapter,
- struct rtw_ieee80211_channel *out, u32 out_num,
- const struct rtw_ieee80211_channel *in, u32 in_num)
-{
- int i, j;
- int scan_ch_num = 0;
- int set_idx;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- /* clear out first */
- memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
-
- /* acquire channels from in */
- j = 0;
-	for (i = 0; i < in_num; i++) {
- if (in[i].hw_value &&
- !(in[i].flags & IEEE80211_CHAN_DISABLED) &&
- (set_idx = rtw_ch_set_search_ch23a(pmlmeext->channel_set,
- in[i].hw_value)) >= 0) {
- memcpy(&out[j], &in[i],
- sizeof(struct rtw_ieee80211_channel));
-
- if (pmlmeext->channel_set[set_idx].ScanType ==
- SCAN_PASSIVE)
-				out[j].flags |= IEEE80211_CHAN_NO_IR;
-
- j++;
- }
-		if (j >= out_num)
- break;
- }
-
- /* if out is empty, use channel_set as default */
- if (j == 0) {
-		for (i = 0; i < pmlmeext->max_chan_nums; i++) {
- out[i].hw_value = pmlmeext->channel_set[i].ChannelNum;
-
- if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
-				out[i].flags |= IEEE80211_CHAN_NO_IR;
-
- j++;
- }
- }
-
- if (padapter->setband == GHZ_24) { /* 2.4G */
- for (i = 0; i < j ; i++) {
- if (out[i].hw_value > 35)
- memset(&out[i], 0,
- sizeof(struct rtw_ieee80211_channel));
- else
- scan_ch_num++;
- }
- j = scan_ch_num;
- } else if (padapter->setband == GHZ_50) { /* 5G */
- for (i = 0; i < j ; i++) {
- if (out[i].hw_value > 35) {
- memcpy(&out[scan_ch_num++], &out[i],
- sizeof(struct rtw_ieee80211_channel));
- }
- }
- j = scan_ch_num;
-	}
-
- return j;
-}
-
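-/*
- * _SiteSurvey command handler: on the first invocation set up the
- * SSID and channel lists (optionally notifying an associated AP with
- * a null-data frame and delaying the scan), then disable dynamic
- * functions, raise the initial gain and step the scan via
- * rtw_site_survey().
- */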
-int sitesurvey_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- const struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
- u8 bdelayscan = false;
- u32 initialgain;
- u32 i;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
- pmlmeext->sitesurvey_res.state = SCAN_START;
- pmlmeext->sitesurvey_res.bss_cnt = 0;
- pmlmeext->sitesurvey_res.channel_idx = 0;
-
- for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pparm->ssid[i].ssid_len) {
- memcpy(pmlmeext->sitesurvey_res.ssid[i].ssid,
- pparm->ssid[i].ssid,
- IEEE80211_MAX_SSID_LEN);
- pmlmeext->sitesurvey_res.ssid[i].ssid_len =
- pparm->ssid[i].ssid_len;
- } else {
- pmlmeext->sitesurvey_res.ssid[i].ssid_len = 0;
- }
- }
-
- pmlmeext->sitesurvey_res.ch_num =
- rtw_scan_ch_decision(padapter,
- pmlmeext->sitesurvey_res.ch,
- RTW_CHANNEL_SCAN_AMOUNT,
- pparm->ch, pparm->ch_num);
-
- pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
-
- /* issue null data if associating to the AP */
- if (is_client_associated_to_ap23a(padapter)) {
- pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
-
-			/* switch to the correct channel of the current
-			   network before issuing keep-alive frames */
- if (rtw_get_oper_ch23a(padapter) !=
- pmlmeext->cur_channel)
- SelectChannel23a(padapter,
- pmlmeext->cur_channel);
-
- issue_nulldata23a(padapter, NULL, 1, 3, 500);
-
- bdelayscan = true;
- }
-
- if (bdelayscan) {
- /* delay 50ms to protect nulldata(1). */
- set_survey_timer(pmlmeext, 50);
- return H2C_SUCCESS;
- }
- }
-
- if (pmlmeext->sitesurvey_res.state == SCAN_START ||
- pmlmeext->sitesurvey_res.state == SCAN_TXNULL) {
- /* disable dynamic functions, such as high power, DIG */
- rtl8723a_odm_support_ability_backup(padapter);
- rtl8723a_odm_support_ability_clr(padapter,
- DYNAMIC_FUNC_DISABLE);
-
- /* config the initial gain under scanning, need to
- write the BB registers */
-		if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled)
- initialgain = 0x30;
- else
- initialgain = 0x1E;
-
- rtl8723a_set_initial_gain(padapter, initialgain);
-
- /* set MSR to no link state */
- rtl8723a_set_media_status(padapter, MSR_NOLINK);
-
- rtl8723a_mlme_sitesurvey(padapter, 1);
-
- pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
- }
-
- rtw_site_survey(padapter);
-
- return H2C_SUCCESS;
-}
-
-int setauth_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pparm->mode < 4)
- pmlmeinfo->auth_algo = pparm->mode;
-
- return H2C_SUCCESS;
-}
-
-int setkey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- unsigned short ctrl;
- const struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- /* main tx key for wep. */
- if (pparm->set_tx)
- pmlmeinfo->key_index = pparm->keyid;
-
- /* write cam */
- ctrl = BIT(15) | (pparm->algorithm) << 2 | pparm->keyid;
-
- DBG_8723A_LEVEL(_drv_always_, "set group key to hw: alg:%d(WEP40-1 "
- "WEP104-5 TKIP-2 AES-4) keyid:%d\n",
- pparm->algorithm, pparm->keyid);
- rtl8723a_cam_write(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
-
- /* allow multicast packets to driver */
- rtl8723a_on_rcr_am(padapter);
-
- return H2C_SUCCESS;
-}
-
-int set_stakey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- u16 ctrl = 0;
- u8 cam_id;/* cam_entry */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- const struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
-
- /* cam_entry: */
- /* 0~3 for default key */
-
- /* for concurrent mode (ap+sta): */
- /* default key is disable, using sw encrypt/decrypt */
- /* cam_entry = 4 for sta mode (macid = 0) */
- /* cam_entry(macid+3) = 5 ~ N for ap mode (aid = 1~N, macid = 2 ~N) */
-
- /* for concurrent mode (sta+sta): */
- /* default key is disable, using sw encrypt/decrypt */
- /* cam_entry = 4 mapping to macid = 0 */
- /* cam_entry = 5 mapping to macid = 2 */
-
- cam_id = 4;
-
- DBG_8723A_LEVEL(_drv_always_, "set pairwise key to hw: alg:%d(WEP40-1 "
- "WEP104-5 TKIP-2 AES-4) camid:%d\n",
- pparm->algorithm, cam_id);
- if ((pmlmeinfo->state & 0x03) == MSR_AP) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (pparm->algorithm == 0) { /* clear cam entry */
- clear_cam_entry23a(padapter, pparm->id);
- return H2C_SUCCESS_RSP;
- }
-
- psta = rtw_get_stainfo23a(pstapriv, pparm->addr);
- if (psta) {
- ctrl = BIT(15) | (pparm->algorithm << 2);
-
- DBG_8723A("r871x_set_stakey_hdl23a(): enc_algorithm "
- "=%d\n", pparm->algorithm);
-
- if (psta->mac_id < 1 || psta->mac_id > (NUM_STA - 4)) {
- DBG_8723A("r871x_set_stakey_hdl23a():set_stakey"
- " failed, mac_id(aid) =%d\n",
- psta->mac_id);
- return H2C_REJECTED;
- }
-
- /* 0~3 for default key, cmd_id = macid + 3,
- macid = aid+1; */
- cam_id = psta->mac_id + 3;
-
- DBG_8723A("Write CAM, mac_addr =%pM, "
- "cam_entry =%d\n", pparm->addr, cam_id);
-
- rtl8723a_cam_write(padapter, cam_id, ctrl,
- pparm->addr, pparm->key);
-
- return H2C_SUCCESS_RSP;
- } else {
- DBG_8723A("r871x_set_stakey_hdl23a(): sta has been "
- "free\n");
- return H2C_REJECTED;
- }
- }
-
- /* below for sta mode */
-
- if (pparm->algorithm == 0) { /* clear cam entry */
- clear_cam_entry23a(padapter, pparm->id);
- return H2C_SUCCESS;
- }
-
- ctrl = BIT(15) | (pparm->algorithm << 2);
-
- rtl8723a_cam_write(padapter, cam_id, ctrl, pparm->addr, pparm->key);
-
- pmlmeinfo->enc_algo = pparm->algorithm;
-
- return H2C_SUCCESS;
-}
-
-int add_ba_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct sta_info *psta;
-
- psta = rtw_get_stainfo23a(&padapter->stapriv, pparm->addr);
-
- if (!psta)
- return H2C_SUCCESS;
-
- if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) &&
- pmlmeinfo->HT_enable) ||
- (pmlmeinfo->state & 0x03) == MSR_AP) {
- issue_action_BA23a(padapter, pparm->addr,
- WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
- mod_timer(&psta->addba_retry_timer,
- jiffies + msecs_to_jiffies(ADDBA_TO));
- } else
- psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
-
- return H2C_SUCCESS;
-}
-
-int set_tx_beacon_cmd23a(struct rtw_adapter *padapter)
-{
- struct cmd_obj *ph2c;
- struct Tx_Beacon_param *ptxBeacon_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 res = _SUCCESS;
- int len_diff = 0;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- ptxBeacon_parm = kzalloc(sizeof(struct Tx_Beacon_param), GFP_ATOMIC);
- if (!ptxBeacon_parm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- memcpy(&ptxBeacon_parm->network, &pmlmeinfo->network,
- sizeof(struct wlan_bssid_ex));
-
- len_diff = update_hidden_ssid(ptxBeacon_parm->network.IEs,
- ptxBeacon_parm->network.IELength,
- pmlmeinfo->hidden_ssid_mode);
- ptxBeacon_parm->network.IELength += len_diff;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm,
- GEN_CMD_CODE(_TX_Beacon));
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-
-exit:
- return res;
-}
-
-int mlme_evt_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- u8 evt_code, evt_seq;
- u16 evt_sz;
- const struct C2HEvent_Header *c2h;
- void (*event_callback)(struct rtw_adapter *dev, const u8 *pbuf);
-
- c2h = (struct C2HEvent_Header *)pbuf;
- evt_sz = c2h->len;
- evt_seq = c2h->seq;
- evt_code = c2h->ID;
-
- /* checking if event code is valid */
- if (evt_code >= MAX_C2HEVT) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "Event Code(%d) mismatch!\n", evt_code);
- goto _abort_event_;
- }
-
- /* checking if event size match the event parm size */
- if (wlanevents[evt_code].parmsize != 0 &&
- wlanevents[evt_code].parmsize != evt_sz) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
- "Event(%d) Parm Size mismatch (%d vs %d)!\n",
- evt_code, wlanevents[evt_code].parmsize, evt_sz);
- goto _abort_event_;
- }
-
- event_callback = wlanevents[evt_code].event_callback;
- event_callback(padapter, pbuf + sizeof(struct C2HEvent_Header));
-
-_abort_event_:
-
- return H2C_SUCCESS;
-}
-
-int h2c_msg_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- return H2C_SUCCESS;
-}
-
-int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- if (send_beacon23a(padapter) == _FAIL) {
- DBG_8723A("issue_beacon23a, fail!\n");
- return H2C_PARAMETERS_ERROR;
- }
-#ifdef CONFIG_8723AU_AP_MODE
- else { /* tx bc/mc frames after update TIM */
- struct sta_info *psta_bmc;
- struct list_head *phead;
- struct xmit_frame *pxmitframe, *ptmp;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (!psta_bmc)
- return H2C_SUCCESS;
-
- if (pstapriv->tim_bitmap & BIT(0) && psta_bmc->sleepq_len > 0) {
-			msleep(10); /* 10ms, ATIM(HIQ) window */
- /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
- spin_lock_bh(&pxmitpriv->lock);
-
- phead = get_list_head(&psta_bmc->sleep_q);
-
- list_for_each_entry_safe(pxmitframe, ptmp,
- phead, list) {
-
- list_del_init(&pxmitframe->list);
-
- psta_bmc->sleepq_len--;
-				if (psta_bmc->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
-
- pxmitframe->attrib.qsel = 0x11;/* HIQ */
-
- rtl8723au_hal_xmitframe_enqueue(padapter,
- pxmitframe);
- }
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
- }
- }
-#endif
-
- return H2C_SUCCESS;
-}
-
-int set_ch_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct set_ch_parm *set_ch_parm;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- set_ch_parm = (struct set_ch_parm *)pbuf;
-
- DBG_8723A("%s(%s): ch:%u, bw:%u, ch_offset:%u\n", __func__,
- padapter->pnetdev->name, set_ch_parm->ch,
- set_ch_parm->bw, set_ch_parm->ch_offset);
-
- pmlmeext->cur_channel = set_ch_parm->ch;
- pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
- pmlmeext->cur_bwmode = set_ch_parm->bw;
-
- set_channel_bwmode23a(padapter, set_ch_parm->ch,
- set_ch_parm->ch_offset, set_ch_parm->bw);
-
- return H2C_SUCCESS;
-}
-
-int set_chplan_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- const struct SetChannelPlan_param *setChannelPlan_param;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
-
- pmlmeext->max_chan_nums =
- init_channel_set(padapter, setChannelPlan_param->channel_plan,
- pmlmeext->channel_set);
- init_channel_list(padapter, pmlmeext->channel_set,
- pmlmeext->max_chan_nums, &pmlmeext->channel_list);
-
- return H2C_SUCCESS;
-}
-
-int led_blink_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- struct LedBlink_param *ledBlink_param;
-
- if (!pbuf)
- return H2C_PARAMETERS_ERROR;
-
- ledBlink_param = (struct LedBlink_param *)pbuf;
-
- return H2C_SUCCESS;
-}
-
-int set_csa_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- return H2C_REJECTED;
-}
-
-/* TDLS_WRCR : write RCR DATA BIT */
-/* TDLS_SD_PTI : issue peer traffic indication */
-/* TDLS_CS_OFF : go back to the channel linked with AP,
- terminating channel switch procedure */
-/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and
- mgnt frame */
-/* TDLS_DONE_CH_SEN : channel sensing and report candidate channel */
-/* TDLS_OFF_CH : first time set channel to off channel */
-/* TDLS_BASE_CH : go back to the channel linked with AP when the base
-		  channel is set as the target channel */
-/* TDLS_P_OFF_CH : periodically go to off channel */
-/* TDLS_P_BASE_CH : periodically go back to base channel */
-/* TDLS_RS_RCR : restore RCR */
-/* TDLS_CKALV_PH1 : check alive timer phase1 */
-/* TDLS_CKALV_PH2 : check alive timer phase2 */
-/* TDLS_FREE_STA : free tdls sta */
-int tdls_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
-{
- return H2C_REJECTED;
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
deleted file mode 100644
index 7488a104935b..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_PWRCTRL_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <osdep_intf.h>
-#include <rtl8723a_cmd.h>
-#include <rtw_sreset.h>
-
-#include <rtl8723a_bt_intf.h>
-#include <usb_ops_linux.h>
-
-void ips_enter23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- down(&pwrpriv->lock);
-
- pwrpriv->bips_processing = true;
-
-	/* sync ips_mode with request */
- pwrpriv->ips_mode = pwrpriv->ips_mode_req;
-
- pwrpriv->ips_enter23a_cnts++;
- DBG_8723A("==>ips_enter23a cnts:%d\n", pwrpriv->ips_enter23a_cnts);
- rtl8723a_BT_disable_coexist(padapter);
-
- if (pwrpriv->change_rfpwrstate == rf_off) {
- pwrpriv->bpower_saving = true;
- DBG_8723A_LEVEL(_drv_always_, "nolinked power save enter\n");
-
- if (pwrpriv->ips_mode == IPS_LEVEL_2)
- pwrpriv->bkeepfwalive = true;
-
- rtw_ips_pwr_down23a(padapter);
- pwrpriv->rf_pwrstate = rf_off;
- }
- pwrpriv->bips_processing = false;
-
- up(&pwrpriv->lock);
-}
-
-int ips_leave23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int result = _SUCCESS;
- int keyid;
-
- down(&pwrpriv->lock);
-
- if (pwrpriv->rf_pwrstate == rf_off && !pwrpriv->bips_processing) {
- pwrpriv->bips_processing = true;
- pwrpriv->change_rfpwrstate = rf_on;
- pwrpriv->ips_leave23a_cnts++;
- DBG_8723A("==>ips_leave23a cnts:%d\n",
- pwrpriv->ips_leave23a_cnts);
-
- result = rtw_ips_pwr_up23a(padapter);
- if (result == _SUCCESS)
- pwrpriv->rf_pwrstate = rf_on;
-
- DBG_8723A_LEVEL(_drv_always_, "nolinked power save leave\n");
-
- if (psecuritypriv->dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_WEP40 ||
- psecuritypriv->dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_WEP104) {
- DBG_8723A("==>%s, channel(%d), processing(%x)\n",
- __func__, padapter->mlmeextpriv.cur_channel,
- pwrpriv->bips_processing);
- set_channel_bwmode23a(padapter,
- padapter->mlmeextpriv.cur_channel,
- HAL_PRIME_CHNL_OFFSET_DONT_CARE,
- HT_CHANNEL_WIDTH_20);
- for (keyid = 0; keyid < 4; keyid++) {
- if (pmlmepriv->key_mask & BIT(keyid)) {
- if (keyid ==
- psecuritypriv->dot11PrivacyKeyIndex)
- result = rtw_set_key23a(padapter, psecuritypriv, keyid, 1);
- else
- result = rtw_set_key23a(padapter, psecuritypriv, keyid, 0);
- }
- }
- }
-
- DBG_8723A("==> ips_leave23a.....LED(0x%08x)...\n",
- rtl8723au_read32(padapter, 0x4c));
- pwrpriv->bips_processing = false;
-
- pwrpriv->bkeepfwalive = false;
- pwrpriv->bpower_saving = false;
- }
-
- up(&pwrpriv->lock);
-
- return result;
-}
-
-
-static bool rtw_pwr_unassociated_idle(struct rtw_adapter *adapter)
-{
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct xmit_priv *pxmit_priv = &adapter->xmitpriv;
-
- bool ret = false;
-
- if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
- goto exit;
-
- if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
- check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)){
- goto exit;
- }
-
- if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
- pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
- DBG_8723A_LEVEL(_drv_always_,
- "There are some pkts to transmit\n");
- DBG_8723A_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, "
- "free_xmit_extbuf_cnt: %d\n",
- pxmit_priv->free_xmitbuf_cnt,
- pxmit_priv->free_xmit_extbuf_cnt);
- goto exit;
- }
-
- ret = true;
-
-exit:
- return ret;
-}
-
-void rtw_ps_processor23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- pwrpriv->ps_processing = true;
-
- if (pwrpriv->bips_processing == true)
- goto exit;
-
- if (pwrpriv->ips_mode_req == IPS_NONE)
- goto exit;
-
- if (!rtw_pwr_unassociated_idle(padapter))
- goto exit;
-
- if (pwrpriv->rf_pwrstate == rf_on &&
- (pwrpriv->pwr_state_check_cnts % 4) == 0) {
- DBG_8723A("==>%s .fw_state(%x)\n", __func__,
- get_fwstate(pmlmepriv));
- pwrpriv->change_rfpwrstate = rf_off;
- ips_enter23a(padapter);
- }
-exit:
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
- pwrpriv->ps_processing = false;
-}
-
-static void pwr_state_check_handler(unsigned long data)
-{
- struct rtw_adapter *padapter = (struct rtw_adapter *)data;
-
- rtw_ps_cmd23a(padapter);
-}
-
-/*
- *
- * Parameters
- * padapter
- * pslv power state level, only could be PS_STATE_S0 ~ PS_STATE_S4
- *
- */
-void rtw_set_rpwm23a(struct rtw_adapter *padapter, u8 pslv)
-{
- u8 rpwm;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- pslv = PS_STATE(pslv);
-
- if (pwrpriv->btcoex_rfon) {
- if (pslv < PS_STATE_S4)
- pslv = PS_STATE_S3;
- }
-
- if (pwrpriv->rpwm == pslv) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- "%s: Already set rpwm[0x%02X], new = 0x%02X!\n",
- __func__, pwrpriv->rpwm, pslv);
- return;
- }
-
- if (padapter->bSurpriseRemoved == true ||
- padapter->hw_init_completed == false) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- "%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
- __func__, padapter->bSurpriseRemoved,
- padapter->hw_init_completed);
-
- pwrpriv->cpwm = PS_STATE_S4;
-
- return;
- }
-
- if (padapter->bDriverStopped == true) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- "%s: change power state(0x%02X) when DriverStopped\n",
- __func__, pslv);
-
- if (pslv < PS_STATE_S2) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- "%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n",
- __func__, pslv);
- return;
- }
- }
-
- rpwm = pslv | pwrpriv->tog;
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- "rtw_set_rpwm23a: rpwm = 0x%02x cpwm = 0x%02x\n",
- rpwm, pwrpriv->cpwm);
-
- pwrpriv->rpwm = pslv;
-
- rtl8723a_set_rpwm(padapter, rpwm);
-
- pwrpriv->tog += 0x80;
- pwrpriv->cpwm = pslv;
-}
-
-static bool PS_RDY_CHECK(struct rtw_adapter *padapter)
-{
- unsigned long delta_time;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- delta_time = jiffies - pwrpriv->DelayLPSLastTimeStamp;
-
- if (delta_time < LPS_DELAY_TIME)
- return false;
-
- if (!check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
- return false;
- if (pwrpriv->bInSuspend)
- return false;
- if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X &&
- !padapter->securitypriv.binstallGrpkey) {
- DBG_8723A("Group handshake still in progress !!!\n");
- return false;
- }
- if (!rtw_cfg80211_pwr_mgmt(padapter))
- return false;
-
- return true;
-}
-
-void rtw_set_ps_mode23a(struct rtw_adapter *padapter, u8 ps_mode,
- u8 smart_ps, u8 bcn_ant_mode)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- "%s: PowerMode =%d Smart_PS =%d\n",
- __func__, ps_mode, smart_ps);
-
- if (ps_mode > PM_Card_Disable) {
- RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
- "ps_mode:%d error\n", ps_mode);
- return;
- }
-
- if (pwrpriv->pwr_mode == ps_mode) {
- if (PS_MODE_ACTIVE == ps_mode)
- return;
-
- if (pwrpriv->smart_ps == smart_ps &&
- pwrpriv->bcn_ant_mode == bcn_ant_mode)
- return;
- }
-
- if (ps_mode == PS_MODE_ACTIVE) {
- DBG_8723A("rtw_set_ps_mode23a: Leave 802.11 power save\n");
-
- pwrpriv->pwr_mode = ps_mode;
- rtw_set_rpwm23a(padapter, PS_STATE_S4);
- rtl8723a_set_FwPwrMode_cmd(padapter, ps_mode);
- pwrpriv->bFwCurrentInPSMode = false;
- } else {
- if (PS_RDY_CHECK(padapter) ||
- rtl8723a_BT_using_antenna_1(padapter)) {
- DBG_8723A("%s: Enter 802.11 power save\n", __func__);
-
- pwrpriv->bFwCurrentInPSMode = true;
- pwrpriv->pwr_mode = ps_mode;
- pwrpriv->smart_ps = smart_ps;
- pwrpriv->bcn_ant_mode = bcn_ant_mode;
- rtl8723a_set_FwPwrMode_cmd(padapter, ps_mode);
-
- rtw_set_rpwm23a(padapter, PS_STATE_S2);
- }
- }
-}
-
-/*
- * Return:
- * 0: Leave OK
- * -1: Timeout
- * -2: Other error
- */
-s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms)
-{
- unsigned long start_time, end_time;
- u8 bAwake = false;
- s32 err = 0;
-
- start_time = jiffies;
- end_time = start_time + msecs_to_jiffies(delay_ms);
-
- while (1) {
- bAwake = rtl8723a_get_fwlps_rf_on(padapter);
- if (bAwake == true)
- break;
-
- if (padapter->bSurpriseRemoved == true) {
- err = -2;
- DBG_8723A("%s: device surprise removed!!\n", __func__);
- break;
- }
-
- if (time_after(jiffies, end_time)) {
- err = -1;
- DBG_8723A("%s: Wait for FW LPS leave more than %u "
- "ms!\n", __func__, delay_ms);
- break;
- }
- udelay(100);
- }
-
- return err;
-}
-
-/* Description: */
-/* Enter the leisure power save mode. */
-void LPS_Enter23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- if (!PS_RDY_CHECK(padapter))
- return;
-
- if (pwrpriv->bLeisurePs) {
- /* Idle for a while if we connect to AP a while ago. */
- if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
- if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
- pwrpriv->bpower_saving = true;
- DBG_8723A("%s smart_ps:%d\n", __func__,
- pwrpriv->smart_ps);
- /* For Tenda W311R IOT issue */
- rtw_set_ps_mode23a(padapter,
- pwrpriv->power_mgnt,
- pwrpriv->smart_ps, 0);
- }
- } else
- pwrpriv->LpsIdleCount++;
- }
-}
-
-/* Description: */
-/* Leave the leisure power save mode. */
-void LPS_Leave23a(struct rtw_adapter *padapter)
-{
-#define LPS_LEAVE_TIMEOUT_MS 100
-
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- if (pwrpriv->bLeisurePs) {
- if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
- rtw_set_ps_mode23a(padapter, PS_MODE_ACTIVE, 0, 0);
-
- if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
- LPS_RF_ON_check23a(padapter,
- LPS_LEAVE_TIMEOUT_MS);
- }
- }
-
- pwrpriv->bpower_saving = false;
-}
-
-/* Description: Leave all power save modes (LPS, FwLPS, IPS) if needed. */
-/* Move code to function by tynli. 2010.03.26. */
-void LeaveAllPowerSaveMode23a(struct rtw_adapter *Adapter)
-{
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 enqueue = 0;
-
- /* DBG_8723A("%s.....\n", __func__); */
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_lps_ctrl_wk_cmd23a(Adapter, LPS_CTRL_LEAVE, enqueue);
-}
-
-void rtw_init_pwrctrl_priv23a(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- sema_init(&pwrctrlpriv->lock, 1);
- pwrctrlpriv->rf_pwrstate = rf_on;
- pwrctrlpriv->ips_enter23a_cnts = 0;
- pwrctrlpriv->ips_leave23a_cnts = 0;
- pwrctrlpriv->bips_processing = false;
-
- pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
- pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
-
- pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
- pwrctrlpriv->pwr_state_check_cnts = 0;
- pwrctrlpriv->bInSuspend = false;
- pwrctrlpriv->bkeepfwalive = false;
-
- pwrctrlpriv->LpsIdleCount = 0;
-
- /* PS_MODE_MIN; */
- pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;
- pwrctrlpriv->bLeisurePs =
- (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
-
- pwrctrlpriv->bFwCurrentInPSMode = false;
-
- pwrctrlpriv->rpwm = 0;
- pwrctrlpriv->cpwm = PS_STATE_S4;
-
- pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
- pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
- pwrctrlpriv->bcn_ant_mode = 0;
-
- pwrctrlpriv->tog = 0x80;
-
- pwrctrlpriv->btcoex_rfon = false;
-
- setup_timer(&pwrctrlpriv->pwr_state_check_timer,
- pwr_state_check_handler, (unsigned long)padapter);
-}
-
-void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter)
-{
-}
-
-inline void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
-}
-
-/*
-* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
-* @adapter: pointer to _adapter structure
-* @ips_deffer_ms: number of ms for which falling back into IPS is prevented after wakeup
-* Return _SUCCESS or _FAIL
-*/
-
-int _rtw_pwr_wakeup23a(struct rtw_adapter *padapter, u32 ips_deffer_ms, const char *caller)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int ret = _SUCCESS;
- unsigned long start = jiffies;
- unsigned long new_deny_time;
-
- new_deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
-
- if (time_before(pwrpriv->ips_deny_time, new_deny_time))
- pwrpriv->ips_deny_time = new_deny_time;
-
- if (pwrpriv->ps_processing) {
- DBG_8723A("%s wait ps_processing...\n", __func__);
- while (pwrpriv->ps_processing &&
- jiffies_to_msecs(jiffies - start) <= 3000)
- msleep(10);
- if (pwrpriv->ps_processing)
- DBG_8723A("%s wait ps_processing timeout\n", __func__);
- else
- DBG_8723A("%s wait ps_processing done\n", __func__);
- }
-
- if (rtw_sreset_inprogress(padapter)) {
- DBG_8723A("%s wait sreset_inprogress...\n", __func__);
- while (rtw_sreset_inprogress(padapter) &&
- jiffies_to_msecs(jiffies - start) <= 4000)
- msleep(10);
- if (rtw_sreset_inprogress(padapter))
- DBG_8723A("%s wait sreset_inprogress timeout\n",
- __func__);
- else
- DBG_8723A("%s wait sreset_inprogress done\n", __func__);
- }
-
- if (pwrpriv->bInSuspend) {
- DBG_8723A("%s wait bInSuspend...\n", __func__);
- while (pwrpriv->bInSuspend &&
- (jiffies_to_msecs(jiffies - start) <= 3000)) {
- msleep(10);
- }
- if (pwrpriv->bInSuspend)
- DBG_8723A("%s wait bInSuspend timeout\n", __func__);
- else
- DBG_8723A("%s wait bInSuspend done\n", __func__);
- }
-
- /* System suspend is not allowed to wakeup */
- if (pwrpriv->bInSuspend) {
- ret = _FAIL;
- goto exit;
- }
-
- /* I think this should be check in IPS, LPS, autosuspend functions... */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (rf_off == pwrpriv->rf_pwrstate) {
- DBG_8723A("%s call ips_leave23a....\n", __func__);
-		if (ips_leave23a(padapter) == _FAIL) {
- DBG_8723A("======> ips_leave23a fail.............\n");
- ret = _FAIL;
- goto exit;
- }
- }
-
- /* TODO: the following checking need to be merged... */
- if (padapter->bDriverStopped || !padapter->bup ||
- !padapter->hw_init_completed) {
- DBG_8723A("%s: bDriverStopped =%d, bup =%d, hw_init_completed "
- "=%u\n", caller, padapter->bDriverStopped,
- padapter->bup, padapter->hw_init_completed);
- ret = _FAIL;
- goto exit;
- }
-
-exit:
- new_deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
- if (time_before(pwrpriv->ips_deny_time, new_deny_time))
- pwrpriv->ips_deny_time = new_deny_time;
- return ret;
-}
-
-int rtw_pm_set_lps23a(struct rtw_adapter *padapter, u8 mode)
-{
- int ret = 0;
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (mode < PS_MODE_NUM) {
- if (pwrctrlpriv->power_mgnt != mode) {
- if (PS_MODE_ACTIVE == mode)
- LeaveAllPowerSaveMode23a(padapter);
- else
- pwrctrlpriv->LpsIdleCount = 2;
- pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs =
- (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ?
- true:false;
- }
- } else
- ret = -EINVAL;
-
- return ret;
-}
-
-int rtw_pm_set_ips23a(struct rtw_adapter *padapter, u8 mode)
-{
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (mode != IPS_NORMAL && mode != IPS_LEVEL_2 && mode != IPS_NONE)
- return -EINVAL;
-
- pwrctrlpriv->ips_mode_req = mode;
- if (mode == IPS_NONE) {
- DBG_8723A("%s %s\n", __func__, "IPS_NONE");
- if (padapter->bSurpriseRemoved == 0 &&
- rtw_pwr_wakeup(padapter) == _FAIL)
- return -EFAULT;
- }
-
- return 0;
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
deleted file mode 100644
index 150dabc2a58d..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ /dev/null
@@ -1,2204 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_RECV_C_
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <mlme_osdep.h>
-#include <linux/ip.h>
-#include <linux/if_ether.h>
-#include <usb_ops.h>
-#include <linux/ieee80211.h>
-#include <wifi.h>
-#include <rtl8723a_recv.h>
-#include <rtl8723a_xmit.h>
-
-void rtw_signal_stat_timer_hdl23a(unsigned long data);
-
-void _rtw_init_sta_recv_priv23a(struct sta_recv_priv *psta_recvpriv)
-{
-
-
-
- spin_lock_init(&psta_recvpriv->lock);
-
- /* for (i = 0; i<MAX_RX_NUMBLKS; i++) */
- /* _rtw_init_queue23a(&psta_recvpriv->blk_strms[i]); */
-
- _rtw_init_queue23a(&psta_recvpriv->defrag_q);
-
-
-}
-
-int _rtw_init_recv_priv23a(struct recv_priv *precvpriv,
- struct rtw_adapter *padapter)
-{
- struct recv_frame *precvframe;
- int i;
- int res = _SUCCESS;
-
- spin_lock_init(&precvpriv->lock);
-
- _rtw_init_queue23a(&precvpriv->free_recv_queue);
- _rtw_init_queue23a(&precvpriv->recv_pending_queue);
- _rtw_init_queue23a(&precvpriv->uc_swdec_pending_queue);
-
- precvpriv->adapter = padapter;
-
- for (i = 0; i < NR_RECVFRAME ; i++) {
- precvframe = kzalloc(sizeof(struct recv_frame), GFP_KERNEL);
- if (!precvframe)
- break;
- INIT_LIST_HEAD(&precvframe->list);
-
- list_add_tail(&precvframe->list,
- &precvpriv->free_recv_queue.queue);
-
- precvframe->adapter = padapter;
- precvframe++;
- }
-
- precvpriv->free_recvframe_cnt = i;
- precvpriv->rx_pending_cnt = 1;
-
- res = rtl8723au_init_recv_priv(padapter);
-
- setup_timer(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl23a,
- (unsigned long)padapter);
-
- precvpriv->signal_stat_sampling_interval = 1000; /* ms */
-
- rtw_set_signal_stat_timer(precvpriv);
-
- return res;
-}
-
-void _rtw_free_recv_priv23a(struct recv_priv *precvpriv)
-{
- struct rtw_adapter *padapter = precvpriv->adapter;
- struct recv_frame *precvframe, *ptmp;
-
- rtw_free_uc_swdec_pending_queue23a(padapter);
-
- list_for_each_entry_safe(precvframe, ptmp,
- &precvpriv->free_recv_queue.queue, list) {
- list_del_init(&precvframe->list);
- kfree(precvframe);
- }
-
- rtl8723au_free_recv_priv(padapter);
-}
-
-struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue)
-{
- struct recv_frame *pframe;
- struct rtw_adapter *padapter;
- struct recv_priv *precvpriv;
-
- spin_lock_bh(&pfree_recv_queue->lock);
-
- pframe = list_first_entry_or_null(&pfree_recv_queue->queue,
- struct recv_frame, list);
- if (pframe) {
- list_del_init(&pframe->list);
- padapter = pframe->adapter;
- if (padapter) {
- precvpriv = &padapter->recvpriv;
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt--;
- }
- }
-
- spin_unlock_bh(&pfree_recv_queue->lock);
-
- return pframe;
-}
-
-int rtw_free_recvframe23a(struct recv_frame *precvframe)
-{
- struct rtw_adapter *padapter = precvframe->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct rtw_queue *pfree_recv_queue;
-
- if (precvframe->pkt) {
- dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
- precvframe->pkt = NULL;
- }
-
- pfree_recv_queue = &precvpriv->free_recv_queue;
- spin_lock_bh(&pfree_recv_queue->lock);
-
- list_del_init(&precvframe->list);
-
- list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue));
-
- if (padapter) {
- if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
- spin_unlock_bh(&pfree_recv_queue->lock);
-
-
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *queue)
-{
- struct rtw_adapter *padapter = precvframe->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- spin_lock_bh(&queue->lock);
-
- list_del_init(&precvframe->list);
-
- list_add_tail(&precvframe->list, get_list_head(queue));
-
- if (padapter) {
- if (queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
- }
-
- spin_unlock_bh(&queue->lock);
-
- return _SUCCESS;
-}
-
-/*
-caller : defrag ; recvframe_chk_defrag23a in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spinlock to protect
-
-*/
-
-static void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue)
-{
- struct recv_frame *hdr, *ptmp;
- struct list_head *phead;
-
- spin_lock(&pframequeue->lock);
- phead = get_list_head(pframequeue);
- list_for_each_entry_safe(hdr, ptmp, phead, list)
- rtw_free_recvframe23a(hdr);
- spin_unlock(&pframequeue->lock);
-}
-
-u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
-{
- u32 cnt = 0;
- struct recv_frame *pending_frame;
-
- while ((pending_frame = rtw_alloc_recvframe23a(&adapter->recvpriv.uc_swdec_pending_queue))) {
- rtw_free_recvframe23a(pending_frame);
- DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
- cnt++;
- }
-
- return cnt;
-}
-
-struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue)
-{
- unsigned long irqL;
- struct recv_buf *precvbuf;
-
- spin_lock_irqsave(&queue->lock, irqL);
-
- precvbuf = list_first_entry_or_null(&queue->queue,
- struct recv_buf, list);
- if (precvbuf)
- list_del_init(&precvbuf->list);
-
- spin_unlock_irqrestore(&queue->lock, irqL);
-
- return precvbuf;
-}
-
-int recvframe_chkmic(struct rtw_adapter *adapter,
- struct recv_frame *precvframe);
-int recvframe_chkmic(struct rtw_adapter *adapter,
- struct recv_frame *precvframe) {
-
- int i, res = _SUCCESS;
- u32 datalen;
- u8 miccode[8];
- u8 bmic_err = false, brpt_micerror = true;
- u8 *pframe, *payload, *pframemic;
- u8 *mickey;
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
-
- stainfo = rtw_get_stainfo23a(&adapter->stapriv, &prxattrib->ta[0]);
-
- if (prxattrib->encrypt == WLAN_CIPHER_SUITE_TKIP) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recvframe_chkmic:prxattrib->encrypt == WLAN_CIPHER_SUITE_TKIP\n");
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recvframe_chkmic:da = %pM\n", prxattrib->ra);
-
- /* calculate mic code */
- if (stainfo != NULL) {
- if (is_multicast_ether_addr(prxattrib->ra)) {
- mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recvframe_chkmic: bcmc key\n");
-
- if (!psecuritypriv->binstallGrpkey) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- "recvframe_chkmic:didn't install group key!\n");
- DBG_8723A("\n recvframe_chkmic:didn't "
- "install group key!!!!!!\n");
- goto exit;
- }
- } else {
- mickey = &stainfo->dot11tkiprxmickey.skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvframe_chkmic: unicast key\n");
- }
-
- /* icv_len included the mic code */
- datalen = precvframe->pkt->len-prxattrib->
- hdrlen-prxattrib->iv_len-prxattrib->icv_len - 8;
- pframe = precvframe->pkt->data;
- payload = pframe + prxattrib->hdrlen +
- prxattrib->iv_len;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "prxattrib->iv_len =%d prxattrib->icv_len =%d\n",
- prxattrib->iv_len, prxattrib->icv_len);
-
- /* care the length of the data */
- rtw_seccalctkipmic23a(mickey, pframe, payload,
- datalen, &miccode[0],
- (unsigned char)prxattrib->priority);
-
- pframemic = payload + datalen;
-
- bmic_err = false;
-
- for (i = 0; i < 8; i++) {
- if (miccode[i] != *(pframemic + i)) {
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- "recvframe_chkmic:miccode[%d](%02x) != *(pframemic+%d)(%02x)\n",
- i, miccode[i],
- i, *(pframemic + i));
- bmic_err = true;
- }
- }
-
- if (bmic_err == true) {
- int i;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "*(pframemic-8)-*(pframemic-1) =%*phC\n",
- 8, pframemic - 8);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "*(pframemic-16)-*(pframemic-9) =%*phC\n",
- 8, pframemic - 16);
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
-					 "====== dump packet (len =%d) ======\n",
- precvframe->pkt->len);
- for (i = 0; i < precvframe->pkt->len; i = i + 8) {
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_, "%*phC\n",
- 8, precvframe->pkt->data + i);
- }
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
-					 "====== dump packet end [len =%d]======\n",
- precvframe->pkt->len);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
-					 "hdrlen =%d\n", prxattrib->hdrlen);
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "ra = %pM psecuritypriv->binstallGrpkey =%d\n",
- prxattrib->ra,
- psecuritypriv->binstallGrpkey);
-
- /* double check key_index for some timing
- issue, cannot compare with
- psecuritypriv->dot118021XGrpKeyid also
- cause timing issue */
- if ((is_multicast_ether_addr(prxattrib->ra)) &&
- (prxattrib->key_index !=
- pmlmeinfo->key_index))
- brpt_micerror = false;
-
- if ((prxattrib->bdecrypted == true) &&
- (brpt_micerror == true)) {
- rtw_handle_tkip_mic_err23a(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "mic error :prxattrib->bdecrypted =%d\n",
- prxattrib->bdecrypted);
- DBG_8723A(" mic error :prxattrib->"
- "bdecrypted =%d\n",
- prxattrib->bdecrypted);
- } else {
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- "mic error :prxattrib->bdecrypted =%d\n",
- prxattrib->bdecrypted);
- DBG_8723A(" mic error :prxattrib->"
- "bdecrypted =%d\n",
- prxattrib->bdecrypted);
- }
-
- res = _FAIL;
- } else {
- /* mic checked ok */
- if (!psecuritypriv->bcheck_grpkey &&
- is_multicast_ether_addr(prxattrib->ra)) {
- psecuritypriv->bcheck_grpkey = 1;
- RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- "psecuritypriv->bcheck_grpkey = true\n");
- }
- }
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvframe_chkmic: rtw_get_stainfo23a ==NULL!!!\n");
- }
-
- skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
- }
-
-exit:
-
-
-
- return res;
-}
-
-/* decrypt and set the ivlen, icvlen of the recv_frame */
-struct recv_frame *decryptor(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-struct recv_frame *decryptor(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct rx_pkt_attrib *prxattrib = &precv_frame->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct recv_frame *return_packet = precv_frame;
- int res = _SUCCESS;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "prxstat->decrypted =%x prxattrib->encrypt = 0x%03x\n",
- prxattrib->bdecrypted, prxattrib->encrypt);
-
- if (prxattrib->encrypt > 0) {
- u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
-
- prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
-
- if (prxattrib->key_index > WEP_KEYS) {
- DBG_8723A("prxattrib->key_index(%d) > WEP_KEYS\n",
- prxattrib->key_index);
-
- switch (prxattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- prxattrib->key_index =
- psecuritypriv->dot11PrivacyKeyIndex;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- default:
- prxattrib->key_index =
- psecuritypriv->dot118021XGrpKeyid;
- break;
- }
- }
- }
-
- if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0))) {
- psecuritypriv->hw_decrypted = 0;
- switch (prxattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- rtw_wep_decrypt23a(padapter, precv_frame);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- res = rtw_tkip_decrypt23a(padapter, precv_frame);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- res = rtw_aes_decrypt23a(padapter, precv_frame);
- break;
- default:
- break;
- }
- } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
- (psecuritypriv->busetkipkey == 1 ||
- prxattrib->encrypt != WLAN_CIPHER_SUITE_TKIP)) {
- psecuritypriv->hw_decrypted = 1;
- }
-
- if (res == _FAIL) {
- rtw_free_recvframe23a(return_packet);
- return_packet = NULL;
- }
-
-
-
- return return_packet;
-}
-
-/* set the security information in the recv_frame */
-static struct recv_frame *portctrl(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame)
-{
- u8 *psta_addr, *ptr;
- uint auth_alg;
- struct recv_frame *pfhdr;
- struct sta_info *psta;
-	struct sta_priv *pstapriv;
- struct recv_frame *prtnframe;
- u16 ether_type;
- u16 eapol_type = ETH_P_PAE;/* for Funia BD's WPA issue */
- struct rx_pkt_attrib *pattrib;
-
- pstapriv = &adapter->stapriv;
-
- auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
-
- pfhdr = precv_frame;
- pattrib = &pfhdr->attrib;
- psta_addr = pattrib->ta;
- psta = rtw_get_stainfo23a(pstapriv, psta_addr);
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "########portctrl:adapter->securitypriv.dot11AuthAlgrthm =%d\n",
- adapter->securitypriv.dot11AuthAlgrthm);
-
- prtnframe = precv_frame;
-
- if (auth_alg == dot11AuthAlgrthm_8021X) {
- /* get ether_type */
- ptr = pfhdr->pkt->data + pfhdr->attrib.hdrlen;
-
- ether_type = (ptr[6] << 8) | ptr[7];
-
- if (psta && psta->ieee8021x_blocked) {
- /* blocked */
- /* only accept EAPOL frame */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "########portctrl:psta->ieee8021x_blocked ==1\n");
-
- if (ether_type != eapol_type) {
- /* free this frame */
- rtw_free_recvframe23a(precv_frame);
- prtnframe = NULL;
- }
- }
- }
-
- return prtnframe;
-}
-
-int recv_decache(struct recv_frame *precv_frame, u8 bretry,
- struct stainfo_rxcache *prxcache);
-int recv_decache(struct recv_frame *precv_frame, u8 bretry,
- struct stainfo_rxcache *prxcache)
-{
- int tid = precv_frame->attrib.priority;
-
- u16 seq_ctrl = ((precv_frame->attrib.seq_num & 0xffff) << 4) |
- (precv_frame->attrib.frag_num & 0xf);
-
-
-
- if (tid > 15) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "recv_decache, (tid>15)! seq_ctrl = 0x%x, tid = 0x%x\n",
- seq_ctrl, tid);
-
- return _FAIL;
- }
-
- if (1) { /* if (bretry) */
- if (seq_ctrl == prxcache->tid_rxseq[tid]) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "recv_decache, seq_ctrl = 0x%x, tid = 0x%x, tid_rxseq = 0x%x\n",
- seq_ctrl, tid, prxcache->tid_rxseq[tid]);
-
- return _FAIL;
- }
- }
-
- prxcache->tid_rxseq[tid] = seq_ctrl;
-
-
-
- return _SUCCESS;
-}
-
-void process23a_pwrbit_data(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-void process23a_pwrbit_data(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
-#ifdef CONFIG_8723AU_AP_MODE
- unsigned char pwrbit;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta;
-
- psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
-
- if (psta) {
- pwrbit = ieee80211_has_pm(hdr->frame_control);
-
- if (pwrbit) {
- if (!(psta->state & WIFI_SLEEP_STATE))
- stop_sta_xmit23a(padapter, psta);
- } else {
- if (psta->state & WIFI_SLEEP_STATE)
- wakeup_sta_to_xmit23a(padapter, psta);
- }
- }
-
-#endif
-}
-
-void process_wmmps_data(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-void process_wmmps_data(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
-#ifdef CONFIG_8723AU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta;
-
- psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
-
- if (!psta)
- return;
-
-
- if (!psta->qos_option)
- return;
-
- if (!(psta->qos_info & 0xf))
- return;
-
- if (psta->state & WIFI_SLEEP_STATE) {
- u8 wmmps_ac = 0;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- if (wmmps_ac) {
- if (psta->sleepq_ac_len > 0) {
- /* process received triggered frame */
- xmit_delivery_enabled_frames23a(padapter, psta);
- } else {
- /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
- issue_qos_nulldata23a(padapter, psta->hwaddr,
- (u16)pattrib->priority,
- 0, 0);
- }
- }
- }
-
-#endif
-}
-
-static void count_rx_stats(struct rtw_adapter *padapter,
- struct recv_frame *prframe, struct sta_info *sta)
-{
- int sz;
- struct sta_info *psta = NULL;
- struct stainfo_stats *pstats = NULL;
- struct rx_pkt_attrib *pattrib = & prframe->attrib;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- sz = prframe->pkt->len;
- precvpriv->rx_bytes += sz;
-
- padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
-
- if ((!is_broadcast_ether_addr(pattrib->dst)) &&
- (!is_multicast_ether_addr(pattrib->dst)))
- padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
-
- if (sta)
- psta = sta;
- else
- psta = prframe->psta;
-
- if (psta) {
- pstats = &psta->sta_stats;
-
- pstats->rx_data_pkts++;
- pstats->rx_bytes += sz;
- }
-}
-
-static int sta2sta_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info**psta)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *mybssid = get_bssid(pmlmepriv);
- u8 *myhwaddr = myid(&adapter->eeprompriv);
- u8 *sta_addr = NULL;
- int bmcast = is_multicast_ether_addr(pattrib->dst);
-
-
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
-
- /* filter packets that SA is myself or multicast or broadcast */
- if (ether_addr_equal(myhwaddr, pattrib->src)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "SA == myself\n");
- ret = _FAIL;
- goto exit;
- }
-
- if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
- ret = _FAIL;
- goto exit;
- }
-
- if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
- ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
- !ether_addr_equal(pattrib->bssid, mybssid)) {
- ret = _FAIL;
- goto exit;
- }
-
- sta_addr = pattrib->src;
- } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* For Station mode, sa and bssid should always be BSSID,
- and DA is my mac-address */
- if (!ether_addr_equal(pattrib->bssid, pattrib->src)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "bssid != TA under STATION_MODE; drop pkt\n");
- ret = _FAIL;
- goto exit;
- }
-
- sta_addr = pattrib->bssid;
-
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- if (bmcast) {
- /* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
- if (!is_multicast_ether_addr(pattrib->bssid)) {
- ret = _FAIL;
- goto exit;
- }
- } else { /* not mc-frame */
- /* For AP mode, if DA is non-MCAST, then it must
- be BSSID, and bssid == BSSID */
- if (!ether_addr_equal(pattrib->bssid, pattrib->dst)) {
- ret = _FAIL;
- goto exit;
- }
-
- sta_addr = pattrib->src;
- }
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- ether_addr_copy(pattrib->dst, hdr->addr1);
- ether_addr_copy(pattrib->src, hdr->addr2);
- ether_addr_copy(pattrib->bssid, hdr->addr3);
- ether_addr_copy(pattrib->ra, pattrib->dst);
- ether_addr_copy(pattrib->ta, pattrib->src);
-
- sta_addr = mybssid;
- } else {
- ret = _FAIL;
- }
-
- if (bmcast)
- *psta = rtw_get_bcmc_stainfo23a(adapter);
- else
- *psta = rtw_get_stainfo23a(pstapriv, sta_addr); /* get ap_info */
-
- if (*psta == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "can't get psta under sta2sta_data_frame ; drop pkt\n");
- ret = _FAIL;
- goto exit;
- }
-
-exit:
-
- return ret;
-}
-
-int ap2sta_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta);
-int ap2sta_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
- int ret = _SUCCESS;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *mybssid = get_bssid(pmlmepriv);
- u8 *myhwaddr = myid(&adapter->eeprompriv);
- int bmcast = is_multicast_ether_addr(pattrib->dst);
-
-
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
- (check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
-
- /* filter packets that SA is myself or multicast or broadcast */
- if (ether_addr_equal(myhwaddr, pattrib->src)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "SA == myself\n");
- ret = _FAIL;
- goto exit;
- }
-
- /* da should be for me */
- if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "ap2sta_data_frame: compare DA failed; DA=%pM\n",
- pattrib->dst);
- ret = _FAIL;
- goto exit;
- }
-
- /* check BSSID */
- if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
- ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
- !ether_addr_equal(pattrib->bssid, mybssid)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "ap2sta_data_frame: compare BSSID failed; BSSID=%pM\n",
- pattrib->bssid);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "mybssid=%pM\n", mybssid);
-
- if (!bmcast) {
- DBG_8723A("issue_deauth23a to the nonassociated ap=%pM for the reason(7)\n",
- pattrib->bssid);
- issue_deauth23a(adapter, pattrib->bssid,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- }
-
- ret = _FAIL;
- goto exit;
- }
-
- if (bmcast)
- *psta = rtw_get_bcmc_stainfo23a(adapter);
- else
- /* get ap_info */
- *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
-
- if (*psta == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "ap2sta: can't get psta under STATION_MODE; drop pkt\n");
- ret = _FAIL;
- goto exit;
- }
-
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
- /* No data, will not indicate to upper layer,
-			   temporarily count it here */
- count_rx_stats(adapter, precv_frame, *psta);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
-
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- ether_addr_copy(pattrib->dst, hdr->addr1);
- ether_addr_copy(pattrib->src, hdr->addr2);
- ether_addr_copy(pattrib->bssid, hdr->addr3);
- ether_addr_copy(pattrib->ra, pattrib->dst);
- ether_addr_copy(pattrib->ta, pattrib->src);
-
- /* */
- ether_addr_copy(pattrib->bssid, mybssid);
-
- /* get sta_info */
- *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
- if (*psta == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "can't get psta under MP_MODE ; drop pkt\n");
- ret = _FAIL;
- goto exit;
- }
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* Special case */
- ret = RTW_RX_HANDLED;
- goto exit;
- } else {
- if (ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
- *psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
- if (*psta == NULL) {
- DBG_8723A("issue_deauth23a to the ap=%pM for the reason(7)\n",
- pattrib->bssid);
-
- issue_deauth23a(adapter, pattrib->bssid,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- }
- }
-
- ret = _FAIL;
- }
-
-exit:
-
-
-
- return ret;
-}
-
-int sta2ap_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta);
-int sta2ap_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame,
- struct sta_info **psta)
-{
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
- struct sta_priv *pstapriv = &adapter->stapriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- unsigned char *mybssid = get_bssid(pmlmepriv);
- int ret = _SUCCESS;
-
-
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
- if (!ether_addr_equal(pattrib->bssid, mybssid)) {
- ret = _FAIL;
- goto exit;
- }
-
- *psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
- if (*psta == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "can't get psta under AP_MODE; drop pkt\n");
- DBG_8723A("issue_deauth23a to sta=%pM for the reason(7)\n",
- pattrib->src);
-
- issue_deauth23a(adapter, pattrib->src,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
-
- ret = RTW_RX_HANDLED;
- goto exit;
- }
-
- process23a_pwrbit_data(adapter, precv_frame);
-
- /* We only get here if it's a data frame, so no need to
- * confirm data frame type first */
- if (ieee80211_is_data_qos(hdr->frame_control))
- process_wmmps_data(adapter, precv_frame);
-
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
- /* No data, will not indicate to upper layer,
-			   temporarily count it here */
- count_rx_stats(adapter, precv_frame, *psta);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
- } else {
- u8 *myhwaddr = myid(&adapter->eeprompriv);
-
- if (!ether_addr_equal(pattrib->ra, myhwaddr)) {
- ret = RTW_RX_HANDLED;
- goto exit;
- }
- DBG_8723A("issue_deauth23a to sta=%pM for the reason(7)\n",
- pattrib->src);
- issue_deauth23a(adapter, pattrib->src,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- ret = RTW_RX_HANDLED;
- goto exit;
- }
-
-exit:
-
-
-
- return ret;
-}
-
-static int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
-#ifdef CONFIG_8723AU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
- if (!ieee80211_is_ctl(hdr->frame_control))
- return _FAIL;
-
- /* receive the frames that ra(a1) is my address */
- if (!ether_addr_equal(hdr->addr1, myid(&padapter->eeprompriv)))
- return _FAIL;
-
- /* only handle ps-poll */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- struct ieee80211_pspoll *psp = (struct ieee80211_pspoll *)hdr;
- u16 aid;
- u8 wmmps_ac = 0;
- struct sta_info *psta = NULL;
-
- aid = le16_to_cpu(psp->aid) & 0x3fff;
- psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);
-
- if (!psta || psta->aid != aid)
- return _FAIL;
-
- /* for rx pkt statistics */
- psta->sta_stats.rx_ctrl_pkts++;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
-
- if (wmmps_ac)
- return _FAIL;
-
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- DBG_8723A("%s alive check-rx ps-poll\n", __func__);
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
-
- if ((psta->state & WIFI_SLEEP_STATE) &&
- (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid))) {
- struct list_head *xmitframe_phead;
- struct xmit_frame *pxmitframe;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- xmitframe_phead = get_list_head(&psta->sleep_q);
- pxmitframe = list_first_entry_or_null(xmitframe_phead,
- struct xmit_frame,
- list);
- if (pxmitframe) {
- list_del_init(&pxmitframe->list);
-
- psta->sleepq_len--;
-
-				if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
-
- rtl8723au_hal_xmitframe_enqueue(padapter,
- pxmitframe);
-
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
- update_beacon23a(padapter, WLAN_EID_TIM,
- NULL, false);
- }
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- } else {
- spin_unlock_bh(&pxmitpriv->lock);
-
- if (pstapriv->tim_bitmap & CHKBIT(psta->aid)) {
- if (psta->sleepq_len == 0) {
- DBG_8723A("no buffered packets "
- "to xmit\n");
-
- /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
- issue_nulldata23a(padapter,
- psta->hwaddr,
- 0, 0, 0);
- } else {
- DBG_8723A("error!psta->sleepq"
- "_len =%d\n",
- psta->sleepq_len);
- psta->sleepq_len = 0;
- }
-
- pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
-
- update_beacon23a(padapter, WLAN_EID_TIM,
- NULL, false);
- }
- }
- }
- }
-
-#endif
- return _FAIL;
-}
-
-struct recv_frame *recvframe_chk_defrag23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-static int validate_recv_mgnt_frame(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct sta_info *psta;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "+validate_recv_mgnt_frame\n");
-
- precv_frame = recvframe_chk_defrag23a(padapter, precv_frame);
- if (precv_frame == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "%s: fragment packet\n", __func__);
- return _SUCCESS;
- }
-
- skb = precv_frame->pkt;
- hdr = (struct ieee80211_hdr *) skb->data;
-
- /* for rx pkt statistics */
- psta = rtw_get_stainfo23a(&padapter->stapriv, hdr->addr2);
- if (psta) {
- psta->sta_stats.rx_mgnt_pkts++;
-
- if (ieee80211_is_beacon(hdr->frame_control))
- psta->sta_stats.rx_beacon_pkts++;
- else if (ieee80211_is_probe_req(hdr->frame_control))
- psta->sta_stats.rx_probereq_pkts++;
- else if (ieee80211_is_probe_resp(hdr->frame_control)) {
- if (ether_addr_equal(padapter->eeprompriv.mac_addr,
- hdr->addr1))
- psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_ether_addr(hdr->addr1) ||
- is_multicast_ether_addr(hdr->addr1))
- psta->sta_stats.rx_probersp_bm_pkts++;
- else
- psta->sta_stats.rx_probersp_uo_pkts++;
- }
- }
-
- mgt_dispatcher23a(padapter, precv_frame);
-
- return _SUCCESS;
-}
-
-static int validate_recv_data_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame)
-{
- u8 bretry;
- u8 *psa, *pda;
- struct sta_info *psta = NULL;
- struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
- struct security_priv *psecuritypriv = &adapter->securitypriv;
- int ret = _SUCCESS;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
-
-
- bretry = ieee80211_has_retry(hdr->frame_control);
- pda = ieee80211_get_DA(hdr);
- psa = ieee80211_get_SA(hdr);
-
- ether_addr_copy(pattrib->dst, pda);
- ether_addr_copy(pattrib->src, psa);
-
- switch (hdr->frame_control &
- cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
- case cpu_to_le16(0):
- ether_addr_copy(pattrib->bssid, hdr->addr3);
- ether_addr_copy(pattrib->ra, pda);
- ether_addr_copy(pattrib->ta, psa);
- ret = sta2sta_data_frame(adapter, precv_frame, &psta);
- break;
-
- case cpu_to_le16(IEEE80211_FCTL_FROMDS):
- ether_addr_copy(pattrib->bssid, hdr->addr2);
- ether_addr_copy(pattrib->ra, pda);
- ether_addr_copy(pattrib->ta, hdr->addr2);
- ret = ap2sta_data_frame(adapter, precv_frame, &psta);
- break;
-
- case cpu_to_le16(IEEE80211_FCTL_TODS):
- ether_addr_copy(pattrib->bssid, hdr->addr1);
- ether_addr_copy(pattrib->ra, hdr->addr1);
- ether_addr_copy(pattrib->ta, psa);
- ret = sta2ap_data_frame(adapter, precv_frame, &psta);
- break;
-
- case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
- /*
- * There is no BSSID in this case, but the driver has been
- * using addr1 so far, so keep it for now.
- */
- ether_addr_copy(pattrib->bssid, hdr->addr1);
- ether_addr_copy(pattrib->ra, hdr->addr1);
- ether_addr_copy(pattrib->ta, hdr->addr2);
- ret = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "case 3\n");
- break;
- }
-
- if ((ret == _FAIL) || (ret == RTW_RX_HANDLED))
- goto exit;
-
- if (!psta) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "after to_fr_ds_chk; psta == NULL\n");
- ret = _FAIL;
- goto exit;
- }
-
- precv_frame->psta = psta;
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- if (ieee80211_has_a4(hdr->frame_control))
- pattrib->hdrlen += ETH_ALEN;
-
- /* parsing QC field */
- if (pattrib->qos == 1) {
- __le16 *qptr = (__le16 *)ieee80211_get_qos_ctl(hdr);
- u16 qos_ctrl = le16_to_cpu(*qptr);
-
- pattrib->priority = qos_ctrl & IEEE80211_QOS_CTL_TID_MASK;
- pattrib->ack_policy = (qos_ctrl >> 5) & 3;
- pattrib->amsdu =
- (qos_ctrl & IEEE80211_QOS_CTL_A_MSDU_PRESENT) >> 7;
- pattrib->hdrlen += IEEE80211_QOS_CTL_LEN;
-
- if (pattrib->priority != 0 && pattrib->priority != 3) {
- adapter->recvpriv.bIsAnyNonBEPkts = true;
- }
- } else {
- pattrib->priority = 0;
- pattrib->ack_policy = 0;
- pattrib->amsdu = 0;
- }
-
- if (pattrib->order) { /* HT-CTRL 11n */
- pattrib->hdrlen += 4;
- }
-
- precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
-
- /* decache, drop duplicate recv packets */
- if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) ==
- _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "decache : drop pkt\n");
- ret = _FAIL;
- goto exit;
- }
-
- if (pattrib->privacy) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "validate_recv_data_frame:pattrib->privacy =%x\n",
- pattrib->privacy);
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "^^^^^^^^^^^is_multicast_ether_addr(pattrib->ra(0x%02x)) =%d^^^^^^^^^^^^^^^6\n",
- pattrib->ra[0],
- is_multicast_ether_addr(pattrib->ra));
-
- GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt,
- is_multicast_ether_addr(pattrib->ra));
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "pattrib->encrypt =%d\n", pattrib->encrypt);
-
- switch (pattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- pattrib->iv_len = IEEE80211_WEP_IV_LEN;
- pattrib->icv_len = IEEE80211_WEP_ICV_LEN;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- pattrib->iv_len = IEEE80211_TKIP_IV_LEN;
- pattrib->icv_len = IEEE80211_TKIP_ICV_LEN;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- pattrib->iv_len = IEEE80211_CCMP_HDR_LEN;
- pattrib->icv_len = IEEE80211_CCMP_MIC_LEN;
- break;
- default:
- pattrib->iv_len = 0;
- pattrib->icv_len = 0;
- break;
- }
- } else {
- pattrib->encrypt = 0;
- pattrib->iv_len = 0;
- pattrib->icv_len = 0;
- }
-
-exit:
-
-
-
- return ret;
-}
-
-static void dump_rx_pkt(struct sk_buff *skb, u16 type, int level)
-{
- int i;
- u8 *ptr;
-
- if ((level == 1) ||
- ((level == 2) && (type == IEEE80211_FTYPE_MGMT)) ||
- ((level == 3) && (type == IEEE80211_FTYPE_DATA))) {
-
- ptr = skb->data;
-
- DBG_8723A("#############################\n");
-
- for (i = 0; i < 64; i = i + 8)
- DBG_8723A("%*phC:\n", 8, ptr + i);
- DBG_8723A("#############################\n");
- }
-}
-
-static int validate_recv_frame(struct rtw_adapter *adapter,
- struct recv_frame *precv_frame)
-{
- /* check the frame subtype, to/from DS, DA and BSSID */
-
- /* then check whether the rx seq/frag is duplicated */
- u8 type;
- u8 subtype;
- int retval = _SUCCESS;
- struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
- struct sk_buff *skb = precv_frame->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u8 ver;
- u8 bDumpRxPkt;
- u16 seq_ctrl, fctl;
-
- fctl = le16_to_cpu(hdr->frame_control);
- ver = fctl & IEEE80211_FCTL_VERS;
- type = fctl & IEEE80211_FCTL_FTYPE;
- subtype = fctl & IEEE80211_FCTL_STYPE;
-
- /* version check */
- if (ver != 0) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "validate_recv_frame fail! (ver != 0)\n");
- retval = _FAIL;
- goto exit;
- }
-
- seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
- pattrib->frag_num = seq_ctrl & IEEE80211_SCTL_FRAG;
- pattrib->seq_num = seq_ctrl >> 4;
-
- pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
- pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
- pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
- pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
- pattrib->order = ieee80211_has_order(hdr->frame_control);
-
- GetHalDefVar8192CUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
-
- if (unlikely(bDumpRxPkt == 1))
- dump_rx_pkt(skb, type, bDumpRxPkt);
-
- switch (type) {
- case IEEE80211_FTYPE_MGMT:
- retval = validate_recv_mgnt_frame(adapter, precv_frame);
- if (retval == _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "validate_recv_mgnt_frame fail\n");
- }
- retval = _FAIL; /* only data frames return _SUCCESS */
- break;
- case IEEE80211_FTYPE_CTL:
- retval = validate_recv_ctrl_frame(adapter, precv_frame);
- if (retval == _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "validate_recv_ctrl_frame fail\n");
- }
- retval = _FAIL; /* only data frames return _SUCCESS */
- break;
- case IEEE80211_FTYPE_DATA:
- pattrib->qos = (subtype & IEEE80211_STYPE_QOS_DATA) ? 1 : 0;
- retval = validate_recv_data_frame(adapter, precv_frame);
- if (retval == _FAIL) {
- struct recv_priv *precvpriv = &adapter->recvpriv;
-
- precvpriv->rx_drop++;
- }
- break;
- default:
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "validate_recv_data_frame fail! type = 0x%x\n", type);
- retval = _FAIL;
- break;
- }
-
-exit:
- return retval;
-}
-
-/* remove the wlanhdr and add the eth_hdr */
-
-static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
-{
- u16 eth_type, len, hdrlen;
- u8 bsnaphdr;
- u8 *psnap;
- struct rtw_adapter *adapter = precvframe->adapter;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- struct sk_buff *skb = precvframe->pkt;
- u8 *ptr;
- struct rx_pkt_attrib *pattrib = &precvframe->attrib;
-
-
-
- ptr = skb->data;
- hdrlen = pattrib->hdrlen;
- psnap = ptr + hdrlen;
- eth_type = (psnap[6] << 8) | psnap[7];
- /* convert hdr + possible LLC headers into Ethernet header */
- if ((ether_addr_equal(psnap, rfc1042_header) &&
- eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- ether_addr_equal(psnap, bridge_tunnel_header)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation
- and replace EtherType */
- bsnaphdr = true;
- hdrlen += SNAP_SIZE;
- } else {
- /* Leave Ethernet header part of hdr and full payload */
- bsnaphdr = false;
- eth_type = (psnap[0] << 8) | psnap[1];
- }
-
- len = skb->len - hdrlen;
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "=== pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n",
- pattrib->hdrlen, pattrib->iv_len);
-
- pattrib->eth_type = eth_type;
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- ptr += hdrlen;
- *ptr = 0x87;
- *(ptr + 1) = 0x12;
-
- eth_type = 0x8712;
- /* append rx status for mp test packets */
-
- ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) + 2) - 24);
- memcpy(ptr, skb->head, 24);
- ptr += 24;
- } else {
- ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) +
- (bsnaphdr ? 2:0)));
- }
-
- ether_addr_copy(ptr, pattrib->dst);
- ether_addr_copy(ptr + ETH_ALEN, pattrib->src);
-
- if (!bsnaphdr) {
- put_unaligned_be16(len, ptr + 12);
- }
-
-
- return _SUCCESS;
-}
-
-/* perform defrag */
-struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
- struct rtw_queue *defrag_q);
-struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
- struct rtw_queue *defrag_q)
-{
- struct list_head *phead;
- u8 wlanhdr_offset;
- u8 curfragnum;
- struct recv_frame *pnfhdr, *ptmp;
- struct recv_frame *prframe, *pnextrframe;
- struct rtw_queue *pfree_recv_queue;
- struct sk_buff *skb;
-
- curfragnum = 0;
- pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
-
- phead = get_list_head(defrag_q);
- prframe = list_first_entry(phead, struct recv_frame, list);
- list_del_init(&prframe->list);
- skb = prframe->pkt;
-
- if (curfragnum != prframe->attrib.frag_num) {
- /* the first fragment number must be 0 */
- /* free the whole queue */
- rtw_free_recvframe23a(prframe);
- rtw_free_recvframe23a_queue(defrag_q);
-
- return NULL;
- }
-
- curfragnum++;
-
- list_for_each_entry_safe(pnfhdr, ptmp, phead, list) {
- pnextrframe = (struct recv_frame *)pnfhdr;
- /* check the fragment sequence (2nd ~n fragment frame) */
-
- if (curfragnum != pnfhdr->attrib.frag_num) {
- /* the fragment number must be increasing
- (after decache) */
- /* release the defrag_q & prframe */
- rtw_free_recvframe23a(prframe);
- rtw_free_recvframe23a_queue(defrag_q);
- return NULL;
- }
-
- curfragnum++;
-
- /* copy the 2nd~n fragment frame's payload to the
- first fragment */
- /* get the 2nd~last fragment frame's payload */
-
- wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
-
- skb_pull(pnfhdr->pkt, wlanhdr_offset);
-
- /* append to first fragment frame's tail
- (if privacy frame, pull the ICV) */
-
- skb_trim(skb, skb->len - prframe->attrib.icv_len);
-
- memcpy(skb_tail_pointer(skb), pnfhdr->pkt->data,
- pnfhdr->pkt->len);
-
- skb_put(skb, pnfhdr->pkt->len);
-
- prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
- }
-
- /* free the defrag_q queue and return the prframe */
- rtw_free_recvframe23a_queue(defrag_q);
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "Performance defrag!!!!!\n");
-
- return prframe;
-}
-
-/* check whether defrag is needed; if so, queue the frame to the defrag_q */
-struct recv_frame *recvframe_chk_defrag23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- u8 ismfrag;
- u8 fragnum;
- u8 *psta_addr;
- struct recv_frame *pfhdr;
- struct sta_info *psta;
- struct sta_priv *pstapriv;
- struct list_head *phead;
- struct recv_frame *prtnframe = NULL;
- struct rtw_queue *pfree_recv_queue, *pdefrag_q;
-
-
-
- pstapriv = &padapter->stapriv;
-
- pfhdr = precv_frame;
-
- pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
- /* need to define struct of wlan header frame ctrl */
- ismfrag = pfhdr->attrib.mfrag;
- fragnum = pfhdr->attrib.frag_num;
-
- psta_addr = pfhdr->attrib.ta;
- psta = rtw_get_stainfo23a(pstapriv, psta_addr);
- if (!psta) {
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) pfhdr->pkt->data;
- if (!ieee80211_is_data(hdr->frame_control)) {
- psta = rtw_get_bcmc_stainfo23a(padapter);
- pdefrag_q = &psta->sta_recvpriv.defrag_q;
- } else
- pdefrag_q = NULL;
- } else
- pdefrag_q = &psta->sta_recvpriv.defrag_q;
-
- if ((ismfrag == 0) && (fragnum == 0)) {
- prtnframe = precv_frame;/* isn't a fragment frame */
- }
-
- if (ismfrag == 1) {
- /* 0~(n-1) fragment frame */
- /* enqueue to defrag_q */
- if (pdefrag_q != NULL) {
- if (fragnum == 0) {
- /* the first fragment */
- if (!list_empty(&pdefrag_q->queue)) {
- /* free current defrag_q */
- rtw_free_recvframe23a_queue(pdefrag_q);
- }
- }
-
- /* Then enqueue the 0~(n-1) fragment into the
- defrag_q */
-
- /* spin_lock(&pdefrag_q->lock); */
- phead = get_list_head(pdefrag_q);
- list_add_tail(&pfhdr->list, phead);
- /* spin_unlock(&pdefrag_q->lock); */
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "Enqueuq: ismfrag = %d, fragnum = %d\n",
- ismfrag, fragnum);
-
- prtnframe = NULL;
-
- } else {
- /* can't find this ta's defrag_queue,
- so free this recv_frame */
- rtw_free_recvframe23a(precv_frame);
- prtnframe = NULL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n",
- ismfrag, fragnum);
- }
- }
-
- if ((ismfrag == 0) && (fragnum != 0)) {
- /* the last fragment frame */
- /* enqueue the last fragment */
- if (pdefrag_q != NULL) {
- /* spin_lock(&pdefrag_q->lock); */
- phead = get_list_head(pdefrag_q);
- list_add_tail(&pfhdr->list, phead);
- /* spin_unlock(&pdefrag_q->lock); */
-
- /* call recvframe_defrag to defrag */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "defrag: ismfrag = %d, fragnum = %d\n",
- ismfrag, fragnum);
- precv_frame = recvframe_defrag(padapter, pdefrag_q);
- prtnframe = precv_frame;
- } else {
- /* can't find this ta's defrag_queue,
- so free this recv_frame */
- rtw_free_recvframe23a(precv_frame);
- prtnframe = NULL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n",
- ismfrag, fragnum);
- }
-
- }
-
- if ((prtnframe != NULL) && (prtnframe->attrib.privacy)) {
- /* after defrag we must check tkip mic code */
- if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvframe_chkmic(padapter, prtnframe) ==_FAIL\n");
- rtw_free_recvframe23a(prtnframe);
- prtnframe = NULL;
- }
- }
-
-
-
- return prtnframe;
-}
-
-int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe);
-int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib;
- struct sk_buff *skb, *sub_skb;
- struct sk_buff_head skb_list;
-
- pattrib = &prframe->attrib;
-
- skb = prframe->pkt;
- skb_pull(skb, prframe->attrib.hdrlen);
- __skb_queue_head_init(&skb_list);
-
- ieee80211_amsdu_to_8023s(skb, &skb_list, NULL, 0, 0, false);
-
- while (!skb_queue_empty(&skb_list)) {
- sub_skb = __skb_dequeue(&skb_list);
-
- sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
- sub_skb->dev = padapter->pnetdev;
-
- sub_skb->ip_summed = CHECKSUM_NONE;
-
- netif_rx(sub_skb);
- }
-
- prframe->pkt = NULL;
- rtw_free_recvframe23a(prframe);
- return _SUCCESS;
-}
-
-int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num);
-int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
-{
- u8 wsize = preorder_ctrl->wsize_b;
- u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;
-
- /* Rx Reorder initialize condition. */
- if (preorder_ctrl->indicate_seq == 0xFFFF)
- preorder_ctrl->indicate_seq = seq_num;
-
- /* Drop the packet whose SeqNum is smaller than WinStart */
- if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
- return false;
-
- /* */
- /* Sliding window manipulation. Conditions include: */
- /* 1. Incoming SeqNum is equal to WinStart => Window shifts by 1 */
- /* 2. Incoming SeqNum is larger than WinEnd => Window shifts by N */
- /* */
- if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
- preorder_ctrl->indicate_seq =
- (preorder_ctrl->indicate_seq + 1) & 0xFFF;
- } else if (SN_LESS(wend, seq_num)) {
- /* boundary situation, when seq_num crosses 0xFFF */
- if (seq_num >= (wsize - 1))
- preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
- else
- preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
- }
- return true;
-}
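
The window-advance rules above can be illustrated with a small standalone sketch (not part of the driver). sn_before() is a hypothetical stand-in for the driver's SN_LESS() comparison, assumed to treat a 12-bit distance of 2048 or more as "before"; advance_winstart() mirrors the drop/shift decisions of check_indicate_seq(), uses a single masked expression where the original handles the 0xFFF wrap with an explicit branch, and omits the one-time 0xFFFF initialization.

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MASK 0xFFF	/* 802.11 sequence numbers are 12 bits wide */

/* hypothetical stand-in for SN_LESS(): true if a is "before" b modulo 4096 */
static bool sn_before(unsigned int a, unsigned int b)
{
	return ((a - b) & 0x800) != 0;
}

/* mirrors check_indicate_seq(): returns the new WinStart (indicate_seq) */
static unsigned int advance_winstart(unsigned int winstart,
				     unsigned int seq, unsigned int wsize)
{
	unsigned int wend = (winstart + wsize - 1) & SEQ_MASK;

	if (sn_before(seq, winstart))
		return winstart;			/* older than WinStart: drop */
	if (seq == winstart)
		return (winstart + 1) & SEQ_MASK;	/* shift window by 1 */
	if (sn_before(wend, seq))			/* beyond WinEnd: shift by N */
		return (seq + 1 - wsize) & SEQ_MASK;
	return winstart;				/* inside the window: buffer */
}

int main(void)
{
	/* window slides correctly across the 0xFFF -> 0x000 wrap */
	printf("0x%03X\n", advance_winstart(0xFFE, 0x040, 64)); /* prints 0x001 */
	return 0;
}
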
-
-static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl,
- struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct rtw_queue *ppending_recvframe_queue;
- struct list_head *phead, *plist, *ptmp;
- struct recv_frame *hdr;
- struct rx_pkt_attrib *pnextattrib;
-
- ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- phead = get_list_head(ppending_recvframe_queue);
-
- list_for_each_safe(plist, ptmp, phead) {
- hdr = container_of(plist, struct recv_frame, list);
- pnextattrib = &hdr->attrib;
-
- if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) {
- continue;
- } else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) {
- /* Duplicate entry is found!! Do not insert current entry. */
- return false;
- } else {
- break;
- }
-
- }
-
- list_del_init(&prframe->list);
-
- list_add_tail(&prframe->list, plist);
-
- return true;
-}
-
-int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
- struct recv_reorder_ctrl *preorder_ctrl,
- int bforced);
-int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
- struct recv_reorder_ctrl *preorder_ctrl,
- int bforced)
-{
- struct list_head *phead, *plist;
- struct recv_frame *prframe;
- struct rx_pkt_attrib *pattrib;
- int bPktInBuf = false;
- struct recv_priv *precvpriv;
- struct rtw_queue *ppending_recvframe_queue;
-
- precvpriv = &padapter->recvpriv;
- ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- phead = get_list_head(ppending_recvframe_queue);
- plist = phead->next;
-
- /* Handle the forced indicate case. */
- if (bforced) {
- if (list_empty(phead)) {
- return true;
- }
-
- prframe = container_of(plist, struct recv_frame, list);
- pattrib = &prframe->attrib;
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- }
-
- /* Prepare the indication list and indicate. */
- /* Check if there is any packet that needs to be indicated. */
- while (!list_empty(phead)) {
-
- prframe = container_of(plist, struct recv_frame, list);
- pattrib = &prframe->attrib;
-
- if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "recv_indicatepkts_in_order: indicate =%d seq =%d amsdu =%d\n",
- preorder_ctrl->indicate_seq,
- pattrib->seq_num, pattrib->amsdu);
-
- plist = plist->next;
- list_del_init(&prframe->list);
-
- if (SN_EQUAL(preorder_ctrl->indicate_seq,
- pattrib->seq_num)) {
- preorder_ctrl->indicate_seq =
- (preorder_ctrl->indicate_seq + 1) & 0xFFF;
- }
-
- if (!pattrib->amsdu) {
- if ((padapter->bDriverStopped == false) &&
- (padapter->bSurpriseRemoved == false)) {
- rtw_recv_indicatepkt23a(padapter, prframe);
- }
- } else {
- if (amsdu_to_msdu(padapter, prframe) !=
- _SUCCESS)
- rtw_free_recvframe23a(prframe);
- }
-
- /* Update local variables. */
- bPktInBuf = false;
-
- } else {
- bPktInBuf = true;
- break;
- }
-
- }
-
- return bPktInBuf;
-}
-
-int recv_indicatepkt_reorder(struct rtw_adapter *padapter,
- struct recv_frame *prframe);
-int recv_indicatepkt_reorder(struct rtw_adapter *padapter,
- struct recv_frame *prframe)
-{
- int retval = _SUCCESS;
- struct rx_pkt_attrib *pattrib;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct rtw_queue *ppending_recvframe_queue;
-
- pattrib = &prframe->attrib;
- preorder_ctrl = prframe->preorder_ctrl;
- ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- if (!pattrib->amsdu) {
- /* s1. */
- wlanhdr_to_ethhdr(prframe);
-
- if ((pattrib->qos != 1) || (pattrib->eth_type == ETH_P_ARP) ||
- (pattrib->ack_policy != 0)) {
- if ((padapter->bDriverStopped == false) &&
- (padapter->bSurpriseRemoved == false)) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n");
-
- rtw_recv_indicatepkt23a(padapter, prframe);
- return _SUCCESS;
- }
-
- return _FAIL;
- }
-
- if (preorder_ctrl->enable == false) {
- /* indicate this recv_frame */
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- rtw_recv_indicatepkt23a(padapter, prframe);
-
- preorder_ctrl->indicate_seq =
- (preorder_ctrl->indicate_seq + 1) % 4096;
- return _SUCCESS;
- }
- } else {
- /* temporary filter -> A-MSDUs within an A-MPDU are not supported */
- if (preorder_ctrl->enable == false) {
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- retval = amsdu_to_msdu(padapter, prframe);
-
- preorder_ctrl->indicate_seq =
- (preorder_ctrl->indicate_seq + 1) % 4096;
- return retval;
- }
- }
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "recv_indicatepkt_reorder: indicate =%d seq =%d\n",
- preorder_ctrl->indicate_seq, pattrib->seq_num);
-
- /* s2. check if winstart_b (indicate_seq) needs to be updated */
- if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
- goto _err_exit;
- }
-
- /* s3. Insert all packet into Reorder Queue to maintain its ordering. */
- if (!enqueue_reorder_recvframe23a(preorder_ctrl, prframe)) {
- goto _err_exit;
- }
-
- /* s4. */
- /* Indication process. */
- /* After packet dropping and sliding-window shifting as above, we can
- now indicate the packets whose SeqNum is smaller than the latest
- WinStart and buffer the other packets. */
- /* */
- /* For the Rx Reorder condition: */
- /* 1. All packets with SeqNum smaller than WinStart => indicate. */
- /* 2. All packets with SeqNum larger than or equal to WinStart =>
- buffer them. */
- /* */
-
- if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) {
- mod_timer(&preorder_ctrl->reordering_ctrl_timer,
- jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- } else {
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
- }
- return _SUCCESS;
-
-_err_exit:
-
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- return _FAIL;
-}
-
-void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext)
-{
- struct recv_reorder_ctrl *preorder_ctrl;
- struct rtw_adapter *padapter;
- struct rtw_queue *ppending_recvframe_queue;
-
- preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
- padapter = preorder_ctrl->padapter;
- ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
- return;
- }
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
-
- if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true) {
- mod_timer(&preorder_ctrl->reordering_ctrl_timer,
- jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
- }
-
- spin_unlock_bh(&ppending_recvframe_queue->lock);
-}
-
-int process_recv_indicatepkts(struct rtw_adapter *padapter,
- struct recv_frame *prframe);
-int process_recv_indicatepkts(struct rtw_adapter *padapter,
- struct recv_frame *prframe)
-{
- int retval = _SUCCESS;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (phtpriv->ht_option == true) { /* B/G/N Mode */
- /* including perform A-MPDU Rx Ordering Buffer Control */
- if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
- if ((padapter->bDriverStopped == false) &&
- (padapter->bSurpriseRemoved == false)) {
- retval = _FAIL;
- return retval;
- }
- }
- } else { /* B/G mode */
- retval = wlanhdr_to_ethhdr(prframe);
- if (retval != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "wlanhdr_to_ethhdr: drop pkt\n");
- return retval;
- }
-
- if ((padapter->bDriverStopped == false) &&
- (padapter->bSurpriseRemoved == false)) {
- /* indicate this recv_frame */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n");
- rtw_recv_indicatepkt23a(padapter, prframe);
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n");
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- "recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
- padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
- retval = _FAIL;
- return retval;
- }
-
- }
-
- return retval;
-}
-
-static int recv_func_prehandle(struct rtw_adapter *padapter,
- struct recv_frame *rframe)
-{
- int ret;
-
- /* check the frame ctrl field and decache */
- ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recv_func: validate_recv_frame fail! drop pkt\n");
- rtw_free_recvframe23a(rframe);
- goto exit;
- }
-
-exit:
- return ret;
-}
-
-static int recv_func_posthandle(struct rtw_adapter *padapter,
- struct recv_frame *prframe)
-{
- int ret = _SUCCESS;
- struct recv_frame *orig_prframe = prframe;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- /* DATA FRAME */
- prframe = decryptor(padapter, prframe);
- if (prframe == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "decryptor: drop pkt\n");
- ret = _FAIL;
- goto _recv_data_drop;
- }
-
- prframe = recvframe_chk_defrag23a(padapter, prframe);
- if (!prframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvframe_chk_defrag23a: drop pkt\n");
- goto _recv_data_drop;
- }
-
- /*
- * Pull off crypto headers
- */
- if (prframe->attrib.iv_len > 0) {
- skb_pull(prframe->pkt, prframe->attrib.iv_len);
- }
-
- if (prframe->attrib.icv_len > 0) {
- skb_trim(prframe->pkt,
- prframe->pkt->len - prframe->attrib.icv_len);
- }
-
- prframe = portctrl(padapter, prframe);
- if (!prframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "portctrl: drop pkt\n");
- ret = _FAIL;
- goto _recv_data_drop;
- }
-
- count_rx_stats(padapter, prframe, NULL);
-
- ret = process_recv_indicatepkts(padapter, prframe);
- if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recv_func: process_recv_indicatepkts fail!\n");
- rtw_free_recvframe23a(orig_prframe);/* free this recv_frame */
- goto _recv_data_drop;
- }
- return ret;
-
-_recv_data_drop:
- precvpriv->rx_drop++;
- return ret;
-}
-
-int rtw_recv_entry23a(struct recv_frame *rframe)
-{
- int ret, r;
- struct rtw_adapter *padapter = rframe->adapter;
- struct rx_pkt_attrib *prxattrib = &rframe->attrib;
- struct recv_priv *recvpriv = &padapter->recvpriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
-
- /* check if we need to handle the uc_swdec_pending_queue */
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
- psecuritypriv->busetkipkey) {
- struct recv_frame *pending_frame;
-
- while ((pending_frame = rtw_alloc_recvframe23a(&padapter->recvpriv.uc_swdec_pending_queue))) {
- r = recv_func_posthandle(padapter, pending_frame);
- if (r == _SUCCESS)
- DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
- }
- }
-
- ret = recv_func_prehandle(padapter, rframe);
-
- if (ret == _SUCCESS) {
- /* check if we need to enqueue into the uc_swdec_pending_queue */
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
- !is_multicast_ether_addr(prxattrib->ra) &&
- prxattrib->encrypt > 0 &&
- (prxattrib->bdecrypted == 0) &&
- !is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
- !psecuritypriv->busetkipkey) {
- rtw_enqueue_recvframe23a(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
- DBG_8723A("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
- goto exit;
- }
-
- ret = recv_func_posthandle(padapter, rframe);
-
- recvpriv->rx_pkts++;
- }
-
-exit:
- return ret;
-}
-
-void rtw_signal_stat_timer_hdl23a(unsigned long data)
-{
- struct rtw_adapter *adapter = (struct rtw_adapter *)data;
- struct recv_priv *recvpriv = &adapter->recvpriv;
-
- u32 tmp_s, tmp_q;
- u8 avg_signal_strength = 0;
- u8 avg_signal_qual = 0;
- u32 num_signal_strength = 0;
- u32 num_signal_qual = 0;
- u8 _alpha = 3; /* this value is based on converging_constant = 5000 */
- /* and sampling_interval = 1000 */
-
- if (recvpriv->signal_strength_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_strength = recvpriv->signal_strength_data.avg_val;
- num_signal_strength = recvpriv->signal_strength_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /* the signal values */
- recvpriv->signal_strength_data.update_req = 1;
- }
-
- if (recvpriv->signal_qual_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- num_signal_qual = recvpriv->signal_qual_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /* the signal values */
- recvpriv->signal_qual_data.update_req = 1;
- }
-
- /* update value of signal_strength, rssi, signal_qual */
- if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
- tmp_s = avg_signal_strength + (_alpha - 1) *
- recvpriv->signal_strength;
- if (tmp_s % _alpha)
- tmp_s = tmp_s / _alpha + 1;
- else
- tmp_s = tmp_s / _alpha;
- if (tmp_s > 100)
- tmp_s = 100;
-
- tmp_q = avg_signal_qual + (_alpha - 1) * recvpriv->signal_qual;
- if (tmp_q % _alpha)
- tmp_q = tmp_q / _alpha + 1;
- else
- tmp_q = tmp_q / _alpha;
- if (tmp_q > 100)
- tmp_q = 100;
-
- recvpriv->signal_strength = tmp_s;
- recvpriv->signal_qual = tmp_q;
-
- DBG_8723A("%s signal_strength:%3u, signal_qual:%3u, "
- "num_signal_strength:%u, num_signal_qual:%u\n",
- __func__, recvpriv->signal_strength,
- recvpriv->signal_qual, num_signal_strength,
- num_signal_qual);
- }
-
- rtw_set_signal_stat_timer(recvpriv);
-}
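
For clarity (an illustration, not code from the original file): the smoothing above is a ceiling-rounded moving average that weights the fresh sample by 1/_alpha and clamps the result to 100. A compact equivalent, using the kernel's DIV_ROUND_UP() from <linux/kernel.h>; signal_ema() is a hypothetical helper name:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

/* hypothetical helper: new = min(100, ceil((sample + (alpha - 1) * old) / alpha)) */
static u32 signal_ema(u32 sample, u32 old, u32 alpha)
{
	u32 val = DIV_ROUND_UP(sample + (alpha - 1) * old, alpha);

	return val > 100 ? 100 : val;
}
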
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
deleted file mode 100644
index 5a4cfdf1ebd4..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ /dev/null
@@ -1,1630 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_SECURITY_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wifi.h>
-#include <osdep_intf.h>
-
-/* ===== WEP related ===== */
-
-#define CRC32_POLY 0x04c11db7
-
-struct arc4context {
- u32 x;
- u32 y;
- u8 state[256];
-};
-
-static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
-{
- u32 t, u;
- u32 keyindex;
- u32 stateindex;
- u8 *state;
- u32 counter;
-
- state = parc4ctx->state;
- parc4ctx->x = 0;
- parc4ctx->y = 0;
- for (counter = 0; counter < 256; counter++)
- state[counter] = (u8)counter;
- keyindex = 0;
- stateindex = 0;
- for (counter = 0; counter < 256; counter++) {
- t = state[counter];
- stateindex = (stateindex + key[keyindex] + t) & 0xff;
- u = state[stateindex];
- state[stateindex] = (u8)t;
- state[counter] = (u8)u;
- if (++keyindex >= key_len)
- keyindex = 0;
- }
-
-}
-
-static u32 arcfour_byte(struct arc4context *parc4ctx)
-{
- u32 x;
- u32 y;
- u32 sx, sy;
- u8 *state;
-
- state = parc4ctx->state;
- x = (parc4ctx->x + 1) & 0xff;
- sx = state[x];
- y = (sx + parc4ctx->y) & 0xff;
- sy = state[y];
- parc4ctx->x = x;
- parc4ctx->y = y;
- state[y] = (u8)sx;
- state[x] = (u8)sy;
-
- return state[(sx + sy) & 0xff];
-}
-
-static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest,
- u8 *src, u32 len)
-{
- u32 i;
-
- for (i = 0; i < len; i++)
- dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
-}
-
-static int bcrc32initialized;
-static u32 crc32_table[256];
-
-static u8 crc32_reverseBit(u8 data)
-{
- u8 retval = ((data << 7) & 0x80) | ((data << 5) & 0x40) |
- ((data << 3) & 0x20) | ((data << 1) & 0x10) |
- ((data >> 1) & 0x08) | ((data >> 3) & 0x04) |
- ((data >> 5) & 0x02) | ((data >> 7) & 0x01);
- return retval;
-}
-
-static void crc32_init(void)
-{
- int i, j;
- u32 c;
- u8 *p, *p1;
- u8 k;
-
- if (bcrc32initialized == 1)
- return;
-
- p = (u8 *) &c;
- c = 0x12340000;
-
- for (i = 0; i < 256; ++i) {
- k = crc32_reverseBit((u8)i);
-
- for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
-
- p1 = (u8 *)&crc32_table[i];
-
- p1[0] = crc32_reverseBit(p[3]);
- p1[1] = crc32_reverseBit(p[2]);
- p1[2] = crc32_reverseBit(p[1]);
- p1[3] = crc32_reverseBit(p[0]);
- }
-
- bcrc32initialized = 1;
-}
-
-static u32 getcrc32(u8 *buf, int len)
-{
- u8 *p;
- u32 crc;
-
- if (bcrc32initialized == 0)
- crc32_init();
-
- crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
-
- for (p = buf; len > 0; ++p, --len)
- crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
-
- return ~crc; /* transmit complement, per CRC-32 spec */
-}
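
A side note, not taken from the original file: the table-driven routine above computes the standard reflected CRC-32 (the polynomial used for the WEP/TKIP ICV), so the same value could in principle be obtained from the kernel's generic helper in <linux/crc32.h>. A minimal sketch, assuming the usual all-ones preload and final complement; getcrc32_generic() is an illustrative name only:

#include <linux/crc32.h>
#include <linux/types.h>

/* illustrative only: should match getcrc32() above for the same input */
static u32 getcrc32_generic(const u8 *buf, int len)
{
	/* preload with all ones and complement the result, per the CRC-32 spec */
	return ~crc32_le(0xffffffff, buf, len);
}
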
-
-/* Need to consider the fragment situation */
-void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- /* exclude ICV */
- __le32 crc;
- struct arc4context mycontext;
- int curfragnum, length, index;
- u32 keylength;
- u8 *pframe, *payload, *iv; /* wepkey */
- u8 wepkey[16];
- u8 hw_hdr_offset = 0;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (!pxmitframe->buf_addr)
- return;
-
- hw_hdr_offset = TXDESC_OFFSET;
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- /* start to encrypt each fragment */
- if (pattrib->encrypt != WLAN_CIPHER_SUITE_WEP40 &&
- pattrib->encrypt != WLAN_CIPHER_SUITE_WEP104)
- return;
-
- index = psecuritypriv->dot11PrivacyKeyIndex;
- keylength = psecuritypriv->wep_key[index].keylen;
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags ; curfragnum++) {
- iv = pframe + pattrib->hdrlen;
- memcpy(&wepkey[0], iv, 3);
- memcpy(&wepkey[3], &psecuritypriv->wep_key[index].key,
- keylength);
- payload = pframe + pattrib->iv_len + pattrib->hdrlen;
-
- if ((curfragnum + 1) == pattrib->nr_frags) {
- /* the last fragment */
- length = pattrib->last_txcmdsz - pattrib->hdrlen -
- pattrib->iv_len - pattrib->icv_len;
-
- crc = cpu_to_le32(getcrc32(payload, length));
-
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length,
- (char *)&crc, 4);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen -
- pattrib->iv_len - pattrib->icv_len;
- crc = cpu_to_le32(getcrc32(payload, length));
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length,
- (char *)&crc, 4);
-
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
-
-}
-
-void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
- struct recv_frame *precvframe)
-{
- /* exclude ICV */
- u32 actual_crc, expected_crc;
- struct arc4context mycontext;
- int length;
- u32 keylength;
- u8 *pframe, *payload, *iv, wepkey[16];
- u8 keyindex;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff *skb = precvframe->pkt;
-
- pframe = skb->data;
-
- /* start to decrypt recvframe */
- if (prxattrib->encrypt != WLAN_CIPHER_SUITE_WEP40 &&
- prxattrib->encrypt != WLAN_CIPHER_SUITE_WEP104)
- return;
-
- iv = pframe + prxattrib->hdrlen;
- /* keyindex = (iv[3]&0x3); */
- keyindex = prxattrib->key_index;
- keylength = psecuritypriv->wep_key[keyindex].keylen;
- memcpy(&wepkey[0], iv, 3);
- /* memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength); */
- memcpy(&wepkey[3], &psecuritypriv->wep_key[keyindex].key, keylength);
- length = skb->len - prxattrib->hdrlen - prxattrib->iv_len;
-
- payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
-
- /* decrypt the payload, including the ICV */
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
-
- /* calculate icv and compare the icv */
- actual_crc = getcrc32(payload, length - 4);
- expected_crc = get_unaligned_le32(&payload[length - 4]);
-
- if (actual_crc != expected_crc) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s:icv CRC mismatch: "
- "actual: %08x, expected: %08x\n",
- __func__, actual_crc, expected_crc);
- }
-}
-
-/* ===== TKIP related ===== */
-
-static u32 secmicgetuint32(u8 *p)
-/* Convert from Byte[] to u32 in a portable way */
-{
- s32 i;
- u32 res = 0;
-
- for (i = 0; i < 4; i++)
- res |= ((u32)(*p++)) << (8 * i);
-
- return res;
-}
-
-static void secmicputuint32(u8 *p, u32 val)
-/* Convert from u32 to Byte[] in a portable way */
-{
- long i;
-
- for (i = 0; i < 4; i++) {
- *p++ = (u8) (val & 0xff);
- val >>= 8;
- }
-
-}
-
-static void secmicclear(struct mic_data *pmicdata)
-{
-/* Reset the state to the empty message. */
-
- pmicdata->L = pmicdata->K0;
- pmicdata->R = pmicdata->K1;
- pmicdata->nBytesInM = 0;
- pmicdata->M = 0;
-
-}
-
-void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key)
-{
- /* Set the key */
-
- pmicdata->K0 = secmicgetuint32(key);
- pmicdata->K1 = secmicgetuint32(key + 4);
- /* and reset the message */
- secmicclear(pmicdata);
-
-}
-
-void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
-{
-
- /* Append the byte to our word-sized buffer */
- pmicdata->M |= ((unsigned long)b) << (8 * pmicdata->nBytesInM);
- pmicdata->nBytesInM++;
- /* Process the word if it is full. */
- if (pmicdata->nBytesInM >= 4) {
- pmicdata->L ^= pmicdata->M;
- pmicdata->R ^= ROL32(pmicdata->L, 17);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ROL32(pmicdata->L, 3);
- pmicdata->L += pmicdata->R;
- pmicdata->R ^= ROR32(pmicdata->L, 2);
- pmicdata->L += pmicdata->R;
- /* Clear the buffer */
- pmicdata->M = 0;
- pmicdata->nBytesInM = 0;
- }
-
-}
-
-void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbytes)
-{
-
- /* This is simple */
- while (nbytes > 0) {
- rtw_secmicappend23abyte23a(pmicdata, *src++);
- nbytes--;
- }
-
-}
-
-void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst)
-{
-
- /* Append the minimum padding */
- rtw_secmicappend23abyte23a(pmicdata, 0x5a);
- rtw_secmicappend23abyte23a(pmicdata, 0);
- rtw_secmicappend23abyte23a(pmicdata, 0);
- rtw_secmicappend23abyte23a(pmicdata, 0);
- rtw_secmicappend23abyte23a(pmicdata, 0);
- /* and then zeroes until the length is a multiple of 4 */
- while (pmicdata->nBytesInM != 0)
- rtw_secmicappend23abyte23a(pmicdata, 0);
- /* The appendByte function has already computed the result. */
- secmicputuint32(dst, pmicdata->L);
- secmicputuint32(dst + 4, pmicdata->R);
- /* Reset to the empty message. */
- secmicclear(pmicdata);
-
-}
-
-void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
- u8 *mic_code, u8 pri)
-{
-
- struct mic_data micdata;
- u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
-
- rtw_secmicsetkey23a(&micdata, key);
- priority[0] = pri;
-
- /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
- if (header[1]&1) { /* ToDS == 1 */
- rtw_secmicappend23a(&micdata, &header[16], 6); /* DA */
- if (header[1]&2) /* From Ds == 1 */
- rtw_secmicappend23a(&micdata, &header[24], 6);
- else
- rtw_secmicappend23a(&micdata, &header[10], 6);
- } else { /* ToDS == 0 */
- rtw_secmicappend23a(&micdata, &header[4], 6); /* DA */
- if (header[1]&2) /* From Ds == 1 */
- rtw_secmicappend23a(&micdata, &header[16], 6);
- else
- rtw_secmicappend23a(&micdata, &header[10], 6);
-
- }
- rtw_secmicappend23a(&micdata, &priority[0], 4);
-
- rtw_secmicappend23a(&micdata, data, data_len);
-
- rtw_secgetmic23a(&micdata, mic_code);
-
-}
-
-/* macros for extraction/creation of byte and 16-bit word values */
-#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
-#define Lo8(v16) ((u8)((v16) & 0x00FF))
-#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
-#define Lo16(v32) ((u16)((v32) & 0xFFFF))
-#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
-#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
-
-/* select the Nth 16-bit word of the temporal key byte array TK[] */
-#define TK16(N) Mk16(tk[2 * (N) + 1], tk[2 * (N)])
-
-/* S-box lookup: 16 bits --> 16 bits */
-#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
-
-/* fixed algorithm "parameters" */
-#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
-#define TA_SIZE 6 /* 48-bit transmitter address */
-#define TK_SIZE 16 /* 128-bit temporal key */
-#define P1K_SIZE 10 /* 80-bit Phase1 key */
-#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
-
-/* 2-byte by 2-byte subset of the full AES S-box table */
-static const unsigned short Sbox1[2][256] = {
- /* Sbox for hash (can be in ROM) */
- {
- 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
- 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
- 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
- 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
- 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
- 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
- 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
- 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
- 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
- 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
- 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
- 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
- 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
- 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
- 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
- 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
- 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
- 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
- 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
- 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
- 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
- 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
- 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
- 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
- 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
- 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
- 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
- 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
- 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
- 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
- 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
- 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
- },
- { /* second half of table is byte-reversed version of first! */
- 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
- 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
- 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
- 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
- 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
- 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
- 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
- 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
- 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
- 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
- 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
- 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
- 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
- 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
- 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
- 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
- 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
- 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
- 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
- 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
- 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
- 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
- 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
- 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
- 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
- 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
- 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
- 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
- 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
- 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
- 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
- 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
- }
-};
-
- /*
-**********************************************************************
-* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
-*
-* Inputs:
-* tk[] = temporal key [128 bits]
-* ta[] = transmitter's MAC address [ 48 bits]
-* iv32 = upper 32 bits of IV [ 32 bits]
-* Output:
-* p1k[] = Phase 1 key [ 80 bits]
-*
-* Note:
-* This function only needs to be called every 2**16 packets,
-* although in theory it could be called every packet.
-*
-**********************************************************************
-*/
-static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
-{
- int i;
-
- /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
- p1k[0] = Lo16(iv32);
- p1k[1] = Hi16(iv32);
- p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
- p1k[3] = Mk16(ta[3], ta[2]);
- p1k[4] = Mk16(ta[5], ta[4]);
-
- /* Now compute an unbalanced Feistel cipher with 80-bit block */
- /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
- for (i = 0; i < PHASE1_LOOP_CNT; i++) {
- /* Each add operation here is mod 2**16 */
- p1k[0] += _S_(p1k[4] ^ TK16((i & 1) + 0));
- p1k[1] += _S_(p1k[0] ^ TK16((i & 1) + 2));
- p1k[2] += _S_(p1k[1] ^ TK16((i & 1) + 4));
- p1k[3] += _S_(p1k[2] ^ TK16((i & 1) + 6));
- p1k[4] += _S_(p1k[3] ^ TK16((i & 1) + 0));
- p1k[4] += (unsigned short) i; /* avoid "slide attacks" */
- }
-
-}
-
-/*
-**********************************************************************
-* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
-*
-* Inputs:
-* tk[] = Temporal key [128 bits]
-* p1k[] = Phase 1 output key [ 80 bits]
-* iv16 = low 16 bits of IV counter [ 16 bits]
-* Output:
-* rc4key[] = the key used to encrypt the packet [128 bits]
-*
-* Note:
-* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
-* across all packets using the same key TK value. Then, for a
-* given value of TK[], this TKIP48 construction guarantees that
-* the final RC4KEY value is unique across all packets.
-*
-* Suggested implementation optimization: if PPK[] is "overlaid"
-* appropriately on RC4KEY[], there is no need for the final
-* for loop below that copies the PPK[] result into RC4KEY[].
-*
-**********************************************************************
-*/
-static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
-{
- int i;
- u16 PPK[6]; /* temporary key for mixing */
-
- /* Note: all adds in the PPK[] equations below are mod 2**16 */
- for (i = 0; i < 5; i++)
- PPK[i] = p1k[i]; /* first, copy P1K to PPK */
-
- PPK[5] = p1k[4] + iv16; /* next, add in IV16 */
-
- /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
- PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
- PPK[1] += _S_(PPK[0] ^ TK16(1));
- PPK[2] += _S_(PPK[1] ^ TK16(2));
- PPK[3] += _S_(PPK[2] ^ TK16(3));
- PPK[4] += _S_(PPK[3] ^ TK16(4));
- PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
-
- /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
- PPK[0] += RotR1(PPK[5] ^ TK16(6));
- PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
- PPK[2] += RotR1(PPK[1]);
- PPK[3] += RotR1(PPK[2]);
- PPK[4] += RotR1(PPK[3]);
- PPK[5] += RotR1(PPK[4]);
- /* Note: At this point, for a given key TK[0..15], the 96-bit output */
- /* value PPK[0..5] is guaranteed to be unique, as a function */
- /* of the 96-bit "input" value {TA, IV32, IV16}. That is, */
- /* P1K is now a keyed permutation of {TA, IV32, IV16}. */
-
- /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
- rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
- rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
- rc4key[2] = Lo8(iv16);
- rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
-
- /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
- for (i = 0; i < 6; i++) {
- rc4key[4 + 2 * i] = Lo8(PPK[i]);
- rc4key[5 + 2 * i] = Hi8(PPK[i]);
- }
-
-}
-
-/* The hlen doesn't include the IV */
-int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- u16 pnl;
- u32 pnh;
- u8 rc4key[16];
- u8 ttkey[16];
- __le32 crc;
- u8 hw_hdr_offset = 0;
- struct arc4context mycontext;
- int curfragnum, length;
- u8 *pframe, *payload, *iv, *prwskey;
- union pn48 dot11txpn;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int res = _SUCCESS;
-
- if (pattrib->encrypt != WLAN_CIPHER_SUITE_TKIP)
- return _FAIL;
-
- if (!pxmitframe->buf_addr)
- return _FAIL;
-
- hw_hdr_offset = TXDESC_OFFSET;
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- if (pattrib->psta)
- stainfo = pattrib->psta;
- else {
- DBG_8723A("%s, call rtw_get_stainfo()\n", __func__);
- stainfo = rtw_get_stainfo23a(&padapter->stapriv,
- &pattrib->ra[0]);
- }
-
- if (!stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo == NULL!!!\n", __func__);
- DBG_8723A("%s, psta == NUL\n", __func__);
- return _FAIL;
- }
-
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo!= NULL!!!\n", __func__);
-
- if (!(stainfo->state & _FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-
- if (is_multicast_ether_addr(pattrib->ra))
- prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
- else
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
-
- /* 4 start to encrypt each fragment */
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- iv = pframe + pattrib->hdrlen;
- payload = pframe + pattrib->iv_len + pattrib->hdrlen;
-
- GET_TKIP_PN(iv, dot11txpn);
-
- pnl = (u16)(dot11txpn.val);
- pnh = (u32)(dot11txpn.val>>16);
-
- phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);
-
- phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
-
- if ((curfragnum + 1) == pattrib->nr_frags) { /* 4 the last fragment */
- length = (pattrib->last_txcmdsz -
- pattrib->hdrlen -
- pattrib->iv_len -
- pattrib->icv_len);
-
- RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
- "pattrib->iv_len =%x, pattrib->icv_len =%x\n",
- pattrib->iv_len,
- pattrib->icv_len);
- crc = cpu_to_le32(getcrc32(payload, length));
-
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length,
- (char *)&crc, 4);
-
- } else {
- length = (pxmitpriv->frag_len -
- pattrib->hdrlen -
- pattrib->iv_len -
- pattrib->icv_len);
-
- crc = cpu_to_le32(getcrc32(payload, length));
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length,
- (char *)&crc, 4);
-
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
-
- return res;
-}
-
-/* The hlen doesn't include the IV */
-int rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
- struct recv_frame *precvframe)
-{
- u16 pnl;
- u32 pnh;
- u8 rc4key[16];
- u8 ttkey[16];
- u32 actual_crc, expected_crc;
- struct arc4context mycontext;
- int length;
- u8 *pframe, *payload, *iv, *prwskey;
- union pn48 dot11txpn;
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff *skb = precvframe->pkt;
- int res = _SUCCESS;
-
- if (prxattrib->encrypt != WLAN_CIPHER_SUITE_TKIP)
- return _FAIL;
-
- pframe = skb->data;
-
- stainfo = rtw_get_stainfo23a(&padapter->stapriv,
- &prxattrib->ta[0]);
- if (!stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo == NULL!!!\n", __func__);
- return _FAIL;
- }
-
- /* 4 start to decrypt recvframe */
- if (is_multicast_ether_addr(prxattrib->ra)) {
- if (psecuritypriv->binstallGrpkey == 0) {
- res = _FAIL;
- DBG_8723A("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
- goto exit;
- }
- prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- } else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo!= NULL!!!\n", __func__);
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- }
-
- iv = pframe + prxattrib->hdrlen;
- payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
- length = skb->len - prxattrib->hdrlen - prxattrib->iv_len;
-
- GET_TKIP_PN(iv, dot11txpn);
-
- pnl = (u16)(dot11txpn.val);
- pnh = (u32)(dot11txpn.val>>16);
-
- phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
- phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
-
- /* decrypt the payload, including the ICV */
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
-
- actual_crc = getcrc32(payload, length - 4);
- expected_crc = get_unaligned_le32(&payload[length - 4]);
-
- if (actual_crc != expected_crc) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s:icv CRC mismatch: "
- "actual: %08x, expected: %08x\n",
- __func__, actual_crc, expected_crc);
- res = _FAIL;
- }
-
-exit:
- return res;
-}
-
-/* ===== AES related ===== */
-
-#define MAX_MSG_SIZE 2048
-/*****************************/
-/******** SBOX Table *********/
-/*****************************/
-
-static u8 sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
- 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
- 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
- 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
- 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
- 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
- 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
- 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
- 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
- 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
- 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
- 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
- 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
- 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
- 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
- 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
- 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-};
-
-/*****************************/
-/**** Function Prototypes ****/
-/*****************************/
-
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
- int qc_exists);
-
-static void xor_128(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = a[i] ^ b[i];
-}
-
-static void xor_32(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 4; i++)
- out[i] = a[i] ^ b[i];
-}
-
-static u8 sbox(u8 a)
-{
- return sbox_table[(int)a];
-}
-
-static void next_key(u8 *key, int round)
-{
- u8 rcon;
- u8 sbox_key[4];
- u8 rcon_table[12] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- 0x1b, 0x36, 0x36, 0x36
- };
-
- sbox_key[0] = sbox(key[13]);
- sbox_key[1] = sbox(key[14]);
- sbox_key[2] = sbox(key[15]);
- sbox_key[3] = sbox(key[12]);
-
- rcon = rcon_table[round];
-
- xor_32(&key[0], sbox_key, &key[0]);
- key[0] = key[0] ^ rcon;
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-
-}
-
-static void byte_sub(u8 *in, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox(in[i]);
-}
-
-static void shift_row(u8 *in, u8 *out)
-{
-
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-
-}
-
-static void mix_column(u8 *in, u8 *out)
-{
- int i;
- u8 add1b[4];
- u8 add1bf7[4];
- u8 rotl[4];
- u8 swap_halfs[4];
- u8 andf7[4];
- u8 rotr[4];
- u8 temp[4];
- u8 tempb[4];
-
- for (i = 0; i < 4; i++) {
- if ((in[i] & 0x80) == 0x80)
- add1b[i] = 0x1b;
- else
- add1b[i] = 0x00;
- }
-
- swap_halfs[0] = in[2]; /* Swap halves */
- swap_halfs[1] = in[3];
- swap_halfs[2] = in[0];
- swap_halfs[3] = in[1];
-
- rotl[0] = in[3]; /* Rotate left 8 bits */
- rotl[1] = in[0];
- rotl[2] = in[1];
- rotl[3] = in[2];
-
- andf7[0] = in[0] & 0x7f;
- andf7[1] = in[1] & 0x7f;
- andf7[2] = in[2] & 0x7f;
- andf7[3] = in[3] & 0x7f;
-
- for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
- andf7[i] = andf7[i] << 1;
- if ((andf7[i - 1] & 0x80) == 0x80)
- andf7[i] = (andf7[i] | 0x01);
- }
- andf7[0] = andf7[0] << 1;
- andf7[0] = andf7[0] & 0xfe;
-
- xor_32(add1b, andf7, add1bf7);
-
- xor_32(in, add1bf7, rotr);
-
- temp[0] = rotr[0]; /* Rotate right 8 bits */
- rotr[0] = rotr[1];
- rotr[1] = rotr[2];
- rotr[2] = rotr[3];
- rotr[3] = temp[0];
-
- xor_32(add1bf7, rotr, temp);
- xor_32(swap_halfs, rotl, tempb);
- xor_32(temp, tempb, out);
-
-}
-
-static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
-{
- int round;
- int i;
- u8 intermediatea[16];
- u8 intermediateb[16];
- u8 round_key[16];
-
- for (i = 0; i < 16; i++)
- round_key[i] = key[i];
-
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(round_key, data, ciphertext);
- next_key(round_key, round);
- } else if (round == 10) {
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- xor_128(intermediateb, round_key, ciphertext);
- } else { /* 1 - 9 */
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- mix_column(&intermediateb[0], &intermediatea[0]);
- mix_column(&intermediateb[4], &intermediatea[4]);
- mix_column(&intermediateb[8], &intermediatea[8]);
- mix_column(&intermediateb[12], &intermediatea[12]);
- xor_128(intermediatea, round_key, ciphertext);
- next_key(round_key, round);
- }
- }
-
-}
-
-/************************************************/
-/* construct_mic_iv() */
-/* Builds the MIC IV from header fields and PN */
-/************************************************/
-static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
- uint payload_length, u8 *pn_vector)
-{
- int i;
-
- mic_iv[0] = 0x59;
- if (qc_exists && a4_exists)
- mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
- if (qc_exists && !a4_exists)
- mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
- if (!qc_exists)
- mic_iv[1] = 0x00;
- for (i = 2; i < 8; i++)
- mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
- mic_iv[14] = (unsigned char)(payload_length / 256);
- mic_iv[15] = (unsigned char)(payload_length % 256);
-}
-
-/************************************************/
-/* construct_mic_header1() */
-/* Builds the first MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
-{
- mic_header1[0] = (u8)((header_length - 2) / 256);
- mic_header1[1] = (u8)((header_length - 2) % 256);
- mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
- mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
- mic_header1[4] = mpdu[4]; /* A1 */
- mic_header1[5] = mpdu[5];
- mic_header1[6] = mpdu[6];
- mic_header1[7] = mpdu[7];
- mic_header1[8] = mpdu[8];
- mic_header1[9] = mpdu[9];
- mic_header1[10] = mpdu[10]; /* A2 */
- mic_header1[11] = mpdu[11];
- mic_header1[12] = mpdu[12];
- mic_header1[13] = mpdu[13];
- mic_header1[14] = mpdu[14];
- mic_header1[15] = mpdu[15];
-
-}
-
-/************************************************/
-/* construct_mic_header2() */
-/* Builds the last MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
- int qc_exists)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- mic_header2[i] = 0x00;
-
- mic_header2[0] = mpdu[16]; /* A3 */
- mic_header2[1] = mpdu[17];
- mic_header2[2] = mpdu[18];
- mic_header2[3] = mpdu[19];
- mic_header2[4] = mpdu[20];
- mic_header2[5] = mpdu[21];
-
- mic_header2[6] = 0x00;
- mic_header2[7] = 0x00; /* mpdu[23]; */
-
- if (!qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8+i] = mpdu[24+i]; /* A4 */
- }
-
- if (qc_exists && !a4_exists) {
- mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
- mic_header2[9] = mpdu[25] & 0x00;
- }
-
- if (qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8+i] = mpdu[24+i]; /* A4 */
-
- mic_header2[14] = mpdu[30] & 0x0f;
- mic_header2[15] = mpdu[31] & 0x00;
- }
-
-}
-
-/************************************************/
-/* construct_ctr_preload() */
-/* Builds the CTR-mode preload (counter) block */
-/* from header fields, the PN and the counter. */
-/************************************************/
-static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists,
- u8 *mpdu, u8 *pn_vector, int c)
-{
- int i = 0;
-
- for (i = 0; i < 16; i++)
- ctr_preload[i] = 0x00;
-
- i = 0;
-
- ctr_preload[0] = 0x01; /* flag */
- if (qc_exists && a4_exists)
- ctr_preload[1] = mpdu[30] & 0x0f; /* QoS_Control */
- if (qc_exists && !a4_exists)
- ctr_preload[1] = mpdu[24] & 0x0f;
-
- for (i = 2; i < 8; i++)
- ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
- ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
- ctr_preload[15] = (unsigned char) (c % 256);
-
-}
-
-/************************************/
-/* bitwise_xor() */
-/* A 128 bit, bitwise exclusive or */
-/************************************/
-static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = ina[i] ^ inb[i];
-}
-
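-/************************************************/
-/* aes_cipher() */
-/* CCMP-encrypts one MPDU in place: computes */
-/* the CBC-MAC MIC, then CTR-encrypts the */
-/* payload and the MIC. */
-/************************************************/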
-static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
-{
- uint qc_exists, a4_exists, i, j, payload_remainder,
- num_blocks, payload_index;
- u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
- /* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
- u8 mic[8];
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
- u16 frsubtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
-
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
- if (hdrlen == sizeof(struct ieee80211_hdr_3addr) ||
- hdrlen == sizeof(struct ieee80211_qos_hdr))
- a4_exists = 0;
- else
- a4_exists = 1;
-
- if (ieee80211_is_data(hdr->frame_control)) {
- if ((frsubtype == IEEE80211_STYPE_DATA_CFACK) ||
- (frsubtype == IEEE80211_STYPE_DATA_CFPOLL) ||
- (frsubtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != sizeof(struct ieee80211_qos_hdr))
- hdrlen += 2;
- } else if ((frsubtype == IEEE80211_STYPE_QOS_DATA) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACK) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFPOLL) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACKPOLL)) {
- if (hdrlen != sizeof(struct ieee80211_qos_hdr))
- hdrlen += 2;
- qc_exists = 1;
- } else {
- qc_exists = 0;
- }
- } else {
- qc_exists = 0;
- }
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
-
- construct_mic_header1(mic_header1, hdrlen, pframe);
- construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
-
- payload_remainder = plen % 16;
- num_blocks = plen / 16;
-
- /* Find start of payload */
- payload_index = hdrlen + 8;
-
- /* Calculate MIC */
- aes128k128d(key, mic_iv, aes_out);
- bitwise_xor(aes_out, mic_header1, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- bitwise_xor(aes_out, mic_header2, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
-
- for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- payload_index += 16;
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- /* Add on the final payload block if it needs padding */
- if (payload_remainder > 0) {
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index++];
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- for (j = 0; j < 8; j++)
- mic[j] = aes_out[j];
-
- /* Insert MIC into payload */
- for (j = 0; j < 8; j++)
- pframe[payload_index + j] = mic[j];
-
- payload_index = hdrlen + 8;
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
- pframe, pn_vector, i + 1);
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) {
- /* If there is a short final block, then pad it,
- * encrypt it and copy the unpadded part back
- */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
- pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- /* Encrypt the MIC */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
- pn_vector, 0);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j + hdrlen + 8 + plen];
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];
-
- return _SUCCESS;
-}
-
-int rtw_aes_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{ /* exclude ICV */
- /* Intermediate Buffers */
- int curfragnum, length;
- u8 *pframe, *prwskey;
- u8 hw_hdr_offset = 0;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int res = _SUCCESS;
-
- if (!pxmitframe->buf_addr)
- return _FAIL;
-
- hw_hdr_offset = TXDESC_OFFSET;
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- /* 4 start to encrypt each fragment */
- if (pattrib->encrypt != WLAN_CIPHER_SUITE_CCMP)
- return _FAIL;
-
- if (pattrib->psta) {
- stainfo = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- stainfo = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
- }
-
- if (!stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo == NULL!!!\n", __func__);
- DBG_8723A("%s, psta == NUL\n", __func__);
- res = _FAIL;
- goto out;
- }
- if (!(stainfo->state & _FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
- __func__, stainfo->state);
- return _FAIL;
- }
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo!= NULL!!!\n", __func__);
-
- if (is_multicast_ether_addr(pattrib->ra))
- prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
- else
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- /* 4 the last fragment */
- if ((curfragnum + 1) == pattrib->nr_frags) {
- length = pattrib->last_txcmdsz -
- pattrib->hdrlen - pattrib->iv_len -
- pattrib->icv_len;
-
- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen -
- pattrib->iv_len - pattrib->icv_len;
-
- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- pframe += pxmitpriv->frag_len;
- pframe = PTR_ALIGN(pframe, 4);
- }
- }
-out:
- return res;
-}
-
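-/************************************************/
-/* aes_decipher() */
-/* CCMP-decrypts one MPDU in place: CTR- */
-/* decrypts the payload, recomputes the */
-/* CBC-MAC MIC and compares it with the */
-/* received MIC. */
-/************************************************/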
-static int aes_decipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
-{
- static u8 message[MAX_MSG_SIZE];
- uint qc_exists, a4_exists, i, j, payload_remainder,
- num_blocks, payload_index;
- int res = _SUCCESS;
- u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
- /* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
- u8 mic[8];
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)pframe;
- u16 frsubtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
-
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
- /* start to decrypt the payload */
-
- num_blocks = (plen - 8) / 16; /* plen includes LLC, payload and MIC */
-
- payload_remainder = (plen - 8) % 16;
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- if (hdrlen == sizeof(struct ieee80211_hdr_3addr) ||
- hdrlen == sizeof(struct ieee80211_qos_hdr))
- a4_exists = 0;
- else
- a4_exists = 1;
-
- if (ieee80211_is_data(hdr->frame_control)) {
- if ((frsubtype == IEEE80211_STYPE_DATA_CFACK) ||
- (frsubtype == IEEE80211_STYPE_DATA_CFPOLL) ||
- (frsubtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != sizeof(struct ieee80211_hdr_3addr))
- hdrlen += 2;
- } else if ((frsubtype == IEEE80211_STYPE_QOS_DATA) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACK) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFPOLL) ||
- (frsubtype == IEEE80211_STYPE_QOS_DATA_CFACKPOLL)) {
- if (hdrlen != sizeof(struct ieee80211_hdr_3addr))
- hdrlen += 2;
- qc_exists = 1;
- } else {
- qc_exists = 0;
- }
- } else {
- qc_exists = 0;
- }
-
- /* now, decrypt pframe with hdrlen offset and plen long */
-
- payload_index = hdrlen + 8; /* 8 is for extiv */
-
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
- pframe, pn_vector, i + 1);
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) {
- /* If there is a short final block, then pad it,
- * encrypt it and copy the unpadded part back
- */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe,
- pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
-
- /* start to calculate the mic */
- if ((hdrlen + plen + 8) <= MAX_MSG_SIZE)
- memcpy(message, pframe, (hdrlen + plen + 8)); /* 8 is for ext iv len */
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- construct_mic_iv(mic_iv, qc_exists, a4_exists, message,
- plen - 8, pn_vector);
-
- construct_mic_header1(mic_header1, hdrlen, message);
- construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
-
- payload_remainder = (plen - 8) % 16;
- num_blocks = (plen - 8) / 16;
-
- /* Find start of payload */
- payload_index = hdrlen + 8;
-
- /* Calculate MIC */
- aes128k128d(key, mic_iv, aes_out);
- bitwise_xor(aes_out, mic_header1, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- bitwise_xor(aes_out, mic_header2, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
-
- for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &message[payload_index], chain_buffer);
-
- payload_index += 16;
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- /* Add on the final payload block if it needs padding */
- if (payload_remainder > 0) {
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = message[payload_index++];
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- }
-
- for (j = 0 ; j < 8; j++)
- mic[j] = aes_out[j];
-
- /* Insert MIC into payload */
- for (j = 0; j < 8; j++)
- message[payload_index + j] = mic[j];
-
- payload_index = hdrlen + 8;
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
- message, pn_vector, i + 1);
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &message[payload_index], chain_buffer);
- for (j = 0; j < 16; j++)
- message[payload_index++] = chain_buffer[j];
- }
-
- if (payload_remainder > 0) {
- /* If there is a short final block, then pad it,
- * encrypt it and copy the unpadded part back
- */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
- message, pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = message[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- message[payload_index++] = chain_buffer[j];
- }
-
- /* Encrypt the MIC */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message,
- pn_vector, 0);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < 8; j++)
- padded_buffer[j] = message[j + hdrlen + 8 + plen - 8];
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < 8; j++)
- message[payload_index++] = chain_buffer[j];
-
- /* compare the mic */
- for (i = 0; i < 8; i++) {
- if (pframe[hdrlen + 8 + plen - 8 + i] != message[hdrlen + 8 + plen - 8 + i]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- __func__, i,
- pframe[hdrlen + 8 + plen - 8 + i],
- message[hdrlen + 8 + plen - 8 + i]);
- DBG_8723A("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- __func__, i,
- pframe[hdrlen + 8 + plen - 8 + i],
- message[hdrlen + 8 + plen - 8 + i]);
- res = _FAIL;
- }
- }
- return res;
-}
-
-int rtw_aes_decrypt23a(struct rtw_adapter *padapter,
- struct recv_frame *precvframe)
-{ /* exclude ICV */
- struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff *skb = precvframe->pkt;
- int length;
- u8 *pframe, *prwskey;
- int res = _SUCCESS;
-
- pframe = skb->data;
- /* 4 start to decrypt each fragment */
- if (prxattrib->encrypt != WLAN_CIPHER_SUITE_CCMP)
- return _FAIL;
-
- stainfo = rtw_get_stainfo23a(&padapter->stapriv, &prxattrib->ta[0]);
- if (!stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo == NULL!!!\n", __func__);
- res = _FAIL;
- goto exit;
- }
-
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "%s: stainfo!= NULL!!!\n", __func__);
-
- if (is_multicast_ether_addr(prxattrib->ra)) {
- /* in concurrent mode we should use sw decryption for
- * the group key, so this message was removed
- */
- if (!psecuritypriv->binstallGrpkey) {
- res = _FAIL;
- DBG_8723A("%s:rx bc/mc packets, but didn't install "
- "group key!!!!!!!!!!\n", __func__);
- goto exit;
- }
- prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
- DBG_8723A("not match packet_index =%d, install_index ="
- "%d\n", prxattrib->key_index,
- psecuritypriv->dot118021XGrpKeyid);
- res = _FAIL;
- goto exit;
- }
- } else {
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- }
-
- length = skb->len - prxattrib->hdrlen - prxattrib->iv_len;
-
- res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
-exit:
- return res;
-}
-
-void rtw_use_tkipkey_handler23a(void *function_context)
-{
- struct rtw_adapter *padapter = function_context;
-
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "^^^%s ^^^\n", __func__);
- padapter->securitypriv.busetkipkey = 1;
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
- "^^^%s padapter->securitypriv.busetkipkey =%d^^^\n",
- __func__, padapter->securitypriv.busetkipkey);
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_sreset.c b/drivers/staging/rtl8723au/core/rtw_sreset.c
deleted file mode 100644
index 29a29d92a6ac..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_sreset.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include <rtw_sreset.h>
-#include <usb_ops_linux.h>
-
-void rtw_sreset_init(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- mutex_init(&psrtpriv->silentreset_mutex);
- psrtpriv->silent_reset_inprogress = false;
- psrtpriv->last_tx_time = 0;
- psrtpriv->last_tx_complete_time = 0;
-}
-
-void rtw_sreset_reset_value(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- psrtpriv->silent_reset_inprogress = false;
- psrtpriv->last_tx_time = 0;
- psrtpriv->last_tx_complete_time = 0;
-}
-
-bool rtw_sreset_inprogress(struct rtw_adapter *padapter)
-{
- struct rtw_adapter *primary_adapter = GET_PRIMARY_ADAPTER(padapter);
- struct hal_data_8723a *pHalData = GET_HAL_DATA(primary_adapter);
-
- return pHalData->srestpriv.silent_reset_inprogress;
-}
-
-static void sreset_restore_security_station(struct rtw_adapter *padapter)
-{
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta;
- struct mlme_ext_info *pmlmeinfo = &padapter->mlmeextpriv.mlmext_info;
- u8 val8;
-
- if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X)
- val8 = 0xcc;
- else
- val8 = 0xcf;
-
- rtl8723a_set_sec_cfg(padapter, val8);
-
- if (padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_TKIP ||
- padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_CCMP) {
- psta = rtw_get_stainfo23a(pstapriv, get_bssid(mlmepriv));
- if (psta == NULL) {
- /* DEBUG_ERR(("Set wpa_set_encryption: Obtain Sta_info fail\n")); */
- } else {
- /* pairwise key */
- rtw_setstakey_cmd23a(padapter, (unsigned char *)psta, true);
- /* group key */
- rtw_set_key23a(padapter, &padapter->securitypriv, padapter->securitypriv.dot118021XGrpKeyid, 0);
- }
- }
-}
-
-static void sreset_restore_network_station(struct rtw_adapter *padapter)
-{
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 threshold;
-
- rtw_setopmode_cmd23a(padapter, NL80211_IFTYPE_STATION);
-
- /* TH = 1 => disable USB RX aggregation */
- /* TH = 0 => enable USB RX aggregation, use the init value. */
- if (mlmepriv->htpriv.ht_option) {
- if (padapter->registrypriv.wifi_spec == 1)
- threshold = 1;
- else
- threshold = 0;
- } else
- threshold = 1;
-
- rtl8723a_set_rxdma_agg_pg_th(padapter, threshold);
-
- set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-
- hw_var_set_bssid(padapter, pmlmeinfo->network.MacAddress);
- hw_var_set_mlme_join(padapter, 0);
-
- rtl8723a_set_media_status(padapter, pmlmeinfo->state & 0x3);
-
- mlmeext_joinbss_event_callback23a(padapter, 1);
- /* restore Sequence No. */
- rtl8723au_write8(padapter, REG_NQOS_SEQ, padapter->xmitpriv.nqos_ssn);
-
- sreset_restore_security_station(padapter);
-}
-
-static void sreset_restore_network_status(struct rtw_adapter *padapter)
-{
- struct mlme_priv *mlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(mlmepriv, WIFI_STATION_STATE)) {
- DBG_8723A("%s(%s): fwstate:0x%08x - WIFI_STATION_STATE\n",
- __func__, padapter->pnetdev->name,
- get_fwstate(mlmepriv));
- sreset_restore_network_station(padapter);
-#ifdef CONFIG_8723AU_AP_MODE
- } else if (check_fwstate(mlmepriv, WIFI_AP_STATE)) {
- DBG_8723A("%s(%s): fwstate:0x%08x - WIFI_AP_STATE\n",
- __func__, padapter->pnetdev->name,
- get_fwstate(mlmepriv));
- rtw_ap_restore_network(padapter);
-#endif
- } else if (check_fwstate(mlmepriv, WIFI_ADHOC_STATE)) {
- DBG_8723A("%s(%s): fwstate:0x%08x - WIFI_ADHOC_STATE\n",
- __func__, padapter->pnetdev->name,
- get_fwstate(mlmepriv));
- } else {
- DBG_8723A("%s(%s): fwstate:0x%08x - ???\n", __func__,
- padapter->pnetdev->name, get_fwstate(mlmepriv));
- }
-}
-
-static void sreset_stop_adapter(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (padapter == NULL)
- return;
-
- DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
-
- if (!rtw_netif_queue_stopped(padapter->pnetdev))
- netif_tx_stop_all_queues(padapter->pnetdev);
-
- rtw_cancel_all_timer23a(padapter);
-
- /* TODO: OS and HCI independent */
- tasklet_kill(&pxmitpriv->xmit_tasklet);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- rtw_scan_abort23a(padapter);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- rtw23a_join_to_handler((unsigned long)padapter);
-}
-
-static void sreset_start_adapter(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (padapter == NULL)
- return;
-
- DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- sreset_restore_network_status(padapter);
-
- /* TODO: OS and HCI independent */
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
- mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
- jiffies + msecs_to_jiffies(2000));
-
- if (rtw_netif_queue_stopped(padapter->pnetdev))
- netif_tx_wake_all_queues(padapter->pnetdev);
-}
-
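-/* Silent reset: stop the adapter, cycle through IPS enter/leave and restart */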
-void rtw_sreset_reset(struct rtw_adapter *active_adapter)
-{
- struct rtw_adapter *padapter = GET_PRIMARY_ADAPTER(active_adapter);
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- unsigned long start = jiffies;
-
- DBG_8723A("%s\n", __func__);
-
- mutex_lock(&psrtpriv->silentreset_mutex);
- psrtpriv->silent_reset_inprogress = true;
- pwrpriv->change_rfpwrstate = rf_off;
-
- sreset_stop_adapter(padapter);
-
- ips_enter23a(padapter);
- ips_leave23a(padapter);
-
- sreset_start_adapter(padapter);
- psrtpriv->silent_reset_inprogress = false;
- mutex_unlock(&psrtpriv->silentreset_mutex);
-
- DBG_8723A("%s done in %d ms\n", __func__,
- jiffies_to_msecs(jiffies - start));
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
deleted file mode 100644
index a9b778c45d44..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
+++ /dev/null
@@ -1,439 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_STA_MGT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <xmit_osdep.h>
-#include <mlme_osdep.h>
-#include <sta_info.h>
-#include <rtl8723a_hal.h>
-
-static const u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-static void _rtw_init_stainfo(struct sta_info *psta)
-{
- memset((u8 *)psta, 0, sizeof(struct sta_info));
- spin_lock_init(&psta->lock);
- INIT_LIST_HEAD(&psta->list);
- INIT_LIST_HEAD(&psta->hash_list);
- _rtw_init_queue23a(&psta->sleep_q);
- psta->sleepq_len = 0;
- _rtw_init_sta_xmit_priv23a(&psta->sta_xmitpriv);
- _rtw_init_sta_recv_priv23a(&psta->sta_recvpriv);
-#ifdef CONFIG_8723AU_AP_MODE
- INIT_LIST_HEAD(&psta->asoc_list);
- INIT_LIST_HEAD(&psta->auth_list);
- psta->expire_to = 0;
- psta->flags = 0;
- psta->capability = 0;
- psta->bpairwise_key_installed = false;
- psta->nonerp_set = 0;
- psta->no_short_slot_time_set = 0;
- psta->no_short_preamble_set = 0;
- psta->no_ht_gf_set = 0;
- psta->no_ht_set = 0;
- psta->ht_20mhz_set = 0;
- psta->keep_alive_trycnt = 0;
-#endif /* CONFIG_8723AU_AP_MODE */
-}
-
-int _rtw_init_sta_priv23a(struct sta_priv *pstapriv)
-{
- int i;
-
- spin_lock_init(&pstapriv->sta_hash_lock);
- pstapriv->asoc_sta_count = 0;
- for (i = 0; i < NUM_STA; i++)
- INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
-
-#ifdef CONFIG_8723AU_AP_MODE
- pstapriv->sta_dz_bitmap = 0;
- pstapriv->tim_bitmap = 0;
- INIT_LIST_HEAD(&pstapriv->asoc_list);
- INIT_LIST_HEAD(&pstapriv->auth_list);
- spin_lock_init(&pstapriv->asoc_list_lock);
- spin_lock_init(&pstapriv->auth_list_lock);
- pstapriv->asoc_list_cnt = 0;
- pstapriv->auth_list_cnt = 0;
- pstapriv->auth_to = 3; /* 3*2 = 6 sec */
- pstapriv->assoc_to = 3;
- /* pstapriv->expire_to = 900; 900*2 = 1800 sec = 30 min,
- expire after no traffic. */
- /* pstapriv->expire_to = 30; 30*2 = 60 sec = 1 min,
- expire after no traffic. */
- pstapriv->expire_to = 3; /* 3*2 = 6 sec */
- pstapriv->max_num_sta = NUM_STA;
-#endif
- return _SUCCESS;
-}
-
-int _rtw_free_sta_priv23a(struct sta_priv *pstapriv)
-{
- struct list_head *phead;
- struct sta_info *psta, *ptmp;
- struct recv_reorder_ctrl *preorder_ctrl;
- int index;
-
- if (pstapriv) {
- /* delete all reordering_ctrl_timer */
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
- int i;
-
- for (i = 0; i < 16 ; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
- }
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- /*===============================*/
- }
- return _SUCCESS;
-}
-
-struct sta_info *
-rtw_alloc_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr, gfp_t gfp)
-{
- struct list_head *phash_list;
- struct sta_info *psta;
- struct recv_reorder_ctrl *preorder_ctrl;
- s32 index;
- int i = 0;
- u16 wRxSeqInitialValue = 0xffff;
-
- psta = kmalloc(sizeof(struct sta_info), gfp);
- if (!psta)
- return NULL;
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- _rtw_init_stainfo(psta);
-
- psta->padapter = pstapriv->padapter;
-
- ether_addr_copy(psta->hwaddr, hwaddr);
-
- index = wifi_mac_hash(hwaddr);
-
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- "rtw_alloc_stainfo23a: index = %x\n", index);
- if (index >= NUM_STA) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- "ERROR => rtw_alloc_stainfo23a: index >= NUM_STA\n");
- psta = NULL;
- goto exit;
- }
- phash_list = &pstapriv->sta_hash[index];
-
- list_add_tail(&psta->hash_list, phash_list);
-
- pstapriv->asoc_sta_count++;
-
-/* For the SMC router, the sequence number of the first packet of the
- WPS handshake will be 0. */
-/* In this case, that packet would be dropped by the recv_decache
- function if we used 0x00 as the default value for the tid_rxseq
- variable. */
-/* So we initialize the tid_rxseq variable to 0xffff. */
-
- for (i = 0; i < 16; i++)
- memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
- &wRxSeqInitialValue, 2);
-
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- "alloc number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, hwaddr);
-
- init_addba_retry_timer23a(psta);
-
- /* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
-
- preorder_ctrl->padapter = pstapriv->padapter;
-
- preorder_ctrl->enable = false;
-
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- /* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
- preorder_ctrl->wsize_b = 64;/* 64; */
-
- _rtw_init_queue23a(&preorder_ctrl->pending_recvframe_queue);
-
- rtw_init_recv_timer23a(preorder_ctrl);
- }
- /* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
- psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
-
- /* init for the sequence number of received management frame */
- psta->RxMgmtFrameSeqNum = 0xffff;
-exit:
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- return psta;
-}
-
-/* using pstapriv->sta_hash_lock to protect */
-int rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_xmit_priv *pstaxmitpriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct hw_xmit *phwxmit;
- int i;
-
- if (!psta)
- goto exit;
-
- spin_lock_bh(&psta->lock);
- psta->state &= ~_FW_LINKED;
- spin_unlock_bh(&psta->lock);
-
- pstaxmitpriv = &psta->sta_xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- rtw_free_xmitframe_queue23a(pxmitpriv, &psta->sleep_q);
- psta->sleepq_len = 0;
-
- /* vo */
- rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&pstaxmitpriv->vo_q.tx_pending);
- phwxmit = pxmitpriv->hwxmits;
- phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
- pstaxmitpriv->vo_q.qcnt = 0;
-
- /* vi */
- rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&pstaxmitpriv->vi_q.tx_pending);
- phwxmit = pxmitpriv->hwxmits+1;
- phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
- pstaxmitpriv->vi_q.qcnt = 0;
-
- /* be */
- rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
- phwxmit = pxmitpriv->hwxmits+2;
- phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
- pstaxmitpriv->be_q.qcnt = 0;
-
- /* bk */
- rtw_free_xmitframe_queue23a(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&pstaxmitpriv->bk_q.tx_pending);
- phwxmit = pxmitpriv->hwxmits+3;
- phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
- pstaxmitpriv->bk_q.qcnt = 0;
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- list_del_init(&psta->hash_list);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- "free number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, psta->hwaddr);
- pstapriv->asoc_sta_count--;
-
- /* re-init sta_info; 20061114 will be init in alloc_stainfo */
- /* _rtw_init_sta_xmit_priv23a(&psta->sta_xmitpriv); */
- /* _rtw_init_sta_recv_priv23a(&psta->sta_recvpriv); */
-
- del_timer_sync(&psta->addba_retry_timer);
-
- /* for A-MPDU Rx reordering buffer control,
- cancel reordering_ctrl_timer */
- for (i = 0; i < 16; i++) {
- struct list_head *phead, *plist;
- struct recv_frame *prframe;
- struct rtw_queue *ppending_recvframe_queue;
-
- preorder_ctrl = &psta->recvreorder_ctrl[i];
-
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
-
- ppending_recvframe_queue =
- &preorder_ctrl->pending_recvframe_queue;
-
- spin_lock_bh(&ppending_recvframe_queue->lock);
- phead = get_list_head(ppending_recvframe_queue);
- plist = phead->next;
-
- while (!list_empty(phead)) {
- prframe = container_of(plist, struct recv_frame, list);
- plist = plist->next;
- list_del_init(&prframe->list);
- rtw_free_recvframe23a(prframe);
- }
- spin_unlock_bh(&ppending_recvframe_queue->lock);
- }
- if (!(psta->state & WIFI_AP_STATE))
- rtl8723a_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, false);
-#ifdef CONFIG_8723AU_AP_MODE
- spin_lock_bh(&pstapriv->auth_list_lock);
- if (!list_empty(&psta->auth_list)) {
- list_del_init(&psta->auth_list);
- pstapriv->auth_list_cnt--;
- }
- spin_unlock_bh(&pstapriv->auth_list_lock);
-
- psta->expire_to = 0;
-
- psta->sleepq_ac_len = 0;
- psta->qos_info = 0;
-
- psta->max_sp_len = 0;
- psta->uapsd_bk = 0;
- psta->uapsd_be = 0;
- psta->uapsd_vi = 0;
- psta->uapsd_vo = 0;
-
- psta->has_legacy_ac = 0;
-
- pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
- pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
-
- if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
- pstapriv->sta_aid[psta->aid - 1] = NULL;
- psta->aid = 0;
- }
-#endif /* CONFIG_8723AU_AP_MODE */
-
- kfree(psta);
-exit:
- return _SUCCESS;
-}
-
-/* free all stainfo which in sta_hash[all] */
-void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- struct sta_info *psta, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo23a(padapter);
- s32 index;
-
- if (pstapriv->asoc_sta_count == 1)
- return;
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
- if (pbcmc_stainfo != psta)
- rtw_free_stainfo23a(padapter, psta);
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-}
-
-/* any station allocated can be searched by hash list */
-struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr)
-{
- struct list_head *phead;
- struct sta_info *pos, *psta = NULL;
- u32 index;
- const u8 *addr;
-
- if (!hwaddr)
- return NULL;
-
- if (is_multicast_ether_addr(hwaddr))
- addr = bc_addr;
- else
- addr = hwaddr;
-
- index = wifi_mac_hash(addr);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- phead = &pstapriv->sta_hash[index];
- list_for_each_entry(pos, phead, hash_list) {
- psta = pos;
-
- /* if found the matched address */
- if (ether_addr_equal(psta->hwaddr, addr))
- break;
-
- psta = NULL;
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- return psta;
-}
-
-int rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta;
- struct tx_servq *ptxservq;
- int res = _SUCCESS;
-
- psta = rtw_alloc_stainfo23a(pstapriv, bc_addr, GFP_KERNEL);
- if (!psta) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
- "rtw_alloc_stainfo23a fail\n");
- return res;
- }
- /* default broadcast & multicast use macid 1 */
- psta->mac_id = 1;
-
- ptxservq = &psta->sta_xmitpriv.be_q;
- return _SUCCESS;
-}
-
-struct sta_info *rtw_get_bcmc_stainfo23a(struct rtw_adapter *padapter)
-{
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta = rtw_get_stainfo23a(pstapriv, bc_addr);
- return psta;
-}
-
-bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr)
-{
- bool res = true;
-#ifdef CONFIG_8723AU_AP_MODE
- struct list_head *phead;
- struct rtw_wlan_acl_node *paclnode;
- bool match = false;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
-
- spin_lock_bh(&pacl_node_q->lock);
- phead = get_list_head(pacl_node_q);
- list_for_each_entry(paclnode, phead, list) {
- if (ether_addr_equal(paclnode->addr, mac_addr)) {
- if (paclnode->valid) {
- match = true;
- break;
- }
- }
- }
- spin_unlock_bh(&pacl_node_q->lock);
-
- if (pacl_list->mode == 1) /* accept unless in deny list */
- res = !match;
- else if (pacl_list->mode == 2) /* deny unless in accept list */
- res = match;
- else
- res = true;
-#endif
- return res;
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
deleted file mode 100644
index 694cf17f82cf..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ /dev/null
@@ -1,1537 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_WLAN_UTIL_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <linux/ieee80211.h>
-#include <wifi.h>
-#include <rtl8723a_spec.h>
-
-static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
-static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
-
-static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
-static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
-
-static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
-static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
-static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
-static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
-static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
-static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};
-
-static unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
-static unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
-
-#define R2T_PHY_DELAY 0
-
-/* define WAIT_FOR_BCN_TO_MIN 3000 */
-#define WAIT_FOR_BCN_TO_MIN 6000
-#define WAIT_FOR_BCN_TO_MAX 20000
-
-static u8 rtw_basic_rate_cck[4] = {
- IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
-};
-
-static u8 rtw_basic_rate_ofdm[3] = {
- IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
-};
-
-static u8 rtw_basic_rate_mix[7] = {
- IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
- IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
-};
-
-int cckrates_included23a(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if (((rate[i]) & 0x7f) == 2 || ((rate[i]) & 0x7f) == 4 ||
- ((rate[i]) & 0x7f) == 11 || ((rate[i]) & 0x7f) == 22)
- return true;
- }
-
- return false;
-}
-
-int cckratesonly_included23a(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if (((rate[i]) & 0x7f) != 2 && ((rate[i]) & 0x7f) != 4 &&
- ((rate[i]) & 0x7f) != 11 && ((rate[i]) & 0x7f) != 22)
- return false;
- }
-
- return true;
-}
-
-unsigned char networktype_to_raid23a(unsigned char network_type)
-{
- unsigned char raid;
-
- switch (network_type) {
- case WIRELESS_11B:
- raid = RATR_INX_WIRELESS_B;
- break;
- case WIRELESS_11A:
- case WIRELESS_11G:
- raid = RATR_INX_WIRELESS_G;
- break;
- case WIRELESS_11BG:
- raid = RATR_INX_WIRELESS_GB;
- break;
- case WIRELESS_11_24N:
- case WIRELESS_11_5N:
- raid = RATR_INX_WIRELESS_N;
- break;
- case WIRELESS_11A_5N:
- case WIRELESS_11G_24N:
- raid = RATR_INX_WIRELESS_NG;
- break;
- case WIRELESS_11BG_24N:
- raid = RATR_INX_WIRELESS_NGB;
- break;
- default:
- raid = RATR_INX_WIRELESS_GB;
- break;
- }
- return raid;
-}
-
-u8 judge_network_type23a(struct rtw_adapter *padapter,
- unsigned char *rate, int ratelen)
-{
- u8 network_type = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeext->cur_channel > 14) {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_5N;
- network_type |= WIRELESS_11A;
- } else {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_24N;
-
- if (cckratesonly_included23a(rate, ratelen))
- network_type |= WIRELESS_11B;
- else if (cckrates_included23a(rate, ratelen))
- network_type |= WIRELESS_11BG;
- else
- network_type |= WIRELESS_11G;
- }
- return network_type;
-}
-
-static unsigned char ratetbl_val_2wifirate(unsigned char rate)
-{
- unsigned char val = 0;
-
- switch (rate & 0x7f) {
- case 0:
- val = IEEE80211_CCK_RATE_1MB;
- break;
- case 1:
- val = IEEE80211_CCK_RATE_2MB;
- break;
- case 2:
- val = IEEE80211_CCK_RATE_5MB;
- break;
- case 3:
- val = IEEE80211_CCK_RATE_11MB;
- break;
- case 4:
- val = IEEE80211_OFDM_RATE_6MB;
- break;
- case 5:
- val = IEEE80211_OFDM_RATE_9MB;
- break;
- case 6:
- val = IEEE80211_OFDM_RATE_12MB;
- break;
- case 7:
- val = IEEE80211_OFDM_RATE_18MB;
- break;
- case 8:
- val = IEEE80211_OFDM_RATE_24MB;
- break;
- case 9:
- val = IEEE80211_OFDM_RATE_36MB;
- break;
- case 10:
- val = IEEE80211_OFDM_RATE_48MB;
- break;
- case 11:
- val = IEEE80211_OFDM_RATE_54MB;
- break;
- }
- return val;
-}
-
-static int is_basicrate(struct rtw_adapter *padapter, unsigned char rate)
-{
- int i;
- unsigned char val;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- for (i = 0; i < NumRates; i++) {
- val = pmlmeext->basicrate[i];
-
- if (val != 0xff && val != 0xfe) {
- if (rate == ratetbl_val_2wifirate(val))
- return true;
- }
- }
-
- return false;
-}
-
-static unsigned int ratetbl2rateset(struct rtw_adapter *padapter,
- unsigned char *rateset)
-{
- int i;
- unsigned char rate;
- unsigned int len = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- for (i = 0; i < NumRates; i++) {
- rate = pmlmeext->datarate[i];
-
- switch (rate) {
- case 0xff:
- return len;
- case 0xfe:
- continue;
- default:
- rate = ratetbl_val_2wifirate(rate);
-
- if (is_basicrate(padapter, rate))
- rate |= IEEE80211_BASIC_RATE_MASK;
-
- rateset[len] = rate;
- len++;
- break;
- }
- }
- return len;
-}
-
-void get_rate_set23a(struct rtw_adapter *padapter,
- unsigned char *pbssrate, int *bssrate_len)
-{
- unsigned char supportedrates[NumRates];
-
- memset(supportedrates, 0, NumRates);
- *bssrate_len = ratetbl2rateset(padapter, supportedrates);
- memcpy(pbssrate, supportedrates, *bssrate_len);
-}
-
-void UpdateBrateTbl23a(struct rtw_adapter *Adapter, u8 *mBratesOS)
-{
- u8 i;
- u8 rate;
-
- /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- rate = mBratesOS[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_24MB:
- mBratesOS[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- default:
- break;
- }
- }
-}
-
-void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen)
-{
- u8 i;
- u8 rate;
-
- for (i = 0; i < bssratelen; i++) {
- rate = bssrateset[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
-inline u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_channel;
-}
-
-inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
-{
- adapter_to_dvobj(adapter)->oper_channel = ch;
-}
-
-inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
-{
- adapter_to_dvobj(adapter)->oper_bwmode = bw;
-}
-
-inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
-{
- adapter_to_dvobj(adapter)->oper_ch_offset = offset;
-}
-
-void SelectChannel23a(struct rtw_adapter *padapter, unsigned char channel)
-{
- mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);
-
- /* saved channel info */
- rtw_set_oper_ch23a(padapter, channel);
-
- PHY_SwChnl8723A(padapter, channel);
-
- mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);
-}
-
-static void set_bwmode(struct rtw_adapter *padapter, unsigned short bwmode,
- unsigned char channel_offset)
-{
- mutex_lock(&adapter_to_dvobj(padapter)->setbw_mutex);
-
- /* saved bw info */
- rtw_set_oper_bw23a(padapter, bwmode);
- rtw_set_oper_ch23aoffset23a(padapter, channel_offset);
-
- PHY_SetBWMode23a8723A(padapter, (enum ht_channel_width)bwmode,
- channel_offset);
-
- mutex_unlock(&adapter_to_dvobj(padapter)->setbw_mutex);
-}
-
-void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
- unsigned char channel_offset, unsigned short bwmode)
-{
- u8 center_ch;
-
- if (bwmode == HT_CHANNEL_WIDTH_20 ||
- channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE) {
- /* SelectChannel23a(padapter, channel); */
- center_ch = channel;
- } else {
- /* switch to the proper channel */
- if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
- /* SelectChannel23a(padapter, channel + 2); */
- center_ch = channel + 2;
- } else {
- /* SelectChannel23a(padapter, channel - 2); */
- center_ch = channel - 2;
- }
- }
-
- /* set Channel */
- mutex_lock(&adapter_to_dvobj(padapter)->setch_mutex);
-
- /* saved channel/bw info */
- rtw_set_oper_ch23a(padapter, channel);
- rtw_set_oper_bw23a(padapter, bwmode);
- rtw_set_oper_ch23aoffset23a(padapter, channel_offset);
-
- PHY_SwChnl8723A(padapter, center_ch); /* set center channel */
-
- mutex_unlock(&adapter_to_dvobj(padapter)->setch_mutex);
-
- set_bwmode(padapter, bwmode, channel_offset);
-}
-
-inline u8 *get_my_bssid23a(struct wlan_bssid_ex *pnetwork)
-{
- return pnetwork->MacAddress;
-}
-
-bool is_client_associated_to_ap23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
-
- if (!padapter)
- return false;
-
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS &&
- (pmlmeinfo->state & 0x03) == MSR_INFRA)
- return true;
- else
- return false;
-}
-
-bool is_client_associated_to_ibss23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS &&
- (pmlmeinfo->state & 0x03) == MSR_ADHOC)
- return true;
- else
- return false;
-}
-
-bool is_IBSS_empty23a(struct rtw_adapter *padapter)
-{
- unsigned int i;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
- if (pmlmeinfo->FW_sta_info[i].status == 1)
- return false;
- }
-
- return true;
-}
-
-unsigned int decide_wait_for_beacon_timeout23a(unsigned int bcn_interval)
-{
- if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
- return WAIT_FOR_BCN_TO_MIN;
- else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
- return WAIT_FOR_BCN_TO_MAX;
- else
- return bcn_interval << 2;
-}
-
-void clear_cam_entry23a(struct rtw_adapter *padapter, u8 entry)
-{
- unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00};
-
- rtl8723a_cam_write(padapter, entry, 0, null_sta, null_key);
-}
-
-int allocate_fw_sta_entry23a(struct rtw_adapter *padapter)
-{
- unsigned int mac_id;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
- if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
- pmlmeinfo->FW_sta_info[mac_id].status = 1;
- pmlmeinfo->FW_sta_info[mac_id].retry = 0;
- break;
- }
- }
-
- return mac_id;
-}
-
-void flush_all_cam_entry23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtl8723a_cam_invalidate_all(padapter);
-
- memset(pmlmeinfo->FW_sta_info, 0, sizeof(pmlmeinfo->FW_sta_info));
-}
-
-int WMM_param_handler23a(struct rtw_adapter *padapter, const u8 *p)
-{
- /* struct registry_priv *pregpriv = &padapter->registrypriv; */
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmepriv->qos_option == 0) {
- pmlmeinfo->WMM_enable = 0;
- return _FAIL;
- }
-
- pmlmeinfo->WMM_enable = 1;
- memcpy(&pmlmeinfo->WMM_param, p + 2 + 6,
- sizeof(struct WMM_para_element));
- return true;
-}
-
-void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
-{
- u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
- u8 acm_mask;
- u16 TXOP;
- u32 acParm, i;
- u32 edca[4], inx[4];
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pmlmeinfo->WMM_enable == 0) {
- padapter->mlmepriv.acm_mask = 0;
- return;
- }
-
- acm_mask = 0;
-
- if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
- aSifsTime = 10;
- else
- aSifsTime = 16;
-
- for (i = 0; i < 4; i++) {
- ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
- ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
-
- /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
- AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) *
- pmlmeinfo->slotTime + aSifsTime;
-
- ECWMin = pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f;
- ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
- TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
-
- acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
-
- switch (ACI) {
- case 0x0:
- rtl8723a_set_ac_param_be(padapter, acParm);
- acm_mask |= (ACM ? BIT(1) : 0);
- edca[XMIT_BE_QUEUE] = acParm;
- break;
- case 0x1:
- rtl8723a_set_ac_param_bk(padapter, acParm);
- /* acm_mask |= (ACM? BIT(0):0); */
- edca[XMIT_BK_QUEUE] = acParm;
- break;
- case 0x2:
- rtl8723a_set_ac_param_vi(padapter, acParm);
- acm_mask |= (ACM ? BIT(2) : 0);
- edca[XMIT_VI_QUEUE] = acParm;
- break;
- case 0x3:
- rtl8723a_set_ac_param_vo(padapter, acParm);
- acm_mask |= (ACM ? BIT(3) : 0);
- edca[XMIT_VO_QUEUE] = acParm;
- break;
- }
-
- DBG_8723A("WMM(%x): %x, %x\n", ACI, ACM, acParm);
- }
-
- if (padapter->registrypriv.acm_method == 1)
- rtl8723a_set_acm_ctrl(padapter, acm_mask);
- else
- padapter->mlmepriv.acm_mask = acm_mask;
-
- inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
-
- if (pregpriv->wifi_spec == 1) {
- u32 j, change_inx = false;
-
- /* entry index: 0->vo, 1->vi, 2->be, 3->bk. */
- for (i = 0; i < 4; i++) {
- for (j = i+1; j < 4; j++) {
- /* compare CW and AIFS */
- if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
- change_inx = true;
- } else if ((edca[j] & 0xFFFF) ==
- (edca[i] & 0xFFFF)) {
- /* compare TXOP */
- if ((edca[j] >> 16) > (edca[i] >> 16))
- change_inx = true;
- }
-
- if (change_inx) {
- swap(edca[i], edca[j]);
- swap(inx[i], inx[j]);
- change_inx = false;
- }
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- pxmitpriv->wmm_para_seq[i] = inx[i];
- DBG_8723A("wmm_para_seq(%d): %d\n", i,
- pxmitpriv->wmm_para_seq[i]);
- }
-}
-
-static void bwmode_update_check(struct rtw_adapter *padapter, const u8 *p)
-{
- struct ieee80211_ht_operation *pHT_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- unsigned char new_bwmode;
- unsigned char new_ch_offset;
-
- if (!p)
- return;
- if (!phtpriv->ht_option)
- return;
- if (p[1] != sizeof(struct ieee80211_ht_operation))
- return;
-
- pHT_info = (struct ieee80211_ht_operation *)(p + 2);
-
- if ((pHT_info->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) &&
- pregistrypriv->cbw40_enable) {
- new_bwmode = HT_CHANNEL_WIDTH_40;
-
- switch (pHT_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- } else {
- new_bwmode = HT_CHANNEL_WIDTH_20;
- new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
-
- if (new_bwmode != pmlmeext->cur_bwmode ||
- new_ch_offset != pmlmeext->cur_ch_offset) {
- pmlmeinfo->bwmode_updated = true;
-
- pmlmeext->cur_bwmode = new_bwmode;
- pmlmeext->cur_ch_offset = new_ch_offset;
-
- /* update HT info also */
- HT_info_handler23a(padapter, p);
- } else
- pmlmeinfo->bwmode_updated = false;
-
- if (pmlmeinfo->bwmode_updated) {
- struct sta_info *psta;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
-
- /* update ap's stainfo */
- psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
- if (psta) {
- struct ht_priv *phtpriv_sta = &psta->htpriv;
-
- if (phtpriv_sta->ht_option) {
- /* bwmode */
- phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
- phtpriv_sta->ch_offset =
- pmlmeext->cur_ch_offset;
- } else {
- phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
- phtpriv_sta->ch_offset =
- HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- }
- }
- }
-}
-
-void HT_caps_handler23a(struct rtw_adapter *padapter, const u8 *p)
-{
- unsigned int i;
- u8 rf_type;
- u8 max_AMPDU_len, min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
- struct ieee80211_ht_cap *cap;
- u8 *dstcap;
-
- if (!p)
- return;
-
- if (!phtpriv->ht_option)
- return;
-
- pmlmeinfo->HT_caps_enable = 1;
-
- cap = &pmlmeinfo->ht_cap;
- dstcap = (u8 *)cap;
- for (i = 0; i < p[1]; i++) {
- if (i != 2) {
- dstcap[i] &= p[i + 2];
- } else {
- /* modify from fw by Thomas 2010/11/17 */
- if ((cap->ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_FACTOR) >
- (p[i + 2] & IEEE80211_HT_AMPDU_PARM_FACTOR))
- max_AMPDU_len = p[i + 2] &
- IEEE80211_HT_AMPDU_PARM_FACTOR;
- else
- max_AMPDU_len = cap->ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_FACTOR;
-
- if ((cap->ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_DENSITY) >
- (p[i + 2] & IEEE80211_HT_AMPDU_PARM_DENSITY))
- min_MPDU_spacing = cap->ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_DENSITY;
- else
- min_MPDU_spacing = p[i + 2] &
- IEEE80211_HT_AMPDU_PARM_DENSITY;
-
- cap->ampdu_params_info =
- max_AMPDU_len | min_MPDU_spacing;
- }
- }
-
- rf_type = rtl8723a_get_rf_type(padapter);
-
- /* update the MCS rates */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (rf_type == RF_1T1R || rf_type == RF_1T2R)
- cap->mcs.rx_mask[i] &= MCS_rate_1R23A[i];
- else
- cap->mcs.rx_mask[i] &= MCS_rate_2R23A[i];
- }
-}
-
-void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct ht_priv *phtpriv = &pmlmepriv->htpriv;
-
- if (!p)
- return;
-
- if (!phtpriv->ht_option)
- return;
-
- if (p[1] != sizeof(struct ieee80211_ht_operation))
- return;
-
- pmlmeinfo->HT_info_enable = 1;
- memcpy(&pmlmeinfo->HT_info, p + 2, p[1]);
-}
-
-void HTOnAssocRsp23a(struct rtw_adapter *padapter)
-{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- /* struct registry_priv *pregpriv = &padapter->registrypriv; */
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_8723A("%s\n", __func__);
-
- if (pmlmeinfo->HT_info_enable && pmlmeinfo->HT_caps_enable)
- pmlmeinfo->HT_enable = 1;
- else {
- pmlmeinfo->HT_enable = 0;
- /* set_channel_bwmode23a(padapter, pmlmeext->cur_channel,
- pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
- return;
- }
-
- /* handle A-MPDU parameter field */
- /*
- AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- AMPDU_para [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->ht_cap.ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_FACTOR;
-
- min_MPDU_spacing =
- (pmlmeinfo->ht_cap.ampdu_params_info &
- IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
-
- rtl8723a_set_ampdu_min_space(padapter, min_MPDU_spacing);
- rtl8723a_set_ampdu_factor(padapter, max_AMPDU_len);
-}
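
/*
 * Editor's standalone sketch (not part of the deleted driver) of the A-MPDU
 * parameters octet that HTOnAssocRsp23a() above unpacks, per the comment in
 * that function: bits [1:0] carry the maximum A-MPDU length exponent
 * (8k/16k/32k/64k) and bits [4:2] the minimum MPDU start spacing.  The masks
 * mirror the kernel's IEEE80211_HT_AMPDU_PARM_FACTOR/DENSITY values but are
 * defined locally so this example builds on its own.
 */
#include <stdio.h>

#define AMPDU_PARM_FACTOR	0x03	/* bits [1:0] */
#define AMPDU_PARM_DENSITY	0x1c	/* bits [4:2] */

int main(void)
{
	unsigned char ampdu_params = 0x17;	/* example value from a peer */
	unsigned int factor  = ampdu_params & AMPDU_PARM_FACTOR;
	unsigned int density = (ampdu_params & AMPDU_PARM_DENSITY) >> 2;

	/* max A-MPDU length = 2^(13 + factor) - 1 octets */
	printf("max A-MPDU len: %u octets\n", (1u << (13 + factor)) - 1);
	printf("min MPDU start spacing code: %u\n", density);
	return 0;
}
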
-
-void ERP_IE_handler23a(struct rtw_adapter *padapter, const u8 *p)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (p[1] > 1)
- return;
-
- pmlmeinfo->ERP_enable = 1;
- memcpy(&pmlmeinfo->ERP_IE, p + 2, p[1]);
-}
-
-void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
- case 0: /* off */
- psta->rtsen = 0;
- psta->cts2self = 0;
- break;
- case 1: /* on */
- if (pregpriv->vcs_type == RTS_CTS) {
- psta->rtsen = 1;
- psta->cts2self = 0;
- } else {
- psta->rtsen = 0;
- psta->cts2self = 1;
- }
- break;
- case 2: /* auto */
- default:
- if (pmlmeinfo->ERP_enable && pmlmeinfo->ERP_IE & BIT(1)) {
- if (pregpriv->vcs_type == RTS_CTS) {
- psta->rtsen = 1;
- psta->cts2self = 0;
- } else {
- psta->rtsen = 0;
- psta->cts2self = 1;
- }
- } else {
- psta->rtsen = 0;
- psta->cts2self = 0;
- }
- break;
- }
-}
-
-int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
- struct ieee80211_mgmt *mgmt, u32 pkt_len)
-{
- struct wlan_network *cur_network = &Adapter->mlmepriv.cur_network;
- struct ieee80211_ht_operation *pht_info;
- unsigned short val16;
- u8 crypto, bcn_channel;
- int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0, r;
- int pie_len, ssid_len, privacy;
- const u8 *p, *ssid;
-
- if (!is_client_associated_to_ap23a(Adapter))
- return _SUCCESS;
-
- if (unlikely(!ieee80211_is_beacon(mgmt->frame_control))) {
- printk(KERN_WARNING "%s: received a non beacon frame!\n",
- __func__);
- return _FAIL;
- }
-
- if (!ether_addr_equal(cur_network->network.MacAddress, mgmt->bssid)) {
- DBG_8723A("%s: linked but recv other bssid bcn %pM %pM\n",
- __func__, mgmt->bssid,
- cur_network->network.MacAddress);
- return _FAIL;
- }
-
- /* check bw and channel offset */
- /* parsing HT_CAP_IE */
- pie_len = pkt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
-
- /* Checking for channel */
- p = cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable,
- pie_len);
- if (p)
- bcn_channel = p[2];
- else {
-			/* In 5 GHz some APs omit the DS Parameter Set IE, so
-			   fall back to the HT operation IE for the channel */
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
- mgmt->u.beacon.variable, pie_len);
-
- if (p && p[1] > 0) {
- pht_info = (struct ieee80211_ht_operation *)(p + 2);
- bcn_channel = pht_info->primary_chan;
- } else { /* we don't find channel IE, so don't check it */
- DBG_8723A("Oops: %s we don't find channel IE, so don't "
- "check it\n", __func__);
- bcn_channel = Adapter->mlmeextpriv.cur_channel;
- }
- }
- if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
- DBG_8723A("%s beacon channel:%d cur channel:%d disconnect\n",
- __func__, bcn_channel,
- Adapter->mlmeextpriv.cur_channel);
- goto _mismatch;
- }
-
- /* checking SSID */
- p = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable, pie_len);
- if (p && p[1]) {
- ssid = p + 2;
- ssid_len = p[1];
- } else {
- DBG_8723A("%s marc: cannot find SSID for survey event\n",
- __func__);
- ssid = NULL;
- ssid_len = 0;
- }
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d cur_network->network.Ssid.Ssid:%s len:%d\n",
- __func__, ssid, ssid_len, cur_network->network.Ssid.ssid,
- cur_network->network.Ssid.ssid_len);
-
- if (ssid_len != cur_network->network.Ssid.ssid_len || ssid_len > 32 ||
- (ssid_len &&
- memcmp(ssid, cur_network->network.Ssid.ssid, ssid_len))) {
- DBG_8723A("%s(), SSID is not match return FAIL\n", __func__);
- goto _mismatch;
- }
-
- /* check encryption info */
- val16 = le16_to_cpu(mgmt->u.beacon.capab_info);
-
- if (val16 & WLAN_CAPABILITY_PRIVACY)
- privacy = 1;
- else
- privacy = 0;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
- __func__, cur_network->network.Privacy, privacy);
- if (cur_network->network.Privacy != privacy) {
- DBG_8723A("%s(), privacy is not match return FAIL\n", __func__);
- goto _mismatch;
- }
-
- p = cfg80211_find_ie(WLAN_EID_RSN, mgmt->u.beacon.variable, pie_len);
- if (p && p[1]) {
- crypto = ENCRYP_PROTOCOL_WPA2;
- if (p && p[1]) {
- r = rtw_parse_wpa2_ie23a(p, p[1] + 2, &group_cipher,
- &pairwise_cipher, &is_8021x);
- if (r == _SUCCESS)
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher: %d, is_802x : %d\n",
- __func__, pairwise_cipher,
- group_cipher, is_8021x);
- }
- } else {
- p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- mgmt->u.beacon.variable, pie_len);
- if (p && p[1]) {
- crypto = ENCRYP_PROTOCOL_WPA;
- r = rtw_parse_wpa_ie23a(p, p[1] + 2, &group_cipher,
- &pairwise_cipher, &is_8021x);
- if (r == _SUCCESS)
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- "%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n",
- __func__, pairwise_cipher,
- group_cipher, is_8021x);
- } else {
- if (privacy)
- crypto = ENCRYP_PROTOCOL_WEP;
- else
- crypto = ENCRYP_PROTOCOL_OPENSYS;
- }
- }
-
- if (cur_network->BcnInfo.encryp_protocol != crypto) {
- DBG_8723A("%s(): encryption mismatch, return FAIL\n", __func__);
- goto _mismatch;
- }
-
- if (crypto == ENCRYP_PROTOCOL_WPA || crypto == ENCRYP_PROTOCOL_WPA2) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s cur_network->group_cipher is %d: %d\n", __func__,
- cur_network->BcnInfo.group_cipher, group_cipher);
- if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher ||
- group_cipher != cur_network->BcnInfo.group_cipher) {
- DBG_8723A("%s pairwise_cipher(%x:%x) or group_cipher "
- "(%x:%x) is not match, return FAIL\n",
- __func__, pairwise_cipher,
- cur_network->BcnInfo.pairwise_cipher,
- group_cipher,
- cur_network->BcnInfo.group_cipher);
- goto _mismatch;
- }
-
- if (is_8021x != cur_network->BcnInfo.is_8021x) {
- DBG_8723A("%s authentication is not match, return "
- "FAIL\n", __func__);
- goto _mismatch;
- }
- }
-
- return _SUCCESS;
-
-_mismatch:
-
- return _FAIL;
-}
-
-void update_beacon23a_info(struct rtw_adapter *padapter,
- struct ieee80211_mgmt *mgmt,
- uint pkt_len, struct sta_info *psta)
-{
- unsigned int len;
- const u8 *p;
-
- len = pkt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
-
- p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, mgmt->u.beacon.variable,
- len);
- if (p)
- bwmode_update_check(padapter, p);
-
- p = cfg80211_find_ie(WLAN_EID_ERP_INFO, mgmt->u.beacon.variable, len);
- if (p) {
- ERP_IE_handler23a(padapter, p);
- VCS_update23a(padapter, psta);
- }
-}
-
-bool is_ap_in_tkip23a(struct rtw_adapter *padapter)
-{
- u32 i;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- const u8 *p;
-
- if (cur_network->capability & WLAN_CAPABILITY_PRIVACY) {
- for (i = 0; i < pmlmeinfo->network.IELength;) {
- p = pmlmeinfo->network.IEs + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:
- if (!memcmp(p + 2, RTW_WPA_OUI23A_TYPE, 4) &&
- !memcmp(p + 2 + 12, WPA_TKIP_CIPHER, 4))
- return true;
- break;
- case WLAN_EID_RSN:
- if (!memcmp(p + 2 + 8, RSN_TKIP_CIPHER, 4))
- return true;
- break;
- default:
- break;
- }
- i += (p[1] + 2);
- }
- return false;
- } else
- return false;
-}
-
-bool should_forbid_n_rate23a(struct rtw_adapter *padapter)
-{
- u32 i;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
- const u8 *p;
-
- if (cur_network->capability & WLAN_CAPABILITY_PRIVACY) {
- for (i = 0; i < cur_network->IELength;) {
- p = cur_network->IEs + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:
- if (!memcmp(p + 2, RTW_WPA_OUI23A_TYPE, 4) &&
- (!memcmp(p + 2 + 12,
- WPA_CIPHER_SUITE_CCMP23A, 4) ||
- !memcmp(p + 2 + 16,
- WPA_CIPHER_SUITE_CCMP23A, 4)))
- return false;
- break;
- case WLAN_EID_RSN:
- if (!memcmp(p + 2 + 8,
- RSN_CIPHER_SUITE_CCMP23A, 4) ||
- !memcmp(p + 2 + 12,
- RSN_CIPHER_SUITE_CCMP23A, 4))
- return false;
- default:
- break;
- }
-
- i += (p[1] + 2);
- }
- return true;
- } else {
- return false;
- }
-}
-
-bool is_ap_in_wep23a(struct rtw_adapter *padapter)
-{
- u32 i;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- const u8 *p;
-
- if (cur_network->capability & WLAN_CAPABILITY_PRIVACY) {
- for (i = 0; i < pmlmeinfo->network.IELength;) {
- p = pmlmeinfo->network.IEs + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:
- if (!memcmp(p + 2, RTW_WPA_OUI23A_TYPE, 4))
- return false;
- break;
- case WLAN_EID_RSN:
- return false;
-
- default:
- break;
- }
-
- i += (p[1] + 2);
- }
-
- return true;
- } else
- return false;
-}
-
-static int wifirate2_ratetbl_inx23a(unsigned char rate)
-{
- int inx = 0;
-
- rate = rate & 0x7f;
-
- switch (rate) {
- case 54*2:
- inx = 11;
- break;
- case 48*2:
- inx = 10;
- break;
- case 36*2:
- inx = 9;
- break;
- case 24*2:
- inx = 8;
- break;
- case 18*2:
- inx = 7;
- break;
- case 12*2:
- inx = 6;
- break;
- case 9*2:
- inx = 5;
- break;
- case 6*2:
- inx = 4;
- break;
- case 11*2:
- inx = 3;
- break;
- case 11:
- inx = 2;
- break;
- case 2*2:
- inx = 1;
- break;
- case 1*2:
- inx = 0;
- break;
- }
- return inx;
-}
-
-unsigned int update_basic_rate23a(unsigned char *ptn, unsigned int ptn_sz)
-{
- unsigned int i, num_of_rate;
- unsigned int mask = 0;
-
- num_of_rate = (ptn_sz > NumRates)? NumRates: ptn_sz;
-
- for (i = 0; i < num_of_rate; i++) {
- if ((*(ptn + i)) & 0x80)
- mask |= 0x1 << wifirate2_ratetbl_inx23a(*(ptn + i));
- }
- return mask;
-}
-
-unsigned int update_supported_rate23a(unsigned char *ptn, unsigned int ptn_sz)
-{
- unsigned int i, num_of_rate;
- unsigned int mask = 0;
-
- num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
-
- for (i = 0; i < num_of_rate; i++)
- mask |= 0x1 << wifirate2_ratetbl_inx23a(*(ptn + i));
- return mask;
-}
-
-unsigned int update_MSC_rate23a(struct ieee80211_ht_cap *pHT_caps)
-{
- unsigned int mask;
-
- mask = pHT_caps->mcs.rx_mask[0] << 12 |
- pHT_caps->mcs.rx_mask[1] << 20;
-
- return mask;
-}
-
-int support_short_GI23a(struct rtw_adapter *padapter,
- struct ieee80211_ht_cap *pHT_caps)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- unsigned char bit_offset;
-
- if (!pmlmeinfo->HT_enable)
- return _FAIL;
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK)
- return _FAIL;
- bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40)? 6: 5;
-
- if (pHT_caps->cap_info & cpu_to_le16(0x1 << bit_offset))
- return _SUCCESS;
- else
- return _FAIL;
-}
-
-unsigned char get_highest_rate_idx23a(u32 mask)
-{
- int i;
- unsigned char rate_idx = 0;
-
- for (i = 27; i >= 0; i--) {
- if (mask & BIT(i)) {
- rate_idx = i;
- break;
- }
- }
- return rate_idx;
-}
-
-void Update_RA_Entry23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- rtw_hal_update_ra_mask23a(psta, 0);
-}
-
-static void enable_rate_adaptive(struct rtw_adapter *padapter,
- struct sta_info *psta)
-{
- Update_RA_Entry23a(padapter, psta);
-}
-
-void set_sta_rate23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- /* rate adaptive */
- enable_rate_adaptive(padapter, psta);
-}
-
-/* Update RRSR and Rate for USERATE */
-void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 wirelessmode)
-{
- unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
-
- memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
-
- if (wirelessmode == WIRELESS_11B) {
- memcpy(supported_rates, rtw_basic_rate_cck, 4);
- } else if (wirelessmode & WIRELESS_11B) {
- memcpy(supported_rates, rtw_basic_rate_mix, 7);
- } else {
- memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
- }
-
- if (wirelessmode & WIRELESS_11B)
- update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
- else
- update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);
-
- HalSetBrateCfg23a(padapter, supported_rates);
-}
-
-unsigned char check_assoc_AP23a(u8 *pframe, uint len)
-{
- int i;
- u8 epigram_vendor_flag;
- u8 ralink_vendor_flag;
- const u8 *p;
-
- epigram_vendor_flag = 0;
- ralink_vendor_flag = 0;
-
- for (i = 0; i < len;) {
- p = pframe + i;
-
- switch (p[0]) {
- case WLAN_EID_VENDOR_SPECIFIC:
- if (!memcmp(p + 2, ARTHEROS_OUI1, 3) ||
- !memcmp(p + 2, ARTHEROS_OUI2, 3)) {
- DBG_8723A("link to Artheros AP\n");
- return HT_IOT_PEER_ATHEROS;
- } else if (!memcmp(p + 2, BROADCOM_OUI1, 3) ||
- !memcmp(p + 2, BROADCOM_OUI2, 3)) {
- DBG_8723A("link to Broadcom AP\n");
- return HT_IOT_PEER_BROADCOM;
- } else if (!memcmp(p + 2, MARVELL_OUI, 3)) {
- DBG_8723A("link to Marvell AP\n");
- return HT_IOT_PEER_MARVELL;
- } else if (!memcmp(p + 2, RALINK_OUI, 3)) {
- if (!ralink_vendor_flag)
- ralink_vendor_flag = 1;
- else {
- DBG_8723A("link to Ralink AP\n");
- return HT_IOT_PEER_RALINK;
- }
- } else if (!memcmp(p + 2, CISCO_OUI, 3)) {
- DBG_8723A("link to Cisco AP\n");
- return HT_IOT_PEER_CISCO;
- } else if (!memcmp(p + 2, REALTEK_OUI, 3)) {
- DBG_8723A("link to Realtek 96B\n");
- return HT_IOT_PEER_REALTEK;
- } else if (!memcmp(p + 2, AIRGOCAP_OUI, 3)) {
- DBG_8723A("link to Airgo Cap\n");
- return HT_IOT_PEER_AIRGO;
- } else if (!memcmp(p + 2, EPIGRAM_OUI, 3)) {
- epigram_vendor_flag = 1;
- if (ralink_vendor_flag) {
- DBG_8723A("link to Tenda W311R AP\n");
- return HT_IOT_PEER_TENDA;
- } else
- DBG_8723A("Capture EPIGRAM_OUI\n");
- } else
- break;
- default:
- break;
- }
-
- i += (p[1] + 2);
- }
-
- if (ralink_vendor_flag && !epigram_vendor_flag) {
- DBG_8723A("link to Ralink AP\n");
- return HT_IOT_PEER_RALINK;
- } else if (ralink_vendor_flag && epigram_vendor_flag) {
- DBG_8723A("link to Tenda W311R AP\n");
- return HT_IOT_PEER_TENDA;
- } else {
- DBG_8723A("link to new AP\n");
- return HT_IOT_PEER_UNKNOWN;
- }
-}
-
-void update_IOT_info23a(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- switch (pmlmeinfo->assoc_AP_vendor) {
- case HT_IOT_PEER_MARVELL:
- pmlmeinfo->turboMode_cts2self = 1;
- pmlmeinfo->turboMode_rtsen = 0;
- break;
- case HT_IOT_PEER_RALINK:
- pmlmeinfo->turboMode_cts2self = 0;
- pmlmeinfo->turboMode_rtsen = 1;
- /* disable high power */
- rtl8723a_odm_support_ability_clr(padapter, (u32)
- ~DYNAMIC_BB_DYNAMIC_TXPWR);
- break;
- case HT_IOT_PEER_REALTEK:
- /* rtw_write16(padapter, 0x4cc, 0xffff); */
- /* rtw_write16(padapter, 0x546, 0x01c0); */
- /* disable high power */
- rtl8723a_odm_support_ability_clr(padapter, (u32)
- ~DYNAMIC_BB_DYNAMIC_TXPWR);
- break;
- default:
- pmlmeinfo->turboMode_cts2self = 0;
- pmlmeinfo->turboMode_rtsen = 1;
- break;
- }
-}
-
-void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap)
-{
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (updateCap & cShortPreamble) {
- /* Short Preamble */
- if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) {
- /* PREAMBLE_LONG or PREAMBLE_AUTO */
- pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
- rtl8723a_ack_preamble(Adapter, true);
- }
- } else { /* Long Preamble */
- if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) {
- /* PREAMBLE_SHORT or PREAMBLE_AUTO */
- pmlmeinfo->preamble_mode = PREAMBLE_LONG;
- rtl8723a_ack_preamble(Adapter, false);
- }
- }
- if (updateCap & cIBSS) {
- /* Filen: See 802.11-2007 p.91 */
- pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
- } else {
- /* Filen: See 802.11-2007 p.90 */
- if (pmlmeext->cur_wireless_mode &
- (WIRELESS_11G | WIRELESS_11_24N)) {
- if (updateCap & cShortSlotTime) { /* Short Slot Time */
- if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
- pmlmeinfo->slotTime = SHORT_SLOT_TIME;
- } else { /* Long Slot Time */
- if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
- pmlmeinfo->slotTime =
- NON_SHORT_SLOT_TIME;
- }
- } else if (pmlmeext->cur_wireless_mode &
- (WIRELESS_11A | WIRELESS_11_5N)) {
- pmlmeinfo->slotTime = SHORT_SLOT_TIME;
- } else {
- /* B Mode */
- pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
- }
- }
- rtl8723a_set_slot_time(Adapter, pmlmeinfo->slotTime);
-}
-
-void update_wireless_mode23a(struct rtw_adapter *padapter)
-{
- int ratelen, network_type = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- unsigned char *rate = cur_network->SupportedRates;
-
- ratelen = rtw_get_rateset_len23a(cur_network->SupportedRates);
-
- if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
- pmlmeinfo->HT_enable = 1;
-
- if (pmlmeext->cur_channel > 14) {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_5N;
- network_type |= WIRELESS_11A;
- } else {
- if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_24N;
-
- if (cckratesonly_included23a(rate, ratelen) == true)
- network_type |= WIRELESS_11B;
- else if (cckrates_included23a(rate, ratelen) == true)
- network_type |= WIRELESS_11BG;
- else
- network_type |= WIRELESS_11G;
- }
-
- pmlmeext->cur_wireless_mode =
- network_type & padapter->registrypriv.wireless_mode;
-
- /* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
- /* change this value if having IOT issues. */
- rtl8723a_set_resp_sifs(padapter, 0x08, 0x08, 0x0a, 0x0a);
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- update_mgnt_tx_rate23a(padapter, IEEE80211_CCK_RATE_1MB);
- else
- update_mgnt_tx_rate23a(padapter, IEEE80211_OFDM_RATE_6MB);
-}
-
-void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
-		/* Only B, B/G and B/G/N APs can use CCK rates */
- memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates),
- rtw_basic_rate_cck, 4);
- } else {
- memcpy(pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
- rtw_basic_rate_ofdm, 3);
- }
-}
-
-int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie,
- uint var_ie_len, int cam_idx)
-{
- int supportRateNum = 0;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- const u8 *p;
-
- p = cfg80211_find_ie(WLAN_EID_SUPP_RATES, pvar_ie, var_ie_len);
- if (!p)
- return _FAIL;
-
- memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, p + 2, p[1]);
- supportRateNum = p[1];
-
- p = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, pvar_ie, var_ie_len);
- if (p)
- memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates +
- supportRateNum, p + 2, p[1]);
- return _SUCCESS;
-}
-
-void process_addba_req23a(struct rtw_adapter *padapter,
- u8 *paddba_req, u8 *addr)
-{
- struct sta_info *psta;
- u16 tid, start_seq, param;
- struct recv_reorder_ctrl *preorder_ctrl;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- psta = rtw_get_stainfo23a(pstapriv, addr);
-
- if (psta) {
- start_seq = le16_to_cpu(preq->BA_starting_seqctrl) >> 4;
-
- param = le16_to_cpu(preq->BA_para_set);
- tid = (param >> 2) & 0x0f;
-
- preorder_ctrl = &psta->recvreorder_ctrl[tid];
-
- preorder_ctrl->indicate_seq = 0xffff;
-
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq == true) ?
- true : false;
- }
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
deleted file mode 100644
index 3de40cfa5f3b..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ /dev/null
@@ -1,2341 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTW_XMIT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wifi.h>
-#include <osdep_intf.h>
-#include <linux/ip.h>
-#include <usb_ops.h>
-#include <rtl8723a_xmit.h>
-
-static void _init_txservq(struct tx_servq *ptxservq)
-{
-
- INIT_LIST_HEAD(&ptxservq->tx_pending);
- _rtw_init_queue23a(&ptxservq->sta_pending);
- ptxservq->qcnt = 0;
-
-}
-
-void _rtw_init_sta_xmit_priv23a(struct sta_xmit_priv *psta_xmitpriv)
-{
-
- spin_lock_init(&psta_xmitpriv->lock);
-
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _init_txservq(&psta_xmitpriv->blk_q[i]); */
-
- _init_txservq(&psta_xmitpriv->be_q);
- _init_txservq(&psta_xmitpriv->bk_q);
- _init_txservq(&psta_xmitpriv->vi_q);
- _init_txservq(&psta_xmitpriv->vo_q);
- INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
- INIT_LIST_HEAD(&psta_xmitpriv->apsd);
-
-}
-
-int _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
- struct rtw_adapter *padapter)
-{
- int i;
- struct xmit_buf *pxmitbuf;
- struct xmit_frame *pxframe;
- int res = _SUCCESS;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-
- spin_lock_init(&pxmitpriv->lock);
- spin_lock_init(&pxmitpriv->lock_sctx);
- sema_init(&pxmitpriv->xmit_sema, 0);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
-
- pxmitpriv->adapter = padapter;
-
- _rtw_init_queue23a(&pxmitpriv->be_pending);
- _rtw_init_queue23a(&pxmitpriv->bk_pending);
- _rtw_init_queue23a(&pxmitpriv->vi_pending);
- _rtw_init_queue23a(&pxmitpriv->vo_pending);
- _rtw_init_queue23a(&pxmitpriv->bm_pending);
-
- _rtw_init_queue23a(&pxmitpriv->free_xmit_queue);
-
- for (i = 0; i < NR_XMITFRAME; i++) {
- pxframe = kzalloc(sizeof(struct xmit_frame), GFP_KERNEL);
- if (!pxframe)
- break;
- INIT_LIST_HEAD(&pxframe->list);
-
- pxframe->padapter = padapter;
- pxframe->frame_tag = NULL_FRAMETAG;
-
- list_add_tail(&pxframe->list,
- &pxmitpriv->free_xmit_queue.queue);
- }
-
- pxmitpriv->free_xmitframe_cnt = i;
-
- pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
-
- /* init xmit_buf */
- _rtw_init_queue23a(&pxmitpriv->free_xmitbuf_queue);
- INIT_LIST_HEAD(&pxmitpriv->xmitbuf_list);
- _rtw_init_queue23a(&pxmitpriv->pending_xmitbuf_queue);
-
- for (i = 0; i < NR_XMITBUFF; i++) {
- pxmitbuf = kzalloc(sizeof(struct xmit_buf), GFP_KERNEL);
- if (!pxmitbuf)
- goto fail;
- INIT_LIST_HEAD(&pxmitbuf->list);
- INIT_LIST_HEAD(&pxmitbuf->list2);
-
- pxmitbuf->padapter = padapter;
-
- /* Tx buf allocation may fail sometimes, so sleep and retry. */
- res = rtw_os_xmit_resource_alloc23a(padapter, pxmitbuf,
- (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
- if (res == _FAIL) {
- goto fail;
- }
-
- list_add_tail(&pxmitbuf->list,
- &pxmitpriv->free_xmitbuf_queue.queue);
- list_add_tail(&pxmitbuf->list2,
- &pxmitpriv->xmitbuf_list);
- }
-
- pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
-
- /* init xframe_ext queue, the same count as extbuf */
- _rtw_init_queue23a(&pxmitpriv->free_xframe_ext_queue);
-
- for (i = 0; i < num_xmit_extbuf; i++) {
- pxframe = kzalloc(sizeof(struct xmit_frame), GFP_KERNEL);
- if (!pxframe)
- break;
- INIT_LIST_HEAD(&pxframe->list);
-
- pxframe->padapter = padapter;
- pxframe->frame_tag = NULL_FRAMETAG;
-
- pxframe->pkt = NULL;
-
- pxframe->buf_addr = NULL;
- pxframe->pxmitbuf = NULL;
-
- pxframe->ext_tag = 1;
-
- list_add_tail(&pxframe->list,
- &pxmitpriv->free_xframe_ext_queue.queue);
- }
- pxmitpriv->free_xframe_ext_cnt = i;
-
- /* Init xmit extension buff */
- _rtw_init_queue23a(&pxmitpriv->free_xmit_extbuf_queue);
- INIT_LIST_HEAD(&pxmitpriv->xmitextbuf_list);
-
- for (i = 0; i < num_xmit_extbuf; i++) {
- pxmitbuf = kzalloc(sizeof(struct xmit_buf), GFP_KERNEL);
- if (!pxmitbuf)
- goto fail;
- INIT_LIST_HEAD(&pxmitbuf->list);
- INIT_LIST_HEAD(&pxmitbuf->list2);
-
- pxmitbuf->padapter = padapter;
-
- /* Tx buf allocation may fail sometimes, so sleep and retry. */
- res = rtw_os_xmit_resource_alloc23a(padapter, pxmitbuf,
- max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
- if (res == _FAIL) {
- goto exit;
- }
-
- list_add_tail(&pxmitbuf->list,
- &pxmitpriv->free_xmit_extbuf_queue.queue);
- list_add_tail(&pxmitbuf->list2,
- &pxmitpriv->xmitextbuf_list);
- }
-
- pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
-
- rtw_alloc_hwxmits23a(padapter);
- rtw_init_hwxmits23a(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
-
- for (i = 0; i < 4; i ++)
- pxmitpriv->wmm_para_seq[i] = i;
-
- sema_init(&pxmitpriv->tx_retevt, 0);
-
- pxmitpriv->ack_tx = false;
- mutex_init(&pxmitpriv->ack_tx_mutex);
- rtw_sctx_init23a(&pxmitpriv->ack_tx_ops, 0);
- tasklet_init(&padapter->xmitpriv.xmit_tasklet,
- (void(*)(unsigned long))rtl8723au_xmit_tasklet,
- (unsigned long)padapter);
-
-exit:
-
- return res;
-fail:
- goto exit;
-}
-
-void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv)
-{
- struct rtw_adapter *padapter = pxmitpriv->adapter;
- struct xmit_frame *pxframe, *ptmp;
- struct xmit_buf *pxmitbuf, *ptmp2;
-
- list_for_each_entry_safe(pxframe, ptmp,
- &pxmitpriv->free_xmit_queue.queue, list) {
- list_del_init(&pxframe->list);
- rtw_os_xmit_complete23a(padapter, pxframe);
- kfree(pxframe);
- }
-
- list_for_each_entry_safe(pxmitbuf, ptmp2,
- &pxmitpriv->xmitbuf_list, list2) {
- list_del_init(&pxmitbuf->list2);
- rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
- kfree(pxmitbuf);
- }
-
- /* free xframe_ext queue, the same count as extbuf */
- list_for_each_entry_safe(pxframe, ptmp,
- &pxmitpriv->free_xframe_ext_queue.queue,
- list) {
- list_del_init(&pxframe->list);
- rtw_os_xmit_complete23a(padapter, pxframe);
- kfree(pxframe);
- }
-
- /* free xmit extension buff */
- list_for_each_entry_safe(pxmitbuf, ptmp2,
- &pxmitpriv->xmitextbuf_list, list2) {
- list_del_init(&pxmitbuf->list2);
- rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
- kfree(pxmitbuf);
- }
-
- rtw_free_hwxmits23a(padapter);
- mutex_destroy(&pxmitpriv->ack_tx_mutex);
-}
-
-static void update_attrib_vcs_info(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
-{
- u32 sz;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct sta_info *psta = pattrib->psta;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
- }
-
- if (psta == NULL) {
-		DBG_8723A("%s, psta == NULL\n", __func__);
- return;
- }
-
- if (!(psta->state &_FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
- return;
- }
-
- if (pattrib->nr_frags != 1)
- sz = padapter->xmitpriv.frag_len;
- else /* no frag */
- sz = pattrib->last_txcmdsz;
-
- /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
- /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */
- /* Other fragments are protected by previous fragment. */
- /* So we only need to check the length of first fragment. */
- if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
- if (sz > padapter->registrypriv.rts_thresh) {
- pattrib->vcs_mode = RTS_CTS;
- } else {
- if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
- else
- pattrib->vcs_mode = NONE_VCS;
- }
- } else {
- while (true) {
- /* IOT action */
- if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS &&
- pattrib->ampdu_en &&
- padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_CCMP) {
- pattrib->vcs_mode = CTS_TO_SELF;
- break;
- }
-
- /* check ERP protection */
- if (psta->rtsen || psta->cts2self) {
- if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
-
- break;
- }
-
- /* check HT op mode */
- if (pattrib->ht_en) {
- u8 HTOpMode = pmlmeinfo->HT_protection;
-
- if ((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
- (!pmlmeext->cur_bwmode && HTOpMode == 3)) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
- }
-
- /* check rts */
- if (sz > padapter->registrypriv.rts_thresh) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
-
- /* to do list: check MIMO power save condition. */
-
- /* check AMPDU aggregation for TXOP */
- if (pattrib->ampdu_en) {
- pattrib->vcs_mode = RTS_CTS;
- break;
- }
-
- pattrib->vcs_mode = NONE_VCS;
- break;
- }
- }
-}
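
/*
 * Editor's standalone sketch (not driver code) of the protection decision
 * described in the comment inside update_attrib_vcs_info() above: the RTS
 * threshold is compared against the first MPDU of the MSDU, and only that
 * fragment carries a protection frame.  Names and fallbacks are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

enum vcs_mode_ex { NONE_VCS_EX, RTS_CTS_EX, CTS_TO_SELF_EX };

static enum vcs_mode_ex pick_protection(unsigned int first_frag_len,
					unsigned int rts_thresh,
					bool erp_rts, bool erp_cts2self)
{
	if (first_frag_len > rts_thresh)
		return RTS_CTS_EX;	/* MPDU longer than RTS threshold */
	if (erp_rts)
		return RTS_CTS_EX;	/* ERP protection requested by AP */
	if (erp_cts2self)
		return CTS_TO_SELF_EX;
	return NONE_VCS_EX;
}

int main(void)
{
	/* 2400-byte first fragment vs. the common 2347-byte default threshold */
	printf("mode = %d\n", pick_protection(2400, 2347, false, false));
	return 0;
}
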
-
-static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta)
-{
- /*if (psta->rtsen)
- pattrib->vcs_mode = RTS_CTS;
- else if (psta->cts2self)
- pattrib->vcs_mode = CTS_TO_SELF;
- else
- pattrib->vcs_mode = NONE_VCS;*/
-
- pattrib->mdata = 0;
- pattrib->eosp = 0;
- pattrib->triggered = 0;
-
-	/* qos_en, ht_en, init rate, bw, ch_offset, sgi */
- pattrib->qos_en = psta->qos_option;
-
- pattrib->raid = psta->raid;
- pattrib->ht_en = psta->htpriv.ht_option;
- pattrib->bwmode = psta->htpriv.bwmode;
- pattrib->ch_offset = psta->htpriv.ch_offset;
- pattrib->sgi = psta->htpriv.sgi;
- pattrib->ampdu_en = false;
-
- pattrib->retry_ctrl = false;
-}
-
-u8 qos_acm23a(u8 acm_mask, u8 priority)
-{
- u8 change_priority = priority;
-
- switch (priority) {
- case 0:
- case 3:
- if (acm_mask & BIT(1))
- change_priority = 1;
- break;
- case 1:
- case 2:
- break;
- case 4:
- case 5:
- if (acm_mask & BIT(2))
- change_priority = 0;
- break;
- case 6:
- case 7:
- if (acm_mask & BIT(3))
- change_priority = 5;
- break;
- default:
- DBG_8723A("qos_acm23a(): invalid pattrib->priority: %d!!!\n",
- priority);
- change_priority = 0;
- break;
- }
-
- return change_priority;
-}
-
-static void set_qos(struct sk_buff *skb, struct pkt_attrib *pattrib)
-{
- u8 *pframe = skb->data;
- struct iphdr *ip_hdr;
- u8 UserPriority = 0;
-
- /* get UserPriority from IP hdr */
- if (pattrib->ether_type == ETH_P_IP) {
- ip_hdr = (struct iphdr *)(pframe + ETH_HLEN);
- UserPriority = ip_hdr->tos >> 5;
- } else if (pattrib->ether_type == ETH_P_PAE) {
- /* "When priority processing of data frames is supported, */
- /* a STA's SME should send EAPOL-Key frames at the highest
- priority." */
- UserPriority = 7;
- }
-
- pattrib->priority = UserPriority;
- pattrib->hdrlen = sizeof(struct ieee80211_qos_hdr);
- pattrib->type = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
-}
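
/*
 * A small self-contained sketch (editor's addition, not driver code) of the
 * priority mapping set_qos() performs above: the 802.11 user priority is
 * taken from the IPv4 precedence bits (ToS >> 5), and EAPOL frames are forced
 * to the highest priority.  ETH_P_IP_EX/ETH_P_PAE_EX are local stand-ins for
 * the kernel's EtherType constants.
 */
#include <stdio.h>

#define ETH_P_IP_EX	0x0800
#define ETH_P_PAE_EX	0x888e

static unsigned int user_priority(unsigned int ether_type, unsigned char tos)
{
	if (ether_type == ETH_P_PAE_EX)
		return 7;		/* EAPOL: highest priority */
	if (ether_type == ETH_P_IP_EX)
		return tos >> 5;	/* IPv4 precedence -> UP 0..7 */
	return 0;			/* best effort otherwise */
}

int main(void)
{
	printf("ToS 0xb8 (EF) -> UP %u\n", user_priority(ETH_P_IP_EX, 0xb8));
	printf("EAPOL         -> UP %u\n", user_priority(ETH_P_PAE_EX, 0));
	return 0;
}
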
-
-static int update_attrib(struct rtw_adapter *padapter,
- struct sk_buff *skb, struct pkt_attrib *pattrib)
-{
- struct sta_info *psta = NULL;
- int bmcast;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int res = _SUCCESS;
- struct ethhdr *ehdr = (struct ethhdr *) skb->data;
-
- pattrib->ether_type = ntohs(ehdr->h_proto);
-
- ether_addr_copy(pattrib->dst, ehdr->h_dest);
- ether_addr_copy(pattrib->src, ehdr->h_source);
-
- pattrib->pctrl = 0;
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- ether_addr_copy(pattrib->ra, pattrib->dst);
- ether_addr_copy(pattrib->ta, pattrib->src);
- } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- ether_addr_copy(pattrib->ra, get_bssid(pmlmepriv));
- ether_addr_copy(pattrib->ta, pattrib->src);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- ether_addr_copy(pattrib->ra, pattrib->dst);
- ether_addr_copy(pattrib->ta, get_bssid(pmlmepriv));
- }
-
- pattrib->pktlen = skb->len - ETH_HLEN;
-
- if (pattrib->ether_type == ETH_P_IP) {
-		/* For DHCP and ARP packets, transmit at CCK 1M and keep LPS
-		   awake for a while so the DHCP exchange does not fail; a
-		   standalone sketch of the port check follows this function */
- pattrib->dhcp_pkt = 0;
- /* MINIMUM_DHCP_PACKET_SIZE) { */
- if (pattrib->pktlen > 282 + 24) {
- if (pattrib->ether_type == ETH_P_IP) {/* IP header */
- u8 *pframe = skb->data;
-
- pframe += ETH_HLEN;
-
- if ((pframe[21] == 68 && pframe[23] == 67) ||
- (pframe[21] == 67 && pframe[23] == 68)) {
- /* 68 : UDP BOOTP client */
- /* 67 : UDP BOOTP server */
- RT_TRACE(_module_rtl871x_xmit_c_,
- _drv_err_,
- "======================update_attrib: get DHCP Packet\n");
- pattrib->dhcp_pkt = 1;
- }
- }
- }
- } else if (pattrib->ether_type == ETH_P_PAE) {
- DBG_8723A_LEVEL(_drv_always_, "send eapol packet\n");
- }
-
- if ((pattrib->ether_type == ETH_P_PAE) || (pattrib->dhcp_pkt == 1)) {
- rtw_set_scan_deny(padapter, 3000);
- }
-
- /* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
- if ((pattrib->ether_type == ETH_P_ARP) ||
- (pattrib->ether_type == ETH_P_PAE) || (pattrib->dhcp_pkt == 1)) {
- rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
- }
-
- bmcast = is_multicast_ether_addr(pattrib->ra);
-
- /* get sta_info */
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo23a(padapter);
- } else {
- psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
-		if (psta == NULL) { /* if we cannot get psta => drop the pkt */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
- "update_attrib => get sta_info fail, ra:%pM\n",
- pattrib->ra);
- res = _FAIL;
- goto exit;
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
- (!(psta->state & _FW_LINKED))) {
- res = _FAIL;
- goto exit;
- }
- }
-
- if (psta) {
- pattrib->mac_id = psta->mac_id;
- /* DBG_8723A("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
- pattrib->psta = psta;
- } else {
- /* if we cannot get psta => drop the pkt */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
- "update_attrib => get sta_info fail, ra:%pM\n",
- pattrib->ra);
- res = _FAIL;
- goto exit;
- }
-
- pattrib->ack_policy = 0;
- /* get ether_hdr_len */
-
- /* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
- pattrib->pkt_hdrlen = ETH_HLEN;
-
- pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
- pattrib->type = IEEE80211_FTYPE_DATA;
- pattrib->priority = 0;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_ADHOC_STATE |
- WIFI_ADHOC_MASTER_STATE)) {
- if (psta->qos_option)
- set_qos(skb, pattrib);
- } else {
- if (pmlmepriv->qos_option) {
- set_qos(skb, pattrib);
-
- if (pmlmepriv->acm_mask != 0) {
- pattrib->priority = qos_acm23a(pmlmepriv->acm_mask,
- pattrib->priority);
- }
- }
- }
-
- if (psta->ieee8021x_blocked == true) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "psta->ieee8021x_blocked == true\n");
-
- pattrib->encrypt = 0;
-
- if ((pattrib->ether_type != ETH_P_PAE) &&
- !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "psta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n",
- pattrib->ether_type);
- res = _FAIL;
- goto exit;
- }
- } else {
- GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
-
- switch (psecuritypriv->dot11AuthAlgrthm) {
- case dot11AuthAlgrthm_Open:
- case dot11AuthAlgrthm_Shared:
- case dot11AuthAlgrthm_Auto:
- pattrib->key_idx =
- (u8)psecuritypriv->dot11PrivacyKeyIndex;
- break;
- case dot11AuthAlgrthm_8021X:
- if (bmcast)
- pattrib->key_idx =
- (u8)psecuritypriv->dot118021XGrpKeyid;
- else
- pattrib->key_idx = 0;
- break;
- default:
- pattrib->key_idx = 0;
- break;
- }
-
- }
-
- switch (pattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- pattrib->iv_len = IEEE80211_WEP_IV_LEN;
- pattrib->icv_len = IEEE80211_WEP_ICV_LEN;
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- pattrib->iv_len = IEEE80211_TKIP_IV_LEN;
- pattrib->icv_len = IEEE80211_TKIP_ICV_LEN;
-
- if (!padapter->securitypriv.busetkipkey) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "padapter->securitypriv.busetkipkey(%d) == false drop packet\n",
- padapter->securitypriv.busetkipkey);
- res = _FAIL;
- goto exit;
- }
-
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "pattrib->encrypt =%d (WLAN_CIPHER_SUITE_CCMP)\n",
- pattrib->encrypt);
- pattrib->iv_len = IEEE80211_CCMP_HDR_LEN;
- pattrib->icv_len = IEEE80211_CCMP_MIC_LEN;
- break;
-
- default:
- pattrib->iv_len = 0;
- pattrib->icv_len = 0;
- break;
- }
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "update_attrib: encrypt =%d\n", pattrib->encrypt);
-
- if (pattrib->encrypt && !psecuritypriv->hw_decrypted) {
- pattrib->bswenc = true;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "update_attrib: encrypt =%d bswenc = true\n",
- pattrib->encrypt);
- } else {
- pattrib->bswenc = false;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "update_attrib: bswenc = false\n");
- }
- update_attrib_phy_info(pattrib, psta);
-
-exit:
-
- return res;
-}
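
/*
 * Editor's standalone sketch of the DHCP detection that update_attrib()
 * above approximates by peeking at single payload bytes: with a 20-byte IPv4
 * header and no options, the UDP source and destination ports sit at offsets
 * 20 and 22 of the IP packet, and a BOOTP/DHCP exchange uses ports 67
 * (server) and 68 (client).  Not driver code; offsets are computed from the
 * IHL field instead of being hard-coded.
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned int rd16be(const unsigned char *p)
{
	return ((unsigned int)p[0] << 8) | p[1];
}

/* ip points at the start of the IPv4 header; len is the IP packet length */
static bool is_dhcp(const unsigned char *ip, unsigned int len)
{
	unsigned int ihl, sport, dport;

	if (len < 28 || (ip[0] >> 4) != 4)
		return false;
	ihl = (ip[0] & 0x0f) * 4;		/* IP header length in bytes */
	if (ip[9] != 17 || len < ihl + 8)	/* protocol 17 = UDP */
		return false;
	sport = rd16be(ip + ihl);
	dport = rd16be(ip + ihl + 2);
	return (sport == 68 && dport == 67) || (sport == 67 && dport == 68);
}

int main(void)
{
	unsigned char pkt[28] = { 0x45, 0, 0, 28, 0, 0, 0, 0, 64, 17, 0, 0,
				  0, 0, 0, 0, 255, 255, 255, 255,
				  0, 68, 0, 67, 0, 8, 0, 0 };
	printf("dhcp? %d\n", is_dhcp(pkt, sizeof(pkt)));
	return 0;
}
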
-
-static int xmitframe_addmic(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe) {
- struct mic_data micdata;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int curfragnum, length;
- u8 *pframe, *payload, mic[8];
- u8 priority[4]= {0x0, 0x0, 0x0, 0x0};
- u8 hw_hdr_offset = 0;
- int bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (pattrib->psta) {
- stainfo = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- stainfo = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
- }
-
- if (!stainfo) {
-		DBG_8723A("%s, psta == NULL\n", __func__);
- return _FAIL;
- }
-
- if (!(stainfo->state &_FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
- __func__, stainfo->state);
- return _FAIL;
- }
-
- hw_hdr_offset = TXDESC_OFFSET;
-
- if (pattrib->encrypt == WLAN_CIPHER_SUITE_TKIP) {
- /* encode mic code */
- if (stainfo) {
- u8 null_key[16]={0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0};
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
-
- if (bmcst) {
- if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
- return _FAIL;
- }
- /* start to calculate the mic code */
- rtw_secmicsetkey23a(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
- } else {
- if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0],
- null_key, 16)) {
- return _FAIL;
- }
- /* start to calculate the mic code */
- rtw_secmicsetkey23a(&micdata, &stainfo->dot11tkiptxmickey.skey[0]);
- }
-
- if (pframe[1] & 1) { /* ToDS == 1 */
- /* DA */
- rtw_secmicappend23a(&micdata, &pframe[16], 6);
- if (pframe[1] & 2) /* From Ds == 1 */
- rtw_secmicappend23a(&micdata,
- &pframe[24], 6);
- else
- rtw_secmicappend23a(&micdata,
- &pframe[10], 6);
- } else { /* ToDS == 0 */
- /* DA */
- rtw_secmicappend23a(&micdata, &pframe[4], 6);
- if (pframe[1] & 2) /* From Ds == 1 */
- rtw_secmicappend23a(&micdata,
- &pframe[16], 6);
- else
- rtw_secmicappend23a(&micdata,
- &pframe[10], 6);
- }
-
- /* if (pmlmepriv->qos_option == 1) */
- if (pattrib->qos_en)
- priority[0] = (u8)pxmitframe->attrib.priority;
-
- rtw_secmicappend23a(&micdata, &priority[0], 4);
-
- payload = pframe;
-
- for (curfragnum = 0; curfragnum < pattrib->nr_frags;
- curfragnum++) {
- payload = PTR_ALIGN(payload, 4);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "=== curfragnum =%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
- curfragnum, *payload, *(payload + 1),
- *(payload + 2), *(payload + 3),
- *(payload + 4), *(payload + 5),
- *(payload + 6), *(payload + 7));
-
- payload = payload + pattrib->hdrlen +
- pattrib->iv_len;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "curfragnum =%d pattrib->hdrlen =%d pattrib->iv_len =%d\n",
- curfragnum,
- pattrib->hdrlen, pattrib->iv_len);
- if ((curfragnum + 1) == pattrib->nr_frags) {
- length = pattrib->last_txcmdsz -
- pattrib->hdrlen -
- pattrib->iv_len -
- ((pattrib->bswenc) ?
- pattrib->icv_len : 0);
- rtw_secmicappend23a(&micdata, payload,
- length);
- payload = payload + length;
- } else {
- length = pxmitpriv->frag_len -
- pattrib->hdrlen -
- pattrib->iv_len -
- ((pattrib->bswenc) ?
- pattrib->icv_len : 0);
- rtw_secmicappend23a(&micdata, payload,
- length);
- payload = payload + length +
- pattrib->icv_len;
- RT_TRACE(_module_rtl871x_xmit_c_,
- _drv_err_,
- "curfragnum =%d length =%d pattrib->icv_len =%d\n",
- curfragnum, length,
- pattrib->icv_len);
- }
- }
- rtw_secgetmic23a(&micdata, &mic[0]);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "xmitframe_addmic: before add mic code!!\n");
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "xmitframe_addmic: pattrib->last_txcmdsz =%d!!!\n",
- pattrib->last_txcmdsz);
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "xmitframe_addmic: mic[0]= 0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\nmic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n",
- mic[0], mic[1], mic[2], mic[3],
- mic[4], mic[5], mic[6], mic[7]);
- /* add mic code and add the mic code length
- in last_txcmdsz */
-
- memcpy(payload, &mic[0], 8);
- pattrib->last_txcmdsz += 8;
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "======== last pkt ========\n");
- payload = payload - pattrib->last_txcmdsz + 8;
- for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz;
- curfragnum = curfragnum + 8) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "%.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x\n",
- *(payload + curfragnum),
- *(payload + curfragnum + 1),
- *(payload + curfragnum + 2),
- *(payload + curfragnum + 3),
- *(payload + curfragnum + 4),
- *(payload + curfragnum + 5),
- *(payload + curfragnum + 6),
- *(payload + curfragnum + 7));
- }
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "xmitframe_addmic: rtw_get_stainfo23a ==NULL!!!\n");
- }
- }
-
- return _SUCCESS;
-}
-
-static int xmitframe_swencrypt(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
- if (pattrib->bswenc) {
- /* DBG_8723A("start xmitframe_swencrypt\n"); */
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_,
- "### xmitframe_swencrypt\n");
- switch (pattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- rtw_wep_encrypt23a(padapter, pxmitframe);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- rtw_tkip_encrypt23a(padapter, pxmitframe);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- rtw_aes_encrypt23a(padapter, pxmitframe);
- break;
- default:
- break;
- }
-
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
- "### xmitframe_hwencrypt\n");
- }
-
- return _SUCCESS;
-}
-
-static int rtw_make_wlanhdr(struct rtw_adapter *padapter, u8 *hdr,
- struct pkt_attrib *pattrib)
-{
- struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
- struct ieee80211_qos_hdr *qoshdr;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 qos_option = false;
- int res = _SUCCESS;
-
- struct sta_info *psta;
-
- int bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- if (bmcst) {
- psta = rtw_get_bcmc_stainfo23a(padapter);
- } else {
- psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
- }
- }
-
- if (psta == NULL) {
-		DBG_8723A("%s, psta == NULL\n", __func__);
- return _FAIL;
- }
-
- if (!(psta->state &_FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
- return _FAIL;
- }
-
- memset(hdr, 0, WLANHDR_OFFSET);
-
- pwlanhdr->frame_control = cpu_to_le16(pattrib->type);
-
- if (pattrib->type & IEEE80211_FTYPE_DATA) {
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* to_ds = 1, fr_ds = 0; */
- /* Data transfer to AP */
- pwlanhdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_TODS);
- ether_addr_copy(pwlanhdr->addr1, get_bssid(pmlmepriv));
- ether_addr_copy(pwlanhdr->addr2, pattrib->src);
- ether_addr_copy(pwlanhdr->addr3, pattrib->dst);
-
- if (pmlmepriv->qos_option)
- qos_option = true;
-
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* to_ds = 0, fr_ds = 1; */
- pwlanhdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_FROMDS);
- ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
- ether_addr_copy(pwlanhdr->addr2, get_bssid(pmlmepriv));
- ether_addr_copy(pwlanhdr->addr3, pattrib->src);
-
- if (psta->qos_option)
- qos_option = true;
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
- ether_addr_copy(pwlanhdr->addr2, pattrib->src);
- ether_addr_copy(pwlanhdr->addr3, get_bssid(pmlmepriv));
-
- if (psta->qos_option)
- qos_option = true;
- }
- else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "fw_state:%x is not allowed to xmit frame\n",
- get_fwstate(pmlmepriv));
- res = _FAIL;
- goto exit;
- }
- if (pattrib->mdata)
- pwlanhdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- if (pattrib->encrypt)
- pwlanhdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- if (qos_option) {
- qoshdr = (struct ieee80211_qos_hdr *)hdr;
-
- qoshdr->qos_ctrl = cpu_to_le16(
- pattrib->priority & IEEE80211_QOS_CTL_TID_MASK);
-
- qoshdr->qos_ctrl |= cpu_to_le16(
- (pattrib->ack_policy << 5) &
- IEEE80211_QOS_CTL_ACK_POLICY_MASK);
-
- if (pattrib->eosp)
- qoshdr->qos_ctrl |=
- cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
- }
- /* TODO: fill HT Control Field */
-
- /* Update Seq Num will be handled by f/w */
- if (psta) {
- psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
- pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
-			/* We don't need to worry about frag bits here */
- pwlanhdr->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(
- pattrib->seqnum));
- /* check if enable ampdu */
- if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
- if (pattrib->priority >= 16)
- printk(KERN_WARNING "%s: Invalid "
- "pattrib->priority %i\n",
- __func__, pattrib->priority);
- if (psta->htpriv.agg_enable_bitmap &
- BIT(pattrib->priority))
- pattrib->ampdu_en = true;
- }
- /* re-check if enable ampdu by BA_starting_seqctrl */
- if (pattrib->ampdu_en) {
- u16 tx_seq;
-
- tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
-
- /* check BA_starting_seqctrl */
- if (SN_LESS(pattrib->seqnum, tx_seq)) {
- /* DBG_8723A("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
- pattrib->ampdu_en = false;/* AGG BK */
- } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
- pattrib->ampdu_en = true;/* AGG EN */
- } else {
- /* DBG_8723A("tx ampdu over run\n"); */
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
- pattrib->ampdu_en = true;/* AGG EN */
- }
- }
- }
- }
-exit:
- return res;
-}
-
-s32 rtw_txframes_pending23a(struct rtw_adapter *padapter)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- return (!list_empty(&pxmitpriv->be_pending.queue)) ||
- (!list_empty(&pxmitpriv->bk_pending.queue)) ||
- (!list_empty(&pxmitpriv->vi_pending.queue)) ||
- (!list_empty(&pxmitpriv->vo_pending.queue));
-}
-
-s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
- struct pkt_attrib *pattrib)
-{
- struct sta_info *psta;
- struct tx_servq *ptxservq;
- int priority = pattrib->priority;
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(&padapter->stapriv, &pattrib->ra[0]);
- }
- if (psta == NULL) {
-		DBG_8723A("%s, psta == NULL\n", __func__);
- return 0;
- }
- if (!(psta->state &_FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
- psta->state);
- return 0;
- }
- switch (priority) {
- case 1:
- case 2:
- ptxservq = &psta->sta_xmitpriv.bk_q;
- break;
- case 4:
- case 5:
- ptxservq = &psta->sta_xmitpriv.vi_q;
- break;
- case 6:
- case 7:
- ptxservq = &psta->sta_xmitpriv.vo_q;
- break;
- case 0:
- case 3:
- default:
- ptxservq = &psta->sta_xmitpriv.be_q;
- break;
- }
- return ptxservq->qcnt;
-}
-
-/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
- * IEEE LLC/SNAP header contains 8 octets
- * First 3 octets comprise the LLC portion
- * SNAP portion, 5 octets, is divided into two fields:
- * Organizationally Unique Identifier(OUI), 3 octets,
- * type, defined by that organization, 2 octets.
- */
-static int rtw_put_snap(u8 *data, u16 h_proto)
-{
- if (h_proto == ETH_P_IPX || h_proto == ETH_P_AARP)
- ether_addr_copy(data, bridge_tunnel_header);
- else
- ether_addr_copy(data, rfc1042_header);
-
- data += ETH_ALEN;
- put_unaligned_be16(h_proto, data);
- return ETH_ALEN + sizeof(u16);
-}
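
/*
 * Editor's self-contained sketch of the 8-octet LLC/SNAP header that
 * rtw_put_snap() above writes in front of each MSDU, as described in the
 * comment before it: DSAP/SSAP/CTRL (AA AA 03), a 3-octet OUI, then the
 * 2-octet EtherType.  The RFC 1042 and bridge-tunnel OUIs below match the
 * well-known encapsulation headers the driver copies from the kernel
 * (rfc1042_header / bridge_tunnel_header).
 */
#include <stdio.h>
#include <string.h>

static int put_snap(unsigned char *data, unsigned int h_proto)
{
	static const unsigned char rfc1042[6] =
		{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	static const unsigned char bridge_tunnel[6] =
		{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

	/* IPX (0x8137) and AppleTalk ARP (0x80f3) use bridge-tunnel encap */
	if (h_proto == 0x8137 || h_proto == 0x80f3)
		memcpy(data, bridge_tunnel, 6);
	else
		memcpy(data, rfc1042, 6);

	data[6] = (h_proto >> 8) & 0xff;	/* EtherType, big endian */
	data[7] = h_proto & 0xff;
	return 8;
}

int main(void)
{
	unsigned char hdr[8];
	int i, n = put_snap(hdr, 0x0800);	/* IPv4 */

	for (i = 0; i < n; i++)
		printf("%02x ", hdr[i]);	/* aa aa 03 00 00 00 08 00 */
	printf("\n");
	return 0;
}
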
-
-/*
-
-This sub-routine will perform all the following:
-
-1. remove 802.3 header.
-2. create wlan_header, based on the info in pxmitframe
-3. append sta's iv/ext-iv
-4. append LLC
-5. move frag chunk from pframe to pxmitframe->mem
-6. apply sw-encrypt, if necessary.
-
-*/
-int rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *skb,
- struct xmit_frame *pxmitframe)
-{
- struct sta_info *psta;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct ieee80211_hdr *hdr;
- s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
- u8 *pframe, *mem_start;
- u8 hw_hdr_offset;
- u8 *pbuf_start;
- u8 *pdata = skb->data;
- int data_len = skb->len;
- s32 bmcst = is_multicast_ether_addr(pattrib->ra);
- int res = _SUCCESS;
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra);
- }
-
- if (!psta) {
-		DBG_8723A("%s, psta == NULL\n", __func__);
- return _FAIL;
- }
-
- if (!(psta->state &_FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n",
- __func__, psta->state);
- return _FAIL;
- }
-
- if (!pxmitframe->buf_addr) {
- DBG_8723A("==> %s buf_addr == NULL\n", __func__);
- return _FAIL;
- }
-
- pbuf_start = pxmitframe->buf_addr;
-
- hw_hdr_offset = TXDESC_OFFSET;
-
- mem_start = pbuf_start + hw_hdr_offset;
-
- if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
- res = _FAIL;
- goto exit;
- }
-
- pdata += pattrib->pkt_hdrlen;
- data_len -= pattrib->pkt_hdrlen;
-
- frg_inx = 0;
- frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
-
- while (1) {
- llc_sz = 0;
-
- mpdu_len = frg_len;
-
- pframe = mem_start;
- hdr = (struct ieee80211_hdr *)mem_start;
-
- pframe += pattrib->hdrlen;
- mpdu_len -= pattrib->hdrlen;
-
-		/* add the IV (and extended IV), if necessary... */
- if (pattrib->iv_len) {
- if (psta) {
- switch (pattrib->encrypt) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- WEP_IV(pattrib->iv, psta->dot11txpn,
- pattrib->key_idx);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- if (bmcst)
- TKIP_IV(pattrib->iv,
- psta->dot11txpn,
- pattrib->key_idx);
- else
- TKIP_IV(pattrib->iv,
- psta->dot11txpn, 0);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- if (bmcst)
- AES_IV(pattrib->iv,
- psta->dot11txpn,
- pattrib->key_idx);
- else
- AES_IV(pattrib->iv,
- psta->dot11txpn, 0);
- break;
- }
- }
-
- memcpy(pframe, pattrib->iv, pattrib->iv_len);
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
-			 "rtw_xmitframe_coalesce23a: keyid =%d pattrib->iv[3]=%.2x pframe =%.2x %.2x %.2x %.2x\n",
- padapter->securitypriv.dot11PrivacyKeyIndex,
- pattrib->iv[3], *pframe, *(pframe+1),
- *(pframe+2), *(pframe+3));
- pframe += pattrib->iv_len;
- mpdu_len -= pattrib->iv_len;
- }
- if (frg_inx == 0) {
- llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
- pframe += llc_sz;
- mpdu_len -= llc_sz;
- }
-
- if (pattrib->icv_len > 0 && pattrib->bswenc)
- mpdu_len -= pattrib->icv_len;
-
- if (bmcst)
-			/* don't fragment broadcast/multicast packets */
- mem_sz = min_t(s32, data_len, pattrib->pktlen);
- else
- mem_sz = min_t(s32, data_len, mpdu_len);
-
- memcpy(pframe, pdata, mem_sz);
-
- pframe += mem_sz;
- pdata += mem_sz;
- data_len -= mem_sz;
-
- if ((pattrib->icv_len >0) && (pattrib->bswenc)) {
- memcpy(pframe, pattrib->icv, pattrib->icv_len);
- pframe += pattrib->icv_len;
- }
-
- frg_inx++;
-
- if (bmcst || data_len <= 0) {
- pattrib->nr_frags = frg_inx;
-
- pattrib->last_txcmdsz = pattrib->hdrlen +
- pattrib->iv_len +
- ((pattrib->nr_frags == 1) ?
- llc_sz : 0) +
- ((pattrib->bswenc) ?
- pattrib->icv_len : 0) + mem_sz;
- hdr->frame_control &=
- ~cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-
- break;
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
-				 "%s: there is still data left in the packet\n",
- __func__);
- }
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-
- mem_start = PTR_ALIGN(pframe, 4) + hw_hdr_offset;
- memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
- }
-
- if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
- DBG_8723A("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
- res = _FAIL;
- goto exit;
- }
-
- xmitframe_swencrypt(padapter, pxmitframe);
-
- if (bmcst == false)
- update_attrib_vcs_info(padapter, pxmitframe);
- else
- pattrib->vcs_mode = NONE_VCS;
-
-exit:
- return res;
-}
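
/*
 * A simplified, standalone sketch (editor's addition) of the fragmentation
 * arithmetic rtw_xmitframe_coalesce23a() performs above: each MPDU can carry
 * at most (frag_threshold - FCS - 802.11 header - IV - ICV) octets of MSDU
 * data, the first fragment also carries the 8-octet LLC/SNAP header, and
 * broadcast/multicast frames are never fragmented.  This is illustrative
 * arithmetic only, not the driver's exact accounting.
 */
#include <stdio.h>

static unsigned int num_fragments(unsigned int msdu_len,
				  unsigned int frag_thresh,
				  unsigned int hdr_len, unsigned int iv_len,
				  unsigned int icv_len)
{
	unsigned int cap = frag_thresh - 4 - hdr_len - iv_len - icv_len;
	unsigned int first_cap = cap - 8;	/* LLC/SNAP on first fragment */
	unsigned int n = 1;

	if (msdu_len <= first_cap)
		return 1;
	msdu_len -= first_cap;
	n += (msdu_len + cap - 1) / cap;	/* ceiling division */
	return n;
}

int main(void)
{
	/* 3000-byte MSDU, 2346-byte threshold, 26-byte QoS hdr, TKIP IV/ICV */
	printf("fragments: %u\n", num_fragments(3000, 2346, 26, 8, 4));
	return 0;
}
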
-
-void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- uint protection;
- const u8 *p;
-
- switch (pregistrypriv->vrtl_carrier_sense) {
- case DISABLE_VCS:
- pxmitpriv->vcs = NONE_VCS;
- break;
- case ENABLE_VCS:
- break;
- case AUTO_VCS:
- default:
- p = cfg80211_find_ie(WLAN_EID_ERP_INFO, ie, ie_len);
- if (!p)
- pxmitpriv->vcs = NONE_VCS;
- else {
- protection = (*(p + 2)) & BIT(1);
- if (protection) {
- if (pregistrypriv->vcs_type == RTS_CTS)
- pxmitpriv->vcs = RTS_CTS;
- else
- pxmitpriv->vcs = CTS_TO_SELF;
- } else {
- pxmitpriv->vcs = NONE_VCS;
- }
- }
- break;
- }
-}
-
-void rtw_count_tx_stats23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe, int sz)
-{
- struct sta_info *psta = NULL;
- struct stainfo_stats *pstats = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- pxmitpriv->tx_bytes += sz;
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
-
- psta = pxmitframe->attrib.psta;
- if (psta) {
- pstats = &psta->sta_stats;
- pstats->tx_pkts++;
- pstats->tx_bytes += sz;
- }
- }
-}
-
-struct xmit_buf *rtw_alloc_xmitbuf23a_ext(struct xmit_priv *pxmitpriv)
-{
- unsigned long irqL;
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *phead;
- struct rtw_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-
- spin_lock_irqsave(&pfree_queue->lock, irqL);
-
- phead = get_list_head(pfree_queue);
-
- if (!list_empty(phead)) {
- pxmitbuf = list_first_entry(phead, struct xmit_buf, list);
-
- list_del_init(&pxmitbuf->list);
-
- pxmitpriv->free_xmit_extbuf_cnt--;
- pxmitbuf->priv_data = NULL;
- pxmitbuf->ext_tag = true;
-
- if (pxmitbuf->sctx) {
- DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
- rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
- }
- }
-
- spin_unlock_irqrestore(&pfree_queue->lock, irqL);
-
- return pxmitbuf;
-}
-
-int rtw_free_xmitbuf_ext23a(struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf)
-{
- unsigned long irqL;
- struct rtw_queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-
- if (pxmitbuf == NULL)
- return _FAIL;
-
- spin_lock_irqsave(&pfree_queue->lock, irqL);
-
- list_del_init(&pxmitbuf->list);
-
- list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
- pxmitpriv->free_xmit_extbuf_cnt++;
-
- spin_unlock_irqrestore(&pfree_queue->lock, irqL);
-
- return _SUCCESS;
-}
-
-struct xmit_buf *rtw_alloc_xmitbuf23a(struct xmit_priv *pxmitpriv)
-{
- unsigned long irqL;
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *phead;
- struct rtw_queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-
- /* DBG_8723A("+rtw_alloc_xmitbuf23a\n"); */
-
- spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
-
- phead = get_list_head(pfree_xmitbuf_queue);
-
- if (!list_empty(phead)) {
- pxmitbuf = list_first_entry(phead, struct xmit_buf, list);
-
- list_del_init(&pxmitbuf->list);
-
- pxmitpriv->free_xmitbuf_cnt--;
- pxmitbuf->priv_data = NULL;
- pxmitbuf->ext_tag = false;
- pxmitbuf->flags = XMIT_VO_QUEUE;
-
- if (pxmitbuf->sctx) {
- DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
- rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
- }
- }
-
- spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
-
- return pxmitbuf;
-}
-
-int rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
-{
- unsigned long irqL;
- struct rtw_queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-
- /* DBG_8723A("+rtw_free_xmitbuf23a\n"); */
-
- if (pxmitbuf == NULL)
- return _FAIL;
-
- if (pxmitbuf->sctx) {
- DBG_8723A("%s pxmitbuf->sctx is not NULL\n", __func__);
- rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
- }
-
- if (pxmitbuf->ext_tag) {
- rtw_free_xmitbuf_ext23a(pxmitpriv, pxmitbuf);
- } else {
- spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
-
- list_del_init(&pxmitbuf->list);
-
- list_add_tail(&pxmitbuf->list,
- get_list_head(pfree_xmitbuf_queue));
-
- pxmitpriv->free_xmitbuf_cnt++;
- spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
- }
-
- return _SUCCESS;
-}
-
-static void rtw_init_xmitframe(struct xmit_frame *pxframe)
-{
- if (pxframe != NULL) {
- /* default value setting */
- pxframe->buf_addr = NULL;
- pxframe->pxmitbuf = NULL;
-
- memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
-
- pxframe->frame_tag = DATA_FRAMETAG;
-
- pxframe->pkt = NULL;
- pxframe->pkt_offset = 1; /* by default, use pkt_offset when filling the tx descriptor */
-
- pxframe->ack_report = 0;
- }
-}
-
-/*
- * Calling context:
- * 1. OS_TXENTRY
- * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
- *
- * If USE_RXTHREAD is enabled, no locking is needed here; otherwise the
- * free_xmit_queue must be protected with the enter/exit critical-section
- * helpers. Be careful: this can be called from several different contexts.
- */
-static struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
-{
- struct xmit_frame *pxframe;
- struct rtw_queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
-
- spin_lock_bh(&pfree_xmit_queue->lock);
-
- pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
- struct xmit_frame, list);
- if (!pxframe) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_alloc_xmitframe:%d\n",
- pxmitpriv->free_xmitframe_cnt);
- } else {
- list_del_init(&pxframe->list);
- pxmitpriv->free_xmitframe_cnt--;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_alloc_xmitframe():free_xmitframe_cnt =%d\n",
- pxmitpriv->free_xmitframe_cnt);
- }
-
- spin_unlock_bh(&pfree_xmit_queue->lock);
-
- rtw_init_xmitframe(pxframe);
-
- return pxframe;
-}
-
-struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv)
-{
- struct xmit_frame *pxframe;
- struct rtw_queue *queue = &pxmitpriv->free_xframe_ext_queue;
-
- spin_lock_bh(&queue->lock);
-
- pxframe = list_first_entry_or_null(&queue->queue,
- struct xmit_frame, list);
- if (!pxframe) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_alloc_xmitframe23a_ext:%d\n",
- pxmitpriv->free_xframe_ext_cnt);
- } else {
- list_del_init(&pxframe->list);
- pxmitpriv->free_xframe_ext_cnt--;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_alloc_xmitframe23a_ext():free_xmitframe_cnt =%d\n",
- pxmitpriv->free_xframe_ext_cnt);
- }
-
- spin_unlock_bh(&queue->lock);
-
- rtw_init_xmitframe(pxframe);
-
- return pxframe;
-}
-
-s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
-{
- struct rtw_queue *queue = NULL;
- struct rtw_adapter *padapter = pxmitpriv->adapter;
- struct sk_buff *pndis_pkt = NULL;
-
- if (pxmitframe == NULL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "====== rtw_free_xmitframe23a():pxmitframe == NULL!!!!!!!!!!\n");
- goto exit;
- }
-
- if (pxmitframe->pkt) {
- pndis_pkt = pxmitframe->pkt;
- pxmitframe->pkt = NULL;
- }
-
- if (pxmitframe->ext_tag == 0)
- queue = &pxmitpriv->free_xmit_queue;
- else if (pxmitframe->ext_tag == 1)
- queue = &pxmitpriv->free_xframe_ext_queue;
-
- if (!queue)
- goto check_pkt_complete;
- spin_lock_bh(&queue->lock);
-
- list_del_init(&pxmitframe->list);
- list_add_tail(&pxmitframe->list, get_list_head(queue));
- if (pxmitframe->ext_tag == 0) {
- pxmitpriv->free_xmitframe_cnt++;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_,
- "rtw_free_xmitframe23a():free_xmitframe_cnt =%d\n",
- pxmitpriv->free_xmitframe_cnt);
- } else if (pxmitframe->ext_tag == 1) {
- pxmitpriv->free_xframe_ext_cnt++;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_,
- "rtw_free_xmitframe23a():free_xframe_ext_cnt =%d\n",
- pxmitpriv->free_xframe_ext_cnt);
- }
-
- spin_unlock_bh(&queue->lock);
-
-check_pkt_complete:
-
- if (pndis_pkt)
- rtw_os_pkt_complete23a(padapter, pndis_pkt);
-
-exit:
-
- return _SUCCESS;
-}
-
-void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv,
- struct rtw_queue *pframequeue)
-{
- struct list_head *phead;
- struct xmit_frame *pxmitframe, *ptmp;
-
- spin_lock_bh(&pframequeue->lock);
- phead = get_list_head(pframequeue);
- list_for_each_entry_safe(pxmitframe, ptmp, phead, list)
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
- spin_unlock_bh(&pframequeue->lock);
-
-}
-
-int rtw_xmitframe_enqueue23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- if (rtw_xmit23a_classifier(padapter, pxmitframe) == _FAIL) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "rtw_xmitframe_enqueue23a: drop xmit pkt for classifier fail\n");
- return _FAIL;
- }
-
- return _SUCCESS;
-}
-
-static struct xmit_frame *
-dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit,
- struct tx_servq *ptxservq, struct rtw_queue *pframe_queue)
-{
- struct list_head *phead;
- struct xmit_frame *pxmitframe = NULL;
-
- phead = get_list_head(pframe_queue);
-
- if (!list_empty(phead)) {
- pxmitframe = list_first_entry(phead, struct xmit_frame, list);
- list_del_init(&pxmitframe->list);
- ptxservq->qcnt--;
- }
- return pxmitframe;
-}
-
-struct xmit_frame *
-rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i,
- int entry)
-{
- struct list_head *sta_phead;
- struct hw_xmit *phwxmit;
- struct tx_servq *ptxservq = NULL, *ptmp;
- struct rtw_queue *pframe_queue = NULL;
- struct xmit_frame *pxmitframe = NULL;
- struct rtw_adapter *padapter = pxmitpriv->adapter;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- int i;
- int inx[4] = { 0, 1, 2, 3 };
-
- if (pregpriv->wifi_spec == 1) {
- int j;
-
- for (j = 0; j < 4; j++)
- inx[j] = pxmitpriv->wmm_para_seq[j];
- }
-
- spin_lock_bh(&pxmitpriv->lock);
-
- for (i = 0; i < entry; i++) {
- phwxmit = phwxmit_i + inx[i];
-
- sta_phead = get_list_head(phwxmit->sta_queue);
- list_for_each_entry_safe(ptxservq, ptmp, sta_phead,
- tx_pending) {
- pframe_queue = &ptxservq->sta_pending;
-
- pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
-
- if (pxmitframe) {
- phwxmit->accnt--;
-
- /* Remove the sta node when there are no pending packets. */
- /* This must be done after fetching the next entry and
- before breaking out of the loop. */
- if (list_empty(&pframe_queue->queue))
- list_del_init(&ptxservq->tx_pending);
- goto exit;
- }
- }
- }
-exit:
- spin_unlock_bh(&pxmitpriv->lock);
- return pxmitframe;
-}
-
-struct tx_servq *rtw_get_sta_pending23a(struct rtw_adapter *padapter, struct sta_info *psta, int up, u8 *ac)
-{
- struct tx_servq *ptxservq = NULL;
-
- switch (up) {
- case 1:
- case 2:
- ptxservq = &psta->sta_xmitpriv.bk_q;
- *(ac) = 3;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_get_sta_pending23a : BK\n");
- break;
- case 4:
- case 5:
- ptxservq = &psta->sta_xmitpriv.vi_q;
- *(ac) = 1;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_get_sta_pending23a : VI\n");
- break;
- case 6:
- case 7:
- ptxservq = &psta->sta_xmitpriv.vo_q;
- *(ac) = 0;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_get_sta_pending23a : VO\n");
- break;
- case 0:
- case 3:
- default:
- ptxservq = &psta->sta_xmitpriv.be_q;
- *(ac) = 2;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_get_sta_pending23a : BE\n");
- break;
- }
- return ptxservq;
-}
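/*
 * The switch above encodes the usual 802.11 user-priority to access-category
 * mapping (1,2 -> BK, 0,3 -> BE, 4,5 -> VI, 6,7 -> VO).  A minimal sketch of
 * the same mapping as a lookup table; up_to_hwxmit[] is illustrative only and
 * simply mirrors the *(ac) assignments in the switch.
 */
static const u8 up_to_hwxmit[8] = {
        2,      /* UP 0 -> BE */
        3,      /* UP 1 -> BK */
        3,      /* UP 2 -> BK */
        2,      /* UP 3 -> BE */
        1,      /* UP 4 -> VI */
        1,      /* UP 5 -> VI */
        0,      /* UP 6 -> VO */
        0,      /* UP 7 -> VO */
};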
-
-/*
- * Enqueue pxmitframe on the proper per-AC service queue and, if needed,
- * link that service queue onto the corresponding xx_pending list.
- */
-int rtw_xmit23a_classifier(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- struct sta_info *psta;
- struct tx_servq *ptxservq;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
- u8 ac_index;
- int res = _SUCCESS;
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
- }
- if (psta == NULL) {
- res = _FAIL;
- DBG_8723A("rtw_xmit23a_classifier: psta == NULL\n");
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "rtw_xmit23a_classifier: psta == NULL\n");
- goto exit;
- }
- if (!(psta->state & _FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
- psta->state);
- return _FAIL;
- }
- ptxservq = rtw_get_sta_pending23a(padapter, psta, pattrib->priority,
- (u8 *)(&ac_index));
-
- if (list_empty(&ptxservq->tx_pending)) {
- list_add_tail(&ptxservq->tx_pending,
- get_list_head(phwxmits[ac_index].sta_queue));
- }
-
- list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
- ptxservq->qcnt++;
- phwxmits[ac_index].accnt++;
-exit:
- return res;
-}
-
-void rtw_alloc_hwxmits23a(struct rtw_adapter *padapter)
-{
- struct hw_xmit *hwxmits;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int size;
-
- pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
-
- size = sizeof(struct hw_xmit) * (pxmitpriv->hwxmit_entry + 1);
- pxmitpriv->hwxmits = kzalloc(size, GFP_KERNEL);
-
- hwxmits = pxmitpriv->hwxmits;
-
- if (pxmitpriv->hwxmit_entry == 5) {
- /* pxmitpriv->bmc_txqueue.head = 0; */
- /* hwxmits[0].phwtxqueue = &pxmitpriv->bmc_txqueue; */
- hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
-
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[1].phwtxqueue = &pxmitpriv->vo_txqueue; */
- hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
-
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[2].phwtxqueue = &pxmitpriv->vi_txqueue; */
- hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
-
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3].phwtxqueue = &pxmitpriv->bk_txqueue; */
- hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
-
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[4].phwtxqueue = &pxmitpriv->be_txqueue; */
- hwxmits[4].sta_queue = &pxmitpriv->be_pending;
- } else if (pxmitpriv->hwxmit_entry == 4) {
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[0].phwtxqueue = &pxmitpriv->vo_txqueue; */
- hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
-
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[1].phwtxqueue = &pxmitpriv->vi_txqueue; */
- hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
-
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[2].phwtxqueue = &pxmitpriv->be_txqueue; */
- hwxmits[2].sta_queue = &pxmitpriv->be_pending;
-
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3].phwtxqueue = &pxmitpriv->bk_txqueue; */
- hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
- }
-}
-
-void rtw_free_hwxmits23a(struct rtw_adapter *padapter)
-{
- struct hw_xmit *hwxmits;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- hwxmits = pxmitpriv->hwxmits;
- kfree(hwxmits);
-}
-
-void rtw_init_hwxmits23a(struct hw_xmit *phwxmit, int entry)
-{
- int i;
-
- for (i = 0; i < entry; i++, phwxmit++)
- phwxmit->accnt = 0;
-}
-
-u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe)
-{
- u32 addr;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
-
- switch (pattrib->qsel) {
- case 0:
- case 3:
- addr = BE_QUEUE_INX;
- break;
- case 1:
- case 2:
- addr = BK_QUEUE_INX;
- break;
- case 4:
- case 5:
- addr = VI_QUEUE_INX;
- break;
- case 6:
- case 7:
- addr = VO_QUEUE_INX;
- break;
- case 0x10:
- addr = BCN_QUEUE_INX;
- break;
- case 0x11:/* BC/MC in PS (HIQ) */
- addr = HIGH_QUEUE_INX;
- break;
- case 0x12:
- default:
- addr = MGT_QUEUE_INX;
- break;
- }
-
- return addr;
-}
-
-/*
- * The main transmit (tx) entry point
- *
- * Return
- * 1 enqueue
- * 0 success, hardware will handle this xmit frame (packet)
- * <0 fail
- */
-int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *skb)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_frame *pxmitframe = NULL;
- int res;
-
- pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
-
- if (pxmitframe == NULL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
- "rtw_xmit23a: no more pxmitframe\n");
- return -1;
- }
-
- res = update_attrib(padapter, skb, &pxmitframe->attrib);
-
- if (res == _FAIL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
- "rtw_xmit23a: update attrib fail\n");
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
- return -1;
- }
- pxmitframe->pkt = skb;
-
- pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
-
-#ifdef CONFIG_8723AU_AP_MODE
- spin_lock_bh(&pxmitpriv->lock);
- if (xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe)) {
- spin_unlock_bh(&pxmitpriv->lock);
- return 1;
- }
- spin_unlock_bh(&pxmitpriv->lock);
-#endif
-
- if (rtl8723au_hal_xmit(padapter, pxmitframe) == false)
- return 1;
-
- return 0;
-}
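/*
 * A minimal sketch of how a caller can act on the return contract documented
 * above (1 = enqueued, 0 = handed to the hardware, < 0 = failure).  The
 * function name and the dev_kfree_skb_any() error handling are illustrative
 * assumptions, not the driver's actual netdev xmit path.
 */
static int example_start_xmit(struct rtw_adapter *padapter, struct sk_buff *skb)
{
        int ret = rtw_xmit23a(padapter, skb);

        if (ret < 0) {
                /* the skb was not consumed by the driver; drop it here */
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        /*
         * ret == 1 (queued) and ret == 0 (sent) both mean the frame is now
         * owned by the driver/hardware.
         */
        return 0;
}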
-
-#if defined(CONFIG_8723AU_AP_MODE)
-
-int xmitframe_enqueue_for_sleeping_sta23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe)
-{
- int ret = false;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return ret;
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__);
- psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
- }
-
- if (psta == NULL) {
- DBG_8723A("%s, psta == NUL\n", __func__);
- return false;
- }
-
- if (!(psta->state & _FW_LINKED)) {
- DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__,
- psta->state);
- return false;
- }
-
- if (pattrib->triggered == 1) {
- if (bmcst)
- pattrib->qsel = 0x11;/* HIQ */
- return ret;
- }
-
- if (bmcst) {
- spin_lock_bh(&psta->sleep_q.lock);
-
- if (pstapriv->sta_dz_bitmap) {
- /* if any station is in power-save mode */
- list_del_init(&pxmitframe->list);
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
- list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
-
- psta->sleepq_len++;
-
- pstapriv->tim_bitmap |= BIT(0);
- pstapriv->sta_dz_bitmap |= BIT(0);
-
- /* DBG_8723A("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
- /* tx bc/mc packets after update bcn */
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
-
- /* spin_unlock_bh(&psta->sleep_q.lock); */
-
- ret = true;
-
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-
- return ret;
-
- }
-
- spin_lock_bh(&psta->sleep_q.lock);
-
- if (psta->state&WIFI_SLEEP_STATE) {
- u8 wmmps_ac = 0;
-
- if (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid)) {
- list_del_init(&pxmitframe->list);
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
- list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
-
- psta->sleepq_len++;
-
- switch (pattrib->priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(0);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(0);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(0);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(0);
- break;
- }
-
- if (wmmps_ac)
- psta->sleepq_ac_len++;
-
- if (((psta->has_legacy_ac) && (!wmmps_ac)) ||
- ((!psta->has_legacy_ac) && (wmmps_ac))) {
- pstapriv->tim_bitmap |= CHKBIT(psta->aid);
-
- if (psta->sleepq_len == 1) {
- /* update BCN for TIM IE */
- update_beacon23a(padapter, WLAN_EID_TIM,
- NULL, false);
- }
- }
-
- /* spin_unlock_bh(&psta->sleep_q.lock); */
-
- /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
- /* */
- /* wakeup_sta_to_xmit23a(padapter, psta); */
- /* */
-
- ret = true;
-
- }
-
- }
-
- spin_unlock_bh(&psta->sleep_q.lock);
-
- return ret;
-}
-
-static void
-dequeue_xmitframes_to_sleeping_queue(struct rtw_adapter *padapter,
- struct sta_info *psta,
- struct rtw_queue *pframequeue)
-{
- int ret;
- struct list_head *phead;
- u8 ac_index;
- struct tx_servq *ptxservq;
- struct pkt_attrib *pattrib;
- struct xmit_frame *pxmitframe, *ptmp;
- struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
-
- phead = get_list_head(pframequeue);
- list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
- ret = xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe);
-
- if (ret == true) {
- pattrib = &pxmitframe->attrib;
-
- ptxservq = rtw_get_sta_pending23a(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
-
- ptxservq->qcnt--;
- phwxmits[ac_index].accnt--;
- }
- }
-}
-
-void stop_sta_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct sta_info *psta_bmc;
- struct sta_xmit_priv *pstaxmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- pstaxmitpriv = &psta->sta_xmitpriv;
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
-
- spin_lock_bh(&pxmitpriv->lock);
-
- psta->state |= WIFI_SLEEP_STATE;
-
- pstapriv->sta_dz_bitmap |= CHKBIT(psta->aid);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&pstaxmitpriv->vo_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&pstaxmitpriv->vi_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta,
- &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
-
- dequeue_xmitframes_to_sleeping_queue(padapter, psta,
- &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&pstaxmitpriv->bk_q.tx_pending);
-
- /* for BC/MC Frames */
- pstaxmitpriv = &psta_bmc->sta_xmitpriv;
- dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc,
- &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&pstaxmitpriv->be_q.tx_pending);
-
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- u8 update_mask = 0, wmmps_ac = 0;
- struct sta_info *psta_bmc;
- struct list_head *phead;
- struct xmit_frame *pxmitframe = NULL, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
- phead = get_list_head(&psta->sleep_q);
- list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
- list_del_init(&pxmitframe->list);
-
- switch (pxmitframe->attrib.priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- psta->sleepq_len--;
- if (psta->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- if (wmmps_ac) {
- psta->sleepq_ac_len--;
- if (psta->sleepq_ac_len > 0) {
- pxmitframe->attrib.mdata = 1;
- pxmitframe->attrib.eosp = 0;
- } else {
- pxmitframe->attrib.mdata = 0;
- pxmitframe->attrib.eosp = 1;
- }
- }
-
- pxmitframe->attrib.triggered = 1;
- rtl8723au_hal_xmitframe_enqueue(padapter, pxmitframe);
- }
-
- if (psta->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
-
- /* update BCN for TIM IE */
- update_mask = BIT(0);
-
- if (psta->state&WIFI_SLEEP_STATE)
- psta->state ^= WIFI_SLEEP_STATE;
-
- if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
- psta->expire_to = pstapriv->expire_to;
- psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
- }
-
- pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
- }
- /* spin_unlock_bh(&psta->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
-
- /* for BC/MC Frames */
- psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (!psta_bmc)
- return;
-
- if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) {
- /* no station is in power-save mode */
- spin_lock_bh(&pxmitpriv->lock);
- phead = get_list_head(&psta_bmc->sleep_q);
- list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
- list_del_init(&pxmitframe->list);
-
- psta_bmc->sleepq_len--;
- if (psta_bmc->sleepq_len > 0)
- pxmitframe->attrib.mdata = 1;
- else
- pxmitframe->attrib.mdata = 0;
-
- pxmitframe->attrib.triggered = 1;
- rtl8723au_hal_xmitframe_enqueue(padapter, pxmitframe);
- }
- if (psta_bmc->sleepq_len == 0) {
- pstapriv->tim_bitmap &= ~BIT(0);
- pstapriv->sta_dz_bitmap &= ~BIT(0);
-
- /* update BCN for TIM IE */
- /* update_BCNTIM(padapter); */
- update_mask |= BIT(1);
- }
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
- }
-
- if (update_mask)
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
-}
-
-void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
- struct sta_info *psta)
-{
- u8 wmmps_ac = 0;
- struct list_head *phead;
- struct xmit_frame *pxmitframe, *ptmp;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
- spin_lock_bh(&pxmitpriv->lock);
- phead = get_list_head(&psta->sleep_q);
- list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
- switch (pxmitframe->attrib.priority) {
- case 1:
- case 2:
- wmmps_ac = psta->uapsd_bk & BIT(1);
- break;
- case 4:
- case 5:
- wmmps_ac = psta->uapsd_vi & BIT(1);
- break;
- case 6:
- case 7:
- wmmps_ac = psta->uapsd_vo & BIT(1);
- break;
- case 0:
- case 3:
- default:
- wmmps_ac = psta->uapsd_be & BIT(1);
- break;
- }
-
- if (!wmmps_ac)
- continue;
-
- list_del_init(&pxmitframe->list);
-
- psta->sleepq_len--;
- psta->sleepq_ac_len--;
-
- if (psta->sleepq_ac_len > 0) {
- pxmitframe->attrib.mdata = 1;
- pxmitframe->attrib.eosp = 0;
- } else {
- pxmitframe->attrib.mdata = 0;
- pxmitframe->attrib.eosp = 1;
- }
-
- pxmitframe->attrib.triggered = 1;
-
- rtl8723au_hal_xmitframe_enqueue(padapter, pxmitframe);
-
- if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) &&
- (wmmps_ac)) {
- pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
-
- /* update BCN for TIM IE */
- update_beacon23a(padapter, WLAN_EID_TIM, NULL, false);
- }
- }
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-#endif
-
-void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms)
-{
- sctx->timeout_ms = timeout_ms;
- init_completion(&sctx->done);
- sctx->status = RTW_SCTX_SUBMITTED;
-}
-
-int rtw_sctx_wait23a(struct submit_ctx *sctx)
-{
- int ret = _FAIL;
- unsigned long expire;
- int status = 0;
-
- expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) :
- MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_timeout(&sctx->done, expire)) {
- /* timed out; record it so the caller sees a failure */
- status = RTW_SCTX_DONE_TIMEOUT;
- DBG_8723A("%s timeout\n", __func__);
- } else {
- status = sctx->status;
- }
-
- if (status == RTW_SCTX_DONE_SUCCESS)
- ret = _SUCCESS;
-
- return ret;
-}
-
-static bool rtw_sctx_chk_warning_status(int status)
-{
- switch (status) {
- case RTW_SCTX_DONE_UNKNOWN:
- case RTW_SCTX_DONE_BUF_ALLOC:
- case RTW_SCTX_DONE_BUF_FREE:
- case RTW_SCTX_DONE_DRV_STOP:
- case RTW_SCTX_DONE_DEV_REMOVE:
- return true;
- default:
- return false;
- }
-}
-
-void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status)
-{
- if (*sctx) {
- if (rtw_sctx_chk_warning_status(status))
- DBG_8723A("%s status:%d\n", __func__, status);
- (*sctx)->status = status;
- complete(&(*sctx)->done);
- *sctx = NULL;
- }
-}
-
-int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms)
-{
- struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
-
- pack_tx_ops->timeout_ms = timeout_ms;
- pack_tx_ops->status = RTW_SCTX_SUBMITTED;
-
- return rtw_sctx_wait23a(pack_tx_ops);
-}
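/*
 * A minimal sketch of the submit_ctx pattern implemented above: the submitter
 * initialises a context and waits on it, while the completion path reports
 * the outcome through rtw23a_sctx_done_err().  The I/O step itself is only
 * indicated by a comment, and example_sync_submit() is hypothetical.
 */
static int example_sync_submit(struct xmit_buf *pxmitbuf)
{
        struct submit_ctx sctx;

        rtw_sctx_init23a(&sctx, 2000);          /* 2000 ms timeout */
        pxmitbuf->sctx = &sctx;

        /*
         * ... hand pxmitbuf to the hardware here (e.g. a USB write).  On
         * completion that path calls
         * rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_SUCCESS);
         */

        return rtw_sctx_wait23a(&sctx);         /* _SUCCESS or _FAIL */
}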
-
diff --git a/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c b/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c
deleted file mode 100644
index 747f86cddeb9..000000000000
--- a/drivers/staging/rtl8723au/hal/Hal8723PwrSeq.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "Hal8723PwrSeq.h"
-
-/*
- * Drivers should parse the arrays below and perform the corresponding
- * power-sequence actions.
- */
-/* 3 Power on Array */
-struct wlan_pwr_cfg rtl8723AU_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_CARDEMU_TO_ACT
- RTL8723A_TRANS_END
-};
-
-/* 3 Radio off GPIO Array */
-struct wlan_pwr_cfg rtl8723AU_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_ACT_TO_CARDEMU
- RTL8723A_TRANS_END
-};
-
-/* 3 Card Disable Array */
-struct wlan_pwr_cfg rtl8723AU_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_ACT_TO_CARDEMU
- RTL8723A_TRANS_CARDEMU_TO_CARDDIS
- RTL8723A_TRANS_END
-};
-
-/* 3 Card Enable Array */
-struct wlan_pwr_cfg rtl8723AU_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_CARDDIS_TO_CARDEMU
- RTL8723A_TRANS_CARDEMU_TO_ACT
- RTL8723A_TRANS_END
-};
-
-/* 3 Suspend Array */
-struct wlan_pwr_cfg rtl8723AU_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_ACT_TO_CARDEMU
- RTL8723A_TRANS_CARDEMU_TO_SUS
- RTL8723A_TRANS_END
-};
-
-/* 3 Resume Array */
-struct wlan_pwr_cfg rtl8723AU_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_SUS_TO_CARDEMU
- RTL8723A_TRANS_CARDEMU_TO_ACT
- RTL8723A_TRANS_END
-};
-
-/* 3 HWPDN Array */
-struct wlan_pwr_cfg rtl8723AU_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS] = {
- RTL8723A_TRANS_ACT_TO_CARDEMU
- RTL8723A_TRANS_CARDEMU_TO_PDN
- RTL8723A_TRANS_END
-};
-
-/* 3 Enter LPS */
-struct wlan_pwr_cfg rtl8723AU_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STEPS+RTL8723A_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8723A_TRANS_ACT_TO_LPS
- RTL8723A_TRANS_END
-};
-
-/* 3 Leave LPS */
-struct wlan_pwr_cfg rtl8723AU_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8723A_TRANS_LPS_TO_ACT
- RTL8723A_TRANS_END
-};
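/*
 * A minimal sketch of how a power-sequence table like the ones above is
 * consumed: walk the steps until the end marker and apply each
 * read-modify-write.  The wlan_pwr_cfg field names (offset, cmd, msk, value)
 * and the PWR_CMD_WRITE/PWR_CMD_END constants are assumptions based on the
 * usual Realtek power-sequence layout; the real parser also handles the
 * polling and delay commands, which are omitted here.
 */
static void example_run_pwr_seq(struct rtw_adapter *padapter,
                                struct wlan_pwr_cfg *step)
{
        u8 val;

        for (; step->cmd != PWR_CMD_END; step++) {
                if (step->cmd != PWR_CMD_WRITE)
                        continue;       /* polling/delay steps skipped */

                val = rtl8723au_read8(padapter, step->offset);
                val &= ~step->msk;
                val |= step->value & step->msk;
                rtl8723au_write8(padapter, step->offset, val);
        }
}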
diff --git a/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c b/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c
deleted file mode 100644
index 56833da63ced..000000000000
--- a/drivers/staging/rtl8723au/hal/Hal8723UHWImg_CE.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-/* Created on 2013/01/14, 15:51 */
-#include "odm_precomp.h"
-
-u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength] = {
- 0xe00, 0xffffffff, 0x0a0c0c0c,
- 0xe04, 0xffffffff, 0x02040608,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x0a0c0d0e,
- 0xe14, 0xffffffff, 0x02040608,
- 0xe18, 0xffffffff, 0x0a0c0d0e,
- 0xe1c, 0xffffffff, 0x02040608,
- 0x830, 0xffffffff, 0x0a0c0c0c,
- 0x834, 0xffffffff, 0x02040608,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x0a0c0d0e,
- 0x848, 0xffffffff, 0x02040608,
- 0x84c, 0xffffffff, 0x0a0c0d0e,
- 0x868, 0xffffffff, 0x02040608,
- 0xe00, 0xffffffff, 0x00000000,
- 0xe04, 0xffffffff, 0x00000000,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x00000000,
- 0xe14, 0xffffffff, 0x00000000,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x00000000,
- 0x834, 0xffffffff, 0x00000000,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x00000000,
- 0x848, 0xffffffff, 0x00000000,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- 0xe00, 0xffffffff, 0x04040404,
- 0xe04, 0xffffffff, 0x00020204,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x06060606,
- 0xe14, 0xffffffff, 0x00020406,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x04040404,
- 0x834, 0xffffffff, 0x00020204,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x06060606,
- 0x848, 0xffffffff, 0x00020406,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- 0xe00, 0xffffffff, 0x00000000,
- 0xe04, 0xffffffff, 0x00000000,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x00000000,
- 0xe14, 0xffffffff, 0x00000000,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x00000000,
- 0x834, 0xffffffff, 0x00000000,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x00000000,
- 0x848, 0xffffffff, 0x00000000,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- 0xe00, 0xffffffff, 0x00000000,
- 0xe04, 0xffffffff, 0x00000000,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x00000000,
- 0xe14, 0xffffffff, 0x00000000,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x00000000,
- 0x834, 0xffffffff, 0x00000000,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x00000000,
- 0x848, 0xffffffff, 0x00000000,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- 0xe00, 0xffffffff, 0x04040404,
- 0xe04, 0xffffffff, 0x00020204,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x00000000,
- 0xe14, 0xffffffff, 0x00000000,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x04040404,
- 0x834, 0xffffffff, 0x00020204,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x00000000,
- 0x848, 0xffffffff, 0x00000000,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- 0xe00, 0xffffffff, 0x00000000,
- 0xe04, 0xffffffff, 0x00000000,
- 0xe08, 0x0000ff00, 0x00000000,
- 0x86c, 0xffffff00, 0x00000000,
- 0xe10, 0xffffffff, 0x00000000,
- 0xe14, 0xffffffff, 0x00000000,
- 0xe18, 0xffffffff, 0x00000000,
- 0xe1c, 0xffffffff, 0x00000000,
- 0x830, 0xffffffff, 0x00000000,
- 0x834, 0xffffffff, 0x00000000,
- 0x838, 0xffffff00, 0x00000000,
- 0x86c, 0x000000ff, 0x00000000,
- 0x83c, 0xffffffff, 0x00000000,
- 0x848, 0xffffffff, 0x00000000,
- 0x84c, 0xffffffff, 0x00000000,
- 0x868, 0xffffffff, 0x00000000,
- };
-
-u32 Rtl8723UMACPHY_Array_PG[Rtl8723UMACPHY_Array_PGLength] = {
- 0x0,
-};
diff --git a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
deleted file mode 100644
index 3f9ec9e00e16..000000000000
--- a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
+++ /dev/null
@@ -1,1097 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/* Description: */
-/* This file is for 92CE/92CU dynamic mechanism only */
-
-/* include files */
-
-#include "odm_precomp.h"
-#include <usb_ops_linux.h>
-
-#define DPK_DELTA_MAPPING_NUM 13
-#define index_mapping_HP_NUM 15
-/* 091212 chiyokolin */
-static void
-odm_TXPowerTrackingCallback_ThermalMeter_92C(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, delta_HP;
- int ele_A, ele_D, TempCCk, X, value32;
- int Y, ele_C;
- s8 OFDM_index[2], CCK_index = 0, OFDM_index_old[2] = {0};
- s8 CCK_index_old = 0;
- int i = 0;
- u8 OFDM_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB*/
- u8 ThermalValue_HP_count = 0;
- u32 ThermalValue_HP = 0;
- s32 index_mapping_HP[index_mapping_HP_NUM] = {
- 0, 1, 3, 4, 6,
- 7, 9, 10, 12, 13,
- 15, 16, 18, 19, 21
- };
- s8 index_HP;
-
- pdmpriv->TXPowerTrackingCallbackCnt++; /* cosa add for debug */
- pdmpriv->bTXPowerTrackingInit = true;
-
- if (pHalData->CurrentChannel == 14 && !pdmpriv->bCCKinCH14)
- pdmpriv->bCCKinCH14 = true;
- else if (pHalData->CurrentChannel != 14 && pdmpriv->bCCKinCH14)
- pdmpriv->bCCKinCH14 = false;
-
- ThermalValue = (u8)PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER,
- 0x1f);/* 0x24: RF Reg[4:0] */
-
- rtl8723a_phy_ap_calibrate(Adapter, (ThermalValue -
- pHalData->EEPROMThermalMeter));
-
- if (pHalData->rf_type == RF_2T2R)
- rf = 2;
- else
- rf = 1;
-
- if (ThermalValue) {
- /* Query OFDM path A default setting */
- ele_D = rtl8723au_read32(Adapter, rOFDM0_XATxIQImbalance) &
- bMaskOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92C; i++) {
- /* find the index */
- if (ele_D == (OFDMSwingTable23A[i]&bMaskOFDM_D)) {
- OFDM_index_old[0] = (u8)i;
- break;
- }
- }
-
- /* Query OFDM path B default setting */
- if (pHalData->rf_type == RF_2T2R) {
- ele_D = rtl8723au_read32(Adapter,
- rOFDM0_XBTxIQImbalance);
- ele_D &= bMaskOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92C; i++) { /* find the index */
- if (ele_D == (OFDMSwingTable23A[i]&bMaskOFDM_D)) {
- OFDM_index_old[1] = (u8)i;
- break;
- }
- }
- }
-
- /* Query CCK default setting From 0xa24 */
- TempCCk = rtl8723au_read32(Adapter, rCCK0_TxFilter2) & bMaskCCK;
- for (i = 0 ; i < CCK_TABLE_SIZE ; i++) {
- if (pdmpriv->bCCKinCH14) {
- if (!memcmp(&TempCCk,
- &CCKSwingTable_Ch1423A[i][2], 4)) {
- CCK_index_old = (u8)i;
- break;
- }
- } else {
- if (!memcmp(&TempCCk,
- &CCKSwingTable_Ch1_Ch1323A[i][2], 4)) {
- CCK_index_old = (u8)i;
- break;
- }
- }
- }
-
- if (!pdmpriv->ThermalValue) {
- pdmpriv->ThermalValue = pHalData->EEPROMThermalMeter;
- pdmpriv->ThermalValue_LCK = ThermalValue;
- pdmpriv->ThermalValue_IQK = ThermalValue;
- pdmpriv->ThermalValue_DPK = pHalData->EEPROMThermalMeter;
-
- for (i = 0; i < rf; i++) {
- pdmpriv->OFDM_index_HP[i] = OFDM_index_old[i];
- pdmpriv->OFDM_index[i] = OFDM_index_old[i];
- }
- pdmpriv->CCK_index_HP = CCK_index_old;
- pdmpriv->CCK_index = CCK_index_old;
- }
-
- if (pHalData->BoardType == BOARD_USB_High_PA) {
- pdmpriv->ThermalValue_HP[pdmpriv->ThermalValue_HP_index] = ThermalValue;
- pdmpriv->ThermalValue_HP_index++;
- if (pdmpriv->ThermalValue_HP_index == HP_THERMAL_NUM)
- pdmpriv->ThermalValue_HP_index = 0;
-
- for (i = 0; i < HP_THERMAL_NUM; i++) {
- if (pdmpriv->ThermalValue_HP[i]) {
- ThermalValue_HP += pdmpriv->ThermalValue_HP[i];
- ThermalValue_HP_count++;
- }
- }
-
- if (ThermalValue_HP_count)
- ThermalValue = (u8)(ThermalValue_HP / ThermalValue_HP_count);
- }
-
- delta = (ThermalValue > pdmpriv->ThermalValue) ?
- (ThermalValue - pdmpriv->ThermalValue) :
- (pdmpriv->ThermalValue - ThermalValue);
- if (pHalData->BoardType == BOARD_USB_High_PA) {
- if (pdmpriv->bDoneTxpower)
- delta_HP = (ThermalValue > pdmpriv->ThermalValue) ?
- (ThermalValue - pdmpriv->ThermalValue) :
- (pdmpriv->ThermalValue - ThermalValue);
- else
- delta_HP = ThermalValue > pHalData->EEPROMThermalMeter ?
- (ThermalValue - pHalData->EEPROMThermalMeter) :
- (pHalData->EEPROMThermalMeter - ThermalValue);
- } else {
- delta_HP = 0;
- }
- delta_LCK = (ThermalValue > pdmpriv->ThermalValue_LCK) ?
- (ThermalValue - pdmpriv->ThermalValue_LCK) :
- (pdmpriv->ThermalValue_LCK - ThermalValue);
- delta_IQK = (ThermalValue > pdmpriv->ThermalValue_IQK) ?
- (ThermalValue - pdmpriv->ThermalValue_IQK) :
- (pdmpriv->ThermalValue_IQK - ThermalValue);
-
- if (delta_LCK > 1) {
- pdmpriv->ThermalValue_LCK = ThermalValue;
- rtl8723a_phy_lc_calibrate(Adapter);
- }
-
- if ((delta > 0 || delta_HP > 0) && pdmpriv->TxPowerTrackControl) {
- if (pHalData->BoardType == BOARD_USB_High_PA) {
- pdmpriv->bDoneTxpower = true;
- delta_HP = ThermalValue > pHalData->EEPROMThermalMeter ?
- (ThermalValue - pHalData->EEPROMThermalMeter) :
- (pHalData->EEPROMThermalMeter - ThermalValue);
-
- if (delta_HP > index_mapping_HP_NUM-1)
- index_HP = index_mapping_HP[index_mapping_HP_NUM-1];
- else
- index_HP = index_mapping_HP[delta_HP];
-
- if (ThermalValue > pHalData->EEPROMThermalMeter) {
- /* set larger Tx power */
- for (i = 0; i < rf; i++)
- OFDM_index[i] = pdmpriv->OFDM_index_HP[i] - index_HP;
- CCK_index = pdmpriv->CCK_index_HP - index_HP;
- } else {
- for (i = 0; i < rf; i++)
- OFDM_index[i] = pdmpriv->OFDM_index_HP[i] + index_HP;
- CCK_index = pdmpriv->CCK_index_HP + index_HP;
- }
-
- delta_HP = (ThermalValue > pdmpriv->ThermalValue) ?
- (ThermalValue - pdmpriv->ThermalValue) :
- (pdmpriv->ThermalValue - ThermalValue);
- } else {
- if (ThermalValue > pdmpriv->ThermalValue) {
- for (i = 0; i < rf; i++)
- pdmpriv->OFDM_index[i] -= delta;
- pdmpriv->CCK_index -= delta;
- } else {
- for (i = 0; i < rf; i++)
- pdmpriv->OFDM_index[i] += delta;
- pdmpriv->CCK_index += delta;
- }
- }
-
- /* no adjust */
- if (pHalData->BoardType != BOARD_USB_High_PA) {
- if (ThermalValue > pHalData->EEPROMThermalMeter) {
- for (i = 0; i < rf; i++)
- OFDM_index[i] = pdmpriv->OFDM_index[i]+1;
- CCK_index = pdmpriv->CCK_index+1;
- } else {
- for (i = 0; i < rf; i++)
- OFDM_index[i] = pdmpriv->OFDM_index[i];
- CCK_index = pdmpriv->CCK_index;
- }
- }
- for (i = 0; i < rf; i++) {
- if (OFDM_index[i] > (OFDM_TABLE_SIZE_92C-1))
- OFDM_index[i] = (OFDM_TABLE_SIZE_92C-1);
- else if (OFDM_index[i] < OFDM_min_index)
- OFDM_index[i] = OFDM_min_index;
- }
-
- if (CCK_index > (CCK_TABLE_SIZE-1))
- CCK_index = CCK_TABLE_SIZE-1;
- else if (CCK_index < 0)
- CCK_index = 0;
- }
-
- if (pdmpriv->TxPowerTrackControl &&
- (delta != 0 || delta_HP != 0)) {
- /* Adjust OFDM Ant_A according to the IQK result */
- ele_D = (OFDMSwingTable23A[OFDM_index[0]] & 0xFFC00000)>>22;
- X = pdmpriv->RegE94;
- Y = pdmpriv->RegE9C;
-
- if (X != 0) {
- if ((X & 0x00000200) != 0)
- X = X | 0xFFFFFC00;
- ele_A = ((X * ele_D)>>8)&0x000003FF;
-
- /* new element C = element D x Y */
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
- ele_C = ((Y * ele_D)>>8)&0x000003FF;
-
- /* write new elements A, C, D to regC80 and regC94, element B is always 0 */
- value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
- rtl8723au_write32(Adapter,
- rOFDM0_XATxIQImbalance,
- value32);
-
- value32 = (ele_C&0x000003C0)>>6;
- PHY_SetBBReg(Adapter, rOFDM0_XCTxAFE, bMaskH4Bits, value32);
-
- value32 = ((X * ele_D)>>7)&0x01;
- PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold,
- BIT(31), value32);
-
- value32 = ((Y * ele_D)>>7)&0x01;
- PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold,
- BIT(29), value32);
- } else {
- rtl8723au_write32(Adapter,
- rOFDM0_XATxIQImbalance,
- OFDMSwingTable23A[OFDM_index[0]]);
- PHY_SetBBReg(Adapter, rOFDM0_XCTxAFE,
- bMaskH4Bits, 0x00);
- PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold,
- BIT(31) | BIT(29), 0x00);
- }
-
- /* Adjust CCK according to IQK result */
- if (!pdmpriv->bCCKinCH14) {
- rtl8723au_write8(Adapter, 0xa22, CCKSwingTable_Ch1_Ch1323A[CCK_index][0]);
- rtl8723au_write8(Adapter, 0xa23, CCKSwingTable_Ch1_Ch1323A[CCK_index][1]);
- rtl8723au_write8(Adapter, 0xa24, CCKSwingTable_Ch1_Ch1323A[CCK_index][2]);
- rtl8723au_write8(Adapter, 0xa25, CCKSwingTable_Ch1_Ch1323A[CCK_index][3]);
- rtl8723au_write8(Adapter, 0xa26, CCKSwingTable_Ch1_Ch1323A[CCK_index][4]);
- rtl8723au_write8(Adapter, 0xa27, CCKSwingTable_Ch1_Ch1323A[CCK_index][5]);
- rtl8723au_write8(Adapter, 0xa28, CCKSwingTable_Ch1_Ch1323A[CCK_index][6]);
- rtl8723au_write8(Adapter, 0xa29, CCKSwingTable_Ch1_Ch1323A[CCK_index][7]);
- } else {
- rtl8723au_write8(Adapter, 0xa22, CCKSwingTable_Ch1423A[CCK_index][0]);
- rtl8723au_write8(Adapter, 0xa23, CCKSwingTable_Ch1423A[CCK_index][1]);
- rtl8723au_write8(Adapter, 0xa24, CCKSwingTable_Ch1423A[CCK_index][2]);
- rtl8723au_write8(Adapter, 0xa25, CCKSwingTable_Ch1423A[CCK_index][3]);
- rtl8723au_write8(Adapter, 0xa26, CCKSwingTable_Ch1423A[CCK_index][4]);
- rtl8723au_write8(Adapter, 0xa27, CCKSwingTable_Ch1423A[CCK_index][5]);
- rtl8723au_write8(Adapter, 0xa28, CCKSwingTable_Ch1423A[CCK_index][6]);
- rtl8723au_write8(Adapter, 0xa29, CCKSwingTable_Ch1423A[CCK_index][7]);
- }
-
- if (pHalData->rf_type == RF_2T2R) {
- ele_D = (OFDMSwingTable23A[(u8)OFDM_index[1]] & 0xFFC00000)>>22;
-
- /* new element A = element D x X */
- X = pdmpriv->RegEB4;
- Y = pdmpriv->RegEBC;
-
- if (X != 0) {
- if ((X & 0x00000200) != 0) /* consider minus */
- X = X | 0xFFFFFC00;
- ele_A = ((X * ele_D)>>8)&0x000003FF;
-
- /* new element C = element D x Y */
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
- ele_C = ((Y * ele_D)>>8)&0x00003FF;
-
- /* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
- value32 = (ele_D<<22)|((ele_C&0x3F)<<16) | ele_A;
- rtl8723au_write32(Adapter, rOFDM0_XBTxIQImbalance, value32);
-
- value32 = (ele_C&0x000003C0)>>6;
- PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
-
- value32 = ((X * ele_D)>>7)&0x01;
- PHY_SetBBReg(Adapter,
- rOFDM0_ECCAThreshold,
- BIT(27), value32);
-
- value32 = ((Y * ele_D)>>7)&0x01;
- PHY_SetBBReg(Adapter,
- rOFDM0_ECCAThreshold,
- BIT(25), value32);
- } else {
- rtl8723au_write32(Adapter,
- rOFDM0_XBTxIQImbalance,
- OFDMSwingTable23A[OFDM_index[1]]);
- PHY_SetBBReg(Adapter,
- rOFDM0_XDTxAFE,
- bMaskH4Bits, 0x00);
- PHY_SetBBReg(Adapter,
- rOFDM0_ECCAThreshold,
- BIT(27) | BIT(25), 0x00);
- }
- }
-
- }
- if (delta_IQK > 3) {
- pdmpriv->ThermalValue_IQK = ThermalValue;
- rtl8723a_phy_iq_calibrate(Adapter, false);
- }
-
- /* update thermal meter value */
- if (pdmpriv->TxPowerTrackControl)
- pdmpriv->ThermalValue = ThermalValue;
- }
- pdmpriv->TXPowercount = 0;
-}
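/*
 * The "if (X & 0x00000200) X |= 0xFFFFFC00" pattern used above sign-extends
 * the 10-bit IQK result (bit 9 is the sign bit) to 32 bits before it is
 * multiplied by ele_D.  A small helper expressing the same idea;
 * sign_extend_10bit() is illustrative only (the kernel's sign_extend32(val, 9)
 * would do the same job).
 */
static inline s32 sign_extend_10bit(u32 val)
{
        if (val & BIT(9))               /* negative in 10-bit two's complement */
                val |= 0xFFFFFC00;      /* propagate the sign into bits 31:10 */
        return (s32)val;
}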
-
-/*
- * Description:
- * - Dispatch the TxPower Tracking direct call (ONLY for 92s).
- * - We must NOT schedule a workitem within PASSIVE LEVEL, which would cause a
- * system resource leak on some platforms.
- * Assumption:
- * PASSIVE_LEVEL when this routine is called.
- */
-static void ODM_TXPowerTracking92CDirectCall(struct rtw_adapter *Adapter)
-{
- odm_TXPowerTrackingCallback_ThermalMeter_92C(Adapter);
-}
-
-void rtl8723a_odm_check_tx_power_tracking(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
-
- if (!pdmpriv->TM_Trigger) { /* at least delay 1 sec */
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER, bRFRegOffsetMask, 0x60);
-
- pdmpriv->TM_Trigger = 1;
- return;
- } else {
- ODM_TXPowerTracking92CDirectCall(Adapter);
- pdmpriv->TM_Trigger = 0;
- }
-}
-
-/* IQK */
-#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1 /* ms */
-
-static u8 _PHY_PathA_IQK(struct rtw_adapter *pAdapter, bool configPathB)
-{
- u32 regEAC, regE94, regE9C, regEA4;
- u8 result = 0x00;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
-
- /* path-A IQK setting */
- rtl8723au_write32(pAdapter, rTx_IQK_Tone_A, 0x10008c1f);
- rtl8723au_write32(pAdapter, rRx_IQK_Tone_A, 0x10008c1f);
- rtl8723au_write32(pAdapter, rTx_IQK_PI_A, 0x82140102);
-
- rtl8723au_write32(pAdapter, rRx_IQK_PI_A, configPathB ? 0x28160202 :
- IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID) ?
- 0x28160202 : 0x28160502);
-
- /* path-B IQK setting */
- if (configPathB) {
- rtl8723au_write32(pAdapter, rTx_IQK_Tone_B, 0x10008c22);
- rtl8723au_write32(pAdapter, rRx_IQK_Tone_B, 0x10008c22);
- rtl8723au_write32(pAdapter, rTx_IQK_PI_B, 0x82140102);
- rtl8723au_write32(pAdapter, rRx_IQK_PI_B, 0x28160202);
- }
-
- /* LO calibration setting */
- rtl8723au_write32(pAdapter, rIQK_AGC_Rsp, 0x001028d1);
-
- /* One shot, path A LOK & IQK */
- rtl8723au_write32(pAdapter, rIQK_AGC_Pts, 0xf9000000);
- rtl8723au_write32(pAdapter, rIQK_AGC_Pts, 0xf8000000);
-
- /* delay x ms */
- mdelay(IQK_DELAY_TIME);
-
- /* Check failed */
- regEAC = rtl8723au_read32(pAdapter, rRx_Power_After_IQK_A_2);
- regE94 = rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_A);
- regE9C = rtl8723au_read32(pAdapter, rTx_Power_After_IQK_A);
- regEA4 = rtl8723au_read32(pAdapter, rRx_Power_Before_IQK_A_2);
-
- if (!(regEAC & BIT(28)) &&
- (((regE94 & 0x03FF0000)>>16) != 0x142) &&
- (((regE9C & 0x03FF0000)>>16) != 0x42))
- result |= 0x01;
- else /* if Tx not OK, ignore Rx */
- return result;
-
- if (!(regEAC & BIT(27)) && /* if Tx is OK, check whether Rx is OK */
- (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
- (((regEAC & 0x03FF0000)>>16) != 0x36))
- result |= 0x02;
- else
- DBG_8723A("Path A Rx IQK fail!!\n");
- return result;
-}
-
-static u8 _PHY_PathB_IQK(struct rtw_adapter *pAdapter)
-{
- u32 regEAC, regEB4, regEBC, regEC4, regECC;
- u8 result = 0x00;
-
- /* One shot, path B LOK & IQK */
- rtl8723au_write32(pAdapter, rIQK_AGC_Cont, 0x00000002);
- rtl8723au_write32(pAdapter, rIQK_AGC_Cont, 0x00000000);
-
- /* delay x ms */
- mdelay(IQK_DELAY_TIME);
-
- /* Check failed */
- regEAC = rtl8723au_read32(pAdapter, rRx_Power_After_IQK_A_2);
- regEB4 = rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_B);
- regEBC = rtl8723au_read32(pAdapter, rTx_Power_After_IQK_B);
- regEC4 = rtl8723au_read32(pAdapter, rRx_Power_Before_IQK_B_2);
- regECC = rtl8723au_read32(pAdapter, rRx_Power_After_IQK_B_2);
-
- if (!(regEAC & BIT(31)) &&
- (((regEB4 & 0x03FF0000)>>16) != 0x142) &&
- (((regEBC & 0x03FF0000)>>16) != 0x42))
- result |= 0x01;
- else
- return result;
-
- if (!(regEAC & BIT(30)) &&
- (((regEC4 & 0x03FF0000)>>16) != 0x132) &&
- (((regECC & 0x03FF0000)>>16) != 0x36))
- result |= 0x02;
- else
- DBG_8723A("Path B Rx IQK fail!!\n");
- return result;
-}
-
-static void _PHY_PathAFillIQKMatrix(struct rtw_adapter *pAdapter,
- bool bIQKOK,
- int result[][8],
- u8 final_candidate,
- bool bTxOnly
- )
-{
- u32 Oldval_0, X, TX0_A, reg;
- s32 Y, TX0_C;
-
- DBG_8723A("Path A IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed");
-
- if (final_candidate == 0xFF) {
- return;
- } else if (bIQKOK) {
- Oldval_0 = rtl8723au_read32(pAdapter, rOFDM0_XATxIQImbalance);
- Oldval_0 = (Oldval_0 >> 22) & 0x3FF;
-
- X = result[final_candidate][0];
- if ((X & 0x00000200) != 0)
- X = X | 0xFFFFFC00;
- TX0_A = (X * Oldval_0) >> 8;
- PHY_SetBBReg(pAdapter, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
- PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(31),
- ((X * Oldval_0>>7) & 0x1));
-
- Y = result[final_candidate][1];
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
- TX0_C = (Y * Oldval_0) >> 8;
- PHY_SetBBReg(pAdapter, rOFDM0_XCTxAFE, 0xF0000000,
- ((TX0_C&0x3C0)>>6));
- PHY_SetBBReg(pAdapter, rOFDM0_XATxIQImbalance, 0x003F0000,
- (TX0_C&0x3F));
- PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(29),
- ((Y * Oldval_0>>7) & 0x1));
-
- if (bTxOnly) {
- DBG_8723A("_PHY_PathAFillIQKMatrix only Tx OK\n");
- return;
- }
-
- reg = result[final_candidate][2];
- PHY_SetBBReg(pAdapter, rOFDM0_XARxIQImbalance, 0x3FF, reg);
-
- reg = result[final_candidate][3] & 0x3F;
- PHY_SetBBReg(pAdapter, rOFDM0_XARxIQImbalance, 0xFC00, reg);
-
- reg = (result[final_candidate][3] >> 6) & 0xF;
- PHY_SetBBReg(pAdapter, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
- }
-}
-
-static void _PHY_PathBFillIQKMatrix(struct rtw_adapter *pAdapter, bool bIQKOK, int result[][8], u8 final_candidate, bool bTxOnly)
-{
- u32 Oldval_1, X, TX1_A, reg;
- s32 Y, TX1_C;
-
- DBG_8723A("Path B IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed");
-
- if (final_candidate == 0xFF) {
- return;
- } else if (bIQKOK) {
- Oldval_1 = rtl8723au_read32(pAdapter, rOFDM0_XBTxIQImbalance);
- Oldval_1 = (Oldval_1 >> 22) & 0x3FF;
-
- X = result[final_candidate][4];
- if ((X & 0x00000200) != 0)
- X = X | 0xFFFFFC00;
- TX1_A = (X * Oldval_1) >> 8;
- PHY_SetBBReg(pAdapter, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
- PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(27),
- ((X * Oldval_1 >> 7) & 0x1));
-
- Y = result[final_candidate][5];
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
- TX1_C = (Y * Oldval_1) >> 8;
- PHY_SetBBReg(pAdapter, rOFDM0_XDTxAFE, 0xF0000000,
- ((TX1_C & 0x3C0) >> 6));
- PHY_SetBBReg(pAdapter, rOFDM0_XBTxIQImbalance, 0x003F0000,
- (TX1_C & 0x3F));
- PHY_SetBBReg(pAdapter, rOFDM0_ECCAThreshold, BIT(25),
- ((Y * Oldval_1 >> 7) & 0x1));
-
- if (bTxOnly)
- return;
-
- reg = result[final_candidate][6];
- PHY_SetBBReg(pAdapter, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
-
- reg = result[final_candidate][7] & 0x3F;
- PHY_SetBBReg(pAdapter, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
-
- reg = (result[final_candidate][7] >> 6) & 0xF;
- PHY_SetBBReg(pAdapter, rOFDM0_AGCRSSITable, 0x0000F000, reg);
- }
-}
-
-static void _PHY_SaveADDARegisters(struct rtw_adapter *pAdapter, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
-{
- u32 i;
-
- for (i = 0; i < RegisterNum; i++)
- ADDABackup[i] = rtl8723au_read32(pAdapter, ADDAReg[i]);
-}
-
-static void _PHY_SaveMACRegisters(struct rtw_adapter *pAdapter, u32 *MACReg,
- u32 *MACBackup)
-{
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- MACBackup[i] = rtl8723au_read8(pAdapter, MACReg[i]);
- MACBackup[i] = rtl8723au_read32(pAdapter, MACReg[i]);
-}
-
-static void _PHY_ReloadADDARegisters(struct rtw_adapter *pAdapter,
- u32 *ADDAReg, u32 *ADDABackup,
- u32 RegisterNum)
-{
- u32 i;
-
- for (i = 0; i < RegisterNum; i++)
- rtl8723au_write32(pAdapter, ADDAReg[i], ADDABackup[i]);
-}
-
-static void _PHY_ReloadMACRegisters(struct rtw_adapter *pAdapter,
- u32 *MACReg, u32 *MACBackup)
-{
- u32 i;
-
- for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl8723au_write8(pAdapter, MACReg[i], (u8)MACBackup[i]);
-
- rtl8723au_write32(pAdapter, MACReg[i], MACBackup[i]);
-}
-
-static void _PHY_PathADDAOn(struct rtw_adapter *pAdapter, u32 *ADDAReg,
- bool isPathAOn, bool is2T)
-{
- u32 pathOn;
- u32 i;
-
- pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (!is2T) {
- pathOn = 0x0bdb25a0;
- rtl8723au_write32(pAdapter, ADDAReg[0], 0x0b1b25a0);
- } else {
- rtl8723au_write32(pAdapter, ADDAReg[0], pathOn);
- }
-
- for (i = 1 ; i < IQK_ADDA_REG_NUM ; i++)
- rtl8723au_write32(pAdapter, ADDAReg[i], pathOn);
-}
-
-static void _PHY_MACSettingCalibration(struct rtw_adapter *pAdapter,
- u32 *MACReg, u32 *MACBackup)
-{
- u32 i = 0;
-
- rtl8723au_write8(pAdapter, MACReg[i], 0x3F);
-
- for (i = 1 ; i < (IQK_MAC_REG_NUM - 1); i++) {
- rtl8723au_write8(pAdapter, MACReg[i],
- (u8)(MACBackup[i] & ~BIT(3)));
- }
- rtl8723au_write8(pAdapter, MACReg[i], (u8)(MACBackup[i] & ~BIT(5)));
-}
-
-static void _PHY_PathAStandBy(struct rtw_adapter *pAdapter)
-{
- rtl8723au_write32(pAdapter, rFPGA0_IQK, 0x0);
- rtl8723au_write32(pAdapter, 0x840, 0x00010000);
- rtl8723au_write32(pAdapter, rFPGA0_IQK, 0x80800000);
-}
-
-static void _PHY_PIModeSwitch(struct rtw_adapter *pAdapter, bool PIMode)
-{
- u32 mode;
-
- mode = PIMode ? 0x01000100 : 0x01000000;
- rtl8723au_write32(pAdapter, 0x820, mode);
- rtl8723au_write32(pAdapter, 0x828, mode);
-}
-
-/*
- * Returns false => the two result sets differ too much, so do IQK again.
- */
-static bool _PHY_SimularityCompare(struct rtw_adapter *pAdapter, int result[][8], u8 c1, u8 c2)
-{
- u32 i, j, diff, SimularityBitMap, bound = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
- bool bResult = true;
-
- if (pHalData->rf_type == RF_2T2R)
- bound = 8;
- else
- bound = 4;
-
- SimularityBitMap = 0;
-
- for (i = 0; i < bound; i++) {
- diff = (result[c1][i] > result[c2][i]) ? (result[c1][i] - result[c2][i]) : (result[c2][i] - result[c1][i]);
- if (diff > MAX_TOLERANCE) {
- if ((i == 2 || i == 6) && !SimularityBitMap) {
- if (result[c1][i]+result[c1][i+1] == 0)
- final_candidate[(i/4)] = c2;
- else if (result[c2][i]+result[c2][i+1] == 0)
- final_candidate[(i/4)] = c1;
- else
- SimularityBitMap = SimularityBitMap|(1<<i);
- } else {
- SimularityBitMap = SimularityBitMap|(1<<i);
- }
- }
- }
-
- if (SimularityBitMap == 0) {
- for (i = 0; i < (bound/4); i++) {
- if (final_candidate[i] != 0xFF) {
- for (j = i*4; j < (i+1)*4-2; j++)
- result[3][j] = result[final_candidate[i]][j];
- bResult = false;
- }
- }
- return bResult;
- } else if (!(SimularityBitMap & 0x0F)) {
- /* path A OK */
- for (i = 0; i < 4; i++)
- result[3][i] = result[c1][i];
- return false;
- } else if (!(SimularityBitMap & 0xF0) && pHalData->rf_type == RF_2T2R) {
- /* path B OK */
- for (i = 4; i < 8; i++)
- result[3][i] = result[c1][i];
- return false;
- } else {
- return false;
- }
-}
-
-static void _PHY_IQCalibrate(struct rtw_adapter *pAdapter, int result[][8], u8 t, bool is2T)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- u32 i;
- u8 PathAOK, PathBOK;
- u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
- rFPGA0_XCD_SwitchControl, rBlue_Tooth,
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN
- };
-
- u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
- REG_TXPAUSE, REG_BCN_CTRL,
- REG_BCN_CTRL_1, REG_GPIO_MUXCFG
- };
-
- u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
- rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
- rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
- rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
- rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
- };
-
- const u32 retryCount = 2;
-
- /* Note: IQ calibration must be performed after loading */
- /* PHY_REG.txt, radio_a.txt and radio_b.txt */
-
- u32 bbvalue;
-
- if (t == 0) {
- bbvalue = rtl8723au_read32(pAdapter, rFPGA0_RFMOD);
-
- /* Save ADDA parameters, turn Path A ADDA on */
- _PHY_SaveADDARegisters(pAdapter, ADDA_REG, pdmpriv->ADDA_backup, IQK_ADDA_REG_NUM);
- _PHY_SaveMACRegisters(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
- _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup, IQK_BB_REG_NUM);
- }
- _PHY_PathADDAOn(pAdapter, ADDA_REG, true, is2T);
-
- if (t == 0)
- pdmpriv->bRfPiEnable = (u8)
- PHY_QueryBBReg(pAdapter, rFPGA0_XA_HSSIParameter1,
- BIT(8));
-
- if (!pdmpriv->bRfPiEnable) {
- /* Switch BB to PI mode to do IQ Calibration. */
- _PHY_PIModeSwitch(pAdapter, true);
- }
-
- PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, BIT(24), 0x00);
- rtl8723au_write32(pAdapter, rOFDM0_TRxPathEnable, 0x03a05600);
- rtl8723au_write32(pAdapter, rOFDM0_TRMuxPar, 0x000800e4);
- rtl8723au_write32(pAdapter, rFPGA0_XCD_RFInterfaceSW, 0x22204000);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
- PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
- PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
-
- if (is2T) {
- rtl8723au_write32(pAdapter,
- rFPGA0_XA_LSSIParameter, 0x00010000);
- rtl8723au_write32(pAdapter,
- rFPGA0_XB_LSSIParameter, 0x00010000);
- }
-
- /* MAC settings */
- _PHY_MACSettingCalibration(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
-
- /* Page B init */
- rtl8723au_write32(pAdapter, rConfig_AntA, 0x00080000);
-
- if (is2T)
- rtl8723au_write32(pAdapter, rConfig_AntB, 0x00080000);
-
- /* IQ calibration setting */
- rtl8723au_write32(pAdapter, rFPGA0_IQK, 0x80800000);
- rtl8723au_write32(pAdapter, rTx_IQK, 0x01007c00);
- rtl8723au_write32(pAdapter, rRx_IQK, 0x01004800);
-
- for (i = 0 ; i < retryCount ; i++) {
- PathAOK = _PHY_PathA_IQK(pAdapter, is2T);
- if (PathAOK == 0x03) {
- DBG_8723A("Path A IQK Success!!\n");
- result[t][0] = (rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_A)&0x3FF0000)>>16;
- result[t][1] = (rtl8723au_read32(pAdapter, rTx_Power_After_IQK_A)&0x3FF0000)>>16;
- result[t][2] = (rtl8723au_read32(pAdapter, rRx_Power_Before_IQK_A_2)&0x3FF0000)>>16;
- result[t][3] = (rtl8723au_read32(pAdapter, rRx_Power_After_IQK_A_2)&0x3FF0000)>>16;
- break;
- } else if (i == (retryCount-1) && PathAOK == 0x01) {
- /* Tx IQK OK */
- DBG_8723A("Path A IQK Only Tx Success!!\n");
-
- result[t][0] = (rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_A)&0x3FF0000)>>16;
- result[t][1] = (rtl8723au_read32(pAdapter, rTx_Power_After_IQK_A)&0x3FF0000)>>16;
- }
- }
-
- if (0x00 == PathAOK) {
- DBG_8723A("Path A IQK failed!!\n");
- }
-
- if (is2T) {
- _PHY_PathAStandBy(pAdapter);
-
- /* Turn Path B ADDA on */
- _PHY_PathADDAOn(pAdapter, ADDA_REG, false, is2T);
-
- for (i = 0 ; i < retryCount ; i++) {
- PathBOK = _PHY_PathB_IQK(pAdapter);
- if (PathBOK == 0x03) {
- DBG_8723A("Path B IQK Success!!\n");
- result[t][4] = (rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_B)&0x3FF0000)>>16;
- result[t][5] = (rtl8723au_read32(pAdapter, rTx_Power_After_IQK_B)&0x3FF0000)>>16;
- result[t][6] = (rtl8723au_read32(pAdapter, rRx_Power_Before_IQK_B_2)&0x3FF0000)>>16;
- result[t][7] = (rtl8723au_read32(pAdapter, rRx_Power_After_IQK_B_2)&0x3FF0000)>>16;
- break;
- } else if (i == (retryCount - 1) && PathBOK == 0x01) {
- /* Tx IQK OK */
- DBG_8723A("Path B Only Tx IQK Success!!\n");
- result[t][4] = (rtl8723au_read32(pAdapter, rTx_Power_Before_IQK_B)&0x3FF0000)>>16;
- result[t][5] = (rtl8723au_read32(pAdapter, rTx_Power_After_IQK_B)&0x3FF0000)>>16;
- }
- }
-
- if (0x00 == PathBOK) {
- DBG_8723A("Path B IQK failed!!\n");
- }
- }
-
- /* Back to BB mode, load original value */
- rtl8723au_write32(pAdapter, rFPGA0_IQK, 0);
-
- if (t != 0) {
- if (!pdmpriv->bRfPiEnable) {
-			/* Switch BB back to SI mode after finishing IQ calibration. */
- _PHY_PIModeSwitch(pAdapter, false);
- }
-
- /* Reload ADDA power saving parameters */
- _PHY_ReloadADDARegisters(pAdapter, ADDA_REG, pdmpriv->ADDA_backup, IQK_ADDA_REG_NUM);
-
- /* Reload MAC parameters */
- _PHY_ReloadMACRegisters(pAdapter, IQK_MAC_REG, pdmpriv->IQK_MAC_backup);
-
- /* Reload BB parameters */
- _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup, IQK_BB_REG_NUM);
-
- /* Restore RX initial gain */
- rtl8723au_write32(pAdapter,
- rFPGA0_XA_LSSIParameter, 0x00032ed3);
- if (is2T) {
- rtl8723au_write32(pAdapter,
- rFPGA0_XB_LSSIParameter, 0x00032ed3);
- }
-
- /* load 0xe30 IQC default value */
- rtl8723au_write32(pAdapter, rTx_IQK_Tone_A, 0x01008c00);
- rtl8723au_write32(pAdapter, rRx_IQK_Tone_A, 0x01008c00);
-
- }
-}
-
-static void _PHY_LCCalibrate(struct rtw_adapter *pAdapter, bool is2T)
-{
- u8 tmpReg;
- u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
-
- /* Check continuous TX and Packet TX */
- tmpReg = rtl8723au_read8(pAdapter, 0xd03);
-
- if ((tmpReg&0x70) != 0) {
-		/* Deal with continuous TX case */
- /* disable all continuous TX */
- rtl8723au_write8(pAdapter, 0xd03, tmpReg&0x8F);
- } else {
- /* Deal with Packet TX case */
- /* block all queues */
- rtl8723au_write8(pAdapter, REG_TXPAUSE, 0xFF);
- }
-
- if ((tmpReg&0x70) != 0) {
- /* 1. Read original RF mode */
- /* Path-A */
- RF_Amode = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits);
-
- /* Path-B */
- if (is2T)
- RF_Bmode = PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits);
-
- /* 2. Set RF mode = standby mode */
- /* Path-A */
- PHY_SetRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
-
- /* Path-B */
- if (is2T)
- PHY_SetRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
- }
-
- /* 3. Read RF reg18 */
- LC_Cal = PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_CHNLBW, bMask12Bits);
-
- /* 4. Set LC calibration begin */
- PHY_SetRFReg(pAdapter, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
-
- msleep(100);
-
- /* Restore original situation */
-	if ((tmpReg&0x70) != 0) { /* Deal with continuous TX case */
- /* Path-A */
- rtl8723au_write8(pAdapter, 0xd03, tmpReg);
- PHY_SetRFReg(pAdapter, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
-
- /* Path-B */
- if (is2T)
- PHY_SetRFReg(pAdapter, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
- } else /* Deal with Packet TX case */
- rtl8723au_write8(pAdapter, REG_TXPAUSE, 0x00);
-}
-
-/* Analog Pre-distortion calibration */
-#define APK_BB_REG_NUM 8
-#define APK_CURVE_REG_NUM 4
-#define PATH_NUM 2
-
-void rtl8723a_phy_iq_calibrate(struct rtw_adapter *pAdapter, bool bReCovery)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- s32 result[4][8]; /* last is final result */
- u8 i, final_candidate;
- bool bPathAOK, bPathBOK;
- s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4;
- s32 RegECC, RegTmp = 0;
- bool is12simular, is13simular, is23simular;
- bool bStartContTx = false, bSingleTone = false;
- bool bCarrierSuppression = false;
- u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
- rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance,
- rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable,
- rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
- rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
- rOFDM0_RxIQExtAnta
- };
-
- /* ignore IQK when continuous Tx */
- if (bStartContTx || bSingleTone || bCarrierSuppression)
- return;
-
- if (bReCovery) {
- _PHY_ReloadADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup_recover, 9);
- return;
- }
- DBG_8723A("IQK:Start!!!\n");
-
- for (i = 0; i < 8; i++) {
- result[0][i] = 0;
- result[1][i] = 0;
- result[2][i] = 0;
- result[3][i] = 0;
- }
- final_candidate = 0xff;
- bPathAOK = false;
- bPathBOK = false;
- is12simular = false;
- is23simular = false;
- is13simular = false;
-
- for (i = 0; i < 3; i++) {
- if (pHalData->rf_type == RF_2T2R)
- _PHY_IQCalibrate(pAdapter, result, i, true);
- else /* For 88C 1T1R */
- _PHY_IQCalibrate(pAdapter, result, i, false);
-
- if (i == 1) {
- is12simular = _PHY_SimularityCompare(pAdapter, result, 0, 1);
- if (is12simular) {
- final_candidate = 0;
- break;
- }
- }
-
- if (i == 2) {
- is13simular = _PHY_SimularityCompare(pAdapter, result, 0, 2);
- if (is13simular) {
- final_candidate = 0;
- break;
- }
-
- is23simular = _PHY_SimularityCompare(pAdapter, result, 1, 2);
- if (is23simular) {
- final_candidate = 1;
- } else {
- for (i = 0; i < 8; i++)
- RegTmp += result[3][i];
-
- if (RegTmp != 0)
- final_candidate = 3;
- else
- final_candidate = 0xFF;
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- RegE94 = result[i][0];
- RegE9C = result[i][1];
- RegEA4 = result[i][2];
- RegEAC = result[i][3];
- RegEB4 = result[i][4];
- RegEBC = result[i][5];
- RegEC4 = result[i][6];
- RegECC = result[i][7];
- }
-
- if (final_candidate != 0xff) {
- RegE94 = result[final_candidate][0];
- pdmpriv->RegE94 = RegE94;
- RegE9C = result[final_candidate][1];
- pdmpriv->RegE9C = RegE9C;
- RegEA4 = result[final_candidate][2];
- RegEAC = result[final_candidate][3];
- RegEB4 = result[final_candidate][4];
- pdmpriv->RegEB4 = RegEB4;
- RegEBC = result[final_candidate][5];
- pdmpriv->RegEBC = RegEBC;
- RegEC4 = result[final_candidate][6];
- RegECC = result[final_candidate][7];
- DBG_8723A("IQK: final_candidate is %x\n", final_candidate);
- DBG_8723A("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ",
- RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC);
- bPathAOK = bPathBOK = true;
- } else {
- RegE94 = RegEB4 = pdmpriv->RegE94 = pdmpriv->RegEB4 = 0x100; /* X default value */
- RegE9C = RegEBC = pdmpriv->RegE9C = pdmpriv->RegEBC = 0x0; /* Y default value */
- }
-
- if ((RegE94 != 0)/*&&(RegEA4 != 0)*/)
- _PHY_PathAFillIQKMatrix(pAdapter, bPathAOK, result, final_candidate, (RegEA4 == 0));
-
- if (pHalData->rf_type == RF_2T2R) {
- if ((RegEB4 != 0)/*&&(RegEC4 != 0)*/)
- _PHY_PathBFillIQKMatrix(pAdapter, bPathBOK, result,
- final_candidate, (RegEC4 == 0));
- }
-
- _PHY_SaveADDARegisters(pAdapter, IQK_BB_REG_92C, pdmpriv->IQK_BB_backup_recover, 9);
-}
-
-void rtl8723a_phy_lc_calibrate(struct rtw_adapter *pAdapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- struct mlme_ext_priv *pmlmeext = &pAdapter->mlmeextpriv;
- bool bStartContTx = false, bSingleTone = false, bCarrierSuppression = false;
-
- /* ignore IQK when continuous Tx */
- if (bStartContTx || bSingleTone || bCarrierSuppression)
- return;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
- return;
-
- if (pHalData->rf_type == RF_2T2R)
- _PHY_LCCalibrate(pAdapter, true);
- else /* For 88C 1T1R */
- _PHY_LCCalibrate(pAdapter, false);
-}
-
-void
-rtl8723a_phy_ap_calibrate(struct rtw_adapter *pAdapter, char delta)
-{
-}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
deleted file mode 100644
index 8d3ea6c0cbe6..000000000000
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
+++ /dev/null
@@ -1,565 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
-
-#include "odm_precomp.h"
-
-static bool CheckCondition(const u32 Condition, const u32 Hex)
-{
- u32 _board = (Hex & 0x000000FF);
- u32 _interface = (Hex & 0x0000FF00) >> 8;
- u32 _platform = (Hex & 0x00FF0000) >> 16;
- u32 cond = Condition;
-
- if (Condition == 0xCDCDCDCD)
- return true;
-
- cond = Condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
- cond = Condition & 0x0000FF00;
- cond >>= 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = Condition & 0x00FF0000;
- cond >>= 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* AGC_TAB_1T.TXT
-******************************************************************************/
-
-static u32 Array_AGC_TAB_1T_8723A[] = {
- 0xC78, 0x7B000001,
- 0xC78, 0x7B010001,
- 0xC78, 0x7B020001,
- 0xC78, 0x7B030001,
- 0xC78, 0x7B040001,
- 0xC78, 0x7B050001,
- 0xC78, 0x7A060001,
- 0xC78, 0x79070001,
- 0xC78, 0x78080001,
- 0xC78, 0x77090001,
- 0xC78, 0x760A0001,
- 0xC78, 0x750B0001,
- 0xC78, 0x740C0001,
- 0xC78, 0x730D0001,
- 0xC78, 0x720E0001,
- 0xC78, 0x710F0001,
- 0xC78, 0x70100001,
- 0xC78, 0x6F110001,
- 0xC78, 0x6E120001,
- 0xC78, 0x6D130001,
- 0xC78, 0x6C140001,
- 0xC78, 0x6B150001,
- 0xC78, 0x6A160001,
- 0xC78, 0x69170001,
- 0xC78, 0x68180001,
- 0xC78, 0x67190001,
- 0xC78, 0x661A0001,
- 0xC78, 0x651B0001,
- 0xC78, 0x641C0001,
- 0xC78, 0x631D0001,
- 0xC78, 0x621E0001,
- 0xC78, 0x611F0001,
- 0xC78, 0x60200001,
- 0xC78, 0x49210001,
- 0xC78, 0x48220001,
- 0xC78, 0x47230001,
- 0xC78, 0x46240001,
- 0xC78, 0x45250001,
- 0xC78, 0x44260001,
- 0xC78, 0x43270001,
- 0xC78, 0x42280001,
- 0xC78, 0x41290001,
- 0xC78, 0x402A0001,
- 0xC78, 0x262B0001,
- 0xC78, 0x252C0001,
- 0xC78, 0x242D0001,
- 0xC78, 0x232E0001,
- 0xC78, 0x222F0001,
- 0xC78, 0x21300001,
- 0xC78, 0x20310001,
- 0xC78, 0x06320001,
- 0xC78, 0x05330001,
- 0xC78, 0x04340001,
- 0xC78, 0x03350001,
- 0xC78, 0x02360001,
- 0xC78, 0x01370001,
- 0xC78, 0x00380001,
- 0xC78, 0x00390001,
- 0xC78, 0x003A0001,
- 0xC78, 0x003B0001,
- 0xC78, 0x003C0001,
- 0xC78, 0x003D0001,
- 0xC78, 0x003E0001,
- 0xC78, 0x003F0001,
- 0xC78, 0x7B400001,
- 0xC78, 0x7B410001,
- 0xC78, 0x7B420001,
- 0xC78, 0x7B430001,
- 0xC78, 0x7B440001,
- 0xC78, 0x7B450001,
- 0xC78, 0x7A460001,
- 0xC78, 0x79470001,
- 0xC78, 0x78480001,
- 0xC78, 0x77490001,
- 0xC78, 0x764A0001,
- 0xC78, 0x754B0001,
- 0xC78, 0x744C0001,
- 0xC78, 0x734D0001,
- 0xC78, 0x724E0001,
- 0xC78, 0x714F0001,
- 0xC78, 0x70500001,
- 0xC78, 0x6F510001,
- 0xC78, 0x6E520001,
- 0xC78, 0x6D530001,
- 0xC78, 0x6C540001,
- 0xC78, 0x6B550001,
- 0xC78, 0x6A560001,
- 0xC78, 0x69570001,
- 0xC78, 0x68580001,
- 0xC78, 0x67590001,
- 0xC78, 0x665A0001,
- 0xC78, 0x655B0001,
- 0xC78, 0x645C0001,
- 0xC78, 0x635D0001,
- 0xC78, 0x625E0001,
- 0xC78, 0x615F0001,
- 0xC78, 0x60600001,
- 0xC78, 0x49610001,
- 0xC78, 0x48620001,
- 0xC78, 0x47630001,
- 0xC78, 0x46640001,
- 0xC78, 0x45650001,
- 0xC78, 0x44660001,
- 0xC78, 0x43670001,
- 0xC78, 0x42680001,
- 0xC78, 0x41690001,
- 0xC78, 0x406A0001,
- 0xC78, 0x266B0001,
- 0xC78, 0x256C0001,
- 0xC78, 0x246D0001,
- 0xC78, 0x236E0001,
- 0xC78, 0x226F0001,
- 0xC78, 0x21700001,
- 0xC78, 0x20710001,
- 0xC78, 0x06720001,
- 0xC78, 0x05730001,
- 0xC78, 0x04740001,
- 0xC78, 0x03750001,
- 0xC78, 0x02760001,
- 0xC78, 0x01770001,
- 0xC78, 0x00780001,
- 0xC78, 0x00790001,
- 0xC78, 0x007A0001,
- 0xC78, 0x007B0001,
- 0xC78, 0x007C0001,
- 0xC78, 0x007D0001,
- 0xC78, 0x007E0001,
- 0xC78, 0x007F0001,
- 0xC78, 0x3800001E,
- 0xC78, 0x3801001E,
- 0xC78, 0x3802001E,
- 0xC78, 0x3803001E,
- 0xC78, 0x3804001E,
- 0xC78, 0x3805001E,
- 0xC78, 0x3806001E,
- 0xC78, 0x3807001E,
- 0xC78, 0x3808001E,
- 0xC78, 0x3C09001E,
- 0xC78, 0x3E0A001E,
- 0xC78, 0x400B001E,
- 0xC78, 0x440C001E,
- 0xC78, 0x480D001E,
- 0xC78, 0x4C0E001E,
- 0xC78, 0x500F001E,
- 0xC78, 0x5210001E,
- 0xC78, 0x5611001E,
- 0xC78, 0x5A12001E,
- 0xC78, 0x5E13001E,
- 0xC78, 0x6014001E,
- 0xC78, 0x6015001E,
- 0xC78, 0x6016001E,
- 0xC78, 0x6217001E,
- 0xC78, 0x6218001E,
- 0xC78, 0x6219001E,
- 0xC78, 0x621A001E,
- 0xC78, 0x621B001E,
- 0xC78, 0x621C001E,
- 0xC78, 0x621D001E,
- 0xC78, 0x621E001E,
- 0xC78, 0x621F001E,
-};
-
-#define READ_NEXT_PAIR(v1, v2, i) \
- do { \
- i += 2; v1 = Array[i]; v2 = Array[i+1]; \
- } while (0)
-
-void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm)
-{
- u32 hex;
- u32 i;
- u8 platform = 0x04;
- u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = ARRAY_SIZE(Array_AGC_TAB_1T_8723A);
- u32 *Array = Array_AGC_TAB_1T_8723A;
-
- hex = board;
- hex += ODM_ITRF_USB << 8;
- hex += platform << 16;
- hex += 0xFF000000;
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i+1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigBB_AGC_8723A(pDM_Odm, v1, v2);
- continue;
- } else {
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
-				i -= 2; /* step back to offset the for-loop's i += 2 */
- } else {
- /* Configure matched pairs and skip to
- end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- odm_ConfigBB_AGC_8723A(pDM_Odm, v1, v2);
- READ_NEXT_PAIR(v1, v2, i);
- }
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
-}
-
-/******************************************************************************
-* PHY_REG_1T.TXT
-******************************************************************************/
-
-static u32 Array_PHY_REG_1T_8723A[] = {
- 0x800, 0x80040000,
- 0x804, 0x00000003,
- 0x808, 0x0000FC00,
- 0x80C, 0x0000000A,
- 0x810, 0x10001331,
- 0x814, 0x020C3D10,
- 0x818, 0x02200385,
- 0x81C, 0x00000000,
- 0x820, 0x01000100,
- 0x824, 0x00390004,
- 0x828, 0x00000000,
- 0x82C, 0x00000000,
- 0x830, 0x00000000,
- 0x834, 0x00000000,
- 0x838, 0x00000000,
- 0x83C, 0x00000000,
- 0x840, 0x00010000,
- 0x844, 0x00000000,
- 0x848, 0x00000000,
- 0x84C, 0x00000000,
- 0x850, 0x00000000,
- 0x854, 0x00000000,
- 0x858, 0x569A569A,
- 0x85C, 0x001B25A4,
- 0x860, 0x66F60110,
- 0x864, 0x061F0130,
- 0x868, 0x00000000,
- 0x86C, 0x32323200,
- 0x870, 0x07000760,
- 0x874, 0x22004000,
- 0x878, 0x00000808,
- 0x87C, 0x00000000,
- 0x880, 0xC0083070,
- 0x884, 0x000004D5,
- 0x888, 0x00000000,
- 0x88C, 0xCCC000C0,
- 0x890, 0x00000800,
- 0x894, 0xFFFFFFFE,
- 0x898, 0x40302010,
- 0x89C, 0x00706050,
- 0x900, 0x00000000,
- 0x904, 0x00000023,
- 0x908, 0x00000000,
- 0x90C, 0x81121111,
- 0xA00, 0x00D047C8,
- 0xA04, 0x80FF000C,
- 0xA08, 0x8C838300,
- 0xA0C, 0x2E68120F,
- 0xA10, 0x9500BB78,
- 0xA14, 0x11144028,
- 0xA18, 0x00881117,
- 0xA1C, 0x89140F00,
- 0xA20, 0x1A1B0000,
- 0xA24, 0x090E1317,
- 0xA28, 0x00000204,
- 0xA2C, 0x00D30000,
- 0xA70, 0x101FBF00,
- 0xA74, 0x00000007,
- 0xA78, 0x00000900,
- 0xC00, 0x48071D40,
- 0xC04, 0x03A05611,
- 0xC08, 0x000000E4,
- 0xC0C, 0x6C6C6C6C,
- 0xC10, 0x08800000,
- 0xC14, 0x40000100,
- 0xC18, 0x08800000,
- 0xC1C, 0x40000100,
- 0xC20, 0x00000000,
- 0xC24, 0x00000000,
- 0xC28, 0x00000000,
- 0xC2C, 0x00000000,
- 0xC30, 0x69E9AC44,
- 0xFF0F011F, 0xABCD,
- 0xC34, 0x469652CF,
- 0xCDCDCDCD, 0xCDCD,
- 0xC34, 0x469652AF,
- 0xFF0F011F, 0xDEAD,
- 0xC38, 0x49795994,
- 0xC3C, 0x0A97971C,
- 0xC40, 0x1F7C403F,
- 0xC44, 0x000100B7,
- 0xC48, 0xEC020107,
- 0xC4C, 0x007F037F,
- 0xC50, 0x69543420,
- 0xC54, 0x43BC0094,
- 0xC58, 0x69543420,
- 0xC5C, 0x433C0094,
- 0xC60, 0x00000000,
- 0xFF0F011F, 0xABCD,
- 0xC64, 0x7116848B,
- 0xCDCDCDCD, 0xCDCD,
- 0xC64, 0x7112848B,
- 0xFF0F011F, 0xDEAD,
- 0xC68, 0x47C00BFF,
- 0xC6C, 0x00000036,
- 0xC70, 0x2C7F000D,
- 0xC74, 0x018610DB,
- 0xC78, 0x0000001F,
- 0xC7C, 0x00B91612,
- 0xC80, 0x40000100,
- 0xC84, 0x20F60000,
- 0xC88, 0x40000100,
- 0xC8C, 0x20200000,
- 0xC90, 0x00121820,
- 0xC94, 0x00000000,
- 0xC98, 0x00121820,
- 0xC9C, 0x00007F7F,
- 0xCA0, 0x00000000,
- 0xCA4, 0x00000080,
- 0xCA8, 0x00000000,
- 0xCAC, 0x00000000,
- 0xCB0, 0x00000000,
- 0xCB4, 0x00000000,
- 0xCB8, 0x00000000,
- 0xCBC, 0x28000000,
- 0xCC0, 0x00000000,
- 0xCC4, 0x00000000,
- 0xCC8, 0x00000000,
- 0xCCC, 0x00000000,
- 0xCD0, 0x00000000,
- 0xCD4, 0x00000000,
- 0xCD8, 0x64B22427,
- 0xCDC, 0x00766932,
- 0xCE0, 0x00222222,
- 0xCE4, 0x00000000,
- 0xCE8, 0x37644302,
- 0xCEC, 0x2F97D40C,
- 0xD00, 0x00080740,
- 0xD04, 0x00020401,
- 0xD08, 0x0000907F,
- 0xD0C, 0x20010201,
- 0xD10, 0xA0633333,
- 0xD14, 0x3333BC43,
- 0xD18, 0x7A8F5B6B,
- 0xD2C, 0xCC979975,
- 0xD30, 0x00000000,
- 0xD34, 0x80608000,
- 0xD38, 0x00000000,
- 0xD3C, 0x00027293,
- 0xD40, 0x00000000,
- 0xD44, 0x00000000,
- 0xD48, 0x00000000,
- 0xD4C, 0x00000000,
- 0xD50, 0x6437140A,
- 0xD54, 0x00000000,
- 0xD58, 0x00000000,
- 0xD5C, 0x30032064,
- 0xD60, 0x4653DE68,
- 0xD64, 0x04518A3C,
- 0xD68, 0x00002101,
- 0xD6C, 0x2A201C16,
- 0xD70, 0x1812362E,
- 0xD74, 0x322C2220,
- 0xD78, 0x000E3C24,
- 0xE00, 0x2A2A2A2A,
- 0xE04, 0x2A2A2A2A,
- 0xE08, 0x03902A2A,
- 0xE10, 0x2A2A2A2A,
- 0xE14, 0x2A2A2A2A,
- 0xE18, 0x2A2A2A2A,
- 0xE1C, 0x2A2A2A2A,
- 0xE28, 0x00000000,
- 0xE30, 0x1000DC1F,
- 0xE34, 0x10008C1F,
- 0xE38, 0x02140102,
- 0xE3C, 0x681604C2,
- 0xE40, 0x01007C00,
- 0xE44, 0x01004800,
- 0xE48, 0xFB000000,
- 0xE4C, 0x000028D1,
- 0xE50, 0x1000DC1F,
- 0xE54, 0x10008C1F,
- 0xE58, 0x02140102,
- 0xE5C, 0x28160D05,
- 0xE60, 0x00000008,
- 0xE68, 0x001B25A4,
- 0xE6C, 0x631B25A0,
- 0xE70, 0x631B25A0,
- 0xE74, 0x081B25A0,
- 0xE78, 0x081B25A0,
- 0xE7C, 0x081B25A0,
- 0xE80, 0x081B25A0,
- 0xE84, 0x631B25A0,
- 0xE88, 0x081B25A0,
- 0xE8C, 0x631B25A0,
- 0xED0, 0x631B25A0,
- 0xED4, 0x631B25A0,
- 0xED8, 0x631B25A0,
- 0xEDC, 0x001B25A0,
- 0xEE0, 0x001B25A0,
- 0xEEC, 0x6B1B25A0,
- 0xF14, 0x00000003,
- 0xF4C, 0x00000000,
- 0xF00, 0x00000300,
-};
-
-void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm)
-{
- u32 hex = 0;
- u32 i = 0;
- u8 platform = 0x04;
- u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_1T_8723A);
- u32 *Array = Array_PHY_REG_1T_8723A;
-
- hex += board;
- hex += ODM_ITRF_USB << 8;
- hex += platform << 16;
- hex += 0xFF000000;
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i+1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigBB_PHY_8723A(pDM_Odm, v1, v2);
- continue;
- } else {
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
-				i -= 2; /* step back to offset the for-loop's i += 2 */
- } else {
- /* Configure matched pairs and skip to
- end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- odm_ConfigBB_PHY_8723A(pDM_Odm, v1, v2);
- READ_NEXT_PAIR(v1, v2, i);
- }
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
-}
-
-/******************************************************************************
-* PHY_REG_MP.TXT
-******************************************************************************/
-
-static u32 Array_PHY_REG_MP_8723A[] = {
- 0xC30, 0x69E9AC4A,
- 0xC3C, 0x0A979718,
-};
-
-void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm)
-{
- u32 hex = 0;
- u32 i;
- u8 platform = 0x04;
- u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_MP_8723A);
- u32 *Array = Array_PHY_REG_MP_8723A;
-
- hex += board;
- hex += ODM_ITRF_USB << 8;
- hex += platform << 16;
- hex += 0xFF000000;
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i+1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigBB_PHY_8723A(pDM_Odm, v1, v2);
- continue;
- } else {
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
-				i -= 2; /* step back to offset the for-loop's i += 2 */
- } else {
- /* Configure matched pairs and skip to
- end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- odm_ConfigBB_PHY_8723A(pDM_Odm, v1, v2);
- READ_NEXT_PAIR(v1, v2, i);
- }
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
deleted file mode 100644
index 9bf685905e68..000000000000
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
-
-#include "odm_precomp.h"
-
-static bool CheckCondition(const u32 Condition, const u32 Hex)
-{
- u32 _board = (Hex & 0x000000FF);
- u32 _interface = (Hex & 0x0000FF00) >> 8;
- u32 _platform = (Hex & 0x00FF0000) >> 16;
- u32 cond = Condition;
-
- if (Condition == 0xCDCDCDCD)
- return true;
-
- cond = Condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
- cond = Condition & 0x0000FF00;
- cond >>= 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = Condition & 0x00FF0000;
- cond >>= 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* MAC_REG.TXT
-******************************************************************************/
-
-static u32 Array_MAC_REG_8723A[] = {
- 0x420, 0x00000080,
- 0x423, 0x00000000,
- 0x430, 0x00000000,
- 0x431, 0x00000000,
- 0x432, 0x00000000,
- 0x433, 0x00000001,
- 0x434, 0x00000004,
- 0x435, 0x00000005,
- 0x436, 0x00000006,
- 0x437, 0x00000007,
- 0x438, 0x00000000,
- 0x439, 0x00000000,
- 0x43A, 0x00000000,
- 0x43B, 0x00000001,
- 0x43C, 0x00000004,
- 0x43D, 0x00000005,
- 0x43E, 0x00000006,
- 0x43F, 0x00000007,
- 0x440, 0x0000005D,
- 0x441, 0x00000001,
- 0x442, 0x00000000,
- 0x444, 0x00000015,
- 0x445, 0x000000F0,
- 0x446, 0x0000000F,
- 0x447, 0x00000000,
- 0x458, 0x00000041,
- 0x459, 0x000000A8,
- 0x45A, 0x00000072,
- 0x45B, 0x000000B9,
- 0x460, 0x00000066,
- 0x461, 0x00000066,
- 0x462, 0x00000008,
- 0x463, 0x00000003,
- 0x4C8, 0x000000FF,
- 0x4C9, 0x00000008,
- 0x4CC, 0x000000FF,
- 0x4CD, 0x000000FF,
- 0x4CE, 0x00000001,
- 0x500, 0x00000026,
- 0x501, 0x000000A2,
- 0x502, 0x0000002F,
- 0x503, 0x00000000,
- 0x504, 0x00000028,
- 0x505, 0x000000A3,
- 0x506, 0x0000005E,
- 0x507, 0x00000000,
- 0x508, 0x0000002B,
- 0x509, 0x000000A4,
- 0x50A, 0x0000005E,
- 0x50B, 0x00000000,
- 0x50C, 0x0000004F,
- 0x50D, 0x000000A4,
- 0x50E, 0x00000000,
- 0x50F, 0x00000000,
- 0x512, 0x0000001C,
- 0x514, 0x0000000A,
- 0x515, 0x00000010,
- 0x516, 0x0000000A,
- 0x517, 0x00000010,
- 0x51A, 0x00000016,
- 0x524, 0x0000000F,
- 0x525, 0x0000004F,
- 0x546, 0x00000040,
- 0x547, 0x00000000,
- 0x550, 0x00000010,
- 0x551, 0x00000010,
- 0x559, 0x00000002,
- 0x55A, 0x00000002,
- 0x55D, 0x000000FF,
- 0x605, 0x00000030,
- 0x608, 0x0000000E,
- 0x609, 0x0000002A,
- 0x652, 0x00000020,
- 0x63C, 0x0000000A,
- 0x63D, 0x0000000A,
- 0x63E, 0x0000000E,
- 0x63F, 0x0000000E,
- 0x66E, 0x00000005,
- 0x700, 0x00000021,
- 0x701, 0x00000043,
- 0x702, 0x00000065,
- 0x703, 0x00000087,
- 0x708, 0x00000021,
- 0x709, 0x00000043,
- 0x70A, 0x00000065,
- 0x70B, 0x00000087,
-};
-
-void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm)
-{
- #define READ_NEXT_PAIR(v1, v2, i) \
- do { \
- i += 2; v1 = Array[i]; v2 = Array[i+1]; \
- } while (0)
-
- u32 hex = 0;
- u32 i = 0;
- u8 platform = 0x04;
- u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = ARRAY_SIZE(Array_MAC_REG_8723A);
- u32 *Array = Array_MAC_REG_8723A;
-
- hex += board;
- hex += ODM_ITRF_USB << 8;
- hex += platform << 16;
- hex += 0xFF000000;
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i+1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigMAC_8723A(pDM_Odm, v1, (u8)v2);
- continue;
- } else {
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
-				i -= 2; /* step back to offset the for-loop's i += 2 */
- } else {
- /* Configure matched pairs and skip to end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- odm_ConfigMAC_8723A(pDM_Odm, v1, (u8)v2);
- READ_NEXT_PAIR(v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
deleted file mode 100644
index 286f3ea3d263..000000000000
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
-
-#include "odm_precomp.h"
-
-static bool CheckCondition(const u32 Condition, const u32 Hex)
-{
- u32 _board = (Hex & 0x000000FF);
- u32 _interface = (Hex & 0x0000FF00) >> 8;
- u32 _platform = (Hex & 0x00FF0000) >> 16;
- u32 cond = Condition;
-
- if (Condition == 0xCDCDCDCD)
- return true;
-
- cond = Condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
- cond = Condition & 0x0000FF00;
- cond >>= 8;
- if ((_interface & cond) == 0 && cond != 0x07)
- return false;
-
- cond = Condition & 0x00FF0000;
- cond >>= 16;
- if ((_platform & cond) == 0 && cond != 0x0F)
- return false;
- return true;
-}
-
-/******************************************************************************
-* RadioA_1T.TXT
-******************************************************************************/
-
-static u32 Array_RadioA_1T_8723A[] = {
- 0x000, 0x00030159,
- 0x001, 0x00031284,
- 0x002, 0x00098000,
- 0xFF0F011F, 0xABCD,
- 0x003, 0x00018C63,
- 0xCDCDCDCD, 0xCDCD,
- 0x003, 0x00039C63,
- 0xFF0F011F, 0xDEAD,
- 0x004, 0x000210E7,
- 0x009, 0x0002044F,
- 0x00A, 0x0001A3F1,
- 0x00B, 0x00014787,
- 0x00C, 0x000896FE,
- 0x00D, 0x0000E02C,
- 0x00E, 0x00039CE7,
- 0x00F, 0x00000451,
- 0x019, 0x00000000,
- 0x01A, 0x00030355,
- 0x01B, 0x00060A00,
- 0x01C, 0x000FC378,
- 0x01D, 0x000A1250,
- 0x01E, 0x0000024F,
- 0x01F, 0x00000000,
- 0x020, 0x0000B614,
- 0x021, 0x0006C000,
- 0x022, 0x00000000,
- 0x023, 0x00001558,
- 0x024, 0x00000060,
- 0x025, 0x00000483,
- 0x026, 0x0004F000,
- 0x027, 0x000EC7D9,
- 0x028, 0x00057730,
- 0x029, 0x00004783,
- 0x02A, 0x00000001,
- 0x02B, 0x00021334,
- 0x02A, 0x00000000,
- 0x02B, 0x00000054,
- 0x02A, 0x00000001,
- 0x02B, 0x00000808,
- 0x02B, 0x00053333,
- 0x02C, 0x0000000C,
- 0x02A, 0x00000002,
- 0x02B, 0x00000808,
- 0x02B, 0x0005B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000003,
- 0x02B, 0x00000808,
- 0x02B, 0x00063333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000004,
- 0x02B, 0x00000808,
- 0x02B, 0x0006B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000005,
- 0x02B, 0x00000808,
- 0x02B, 0x00073333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000006,
- 0x02B, 0x00000709,
- 0x02B, 0x0005B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000007,
- 0x02B, 0x00000709,
- 0x02B, 0x00063333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000008,
- 0x02B, 0x0000060A,
- 0x02B, 0x0004B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x00000009,
- 0x02B, 0x0000060A,
- 0x02B, 0x00053333,
- 0x02C, 0x0000000D,
- 0x02A, 0x0000000A,
- 0x02B, 0x0000060A,
- 0x02B, 0x0005B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x0000000B,
- 0x02B, 0x0000060A,
- 0x02B, 0x00063333,
- 0x02C, 0x0000000D,
- 0x02A, 0x0000000C,
- 0x02B, 0x0000060A,
- 0x02B, 0x0006B333,
- 0x02C, 0x0000000D,
- 0x02A, 0x0000000D,
- 0x02B, 0x0000060A,
- 0x02B, 0x00073333,
- 0x02C, 0x0000000D,
- 0x02A, 0x0000000E,
- 0x02B, 0x0000050B,
- 0x02B, 0x00066666,
- 0x02C, 0x0000001A,
- 0x02A, 0x000E0000,
- 0x010, 0x0004000F,
- 0x011, 0x000E31FC,
- 0x010, 0x0006000F,
- 0x011, 0x000FF9F8,
- 0x010, 0x0002000F,
- 0x011, 0x000203F9,
- 0x010, 0x0003000F,
- 0x011, 0x000FF500,
- 0x010, 0x00000000,
- 0x011, 0x00000000,
- 0x010, 0x0008000F,
- 0x011, 0x0003F100,
- 0x010, 0x0009000F,
- 0x011, 0x00023100,
- 0x012, 0x00032000,
- 0x012, 0x00071000,
- 0x012, 0x000B0000,
- 0x012, 0x000FC000,
- 0x013, 0x000287B3,
- 0x013, 0x000244B7,
- 0x013, 0x000204AB,
- 0x013, 0x0001C49F,
- 0x013, 0x00018493,
- 0x013, 0x0001429B,
- 0x013, 0x00010299,
- 0x013, 0x0000C29C,
- 0x013, 0x000081A0,
- 0x013, 0x000040AC,
- 0x013, 0x00000020,
- 0x014, 0x0001944C,
- 0x014, 0x00059444,
- 0x014, 0x0009944C,
- 0x014, 0x000D9444,
- 0xFF0F011F, 0xABCD,
- 0x015, 0x0000F424,
- 0x015, 0x0004F424,
- 0x015, 0x0008F424,
- 0x015, 0x000CF424,
- 0xCDCDCDCD, 0xCDCD,
- 0x015, 0x0000F474,
- 0x015, 0x0004F477,
- 0x015, 0x0008F455,
- 0x015, 0x000CF455,
- 0xFF0F011F, 0xDEAD,
- 0x016, 0x00000339,
- 0x016, 0x00040339,
- 0x016, 0x00080339,
- 0xFF0F011F, 0xABCD,
- 0x016, 0x000C0356,
- 0xCDCDCDCD, 0xCDCD,
- 0x016, 0x000C0366,
- 0xFF0F011F, 0xDEAD,
- 0x000, 0x00010159,
- 0x018, 0x0000F401,
- 0x0FE, 0x00000000,
- 0x0FE, 0x00000000,
- 0x01F, 0x00000003,
- 0x0FE, 0x00000000,
- 0x0FE, 0x00000000,
- 0x01E, 0x00000247,
- 0x01F, 0x00000000,
- 0x000, 0x00030159,
-};
-
-void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm)
-{
- #define READ_NEXT_PAIR(v1, v2, i) \
- do { \
- i += 2; v1 = Array[i]; v2 = Array[i+1];\
- } while (0)
-
- u32 hex = 0;
- u32 i = 0;
- u8 platform = 0x04;
- u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8723A);
- u32 *Array = Array_RadioA_1T_8723A;
-
- hex += board;
- hex += ODM_ITRF_USB << 8;
- hex += platform << 16;
- hex += 0xFF000000;
-
- for (i = 0; i < ArrayLen; i += 2) {
- u32 v1 = Array[i];
- u32 v2 = Array[i+1];
-
- /* This (offset, data) pair meets the condition. */
- if (v1 < 0xCDCDCDCD) {
- odm_ConfigRFReg_8723A(pDM_Odm, v1, v2, RF_PATH_A, v1);
- continue;
- } else {
- if (!CheckCondition(Array[i], hex)) {
- /* Discard the following (offset, data) pairs. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
-				i -= 2; /* step back to offset the for-loop's i += 2 */
- } else {
- /* Configure matched pairs and skip to end of if-else. */
- READ_NEXT_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < ArrayLen - 2) {
- odm_ConfigRFReg_8723A(pDM_Odm, v1, v2,
- RF_PATH_A, v1);
- READ_NEXT_PAIR(v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < ArrayLen - 2)
- READ_NEXT_PAIR(v1, v2, i);
- }
- }
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
deleted file mode 100644
index 0a3d96e840cc..000000000000
--- a/drivers/staging/rtl8723au/hal/HalPwrSeqCmd.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/*++
-Copyright (c) Realtek Semiconductor Corp. All rights reserved.
-
-Module Name:
- HalPwrSeqCmd.c
-
-Abstract:
- Implement HW Power sequence configuration CMD handling routine for
- Realtek devices.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
- 2011-07-07 Roger Create.
-
---*/
-#include <HalPwrSeqCmd.h>
-#include <usb_ops_linux.h>
-
-/* */
-/* Description: */
-/* This routine deals with Power Configuration CMD parsing
-   for RTL8723/RTL8188E series ICs. */
-/* */
-/* Assumption: */
-/* We should follow the specific format released by
-   HW SD. */
-/* */
-/* 2011.07.07, added by Roger. */
-/* */
-u8 HalPwrSeqCmdParsing23a(struct rtw_adapter *padapter, u8 CutVersion,
- u8 FabVersion, u8 InterfaceType,
- struct wlan_pwr_cfg PwrSeqCmd[])
-{
- struct wlan_pwr_cfg PwrCfgCmd;
- u8 bPollingBit;
- u32 AryIdx = 0;
- u8 value;
- u32 offset;
- u32 pollingCount = 0; /* polling autoload done. */
- u32 maxPollingCnt = 5000;
-
- do {
- PwrCfgCmd = PwrSeqCmd[AryIdx];
-
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: offset(%#x) cut_msk(%#x) fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x) msk(%#x) value(%#x)\n",
- GET_PWR_CFG_OFFSET(PwrCfgCmd),
- GET_PWR_CFG_CUT_MASK(PwrCfgCmd),
- GET_PWR_CFG_FAB_MASK(PwrCfgCmd),
- GET_PWR_CFG_INTF_MASK(PwrCfgCmd),
- GET_PWR_CFG_BASE(PwrCfgCmd),
- GET_PWR_CFG_CMD(PwrCfgCmd),
- GET_PWR_CFG_MASK(PwrCfgCmd),
- GET_PWR_CFG_VALUE(PwrCfgCmd));
-
-		/* Only handle the command whose FAB, CUT, and Interface
-		   are matched */
- if ((GET_PWR_CFG_FAB_MASK(PwrCfgCmd) & FabVersion) &&
- (GET_PWR_CFG_CUT_MASK(PwrCfgCmd) & CutVersion) &&
- (GET_PWR_CFG_INTF_MASK(PwrCfgCmd) & InterfaceType)) {
- switch (GET_PWR_CFG_CMD(PwrCfgCmd)) {
- case PWR_CMD_READ:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: PWR_CMD_READ\n");
- break;
-
- case PWR_CMD_WRITE:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: PWR_CMD_WRITE\n");
- offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
-
- /* Read the value from system register */
- value = rtl8723au_read8(padapter, offset);
-
- value &= ~(GET_PWR_CFG_MASK(PwrCfgCmd));
- value |= (GET_PWR_CFG_VALUE(PwrCfgCmd) &
- GET_PWR_CFG_MASK(PwrCfgCmd));
-
- /* Write the value back to system register */
- rtl8723au_write8(padapter, offset, value);
- break;
-
- case PWR_CMD_POLLING:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: PWR_CMD_POLLING\n");
-
- bPollingBit = false;
- offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
- do {
- value = rtl8723au_read8(padapter,
- offset);
-
- value &= GET_PWR_CFG_MASK(PwrCfgCmd);
- if (value ==
- (GET_PWR_CFG_VALUE(PwrCfgCmd) &
- GET_PWR_CFG_MASK(PwrCfgCmd)))
- bPollingBit = true;
- else
- udelay(10);
-
- if (pollingCount++ > maxPollingCnt) {
- DBG_8723A("Fail to polling "
- "Offset[%#x]\n",
- offset);
- return false;
- }
- } while (!bPollingBit);
-
- break;
-
- case PWR_CMD_DELAY:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: PWR_CMD_DELAY\n");
- if (GET_PWR_CFG_VALUE(PwrCfgCmd) ==
- PWRSEQ_DELAY_US)
- udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd));
- else
- udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd) *
- 1000);
- break;
-
- case PWR_CMD_END:
- /* When this command is parsed, end
- the process */
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "HalPwrSeqCmdParsing23a: PWR_CMD_END\n");
- return true;
-
- default:
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- "HalPwrSeqCmdParsing23a: Unknown CMD!!\n");
- break;
- }
- }
-
- AryIdx++; /* Add Array Index */
- } while (1);
-
- return true;
-}
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
deleted file mode 100644
index 9d7b11b63957..000000000000
--- a/drivers/staging/rtl8723au/hal/hal_com.c
+++ /dev/null
@@ -1,853 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <hal_intf.h>
-#include <hal_com.h>
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-#define _HAL_INIT_C_
-
-#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
-
-/* return the final channel plan decision */
-/* hw_channel_plan: channel plan from HW (efuse/eeprom) */
-/* sw_channel_plan: channel plan from SW (registry/module param) */
-/* def_channel_plan: channel plan used when the former two is invalid */
-u8 hal_com_get_channel_plan23a(struct rtw_adapter *padapter, u8 hw_channel_plan,
- u8 sw_channel_plan, u8 def_channel_plan,
- bool AutoLoadFail)
-{
- u8 swConfig;
- u8 chnlPlan;
-
- swConfig = true;
- if (!AutoLoadFail) {
- if (!rtw_is_channel_plan_valid(sw_channel_plan))
- swConfig = false;
- if (hw_channel_plan & EEPROM_CHANNEL_PLAN_BY_HW_MASK)
- swConfig = false;
- }
-
- if (swConfig == true)
- chnlPlan = sw_channel_plan;
- else
- chnlPlan = hw_channel_plan & (~EEPROM_CHANNEL_PLAN_BY_HW_MASK);
-
- if (!rtw_is_channel_plan_valid(chnlPlan))
- chnlPlan = def_channel_plan;
-
- return chnlPlan;
-}
-
-u8 MRateToHwRate23a(u8 rate)
-{
- u8 ret = DESC_RATE1M;
-
- switch (rate) {
- /* CCK and OFDM non-HT rates */
- case IEEE80211_CCK_RATE_1MB:
- ret = DESC_RATE1M;
- break;
- case IEEE80211_CCK_RATE_2MB:
- ret = DESC_RATE2M;
- break;
- case IEEE80211_CCK_RATE_5MB:
- ret = DESC_RATE5_5M;
- break;
- case IEEE80211_CCK_RATE_11MB:
- ret = DESC_RATE11M;
- break;
- case IEEE80211_OFDM_RATE_6MB:
- ret = DESC_RATE6M;
- break;
- case IEEE80211_OFDM_RATE_9MB:
- ret = DESC_RATE9M;
- break;
- case IEEE80211_OFDM_RATE_12MB:
- ret = DESC_RATE12M;
- break;
- case IEEE80211_OFDM_RATE_18MB:
- ret = DESC_RATE18M;
- break;
- case IEEE80211_OFDM_RATE_24MB:
- ret = DESC_RATE24M;
- break;
- case IEEE80211_OFDM_RATE_36MB:
- ret = DESC_RATE36M;
- break;
- case IEEE80211_OFDM_RATE_48MB:
- ret = DESC_RATE48M;
- break;
- case IEEE80211_OFDM_RATE_54MB:
- ret = DESC_RATE54M;
- break;
-
- /* HT rates since here */
- /* case MGN_MCS0: ret = DESC_RATEMCS0; break; */
- /* case MGN_MCS1: ret = DESC_RATEMCS1; break; */
- /* case MGN_MCS2: ret = DESC_RATEMCS2; break; */
- /* case MGN_MCS3: ret = DESC_RATEMCS3; break; */
- /* case MGN_MCS4: ret = DESC_RATEMCS4; break; */
- /* case MGN_MCS5: ret = DESC_RATEMCS5; break; */
- /* case MGN_MCS6: ret = DESC_RATEMCS6; break; */
- /* case MGN_MCS7: ret = DESC_RATEMCS7; break; */
-
- default:
- break;
- }
- return ret;
-}
-
-void HalSetBrateCfg23a(struct rtw_adapter *padapter, u8 *mBratesOS)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 i, is_brate, brate;
- u16 brate_cfg = 0;
- u8 rate_index;
-
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- is_brate = mBratesOS[i] & IEEE80211_BASIC_RATE_MASK;
- brate = mBratesOS[i] & 0x7f;
-
- if (is_brate) {
- switch (brate) {
- case IEEE80211_CCK_RATE_1MB:
- brate_cfg |= RATE_1M;
- break;
- case IEEE80211_CCK_RATE_2MB:
- brate_cfg |= RATE_2M;
- break;
- case IEEE80211_CCK_RATE_5MB:
- brate_cfg |= RATE_5_5M;
- break;
- case IEEE80211_CCK_RATE_11MB:
- brate_cfg |= RATE_11M;
- break;
- case IEEE80211_OFDM_RATE_6MB:
- brate_cfg |= RATE_6M;
- break;
- case IEEE80211_OFDM_RATE_9MB:
- brate_cfg |= RATE_9M;
- break;
- case IEEE80211_OFDM_RATE_12MB:
- brate_cfg |= RATE_12M;
- break;
- case IEEE80211_OFDM_RATE_18MB:
- brate_cfg |= RATE_18M;
- break;
- case IEEE80211_OFDM_RATE_24MB:
- brate_cfg |= RATE_24M;
- break;
- case IEEE80211_OFDM_RATE_36MB:
- brate_cfg |= RATE_36M;
- break;
- case IEEE80211_OFDM_RATE_48MB:
- brate_cfg |= RATE_48M;
- break;
- case IEEE80211_OFDM_RATE_54MB:
- brate_cfg |= RATE_54M;
- break;
- }
- }
- }
-
- /* 2007.01.16, by Emily */
- /* Select RRSR (in Legacy-OFDM and CCK) */
- /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M,
- and 1M from the Basic rate. */
- /* We do not use other rates. */
-	/* 2011.03.30 added by Luke Lee */
- /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
- /* because CCK 2M has poor TXEVM */
- /* CCK 5.5M & 11M ACK should be enabled for better
- performance */
-
- brate_cfg = (brate_cfg | 0xd) & 0x15d;
- pHalData->BasicRateSet = brate_cfg;
- brate_cfg |= 0x01; /* default enable 1M ACK rate */
- DBG_8723A("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", brate_cfg);
-
- /* Set RRSR rate table. */
- rtl8723au_write8(padapter, REG_RRSR, brate_cfg & 0xff);
- rtl8723au_write8(padapter, REG_RRSR + 1, (brate_cfg >> 8) & 0xff);
- rtl8723au_write8(padapter, REG_RRSR + 2,
- rtl8723au_read8(padapter, REG_RRSR + 2) & 0xf0);
-
- rate_index = 0;
- /* Set RTS initial rate */
- while (brate_cfg > 0x1) {
- brate_cfg >>= 1;
- rate_index++;
- }
- /* Ziv - Check */
- rtl8723au_write8(padapter, REG_INIRTS_RATE_SEL, rate_index);
-}
-
-static void _OneOutPipeMapping(struct rtw_adapter *pAdapter)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0]; /* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD */
-}
-
-static void _TwoOutPipeMapping(struct rtw_adapter *pAdapter, bool bWIFICfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
-
- if (bWIFICfg) { /* WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 0, 1, 0, 1, 0, 0, 0, 0, 0 }; */
- /* 0:H, 1:L */
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1]; /* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
- } else { /* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 1, 0, 0, 0, 0, 0, 0, 0 }; */
- /* 0:H, 1:L */
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
- }
-}
-
-static void _ThreeOutPipeMapping(struct rtw_adapter *pAdapter, bool bWIFICfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter);
-
- if (bWIFICfg) { /* for WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 2, 1, 0, 0, 0, 0, 0, 0 }; */
- /* 0:H, 1:N, 2:L */
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
- } else { /* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 2, 2, 1, 0, 0, 0, 0, 0, 0 }; */
- /* 0:H, 1:N, 2:L */
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2]; /* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/
- }
-}
-
-bool Hal_MappingOutPipe23a(struct rtw_adapter *pAdapter, u8 NumOutPipe)
-{
- struct registry_priv *pregistrypriv = &pAdapter->registrypriv;
- bool bWIFICfg = (pregistrypriv->wifi_spec) ? true : false;
- bool result = true;
-
- switch (NumOutPipe) {
- case 2:
- _TwoOutPipeMapping(pAdapter, bWIFICfg);
- break;
- case 3:
- _ThreeOutPipeMapping(pAdapter, bWIFICfg);
- break;
- case 1:
- _OneOutPipeMapping(pAdapter);
- break;
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-/*
-* C2H event format:
-* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
-* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
-*/
-
-void c2h_evt_clear23a(struct rtw_adapter *adapter)
-{
- rtl8723au_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
-}
-
-int c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf)
-{
- int ret = _FAIL;
- struct c2h_evt_hdr *c2h_evt;
- int i;
- u8 trigger;
-
- if (buf == NULL)
- goto exit;
-
- trigger = rtl8723au_read8(adapter, REG_C2HEVT_CLEAR);
-
- if (trigger == C2H_EVT_HOST_CLOSE)
- goto exit; /* Not ready */
- if (trigger != C2H_EVT_FW_CLOSE)
- goto clear_evt; /* Not a valid value */
-
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- memset(c2h_evt, 0, 16);
-
- *buf = rtl8723au_read8(adapter, REG_C2HEVT_MSG_NORMAL);
- *(buf + 1) = rtl8723au_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
-
- RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read23a(): ",
- &c2h_evt, sizeof(c2h_evt));
-
- if (0) {
- DBG_8723A("%s id:%u, len:%u, seq:%u, trigger:0x%02x\n",
- __func__, c2h_evt->id, c2h_evt->plen, c2h_evt->seq,
- trigger);
- }
-
- /* Read the content */
- for (i = 0; i < c2h_evt->plen; i++)
- c2h_evt->payload[i] = rtl8723au_read8(adapter,
- REG_C2HEVT_MSG_NORMAL +
- sizeof(*c2h_evt) + i);
-
- RT_PRINT_DATA(_module_hal_init_c_, _drv_info_,
- "c2h_evt_read23a(): Command Content:\n", c2h_evt->payload,
- c2h_evt->plen);
-
- ret = _SUCCESS;
-
-clear_evt:
- /*
- * Clear event to notify FW we have read the command.
- * If this field isn't clear, the FW won't update the
- * next command message.
- */
- c2h_evt_clear23a(adapter);
-exit:
- return ret;
-}
-
-void
-rtl8723a_set_ampdu_min_space(struct rtw_adapter *padapter, u8 MinSpacingToSet)
-{
- u8 SecMinSpace;
-
- if (MinSpacingToSet <= 7) {
- switch (padapter->securitypriv.dot11PrivacyAlgrthm) {
- case 0:
- case WLAN_CIPHER_SUITE_CCMP:
- SecMinSpace = 0;
- break;
-
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- SecMinSpace = 6;
- break;
- default:
- SecMinSpace = 7;
- break;
- }
-
- if (MinSpacingToSet < SecMinSpace)
- MinSpacingToSet = SecMinSpace;
-
- MinSpacingToSet |=
- rtl8723au_read8(padapter, REG_AMPDU_MIN_SPACE) & 0xf8;
- rtl8723au_write8(padapter, REG_AMPDU_MIN_SPACE,
- MinSpacingToSet);
- }
-}
-
-void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet)
-{
- u8 RegToSet_Normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
- u8 MaxAggNum;
- u8 *pRegToSet;
- u8 index = 0;
-
- pRegToSet = RegToSet_Normal; /* 0xb972a841; */
-
- if (rtl8723a_BT_enabled(padapter) &&
- rtl8723a_BT_using_antenna_1(padapter))
- MaxAggNum = 0x8;
- else
- MaxAggNum = 0xF;
-
- if (FactorToSet <= 3) {
- FactorToSet = 1 << (FactorToSet + 2);
- if (FactorToSet > MaxAggNum)
- FactorToSet = MaxAggNum;
-
- for (index = 0; index < 4; index++) {
- if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
- pRegToSet[index] = (pRegToSet[index] & 0x0f) |
- (FactorToSet << 4);
-
- if ((pRegToSet[index] & 0x0f) > FactorToSet)
- pRegToSet[index] = (pRegToSet[index] & 0xf0) |
- FactorToSet;
-
- rtl8723au_write8(padapter, REG_AGGLEN_LMT + index,
- pRegToSet[index]);
- }
- }
-}
-
-void rtl8723a_set_acm_ctrl(struct rtw_adapter *padapter, u8 ctrl)
-{
- u8 hwctrl = 0;
-
- if (ctrl != 0) {
- hwctrl |= AcmHw_HwEn;
-
- if (ctrl & BIT(1)) /* BE */
- hwctrl |= AcmHw_BeqEn;
-
- if (ctrl & BIT(2)) /* VI */
- hwctrl |= AcmHw_ViqEn;
-
- if (ctrl & BIT(3)) /* VO */
- hwctrl |= AcmHw_VoqEn;
- }
-
- DBG_8723A("[HW_VAR_ACM_CTRL] Write 0x%02X\n", hwctrl);
- rtl8723au_write8(padapter, REG_ACMHWCTRL, hwctrl);
-}
-
-void rtl8723a_set_media_status(struct rtw_adapter *padapter, u8 status)
-{
- u8 val8;
-
- val8 = rtl8723au_read8(padapter, MSR) & 0x0c;
- val8 |= status;
- rtl8723au_write8(padapter, MSR, val8);
-}
-
-void rtl8723a_set_media_status1(struct rtw_adapter *padapter, u8 status)
-{
- u8 val8;
-
- val8 = rtl8723au_read8(padapter, MSR) & 0x03;
- val8 |= status << 2;
- rtl8723au_write8(padapter, MSR, val8);
-}
-
-void rtl8723a_set_bcn_func(struct rtw_adapter *padapter, u8 val)
-{
- if (val)
- SetBcnCtrlReg23a(padapter, EN_BCN_FUNCTION | EN_TXBCN_RPT, 0);
- else
- SetBcnCtrlReg23a(padapter, 0, EN_BCN_FUNCTION | EN_TXBCN_RPT);
-}
-
-void rtl8723a_check_bssid(struct rtw_adapter *padapter, u8 val)
-{
- u32 val32;
-
- val32 = rtl8723au_read32(padapter, REG_RCR);
- if (val)
- val32 |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
- else
- val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
- rtl8723au_write32(padapter, REG_RCR, val32);
-}
-
-void rtl8723a_mlme_sitesurvey(struct rtw_adapter *padapter, u8 flag)
-{
- if (flag) { /* under sitesurvey */
- u32 v32;
-
- /* config RCR to receive different BSSID & not
- to receive data frame */
- v32 = rtl8723au_read32(padapter, REG_RCR);
- v32 &= ~(RCR_CBSSID_BCN);
- rtl8723au_write32(padapter, REG_RCR, v32);
- /* reject all data frame */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0);
-
- /* disable update TSF */
- SetBcnCtrlReg23a(padapter, DIS_TSF_UDT, 0);
- } else { /* sitesurvey done */
-
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo;
- u32 v32;
-
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- if ((is_client_associated_to_ap23a(padapter) == true) ||
- ((pmlmeinfo->state & 0x03) == MSR_ADHOC) ||
- ((pmlmeinfo->state & 0x03) == MSR_AP)) {
- /* enable to rx data frame */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
-
- /* enable update TSF */
- SetBcnCtrlReg23a(padapter, 0, DIS_TSF_UDT);
- }
-
- v32 = rtl8723au_read32(padapter, REG_RCR);
- v32 |= RCR_CBSSID_BCN;
- rtl8723au_write32(padapter, REG_RCR, v32);
- }
-
- rtl8723a_BT_wifiscan_notify(padapter, flag ? true : false);
-}
-
-void rtl8723a_on_rcr_am(struct rtw_adapter *padapter)
-{
- rtl8723au_write32(padapter, REG_RCR,
- rtl8723au_read32(padapter, REG_RCR) | RCR_AM);
- DBG_8723A("%s, %d, RCR = %x\n", __func__, __LINE__,
- rtl8723au_read32(padapter, REG_RCR));
-}
-
-void rtl8723a_off_rcr_am(struct rtw_adapter *padapter)
-{
- rtl8723au_write32(padapter, REG_RCR,
- rtl8723au_read32(padapter, REG_RCR) & (~RCR_AM));
- DBG_8723A("%s, %d, RCR = %x\n", __func__, __LINE__,
- rtl8723au_read32(padapter, REG_RCR));
-}
-
-void rtl8723a_set_slot_time(struct rtw_adapter *padapter, u8 slottime)
-{
- u8 u1bAIFS, aSifsTime;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- rtl8723au_write8(padapter, REG_SLOT, slottime);
-
- if (pmlmeinfo->WMM_enable == 0) {
- if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
- aSifsTime = 10;
- else
- aSifsTime = 16;
-
- u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
-
-		/* <Roger_EXP> Temporarily removed, 2008.06.20. */
- rtl8723au_write8(padapter, REG_EDCA_VO_PARAM, u1bAIFS);
- rtl8723au_write8(padapter, REG_EDCA_VI_PARAM, u1bAIFS);
- rtl8723au_write8(padapter, REG_EDCA_BE_PARAM, u1bAIFS);
- rtl8723au_write8(padapter, REG_EDCA_BK_PARAM, u1bAIFS);
- }
-}
-
-void rtl8723a_ack_preamble(struct rtw_adapter *padapter, u8 bShortPreamble)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 regTmp;
-
-	/* Joseph marked this out for the Netgear 3500 TKIP
-	   channel 7 issue. (Temporarily) */
- regTmp = (pHalData->nCur40MhzPrimeSC) << 5;
- /* regTmp = 0; */
- if (bShortPreamble)
- regTmp |= 0x80;
- rtl8723au_write8(padapter, REG_RRSR + 2, regTmp);
-}
-
-void rtl8723a_set_sec_cfg(struct rtw_adapter *padapter, u8 sec)
-{
- rtl8723au_write8(padapter, REG_SECCFG, sec);
-}
-
-void rtl8723a_cam_empty_entry(struct rtw_adapter *padapter, u8 ucIndex)
-{
- u8 i;
- u32 ulCommand = 0;
- u32 ulContent = 0;
- u32 ulEncAlgo = CAM_AES;
-
- for (i = 0; i < CAM_CONTENT_COUNT; i++) {
-		/* fill the id into the CAM config (2 bytes) */
- if (i == 0) {
- ulContent |= (ucIndex & 0x03) |
- ((u16) (ulEncAlgo) << 2);
- /* ulContent |= CAM_VALID; */
- } else {
- ulContent = 0;
- }
-		/* polling bit, no write enable, and address */
- ulCommand = CAM_CONTENT_COUNT * ucIndex + i;
- ulCommand = ulCommand | CAM_POLLINIG | CAM_WRITE;
-		/* writing content 0 is equal to marking the entry invalid */
- /* delay_ms(40); */
- rtl8723au_write32(padapter, WCAMI, ulContent);
- /* delay_ms(40); */
- rtl8723au_write32(padapter, REG_CAMCMD, ulCommand);
- }
-}
-
-void rtl8723a_cam_invalidate_all(struct rtw_adapter *padapter)
-{
- rtl8723au_write32(padapter, REG_CAMCMD, CAM_POLLINIG | BIT(30));
-}
-
-void rtl8723a_cam_write(struct rtw_adapter *padapter,
- u8 entry, u16 ctrl, const u8 *mac, const u8 *key)
-{
- u32 cmd;
- unsigned int i, val, addr;
- int j;
-
- addr = entry << 3;
-
- for (j = 5; j >= 0; j--) {
- switch (j) {
- case 0:
- val = ctrl | (mac[0] << 16) | (mac[1] << 24);
- break;
- case 1:
- val = mac[2] | (mac[3] << 8) |
- (mac[4] << 16) | (mac[5] << 24);
- break;
- default:
- i = (j - 2) << 2;
- val = key[i] | (key[i+1] << 8) |
- (key[i+2] << 16) | (key[i+3] << 24);
- break;
- }
-
- rtl8723au_write32(padapter, WCAMI, val);
- cmd = CAM_POLLINIG | CAM_WRITE | (addr + j);
- rtl8723au_write32(padapter, REG_CAMCMD, cmd);
-
- /* DBG_8723A("%s => cam write: %x, %x\n", __func__, cmd, val);*/
- }
-}
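For reference, each CAM entry written by the loop above is eight 32-bit words (addr = entry << 3), written highest word first: word 0 packs ctrl into bits 15:0 with mac[0] and mac[1] in bits 23:16 and 31:24, word 1 holds mac[2]..mac[5] little-endian, and words 2..5 carry key[0]..key[15], four key bytes per word, little-endian.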
-
-void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter)
-{
-#define RW_RELEASE_EN BIT(18)
-#define RXDMA_IDLE BIT(17)
-
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- u8 trycnt = 100;
-
- /* pause tx */
- rtl8723au_write8(padapter, REG_TXPAUSE, 0xff);
-
- /* keep sn */
- padapter->xmitpriv.nqos_ssn = rtl8723au_read8(padapter, REG_NQOS_SEQ);
-
- if (pwrpriv->bkeepfwalive != true) {
- u32 v32;
-
- /* RX DMA stop */
- v32 = rtl8723au_read32(padapter, REG_RXPKT_NUM);
- v32 |= RW_RELEASE_EN;
- rtl8723au_write32(padapter, REG_RXPKT_NUM, v32);
- do {
- v32 = rtl8723au_read32(padapter,
- REG_RXPKT_NUM) & RXDMA_IDLE;
- if (!v32)
- break;
- } while (trycnt--);
- if (trycnt == 0)
- DBG_8723A("Stop RX DMA failed......\n");
-
- /* RQPN Load 0 */
- rtl8723au_write16(padapter, REG_RQPN_NPQ, 0);
- rtl8723au_write32(padapter, REG_RQPN, 0x80000000);
- mdelay(10);
- }
-}
-
-void rtl8723a_bcn_valid(struct rtw_adapter *padapter)
-{
-	/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2;
-	   write 1 to clear (cleared by SW) */
- rtl8723au_write8(padapter, REG_TDECTRL + 2,
- rtl8723au_read8(padapter, REG_TDECTRL + 2) | BIT(0));
-}
-
-bool rtl8723a_get_bcn_valid(struct rtw_adapter *padapter)
-{
-	return rtl8723au_read8(padapter, REG_TDECTRL + 2) & BIT(0);
-}
-
-void rtl8723a_set_beacon_interval(struct rtw_adapter *padapter, u16 interval)
-{
- rtl8723au_write16(padapter, REG_BCN_INTERVAL, interval);
-}
-
-void rtl8723a_set_resp_sifs(struct rtw_adapter *padapter,
- u8 r2t1, u8 r2t2, u8 t2t1, u8 t2t2)
-{
- /* SIFS_Timer = 0x0a0a0808; */
- /* RESP_SIFS for CCK */
- /* SIFS_T2T_CCK (0x08) */
- rtl8723au_write8(padapter, REG_R2T_SIFS, r2t1);
- /* SIFS_R2T_CCK(0x08) */
- rtl8723au_write8(padapter, REG_R2T_SIFS + 1, r2t2);
- /* RESP_SIFS for OFDM */
- /* SIFS_T2T_OFDM (0x0a) */
- rtl8723au_write8(padapter, REG_T2T_SIFS, t2t1);
- /* SIFS_R2T_OFDM(0x0a) */
- rtl8723au_write8(padapter, REG_T2T_SIFS + 1, t2t2);
-}
-
-void rtl8723a_set_ac_param_vo(struct rtw_adapter *padapter, u32 vo)
-{
- rtl8723au_write32(padapter, REG_EDCA_VO_PARAM, vo);
-}
-
-void rtl8723a_set_ac_param_vi(struct rtw_adapter *padapter, u32 vi)
-{
- rtl8723au_write32(padapter, REG_EDCA_VI_PARAM, vi);
-}
-
-void rtl8723a_set_ac_param_be(struct rtw_adapter *padapter, u32 be)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->AcParam_BE = be;
- rtl8723au_write32(padapter, REG_EDCA_BE_PARAM, be);
-}
-
-void rtl8723a_set_ac_param_bk(struct rtw_adapter *padapter, u32 bk)
-{
- rtl8723au_write32(padapter, REG_EDCA_BK_PARAM, bk);
-}
-
-void rtl8723a_set_rxdma_agg_pg_th(struct rtw_adapter *padapter, u8 val)
-{
- rtl8723au_write8(padapter, REG_RXDMA_AGG_PG_TH, val);
-}
-
-void rtl8723a_set_initial_gain(struct rtw_adapter *padapter, u32 rx_gain)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dig_t *pDigTable = &pHalData->odmpriv.DM_DigTable;
-
- if (rx_gain == 0xff) /* restore rx gain */
- ODM_Write_DIG23a(&pHalData->odmpriv, pDigTable->BackupIGValue);
- else {
- pDigTable->BackupIGValue = pDigTable->CurIGValue;
- ODM_Write_DIG23a(&pHalData->odmpriv, rx_gain);
- }
-}
-
-void rtl8723a_odm_support_ability_restore(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->odmpriv.SupportAbility = pHalData->odmpriv.BK_SupportAbility;
-}
-
-void rtl8723a_odm_support_ability_backup(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->odmpriv.BK_SupportAbility = pHalData->odmpriv.SupportAbility;
-}
-
-void rtl8723a_odm_support_ability_set(struct rtw_adapter *padapter, u32 val)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (val == DYNAMIC_ALL_FUNC_ENABLE)
- pHalData->odmpriv.SupportAbility = pHalData->dmpriv.InitODMFlag;
- else
- pHalData->odmpriv.SupportAbility |= val;
-}
-
-void rtl8723a_odm_support_ability_clr(struct rtw_adapter *padapter, u32 val)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->odmpriv.SupportAbility &= val;
-}
-
-void rtl8723a_set_rpwm(struct rtw_adapter *padapter, u8 val)
-{
- rtl8723au_write8(padapter, REG_USB_HRPWM, val);
-}
-
-u8 rtl8723a_get_rf_type(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- return pHalData->rf_type;
-}
-
-bool rtl8723a_get_fwlps_rf_on(struct rtw_adapter *padapter)
-{
- bool retval;
- u32 valRCR;
-
- /* When we halt NIC, we should check if FW LPS is leave. */
-
-	if (padapter->bSurpriseRemoved ||
-	    padapter->pwrctrlpriv.rf_pwrstate == rf_off) {
- /* If it is in HW/SW Radio OFF or IPS state, we do
- not check Fw LPS Leave, because Fw is unload. */
- retval = true;
- } else {
- valRCR = rtl8723au_read32(padapter, REG_RCR);
- if (valRCR & 0x00070000)
- retval = false;
- else
- retval = true;
- }
-
- return retval;
-}
-
-bool rtl8723a_chk_hi_queue_empty(struct rtw_adapter *padapter)
-{
- u32 hgq;
-
- hgq = rtl8723au_read32(padapter, REG_HGQ_INFORMATION);
-
-	return (hgq & 0x0000ff00) == 0;
-}
diff --git a/drivers/staging/rtl8723au/hal/hal_intf.c b/drivers/staging/rtl8723au/hal/hal_intf.c
deleted file mode 100644
index 5383e692546f..000000000000
--- a/drivers/staging/rtl8723au/hal/hal_intf.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#define _HAL_INTF_C_
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <hal_intf.h>
-
-#include <rtl8723a_hal.h>
-
-void rtw_hal_update_ra_mask23a(struct sta_info *psta, u8 rssi_level)
-{
- struct rtw_adapter *padapter;
- struct mlme_priv *pmlmepriv;
-
- if (!psta)
- return;
-
- padapter = psta->padapter;
-
- pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
-#ifdef CONFIG_8723AU_AP_MODE
- add_RATid23a(padapter, psta, rssi_level);
-#endif
- } else
- rtl8723a_update_ramask(padapter, psta->mac_id, rssi_level);
-}
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
deleted file mode 100644
index e279c34b3fc6..000000000000
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ /dev/null
@@ -1,1732 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-#include "usb_ops_linux.h"
-
-static const u16 dB_Invert_Table[8][12] = {
- {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
- {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
- {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
- {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
- {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
-};
-
-static u32 EDCAParam[HT_IOT_PEER_MAX][3] = { /* UL DL */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 0:unknown AP */
- {0xa44f, 0x5ea44f, 0x5e431c}, /* 1:realtek AP */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 2:unknown AP => realtek_92SE */
- {0x5ea32b, 0x5ea42b, 0x5e4322}, /* 3:broadcom AP */
- {0x5ea422, 0x00a44f, 0x00a44f}, /* 4:ralink AP */
- {0x5ea322, 0x00a630, 0x00a44f}, /* 5:atheros AP */
- {0x5e4322, 0x5e4322, 0x5e4322},/* 6:cisco AP */
- {0x5ea44f, 0x00a44f, 0x5ea42b}, /* 8:marvell AP */
- {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 10:unknown AP => 92U AP */
- {0x5ea42b, 0xa630, 0x5e431c}, /* 11:airgocap AP */
-};
-
-/* EDCA Parameter for AP/ADSL by Mingzhi 2011-11-22 */
-
-/* Global var */
-u32 OFDMSwingTable23A[OFDM_TABLE_SIZE_92D] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
-	0x1b00006c, /* 27, -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c,/* 37, -12.5dB */
- 0x0e400039,/* 38, -13.0dB */
- 0x0d800036,/* 39, -13.5dB */
- 0x0cc00033,/* 40, -14.0dB */
- 0x0c000030,/* 41, -14.5dB */
- 0x0b40002d,/* 42, -15.0dB */
-};
-
-u8 CCKSwingTable_Ch1_Ch1323A[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-u8 CCKSwingTable_Ch1423A[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
-/* Local Function predefine. */
-
-/* START------------COMMON INFO RELATED--------------- */
-void odm_CommonInfoSelfInit23a(struct dm_odm_t *pDM_Odm);
-
-static void odm_CommonInfoSelfUpdate(struct hal_data_8723a *pHalData);
-
-void odm_CmnInfoInit_Debug23a(struct dm_odm_t *pDM_Odm);
-
-void odm_CmnInfoUpdate_Debug23a(struct dm_odm_t *pDM_Odm);
-
-/* START---------------DIG--------------------------- */
-void odm_FalseAlarmCounterStatistics23a(struct dm_odm_t *pDM_Odm);
-
-void odm_DIG23aInit(struct dm_odm_t *pDM_Odm);
-
-void odm_DIG23a(struct rtw_adapter *adapter);
-
-void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm);
-/* END---------------DIG--------------------------- */
-
-/* START-------BB POWER SAVE----------------------- */
-void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm);
-
-
-/* END---------BB POWER SAVE----------------------- */
-
-void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm);
-
-static void odm_RSSIMonitorCheck(struct dm_odm_t *pDM_Odm);
-void odm_DynamicTxPower23a(struct dm_odm_t *pDM_Odm);
-
-static void odm_RefreshRateAdaptiveMask(struct dm_odm_t *pDM_Odm);
-
-void odm_RateAdaptiveMaskInit23a(struct dm_odm_t *pDM_Odm);
-
-static void odm_TXPowerTrackingInit(struct dm_odm_t *pDM_Odm);
-
-static void odm_EdcaTurboCheck23a(struct dm_odm_t *pDM_Odm);
-static void ODM_EdcaTurboInit23a(struct dm_odm_t *pDM_Odm);
-
-#define RxDefaultAnt1 0x65a9
-#define RxDefaultAnt2 0x569a
-
-bool odm_StaDefAntSel(struct dm_odm_t *pDM_Odm,
- u32 OFDM_Ant1_Cnt,
- u32 OFDM_Ant2_Cnt,
- u32 CCK_Ant1_Cnt,
- u32 CCK_Ant2_Cnt,
- u8 *pDefAnt
- );
-
-void odm_SetRxIdleAnt(struct dm_odm_t *pDM_Odm,
- u8 Ant,
- bool bDualPath
-);
-
-/* 3 Export Interface */
-
-/* 2011/09/21 MH Added to describe the resources that different teams need to allocate */
-void ODM23a_DMInit(struct dm_odm_t *pDM_Odm)
-{
- /* For all IC series */
- odm_CommonInfoSelfInit23a(pDM_Odm);
- odm_CmnInfoInit_Debug23a(pDM_Odm);
- odm_DIG23aInit(pDM_Odm);
- odm_RateAdaptiveMaskInit23a(pDM_Odm);
-
- odm23a_DynBBPSInit(pDM_Odm);
- odm_DynamicTxPower23aInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit23a(pDM_Odm);
-}
-
-/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
-/* Do not add any dummy functions here; be careful to only use the DM structure */
-/* to perform any new ODM_DM work. */
-void ODM_DMWatchdog23a(struct rtw_adapter *adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
-
- /* 2012.05.03 Luke: For all IC series */
- odm_CmnInfoUpdate_Debug23a(pDM_Odm);
- odm_CommonInfoSelfUpdate(pHalData);
- odm_FalseAlarmCounterStatistics23a(pDM_Odm);
- odm_RSSIMonitorCheck(pDM_Odm);
-
- /* 8723A or 8189ES platform */
- /* NeilChen--2012--08--24-- */
- /* Fix Leave LPS issue */
- if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
- (pDM_Odm->SupportICType & ODM_RTL8723A)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG23a is in LPS mode\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
- odm_DIG23abyRSSI_LPS(pDM_Odm);
- } else {
- odm_DIG23a(adapter);
- }
-
- odm_CCKPacketDetectionThresh23a(pDM_Odm);
-
- if (pwrctrlpriv->bpower_saving)
- return;
-
- odm_RefreshRateAdaptiveMask(pDM_Odm);
-
-
- odm_EdcaTurboCheck23a(pDM_Odm);
-}
-
-/* */
-/* Init fixed HW values. Used only at init time. */
-/* */
-void ODM_CmnInfoInit23a(struct dm_odm_t *pDM_Odm,
- enum odm_cmninfo CmnInfo,
- u32 Value
- )
-{
- /* ODM_RT_TRACE(pDM_Odm,); */
-
- /* */
- /* This section is used for init value */
- /* */
- switch (CmnInfo) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_MP_TEST_CHIP:
- pDM_Odm->bIsMPChip = (u8)Value;
- break;
- case ODM_CMNINFO_IC_TYPE:
- pDM_Odm->SupportICType = Value;
- break;
- case ODM_CMNINFO_CUT_VER:
- pDM_Odm->CutVersion = (u8)Value;
- break;
- case ODM_CMNINFO_FAB_VER:
- pDM_Odm->FabVersion = (u8)Value;
- break;
- case ODM_CMNINFO_BOARD_TYPE:
- pDM_Odm->BoardType = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_LNA:
- pDM_Odm->ExtLNA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_PA:
- pDM_Odm->ExtPA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_TRSW:
- pDM_Odm->ExtTRSW = (u8)Value;
- break;
- case ODM_CMNINFO_BINHCT_TEST:
- pDM_Odm->bInHctTest = (bool)Value;
- break;
- case ODM_CMNINFO_BWIFI_TEST:
- pDM_Odm->bWIFITest = (bool)Value;
- break;
- case ODM_CMNINFO_SMART_CONCURRENT:
- pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
- break;
-	/* To avoid a compiler warning, an empty default case must handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
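A minimal usage sketch (hypothetical; the wrapper name and the specific attribute values are not taken from this driver): fixed chip attributes are pushed in once through ODM_CmnInfoInit23a() before the one-time ODM23a_DMInit() call.

static void example_odm_setup(struct dm_odm_t *pDM_Odm)
{
	/* Fixed, init-time-only attributes. */
	ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8723A);
	ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_MP_TEST_CHIP, 0);
	ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_EXT_LNA, 0);

	/* One-time dynamic-mechanism init. */
	ODM23a_DMInit(pDM_Odm);
}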
-
-void ODM_CmnInfoPtrArrayHook23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo,
- u16 Index, void *pValue)
-{
- /* Hook call by reference pointer. */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_STA_STATUS:
- pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
- break;
-	/* To avoid a compiler warning, an empty default case must handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
-
-/* Update Band/Channel/etc. The values are dynamic but not per-packet. */
-void ODM_CmnInfoUpdate23a(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value)
-{
- /* This init variable may be changed in run time. */
- switch (CmnInfo) {
- case ODM_CMNINFO_WIFI_DIRECT:
- pDM_Odm->bWIFI_Direct = (bool)Value;
- break;
- case ODM_CMNINFO_WIFI_DISPLAY:
- pDM_Odm->bWIFI_Display = (bool)Value;
- break;
- case ODM_CMNINFO_LINK:
- pDM_Odm->bLinked = (bool)Value;
- break;
- case ODM_CMNINFO_RSSI_MIN:
- pDM_Odm->RSSI_Min = (u8)Value;
- break;
- case ODM_CMNINFO_DBG_COMP:
- pDM_Odm->DebugComponents = Value;
- break;
- case ODM_CMNINFO_DBG_LEVEL:
- pDM_Odm->DebugLevel = (u32)Value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_HIGH:
- pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_LOW:
- pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
- break;
- }
-
-}
-
-void odm_CommonInfoSelfInit23a(struct dm_odm_t *pDM_Odm)
-{
- u32 val32;
-
- val32 = rtl8723au_read32(pDM_Odm->Adapter, rFPGA0_XA_HSSIParameter2);
- if (val32 & BIT(9))
- pDM_Odm->bCckHighPower = true;
- else
- pDM_Odm->bCckHighPower = false;
-
- pDM_Odm->RFPathRxEnable =
- rtl8723au_read32(pDM_Odm->Adapter, rOFDM0_TRxPathEnable) & 0x0F;
-
- ODM_InitDebugSetting23a(pDM_Odm);
-}
-
-static void odm_CommonInfoSelfUpdate(struct hal_data_8723a *pHalData)
-{
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- struct sta_info *pEntry;
- u8 EntryCnt = 0;
- u8 i;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- pEntry = pDM_Odm->pODM_StaInfo[i];
- if (pEntry)
- EntryCnt++;
- }
- if (EntryCnt == 1)
- pDM_Odm->bOneEntryOnly = true;
- else
- pDM_Odm->bOneEntryOnly = false;
-}
-
-void odm_CmnInfoInit_Debug23a(struct dm_odm_t *pDM_Odm)
-{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug23a ==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility = 0x%x\n", pDM_Odm->SupportAbility));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType = 0x%x\n", pDM_Odm->SupportICType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion =%d\n", pDM_Odm->CutVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion =%d\n", pDM_Odm->FabVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType =%d\n", pDM_Odm->BoardType));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA =%d\n", pDM_Odm->ExtLNA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA =%d\n", pDM_Odm->ExtPA));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW =%d\n", pDM_Odm->ExtTRSW));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest =%d\n", pDM_Odm->bInHctTest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest =%d\n", pDM_Odm->bWIFITest));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent =%d\n", pDM_Odm->bDualMacSmartConcurrent));
-
-}
-
-void odm_CmnInfoUpdate_Debug23a(struct dm_odm_t *pDM_Odm)
-{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoUpdate_Debug23a ==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct =%d\n", pDM_Odm->bWIFI_Direct));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display =%d\n", pDM_Odm->bWIFI_Display));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked =%d\n", pDM_Odm->bLinked));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min =%d\n", pDM_Odm->RSSI_Min));
-}
-
-void ODM_Write_DIG23a(struct dm_odm_t *pDM_Odm, u8 CurrentIGI)
-{
- struct rtw_adapter *adapter = pDM_Odm->Adapter;
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
- u32 val32;
-
- if (pDM_DigTable->CurIGValue != CurrentIGI) {
- val32 = rtl8723au_read32(adapter, ODM_REG_IGI_A_11N);
- val32 &= ~ODM_BIT_IGI_11N;
- val32 |= CurrentIGI;
- rtl8723au_write32(adapter, ODM_REG_IGI_A_11N, val32);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("CurrentIGI(0x%02x). \n", CurrentIGI));
- pDM_DigTable->CurIGValue = CurrentIGI;
- }
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("ODM_Write_DIG23a():CurrentIGI = 0x%x \n", CurrentIGI));
-}
-
-/* Need LPS mode for CE platform --2012--08--24--- */
-/* 8723AS/8189ES */
-void odm_DIG23abyRSSI_LPS(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *pAdapter = pDM_Odm->Adapter;
- struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
- u8 bFwCurrentInPSMode = false;
- u8 CurrentIGI = pDM_Odm->RSSI_Min;
-
- if (!(pDM_Odm->SupportICType & ODM_RTL8723A))
- return;
-
- CurrentIGI = CurrentIGI+RSSI_OFFSET_DIG;
- bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
-
- /* Using FW PS mode to make IGI */
- if (bFwCurrentInPSMode) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("---Neil---odm_DIG23a is in LPS mode\n"));
- /* Adjust by FA in LPS MODE */
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
- CurrentIGI = CurrentIGI+2;
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
- CurrentIGI = CurrentIGI+1;
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
- CurrentIGI = CurrentIGI-1;
- } else {
- CurrentIGI = RSSI_Lower;
- }
-
- /* Lower bound checking */
-
- /* RSSI Lower bound check */
- if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC)
- RSSI_Lower = (pDM_Odm->RSSI_Min-10);
- else
- RSSI_Lower = DM_DIG_MIN_NIC;
-
- /* Upper and Lower Bound checking */
- if (CurrentIGI > DM_DIG_MAX_NIC)
- CurrentIGI = DM_DIG_MAX_NIC;
- else if (CurrentIGI < RSSI_Lower)
- CurrentIGI = RSSI_Lower;
-
- ODM_Write_DIG23a(pDM_Odm, CurrentIGI);
-}
-
-void odm_DIG23aInit(struct dm_odm_t *pDM_Odm)
-{
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
- u32 val32;
-
- val32 = rtl8723au_read32(pDM_Odm->Adapter, ODM_REG_IGI_A_11N);
- pDM_DigTable->CurIGValue = val32 & ODM_BIT_IGI_11N;
-
- pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
- pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
- pDM_DigTable->FALowThresh = DM_FALSEALARM_THRESH_LOW;
- pDM_DigTable->FAHighThresh = DM_FALSEALARM_THRESH_HIGH;
-	pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
-	pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
- pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
- pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
- pDM_DigTable->PreCCK_CCAThres = 0xFF;
- pDM_DigTable->CurCCK_CCAThres = 0x83;
- pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
- pDM_DigTable->LargeFAHit = 0;
- pDM_DigTable->Recover_cnt = 0;
- pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
- pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
- pDM_DigTable->bMediaConnect_0 = false;
- pDM_DigTable->bMediaConnect_1 = false;
-}
-
-void odm_DIG23a(struct rtw_adapter *adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
- struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- u8 DIG_Dynamic_MIN;
- u8 DIG_MaxOfMin;
- bool FirstConnect, FirstDisConnect;
- u8 dm_dig_max, dm_dig_min;
- u8 CurrentIGI = pDM_DigTable->CurIGValue;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a() ==>\n"));
- if (adapter->mlmepriv.bScanInProcess) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a() Return: In Scan Progress \n"));
- return;
- }
-
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) &&
- (pDM_DigTable->bMediaConnect_0);
-
- /* 1 Boundary Decision */
- if ((pDM_Odm->SupportICType & ODM_RTL8723A) &&
- (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR || pDM_Odm->ExtLNA)) {
- dm_dig_max = DM_DIG_MAX_NIC_HP;
- dm_dig_min = DM_DIG_MIN_NIC_HP;
- DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC;
- dm_dig_min = DM_DIG_MIN_NIC;
- DIG_MaxOfMin = DM_DIG_MAX_AP;
- }
-
- if (pDM_Odm->bLinked) {
- /* 2 8723A Series, offset need to be 10 */
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- /* 2 Upper Bound */
- if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
-
- /* 2 If BT is Concurrent, need to set Lower Bound */
- DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
- } else {
- /* 2 Modify DIG upper bound */
- if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
- pDM_DigTable->rx_gain_range_max = dm_dig_min;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
-
- /* 2 Modify DIG lower bound */
- if (pDM_Odm->bOneEntryOnly) {
- if (pDM_Odm->RSSI_Min < dm_dig_min)
- DIG_Dynamic_MIN = dm_dig_min;
- else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
- DIG_Dynamic_MIN = DIG_MaxOfMin;
- else
- DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a() : bOneEntryOnly = true, DIG_Dynamic_MIN = 0x%x\n",
- DIG_Dynamic_MIN));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a() : pDM_Odm->RSSI_Min =%d\n",
- pDM_Odm->RSSI_Min));
- } else {
- DIG_Dynamic_MIN = dm_dig_min;
- }
- }
- } else {
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- DIG_Dynamic_MIN = dm_dig_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a() : No Link\n"));
- }
-
- /* 1 Modify DIG lower bound, deal with abnormally large false alarm */
- if (pFalseAlmCnt->Cnt_all > 10000) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
-			 ("dm_DIG(): Abnormal false alarm case.\n"));
-
- if (pDM_DigTable->LargeFAHit != 3)
- pDM_DigTable->LargeFAHit++;
- if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
- pDM_DigTable->ForbiddenIGI = CurrentIGI;
- pDM_DigTable->LargeFAHit = 1;
- }
-
- if (pDM_DigTable->LargeFAHit >= 3) {
- if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max)
- pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
- else
- pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- pDM_DigTable->Recover_cnt = 3600; /* 3600 = 2hr */
- }
- } else {
- /* Recovery mechanism for IGI lower bound */
- if (pDM_DigTable->Recover_cnt != 0) {
- pDM_DigTable->Recover_cnt--;
- } else {
- if (pDM_DigTable->LargeFAHit < 3) {
- if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) {
- pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a(): Normal Case: At Lower Bound\n"));
- } else {
- pDM_DigTable->ForbiddenIGI--;
- pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG23a(): Normal Case: Approach Lower Bound\n"));
- }
- } else {
- pDM_DigTable->LargeFAHit = 0;
- }
- }
- }
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): pDM_DigTable->LargeFAHit =%d\n", pDM_DigTable->LargeFAHit));
-
- /* 1 Adjust initial gain by false alarm */
- if (pDM_Odm->bLinked) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG AfterLink\n"));
- if (FirstConnect) {
- CurrentIGI = pDM_Odm->RSSI_Min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
- } else {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue-1; */
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG BeforeLink\n"));
- if (FirstDisConnect) {
- CurrentIGI = pDM_DigTable->rx_gain_range_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): First DisConnect \n"));
- } else {
- /* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
- if (pFalseAlmCnt->Cnt_all > 10000)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > 8000)
- CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < 500)
- CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue-1; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): England DIG \n"));
- }
- }
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): DIG End Adjust IGI\n"));
- /* 1 Check initial gain by upper/lower bound */
- if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
- CurrentIGI = pDM_DigTable->rx_gain_range_max;
- if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
- CurrentIGI = pDM_DigTable->rx_gain_range_min;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): rx_gain_range_max = 0x%x, rx_gain_range_min = 0x%x\n",
- pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): TotalFA =%d\n", pFalseAlmCnt->Cnt_all));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG23a(): CurIGValue = 0x%x\n", CurrentIGI));
-
- /* 2 High power RSSI threshold */
-
- ODM_Write_DIG23a(pDM_Odm, CurrentIGI);/* ODM_Write_DIG23a(pDM_Odm, pDM_DigTable->CurIGValue); */
- pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
- pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
-}
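A condensed sketch (not driver code; the helper name is made up) of the steady-state, linked-case IGI update performed above: false alarms push the initial gain up, a quiet medium lets it fall, and the result is clamped to the rx_gain_range window computed earlier in the function.

static u8 dig_adjust_igi(u8 igi, u32 false_alarms, u8 igi_min, u8 igi_max)
{
	if (false_alarms > DM_DIG_FA_TH2)
		igi += 4;
	else if (false_alarms > DM_DIG_FA_TH1)
		igi += 2;
	else if (false_alarms < DM_DIG_FA_TH0)
		igi -= 2;

	/* Bound the result to the current dynamic range. */
	return clamp_t(u8, igi, igi_min, igi_max);
}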
-
-/* 3 ============================================================ */
-/* 3 FALSE ALARM CHECK */
-/* 3 ============================================================ */
-
-void odm_FalseAlarmCounterStatistics23a(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *adapter = pDM_Odm->Adapter;
- struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- u32 ret_value, val32;
-
- /* hold ofdm counter */
- /* hold page C counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_HOLDC_11N);
- val32 |= BIT(31);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_HOLDC_11N, val32);
- /* hold page D counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTD_11N);
- val32 |= BIT(31);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_RSTD_11N, val32);
- ret_value = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_TYPE1_11N);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = (ret_value & 0xffff0000)>>16;
- ret_value = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_TYPE2_11N);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = (ret_value & 0xffff0000)>>16;
- ret_value = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_TYPE3_11N);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = (ret_value & 0xffff0000)>>16;
- ret_value = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_TYPE4_11N);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
-
- FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
- FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail;
- /* hold cck counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_CCK_FA_RST_11N);
- val32 |= (BIT(12) | BIT(14));
- rtl8723au_write32(adapter, ODM_REG_CCK_FA_RST_11N, val32);
-
- ret_value = rtl8723au_read32(adapter, ODM_REG_CCK_FA_LSB_11N) & 0xff;
- FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = rtl8723au_read32(adapter, ODM_REG_CCK_FA_MSB_11N) >> 16;
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff00);
-
- ret_value = rtl8723au_read32(adapter, ODM_REG_CCK_CCA_CNT_11N);
- FalseAlmCnt->Cnt_CCK_CCA =
- ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
-
- FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail +
- FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
-	if (!pwrpriv->bkeepfwalive) {
- FalseAlmCnt->Cnt_Cck_fail);
-
- FalseAlmCnt->Cnt_CCA_all =
- FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
- /* reset false alarm counter registers */
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTC_11N);
- val32 |= BIT(31);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_RSTC_11N, val32);
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTC_11N);
-		} while (--trycnt);
-		/* trycnt reaches 0 here only when the poll above timed out */
-		if (trycnt == 0)
-
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTD_11N);
- val32 |= BIT(27);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_RSTD_11N, val32);
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTD_11N);
- val32 &= ~BIT(27);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_RSTD_11N, val32);
-
- /* update ofdm counter */
- /* update page C counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_HOLDC_11N);
- val32 &= ~BIT(31);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_HOLDC_11N, val32);
-
- /* update page D counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_OFDM_FA_RSTD_11N);
- val32 &= ~BIT(31);
- rtl8723au_write32(adapter, ODM_REG_OFDM_FA_RSTD_11N, val32);
-
- /* reset CCK CCA counter */
- val32 = rtl8723au_read32(adapter, ODM_REG_CCK_FA_RST_11N);
- val32 &= ~(BIT(12) | BIT(13) | BIT(14) | BIT(15));
- rtl8723au_write32(adapter, ODM_REG_CCK_FA_RST_11N, val32);
-
- val32 = rtl8723au_read32(adapter, ODM_REG_CCK_FA_RST_11N);
- val32 |= (BIT(13) | BIT(15));
- rtl8723au_write32(adapter, ODM_REG_CCK_FA_RST_11N, val32);
- }
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Enter odm_FalseAlarmCounterStatistics23a\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Fast_Fsync =%d, Cnt_SB_Search_fail =%d\n",
- FalseAlmCnt->Cnt_Fast_Fsync,
- FalseAlmCnt->Cnt_SB_Search_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Parity_Fail =%d, Cnt_Rate_Illegal =%d\n",
- FalseAlmCnt->Cnt_Parity_Fail,
- FalseAlmCnt->Cnt_Rate_Illegal));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Crc8_fail =%d, Cnt_Mcs_fail =%d\n",
- FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Cck_fail =%d\n", FalseAlmCnt->Cnt_Cck_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Cnt_Ofdm_fail =%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
- ("Total False Alarm =%d\n", FalseAlmCnt->Cnt_all));
-}
-
-/* 3 ============================================================ */
-/* 3 CCK Packet Detect Threshold */
-/* 3 ============================================================ */
-
-void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm)
-{
- struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
- u8 CurCCK_CCAThres;
-
- if (pDM_Odm->ExtLNA)
- return;
-
- if (pDM_Odm->bLinked) {
- if (pDM_Odm->RSSI_Min > 25) {
- CurCCK_CCAThres = 0xcd;
- } else if (pDM_Odm->RSSI_Min <= 25 && pDM_Odm->RSSI_Min > 10) {
- CurCCK_CCAThres = 0x83;
- } else {
- if (FalseAlmCnt->Cnt_Cck_fail > 1000)
- CurCCK_CCAThres = 0x83;
- else
- CurCCK_CCAThres = 0x40;
- }
- } else {
- if (FalseAlmCnt->Cnt_Cck_fail > 1000)
- CurCCK_CCAThres = 0x83;
- else
- CurCCK_CCAThres = 0x40;
- }
-
- ODM_Write_CCK_CCA_Thres23a(pDM_Odm, CurCCK_CCAThres);
-}
-
-void ODM_Write_CCK_CCA_Thres23a(struct dm_odm_t *pDM_Odm, u8 CurCCK_CCAThres)
-{
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres)
- rtl8723au_write8(pDM_Odm->Adapter, ODM_REG(CCK_CCA, pDM_Odm),
- CurCCK_CCAThres);
- pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
- pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
-}
-
-/* 3 ============================================================ */
-/* 3 BB Power Save */
-/* 3 ============================================================ */
-void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm)
-{
- struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- pDM_PSTable->PreCCAState = CCA_MAX;
- pDM_PSTable->CurCCAState = CCA_MAX;
- pDM_PSTable->PreRFState = RF_MAX;
- pDM_PSTable->CurRFState = RF_MAX;
- pDM_PSTable->Rssi_val_min = 0;
- pDM_PSTable->initialize = 0;
-}
-
-
-void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal)
-{
- struct dynamic_pwr_sav *pDM_PSTable = &pDM_Odm->DM_PSTable;
- struct rtw_adapter *adapter = pDM_Odm->Adapter;
- u32 val32;
- u8 Rssi_Up_bound = 30;
- u8 Rssi_Low_bound = 25;
- if (pDM_PSTable->initialize == 0) {
-
- pDM_PSTable->Reg874 =
- rtl8723au_read32(adapter, 0x874) & 0x1CC000;
- pDM_PSTable->RegC70 =
- rtl8723au_read32(adapter, 0xc70) & BIT(3);
- pDM_PSTable->Reg85C =
- rtl8723au_read32(adapter, 0x85c) & 0xFF000000;
- pDM_PSTable->RegA74 = rtl8723au_read32(adapter, 0xa74) & 0xF000;
- pDM_PSTable->initialize = 1;
- }
-
- if (!bForceInNormal) {
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->PreRFState == RF_Normal) {
- if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
- pDM_PSTable->CurRFState = RF_Save;
- else
- pDM_PSTable->CurRFState = RF_Normal;
- } else {
- if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
- pDM_PSTable->CurRFState = RF_Normal;
- else
- pDM_PSTable->CurRFState = RF_Save;
- }
- } else {
- pDM_PSTable->CurRFState = RF_MAX;
- }
- } else {
- pDM_PSTable->CurRFState = RF_Normal;
- }
-
- if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
- if (pDM_PSTable->CurRFState == RF_Save) {
- /* <tynli_note> 8723 RSSI report will be wrong.
-			 * Set 0x874[5]= 1 when entering BB power saving mode. */
- /* Suggested by SD3 Yu-Nan. 2011.01.20. */
- /* Reg874[5]= 1b'1 */
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- val32 = rtl8723au_read32(adapter, 0x874);
- val32 |= BIT(5);
- rtl8723au_write32(adapter, 0x874, val32);
- }
- /* Reg874[20:18]= 3'b010 */
- val32 = rtl8723au_read32(adapter, 0x874);
- val32 &= ~(BIT(18) | BIT(20));
- val32 |= BIT(19);
- rtl8723au_write32(adapter, 0x874, val32);
- /* RegC70[3]= 1'b0 */
- val32 = rtl8723au_read32(adapter, 0xc70);
- val32 &= ~BIT(3);
- rtl8723au_write32(adapter, 0xc70, val32);
- /* Reg85C[31:24]= 0x63 */
- val32 = rtl8723au_read32(adapter, 0x85c);
- val32 &= 0x00ffffff;
- val32 |= 0x63000000;
- rtl8723au_write32(adapter, 0x85c, val32);
- /* Reg874[15:14]= 2'b10 */
- val32 = rtl8723au_read32(adapter, 0x874);
- val32 &= ~BIT(14);
- val32 |= BIT(15);
- rtl8723au_write32(adapter, 0x874, val32);
- /* RegA75[7:4]= 0x3 */
- val32 = rtl8723au_read32(adapter, 0xa74);
- val32 &= ~(BIT(14) | BIT(15));
- val32 |= (BIT(12) | BIT(13));
- rtl8723au_write32(adapter, 0xa74, val32);
- /* Reg818[28]= 1'b0 */
- val32 = rtl8723au_read32(adapter, 0x818);
- val32 &= ~BIT(28);
- rtl8723au_write32(adapter, 0x818, val32);
- /* Reg818[28]= 1'b1 */
- val32 = rtl8723au_read32(adapter, 0x818);
- val32 |= BIT(28);
- rtl8723au_write32(adapter, 0x818, val32);
- } else {
- val32 = rtl8723au_read32(adapter, 0x874);
- val32 |= pDM_PSTable->Reg874;
- rtl8723au_write32(adapter, 0x874, val32);
-
- val32 = rtl8723au_read32(adapter, 0xc70);
- val32 |= pDM_PSTable->RegC70;
- rtl8723au_write32(adapter, 0xc70, val32);
-
- val32 = rtl8723au_read32(adapter, 0x85c);
- val32 |= pDM_PSTable->Reg85C;
- rtl8723au_write32(adapter, 0x85c, val32);
-
- val32 = rtl8723au_read32(adapter, 0xa74);
- val32 |= pDM_PSTable->RegA74;
- rtl8723au_write32(adapter, 0xa74, val32);
-
- val32 = rtl8723au_read32(adapter, 0x818);
- val32 &= ~BIT(28);
- rtl8723au_write32(adapter, 0x818, val32);
-
- /* Reg874[5]= 1b'0 */
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- val32 = rtl8723au_read32(adapter, 0x874);
- val32 &= ~BIT(5);
- rtl8723au_write32(adapter, 0x874, val32);
- }
- }
- pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
- }
-}
-
-/* 3 ============================================================ */
-/* 3 RATR MASK */
-/* 3 ============================================================ */
-/* 3 ============================================================ */
-/* 3 Rate Adaptive */
-/* 3 ============================================================ */
-
-void odm_RateAdaptiveMaskInit23a(struct dm_odm_t *pDM_Odm)
-{
- struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
-
- pOdmRA->Type = DM_Type_ByDriver;
-
- pOdmRA->RATRState = DM_RATR_STA_INIT;
- pOdmRA->HighRSSIThresh = 50;
- pOdmRA->LowRSSIThresh = 20;
-}
-
-u32 ODM_Get_Rate_Bitmap23a(struct hal_data_8723a *pHalData, u32 macid,
- u32 ra_mask, u8 rssi_level)
-{
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- struct sta_info *pEntry;
- u32 rate_bitmap = 0x0fffffff;
- u8 WirelessMode;
-
- pEntry = pDM_Odm->pODM_StaInfo[macid];
- if (!pEntry)
- return ra_mask;
-
- WirelessMode = pEntry->wireless_mode;
-
- switch (WirelessMode) {
- case ODM_WM_B:
- if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
- rate_bitmap = 0x0000000d;
- else
- rate_bitmap = 0x0000000f;
- break;
- case (ODM_WM_A|ODM_WM_G):
- if (rssi_level == DM_RATR_STA_HIGH)
- rate_bitmap = 0x00000f00;
- else
- rate_bitmap = 0x00000ff0;
- break;
- case (ODM_WM_B|ODM_WM_G):
- if (rssi_level == DM_RATR_STA_HIGH)
- rate_bitmap = 0x00000f00;
- else if (rssi_level == DM_RATR_STA_MIDDLE)
- rate_bitmap = 0x00000ff0;
- else
- rate_bitmap = 0x00000ff5;
- break;
- case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
- case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
- if (pHalData->rf_type == RF_1T2R ||
- pHalData->rf_type == RF_1T1R) {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (pHalData->CurrentChannelBW ==
- HT_CHANNEL_WIDTH_40)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
- } else {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x0f8f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x0f8ff000;
- } else {
- if (pHalData->CurrentChannelBW ==
- HT_CHANNEL_WIDTH_40)
- rate_bitmap = 0x0f8ff015;
- else
- rate_bitmap = 0x0f8ff005;
- }
- }
- break;
- default:
- /* case WIRELESS_11_24N: */
- /* case WIRELESS_11_5N: */
- if (pHalData->rf_type == RF_1T2R)
- rate_bitmap = 0x000fffff;
- else
- rate_bitmap = 0x0fffffff;
- break;
- }
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
- (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x \n",
- rssi_level, WirelessMode, rate_bitmap));
-
- return rate_bitmap;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: odm_RefreshRateAdaptiveMask()
- *
- * Overview: Update rate table mask according to rssi
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- *When Who Remark
- *05/27/2009 hpfan Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-static void odm_RefreshRateAdaptiveMask(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *pAdapter = pDM_Odm->Adapter;
- u32 smoothed;
- u8 i;
-
- if (pAdapter->bDriverStopped) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE,
- ("<---- %s: driver is going to unload\n",
- __func__));
- return;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
- if (pstat) {
- smoothed = pstat->rssi_stat.UndecoratedSmoothedPWDB;
- if (ODM_RAStateCheck23a(pDM_Odm, smoothed, false,
- &pstat->rssi_level)) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK,
- ODM_DBG_LOUD,
- ("RSSI:%d, RSSI_LEVEL:%d\n",
- smoothed,
- pstat->rssi_level));
- rtw_hal_update_ra_mask23a(pstat,
- pstat->rssi_level);
- }
- }
- }
-}
-
-/* Return Value: bool */
-/* - true: RATRState is changed. */
-bool ODM_RAStateCheck23a(struct dm_odm_t *pDM_Odm, s32 RSSI, bool bForceUpdate,
- u8 *pRATRState)
-{
- struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
- const u8 GoUpGap = 5;
- u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
- u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
- u8 RATRState;
-
- /* Threshold Adjustment: */
- /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
- /* Here GoUpGap is added to solve the boundary's level alternation issue. */
- switch (*pRATRState) {
- case DM_RATR_STA_INIT:
- case DM_RATR_STA_HIGH:
- break;
- case DM_RATR_STA_MIDDLE:
- HighRSSIThreshForRA += GoUpGap;
- break;
- case DM_RATR_STA_LOW:
- HighRSSIThreshForRA += GoUpGap;
- LowRSSIThreshForRA += GoUpGap;
- break;
- default:
- ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !",
- *pRATRState));
- break;
- }
-
- /* Decide RATRState by RSSI. */
- if (RSSI > HighRSSIThreshForRA)
- RATRState = DM_RATR_STA_HIGH;
- else if (RSSI > LowRSSIThreshForRA)
- RATRState = DM_RATR_STA_MIDDLE;
- else
- RATRState = DM_RATR_STA_LOW;
-
- if (*pRATRState != RATRState || bForceUpdate) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
- ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
- *pRATRState = RATRState;
- return true;
- }
- return false;
-}
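Worked example with the defaults from odm_RateAdaptiveMaskInit23a() (HighRSSIThresh = 50, LowRSSIThresh = 20) and GoUpGap = 5: a station in DM_RATR_STA_MIDDLE is promoted to HIGH only once RSSI exceeds 55, while a station already in HIGH drops back only when RSSI falls to 50 or below, so the 51..55 range keeps whatever state was last set; that is the boundary level-alternation hysteresis the comment above describes.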
-
-/* 3 ============================================================ */
-/* 3 Dynamic Tx Power */
-/* 3 ============================================================ */
-
-void odm_DynamicTxPower23aInit(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
-
- /*
- * This is never changed, so we should be able to clean up the
- * code checking for different values in rtl8723a_rf6052.c
- */
- pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
-}
-
-static void
-FindMinimumRSSI(struct rtw_adapter *pAdapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
-
- /* 1 1.Determine the minimum RSSI */
-
- if (!pDM_Odm->bLinked && !pdmpriv->EntryMinUndecoratedSmoothedPWDB)
- pdmpriv->MinUndecoratedPWDBForDM = 0;
- else
- pdmpriv->MinUndecoratedPWDBForDM =
- pdmpriv->EntryMinUndecoratedSmoothedPWDB;
-}
-
-static void odm_RSSIMonitorCheck(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- int i;
- int MaxDB = 0, MinDB = 0xff;
- u8 sta_cnt = 0;
- u32 tmpdb;
-	u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]:MACID, [16~31]:PWDB_rssi */
- struct sta_info *psta;
-
- if (!pDM_Odm->bLinked)
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- psta = pDM_Odm->pODM_StaInfo[i];
- if (psta) {
- if (psta->rssi_stat.UndecoratedSmoothedPWDB < MinDB)
- MinDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
-
- if (psta->rssi_stat.UndecoratedSmoothedPWDB > MaxDB)
- MaxDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
-
- if (psta->rssi_stat.UndecoratedSmoothedPWDB != -1) {
- tmpdb = psta->rssi_stat.UndecoratedSmoothedPWDB;
- PWDB_rssi[sta_cnt++] = psta->mac_id |
- (tmpdb << 16);
- }
- }
- }
-
- for (i = 0; i < sta_cnt; i++) {
-		if (PWDB_rssi[i] != 0)
- rtl8723a_set_rssi_cmd(Adapter, PWDB_rssi[i]);
- }
-
- pdmpriv->EntryMaxUndecoratedSmoothedPWDB = MaxDB;
-
- if (MinDB != 0xff) /* If associated entry is found */
- pdmpriv->EntryMinUndecoratedSmoothedPWDB = MinDB;
- else
- pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
-
- FindMinimumRSSI(Adapter);/* get pdmpriv->MinUndecoratedPWDBForDM */
-
- ODM_CmnInfoUpdate23a(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN,
- pdmpriv->MinUndecoratedPWDBForDM);
-}
-
-/* endif */
-/* 3 ============================================================ */
-/* 3 Tx Power Tracking */
-/* 3 ============================================================ */
-
-static void odm_TXPowerTrackingInit(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
-
- pdmpriv->bTXPowerTracking = true;
- pdmpriv->TXPowercount = 0;
- pdmpriv->bTXPowerTrackingInit = false;
- pdmpriv->TxPowerTrackControl = true;
- MSG_8723A("pdmpriv->TxPowerTrackControl = %d\n",
- pdmpriv->TxPowerTrackControl);
-
- pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
-}
-
-/* EDCA Turbo */
-static void ODM_EdcaTurboInit23a(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
-
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
- Adapter->recvpriv.bIsAnyNonBEPkts = false;
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
-		 ("Original VO PARAM: 0x%x\n",
- rtl8723au_read32(Adapter, ODM_EDCA_VO_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
-		 ("Original VI PARAM: 0x%x\n",
- rtl8723au_read32(Adapter, ODM_EDCA_VI_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
-		 ("Original BE PARAM: 0x%x\n",
- rtl8723au_read32(Adapter, ODM_EDCA_BE_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
-		 ("Original BK PARAM: 0x%x\n",
- rtl8723au_read32(Adapter, ODM_EDCA_BK_PARAM)));
-}
-
-static void odm_EdcaTurboCheck23a(struct dm_odm_t *pDM_Odm)
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
- struct recv_priv *precvpriv = &Adapter->recvpriv;
- struct registry_priv *pregpriv = &Adapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u32 trafficIndex;
- u32 edca_param;
- u64 cur_tx_bytes;
- u64 cur_rx_bytes;
-
- /* For AP/ADSL use struct rtl8723a_priv * */
- /* For CE/NIC use struct rtw_adapter * */
-
-	/*
-	 * 2011/09/29 MH In the first stage of HW integration we provide 4
-	 * different handles that operate at the same time. In stage 2/3
-	 * we need to provide a universal interface and merge all HW
-	 * dynamic mechanisms.
-	 */
-
-	if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0) */
- goto dm_CheckEdcaTurbo_EXIT;
-
- if (pmlmeinfo->assoc_AP_vendor >= HT_IOT_PEER_MAX)
- goto dm_CheckEdcaTurbo_EXIT;
-
- if (rtl8723a_BT_disable_EDCA_turbo(Adapter))
- goto dm_CheckEdcaTurbo_EXIT;
-
- /* Check if the status needs to be changed. */
- if (!precvpriv->bIsAnyNonBEPkts) {
- cur_tx_bytes = pxmitpriv->tx_bytes - pxmitpriv->last_tx_bytes;
- cur_rx_bytes = precvpriv->rx_bytes - precvpriv->last_rx_bytes;
-
- /* traffic, TX or RX */
- if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK) ||
- (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS)) {
- if (cur_tx_bytes > (cur_rx_bytes << 2)) {
- /* Uplink TP is present. */
- trafficIndex = UP_LINK;
-			} else { /* Traffic is balanced; treat it as downlink. */
- trafficIndex = DOWN_LINK;
- }
- } else {
- if (cur_rx_bytes > (cur_tx_bytes << 2)) {
- /* Downlink TP is present. */
- trafficIndex = DOWN_LINK;
-			} else { /* Traffic is balanced; treat it as uplink. */
- trafficIndex = UP_LINK;
- }
- }
-
- if ((pDM_Odm->DM_EDCA_Table.prv_traffic_idx != trafficIndex) ||
- (!pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA)) {
- if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_CISCO) &&
- (pmlmeext->cur_wireless_mode & WIRELESS_11_24N))
- edca_param = EDCAParam[pmlmeinfo->assoc_AP_vendor][trafficIndex];
- else
- edca_param = EDCAParam[HT_IOT_PEER_UNKNOWN][trafficIndex];
- rtl8723au_write32(Adapter, REG_EDCA_BE_PARAM,
- edca_param);
-
- pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
- }
-
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
- } else {
- /* Turn Off EDCA turbo here. */
- /* Restore original EDCA according to the declaration of AP. */
- if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
- rtl8723au_write32(Adapter, REG_EDCA_BE_PARAM,
- pHalData->AcParam_BE);
- pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
- }
- }
-
-dm_CheckEdcaTurbo_EXIT:
- /* Set variables for next time. */
- precvpriv->bIsAnyNonBEPkts = false;
- pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
- precvpriv->last_rx_bytes = precvpriv->rx_bytes;
-}
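The direction decision above reduces to a 4:1 byte-ratio test; a condensed sketch (hypothetical helper, not part of the driver):

static u32 edca_traffic_dir(u64 tx_bytes, u64 rx_bytes, bool ralink_or_atheros)
{
	if (ralink_or_atheros)
		/* Uplink only when TX exceeds 4x RX; balanced traffic
		 * is treated as downlink. */
		return tx_bytes > (rx_bytes << 2) ? UP_LINK : DOWN_LINK;

	/* Other vendors: downlink only when RX exceeds 4x TX; balanced
	 * traffic is treated as uplink. */
	return rx_bytes > (tx_bytes << 2) ? DOWN_LINK : UP_LINK;
}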
-
-u32 GetPSDData(struct dm_odm_t *pDM_Odm, unsigned int point,
- u8 initial_gain_psd)
-{
- struct rtw_adapter *adapter = pDM_Odm->Adapter;
- u32 psd_report, val32;
-
- /* Set DCO frequency index, offset = (40MHz/SamplePts)*point */
- val32 = rtl8723au_read32(adapter, 0x808);
- val32 &= ~0x3ff;
- val32 |= (point & 0x3ff);
- rtl8723au_write32(adapter, 0x808, val32);
-
- /* Start PSD calculation, Reg808[22]= 0->1 */
- val32 = rtl8723au_read32(adapter, 0x808);
- val32 |= BIT(22);
- rtl8723au_write32(adapter, 0x808, val32);
- /* Need to wait for HW PSD report */
- udelay(30);
- val32 = rtl8723au_read32(adapter, 0x808);
- val32 &= ~BIT(22);
- rtl8723au_write32(adapter, 0x808, val32);
- /* Read PSD report, Reg8B4[15:0] */
- psd_report = rtl8723au_read32(adapter, 0x8B4) & 0x0000FFFF;
-
- psd_report = (u32)(ConvertTo_dB23a(psd_report)) +
- (u32)(initial_gain_psd-0x1c);
-
- return psd_report;
-}
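
A quick numeric illustration of the comment "offset = (40MHz/SamplePts)*point" in GetPSDData(), assuming 128 PSD sample points (the value the antenna-detection path below configures): point 14 on channel 1 (2412 MHz) lands near 2416 MHz, the frequency shown in the psd_report traces further down.

#include <stdio.h>

int main(void)
{
	const double sample_pts = 128.0;	/* assumed 128-pt PSD setting */
	const double chan1_mhz = 2412.0;	/* 2.4 GHz channel 1 */
	int point = 14;
	double offset_mhz = (40.0 / sample_pts) * point;	/* 4.375 MHz */

	printf("tone at %.3f MHz\n", chan1_mhz + offset_mhz);	/* 2416.375 */
	return 0;
}
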
-
-u32 ConvertTo_dB23a(u32 Value)
-{
- u8 i;
- u8 j;
- u32 dB;
-
- Value = Value & 0xFFFF;
-
- for (i = 0; i < 8; i++) {
- if (Value <= dB_Invert_Table[i][11])
- break;
- }
-
- if (i >= 8)
- return 96; /* maximum 96 dB */
-
- for (j = 0; j < 12; j++) {
- if (Value <= dB_Invert_Table[i][j])
- break;
- }
-
- dB = i*12 + j + 1;
-
- return dB;
-}
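
ConvertTo_dB23a() is a table-driven conversion; dB_Invert_Table[][] is defined elsewhere in the driver. Assuming that table holds the linear values 10^(dB/20) for 1..96 dB, the lookup approximates 20*log10(Value) rounded up and capped at 96 dB for 16-bit inputs (20*log10(65535) is roughly 96.3 dB). A minimal reference check of that reading, not the driver's implementation:

#include <math.h>
#include <stdio.h>

static unsigned int to_db_reference(unsigned int value)
{
	double db;

	value &= 0xFFFF;
	if (value == 0)
		return 0;
	db = 20.0 * log10((double)value);
	if (db > 96.0)
		db = 96.0;	/* same 96 dB ceiling as above */
	return (unsigned int)ceil(db);
}

int main(void)
{
	printf("%u %u %u\n", to_db_reference(1),	/*  0 */
			     to_db_reference(1000),	/* 60 */
			     to_db_reference(65535));	/* 96 */
	return 0;
}

(The real lookup returns at least 1 dB, since it yields i*12 + j + 1.)
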
-
-/*
- * Description:
- * Set the Single/Dual Antenna default setting for products that do not
- * do detection in advance.
- *
- * Added by Joseph, 2012.03.22
- */
-void ODM_SingleDualAntennaDefaultSetting(struct dm_odm_t *pDM_Odm)
-{
- struct sw_ant_sw *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
-}
-
-/* 2 8723A ANT DETECT */
-
-static void odm_PHY_SaveAFERegisters(struct dm_odm_t *pDM_Odm, u32 *AFEReg,
- u32 *AFEBackup, u32 RegisterNum)
-{
- u32 i;
-
-	for (i = 0; i < RegisterNum; i++)
- AFEBackup[i] = rtl8723au_read32(pDM_Odm->Adapter, AFEReg[i]);
-}
-
-static void odm_PHY_ReloadAFERegisters(struct dm_odm_t *pDM_Odm, u32 *AFEReg,
-				       u32 *AFEBackup, u32 RegisterNum)
-{
-	u32 i;
-
-	for (i = 0; i < RegisterNum; i++)
-		rtl8723au_write32(pDM_Odm->Adapter, AFEReg[i], AFEBackup[i]);
-}
-
-/* 2 8723A ANT DETECT */
-/* Description: */
-/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
-/* This function was developed in cooperation with BB team member Neil. */
-bool ODM_SingleDualAntennaDetection(struct dm_odm_t *pDM_Odm, u8 mode)
-{
- struct sw_ant_sw *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
- struct rtw_adapter *adapter = pDM_Odm->Adapter;
- u32 CurrentChannel, RfLoopReg;
- u8 n;
- u32 Reg88c, Regc08, Reg874, Regc50, val32;
- u8 initial_gain = 0x5a;
- u32 PSD_report_tmp;
- u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
- bool bResult = true;
- u32 AFE_Backup[16];
- u32 AFE_REG_8723A[16] = {
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN,
- rFPGA0_XCD_SwitchControl, rBlue_Tooth};
-
- if (!(pDM_Odm->SupportICType & ODM_RTL8723A))
- return bResult;
-
-	if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
- return bResult;
- /* 1 Backup Current RF/BB Settings */
-
- CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL,
- bRFRegOffsetMask);
- RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
- /* change to Antenna A */
- val32 = rtl8723au_read32(adapter, rFPGA0_XA_RFInterfaceOE);
- val32 &= ~0x300;
- val32 |= 0x100; /* Enable antenna A */
- rtl8723au_write32(adapter, rFPGA0_XA_RFInterfaceOE, val32);
-
-	/* Step 1: Use IQK to transmit a single tone */
-
- udelay(10);
-
- /* Store A Path Register 88c, c08, 874, c50 */
- Reg88c = rtl8723au_read32(adapter, rFPGA0_AnalogParameter4);
- Regc08 = rtl8723au_read32(adapter, rOFDM0_TRMuxPar);
- Reg874 = rtl8723au_read32(adapter, rFPGA0_XCD_RFInterfaceSW);
- Regc50 = rtl8723au_read32(adapter, rOFDM0_XAAGCCore1);
-
- /* Store AFE Registers */
- odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- /* Set PSD 128 pts */
- val32 = rtl8723au_read32(adapter, rFPGA0_PSDFunction);
- val32 &= ~(BIT(14) | BIT(15));
- rtl8723au_write32(adapter, rFPGA0_PSDFunction, val32);
-
-	/* Set RF to channel 1 for the measurement */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01);
-
- /* AFE all on step */
- rtl8723au_write32(adapter, rRx_Wait_CCA, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_CCK_RFON, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_CCK_BBON, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_OFDM_RFON, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_OFDM_BBON, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_To_Rx, 0x6FDB25A4);
- rtl8723au_write32(adapter, rTx_To_Tx, 0x6FDB25A4);
- rtl8723au_write32(adapter, rRx_CCK, 0x6FDB25A4);
- rtl8723au_write32(adapter, rRx_OFDM, 0x6FDB25A4);
- rtl8723au_write32(adapter, rRx_Wait_RIFS, 0x6FDB25A4);
- rtl8723au_write32(adapter, rRx_TO_Rx, 0x6FDB25A4);
- rtl8723au_write32(adapter, rStandby, 0x6FDB25A4);
- rtl8723au_write32(adapter, rSleep, 0x6FDB25A4);
- rtl8723au_write32(adapter, rPMPD_ANAEN, 0x6FDB25A4);
- rtl8723au_write32(adapter, rFPGA0_XCD_SwitchControl, 0x6FDB25A4);
- rtl8723au_write32(adapter, rBlue_Tooth, 0x6FDB25A4);
-
- /* 3 wire Disable */
- rtl8723au_write32(adapter, rFPGA0_AnalogParameter4, 0xCCF000C0);
-
- /* BB IQK Setting */
- rtl8723au_write32(adapter, rOFDM0_TRMuxPar, 0x000800E4);
- rtl8723au_write32(adapter, rFPGA0_XCD_RFInterfaceSW, 0x22208000);
-
-	/* IQK setting, tone @ 4.34 MHz */
- rtl8723au_write32(adapter, rTx_IQK_Tone_A, 0x10008C1C);
- rtl8723au_write32(adapter, rTx_IQK, 0x01007c00);
-
- /* Page B init */
- rtl8723au_write32(adapter, rConfig_AntA, 0x00080000);
- rtl8723au_write32(adapter, rConfig_AntA, 0x0f600000);
- rtl8723au_write32(adapter, rRx_IQK, 0x01004800);
- rtl8723au_write32(adapter, rRx_IQK_Tone_A, 0x10008c1f);
- rtl8723au_write32(adapter, rTx_IQK_PI_A, 0x82150008);
- rtl8723au_write32(adapter, rRx_IQK_PI_A, 0x28150008);
- rtl8723au_write32(adapter, rIQK_AGC_Rsp, 0x001028d0);
-
- /* RF loop Setting */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
-
- /* IQK Single tone start */
- rtl8723au_write32(adapter, rFPGA0_IQK, 0x80800000);
- rtl8723au_write32(adapter, rIQK_AGC_Pts, 0xf8000000);
- udelay(1000);
- PSD_report_tmp = 0x0;
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntA_report)
- AntA_report = PSD_report_tmp;
- }
-
- PSD_report_tmp = 0x0;
-
- val32 = rtl8723au_read32(adapter, rFPGA0_XA_RFInterfaceOE);
- val32 &= ~0x300;
- val32 |= 0x200; /* Enable antenna B */
- rtl8723au_write32(adapter, rFPGA0_XA_RFInterfaceOE, val32);
- udelay(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntB_report)
- AntB_report = PSD_report_tmp;
- }
-
- /* change to open case */
- /* change to Ant A and B all open case */
- val32 = rtl8723au_read32(adapter, rFPGA0_XA_RFInterfaceOE);
- val32 &= ~0x300;
- rtl8723au_write32(adapter, rFPGA0_XA_RFInterfaceOE, val32);
- udelay(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntO_report)
- AntO_report = PSD_report_tmp;
- }
-
- /* Close IQK Single Tone function */
- rtl8723au_write32(adapter, rFPGA0_IQK, 0x00000000);
- PSD_report_tmp = 0x0;
-
-	/* 1 Return to antenna A */
- val32 = rtl8723au_read32(adapter, rFPGA0_XA_RFInterfaceOE);
- val32 &= ~0x300;
- val32 |= 0x100; /* Enable antenna A */
- rtl8723au_write32(adapter, rFPGA0_XA_RFInterfaceOE, val32);
- rtl8723au_write32(adapter, rFPGA0_AnalogParameter4, Reg88c);
- rtl8723au_write32(adapter, rOFDM0_TRMuxPar, Regc08);
- rtl8723au_write32(adapter, rFPGA0_XCD_RFInterfaceSW, Reg874);
- val32 = rtl8723au_read32(adapter, rOFDM0_XAAGCCore1);
- val32 &= ~0x7f;
- val32 |= 0x40;
- rtl8723au_write32(adapter, rOFDM0_XAAGCCore1, val32);
-
- rtl8723au_write32(adapter, rOFDM0_XAAGCCore1, Regc50);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
- CurrentChannel);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
-
- /* Reload AFE Registers */
- odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("psd_report_A[%d]= %d \n", 2416, AntA_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("psd_report_B[%d]= %d \n", 2416, AntB_report));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("psd_report_O[%d]= %d \n", 2416, AntO_report));
-
- /* 2 Test Ant B based on Ant A is ON */
- if (mode == ANTTESTB) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report+1)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- } else if (mode == ANTTESTALL) {
- /* 2 Test Ant A and B based on DPDT Open */
-		if ((AntO_report >= 100) && (AntO_report < 118)) {
- if (AntA_report > (AntO_report+1)) {
- pDM_SWAT_Table->ANTA_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD, ("Ant A is OFF"));
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD, ("Ant A is ON"));
- }
-
- if (AntB_report > (AntO_report+2)) {
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD, ("Ant B is OFF"));
- } else {
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV,
- ODM_DBG_LOUD, ("Ant B is ON"));
- }
- }
- } else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
- /* Set Antenna A on as default */
- pDM_SWAT_Table->ANTA_ON = true;
- /* Set Antenna B off as default */
- pDM_SWAT_Table->ANTB_ON = false;
- bResult = false;
- }
-
- return bResult;
-}
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
deleted file mode 100644
index 0562f61bd1dc..000000000000
--- a/drivers/staging/rtl8723au/hal/odm_HWConfig.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-/* */
-/* include files */
-/* */
-
-#include "odm_precomp.h"
-
-static u8 odm_QueryRxPwrPercentage(s8 AntPower)
-{
- if ((AntPower <= -100) || (AntPower >= 20))
- return 0;
- else if (AntPower >= 0)
- return 100;
- else
- return 100 + AntPower;
-}
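
A few standalone sample points for the dBm-to-percentage mapping above; the input is treated as a received power in dBm and clamped to the 0..100 percent range.

#include <stdio.h>

static unsigned char pwr_percentage(signed char ant_power)
{
	if (ant_power <= -100 || ant_power >= 20)
		return 0;
	if (ant_power >= 0)
		return 100;
	return 100 + ant_power;		/* e.g. -60 dBm -> 40% */
}

int main(void)
{
	printf("%u %u %u\n", pwr_percentage(-95),	/*  5 */
			     pwr_percentage(-60),	/* 40 */
			     pwr_percentage(-10));	/* 90 */
	return 0;
}
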
-
-static s32 odm_SignalScaleMapping_92CSeries(struct dm_odm_t *pDM_Odm, s32 CurrSig)
-{
- s32 RetSig = 0;
-
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40)*2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
- else
- RetSig = CurrSig;
-
- return RetSig;
-}
-
-static s32 odm_SignalScaleMapping(struct dm_odm_t *pDM_Odm, s32 CurrSig)
-{
- return odm_SignalScaleMapping_92CSeries(pDM_Odm, CurrSig);
-}
-
-static u8
-odm_EVMdbToPercentage(
- s8 Value
- )
-{
-	/* -33dB~0dB to 0%~99% */
- s8 ret_val;
-
- ret_val = Value;
-
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
-
- ret_val = 0 - ret_val;
- ret_val *= 3;
-
- if (ret_val == 99)
- ret_val = 100;
-
- return ret_val;
-}
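
A standalone worked example of the mapping above: percentage = -EVM_dB * 3 over the -33 dB..0 dB range, with a result of 99 promoted to 100.

#include <stdio.h>

static unsigned char evm_db_to_percent(signed char evm_db)
{
	signed char v = evm_db;

	if (v >= 0)
		v = 0;
	if (v <= -33)
		v = -33;
	v = (signed char)(-v * 3);		/* 0..99 */
	return (v == 99) ? 100 : (unsigned char)v;
}

int main(void)
{
	printf("%u %u %u\n", evm_db_to_percent(-10),	/*  30 */
			     evm_db_to_percent(-33),	/* 100 */
			     evm_db_to_percent(0));	/*   0 */
	return 0;
}
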
-
-static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_packet_info *pPktinfo)
-{
- struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
- u8 i, Max_spatial_stream;
- s8 rx_pwr[4], rx_pwr_all = 0;
- u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
- u8 RSSI, total_rssi = 0;
- u8 isCCKrate = 0;
- u8 rf_rx_num = 0;
- u8 cck_highpwr = 0;
-
- isCCKrate = (pPktinfo->Rate <= DESC92C_RATE11M) ? true : false;
- pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = -1;
- pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
-
- if (isCCKrate) {
- u8 report;
- u8 cck_agc_rpt;
-
- pDM_Odm->PhyDbgInfo.NumQryPhyStatusCCK++;
- /* (1)Hardware does not provide RSSI for CCK */
-		/* (2) PWDB, average PWDB calculated by hardware (for rate adaptive) */
-
- cck_highpwr = pDM_Odm->bCckHighPower;
-
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
-
- /* The RSSI formula should be modified according to the gain table */
- if (!cck_highpwr) {
- report = (cck_agc_rpt & 0xc0)>>6;
- switch (report) {
-			/* Modify the RF LNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
-			/* Note: different RF chips have different LNA gains. */
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- report = (cck_agc_rpt & 0x60)>>5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
- break;
- }
- }
-
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
-
- /* Modification for ext-LNA board */
- if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((cck_agc_rpt>>7) == 0) {
- PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
- } else {
- if (PWDB_ALL > 38)
- PWDB_ALL -= 16;
- else
- PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
- }
-
- /* CCK modification */
- if (PWDB_ALL > 25 && PWDB_ALL <= 60)
- PWDB_ALL += 6;
- } else { /* Modification for int-LNA board */
- if (PWDB_ALL > 99)
- PWDB_ALL -= 8;
- else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
- PWDB_ALL += 4;
- }
- pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
- pPhyInfo->RecvSignalPower = rx_pwr_all;
- /* (3) Get Signal Quality (EVM) */
- if (pPktinfo->bPacketMatchBSSID) {
- u8 SQ, SQ_rpt;
-
- SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
-
- if (SQ_rpt > 64)
- SQ = 0;
- else if (SQ_rpt < 20)
- SQ = 100;
- else
- SQ = ((64-SQ_rpt) * 100) / 44;
-
- pPhyInfo->SignalQuality = SQ;
- pPhyInfo->RxMIMOSignalQuality[RF_PATH_A] = SQ;
- pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
- }
- } else { /* is OFDM rate */
- pDM_Odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
-
- /* (1)Get RSSI for HT rate */
-
- for (i = RF_PATH_A; i < RF_PATH_MAX; i++) {
- /* 2008/01/30 MH we will judge RF RX path now. */
- if (pDM_Odm->RFPathRxEnable & BIT(i))
- rf_rx_num++;
-
- rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain & 0x3F)*2) - 110;
-
- pPhyInfo->RxPwr[i] = rx_pwr[i];
-
- /* Translate DBM to percentage. */
- RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
- total_rssi += RSSI;
-
- /* Modification for ext-LNA board */
- if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((pPhyStaRpt->path_agc[i].trsw) == 1)
- RSSI = (RSSI > 94) ? 100 : (RSSI+6);
- else
- RSSI = (RSSI <= 16) ? (RSSI>>3) : (RSSI-16);
-
- if ((RSSI <= 34) && (RSSI >= 4))
- RSSI -= 4;
- }
-
- pPhyInfo->RxMIMOSignalStrength[i] = (u8) RSSI;
-
- /* Get Rx snr value in DB */
- pPhyInfo->RxSNR[i] = pDM_Odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
- }
-
-		/* (2) PWDB, average PWDB calculated by hardware (for rate adaptive) */
- rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f)-110;
-
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- PWDB_ALL_BT = PWDB_ALL;
-
- pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
- pPhyInfo->RxPower = rx_pwr_all;
- pPhyInfo->RecvSignalPower = rx_pwr_all;
-
- /* (3)EVM of HT rate */
- if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
-			Max_spatial_stream = 2; /* both spatial streams make sense */
- else
- Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
-
- for (i = 0; i < Max_spatial_stream; i++) {
-			/* Do not use a shift operation like "rx_evmX >>= 1", because the compiler in the free build environment */
-			/* may fill the most significant bit with zero when shifting, which can turn a negative */
-			/* value into a positive one; the dBm value (which is supposed to be negative) would then be wrong. */
- EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
-
- if (pPktinfo->bPacketMatchBSSID) {
- if (i == RF_PATH_A) {
- /* Fill value in RFD, Get the first spatial stream only */
- pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
- }
- pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
- }
- }
- }
-	/* UI BSS list signal strength (in percentage); make it look good, range 0~100. */
- /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
- if (isCCKrate) {
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, PWDB_ALL));/* PWDB_ALL; */
- } else {
- if (rf_rx_num != 0)
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, total_rssi /= rf_rx_num));
- }
-}
-
-static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm,
- struct phy_info *pPhyInfo,
- struct odm_packet_info *pPktinfo)
-{
- s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
- s32 UndecoratedSmoothedOFDM, RSSI_Ave;
- u8 isCCKrate = 0;
- u8 RSSI_max, RSSI_min, i;
- u32 OFDM_pkt = 0;
- u32 Weighting = 0;
- struct sta_info *pEntry;
-
- if (pPktinfo->StationID == 0xFF)
- return;
-
- pEntry = pDM_Odm->pODM_StaInfo[pPktinfo->StationID];
- if (!pEntry)
- return;
-	if (!pPktinfo->bPacketMatchBSSID)
- return;
-
- isCCKrate = (pPktinfo->Rate <= DESC92C_RATE11M) ? true : false;
-
- /* Smart Antenna Debug Message------------------*/
-
- UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
- UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
- UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
-
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
- if (!isCCKrate) { /* ofdm rate */
- if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_B] == 0) {
- RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- } else {
- if (pPhyInfo->RxMIMOSignalStrength[RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[RF_PATH_B]) {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
- } else {
- RSSI_max = pPhyInfo->RxMIMOSignalStrength[RF_PATH_B];
- RSSI_min = pPhyInfo->RxMIMOSignalStrength[RF_PATH_A];
- }
- if ((RSSI_max - RSSI_min) < 3)
- RSSI_Ave = RSSI_max;
- else if ((RSSI_max - RSSI_min) < 6)
- RSSI_Ave = RSSI_max - 1;
- else if ((RSSI_max - RSSI_min) < 10)
- RSSI_Ave = RSSI_max - 2;
- else
- RSSI_Ave = RSSI_max - 3;
- }
-
- /* 1 Process OFDM RSSI */
- if (UndecoratedSmoothedOFDM <= 0) {
- /* initialize */
- UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
- } else {
- if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
- UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
- (RSSI_Ave)) / (Rx_Smooth_Factor);
- UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
- } else {
- UndecoratedSmoothedOFDM =
- (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
- (RSSI_Ave)) / (Rx_Smooth_Factor);
- }
- }
- pEntry->rssi_stat.PacketMap =
- (pEntry->rssi_stat.PacketMap<<1) | BIT(0);
- } else {
- RSSI_Ave = pPhyInfo->RxPWDBAll;
-
- /* 1 Process CCK RSSI */
- if (UndecoratedSmoothedCCK <= 0) {
- /* initialize */
- UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
- } else {
- if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
- UndecoratedSmoothedCCK =
- (((UndecoratedSmoothedCCK)*(Rx_Smooth_Factor-1)) +
- (pPhyInfo->RxPWDBAll)) / (Rx_Smooth_Factor);
- UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
- } else {
- UndecoratedSmoothedCCK =
- (((UndecoratedSmoothedCCK)*(Rx_Smooth_Factor-1)) +
- (pPhyInfo->RxPWDBAll)) / (Rx_Smooth_Factor);
- }
- }
- pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
- }
-
- /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
- if (pEntry->rssi_stat.ValidBit >= 64)
- pEntry->rssi_stat.ValidBit = 64;
- else
- pEntry->rssi_stat.ValidBit++;
-
- for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
- OFDM_pkt +=
- (u8)(pEntry->rssi_stat.PacketMap>>i) & BIT(0);
-
- if (pEntry->rssi_stat.ValidBit == 64) {
- Weighting = ((OFDM_pkt<<4) > 64)?64:(OFDM_pkt<<4);
- UndecoratedSmoothedPWDB = (Weighting*UndecoratedSmoothedOFDM+(64-Weighting)*UndecoratedSmoothedCCK)>>6;
- } else {
- if (pEntry->rssi_stat.ValidBit != 0)
- UndecoratedSmoothedPWDB = (OFDM_pkt*UndecoratedSmoothedOFDM+(pEntry->rssi_stat.ValidBit-OFDM_pkt)*UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
- else
- UndecoratedSmoothedPWDB = 0;
- }
- pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
- pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
- pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
- }
-}
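
A standalone sketch of the running-average rule used in odm_Process_RSSIForDM(): each new sample gets a 1/Rx_Smooth_Factor weight, and the result is bumped by one when the instantaneous value exceeds the running average so the estimate can still climb despite integer truncation. This is simplified (the driver compares RxPWDBAll against the average while folding in RSSI_Ave, and keeps separate CCK/OFDM averages); Rx_Smooth_Factor is defined elsewhere in the driver, and the value 20 below is purely illustrative.

#include <stdio.h>

#define SMOOTH_FACTOR 20	/* illustrative value, not the driver's */

static int smooth_rssi(int prev, int sample)
{
	int next;

	if (prev <= 0)			/* first sample initializes the average */
		return sample;

	next = (prev * (SMOOTH_FACTOR - 1) + sample) / SMOOTH_FACTOR;
	if (sample > prev)		/* round up while the signal is rising */
		next += 1;
	return next;
}

int main(void)
{
	int samples[] = { 40, 42, 45, 43, 41 };
	int avg = 0, i;

	for (i = 0; i < 5; i++) {
		avg = smooth_rssi(avg, samples[i]);
		printf("sample %d -> avg %d\n", samples[i], avg);
	}
	return 0;
}
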
-
-void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo,
- u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
-{
- odm_RxPhyStatus92CSeries_Parsing(pDM_Odm, pPhyInfo,
- pPhyStatus, pPktinfo);
-
- odm_Process_RSSIForDM(pDM_Odm, pPhyInfo, pPktinfo);
-}
diff --git a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
deleted file mode 100644
index a63c6cb88bc9..000000000000
--- a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-#include "usb_ops_linux.h"
-
-void
-odm_ConfigRFReg_8723A(
- struct dm_odm_t *pDM_Odm,
- u32 Addr,
- u32 Data,
- enum RF_RADIO_PATH RF_PATH,
- u32 RegAddr
- )
-{
- if (Addr == 0xfe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
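
For reference, a standalone summary of the pseudo-addresses handled above: 0xfe..0xf9 in the RF config tables are not real register offsets, they encode delays to insert between writes. The sketch returns the corresponding delay in microseconds.

#include <stdio.h>

static unsigned int rf_cfg_delay_us(unsigned int addr)
{
	switch (addr) {
	case 0xfe: return 50000;	/* msleep(50) */
	case 0xfd: return 5000;		/* mdelay(5)  */
	case 0xfc: return 1000;		/* mdelay(1)  */
	case 0xfb: return 50;		/* udelay(50) */
	case 0xfa: return 5;		/* udelay(5)  */
	case 0xf9: return 1;		/* udelay(1)  */
	default:   return 0;		/* a real register write */
	}
}

int main(void)
{
	printf("0xfe -> %u us\n", rf_cfg_delay_us(0xfe));
	printf("0x00 -> %u us\n", rf_cfg_delay_us(0x00));
	return 0;
}
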
-
-void odm_ConfigMAC_8723A(struct dm_odm_t *pDM_Odm, u32 addr, u8 data)
-{
- rtl8723au_write8(pDM_Odm->Adapter, addr, data);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===> %s: [MAC_REG] %08X %08X\n", __func__, addr, data));
-}
-
-void odm_ConfigBB_AGC_8723A(struct dm_odm_t *pDM_Odm, u32 addr, u32 data)
-{
- rtl8723au_write32(pDM_Odm->Adapter, addr, data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===> %s: [AGC_TAB] %08X %08X\n", __func__, addr, data));
-}
-
-void
-odm_ConfigBB_PHY_8723A(struct dm_odm_t *pDM_Odm, u32 addr, u32 data)
-{
- if (addr == 0xfe)
- msleep(50);
- else if (addr == 0xfd)
- mdelay(5);
- else if (addr == 0xfc)
- mdelay(1);
- else if (addr == 0xfb)
- udelay(50);
- else if (addr == 0xfa)
- udelay(5);
- else if (addr == 0xf9)
- udelay(1);
- else if (addr == 0xa24)
- pDM_Odm->RFCalibrateInfo.RegA24 = data;
- rtl8723au_write32(pDM_Odm->Adapter, addr, data);
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
- ("===> %s: [PHY_REG] %08X %08X\n", __func__, addr, data));
-}
diff --git a/drivers/staging/rtl8723au/hal/odm_debug.c b/drivers/staging/rtl8723au/hal/odm_debug.c
deleted file mode 100644
index cb2bdda6b0eb..000000000000
--- a/drivers/staging/rtl8723au/hal/odm_debug.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-
-void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm)
-{
- pDM_Odm->DebugLevel = ODM_DBG_TRACE;
- pDM_Odm->DebugComponents = 0;
-}
-
-u32 GlobalDebugLevel23A;
-
-void rt_trace(int comp, int level, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info(DRIVER_PREFIX " [0x%08x,%d] %pV", comp, level, &vaf);
-
- va_end(args);
-}
diff --git a/drivers/staging/rtl8723au/hal/odm_interface.c b/drivers/staging/rtl8723au/hal/odm_interface.c
deleted file mode 100644
index d8f67902708e..000000000000
--- a/drivers/staging/rtl8723au/hal/odm_interface.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-/* */
-/* include files */
-/* */
-
-#include "odm_precomp.h"
-/* */
-/* ODM IO Relative API. */
-/* */
-#include <usb_ops_linux.h>
-
-void ODM_SetRFReg(
- struct dm_odm_t *pDM_Odm,
- enum RF_RADIO_PATH eRFPath,
- u32 RegAddr,
- u32 BitMask,
- u32 Data
- )
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
-
- PHY_SetRFReg(Adapter, eRFPath, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetRFReg(
- struct dm_odm_t *pDM_Odm,
- enum RF_RADIO_PATH eRFPath,
- u32 RegAddr,
- u32 BitMask
- )
-{
- struct rtw_adapter *Adapter = pDM_Odm->Adapter;
-
- return PHY_QueryRFReg(Adapter, eRFPath, RegAddr, BitMask);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
deleted file mode 100644
index bfcbd7a349cf..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ /dev/null
@@ -1,11265 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#include <drv_types.h>
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-#define DIS_PS_RX_BCN
-
-u32 BTCoexDbgLevel = _bt_dbg_off_;
-
-#define RTPRINT(_Comp, _Level, Fmt)\
-do {\
- if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
- printk Fmt;\
- } \
-} while (0)
-
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)\
-if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
- u32 __i; \
- u8 *ptr = (u8 *)_Ptr; \
- printk printstr; \
- printk(" "); \
- for (__i = 0; __i < 6; __i++) \
- printk("%02X%s", ptr[__i], (__i == 5)?"":"-"); \
- printk("\n"); \
-}
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
-if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
- u32 __i; \
- u8 *ptr = (u8 *)_HexData; \
- printk(_TitleString); \
- for (__i = 0; __i < (u32)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) % 4) == 0)?" ":" ");\
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
-}
-/* Added by Annie, 2005-11-22. */
-#define MAX_STR_LEN 64
-/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. */
-#define PRINTABLE(_ch) (_ch >= ' ' && _ch <= '~')
-#define RT_PRINT_STR(_Comp, _Level, _TitleString, _Ptr, _Len) \
- { \
- u32 __i; \
- u8 buffer[MAX_STR_LEN]; \
- u32 length = (_Len < MAX_STR_LEN) ? _Len : (MAX_STR_LEN-1);\
- memset(buffer, 0, MAX_STR_LEN); \
- memcpy(buffer, (u8 *)_Ptr, length); \
- for (__i = 0; __i < length; __i++) { \
- if (!PRINTABLE(buffer[__i])) \
- buffer[__i] = '?'; \
- } \
- buffer[length] = '\0'; \
- printk(_TitleString); \
- printk(": %d, <%s>\n", _Len, buffer); \
- }
-
-#define DCMD_Printf(...)
-#define RT_ASSERT(...)
-
-
-#define GetDefaultAdapter(padapter) padapter
-
-#define PlatformZeroMemory(ptr, sz) memset(ptr, 0, sz)
-
-#define GET_UNDECORATED_AVERAGE_RSSI(padapter) \
- (GET_HAL_DATA(padapter)->dmpriv.EntryMinUndecoratedSmoothedPWDB)
-#define RT_RF_CHANGE_SOURCE u32
-
-enum {
- RT_JOIN_INFRA = 1,
- RT_JOIN_IBSS = 2,
- RT_START_IBSS = 3,
- RT_NO_ACTION = 4,
-};
-
-/* power saving */
-
-/* ===== Below this line is sync from SD7 driver COMMOM/BT.c ===== */
-
-static u8 BT_Operation(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->BtOperationOn)
- return true;
- else
- return false;
-}
-
-static u8 BT_IsLegalChannel(struct rtw_adapter *padapter, u8 channel)
-{
- struct rt_channel_info *pChanneList = NULL;
- u8 channelLen, i;
-
- pChanneList = padapter->mlmeextpriv.channel_set;
- channelLen = padapter->mlmeextpriv.max_chan_nums;
-
- for (i = 0; i < channelLen; i++) {
- RTPRINT(FIOCTL, IOCTL_STATE,
- ("Check if chnl(%d) in channel plan contains bt target chnl(%d) for BT connection\n",
- pChanneList[i].ChannelNum, channel));
- if ((channel == pChanneList[i].ChannelNum) ||
- (channel == pChanneList[i].ChannelNum + 2))
- return channel;
- }
- return 0;
-}
-
-void BT_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi, u8 *rssi_bt)
-{
- BTDM_SignalCompensation(padapter, rssi_wifi, rssi_bt);
-}
-
-void rtl8723a_BT_wifiscan_notify(struct rtw_adapter *padapter, u8 scanType)
-{
- BTHCI_WifiScanNotify(padapter, scanType);
- BTDM_CheckAntSelMode(padapter);
- BTDM_WifiScanNotify(padapter, scanType);
-}
-
-void rtl8723a_BT_wifiassociate_notify(struct rtw_adapter *padapter, u8 action)
-{
- /* action : */
- /* true = associate start */
- /* false = associate finished */
- if (action)
- BTDM_CheckAntSelMode(padapter);
-
- BTDM_WifiAssociateNotify(padapter, action);
-}
-
-void BT_HaltProcess(struct rtw_adapter *padapter)
-{
- BTDM_ForHalt(padapter);
-}
-
-/* ===== End of sync from SD7 driver COMMOM/BT.c ===== */
-
-#define i64fmt "ll"
-#define UINT64_C(v) (v)
-
-#define FillOctetString(_os, _octet, _len) \
- (_os).Octet = (u8 *)(_octet); \
- (_os).Length = (_len);
-
-static enum rt_status PlatformIndicateBTEvent(
- struct rtw_adapter *padapter,
- void *pEvntData,
- u32 dataLen
- )
-{
- enum rt_status rt_status = RT_STATUS_FAILURE;
-
-	RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL, ("BT event start, %d bytes of data to be transferred!!\n", dataLen));
- RTPRINT_DATA(FIOCTL, IOCTL_BT_EVENT_DETAIL, "To transfer Hex Data :\n",
- pEvntData, dataLen);
-
- BT_EventParse(padapter, pEvntData, dataLen);
-
- printk(KERN_WARNING "%s: Linux has no way to report BT event!!\n", __func__);
-
- RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL, ("BT event end, %s\n",
- (rt_status == RT_STATUS_SUCCESS) ? "SUCCESS" : "FAIL"));
-
- return rt_status;
-}
-
-/* ===== Below this line is sync from SD7 driver COMMOM/bt_hci.c ===== */
-
-static u8 bthci_GetLocalChannel(struct rtw_adapter *padapter)
-{
- return padapter->mlmeextpriv.cur_channel;
-}
-
-static u8 bthci_GetCurrentEntryNum(struct rtw_adapter *padapter, u8 PhyHandle)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- u8 i;
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- if ((pBTInfo->BtAsocEntry[i].bUsed) &&
- (pBTInfo->BtAsocEntry[i].PhyLinkCmdData.BtPhyLinkhandle == PhyHandle))
- return i;
- }
-
- return 0xFF;
-}
-
-static void bthci_DecideBTChannel(struct rtw_adapter *padapter, u8 EntryNum)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct mlme_priv *pmlmepriv;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_hci_info *pBtHciInfo;
- struct chnl_txpower_triple *pTriple_subband = NULL;
- struct common_triple *pTriple;
- u8 i, j, localchnl, firstRemoteLegalChnlInTriplet = 0;
- u8 regulatory_skipLen = 0;
- u8 subbandTripletCnt = 0;
-
- pmlmepriv = &padapter->mlmepriv;
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtHciInfo = &pBTInfo->BtHciInfo;
-
- pBtMgnt->CheckChnlIsSuit = true;
- localchnl = bthci_GetLocalChannel(padapter);
-
- pTriple = (struct common_triple *)
- &pBtHciInfo->BTPreChnllist[COUNTRY_STR_LEN];
-
- /* contains country string, len is 3 */
- for (i = 0; i < (pBtHciInfo->BtPreChnlListLen-COUNTRY_STR_LEN); i += 3, pTriple++) {
- /* */
-		/* Check every triplet; a triplet may be a */
-		/* regulatory extension identifier or a sub-band triplet. */
- /* */
- if (pTriple->byte_1st == 0xc9) {
- /* Regulatory Extension Identifier, skip it */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
- ("Find Regulatory ID, regulatory class = %d\n", pTriple->byte_2nd));
- regulatory_skipLen += 3;
- pTriple_subband = NULL;
- continue;
- } else { /* Sub-band triplet */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Find Sub-band triplet \n"));
- subbandTripletCnt++;
- pTriple_subband = (struct chnl_txpower_triple *)pTriple;
-			/* If the first legal remote channel has not been found yet, find the first */
-			/* remote channel that is legal for our channel plan. */
-
-			/* Search the sub-band triplet to check whether a remote channel is legal for our channel plan. */
- for (j = pTriple_subband->FirstChnl; j < (pTriple_subband->FirstChnl+pTriple_subband->NumChnls); j++) {
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" Check if chnl(%d) is legal\n", j));
- if (BT_IsLegalChannel(padapter, j)) {
- /* remote channel is legal for our channel plan. */
- firstRemoteLegalChnlInTriplet = j;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
- ("Find first remote legal channel : %d\n",
- firstRemoteLegalChnlInTriplet));
-
-					/* If we find a remote legal channel in the sub-band triplet */
-					/* and only a BT connection is established (the local device is not connected to any AP or IBSS), */
-					/* then we just switch to the remote channel. */
- if (!(check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_ADHOC_STATE|WIFI_AP_STATE) ||
- BTHCI_HsConnectionEstablished(padapter))) {
- pBtMgnt->BTChannel = firstRemoteLegalChnlInTriplet;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Remote legal channel (%d) is selected, Local not connect to any!!\n", pBtMgnt->BTChannel));
- return;
- } else {
- if ((localchnl >= firstRemoteLegalChnlInTriplet) &&
- (localchnl < (pTriple_subband->FirstChnl+pTriple_subband->NumChnls))) {
- pBtMgnt->BTChannel = localchnl;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Local channel (%d) is selected, wifi or BT connection exists\n", pBtMgnt->BTChannel));
- return;
- }
- }
- break;
- }
- }
- }
- }
-
- if (subbandTripletCnt) {
- /* if any preferred channel triplet exists */
-		RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("There are %d sub-band triplets, ", subbandTripletCnt));
- if (firstRemoteLegalChnlInTriplet == 0) {
- /* no legal channel is found, reject the connection. */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("no legal channel is found!!\n"));
- } else {
-			/* A remote legal channel was found but does not match the local one */
-			/* (a wifi connection exists), so reject the connection. */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
- ("Remote Legal channel is found but not match to local(wifi connection exists)!!\n"));
- }
- pBtMgnt->CheckChnlIsSuit = false;
- } else {
-		/* No preferred channel triplet exists. */
- /* Use current legal channel as the bt channel. */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("No sub band triplet exists!!\n"));
- }
- pBtMgnt->BTChannel = localchnl;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Local channel (%d) is selected!!\n", pBtMgnt->BTChannel));
-}
-
-/* Success:return true */
-/* Fail:return false */
-static u8 bthci_GetAssocInfo(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTInfo;
- struct bt_hci_info *pBtHciInfo;
- u8 tempBuf[256];
- u8 i = 0;
- u8 BaseMemoryShift = 0;
- u16 TotalLen = 0;
- struct amp_assoc_structure *pAmpAsoc;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("GetAssocInfo start\n"));
- pBTInfo = GET_BT_INFO(padapter);
- pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar == 0) {
- if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen < (MAX_AMP_ASSOC_FRAG_LEN))
- TotalLen = pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen;
- else if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen == (MAX_AMP_ASSOC_FRAG_LEN))
- TotalLen = MAX_AMP_ASSOC_FRAG_LEN;
- } else if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar > 0)
- TotalLen = pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar;
-
- while ((pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar >= BaseMemoryShift) || TotalLen > BaseMemoryShift) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("GetAssocInfo, TotalLen =%d, BaseMemoryShift =%d\n", TotalLen, BaseMemoryShift));
- memcpy(tempBuf,
- (u8 *)pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment+BaseMemoryShift,
- TotalLen-BaseMemoryShift);
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, "GetAssocInfo :\n",
- tempBuf, TotalLen-BaseMemoryShift);
-
- pAmpAsoc = (struct amp_assoc_structure *)tempBuf;
- le16_to_cpus(&pAmpAsoc->Length);
- BaseMemoryShift += 3 + pAmpAsoc->Length;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TypeID = 0x%x, ", pAmpAsoc->TypeID));
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, "Hex Data: \n", pAmpAsoc->Data, pAmpAsoc->Length);
- switch (pAmpAsoc->TypeID) {
- case AMP_MAC_ADDR:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_MAC_ADDR\n"));
- if (pAmpAsoc->Length > 6)
- return false;
- memcpy(pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr, pAmpAsoc->Data, 6);
- RTPRINT_ADDR(FIOCTL, IOCTL_BT_HCICMD, ("Remote Mac address \n"), pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr);
- break;
- case AMP_PREFERRED_CHANNEL_LIST:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_PREFERRED_CHANNEL_LIST\n"));
- pBtHciInfo->BtPreChnlListLen = pAmpAsoc->Length;
- memcpy(pBtHciInfo->BTPreChnllist,
- pAmpAsoc->Data,
- pBtHciInfo->BtPreChnlListLen);
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, "Preferred channel list : \n", pBtHciInfo->BTPreChnllist, pBtHciInfo->BtPreChnlListLen);
- bthci_DecideBTChannel(padapter, EntryNum);
- break;
- case AMP_CONNECTED_CHANNEL:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_CONNECTED_CHANNEL\n"));
- pBtHciInfo->BTConnectChnlListLen = pAmpAsoc->Length;
- memcpy(pBtHciInfo->BTConnectChnllist,
- pAmpAsoc->Data,
- pBtHciInfo->BTConnectChnlListLen);
- break;
- case AMP_80211_PAL_CAP_LIST:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> AMP_80211_PAL_CAP_LIST\n"));
- pBTInfo->BtAsocEntry[EntryNum].BTCapability = *(u32 *)(pAmpAsoc->Data);
- if (pBTInfo->BtAsocEntry[EntryNum].BTCapability & 0x00000001) {
- /* TODO: */
-
-				/* Signifies the PAL is capable of utilizing received activity reports. */
- }
- if (pBTInfo->BtAsocEntry[EntryNum].BTCapability & 0x00000002) {
- /* TODO: */
- /* Signifies PAL is capable of utilizing scheduling information received in an activity reports. */
- }
- break;
- case AMP_80211_PAL_VISION:
- pBtHciInfo->BTPalVersion = *(u8 *)(pAmpAsoc->Data);
- pBtHciInfo->BTPalCompanyID = *(u16 *)(((u8 *)(pAmpAsoc->Data))+1);
- pBtHciInfo->BTPalsubversion = *(u16 *)(((u8 *)(pAmpAsoc->Data))+3);
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("==> AMP_80211_PAL_VISION PalVersion 0x%x, PalCompanyID 0x%x, Palsubversion 0x%x\n",
- pBtHciInfo->BTPalVersion,
- pBtHciInfo->BTPalCompanyID,
- pBtHciInfo->BTPalsubversion));
- break;
- default:
-			RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("==> Unsupported TypeID!!\n"));
- break;
- }
- i++;
- }
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("GetAssocInfo end\n"));
-
- return true;
-}
-
-static u8 bthci_AddEntry(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- u8 i;
-
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- if (pBTInfo->BtAsocEntry[i].bUsed == false) {
- pBTInfo->BtAsocEntry[i].bUsed = true;
- pBtMgnt->CurrentConnectEntryNum = i;
- break;
- }
- }
-
- if (i == MAX_BT_ASOC_ENTRY_NUM) {
- RTPRINT(FIOCTL, IOCTL_STATE, ("bthci_AddEntry(), Add entry fail!!\n"));
- return false;
- }
- return true;
-}
-
-static u8 bthci_DiscardTxPackets(struct rtw_adapter *padapter, u16 LLH)
-{
- return false;
-}
-
-static u8
-bthci_CheckLogLinkBehavior(
- struct rtw_adapter *padapter,
- struct hci_flow_spec TxFlowSpec
- )
-{
- u8 ID = TxFlowSpec.Identifier;
- u8 ServiceType = TxFlowSpec.ServiceType;
- u16 MaxSDUSize = TxFlowSpec.MaximumSDUSize;
- u32 SDUInterArrivatime = TxFlowSpec.SDUInterArrivalTime;
- u8 match = false;
-
- switch (ID) {
- case 1:
- if (ServiceType == BT_LL_BE) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX best effort flowspec\n"));
- } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 0xffff)) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX guaranteed latency flowspec\n"));
- } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 2500)) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX guaranteed Large latency flowspec\n"));
- }
- break;
- case 2:
- if (ServiceType == BT_LL_BE) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = RX best effort flowspec\n"));
-
- }
- break;
- case 3:
- if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 1492)) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX guaranteed latency flowspec\n"));
- } else if ((ServiceType == BT_LL_GU) && (MaxSDUSize == 2500)) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX guaranteed Large latency flowspec\n"));
- }
- break;
- case 4:
- if (ServiceType == BT_LL_BE) {
- if ((SDUInterArrivatime == 0xffffffff) && (ServiceType == BT_LL_BE) && (MaxSDUSize == 1492)) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX/RX aggregated best effort flowspec\n"));
- }
- } else if (ServiceType == BT_LL_GU) {
- if (SDUInterArrivatime == 100) {
- match = true;
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = TX/RX guaranteed bandwidth flowspec\n"));
- }
- }
- break;
- default:
-		RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Logical Link Type = Unknown Type!!\n"));
- break;
- }
-
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO),
- ("ID = 0x%x, ServiceType = 0x%x, MaximumSDUSize = 0x%x, SDUInterArrivalTime = 0x%x, AccessLatency = 0x%x, FlushTimeout = 0x%x\n",
- TxFlowSpec.Identifier, TxFlowSpec.ServiceType, MaxSDUSize,
- SDUInterArrivatime, TxFlowSpec.AccessLatency, TxFlowSpec.FlushTimeout));
- return match;
-}
-
-static u16 bthci_AssocMACAddr(struct rtw_adapter *padapter, void *pbuf)
-{
- struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
- pAssoStrc->TypeID = AMP_MAC_ADDR;
- pAssoStrc->Length = 0x06;
- memcpy(&pAssoStrc->Data[0], padapter->eeprompriv.mac_addr, 6);
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO),
- ("AssocMACAddr : \n"), pAssoStrc, pAssoStrc->Length+3);
-
- return pAssoStrc->Length + 3;
-}
-
-static u16
-bthci_PALCapabilities(
- struct rtw_adapter *padapter,
- void *pbuf
- )
-{
- struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
-
- pAssoStrc->TypeID = AMP_80211_PAL_CAP_LIST;
- pAssoStrc->Length = 0x04;
-
- pAssoStrc->Data[0] = 0x00;
- pAssoStrc->Data[1] = 0x00;
-
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("PALCapabilities:\n"), pAssoStrc, pAssoStrc->Length+3);
- RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("PALCapabilities \n"));
-
- RTPRINT(FIOCTL, IOCTL_BT_LOGO, (" TypeID = 0x%x,\n Length = 0x%x,\n Content = 0x0000\n",
- pAssoStrc->TypeID,
- pAssoStrc->Length));
-
- return pAssoStrc->Length + 3;
-}
-
-static u16 bthci_AssocPreferredChannelList(struct rtw_adapter *padapter,
- void *pbuf, u8 EntryNum)
-{
- struct bt_30info *pBTInfo;
- struct amp_assoc_structure *pAssoStrc;
- struct amp_pref_chnl_regulatory *pReg;
- struct chnl_txpower_triple *pTriple;
- char ctrString[3] = {'X', 'X', 'X'};
- u32 len = 0;
- u8 preferredChnl;
-
- pBTInfo = GET_BT_INFO(padapter);
- pAssoStrc = (struct amp_assoc_structure *)pbuf;
- pReg = (struct amp_pref_chnl_regulatory *)&pAssoStrc->Data[3];
-
- preferredChnl = bthci_GetLocalChannel(padapter);
- pAssoStrc->TypeID = AMP_PREFERRED_CHANNEL_LIST;
-
- /* locale unknown */
- memcpy(&pAssoStrc->Data[0], &ctrString[0], 3);
- pReg->reXId = 201;
- pReg->regulatoryClass = 254;
- pReg->coverageClass = 0;
- len += 6;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("PREFERRED_CHNL_LIST\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("XXX, 201, 254, 0\n"));
- /* at the following, chnl 1~11 should be contained */
- pTriple = (struct chnl_txpower_triple *)&pAssoStrc->Data[len];
-
- /* (1) if any wifi or bt HS connection exists */
- if ((pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_CREATOR) ||
- (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE |
- WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE |
- WIFI_AP_STATE)) ||
- BTHCI_HsConnectionEstablished(padapter)) {
- pTriple->FirstChnl = preferredChnl;
- pTriple->NumChnls = 1;
- pTriple->MaxTxPowerInDbm = 20;
- len += 3;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD | IOCTL_BT_LOGO), ("First Channel = %d, Channel Num = %d, MaxDbm = %d\n",
- pTriple->FirstChnl,
- pTriple->NumChnls,
- pTriple->MaxTxPowerInDbm));
- }
-
- pAssoStrc->Length = (u16)len;
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD, ("AssocPreferredChannelList : \n"), pAssoStrc, pAssoStrc->Length+3);
-
- return pAssoStrc->Length + 3;
-}
-
-static u16 bthci_AssocPALVer(struct rtw_adapter *padapter, void *pbuf)
-{
- struct amp_assoc_structure *pAssoStrc = (struct amp_assoc_structure *)pbuf;
- u8 *pu1Tmp;
- u16 *pu2Tmp;
-
- pAssoStrc->TypeID = AMP_80211_PAL_VISION;
- pAssoStrc->Length = 0x5;
- pu1Tmp = &pAssoStrc->Data[0];
- *pu1Tmp = 0x1; /* PAL Version */
- pu2Tmp = (u16 *)&pAssoStrc->Data[1];
- *pu2Tmp = 0x5D; /* SIG Company identifier of 802.11 PAL vendor */
- pu2Tmp = (u16 *)&pAssoStrc->Data[3];
- *pu2Tmp = 0x1; /* PAL Sub-version specifier */
-
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("AssocPALVer : \n"), pAssoStrc, pAssoStrc->Length+3);
- RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("AssocPALVer \n"));
-
- RTPRINT(FIOCTL, IOCTL_BT_LOGO, (" TypeID = 0x%x,\n Length = 0x%x,\n PAL Version = 0x01,\n PAL vendor = 0x01,\n PAL Sub-version specifier = 0x01\n",
- pAssoStrc->TypeID,
- pAssoStrc->Length));
- return pAssoStrc->Length + 3;
-}
-
-static u8 bthci_CheckRfStateBeforeConnect(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo;
- enum rt_rf_power_state RfState;
-
- pBTInfo = GET_BT_INFO(padapter);
-
- RfState = padapter->pwrctrlpriv.rf_pwrstate;
-
- if (RfState != rf_on) {
- mod_timer(&pBTInfo->BTPsDisableTimer,
- jiffies + msecs_to_jiffies(50));
- return false;
- }
- return true;
-}
-
-static void bthci_ResponderStartToScan(struct rtw_adapter *padapter)
-{
-}
-
-static u8 bthci_PhyLinkConnectionInProgress(struct rtw_adapter *padapter, u8 PhyLinkHandle)
-{
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
-
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->bPhyLinkInProgress &&
- (pBtMgnt->BtCurrentPhyLinkhandle == PhyLinkHandle))
- return true;
- return false;
-}
-
-static void bthci_ResetFlowSpec(struct rtw_adapter *padapter, u8 EntryNum, u8 index)
-{
- struct bt_30info *pBTinfo;
-
- pBTinfo = GET_BT_INFO(padapter);
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtLogLinkhandle = 0;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtPhyLinkhandle = 0;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].bLLCompleteEventIsSet = false;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].bLLCancelCMDIsSetandComplete = false;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].BtTxFlowSpecID = 0;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].TxPacketCount = 0;
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.Identifier = 0x01;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.ServiceType = SERVICE_BEST_EFFORT;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.MaximumSDUSize = 0xffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.SDUInterArrivalTime = 0xffffffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.AccessLatency = 0xffffffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Tx_Flow_Spec.FlushTimeout = 0xffffffff;
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.Identifier = 0x01;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.ServiceType = SERVICE_BEST_EFFORT;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.MaximumSDUSize = 0xffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.SDUInterArrivalTime = 0xffffffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.AccessLatency = 0xffffffff;
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[index].Rx_Flow_Spec.FlushTimeout = 0xffffffff;
-}
-
-static void bthci_ResetEntry(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTinfo;
- struct bt_mgnt *pBtMgnt;
- u8 j;
-
- pBTinfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTinfo->BtMgnt;
-
- pBTinfo->BtAsocEntry[EntryNum].bUsed = false;
- pBTinfo->BtAsocEntry[EntryNum].BtCurrentState = HCI_STATE_DISCONNECTED;
- pBTinfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED;
-
- pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocRemLen = 0;
- pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.BtPhyLinkhandle = 0;
- if (pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment != NULL)
- memset(pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment, 0, TOTAL_ALLOCIATE_ASSOC_LEN);
- pBTinfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar = 0;
-
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType = 0;
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle = 0;
- memset(pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey, 0,
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen = 0;
-
-	/* 0x3e80 = 16000; 0.625 ms * 1600 = 1000 ms, 0.625 ms * 16000 = 10000 ms */
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout = 0x3e80;
-
- pBTinfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_NONE;
-
- pBTinfo->BtAsocEntry[EntryNum].mAssoc = false;
- pBTinfo->BtAsocEntry[EntryNum].b4waySuccess = false;
-
- /* Reset BT WPA */
- pBTinfo->BtAsocEntry[EntryNum].KeyReplayCounter = 0;
- pBTinfo->BtAsocEntry[EntryNum].BTWPAAuthState = STATE_WPA_AUTH_UNINITIALIZED;
-
- pBTinfo->BtAsocEntry[EntryNum].bSendSupervisionPacket = false;
- pBTinfo->BtAsocEntry[EntryNum].NoRxPktCnt = 0;
- pBTinfo->BtAsocEntry[EntryNum].ShortRangeMode = 0;
- pBTinfo->BtAsocEntry[EntryNum].rxSuvpPktCnt = 0;
-
- for (j = 0; j < MAX_LOGICAL_LINK_NUM; j++)
- bthci_ResetFlowSpec(padapter, EntryNum, j);
-
- pBtMgnt->BTAuthCount = 0;
- pBtMgnt->BTAsocCount = 0;
- pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
- pBtMgnt->BTReceiveConnectPkt = BT_DISCONNECT;
-
- HALBT_RemoveKey(padapter, EntryNum);
-}
-
-static void bthci_RemoveEntryByEntryNum(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- bthci_ResetEntry(padapter, EntryNum);
-
- if (pBtMgnt->CurrentBTConnectionCnt > 0)
- pBtMgnt->CurrentBTConnectionCnt--;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], CurrentBTConnectionCnt = %d!!\n",
- pBtMgnt->CurrentBTConnectionCnt));
-
- if (pBtMgnt->CurrentBTConnectionCnt > 0) {
- pBtMgnt->BtOperationOn = true;
- } else {
- pBtMgnt->BtOperationOn = false;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], Bt Operation OFF!!\n"));
- }
-
- if (!pBtMgnt->BtOperationOn) {
- del_timer_sync(&pBTInfo->BTHCIDiscardAclDataTimer);
- del_timer_sync(&pBTInfo->BTBeaconTimer);
- pBtMgnt->bStartSendSupervisionPkt = false;
- }
-}
-
-static u8
-bthci_CommandCompleteHeader(
- u8 *pbuf,
- u16 OGF,
- u16 OCF,
- enum hci_status status
- )
-{
- struct packet_irp_hcievent_data *PPacketIrpEvent = (struct packet_irp_hcievent_data *)pbuf;
- u8 NumHCI_Comm = 0x1;
-
- PPacketIrpEvent->EventCode = HCI_EVENT_COMMAND_COMPLETE;
- PPacketIrpEvent->Data[0] = NumHCI_Comm; /* packet # */
- PPacketIrpEvent->Data[1] = HCIOPCODELOW(OCF, OGF);
- PPacketIrpEvent->Data[2] = HCIOPCODEHIGHT(OCF, OGF);
-
- if (OGF == OGF_EXTENSION) {
- if (OCF == HCI_SET_RSSI_VALUE) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT_PERIODICAL),
- ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
- NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
- } else {
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_EXT),
- ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
- NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
- }
- } else {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
- ("[BT event], CommandComplete, Num_HCI_Comm = 0x%x, Opcode = 0x%02x%02x, status = 0x%x, OGF = 0x%x, OCF = 0x%x\n",
- NumHCI_Comm, (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), status, OGF, OCF));
- }
- return 3;
-}
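
A standalone sketch of the three header bytes built above. HCIOPCODELOW()/HCIOPCODEHIGHT() are driver macros defined elsewhere; the sketch assumes the standard Bluetooth HCI packing, where the 16-bit opcode is (OGF << 10) | OCF and is carried low byte first. The OGF/OCF values passed in main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

static void fill_cmd_complete(uint8_t *buf, uint16_t ogf, uint16_t ocf)
{
	uint16_t opcode = (uint16_t)((ogf << 10) | (ocf & 0x3ff));

	buf[0] = 0x01;				/* Num_HCI_Command_Packets */
	buf[1] = (uint8_t)(opcode & 0xff);	/* opcode, low byte        */
	buf[2] = (uint8_t)(opcode >> 8);	/* opcode, high byte       */
}

int main(void)
{
	uint8_t hdr[3];

	fill_cmd_complete(hdr, 0x05, 0x0b);	/* illustrative OGF/OCF only */
	printf("%02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);
	return 0;
}
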
-
-static u8 bthci_ExtensionEventHeaderRtk(u8 *pbuf, u8 extensionEvent)
-{
- struct packet_irp_hcievent_data *PPacketIrpEvent = (struct packet_irp_hcievent_data *)pbuf;
- PPacketIrpEvent->EventCode = HCI_EVENT_EXTENSION_RTK;
- PPacketIrpEvent->Data[0] = extensionEvent; /* extension event code */
-
- return 1;
-}
-
-static enum rt_status
-bthci_IndicateEvent(
- struct rtw_adapter *padapter,
- void *pEvntData,
- u32 dataLen
- )
-{
- return PlatformIndicateBTEvent(padapter, pEvntData, dataLen);
-}
-
-static void
-bthci_EventWriteRemoteAmpAssoc(
- struct rtw_adapter *padapter,
- enum hci_status status,
- u8 PLHandle
- )
-{
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_WRITE_REMOTE_AMP_ASSOC,
- status);
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("PhyLinkHandle = 0x%x, status = %d\n", PLHandle, status));
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = PLHandle;
- len += 2;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-}
-
-static void
-bthci_EventEnhancedFlushComplete(
- struct rtw_adapter *padapter,
- u16 LLH
- )
-{
- u8 localBuf[4] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("EventEnhancedFlushComplete, LLH = 0x%x\n", LLH));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_ENHANCED_FLUSH_COMPLETE;
- PPacketIrpEvent->Length = 2;
- /* Logical link handle */
- PPacketIrpEvent->Data[0] = TWOBYTE_LOWBYTE(LLH);
- PPacketIrpEvent->Data[1] = TWOBYTE_HIGHTBYTE(LLH);
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
-}
-
-static void
-bthci_EventShortRangeModeChangeComplete(
- struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- u8 ShortRangeState,
- u8 EntryNum
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[5] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Short Range Mode Change Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Short Range Mode Change Complete, Status = %d\n , PLH = 0x%x\n, Short_Range_Mode_State = 0x%x\n",
- HciStatus, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle, ShortRangeState));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE;
- PPacketIrpEvent->Length = 3;
- PPacketIrpEvent->Data[0] = HciStatus;
- PPacketIrpEvent->Data[1] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- PPacketIrpEvent->Data[2] = ShortRangeState;
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
-}
-
-static void bthci_EventSendFlowSpecModifyComplete(struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- u16 logicHandle)
-{
- u8 localBuf[5] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE)) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
- ("[BT event], Flow Spec Modify Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO),
- ("[BT event], Flow Spec Modify Complete, status = 0x%x, LLH = 0x%x\n", HciStatus, logicHandle));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE;
- PPacketIrpEvent->Length = 3;
-
- PPacketIrpEvent->Data[0] = HciStatus;
- /* Logical link handle */
- PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(logicHandle);
- PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(logicHandle);
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
-}
-
-static void
-bthci_EventExtWifiScanNotify(
- struct rtw_adapter *padapter,
- u8 scanType
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 len = 0;
- u8 localBuf[7] = "";
- u8 *pRetPar;
- u8 *pu1Temp;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!pBtMgnt->BtOperationOn)
- return;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_ExtensionEventHeaderRtk(&localBuf[0], HCI_EVENT_EXT_WIFI_SCAN_NOTIFY);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pu1Temp = (u8 *)&pRetPar[0];
- *pu1Temp = scanType;
- len += 1;
-
- PPacketIrpEvent->Length = len;
-
- if (bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2) == RT_STATUS_SUCCESS) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Wifi scan notify, scan type = %d\n",
- scanType));
- }
-}
-
-static void
-bthci_EventAMPReceiverReport(
- struct rtw_adapter *padapter,
- u8 Reason
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (pBtHciInfo->bTestNeedReport) {
- u8 localBuf[20] = "";
- u32 *pu4Temp;
- u16 *pu2Temp;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), (" HCI_EVENT_AMP_RECEIVER_REPORT\n"));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_AMP_RECEIVER_REPORT;
- PPacketIrpEvent->Length = 2;
-
- PPacketIrpEvent->Data[0] = pBtHciInfo->TestCtrType;
-
- PPacketIrpEvent->Data[1] = Reason;
-
- pu4Temp = (u32 *)&PPacketIrpEvent->Data[2];
- *pu4Temp = pBtHciInfo->TestEventType;
-
- pu2Temp = (u16 *)&PPacketIrpEvent->Data[6];
- *pu2Temp = pBtHciInfo->TestNumOfFrame;
-
- pu2Temp = (u16 *)&PPacketIrpEvent->Data[8];
- *pu2Temp = pBtHciInfo->TestNumOfErrFrame;
-
- pu4Temp = (u32 *)&PPacketIrpEvent->Data[10];
- *pu4Temp = pBtHciInfo->TestNumOfBits;
-
- pu4Temp = (u32 *)&PPacketIrpEvent->Data[14];
- *pu4Temp = pBtHciInfo->TestNumOfErrBits;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 20);
-
- /* Return to Idle state with RX and TX off. */
-
- }
-
- pBtHciInfo->TestNumOfFrame = 0x00;
-}
-
-static void
-bthci_EventChannelSelected(
- struct rtw_adapter *padapter,
- u8 EntryNum
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[3] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_CHANNEL_SELECT)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Channel Selected, Ignore to send this event due to event mask page 2\n"));
- return;
- }
-
- RTPRINT(FIOCTL, IOCTL_BT_EVENT|IOCTL_STATE,
- ("[BT event], Channel Selected, PhyLinkHandle %d\n",
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_CHANNEL_SELECT;
- PPacketIrpEvent->Length = 1;
- PPacketIrpEvent->Data[0] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 3);
-}
-
-static void
-bthci_EventDisconnectPhyLinkComplete(
- struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- enum hci_status Reason,
- u8 EntryNum
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[5] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Disconnect Physical Link Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Disconnect Physical Link Complete, Status = 0x%x, PLH = 0x%x Reason = 0x%x\n",
- HciStatus, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle, Reason));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE;
- PPacketIrpEvent->Length = 3;
- PPacketIrpEvent->Data[0] = HciStatus;
- PPacketIrpEvent->Data[1] = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- PPacketIrpEvent->Data[2] = Reason;
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 5);
-}
-
-static void
-bthci_EventPhysicalLinkComplete(
- struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- u8 EntryNum,
- u8 PLHandle
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 localBuf[4] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u8 PL_handle;
-
- pBtMgnt->bPhyLinkInProgress = false;
- pBtDbg->dbgHciInfo.hciCmdPhyLinkStatus = HciStatus;
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_PHY_LINK_COMPLETE)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Physical Link Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
-
- if (EntryNum == 0xff) {
- /* connection not started yet, just use the input physical link handle to respond. */
- PL_handle = PLHandle;
- } else {
- /* connection is in progress, use the phy link handle we recorded. */
- PL_handle = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent = false;
- }
-
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Physical Link Complete, Status = 0x%x PhyLinkHandle = 0x%x\n", HciStatus,
- PL_handle));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_PHY_LINK_COMPLETE;
- PPacketIrpEvent->Length = 2;
-
- PPacketIrpEvent->Data[0] = HciStatus;
- PPacketIrpEvent->Data[1] = PL_handle;
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
-
-}
-
-static void
-bthci_EventCommandStatus(
- struct rtw_adapter *padapter,
- u8 OGF,
- u16 OCF,
- enum hci_status HciStatus
- )
-{
-
- u8 localBuf[6] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u8 Num_Hci_Comm = 0x1;
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], CommandStatus, Opcode = 0x%02x%02x, OGF = 0x%x, OCF = 0x%x, Status = 0x%x, Num_HCI_COMM = 0x%x\n",
- (HCIOPCODEHIGHT(OCF, OGF)), (HCIOPCODELOW(OCF, OGF)), OGF, OCF, HciStatus, Num_Hci_Comm));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_COMMAND_STATUS;
- PPacketIrpEvent->Length = 4;
- PPacketIrpEvent->Data[0] = HciStatus; /* current pending */
- PPacketIrpEvent->Data[1] = Num_Hci_Comm; /* packet # */
- PPacketIrpEvent->Data[2] = HCIOPCODELOW(OCF, OGF);
- PPacketIrpEvent->Data[3] = HCIOPCODEHIGHT(OCF, OGF);
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 6);
-
-}
-
-static void
-bthci_EventLogicalLinkComplete(
- struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- u8 PhyLinkHandle,
- u16 LogLinkHandle,
- u8 LogLinkIndex,
- u8 EntryNum
- )
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[7] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_LOGICAL_LINK_COMPLETE)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT,
- ("[BT event], Logical Link Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Logical Link Complete, PhyLinkHandle = 0x%x, LogLinkHandle = 0x%x, Status = 0x%x\n",
- PhyLinkHandle, LogLinkHandle, HciStatus));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_LOGICAL_LINK_COMPLETE;
- PPacketIrpEvent->Length = 5;
-
- PPacketIrpEvent->Data[0] = HciStatus;/* status code */
- /* Logical link handle */
- PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(LogLinkHandle);
- PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
- /* Physical link handle */
- PPacketIrpEvent->Data[3] = TWOBYTE_LOWBYTE(PhyLinkHandle);
- /* corresponding Tx flow spec ID */
- if (HciStatus == HCI_STATUS_SUCCESS) {
- PPacketIrpEvent->Data[4] =
- pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData[LogLinkIndex].Tx_Flow_Spec.Identifier;
- } else {
- PPacketIrpEvent->Data[4] = 0x0;
- }
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 7);
-}
-
-static void
-bthci_EventDisconnectLogicalLinkComplete(
- struct rtw_adapter *padapter,
- enum hci_status HciStatus,
- u16 LogLinkHandle,
- enum hci_status Reason
- )
-{
- u8 localBuf[6] = "";
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Disconnect Logical Link Complete, Ignore to send this event due to event mask page 2\n"));
- return;
- }
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Disconnect Logical Link Complete, Status = 0x%x, LLH = 0x%x Reason = 0x%x\n", HciStatus, LogLinkHandle, Reason));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE;
- PPacketIrpEvent->Length = 4;
-
- PPacketIrpEvent->Data[0] = HciStatus;
- /* Logical link handle */
- PPacketIrpEvent->Data[1] = TWOBYTE_LOWBYTE(LogLinkHandle);
- PPacketIrpEvent->Data[2] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
- /* Disconnect reason */
- PPacketIrpEvent->Data[3] = Reason;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 6);
-}
-
-static void
-bthci_EventFlushOccurred(
- struct rtw_adapter *padapter,
- u16 LogLinkHandle
- )
-{
- u8 localBuf[4] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("bthci_EventFlushOccurred(), LLH = 0x%x\n", LogLinkHandle));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_FLUSH_OCCRUED;
- PPacketIrpEvent->Length = 2;
- /* Logical link handle */
- PPacketIrpEvent->Data[0] = TWOBYTE_LOWBYTE(LogLinkHandle);
- PPacketIrpEvent->Data[1] = TWOBYTE_HIGHTBYTE(LogLinkHandle);
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
-}
-
-static enum hci_status
-bthci_BuildPhysicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd,
- u16 OCF
-)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 EntryNum, PLH;
-
- /* Send HCI Command status event to AMP. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- OCF,
- HCI_STATUS_SUCCESS);
-
- PLH = *((u8 *)pHciCmd->Data);
-
- /* Check whether resources are exhausted or a BT connection is in progress; if so, reject the link creation. */
- if (!bthci_AddEntry(padapter)) {
- status = HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE;
- bthci_EventPhysicalLinkComplete(padapter, status, INVALID_ENTRY_NUM, PLH);
- return status;
- }
-
- EntryNum = pBtMgnt->CurrentConnectEntryNum;
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle = PLH;
- pBtMgnt->BtCurrentPhyLinkhandle = PLH;
-
- if (pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.AMPAssocfragment == NULL) {
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Create/Accept PhysicalLink, AMP controller is busy\n"));
- status = HCI_STATUS_CONTROLLER_BUSY;
- bthci_EventPhysicalLinkComplete(padapter, status, INVALID_ENTRY_NUM, PLH);
- return status;
- }
-
- /* Record Key and the info */
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen = (*((u8 *)pHciCmd->Data+1));
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType = (*((u8 *)pHciCmd->Data+2));
- memcpy(pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey,
- (((u8 *)pHciCmd->Data+3)), pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
- memcpy(pBTInfo->BtAsocEntry[EntryNum].PMK, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey, PMK_LEN);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("BuildPhysicalLink, EntryNum = %d, PLH = 0x%x KeyLen = 0x%x, KeyType = 0x%x\n",
- EntryNum, pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyType));
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_LOGO|IOCTL_BT_HCICMD), ("BtAMPKey\n"), pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKey,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtAMPKeyLen);
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_LOGO|IOCTL_BT_HCICMD), ("PMK\n"), pBTInfo->BtAsocEntry[EntryNum].PMK,
- PMK_LEN);
-
- if (OCF == HCI_CREATE_PHYSICAL_LINK) {
- /* These macros require braces */
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_CREATE_PHY_LINK, EntryNum);
- } else if (OCF == HCI_ACCEPT_PHYSICAL_LINK) {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ACCEPT_PHY_LINK, EntryNum);
- }
-
- return status;
-}
-
-static void
-bthci_BuildLogicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd,
- u16 OCF
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
- u8 PhyLinkHandle, EntryNum;
- static u16 AssignLogHandle = 1;
-
- struct hci_flow_spec TxFlowSpec;
- struct hci_flow_spec RxFlowSpec;
- u32 MaxSDUSize, ArriveTime, Bandwidth;
-
- PhyLinkHandle = *((u8 *)pHciCmd->Data);
-
- EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
-
- memcpy(&TxFlowSpec,
- &pHciCmd->Data[1], sizeof(struct hci_flow_spec));
- memcpy(&RxFlowSpec,
- &pHciCmd->Data[17], sizeof(struct hci_flow_spec));
-
- MaxSDUSize = TxFlowSpec.MaximumSDUSize;
- ArriveTime = TxFlowSpec.SDUInterArrivalTime;
-
- if (bthci_CheckLogLinkBehavior(padapter, TxFlowSpec) && bthci_CheckLogLinkBehavior(padapter, RxFlowSpec))
- Bandwidth = BTTOTALBANDWIDTH;
- else if (MaxSDUSize == 0xffff && ArriveTime == 0xffffffff)
- Bandwidth = BTTOTALBANDWIDTH;
- else
- Bandwidth = MaxSDUSize*8*1000/(ArriveTime+244);
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD,
- ("BuildLogicalLink, PhyLinkHandle = 0x%x, MaximumSDUSize = 0x%x, SDUInterArrivalTime = 0x%x, Bandwidth = 0x%x\n",
- PhyLinkHandle, MaxSDUSize, ArriveTime, Bandwidth));
-
- if (EntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Invalid Physical Link handle = 0x%x, status = HCI_STATUS_UNKNOW_CONNECT_ID, return\n", PhyLinkHandle));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- /* When we receive Create/Accept logical link command, we should send command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- OCF,
- status);
- return;
- }
-
- if (!pBtMgnt->bLogLinkInProgress) {
- if (bthci_PhyLinkConnectionInProgress(padapter, PhyLinkHandle)) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Physical link connection in progress, status = HCI_STATUS_CMD_DISALLOW, return\n"));
- status = HCI_STATUS_CMD_DISALLOW;
-
- pBtMgnt->bPhyLinkInProgressStartLL = true;
- /* When we receive Create/Accept logical link command, we should send command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- OCF,
- status);
-
- return;
- }
-
- if (Bandwidth > BTTOTALBANDWIDTH) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("status = HCI_STATUS_QOS_REJECT, Bandwidth = 0x%x, return\n", Bandwidth));
- status = HCI_STATUS_QOS_REJECT;
-
- /* When we receive Create/Accept logical link command, we should send command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- OCF,
- status);
- } else {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("status = HCI_STATUS_SUCCESS\n"));
- status = HCI_STATUS_SUCCESS;
-
- /* When we receive Create/Accept logical link command, we should send command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- OCF,
- status);
-
- }
-
- if (pBTinfo->BtAsocEntry[EntryNum].BtCurrentState != HCI_STATE_CONNECTED) {
- bthci_EventLogicalLinkComplete(padapter,
- HCI_STATUS_CMD_DISALLOW, 0, 0, 0, EntryNum);
- } else {
- u8 i, find = 0;
-
- pBtMgnt->bLogLinkInProgress = true;
-
- /* find an unused logical link index and copy the data */
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle == 0) {
- enum hci_status LogCompEventstatus = HCI_STATUS_SUCCESS;
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtPhyLinkhandle = *((u8 *)pHciCmd->Data);
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle = AssignLogHandle;
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("BuildLogicalLink, EntryNum = %d, physical link handle = 0x%x, logical link handle = 0x%x\n",
- EntryNum, pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle,
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle));
- memcpy(&pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].Tx_Flow_Spec,
- &TxFlowSpec, sizeof(struct hci_flow_spec));
- memcpy(&pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].Rx_Flow_Spec,
- &RxFlowSpec, sizeof(struct hci_flow_spec));
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCompleteEventIsSet = false;
-
- if (pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCancelCMDIsSetandComplete)
- LogCompEventstatus = HCI_STATUS_UNKNOW_CONNECT_ID;
- bthci_EventLogicalLinkComplete(padapter,
- LogCompEventstatus,
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtPhyLinkhandle,
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].BtLogLinkhandle, i, EntryNum);
-
- pBTinfo->BtAsocEntry[EntryNum].LogLinkCmdData[i].bLLCompleteEventIsSet = true;
-
- find = 1;
- pBtMgnt->BtCurrentLogLinkhandle = AssignLogHandle;
- AssignLogHandle++;
- break;
- }
- }
-
- if (!find) {
- bthci_EventLogicalLinkComplete(padapter,
- HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE, 0, 0, 0, EntryNum);
- }
- pBtMgnt->bLogLinkInProgress = false;
- }
- } else {
- bthci_EventLogicalLinkComplete(padapter,
- HCI_STATUS_CONTROLLER_BUSY, 0, 0, 0, EntryNum);
- }
-
-}
-
-static void
-bthci_StartBeaconAndConnect(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd,
- u8 CurrentAssocNum
- )
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("StartBeaconAndConnect, CurrentAssocNum =%d, AMPRole =%d\n",
- CurrentAssocNum,
- pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole));
-
- if (!pBtMgnt->CheckChnlIsSuit) {
- bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_CONNECT_REJ_NOT_SUIT_CHNL_FOUND, CurrentAssocNum, INVALID_PL_HANDLE);
- bthci_RemoveEntryByEntryNum(padapter, CurrentAssocNum);
- return;
- }
-
- if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
- snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
- "AMP-%pMF", padapter->eeprompriv.mac_addr);
- } else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
- snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
- "AMP-%pMF", pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr);
- }
-
- FillOctetString(pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid, pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 21);
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid.Length = 21;
-
- /* Avoid starting the AP or connecting twice, otherwise the original connection will be disconnected. */
- if (!pBtMgnt->bBTConnectInProgress) {
- pBtMgnt->bBTConnectInProgress = true;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress ON!!\n"));
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_STARTING, STATE_CMD_MAC_START_COMPLETE, CurrentAssocNum);
-
- /* 20100325 Joseph: Check RF ON/OFF. */
- /* If RF is OFF, the connect operation is rescheduled after 50 ms. */
- if (!bthci_CheckRfStateBeforeConnect(padapter))
- return;
-
- if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
- /* These macros need braces */
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTING, STATE_CMD_MAC_CONNECT_COMPLETE, CurrentAssocNum);
- } else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
- bthci_ResponderStartToScan(padapter);
- }
- }
- RT_PRINT_STR(_module_rtl871x_mlme_c_, _drv_notice_,
- "StartBeaconAndConnect, SSID:\n",
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].BTSsid.Octet,
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].BTSsid.Length);
-}
-
-static void bthci_ResetBtMgnt(struct bt_mgnt *pBtMgnt)
-{
- pBtMgnt->BtOperationOn = false;
- pBtMgnt->bBTConnectInProgress = false;
- pBtMgnt->bLogLinkInProgress = false;
- pBtMgnt->bPhyLinkInProgress = false;
- pBtMgnt->bPhyLinkInProgressStartLL = false;
- pBtMgnt->DisconnectEntryNum = 0xff;
- pBtMgnt->bStartSendSupervisionPkt = false;
- pBtMgnt->JoinerNeedSendAuth = false;
- pBtMgnt->CurrentBTConnectionCnt = 0;
- pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
- pBtMgnt->BTReceiveConnectPkt = BT_DISCONNECT;
- pBtMgnt->BTAuthCount = 0;
- pBtMgnt->btLogoTest = 0;
-}
-
-static void bthci_ResetBtHciInfo(struct bt_hci_info *pBtHciInfo)
-{
- pBtHciInfo->BTEventMask = 0;
- pBtHciInfo->BTEventMaskPage2 = 0;
- pBtHciInfo->ConnAcceptTimeout = 10000;
- pBtHciInfo->PageTimeout = 0x30;
- pBtHciInfo->LocationDomainAware = 0x0;
- pBtHciInfo->LocationDomain = 0x5858;
- pBtHciInfo->LocationDomainOptions = 0x58;
- pBtHciInfo->LocationOptions = 0x0;
- pBtHciInfo->FlowControlMode = 0x1; /* 0: packet-based data flow control mode (BR/EDR), 1: data-block-based data flow control mode (AMP). */
-
- pBtHciInfo->enFlush_LLH = 0;
- pBtHciInfo->FLTO_LLH = 0;
-
- /* Test command only */
- pBtHciInfo->bTestIsEnd = true;
- pBtHciInfo->bInTestMode = false;
- pBtHciInfo->bTestNeedReport = false;
- pBtHciInfo->TestScenario = 0xff;
- pBtHciInfo->TestReportInterval = 0x01;
- pBtHciInfo->TestCtrType = 0x5d;
- pBtHciInfo->TestEventType = 0x00;
- pBtHciInfo->TestNumOfFrame = 0;
- pBtHciInfo->TestNumOfErrFrame = 0;
- pBtHciInfo->TestNumOfBits = 0;
- pBtHciInfo->TestNumOfErrBits = 0;
-}
-
-static void bthci_ResetBtSec(struct rtw_adapter *padapter, struct bt_security *pBtSec)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
-
- /* Select HW or SW encryption for BT */
- if (GET_HAL_DATA(padapter)->bBTMode)
- pBtSec->bUsedHwEncrypt = true;
- else
- pBtSec->bUsedHwEncrypt = false;
- RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
- "%s: bUsedHwEncrypt =%d\n", __func__, pBtSec->bUsedHwEncrypt);
-
- pBtSec->RSNIE.Octet = pBtSec->RSNIEBuf;
-}
-
-static void bthci_ResetBtExtInfo(struct bt_mgnt *pBtMgnt)
-{
- u8 i;
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = 0;
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = 0;
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = 0;
- pBtMgnt->ExtConfig.linkInfo[i].BTProfile = BT_PROFILE_NONE;
- pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = BT_SPEC_2_1_EDR;
- pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI = 0;
- pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = BT_PROFILE_NONE;
- pBtMgnt->ExtConfig.linkInfo[i].linkRole = BT_LINK_MASTER;
- }
-
- pBtMgnt->ExtConfig.CurrentConnectHandle = 0;
- pBtMgnt->ExtConfig.CurrentIncomingTrafficMode = 0;
- pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode = 0;
- pBtMgnt->ExtConfig.MIN_BT_RSSI = 0;
- pBtMgnt->ExtConfig.NumberOfHandle = 0;
- pBtMgnt->ExtConfig.NumberOfSCO = 0;
- pBtMgnt->ExtConfig.CurrentBTStatus = 0;
- pBtMgnt->ExtConfig.HCIExtensionVer = 0;
-
- pBtMgnt->ExtConfig.bManualControl = false;
- pBtMgnt->ExtConfig.bBTBusy = false;
- pBtMgnt->ExtConfig.bBTA2DPBusy = false;
-}
-
-static enum hci_status bthci_CmdReset(struct rtw_adapter *_padapter, u8 bNeedSendEvent)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct rtw_adapter *padapter;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_hci_info *pBtHciInfo;
- struct bt_security *pBtSec;
- struct bt_dgb *pBtDbg;
- u8 i;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_CmdReset()\n"));
-
- padapter = GetDefaultAdapter(_padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtHciInfo = &pBTInfo->BtHciInfo;
- pBtSec = &pBTInfo->BtSec;
- pBtDbg = &pBTInfo->BtDbg;
-
- pBTInfo->padapter = padapter;
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++)
- bthci_ResetEntry(padapter, i);
-
- bthci_ResetBtMgnt(pBtMgnt);
- bthci_ResetBtHciInfo(pBtHciInfo);
- bthci_ResetBtSec(padapter, pBtSec);
-
- pBtMgnt->BTChannel = BT_Default_Chnl;
- pBtMgnt->CheckChnlIsSuit = true;
-
- pBTInfo->BTBeaconTmrOn = false;
-
- pBtMgnt->bCreateSpportQos = true;
-
- del_timer_sync(&pBTInfo->BTHCIDiscardAclDataTimer);
- del_timer_sync(&pBTInfo->BTBeaconTimer);
-
- HALBT_SetRtsCtsNoLenLimit(padapter);
- /* We may need to handle the Group cipher != AES case; */
- /* for now both Pairwise and Group ciphers use AES. */
-
- bthci_ResetBtExtInfo(pBtMgnt);
-
- /* send command complete event here when all data are received. */
- if (bNeedSendEvent) {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_RESET,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteRemoteAMPAssoc(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 CurrentAssocNum;
- u8 PhyLinkHandle;
-
- pBtDbg->dbgHciInfo.hciCmdCntWriteRemoteAmpAssoc++;
- PhyLinkHandle = *((u8 *)pHciCmd->Data);
- CurrentAssocNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
-
- if (CurrentAssocNum == 0xff) {
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, No such Handle in the Entry\n"));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
- return status;
- }
-
- if (pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment == NULL) {
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, AMP controller is busy\n"));
- status = HCI_STATUS_CONTROLLER_BUSY;
- bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
- return status;
- }
-
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.BtPhyLinkhandle = PhyLinkHandle;
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar = *((u16 *)((u8 *)pHciCmd->Data+1));
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen = *((u16 *)((u8 *)pHciCmd->Data+3));
-
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("WriteRemoteAMPAssoc, LenSoFar = 0x%x, AssocRemLen = 0x%x\n",
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar,
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen));
-
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO),
- ("WriteRemoteAMPAssoc fragment \n"),
- pHciCmd->Data,
- pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen+5);
- if ((pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen) > MAX_AMP_ASSOC_FRAG_LEN) {
- memcpy(((u8 *)pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment+(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar*(sizeof(u8)))),
- (u8 *)pHciCmd->Data+5,
- MAX_AMP_ASSOC_FRAG_LEN);
- } else {
- memcpy((u8 *)(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocfragment)+(pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.LenSoFar*(sizeof(u8))),
- ((u8 *)pHciCmd->Data+5),
- (pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen));
-
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), "WriteRemoteAMPAssoc :\n",
- pHciCmd->Data+5, pBTInfo->BtAsocEntry[CurrentAssocNum].AmpAsocCmdData.AMPAssocRemLen);
-
- if (!bthci_GetAssocInfo(padapter, CurrentAssocNum))
- status = HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE;
-
- bthci_EventWriteRemoteAmpAssoc(padapter, status, PhyLinkHandle);
-
- bthci_StartBeaconAndConnect(padapter, pHciCmd, CurrentAssocNum);
- }
-
- return status;
-}
-
-/* 7.3.13 */
-static enum hci_status bthci_CmdReadConnectionAcceptTimeout(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_CONNECTION_ACCEPT_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pu2Temp = (u16 *)&pRetPar[1]; /* Conn_Accept_Timeout */
- *pu2Temp = pBtHciInfo->ConnAcceptTimeout;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-/* 7.3.14 */
-static enum hci_status
-bthci_CmdWriteConnectionAcceptTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u16 *pu2Temp;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pu2Temp = (u16 *)&pHciCmd->Data[0];
- pBtHciInfo->ConnAcceptTimeout = *pu2Temp;
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("ConnAcceptTimeout = 0x%x",
- pBtHciInfo->ConnAcceptTimeout));
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadPageTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_PAGE_TIMEOUT,
- status);
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Read PageTimeout = 0x%x\n", pBtHciInfo->PageTimeout));
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pu2Temp = (u16 *)&pRetPar[1]; /* Page_Timeout */
- *pu2Temp = pBtHciInfo->PageTimeout;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWritePageTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u16 *pu2Temp;
-
- pu2Temp = (u16 *)&pHciCmd->Data[0];
- pBtHciInfo->PageTimeout = *pu2Temp;
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Write PageTimeout = 0x%x\n",
- pBtHciInfo->PageTimeout));
-
- /* send command complete event here when all data are received. */
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_PAGE_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadLinkSupervisionTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- u8 physicalLinkHandle, EntryNum;
-
- physicalLinkHandle = *((u8 *)pHciCmd->Data);
-
- EntryNum = bthci_GetCurrentEntryNum(padapter, physicalLinkHandle);
-
- if (EntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLinkSupervisionTimeout, No such Handle in the Entry\n"));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- return status;
- }
-
- if (pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle != physicalLinkHandle)
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- {
- u8 localBuf[10] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_LINK_SUPERVISION_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
- pRetPar[1] = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- pRetPar[2] = 0;
- pu2Temp = (u16 *)&pRetPar[3]; /* Link_Supervision_Timeout */
- *pu2Temp = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout;
- len += 5;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteLinkSupervisionTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- u8 physicalLinkHandle, EntryNum;
-
- physicalLinkHandle = *((u8 *)pHciCmd->Data);
-
- EntryNum = bthci_GetCurrentEntryNum(padapter, physicalLinkHandle);
-
- if (EntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("WriteLinkSupervisionTimeout, No such Handle in the Entry\n"));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- } else {
- if (pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle != physicalLinkHandle) {
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- } else {
- pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout = *((u16 *)(((u8 *)pHciCmd->Data)+2));
- RTPRINT(FIOCTL, IOCTL_STATE, ("BT Write LinkSuperversionTimeout[%d] = 0x%x\n",
- EntryNum, pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.LinkSuperversionTimeout));
- }
- }
-
- {
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_LINK_SUPERVISION_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
- pRetPar[1] = pBTinfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle;
- pRetPar[2] = 0;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdEnhancedFlush(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTinfo->BtHciInfo;
- u16 logicHandle;
- u8 Packet_Type;
-
- logicHandle = *((u16 *)&pHciCmd->Data[0]);
- Packet_Type = pHciCmd->Data[2];
-
- if (Packet_Type != 0)
- status = HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE;
- else
- pBtHciInfo->enFlush_LLH = logicHandle;
-
- if (bthci_DiscardTxPackets(padapter, pBtHciInfo->enFlush_LLH))
- bthci_EventFlushOccurred(padapter, pBtHciInfo->enFlush_LLH);
-
- /* should send command status event */
- bthci_EventCommandStatus(padapter,
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_ENHANCED_FLUSH,
- status);
-
- if (pBtHciInfo->enFlush_LLH) {
- bthci_EventEnhancedFlushComplete(padapter, pBtHciInfo->enFlush_LLH);
- pBtHciInfo->enFlush_LLH = 0;
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadLogicalLinkAcceptTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
-
- pu2Temp = (u16 *)&pRetPar[1]; /* Logical_Link_Accept_Timeout */
- *pu2Temp = pBtHciInfo->LogicalAcceptTimeout;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteLogicalLinkAcceptTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pBtHciInfo->LogicalAcceptTimeout = *((u16 *)pHciCmd->Data);
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetEventMask(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 *pu8Temp;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pu8Temp = (u8 *)&pHciCmd->Data[0];
- pBtHciInfo->BTEventMask = *pu8Temp;
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("BTEventMask = 0x%"i64fmt"x\n",
- pBtHciInfo->BTEventMask));
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_SET_EVENT_MASK,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-/* 7.3.69 */
-static enum hci_status
-bthci_CmdSetEventMaskPage2(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 *pu8Temp;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pu8Temp = (u8 *)&pHciCmd->Data[0];
- pBtHciInfo->BTEventMaskPage2 = *pu8Temp;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("BTEventMaskPage2 = 0x%"i64fmt"x\n",
- pBtHciInfo->BTEventMaskPage2));
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_SET_EVENT_MASK_PAGE_2,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadLocationData(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[12] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_LOCATION_DATA,
- status);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainAware = 0x%x\n", pBtHciInfo->LocationDomainAware));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Domain = 0x%x\n", pBtHciInfo->LocationDomain));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainOptions = 0x%x\n", pBtHciInfo->LocationDomainOptions));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Options = 0x%x\n", pBtHciInfo->LocationOptions));
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
-
- pRetPar[1] = pBtHciInfo->LocationDomainAware; /* 0x0; Location_Domain_Aware */
- pu2Temp = (u16 *)&pRetPar[2]; /* Location_Domain */
- *pu2Temp = pBtHciInfo->LocationDomain; /* 0x5858; */
- pRetPar[4] = pBtHciInfo->LocationDomainOptions; /* 0x58; Location_Domain_Options */
- pRetPar[5] = pBtHciInfo->LocationOptions; /* 0x0; Location_Options */
- len += 6;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteLocationData(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u16 *pu2Temp;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pBtHciInfo->LocationDomainAware = pHciCmd->Data[0];
- pu2Temp = (u16 *)&pHciCmd->Data[1];
- pBtHciInfo->LocationDomain = *pu2Temp;
- pBtHciInfo->LocationDomainOptions = pHciCmd->Data[3];
- pBtHciInfo->LocationOptions = pHciCmd->Data[4];
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainAware = 0x%x\n", pBtHciInfo->LocationDomainAware));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Domain = 0x%x\n", pBtHciInfo->LocationDomain));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DomainOptions = 0x%x\n", pBtHciInfo->LocationDomainOptions));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Options = 0x%x\n", pBtHciInfo->LocationOptions));
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_LOCATION_DATA,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadFlowControlMode(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[7] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_FLOW_CONTROL_MODE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
- pRetPar[1] = pBtHciInfo->FlowControlMode; /* Flow Control Mode */
- len += 2;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteFlowControlMode(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- pBtHciInfo->FlowControlMode = pHciCmd->Data[0];
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_FLOW_CONTROL_MODE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdReadBestEffortFlushTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- u16 i, j, logicHandle;
- u32 BestEffortFlushTimeout = 0xffffffff;
- u8 find = 0;
-
- logicHandle = *((u16 *)pHciCmd->Data);
- /* find a matching logical link index and copy the data */
- for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
- BestEffortFlushTimeout = pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BestEffortFlushTimeout;
- find = 1;
- break;
- }
- }
- }
-
- if (!find)
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- {
- u8 localBuf[10] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u32 *pu4Temp;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
- pu4Temp = (u32 *)&pRetPar[1]; /* Best_Effort_Flush_Timeout */
- *pu4Temp = BestEffortFlushTimeout;
- len += 5;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
- return status;
-}
-
-static enum hci_status
-bthci_CmdWriteBestEffortFlushTimeout(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- u16 i, j, logicHandle;
- u32 BestEffortFlushTimeout = 0xffffffff;
- u8 find = 0;
-
- logicHandle = *((u16 *)pHciCmd->Data);
- BestEffortFlushTimeout = *((u32 *)(pHciCmd->Data+1));
-
- /* find a matching logical link index and copy the data */
- for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
- pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BestEffortFlushTimeout = BestEffortFlushTimeout;
- find = 1;
- break;
- }
- }
- }
-
- if (!find)
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status;
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
- return status;
-}
-
-static enum hci_status
-bthci_CmdShortRangeMode(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- u8 PhyLinkHandle, EntryNum, ShortRangeMode;
-
- PhyLinkHandle = pHciCmd->Data[0];
- ShortRangeMode = pHciCmd->Data[1];
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PLH = 0x%x, Short_Range_Mode = 0x%x\n", PhyLinkHandle, ShortRangeMode));
-
- EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
- if (EntryNum != 0xff) {
- pBTInfo->BtAsocEntry[EntryNum].ShortRangeMode = ShortRangeMode;
- } else {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("No such PLH(0x%x)\n", PhyLinkHandle));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- }
-
- bthci_EventCommandStatus(padapter,
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_SHORT_RANGE_MODE,
- status);
-
- bthci_EventShortRangeModeChangeComplete(padapter, status, ShortRangeMode, EntryNum);
-
- return status;
-}
-
-static enum hci_status bthci_CmdReadLocalSupportedCommands(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar, *pSupportedCmds;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- /* send command complete event here when all data are received. */
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_INFORMATIONAL_PARAMETERS,
- HCI_READ_LOCAL_SUPPORTED_COMMANDS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- pSupportedCmds = &pRetPar[1];
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[5]= 0xc0\nBit [6]= Set Event Mask, [7]= Reset\n"));
- pSupportedCmds[5] = 0xc0;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[6]= 0x01\nBit [0]= Set Event Filter\n"));
- pSupportedCmds[6] = 0x01;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[7]= 0x0c\nBit [2]= Read Connection Accept Timeout, [3]= Write Connection Accept Timeout\n"));
- pSupportedCmds[7] = 0x0c;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[10]= 0x80\nBit [7]= Host Number Of Completed Packets\n"));
- pSupportedCmds[10] = 0x80;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[11]= 0x03\nBit [0]= Read Link Supervision Timeout, [1]= Write Link Supervision Timeout\n"));
- pSupportedCmds[11] = 0x03;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[14]= 0xa8\nBit [3]= Read Local Version Information, [5]= Read Local Supported Features, [7]= Read Buffer Size\n"));
- pSupportedCmds[14] = 0xa8;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[15]= 0x1c\nBit [2]= Read Failed Contact Count, [3]= Reset Failed Contact Count, [4]= Get Link Quality\n"));
- pSupportedCmds[15] = 0x1c;
- /* pSupportedCmds[16] = 0x04; */
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[19]= 0x40\nBit [6]= Enhanced Flush\n"));
- pSupportedCmds[19] = 0x40;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[21]= 0xff\nBit [0]= Create Physical Link, [1]= Accept Physical Link, [2]= Disconnect Physical Link, [3]= Create Logical Link\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" [4]= Accept Logical Link, [5]= Disconnect Logical Link, [6]= Logical Link Cancel, [7]= Flow Spec Modify\n"));
- pSupportedCmds[21] = 0xff;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[22]= 0xff\nBit [0]= Read Logical Link Accept Timeout, [1]= Write Logical Link Accept Timeout, [2]= Set Event Mask Page 2, [3]= Read Location Data\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), (" [4]= Write Location Data, [5]= Read Local AMP Info, [6]= Read Local AMP_ASSOC, [7]= Write Remote AMP_ASSOC\n"));
- pSupportedCmds[22] = 0xff;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[23]= 0x07\nBit [0]= Read Flow Control Mode, [1]= Write Flow Control Mode, [2]= Read Data Block Size\n"));
- pSupportedCmds[23] = 0x07;
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD|IOCTL_BT_LOGO), ("Octet[24]= 0x1c\nBit [2]= Read Best Effort Flush Timeout, [3]= Write Best Effort Flush Timeout, [4]= Short Range Mode\n"));
- pSupportedCmds[24] = 0x1c;
- len += 64;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status bthci_CmdReadLocalSupportedFeatures(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- /* send command complete event here when all data are received. */
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_INFORMATIONAL_PARAMETERS,
- HCI_READ_LOCAL_SUPPORTED_FEATURES,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 9;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-static enum hci_status bthci_CmdReadLocalAMPAssoc(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 PhyLinkHandle, EntryNum;
-
- pBtDbg->dbgHciInfo.hciCmdCntReadLocalAmpAssoc++;
- PhyLinkHandle = *((u8 *)pHciCmd->Data);
- EntryNum = bthci_GetCurrentEntryNum(padapter, PhyLinkHandle);
-
- if ((EntryNum == 0xff) && PhyLinkHandle != 0) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, EntryNum = %d !!!!!, physical link handle = 0x%x\n",
- EntryNum, PhyLinkHandle));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- } else if (pBtMgnt->bPhyLinkInProgressStartLL) {
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- pBtMgnt->bPhyLinkInProgressStartLL = false;
- } else {
- pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.BtPhyLinkhandle = *((u8 *)pHciCmd->Data);
- pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar = *((u16 *)((u8 *)pHciCmd->Data+1));
- pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.MaxRemoteASSOCLen = *((u16 *)((u8 *)pHciCmd->Data+3));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("ReadLocalAMPAssoc, LenSoFar =%d, MaxRemoteASSOCLen =%d\n",
- pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar,
- pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.MaxRemoteASSOCLen));
- }
-
-	if (EntryNum != 0xff)
-		RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, EntryNum = %d, physical link handle = 0x%x, LengthSoFar = %x\n",
-			EntryNum, PhyLinkHandle, pBTInfo->BtAsocEntry[EntryNum].AmpAsocCmdData.LenSoFar));
-
- /* send command complete event here when all data are received. */
- {
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- /* PVOID buffer = padapter->IrpHCILocalbuf.Ptr; */
- u8 localBuf[TmpLocalBufSize] = "";
- u16 *pRemainLen;
- u32 totalLen = 0;
- u16 typeLen = 0, remainLen = 0, ret_index = 0;
- u8 *pRetPar;
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- totalLen += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_READ_LOCAL_AMP_ASSOC,
- status);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, Remaining_Len =%d \n", remainLen));
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[totalLen];
- pRetPar[0] = status; /* status */
- pRetPar[1] = *((u8 *)pHciCmd->Data);
- pRemainLen = (u16 *)&pRetPar[2]; /* AMP_ASSOC_Remaining_Length */
-		totalLen += 4;			/* [0]~[3] */
- ret_index = 4;
-
- typeLen = bthci_AssocMACAddr(padapter, &pRetPar[ret_index]);
- totalLen += typeLen;
- remainLen += typeLen;
- ret_index += typeLen;
- typeLen = bthci_AssocPreferredChannelList(padapter, &pRetPar[ret_index], EntryNum);
- totalLen += typeLen;
- remainLen += typeLen;
- ret_index += typeLen;
- typeLen = bthci_PALCapabilities(padapter, &pRetPar[ret_index]);
- totalLen += typeLen;
- remainLen += typeLen;
- ret_index += typeLen;
- typeLen = bthci_AssocPALVer(padapter, &pRetPar[ret_index]);
- totalLen += typeLen;
- remainLen += typeLen;
- PPacketIrpEvent->Length = (u8)totalLen;
- *pRemainLen = remainLen; /* AMP_ASSOC_Remaining_Length */
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("ReadLocalAMPAssoc, Remaining_Len =%d \n", remainLen));
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("AMP_ASSOC_fragment : \n"), PPacketIrpEvent->Data, totalLen);
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, totalLen+2);
- }
-
- return status;
-}
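
The AMP_ASSOC fragment returned above is four type-length-value blocks written back to back, with the running remainLen reported as AMP_ASSOC_Remaining_Length. A minimal sketch of that assembly as a standalone helper (the wrapper name bthci_build_amp_assoc is hypothetical; the four helpers it calls are the ones used in the handler above):

	static u16 bthci_build_amp_assoc(struct rtw_adapter *padapter, u8 *buf, u8 EntryNum)
	{
		u16 len = 0;

		/* Each helper appends one TLV and returns the number of bytes it wrote. */
		len += bthci_AssocMACAddr(padapter, &buf[len]);
		len += bthci_AssocPreferredChannelList(padapter, &buf[len], EntryNum);
		len += bthci_PALCapabilities(padapter, &buf[len]);
		len += bthci_AssocPALVer(padapter, &buf[len]);

		return len;	/* serves as both the fragment length and Remaining_Length here */
	}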
-
-static enum hci_status bthci_CmdReadFailedContactCounter(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 handle;
-
- handle = *((u16 *)pHciCmd->Data);
- /* send command complete event here when all data are received. */
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_READ_FAILED_CONTACT_COUNTER,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = TWOBYTE_LOWBYTE(handle);
- pRetPar[2] = TWOBYTE_HIGHTBYTE(handle);
- pRetPar[3] = TWOBYTE_LOWBYTE(pBtHciInfo->FailContactCount);
- pRetPar[4] = TWOBYTE_HIGHTBYTE(pBtHciInfo->FailContactCount);
- len += 5;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdResetFailedContactCounter(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u16 handle;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- handle = *((u16 *)pHciCmd->Data);
- pBtHciInfo->FailContactCount = 0;
-
- /* send command complete event here when all data are received. */
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_RESET_FAILED_CONTACT_COUNTER,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = TWOBYTE_LOWBYTE(handle);
- pRetPar[2] = TWOBYTE_HIGHTBYTE(handle);
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-/* */
-/* BT 3.0+HS [Vol 2] 7.4.1 */
-/* */
-static enum hci_status
-bthci_CmdReadLocalVersionInformation(
- struct rtw_adapter *padapter
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- /* send command complete event here when all data are received. */
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_INFORMATIONAL_PARAMETERS,
- HCI_READ_LOCAL_VERSION_INFORMATION,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = 0x05; /* HCI_Version */
- pu2Temp = (u16 *)&pRetPar[2]; /* HCI_Revision */
- *pu2Temp = 0x0001;
- pRetPar[4] = 0x05; /* LMP/PAL_Version */
- pu2Temp = (u16 *)&pRetPar[5]; /* Manufacturer_Name */
- *pu2Temp = 0x005d;
- pu2Temp = (u16 *)&pRetPar[7]; /* LMP/PAL_Subversion */
- *pu2Temp = 0x0001;
- len += 9;
- PPacketIrpEvent->Length = len;
-
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LOCAL_VERSION_INFORMATION\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("Status %x\n", status));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI_Version = 0x05\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI_Revision = 0x0001\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LMP/PAL_Version = 0x05\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("Manufacturer_Name = 0x0001\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("LMP/PAL_Subversion = 0x0001\n"));
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-/* 7.4.7 */
-static enum hci_status bthci_CmdReadDataBlockSize(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_INFORMATIONAL_PARAMETERS,
- HCI_READ_DATA_BLOCK_SIZE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = HCI_STATUS_SUCCESS; /* status */
- pu2Temp = (u16 *)&pRetPar[1]; /* Max_ACL_Data_Packet_Length */
- *pu2Temp = Max80211PALPDUSize;
-
- pu2Temp = (u16 *)&pRetPar[3]; /* Data_Block_Length */
- *pu2Temp = Max80211PALPDUSize;
- pu2Temp = (u16 *)&pRetPar[5]; /* Total_Num_Data_Blocks */
- *pu2Temp = BTTotalDataBlockNum;
- len += 7;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-/* 7.4.5 */
-static enum hci_status bthci_CmdReadBufferSize(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_INFORMATIONAL_PARAMETERS,
- HCI_READ_BUFFER_SIZE,
- status);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Synchronous_Data_Packet_Length = 0x%x\n", BTSynDataPacketLength));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Total_Num_ACL_Data_Packets = 0x%x\n", BTTotalDataBlockNum));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("Total_Num_Synchronous_Data_Packets = 0x%x\n", BTTotalDataBlockNum));
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pu2Temp = (u16 *)&pRetPar[1]; /* HC_ACL_Data_Packet_Length */
- *pu2Temp = Max80211PALPDUSize;
-
- pRetPar[3] = BTSynDataPacketLength; /* HC_Synchronous_Data_Packet_Length */
- pu2Temp = (u16 *)&pRetPar[4]; /* HC_Total_Num_ACL_Data_Packets */
- *pu2Temp = BTTotalDataBlockNum;
- pu2Temp = (u16 *)&pRetPar[6]; /* HC_Total_Num_Synchronous_Data_Packets */
- *pu2Temp = BTTotalDataBlockNum;
- len += 8;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
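
bthci_CmdReadBufferSize() above, like the other handlers in this file, stores its multi-byte return parameters by casting &pRetPar[n] to u16 * and writing CPU-endian values, which assumes a little-endian host that tolerates unaligned stores. A sketch of the same 8-byte Read_Buffer_Size payload packed with the kernel's put_unaligned_le16() helper instead (the packing function name below is hypothetical):

	#include <asm/unaligned.h>	/* put_unaligned_le16() */

	/* Hypothetical re-packing of the Read_Buffer_Size return parameters. */
	static u8 bthci_pack_read_buffer_size(u8 *p)
	{
		u8 len = 0;

		p[len++] = HCI_STATUS_SUCCESS;				/* Status */
		put_unaligned_le16(Max80211PALPDUSize, &p[len]);	/* HC_ACL_Data_Packet_Length */
		len += 2;
		p[len++] = BTSynDataPacketLength;			/* HC_Synchronous_Data_Packet_Length */
		put_unaligned_le16(BTTotalDataBlockNum, &p[len]);	/* HC_Total_Num_ACL_Data_Packets */
		len += 2;
		put_unaligned_le16(BTTotalDataBlockNum, &p[len]);	/* HC_Total_Num_Synchronous_Data_Packets */
		len += 2;

		return len;	/* 8 bytes, matching the len += 8 in the handler above */
	}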
-
-static enum hci_status bthci_CmdReadLocalAMPInfo(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct pwrctrl_priv *ppwrctrl = &padapter->pwrctrlpriv;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
- u32 *pu4Temp;
- u32 TotalBandwidth = BTTOTALBANDWIDTH, MaxBandGUBandwidth = BTMAXBANDGUBANDWIDTH;
- u8 ControlType = 0x01, AmpStatus = 0x01;
- u32 MaxFlushTimeout = 10000, BestEffortFlushTimeout = 5000;
- u16 MaxPDUSize = Max80211PALPDUSize, PalCap = 0x1, AmpAssocLen = Max80211AMPASSOCLen, MinLatency = 20;
-
- if ((ppwrctrl->rfoff_reason & RF_CHANGE_BY_HW) ||
- (ppwrctrl->rfoff_reason & RF_CHANGE_BY_SW)) {
- AmpStatus = AMP_STATUS_NO_CAPACITY_FOR_BT;
- }
-
- PlatformZeroMemory(&localBuf[0], TmpLocalBufSize);
- /* PPacketIrpEvent = (struct packet_irp_hcievent_data *)(buffer); */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_READ_LOCAL_AMP_INFO,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = AmpStatus; /* AMP_Status */
- pu4Temp = (u32 *)&pRetPar[2]; /* Total_Bandwidth */
- *pu4Temp = TotalBandwidth; /* 0x19bfcc00;0x7530; */
- pu4Temp = (u32 *)&pRetPar[6]; /* Max_Guaranteed_Bandwidth */
- *pu4Temp = MaxBandGUBandwidth; /* 0x19bfcc00;0x4e20; */
- pu4Temp = (u32 *)&pRetPar[10]; /* Min_Latency */
- *pu4Temp = MinLatency; /* 150; */
- pu4Temp = (u32 *)&pRetPar[14]; /* Max_PDU_Size */
- *pu4Temp = MaxPDUSize;
- pRetPar[18] = ControlType; /* Controller_Type */
- pu2Temp = (u16 *)&pRetPar[19]; /* PAL_Capabilities */
- *pu2Temp = PalCap;
- pu2Temp = (u16 *)&pRetPar[21]; /* AMP_ASSOC_Length */
- *pu2Temp = AmpAssocLen;
- pu4Temp = (u32 *)&pRetPar[23]; /* Max_Flush_Timeout */
- *pu4Temp = MaxFlushTimeout;
- pu4Temp = (u32 *)&pRetPar[27]; /* Best_Effort_Flush_Timeout */
- *pu4Temp = BestEffortFlushTimeout;
- len += 31;
- PPacketIrpEvent->Length = len;
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("AmpStatus = 0x%x\n",
- AmpStatus));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TotalBandwidth = 0x%x, MaxBandGUBandwidth = 0x%x, MinLatency = 0x%x, \n MaxPDUSize = 0x%x, ControlType = 0x%x\n",
- TotalBandwidth, MaxBandGUBandwidth, MinLatency, MaxPDUSize, ControlType));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PalCap = 0x%x, AmpAssocLen = 0x%x, MaxFlushTimeout = 0x%x, BestEffortFlushTimeout = 0x%x\n",
- PalCap, AmpAssocLen, MaxFlushTimeout, BestEffortFlushTimeout));
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
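
The 31 bytes of Read_Local_AMP_Info return parameters built above can equally be described as a packed little-endian structure, which makes the hand-counted offsets (2, 6, 10, 14, 18, 19, 21, 23, 27) self-checking. This is a sketch only; the structure is not defined anywhere in the driver:

	struct amp_info_return_params {		/* mirrors pRetPar[0..30] above */
		u8     status;			/* [0]  Status */
		u8     amp_status;		/* [1]  AMP_Status */
		__le32 total_bandwidth;		/* [2]  Total_Bandwidth */
		__le32 max_guaranteed_bw;	/* [6]  Max_Guaranteed_Bandwidth */
		__le32 min_latency;		/* [10] Min_Latency */
		__le32 max_pdu_size;		/* [14] Max_PDU_Size */
		u8     controller_type;		/* [18] Controller_Type */
		__le16 pal_capabilities;	/* [19] PAL_Capabilities */
		__le16 amp_assoc_length;	/* [21] AMP_ASSOC_Length */
		__le32 max_flush_timeout;	/* [23] Max_Flush_Timeout */
		__le32 be_flush_timeout;	/* [27] Best_Effort_Flush_Timeout */
	} __packed;				/* 31 bytes, matching len += 31 above */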
-
-static enum hci_status
-bthci_CmdCreatePhysicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntCreatePhyLink++;
-
- return bthci_BuildPhysicalLink(padapter,
- pHciCmd, HCI_CREATE_PHYSICAL_LINK);
-}
-
-static enum hci_status
-bthci_CmdReadLinkQuality(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- u16 PLH;
- u8 EntryNum, LinkQuality = 0x55;
-
- PLH = *((u16 *)&pHciCmd->Data[0]);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("PLH = 0x%x\n", PLH));
-
- EntryNum = bthci_GetCurrentEntryNum(padapter, (u8)PLH);
- if (EntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("No such PLH(0x%x)\n", PLH));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- }
-
- {
- u8 localBuf[11] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_STATUS_PARAMETERS,
- HCI_READ_LINK_QUALITY,
- status);
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, (" PLH = 0x%x\n Link Quality = 0x%x\n", PLH, LinkQuality));
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- *((u16 *)&pRetPar[1]) = pBTInfo->BtAsocEntry[EntryNum].PhyLinkCmdData.BtPhyLinkhandle; /* Handle */
-		pRetPar[3] = 0x55; /* Link Quality */
- len += 4;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdCreateLogicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntCreateLogLink++;
-
- bthci_BuildLogicalLink(padapter, pHciCmd,
- HCI_CREATE_LOGICAL_LINK);
-
- return HCI_STATUS_SUCCESS;
-}
-
-static enum hci_status
-bthci_CmdAcceptLogicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntAcceptLogLink++;
-
- bthci_BuildLogicalLink(padapter, pHciCmd,
- HCI_ACCEPT_LOGICAL_LINK);
-
- return HCI_STATUS_SUCCESS;
-}
-
-static enum hci_status
-bthci_CmdDisconnectLogicalLink(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTinfo->BtDbg;
- u16 logicHandle;
- u8 i, j, find = 0, LogLinkCount = 0;
-
- pBtDbg->dbgHciInfo.hciCmdCntDisconnectLogLink++;
-
- logicHandle = *((u16 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DisconnectLogicalLink, logicHandle = 0x%x\n", logicHandle));
-
-	/* find a created logical link entry that matches this handle and clear its data */
- for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("DisconnectLogicalLink, logicHandle is matched 0x%x\n", logicHandle));
- bthci_ResetFlowSpec(padapter, j, i);
- find = 1;
- pBtMgnt->DisconnectEntryNum = j;
- break;
- }
- }
- }
-
- if (!find)
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
-	/* Count how many logical links remain on this association entry */
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[pBtMgnt->DisconnectEntryNum].LogLinkCmdData[i].BtLogLinkhandle != 0)
- LogLinkCount++;
- }
-
-	/* When we receive the Disconnect Logical Link command, we should send the command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- HCI_DISCONNECT_LOGICAL_LINK,
- status);
-	/*
-	 * Once the logical link has been torn down locally, send the
-	 * Disconnection Logical Link Complete event.
-	 */
- if (status == HCI_STATUS_SUCCESS) {
- bthci_EventDisconnectLogicalLinkComplete(padapter, status,
- logicHandle, HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST);
- }
-
- if (LogLinkCount == 0)
- mod_timer(&pBTinfo->BTDisconnectPhyLinkTimer,
- jiffies + msecs_to_jiffies(100));
-
- return status;
-}
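
As the comments in bthci_CmdDisconnectLogicalLink() note, AMP link-control commands are acknowledged in two steps: a Command Status event immediately, then a dedicated completion event once the operation is done. Condensed into one hypothetical helper (the two bthci_Event* calls are the real ones used above):

	/* Sketch of the two-step acknowledgement used by the link-control handlers. */
	static void bthci_ack_disconnect_logical_link(struct rtw_adapter *padapter,
						      enum hci_status status,
						      u16 logicHandle)
	{
		/* Step 1: Command Status is always returned first. */
		bthci_EventCommandStatus(padapter, LINK_CONTROL_COMMANDS,
					 HCI_DISCONNECT_LOGICAL_LINK, status);

		/* Step 2: the Disconnection Logical Link Complete event follows
		 * only when the handle was actually found.
		 */
		if (status == HCI_STATUS_SUCCESS)
			bthci_EventDisconnectLogicalLinkComplete(padapter, status,
					logicHandle,
					HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST);
	}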
-
-static enum hci_status
-bthci_CmdLogicalLinkCancel(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTinfo->BtMgnt;
- u8 CurrentEntryNum, CurrentLogEntryNum;
-
- u8 physicalLinkHandle, TxFlowSpecID, i;
- u16 CurrentLogicalHandle;
-
- physicalLinkHandle = *((u8 *)pHciCmd->Data);
- TxFlowSpecID = *(((u8 *)pHciCmd->Data)+1);
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, physicalLinkHandle = 0x%x, TxFlowSpecID = 0x%x\n",
- physicalLinkHandle, TxFlowSpecID));
-
- CurrentEntryNum = pBtMgnt->CurrentConnectEntryNum;
- CurrentLogicalHandle = pBtMgnt->BtCurrentLogLinkhandle;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("CurrentEntryNum = 0x%x, CurrentLogicalHandle = 0x%x\n",
- CurrentEntryNum, CurrentLogicalHandle));
-
- CurrentLogEntryNum = 0xff;
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if ((CurrentLogicalHandle == pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[i].BtLogLinkhandle) &&
- (physicalLinkHandle == pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[i].BtPhyLinkhandle)) {
- CurrentLogEntryNum = i;
- break;
- }
- }
-
- if (CurrentLogEntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, CurrentLogEntryNum == 0xff !!!!\n"));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- return status;
- } else {
- if (pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].bLLCompleteEventIsSet) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("LogicalLinkCancel, LLCompleteEventIsSet!!!!\n"));
- status = HCI_STATUS_ACL_CONNECT_EXISTS;
- }
- }
-
- {
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- LINK_CONTROL_COMMANDS,
- HCI_LOGICAL_LINK_CANCEL,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].BtPhyLinkhandle;
- pRetPar[2] = pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].BtTxFlowSpecID;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- pBTinfo->BtAsocEntry[CurrentEntryNum].LogLinkCmdData[CurrentLogEntryNum].bLLCancelCMDIsSetandComplete = true;
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdFlowSpecModify(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTinfo = GET_BT_INFO(padapter);
- u8 i, j, find = 0;
- u16 logicHandle;
-
- logicHandle = *((u16 *)pHciCmd->Data);
-	/* find the matching logical link entry and copy the flow spec data */
- for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle == logicHandle) {
- memcpy(&pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Tx_Flow_Spec,
- &pHciCmd->Data[2], sizeof(struct hci_flow_spec));
- memcpy(&pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Rx_Flow_Spec,
- &pHciCmd->Data[18], sizeof(struct hci_flow_spec));
-
- bthci_CheckLogLinkBehavior(padapter, pBTinfo->BtAsocEntry[j].LogLinkCmdData[i].Tx_Flow_Spec);
- find = 1;
- break;
- }
- }
- }
- RTPRINT(FIOCTL, IOCTL_BT_LOGO, ("FlowSpecModify, LLH = 0x%x, \n", logicHandle));
-
- /* When we receive Flow Spec Modify command, we should send command status event first. */
- bthci_EventCommandStatus(padapter,
- LINK_CONTROL_COMMANDS,
- HCI_FLOW_SPEC_MODIFY,
- HCI_STATUS_SUCCESS);
-
- if (!find)
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- bthci_EventSendFlowSpecModifyComplete(padapter, status, logicHandle);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdAcceptPhysicalLink(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntAcceptPhyLink++;
-
- return bthci_BuildPhysicalLink(padapter,
- pHciCmd, HCI_ACCEPT_PHYSICAL_LINK);
-}
-
-static enum hci_status
-bthci_CmdDisconnectPhysicalLink(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 PLH, CurrentEntryNum, PhysLinkDisconnectReason;
-
- pBtDbg->dbgHciInfo.hciCmdCntDisconnectPhyLink++;
-
- PLH = *((u8 *)pHciCmd->Data);
- PhysLinkDisconnectReason = *((u8 *)pHciCmd->Data+1);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_PHYSICAL_LINK PhyHandle = 0x%x, Reason = 0x%x\n",
- PLH, PhysLinkDisconnectReason));
-
- CurrentEntryNum = bthci_GetCurrentEntryNum(padapter, PLH);
-
- if (CurrentEntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD,
- ("DisconnectPhysicalLink, No such Handle in the Entry\n"));
- status = HCI_STATUS_UNKNOW_CONNECT_ID;
- } else {
- pBTInfo->BtAsocEntry[CurrentEntryNum].PhyLinkDisconnectReason =
- (enum hci_status)PhysLinkDisconnectReason;
- }
- /* Send HCI Command status event to AMP. */
- bthci_EventCommandStatus(padapter, LINK_CONTROL_COMMANDS,
- HCI_DISCONNECT_PHYSICAL_LINK, status);
-
- if (status != HCI_STATUS_SUCCESS)
- return status;
-
- /* The macros below require { and } in the if statement */
- if (pBTInfo->BtAsocEntry[CurrentEntryNum].BtCurrentState == HCI_STATE_DISCONNECTED) {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_DISCONNECT_PHY_LINK, CurrentEntryNum);
- } else {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_DISCONNECT_PHY_LINK, CurrentEntryNum);
- }
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetACLLinkDataFlowMode(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp;
-
- pBtMgnt->ExtConfig.CurrentConnectHandle = *((u16 *)pHciCmd->Data);
-	pBtMgnt->ExtConfig.CurrentIncomingTrafficMode = *((u8 *)pHciCmd->Data+2);
-	pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode = *((u8 *)pHciCmd->Data+3);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Connection Handle = 0x%x, Incoming Traffic mode = 0x%x, Outgoing Traffic mode = 0x%x",
- pBtMgnt->ExtConfig.CurrentConnectHandle,
- pBtMgnt->ExtConfig.CurrentIncomingTrafficMode,
- pBtMgnt->ExtConfig.CurrentOutgoingTrafficMode));
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_SET_ACL_LINK_DATA_FLOW_MODE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- pu2Temp = (u16 *)&pRetPar[1];
- *pu2Temp = pBtMgnt->ExtConfig.CurrentConnectHandle;
- len += 3;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetACLLinkStatus(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 i;
- u8 *pTriple;
-
- pBtDbg->dbgHciInfo.hciCmdCntSetAclLinkStatus++;
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "SetACLLinkStatus, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
-
-	/* Only Core Stack v251 and later versions support this command. */
- pBtMgnt->bSupportProfile = true;
-
- pBtMgnt->ExtConfig.NumberOfHandle = *((u8 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfHandle = 0x%x\n", pBtMgnt->ExtConfig.NumberOfHandle));
-
- pTriple = &pHciCmd->Data[1];
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = pTriple[2];
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = pTriple[3];
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
- ("Connection_Handle = 0x%x, Incoming Traffic mode = 0x%x, Outgoing Traffic Mode = 0x%x\n",
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode,
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode));
- pTriple += 4;
- }
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_SET_ACL_LINK_STATUS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetSCOLinkStatus(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntSetScoLinkStatus++;
- pBtMgnt->ExtConfig.NumberOfSCO = *((u8 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfSCO = 0x%x\n",
- pBtMgnt->ExtConfig.NumberOfSCO));
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_SET_SCO_LINK_STATUS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetRSSIValue(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- s8 min_bt_rssi = 0;
- u8 i;
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- if (pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle == *((u16 *)&pHciCmd->Data[0])) {
- pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI = (s8)(pHciCmd->Data[2]);
- RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL,
- ("Connection_Handle = 0x%x, RSSI = %d \n",
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
- pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI));
- }
- /* get the minimum bt rssi value */
- if (pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI <= min_bt_rssi)
- min_bt_rssi = pBtMgnt->ExtConfig.linkInfo[i].BT_RSSI;
- }
-
- pBtMgnt->ExtConfig.MIN_BT_RSSI = min_bt_rssi;
- RTPRINT(FBT, BT_TRACE, ("[bt rssi], the min rssi is %d\n", min_bt_rssi));
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_SET_RSSI_VALUE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdSetCurrentBluetoothStatus(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- pBtMgnt->ExtConfig.CurrentBTStatus = *((u8 *)&pHciCmd->Data[0]);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("SetCurrentBluetoothStatus, CurrentBTStatus = 0x%x\n",
- pBtMgnt->ExtConfig.CurrentBTStatus));
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_SET_CURRENT_BLUETOOTH_STATUS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
-
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdExtensionVersionNotify(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntExtensionVersionNotify++;
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "ExtensionVersionNotify, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
-
- pBtMgnt->ExtConfig.HCIExtensionVer = *((u16 *)&pHciCmd->Data[0]);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCIExtensionVer = 0x%x\n", pBtMgnt->ExtConfig.HCIExtensionVer));
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_EXTENSION_VERSION_NOTIFY,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdLinkStatusNotify(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 i;
- u8 *pTriple;
-
- pBtDbg->dbgHciInfo.hciCmdCntLinkStatusNotify++;
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "LinkStatusNotify, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
-
-	/* Currently only the RTL8723 supports this command. */
- pBtMgnt->bSupportProfile = true;
-
- pBtMgnt->ExtConfig.NumberOfHandle = *((u8 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("NumberOfHandle = 0x%x\n", pBtMgnt->ExtConfig.NumberOfHandle));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCIExtensionVer = %d\n", pBtMgnt->ExtConfig.HCIExtensionVer));
-
- pTriple = &pHciCmd->Data[1];
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- if (pBtMgnt->ExtConfig.HCIExtensionVer < 1) {
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
- pBtMgnt->ExtConfig.linkInfo[i].BTProfile = pTriple[2];
- pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = pTriple[3];
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
- ("Connection_Handle = 0x%x, BTProfile =%d, BTSpec =%d\n",
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
- pBtMgnt->ExtConfig.linkInfo[i].BTProfile,
- pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec));
- pTriple += 4;
-		} else {
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle = *((u16 *)&pTriple[0]);
- pBtMgnt->ExtConfig.linkInfo[i].BTProfile = pTriple[2];
- pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec = pTriple[3];
- pBtMgnt->ExtConfig.linkInfo[i].linkRole = pTriple[4];
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT,
- ("Connection_Handle = 0x%x, BTProfile =%d, BTSpec =%d, LinkRole =%d\n",
- pBtMgnt->ExtConfig.linkInfo[i].ConnectHandle,
- pBtMgnt->ExtConfig.linkInfo[i].BTProfile,
- pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec,
- pBtMgnt->ExtConfig.linkInfo[i].linkRole));
- pTriple += 5;
- }
- }
- BTHCI_UpdateBTProfileRTKToMoto(padapter);
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_LINK_STATUS_NOTIFY,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdBtOperationNotify(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "Bt Operation notify, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
-
- pBtMgnt->ExtConfig.btOperationCode = *((u8 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("btOperationCode = 0x%x\n", pBtMgnt->ExtConfig.btOperationCode));
- switch (pBtMgnt->ExtConfig.btOperationCode) {
- case HCI_BT_OP_NONE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Operation None!!\n"));
- break;
- case HCI_BT_OP_INQUIRY_START:
-		RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Inquiry start!!\n"));
- break;
- case HCI_BT_OP_INQUIRY_FINISH:
-		RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Inquiry finished!!\n"));
- break;
- case HCI_BT_OP_PAGING_START:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging is started!!\n"));
- break;
- case HCI_BT_OP_PAGING_SUCCESS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging complete successfully!!\n"));
- break;
- case HCI_BT_OP_PAGING_UNSUCCESS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Paging complete unsuccessfully!!\n"));
- break;
- case HCI_BT_OP_PAIRING_START:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Pairing start!!\n"));
- break;
- case HCI_BT_OP_PAIRING_FINISH:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Pairing finished!!\n"));
- break;
- case HCI_BT_OP_BT_DEV_ENABLE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : BT Device is enabled!!\n"));
- break;
- case HCI_BT_OP_BT_DEV_DISABLE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : BT Device is disabled!!\n"));
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[bt operation] : Unknown, error!!\n"));
- break;
- }
- BTDM_AdjustForBtOperation(padapter);
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_BT_OPERATION_NOTIFY,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdEnableWifiScanNotify(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT_DATA(FIOCTL, IOCTL_BT_HCICMD_EXT, "Enable Wifi scan notify, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
-
- pBtMgnt->ExtConfig.bEnableWifiScanNotify = *((u8 *)pHciCmd->Data);
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("bEnableWifiScanNotify = %d\n", pBtMgnt->ExtConfig.bEnableWifiScanNotify));
-
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_ENABLE_WIFI_SCAN_NOTIFY,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
-
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWIFICurrentChannel(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- u8 chnl = pmlmeext->cur_channel;
-
- if (pmlmeext->cur_bwmode == HT_CHANNEL_WIDTH_40) {
- if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- chnl += 2;
- else if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- chnl -= 2;
- }
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Current Channel = 0x%x\n", chnl));
-
- {
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_WIFI_CURRENT_CHANNEL,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = chnl; /* current channel */
- len += 2;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWIFICurrentBandwidth(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- enum ht_channel_width bw;
- u8 CurrentBW = 0;
-
- bw = padapter->mlmeextpriv.cur_bwmode;
-
- if (bw == HT_CHANNEL_WIDTH_20)
- CurrentBW = 0;
- else if (bw == HT_CHANNEL_WIDTH_40)
- CurrentBW = 1;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("Current BW = 0x%x\n",
- CurrentBW));
-
- {
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_WIFI_CURRENT_BANDWIDTH,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = CurrentBW; /* current BW */
- len += 2;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdWIFIConnectionStatus(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 connectStatus = HCI_WIFI_NOT_CONNECTED;
-
- if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE)) {
- if (padapter->stapriv.asoc_sta_count >= 3)
- connectStatus = HCI_WIFI_CONNECTED;
- else
- connectStatus = HCI_WIFI_NOT_CONNECTED;
- } else if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_ASOC_STATE)) {
- connectStatus = HCI_WIFI_CONNECTED;
- } else if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING)) {
- connectStatus = HCI_WIFI_CONNECT_IN_PROGRESS;
- } else {
- connectStatus = HCI_WIFI_NOT_CONNECTED;
- }
-
- {
- u8 localBuf[8] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_EXTENSION,
- HCI_WIFI_CONNECTION_STATUS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- pRetPar[1] = connectStatus; /* connect status */
- len += 2;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdEnableDeviceUnderTestMode(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- pBtHciInfo->bInTestMode = true;
- pBtHciInfo->bTestIsEnd = false;
-
- /* send command complete event here when all data are received. */
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_TESTING_COMMANDS,
- HCI_ENABLE_DEVICE_UNDER_TEST_MODE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdAMPTestEnd(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (!pBtHciInfo->bInTestMode) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Not in Test mode, return status = HCI_STATUS_CMD_DISALLOW\n"));
- status = HCI_STATUS_CMD_DISALLOW;
- return status;
- }
-
- pBtHciInfo->bTestIsEnd = true;
-
- del_timer_sync(&pBTInfo->BTTestSendPacketTimer);
-
- rtl8723a_check_bssid(padapter, true);
-
- /* send command complete event here when all data are received. */
- {
- u8 localBuf[4] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("AMP Test End Event \n"));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_AMP_TEST_END;
- PPacketIrpEvent->Length = 2;
-
- PPacketIrpEvent->Data[0] = status;
- PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
- }
-
- bthci_EventAMPReceiverReport(padapter, 0x01);
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdAMPTestCommand(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (!pBtHciInfo->bInTestMode) {
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Not in Test mode, return status = HCI_STATUS_CMD_DISALLOW\n"));
- status = HCI_STATUS_CMD_DISALLOW;
- return status;
- }
-
- pBtHciInfo->TestScenario = *((u8 *)pHciCmd->Data);
-
- if (pBtHciInfo->TestScenario == 0x01)
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("TX Single Test \n"));
- else if (pBtHciInfo->TestScenario == 0x02)
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Receive Frame Test \n"));
- else
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("No Such Test !!!!!!!!!!!!!!!!!! \n"));
-
- if (pBtHciInfo->bTestIsEnd) {
- u8 localBuf[5] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("AMP Test End Event \n"));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_AMP_TEST_END;
- PPacketIrpEvent->Length = 2;
-
- PPacketIrpEvent->Data[0] = status;
-		PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
-
-		/* Return to Idle state with RX and TX off. */
-
- return status;
- }
-
- /* should send command status event */
- bthci_EventCommandStatus(padapter,
- OGF_TESTING_COMMANDS,
- HCI_AMP_TEST_COMMAND,
- status);
-
- /* The HCI_AMP_Start Test Event shall be generated when the */
- /* HCI_AMP_Test_Command has completed and the first data is ready to be sent */
- /* or received. */
-
- {
- u8 localBuf[5] = "";
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), (" HCI_AMP_Start Test Event \n"));
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- PPacketIrpEvent->EventCode = HCI_EVENT_AMP_START_TEST;
- PPacketIrpEvent->Length = 2;
-
- PPacketIrpEvent->Data[0] = status;
-		PPacketIrpEvent->Data[1] = pBtHciInfo->TestScenario;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, 4);
-
-		/* Return to Idle state with RX and TX off. */
- }
-
- if (pBtHciInfo->TestScenario == 0x01) {
- /*
- When in a transmitter test scenario and the frames/bursts count have been
- transmitted the HCI_AMP_Test_End event shall be sent.
- */
- mod_timer(&pBTInfo->BTTestSendPacketTimer,
- jiffies + msecs_to_jiffies(50));
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("TX Single Test \n"));
- } else if (pBtHciInfo->TestScenario == 0x02) {
- rtl8723a_check_bssid(padapter, false);
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_BT_LOGO), ("Receive Frame Test \n"));
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdEnableAMPReceiverReports(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
-
- if (!pBtHciInfo->bInTestMode) {
- status = HCI_STATUS_CMD_DISALLOW;
- /* send command complete event here when all data are received. */
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_TESTING_COMMANDS,
- HCI_ENABLE_AMP_RECEIVER_REPORTS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
- return status;
- }
-
- pBtHciInfo->bTestNeedReport = *((u8 *)pHciCmd->Data);
- pBtHciInfo->TestReportInterval = (*((u8 *)pHciCmd->Data+2));
-
- bthci_EventAMPReceiverReport(padapter, 0x00);
-
- /* send command complete event here when all data are received. */
- {
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_TESTING_COMMANDS,
- HCI_ENABLE_AMP_RECEIVER_REPORTS,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
- }
-
- return status;
-}
-
-static enum hci_status
-bthci_CmdHostBufferSize(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- enum hci_status status = HCI_STATUS_SUCCESS;
- u8 localBuf[6] = "";
- u8 *pRetPar;
- u8 len = 0;
-
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].ACLPacketsData.ACLDataPacketLen = *((u16 *)pHciCmd->Data);
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].SyncDataPacketLen = *((u8 *)(pHciCmd->Data+2));
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].TotalNumACLDataPackets = *((u16 *)(pHciCmd->Data+3));
- pBTInfo->BtAsocEntry[pBtMgnt->CurrentConnectEntryNum].TotalSyncNumDataPackets = *((u16 *)(pHciCmd->Data+5));
-
- /* send command complete event here when all data are received. */
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- len += bthci_CommandCompleteHeader(&localBuf[0],
- OGF_SET_EVENT_MASK_COMMAND,
- HCI_HOST_BUFFER_SIZE,
- status);
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[len];
- pRetPar[0] = status; /* status */
- len += 1;
- PPacketIrpEvent->Length = len;
-
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-
- return status;
-}
-
-static enum hci_status
-bthci_UnknownCMD(struct rtw_adapter *padapter, struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_UNKNOW_HCI_CMD;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- pBtDbg->dbgHciInfo.hciCmdCntUnknown++;
- bthci_EventCommandStatus(padapter,
- (u8)pHciCmd->OGF,
- pHciCmd->OCF,
- status);
-
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFInformationalParameters(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- switch (pHciCmd->OCF) {
- case HCI_READ_LOCAL_VERSION_INFORMATION:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_VERSION_INFORMATION\n"));
- status = bthci_CmdReadLocalVersionInformation(padapter);
- break;
- case HCI_READ_LOCAL_SUPPORTED_COMMANDS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_SUPPORTED_COMMANDS\n"));
- status = bthci_CmdReadLocalSupportedCommands(padapter);
- break;
- case HCI_READ_LOCAL_SUPPORTED_FEATURES:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_SUPPORTED_FEATURES\n"));
- status = bthci_CmdReadLocalSupportedFeatures(padapter);
- break;
- case HCI_READ_BUFFER_SIZE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_BUFFER_SIZE\n"));
- status = bthci_CmdReadBufferSize(padapter);
- break;
- case HCI_READ_DATA_BLOCK_SIZE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_DATA_BLOCK_SIZE\n"));
- status = bthci_CmdReadDataBlockSize(padapter);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFInformationalParameters(), Unknown case = 0x%x\n", pHciCmd->OCF));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- switch (pHciCmd->OCF) {
- case HCI_SET_EVENT_MASK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_MASK\n"));
- status = bthci_CmdSetEventMask(padapter, pHciCmd);
- break;
- case HCI_RESET:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_RESET\n"));
- status = bthci_CmdReset(padapter, true);
- break;
- case HCI_READ_CONNECTION_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_CONNECTION_ACCEPT_TIMEOUT\n"));
- status = bthci_CmdReadConnectionAcceptTimeout(padapter);
- break;
- case HCI_SET_EVENT_FILTER:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_FILTER\n"));
- break;
- case HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT\n"));
- status = bthci_CmdWriteConnectionAcceptTimeout(padapter, pHciCmd);
- break;
- case HCI_READ_PAGE_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_PAGE_TIMEOUT\n"));
- status = bthci_CmdReadPageTimeout(padapter, pHciCmd);
- break;
- case HCI_WRITE_PAGE_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_PAGE_TIMEOUT\n"));
- status = bthci_CmdWritePageTimeout(padapter, pHciCmd);
- break;
- case HCI_HOST_NUMBER_OF_COMPLETED_PACKETS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_NUMBER_OF_COMPLETED_PACKETS\n"));
- break;
- case HCI_READ_LINK_SUPERVISION_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_SUPERVISION_TIMEOUT\n"));
- status = bthci_CmdReadLinkSupervisionTimeout(padapter, pHciCmd);
- break;
- case HCI_WRITE_LINK_SUPERVISION_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LINK_SUPERVISION_TIMEOUT\n"));
- status = bthci_CmdWriteLinkSupervisionTimeout(padapter, pHciCmd);
- break;
- case HCI_ENHANCED_FLUSH:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENHANCED_FLUSH\n"));
- status = bthci_CmdEnhancedFlush(padapter, pHciCmd);
- break;
- case HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT\n"));
- status = bthci_CmdReadLogicalLinkAcceptTimeout(padapter, pHciCmd);
- break;
- case HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT\n"));
- status = bthci_CmdWriteLogicalLinkAcceptTimeout(padapter, pHciCmd);
- break;
- case HCI_SET_EVENT_MASK_PAGE_2:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_MASK_PAGE_2\n"));
- status = bthci_CmdSetEventMaskPage2(padapter, pHciCmd);
- break;
- case HCI_READ_LOCATION_DATA:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCATION_DATA\n"));
- status = bthci_CmdReadLocationData(padapter, pHciCmd);
- break;
- case HCI_WRITE_LOCATION_DATA:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_LOCATION_DATA\n"));
- status = bthci_CmdWriteLocationData(padapter, pHciCmd);
- break;
- case HCI_READ_FLOW_CONTROL_MODE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_FLOW_CONTROL_MODE\n"));
- status = bthci_CmdReadFlowControlMode(padapter, pHciCmd);
- break;
- case HCI_WRITE_FLOW_CONTROL_MODE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_FLOW_CONTROL_MODE\n"));
- status = bthci_CmdWriteFlowControlMode(padapter, pHciCmd);
- break;
- case HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT\n"));
- status = bthci_CmdReadBestEffortFlushTimeout(padapter, pHciCmd);
- break;
- case HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT\n"));
- status = bthci_CmdWriteBestEffortFlushTimeout(padapter, pHciCmd);
- break;
- case HCI_SHORT_RANGE_MODE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SHORT_RANGE_MODE\n"));
- status = bthci_CmdShortRangeMode(padapter, pHciCmd);
- break;
- case HCI_HOST_BUFFER_SIZE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_BUFFER_SIZE\n"));
- status = bthci_CmdHostBufferSize(padapter, pHciCmd);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFSetEventMaskCMD(), Unknown case = 0x%x\n", pHciCmd->OCF));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFStatusParameters(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- switch (pHciCmd->OCF) {
- case HCI_READ_FAILED_CONTACT_COUNTER:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_FAILED_CONTACT_COUNTER\n"));
- status = bthci_CmdReadFailedContactCounter(padapter, pHciCmd);
- break;
- case HCI_RESET_FAILED_CONTACT_COUNTER:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_RESET_FAILED_CONTACT_COUNTER\n"));
- status = bthci_CmdResetFailedContactCounter(padapter, pHciCmd);
- break;
- case HCI_READ_LINK_QUALITY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_QUALITY\n"));
- status = bthci_CmdReadLinkQuality(padapter, pHciCmd);
- break;
- case HCI_READ_RSSI:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_RSSI\n"));
- break;
- case HCI_READ_LOCAL_AMP_INFO:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_INFO\n"));
- status = bthci_CmdReadLocalAMPInfo(padapter);
- break;
- case HCI_READ_LOCAL_AMP_ASSOC:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_ASSOC\n"));
- status = bthci_CmdReadLocalAMPAssoc(padapter, pHciCmd);
- break;
- case HCI_WRITE_REMOTE_AMP_ASSOC:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_REMOTE_AMP_ASSOC\n"));
- status = bthci_CmdWriteRemoteAMPAssoc(padapter, pHciCmd);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFStatusParameters(), Unknown case = 0x%x\n", pHciCmd->OCF));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFLinkControlCMD(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- switch (pHciCmd->OCF) {
- case HCI_CREATE_PHYSICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_CREATE_PHYSICAL_LINK\n"));
- status = bthci_CmdCreatePhysicalLink(padapter, pHciCmd);
- break;
- case HCI_ACCEPT_PHYSICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ACCEPT_PHYSICAL_LINK\n"));
- status = bthci_CmdAcceptPhysicalLink(padapter, pHciCmd);
- break;
- case HCI_DISCONNECT_PHYSICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_PHYSICAL_LINK\n"));
- status = bthci_CmdDisconnectPhysicalLink(padapter, pHciCmd);
- break;
- case HCI_CREATE_LOGICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_CREATE_LOGICAL_LINK\n"));
- status = bthci_CmdCreateLogicalLink(padapter, pHciCmd);
- break;
- case HCI_ACCEPT_LOGICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ACCEPT_LOGICAL_LINK\n"));
- status = bthci_CmdAcceptLogicalLink(padapter, pHciCmd);
- break;
- case HCI_DISCONNECT_LOGICAL_LINK:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_DISCONNECT_LOGICAL_LINK\n"));
- status = bthci_CmdDisconnectLogicalLink(padapter, pHciCmd);
- break;
- case HCI_LOGICAL_LINK_CANCEL:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_LOGICAL_LINK_CANCEL\n"));
- status = bthci_CmdLogicalLinkCancel(padapter, pHciCmd);
- break;
- case HCI_FLOW_SPEC_MODIFY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_FLOW_SPEC_MODIFY\n"));
- status = bthci_CmdFlowSpecModify(padapter, pHciCmd);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("bthci_HandleOGFLinkControlCMD(), Unknown case = 0x%x\n", pHciCmd->OCF));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFTestingCMD(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- switch (pHciCmd->OCF) {
- case HCI_ENABLE_DEVICE_UNDER_TEST_MODE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENABLE_DEVICE_UNDER_TEST_MODE\n"));
- bthci_CmdEnableDeviceUnderTestMode(padapter, pHciCmd);
- break;
- case HCI_AMP_TEST_END:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_AMP_TEST_END\n"));
- bthci_CmdAMPTestEnd(padapter, pHciCmd);
- break;
- case HCI_AMP_TEST_COMMAND:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_AMP_TEST_COMMAND\n"));
- bthci_CmdAMPTestCommand(padapter, pHciCmd);
- break;
- case HCI_ENABLE_AMP_RECEIVER_REPORTS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_ENABLE_AMP_RECEIVER_REPORTS\n"));
- bthci_CmdEnableAMPReceiverReports(padapter, pHciCmd);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static enum hci_status
-bthci_HandleOGFExtension(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- switch (pHciCmd->OCF) {
- case HCI_SET_ACL_LINK_DATA_FLOW_MODE:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_ACL_LINK_DATA_FLOW_MODE\n"));
- status = bthci_CmdSetACLLinkDataFlowMode(padapter, pHciCmd);
- break;
- case HCI_SET_ACL_LINK_STATUS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_ACL_LINK_STATUS\n"));
- status = bthci_CmdSetACLLinkStatus(padapter, pHciCmd);
- break;
- case HCI_SET_SCO_LINK_STATUS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_SCO_LINK_STATUS\n"));
- status = bthci_CmdSetSCOLinkStatus(padapter, pHciCmd);
- break;
- case HCI_SET_RSSI_VALUE:
- RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL, ("HCI_SET_RSSI_VALUE\n"));
- status = bthci_CmdSetRSSIValue(padapter, pHciCmd);
- break;
- case HCI_SET_CURRENT_BLUETOOTH_STATUS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_SET_CURRENT_BLUETOOTH_STATUS\n"));
- status = bthci_CmdSetCurrentBluetoothStatus(padapter, pHciCmd);
- break;
- /* The following is for RTK8723 */
-
- case HCI_EXTENSION_VERSION_NOTIFY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_EXTENSION_VERSION_NOTIFY\n"));
- status = bthci_CmdExtensionVersionNotify(padapter, pHciCmd);
- break;
- case HCI_LINK_STATUS_NOTIFY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_LINK_STATUS_NOTIFY\n"));
- status = bthci_CmdLinkStatusNotify(padapter, pHciCmd);
- break;
- case HCI_BT_OPERATION_NOTIFY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_BT_OPERATION_NOTIFY\n"));
- status = bthci_CmdBtOperationNotify(padapter, pHciCmd);
- break;
- case HCI_ENABLE_WIFI_SCAN_NOTIFY:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_ENABLE_WIFI_SCAN_NOTIFY\n"));
- status = bthci_CmdEnableWifiScanNotify(padapter, pHciCmd);
- break;
-
- /* The following is for IVT */
- case HCI_WIFI_CURRENT_CHANNEL:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CURRENT_CHANNEL\n"));
- status = bthci_CmdWIFICurrentChannel(padapter, pHciCmd);
- break;
- case HCI_WIFI_CURRENT_BANDWIDTH:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CURRENT_BANDWIDTH\n"));
- status = bthci_CmdWIFICurrentBandwidth(padapter, pHciCmd);
- break;
- case HCI_WIFI_CONNECTION_STATUS:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_WIFI_CONNECTION_STATUS\n"));
- status = bthci_CmdWIFIConnectionStatus(padapter, pHciCmd);
- break;
-
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- return status;
-}
-
-static void
-bthci_StateStarting(struct rtw_adapter *padapter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Starting], "));
- switch (StateCmd) {
- case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
- pBtMgnt->bNeedNotifyAMPNoCap = true;
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- case STATE_CMD_MAC_START_COMPLETE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_START_COMPLETE\n"));
- if (pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_CREATOR)
- bthci_EventChannelSelected(padapter, EntryNum);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-static void
-bthci_StateConnecting(struct rtw_adapter *padapter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Connecting], "));
- switch (StateCmd) {
- case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
- pBtMgnt->bNeedNotifyAMPNoCap = true;
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- case STATE_CMD_MAC_CONNECT_COMPLETE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_COMPLETE\n"));
-
- if (pBTInfo->BtAsocEntry[EntryNum].AMPRole == AMP_BTAP_JOINER) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
- "StateConnecting\n");
- }
- break;
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
-
- break;
- case STATE_CMD_MAC_CONNECT_CANCEL_INDICATE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_CANCEL_INDICATE\n"));
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONTROLLER_BUSY;
- /* Because this state cmd is caused by BTHCI_EventAMPStatusChange(), */
- /* we don't need to send the event again in the following BTHCI_DisconnectPeer(). */
- pBtMgnt->bNeedNotifyAMPNoCap = false;
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-static void
-bthci_StateConnected(struct rtw_adapter *padapter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 i;
- u16 logicHandle = 0;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Connected], "));
- switch (StateCmd) {
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
-
- /* When trying to disconnect the physical link, disconnect the logical links first. */
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData->BtLogLinkhandle != 0) {
- logicHandle = pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData->BtLogLinkhandle;
-
- bthci_EventDisconnectLogicalLinkComplete(padapter, HCI_STATUS_SUCCESS,
- logicHandle, pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason);
-
- pBTInfo->BtAsocEntry[EntryNum].LogLinkCmdData->BtLogLinkhandle = 0;
- }
- }
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
-
- case STATE_CMD_MAC_DISCONNECT_INDICATE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_DISCONNECT_INDICATE\n"));
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- /* TODO: Remote Host not local host */
- HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST,
- EntryNum);
- BTHCI_DisconnectPeer(padapter, EntryNum);
-
- break;
- case STATE_CMD_ENTER_STATE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ENTER_STATE\n"));
-
- if (pBtMgnt->bBTConnectInProgress) {
- pBtMgnt->bBTConnectInProgress = false;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
- }
- pBTInfo->BtAsocEntry[EntryNum].BtCurrentState = HCI_STATE_CONNECTED;
- pBTInfo->BtAsocEntry[EntryNum].b4waySuccess = true;
- pBtMgnt->bStartSendSupervisionPkt = true;
-
- /* for rate adaptive */
-
- rtl8723a_update_ramask(padapter,
- MAX_FW_SUPPORT_MACID_NUM-1-EntryNum, 0);
-
- HalSetBrateCfg23a(padapter, padapter->mlmepriv.cur_network.network.SupportedRates);
- BTDM_SetFwChnlInfo(padapter, RT_MEDIA_CONNECT);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-static void
-bthci_StateAuth(struct rtw_adapter *padapter, enum hci_state_with_cmd StateCmd,
- u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Authenticating], "));
- switch (StateCmd) {
- case STATE_CMD_CONNECT_ACCEPT_TIMEOUT:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CONNECT_ACCEPT_TIMEOUT\n"));
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_CONNECT_ACCEPT_TIMEOUT;
- pBtMgnt->bNeedNotifyAMPNoCap = true;
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_UNKNOW_CONNECT_ID;
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- case STATE_CMD_4WAY_FAILED:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_4WAY_FAILED\n"));
-
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus = HCI_STATUS_AUTH_FAIL;
- pBtMgnt->bNeedNotifyAMPNoCap = true;
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
- break;
- case STATE_CMD_4WAY_SUCCESSED:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_4WAY_SUCCESSED\n"));
-
- bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_SUCCESS, EntryNum, INVALID_PL_HANDLE);
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-static void
-bthci_StateDisconnecting(struct rtw_adapter *padapter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Disconnecting], "));
- switch (StateCmd) {
- case STATE_CMD_MAC_CONNECT_CANCEL_INDICATE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_MAC_CONNECT_CANCEL_INDICATE\n"));
- if (pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent) {
- bthci_EventPhysicalLinkComplete(padapter,
- pBTInfo->BtAsocEntry[EntryNum].PhysLinkCompleteStatus,
- EntryNum, INVALID_PL_HANDLE);
- }
-
- if (pBtMgnt->bBTConnectInProgress) {
- pBtMgnt->bBTConnectInProgress = false;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
- }
-
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
- break;
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- BTHCI_DisconnectPeer(padapter, EntryNum);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-static void
-bthci_StateDisconnected(struct rtw_adapter *padapter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state], [Disconnected], "));
- switch (StateCmd) {
- case STATE_CMD_CREATE_PHY_LINK:
- case STATE_CMD_ACCEPT_PHY_LINK:
- if (StateCmd == STATE_CMD_CREATE_PHY_LINK)
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_CREATE_PHY_LINK\n"));
- else
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ACCEPT_PHY_LINK\n"));
-
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT PS], Disable IPS and LPS\n"));
- ips_leave23a(padapter);
- LPS_Leave23a(padapter);
-
- pBtMgnt->bPhyLinkInProgress = true;
- pBtMgnt->BTCurrentConnectType = BT_DISCONNECT;
- pBtMgnt->CurrentBTConnectionCnt++;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], CurrentBTConnectionCnt = %d\n",
- pBtMgnt->CurrentBTConnectionCnt));
- pBtMgnt->BtOperationOn = true;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], Bt Operation ON!! CurrentConnectEntryNum = %d\n",
- pBtMgnt->CurrentConnectEntryNum));
-
- if (pBtMgnt->bBTConnectInProgress) {
- bthci_EventPhysicalLinkComplete(padapter, HCI_STATUS_CONTROLLER_BUSY, INVALID_ENTRY_NUM, pBtMgnt->BtCurrentPhyLinkhandle);
- bthci_RemoveEntryByEntryNum(padapter, EntryNum);
- return;
- }
-
- if (StateCmd == STATE_CMD_CREATE_PHY_LINK)
- pBTInfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_CREATOR;
- else
- pBTInfo->BtAsocEntry[EntryNum].AMPRole = AMP_BTAP_JOINER;
-
- /* 1. MAC not yet in selected channel */
- while (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)) {
- RTPRINT(FIOCTL, IOCTL_STATE, ("Scan/Roaming/Wifi Link is in Progress, wait 200 ms\n"));
- mdelay(200);
- }
- /* 2. MAC already in selected channel */
- RTPRINT(FIOCTL, IOCTL_STATE, ("Channel is Ready\n"));
- mod_timer(&pBTInfo->BTHCIJoinTimeoutTimer,
- jiffies + msecs_to_jiffies(pBtHciInfo->ConnAcceptTimeout));
-
- pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent = true;
- break;
- case STATE_CMD_DISCONNECT_PHY_LINK:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_DISCONNECT_PHY_LINK\n"));
-
- del_timer_sync(&pBTInfo->BTHCIJoinTimeoutTimer);
-
- bthci_EventDisconnectPhyLinkComplete(padapter,
- HCI_STATUS_SUCCESS,
- pBTInfo->BtAsocEntry[EntryNum].PhyLinkDisconnectReason,
- EntryNum);
-
- if (pBTInfo->BtAsocEntry[EntryNum].bNeedPhysLinkCompleteEvent) {
- bthci_EventPhysicalLinkComplete(padapter,
- HCI_STATUS_UNKNOW_CONNECT_ID,
- EntryNum, INVALID_PL_HANDLE);
- }
-
- if (pBtMgnt->bBTConnectInProgress) {
- pBtMgnt->bBTConnectInProgress = false;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
- }
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTED, STATE_CMD_ENTER_STATE, EntryNum);
- bthci_RemoveEntryByEntryNum(padapter, EntryNum);
- break;
- case STATE_CMD_ENTER_STATE:
- RTPRINT(FIOCTL, IOCTL_STATE, ("STATE_CMD_ENTER_STATE\n"));
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, ("State command(%d) is Wrong !!!\n", StateCmd));
- break;
- }
-}
-
-void BTHCI_EventParse(struct rtw_adapter *padapter, void *pEvntData, u32 dataLen)
-{
-}
-
-u8 BTHCI_HsConnectionEstablished(struct rtw_adapter *padapter)
-{
- u8 bBtConnectionExist = false;
- struct bt_30info *pBtinfo = GET_BT_INFO(padapter);
- u8 i;
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- if (pBtinfo->BtAsocEntry[i].b4waySuccess) {
- bBtConnectionExist = true;
- break;
- }
- }
-
-/*RTPRINT(FIOCTL, IOCTL_STATE, (" BTHCI_HsConnectionEstablished(), connection exist = %d\n", bBtConnectionExist)); */
-
- return bBtConnectionExist;
-}
-
-static u8
-BTHCI_CheckProfileExist(struct rtw_adapter *padapter,
- enum bt_traffic_mode_profile Profile)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 IsPRofile = false;
- u8 i = 0;
-
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- if (pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile == Profile) {
- IsPRofile = true;
- break;
- }
- }
-
- return IsPRofile;
-}
-
-void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 i = 0;
-
- pBtMgnt->ExtConfig.NumberOfSCO = 0;
-
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = BT_PROFILE_NONE;
-
- if (pBtMgnt->ExtConfig.linkInfo[i].BTProfile == BT_PROFILE_SCO)
- pBtMgnt->ExtConfig.NumberOfSCO++;
-
- pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile = pBtMgnt->ExtConfig.linkInfo[i].BTProfile;
- switch (pBtMgnt->ExtConfig.linkInfo[i].TrafficProfile) {
- case BT_PROFILE_SCO:
- break;
- case BT_PROFILE_PAN:
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_BE;
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_BE;
- break;
- case BT_PROFILE_A2DP:
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_GULB;
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_GULB;
- break;
- case BT_PROFILE_HID:
- pBtMgnt->ExtConfig.linkInfo[i].IncomingTrafficMode = BT_MOTOR_EXT_GUL;
- pBtMgnt->ExtConfig.linkInfo[i].OutgoingTrafficMode = BT_MOTOR_EXT_BE;
- break;
- default:
- break;
- }
- }
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RTK, NumberOfHandle = %d, NumberOfSCO = %d\n",
- pBtMgnt->ExtConfig.NumberOfHandle, pBtMgnt->ExtConfig.NumberOfSCO));
-}
-
-void BTHCI_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bEnableWifiScanNotify)
- bthci_EventExtWifiScanNotify(padapter, scanType);
-}
-
-void
-BTHCI_StateMachine(
- struct rtw_adapter *padapter,
- u8 StateToEnter,
- enum hci_state_with_cmd StateCmd,
- u8 EntryNum
- )
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (EntryNum == 0xff) {
- RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, error EntryNum = 0x%x \n", EntryNum));
- return;
- }
- RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, EntryNum = 0x%x, CurrentState = 0x%x, BtNextState = 0x%x, StateCmd = 0x%x , StateToEnter = 0x%x\n",
- EntryNum, pBTInfo->BtAsocEntry[EntryNum].BtCurrentState, pBTInfo->BtAsocEntry[EntryNum].BtNextState, StateCmd, StateToEnter));
-
- if (pBTInfo->BtAsocEntry[EntryNum].BtNextState & StateToEnter) {
- pBTInfo->BtAsocEntry[EntryNum].BtCurrentState = StateToEnter;
-
- switch (StateToEnter) {
- case HCI_STATE_STARTING:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTING | HCI_STATE_CONNECTING;
- bthci_StateStarting(padapter, StateCmd, EntryNum);
- break;
- case HCI_STATE_CONNECTING:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_CONNECTING | HCI_STATE_DISCONNECTING | HCI_STATE_AUTHENTICATING;
- bthci_StateConnecting(padapter, StateCmd, EntryNum);
- break;
- case HCI_STATE_AUTHENTICATING:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTING | HCI_STATE_CONNECTED;
- bthci_StateAuth(padapter, StateCmd, EntryNum);
- break;
- case HCI_STATE_CONNECTED:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_CONNECTED | HCI_STATE_DISCONNECTING;
- bthci_StateConnected(padapter, StateCmd, EntryNum);
- break;
- case HCI_STATE_DISCONNECTING:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED | HCI_STATE_DISCONNECTING;
- bthci_StateDisconnecting(padapter, StateCmd, EntryNum);
- break;
- case HCI_STATE_DISCONNECTED:
- pBTInfo->BtAsocEntry[EntryNum].BtNextState = HCI_STATE_DISCONNECTED | HCI_STATE_STARTING | HCI_STATE_CONNECTING;
- bthci_StateDisconnected(padapter, StateCmd, EntryNum);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, Unknown state to enter!!!\n"));
- break;
- }
- } else {
- RTPRINT(FIOCTL, IOCTL_STATE, (" StateMachine, Wrong state to enter\n"));
- }
-
- /* 20100325 Joseph: Disable/Enable IPS/LPS according to BT status. */
- if (!pBtMgnt->bBTConnectInProgress && !pBtMgnt->BtOperationOn) {
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT PS], ips_enter23a()\n"));
- ips_enter23a(padapter);
- }
-}
-
-void BTHCI_DisconnectPeer(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, (" BTHCI_DisconnectPeer()\n"));
-
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, EntryNum);
-
- if (pBTInfo->BtAsocEntry[EntryNum].bUsed) {
-/*BTPKT_SendDeauthentication(padapter, pBTInfo->BtAsocEntry[EntryNum].BTRemoteMACAddr, unspec_reason); not porting yet */
- }
-
- if (pBtMgnt->bBTConnectInProgress) {
- pBtMgnt->bBTConnectInProgress = false;
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT Flag], BT Connect in progress OFF!!\n"));
- }
-
- bthci_RemoveEntryByEntryNum(padapter, EntryNum);
-
- if (pBtMgnt->bNeedNotifyAMPNoCap) {
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT AMPStatus], set to invalid in BTHCI_DisconnectPeer()\n"));
- BTHCI_EventAMPStatusChange(padapter, AMP_STATUS_NO_CAPACITY_FOR_BT);
- }
-}
-
-void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_hci_info *pBtHciInfo = &pBTInfo->BtHciInfo;
- u8 localBuf[TmpLocalBufSize] = "";
- u8 *pRetPar, *pTriple;
- u8 len = 0, i, j, handleNum = 0;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u16 *pu2Temp, *pPackets, *pHandle, *pDblocks;
- u8 sent = 0;
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
-
- if (!(pBtHciInfo->BTEventMaskPage2 & EMP2_HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS)) {
- RTPRINT(FIOCTL, IOCTL_BT_EVENT, ("[BT event], Num Of Completed DataBlocks, Ignore to send NumOfCompletedDataBlocksEvent due to event mask page 2\n"));
- return;
- }
-
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[0];
- pTriple = &pRetPar[3];
- for (j = 0; j < MAX_BT_ASOC_ENTRY_NUM; j++) {
-
- for (i = 0; i < MAX_LOGICAL_LINK_NUM; i++) {
- if (pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle) {
- handleNum++;
- pHandle = (u16 *)&pTriple[0]; /* Handle[i] */
- pPackets = (u16 *)&pTriple[2]; /* Num_Of_Completed_Packets[i] */
- pDblocks = (u16 *)&pTriple[4]; /* Num_Of_Completed_Blocks[i] */
- *pHandle = pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].BtLogLinkhandle;
- *pPackets = (u16)pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount;
- *pDblocks = (u16)pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount;
- if (pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount) {
- sent = 1;
- RTPRINT(FIOCTL, IOCTL_BT_EVENT_DETAIL,
- ("[BT event], Num Of Completed DataBlocks, Handle = 0x%x, Num_Of_Completed_Packets = 0x%x, Num_Of_Completed_Blocks = 0x%x\n",
- *pHandle, *pPackets, *pDblocks));
- }
- pBTInfo->BtAsocEntry[j].LogLinkCmdData[i].TxPacketCount = 0;
- len += 6;
- pTriple += 6; /* advance to the next 6-byte triple */
- }
- }
- }
-
- pRetPar[2] = handleNum; /* Number_of_Handles */
- len += 1;
- pu2Temp = (u16 *)&pRetPar[0];
- *pu2Temp = BTTotalDataBlockNum;
- len += 2;
-
- PPacketIrpEvent->EventCode = HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS;
- PPacketIrpEvent->Length = len;
- if (handleNum && sent)
- bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2);
-}
-
-void BTHCI_EventAMPStatusChange(struct rtw_adapter *padapter, u8 AMP_Status)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct packet_irp_hcievent_data *PPacketIrpEvent;
- u8 len = 0;
- u8 localBuf[7] = "";
- u8 *pRetPar;
-
- if (AMP_Status == AMP_STATUS_NO_CAPACITY_FOR_BT) {
- pBtMgnt->BTNeedAMPStatusChg = true;
- pBtMgnt->bNeedNotifyAMPNoCap = false;
-
- BTHCI_DisconnectAll(padapter);
- } else if (AMP_Status == AMP_STATUS_FULL_CAPACITY_FOR_BT) {
- pBtMgnt->BTNeedAMPStatusChg = false;
- }
-
- PPacketIrpEvent = (struct packet_irp_hcievent_data *)(&localBuf[0]);
- /* Return parameters starts from here */
- pRetPar = &PPacketIrpEvent->Data[0];
-
- pRetPar[0] = 0; /* Status */
- len += 1;
- pRetPar[1] = AMP_Status; /* AMP_Status */
- len += 1;
-
- PPacketIrpEvent->EventCode = HCI_EVENT_AMP_STATUS_CHANGE;
- PPacketIrpEvent->Length = len;
- if (bthci_IndicateEvent(padapter, PPacketIrpEvent, len+2) == RT_STATUS_SUCCESS)
- RTPRINT(FIOCTL, (IOCTL_BT_EVENT|IOCTL_STATE), ("[BT event], AMP Status Change, AMP_Status = %d\n", AMP_Status));
-}
-
-void BTHCI_DisconnectAll(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- u8 i;
-
- RTPRINT(FIOCTL, IOCTL_STATE, (" DisconnectALL()\n"));
-
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- if (pBTInfo->BtAsocEntry[i].b4waySuccess) {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTED, STATE_CMD_DISCONNECT_PHY_LINK, i);
- } else if (pBTInfo->BtAsocEntry[i].bUsed) {
- if (pBTInfo->BtAsocEntry[i].BtCurrentState == HCI_STATE_CONNECTING) {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_CONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, i);
- } else if (pBTInfo->BtAsocEntry[i].BtCurrentState == HCI_STATE_DISCONNECTING) {
- BTHCI_SM_WITH_INFO(padapter, HCI_STATE_DISCONNECTING, STATE_CMD_MAC_CONNECT_CANCEL_INDICATE, i);
- }
- }
- }
-}
-
-enum hci_status
-BTHCI_HandleHCICMD(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
-
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("\n"));
- RTPRINT(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), ("HCI Command start, OGF = 0x%x, OCF = 0x%x, Length = 0x%x\n",
- pHciCmd->OGF, pHciCmd->OCF, pHciCmd->Length));
- if (pHciCmd->Length) {
- RTPRINT_DATA(FIOCTL, (IOCTL_BT_HCICMD_DETAIL|IOCTL_BT_LOGO), "HCI Command, Hex Data :\n",
- &pHciCmd->Data[0], pHciCmd->Length);
- }
- if (pHciCmd->OGF == OGF_EXTENSION) {
- if (pHciCmd->OCF == HCI_SET_RSSI_VALUE)
- RTPRINT(FIOCTL, IOCTL_BT_EVENT_PERIODICAL, ("[BT cmd], "));
- else
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_EXT, ("[BT cmd], "));
- } else {
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("[BT cmd], "));
- }
-
- pBtDbg->dbgHciInfo.hciCmdCnt++;
-
- switch (pHciCmd->OGF) {
- case LINK_CONTROL_COMMANDS:
- status = bthci_HandleOGFLinkControlCMD(padapter, pHciCmd);
- break;
- case HOLD_MODE_COMMAND:
- break;
- case OGF_SET_EVENT_MASK_COMMAND:
- status = bthci_HandleOGFSetEventMaskCMD(padapter, pHciCmd);
- break;
- case OGF_INFORMATIONAL_PARAMETERS:
- status = bthci_HandleOGFInformationalParameters(padapter, pHciCmd);
- break;
- case OGF_STATUS_PARAMETERS:
- status = bthci_HandleOGFStatusParameters(padapter, pHciCmd);
- break;
- case OGF_TESTING_COMMANDS:
- status = bthci_HandleOGFTestingCMD(padapter, pHciCmd);
- break;
- case OGF_EXTENSION:
- status = bthci_HandleOGFExtension(padapter, pHciCmd);
- break;
- default:
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI Command(), Unknown OGF = 0x%x\n", pHciCmd->OGF));
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_UNKNOWN_COMMAND\n"));
- status = bthci_UnknownCMD(padapter, pHciCmd);
- break;
- }
- RTPRINT(FIOCTL, IOCTL_BT_HCICMD_DETAIL, ("HCI Command execution end!!\n"));
-
- return status;
-}
-
-/* ===== End of sync from SD7 driver COMMOM/bt_hci.c ===== */
-
-static const char *const BtStateString[] = {
- "BT_DISABLED",
- "BT_NO_CONNECTION",
- "BT_CONNECT_IDLE",
- "BT_INQ_OR_PAG",
- "BT_ACL_ONLY_BUSY",
- "BT_SCO_ONLY_BUSY",
- "BT_ACL_SCO_BUSY",
- "BT_ACL_INQ_OR_PAG",
- "BT_STATE_NOT_DEFINED"
-};
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.c ===== */
-
-static void btdm_SetFwIgnoreWlanAct(struct rtw_adapter *padapter, u8 bEnable)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[1] = {0};
-
- if (bEnable) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n"));
- H2C_Parameter[0] |= BIT(0); /* function enable */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT don't ignore Wlan_Act !!\n"));
- }
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25 = 0x%02x\n",
- H2C_Parameter[0]));
-
- FillH2CCmd(padapter, BT_IGNORE_WLAN_ACT_EID, 1, H2C_Parameter);
-}
-
-static void btdm_NotifyFwScan(struct rtw_adapter *padapter, u8 scanType)
-{
- u8 H2C_Parameter[1] = {0};
-
- if (scanType == true)
- H2C_Parameter[0] = 0x1;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Notify FW for wifi scan, write 0x3b = 0x%02x\n",
- H2C_Parameter[0]));
-
- FillH2CCmd(padapter, 0x3b, 1, H2C_Parameter);
-}
-
-static void btdm_1AntSetPSMode(struct rtw_adapter *padapter,
- u8 enable, u8 smartps, u8 mode)
-{
- struct pwrctrl_priv *pwrctrl;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Current LPS(%s, %d), smartps =%d\n", enable == true?"ON":"OFF", mode, smartps));
-
- pwrctrl = &padapter->pwrctrlpriv;
-
- if (enable == true) {
- rtw_set_ps_mode23a(padapter, PS_MODE_MIN, smartps, mode);
- } else {
- rtw_set_ps_mode23a(padapter, PS_MODE_ACTIVE, 0, 0);
- LPS_RF_ON_check23a(padapter, 100);
- }
-}
-
-static void btdm_1AntTSFSwitch(struct rtw_adapter *padapter, u8 enable)
-{
- u8 oldVal, newVal;
-
- oldVal = rtl8723au_read8(padapter, 0x550);
-
- if (enable)
- newVal = oldVal | EN_BCN_FUNCTION;
- else
- newVal = oldVal & ~EN_BCN_FUNCTION;
-
- if (oldVal != newVal)
- rtl8723au_write8(padapter, 0x550, newVal);
-}
-
-static u8 btdm_Is1AntPsTdmaStateChange(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
-
- if ((pBtdm8723->bPrePsTdmaOn != pBtdm8723->bCurPsTdmaOn) ||
- (pBtdm8723->prePsTdma != pBtdm8723->curPsTdma))
- return true;
- else
- return false;
-}
-
- /* Before entering TDMA, make sure Power Saving is enabled! */
-static void
-btdm_1AntPsTdma(
- struct rtw_adapter *padapter,
- u8 bTurnOn,
- u8 type
- )
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
-
- pBtdm8723->bCurPsTdmaOn = bTurnOn;
- pBtdm8723->curPsTdma = type;
- if (bTurnOn) {
- switch (type) {
- case 1: /* A2DP Level-1 or FTP/OPP */
- default:
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* wide duration for WiFi */
- BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x0, 0x58);
- }
- break;
- case 2: /* A2DP Level-2 */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* normal duration for WiFi */
- BTDM_SetFw3a(padapter, 0xd3, 0x12, 0x12, 0x0, 0x58);
- }
- break;
- case 3: /* BT FTP/OPP */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* normal duration for WiFi */
- BTDM_SetFw3a(padapter, 0xd3, 0x30, 0x03, 0x10, 0x58);
-
- }
- break;
- case 4: /* for wifi scan & BT is connected */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* protect 3 beacons in 3-beacon period & no Tx pause at BT slot */
- BTDM_SetFw3a(padapter, 0x93, 0x15, 0x03, 0x14, 0x0);
- }
- break;
- case 5: /* for WiFi connected-busy & BT is Non-Connected-Idle */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* SCO mode, Ant fixed at WiFi, WLAN_Act toggle */
- BTDM_SetFw3a(padapter, 0x61, 0x15, 0x03, 0x31, 0x00);
- }
- break;
- case 9: /* ACL high-retry type - 2 */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* narrow duration for WiFi */
- BTDM_SetFw3a(padapter, 0xd3, 0xa, 0xa, 0x0, 0x58);
- }
- break;
- case 10: /* for WiFi connect idle & BT ACL busy or WiFi Connected-Busy & BT is Inquiry */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0x13, 0xa, 0xa, 0x0, 0x40);
- break;
- case 11: /* ACL high-retry type - 3 */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* narrow duration for WiFi */
- BTDM_SetFw3a(padapter, 0xd3, 0x05, 0x05, 0x00, 0x58);
- }
- break;
- case 12: /* for WiFi Connected-Busy & BT is Connected-Idle */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* Allow High-Pri BT */
- BTDM_SetFw3a(padapter, 0xeb, 0x0a, 0x03, 0x31, 0x18);
- }
- break;
- case 20: /* WiFi only busy , TDMA mode for power saving */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0x13, 0x25, 0x25, 0x00, 0x00);
- break;
- case 27: /* WiFi DHCP/Site Survey & BT SCO busy */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xa3, 0x25, 0x03, 0x31, 0x98);
- break;
- case 28: /* WiFi DHCP/Site Survey & BT idle */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0x69, 0x25, 0x03, 0x31, 0x00);
- break;
- case 29: /* WiFi DHCP/Site Survey & BT ACL busy */
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- BTDM_SetFw3a(padapter, 0xeb, 0x1a, 0x1a, 0x01, 0x18);
- rtl8723au_write32(padapter, 0x6c0, 0x5afa5afa);
- rtl8723au_write32(padapter, 0x6c4, 0x5afa5afa);
- }
- break;
- case 30: /* WiFi idle & BT Inquiry */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0x93, 0x15, 0x03, 0x14, 0x00);
- break;
- case 31: /* BT HID */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x00, 0x58);
- break;
- case 32: /* BT SCO & Inquiry */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xab, 0x0a, 0x03, 0x11, 0x98);
- break;
- case 33: /* BT SCO & WiFi site survey */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xa3, 0x25, 0x03, 0x30, 0x98);
- break;
- case 34: /* BT HID & WiFi site survey */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xd3, 0x1a, 0x1a, 0x00, 0x18);
- break;
- case 35: /* BT HID & WiFi Connecting */
- if (btdm_Is1AntPsTdmaStateChange(padapter))
- BTDM_SetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x00, 0x18);
- break;
- }
- } else {
- /* disable PS-TDMA */
- switch (type) {
- case 8:
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* Antenna control by PTA, 0x870 = 0x310 */
- BTDM_SetFw3a(padapter, 0x8, 0x0, 0x0, 0x0, 0x0);
- }
- break;
- case 0:
- default:
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* Antenna control by PTA, 0x870 = 0x310 */
- BTDM_SetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
- }
- /* Switch Antenna to BT */
- rtl8723au_write16(padapter, 0x860, 0x210);
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 0x860 = 0x210, Switch Antenna to BT\n"));
- break;
- case 9:
- if (btdm_Is1AntPsTdmaStateChange(padapter)) {
- /* Antenna control by PTA, 0x870 = 0x310 */
- BTDM_SetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
- }
- /* Switch Antenna to WiFi */
- rtl8723au_write16(padapter, 0x860, 0x110);
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 0x860 = 0x110, Switch Antenna to WiFi\n"));
- break;
- }
- }
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Current TDMA(%s, %d)\n",
- pBtdm8723->bCurPsTdmaOn?"ON":"OFF", pBtdm8723->curPsTdma));
-
- /* update pre state */
- pBtdm8723->bPrePsTdmaOn = pBtdm8723->bCurPsTdmaOn;
- pBtdm8723->prePsTdma = pBtdm8723->curPsTdma;
-}
-
-static void
-_btdm_1AntSetPSTDMA(struct rtw_adapter *padapter, u8 bPSEn, u8 smartps,
- u8 psOption, u8 bTDMAOn, u8 tdmaType)
-{
- struct pwrctrl_priv *pwrctrl;
- struct hal_data_8723a *pHalData;
- struct btdm_8723a_1ant *pBtdm8723;
- u8 psMode;
- u8 bSwitchPS;
-
- if (!check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) &&
- (get_fwstate(&padapter->mlmepriv) != WIFI_NULL_STATE)) {
- btdm_1AntPsTdma(padapter, bTDMAOn, tdmaType);
- return;
- }
- psOption &= ~BIT(0);
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], Set LPS(%s, %d) TDMA(%s, %d)\n",
- bPSEn == true?"ON":"OFF", psOption,
- bTDMAOn == true?"ON":"OFF", tdmaType));
-
- pwrctrl = &padapter->pwrctrlpriv;
- pHalData = GET_HAL_DATA(padapter);
- pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
-
- if (bPSEn) {
- if (pBtdm8723->bWiFiHalt) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi in Halt!!\n"));
- return;
- }
-
- if (pwrctrl->bInSuspend) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi in Suspend!!\n"));
- return;
- }
-
- if (padapter->bDriverStopped) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi driver stopped!!\n"));
- return;
- }
-
- if (padapter->bSurpriseRemoved) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Enable PS Fail, WiFi Surprise Removed!!\n"));
- return;
- }
-
- psMode = PS_MODE_MIN;
- } else {
- psMode = PS_MODE_ACTIVE;
- psOption = 0;
- }
-
- if (psMode != pwrctrl->pwr_mode) {
- bSwitchPS = true;
- } else if (psMode != PS_MODE_ACTIVE) {
- if (psOption != pwrctrl->bcn_ant_mode)
- bSwitchPS = true;
- else if (smartps != pwrctrl->smart_ps)
- bSwitchPS = true;
- else
- bSwitchPS = false;
- } else {
- bSwitchPS = false;
- }
-
- if (bSwitchPS) {
- /* disable TDMA */
- if (pBtdm8723->bCurPsTdmaOn) {
- if (!bTDMAOn) {
- btdm_1AntPsTdma(padapter, false, tdmaType);
- } else {
- if (!rtl8723a_BT_enabled(padapter) ||
- (pHalData->bt_coexist.halCoex8723.c2hBtInfo == BT_INFO_STATE_NO_CONNECTION) ||
- (pHalData->bt_coexist.halCoex8723.c2hBtInfo == BT_INFO_STATE_CONNECT_IDLE) ||
- (tdmaType == 29))
- btdm_1AntPsTdma(padapter, false, 9);
- else
- btdm_1AntPsTdma(padapter, false, 0);
- }
- }
-
- /* change Power Save State */
- btdm_1AntSetPSMode(padapter, bPSEn, smartps, psOption);
- }
-
- btdm_1AntPsTdma(padapter, bTDMAOn, tdmaType);
-}
-
-static void
-btdm_1AntSetPSTDMA(struct rtw_adapter *padapter, u8 bPSEn,
- u8 psOption, u8 bTDMAOn, u8 tdmaType)
-{
- _btdm_1AntSetPSTDMA(padapter, bPSEn, 0, psOption, bTDMAOn, tdmaType);
-}
-
-static void btdm_1AntWifiParaAdjust(struct rtw_adapter *padapter, u8 bEnable)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
-
- if (bEnable) {
- pBtdm8723->curWifiPara = 1;
- if (pBtdm8723->preWifiPara != pBtdm8723->curWifiPara)
- BTDM_SetSwPenaltyTxRateAdaptive(padapter, BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
- } else {
- pBtdm8723->curWifiPara = 2;
- if (pBtdm8723->preWifiPara != pBtdm8723->curWifiPara)
- BTDM_SetSwPenaltyTxRateAdaptive(padapter, BT_TX_RATE_ADAPTIVE_NORMAL);
- }
-
-}
-
-static void btdm_1AntPtaParaReload(struct rtw_adapter *padapter)
-{
- /* PTA parameter */
- rtl8723au_write8(padapter, 0x6cc, 0x0); /* 1-Ant coex */
- rtl8723au_write32(padapter, 0x6c8, 0xffff); /* wifi break table */
- rtl8723au_write32(padapter, 0x6c4, 0x55555555); /* coex table */
-
- /* Antenna switch control parameter */
- rtl8723au_write32(padapter, 0x858, 0xaaaaaaaa);
- if (IS_8723A_A_CUT(GET_HAL_DATA(padapter)->VersionID)) {
- /* SPDT(connected with TRSW) control by hardware PTA */
- rtl8723au_write32(padapter, 0x870, 0x0);
- rtl8723au_write8(padapter, 0x40, 0x24);
- } else {
- rtl8723au_write8(padapter, 0x40, 0x20);
- /* set antenna at bt side if ANTSW is software control */
- rtl8723au_write16(padapter, 0x860, 0x210);
- /* SPDT(connected with TRSW) control by hardware PTA */
- rtl8723au_write32(padapter, 0x870, 0x300);
- /* ANTSW keep by GNT_BT */
- rtl8723au_write32(padapter, 0x874, 0x22804000);
- }
-
- /* coexistence parameters */
- rtl8723au_write8(padapter, 0x778, 0x1); /* enable RTK mode PTA */
-
- /* BT don't ignore WLAN_Act */
- btdm_SetFwIgnoreWlanAct(padapter, false);
-}
-
-/*
- * Return:
- *  1: upgrade (add WiFi duration time)
- *  0: keep
- * -1: downgrade (add BT duration time)
- */
-static s8 btdm_1AntTdmaJudgement(struct rtw_adapter *padapter, u8 retry)
-{
- struct hal_data_8723a *pHalData;
- struct btdm_8723a_1ant *pBtdm8723;
- static s8 up, dn, m = 1, WaitCount;
- s8 ret;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
- ret = 0;
-
- if (pBtdm8723->psTdmaMonitorCnt == 0) {
- up = 0;
- dn = 0;
- m = 1;
- WaitCount = 0;
- } else {
- WaitCount++;
- }
-
- if (retry == 0) {
- /* no retry in the last 2-second duration */
- up++;
- dn--;
- if (dn < 0)
- dn = 0;
- if (up >= 3*m) {
- /* retry = 0 in consecutive 3m*(2s), add WiFi duration */
- ret = 1;
- up = 0;
- dn = 0;
- WaitCount = 0;
- }
- } else if (retry <= 3) {
- /* retry <= 3 in the last 2-second duration */
- up--;
- dn++;
- if (up < 0)
- up = 0;
-
- if (dn == 2) {
- /* retry <= 3 in consecutive 2*(2s), minus WiFi duration (add BT duration) */
- ret = -1;
-
- /* record how many times the WiFi duration has been downgraded */
- if (WaitCount <= 2)
- m++;
- else
- m = 1;
- /* the max value of m is 20 */
- /* the longest time to upgrade the WiFi duration is 20*3*2s = 120s */
- if (m >= 20)
- m = 20;
- up = 0;
- dn = 0;
- WaitCount = 0;
- }
- } else {
- /* retry count > 3 */
- /* retry > 3, minus WiFi duration (add BT duration) */
- ret = -1;
-
- /* record how many times the WiFi duration has been downgraded */
- if (WaitCount == 1)
- m++;
- else
- m = 1;
- if (m >= 20)
- m = 20;
-
- up = 0;
- dn = 0;
- WaitCount = 0;
- }
- return ret;
-}
-
-static void btdm_1AntTdmaDurationAdjustForACL(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_1ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
-
- if (pBtdm8723->psTdmaGlobalCnt != pBtdm8723->psTdmaMonitorCnt) {
- pBtdm8723->psTdmaMonitorCnt = 0;
- pBtdm8723->psTdmaGlobalCnt = 0;
- }
- if (pBtdm8723->psTdmaMonitorCnt == 0) {
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else {
- /* We only have 4 PS-TDMA levels now. If the current one is not */
- /* one of those 4 levels (it may be changed by wifi scan, dhcp, ...), */
- /* adjust it back to the previously recorded one. */
- if ((pBtdm8723->curPsTdma != 1) &&
- (pBtdm8723->curPsTdma != 2) &&
- (pBtdm8723->curPsTdma != 9) &&
- (pBtdm8723->curPsTdma != 11)) {
- btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
- } else {
- s32 judge;
-
- judge = btdm_1AntTdmaJudgement(padapter, pHalData->bt_coexist.halCoex8723.btRetryCnt);
- if (judge == -1) {
- if (pBtdm8723->curPsTdma == 1) {
- /* Decrease WiFi duration for high BT retry */
- if (pHalData->bt_coexist.halCoex8723.btInfoExt)
- pBtdm8723->psTdmaDuAdjType = 9;
- else
- pBtdm8723->psTdmaDuAdjType = 2;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 9);
- pBtdm8723->psTdmaDuAdjType = 9;
- } else if (pBtdm8723->curPsTdma == 9) {
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- }
- } else if (judge == 1) {
- if (pBtdm8723->curPsTdma == 11) {
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 9);
- pBtdm8723->psTdmaDuAdjType = 9;
- } else if (pBtdm8723->curPsTdma == 9) {
- if (pHalData->bt_coexist.halCoex8723.btInfoExt)
- pBtdm8723->psTdmaDuAdjType = 9;
- else
- pBtdm8723->psTdmaDuAdjType = 2;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
- } else if (pBtdm8723->curPsTdma == 2) {
- if (pHalData->bt_coexist.halCoex8723.btInfoExt)
- pBtdm8723->psTdmaDuAdjType = 9;
- else
- pBtdm8723->psTdmaDuAdjType = 1;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, pBtdm8723->psTdmaDuAdjType);
- }
- }
- }
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], ACL current TDMA(%s, %d)\n",
- (pBtdm8723->bCurPsTdmaOn ? "ON" : "OFF"), pBtdm8723->curPsTdma));
- }
- pBtdm8723->psTdmaMonitorCnt++;
-}
-
-static void btdm_1AntCoexProcessForWifiConnect(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv;
- struct hal_data_8723a *pHalData;
- struct bt_coexist_8723a *pBtCoex;
- struct btdm_8723a_1ant *pBtdm8723;
- u8 BtState;
-
- pmlmepriv = &padapter->mlmepriv;
- pHalData = GET_HAL_DATA(padapter);
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
- pBtdm8723 = &pBtCoex->btdm1Ant;
- BtState = pBtCoex->c2hBtInfo;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], WiFi is %s\n",
- BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is %s\n",
- BtStateString[BtState]));
-
- padapter->pwrctrlpriv.btcoex_rfon = false;
-
- if (!BTDM_IsWifiBusy(padapter) &&
- !check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) &&
- (BtState == BT_INFO_STATE_NO_CONNECTION ||
- BtState == BT_INFO_STATE_CONNECT_IDLE)) {
- switch (BtState) {
- case BT_INFO_STATE_NO_CONNECTION:
- _btdm_1AntSetPSTDMA(padapter, true, 2, 0x26, false, 9);
- break;
- case BT_INFO_STATE_CONNECT_IDLE:
- _btdm_1AntSetPSTDMA(padapter, true, 2, 0x26, false, 0);
- break;
- }
- } else {
- switch (BtState) {
- case BT_INFO_STATE_NO_CONNECTION:
- case BT_INFO_STATE_CONNECT_IDLE:
- /* WiFi is Busy */
- btdm_1AntSetPSTDMA(padapter, false, 0, true, 5);
- rtl8723au_write32(padapter, 0x6c0, 0x5a5a5a5a);
- rtl8723au_write32(padapter, 0x6c4, 0x5a5a5a5a);
- break;
- case BT_INFO_STATE_ACL_INQ_OR_PAG:
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is "
- "BT_INFO_STATE_ACL_INQ_OR_PAG\n"));
- case BT_INFO_STATE_INQ_OR_PAG:
- padapter->pwrctrlpriv.btcoex_rfon = true;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 30);
- break;
- case BT_INFO_STATE_SCO_ONLY_BUSY:
- case BT_INFO_STATE_ACL_SCO_BUSY:
- if (true == pBtCoex->bC2hBtInquiryPage)
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 32);
- else {
-#ifdef BTCOEX_CMCC_TEST
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 23);
-#else /* !BTCOEX_CMCC_TEST */
- btdm_1AntSetPSTDMA(padapter, false, 0,
- false, 8);
- rtl8723au_write32(padapter, 0x6c0, 0x5a5a5a5a);
- rtl8723au_write32(padapter, 0x6c4, 0x5a5a5a5a);
-#endif /* !BTCOEX_CMCC_TEST */
- }
- break;
- case BT_INFO_STATE_ACL_ONLY_BUSY:
- padapter->pwrctrlpriv.btcoex_rfon = true;
- if (pBtCoex->c2hBtProfile == BT_INFO_HID) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is HID\n"));
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 31);
- } else if (pBtCoex->c2hBtProfile == BT_INFO_FTP) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is FTP/OPP\n"));
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 3);
- } else if (pBtCoex->c2hBtProfile == (BT_INFO_A2DP|BT_INFO_FTP)) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is A2DP_FTP\n"));
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 11);
- } else {
- if (pBtCoex->c2hBtProfile == BT_INFO_A2DP)
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is "
- "A2DP\n"));
- else
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], BT PROFILE is "
- "UNKNOWN(0x%02X)! Use A2DP "
- "Profile\n",
- pBtCoex->c2hBtProfile));
- btdm_1AntTdmaDurationAdjustForACL(padapter);
- }
- break;
- }
- }
-
- pBtdm8723->psTdmaGlobalCnt++;
-}
-
-static void
-btdm_1AntUpdateHalRAMask(struct rtw_adapter *padapter, u32 mac_id, u32 filter)
-{
- u8 init_rate = 0;
- u8 raid, arg;
- u32 mask;
- u8 shortGIrate = false;
- int supportRateNum = 0;
- struct sta_info *psta;
- struct hal_data_8723a *pHalData;
- struct dm_priv *pdmpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- struct wlan_bssid_ex *cur_network;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, MACID =%d, filter = 0x%08x!!\n",
- __func__, mac_id, filter));
-
- pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
- cur_network = &pmlmeinfo->network;
-
- if (mac_id >= NUM_STA) { /* CAM_SIZE */
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, MACID =%d illegal!!\n",
- __func__, mac_id));
- return;
- }
-
- psta = pmlmeinfo->FW_sta_info[mac_id].psta;
- if (!psta) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], %s, Can't find station!!\n",
- __func__));
- return;
- }
-
- raid = psta->raid;
-
- switch (mac_id) {
- case 0:/* for infra mode */
- supportRateNum =
- rtw_get_rateset_len23a(cur_network->SupportedRates);
- mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ?
- update_MSC_rate23a(&pmlmeinfo->ht_cap):0;
- if (support_short_GI23a(padapter, &pmlmeinfo->ht_cap))
- shortGIrate = true;
- break;
- case 1:/* for broadcast/multicast */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
- mask = update_basic_rate23a(cur_network->SupportedRates,
- supportRateNum);
- break;
- default: /* for each sta in IBSS */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
- mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
- break;
- }
- mask |= ((raid<<28)&0xf0000000);
- mask &= 0xffffffff;
- mask &= ~filter;
- init_rate = get_highest_rate_idx23a(mask)&0x3f;
-
- arg = mac_id&0x1f;/* MACID */
- arg |= BIT(7);
- if (true == shortGIrate)
- arg |= BIT(5);
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], Update FW RAID entry, MASK = 0x%08x, "
- "arg = 0x%02x\n", mask, arg));
-
- rtl8723a_set_raid_cmd(padapter, mask, arg);
-
- psta->init_rate = init_rate;
- pdmpriv->INIDATA_RATE[mac_id] = init_rate;
-}
-
-static void
-btdm_1AntUpdateHalRAMaskForSCO(struct rtw_adapter *padapter, u8 forceUpdate)
-{
- struct btdm_8723a_1ant *pBtdm8723;
- struct sta_priv *pstapriv;
- struct wlan_bssid_ex *cur_network;
- struct sta_info *psta;
- u32 macid;
- u32 filter = 0;
-
- pBtdm8723 = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant;
-
- if (pBtdm8723->bRAChanged == true && forceUpdate == false)
- return;
-
- pstapriv = &padapter->stapriv;
- cur_network = &padapter->mlmeextpriv.mlmext_info.network;
- psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
- macid = psta->mac_id;
-
- filter |= BIT(_1M_RATE_);
- filter |= BIT(_2M_RATE_);
- filter |= BIT(_5M_RATE_);
- filter |= BIT(_11M_RATE_);
- filter |= BIT(_6M_RATE_);
- filter |= BIT(_9M_RATE_);
-
- btdm_1AntUpdateHalRAMask(padapter, macid, filter);
-
- pBtdm8723->bRAChanged = true;
-}
-
-static void btdm_1AntRecoverHalRAMask(struct rtw_adapter *padapter)
-{
- struct btdm_8723a_1ant *pBtdm8723;
- struct sta_priv *pstapriv;
- struct wlan_bssid_ex *cur_network;
- struct sta_info *psta;
-
- pBtdm8723 = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant;
-
- if (pBtdm8723->bRAChanged == false)
- return;
-
- pstapriv = &padapter->stapriv;
- cur_network = &padapter->mlmeextpriv.mlmext_info.network;
- psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
-
- Update_RA_Entry23a(padapter, psta);
-
- pBtdm8723->bRAChanged = false;
-}
-
-static void
-btdm_1AntBTStateChangeHandler(struct rtw_adapter *padapter,
- enum bt_state_1ant oldState,
- enum bt_state_1ant newState)
-{
- struct hal_data_8723a *phaldata;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT state change, %s => %s\n",
- BtStateString[oldState],
- BtStateString[newState]));
-
- /* BT ignores wlan active by default; */
- /* WiFi MUST disable this when BT is enabled */
- if (newState > BT_INFO_STATE_DISABLED)
- btdm_SetFwIgnoreWlanAct(padapter, false);
-
- if ((check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) &&
- (BTDM_IsWifiConnectionExist(padapter))) {
- if ((newState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
- (newState == BT_INFO_STATE_ACL_SCO_BUSY)) {
- btdm_1AntUpdateHalRAMaskForSCO(padapter, false);
- } else {
- /* Recover original RA setting */
- btdm_1AntRecoverHalRAMask(padapter);
- }
- } else {
- phaldata = GET_HAL_DATA(padapter);
- phaldata->bt_coexist.halCoex8723.btdm1Ant.bRAChanged = false;
- }
-
- if (oldState == newState)
- return;
-
- if (oldState == BT_INFO_STATE_ACL_ONLY_BUSY) {
- struct hal_data_8723a *Hal = GET_HAL_DATA(padapter);
- Hal->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCnt = 0;
- Hal->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCntForSCO = 0;
- }
-
- if ((oldState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
- (oldState == BT_INFO_STATE_ACL_SCO_BUSY)) {
- struct hal_data_8723a *Hal = GET_HAL_DATA(padapter);
- Hal->bt_coexist.halCoex8723.btdm1Ant.psTdmaMonitorCntForSCO = 0;
- }
-
- /* Active 2Ant mechanism when BT Connected */
- if ((oldState == BT_INFO_STATE_DISABLED) ||
- (oldState == BT_INFO_STATE_NO_CONNECTION)) {
- if ((newState != BT_INFO_STATE_DISABLED) &&
- (newState != BT_INFO_STATE_NO_CONNECTION)) {
- BTDM_SetSwRfRxLpfCorner(padapter,
- BT_RF_RX_LPF_CORNER_SHRINK);
- BTDM_AGCTable(padapter, BT_AGCTABLE_ON);
- BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_ON);
- }
- } else {
- if ((newState == BT_INFO_STATE_DISABLED) ||
- (newState == BT_INFO_STATE_NO_CONNECTION)) {
- BTDM_SetSwRfRxLpfCorner(padapter,
- BT_RF_RX_LPF_CORNER_RESUME);
- BTDM_AGCTable(padapter, BT_AGCTABLE_OFF);
- BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_OFF);
- }
- }
-}
-
-static void btdm_1AntBtCoexistHandler(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_coexist_8723a *pBtCoex8723;
- struct btdm_8723a_1ant *pBtdm8723;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtCoex8723 = &pHalData->bt_coexist.halCoex8723;
- pBtdm8723 = &pBtCoex8723->btdm1Ant;
- padapter->pwrctrlpriv.btcoex_rfon = false;
- if (!rtl8723a_BT_enabled(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is disabled\n"));
-
- if (BTDM_IsWifiConnectionExist(padapter)) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is connected\n"));
-
- if (BTDM_IsWifiBusy(padapter)) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], Wifi is busy\n"));
- btdm_1AntSetPSTDMA(padapter, false, 0,
- false, 9);
- } else {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], Wifi is idle\n"));
- _btdm_1AntSetPSTDMA(padapter, true, 2, 1,
- false, 9);
- }
- } else {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is disconnected\n"));
-
- btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT is enabled\n"));
-
- if (BTDM_IsWifiConnectionExist(padapter)) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is connected\n"));
-
- btdm_1AntWifiParaAdjust(padapter, true);
- btdm_1AntCoexProcessForWifiConnect(padapter);
- } else {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is disconnected\n"));
-
- /* Antenna switch at BT side(0x870 = 0x300,
- 0x860 = 0x210) after PSTDMA off */
- btdm_1AntWifiParaAdjust(padapter, false);
- btdm_1AntSetPSTDMA(padapter, false, 0, false, 0);
- }
- }
-
- btdm_1AntBTStateChangeHandler(padapter, pBtCoex8723->prec2hBtInfo,
- pBtCoex8723->c2hBtInfo);
- pBtCoex8723->prec2hBtInfo = pBtCoex8723->c2hBtInfo;
-}
-
-void BTDM_1AntSignalCompensation(struct rtw_adapter *padapter,
- u8 *rssi_wifi, u8 *rssi_bt)
-{
- struct hal_data_8723a *pHalData;
- struct btdm_8723a_1ant *pBtdm8723;
- u8 RSSI_WiFi_Cmpnstn, RSSI_BT_Cmpnstn;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm1Ant;
- RSSI_WiFi_Cmpnstn = 0;
- RSSI_BT_Cmpnstn = 0;
-
- switch (pBtdm8723->curPsTdma) {
- case 1: /* WiFi 52ms */
- RSSI_WiFi_Cmpnstn = 11; /* 22*0.48 */
- break;
- case 2: /* WiFi 36ms */
- RSSI_WiFi_Cmpnstn = 14; /* 22*0.64 */
- break;
- case 9: /* WiFi 20ms */
- RSSI_WiFi_Cmpnstn = 18; /* 22*0.80 */
- break;
- case 11: /* WiFi 10ms */
- RSSI_WiFi_Cmpnstn = 20; /* 22*0.90 */
- break;
- case 4: /* WiFi 21ms */
- RSSI_WiFi_Cmpnstn = 17; /* 22*0.79 */
- break;
- case 16: /* WiFi 24ms */
- RSSI_WiFi_Cmpnstn = 18; /* 22*0.76 */
- break;
- case 18: /* WiFi 37ms */
- RSSI_WiFi_Cmpnstn = 14; /* 22*0.64 */
- break;
- case 23: /* Level-1, Antenna switch to BT at all time */
- case 24: /* Level-2, Antenna switch to BT at all time */
- case 25: /* Level-3a, Antenna switch to BT at all time */
- case 26: /* Level-3b, Antenna switch to BT at all time */
- case 27: /* Level-3b, Antenna switch to BT at all time */
- case 33: /* BT SCO & WiFi site survey */
- RSSI_WiFi_Cmpnstn = 22;
- break;
- default:
- break;
- }
-
- if (rssi_wifi && RSSI_WiFi_Cmpnstn) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], 1AntSgnlCmpnstn, case %d, WiFiCmpnstn "
- "=%d(%d => %d)\n", pBtdm8723->curPsTdma,
- RSSI_WiFi_Cmpnstn, *rssi_wifi,
- *rssi_wifi+RSSI_WiFi_Cmpnstn));
- *rssi_wifi += RSSI_WiFi_Cmpnstn;
- }
-
- if (rssi_bt && RSSI_BT_Cmpnstn) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], 1AntSgnlCmpnstn, case %d, BTCmpnstn "
- "=%d(%d => %d)\n", pBtdm8723->curPsTdma,
- RSSI_BT_Cmpnstn, *rssi_bt, *rssi_bt+RSSI_BT_Cmpnstn));
- *rssi_bt += RSSI_BT_Cmpnstn;
- }
-}
-
-static void BTDM_1AntParaInit(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_coexist_8723a *pBtCoex;
- struct btdm_8723a_1ant *pBtdm8723;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
- pBtdm8723 = &pBtCoex->btdm1Ant;
-
- /* Enable counter statistics */
- rtl8723au_write8(padapter, 0x76e, 0x4);
- btdm_1AntPtaParaReload(padapter);
-
- pBtdm8723->wifiRssiThresh = 48;
-
- pBtdm8723->bWiFiHalt = false;
- pBtdm8723->bRAChanged = false;
-
- if ((pBtCoex->c2hBtInfo != BT_INFO_STATE_DISABLED) &&
- (pBtCoex->c2hBtInfo != BT_INFO_STATE_NO_CONNECTION)) {
- BTDM_SetSwRfRxLpfCorner(padapter, BT_RF_RX_LPF_CORNER_SHRINK);
- BTDM_AGCTable(padapter, BT_AGCTABLE_ON);
- BTDM_BBBackOffLevel(padapter, BT_BB_BACKOFF_ON);
- }
-}
-
-static void BTDM_1AntForHalt(struct rtw_adapter *padapter)
-{
- RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for halt\n"));
-
- GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant.bWiFiHalt =
- true;
-
- btdm_1AntWifiParaAdjust(padapter, false);
-
- /* don't use btdm_1AntSetPSTDMA() here */
- /* it will call rtw_set_ps_mode23a() and request pwrpriv->lock. */
- /* This will lead to deadlock, if this function is called in IPS */
- /* Lucas@20130205 */
- btdm_1AntPsTdma(padapter, false, 0);
-
- btdm_SetFwIgnoreWlanAct(padapter, true);
-}
-
-static void BTDM_1AntLpsLeave(struct rtw_adapter *padapter)
-{
- RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for LPS Leave\n"));
-
- /* Prevent from entering LPS again */
- GET_HAL_DATA(padapter)->bt_coexist.halCoex8723.btdm1Ant.bWiFiHalt =
- true;
-
- btdm_1AntSetPSTDMA(padapter, false, 0, false, 8);
-/*btdm_1AntPsTdma(padapter, false, 8); */
-}
-
-static void BTDM_1AntWifiAssociateNotify(struct rtw_adapter *padapter, u8 type)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- RTPRINT(FBT, BT_TRACE,
- ("\n[BTCoex], 1Ant for associate, type =%d\n", type));
-
- if (type) {
- rtl8723a_CheckAntenna_Selection(padapter);
- if (!rtl8723a_BT_enabled(padapter))
- btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
- else {
- struct bt_coexist_8723a *pBtCoex;
- u8 BtState;
-
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
- BtState = pBtCoex->c2hBtInfo;
-
- btdm_1AntTSFSwitch(padapter, true);
-
- if (BtState == BT_INFO_STATE_NO_CONNECTION ||
- BtState == BT_INFO_STATE_CONNECT_IDLE) {
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 28);
- } else if (BtState == BT_INFO_STATE_SCO_ONLY_BUSY ||
- BtState == BT_INFO_STATE_ACL_SCO_BUSY) {
- btdm_1AntSetPSTDMA(padapter, false, 0,
- false, 8);
- rtl8723au_write32(padapter, 0x6c0, 0x5a5a5a5a);
- rtl8723au_write32(padapter, 0x6c4, 0x5a5a5a5a);
- } else if (BtState == BT_INFO_STATE_ACL_ONLY_BUSY ||
- BtState == BT_INFO_STATE_ACL_INQ_OR_PAG) {
- if (pBtCoex->c2hBtProfile == BT_INFO_HID)
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 35);
- else
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 29);
- }
- }
- } else {
- if (!rtl8723a_BT_enabled(padapter)) {
- if (!BTDM_IsWifiConnectionExist(padapter)) {
- btdm_1AntPsTdma(padapter, false, 0);
- btdm_1AntTSFSwitch(padapter, false);
- }
- }
-
- btdm_1AntBtCoexistHandler(padapter);
- }
-}
-
-static void
-BTDM_1AntMediaStatusNotify(struct rtw_adapter *padapter,
- enum rt_media_status mstatus)
-{
- struct bt_coexist_8723a *pBtCoex;
-
- pBtCoex = &GET_HAL_DATA(padapter)->bt_coexist.halCoex8723;
-
- RTPRINT(FBT, BT_TRACE,
- ("\n\n[BTCoex]******************************\n"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], MediaStatus, WiFi %s !!\n",
- mstatus == RT_MEDIA_CONNECT?"CONNECT":"DISCONNECT"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex]******************************\n"));
-
- if (RT_MEDIA_CONNECT == mstatus) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) {
- if (pBtCoex->c2hBtInfo == BT_INFO_STATE_SCO_ONLY_BUSY ||
- pBtCoex->c2hBtInfo == BT_INFO_STATE_ACL_SCO_BUSY)
- btdm_1AntUpdateHalRAMaskForSCO(padapter, true);
- }
-
- padapter->pwrctrlpriv.DelayLPSLastTimeStamp = jiffies;
- BTDM_1AntForDhcp(padapter);
- } else {
- /* DBG_8723A("%s rtl8723a_DeinitAntenna_Selection\n",
- __func__); */
- rtl8723a_DeinitAntenna_Selection(padapter);
- btdm_1AntBtCoexistHandler(padapter);
- pBtCoex->btdm1Ant.bRAChanged = false;
- }
-}
-
-void BTDM_1AntForDhcp(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- u8 BtState;
- struct bt_coexist_8723a *pBtCoex;
- struct btdm_8723a_1ant *pBtdm8723;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
- BtState = pBtCoex->c2hBtInfo;
- pBtdm8723 = &pBtCoex->btdm1Ant;
-
- RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for DHCP\n"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for DHCP, WiFi is %s\n",
- BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for DHCP, %s\n",
- BtStateString[BtState]));
-
- BTDM_1AntWifiAssociateNotify(padapter, true);
-}
-
-static void BTDM_1AntWifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
-{
- struct hal_data_8723a *pHalData;
- u8 BtState;
- struct bt_coexist_8723a *pBtCoex;
- struct btdm_8723a_1ant *pBtdm8723;
-
- pHalData = GET_HAL_DATA(padapter);
- BtState = pHalData->bt_coexist.halCoex8723.c2hBtInfo;
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
- pBtdm8723 = &pBtCoex->btdm1Ant;
-
- RTPRINT(FBT, BT_TRACE, ("\n[BTCoex], 1Ant for wifi scan =%d!!\n",
- scanType));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for wifi scan, WiFi is %s\n",
- BTDM_IsWifiBusy(padapter)?"Busy":"IDLE"));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1Ant for wifi scan, %s\n",
- BtStateString[BtState]));
-
- if (scanType) {
- rtl8723a_CheckAntenna_Selection(padapter);
- if (!rtl8723a_BT_enabled(padapter)) {
- btdm_1AntSetPSTDMA(padapter, false, 0, false, 9);
- } else if (BTDM_IsWifiConnectionExist(padapter) == false) {
- BTDM_1AntWifiAssociateNotify(padapter, true);
- } else {
- if ((BtState == BT_INFO_STATE_SCO_ONLY_BUSY) ||
- (BtState == BT_INFO_STATE_ACL_SCO_BUSY)) {
- if (pBtCoex->bC2hBtInquiryPage) {
- btdm_1AntSetPSTDMA(padapter, false, 0,
- true, 32);
- } else {
- padapter->pwrctrlpriv.btcoex_rfon =
- true;
- btdm_1AntSetPSTDMA(padapter, true, 0,
- true, 33);
- }
- } else if (true == pBtCoex->bC2hBtInquiryPage) {
- padapter->pwrctrlpriv.btcoex_rfon = true;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 30);
- } else if (BtState == BT_INFO_STATE_ACL_ONLY_BUSY) {
- padapter->pwrctrlpriv.btcoex_rfon = true;
- if (pBtCoex->c2hBtProfile == BT_INFO_HID)
- btdm_1AntSetPSTDMA(padapter, true, 0,
- true, 34);
- else
- btdm_1AntSetPSTDMA(padapter, true, 0,
- true, 4);
- } else {
- padapter->pwrctrlpriv.btcoex_rfon = true;
- btdm_1AntSetPSTDMA(padapter, true, 0, true, 5);
- }
- }
-
- btdm_NotifyFwScan(padapter, 1);
- } else {
- /* WiFi_Finish_Scan */
- btdm_NotifyFwScan(padapter, 0);
- btdm_1AntBtCoexistHandler(padapter);
- }
-}
-
-static void BTDM_1AntFwC2hBtInfo8723A(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_coexist_8723a *pBtCoex;
- u8 u1tmp, btState;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- u1tmp = pBtCoex->c2hBtInfoOriginal;
- /* sco BUSY bit is not used on voice over PCM platform */
- btState = u1tmp & 0xF;
- pBtCoex->c2hBtProfile = u1tmp & 0xE0;
-
- /* default set bt to idle state. */
- pBtMgnt->ExtConfig.bBTBusy = false;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
-
- /* check BIT2 first ==> check if bt is under inquiry or page scan */
- if (btState & BIT(2))
- pBtCoex->bC2hBtInquiryPage = true;
- else
- pBtCoex->bC2hBtInquiryPage = false;
- btState &= ~BIT(2);
-
- if (!(btState & BIT(0)))
- pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
- else {
- if (btState == 0x1)
- pBtCoex->c2hBtInfo = BT_INFO_STATE_CONNECT_IDLE;
- else if (btState == 0x9) {
- if (pBtCoex->bC2hBtInquiryPage == true)
- pBtCoex->c2hBtInfo =
- BT_INFO_STATE_ACL_INQ_OR_PAG;
- else
- pBtCoex->c2hBtInfo =
- BT_INFO_STATE_ACL_ONLY_BUSY;
- pBtMgnt->ExtConfig.bBTBusy = true;
- } else if (btState == 0x3) {
- pBtCoex->c2hBtInfo = BT_INFO_STATE_SCO_ONLY_BUSY;
- pBtMgnt->ExtConfig.bBTBusy = true;
- } else if (btState == 0xb) {
- pBtCoex->c2hBtInfo = BT_INFO_STATE_ACL_SCO_BUSY;
- pBtMgnt->ExtConfig.bBTBusy = true;
- } else
- pBtCoex->c2hBtInfo = BT_INFO_STATE_MAX;
- if (pBtMgnt->ExtConfig.bBTBusy)
- pHalData->bt_coexist.CurrentState &=
- ~BT_COEX_STATE_BT_IDLE;
- }
-
- if (BT_INFO_STATE_NO_CONNECTION == pBtCoex->c2hBtInfo ||
- BT_INFO_STATE_CONNECT_IDLE == pBtCoex->c2hBtInfo) {
- if (pBtCoex->bC2hBtInquiryPage)
- pBtCoex->c2hBtInfo = BT_INFO_STATE_INQ_OR_PAG;
- }
-
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], %s(%d)\n",
- BtStateString[pBtCoex->c2hBtInfo], pBtCoex->c2hBtInfo));
-
- if (pBtCoex->c2hBtProfile != BT_INFO_HID)
- pBtCoex->c2hBtProfile &= ~BT_INFO_HID;
-}
-
-void BTDM_1AntBtCoexist8723A(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv;
- struct hal_data_8723a *pHalData;
- unsigned long delta_time;
-
- pmlmepriv = &padapter->mlmepriv;
- pHalData = GET_HAL_DATA(padapter);
-
- if (check_fwstate(pmlmepriv, WIFI_SITE_MONITOR)) {
- /* already done in BTDM_1AntForScan() */
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is under scan progress!!\n"));
- return;
- }
-
- if (check_fwstate(pmlmepriv, WIFI_UNDER_LINKING)) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], wifi is under link progress!!\n"));
- return;
- }
-
- /* under DHCP(Special packet) */
- delta_time = jiffies - padapter->pwrctrlpriv.DelayLPSLastTimeStamp;
- delta_time = jiffies_to_msecs(delta_time);
- if (delta_time < 500) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], wifi is under DHCP "
- "progress(%li ms)!!\n", delta_time));
- return;
- }
-
- BTDM_CheckWiFiState(padapter);
-
- btdm_1AntBtCoexistHandler(padapter);
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.c ===== */
-
-/* local function start with btdm_ */
-static u8 btdm_ActionAlgorithm(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
- u8 bScoExist = false, bBtLinkExist = false, bBtHsModeExist = false;
- u8 algorithm = BT_2ANT_COEX_ALGO_UNDEFINED;
-
- if (pBtMgnt->ExtConfig.NumberOfHandle)
- bBtLinkExist = true;
- if (pBtMgnt->ExtConfig.NumberOfSCO)
- bScoExist = true;
- if (BT_HsConnectionEstablished(padapter))
- bBtHsModeExist = true;
-
- /* here we get BT status first */
- /* 1) initialize */
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
-
- if ((bScoExist) || (bBtHsModeExist) ||
- (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID))) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
- } else {
- /* A2dp profile */
- if ((pBtMgnt->ExtConfig.NumberOfHandle == 1) &&
- (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP))) {
- if (BTDM_BtTxRxCounterL(padapter) < 100) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
- }
- }
- /* Pan profile */
- if ((pBtMgnt->ExtConfig.NumberOfHandle == 1) &&
- (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN))) {
- if (BTDM_BtTxRxCounterL(padapter) < 600) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- } else {
- if (pHalData->bt_coexist.halCoex8723.lowPriorityTx) {
- if ((pHalData->bt_coexist.halCoex8723.lowPriorityRx /
- pHalData->bt_coexist.halCoex8723.lowPriorityTx) > 9) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- }
- }
- }
- if (BT_2ANT_BT_STATUS_CONNECTED_IDLE != pBtdm8723->btStatus) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
- }
- }
- /* Pan+A2dp profile */
- if ((pBtMgnt->ExtConfig.NumberOfHandle == 2) &&
- (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) &&
- (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN))) {
- if (BTDM_BtTxRxCounterL(padapter) < 600) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- } else {
- if (pHalData->bt_coexist.halCoex8723.lowPriorityTx) {
- if ((pHalData->bt_coexist.halCoex8723.lowPriorityRx /
- pHalData->bt_coexist.halCoex8723.lowPriorityTx) > 9) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- }
- }
- }
- if (BT_2ANT_BT_STATUS_CONNECTED_IDLE != pBtdm8723->btStatus) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
- }
- }
- }
- if (BT_2ANT_BT_STATUS_IDLE != pBtdm8723->btStatus)
- pBtMgnt->ExtConfig.bBTBusy = true;
- else
- pBtMgnt->ExtConfig.bBTBusy = false;
-
- if (!bBtLinkExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], No profile exists!!!\n"));
- return algorithm;
- }
-
- if (pBtMgnt->ExtConfig.NumberOfHandle == 1) {
- if (bScoExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO only\n"));
- algorithm = BT_2ANT_COEX_ALGO_SCO;
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID only\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID;
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP only\n"));
- algorithm = BT_2ANT_COEX_ALGO_A2DP;
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(HS) only\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANHS;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(EDR) only\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d \n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- }
- } else if (pBtMgnt->ExtConfig.NumberOfHandle == 2) {
- if (bScoExist) {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID;
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP\n"));
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_SCO;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched ACL profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- }
- } else if (pBtMgnt->ExtConfig.NumberOfHandle == 3) {
- if (bScoExist) {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP\n"));
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_SCO;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP + PAN(EDR)\n"));
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANHS;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- }
- } else if (pBtMgnt->ExtConfig.NumberOfHandle >= 3) {
- if (bScoExist) {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- if (bBtHsModeExist)
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
- else
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(EDR)\n"));
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO exists but why NO matched profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! NO matched profile for NumberOfHandle =%d\n",
- pBtMgnt->ExtConfig.NumberOfHandle));
- }
- }
- return algorithm;
-}
-
-static u8 btdm_NeedToDecBtPwr(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 bRet = false;
-
- if (BT_Operation(padapter)) {
- if (pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB > 47) {
- RTPRINT(FBT, BT_TRACE, ("Need to decrease bt power for HS mode!!\n"));
- bRet = true;
- } else {
- RTPRINT(FBT, BT_TRACE, ("NO Need to decrease bt power for HS mode!!\n"));
- }
- } else {
- if (BTDM_IsWifiConnectionExist(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("Need to decrease bt power for Wifi is connected!!\n"));
- bRet = true;
- }
- }
- return bRet;
-}
-
-static void
-btdm_SetCoexTable(struct rtw_adapter *padapter, u32 val0x6c0,
- u32 val0x6c8, u8 val0x6cc)
-{
- RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6c0 = 0x%x\n", val0x6c0));
- rtl8723au_write32(padapter, 0x6c0, val0x6c0);
-
- RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6c8 = 0x%x\n", val0x6c8));
- rtl8723au_write32(padapter, 0x6c8, val0x6c8);
-
- RTPRINT(FBT, BT_TRACE, ("set coex table, set 0x6cc = 0x%x\n", val0x6cc));
- rtl8723au_write8(padapter, 0x6cc, val0x6cc);
-}
-
-static void
-btdm_SetSwFullTimeDacSwing(struct rtw_adapter *padapter, u8 bSwDacSwingOn,
- u32 swDacSwingLvl)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (bSwDacSwingOn) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SwDacSwing = 0x%x\n", swDacSwingLvl));
- PHY_SetBBReg(padapter, 0x880, 0xff000000, swDacSwingLvl);
- pHalData->bt_coexist.bSWCoexistAllOff = false;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SwDacSwing Off!\n"));
- PHY_SetBBReg(padapter, 0x880, 0xff000000, 0xc0);
- }
-}
-
-static void
-btdm_SetFwDacSwingLevel(struct rtw_adapter *padapter, u8 dacSwingLvl)
-{
- u8 H2C_Parameter[1] = {0};
-
- H2C_Parameter[0] = dacSwingLvl;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Set Dac Swing Level = 0x%x\n", dacSwingLvl));
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], write 0x29 = 0x%x\n", H2C_Parameter[0]));
-
- FillH2CCmd(padapter, 0x29, 1, H2C_Parameter);
-}
-
-static void btdm_2AntDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], Dec BT power = %s\n",
- ((bDecBtPwr) ? "ON" : "OFF")));
- pBtdm8723->bCurDecBtPwr = bDecBtPwr;
-
- if (pBtdm8723->bPreDecBtPwr == pBtdm8723->bCurDecBtPwr)
- return;
-
- BTDM_SetFwDecBtPwr(padapter, pBtdm8723->bCurDecBtPwr);
-
- pBtdm8723->bPreDecBtPwr = pBtdm8723->bCurDecBtPwr;
-}
-
-static void
-btdm_2AntFwDacSwingLvl(struct rtw_adapter *padapter, u8 fwDacSwingLvl)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], set FW Dac Swing level = %d\n", fwDacSwingLvl));
- pBtdm8723->curFwDacSwingLvl = fwDacSwingLvl;
-
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], preFwDacSwingLvl =%d, curFwDacSwingLvl =%d\n", */
- /*pBtdm8723->preFwDacSwingLvl, pBtdm8723->curFwDacSwingLvl)); */
-
- if (pBtdm8723->preFwDacSwingLvl == pBtdm8723->curFwDacSwingLvl)
- return;
-
- btdm_SetFwDacSwingLevel(padapter, pBtdm8723->curFwDacSwingLvl);
-
- pBtdm8723->preFwDacSwingLvl = pBtdm8723->curFwDacSwingLvl;
-}
-
-static void
-btdm_2AntRfShrink(struct rtw_adapter *padapter, u8 bRxRfShrinkOn)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn Rx RF Shrink = %s\n",
- ((bRxRfShrinkOn) ? "ON" : "OFF")));
- pBtdm8723->bCurRfRxLpfShrink = bRxRfShrinkOn;
-
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreRfRxLpfShrink =%d, bCurRfRxLpfShrink =%d\n", */
- /*pBtdm8723->bPreRfRxLpfShrink, pBtdm8723->bCurRfRxLpfShrink)); */
-
- if (pBtdm8723->bPreRfRxLpfShrink == pBtdm8723->bCurRfRxLpfShrink)
- return;
-
- BTDM_SetSwRfRxLpfCorner(padapter, (u8)pBtdm8723->bCurRfRxLpfShrink);
-
- pBtdm8723->bPreRfRxLpfShrink = pBtdm8723->bCurRfRxLpfShrink;
-}
-
-static void
-btdm_2AntLowPenaltyRa(struct rtw_adapter *padapter, u8 bLowPenaltyRa)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn LowPenaltyRA = %s\n",
- ((bLowPenaltyRa) ? "ON" : "OFF")));
- pBtdm8723->bCurLowPenaltyRa = bLowPenaltyRa;
-
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreLowPenaltyRa =%d, bCurLowPenaltyRa =%d\n", */
- /*pBtdm8723->bPreLowPenaltyRa, pBtdm8723->bCurLowPenaltyRa)); */
-
- if (pBtdm8723->bPreLowPenaltyRa == pBtdm8723->bCurLowPenaltyRa)
- return;
-
- BTDM_SetSwPenaltyTxRateAdaptive(padapter, (u8)pBtdm8723->bCurLowPenaltyRa);
-
- pBtdm8723->bPreLowPenaltyRa = pBtdm8723->bCurLowPenaltyRa;
-}
-
-static void
-btdm_2AntDacSwing(struct rtw_adapter *padapter,
- u8 bDacSwingOn, u32 dacSwingLvl)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn DacSwing =%s, dacSwingLvl = 0x%x\n",
- (bDacSwingOn ? "ON" : "OFF"), dacSwingLvl));
- pBtdm8723->bCurDacSwingOn = bDacSwingOn;
- pBtdm8723->curDacSwingLvl = dacSwingLvl;
-
- if ((pBtdm8723->bPreDacSwingOn == pBtdm8723->bCurDacSwingOn) &&
- (pBtdm8723->preDacSwingLvl == pBtdm8723->curDacSwingLvl))
- return;
-
- mdelay(30);
- btdm_SetSwFullTimeDacSwing(padapter, bDacSwingOn, dacSwingLvl);
-
- pBtdm8723->bPreDacSwingOn = pBtdm8723->bCurDacSwingOn;
- pBtdm8723->preDacSwingLvl = pBtdm8723->curDacSwingLvl;
-}
-
-static void btdm_2AntAdcBackOff(struct rtw_adapter *padapter, u8 bAdcBackOff)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn AdcBackOff = %s\n",
- ((bAdcBackOff) ? "ON" : "OFF")));
- pBtdm8723->bCurAdcBackOff = bAdcBackOff;
-
- if (pBtdm8723->bPreAdcBackOff == pBtdm8723->bCurAdcBackOff)
- return;
-
- BTDM_BBBackOffLevel(padapter, (u8)pBtdm8723->bCurAdcBackOff);
-
- pBtdm8723->bPreAdcBackOff = pBtdm8723->bCurAdcBackOff;
-}
-
-static void btdm_2AntAgcTable(struct rtw_adapter *padapter, u8 bAgcTableEn)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], %s Agc Table\n", ((bAgcTableEn) ? "Enable" : "Disable")));
- pBtdm8723->bCurAgcTableEn = bAgcTableEn;
-
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], bPreAgcTableEn =%d, bCurAgcTableEn =%d\n", */
- /*pBtdm8723->bPreAgcTableEn, pBtdm8723->bCurAgcTableEn)); */
-
- if (pBtdm8723->bPreAgcTableEn == pBtdm8723->bCurAgcTableEn)
- return;
-
- BTDM_AGCTable(padapter, (u8)bAgcTableEn);
-
- pBtdm8723->bPreAgcTableEn = pBtdm8723->bCurAgcTableEn;
-}
-
-static void
-btdm_2AntCoexTable(struct rtw_adapter *padapter,
- u32 val0x6c0, u32 val0x6c8, u8 val0x6cc)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], write Coex Table 0x6c0 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c0, val0x6c8, val0x6cc));
- pBtdm8723->curVal0x6c0 = val0x6c0;
- pBtdm8723->curVal0x6c8 = val0x6c8;
- pBtdm8723->curVal0x6cc = val0x6cc;
-
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", */
- /*pBtdm8723->preVal0x6c0, pBtdm8723->preVal0x6c8, pBtdm8723->preVal0x6cc)); */
- /* RTPRINT(FBT, BT_TRACE, ("[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", */
- /*pBtdm8723->curVal0x6c0, pBtdm8723->curVal0x6c8, pBtdm8723->curVal0x6cc)); */
-
- if ((pBtdm8723->preVal0x6c0 == pBtdm8723->curVal0x6c0) &&
- (pBtdm8723->preVal0x6c8 == pBtdm8723->curVal0x6c8) &&
- (pBtdm8723->preVal0x6cc == pBtdm8723->curVal0x6cc))
- return;
-
- btdm_SetCoexTable(padapter, val0x6c0, val0x6c8, val0x6cc);
-
- pBtdm8723->preVal0x6c0 = pBtdm8723->curVal0x6c0;
- pBtdm8723->preVal0x6c8 = pBtdm8723->curVal0x6c8;
- pBtdm8723->preVal0x6cc = pBtdm8723->curVal0x6cc;
-}
-
-static void btdm_2AntIgnoreWlanAct(struct rtw_adapter *padapter, u8 bEnable)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn Ignore WlanAct %s\n", (bEnable ? "ON" : "OFF")));
- pBtdm8723->bCurIgnoreWlanAct = bEnable;
-
-
- if (pBtdm8723->bPreIgnoreWlanAct == pBtdm8723->bCurIgnoreWlanAct)
- return;
-
- btdm_SetFwIgnoreWlanAct(padapter, bEnable);
- pBtdm8723->bPreIgnoreWlanAct = pBtdm8723->bCurIgnoreWlanAct;
-}
-
-static void
-btdm_2AntSetFw3a(struct rtw_adapter *padapter, u8 byte1, u8 byte2,
- u8 byte3, u8 byte4, u8 byte5)
-{
- u8 H2C_Parameter[5] = {0};
-
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* byte1[1:0] != 0 means enable pstdma */
- /* for 2Ant bt coexist, if byte1 != 0 means enable pstdma */
- if (byte1)
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- H2C_Parameter[0] = byte1;
- H2C_Parameter[1] = byte2;
- H2C_Parameter[2] = byte3;
- H2C_Parameter[3] = byte4;
- H2C_Parameter[4] = byte5;
-
- pHalData->bt_coexist.fw3aVal[0] = byte1;
- pHalData->bt_coexist.fw3aVal[1] = byte2;
- pHalData->bt_coexist.fw3aVal[2] = byte3;
- pHalData->bt_coexist.fw3aVal[3] = byte4;
- pHalData->bt_coexist.fw3aVal[4] = byte5;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], FW write 0x3a(5bytes) = 0x%x%08x\n",
- H2C_Parameter[0],
- H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
-
- FillH2CCmd(padapter, 0x3a, 5, H2C_Parameter);
-}
-
-static void btdm_2AntPsTdma(struct rtw_adapter *padapter, u8 bTurnOn, u8 type)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
- u32 btTxRxCnt = 0;
- u8 bTurnOnByCnt = false;
- u8 psTdmaTypeByCnt = 0;
-
- btTxRxCnt = BTDM_BtTxRxCounterH(padapter)+BTDM_BtTxRxCounterL(padapter);
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT TxRx Counters = %d\n", btTxRxCnt));
- if (btTxRxCnt > 3000) {
- bTurnOnByCnt = true;
- psTdmaTypeByCnt = 8;
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], For BTTxRxCounters, turn %s PS TDMA, type =%d\n",
- (bTurnOnByCnt ? "ON" : "OFF"), psTdmaTypeByCnt));
- pBtdm8723->bCurPsTdmaOn = bTurnOnByCnt;
- pBtdm8723->curPsTdma = psTdmaTypeByCnt;
- } else {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], turn %s PS TDMA, type =%d\n",
- (bTurnOn ? "ON" : "OFF"), type));
- pBtdm8723->bCurPsTdmaOn = bTurnOn;
- pBtdm8723->curPsTdma = type;
- }
-
- if ((pBtdm8723->bPrePsTdmaOn == pBtdm8723->bCurPsTdmaOn) &&
- (pBtdm8723->prePsTdma == pBtdm8723->curPsTdma))
- return;
-
- if (bTurnOn) {
- switch (type) {
- case 1:
- default:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0xa1, 0x98);
- break;
- case 2:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0xa1, 0x98);
- break;
- case 3:
- btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0xa1, 0x98);
- break;
- case 4:
- btdm_2AntSetFw3a(padapter, 0xa3, 0x5, 0x5, 0xa1, 0x80);
- break;
- case 5:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x20, 0x98);
- break;
- case 6:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0x20, 0x98);
- break;
- case 7:
- btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0x20, 0x98);
- break;
- case 8:
- btdm_2AntSetFw3a(padapter, 0xa3, 0x5, 0x5, 0x20, 0x80);
- break;
- case 9:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0xa1, 0x98);
- break;
- case 10:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0xa1, 0x98);
- break;
- case 11:
- btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0xa1, 0x98);
- break;
- case 12:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0xa1, 0x98);
- break;
- case 13:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x1a, 0x1a, 0x20, 0x98);
- break;
- case 14:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x12, 0x12, 0x20, 0x98);
- break;
- case 15:
- btdm_2AntSetFw3a(padapter, 0xe3, 0xa, 0xa, 0x20, 0x98);
- break;
- case 16:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0x20, 0x98);
- break;
- case 17:
- btdm_2AntSetFw3a(padapter, 0xa3, 0x2f, 0x2f, 0x20, 0x80);
- break;
- case 18:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x5, 0x5, 0xa1, 0x98);
- break;
- case 19:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x25, 0x25, 0xa1, 0x98);
- break;
- case 20:
- btdm_2AntSetFw3a(padapter, 0xe3, 0x25, 0x25, 0x20, 0x98);
- break;
- }
- } else {
- /* disable PS tdma */
- switch (type) {
- case 0:
- btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
- break;
- case 1:
- btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x0, 0x0);
- break;
- default:
- btdm_2AntSetFw3a(padapter, 0x0, 0x0, 0x0, 0x8, 0x0);
- break;
- }
- }
-
- /* update pre state */
- pBtdm8723->bPrePsTdmaOn = pBtdm8723->bCurPsTdmaOn;
- pBtdm8723->prePsTdma = pBtdm8723->curPsTdma;
-}
-
-static void btdm_2AntBtInquiryPage(struct rtw_adapter *padapter)
-{
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, true, 8);
-}
-
-static u8 btdm_HoldForBtInqPage(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u32 curTime = jiffies;
-
- if (pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage) {
- /* bt inquiry or page is started. */
- if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime == 0) {
- pHalData->bt_coexist.halCoex8723.btInqPageStartTime = curTime;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page is started at time : 0x%lx \n",
- pHalData->bt_coexist.halCoex8723.btInqPageStartTime));
- }
- }
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page started time : 0x%lx, curTime : 0x%x \n",
- pHalData->bt_coexist.halCoex8723.btInqPageStartTime, curTime));
-
- if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime) {
- if (((curTime - pHalData->bt_coexist.halCoex8723.btInqPageStartTime)/1000000) >= 10) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], BT Inquiry/page >= 10sec!!!"));
- pHalData->bt_coexist.halCoex8723.btInqPageStartTime = 0;
- }
- }
-
- if (pHalData->bt_coexist.halCoex8723.btInqPageStartTime) {
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, true, 8);
- return true;
- } else {
- return false;
- }
-}
-
-static u8 btdm_Is2Ant8723ACommonAction(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
- u8 bCommon = false;
-
- RTPRINT(FBT, BT_TRACE, ("%s :BTDM_IsWifiConnectionExist =%x check_fwstate =%x pmlmepriv->fw_state = 0x%x\n", __func__, BTDM_IsWifiConnectionExist(padapter), check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)), padapter->mlmepriv.fw_state));
-
- if ((!BTDM_IsWifiConnectionExist(padapter)) &&
- (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
- (BT_2ANT_BT_STATUS_IDLE == pBtdm8723->btStatus)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi idle + Bt idle!!\n"));
-
- btdm_2AntLowPenaltyRa(padapter, false);
- btdm_2AntRfShrink(padapter, false);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, false);
-
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- bCommon = true;
- } else if (((BTDM_IsWifiConnectionExist(padapter)) ||
- (check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) &&
- (BT_2ANT_BT_STATUS_IDLE == pBtdm8723->btStatus)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + BT idle!!\n"));
-
- btdm_2AntLowPenaltyRa(padapter, true);
- btdm_2AntRfShrink(padapter, false);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, true);
-
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- bCommon = true;
- } else if ((!BTDM_IsWifiConnectionExist(padapter)) &&
- (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
- (BT_2ANT_BT_STATUS_CONNECTED_IDLE == pBtdm8723->btStatus)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi idle + Bt connected idle!!\n"));
-
- btdm_2AntLowPenaltyRa(padapter, true);
- btdm_2AntRfShrink(padapter, true);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, false);
-
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- bCommon = true;
- } else if (((BTDM_IsWifiConnectionExist(padapter)) ||
- (check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) &&
- (BT_2ANT_BT_STATUS_CONNECTED_IDLE == pBtdm8723->btStatus)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + Bt connected idle!!\n"));
-
- btdm_2AntLowPenaltyRa(padapter, true);
- btdm_2AntRfShrink(padapter, true);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, true);
-
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- bCommon = true;
- } else if ((!BTDM_IsWifiConnectionExist(padapter)) &&
- (!check_fwstate(&padapter->mlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) &&
- (BT_2ANT_BT_STATUS_NON_IDLE == pBtdm8723->btStatus)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi idle + BT non-idle!!\n"));
-
- btdm_2AntLowPenaltyRa(padapter, true);
- btdm_2AntRfShrink(padapter, true);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, false);
-
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- bCommon = true;
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi non-idle + BT non-idle!!\n"));
- btdm_2AntLowPenaltyRa(padapter, true);
- btdm_2AntRfShrink(padapter, true);
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
-
- bCommon = false;
- }
- return bCommon;
-}
-
-static void
-btdm_2AntTdmaDurationAdjust(struct rtw_adapter *padapter, u8 bScoHid,
- u8 bTxPause, u8 maxInterval)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
- static s32 up, dn, m, n, WaitCount;
- s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
- u8 retryCount = 0;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TdmaDurationAdjust()\n"));
-
- if (pBtdm8723->bResetTdmaAdjust) {
- pBtdm8723->bResetTdmaAdjust = false;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
- if (bScoHid) {
- if (bTxPause) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- }
- } else {
- if (bTxPause) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- }
- }
- up = 0;
- dn = 0;
- m = 1;
- n = 3;
- result = 0;
- WaitCount = 0;
- } else {
-		/* acquire the BT TRx retry count from BT_Info byte2 */
- retryCount = pHalData->bt_coexist.halCoex8723.btRetryCnt;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
- result = 0;
- WaitCount++;
-
- if (retryCount == 0) { /* no retry in the last 2-second duration */
- up++;
- dn--;
-
- if (dn <= 0)
- dn = 0;
-
-			if (up >= n) { /* if retry count is 0 for n consecutive 2-second intervals, widen the WiFi duration */
- WaitCount = 0;
- n = 3;
- up = 0;
- dn = 0;
- result = 1;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Increase wifi duration!!\n"));
- }
- } else if (retryCount <= 3) { /* <= 3 retry in the last 2-second duration */
- up--;
- dn++;
-
- if (up <= 0)
- up = 0;
-
-			if (dn == 2) { /* if retry count < 3 for 2 consecutive 2-second intervals, narrow the WiFi duration */
- if (WaitCount <= 2)
-					m++; /* avoid bouncing back and forth between the two levels */
- else
- m = 1;
-
-				if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
- m = 20;
-
- n = 3*m;
- up = 0;
- dn = 0;
- WaitCount = 0;
- result = -1;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
- }
-		} else { /* retry count > 3: even one interval with retry count > 3 narrows the WiFi duration */
- if (WaitCount == 1)
-				m++; /* avoid bouncing back and forth between the two levels */
- else
- m = 1;
-
-			if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
- m = 20;
- n = 3*m;
- up = 0;
- dn = 0;
- WaitCount = 0;
- result = -1;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
- }
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], max Interval = %d\n", maxInterval));
- if (maxInterval == 1) {
- if (bTxPause) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 5);
- pBtdm8723->psTdmaDuAdjType = 5;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- }
- if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 13);
- pBtdm8723->psTdmaDuAdjType = 13;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
-
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- } else if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 5);
- pBtdm8723->psTdmaDuAdjType = 5;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 13);
- pBtdm8723->psTdmaDuAdjType = 13;
- }
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 1);
- pBtdm8723->psTdmaDuAdjType = 1;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- }
- if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 9);
- pBtdm8723->psTdmaDuAdjType = 9;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
-
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- } else if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 1);
- pBtdm8723->psTdmaDuAdjType = 1;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 9);
- pBtdm8723->psTdmaDuAdjType = 9;
- }
- }
- }
- } else if (maxInterval == 2) {
- if (bTxPause) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- }
- if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- } else if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 6);
- pBtdm8723->psTdmaDuAdjType = 6;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 14);
- pBtdm8723->psTdmaDuAdjType = 14;
- }
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- }
- if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- } else if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 2);
- pBtdm8723->psTdmaDuAdjType = 2;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 10);
- pBtdm8723->psTdmaDuAdjType = 10;
- }
- }
- }
- } else if (maxInterval == 3) {
- if (bTxPause) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 1\n"));
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- }
- if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 8);
- pBtdm8723->psTdmaDuAdjType = 8;
- } else if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 16);
- pBtdm8723->psTdmaDuAdjType = 16;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 7);
- pBtdm8723->psTdmaDuAdjType = 7;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 15);
- pBtdm8723->psTdmaDuAdjType = 15;
- }
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], TxPause = 0\n"));
- if (pBtdm8723->curPsTdma == 5) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 6) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 7) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 8) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- }
- if (pBtdm8723->curPsTdma == 13) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 14) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 15) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 16) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
- if (result == -1) {
- if (pBtdm8723->curPsTdma == 1) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 4);
- pBtdm8723->psTdmaDuAdjType = 4;
- } else if (pBtdm8723->curPsTdma == 9) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 12);
- pBtdm8723->psTdmaDuAdjType = 12;
- }
- } else if (result == 1) {
- if (pBtdm8723->curPsTdma == 4) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 3) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 2) {
- btdm_2AntPsTdma(padapter, true, 3);
- pBtdm8723->psTdmaDuAdjType = 3;
- } else if (pBtdm8723->curPsTdma == 12) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 11) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- } else if (pBtdm8723->curPsTdma == 10) {
- btdm_2AntPsTdma(padapter, true, 11);
- pBtdm8723->psTdmaDuAdjType = 11;
- }
- }
- }
- }
- }
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PsTdma type : recordPsTdma =%d\n", pBtdm8723->psTdmaDuAdjType));
- /* if current PsTdma not match with the recorded one (when scan, dhcp...), */
- /* then we have to adjust it back to the previous record one. */
- if (pBtdm8723->curPsTdma != pBtdm8723->psTdmaDuAdjType) {
-		RTPRINT(FBT, BT_TRACE, ("[BTCoex], PsTdma type mismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
- pBtdm8723->curPsTdma, pBtdm8723->psTdmaDuAdjType));
-
- if (!check_fwstate(&padapter->mlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING))
- btdm_2AntPsTdma(padapter, true, pBtdm8723->psTdmaDuAdjType);
- else
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
- }
-}
-
-/* default Action */
-/* SCO only or SCO+PAN(HS) */
-static void btdm_2Ant8723ASCOAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 11);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 15);
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 11);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 15);
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-static void btdm_2Ant8723AHIDAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 9);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 13);
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 9);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 13);
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
-static void btdm_2Ant8723AA2DPAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
-
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
- }
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
- }
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-static void btdm_2Ant8723APANEDRAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
-
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 2);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntPsTdma(padapter, true, 6);
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 2);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 6);
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-/* PAN(HS) only */
-static void btdm_2Ant8723APANHSAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState;
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntDecBtPwr(padapter, true);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntDecBtPwr(padapter, false);
- }
- btdm_2AntPsTdma(padapter, false, 0);
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
-
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high\n"));
- /* fw mechanism */
- btdm_2AntDecBtPwr(padapter, true);
- btdm_2AntPsTdma(padapter, false, 0);
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low\n"));
- /* fw mechanism */
- btdm_2AntDecBtPwr(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-/* PAN(EDR)+A2DP */
-static void btdm_2Ant8723APANEDRA2DPAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1, btInfoExt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
-
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- /* fw mechanism */
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 4);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 2);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- /* fw mechanism */
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 8);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 6);
- }
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- /* fw mechanism */
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 4);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 2);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- /* fw mechanism */
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 8);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 6);
- }
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-static void btdm_2Ant8723APANEDRHIDAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 10);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntPsTdma(padapter, true, 14);
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntPsTdma(padapter, true, 10);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntPsTdma(padapter, true, 14);
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-/* HID+A2DP+PAN(EDR) */
-static void btdm_2Ant8723AHIDA2DPPANEDRAction(struct rtw_adapter *padapter)
-{
- u8 btRssiState, btRssiState1, btInfoExt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 12);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 10);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 16);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 14);
- }
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 37, 0);
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 27, 0);
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 12);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 10);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntPsTdma(padapter, true, 16);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntPsTdma(padapter, true, 14);
- }
- }
-
- /* sw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
-			RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-static void btdm_2Ant8723AHIDA2DPAction(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 btRssiState, btRssiState1, btInfoExt;
-
- btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, false, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, false, 1);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, true, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, true, 1);
- }
- }
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState(padapter, 2, 27, 0);
-
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
-
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, false, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, false, 1);
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- if (btInfoExt&BIT(0)) { /* a2dp rate, 1:basic /0:edr */
- RTPRINT(FBT, BT_TRACE, ("a2dp basic rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, true, 3);
- } else {
- RTPRINT(FBT, BT_TRACE, ("a2dp edr rate \n"));
- btdm_2AntTdmaDurationAdjust(padapter, true, true, 1);
- }
- }
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-static void btdm_2Ant8723AA2dp(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 btRssiState, btRssiState1, btInfoExt;
-
- btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
-
- if (btdm_NeedToDecBtPwr(padapter))
- btdm_2AntDecBtPwr(padapter, true);
- else
- btdm_2AntDecBtPwr(padapter, false);
- /* coex table */
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
- btdm_2AntIgnoreWlanAct(padapter, false);
-
- if (BTDM_IsHT40(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("HT40\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 37, 0);
- /* fw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
- }
-
- /* sw mechanism */
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("HT20 or Legacy\n"));
- btRssiState = BTDM_CheckCoexRSSIState(padapter, 2, 47, 0);
- btRssiState1 = BTDM_CheckCoexRSSIState1(padapter, 2, 27, 0);
-
- /* fw mechanism */
- if ((btRssiState1 == BT_RSSI_STATE_HIGH) ||
- (btRssiState1 == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 high \n"));
- PlatformEFIOWrite1Byte(padapter, 0x883, 0x40);
- btdm_2AntTdmaDurationAdjust(padapter, false, false, 1);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi-1 low \n"));
- btdm_2AntTdmaDurationAdjust(padapter, false, true, 1);
- }
-
- /* sw mechanism */
- if ((btRssiState == BT_RSSI_STATE_HIGH) ||
- (btRssiState == BT_RSSI_STATE_STAY_HIGH)) {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi high \n"));
- btdm_2AntAgcTable(padapter, true);
- btdm_2AntAdcBackOff(padapter, true);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- } else {
- RTPRINT(FBT, BT_TRACE, ("Wifi rssi low \n"));
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
- }
- }
-}
-
-/* extern functions start with BTDM_ */
-static void BTDM_2AntParaInit(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 2Ant Parameter Init!!\n"));
-
- /* Enable counter statistics */
- rtl8723au_write8(padapter, 0x76e, 0x4);
- rtl8723au_write8(padapter, 0x778, 0x3);
- rtl8723au_write8(padapter, 0x40, 0x20);
-
- /* force to reset coex mechanism */
- pBtdm8723->preVal0x6c0 = 0x0;
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-
- pBtdm8723->bPrePsTdmaOn = true;
- btdm_2AntPsTdma(padapter, false, 0);
-
- pBtdm8723->preFwDacSwingLvl = 0x10;
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
-
- pBtdm8723->bPreDecBtPwr = true;
- btdm_2AntDecBtPwr(padapter, false);
-
- pBtdm8723->bPreAgcTableEn = true;
- btdm_2AntAgcTable(padapter, false);
-
- pBtdm8723->bPreAdcBackOff = true;
- btdm_2AntAdcBackOff(padapter, false);
-
- pBtdm8723->bPreLowPenaltyRa = true;
- btdm_2AntLowPenaltyRa(padapter, false);
-
- pBtdm8723->bPreRfRxLpfShrink = true;
- btdm_2AntRfShrink(padapter, false);
-
- pBtdm8723->bPreDacSwingOn = true;
- btdm_2AntDacSwing(padapter, false, 0xc0);
-
- pBtdm8723->bPreIgnoreWlanAct = true;
- btdm_2AntIgnoreWlanAct(padapter, false);
-}
-
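-/*
- * The *CoexAllOff helpers below return the hardware, firmware and software
- * coexistence mechanisms to their neutral settings.
- */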
-static void BTDM_2AntHwCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- btdm_2AntCoexTable(padapter, 0x55555555, 0xffff, 0x3);
-}
-
-static void BTDM_2AntFwCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- btdm_2AntIgnoreWlanAct(padapter, false);
- btdm_2AntPsTdma(padapter, false, 0);
- btdm_2AntFwDacSwingLvl(padapter, 0x20);
- btdm_2AntDecBtPwr(padapter, false);
-}
-
-static void BTDM_2AntSwCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- btdm_2AntAgcTable(padapter, false);
- btdm_2AntAdcBackOff(padapter, false);
- btdm_2AntLowPenaltyRa(padapter, false);
- btdm_2AntRfShrink(padapter, false);
- btdm_2AntDacSwing(padapter, false, 0xc0);
-}
-
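-/*
- * Parse the firmware C2H bt_info byte: BIT(2) flags BT inquiry/page activity,
- * BT_INFO_ACL flags an ACL link, and the SCO/HID/A2DP/FTP bits select which
- * 2-antenna coexistence algorithm to run.
- */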
-static void BTDM_2AntFwC2hBtInfo8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
- u8 btInfo = 0;
- u8 algorithm = BT_2ANT_COEX_ALGO_UNDEFINED;
- u8 bBtLinkExist = false, bBtHsModeExist = false;
-
- btInfo = pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal;
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
-
- /* check BIT2 first ==> check if bt is under inquiry or page scan */
- if (btInfo & BIT(2)) {
- if (!pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage) {
- pBtMgnt->ExtConfig.bHoldForBtOperation = true;
- pBtMgnt->ExtConfig.bHoldPeriodCnt = 1;
- btdm_2AntBtInquiryPage(padapter);
- } else {
- pBtMgnt->ExtConfig.bHoldPeriodCnt++;
- btdm_HoldForBtInqPage(padapter);
- }
- pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage = true;
-
- } else {
- pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage = false;
- pBtMgnt->ExtConfig.bHoldForBtOperation = false;
- pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
-
- }
- RTPRINT(FBT, BT_TRACE,
- ("[BTC2H], pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage =%x pBtMgnt->ExtConfig.bHoldPeriodCnt =%x pBtMgnt->ExtConfig.bHoldForBtOperation =%x\n",
- pHalData->bt_coexist.halCoex8723.bC2hBtInquiryPage,
- pBtMgnt->ExtConfig.bHoldPeriodCnt,
- pBtMgnt->ExtConfig.bHoldForBtOperation));
-
- RTPRINT(FBT, BT_TRACE,
- ("[BTC2H], btInfo =%x pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal =%x\n",
- btInfo, pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal));
- if (btInfo&BT_INFO_ACL) {
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], BTInfo: bConnect = true btInfo =%x\n", btInfo));
- bBtLinkExist = true;
- if (((btInfo&(BT_INFO_FTP|BT_INFO_A2DP|BT_INFO_HID|BT_INFO_SCO_BUSY)) != 0) ||
- pHalData->bt_coexist.halCoex8723.btRetryCnt > 0) {
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_NON_IDLE;
- } else {
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_CONNECTED_IDLE;
- }
-
- if (btInfo&BT_INFO_SCO || btInfo&BT_INFO_SCO_BUSY) {
- if (btInfo&BT_INFO_FTP || btInfo&BT_INFO_A2DP || btInfo&BT_INFO_HID) {
- switch (btInfo&0xe0) {
- case BT_INFO_HID:
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + HID\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID;
- break;
- case BT_INFO_A2DP:
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Error!!! SCO + A2DP\n"));
- break;
- case BT_INFO_FTP:
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_SCO;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- break;
- case (BT_INFO_HID | BT_INFO_A2DP):
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- break;
- case (BT_INFO_HID | BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- break;
- case (BT_INFO_A2DP | BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
- }
- break;
- case (BT_INFO_HID | BT_INFO_A2DP | BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- break;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], SCO only\n"));
- algorithm = BT_2ANT_COEX_ALGO_SCO;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], non SCO\n"));
- switch (btInfo&0xe0) {
- case BT_INFO_HID:
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID;
- break;
- case BT_INFO_A2DP:
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP\n"));
- algorithm = BT_2ANT_COEX_ALGO_A2DP;
- break;
- case BT_INFO_FTP:
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- break;
- case (BT_INFO_HID | BT_INFO_A2DP):
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- break;
- case (BT_INFO_HID|BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_HID;
- }
- break;
- case (BT_INFO_A2DP|BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_PANEDR_A2DP;
- }
- break;
- case (BT_INFO_HID|BT_INFO_A2DP|BT_INFO_FTP):
- if (bBtHsModeExist) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
- algorithm = BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- break;
- }
-
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], BTInfo: bConnect = false\n"));
- pBtdm8723->btStatus = BT_2ANT_BT_STATUS_IDLE;
- }
-
- pBtdm8723->curAlgorithm = algorithm;
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Algorithm = %d \n", pBtdm8723->curAlgorithm));
-
- BTDM_CheckWiFiState(padapter);
- if (pBtMgnt->ExtConfig.bManualControl) {
- RTPRINT(FBT, BT_TRACE, ("Action Manual control, won't execute bt coexist mechanism!!\n"));
- return;
- }
-}
-
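-/*
- * Main 2-antenna coexistence entry: when the BT stack reports its profiles
- * (BTDM_BtProfileSupport), the algorithm comes from btdm_ActionAlgorithm();
- * otherwise it falls back to the firmware bt_info report.  Either way the
- * matching per-profile action handler is dispatched.
- */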
-void BTDM_2AntBtCoexist8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
- u8 btInfoOriginal = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct btdm_8723a_2ant *pBtdm8723 = &pHalData->bt_coexist.halCoex8723.btdm2Ant;
-
- if (BTDM_BtProfileSupport(padapter)) {
- if (pBtMgnt->ExtConfig.bHoldForBtOperation) {
- RTPRINT(FBT, BT_TRACE, ("Action for BT Operation adjust!!\n"));
- return;
- }
- if (pBtMgnt->ExtConfig.bHoldPeriodCnt) {
- RTPRINT(FBT, BT_TRACE, ("Hold BT inquiry/page scan setting (cnt = %d)!!\n",
- pBtMgnt->ExtConfig.bHoldPeriodCnt));
- if (pBtMgnt->ExtConfig.bHoldPeriodCnt >= 11) {
- pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
- /* next time the coexist parameters should be reset again. */
- } else {
- pBtMgnt->ExtConfig.bHoldPeriodCnt++;
- }
- return;
- }
-
- if (pBtDbg->dbgCtrl)
- RTPRINT(FBT, BT_TRACE, ("[Dbg control], "));
-
- pBtdm8723->curAlgorithm = btdm_ActionAlgorithm(padapter);
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Algorithm = %d \n", pBtdm8723->curAlgorithm));
-
- if (btdm_Is2Ant8723ACommonAction(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant common.\n"));
- pBtdm8723->bResetTdmaAdjust = true;
- } else {
- if (pBtdm8723->curAlgorithm != pBtdm8723->preAlgorithm) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
- pBtdm8723->preAlgorithm, pBtdm8723->curAlgorithm));
- pBtdm8723->bResetTdmaAdjust = true;
- }
- switch (pBtdm8723->curAlgorithm) {
- case BT_2ANT_COEX_ALGO_SCO:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = SCO.\n"));
- btdm_2Ant8723ASCOAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID.\n"));
- btdm_2Ant8723AHIDAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = A2DP.\n"));
- btdm_2Ant8723AA2DPAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR).\n"));
- btdm_2Ant8723APANEDRAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANHS:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HS mode.\n"));
- btdm_2Ant8723APANHSAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN+A2DP.\n"));
- btdm_2Ant8723APANEDRA2DPAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR_HID:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
- btdm_2Ant8723APANEDRHIDAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
- btdm_2Ant8723AHIDA2DPPANEDRAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP.\n"));
- btdm_2Ant8723AHIDA2DPAction(padapter);
- break;
- default:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = 0.\n"));
- btdm_2Ant8723AA2DPAction(padapter);
- break;
- }
- pBtdm8723->preAlgorithm = pBtdm8723->curAlgorithm;
- }
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex] Get bt info by fw!!\n"));
-		/* log whether the c2h rsp for bt_info has been received yet */
- if (pHalData->bt_coexist.halCoex8723.bC2hBtInfoReqSent)
- RTPRINT(FBT, BT_TRACE, ("[BTCoex] c2h for btInfo not rcvd yet!!\n"));
-
- btInfoOriginal = pHalData->bt_coexist.halCoex8723.c2hBtInfoOriginal;
-
- if (pBtMgnt->ExtConfig.bHoldForBtOperation) {
- RTPRINT(FBT, BT_TRACE, ("Action for BT Operation adjust!!\n"));
- return;
- }
- if (pBtMgnt->ExtConfig.bHoldPeriodCnt) {
- RTPRINT(FBT, BT_TRACE,
- ("Hold BT inquiry/page scan setting (cnt = %d)!!\n",
- pBtMgnt->ExtConfig.bHoldPeriodCnt));
- if (pBtMgnt->ExtConfig.bHoldPeriodCnt >= 11) {
- pBtMgnt->ExtConfig.bHoldPeriodCnt = 0;
- /* next time the coexist parameters should be reset again. */
- } else {
- pBtMgnt->ExtConfig.bHoldPeriodCnt++;
- }
- return;
- }
-
- if (pBtDbg->dbgCtrl)
- RTPRINT(FBT, BT_TRACE, ("[Dbg control], "));
- if (btdm_Is2Ant8723ACommonAction(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant common.\n"));
- pBtdm8723->bResetTdmaAdjust = true;
- } else {
- if (pBtdm8723->curAlgorithm != pBtdm8723->preAlgorithm) {
- RTPRINT(FBT, BT_TRACE,
- ("[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
- pBtdm8723->preAlgorithm,
- pBtdm8723->curAlgorithm));
- pBtdm8723->bResetTdmaAdjust = true;
- }
- switch (pBtdm8723->curAlgorithm) {
- case BT_2ANT_COEX_ALGO_SCO:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = SCO.\n"));
- btdm_2Ant8723ASCOAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID.\n"));
- btdm_2Ant8723AHIDAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = A2DP.\n"));
- btdm_2Ant8723AA2dp(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR).\n"));
- btdm_2Ant8723APANEDRAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANHS:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HS mode.\n"));
- btdm_2Ant8723APANHSAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN+A2DP.\n"));
- btdm_2Ant8723APANEDRA2DPAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_PANEDR_HID:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
- btdm_2Ant8723APANEDRHIDAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
- btdm_2Ant8723AHIDA2DPPANEDRAction(padapter);
- break;
- case BT_2ANT_COEX_ALGO_HID_A2DP:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = HID+A2DP.\n"));
- btdm_2Ant8723AHIDA2DPAction(padapter);
- break;
- default:
- RTPRINT(FBT, BT_TRACE, ("Action 2-Ant, algorithm = 0.\n"));
- btdm_2Ant8723AA2DPAction(padapter);
- break;
- }
- pBtdm8723->preAlgorithm = pBtdm8723->curAlgorithm;
- }
- }
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc8723.c ===== */
-
-static u8 btCoexDbgBuf[BT_TMP_BUF_SIZE];
-
-static const char *const BtProfileString[] = {
- "NONE",
- "A2DP",
- "PAN",
- "HID",
- "SCO",
-};
-
-static const char *const BtSpecString[] = {
- "1.0b",
- "1.1",
- "1.2",
- "2.0+EDR",
- "2.1+EDR",
- "3.0+HS",
- "4.0",
-};
-
-static const char *const BtLinkRoleString[] = {
- "Master",
- "Slave",
-};
-
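-/* Report Ant_x2 only when both the PG setting and the detected total antenna
- * number are two; otherwise fall back to the single-antenna mechanism.
- */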
-static u8 btdm_BtWifiAntNum(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- if (Ant_x2 == pHalData->bt_coexist.BT_Ant_Num) {
- if (Ant_x2 == pBtCoex->TotalAntNum)
- return Ant_x2;
- else
- return Ant_x1;
-	} else {
-		return Ant_x1;
-	}
-}
-
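-/*
- * Snapshot the BT high/low priority TX/RX counters from
- * REG_HIGH_PRIORITY_TXRX / REG_LOW_PRIORITY_TXRX, cache them in hal data and
- * reset the hardware counters.
- */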
-static void btdm_BtHwCountersMonitor(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u32 regHPTxRx, regLPTxRx, u4Tmp;
- u32 regHPTx = 0, regHPRx = 0, regLPTx = 0, regLPRx = 0;
-
- regHPTxRx = REG_HIGH_PRIORITY_TXRX;
- regLPTxRx = REG_LOW_PRIORITY_TXRX;
-
- u4Tmp = rtl8723au_read32(padapter, regHPTxRx);
- regHPTx = u4Tmp & bMaskLWord;
- regHPRx = (u4Tmp & bMaskHWord)>>16;
-
- u4Tmp = rtl8723au_read32(padapter, regLPTxRx);
- regLPTx = u4Tmp & bMaskLWord;
- regLPRx = (u4Tmp & bMaskHWord)>>16;
-
- pHalData->bt_coexist.halCoex8723.highPriorityTx = regHPTx;
- pHalData->bt_coexist.halCoex8723.highPriorityRx = regHPRx;
- pHalData->bt_coexist.halCoex8723.lowPriorityTx = regLPTx;
- pHalData->bt_coexist.halCoex8723.lowPriorityRx = regLPRx;
-
- RTPRINT(FBT, BT_TRACE, ("High Priority Tx/Rx = %d / %d\n", regHPTx, regHPRx));
- RTPRINT(FBT, BT_TRACE, ("Low Priority Tx/Rx = %d / %d\n", regLPTx, regLPRx));
-
- /* reset counter */
- rtl8723au_write8(padapter, 0x76e, 0xc);
-}
-
-/* This function checks whether 8723A BT is disabled */
-static void btdm_BtEnableDisableCheck8723A(struct rtw_adapter *padapter)
-{
- u8 btAlife = true;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
-#ifdef CHECK_BT_EXIST_FROM_REG
- u8 val8;
-
-	/* 0x68[28] = 1 => BT enabled; otherwise disabled */
- val8 = rtl8723au_read8(padapter, 0x6B);
- if (!(val8 & BIT(4)))
- btAlife = false;
-
- if (btAlife)
- pHalData->bt_coexist.bCurBtDisabled = false;
- else
- pHalData->bt_coexist.bCurBtDisabled = true;
-#else
- if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0 &&
- pHalData->bt_coexist.halCoex8723.highPriorityRx == 0 &&
- pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0 &&
- pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0)
- btAlife = false;
- if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0xeaea &&
- pHalData->bt_coexist.halCoex8723.highPriorityRx == 0xeaea &&
- pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0xeaea &&
- pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0xeaea)
- btAlife = false;
- if (pHalData->bt_coexist.halCoex8723.highPriorityTx == 0xffff &&
- pHalData->bt_coexist.halCoex8723.highPriorityRx == 0xffff &&
- pHalData->bt_coexist.halCoex8723.lowPriorityTx == 0xffff &&
- pHalData->bt_coexist.halCoex8723.lowPriorityRx == 0xffff)
- btAlife = false;
- if (btAlife) {
- pHalData->bt_coexist.btActiveZeroCnt = 0;
- pHalData->bt_coexist.bCurBtDisabled = false;
- RTPRINT(FBT, BT_TRACE, ("8723A BT is enabled !!\n"));
- } else {
- pHalData->bt_coexist.btActiveZeroCnt++;
- RTPRINT(FBT, BT_TRACE, ("8723A bt all counters = 0, %d times!!\n",
- pHalData->bt_coexist.btActiveZeroCnt));
- if (pHalData->bt_coexist.btActiveZeroCnt >= 2) {
- pHalData->bt_coexist.bCurBtDisabled = true;
- RTPRINT(FBT, BT_TRACE, ("8723A BT is disabled !!\n"));
- }
- }
-#endif
-
- if (!pHalData->bt_coexist.bCurBtDisabled) {
- if (BTDM_IsWifiConnectionExist(padapter))
- BTDM_SetFwChnlInfo(padapter, RT_MEDIA_CONNECT);
- else
- BTDM_SetFwChnlInfo(padapter, RT_MEDIA_DISCONNECT);
- }
-
- if (pHalData->bt_coexist.bPreBtDisabled !=
- pHalData->bt_coexist.bCurBtDisabled) {
- RTPRINT(FBT, BT_TRACE, ("8723A BT is from %s to %s!!\n",
- (pHalData->bt_coexist.bPreBtDisabled ? "disabled":"enabled"),
- (pHalData->bt_coexist.bCurBtDisabled ? "disabled":"enabled")));
- pHalData->bt_coexist.bPreBtDisabled = pHalData->bt_coexist.bCurBtDisabled;
- }
-}
-
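-/*
- * Pick the 1-antenna or 2-antenna coexistence mechanism based on the antenna
- * count, then log any change in the coexistence state bitmap.
- */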
-static void btdm_BTCoexist8723AHandler(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x2) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 2 Ant mechanism\n"));
- BTDM_2AntBtCoexist8723A(padapter);
- } else {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], 1 Ant mechanism\n"));
- BTDM_1AntBtCoexist8723A(padapter);
- }
-
- if (!BTDM_IsSameCoexistState(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Coexist State[bitMap] change from 0x%"i64fmt"x to 0x%"i64fmt"x\n",
- pHalData->bt_coexist.PreviousState,
- pHalData->bt_coexist.CurrentState));
- pHalData->bt_coexist.PreviousState = pHalData->bt_coexist.CurrentState;
-
- RTPRINT(FBT, BT_TRACE, ("["));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT30)
- RTPRINT(FBT, BT_TRACE, ("BT 3.0, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_HT20)
- RTPRINT(FBT, BT_TRACE, ("HT20, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_HT40)
- RTPRINT(FBT, BT_TRACE, ("HT40, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_LEGACY)
- RTPRINT(FBT, BT_TRACE, ("Legacy, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_LOW)
- RTPRINT(FBT, BT_TRACE, ("Rssi_Low, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_MEDIUM)
- RTPRINT(FBT, BT_TRACE, ("Rssi_Mid, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_RSSI_HIGH)
- RTPRINT(FBT, BT_TRACE, ("Rssi_High, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_IDLE)
- RTPRINT(FBT, BT_TRACE, ("Wifi_Idle, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_UPLINK)
- RTPRINT(FBT, BT_TRACE, ("Wifi_Uplink, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_WIFI_DOWNLINK)
- RTPRINT(FBT, BT_TRACE, ("Wifi_Downlink, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_IDLE)
- RTPRINT(FBT, BT_TRACE, ("BT_idle, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_HID)
- RTPRINT(FBT, BT_TRACE, ("PRO_HID, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_A2DP)
- RTPRINT(FBT, BT_TRACE, ("PRO_A2DP, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_PAN)
- RTPRINT(FBT, BT_TRACE, ("PRO_PAN, "));
- if (pHalData->bt_coexist.CurrentState & BT_COEX_STATE_PROFILE_SCO)
- RTPRINT(FBT, BT_TRACE, ("PRO_SCO, "));
- RTPRINT(FBT, BT_TRACE, ("]\n"));
- }
-}
-
-/* extern functions start with BTDM_ */
-u32 BTDM_BtTxRxCounterH(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u32 counters;
-
- counters = pHalData->bt_coexist.halCoex8723.highPriorityTx+
- pHalData->bt_coexist.halCoex8723.highPriorityRx;
- return counters;
-}
-
-u32 BTDM_BtTxRxCounterL(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u32 counters;
-
- counters = pHalData->bt_coexist.halCoex8723.lowPriorityTx+
- pHalData->bt_coexist.halCoex8723.lowPriorityRx;
- return counters;
-}
-
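-/*
- * H2C 0x19 carries three bytes: connect state, the channel to protect (the
- * WiFi center channel, adjusted by 2 for the HT40 secondary channel offset,
- * or the BT HS channel) and the bandwidth byte (0x30 = HT40, 0x20 = HT20/legacy).
- */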
-void BTDM_SetFwChnlInfo(struct rtw_adapter *padapter, enum rt_media_status mstatus)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 H2C_Parameter[3] = {0};
- u8 chnl;
-
- /* opMode */
- if (RT_MEDIA_CONNECT == mstatus)
- H2C_Parameter[0] = 0x1; /* 0: disconnected, 1:connected */
-
- if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE)) {
- /* channel */
- chnl = pmlmeext->cur_channel;
- if (BTDM_IsHT40(padapter)) {
- if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- chnl -= 2;
- else if (pmlmeext->cur_ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- chnl += 2;
- }
- H2C_Parameter[1] = chnl;
-	} else { /* check if an HS link exists */
- /* channel */
- if (BT_Operation(padapter))
- H2C_Parameter[1] = pBtMgnt->BTChannel;
- else
- H2C_Parameter[1] = pmlmeext->cur_channel;
- }
-
- if (BTDM_IsHT40(padapter))
- H2C_Parameter[2] = 0x30;
- else
- H2C_Parameter[2] = 0x20;
-
- FillH2CCmd(padapter, 0x19, 3, H2C_Parameter);
-}
-
-u8 BTDM_IsWifiConnectionExist(struct rtw_adapter *padapter)
-{
- u8 bRet = false;
-
- if (BTHCI_HsConnectionEstablished(padapter))
- bRet = true;
-
- if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE) == true)
- bRet = true;
-
- return bRet;
-}
-
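-/*
- * H2C 0x3a carries the five PS-TDMA parameter bytes.  When BT owns antenna 1
- * and the interface runs in softap mode with a busy BT link, byte1/byte5 are
- * patched before the command is sent.
- */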
-void BTDM_SetFw3a(
- struct rtw_adapter *padapter,
- u8 byte1,
- u8 byte2,
- u8 byte3,
- u8 byte4,
- u8 byte5
- )
-{
- u8 H2C_Parameter[5] = {0};
-
- if (rtl8723a_BT_using_antenna_1(padapter)) {
- if ((!check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE)) &&
- (get_fwstate(&padapter->mlmepriv) != WIFI_NULL_STATE)) {
- /* for softap mode */
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
- u8 BtState = pBtCoex->c2hBtInfo;
-
- if ((BtState != BT_INFO_STATE_NO_CONNECTION) &&
- (BtState != BT_INFO_STATE_CONNECT_IDLE)) {
- if (byte1 & BIT(4)) {
- byte1 &= ~BIT(4);
- byte1 |= BIT(5);
- }
-
- byte5 |= BIT(5);
- if (byte5 & BIT(6))
- byte5 &= ~BIT(6);
- }
- }
- }
-
- H2C_Parameter[0] = byte1;
- H2C_Parameter[1] = byte2;
- H2C_Parameter[2] = byte3;
- H2C_Parameter[3] = byte4;
- H2C_Parameter[4] = byte5;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], FW write 0x3a(5bytes) = 0x%02x%08x\n",
- H2C_Parameter[0],
- H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
-
- FillH2CCmd(padapter, 0x3a, 5, H2C_Parameter);
-}
-
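-/*
- * Ask firmware for a bt_info report via H2C 0x38; bC2hBtInfoReqSent tracks
- * whether the previous request has been answered yet.
- */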
-void BTDM_QueryBtInformation(struct rtw_adapter *padapter)
-{
- u8 H2C_Parameter[1] = {0};
- struct hal_data_8723a *pHalData;
- struct bt_coexist_8723a *pBtCoex;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- if (!rtl8723a_BT_enabled(padapter)) {
- pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
- pBtCoex->bC2hBtInfoReqSent = false;
- return;
- }
-
- if (pBtCoex->c2hBtInfo == BT_INFO_STATE_DISABLED)
- pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
-
- if (pBtCoex->bC2hBtInfoReqSent == true)
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], didn't recv previous BtInfo report!\n"));
- else
- pBtCoex->bC2hBtInfoReqSent = true;
-
- H2C_Parameter[0] |= BIT(0); /* trigger */
-
-/*RTPRINT(FBT, BT_TRACE, ("[BTCoex], Query Bt information, write 0x38 = 0x%x\n", */
-/*H2C_Parameter[0])); */
-
- FillH2CCmd(padapter, 0x38, 1, H2C_Parameter);
-}
-
-void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter *padapter, u8 type)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
- /* Shrink RF Rx LPF corner */
- RTPRINT(FBT, BT_TRACE, ("Shrink RF Rx LPF corner!!\n"));
- PHY_SetRFReg(padapter, PathA, 0x1e, bRFRegOffsetMask, 0xf0ff7);
- pHalData->bt_coexist.bSWCoexistAllOff = false;
- } else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
- /* Resume RF Rx LPF corner */
- RTPRINT(FBT, BT_TRACE, ("Resume RF Rx LPF corner!!\n"));
- PHY_SetRFReg(padapter, PathA, 0x1e, bRFRegOffsetMask, pHalData->bt_coexist.BtRfRegOrigin1E);
- }
-}
-
-void
-BTDM_SetSwPenaltyTxRateAdaptive(
- struct rtw_adapter *padapter,
- u8 raType
- )
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 tmpU1;
-
- tmpU1 = rtl8723au_read8(padapter, 0x4fd);
- tmpU1 |= BIT(0);
- if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == raType) {
- tmpU1 &= ~BIT(2);
- pHalData->bt_coexist.bSWCoexistAllOff = false;
- } else if (BT_TX_RATE_ADAPTIVE_NORMAL == raType) {
- tmpU1 |= BIT(2);
- }
-
- rtl8723au_write8(padapter, 0x4fd, tmpU1);
-}
-
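-/* H2C 0x21 with BIT(1) set asks firmware to decrease BT TX power. */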
-void BTDM_SetFwDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[1] = {0};
-
- H2C_Parameter[0] = 0;
-
- if (bDecBtPwr) {
- H2C_Parameter[0] |= BIT(1);
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], decrease Bt Power : %s, write 0x21 = 0x%x\n",
- (bDecBtPwr ? "Yes!!" : "No!!"), H2C_Parameter[0]));
-
- FillH2CCmd(padapter, 0x21, 1, H2C_Parameter);
-}
-
-u8 BTDM_BtProfileSupport(struct rtw_adapter *padapter)
-{
- u8 bRet = false;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pBtMgnt->bSupportProfile &&
- !pHalData->bt_coexist.halCoex8723.bForceFwBtInfo)
- bRet = true;
-
- return bRet;
-}
-
-static void BTDM_AdjustForBtOperation8723A(struct rtw_adapter *padapter)
-{
- /* BTDM_2AntAdjustForBtOperation8723(padapter); */
-}
-
-static void BTDM_FwC2hBtRssi8723A(struct rtw_adapter *padapter, u8 *tmpBuf)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 percent, u1tmp;
-
- u1tmp = tmpBuf[0];
- percent = u1tmp*2+10;
-
- pHalData->bt_coexist.halCoex8723.btRssi = percent;
-/*RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT RSSI =%d\n", percent)); */
-}
-
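-/*
- * C2H bt_info payload: byte 0 is the raw state, byte 1 the BT retry count,
- * byte 2 the raw RSSI (converted to percent = raw * 2 + 10) and bit 0 of
- * byte 3 the A2DP basic-rate flag.
- */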
-void
-rtl8723a_fw_c2h_BT_info(struct rtw_adapter *padapter, u8 *tmpBuf, u8 length)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_coexist_8723a *pBtCoex;
- u8 i;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- pBtCoex->bC2hBtInfoReqSent = false;
-
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT info[%d]=[", length));
-
- pBtCoex->btRetryCnt = 0;
- for (i = 0; i < length; i++) {
- switch (i) {
- case 0:
- pBtCoex->c2hBtInfoOriginal = tmpBuf[i];
- break;
- case 1:
- pBtCoex->btRetryCnt = tmpBuf[i];
- break;
- case 2:
- BTDM_FwC2hBtRssi8723A(padapter, &tmpBuf[i]);
- break;
- case 3:
- pBtCoex->btInfoExt = tmpBuf[i]&BIT(0);
- break;
- }
-
- if (i == length-1)
- RTPRINT(FBT, BT_TRACE, ("0x%02x]\n", tmpBuf[i]));
- else
- RTPRINT(FBT, BT_TRACE, ("0x%02x, ", tmpBuf[i]));
- }
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], BT RSSI =%d\n", pBtCoex->btRssi));
- if (pBtCoex->btInfoExt)
- RTPRINT(FBT, BT_TRACE, ("[BTC2H], pBtCoex->btInfoExt =%x\n", pBtCoex->btInfoExt));
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntFwC2hBtInfo8723A(padapter);
- else
- BTDM_2AntFwC2hBtInfo8723A(padapter);
-
- if (pBtMgnt->ExtConfig.bManualControl) {
- RTPRINT(FBT, BT_TRACE, ("%s: Action Manual control!!\n", __func__));
- return;
- }
-
- btdm_BTCoexist8723AHandler(padapter);
-}
-
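-/*
- * Dump the current coexistence state (antenna configuration, BT profiles,
- * SW/FW/HW mechanism status and the relevant registers) through DCMD_Printf.
- */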
-static void BTDM_Display8723ABtCoexInfo(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- u8 u1Tmp, u1Tmp1, u1Tmp2, i, btInfoExt, psTdmaCase = 0;
- u32 u4Tmp[4];
- u8 antNum = Ant_x2;
-
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
- DCMD_Printf(btCoexDbgBuf);
-
- if (!rtl8723a_BT_coexist(padapter)) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
- DCMD_Printf(btCoexDbgBuf);
- return;
- }
-
- antNum = btdm_BtWifiAntNum(padapter);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/%d ", "Ant mechanism PG/Now run :", \
- ((pHalData->bt_coexist.BT_Ant_Num == Ant_x2) ? 2 : 1), ((antNum == Ant_x2) ? 2 : 1));
- DCMD_Printf(btCoexDbgBuf);
-
- if (pBtMgnt->ExtConfig.bManualControl) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
- DCMD_Printf(btCoexDbgBuf);
- } else {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
- ((pBtMgnt->bSupportProfile) ? "Yes" : "No"), pBtMgnt->ExtConfig.HCIExtensionVer);
- DCMD_Printf(btCoexDbgBuf);
- }
-
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = / %d", "Dot11 channel / BT channel", \
- pBtMgnt->BTChannel);
- DCMD_Printf(btCoexDbgBuf);
-
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = %d / %d / %d", "Wifi/BT/HS rssi", \
- BTDM_GetRxSS(padapter),
- pHalData->bt_coexist.halCoex8723.btRssi,
- pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB);
- DCMD_Printf(btCoexDbgBuf);
-
- if (!pBtMgnt->ExtConfig.bManualControl) {
-		snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\n %-35s = %s / %s ", "Wifi status",
- ((BTDM_Legacy(padapter)) ? "Legacy" : (((BTDM_IsHT40(padapter)) ? "HT40" : "HT20"))),
- ((!BTDM_IsWifiBusy(padapter)) ? "idle" : ((BTDM_IsWifiUplink(padapter)) ? "uplink" : "downlink")));
- DCMD_Printf(btCoexDbgBuf);
-
- if (pBtMgnt->bSupportProfile) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
- ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_SCO)) ? 1 : 0),
- ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID)) ? 1 : 0),
- ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) ? 1 : 0),
- ((BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) ? 1 : 0));
- DCMD_Printf(btCoexDbgBuf);
-
- for (i = 0; i < pBtMgnt->ExtConfig.NumberOfHandle; i++) {
- if (pBtMgnt->ExtConfig.HCIExtensionVer >= 1) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %s", "Bt link type/spec/role",
- BtProfileString[pBtMgnt->ExtConfig.linkInfo[i].BTProfile],
- BtSpecString[pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec],
- BtLinkRoleString[pBtMgnt->ExtConfig.linkInfo[i].linkRole]);
- DCMD_Printf(btCoexDbgBuf);
-
- btInfoExt = pHalData->bt_coexist.halCoex8723.btInfoExt;
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "A2DP rate", \
- (btInfoExt & BIT(0)) ?
- "Basic rate" : "EDR rate");
- DCMD_Printf(btCoexDbgBuf);
- } else {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s", "Bt link type/spec", \
- BtProfileString[pBtMgnt->ExtConfig.linkInfo[i].BTProfile],
- BtSpecString[pBtMgnt->ExtConfig.linkInfo[i].BTCoreSpec]);
- DCMD_Printf(btCoexDbgBuf);
- }
- }
- }
- }
-
- /* Sw mechanism */
- if (!pBtMgnt->ExtConfig.bManualControl) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw BT Coex mechanism]============");
- DCMD_Printf(btCoexDbgBuf);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "AGC Table", \
- pBtCoex->btdm2Ant.bCurAgcTableEn);
- DCMD_Printf(btCoexDbgBuf);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "ADC Backoff", \
- pBtCoex->btdm2Ant.bCurAdcBackOff);
- DCMD_Printf(btCoexDbgBuf);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Low penalty RA", \
- pBtCoex->btdm2Ant.bCurLowPenaltyRa);
- DCMD_Printf(btCoexDbgBuf);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "RF Rx LPF Shrink", \
- pBtCoex->btdm2Ant.bCurRfRxLpfShrink);
- DCMD_Printf(btCoexDbgBuf);
- }
- u4Tmp[0] = PHY_QueryRFReg(padapter, PathA, 0x1e, 0xff0);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "RF-A, 0x1e[11:4]/original val", \
- u4Tmp[0], pHalData->bt_coexist.BtRfRegOrigin1E);
- DCMD_Printf(btCoexDbgBuf);
-
- /* Fw mechanism */
- if (!pBtMgnt->ExtConfig.bManualControl) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw BT Coex mechanism]============");
- DCMD_Printf(btCoexDbgBuf);
- }
- if (!pBtMgnt->ExtConfig.bManualControl) {
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm1Ant.curPsTdma;
- else
- psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm2Ant.curPsTdma;
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %*ph case-%d",
- "PS TDMA(0x3a)", 5, pHalData->bt_coexist.fw3aVal, psTdmaCase);
- DCMD_Printf(btCoexDbgBuf);
-
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Decrease Bt Power", \
- pBtCoex->btdm2Ant.bCurDecBtPwr);
- DCMD_Printf(btCoexDbgBuf);
- }
- u1Tmp = rtl8723au_read8(padapter, 0x778);
- u1Tmp1 = rtl8723au_read8(padapter, 0x783);
- u1Tmp2 = rtl8723au_read8(padapter, 0x796);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
- u1Tmp, u1Tmp1, u1Tmp2);
- DCMD_Printf(btCoexDbgBuf);
-
- if (!pBtMgnt->ExtConfig.bManualControl) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x / 0x%x", "Sw DacSwing Ctrl/Val", \
- pBtCoex->btdm2Ant.bCurDacSwingOn, pBtCoex->btdm2Ant.curDacSwingLvl);
- DCMD_Printf(btCoexDbgBuf);
- }
- u4Tmp[0] = rtl8723au_read32(padapter, 0x880);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
- u4Tmp[0]);
- DCMD_Printf(btCoexDbgBuf);
-
- /* Hw mechanism */
- if (!pBtMgnt->ExtConfig.bManualControl) {
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw BT Coex mechanism]============");
- DCMD_Printf(btCoexDbgBuf);
- }
-
- u1Tmp = rtl8723au_read8(padapter, 0x40);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
- u1Tmp);
- DCMD_Printf(btCoexDbgBuf);
-
- u4Tmp[0] = rtl8723au_read32(padapter, 0x550);
- u1Tmp = rtl8723au_read8(padapter, 0x522);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x", "0x550(bcn contrl)/0x522", \
- u4Tmp[0], u1Tmp);
- DCMD_Printf(btCoexDbgBuf);
-
- u4Tmp[0] = rtl8723au_read32(padapter, 0x484);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
- u4Tmp[0]);
- DCMD_Printf(btCoexDbgBuf);
-
- u4Tmp[0] = rtl8723au_read32(padapter, 0x50);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
- u4Tmp[0]);
- DCMD_Printf(btCoexDbgBuf);
-
- u4Tmp[0] = rtl8723au_read32(padapter, 0xda0);
- u4Tmp[1] = rtl8723au_read32(padapter, 0xda4);
- u4Tmp[2] = rtl8723au_read32(padapter, 0xda8);
- u4Tmp[3] = rtl8723au_read32(padapter, 0xdac);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
- u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
- DCMD_Printf(btCoexDbgBuf);
-
- u4Tmp[0] = rtl8723au_read32(padapter, 0x6c0);
- u4Tmp[1] = rtl8723au_read32(padapter, 0x6c4);
- u4Tmp[2] = rtl8723au_read32(padapter, 0x6c8);
- u1Tmp = rtl8723au_read8(padapter, 0x6cc);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
- u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp);
- DCMD_Printf(btCoexDbgBuf);
-
- /* u4Tmp = rtl8723au_read32(padapter, 0x770); */
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d", "0x770(Hi pri Rx[31:16]/Tx[15:0])", \
- pHalData->bt_coexist.halCoex8723.highPriorityRx,
- pHalData->bt_coexist.halCoex8723.highPriorityTx);
- DCMD_Printf(btCoexDbgBuf);
- /* u4Tmp = rtl8723au_read32(padapter, 0x774); */
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d", "0x774(Lo pri Rx[31:16]/Tx[15:0])", \
- pHalData->bt_coexist.halCoex8723.lowPriorityRx,
- pHalData->bt_coexist.halCoex8723.lowPriorityTx);
- DCMD_Printf(btCoexDbgBuf);
-
-	/* Check whether the Tx mgnt queue hangs: 0x41b should read 0xf; e.g. 0xd indicates a hang */
- u1Tmp = rtl8723au_read8(padapter, 0x41b);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (hang chk == 0xf)", \
- u1Tmp);
- DCMD_Printf(btCoexDbgBuf);
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "lastHMEBoxNum", \
- pHalData->LastHMEBoxNum);
- DCMD_Printf(btCoexDbgBuf);
-}
-
-static void
-BTDM_8723ASignalCompensation(struct rtw_adapter *padapter,
- u8 *rssi_wifi, u8 *rssi_bt)
-{
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntSignalCompensation(padapter, rssi_wifi, rssi_bt);
-}
-
-static void BTDM_8723AInit(struct rtw_adapter *padapter)
-{
- if (btdm_BtWifiAntNum(padapter) == Ant_x2)
- BTDM_2AntParaInit(padapter);
- else
- BTDM_1AntParaInit(padapter);
-}
-
-static void BTDM_HWCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x2)
- BTDM_2AntHwCoexAllOff8723A(padapter);
-}
-
-static void BTDM_FWCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x2)
- BTDM_2AntFwCoexAllOff8723A(padapter);
-}
-
-static void BTDM_SWCoexAllOff8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x2)
- BTDM_2AntSwCoexAllOff8723A(padapter);
-}
-
-static void
-BTDM_Set8723ABtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct bt_coexist_8723a *pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- if (antNum == 1)
- pBtCoex->TotalAntNum = Ant_x1;
- else if (antNum == 2)
- pBtCoex->TotalAntNum = Ant_x2;
-}
-
-void rtl8723a_BT_lps_leave(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntLpsLeave(padapter);
-}
-
-static void BTDM_ForHalt8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntForHalt(padapter);
-}
-
-static void BTDM_WifiScanNotify8723A(struct rtw_adapter *padapter, u8 scanType)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntWifiScanNotify(padapter, scanType);
-}
-
-static void
-BTDM_WifiAssociateNotify8723A(struct rtw_adapter *padapter, u8 action)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntWifiAssociateNotify(padapter, action);
-}
-
-static void
-BTDM_MediaStatusNotify8723A(struct rtw_adapter *padapter,
- enum rt_media_status mstatus)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], MediaStatusNotify, %s\n",
- mstatus?"connect":"disconnect"));
-
- BTDM_SetFwChnlInfo(padapter, mstatus);
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntMediaStatusNotify(padapter, mstatus);
-}
-
-static void BTDM_ForDhcp8723A(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
-
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- BTDM_1AntForDhcp(padapter);
-}
-
-bool rtl8723a_BT_using_antenna_1(struct rtw_adapter *padapter)
-{
- if (btdm_BtWifiAntNum(padapter) == Ant_x1)
- return true;
- else
- return false;
-}
-
-static void BTDM_BTCoexist8723A(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_coexist_8723a *pBtCoex;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtCoex = &pHalData->bt_coexist.halCoex8723;
-
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], beacon RSSI = 0x%x(%d)\n",
- pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB,
- pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB));
-
- btdm_BtHwCountersMonitor(padapter);
- btdm_BtEnableDisableCheck8723A(padapter);
-
- if (pBtMgnt->ExtConfig.bManualControl) {
- RTPRINT(FBT, BT_TRACE, ("%s: Action Manual control!!\n", __func__));
- return;
- }
-
- if (pBtCoex->bC2hBtInfoReqSent) {
- if (!rtl8723a_BT_enabled(padapter)) {
- pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
- } else {
- if (pBtCoex->c2hBtInfo == BT_INFO_STATE_DISABLED)
- pBtCoex->c2hBtInfo = BT_INFO_STATE_NO_CONNECTION;
- }
-
- btdm_BTCoexist8723AHandler(padapter);
- } else if (!rtl8723a_BT_enabled(padapter)) {
- pBtCoex->c2hBtInfo = BT_INFO_STATE_DISABLED;
- btdm_BTCoexist8723AHandler(padapter);
- }
-
- BTDM_QueryBtInformation(padapter);
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc8723.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.c ===== */
-
-/* local function start with btdm_ */
-/* extern function start with BTDM_ */
-
-static void BTDM_SetAntenna(struct rtw_adapter *padapter, u8 who)
-{
-}
-
-void
-BTDM_SingleAnt(
- struct rtw_adapter *padapter,
- u8 bSingleAntOn,
- u8 bInterruptOn,
- u8 bMultiNAVOn
- )
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[3] = {0};
-
- if (pHalData->bt_coexist.BT_Ant_Num != Ant_x1)
- return;
-
- H2C_Parameter[2] = 0;
- H2C_Parameter[1] = 0;
- H2C_Parameter[0] = 0;
-
- if (bInterruptOn) {
- H2C_Parameter[2] |= 0x02; /* BIT1 */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
- pHalData->bt_coexist.bInterruptOn = bInterruptOn;
-
- if (bSingleAntOn) {
- H2C_Parameter[2] |= 0x10; /* BIT4 */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
- pHalData->bt_coexist.bSingleAntOn = bSingleAntOn;
-
- if (bMultiNAVOn) {
- H2C_Parameter[2] |= 0x20; /* BIT5 */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
- pHalData->bt_coexist.bMultiNAVOn = bMultiNAVOn;
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], SingleAntenna =[%s:%s:%s], write 0xe = 0x%x\n",
- bSingleAntOn?"ON":"OFF", bInterruptOn?"ON":"OFF", bMultiNAVOn?"ON":"OFF",
- H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
-}
-
-void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- u8 stateChange = false;
- u32 BT_Polling, Ratio_Act, Ratio_STA;
- u32 BT_Active, BT_State;
- u32 regBTActive = 0, regBTState = 0, regBTPolling = 0;
-
- if (!rtl8723a_BT_coexist(padapter))
- return;
- if (pBtMgnt->ExtConfig.bManualControl)
- return;
- if (pHalData->bt_coexist.BT_CoexistType != BT_CSR_BC8)
- return;
- if (pHalData->bt_coexist.BT_Ant_Num != Ant_x1)
- return;
-
-	/* The following applies only to CSR BC8, and the firmware version should be >= 62 */
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], FirmwareVersion = 0x%x(%d)\n",
- pHalData->FirmwareVersion, pHalData->FirmwareVersion));
- regBTActive = REG_BT_ACTIVE;
- regBTState = REG_BT_STATE;
- if (pHalData->FirmwareVersion >= FW_VER_BT_REG1)
- regBTPolling = REG_BT_POLLING1;
- else
- regBTPolling = REG_BT_POLLING;
-
- BT_Active = rtl8723au_read32(padapter, regBTActive);
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_Active(0x%x) =%x\n", regBTActive, BT_Active));
- BT_Active = BT_Active & 0x00ffffff;
-
- BT_State = rtl8723au_read32(padapter, regBTState);
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_State(0x%x) =%x\n", regBTState, BT_State));
- BT_State = BT_State & 0x00ffffff;
-
- BT_Polling = rtl8723au_read32(padapter, regBTPolling);
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT_Polling(0x%x) =%x\n", regBTPolling, BT_Polling));
-
- if (BT_Active == 0xffffffff && BT_State == 0xffffffff && BT_Polling == 0xffffffff)
- return;
- if (BT_Polling == 0)
- return;
-
- Ratio_Act = BT_Active*1000/BT_Polling;
- Ratio_STA = BT_State*1000/BT_Polling;
-
- pHalData->bt_coexist.Ratio_Tx = Ratio_Act;
- pHalData->bt_coexist.Ratio_PRI = Ratio_STA;
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], Ratio_Act =%d\n", Ratio_Act));
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], Ratio_STA =%d\n", Ratio_STA));
-
- if (Ratio_STA < 60 && Ratio_Act < 500) { /* BT PAN idle */
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_IDLE;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_DOWNLINK;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
- } else {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_IDLE;
-
- if (Ratio_STA) {
- /* Check if BT PAN (under BT 2.1) is uplink or downlink */
- if ((Ratio_Act/Ratio_STA) < 2) {
- /* BT PAN Uplink */
- pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = true;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_UPLINK;
- pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = false;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_DOWNLINK;
- } else {
- /* BT PAN downlink */
- pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = false;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
- pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = true;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_DOWNLINK;
- }
- } else {
- /* BT PAN downlink */
- pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic = false;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_PAN_UPLINK;
- pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic = true;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_PAN_DOWNLINK;
- }
- }
-
- /* Check BT is idle or not */
- if (pBtMgnt->ExtConfig.NumberOfHandle == 0 &&
- pBtMgnt->ExtConfig.NumberOfSCO == 0) {
- pBtMgnt->ExtConfig.bBTBusy = false;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
- } else {
- if (Ratio_STA < 60) {
- pBtMgnt->ExtConfig.bBTBusy = false;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_IDLE;
- } else {
- pBtMgnt->ExtConfig.bBTBusy = true;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_IDLE;
- }
- }
-
- if (pBtMgnt->ExtConfig.NumberOfHandle == 0 &&
- pBtMgnt->ExtConfig.NumberOfSCO == 0) {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_RSSI_LOW;
- pBtMgnt->ExtConfig.MIN_BT_RSSI = 0;
- BTDM_SetAntenna(padapter, BTDM_ANT_BT_IDLE);
- } else {
- if (pBtMgnt->ExtConfig.MIN_BT_RSSI <= -5) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT_RSSI_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], core stack notify bt rssi Low\n"));
- } else {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT_RSSI_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], core stack notify bt rssi Normal\n"));
- }
- }
-
- if (pHalData->bt_coexist.bBTBusyTraffic != pBtMgnt->ExtConfig.bBTBusy) {
- /* BT idle or BT non-idle */
- pHalData->bt_coexist.bBTBusyTraffic = pBtMgnt->ExtConfig.bBTBusy;
- stateChange = true;
- }
-
- if (stateChange) {
- if (!pBtMgnt->ExtConfig.bBTBusy)
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is idle or disable\n"));
- else
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is non-idle\n"));
- }
- if (!pBtMgnt->ExtConfig.bBTBusy) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT is idle or disable\n"));
- if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING|WIFI_SITE_MONITOR) == true)
- BTDM_SetAntenna(padapter, BTDM_ANT_WIFI);
- }
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.c ===== */
-
-/* local function start with btdm_ */
-
- /* Note: */
- /* In the following, the FW mechanisms must be applied before the SW mechanisms. */
- /* BTDM_Balance(), BTDM_DiminishWiFi() and BT_NAV() should be called */
- /* before BTDM_AGCTable(), BTDM_BBBackOffLevel() and btdm_DacSwing(). */
-
-/* extern function start with BTDM_ */
-
-void
-BTDM_DiminishWiFi(
- struct rtw_adapter *padapter,
- u8 bDACOn,
- u8 bInterruptOn,
- u8 DACSwingLevel,
- u8 bNAVOn
- )
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[3] = {0};
-
- if (pHalData->bt_coexist.BT_Ant_Num != Ant_x2)
- return;
-
- if ((pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_RSSI_LOW) &&
- (DACSwingLevel == 0x20)) {
- RTPRINT(FBT, BT_TRACE, ("[BT]DiminishWiFi 0x20 original, but set 0x18 for Low RSSI!\n"));
- DACSwingLevel = 0x18;
- }
-
- H2C_Parameter[2] = 0;
- H2C_Parameter[1] = DACSwingLevel;
- H2C_Parameter[0] = 0;
- if (bDACOn) {
- H2C_Parameter[2] |= 0x01; /* BIT0 */
- if (bInterruptOn)
- H2C_Parameter[2] |= 0x02; /* BIT1 */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
- if (bNAVOn) {
- H2C_Parameter[2] |= 0x08; /* BIT3 */
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- }
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], bDACOn = %s, bInterruptOn = %s, write 0xe = 0x%x\n",
- bDACOn?"ON":"OFF", bInterruptOn?"ON":"OFF",
- H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], bNAVOn = %s\n",
- bNAVOn?"ON":"OFF"));
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtCoexist.c ===== */
-
-/* local function */
-static void btdm_ResetFWCoexState(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->bt_coexist.CurrentState = 0;
- pHalData->bt_coexist.PreviousState = 0;
-}
-
-static void btdm_InitBtCoexistDM(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
-	/* 20100415 Joseph: Save the original RF register 0x1E and 0x1F values so they can be restored later. */
- pHalData->bt_coexist.BtRfRegOrigin1E = PHY_QueryRFReg(padapter, PathA, RF_RCK1, bRFRegOffsetMask);
- pHalData->bt_coexist.BtRfRegOrigin1F = PHY_QueryRFReg(padapter, PathA, RF_RCK2, 0xf0);
-
- pHalData->bt_coexist.CurrentState = 0;
- pHalData->bt_coexist.PreviousState = 0;
-
- BTDM_8723AInit(padapter);
- pHalData->bt_coexist.bInitlized = true;
-}
-
-/* */
-/* extern function */
-/* */
-void BTDM_CheckAntSelMode(struct rtw_adapter *padapter)
-{
-}
-
-void BTDM_FwC2hBtRssi(struct rtw_adapter *padapter, u8 *tmpBuf)
-{
- BTDM_FwC2hBtRssi8723A(padapter, tmpBuf);
-}
-
-void BTDM_DisplayBtCoexInfo(struct rtw_adapter *padapter)
-{
- BTDM_Display8723ABtCoexInfo(padapter);
-}
-
-void BTDM_RejectAPAggregatedPacket(struct rtw_adapter *padapter, u8 bReject)
-{
-}
-
-u8 BTDM_IsHT40(struct rtw_adapter *padapter)
-{
- u8 isht40 = true;
- enum ht_channel_width bw;
-
- bw = padapter->mlmeextpriv.cur_bwmode;
-
- if (bw == HT_CHANNEL_WIDTH_20)
- isht40 = false;
- else if (bw == HT_CHANNEL_WIDTH_40)
- isht40 = true;
-
- return isht40;
-}
-
-u8 BTDM_Legacy(struct rtw_adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext;
- u8 isLegacy = false;
-
- pmlmeext = &padapter->mlmeextpriv;
- if ((pmlmeext->cur_wireless_mode == WIRELESS_11B) ||
- (pmlmeext->cur_wireless_mode == WIRELESS_11G) ||
- (pmlmeext->cur_wireless_mode == WIRELESS_11BG))
- isLegacy = true;
-
- return isLegacy;
-}
-
-void BTDM_CheckWiFiState(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct mlme_priv *pmlmepriv;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
-
- pHalData = GET_HAL_DATA(padapter);
- pmlmepriv = &padapter->mlmepriv;
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_IDLE;
-
- if (pmlmepriv->LinkDetectInfo.bTxBusyTraffic)
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_UPLINK;
- else
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_UPLINK;
-
- if (pmlmepriv->LinkDetectInfo.bRxBusyTraffic)
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_DOWNLINK;
- else
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_DOWNLINK;
- } else {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_IDLE;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_UPLINK;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_DOWNLINK;
- }
-
- if (BTDM_Legacy(padapter)) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_LEGACY;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT20;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT40;
- } else {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_LEGACY;
- if (BTDM_IsHT40(padapter)) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_HT40;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT20;
- } else {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_HT20;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_HT40;
- }
- }
-
- if (pBtMgnt->BtOperationOn)
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_BT30;
- else
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_BT30;
-}
-
-s32 BTDM_GetRxSS(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct mlme_priv *pmlmepriv;
- struct hal_data_8723a *pHalData;
- s32 UndecoratedSmoothedPWDB = 0;
-
- pmlmepriv = &padapter->mlmepriv;
- pHalData = GET_HAL_DATA(padapter);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- UndecoratedSmoothedPWDB = GET_UNDECORATED_AVERAGE_RSSI(padapter);
- } else { /* associated entry pwdb */
- UndecoratedSmoothedPWDB = pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
- /* pHalData->BT_EntryMinUndecoratedSmoothedPWDB */
- }
- RTPRINT(FBT, BT_TRACE, ("BTDM_GetRxSS() = %d\n", UndecoratedSmoothedPWDB));
- return UndecoratedSmoothedPWDB;
-}
-
-static s32 BTDM_GetRxBeaconSS(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &padapter->MgntInfo; */
- struct mlme_priv *pmlmepriv;
- struct hal_data_8723a *pHalData;
- s32 pwdbBeacon = 0;
-
- pmlmepriv = &padapter->mlmepriv;
- pHalData = GET_HAL_DATA(padapter);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- /* pwdbBeacon = pHalData->dmpriv.UndecoratedSmoothedBeacon; */
- pwdbBeacon = pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
- }
- RTPRINT(FBT, BT_TRACE, ("BTDM_GetRxBeaconSS() = %d\n", pwdbBeacon));
- return pwdbBeacon;
-}
-
-/* Get beacon rssi state */
-u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- s32 pwdbBeacon = 0;
- u8 bcnRssiState = 0;
-
- pwdbBeacon = BTDM_GetRxBeaconSS(padapter);
-
- if (levelNum == 2) {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
-
- if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_LOW)) {
- if (pwdbBeacon >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- bcnRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to High\n"));
- } else {
- bcnRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Low\n"));
- }
- } else {
- if (pwdbBeacon < RssiThresh) {
- bcnRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Low\n"));
- } else {
- bcnRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at High\n"));
- }
- }
- } else if (levelNum == 3) {
- if (RssiThresh > RssiThresh1) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON thresh error!!\n"));
- return pHalData->bt_coexist.preRssiStateBeacon;
- }
-
- if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_LOW)) {
- if (pwdbBeacon >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- bcnRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Medium\n"));
- } else {
- bcnRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Low\n"));
- }
- } else if ((pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_MEDIUM) ||
- (pHalData->bt_coexist.preRssiStateBeacon == BT_RSSI_STATE_STAY_MEDIUM)) {
- if (pwdbBeacon >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
- bcnRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to High\n"));
- } else if (pwdbBeacon < RssiThresh) {
- bcnRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Low\n"));
- } else {
- bcnRssiState = BT_RSSI_STATE_STAY_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at Medium\n"));
- }
- } else {
- if (pwdbBeacon < RssiThresh1) {
- bcnRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_BEACON_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state switch to Medium\n"));
- } else {
- bcnRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_BEACON state stay at High\n"));
- }
- }
- }
-
- pHalData->bt_coexist.preRssiStateBeacon = bcnRssiState;
-
- return bcnRssiState;
-}
-
-u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- s32 UndecoratedSmoothedPWDB = 0;
- u8 btRssiState = 0;
-
- UndecoratedSmoothedPWDB = BTDM_GetRxSS(padapter);
-
- if (levelNum == 2) {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
-
- if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_LOW)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
- }
- } else {
- if (UndecoratedSmoothedPWDB < RssiThresh) {
- btRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
- }
- }
- } else if (levelNum == 3) {
- if (RssiThresh > RssiThresh1) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n"));
- return pHalData->bt_coexist.preRssiState1;
- }
-
- if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_LOW)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
- }
- } else if ((pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_MEDIUM) ||
- (pHalData->bt_coexist.preRssiState1 == BT_RSSI_STATE_STAY_MEDIUM)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
- } else if (UndecoratedSmoothedPWDB < RssiThresh) {
- btRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n"));
- }
- } else {
- if (UndecoratedSmoothedPWDB < RssiThresh1) {
- btRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
- }
- }
- }
-
- pHalData->bt_coexist.preRssiState1 = btRssiState;
-
- return btRssiState;
-}
-
-u8 BTDM_CheckCoexRSSIState(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- s32 UndecoratedSmoothedPWDB = 0;
- u8 btRssiState = 0;
-
- UndecoratedSmoothedPWDB = BTDM_GetRxSS(padapter);
-
- if (levelNum == 2) {
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
-
- if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_LOW)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to High\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Low\n"));
- }
- } else {
- if (UndecoratedSmoothedPWDB < RssiThresh) {
- btRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Low\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at High\n"));
- }
- }
- } else if (levelNum == 3) {
- if (RssiThresh > RssiThresh1) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI thresh error!!\n"));
- return pHalData->bt_coexist.preRssiState;
- }
-
- if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_LOW) ||
- (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_LOW)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Medium\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Low\n"));
- }
- } else if ((pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_MEDIUM) ||
- (pHalData->bt_coexist.preRssiState == BT_RSSI_STATE_STAY_MEDIUM)) {
- if (UndecoratedSmoothedPWDB >= (RssiThresh1+BT_FW_COEX_THRESH_TOL)) {
- btRssiState = BT_RSSI_STATE_HIGH;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to High\n"));
- } else if (UndecoratedSmoothedPWDB < RssiThresh) {
- btRssiState = BT_RSSI_STATE_LOW;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_LOW;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Low\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_MEDIUM;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at Medium\n"));
- }
- } else {
- if (UndecoratedSmoothedPWDB < RssiThresh1) {
- btRssiState = BT_RSSI_STATE_MEDIUM;
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- pHalData->bt_coexist.CurrentState &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state switch to Medium\n"));
- } else {
- btRssiState = BT_RSSI_STATE_STAY_HIGH;
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], RSSI state stay at High\n"));
- }
- }
- }
-
- pHalData->bt_coexist.preRssiState = btRssiState;
-
- return btRssiState;
-}
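
The three RSSI-state helpers above share one pattern in their three-level branch: a reading must climb past the threshold plus BT_FW_COEX_THRESH_TOL to move up a level, but only has to fall below the bare threshold to move back down. The following is a minimal editorial sketch of that hysteresis, not part of the deleted driver; the names are illustrative only.

enum rssi_level { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

/* Three-level hysteresis as used by the CheckCoexRSSIState helpers above:
 * moving up a level requires (thresh + tol), moving down only requires
 * dropping below thresh, which keeps the state from flapping around a
 * noisy threshold. */
static enum rssi_level rssi_hysteresis(enum rssi_level prev, int rssi,
				       int thresh_low, int thresh_high, int tol)
{
	switch (prev) {
	case RSSI_LOW:
		return (rssi >= thresh_low + tol) ? RSSI_MEDIUM : RSSI_LOW;
	case RSSI_MEDIUM:
		if (rssi >= thresh_high + tol)
			return RSSI_HIGH;
		return (rssi < thresh_low) ? RSSI_LOW : RSSI_MEDIUM;
	default: /* RSSI_HIGH */
		return (rssi < thresh_high) ? RSSI_MEDIUM : RSSI_HIGH;
	}
}
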
-
-bool rtl8723a_BT_disable_EDCA_turbo(struct rtw_adapter *padapter)
-{
- struct bt_mgnt *pBtMgnt;
- struct hal_data_8723a *pHalData;
- u8 bBtChangeEDCA = false;
- u32 EDCA_BT_BE = 0x5ea42b, cur_EDCA_reg;
- bool bRet = false;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtMgnt = &pHalData->BtInfo.BtMgnt;
-
- if (!rtl8723a_BT_coexist(padapter)) {
- bRet = false;
- pHalData->bt_coexist.lastBtEdca = 0;
- return bRet;
- }
- if (!((pBtMgnt->bSupportProfile) ||
- (pHalData->bt_coexist.BT_CoexistType == BT_CSR_BC8))) {
- bRet = false;
- pHalData->bt_coexist.lastBtEdca = 0;
- return bRet;
- }
-
- if (rtl8723a_BT_using_antenna_1(padapter)) {
- bRet = false;
- pHalData->bt_coexist.lastBtEdca = 0;
- return bRet;
- }
-
- if (pHalData->bt_coexist.exec_cnt < 3)
- pHalData->bt_coexist.exec_cnt++;
- else
- pHalData->bt_coexist.bEDCAInitialized = true;
-
-	/* When BT is not idle */
- if (!(pHalData->bt_coexist.CurrentState & BT_COEX_STATE_BT_IDLE)) {
- RTPRINT(FBT, BT_TRACE, ("BT state non idle, set bt EDCA\n"));
-
- /* aggr_num = 0x0909; */
- if (pHalData->odmpriv.DM_EDCA_Table.bCurrentTurboEDCA) {
- bBtChangeEDCA = true;
- pHalData->odmpriv.DM_EDCA_Table.bCurrentTurboEDCA = false;
- pHalData->dmpriv.prv_traffic_idx = 3;
- }
- cur_EDCA_reg = rtl8723au_read32(padapter, REG_EDCA_BE_PARAM);
-
- if (cur_EDCA_reg != EDCA_BT_BE)
- bBtChangeEDCA = true;
- if (bBtChangeEDCA || !pHalData->bt_coexist.bEDCAInitialized) {
- rtl8723au_write32(padapter, REG_EDCA_BE_PARAM,
- EDCA_BT_BE);
- pHalData->bt_coexist.lastBtEdca = EDCA_BT_BE;
- }
- bRet = true;
- } else {
- RTPRINT(FBT, BT_TRACE, ("BT state idle, set original EDCA\n"));
- pHalData->bt_coexist.lastBtEdca = 0;
- bRet = false;
- }
- return bRet;
-}
-
-void
-BTDM_Balance(
- struct rtw_adapter *padapter,
- u8 bBalanceOn,
- u8 ms0,
- u8 ms1
- )
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[3] = {0};
-
- if (bBalanceOn) {
- H2C_Parameter[2] = 1;
- H2C_Parameter[1] = ms1;
- H2C_Parameter[0] = ms0;
- pHalData->bt_coexist.bFWCoexistAllOff = false;
- } else {
- H2C_Parameter[2] = 0;
- H2C_Parameter[1] = 0;
- H2C_Parameter[0] = 0;
- }
- pHalData->bt_coexist.bBalanceOn = bBalanceOn;
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], Balance =[%s:%dms:%dms], write 0xc = 0x%x\n",
- bBalanceOn?"ON":"OFF", ms0, ms1,
- H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
-
- FillH2CCmd(padapter, 0xc, 3, H2C_Parameter);
-}
-
-void BTDM_AGCTable(struct rtw_adapter *padapter, u8 type)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- if (type == BT_AGCTABLE_OFF) {
- RTPRINT(FBT, BT_TRACE, ("[BT]AGCTable Off!\n"));
- rtl8723au_write32(padapter, 0xc78, 0x641c0001);
- rtl8723au_write32(padapter, 0xc78, 0x631d0001);
- rtl8723au_write32(padapter, 0xc78, 0x621e0001);
- rtl8723au_write32(padapter, 0xc78, 0x611f0001);
- rtl8723au_write32(padapter, 0xc78, 0x60200001);
-
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x32000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x71000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xb0000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xfc000);
- PHY_SetRFReg(padapter, PathA, RF_RX_G1, bRFRegOffsetMask, 0x30355);
-
- pHalData->bt_coexist.b8723aAgcTableOn = false;
- } else if (type == BT_AGCTABLE_ON) {
- RTPRINT(FBT, BT_TRACE, ("[BT]AGCTable On!\n"));
- rtl8723au_write32(padapter, 0xc78, 0x4e1c0001);
- rtl8723au_write32(padapter, 0xc78, 0x4d1d0001);
- rtl8723au_write32(padapter, 0xc78, 0x4c1e0001);
- rtl8723au_write32(padapter, 0xc78, 0x4b1f0001);
- rtl8723au_write32(padapter, 0xc78, 0x4a200001);
-
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0xdc000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x90000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x51000);
- PHY_SetRFReg(padapter, PathA, RF_RX_AGC_HP, bRFRegOffsetMask, 0x12000);
- PHY_SetRFReg(padapter, PathA, RF_RX_G1, bRFRegOffsetMask, 0x00355);
-
- pHalData->bt_coexist.b8723aAgcTableOn = true;
-
- pHalData->bt_coexist.bSWCoexistAllOff = false;
- }
-}
-
-void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (type == BT_BB_BACKOFF_OFF) {
- RTPRINT(FBT, BT_TRACE, ("[BT]BBBackOffLevel Off!\n"));
- rtl8723au_write32(padapter, 0xc04, 0x3a05611);
- } else if (type == BT_BB_BACKOFF_ON) {
- RTPRINT(FBT, BT_TRACE, ("[BT]BBBackOffLevel On!\n"));
- rtl8723au_write32(padapter, 0xc04, 0x3a07611);
- pHalData->bt_coexist.bSWCoexistAllOff = false;
- }
-}
-
-void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff()\n"));
- if (pHalData->bt_coexist.bFWCoexistAllOff)
- return;
- RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff(), real Do\n"));
-
- BTDM_FWCoexAllOff8723A(padapter);
-
- pHalData->bt_coexist.bFWCoexistAllOff = true;
-}
-
-void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff()\n"));
- if (pHalData->bt_coexist.bSWCoexistAllOff)
- return;
- RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff(), real Do\n"));
- BTDM_SWCoexAllOff8723A(padapter);
-
- pHalData->bt_coexist.bSWCoexistAllOff = true;
-}
-
-void BTDM_HWCoexAllOff(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff()\n"));
- if (pHalData->bt_coexist.bHWCoexistAllOff)
- return;
- RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff(), real Do\n"));
-
- BTDM_HWCoexAllOff8723A(padapter);
-
- pHalData->bt_coexist.bHWCoexistAllOff = true;
-}
-
-void BTDM_CoexAllOff(struct rtw_adapter *padapter)
-{
- BTDM_FWCoexAllOff(padapter);
- BTDM_SWCoexAllOff(padapter);
- BTDM_HWCoexAllOff(padapter);
-}
-
-void rtl8723a_BT_disable_coexist(struct rtw_adapter *padapter)
-{
- struct pwrctrl_priv *ppwrctrl = &padapter->pwrctrlpriv;
-
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- /* 8723 1Ant doesn't need to turn off bt coexist mechanism. */
- if (rtl8723a_BT_using_antenna_1(padapter))
- return;
-
- /* Before enter IPS, turn off FW BT Co-exist mechanism */
- if (ppwrctrl->reg_rfoff == rf_on) {
- RTPRINT(FBT, BT_TRACE, ("[BT][DM], Before enter IPS, turn off all Coexist DM\n"));
- btdm_ResetFWCoexState(padapter);
- BTDM_CoexAllOff(padapter);
- BTDM_SetAntenna(padapter, BTDM_ANT_BT);
- }
-}
-
-void BTDM_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi, u8 *rssi_bt)
-{
- BTDM_8723ASignalCompensation(padapter, rssi_wifi, rssi_bt);
-}
-
-void rtl8723a_BT_do_coexist(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (!rtl8723a_BT_coexist(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BT not exists!!\n"));
- return;
- }
-
- if (!pHalData->bt_coexist.bInitlized) {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], btdm_InitBtCoexistDM()\n"));
- btdm_InitBtCoexistDM(padapter);
- }
-
- RTPRINT(FBT, BT_TRACE, ("\n\n[DM][BT], BTDM start!!\n"));
-
- BTDM_PWDBMonitor(padapter);
-
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], HW type is 8723\n"));
- BTDM_BTCoexist8723A(padapter);
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], BTDM end!!\n\n"));
-}
-
-void BTDM_UpdateCoexState(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (!BTDM_IsSameCoexistState(padapter)) {
- RTPRINT(FBT, BT_TRACE, ("[BTCoex], Coexist State[bitMap] change from 0x%"i64fmt"x to 0x%"i64fmt"x, changeBits = 0x%"i64fmt"x\n",
- pHalData->bt_coexist.PreviousState,
- pHalData->bt_coexist.CurrentState,
- (pHalData->bt_coexist.PreviousState^pHalData->bt_coexist.CurrentState)));
- pHalData->bt_coexist.PreviousState = pHalData->bt_coexist.CurrentState;
- }
-}
-
-u8 BTDM_IsSameCoexistState(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.PreviousState == pHalData->bt_coexist.CurrentState) {
- return true;
- } else {
- RTPRINT(FBT, BT_TRACE, ("[DM][BT], Coexist state changed!!\n"));
- return false;
- }
-}
-
-void BTDM_PWDBMonitor(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(GetDefaultAdapter(padapter));
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 H2C_Parameter[3] = {0};
- s32 tmpBTEntryMaxPWDB = 0, tmpBTEntryMinPWDB = 0xff;
- u8 i;
-
- if (pBtMgnt->BtOperationOn) {
- for (i = 0; i < MAX_BT_ASOC_ENTRY_NUM; i++) {
- if (pBTInfo->BtAsocEntry[i].bUsed) {
- if (pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB < tmpBTEntryMinPWDB)
- tmpBTEntryMinPWDB = pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB;
- if (pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB > tmpBTEntryMaxPWDB)
- tmpBTEntryMaxPWDB = pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB;
- /* Report every BT connection (HS mode) RSSI to FW */
- H2C_Parameter[2] = (u8)(pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB & 0xFF);
- H2C_Parameter[0] = (MAX_FW_SUPPORT_MACID_NUM-1-i);
- RTPRINT(FDM, DM_BT30, ("RSSI report for BT[%d], H2C_Par = 0x%x\n", i, H2C_Parameter[0]));
- FillH2CCmd(padapter, RSSI_SETTING_EID, 3, H2C_Parameter);
- RTPRINT_ADDR(FDM, (DM_PWDB|DM_BT30), ("BT_Entry Mac :"),
- pBTInfo->BtAsocEntry[i].BTRemoteMACAddr)
- RTPRINT(FDM, (DM_PWDB|DM_BT30),
- ("BT rx pwdb[%d] = 0x%x(%d)\n", i,
- pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB,
- pBTInfo->BtAsocEntry[i].UndecoratedSmoothedPWDB));
- }
- }
- if (tmpBTEntryMaxPWDB != 0) { /* If associated entry is found */
- pHalData->dmpriv.BT_EntryMaxUndecoratedSmoothedPWDB = tmpBTEntryMaxPWDB;
- RTPRINT(FDM, (DM_PWDB|DM_BT30), ("BT_EntryMaxPWDB = 0x%x(%d)\n",
- tmpBTEntryMaxPWDB, tmpBTEntryMaxPWDB));
- } else {
- pHalData->dmpriv.BT_EntryMaxUndecoratedSmoothedPWDB = 0;
- }
- if (tmpBTEntryMinPWDB != 0xff) { /* If associated entry is found */
- pHalData->dmpriv.BT_EntryMinUndecoratedSmoothedPWDB = tmpBTEntryMinPWDB;
- RTPRINT(FDM, (DM_PWDB|DM_BT30), ("BT_EntryMinPWDB = 0x%x(%d)\n",
- tmpBTEntryMinPWDB, tmpBTEntryMinPWDB));
- } else {
- pHalData->dmpriv.BT_EntryMinUndecoratedSmoothedPWDB = 0;
- }
- }
-}
-
-u8 BTDM_IsBTBusy(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_mgnt *pBtMgnt = &pBTInfo->BtMgnt;
-
- if (pBtMgnt->ExtConfig.bBTBusy)
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsWifiBusy(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
- struct mlme_priv *pmlmepriv = &GetDefaultAdapter(padapter)->mlmepriv;
- struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
- struct bt_traffic *pBtTraffic = &pBTInfo->BtTraffic;
-
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic ||
- pBtTraffic->Bt30TrafficStatistics.bTxBusyTraffic ||
- pBtTraffic->Bt30TrafficStatistics.bRxBusyTraffic)
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsCoexistStateChanged(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.PreviousState == pHalData->bt_coexist.CurrentState)
- return false;
- else
- return true;
-}
-
-u8 BTDM_IsWifiUplink(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
- struct mlme_priv *pmlmepriv;
- struct bt_30info *pBTInfo;
- struct bt_traffic *pBtTraffic;
-
- pmlmepriv = &padapter->mlmepriv;
- pBTInfo = GET_BT_INFO(padapter);
- pBtTraffic = &pBTInfo->BtTraffic;
-
- if ((pmlmepriv->LinkDetectInfo.bTxBusyTraffic) ||
- (pBtTraffic->Bt30TrafficStatistics.bTxBusyTraffic))
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsWifiDownlink(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
- struct mlme_priv *pmlmepriv;
- struct bt_30info *pBTInfo;
- struct bt_traffic *pBtTraffic;
-
- pmlmepriv = &padapter->mlmepriv;
- pBTInfo = GET_BT_INFO(padapter);
- pBtTraffic = &pBTInfo->BtTraffic;
-
- if ((pmlmepriv->LinkDetectInfo.bRxBusyTraffic) ||
- (pBtTraffic->Bt30TrafficStatistics.bRxBusyTraffic))
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsBTHSMode(struct rtw_adapter *padapter)
-{
-/*PMGNT_INFO pMgntInfo = &GetDefaultAdapter(padapter)->MgntInfo; */
- struct hal_data_8723a *pHalData;
- struct bt_mgnt *pBtMgnt;
-
- pHalData = GET_HAL_DATA(padapter);
- pBtMgnt = &pHalData->BtInfo.BtMgnt;
-
- if (pBtMgnt->BtOperationOn)
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsBTUplink(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.BT21TrafficStatistics.bTxBusyTraffic)
- return true;
- else
- return false;
-}
-
-u8 BTDM_IsBTDownlink(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.BT21TrafficStatistics.bRxBusyTraffic)
- return true;
- else
- return false;
-}
-
-void BTDM_AdjustForBtOperation(struct rtw_adapter *padapter)
-{
- RTPRINT(FBT, BT_TRACE, ("[BT][DM], BTDM_AdjustForBtOperation()\n"));
- BTDM_AdjustForBtOperation8723A(padapter);
-}
-
-void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum)
-{
- BTDM_Set8723ABtCoexCurrAntNum(padapter, antNum);
-}
-
-void BTDM_ForHalt(struct rtw_adapter *padapter)
-{
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- BTDM_ForHalt8723A(padapter);
- GET_HAL_DATA(padapter)->bt_coexist.bInitlized = false;
-}
-
-void BTDM_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType)
-{
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- BTDM_WifiScanNotify8723A(padapter, scanType);
-}
-
-void BTDM_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action)
-{
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- BTDM_WifiAssociateNotify8723A(padapter, action);
-}
-
-void rtl8723a_BT_mediastatus_notify(struct rtw_adapter *padapter,
- enum rt_media_status mstatus)
-{
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- BTDM_MediaStatusNotify8723A(padapter, mstatus);
-}
-
-void rtl8723a_BT_specialpacket_notify(struct rtw_adapter *padapter)
-{
- if (!rtl8723a_BT_coexist(padapter))
- return;
-
- BTDM_ForDhcp8723A(padapter);
-}
-
-void BTDM_ResetActionProfileState(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- pHalData->bt_coexist.CurrentState &= ~\
- (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_A2DP|
- BT_COEX_STATE_PROFILE_PAN|BT_COEX_STATE_PROFILE_SCO);
-}
-
-u8 BTDM_IsActionSCO(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_SCO) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_SCO;
- bRet = true;
- }
- } else {
- if (pBtMgnt->ExtConfig.NumberOfSCO > 0) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_SCO;
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionHID(struct rtw_adapter *padapter)
-{
- struct bt_30info *pBTInfo;
- struct hal_data_8723a *pHalData;
- struct bt_mgnt *pBtMgnt;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_HID;
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- pBtMgnt->ExtConfig.NumberOfHandle == 1) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_HID;
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionA2DP(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_A2DP) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_A2DP;
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP) &&
- pBtMgnt->ExtConfig.NumberOfHandle == 1) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_A2DP;
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionPAN(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_PAN) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_PAN;
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) &&
- pBtMgnt->ExtConfig.NumberOfHandle == 1) {
- pHalData->bt_coexist.CurrentState |= BT_COEX_STATE_PROFILE_PAN;
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionHIDA2DP(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_mgnt *pBtMgnt;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtMgnt = &pBTInfo->BtMgnt;
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID_A2DP) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_A2DP);
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_A2DP);
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionHIDPAN(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_HID_PAN) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_PAN);
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_HID) &&
- BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN)) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_HID|BT_COEX_STATE_PROFILE_PAN);
- bRet = true;
- }
- }
- return bRet;
-}
-
-u8 BTDM_IsActionPANA2DP(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct bt_30info *pBTInfo;
- struct bt_dgb *pBtDbg;
- u8 bRet;
-
- pHalData = GET_HAL_DATA(padapter);
- pBTInfo = GET_BT_INFO(padapter);
- pBtDbg = &pBTInfo->BtDbg;
- bRet = false;
-
- if (pBtDbg->dbgCtrl) {
- if (pBtDbg->dbgProfile == BT_DBG_PROFILE_PAN_A2DP) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_PAN|BT_COEX_STATE_PROFILE_A2DP);
- bRet = true;
- }
- } else {
- if (BTHCI_CheckProfileExist(padapter, BT_PROFILE_PAN) && BTHCI_CheckProfileExist(padapter, BT_PROFILE_A2DP)) {
- pHalData->bt_coexist.CurrentState |= (BT_COEX_STATE_PROFILE_PAN|BT_COEX_STATE_PROFILE_A2DP);
- bRet = true;
- }
- }
- return bRet;
-}
-
-bool rtl8723a_BT_enabled(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.bCurBtDisabled)
- return false;
- else
- return true;
-}
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtCoexist.c ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/HalBT.c ===== */
-
-/* */
-/*local function */
-/* */
-
-static void halbt_InitHwConfig8723A(struct rtw_adapter *padapter)
-{
-}
-
-/* */
-/*extern function */
-/* */
-u8 HALBT_GetPGAntNum(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- return pHalData->bt_coexist.BT_Ant_Num;
-}
-
-void HALBT_SetKey(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTinfo;
- struct bt_asoc_entry *pBtAssocEntry;
- u16 usConfig = 0;
-
- pBTinfo = GET_BT_INFO(padapter);
- pBtAssocEntry = &pBTinfo->BtAsocEntry[EntryNum];
-
- pBtAssocEntry->HwCAMIndex = BT_HWCAM_STAR + EntryNum;
-
- usConfig = CAM_VALID | (CAM_AES << 2);
- rtl8723a_cam_write(padapter, pBtAssocEntry->HwCAMIndex, usConfig,
- pBtAssocEntry->BTRemoteMACAddr,
- pBtAssocEntry->PTK + TKIP_ENC_KEY_POS);
-}
-
-void HALBT_RemoveKey(struct rtw_adapter *padapter, u8 EntryNum)
-{
- struct bt_30info *pBTinfo;
- struct bt_asoc_entry *pBtAssocEntry;
-
- pBTinfo = GET_BT_INFO(padapter);
- pBtAssocEntry = &pBTinfo->BtAsocEntry[EntryNum];
-
- if (pBTinfo->BtAsocEntry[EntryNum].HwCAMIndex != 0) {
- /* ToDo : add New HALBT_RemoveKey function !! */
- if (pBtAssocEntry->HwCAMIndex >= BT_HWCAM_STAR &&
- pBtAssocEntry->HwCAMIndex < HALF_CAM_ENTRY)
- rtl8723a_cam_empty_entry(padapter,
- pBtAssocEntry->HwCAMIndex);
- pBTinfo->BtAsocEntry[EntryNum].HwCAMIndex = 0;
- }
-}
-
-void rtl8723a_BT_init_hal_vars(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
-
- pHalData->bt_coexist.BluetoothCoexist = pHalData->EEPROMBluetoothCoexist;
- pHalData->bt_coexist.BT_Ant_Num = pHalData->EEPROMBluetoothAntNum;
- pHalData->bt_coexist.BT_CoexistType = pHalData->EEPROMBluetoothType;
- pHalData->bt_coexist.BT_Ant_isolation = pHalData->EEPROMBluetoothAntIsolation;
- pHalData->bt_coexist.bt_radiosharedtype = pHalData->EEPROMBluetoothRadioShared;
-
- RT_TRACE(_module_hal_init_c_, _drv_info_,
-		 "BT Coexistence = 0x%x\n", rtl8723a_BT_coexist(padapter));
-
- if (rtl8723a_BT_coexist(padapter)) {
- if (pHalData->bt_coexist.BT_Ant_Num == Ant_x2) {
- BTDM_SetBtCoexCurrAntNum(padapter, 2);
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "BlueTooth BT_Ant_Num = Antx2\n");
- } else if (pHalData->bt_coexist.BT_Ant_Num == Ant_x1) {
- BTDM_SetBtCoexCurrAntNum(padapter, 1);
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "BlueTooth BT_Ant_Num = Antx1\n");
- }
- pHalData->bt_coexist.bBTBusyTraffic = false;
- pHalData->bt_coexist.bBTTrafficModeSet = false;
- pHalData->bt_coexist.bBTNonTrafficModeSet = false;
- pHalData->bt_coexist.CurrentState = 0;
- pHalData->bt_coexist.PreviousState = 0;
-
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "bt_radiosharedType = 0x%x\n",
- pHalData->bt_coexist.bt_radiosharedtype);
- }
-}
-
-bool rtl8723a_BT_coexist(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->bt_coexist.BluetoothCoexist)
- return true;
- else
- return false;
-}
-
-u8 HALBT_BTChipType(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- return pHalData->bt_coexist.BT_CoexistType;
-}
-
-void rtl8723a_BT_init_hwconfig(struct rtw_adapter *padapter)
-{
- halbt_InitHwConfig8723A(padapter);
- rtl8723a_BT_do_coexist(padapter);
-}
-
-void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter *padapter)
-{
-}
-
-/* ===== End of sync from SD7 driver HAL/HalBT.c ===== */
-
-void rtl8723a_dual_antenna_detection(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct dm_odm_t *pDM_Odm;
- struct sw_ant_sw *pDM_SWAT_Table;
- u8 i;
-
- pHalData = GET_HAL_DATA(padapter);
- pDM_Odm = &pHalData->odmpriv;
- pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
-	/* */
-	/* <Roger_Notes> RTL8723A single and dual antenna dynamic detection
-	   mechanism when the RF power state is on. */
-	/* Power tracking, IQK, LCK and RCK RF read/write operations must be
-	   taken into consideration. */
-	/* 2011.12.15. */
-	/* */
- if (!pHalData->bAntennaDetected) {
- u8 btAntNum = BT_GetPGAntNum(padapter);
-
- /* Set default antenna B status */
- if (btAntNum == Ant_x2)
- pDM_SWAT_Table->ANTB_ON = true;
- else if (btAntNum == Ant_x1)
- pDM_SWAT_Table->ANTB_ON = false;
- else
- pDM_SWAT_Table->ANTB_ON = true;
-
- if (pHalData->CustomerID != RT_CID_TOSHIBA) {
- for (i = 0; i < MAX_ANTENNA_DETECTION_CNT; i++) {
- if (ODM_SingleDualAntennaDetection
- (&pHalData->odmpriv, ANTTESTALL) == true)
- break;
- }
-
- /* Set default antenna number for BT coexistence */
- if (btAntNum == Ant_x2)
- BT_SetBtCoexCurrAntNum(padapter,
- pDM_SWAT_Table->
- ANTB_ON ? 2 : 1);
- }
- pHalData->bAntennaDetected = true;
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
deleted file mode 100644
index 2230f4c539ec..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
+++ /dev/null
@@ -1,755 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_CMD_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <mlme_osdep.h>
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-#define RTL92C_MAX_H2C_BOX_NUMS 4
-#define RTL92C_MAX_CMD_LEN 5
-#define MESSAGE_BOX_SIZE 4
-#define EX_MESSAGE_BOX_SIZE 2
-
-static u8 _is_fw_read_cmd_down(struct rtw_adapter *padapter, u8 msgbox_num)
-{
- u8 read_down = false;
- int retry_cnts = 100;
- u8 valid;
-
- do {
- valid = rtl8723au_read8(padapter, REG_HMETFR) & BIT(msgbox_num);
- if (0 == valid)
- read_down = true;
- } while ((!read_down) && (retry_cnts--));
-
- return read_down;
-}
-
-/*****************************************
-* H2C Msg format :
-*| 31 - 8 |7 | 6 - 0 |
-*| h2c_msg |Ext_bit |CMD_ID |
-*
-******************************************/
-int FillH2CCmd(struct rtw_adapter *padapter, u8 ElementID, u32 CmdLen,
- u8 *pCmdBuffer)
-{
- u8 bcmd_down = false;
- s32 retry_cnts = 100;
- u8 h2c_box_num;
- u32 msgbox_addr;
- u32 msgbox_ex_addr;
- struct hal_data_8723a *pHalData;
- u32 h2c_cmd = 0;
- u16 h2c_cmd_ex = 0;
- int ret = _FAIL;
-
- padapter = GET_PRIMARY_ADAPTER(padapter);
- pHalData = GET_HAL_DATA(padapter);
-
- mutex_lock(&adapter_to_dvobj(padapter)->h2c_fwcmd_mutex);
-
- if (!pCmdBuffer)
- goto exit;
- if (CmdLen > RTL92C_MAX_CMD_LEN)
- goto exit;
- if (padapter->bSurpriseRemoved == true)
- goto exit;
-
- /* Watch out for a race condition in H2C cmd setting. */
- do {
- h2c_box_num = pHalData->LastHMEBoxNum;
-
- if (!_is_fw_read_cmd_down(padapter, h2c_box_num)) {
- DBG_8723A(" fw read cmd failed...\n");
- goto exit;
- }
-
- if (CmdLen <= 3) {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
- } else {
- memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer, EX_MESSAGE_BOX_SIZE);
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer+2, (CmdLen-EX_MESSAGE_BOX_SIZE));
- *(u8 *)(&h2c_cmd) |= BIT(7);
- }
-
- *(u8 *)(&h2c_cmd) |= ElementID;
-
- if (h2c_cmd & BIT(7)) {
- msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * EX_MESSAGE_BOX_SIZE);
- h2c_cmd_ex = le16_to_cpu(h2c_cmd_ex);
- rtl8723au_write16(padapter, msgbox_ex_addr, h2c_cmd_ex);
- }
- msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * MESSAGE_BOX_SIZE);
- h2c_cmd = le32_to_cpu(h2c_cmd);
- rtl8723au_write32(padapter, msgbox_addr, h2c_cmd);
-
- bcmd_down = true;
-
- pHalData->LastHMEBoxNum = (h2c_box_num+1) % RTL92C_MAX_H2C_BOX_NUMS;
-
- } while ((!bcmd_down) && (retry_cnts--));
-
- ret = _SUCCESS;
-
-exit:
- mutex_unlock(&adapter_to_dvobj(padapter)->h2c_fwcmd_mutex);
- return ret;
-}
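The packing above matches the H2C layout sketched before FillH2CCmd(): byte 0 of the 32-bit box carries the command ID in bits 6-0 and the extension flag in bit 7, bytes 1-3 carry payload, and the 16-bit extension box takes the overflow of longer commands. A standalone sketch of that packing with an invented 5-byte payload and command ID, assuming a little-endian host as the driver does (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t payload[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };	/* invented payload */
	uint8_t cmd_id = 0x09;					/* invented command ID */
	uint32_t box = 0;	/* image of a REG_HMEBOX_n register */
	uint16_t box_ext = 0;	/* image of a REG_HMEBOX_EXT_n register */

	memcpy(&box_ext, payload, 2);			/* first 2 bytes -> extension box */
	memcpy((uint8_t *)&box + 1, payload + 2, 3);	/* remaining bytes -> box bytes 1..3 */
	*(uint8_t *)&box |= 0x80;			/* Ext_bit (BIT(7)) */
	*(uint8_t *)&box |= cmd_id;			/* CMD_ID in bits 6-0 */

	printf("ext box = 0x%04x, main box = 0x%08x\n",
	       (unsigned int)box_ext, (unsigned int)box);
	return 0;
}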
-
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param)
-{
- __le32 cmd = cpu_to_le32(param);
-
- FillH2CCmd(padapter, RSSI_SETTING_EID, 3, (void *)&cmd);
-
- return _SUCCESS;
-}
-
-int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
-{
- u8 buf[5];
-
- memset(buf, 0, 5);
- put_unaligned_le32(mask, buf);
- buf[4] = arg;
-
- FillH2CCmd(padapter, MACID_CONFIG_EID, 5, buf);
-
- return _SUCCESS;
-}
-
-/* bitmap[0:27] = tx_rate_bitmap */
-/* bitmap[28:31]= Rate Adaptive id */
-/* arg[0:4] = macid */
-/* arg[5] = Short GI */
-void rtl8723a_add_rateatid(struct rtw_adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- u8 macid = arg & 0x1f;
- u32 raid = bitmap & 0xf0000000;
-
- bitmap &= 0x0fffffff;
- if (rssi_level != DM_RATR_STA_INIT)
- bitmap = ODM_Get_Rate_Bitmap23a(pHalData, macid, bitmap,
- rssi_level);
-
- bitmap |= raid;
-
- rtl8723a_set_raid_cmd(pAdapter, bitmap, arg);
-}
-
-void rtl8723a_set_FwPwrMode_cmd(struct rtw_adapter *padapter, u8 Mode)
-{
- struct setpwrmode_parm H2CSetPwrMode;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- DBG_8723A("%s: Mode =%d SmartPS =%d UAPSD =%d BcnMode = 0x%02x\n", __func__,
- Mode, pwrpriv->smart_ps, padapter->registrypriv.uapsd_enable, pwrpriv->bcn_ant_mode);
-
- /* Force leaving RF low power mode for 1T1R to
- prevent conflicting settings in the Fw power */
- /* saving sequence. 2010.06.07. Added by tynli.
- Suggested by SD3 yschang. */
- if (Mode != PS_MODE_ACTIVE && pHalData->rf_type != RF_2T2R)
- ODM_RF_Saving23a(&pHalData->odmpriv, true);
-
- H2CSetPwrMode.Mode = Mode;
- H2CSetPwrMode.SmartPS = pwrpriv->smart_ps;
- H2CSetPwrMode.AwakeInterval = 1;
- H2CSetPwrMode.bAllQueueUAPSD = padapter->registrypriv.uapsd_enable;
- H2CSetPwrMode.BcnAntMode = pwrpriv->bcn_ant_mode;
-
- FillH2CCmd(padapter, SET_PWRMODE_EID, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
-
-}
-
-static void
-ConstructBeacon(struct rtw_adapter *padapter, u8 *pframe, u32 *pLength)
-{
- struct ieee80211_mgmt *mgmt;
- u32 rate_len, pktlen;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- /* DBG_8723A("%s\n", __func__); */
-
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-
- ether_addr_copy(mgmt->da, bc_addr);
- ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt->bssid, get_my_bssid23a(cur_network));
-
- /* A Beacon frame shouldn't have fragment bits set */
- mgmt->seq_ctrl = 0;
-
- /* timestamp will be inserted by hardware */
-
- put_unaligned_le16(cur_network->beacon_interval,
- &mgmt->u.beacon.beacon_int);
-
- put_unaligned_le16(cur_network->capability,
- &mgmt->u.beacon.capab_info);
-
- pframe = mgmt->u.beacon.variable;
- pktlen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
-
- if ((pmlmeinfo->state&0x03) == MSR_AP) {
- /* DBG_8723A("ie len =%d\n", cur_network->IELength); */
- pktlen += cur_network->IELength;
- memcpy(pframe, cur_network->IEs, pktlen);
-
- goto _ConstructBeacon;
- }
-
- /* below for ad-hoc mode */
-
- /* SSID */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SSID,
- cur_network->Ssid.ssid_len,
- cur_network->Ssid.ssid, &pktlen);
-
- /* supported rates... */
- rate_len = rtw_get_rateset_len23a(cur_network->SupportedRates);
- pframe = rtw_set_ie23a(pframe, WLAN_EID_SUPP_RATES, ((rate_len > 8) ?
- 8 : rate_len), cur_network->SupportedRates, &pktlen);
-
- /* DS parameter set */
- pframe = rtw_set_ie23a(pframe, WLAN_EID_DS_PARAMS, 1, (unsigned char *)
- &cur_network->DSConfig, &pktlen);
-
- if ((pmlmeinfo->state&0x03) == MSR_ADHOC) {
- u32 ATIMWindow;
- /* IBSS Parameter Set... */
- /* ATIMWindow = cur->ATIMWindow; */
- ATIMWindow = 0;
- pframe = rtw_set_ie23a(pframe, WLAN_EID_IBSS_PARAMS, 2,
- (unsigned char *)&ATIMWindow, &pktlen);
- }
-
- /* todo: ERP IE */
-
- /* EXTENDED SUPPORTED RATES */
- if (rate_len > 8)
- pframe = rtw_set_ie23a(pframe, WLAN_EID_EXT_SUPP_RATES,
- (rate_len - 8),
- (cur_network->SupportedRates + 8),
- &pktlen);
-
- /* todo:HT for adhoc */
-
-_ConstructBeacon:
-
- if ((pktlen + TXDESC_SIZE) > 512) {
- DBG_8723A("beacon frame too large\n");
- return;
- }
-
- *pLength = pktlen;
-
- /* DBG_8723A("%s bcn_sz =%d\n", __func__, pktlen); */
-
-}
-
-static void ConstructPSPoll(struct rtw_adapter *padapter,
- u8 *pframe, u32 *pLength)
-{
- struct ieee80211_hdr *pwlanhdr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- /* Frame control. */
- pwlanhdr->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
-
- /* AID. */
- pwlanhdr->duration_id = cpu_to_le16(pmlmeinfo->aid | 0xc000);
-
- /* BSSID. */
- memcpy(pwlanhdr->addr1, get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
-
- /* TA. */
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
-
- *pLength = 16;
-}
-
-static void
-ConstructNullFunctionData(struct rtw_adapter *padapter, u8 *pframe,
- u32 *pLength, u8 *StaAddr, u8 bQoS, u8 AC,
- u8 bEosp, u8 bForcePowerSave)
-{
- struct ieee80211_hdr *pwlanhdr;
- u32 pktlen;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
-
- pwlanhdr->frame_control = 0;
- pwlanhdr->seq_ctrl = 0;
-
- if (bForcePowerSave)
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
-
- switch (cur_network->network.ifmode) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_TODS);
- memcpy(pwlanhdr->addr1,
- get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv),
- ETH_ALEN);
- memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
- break;
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_AP:
- pwlanhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2,
- get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
- memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv),
- ETH_ALEN);
- break;
- case NL80211_IFTYPE_ADHOC:
- default:
- memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3,
- get_my_bssid23a(&pmlmeinfo->network), ETH_ALEN);
- break;
- }
-
- if (bQoS == true) {
- struct ieee80211_qos_hdr *qoshdr;
- qoshdr = (struct ieee80211_qos_hdr *)pframe;
-
- qoshdr->frame_control |=
- cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_QOS_NULLFUNC);
-
- qoshdr->qos_ctrl = cpu_to_le16(AC & IEEE80211_QOS_CTL_TID_MASK);
- if (bEosp)
- qoshdr->qos_ctrl |= cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
-
- pktlen = sizeof(struct ieee80211_qos_hdr);
- } else {
- pwlanhdr->frame_control |=
- cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC);
-
- pktlen = sizeof(struct ieee80211_hdr_3addr);
- }
-
- *pLength = pktlen;
-}
-
-static void ConstructProbeRsp(struct rtw_adapter *padapter, u8 *pframe,
- u32 *pLength, u8 *StaAddr, bool bHideSSID)
-{
- struct ieee80211_mgmt *mgmt;
- u8 *mac, *bssid;
- u32 pktlen;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- /* DBG_8723A("%s\n", __func__); */
-
- mgmt = (struct ieee80211_mgmt *)pframe;
-
- mac = myid(&padapter->eeprompriv);
- bssid = cur_network->MacAddress;
-
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
-
- mgmt->seq_ctrl = 0;
-
- memcpy(mgmt->da, StaAddr, ETH_ALEN);
- memcpy(mgmt->sa, mac, ETH_ALEN);
- memcpy(mgmt->bssid, bssid, ETH_ALEN);
-
- put_unaligned_le64(cur_network->tsf,
- &mgmt->u.probe_resp.timestamp);
- put_unaligned_le16(cur_network->beacon_interval,
- &mgmt->u.probe_resp.beacon_int);
- put_unaligned_le16(cur_network->capability,
- &mgmt->u.probe_resp.capab_info);
-
- pktlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-
- if (cur_network->IELength > MAX_IE_SZ)
- return;
-
- memcpy(mgmt->u.probe_resp.variable, cur_network->IEs,
- cur_network->IELength);
- pktlen += (cur_network->IELength);
-
- *pLength = pktlen;
-}
-
-/* */
- /* Description: Fill the reserved packets that the FW will use into the RSVD page. */
- /* For now we just put 4 types of packet into the rsvd page: */
- /* (1) Beacon, (2) PS-Poll, (3) Null data, (4) ProbeRsp. */
- /* Input: */
- /* bDLFinished - false: the first time, we send all the packets as one large packet to the Hw, */
- /* so we need to set the packet length to the total length. */
- /* true: the second time, we should send the first packet (default: beacon) */
- /* to the Hw again and set the length in the descriptor to the real beacon length. */
-/* 2009.10.15 by tynli. */
-static void SetFwRsvdPagePkt(struct rtw_adapter *padapter, bool bDLFinished)
-{
- struct hal_data_8723a *pHalData;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
- u32 NullDataLength, QosNullLength, BTQosNullLength;
- u8 *ReservedPagePacket;
- u8 PageNum, PageNeed, TxDescLen;
- u16 BufIndex;
- u32 TotalPacketLen;
- struct rsvdpage_loc RsvdPageLoc;
-
- DBG_8723A("%s\n", __func__);
-
- ReservedPagePacket = kzalloc(1000, GFP_KERNEL);
- if (ReservedPagePacket == NULL) {
- DBG_8723A("%s: alloc ReservedPagePacket fail!\n", __func__);
- return;
- }
-
- pHalData = GET_HAL_DATA(padapter);
- pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- TxDescLen = TXDESC_SIZE;
- PageNum = 0;
-
- /* 3 (1) beacon */
- BufIndex = TXDESC_OFFSET;
- ConstructBeacon(padapter, &ReservedPagePacket[BufIndex], &BeaconLength);
-
- /* When we count the first page size, we need to reserve the descriptor size for the RSVD */
- /* packet; it will be filled in front of the packet in TXPKTBUF. */
- PageNeed = (u8)PageNum_128(TxDescLen + BeaconLength);
- /* Reserve 2 pages for the beacon buffer. 2010.06.24. */
- if (PageNeed == 1)
- PageNeed += 1;
- PageNum += PageNeed;
- pHalData->FwRsvdPageStartOffset = PageNum;
-
- BufIndex += PageNeed*128;
-
- /* 3 (2) ps-poll */
- RsvdPageLoc.LocPsPoll = PageNum;
- ConstructPSPoll(padapter, &ReservedPagePacket[BufIndex], &PSPollLength);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed*128;
-
- /* 3 (3) null data */
- RsvdPageLoc.LocNullData = PageNum;
- ConstructNullFunctionData(padapter, &ReservedPagePacket[BufIndex],
- &NullDataLength,
- get_my_bssid23a(&pmlmeinfo->network),
- false, 0, 0, false);
- rtl8723a_fill_fake_txdesc(padapter,
- &ReservedPagePacket[BufIndex-TxDescLen],
- NullDataLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed*128;
-
- /* 3 (4) probe response */
- RsvdPageLoc.LocProbeRsp = PageNum;
- ConstructProbeRsp(
- padapter,
- &ReservedPagePacket[BufIndex],
- &ProbeRspLength,
- get_my_bssid23a(&pmlmeinfo->network),
- false);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed*128;
-
- /* 3 (5) Qos null data */
- RsvdPageLoc.LocQosNull = PageNum;
- ConstructNullFunctionData(
- padapter,
- &ReservedPagePacket[BufIndex],
- &QosNullLength,
- get_my_bssid23a(&pmlmeinfo->network),
- true, 0, 0, false);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed*128;
-
- /* 3 (6) BT Qos null data */
- RsvdPageLoc.LocBTQosNull = PageNum;
- ConstructNullFunctionData(
- padapter,
- &ReservedPagePacket[BufIndex],
- &BTQosNullLength,
- get_my_bssid23a(&pmlmeinfo->network),
- true, 0, 0, false);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true);
-
- TotalPacketLen = BufIndex + BTQosNullLength;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (pmgntframe == NULL)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->qsel = 0x10;
- pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
- memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
-
- rtl8723au_mgnt_xmit(padapter, pmgntframe);
-
- DBG_8723A("%s: Set RSVD page location to Fw\n", __func__);
- FillH2CCmd(padapter, RSVD_PAGE_EID, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
-
-exit:
- kfree(ReservedPagePacket);
-}
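The bookkeeping above places every reserved packet (TX descriptor plus frame) on a 128-byte page boundary and records its starting page number in RsvdPageLoc for the firmware. A standalone sketch of that accounting with invented frame lengths and an assumed 40-byte descriptor (illustrative only, not driver code):

#include <stdio.h>

#define PAGE_SZ		128u
#define TXDESC_SZ	40u	/* assumed descriptor size, for illustration only */

static unsigned int pages_needed(unsigned int len)
{
	return (len + PAGE_SZ - 1) / PAGE_SZ;	/* same rounding as PageNum_128 */
}

int main(void)
{
	/* invented lengths: beacon, ps-poll, null, probe rsp, qos null, BT qos null */
	unsigned int lens[6] = { 98, 16, 24, 120, 26, 26 };
	unsigned int page = 0, buf_index = TXDESC_SZ;
	unsigned int i, need;

	for (i = 0; i < 6; i++) {
		need = pages_needed(TXDESC_SZ + lens[i]);
		if (i == 0 && need == 1)
			need = 2;	/* the beacon always keeps at least two pages */
		printf("packet %u starts at page %u (buffer offset %u)\n",
		       i, page, buf_index);
		page += need;
		buf_index += need * PAGE_SZ;
	}
	return 0;
}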
-
-void rtl8723a_set_FwJoinBssReport_cmd(struct rtw_adapter *padapter, u8 mstatus)
-{
- struct joinbssrpt_parm JoinBssRptParm;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_8723A("%s mstatus(%x)\n", __func__, mstatus);
-
- if (mstatus == 1) {
- bool bRecover = false;
- u8 v8;
-
- /* We should set the AID, correct the TSF, and enable HW seq before setting JoinBssReport to the Fw in 88/92C. */
- /* Suggested by filen. Added by tynli. */
- rtl8723au_write16(padapter, REG_BCN_PSR_RPT,
- 0xC000|pmlmeinfo->aid);
- /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
- /* correct_TSF23a(padapter, pmlmeext); */
- /* Hw sequence enable by default. 2010.06.23. by tynli. */
- /* rtl8723au_write16(padapter, REG_NQOS_SEQ, ((pmlmeext->mgnt_seq+100)&0xFFF)); */
- /* rtl8723au_write8(padapter, REG_HWSEQ_CTRL, 0xFF); */
-
- /* set REG_CR bit 8 */
- v8 = rtl8723au_read8(padapter, REG_CR+1);
- v8 |= BIT(0); /* ENSWBCN */
- rtl8723au_write8(padapter, REG_CR+1, v8);
-
- /* Disable Hw protection for the time reserved for the Hw to send a beacon. */
- /* Fixes reserved page download failures caused by access collisions with the protection time. */
- /* 2010.05.11. Added by tynli. */
-/* SetBcnCtrlReg23a(padapter, 0, BIT(3)); */
-/* SetBcnCtrlReg23a(padapter, BIT(4), 0); */
- SetBcnCtrlReg23a(padapter, BIT(4), BIT(3));
-
- /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
- if (pHalData->RegFwHwTxQCtrl & BIT(6))
- bRecover = true;
-
- /* To tell Hw the packet is not a real beacon frame. */
- /* U1bTmp = rtl8723au_read8(padapter, REG_FWHW_TXQ_CTRL+2); */
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl & ~BIT(6));
- pHalData->RegFwHwTxQCtrl &= ~BIT(6);
- SetFwRsvdPagePkt(padapter, 0);
-
- /* 2010.05.11. Added by tynli. */
- SetBcnCtrlReg23a(padapter, BIT(3), BIT(4));
-
- /* Check whether another adapter wants to send beacons. */
- /* If so, the original value of 0x422[6] will be 1; we must check this to avoid */
- /* setting 0x422[6] to 0 after downloading the reserved page, which would prevent */
- /* the HW from sending the beacon. */
- /* 2010.06.23. Added by tynli. */
- if (bRecover) {
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl | BIT(6));
- pHalData->RegFwHwTxQCtrl |= BIT(6);
- }
-
- /* Clear CR[8] or the beacon packet will not be sent to the TxBuf anymore. */
- v8 = rtl8723au_read8(padapter, REG_CR+1);
- v8 &= ~BIT(0); /* ~ENSWBCN */
- rtl8723au_write8(padapter, REG_CR+1, v8);
- }
-
- JoinBssRptParm.OpMode = mstatus;
-
- FillH2CCmd(padapter, JOINBSS_RPT_EID, sizeof(JoinBssRptParm), (u8 *)&JoinBssRptParm);
-
-}
-
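The mstatus == 1 path above uses a save/clear/restore pattern on FWHW_TXQ_CTRL bit 6: remember whether the bit was set, clear it so the reserved-page frames are not treated as real beacons, download the pages, then restore the bit only if it had been set. A standalone sketch of that pattern with an invented register image and a placeholder download helper (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>

static void download_rsvd_pages(void)
{
	/* stand-in for the real reserved-page download */
}

static void rsvd_page_with_bit6_protect(uint8_t *txq_ctrl)
{
	bool recover = *txq_ctrl & 0x40;	/* remember whether bit 6 was set */

	*txq_ctrl &= (uint8_t)~0x40;	/* mark the following frames as not real beacons */
	download_rsvd_pages();

	if (recover)			/* put the bit back only if it was set before */
		*txq_ctrl |= 0x40;
}

int main(void)
{
	uint8_t reg = 0x40;	/* invented register image with bit 6 initially set */

	rsvd_page_with_bit6_protect(&reg);
	return reg == 0x40 ? 0 : 1;	/* bit 6 should be restored */
}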
-#ifdef CONFIG_8723AU_BT_COEXIST
-static void SetFwRsvdPagePkt_BTCoex(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- u8 fakemac[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x00};
- u32 NullDataLength, BTQosNullLength;
- u8 *ReservedPagePacket;
- u8 PageNum, PageNeed, TxDescLen;
- u16 BufIndex;
- u32 TotalPacketLen;
- struct rsvdpage_loc RsvdPageLoc;
-
- DBG_8723A("+%s\n", __func__);
-
- ReservedPagePacket = kzalloc(1024, GFP_KERNEL);
- if (ReservedPagePacket == NULL) {
- DBG_8723A("%s: alloc ReservedPagePacket fail!\n", __func__);
- return;
- }
-
- pHalData = GET_HAL_DATA(padapter);
- pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- TxDescLen = TXDESC_SIZE;
- PageNum = 0;
-
- /* 3 (1) beacon */
- BufIndex = TXDESC_OFFSET;
- /* skip Beacon Packet */
- PageNeed = 3;
-
- PageNum += PageNeed;
- pHalData->FwRsvdPageStartOffset = PageNum;
-
- BufIndex += PageNeed*128;
-
- /* 3 (3) null data */
- RsvdPageLoc.LocNullData = PageNum;
- ConstructNullFunctionData(
- padapter,
- &ReservedPagePacket[BufIndex],
- &NullDataLength,
- fakemac,
- false, 0, 0, false);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
-
- PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
- PageNum += PageNeed;
-
- BufIndex += PageNeed*128;
-
- /* 3 (6) BT Qos null data */
- RsvdPageLoc.LocBTQosNull = PageNum;
- ConstructNullFunctionData(
- padapter,
- &ReservedPagePacket[BufIndex],
- &BTQosNullLength,
- fakemac,
- true, 0, 0, false);
- rtl8723a_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true);
-
- TotalPacketLen = BufIndex + BTQosNullLength;
-
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (pmgntframe == NULL)
- goto exit;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->qsel = 0x10;
- pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
- memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
-
- rtl8723au_mgnt_xmit(padapter, pmgntframe);
-
- DBG_8723A("%s: Set RSVD page location to Fw\n", __func__);
- FillH2CCmd(padapter, RSVD_PAGE_EID, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
-
-exit:
- kfree(ReservedPagePacket);
-}
-
-void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- u8 bRecover = false;
-
- DBG_8723A("+%s\n", __func__);
-
- pHalData = GET_HAL_DATA(padapter);
-
- /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
- if (pHalData->RegFwHwTxQCtrl & BIT(6))
- bRecover = true;
-
- /* To tell Hw the packet is not a real beacon frame. */
- pHalData->RegFwHwTxQCtrl &= ~BIT(6);
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl);
- SetFwRsvdPagePkt_BTCoex(padapter);
-
- /* Check whether another adapter wants to send beacons. */
- /* If so, the original value of 0x422[6] will be 1; we must check this to avoid */
- /* setting 0x422[6] to 0 after downloading the reserved page, which would prevent */
- /* the HW from sending the beacon. */
- /* 2010.06.23. Added by tynli. */
- if (bRecover) {
- pHalData->RegFwHwTxQCtrl |= BIT(6);
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl);
- }
-}
-#endif
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_dm.c b/drivers/staging/rtl8723au/hal/rtl8723a_dm.c
deleted file mode 100644
index 1e831f2d1caf..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_dm.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/* */
-/* Description: */
-/* */
-/* This file is for 92CE/92CU dynamic mechanism only */
-/* */
-/* */
-/* */
-#define _RTL8723A_DM_C_
-
-/* */
-/* include files */
-/* */
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-/* */
-/* Global var */
-/* */
-
-static void dm_CheckPbcGPIO(struct rtw_adapter *padapter)
-{
- u8 tmp1byte;
- u8 bPbcPressed = false;
-
- if (!padapter->registrypriv.hw_wps_pbc)
- return;
-
- tmp1byte = rtl8723au_read8(padapter, GPIO_IO_SEL);
- tmp1byte |= (HAL_8192C_HW_GPIO_WPS_BIT);
- /* enable GPIO[2] as output mode */
- rtl8723au_write8(padapter, GPIO_IO_SEL, tmp1byte);
-
- tmp1byte &= ~(HAL_8192C_HW_GPIO_WPS_BIT);
- /* reset the floating voltage level */
- rtl8723au_write8(padapter, GPIO_IN, tmp1byte);
-
- tmp1byte = rtl8723au_read8(padapter, GPIO_IO_SEL);
- tmp1byte &= ~(HAL_8192C_HW_GPIO_WPS_BIT);
- /* enable GPIO[2] as input mode */
- rtl8723au_write8(padapter, GPIO_IO_SEL, tmp1byte);
-
- tmp1byte = rtl8723au_read8(padapter, GPIO_IN);
-
- if (tmp1byte == 0xff)
- return;
-
- if (tmp1byte&HAL_8192C_HW_GPIO_WPS_BIT)
- bPbcPressed = true;
-
- if (bPbcPressed) {
- /* Here we only set bPbcPressed to true */
- /* After trigger PBC, the variable will be set to false */
- DBG_8723A("CheckPbcGPIO - PBC is pressed\n");
-
- if (padapter->pid[0] == 0) {
- /* 0 is the default value; it means the application
- * monitoring the HW PBC didn't provide its pid to the driver.
- */
- return;
- }
-
- kill_pid(find_vpid(padapter->pid[0]), SIGUSR1, 1);
- }
-}
-
-/* Initialize GPIO setting registers */
-/* functions */
-
-void rtl8723a_init_dm_priv(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- u8 cut_ver, fab_ver;
-
- memset(pdmpriv, 0, sizeof(struct dm_priv));
- memset(pDM_Odm, 0, sizeof(*pDM_Odm));
-
- pDM_Odm->Adapter = Adapter;
-
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8723A);
-
- if (IS_8723A_A_CUT(pHalData->VersionID)) {
- fab_ver = ODM_UMC;
- cut_ver = ODM_CUT_A;
- } else if (IS_8723A_B_CUT(pHalData->VersionID)) {
- fab_ver = ODM_UMC;
- cut_ver = ODM_CUT_B;
- } else {
- fab_ver = ODM_TSMC;
- cut_ver = ODM_CUT_A;
- }
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_FAB_VER, fab_ver);
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_CUT_VER, cut_ver);
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(pHalData->VersionID));
-
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_BOARD_TYPE, pHalData->BoardType);
-
- if (pHalData->BoardType == BOARD_USB_High_PA) {
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_EXT_LNA, true);
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_EXT_PA, true);
- }
- ODM_CmnInfoInit23a(pDM_Odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
-}
-
-static void Update_ODM_ComInfo_8723a(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- int i;
- pdmpriv->InitODMFlag = 0;
- /* Pointer reference */
- rtl8723a_odm_support_ability_set(Adapter, DYNAMIC_ALL_FUNC_ENABLE);
-
- for (i = 0; i < NUM_STA; i++)
- ODM_CmnInfoPtrArrayHook23a(pDM_Odm, ODM_CMNINFO_STA_STATUS, i, NULL);
-}
-
-void rtl8723a_InitHalDm(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- u8 i;
-
- Update_ODM_ComInfo_8723a(Adapter);
- ODM23a_DMInit(pDM_Odm);
- /* Save REG_INIDATA_RATE_SEL value for TXDESC. */
- for (i = 0; i < 32; i++)
- pdmpriv->INIDATA_RATE[i] = rtl8723au_read8(Adapter, REG_INIDATA_RATE_SEL+i) & 0x3f;
-}
-
-void
-rtl8723a_HalDmWatchDog(
- struct rtw_adapter *Adapter
- )
-{
- bool bFwCurrentInPSMode = false;
- bool bFwPSAwake = true;
- u8 bLinked = false;
- u8 hw_init_completed = false;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
-
- hw_init_completed = Adapter->hw_init_completed;
-
- if (hw_init_completed == false)
- goto skip_dm;
-
- bFwCurrentInPSMode = Adapter->pwrctrlpriv.bFwCurrentInPSMode;
- bFwPSAwake = rtl8723a_get_fwlps_rf_on(Adapter);
-
- if (!bFwCurrentInPSMode && bFwPSAwake) {
- /* Read REG_INIDATA_RATE_SEL value for TXDESC. */
- if (check_fwstate(&Adapter->mlmepriv, WIFI_STATION_STATE)) {
- pdmpriv->INIDATA_RATE[0] = rtl8723au_read8(Adapter, REG_INIDATA_RATE_SEL) & 0x3f;
- } else {
- u8 i;
- for (i = 1 ; i < (Adapter->stapriv.asoc_sta_count + 1); i++)
- pdmpriv->INIDATA_RATE[i] = rtl8723au_read8(Adapter, (REG_INIDATA_RATE_SEL+i)) & 0x3f;
- }
- }
-
- /* ODM */
- if (rtw_linked_check(Adapter))
- bLinked = true;
-
- ODM_CmnInfoUpdate23a(&pHalData->odmpriv, ODM_CMNINFO_LINK, bLinked);
- ODM_DMWatchdog23a(Adapter);
-
-skip_dm:
-
- /* Check GPIO to determine current RF on/off and Pbc status. */
- /* Check Hardware Radio ON/OFF or not */
- dm_CheckPbcGPIO(Adapter);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
deleted file mode 100644
index 1ea0af499ce9..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ /dev/null
@@ -1,2076 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _HAL_INIT_C_
-
-#include <linux/firmware.h>
-#include <drv_types.h>
-#include <rtw_efuse.h>
-
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-static void _FWDownloadEnable(struct rtw_adapter *padapter, bool enable)
-{
- u8 tmp;
-
- if (enable) {
- /* 8051 enable */
- tmp = rtl8723au_read8(padapter, REG_SYS_FUNC_EN + 1);
- rtl8723au_write8(padapter, REG_SYS_FUNC_EN + 1, tmp | 0x04);
-
- /* MCU firmware download enable. */
- tmp = rtl8723au_read8(padapter, REG_MCUFWDL);
- rtl8723au_write8(padapter, REG_MCUFWDL, tmp | 0x01);
-
- /* 8051 reset */
- tmp = rtl8723au_read8(padapter, REG_MCUFWDL + 2);
- rtl8723au_write8(padapter, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- /* MCU firmware download disable. */
- tmp = rtl8723au_read8(padapter, REG_MCUFWDL);
- rtl8723au_write8(padapter, REG_MCUFWDL, tmp & 0xfe);
-
- /* Reserved for fw extension. */
- rtl8723au_write8(padapter, REG_MCUFWDL + 1, 0x00);
- }
-}
-
-static int
-_PageWrite(struct rtw_adapter *padapter, u32 page, void *buffer, u32 size)
-{
- u8 value8;
- u8 u8Page = (u8) (page & 0x07);
-
- if (size > MAX_PAGE_SIZE)
- return _FAIL;
-
- value8 = (rtl8723au_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page;
- rtl8723au_write8(padapter, REG_MCUFWDL + 2, value8);
-
- return rtl8723au_writeN(padapter, FW_8723A_START_ADDRESS, size, buffer);
-}
-
-static int _WriteFW(struct rtw_adapter *padapter, void *buffer, u32 size)
-{
- /* Since we need to dynamically decide the fw download method, we
- call this function to get the chip version. */
- /* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
- int ret = _SUCCESS;
- u32 pageNums, remainSize;
- u32 page, offset;
- u8 *bufferPtr = (u8 *) buffer;
-
- pageNums = size / MAX_PAGE_SIZE;
- /* RT_ASSERT((pageNums <= 4),
- ("Page numbers should not greater then 4 \n")); */
- remainSize = size % MAX_PAGE_SIZE;
-
- for (page = 0; page < pageNums; page++) {
- offset = page * MAX_PAGE_SIZE;
- ret = _PageWrite(padapter, page, bufferPtr + offset,
- MAX_PAGE_SIZE);
-
- if (ret == _FAIL)
- goto exit;
- }
- if (remainSize) {
- offset = pageNums * MAX_PAGE_SIZE;
- page = pageNums;
- ret = _PageWrite(padapter, page, bufferPtr + offset,
- remainSize);
-
- if (ret == _FAIL)
- goto exit;
- }
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "_WriteFW Done- for Normal chip.\n");
-
-exit:
- return ret;
-}
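The loop above writes the firmware image page by page: full MAX_PAGE_SIZE chunks first, then one final write for the remainder. The same arithmetic in isolation, with an invented image length and an assumed 4096-byte page size (illustrative only, not driver code):

#include <stdio.h>

#define FW_PAGE_SIZE	4096u	/* assumed MAX_PAGE_SIZE, for illustration only */

int main(void)
{
	unsigned int size = 10000;	/* invented firmware image length */
	unsigned int full_pages = size / FW_PAGE_SIZE;
	unsigned int remainder = size % FW_PAGE_SIZE;
	unsigned int page;

	for (page = 0; page < full_pages; page++)
		printf("page %u: offset %u, length %u\n",
		       page, page * FW_PAGE_SIZE, FW_PAGE_SIZE);
	if (remainder)
		printf("page %u: offset %u, length %u\n",
		       full_pages, full_pages * FW_PAGE_SIZE, remainder);
	return 0;
}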
-
-static int _FWFreeToGo(struct rtw_adapter *padapter)
-{
- u32 counter = 0;
- u32 value32;
-
- /* polling CheckSum report */
- do {
- value32 = rtl8723au_read32(padapter, REG_MCUFWDL);
- if (value32 & FWDL_ChkSum_rpt)
- break;
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- if (counter >= POLLING_READY_TIMEOUT_COUNT) {
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- "%s: chksum report fail! REG_MCUFWDL:0x%08x\n",
- __func__, value32);
- return _FAIL;
- }
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "%s: Checksum report OK! REG_MCUFWDL:0x%08x\n", __func__,
- value32);
-
- value32 = rtl8723au_read32(padapter, REG_MCUFWDL);
- value32 |= MCUFWDL_RDY;
- value32 &= ~WINTINI_RDY;
- rtl8723au_write32(padapter, REG_MCUFWDL, value32);
-
- /* polling for FW ready */
- counter = 0;
- do {
- value32 = rtl8723au_read32(padapter, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "%s: Polling FW ready success!! REG_MCUFWDL:0x%08x\n",
- __func__, value32);
- return _SUCCESS;
- }
- udelay(5);
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- "%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
- __func__, value32);
- return _FAIL;
-}
-
-#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
-
-void rtl8723a_FirmwareSelfReset(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 u1bTmp;
- u8 Delay = 100;
-
- if (!(IS_FW_81xxC(padapter) &&
- ((pHalData->FirmwareVersion < 0x21) ||
- (pHalData->FirmwareVersion == 0x21 &&
- pHalData->FirmwareSubVersion < 0x01)))) {
- /* after 88C Fw v33.1 */
- /* 0x1cf = 0x20. Inform 8051 to reset. 2009.12.25. tynli_test */
- rtl8723au_write8(padapter, REG_HMETFR + 3, 0x20);
-
- u1bTmp = rtl8723au_read8(padapter, REG_SYS_FUNC_EN + 1);
- while (u1bTmp & BIT(2)) {
- Delay--;
- if (Delay == 0)
- break;
- udelay(50);
- u1bTmp = rtl8723au_read8(padapter, REG_SYS_FUNC_EN + 1);
- }
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "-%s: 8051 reset success (%d)\n", __func__,
- Delay);
-
- if ((Delay == 0)) {
- /* force firmware reset */
- u1bTmp = rtl8723au_read8(padapter, REG_SYS_FUNC_EN + 1);
- rtl8723au_write8(padapter, REG_SYS_FUNC_EN + 1,
- u1bTmp & ~BIT(2));
- }
- }
-}
-
-/* */
-/* Description: */
-/* Download 8192C firmware code. */
-/* */
-/* */
-int rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
-{
- int rtStatus = _SUCCESS;
- u8 writeFW_retry = 0;
- unsigned long fwdl_start_time;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- struct device *device = dvobj_to_dev(dvobj);
- struct rt_8723a_firmware_hdr *pFwHdr = NULL;
- const struct firmware *fw;
- char *fw_name;
- u8 *firmware_buf = NULL;
- u8 *buf;
- int fw_size;
- static int log_version;
-
- RT_TRACE(_module_hal_init_c_, _drv_info_, "+%s\n", __func__);
-
- if (IS_8723A_A_CUT(pHalData->VersionID)) {
- fw_name = "rtlwifi/rtl8723aufw_A.bin";
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "rtl8723a_FirmwareDownload: R8723FwImageArray_UMC for RTL8723A A CUT\n");
- } else if (IS_8723A_B_CUT(pHalData->VersionID)) {
- /* WLAN Fw. */
- if (padapter->registrypriv.wifi_spec == 1) {
- fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
- DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithoutBT for "
- "RTL8723A B CUT\n");
- } else {
- if (rtl8723a_BT_coexist(padapter)) {
- fw_name = "rtlwifi/rtl8723aufw_B.bin";
- DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithBT "
- "for RTL8723A B CUT\n");
- } else {
- fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
- DBG_8723A(" Rtl8723_FwUMCBCutImageArrayWithout "
- "BT for RTL8723A B CUT\n");
- }
- }
- } else {
- /* <Roger_TODO> We should download proper RAM Code here
- to match the ROM code. */
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- "%s: unknown version!\n", __func__);
- rtStatus = _FAIL;
- goto Exit;
- }
-
- pr_info("rtl8723au: Loading firmware %s\n", fw_name);
- if (request_firmware(&fw, fw_name, device)) {
- pr_err("rtl8723au: request_firmware load failed\n");
- rtStatus = _FAIL;
- goto Exit;
- }
- if (!fw) {
- pr_err("rtl8723au: Firmware %s not available\n", fw_name);
- rtStatus = _FAIL;
- goto Exit;
- }
- firmware_buf = kmemdup(fw->data, fw->size, GFP_KERNEL);
- fw_size = fw->size;
- release_firmware(fw);
- if (!firmware_buf) {
- rtStatus = _FAIL;
- goto Exit;
- }
- buf = firmware_buf;
-
- /* To Check Fw header. Added by tynli. 2009.12.04. */
- pFwHdr = (struct rt_8723a_firmware_hdr *)firmware_buf;
-
- pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
- pHalData->FirmwareSubVersion = pFwHdr->Subversion;
- pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
-
- DBG_8723A("%s: fw_ver =%d fw_subver =%d sig = 0x%x\n",
- __func__, pHalData->FirmwareVersion,
- pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
-
- if (!log_version++)
- pr_info("%sFirmware Version %d, SubVersion %d, Signature "
- "0x%x\n", DRIVER_PREFIX, pHalData->FirmwareVersion,
- pHalData->FirmwareSubVersion,
- pHalData->FirmwareSignature);
-
- if (IS_FW_HEADER_EXIST(pFwHdr)) {
- /* Shift 32 bytes for FW header */
- buf = buf + 32;
- fw_size = fw_size - 32;
- }
-
- /* Suggested by Filen. If the 8051 is running RAM code, the driver should
- tell the Fw to reset itself, */
- /* or the Fw download will fail. 2010.02.01. by tynli. */
- if (rtl8723au_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) {
- /* 8051 RAM code */
- rtl8723a_FirmwareSelfReset(padapter);
- rtl8723au_write8(padapter, REG_MCUFWDL, 0x00);
- }
-
- _FWDownloadEnable(padapter, true);
- fwdl_start_time = jiffies;
- while (1) {
- /* reset the FWDL chksum */
- rtl8723au_write8(padapter, REG_MCUFWDL,
- rtl8723au_read8(padapter, REG_MCUFWDL) |
- FWDL_ChkSum_rpt);
-
- rtStatus = _WriteFW(padapter, buf, fw_size);
-
- if (rtStatus == _SUCCESS ||
- (jiffies_to_msecs(jiffies - fwdl_start_time) > 500 &&
- writeFW_retry++ >= 3))
- break;
-
- DBG_8723A("%s writeFW_retry:%u, time after fwdl_start_time:"
- "%ums\n", __func__, writeFW_retry,
- jiffies_to_msecs(jiffies - fwdl_start_time));
- }
- _FWDownloadEnable(padapter, false);
- if (_SUCCESS != rtStatus) {
- DBG_8723A("DL Firmware failed!\n");
- goto Exit;
- }
-
- rtStatus = _FWFreeToGo(padapter);
- if (_SUCCESS != rtStatus) {
- RT_TRACE(_module_hal_init_c_, _drv_err_,
- "DL Firmware failed!\n");
- goto Exit;
- }
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "Firmware is ready to run!\n");
-
-Exit:
- kfree(firmware_buf);
- return rtStatus;
-}
-
-void rtl8723a_InitializeFirmwareVars(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* Init Fw LPS related. */
- padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
-
- /* Init H2C counter. by tynli. 2009.12.09. */
- pHalData->LastHMEBoxNum = 0;
-}
-
-/* */
-/* Efuse related code */
-/* */
-static u8
-hal_EfuseSwitchToBank(struct rtw_adapter *padapter, u8 bank)
-{
- u8 bRet = false;
- u32 value32 = 0;
-
- DBG_8723A("%s: Efuse switch bank to %d\n", __func__, bank);
- value32 = rtl8723au_read32(padapter, EFUSE_TEST);
- bRet = true;
- switch (bank) {
- case 0:
- value32 = (value32 & ~EFUSE_SEL_MASK) |
- EFUSE_SEL(EFUSE_WIFI_SEL_0);
- break;
- case 1:
- value32 = (value32 & ~EFUSE_SEL_MASK) |
- EFUSE_SEL(EFUSE_BT_SEL_0);
- break;
- case 2:
- value32 = (value32 & ~EFUSE_SEL_MASK) |
- EFUSE_SEL(EFUSE_BT_SEL_1);
- break;
- case 3:
- value32 = (value32 & ~EFUSE_SEL_MASK) |
- EFUSE_SEL(EFUSE_BT_SEL_2);
- break;
- default:
- value32 = (value32 & ~EFUSE_SEL_MASK) |
- EFUSE_SEL(EFUSE_WIFI_SEL_0);
- bRet = false;
- break;
- }
- rtl8723au_write32(padapter, EFUSE_TEST, value32);
-
- return bRet;
-}
-
-static void
-hal_ReadEFuse_WiFi(struct rtw_adapter *padapter,
- u16 _offset, u16 _size_byte, u8 *pbuf)
-{
- u8 *efuseTbl = NULL;
- u16 eFuse_Addr = 0;
- u8 offset, wden;
- u8 efuseHeader, efuseExtHdr, efuseData;
- u16 i, total, used;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* Do NOT exceed the total size of the EFuse table.
- Added by Roger, 2008.11.10. */
- if ((_offset + _size_byte) > EFUSE_MAP_LEN_8723A) {
- DBG_8723A("%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
- __func__, _offset, _size_byte);
- return;
- }
-
- efuseTbl = kmalloc(EFUSE_MAP_LEN_8723A, GFP_KERNEL);
- if (!efuseTbl)
- return;
- /* 0xff is the efuse default value, not 0x00. */
- memset(efuseTbl, 0xFF, EFUSE_MAP_LEN_8723A);
-
- /* switch bank back to bank 0 for later BT and wifi use. */
- hal_EfuseSwitchToBank(padapter, 0);
-
- while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
- ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseHeader);
- if (efuseHeader == 0xFF) {
- DBG_8723A("%s: data end at address =%#x\n", __func__,
- eFuse_Addr);
- break;
- }
-
- /* Check PG header for section num. */
- if (EXT_HEADER(efuseHeader)) { /* extended header */
- offset = GET_HDR_OFFSET_2_0(efuseHeader);
-
- ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseExtHdr);
- if (ALL_WORDS_DISABLED(efuseExtHdr))
- continue;
-
- offset |= ((efuseExtHdr & 0xF0) >> 1);
- wden = efuseExtHdr & 0x0F;
- } else {
- offset = (efuseHeader >> 4) & 0x0f;
- wden = efuseHeader & 0x0f;
- }
-
- if (offset < EFUSE_MAX_SECTION_8723A) {
- u16 addr;
- /* Get word enable value from PG header */
-
- addr = offset * PGPKT_DATA_SIZE;
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(wden & (0x01 << i))) {
- ReadEFuseByte23a(padapter, eFuse_Addr++,
- &efuseData);
- efuseTbl[addr] = efuseData;
-
- ReadEFuseByte23a(padapter, eFuse_Addr++,
- &efuseData);
- efuseTbl[addr + 1] = efuseData;
- }
- addr += 2;
- }
- } else {
- DBG_8723A(KERN_ERR "%s: offset(%d) is illegal!!\n",
- __func__, offset);
- eFuse_Addr += Efuse_CalculateWordCnts23a(wden) * 2;
- }
- }
-
- /* Copy from Efuse map to output pointer memory!!! */
- for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuseTbl[_offset + i];
-
- /* Calculate Efuse utilization */
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total);
- used = eFuse_Addr - 1;
- pHalData->EfuseUsedBytes = used;
-
- kfree(efuseTbl);
-}
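The parsing above handles both one-byte and two-byte (extended) PG headers. A standalone sketch of that decoding with invented header bytes; since EXT_HEADER() and GET_HDR_OFFSET_2_0() are not defined in this hunk, the sketch assumes they test for a low nibble of 0xF and return header bits 7-5 respectively (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

static void decode_pg_header(uint8_t hdr, uint8_t ext_hdr)
{
	unsigned int offset, wden;

	if ((hdr & 0x0f) == 0x0f) {		/* assumed EXT_HEADER() test */
		offset = (hdr & 0xe0) >> 5;	/* assumed GET_HDR_OFFSET_2_0() */
		offset |= (ext_hdr & 0xf0) >> 1; /* upper offset bits, as in the code above */
		wden = ext_hdr & 0x0f;
	} else {
		offset = (hdr >> 4) & 0x0f;
		wden = hdr & 0x0f;
	}
	printf("section offset %u, word-enable mask 0x%x\n", offset, wden);
}

int main(void)
{
	decode_pg_header(0x4f, 0x3c);	/* invented extended header */
	decode_pg_header(0x31, 0x00);	/* invented one-byte header */
	return 0;
}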
-
-static void
-hal_ReadEFuse_BT(struct rtw_adapter *padapter,
- u16 _offset, u16 _size_byte, u8 *pbuf)
-{
- u8 *efuseTbl;
- u8 bank;
- u16 eFuse_Addr;
- u8 efuseHeader, efuseExtHdr, efuseData;
- u8 offset, wden;
- u16 i, total, used;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* Do NOT exceed the total size of the EFuse table.
- Added by Roger, 2008.11.10. */
- if ((_offset + _size_byte) > EFUSE_BT_MAP_LEN) {
- DBG_8723A("%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
- __func__, _offset, _size_byte);
- return;
- }
-
- efuseTbl = kmalloc(EFUSE_BT_MAP_LEN, GFP_KERNEL);
- if (!efuseTbl)
- return;
- /* 0xff is the efuse default value, not 0x00. */
- memset(efuseTbl, 0xFF, EFUSE_BT_MAP_LEN);
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK, &total);
-
- for (bank = 1; bank < EFUSE_MAX_BANK; bank++) {
- if (hal_EfuseSwitchToBank(padapter, bank) == false) {
- DBG_8723A("%s: hal_EfuseSwitchToBank Fail!!\n",
- __func__);
- goto exit;
- }
-
- eFuse_Addr = 0;
-
- while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
- ReadEFuseByte23a(padapter, eFuse_Addr++, &efuseHeader);
- if (efuseHeader == 0xFF)
- break;
-
- /* Check PG header for section num. */
- if (EXT_HEADER(efuseHeader)) { /* extended header */
- offset = GET_HDR_OFFSET_2_0(efuseHeader);
-
- ReadEFuseByte23a(padapter, eFuse_Addr++,
- &efuseExtHdr);
- if (ALL_WORDS_DISABLED(efuseExtHdr))
- continue;
-
- offset |= ((efuseExtHdr & 0xF0) >> 1);
- wden = efuseExtHdr & 0x0F;
- } else {
- offset = (efuseHeader >> 4) & 0x0f;
- wden = efuseHeader & 0x0f;
- }
-
- if (offset < EFUSE_BT_MAX_SECTION) {
- u16 addr;
-
- /* Get word enable value from PG header */
-
- addr = offset * PGPKT_DATA_SIZE;
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in
- the section */
- if (!(wden & (0x01 << i))) {
- ReadEFuseByte23a(padapter,
- eFuse_Addr++,
- &efuseData);
- efuseTbl[addr] = efuseData;
-
- ReadEFuseByte23a(padapter,
- eFuse_Addr++,
- &efuseData);
- efuseTbl[addr + 1] = efuseData;
- }
- addr += 2;
- }
- } else {
- DBG_8723A(KERN_ERR
- "%s: offset(%d) is illegal!!\n",
- __func__, offset);
- eFuse_Addr += Efuse_CalculateWordCnts23a(wden) * 2;
- }
- }
-
- if ((eFuse_Addr - 1) < total) {
- DBG_8723A("%s: bank(%d) data end at %#x\n",
- __func__, bank, eFuse_Addr - 1);
- break;
- }
- }
-
- /* switch bank back to bank 0 for later BT and wifi use. */
- hal_EfuseSwitchToBank(padapter, 0);
-
- /* Copy from Efuse map to output pointer memory!!! */
- for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuseTbl[_offset + i];
-
- /* */
- /* Calculate Efuse utilization. */
- /* */
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total);
- used = (EFUSE_BT_REAL_BANK_CONTENT_LEN * (bank - 1)) + eFuse_Addr - 1;
- pHalData->BTEfuseUsedBytes = used;
-
-exit:
- kfree(efuseTbl);
-}
-
-void
-rtl8723a_readefuse(struct rtw_adapter *padapter,
- u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf)
-{
- if (efuseType == EFUSE_WIFI)
- hal_ReadEFuse_WiFi(padapter, _offset, _size_byte, pbuf);
- else
- hal_ReadEFuse_BT(padapter, _offset, _size_byte, pbuf);
-}
-
-u16 rtl8723a_EfuseGetCurrentSize_WiFi(struct rtw_adapter *padapter)
-{
- u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
- u8 efuse_data, word_cnts = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- efuse_addr = pHalData->EfuseUsedBytes;
-
- DBG_8723A("%s: start_efuse_addr = 0x%X\n", __func__, efuse_addr);
-
- /* switch bank back to bank 0 for later BT and wifi use. */
- hal_EfuseSwitchToBank(padapter, 0);
-
- while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_OneByteRead23a(padapter, efuse_addr, &efuse_data) ==
- _FAIL) {
- DBG_8723A(KERN_ERR "%s: efuse_OneByteRead23a Fail! "
- "addr = 0x%X !!\n", __func__, efuse_addr);
- break;
- }
-
- if (efuse_data == 0xFF)
- break;
-
- if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
- efuse_addr++;
- efuse_OneByteRead23a(padapter, efuse_addr, &efuse_data);
- if (ALL_WORDS_DISABLED(efuse_data))
- continue;
-
- hoffset |= ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
-
- word_cnts = Efuse_CalculateWordCnts23a(hworden);
- efuse_addr += (word_cnts * 2) + 1;
- }
-
- pHalData->EfuseUsedBytes = efuse_addr;
-
- DBG_8723A("%s: CurrentSize =%d\n", __func__, efuse_addr);
-
- return efuse_addr;
-}
-
-u16 rtl8723a_EfuseGetCurrentSize_BT(struct rtw_adapter *padapter)
-{
- u16 btusedbytes;
- u16 efuse_addr;
- u8 bank, startBank;
- u8 hoffset = 0, hworden = 0;
- u8 efuse_data, word_cnts = 0;
- u16 retU2 = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- btusedbytes = pHalData->BTEfuseUsedBytes;
-
- efuse_addr = (u16) ((btusedbytes % EFUSE_BT_REAL_BANK_CONTENT_LEN));
- startBank = (u8) (1 + (btusedbytes / EFUSE_BT_REAL_BANK_CONTENT_LEN));
-
- DBG_8723A("%s: start from bank =%d addr = 0x%X\n", __func__, startBank,
- efuse_addr);
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_BT,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK, &retU2);
-
- for (bank = startBank; bank < EFUSE_MAX_BANK; bank++) {
- if (hal_EfuseSwitchToBank(padapter, bank) == false) {
- DBG_8723A(KERN_ERR "%s: switch bank(%d) Fail!!\n",
- __func__, bank);
- bank = EFUSE_MAX_BANK;
- break;
- }
-
- /* Only when the bank is switched do we have to reset
- efuse_addr. */
- if (bank != startBank)
- efuse_addr = 0;
-
- while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_OneByteRead23a(padapter, efuse_addr,
- &efuse_data) == _FAIL) {
- DBG_8723A(KERN_ERR "%s: efuse_OneByteRead23a Fail!"
- " addr = 0x%X !!\n",
- __func__, efuse_addr);
- bank = EFUSE_MAX_BANK;
- break;
- }
-
- if (efuse_data == 0xFF)
- break;
-
- if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
- efuse_addr++;
- efuse_OneByteRead23a(padapter, efuse_addr,
- &efuse_data);
- if (ALL_WORDS_DISABLED(efuse_data)) {
- efuse_addr++;
- continue;
- }
-
- hoffset |= ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
- word_cnts = Efuse_CalculateWordCnts23a(hworden);
- /* read next header */
- efuse_addr += (word_cnts * 2) + 1;
- }
-
- /* Check if we need to check next bank efuse */
- if (efuse_addr < retU2)
- break; /* don't need to check next bank. */
- }
-
- retU2 = ((bank - 1) * EFUSE_BT_REAL_BANK_CONTENT_LEN) + efuse_addr;
- pHalData->BTEfuseUsedBytes = retU2;
-
- DBG_8723A("%s: CurrentSize =%d\n", __func__, retU2);
- return retU2;
-}
-
-void rtl8723a_read_chip_version(struct rtw_adapter *padapter)
-{
- u32 value32;
- struct hal_version ChipVersion;
- struct hal_data_8723a *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
-
- value32 = rtl8723au_read32(padapter, REG_SYS_CFG);
- ChipVersion.ICType = CHIP_8723A;
- ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
- pHalData->rf_type = RF_1T1R;
- ChipVersion.VendorType =
- ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
- ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
-
- /* For regulator mode. by tynli. 2011.01.14 */
- pHalData->RegulatorMode = ((value32 & SPS_SEL) ?
- RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
-
- value32 = rtl8723au_read32(padapter, REG_GPIO_OUTSTS);
- /* ROM code version. */
- ChipVersion.ROMVer = (value32 & RF_RL_ID) >> 20;
-
- /* For multi-function consideration. Added by Roger, 2010.10.06. */
- pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
- value32 = rtl8723au_read32(padapter, REG_MULTI_FUNC_CTRL);
- pHalData->MultiFunc |=
- ((value32 & WL_FUNC_EN) ? RT_MULTI_FUNC_WIFI : 0);
- pHalData->MultiFunc |= ((value32 & BT_FUNC_EN) ? RT_MULTI_FUNC_BT : 0);
- pHalData->MultiFunc |=
- ((value32 & GPS_FUNC_EN) ? RT_MULTI_FUNC_GPS : 0);
- pHalData->PolarityCtl =
- ((value32 & WL_HWPDN_SL) ? RT_POLARITY_HIGH_ACT :
- RT_POLARITY_LOW_ACT);
- pHalData->VersionID = ChipVersion;
-
- MSG_8723A("RF_Type is %x!!\n", pHalData->rf_type);
-}
-
-/* */
-/* */
-/* 20100209 Joseph: */
-/* This function is used only for 92C to set REG_BCN_CTRL(0x550) register. */
- /* We simply keep the value of the register in the variable
- pHalData->RegBcnCtrlVal and then operate on */
- /* the register value via atomic operations. */
- /* This prevents a race condition when setting this register. */
-/* The value of pHalData->RegBcnCtrlVal is initialized in
- HwConfigureRTL8192CE() function. */
-/* */
-void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits)
-{
- u8 val8;
-
- val8 = rtl8723au_read8(padapter, REG_BCN_CTRL);
- val8 |= SetBits;
- val8 &= ~ClearBits;
-
- rtl8723au_write8(padapter, REG_BCN_CTRL, val8);
-}
-
-void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter)
-{
- rtl8723au_write16(padapter, REG_BCN_CTRL, 0x1010);
-
- /* TODO: Remove these magic numbers */
- rtl8723au_write16(padapter, REG_TBTT_PROHIBIT, 0x6404); /* ms */
- /* Firmware will control REG_DRVERLYINT when power saving is enabled, */
- /* so don't set this register in STA mode. */
- if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == false)
- rtl8723au_write8(padapter, REG_DRVERLYINT,
- DRIVER_EARLY_INT_TIME);
- /* 2ms */
- rtl8723au_write8(padapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
-
- /* Suggested by designer timchen. Change the beacon AIFS to the
- largest number because the test chip does not contend before
- sending a beacon. by tynli. 2009.11.03 */
- rtl8723au_write16(padapter, REG_BCNTCFG, 0x660F);
-}
-
-static void ResumeTxBeacon(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* 2010.03.01. Marked by tynli. No need to schedule a workitem because
- we record the value */
- /* that would be read from the register in a global variable. */
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, "+ResumeTxBeacon\n");
-
- pHalData->RegFwHwTxQCtrl |= BIT(6);
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl);
- rtl8723au_write8(padapter, REG_TBTT_PROHIBIT + 1, 0xff);
- pHalData->RegReg542 |= BIT(0);
- rtl8723au_write8(padapter, REG_TBTT_PROHIBIT + 2, pHalData->RegReg542);
-}
-
-static void StopTxBeacon(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* 2010.03.01. Marked by tynli. No need to schedule a workitem because
- we record the value */
- /* that would be read from the register in a global variable. */
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, "+StopTxBeacon\n");
-
- pHalData->RegFwHwTxQCtrl &= ~BIT(6);
- rtl8723au_write8(padapter, REG_FWHW_TXQ_CTRL + 2,
- pHalData->RegFwHwTxQCtrl);
- rtl8723au_write8(padapter, REG_TBTT_PROHIBIT + 1, 0x64);
- pHalData->RegReg542 &= ~BIT(0);
- rtl8723au_write8(padapter, REG_TBTT_PROHIBIT + 2, pHalData->RegReg542);
-}
-
-static void _BeaconFunctionEnable(struct rtw_adapter *padapter, u8 Enable,
- u8 Linked)
-{
- SetBcnCtrlReg23a(padapter, DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_BCNQ_SUB,
- 0);
- rtl8723au_write8(padapter, REG_RD_CTRL + 1, 0x6F);
-}
-
-void rtl8723a_SetBeaconRelatedRegisters(struct rtw_adapter *padapter)
-{
- u32 value32;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* reset TSF, enable TSF update, correct TSF on beacon */
-
- /* REG_BCN_INTERVAL */
- /* REG_BCNDMATIM */
- /* REG_ATIMWND */
- /* REG_TBTT_PROHIBIT */
- /* REG_DRVERLYINT */
- /* REG_BCN_MAX_ERR */
- /* REG_BCNTCFG (0x510) */
- /* REG_DUAL_TSF_RST */
- /* REG_BCN_CTRL (0x550) */
-
- /* */
- /* ATIM window */
- /* */
- rtl8723au_write16(padapter, REG_ATIMWND, 2);
-
- /* */
- /* Beacon interval (in unit of TU). */
- /* */
- rtl8723au_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
-
- rtl8723a_InitBeaconParameters(padapter);
-
- rtl8723au_write8(padapter, REG_SLOT, 0x09);
-
- /* */
- /* Reset TSF Timer to zero, added by Roger. 2008.06.24 */
- /* */
- value32 = rtl8723au_read32(padapter, REG_TCR);
- value32 &= ~TSFRST;
- rtl8723au_write32(padapter, REG_TCR, value32);
-
- value32 |= TSFRST;
- rtl8723au_write32(padapter, REG_TCR, value32);
-
- /* NOTE: Fix the test chip's bug (contention window randomness) */
- if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE |
- WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE) == true) {
- rtl8723au_write8(padapter, REG_RXTSF_OFFSET_CCK, 0x50);
- rtl8723au_write8(padapter, REG_RXTSF_OFFSET_OFDM, 0x50);
- }
-
- _BeaconFunctionEnable(padapter, true, true);
-
- ResumeTxBeacon(padapter);
- SetBcnCtrlReg23a(padapter, DIS_BCNQ_SUB, 0);
-}
-
-void rtl8723a_SetHalODMVar(struct rtw_adapter *Adapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_odm_t *podmpriv = &pHalData->odmpriv;
- switch (eVariable) {
- case HAL_ODM_STA_INFO:
- {
- struct sta_info *psta = (struct sta_info *)pValue1;
-
- if (bSet) {
- DBG_8723A("Set STA_(%d) info\n", psta->mac_id);
- ODM_CmnInfoPtrArrayHook23a(podmpriv,
- ODM_CMNINFO_STA_STATUS,
- psta->mac_id, psta);
- } else {
- DBG_8723A("Clean STA_(%d) info\n", psta->mac_id);
- ODM_CmnInfoPtrArrayHook23a(podmpriv,
- ODM_CMNINFO_STA_STATUS,
- psta->mac_id, NULL);
- }
- }
- break;
- case HAL_ODM_P2P_STATE:
- ODM_CmnInfoUpdate23a(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
- break;
- case HAL_ODM_WIFI_DISPLAY_STATE:
- ODM_CmnInfoUpdate23a(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
- break;
- default:
- break;
- }
-}
-
-void rtl8723a_notch_filter(struct rtw_adapter *adapter, bool enable)
-{
- if (enable) {
- DBG_8723A("Enable notch filter\n");
- rtl8723au_write8(adapter, rOFDM0_RxDSP + 1,
- rtl8723au_read8(adapter, rOFDM0_RxDSP + 1) |
- BIT(1));
- } else {
- DBG_8723A("Disable notch filter\n");
- rtl8723au_write8(adapter, rOFDM0_RxDSP + 1,
- rtl8723au_read8(adapter, rOFDM0_RxDSP + 1) &
- ~BIT(1));
- }
-}
-
-bool c2h_id_filter_ccx_8723a(u8 id)
-{
- bool ret = false;
- if (id == C2H_CCX_TX_RPT)
- ret = true;
-
- return ret;
-}
-
-int c2h_handler_8723a(struct rtw_adapter *padapter, struct c2h_evt_hdr *c2h_evt)
-{
- int ret = _SUCCESS;
- u8 i = 0;
-
- if (c2h_evt == NULL) {
- DBG_8723A("%s c2h_evt is NULL\n", __func__);
- ret = _FAIL;
- goto exit;
- }
-
- switch (c2h_evt->id) {
- case C2H_DBG:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "C2HCommandHandler: %s\n", c2h_evt->payload);
- break;
-
- case C2H_CCX_TX_RPT:
- handle_txrpt_ccx_8723a(padapter, c2h_evt->payload);
- break;
- case C2H_EXT_RA_RPT:
- break;
- case C2H_HW_INFO_EXCH:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "[BT], C2H_HW_INFO_EXCH\n");
- for (i = 0; i < c2h_evt->plen; i++) {
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "[BT], tmpBuf[%d]= 0x%x\n", i,
- c2h_evt->payload[i]);
- }
- break;
-
- case C2H_C2H_H2C_TEST:
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "[BT], C2H_H2C_TEST\n");
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "[BT], tmpBuf[0]/[1]/[2]/[3]/[4]= 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
- c2h_evt->payload[0],
- c2h_evt->payload[1], c2h_evt->payload[2],
- c2h_evt->payload[3], c2h_evt->payload[4]);
- break;
-
- case C2H_BT_INFO:
- DBG_8723A("%s , Got C2H_BT_INFO \n", __func__);
- rtl8723a_fw_c2h_BT_info(padapter,
- c2h_evt->payload, c2h_evt->plen);
- break;
-
- default:
- ret = _FAIL;
- break;
- }
-
-exit:
- return ret;
-}
-
-void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
-{
- struct txrpt_ccx_8723a *txrpt_ccx = buf;
- struct submit_ctx *pack_tx_ops = &adapter->xmitpriv.ack_tx_ops;
-
- if (txrpt_ccx->int_ccx && adapter->xmitpriv.ack_tx) {
- if (txrpt_ccx->pkt_ok)
- rtw23a_sctx_done_err(&pack_tx_ops,
- RTW_SCTX_DONE_SUCCESS);
- else
- rtw23a_sctx_done_err(&pack_tx_ops,
- RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
-
-void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter)
-{
- u8 val;
-
- val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/* Let 8051 take control of the antenna setting */
- val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
- rtl8723au_write8(padapter, REG_LEDCFG2, val);
-}
-
-void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter)
-{
- u8 val;
-
- val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/* Let 8051 take control of the antenna setting */
- if (!(val & BIT(7))) {
- val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
- rtl8723au_write8(padapter, REG_LEDCFG2, val);
- }
-}
-
-void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter)
-{
- u8 val;
-
- val = rtl8723au_read8(padapter, REG_LEDCFG2);
-	/* Let 8051 take control of the antenna setting */
- val &= ~BIT(7); /* DPDT_SEL_EN, clear 0x4C[23] */
- rtl8723au_write8(padapter, REG_LEDCFG2, val);
-}
-
-void rtl8723a_init_default_value(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData;
- struct dm_priv *pdmpriv;
- u8 i;
-
- pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
-
- /* init default value */
- pHalData->bIQKInitialized = false;
- if (!padapter->pwrctrlpriv.bkeepfwalive)
- pHalData->LastHMEBoxNum = 0;
-
- pHalData->bIQKInitialized = false;
-
- /* init dm default value */
- pdmpriv->TM_Trigger = 0; /* for IQK */
-/* pdmpriv->binitialized = false; */
-/* pdmpriv->prv_traffic_idx = 3; */
-/* pdmpriv->initialize = 0; */
-
- pdmpriv->ThermalValue_HP_index = 0;
- for (i = 0; i < HP_THERMAL_NUM; i++)
- pdmpriv->ThermalValue_HP[i] = 0;
-
- /* init Efuse variables */
- pHalData->EfuseUsedBytes = 0;
- pHalData->BTEfuseUsedBytes = 0;
-}
-
-u8 GetEEPROMSize8723A(struct rtw_adapter *padapter)
-{
- u8 size = 0;
- u32 cr;
-
- cr = rtl8723au_read16(padapter, REG_9346CR);
- /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
- size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
-
- MSG_8723A("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
-
- return size;
-}
-
-/* */
-/* */
-/* LLT R/W/Init function */
-/* */
-/* */
-static int _LLTWrite(struct rtw_adapter *padapter, u32 address, u32 data)
-{
- int status = _SUCCESS;
- s32 count = 0;
- u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
- _LLT_OP(_LLT_WRITE_ACCESS);
- u16 LLTReg = REG_LLT_INIT;
-
- rtl8723au_write32(padapter, LLTReg, value);
-
- /* polling */
- do {
- value = rtl8723au_read32(padapter, LLTReg);
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
- break;
-
- if (count > POLLING_LLT_THRESHOLD) {
- RT_TRACE(_module_hal_init_c_, _drv_err_,
-			 "Failed to poll for LLT write done at address %d!\n",
- address);
- status = _FAIL;
- break;
- }
-	} while (++count);
-
- return status;
-}
-
-int InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
-{
- int status = _SUCCESS;
- u32 i;
- u32 txpktbuf_bndy = boundary;
- u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;
-
- for (i = 0; i < (txpktbuf_bndy - 1); i++) {
- status = _LLTWrite(padapter, i, i + 1);
- if (status != _SUCCESS)
- return status;
- }
-
- /* end of list */
- status = _LLTWrite(padapter, (txpktbuf_bndy - 1), 0xFF);
- if (status != _SUCCESS)
- return status;
-
- /* Make the other pages as ring buffer */
-	/* This ring buffer is used as the beacon buffer if we configure
-	   this MAC for two-MAC transfer. */
-	/* Otherwise it is used as a local loopback buffer. */
- for (i = txpktbuf_bndy; i < Last_Entry_Of_TxPktBuf; i++) {
- status = _LLTWrite(padapter, i, (i + 1));
- if (_SUCCESS != status)
- return status;
- }
-
- /* Let last entry point to the start entry of ring buffer */
- status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
-
- return status;
-}
-
-static void _DisableGPIO(struct rtw_adapter *padapter)
-{
-/***************************************
-j. GPIO_PIN_CTRL 0x44[31:0]= 0x000
-k.Value = GPIO_PIN_CTRL[7:0]
-l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write external PIN level
-m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
-n. LEDCFG 0x4C[15:0] = 0x8080
-***************************************/
- u32 value32;
- u32 u4bTmp;
-
- /* 1. Disable GPIO[7:0] */
- rtl8723au_write16(padapter, REG_GPIO_PIN_CTRL + 2, 0x0000);
- value32 = rtl8723au_read32(padapter, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
- u4bTmp = value32 & 0x000000FF;
- value32 |= ((u4bTmp << 8) | 0x00FF0000);
- rtl8723au_write32(padapter, REG_GPIO_PIN_CTRL, value32);
-
- /* */
-	/* <Roger_Notes> For the RTL8723u multi-function configuration which
-	   was autoloaded from Efuse offsets 0x0a and 0x0b, */
- /* WLAN HW GPIO[9], GPS HW GPIO[10] and BT HW GPIO[11]. */
- /* Added by Roger, 2010.10.07. */
- /* */
- /* 2. Disable GPIO[8] and GPIO[12] */
-
- /* Configure all pins as input mode. */
- rtl8723au_write16(padapter, REG_GPIO_IO_SEL_2, 0x0000);
- value32 = rtl8723au_read32(padapter, REG_GPIO_PIN_CTRL_2) & 0xFFFF001F;
- u4bTmp = value32 & 0x0000001F;
- /* Set pin 8, 10, 11 and pin 12 to output mode. */
- value32 |= ((u4bTmp << 8) | 0x001D0000);
- rtl8723au_write32(padapter, REG_GPIO_PIN_CTRL_2, value32);
-
- /* 3. Disable LED0 & 1 */
- rtl8723au_write16(padapter, REG_LEDCFG0, 0x8080);
-} /* end of _DisableGPIO() */
-
-static void _DisableRFAFEAndResetBB8192C(struct rtw_adapter *padapter)
-{
-/**************************************
-a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
-b. RF path 0 offset 0x00 = 0x00 disable RF
-c. APSD_CTRL 0x600[7:0] = 0x40
-d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
-e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
-***************************************/
- u8 value8;
-
- rtl8723au_write8(padapter, REG_TXPAUSE, 0xFF);
-
- PHY_SetRFReg(padapter, RF_PATH_A, 0x0, bMaskByte0, 0x0);
-
- value8 = APSDOFF;
- rtl8723au_write8(padapter, REG_APSD_CTRL, value8); /* 0x40 */
-
- /* Set BB reset at first */
- value8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn;
- rtl8723au_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x16 */
-
- /* Set global reset. */
- value8 &= ~FEN_BB_GLB_RSTn;
- rtl8723au_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x14 */
-
-	/* 2010/08/12 MH We need to set BB/GLOBAL reset to save power
-	   for SS mode. */
-}
-
-static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
- bool bWithoutHWSM)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (IS_FW_81xxC(padapter) && (pHalData->FirmwareVersion <= 0x20)) {
- /*****************************
- f. MCUFWDL 0x80[7:0]= 0 reset MCU ready status
- g. SYS_FUNC_EN 0x02[10]= 0 reset MCU register, (8051 reset)
- h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC register, DCORE
- i. SYS_FUNC_EN 0x02[10]= 1 enable MCU register,
- (8051 enable)
- ******************************/
- u16 valu16;
- rtl8723au_write8(padapter, REG_MCUFWDL, 0);
-
- valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
- /* reset MCU , 8051 */
- rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 & ~FEN_CPUEN);
-
- valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN) & 0x0FFF;
- /* reset MAC */
- rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 | FEN_HWPDN | FEN_ELDR);
-
- valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
- /* enable MCU , 8051 */
- rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 | FEN_CPUEN);
- } else {
- u8 retry_cnts = 0;
- u8 val8;
-
- val8 = rtl8723au_read8(padapter, REG_MCUFWDL);
-
-		/* 2010/08/12 MH For USB SS, we cannot stop the 8051 when we
-		   are trying to enter IPS/HW&SW radio off. For
-		   S3/S4/S5/Disable, we can stop the 8051 because */
-		/* we will re-init the FW when powering on again. */
-		/* If we want SS mode, we cannot reset the 8051. */
- if ((val8 & BIT(1)) && padapter->bFWReady) {
-			/* If fw is in RAM code, do reset */
-			/* 2010/08/25 MH According to RD Alfred's suggestion,
-			   we need to disable other HRCV INT sources so they
-			   do not influence the 8051 reset. */
- rtl8723au_write8(padapter, REG_FWIMR, 0x20);
- /* 2011/02/15 MH According to Alex's
- suggestion, close mask to prevent
- incorrect FW write operation. */
- rtl8723au_write8(padapter, REG_FTIMR, 0x00);
- rtl8723au_write8(padapter, REG_FSIMR, 0x00);
-
- /* 8051 reset by self */
- rtl8723au_write8(padapter, REG_HMETFR + 3, 0x20);
-
- while ((retry_cnts++ < 100) &&
- (rtl8723au_read16(padapter, REG_SYS_FUNC_EN) &
- FEN_CPUEN)) {
- udelay(50); /* us */
- }
-
- if (retry_cnts >= 100) {
- /* Reset MAC and Enable 8051 */
- rtl8723au_write8(padapter,
- REG_SYS_FUNC_EN + 1, 0x50);
- mdelay(10);
- }
- }
- /* Reset MAC and Enable 8051 */
- rtl8723au_write8(padapter, REG_SYS_FUNC_EN + 1, 0x54);
- rtl8723au_write8(padapter, REG_MCUFWDL, 0);
- }
-
- if (bWithoutHWSM) {
- /*****************************
- Without HW auto state machine
- g. SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
- h. AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
- i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
- j. SYS_ISO_CTRL 0x00[7:0] = 0xF9 isolated digital to PON
- ******************************/
- /* modify to 0x70A3 by Scott. */
- rtl8723au_write16(padapter, REG_SYS_CLKR, 0x70A3);
- rtl8723au_write8(padapter, REG_AFE_PLL_CTRL, 0x80);
- rtl8723au_write16(padapter, REG_AFE_XTAL_CTRL, 0x880F);
- rtl8723au_write8(padapter, REG_SYS_ISO_CTRL, 0xF9);
- } else {
- /* Disable all RF/BB power */
- rtl8723au_write8(padapter, REG_RF_CTRL, 0x00);
- }
-}
-
-static void _ResetDigitalProcedure2(struct rtw_adapter *padapter)
-{
-/*****************************
-k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR function
-l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock
-m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
-******************************/
- /* modify to 0x70a3 by Scott. */
- rtl8723au_write16(padapter, REG_SYS_CLKR, 0x70a3);
- /* modify to 0x82 by Scott. */
- rtl8723au_write8(padapter, REG_SYS_ISO_CTRL + 1, 0x82);
-}
-
-static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u16 value16;
- u8 value8;
-
- if (bWithoutHWSM) {
- /*****************************
- n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
- o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
- r. When driver call disable, the ASIC will turn off remaining
- clock automatically
- ******************************/
-
- rtl8723au_write8(padapter, REG_LDOA15_CTRL, 0x04);
- /* rtl8723au_write8(padapter, REG_LDOV12D_CTRL, 0x54); */
-
- value8 = rtl8723au_read8(padapter, REG_LDOV12D_CTRL);
- value8 &= ~LDV12_EN;
- rtl8723au_write8(padapter, REG_LDOV12D_CTRL, value8);
- }
-
- /*****************************
- h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
- i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
- ******************************/
- value8 = 0x23;
- if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
- value8 |= BIT(3);
-
- rtl8723au_write8(padapter, REG_SPS0_CTRL, value8);
-
- if (bWithoutHWSM) {
- /* value16 |= (APDM_HOST | FSM_HSUS |/PFM_ALDN); */
-		/* 2010/08/31 According to Filen's description, we need to
-		   use HW to shut down the 8051 automatically. */
-		/* Because the suspend operation needs the assistance of the
-		   8051 to wait for 3ms. */
- value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
- } else {
- value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
- }
-
- rtl8723au_write16(padapter, REG_APS_FSMCO, value16); /* 0x4802 */
-
- rtl8723au_write8(padapter, REG_RSV_CTRL, 0x0e);
-}
-
-/* HW Auto state machine */
-int CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
-{
- if (padapter->bSurpriseRemoved)
- return _SUCCESS;
-
- /* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB8192C(padapter);
-
- /* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1_92C(padapter, false);
-
- /* ==== Pull GPIO PIN to balance level and LED control ====== */
- _DisableGPIO(padapter);
-
- /* ==== Disable analog sequence === */
- _DisableAnalog(padapter, false);
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "======> Card disable finished.\n");
-
- return _SUCCESS;
-}
-
-/* without HW Auto state machine */
-int CardDisableWithoutHWSM(struct rtw_adapter *padapter)
-{
- if (padapter->bSurpriseRemoved)
- return _SUCCESS;
-
- /* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB8192C(padapter);
-
- /* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1_92C(padapter, true);
-
- /* ==== Pull GPIO PIN to balance level and LED control ====== */
- _DisableGPIO(padapter);
-
- /* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure2(padapter);
-
- /* ==== Disable analog sequence === */
- _DisableAnalog(padapter, true);
-
- return _SUCCESS;
-}
-
-void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
-
- if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
- if (!pEEPROM->EepromOrEfuse) {
- /* Read EFUSE real map to shadow. */
- EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
- HWSET_MAX_SIZE);
- }
- } else {
- RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
- "AutoLoad Fail reported from CR9346!!\n");
- /* update to default value 0xFF */
- if (!pEEPROM->EepromOrEfuse)
- EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
- HWSET_MAX_SIZE);
- }
-}
-
-void Hal_EfuseParseIDCode(struct rtw_adapter *padapter, u8 *hwinfo)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
-/* struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); */
- u16 EEPROMId;
-
-	/* Check 0x8129 again to make sure of the autoload status!! */
- EEPROMId = le16_to_cpu(*((__le16 *) hwinfo));
- if (EEPROMId != RTL_EEPROM_ID) {
- DBG_8723A("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
- pEEPROM->bautoload_fail_flag = true;
- } else {
- pEEPROM->bautoload_fail_flag = false;
- }
-
- RT_TRACE(_module_hal_init_c_, _drv_info_,
- "EEPROM ID = 0x%04x\n", EEPROMId);
-}
-
-static void
-Hal_ReadPowerValueFromPROM_8723A(struct txpowerinfo *pwrInfo,
- u8 *PROMContent, bool AutoLoadFail)
-{
- u32 rfPath, eeAddr, group, rfPathMax = 1;
-
- memset(pwrInfo, 0, sizeof(*pwrInfo));
-
- if (AutoLoadFail) {
- for (group = 0; group < MAX_CHNL_GROUP; group++) {
- for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
- pwrInfo->CCKIndex[rfPath][group] =
- EEPROM_Default_TxPowerLevel;
- pwrInfo->HT40_1SIndex[rfPath][group] =
- EEPROM_Default_TxPowerLevel;
- pwrInfo->HT40_2SIndexDiff[rfPath][group] =
- EEPROM_Default_HT40_2SDiff;
- pwrInfo->HT20IndexDiff[rfPath][group] =
- EEPROM_Default_HT20_Diff;
- pwrInfo->OFDMIndexDiff[rfPath][group] =
- EEPROM_Default_LegacyHTTxPowerDiff;
- pwrInfo->HT40MaxOffset[rfPath][group] =
- EEPROM_Default_HT40_PwrMaxOffset;
- pwrInfo->HT20MaxOffset[rfPath][group] =
- EEPROM_Default_HT20_PwrMaxOffset;
- }
- }
- pwrInfo->TSSI_A[0] = EEPROM_Default_TSSI;
- return;
- }
-
- for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
- for (group = 0; group < MAX_CHNL_GROUP; group++) {
- eeAddr =
- EEPROM_CCK_TX_PWR_INX_8723A + (rfPath * 3) + group;
-
- pwrInfo->CCKIndex[rfPath][group] = PROMContent[eeAddr];
- if (pwrInfo->CCKIndex[rfPath][group] > 63)
- pwrInfo->CCKIndex[rfPath][group] =
- EEPROM_Default_TxPowerLevel;
-
- eeAddr = EEPROM_HT40_1S_TX_PWR_INX_8723A +
- (rfPath * 3) + group;
- pwrInfo->HT40_1SIndex[rfPath][group] =
- PROMContent[eeAddr];
- if (pwrInfo->HT40_1SIndex[rfPath][group] > 63)
- pwrInfo->HT40_1SIndex[rfPath][group] =
- EEPROM_Default_TxPowerLevel;
- }
- }
-
- for (group = 0; group < MAX_CHNL_GROUP; group++) {
- for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
- pwrInfo->HT40_2SIndexDiff[rfPath][group] = 0;
- pwrInfo->HT20IndexDiff[rfPath][group] =
- (PROMContent
- [EEPROM_HT20_TX_PWR_INX_DIFF_8723A +
- group] >> (rfPath * 4)) & 0xF;
-			/* 4-bit signed number to 8-bit signed number */
- if (pwrInfo->HT20IndexDiff[rfPath][group] & BIT(3))
- pwrInfo->HT20IndexDiff[rfPath][group] |= 0xF0;
-
- pwrInfo->OFDMIndexDiff[rfPath][group] =
- (PROMContent[EEPROM_OFDM_TX_PWR_INX_DIFF_8723A +
- group] >> (rfPath * 4)) & 0xF;
-
- pwrInfo->HT40MaxOffset[rfPath][group] =
- (PROMContent[EEPROM_HT40_MAX_PWR_OFFSET_8723A +
- group] >> (rfPath * 4)) & 0xF;
-
- pwrInfo->HT20MaxOffset[rfPath][group] =
- (PROMContent[EEPROM_HT20_MAX_PWR_OFFSET_8723A +
- group] >> (rfPath * 4)) & 0xF;
- }
- }
-
- pwrInfo->TSSI_A[0] = PROMContent[EEPROM_TSSI_A_8723A];
-}
-
-static u8 Hal_GetChnlGroup(u8 chnl)
-{
- u8 group = 0;
-
-	if (chnl < 3)			/* Channel 1-3 */
- group = 0;
- else if (chnl < 9) /* Channel 4-9 */
- group = 1;
- else /* Channel 10-14 */
- group = 2;
-
- return group;
-}
-
-void
-Hal_EfuseParsetxpowerinfo_8723A(struct rtw_adapter *padapter,
- u8 *PROMContent, bool AutoLoadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct txpowerinfo pwrInfo;
- u8 rfPath, ch, group, rfPathMax = 1;
- u8 pwr, diff;
-
- Hal_ReadPowerValueFromPROM_8723A(&pwrInfo, PROMContent, AutoLoadFail);
- for (rfPath = 0; rfPath < rfPathMax; rfPath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- group = Hal_GetChnlGroup(ch);
-
- pHalData->TxPwrLevelCck[rfPath][ch] =
- pwrInfo.CCKIndex[rfPath][group];
- pHalData->TxPwrLevelHT40_1S[rfPath][ch] =
- pwrInfo.HT40_1SIndex[rfPath][group];
-
- pHalData->TxPwrHt20Diff[rfPath][ch] =
- pwrInfo.HT20IndexDiff[rfPath][group];
- pHalData->TxPwrLegacyHtDiff[rfPath][ch] =
- pwrInfo.OFDMIndexDiff[rfPath][group];
- pHalData->PwrGroupHT20[rfPath][ch] =
- pwrInfo.HT20MaxOffset[rfPath][group];
- pHalData->PwrGroupHT40[rfPath][ch] =
- pwrInfo.HT40MaxOffset[rfPath][group];
-
- pwr = pwrInfo.HT40_1SIndex[rfPath][group];
- diff = pwrInfo.HT40_2SIndexDiff[rfPath][group];
-
- pHalData->TxPwrLevelHT40_2S[rfPath][ch] =
- (pwr > diff) ? (pwr - diff) : 0;
- }
- }
- for (rfPath = 0; rfPath < RF_PATH_MAX; rfPath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "RF(%u)-Ch(%u) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
- rfPath, ch,
- pHalData->TxPwrLevelCck[rfPath][ch],
- pHalData->TxPwrLevelHT40_1S[rfPath][ch],
- pHalData->TxPwrLevelHT40_2S[rfPath][ch]);
-
- }
- }
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "RF-A Ht20 to HT40 Diff[%u] = 0x%x(%d)\n", ch,
- pHalData->TxPwrHt20Diff[RF_PATH_A][ch],
- pHalData->TxPwrHt20Diff[RF_PATH_A][ch]);
- }
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++)
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "RF-A Legacy to Ht40 Diff[%u] = 0x%x\n", ch,
- pHalData->TxPwrLegacyHtDiff[RF_PATH_A][ch]);
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "RF-B Ht20 to HT40 Diff[%u] = 0x%x(%d)\n", ch,
- pHalData->TxPwrHt20Diff[RF_PATH_B][ch],
- pHalData->TxPwrHt20Diff[RF_PATH_B][ch]);
- }
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++)
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "RF-B Legacy to HT40 Diff[%u] = 0x%x\n", ch,
- pHalData->TxPwrLegacyHtDiff[RF_PATH_B][ch]);
- if (!AutoLoadFail) {
- struct registry_priv *registry_par = &padapter->registrypriv;
- if (registry_par->regulatory_tid == 0xff) {
- if (PROMContent[RF_OPTION1_8723A] == 0xff)
- pHalData->EEPROMRegulatory = 0;
- else
- pHalData->EEPROMRegulatory =
- PROMContent[RF_OPTION1_8723A] & 0x7;
- } else {
- pHalData->EEPROMRegulatory =
- registry_par->regulatory_tid;
- }
- } else {
- pHalData->EEPROMRegulatory = 0;
- }
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory);
-
- if (!AutoLoadFail)
- pHalData->bTXPowerDataReadFromEEPORM = true;
-}
-
-void
-Hal_EfuseParseBTCoexistInfo_8723A(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u8 tempval;
- u32 tmpu4;
-
- if (!AutoLoadFail) {
- tmpu4 = rtl8723au_read32(padapter, REG_MULTI_FUNC_CTRL);
- if (tmpu4 & BT_FUNC_EN)
- pHalData->EEPROMBluetoothCoexist = 1;
- else
- pHalData->EEPROMBluetoothCoexist = 0;
- pHalData->EEPROMBluetoothType = BT_RTL8723A;
-
- /* The following need to be checked with newer version of */
- /* eeprom spec */
- tempval = hwinfo[RF_OPTION4_8723A];
- pHalData->EEPROMBluetoothAntNum = (tempval & 0x1);
- pHalData->EEPROMBluetoothAntIsolation = (tempval & 0x10) >> 4;
- pHalData->EEPROMBluetoothRadioShared = (tempval & 0x20) >> 5;
- } else {
- pHalData->EEPROMBluetoothCoexist = 0;
- pHalData->EEPROMBluetoothType = BT_RTL8723A;
- pHalData->EEPROMBluetoothAntNum = Ant_x2;
- pHalData->EEPROMBluetoothAntIsolation = 0;
- pHalData->EEPROMBluetoothRadioShared = BT_Radio_Shared;
- }
-
- rtl8723a_BT_init_hal_vars(padapter);
-}
-
-void
-Hal_EfuseParseEEPROMVer(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail)
- pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_8723A];
- else
- pHalData->EEPROMVersion = 1;
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
- pHalData->EEPROMVersion);
-}
-
-void
-rtl8723a_EfuseParseChnlPlan(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
- padapter->mlmepriv.ChannelPlan =
- hal_com_get_channel_plan23a(padapter, hwinfo ?
- hwinfo[EEPROM_ChannelPlan_8723A]:0xFF,
- padapter->registrypriv.channel_plan,
- RT_CHANNEL_DOMAIN_WORLD_WIDE_13,
- AutoLoadFail);
-
- DBG_8723A("mlmepriv.ChannelPlan = 0x%02x\n",
- padapter->mlmepriv.ChannelPlan);
-}
-
-void
-Hal_EfuseParseCustomerID(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail) {
- pHalData->EEPROMCustomerID = hwinfo[EEPROM_CustomID_8723A];
- pHalData->EEPROMSubCustomerID =
- hwinfo[EEPROM_SubCustomID_8723A];
- } else {
- pHalData->EEPROMCustomerID = 0;
- pHalData->EEPROMSubCustomerID = 0;
- }
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "EEPROM SubCustomer ID: 0x%02x\n",
- pHalData->EEPROMSubCustomerID);
-}
-
-void
-Hal_EfuseParseAntennaDiversity(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
-}
-
-void
-Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
-}
-
-void
-Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter,
- u8 *hwinfo, u8 AutoLoadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
-
- if (!AutoLoadFail) {
- pHalData->CrystalCap = hwinfo[EEPROM_XTAL_K_8723A];
- if (pHalData->CrystalCap == 0xFF)
- pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723A;
- } else {
- pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723A;
- }
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "%s: CrystalCap = 0x%2x\n", __func__,
- pHalData->CrystalCap);
-}
-
-void
-Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
- u8 *PROMContent, bool AutoloadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
-
- /* */
- /* ThermalMeter from EEPROM */
- /* */
- if (!AutoloadFail)
- pHalData->EEPROMThermalMeter =
- PROMContent[EEPROM_THERMAL_METER_8723A];
- else
- pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
-
- if ((pHalData->EEPROMThermalMeter == 0xff) || AutoloadFail) {
- pHalData->bAPKThermalMeterIgnore = true;
- pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- }
-
- DBG_8723A("%s: ThermalMeter = 0x%x\n", __func__,
- pHalData->EEPROMThermalMeter);
-}
-
-static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
-{
- __le16 *usPtr = (__le16 *)ptxdesc;
- u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
- u32 index;
- u16 checksum = 0;
-
- /* Clear first */
- ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
-
- for (index = 0; index < count; index++)
- checksum ^= le16_to_cpu(usPtr[index]);
-
- ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
-}
-
-/*
- * Description: In the normal chip, we should send some packets to the Hw
- * which will be used by the Fw in FW LPS mode. This function fills the Tx
- * descriptors of these packets so that the Fw can tell the Hw to send them
- * directly.
- */
-/* Added by tynli. 2009.10.15. */
-/* */
-void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc,
- u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull)
-{
- struct tx_desc *ptxdesc;
-
- /* Clear all status */
- ptxdesc = (struct tx_desc *)pDesc;
- memset(pDesc, 0, TXDESC_SIZE);
-
- /* offset 0 */
- /* own, bFirstSeg, bLastSeg; */
- ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
-
- /* 32 bytes for TX Desc */
- ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) <<
- OFFSET_SHT) & 0x00ff0000);
-
- /* Buffer size + command header */
- ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff);
-
- /* offset 4 */
- /* Fixed queue of Mgnt queue */
- ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00);
-
-	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being
-	   changed to an erroneous value by Hw. */
- if (IsPsPoll) {
- ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
- } else {
- /* Hw set sequence number */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(7));
-		/* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
- ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
- }
-
-	if (IsBTQosNull)
- ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */
-
- /* offset 16 */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(8)); /* driver uses rate */
-
-	/* The USB interface drops the packet if the descriptor checksum
-	   isn't correct. */
-	/* Using this checksum lets the hardware recover from packet bulk
-	   out errors (e.g. Cancel URC, Bulk out error). */
- rtl8723a_cal_txdesc_chksum(ptxdesc);
-}
-
-void hw_var_set_opmode(struct rtw_adapter *padapter, u8 mode)
-{
- u8 val8;
-
- if (mode == MSR_INFRA || mode == MSR_NOLINK) {
- StopTxBeacon(padapter);
-
- /* disable atim wnd */
- val8 = DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_ATIM;
- SetBcnCtrlReg23a(padapter, val8, ~val8);
- } else if (mode == MSR_ADHOC) {
- ResumeTxBeacon(padapter);
-
- val8 = DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_BCNQ_SUB;
- SetBcnCtrlReg23a(padapter, val8, ~val8);
- } else if (mode == MSR_AP) {
- /* add NULL Data and BT NULL Data Packets to FW RSVD Page */
- rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter);
-
- ResumeTxBeacon(padapter);
-
- val8 = DIS_TSF_UDT | DIS_BCNQ_SUB;
- SetBcnCtrlReg23a(padapter, val8, ~val8);
-
- /* Set RCR */
- /* rtl8723au_write32(padapter, REG_RCR, 0x70002a8e);
- CBSSID_DATA must set to 0 */
- /* CBSSID_DATA must set to 0 */
- rtl8723au_write32(padapter, REG_RCR, 0x7000228e);
- /* enable to rx data frame */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
- /* enable to rx ps-poll */
- rtl8723au_write16(padapter, REG_RXFLTMAP1, 0x0400);
-
- /* Beacon Control related register for first time */
- /* 2ms */
- rtl8723au_write8(padapter, REG_BCNDMATIM, 0x02);
- /* 5ms */
- rtl8723au_write8(padapter, REG_DRVERLYINT, 0x05);
- /* 10ms for port0 */
- rtl8723au_write8(padapter, REG_ATIMWND, 0x0a);
- rtl8723au_write16(padapter, REG_BCNTCFG, 0x00);
- rtl8723au_write16(padapter, REG_TBTT_PROHIBIT, 0xff04);
- /* +32767 (~32ms) */
- rtl8723au_write16(padapter, REG_TSFTR_SYN_OFFSET, 0x7fff);
-
- /* reset TSF */
- rtl8723au_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
-
- /* enable BCN Function */
- /* don't enable update TSF (due to TSF update when
- beacon/probe rsp are received) */
- val8 = DIS_TSF_UDT | EN_BCN_FUNCTION |
- EN_TXBCN_RPT | DIS_BCNQ_SUB;
- SetBcnCtrlReg23a(padapter, val8, ~val8);
- }
-
- val8 = rtl8723au_read8(padapter, MSR);
- val8 = (val8 & 0xC) | mode;
- rtl8723au_write8(padapter, MSR, val8);
-}
-
-void hw_var_set_macaddr(struct rtw_adapter *padapter, u8 *val)
-{
- u8 idx = 0;
- u32 reg_macid;
-
- reg_macid = REG_MACID;
-
- for (idx = 0; idx < 6; idx++)
- rtl8723au_write8(padapter, (reg_macid + idx), val[idx]);
-}
-
-void hw_var_set_bssid(struct rtw_adapter *padapter, u8 *val)
-{
- u8 idx = 0;
- u32 reg_bssid;
-
- reg_bssid = REG_BSSID;
-
- for (idx = 0; idx < 6; idx++)
- rtl8723au_write8(padapter, (reg_bssid + idx), val[idx]);
-}
-
-void hw_var_set_correct_tsf(struct rtw_adapter *padapter)
-{
- u64 tsf;
- u32 reg_tsftr;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- /* tsf = pmlmeext->TSFValue - ((u32)pmlmeext->TSFValue %
- (pmlmeinfo->bcn_interval*1024)) - 1024; us */
- tsf = pmlmeext->TSFValue -
- do_div(pmlmeext->TSFValue,
- (pmlmeinfo->bcn_interval * 1024)) - 1024; /* us */
-
- if (((pmlmeinfo->state & 0x03) == MSR_ADHOC) ||
- ((pmlmeinfo->state & 0x03) == MSR_AP)) {
- /* pHalData->RegTxPause |= STOP_BCNQ;BIT(6) */
- /* rtl8723au_write8(padapter, REG_TXPAUSE,
- (rtl8723au_read8(Adapter, REG_TXPAUSE)|BIT(6))); */
- StopTxBeacon(padapter);
- }
-
- reg_tsftr = REG_TSFTR;
-
- /* disable related TSF function */
- SetBcnCtrlReg23a(padapter, 0, EN_BCN_FUNCTION);
-
- rtl8723au_write32(padapter, reg_tsftr, tsf);
- rtl8723au_write32(padapter, reg_tsftr + 4, tsf >> 32);
-
- /* enable related TSF function */
- SetBcnCtrlReg23a(padapter, EN_BCN_FUNCTION, 0);
-
- if (((pmlmeinfo->state & 0x03) == MSR_ADHOC) ||
- ((pmlmeinfo->state & 0x03) == MSR_AP))
- ResumeTxBeacon(padapter);
-}
-
-void hw_var_set_mlme_disconnect(struct rtw_adapter *padapter)
-{
- /* reject all data frames */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0);
-
- /* reset TSF */
- rtl8723au_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
-
- /* disable update TSF */
- SetBcnCtrlReg23a(padapter, DIS_TSF_UDT, 0);
-}
-
-void hw_var_set_mlme_join(struct rtw_adapter *padapter, u8 type)
-{
- u8 RetryLimit = 0x30;
-
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (type == 0) { /* prepare to join */
- u32 v32;
-
-		/* enable to rx data frames. Accept all data frames */
- /* rtl8723au_write32(padapter, REG_RCR,
- rtl8723au_read32(padapter, REG_RCR)|RCR_ADF); */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
-
- v32 = rtl8723au_read32(padapter, REG_RCR);
- v32 |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
- rtl8723au_write32(padapter, REG_RCR, v32);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
- RetryLimit =
- (pHalData->CustomerID == RT_CID_CCX) ? 7 : 48;
- else /* Ad-hoc Mode */
- RetryLimit = 0x7;
- } else if (type == 1) { /* joinbss_event callback when join res < 0 */
- /* config RCR to receive different BSSID & not to
- receive data frame during linking */
- rtl8723au_write16(padapter, REG_RXFLTMAP2, 0);
- } else if (type == 2) { /* sta add event callback */
- /* enable update TSF */
- SetBcnCtrlReg23a(padapter, 0, DIS_TSF_UDT);
-
- if (check_fwstate(pmlmepriv,
- WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
- /* fixed beacon issue for 8191su........... */
- rtl8723au_write8(padapter, 0x542, 0x02);
- RetryLimit = 0x7;
- }
- }
-
- rtl8723au_write16(padapter, REG_RL,
- RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit <<
- RETRY_LIMIT_LONG_SHIFT);
-
- switch (type) {
- case 0:
- /* prepare to join */
- rtl8723a_BT_wifiassociate_notify(padapter, true);
- break;
- case 1:
- /* joinbss_event callback when join res < 0 */
- rtl8723a_BT_wifiassociate_notify(padapter, false);
- break;
- case 2:
- /* sta add event callback */
-/* BT_WifiMediaStatusNotify(padapter, RT_MEDIA_CONNECT); */
- break;
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
deleted file mode 100644
index 06a6c3eeeb33..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
+++ /dev/null
@@ -1,961 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_PHYCFG_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-/*---------------------------Define Local Constant---------------------------*/
-/* Channel switch:The size of command tables for switch channel*/
-#define MAX_PRECMD_CNT 16
-#define MAX_RFDEPENDCMD_CNT 16
-#define MAX_POSTCMD_CNT 16
-
-#define MAX_DOZE_WAITING_TIMES_9x 64
-
-/*---------------------------Define Local Constant---------------------------*/
-
-/*------------------------Define global variable-----------------------------*/
-
-/*------------------------Define local variable------------------------------*/
-
-/*--------------------Define export function prototype-----------------------*/
-/* Please refer to header file */
-/*--------------------Define export function prototype-----------------------*/
-
-/*----------------------------Function Body----------------------------------*/
-/* */
-/* 1. BB register R/W API */
-/* */
-
-/**
-* Function: phy_CalculateBitShift
-*
-* OverView: Get shifted position of the BitMask
-*
-* Input:
-* u32 BitMask,
-*
-* Output: none
-*	Return:		u32	The bit position of the lowest set bit of the mask
-*/
-static u32 phy_CalculateBitShift(u32 BitMask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((BitMask>>i) & 0x1) == 1)
- break;
- }
-
- return i;
-}
-
-/**
-* Function: PHY_QueryBBReg
-*
-* OverView:	Read "specific bits" from BB register
-*
-* Input:
-* struct rtw_adapter * Adapter,
-*			u32 RegAddr,	Target address to be read back
-*			u32 BitMask	Target bit position in the
-*					target address to be read back
-* Output:
-* None
-* Return:
-* u32 Data The readback register value
-* Note:
-* This function is equal to "GetRegSetting" in PHY programming guide
-*/
-u32
-PHY_QueryBBReg(struct rtw_adapter *Adapter, u32 RegAddr, u32 BitMask)
-{
- u32 ReturnValue = 0, OriginalValue, BitShift;
-
- OriginalValue = rtl8723au_read32(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
- ReturnValue = (OriginalValue & BitMask) >> BitShift;
- return ReturnValue;
-}
-
-/**
-* Function: PHY_SetBBReg
-*
-* OverView: Write "Specific bits" to BB register (page 8~)
-*
-* Input:
-* struct rtw_adapter * Adapter,
-* u32 RegAddr, Target address to be modified
-* u32 BitMask Target bit position in the
-* target address to be modified
-* u32 Data The new register value in the
-* target bit position of the
-* target address
-*
-* Output:
-* None
-* Return:
-* None
-* Note:
-* This function is equal to "PutRegSetting" in PHY programming guide
-*/
-
-void
-PHY_SetBBReg(struct rtw_adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
-{
- u32 OriginalValue, BitShift;
-
- if (BitMask != bMaskDWord) {/* if not "double word" write */
- OriginalValue = rtl8723au_read32(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
- Data = (OriginalValue & (~BitMask)) | (Data << BitShift);
- }
-
- rtl8723au_write32(Adapter, RegAddr, Data);
-
- /* RTPRINT(FPHY, PHY_BBW, ("BBW MASK = 0x%lx Addr[0x%lx]= 0x%lx\n", BitMask, RegAddr, Data)); */
-}
-
-/* */
-/* 2. RF register R/W API */
-/* */
-
-/**
-* Function: phy_RFSerialRead
-*
-* OverView:	Read register from RF chips
-*
-* Input:
-* struct rtw_adapter * Adapter,
-* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
-* u32 Offset, The target address to be read
-*
-* Output: None
-* Return:	u32	readback value
-* Note:	There are three types of serial operations:
-*		1. Software serial write
-*		2. Hardware LSSI-Low Speed Serial Interface
-*		3. Hardware HSSI-High speed
-*		serial write. Driver needs to implement (1) and (2).
-* This function is equal to the combination of RF_ReadReg() and
-* RFLSSIRead()
-*/
-static u32
-phy_RFSerialRead(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
- u32 Offset)
-{
- u32 retValue = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct bb_reg_define *pPhyReg = &pHalData->PHYRegDef[eRFPath];
- u32 NewOffset;
- u32 tmplong, tmplong2;
- u8 RfPiEnable = 0;
- /* */
- /* Make sure RF register offset is correct */
- /* */
- Offset &= 0x3f;
-
- /* */
- /* Switch page for 8256 RF IC */
- /* */
- NewOffset = Offset;
-
- /* 2009/06/17 MH We can not execute IO for power save or
- other accident mode. */
- /* if (RT_CANNOT_IO(Adapter)) */
- /* */
- /* RTPRINT(FPHY, PHY_RFR, ("phy_RFSerialRead return all one\n")); */
- /* return 0xFFFFFFFF; */
- /* */
-
- /* For 92S LSSI Read RFLSSIRead */
- /* For RF A/B write 0x824/82c(does not work in the future) */
- /* We must use 0x824 for RF A and B to execute read trigger */
- tmplong = rtl8723au_read32(Adapter, rFPGA0_XA_HSSIParameter2);
- if (eRFPath == RF_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl8723au_read32(Adapter, pPhyReg->rfHSSIPara2);
-
- tmplong2 = (tmplong2 & ~bLSSIReadAddress) |
- (NewOffset << 23) | bLSSIReadEdge; /* T65 RF */
-
- rtl8723au_write32(Adapter, rFPGA0_XA_HSSIParameter2,
- tmplong & (~bLSSIReadEdge));
- udelay(10);/* PlatformStallExecution(10); */
-
- rtl8723au_write32(Adapter, pPhyReg->rfHSSIPara2, tmplong2);
- udelay(100);/* PlatformStallExecution(100); */
-
- rtl8723au_write32(Adapter, rFPGA0_XA_HSSIParameter2,
- tmplong | bLSSIReadEdge);
- udelay(10);/* PlatformStallExecution(10); */
-
- if (eRFPath == RF_PATH_A)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter,
- rFPGA0_XA_HSSIParameter1,
- BIT(8));
- else if (eRFPath == RF_PATH_B)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter,
- rFPGA0_XB_HSSIParameter1,
- BIT(8));
-
- if (RfPiEnable) {
- /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi,
- bLSSIReadBackData);
- /* DBG_8723A("Readback from RF-PI : 0x%x\n", retValue); */
- } else {
- /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack,
- bLSSIReadBackData);
- /* DBG_8723A("Readback from RF-SI : 0x%x\n", retValue); */
- }
- /* DBG_8723A("RFR-%d Addr[0x%x]= 0x%x\n", eRFPath, pPhyReg->rfLSSIReadBack, retValue); */
-
- return retValue;
-}
-
-/**
-* Function: phy_RFSerialWrite
-*
-* OverView: Write data to RF register (page 8~)
-*
-* Input:
-* struct rtw_adapter * Adapter,
-* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
-*			u32 Offset,	The target address to be written
-*			u32 Data	The new register Data in the target
-*					bit position of the target address
-*
-* Output:
-* None
-* Return:
-* None
-* Note:
-*	There are three types of serial operations:
-*	1. Software serial write
-*	2. Hardware LSSI-Low Speed Serial Interface
-*	3. Hardware HSSI-High speed
-*	serial write. Driver needs to implement (1) and (2).
-* This function is equal to the combination of RF_ReadReg() and
-* RFLSSIRead()
-*
-* Note: For RF8256 only
-* The total count of RTL8256(Zebra4) registers is around 36, but it only
-* employs a 4-bit RF address. RTL8256 uses a "register mode control bit"
-* (Reg00[12], Reg00[10]) to access register addresses bigger than 0xf.
-* See "Appendix-4 in PHY Configuration programming guide" for more details.
-* Thus, we define a sub-function for RTL8256 register address conversion
-* ===========================================================
-* Register Mode: RegCTL[1] RegCTL[0] Note
-* (Reg00[12]) (Reg00[10])
-* ===========================================================
-* Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
-* ------------------------------------------------------------------
-* Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
-* ------------------------------------------------------------------
-* Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
-* ------------------------------------------------------------------
-*
-* 2008/09/02 MH Add 92S RF definition
-*/
-static void
-phy_RFSerialWrite(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
- u32 Offset, u32 Data)
-{
- u32 DataAndAddr = 0;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct bb_reg_define *pPhyReg = &pHalData->PHYRegDef[eRFPath];
- u32 NewOffset;
-
- /* 2009/06/17 MH We can not execute IO for power save or
- other accident mode. */
- /* if (RT_CANNOT_IO(Adapter)) */
- /* */
- /* RTPRINT(FPHY, PHY_RFW, ("phy_RFSerialWrite stop\n")); */
- /* return; */
- /* */
-
- Offset &= 0x3f;
-
- /* */
- /* Shadow Update */
- /* */
- /* PHY_RFShadowWrite(Adapter, eRFPath, Offset, Data); */
-
- /* */
- /* Switch page for 8256 RF IC */
- /* */
- NewOffset = Offset;
-
- /* */
- /* Put write addr in [5:0] and write data in [31:16] */
- /* */
- /* DataAndAddr = (Data<<16) | (NewOffset&0x3f); */
- /* T65 RF */
- DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff;
-
- /* */
- /* Write Operation */
- /* */
- rtl8723au_write32(Adapter, pPhyReg->rf3wireOffset, DataAndAddr);
-}
-
-/**
-* Function: PHY_QueryRFReg
-*
-* OverView:	Query "Specific bits" from RF register (page 8~)
-*
-* Input:
-* struct rtw_adapter * Adapter,
-* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
-* u32 RegAddr, The target address to be read
-*			u32 BitMask	The target bit position in the target
-* address to be read
-*
-* Output:
-* None
-* Return:
-* u32 Readback value
-* Note:
-* This function is equal to "GetRFRegSetting" in PHY programming guide
-*/
-u32
-PHY_QueryRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask)
-{
- u32 Original_Value, Readback_Value, BitShift;
- /* struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); */
- /* u8 RFWaitCounter = 0; */
- /* _irqL irqL; */
-
- Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
-
- BitShift = phy_CalculateBitShift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
-
- return Readback_Value;
-}
-
-/**
-* Function: PHY_SetRFReg
-*
-* OverView: Write "Specific bits" to RF register (page 8~)
-*
-* Input:
-* struct rtw_adapter * Adapter,
-* enum RF_RADIO_PATH eRFPath, Radio path of A/B/C/D
-* u32 RegAddr, The target address to be modified
-* u32 BitMask The target bit position in the target
-* address to be modified
-* u32 Data The new register Data in the target
-* bit position of the target address
-*
-* Output:
-* None
-* Return:
-* None
-* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
-*/
-void
-PHY_SetRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data)
-{
- /* struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter); */
- /* u8 RFWaitCounter = 0; */
- u32 Original_Value, BitShift;
-
- /* RF data is 12 bits only */
- if (BitMask != bRFRegOffsetMask) {
- Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
- Data = (Original_Value & (~BitMask)) | (Data << BitShift);
- }
-
- phy_RFSerialWrite(Adapter, eRFPath, RegAddr, Data);
-}
-
-/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_MACConfig8723A
- *
- * Overview:	Config MAC by header file or parameter file.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 08/12/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-int PHY_MACConfig8723A(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- /* */
- /* Config MAC */
- /* */
- ODM_ReadAndConfig_MAC_REG_8723A(&pHalData->odmpriv);
-
- /* 2010.07.13 AMPDU aggregation number 9 */
- rtl8723au_write8(Adapter, REG_MAX_AGGR_NUM, 0x0A);
- if (pHalData->rf_type == RF_2T2R &&
- BOARD_USB_DONGLE == pHalData->BoardType)
- rtl8723au_write8(Adapter, 0x40, 0x04);
-
- return _SUCCESS;
-}
-
-/**
-* Function: phy_InitBBRFRegisterDefinition
-*
-* OverView: Initialize Register definition offset for Radio Path A/B/C/D
-*
-* Input:
-* struct rtw_adapter * Adapter,
-*
-* Output: None
-* Return: None
-* Note:
-*	The initialization value is constant and it should never be changed
-*/
-static void
-phy_InitBBRFRegisterDefinition(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
-	/* RF Interface Software Control */
- /* 16 LSBs if read 32-bit from 0x870 */
- pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
- /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
-
- /* RF Interface Readback Value */
- /* 16 LSBs if read 32-bit from 0x8E0 */
- pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
- /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
-
- /* RF Interface Output (and Enable) */
- /* 16 LSBs if read 32-bit from 0x860 */
- pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
- /* 16 LSBs if read 32-bit from 0x864 */
- pHalData->PHYRegDef[RF_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
-
- /* RF Interface (Output and) Enable */
- /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
- pHalData->PHYRegDef[RF_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
- /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
- pHalData->PHYRegDef[RF_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
-
-	/* Addr of LSSI. Write RF register by driver */
- pHalData->PHYRegDef[RF_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
- pHalData->PHYRegDef[RF_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
-
- /* RF parameter */
- /* BB Band Select */
- pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
-
- /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
- pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
- pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
-
-	/* Transceiver A~D HSSI Parameter-1 */
- /* wire control parameter1 */
- pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
- /* wire control parameter1 */
- pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
-
-	/* Transceiver A~D HSSI Parameter-2 */
- /* wire control parameter2 */
- pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
- /* wire control parameter2 */
- pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
-
- /* RF switch Control */
- pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl =
- rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
- pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl =
- rFPGA0_XAB_SwitchControl;
-
- /* AGC control 1 */
- pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
- pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
-
- /* AGC control 2 */
- pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
- pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
-
- /* RX AFE control 1 */
- pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
-
- /* RX AFE control 1 */
- pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
- pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
-
- /* Tx AFE control 1 */
- pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
-
- /* Tx AFE control 2 */
- pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
- pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
-
-	/* Transceiver LSSI Readback SI mode */
- pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
-
-	/* Transceiver LSSI Readback PI mode */
- pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi =
- TransceiverA_HSPI_Readback;
- pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBackPi =
- TransceiverB_HSPI_Readback;
-}
-
-/* The following is for High Power PA */
-static void
-storePwrIndexDiffRateOffset(struct rtw_adapter *Adapter, u32 RegAddr,
- u32 BitMask, u32 Data)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- if (RegAddr == rTxAGC_A_Rate18_06) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
- }
- if (RegAddr == rTxAGC_A_Rate54_24) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][1] = Data;
- }
- if (RegAddr == rTxAGC_A_CCK1_Mcs32) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][6] = Data;
- }
- if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0xffffff00) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][7] = Data;
- }
- if (RegAddr == rTxAGC_A_Mcs03_Mcs00) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][2] = Data;
- }
- if (RegAddr == rTxAGC_A_Mcs07_Mcs04) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][3] = Data;
- }
- if (RegAddr == rTxAGC_A_Mcs11_Mcs08) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
- }
- if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
- }
- if (RegAddr == rTxAGC_B_Rate18_06) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
- }
- if (RegAddr == rTxAGC_B_Rate54_24) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][9] = Data;
- }
- if (RegAddr == rTxAGC_B_CCK1_55_Mcs32) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][14] = Data;
- }
- if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0x000000ff) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][15] = Data;
- }
- if (RegAddr == rTxAGC_B_Mcs03_Mcs00) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][10] = Data;
- }
- if (RegAddr == rTxAGC_B_Mcs07_Mcs04) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
- }
- if (RegAddr == rTxAGC_B_Mcs11_Mcs08) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
- }
- if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
- pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
- pHalData->pwrGroupCnt++;
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: phy_ConfigBBWithPgHeaderFile
- *
- * Overview: Config PHY_REG_PG array
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/06/2008 MHC Add later!!!!!!.. Please modify for new files!!!!
- * 11/10/2008	tynli	Modify to new files.
- *---------------------------------------------------------------------------*/
-static int
-phy_ConfigBBWithPgHeaderFile(struct rtw_adapter *Adapter)
-{
- int i;
- u32 *Rtl819XPHY_REGArray_Table_PG;
- u16 PHY_REGArrayPGLen;
-
- PHY_REGArrayPGLen = Rtl8723_PHY_REG_Array_PGLength;
- Rtl819XPHY_REGArray_Table_PG = (u32 *)Rtl8723_PHY_REG_Array_PG;
-
- for (i = 0; i < PHY_REGArrayPGLen; i = i + 3) {
- storePwrIndexDiffRateOffset(Adapter,
- Rtl819XPHY_REGArray_Table_PG[i],
- Rtl819XPHY_REGArray_Table_PG[i+1],
- Rtl819XPHY_REGArray_Table_PG[i+2]);
- }
-
- return _SUCCESS;
-}
-
-static void
-phy_BB8192C_Config_1T(struct rtw_adapter *Adapter)
-{
- /* for path - B */
- PHY_SetBBReg(Adapter, rFPGA0_TxInfo, 0x3, 0x2);
- PHY_SetBBReg(Adapter, rFPGA1_TxInfo, 0x300033, 0x200022);
-
- /* 20100519 Joseph: Add for 1T2R config. Suggested by Kevin,
- Jenyu and Yunan. */
- PHY_SetBBReg(Adapter, rCCK0_AFESetting, bMaskByte3, 0x45);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, bMaskByte0, 0x23);
- /* B path first AGC */
- PHY_SetBBReg(Adapter, rOFDM0_AGCParameter1, 0x30, 0x1);
-
- PHY_SetBBReg(Adapter, 0xe74, 0x0c000000, 0x2);
- PHY_SetBBReg(Adapter, 0xe78, 0x0c000000, 0x2);
- PHY_SetBBReg(Adapter, 0xe7c, 0x0c000000, 0x2);
- PHY_SetBBReg(Adapter, 0xe80, 0x0c000000, 0x2);
- PHY_SetBBReg(Adapter, 0xe88, 0x0c000000, 0x2);
-}
-
-static int
-phy_BB8723a_Config_ParaFile(struct rtw_adapter *Adapter)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- int rtStatus = _SUCCESS;
-
- /* */
- /* 1. Read PHY_REG.TXT BB INIT!! */
- /* We will separate as 88C / 92C according to chip version */
- /* */
- ODM_ReadAndConfig_PHY_REG_1T_8723A(&pHalData->odmpriv);
-
- /* */
- /* 20100318 Joseph: Config 2T2R to 1T2R if necessary. */
- /* */
- if (pHalData->rf_type == RF_1T2R) {
- phy_BB8192C_Config_1T(Adapter);
- DBG_8723A("phy_BB8723a_Config_ParaFile():Config to 1T!!\n");
- }
-
- /* */
- /* 2. If EEPROM or EFUSE autoload OK, We must config by
- PHY_REG_PG.txt */
- /* */
- if (pEEPROM->bautoload_fail_flag == false) {
- pHalData->pwrGroupCnt = 0;
-
- rtStatus = phy_ConfigBBWithPgHeaderFile(Adapter);
- }
-
- if (rtStatus != _SUCCESS)
- goto phy_BB8190_Config_ParaFile_Fail;
-
- /* */
- /* 3. BB AGC table Initialization */
- /* */
- ODM_ReadAndConfig_AGC_TAB_1T_8723A(&pHalData->odmpriv);
-
-phy_BB8190_Config_ParaFile_Fail:
-
- return rtStatus;
-}
-
-int
-PHY_BBConfig8723A(struct rtw_adapter *Adapter)
-{
- int rtStatus = _SUCCESS;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 TmpU1B = 0;
- u8 CrystalCap;
-
- phy_InitBBRFRegisterDefinition(Adapter);
-
- /* Suggested by Scott. tynli_test. 2010.12.30. */
- /* 1. 0x28[1] = 1 */
- TmpU1B = rtl8723au_read8(Adapter, REG_AFE_PLL_CTRL);
- udelay(2);
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL, TmpU1B | BIT(1));
- udelay(2);
-
- /* 2. 0x29[7:0] = 0xFF */
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL+1, 0xff);
- udelay(2);
-
- /* 3. 0x02[1:0] = 2b'11 */
- TmpU1B = rtl8723au_read8(Adapter, REG_SYS_FUNC_EN);
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN,
- (TmpU1B | FEN_BB_GLB_RSTn | FEN_BBRSTB));
-
- /* 4. 0x25[6] = 0 */
- TmpU1B = rtl8723au_read8(Adapter, REG_AFE_XTAL_CTRL + 1);
- rtl8723au_write8(Adapter, REG_AFE_XTAL_CTRL+1, TmpU1B & ~BIT(6));
-
- /* 5. 0x24[20] = 0 Advised by SD3 Alex Wang. 2011.02.09. */
- TmpU1B = rtl8723au_read8(Adapter, REG_AFE_XTAL_CTRL+2);
- rtl8723au_write8(Adapter, REG_AFE_XTAL_CTRL+2, TmpU1B & ~BIT(4));
-
- /* 6. 0x1f[7:0] = 0x07 */
- rtl8723au_write8(Adapter, REG_RF_CTRL, 0x07);
-
- /* */
- /* Config BB and AGC */
- /* */
- rtStatus = phy_BB8723a_Config_ParaFile(Adapter);
-
-/* only for B-cut */
- if (pHalData->EEPROMVersion >= 0x01) {
- CrystalCap = pHalData->CrystalCap & 0x3F;
- PHY_SetBBReg(Adapter, REG_MAC_PHY_CTRL, 0xFFF000,
- (CrystalCap | (CrystalCap << 6)));
- }
-
- rtl8723au_write32(Adapter, REG_LDOA15_CTRL, 0x01572505);
- return rtStatus;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: SetTxPowerLevel8723A()
- *
- * Overview:	This function is exported to the "HalCommon" module
- * We must consider RF path later!!!!!!!
- *
- * Input: struct rtw_adapter * Adapter
- * u8 channel
- *
- * Output: NONE
- *
- * Return: NONE
- *
- *---------------------------------------------------------------------------*/
-void PHY_SetTxPowerLevel8723A(struct rtw_adapter *Adapter, u8 channel)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 cckpwr[2], ofdmpwr[2]; /* [0]:RF-A, [1]:RF-B */
- int i = channel - 1;
-
- if (pHalData->bTXPowerDataReadFromEEPORM == false)
- return;
-
- /* 1. CCK */
- cckpwr[RF_PATH_A] = pHalData->TxPwrLevelCck[RF_PATH_A][i];
- cckpwr[RF_PATH_B] = pHalData->TxPwrLevelCck[RF_PATH_B][i];
-
- /* 2. OFDM for 1S or 2S */
- if (GET_RF_TYPE(Adapter) == RF_1T2R ||
- GET_RF_TYPE(Adapter) == RF_1T1R) {
- /* Read HT 40 OFDM TX power */
- ofdmpwr[RF_PATH_A] = pHalData->TxPwrLevelHT40_1S[RF_PATH_A][i];
- ofdmpwr[RF_PATH_B] = pHalData->TxPwrLevelHT40_1S[RF_PATH_B][i];
- } else if (GET_RF_TYPE(Adapter) == RF_2T2R) {
- /* Read HT 40 OFDM TX power */
- ofdmpwr[RF_PATH_A] = pHalData->TxPwrLevelHT40_2S[RF_PATH_A][i];
- ofdmpwr[RF_PATH_B] = pHalData->TxPwrLevelHT40_2S[RF_PATH_B][i];
- }
-
- rtl823a_phy_rf6052setccktxpower(Adapter, &cckpwr[0]);
- rtl8723a_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmpwr[0], channel);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_SetBWMode23aCallback8192C()
- *
- * Overview:	Timer callback function for SetBWMode23a
- *
- * Input: PRT_TIMER pTimer
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note:
- * (1) We do not take j mode into consideration now
- * (2) Will two workitem of "switch channel" and
- * "switch channel bandwidth" run concurrently?
- *---------------------------------------------------------------------------*/
-static void
-_PHY_SetBWMode23a92C(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 regBwOpMode;
- u8 regRRSR_RSC;
-
- if (Adapter->bDriverStopped)
- return;
-
- /* 3 */
- /* 3<1>Set MAC register */
- /* 3 */
-
- regBwOpMode = rtl8723au_read8(Adapter, REG_BWOPMODE);
- regRRSR_RSC = rtl8723au_read8(Adapter, REG_RRSR+2);
-
- switch (pHalData->CurrentChannelBW) {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
- rtl8723au_write8(Adapter, REG_BWOPMODE, regBwOpMode);
- break;
- case HT_CHANNEL_WIDTH_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
- rtl8723au_write8(Adapter, REG_BWOPMODE, regBwOpMode);
- regRRSR_RSC = (regRRSR_RSC & 0x90) |
- (pHalData->nCur40MhzPrimeSC << 5);
- rtl8723au_write8(Adapter, REG_RRSR+2, regRRSR_RSC);
- break;
-
- default:
- break;
- }
-
- /* 3 */
- /* 3<2>Set PHY related register */
- /* 3 */
- switch (pHalData->CurrentChannelBW) {
- /* 20 MHz channel*/
- case HT_CHANNEL_WIDTH_20:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
- PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT(10), 1);
-
- break;
-
- /* 40 MHz channel*/
- case HT_CHANNEL_WIDTH_40:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
-
- /* Set Control channel to upper or lower. These settings
- are required only for 40MHz */
- PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand,
- (pHalData->nCur40MhzPrimeSC >> 1));
- PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00,
- pHalData->nCur40MhzPrimeSC);
- PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT(10), 0);
-
- PHY_SetBBReg(Adapter, 0x818, BIT(26) | BIT(27),
- (pHalData->nCur40MhzPrimeSC ==
- HAL_PRIME_CHNL_OFFSET_LOWER) ? 2:1);
- break;
-
- default:
- break;
- }
- /* Skip over setting of J-mode in BB register here. Default value
- is "None J mode". Emily 20070315 */
-
- /* Added it for 20/40 mhz switch time evaluation by guangan 070531 */
- /* NowL = PlatformEFIORead4Byte(Adapter, TSFR); */
- /* NowH = PlatformEFIORead4Byte(Adapter, TSFR+4); */
- /* EndTime = ((u64)NowH << 32) + NowL; */
-
- rtl8723a_phy_rf6052set_bw(Adapter, pHalData->CurrentChannelBW);
-}
-
- /*-----------------------------------------------------------------------------
- * Function: SetBWMode23a8190Pci()
- *
- * Overview: This function is exported to the "HalCommon" module
- *
- * Input: struct rtw_adapter * Adapter
- * enum ht_channel_width Bandwidth 20M or 40M
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note: We do not take j mode into consideration now
- *---------------------------------------------------------------------------*/
-void
-PHY_SetBWMode23a8723A(struct rtw_adapter *Adapter,
- enum ht_channel_width Bandwidth, unsigned char Offset)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
-
- pHalData->CurrentChannelBW = Bandwidth;
-
- pHalData->nCur40MhzPrimeSC = Offset;
-
- if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved))
- _PHY_SetBWMode23a92C(Adapter);
- else
- pHalData->CurrentChannelBW = tmpBW;
-}
-
-static void _PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
-{
- enum RF_RADIO_PATH eRFPath;
- u32 param1, param2;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- /* s1. pre common command - CmdID_SetTxPowerLevel */
- PHY_SetTxPowerLevel8723A(Adapter, channel);
-
- /* s2. RF dependent command - CmdID_RF_WriteReg,
- param1 = RF_CHNLBW, param2 = channel */
- param1 = RF_CHNLBW;
- param2 = channel;
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
- pHalData->RfRegChnlVal[eRFPath] =
- (pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2;
- PHY_SetRFReg(Adapter, eRFPath, param1,
- bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
- }
-
- /* s3. post common command - CmdID_End, None */
-}
-
-void PHY_SwChnl8723A(struct rtw_adapter *Adapter, u8 channel)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 tmpchannel = pHalData->CurrentChannel;
- bool result = true;
-
- if (channel == 0)
- channel = 1;
-
- pHalData->CurrentChannel = channel;
-
- if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
- _PHY_SwChnl8723A(Adapter, channel);
-
- if (!result)
- pHalData->CurrentChannel = tmpchannel;
- } else {
- pHalData->CurrentChannel = tmpchannel;
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
deleted file mode 100644
index 24c0ff3d82bc..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ /dev/null
@@ -1,503 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/******************************************************************************
- *
- *
- * Module: rtl8192c_rf6052.c (Source C File)
- *
- * Note: Provide RF 6052 series related APIs.
- *
- * Function:
- *
- * Export:
- *
- * Abbrev:
- *
- * History:
- * Date Who Remark
- *
- * 09/25/2008 MHC Create initial version.
- * 11/05/2008 MHC Add API for tx power setting.
- *
- *
-******************************************************************************/
-
-#define _RTL8723A_RF6052_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetBandwidth()
- *
- * Overview: This function is called by SetBWMode23aCallback8190Pci() only
- *
- * Input: struct rtw_adapter * Adapter
- * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Note: For RF type 0222D
- *---------------------------------------------------------------------------*/
-void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter,
- enum ht_channel_width Bandwidth) /* 20M or 40M */
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- switch (Bandwidth) {
- case HT_CHANNEL_WIDTH_20:
- pHalData->RfRegChnlVal[0] =
- (pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400;
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
- pHalData->RfRegChnlVal[0]);
- break;
- case HT_CHANNEL_WIDTH_40:
- pHalData->RfRegChnlVal[0] =
- (pHalData->RfRegChnlVal[0] & 0xfffff3ff);
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
- pHalData->RfRegChnlVal[0]);
- break;
- default:
- break;
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetCckTxPower
- *
- * Overview:
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/05/2008 MHC Simulate 8192 series.
- *
- *---------------------------------------------------------------------------*/
-
-void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter,
- u8 *pPowerlevel)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- u32 TxAGC[2] = {0, 0}, tmpval = 0;
- u8 idx1, idx2;
- u8 *ptr;
-
- if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
- TxAGC[RF_PATH_A] = 0x3f3f3f3f;
- TxAGC[RF_PATH_B] = 0x3f3f3f3f;
-
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] = pPowerlevel[idx1] |
- (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) |
- (pPowerlevel[idx1] << 24);
- /*
- * 2010/10/18 MH For external PA module. We need
- * to limit power index to be less than 0x20.
- */
- if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
- TxAGC[idx1] = 0x20;
- }
- } else {
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx
- * power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power
- * mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, the two mechanisms shall be separated from each other
- * and maintained independently. Thanks for Lanhsin's reminder. */
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
- TxAGC[RF_PATH_A] = 0x10101010;
- TxAGC[RF_PATH_B] = 0x10101010;
- } else if (pdmpriv->DynamicTxHighPowerLvl ==
- TxHighPwrLevel_Level2) {
- TxAGC[RF_PATH_A] = 0x00000000;
- TxAGC[RF_PATH_B] = 0x00000000;
- } else {
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] = pPowerlevel[idx1] |
- (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) |
- (pPowerlevel[idx1] << 24);
- }
-
- if (pHalData->EEPROMRegulatory == 0) {
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][7]<<8);
- TxAGC[RF_PATH_A] += tmpval;
-
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][15]<<24);
- TxAGC[RF_PATH_B] += tmpval;
- }
- }
- }
-
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- ptr = (u8 *)(&TxAGC[idx1]);
- for (idx2 = 0; idx2 < 4; idx2++) {
- if (*ptr > RF6052_MAX_TX_PWR)
- *ptr = RF6052_MAX_TX_PWR;
- ptr++;
- }
- }
-
- /* rf-A cck tx power */
- tmpval = TxAGC[RF_PATH_A] & 0xff;
- PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
- tmpval = TxAGC[RF_PATH_A] >> 8;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
-
- /* rf-B cck tx power */
- tmpval = TxAGC[RF_PATH_B] >> 24;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
- tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
-} /* PHY_RF6052SetCckTxPower */
-
-/* powerbase0 for OFDM rates */
-/* powerbase1 for HT MCS rates */
-static void getPowerBase(struct rtw_adapter *Adapter, u8 *pPowerLevel,
- u8 Channel, u32 *OfdmBase, u32 *MCSBase)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u32 ofdm, mcs;
- u8 Legacy_pwrdiff = 0;
- s8 HT20_pwrdiff = 0;
- u8 i, powerlevel[2];
-
- for (i = 0; i < 2; i++) {
- powerlevel[i] = pPowerLevel[i];
- Legacy_pwrdiff = pHalData->TxPwrLegacyHtDiff[i][Channel-1];
- ofdm = powerlevel[i] + Legacy_pwrdiff;
-
- ofdm = ofdm << 24 | ofdm << 16 | ofdm << 8 | ofdm;
- *(OfdmBase + i) = ofdm;
- }
-
- for (i = 0; i < 2; i++) {
- /* Check HT20 to HT40 diff */
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20) {
- HT20_pwrdiff = pHalData->TxPwrHt20Diff[i][Channel-1];
- powerlevel[i] += HT20_pwrdiff;
- }
- mcs = powerlevel[i];
- mcs = mcs << 24 | mcs << 16 | mcs << 8 | mcs;
- *(MCSBase + i) = mcs;
- }
-}
-
-static void
-getTxPowerWriteValByRegulatory(struct rtw_adapter *Adapter, u8 Channel,
- u8 index, u32 *powerBase0, u32 *powerBase1,
- u32 *pOutWriteVal)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- u8 i, chnlGroup = 0, pwr_diff_limit[4];
- u32 writeVal, customer_limit, rf;
-
- /* Index 0 & 1 = legacy OFDM, 2-5 = HT_MCS rate */
- for (rf = 0; rf < 2; rf++) {
- switch (pHalData->EEPROMRegulatory) {
- case 0: /* Realtek better performance */
- /* increase power diff defined by Realtek for
- * large power */
- chnlGroup = 0;
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- case 1: /* Realtek regulatory */
- /* increase power diff defined by Realtek for
- * regulatory */
- if (pHalData->pwrGroupCnt == 1)
- chnlGroup = 0;
- if (pHalData->pwrGroupCnt >= 3) {
- if (Channel <= 3)
- chnlGroup = 0;
- else if (Channel >= 4 && Channel <= 9)
- chnlGroup = 1;
- else if (Channel > 9)
- chnlGroup = 2;
-
- if (pHalData->CurrentChannelBW ==
- HT_CHANNEL_WIDTH_20)
- chnlGroup++;
- else
- chnlGroup += 4;
- }
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
- ((index < 2) ? powerBase0[rf] :
- powerBase1[rf]);
- break;
- case 2: /* Better regulatory */
- /* don't increase any power diff */
- writeVal = (index < 2) ? powerBase0[rf] :
- powerBase1[rf];
- break;
- case 3: /* Customer defined power diff. */
- chnlGroup = 0;
-
- for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index +
- (rf ? 8 : 0)]&(0x7f << (i*8))) >> (i*8));
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40) {
- if (pwr_diff_limit[i] > pHalData->PwrGroupHT40[rf][Channel-1])
- pwr_diff_limit[i] = pHalData->PwrGroupHT40[rf][Channel-1];
- } else {
- if (pwr_diff_limit[i] > pHalData->PwrGroupHT20[rf][Channel-1])
- pwr_diff_limit[i] = pHalData->PwrGroupHT20[rf][Channel-1];
- }
- }
- customer_limit = (pwr_diff_limit[3]<<24) | (pwr_diff_limit[2]<<16) |
- (pwr_diff_limit[1]<<8) | (pwr_diff_limit[0]);
- writeVal = customer_limit + ((index<2)?powerBase0[rf]:powerBase1[rf]);
- break;
- default:
- chnlGroup = 0;
- writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
- break;
- }
-
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power.
- It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism
- because it is referenced by BT coexist mechanism. */
-/* In the future, the two mechanisms shall be separated from each other and
- maintained independently. Thanks for Lanhsin's reminder. */
-
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
- writeVal = 0x14141414;
- else if (pdmpriv->DynamicTxHighPowerLvl ==
- TxHighPwrLevel_Level2)
- writeVal = 0x00000000;
-
- /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
- /* This mechanism is only applied when
- Driver-Highpower-Mechanism is OFF. */
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
- writeVal = writeVal - 0x06060606;
- else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
- writeVal = writeVal;
- *(pOutWriteVal + rf) = writeVal;
- }
-}
-
-static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index,
- u32 *pValue)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u16 RegOffset_A[6] = {
- rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
- rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
- rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12
- };
- u16 RegOffset_B[6] = {
- rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24,
- rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04,
- rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12
- };
- u8 i, rf, pwr_val[4];
- u32 writeVal;
- u16 RegOffset;
-
- for (rf = 0; rf < 2; rf++) {
- writeVal = pValue[rf];
- for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((writeVal &
- (0x7f << (i * 8))) >> (i * 8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
- pwr_val[i] = RF6052_MAX_TX_PWR;
- }
- writeVal = pwr_val[3] << 24 | pwr_val[2] << 16 |
- pwr_val[1] << 8 | pwr_val[0];
-
- if (rf == 0)
- RegOffset = RegOffset_A[index];
- else
- RegOffset = RegOffset_B[index];
-
- rtl8723au_write32(Adapter, RegOffset, writeVal);
-
- /* 201005115 Joseph: Set Tx Power diff for Tx power
- training mechanism. */
- if (((pHalData->rf_type == RF_2T2R) &&
- (RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
- RegOffset == rTxAGC_B_Mcs15_Mcs12)) ||
- ((pHalData->rf_type != RF_2T2R) &&
- (RegOffset == rTxAGC_A_Mcs07_Mcs04 ||
- RegOffset == rTxAGC_B_Mcs07_Mcs04))) {
- writeVal = pwr_val[3];
- if (RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
- RegOffset == rTxAGC_A_Mcs07_Mcs04)
- RegOffset = 0xc90;
- if (RegOffset == rTxAGC_B_Mcs15_Mcs12 ||
- RegOffset == rTxAGC_B_Mcs07_Mcs04)
- RegOffset = 0xc98;
- for (i = 0; i < 3; i++) {
- if (i != 2)
- writeVal = (writeVal > 8) ?
- (writeVal - 8) : 0;
- else
- writeVal = (writeVal > 6) ?
- (writeVal - 6) : 0;
- rtl8723au_write8(Adapter, RegOffset + i,
- (u8)writeVal);
- }
- }
- }
-}
-/*-----------------------------------------------------------------------------
- * Function: PHY_RF6052SetOFDMTxPower
- *
- * Overview: For legacy and HT OFDM, we must read the EEPROM TX power index
- * for each channel and read the original value in the TX power
- * register area from 0xe00. We add the offset to the
- * original value to obtain the correct tx power.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Remark
- * 11/05/2008 MHC Simulate 8192 series method.
- * 01/06/2009 MHC 1. Prevent Path B tx power overflow or
- * underflow due to A/B pwr difference or
- * legacy/HT pwr diff.
- * 2. We are concerned with the path B legacy/HT OFDM difference.
- * 01/22/2009 MHC Support new EEPROM format from SD3.
- *
- *---------------------------------------------------------------------------*/
-void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter,
- u8 *pPowerLevel, u8 Channel)
-{
- u32 writeVal[2], powerBase0[2], powerBase1[2];
- u8 index = 0;
-
- getPowerBase(Adapter, pPowerLevel, Channel,
- &powerBase0[0], &powerBase1[0]);
-
- for (index = 0; index < 6; index++) {
- getTxPowerWriteValByRegulatory(Adapter, Channel, index,
- &powerBase0[0], &powerBase1[0], &writeVal[0]);
-
- writeOFDMPowerReg(Adapter, index, &writeVal[0]);
- }
-}
-
-static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
-{
- u32 u4RegValue = 0;
- u8 eRFPath;
- struct bb_reg_define *pPhyReg;
- int rtStatus = _SUCCESS;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- /* 3----------------------------------------------------------------- */
- /* 3 <2> Initialize RF */
- /* 3----------------------------------------------------------------- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
-
- pPhyReg = &pHalData->PHYRegDef[eRFPath];
-
- /*----Store original RFENV control type----*/
- switch (eRFPath) {
- case RF_PATH_A:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
- bRFSI_RFENV);
- break;
- case RF_PATH_B:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
- bRFSI_RFENV << 16);
- break;
- }
-
- /*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Set RF_ENV output high----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength,
- 0x0); /* Set 1 to 4 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength,
- 0x0); /* Set 0 to 12 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Initialize RF from configuration file----*/
- switch (eRFPath) {
- case RF_PATH_A:
- ODM_ReadAndConfig_RadioA_1T_8723A(&pHalData->odmpriv);
- break;
- case RF_PATH_B:
- break;
- }
-
- /*----Restore RFENV control type----*/
- switch (eRFPath) {
- case RF_PATH_A:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
- bRFSI_RFENV, u4RegValue);
- break;
- case RF_PATH_B:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
- bRFSI_RFENV << 16, u4RegValue);
- break;
- }
-
- if (rtStatus != _SUCCESS) {
- goto phy_RF6052_Config_ParaFile_Fail;
- }
- }
-phy_RF6052_Config_ParaFile_Fail:
- return rtStatus;
-}
-
-int PHY_RF6052_Config8723A(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- /* Initialize general global value */
- /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
- if (pHalData->rf_type == RF_1T1R)
- pHalData->NumTotalRFPath = 1;
- else
- pHalData->NumTotalRFPath = 2;
-
- /* Config BB and RF */
- return phy_RF6052_Config_ParaFile(Adapter);
-}
-
-/* End of HalRf6052.c */
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c b/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c
deleted file mode 100644
index 81b5efe649fa..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rxdesc.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_REDESC_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <rtl8723a_hal.h>
-
-static void process_rssi(struct rtw_adapter *padapter,
- struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib = &prframe->attrib;
- struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
-
- if (signal_stat->update_req) {
- signal_stat->total_num = 0;
- signal_stat->total_val = 0;
- signal_stat->update_req = 0;
- }
-
- signal_stat->total_num++;
- signal_stat->total_val += pattrib->phy_info.SignalStrength;
- signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
-}
-
-static void process_link_qual(struct rtw_adapter *padapter,
- struct recv_frame *prframe)
-{
- struct rx_pkt_attrib *pattrib;
- struct signal_stat *signal_stat;
-
- if (prframe == NULL || padapter == NULL)
- return;
-
- pattrib = &prframe->attrib;
- signal_stat = &padapter->recvpriv.signal_qual_data;
-
- if (signal_stat->update_req) {
- signal_stat->total_num = 0;
- signal_stat->total_val = 0;
- signal_stat->update_req = 0;
- }
-
- signal_stat->total_num++;
- signal_stat->total_val += pattrib->phy_info.SignalQuality;
- signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
-}
-
-/* void rtl8723a_process_phy_info(struct rtw_adapter *padapter, union recv_frame *prframe) */
-void rtl8723a_process_phy_info(struct rtw_adapter *padapter, void *prframe)
-{
- struct recv_frame *precvframe = prframe;
- /* Check RSSI */
- process_rssi(padapter, precvframe);
- /* Check EVM */
- process_link_qual(padapter, precvframe);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c b/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c
deleted file mode 100644
index 3c46294b8788..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_sreset.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_SRESET_C_
-
-#include <rtl8723a_sreset.h>
-#include <rtl8723a_hal.h>
-#include <usb_ops_linux.h>
-
-void rtl8723a_sreset_xmit_status_check(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- unsigned long current_time;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- unsigned int diff_time;
- u32 txdma_status;
-
- txdma_status = rtl8723au_read32(padapter, REG_TXDMA_STATUS);
- if (txdma_status != 0) {
- DBG_8723A("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
- rtw_sreset_reset(padapter);
- }
-
- current_time = jiffies;
-
- if (0 == pxmitpriv->free_xmitbuf_cnt || 0 == pxmitpriv->free_xmit_extbuf_cnt) {
-
- diff_time = jiffies_to_msecs(jiffies - psrtpriv->last_tx_time);
-
- if (diff_time > 2000) {
- if (psrtpriv->last_tx_complete_time == 0) {
- psrtpriv->last_tx_complete_time = current_time;
- } else {
- diff_time = jiffies_to_msecs(jiffies - psrtpriv->last_tx_complete_time);
- if (diff_time > 4000) {
- DBG_8723A("%s tx hang\n", __func__);
- rtw_sreset_reset(padapter);
- }
- }
- }
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_recv.c b/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
deleted file mode 100644
index 0fec84bcb5d9..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723au_recv.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8192CU_RECV_C_
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <mlme_osdep.h>
-#include <linux/ip.h>
-#include <linux/if_ether.h>
-#include <usb_ops.h>
-#include <wifi.h>
-#include <rtl8723a_hal.h>
-
-int rtl8723au_init_recv_priv(struct rtw_adapter *padapter)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int i, size, res = _SUCCESS;
- struct recv_buf *precvbuf;
- unsigned long tmpaddr;
- unsigned long alignment;
- struct sk_buff *pskb;
-
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8723au_recv_tasklet,
- (unsigned long)padapter);
-
- precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!precvpriv->int_in_urb)
- DBG_8723A("alloc_urb for interrupt in endpoint fail !!!!\n");
- precvpriv->int_in_buf = kzalloc(USB_INTR_CONTENT_LENGTH, GFP_KERNEL);
- if (!precvpriv->int_in_buf)
- DBG_8723A("alloc_mem for interrupt in endpoint fail !!!!\n");
-
- size = NR_RECVBUFF * sizeof(struct recv_buf);
- precvpriv->precv_buf = kzalloc(size, GFP_KERNEL);
- if (!precvpriv->precv_buf) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "alloc recv_buf fail!\n");
- goto exit;
- }
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- INIT_LIST_HEAD(&precvbuf->list);
-
- precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (!precvbuf->purb)
- break;
-
- precvbuf->adapter = padapter;
-
- precvbuf++;
- }
-
- skb_queue_head_init(&precvpriv->rx_skb_queue);
- skb_queue_head_init(&precvpriv->free_recv_skb_queue);
-
- for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
- size = MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ;
- pskb = __netdev_alloc_skb(padapter->pnetdev, size, GFP_KERNEL);
-
- if (pskb) {
- pskb->dev = padapter->pnetdev;
-
- tmpaddr = (unsigned long)pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
-
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
-
- pskb = NULL;
- }
-
-exit:
- return res;
-}
-
-void rtl8723au_free_recv_priv(struct rtw_adapter *padapter)
-{
- int i;
- struct recv_buf *precvbuf;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- usb_free_urb(precvbuf->purb);
-
- if (precvbuf->pskb)
- dev_kfree_skb_any(precvbuf->pskb);
-
- precvbuf++;
- }
-
- kfree(precvpriv->precv_buf);
-
- usb_free_urb(precvpriv->int_in_urb);
- kfree(precvpriv->int_in_buf);
-
- if (skb_queue_len(&precvpriv->rx_skb_queue))
- DBG_8723A(KERN_WARNING "rx_skb_queue not empty\n");
-
- skb_queue_purge(&precvpriv->rx_skb_queue);
-
- if (skb_queue_len(&precvpriv->free_recv_skb_queue)) {
- DBG_8723A(KERN_WARNING "free_recv_skb_queue not empty, %d\n",
- skb_queue_len(&precvpriv->free_recv_skb_queue));
- }
-
- skb_queue_purge(&precvpriv->free_recv_skb_queue);
-}
-
-struct recv_stat_cpu {
- u32 rxdw0;
- u32 rxdw1;
- u32 rxdw2;
- u32 rxdw3;
- u32 rxdw4;
- u32 rxdw5;
-};
-
-void update_recvframe_attrib(struct recv_frame *precvframe,
- struct recv_stat *prxstat)
-{
- struct rx_pkt_attrib *pattrib;
- struct recv_stat_cpu report;
- struct rxreport_8723a *prxreport;
-
- report.rxdw0 = le32_to_cpu(prxstat->rxdw0);
- report.rxdw1 = le32_to_cpu(prxstat->rxdw1);
- report.rxdw2 = le32_to_cpu(prxstat->rxdw2);
- report.rxdw3 = le32_to_cpu(prxstat->rxdw3);
- report.rxdw4 = le32_to_cpu(prxstat->rxdw4);
- report.rxdw5 = le32_to_cpu(prxstat->rxdw5);
-
- prxreport = (struct rxreport_8723a *)&report;
-
- pattrib = &precvframe->attrib;
- memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
-
- /* update rx report to recv_frame attribute */
- pattrib->pkt_len = (u16)prxreport->pktlen;
- pattrib->drvinfo_sz = (u8)(prxreport->drvinfosize << 3);
- pattrib->physt = (u8)prxreport->physt;
-
- pattrib->crc_err = (u8)prxreport->crc32;
- pattrib->icv_err = (u8)prxreport->icverr;
-
- pattrib->bdecrypted = (u8)(prxreport->swdec ? 0 : 1);
- pattrib->encrypt = (u8)prxreport->security;
-
- pattrib->qos = (u8)prxreport->qos;
- pattrib->priority = (u8)prxreport->tid;
-
- pattrib->amsdu = (u8)prxreport->amsdu;
-
- pattrib->seq_num = (u16)prxreport->seq;
- pattrib->frag_num = (u8)prxreport->frag;
- pattrib->mfrag = (u8)prxreport->mf;
- pattrib->mdata = (u8)prxreport->md;
-
- pattrib->mcs_rate = (u8)prxreport->rxmcs;
- pattrib->rxht = (u8)prxreport->rxht;
-}
-
-void update_recvframe_phyinfo(struct recv_frame *precvframe,
- struct phy_stat *pphy_status)
-{
- struct rtw_adapter *padapter = precvframe->adapter;
- struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct phy_info *pPHYInfo = &pattrib->phy_info;
- struct odm_packet_info pkt_info;
- u8 *sa = NULL, *da;
- struct sta_priv *pstapriv;
- struct sta_info *psta;
- struct sk_buff *skb = precvframe->pkt;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- bool matchbssid = false;
- u8 *bssid;
-
- matchbssid = !ieee80211_is_ctl(hdr->frame_control) &&
- !pattrib->icv_err && !pattrib->crc_err;
-
- if (matchbssid) {
- switch (hdr->frame_control &
- cpu_to_le16(IEEE80211_FCTL_TODS |
- IEEE80211_FCTL_FROMDS)) {
- case cpu_to_le16(IEEE80211_FCTL_TODS):
- bssid = hdr->addr1;
- break;
- case cpu_to_le16(IEEE80211_FCTL_FROMDS):
- bssid = hdr->addr2;
- break;
- case cpu_to_le16(0):
- bssid = hdr->addr3;
- break;
- default:
- bssid = NULL;
- matchbssid = false;
- }
-
- if (bssid)
- matchbssid = ether_addr_equal(
- get_bssid(&padapter->mlmepriv), bssid);
- }
-
- pkt_info.bPacketMatchBSSID = matchbssid;
-
- da = ieee80211_get_DA(hdr);
- pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
- (!memcmp(da, myid(&padapter->eeprompriv), ETH_ALEN));
-
- pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
- ieee80211_is_beacon(hdr->frame_control);
-
- pkt_info.StationID = 0xFF;
- if (pkt_info.bPacketBeacon) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == true)
- sa = padapter->mlmepriv.cur_network.network.MacAddress;
- /* to do Ad-hoc */
- } else {
- sa = ieee80211_get_SA(hdr);
- }
-
- pstapriv = &padapter->stapriv;
- psta = rtw_get_stainfo23a(pstapriv, sa);
- if (psta) {
- pkt_info.StationID = psta->mac_id;
- /* printk("%s ==> StationID(%d)\n", __func__, pkt_info.StationID); */
- }
- pkt_info.Rate = pattrib->mcs_rate;
-
- ODM_PhyStatusQuery23a(&pHalData->odmpriv, pPHYInfo,
- (u8 *)pphy_status, &pkt_info);
- precvframe->psta = NULL;
- if (pkt_info.bPacketMatchBSSID &&
- (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
- if (psta) {
- precvframe->psta = psta;
- rtl8723a_process_phy_info(padapter, precvframe);
- }
- } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
- if (check_fwstate(&padapter->mlmepriv,
- WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) ==
- true) {
- if (psta)
- precvframe->psta = psta;
- }
- rtl8723a_process_phy_info(padapter, precvframe);
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
deleted file mode 100644
index 14746dd8db78..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8192C_XMIT_C_
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wifi.h>
-#include <osdep_intf.h>
-#include <usb_ops.h>
-/* include <rtl8192c_hal.h> */
-#include <rtl8723a_hal.h>
-
-static int urb_zero_packet_chk(struct rtw_adapter *padapter, int sz)
-{
- int blnSetTxDescOffset;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
-
- if (pdvobj->ishighspeed) {
- if (((sz + TXDESC_SIZE) % 512) == 0)
- blnSetTxDescOffset = 1;
- else
- blnSetTxDescOffset = 0;
- } else {
- if (((sz + TXDESC_SIZE) % 64) == 0)
- blnSetTxDescOffset = 1;
- else
- blnSetTxDescOffset = 0;
- }
- return blnSetTxDescOffset;
-}
-
-static void rtl8192cu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
-{
- __le16 *usPtr = (__le16 *)ptxdesc;
- u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
- u32 index;
- u16 checksum = 0;
-
- /* Clear first */
- ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
-
- for (index = 0 ; index < count ; index++)
- checksum = checksum ^ le16_to_cpu(*(usPtr + index));
-
- ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff&checksum);
-}
-
-static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
-{
- if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
- switch (pattrib->encrypt) {
- /* SEC_TYPE */
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- /* ptxdesc->txdw1 |= cpu_to_le32((0x02<<22)&0x00c00000); */
- ptxdesc->txdw1 |= cpu_to_le32((0x01<<22)&0x00c00000);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- ptxdesc->txdw1 |= cpu_to_le32((0x03<<22)&0x00c00000);
- break;
- case 0:
- default:
- break;
- }
- }
-}
-
-static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
-{
- /* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
-
- switch (pattrib->vcs_mode) {
- case RTS_CTS:
- *pdw |= cpu_to_le32(BIT(12));
- break;
- case CTS_TO_SELF:
- *pdw |= cpu_to_le32(BIT(11));
- break;
- case NONE_VCS:
- default:
- break;
- }
-
- if (pattrib->vcs_mode) {
- *pdw |= cpu_to_le32(BIT(13));
-
- /* Set RTS BW */
- if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
-
- if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- *pdw |= cpu_to_le32((0x01<<28)&0x30000000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- *pdw |= cpu_to_le32((0x02<<28)&0x30000000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- *pdw |= 0;
- else
- *pdw |= cpu_to_le32((0x03<<28)&0x30000000);
- }
- }
-}
-
-static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
-{
- if (pattrib->ht_en) {
- *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
-
- if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- *pdw |= cpu_to_le32((0x01<<20)&0x003f0000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- *pdw |= cpu_to_le32((0x02<<20)&0x003f0000);
- else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- *pdw |= 0;
- else
- *pdw |= cpu_to_le32((0x03<<20)&0x003f0000);
- }
-}
-
-static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz)
-{
- int pull = 0;
- uint qsel;
- struct rtw_adapter *padapter = pxmitframe->padapter;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- int bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (urb_zero_packet_chk(padapter, sz) == 0) {
- ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
- pull = 1;
- pxmitframe->pkt_offset--;
- }
-
- memset(ptxdesc, 0, sizeof(struct tx_desc));
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
-
- qsel = (uint)(pattrib->qsel & 0x0000001f);
- ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
-
- ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<<16) & 0x000f0000);
-
- fill_txdesc_sectype(pattrib, ptxdesc);
-
- if (pattrib->ampdu_en)
- ptxdesc->txdw1 |= cpu_to_le32(BIT(5));/* AGG EN */
- else
- ptxdesc->txdw1 |= cpu_to_le32(BIT(6));/* AGG BK */
-
- /* offset 8 */
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
-
- /* offset 16 , offset 20 */
- if (pattrib->qos_en)
- ptxdesc->txdw4 |= cpu_to_le32(BIT(6));/* QoS */
-
- if ((pattrib->ether_type != 0x888e) &&
- (pattrib->ether_type != 0x0806) &&
- (pattrib->dhcp_pkt != 1)) {
- /* Non EAP & ARP & DHCP type data packet */
-
- fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
- fill_txdesc_phy(pattrib, &ptxdesc->txdw4);
-
- ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate = 24M */
- ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* */
-
- /* use REG_INIDATA_RATE_SEL value */
- ptxdesc->txdw5 |= cpu_to_le32(pdmpriv->INIDATA_RATE[pattrib->mac_id]);
- } else {
- /* EAP data packet and ARP packet. */
- /* Use the 1M data rate to send the EAP/ARP packet. */
- /* This may make the handshake smoother. */
-
- ptxdesc->txdw1 |= cpu_to_le32(BIT(6));/* AGG BK */
-
- ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
-
- if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
- ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
-
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
- }
- } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
-
- qsel = (uint)(pattrib->qsel&0x0000001f);
- ptxdesc->txdw1 |= cpu_to_le32((qsel<<QSEL_SHT)&0x00001f00);
-
- ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid<<16) & 0x000f0000);
-
- /* offset 8 */
- /* CCX-TXRPT ack for xmit mgmt frames. */
- if (pxmitframe->ack_report)
- ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
-
- /* offset 16 */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
-
- /* offset 20 */
- ptxdesc->txdw5 |= cpu_to_le32(BIT(17));/* retry limit enable */
- ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
-
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
- } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
- DBG_8723A("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
- } else {
- DBG_8723A("pxmitframe->frame_tag = %d\n",
- pxmitframe->frame_tag);
-
- /* offset 4 */
- ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);/* CAM_ID(MAC_ID) */
-
- ptxdesc->txdw1 |= cpu_to_le32((6<<16) & 0x000f0000);/* raid */
-
- /* offset 8 */
-
- /* offset 12 */
- ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0xffff0000);
-
- /* offset 16 */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
-
- /* offset 20 */
- ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
- }
-
- /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
- /* mgnt frame should be controlled by Hw because Fw will also send null data */
- /* which we cannot control when Fw LPS is enabled. */
- /* --> enable non-Qos data sequence numbering by default. 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
- if (!pattrib->qos_en) {
- /* Hw set sequence number */
- ptxdesc->txdw4 |= cpu_to_le32(BIT(7));
- /* set bit3 to 1. */
- ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
- }
-
- /* offset 0 */
- ptxdesc->txdw0 |= cpu_to_le32(sz&0x0000ffff);
- ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
- ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);/* 32 bytes for TX Desc */
-
- if (bmcst)
- ptxdesc->txdw0 |= cpu_to_le32(BIT(24));
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "offset0-txdesc = 0x%x\n", ptxdesc->txdw0);
-
- /* offset 4 */
- /* pkt_offset, unit:8 bytes padding */
- if (pxmitframe->pkt_offset > 0)
- ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
-
- rtl8192cu_cal_txdesc_chksum(ptxdesc);
- return pull;
-}
-
-static int rtw_dump_xframe(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- int ret = _SUCCESS;
- int inner_ret = _SUCCESS;
- int t, sz, w_sz, pull = 0;
- u8 *mem_addr;
- u32 ff_hwaddr;
- struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG &&
- pxmitframe->attrib.ether_type != ETH_P_ARP &&
- pxmitframe->attrib.ether_type != ETH_P_PAE &&
- pxmitframe->attrib.dhcp_pkt != 1)
- rtw_issue_addbareq_cmd23a(padapter, pxmitframe);
-
- mem_addr = pxmitframe->buf_addr;
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, "rtw_dump_xframe()\n");
-
- for (t = 0; t < pattrib->nr_frags; t++) {
- if (inner_ret != _SUCCESS && ret == _SUCCESS)
- ret = _FAIL;
-
- if (t != (pattrib->nr_frags - 1)) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- "pattrib->nr_frags =%d\n", pattrib->nr_frags);
-
- sz = pxmitpriv->frag_len;
- sz = sz - 4 - pattrib->icv_len;
- } else {
- /* no frag */
- sz = pattrib->last_txcmdsz;
- }
-
- pull = update_txdesc(pxmitframe, mem_addr, sz);
-
- if (pull) {
- mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
-
- pxmitframe->buf_addr = mem_addr;
-
- w_sz = sz + TXDESC_SIZE;
- } else {
- w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
- }
-
- ff_hwaddr = rtw_get_ff_hwaddr23a(pxmitframe);
- inner_ret = rtl8723au_write_port(padapter, ff_hwaddr,
- w_sz, pxmitbuf);
- rtw_count_tx_stats23a(padapter, pxmitframe, sz);
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "rtw_write_port, w_sz =%d\n", w_sz);
-
- mem_addr += w_sz;
-
- mem_addr = PTR_ALIGN(mem_addr, 4);
- }
-
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
-
- if (ret != _SUCCESS)
- rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
-
- return ret;
-}
-
-bool rtl8723au_xmitframe_complete(struct rtw_adapter *padapter,
- struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf)
-{
- struct hw_xmit *phwxmits;
- struct xmit_frame *pxmitframe;
- int hwentry;
- int res = _SUCCESS, xcnt = 0;
-
- phwxmits = pxmitpriv->hwxmits;
- hwentry = pxmitpriv->hwxmit_entry;
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, "xmitframe_complete()\n");
-
- if (pxmitbuf == NULL) {
- pxmitbuf = rtw_alloc_xmitbuf23a(pxmitpriv);
- if (!pxmitbuf)
- return false;
- }
- pxmitframe = rtw_dequeue_xframe23a(pxmitpriv, phwxmits, hwentry);
-
- if (pxmitframe) {
- pxmitframe->pxmitbuf = pxmitbuf;
-
- pxmitframe->buf_addr = pxmitbuf->pbuf;
-
- pxmitbuf->priv_data = pxmitframe;
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- if (pxmitframe->attrib.priority <= 15)/* TID0~15 */
- res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
-
- rtw_os_xmit_complete23a(padapter, pxmitframe);/* always return ndis_packet after rtw_xmitframe_coalesce23a */
- }
-
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- "xmitframe_complete(): rtw_dump_xframe\n");
-
- if (res == _SUCCESS) {
- rtw_dump_xframe(padapter, pxmitframe);
- } else {
- rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
- }
- xcnt++;
- } else {
- rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
- return false;
- }
- return true;
-}
-
-static int xmitframe_direct(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- int res;
-
- res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
- if (res == _SUCCESS)
- rtw_dump_xframe(padapter, pxmitframe);
- return res;
-}
-
-/*
- * Return
- * true dump packet directly
- * false enqueue packet
- */
-bool rtl8723au_hal_xmit(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- int res;
- struct xmit_buf *pxmitbuf = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- pattrib->qsel = pattrib->priority;
- spin_lock_bh(&pxmitpriv->lock);
-
-#ifdef CONFIG_8723AU_AP_MODE
- if (xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe)) {
- struct sta_info *psta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- if (pattrib->psta)
- psta = pattrib->psta;
- else
- psta = rtw_get_stainfo23a(pstapriv, pattrib->ra);
-
- if (psta) {
- if (psta->sleepq_len > (NR_XMITFRAME>>3))
- wakeup_sta_to_xmit23a(padapter, psta);
- }
-
- return false;
- }
-#endif
-
- if (rtw_txframes_sta_ac_pending23a(padapter, pattrib) > 0)
- goto enqueue;
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
- goto enqueue;
-
- pxmitbuf = rtw_alloc_xmitbuf23a(pxmitpriv);
- if (pxmitbuf == NULL)
- goto enqueue;
-
- spin_unlock_bh(&pxmitpriv->lock);
-
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pxmitframe;
-
- if (xmitframe_direct(padapter, pxmitframe) != _SUCCESS) {
- rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
- }
- return true;
-
-enqueue:
- res = rtw_xmitframe_enqueue23a(padapter, pxmitframe);
- spin_unlock_bh(&pxmitpriv->lock);
-
- if (res != _SUCCESS) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
- "pre_xmitframe: enqueue xmitframe fail\n");
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
-
- /* Trick, make the statistics correct */
- pxmitpriv->tx_pkts--;
- pxmitpriv->tx_drop++;
- return true;
- }
- return false;
-}
-
-int rtl8723au_mgnt_xmit(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe)
-{
- return rtw_dump_xframe(padapter, pmgntframe);
-}
-
-int rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int err;
-
- err = rtw_xmitframe_enqueue23a(padapter, pxmitframe);
- if (err != _SUCCESS) {
- rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
-
- /* Trick, make the statistics correct */
- pxmitpriv->tx_pkts--;
- pxmitpriv->tx_drop++;
- } else {
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
- }
- return err;
-}
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
deleted file mode 100644
index fa47aebf8b98..000000000000
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ /dev/null
@@ -1,1269 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _HCI_HAL_INIT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <rtw_efuse.h>
-
-#include <HalPwrSeqCmd.h>
-#include <Hal8723PwrSeq.h>
-#include <rtl8723a_hal.h>
-#include <linux/ieee80211.h>
-
-#include <usb_ops.h>
-
-static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
- enum rt_rf_power_state eRFPowerState);
-
-static void
-_ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
-{
- u8 value8;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
-
- pHalData->OutEpQueueSel = 0;
- pHalData->OutEpNumber = 0;
-
- /* Normal and High queue */
- value8 = rtl8723au_read8(pAdapter, (REG_NORMAL_SIE_EP + 1));
-
- if (value8 & USB_NORMAL_SIE_EP_MASK) {
- pHalData->OutEpQueueSel |= TX_SELE_HQ;
- pHalData->OutEpNumber++;
- }
-
- if ((value8 >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
- pHalData->OutEpQueueSel |= TX_SELE_NQ;
- pHalData->OutEpNumber++;
- }
-
- /* Low queue */
- value8 = rtl8723au_read8(pAdapter, (REG_NORMAL_SIE_EP + 2));
- if (value8 & USB_NORMAL_SIE_EP_MASK) {
- pHalData->OutEpQueueSel |= TX_SELE_LQ;
- pHalData->OutEpNumber++;
- }
-
- /* TODO: Error recovery for this case */
- /* RT_ASSERT((NumOutPipe == pHalData->OutEpNumber),
- ("Out EP number isn't match! %d(Descriptor) != %d (SIE reg)\n",
- (u32)NumOutPipe, (u32)pHalData->OutEpNumber)); */
-}
-
-bool rtl8723au_chip_configure(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- u8 NumInPipe = pdvobjpriv->RtNumInPipes;
- u8 NumOutPipe = pdvobjpriv->RtNumOutPipes;
-
- _ConfigChipOutEP(padapter, NumOutPipe);
-
- /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (pHalData->OutEpNumber == 1) {
- if (NumInPipe != 1)
- return false;
- }
-
- return Hal_MappingOutPipe23a(padapter, NumOutPipe);
-}
-
-static int _InitPowerOn(struct rtw_adapter *padapter)
-{
- u16 value16;
- u8 value8;
-
- /* RSV_CTRL 0x1C[7:0] = 0x00
- unlock ISO/CLK/Power control register */
- rtl8723au_write8(padapter, REG_RSV_CTRL, 0x0);
-
- /* HW Power on sequence */
- if (!HalPwrSeqCmdParsing23a(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_USB_MSK, rtl8723AU_card_enable_flow))
- return _FAIL;
-
- /* 0x04[19] = 1, suggest by Jackie 2011.05.09, reset 8051 */
- value8 = rtl8723au_read8(padapter, REG_APS_FSMCO+2);
- rtl8723au_write8(padapter, REG_APS_FSMCO + 2, value8 | BIT(3));
-
- /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
- /* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy.
- Added by tynli. 2011.08.31. */
- value16 = rtl8723au_read16(padapter, REG_CR);
- value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
- PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN |
- ENSEC | CALTMR_EN);
- rtl8723au_write16(padapter, REG_CR, value16);
-
- /* for Efuse PG, suggest by Jackie 2011.11.23 */
- PHY_SetBBReg(padapter, REG_EFUSE_CTRL, BIT(28)|BIT(29)|BIT(30), 0x06);
-
- return _SUCCESS;
-}
-
-/* Shall USB interface init this? */
-static void _InitInterrupt(struct rtw_adapter *Adapter)
-{
- u32 value32;
-
- /* HISR - turn all on */
- value32 = 0xFFFFFFFF;
- rtl8723au_write32(Adapter, REG_HISR, value32);
-
- /* HIMR - turn all on */
- rtl8723au_write32(Adapter, REG_HIMR, value32);
-}
-
-static void _InitQueueReservedPage(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 numHQ = 0;
- u32 numLQ = 0;
- u32 numNQ = 0;
- u32 numPubQ;
- u32 value32;
- u8 value8;
- bool bWiFiConfig = pregistrypriv->wifi_spec;
-
- /* RT_ASSERT((outEPNum>= 2), ("for WMM , number of out-ep "
- "must more than or equal to 2!\n")); */
-
- numPubQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_PUBQ : NORMAL_PAGE_NUM_PUBQ;
-
- if (pHalData->OutEpQueueSel & TX_SELE_HQ) {
- numHQ = bWiFiConfig ?
- WMM_NORMAL_PAGE_NUM_HPQ : NORMAL_PAGE_NUM_HPQ;
- }
-
- if (pHalData->OutEpQueueSel & TX_SELE_LQ) {
- numLQ = bWiFiConfig ?
- WMM_NORMAL_PAGE_NUM_LPQ : NORMAL_PAGE_NUM_LPQ;
- }
- /* NOTE: This step shall be done before
- writing REG_RQPN. */
- if (pHalData->OutEpQueueSel & TX_SELE_NQ) {
- numNQ = bWiFiConfig ?
- WMM_NORMAL_PAGE_NUM_NPQ : NORMAL_PAGE_NUM_NPQ;
- }
- value8 = (u8)_NPQ(numNQ);
- rtl8723au_write8(Adapter, REG_RQPN_NPQ, value8);
-
- /* TX DMA */
- value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
- rtl8723au_write32(Adapter, REG_RQPN, value32);
-}
-
-static void _InitTxBufferBoundary(struct rtw_adapter *Adapter)
-{
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
-
- u8 txpktbuf_bndy;
-
- if (!pregistrypriv->wifi_spec)
- txpktbuf_bndy = TX_PAGE_BOUNDARY;
- else /* for WMM */
- txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY;
-
- rtl8723au_write8(Adapter, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
- rtl8723au_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
- rtl8723au_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
- rtl8723au_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
- rtl8723au_write8(Adapter, REG_TDECTRL+1, txpktbuf_bndy);
-}
-
-static void _InitPageBoundary(struct rtw_adapter *Adapter)
-{
- /* RX Page Boundary */
- /* srand(static_cast<unsigned int>(time(NULL))); */
- u16 rxff_bndy = 0x27FF;/* rand() % 1) ? 0x27FF : 0x23FF; */
-
- rtl8723au_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
-
- /* TODO: ?? shall we set tx boundary? */
-}
-
-static void
-_InitNormalChipRegPriority(struct rtw_adapter *Adapter, u16 beQ, u16 bkQ,
- u16 viQ, u16 voQ, u16 mgtQ, u16 hiQ)
-{
- u16 value16 = rtl8723au_read16(Adapter, REG_TRXDMA_CTRL) & 0x7;
-
- value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
- _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
- _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
-
- rtl8723au_write16(Adapter, REG_TRXDMA_CTRL, value16);
-}
-
-static void _InitNormalChipOneOutEpPriority(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u16 value = 0;
-
- switch (pHalData->OutEpQueueSel) {
- case TX_SELE_HQ:
- value = QUEUE_HIGH;
- break;
- case TX_SELE_LQ:
- value = QUEUE_LOW;
- break;
- case TX_SELE_NQ:
- value = QUEUE_NORMAL;
- break;
- default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
- break;
- }
-
- _InitNormalChipRegPriority(Adapter, value, value, value,
- value, value, value);
-}
-
-static void _InitNormalChipTwoOutEpPriority(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
- u16 valueHi = 0;
- u16 valueLow = 0;
-
- switch (pHalData->OutEpQueueSel) {
- case (TX_SELE_HQ | TX_SELE_LQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_NQ | TX_SELE_LQ):
- valueHi = QUEUE_NORMAL;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_HQ | TX_SELE_NQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_NORMAL;
- break;
- default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
- break;
- }
-
- if (!pregistrypriv->wifi_spec) {
- beQ = valueLow;
- bkQ = valueLow;
- viQ = valueHi;
- voQ = valueHi;
- mgtQ = valueHi;
- hiQ = valueHi;
- } else {/* for WMM , CONFIG_OUT_EP_WIFI_MODE */
- beQ = valueLow;
- bkQ = valueHi;
- viQ = valueHi;
- voQ = valueLow;
- mgtQ = valueHi;
- hiQ = valueHi;
- }
-
- _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
-}
-
-static void _InitNormalChipThreeOutEpPriority(struct rtw_adapter *Adapter)
-{
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
-
- if (!pregistrypriv->wifi_spec) {/* typical setting */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_LOW;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
- } else {/* for WMM */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_NORMAL;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
- }
- _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
-}
-
-static void _InitQueuePriority(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- switch (pHalData->OutEpNumber) {
- case 1:
- _InitNormalChipOneOutEpPriority(Adapter);
- break;
- case 2:
- _InitNormalChipTwoOutEpPriority(Adapter);
- break;
- case 3:
- _InitNormalChipThreeOutEpPriority(Adapter);
- break;
- default:
- /* RT_ASSERT(false, ("Shall not reach here!\n")); */
- break;
- }
-}
-
-static void _InitTransferPageSize(struct rtw_adapter *Adapter)
-{
- /* Tx page size is always 128. */
-
- u8 value8;
- value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
- rtl8723au_write8(Adapter, REG_PBP, value8);
-}
-
-static void _InitDriverInfoSize(struct rtw_adapter *Adapter, u8 drvInfoSize)
-{
- rtl8723au_write8(Adapter, REG_RX_DRVINFO_SZ, drvInfoSize);
-}
-
-static void _InitWMACSetting(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- /* don't turn on AAP; it would allow all packets to reach the driver */
- pHalData->ReceiveConfig = RCR_APM | RCR_AM | RCR_AB | RCR_CBSSID_DATA |
- RCR_CBSSID_BCN | RCR_APP_ICV | RCR_AMF |
- RCR_HTC_LOC_CTRL | RCR_APP_MIC |
- RCR_APP_PHYSTS;
-
- /* some REG_RCR will be modified later by
- phy_ConfigMACWithHeaderFile() */
- rtl8723au_write32(Adapter, REG_RCR, pHalData->ReceiveConfig);
-
- /* Accept all multicast address */
- rtl8723au_write32(Adapter, REG_MAR, 0xFFFFFFFF);
- rtl8723au_write32(Adapter, REG_MAR + 4, 0xFFFFFFFF);
-
- /* Accept all data frames */
- /* value16 = 0xFFFF; */
- /* rtl8723au_write16(Adapter, REG_RXFLTMAP2, value16); */
-
- /* 2010.09.08 hpfan */
- /* Since ADF is removed from RCR, ps-poll will not be indicated
- to the driver, */
- /* RxFilterMap should mask ps-poll to guarantee AP mode can
- rx ps-poll. */
- /* value16 = 0x400; */
- /* rtl8723au_write16(Adapter, REG_RXFLTMAP1, value16); */
-
- /* Accept all management frames */
- /* value16 = 0xFFFF; */
- /* rtl8723au_write16(Adapter, REG_RXFLTMAP0, value16); */
-
- /* enable RX_SHIFT bits */
- /* rtl8723au_write8(Adapter, REG_TRXDMA_CTRL, rtl8723au_read8(Adapter,
- REG_TRXDMA_CTRL)|BIT(1)); */
-}
-
-static void _InitAdaptiveCtrl(struct rtw_adapter *Adapter)
-{
- u16 value16;
- u32 value32;
-
- /* Response Rate Set */
- value32 = rtl8723au_read32(Adapter, REG_RRSR);
- value32 &= ~RATE_BITMAP_ALL;
- value32 |= RATE_RRSR_CCK_ONLY_1M;
- rtl8723au_write32(Adapter, REG_RRSR, value32);
-
- /* CF-END Threshold */
- /* m_spIoBase->rtl8723au_write8(REG_CFEND_TH, 0x1); */
-
- /* SIFS (used in NAV) */
- value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
- rtl8723au_write16(Adapter, REG_SPEC_SIFS, value16);
-
- /* Retry Limit */
- value16 = _LRL(0x30) | _SRL(0x30);
- rtl8723au_write16(Adapter, REG_RL, value16);
-}
-
-static void _InitRateFallback(struct rtw_adapter *Adapter)
-{
- /* Set Data Auto Rate Fallback Retry Count register. */
- rtl8723au_write32(Adapter, REG_DARFRC, 0x00000000);
- rtl8723au_write32(Adapter, REG_DARFRC+4, 0x10080404);
- rtl8723au_write32(Adapter, REG_RARFRC, 0x04030201);
- rtl8723au_write32(Adapter, REG_RARFRC+4, 0x08070605);
-}
-
-static void _InitEDCA(struct rtw_adapter *Adapter)
-{
- /* Set Spec SIFS (used in NAV) */
- rtl8723au_write16(Adapter, REG_SPEC_SIFS, 0x100a);
- rtl8723au_write16(Adapter, REG_MAC_SPEC_SIFS, 0x100a);
-
- /* Set SIFS for CCK */
- rtl8723au_write16(Adapter, REG_SIFS_CTX, 0x100a);
-
- /* Set SIFS for OFDM */
- rtl8723au_write16(Adapter, REG_SIFS_TRX, 0x100a);
-
- /* TXOP */
- rtl8723au_write32(Adapter, REG_EDCA_BE_PARAM, 0x005EA42B);
- rtl8723au_write32(Adapter, REG_EDCA_BK_PARAM, 0x0000A44F);
- rtl8723au_write32(Adapter, REG_EDCA_VI_PARAM, 0x005EA324);
- rtl8723au_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
-}
-
-static void _InitRDGSetting(struct rtw_adapter *Adapter)
-{
- rtl8723au_write8(Adapter, REG_RD_CTRL, 0xFF);
- rtl8723au_write16(Adapter, REG_RD_NAV_NXT, 0x200);
- rtl8723au_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
-}
-
-static void _InitRetryFunction(struct rtw_adapter *Adapter)
-{
- u8 value8;
-
- value8 = rtl8723au_read8(Adapter, REG_FWHW_TXQ_CTRL);
- value8 |= EN_AMPDU_RTY_NEW;
- rtl8723au_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
-
- /* Set ACK timeout */
- rtl8723au_write8(Adapter, REG_ACKTO, 0x40);
-}
-
-static void _InitRFType(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- pHalData->rf_type = RF_1T1R;
-}
-
-/* Set CCK and OFDM Block "ON" */
-static void _BBTurnOnBlock(struct rtw_adapter *Adapter)
-{
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
-}
-
-#define MgntActSet_RF_State(...)
-static void _RfPowerSave(struct rtw_adapter *padapter)
-{
-}
-
-enum {
- Antenna_Lfet = 1,
- Antenna_Right = 2,
-};
-
-enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *pAdapter)
-{
- /* struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter); */
- u8 val8;
- enum rt_rf_power_state rfpowerstate = rf_off;
-
- rtl8723au_write8(pAdapter, REG_MAC_PINMUX_CFG,
- rtl8723au_read8(pAdapter,
- REG_MAC_PINMUX_CFG) & ~BIT(3));
- val8 = rtl8723au_read8(pAdapter, REG_GPIO_IO_SEL);
- DBG_8723A("GPIO_IN =%02x\n", val8);
- rfpowerstate = (val8 & BIT(3)) ? rf_on : rf_off;
-
- return rfpowerstate;
-}
-
-int rtl8723au_hal_init(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u8 val8 = 0;
- u32 boundary;
- int status = _SUCCESS;
- bool mac_on;
-
- unsigned long init_start_time = jiffies;
-
- Adapter->hw_init_completed = false;
-
- if (Adapter->pwrctrlpriv.bkeepfwalive) {
- phy_SsPwrSwitch92CU(Adapter, rf_on);
-
- if (pHalData->bIQKInitialized) {
- rtl8723a_phy_iq_calibrate(Adapter, true);
- } else {
- rtl8723a_phy_iq_calibrate(Adapter, false);
- pHalData->bIQKInitialized = true;
- }
- rtl8723a_odm_check_tx_power_tracking(Adapter);
- rtl8723a_phy_lc_calibrate(Adapter);
-
- goto exit;
- }
-
- /* Check if MAC has already powered on. by tynli. 2011.05.27. */
- val8 = rtl8723au_read8(Adapter, REG_CR);
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "%s: REG_CR 0x100 = 0x%02x\n", __func__, val8);
- /* Fix 92DU-VC S3 hang; the reason is that the secondary mac is not
- initialized. */
- /* The 0x100 value of the first mac is 0xEA while that of the secondary
- is 0x00 */
- if (val8 == 0xEA) {
- mac_on = false;
- } else {
- mac_on = true;
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "%s: MAC has already powered on\n", __func__);
- }
-
- status = _InitPowerOn(Adapter);
- if (status == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- "Failed to init power on!\n");
- goto exit;
- }
-
- if (!pregistrypriv->wifi_spec) {
- boundary = TX_PAGE_BOUNDARY;
- } else {
- /* for WMM */
- boundary = WMM_NORMAL_TX_PAGE_BOUNDARY;
- }
-
- if (!mac_on) {
- status = InitLLTTable23a(Adapter, boundary);
- if (status == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- "Failed to init LLT table\n");
- goto exit;
- }
- }
-
- if (pHalData->bRDGEnable)
- _InitRDGSetting(Adapter);
-
- status = rtl8723a_FirmwareDownload(Adapter);
- if (status != _SUCCESS) {
- Adapter->bFWReady = false;
- DBG_8723A("fw download fail!\n");
- goto exit;
- } else {
- Adapter->bFWReady = true;
- DBG_8723A("fw download ok!\n");
- }
-
- rtl8723a_InitializeFirmwareVars(Adapter);
-
- if (pwrctrlpriv->reg_rfoff == true) {
- pwrctrlpriv->rf_pwrstate = rf_off;
- }
-
- /* 2010/08/09 MH We need to check if we need to turn RF on or off after detecting */
- /* HW GPIO pin. Before PHY_RFConfig8192C. */
- /* HalDetectPwrDownMode(Adapter); */
- /* 2010/08/26 MH If Efuse does not support selective suspend then disable the function. */
- /* HalDetectSelectiveSuspendMode(Adapter); */
-
- /* Set RF type for BB/RF configuration */
- _InitRFType(Adapter);/* _ReadRFType() */
-
- /* Save target channel */
- /* <Roger_Notes> Current Channel will be updated again later. */
- pHalData->CurrentChannel = 6;/* default set to 6 */
-
- status = PHY_MACConfig8723A(Adapter);
- if (status == _FAIL) {
- DBG_8723A("PHY_MACConfig8723A fault !!\n");
- goto exit;
- }
-
- /* */
- /* d. Initialize BB related configurations. */
- /* */
- status = PHY_BBConfig8723A(Adapter);
- if (status == _FAIL) {
- DBG_8723A("PHY_BBConfig8723A fault !!\n");
- goto exit;
- }
-
- /* Added for tx-power-by-rate fine tuning. We need to call the function after BB config, */
- /* because the tx-power-by-rate table is initialized in BB config. */
-
- status = PHY_RF6052_Config8723A(Adapter);
- if (status == _FAIL) {
- DBG_8723A("PHY_RF6052_Config8723A failed!!\n");
- goto exit;
- }
-
- /* reducing 80M spur */
- rtl8723au_write32(Adapter, REG_AFE_XTAL_CTRL, 0x0381808d);
- rtl8723au_write32(Adapter, REG_AFE_PLL_CTRL, 0xf0ffff83);
- rtl8723au_write32(Adapter, REG_AFE_PLL_CTRL, 0xf0ffff82);
- rtl8723au_write32(Adapter, REG_AFE_PLL_CTRL, 0xf0ffff83);
-
- /* RFSW Control */
- /* 0x804[14]= 0 */
- rtl8723au_write32(Adapter, rFPGA0_TxInfo, 0x00000003);
- /* 0x870[6:5]= b'11 */
- rtl8723au_write32(Adapter, rFPGA0_XAB_RFInterfaceSW, 0x07000760);
- /* 0x860[6:5]= b'00 */
- rtl8723au_write32(Adapter, rFPGA0_XA_RFInterfaceOE, 0x66F60210);
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "%s: 0x870 = value 0x%x\n", __func__,
- rtl8723au_read32(Adapter, 0x870));
-
- /* */
- /* Joseph Note: Keep RfRegChnlVal for later use. */
- /* */
- pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, RF_PATH_A,
- RF_CHNLBW, bRFRegOffsetMask);
- pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, RF_PATH_B,
- RF_CHNLBW, bRFRegOffsetMask);
-
- if (!mac_on) {
- _InitQueueReservedPage(Adapter);
- _InitTxBufferBoundary(Adapter);
- }
- _InitQueuePriority(Adapter);
- _InitPageBoundary(Adapter);
- _InitTransferPageSize(Adapter);
-
- /* Get Rx PHY status in order to report RSSI and others. */
- _InitDriverInfoSize(Adapter, DRVINFO_SZ);
-
- _InitInterrupt(Adapter);
- hw_var_set_macaddr(Adapter, Adapter->eeprompriv.mac_addr);
- rtl8723a_set_media_status(Adapter, MSR_INFRA);
- _InitWMACSetting(Adapter);
- _InitAdaptiveCtrl(Adapter);
- _InitEDCA(Adapter);
- _InitRateFallback(Adapter);
- _InitRetryFunction(Adapter);
- rtl8723a_InitBeaconParameters(Adapter);
-
- _BBTurnOnBlock(Adapter);
- /* NicIFSetMacAddress(padapter, padapter->PermanentAddress); */
-
- rtl8723a_cam_invalidate_all(Adapter);
-
- /* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
- PHY_SetTxPowerLevel8723A(Adapter, pHalData->CurrentChannel);
-
- rtl8723a_InitAntenna_Selection(Adapter);
-
- /* HW SEQ CTRL */
- /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
- rtl8723au_write8(Adapter, REG_HWSEQ_CTRL, 0xFF);
-
- /* */
- /* Disable BAR, suggested by Scott */
- /* 2010.04.09 add by hpfan */
- /* */
- rtl8723au_write32(Adapter, REG_BAR_MODE_CTRL, 0x0201ffff);
-
- if (pregistrypriv->wifi_spec)
- rtl8723au_write16(Adapter, REG_FAST_EDCA_CTRL, 0);
-
- /* Moved by Neo for USB SS from the above step */
- _RfPowerSave(Adapter);
-
- /* 2010/08/26 MH Merge from 8192CE. */
- /* sherry masked that it has been done in _RfPowerSave */
- /* 20110927 */
- /* recovery for 8192cu and 8723Au 20111017 */
- if (pwrctrlpriv->rf_pwrstate == rf_on) {
- if (pHalData->bIQKInitialized) {
- rtl8723a_phy_iq_calibrate(Adapter, true);
- } else {
- rtl8723a_phy_iq_calibrate(Adapter, false);
- pHalData->bIQKInitialized = true;
- }
-
- rtl8723a_odm_check_tx_power_tracking(Adapter);
-
- rtl8723a_phy_lc_calibrate(Adapter);
-
- rtl8723a_dual_antenna_detection(Adapter);
- }
-
- /* fixed USB interface interference issue */
- rtl8723au_write8(Adapter, 0xfe40, 0xe0);
- rtl8723au_write8(Adapter, 0xfe41, 0x8d);
- rtl8723au_write8(Adapter, 0xfe42, 0x80);
- rtl8723au_write32(Adapter, 0x20c, 0xfd0320);
- /* Solve too many protocol error on USB bus */
- if (!IS_81xxC_VENDOR_UMC_A_CUT(pHalData->VersionID)) {
- /* 0xE6 = 0x94 */
- rtl8723au_write8(Adapter, 0xFE40, 0xE6);
- rtl8723au_write8(Adapter, 0xFE41, 0x94);
- rtl8723au_write8(Adapter, 0xFE42, 0x80);
-
- /* 0xE0 = 0x19 */
- rtl8723au_write8(Adapter, 0xFE40, 0xE0);
- rtl8723au_write8(Adapter, 0xFE41, 0x19);
- rtl8723au_write8(Adapter, 0xFE42, 0x80);
-
- /* 0xE5 = 0x91 */
- rtl8723au_write8(Adapter, 0xFE40, 0xE5);
- rtl8723au_write8(Adapter, 0xFE41, 0x91);
- rtl8723au_write8(Adapter, 0xFE42, 0x80);
-
- /* 0xE2 = 0x81 */
- rtl8723au_write8(Adapter, 0xFE40, 0xE2);
- rtl8723au_write8(Adapter, 0xFE41, 0x81);
- rtl8723au_write8(Adapter, 0xFE42, 0x80);
-
- }
-
-/* _InitPABias(Adapter); */
-
- /* Init BT hw config. */
- rtl8723a_BT_init_hwconfig(Adapter);
-
- rtl8723a_InitHalDm(Adapter);
-
- val8 = DIV_ROUND_UP(WiFiNavUpperUs, HAL_8723A_NAV_UPPER_UNIT);
- rtl8723au_write8(Adapter, REG_NAV_UPPER, val8);
-
- /* 2011/03/09 MH debug only, UMC-B cut passed the 2500 S5 test, but we need to find the root cause. */
- if (((rtl8723au_read32(Adapter, rFPGA0_RFMOD) & 0xFF000000) !=
- 0x83000000)) {
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(24), 1);
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- "%s: IQK fail recover\n", __func__);
- }
-
- /* ack for xmit mgmt frames. */
- rtl8723au_write32(Adapter, REG_FWHW_TXQ_CTRL,
- rtl8723au_read32(Adapter, REG_FWHW_TXQ_CTRL)|BIT(12));
-
-exit:
- if (status == _SUCCESS) {
- Adapter->hw_init_completed = true;
-
- if (Adapter->registrypriv.notch_filter == 1)
- rtl8723a_notch_filter(Adapter, 1);
- }
-
- DBG_8723A("%s in %dms\n", __func__,
- jiffies_to_msecs(jiffies - init_start_time));
- return status;
-}
-
-static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
- enum rt_rf_power_state eRFPowerState)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 sps0;
-
- sps0 = rtl8723au_read8(Adapter, REG_SPS0_CTRL);
-
- switch (eRFPowerState) {
- case rf_on:
- /* 1. Enable MAC Clock. Can not be enabled now. */
- /* WriteXBYTE(REG_SYS_CLKR+1,
- ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
-
- /* 2. Force PWM, Enable SPS18_LDO_Macro_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL,
- sps0 | BIT(0) | BIT(3));
-
- /* 3. restore BB, AFE control register. */
- /* RF */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 1);
- else
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 1);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
-
- /* AFE */
- if (pHalData->rf_type == RF_2T2R)
- rtl8723au_write32(Adapter, rRx_Wait_CCA, 0x63DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- rtl8723au_write32(Adapter, rRx_Wait_CCA, 0x631B25A0);
-
- /* 4. issue the 3-wire command that sets the RF to Rx idle
- mode. This is used to re-write the RX idle mode. */
- /* We can only provide a usual value instead and then
- HW will modify the value by itself. */
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_AC,
- bRFRegOffsetMask, 0x32D95);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetRFReg(Adapter, RF_PATH_B, RF_AC,
- bRFRegOffsetMask, 0x32D95);
- }
- break;
- case rf_sleep:
- case rf_off:
- if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
- sps0 &= ~BIT(0);
- else
- sps0 &= ~(BIT(0) | BIT(3));
-
- RT_TRACE(_module_hal_init_c_, _drv_err_, "SS LVL1\n");
- /* Disable RF and BB only for SelectSuspend. */
-
- /* 1. Set BB/RF to shutdown. */
- /* (1) Reg878[5:3]= 0 RF rx_code for
- preamble power saving */
- /* (2)Reg878[21:19]= 0 Turn off RF-B */
- /* (3) RegC04[7:4]= 0 Turn off all paths
- for packet detection */
- /* (4) Reg800[1] = 1 enable preamble power saving */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
- rtl8723au_read32(Adapter, rFPGA0_XAB_RFParameter);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
- rtl8723au_read32(Adapter, rOFDM0_TRxPathEnable);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
- rtl8723au_read32(Adapter, rFPGA0_RFMOD);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, 0x38, 0);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
-
- /* 2 .AFE control register to power down. bit[30:22] */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
- rtl8723au_read32(Adapter, rRx_Wait_CCA);
- if (pHalData->rf_type == RF_2T2R)
- rtl8723au_write32(Adapter, rRx_Wait_CCA, 0x00DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- rtl8723au_write32(Adapter, rRx_Wait_CCA, 0x001B25A0);
-
- /* 3. issue 3-wire command that RF set to power down.*/
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_AC, bRFRegOffsetMask, 0);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetRFReg(Adapter, RF_PATH_B, RF_AC,
- bRFRegOffsetMask, 0);
-
- /* 4. Force PFM, disable SPS18_LDO_Macro_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL, sps0);
- break;
- default:
- break;
- }
-}
-
-static void CardDisableRTL8723U(struct rtw_adapter *Adapter)
-{
- u8 u1bTmp;
-
- DBG_8723A("CardDisableRTL8723U\n");
- /* USB-MF Card Disable Flow */
- /* 1. Run LPS WL RFOFF flow */
- HalPwrSeqCmdParsing23a(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_USB_MSK, rtl8723AU_enter_lps_flow);
-
- /* 2. 0x1F[7:0] = 0 turn off RF */
- rtl8723au_write8(Adapter, REG_RF_CTRL, 0x00);
-
- /* ==== Reset digital sequence ====== */
- if ((rtl8723au_read8(Adapter, REG_MCUFWDL) & BIT(7)) &&
- Adapter->bFWReady) /* 8051 RAM code */
- rtl8723a_FirmwareSelfReset(Adapter);
-
- /* Reset MCU. Suggested by Filen. 2011.01.26. by tynli. */
- u1bTmp = rtl8723au_read8(Adapter, REG_SYS_FUNC_EN+1);
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN+1, u1bTmp & ~BIT(2));
-
- /* g. MCUFWDL 0x80[1:0]= 0 reset MCU ready status */
- rtl8723au_write8(Adapter, REG_MCUFWDL, 0x00);
-
- /* ==== Reset digital sequence end ====== */
- /* Card disable power action flow */
- HalPwrSeqCmdParsing23a(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_USB_MSK,
- rtl8723AU_card_disable_flow);
-
- /* Reset MCU IO Wrapper, added by Roger, 2011.08.30. */
- u1bTmp = rtl8723au_read8(Adapter, REG_RSV_CTRL + 1);
- rtl8723au_write8(Adapter, REG_RSV_CTRL+1, u1bTmp & ~BIT(0));
- u1bTmp = rtl8723au_read8(Adapter, REG_RSV_CTRL + 1);
- rtl8723au_write8(Adapter, REG_RSV_CTRL+1, u1bTmp | BIT(0));
-
- /* 7. RSV_CTRL 0x1C[7:0] = 0x0E lock ISO/CLK/Power control register */
- rtl8723au_write8(Adapter, REG_RSV_CTRL, 0x0e);
-}
-
-int rtl8723au_hal_deinit(struct rtw_adapter *padapter)
-{
- DBG_8723A("==> %s\n", __func__);
-
-#ifdef CONFIG_8723AU_BT_COEXIST
- BT_HaltProcess(padapter);
-#endif
- /* 2011/02/18 To fix the RU LNA power leakage problem, we need to
- execute the below in the Adapter init and halt sequences.
- According to EEchou's opinion, we can enable the ability for all */
- /* ICs. According to johnny's opinion, only RU needs the support. */
- CardDisableRTL8723U(padapter);
-
- padapter->hw_init_completed = false;
-
- return _SUCCESS;
-}
-
-int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
-{
- u8 i;
- struct recv_buf *precvbuf;
- int status;
- struct recv_priv *precvpriv = &Adapter->recvpriv;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- status = _SUCCESS;
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, "===> usb_inirp_init\n");
-
- /* issue Rx irp to receive data */
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
- for (i = 0; i < NR_RECVBUFF; i++) {
- if (rtl8723au_read_port(Adapter, 0, precvbuf) == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- "usb_rx_init: usb_read_port error\n");
- status = _FAIL;
- goto exit;
- }
- precvbuf++;
- }
- if (rtl8723au_read_interrupt(Adapter) == _FAIL) {
- RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- "%s: usb_read_interrupt error\n", __func__);
- status = _FAIL;
- }
- pHalData->IntrMask[0] = rtl8723au_read32(Adapter, REG_USB_HIMR);
- MSG_8723A("pHalData->IntrMask = 0x%04x\n", pHalData->IntrMask[0]);
- pHalData->IntrMask[0] |= UHIMR_C2HCMD|UHIMR_CPWM;
- rtl8723au_write32(Adapter, REG_USB_HIMR, pHalData->IntrMask[0]);
-exit:
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "<=== usb_inirp_init\n");
- return status;
-}
-
-int rtl8723au_inirp_deinit(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "===> usb_rx_deinit\n");
- rtl8723au_read_port_cancel(Adapter);
- pHalData->IntrMask[0] = rtl8723au_read32(Adapter, REG_USB_HIMR);
- MSG_8723A("%s pHalData->IntrMask = 0x%04x\n", __func__,
- pHalData->IntrMask[0]);
- pHalData->IntrMask[0] = 0x0;
- rtl8723au_write32(Adapter, REG_USB_HIMR, pHalData->IntrMask[0]);
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
- "<=== usb_rx_deinit\n");
- return _SUCCESS;
-}
-
-static void _ReadBoardType(struct rtw_adapter *Adapter, u8 *PROMContent,
- bool AutoloadFail)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 boardType = BOARD_USB_DONGLE;
-
- if (AutoloadFail) {
- if (IS_8723_SERIES(pHalData->VersionID))
- pHalData->rf_type = RF_1T1R;
- else
- pHalData->rf_type = RF_2T2R;
- pHalData->BoardType = boardType;
- return;
- }
-
- boardType = PROMContent[EEPROM_NORMAL_BoardType];
- boardType &= BOARD_TYPE_NORMAL_MASK;/* bit[7:5] */
- boardType >>= 5;
-
- pHalData->BoardType = boardType;
- MSG_8723A("_ReadBoardType(%x)\n", pHalData->BoardType);
-
- if (boardType == BOARD_USB_High_PA)
- pHalData->ExternalPA = 1;
-}
-
-static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter,
- u8 *hwinfo, bool AutoLoadFail)
-{
- u16 i;
- u8 sMacAddr[ETH_ALEN] = {0x00, 0xE0, 0x4C, 0x87, 0x23, 0x00};
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
-
- if (AutoLoadFail) {
- for (i = 0; i < 6; i++)
- pEEPROM->mac_addr[i] = sMacAddr[i];
- } else {
- /* Read Permanent MAC address */
- memcpy(pEEPROM->mac_addr, &hwinfo[EEPROM_MAC_ADDR_8723AU],
- ETH_ALEN);
- }
-
- RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
- "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%pM\n",
- pEEPROM->mac_addr);
-}
-
-static void readAdapterInfo(struct rtw_adapter *padapter)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
- /* struct hal_data_8723a * pHalData = GET_HAL_DATA(padapter); */
- u8 hwinfo[HWSET_MAX_SIZE];
-
- Hal_InitPGData(padapter, hwinfo);
- Hal_EfuseParseIDCode(padapter, hwinfo);
- Hal_EfuseParseEEPROMVer(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseMACAddr_8723AU(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- Hal_EfuseParsetxpowerinfo_8723A(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- _ReadBoardType(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseBTCoexistInfo_8723A(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
-
- rtl8723a_EfuseParseChnlPlan(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseThermalMeter_8723A(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
-/* _ReadRFSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
-/* _ReadPSSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
- Hal_EfuseParseAntennaDiversity(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
-
- Hal_EfuseParseEEPROMVer(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseCustomerID(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseRateIndicationOption(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
- Hal_EfuseParseXtal_8723A(padapter, hwinfo,
- pEEPROM->bautoload_fail_flag);
-
- /* hal_CustomizedBehavior_8723U(Adapter); */
-
-/* Adapter->bDongle = (PROMContent[EEPROM_EASY_REPLACEMENT] == 1)? 0: 1; */
- DBG_8723A("%s(): REPLACEMENT = %x\n", __func__, padapter->bDongle);
-}
-
-static void _ReadPROMContent(struct rtw_adapter *Adapter)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
- u8 eeValue;
-
- eeValue = rtl8723au_read8(Adapter, REG_9346CR);
- /* To check system boot selection. */
- pEEPROM->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
- pEEPROM->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
-
- DBG_8723A("Boot from %s, Autoload %s !\n",
- (pEEPROM->EepromOrEfuse ? "EEPROM" : "EFUSE"),
- (pEEPROM->bautoload_fail_flag ? "Fail" : "OK"));
-
- readAdapterInfo(Adapter);
-}
-
-/* */
-/* Description: */
-/* We should set Efuse cell selection to WiFi cell by default. */
-/* */
-/* Assumption: */
-/* PASSIVE_LEVEL */
-/* */
-/* Added by Roger, 2010.11.23. */
-/* */
-static void hal_EfuseCellSel(struct rtw_adapter *Adapter)
-{
- u32 value32;
-
- value32 = rtl8723au_read32(Adapter, EFUSE_TEST);
- value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
- rtl8723au_write32(Adapter, EFUSE_TEST, value32);
-}
-
-void rtl8723a_read_adapter_info(struct rtw_adapter *Adapter)
-{
- unsigned long start = jiffies;
-
- /* Read EEPROM size before call any EEPROM function */
- Adapter->EepromAddressSize = GetEEPROMSize8723A(Adapter);
-
- MSG_8723A("====> _ReadAdapterInfo8723AU\n");
-
- hal_EfuseCellSel(Adapter);
-
- _ReadPROMContent(Adapter);
-
- MSG_8723A("<==== _ReadAdapterInfo8723AU in %d ms\n",
- jiffies_to_msecs(jiffies - start));
-}
-
-/* */
-/* Description: */
-/* Query setting of specified variable. */
-/* */
-int GetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
- enum hal_def_variable eVariable, void *pValue)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- int bResult = _SUCCESS;
-
- switch (eVariable) {
- case HAL_DEF_UNDERCORATEDSMOOTHEDPWDB:
- *((int *)pValue) = pHalData->dmpriv.UndecoratedSmoothedPWDB;
- break;
- case HAL_DEF_IS_SUPPORT_ANT_DIV:
- break;
- case HAL_DEF_CURRENT_ANTENNA:
- break;
- case HAL_DEF_DRVINFO_SZ:
- *((u32 *)pValue) = DRVINFO_SZ;
- break;
- case HAL_DEF_MAX_RECVBUF_SZ:
- *((u32 *)pValue) = MAX_RECVBUF_SZ;
- break;
- case HAL_DEF_RX_PACKET_OFFSET:
- *((u32 *)pValue) = RXDESC_SIZE + DRVINFO_SZ;
- break;
- case HAL_DEF_DBG_DUMP_RXPKT:
- *((u8 *)pValue) = pHalData->bDumpRxPkt;
- break;
- case HAL_DEF_DBG_DM_FUNC:
- *((u32 *)pValue) = pHalData->odmpriv.SupportAbility;
- break;
- case HW_VAR_MAX_RX_AMPDU_FACTOR:
- *((u32 *)pValue) = IEEE80211_HT_MAX_AMPDU_64K;
- break;
- case HW_DEF_ODM_DBG_FLAG:
- {
- struct dm_odm_t *pDM_Odm = &pHalData->odmpriv;
- printk("pDM_Odm->DebugComponents = 0x%llx\n",
- pDM_Odm->DebugComponents);
- }
- break;
- default:
- bResult = _FAIL;
- break;
- }
-
- return bResult;
-}
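[Editor's illustration] GetHalDefVar8192CUsb() above follows the common HAL query convention: the caller passes a void *pValue pointing at storage whose type matches the requested variable, the callee casts and writes through it, and unknown variables return _FAIL. The stand-alone sketch below reproduces only that calling convention; the hal_info structure and its field values are invented for illustration and are not the driver's data.

/*
 * Stand-alone sketch of the void * out-parameter query pattern used by
 * GetHalDefVar8192CUsb() above.  struct hal_info and its values are made
 * up; only the convention (caller supplies storage of the right type,
 * callee casts pValue) mirrors the driver code.
 */
#include <stdio.h>
#include <stdint.h>

#define _SUCCESS 1
#define _FAIL    0

enum hal_def_variable {
	HAL_DEF_DRVINFO_SZ,
	HAL_DEF_MAX_RECVBUF_SZ,
	HAL_DEF_DBG_DUMP_RXPKT,
};

struct hal_info {
	uint32_t drvinfo_sz;
	uint32_t max_recvbuf_sz;
	uint8_t  dump_rxpkt;
};

static int get_hal_def_var(const struct hal_info *hal,
			   enum hal_def_variable var, void *pValue)
{
	int ret = _SUCCESS;

	switch (var) {
	case HAL_DEF_DRVINFO_SZ:
		*(uint32_t *)pValue = hal->drvinfo_sz;	/* caller passed a u32 */
		break;
	case HAL_DEF_MAX_RECVBUF_SZ:
		*(uint32_t *)pValue = hal->max_recvbuf_sz;
		break;
	case HAL_DEF_DBG_DUMP_RXPKT:
		*(uint8_t *)pValue = hal->dump_rxpkt;	/* caller passed a u8 */
		break;
	default:
		ret = _FAIL;				/* unknown variable */
		break;
	}
	return ret;
}

int main(void)
{
	struct hal_info hal = { .drvinfo_sz = 4, .max_recvbuf_sz = 15360,
				.dump_rxpkt = 0 };	/* example data only */
	uint32_t sz;

	if (get_hal_def_var(&hal, HAL_DEF_MAX_RECVBUF_SZ, &sz) == _SUCCESS)
		printf("max recvbuf size: %u\n", (unsigned)sz);
	return 0;
}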
-
-void rtl8723a_update_ramask(struct rtw_adapter *padapter,
- u32 mac_id, u8 rssi_level)
-{
- struct sta_info *psta;
- struct FW_Sta_Info *fw_sta;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 init_rate, networkType, raid, arg;
- u32 mask, rate_bitmap;
- u8 shortGIrate = false;
- int supportRateNum;
-
- if (mac_id >= NUM_STA) /* CAM_SIZE */
- return;
-
- psta = pmlmeinfo->FW_sta_info[mac_id].psta;
- if (psta == NULL)
- return;
-
- switch (mac_id) {
- case 0:/* for infra mode */
- supportRateNum =
- rtw_get_rateset_len23a(cur_network->SupportedRates);
- networkType = judge_network_type23a(padapter,
- cur_network->SupportedRates,
- supportRateNum) & 0xf;
- /* pmlmeext->cur_wireless_mode = networkType; */
- raid = networktype_to_raid23a(networkType);
-
- mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ?
- update_MSC_rate23a(&pmlmeinfo->ht_cap) : 0;
-
- if (support_short_GI23a(padapter, &pmlmeinfo->ht_cap))
- shortGIrate = true;
- break;
-
- case 1:/* for broadcast/multicast */
- fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
- supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
- if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
- networkType = WIRELESS_11B;
- else
- networkType = WIRELESS_11G;
- raid = networktype_to_raid23a(networkType);
-
- mask = update_basic_rate23a(cur_network->SupportedRates,
- supportRateNum);
- break;
-
- default: /* for each sta in IBSS */
- fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
- supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
- networkType = judge_network_type23a(padapter,
- fw_sta->SupportedRates,
- supportRateNum) & 0xf;
- /* pmlmeext->cur_wireless_mode = networkType; */
- raid = networktype_to_raid23a(networkType);
-
- mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
-
- /* todo: support HT in IBSS */
- break;
- }
-
- /* mask &= 0x0fffffff; */
- rate_bitmap = ODM_Get_Rate_Bitmap23a(pHalData, mac_id, mask,
- rssi_level);
- DBG_8723A("%s => mac_id:%d, networkType:0x%02x, "
- "mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
- __func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
-
- mask &= rate_bitmap;
- mask |= ((raid << 28) & 0xf0000000);
-
- init_rate = get_highest_rate_idx23a(mask) & 0x3f;
-
- arg = mac_id & 0x1f;/* MACID */
- arg |= BIT(7);
-
- if (shortGIrate == true)
- arg |= BIT(5);
-
- DBG_8723A("update raid entry, mask = 0x%x, arg = 0x%x\n", mask, arg);
-
- rtl8723a_set_raid_cmd(padapter, mask, arg);
-
- /* set ra_id */
- psta->raid = raid;
- psta->init_rate = init_rate;
-
- /* set the correct initial data rate for each mac_id */
- pdmpriv->INIDATA_RATE[mac_id] = init_rate;
-}
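[Editor's illustration] rtl8723a_update_ramask() above ANDs the supported-rate mask with the bitmap returned by ODM_Get_Rate_Bitmap23a(), packs the 4-bit RAID into bits 31:28, derives the initial rate from the highest set rate bit, and encodes the MAC ID plus flag bits into arg. The stand-alone sketch below reproduces just that arithmetic; get_highest_rate_idx() is a toy stand-in for the driver's get_highest_rate_idx23a(), which is assumed here to ignore the RAID bits.

/*
 * Stand-alone sketch of the rate-mask packing done at the end of
 * rtl8723a_update_ramask() above.  All input values are example data;
 * get_highest_rate_idx() is a toy replacement for get_highest_rate_idx23a().
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t get_highest_rate_idx(uint32_t mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		if (mask & (1u << i))
			return (uint8_t)i;
	return 0;
}

int main(void)
{
	uint32_t mask = 0x00000ff0;		/* example supported-rate bitmap */
	uint32_t rate_bitmap = 0x00000fff;	/* example bitmap from ODM */
	uint8_t raid = 0x6;			/* example RAID value */
	uint8_t mac_id = 0;
	uint8_t init_rate, arg;

	mask &= rate_bitmap;
	mask |= ((uint32_t)raid << 28) & 0xf0000000;	/* RAID in bits 31:28 */

	/* mask off the RAID bits before taking the highest rate index
	 * (assumed behaviour of the driver's helper) */
	init_rate = get_highest_rate_idx(mask & 0x0fffffff) & 0x3f;

	arg = mac_id & 0x1f;	/* MACID in the low 5 bits */
	arg |= 1u << 7;		/* BIT(7), as in the driver */

	printf("mask=0x%08x init_rate=%u arg=0x%02x\n",
	       (unsigned)mask, (unsigned)init_rate, (unsigned)arg);
	return 0;
}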
diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
deleted file mode 100644
index 5c81ff48252e..000000000000
--- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+++ /dev/null
@@ -1,690 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _HCI_OPS_OS_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <osdep_intf.h>
-#include <usb_ops.h>
-#include <recv_osdep.h>
-#include <rtl8723a_hal.h>
-#include <rtl8723a_recv.h>
-
-u8 rtl8723au_read8(struct rtw_adapter *padapter, u16 addr)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int len;
- u8 data;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ, REALTEK_USB_VENQT_READ,
- addr, 0, &pdvobjpriv->usb_buf.val8, sizeof(data),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- data = pdvobjpriv->usb_buf.val8;
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
-
- return data;
-}
-
-u16 rtl8723au_read16(struct rtw_adapter *padapter, u16 addr)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int len;
- u16 data;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ, REALTEK_USB_VENQT_READ,
- addr, 0, &pdvobjpriv->usb_buf.val16, sizeof(data),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- data = le16_to_cpu(pdvobjpriv->usb_buf.val16);
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
-
- return data;
-}
-
-u32 rtl8723au_read32(struct rtw_adapter *padapter, u16 addr)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int len;
- u32 data;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ, REALTEK_USB_VENQT_READ,
- addr, 0, &pdvobjpriv->usb_buf.val32, sizeof(data),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- data = le32_to_cpu(pdvobjpriv->usb_buf.val32);
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
-
- return data;
-}
-
-int rtl8723au_write8(struct rtw_adapter *padapter, u16 addr, u8 val)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int ret;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- pdvobjpriv->usb_buf.val8 = val;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_WRITE,
- addr, 0, &pdvobjpriv->usb_buf.val8, sizeof(val),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- if (ret != sizeof(val))
- ret = _FAIL;
- else
- ret = _SUCCESS;
-
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
- return ret;
-}
-
-int rtl8723au_write16(struct rtw_adapter *padapter, u16 addr, u16 val)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int ret;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- pdvobjpriv->usb_buf.val16 = cpu_to_le16(val);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_WRITE,
- addr, 0, &pdvobjpriv->usb_buf.val16, sizeof(val),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- if (ret != sizeof(val))
- ret = _FAIL;
- else
- ret = _SUCCESS;
-
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
- return ret;
-}
-
-int rtl8723au_write32(struct rtw_adapter *padapter, u16 addr, u32 val)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int ret;
-
- mutex_lock(&pdvobjpriv->usb_vendor_req_mutex);
- pdvobjpriv->usb_buf.val32 = cpu_to_le32(val);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_WRITE,
- addr, 0, &pdvobjpriv->usb_buf.val32, sizeof(val),
- RTW_USB_CONTROL_MSG_TIMEOUT);
-
- if (ret != sizeof(val))
- ret = _FAIL;
- else
- ret = _SUCCESS;
-
- mutex_unlock(&pdvobjpriv->usb_vendor_req_mutex);
- return ret;
-}
-
-int rtl8723au_writeN(struct rtw_adapter *padapter, u16 addr, u16 len, u8 *buf)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
- struct usb_device *udev = pdvobjpriv->pusbdev;
- int ret;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- REALTEK_USB_VENQT_CMD_REQ,
- REALTEK_USB_VENQT_WRITE,
- addr, 0, buf, len, RTW_USB_CONTROL_MSG_TIMEOUT);
-
- if (ret != len)
- return _FAIL;
- return _SUCCESS;
-}
-
-/*
- * Description:
- * Recognize the interrupt content by reading the interrupt
- * register or content and masking interrupt mask (IMR)
- * if it is our NIC's interrupt. After recognizing, we may clear
- * all the interrupts (ISR).
- * Arguments:
- * [in] Adapter -
- * The adapter context.
- * [in] pContent -
- * Under PCI interface, this field is ignored.
- * Under USB interface, the content is the interrupt
- * content pointer.
- * Under SDIO interface, this is the interrupt type which
- * is Local interrupt or system interrupt.
- * [in] ContentLen -
- * The length in byte of pContent.
- * Return:
- * If any interrupt matches the mask (IMR), return true, and
- * return false otherwise.
- */
-static bool
-InterruptRecognized8723AU(struct rtw_adapter *Adapter, void *pContent,
- u32 ContentLen)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 *buffer = (u8 *)pContent;
- struct reportpwrstate_parm report;
-
- memcpy(&pHalData->IntArray[0], &buffer[USB_INTR_CONTENT_HISR_OFFSET],
- 4);
- pHalData->IntArray[0] &= pHalData->IntrMask[0];
-
- /* For HISR extension. Added by tynli. 2009.10.07. */
- memcpy(&pHalData->IntArray[1],
- &buffer[USB_INTR_CONTENT_HISRE_OFFSET], 4);
- pHalData->IntArray[1] &= pHalData->IntrMask[1];
-
- /* We should remove this function later because the DDK suggests
- * not executing too many operations in MPISR */
-
- memcpy(&report.state, &buffer[USB_INTR_CPWM_OFFSET], 1);
-
- return (pHalData->IntArray[0] & pHalData->IntrMask[0]) != 0 ||
- (pHalData->IntArray[1] & pHalData->IntrMask[1]) != 0;
-}
-
-static void usb_read_interrupt_complete(struct urb *purb)
-{
- int err;
- struct rtw_adapter *padapter = (struct rtw_adapter *)purb->context;
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
- padapter->bReadPortCancel) {
- DBG_8723A("%s() RX Warning! bDriverStopped(%d) OR "
- "bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
- __func__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved,
- padapter->bReadPortCancel);
- return;
- }
-
- if (purb->status == 0) {
- struct c2h_evt_hdr *c2h_evt;
-
- c2h_evt = (struct c2h_evt_hdr *)purb->transfer_buffer;
-
- if (purb->actual_length > USB_INTR_CONTENT_LENGTH) {
- DBG_8723A("usb_read_interrupt_complete: purb->actual_"
- "length > USB_INTR_CONTENT_LENGTH\n");
- goto urb_submit;
- }
-
- InterruptRecognized8723AU(padapter, purb->transfer_buffer,
- purb->actual_length);
-
- if (c2h_evt_exist(c2h_evt)) {
- if (c2h_id_filter_ccx_8723a(c2h_evt->id)) {
- /* Handle CCX report here */
- handle_txrpt_ccx_8723a(padapter, (void *)
- c2h_evt->payload);
- schedule_work(&padapter->evtpriv.irq_wk);
- } else {
- struct evt_work *c2w;
- int res;
-
- c2w = kmalloc(sizeof(struct evt_work),
- GFP_ATOMIC);
-
- if (!c2w)
- goto urb_submit;
-
- c2w->adapter = padapter;
- INIT_WORK(&c2w->work, rtw_evt_work);
- memcpy(c2w->u.buf, purb->transfer_buffer, 16);
-
- res = queue_work(padapter->evtpriv.wq,
- &c2w->work);
-
- if (!res) {
- printk(KERN_ERR "%s: Call to "
- "queue_work() failed\n",
- __func__);
- kfree(c2w);
- goto urb_submit;
- }
- }
- }
-
-urb_submit:
- err = usb_submit_urb(purb, GFP_ATOMIC);
- if (err && (err != -EPERM)) {
- DBG_8723A("cannot submit interrupt in-token(err = "
- "0x%08x), urb_status = %d\n",
- err, purb->status);
- }
- } else {
- DBG_8723A("###=> usb_read_interrupt_complete => urb "
- "status(%d)\n", purb->status);
-
- switch (purb->status) {
- case -EINVAL:
- case -EPIPE:
- case -ENODEV:
- case -ESHUTDOWN:
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete:bSurpriseRemoved =true\n");
- /* Fall Through here */
- case -ENOENT:
- padapter->bDriverStopped = true;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete:bDriverStopped =true\n");
- break;
- case -EPROTO:
- break;
- case -EINPROGRESS:
- DBG_8723A("ERROR: URB IS IN PROGRESS!\n");
- break;
- default:
- break;
- }
- }
-}
-
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter)
-{
- int err;
- unsigned int pipe;
- int ret = _SUCCESS;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
- struct recv_priv *precvpriv = &adapter->recvpriv;
- struct usb_device *pusbd = pdvobj->pusbdev;
-
- /* translate DMA FIFO addr to pipehandle */
- pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
-
- usb_fill_int_urb(precvpriv->int_in_urb, pusbd, pipe,
- precvpriv->int_in_buf, USB_INTR_CONTENT_LENGTH,
- usb_read_interrupt_complete, adapter, 1);
-
- err = usb_submit_urb(precvpriv->int_in_urb, GFP_ATOMIC);
- if (err && (err != -EPERM)) {
- DBG_8723A("cannot submit interrupt in-token(err = 0x%08x),"
- "urb_status = %d\n", err,
- precvpriv->int_in_urb->status);
- ret = _FAIL;
- }
-
- return ret;
-}
-
-static int recvbuf2recvframe(struct rtw_adapter *padapter, struct sk_buff *pskb)
-{
- u8 *pbuf;
- u8 shift_sz = 0;
- u16 pkt_cnt;
- u32 pkt_offset, skb_len, alloc_sz;
- int transfer_len;
- struct recv_stat *prxstat;
- struct phy_stat *pphy_info;
- struct sk_buff *pkt_copy;
- struct recv_frame *precvframe;
- struct rx_pkt_attrib *pattrib;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct rtw_queue *pfree_recv_queue = &precvpriv->free_recv_queue;
-
- transfer_len = (int)pskb->len;
- pbuf = pskb->data;
-
- prxstat = (struct recv_stat *)pbuf;
- pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
-
- do {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recvbuf2recvframe: rxdesc = offset 0:0x%08x, 4:0x%08x, 8:0x%08x, C:0x%08x\n",
- prxstat->rxdw0, prxstat->rxdw1,
- prxstat->rxdw2, prxstat->rxdw4);
-
- prxstat = (struct recv_stat *)pbuf;
-
- precvframe = rtw_alloc_recvframe23a(pfree_recv_queue);
- if (!precvframe) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvbuf2recvframe: precvframe == NULL\n");
- DBG_8723A("%s()-%d: rtw_alloc_recvframe23a() failed! RX "
- "Drop!\n", __func__, __LINE__);
- goto _exit_recvbuf2recvframe;
- }
-
- INIT_LIST_HEAD(&precvframe->list);
-
- update_recvframe_attrib(precvframe, prxstat);
-
- pattrib = &precvframe->attrib;
-
- if (pattrib->crc_err) {
- DBG_8723A("%s()-%d: RX Warning! rx CRC ERROR !!\n",
- __func__, __LINE__);
- rtw_free_recvframe23a(precvframe);
- goto _exit_recvbuf2recvframe;
- }
-
- pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz +
- pattrib->shift_sz + pattrib->pkt_len;
-
- if (pattrib->pkt_len <= 0 || pkt_offset > transfer_len) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- "recvbuf2recvframe: pkt_len<= 0\n");
- DBG_8723A("%s()-%d: RX Warning!\n",
- __func__, __LINE__);
- rtw_free_recvframe23a(precvframe);
- goto _exit_recvbuf2recvframe;
- }
-
- /* Modified by Albert 20101213 */
- /* For 8 bytes IP header alignment. */
- /* Qos data, wireless lan header length is 26 */
- if (pattrib->qos)
- shift_sz = 6;
- else
- shift_sz = 0;
-
- skb_len = pattrib->pkt_len;
-
- /* for the first fragment packet, the driver needs to allocate
- * 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet.
- * modify alloc_sz to receive crc error packets
- * by thomas 2011-06-02 */
- if (pattrib->mfrag == 1 && pattrib->frag_num == 0) {
- /* alloc_sz = 1664; 1664 is 128 alignment. */
- if (skb_len <= 1650)
- alloc_sz = 1664;
- else
- alloc_sz = skb_len + 14;
- } else {
- alloc_sz = skb_len;
- /* 6 is for IP header 8 bytes alignment in QoS packet case. */
- /* 8 is for skb->data 4 bytes alignment. */
- alloc_sz += 14;
- }
-
- pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
- if (pkt_copy) {
- pkt_copy->dev = padapter->pnetdev;
- precvframe->pkt = pkt_copy;
- /* force pkt_copy->data at 8-byte alignment address */
- skb_reserve(pkt_copy, 8 -
- ((unsigned long)(pkt_copy->data) & 7));
- /* force ip_hdr to an 8-byte-aligned address
- according to shift_sz. */
- skb_reserve(pkt_copy, shift_sz);
- memcpy(pkt_copy->data, pbuf + pattrib->shift_sz +
- pattrib->drvinfo_sz + RXDESC_SIZE, skb_len);
- skb_put(pkt_copy, skb_len);
- } else {
- if (pattrib->mfrag == 1 && pattrib->frag_num == 0) {
- DBG_8723A("recvbuf2recvframe: alloc_skb fail, "
- "drop frag frame \n");
- rtw_free_recvframe23a(precvframe);
- goto _exit_recvbuf2recvframe;
- }
-
- precvframe->pkt = skb_clone(pskb, GFP_ATOMIC);
- if (!precvframe->pkt) {
- DBG_8723A("recvbuf2recvframe: skb_clone "
- "fail\n");
- rtw_free_recvframe23a(precvframe);
- goto _exit_recvbuf2recvframe;
- }
- }
-
- if (pattrib->physt) {
- pphy_info = (struct phy_stat *)(pbuf + RXDESC_OFFSET);
- update_recvframe_phyinfo(precvframe, pphy_info);
- }
-
- if (rtw_recv_entry23a(precvframe) != _SUCCESS)
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "recvbuf2recvframe: rtw_recv_entry23a(precvframe) != _SUCCESS\n");
-
- pkt_cnt--;
- transfer_len -= pkt_offset;
- pbuf += pkt_offset;
- precvframe = NULL;
- pkt_copy = NULL;
-
- if (transfer_len > 0 && pkt_cnt == 0)
- pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16) & 0xff;
-
- } while (transfer_len > 0 && pkt_cnt > 0);
-
-_exit_recvbuf2recvframe:
-
- return _SUCCESS;
-}
-
-void rtl8723au_recv_tasklet(void *priv)
-{
- struct sk_buff *pskb;
- struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
- DBG_8723A("recv_tasklet => bDriverStopped or "
- "bSurpriseRemoved \n");
- dev_kfree_skb_any(pskb);
- break;
- }
-
- recvbuf2recvframe(padapter, pskb);
- skb_reset_tail_pointer(pskb);
-
- pskb->len = 0;
-
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
-}
-
-static void usb_read_port_complete(struct urb *purb)
-{
- struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
- struct rtw_adapter *padapter = (struct rtw_adapter *)precvbuf->adapter;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete!!!\n");
-
- precvpriv->rx_pending_cnt--;
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
- padapter->bReadPortCancel) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
- padapter->bDriverStopped, padapter->bSurpriseRemoved);
-
- DBG_8723A("%s()-%d: RX Warning! bDriverStopped(%d) OR "
- "bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
- __func__, __LINE__, padapter->bDriverStopped,
- padapter->bSurpriseRemoved, padapter->bReadPortCancel);
- return;
- }
-
- if (purb->status == 0) {
- if (purb->actual_length > MAX_RECVBUF_SZ ||
- purb->actual_length < RXDESC_SIZE) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n");
- rtl8723au_read_port(padapter, 0, precvbuf);
- DBG_8723A("%s()-%d: RX Warning!\n",
- __func__, __LINE__);
- } else {
- rtw_reset_continual_urb_error(
- adapter_to_dvobj(padapter));
-
- skb_put(precvbuf->pskb, purb->actual_length);
- skb_queue_tail(&precvpriv->rx_skb_queue,
- precvbuf->pskb);
-
- if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1)
- tasklet_schedule(&precvpriv->recv_tasklet);
-
- precvbuf->pskb = NULL;
- rtl8723au_read_port(padapter, 0, precvbuf);
- }
- } else {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete : purb->status(%d) != 0\n",
- purb->status);
- skb_put(precvbuf->pskb, purb->actual_length);
- precvbuf->pskb = NULL;
-
- DBG_8723A("###=> usb_read_port_complete => urb status(%d)\n",
- purb->status);
-
- if (rtw_inc_and_chk_continual_urb_error(
- adapter_to_dvobj(padapter))) {
- padapter->bSurpriseRemoved = true;
- }
-
- switch (purb->status) {
- case -EINVAL:
- case -EPIPE:
- case -ENODEV:
- case -ESHUTDOWN:
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete:bSurpriseRemoved = true\n");
- /* Intentional fall through here */
- case -ENOENT:
- padapter->bDriverStopped = true;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port_complete:bDriverStopped = true\n");
- break;
- case -EPROTO:
- case -EOVERFLOW:
- rtl8723au_read_port(padapter, 0, precvbuf);
- break;
- case -EINPROGRESS:
- DBG_8723A("ERROR: URB IS IN PROGRESS!\n");
- break;
- default:
- break;
- }
- }
-}
-
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
- struct recv_buf *precvbuf)
-{
- struct urb *purb;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
- struct recv_priv *precvpriv = &adapter->recvpriv;
- struct usb_device *pusbd = pdvobj->pusbdev;
- int err;
- unsigned int pipe;
- unsigned long tmpaddr;
- unsigned long alignment;
- int ret = _SUCCESS;
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port:(padapter->bDriverStopped ||padapter->bSurpriseRemoved)!!!\n");
- return _FAIL;
- }
-
- if (!precvbuf) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_read_port:precvbuf == NULL\n");
- return _FAIL;
- }
-
- if (!precvbuf->pskb)
- precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
-
- /* re-assign for linux based on skb */
- if (!precvbuf->pskb) {
- precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (precvbuf->pskb == NULL) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "init_recvbuf(): alloc_skb fail!\n");
- return _FAIL;
- }
-
- tmpaddr = (unsigned long)precvbuf->pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
- skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
- }
-
- precvpriv->rx_pending_cnt++;
-
- purb = precvbuf->purb;
-
- /* translate DMA FIFO addr to pipehandle */
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
-
- usb_fill_bulk_urb(purb, pusbd, pipe, precvbuf->pskb->data,
- MAX_RECVBUF_SZ, usb_read_port_complete,
- precvbuf);/* context is precvbuf */
-
- err = usb_submit_urb(purb, GFP_ATOMIC);
- if ((err) && (err != -EPERM)) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "cannot submit rx in-token(err = 0x%.8x), URB_STATUS = 0x%.8x\n",
- err, purb->status);
- DBG_8723A("cannot submit rx in-token(err = 0x%08x), urb_status "
- "= %d\n", err, purb->status);
- ret = _FAIL;
- }
- return ret;
-}
-
-void rtl8723au_xmit_tasklet(void *priv)
-{
- int ret;
- struct rtw_adapter *padapter = (struct rtw_adapter *)priv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- if (check_fwstate(&padapter->mlmepriv, _FW_UNDER_SURVEY))
- return;
-
- while (1) {
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
- padapter->bWritePortCancel) {
- DBG_8723A("xmit_tasklet => bDriverStopped or "
- "bSurpriseRemoved or bWritePortCancel\n");
- break;
- }
-
- ret = rtl8723au_xmitframe_complete(padapter, pxmitpriv, NULL);
-
- if (!ret)
- break;
- }
-}
-
-void rtl8723au_set_hw_type(struct rtw_adapter *padapter)
-{
- padapter->chip_type = RTL8723A;
- padapter->HardwareType = HARDWARE_TYPE_RTL8723AU;
- DBG_8723A("CHIP TYPE: RTL8723A\n");
-}
diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h b/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
deleted file mode 100644
index bcf36579f43a..000000000000
--- a/drivers/staging/rtl8723au/include/Hal8723APhyCfg.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __INC_HAL8723PHYCFG_H__
-#define __INC_HAL8723PHYCFG_H__
-
-/*------------------------------Define structure----------------------------*/
-enum RF_RADIO_PATH {
- RF_PATH_A = 0, /* Radio Path A */
- RF_PATH_B = 1, /* Radio Path B */
- RF_PATH_MAX /* Max number of RF paths the 90 series supports */
-};
-
-#define CHANNEL_MAX_NUMBER 14 /* 14 is the max channel number */
-
-enum WIRELESS_MODE {
- WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_A = BIT(2),
- WIRELESS_MODE_B = BIT(0),
- WIRELESS_MODE_G = BIT(1),
- WIRELESS_MODE_AUTO = BIT(5),
- WIRELESS_MODE_N_24G = BIT(3),
- WIRELESS_MODE_N_5G = BIT(4),
- WIRELESS_MODE_AC = BIT(6)
-};
-
-struct bb_reg_define {
- u32 rfintfs; /* set software control: */
- /* 0x870~0x877[8 bytes] */
- u32 rfintfi; /* readback data: */
- /* 0x8e0~0x8e7[8 bytes] */
- u32 rfintfo; /* output data: */
- /* 0x860~0x86f [16 bytes] */
- u32 rfintfe; /* output enable: */
- /* 0x860~0x86f [16 bytes] */
- u32 rf3wireOffset; /* LSSI data: */
- /* 0x840~0x84f [16 bytes] */
- u32 rfLSSI_Select; /* BB Band Select: */
- /* 0x878~0x87f [8 bytes] */
- u32 rfTxGainStage; /* Tx gain stage: */
- /* 0x80c~0x80f [4 bytes] */
- u32 rfHSSIPara1; /* wire parameter control1 : */
- /* 0x820~0x823, 0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes] */
- u32 rfHSSIPara2; /* wire parameter control2 : */
- /* 0x824~0x827, 0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes] */
- u32 rfSwitchControl; /* Tx Rx antenna control : */
- /* 0x858~0x85f [16 bytes] */
- u32 rfAGCControl1; /* AGC parameter control1 : */
- /* 0xc50~0xc53, 0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes] */
- u32 rfAGCControl2; /* AGC parameter control2 : */
- /* 0xc54~0xc57, 0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes] */
- u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
- /* 0xc14~0xc17, 0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes] */
- u32 rfRxAFE; /* Rx IQ DC offset and Rx digital filter, Rx DC notch filter : */
- /* 0xc10~0xc13, 0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes] */
- u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
- /* 0xc80~0xc83, 0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes] */
- u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
- /* 0xc84~0xc87, 0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes] */
- u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
- /* 0x8a0~0x8af [16 bytes] */
- u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for Path A and B */
-};
-
-struct r_antenna_sel_ofdm {
- u32 r_tx_antenna:4;
- u32 r_ant_l:4;
- u32 r_ant_non_ht:4;
- u32 r_ant_ht1:4;
- u32 r_ant_ht2:4;
- u32 r_ant_ht_s1:4;
- u32 r_ant_non_ht_s1:4;
- u32 OFDM_TXSC:2;
- u32 Reserved:2;
-};
-
-struct r_antenna_sel_cck {
- u8 r_cckrx_enable_2:2;
- u8 r_cckrx_enable:2;
- u8 r_ccktx_enable:4;
-};
-
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-
-
-/*------------------------Export Macro Definition---------------------------*/
-/*------------------------Export Macro Definition---------------------------*/
-
-
-/*--------------------------Exported Function prototype---------------------*/
-/* */
-/* BB and RF register read/write */
-/* */
-u32 PHY_QueryBBReg(struct rtw_adapter *Adapter, u32 RegAddr,
- u32 BitMask);
-void PHY_SetBBReg(struct rtw_adapter *Adapter, u32 RegAddr,
- u32 BitMask, u32 Data);
-u32 PHY_QueryRFReg(struct rtw_adapter *Adapter,
- enum RF_RADIO_PATH eRFPath, u32 RegAddr,
- u32 BitMask);
-void PHY_SetRFReg(struct rtw_adapter *Adapter,
- enum RF_RADIO_PATH eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-/* */
-/* BB TX Power R/W */
-/* */
-void PHY_SetTxPowerLevel8723A(struct rtw_adapter *Adapter, u8 channel);
-
-/* */
-/* Switch bandwidth for 8723A */
-/* */
-void PHY_SetBWMode23a8723A(struct rtw_adapter *pAdapter,
- enum ht_channel_width ChnlWidth,
- unsigned char Offset);
-
-/* */
-/* channel switch related function */
-/* */
-void PHY_SwChnl8723A(struct rtw_adapter *pAdapter, u8 channel);
- /* Call after initialization */
-void ChkFwCmdIoDone(struct rtw_adapter *Adapter);
-
-/* */
-/* Modify the value of the hw register when beacon interval be changed. */
-/* */
-void
-rtl8192c_PHY_SetBeaconHwReg(struct rtw_adapter *Adapter, u16 BeaconInterval);
-
-
-void PHY_SwitchEphyParameter(struct rtw_adapter *Adapter);
-
-void PHY_EnableHostClkReq(struct rtw_adapter *Adapter);
-
-bool
-SetAntennaConfig92C(struct rtw_adapter *Adapter, u8 DefaultAnt);
-
-/*--------------------------Exported Function prototype---------------------*/
-
-#define PHY_SetMacReg PHY_SetBBReg
-
-/* MAC/BB/RF HAL config */
-int PHY_BBConfig8723A(struct rtw_adapter *Adapter);
-s32 PHY_MACConfig8723A(struct rtw_adapter *padapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723APhyReg.h b/drivers/staging/rtl8723au/include/Hal8723APhyReg.h
deleted file mode 100644
index 759928f78d6d..000000000000
--- a/drivers/staging/rtl8723au/include/Hal8723APhyReg.h
+++ /dev/null
@@ -1,1078 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __INC_HAL8723APHYREG_H__
-#define __INC_HAL8723APHYREG_H__
-
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 1. Page1(0x100) */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
-/* 2. Page2(0x200) */
-/* The following two definition are only used for USB interface. */
-#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB read/write command address. */
-#define RF_BB_CMD_DATA 0x02c4 /* RF/BB read/write command data. */
-
-/* 3. Page8(0x800) */
-#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
-
-#define rFPGA0_TxInfo 0x804 /* Status report?? */
-#define rFPGA0_PSDFunction 0x808
-
-#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
-
-#define rFPGA0_RFTiming1 0x810 /* Useless now */
-#define rFPGA0_RFTiming2 0x814
-
-#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-#define rTxAGC_B_Rate18_06 0x830
-#define rTxAGC_B_Rate54_24 0x834
-#define rTxAGC_B_CCK1_55_Mcs32 0x838
-#define rTxAGC_B_Mcs03_Mcs00 0x83c
-
-#define rTxAGC_B_Mcs07_Mcs04 0x848
-#define rTxAGC_B_Mcs11_Mcs08 0x84c
-
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-
-#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
-#define rFPGA0_RFSleepUpParameter 0x854
-
-#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
-#define rFPGA0_XCD_SwitchControl 0x85c
-
-#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
-#define rFPGA0_XB_RFInterfaceOE 0x864
-
-#define rTxAGC_B_Mcs15_Mcs12 0x868
-#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
-
-#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-
-#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
-#define rFPGA0_XCD_RFParameter 0x87c
-
-#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
-#define rFPGA0_AnalogParameter4 0x88c
-
-#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-
-#define rFPGA0_PSDReport 0x8b4 /* Useless now */
-#define TransceiverA_HSPI_Readback 0x8b8 /* Transceiver A HSPI Readback */
-#define TransceiverB_HSPI_Readback 0x8bc /* Transceiver B HSPI Readback */
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
-
-/* 4. Page9(0x900) */
-#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
-
-#define rFPGA1_TxBlock 0x904 /* Useless now */
-#define rFPGA1_DebugSelect 0x908 /* Useless now */
-#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
-
-/* 5. PageA(0xA00) */
-/* Set Control channel to upper or lower. These settings are required only for 40MHz */
-#define rCCK0_System 0xa00
-
-#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
-#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
-
-#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-
-#define rCCK0_RxHP 0xa14
-
-#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
-
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
-/* PageB(0xB00) */
-#define rPdp_AntA 0xb00
-#define rPdp_AntA_4 0xb04
-#define rConfig_Pmpd_AntA 0xb28
-#define rConfig_AntA 0xb68
-#define rConfig_AntB 0xb6c
-#define rPdp_AntB 0xb70
-#define rPdp_AntB_4 0xb74
-#define rConfig_Pmpd_AntB 0xb98
-#define rAPK 0xbd8
-
-/* 6. PageC(0xC00) */
-#define rOFDM0_LSTF 0xc00
-
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
-
-#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-
-#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
-#define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync. */
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
-
-#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
-#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
-
-#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
-
-#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-
-#define rOFDM0_RxIQExtAnta 0xca0
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-
-/* 7. PageD(0xD00) */
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-#define rOFDM1_CFO 0xd08 /* No setting now */
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-
-#define rOFDM_ShortCFOAB 0xdac /* No setting now */
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
-
-
-/* 8. PageE(0xE00) */
-#define rTxAGC_A_Rate18_06 0xe00
-#define rTxAGC_A_Rate54_24 0xe04
-#define rTxAGC_A_CCK1_Mcs32 0xe08
-#define rTxAGC_A_Mcs03_Mcs00 0xe10
-#define rTxAGC_A_Mcs07_Mcs04 0xe14
-#define rTxAGC_A_Mcs11_Mcs08 0xe18
-#define rTxAGC_A_Mcs15_Mcs12 0xe1c
-
-#define rFPGA0_IQK 0xe28
-#define rTx_IQK_Tone_A 0xe30
-#define rRx_IQK_Tone_A 0xe34
-#define rTx_IQK_PI_A 0xe38
-#define rRx_IQK_PI_A 0xe3c
-
-#define rTx_IQK 0xe40
-#define rRx_IQK 0xe44
-#define rIQK_AGC_Pts 0xe48
-#define rIQK_AGC_Rsp 0xe4c
-#define rTx_IQK_Tone_B 0xe50
-#define rRx_IQK_Tone_B 0xe54
-#define rTx_IQK_PI_B 0xe58
-#define rRx_IQK_PI_B 0xe5c
-#define rIQK_AGC_Cont 0xe60
-
-#define rBlue_Tooth 0xe6c
-#define rRx_Wait_CCA 0xe70
-#define rTx_CCK_RFON 0xe74
-#define rTx_CCK_BBON 0xe78
-#define rTx_OFDM_RFON 0xe7c
-#define rTx_OFDM_BBON 0xe80
-#define rTx_To_Rx 0xe84
-#define rTx_To_Tx 0xe88
-#define rRx_CCK 0xe8c
-
-#define rTx_Power_Before_IQK_A 0xe94
-#define rTx_Power_After_IQK_A 0xe9c
-
-#define rRx_Power_Before_IQK_A 0xea0
-#define rRx_Power_Before_IQK_A_2 0xea4
-#define rRx_Power_After_IQK_A 0xea8
-#define rRx_Power_After_IQK_A_2 0xeac
-
-#define rTx_Power_Before_IQK_B 0xeb4
-#define rTx_Power_After_IQK_B 0xebc
-
-#define rRx_Power_Before_IQK_B 0xec0
-#define rRx_Power_Before_IQK_B_2 0xec4
-#define rRx_Power_After_IQK_B 0xec8
-#define rRx_Power_After_IQK_B_2 0xecc
-
-#define rRx_OFDM 0xed0
-#define rRx_Wait_RIFS 0xed4
-#define rRx_TO_Rx 0xed8
-#define rStandby 0xedc
-#define rSleep 0xee0
-#define rPMPD_ANAEN 0xeec
-
-/* 7. RF Register 0x00-0x2E (RF 8256) */
-/* RF-0222D 0x00-3F */
-/* Zebra1 */
-#define rZebra1_HSSIEnable 0x0 /* Useless now */
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-#define rZebra1_Channel 0x7 /* RF channel switch */
-
-#define rZebra1_TxGain 0x8 /* Useless now */
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
-
-/* Zebra4 */
-#define rGlobalCtrl 0 /* Useless now */
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11 /* Useless now */
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
-
-/* RL6052 Register definition */
-#define RF_AC 0x00
-#define RF_IQADJ_G1 0x01
-#define RF_IQADJ_G2 0x02
-#define RF_BS_PA_APSET_G1_G4 0x03
-#define RF_BS_PA_APSET_G5_G8 0x04
-#define RF_POW_TRSW 0x05
-#define RF_GAIN_RX 0x06
-#define RF_GAIN_TX 0x07
-#define RF_TXM_IDAC 0x08
-#define RF_IPA_G 0x09
-#define RF_TXBIAS_G 0x0A
-#define RF_TXPA_AG 0x0B
-#define RF_IPA_A 0x0C
-#define RF_TXBIAS_A 0x0D
-#define RF_BS_PA_APSET_G9_G11 0x0E
-#define RF_BS_IQGEN 0x0F
-#define RF_MODE1 0x10
-#define RF_MODE2 0x11
-#define RF_RX_AGC_HP 0x12
-#define RF_TX_AGC 0x13
-#define RF_BIAS 0x14
-#define RF_IPA 0x15
-#define RF_TXBIAS 0x16
-#define RF_POW_ABILITY 0x17
-#define RF_MODE_AG 0x18
-#define rRfChannel 0x18 /* RF channel and BW switch */
-#define RF_CHNLBW 0x18 /* RF channel and BW switch */
-#define RF_TOP 0x19
-#define RF_RX_G1 0x1A
-#define RF_RX_G2 0x1B
-#define RF_RX_BB2 0x1C
-#define RF_RX_BB1 0x1D
-#define RF_RCK1 0x1E
-#define RF_RCK2 0x1F
-#define RF_TX_G1 0x20
-#define RF_TX_G2 0x21
-#define RF_TX_G3 0x22
-#define RF_TX_BB1 0x23
-#define RF_T_METER 0x24
-#define RF_SYN_G1 0x25 /* RF TX Power control */
-#define RF_SYN_G2 0x26 /* RF TX Power control */
-#define RF_SYN_G3 0x27 /* RF TX Power control */
-#define RF_SYN_G4 0x28 /* RF TX Power control */
-#define RF_SYN_G5 0x29 /* RF TX Power control */
-#define RF_SYN_G6 0x2A /* RF TX Power control */
-#define RF_SYN_G7 0x2B /* RF TX Power control */
-#define RF_SYN_G8 0x2C /* RF TX Power control */
-
-#define RF_RCK_OS 0x30 /* RF TX PA control */
-
-#define RF_TXPA_G1 0x31 /* RF TX PA control */
-#define RF_TXPA_G2 0x32 /* RF TX PA control */
-#define RF_TXPA_G3 0x33 /* RF TX PA control */
-
-/* Bit Mask */
-/* 1. Page1(0x100) */
-#define bBBResetB 0x100 /* Useless now? */
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-#define IS_BB_REG_OFFSET_92S(_Offset) \
- ((_Offset >= 0x800) && (_Offset <= 0xfff))
-
-/* 2. Page8(0x800) */
-#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-
-#define bOFDMRxADCPhase 0x10000 /* Useless now */
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-
-#define bAntennaSelect 0x0300
-
-#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-
-#define bPAStart 0xf0000000 /* Useless now */
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* chane gain at continue Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-
-#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
-#define b3WireAddressLength 0x400
-
-#define b3WireRFPowerDown 0x1 /* Useless now */
-/* define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf
-
-#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
-
-#define bRFSI_TRSW 0x20 /* Useless now */
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-
-#define bLSSIReadAddress 0x7f800000 /* T65 RF */
-
-#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-
-#define bLSSIReadBackData 0xfffff /* T65 RF */
-
-#define bLSSIReadOKFlag 0x1000 /* Useless now */
-#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-
-#define bADClkPhase 0x4000000 /* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
-
-#define b80MClkDelay 0x18000000 /* Useless */
-#define bAFEWatchDogEnable 0x20000000
-
-#define bXtalCap01 0xc0000000 /* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
-#define bXtalCap23 0x3
-#define bXtalCap92x 0x0f000000
-#define bXtalCap 0x0f000000
-
-#define bIntDifClkEnable 0x400 /* Useless */
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-#define bCCKRxAGCFormat 0x200
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* 3. Page9(0x900) */
-#define bOFDMTxSC 0x30000000 /* Useless */
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and also HWord, LWord */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
-
-/* 4. PageA(0xA00) */
-#define bCCKBBMode 0x3 /* Useless */
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-
-#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 switch */
-
-#define bCCKScramble 0x8 /* Useless */
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 /* CCK Rx Iinital gain polarity */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-/* define bCCKRxAGCFormat 0x4000 remove to HSSI register 0x824 */
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* 5. PageC(0xC00) */
-#define bNumOfSTF 0x3 /* Useless */
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* the threshold for high power */
-#define bRSSI_Gen 0x7f000000 /* the threshold for ant diversity */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-/* define bRxMF_Hold 0x3800 */
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-#define bDAFormat 0x40000
-#define bTxChEmuEnable 0x01000000
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-#define bExtLNAGain 0x7c00
-
-/* 6. PageE(0xE00) */
-#define bSTBCEn 0x4 /* Useless */
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-/* define bRxPath1 0x01 */
-/* define bRxPath2 0x02 */
-/* define bRxPath3 0x04 */
-/* define bRxPath4 0x08 */
-/* define bTxPath1 0x10 */
-/* define bTxPath2 0x20 */
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf /* Useless */
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3 /* Useless */
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1 /* Useless */
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
-
-/* Rx Pseduo noise */
-#define bRxPesudoNoiseOn 0x20000000 /* Useless */
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
-
-/* 7. RF Register */
-/* Zebra1 */
-#define bZebra1_HSSIEnable 0x8 /* Useless */
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
-#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100 /* Useless */
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
-
-/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc /* Useless */
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-
-/* Other Definition */
-
-/* byte endable for sb_write */
-#define bByte0 0x1 /* Useless */
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
-/* for PutRegsetting & GetRegSetting BitMask */
-#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-#define bMask12Bits 0xfff
-#define bMaskH4Bits 0xf0000000
-#define bMaskOFDM_D 0xffc00000
-#define bMaskCCK 0x3f3f3f3f
-
-/* for PutRFRegsetting & GetRFRegSetting BitMask */
-#define bRFRegOffsetMask 0xfffff
-
-#define bDisable 0x0
-
-#define LeftAntenna 0x0 /* Useless */
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms Useless */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0 /* Useless */
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff /* Useless */
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-/* define max debug item in each debug page */
-/* define bMaxItem_FPGA_PHY0 0x9 */
-/* define bMaxItem_FPGA_PHY1 0x3 */
-/* define bMaxItem_PHY_11B 0x16 */
-/* define bMaxItem_OFDM_PHY0 0x29 */
-/* define bMaxItem_OFDM_PHY1 0x0 */
-
-#define bPMACControl 0x0 /* Useless */
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define PathA 0x0 /* Useless */
-#define PathB 0x1
-#define PathC 0x2
-#define PathD 0x3
-
-/* PageB(0xB00) */
-#define rPdp_AntA 0xb00
-#define rPdp_AntA_4 0xb04
-#define rPdp_AntA_8 0xb08
-#define rPdp_AntA_C 0xb0c
-#define rPdp_AntA_18 0xb18
-#define rPdp_AntA_1C 0xb1c
-#define rPdp_AntA_20 0xb20
-#define rPdp_AntA_24 0xb24
-
-#define rConfig_Pmpd_AntA 0xb28
-#define rConfig_ram64x16 0xb2c
-
-#define rBndA 0xb30
-#define rHssiPar 0xb34
-
-#define rConfig_AntA 0xb68
-#define rConfig_AntB 0xb6c
-
-#define rPdp_AntB 0xb70
-#define rPdp_AntB_4 0xb74
-#define rPdp_AntB_8 0xb78
-#define rPdp_AntB_C 0xb7c
-#define rPdp_AntB_10 0xb80
-#define rPdp_AntB_14 0xb84
-#define rPdp_AntB_18 0xb88
-#define rPdp_AntB_1C 0xb8c
-#define rPdp_AntB_20 0xb90
-#define rPdp_AntB_24 0xb94
-
-#define rConfig_Pmpd_AntB 0xb98
-
-#define rBndB 0xba0
-
-#define rAPK 0xbd8
-#define rPm_Rx0_AntA 0xbdc
-#define rPm_Rx1_AntA 0xbe0
-#define rPm_Rx2_AntA 0xbe4
-#define rPm_Rx3_AntA 0xbe8
-#define rPm_Rx0_AntB 0xbec
-#define rPm_Rx1_AntB 0xbf0
-#define rPm_Rx2_AntB 0xbf4
-#define rPm_Rx3_AntB 0xbf8
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h b/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
deleted file mode 100644
index 3771d6bb5774..000000000000
--- a/drivers/staging/rtl8723au/include/Hal8723PwrSeq.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef __HAL8723PWRSEQ_H__
-#define __HAL8723PWRSEQ_H__
-/*
- Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
- There are 6 HW Power States:
- 0: POFF--Power Off
- 1: PDN--Power Down
- 2: CARDEMU--Card Emulation
- 3: ACT--Active Mode
- 4: LPS--Low Power State
- 5: SUS--Suspend
-
- The transision from different states are defined below
- TRANS_CARDEMU_TO_ACT
- TRANS_ACT_TO_CARDEMU
- TRANS_CARDEMU_TO_SUS
- TRANS_SUS_TO_CARDEMU
- TRANS_CARDEMU_TO_PDN
- TRANS_ACT_TO_LPS
- TRANS_LPS_TO_ACT
-
- TRANS_END
-*/
-#include "HalPwrSeqCmd.h"
-#include "rtl8723a_spec.h"
-
-#define RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS 15
-#define RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS 15
-#define RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS 15
-#define RTL8723A_TRANS_SUS_TO_CARDEMU_STEPS 15
-#define RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS 15
-#define RTL8723A_TRANS_PDN_TO_CARDEMU_STEPS 15
-#define RTL8723A_TRANS_ACT_TO_LPS_STEPS 15
-#define RTL8723A_TRANS_LPS_TO_ACT_STEPS 15
-#define RTL8723A_TRANS_END_STEPS 1
-
-
-/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here
- */
-#define RTL8723A_TRANS_CARDEMU_TO_ACT \
- {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
- {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
- {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
- {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0},/* disable SW LPS 0x04[10]= 0*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},/* wait till 0x04[17] = 1 power ready*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},/* release WLON reset 0x04[16]= 1*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* disable HWPDN 0x04[15]= 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0},/* disable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},/* polling until return 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0},/**/ \
- {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 1},/*0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */\
-
-#define RTL8723A_TRANS_ACT_TO_CARDEMU \
- {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
- {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/*0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
- {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, /*0x00[5] = 1b'1 analog Ips to digital , 1:isolation*/ \
- {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
-
-
-#define RTL8723A_TRANS_CARDEMU_TO_SUS \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/
-
-#define RTL8723A_TRANS_SUS_TO_CARDEMU \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, /*clear suspend enable and power down enable*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/
-
-#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, /*clear suspend enable and power down enable*/ \
- {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8723A_TRANS_CARDEMU_TO_PDN \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/
-
-#define RTL8723A_TRANS_PDN_TO_CARDEMU \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/
-
-#define RTL8723A_TRANS_ACT_TO_LPS \
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
- {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/*CCK and OFDM are disabled, and clock are gated*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*Whole BB is reset*/ \
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/ \
- {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},/*Respond TxOK to scheduler*/
-
-#define RTL8723A_TRANS_LPS_TO_ACT \
- {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
- {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
- {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, /*Polling 0x109[7]= 0 TSF in 40M*/\
- {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*. 0x101[1] = 1*/\
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
-
-#define RTL8723A_TRANS_END \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0},
-
-
-extern struct wlan_pwr_cfg rtl8723AU_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STEPS+RTL8723A_TRANS_END_STEPS];
-extern struct wlan_pwr_cfg rtl8723AU_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STEPS+RTL8723A_TRANS_END_STEPS];
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
deleted file mode 100644
index c834b3a738d7..000000000000
--- a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __INC_HAL8723U_FW_IMG_H
-#define __INC_HAL8723U_FW_IMG_H
-
-/*Created on 2013/01/14, 15:51*/
-
-/* FW v16 enable usb interrupt */
-#define Rtl8723UImgArrayLength 22172
-extern u8 Rtl8723UFwImgArray[Rtl8723UImgArrayLength];
-#define Rtl8723UBTImgArrayLength 1
-extern u8 Rtl8723UFwBTImgArray[Rtl8723UBTImgArrayLength];
-
-#define Rtl8723UUMCBCutImgArrayWithBTLength 24118
-#define Rtl8723UUMCBCutImgArrayWithoutBTLength 19200
-
-extern u8 Rtl8723UFwUMCBCutImgArrayWithBT[Rtl8723UUMCBCutImgArrayWithBTLength];
-extern u8 Rtl8723UFwUMCBCutImgArrayWithoutBT[Rtl8723UUMCBCutImgArrayWithoutBTLength];
-
-#define Rtl8723SUMCBCutMPImgArrayLength 24174
-extern const u8 Rtl8723SFwUMCBCutMPImgArray[Rtl8723SUMCBCutMPImgArrayLength];
-
-#define Rtl8723EBTImgArrayLength 15276
-extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength];
-
-#define Rtl8723UPHY_REG_Array_PGLength 336
-extern u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength];
-#define Rtl8723UMACPHY_Array_PGLength 1
-extern u32 Rtl8723UMACPHY_Array_PG[Rtl8723UMACPHY_Array_PGLength];
-
-#endif /* ifndef __INC_HAL8723U_FW_IMG_H */
diff --git a/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h b/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h
deleted file mode 100644
index d7651f7a665b..000000000000
--- a/drivers/staging/rtl8723au/include/HalDMOutSrc8723A.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __RTL8723A_ODM_H__
-#define __RTL8723A_ODM_H__
-/* */
-
-#define RSSI_CCK 0
-#define RSSI_OFDM 1
-#define RSSI_DEFAULT 2
-
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
-
-/* */
-/* structure and define */
-/* */
-
-
-
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export Marco Definition---------------------------*/
-/* define DM_MultiSTA_InitGainChangeNotify(Event) {DM_DigTable.CurMultiSTAConnectState = Event;} */
-
-
-/* */
-/* function prototype */
-/* */
-
-/* */
-/* IQ calibrate */
-/* */
-void rtl8723a_phy_iq_calibrate(struct rtw_adapter *pAdapter, bool bReCovery);
-
-/* */
-/* LC calibrate */
-/* */
-void rtl8723a_phy_lc_calibrate(struct rtw_adapter *pAdapter);
-
-/* */
-/* AP calibrate */
-/* */
-void rtl8723a_phy_ap_calibrate(struct rtw_adapter *pAdapter, char delta);
-
-void rtl8723a_odm_check_tx_power_tracking(struct rtw_adapter *Adapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h
deleted file mode 100644
index 127609404652..000000000000
--- a/drivers/staging/rtl8723au/include/HalHWImg8723A_BB.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-*
-******************************************************************************/
-
-#ifndef __INC_BB_8723A_HW_IMG_H
-#define __INC_BB_8723A_HW_IMG_H
-
-/******************************************************************************
-* AGC_TAB_1T.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm);
-
-/******************************************************************************
-* PHY_REG_1T.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm);
-
-/******************************************************************************
-* PHY_REG_MP.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h
deleted file mode 100644
index 7ee363b99b49..000000000000
--- a/drivers/staging/rtl8723au/include/HalHWImg8723A_FW.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-*
-******************************************************************************/
-
-#ifndef __INC_FW_8723A_HW_IMG_H
-#define __INC_FW_8723A_HW_IMG_H
-
-
-/******************************************************************************
-* rtl8723fw_B.TXT
-******************************************************************************/
-
-void ODM_ReadFirmware_8723A_rtl8723fw_B(struct dm_odm_t *pDM_Odm,
- u8 *pFirmware, u32 *pFirmwareSize);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h
deleted file mode 100644
index 201be1f87292..000000000000
--- a/drivers/staging/rtl8723au/include/HalHWImg8723A_MAC.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-*
-******************************************************************************/
-
-#ifndef __INC_MAC_8723A_HW_IMG_H
-#define __INC_MAC_8723A_HW_IMG_H
-
-/******************************************************************************
-* MAC_REG.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h b/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h
deleted file mode 100644
index c9af1c375339..000000000000
--- a/drivers/staging/rtl8723au/include/HalHWImg8723A_RF.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/******************************************************************************
-*
-* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details.
-*
-******************************************************************************/
-
-#ifndef __INC_RF_8723A_HW_IMG_H
-#define __INC_RF_8723A_HW_IMG_H
-
-/******************************************************************************
-* RadioA_1T.TXT
-******************************************************************************/
-
-void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm);
-
-#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h b/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h
deleted file mode 100644
index 12e03a36f2d3..000000000000
--- a/drivers/staging/rtl8723au/include/HalPwrSeqCmd.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __HALPWRSEQCMD_H__
-#define __HALPWRSEQCMD_H__
-
-#include <drv_types.h>
-
-/*---------------------------------------------*/
-/*---------------------------------------------*/
-#define PWR_CMD_READ 0x00
- /* offset: the read register offset */
- /* msk: the mask of the read value */
- /* value: N/A, left by 0 */
- /* note: dirver shall implement this function by read & msk */
-
-#define PWR_CMD_WRITE 0x01
- /* offset: the read register offset */
- /* msk: the mask of the write bits */
- /* value: write value */
- /* note: driver shall implement this cmd by read & msk after write */
-
-#define PWR_CMD_POLLING 0x02
- /* offset: the read register offset */
- /* msk: the mask of the polled value */
- /* value: the value to be polled, masked by the msd field. */
- /* note: driver shall implement this cmd by */
- /* do{ */
- /* if( (Read(offset) & msk) == (value & msk) ) */
- /* break; */
- /* } while(not timeout); */
-
-#define PWR_CMD_DELAY 0x03
- /* offset: the value to delay */
- /* msk: N/A */
- /* value: the unit of delay, 0: us, 1: ms */
-
-#define PWR_CMD_END 0x04
- /* offset: N/A */
- /* msk: N/A */
- /* value: N/A */
-
-/*---------------------------------------------*/
-/* 3 The value of base: 4 bits */
-/*---------------------------------------------*/
- /* define the base address of each block */
-#define PWR_BASEADDR_MAC 0x00
-#define PWR_BASEADDR_USB 0x01
-#define PWR_BASEADDR_PCIE 0x02
-#define PWR_BASEADDR_SDIO 0x03
-
-/*---------------------------------------------*/
-/* 3 The value of interface_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_INTF_SDIO_MSK BIT(0)
-#define PWR_INTF_USB_MSK BIT(1)
-#define PWR_INTF_PCI_MSK BIT(2)
-#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of fab_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_FAB_TSMC_MSK BIT(0)
-#define PWR_FAB_UMC_MSK BIT(1)
-#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of cut_msk: 8 bits */
-/*---------------------------------------------*/
-#define PWR_CUT_TESTCHIP_MSK BIT(0)
-#define PWR_CUT_A_MSK BIT(1)
-#define PWR_CUT_B_MSK BIT(2)
-#define PWR_CUT_C_MSK BIT(3)
-#define PWR_CUT_D_MSK BIT(4)
-#define PWR_CUT_E_MSK BIT(5)
-#define PWR_CUT_F_MSK BIT(6)
-#define PWR_CUT_G_MSK BIT(7)
-#define PWR_CUT_ALL_MSK 0xFF
-
-
-enum pwrseq_delay_unit {
- PWRSEQ_DELAY_US,
- PWRSEQ_DELAY_MS,
-};
-
-struct wlan_pwr_cfg {
- u16 offset;
- u8 cut_msk;
- u8 fab_msk:4;
- u8 interface_msk:4;
- u8 base:4;
- u8 cmd:4;
- u8 msk;
- u8 value;
-};
-
-
-#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
-#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
-#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
-#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
-#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
-#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
-#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
-#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
-
-
-/* */
-/* Prototype of protected function. */
-/* */
-u8 HalPwrSeqCmdParsing23a(
- struct rtw_adapter *padapter,
- u8 CutVersion,
- u8 FabVersion,
- u8 InterfaceType,
- struct wlan_pwr_cfg PwrCfgCmd[]);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/HalVerDef.h b/drivers/staging/rtl8723au/include/HalVerDef.h
deleted file mode 100644
index 2a0e4ea7afad..000000000000
--- a/drivers/staging/rtl8723au/include/HalVerDef.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __HAL_VERSION_DEF_H__
-#define __HAL_VERSION_DEF_H__
-
-enum hal_ic_type {
- CHIP_8192S = 0,
- CHIP_8188C = 1,
- CHIP_8192C = 2,
- CHIP_8192D = 3,
- CHIP_8723A = 4,
- CHIP_8188E = 5,
- CHIP_8881A = 6,
- CHIP_8812A = 7,
- CHIP_8821A = 8,
- CHIP_8723B = 9,
- CHIP_8192E = 10,
-};
-
-enum hal_chip_type {
- TEST_CHIP = 0,
- NORMAL_CHIP = 1,
- FPGA = 2,
-};
-
-enum hal_cut_version {
- A_CUT_VERSION = 0,
- B_CUT_VERSION = 1,
- C_CUT_VERSION = 2,
- D_CUT_VERSION = 3,
- E_CUT_VERSION = 4,
- F_CUT_VERSION = 5,
- G_CUT_VERSION = 6,
-};
-
-/* HAL_Manufacturer */
-enum hal_vendor {
- CHIP_VENDOR_TSMC = 0,
- CHIP_VENDOR_UMC = 1,
-};
-
-struct hal_version {
- enum hal_ic_type ICType;
- enum hal_chip_type ChipType;
- enum hal_cut_version CUTVersion;
- enum hal_vendor VendorType;
- u8 ROMVer;
-};
-
-/* Get element */
-#define GET_CVID_IC_TYPE(version) ((version).ICType)
-#define GET_CVID_CHIP_TYPE(version) ((version).ChipType)
-#define GET_CVID_MANUFACTUER(version) ((version).VendorType)
-#define GET_CVID_CUT_VERSION(version) ((version).CUTVersion)
-#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
-
-/* Common Macro. -- */
-
-#define IS_81XXC(version) \
- (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
- (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
-#define IS_8723_SERIES(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
-
-#define IS_TEST_CHIP(version) \
- ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
-#define IS_NORMAL_CHIP(version) \
- ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
-
-#define IS_A_CUT(version) \
- ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
-#define IS_B_CUT(version) \
- ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true : false)
-#define IS_C_CUT(version) \
- ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? true : false)
-#define IS_D_CUT(version) \
- ((GET_CVID_CUT_VERSION(version) == D_CUT_VERSION) ? true : false)
-#define IS_E_CUT(version) \
- ((GET_CVID_CUT_VERSION(version) == E_CUT_VERSION) ? true : false)
-
-#define IS_CHIP_VENDOR_TSMC(version) \
- ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
-#define IS_CHIP_VENDOR_UMC(version) \
- ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
-
-/* Chip version Macro. -- */
-
-#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
- (IS_81XXC(version)?(IS_CHIP_VENDOR_UMC(version) ? \
- (IS_A_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_B_CUT(version) ? true : false) : false): false)
-#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
- (IS_81XXC(version)?(IS_CHIP_VENDOR_UMC(version) ? \
- (IS_C_CUT(version) ? true : false) : false) : false)
-#define IS_8723A_A_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
-#define IS_8723A_B_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
-
-#endif
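The IS_* macros above reduce to comparisons on the fields of struct hal_version, so version-dependent code can branch on them directly. A short sketch, assuming the enum/struct/macro definitions from HalVerDef.h above (describe_cut is a hypothetical helper, not a driver function):

/* assumes the definitions from HalVerDef.h above */
static const char *describe_cut(struct hal_version v)
{
	if (IS_8723A_A_CUT(v))
		return "8723A A-cut";
	if (IS_8723A_B_CUT(v))
		return "8723A B-cut";
	if (IS_81xxC_VENDOR_UMC_B_CUT(v))
		return "81xxC UMC B-cut";
	return "other";
}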
diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
deleted file mode 100644
index e83463aeb9b1..000000000000
--- a/drivers/staging/rtl8723au/include/drv_types.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/*-----------------------------------------------------------------------------
-
- For type defines and data structure defines
-
-------------------------------------------------------------------------------*/
-
-
-#ifndef __DRV_TYPES_H__
-#define __DRV_TYPES_H__
-
-#include <osdep_service.h>
-#include <wlan_bssdef.h>
-
-
-enum _NIC_VERSION {
- RTL8711_NIC,
- RTL8712_NIC,
- RTL8713_NIC,
- RTL8716_NIC
-
-};
-
-
-#include <rtw_ht.h>
-
-#include <rtw_cmd.h>
-#include <rtw_xmit.h>
-#include <rtw_recv.h>
-#include <hal_intf.h>
-#include <hal_com.h>
-#include <rtw_security.h>
-#include <rtw_pwrctrl.h>
-#include <rtw_io.h>
-#include <rtw_eeprom.h>
-#include <sta_info.h>
-#include <rtw_mlme.h>
-#include <rtw_debug.h>
-#include <rtw_rf.h>
-#include <rtw_event.h>
-#include <rtw_mlme_ext.h>
-#include <rtw_ap.h>
-
-#include "ioctl_cfg80211.h"
-
-struct registry_priv {
- u8 chip_version;
- u8 rfintfs;
- struct cfg80211_ssid ssid;
- u8 channel;/* ad-hoc support requirement */
- u8 wireless_mode;/* A, B, G, auto */
- u8 scan_mode;/* active, passive */
- u8 preamble;/* long, short, auto */
- u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
- u8 vcs_type;/* RTS/CTS, CTS-to-self */
- u16 rts_thresh;
- u16 frag_thresh;
- u8 adhoc_tx_pwr;
- u8 soft_ap;
- u8 power_mgnt;
- u8 ips_mode;
- u8 smart_ps;
- u8 long_retry_lmt;
- u8 short_retry_lmt;
- u16 busy_thresh;
- u8 ack_policy;
- u8 software_encrypt;
- u8 software_decrypt;
- u8 acm_method;
- /* UAPSD */
- u8 wmm_enable;
- u8 uapsd_enable;
-
- struct wlan_bssid_ex dev_network;
-
- u8 ht_enable;
- u8 cbw40_enable;
- u8 ampdu_enable;/* for tx */
- u8 rx_stbc;
-	u8 ampdu_amsdu;/* A-MSDU within A-MPDU is permitted */
- u8 lowrate_two_xmit;
-
- u8 rf_config;
- u8 low_power;
-
- u8 wifi_spec;/* !turbo_mode */
-
- u8 channel_plan;
-#ifdef CONFIG_8723AU_BT_COEXIST
- u8 btcoex;
- u8 bt_iso;
- u8 bt_sco;
- u8 bt_ampdu;
-#endif
- bool bAcceptAddbaReq;
-
- u8 antdiv_cfg;
- u8 antdiv_type;
-
- u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
- u8 hwpwrp_detect;/* 0:disable,1:enable */
-
- u8 hw_wps_pbc;/* 0:disable,1:enable */
-
-	u8 max_roaming_times; /* max number of times the driver will try to roam */
-
- u8 enable80211d;
-
- u8 ifname[16];
- u8 if2name[16];
-
- u8 notch_filter;
-
- u8 regulatory_tid;
-};
-
-
-#define MAX_CONTINUAL_URB_ERR 4
-
-#define GET_PRIMARY_ADAPTER(padapter) \
- (((struct rtw_adapter *)padapter)->dvobj->if1)
-
-enum _IFACE_ID {
-	IFACE_ID0, /* mapping to PRIMARY_ADAPTER */
-	IFACE_ID1, /* mapping to SECONDARY_ADAPTER */
- IFACE_ID2,
- IFACE_ID3,
- IFACE_ID_MAX,
-};
-
-struct dvobj_priv {
- struct rtw_adapter *if1; /* PRIMARY_ADAPTER */
- struct rtw_adapter *if2; /* SECONDARY_ADAPTER */
-
- /* for local/global synchronization */
- struct mutex hw_init_mutex;
- struct mutex h2c_fwcmd_mutex;
- struct mutex setch_mutex;
- struct mutex setbw_mutex;
-
- unsigned char oper_channel; /* saved chan info when set chan bw */
- unsigned char oper_bwmode;
- unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
-
- struct rtw_adapter *padapters[IFACE_ID_MAX];
-	u8 iface_nums; /* total number of ifaces in use at runtime */
-
- /* For 92D, DMDP have 2 interface. */
- u8 InterfaceNumber;
- u8 NumInterfaces;
-
- /* In /Out Pipe information */
- int RtInPipe[2];
- int RtOutPipe[3];
- u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
-
-/*-------- below is for USB INTERFACE --------*/
-
- u8 nr_endpoint;
- u8 ishighspeed;
- u8 RtNumInPipes;
- u8 RtNumOutPipes;
- int ep_num[5]; /* endpoint number */
-
- struct mutex usb_vendor_req_mutex;
-
- union {
- __le32 val32;
- __le16 val16;
- u8 val8;
- } usb_buf;
-
- struct usb_interface *pusbintf;
- struct usb_device *pusbdev;
- atomic_t continual_urb_error;
-
-/*-------- below is for PCIE INTERFACE --------*/
-
-};
-
-static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
-{
-	/* todo: get interface type from dvobj and then return the dev accordingly */
- return &dvobj->pusbintf->dev;
-}
-
-enum _IFACE_TYPE {
- IFACE_PORT0, /* mapping to port0 for C/D series chips */
- IFACE_PORT1, /* mapping to port1 for C/D series chip */
- MAX_IFACE_PORT,
-};
-
-enum _ADAPTER_TYPE {
- PRIMARY_ADAPTER,
- SECONDARY_ADAPTER,
- MAX_ADAPTER,
-};
-
-struct rtw_adapter {
- int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
-	int bDongle;/* built-in module or external dongle */
- u16 chip_type;
- u16 HardwareType;
-
- struct dvobj_priv *dvobj;
- struct mlme_priv mlmepriv;
- struct mlme_ext_priv mlmeextpriv;
- struct cmd_priv cmdpriv;
- struct evt_priv evtpriv;
- struct xmit_priv xmitpriv;
- struct recv_priv recvpriv;
- struct sta_priv stapriv;
- struct security_priv securitypriv;
- struct registry_priv registrypriv;
- struct pwrctrl_priv pwrctrlpriv;
- struct eeprom_priv eeprompriv;
-
- u32 setband;
-
- void *HalData;
-
- s32 bDriverStopped;
- s32 bSurpriseRemoved;
- s32 bCardDisableWOHSM;
-
- u32 IsrContent;
- u32 ImrContent;
-
- u8 EepromAddressSize;
- u8 hw_init_completed;
- u8 bDriverIsGoingToUnload;
- u8 init_adpt_in_progress;
- u8 bHaltInProgress;
-
- struct net_device *pnetdev;
-
- /* used by rtw_rereg_nd_name related function */
- int bup;
- struct net_device_stats stats;
-
- struct wireless_dev *rtw_wdev;
- int net_closed;
-
- u8 bFWReady;
- u8 bReadPortCancel;
- u8 bWritePortCancel;
-
- /* extend to support multi interface */
- /* IFACE_ID0 is equals to PRIMARY_ADAPTER */
- /* IFACE_ID1 is equals to SECONDARY_ADAPTER */
- u8 iface_id;
-};
-
-#define adapter_to_dvobj(adapter) (adapter->dvobj)
-
-static inline u8 *myid(struct eeprom_priv *peepriv)
-{
- return peepriv->mac_addr;
-}
-
-#endif /* __DRV_TYPES_H__ */
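struct rtw_adapter holds its private sub-structures by value and points back to the shared struct dvobj_priv, so moving between the two is plain pointer/field access, which is all GET_PRIMARY_ADAPTER and adapter_to_dvobj do. A sketch of that navigation, assuming the definitions above (primary_from_any is a hypothetical helper):

/* assumes struct rtw_adapter and struct dvobj_priv from drv_types.h above */
static struct rtw_adapter *primary_from_any(struct rtw_adapter *padapter)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);

	/* if1 is the PRIMARY_ADAPTER; secondary adapters share the same dvobj */
	return dvobj->if1;
}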
diff --git a/drivers/staging/rtl8723au/include/hal_com.h b/drivers/staging/rtl8723au/include/hal_com.h
deleted file mode 100644
index 9c50320b2100..000000000000
--- a/drivers/staging/rtl8723au/include/hal_com.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __HAL_COMMON_H__
-#define __HAL_COMMON_H__
-
-/* */
-/* Rate Definition */
-/* */
-/* CCK */
-#define RATR_1M 0x00000001
-#define RATR_2M 0x00000002
-#define RATR_55M 0x00000004
-#define RATR_11M 0x00000008
-/* OFDM */
-#define RATR_6M 0x00000010
-#define RATR_9M 0x00000020
-#define RATR_12M 0x00000040
-#define RATR_18M 0x00000080
-#define RATR_24M 0x00000100
-#define RATR_36M 0x00000200
-#define RATR_48M 0x00000400
-#define RATR_54M 0x00000800
-/* MCS 1 Spatial Stream */
-#define RATR_MCS0 0x00001000
-#define RATR_MCS1 0x00002000
-#define RATR_MCS2 0x00004000
-#define RATR_MCS3 0x00008000
-#define RATR_MCS4 0x00010000
-#define RATR_MCS5 0x00020000
-#define RATR_MCS6 0x00040000
-#define RATR_MCS7 0x00080000
-/* MCS 2 Spatial Stream */
-#define RATR_MCS8 0x00100000
-#define RATR_MCS9 0x00200000
-#define RATR_MCS10 0x00400000
-#define RATR_MCS11 0x00800000
-#define RATR_MCS12 0x01000000
-#define RATR_MCS13 0x02000000
-#define RATR_MCS14 0x04000000
-#define RATR_MCS15 0x08000000
-
-/* CCK */
-#define RATE_1M BIT(0)
-#define RATE_2M BIT(1)
-#define RATE_5_5M BIT(2)
-#define RATE_11M BIT(3)
-/* OFDM */
-#define RATE_6M BIT(4)
-#define RATE_9M BIT(5)
-#define RATE_12M BIT(6)
-#define RATE_18M BIT(7)
-#define RATE_24M BIT(8)
-#define RATE_36M BIT(9)
-#define RATE_48M BIT(10)
-#define RATE_54M BIT(11)
-
-/*------------------------------ Tx Desc definition Macro ------------------------*/
-/* pragma mark -- Tx Desc related definition. -- */
-/* */
-/* */
-/* Rate */
-/* */
-/* CCK Rates, TxHT = 0 */
-#define DESC_RATE1M 0x00
-#define DESC_RATE2M 0x01
-#define DESC_RATE5_5M 0x02
-#define DESC_RATE11M 0x03
-
-/* OFDM Rates, TxHT = 0 */
-#define DESC_RATE6M 0x04
-#define DESC_RATE9M 0x05
-#define DESC_RATE12M 0x06
-#define DESC_RATE18M 0x07
-#define DESC_RATE24M 0x08
-#define DESC_RATE36M 0x09
-#define DESC_RATE48M 0x0a
-#define DESC_RATE54M 0x0b
-
-/* MCS Rates, TxHT = 1 */
-#define DESC_RATEMCS0 0x0c
-#define DESC_RATEMCS1 0x0d
-#define DESC_RATEMCS2 0x0e
-#define DESC_RATEMCS3 0x0f
-#define DESC_RATEMCS4 0x10
-#define DESC_RATEMCS5 0x11
-#define DESC_RATEMCS6 0x12
-#define DESC_RATEMCS7 0x13
-#define DESC_RATEMCS8 0x14
-#define DESC_RATEMCS9 0x15
-#define DESC_RATEMCS10 0x16
-#define DESC_RATEMCS11 0x17
-#define DESC_RATEMCS12 0x18
-#define DESC_RATEMCS13 0x19
-#define DESC_RATEMCS14 0x1a
-#define DESC_RATEMCS15 0x1b
-#define DESC_RATEMCS15_SG 0x1c
-#define DESC_RATEMCS32 0x20
-
-#define REG_P2P_CTWIN 0x0572 /* 1 Byte long (in unit of TU) */
-#define REG_NOA_DESC_SEL 0x05CF
-#define REG_NOA_DESC_DURATION 0x05E0
-#define REG_NOA_DESC_INTERVAL 0x05E4
-#define REG_NOA_DESC_START 0x05E8
-#define REG_NOA_DESC_COUNT 0x05EC
-
-#include "HalVerDef.h"
-
-
-u8 /* return the final channel plan decision */
-hal_com_get_channel_plan23a(
- struct rtw_adapter *padapter,
- u8 hw_channel_plan, /* channel plan from HW (efuse/eeprom) */
- u8 sw_channel_plan, /* channel plan from SW (registry/module param) */
-	u8 def_channel_plan, /* channel plan used when the former two are invalid */
- bool AutoLoadFail
- );
-
-u8 MRateToHwRate23a(u8 rate);
-
-void HalSetBrateCfg23a(struct rtw_adapter *padapter, u8 *mBratesOS);
-
-bool
-Hal_MappingOutPipe23a(struct rtw_adapter *pAdapter, u8 NumOutPipe);
-
-void c2h_evt_clear23a(struct rtw_adapter *adapter);
-s32 c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf);
-
-void rtl8723a_set_ampdu_min_space(struct rtw_adapter *padapter, u8 MinSpacingToSet);
-void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet);
-void rtl8723a_set_acm_ctrl(struct rtw_adapter *padapter, u8 ctrl);
-void rtl8723a_set_media_status(struct rtw_adapter *padapter, u8 status);
-void rtl8723a_set_media_status1(struct rtw_adapter *padapter, u8 status);
-void rtl8723a_set_bcn_func(struct rtw_adapter *padapter, u8 val);
-void rtl8723a_check_bssid(struct rtw_adapter *padapter, u8 val);
-void rtl8723a_mlme_sitesurvey(struct rtw_adapter *padapter, u8 flag);
-void rtl8723a_on_rcr_am(struct rtw_adapter *padapter);
-void rtl8723a_off_rcr_am(struct rtw_adapter *padapter);
-void rtl8723a_set_slot_time(struct rtw_adapter *padapter, u8 slottime);
-void rtl8723a_ack_preamble(struct rtw_adapter *padapter, u8 bShortPreamble);
-void rtl8723a_set_sec_cfg(struct rtw_adapter *padapter, u8 sec);
-void rtl8723a_cam_empty_entry(struct rtw_adapter *padapter, u8 ucIndex);
-void rtl8723a_cam_invalidate_all(struct rtw_adapter *padapter);
-void rtl8723a_cam_write(struct rtw_adapter *padapter,
- u8 entry, u16 ctrl, const u8 *mac, const u8 *key);
-void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter);
-void rtl8723a_set_apfm_on_mac(struct rtw_adapter *padapter, u8 val);
-void rtl8723a_bcn_valid(struct rtw_adapter *padapter);
-bool rtl8723a_get_bcn_valid(struct rtw_adapter *padapter);
-void rtl8723a_set_beacon_interval(struct rtw_adapter *padapter, u16 interval);
-void rtl8723a_set_resp_sifs(struct rtw_adapter *padapter,
- u8 r2t1, u8 r2t2, u8 t2t1, u8 t2t2);
-void rtl8723a_set_ac_param_vo(struct rtw_adapter *padapter, u32 vo);
-void rtl8723a_set_ac_param_vi(struct rtw_adapter *padapter, u32 vi);
-void rtl8723a_set_ac_param_be(struct rtw_adapter *padapter, u32 be);
-void rtl8723a_set_ac_param_bk(struct rtw_adapter *padapter, u32 bk);
-void rtl8723a_set_rxdma_agg_pg_th(struct rtw_adapter *padapter, u8 val);
-void rtl8723a_set_initial_gain(struct rtw_adapter *padapter, u32 rx_gain);
-
-void rtl8723a_odm_support_ability_write(struct rtw_adapter *padapter, u32 val);
-void rtl8723a_odm_support_ability_backup(struct rtw_adapter *padapter);
-void rtl8723a_odm_support_ability_restore(struct rtw_adapter *padapter);
-void rtl8723a_odm_support_ability_set(struct rtw_adapter *padapter, u32 val);
-void rtl8723a_odm_support_ability_clr(struct rtw_adapter *padapter, u32 val);
-
-void rtl8723a_set_rpwm(struct rtw_adapter *padapter, u8 val);
-u8 rtl8723a_get_rf_type(struct rtw_adapter *padapter);
-bool rtl8723a_get_fwlps_rf_on(struct rtw_adapter *padapter);
-bool rtl8723a_chk_hi_queue_empty(struct rtw_adapter *padapter);
-
-#endif /* __HAL_COMMON_H__ */
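The RATR_* and RATE_* defines above are one-hot bit masks, so a rate set is just the bitwise OR of its members. For example (values copied so the snippet stands alone; the chosen rates are illustrative, not a driver default):

#include <stdint.h>

/* same one-hot encoding as the RATR_* defines above */
#define RATR_1M   0x00000001
#define RATR_2M   0x00000002
#define RATR_55M  0x00000004
#define RATR_11M  0x00000008
#define RATR_6M   0x00000010
#define RATR_12M  0x00000040
#define RATR_24M  0x00000100

/* example rate masks built by OR-ing individual rate bits */
static const uint32_t cck_rates        = RATR_1M | RATR_2M | RATR_55M | RATR_11M;
static const uint32_t basic_ofdm_rates = RATR_6M | RATR_12M | RATR_24M;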
diff --git a/drivers/staging/rtl8723au/include/hal_intf.h b/drivers/staging/rtl8723au/include/hal_intf.h
deleted file mode 100644
index b924d47fcfbc..000000000000
--- a/drivers/staging/rtl8723au/include/hal_intf.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __HAL_INTF_H__
-#define __HAL_INTF_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-enum _CHIP_TYPE {
- NULL_CHIP_TYPE,
- RTL8712_8188S_8191S_8192S,
- RTL8188C_8192C,
- RTL8192D,
- RTL8723A,
- RTL8188E,
- MAX_CHIP_TYPE
-};
-
-enum hal_def_variable {
- HAL_DEF_UNDERCORATEDSMOOTHEDPWDB,
- HAL_DEF_IS_SUPPORT_ANT_DIV,
- HAL_DEF_CURRENT_ANTENNA,
- HAL_DEF_DRVINFO_SZ,
- HAL_DEF_MAX_RECVBUF_SZ,
- HAL_DEF_RX_PACKET_OFFSET,
- HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
- HAL_DEF_DBG_DM_FUNC,/* for dbg */
- HAL_DEF_RA_DECISION_RATE,
- HAL_DEF_RA_SGI,
- HAL_DEF_PT_PWR_STATUS,
- HW_VAR_MAX_RX_AMPDU_FACTOR,
- HW_DEF_RA_INFO_DUMP,
- HAL_DEF_DBG_DUMP_TXPKT,
- HW_DEF_FA_CNT_DUMP,
- HW_DEF_ODM_DBG_FLAG,
-};
-
-enum hal_odm_variable {
- HAL_ODM_STA_INFO,
- HAL_ODM_P2P_STATE,
- HAL_ODM_WIFI_DISPLAY_STATE,
-};
-
-enum rt_eeprom_type {
- EEPROM_93C46,
- EEPROM_93C56,
- EEPROM_BOOT_EFUSE,
-};
-
-
-
-#define RF_CHANGE_BY_INIT 0
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_SW BIT(31)
-
-enum hardware_type {
- HARDWARE_TYPE_RTL8180,
- HARDWARE_TYPE_RTL8185,
- HARDWARE_TYPE_RTL8187,
- HARDWARE_TYPE_RTL8188,
- HARDWARE_TYPE_RTL8190P,
- HARDWARE_TYPE_RTL8192E,
- HARDWARE_TYPE_RTL819xU,
- HARDWARE_TYPE_RTL8192SE,
- HARDWARE_TYPE_RTL8192SU,
- HARDWARE_TYPE_RTL8192CE,
- HARDWARE_TYPE_RTL8192CU,
- HARDWARE_TYPE_RTL8192DE,
- HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723AE,
- HARDWARE_TYPE_RTL8723AU,
- HARDWARE_TYPE_RTL8723AS,
- HARDWARE_TYPE_RTL8188EE,
- HARDWARE_TYPE_RTL8188EU,
- HARDWARE_TYPE_RTL8188ES,
- HARDWARE_TYPE_MAX,
-};
-
-#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
-
-void rtw_hal_def_value_init23a(struct rtw_adapter *padapter);
-int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal);
-
-int rtl8723au_hal_init(struct rtw_adapter *padapter);
-int rtl8723au_hal_deinit(struct rtw_adapter *padapter);
-void rtw_hal_stop(struct rtw_adapter *padapter);
-
-void rtw_hal_update_ra_mask23a(struct sta_info *psta, u8 rssi_level);
-void rtw_hal_clone_data(struct rtw_adapter *dst_padapter, struct rtw_adapter *src_padapter);
-
-void hw_var_set_correct_tsf(struct rtw_adapter *padapter);
-void hw_var_set_mlme_disconnect(struct rtw_adapter *padapter);
-void hw_var_set_opmode(struct rtw_adapter *padapter, u8 mode);
-void hw_var_set_macaddr(struct rtw_adapter *padapter, u8 *val);
-void hw_var_set_bssid(struct rtw_adapter *padapter, u8 *val);
-void hw_var_set_mlme_join(struct rtw_adapter *padapter, u8 type);
-
-int GetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
- enum hal_def_variable eVariable, void *pValue);
-
-#endif /* __HAL_INTF_H__ */
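GetHalDefVar8192CUsb follows an enum-selector / void * out-parameter pattern: the caller names a variable from enum hal_def_variable and passes storage of the type that variable expects. A hedged usage sketch; the u32 out type for HW_VAR_MAX_RX_AMPDU_FACTOR is an assumption, not stated in this header:

/* assumes the declarations above and a live struct rtw_adapter *padapter;
 * the u32 out type is an assumption for illustration only
 */
u32 ampdu_factor = 0;

GetHalDefVar8192CUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &ampdu_factor);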
diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h
deleted file mode 100644
index 634102e1bda6..000000000000
--- a/drivers/staging/rtl8723au/include/ieee80211.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __IEEE80211_H
-#define __IEEE80211_H
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include "linux/ieee80211.h"
-#include "wifi.h"
-
-#include <linux/wireless.h>
-
-#if (WIRELESS_EXT < 22)
-#error "Obsolete pre 2007 wireless extensions are not supported"
-#endif
-
-
-#ifdef CONFIG_8723AU_AP_MODE
-
-/* STA flags */
-#define WLAN_STA_AUTH BIT(0)
-#define WLAN_STA_ASSOC BIT(1)
-#define WLAN_STA_PS BIT(2)
-#define WLAN_STA_TIM BIT(3)
-#define WLAN_STA_PERM BIT(4)
-#define WLAN_STA_AUTHORIZED BIT(5)
-#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
-#define WLAN_STA_SHORT_PREAMBLE BIT(7)
-#define WLAN_STA_PREAUTH BIT(8)
-#define WLAN_STA_WME BIT(9)
-#define WLAN_STA_MFP BIT(10)
-#define WLAN_STA_HT BIT(11)
-#define WLAN_STA_WPS BIT(12)
-#define WLAN_STA_MAYBE_WPS BIT(13)
-#define WLAN_STA_NONERP BIT(31)
-
-#endif
-
-#define WPA_CIPHER_NONE BIT(0)
-#define WPA_CIPHER_WEP40 BIT(1)
-#define WPA_CIPHER_WEP104 BIT(2)
-#define WPA_CIPHER_TKIP BIT(3)
-#define WPA_CIPHER_CCMP BIT(4)
-
-
-
-#define WPA_SELECTOR_LEN 4
-extern u8 RTW_WPA_OUI23A_TYPE[];
-extern u16 RTW_WPA_VERSION23A;
-extern u8 WPA_AUTH_KEY_MGMT_NONE23A[];
-extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X23A[];
-extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[];
-extern u8 WPA_CIPHER_SUITE_NONE23A[];
-extern u8 WPA_CIPHER_SUITE_WEP4023A[];
-extern u8 WPA_CIPHER_SUITE_TKIP23A[];
-extern u8 WPA_CIPHER_SUITE_WRAP23A[];
-extern u8 WPA_CIPHER_SUITE_CCMP23A[];
-extern u8 WPA_CIPHER_SUITE_WEP10423A[];
-
-
-#define RSN_HEADER_LEN 4
-#define RSN_SELECTOR_LEN 4
-
-extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X23A[];
-extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X23A[];
-extern u8 RSN_CIPHER_SUITE_NONE23A[];
-extern u8 RSN_CIPHER_SUITE_WEP4023A[];
-extern u8 RSN_CIPHER_SUITE_TKIP23A[];
-extern u8 RSN_CIPHER_SUITE_WRAP23A[];
-extern u8 RSN_CIPHER_SUITE_CCMP23A[];
-extern u8 RSN_CIPHER_SUITE_WEP10423A[];
-
-enum ratr_table_mode {
-	RATR_INX_WIRELESS_NGB = 0, /* BGN 40 MHz 2SS 1SS */
-	RATR_INX_WIRELESS_NG = 1, /* GN or N */
-	RATR_INX_WIRELESS_NB = 2, /* BGN 20 MHz 2SS 1SS or BN */
- RATR_INX_WIRELESS_N = 3,
- RATR_INX_WIRELESS_GB = 4,
- RATR_INX_WIRELESS_G = 5,
- RATR_INX_WIRELESS_B = 6,
- RATR_INX_WIRELESS_MC = 7,
- RATR_INX_WIRELESS_AC_N = 8,
-};
-
-enum NETWORK_TYPE
-{
- WIRELESS_INVALID = 0,
- /* Sub-Element */
- /* tx: cck only , rx: cck only, hw: cck */
- WIRELESS_11B = BIT(0),
- /* tx: ofdm only, rx: ofdm & cck, hw: cck & ofdm */
- WIRELESS_11G = BIT(1),
- /* tx: ofdm only, rx: ofdm only, hw: ofdm only */
- WIRELESS_11A = BIT(2),
- /* tx: MCS only, rx: MCS & cck, hw: MCS & cck */
- WIRELESS_11_24N = BIT(3),
- /* tx: MCS only, rx: MCS & ofdm, hw: ofdm only */
- WIRELESS_11_5N = BIT(4),
- /* WIRELESS_AUTO = BIT(5), */
- WIRELESS_AC = BIT(6),
-
- /* Combination */
- /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
- WIRELESS_11BG = WIRELESS_11B|WIRELESS_11G,
- /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
- WIRELESS_11G_24N = WIRELESS_11G | WIRELESS_11_24N,
- /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
- WIRELESS_11A_5N = WIRELESS_11A | WIRELESS_11_5N,
- /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
- WIRELESS_11BG_24N = WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N,
- /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
- WIRELESS_11AGN = WIRELESS_11A | WIRELESS_11G | WIRELESS_11_24N |
- WIRELESS_11_5N,
- WIRELESS_11ABGN = WIRELESS_11A | WIRELESS_11B | WIRELESS_11G |
- WIRELESS_11_24N | WIRELESS_11_5N,
-};
-
-#define SUPPORTED_24G_NETTYPE_MSK (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
-#define SUPPORTED_5G_NETTYPE_MSK (WIRELESS_11A | WIRELESS_11_5N)
-
-#define IsSupported24G(NetType) (NetType & SUPPORTED_24G_NETTYPE_MSK ? true : false)
-#define IsSupported5G(NetType) (NetType & SUPPORTED_5G_NETTYPE_MSK ? true : false)
-
-#define IsEnableHWCCK(NetType) IsSupported24G(NetType)
-#define IsEnableHWOFDM(NetType) (NetType & (WIRELESS_11G|WIRELESS_11_24N|SUPPORTED_5G_NETTYPE_MSK) ? true : false)
-
-#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
-#define IsSupportedRxOFDM(NetType) IsEnableHWOFDM(NetType)
-#define IsSupportedRxMCS(NetType) IsEnableHWOFDM(NetType)
-
-#define IsSupportedTxCCK(NetType) (NetType & (WIRELESS_11B) ? true : false)
-#define IsSupportedTxOFDM(NetType) (NetType & (WIRELESS_11G|WIRELESS_11A) ? true : false)
-#define IsSupportedTxMCS(NetType) (NetType & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
-
-
-#define MIN_FRAG_THRESHOLD 256U
-#define MAX_FRAG_THRESHOLD 2346U
-
-/* QoS,QOS */
-#define NORMAL_ACK 0
-#define NO_ACK 1
-#define NON_EXPLICIT_ACK 2
-#define BLOCK_ACK 3
-
-/* IEEE 802.11 defines */
-
-#define P80211_OUI_LEN 3
-
-struct ieee80211_snap_hdr {
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-} __attribute__ ((packed));
-
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
-#define WLAN_REASON_EXPIRATION_CHK 65535
-
-#define IEEE80211_CCK_RATE_LEN 4
-#define IEEE80211_NUM_OFDM_RATESLEN 8
-
-
-#define IEEE80211_CCK_RATE_1MB 0x02
-#define IEEE80211_CCK_RATE_2MB 0x04
-#define IEEE80211_CCK_RATE_5MB 0x0B
-#define IEEE80211_CCK_RATE_11MB 0x16
-#define IEEE80211_OFDM_RATE_LEN 8
-#define IEEE80211_OFDM_RATE_6MB 0x0C
-#define IEEE80211_OFDM_RATE_9MB 0x12
-#define IEEE80211_OFDM_RATE_12MB 0x18
-#define IEEE80211_OFDM_RATE_18MB 0x24
-#define IEEE80211_OFDM_RATE_24MB 0x30
-#define IEEE80211_OFDM_RATE_36MB 0x48
-#define IEEE80211_OFDM_RATE_48MB 0x60
-#define IEEE80211_OFDM_RATE_54MB 0x6C
-#define IEEE80211_BASIC_RATE_MASK 0x80
-
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
-
-#define IEEE80211_CCK_RATES_MASK 0x0000000F
-#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
- IEEE80211_CCK_RATE_2MB_MASK)
-#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
- IEEE80211_CCK_RATE_5MB_MASK | \
- IEEE80211_CCK_RATE_11MB_MASK)
-
-#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
-#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
- IEEE80211_OFDM_RATE_12MB_MASK | \
- IEEE80211_OFDM_RATE_24MB_MASK)
-#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
- IEEE80211_OFDM_RATE_9MB_MASK | \
- IEEE80211_OFDM_RATE_18MB_MASK | \
- IEEE80211_OFDM_RATE_36MB_MASK | \
- IEEE80211_OFDM_RATE_48MB_MASK | \
- IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
- IEEE80211_CCK_DEFAULT_RATES_MASK)
-
-#define IEEE80211_NUM_OFDM_RATES 8
-#define IEEE80211_NUM_CCK_RATES 4
-#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-#define WEP_KEYS 4
-
-
-/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
- * only use 8, and then use extended rates for the remaining supported
- * rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
-#define MAX_RATES_LENGTH 12
-#define MAX_RATES_EX_LENGTH 16
-#define MAX_CHANNEL_NUMBER 161
-#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
-
-#define MAX_WPA_IE_LEN 256
-#define MAX_WPS_IE_LEN 256
-#define MAX_P2P_IE_LEN 256
-#define MAX_WFD_IE_LEN 128
-
-/*
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-*/
-
-#define MAXTID 16
-
-#define WME_OUI_TYPE 2
-#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
-#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
-#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
-#define WME_VERSION 1
-
-
-#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
-
-#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
-
-/* Represent channel details, subset of ieee80211_channel */
-struct rtw_ieee80211_channel {
- /* enum nl80211_band band; */
- /* u16 center_freq; */
- u16 hw_value;
- u32 flags;
- /* int max_antenna_gain; */
- /* int max_power; */
- /* int max_reg_power; */
- /* bool beacon_found; */
- /* u32 orig_flags; */
- /* int orig_mag; */
- /* int orig_mpwr; */
-};
-
-#define CHAN_FMT \
- /*"band:%d, "*/ \
- /*"center_freq:%u, "*/ \
- "hw_value:%u, " \
- "flags:0x%08x" \
- /*"max_antenna_gain:%d\n"*/ \
- /*"max_power:%d\n"*/ \
- /*"max_reg_power:%d\n"*/ \
- /*"beacon_found:%u\n"*/ \
- /*"orig_flags:0x%08x\n"*/ \
- /*"orig_mag:%d\n"*/ \
- /*"orig_mpwr:%d\n"*/
-
-#define CHAN_ARG(channel) \
- /*(channel)->band*/ \
- /*, (channel)->center_freq*/ \
- (channel)->hw_value \
- , (channel)->flags \
- /*, (channel)->max_antenna_gain*/ \
- /*, (channel)->max_power*/ \
- /*, (channel)->max_reg_power*/ \
- /*, (channel)->beacon_found*/ \
- /*, (channel)->orig_flags*/ \
- /*, (channel)->orig_mag*/ \
- /*, (channel)->orig_mpwr*/ \
-
-u8 *rtw_set_ie23a(u8 *pbuf, int index, uint len, const u8 *source, uint *frlen);
-
-u8 hal_ch_offset_to_secondary_ch_offset23a(u8 ch_offset);
-u8 *rtw_set_ie23a_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode, u8 new_ch, u8 ch_switch_cnt);
-u8 *rtw_set_ie23a_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset);
-
-u8 *rtw_get_ie23a(u8*pbuf, int index, int *len, int limit);
-u8 *rtw_get_ie23a_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen);
-int rtw_ies_remove_ie23a(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len);
-
-void rtw_set_supported_rate23a(u8 *SupportedRates, uint mode);
-
-int rtw_parse_wpa_ie23a(const u8* wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
-int rtw_parse_wpa2_ie23a(const u8* wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
-
-const u8 *rtw_get_wps_attr23a(const u8 *wps_ie, uint wps_ielen, u16 target_attr_id ,u8 *buf_attr, u32 *len_attr);
-const u8 *rtw_get_wps_attr_content23a(const u8 *wps_ie, uint wps_ielen, u16 target_attr_id ,u8 *buf_content);
-
-uint rtw_get_rateset_len23a(u8 *rateset);
-
-struct registry_priv;
-int rtw_generate_ie23a(struct registry_priv *pregistrypriv);
-
-
-int rtw_get_bit_value_from_ieee_value23a(u8 val);
-
-int rtw_check_network_type23a(unsigned char *rate, int ratelen, int channel);
-
-void rtw_get_bcn_info23a(struct wlan_network *pnetwork);
-
-u16 rtw_mcs_rate23a(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
- struct ieee80211_mcs_info *mcs);
-
-#endif /* IEEE80211_H */
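CHAN_FMT and CHAN_ARG above are a format-string/argument-list pair, and the IsSupported* macros are simple bit tests on the NETWORK_TYPE mask. A sketch of how they combine in a debug print, assuming the definitions above and a kernel (driver) context:

/* assumes the definitions from this header; kernel context */
struct rtw_ieee80211_channel ch = { .hw_value = 6, .flags = 0 };

if (IsSupported24G(WIRELESS_11BG_24N))
	printk(KERN_DEBUG "scan ch " CHAN_FMT "\n", CHAN_ARG(&ch));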
diff --git a/drivers/staging/rtl8723au/include/ioctl_cfg80211.h b/drivers/staging/rtl8723au/include/ioctl_cfg80211.h
deleted file mode 100644
index 3a4ead54f948..000000000000
--- a/drivers/staging/rtl8723au/include/ioctl_cfg80211.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __IOCTL_CFG80211_H__
-#define __IOCTL_CFG80211_H__
-
-struct rtw_wdev_priv {
- struct wireless_dev *rtw_wdev;
-
- struct rtw_adapter *padapter;
-
- struct cfg80211_scan_request *scan_request;
- spinlock_t scan_req_lock;
-
- struct net_device *pmon_ndev;/* for monitor interface */
- char ifname_mon[IFNAMSIZ + 1]; /* name for monitor interface */
-
- u8 p2p_enabled;
-
- bool power_mgmt;
-};
-
-#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
-
-#define wiphy_to_adapter(x) \
- (struct rtw_adapter *)(((struct rtw_wdev_priv *) \
- wiphy_priv(x))->padapter)
-
-#define wiphy_to_wdev(x) \
- (struct wireless_dev *)(((struct rtw_wdev_priv *) \
- wiphy_priv(x))->rtw_wdev)
-
-int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev);
-void rtw_wdev_free(struct wireless_dev *wdev);
-void rtw_wdev_unregister(struct wireless_dev *wdev);
-
-void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter);
-
-void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter);
-
-void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter);
-void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter);
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted);
-
-#ifdef CONFIG_8723AU_AP_MODE
-void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
- unsigned char *da, unsigned short reason);
-#endif /* CONFIG_8723AU_AP_MODE */
-
-bool rtw_cfg80211_pwr_mgmt(struct rtw_adapter *adapter);
-
-#endif /* __IOCTL_CFG80211_H__ */
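wiphy_to_adapter and wdev_to_priv above recover the driver's private structures from the cfg80211 objects. A sketch shaped like a cfg80211 set_power_mgmt handler shows that flow; this is not the driver's actual callback, just an illustration assuming the definitions above:

/* assumes the definitions above; not the driver's real handler */
static int sketch_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
				 bool enabled, int timeout)
{
	struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);

	pwdev_priv->power_mgmt = enabled;
	return 0;
}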
diff --git a/drivers/staging/rtl8723au/include/mlme_osdep.h b/drivers/staging/rtl8723au/include/mlme_osdep.h
deleted file mode 100644
index 4bb5525b7a68..000000000000
--- a/drivers/staging/rtl8723au/include/mlme_osdep.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __MLME_OSDEP_H_
-#define __MLME_OSDEP_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-void rtw_os_indicate_disconnect23a(struct rtw_adapter *adapter);
-void rtw_reset_securitypriv23a(struct rtw_adapter *adapter);
-
-#endif /* _MLME_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723au/include/odm.h b/drivers/staging/rtl8723au/include/odm.h
deleted file mode 100644
index 24f2f28c473f..000000000000
--- a/drivers/staging/rtl8723au/include/odm.h
+++ /dev/null
@@ -1,860 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-
-#ifndef __HALDMOUTSRC_H__
-#define __HALDMOUTSRC_H__
-
-/* */
-/* Definition */
-/* */
-/* */
-/* 2011/09/22 MH Define all team support ability. */
-/* */
-
-/* */
-/* 2011/09/22 MH Define for all teams. Please define the constant in your precomp header. */
-/* */
-/* define DM_ODM_SUPPORT_AP 0 */
-/* define DM_ODM_SUPPORT_ADSL 0 */
-/* define DM_ODM_SUPPORT_CE 0 */
-/* define DM_ODM_SUPPORT_MP 1 */
-
-#define TP_MODE 0
-#define RSSI_MODE 1
-#define TRAFFIC_LOW 0
-#define TRAFFIC_HIGH 1
-
-
-/* */
-/* 3 Tx Power Tracking */
-/* 3============================================================ */
-#define DPK_DELTA_MAPPING_NUM 13
-#define index_mapping_HP_NUM 15
-
-
-/* */
-/* 3 PSD Handler */
-/* 3============================================================ */
-
-#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
-#define MODE_40M 0 /* 0:20M, 1:40M */
-#define PSD_TH2 3
-#define PSD_CHMIN 20 /* Minimum channel number for BT AFH */
-#define SIR_STEP_SIZE 3
-#define Smooth_Size_1 5
-#define Smooth_TH_1 3
-#define Smooth_Size_2 10
-#define Smooth_TH_2 4
-#define Smooth_Size_3 20
-#define Smooth_TH_3 4
-#define Smooth_Step_Size 5
-#define Adaptive_SIR 1
-#define PSD_RESCAN 4
-#define PSD_SCAN_INTERVAL 700 /* ms */
-
-/* 8723A High Power IGI Setting */
-#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
-#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
-#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
-
-/* LPS define */
-#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
-#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
-#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
-#define RSSI_OFFSET_DIG		0x05
-
-/* ANT Test */
-#define ANTTESTALL 0x00 /* Ant A or B will be Testing */
-#define ANTTESTA 0x01 /* Ant A will be Testing */
-#define ANTTESTB 0x02 /* Ant B will be testing */
-
-
-/* */
-/* structure and define */
-/* */
-
-struct dig_t {
- u8 Dig_Enable_Flag;
- u8 Dig_Ext_Port_Stage;
-
- int RssiLowThresh;
- int RssiHighThresh;
-
- u32 FALowThresh;
- u32 FAHighThresh;
-
- u8 CurSTAConnectState;
- u8 PreSTAConnectState;
- u8 CurMultiSTAConnectState;
-
- u8 PreIGValue;
- u8 CurIGValue;
- u8 BackupIGValue;
-
- s8 BackoffVal;
- s8 BackoffVal_range_max;
- s8 BackoffVal_range_min;
- u8 rx_gain_range_max;
- u8 rx_gain_range_min;
- u8 Rssi_val_min;
-
- u8 PreCCK_CCAThres;
- u8 CurCCK_CCAThres;
- u8 PreCCKPDState;
- u8 CurCCKPDState;
-
- u8 LargeFAHit;
- u8 ForbiddenIGI;
- u32 Recover_cnt;
-
- u8 DIG_Dynamic_MIN_0;
- u8 DIG_Dynamic_MIN_1;
- bool bMediaConnect_0;
- bool bMediaConnect_1;
-
- u32 RSSI_max;
-};
-
-struct dynamic_pwr_sav {
- u8 PreCCAState;
- u8 CurCCAState;
-
- u8 PreRFState;
- u8 CurRFState;
-
- int Rssi_val_min;
-
- u8 initialize;
- u32 Reg874, RegC70, Reg85C, RegA74;
-};
-
-struct false_alarm_stats {
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_Mcs_fail;
- u32 Cnt_Ofdm_fail;
- u32 Cnt_Cck_fail;
- u32 Cnt_all;
- u32 Cnt_Fast_Fsync;
- u32 Cnt_SB_Search_fail;
- u32 Cnt_OFDM_CCA;
- u32 Cnt_CCK_CCA;
- u32 Cnt_CCA_all;
- u32 Cnt_BW_USC; /* Gary */
- u32 Cnt_BW_LSC; /* Gary */
-};
-
-#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
-#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
-
-/* This indicates two different steps. */
-/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the signal on the air. */
-/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in SWAW_STEP_PEAK */
-/* with original RSSI to determine if it is necessary to switch antenna. */
-#define SWAW_STEP_PEAK 0
-#define SWAW_STEP_DETERMINE 1
-
-#define TP_MODE 0
-#define RSSI_MODE 1
-#define TRAFFIC_LOW 0
-#define TRAFFIC_HIGH 1
-
-struct sw_ant_sw {
- u8 try_flag;
- s32 PreRSSI;
- u8 CurAntenna;
- u8 PreAntenna;
- u8 RSSI_Trying;
- u8 TestMode;
- u8 bTriggerAntennaSwitch;
- u8 SelectAntennaMap;
- u8 RSSI_target;
-
- /* Before link Antenna Switch check */
- u8 SWAS_NoLink_State;
- u32 SWAS_NoLink_BK_Reg860;
-	bool ANTA_ON; /* To indicate whether Ant A is on or not */
- bool ANTB_ON; /* To indicate Ant B is on or not */
-
- s32 RSSI_sum_A;
- s32 RSSI_sum_B;
- s32 RSSI_cnt_A;
- s32 RSSI_cnt_B;
-
- u64 lastTxOkCnt;
- u64 lastRxOkCnt;
- u64 TXByteCnt_A;
- u64 TXByteCnt_B;
- u64 RXByteCnt_A;
- u64 RXByteCnt_B;
- u8 TrafficLoad;
-};
-
-struct edca_turbo {
- bool bCurrentTurboEDCA;
- u32 prv_traffic_idx; /* edca turbo */
-};
-
-struct odm_rate_adapt {
- u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
- u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
- u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
- u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
- u32 LastRATR; /* RATR Register Content */
-};
-
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM_MAX 10
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
-#define AVG_THERMAL_NUM 8
-#define IQK_Matrix_REG_NUM 8
-#define IQK_Matrix_Settings_NUM	(1 + 24 + 21)
-
-#define DM_Type_ByFW 0
-#define DM_Type_ByDriver 1
-
-/* Declare for common info */
-
-struct odm_phy_dbg_info {
- /* ODM Write,debug info */
- s8 RxSNRdB[RF_PATH_MAX];
- u64 NumQryPhyStatus;
- u64 NumQryPhyStatusCCK;
- u64 NumQryPhyStatusOFDM;
- /* Others */
- s32 RxEVM[RF_PATH_MAX];
-
-};
-
-struct odm_packet_info {
- u8 Rate;
- u8 StationID;
- bool bPacketMatchBSSID;
- bool bPacketToSelf;
- bool bPacketBeacon;
-};
-
-
-enum {
- /* BB Team */
- ODM_DIG = 0x00000001,
- ODM_HIGH_POWER = 0x00000002,
- ODM_CCK_CCA_TH = 0x00000004,
- ODM_FA_STATISTICS = 0x00000008,
- ODM_RAMASK = 0x00000010,
- ODM_RSSI_MONITOR = 0x00000020,
- ODM_SW_ANTDIV = 0x00000040,
- ODM_HW_ANTDIV = 0x00000080,
- ODM_BB_PWRSV = 0x00000100,
- ODM_2TPATHDIV = 0x00000200,
- ODM_1TPATHDIV = 0x00000400,
- ODM_PSD2AFH = 0x00000800
-};
-
-/* */
-/* 2011/10/20 MH Define Common info enum for all team. */
-/* */
-
-enum odm_cmninfo {
- /* Fixed value: */
- /* */
-
- ODM_CMNINFO_MP_TEST_CHIP = 2,
- ODM_CMNINFO_IC_TYPE, /* enum odm_ic_type_def */
- ODM_CMNINFO_CUT_VER, /* enum odm_cut_version */
- ODM_CMNINFO_FAB_VER, /* enum odm_fab_version */
- ODM_CMNINFO_BOARD_TYPE, /* enum odm_board_type */
- ODM_CMNINFO_EXT_LNA, /* true */
- ODM_CMNINFO_EXT_PA,
- ODM_CMNINFO_EXT_TRSW,
- ODM_CMNINFO_BINHCT_TEST,
- ODM_CMNINFO_BWIFI_TEST,
- ODM_CMNINFO_SMART_CONCURRENT,
-
-
- /* */
- /* Dynamic value: */
- /* */
- ODM_CMNINFO_MP_MODE,
-
- ODM_CMNINFO_WIFI_DIRECT,
- ODM_CMNINFO_WIFI_DISPLAY,
- ODM_CMNINFO_LINK,
- ODM_CMNINFO_RSSI_MIN,
- ODM_CMNINFO_DBG_COMP, /* u64 */
- ODM_CMNINFO_DBG_LEVEL, /* u32 */
- ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
- ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
- ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
- ODM_CMNINFO_BT_DISABLED,
- ODM_CMNINFO_BT_OPERATION,
- ODM_CMNINFO_BT_DIG,
-	ODM_CMNINFO_BT_BUSY, /* Check whether BT is busy or not */
- ODM_CMNINFO_BT_DISABLE_EDCA,
-
- /* */
- /* Dynamic ptr array hook itms. */
- /* */
- ODM_CMNINFO_STA_STATUS,
- ODM_CMNINFO_PHY_STATUS,
- ODM_CMNINFO_MAC_STATUS,
-
- ODM_CMNINFO_MAX,
-};
-
-/* Define ODM support ability. ODM_CMNINFO_ABILITY */
-enum {
- /* BB ODM section BIT 0-15 */
- ODM_BB_ANT_DIV = BIT(6),
-};
-
-/* ODM_CMNINFO_INTERFACE */
-enum odm_interface_def {
- ODM_ITRF_PCIE = 0x1,
- ODM_ITRF_USB = 0x2,
- ODM_ITRF_SDIO = 0x4,
- ODM_ITRF_ALL = 0x7,
-};
-
-/* ODM_CMNINFO_IC_TYPE */
-enum odm_ic_type_def {
- ODM_RTL8192S = BIT(0),
- ODM_RTL8192C = BIT(1),
- ODM_RTL8192D = BIT(2),
- ODM_RTL8723A = BIT(3),
- ODM_RTL8188E = BIT(4),
- ODM_RTL8812 = BIT(5),
- ODM_RTL8821 = BIT(6),
-};
-
-/* ODM_CMNINFO_CUT_VER */
-enum odm_cut_version {
- ODM_CUT_A = 1,
- ODM_CUT_B = 2,
- ODM_CUT_C = 3,
- ODM_CUT_D = 4,
- ODM_CUT_E = 5,
- ODM_CUT_F = 6,
- ODM_CUT_TEST = 7,
-};
-
-/* ODM_CMNINFO_FAB_VER */
-enum odm_fab_version {
- ODM_TSMC = 0,
- ODM_UMC = 1,
-};
-
-/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
-enum rf_path_def {
- ODM_RF_TX_A = BIT(0),
- ODM_RF_TX_B = BIT(1),
- ODM_RF_TX_C = BIT(2),
- ODM_RF_TX_D = BIT(3),
- ODM_RF_RX_A = BIT(4),
- ODM_RF_RX_B = BIT(5),
- ODM_RF_RX_C = BIT(6),
- ODM_RF_RX_D = BIT(7),
-};
-
-/* ODM Dynamic common info value definition */
-
-enum odm_mac_phy_mode {
- ODM_SMSP = 0,
- ODM_DMSP = 1,
- ODM_DMDP = 2,
-};
-
-
-enum odm_bt_coexist {
- ODM_BT_BUSY = 1,
- ODM_BT_ON = 2,
- ODM_BT_OFF = 3,
- ODM_BT_NONE = 4,
-};
-
-/* ODM_CMNINFO_OP_MODE */
-enum odm_operation_mode {
- ODM_NO_LINK = BIT(0),
- ODM_LINK = BIT(1),
- ODM_SCAN = BIT(2),
- ODM_POWERSAVE = BIT(3),
- ODM_AP_MODE = BIT(4),
- ODM_CLIENT_MODE = BIT(5),
- ODM_AD_HOC = BIT(6),
- ODM_WIFI_DIRECT = BIT(7),
- ODM_WIFI_DISPLAY = BIT(8),
-};
-
-/* ODM_CMNINFO_WM_MODE */
-enum odm_wireless_mode {
- ODM_WM_UNKNOW = 0x0,
- ODM_WM_B = BIT(0),
- ODM_WM_G = BIT(1),
- ODM_WM_A = BIT(2),
- ODM_WM_N24G = BIT(3),
- ODM_WM_N5G = BIT(4),
- ODM_WM_AUTO = BIT(5),
- ODM_WM_AC = BIT(6),
-};
-
-/* ODM_CMNINFO_BAND */
-enum odm_band_type {
- ODM_BAND_2_4G = BIT(0),
- ODM_BAND_5G = BIT(1),
-
-};
-
-/* ODM_CMNINFO_SEC_CHNL_OFFSET */
-enum odm_sec_chnl_offset {
- ODM_DONT_CARE = 0,
- ODM_BELOW = 1,
- ODM_ABOVE = 2
-};
-
-/* ODM_CMNINFO_CHNL */
-
-/* ODM_CMNINFO_BOARD_TYPE */
-enum odm_board_type {
- ODM_BOARD_NORMAL = 0,
- ODM_BOARD_HIGHPWR = 1,
- ODM_BOARD_MINICARD = 2,
- ODM_BOARD_SLIM = 3,
- ODM_BOARD_COMBO = 4,
-
-};
-
-/* ODM_CMNINFO_ONE_PATH_CCA */
-enum odm_cca_path {
- ODM_CCA_2R = 0,
- ODM_CCA_1R_A = 1,
- ODM_CCA_1R_B = 2,
-};
-
-struct iqk_matrix_regs_set {
- bool bIQKDone;
- s32 Value[1][IQK_Matrix_REG_NUM];
-};
-
-struct odm_rf_cal_t {
- /* for tx power tracking */
-
- u32 RegA24; /* for TempCCK */
- s32 RegE94;
- s32 RegE9C;
- s32 RegEB4;
- s32 RegEBC;
-
- /* u8 bTXPowerTracking; */
- u8 TXPowercount;
- bool bTXPowerTrackingInit;
- bool bTXPowerTracking;
-	u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking by default */
- u8 TM_Trigger;
- u8 InternalPA5G[2]; /* pathA / pathB */
-
- u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
- u8 ThermalValue;
- u8 ThermalValue_LCK;
- u8 ThermalValue_IQK;
- u8 ThermalValue_DPK;
- u8 ThermalValue_AVG[AVG_THERMAL_NUM];
- u8 ThermalValue_AVG_index;
- u8 ThermalValue_RxGain;
- u8 ThermalValue_Crystal;
- u8 ThermalValue_DPKstore;
- u8 ThermalValue_DPKtrack;
- bool TxPowerTrackingInProgress;
- bool bDPKenable;
-
- bool bReloadtxpowerindex;
- u8 bRfPiEnable;
- u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
-
- u8 bCCKinCH14;
- u8 CCK_index;
- u8 OFDM_index[2];
- bool bDoneTxpower;
-
- u8 ThermalValue_HP[HP_THERMAL_NUM];
- u8 ThermalValue_HP_index;
- struct iqk_matrix_regs_set IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
-
- u8 Delta_IQK;
- u8 Delta_LCK;
-
- /* for IQK */
- u32 RegC04;
- u32 Reg874;
- u32 RegC08;
- u32 RegB68;
- u32 RegB6C;
- u32 Reg870;
- u32 Reg860;
- u32 Reg864;
-
- bool bIQKInitialized;
- bool bLCKInProgress;
- bool bAntennaDetected;
- u32 ADDA_backup[IQK_ADDA_REG_NUM];
- u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
- u32 IQK_BB_backup_recover[9];
- u32 IQK_BB_backup[IQK_BB_REG_NUM];
-
- /* for APK */
- u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
- u8 bAPKdone;
- u8 bAPKThermalMeterIgnore;
- u8 bDPdone;
- u8 bDPPathAOK;
- u8 bDPPathBOK;
-};
-
-enum ant_dif_type {
- NO_ANTDIV = 0xFF,
- CG_TRX_HW_ANTDIV = 0x01,
- CGCS_RX_HW_ANTDIV = 0x02,
- FIXED_HW_ANTDIV = 0x03,
- CG_TRX_SMART_ANTDIV = 0x04,
- CGCS_RX_SW_ANTDIV = 0x05,
-};
-
-/* 2011/09/22 MH Copied from the SD4-defined structure. We use it to support PHY DM integration. */
-struct dm_odm_t {
- /* */
- /* Add for different team use temporarily */
- /* */
- struct rtw_adapter *Adapter; /* For CE/NIC team */
-
- u64 DebugComponents;
- u32 DebugLevel;
-
-/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
- bool bCckHighPower;
- u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
-/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
-
-/* 1 COMMON INFORMATION */
-
- /* Init Value */
-/* HOOK BEFORE REG INIT----------- */
-	/* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
- u32 SupportAbility;
- /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any other type = 1/2/3/... */
- u32 SupportICType;
- /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
- u8 CutVersion;
- /* Fab Version TSMC/UMC = 0/1 */
- u8 FabVersion;
- /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/... = 0/1/2/3/4/... */
- u8 BoardType;
- /* with external LNA NO/Yes = 0/1 */
- u8 ExtLNA;
- /* with external PA NO/Yes = 0/1 */
- u8 ExtPA;
- /* with external TRSW NO/Yes = 0/1 */
- u8 ExtTRSW;
- bool bInHctTest;
- bool bWIFITest;
-
- bool bDualMacSmartConcurrent;
- u32 BK_SupportAbility;
-/* HOOK BEFORE REG INIT----------- */
-
- /* */
- /* Dynamic Value */
- /* */
-/* POINTER REFERENCE----------- */
-
- u8 u8_temp;
- bool bool_temp;
- struct rtw_adapter *PADAPTER_temp;
-
-/* POINTER REFERENCE----------- */
- /* */
-/* CALL BY VALUE------------- */
- bool bWIFI_Direct;
- bool bWIFI_Display;
- bool bLinked;
- u8 RSSI_Min;
- u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
- bool bIsMPChip;
- bool bOneEntryOnly;
- /* Common info for BTDM */
- bool bBtDisabled; /* BT is disabled */
- bool bBtHsOperation; /* BT HS mode is under progress */
- u8 btHsDigVal; /* use BT rssi to decide the DIG value */
- bool bBtDisableEdcaTurbo; /* Under some condition, don't enable the EDCA Turbo */
- bool bBtBusy; /* BT is busy. */
-/* CALL BY VALUE------------- */
-
- /* 2 Define STA info. */
- /* _ODM_STA_INFO */
- /* 2012/01/12 MH For MP, we need to reduce one array pointer for default port.?? */
- struct sta_info * pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
-
- /* Latest packet phy info (ODM write) */
- struct odm_phy_dbg_info PhyDbgInfo;
- /* PHY_INFO_88E PhyInfo; */
-
- /* Latest packet phy info (ODM write) */
- /* MAC_INFO_88E MacInfo; */
-
-	/* Different team independent structure?? */
-
- /* */
- /* TX_RTP_CMN TX_retrpo; */
- /* TX_RTP_88E TX_retrpo; */
- /* TX_RTP_8195 TX_retrpo; */
-
- /* */
- /* ODM Structure */
- /* */
- struct dig_t DM_DigTable;
- struct dynamic_pwr_sav DM_PSTable;
- struct false_alarm_stats FalseAlmCnt;
- struct false_alarm_stats FlaseAlmCntBuddyAdapter;
- struct sw_ant_sw DM_SWAT_Table;
-
- struct edca_turbo DM_EDCA_Table;
- u32 WMMEDCA_BE;
- /* Copy from SD4 structure */
- /* */
- /* ================================================== */
- /* */
-
- /* PSD */
- u8 RSSI_BT; /* come from BT */
- struct odm_rate_adapt RateAdaptive;
-
-
- struct odm_rf_cal_t RFCalibrateInfo;
-}; /* DM_Dynamic_Mechanism_Structure */
-
-enum odm_rf_content {
- odm_radioa_txt = 0x1000,
- odm_radiob_txt = 0x1001,
- odm_radioc_txt = 0x1002,
- odm_radiod_txt = 0x1003
-};
-
-/* Status code */
-enum rt_status {
- RT_STATUS_SUCCESS,
- RT_STATUS_FAILURE,
- RT_STATUS_PENDING,
- RT_STATUS_RESOURCE,
- RT_STATUS_INVALID_CONTEXT,
- RT_STATUS_INVALID_PARAMETER,
- RT_STATUS_NOT_SUPPORT,
- RT_STATUS_OS_API_FAILED,
-};
-
-/* include "odm_function.h" */
-
-/* 3=========================================================== */
-/* 3 DIG */
-/* 3=========================================================== */
-
-enum dm_dig_op {
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_BACKOFF = 2,
- DIG_TYPE_RX_GAIN_MIN = 3,
- DIG_TYPE_RX_GAIN_MAX = 4,
- DIG_TYPE_ENABLE = 5,
- DIG_TYPE_DISABLE = 6,
- DIG_OP_TYPE_MAX
-};
-
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_SCAN_RSSI_TH 0x14 /* scan return issue for LC */
-
-
-#define DM_FALSEALARM_THRESH_LOW 400
-#define DM_FALSEALARM_THRESH_HIGH 1000
-
-#define DM_DIG_MAX_NIC 0x4e
-#define DM_DIG_MIN_NIC 0x1e
-
-#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
-#define DM_DIG_MAX_NIC_HP 0x46
-#define DM_DIG_MIN_NIC_HP 0x2e
-
-#define DM_DIG_MAX_AP_HP 0x42
-#define DM_DIG_MIN_AP_HP 0x30
-
-/* vivi 92c&92d has different definition, 20110504 */
-/* this is for 92c */
-#define DM_DIG_FA_TH0 0x200
-#define DM_DIG_FA_TH1 0x300
-#define DM_DIG_FA_TH2 0x400
-/* this is for 92d */
-#define DM_DIG_FA_TH0_92D 0x100
-#define DM_DIG_FA_TH1_92D 0x400
-#define DM_DIG_FA_TH2_92D 0x600
-
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
-/* 3=========================================================== */
-/* 3 AGC RX High Power Mode */
-/* 3=========================================================== */
-#define LNA_Low_Gain_1 0x64
-#define LNA_Low_Gain_2 0x5A
-#define LNA_Low_Gain_3 0x58
-
-#define FA_RXHP_TH1 5000
-#define FA_RXHP_TH2 1500
-#define FA_RXHP_TH3 800
-#define FA_RXHP_TH4 600
-#define FA_RXHP_TH5 500
-
-/* 3=========================================================== */
-/* 3 EDCA */
-/* 3=========================================================== */
-
-/* 3=========================================================== */
-/* 3 Dynamic Tx Power */
-/* 3=========================================================== */
-/* Dynamic Tx Power Control Threshold */
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
-
-#define TxHighPwrLevel_Normal 0
-#define TxHighPwrLevel_Level1 1
-#define TxHighPwrLevel_Level2 2
-#define TxHighPwrLevel_BT1 3
-#define TxHighPwrLevel_BT2 4
-#define TxHighPwrLevel_15 5
-#define TxHighPwrLevel_35 6
-#define TxHighPwrLevel_50 7
-#define TxHighPwrLevel_70 8
-#define TxHighPwrLevel_100 9
-
-/* 3=========================================================== */
-/* 3 Rate Adaptive */
-/* 3=========================================================== */
-#define DM_RATR_STA_INIT 0
-#define DM_RATR_STA_HIGH 1
-#define DM_RATR_STA_MIDDLE 2
-#define DM_RATR_STA_LOW 3
-
-/* 3=========================================================== */
-/* 3 BB Power Save */
-/* 3=========================================================== */
-
-
-enum dm_1r_cca {
- CCA_1R =0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
-enum dm_rf_def {
- RF_Save =0,
- RF_Normal = 1,
- RF_MAX = 2,
-};
-
-/* 3=========================================================== */
-/* 3 Antenna Diversity */
-/* 3=========================================================== */
-enum dm_swas {
- Antenna_A = 1,
- Antenna_B = 2,
- Antenna_MAX = 3,
-};
-
-/* Maximal number of times the antenna detection mechanism needs to run, added by Roger, 2011.12.28. */
-#define MAX_ANTENNA_DETECTION_CNT 10
-
-/* */
-/* Extern Global Variables. */
-/* */
-#define OFDM_TABLE_SIZE_92C 37
-#define OFDM_TABLE_SIZE_92D 43
-#define CCK_TABLE_SIZE 33
-
-extern u32 OFDMSwingTable23A[OFDM_TABLE_SIZE_92D];
-extern u8 CCKSwingTable_Ch1_Ch1323A[CCK_TABLE_SIZE][8];
-extern u8 CCKSwingTable_Ch1423A [CCK_TABLE_SIZE][8];
-
-
-
-/* 20100514 Joseph: Add definition for antenna switching test after link. */
-/* This indicates two different steps. */
-/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the signal on the air. */
-/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in SWAW_STEP_PEAK */
-/* with original RSSI to determine if it is necessary to switch antenna. */
-#define SWAW_STEP_PEAK 0
-#define SWAW_STEP_DETERMINE 1
-
-struct hal_data_8723a;
-
-void ODM_Write_DIG23a(struct dm_odm_t *pDM_Odm, u8 CurrentIGI);
-void ODM_Write_CCK_CCA_Thres23a(struct dm_odm_t *pDM_Odm, u8 CurCCK_CCAThres);
-
-void ODM_SetAntenna(struct dm_odm_t *pDM_Odm, u8 Antenna);
-
-
-#define dm_RF_Saving ODM_RF_Saving23a
-void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal);
-
-#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck23a
-void ODM_TXPowerTrackingCheck23a(struct dm_odm_t *pDM_Odm);
-
-bool ODM_RAStateCheck23a(struct dm_odm_t *pDM_Odm, s32 RSSI, bool bForceUpdate,
- u8 *pRATRState);
-
-
-u32 ConvertTo_dB23a(u32 Value);
-
-u32 GetPSDData(struct dm_odm_t *pDM_Odm, unsigned int point, u8 initial_gain_psd);
-
-void odm_DIG23abyRSSI_LPS(struct dm_odm_t *pDM_Odm);
-
-u32 ODM_Get_Rate_Bitmap23a(struct hal_data_8723a *pHalData, u32 macid, u32 ra_mask, u8 rssi_level);
-
-
-void ODM23a_DMInit(struct dm_odm_t *pDM_Odm);
-
-void ODM_DMWatchdog23a(struct rtw_adapter *adapter);
-
-void ODM_CmnInfoInit23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo, u32 Value);
-
-void ODM_CmnInfoPtrArrayHook23a(struct dm_odm_t *pDM_Odm, enum odm_cmninfo CmnInfo, u16 Index, void *pValue);
-
-void ODM_CmnInfoUpdate23a(struct dm_odm_t *pDM_Odm, u32 CmnInfo, u64 Value);
-
-void ODM_ResetIQKResult(struct dm_odm_t *pDM_Odm);
-
-void ODM_AntselStatistics_88C(struct dm_odm_t *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate);
-
-void ODM_SingleDualAntennaDefaultSetting(struct dm_odm_t *pDM_Odm);
-
-bool ODM_SingleDualAntennaDetection(struct dm_odm_t *pDM_Odm, u8 mode);
-
-#endif
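struct odm_rate_adapt above documents a threshold scheme: RSSI above HighRSSIThresh selects DM_RATR_STA_HIGH, RSSI at or below LowRSSIThresh selects DM_RATR_STA_LOW, and anything in between falls in the middle band. A standalone simplification of that decision; the driver's ODM_RAStateCheck23a is the authoritative implementation and may apply hysteresis this sketch omits:

#include <stdint.h>

enum ratr_state { RATR_STA_HIGH, RATR_STA_MIDDLE, RATR_STA_LOW };

/* simplified mapping of an RSSI reading to a rate-adaptation level */
static enum ratr_state ra_state_from_rssi(int rssi, int high_th, int low_th)
{
	if (rssi > high_th)
		return RATR_STA_HIGH;
	if (rssi <= low_th)
		return RATR_STA_LOW;
	return RATR_STA_MIDDLE;
}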
diff --git a/drivers/staging/rtl8723au/include/odm_HWConfig.h b/drivers/staging/rtl8723au/include/odm_HWConfig.h
deleted file mode 100644
index c748d5fb47fa..000000000000
--- a/drivers/staging/rtl8723au/include/odm_HWConfig.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-
-#ifndef __HALHWOUTSRC_H__
-#define __HALHWOUTSRC_H__
-
-#include <Hal8723APhyCfg.h>
-
-/* */
-/* Definition */
-/* */
-/* */
-/* */
-/* CCK Rates, TxHT = 0 */
-#define DESC92C_RATE1M 0x00
-#define DESC92C_RATE2M 0x01
-#define DESC92C_RATE5_5M 0x02
-#define DESC92C_RATE11M 0x03
-
-/* OFDM Rates, TxHT = 0 */
-#define DESC92C_RATE6M 0x04
-#define DESC92C_RATE9M 0x05
-#define DESC92C_RATE12M 0x06
-#define DESC92C_RATE18M 0x07
-#define DESC92C_RATE24M 0x08
-#define DESC92C_RATE36M 0x09
-#define DESC92C_RATE48M 0x0a
-#define DESC92C_RATE54M 0x0b
-
-/* MCS Rates, TxHT = 1 */
-#define DESC92C_RATEMCS0 0x0c
-#define DESC92C_RATEMCS1 0x0d
-#define DESC92C_RATEMCS2 0x0e
-#define DESC92C_RATEMCS3 0x0f
-#define DESC92C_RATEMCS4 0x10
-#define DESC92C_RATEMCS5 0x11
-#define DESC92C_RATEMCS6 0x12
-#define DESC92C_RATEMCS7 0x13
-#define DESC92C_RATEMCS8 0x14
-#define DESC92C_RATEMCS9 0x15
-#define DESC92C_RATEMCS10 0x16
-#define DESC92C_RATEMCS11 0x17
-#define DESC92C_RATEMCS12 0x18
-#define DESC92C_RATEMCS13 0x19
-#define DESC92C_RATEMCS14 0x1a
-#define DESC92C_RATEMCS15 0x1b
-#define DESC92C_RATEMCS15_SG 0x1c
-#define DESC92C_RATEMCS32 0x20
-
-
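The descriptor rate indices above partition cleanly: 0x00-0x03 are CCK, 0x04-0x0b are OFDM, and 0x0c upwards are HT MCS entries. A minimal standalone sketch that classifies a few sample indices by those boundaries (the helper function and the sample values are illustrative, not part of the driver):

#include <stdio.h>
#include <stdint.h>

#define DESC92C_RATE1M   0x00   /* first CCK index */
#define DESC92C_RATE6M   0x04   /* first OFDM index */
#define DESC92C_RATEMCS0 0x0c   /* first HT MCS index */

/* Classify a DESC92C rate index by the boundaries visible in the header. */
static const char *desc92c_rate_class(uint8_t idx)
{
	if (idx < DESC92C_RATE6M)
		return "CCK";
	if (idx < DESC92C_RATEMCS0)
		return "OFDM";
	return "HT MCS";
}

int main(void)
{
	const uint8_t samples[] = { 0x03, 0x0b, 0x13 };	/* 11M, 54M, MCS7 */
	unsigned i;

	for (i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i], desc92c_rate_class(samples[i]));
	return 0;
}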
-/* */
-/* structure and define */
-/* */
-
-struct phy_rx_agc_info {
- #ifdef __LITTLE_ENDIAN
- u8 gain:7, trsw:1;
- #else
- u8 trsw:1, gain:7;
- #endif
-};
-
-struct phy_status_rpt {
- struct phy_rx_agc_info path_agc[RF_PATH_MAX];
- u8 ch_corr[RF_PATH_MAX];
- u8 cck_sig_qual_ofdm_pwdb_all;
- u8 cck_agc_rpt_ofdm_cfosho_a;
- u8 cck_rpt_b_ofdm_cfosho_b;
- u8 rsvd_1;/* ch_corr_msb; */
- u8 noise_power_db_msb;
- u8 path_cfotail[RF_PATH_MAX];
- u8 pcts_mask[RF_PATH_MAX];
- s8 stream_rxevm[RF_PATH_MAX];
- u8 path_rxsnr[RF_PATH_MAX];
- u8 noise_power_db_lsb;
- u8 rsvd_2[3];
- u8 stream_csi[RF_PATH_MAX];
- u8 stream_target_csi[RF_PATH_MAX];
- s8 sig_evm;
- u8 rsvd_3;
-
-#ifdef __LITTLE_ENDIAN
- u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
- u8 sgi_en:1;
- u8 rxsc:2;
- u8 idle_long:1;
- u8 r_ant_train_en:1;
- u8 ant_sel_b:1;
- u8 ant_sel:1;
-#else /* _BIG_ENDIAN_ */
- u8 ant_sel:1;
- u8 ant_sel_b:1;
- u8 r_ant_train_en:1;
- u8 idle_long:1;
- u8 rxsc:2;
- u8 sgi_en:1;
- u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
-#endif
-};
-
-
-struct phy_status_rpt_8195 {
- struct phy_rx_agc_info path_agc[2];
- u8 ch_num[2];
- u8 cck_sig_qual_ofdm_pwdb_all;
- u8 cck_agc_rpt_ofdm_cfosho_a;
- u8 cck_bb_pwr_ofdm_cfosho_b;
- u8 cck_rx_path; /* CCK_RX_PATH [3:0] (with regA07[3:0] definition) */
- u8 rsvd_1;
- u8 path_cfotail[2];
- u8 pcts_mask[2];
- s8 stream_rxevm[2];
- u8 path_rxsnr[2];
- u8 rsvd_2[2];
- u8 stream_snr[2];
- u8 stream_csi[2];
- u8 rsvd_3[2];
- s8 sig_evm;
- u8 rsvd_4;
-#ifdef __LITTLE_ENDIAN
- u8 antidx_anta:3;
- u8 antidx_antb:3;
- u8 rsvd_5:2;
-#else /* _BIG_ENDIAN_ */
- u8 rsvd_5:2;
- u8 antidx_antb:3;
- u8 antidx_anta:3;
-#endif
-};
-
-
-void
-ODM_PhyStatusQuery23a(
- struct dm_odm_t *pDM_Odm,
- struct phy_info *pPhyInfo,
- u8 * pPhyStatus,
- struct odm_packet_info *pPktinfo
- );
-
-#endif
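The phy_rx_agc_info bitfield above packs each RF path's AGC report into a single byte: a 7-bit gain plus a 1-bit TR-switch flag, with both orderings provided because bitfield layout depends on endianness and compiler. A minimal standalone sketch, assuming a little-endian host and an invented raw byte, showing how such a byte decodes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Little-endian layout copied from the header; ordering is compiler-dependent. */
struct phy_rx_agc_info {
	uint8_t gain:7, trsw:1;
};

int main(void)
{
	uint8_t raw = 0xA5;		/* hypothetical AGC report byte */
	struct phy_rx_agc_info agc;

	memcpy(&agc, &raw, sizeof(agc));
	printf("gain=%u trsw=%u\n", (unsigned)agc.gain, (unsigned)agc.trsw);
	return 0;
}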
diff --git a/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h b/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h
deleted file mode 100644
index f2a54d829ed5..000000000000
--- a/drivers/staging/rtl8723au/include/odm_RegConfig8723A.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __INC_ODM_REGCONFIG_H_8723A
-#define __INC_ODM_REGCONFIG_H_8723A
-
-void odm_ConfigRFReg_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u32 Data,
- enum RF_RADIO_PATH RF_PATH, u32 RegAddr);
-
-void odm_ConfigMAC_8723A(struct dm_odm_t *pDM_Odm, u32 Addr, u8 Data);
-
-void odm_ConfigBB_AGC_8723A(struct dm_odm_t *pDM_Odm, u32 addr, u32 data);
-
-void odm_ConfigBB_PHY_8723A(struct dm_odm_t *pDM_Odm, u32 addr, u32 data);
-
-#endif /* __INC_ODM_REGCONFIG_H_8723A */
diff --git a/drivers/staging/rtl8723au/include/odm_RegDefine11N.h b/drivers/staging/rtl8723au/include/odm_RegDefine11N.h
deleted file mode 100644
index 27782154f502..000000000000
--- a/drivers/staging/rtl8723au/include/odm_RegDefine11N.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#ifndef __ODM_REGDEFINE11N_H__
-#define __ODM_REGDEFINE11N_H__
-
-
-/* 2 RF REG LIST */
-#define ODM_REG_RF_MODE_11N 0x00
-#define ODM_REG_RF_0B_11N 0x0B
-#define ODM_REG_CHNBW_11N 0x18
-#define ODM_REG_T_METER_11N 0x24
-#define ODM_REG_RF_25_11N 0x25
-#define ODM_REG_RF_26_11N 0x26
-#define ODM_REG_RF_27_11N 0x27
-#define ODM_REG_RF_2B_11N 0x2B
-#define ODM_REG_RF_2C_11N 0x2C
-#define ODM_REG_RXRF_A3_11N 0x3C
-#define ODM_REG_T_METER_92D_11N 0x42
-#define ODM_REG_T_METER_88E_11N 0x42
-
-
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-#define ODM_REG_BB_CTRL_11N 0x800
-#define ODM_REG_RF_PIN_11N 0x804
-#define ODM_REG_PSD_CTRL_11N 0x808
-#define ODM_REG_TX_ANT_CTRL_11N 0x80C
-#define ODM_REG_BB_PWR_SAV5_11N 0x818
-#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_RX_DEFUALT_B_11N 0x85A
-#define ODM_REG_BB_PWR_SAV3_11N 0x85C
-#define ODM_REG_ANTSEL_CTRL_11N 0x860
-#define ODM_REG_RX_ANT_CTRL_11N 0x864
-#define ODM_REG_PIN_CTRL_11N 0x870
-#define ODM_REG_BB_PWR_SAV1_11N 0x874
-#define ODM_REG_ANTSEL_PATH_11N 0x878
-#define ODM_REG_BB_3WIRE_11N 0x88C
-#define ODM_REG_SC_CNT_11N 0x8C4
-#define ODM_REG_PSD_DATA_11N 0x8B4
-/* PAGE 9 */
-#define ODM_REG_ANT_MAPPING1_11N 0x914
-#define ODM_REG_ANT_MAPPING2_11N 0x918
-/* PAGE A */
-#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
-#define ODM_REG_CCK_CCA_11N 0xA0A
-#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
-#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
-#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
-#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
-#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
-#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
-#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
-#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
-#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
-#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
-#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
-#define ODM_REG_CCK_FA_RST_11N 0xA2C
-#define ODM_REG_CCK_FA_MSB_11N 0xA58
-#define ODM_REG_CCK_FA_LSB_11N 0xA5C
-#define ODM_REG_CCK_CCA_CNT_11N 0xA60
-#define ODM_REG_BB_PWR_SAV4_11N 0xA74
-/* PAGE B */
-#define ODM_REG_LNA_SWITCH_11N 0xB2C
-#define ODM_REG_PATH_SWITCH_11N 0xB30
-#define ODM_REG_RSSI_CTRL_11N 0xB38
-#define ODM_REG_CONFIG_ANTA_11N 0xB68
-#define ODM_REG_RSSI_BT_11N 0xB9C
-/* PAGE C */
-#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
-#define ODM_REG_RX_PATH_11N 0xC04
-#define ODM_REG_TRMUX_11N 0xC08
-#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
-#define ODM_REG_RXIQI_MATRIX_11N 0xC14
-#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
-#define ODM_REG_IGI_A_11N 0xC50
-#define ODM_REG_ANTDIV_PARA2_11N 0xC54
-#define ODM_REG_IGI_B_11N 0xC58
-#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
-#define ODM_REG_BB_PWR_SAV2_11N 0xC70
-#define ODM_REG_RX_OFF_11N 0xC7C
-#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
-#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
-#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
-#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
-#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
-#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
-#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
-/* PAGE D */
-#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
-#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
-#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
-#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
-/* PAGE E */
-#define ODM_REG_TXAGC_A_6_18_11N 0xE00
-#define ODM_REG_TXAGC_A_24_54_11N 0xE04
-#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
-#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
-#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
-#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
-#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
-#define ODM_REG_FPGA0_IQK_11N 0xE28
-#define ODM_REG_TXIQK_TONE_A_11N 0xE30
-#define ODM_REG_RXIQK_TONE_A_11N 0xE34
-#define ODM_REG_TXIQK_PI_A_11N 0xE38
-#define ODM_REG_RXIQK_PI_A_11N 0xE3C
-#define ODM_REG_TXIQK_11N 0xE40
-#define ODM_REG_RXIQK_11N 0xE44
-#define ODM_REG_IQK_AGC_PTS_11N 0xE48
-#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
-#define ODM_REG_BLUETOOTH_11N 0xE6C
-#define ODM_REG_RX_WAIT_CCA_11N 0xE70
-#define ODM_REG_TX_CCK_RFON_11N 0xE74
-#define ODM_REG_TX_CCK_BBON_11N 0xE78
-#define ODM_REG_OFDM_RFON_11N 0xE7C
-#define ODM_REG_OFDM_BBON_11N 0xE80
-#define ODM_REG_TX2RX_11N 0xE84
-#define ODM_REG_TX2TX_11N 0xE88
-#define ODM_REG_RX_CCK_11N 0xE8C
-#define ODM_REG_RX_OFDM_11N 0xED0
-#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
-#define ODM_REG_RX2RX_11N 0xED8
-#define ODM_REG_STANDBY_11N 0xEDC
-#define ODM_REG_SLEEP_11N 0xEE0
-#define ODM_REG_PMPD_ANAEN_11N 0xEEC
-
-
-
-
-
-
-
-/* 2 MAC REG LIST */
-#define ODM_REG_BB_RST_11N 0x02
-#define ODM_REG_ANTSEL_PIN_11N 0x4C
-#define ODM_REG_EARLY_MODE_11N 0x4D0
-#define ODM_REG_RSSI_MONITOR_11N 0x4FE
-#define ODM_REG_EDCA_VO_11N 0x500
-#define ODM_REG_EDCA_VI_11N 0x504
-#define ODM_REG_EDCA_BE_11N 0x508
-#define ODM_REG_EDCA_BK_11N 0x50C
-#define ODM_REG_TXPAUSE_11N 0x522
-#define ODM_REG_RESP_TX_11N 0x6D8
-#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
-#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
-
-
-/* DIG Related */
-#define ODM_BIT_IGI_11N 0x0000007F
-
-#endif
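Most of the constants above are plain register addresses, but the DIG pair at the end shows the typical usage pattern: the initial gain index (IGI) occupies the low seven bits of register 0xC50 (0xC58 for path B). A minimal standalone sketch, with an invented register value standing in for the BB read the driver would actually perform:

#include <stdio.h>
#include <stdint.h>

#define ODM_REG_IGI_A_11N	0xC50
#define ODM_BIT_IGI_11N		0x0000007F

int main(void)
{
	uint32_t reg_val = 0x6954341e;	/* hypothetical raw read of 0xC50 */
	uint8_t igi = reg_val & ODM_BIT_IGI_11N;

	printf("IGI (reg 0x%X, path A) = 0x%02x\n",
	       (unsigned)ODM_REG_IGI_A_11N, (unsigned)igi);
	return 0;
}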
diff --git a/drivers/staging/rtl8723au/include/odm_debug.h b/drivers/staging/rtl8723au/include/odm_debug.h
deleted file mode 100644
index c4b375a6f409..000000000000
--- a/drivers/staging/rtl8723au/include/odm_debug.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-
-#ifndef __ODM_DBG_H__
-#define __ODM_DBG_H__
-
-
-/* */
-/* Define the debug levels */
-/* */
-/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
-/* They help SW engineers develop and trace state changes, */
-/* and also help HW engineers trace every operation to and from HW, */
-/* e.g. IO, Tx, Rx. */
-/* */
-/* 2. DBG_WARNING and DBG_SERIOUS are used for unusual or error cases, */
-/* which help us to debug SW or HW. */
-/* */
-/* */
-/* */
-/* Never used in a call to ODM_RT_TRACE()! */
-/* */
-#define ODM_DBG_OFF 1
-
-/* */
-/* Fatal bug. */
-/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
-/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
-/* */
-#define ODM_DBG_SERIOUS 2
-
-/* */
-/* Abnormal, rare, or unexpected cases. */
-/* For example, IRP/Packet/OID canceled, device unexpectedly removed and so on. */
-/* */
-#define ODM_DBG_WARNING 3
-
-/* */
-/* Normal case with useful information about current SW or HW state. */
-/* For example, Tx/Rx descriptor to fill, Tx/Rx descriptor completed status, */
-/* SW protocol state change, dynamic mechanism state change and so on. */
-/* */
-#define ODM_DBG_LOUD 4
-
-/* */
-/* Normal case with detail execution flow or information. */
-/* */
-#define ODM_DBG_TRACE 5
-
-/* */
-/* Define the tracing components */
-/* */
-/* */
-/* BB Functions */
-#define ODM_COMP_DIG BIT(0)
-#define ODM_COMP_RA_MASK BIT(1)
-#define ODM_COMP_DYNAMIC_TXPWR BIT(2)
-#define ODM_COMP_FA_CNT BIT(3)
-#define ODM_COMP_RSSI_MONITOR BIT(4)
-#define ODM_COMP_CCK_PD BIT(5)
-#define ODM_COMP_ANT_DIV BIT(6)
-#define ODM_COMP_PWR_SAVE BIT(7)
-#define ODM_COMP_PWR_TRAIN BIT(8)
-#define ODM_COMP_RATE_ADAPTIVE BIT(9)
-#define ODM_COMP_PATH_DIV BIT(10)
-#define ODM_COMP_PSD BIT(11)
-#define ODM_COMP_DYNAMIC_PRICCA BIT(12)
-#define ODM_COMP_RXHP BIT(13)
-/* MAC Functions */
-#define ODM_COMP_EDCA_TURBO BIT(16)
-#define ODM_COMP_EARLY_MODE BIT(17)
-/* RF Functions */
-#define ODM_COMP_TX_PWR_TRACK BIT(24)
-#define ODM_COMP_RX_GAIN_TRACK BIT(25)
-#define ODM_COMP_CALIBRATION BIT(26)
-/* Common Functions */
-#define ODM_COMP_COMMON BIT(30)
-#define ODM_COMP_INIT BIT(31)
-
-/*------------------------Export Macro Definition---------------------------*/
- #define RT_PRINTK(fmt, args...) printk("%s(): " fmt, __func__, ## args);
-
-#ifndef ASSERT
- #define ASSERT(expr)
-#endif
-
-#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
- if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
- { \
- printk("[ODM-8723A] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
- if(!(expr)) { \
- printk("Assertion failed! %s at ......\n", #expr); \
- printk(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
- RT_PRINTK fmt; \
- ASSERT(false); \
- }
-
-void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm);
-
-#endif /* __ODM_DBG_H__ */
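ODM_RT_TRACE gates every message on two fields of struct dm_odm_t: a per-component bitmask (DebugComponents) and a verbosity threshold (DebugLevel). The sketch below is a simplified userspace stand-in rather than the driver macro itself: printf replaces printk, two globals replace the struct fields, and the body is wrapped in do/while(0), but the filtering logic is the same.

#include <stdio.h>
#include <stdint.h>

#define BIT(x)			(1U << (x))
#define ODM_COMP_DIG		BIT(0)
#define ODM_COMP_RA_MASK	BIT(1)
#define ODM_DBG_LOUD		4
#define ODM_DBG_TRACE		5

/* Stand-ins for pDM_Odm->DebugComponents and pDM_Odm->DebugLevel. */
static uint32_t DebugComponents = ODM_COMP_DIG;
static uint32_t DebugLevel = ODM_DBG_LOUD;

/* Emit only if the component bit is enabled and the level is low enough. */
#define DM_TRACE(comp, level, fmt, ...)					\
	do {								\
		if (((comp) & DebugComponents) && (level) <= DebugLevel) \
			printf("[ODM-8723A] " fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	DM_TRACE(ODM_COMP_DIG, ODM_DBG_LOUD, "IGI updated to %d\n", 0x20);  /* printed */
	DM_TRACE(ODM_COMP_RA_MASK, ODM_DBG_LOUD, "RA mask updated\n");      /* dropped: component off */
	DM_TRACE(ODM_COMP_DIG, ODM_DBG_TRACE, "verbose DIG state dump\n");  /* dropped: level too high */
	return 0;
}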
diff --git a/drivers/staging/rtl8723au/include/odm_interface.h b/drivers/staging/rtl8723au/include/odm_interface.h
deleted file mode 100644
index 1d3bf03b59ea..000000000000
--- a/drivers/staging/rtl8723au/include/odm_interface.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-
-#ifndef __ODM_INTERFACE_H__
-#define __ODM_INTERFACE_H__
-
-
-/* _cat: implemented by Token-Pasting Operator. */
-
-/*===================================
-
-#define ODM_REG_DIG_11N 0xC50
-#define ODM_REG_DIG_11AC 0xDDD
-
-ODM_REG(DIG,_pDM_Odm)
-=====================================*/
-
-#define _reg_11N(_name) ODM_REG_##_name##_11N
-#define _reg_11AC(_name) ODM_REG_##_name##_11AC
-#define _bit_11N(_name) ODM_BIT_##_name##_11N
-#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
-
-#define _cat(_name, _func) \
- ( \
- _func##_11N(_name) \
- )
-
-/* _name: name of register or bit. */
-/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
-/* gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C", depends on SupportICType. */
-#define ODM_REG(_name, _pDM_Odm) _cat(_name, _reg)
-#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _bit)
-
-/* */
-/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitem. */
-/* Suggest the HW team use a thread instead of a workitem. Windows also supports the feature. */
-/* */
-typedef void (*RT_WORKITEM_CALL_BACK)(struct work_struct *pContext);
-
-/* */
-/* =========== Extern Function Prototypes */
-/* */
-
-void ODM_SetRFReg(struct dm_odm_t *pDM_Odm, enum RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-u32 ODM_GetRFReg(struct dm_odm_t *pDM_Odm, enum RF_RADIO_PATH eRFPath,
- u32 RegAddr, u32 BitMask);
-
-#endif /* __ODM_INTERFACE_H__ */
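The _cat()/ODM_REG() macros rely on token pasting: the register name is glued to an _11N suffix, so ODM_REG(DIG, pDM_Odm) expands to ODM_REG_DIG_11N at preprocessing time (the pDM_Odm argument is unused in this 11N-only variant). A self-contained demo using the 0xC50 value from the header's own comment block:

#include <stdio.h>

#define ODM_REG_DIG_11N		0xC50

#define _reg_11N(_name)		ODM_REG_##_name##_11N
#define _cat(_name, _func)	(_func##_11N(_name))
#define ODM_REG(_name, _pDM_Odm) _cat(_name, _reg)

int main(void)
{
	/* _cat(DIG, _reg) -> _reg_11N(DIG) -> ODM_REG_DIG_11N -> 0xC50 */
	printf("ODM_REG(DIG) = 0x%X\n", (unsigned)ODM_REG(DIG, NULL));
	return 0;
}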
diff --git a/drivers/staging/rtl8723au/include/odm_precomp.h b/drivers/staging/rtl8723au/include/odm_precomp.h
deleted file mode 100644
index fb793c8ba7f8..000000000000
--- a/drivers/staging/rtl8723au/include/odm_precomp.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#ifndef __ODM_PRECOMP_H__
-#define __ODM_PRECOMP_H__
-
-/* 2 Config Flags and Structs - defined by each ODM Type */
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <hal_intf.h>
-
-
-/* 2 Hardware Parameter Files */
-#include "Hal8723UHWImg_CE.h"
-
-
-/* 2 OutSrc Header Files */
-
-#include "odm.h"
-#include "odm_HWConfig.h"
-#include "odm_debug.h"
-#include "odm_RegDefine11N.h"
-
-#include "HalDMOutSrc8723A.h" /* for IQK,LCK,Power-tracking */
-#include "rtl8723a_hal.h"
-
-#include "odm_interface.h"
-#include "odm_reg.h"
-
-#include "HalHWImg8723A_MAC.h"
-#include "HalHWImg8723A_RF.h"
-#include "HalHWImg8723A_BB.h"
-#include "HalHWImg8723A_FW.h"
-#include "odm_RegConfig8723A.h"
-
-#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8723au/include/odm_reg.h b/drivers/staging/rtl8723au/include/odm_reg.h
deleted file mode 100644
index c18433120fe8..000000000000
--- a/drivers/staging/rtl8723au/include/odm_reg.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-/* */
-/* File Name: odm_reg.h */
-/* */
-/* Description: */
-/* */
-/* This file is for general register definition. */
-/* */
-/* */
-/* */
-#ifndef __HAL_ODM_REG_H__
-#define __HAL_ODM_REG_H__
-
-/* */
-/* Register Definition */
-/* */
-
-/* MAC REG */
-#define ODM_BB_RESET 0x002
-#define ODM_DUMMY 0x4fe
-#define ODM_EDCA_VO_PARAM 0x500
-#define ODM_EDCA_VI_PARAM 0x504
-#define ODM_EDCA_BE_PARAM 0x508
-#define ODM_EDCA_BK_PARAM 0x50C
-#define ODM_TXPAUSE 0x522
-
-/* BB REG */
-#define ODM_FPGA_PHY0_PAGE8 0x800
-#define ODM_PSD_SETTING 0x808
-#define ODM_AFE_SETTING 0x818
-#define ODM_TXAGC_B_6_18 0x830
-#define ODM_TXAGC_B_24_54 0x834
-#define ODM_TXAGC_B_MCS32_5 0x838
-#define ODM_TXAGC_B_MCS0_MCS3 0x83c
-#define ODM_TXAGC_B_MCS4_MCS7 0x848
-#define ODM_TXAGC_B_MCS8_MCS11 0x84c
-#define ODM_ANALOG_REGISTER 0x85c
-#define ODM_RF_INTERFACE_OUTPUT 0x860
-#define ODM_TXAGC_B_MCS12_MCS15 0x868
-#define ODM_TXAGC_B_11_A_2_11 0x86c
-#define ODM_AD_DA_LSB_MASK 0x874
-#define ODM_ENABLE_3_WIRE 0x88c
-#define ODM_PSD_REPORT 0x8b4
-#define ODM_R_ANT_SELECT 0x90c
-#define ODM_CCK_ANT_SELECT 0xa07
-#define ODM_CCK_PD_THRESH 0xa0a
-#define ODM_CCK_RF_REG1 0xa11
-#define ODM_CCK_MATCH_FILTER 0xa20
-#define ODM_CCK_RAKE_MAC 0xa2e
-#define ODM_CCK_CNT_RESET 0xa2d
-#define ODM_CCK_TX_DIVERSITY 0xa2f
-#define ODM_CCK_FA_CNT_MSB 0xa5b
-#define ODM_CCK_FA_CNT_LSB 0xa5c
-#define ODM_CCK_NEW_FUNCTION 0xa75
-#define ODM_OFDM_PHY0_PAGE_C 0xc00
-#define ODM_OFDM_RX_ANT 0xc04
-#define ODM_R_A_RXIQI 0xc14
-#define ODM_R_A_AGC_CORE1 0xc50
-#define ODM_R_A_AGC_CORE2 0xc54
-#define ODM_R_B_AGC_CORE1 0xc58
-#define ODM_R_AGC_PAR 0xc70
-#define ODM_R_HTSTF_AGC_PAR 0xc7c
-#define ODM_TX_PWR_TRAINING_A 0xc90
-#define ODM_TX_PWR_TRAINING_B 0xc98
-#define ODM_OFDM_FA_CNT1 0xcf0
-#define ODM_OFDM_PHY0_PAGE_D 0xd00
-#define ODM_OFDM_FA_CNT2 0xda0
-#define ODM_OFDM_FA_CNT3 0xda4
-#define ODM_OFDM_FA_CNT4 0xda8
-#define ODM_TXAGC_A_6_18 0xe00
-#define ODM_TXAGC_A_24_54 0xe04
-#define ODM_TXAGC_A_1_MCS32 0xe08
-#define ODM_TXAGC_A_MCS0_MCS3 0xe10
-#define ODM_TXAGC_A_MCS4_MCS7 0xe14
-#define ODM_TXAGC_A_MCS8_MCS11 0xe18
-#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
-
-/* RF REG */
-#define ODM_GAIN_SETTING 0x00
-#define ODM_CHANNEL 0x18
-
-/* Ant Detect Reg */
-#define ODM_DPDT 0x300
-
-/* PSD Init */
-#define ODM_PSDREG 0x808
-
-/* 92D Path Div */
-#define PATHDIV_REG 0xB30
-#define PATHDIV_TRI 0xBA0
-
-/* */
-/* Bitmap Definition */
-/* */
-
-#define BIT_FA_RESET BIT(0)
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/osdep_intf.h b/drivers/staging/rtl8723au/include/osdep_intf.h
deleted file mode 100644
index a157eb2e78df..000000000000
--- a/drivers/staging/rtl8723au/include/osdep_intf.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#ifndef __OSDEP_INTF_H_
-#define __OSDEP_INTF_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-int rtw_init_drv_sw23a(struct rtw_adapter *padapter);
-int rtw_free_drv_sw23a(struct rtw_adapter *padapter);
-int rtw_reset_drv_sw23a(struct rtw_adapter *padapter);
-
-void rtw_cancel_all_timer23a(struct rtw_adapter *padapter);
-
-int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname);
-struct net_device *rtw_init_netdev23a(struct rtw_adapter *padapter);
-
-u16 rtw_recv_select_queue23a(struct sk_buff *skb);
-
-void rtw_ips_dev_unload23a(struct rtw_adapter *padapter);
-
-int rtw_ips_pwr_up23a(struct rtw_adapter *padapter);
-void rtw_ips_pwr_down23a(struct rtw_adapter *padapter);
-
-int rtw_drv_register_netdev(struct rtw_adapter *padapter);
-void rtw_ndev_destructor(struct net_device *ndev);
-
-int rtl8723au_inirp_init(struct rtw_adapter *Adapter);
-int rtl8723au_inirp_deinit(struct rtw_adapter *Adapter);
-void rtl8723a_usb_intf_stop(struct rtw_adapter *padapter);
-
-#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/rtl8723au/include/osdep_service.h b/drivers/staging/rtl8723au/include/osdep_service.h
deleted file mode 100644
index 98250b12e9f2..000000000000
--- a/drivers/staging/rtl8723au/include/osdep_service.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __OSDEP_SERVICE_H_
-#define __OSDEP_SERVICE_H_
-
-#define _FAIL 0
-#define _SUCCESS 1
-#define RTW_RX_HANDLED 2
-
-#include <linux/spinlock.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/atomic.h>
-#include <linux/io.h>
-#include <linux/semaphore.h>
-#include <linux/sem.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/if_arp.h>
-#include <linux/rtnetlink.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h> /* for struct tasklet_struct */
-#include <linux/ip.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/cfg80211.h>
-
-struct rtw_queue {
- struct list_head queue;
- spinlock_t lock;
-};
-
-static inline struct list_head *get_list_head(struct rtw_queue *queue)
-{
- return &queue->queue;
-}
-
-static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
-{
- return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
- netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
-}
-
-static inline u32 CHKBIT(u32 x)
-{
- WARN_ON(x >= 32);
- if (x >= 32)
- return 0;
- return BIT(x);
-}
-
-extern unsigned char MCS_rate_2R23A[16];
-extern unsigned char MCS_rate_1R23A[16];
-
-void _rtw_init_queue23a(struct rtw_queue *pqueue);
-
-/* Macros for handling unaligned memory accesses */
-
-#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
- ((u32) (a)[2]))
-
-#endif
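Two of the helpers above are easy to exercise on their own: RTW_GET_BE24() assembles a 24-bit big-endian field from a byte stream and CHKBIT() guards BIT() against out-of-range shift counts. A standalone sketch, with WARN_ON replaced by a plain stderr message and an arbitrary three-byte input:

#include <stdio.h>
#include <stdint.h>

#define BIT(x)		(1U << (x))
#define RTW_GET_BE24(a) ((((uint32_t)(a)[0]) << 16) | (((uint32_t)(a)[1]) << 8) | \
			 ((uint32_t)(a)[2]))

/* Userspace version of CHKBIT(): reject shift counts that overflow a u32. */
static inline uint32_t CHKBIT(uint32_t x)
{
	if (x >= 32) {
		fprintf(stderr, "CHKBIT: bit index %u out of range\n", x);
		return 0;
	}
	return BIT(x);
}

int main(void)
{
	const uint8_t buf[3] = { 0x00, 0x50, 0xf2 };	/* e.g. a 3-byte OUI */

	printf("BE24 = 0x%06x\n", RTW_GET_BE24(buf));	/* 0x0050f2 */
	printf("CHKBIT(5) = 0x%08x\n", CHKBIT(5));	/* 0x00000020 */
	printf("CHKBIT(40) = 0x%08x\n", CHKBIT(40));	/* 0, after the warning */
	return 0;
}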
diff --git a/drivers/staging/rtl8723au/include/recv_osdep.h b/drivers/staging/rtl8723au/include/recv_osdep.h
deleted file mode 100644
index c2d3f1bd5948..000000000000
--- a/drivers/staging/rtl8723au/include/recv_osdep.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RECV_OSDEP_H_
-#define __RECV_OSDEP_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-int _rtw_init_recv_priv23a(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
-void _rtw_free_recv_priv23a (struct recv_priv *precvpriv);
-
-int rtw_recv_entry23a(struct recv_frame *precv_frame);
-int rtw_recv_indicatepkt23a(struct rtw_adapter *adapter, struct recv_frame *precv_frame);
-
-void rtw_handle_tkip_mic_err23a(struct rtw_adapter *padapter, u8 bgroup);
-
-int rtw_init_recv_priv(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
-void rtw_free_recv_priv (struct recv_priv *precvpriv);
-
-int rtw_os_recv_resource_init(struct recv_priv *precvpriv, struct rtw_adapter *padapter);
-
-void rtw_init_recv_timer23a(struct recv_reorder_ctrl *preorder_ctrl);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h b/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
deleted file mode 100644
index 7add5dfe015f..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_bt-coexist.h
+++ /dev/null
@@ -1,1627 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_BT_COEXIST_H__
-#define __RTL8723A_BT_COEXIST_H__
-
-#include <drv_types.h>
-#include "odm_precomp.h"
-
-
-/* HEADER/PlatformDef.h */
-enum rt_media_status {
- RT_MEDIA_DISCONNECT = 0,
- RT_MEDIA_CONNECT = 1
-};
-
-/* ===== Below this line is synced from SD7 driver COMMON/BT.h ===== */
-
-#define BT_TMP_BUF_SIZE 100
-
-void BT_SignalCompensation(struct rtw_adapter *padapter,
- u8 *rssi_wifi, u8 *rssi_bt);
-void BT_HaltProcess(struct rtw_adapter *padapter);
-void BT_LpsLeave(struct rtw_adapter *padapter);
-
-
-#define BT_HsConnectionEstablished(Adapter) false
-/* ===== End of sync from SD7 driver COMMON/BT.h ===== */
-
-/* HEADER/SecurityType.h */
-#define TKIP_ENC_KEY_POS 32 /* KEK_LEN+KEK_LEN) */
-#define MAXRSNIELEN 256
-
-/* COMMON/Protocol802_11.h */
-/* */
-/* 802.11 Management frame Status Code field */
-/* */
-struct octet_string {
- u8 *Octet;
- u16 Length;
-};
-
-
-/* AES_CCMP specific */
-enum {
- AESCCMP_BLK_SIZE = 16, /* # octets in an AES block */
- AESCCMP_MAX_PACKET = 4*512, /* largest packet size */
- AESCCMP_N_RESERVED = 0, /* reserved nonce octet value */
- AESCCMP_A_DATA = 0x40, /* the Adata bit in the flags */
- AESCCMP_M_SHIFT = 3, /* how much to shift the 3-bit M field */
- AESCCMP_L_SHIFT = 0, /* how much to shift the 3-bit L field */
- AESCCMP_L_SIZE = 2, /* size of the l(m) length field (in octets) */
- AESCCMP_OFFSET_SC = 22,
- AESCCMP_OFFSET_DURATION = 4,
- AESCCMP_OFFSET_A2 = 10,
- AESCCMP_OFFSET_A4 = 24,
- AESCCMP_QC_TID_MASK = 0x0f,
- AESCCMP_BLK_SIZE_TOTAL = 16*16, /* Added by Annie for CKIP AES MIC BSOD, 2006-08-17. */
- /* 16*8 < 4*60; resolve to 16*16 */
-};
-
-/* Key Length */
-#define PMK_LEN 32
-#define PTK_LEN_TKIP 64
-#define GTK_LEN 32
-#define KEY_NONCE_LEN 32
-
-
-/* COMMON/Dot11d.h */
-struct chnl_txpower_triple {
- u8 FirstChnl;
- u8 NumChnls;
- s8 MaxTxPowerInDbm;
-};
-
-
-/* ===== Below this line is synced from SD7 driver COMMON/bt_hci.h ===== */
-/* The following is for BT 3.0 + HS HCI COMMAND ERRORS CODES */
-
-#define Max80211PALPDUSize 1492
-#define Max80211AMPASSOCLen 672
-#define MinGUserPrio 4
-#define MaxGUserPrio 7
-#define BEUserPrio0 0
-#define BEUserPrio1 3
-#define Max80211BeaconPeriod 2000
-#define ShortRangeModePowerMax 4
-
-#define BT_Default_Chnl 10
-#define ACLDataHeaderLen 4
-
-#define BTTotalDataBlockNum 0x100
-#define BTLocalBufNum 0x200
-#define BTMaxDataBlockLen 0x800
-#define BTTOTALBANDWIDTH 0x7530
-#define BTMAXBANDGUBANDWIDTH 0x4e20
-#define TmpLocalBufSize 0x100
-#define BTSynDataPacketLength 0xff
-/* */
-
-#define BTMaxAuthCount 5
-#define BTMaxAsocCount 5
-
-#define MAX_LOGICAL_LINK_NUM 2 /* temporarily define */
-#define MAX_BT_ASOC_ENTRY_NUM 2 /* temporarily define */
-
-#define INVALID_PL_HANDLE 0xff
-#define INVALID_ENTRY_NUM 0xff
-/* */
-
-#define CAM_BT_START_INDEX (HALF_CAM_ENTRY - 4) /* MAX_BT_ASOC_ENTRY_NUM : 4 !!! */
-#define BT_HWCAM_STAR CAM_BT_START_INDEX /* We used HALF_CAM_ENTRY ~ HALF_CAM_ENTRY -MAX_BT_ASOC_ENTRY_NUM */
-
-enum hci_status {
- HCI_STATUS_SUCCESS = 0x00, /* Success */
- HCI_STATUS_UNKNOW_HCI_CMD = 0x01, /* Unknown HCI Command */
- HCI_STATUS_UNKNOW_CONNECT_ID = 0X02, /* Unknown Connection Identifier */
- HCI_STATUS_HW_FAIL = 0X03, /* Hardware Failure */
- HCI_STATUS_PAGE_TIMEOUT = 0X04, /* Page Timeout */
- HCI_STATUS_AUTH_FAIL = 0X05, /* Authentication Failure */
- HCI_STATUS_PIN_OR_KEY_MISSING = 0X06, /* PIN or Key Missing */
- HCI_STATUS_MEM_CAP_EXCEED = 0X07, /* Memory Capacity Exceeded */
- HCI_STATUS_CONNECT_TIMEOUT = 0X08, /* Connection Timeout */
- HCI_STATUS_CONNECT_LIMIT = 0X09, /* Connection Limit Exceeded */
- HCI_STATUS_SYN_CONNECT_LIMIT = 0X0a, /* Synchronous Connection Limit To A Device Exceeded */
- HCI_STATUS_ACL_CONNECT_EXISTS = 0X0b, /* ACL Connection Already Exists */
- HCI_STATUS_CMD_DISALLOW = 0X0c, /* Command Disallowed */
- HCI_STATUS_CONNECT_RJT_LIMIT_RESOURCE = 0X0d, /* Connection Rejected due to Limited Resources */
- HCI_STATUS_CONNECT_RJT_SEC_REASON = 0X0e, /* Connection Rejected Due To Security Reasons */
- HCI_STATUS_CONNECT_RJT_UNACCEPT_BD_ADDR = 0X0f, /* Connection Rejected due to Unacceptable BD_ADDR */
- HCI_STATUS_CONNECT_ACCEPT_TIMEOUT = 0X10, /* Connection Accept Timeout Exceeded */
- HCI_STATUS_UNSUPPORT_FEATURE_PARA_VALUE = 0X11, /* Unsupported Feature or Parameter Value */
- HCI_STATUS_INVALID_HCI_CMD_PARA_VALUE = 0X12, /* Invalid HCI Command Parameters */
- HCI_STATUS_REMOTE_USER_TERMINATE_CONNECT = 0X13, /* Remote User Terminated Connection */
- HCI_STATUS_REMOTE_DEV_TERMINATE_LOW_RESOURCE = 0X14, /* Remote Device Terminated Connection due to Low Resources */
- HCI_STATUS_REMOTE_DEV_TERMINATE_CONNECT_POWER_OFF = 0X15, /* Remote Device Terminated Connection due to Power Off */
- HCI_STATUS_CONNECT_TERMINATE_LOCAL_HOST = 0X16, /* Connection Terminated By Local Host */
- HCI_STATUS_REPEATE_ATTEMPT = 0X17, /* Repeated Attempts */
- HCI_STATUS_PAIR_NOT_ALLOW = 0X18, /* Pairing Not Allowed */
- HCI_STATUS_UNKNOW_LMP_PDU = 0X19, /* Unknown LMP PDU */
- HCI_STATUS_UNSUPPORT_REMOTE_LMP_FEATURE = 0X1a, /* Unsupported Remote Feature / Unsupported LMP Feature */
- HCI_STATUS_SOC_OFFSET_REJECT = 0X1b, /* SCO Offset Rejected */
- HCI_STATUS_SOC_INTERVAL_REJECT = 0X1c, /* SCO Interval Rejected */
- HCI_STATUS_SOC_AIR_MODE_REJECT = 0X1d,/* SCO Air Mode Rejected */
- HCI_STATUS_INVALID_LMP_PARA = 0X1e, /* Invalid LMP Parameters */
- HCI_STATUS_UNSPECIFIC_ERROR = 0X1f, /* Unspecified Error */
- HCI_STATUS_UNSUPPORT_LMP_PARA_VALUE = 0X20, /* Unsupported LMP Parameter Value */
- HCI_STATUS_ROLE_CHANGE_NOT_ALLOW = 0X21, /* Role Change Not Allowed */
- HCI_STATUS_LMP_RESPONSE_TIMEOUT = 0X22, /* LMP Response Timeout */
- HCI_STATUS_LMP_ERROR_TRANSACTION_COLLISION = 0X23, /* LMP Error Transaction Collision */
- HCI_STATUS_LMP_PDU_NOT_ALLOW = 0X24, /* LMP PDU Not Allowed */
- HCI_STATUS_ENCRYPTION_MODE_NOT_ALLOW = 0X25, /* Encryption Mode Not Acceptable */
- HCI_STATUS_LINK_KEY_CAN_NOT_CHANGE = 0X26, /* Link Key Can Not be Changed */
- HCI_STATUS_REQUEST_QOS_NOT_SUPPORT = 0X27, /* Requested QoS Not Supported */
- HCI_STATUS_INSTANT_PASSED = 0X28, /* Instant Passed */
- HCI_STATUS_PAIRING_UNIT_KEY_NOT_SUPPORT = 0X29, /* Pairing With Unit Key Not Supported */
- HCI_STATUS_DIFFERENT_TRANSACTION_COLLISION = 0X2a, /* Different Transaction Collision */
- HCI_STATUS_RESERVE_1 = 0X2b, /* Reserved */
- HCI_STATUS_QOS_UNACCEPT_PARA = 0X2c, /* QoS Unacceptable Parameter */
- HCI_STATUS_QOS_REJECT = 0X2d, /* QoS Rejected */
- HCI_STATUS_CHNL_CLASSIFICATION_NOT_SUPPORT = 0X2e, /* Channel Classification Not Supported */
- HCI_STATUS_INSUFFICIENT_SECURITY = 0X2f, /* Insufficient Security */
- HCI_STATUS_PARA_OUT_OF_RANGE = 0x30, /* Parameter Out Of Mandatory Range */
- HCI_STATUS_RESERVE_2 = 0X31, /* Reserved */
- HCI_STATUS_ROLE_SWITCH_PENDING = 0X32, /* Role Switch Pending */
- HCI_STATUS_RESERVE_3 = 0X33, /* Reserved */
- HCI_STATUS_RESERVE_SOLT_VIOLATION = 0X34, /* Reserved Slot Violation */
- HCI_STATUS_ROLE_SWITCH_FAIL = 0X35, /* Role Switch Failed */
- HCI_STATUS_EXTEND_INQUIRY_RSP_TOO_LARGE = 0X36, /* Extended Inquiry Response Too Large */
- HCI_STATUS_SEC_SIMPLE_PAIRING_NOT_SUPPORT = 0X37, /* Secure Simple Pairing Not Supported By Host. */
- HCI_STATUS_HOST_BUSY_PAIRING = 0X38, /* Host Busy - Pairing */
- HCI_STATUS_CONNECT_REJ_NOT_SUIT_CHNL_FOUND = 0X39, /* Connection Rejected due to No Suitable Channel Found */
- HCI_STATUS_CONTROLLER_BUSY = 0X3a /* CONTROLLER BUSY */
-};
-
-/* */
-/* The following is for BT 3.0 + HS HCI COMMAND */
-/* */
-
-/* bit 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
-/* | OCF | OGF | */
-/* */
-
-/* OGF 0x01 */
-#define LINK_CONTROL_COMMANDS 0x01
-enum link_control_commands {
- HCI_INQUIRY = 0x0001,
- HCI_INQUIRY_CANCEL = 0x0002,
- HCI_PERIODIC_INQUIRY_MODE = 0x0003,
- HCI_EXIT_PERIODIC_INQUIRY_MODE = 0x0004,
- HCI_CREATE_CONNECTION = 0x0005,
- HCI_DISCONNECT = 0x0006,
- HCI_CREATE_CONNECTION_CANCEL = 0x0008,
- HCI_ACCEPT_CONNECTIONREQUEST = 0x0009,
- HCI_REJECT_CONNECTION_REQUEST = 0x000a,
- HCI_LINK_KEY_REQUEST_REPLY = 0x000b,
- HCI_LINK_KEY_REQUEST_NEGATIVE_REPLY = 0x000c,
- HCI_PIN_CODE_REQUEST_REPLY = 0x000d,
- HCI_PIN_CODE_REQUEST_NEGATIVE_REPLY = 0x000e,
- HCI_CHANGE_CONNECTION_PACKET_TYPE = 0x000f,
- HCI_AUTHENTICATION_REQUESTED = 0x0011,
- HCI_SET_CONNECTION_ENCRYPTION = 0x0013,
- HCI_CHANGE_CONNECTION_LINK_KEY = 0x0015,
- HCI_MASTER_LINK_KEY = 0x0017,
- HCI_REMOTE_NAME_REQUEST = 0x0019,
- HCI_REMOTE_NAME_REQUEST_CANCEL = 0x001a,
- HCI_READ_REMOTE_SUPPORTED_FEATURES = 0x001b,
- HCI_READ_REMOTE_EXTENDED_FEATURES = 0x001c,
- HCI_READ_REMOTE_VERSION_INFORMATION = 0x001d,
- HCI_READ_CLOCK_OFFSET = 0x001f,
- HCI_READ_LMP_HANDLE = 0x0020,
- HCI_SETUP_SYNCHRONOUS_CONNECTION = 0x0028,
- HCI_ACCEPT_SYNCHRONOUS_CONNECTION_REQUEST = 0x0029,
- HCI_REJECT_SYNCHRONOUS_CONNECTION_REQUEST = 0x002a,
- HCI_IO_CAPABILITY_REQUEST_REPLY = 0x002b,
- HCI_USER_CONFIRMATION_REQUEST_REPLY = 0x002c,
- HCI_USER_CONFIRMATION_REQUEST_NEGATIVE_REPLY = 0x002d,
- HCI_USER_PASSKEY_REQUEST_REPLY = 0x002e,
- HCI_USER_PASSKEY_REQUESTNEGATIVE_REPLY = 0x002f,
- HCI_REMOTE_OOB_DATA_REQUEST_REPLY = 0x0030,
- HCI_REMOTE_OOB_DATA_REQUEST_NEGATIVE_REPLY = 0x0033,
- HCI_IO_CAPABILITY_REQUEST_NEGATIVE_REPLY = 0x0034,
- HCI_CREATE_PHYSICAL_LINK = 0x0035,
- HCI_ACCEPT_PHYSICAL_LINK = 0x0036,
- HCI_DISCONNECT_PHYSICAL_LINK = 0x0037,
- HCI_CREATE_LOGICAL_LINK = 0x0038,
- HCI_ACCEPT_LOGICAL_LINK = 0x0039,
- HCI_DISCONNECT_LOGICAL_LINK = 0x003a,
- HCI_LOGICAL_LINK_CANCEL = 0x003b,
- HCI_FLOW_SPEC_MODIFY = 0x003c
-};
-
-/* OGF 0x02 */
-#define HOLD_MODE_COMMAND 0x02
-enum hold_mode_command {
- HCI_HOLD_MODE = 0x0001,
- HCI_SNIFF_MODE = 0x0002,
- HCI_EXIT_SNIFF_MODE = 0x0003,
- HCI_PARK_STATE = 0x0005,
- HCI_EXIT_PARK_STATE = 0x0006,
- HCI_QOS_SETUP = 0x0007,
- HCI_ROLE_DISCOVERY = 0x0009,
- HCI_SWITCH_ROLE = 0x000b,
- HCI_READ_LINK_POLICY_SETTINGS = 0x000c,
- HCI_WRITE_LINK_POLICY_SETTINGS = 0x000d,
- HCI_READ_DEFAULT_LINK_POLICY_SETTINGS = 0x000e,
- HCI_WRITE_DEFAULT_LINK_POLICY_SETTINGS = 0x000f,
- HCI_FLOW_SPECIFICATION = 0x0010,
- HCI_SNIFF_SUBRATING = 0x0011
-};
-
-/* OGF 0x03 */
-#define OGF_SET_EVENT_MASK_COMMAND 0x03
-enum set_event_mask_command {
- HCI_SET_EVENT_MASK = 0x0001,
- HCI_RESET = 0x0003,
- HCI_SET_EVENT_FILTER = 0x0005,
- HCI_FLUSH = 0x0008,
- HCI_READ_PIN_TYPE = 0x0009,
- HCI_WRITE_PIN_TYPE = 0x000a,
- HCI_CREATE_NEW_UNIT_KEY = 0x000b,
- HCI_READ_STORED_LINK_KEY = 0x000d,
- HCI_WRITE_STORED_LINK_KEY = 0x0011,
- HCI_DELETE_STORED_LINK_KEY = 0x0012,
- HCI_WRITE_LOCAL_NAME = 0x0013,
- HCI_READ_LOCAL_NAME = 0x0014,
- HCI_READ_CONNECTION_ACCEPT_TIMEOUT = 0x0015,
- HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT = 0x0016,
- HCI_READ_PAGE_TIMEOUT = 0x0017,
- HCI_WRITE_PAGE_TIMEOUT = 0x0018,
- HCI_READ_SCAN_ENABLE = 0x0019,
- HCI_WRITE_SCAN_ENABLE = 0x001a,
- HCI_READ_PAGE_SCAN_ACTIVITY = 0x001b,
- HCI_WRITE_PAGE_SCAN_ACTIVITY = 0x001c,
- HCI_READ_INQUIRY_SCAN_ACTIVITY = 0x001d,
- HCI_WRITE_INQUIRY_SCAN_ACTIVITY = 0x001e,
- HCI_READ_AUTHENTICATION_ENABLE = 0x001f,
- HCI_WRITE_AUTHENTICATION_ENABLE = 0x0020,
- HCI_READ_CLASS_OF_DEVICE = 0x0023,
- HCI_WRITE_CLASS_OF_DEVICE = 0x0024,
- HCI_READ_VOICE_SETTING = 0x0025,
- HCI_WRITE_VOICE_SETTING = 0x0026,
- HCI_READ_AUTOMATIC_FLUSH_TIMEOUT = 0x0027,
- HCI_WRITE_AUTOMATIC_FLUSH_TIMEOUT = 0x0028,
- HCI_READ_NUM_BROADCAST_RETRANSMISSIONS = 0x0029,
- HCI_WRITE_NUM_BROADCAST_RETRANSMISSIONS = 0x002a,
- HCI_READ_HOLD_MODE_ACTIVITY = 0x002b,
- HCI_WRITE_HOLD_MODE_ACTIVITY = 0x002c,
- HCI_READ_SYNCHRONOUS_FLOW_CONTROL_ENABLE = 0x002e,
- HCI_WRITE_SYNCHRONOUS_FLOW_CONTROL_ENABLE = 0x002f,
- HCI_SET_CONTROLLER_TO_HOST_FLOW_CONTROL = 0x0031,
- HCI_HOST_BUFFER_SIZE = 0x0033,
- HCI_HOST_NUMBER_OF_COMPLETED_PACKETS = 0x0035,
- HCI_READ_LINK_SUPERVISION_TIMEOUT = 0x0036,
- HCI_WRITE_LINK_SUPERVISION_TIMEOUT = 0x0037,
- HCI_READ_NUMBER_OF_SUPPORTED_IAC = 0x0038,
- HCI_READ_CURRENT_IAC_LAP = 0x0039,
- HCI_WRITE_CURRENT_IAC_LAP = 0x003a,
- HCI_READ_PAGE_SCAN_MODE = 0x003d,
- HCI_WRITE_PAGE_SCAN_MODE = 0x003e,
- HCI_SET_AFH_HOST_CHANNEL_CLASSIFICATION = 0x003f,
- HCI_READ_INQUIRY_SCAN_TYPE = 0x0042,
- HCI_WRITE_INQUIRY_SCAN_TYPE = 0x0043,
- HCI_READ_INQUIRY_MODE = 0x0044,
- HCI_WRITE_INQUIRY_MODE = 0x0045,
- HCI_READ_PAGE_SCAN_TYPE = 0x0046,
- HCI_WRITE_PAGE_SCAN_TYPE = 0x0047,
- HCI_READ_AFH_CHANNEL_ASSESSMENT_MODE = 0x0048,
- HCI_WRITE_AFH_CHANNEL_ASSESSMENT_MODE = 0x0049,
- HCI_READ_EXTENDED_INQUIRY_RESPONSE = 0x0051,
- HCI_WRITE_EXTENDED_INQUIRY_RESPONSE = 0x0052,
- HCI_REFRESH_ENCRYPTION_KEY = 0x0053,
- HCI_READ_SIMPLE_PAIRING_MODE = 0x0055,
- HCI_WRITE_SIMPLE_PAIRING_MODE = 0x0056,
- HCI_READ_LOCAL_OOB_DATA = 0x0057,
- HCI_READ_INQUIRY_RESPONSE_TRANSMIT_POWER_LEVEL = 0x0058,
- HCI_WRITE_INQUIRY_TRANSMIT_POWER_LEVEL = 0x0059,
- HCI_READ_DEFAULT_ERRONEOUS_DATA_REPORTING = 0x005a,
- HCI_WRITE_DEFAULT_ERRONEOUS_DATA_REPORTING = 0x005b,
- HCI_ENHANCED_FLUSH = 0x005f,
- HCI_SEND_KEYPRESS_NOTIFICATION = 0x0060,
- HCI_READ_LOGICAL_LINK_ACCEPT_TIMEOUT = 0x0061,
- HCI_WRITE_LOGICAL_LINK_ACCEPT_TIMEOUT = 0x0062,
- HCI_SET_EVENT_MASK_PAGE_2 = 0x0063,
- HCI_READ_LOCATION_DATA = 0x0064,
- HCI_WRITE_LOCATION_DATA = 0x0065,
- HCI_READ_FLOW_CONTROL_MODE = 0x0066,
- HCI_WRITE_FLOW_CONTROL_MODE = 0x0067,
- HCI_READ_ENHANCE_TRANSMIT_POWER_LEVEL = 0x0068,
- HCI_READ_BEST_EFFORT_FLUSH_TIMEOUT = 0x0069,
- HCI_WRITE_BEST_EFFORT_FLUSH_TIMEOUT = 0x006a,
- HCI_SHORT_RANGE_MODE = 0x006b
-};
-
-/* OGF 0x04 */
-#define OGF_INFORMATIONAL_PARAMETERS 0x04
-enum informational_params {
- HCI_READ_LOCAL_VERSION_INFORMATION = 0x0001,
- HCI_READ_LOCAL_SUPPORTED_COMMANDS = 0x0002,
- HCI_READ_LOCAL_SUPPORTED_FEATURES = 0x0003,
- HCI_READ_LOCAL_EXTENDED_FEATURES = 0x0004,
- HCI_READ_BUFFER_SIZE = 0x0005,
- HCI_READ_BD_ADDR = 0x0009,
- HCI_READ_DATA_BLOCK_SIZE = 0x000a
-};
-
-/* OGF 0x05 */
-#define OGF_STATUS_PARAMETERS 0x05
-enum status_params {
- HCI_READ_FAILED_CONTACT_COUNTER = 0x0001,
- HCI_RESET_FAILED_CONTACT_COUNTER = 0x0002,
- HCI_READ_LINK_QUALITY = 0x0003,
- HCI_READ_RSSI = 0x0005,
- HCI_READ_AFH_CHANNEL_MAP = 0x0006,
- HCI_READ_CLOCK = 0x0007,
- HCI_READ_ENCRYPTION_KEY_SIZE = 0x0008,
- HCI_READ_LOCAL_AMP_INFO = 0x0009,
- HCI_READ_LOCAL_AMP_ASSOC = 0x000a,
- HCI_WRITE_REMOTE_AMP_ASSOC = 0x000b
-};
-
-/* OGF 0x06 */
-#define OGF_TESTING_COMMANDS 0x06
-enum testing_commands {
- HCI_READ_LOOPBACK_MODE = 0x0001,
- HCI_WRITE_LOOPBACK_MODE = 0x0002,
- HCI_ENABLE_DEVICE_UNDER_TEST_MODE = 0x0003,
- HCI_WRITE_SIMPLE_PAIRING_DEBUG_MODE = 0x0004,
- HCI_ENABLE_AMP_RECEIVER_REPORTS = 0x0007,
- HCI_AMP_TEST_END = 0x0008,
- HCI_AMP_TEST_COMMAND = 0x0009
-};
-
-/* OGF 0x3f */
-#define OGF_EXTENSION 0X3f
-enum hci_extension_commands {
- HCI_SET_ACL_LINK_DATA_FLOW_MODE = 0x0010,
- HCI_SET_ACL_LINK_STATUS = 0x0020,
- HCI_SET_SCO_LINK_STATUS = 0x0030,
- HCI_SET_RSSI_VALUE = 0x0040,
- HCI_SET_CURRENT_BLUETOOTH_STATUS = 0x0041,
-
- /* The following is for RTK8723 */
- HCI_EXTENSION_VERSION_NOTIFY = 0x0100,
- HCI_LINK_STATUS_NOTIFY = 0x0101,
- HCI_BT_OPERATION_NOTIFY = 0x0102,
- HCI_ENABLE_WIFI_SCAN_NOTIFY = 0x0103,
-
-
- /* The following is for IVT */
- HCI_WIFI_CURRENT_CHANNEL = 0x0300,
- HCI_WIFI_CURRENT_BANDWIDTH = 0x0301,
- HCI_WIFI_CONNECTION_STATUS = 0x0302,
-};
-
-enum bt_spec {
- BT_SPEC_1_0_b = 0x00,
- BT_SPEC_1_1 = 0x01,
- BT_SPEC_1_2 = 0x02,
- BT_SPEC_2_0_EDR = 0x03,
- BT_SPEC_2_1_EDR = 0x04,
- BT_SPEC_3_0_HS = 0x05,
- BT_SPEC_4_0 = 0x06
-};
-
-/* The following is for BT 3.0 + HS EVENTS */
-enum hci_event {
- HCI_EVENT_INQUIRY_COMPLETE = 0x01,
- HCI_EVENT_INQUIRY_RESULT = 0x02,
- HCI_EVENT_CONNECTION_COMPLETE = 0x03,
- HCI_EVENT_CONNECTION_REQUEST = 0x04,
- HCI_EVENT_DISCONNECTION_COMPLETE = 0x05,
- HCI_EVENT_AUTHENTICATION_COMPLETE = 0x06,
- HCI_EVENT_REMOTE_NAME_REQUEST_COMPLETE = 0x07,
- HCI_EVENT_ENCRYPTION_CHANGE = 0x08,
- HCI_EVENT_CHANGE_LINK_KEY_COMPLETE = 0x09,
- HCI_EVENT_MASTER_LINK_KEY_COMPLETE = 0x0a,
- HCI_EVENT_READ_REMOTE_SUPPORT_FEATURES_COMPLETE = 0x0b,
- HCI_EVENT_READ_REMOTE_VER_INFO_COMPLETE = 0x0c,
- HCI_EVENT_QOS_SETUP_COMPLETE = 0x0d,
- HCI_EVENT_COMMAND_COMPLETE = 0x0e,
- HCI_EVENT_COMMAND_STATUS = 0x0f,
- HCI_EVENT_HARDWARE_ERROR = 0x10,
- HCI_EVENT_FLUSH_OCCRUED = 0x11,
- HCI_EVENT_ROLE_CHANGE = 0x12,
- HCI_EVENT_NUMBER_OF_COMPLETE_PACKETS = 0x13,
- HCI_EVENT_MODE_CHANGE = 0x14,
- HCI_EVENT_RETURN_LINK_KEYS = 0x15,
- HCI_EVENT_PIN_CODE_REQUEST = 0x16,
- HCI_EVENT_LINK_KEY_REQUEST = 0x17,
- HCI_EVENT_LINK_KEY_NOTIFICATION = 0x18,
- HCI_EVENT_LOOPBACK_COMMAND = 0x19,
- HCI_EVENT_DATA_BUFFER_OVERFLOW = 0x1a,
- HCI_EVENT_MAX_SLOTS_CHANGE = 0x1b,
- HCI_EVENT_READ_CLOCK_OFFSET_COMPLETE = 0x1c,
- HCI_EVENT_CONNECT_PACKET_TYPE_CHANGE = 0x1d,
- HCI_EVENT_QOS_VIOLATION = 0x1e,
- HCI_EVENT_PAGE_SCAN_REPETITION_MODE_CHANGE = 0x20,
- HCI_EVENT_FLOW_SEPC_COMPLETE = 0x21,
- HCI_EVENT_INQUIRY_RESULT_WITH_RSSI = 0x22,
- HCI_EVENT_READ_REMOTE_EXT_FEATURES_COMPLETE = 0x23,
- HCI_EVENT_SYNC_CONNECT_COMPLETE = 0x2c,
- HCI_EVENT_SYNC_CONNECT_CHANGE = 0x2d,
- HCI_EVENT_SNIFFER_SUBRATING = 0x2e,
- HCI_EVENT_EXTENTED_INQUIRY_RESULT = 0x2f,
- HCI_EVENT_ENCRYPTION_KEY_REFLASH_COMPLETE = 0x30,
- HCI_EVENT_IO_CAPIBILITY_COMPLETE = 0x31,
- HCI_EVENT_IO_CAPIBILITY_RESPONSE = 0x32,
- HCI_EVENT_USER_CONFIRMTION_REQUEST = 0x33,
- HCI_EVENT_USER_PASSKEY_REQUEST = 0x34,
- HCI_EVENT_REMOTE_OOB_DATA_REQUEST = 0x35,
- HCI_EVENT_SIMPLE_PAIRING_COMPLETE = 0x36,
- HCI_EVENT_LINK_SUPERVISION_TIMEOUT_CHANGE = 0x38,
- HCI_EVENT_ENHANCED_FLUSH_COMPLETE = 0x39,
- HCI_EVENT_USER_PASSKEY_NOTIFICATION = 0x3b,
- HCI_EVENT_KEYPRESS_NOTIFICATION = 0x3c,
- HCI_EVENT_REMOTE_HOST_SUPPORT_FEATURES_NOTIFICATION = 0x3d,
- HCI_EVENT_PHY_LINK_COMPLETE = 0x40,
- HCI_EVENT_CHANNEL_SELECT = 0x41,
- HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE = 0x42,
- HCI_EVENT_PHY_LINK_LOSS_EARLY_WARNING = 0x43,
- HCI_EVENT_PHY_LINK_RECOVER = 0x44,
- HCI_EVENT_LOGICAL_LINK_COMPLETE = 0x45,
- HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE = 0x46,
- HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE = 0x47,
- HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS = 0x48,
- HCI_EVENT_AMP_START_TEST = 0x49,
- HCI_EVENT_AMP_TEST_END = 0x4a,
- HCI_EVENT_AMP_RECEIVER_REPORT = 0x4b,
- HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE = 0x4c,
- HCI_EVENT_AMP_STATUS_CHANGE = 0x4d,
- HCI_EVENT_EXTENSION_RTK = 0xfe,
- HCI_EVENT_EXTENSION_MOTO = 0xff,
-};
-
-enum hci_extension_event_moto {
- HCI_EVENT_GET_BT_RSSI = 0x01,
-};
-
-enum hci_extension_event {
- HCI_EVENT_EXT_WIFI_SCAN_NOTIFY = 0x01,
-};
-
-enum hci_event_mask_page_2 {
- EMP2_HCI_EVENT_PHY_LINK_COMPLETE = 0x0000000000000001,
- EMP2_HCI_EVENT_CHANNEL_SELECT = 0x0000000000000002,
- EMP2_HCI_EVENT_DISCONNECT_PHY_LINK_COMPLETE = 0x0000000000000004,
- EMP2_HCI_EVENT_PHY_LINK_LOSS_EARLY_WARNING = 0x0000000000000008,
- EMP2_HCI_EVENT_PHY_LINK_RECOVER = 0x0000000000000010,
- EMP2_HCI_EVENT_LOGICAL_LINK_COMPLETE = 0x0000000000000020,
- EMP2_HCI_EVENT_DISCONNECT_LOGICAL_LINK_COMPLETE = 0x0000000000000040,
- EMP2_HCI_EVENT_FLOW_SPEC_MODIFY_COMPLETE = 0x0000000000000080,
- EMP2_HCI_EVENT_NUM_OF_COMPLETE_DATA_BLOCKS = 0x0000000000000100,
- EMP2_HCI_EVENT_AMP_START_TEST = 0x0000000000000200,
- EMP2_HCI_EVENT_AMP_TEST_END = 0x0000000000000400,
- EMP2_HCI_EVENT_AMP_RECEIVER_REPORT = 0x0000000000000800,
- EMP2_HCI_EVENT_SHORT_RANGE_MODE_CHANGE_COMPLETE = 0x0000000000001000,
- EMP2_HCI_EVENT_AMP_STATUS_CHANGE = 0x0000000000002000,
-};
-
-enum hci_state_machine {
- HCI_STATE_STARTING = 0x01,
- HCI_STATE_CONNECTING = 0x02,
- HCI_STATE_AUTHENTICATING = 0x04,
- HCI_STATE_CONNECTED = 0x08,
- HCI_STATE_DISCONNECTING = 0x10,
- HCI_STATE_DISCONNECTED = 0x20
-};
-
-enum amp_assoc_structure_type {
- AMP_MAC_ADDR = 0x01,
- AMP_PREFERRED_CHANNEL_LIST = 0x02,
- AMP_CONNECTED_CHANNEL = 0x03,
- AMP_80211_PAL_CAP_LIST = 0x04,
- AMP_80211_PAL_VISION = 0x05,
- AMP_RESERVED_FOR_TESTING = 0x33
-};
-
-enum amp_btap_type {
- AMP_BTAP_NONE,
- AMP_BTAP_CREATOR,
- AMP_BTAP_JOINER
-};
-
-enum hci_state_with_cmd {
- STATE_CMD_CREATE_PHY_LINK,
- STATE_CMD_ACCEPT_PHY_LINK,
- STATE_CMD_DISCONNECT_PHY_LINK,
- STATE_CMD_CONNECT_ACCEPT_TIMEOUT,
- STATE_CMD_MAC_START_COMPLETE,
- STATE_CMD_MAC_START_FAILED,
- STATE_CMD_MAC_CONNECT_COMPLETE,
- STATE_CMD_MAC_CONNECT_FAILED,
- STATE_CMD_MAC_DISCONNECT_INDICATE,
- STATE_CMD_MAC_CONNECT_CANCEL_INDICATE,
- STATE_CMD_4WAY_FAILED,
- STATE_CMD_4WAY_SUCCESSED,
- STATE_CMD_ENTER_STATE,
- STATE_CMD_NO_SUCH_CMD,
-};
-
-enum hci_service_type {
- SERVICE_NO_TRAFFIC,
- SERVICE_BEST_EFFORT,
- SERVICE_GUARANTEE
-};
-
-enum hci_traffic_mode {
- TRAFFIC_MODE_BEST_EFFORT = 0x00,
- TRAFFIC_MODE_GUARANTEED_LATENCY = 0x01,
- TRAFFIC_MODE_GUARANTEED_BANDWIDTH = 0x02,
- TRAFFIC_MODE_GUARANTEED_LATENCY_AND_BANDWIDTH = 0x03
-};
-
-#define HCIOPCODE(_OCF, _OGF) (_OGF<<10|_OCF)
-#define HCIOPCODELOW(_OCF, _OGF) (u8)(HCIOPCODE(_OCF, _OGF)&0x00ff)
-#define HCIOPCODEHIGHT(_OCF, _OGF) (u8)(HCIOPCODE(_OCF, _OGF)>>8)
-
-#define TWOBYTE_HIGHTBYTE(_DATA) (u8)(_DATA>>8)
-#define TWOBYTE_LOWBYTE(_DATA) (u8)(_DATA)
-
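HCIOPCODE() packs a Bluetooth HCI command opcode as the 6-bit OGF above the 10-bit OCF, and the LOW/HIGHT helpers split it into the two bytes carried in the command packet, low byte first. The sketch below is a cleaned-up local copy (fully parenthesized, with the HIGHT spelling normalized), using an OGF/OCF pair taken from the enums in this header:

#include <stdio.h>
#include <stdint.h>

#define HCIOPCODE(_OCF, _OGF)	  ((_OGF) << 10 | (_OCF))
#define HCIOPCODELOW(_OCF, _OGF)  ((uint8_t)(HCIOPCODE(_OCF, _OGF) & 0x00ff))
#define HCIOPCODEHIGH(_OCF, _OGF) ((uint8_t)(HCIOPCODE(_OCF, _OGF) >> 8))

#define OGF_STATUS_PARAMETERS	0x05	/* from the OGF 0x05 block above */
#define HCI_READ_RSSI		0x0005

int main(void)
{
	uint16_t opcode = HCIOPCODE(HCI_READ_RSSI, OGF_STATUS_PARAMETERS);

	/* 0x05 << 10 | 0x0005 = 0x1405 -> bytes 0x05, 0x14 on the wire */
	printf("opcode = 0x%04x (low 0x%02x, high 0x%02x)\n",
	       (unsigned)opcode,
	       (unsigned)HCIOPCODELOW(HCI_READ_RSSI, OGF_STATUS_PARAMETERS),
	       (unsigned)HCIOPCODEHIGH(HCI_READ_RSSI, OGF_STATUS_PARAMETERS));
	return 0;
}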
-enum amp_status {
- AMP_STATUS_AVA_PHY_PWR_DWN = 0x0,
- AMP_STATUS_BT_USE_ONLY = 0x1,
- AMP_STATUS_NO_CAPACITY_FOR_BT = 0x2,
- AMP_STATUS_LOW_CAPACITY_FOR_BT = 0x3,
- AMP_STATUS_MEDIUM_CAPACITY_FOR_BT = 0x4,
- AMP_STATUS_HIGH_CAPACITY_FOR_BT = 0x5,
- AMP_STATUS_FULL_CAPACITY_FOR_BT = 0x6
-};
-
-enum bt_wpa_msg_type {
- Type_BT_4way1st = 0,
- Type_BT_4way2nd = 1,
- Type_BT_4way3rd = 2,
- Type_BT_4way4th = 3,
- Type_BT_unknow = 4
-};
-
-enum bt_connect_type {
- BT_CONNECT_AUTH_REQ = 0x00,
- BT_CONNECT_AUTH_RSP = 0x01,
- BT_CONNECT_ASOC_REQ = 0x02,
- BT_CONNECT_ASOC_RSP = 0x03,
- BT_DISCONNECT = 0x04
-};
-
-enum bt_ll_service_type {
- BT_LL_BE = 0x01,
- BT_LL_GU = 0x02
-};
-
-enum bt_ll_flowspec {
- BT_TX_BE_FS, /* TX best effort flowspec */
- BT_RX_BE_FS, /* RX best effort flowspec */
- BT_TX_GU_FS, /* TX guaranteed latency flowspec */
- BT_RX_GU_FS, /* RX guaranteed latency flowspec */
- BT_TX_BE_AGG_FS, /* TX aggregated best effort flowspec */
- BT_RX_BE_AGG_FS, /* RX aggregated best effort flowspec */
- BT_TX_GU_BW_FS, /* TX guaranteed bandwidth flowspec */
- BT_RX_GU_BW_FS, /* RX guaranteed bandwidth flowspec */
- BT_TX_GU_LARGE_FS, /* TX guaranteed latency flowspec, for testing only */
- BT_RX_GU_LARGE_FS, /* RX guaranteed latency flowspec, for testing only */
-};
-
-enum bt_traffic_mode {
- BT_MOTOR_EXT_BE = 0x00, /* Best Effort. Default. for HCRP, PAN, SDP, RFCOMM-based profiles like FTP, OPP, SPP, DUN, etc. */
- BT_MOTOR_EXT_GUL = 0x01, /* Guaranteed Latency. This type of traffic is used e.g. for HID and AVRCP. */
- BT_MOTOR_EXT_GUB = 0X02, /* Guaranteed Bandwidth. */
- BT_MOTOR_EXT_GULB = 0X03 /* Guaranteed Latency and Bandwidth. for A2DP and VDP. */
-};
-
-enum bt_traffic_mode_profile {
- BT_PROFILE_NONE,
- BT_PROFILE_A2DP,
- BT_PROFILE_PAN,
- BT_PROFILE_HID,
- BT_PROFILE_SCO
-};
-
-enum bt_link_role {
- BT_LINK_MASTER = 0,
- BT_LINK_SLAVE = 1
-};
-
-enum bt_state_wpa_auth {
- STATE_WPA_AUTH_UNINITIALIZED,
- STATE_WPA_AUTH_WAIT_PACKET_1, /* Join */
- STATE_WPA_AUTH_WAIT_PACKET_2, /* Create */
- STATE_WPA_AUTH_WAIT_PACKET_3,
- STATE_WPA_AUTH_WAIT_PACKET_4,
- STATE_WPA_AUTH_SUCCESSED
-};
-
-#define BT_WPA_AUTH_TIMEOUT_PERIOD 1000
-#define BTMaxWPAAuthReTransmitCoun 5
-
-#define MAX_AMP_ASSOC_FRAG_LEN 248
-#define TOTAL_ALLOCIATE_ASSOC_LEN 1000
-
-struct hci_flow_spec {
- u8 Identifier;
- u8 ServiceType;
- u16 MaximumSDUSize;
- u32 SDUInterArrivalTime;
- u32 AccessLatency;
- u32 FlushTimeout;
-};
-
-struct hci_log_link_cmd_data {
- u8 BtPhyLinkhandle;
- u16 BtLogLinkhandle;
- u8 BtTxFlowSpecID;
- struct hci_flow_spec Tx_Flow_Spec;
- struct hci_flow_spec Rx_Flow_Spec;
- u32 TxPacketCount;
- u32 BestEffortFlushTimeout;
-
- u8 bLLCompleteEventIsSet;
-
- u8 bLLCancelCMDIsSetandComplete;
-};
-
-struct hci_phy_link_cmd_data {
- /* Physical_Link_Handle */
- u8 BtPhyLinkhandle;
-
- u16 LinkSuperversionTimeout;
-
- /* u16 SuperTimeOutCnt; */
-
- /* Dedicated_AMP_Key_Length */
- u8 BtAMPKeyLen;
- /* Dedicated_AMP_Key_Type */
- u8 BtAMPKeyType;
- /* Dedicated_AMP_Key */
- u8 BtAMPKey[PMK_LEN];
-};
-
-struct amp_assoc_structure {
- /* TYPE ID */
- u8 TypeID;
- /* Length */
- u16 Length;
- /* Value */
- u8 Data[1];
-};
-
-struct amp_pref_chnl_regulatory {
- u8 reXId;
- u8 regulatoryClass;
- u8 coverageClass;
-};
-
-struct amp_assoc_cmd_data {
- /* Physical_Link_Handle */
- u8 BtPhyLinkhandle;
- /* Length_So_Far */
- u16 LenSoFar;
-
- u16 MaxRemoteASSOCLen;
- /* AMP_ASSOC_Remaining_Length */
- u16 AMPAssocRemLen;
- /* AMP_ASSOC_fragment */
- void *AMPAssocfragment;
-};
-
-struct hci_link_info {
- u16 ConnectHandle;
- u8 IncomingTrafficMode;
- u8 OutgoingTrafficMode;
- u8 BTProfile;
- u8 BTCoreSpec;
- s8 BT_RSSI;
- u8 TrafficProfile;
- u8 linkRole;
-};
-
-struct hci_ext_config {
- struct hci_link_info linkInfo[MAX_BT_ASOC_ENTRY_NUM];
- u8 btOperationCode;
- u16 CurrentConnectHandle;
- u8 CurrentIncomingTrafficMode;
- u8 CurrentOutgoingTrafficMode;
- s8 MIN_BT_RSSI;
- u8 NumberOfHandle;
- u8 NumberOfSCO;
- u8 CurrentBTStatus;
- u16 HCIExtensionVer;
-
- /* Bt coexist related */
- u8 btProfileCase;
- u8 btProfileAction;
- u8 bManualControl;
- u8 bBTBusy;
- u8 bBTA2DPBusy;
- u8 bEnableWifiScanNotify;
-
- u8 bHoldForBtOperation;
- u32 bHoldPeriodCnt;
-};
-
-struct hci_acl_packet_data {
- u16 ACLDataPacketLen;
- u8 SyncDataPacketLen;
- u16 TotalNumACLDataPackets;
- u16 TotalSyncNumDataPackets;
-};
-
-struct hci_phy_link_bss_info {
- u16 bdCap; /* capability information */
-};
-
-struct packet_irp_hcicmd_data {
- u16 OCF:10;
- u16 OGF:6;
- u8 Length;
- u8 Data[20];
-};
-
-struct bt_asoc_entry {
- u8 bUsed;
- u8 mAssoc;
- u8 b4waySuccess;
- u8 Bssid[6];
- struct hci_phy_link_cmd_data PhyLinkCmdData;
-
- struct hci_log_link_cmd_data LogLinkCmdData[MAX_LOGICAL_LINK_NUM];
-
- struct hci_acl_packet_data ACLPacketsData;
-
- struct amp_assoc_cmd_data AmpAsocCmdData;
- struct octet_string BTSsid;
- u8 BTSsidBuf[33];
-
- enum hci_status PhyLinkDisconnectReason;
-
- u8 bSendSupervisionPacket;
- /* u8 CurrentSuervisionPacketSendNum; */
- /* u8 LastSuervisionPacketSendNum; */
- u32 NoRxPktCnt;
- /* Is Creator or Joiner */
- enum amp_btap_type AMPRole;
-
- /* BT current state */
- u8 BtCurrentState;
- /* BT next state */
- u8 BtNextState;
-
- u8 bNeedPhysLinkCompleteEvent;
-
- enum hci_status PhysLinkCompleteStatus;
-
- u8 BTRemoteMACAddr[6];
-
- u32 BTCapability;
-
- u8 SyncDataPacketLen;
-
- u16 TotalSyncNumDataPackets;
- u16 TotalNumACLDataPackets;
-
- u8 ShortRangeMode;
-
- u8 PTK[PTK_LEN_TKIP];
- u8 GTK[GTK_LEN];
- u8 ANonce[KEY_NONCE_LEN];
- u8 SNonce[KEY_NONCE_LEN];
- u64 KeyReplayCounter;
- u8 WPAAuthReplayCount;
- u8 AESKeyBuf[AESCCMP_BLK_SIZE_TOTAL];
- u8 PMK[PMK_LEN];
- enum bt_state_wpa_auth BTWPAAuthState;
- s32 UndecoratedSmoothedPWDB;
-
- /* Add for HW security !! */
- u8 HwCAMIndex; /* Cam index */
- u8 bPeerQosSta;
-
- u32 rxSuvpPktCnt;
-};
-
-struct bt_traffic_statistics {
- u8 bTxBusyTraffic;
- u8 bRxBusyTraffic;
- u8 bIdle;
- u32 TxPktCntInPeriod;
- u32 RxPktCntInPeriod;
- u64 TxPktLenInPeriod;
- u64 RxPktLenInPeriod;
-};
-
-struct bt_mgnt {
- u8 bBTConnectInProgress;
- u8 bLogLinkInProgress;
- u8 bPhyLinkInProgress;
- u8 bPhyLinkInProgressStartLL;
- u8 BtCurrentPhyLinkhandle;
- u16 BtCurrentLogLinkhandle;
- u8 CurrentConnectEntryNum;
- u8 DisconnectEntryNum;
- u8 CurrentBTConnectionCnt;
- enum bt_connect_type BTCurrentConnectType;
- enum bt_connect_type BTReceiveConnectPkt;
- u8 BTAuthCount;
- u8 BTAsocCount;
- u8 bStartSendSupervisionPkt;
- u8 BtOperationOn;
- u8 BTNeedAMPStatusChg;
- u8 JoinerNeedSendAuth;
- struct hci_phy_link_bss_info bssDesc;
- struct hci_ext_config ExtConfig;
- u8 bNeedNotifyAMPNoCap;
- u8 bCreateSpportQos;
- u8 bSupportProfile;
- u8 BTChannel;
- u8 CheckChnlIsSuit;
- u8 bBtScan;
- u8 btLogoTest;
-};
-
-struct bt_hci_dgb_info {
- u32 hciCmdCnt;
- u32 hciCmdCntUnknown;
- u32 hciCmdCntCreatePhyLink;
- u32 hciCmdCntAcceptPhyLink;
- u32 hciCmdCntDisconnectPhyLink;
- u32 hciCmdPhyLinkStatus;
- u32 hciCmdCntCreateLogLink;
- u32 hciCmdCntAcceptLogLink;
- u32 hciCmdCntDisconnectLogLink;
- u32 hciCmdCntReadLocalAmpAssoc;
- u32 hciCmdCntWriteRemoteAmpAssoc;
- u32 hciCmdCntSetAclLinkStatus;
- u32 hciCmdCntSetScoLinkStatus;
- u32 hciCmdCntExtensionVersionNotify;
- u32 hciCmdCntLinkStatusNotify;
-};
-
-struct bt_irp_dgb_info {
- u32 irpMJCreate;
- /* Io Control */
- u32 irpIoControl;
- u32 irpIoCtrlHciCmd;
- u32 irpIoCtrlHciEvent;
- u32 irpIoCtrlHciTxData;
- u32 irpIoCtrlHciRxData;
- u32 irpIoCtrlUnknown;
-
- u32 irpIoCtrlHciTxData1s;
-};
-
-struct bt_packet_dgb_info {
- u32 btPktTxProbReq;
- u32 btPktRxProbReq;
- u32 btPktRxProbReqFail;
- u32 btPktTxProbRsp;
- u32 btPktRxProbRsp;
- u32 btPktTxAuth;
- u32 btPktRxAuth;
- u32 btPktRxAuthButDrop;
- u32 btPktTxAssocReq;
- u32 btPktRxAssocReq;
- u32 btPktRxAssocReqButDrop;
- u32 btPktTxAssocRsp;
- u32 btPktRxAssocRsp;
- u32 btPktTxDisassoc;
- u32 btPktRxDisassoc;
- u32 btPktRxDeauth;
- u32 btPktTx4way1st;
- u32 btPktRx4way1st;
- u32 btPktTx4way2nd;
- u32 btPktRx4way2nd;
- u32 btPktTx4way3rd;
- u32 btPktRx4way3rd;
- u32 btPktTx4way4th;
- u32 btPktRx4way4th;
- u32 btPktTxLinkSuperReq;
- u32 btPktRxLinkSuperReq;
- u32 btPktTxLinkSuperRsp;
- u32 btPktRxLinkSuperRsp;
- u32 btPktTxData;
- u32 btPktRxData;
-};
-
-struct bt_dgb {
- u8 dbgCtrl;
- u32 dbgProfile;
- struct bt_hci_dgb_info dbgHciInfo;
- struct bt_irp_dgb_info dbgIrpInfo;
- struct bt_packet_dgb_info dbgBtPkt;
-};
-
-struct bt_hci_info {
- /* 802.11 Pal version specifier */
- u8 BTPalVersion;
- u16 BTPalCompanyID;
- u16 BTPalsubversion;
-
- /* Connected channel list */
- u16 BTConnectChnlListLen;
- u8 BTConnectChnllist[64];
-
- /* Fail contact counter */
- u16 FailContactCount;
-
- /* Event mask */
- u64 BTEventMask;
- u64 BTEventMaskPage2;
-
- /* timeout var */
- u16 ConnAcceptTimeout;
- u16 LogicalAcceptTimeout;
- u16 PageTimeout;
-
- u8 LocationDomainAware;
- u16 LocationDomain;
- u8 LocationDomainOptions;
- u8 LocationOptions;
-
- u8 FlowControlMode;
-
- /* Preferred channel list */
- u16 BtPreChnlListLen;
- u8 BTPreChnllist[64];
-
- u16 enFlush_LLH; /* enhanced flush handle */
- u16 FLTO_LLH; /* enhanced flush handle */
-
- /* */
- /* Test command only. */
- u8 bInTestMode;
- u8 bTestIsEnd;
- u8 bTestNeedReport;
- u8 TestScenario;
- u8 TestReportInterval;
- u8 TestCtrType;
- u32 TestEventType;
- u16 TestNumOfFrame;
- u16 TestNumOfErrFrame;
- u16 TestNumOfBits;
- u16 TestNumOfErrBits;
- /* */
-};
-
-struct bt_traffic {
- /* Add for check replay data */
- u8 LastRxUniFragNum;
- u16 LastRxUniSeqNum;
-
- /* s32 EntryMaxUndecoratedSmoothedPWDB; */
- /* s32 EntryMinUndecoratedSmoothedPWDB; */
-
- struct bt_traffic_statistics Bt30TrafficStatistics;
-};
-
-#define RT_WORK_ITEM struct work_struct
-
-struct bt_security {
- /* WPA auth state
- * May need to move to BTSecInfo ...
- * enum bt_state_wpa_auth BTWPAAuthState;
- */
- struct octet_string RSNIE;
- u8 RSNIEBuf[MAXRSNIELEN];
- u8 bRegNoEncrypt;
- u8 bUsedHwEncrypt;
-};
-
-struct bt_30info {
- struct rtw_adapter *padapter;
- struct bt_asoc_entry BtAsocEntry[MAX_BT_ASOC_ENTRY_NUM];
- struct bt_mgnt BtMgnt;
- struct bt_dgb BtDbg;
- struct bt_hci_info BtHciInfo;
- struct bt_traffic BtTraffic;
- struct bt_security BtSec;
- RT_WORK_ITEM HCICmdWorkItem;
- struct timer_list BTHCICmdTimer;
- RT_WORK_ITEM BTPsDisableWorkItem;
- RT_WORK_ITEM BTConnectWorkItem;
- struct timer_list BTHCIDiscardAclDataTimer;
- struct timer_list BTHCIJoinTimeoutTimer;
- struct timer_list BTTestSendPacketTimer;
- struct timer_list BTDisconnectPhyLinkTimer;
- struct timer_list BTBeaconTimer;
- u8 BTBeaconTmrOn;
-
- struct timer_list BTPsDisableTimer;
-
- void * pBtChnlList;
-};
-
-struct packet_irp_acl_data {
- u16 Handle:12;
- u16 PB_Flag:2;
- u16 BC_Flag:2;
- u16 Length;
- u8 Data[1];
-};
-
-struct packet_irp_hcievent_data {
- u8 EventCode;
- u8 Length;
- u8 Data[20];
-};
-
-struct common_triple {
- u8 byte_1st;
- u8 byte_2nd;
- u8 byte_3rd;
-};
-
-#define COUNTRY_STR_LEN 3 /* country string len = 3 */
-
-#define LOCAL_PMK 0
-
-enum hci_wifi_connect_status {
- HCI_WIFI_NOT_CONNECTED = 0x0,
- HCI_WIFI_CONNECTED = 0x1,
- HCI_WIFI_CONNECT_IN_PROGRESS = 0x2,
-};
-
-enum hci_ext_bp_operation {
- HCI_BT_OP_NONE = 0x0,
- HCI_BT_OP_INQUIRY_START = 0x1,
- HCI_BT_OP_INQUIRY_FINISH = 0x2,
- HCI_BT_OP_PAGING_START = 0x3,
- HCI_BT_OP_PAGING_SUCCESS = 0x4,
- HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
- HCI_BT_OP_PAIRING_START = 0x6,
- HCI_BT_OP_PAIRING_FINISH = 0x7,
- HCI_BT_OP_BT_DEV_ENABLE = 0x8,
- HCI_BT_OP_BT_DEV_DISABLE = 0x9,
- HCI_BT_OP_MAX
-};
-
-#define BTHCI_SM_WITH_INFO(_Adapter, _StateToEnter, _StateCmd, _EntryNum) \
-{ \
- RTPRINT(FIOCTL, IOCTL_STATE, ("[BT state change] caused by ""%s"", line =%d\n", __func__, __LINE__)); \
- BTHCI_StateMachine(_Adapter, _StateToEnter, _StateCmd, _EntryNum);\
-}
-
-void BTHCI_EventParse(struct rtw_adapter *padapter, void *pEvntData,
- u32 dataLen);
-#define BT_EventParse BTHCI_EventParse
-u8 BTHCI_HsConnectionEstablished(struct rtw_adapter *padapter);
-void BTHCI_UpdateBTProfileRTKToMoto(struct rtw_adapter *padapter);
-void BTHCI_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType);
-void BTHCI_StateMachine(struct rtw_adapter *padapter, u8 StateToEnter,
- enum hci_state_with_cmd StateCmd, u8 EntryNum);
-void BTHCI_DisconnectPeer(struct rtw_adapter *padapter, u8 EntryNum);
-void BTHCI_EventNumOfCompletedDataBlocks(struct rtw_adapter *padapter);
-void BTHCI_EventAMPStatusChange(struct rtw_adapter *padapter, u8 AMP_Status);
-void BTHCI_DisconnectAll(struct rtw_adapter *padapter);
-enum hci_status BTHCI_HandleHCICMD(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd);
-
-/* ===== End of sync from SD7 driver COMMON/bt_hci.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.h ===== */
-#define GET_BT_INFO(padapter) (&GET_HAL_DATA(padapter)->BtInfo)
-
-#define BTC_FOR_SCAN_START 1
-#define BTC_FOR_SCAN_FINISH 0
-
-#define BT_TXRX_CNT_THRES_1 1200
-#define BT_TXRX_CNT_THRES_2 1400
-#define BT_TXRX_CNT_THRES_3 3000
-#define BT_TXRX_CNT_LEVEL_0 0 /* < 1200 */
-#define BT_TXRX_CNT_LEVEL_1 1 /* >= 1200 && < 1400 */
-#define BT_TXRX_CNT_LEVEL_2 2 /* >= 1400 */
-#define BT_TXRX_CNT_LEVEL_3 3 /* >= 3000 */
-
-enum bt_state_1ant {
- BT_INFO_STATE_DISABLED = 0,
- BT_INFO_STATE_NO_CONNECTION = 1,
- BT_INFO_STATE_CONNECT_IDLE = 2,
- BT_INFO_STATE_INQ_OR_PAG = 3,
- BT_INFO_STATE_ACL_ONLY_BUSY = 4,
- BT_INFO_STATE_SCO_ONLY_BUSY = 5,
- BT_INFO_STATE_ACL_SCO_BUSY = 6,
- BT_INFO_STATE_ACL_INQ_OR_PAG = 7,
- BT_INFO_STATE_MAX = 8
-};
-
-struct btdm_8723a_1ant {
- u8 prePsTdma;
- u8 curPsTdma;
- u8 psTdmaDuAdjType;
- u8 bPrePsTdmaOn;
- u8 bCurPsTdmaOn;
- u8 preWifiPara;
- u8 curWifiPara;
- u8 preCoexWifiCon;
- u8 curCoexWifiCon;
- u8 wifiRssiThresh;
-
- u32 psTdmaMonitorCnt;
- u32 psTdmaGlobalCnt;
-
- /* DurationAdjust For SCO */
- u32 psTdmaMonitorCntForSCO;
- u8 psTdmaDuAdjTypeForSCO;
- u8 RSSI_WiFi_Last;
- u8 RSSI_BT_Last;
-
- u8 bWiFiHalt;
- u8 bRAChanged;
-};
-
-void BTDM_1AntSignalCompensation(struct rtw_adapter *padapter,
- u8 *rssi_wifi, u8 *rssi_bt);
-void BTDM_1AntForDhcp(struct rtw_adapter *padapter);
-void BTDM_1AntBtCoexist8723A(struct rtw_adapter *padapter);
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87231Ant.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.h ===== */
-enum bt_2ant_bt_status {
- BT_2ANT_BT_STATUS_IDLE = 0x0,
- BT_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
- BT_2ANT_BT_STATUS_NON_IDLE = 0x2,
- BT_2ANT_BT_STATUS_MAX
-};
-
-enum bt_2ant_coex_algo {
- BT_2ANT_COEX_ALGO_UNDEFINED = 0x0,
- BT_2ANT_COEX_ALGO_SCO = 0x1,
- BT_2ANT_COEX_ALGO_HID = 0x2,
- BT_2ANT_COEX_ALGO_A2DP = 0x3,
- BT_2ANT_COEX_ALGO_PANEDR = 0x4,
- BT_2ANT_COEX_ALGO_PANHS = 0x5,
- BT_2ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
- BT_2ANT_COEX_ALGO_PANEDR_HID = 0x7,
- BT_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
- BT_2ANT_COEX_ALGO_HID_A2DP = 0x9,
- BT_2ANT_COEX_ALGO_HID_A2DP_PANHS = 0xA,
- BT_2ANT_COEX_ALGO_MAX = 0xB,
-};
-
-struct btdm_8723a_2ant {
- u8 bPreDecBtPwr;
- u8 bCurDecBtPwr;
-
- u8 preWlanActHi;
- u8 curWlanActHi;
- u8 preWlanActLo;
- u8 curWlanActLo;
-
- u8 preFwDacSwingLvl;
- u8 curFwDacSwingLvl;
-
- u8 bPreRfRxLpfShrink;
- u8 bCurRfRxLpfShrink;
-
- u8 bPreLowPenaltyRa;
- u8 bCurLowPenaltyRa;
-
- u8 preBtRetryIndex;
- u8 curBtRetryIndex;
-
- u8 bPreDacSwingOn;
- u32 preDacSwingLvl;
- u8 bCurDacSwingOn;
- u32 curDacSwingLvl;
-
- u8 bPreAdcBackOff;
- u8 bCurAdcBackOff;
-
- u8 bPreAgcTableEn;
- u8 bCurAgcTableEn;
-
- u32 preVal0x6c0;
- u32 curVal0x6c0;
- u32 preVal0x6c8;
- u32 curVal0x6c8;
- u8 preVal0x6cc;
- u8 curVal0x6cc;
-
- u8 bCurIgnoreWlanAct;
- u8 bPreIgnoreWlanAct;
-
- u8 prePsTdma;
- u8 curPsTdma;
- u8 psTdmaDuAdjType;
- u8 bPrePsTdmaOn;
- u8 bCurPsTdmaOn;
-
- u8 preAlgorithm;
- u8 curAlgorithm;
- u8 bResetTdmaAdjust;
-
- u8 btStatus;
-};
-
-void BTDM_2AntBtCoexist8723A(struct rtw_adapter *padapter);
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc87232Ant.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
-
-#define BT_Q_PKT_OFF 0
-#define BT_Q_PKT_ON 1
-
-#define BT_TX_PWR_OFF 0
-#define BT_TX_PWR_ON 1
-
-/* TDMA mode definition */
-#define TDMA_2ANT 0
-#define TDMA_1ANT 1
-#define TDMA_NAV_OFF 0
-#define TDMA_NAV_ON 1
-#define TDMA_DAC_SWING_OFF 0
-#define TDMA_DAC_SWING_ON 1
-
-#define BT_RSSI_LEVEL_H 0
-#define BT_RSSI_LEVEL_M 1
-#define BT_RSSI_LEVEL_L 2
-
-/* PTA mode related definition */
-#define BT_PTA_MODE_OFF 0
-#define BT_PTA_MODE_ON 1
-
-/* Penalty Tx Rate Adaptive */
-#define BT_TX_RATE_ADAPTIVE_NORMAL 0
-#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1
-
-/* RF Corner */
-#define BT_RF_RX_LPF_CORNER_RESUME 0
-#define BT_RF_RX_LPF_CORNER_SHRINK 1
-
-#define BT_INFO_ACL BIT(0)
-#define BT_INFO_SCO BIT(1)
-#define BT_INFO_INQ_PAG BIT(2)
-#define BT_INFO_ACL_BUSY BIT(3)
-#define BT_INFO_SCO_BUSY BIT(4)
-#define BT_INFO_HID BIT(5)
-#define BT_INFO_A2DP BIT(6)
-#define BT_INFO_FTP BIT(7)
-
-
-
-struct bt_coexist_8723a {
- u32 highPriorityTx;
- u32 highPriorityRx;
- u32 lowPriorityTx;
- u32 lowPriorityRx;
- u8 btRssi;
- u8 TotalAntNum;
- u8 bC2hBtInfoSupport;
- u8 c2hBtInfo;
- u8 c2hBtInfoOriginal;
- u8 prec2hBtInfo; /* for 1Ant */
- u8 bC2hBtInquiryPage;
- unsigned long btInqPageStartTime; /* for 2Ant */
- u8 c2hBtProfile; /* for 1Ant */
- u8 btRetryCnt;
- u8 btInfoExt;
- u8 bC2hBtInfoReqSent;
- u8 bForceFwBtInfo;
- u8 bForceA2dpSink;
- struct btdm_8723a_2ant btdm2Ant;
- struct btdm_8723a_1ant btdm1Ant;
-};
-
-void BTDM_SetFwChnlInfo(struct rtw_adapter *padapter,
- enum rt_media_status mstatus);
-u8 BTDM_IsWifiConnectionExist(struct rtw_adapter *padapter);
-void BTDM_SetFw3a(struct rtw_adapter *padapter, u8 byte1, u8 byte2, u8 byte3,
- u8 byte4, u8 byte5);
-void BTDM_QueryBtInformation(struct rtw_adapter *padapter);
-void BTDM_SetSwRfRxLpfCorner(struct rtw_adapter *padapter, u8 type);
-void BTDM_SetSwPenaltyTxRateAdaptive(struct rtw_adapter *padapter, u8 raType);
-void BTDM_SetFwDecBtPwr(struct rtw_adapter *padapter, u8 bDecBtPwr);
-u8 BTDM_BtProfileSupport(struct rtw_adapter *padapter);
-void BTDM_LpsLeave(struct rtw_adapter *padapter);
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtc8723.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.h ===== */
-
-enum BT_A2DP_INDEX{
- BT_A2DP_INDEX0 = 0, /* 32, 12; the most critical for BT */
- BT_A2DP_INDEX1, /* 12, 24 */
- BT_A2DP_INDEX2, /* 0, 0 */
- BT_A2DP_INDEX_MAX
-};
-
-#define BT_A2DP_STATE_NOT_ENTERED 0
-#define BT_A2DP_STATE_DETECTING 1
-#define BT_A2DP_STATE_DETECTED 2
-
-#define BTDM_ANT_BT_IDLE 0
-#define BTDM_ANT_WIFI 1
-#define BTDM_ANT_BT 2
-
-
-void BTDM_SingleAnt(struct rtw_adapter *padapter, u8 bSingleAntOn,
- u8 bInterruptOn, u8 bMultiNAVOn);
-void BTDM_CheckBTIdleChange1Ant(struct rtw_adapter *padapter);
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr1Ant.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.h ===== */
-
-/* */
-/* For old core stack before v251 */
-/* */
-#define BT_RSSI_STATE_NORMAL_POWER BIT(0)
-#define BT_RSSI_STATE_AMDPU_OFF BIT(1)
-#define BT_RSSI_STATE_SPECIAL_LOW BIT(2)
-#define BT_RSSI_STATE_BG_EDCA_LOW BIT(3)
-#define BT_RSSI_STATE_TXPOWER_LOW BIT(4)
-
-#define BT_DACSWING_OFF 0
-#define BT_DACSWING_M4 1
-#define BT_DACSWING_M7 2
-#define BT_DACSWING_M10 3
-
-void BTDM_DiminishWiFi(struct rtw_adapter *Adapter, u8 bDACOn, u8 bInterruptOn,
- u8 DACSwingLevel, u8 bNAVOn);
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtcCsr2Ant.h ===== */
-
-/* HEADER/TypeDef.h */
-#define MAX_FW_SUPPORT_MACID_NUM 64
-
-/* ===== Below this line is sync from SD7 driver HAL/BTCoexist/HalBtCoexist.h ===== */
-
-#define FW_VER_BT_REG 62
-#define FW_VER_BT_REG1 74
-#define REG_BT_ACTIVE 0x444
-#define REG_BT_STATE 0x448
-#define REG_BT_POLLING1 0x44c
-#define REG_BT_POLLING 0x700
-
-#define REG_BT_ACTIVE_OLD 0x488
-#define REG_BT_STATE_OLD 0x48c
-#define REG_BT_POLLING_OLD 0x490
-
-/* The reg define is for 8723 */
-#define REG_HIGH_PRIORITY_TXRX 0x770
-#define REG_LOW_PRIORITY_TXRX 0x774
-
-#define BT_FW_COEX_THRESH_TOL 6
-#define BT_FW_COEX_THRESH_20 20
-#define BT_FW_COEX_THRESH_23 23
-#define BT_FW_COEX_THRESH_25 25
-#define BT_FW_COEX_THRESH_30 30
-#define BT_FW_COEX_THRESH_35 35
-#define BT_FW_COEX_THRESH_40 40
-#define BT_FW_COEX_THRESH_45 45
-#define BT_FW_COEX_THRESH_47 47
-#define BT_FW_COEX_THRESH_50 50
-#define BT_FW_COEX_THRESH_55 55
-#define BT_FW_COEX_THRESH_65 65
-
-#define BT_COEX_STATE_BT30 BIT(0)
-#define BT_COEX_STATE_WIFI_HT20 BIT(1)
-#define BT_COEX_STATE_WIFI_HT40 BIT(2)
-#define BT_COEX_STATE_WIFI_LEGACY BIT(3)
-
-#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4)
-#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5)
-#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6)
-#define BT_COEX_STATE_DEC_BT_POWER BIT(7)
-
-#define BT_COEX_STATE_WIFI_IDLE BIT(8)
-#define BT_COEX_STATE_WIFI_UPLINK BIT(9)
-#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10)
-
-#define BT_COEX_STATE_BT_INQ_PAGE BIT(11)
-#define BT_COEX_STATE_BT_IDLE BIT(12)
-#define BT_COEX_STATE_BT_UPLINK BIT(13)
-#define BT_COEX_STATE_BT_DOWNLINK BIT(14)
-/* */
-/* Todo: Remove these definitions */
-#define BT_COEX_STATE_BT_PAN_IDLE BIT(15)
-#define BT_COEX_STATE_BT_PAN_UPLINK BIT(16)
-#define BT_COEX_STATE_BT_PAN_DOWNLINK BIT(17)
-#define BT_COEX_STATE_BT_A2DP_IDLE BIT(18)
-/* */
-#define BT_COEX_STATE_BT_RSSI_LOW BIT(19)
-
-#define BT_COEX_STATE_PROFILE_HID BIT(20)
-#define BT_COEX_STATE_PROFILE_A2DP BIT(21)
-#define BT_COEX_STATE_PROFILE_PAN BIT(22)
-#define BT_COEX_STATE_PROFILE_SCO BIT(23)
-
-#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24)
-#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25)
-#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26)
-
-#define BT_COEX_STATE_WIFI_RSSI_BEACON_LOW BIT(27)
-#define BT_COEX_STATE_WIFI_RSSI_BEACON_MEDIUM BIT(28)
-#define BT_COEX_STATE_WIFI_RSSI_BEACON_HIGH BIT(29)
-
-
-#define BT_COEX_STATE_BTINFO_COMMON BIT(30)
-#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31)
-#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(32)
-
-#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT(33)
-#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT(34)
-#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT(35)
-#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT(36)
-
-#define BT_RSSI_STATE_HIGH 0
-#define BT_RSSI_STATE_MEDIUM 1
-#define BT_RSSI_STATE_LOW 2
-#define BT_RSSI_STATE_STAY_HIGH 3
-#define BT_RSSI_STATE_STAY_MEDIUM 4
-#define BT_RSSI_STATE_STAY_LOW 5
-
-#define BT_AGCTABLE_OFF 0
-#define BT_AGCTABLE_ON 1
-
-#define BT_BB_BACKOFF_OFF 0
-#define BT_BB_BACKOFF_ON 1
-
-#define BT_FW_NAV_OFF 0
-#define BT_FW_NAV_ON 1
-
-#define BT_COEX_MECH_NONE 0
-#define BT_COEX_MECH_SCO 1
-#define BT_COEX_MECH_HID 2
-#define BT_COEX_MECH_A2DP 3
-#define BT_COEX_MECH_PAN 4
-#define BT_COEX_MECH_HID_A2DP 5
-#define BT_COEX_MECH_HID_PAN 6
-#define BT_COEX_MECH_PAN_A2DP 7
-#define BT_COEX_MECH_HID_SCO_ESCO 8
-#define BT_COEX_MECH_FTP_A2DP 9
-#define BT_COEX_MECH_COMMON 10
-#define BT_COEX_MECH_MAX 11
-/* BT Dbg Ctrl */
-#define BT_DBG_PROFILE_NONE 0
-#define BT_DBG_PROFILE_SCO 1
-#define BT_DBG_PROFILE_HID 2
-#define BT_DBG_PROFILE_A2DP 3
-#define BT_DBG_PROFILE_PAN 4
-#define BT_DBG_PROFILE_HID_A2DP 5
-#define BT_DBG_PROFILE_HID_PAN 6
-#define BT_DBG_PROFILE_PAN_A2DP 7
-#define BT_DBG_PROFILE_MAX 9
-
-struct bt_coexist_str {
- u8 BluetoothCoexist;
- u8 BT_Ant_Num;
- u8 BT_CoexistType;
- u8 BT_Ant_isolation; /* 0:good, 1:bad */
- u8 bt_radiosharedtype;
- u32 Ratio_Tx;
- u32 Ratio_PRI;
- u8 bInitlized;
- u32 BtRfRegOrigin1E;
- u32 BtRfRegOrigin1F;
- u8 bBTBusyTraffic;
- u8 bBTTrafficModeSet;
- u8 bBTNonTrafficModeSet;
- struct bt_traffic_statistics BT21TrafficStatistics;
- u64 CurrentState;
- u64 PreviousState;
- u8 preRssiState;
- u8 preRssiState1;
- u8 preRssiStateBeacon;
- u8 bFWCoexistAllOff;
- u8 bSWCoexistAllOff;
- u8 bHWCoexistAllOff;
- u8 bBalanceOn;
- u8 bSingleAntOn;
- u8 bInterruptOn;
- u8 bMultiNAVOn;
- u8 PreWLANActH;
- u8 PreWLANActL;
- u8 WLANActH;
- u8 WLANActL;
- u8 A2DPState;
- u8 AntennaState;
- u32 lastBtEdca;
- u16 last_aggr_num;
- u8 bEDCAInitialized;
- u8 exec_cnt;
- u8 b8723aAgcTableOn;
- u8 b92DAgcTableOn;
- struct bt_coexist_8723a halCoex8723;
- u8 btActiveZeroCnt;
- u8 bCurBtDisabled;
- u8 bPreBtDisabled;
- u8 bNeedToRoamForBtDisableEnable;
- u8 fw3aVal[5];
-};
-
-void BTDM_CheckAntSelMode(struct rtw_adapter *padapter);
-void BTDM_FwC2hBtRssi(struct rtw_adapter *padapter, u8 *tmpBuf);
-#define BT_FwC2hBtRssi BTDM_FwC2hBtRssi
-void BTDM_DisplayBtCoexInfo(struct rtw_adapter *padapter);
-#define BT_DisplayBtCoexInfo BTDM_DisplayBtCoexInfo
-void BTDM_RejectAPAggregatedPacket(struct rtw_adapter *padapter, u8 bReject);
-u8 BTDM_IsHT40(struct rtw_adapter *padapter);
-u8 BTDM_Legacy(struct rtw_adapter *padapter);
-void BTDM_CheckWiFiState(struct rtw_adapter *padapter);
-s32 BTDM_GetRxSS(struct rtw_adapter *padapter);
-u8 BTDM_CheckCoexBcnRssiState(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1);
-u8 BTDM_CheckCoexRSSIState1(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1);
-u8 BTDM_CheckCoexRSSIState(struct rtw_adapter *padapter, u8 levelNum,
- u8 RssiThresh, u8 RssiThresh1);
-void BTDM_Balance(struct rtw_adapter *padapter, u8 bBalanceOn, u8 ms0, u8 ms1);
-void BTDM_AGCTable(struct rtw_adapter *padapter, u8 type);
-void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type);
-void BTDM_FWCoexAllOff(struct rtw_adapter *padapter);
-void BTDM_SWCoexAllOff(struct rtw_adapter *padapter);
-void BTDM_HWCoexAllOff(struct rtw_adapter *padapter);
-void BTDM_CoexAllOff(struct rtw_adapter *padapter);
-void BTDM_TurnOffBtCoexistBeforeEnterIPS(struct rtw_adapter *padapter);
-void BTDM_SignalCompensation(struct rtw_adapter *padapter, u8 *rssi_wifi,
- u8 *rssi_bt);
-void BTDM_UpdateCoexState(struct rtw_adapter *padapter);
-u8 BTDM_IsSameCoexistState(struct rtw_adapter *padapter);
-void BTDM_PWDBMonitor(struct rtw_adapter *padapter);
-u8 BTDM_IsBTBusy(struct rtw_adapter *padapter);
-#define BT_IsBtBusy BTDM_IsBTBusy
-u8 BTDM_IsWifiBusy(struct rtw_adapter *padapter);
-u8 BTDM_IsCoexistStateChanged(struct rtw_adapter *padapter);
-u8 BTDM_IsWifiUplink(struct rtw_adapter *padapter);
-u8 BTDM_IsWifiDownlink(struct rtw_adapter *padapter);
-u8 BTDM_IsBTHSMode(struct rtw_adapter *padapter);
-u8 BTDM_IsBTUplink(struct rtw_adapter *padapter);
-u8 BTDM_IsBTDownlink(struct rtw_adapter *padapter);
-void BTDM_AdjustForBtOperation(struct rtw_adapter *padapter);
-void BTDM_ForHalt(struct rtw_adapter *padapter);
-void BTDM_WifiScanNotify(struct rtw_adapter *padapter, u8 scanType);
-void BTDM_WifiAssociateNotify(struct rtw_adapter *padapter, u8 action);
-void BTDM_MediaStatusNotify(struct rtw_adapter *padapter,
- enum rt_media_status mstatus);
-void BTDM_ForDhcp(struct rtw_adapter *padapter);
-void BTDM_ResetActionProfileState(struct rtw_adapter *padapter);
-void BTDM_SetBtCoexCurrAntNum(struct rtw_adapter *padapter, u8 antNum);
-#define BT_SetBtCoexCurrAntNum BTDM_SetBtCoexCurrAntNum
-u8 BTDM_IsActionSCO(struct rtw_adapter *padapter);
-u8 BTDM_IsActionHID(struct rtw_adapter *padapter);
-u8 BTDM_IsActionA2DP(struct rtw_adapter *padapter);
-u8 BTDM_IsActionPAN(struct rtw_adapter *padapter);
-u8 BTDM_IsActionHIDA2DP(struct rtw_adapter *padapter);
-u8 BTDM_IsActionHIDPAN(struct rtw_adapter *padapter);
-u8 BTDM_IsActionPANA2DP(struct rtw_adapter *padapter);
-u32 BTDM_BtTxRxCounterH(struct rtw_adapter *padapter);
-u32 BTDM_BtTxRxCounterL(struct rtw_adapter *padapter);
-
-/* ===== End of sync from SD7 driver HAL/BTCoexist/HalBtCoexist.h ===== */
-
-/* ===== Below this line is sync from SD7 driver HAL/HalBT.h ===== */
-
-#define RTS_CTS_NO_LEN_LIMIT 0
-
-u8 HALBT_GetPGAntNum(struct rtw_adapter *padapter);
-#define BT_GetPGAntNum HALBT_GetPGAntNum
-void HALBT_SetKey(struct rtw_adapter *padapter, u8 EntryNum);
-void HALBT_RemoveKey(struct rtw_adapter *padapter, u8 EntryNum);
-u8 HALBT_IsBTExist(struct rtw_adapter *padapter);
-#define BT_IsBtExist HALBT_IsBTExist
-u8 HALBT_BTChipType(struct rtw_adapter *padapter);
-void HALBT_SetRtsCtsNoLenLimit(struct rtw_adapter *padapter);
-
-/* ===== End of sync from SD7 driver HAL/HalBT.c ===== */
-
-#define _bt_dbg_off_ 0
-#define _bt_dbg_on_ 1
-
-extern u32 BTCoexDbgLevel;
-
-
-
-#endif /* __RTL8723A_BT_COEXIST_H__ */
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_bt_intf.h b/drivers/staging/rtl8723au/include/rtl8723a_bt_intf.h
deleted file mode 100644
index 4733559970e5..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_bt_intf.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- * Copyright(c) 2014, Jes Sorensen <Jes.Sorensen@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_BT_INTF_H__
-#define __RTL8723A_BT_INTF_H__
-
-#include <drv_types.h>
-
-#ifdef CONFIG_8723AU_BT_COEXIST
-enum rt_media_status;
-bool rtl8723a_BT_using_antenna_1(struct rtw_adapter *padapter);
-bool rtl8723a_BT_enabled(struct rtw_adapter *padapter);
-bool rtl8723a_BT_coexist(struct rtw_adapter *padapter);
-void rtl8723a_BT_do_coexist(struct rtw_adapter *padapter);
-void rtl8723a_BT_wifiscan_notify(struct rtw_adapter *padapter, u8 scanType);
-void rtl8723a_BT_mediastatus_notify(struct rtw_adapter *padapter,
- enum rt_media_status mstatus);
-void rtl8723a_BT_specialpacket_notify(struct rtw_adapter *padapter);
-void rtl8723a_BT_lps_leave(struct rtw_adapter *padapter);
-void rtl8723a_BT_disable_coexist(struct rtw_adapter *padapter);
-bool rtl8723a_BT_disable_EDCA_turbo(struct rtw_adapter *padapter);
-void rtl8723a_dual_antenna_detection(struct rtw_adapter *padapter);
-void rtl8723a_BT_init_hwconfig(struct rtw_adapter *padapter);
-void rtl8723a_BT_wifiassociate_notify(struct rtw_adapter *padapter, u8 action);
-void rtl8723a_BT_init_hal_vars(struct rtw_adapter *padapter);
-void rtl8723a_fw_c2h_BT_info(struct rtw_adapter *padapter, u8 *tmpBuf, u8 length);
-#else
-static inline bool rtl8723a_BT_using_antenna_1(struct rtw_adapter *padapter)
-{
- return false;
-}
-static inline bool rtl8723a_BT_enabled(struct rtw_adapter *padapter)
-{
- return false;
-}
-static inline bool rtl8723a_BT_coexist(struct rtw_adapter *padapter)
-{
- return false;
-}
-#define rtl8723a_BT_do_coexist(padapter) do {} while(0)
-#define rtl8723a_BT_wifiscan_notify(padapter, scanType) do {} while(0)
-#define rtl8723a_BT_mediastatus_notify(padapter, mstatus) do {} while(0)
-#define rtl8723a_BT_specialpacket_notify(padapter) do {} while(0)
-#define rtl8723a_BT_lps_leave(padapter) do {} while(0)
-#define rtl8723a_BT_disable_coexist(padapter) do {} while(0)
-static inline bool rtl8723a_BT_disable_EDCA_turbo(struct rtw_adapter *padapter)
-{
- return false;
-}
-#define rtl8723a_dual_antenna_detection(padapter) do {} while(0)
-#define rtl8723a_BT_init_hwconfig(padapter) do {} while(0)
-#define rtl8723a_BT_wifiassociate_notify(padapter, action) do {} while(0)
-#define rtl8723a_BT_init_hal_vars(padapter) do {} while(0)
-#define rtl8723a_fw_c2h_BT_info(padapter, tmpBuf, length) do {} while(0)
-#endif
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
deleted file mode 100644
index f95535a915ab..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_CMD_H__
-#define __RTL8723A_CMD_H__
-
-
-#define H2C_BT_FW_PATCH_LEN 3
-#define H2C_BT_PWR_FORCE_LEN 3
-
-enum cmd_msg_element_id
-{
- NONE_CMDMSG_EID,
- AP_OFFLOAD_EID = 0,
- SET_PWRMODE_EID = 1,
- JOINBSS_RPT_EID = 2,
- RSVD_PAGE_EID = 3,
- RSSI_4_EID = 4,
- RSSI_SETTING_EID = 5,
- MACID_CONFIG_EID = 6,
- MACID_PS_MODE_EID = 7,
- P2P_PS_OFFLOAD_EID = 8,
- SELECTIVE_SUSPEND_ROF_CMD = 9,
- BT_QUEUE_PKT_EID = 17,
- BT_ANT_TDMA_EID = 20,
- BT_2ANT_HID_EID = 21,
- P2P_PS_CTW_CMD_EID = 32,
- FORCE_BT_TX_PWR_EID = 33,
- SET_TDMA_WLAN_ACT_TIME_EID = 34,
- SET_BT_TX_RETRY_INDEX_EID = 35,
- HID_PROFILE_ENABLE_EID = 36,
- BT_IGNORE_WLAN_ACT_EID = 37,
- BT_PTA_MANAGER_UPDATE_ENABLE_EID = 38,
- DAC_SWING_VALUE_EID = 41,
- TRADITIONAL_TDMA_EN_EID = 51,
- H2C_BT_FW_PATCH = 54,
- B_TYPE_TDMA_EID = 58,
- SCAN_EN_EID = 59,
- LOWPWR_LPS_EID = 71,
- H2C_RESET_TSF = 75,
- MAX_CMDMSG_EID
-};
-
-struct cmd_msg_parm {
- u8 eid; /* element id */
- u8 sz; /* sz */
- u8 buf[6];
-};
-
-struct setpwrmode_parm {
- u8 Mode;
- u8 SmartPS;
- u8 AwakeInterval; /* unit: beacon interval */
- u8 bAllQueueUAPSD;
-
-#define SETPM_LOWRXBCN BIT(0)
-#define SETPM_AUTOANTSWITCH BIT(1)
-#define SETPM_PSALLOWBTHIGHPRI BIT(2)
- u8 BcnAntMode;
-} __packed;
-
-struct H2C_SS_RFOFF_PARAM{
- u8 ROFOn; /* 1: on, 0:off */
- u16 gpio_period; /* unit: 1024 us */
-}__attribute__ ((packed));
-
-
-struct joinbssrpt_parm {
- u8 OpMode; /* enum rt_media_status */
-};
-
-struct rsvdpage_loc {
- u8 LocProbeRsp;
- u8 LocPsPoll;
- u8 LocNullData;
- u8 LocQosNull;
- u8 LocBTQosNull;
-};
-
-struct P2P_PS_Offload_t {
- u8 Offload_En:1;
- u8 role:1; /* 1: Owner, 0: Client */
- u8 CTWindow_En:1;
- u8 NoA0_En:1;
- u8 NoA1_En:1;
- u8 AllStaSleep:1; /* Only valid in Owner */
- u8 discovery:1;
- u8 rsvd:1;
-};
-
-struct P2P_PS_CTWPeriod_t {
- u8 CTWPeriod; /* TU */
-};
-
-#define B_TDMA_EN BIT(0)
-#define B_TDMA_FIXANTINBT BIT(1)
-#define B_TDMA_TXPSPOLL BIT(2)
-#define B_TDMA_VAL870 BIT(3)
-#define B_TDMA_AUTOWAKEUP BIT(4)
-#define B_TDMA_NOPS BIT(5)
-#define B_TDMA_WLANHIGHPRI BIT(6)
-
-struct b_type_tdma_parm {
- u8 option;
-
- u8 TBTTOnPeriod;
- u8 MedPeriod;
- u8 rsvd30;
-} __packed;
-
-struct scan_en_parm {
- u8 En;
-} __packed;
-
-/* BT_PWR */
-#define SET_H2CCMD_BT_PWR_IDX(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
-
-/* BT_FW_PATCH */
-#define SET_H2CCMD_BT_FW_PATCH_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_4BYTE(__pH2CCmd, 0, 8, __Value) /* SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value) */
-#define SET_H2CCMD_BT_FW_PATCH_SIZE(__pH2CCmd, __Value) SET_BITS_TO_LE_4BYTE(__pH2CCmd, 8, 16, __Value) /* SET_BITS_TO_LE_2BYTE((__pH2CCmd)+1, 0, 16, __Value) */
-
-struct lowpwr_lps_parm{
- u8 bcn_count:4;
- u8 tb_bcn_threshold:3;
- u8 enable:1;
- u8 bcn_interval;
- u8 drop_threshold;
- u8 max_early_period;
- u8 max_bcn_timeout_period;
-} __packed;
-
-
-/* host message to firmware cmd */
-void rtl8723a_set_FwPwrMode_cmd(struct rtw_adapter *padapter, u8 Mode);
-void rtl8723a_set_FwJoinBssReport_cmd(struct rtw_adapter *padapter, u8 mstatus);
-#ifdef CONFIG_8723AU_BT_COEXIST
-void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter *padapter);
-#else
-#define rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter) do {} while(0)
-#endif
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param);
-int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg);
-void rtl8723a_add_rateatid(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level);
-
-int FillH2CCmd(struct rtw_adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_dm.h b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
deleted file mode 100644
index bf236e8e47a2..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_dm.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_DM_H__
-#define __RTL8723A_DM_H__
-/* */
-/* Description: */
-/* */
-/* This file is for 8723A dynamic mechanism only */
-/* */
-/* */
-/* */
-#define DYNAMIC_FUNC_BT BIT(0)
-
-enum{
- UP_LINK,
- DOWN_LINK,
-};
-/* */
-/* structure and define */
-/* */
-
-/* duplicate code, will move to ODM ######### */
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-/* duplicate code, will move to ODM ######### */
-struct dm_priv {
- u32 InitODMFlag;
-
- /* Upper and Lower Signal threshold for Rate Adaptive*/
- int UndecoratedSmoothedPWDB;
- int UndecoratedSmoothedCCK;
- int EntryMinUndecoratedSmoothedPWDB;
- int EntryMaxUndecoratedSmoothedPWDB;
- int MinUndecoratedPWDBForDM;
- int LastMinUndecoratedPWDBForDM;
-
- s32 UndecoratedSmoothedBeacon;
- #ifdef CONFIG_8723AU_BT_COEXIST
- s32 BT_EntryMinUndecoratedSmoothedPWDB;
- s32 BT_EntryMaxUndecoratedSmoothedPWDB;
- #endif
-
- /* for High Power */
- u8 DynamicTxHighPowerLvl; /* Added by Jacken: Tx Power Control for Near/Far Range, 2008/03/06 */
-
- /* for tx power tracking */
- u8 bTXPowerTracking;
- u8 TXPowercount;
- u8 bTXPowerTrackingInit;
- u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
- u8 TM_Trigger;
-
- u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
- u8 ThermalValue;
- u8 ThermalValue_LCK;
- u8 ThermalValue_IQK;
- u8 ThermalValue_DPK;
-
- u8 bRfPiEnable;
-
- /* for APK */
- u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
- u8 bAPKdone;
- u8 bAPKThermalMeterIgnore;
- u8 bDPdone;
- u8 bDPPathAOK;
- u8 bDPPathBOK;
-
- /* for IQK */
- u32 RegC04;
- u32 Reg874;
- u32 RegC08;
- u32 RegB68;
- u32 RegB6C;
- u32 Reg870;
- u32 Reg860;
- u32 Reg864;
- u32 ADDA_backup[IQK_ADDA_REG_NUM];
- u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
- u32 IQK_BB_backup_recover[9];
- u32 IQK_BB_backup[IQK_BB_REG_NUM];
- u8 PowerIndex_backup[6];
-
- u8 bCCKinCH14;
-
- u8 CCK_index;
- u8 OFDM_index[2];
-
- u8 bDoneTxpower;
- u8 CCK_index_HP;
- u8 OFDM_index_HP[2];
- u8 ThermalValue_HP[HP_THERMAL_NUM];
- u8 ThermalValue_HP_index;
-
- /* for TxPwrTracking */
- s32 RegE94;
- s32 RegE9C;
- s32 RegEB4;
- s32 RegEBC;
-
- u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
-
- u32 prv_traffic_idx; /* edca turbo */
-
- s32 OFDM_Pkt_Cnt;
- u8 RSSI_Select;
-/* u8 DIG_Dynamic_MIN ; */
-/* duplicate code,will move to ODM ######### */
- /* Added for reading the Initial Data Rate SEL Register 0x484 during watchdog. Used to fill the tx desc. 2011.3.21 by Thomas */
- u8 INIDATA_RATE[32];
-};
-
-
-/* */
-/* function prototype */
-/* */
-
-void rtl8723a_init_dm_priv(struct rtw_adapter *padapter);
-
-void rtl8723a_InitHalDm(struct rtw_adapter *padapter);
-void rtl8723a_HalDmWatchDog(struct rtw_adapter *padapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_hal.h b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
deleted file mode 100644
index 77a0fd485b51..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_hal.h
+++ /dev/null
@@ -1,538 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_HAL_H__
-#define __RTL8723A_HAL_H__
-
-#include "rtl8723a_spec.h"
-#include "rtl8723a_pg.h"
-#include "Hal8723APhyReg.h"
-#include "Hal8723APhyCfg.h"
-#include "rtl8723a_rf.h"
-#include "rtl8723a_bt_intf.h"
-#ifdef CONFIG_8723AU_BT_COEXIST
-#include "rtl8723a_bt-coexist.h"
-#endif
-#include "rtl8723a_dm.h"
-#include "rtl8723a_recv.h"
-#include "rtl8723a_xmit.h"
-#include "rtl8723a_cmd.h"
-#include "rtl8723a_sreset.h"
-#include "rtw_efuse.h"
-#include "rtw_eeprom.h"
-
-#include "odm_precomp.h"
-#include "odm.h"
-
-
-/* 2TODO: We should define 8192S firmware related macro settings here!! */
-#define RTL819X_DEFAULT_RF_TYPE RF_1T2R
-#define RTL819X_TOTAL_RF_PATH 2
-
-/* */
-/* RTL8723S From header */
-/* */
-
-/* Fw Array */
-#define Rtl8723_FwImageArray Rtl8723UFwImgArray
-#define Rtl8723_FwUMCBCutImageArrayWithBT Rtl8723UFwUMCBCutImgArrayWithBT
-#define Rtl8723_FwUMCBCutImageArrayWithoutBT Rtl8723UFwUMCBCutImgArrayWithoutBT
-
-#define Rtl8723_ImgArrayLength Rtl8723UImgArrayLength
-#define Rtl8723_UMCBCutImgArrayWithBTLength Rtl8723UUMCBCutImgArrayWithBTLength
-#define Rtl8723_UMCBCutImgArrayWithoutBTLength Rtl8723UUMCBCutImgArrayWithoutBTLength
-
-#define Rtl8723_PHY_REG_Array_PG Rtl8723UPHY_REG_Array_PG
-#define Rtl8723_PHY_REG_Array_PGLength Rtl8723UPHY_REG_Array_PGLength
-
-#define Rtl8723_FwUMCBCutMPImageArray Rtl8723SFwUMCBCutMPImgAr
-#define Rtl8723_UMCBCutMPImgArrayLength Rtl8723SUMCBCutMPImgArrayLength
-
-#define DRVINFO_SZ 4 /* unit is 8bytes */
-#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len)&0x7F ? 1:0))
-
-#define FW_8723A_SIZE 0x8000
-#define FW_8723A_START_ADDRESS 0x1000
-#define FW_8723A_END_ADDRESS 0x1FFF /* 0x5FFF */
-
-#define MAX_PAGE_SIZE 4096 /* @ page : 4k bytes */
-
-#define IS_FW_HEADER_EXIST(_pFwHdr) ((le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x92C0 ||\
- (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88C0 ||\
- (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300)
-
-/* */
-/* This structure must take byte-ordering into account */
-/* */
-/* Added by tynli. 2009.12.04. */
-struct rt_8723a_firmware_hdr {
- /* 8-byte alignment required */
-
- /* LONG WORD 0 ---- */
- __le16 Signature; /*
- * 92C0: test chip; 92C, 88C0: test chip;
- * 88C1: MP A-cut; 92C1: MP A-cut
- */
- u8 Category; /* AP/NIC and USB/PCI */
- u8 Function; /* Reserved for different FW function indication, for further use when the driver needs to download different FW in different conditions */
- __le16 Version; /* FW Version */
- u8 Subversion; /* FW Subversion, default 0x00 */
- u8 Rsvd1;
-
-
- /* LONG WORD 1 ---- */
- u8 Month; /* Release time Month field */
- u8 Date; /* Release time Date field */
- u8 Hour; /* Release time Hour field */
- u8 Minute; /* Release time Minute field */
- __le16 RamCodeSize; /* The size of RAM code */
- __le16 Rsvd2;
-
- /* LONG WORD 2 ---- */
- __le32 SvnIdx; /* The SVN entry index */
- __le32 Rsvd3;
-
- /* LONG WORD 3 ---- */
- __le32 Rsvd4;
- __le32 Rsvd5;
-};
-
-#define DRIVER_EARLY_INT_TIME 0x05
-#define BCN_DMA_ATIME_INT_TIME 0x02
-
-
-/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
-#define MAX_TX_QUEUE 9
-
-#define TX_SELE_HQ BIT(0) /* High Queue */
-#define TX_SELE_LQ BIT(1) /* Low Queue */
-#define TX_SELE_NQ BIT(2) /* Normal Queue */
-
-/* Note: We will divide the number of pages equally for each queue other than the public queue! */
-#define TX_TOTAL_PAGE_NUMBER 0xF8
-#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
-
-/* For Normal Chip Setting */
-/* (HPQ + LPQ + NPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
-#define NORMAL_PAGE_NUM_PUBQ 0xE7
-#define NORMAL_PAGE_NUM_HPQ 0x0C
-#define NORMAL_PAGE_NUM_LPQ 0x02
-#define NORMAL_PAGE_NUM_NPQ 0x02
-
-/* For Test Chip Setting */
-/* (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
-#define TEST_PAGE_NUM_PUBQ 0x7E
-
-/* For Test Chip Setting */
-#define WMM_TEST_TX_TOTAL_PAGE_NUMBER 0xF5
-#define WMM_TEST_TX_PAGE_BOUNDARY (WMM_TEST_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
-
-#define WMM_TEST_PAGE_NUM_PUBQ 0xA3
-#define WMM_TEST_PAGE_NUM_HPQ 0x29
-#define WMM_TEST_PAGE_NUM_LPQ 0x29
-
-/* Note: For Normal Chip Setting, modify later */
-#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER 0xF5
-#define WMM_NORMAL_TX_PAGE_BOUNDARY (WMM_TEST_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
-
-#define WMM_NORMAL_PAGE_NUM_PUBQ 0xB0
-#define WMM_NORMAL_PAGE_NUM_HPQ 0x29
-#define WMM_NORMAL_PAGE_NUM_LPQ 0x1C
-#define WMM_NORMAL_PAGE_NUM_NPQ 0x1C
-
-
-/* */
-/* Chip specific */
-/* */
-#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
-#define CHIP_BONDING_92C_1T2R 0x1
-#define CHIP_BONDING_88C_USB_MCARD 0x2
-#define CHIP_BONDING_88C_USB_HP 0x1
-
-#include "HalVerDef.h"
-#include "hal_com.h"
-
-/* */
-/* Channel Plan */
-/* */
-enum ChannelPlan
-{
- CHPL_FCC = 0,
- CHPL_IC = 1,
- CHPL_ETSI = 2,
- CHPL_SPAIN = 3,
- CHPL_FRANCE = 4,
- CHPL_MKK = 5,
- CHPL_MKK1 = 6,
- CHPL_ISRAEL = 7,
- CHPL_TELEC = 8,
- CHPL_GLOBAL = 9,
- CHPL_WORLD = 10,
-};
-
-#define EFUSE_REAL_CONTENT_LEN 512
-#define EFUSE_MAP_LEN 128
-#define EFUSE_MAX_SECTION 16
-#define EFUSE_IC_ID_OFFSET 506 /* For some inferior IC purposes. Added by Roger, 2009.09.02. */
-#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN)
-/* */
-/* <Roger_Notes> */
-/* To prevent the out-of-boundary programming case, */
-/* leave 1 byte and program the full section: */
-/* 9 bytes + 1 byte + 5 bytes and pre 1 byte. */
-/* For worst case: */
-/* | 1byte|----8bytes----|1byte|--5bytes--| */
-/* | | Reserved(14bytes) | */
-/* */
-
-/* PG data excludes the header, 6 dummy bytes from the CP test, and 1 reserved byte. */
-#define EFUSE_OOB_PROTECT_BYTES 15
-
-#define EFUSE_REAL_CONTENT_LEN_8723A 512
-#define EFUSE_MAP_LEN_8723A 256
-#define EFUSE_MAX_SECTION_8723A 32
-
-/* */
-/* EFUSE for BT definition */
-/* */
-#define EFUSE_BT_REAL_BANK_CONTENT_LEN 512
-#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
-#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
-#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
-
-#define EFUSE_PROTECT_BYTES_BANK 16
-
-/* */
-/* <Roger_Notes> For RTL8723 WiFi/BT/GPS multi-function configuration. 2010.10.06. */
-/* */
-enum RT_MULTI_FUNC {
- RT_MULTI_FUNC_NONE = 0x00,
- RT_MULTI_FUNC_WIFI = 0x01,
- RT_MULTI_FUNC_BT = 0x02,
- RT_MULTI_FUNC_GPS = 0x04,
-};
-
-/* */
-/* <Roger_Notes> For RTL8723 WiFi PDn/GPIO polarity control configuration. 2010.10.08. */
-/* */
-enum RT_POLARITY_CTL {
- RT_POLARITY_LOW_ACT = 0,
- RT_POLARITY_HIGH_ACT = 1,
-};
-
-/* For RTL8723 regulator mode. by tynli. 2011.01.14. */
-enum RT_REGULATOR_MODE {
- RT_SWITCHING_REGULATOR = 0,
- RT_LDO_REGULATOR = 1,
-};
-
-/* Description: Determine the types of C2H events that are the same in driver and Fw. */
-/* First constructed by tynli. 2009.10.09. */
-enum {
- C2H_DBG = 0,
- C2H_TSF = 1,
- C2H_AP_RPT_RSP = 2,
- C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific tx packet. */
- C2H_BT_RSSI = 4,
- C2H_BT_OP_MODE = 5,
- C2H_EXT_RA_RPT = 6,
- C2H_HW_INFO_EXCH = 10,
- C2H_C2H_H2C_TEST = 11,
- C2H_BT_INFO = 12,
- C2H_BT_MP_INFO = 15,
- MAX_C2HEVENT
-};
-
-struct hal_data_8723a {
- struct hal_version VersionID;
- enum rt_customer_id CustomerID;
-
- u16 FirmwareVersion;
- u16 FirmwareVersionRev;
- u16 FirmwareSubVersion;
- u16 FirmwareSignature;
-
- /* current WIFI_PHY values */
- u32 ReceiveConfig;
- enum WIRELESS_MODE CurrentWirelessMode;
- enum ht_channel_width CurrentChannelBW;
- u8 CurrentChannel;
- u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
-
- u16 BasicRateSet;
-
- /* rf_ctrl */
- u8 rf_type;
- u8 NumTotalRFPath;
-
- u8 BoardType;
- u8 CrystalCap;
- /* */
- /* EEPROM setting. */
- /* */
- u8 EEPROMVersion;
- u8 EEPROMCustomerID;
- u8 EEPROMSubCustomerID;
- u8 EEPROMRegulatory;
- u8 EEPROMThermalMeter;
- u8 EEPROMBluetoothCoexist;
- u8 EEPROMBluetoothType;
- u8 EEPROMBluetoothAntNum;
- u8 EEPROMBluetoothAntIsolation;
- u8 EEPROMBluetoothRadioShared;
-
- u8 bTXPowerDataReadFromEEPORM;
- u8 bAPKThermalMeterIgnore;
-
- u8 bIQKInitialized;
- u8 bAntennaDetected;
-
- u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
- u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
- u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];/* HT 20<->40 Pwr diff */
- u8 TxPwrLegacyHtDiff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];/* For HT<->legacy pwr diff */
- /* For power group */
- u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
-
- u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
-
- /* Read/write access is allowed for the following hardware information variables */
- u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- u8 pwrGroupCnt;
- u32 MCSTxPowerLevelOriginalOffset[7][16];
- u32 CCKTxPowerLevelOriginalOffset;
-
- u32 AntennaTxPath; /* Antenna path Tx */
- u32 AntennaRxPath; /* Antenna path Rx */
- u8 ExternalPA;
-
- u8 bLedOpenDrain; /* Support Open-drain arrangement for controlling the LED. Added by Roger, 2009.10.16. */
-
- u8 b1x1RecvCombine; /* for 1T1R receive combining */
-
- /* For EDCA Turbo mode */
-
- u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
-
- /* vivi, for tx power tracking, 20080407 */
- /* u16 TSSI_13dBm; */
- /* u32 Pwr_Track; */
- /* The current Tx Power Level */
- u8 CurrentCckTxPwrIdx;
- u8 CurrentOfdm24GTxPwrIdx;
-
- struct bb_reg_define PHYRegDef[4]; /* Radio A/B/C/D */
-
- bool bRFPathRxEnable[4]; /* We support 4 RF paths now. */
-
- u32 RfRegChnlVal[2];
-
- u8 bCckHighPower;
-
- /* RDG enable */
- bool bRDGEnable;
-
- /* for host message to fw */
- u8 LastHMEBoxNum;
-
- u8 RegTxPause;
- /* Beacon function related global variable. */
- u8 RegFwHwTxQCtrl;
- u8 RegReg542;
-
- struct dm_priv dmpriv;
- struct dm_odm_t odmpriv;
- struct sreset_priv srestpriv;
-
-#ifdef CONFIG_8723AU_BT_COEXIST
- u8 bBTMode;
- /* BT only. */
- struct bt_30info BtInfo;
- /* For Bluetooth co-existence */
- struct bt_coexist_str bt_coexist;
-#endif
-
- u8 bDumpRxPkt;/* for debug */
- u8 FwRsvdPageStartOffset; /* 2010.06.23. Added by tynli. Reserve page start offset except beacon in TxQ. */
-
- /* 2010/08/09 MH Add CU power down mode. */
- u8 pwrdown;
-
- u8 OutEpQueueSel;
- u8 OutEpNumber;
-
- /* */
- /* Add For EEPROM Efuse switch and Efuse Shadow map Setting */
- /* */
- u8 EepromOrEfuse;
- u16 EfuseUsedBytes;
- u16 BTEfuseUsedBytes;
-
- /* Interrupt related register information. */
- u32 SysIntrStatus;
- u32 SysIntrMask;
-
- /* */
- /* 2011/02/23 MH Add for 8723 multi-function definition. The define should be moved to an */
- /* independent file in the future. */
- /* */
- /* 8723-----------------------------------------*/
- enum RT_MULTI_FUNC MultiFunc; /* For multi-function consideration. */
- enum RT_POLARITY_CTL PolarityCtl; /* For Wifi PDn Polarity control. */
- enum RT_REGULATOR_MODE RegulatorMode; /* switching regulator or LDO */
- /* 8723-----------------------------------------
- * 2011/02/23 MH Add for 8723 multi-function definition. The define should be moved to an */
- /* independent file in the future. */
-
- /* Interrupt related register information. */
- u32 IntArray[2];
- u32 IntrMask[2];
-};
-
-#define GET_HAL_DATA(__pAdapter) ((struct hal_data_8723a *)((__pAdapter)->HalData))
-#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
-
-#define INCLUDE_MULTI_FUNC_BT(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
-#define INCLUDE_MULTI_FUNC_GPS(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
-
-struct rxreport_8723a {
- u32 pktlen:14;
- u32 crc32:1;
- u32 icverr:1;
- u32 drvinfosize:4;
- u32 security:3;
- u32 qos:1;
- u32 shift:2;
- u32 physt:1;
- u32 swdec:1;
- u32 ls:1;
- u32 fs:1;
- u32 eor:1;
- u32 own:1;
-
- u32 macid:5;
- u32 tid:4;
- u32 hwrsvd:4;
- u32 amsdu:1;
- u32 paggr:1;
- u32 faggr:1;
- u32 a1fit:4;
- u32 a2fit:4;
- u32 pam:1;
- u32 pwr:1;
- u32 md:1;
- u32 mf:1;
- u32 type:2;
- u32 mc:1;
- u32 bc:1;
-
- u32 seq:12;
- u32 frag:4;
- u32 nextpktlen:14;
- u32 nextind:1;
- u32 rsvd0831:1;
-
- u32 rxmcs:6;
- u32 rxht:1;
- u32 gf:1;
- u32 splcp:1;
- u32 bw:1;
- u32 htc:1;
- u32 eosp:1;
- u32 bssidfit:2;
- u32 rsvd1214:16;
- u32 unicastwake:1;
- u32 magicwake:1;
-
- u32 pattern0match:1;
- u32 pattern1match:1;
- u32 pattern2match:1;
- u32 pattern3match:1;
- u32 pattern4match:1;
- u32 pattern5match:1;
- u32 pattern6match:1;
- u32 pattern7match:1;
- u32 pattern8match:1;
- u32 pattern9match:1;
- u32 patternamatch:1;
- u32 patternbmatch:1;
- u32 patterncmatch:1;
- u32 rsvd1613:19;
-
- u32 tsfl;
-
- u32 bassn:12;
- u32 bavld:1;
- u32 rsvd2413:19;
-};
-
-/* rtl8723a_hal_init.c */
-s32 rtl8723a_FirmwareDownload(struct rtw_adapter *padapter);
-void rtl8723a_FirmwareSelfReset(struct rtw_adapter *padapter);
-void rtl8723a_InitializeFirmwareVars(struct rtw_adapter *padapter);
-
-void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter);
-void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter);
-void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter);
-void rtl8723a_init_default_value(struct rtw_adapter *padapter);
-
-s32 InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary);
-
-s32 CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU);
-s32 CardDisableWithoutHWSM(struct rtw_adapter *padapter);
-
-/* EFuse */
-u8 GetEEPROMSize8723A(struct rtw_adapter *padapter);
-void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent);
-void Hal_EfuseParseIDCode(struct rtw_adapter *padapter, u8 *hwinfo);
-void Hal_EfuseParsetxpowerinfo_8723A(struct rtw_adapter *padapter, u8 *PROMContent, bool AutoLoadFail);
-void Hal_EfuseParseBTCoexistInfo_8723A(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseEEPROMVer(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void rtl8723a_EfuseParseChnlPlan(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseCustomerID(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseAntennaDiversity(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter, u8 *hwinfo, u8 AutoLoadFail);
-void Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-
-/* register */
-void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits);
-void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter);
-
-void rtl8723a_start_thread(struct rtw_adapter *padapter);
-void rtl8723a_stop_thread(struct rtw_adapter *padapter);
-
-bool c2h_id_filter_ccx_8723a(u8 id);
-int c2h_handler_8723a(struct rtw_adapter *padapter, struct c2h_evt_hdr *c2h_evt);
-
-void rtl8723a_read_adapter_info(struct rtw_adapter *Adapter);
-void rtl8723a_read_chip_version(struct rtw_adapter *padapter);
-void rtl8723a_notch_filter(struct rtw_adapter *adapter, bool enable);
-void rtl8723a_SetBeaconRelatedRegisters(struct rtw_adapter *padapter);
-void rtl8723a_SetHalODMVar(struct rtw_adapter *Adapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet);
-void
-rtl8723a_readefuse(struct rtw_adapter *padapter,
- u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf);
-u16 rtl8723a_EfuseGetCurrentSize_WiFi(struct rtw_adapter *padapter);
-u16 rtl8723a_EfuseGetCurrentSize_BT(struct rtw_adapter *padapter);
-void rtl8723a_update_ramask(struct rtw_adapter *padapter,
- u32 mac_id, u8 rssi_level);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_pg.h b/drivers/staging/rtl8723au/include/rtl8723a_pg.h
deleted file mode 100644
index 5c2ec448e568..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_pg.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_PG_H__
-#define __RTL8723A_PG_H__
-
-/* EEPROM/Efuse PG Offset for 8723E/8723U/8723S */
-#define EEPROM_CCK_TX_PWR_INX_8723A 0x10
-#define EEPROM_HT40_1S_TX_PWR_INX_8723A 0x16
-#define EEPROM_HT20_TX_PWR_INX_DIFF_8723A 0x1C
-#define EEPROM_OFDM_TX_PWR_INX_DIFF_8723A 0x1F
-#define EEPROM_HT40_MAX_PWR_OFFSET_8723A 0x22
-#define EEPROM_HT20_MAX_PWR_OFFSET_8723A 0x25
-
-#define EEPROM_ChannelPlan_8723A 0x28
-#define EEPROM_TSSI_A_8723A 0x29
-#define EEPROM_THERMAL_METER_8723A 0x2A
-#define RF_OPTION1_8723A 0x2B
-#define RF_OPTION2_8723A 0x2C
-#define RF_OPTION3_8723A 0x2D
-#define RF_OPTION4_8723A 0x2E
-#define EEPROM_VERSION_8723A 0x30
-#define EEPROM_CustomID_8723A 0x31
-#define EEPROM_SubCustomID_8723A 0x32
-#define EEPROM_XTAL_K_8723A 0x33
-#define EEPROM_Chipset_8723A 0x34
-
-/* RTL8723AE */
-#define EEPROM_VID_8723AE 0x49
-#define EEPROM_DID_8723AE 0x4B
-#define EEPROM_SVID_8723AE 0x4D
-#define EEPROM_SMID_8723AE 0x4F
-#define EEPROM_MAC_ADDR_8723AE 0x67
-
-/* RTL8723AU */
-#define EEPROM_MAC_ADDR_8723AU 0xC6
-#define EEPROM_VID_8723AU 0xB7
-#define EEPROM_PID_8723AU 0xB9
-
-/* RTL8723AS */
-#define EEPROM_MAC_ADDR_8723AS 0xAA
-
-/* EEPROM/Efuse Value Type */
-#define EETYPE_TX_PWR 0x0
-
-/* EEPROM/Efuse Default Value */
-#define EEPROM_Default_CrystalCap_8723A 0x20
-
-
-/* EEPROM/EFUSE data structure definition. */
-#define MAX_CHNL_GROUP 3+9
-
-struct txpowerinfo {
- u8 CCKIndex[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 HT40_1SIndex[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 HT40_2SIndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 HT20IndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 OFDMIndexDiff[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 HT40MaxOffset[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 HT20MaxOffset[RF_PATH_MAX][MAX_CHNL_GROUP];
- u8 TSSI_A[3];
- u8 TSSI_B[3];
- u8 TSSI_A_5G[3]; /* 5GL/5GM/5GH */
- u8 TSSI_B_5G[3];
-};
-
-enum bt_ant_num {
- Ant_x2 = 0,
- Ant_x1 = 1
-};
-
-enum bt_cotype {
- BT_2Wire = 0,
- BT_ISSC_3Wire = 1,
- BT_Accel = 2,
- BT_CSR_BC4 = 3,
- BT_CSR_BC8 = 4,
- BT_RTL8756 = 5,
- BT_RTL8723A = 6
-};
-
-enum bt_radioshared {
- BT_Radio_Shared = 0,
- BT_Radio_Individual = 1,
-};
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_recv.h b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
deleted file mode 100644
index 875d37b3b94c..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_recv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_RECV_H__
-#define __RTL8723A_RECV_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define NR_RECVBUFF 4
-
-#define NR_PREALLOC_RECV_SKB 8
-
-#define RECV_BLK_SZ 512
-#define RECV_BLK_CNT 16
-#define RECV_BLK_TH RECV_BLK_CNT
-
-#define MAX_RECVBUF_SZ 15360 /* 15k < 16k */
-
-#define PHY_RSSI_SLID_WIN_MAX 100
-#define PHY_LINKQUALITY_SLID_WIN_MAX 20
-
-
-struct phy_stat {
- unsigned int phydw0;
- unsigned int phydw1;
- unsigned int phydw2;
- unsigned int phydw3;
- unsigned int phydw4;
- unsigned int phydw5;
- unsigned int phydw6;
- unsigned int phydw7;
-};
-
-/* Rx smooth factor */
-#define Rx_Smooth_Factor 20
-
-struct interrupt_msg_format {
- unsigned int C2H_MSG0;
- unsigned int C2H_MSG1;
- unsigned int C2H_MSG2;
- unsigned int C2H_MSG3;
- unsigned int HISR; /* from HISR Reg0x124, read to clear */
- unsigned int HISRE;/* from HISRE Reg0x12c, read to clear */
- unsigned int MSG_EX;
-};
-
-int rtl8723au_init_recv_priv(struct rtw_adapter *padapter);
-void rtl8723au_free_recv_priv(struct rtw_adapter *padapter);
-void rtl8723a_process_phy_info(struct rtw_adapter *padapter, void *prframe);
-void update_recvframe_attrib(struct recv_frame *precvframe, struct recv_stat *prxstat);
-void update_recvframe_phyinfo(struct recv_frame *precvframe, struct phy_stat *pphy_info);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_rf.h b/drivers/staging/rtl8723au/include/rtl8723a_rf.h
deleted file mode 100644
index 0432799f53cf..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_rf.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_RF_H__
-#define __RTL8723A_RF_H__
-
-/*--------------------------Define Parameters-------------------------------*/
-
-/* */
-/* For RF 6052 Series */
-/* */
-#define RF6052_MAX_TX_PWR 0x3F
-#define RF6052_MAX_REG 0x3F
-#define RF6052_MAX_PATH 2
-/*--------------------------Define Parameters-------------------------------*/
-
-
-/*------------------------------Define structure----------------------------*/
-
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-
-
-/*--------------------------Exported Function prototype---------------------*/
-
-/* */
-/* RF RL6052 Series API */
-/* */
-void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter,
- enum ht_channel_width Bandwidth);
-void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter,
- u8 *pPowerlevel);
-void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter,
- u8 *pPowerLevel, u8 Channel);
-
-/*--------------------------Exported Function prototype---------------------*/
-
-int PHY_RF6052_Config8723A(struct rtw_adapter *Adapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_spec.h b/drivers/staging/rtl8723au/include/rtl8723a_spec.h
deleted file mode 100644
index 2f186890dbb5..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_spec.h
+++ /dev/null
@@ -1,2148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *******************************************************************************/
-#ifndef __RTL8723A_SPEC_H__
-#define __RTL8723A_SPEC_H__
-
-/* */
-/* */
-/* 0x0000h ~ 0x00FFh System Configuration */
-/* */
-/* */
-#define REG_SYS_ISO_CTRL 0x0000
-#define REG_SYS_FUNC_EN 0x0002
-#define REG_APS_FSMCO 0x0004
-#define REG_SYS_CLKR 0x0008
-#define REG_9346CR 0x000A
-#define REG_EE_VPD 0x000C
-#define REG_AFE_MISC 0x0010
-#define REG_SPS0_CTRL 0x0011
-#define REG_SPS_OCP_CFG 0x0018
-#define REG_RSV_CTRL 0x001C
-#define REG_RF_CTRL 0x001F
-#define REG_LDOA15_CTRL 0x0020
-#define REG_LDOV12D_CTRL 0x0021
-#define REG_LDOHCI12_CTRL 0x0022
-#define REG_LPLDO_CTRL 0x0023
-#define REG_AFE_XTAL_CTRL 0x0024
-#define REG_AFE_PLL_CTRL 0x0028
-#define REG_MAC_PHY_CTRL 0x002c
-#define REG_EFUSE_CTRL 0x0030
-#define REG_EFUSE_TEST 0x0034
-#define REG_PWR_DATA 0x0038
-#define REG_CAL_TIMER 0x003C
-#define REG_ACLK_MON 0x003E
-#define REG_GPIO_MUXCFG 0x0040
-#define REG_GPIO_IO_SEL 0x0042
-#define REG_MAC_PINMUX_CFG 0x0043
-#define REG_GPIO_PIN_CTRL 0x0044
-#define REG_GPIO_INTM 0x0048
-#define REG_LEDCFG0 0x004C
-#define REG_LEDCFG1 0x004D
-#define REG_LEDCFG2 0x004E
-#define REG_LEDCFG3 0x004F
-#define REG_LEDCFG REG_LEDCFG2
-#define REG_FSIMR 0x0050
-#define REG_FSISR 0x0054
-#define REG_HSIMR 0x0058
-#define REG_HSISR 0x005c
- /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
-#define REG_GPIO_PIN_CTRL_2 0x0060
- /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
-#define REG_GPIO_IO_SEL_2 0x0062
- /* RTL8723 WIFI/BT/GPS Multi-Function control source. */
-#define REG_MULTI_FUNC_CTRL 0x0068
-#define REG_MCUFWDL 0x0080
-#define REG_HMEBOX_EXT_0 0x0088
-#define REG_HMEBOX_EXT_1 0x008A
-#define REG_HMEBOX_EXT_2 0x008C
-#define REG_HMEBOX_EXT_3 0x008E
- /* Host suspend counter on FPGA platform */
-#define REG_HOST_SUSP_CNT 0x00BC
- /* Efuse access protection for RTL8723 */
-#define REG_EFUSE_ACCESS 0x00CF
-#define REG_BIST_SCAN 0x00D0
-#define REG_BIST_RPT 0x00D4
-#define REG_BIST_ROM_RPT 0x00D8
-#define REG_USB_SIE_INTF 0x00E0
-#define REG_PCIE_MIO_INTF 0x00E4
-#define REG_PCIE_MIO_INTD 0x00E8
-#define REG_HPON_FSM 0x00EC
-#define REG_SYS_CFG 0x00F0
-#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
-
-/* */
-/* */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-/* */
-/* */
-#define REG_CR 0x0100
-#define REG_PBP 0x0104
-#define REG_TRXDMA_CTRL 0x010C
-#define REG_TRXFF_BNDY 0x0114
-#define REG_TRXFF_STATUS 0x0118
-#define REG_RXFF_PTR 0x011C
-#define REG_HIMR 0x0120
-#define REG_HISR 0x0124
-#define REG_HIMRE 0x0128
-#define REG_HISRE 0x012C
-#define REG_CPWM 0x012F
-#define REG_FWIMR 0x0130
-#define REG_FWISR 0x0134
-#define REG_PKTBUF_DBG_CTRL 0x0140
-#define REG_PKTBUF_DBG_DATA_L 0x0144
-#define REG_PKTBUF_DBG_DATA_H 0x0148
-
-#define REG_TC0_CTRL 0x0150
-#define REG_TC1_CTRL 0x0154
-#define REG_TC2_CTRL 0x0158
-#define REG_TC3_CTRL 0x015C
-#define REG_TC4_CTRL 0x0160
-#define REG_TCUNIT_BASE 0x0164
-#define REG_MBIST_START 0x0174
-#define REG_MBIST_DONE 0x0178
-#define REG_MBIST_FAIL 0x017C
-#define REG_C2HEVT_MSG_NORMAL 0x01A0
-#define REG_C2HEVT_CLEAR 0x01AF
-#define REG_C2HEVT_MSG_TEST 0x01B8
-#define REG_MCUTST_1 0x01c0
-#define REG_FMETHR 0x01C8
-#define REG_HMETFR 0x01CC
-#define REG_HMEBOX_0 0x01D0
-#define REG_HMEBOX_1 0x01D4
-#define REG_HMEBOX_2 0x01D8
-#define REG_HMEBOX_3 0x01DC
-
-#define REG_LLT_INIT 0x01E0
-#define REG_BB_ACCEESS_CTRL 0x01E8
-#define REG_BB_ACCESS_DATA 0x01EC
-
-
-/* */
-/* */
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* */
-/* */
-#define REG_RQPN 0x0200
-#define REG_FIFOPAGE 0x0204
-#define REG_TDECTRL 0x0208
-#define REG_TXDMA_OFFSET_CHK 0x020C
-#define REG_TXDMA_STATUS 0x0210
-#define REG_RQPN_NPQ 0x0214
-
-/* */
-/* */
-/* 0x0280h ~ 0x02FFh RXDMA Configuration */
-/* */
-/* */
-#define REG_RXDMA_AGG_PG_TH 0x0280
-#define REG_RXPKT_NUM 0x0284
-#define REG_RXDMA_STATUS 0x0288
-
-
-/* */
-/* */
-/* 0x0300h ~ 0x03FFh PCIe */
-/* */
-/* */
-#define REG_PCIE_CTRL_REG 0x0300
-#define REG_INT_MIG 0x0304 /* Interrupt Migration */
- /* TX Beacon Descriptor Address */
-#define REG_BCNQ_DESA 0x0308
- /* TX High Queue Descriptor Address */
-#define REG_HQ_DESA 0x0310
- /* TX Manage Queue Descriptor Address */
-#define REG_MGQ_DESA 0x0318
- /* TX VO Queue Descriptor Address */
-#define REG_VOQ_DESA 0x0320
- /* TX VI Queue Descriptor Address */
-#define REG_VIQ_DESA 0x0328
- /* TX BE Queue Descriptor Address */
-#define REG_BEQ_DESA 0x0330
- /* TX BK Queue Descriptor Address */
-#define REG_BKQ_DESA 0x0338
- /* RX Queue Descriptor Address */
-#define REG_RX_DESA 0x0340
- /* Backdoor REG for Access Configuration */
-#define REG_DBI 0x0348
- /* MDIO for Access PCIE PHY */
-#define REG_MDIO 0x0354
- /* Debug Selection Register */
-#define REG_DBG_SEL 0x0360
- /* PCIe RPWM */
-#define REG_PCIE_HRPWM 0x0361
- /* PCIe CPWM */
-#define REG_PCIE_HCPWM 0x0363
- /* UART Control */
-#define REG_UART_CTRL 0x0364
- /* UART TX Descriptor Address */
-#define REG_UART_TX_DESA 0x0370
- /* UART Rx Descriptor Address */
-#define REG_UART_RX_DESA 0x0378
-
-
-/* spec version 11 */
-/* */
-/* */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-/* */
-/* */
-#define REG_VOQ_INFORMATION 0x0400
-#define REG_VIQ_INFORMATION 0x0404
-#define REG_BEQ_INFORMATION 0x0408
-#define REG_BKQ_INFORMATION 0x040C
-#define REG_MGQ_INFORMATION 0x0410
-#define REG_HGQ_INFORMATION 0x0414
-#define REG_BCNQ_INFORMATION 0x0418
-
-
-#define REG_CPU_MGQ_INFORMATION 0x041C
-#define REG_FWHW_TXQ_CTRL 0x0420
-#define REG_HWSEQ_CTRL 0x0423
-#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
-#define REG_TXPKTBUF_MGQ_BDNY 0x0425
-#define REG_LIFETIME_EN 0x0426
-#define REG_MULTI_BCNQ_OFFSET 0x0427
-#define REG_SPEC_SIFS 0x0428
-#define REG_RL 0x042A
-#define REG_DARFRC 0x0430
-#define REG_RARFRC 0x0438
-#define REG_RRSR 0x0440
-#define REG_ARFR0 0x0444
-#define REG_ARFR1 0x0448
-#define REG_ARFR2 0x044C
-#define REG_ARFR3 0x0450
-#define REG_AGGLEN_LMT 0x0458
-#define REG_AMPDU_MIN_SPACE 0x045C
-#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
-#define REG_FAST_EDCA_CTRL 0x0460
-#define REG_RD_RESP_PKT_TH 0x0463
-#define REG_INIRTS_RATE_SEL 0x0480
-#define REG_INIDATA_RATE_SEL 0x0484
-
-
-#define REG_POWER_STATUS 0x04A4
-#define REG_POWER_STAGE1 0x04B4
-#define REG_POWER_STAGE2 0x04B8
-#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
-#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
-#define REG_STBC_SETTING 0x04C4
-#define REG_PROT_MODE_CTRL 0x04C8
-#define REG_MAX_AGGR_NUM 0x04CA
-#define REG_RTS_MAX_AGGR_NUM 0x04CB
-#define REG_BAR_MODE_CTRL 0x04CC
-#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
-#define REG_NQOS_SEQ 0x04DC
-#define REG_QOS_SEQ 0x04DE
-#define REG_NEED_CPU_HANDLE 0x04E0
-#define REG_PKT_LOSE_RPT 0x04E1
-#define REG_PTCL_ERR_STATUS 0x04E2
-#define REG_DUMMY 0x04FC
-
-
-
-/* */
-/* */
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-/* */
-/* */
-#define REG_EDCA_VO_PARAM 0x0500
-#define REG_EDCA_VI_PARAM 0x0504
-#define REG_EDCA_BE_PARAM 0x0508
-#define REG_EDCA_BK_PARAM 0x050C
-#define REG_BCNTCFG 0x0510
-#define REG_PIFS 0x0512
-#define REG_RDG_PIFS 0x0513
-#define REG_SIFS_CCK 0x0514
-#define REG_SIFS_OFDM 0x0516
-#define REG_SIFS_CTX 0x0514
-#define REG_SIFS_TRX 0x0516
-#define REG_TSFTR_SYN_OFFSET 0x0518
-#define REG_AGGR_BREAK_TIME 0x051A
-#define REG_SLOT 0x051B
-#define REG_TX_PTCL_CTRL 0x0520
-#define REG_TXPAUSE 0x0522
-#define REG_DIS_TXREQ_CLR 0x0523
-#define REG_RD_CTRL 0x0524
-#define REG_TBTT_PROHIBIT 0x0540
-#define REG_RD_NAV_NXT 0x0544
-#define REG_NAV_PROT_LEN 0x0546
-#define REG_BCN_CTRL 0x0550
-#define REG_BCN_CTRL_1 0x0551
-#define REG_MBID_NUM 0x0552
-#define REG_DUAL_TSF_RST 0x0553
- /* The same as REG_MBSSID_BCN_SPACE */
-#define REG_BCN_INTERVAL 0x0554
-#define REG_MBSSID_BCN_SPACE 0x0554
-#define REG_DRVERLYINT 0x0558
-#define REG_BCNDMATIM 0x0559
-#define REG_ATIMWND 0x055A
-#define REG_BCN_MAX_ERR 0x055D
-#define REG_RXTSF_OFFSET_CCK 0x055E
-#define REG_RXTSF_OFFSET_OFDM 0x055F
-#define REG_TSFTR 0x0560
-#define REG_TSFTR1 0x0568
-#define REG_INIT_TSFTR 0x0564
-#define REG_ATIMWND_1 0x0570
-#define REG_PSTIMER 0x0580
-#define REG_TIMER0 0x0584
-#define REG_TIMER1 0x0588
-#define REG_ACMHWCTRL 0x05C0
-#define REG_ACMRSTCTRL 0x05C1
-#define REG_ACMAVG 0x05C2
-#define REG_VO_ADMTIME 0x05C4
-#define REG_VI_ADMTIME 0x05C6
-#define REG_BE_ADMTIME 0x05C8
-#define REG_EDCA_RANDOM_GEN 0x05CC
-#define REG_SCH_TXCMD 0x05D0
-
-/* define REG_FW_TSF_SYNC_CNT 0x04A0 */
-#define REG_FW_RESET_TSF_CNT_1 0x05FC
-#define REG_FW_RESET_TSF_CNT_0 0x05FD
-#define REG_FW_BCN_DIS_CNT 0x05FE
-
-/* */
-/* */
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-/* */
-/* */
-#define REG_APSD_CTRL 0x0600
-#define REG_BWOPMODE 0x0603
-#define REG_TCR 0x0604
-#define REG_RCR 0x0608
-#define REG_RX_PKT_LIMIT 0x060C
-#define REG_RX_DLK_TIME 0x060D
-#define REG_RX_DRVINFO_SZ 0x060F
-
-#define REG_MACID 0x0610
-#define REG_BSSID 0x0618
-#define REG_MAR 0x0620
-#define REG_MBIDCAMCFG 0x0628
-
-#define REG_USTIME_EDCA 0x0638
-#define REG_MAC_SPEC_SIFS 0x063A
-
-/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
- /* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
-#define REG_R2T_SIFS 0x063C
- /* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
-#define REG_T2T_SIFS 0x063E
-#define REG_ACKTO 0x0640
-#define REG_CTS2TO 0x0641
-#define REG_EIFS 0x0642
-
-/* WMA, BA, CCX */
-#define REG_NAV_CTRL 0x0650
-#define REG_BACAMCMD 0x0654
-#define REG_BACAMCONTENT 0x0658
-#define REG_LBDLY 0x0660
-#define REG_FWDLY 0x0661
-#define REG_RXERR_RPT 0x0664
-#define REG_WMAC_TRXPTCL_CTL 0x0668
-
-
-/* Security */
-#define REG_CAMCMD 0x0670
-#define REG_CAMWRITE 0x0674
-#define REG_CAMREAD 0x0678
-#define REG_CAMDBG 0x067C
-#define REG_SECCFG 0x0680
-
-/* Power */
-#define REG_WOW_CTRL 0x0690
-#define REG_PSSTATUS 0x0691
-#define REG_PS_RX_INFO 0x0692
-#define REG_LPNAV_CTRL 0x0694
-#define REG_WKFMCAM_CMD 0x0698
-#define REG_WKFMCAM_RWD 0x069C
-#define REG_RXFLTMAP0 0x06A0
-#define REG_RXFLTMAP1 0x06A2
-#define REG_RXFLTMAP2 0x06A4
-#define REG_BCN_PSR_RPT 0x06A8
-#define REG_CALB32K_CTRL 0x06AC
-#define REG_PKT_MON_CTRL 0x06B4
-#define REG_BT_COEX_TABLE 0x06C0
-#define REG_WMAC_RESP_TXINFO 0x06D8
-
-#define REG_MACID1 0x0700
-#define REG_BSSID1 0x0708
-
-
-/* */
-/* */
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-/* */
-/* */
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
-/* For test chip */
-#define REG_TEST_USB_TXQS 0xFE48
-#define REG_TEST_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
-#define REG_TEST_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
-#define REG_TEST_SIE_OPTIONAL 0xFE64
-#define REG_TEST_SIE_CHIRP_K 0xFE65
-#define REG_TEST_SIE_PHY 0xFE66 /* 0xFE66~0xFE6B */
-#define REG_TEST_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
-#define REG_TEST_SIE_STRING 0xFE80 /* 0xFE80~0xFEB9 */
-
-
-/* For normal chip */
-#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
-#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
-#define REG_NORMAL_SIE_OPTIONAL 0xFE64
-#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
-#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
-#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
-#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* RTL8723 only */
-#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
-#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
-
-
-/* */
-/* */
-/* Redefine 8192C register definition for compatibility */
-/* */
-/* */
-
-/* TODO: use these definitions when following the REG_xxx naming rule. */
-/* NOTE: DO NOT remove these definitions; they are kept for later use. */
-
- /* System Isolation Interface Control. */
-#define SYS_ISO_CTRL REG_SYS_ISO_CTRL
- /* System Function Enable. */
-#define SYS_FUNC_EN REG_SYS_FUNC_EN
-#define SYS_CLK REG_SYS_CLKR
- /* 93C46/93C56 Command Register. */
-#define CR9346 REG_9346CR
- /* E-Fuse Control. */
-#define EFUSE_CTRL REG_EFUSE_CTRL
- /* E-Fuse Test. */
-#define EFUSE_TEST REG_EFUSE_TEST
- /* Media Status register */
-#define MSR (REG_CR + 2)
-#define ISR REG_HISR
- /* Timing Sync Function Timer Register. */
-#define TSFR REG_TSFTR
-
- /* MAC ID Register, Offset 0x0050-0x0053 */
-#define MACIDR0 REG_MACID
- /* MAC ID Register, Offset 0x0054-0x0055 */
-#define MACIDR4 (REG_MACID + 4)
-
-#define PBP REG_PBP
-
- /* Redefine MACID register, to be compatible with prior ICs. */
-#define IDR0 MACIDR0
-#define IDR4 MACIDR4
-
-
-/* */
-/* 9. Security Control Registers (Offset: ) */
-/* */
- /* Software write CAM input content */
-#define WCAMI REG_CAMWRITE
- /* Software read/write CAM config */
-#define RCAMO REG_CAMREAD
-#define CAMDBG REG_CAMDBG
- /* Security Configuration Register */
-#define SECR REG_SECCFG
-
-/* Unused register */
-#define UnusedRegister 0x1BF
-#define DCAM UnusedRegister
-#define PSR UnusedRegister
-#define BBAddr UnusedRegister
-#define PhyDataR UnusedRegister
-
-#define InvalidBBRFValue 0x12345678
-
-/* Min Spacing related settings. */
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
-/* */
-/* 8192C Cmd9346CR bits (Offset 0xA, 16bit) */
-/* */
- /* EEPROM enable when set 1 */
-#define CmdEEPROM_En BIT(5)
- /* System EEPROM select, 0: boot from E-FUSE,
- 1: The EEPROM used is 9346 */
-#define CmdEERPOMSEL BIT(4)
-#define Cmd9346CR_9356SEL BIT(4)
-#define AutoLoadEEPROM (CmdEEPROM_En|CmdEERPOMSEL)
-#define AutoLoadEFUSE CmdEEPROM_En
-
-/* */
-/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
-/* */
-#define GPIOSEL_GPIO 0
-#define GPIOSEL_ENBT BIT(5)
-
-/* */
-/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
-/* */
- /* GPIO pins input value */
-#define GPIO_IN REG_GPIO_PIN_CTRL
- /* GPIO pins output value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
- /* GPIO pins output enable when a bit is set to "1";
- otherwise, input is configured. */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
-
-/* */
-/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-/* */
-/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
-#define MSR_NOLINK 0x00
-#define MSR_ADHOC 0x01
-#define MSR_INFRA 0x02
-#define MSR_AP 0x03
-
-/* */
-/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
-/* */
-/* */
-/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
-/* */
-#define RRSR_RSC_OFFSET 21
-#define RRSR_SHORT_OFFSET 23
-#define RRSR_RSC_BW_40M 0x600000
-#define RRSR_RSC_UPSUBCHNL 0x400000
-#define RRSR_RSC_LOWSUBCHNL 0x200000
-#define RRSR_SHORT 0x800000
-#define RRSR_1M BIT(0)
-#define RRSR_2M BIT(1)
-#define RRSR_5_5M BIT(2)
-#define RRSR_11M BIT(3)
-#define RRSR_6M BIT(4)
-#define RRSR_9M BIT(5)
-#define RRSR_12M BIT(6)
-#define RRSR_18M BIT(7)
-#define RRSR_24M BIT(8)
-#define RRSR_36M BIT(9)
-#define RRSR_48M BIT(10)
-#define RRSR_54M BIT(11)
-#define RRSR_MCS0 BIT(12)
-#define RRSR_MCS1 BIT(13)
-#define RRSR_MCS2 BIT(14)
-#define RRSR_MCS3 BIT(15)
-#define RRSR_MCS4 BIT(16)
-#define RRSR_MCS5 BIT(17)
-#define RRSR_MCS6 BIT(18)
-#define RRSR_MCS7 BIT(19)
-#define BRSR_AckShortPmb BIT(23)
-/* CCK ACK: use Short Preamble or not */
-
-/* */
-/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
-/* */
-#define BW_OPMODE_20MHZ BIT(2)
-#define BW_OPMODE_5G BIT(1)
-#define BW_OPMODE_11J BIT(0)
-
-
-/* */
-/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
-/* */
-#define CAM_VALID BIT(15)
-#define CAM_NOTVALID 0x0000
-#define CAM_USEDK BIT(5)
-
-#define CAM_CONTENT_COUNT 8
-
-#define CAM_NONE 0x0
-#define CAM_WEP40 0x01
-#define CAM_TKIP 0x02
-#define CAM_AES 0x04
-#define CAM_WEP104 0x05
-
-#define TOTAL_CAM_ENTRY 32
-#define HALF_CAM_ENTRY 16
-
-#define CAM_CONFIG_USEDK true
-#define CAM_CONFIG_NO_USEDK false
-
-#define CAM_WRITE BIT(16)
-#define CAM_READ 0x00000000
-#define CAM_POLLINIG BIT(31)
-
-#define SCR_UseDK 0x01
-#define SCR_TxSecEnable 0x02
-#define SCR_RxSecEnable 0x04
-
-
-/* */
-/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
-/* */
-/* */
-/* 8190 IMR/ISR bits (offset 0xfd, 8bits) */
-/* */
-#define IMR8190_DISABLED 0x0
-/* IMR DW0 Bit 0-31 */
-
-#define IMR_BCNDMAINT6 BIT(31) /* Beacon DMA Interrupt 6 */
-#define IMR_BCNDMAINT5 BIT(30) /* Beacon DMA Interrupt 5 */
-#define IMR_BCNDMAINT4 BIT(29) /* Beacon DMA Interrupt 4 */
-#define IMR_BCNDMAINT3 BIT(28) /* Beacon DMA Interrupt 3 */
-#define IMR_BCNDMAINT2 BIT(27) /* Beacon DMA Interrupt 2 */
-#define IMR_BCNDMAINT1 BIT(26) /* Beacon DMA Interrupt 1 */
-#define IMR_BCNDOK8 BIT(25) /* Beacon Queue DMA OK
- Interrupt 8 */
-#define IMR_BCNDOK7 BIT(24) /* Beacon Queue DMA OK
- Interrupt 7 */
-#define IMR_BCNDOK6 BIT(23) /* Beacon Queue DMA OK
- Interrupt 6 */
-#define IMR_BCNDOK5 BIT(22) /* Beacon Queue DMA OK
- Interrupt 5 */
-#define IMR_BCNDOK4 BIT(21) /* Beacon Queue DMA OK
- Interrupt 4 */
-#define IMR_BCNDOK3 BIT(20) /* Beacon Queue DMA OK
- Interrupt 3 */
-#define IMR_BCNDOK2 BIT(19) /* Beacon Queue DMA OK
- Interrupt 2 */
-#define IMR_BCNDOK1 BIT(18) /* Beacon Queue DMA OK
- Interrupt 1 */
-#define IMR_TIMEOUT2 BIT(17) /* Timeout interrupt 2 */
-#define IMR_TIMEOUT1 BIT(16) /* Timeout interrupt 1 */
-#define IMR_TXFOVW BIT(15) /* Transmit FIFO Overflow */
-#define IMR_PSTIMEOUT BIT(14) /* Power save time out
- interrupt */
-#define IMR_BcnInt BIT(13) /* Beacon DMA Interrupt 0 */
-#define IMR_RXFOVW BIT(12) /* Receive FIFO Overflow */
-#define IMR_RDU BIT(11) /* Receive Descriptor
- Unavailable */
-#define IMR_ATIMEND BIT(10) /* For 92C,ATIM Window
- End Interrupt */
-#define IMR_BDOK BIT(9) /* Beacon Queue DMA OK
- Interrupt */
-#define IMR_HIGHDOK BIT(8) /* High Queue DMA OK
- Interrupt */
-#define IMR_TBDOK BIT(7) /* Transmit Beacon OK
- interrupt */
-#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK
- Interrupt */
-#define IMR_TBDER BIT(5) /* For 92C,Transmit Beacon
- Error Interrupt */
-#define IMR_BKDOK BIT(4) /* AC_BK DMA OK Interrupt */
-#define IMR_BEDOK BIT(3) /* AC_BE DMA OK Interrupt */
-#define IMR_VIDOK BIT(2) /* AC_VI DMA OK Interrupt */
-#define IMR_VODOK BIT(1) /* AC_VO DMA Interrupt */
-#define IMR_ROK BIT(0) /* Receive DMA OK Interrupt */
-
-#define IMR_RX_MASK (IMR_ROK|IMR_RDU|IMR_RXFOVW)
-#define IMR_TX_MASK (IMR_VODOK|IMR_VIDOK|IMR_BEDOK| \
- IMR_BKDOK|IMR_MGNTDOK|IMR_HIGHDOK| \
- IMR_BDOK)
-
-/* 13. Host Interrupt Status Extension Register (Offset: 0x012C-012Eh) */
-#define IMR_BcnInt_E BIT(12)
-#define IMR_TXERR BIT(11)
-#define IMR_RXERR BIT(10)
-#define IMR_C2HCMD BIT(9)
-#define IMR_CPWM BIT(8)
-/* RSVD [2-7] */
-#define IMR_OCPINT BIT(1)
-#define IMR_WLANOFF BIT(0)
-
-
-/* 8192C EEPROM/EFUSE share register definition. */
-
-/* Default Value for EEPROM or EFUSE!!! */
-#define EEPROM_Default_TSSI 0x0
-#define EEPROM_Default_TxPowerDiff 0x0
-#define EEPROM_Default_CrystalCap 0x5
- /* Default: 2X2, RTL8192CE(QFPN68) */
-#define EEPROM_Default_BoardType 0x02
-#define EEPROM_Default_TxPower 0x1010
-#define EEPROM_Default_HT2T_TxPwr 0x10
-
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_ThermalMeter 0x12
-
-#define EEPROM_Default_AntTxPowerDiff 0x0
-#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
-#define EEPROM_Default_TxPowerLevel 0x22
-#define EEPROM_Default_HT40_2SDiff 0x0
- /* HT20<->40 default Tx Power Index Difference */
-#define EEPROM_Default_HT20_Diff 2
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_HT40_PwrMaxOffset 0
-#define EEPROM_Default_HT20_PwrMaxOffset 0
-
-/* For debug */
-#define EEPROM_Default_PID 0x1234
-#define EEPROM_Default_VID 0x5678
-#define EEPROM_Default_CustomerID 0xAB
-#define EEPROM_Default_SubCustomerID 0xCD
-#define EEPROM_Default_Version 0
-
-#define EEPROM_CHANNEL_PLAN_FCC 0x0
-#define EEPROM_CHANNEL_PLAN_IC 0x1
-#define EEPROM_CHANNEL_PLAN_ETSI 0x2
-#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
-#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
-#define EEPROM_CHANNEL_PLAN_MKK 0x5
-#define EEPROM_CHANNEL_PLAN_MKK1 0x6
-#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
-#define EEPROM_CHANNEL_PLAN_TELEC 0x8
-#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
-#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
-#define EEPROM_CHANNEL_PLAN_NCC 0xB
-#define EEPROM_USB_OPTIONAL1 0xE
-#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
-
-
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_TOSHIBA 0x4
- /* CCX test. By Bruce, 2009-02-25. */
-#define EEPROM_CID_CCX 0x10
-#define EEPROM_CID_QMI 0x0D
- /* added by chiyoko for dtm, 20090108 */
-#define EEPROM_CID_WHQL 0xFE
-
-
-#define RTL_EEPROM_ID 0x8129
-
-#define SUPPORT_HW_RADIO_DETECT(pHalData) \
- (pHalData->BoardType == BOARD_MINICARD || \
- pHalData->BoardType == BOARD_USB_SOLO || \
- pHalData->BoardType == BOARD_USB_COMBO)
-
-/* */
-/* EEPROM address for Test chip */
-/* */
-#define EEPROM_TEST_USB_OPT 0x0E
-#define EEPROM_TEST_CHIRP_K 0x0F
-#define EEPROM_TEST_EP_SETTING 0x0E
-#define EEPROM_TEST_USB_PHY 0x10
-
-
-/* */
-/* EEPROM address for Normal chip */
-/* */
-#define EEPROM_NORMAL_USB_OPT 0x0E
-#define EEPROM_NORMAL_CHIRP_K 0x0E /* Changed */
-#define EEPROM_NORMAL_EP_SETTING 0x0F /* Changed */
-#define EEPROM_NORMAL_USB_PHY 0x12 /* Changed */
-
-enum {
- BOARD_USB_DONGLE = 0, /* USB dongle */
- BOARD_USB_High_PA = 1, /* USB dongle with high power PA */
- BOARD_MINICARD = 2, /* Minicard */
- BOARD_USB_SOLO = 3, /* USB solo-Slim module */
- BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
-};
-
-/* Test chip and normal chip common define */
-/* */
-/* EEPROM address for both */
-/* */
-#define EEPROM_ID0 0x00
-#define EEPROM_ID1 0x01
-#define EEPROM_RTK_RSV1 0x02
-#define EEPROM_RTK_RSV2 0x03
-#define EEPROM_RTK_RSV3 0x04
-#define EEPROM_RTK_RSV4 0x05
-#define EEPROM_RTK_RSV5 0x06
-#define EEPROM_DBG_SEL 0x07
-#define EEPROM_RTK_RSV6 0x08
-#define EEPROM_VID 0x0A
-#define EEPROM_PID 0x0C
-
-#define EEPROM_MAC_ADDR 0x16
-#define EEPROM_STRING 0x1C
-#define EEPROM_SUBCUSTOMER_ID 0x59
-#define EEPROM_CCK_TX_PWR_INX 0x5A
-#define EEPROM_HT40_1S_TX_PWR_INX 0x60
-#define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66
-#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69
-#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C
-#define EEPROM_HT40_MAX_PWR_OFFSET 0x6F
-#define EEPROM_HT20_MAX_PWR_OFFSET 0x72
-
-#define EEPROM_CHANNEL_PLAN 0x75
-#define EEPROM_TSSI_A 0x76
-#define EEPROM_TSSI_B 0x77
-#define EEPROM_THERMAL_METER 0x78
-#define EEPROM_RF_OPT1 0x79
-#define EEPROM_RF_OPT2 0x7A
-#define EEPROM_RF_OPT3 0x7B
-#define EEPROM_RF_OPT4 0x7C
-#define EEPROM_VERSION 0x7E
-#define EEPROM_CUSTOMER_ID 0x7F
-
- /* 0x0: RTL8188SU, 0x1: RTL8191SU, 0x2: RTL8192SU, 0x3: RTL8191GU */
-#define EEPROM_BoardType 0x54
- /* 0x5C-0x76, Tx Power index. */
-#define EEPROM_TxPwIndex 0x5C
- /* Difference of gain index between legacy and high throughput OFDM. */
-#define EEPROM_PwDiff 0x67
- /* CCK Tx Power */
-#define EEPROM_TxPowerCCK 0x5A
-
-/* 2009/02/09 Cosa Add for SD3 requirement */
- /* HT20 Tx Power Index Difference */
-#define EEPROM_TX_PWR_HT20_DIFF 0x6e
- /* HT20<->40 default Tx Power Index Difference */
-#define DEFAULT_HT20_TXPWR_DIFF 2
- /* OFDM Tx Power Index Difference */
-#define EEPROM_TX_PWR_OFDM_DIFF 0x71
-
- /* Power diff for channel group */
-#define EEPROM_TxPWRGroup 0x73
- /* Check if power safety is need */
-#define EEPROM_Regulatory 0x79
-
- /* 92cu, 0x7E[4] */
-#define EEPROM_BLUETOOTH_COEXIST 0x7E
-#define EEPROM_NORMAL_BoardType EEPROM_RF_OPT1 /* [7:5] */
-#define BOARD_TYPE_NORMAL_MASK 0xE0
-#define BOARD_TYPE_TEST_MASK 0x0F
- /* BIT0 1 for build-in module, 0 for external dongle */
-#define EEPROM_EASY_REPLACEMENT 0x50
-/* */
-/* EPROM content definitions */
-/* */
-#define OS_LINK_SPEED BIT(5)
-
-#define BOARD_TYPE_MASK 0xF
-
-#define BT_COEXISTENCE BIT(4)
-#define BT_CO_SHIFT 4
-
-#define EP_NUMBER_MASK 0x30 /* bit 4:5 0Eh */
-#define EP_NUMBER_SHIFT 4
-
-
-#define USB_PHY_PARA_SIZE 5
-
-
-/* */
-/* EEPROM default value definitions */
-/* */
-/* Use 0xABCD instead of 0x8192 for debug */
-#define EEPROM_DEF_ID_0 0xCD /* Byte 0x00 */
-#define EEPROM_DEF_ID_1 0xAB /* Byte 0x01 */
-
-#define EEPROM_DEF_RTK_RSV_A3 0x74 /* Byte 0x03 */
-#define EEPROM_DEF_RTK_RSV_A4 0x6D /* Byte 0x04 */
-#define EEPROM_DEF_RTK_RSV_A8 0xFF /* Byte 0x08 */
-
-#define EEPROM_DEF_VID_0 0x0A /* Byte 0x0A */
-#define EEPROM_DEF_VID_1 0x0B
-
-#define EEPROM_DEF_PID_0 0x92 /* Byte 0x0C */
-#define EEPROM_DEF_PID_1 0x81
-
-
-#define EEPROM_TEST_DEF_USB_OPT 0x80 /* Byte 0x0E */
-#define EEPROM_NORMAL_DEF_USB_OPT 0x00 /* Byte 0x0E */
-
-#define EEPROM_DEF_CHIRPK 0x15 /* Byte 0x0F */
-
-#define EEPROM_DEF_USB_PHY_0 0x85 /* Byte 0x10 */
-#define EEPROM_DEF_USB_PHY_1 0x62 /* Byte 0x11 */
-#define EEPROM_DEF_USB_PHY_2 0x9E /* Byte 0x12 */
-#define EEPROM_DEF_USB_PHY_3 0x06 /* Byte 0x13 */
-
-#define EEPROM_DEF_TSSI_A 0x09 /* Byte 0x78 */
-#define EEPROM_DEF_TSSI_B 0x09 /* Byte 0x79 */
-
-
-#define EEPROM_DEF_THERMAL_METER 0x12 /* Byte 0x7A */
-
- /* Check if power safety spec is need */
-#define RF_OPTION1 0x79
-#define RF_OPTION2 0x7A
-#define RF_OPTION3 0x7B
-#define RF_OPTION4 0x7C
-
-
-#define EEPROM_USB_SN BIT(0)
-#define EEPROM_USB_REMOTE_WAKEUP BIT(1)
-#define EEPROM_USB_DEVICE_PWR BIT(2)
-#define EEPROM_EP_NUMBER (BIT(3)|BIT(4))
-
-/*===================================================================
-=====================================================================
-Here the register defines are for the 92C. Where a define is the same as
-on the 92C, we reuse the 92C define for consistency.
-So the following defines for the 92C are not complete!
-=====================================================================
-=====================================================================*/
-/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh 8051 FW Download Region (8192 Bytes)
-*/
-
-/* */
-/* 8192C (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
-/* */
-#define RCR_APPFCS BIT(31) /* WMAC append FCS after payload*/
-#define RCR_APP_MIC BIT(30)
-#define RCR_APP_PHYSTS BIT(28)
-#define RCR_APP_ICV BIT(29)
-#define RCR_APP_PHYST_RXFF BIT(28)
-#define RCR_APP_BA_SSN BIT(27) /* Accept BA SSN */
-#define RCR_ENMBID BIT(24) /* Enable Multiple BssId. */
-#define RCR_LSIGEN BIT(23)
-#define RCR_MFBEN BIT(22)
-#define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
-#define RCR_AMF BIT(13) /* Accept management type frame */
-#define RCR_ACF BIT(12) /* Accept control type frame */
-#define RCR_ADF BIT(11) /* Accept data type frame */
-#define RCR_AICV BIT(9) /* Accept ICV error packet */
-#define RCR_ACRC32 BIT(8) /* Accept CRC32 error packet */
-#define RCR_CBSSID_BCN BIT(7) /* Accept BSSID match packet
- (Rx beacon, probe rsp) */
-#define RCR_CBSSID_DATA BIT(6) /* Accept BSSID match packet
- (Data) */
-#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match
- packet */
-#define RCR_APWRMGT BIT(5) /* Accept power management
- packet */
-#define RCR_ADD3 BIT(4) /* Accept address 3 match
- packet */
-#define RCR_AB BIT(3) /* Accept broadcast packet */
-#define RCR_AM BIT(2) /* Accept multicast packet */
-#define RCR_APM BIT(1) /* Accept physical match packet */
-#define RCR_AAP BIT(0) /* Accept all unicast packet */
-#define RCR_MXDMA_OFFSET 8
-#define RCR_FIFO_OFFSET 13
-
-
-
-/* */
-/* 8192c USB specific Register Offset and Content definition, */
-/* 2009.08.18, added by vivi. for merge 92c and 92C into one driver */
-/* */
-/* define APS_FSMCO 0x0004 same with 92Ce */
-#define RSV_CTRL 0x001C
-#define RD_CTRL 0x0524
-
-/* */
-/* */
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-/* */
-/* */
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
-#define REG_USB_VID 0xFE60
-#define REG_USB_PID 0xFE62
-#define REG_USB_OPTIONAL 0xFE64
-#define REG_USB_CHIRP_K 0xFE65
-#define REG_USB_PHY 0xFE66
-#define REG_USB_MAC_ADDR 0xFE70
-
-#define REG_USB_HRPWM 0xFE58
-#define REG_USB_HCPWM 0xFE57
-
-#define InvalidBBRFValue 0x12345678
-
-/* */
-/* 8192C Register Bit and Content definition */
-/* */
-/* */
-/* */
-/* 0x0000h ~ 0x00FFh System Configuration */
-/* */
-/* */
-
-/* 2 SPS0_CTRL */
-#define SW18_FPWM BIT(3)
-
-
-/* 2 SYS_ISO_CTRL */
-#define ISO_MD2PP BIT(0)
-#define ISO_UA2USB BIT(1)
-#define ISO_UD2CORE BIT(2)
-#define ISO_PA2PCIE BIT(3)
-#define ISO_PD2CORE BIT(4)
-#define ISO_IP2MAC BIT(5)
-#define ISO_DIOP BIT(6)
-#define ISO_DIOE BIT(7)
-#define ISO_EB2CORE BIT(8)
-#define ISO_DIOR BIT(9)
-
-#define PWC_EV25V BIT(14)
-#define PWC_EV12V BIT(15)
-
-
-/* 2 SYS_FUNC_EN */
-#define FEN_BBRSTB BIT(0)
-#define FEN_BB_GLB_RSTn BIT(1)
-#define FEN_USBA BIT(2)
-#define FEN_UPLL BIT(3)
-#define FEN_USBD BIT(4)
-#define FEN_DIO_PCIE BIT(5)
-#define FEN_PCIEA BIT(6)
-#define FEN_PPLL BIT(7)
-#define FEN_PCIED BIT(8)
-#define FEN_DIOE BIT(9)
-#define FEN_CPUEN BIT(10)
-#define FEN_DCORE BIT(11)
-#define FEN_ELDR BIT(12)
-#define FEN_DIO_RF BIT(13)
-#define FEN_HWPDN BIT(14)
-#define FEN_MREGEN BIT(15)
-
-/* 2 APS_FSMCO */
-#define PFM_LDALL BIT(0)
-#define PFM_ALDN BIT(1)
-#define PFM_LDKP BIT(2)
-#define PFM_WOWL BIT(3)
-#define EnPDN BIT(4)
-#define PDN_PL BIT(5)
-#define APFM_ONMAC BIT(8)
-#define APFM_OFF BIT(9)
-#define APFM_RSM BIT(10)
-#define AFSM_HSUS BIT(11)
-#define AFSM_PCIE BIT(12)
-#define APDM_MAC BIT(13)
-#define APDM_HOST BIT(14)
-#define APDM_HPDN BIT(15)
-#define RDY_MACON BIT(16)
-#define SUS_HOST BIT(17)
-#define ROP_ALD BIT(20)
-#define ROP_PWR BIT(21)
-#define ROP_SPS BIT(22)
-#define SOP_MRST BIT(25)
-#define SOP_FUSE BIT(26)
-#define SOP_ABG BIT(27)
-#define SOP_AMB BIT(28)
-#define SOP_RCK BIT(29)
-#define SOP_A8M BIT(30)
-#define XOP_BTCK BIT(31)
-
-/* 2 SYS_CLKR */
-#define ANAD16V_EN BIT(0)
-#define ANA8M BIT(1)
-#define MACSLP BIT(4)
-#define LOADER_CLK_EN BIT(5)
-#define _80M_SSC_DIS BIT(7)
-#define _80M_SSC_EN_HO BIT(8)
-#define PHY_SSC_RSTB BIT(9)
-#define SEC_CLK_EN BIT(10)
-#define MAC_CLK_EN BIT(11)
-#define SYS_CLK_EN BIT(12)
-#define RING_CLK_EN BIT(13)
-
-
-/* 2 9346CR */
-
-
-#define EEDO BIT(0)
-#define EEDI BIT(1)
-#define EESK BIT(2)
-#define EECS BIT(3)
-/* define EERPROMSEL BIT(4) */
-/* define EEPROM_EN BIT(5) */
-#define BOOT_FROM_EEPROM BIT(4)
-#define EEPROM_EN BIT(5)
-#define EEM0 BIT(6)
-#define EEM1 BIT(7)
-
-
-/* 2 AFE_MISC */
-#define AFE_BGEN BIT(0)
-#define AFE_MBEN BIT(1)
-#define MAC_ID_EN BIT(7)
-
-
-/* 2 SPS0_CTRL */
-
-
-/* 2 SPS_OCP_CFG */
-
-
-/* 2 RSV_CTRL */
-#define WLOCK_ALL BIT(0)
-#define WLOCK_00 BIT(1)
-#define WLOCK_04 BIT(2)
-#define WLOCK_08 BIT(3)
-#define WLOCK_40 BIT(4)
-#define R_DIS_PRST_0 BIT(5)
-#define R_DIS_PRST_1 BIT(6)
-#define LOCK_ALL_EN BIT(7)
-
-/* 2 RF_CTRL */
-#define RF_EN BIT(0)
-#define RF_RSTB BIT(1)
-#define RF_SDMRSTB BIT(2)
-
-
-
-/* 2 LDOA15_CTRL */
-#define LDA15_EN BIT(0)
-#define LDA15_STBY BIT(1)
-#define LDA15_OBUF BIT(2)
-#define LDA15_REG_VOS BIT(3)
-#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
-
-
-
-/* 2 LDOV12D_CTRL */
-#define LDV12_EN BIT(0)
-#define LDV12_SDBY BIT(1)
-#define LPLDO_HSM BIT(2)
-#define LPLDO_LSM_DIS BIT(3)
-#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
-
-
-/* 2 AFE_XTAL_CTRL */
-#define XTAL_EN BIT(0)
-#define XTAL_BSEL BIT(1)
-#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
-#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
-#define XTAL_GATE_USB BIT(8)
-#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
-#define XTAL_GATE_AFE BIT(11)
-#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
-#define XTAL_RF_GATE BIT(14)
-#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
-#define XTAL_GATE_DIG BIT(17)
-#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
-#define XTAL_BT_GATE BIT(20)
-#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
-#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
-
-
-#define CKDLY_AFE BIT(26)
-#define CKDLY_USB BIT(27)
-#define CKDLY_DIG BIT(28)
-#define CKDLY_BT BIT(29)
-
-
-/* 2 AFE_PLL_CTRL */
-#define APLL_EN BIT(0)
-#define APLL_320_EN BIT(1)
-#define APLL_FREF_SEL BIT(2)
-#define APLL_EDGE_SEL BIT(3)
-#define APLL_WDOGB BIT(4)
-#define APLL_LPFEN BIT(5)
-
-#define APLL_REF_CLK_13MHZ 0x1
-#define APLL_REF_CLK_19_2MHZ 0x2
-#define APLL_REF_CLK_20MHZ 0x3
-#define APLL_REF_CLK_25MHZ 0x4
-#define APLL_REF_CLK_26MHZ 0x5
-#define APLL_REF_CLK_38_4MHZ 0x6
-#define APLL_REF_CLK_40MHZ 0x7
-
-#define APLL_320EN BIT(14)
-#define APLL_80EN BIT(15)
-#define APLL_1MEN BIT(24)
-
-
-/* 2 EFUSE_CTRL */
-#define ALD_EN BIT(18)
-#define EF_PD BIT(19)
-#define EF_FLAG BIT(31)
-
-/* 2 EFUSE_TEST (For RTL8723 partially) */
-#define EF_TRPT BIT(7)
- /* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
-#define EF_CELL_SEL (BIT(8)|BIT(9))
-#define LDOE25_EN BIT(31)
-#define EFUSE_SEL(x) (((x) & 0x3) << 8)
-#define EFUSE_SEL_MASK 0x300
-#define EFUSE_WIFI_SEL_0 0x0
-#define EFUSE_BT_SEL_0 0x1
-#define EFUSE_BT_SEL_1 0x2
-#define EFUSE_BT_SEL_2 0x3
-
-#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
-#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
-
-/* 2 PWR_DATA */
-
-/* 2 CAL_TIMER */
-
-/* 2 ACLK_MON */
-#define RSM_EN BIT(0)
-#define Timer_EN BIT(4)
-
-
-/* 2 GPIO_MUXCFG */
-#define TRSW0EN BIT(2)
-#define TRSW1EN BIT(3)
-#define EROM_EN BIT(4)
-#define EnBT BIT(5)
-#define EnUart BIT(8)
-#define Uart_910 BIT(9)
-#define EnPMAC BIT(10)
-#define SIC_SWRST BIT(11)
-#define EnSIC BIT(12)
-#define SIC_23 BIT(13)
-#define EnHDP BIT(14)
-#define SIC_LBK BIT(15)
-
-/* 2 GPIO_PIN_CTRL */
-
-/* GPIO BIT */
-#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
-
-/* 2 GPIO_INTM */
-
-/* 2 LEDCFG */
-#define LED0PL BIT(4)
-#define LED0DIS BIT(7)
-#define LED1DIS BIT(15)
-#define LED1PL BIT(12)
-
-#define SECCAM_CLR BIT(30)
-
-
-/* 2 FSIMR */
-
-/* 2 FSISR */
-
-
-/* 2 8051FWDL */
-/* 2 MCUFWDL */
-#define MCUFWDL_EN BIT(0)
-#define MCUFWDL_RDY BIT(1)
-#define FWDL_ChkSum_rpt BIT(2)
-#define MACINI_RDY BIT(3)
-#define BBINI_RDY BIT(4)
-#define RFINI_RDY BIT(5)
-#define WINTINI_RDY BIT(6)
-#define CPRST BIT(23)
-
-/* 2REG_HPON_FSM */
-#define BOND92CE_1T2R_CFG BIT(22)
-
-
-/* 2 REG_SYS_CFG */
-#define XCLK_VLD BIT(0)
-#define ACLK_VLD BIT(1)
-#define UCLK_VLD BIT(2)
-#define PCLK_VLD BIT(3)
-#define PCIRSTB BIT(4)
-#define V15_VLD BIT(5)
-#define TRP_B15V_EN BIT(7)
-#define SIC_IDLE BIT(8)
-#define BD_MAC2 BIT(9)
-#define BD_MAC1 BIT(10)
-#define IC_MACPHY_MODE BIT(11)
-#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
-#define BT_FUNC BIT(16)
-#define VENDOR_ID BIT(19)
-#define PAD_HWPD_IDN BIT(22)
-#define TRP_VAUX_EN BIT(23)
-#define TRP_BT_EN BIT(24)
-#define BD_PKG_SEL BIT(25)
-#define BD_HCI_SEL BIT(26)
-#define TYPE_ID BIT(27)
-
-#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
-#define CHIP_VER_RTL_SHIFT 12
-
-/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
-#define EFS_HCI_SEL (BIT(0)|BIT(1))
-#define PAD_HCI_SEL (BIT(2)|BIT(3))
-#define HCI_SEL (BIT(4)|BIT(5))
-#define PKG_SEL_HCI BIT(6)
-#define FEN_GPS BIT(7)
-#define FEN_BT BIT(8)
-#define FEN_WL BIT(9)
-#define FEN_PCI BIT(10)
-#define FEN_USB BIT(11)
-#define BTRF_HWPDN_N BIT(12)
-#define WLRF_HWPDN_N BIT(13)
-#define PDN_BT_N BIT(14)
-#define PDN_GPS_N BIT(15)
-#define BT_CTL_HWPDN BIT(16)
-#define GPS_CTL_HWPDN BIT(17)
-#define PPHY_SUSB BIT(20)
-#define UPHY_SUSB BIT(21)
-#define PCI_SUSEN BIT(22)
-#define USB_SUSEN BIT(23)
-#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
-
-/* */
-/* */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-/* */
-/* */
-
-
-/* 2 Function Enable Registers */
-/* 2 CR */
-
-#define REG_LBMODE (REG_CR + 3)
-
-
-#define HCI_TXDMA_EN BIT(0)
-#define HCI_RXDMA_EN BIT(1)
-#define TXDMA_EN BIT(2)
-#define RXDMA_EN BIT(3)
-#define PROTOCOL_EN BIT(4)
-#define SCHEDULE_EN BIT(5)
-#define MACTXEN BIT(6)
-#define MACRXEN BIT(7)
-#define ENSWBCN BIT(8)
-#define ENSEC BIT(9)
-
-#define _LBMODE(x) (((x) & 0xF) << 24)
-#define MASK_LBMODE 0xF000000
-#define LOOPBACK_NORMAL 0x0
-#define LOOPBACK_IMMEDIATELY 0xB
-#define LOOPBACK_MAC_DELAY 0x3
-#define LOOPBACK_PHY 0x1
-#define LOOPBACK_DMA 0x7
-
-
-/* 2 PBP - Page Size Register */
-#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
-#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
-#define _PSRX_MASK 0xF
-#define _PSTX_MASK 0xF0
-#define _PSRX(x) (x)
-#define _PSTX(x) ((x) << 4)
-
-#define PBP_64 0x0
-#define PBP_128 0x1
-#define PBP_256 0x2
-#define PBP_512 0x3
-#define PBP_1024 0x4
-
-
-/* 2 TX/RXDMA */
-#define RXDMA_ARBBW_EN BIT(0)
-#define RXSHFT_EN BIT(1)
-#define RXDMA_AGG_EN BIT(2)
-#define QS_VO_QUEUE BIT(8)
-#define QS_VI_QUEUE BIT(9)
-#define QS_BE_QUEUE BIT(10)
-#define QS_BK_QUEUE BIT(11)
-#define QS_MANAGER_QUEUE BIT(12)
-#define QS_HIGH_QUEUE BIT(13)
-
-#define HQSEL_VOQ BIT(0)
-#define HQSEL_VIQ BIT(1)
-#define HQSEL_BEQ BIT(2)
-#define HQSEL_BKQ BIT(3)
-#define HQSEL_MGTQ BIT(4)
-#define HQSEL_HIQ BIT(5)
-
-/* For normal driver, 0x10C */
-#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
-#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
-#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
-#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
-#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
-
-#define QUEUE_LOW 1
-#define QUEUE_NORMAL 2
-#define QUEUE_HIGH 3
-
-
-
-/* 2 TRXFF_BNDY */
-
-
-/* 2 LLT_INIT */
-#define _LLT_NO_ACTIVE 0x0
-#define _LLT_WRITE_ACCESS 0x1
-#define _LLT_READ_ACCESS 0x2
-
-#define _LLT_INIT_DATA(x) ((x) & 0xFF)
-#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
-#define _LLT_OP(x) (((x) & 0x3) << 30)
-#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
-
-
-/* 2 BB_ACCESS_CTRL */
-#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
-#define BB_WRITE_EN BIT(30)
-#define BB_READ_EN BIT(31)
-/* define BB_ADDR_MASK 0xFFF */
-/* define _BB_ADDR(x) ((x) & BB_ADDR_MASK) */
-
-/* */
-/* */
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* */
-/* */
-/* 2 RQPN */
-#define _HPQ(x) ((x) & 0xFF)
-#define _LPQ(x) (((x) & 0xFF) << 8)
-#define _PUBQ(x) (((x) & 0xFF) << 16)
- /* NOTE: in RQPN_NPQ register */
-#define _NPQ(x) ((x) & 0xFF)
-
-
-#define HPQ_PUBLIC_DIS BIT(24)
-#define LPQ_PUBLIC_DIS BIT(25)
-#define LD_RQPN BIT(31)
-
-
-/* 2 TDECTRL */
-#define BCN_VALID BIT(16)
-#define BCN_HEAD(x) (((x) & 0xFF) << 8)
-#define BCN_HEAD_MASK 0xFF00
-
-/* 2 TDECTL */
-#define BLK_DESC_NUM_SHIFT 4
-#define BLK_DESC_NUM_MASK 0xF
-
-
-/* 2 TXDMA_OFFSET_CHK */
-#define DROP_DATA_EN BIT(9)
-
-/* */
-/* */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-/* */
-/* */
-/* 2 FWHW_TXQ_CTRL */
-#define EN_AMPDU_RTY_NEW BIT(7)
-
-/* 2 INIRTSMCS_SEL */
-#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
-
-
-/* 2 SPEC SIFS */
-#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
-#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
-
-
-/* 2 RRSR */
-
-#define RATE_REG_BITMAP_ALL 0xFFFFF
-
-#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
-
-#define _RRSR_RSC(x) (((x) & 0x3) << 21)
-#define RRSR_RSC_RESERVED 0x0
-#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
-#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
-#define RRSR_RSC_DUPLICATE_MODE 0x3
-
-
-/* 2 ARFR */
-#define USE_SHORT_G1 BIT(20)
-
-/* 2 AGGLEN_LMT_L */
-#define _AGGLMT_MCS0(x) ((x) & 0xF)
-#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
-#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
-#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
-#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
-#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
-#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
-#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
-
-
-/* 2 RL */
-#define RETRY_LIMIT_SHORT_SHIFT 8
-#define RETRY_LIMIT_LONG_SHIFT 0
-
-
-/* 2 DARFRC */
-#define _DARF_RC1(x) ((x) & 0x1F)
-#define _DARF_RC2(x) (((x) & 0x1F) << 8)
-#define _DARF_RC3(x) (((x) & 0x1F) << 16)
-#define _DARF_RC4(x) (((x) & 0x1F) << 24)
-/* NOTE: shift starting from address (DARFRC + 4) */
-#define _DARF_RC5(x) ((x) & 0x1F)
-#define _DARF_RC6(x) (((x) & 0x1F) << 8)
-#define _DARF_RC7(x) (((x) & 0x1F) << 16)
-#define _DARF_RC8(x) (((x) & 0x1F) << 24)
-
-
-/* 2 RARFRC */
-#define _RARF_RC1(x) ((x) & 0x1F)
-#define _RARF_RC2(x) (((x) & 0x1F) << 8)
-#define _RARF_RC3(x) (((x) & 0x1F) << 16)
-#define _RARF_RC4(x) (((x) & 0x1F) << 24)
-/* NOTE: shift starting from address (RARFRC + 4) */
-#define _RARF_RC5(x) ((x) & 0x1F)
-#define _RARF_RC6(x) (((x) & 0x1F) << 8)
-#define _RARF_RC7(x) (((x) & 0x1F) << 16)
-#define _RARF_RC8(x) (((x) & 0x1F) << 24)
-
-
-/* */
-/* */
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-/* */
-/* */
-
-
-
-/* 2 EDCA setting */
-#define AC_PARAM_TXOP_LIMIT_OFFSET 16
-#define AC_PARAM_ECW_MAX_OFFSET 12
-#define AC_PARAM_ECW_MIN_OFFSET 8
-#define AC_PARAM_AIFS_OFFSET 0
-
-
-/* 2 EDCA_VO_PARAM */
-#define _AIFS(x) (x)
-#define _ECW_MAX_MIN(x) ((x) << 8)
-#define _TXOP_LIMIT(x) ((x) << 16)
-
-
-#define _BCNIFS(x) ((x) & 0xFF)
-#define _BCNECW(x) (((x) & 0xF) << 8)
-
-
-#define _LRL(x) ((x) & 0x3F)
-#define _SRL(x) (((x) & 0x3F) << 8)
-
-
-/* 2 SIFS_CCK */
-#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
-#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
-
-
-/* 2 SIFS_OFDM */
-#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
-#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
-
-
-/* 2 TBTT PROHIBIT */
-#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
-
-
-/* 2 REG_RD_CTRL */
-#define DIS_EDCA_CNT_DWN BIT(11)
-
-
-/* 2 BCN_CTRL */
-#define EN_MBSSID BIT(1)
-#define EN_TXBCN_RPT BIT(2)
-#define EN_BCN_FUNCTION BIT(3)
-#define DIS_TSF_UPDATE BIT(3)
-
-/* The same function but different bit field. */
-#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
-#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
-
-/* 2 ACMHWCTRL */
-#define AcmHw_HwEn BIT(0)
-#define AcmHw_BeqEn BIT(1)
-#define AcmHw_ViqEn BIT(2)
-#define AcmHw_VoqEn BIT(3)
-#define AcmHw_BeqStatus BIT(4)
-#define AcmHw_ViqStatus BIT(5)
-#define AcmHw_VoqStatus BIT(6)
-
-
-
-/* */
-/* */
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-/* */
-/* */
-
-/* 2 APSD_CTRL */
-#define APSDOFF BIT(6)
-#define APSDOFF_STATUS BIT(7)
-
-
-/* 2 BWOPMODE */
-#define BW_20MHZ BIT(2)
-
-
-#define RATE_BITMAP_ALL 0xFFFFF
-
-/* Only use CCK 1M rate for ACK */
-#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
-
-/* 2 TCR */
-#define TSFRST BIT(0)
-#define DIS_GCLK BIT(1)
-#define PAD_SEL BIT(2)
-#define PWR_ST BIT(6)
-#define PWRBIT_OW_EN BIT(7)
-#define ACRC BIT(8)
-#define CFENDFORM BIT(9)
-#define ICV BIT(10)
-
-
-
-/* 2 RCR */
-#define AAP BIT(0)
-#define APM BIT(1)
-#define AM BIT(2)
-#define AB BIT(3)
-#define ADD3 BIT(4)
-#define APWRMGT BIT(5)
-#define CBSSID BIT(6)
-#define CBSSID_BCN BIT(7)
-#define ACRC32 BIT(8)
-#define AICV BIT(9)
-#define ADF BIT(11)
-#define ACF BIT(12)
-#define AMF BIT(13)
-#define HTC_LOC_CTRL BIT(14)
-#define UC_DATA_EN BIT(16)
-#define BM_DATA_EN BIT(17)
-#define MFBEN BIT(22)
-#define LSIGEN BIT(23)
-#define EnMBID BIT(24)
-#define APP_BASSN BIT(27)
-#define APP_PHYSTS BIT(28)
-#define APP_ICV BIT(29)
-#define APP_MIC BIT(30)
-#define APP_FCS BIT(31)
-
-/* 2 RX_PKT_LIMIT */
-
-/* 2 RX_DLK_TIME */
-
-/* 2 MBIDCAMCFG */
-
-
-
-/* 2 AMPDU_MIN_SPACE */
-#define _MIN_SPACE(x) ((x) & 0x7)
-#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
-
-
-/* 2 RXERR_RPT */
-#define RXERR_TYPE_OFDM_PPDU 0
-#define RXERR_TYPE_OFDMfalse_ALARM 1
-#define RXERR_TYPE_OFDM_MPDU_OK 2
-#define RXERR_TYPE_OFDM_MPDU_FAIL 3
-#define RXERR_TYPE_CCK_PPDU 4
-#define RXERR_TYPE_CCKfalse_ALARM 5
-#define RXERR_TYPE_CCK_MPDU_OK 6
-#define RXERR_TYPE_CCK_MPDU_FAIL 7
-#define RXERR_TYPE_HT_PPDU 8
-#define RXERR_TYPE_HTfalse_ALARM 9
-#define RXERR_TYPE_HT_MPDU_TOTAL 10
-#define RXERR_TYPE_HT_MPDU_OK 11
-#define RXERR_TYPE_HT_MPDU_FAIL 12
-#define RXERR_TYPE_RX_FULL_DROP 15
-
-#define RXERR_COUNTER_MASK 0xFFFFF
-#define RXERR_RPT_RST BIT(27)
-#define _RXERR_RPT_SEL(type) ((type) << 28)
-
-
-/* 2 SECCFG */
-#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
-#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
-#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
-#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
-#define SCR_SKByA2 BIT(4) /* Search kEY BY A2 */
-#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
-
-
-
-/* */
-/* */
-/* 0xFE00h ~ 0xFE55h USB Configuration */
-/* */
-/* */
-
-/* 2 USB Information (0xFE17) */
-#define USB_IS_HIGH_SPEED 0
-#define USB_IS_FULL_SPEED 1
-#define USB_SPEED_MASK BIT(5)
-
-#define USB_NORMAL_SIE_EP_MASK 0xF
-#define USB_NORMAL_SIE_EP_SHIFT 4
-
-#define USB_TEST_EP_MASK 0x30
-#define USB_TEST_EP_SHIFT 4
-
-/* 2 Special Option */
-#define USB_AGG_EN BIT(3)
-
-
-/* 2REG_C2HEVT_CLEAR */
- /* Set by the driver to notify the FW that the driver has read the
- C2H command message */
-#define C2H_EVT_HOST_CLOSE 0x00
- /* Set by the FW to indicate that the FW has set the C2H command message
- and it has not yet been read by the driver. */
-#define C2H_EVT_FW_CLOSE 0xFF
-
-
-/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
- /* Enable GPIO[9] as WiFi HW PDn source */
-#define WL_HWPDN_EN BIT(0)
- /* WiFi HW PDn polarity control */
-#define WL_HWPDN_SL BIT(1)
- /* WiFi function enable */
-#define WL_FUNC_EN BIT(2)
- /* Enable GPIO[9] as WiFi RF HW PDn source */
-#define WL_HWROF_EN BIT(3)
- /* Enable GPIO[11] as BT HW PDn source */
-#define BT_HWPDN_EN BIT(16)
- /* BT HW PDn polarity control */
-#define BT_HWPDN_SL BIT(17)
- /* BT function enable */
-#define BT_FUNC_EN BIT(18)
- /* Enable GPIO[11] as BT/GPS RF HW PDn source */
-#define BT_HWROF_EN BIT(19)
- /* Enable GPIO[10] as GPS HW PDn source */
-#define GPS_HWPDN_EN BIT(20)
- /* GPS HW PDn polarity control */
-#define GPS_HWPDN_SL BIT(21)
- /* GPS function enable */
-#define GPS_FUNC_EN BIT(22)
-
-/* 3 REG_LIFECTRL_CTRL */
-#define HAL92C_EN_PKT_LIFE_TIME_BK BIT(3)
-#define HAL92C_EN_PKT_LIFE_TIME_BE BIT(2)
-#define HAL92C_EN_PKT_LIFE_TIME_VI BIT(1)
-#define HAL92C_EN_PKT_LIFE_TIME_VO BIT(0)
-
-#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us, said by Tim. */
-
-/* */
-/* General definitions */
-/* */
-
-#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
-
-#define POLLING_LLT_THRESHOLD 20
-#define POLLING_READY_TIMEOUT_COUNT 1000
-
-/* Min Spacing related settings. */
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
-/* */
-/* 8723A Register offset definition */
-/* */
-#define HAL_8723A_NAV_UPPER_UNIT 128 /* micro-second */
-
-/* */
-/* */
-/* 0x0000h ~ 0x00FFh System Configuration */
-/* */
-/* */
-#define REG_SYSON_REG_LOCK 0x001C
-
-
-/* */
-/* */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-/* */
-/* */
-#define REG_FTIMR 0x0138
-
-
-/* */
-/* */
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* */
-/* */
-
-
-/* */
-/* */
-/* 0x0280h ~ 0x02FFh RXDMA Configuration */
-/* */
-/* */
-
-
-/* */
-/* */
-/* 0x0300h ~ 0x03FFh PCIe */
-/* */
-/* */
-
-
-/* */
-/* */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-/* */
-/* */
-#define REG_EARLY_MODE_CONTROL 0x4D0
-
-
-/* */
-/* */
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-/* */
-/* */
-
-/* 2 BCN_CTRL */
-#define DIS_ATIM BIT(0)
-#define DIS_BCNQ_SUB BIT(1)
-#define DIS_TSF_UDT BIT(4)
-
-
-/* */
-/* */
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-/* */
-/* */
-/* */
-/* Note: */
-/* The NAV upper value is very important to the WiFi 11n 5.2.3 NAV test.
- * The default value is always too small, but the WiFi TestPlan tests
- * with 25,000 microseconds of NAV by sending CTS in the air. We must
- * set this value to more than 25,000 microseconds to pass that test
- * item.
- * The offset of NAV_UPPER in the 8192C spec is incorrect; it should
- * be 0x0652. Commented by SD1 Scott. */
-/* By Bruce, 2011-07-18. */
-/* */
-#define REG_NAV_UPPER 0x0652 /* unit of 128 microseconds */
-
-
-/* */
-/* 8723 Register Bit and Content definition */
-/* */
-
-/* */
-/* */
-/* 0x0000h ~ 0x00FFh System Configuration */
-/* */
-/* */
-
-/* 2 SPS0_CTRL */
-
-/* 2 SYS_ISO_CTRL */
-
-/* 2 SYS_FUNC_EN */
-
-/* 2 APS_FSMCO */
-#define EN_WLON BIT(16)
-
-/* 2 SYS_CLKR */
-
-/* 2 9346CR */
-
-/* 2 AFE_MISC */
-
-/* 2 SPS0_CTRL */
-
-/* 2 SPS_OCP_CFG */
-
-/* 2 SYSON_REG_LOCK */
-#define WLOCK_ALL BIT(0)
-#define WLOCK_00 BIT(1)
-#define WLOCK_04 BIT(2)
-#define WLOCK_08 BIT(3)
-#define WLOCK_40 BIT(4)
-#define WLOCK_1C_B6 BIT(5)
-#define R_DIS_PRST_1 BIT(6)
-#define LOCK_ALL_EN BIT(7)
-
-/* 2 RF_CTRL */
-
-/* 2 LDOA15_CTRL */
-
-/* 2 LDOV12D_CTRL */
-
-/* 2 AFE_XTAL_CTRL */
-
-/* 2 AFE_PLL_CTRL */
-
-/* 2 EFUSE_CTRL */
-
-/* 2 EFUSE_TEST (For RTL8723 partially) */
-
-/* 2 PWR_DATA */
-
-/* 2 CAL_TIMER */
-
-/* 2 ACLK_MON */
-
-/* 2 GPIO_MUXCFG */
-
-/* 2 GPIO_PIN_CTRL */
-
-/* 2 GPIO_INTM */
-
-/* 2 LEDCFG */
-
-/* 2 FSIMR */
-
-/* 2 FSISR */
-
-/* 2 HSIMR */
-/* 8723 Host System Interrupt Mask Register (offset 0x58, 32 bits) */
-#define HSIMR_GPIO12_0_INT_EN BIT(0)
-#define HSIMR_SPS_OCP_INT_EN BIT(5)
-#define HSIMR_RON_INT_EN BIT(6)
-#define HSIMR_PDNINT_EN BIT(7)
-#define HSIMR_GPIO9_INT_EN BIT(25)
-
-/* 2 HSISR */
-/* 8723 Host System Interrupt Status Register (offset 0x5C, 32 bits) */
-#define HSISR_GPIO12_0_INT BIT(0)
-#define HSISR_SPS_OCP_INT BIT(5)
-#define HSISR_RON_INT BIT(6)
-#define HSISR_PDNINT BIT(7)
-#define HSISR_GPIO9_INT BIT(25)
-
-/* interrupt mask which needs to clear */
-#define MASK_HSISR_CLEAR (HSISR_GPIO12_0_INT | \
- HSISR_SPS_OCP_INT | \
- HSISR_RON_INT | \
- HSISR_PDNINT | \
- HSISR_GPIO9_INT)
-
-/* 2 MCUFWDL */
-#define RAM_DL_SEL BIT(7) /* 1:RAM, 0:ROM */
-
-/* 2 HPON_FSM */
-
-/* 2 SYS_CFG */
-#define RTL_ID BIT(23) /* TestChip ID,
- 1:Test(RLE); 0:MP(RL) */
-#define SPS_SEL BIT(24) /* 1:LDO regulator mode;
- 0:Switching regulator mode*/
-
-
-/* */
-/* */
-/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
-/* */
-/* */
-
-/* 2 Function Enable Registers */
-
-/* 2 CR */
-#define CALTMR_EN BIT(10)
-
-/* 2 PBP - Page Size Register */
-
-/* 2 TX/RXDMA */
-
-/* 2 TRXFF_BNDY */
-
-/* 2 LLT_INIT */
-
-/* 2 BB_ACCESS_CTRL */
-
-
-/* */
-/* */
-/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* */
-/* */
-
-/* 2 RQPN */
-
-/* 2 TDECTRL */
-
-/* 2 TDECTL */
-
-/* 2 TXDMA_OFFSET_CHK */
-
-
-/* */
-/* */
-/* 0x0400h ~ 0x047Fh Protocol Configuration */
-/* */
-/* */
-
-/* 2 FWHW_TXQ_CTRL */
-
-/* 2 INIRTSMCS_SEL */
-
-/* 2 SPEC SIFS */
-
-/* 2 RRSR */
-
-/* 2 ARFR */
-
-/* 2 AGGLEN_LMT_L */
-
-/* 2 RL */
-
-/* 2 DARFRC */
-
-/* 2 RARFRC */
-
-
-/* */
-/* */
-/* 0x0500h ~ 0x05FFh EDCA Configuration */
-/* */
-/* */
-
-/* 2 EDCA setting */
-
-/* 2 EDCA_VO_PARAM */
-
-/* 2 SIFS_CCK */
-
-/* 2 SIFS_OFDM */
-
-/* 2 TBTT PROHIBIT */
-
-/* 2 REG_RD_CTRL */
-
-/* 2 BCN_CTRL */
-
-/* 2 ACMHWCTRL */
-
-
-/* */
-/* */
-/* 0x0600h ~ 0x07FFh WMAC Configuration */
-/* */
-/* */
-
-/* 2 APSD_CTRL */
-
-/* 2 BWOPMODE */
-
-/* 2 TCR */
-
-/* 2 RCR */
-
-/* 2 RX_PKT_LIMIT */
-
-/* 2 RX_DLK_TIME */
-
-/* 2 MBIDCAMCFG */
-
-/* 2 AMPDU_MIN_SPACE */
-
-/* 2 RXERR_RPT */
-
-/* 2 SECCFG */
-
-
-/* */
-/* */
-/* 0xFE00h ~ 0xFE55h RTL8723 SDIO Configuration */
-/* */
-/* */
-
-/* I/O bus domain address mapping */
-#define WLAN_IOREG_BASE 0x10260000
-#define FIRMWARE_FIFO_BASE 0x10270000
-#define TX_HIQ_BASE 0x10310000
-#define TX_MIQ_BASE 0x10320000
-#define TX_LOQ_BASE 0x10330000
-#define RX_RX0FF_BASE 0x10340000
-
-/* SDIO host local register space mapping. */
-#define WLAN_IOREG_MSK 0x7FFF
-#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
-#define WLAN_RX0FF_MSK 0x0003
-
-#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
-#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
-
-/* 8723 EFUSE */
-#define HWSET_MAX_SIZE 256
-
-
-/* USB interrupt */
-#define UHIMR_TIMEOUT2 BIT(31)
-#define UHIMR_TIMEOUT1 BIT(30)
-#define UHIMR_PSTIMEOUT BIT(29)
-#define UHIMR_GTINT4 BIT(28)
-#define UHIMR_GTINT3 BIT(27)
-#define UHIMR_TXBCNERR BIT(26)
-#define UHIMR_TXBCNOK BIT(25)
-#define UHIMR_TSF_BIT32_TOGGLE BIT(24)
-#define UHIMR_BCNDMAINT3 BIT(23)
-#define UHIMR_BCNDMAINT2 BIT(22)
-#define UHIMR_BCNDMAINT1 BIT(21)
-#define UHIMR_BCNDMAINT0 BIT(20)
-#define UHIMR_BCNDOK3 BIT(19)
-#define UHIMR_BCNDOK2 BIT(18)
-#define UHIMR_BCNDOK1 BIT(17)
-#define UHIMR_BCNDOK0 BIT(16)
-#define UHIMR_HSISR_IND BIT(15)
-#define UHIMR_BCNDMAINT_E BIT(14)
-/* RSVD BIT(13) */
-#define UHIMR_CTW_END BIT(12)
-/* RSVD BIT(11) */
-#define UHIMR_C2HCMD BIT(10)
-#define UHIMR_CPWM2 BIT(9)
-#define UHIMR_CPWM BIT(8)
-#define UHIMR_HIGHDOK BIT(7) /* High Queue DMA OK
- Interrupt */
-#define UHIMR_MGNTDOK BIT(6) /* Management Queue DMA OK
- Interrupt */
-#define UHIMR_BKDOK BIT(5) /* AC_BK DMA OK Interrupt */
-#define UHIMR_BEDOK BIT(4) /* AC_BE DMA OK Interrupt */
-#define UHIMR_VIDOK BIT(3) /* AC_VI DMA OK Interrupt */
-#define UHIMR_VODOK BIT(2) /* AC_VO DMA Interrupt */
-#define UHIMR_RDU BIT(1) /* Receive Descriptor
- Unavailable */
-#define UHIMR_ROK BIT(0) /* Receive DMA OK Interrupt */
-
-/* USB Host Interrupt Status Extension bit */
-#define UHIMR_BCNDMAINT7 BIT(23)
-#define UHIMR_BCNDMAINT6 BIT(22)
-#define UHIMR_BCNDMAINT5 BIT(21)
-#define UHIMR_BCNDMAINT4 BIT(20)
-#define UHIMR_BCNDOK7 BIT(19)
-#define UHIMR_BCNDOK6 BIT(18)
-#define UHIMR_BCNDOK5 BIT(17)
-#define UHIMR_BCNDOK4 BIT(16)
-/* bit14-15: RSVD */
-#define UHIMR_ATIMEND_E BIT(13)
-#define UHIMR_ATIMEND BIT(12)
-#define UHIMR_TXERR BIT(11)
-#define UHIMR_RXERR BIT(10)
-#define UHIMR_TXFOVW BIT(9)
-#define UHIMR_RXFOVW BIT(8)
-/* bit2-7: RSVD */
-#define UHIMR_OCPINT BIT(1)
-/* bit0: RSVD */
-
-#define REG_USB_HIMR 0xFE38
-#define REG_USB_HIMRE 0xFE3C
-#define REG_USB_HISR 0xFE78
-#define REG_USB_HISRE 0xFE7C
-
-#define USB_INTR_CPWM_OFFSET 16
-#define USB_INTR_CONTENT_HISR_OFFSET 48
-#define USB_INTR_CONTENT_HISRE_OFFSET 52
-#define USB_INTR_CONTENT_LENGTH 56
-#define USB_C2H_CMDID_OFFSET 0
-#define USB_C2H_SEQ_OFFSET 1
-#define USB_C2H_EVENT_OFFSET 2
-/* */
-/* General definitions */
-/* */
-
-#endif
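
For readers skimming the removed rtl8723a_spec.h above: the file is almost entirely register offsets and bit masks, which a driver typically consumes with read-modify-write sequences on the MAC registers. Below is a minimal, self-contained sketch of that pattern. It reuses a few of the defines from the deleted header, but rtw_read32()/rtw_write32() and the fake register array are hypothetical stand-ins for the real hardware accessors; nothing in the sketch is part of this patch.

#include <stdint.h>
#include <stdio.h>

#define BIT(x)              (1U << (x))   /* normally provided by the kernel */

#define REG_SYS_CFG         0x00F0
#define REG_RCR             0x0608

#define RCR_APPFCS          BIT(31)       /* append FCS after the payload   */
#define RCR_AB              BIT(3)        /* accept broadcast packets       */
#define RCR_AM              BIT(2)        /* accept multicast packets       */
#define RCR_APM             BIT(1)        /* accept physical-match packets  */

#define CHIP_VER_RTL_MASK   0xF000        /* SYS_CFG bits 12..15            */
#define CHIP_VER_RTL_SHIFT  12

/* Stand-in register file so the sketch runs without hardware. */
static uint32_t fake_regs[0x1000];

static uint32_t rtw_read32(uint32_t reg)            { return fake_regs[reg]; }
static void rtw_write32(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }

int main(void)
{
	/* Pretend SYS_CFG reports RTL version 3 in bits 12..15. */
	fake_regs[REG_SYS_CFG] = 3u << CHIP_VER_RTL_SHIFT;

	/* Typical read-modify-write: accept broadcast, multicast and
	 * physical-match frames, and append the FCS to each payload. */
	rtw_write32(REG_RCR,
		    rtw_read32(REG_RCR) | RCR_AB | RCR_AM | RCR_APM | RCR_APPFCS);

	printf("RCR          = 0x%08x\n", (unsigned int)rtw_read32(REG_RCR));
	printf("chip version = %u\n",
	       (unsigned int)((rtw_read32(REG_SYS_CFG) & CHIP_VER_RTL_MASK) >>
			      CHIP_VER_RTL_SHIFT));
	return 0;
}
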
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_sreset.h b/drivers/staging/rtl8723au/include/rtl8723a_sreset.h
deleted file mode 100644
index 6197910a4a53..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_sreset.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTL8723A_SRESET_H_
-#define _RTL8723A_SRESET_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <rtw_sreset.h>
-
-void rtl8723a_sreset_xmit_status_check(struct rtw_adapter *padapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
deleted file mode 100644
index 7db29f40ab70..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_XMIT_H__
-#define __RTL8723A_XMIT_H__
-
-/* */
-/* Queue Select Value in TxDesc */
-/* */
-#define QSLT_BK 0x2/* 0x01 */
-#define QSLT_BE 0x0
-#define QSLT_VI 0x5/* 0x4 */
-#define QSLT_VO 0x7/* 0x6 */
-#define QSLT_BEACON 0x10
-#define QSLT_HIGH 0x11
-#define QSLT_MGNT 0x12
-#define QSLT_CMD 0x13
-
-/* */
-/* defined for TX DESC Operation */
-/* */
-
-#define MAX_TID (15)
-
-/* OFFSET 0 */
-#define OFFSET_SZ 0
-#define OFFSET_SHT 16
-#define BMC BIT(24)
-#define LSG BIT(26)
-#define FSG BIT(27)
-#define OWN BIT(31)
-
-
-/* OFFSET 4 */
-#define PKT_OFFSET_SZ 0
-#define BK BIT(6)
-#define QSEL_SHT 8
-#define Rate_ID_SHT 16
-#define NAVUSEHDR BIT(20)
-#define PKT_OFFSET_SHT 26
-#define HWPC BIT(31)
-
-/* OFFSET 8 */
-#define AGG_EN BIT(29)
-
-/* OFFSET 12 */
-#define SEQ_SHT 16
-
-/* OFFSET 16 */
-#define QoS BIT(6)
-#define HW_SEQ_EN BIT(7)
-#define USERATE BIT(8)
-#define DISDATAFB BIT(10)
-#define DATA_SHORT BIT(24)
-#define DATA_BW BIT(25)
-
-/* OFFSET 20 */
-#define SGI BIT(6)
-
-struct txdesc_8723a {
- u32 pktlen:16;
- u32 offset:8;
- u32 bmc:1;
- u32 htc:1;
- u32 ls:1;
- u32 fs:1;
- u32 linip:1;
- u32 noacm:1;
- u32 gf:1;
- u32 own:1;
-
- u32 macid:5;
- u32 agg_en:1;
- u32 bk:1;
- u32 rd_en:1;
- u32 qsel:5;
- u32 rd_nav_ext:1;
- u32 lsig_txop_en:1;
- u32 pifs:1;
- u32 rate_id:4;
- u32 navusehdr:1;
- u32 en_desc_id:1;
- u32 sectype:2;
- u32 rsvd0424:2;
- u32 pkt_offset:5; /* unit: 8 bytes */
- u32 rsvd0431:1;
-
- u32 rts_rc:6;
- u32 data_rc:6;
- u32 rsvd0812:2;
- u32 bar_rty_th:2;
- u32 rsvd0816:1;
- u32 morefrag:1;
- u32 raw:1;
- u32 ccx:1;
- u32 ampdu_density:3;
- u32 bt_null:1;
- u32 ant_sel_a:1;
- u32 ant_sel_b:1;
- u32 tx_ant_cck:2;
- u32 tx_antl:2;
- u32 tx_ant_ht:2;
-
- u32 nextheadpage:8;
- u32 tailpage:8;
- u32 seq:12;
- u32 cpu_handle:1;
- u32 tag1:1;
- u32 trigger_int:1;
- u32 hwseq_en:1;
-
- u32 rtsrate:5;
- u32 ap_dcfe:1;
- u32 hwseq_sel:2;
- u32 userate:1;
- u32 disrtsfb:1;
- u32 disdatafb:1;
- u32 cts2self:1;
- u32 rtsen:1;
- u32 hw_rts_en:1;
- u32 port_id:1;
- u32 rsvd1615:3;
- u32 wait_dcts:1;
- u32 cts2ap_en:1;
- u32 data_sc:2;
- u32 data_stbc:2;
- u32 data_short:1;
- u32 data_bw:1;
- u32 rts_short:1;
- u32 rts_bw:1;
- u32 rts_sc:2;
- u32 vcs_stbc:2;
-
- u32 datarate:6;
- u32 sgi:1;
- u32 try_rate:1;
- u32 data_ratefb_lmt:5;
- u32 rts_ratefb_lmt:4;
- u32 rty_lmt_en:1;
- u32 data_rt_lmt:6;
- u32 usb_txagg_num:8;
-
- u32 txagg_a:5;
- u32 txagg_b:5;
- u32 use_max_len:1;
- u32 max_agg_num:5;
- u32 mcsg1_max_len:4;
- u32 mcsg2_max_len:4;
- u32 mcsg3_max_len:4;
- u32 mcs7_sgi_max_len:4;
-
- u32 checksum:16; /* TxBuffSize(PCIe)/CheckSum(USB) */
- u32 mcsg4_max_len:4;
- u32 mcsg5_max_len:4;
- u32 mcsg6_max_len:4;
- u32 mcs15_sgi_max_len:4;
-};
-
-#define txdesc_set_ccx_sw_8723a(txdesc, value) \
- do { \
- ((struct txdesc_8723a *)(txdesc))->mcsg4_max_len = (((value)>>8) & 0x0f); \
- ((struct txdesc_8723a *)(txdesc))->mcs15_sgi_max_len= (((value)>>4) & 0x0f); \
- ((struct txdesc_8723a *)(txdesc))->mcsg6_max_len = ((value) & 0x0f); \
- } while (0)
-
-struct txrpt_ccx_8723a {
- /* offset 0 */
- u8 tag1:1;
- u8 rsvd:4;
- u8 int_bt:1;
- u8 int_tri:1;
- u8 int_ccx:1;
-
- /* offset 1 */
- u8 mac_id:5;
- u8 pkt_drop:1;
- u8 pkt_ok:1;
- u8 bmc:1;
-
- /* offset 2 */
- u8 retry_cnt:6;
- u8 lifetime_over:1;
- u8 retry_over:1;
-
- /* offset 3 */
- u8 ccx_qtime0;
- u8 ccx_qtime1;
-
- /* offset 5 */
- u8 final_data_rate;
-
- /* offset 6 */
- u8 sw1:4;
- u8 qsel:4;
-
- /* offset 7 */
- u8 sw0;
-};
-
-#define txrpt_ccx_sw_8723a(txrpt_ccx) ((txrpt_ccx)->sw0 + ((txrpt_ccx)->sw1<<8))
-#define txrpt_ccx_qtime_8723a(txrpt_ccx) ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
-
-void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf);
-void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc, u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
-
-int rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
-s32 rtl8723au_xmit_buf_handler(struct rtw_adapter *padapter);
-#define hal_xmit_handler rtl8723au_xmit_buf_handler
-bool rtl8723au_hal_xmit(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
-int rtl8723au_mgnt_xmit(struct rtw_adapter *padapter, struct xmit_frame *pmgntframe);
-bool rtl8723au_xmitframe_complete(struct rtw_adapter *padapter, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
-
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_ap.h b/drivers/staging/rtl8723au/include/rtw_ap.h
deleted file mode 100644
index 55a708f9fc5b..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_ap.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_AP_H_
-#define __RTW_AP_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-
-#ifdef CONFIG_8723AU_AP_MODE
-
-/* external function */
-
-void init_mlme_ap_info23a(struct rtw_adapter *padapter);
-void free_mlme_ap_info23a(struct rtw_adapter *padapter);
-/* void update_BCNTIM(struct rtw_adapter *padapter); */
-void rtw_add_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index, u8 *data, u8 len);
-void rtw_remove_bcn_ie(struct rtw_adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index);
-void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx);
-void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_level);
-void expire_timeout_chk23a(struct rtw_adapter *padapter);
-void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info *psta);
-int rtw_check_beacon_data23a(struct rtw_adapter *padapter,
- struct ieee80211_mgmt *mgmt, unsigned int len);
-void rtw_ap_restore_network(struct rtw_adapter *padapter);
-void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode);
-
-void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated);
-void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta);
-u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
-u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason);
-int rtw_sta_flush23a(struct rtw_adapter *padapter);
-void start_ap_mode23a(struct rtw_adapter *padapter);
-void stop_ap_mode23a(struct rtw_adapter *padapter);
-#endif /* end of CONFIG_8723AU_AP_MODE */
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
deleted file mode 100644
index d1fa95d28904..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_cmd.h
+++ /dev/null
@@ -1,815 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_CMD_H_
-#define __RTW_CMD_H_
-
-#include <wlan_bssdef.h>
-#include <rtw_rf.h>
-
-#define C2H_MEM_SZ (16*1024)
-
-#include <osdep_service.h>
-#include <ieee80211.h> /* <ieee80211/ieee80211.h> */
-
-
-#define MAX_CMDSZ 1024
-#define MAX_RSPSZ 512
-#define MAX_EVTSZ 1024
-
-#define CMDBUFF_ALIGN_SZ 512
-
-struct cmd_obj {
- struct work_struct work;
- struct rtw_adapter *padapter;
- u16 cmdcode;
- int res;
- u32 cmdsz;
- u8 *parmbuf;
- u8 *rsp;
- u32 rspsz;
-};
-
-struct cmd_priv {
- struct workqueue_struct *wq;
- u32 cmd_issued_cnt;
- u32 cmd_done_cnt;
- u32 rsp_cnt;
- struct rtw_adapter *padapter;
-};
-
-#define C2H_QUEUE_MAX_LEN 10
-
-struct evt_priv {
- struct workqueue_struct *wq;
- struct work_struct irq_wk;
-};
-
-#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
-do {\
- pcmd->cmdcode = code;\
- pcmd->parmbuf = (u8 *)(pparm);\
- pcmd->cmdsz = sizeof (*pparm);\
- pcmd->rsp = NULL;\
- pcmd->rspsz = 0;\
-} while(0)
-
-struct c2h_evt_hdr {
- u8 id:4;
- u8 plen:4;
- u8 seq;
- u8 payload[0];
-};
-
-/*
- * Do not reorder - this allows for struct evt_work to be passed on to
- * rtw_c2h_wk_cmd23a() as a 'struct c2h_evt_hdr *' without making an
- * additional copy.
- */
-struct evt_work {
- union {
- struct c2h_evt_hdr c2h_evt;
- u8 buf[16];
- } u;
- struct work_struct work;
- struct rtw_adapter *adapter;
-};
-
-#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
-
-void rtw_evt_work(struct work_struct *work);
-
-int rtw_enqueue_cmd23a(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
-void rtw_free_cmd_obj23a(struct cmd_obj *pcmd);
-
-int rtw_cmd_thread23a(void *context);
-
-int rtw_init_cmd_priv23a(struct cmd_priv *pcmdpriv);
-
-u32 rtw_init_evt_priv23a (struct evt_priv *pevtpriv);
-void rtw_free_evt_priv23a (struct evt_priv *pevtpriv);
-void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
-
-enum rtw_drvextra_cmd_id
-{
- NONE_WK_CID,
- DYNAMIC_CHK_WK_CID,
- DM_CTRL_WK_CID,
- PBC_POLLING_WK_CID,
- POWER_SAVING_CTRL_WK_CID,/* IPS,AUTOSuspend */
- LPS_CTRL_WK_CID,
- ANT_SELECT_WK_CID,
- P2P_PS_WK_CID,
- P2P_PROTO_WK_CID,
- CHECK_HIQ_WK_CID,/* for softap mode, check hi queue if empty */
- C2H_WK_CID,
- RTP_TIMER_CFG_WK_CID,
- MAX_WK_CID
-};
-
-enum LPS_CTRL_TYPE
-{
- LPS_CTRL_SCAN=0,
- LPS_CTRL_JOINBSS=1,
- LPS_CTRL_CONNECT=2,
- LPS_CTRL_DISCONNECT=3,
- LPS_CTRL_SPECIAL_PACKET=4,
- LPS_CTRL_LEAVE=5,
-};
-
-enum RFINTFS {
- SWSI,
- HWSI,
- HWPI,
-};
-
-/*
-Caller Mode: Infra, Ad-HoC(C)
-
-Notes: To enter USB suspend mode
-
-Command Mode
-
-*/
-struct usb_suspend_parm {
- u32 action;/* 1: sleep, 0:resume */
-};
-
-/*
-Caller Mode: Infra, Ad-HoC
-
-Notes: To join a known BSS.
-
-Command-Event Mode
-
-*/
-
-/*
-Caller Mode: Infra, Ad-HoC(C)
-
-Notes: To disconnect the current associated BSS
-
-Command Mode
-
-*/
-struct disconnect_parm {
- u32 deauth_timeout_ms;
-};
-
-struct setopmode_parm {
- enum nl80211_iftype mode;
-};
-
-/*
-Caller Mode: AP, Ad-HoC, Infra
-
-Notes: To ask RTL8711 performing site-survey
-
-Command-Event Mode
-
-*/
-
-#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
-#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
-struct sitesurvey_parm {
- int scan_mode; /* active: 1, passive: 0 */
- u8 ssid_num;
- u8 ch_num;
- struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To set the auth type of RTL8711. open/shared/802.1x
-
-Command Mode
-
-*/
-struct setauth_parm {
- u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
- u8 _1x; /* 0: PSK, 1: TLS */
- u8 rsvd[2];
-};
-
-/*
-Caller Mode: Infra
-
-a. algorithm: wep40, wep104, tkip & aes
-b. keytype: grp key/unicast key
-c. key contents
-
-when shared key ==> keyid is the camid
-when 802.1x ==> keyid [0:1] ==> grp key
-when 802.1x ==> keyid > 2 ==> unicast key
-
-*/
-struct setkey_parm {
- u32 algorithm; /* encryption algorithm, could be none, wep40, TKIP, CCMP, wep104 */
- u8 keyid;
- u8 grpkey; /* 1: this is the grpkey for 802.1x. 0: this is the unicast key for 802.1x */
- u8 set_tx; /* 1: main tx key for wep. 0: other key. */
- u8 key[16]; /* this could be 40 or 104 */
-};
-
-/*
-When in AP or Ad-Hoc mode, this is used to
-allocate an sw/hw entry for a newly associated sta.
-
-Command
-
-when shared key ==> algorithm/keyid
-
-*/
-struct set_stakey_parm {
- u8 addr[ETH_ALEN];
- u8 id;/* currently for erasing cam entry if algorithm == _NO_PRIVACY_ */
- u32 algorithm;
- u8 key[16];
-};
-
-struct set_stakey_rsp {
- u8 addr[ETH_ALEN];
- u8 keyid;
- u8 rsvd;
-};
-
-/*
-Caller Ad-Hoc/AP
-
-Command -Rsp(AID == CAMID) mode
-
-This is to force fw to add an sta_data entry per driver's request.
-
-FW will write an cam entry associated with it.
-
-*/
-struct set_assocsta_parm {
- u8 addr[ETH_ALEN];
-};
-
-struct set_assocsta_rsp {
- u8 cam_id;
- u8 rsvd[3];
-};
-
-/*
- Caller Ad-Hoc/AP
-
- Command mode
-
- This is to force fw to del an sta_data entry per driver's request
-
- FW will invalidate the cam entry associated with it.
-
-*/
-struct del_assocsta_parm {
- u8 addr[ETH_ALEN];
-};
-
-/*
-Caller Mode: AP/Ad-HoC(M)
-
-Notes: To notify fw that given staid has changed its power state
-
-Command Mode
-
-*/
-struct setstapwrstate_parm {
- u8 staid;
- u8 status;
- u8 hwaddr[6];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To setup the basic rate of RTL8711
-
-Command Mode
-
-*/
-struct setbasicrate_parm {
- u8 basicrates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To read the current basic rate
-
-Command-Rsp Mode
-
-*/
-struct getbasicrate_parm {
- u32 rsvd;
-};
-
-struct getbasicrate_rsp {
- u8 basicrates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To setup the data rate of RTL8711
-
-Command Mode
-
-*/
-struct setdatarate_parm {
- u8 mac_id;
- u8 datarates[NumRates];
-};
-
-/*
-Caller Mode: Any
-
-Notes: To read the current data rate
-
-Command-Rsp Mode
-
-*/
-struct getdatarate_parm {
- u32 rsvd;
-};
-
-struct getdatarate_rsp {
- u8 datarates[NumRates];
-};
-
-
-/*
-Caller Mode: Any
-AP: AP can use the info for the contents of beacon frame
-Infra: STA can use the info when sitesurveying
-Ad-HoC(M): Like AP
-Ad-HoC(C): Like STA
-
-
-Notes: To set the phy capability of the NIC
-
-Command Mode
-
-*/
-
-struct setphyinfo_parm {
- struct regulatory_class class_sets[NUM_REGULATORYS];
- u8 status;
-};
-
-struct getphyinfo_parm {
- u32 rsvd;
-};
-
-struct getphyinfo_rsp {
- struct regulatory_class class_sets[NUM_REGULATORYS];
- u8 status;
-};
-
-/*
-Caller Mode: Any
-
-Notes: To set the channel/modem/band
-This command will be used when channel/modem/band is changed.
-
-Command Mode
-
-*/
-struct setphy_parm {
- u8 rfchannel;
- u8 modem;
-};
-
-/*
-Caller Mode: Any
-
-Notes: To get the current setting of channel/modem/band
-
-Command-Rsp Mode
-
-*/
-struct getphy_parm {
- u32 rsvd;
-};
-
-struct getphy_rsp {
- u8 rfchannel;
- u8 modem;
-};
-
-struct readBB_parm {
- u8 offset;
-};
-
-struct readBB_rsp {
- u8 value;
-};
-
-struct readTSSI_parm {
- u8 offset;
-};
-
-struct readTSSI_rsp {
- u8 value;
-};
-
-struct writeBB_parm {
- u8 offset;
- u8 value;
-};
-
-struct readRF_parm {
- u8 offset;
-};
-
-struct readRF_rsp {
- u32 value;
-};
-
-struct writeRF_parm {
- u32 offset;
- u32 value;
-};
-
-struct getrfintfs_parm {
- u8 rfintfs;
-};
-
-struct Tx_Beacon_param {
- struct wlan_bssid_ex network;
-};
-
-/* CMD param Formart for driver extra cmd handler */
-struct drvextra_cmd_parm {
- int ec_id; /* extra cmd id */
- int type_size; /* Can use this field as the type id or command size */
- unsigned char *pbuf;
-};
-
-/*------------------- Below are used for RF/BB tunning ---------------------*/
-
-struct setantenna_parm {
- u8 tx_antset;
- u8 rx_antset;
- u8 tx_antenna;
- u8 rx_antenna;
-};
-
-struct enrateadaptive_parm {
- u32 en;
-};
-
-struct settxagctbl_parm {
- u32 txagc[MAX_RATES_LENGTH];
-};
-
-struct gettxagctbl_parm {
- u32 rsvd;
-};
-
-struct gettxagctbl_rsp {
- u32 txagc[MAX_RATES_LENGTH];
-};
-
-struct setagcctrl_parm {
- u32 agcctrl; /* 0: pure hw, 1: fw */
-};
-
-struct setssup_parm {
- u32 ss_ForceUp[MAX_RATES_LENGTH];
-};
-
-struct getssup_parm {
- u32 rsvd;
-};
-
-struct getssup_rsp {
- u8 ss_ForceUp[MAX_RATES_LENGTH];
-};
-
-struct setssdlevel_parm {
- u8 ss_DLevel[MAX_RATES_LENGTH];
-};
-
-struct getssdlevel_parm {
- u32 rsvd;
-};
-
-struct getssdlevel_rsp {
- u8 ss_DLevel[MAX_RATES_LENGTH];
-};
-
-struct setssulevel_parm {
- u8 ss_ULevel[MAX_RATES_LENGTH];
-};
-
-struct getssulevel_parm {
- u32 rsvd;
-};
-
-struct getssulevel_rsp {
- u8 ss_ULevel[MAX_RATES_LENGTH];
-};
-
-struct setcountjudge_parm {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
-struct getcountjudge_parm {
- u32 rsvd;
-};
-
-struct getcountjudge_rsp {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
-struct setratable_parm {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
-};
-
-struct getratable_parm {
- uint rsvd;
-};
-
-struct getratable_rsp {
- u8 ss_ForceUp[NumRates];
- u8 ss_ULevel[NumRates];
- u8 ss_DLevel[NumRates];
- u8 count_judge[NumRates];
-};
-
-/* to get TX,RX retry count */
-struct gettxretrycnt_parm{
- unsigned int rsvd;
-};
-struct gettxretrycnt_rsp{
- unsigned long tx_retrycnt;
-};
-
-struct getrxretrycnt_parm{
- unsigned int rsvd;
-};
-struct getrxretrycnt_rsp{
- unsigned long rx_retrycnt;
-};
-
-/* to get BCNOK,BCNERR count */
-struct getbcnokcnt_parm{
- unsigned int rsvd;
-};
-struct getbcnokcnt_rsp{
- unsigned long bcnokcnt;
-};
-
-struct getbcnerrcnt_parm{
- unsigned int rsvd;
-};
-struct getbcnerrcnt_rsp{
- unsigned long bcnerrcnt;
-};
-
-/* to get current TX power level */
-struct getcurtxpwrlevel_parm{
- unsigned int rsvd;
-};
-
-struct getcurtxpwrlevel_rsp{
- unsigned short tx_power;
-};
-
-struct setprobereqextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[0];
-};
-
-struct setassocreqextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[0];
-};
-
-struct setproberspextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[0];
-};
-
-struct setassocrspextraie_parm {
- unsigned char e_id;
- unsigned char ie_len;
- unsigned char ie[0];
-};
-
-struct addBaReq_parm {
- unsigned int tid;
- u8 addr[ETH_ALEN];
-};
-
-/*H2C Handler index: 46 */
-struct set_ch_parm {
- u8 ch;
- u8 bw;
- u8 ch_offset;
-};
-
-/*H2C Handler index: 59 */
-struct SetChannelPlan_param {
- u8 channel_plan;
-};
-
-/*H2C Handler index: 60 */
-struct LedBlink_param {
- struct led_8723a *pLed;
-};
-
-/*H2C Handler index: 61 */
-struct SetChannelSwitch_param {
- u8 new_ch_no;
-};
-
-/*H2C Handler index: 62 */
-struct TDLSoption_param {
- u8 addr[ETH_ALEN];
- u8 option;
-};
-
-#define GEN_CMD_CODE(cmd) cmd ## _CMD_
-
-
-/*
-
-Result:
-0x00: success
-0x01: success, and check Response.
-0x02: cmd ignored due to duplicated sequcne number
-0x03: cmd dropped due to invalid cmd code
-0x04: reserved.
-
-*/
-
-#define H2C_RSP_OFFSET 512
-
-#define H2C_SUCCESS 0x00
-#define H2C_SUCCESS_RSP 0x01
-#define H2C_DUPLICATED 0x02
-#define H2C_DROPPED 0x03
-#define H2C_PARAMETERS_ERROR 0x04
-#define H2C_REJECTED 0x05
-#define H2C_CMD_OVERFLOW 0x06
-#define H2C_RESERVED 0x07
-
-int rtw_setassocsta_cmd(struct rtw_adapter *padapter, u8 *mac_addr);
-int rtw_setstandby_cmd(struct rtw_adapter *padapter, uint action);
-int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter, struct cfg80211_ssid *ssid, int ssid_num, struct rtw_ieee80211_channel *ch, int ch_num);
-int rtw_createbss_cmd23a(struct rtw_adapter *padapter);
-int rtw_createbss_cmd23a_ex(struct rtw_adapter *padapter, unsigned char *pbss, unsigned int sz);
-int rtw_setphy_cmd(struct rtw_adapter *padapter, u8 modem, u8 ch);
-int rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key);
-int rtw_clearstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
-int rtw_joinbss_cmd23a(struct rtw_adapter *padapter, struct wlan_network* pnetwork);
-int rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
-int rtw_setopmode_cmd23a(struct rtw_adapter *padapter, enum nl80211_iftype ifmode);
-int rtw_setdatarate_cmd(struct rtw_adapter *padapter, u8 *rateset);
-int rtw_setbasicrate_cmd(struct rtw_adapter *padapter, u8 *rateset);
-int rtw_setbbreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 val);
-int rtw_setrfreg_cmd(struct rtw_adapter *padapter, u8 offset, u32 val);
-int rtw_getbbreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 *pval);
-int rtw_getrfreg_cmd(struct rtw_adapter *padapter, u8 offset, u8 *pval);
-int rtw_setrfintfs_cmd(struct rtw_adapter *padapter, u8 mode);
-int rtw_setrttbl_cmd(struct rtw_adapter *padapter, struct setratable_parm *prate_table);
-int rtw_getrttbl_cmd(struct rtw_adapter *padapter, struct getratable_rsp *pval);
-
-int rtw_gettssi_cmd(struct rtw_adapter *padapter, u8 offset, u8 *pval);
-int rtw_setfwdig_cmd(struct rtw_adapter*padapter, u8 type);
-int rtw_setfwra_cmd(struct rtw_adapter*padapter, u8 type);
-
-int rtw_addbareq_cmd23a(struct rtw_adapter*padapter, u8 tid, u8 *addr);
-
-int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *adapter);
-
-int rtw_lps_ctrl_wk_cmd23a(struct rtw_adapter*padapter, u8 lps_ctrl_type, u8 enqueue);
-
-int rtw_ps_cmd23a(struct rtw_adapter*padapter);
-
-#ifdef CONFIG_8723AU_AP_MODE
-int rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter);
-#endif
-
-int rtw_set_chplan_cmd(struct rtw_adapter*padapter, u8 chplan, u8 enqueue);
-int rtw_led_blink_cmd(struct rtw_adapter*padapter, struct led_8723a *pLed);
-int rtw_set_csa_cmd(struct rtw_adapter*padapter, u8 new_ch_no);
-
-int rtw_c2h_wk_cmd23a(struct rtw_adapter *padapter, u8 *c2h_evt);
-
-int rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-
-void rtw_survey_cmd_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-void rtw_disassoc_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-void rtw_joinbss_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-void rtw_createbss_cmd23a_callback(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-void rtw_getbbrfreg_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-
-void rtw_setstaKey_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-void rtw_setassocsta_cmdrsp_callback23a(struct rtw_adapter *padapter, struct cmd_obj *pcmd);
-
-struct _cmd_callback {
- u32 cmd_code;
- void (*callback)(struct rtw_adapter *padapter, struct cmd_obj *cmd);
-};
-
-enum rtw_h2c_cmd {
- GEN_CMD_CODE(_Read_MACREG) , /*0*/
- GEN_CMD_CODE(_Write_MACREG) ,
- GEN_CMD_CODE(_Read_BBREG) ,
- GEN_CMD_CODE(_Write_BBREG) ,
- GEN_CMD_CODE(_Read_RFREG) ,
- GEN_CMD_CODE(_Write_RFREG) , /*5*/
- GEN_CMD_CODE(_Read_EEPROM) ,
- GEN_CMD_CODE(_Write_EEPROM) ,
- GEN_CMD_CODE(_Read_EFUSE) ,
- GEN_CMD_CODE(_Write_EFUSE) ,
-
- GEN_CMD_CODE(_Read_CAM) , /*10*/
- GEN_CMD_CODE(_Write_CAM) ,
- GEN_CMD_CODE(_setBCNITV),
- GEN_CMD_CODE(_setMBIDCFG),
- GEN_CMD_CODE(_JoinBss), /*14*/
- GEN_CMD_CODE(_DisConnect) , /*15*/
- GEN_CMD_CODE(_CreateBss) ,
- GEN_CMD_CODE(_SetOpMode) ,
- GEN_CMD_CODE(_SiteSurvey), /*18*/
- GEN_CMD_CODE(_SetAuth) ,
-
- GEN_CMD_CODE(_SetKey) , /*20*/
- GEN_CMD_CODE(_SetStaKey) ,
- GEN_CMD_CODE(_SetAssocSta) ,
- GEN_CMD_CODE(_DelAssocSta) ,
- GEN_CMD_CODE(_SetStaPwrState) ,
- GEN_CMD_CODE(_SetBasicRate) , /*25*/
- GEN_CMD_CODE(_GetBasicRate) ,
- GEN_CMD_CODE(_SetDataRate) ,
- GEN_CMD_CODE(_GetDataRate) ,
- GEN_CMD_CODE(_SetPhyInfo) ,
-
- GEN_CMD_CODE(_GetPhyInfo) , /*30*/
- GEN_CMD_CODE(_SetPhy) ,
- GEN_CMD_CODE(_GetPhy) ,
- GEN_CMD_CODE(_readRssi) ,
- GEN_CMD_CODE(_readGain) ,
- GEN_CMD_CODE(_SetAtim) , /*35*/
- GEN_CMD_CODE(_SetPwrMode) ,
- GEN_CMD_CODE(_JoinbssRpt),
- GEN_CMD_CODE(_SetRaTable) ,
- GEN_CMD_CODE(_GetRaTable) ,
-
- GEN_CMD_CODE(_GetCCXReport), /*40*/
- GEN_CMD_CODE(_GetDTMReport),
- GEN_CMD_CODE(_GetTXRateStatistics),
- GEN_CMD_CODE(_SetUsbSuspend),
- GEN_CMD_CODE(_SetH2cLbk),
- GEN_CMD_CODE(_AddBAReq) , /*45*/
- GEN_CMD_CODE(_SetChannel), /*46*/
- GEN_CMD_CODE(_SetTxPower),
- GEN_CMD_CODE(_SwitchAntenna),
- GEN_CMD_CODE(_SetCrystalCap),
- GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
-
- GEN_CMD_CODE(_SetSingleToneTx),/*51*/
- GEN_CMD_CODE(_SetCarrierSuppressionTx),
- GEN_CMD_CODE(_SetContinuousTx),
- GEN_CMD_CODE(_SwitchBandwidth), /*54*/
- GEN_CMD_CODE(_TX_Beacon), /*55*/
-
- GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
- GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
- GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
-
- GEN_CMD_CODE(_SetChannelPlan), /*59*/
- GEN_CMD_CODE(_LedBlink), /*60*/
-
- GEN_CMD_CODE(_SetChannelSwitch), /*61*/
- GEN_CMD_CODE(_TDLS), /*62*/
-
- MAX_H2CCMD
-};
-
-extern struct _cmd_callback rtw_cmd_callback[];
-
-#endif /* _CMD_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_debug.h b/drivers/staging/rtl8723au/include/rtw_debug.h
deleted file mode 100644
index 159183e9cab0..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_debug.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define _drv_always_ 1
-#define _drv_emerg_ 2
-#define _drv_alert_ 3
-#define _drv_err_ 4
-#define _drv_warning_ 5
-#define _drv_notice_ 6
-#define _drv_info_ 7
-#define _drv_debug_ 8
-
-#define _module_rtl871x_xmit_c_ BIT(0)
-#define _module_xmit_osdep_c_ BIT(1)
-#define _module_rtl871x_recv_c_ BIT(2)
-#define _module_recv_osdep_c_ BIT(3)
-#define _module_rtl871x_mlme_c_ BIT(4)
-#define _module_mlme_osdep_c_ BIT(5)
-#define _module_rtl871x_sta_mgt_c_ BIT(6)
-#define _module_rtl871x_cmd_c_ BIT(7)
-#define _module_cmd_osdep_c_ BIT(8)
-#define _module_rtl871x_io_c_ BIT(9)
-#define _module_io_osdep_c_ BIT(10)
-#define _module_os_intfs_c_ BIT(11)
-#define _module_rtl871x_security_c_ BIT(12)
-#define _module_rtl871x_eeprom_c_ BIT(13)
-#define _module_hal_init_c_ BIT(14)
-#define _module_hci_hal_init_c_ BIT(15)
-#define _module_rtl871x_ioctl_c_ BIT(16)
-#define _module_rtl871x_ioctl_set_c_ BIT(17)
-#define _module_rtl871x_ioctl_query_c_ BIT(18)
-#define _module_rtl871x_pwrctrl_c_ BIT(19)
-#define _module_hci_intfs_c_ BIT(20)
-#define _module_hci_ops_c_ BIT(21)
-#define _module_osdep_service_c_ BIT(22)
-#define _module_mp_ BIT(23)
-#define _module_hci_ops_os_c_ BIT(24)
-#define _module_rtl871x_ioctl_os_c BIT(25)
-#define _module_rtl8712_cmd_c_ BIT(26)
-#define _module_rtl8192c_xmit_c_ BIT(28)
-#define _module_hal_xmit_c_ BIT(28) /* duplication intentional */
-#define _module_efuse_ BIT(29)
-#define _module_rtl8712_recv_c_ BIT(30)
-#define _module_rtl8712_led_c_ BIT(31)
-
-#undef _MODULE_DEFINE_
-
-#if defined _RTW_XMIT_C_
- #define _MODULE_DEFINE_ _module_rtl871x_xmit_c_
-#elif defined _XMIT_OSDEP_C_
- #define _MODULE_DEFINE_ _module_xmit_osdep_c_
-#elif defined _RTW_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl871x_recv_c_
-#elif defined _RECV_OSDEP_C_
- #define _MODULE_DEFINE_ _module_recv_osdep_c_
-#elif defined _RTW_MLME_C_
- #define _MODULE_DEFINE_ _module_rtl871x_mlme_c_
-#elif defined _MLME_OSDEP_C_
- #define _MODULE_DEFINE_ _module_mlme_osdep_c_
-#elif defined _RTW_MLME_EXT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTW_STA_MGT_C_
- #define _MODULE_DEFINE_ _module_rtl871x_sta_mgt_c_
-#elif defined _RTW_CMD_C_
- #define _MODULE_DEFINE_ _module_rtl871x_cmd_c_
-#elif defined _CMD_OSDEP_C_
- #define _MODULE_DEFINE_ _module_cmd_osdep_c_
-#elif defined _RTW_IO_C_
- #define _MODULE_DEFINE_ _module_rtl871x_io_c_
-#elif defined _IO_OSDEP_C_
- #define _MODULE_DEFINE_ _module_io_osdep_c_
-#elif defined _OS_INTFS_C_
- #define _MODULE_DEFINE_ _module_os_intfs_c_
-#elif defined _RTW_SECURITY_C_
- #define _MODULE_DEFINE_ _module_rtl871x_security_c_
-#elif defined _RTW_EEPROM_C_
- #define _MODULE_DEFINE_ _module_rtl871x_eeprom_c_
-#elif defined _HAL_INTF_C_
- #define _MODULE_DEFINE_ _module_hal_init_c_
-#elif (defined _HCI_HAL_INIT_C_) || (defined _SDIO_HALINIT_C_)
- #define _MODULE_DEFINE_ _module_hci_hal_init_c_
-#elif defined _RTL871X_IOCTL_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_c_
-#elif defined _RTL871X_IOCTL_SET_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_set_c_
-#elif defined _RTL871X_IOCTL_QUERY_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_query_c_
-#elif defined _RTL871X_PWRCTRL_C_
- #define _MODULE_DEFINE_ _module_rtl871x_pwrctrl_c_
-#elif defined _RTW_PWRCTRL_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _HCI_INTF_C_
- #define _MODULE_DEFINE_ _module_hci_intfs_c_
-#elif defined _HCI_OPS_C_
- #define _MODULE_DEFINE_ _module_hci_ops_c_
-#elif defined _SDIO_OPS_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _OSDEP_HCI_INTF_C_
- #define _MODULE_DEFINE_ _module_hci_intfs_c_
-#elif defined _OSDEP_SERVICE_C_
- #define _MODULE_DEFINE_ _module_osdep_service_c_
-#elif defined _HCI_OPS_OS_C_
- #define _MODULE_DEFINE_ _module_hci_ops_os_c_
-#elif defined _RTL871X_IOCTL_LINUX_C_
- #define _MODULE_DEFINE_ _module_rtl871x_ioctl_os_c
-#elif defined _RTL8712_CMD_C_
- #define _MODULE_DEFINE_ _module_rtl8712_cmd_c_
-#elif defined _RTL8192C_XMIT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTL8723AS_XMIT_C_
- #define _MODULE_DEFINE_ 1
-#elif defined _RTL8712_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
-#elif defined _RTL8192CU_RECV_C_
- #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
-#elif defined _RTL871X_MLME_EXT_C_
- #define _MODULE_DEFINE_ _module_mlme_osdep_c_
-#elif defined _RTW_MP_C_
- #define _MODULE_DEFINE_ _module_mp_
-#elif defined _RTW_MP_IOCTL_C_
- #define _MODULE_DEFINE_ _module_mp_
-#elif defined _RTW_EFUSE_C_
- #define _MODULE_DEFINE_ _module_efuse_
-#endif
-
-#define DRIVER_PREFIX "RTL8723AU: "
-#define DEBUG_LEVEL (_drv_err_)
-#define DBG_8723A_LEVEL(_level, fmt, arg...) \
- do { \
- if (_level <= GlobalDebugLevel23A) \
- pr_info(DRIVER_PREFIX fmt, ##arg);\
- } while (0)
-
-#define DBG_8723A(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel23A) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
-#define MSG_8723A(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel23A) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
-extern u32 GlobalDebugLevel23A;
-
-__printf(3, 4)
-void rt_trace(int comp, int level, const char *fmt, ...);
-
-#define RT_TRACE(_Comp, _Level, Fmt, ...) \
-do { \
- if (_Level <= GlobalDebugLevel23A) \
- rt_trace(_Comp, _Level, Fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, \
- _HexDataLen) \
- if (_Level <= GlobalDebugLevel23A) { \
- int __i; \
- u8 *ptr = (u8 *)_HexData; \
- pr_info("%s", DRIVER_PREFIX); \
- pr_info(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], \
- (((__i + 1) % 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- }
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723au/include/rtw_eeprom.h b/drivers/staging/rtl8723au/include/rtw_eeprom.h
deleted file mode 100644
index a86f36e49dd1..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_eeprom.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_EEPROM_H__
-#define __RTW_EEPROM_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define RTL8712_EEPROM_ID 0x8712
-/* define EEPROM_MAX_SIZE 256 */
-
-#define HWSET_MAX_SIZE_512 512
-#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
-
-#define CLOCK_RATE 50 /* 100us */
-
-/* EEPROM opcodes */
-#define EEPROM_READ_OPCODE 06
-#define EEPROM_WRITE_OPCODE 05
-#define EEPROM_ERASE_OPCODE 07
-#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
-#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
-
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_ALPHA 0x1
-#define EEPROM_CID_Senao 0x3
-#define EEPROM_CID_NetCore 0x5
-#define EEPROM_CID_CAMEO 0X8
-#define EEPROM_CID_SITECOM 0x9
-#define EEPROM_CID_COREGA 0xB
-#define EEPROM_CID_EDIMAX_BELKIN 0xC
-#define EEPROM_CID_SERCOMM_BELKIN 0xE
-#define EEPROM_CID_CAMEO1 0xF
-#define EEPROM_CID_WNC_COREGA 0x12
-#define EEPROM_CID_CLEVO 0x13
-#define EEPROM_CID_WHQL 0xFE /* added by chiyoko for dtm, 20090108 */
-
-/* */
-/* Customer ID, note that: */
-/* This variable is initiailzed through EEPROM or registry, */
-/* however, its definition may be different with that in EEPROM for */
-/* EEPROM size consideration. So, we have to perform proper translation between them. */
-/* Besides, CustomerID of registry has precedence of that of EEPROM. */
-/* defined below. 060703, by rcnjko. */
-/* */
-enum rt_customer_id
-{
- RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
- RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
- RT_CID_DLINK = 12,
- RT_CID_PRONET = 13,
- RT_CID_COREGA = 14,
- RT_CID_CHINA_MOBILE = 15,
- RT_CID_819x_ALPHA = 16,
- RT_CID_819x_Sitecom = 17,
- RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded for CCX functions, but for test behavior like retry limit and tx report. By Bruce, 2009-02-17. */
- RT_CID_819x_Lenovo = 19,
- RT_CID_819x_QMI = 20,
- RT_CID_819x_Edimax_Belkin = 21,
- RT_CID_819x_Sercomm_Belkin = 22,
- RT_CID_819x_CAMEO1 = 23,
- RT_CID_819x_MSI = 24,
- RT_CID_819x_Acer = 25,
- RT_CID_819x_AzWave_ASUS = 26,
- RT_CID_819x_AzWave = 27, /* For AzWave in PCIe, The ID is AzWave use and not only Asus */
- RT_CID_819x_HP = 28,
- RT_CID_819x_WNC_COREGA = 29,
- RT_CID_819x_Arcadyan_Belkin = 30,
- RT_CID_819x_SAMSUNG = 31,
- RT_CID_819x_CLEVO = 32,
- RT_CID_819x_DELL = 33,
- RT_CID_819x_PRONETS = 34,
- RT_CID_819x_Edimax_ASUS = 35,
- RT_CID_819x_CAMEO_NETGEAR = 36,
- RT_CID_PLANEX = 37,
- RT_CID_CC_C = 38,
- RT_CID_819x_Xavi = 39,
- RT_CID_819x_FUNAI_TV = 40,
- RT_CID_819x_ALPHA_WD=41,
-};
-
-struct eeprom_priv {
- u8 mac_addr[6]; /* PermanentAddress */
- u8 bautoload_fail_flag;
- u8 bloadfile_fail_flag;
- u8 bloadmac_fail_flag;
- /* u8 bempty; */
- /* u8 sys_config; */
- /* u8 config0; */
- u16 channel_plan;
- /* u8 country_string[3]; */
- /* u8 tx_power_b[15]; */
- /* u8 tx_power_g[15]; */
- /* u8 tx_power_a[201]; */
-
- u8 EepromOrEfuse;
-
- u8 efuse_eeprom_data[HWSET_MAX_SIZE_512]; /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
-};
-
-void eeprom_write16(struct rtw_adapter *padapter, u16 reg, u16 data);
-u16 eeprom_read16(struct rtw_adapter *padapter, u16 reg);
-void read_eeprom_content(struct rtw_adapter *padapter);
-void eeprom_read_sz(struct rtw_adapter *padapter, u16 reg, u8 *data, u32 sz);
-
-void read_eeprom_content_by_attrib(struct rtw_adapter *padapter);
-
-#endif /* __RTL871X_EEPROM_H__ */
diff --git a/drivers/staging/rtl8723au/include/rtw_efuse.h b/drivers/staging/rtl8723au/include/rtw_efuse.h
deleted file mode 100644
index c577e260f151..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_efuse.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __RTW_EFUSE_H__
-#define __RTW_EFUSE_H__
-
-#include <osdep_service.h>
-
-#define EFUSE_ERROE_HANDLE 1
-
-#define PG_STATE_HEADER 0x01
-#define PG_STATE_WORD_0 0x02
-#define PG_STATE_WORD_1 0x04
-#define PG_STATE_WORD_2 0x08
-#define PG_STATE_WORD_3 0x10
-#define PG_STATE_DATA 0x20
-
-#define PG_SWBYTE_H 0x01
-#define PG_SWBYTE_L 0x02
-
-#define PGPKT_DATA_SIZE 8
-
-#define EFUSE_WIFI 0
-#define EFUSE_BT 1
-
-enum _EFUSE_DEF_TYPE {
- TYPE_EFUSE_MAX_SECTION = 0,
- TYPE_EFUSE_REAL_CONTENT_LEN = 1,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
- TYPE_EFUSE_MAP_LEN = 4,
- TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
- TYPE_EFUSE_CONTENT_LEN_BANK = 6,
-};
-
-/* E-Fuse */
-#define EFUSE_MAP_SIZE 256
-
-#define EFUSE_MAX_SIZE 512
-/* end of E-Fuse */
-
-#define EFUSE_MAX_MAP_LEN 256
-#define EFUSE_MAX_HW_SIZE 512
-#define EFUSE_MAX_SECTION_BASE 16
-
-#define EXT_HEADER(header) ((header & 0x1F) == 0x0F)
-#define ALL_WORDS_DISABLED(wde) ((wde & 0x0F) == 0x0F)
-#define GET_HDR_OFFSET_2_0(header) ( (header & 0xE0) >> 5)
-
-#define EFUSE_REPEAT_THRESHOLD_ 3
-
-/* */
-/* The following is for BT Efuse definition */
-/* */
-#define EFUSE_BT_MAX_MAP_LEN 1024
-#define EFUSE_MAX_BANK 4
-#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
-/* */
-/*--------------------------Define Parameters-------------------------------*/
-#define EFUSE_MAX_WORD_UNIT 4
-
-/*------------------------------Define structure----------------------------*/
-struct pg_pkt_struct {
- u8 offset;
- u8 word_en;
- u8 data[8];
- u8 word_cnts;
-};
-
-/*------------------------Export global variable----------------------------*/
-
-u16 efuse_GetMaxSize23a(struct rtw_adapter *padapter);
-int rtw_efuse_access23a(struct rtw_adapter *padapter, u8 bRead, u16 start_addr, u16 cnts, u8 *data);
-int rtw_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
-u8 rtw_efuse_map_write(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
-int rtw_BT_efuse_map_read23a(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
-u8 rtw_BT_efuse_map_write(struct rtw_adapter *padapter, u16 addr, u16 cnts, u8 *data);
-
-u16 Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType);
-u8 Efuse_CalculateWordCnts23a(u8 word_en);
-void ReadEFuseByte23a(struct rtw_adapter *Adapter, u16 _offset, u8 *pbuf);
-void EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType, u8 type, void *pOut);
-int efuse_OneByteRead23a(struct rtw_adapter *pAdapter, u16 addr, u8 *data);
-int efuse_OneByteWrite23a(struct rtw_adapter *pAdapter, u16 addr, u8 data);
-
-void Efuse_PowerSwitch23a(struct rtw_adapter *pAdapter, u8 bWrite,
- u8 PwrState);
-int Efuse_PgPacketRead23a(struct rtw_adapter *pAdapter, u8 offset, u8 *data);
-int Efuse_PgPacketWrite23a(struct rtw_adapter *pAdapter, u8 offset, u8 word_en, u8 *data);
-void efuse_WordEnableDataRead23a(u8 word_en, u8 *sourdata, u8 *targetdata);
-u8 Efuse_WordEnableDataWrite23a(struct rtw_adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data);
-
-u8 EFUSE_Read1Byte23a(struct rtw_adapter *pAdapter, u16 Address);
-void EFUSE_ShadowMapUpdate23a(struct rtw_adapter *pAdapter, u8 efuseType);
-void EFUSE_ShadowRead23a(struct rtw_adapter *pAdapter, u8 Type, u16 Offset, u32 *Value);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_event.h b/drivers/staging/rtl8723au/include/rtw_event.h
deleted file mode 100644
index 4557aeccc604..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_event.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_EVENT_H_
-#define _RTW_EVENT_H_
-
-#include <osdep_service.h>
-#include <wlan_bssdef.h>
-
-/*
-Used to report a bss has been scanned
-*/
-struct survey_event {
- struct wlan_bssid_ex *bss;
-};
-
-/*
-Used to report that the requested site survey has been done.
-bss_cnt indicates the number of bss that has been reported.
-*/
-struct surveydone_event {
- unsigned int bss_cnt;
-};
-
-/*
-Used to report the link result of joinning the given bss
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-*/
-struct joinbss_event {
- struct wlan_network network;
-};
-
-/*
-Used to report a given STA has joinned the created BSS.
-It is used in AP/Ad-HoC(M) mode.
-*/
-struct stassoc_event {
- unsigned char macaddr[6];
- unsigned char rsvd[2];
- int cam_id;
-};
-
-struct stadel_event {
- unsigned char macaddr[6];
- unsigned char rsvd[2]; /* for reason */
- int mac_id;
-};
-
-struct addba_event {
- unsigned int tid;
-};
-
-#define GEN_EVT_CODE(event) event ## _EVT_
-
-struct fwevent {
- u32 parmsize;
- void (*event_callback)(struct rtw_adapter *dev, const u8 *pbuf);
-};
-
-#endif /* _WLANEVENT_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_ht.h b/drivers/staging/rtl8723au/include/rtw_ht.h
deleted file mode 100644
index 780eb8944118..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_ht.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_HT_H_
-#define _RTW_HT_H_
-
-#include <osdep_service.h>
-#include "linux/ieee80211.h"
-#include "wifi.h"
-
-struct ht_priv {
- bool ht_option;
- bool ampdu_enable;/* for enable Tx A-MPDU */
- /* u8 baddbareq_issued[16]; */
- u32 tx_amsdu_enable;/* for enable Tx A-MSDU */
- u32 tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
- u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz, updated when join_callback. */
-
- u8 bwmode;/* */
- u8 ch_offset;/* PRIME_CHNL_OFFSET */
- u8 sgi;/* short GI */
-
- /* for processing Tx A-MPDU */
- u16 agg_enable_bitmap;
- /* u8 ADDBA_retry_count; */
- u16 candidate_tid_bitmap;
-
- struct ieee80211_ht_cap ht_cap;
-};
-
-#endif /* _RTL871X_HT_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_io.h b/drivers/staging/rtl8723au/include/rtw_io.h
deleted file mode 100644
index c8119e2c6545..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_io.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#ifndef _RTW_IO_H_
-#define _RTW_IO_H_
-
-#include <osdep_service.h>
-#include <osdep_intf.h>
-
-#include <asm/byteorder.h>
-#include <linux/semaphore.h>
-#include <linux/list.h>
-/* include <linux/smp_lock.h> */
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-
-#define rtw_usb_buffer_alloc(dev, size, dma) usb_alloc_coherent((dev), (size), (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), (dma))
-#define rtw_usb_buffer_free(dev, size, addr, dma) usb_free_coherent((dev), (size), (addr), (dma))
-
-#define NUM_IOREQ 8
-
-#define MAX_PROT_SZ (64-16)
-
-#define _IOREADY 0
-#define _IO_WAIT_COMPLETE 1
-#define _IO_WAIT_RSP 2
-
-/* IO COMMAND TYPE */
-#define _IOSZ_MASK_ (0x7F)
-#define _IO_WRITE_ BIT(7)
-#define _IO_FIXED_ BIT(8)
-#define _IO_BURST_ BIT(9)
-#define _IO_BYTE_ BIT(10)
-#define _IO_HW_ BIT(11)
-#define _IO_WORD_ BIT(12)
-#define _IO_SYNC_ BIT(13)
-#define _IO_CMDMASK_ (0x1F80)
-
-
-/*
- For prompt mode accessing, caller shall free io_req
- Otherwise, io_handler will free io_req
-*/
-
-
-
-/* IO STATUS TYPE */
-#define _IO_ERR_ BIT(2)
-#define _IO_SUCCESS_ BIT(1)
-#define _IO_DONE_ BIT(0)
-
-
-#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
-#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
-#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
-
-#define IO_RD32_ASYNC (_IO_WORD_)
-#define IO_RD16_ASYNC (_IO_HW_)
-#define IO_RD8_ASYNC (_IO_BYTE_)
-
-#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
-#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
-#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
-
-#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
-#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
-#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
-
-/*
-
- Only Sync. burst accessing is provided.
-
-*/
-
-#define IO_WR_BURST(x) (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ( (x) & _IOSZ_MASK_))
-#define IO_RD_BURST(x) (_IO_SYNC_ | _IO_BURST_ | ( (x) & _IOSZ_MASK_))
-
-
-
-/* below is for the intf_option bit defition... */
-
-#define _INTF_ASYNC_ BIT(0) /* support async io */
-
-struct intf_priv;
-
-struct io_req {
- struct list_head list;
- u32 addr;
- volatile u32 val;
- u32 command;
- u32 status;
- u8 *pbuf;
- struct semaphore sema;
-
- void (*_async_io_callback)(struct rtw_adapter *padater, struct io_req *pio_req, u8 *cnxt);
- u8 *cnxt;
-};
-
-struct reg_protocol_rd {
-
-#ifdef __LITTLE_ENDIAN
-
- /* DW1 */
- u32 NumOfTrans:4;
- u32 Reserved1:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 ByteCount:7;
- u32 WriteEnable:1; /* 0:read, 1:write */
- u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
- u32 BurstMode:1;
- u32 Byte1Access:1;
- u32 Byte2Access:1;
- u32 Byte4Access:1;
- u32 Reserved3:3;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
- /* DW4 */
- /* u32 Value; */
-#else
-
-
-/* DW1 */
- u32 Reserved1 :4;
- u32 NumOfTrans :4;
-
- u32 Reserved2 :24;
-
- /* DW2 */
- u32 WriteEnable : 1;
- u32 ByteCount :7;
-
-
- u32 Reserved3 : 3;
- u32 Byte4Access : 1;
-
- u32 Byte2Access : 1;
- u32 Byte1Access : 1;
- u32 BurstMode :1 ;
- u32 FixOrContinuous : 1;
-
- u32 Reserved4 : 16;
-
- /* DW3 */
- u32 BusAddress;
-
- /* DW4 */
- /* u32 Value; */
-
-#endif
-
-};
-
-
-struct reg_protocol_wt {
-
-
-#ifdef __LITTLE_ENDIAN
-
- /* DW1 */
- u32 NumOfTrans:4;
- u32 Reserved1:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 ByteCount:7;
- u32 WriteEnable:1; /* 0:read, 1:write */
- u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
- u32 BurstMode:1;
- u32 Byte1Access:1;
- u32 Byte2Access:1;
- u32 Byte4Access:1;
- u32 Reserved3:3;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
- /* DW4 */
- u32 Value;
-
-#else
- /* DW1 */
- u32 Reserved1 :4;
- u32 NumOfTrans :4;
-
- u32 Reserved2 :24;
-
- /* DW2 */
- u32 WriteEnable : 1;
- u32 ByteCount :7;
-
- u32 Reserved3 : 3;
- u32 Byte4Access : 1;
-
- u32 Byte2Access : 1;
- u32 Byte1Access : 1;
- u32 BurstMode :1 ;
- u32 FixOrContinuous : 1;
-
- u32 Reserved4 : 16;
-
- /* DW3 */
- u32 BusAddress;
-
- /* DW4 */
- u32 Value;
-
-#endif
-
-};
-
-#define PlatformEFIOWrite1Byte(_a, _b, _c) \
- rtl8723au_write8(_a, _b, _c)
-#define PlatformEFIOWrite2Byte(_a, _b, _c) \
- rtl8723au_write16(_a, _b, _c)
-#define PlatformEFIOWrite4Byte(_a, _b, _c) \
- rtl8723au_write32(_a, _b, _c)
-
-#define PlatformEFIORead1Byte(_a, _b) rtl8723au_read8(_a, _b)
-#define PlatformEFIORead2Byte(_a, _b) rtl8723au_read16(_a, _b)
-#define PlatformEFIORead4Byte(_a, _b) rtl8723au_read32(_a, _b)
-
-#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme.h b/drivers/staging/rtl8723au/include/rtw_mlme.h
deleted file mode 100644
index dbd3a5f5c523..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_mlme.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *
- ******************************************************************************/
-#ifndef __RTW_MLME_H_
-#define __RTW_MLME_H_
-
-#include <osdep_service.h>
-#include <mlme_osdep.h>
-#include <drv_types.h>
-#include <wlan_bssdef.h>
-
-#define MAX_BSS_CNT 128
-#define MAX_JOIN_TIMEOUT 6500
-
-/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
-
-#define SCANNING_TIMEOUT 8000
-
-#define SCAN_INTERVAL (30) /* unit:2sec, 30*2 = 60sec */
-
-#define SCANQUEUE_LIFETIME 20 /* unit:sec */
-
-#define WIFI_NULL_STATE 0x00000000
-
-#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state.*/
-#define WIFI_REASOC_STATE 0x00000002
-#define WIFI_SLEEP_STATE 0x00000004
-#define WIFI_STATION_STATE 0x00000008
-
-#define WIFI_AP_STATE 0x00000010
-#define WIFI_ADHOC_STATE 0x00000020
-#define WIFI_ADHOC_MASTER_STATE 0x00000040
-#define WIFI_UNDER_LINKING 0x00000080
-
-#define WIFI_UNDER_WPS 0x00000100
-#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
-/* to indicate the station is under site surveying */
-#define WIFI_SITE_MONITOR 0x00000800
-
-#define WIFI_MP_STATE 0x00010000
-#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
-#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
-#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to out of skb */
-#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
-#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
-#define WIFI_MP_LPBK_STATE 0x00400000
-
-#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
-#define _FW_LINKED WIFI_ASOC_STATE
-#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
-
-
-enum dot11AuthAlgrthmNum {
- dot11AuthAlgrthm_Open = 0,
- dot11AuthAlgrthm_Shared,
- dot11AuthAlgrthm_8021X,
- dot11AuthAlgrthm_Auto,
- dot11AuthAlgrthm_MaxNum
-};
-
-/* Scan type including active and passive scan. */
-enum rt_scan_type {
- SCAN_PASSIVE,
- SCAN_ACTIVE,
- SCAN_MIX,
-};
-
-enum {
- GHZ24_50 = 0,
- GHZ_50,
- GHZ_24,
-};
-
-/*
-
-there are several "locks" in mlme_priv,
-since mlme_priv is a shared resource between many threads,
-like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
-
-Each _queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
-
-To avoid possible dead lock, any thread trying to modifiying mlme_priv
-SHALL not lock up more than one locks at a time!
-*/
-
-struct rt_link_detect {
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
- u32 NumRxUnicastOkInPeriod;
- bool bBusyTraffic;
- bool bTxBusyTraffic;
- bool bRxBusyTraffic;
- bool bHigherBusyTraffic; /* For interrupt migration purpose. */
- bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according as Rx traffic. */
- bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according as Tx traffic. */
-};
-
-struct mlme_priv {
- spinlock_t lock;
- int fw_state;
- u8 bScanInProcess;
- u8 to_join; /* flag */
- u8 to_roaming; /* roaming trying times */
-
- struct rtw_adapter *nic_hdl;
-
- u8 not_indic_disco;
- struct rtw_queue scanned_queue;
-
- struct cfg80211_ssid assoc_ssid;
- u8 assoc_bssid[6];
-
- struct wlan_network cur_network;
-
- /* uint wireless_mode; no used, remove it */
-
- u32 scan_interval;
-
- struct timer_list assoc_timer;
-
- uint assoc_by_bssid;
- uint assoc_by_rssi;
-
- struct timer_list scan_to_timer;
-
- struct timer_list set_scan_deny_timer;
- atomic_t set_scan_deny; /* 0: allowed, 1: deny */
-
- unsigned int qos_option;
-
- /* Number of non-HT AP/stations */
- int num_sta_no_ht;
-
- int num_FortyMHzIntolerant;
-
- struct ht_priv htpriv;
-
- struct rt_link_detect LinkDetectInfo;
- struct timer_list dynamic_chk_timer; /* dynamic/periodic check timer */
-
- u8 key_mask; /* use for ips to set wep key after ips_leave23a */
- u8 acm_mask; /* for wmm acm mask */
- u8 ChannelPlan;
- enum rt_scan_type scan_mode; /* active: 1, passive: 0 */
-
- u8 *wps_probe_req_ie;
- u32 wps_probe_req_ie_len;
- u8 *assoc_req;
- u32 assoc_req_len;
- u32 assoc_rsp_len;
- u8 *assoc_rsp;
-
-#ifdef CONFIG_8723AU_AP_MODE
- /* Number of associated Non-ERP stations (i.e., stations using 802.11b
- * in 802.11g BSS) */
- int num_sta_non_erp;
-
- /* Number of associated stations that do not support Short Slot Time */
- int num_sta_no_short_slot_time;
-
- /* Number of associated stations that do not support Short Preamble */
- int num_sta_no_short_preamble;
-
- int olbc; /* Overlapping Legacy BSS Condition */
-
- /* Number of HT associated stations that do not support greenfield */
- int num_sta_ht_no_gf;
-
- /* Number of associated non-HT stations */
- /* int num_sta_no_ht; */
-
- /* Number of HT associated stations 20 MHz */
- int num_sta_ht_20mhz;
-
- /* Overlapping BSS information */
- int olbc_ht;
-
- u16 ht_op_mode;
-
- spinlock_t bcn_update_lock;
- u8 update_bcn;
-
-#endif /* ifdef CONFIG_8723AU_AP_MODE */
-};
-
-void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf);
-void rtw_survey_event_cb23a(struct rtw_adapter *adapter, const u8 *pbuf);
-void rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf);
-void rtw23a_joinbss_event_cb(struct rtw_adapter *adapter, const u8 *pbuf);
-void rtw_stassoc_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf);
-void rtw_stadel_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf);
-
-int event_thread(void *context);
-void rtw23a_join_to_handler(unsigned long);
-
-void rtw_free_network_queue23a(struct rtw_adapter *adapter);
-int rtw_init_mlme_priv23a(struct rtw_adapter *adapter);
-
-void rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv);
-
-int rtw_do_join_adhoc(struct rtw_adapter *adapter);
-int rtw_do_join_network(struct rtw_adapter *adapter,
- struct wlan_network *candidate);
-int rtw_select_and_join_from_scanned_queue23a(struct mlme_priv *pmlmepriv);
-int rtw_set_key23a(struct rtw_adapter *adapter,
- struct security_priv *psecuritypriv, int keyid, u8 set_tx);
-int rtw_set_auth23a(struct rtw_adapter *adapter,
- struct security_priv *psecuritypriv);
-
-static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
-{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress => bssid */
- /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress => ibss mac address */
- return pmlmepriv->cur_network.network.MacAddress;
-}
-
-static inline bool check_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- if (pmlmepriv->fw_state & state)
- return true;
-
- return false;
-}
-
-static inline int get_fwstate(struct mlme_priv *pmlmepriv)
-{
- return pmlmepriv->fw_state;
-}
-
-/*
- * No Limit on the calling context,
- * therefore set it to be the critical section...
- *
- * ### NOTE:#### (!!!!)
- * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
- */
-static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- pmlmepriv->fw_state |= state;
- /* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
- pmlmepriv->bScanInProcess = true;
-}
-
-static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
-{
- pmlmepriv->fw_state &= ~state;
- /* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
- pmlmepriv->bScanInProcess = false;
-}
-
-/*
- * No Limit on the calling context,
- * therefore set it to be the critical section...
- */
-static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
-{
- spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, state))
- pmlmepriv->fw_state ^= state;
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
-{
- spin_lock_bh(&pmlmepriv->lock);
- _clr_fwstate_(pmlmepriv, state);
- spin_unlock_bh(&pmlmepriv->lock);
-}
-
-void rtw_disconnect_hdl23a_under_linked(struct rtw_adapter *adapter,
- struct sta_info *psta, u8 free_assoc);
-void rtw_generate_random_ibss23a(u8 *pibss);
-struct wlan_network *rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr);
-struct wlan_network *rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue);
-
-void rtw_free_assoc_resources23a(struct rtw_adapter *adapter,
- int lock_scanned_queue);
-void rtw_indicate_disconnect23a(struct rtw_adapter *adapter);
-void rtw_indicate_connect23a(struct rtw_adapter *adapter);
-void rtw_scan_abort23a(struct rtw_adapter *adapter);
-
-int rtw_restruct_sec_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len);
-int rtw_restruct_wmm_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie,
- uint in_len, uint initial_out_len);
-void rtw_init_registrypriv_dev_network23a(struct rtw_adapter *adapter);
-
-void rtw_update_registrypriv_dev_network23a(struct rtw_adapter *adapter);
-
-void rtw_scan_timeout_handler23a(unsigned long data);
-
-void rtw_dynamic_check_timer_handler(unsigned long data);
-bool rtw_is_scan_deny(struct rtw_adapter *adapter);
-void rtw_clear_scan_deny(struct rtw_adapter *adapter);
-void rtw_set_scan_deny_timer_hdl(unsigned long data);
-void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms);
-
-void rtw23a_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
-
-void _rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv);
-
-struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv, gfp_t gfp);
-
-int rtw_if_up23a(struct rtw_adapter *padapter);
-
-int rtw_linked_check(struct rtw_adapter *padapter);
-
-void rtw_joinbss_reset23a(struct rtw_adapter *padapter);
-
-bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
- u8 *out_ie, uint in_len, uint *pout_len);
-void rtw_update_ht_cap23a(struct rtw_adapter *padapter,
- u8 *pie, uint ie_len);
-void rtw_issue_addbareq_cmd23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-
-bool rtw_is_same_ibss23a(struct rtw_adapter *adapter,
- struct wlan_network *pnetwork);
-int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
-
-void rtw23a_roaming(struct rtw_adapter *adapter,
- struct wlan_network *tgt_network);
-void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming);
-
-#endif /* __RTL871X_MLME_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
deleted file mode 100644
index 0e7d3da91471..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ /dev/null
@@ -1,683 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_MLME_EXT_H_
-#define __RTW_MLME_EXT_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wlan_bssdef.h>
-
-
-/* Commented by Albert 20101105 */
-/* Increase the SURVEY_TO value from 100 to 150 ( 100ms to 150ms ) */
-/* The Realtek 8188CE SoftAP takes around 100 ms to send the probe response after receiving the probe request. */
-/* So, this driver tries to extend the dwell time for each scanned channel. */
-/* This increases the chance of receiving the probe response from the SoftAP. */
-
-#define SURVEY_TO (100)
-#define REAUTH_TO (300) /* 50) */
-#define REASSOC_TO (300) /* 50) */
-/* define DISCONNECT_TO (3000) */
-#define ADDBA_TO (2000)
-
-#define LINKED_TO (1) /* unit:2 sec, 1x2=2 sec */
-
-#define REAUTH_LIMIT (4)
-#define REASSOC_LIMIT (4)
-#define READDBA_LIMIT (2)
-
-#define ROAMING_LIMIT 8
-
-#define DYNAMIC_FUNC_DISABLE (0x0)
-
-/* ====== enum odm_ability ======== */
-/* BB ODM section BIT 0-15 */
-#define DYNAMIC_BB_DIG BIT(0)
-#define DYNAMIC_BB_RA_MASK BIT(1)
-#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
-#define DYNAMIC_BB_BB_FA_CNT BIT(3)
-
-#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
-#define DYNAMIC_BB_CCK_PD BIT(5)
-#define DYNAMIC_BB_ANT_DIV BIT(6)
-#define DYNAMIC_BB_PWR_SAVE BIT(7)
-#define DYNAMIC_BB_PWR_TRAIN BIT(8)
-#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
-#define DYNAMIC_BB_PATH_DIV BIT(10)
-#define DYNAMIC_BB_PSD BIT(11)
-
-/* MAC DM section BIT 16-23 */
-#define DYNAMIC_MAC_EDCA_TURBO BIT(16)
-#define DYNAMIC_MAC_EARLY_MODE BIT(17)
-
-/* RF ODM section BIT 24-31 */
-#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
-#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
-#define DYNAMIC_RF_CALIBRATION BIT(26)
-
-#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
-
-#define _HW_STATE_NOLINK_ 0x00
-#define _HW_STATE_ADHOC_ 0x01
-#define _HW_STATE_STATION_ 0x02
-#define _HW_STATE_AP_ 0x03
-
-
-#define _1M_RATE_ 0
-#define _2M_RATE_ 1
-#define _5M_RATE_ 2
-#define _11M_RATE_ 3
-#define _6M_RATE_ 4
-#define _9M_RATE_ 5
-#define _12M_RATE_ 6
-#define _18M_RATE_ 7
-#define _24M_RATE_ 8
-#define _36M_RATE_ 9
-#define _48M_RATE_ 10
-#define _54M_RATE_ 11
-
-
-extern unsigned char WMM_OUI23A[];
-extern unsigned char WPS_OUI23A[];
-extern unsigned char WFD_OUI23A[];
-extern unsigned char P2P_OUI23A[];
-
-extern unsigned char WMM_INFO_OUI23A[];
-extern unsigned char WMM_PARA_OUI23A[];
-
-
-/* */
-/* Channel Plan Type. */
-/* Note: */
-/* We only add a new channel plan when it differs from every channel plan */
-/* listed below. */
-/* If you only want to customize the actions (scan period or join actions) of one channel plan, */
-/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
-/* */
-enum { /* _RT_CHANNEL_DOMAIN */
- /* old channel plan mapping ===== */
- RT_CHANNEL_DOMAIN_FCC = 0x00,
- RT_CHANNEL_DOMAIN_IC = 0x01,
- RT_CHANNEL_DOMAIN_ETSI = 0x02,
- RT_CHANNEL_DOMAIN_SPAIN = 0x03,
- RT_CHANNEL_DOMAIN_FRANCE = 0x04,
- RT_CHANNEL_DOMAIN_MKK = 0x05,
- RT_CHANNEL_DOMAIN_MKK1 = 0x06,
- RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
- RT_CHANNEL_DOMAIN_TELEC = 0x08,
- RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
- RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
- RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
- RT_CHANNEL_DOMAIN_CHINA = 0x0C,
- RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
- RT_CHANNEL_DOMAIN_KOREA = 0x0E,
- RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
- RT_CHANNEL_DOMAIN_JAPAN = 0x10,
- RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
- RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
- RT_CHANNEL_DOMAIN_WORLD_WIDE_5G = 0x13,
- RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
-
- /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
- RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
- RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
- RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
- RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
- RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
- RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
- RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
- RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
- RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
- RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
- RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
- RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
- RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
- RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
- RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
- RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
- RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
- RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
- RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
- RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
- RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
- RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G = 0x41,
- /* Add new channel plan above this line=============== */
- RT_CHANNEL_DOMAIN_MAX,
- RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
-};
-
-enum { /* _RT_CHANNEL_DOMAIN_2G */
- RT_CHANNEL_DOMAIN_2G_WORLD = 0x00, /* Worldwide 13 */
- RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
- RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
- RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
- RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
- RT_CHANNEL_DOMAIN_2G_NULL = 0x05,
- /* Add new channel plan above this line=============== */
- RT_CHANNEL_DOMAIN_2G_MAX,
-};
-
-enum { /* _RT_CHANNEL_DOMAIN_5G */
- RT_CHANNEL_DOMAIN_5G_NULL = 0x00,
- RT_CHANNEL_DOMAIN_5G_ETSI1 = 0x01, /* Europe */
- RT_CHANNEL_DOMAIN_5G_ETSI2 = 0x02, /* Australia, New Zealand */
- RT_CHANNEL_DOMAIN_5G_ETSI3 = 0x03, /* Russia */
- RT_CHANNEL_DOMAIN_5G_FCC1 = 0x04, /* US */
- RT_CHANNEL_DOMAIN_5G_FCC2 = 0x05, /* FCC o/w DFS Channels */
- RT_CHANNEL_DOMAIN_5G_FCC3 = 0x06, /* India, Mexico */
- RT_CHANNEL_DOMAIN_5G_FCC4 = 0x07, /* Venezuela */
- RT_CHANNEL_DOMAIN_5G_FCC5 = 0x08, /* China */
- RT_CHANNEL_DOMAIN_5G_FCC6 = 0x09, /* Israel */
- RT_CHANNEL_DOMAIN_5G_FCC7_IC1 = 0x0A, /* US, Canada */
- RT_CHANNEL_DOMAIN_5G_KCC1 = 0x0B, /* Korea */
- RT_CHANNEL_DOMAIN_5G_MKK1 = 0x0C, /* Japan */
- RT_CHANNEL_DOMAIN_5G_MKK2 = 0x0D, /* Japan (W52, W53) */
- RT_CHANNEL_DOMAIN_5G_MKK3 = 0x0E, /* Japan (W56) */
- RT_CHANNEL_DOMAIN_5G_NCC1 = 0x0F, /* Taiwan */
- RT_CHANNEL_DOMAIN_5G_NCC2 = 0x10, /* Taiwan o/w DFS */
- /* Add new channel plan above this line=============== */
- /* Driver Self Defined ===== */
- RT_CHANNEL_DOMAIN_5G_FCC = 0x11,
- RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x12,
- RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x13,
- RT_CHANNEL_DOMAIN_5G_MAX,
-};
-
-#define rtw_is_channel_plan_valid(chplan) ((chplan) < RT_CHANNEL_DOMAIN_MAX || (chplan) == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
-
-struct rt_channel_plan {
- unsigned char Channel[MAX_CHANNEL_NUM];
- unsigned char Len;
-};
-
-struct rt_channel_plan_2g {
- unsigned char Channel[MAX_CHANNEL_NUM_2G];
- unsigned char Len;
-};
-
-struct rt_channel_plan_5g {
- unsigned char Channel[MAX_CHANNEL_NUM_5G];
- unsigned char Len;
-};
-
-struct rt_channel_plan_map {
- unsigned char Index2G;
- unsigned char Index5G;
-};
-
-enum Associated_AP {
- atherosAP = 0,
- broadcomAP = 1,
- ciscoAP = 2,
- marvellAP = 3,
- ralinkAP = 4,
- realtekAP = 5,
- airgocapAP = 6,
- unknownAP = 7,
- maxAP,
-};
-
-enum { /* HT_IOT_PEER_E */
- HT_IOT_PEER_UNKNOWN = 0,
- HT_IOT_PEER_REALTEK = 1,
- HT_IOT_PEER_REALTEK_92SE = 2,
- HT_IOT_PEER_BROADCOM = 3,
- HT_IOT_PEER_RALINK = 4,
- HT_IOT_PEER_ATHEROS = 5,
- HT_IOT_PEER_CISCO = 6,
- HT_IOT_PEER_MERU = 7,
- HT_IOT_PEER_MARVELL = 8,
- HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP, by Bohn, 2009.12.17 */
- HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
- HT_IOT_PEER_AIRGO = 11,
- HT_IOT_PEER_INTEL = 12,
- HT_IOT_PEER_RTK_APCLIENT = 13,
- HT_IOT_PEER_REALTEK_81XX = 14,
- HT_IOT_PEER_REALTEK_WOW = 15,
- HT_IOT_PEER_TENDA = 16,
- HT_IOT_PEER_MAX = 17
-};
-
-enum SCAN_STATE {
- SCAN_DISABLE = 0,
- SCAN_START = 1,
- SCAN_TXNULL = 2,
- SCAN_PROCESS = 3,
- SCAN_COMPLETE = 4,
- SCAN_STATE_MAX,
-};
-
-struct mlme_handler {
- char *str;
- int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-};
-
-struct action_handler {
- unsigned int num;
- char *str;
- int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
-};
-
-struct ss_res {
- int state;
- int bss_cnt;
- int channel_idx;
- int scan_mode;
- u8 ssid_num;
- u8 ch_num;
- struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
-};
-
-#define WIFI_FW_AUTH_NULL 0x00000100
-#define WIFI_FW_AUTH_STATE 0x00000200
-#define WIFI_FW_AUTH_SUCCESS 0x00000400
-
-#define WIFI_FW_ASSOC_STATE 0x00002000
-#define WIFI_FW_ASSOC_SUCCESS 0x00004000
-
-#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)
-
-struct FW_Sta_Info {
- struct sta_info *psta;
- u32 status;
- u32 rx_pkt;
- u32 retry;
- unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
-};
-
-/*
- * Usage:
- * When one iface is acting in AP mode while the other iface is in STA
- * mode and scanning, the driver should switch back to the AP's operating
- * channel periodically.
- * Parameters info:
- * After the driver has scanned RTW_SCAN_NUM_OF_CH channels, it switches
- * back to the AP's operating channel for
- * RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO milliseconds.
- * Example:
- * For a chip that supports 2.4 GHz + 5 GHz, with AP mode operating on
- * channel 1, RTW_SCAN_NUM_OF_CH = 8, RTW_STAY_AP_CH_MILLISECOND = 3 and
- * SURVEY_TO = 100, when the STA mode iface gets a set_scan command it will:
- * 1. Scan channels 1, 2, 3, 4, 5, 6, 7, 8
- * 2. Go back to channel 1 for 300 milliseconds
- * 3. Continue the site survey on channels 9, 10, 11, 36, 40, 44, 48, 52
- * 4. Go back to channel 1 for 300 milliseconds
- * 5. ... and so on, until the survey is done.
- */
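/*
 * Editorial sketch of the arithmetic described above (not part of the
 * original header): the driver stays on the AP channel for
 * RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO ms between scan bursts; with
 * the values quoted in the example that is 3 * 100 = 300 ms. The
 * EXAMPLE_* names below are illustrative only.
 */
#define EXAMPLE_STAY_AP_CH_FACTOR	3
#define EXAMPLE_STAY_AP_CH_MS	(EXAMPLE_STAY_AP_CH_FACTOR * SURVEY_TO)	/* 300 ms */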
-
-struct mlme_ext_info {
- u32 state;
- u32 reauth_count;
- u32 reassoc_count;
- u32 link_count;
- u32 auth_seq;
- u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
- u32 authModeToggle;
- u32 enc_algo;/* encrypt algorithm; */
- u32 key_index; /* this is only valid for legacy WEP, 0~3 for key id. */
- u32 iv;
- u8 chg_txt[128];
- u16 aid;
- u16 bcn_interval;
- u16 capability;
- u8 assoc_AP_vendor;
- u8 slotTime;
- u8 preamble_mode;
- u8 WMM_enable;
- u8 ERP_enable;
- u8 ERP_IE;
- u8 HT_enable;
- u8 HT_caps_enable;
- u8 HT_info_enable;
- u8 HT_protection;
- u8 turboMode_cts2self;
- u8 turboMode_rtsen;
- u8 SM_PS;
- u8 ADDBA_retry_count;
- u8 dialogToken;
- /* Accept ADDBA Request */
- bool bAcceptAddbaReq;
- u8 bwmode_updated;
- u8 hidden_ssid_mode;
-
- struct ADDBA_request ADDBA_req;
- struct WMM_para_element WMM_param;
- struct ieee80211_ht_cap ht_cap;
- struct ieee80211_ht_operation HT_info;
- struct wlan_bssid_ex network; /* join network or bss_network; if in ap mode, it is the same as cur_network.network */
- struct FW_Sta_Info FW_sta_info[NUM_STA];
-};
-
-/* The information about this channel, including joining, scanning, and power constraints. */
-struct rt_channel_info {
- u8 ChannelNum; /* The channel number. */
- enum rt_scan_type ScanType; /* Scan type such as passive or active scan. */
-};
-
-int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch);
-
-/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
-#define P2P_MAX_REG_CLASSES 10
-
-/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of channels per regulatory class */
-#define P2P_MAX_REG_CLASS_CHANNELS 20
-
-/* struct p2p_channels - List of supported channels */
-struct p2p_channels {
- /* struct p2p_reg_class - Supported regulatory class */
- struct p2p_reg_class {
- /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
- u8 reg_class;
-
- /* channel - Supported channels */
- u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
-
- /* channels - Number of channel entries in use */
- size_t channels;
- } reg_class[P2P_MAX_REG_CLASSES];
-
- /* reg_classes - Number of reg_class entries in use */
- size_t reg_classes;
-};
-
-struct p2p_oper_class_map {
- enum hw_mode {IEEE80211G, IEEE80211A} mode;
- u8 op_class;
- u8 min_chan;
- u8 max_chan;
- u8 inc;
- enum {
- BW20, BW40PLUS, BW40MINUS
- } bw;
-};
-
-struct mlme_ext_priv {
- struct rtw_adapter *padapter;
- u8 mlmeext_init;
- atomic_t event_seq;
- u16 mgnt_seq;
-
- /* struct fw_priv fwpriv; */
-
- unsigned char cur_channel;
- unsigned char cur_bwmode;
- unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
- unsigned char cur_wireless_mode; /* NETWORK_TYPE */
-
- unsigned char max_chan_nums;
- struct rt_channel_info channel_set[MAX_CHANNEL_NUM];
- struct p2p_channels channel_list;
- unsigned char basicrate[NumRates];
- unsigned char datarate[NumRates];
-
- struct ss_res sitesurvey_res;
- struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including current scanning/connecting/connected related info. */
- /* for ap mode, network includes ap's cap_info */
- struct timer_list survey_timer;
- struct timer_list link_timer;
- u16 chan_scan_time;
-
- u8 scan_abort;
- u8 tx_rate; /* TXRATE when USERATE is set. */
-
- u32 retry; /* retry for issue probereq */
-
- u64 TSFValue;
-
- unsigned char bstart_bss;
- u8 update_channel_plan_by_ap_done;
- /* recv_decache check for Action_public frame */
- u8 action_public_dialog_token;
- u16 action_public_rxseq;
- u8 active_keep_alive_check;
-};
-
-int init_mlme_ext_priv23a(struct rtw_adapter *padapter);
-int init_hw_mlme_ext23a(struct rtw_adapter *padapter);
-void free_mlme_ext_priv23a(struct mlme_ext_priv *pmlmeext);
-void init_mlme_ext_timer23a(struct rtw_adapter *padapter);
-void init_addba_retry_timer23a(struct sta_info *psta);
-struct xmit_frame *alloc_mgtxmitframe23a(struct xmit_priv *pxmitpriv);
-
-unsigned char networktype_to_raid23a(unsigned char network_type);
-u8 judge_network_type23a(struct rtw_adapter *padapter, unsigned char *rate,
- int ratelen);
-void get_rate_set23a(struct rtw_adapter *padapter, unsigned char *pbssrate,
- int *bssrate_len);
-void UpdateBrateTbl23a(struct rtw_adapter *padapter, u8 *mBratesOS);
-void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
-
-u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
-void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
-void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
-void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
-
-void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
- unsigned char channel_offset, unsigned short bwmode);
-void SelectChannel23a(struct rtw_adapter *padapter, unsigned char channel);
-
-unsigned int decide_wait_for_beacon_timeout23a(unsigned int bcn_interval);
-
-void clear_cam_entry23a(struct rtw_adapter *padapter, u8 entry);
-
-void invalidate_cam_all23a(struct rtw_adapter *padapter);
-
-int allocate_fw_sta_entry23a(struct rtw_adapter *padapter);
-void flush_all_cam_entry23a(struct rtw_adapter *padapter);
-
-bool IsLegal5GChannel(struct rtw_adapter *Adapter, u8 channel);
-
-void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
- struct rtw_adapter *padapter, bool update_ie);
-
-u8 *get_my_bssid23a(struct wlan_bssid_ex *pnetwork);
-
-bool is_client_associated_to_ap23a(struct rtw_adapter *padapter);
-bool is_client_associated_to_ibss23a(struct rtw_adapter *padapter);
-bool is_IBSS_empty23a(struct rtw_adapter *padapter);
-
-unsigned char check_assoc_AP23a(u8 *pframe, uint len);
-
-int WMM_param_handler23a(struct rtw_adapter *padapter, const u8 *p);
-void WMMOnAssocRsp23a(struct rtw_adapter *padapter);
-
-void HT_caps_handler23a(struct rtw_adapter *padapter, const u8 *p);
-void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p);
-void HTOnAssocRsp23a(struct rtw_adapter *padapter);
-
-void ERP_IE_handler23a(struct rtw_adapter *padapter, const u8 *p);
-void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
-
-void update_beacon23a_info(struct rtw_adapter *padapter,
- struct ieee80211_mgmt *mgmt, uint len,
- struct sta_info *psta);
-int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
- struct ieee80211_mgmt *mgmt, u32 packet_len);
-void update_IOT_info23a(struct rtw_adapter *padapter);
-void update_capinfo23a(struct rtw_adapter *Adapter, u16 updateCap);
-void update_wireless_mode23a(struct rtw_adapter *padapter);
-void update_tx_basic_rate23a(struct rtw_adapter *padapter, u8 modulation);
-void update_bmc_sta_support_rate23a(struct rtw_adapter *padapter, u32 mac_id);
-int update_sta_support_rate23a(struct rtw_adapter *padapter, u8 *pvar_ie,
- uint var_ie_len, int cam_idx);
-
-/* for sta/adhoc mode */
-void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta);
-unsigned int update_basic_rate23a(unsigned char *ptn, unsigned int ptn_sz);
-unsigned int update_supported_rate23a(unsigned char *ptn, unsigned int ptn_sz);
-unsigned int update_MSC_rate23a(struct ieee80211_ht_cap *ht_cap);
-void Update_RA_Entry23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void set_sta_rate23a(struct rtw_adapter *padapter, struct sta_info *psta);
-
-int receive_disconnect23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, unsigned short reason);
-
-unsigned char get_highest_rate_idx23a(u32 mask);
-int support_short_GI23a(struct rtw_adapter *padapter,
- struct ieee80211_ht_cap *ht_cap);
-bool is_ap_in_tkip23a(struct rtw_adapter *padapter);
-bool is_ap_in_wep23a(struct rtw_adapter *padapter);
-bool should_forbid_n_rate23a(struct rtw_adapter *padapter);
-
-void report_join_res23a(struct rtw_adapter *padapter, int res);
-void report_survey_event23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-void report_surveydone_event23a(struct rtw_adapter *padapter);
-void report_del_sta_event23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, unsigned short reason);
-void report_add_sta_event23a(struct rtw_adapter *padapter,
- unsigned char *MacAddr, int cam_idx);
-
-int set_tx_beacon_cmd23a(struct rtw_adapter *padapter);
-unsigned int setup_beacon_frame(struct rtw_adapter *padapter,
- unsigned char *beacon_frame);
-void update_mgnt_tx_rate23a(struct rtw_adapter *padapter, u8 rate);
-void update_mgntframe_attrib23a(struct rtw_adapter *padapter,
- struct pkt_attrib *pattrib);
-void dump_mgntframe23a(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe);
-s32 dump_mgntframe23a_and_wait(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe, int timeout_ms);
-s32 dump_mgntframe23a_and_wait_ack23a(struct rtw_adapter *padapter,
- struct xmit_frame *pmgntframe);
-
-void issue_beacon23a(struct rtw_adapter *padapter, int timeout_ms);
-int issue_nulldata23a(struct rtw_adapter *padapter, unsigned char *da,
- unsigned int power_mode, int try_cnt, int wait_ms);
-int issue_qos_nulldata23a(struct rtw_adapter *padapter, unsigned char *da, u16 tid,
- int try_cnt, int wait_ms);
-int issue_deauth23a(struct rtw_adapter *padapter, unsigned char *da,
- unsigned short reason);
-void issue_action_spct_ch_switch23a(struct rtw_adapter *padapter, u8 *ra,
- u8 new_ch, u8 ch_offset);
-void issue_action_BA23a(struct rtw_adapter *padapter,
- const unsigned char *raddr,
- unsigned char action, unsigned short status);
-int send_delba23a(struct rtw_adapter *padapter, u8 initiator, u8 *addr);
-int send_beacon23a(struct rtw_adapter *padapter);
-
-void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter, int join_res);
-void mlmeext_sta_del_event_callback23a(struct rtw_adapter *padapter);
-void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter, struct sta_info *psta);
-
-void linked_status_chk23a(struct rtw_adapter *padapter);
-
-#define set_survey_timer(mlmeext, ms) \
- /*DBG_8723A("%s set_survey_timer(%p, %d)\n", __func__, (mlmeext), (ms));*/ \
- mod_timer(&mlmeext->survey_timer, jiffies + msecs_to_jiffies(ms));
-
-#define set_link_timer(mlmeext, ms) \
- /*DBG_8723A("%s set_link_timer(%p, %d)\n", __func__, (mlmeext), (ms));*/ \
- mod_timer(&mlmeext->link_timer, jiffies + msecs_to_jiffies(ms));
-
-int cckrates_included23a(unsigned char *rate, int ratelen);
-int cckratesonly_included23a(unsigned char *rate, int ratelen);
-
-void process_addba_req23a(struct rtw_adapter *padapter, u8 *paddba_req, u8 *addr);
-
-void correct_TSF23a(struct rtw_adapter *padapter, struct mlme_ext_priv *pmlmeext);
-
-struct cmd_hdl {
- uint parmsize;
- int (*h2cfuns)(struct rtw_adapter *padapter, const u8 *pbuf);
-};
-
-
-int read_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-int write_macreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-int read_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-int write_bbreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-int read_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-int write_rfreg_hdl(struct rtw_adapter *padapter, u8 *pbuf);
-
-
-int NULL_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int join_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int disconnect_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int createbss_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int setopmode_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int sitesurvey_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int setauth_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int setkey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int set_stakey_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int set_assocsta_hdl(struct rtw_adapter *padapter, const u8 *pbuf);
-int del_assocsta_hdl(struct rtw_adapter *padapter, const u8 *pbuf);
-int add_ba_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-
-int mlme_evt_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int h2c_msg_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int set_ch_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int set_chplan_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int led_blink_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-int set_csa_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf); /* Kurt: Handling DFS channel switch announcement ie. */
-int tdls_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf);
-
-#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl23a},
-#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
-
-struct C2HEvent_Header {
-#ifdef __LITTLE_ENDIAN
-
- unsigned int len:16;
- unsigned int ID:8;
- unsigned int seq:8;
-
-#elif defined(__BIG_ENDIAN)
-
- unsigned int seq:8;
- unsigned int ID:8;
- unsigned int len:16;
-
-#else
-
-# error "Must be LITTLE or BIG Endian"
-
-#endif
-
- unsigned int rsvd;
-};
-
-enum rtw_c2h_event {
- GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
- GEN_EVT_CODE(_Read_BBREG),
- GEN_EVT_CODE(_Read_RFREG),
- GEN_EVT_CODE(_Read_EEPROM),
- GEN_EVT_CODE(_Read_EFUSE),
- GEN_EVT_CODE(_Read_CAM), /*5*/
- GEN_EVT_CODE(_Get_BasicRate),
- GEN_EVT_CODE(_Get_DataRate),
- GEN_EVT_CODE(_Survey), /*8*/
- GEN_EVT_CODE(_SurveyDone), /*9*/
-
- GEN_EVT_CODE(_JoinBss), /*10*/
- GEN_EVT_CODE(_AddSTA),
- GEN_EVT_CODE(_DelSTA),
- GEN_EVT_CODE(_AtimDone),
- GEN_EVT_CODE(_TX_Report),
- GEN_EVT_CODE(_CCX_Report), /*15*/
- GEN_EVT_CODE(_DTM_Report),
- GEN_EVT_CODE(_TX_Rate_Statistics),
- GEN_EVT_CODE(_C2HLBK),
- GEN_EVT_CODE(_FWDBG),
- GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
- GEN_EVT_CODE(_ADDBA),
- GEN_EVT_CODE(_C2HBCN),
- GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
- GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE, work around ASPM */
- MAX_C2HEVT
-};
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_pwrctrl.h b/drivers/staging/rtl8723au/include/rtw_pwrctrl.h
deleted file mode 100644
index 599fed9b365d..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_pwrctrl.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_PWRCTRL_H_
-#define __RTW_PWRCTRL_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define FW_PWR0 0
-#define FW_PWR1 1
-#define FW_PWR2 2
-#define FW_PWR3 3
-
-
-#define HW_PWR0 7
-#define HW_PWR1 6
-#define HW_PWR2 2
-#define HW_PWR3 0
-#define HW_PWR4 8
-
-#define FW_PWRMSK 0x7
-
-
-#define XMIT_ALIVE BIT(0)
-#define RECV_ALIVE BIT(1)
-#define CMD_ALIVE BIT(2)
-#define EVT_ALIVE BIT(3)
-
-enum Power_Mgnt {
- PS_MODE_ACTIVE = 0,
- PS_MODE_MIN,
- PS_MODE_MAX,
- PS_MODE_DTIM,
- PS_MODE_VOIP,
- PS_MODE_UAPSD_WMM,
- PS_MODE_UAPSD,
- PS_MODE_IBSS,
- PS_MODE_WWLAN,
- PM_Radio_Off,
- PM_Card_Disable,
- PS_MODE_NUM
-};
-
-
-/* BIT[2:0] = HW state
- * BIT[3] = Protocol PS state, 0: active, 1: sleep state
- * BIT[4] = sub-state
- */
-
-#define PS_DPS BIT(0)
-#define PS_LCLK (PS_DPS)
-#define PS_RF_OFF BIT(1)
-#define PS_ALL_ON BIT(2)
-#define PS_ST_ACTIVE BIT(3)
-
-#define PS_ISR_ENABLE BIT(4)
-#define PS_IMR_ENABLE BIT(5)
-#define PS_ACK BIT(6)
-#define PS_TOGGLE BIT(7)
-
-#define PS_STATE_MASK (0x0F)
-#define PS_STATE_HW_MASK (0x07)
-#define PS_SEQ_MASK (0xc0)
-
-#define PS_STATE(x) (PS_STATE_MASK & (x))
-#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
-#define PS_SEQ(x) (PS_SEQ_MASK & (x))
-
-#define PS_STATE_S0 (PS_DPS)
-#define PS_STATE_S1 (PS_LCLK)
-#define PS_STATE_S2 (PS_RF_OFF)
-#define PS_STATE_S3 (PS_ALL_ON)
-#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
-
-
-#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
-#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
-#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
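/*
 * Editorial sketch (not part of the original header): PS_STATE_S4 is
 * PS_ST_ACTIVE | PS_ALL_ON = BIT(3) | BIT(2) = 0x0c, so the RF-on and
 * active tests above both hold for it, while PS_STATE_S1 (= PS_LCLK =
 * BIT(0)) satisfies neither.
 */
static inline bool example_ps_s4_is_rf_on_and_active(void)
{
	return PS_IS_RF_ON(PS_STATE_S4) && PS_IS_ACTIVE(PS_STATE_S4);
}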
-
-
-struct reportpwrstate_parm {
- unsigned char mode;
- unsigned char state; /* the CPWM value */
- unsigned short rsvd;
-};
-
-#define LPS_DELAY_TIME (1*HZ) /* 1 sec */
-
-#define EXE_PWR_NONE 0x01
-#define EXE_PWR_IPS 0x02
-#define EXE_PWR_LPS 0x04
-
-/* RF state. */
-enum rt_rf_power_state {
- rf_on, /* RF is on after RFSleep or RFOff */
- rf_sleep, /* 802.11 Power Save mode */
- rf_off, /* HW/SW Radio OFF or Inactive Power Save */
- /* Add the new RF state above this line===== */
- rf_max
-};
-
-/* RF Off Level for IPS or HW/SW radio off */
-#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
-#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
-#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
-/* NIC halt, re-init hw params */
-#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
-/* FW free, re-download the FW */
-#define RT_RF_OFF_LEVL_FREE_FW BIT(4)
-#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
-/* Always enable ASPM and Clock Req in initialization. */
-#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
-/* When LPS is on, disable 2R if no packet is received or transmitted. */
-#define RT_RF_LPS_DISALBE_2R BIT(30)
-#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
-
-#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
- ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
-#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) \
- (ppsc->cur_ps_level &= (~(_PS_FLAG)))
-#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) \
- (ppsc->cur_ps_level |= _PS_FLAG)
-
-
-enum {
- PSBBREG_RF0 = 0,
- PSBBREG_RF1,
- PSBBREG_RF2,
- PSBBREG_AFE0,
- PSBBREG_TOTALCNT
-};
-
-enum { /* for ips_mode */
- IPS_NONE = 0,
- IPS_NORMAL,
- IPS_LEVEL_2,
-};
-
-struct pwrctrl_priv {
- struct semaphore lock;
- volatile u8 rpwm; /* requested power state for fw */
- volatile u8 cpwm; /* fw current power state; updated when 1) it is
- * read from HCPWM or 2) the driver lowers the power level
- */
- volatile u8 tog; /* toggling */
-
- u8 pwr_mode;
- u8 smart_ps;
- u8 bcn_ant_mode;
-
- u8 bpower_saving;
-
- u8 reg_rfoff;
- u32 rfoff_reason;
-
- /* RF OFF Level */
- u32 cur_ps_level;
- u32 reg_rfps_level;
-
- uint ips_enter23a_cnts;
- uint ips_leave23a_cnts;
-
- u8 ips_mode;
- u8 ips_mode_req; /* used to accept the mode setting request */
- uint bips_processing;
- unsigned long ips_deny_time; /* deny IPS until this system time is reached */
- u8 ps_processing; /* used to mark whether in rtw_ps_processor23a */
-
- u8 bLeisurePs;
- u8 LpsIdleCount;
- u8 power_mgnt;
- u8 bFwCurrentInPSMode;
- unsigned long DelayLPSLastTimeStamp;
- u8 btcoex_rfon;
-
- u8 bInSuspend;
-#ifdef CONFIG_8723AU_BT_COEXIST
- u8 bAutoResume;
- u8 autopm_cnt;
-#endif
- u8 bSupportRemoteWakeup;
- struct timer_list pwr_state_check_timer;
- int pwr_state_check_interval;
- u8 pwr_state_check_cnts;
-
- enum rt_rf_power_state rf_pwrstate;/* cur power state */
- enum rt_rf_power_state change_rfpwrstate;
-
- u8 bkeepfwalive;
- unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
-};
-
-#define RTW_PWR_STATE_CHK_INTERVAL 2000
-
-#define _rtw_set_pwr_state_check_timer(pwrctrlpriv, ms) \
- (mod_timer(&pwrctrlpriv->pwr_state_check_timer, jiffies + \
- msecs_to_jiffies(ms)))
-
-#define rtw_set_pwr_state_check_timer(pwrctrlpriv) \
- (_rtw_set_pwr_state_check_timer((pwrctrlpriv), \
- (pwrctrlpriv)->pwr_state_check_interval))
-
-void rtw_init_pwrctrl_priv23a(struct rtw_adapter *adapter);
-void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter);
-
-void rtw_set_ps_mode23a(struct rtw_adapter *padapter, u8 ps_mode,
- u8 smart_ps, u8 bcn_ant_mode);
-void rtw_set_rpwm23a(struct rtw_adapter *padapter, u8 val8);
-void LeaveAllPowerSaveMode23a(struct rtw_adapter *adapter);
-void ips_enter23a(struct rtw_adapter *padapter);
-int ips_leave23a(struct rtw_adapter *padapter);
-
-void rtw_ps_processor23a(struct rtw_adapter *padapter);
-
-enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *adapter);
-
-s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms);
-void LPS_Enter23a(struct rtw_adapter *padapter);
-void LPS_Leave23a(struct rtw_adapter *padapter);
-
-void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms);
-int _rtw_pwr_wakeup23a(struct rtw_adapter *padapter, u32 ips_deffer_ms,
- const char *caller);
-#define rtw_pwr_wakeup(adapter) _rtw_pwr_wakeup23a(adapter, \
- RTW_PWR_STATE_CHK_INTERVAL, __func__)
-int rtw_pm_set_ips23a(struct rtw_adapter *padapter, u8 mode);
-int rtw_pm_set_lps23a(struct rtw_adapter *padapter, u8 mode);
-
-#endif /* __RTL871X_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
deleted file mode 100644
index 85a5edb450e3..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_RECV_H_
-#define _RTW_RECV_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <Hal8723APhyCfg.h>
-
-#define NR_RECVFRAME 256
-
-#define MAX_RXFRAME_CNT 512
-#define MAX_RX_NUMBLKS (32)
-#define RECVFRAME_HDR_ALIGN 128
-
-#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
-
-#define MAX_SUBFRAME_COUNT 64
-
-/* for Rx reordering buffer control */
-struct recv_reorder_ctrl {
- struct rtw_adapter *padapter;
- u8 enable;
- u16 indicate_seq;/* wstart_b, init_value=0xffff */
- u16 wend_b;
- u8 wsize_b;
- struct rtw_queue pending_recvframe_queue;
- struct timer_list reordering_ctrl_timer;
-};
-
-struct stainfo_rxcache {
- u16 tid_rxseq[16];
-/*
- unsigned short tid0_rxseq;
- unsigned short tid1_rxseq;
- unsigned short tid2_rxseq;
- unsigned short tid3_rxseq;
- unsigned short tid4_rxseq;
- unsigned short tid5_rxseq;
- unsigned short tid6_rxseq;
- unsigned short tid7_rxseq;
- unsigned short tid8_rxseq;
- unsigned short tid9_rxseq;
- unsigned short tid10_rxseq;
- unsigned short tid11_rxseq;
- unsigned short tid12_rxseq;
- unsigned short tid13_rxseq;
- unsigned short tid14_rxseq;
- unsigned short tid15_rxseq;
-*/
-};
-
-struct smooth_rssi_data {
- u32 elements[100]; /* array to store values */
- u32 index; /* index to current array to store */
- u32 total_num; /* num of valid elements */
- u32 total_val; /* sum of valid elements */
-};
-
-struct signal_stat {
- u8 update_req; /* used to indicate */
- u8 avg_val; /* avg of valid elements */
- u32 total_num; /* num of valid elements */
- u32 total_val; /* sum of valid elements */
-};
-
-struct phy_info {
- u8 RxPWDBAll;
- u8 SignalQuality; /* in 0-100 index. */
- u8 RxMIMOSignalQuality[RF_PATH_MAX]; /* EVM */
- u8 RxMIMOSignalStrength[RF_PATH_MAX];/* 0~100 */
- s8 RxPower; /* in dBm Translate from PWdB */
- /* Real power in dBm for this packet, without beautification or aggregation.
- * Keep this raw info for use by the other procedures.
- */
- s8 RecvSignalPower;
- u8 BTRxRSSIPercentage;
- u8 SignalStrength; /* in 0-100 index. */
- u8 RxPwr[RF_PATH_MAX];/* per-path's pwdb */
- u8 RxSNR[RF_PATH_MAX];/* per-path's SNR */
-};
-
-
-struct rx_pkt_attrib {
- u16 pkt_len;
- u8 physt;
- u8 drvinfo_sz;
- u8 shift_sz;
- u8 hdrlen; /* the WLAN Header Len */
- u8 amsdu;
- u8 qos;
- u8 priority;
- u8 pw_save;
- u8 mdata;
- u16 seq_num;
- u8 frag_num;
- u8 mfrag;
- u8 order;
- u8 privacy; /* in frame_ctrl field */
- u8 bdecrypted;
- /* when 0, indicates no encryption; when non-zero, indicates the algorithm */
- u32 encrypt;
- u8 iv_len;
- u8 icv_len;
- u8 crc_err;
- u8 icv_err;
-
- u16 eth_type;
-
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
- u8 ta[ETH_ALEN];
- u8 ra[ETH_ALEN];
- u8 bssid[ETH_ALEN];
-
- u8 ack_policy;
-
- u8 tcpchk_valid; /* 0: invalid, 1: valid */
- u8 ip_chkrpt; /* 0: incorrect, 1: correct */
- u8 tcp_chkrpt; /* 0: incorrect, 1: correct */
- u8 key_index;
-
- u8 mcs_rate;
- u8 rxht;
- u8 sgi;
- u8 pkt_rpt_type;
- u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
- struct phy_info phy_info;
-};
-
-/* These definition is used for Rx packet reordering. */
-#define SN_LESS(a, b) (((a-b) & 0x800) != 0)
-#define SN_EQUAL(a, b) (a == b)
-#define REORDER_WAIT_TIME (50) /* (ms) */
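/*
 * Editorial worked example of the 12-bit sequence-number compare above
 * (not part of the original header): SN_LESS(10, 20) evaluates
 * ((10 - 20) & 0x800) != 0, which is true, while SN_LESS(20, 10) gives
 * ((20 - 10) & 0x800) == 0, i.e. false; a backwards distance of less
 * than 2048 is treated as "older", which also handles wrap-around of
 * the 12-bit sequence space (e.g. SN_LESS(4090, 5) is true).
 */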
-
-#define RECVBUFF_ALIGN_SZ 8
-
-#define RXDESC_SIZE 24
-#define RXDESC_OFFSET RXDESC_SIZE
-
-struct recv_stat {
- __le32 rxdw0;
- __le32 rxdw1;
- __le32 rxdw2;
- __le32 rxdw3;
- __le32 rxdw4;
- __le32 rxdw5;
-};
-
-/* accessors of recv_priv: rtw_recv_entry23a (dispatch/passive level),
- * recv_thread (passive), returnpkt (dispatch), halt (passive);
- *
- * enter a critical section to protect it
- */
-struct recv_priv {
- spinlock_t lock;
-
- struct rtw_queue free_recv_queue;
- struct rtw_queue recv_pending_queue;
- struct rtw_queue uc_swdec_pending_queue;
-
- int free_recvframe_cnt;
-
- struct rtw_adapter *adapter;
-
- u32 bIsAnyNonBEPkts;
- u64 rx_bytes;
- u64 rx_pkts;
- u64 rx_drop;
- u64 last_rx_bytes;
-
- uint rx_icv_err;
- uint rx_largepacket_crcerr;
- uint rx_smallpacket_crcerr;
- uint rx_middlepacket_crcerr;
-
- /* u8 *pallocated_urb_buf; */
- u8 rx_pending_cnt;
-
- struct urb *int_in_urb;
-
- u8 *int_in_buf;
-
- struct tasklet_struct irq_prepare_beacon_tasklet;
- struct tasklet_struct recv_tasklet;
- struct sk_buff_head free_recv_skb_queue;
- struct sk_buff_head rx_skb_queue;
- u8 *precv_buf;
-
- /* For displaying the phy information */
- s8 rxpwdb;
- u8 signal_strength;
- u8 signal_qual;
- u8 noise;
- int RxSNRdB[2];
- s8 RxRssi[2];
- int FalseAlmCnt_all;
-
- struct timer_list signal_stat_timer;
- u32 signal_stat_sampling_interval;
- /* u32 signal_stat_converging_constant; */
- struct signal_stat signal_qual_data;
- struct signal_stat signal_strength_data;
-};
-
-#define rtw_set_signal_stat_timer(recvpriv) \
- mod_timer(&(recvpriv)->signal_stat_timer, jiffies + \
- msecs_to_jiffies((recvpriv)->signal_stat_sampling_interval))
-
-struct sta_recv_priv {
- spinlock_t lock;
- int option;
-
- /* struct rtw_queue blk_strms[MAX_RX_NUMBLKS]; */
- struct rtw_queue defrag_q; /* keeping the fragment frame until defrag */
-
- struct stainfo_rxcache rxcache;
-
- /* uint sta_rx_bytes; */
- /* uint sta_rx_pkts; */
- /* uint sta_rx_fail; */
-
-};
-
-
-struct recv_buf {
- struct list_head list;
-
- struct rtw_adapter *adapter;
-
- struct urb *purb;
- struct sk_buff *pskb;
-};
-
-/* head ----->
- *
- * data ----->
- *
- * payload
- *
- * tail ----->
- *
- * end ----->
- *
- * len = (unsigned int )(tail - data);
- *
- */
-struct recv_frame {
- struct list_head list;
- struct sk_buff *pkt;
-
- struct rtw_adapter *adapter;
-
- struct rx_pkt_attrib attrib;
-
- struct sta_info *psta;
-
- /* for A-MPDU Rx reordering buffer control */
- struct recv_reorder_ctrl *preorder_ctrl;
-};
-
-/* get a free recv_frame from pfree_recv_queue */
-struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue);
-int rtw_free_recvframe23a(struct recv_frame *precvframe);
-
-int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *queue);
-
-u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
-
-struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
-
-void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
-
-static inline s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
-{
- s32 SignalPower; /* in dBm. */
-
- /* Translate to dBm (x=0.5y-95). */
- SignalPower = (s32)((SignalStrengthIndex + 1) >> 1);
- SignalPower -= 95;
-
- return SignalPower;
-}
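/*
 * Editorial worked example of the conversion above (x = 0.5 * y - 95,
 * not part of the original header): a signal-strength index of 60 maps
 * to ((60 + 1) >> 1) - 95 = -65 dBm, and an index of 0 maps to -95 dBm.
 */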
-
-
-struct sta_info;
-
-void _rtw_init_sta_recv_priv23a(struct sta_recv_priv *psta_recvpriv);
-
-void mgt_dispatcher23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_rf.h b/drivers/staging/rtl8723au/include/rtw_rf.h
deleted file mode 100644
index a7de714137b8..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_rf.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_RF_H_
-#define __RTW_RF_H_
-
-#include <rtw_cmd.h>
-
-#define OFDM_PHY 1
-#define MIXED_PHY 2
-#define CCK_PHY 3
-
-#define NumRates (13)
-
-/* slot time for 11g */
-#define SHORT_SLOT_TIME 9
-#define NON_SHORT_SLOT_TIME 20
-
-/* We now define the max channels in each channel plan. */
-#define MAX_CHANNEL_NUM_2G 14
-#define MAX_CHANNEL_NUM_5G 24
-#define MAX_CHANNEL_NUM 38/* 14+24 */
-
-/* define NUM_REGULATORYS 21 */
-#define NUM_REGULATORYS 1
-
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
-struct regulatory_class {
- u32 starting_freq; /* MHz, */
- u8 channel_set[MAX_CHANNEL_NUM];
- u8 channel_cck_power[MAX_CHANNEL_NUM];/* dbm */
- u8 channel_ofdm_power[MAX_CHANNEL_NUM];/* dbm */
- u8 txpower_limit; /* dbm */
- u8 channel_spacing; /* MHz */
- u8 modem;
-};
-
-enum {
- cESS = 0x0001,
- cIBSS = 0x0002,
- cPollable = 0x0004,
- cPollReq = 0x0008,
- cPrivacy = 0x0010,
- cShortPreamble = 0x0020,
- cPBCC = 0x0040,
- cChannelAgility = 0x0080,
- cSpectrumMgnt = 0x0100,
- cQos = 0x0200, /* For HCCA, use with CF-Pollable and CF-PollReq */
- cShortSlotTime = 0x0400,
- cAPSD = 0x0800,
- cRM = 0x1000, /* RRM (Radio Request Measurement) */
- cDSSS_OFDM = 0x2000,
- cDelayedBA = 0x4000,
- cImmediateBA = 0x8000,
-};
-
-enum {
- PREAMBLE_LONG = 1,
- PREAMBLE_AUTO = 2,
- PREAMBLE_SHORT = 3,
-};
-
-/* Bandwidth Offset */
-#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
-#define HAL_PRIME_CHNL_OFFSET_LOWER 1
-#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-
-/* Represent Channel Width in HT Capabilities */
-enum ht_channel_width {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_40 = 1,
- HT_CHANNEL_WIDTH_80 = 2,
- HT_CHANNEL_WIDTH_160 = 3,
- HT_CHANNEL_WIDTH_10 = 4,
-};
-
-/* 2007/11/15 MH Define different RF type. */
-enum {
- RF_1T2R = 0,
- RF_2T4R = 1,
- RF_2T2R = 2,
- RF_1T1R = 3,
- RF_2T2R_GREEN = 4,
- RF_819X_MAX_TYPE = 5,
-};
-
-#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_security.h b/drivers/staging/rtl8723au/include/rtw_security.h
deleted file mode 100644
index 624a9d788e45..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_security.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_SECURITY_H_
-#define __RTW_SECURITY_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <net/lib80211.h>
-
-
-#define is_wep_enc(alg) (alg == WLAN_CIPHER_SUITE_WEP40 || \
- alg == WLAN_CIPHER_SUITE_WEP104)
-
-#define SHA256_MAC_LEN 32
-#define AES_BLOCK_SIZE 16
-#define AES_PRIV_SIZE (4 * 44)
-
-enum ENCRYP_PROTOCOL {
- ENCRYP_PROTOCOL_OPENSYS, /* open system */
- ENCRYP_PROTOCOL_WEP, /* WEP */
- ENCRYP_PROTOCOL_WPA, /* WPA */
- ENCRYP_PROTOCOL_WPA2, /* WPA2 */
- ENCRYP_PROTOCOL_MAX
-};
-
-#ifndef Ndis802_11AuthModeWPA2
-#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
-#endif
-
-#ifndef Ndis802_11AuthModeWPA2PSK
-#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
-#endif
-
-union pn48 {
- u64 val;
-
-#ifdef __LITTLE_ENDIAN
-
-struct {
- u8 TSC0;
- u8 TSC1;
- u8 TSC2;
- u8 TSC3;
- u8 TSC4;
- u8 TSC5;
- u8 TSC6;
- u8 TSC7;
-} _byte_;
-
-#elif defined(__BIG_ENDIAN)
-
-struct {
- u8 TSC7;
- u8 TSC6;
- u8 TSC5;
- u8 TSC4;
- u8 TSC3;
- u8 TSC2;
- u8 TSC1;
- u8 TSC0;
-} _byte_;
-#else
-#error Need BIG or LITTLE endian
-
-#endif
-
-};
-
-union Keytype {
- u8 skey[16];
- u32 lkey[4];
-};
-
-struct rtw_wep_key {
- u8 key[WLAN_KEY_LEN_WEP104 + 1]; /* 14 */
- u16 keylen;
-};
-
-struct rt_pmkid_list {
- u8 bUsed;
- u8 Bssid[6];
- u8 PMKID[16];
- u8 SsidBuf[33];
- u8 *ssid_octet;
- u16 ssid_length;
-};
-
-struct security_priv {
- u32 dot11AuthAlgrthm; /* 802.11 auth, could be open, shared,
- * 8021x and authswitch */
- u32 dot11PrivacyAlgrthm; /* This specifies the privacy for
- * shared auth. algorithm.
- */
- /* WEP */
- u32 dot11PrivacyKeyIndex; /* this is only valid for legacy
- * WEP, 0~3 for key id. (tx key index)
- */
- struct rtw_wep_key wep_key[NUM_WEP_KEYS];
-
- u32 dot118021XGrpPrivacy; /* specify the privacy algthm.
- * used for Grp key
- */
- u32 dot118021XGrpKeyid; /* key id used for Grp Key
- * (tx key index)
- */
- union Keytype dot118021XGrpKey[4];/* 802.1x Grp Key, inx0 and inx1 */
- union Keytype dot118021XGrptxmickey[4];
- union Keytype dot118021XGrprxmickey[4];
- union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
- union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
-
-#ifdef CONFIG_8723AU_AP_MODE
- /* extend security capabilities for AP_MODE */
- unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
- unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
- unsigned int wpa_group_cipher;
- unsigned int wpa2_group_cipher;
- unsigned int wpa_pairwise_cipher;
- unsigned int wpa2_pairwise_cipher;
-#endif
-
- u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
- int wps_ie_len;
- unsigned int binstallGrpkey:1;
- unsigned int busetkipkey:1;
- unsigned int bcheck_grpkey:1;
- unsigned int hw_decrypted:1;
- u32 ndisauthtype; /* enum ndis_802_11_auth_mode */
- u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
- struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
- u8 assoc_info[600];
- u8 szofcapability[256]; /* for wpa2 usage */
- u8 oidassociation[512]; /* for wpa/wpa2 usage */
- u8 supplicant_ie[256]; /* store sta security information element */
-
- /* for tkip countermeasure */
- unsigned long last_mic_err_time;
- u8 btkip_countermeasure;
- u8 btkip_wait_report;
- unsigned long btkip_countermeasure_time;
-
- /* For WPA2 Pre-Authentication. */
- struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
- u8 PMKIDIndex;
- u8 bWepDefaultKeyIdxSet;
-};
-
-struct sha256_state {
- u64 length;
- u32 state[8], curlen;
- u8 buf[64];
-};
-
-#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
-do {\
- switch (psecuritypriv->dot11AuthAlgrthm) {\
- case dot11AuthAlgrthm_Open:\
- case dot11AuthAlgrthm_Shared:\
- case dot11AuthAlgrthm_Auto:\
- encry_algo = psecuritypriv->dot11PrivacyAlgrthm;\
- break;\
- case dot11AuthAlgrthm_8021X:\
- if (bmcst)\
- encry_algo = psecuritypriv->dot118021XGrpPrivacy;\
- else\
- encry_algo = psta->dot118021XPrivacy;\
- break;\
- } \
-} while (0)
-
-#define GET_TKIP_PN(iv, dot11txpn)\
-do {\
- dot11txpn._byte_.TSC0 = iv[2];\
- dot11txpn._byte_.TSC1 = iv[0];\
- dot11txpn._byte_.TSC2 = iv[4];\
- dot11txpn._byte_.TSC3 = iv[5];\
- dot11txpn._byte_.TSC4 = iv[6];\
- dot11txpn._byte_.TSC5 = iv[7];\
-} while (0)
-
-#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
-#define ROR32(A, n) ROL32((A), 32-(n))
-
-struct mic_data {
- u32 K0, K1; /* Key */
- u32 L, R; /* Current state */
- u32 M; /* Message accumulator (single word) */
- u32 nBytesInM; /* # bytes in M */
-};
-
-extern const u32 Te0[256];
-extern const u32 Te1[256];
-extern const u32 Te2[256];
-extern const u32 Te3[256];
-extern const u32 Te4[256];
-extern const u32 Td0[256];
-extern const u32 Td1[256];
-extern const u32 Td2[256];
-extern const u32 Td3[256];
-extern const u32 Td4[256];
-extern const u32 rcon[10];
-extern const u8 Td4s[256];
-extern const u8 rcons[10];
-
-#define RCON(i) (rcons[(i)] << 24)
-
-static inline u32 rotr(u32 val, int bits)
-{
- return (val >> bits) | (val << (32 - bits));
-}
-
-#define TE0(i) Te0[((i) >> 24) & 0xff]
-#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
-#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
-#define TE3(i) rotr(Te0[(i) & 0xff], 24)
-#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
-#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
-#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
-#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
-#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
-#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
-#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
-#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
-#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
-
-#define TD0(i) Td0[((i) >> 24) & 0xff]
-#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
-#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
-#define TD3(i) rotr(Td0[(i) & 0xff], 24)
-#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
-#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
-#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
-#define TD44(i) (Td4s[(i) & 0xff])
-#define TD0_(i) Td0[(i) & 0xff]
-#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
-#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
-#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
-
-#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
- ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
-
-#define PUTU32(ct, st) { \
-(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
-(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
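/*
 * Editorial worked example (not part of the original header): for the
 * byte string pt[] = { 0x12, 0x34, 0x56, 0x78 }, GETU32(pt) yields
 * 0x12345678, and PUTU32(ct, 0x12345678) writes the bytes back in the
 * same big-endian order.
 */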
-
-#define WPA_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
- (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
-
-#define WPA_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define WPA_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define WPA_PUT_BE64(a, val) \
- do { \
- (a)[0] = (u8) (((u64) (val)) >> 56); \
- (a)[1] = (u8) (((u64) (val)) >> 48); \
- (a)[2] = (u8) (((u64) (val)) >> 40); \
- (a)[3] = (u8) (((u64) (val)) >> 32); \
- (a)[4] = (u8) (((u64) (val)) >> 24); \
- (a)[5] = (u8) (((u64) (val)) >> 16); \
- (a)[6] = (u8) (((u64) (val)) >> 8); \
- (a)[7] = (u8) (((u64) (val)) & 0xff); \
- } while (0)
-
-/* ===== start - public domain SHA256 implementation ===== */
-
-/* This is based on the SHA256 implementation in LibTomCrypt that was
- * released into the public domain by Tom St Denis. */
-
-/* the K array */
-static const unsigned long K[64] = {
- 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
- 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
- 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
- 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
- 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
- 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
- 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
- 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
- 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
- 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
- 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
- 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
- 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
-};
-
-void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key);
-void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b);
-void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbBytes);
-void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst);
-
-void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
- u8 *Miccode, u8 priorityi);
-
-int rtw_aes_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-void rtw_wep_encrypt23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-int rtw_aes_decrypt23a(struct rtw_adapter *padapter,
- struct recv_frame *precvframe);
-int rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
- struct recv_frame *precvframe);
-void rtw_wep_decrypt23a(struct rtw_adapter *padapter, struct recv_frame *precvframe);
-
-void rtw_use_tkipkey_handler23a(void *FunctionContext);
-
-#endif /* __RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_sreset.h b/drivers/staging/rtl8723au/include/rtw_sreset.h
deleted file mode 100644
index 60fa8296e1ff..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_sreset.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_SRESET_C_
-#define _RTW_SRESET_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-struct sreset_priv {
- struct mutex silentreset_mutex;
- u8 silent_reset_inprogress;
- unsigned long last_tx_time;
- unsigned long last_tx_complete_time;
-};
-
-#include <rtl8723a_hal.h>
-
-void rtw_sreset_init(struct rtw_adapter *padapter);
-void rtw_sreset_reset_value(struct rtw_adapter *padapter);
-bool rtw_sreset_inprogress(struct rtw_adapter *padapter);
-void sreset_set_trigger_point(struct rtw_adapter *padapter, s32 tgp);
-void rtw_sreset_reset(struct rtw_adapter *active_adapter);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtw_version.h b/drivers/staging/rtl8723au/include/rtw_version.h
deleted file mode 100644
index c947733a3e3e..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_version.h
+++ /dev/null
@@ -1 +0,0 @@
-#define DRIVERVERSION "v4.1.6_7336.20130426"
diff --git a/drivers/staging/rtl8723au/include/rtw_xmit.h b/drivers/staging/rtl8723au/include/rtw_xmit.h
deleted file mode 100644
index 2b7d6d08238b..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_xmit.h
+++ /dev/null
@@ -1,385 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _RTW_XMIT_H_
-#define _RTW_XMIT_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define MAX_XMITBUF_SZ 2048
-#define NR_XMITBUFF 4
-
-#define XMITBUF_ALIGN_SZ 512
-
-/* xmit extension buffer definition */
-#define MAX_XMIT_EXTBUF_SZ 1536
-#define NR_XMIT_EXTBUFF 32
-
-#define MAX_NUMBLKS 1
-
-#define XMIT_VO_QUEUE 0
-#define XMIT_VI_QUEUE 1
-#define XMIT_BE_QUEUE 2
-#define XMIT_BK_QUEUE 3
-
-#define VO_QUEUE_INX 0
-#define VI_QUEUE_INX 1
-#define BE_QUEUE_INX 2
-#define BK_QUEUE_INX 3
-#define BCN_QUEUE_INX 4
-#define MGT_QUEUE_INX 5
-#define HIGH_QUEUE_INX 6
-#define TXCMD_QUEUE_INX 7
-
-#define HW_QUEUE_ENTRY 8
-
-#define WEP_IV(pattrib_iv, dot11txpn, keyidx) \
-do { \
- pattrib_iv[0] = dot11txpn._byte_.TSC0; \
- pattrib_iv[1] = dot11txpn._byte_.TSC1; \
- pattrib_iv[2] = dot11txpn._byte_.TSC2; \
- pattrib_iv[3] = ((keyidx & 0x3) << 6); \
- dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : \
- (dot11txpn.val+1); \
-} while (0)
-
-#define TKIP_IV(pattrib_iv, dot11txpn, keyidx) \
-do { \
- pattrib_iv[0] = dot11txpn._byte_.TSC1; \
- pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f; \
- pattrib_iv[2] = dot11txpn._byte_.TSC0; \
- pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6); \
- pattrib_iv[4] = dot11txpn._byte_.TSC2; \
- pattrib_iv[5] = dot11txpn._byte_.TSC3; \
- pattrib_iv[6] = dot11txpn._byte_.TSC4; \
- pattrib_iv[7] = dot11txpn._byte_.TSC5; \
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : \
- (dot11txpn.val+1); \
-} while (0)
-
-#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
-do { \
- pattrib_iv[0] = dot11txpn._byte_.TSC0; \
- pattrib_iv[1] = dot11txpn._byte_.TSC1; \
- pattrib_iv[2] = 0; \
- pattrib_iv[3] = BIT(5) | ((keyidx & 0x3) << 6); \
- pattrib_iv[4] = dot11txpn._byte_.TSC2; \
- pattrib_iv[5] = dot11txpn._byte_.TSC3; \
- pattrib_iv[6] = dot11txpn._byte_.TSC4; \
- pattrib_iv[7] = dot11txpn._byte_.TSC5; \
- dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : \
- (dot11txpn.val+1); \
-} while (0)
-
-#define HWXMIT_ENTRY 4
-
-#define TXDESC_SIZE 32
-
-#define PACKET_OFFSET_SZ 8
-#define TXDESC_OFFSET (TXDESC_SIZE + PACKET_OFFSET_SZ)
-
-struct tx_desc {
- /* DWORD 0 */
- __le32 txdw0;
- __le32 txdw1;
- __le32 txdw2;
- __le32 txdw3;
- __le32 txdw4;
- __le32 txdw5;
- __le32 txdw6;
- __le32 txdw7;
-};
-
-union txdesc {
- struct tx_desc txdesc;
- unsigned int value[TXDESC_SIZE>>2];
-};
-
-struct hw_xmit {
- struct rtw_queue *sta_queue;
- int accnt;
-};
-
-/* reduce size */
-struct pkt_attrib {
- u16 type;
- u8 bswenc;
- u8 dhcp_pkt;
- u16 ether_type;
- u16 seqnum;
- u16 pkt_hdrlen; /* the original 802.3 pkt header len */
- u16 hdrlen; /* the WLAN Header Len */
- u32 pktlen; /* the original 802.3 pkt raw_data len */
- u32 last_txcmdsz;
- u32 encrypt; /* when 0 indicate no encrypt. */
- u8 nr_frags;
- u8 iv_len;
- u8 icv_len;
- u8 iv[18];
- u8 icv[16];
- u8 priority;
- u8 ack_policy;
- u8 mac_id;
- u8 vcs_mode; /* virtual carrier sense method */
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
- u8 ta[ETH_ALEN];
- u8 ra[ETH_ALEN];
- u8 key_idx;
- u8 qos_en;
- u8 ht_en;
- u8 raid;/* rate adaptive id */
- u8 bwmode;
- u8 ch_offset;/* PRIME_CHNL_OFFSET */
- u8 sgi;/* short GI */
- u8 ampdu_en;/* tx ampdu enable */
- u8 mdata;/* more data bit */
- u8 pctrl;/* per packet txdesc control enable */
- u8 triggered;/* for ap mode handling Power Saving sta */
- u8 qsel;
- u8 eosp;
- u8 rate;
- u8 retry_ctrl;
- struct sta_info *psta;
-};
-
-#define WLANHDR_OFFSET 64
-
-#define NULL_FRAMETAG 0x0
-#define DATA_FRAMETAG 0x01
-#define L2_FRAMETAG 0x02
-#define MGNT_FRAMETAG 0x03
-#define AMSDU_FRAMETAG 0x04
-
-#define EII_FRAMETAG 0x05
-#define IEEE8023_FRAMETAG 0x06
-
-#define MP_FRAMETAG 0x07
-
-#define TXAGG_FRAMETAG 0x08
-
-struct submit_ctx {
- u32 timeout_ms; /* <0: not synchronous, 0: wait forever,
- * >0: up to ms waiting
- */
- int status; /* status for operation */
- struct completion done;
-};
-
-enum {
- RTW_SCTX_SUBMITTED = -1,
- RTW_SCTX_DONE_SUCCESS = 0,
- RTW_SCTX_DONE_UNKNOWN,
- RTW_SCTX_DONE_TIMEOUT,
- RTW_SCTX_DONE_BUF_ALLOC,
- RTW_SCTX_DONE_BUF_FREE,
- RTW_SCTX_DONE_WRITE_PORT_ERR,
- RTW_SCTX_DONE_TX_DESC_NA,
- RTW_SCTX_DONE_TX_DENY,
- RTW_SCTX_DONE_CCX_PKT_FAIL,
- RTW_SCTX_DONE_DRV_STOP,
- RTW_SCTX_DONE_DEV_REMOVE,
-};
-
-void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms);
-int rtw_sctx_wait23a(struct submit_ctx *sctx);
-void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status);
-
-struct xmit_buf {
- struct list_head list, list2;
- struct rtw_adapter *padapter;
-
- u8 *pallocated_buf;
- u8 *pbuf;
- void *priv_data;
-
- u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
- u16 flags;
- u32 alloc_sz;
- u32 len;
- struct submit_ctx *sctx;
- u32 ff_hwaddr;
- struct urb *pxmit_urb[8];
- u8 bpending[8];
- int last[8];
-#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT)
- u8 no;
-#endif
-};
-
-struct xmit_frame {
- struct list_head list;
- struct pkt_attrib attrib;
- struct sk_buff *pkt;
- int frame_tag;
- struct rtw_adapter *padapter;
- u8 *buf_addr;
- struct xmit_buf *pxmitbuf;
-
- s8 pkt_offset;
-
- u8 ack_report;
-
- u8 ext_tag; /* 0:data, 1:mgmt */
-};
-
-struct tx_servq {
- struct list_head tx_pending;
- struct rtw_queue sta_pending;
- int qcnt;
-};
-
-struct sta_xmit_priv {
- spinlock_t lock;
- int option;
- int apsd_setting; /* When bit mask is on, the associated edca
- * queue supports APSD.
- */
- struct tx_servq be_q; /* priority == 0,3 */
- struct tx_servq bk_q; /* priority == 1,2 */
- struct tx_servq vi_q; /* priority == 4,5 */
- struct tx_servq vo_q; /* priority == 6,7 */
- struct list_head legacy_dz;
- struct list_head apsd;
- u16 txseq_tid[16];
-};
-
-struct hw_txqueue {
- volatile int head;
- volatile int tail;
- volatile int free_sz; /* in units of 64 bytes */
- volatile int free_cmdsz;
- volatile int txsz[8];
- uint ff_hwaddr;
- uint cmd_hwaddr;
- int ac_tag;
-};
-
-struct agg_pkt_info {
- u16 offset;
- u16 pkt_len;
-};
-
-struct xmit_priv {
- spinlock_t lock;
-
- struct semaphore xmit_sema;
- struct semaphore terminate_xmitthread_sema;
-
- struct rtw_queue be_pending;
- struct rtw_queue bk_pending;
- struct rtw_queue vi_pending;
- struct rtw_queue vo_pending;
- struct rtw_queue bm_pending;
-
- int free_xmitframe_cnt;
- struct rtw_queue free_xmit_queue;
-
- int free_xframe_ext_cnt;
- struct rtw_queue free_xframe_ext_queue;
-
- uint frag_len;
-
- struct rtw_adapter *adapter;
-
- u64 tx_bytes;
- u64 tx_pkts;
- u64 tx_drop;
- u64 last_tx_bytes;
- u64 last_tx_pkts;
-
- struct hw_xmit *hwxmits;
- u8 hwxmit_entry;
- u8 vcs;
- u8 nqos_ssn;
-
- u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from
- * large to small. Its value is 0->vo, 1->vi,
- * 2->be, 3->bk.
- */
-
- struct semaphore tx_retevt;/* all tx return event; */
-
- struct tasklet_struct xmit_tasklet;
-
- struct rtw_queue free_xmitbuf_queue;
- struct list_head xmitbuf_list; /* track buffers for cleanup */
- struct rtw_queue pending_xmitbuf_queue;
- uint free_xmitbuf_cnt;
-
- struct rtw_queue free_xmit_extbuf_queue;
- struct list_head xmitextbuf_list; /* track buffers for cleanup */
- uint free_xmit_extbuf_cnt;
-
- int ack_tx;
- struct mutex ack_tx_mutex;
- struct submit_ctx ack_tx_ops;
- spinlock_t lock_sctx;
-};
-
-struct xmit_buf *rtw_alloc_xmitbuf23a_ext(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitbuf_ext23a(struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
-
-struct xmit_buf *rtw_alloc_xmitbuf23a(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
-
-void rtw_count_tx_stats23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe, int sz);
-void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len);
-struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv);
-struct xmit_frame *rtw_alloc_xmitframe23a_once(struct xmit_priv *pxmitpriv);
-s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv,
- struct xmit_frame *pxmitframe);
-void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv, struct rtw_queue *pframequeue);
-struct tx_servq *rtw_get_sta_pending23a(struct rtw_adapter *padapter,
- struct sta_info *psta, int up, u8 *ac);
-s32 rtw_xmitframe_enqueue23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-struct xmit_frame *rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv,
- struct hw_xmit *phwxmit_i, int entry);
-s32 rtw_xmit23a_classifier(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-s32 rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *pkt,
- struct xmit_frame *pxmitframe);
-s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
-void _rtw_init_sta_xmit_priv23a(struct sta_xmit_priv *psta_xmitpriv);
-
-s32 rtw_txframes_pending23a(struct rtw_adapter *padapter);
-s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
- struct pkt_attrib *pattrib);
-void rtw_init_hwxmits23a(struct hw_xmit *phwxmit, int entry);
-int _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
- struct rtw_adapter *padapter);
-void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits23a(struct rtw_adapter *padapter);
-void rtw_free_hwxmits23a(struct rtw_adapter *padapter);
-int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *pkt);
-#if defined(CONFIG_8723AU_AP_MODE)
-int xmitframe_enqueue_for_sleeping_sta23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxmitframe);
-void stop_sta_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
- struct sta_info *psta);
-#endif
-u8 qos_acm23a(u8 acm_mask, u8 priority);
-u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe);
-int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms);
-
-/* include after declaring struct xmit_buf, in order to avoid warning */
-#include <xmit_osdep.h>
-
-#endif /* _RTL871X_XMIT_H_ */
diff --git a/drivers/staging/rtl8723au/include/sta_info.h b/drivers/staging/rtl8723au/include/sta_info.h
deleted file mode 100644
index e7260050e533..000000000000
--- a/drivers/staging/rtl8723au/include/sta_info.h
+++ /dev/null
@@ -1,373 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __STA_INFO_H_
-#define __STA_INFO_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <wifi.h>
-
-#define IBSS_START_MAC_ID 2
-#define NUM_STA 32
-#define NUM_ACL 16
-
-
-/* if mode == 0, then the sta is allowed once its addr matches an entry. */
-/* if mode == 1, then the sta is rejected once its addr fails to match. */
-struct rtw_wlan_acl_node {
- struct list_head list;
- u8 addr[ETH_ALEN];
- u8 valid;
-};
-
-/* mode=0, disable */
-/* mode=1, accept unless in deny list */
-/* mode=2, deny unless in accept list */
-struct wlan_acl_pool {
- int mode;
- int num;
- struct rtw_wlan_acl_node aclnode[NUM_ACL];
- struct rtw_queue acl_node_q;
-};
-
-struct rssi_sta {
- s32 UndecoratedSmoothedPWDB;
- s32 UndecoratedSmoothedCCK;
- s32 UndecoratedSmoothedOFDM;
- u64 PacketMap;
- u8 ValidBit;
-};
-
-struct stainfo_stats {
- u64 rx_mgnt_pkts;
- u64 rx_beacon_pkts;
- u64 rx_probereq_pkts;
- u64 rx_probersp_pkts;
- u64 rx_probersp_bm_pkts;
- u64 rx_probersp_uo_pkts;
- u64 rx_ctrl_pkts;
- u64 rx_data_pkts;
-
- u64 last_rx_mgnt_pkts;
- u64 last_rx_beacon_pkts;
- u64 last_rx_probereq_pkts;
- u64 last_rx_probersp_pkts;
- u64 last_rx_probersp_bm_pkts;
- u64 last_rx_probersp_uo_pkts;
- u64 last_rx_ctrl_pkts;
- u64 last_rx_data_pkts;
-
- u64 rx_bytes;
- u64 rx_drops;
-
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_drops;
-
-};
-
-struct sta_info {
- spinlock_t lock;
- struct list_head list; /* free_sta_queue */
- struct list_head hash_list; /* sta_hash */
- struct rtw_adapter *padapter;
-
- struct sta_xmit_priv sta_xmitpriv;
- struct sta_recv_priv sta_recvpriv;
-
- struct rtw_queue sleep_q;
- unsigned int sleepq_len;
-
- uint state;
- uint aid;
- uint mac_id;
- uint qos_option;
- u8 hwaddr[ETH_ALEN];
-
- uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
- u32 dot118021XPrivacy; /* aes, tkip... */
- union Keytype dot11tkiptxmickey;
- union Keytype dot11tkiprxmickey;
- union Keytype dot118021x_UncstKey;
- union pn48 dot11txpn; /* PN48 used for Unicast xmit. */
- union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
-
-
- u8 bssrateset[16];
- u32 bssratelen;
- s32 rssi;
- s32 signal_quality;
-
- u8 cts2self;
- u8 rtsen;
-
- u8 raid;
- u8 init_rate;
- u32 ra_mask;
- u8 wireless_mode; /* NETWORK_TYPE */
- struct stainfo_stats sta_stats;
-
- /* for A-MPDU TX, ADDBA timeout check */
- struct timer_list addba_retry_timer;
-
- /* for A-MPDU Rx reordering buffer control */
- struct recv_reorder_ctrl recvreorder_ctrl[16];
-
- /* for A-MPDU Tx */
- /* unsigned char ampdu_txen_bitmap; */
- u16 BA_starting_seqctrl[16];
-
- struct ht_priv htpriv;
-
- /* Notes: */
- /* STA_Mode: */
- /* curr_network(mlme_priv/security_priv/qos/ht) + sta_info: (STA & AP) CAP/INFO */
- /* scan_q: AP CAP/INFO */
-
- /* AP_Mode: */
- /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
- /* sta_info: (AP & STA) CAP/INFO */
-
- struct list_head asoc_list;
- struct list_head auth_list;
-
- unsigned int expire_to;
- unsigned int auth_seq;
- unsigned int authalg;
- unsigned char chg_txt[128];
-
- u16 capability;
- int flags;
-
- int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
- int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
- int wpa_group_cipher;
- int wpa2_group_cipher;
- int wpa_pairwise_cipher;
- int wpa2_pairwise_cipher;
-
- u8 bpairwise_key_installed;
-
- u8 wpa_ie[32];
-
- u8 nonerp_set;
- u8 no_short_slot_time_set;
- u8 no_short_preamble_set;
- u8 no_ht_gf_set;
- u8 no_ht_set;
- u8 ht_20mhz_set;
-
- unsigned int tx_ra_bitmap;
- u8 qos_info;
-
- u8 max_sp_len;
- u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
- u8 uapsd_be;
- u8 uapsd_vi;
- u8 uapsd_vo;
-
- u8 has_legacy_ac;
- unsigned int sleepq_ac_len;
-
- /* p2p priv data */
- u8 is_p2p_device;
- u8 p2p_status_code;
-
- u8 keep_alive_trycnt;
-
- /* p2p client info */
- u8 dev_addr[ETH_ALEN];
- u8 dev_cap;
- u16 config_methods;
- u8 primary_dev_type[8];
- u8 num_of_secdev_type;
- u8 secdev_types_list[32];/* 32/8 == 4; */
- u16 dev_name_len;
- u8 dev_name[32];
- u8 *passoc_req;
- u32 assoc_req_len;
-
- /* for DM */
- struct rssi_sta rssi_stat;
-
- /* */
- /* ================ODM Relative Info======================= */
- /* Please take care not to declare too much structure here. It will cost memory * STA support num. */
- /* */
- /* */
- /* 2011/10/20 MH Add for ODM STA info. */
- /* */
- /* Driver Write */
- u8 bValid; /* record whether the sta is linked or not */
- u8 rssi_level; /* for Refresh RA mask */
- /* ODM Write */
- /* 1 PHY_STATUS_INFO */
- u8 RSSI_Path[4]; /* */
- u8 RSSI_Ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-
- /* ODM Write */
- /* 1 TX_INFO (may changed by IC) */
- /* ================ODM Relative Info======================= */
- /* */
-
- /* To store the sequence number of received management frame */
- u16 RxMgmtFrameSeqNum;
-};
-
-#define sta_rx_pkts(sta) \
- (sta->sta_stats.rx_mgnt_pkts \
- + sta->sta_stats.rx_ctrl_pkts \
- + sta->sta_stats.rx_data_pkts)
-
-#define sta_last_rx_pkts(sta) \
- (sta->sta_stats.last_rx_mgnt_pkts \
- + sta->sta_stats.last_rx_ctrl_pkts \
- + sta->sta_stats.last_rx_data_pkts)
-
-#define sta_rx_data_pkts(sta) \
- (sta->sta_stats.rx_data_pkts)
-
-#define sta_last_rx_data_pkts(sta) \
- (sta->sta_stats.last_rx_data_pkts)
-
-#define sta_rx_mgnt_pkts(sta) \
- (sta->sta_stats.rx_mgnt_pkts)
-
-#define sta_last_rx_mgnt_pkts(sta) \
- (sta->sta_stats.last_rx_mgnt_pkts)
-
-#define sta_rx_beacon_pkts(sta) \
- (sta->sta_stats.rx_beacon_pkts)
-
-#define sta_last_rx_beacon_pkts(sta) \
- (sta->sta_stats.last_rx_beacon_pkts)
-
-#define sta_rx_probereq_pkts(sta) \
- (sta->sta_stats.rx_probereq_pkts)
-
-#define sta_last_rx_probereq_pkts(sta) \
- (sta->sta_stats.last_rx_probereq_pkts)
-
-#define sta_rx_probersp_pkts(sta) \
- (sta->sta_stats.rx_probersp_pkts)
-
-#define sta_last_rx_probersp_pkts(sta) \
- (sta->sta_stats.last_rx_probersp_pkts)
-
-#define sta_rx_probersp_bm_pkts(sta) \
- (sta->sta_stats.rx_probersp_bm_pkts)
-
-#define sta_last_rx_probersp_bm_pkts(sta) \
- (sta->sta_stats.last_rx_probersp_bm_pkts)
-
-#define sta_rx_probersp_uo_pkts(sta) \
- (sta->sta_stats.rx_probersp_uo_pkts)
-
-#define sta_last_rx_probersp_uo_pkts(sta) \
- (sta->sta_stats.last_rx_probersp_uo_pkts)
-
-#define sta_update_last_rx_pkts(sta) \
- do { \
- sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
- sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
- sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
- sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
- sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
- sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
- sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
- sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
- } while (0)
-
-#define STA_RX_PKTS_ARG(sta) \
- sta->sta_stats.rx_mgnt_pkts \
- , sta->sta_stats.rx_ctrl_pkts \
- , sta->sta_stats.rx_data_pkts
-
-#define STA_LAST_RX_PKTS_ARG(sta) \
- sta->sta_stats.last_rx_mgnt_pkts, \
- sta->sta_stats.last_rx_ctrl_pkts, \
- sta->sta_stats.last_rx_data_pkts
-
-#define STA_RX_PKTS_DIFF_ARG(sta) \
- sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts, \
- sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts, \
- sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts
-
-#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
-
-struct sta_priv {
- spinlock_t sta_hash_lock;
- struct list_head sta_hash[NUM_STA];
- int asoc_sta_count;
-
- struct rtw_adapter *padapter;
- struct list_head asoc_list;
- struct list_head auth_list;
- spinlock_t asoc_list_lock;
- spinlock_t auth_list_lock;
- u8 asoc_list_cnt;
- u8 auth_list_cnt;
-
- unsigned int auth_to; /* sec, time to expire in authenticating. */
- unsigned int assoc_to; /* sec, time to expire before associating. */
- unsigned int expire_to; /* sec, time to expire after associated. */
-
- /* pointers to STA info; based on allocated AID or NULL if AID free
- * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1
- * and so on
- */
- struct sta_info *sta_aid[NUM_STA];
-
- u16 sta_dz_bitmap;/* only supports 15 stations, station aid bitmap
- * for sleeping sta. */
- u16 tim_bitmap;/* only supports 15 stations,
- * aid=0~15 mapping bit0~bit15 */
-
- u16 max_num_sta;
-
- struct wlan_acl_pool acl_list;
-};
-
-static inline u32 wifi_mac_hash(const u8 *mac)
-{
- u32 x;
-
- x = mac[0];
- x = (x << 2) ^ mac[1];
- x = (x << 2) ^ mac[2];
- x = (x << 2) ^ mac[3];
- x = (x << 2) ^ mac[4];
- x = (x << 2) ^ mac[5];
-
- x ^= x >> 8;
- x = x & (NUM_STA - 1);
-
- return x;
-}
-
-int _rtw_init_sta_priv23a(struct sta_priv *pstapriv);
-int _rtw_free_sta_priv23a(struct sta_priv *pstapriv);
-
-struct sta_info *rtw_alloc_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr, gfp_t gfp);
-int rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta);
-void rtw_free_all_stainfo23a(struct rtw_adapter *padapter);
-struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr);
-int rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter);
-struct sta_info *rtw_get_bcmc_stainfo23a(struct rtw_adapter *padapter);
-bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr);
-
-#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
deleted file mode 100644
index ff11e13b24a8..000000000000
--- a/drivers/staging/rtl8723au/include/usb_ops.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __USB_OPS_H_
-#define __USB_OPS_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <osdep_intf.h>
-#include <usb_ops_linux.h>
-
-#define REALTEK_USB_VENQT_READ 0xC0
-#define REALTEK_USB_VENQT_WRITE 0x40
-#define REALTEK_USB_VENQT_CMD_REQ 0x05
-#define REALTEK_USB_VENQT_CMD_IDX 0x00
-
-enum {
- VENDOR_WRITE = 0x00,
- VENDOR_READ = 0x01,
-};
-
-#define ALIGNMENT_UNIT 16
-#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
-#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE +ALIGNMENT_UNIT)
-
-void rtl8723au_set_hw_type(struct rtw_adapter *padapter);
-
-void rtl8723au_recv_tasklet(void *priv);
-
-void rtl8723au_xmit_tasklet(void *priv);
-
-/* Increment the continual_urb_error of this @param dvobj and check whether it
- * is larger than MAX_CONTINUAL_URB_ERR. Return the result.
- */
-static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
-{
- int ret = false;
- int value;
-
- value = atomic_inc_return(&dvobj->continual_urb_error);
- if (value > MAX_CONTINUAL_URB_ERR) {
- DBG_8723A("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
- dvobj, value, MAX_CONTINUAL_URB_ERR);
- ret = true;
- }
- return ret;
-}
-
-/* Set the continual_urb_error of this @param dvobj to 0 */
-static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
-{
- atomic_set(&dvobj->continual_urb_error, 0);
-}
-
-bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
-
-#endif /* __USB_OPS_H_ */
diff --git a/drivers/staging/rtl8723au/include/usb_ops_linux.h b/drivers/staging/rtl8723au/include/usb_ops_linux.h
deleted file mode 100644
index af2f14b8b360..000000000000
--- a/drivers/staging/rtl8723au/include/usb_ops_linux.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __USB_OPS_LINUX_H__
-#define __USB_OPS_LINUX_H__
-
-#define VENDOR_CMD_MAX_DATA_LEN 254
-
-#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
-
-#define MAX_USBCTRL_VENDORREQ_TIMES 10
-
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
- struct recv_buf *precvbuf);
-void rtl8723au_read_port_cancel(struct rtw_adapter *padapter);
-int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
- struct xmit_buf *pxmitbuf);
-void rtl8723au_write_port_cancel(struct rtw_adapter *padapter);
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter);
-
-u8 rtl8723au_read8(struct rtw_adapter *padapter, u16 addr);
-u16 rtl8723au_read16(struct rtw_adapter *padapter, u16 addr);
-u32 rtl8723au_read32(struct rtw_adapter *padapter, u16 addr);
-int rtl8723au_write8(struct rtw_adapter *padapter, u16 addr, u8 val);
-int rtl8723au_write16(struct rtw_adapter *padapter, u16 addr, u16 val);
-int rtl8723au_write32(struct rtw_adapter *padapter, u16 addr, u32 val);
-int rtl8723au_writeN(struct rtw_adapter *padapter,
- u16 addr, u16 length, u8 *pdata);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/wifi.h b/drivers/staging/rtl8723au/include/wifi.h
deleted file mode 100644
index 25d573c3e232..000000000000
--- a/drivers/staging/rtl8723au/include/wifi.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef _WIFI_H_
-#define _WIFI_H_
-
-/* This value is tested by WiFi 11n Test Plan 5.2.3.
- * This test verifies that the WLAN NIC can update the NAV by sending
- * a CTS with a large duration.
- */
-#define WiFiNavUpperUs 30000 /* 30 ms */
-
-/*-----------------------------------------------------------------------------
- Below is the definition for 802.11n
-------------------------------------------------------------------------------*/
-
-struct AC_param {
- u8 ACI_AIFSN;
- u8 CW;
- __le16 TXOP_limit;
-} __packed;
-
-struct WMM_para_element {
- unsigned char QoS_info;
- unsigned char reserved;
- struct AC_param ac_param[4];
-} __packed;
-
-struct ADDBA_request {
- u8 dialog_token;
- __le16 BA_para_set;
- __le16 BA_timeout_value;
- __le16 BA_starting_seqctrl;
-} __packed;
-
-
-/* ===============WPS Section=============== */
-/* WPS attribute ID */
-#define WPS_ATTR_VER1 0x104A
-#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
-#define WPS_ATTR_RESP_TYPE 0x103B
-#define WPS_ATTR_UUID_E 0x1047
-#define WPS_ATTR_MANUFACTURER 0x1021
-#define WPS_ATTR_MODEL_NAME 0x1023
-#define WPS_ATTR_MODEL_NUMBER 0x1024
-#define WPS_ATTR_SERIAL_NUMBER 0x1042
-#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
-#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
-#define WPS_ATTR_DEVICE_NAME 0x1011
-#define WPS_ATTR_CONF_METHOD 0x1008
-#define WPS_ATTR_RF_BANDS 0x103C
-#define WPS_ATTR_DEVICE_PWID 0x1012
-#define WPS_ATTR_REQUEST_TYPE 0x103A
-#define WPS_ATTR_ASSOCIATION_STATE 0x1002
-#define WPS_ATTR_CONFIG_ERROR 0x1009
-#define WPS_ATTR_VENDOR_EXT 0x1049
-#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
-
-/* WPS Configuration Method */
-#define WPS_CM_NONE 0x0000
-#define WPS_CM_LABEL 0x0004
-#define WPS_CM_DISPLYA 0x0008
-#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
-#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
-#define WPS_CM_NFC_INTERFACE 0x0040
-#define WPS_CM_PUSH_BUTTON 0x0080
-#define WPS_CM_KEYPAD 0x0100
-#define WPS_CM_SW_PUHS_BUTTON 0x0280
-#define WPS_CM_HW_PUHS_BUTTON 0x0480
-#define WPS_CM_SW_DISPLAY_PIN 0x2008
-#define WPS_CM_LCD_DISPLAY_PIN 0x4008
-
-#endif /* _WIFI_H_ */
diff --git a/drivers/staging/rtl8723au/include/wlan_bssdef.h b/drivers/staging/rtl8723au/include/wlan_bssdef.h
deleted file mode 100644
index 95b32e15a4d0..000000000000
--- a/drivers/staging/rtl8723au/include/wlan_bssdef.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __WLAN_BSSDEF_H__
-#define __WLAN_BSSDEF_H__
-
-
-#define MAX_IE_SZ 768
-
-
-#define NDIS_802_11_LENGTH_RATES 8
-#define NDIS_802_11_LENGTH_RATES_EX 16
-
-/* Length is the sum, rounded up to a multiple of 4 bytes, of
- * sizeof(6 * sizeof(unsigned char)) + 2 + sizeof(struct ndis_802_11_ssid) +
- * sizeof(u32) + sizeof(long) + sizeof(enum ndis_802_11_net_type) +
- * sizeof(struct ndis_802_11_config) + sizeof(sizeof(unsigned char) *
- * NDIS_802_11_LENGTH_RATES_EX) + IELength
- *
- * Except the IELength, all other fields are fixed length. Therefore,
- * we can define a macro to present the partial sum.
- */
-
-enum ndis_802_11_auth_mode {
- Ndis802_11AuthModeOpen,
- Ndis802_11AuthModeShared,
- Ndis802_11AuthModeAutoSwitch,
- Ndis802_11AuthModeWPA,
- Ndis802_11AuthModeWPAPSK,
- Ndis802_11AuthModeWPANone,
- dis802_11AuthModeMax /* upper bound */
-};
-
-enum {
- Ndis802_11WEPEnabled,
- Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
- Ndis802_11WEPDisabled,
- Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
- Ndis802_11WEPKeyAbsent,
- Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
- Ndis802_11WEPNotSupported,
- Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
- Ndis802_11Encryption2Enabled,
- Ndis802_11Encryption2KeyAbsent,
- Ndis802_11Encryption3Enabled,
- Ndis802_11Encryption3KeyAbsent,
-};
-
-struct wlan_bcn_info {
- /* this info is obtained from rtw_get_encrypt_info when
- * translating scan results for the UI */
- u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2 */
- int group_cipher; /* WPA/WPA2 group cipher */
- int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
- int is_8021x;
-
- /* bwmode 20/40 and ch_offset UP/LOW */
-};
-
-struct wlan_bssid_ex {
- u32 Length;
- u8 MacAddress[ETH_ALEN];
- u16 reserved;
- struct cfg80211_ssid Ssid;
- u32 Privacy;
- long Rssi;/* in dBm, raw data, from PHY */
- u16 beacon_interval;
- u16 capability;
- u64 tsf;
- u32 ATIMWindow; /* units are Kusec */
- u32 DSConfig; /* Frequency, units are kHz */
- enum nl80211_iftype ifmode;
- unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
- u8 SignalStrength;/* in percentage */
- u8 SignalQuality;/* in percentage */
- u32 IELength;
- u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability info*/
-} __packed;
-
-static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
-{
- return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
-}
-
-struct wlan_network {
- struct list_head list;
- int network_type; /* refer to ieee80211.h for 11A/B/G */
- /* set to fixed when the entry should not be removed during site surveying */
- int fixed;
- unsigned long last_scanned; /* timestamp for the network */
- int join_res;
- struct wlan_bssid_ex network; /* must be the last item */
- struct wlan_bcn_info BcnInfo;
-};
-
-enum VRTL_CARRIER_SENSE {
- DISABLE_VCS,
- ENABLE_VCS,
- AUTO_VCS
-};
-
-enum VCS_TYPE {
- NONE_VCS,
- RTS_CTS,
- CTS_TO_SELF
-};
-
-/* john */
-#define NUM_PRE_AUTH_KEY 16
-#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
-
-#endif /* ifndef WLAN_BSSDEF_H_ */
diff --git a/drivers/staging/rtl8723au/include/xmit_osdep.h b/drivers/staging/rtl8723au/include/xmit_osdep.h
deleted file mode 100644
index 2be04c48656c..000000000000
--- a/drivers/staging/rtl8723au/include/xmit_osdep.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __XMIT_OSDEP_H_
-#define __XMIT_OSDEP_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-
-#define NR_XMITFRAME 256
-
-int rtw_xmit23a_entry23a(struct sk_buff *pkt, struct net_device *pnetdev);
-
-void rtw_os_xmit_schedule23a(struct rtw_adapter *padapter);
-
-int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free23a(struct rtw_adapter *padapter,
- struct xmit_buf *pxmitbuf);
-
-void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt);
-void rtw_os_xmit_complete23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxframe);
-int netdev_open23a(struct net_device *pnetdev);
-
-#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
deleted file mode 100644
index d0ba3778990e..000000000000
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ /dev/null
@@ -1,3348 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _IOCTL_CFG80211_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <xmit_osdep.h>
-
-#include "ioctl_cfg80211.h"
-
-#define RTW_MAX_MGMT_TX_CNT 8
-
-#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 65535 /* ms */
-#define RTW_MAX_NUM_PMKIDS 4
-
-static const u32 rtw_cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
-};
-
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .hw_value = (_rateid), \
- .flags = (_flags), \
-}
-
-#define CHAN2G(_channel, _freq, _flags) { \
- .band = NL80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define CHAN5G(_channel, _flags) { \
- .band = NL80211_BAND_5GHZ, \
- .center_freq = 5000 + (5 * (_channel)), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-static struct ieee80211_rate rtw_rates[] = {
- RATETAB_ENT(10, 0x1, 0),
- RATETAB_ENT(20, 0x2, 0),
- RATETAB_ENT(55, 0x4, 0),
- RATETAB_ENT(110, 0x8, 0),
- RATETAB_ENT(60, 0x10, 0),
- RATETAB_ENT(90, 0x20, 0),
- RATETAB_ENT(120, 0x40, 0),
- RATETAB_ENT(180, 0x80, 0),
- RATETAB_ENT(240, 0x100, 0),
- RATETAB_ENT(360, 0x200, 0),
- RATETAB_ENT(480, 0x400, 0),
- RATETAB_ENT(540, 0x800, 0),
-};
-
-#define rtw_a_rates (rtw_rates + 4)
-#define RTW_A_RATES_NUM 8
-#define rtw_g_rates (rtw_rates + 0)
-#define RTW_G_RATES_NUM 12
-
-#define RTW_2G_CHANNELS_NUM 14
-#define RTW_5G_CHANNELS_NUM 37
-
-static struct ieee80211_channel rtw_2ghz_channels[] = {
- CHAN2G(1, 2412, 0),
- CHAN2G(2, 2417, 0),
- CHAN2G(3, 2422, 0),
- CHAN2G(4, 2427, 0),
- CHAN2G(5, 2432, 0),
- CHAN2G(6, 2437, 0),
- CHAN2G(7, 2442, 0),
- CHAN2G(8, 2447, 0),
- CHAN2G(9, 2452, 0),
- CHAN2G(10, 2457, 0),
- CHAN2G(11, 2462, 0),
- CHAN2G(12, 2467, 0),
- CHAN2G(13, 2472, 0),
- CHAN2G(14, 2484, 0),
-};
-
-static struct ieee80211_channel rtw_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
- CHAN5G(52, 0), CHAN5G(56, 0),
- CHAN5G(60, 0), CHAN5G(64, 0),
- CHAN5G(100, 0), CHAN5G(104, 0),
- CHAN5G(108, 0), CHAN5G(112, 0),
- CHAN5G(116, 0), CHAN5G(120, 0),
- CHAN5G(124, 0), CHAN5G(128, 0),
- CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0),
- CHAN5G(184, 0), CHAN5G(188, 0),
- CHAN5G(192, 0), CHAN5G(196, 0),
- CHAN5G(200, 0), CHAN5G(204, 0),
- CHAN5G(208, 0), CHAN5G(212, 0),
- CHAN5G(216, 0),
-};
-
-static void rtw_2g_channels_init(struct ieee80211_channel *channels)
-{
- memcpy((void *)channels, (void *)rtw_2ghz_channels,
- sizeof(struct ieee80211_channel) * RTW_2G_CHANNELS_NUM);
-}
-
-static void rtw_5g_channels_init(struct ieee80211_channel *channels)
-{
- memcpy((void *)channels, (void *)rtw_5ghz_a_channels,
- sizeof(struct ieee80211_channel) * RTW_5G_CHANNELS_NUM);
-}
-
-static void rtw_2g_rates_init(struct ieee80211_rate *rates)
-{
- memcpy(rates, rtw_g_rates,
- sizeof(struct ieee80211_rate) * RTW_G_RATES_NUM);
-}
-
-static void rtw_5g_rates_init(struct ieee80211_rate *rates)
-{
- memcpy(rates, rtw_a_rates,
- sizeof(struct ieee80211_rate) * RTW_A_RATES_NUM);
-}
-
-static struct ieee80211_supported_band *
-rtw_spt_band_alloc(enum nl80211_band band)
-{
- struct ieee80211_supported_band *spt_band = NULL;
- int n_channels, n_bitrates;
-
- if (band == NL80211_BAND_2GHZ) {
- n_channels = RTW_2G_CHANNELS_NUM;
- n_bitrates = RTW_G_RATES_NUM;
- } else if (band == NL80211_BAND_5GHZ) {
- n_channels = RTW_5G_CHANNELS_NUM;
- n_bitrates = RTW_A_RATES_NUM;
- } else {
- goto exit;
- }
- spt_band = kzalloc(sizeof(struct ieee80211_supported_band) +
- sizeof(struct ieee80211_channel) * n_channels +
- sizeof(struct ieee80211_rate) * n_bitrates,
- GFP_KERNEL);
- if (!spt_band)
- goto exit;
-
- spt_band->channels =
- (struct ieee80211_channel *)(((u8 *) spt_band) +
- sizeof(struct
- ieee80211_supported_band));
- spt_band->bitrates =
- (struct ieee80211_rate *)(((u8 *) spt_band->channels) +
- sizeof(struct ieee80211_channel) *
- n_channels);
- spt_band->band = band;
- spt_band->n_channels = n_channels;
- spt_band->n_bitrates = n_bitrates;
-
- if (band == NL80211_BAND_2GHZ) {
- rtw_2g_channels_init(spt_band->channels);
- rtw_2g_rates_init(spt_band->bitrates);
- } else if (band == NL80211_BAND_5GHZ) {
- rtw_5g_channels_init(spt_band->channels);
- rtw_5g_rates_init(spt_band->bitrates);
- }
-
- /* spt_band.ht_cap */
-
-exit:
- return spt_band;
-}
-
-static const struct ieee80211_txrx_stypes
-rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
- [NL80211_IFTYPE_ADHOC] = {
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
- },
- [NL80211_IFTYPE_STATION] = {
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
- },
- [NL80211_IFTYPE_AP] = {
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
- BIT(IEEE80211_STYPE_DISASSOC >> 4) |
- BIT(IEEE80211_STYPE_AUTH >> 4) |
- BIT(IEEE80211_STYPE_DEAUTH >> 4) |
- BIT(IEEE80211_STYPE_ACTION >> 4)
- },
- [NL80211_IFTYPE_AP_VLAN] = {
- /* copy AP */
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
- BIT(IEEE80211_STYPE_DISASSOC >> 4) |
- BIT(IEEE80211_STYPE_AUTH >> 4) |
- BIT(IEEE80211_STYPE_DEAUTH >> 4) |
- BIT(IEEE80211_STYPE_ACTION >> 4)
- },
- [NL80211_IFTYPE_P2P_CLIENT] = {
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
- },
- [NL80211_IFTYPE_P2P_GO] = {
- .tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
- BIT(IEEE80211_STYPE_DISASSOC >> 4) |
- BIT(IEEE80211_STYPE_AUTH >> 4) |
- BIT(IEEE80211_STYPE_DEAUTH >> 4) |
- BIT(IEEE80211_STYPE_ACTION >> 4)
- },
-};
-
-static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
- struct wlan_network *pnetwork)
-{
- int ret = 0;
- struct ieee80211_channel *notify_channel;
- struct cfg80211_bss *bss;
- u16 channel;
- u32 freq;
- u8 *notify_ie;
- size_t notify_ielen;
- s32 notify_signal;
- struct wireless_dev *wdev = padapter->rtw_wdev;
- struct wiphy *wiphy = wdev->wiphy;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- channel = pnetwork->network.DSConfig;
- if (channel <= RTW_CH_MAX_2G_CHANNEL)
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_2GHZ);
- else
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_5GHZ);
-
- notify_channel = ieee80211_get_channel(wiphy, freq);
-
- notify_ie = pnetwork->network.IEs;
- notify_ielen = pnetwork->network.IELength;
-
- /* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM:
- * signal strength in mBm (100*dBm)
- */
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- is_same_network23a(&pmlmepriv->cur_network.network,
- &pnetwork->network)) {
- notify_signal = 100 * translate_percentage_to_dbm(padapter->recvpriv.signal_strength); /* dbm */
- } else {
- notify_signal = 100 * translate_percentage_to_dbm(
- pnetwork->network.SignalStrength); /* dbm */
- }
-
- bss = cfg80211_inform_bss(wiphy, notify_channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- pnetwork->network.MacAddress,
- pnetwork->network.tsf,
- pnetwork->network.capability,
- pnetwork->network.beacon_interval,
- notify_ie, notify_ielen,
- notify_signal, GFP_ATOMIC);
-
- if (unlikely(!bss)) {
- DBG_8723A("rtw_cfg80211_inform_bss error\n");
- return -EINVAL;
- }
-
- cfg80211_put_bss(wiphy, bss);
-
- return ret;
-}
-
-void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct wireless_dev *pwdev = padapter->rtw_wdev;
-
- DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
-
- if (pwdev->iftype != NL80211_IFTYPE_STATION &&
- pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
- return;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return;
-
- if (padapter->mlmepriv.to_roaming > 0) {
- struct wiphy *wiphy = pwdev->wiphy;
- struct ieee80211_channel *notify_channel;
- u32 freq;
- u16 channel = cur_network->network.DSConfig;
-
- if (channel <= RTW_CH_MAX_2G_CHANNEL)
- freq =
- ieee80211_channel_to_frequency(channel,
- NL80211_BAND_2GHZ);
- else
- freq =
- ieee80211_channel_to_frequency(channel,
- NL80211_BAND_5GHZ);
-
- notify_channel = ieee80211_get_channel(wiphy, freq);
-
- DBG_8723A("%s call cfg80211_roamed\n", __func__);
- cfg80211_roamed(padapter->pnetdev, notify_channel,
- cur_network->network.MacAddress,
- pmlmepriv->assoc_req +
- sizeof(struct ieee80211_hdr_3addr) + 2,
- pmlmepriv->assoc_req_len -
- sizeof(struct ieee80211_hdr_3addr) - 2,
- pmlmepriv->assoc_rsp +
- sizeof(struct ieee80211_hdr_3addr) + 6,
- pmlmepriv->assoc_rsp_len -
- sizeof(struct ieee80211_hdr_3addr) - 6,
- GFP_ATOMIC);
- } else {
- cfg80211_connect_result(padapter->pnetdev,
- cur_network->network.MacAddress,
- pmlmepriv->assoc_req +
- sizeof(struct ieee80211_hdr_3addr) + 2,
- pmlmepriv->assoc_req_len -
- sizeof(struct ieee80211_hdr_3addr) - 2,
- pmlmepriv->assoc_rsp +
- sizeof(struct ieee80211_hdr_3addr) + 6,
- pmlmepriv->assoc_rsp_len -
- sizeof(struct ieee80211_hdr_3addr) - 6,
- WLAN_STATUS_SUCCESS, GFP_ATOMIC);
- }
-}
-
-void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wireless_dev *pwdev = padapter->rtw_wdev;
-
- DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
-
- if (pwdev->iftype != NL80211_IFTYPE_STATION &&
- pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
- return;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- return;
-
- if (!padapter->mlmepriv.not_indic_disco) {
- if (check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING)) {
- cfg80211_connect_result(padapter->pnetdev, NULL, NULL,
- 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_ATOMIC);
- } else {
- cfg80211_disconnected(padapter->pnetdev, 0, NULL,
- 0, false, GFP_ATOMIC);
- }
- }
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-static int set_pairwise_key(struct rtw_adapter *padapter, struct sta_info *psta)
-{
- struct cmd_obj *ph2c;
- struct set_stakey_parm *psetstakey_para;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
- res = _FAIL;
- goto exit;
- }
-
- psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
- if (psetstakey_para == NULL) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
-
- psetstakey_para->algorithm = psta->dot118021XPrivacy;
-
- ether_addr_copy(psetstakey_para->addr, psta->hwaddr);
-
- memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16);
-
- res = rtw_enqueue_cmd23a(pcmdpriv, ph2c);
-
-exit:
- return res;
-}
-
-static int set_group_key(struct rtw_adapter *padapter, struct key_params *parms,
- u32 alg, u8 keyid)
-{
- struct cmd_obj *pcmd;
- struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- int res = _SUCCESS;
-
- DBG_8723A("%s\n", __func__);
-
- if (keyid >= 4) {
- res = _FAIL;
- goto exit;
- }
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!pcmd) {
- res = _FAIL;
- goto exit;
- }
- psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (!psetkeyparm) {
- kfree(pcmd);
- res = _FAIL;
- goto exit;
- }
-
- psetkeyparm->keyid = keyid;
- if (is_wep_enc(alg))
- padapter->mlmepriv.key_mask |= BIT(psetkeyparm->keyid);
-
- psetkeyparm->algorithm = alg;
-
- psetkeyparm->set_tx = 1;
-
- memcpy(&psetkeyparm->key, parms->key, parms->key_len);
-
- pcmd->cmdcode = _SetKey_CMD_;
- pcmd->parmbuf = (u8 *) psetkeyparm;
- pcmd->cmdsz = sizeof(struct setkey_parm);
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- res = rtw_enqueue_cmd23a(pcmdpriv, pcmd);
-
-exit:
- return res;
-}
-
-static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
- int set_tx, const u8 *sta_addr,
- struct key_params *keyparms)
-{
- int key_len;
- struct sta_info *psta = NULL, *pbcmc_sta = NULL;
- struct rtw_adapter *padapter = netdev_priv(dev);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- DBG_8723A("%s\n", __func__);
-
- if (!is_broadcast_ether_addr(sta_addr)) {
- psta = rtw_get_stainfo23a(pstapriv, sta_addr);
- if (!psta) {
- /* ret = -EINVAL; */
- DBG_8723A("rtw_set_encryption(), sta has already "
- "been removed or never been added\n");
- goto exit;
- }
- }
-
- key_len = keyparms->key_len;
-
- if (!psta && (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyparms->cipher == WLAN_CIPHER_SUITE_WEP104)) {
- DBG_8723A("r871x_set_encryption, crypt.alg = WEP\n");
-
- DBG_8723A("r871x_set_encryption, wep_key_idx =%d, len =%d\n",
- key_index, key_len);
-
- if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
- /* wep default key has not been set, so use
- this key index as default key. */
-
- psecuritypriv->ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- psecuritypriv->dot11PrivacyAlgrthm = keyparms->cipher;
- psecuritypriv->dot118021XGrpPrivacy = keyparms->cipher;
-
- psecuritypriv->dot11PrivacyKeyIndex = key_index;
- }
-
- memcpy(&psecuritypriv->wep_key[key_index].key,
- keyparms->key, key_len);
-
- psecuritypriv->wep_key[key_index].keylen = key_len;
-
- set_group_key(padapter, keyparms, keyparms->cipher, key_index);
-
- goto exit;
- }
-
- if (!psta) { /* group key */
- if (set_tx == 0) { /* group key */
- if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) {
- DBG_8723A("%s, set group_key, WEP\n", __func__);
-
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key, key_len);
-
- psecuritypriv->dot118021XGrpPrivacy =
- keyparms->cipher;
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_TKIP) {
- DBG_8723A("%s, set group_key, TKIP\n",
- __func__);
-
- psecuritypriv->dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_TKIP;
-
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key,
- (min(16, key_len)));
-
- /* set mic key */
- memcpy(psecuritypriv->
- dot118021XGrptxmickey[key_index].skey,
- &keyparms->key[16], 8);
- memcpy(psecuritypriv->
- dot118021XGrprxmickey[key_index].skey,
- &keyparms->key[24], 8);
-
- psecuritypriv->busetkipkey = 1;
-
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_CCMP) {
- DBG_8723A("%s, set group_key, CCMP\n",
- __func__);
-
- psecuritypriv->dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_CCMP;
-
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key,
- (min(16, key_len)));
- } else {
- DBG_8723A("%s, set group_key, none\n",
- __func__);
-
- psecuritypriv->dot118021XGrpPrivacy = 0;
- }
-
- psecuritypriv->dot118021XGrpKeyid = key_index;
-
- psecuritypriv->binstallGrpkey = 1;
-
- psecuritypriv->dot11PrivacyAlgrthm =
- psecuritypriv->dot118021XGrpPrivacy;
-
- set_group_key(padapter, keyparms,
- psecuritypriv->dot118021XGrpPrivacy,
- key_index);
-
- pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
- if (pbcmc_sta) {
- pbcmc_sta->ieee8021x_blocked = false;
- /* rx will use bmc_sta's dot118021XPrivacy */
- pbcmc_sta->dot118021XPrivacy =
- psecuritypriv->dot118021XGrpPrivacy;
-
- }
-
- }
-
- goto exit;
- }
-
- if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) {
- /* psk/802_1x */
- if (set_tx == 1) {
- /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey,
- keyparms->key, (min(16, key_len)));
-
- if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) {
- DBG_8723A("%s, set pairwise key, WEP\n",
- __func__);
-
- psecuritypriv->dot118021XGrpPrivacy =
- keyparms->cipher;
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_TKIP) {
- DBG_8723A("%s, set pairwise key, TKIP\n",
- __func__);
-
- psta->dot118021XPrivacy =
- WLAN_CIPHER_SUITE_TKIP;
-
- /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey,
- &keyparms->key[16], 8);
- memcpy(psta->dot11tkiprxmickey.skey,
- &keyparms->key[24], 8);
-
- psecuritypriv->busetkipkey = 1;
-
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_CCMP) {
- DBG_8723A("%s, set pairwise key, CCMP\n",
- __func__);
-
- psta->dot118021XPrivacy =
- WLAN_CIPHER_SUITE_CCMP;
- } else {
- DBG_8723A("%s, set pairwise key, none\n",
- __func__);
-
- psta->dot118021XPrivacy = 0;
- }
-
- set_pairwise_key(padapter, psta);
-
- psta->ieee8021x_blocked = false;
-
- psta->bpairwise_key_installed = true;
- } else { /* group key??? */
- if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) {
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key, key_len);
-
- psecuritypriv->dot118021XGrpPrivacy =
- keyparms->cipher;
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_TKIP) {
- psecuritypriv->dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_TKIP;
-
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key,
- (min(16, key_len)));
-
- /* set mic key */
- memcpy(psecuritypriv->
- dot118021XGrptxmickey[key_index].skey,
- &keyparms->key[16], 8);
- memcpy(psecuritypriv->
- dot118021XGrprxmickey[key_index].skey,
- &keyparms->key[24], 8);
-
- psecuritypriv->busetkipkey = 1;
- } else if (keyparms->cipher == WLAN_CIPHER_SUITE_CCMP) {
- psecuritypriv->dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_CCMP;
-
- memcpy(psecuritypriv->
- dot118021XGrpKey[key_index].skey,
- keyparms->key,
- (min(16, key_len)));
- } else {
- psecuritypriv->dot118021XGrpPrivacy = 0;
- }
-
- psecuritypriv->dot118021XGrpKeyid = key_index;
-
- psecuritypriv->binstallGrpkey = 1;
-
- psecuritypriv->dot11PrivacyAlgrthm =
- psecuritypriv->dot118021XGrpPrivacy;
-
- set_group_key(padapter, keyparms,
- psecuritypriv->dot118021XGrpPrivacy,
- key_index);
-
- pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
- if (pbcmc_sta) {
- /* rx will use bmc_sta's
- dot118021XPrivacy */
- pbcmc_sta->ieee8021x_blocked = false;
- pbcmc_sta->dot118021XPrivacy =
- psecuritypriv->dot118021XGrpPrivacy;
- }
- }
- }
-
-exit:
-
- return 0;
-}
-#endif
-
-static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index,
- int set_tx, const u8 *sta_addr,
- struct key_params *keyparms)
-{
- int ret = 0;
- int key_len;
- struct rtw_adapter *padapter = netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s\n", __func__);
-
- key_len = keyparms->key_len;
-
- if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
- "wpa_set_encryption, crypt.alg = WEP\n");
- DBG_8723A("wpa_set_encryption, crypt.alg = WEP\n");
-
- if (psecuritypriv->bWepDefaultKeyIdxSet == 0) {
- /* wep default key has not been set, so use this
- key index as default key. */
-
- psecuritypriv->ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- psecuritypriv->dot11PrivacyAlgrthm = keyparms->cipher;
- psecuritypriv->dot118021XGrpPrivacy = keyparms->cipher;
-
- psecuritypriv->dot11PrivacyKeyIndex = key_index;
- }
-
- memcpy(&psecuritypriv->wep_key[key_index].key,
- keyparms->key, key_len);
-
- psecuritypriv->wep_key[key_index].keylen = key_len;
-
- rtw_set_key23a(padapter, psecuritypriv, key_index, 0);
-
- goto exit;
- }
-
- if (padapter->securitypriv.dot11AuthAlgrthm ==
- dot11AuthAlgrthm_8021X) { /* 802_1x */
- struct sta_info *psta, *pbcmc_sta;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (check_fwstate(pmlmepriv,
- WIFI_STATION_STATE | WIFI_MP_STATE)) {
- /* sta mode */
- psta = rtw_get_stainfo23a(pstapriv, get_bssid(pmlmepriv));
- if (psta == NULL) {
- DBG_8723A("%s, : Obtain Sta_info fail\n",
- __func__);
- } else {
- /* Jeff: don't disable ieee8021x_blocked
- while clearing key */
- if (keyparms->cipher != IW_AUTH_CIPHER_NONE &&
- keyparms->cipher != 0)
- psta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption3Enabled)) {
- psta->dot118021XPrivacy =
- padapter->securitypriv.
- dot11PrivacyAlgrthm;
- }
-
- if (set_tx == 1) {
- /* pairwise key */
- DBG_8723A("%s, : set_tx == 1\n",
- __func__);
-
- memcpy(psta->dot118021x_UncstKey.skey,
- keyparms->key,
- (min(16, key_len)));
-
- if (keyparms->cipher ==
- WLAN_CIPHER_SUITE_TKIP) {
- memcpy(psta->dot11tkiptxmickey.
- skey,
- &keyparms->key[16], 8);
- memcpy(psta->dot11tkiprxmickey.
- skey,
- &keyparms->key[24], 8);
-
- padapter->securitypriv.
- busetkipkey = 0;
- }
- DBG_8723A(" ~~~~set sta key:unicastkey\n");
-
- rtw_setstakey_cmd23a(padapter,
- (unsigned char *)psta,
- true);
- } else { /* group key */
- memcpy(padapter->securitypriv.
- dot118021XGrpKey[key_index].skey,
- keyparms->key,
- (min(16, key_len)));
- memcpy(padapter->securitypriv.
- dot118021XGrptxmickey[key_index].
- skey, &keyparms->key[16], 8);
- memcpy(padapter->securitypriv.
- dot118021XGrprxmickey[key_index].
- skey, &keyparms->key[24], 8);
- padapter->securitypriv.binstallGrpkey =
- 1;
- DBG_8723A
- (" ~~~~set sta key:groupkey\n");
-
- padapter->securitypriv.
- dot118021XGrpKeyid = key_index;
-
- rtw_set_key23a(padapter,
- &padapter->securitypriv,
- key_index, 1);
- }
- }
-
- pbcmc_sta = rtw_get_bcmc_stainfo23a(padapter);
- if (pbcmc_sta) {
- /* Jeff: don't disable ieee8021x_blocked
- while clearing key */
- if (keyparms->cipher != IW_AUTH_CIPHER_NONE &&
- keyparms->cipher != 0)
- pbcmc_sta->ieee8021x_blocked = false;
-
- if ((padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus ==
- Ndis802_11Encryption3Enabled)) {
- pbcmc_sta->dot118021XPrivacy =
- padapter->securitypriv.
- dot11PrivacyAlgrthm;
- }
- }
- }
- }
-
-exit:
-
- DBG_8723A("%s, ret =%d\n", __func__, ret);
-
-
-
- return ret;
-}
-
-static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
- const u8 *mac_addr, struct key_params *params)
-{
- int set_tx, ret = 0;
- struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 sta_addr[ETH_ALEN];
-
- DBG_8723A("%s(%s): adding key for %pM\n", __func__, ndev->name,
- mac_addr);
- DBG_8723A("cipher = 0x%x\n", params->cipher);
- DBG_8723A("key_len = 0x%x\n", params->key_len);
- DBG_8723A("seq_len = 0x%x\n", params->seq_len);
- DBG_8723A("key_index =%d\n", key_index);
- DBG_8723A("pairwise =%d\n", pairwise);
-
- switch (params->cipher) {
- case IW_AUTH_CIPHER_NONE:
- case WLAN_CIPHER_SUITE_WEP40:
- if (params->key_len != WLAN_KEY_LEN_WEP40) {
- ret = -EINVAL;
- goto exit;
- }
- case WLAN_CIPHER_SUITE_WEP104:
- if (params->key_len != WLAN_KEY_LEN_WEP104) {
- ret = -EINVAL;
- goto exit;
- }
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- ret = -ENOTSUPP;
- goto exit;
- }
-
- if (key_index >= WEP_KEYS || params->key_len < 0) {
- ret = -EINVAL;
- goto exit;
- }
-
- eth_broadcast_addr(sta_addr);
-
- if (!mac_addr || is_broadcast_ether_addr(mac_addr))
- set_tx = 0; /* for wpa/wpa2 group key */
- else
- set_tx = 1; /* for wpa/wpa2 pairwise key */
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- ret = rtw_cfg80211_set_encryption(ndev, key_index, set_tx,
- sta_addr, params);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
-#ifdef CONFIG_8723AU_AP_MODE
- if (mac_addr)
- ether_addr_copy(sta_addr, mac_addr);
-
- ret = rtw_cfg80211_ap_set_encryption(ndev, key_index, set_tx,
- sta_addr, params);
-#endif
- } else {
- DBG_8723A("error! fw_state = 0x%x, iftype =%d\n",
- pmlmepriv->fw_state, rtw_wdev->iftype);
-
- }
-
-exit:
- return ret;
-}
-
-static int
-cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
- void (*callback) (void *cookie, struct key_params *))
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-
-static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
- const u8 *mac_addr)
-{
- struct rtw_adapter *padapter = netdev_priv(ndev);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s(%s): key_index =%d\n", __func__, ndev->name, key_index);
-
- if (key_index == psecuritypriv->dot11PrivacyKeyIndex) {
- /* clear the flag of wep default key set. */
- psecuritypriv->bWepDefaultKeyIdxSet = 0;
- }
-
- return 0;
-}
-
-static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_index,
- bool unicast, bool multicast)
-{
- struct rtw_adapter *padapter = netdev_priv(ndev);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s(%s): key_index =%d, unicast =%d, multicast =%d.\n",
- __func__, ndev->name, key_index, unicast, multicast);
-
- if (key_index < NUM_WEP_KEYS &&
- (psecuritypriv->dot11PrivacyAlgrthm == WLAN_CIPHER_SUITE_WEP40 ||
- psecuritypriv->dot11PrivacyAlgrthm == WLAN_CIPHER_SUITE_WEP104)) {
- /* set wep default key */
- psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
-
- psecuritypriv->dot11PrivacyKeyIndex = key_index;
-
- psecuritypriv->dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_WEP40;
- psecuritypriv->dot118021XGrpPrivacy = WLAN_CIPHER_SUITE_WEP40;
- if (psecuritypriv->wep_key[key_index].keylen == 13) {
- psecuritypriv->dot11PrivacyAlgrthm =
- WLAN_CIPHER_SUITE_WEP104;
- psecuritypriv->dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_WEP104;
- }
-
- /* set the flag to represent that wep default key
- has been set */
- psecuritypriv->bWepDefaultKeyIdxSet = 1;
- }
-
- return 0;
-}
-
-static u16 rtw_get_cur_max_rate(struct rtw_adapter *adapter)
-{
- int i = 0;
- const u8 *p;
- u16 rate = 0, max_rate = 0;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- struct ieee80211_ht_cap *pht_capie;
- u8 rf_type = 0;
- u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
- u16 mcs_rate = 0;
-
- p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY,
- pcur_bss->IEs, pcur_bss->IELength);
- if (p && p[1] > 0) {
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
-
- memcpy(&mcs_rate, &pht_capie->mcs, 2);
-
- /* bw_40MHz = (pht_capie->cap_info&
- IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1:0; */
- /* cur_bwmode is updated by beacon, pmlmeinfo is
- updated by association response */
- bw_40MHz = (pmlmeext->cur_bwmode &&
- (pmlmeinfo->HT_info.ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) ? 1:0;
-
- /* short_GI = (pht_capie->cap_info & (IEEE80211_HT_CAP
- _SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1 : 0; */
- short_GI_20 = (pmlmeinfo->ht_cap.cap_info &
- cpu_to_le16(IEEE80211_HT_CAP_SGI_20)) ? 1:0;
- short_GI_40 = (pmlmeinfo->ht_cap.cap_info &
- cpu_to_le16(IEEE80211_HT_CAP_SGI_40)) ? 1:0;
-
- rf_type = rtl8723a_get_rf_type(adapter);
- max_rate = rtw_mcs_rate23a(rf_type, bw_40MHz &
- pregistrypriv->cbw40_enable,
- short_GI_20, short_GI_40,
- &pmlmeinfo->ht_cap.mcs);
- } else {
- while (pcur_bss->SupportedRates[i] != 0 &&
- pcur_bss->SupportedRates[i] != 0xFF) {
- rate = pcur_bss->SupportedRates[i] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- i++;
- }
-
- max_rate = max_rate * 10 / 2;
- }
-
- return max_rate;
-}
-
-static int cfg80211_rtw_get_station(struct wiphy *wiphy,
- struct net_device *ndev,
- const u8 *mac, struct station_info *sinfo)
-{
- int ret = 0;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- sinfo->filled = 0;
-
- if (!mac) {
- DBG_8723A("%s(%s): mac ==%p\n", __func__, ndev->name, mac);
- ret = -ENOENT;
- goto exit;
- }
-
- psta = rtw_get_stainfo23a(pstapriv, mac);
- if (psta == NULL) {
- DBG_8723A("%s, sta_info is null\n", __func__);
- ret = -ENOENT;
- goto exit;
- }
- DBG_8723A("%s(%s): mac=%pM\n", __func__, ndev->name, mac);
-
- /* for infra./P2PClient mode */
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
-
- if (!ether_addr_equal(mac, cur_network->network.MacAddress)) {
- DBG_8723A("%s, mismatch bssid=%pM\n",
- __func__, cur_network->network.MacAddress);
- ret = -ENOENT;
- goto exit;
- }
-
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
- sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.
- signal_strength);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
- sinfo->txrate.legacy = rtw_get_cur_max_rate(padapter);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
- sinfo->rx_packets = sta_rx_data_pkts(psta);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
- sinfo->tx_packets = psta->sta_stats.tx_pkts;
- }
-
- /* for Ad-Hoc/AP mode */
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
- check_fwstate(pmlmepriv, WIFI_AP_STATE)) &&
- check_fwstate(pmlmepriv, _FW_LINKED)
- ) {
- /* TODO: should acquire station info... */
- }
-
-exit:
- return ret;
-}
-
-static int cfg80211_infrastructure_mode(struct rtw_adapter *padapter,
- enum nl80211_iftype ifmode)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- enum nl80211_iftype old_mode;
-
- old_mode = cur_network->network.ifmode;
-
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
- "+%s: old =%d new =%d fw_state = 0x%08x\n", __func__,
- old_mode, ifmode, get_fwstate(pmlmepriv));
-
- if (old_mode != ifmode) {
- spin_lock_bh(&pmlmepriv->lock);
-
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "change mode!\n");
-
- if (old_mode == NL80211_IFTYPE_AP ||
- old_mode == NL80211_IFTYPE_P2P_GO) {
- /* change to other mode from Ndis802_11APMode */
- cur_network->join_res = -1;
-
-#ifdef CONFIG_8723AU_AP_MODE
- stop_ap_mode23a(padapter);
-#endif
- }
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) ||
- old_mode == NL80211_IFTYPE_ADHOC)
- rtw_disassoc_cmd23a(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))
- rtw_free_assoc_resources23a(padapter, 1);
-
- if (old_mode == NL80211_IFTYPE_STATION ||
- old_mode == NL80211_IFTYPE_P2P_CLIENT ||
- old_mode == NL80211_IFTYPE_ADHOC) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- /* will clear Linked_state; before this function,
- we must have checked whether to issue
- dis-assoc_cmd or not */
- rtw_indicate_disconnect23a(padapter);
- }
- }
-
- cur_network->network.ifmode = ifmode;
-
- _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
-
- switch (ifmode) {
- case NL80211_IFTYPE_ADHOC:
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- break;
-
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- set_fwstate(pmlmepriv, WIFI_STATION_STATE);
- break;
-
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_AP:
- set_fwstate(pmlmepriv, WIFI_AP_STATE);
-#ifdef CONFIG_8723AU_AP_MODE
- start_ap_mode23a(padapter);
- /* rtw_indicate_connect23a(padapter); */
-#endif
- break;
-
- default:
- break;
- }
-
- /* SecClearAllKeys(adapter); */
-
- spin_unlock_bh(&pmlmepriv->lock);
- }
-
- return _SUCCESS;
-}
-
-static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
- struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- enum nl80211_iftype old_type;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
- int ret = 0;
-
- DBG_8723A("%s(%s): call netdev_open23a\n", __func__, ndev->name);
-
- old_type = rtw_wdev->iftype;
- DBG_8723A("%s(%s): old_iftype =%d, new_iftype =%d\n",
- __func__, ndev->name, old_type, type);
-
- if (old_type != type) {
- pmlmeext->action_public_rxseq = 0xffff;
- pmlmeext->action_public_dialog_token = 0xff;
- }
-
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_UNSPECIFIED:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- rtw_wdev->iftype = type;
-
- if (cfg80211_infrastructure_mode(padapter, type) != _SUCCESS) {
- rtw_wdev->iftype = old_type;
- ret = -EPERM;
- goto exit;
- }
-
- rtw_setopmode_cmd23a(padapter, type);
-
-exit:
- return ret;
-}
-
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted)
-{
- spin_lock_bh(&pwdev_priv->scan_req_lock);
- if (pwdev_priv->scan_request != NULL) {
- DBG_8723A("%s with scan req\n", __func__);
-
- if (pwdev_priv->scan_request->wiphy !=
- pwdev_priv->rtw_wdev->wiphy) {
- DBG_8723A("error wiphy compare\n");
- } else {
- struct cfg80211_scan_info info = {
- .aborted = aborted,
- };
-
- cfg80211_scan_done(pwdev_priv->scan_request, &info);
- }
-
- pwdev_priv->scan_request = NULL;
- } else {
- DBG_8723A("%s without scan req\n", __func__);
- }
- spin_unlock_bh(&pwdev_priv->scan_req_lock);
-}
-
-void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter)
-{
- struct list_head *phead;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct rtw_queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork, *ptmp;
-
- spin_lock_bh(&pmlmepriv->scanned_queue.lock);
- phead = get_list_head(queue);
- list_for_each_entry_safe(pnetwork, ptmp, phead, list) {
- /* report network only if the current channel set
- contains the channel to which this network belongs */
- if (rtw_ch_set_search_ch23a
- (padapter->mlmeextpriv.channel_set,
- pnetwork->network.DSConfig) >= 0)
- rtw_cfg80211_inform_bss(padapter, pnetwork);
- }
- spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
- /* call this after other things have been done */
- rtw_cfg80211_indicate_scan_done(wdev_to_priv(padapter->rtw_wdev),
- false);
-}
-
-static int rtw_cfg80211_set_probe_req_wpsp2pie(struct rtw_adapter *padapter,
- char *buf, int len)
-{
- int ret = 0;
- const u8 *wps_ie;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- DBG_8723A("%s, ielen =%d\n", __func__, len);
-
- if (len > 0) {
- wps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- buf, len);
- if (wps_ie) {
- DBG_8723A("probe_req_wps_ielen =%d\n", wps_ie[1]);
-
- if (pmlmepriv->wps_probe_req_ie) {
- pmlmepriv->wps_probe_req_ie_len = 0;
- kfree(pmlmepriv->wps_probe_req_ie);
- pmlmepriv->wps_probe_req_ie = NULL;
- }
-
- pmlmepriv->wps_probe_req_ie = kmemdup(wps_ie, wps_ie[1],
- GFP_KERNEL);
- if (pmlmepriv->wps_probe_req_ie == NULL) {
- DBG_8723A("%s()-%d: kmalloc() ERROR!\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- pmlmepriv->wps_probe_req_ie_len = wps_ie[1];
- }
- }
-
- return ret;
-}
-
-static int cfg80211_rtw_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request)
-{
- int i;
- u8 _status = false;
- int ret = 0;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct cfg80211_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
- struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
- struct cfg80211_ssid *ssids = request->ssids;
- bool need_indicate_scan_done = false;
-
- DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
-
- spin_lock_bh(&pwdev_priv->scan_req_lock);
- pwdev_priv->scan_request = request;
- spin_unlock_bh(&pwdev_priv->scan_req_lock);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- DBG_8723A("%s under WIFI_AP_STATE\n", __func__);
- /* need_indicate_scan_done = true; */
- /* goto check_need_indicate_scan_done; */
- }
-
- if (rtw_pwr_wakeup(padapter) == _FAIL) {
- need_indicate_scan_done = true;
- goto check_need_indicate_scan_done;
- }
-
- if (request->ie && request->ie_len > 0) {
- rtw_cfg80211_set_probe_req_wpsp2pie(padapter,
- (u8 *) request->ie,
- request->ie_len);
- }
-
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
- DBG_8723A("%s, bBusyTraffic == true\n", __func__);
- need_indicate_scan_done = true;
- goto check_need_indicate_scan_done;
- }
- if (rtw_is_scan_deny(padapter)) {
- DBG_8723A("%s(%s): scan deny\n", __func__,
- padapter->pnetdev->name);
- need_indicate_scan_done = true;
- goto check_need_indicate_scan_done;
- }
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING) ==
- true) {
- DBG_8723A("%s, fwstate = 0x%x\n", __func__, pmlmepriv->fw_state);
- need_indicate_scan_done = true;
- goto check_need_indicate_scan_done;
- }
-
- memset(ssid, 0, sizeof(struct cfg80211_ssid) * RTW_SSID_SCAN_AMOUNT);
- /* parsing request ssids, n_ssids */
- for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
- DBG_8723A("ssid =%s, len =%d\n", ssids[i].ssid,
- ssids[i].ssid_len);
- memcpy(ssid[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
- ssid[i].ssid_len = ssids[i].ssid_len;
- }
-
- /* parsing channels, n_channels */
- memset(ch, 0,
- sizeof(struct rtw_ieee80211_channel) * RTW_CHANNEL_SCAN_AMOUNT);
-
- if (request->n_channels == 1) {
- for (i = 0; i < request->n_channels &&
- i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
- DBG_8723A("%s:(%s):" CHAN_FMT "\n",
- __func__, padapter->pnetdev->name,
- CHAN_ARG(request->channels[i]));
- ch[i].hw_value = request->channels[i]->hw_value;
- ch[i].flags = request->channels[i]->flags;
- }
- }
-
- spin_lock_bh(&pmlmepriv->lock);
- if (request->n_channels == 1) {
- memcpy(&ch[1], &ch[0], sizeof(struct rtw_ieee80211_channel));
- memcpy(&ch[2], &ch[0], sizeof(struct rtw_ieee80211_channel));
- _status = rtw_sitesurvey_cmd23a(padapter, ssid,
- RTW_SSID_SCAN_AMOUNT, ch, 3);
- } else {
- _status = rtw_sitesurvey_cmd23a(padapter, ssid,
- RTW_SSID_SCAN_AMOUNT, NULL, 0);
- }
- spin_unlock_bh(&pmlmepriv->lock);
-
- if (_status == false)
- ret = -1;
-
-check_need_indicate_scan_done:
- if (need_indicate_scan_done)
- rtw_cfg80211_surveydone_event_callback(padapter);
- return ret;
-}
-
-static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- DBG_8723A("%s\n", __func__);
- return 0;
-}
-
-static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_ibss_params *params)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-
-static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-
-static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv,
- u32 wpa_version)
-{
- DBG_8723A("%s, wpa_version =%d\n", __func__, wpa_version);
-
- if (!wpa_version) {
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
- return 0;
- }
-
- if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
-
-/*
- if (wpa_version & NL80211_WPA_VERSION_2)
- {
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK;
- }
-*/
-
- return 0;
-}
-
-static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
- enum nl80211_auth_type sme_auth_type)
-{
- DBG_8723A("%s, nl80211_auth_type =%d\n", __func__, sme_auth_type);
-
- switch (sme_auth_type) {
- case NL80211_AUTHTYPE_AUTOMATIC:
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
-
- break;
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
-
- if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA)
- psecuritypriv->dot11AuthAlgrthm =
- dot11AuthAlgrthm_8021X;
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
-
- psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- default:
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- /* return -ENOTSUPP; */
- }
-
- return 0;
-}
-
-static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv,
- u32 cipher, bool ucast)
-{
- u32 ndisencryptstatus = Ndis802_11EncryptionDisabled;
-
- u32 *profile_cipher = ucast ? &psecuritypriv->dot11PrivacyAlgrthm :
- &psecuritypriv->dot118021XGrpPrivacy;
-
- DBG_8723A("%s, ucast =%d, cipher = 0x%x\n", __func__, ucast, cipher);
-
- if (!cipher) {
- *profile_cipher = 0;
- psecuritypriv->ndisencryptstatus = ndisencryptstatus;
- return 0;
- }
-
- switch (cipher) {
- case IW_AUTH_CIPHER_NONE:
- *profile_cipher = 0;
- ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- *profile_cipher = WLAN_CIPHER_SUITE_WEP40;
- ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- *profile_cipher = WLAN_CIPHER_SUITE_WEP104;
- ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- *profile_cipher = WLAN_CIPHER_SUITE_TKIP;
- ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- *profile_cipher = WLAN_CIPHER_SUITE_CCMP;
- ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- default:
- DBG_8723A("Unsupported cipher: 0x%x\n", cipher);
- return -ENOTSUPP;
- }
-
- if (ucast)
- psecuritypriv->ndisencryptstatus = ndisencryptstatus;
-
- return 0;
-}
-
-static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv,
- u32 key_mgt)
-{
- DBG_8723A("%s, key_mgt = 0x%x\n", __func__, key_mgt);
-
- if (key_mgt == WLAN_AKM_SUITE_8021X)
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- else if (key_mgt == WLAN_AKM_SUITE_PSK)
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
- else
- DBG_8723A("Invalid key mgt: 0x%x\n", key_mgt);
-
- return 0;
-}
-
-static int rtw_cfg80211_set_wpa_ie(struct rtw_adapter *padapter, const u8 *pie,
- size_t ielen)
-{
- const u8 *wps_ie;
- int group_cipher = 0, pairwise_cipher = 0;
- int ret = 0;
- const u8 *pwpa, *pwpa2;
- int i;
-
- if (!pie || !ielen) {
- /* Treat this as normal case, but need to clear
- WIFI_UNDER_WPS */
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- goto exit;
- }
- if (ielen > MAX_WPA_IE_LEN + MAX_WPS_IE_LEN + MAX_P2P_IE_LEN) {
- ret = -EINVAL;
- goto exit;
- }
-
- /* dump */
- DBG_8723A("set wpa_ie(length:%zu):\n", ielen);
- for (i = 0; i < ielen; i = i + 8)
- DBG_8723A("0x%.2x 0x%.2x 0x%.2x 0x%.2x "
- "0x%.2x 0x%.2x 0x%.2x 0x%.2x\n",
- pie[i], pie[i + 1], pie[i + 2], pie[i + 3],
- pie[i + 4], pie[i + 5], pie[i + 6], pie[i + 7]);
- if (ielen < RSN_HEADER_LEN) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
- "Ie len too short %d\n", (int)ielen);
- ret = -1;
- goto exit;
- }
-
- pwpa = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- pie, ielen);
- if (pwpa && pwpa[1] > 0) {
- if (rtw_parse_wpa_ie23a(pwpa, pwpa[1] + 2, &group_cipher,
- &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm =
- dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =
- Ndis802_11AuthModeWPAPSK;
- memcpy(padapter->securitypriv.supplicant_ie, pwpa,
- pwpa[1] + 2);
-
- DBG_8723A("got wpa_ie, wpa_ielen:%u\n", pwpa[1]);
- }
- }
-
- pwpa2 = cfg80211_find_ie(WLAN_EID_RSN, pie, ielen);
- if (pwpa2 && pwpa2[1] > 0) {
- if (rtw_parse_wpa2_ie23a (pwpa2, pwpa2[1] + 2, &group_cipher,
- &pairwise_cipher, NULL) == _SUCCESS) {
- padapter->securitypriv.dot11AuthAlgrthm =
- dot11AuthAlgrthm_8021X;
- padapter->securitypriv.ndisauthtype =
- Ndis802_11AuthModeWPA2PSK;
- memcpy(padapter->securitypriv.supplicant_ie, pwpa2,
- pwpa2[1] + 2);
-
- DBG_8723A("got wpa2_ie, wpa2_ielen:%u\n", pwpa2[1]);
- }
- }
-
- if (group_cipher == 0) {
- group_cipher = WPA_CIPHER_NONE;
- }
- if (pairwise_cipher == 0) {
- pairwise_cipher = WPA_CIPHER_NONE;
- }
-
- switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = 0;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = WLAN_CIPHER_SUITE_WEP40;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = WLAN_CIPHER_SUITE_TKIP;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = WLAN_CIPHER_SUITE_CCMP;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = WLAN_CIPHER_SUITE_WEP104;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- break;
- }
-
- switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = 0;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_WEP40;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_TKIP;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_CCMP;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_WEP104;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
- break;
- }
-
- wps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- pie, ielen);
- if (wps_ie && wps_ie[1] > 0) {
- DBG_8723A("got wps_ie, wps_ielen:%u\n", wps_ie[1]);
- padapter->securitypriv.wps_ie_len = wps_ie[1];
- memcpy(padapter->securitypriv.wps_ie, wps_ie,
- padapter->securitypriv.wps_ie_len);
- set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
- } else {
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- }
-
- /* TKIP and AES disallow multicast packets until installing group key */
- if (padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_TKIP ||
- padapter->securitypriv.dot11PrivacyAlgrthm ==
- WLAN_CIPHER_SUITE_CCMP)
- /* WPS open need to enable multicast */
- /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true)*/
- rtl8723a_off_rcr_am(padapter);
-
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- "rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
- pairwise_cipher,
- padapter->securitypriv.ndisencryptstatus,
- padapter->securitypriv.ndisauthtype);
-
-exit:
- if (ret)
- _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- return ret;
-}
-
-static int rtw_cfg80211_add_wep(struct rtw_adapter *padapter,
- struct rtw_wep_key *wep, u8 keyid)
-{
- int res;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- if (keyid >= NUM_WEP_KEYS) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- "%s:keyid>4 =>fail\n", __func__);
- res = _FAIL;
- goto exit;
- }
-
- switch (wep->keylen) {
- case WLAN_KEY_LEN_WEP40:
- psecuritypriv->dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_WEP40;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "%s:wep->KeyLength = 5\n", __func__);
- break;
- case WLAN_KEY_LEN_WEP104:
- psecuritypriv->dot11PrivacyAlgrthm = WLAN_CIPHER_SUITE_WEP104;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "%s:wep->KeyLength = 13\n", __func__);
- break;
- default:
- psecuritypriv->dot11PrivacyAlgrthm = 0;
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "%s:wep->KeyLength!= 5 or 13\n", __func__);
- res = _FAIL;
- goto exit;
- }
-
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "%s:before memcpy, wep->KeyLength = 0x%x keyid =%x\n",
- __func__, wep->keylen, keyid);
-
- memcpy(&psecuritypriv->wep_key[keyid], wep, sizeof(struct rtw_wep_key));
-
- psecuritypriv->dot11PrivacyKeyIndex = keyid;
-
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "%s:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
- __func__,
- psecuritypriv->wep_key[keyid].key[0],
- psecuritypriv->wep_key[keyid].key[1],
- psecuritypriv->wep_key[keyid].key[2],
- psecuritypriv->wep_key[keyid].key[3],
- psecuritypriv->wep_key[keyid].key[4],
- psecuritypriv->wep_key[keyid].key[5],
- psecuritypriv->wep_key[keyid].key[6],
- psecuritypriv->wep_key[keyid].key[7],
- psecuritypriv->wep_key[keyid].key[8],
- psecuritypriv->wep_key[keyid].key[9],
- psecuritypriv->wep_key[keyid].key[10],
- psecuritypriv->wep_key[keyid].key[11],
- psecuritypriv->wep_key[keyid].key[12]);
-
- res = rtw_set_key23a(padapter, psecuritypriv, keyid, 1);
-
-exit:
-
- return res;
-}
-
-static int rtw_set_ssid(struct rtw_adapter *padapter,
- struct wlan_network *newnetwork)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- int status = _SUCCESS;
- u32 cur_time = 0;
-
- DBG_8723A_LEVEL(_drv_always_, "set ssid [%s] fw_state = 0x%08x\n",
- newnetwork->network.Ssid.ssid, get_fwstate(pmlmepriv));
-
- if (padapter->hw_init_completed == false) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- "set_ssid: hw_init_completed == false =>exit!!!\n");
- status = _FAIL;
- goto exit;
- }
-
- spin_lock_bh(&pmlmepriv->lock);
-
- DBG_8723A("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- goto handle_tkip_countermeasure;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n");
-
- if (pmlmepriv->assoc_ssid.ssid_len ==
- newnetwork->network.Ssid.ssid_len &&
- !memcmp(&pmlmepriv->assoc_ssid.ssid,
- newnetwork->network.Ssid.ssid,
- newnetwork->network.Ssid.ssid_len)) {
- if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- RT_TRACE(_module_rtl871x_ioctl_set_c_,
- _drv_err_,
- "New SSID is same SSID, fw_state = 0x%08x\n",
- get_fwstate(pmlmepriv));
-
- if (rtw_is_same_ibss23a(padapter, pnetwork)) {
- /*
- * it means driver is in
- * WIFI_ADHOC_MASTER_STATE, we needn't
- * create bss again.
- */
- goto release_mlme_lock;
- }
-
- /*
- * if in WIFI_ADHOC_MASTER_STATE |
- * WIFI_ADHOC_STATE, create bss or
- * rejoin again
- */
- rtw_disassoc_cmd23a(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_indicate_disconnect23a(padapter);
-
- rtw_free_assoc_resources23a(padapter, 1);
-
- if (check_fwstate(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE)) {
- _clr_fwstate_(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE);
- set_fwstate(pmlmepriv,
- WIFI_ADHOC_STATE);
- }
- } else {
- rtw_lps_ctrl_wk_cmd23a(padapter,
- LPS_CTRL_JOINBSS, 1);
- }
- } else {
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "Set SSID not the same ssid\n");
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "set_ssid =[%s] len = 0x%x\n",
- newnetwork->network.Ssid.ssid,
- newnetwork->network.Ssid.ssid_len);
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- "assoc_ssid =[%s] len = 0x%x\n",
- pmlmepriv->assoc_ssid.ssid,
- pmlmepriv->assoc_ssid.ssid_len);
-
- rtw_disassoc_cmd23a(padapter, 0, true);
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_indicate_disconnect23a(padapter);
-
- rtw_free_assoc_resources23a(padapter, 1);
-
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
- set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
- }
- }
- }
-
-handle_tkip_countermeasure:
-
- if (padapter->securitypriv.btkip_countermeasure == true) {
- cur_time = jiffies;
-
- if ((cur_time -
- padapter->securitypriv.btkip_countermeasure_time) >
- 60 * HZ) {
- padapter->securitypriv.btkip_countermeasure = false;
- padapter->securitypriv.btkip_countermeasure_time = 0;
- } else {
- status = _FAIL;
- goto release_mlme_lock;
- }
- }
-
- memcpy(&pmlmepriv->assoc_ssid, &newnetwork->network.Ssid,
- sizeof(struct cfg80211_ssid));
-
- pmlmepriv->assoc_by_bssid = false;
-
- pmlmepriv->to_join = true;
-
- if (!check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- pmlmepriv->cur_network.join_res = -2;
-
- status = rtw_do_join_network(padapter, newnetwork);
- if (status == _SUCCESS) {
- pmlmepriv->to_join = false;
- } else {
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- /* switch to ADHOC_MASTER */
- status = rtw_do_join_adhoc(padapter);
- if (status != _SUCCESS)
- goto release_mlme_lock;
- } else {
- /* can't associate; reset under-linking */
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- status = _FAIL;
- pmlmepriv->to_join = false;
- }
- }
- }
-release_mlme_lock:
- spin_unlock_bh(&pmlmepriv->lock);
-
-exit:
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
- "-%s: status =%d\n", __func__, status);
-
- return status;
-}
-
-static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_connect_params *sme)
-{
- int ret = 0;
- struct list_head *phead, *plist, *ptmp;
- struct wlan_network *pnetwork = NULL;
- /* u8 matched_by_bssid = false; */
- /* u8 matched_by_ssid = false; */
- u8 matched = false;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct rtw_queue *queue = &pmlmepriv->scanned_queue;
-
- DBG_8723A("=>" "%s(%s)\n", __func__, ndev->name);
- DBG_8723A("privacy =%d, key =%p, key_len =%d, key_idx =%d\n",
- sme->privacy, sme->key, sme->key_len, sme->key_idx);
-
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -EPERM;
- goto exit;
- }
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- ret = -EPERM;
- goto exit;
- }
-
- if (!sme->ssid || !sme->ssid_len ||
- sme->ssid_len > IEEE80211_MAX_SSID_LEN) {
- ret = -EINVAL;
- goto exit;
- }
-
- DBG_8723A("ssid =%s, len =%zu\n", sme->ssid, sme->ssid_len);
-
- if (sme->bssid)
- DBG_8723A("bssid=%pM\n", sme->bssid);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- ret = -EBUSY;
- DBG_8723A("%s, fw_state = 0x%x, goto exit\n", __func__,
- pmlmepriv->fw_state);
- goto exit;
- }
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- rtw_scan_abort23a(padapter);
- }
-
- spin_lock_bh(&queue->lock);
-
- phead = get_list_head(queue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
- if (sme->bssid) {
- if (!ether_addr_equal(pnetwork->network.MacAddress,
- sme->bssid))
- continue;
- }
-
- if (sme->ssid && sme->ssid_len) {
- if (pnetwork->network.Ssid.ssid_len != sme->ssid_len ||
- memcmp(pnetwork->network.Ssid.ssid, sme->ssid,
- sme->ssid_len))
- continue;
- }
-
- if (sme->bssid) {
- if (ether_addr_equal(pnetwork->network.MacAddress,
- sme->bssid)) {
- DBG_8723A("matched by bssid\n");
-
- matched = true;
- break;
- }
- } else if (sme->ssid && sme->ssid_len) {
- if (!memcmp(pnetwork->network.Ssid.ssid,
- sme->ssid, sme->ssid_len) &&
- pnetwork->network.Ssid.ssid_len == sme->ssid_len) {
- DBG_8723A("matched by ssid\n");
-
- matched = true;
- break;
- }
- }
- }
-
- spin_unlock_bh(&queue->lock);
-
- if (!matched || !pnetwork) {
- ret = -ENOENT;
- DBG_8723A("connect, matched == false, goto exit\n");
- goto exit;
- }
-
- if (cfg80211_infrastructure_mode(
- padapter, pnetwork->network.ifmode) != _SUCCESS) {
- ret = -EPERM;
- goto exit;
- }
-
- psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
- psecuritypriv->dot11PrivacyAlgrthm = 0;
- psecuritypriv->dot118021XGrpPrivacy = 0;
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
-
- ret = rtw_cfg80211_set_wpa_version(psecuritypriv,
- sme->crypto.wpa_versions);
- if (ret < 0)
- goto exit;
-
- ret = rtw_cfg80211_set_auth_type(psecuritypriv, sme->auth_type);
-
- if (ret < 0)
- goto exit;
-
- DBG_8723A("%s, ie_len =%zu\n", __func__, sme->ie_len);
-
- ret = rtw_cfg80211_set_wpa_ie(padapter, sme->ie, sme->ie_len);
- if (ret < 0)
- goto exit;
-
- if (sme->crypto.n_ciphers_pairwise) {
- ret = rtw_cfg80211_set_cipher(psecuritypriv,
- sme->crypto.ciphers_pairwise[0],
- true);
- if (ret < 0)
- goto exit;
- }
-
- /* For WEP Shared auth */
- if ((psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Shared ||
- psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) &&
- sme->key) {
- struct rtw_wep_key wep_key;
- u8 wep_key_idx, wep_key_len;
- DBG_8723A("%s(): Shared/Auto WEP\n", __func__);
-
- wep_key_idx = sme->key_idx;
- wep_key_len = sme->key_len;
-
- if (wep_key_idx > WEP_KEYS || !wep_key_len ||
- wep_key_len > WLAN_KEY_LEN_WEP104) {
- ret = -EINVAL;
- goto exit;
- }
-
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
-
- memset(&wep_key, 0, sizeof(struct rtw_wep_key));
-
- wep_key.keylen = wep_key_len;
-
- if (wep_key_len == 13) {
- padapter->securitypriv.dot11PrivacyAlgrthm =
- WLAN_CIPHER_SUITE_WEP104;
- padapter->securitypriv.dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_WEP104;
- } else {
- padapter->securitypriv.dot11PrivacyAlgrthm =
- WLAN_CIPHER_SUITE_WEP40;
- padapter->securitypriv.dot118021XGrpPrivacy =
- WLAN_CIPHER_SUITE_WEP40;
- }
-
- memcpy(wep_key.key, (void *)sme->key, wep_key.keylen);
-
- if (rtw_cfg80211_add_wep(padapter, &wep_key, wep_key_idx) !=
- _SUCCESS)
- ret = -EOPNOTSUPP;
-
- if (ret < 0)
- goto exit;
- }
-
- ret = rtw_cfg80211_set_cipher(psecuritypriv,
- sme->crypto.cipher_group, false);
- if (ret < 0)
- goto exit;
-
- if (sme->crypto.n_akm_suites) {
- ret = rtw_cfg80211_set_key_mgt(psecuritypriv,
- sme->crypto.akm_suites[0]);
- if (ret < 0)
- goto exit;
- }
-
- if (psecuritypriv->ndisauthtype > 3)
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- if (rtw_set_auth23a(padapter, psecuritypriv) != _SUCCESS) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* rtw_set_802_11_encryption_mode(padapter,
- padapter->securitypriv.ndisencryptstatus); */
-
- if (rtw_set_ssid(padapter, pnetwork) != _SUCCESS) {
- ret = -EBUSY;
- goto exit;
- }
-
- DBG_8723A("set ssid:dot11AuthAlgrthm =%d, dot11PrivacyAlgrthm =%d, "
- "dot118021XGrpPrivacy =%d\n", psecuritypriv->dot11AuthAlgrthm,
- psecuritypriv->dot11PrivacyAlgrthm,
- psecuritypriv->dot118021XGrpPrivacy);
-
-exit:
-
- DBG_8723A("<=%s, ret %d\n", __func__, ret);
-
- return ret;
-}
-
-static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
- u16 reason_code)
-{
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
-
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
-
- rtw_set_roaming(padapter, 0);
-
- if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
- rtw_scan_abort23a(padapter);
- LeaveAllPowerSaveMode23a(padapter);
- rtw_disassoc_cmd23a(padapter, 500, false);
-
- DBG_8723A("%s...call rtw_indicate_disconnect23a\n", __func__);
-
- padapter->mlmepriv.not_indic_disco = true;
- rtw_indicate_disconnect23a(padapter);
- padapter->mlmepriv.not_indic_disco = false;
-
- rtw_free_assoc_resources23a(padapter, 1);
- }
-
- return 0;
-}
-
-static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
-{
- DBG_8723A("%s\n", __func__);
- return 0;
-}
-
-static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev, int *dbm)
-{
- DBG_8723A("%s\n", __func__);
- *dbm = 12;
- return 0;
-}
-
-inline bool rtw_cfg80211_pwr_mgmt(struct rtw_adapter *adapter)
-{
- struct rtw_wdev_priv *rtw_wdev_priv = wdev_to_priv(adapter->rtw_wdev);
- return rtw_wdev_priv->power_mgmt;
-}
-
-static int cfg80211_rtw_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *ndev,
- bool enabled, int timeout)
-{
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct rtw_wdev_priv *rtw_wdev_priv = wdev_to_priv(padapter->rtw_wdev);
-
- DBG_8723A("%s(%s): enabled:%u, timeout:%d\n",
- __func__, ndev->name, enabled, timeout);
-
- rtw_wdev_priv->power_mgmt = enabled;
-
- if (!enabled)
- LPS_Leave23a(padapter);
-
- return 0;
-}
-
-static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- u8 index, blInserted = false;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s(%s)\n", __func__, netdev->name);
-
- if (is_zero_ether_addr(pmksa->bssid))
- return -EINVAL;
-
- blInserted = false;
-
- /* overwrite PMKID */
- for (index = 0; index < NUM_PMKID_CACHE; index++) {
- if (ether_addr_equal(psecuritypriv->PMKIDList[index].Bssid,
- pmksa->bssid)) {
- /* BSSID is matched, the same AP => rewrite with
- new PMKID. */
- DBG_8723A("%s(%s): BSSID exists in the PMKList.\n",
- __func__, netdev->name);
-
- memcpy(psecuritypriv->PMKIDList[index].PMKID,
- pmksa->pmkid, WLAN_PMKID_LEN);
- psecuritypriv->PMKIDList[index].bUsed = true;
- psecuritypriv->PMKIDIndex = index + 1;
- blInserted = true;
- break;
- }
- }
-
- if (!blInserted) {
- /* Find a new entry */
- DBG_8723A("%s(%s): Use new entry index = %d for this PMKID\n",
- __func__, netdev->name, psecuritypriv->PMKIDIndex);
-
- ether_addr_copy(
- psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
- Bssid, pmksa->bssid);
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
- PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-
- psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed =
- true;
- psecuritypriv->PMKIDIndex++;
- if (psecuritypriv->PMKIDIndex == 16) {
- psecuritypriv->PMKIDIndex = 0;
- }
- }
-
- return 0;
-}
-
-static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- u8 index, bMatched = false;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s(%s)\n", __func__, netdev->name);
-
- for (index = 0; index < NUM_PMKID_CACHE; index++) {
- if (ether_addr_equal(psecuritypriv->PMKIDList[index].Bssid,
- pmksa->bssid)) {
- /* BSSID is matched, the same AP => Remove this PMKID
- information and reset it. */
- eth_zero_addr(psecuritypriv->PMKIDList[index].Bssid);
- memset(psecuritypriv->PMKIDList[index].PMKID, 0x00,
- WLAN_PMKID_LEN);
- psecuritypriv->PMKIDList[index].bUsed = false;
- bMatched = true;
- break;
- }
- }
-
- if (false == bMatched) {
- DBG_8723A("%s(%s): do not have matched BSSID\n", __func__,
- netdev->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy,
- struct net_device *netdev)
-{
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- DBG_8723A("%s(%s)\n", __func__, netdev->name);
-
- memset(&psecuritypriv->PMKIDList[0], 0x00,
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- psecuritypriv->PMKIDIndex = 0;
-
- return 0;
-}
-
-#ifdef CONFIG_8723AU_AP_MODE
-void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
- u8 *pmgmt_frame, uint frame_len)
-{
- s32 freq;
- int channel;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct net_device *ndev = padapter->pnetdev;
-
- DBG_8723A("%s(padapter =%p,%s)\n", __func__, padapter, ndev->name);
-
-#if defined(RTW_USE_CFG80211_STA_EVENT)
- {
- struct station_info sinfo;
- u8 ie_offset;
-
- if (ieee80211_is_assoc_req(hdr->frame_control))
- ie_offset = offsetof(struct ieee80211_mgmt,
- u.assoc_req.variable);
- else /* WIFI_REASSOCREQ */
- ie_offset = offsetof(struct ieee80211_mgmt,
- u.reassoc_req.variable);
-
- sinfo.filled = 0;
- sinfo.assoc_req_ies = pmgmt_frame + ie_offset;
- sinfo.assoc_req_ies_len = frame_len - ie_offset;
- cfg80211_new_sta(ndev, hdr->addr2, &sinfo, GFP_ATOMIC);
- }
-#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
- channel = pmlmeext->cur_channel;
- if (channel <= RTW_CH_MAX_2G_CHANNEL)
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_2GHZ);
- else
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_5GHZ);
-
- cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len,
- 0);
-#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
-}
-
-void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
- unsigned char *da,
- unsigned short reason)
-{
- s32 freq;
- int channel;
- uint frame_len;
- struct ieee80211_mgmt mgmt;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct net_device *ndev = padapter->pnetdev;
-
- DBG_8723A("%s(padapter =%p,%s)\n", __func__, padapter, ndev->name);
-
- memset(&mgmt, 0, sizeof(struct ieee80211_mgmt));
-
-#if defined(RTW_USE_CFG80211_STA_EVENT)
- cfg80211_del_sta(ndev, da, GFP_ATOMIC);
-#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
- channel = pmlmeext->cur_channel;
- if (channel <= RTW_CH_MAX_2G_CHANNEL)
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_2GHZ);
- else
- freq = ieee80211_channel_to_frequency(channel,
- NL80211_BAND_5GHZ);
-
- mgmt.frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
-
- ether_addr_copy(mgmt.da, myid(&padapter->eeprompriv));
- ether_addr_copy(mgmt.sa, da);
- ether_addr_copy(mgmt.bssid, get_my_bssid23a(&pmlmeinfo->network));
-
- mgmt.seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(pmlmeext->mgnt_seq));
- pmlmeext->mgnt_seq++;
-
- mgmt.u.disassoc.reason_code = cpu_to_le16(reason);
-
- frame_len = sizeof(struct ieee80211_hdr_3addr) + 2;
-
- cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, (u8 *)&mgmt, frame_len,
- 0);
-#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
-}
-
-static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
-{
- DBG_8723A("%s\n", __func__);
-
- return 0;
-}
-
-static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
-{
- DBG_8723A("%s\n", __func__);
-
- return 0;
-}
-
-static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
- struct net_device *ndev)
-{
- int ret = 0;
- int rtap_len;
- int qos_len = 0;
- int dot11_hdr_len = 24;
- int snap_len = 6;
- unsigned char *pdata;
- unsigned char src_mac_addr[6];
- unsigned char dst_mac_addr[6];
- struct ieee80211_hdr *dot11_hdr;
- struct ieee80211_radiotap_header *rtap_hdr;
- struct rtw_adapter *padapter = netdev_priv(ndev);
-
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
-
- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
- goto fail;
-
- rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
- if (unlikely(rtap_hdr->it_version))
- goto fail;
-
- rtap_len = ieee80211_get_radiotap_len(skb->data);
- if (unlikely(skb->len < rtap_len))
- goto fail;
-
- if (rtap_len != 14) {
- DBG_8723A("radiotap len (should be 14): %d\n", rtap_len);
- goto fail;
- }
-
- /* Skip the radiotap header */
- skb_pull(skb, rtap_len);
-
- dot11_hdr = (struct ieee80211_hdr *)skb->data;
- /* Check if the QoS bit is set */
- if (ieee80211_is_data(dot11_hdr->frame_control)) {
- /* Check if this is a Wireless Distribution System (WDS) frame
- * which has 4 MAC addresses
- */
- if (ieee80211_is_data_qos(dot11_hdr->frame_control))
- qos_len = IEEE80211_QOS_CTL_LEN;
- if (ieee80211_has_a4(dot11_hdr->frame_control))
- dot11_hdr_len += 6;
-
- memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
- memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
-
- /*
- * Skip the 802.11 header, QoS (if any) and SNAP,
- * but leave spaces for two MAC addresses
- */
- skb_pull(skb, dot11_hdr_len + qos_len + snap_len -
- ETH_ALEN * 2);
- pdata = (unsigned char *)skb->data;
- ether_addr_copy(pdata, dst_mac_addr);
- ether_addr_copy(pdata + ETH_ALEN, src_mac_addr);
-
- DBG_8723A("should be eapol packet\n");
-
- /* Use the real net device to transmit the packet */
- ret = rtw_xmit23a_entry23a(skb, padapter->pnetdev);
-
- return ret;
-
- } else if (ieee80211_is_action(dot11_hdr->frame_control)) {
- struct ieee80211_mgmt *mgmt;
- /* only for action frames */
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- /* u8 category, action, OUI_Subtype, dialogToken = 0; */
- /* unsigned char *frame_body; */
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- u32 len = skb->len;
- u8 category, action;
-
- mgmt = (struct ieee80211_mgmt *)dot11_hdr;
-
- DBG_8723A("RTW_Tx:da=%pM via %s(%s)\n",
- mgmt->da, __func__, ndev->name);
- category = mgmt->u.action.category;
- action = mgmt->u.action.u.wme_action.action_code;
- DBG_8723A("RTW_Tx:category(%u), action(%u)\n",
- category, action);
-
- /* starting alloc mgmt frame to dump it */
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (pmgntframe == NULL)
- goto fail;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
-
- memcpy(pframe, skb->data, len);
- pattrib->pktlen = len;
-
- /* update seq number */
- pmlmeext->mgnt_seq = le16_to_cpu(dot11_hdr->seq_ctrl) >> 4;
- pattrib->seqnum = pmlmeext->mgnt_seq;
- pmlmeext->mgnt_seq++;
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe23a(padapter, pmgntframe);
- }
-
-fail:
-
- dev_kfree_skb(skb);
-
- return 0;
-}
-
-static int
-rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
-{
- DBG_8723A("%s\n", __func__);
-
- return 0;
-}
-
-static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
- .ndo_open = rtw_cfg80211_monitor_if_open,
- .ndo_stop = rtw_cfg80211_monitor_if_close,
- .ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
- .ndo_set_mac_address = rtw_cfg80211_monitor_if_set_mac_address,
-};
-
-static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
- unsigned char name_assign_type,
- struct net_device **ndev)
-{
- int ret = 0;
- struct net_device *mon_ndev = NULL;
- struct wireless_dev *mon_wdev = NULL;
- struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
-
- if (!name) {
- DBG_8723A("%s(%s): without specific name\n",
- __func__, padapter->pnetdev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (pwdev_priv->pmon_ndev) {
- DBG_8723A("%s(%s): monitor interface exist: %s\n", __func__,
- padapter->pnetdev->name, pwdev_priv->pmon_ndev->name);
- ret = -EBUSY;
- goto out;
- }
-
- mon_ndev = alloc_etherdev(sizeof(struct rtw_adapter));
- if (!mon_ndev) {
- DBG_8723A("%s(%s): allocate ndev fail\n", __func__,
- padapter->pnetdev->name);
- ret = -ENOMEM;
- goto out;
- }
-
- mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
- strncpy(mon_ndev->name, name, IFNAMSIZ);
- mon_ndev->name[IFNAMSIZ - 1] = 0;
- mon_ndev->name_assign_type = name_assign_type;
- mon_ndev->destructor = rtw_ndev_destructor;
-
- mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
-
- /* wdev */
- mon_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!mon_wdev) {
- ret = -ENOMEM;
- goto out;
- }
-
- mon_wdev->wiphy = padapter->rtw_wdev->wiphy;
- mon_wdev->netdev = mon_ndev;
- mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
- mon_ndev->ieee80211_ptr = mon_wdev;
-
- ret = register_netdevice(mon_ndev);
- if (ret) {
- goto out;
- }
-
- *ndev = pwdev_priv->pmon_ndev = mon_ndev;
- memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ + 1);
-
-out:
- if (ret) {
- kfree(mon_wdev);
- mon_wdev = NULL;
- }
-
- if (ret && mon_ndev) {
- free_netdev(mon_ndev);
- *ndev = mon_ndev = NULL;
- }
-
- return ret;
-}
-
-static struct wireless_dev *
-cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy, const char *name,
- unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- int ret = 0;
- struct net_device *ndev = NULL;
- struct rtw_adapter *padapter = wiphy_to_adapter(wiphy);
-
- DBG_8723A("%s(%s): wiphy:%s, name:%s, type:%d\n", __func__,
- padapter->pnetdev->name, wiphy_name(wiphy), name, type);
-
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MESH_POINT:
- ret = -ENODEV;
- break;
- case NL80211_IFTYPE_MONITOR:
- ret =
- rtw_cfg80211_add_monitor_if(padapter, (char *)name,
- name_assign_type, &ndev);
- break;
-
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_STATION:
- ret = -ENODEV;
- break;
-
- case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_AP:
- ret = -ENODEV;
- break;
- default:
- ret = -ENODEV;
- DBG_8723A("Unsupported interface type\n");
- break;
- }
-
- DBG_8723A("%s(%s): ndev:%p, ret:%d\n", __func__,
- padapter->pnetdev->name,
- ndev, ret);
-
- return ndev ? ndev->ieee80211_ptr : ERR_PTR(ret);
-}
-
-static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
- struct wireless_dev *wdev)
-{
- struct rtw_wdev_priv *pwdev_priv =
- (struct rtw_wdev_priv *)wiphy_priv(wiphy);
- struct net_device *ndev;
- ndev = wdev ? wdev->netdev : NULL;
-
- if (!ndev)
- goto exit;
-
- unregister_netdevice(ndev);
-
- if (ndev == pwdev_priv->pmon_ndev) {
- pwdev_priv->pmon_ndev = NULL;
- pwdev_priv->ifname_mon[0] = '\0';
- DBG_8723A("%s(%s): remove monitor interface\n",
- __func__, ndev->name);
- }
-
-exit:
- return 0;
-}
-
-static int rtw_add_beacon(struct rtw_adapter *adapter, const u8 *head,
- size_t head_len, const u8 *tail, size_t tail_len)
-{
- int ret = 0;
- u8 *pbuf;
- uint len, ielen, wps_ielen = 0;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct wlan_bssid_ex *bss = &pmlmepriv->cur_network.network;
- const struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)head;
- struct ieee80211_mgmt *tmpmgmt;
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
-
- DBG_8723A("%s beacon_head_len =%zu, beacon_tail_len =%zu\n",
- __func__, head_len, tail_len);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return -EINVAL;
-
- if (head_len < offsetof(struct ieee80211_mgmt, u.beacon.variable))
- return -EINVAL;
-
- pbuf = kzalloc(head_len + tail_len, GFP_KERNEL);
- if (!pbuf)
- return -ENOMEM;
- tmpmgmt = (struct ieee80211_mgmt *)pbuf;
-
- bss->beacon_interval = get_unaligned_le16(&mgmt->u.beacon.beacon_int);
- bss->capability = get_unaligned_le16(&mgmt->u.beacon.capab_info);
- bss->tsf = get_unaligned_le64(&mgmt->u.beacon.timestamp);
-
- /* 24 = beacon header len. */
- memcpy(pbuf, (void *)head, head_len);
- memcpy(pbuf + head_len, (void *)tail, tail_len);
-
- len = head_len + tail_len;
- ielen = len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
- /* check if the wps ie is included */
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- tmpmgmt->u.beacon.variable, ielen))
- DBG_8723A("add bcn, wps_ielen =%d\n", wps_ielen);
-
- /* pbss_network->IEs will not include p2p_ie, wfd ie */
- rtw_ies_remove_ie23a(tmpmgmt->u.beacon.variable, &ielen, 0,
- WLAN_EID_VENDOR_SPECIFIC, P2P_OUI23A, 4);
- rtw_ies_remove_ie23a(tmpmgmt->u.beacon.variable, &ielen, 0,
- WLAN_EID_VENDOR_SPECIFIC, WFD_OUI23A, 4);
-
- len = ielen + offsetof(struct ieee80211_mgmt, u.beacon.variable);
- if (rtw_check_beacon_data23a(adapter, tmpmgmt, len) == _SUCCESS) {
- ret = 0;
- } else {
- ret = -EINVAL;
- }
-
- kfree(pbuf);
-
- return ret;
-}
-
-static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_ap_settings *settings)
-{
- int ret = 0;
- struct rtw_adapter *adapter = wiphy_to_adapter(wiphy);
-
- DBG_8723A("%s(%s): hidden_ssid:%d, auth_type:%d\n",
- __func__, ndev->name, settings->hidden_ssid,
- settings->auth_type);
-
- ret = rtw_add_beacon(adapter, settings->beacon.head,
- settings->beacon.head_len, settings->beacon.tail,
- settings->beacon.tail_len);
-
- adapter->mlmeextpriv.mlmext_info.hidden_ssid_mode =
- settings->hidden_ssid;
-
- if (settings->ssid && settings->ssid_len) {
- struct wlan_bssid_ex *pbss_network =
- &adapter->mlmepriv.cur_network.network;
- struct wlan_bssid_ex *pbss_network_ext =
- &adapter->mlmeextpriv.mlmext_info.network;
-
- memcpy(pbss_network->Ssid.ssid, (void *)settings->ssid,
- settings->ssid_len);
- pbss_network->Ssid.ssid_len = settings->ssid_len;
- memcpy(pbss_network_ext->Ssid.ssid, (void *)settings->ssid,
- settings->ssid_len);
- pbss_network_ext->Ssid.ssid_len = settings->ssid_len;
- }
-
- return ret;
-}
-
-static int cfg80211_rtw_change_beacon(struct wiphy *wiphy,
- struct net_device *ndev,
- struct cfg80211_beacon_data *info)
-{
- int ret = 0;
- struct rtw_adapter *adapter = wiphy_to_adapter(wiphy);
-
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
-
- ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail,
- info->tail_len);
-
- return ret;
-}
-
-static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-
-static int cfg80211_rtw_add_station(struct wiphy *wiphy,
- struct net_device *ndev, const u8 *mac,
- struct station_parameters *params)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
-
- return 0;
-}
-
-static int cfg80211_rtw_del_station(struct wiphy *wiphy,
- struct net_device *ndev,
- struct station_del_parameters *params)
-{
- const u8 *mac = params->mac;
- int ret = 0;
- struct list_head *phead;
- u8 updated = 0;
- struct sta_info *psta, *ptmp;
- struct rtw_adapter *padapter = netdev_priv(ndev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- DBG_8723A("+%s(%s)\n", __func__, ndev->name);
-
- if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true) {
- DBG_8723A("%s, fw_state != FW_LINKED|WIFI_AP_STATE\n",
- __func__);
- return -EINVAL;
- }
-
- if (!mac) {
- DBG_8723A("flush all sta, and cam_entry\n");
-
- flush_all_cam_entry23a(padapter); /* clear CAM */
-
- ret = rtw_sta_flush23a(padapter);
-
- return ret;
- }
-
- DBG_8723A("free sta macaddr=%pM\n", mac);
-
- if (is_broadcast_ether_addr(mac))
- return -EINVAL;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- /* check asoc_queue */
- list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
- if (ether_addr_equal(mac, psta->hwaddr)) {
- if (psta->dot8021xalg == 1 &&
- psta->bpairwise_key_installed == false) {
- DBG_8723A("%s, sta's dot8021xalg = 1 and "
- "key_installed = false\n", __func__);
- } else {
- DBG_8723A("free psta =%p, aid =%d\n", psta,
- psta->aid);
-
- list_del_init(&psta->asoc_list);
- pstapriv->asoc_list_cnt--;
-
- /* spin_unlock_bh(&pstapriv->asoc_list_lock); */
- updated =
- ap_free_sta23a(padapter, psta, true,
- WLAN_REASON_DEAUTH_LEAVING);
- /* spin_lock_bh(&pstapriv->asoc_list_lock); */
-
- psta = NULL;
-
- break;
- }
- }
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- associated_clients_update23a(padapter, updated);
-
- DBG_8723A("-%s(%s)\n", __func__, ndev->name);
-
- return ret;
-}
-
-static int cfg80211_rtw_change_station(struct wiphy *wiphy,
- struct net_device *ndev, const u8 *mac,
- struct station_parameters *params)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-
-static int cfg80211_rtw_dump_station(struct wiphy *wiphy,
- struct net_device *ndev, int idx, u8 *mac,
- struct station_info *sinfo)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
-
- /* TODO: dump scanned queue */
-
- return -ENOENT;
-}
-
-static int cfg80211_rtw_change_bss(struct wiphy *wiphy, struct net_device *ndev,
- struct bss_parameters *params)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- return 0;
-}
-#endif /* CONFIG_8723AU_AP_MODE */
-
-static int _cfg80211_rtw_mgmt_tx(struct rtw_adapter *padapter, u8 tx_ch,
- const u8 *buf, size_t len)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- int ret = _FAIL;
- struct ieee80211_hdr *pwlanhdr;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
- ret = -EFAULT;
- goto exit;
- }
-
- rtw_set_scan_deny(padapter, 1000);
-
- rtw_scan_abort23a(padapter);
-
- if (tx_ch != rtw_get_oper_ch23a(padapter)) {
- if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED))
- pmlmeext->cur_channel = tx_ch;
- set_channel_bwmode23a(padapter, tx_ch,
- HAL_PRIME_CHNL_OFFSET_DONT_CARE,
- HT_CHANNEL_WIDTH_20);
- }
-
- /* allocate a mgmt frame so it can be dumped (transmitted) */
- pmgntframe = alloc_mgtxmitframe23a(pxmitpriv);
- if (!pmgntframe) {
- /* ret = -ENOMEM; */
- ret = _FAIL;
- goto exit;
- }
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib23a(padapter, pattrib);
- pattrib->retry_ctrl = false;
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *) (pmgntframe->buf_addr) + TXDESC_OFFSET;
-
- memcpy(pframe, (void *)buf, len);
- pattrib->pktlen = len;
-
- pwlanhdr = (struct ieee80211_hdr *)pframe;
- /* update seq number */
- pmlmeext->mgnt_seq = le16_to_cpu(pwlanhdr->seq_ctrl) >> 4;
- pattrib->seqnum = pmlmeext->mgnt_seq;
- pmlmeext->mgnt_seq++;
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- ret = dump_mgntframe23a_and_wait_ack23a(padapter, pmgntframe);
-
- if (ret != _SUCCESS)
- DBG_8723A("%s, ack == false\n", __func__);
- else
- DBG_8723A("%s, ack == true\n", __func__);
-
-exit:
-
- DBG_8723A("%s, ret =%d\n", __func__, ret);
-
- return ret;
-}
-
-static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct cfg80211_mgmt_tx_params *params,
- u64 *cookie)
-{
- struct rtw_adapter *padapter =
- (struct rtw_adapter *)wiphy_to_adapter(wiphy);
- int ret = 0;
- int tx_ret;
- u32 dump_limit = RTW_MAX_MGMT_TX_CNT;
- u32 dump_cnt = 0;
- bool ack = true;
- u8 category, action;
- unsigned long start = jiffies;
- size_t len = params->len;
- struct ieee80211_channel *chan = params->chan;
- const u8 *buf = params->buf;
- struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *)buf;
- u8 tx_ch = (u8) ieee80211_frequency_to_channel(chan->center_freq);
-
- if (!ieee80211_is_action(hdr->frame_control))
- return -EINVAL;
-
- /* cookie generation */
- *cookie = (unsigned long)buf;
-
- DBG_8723A("%s(%s): len =%zu, ch =%d\n", __func__,
- padapter->pnetdev->name, len, tx_ch);
-
- /* indicate ack before issue frame to avoid racing with rsp frame */
- cfg80211_mgmt_tx_status(padapter->rtw_wdev, *cookie, buf, len, ack,
- GFP_KERNEL);
-
- DBG_8723A("RTW_Tx:tx_ch =%d, da =%pM\n", tx_ch, hdr->da);
- category = hdr->u.action.category;
- action = hdr->u.action.u.wme_action.action_code;
- DBG_8723A("RTW_Tx:category(%u), action(%u)\n", category, action);
-
- do {
- dump_cnt++;
- tx_ret = _cfg80211_rtw_mgmt_tx(padapter, tx_ch, buf, len);
- } while (dump_cnt < dump_limit && tx_ret != _SUCCESS);
-
- if (tx_ret != _SUCCESS || dump_cnt > 1) {
- DBG_8723A("%s(%s): %s (%d/%d) in %d ms\n",
- __func__, padapter->pnetdev->name,
- tx_ret == _SUCCESS ? "OK" : "FAIL", dump_cnt,
- dump_limit, jiffies_to_msecs(jiffies - start));
- }
-
- return ret;
-}
-
-static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u16 frame_type, bool reg)
-{
- if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
- return;
-
- return;
-}
-
-static struct cfg80211_ops rtw_cfg80211_ops = {
- .change_virtual_intf = cfg80211_rtw_change_iface,
- .add_key = cfg80211_rtw_add_key,
- .get_key = cfg80211_rtw_get_key,
- .del_key = cfg80211_rtw_del_key,
- .set_default_key = cfg80211_rtw_set_default_key,
- .get_station = cfg80211_rtw_get_station,
- .scan = cfg80211_rtw_scan,
- .set_wiphy_params = cfg80211_rtw_set_wiphy_params,
- .connect = cfg80211_rtw_connect,
- .disconnect = cfg80211_rtw_disconnect,
- .join_ibss = cfg80211_rtw_join_ibss,
- .leave_ibss = cfg80211_rtw_leave_ibss,
- .set_tx_power = cfg80211_rtw_set_txpower,
- .get_tx_power = cfg80211_rtw_get_txpower,
- .set_power_mgmt = cfg80211_rtw_set_power_mgmt,
- .set_pmksa = cfg80211_rtw_set_pmksa,
- .del_pmksa = cfg80211_rtw_del_pmksa,
- .flush_pmksa = cfg80211_rtw_flush_pmksa,
-
-#ifdef CONFIG_8723AU_AP_MODE
- .add_virtual_intf = cfg80211_rtw_add_virtual_intf,
- .del_virtual_intf = cfg80211_rtw_del_virtual_intf,
-
- .start_ap = cfg80211_rtw_start_ap,
- .change_beacon = cfg80211_rtw_change_beacon,
- .stop_ap = cfg80211_rtw_stop_ap,
-
- .add_station = cfg80211_rtw_add_station,
- .del_station = cfg80211_rtw_del_station,
- .change_station = cfg80211_rtw_change_station,
- .dump_station = cfg80211_rtw_dump_station,
- .change_bss = cfg80211_rtw_change_bss,
-#endif /* CONFIG_8723AU_AP_MODE */
-
- .mgmt_tx = cfg80211_rtw_mgmt_tx,
- .mgmt_frame_register = cfg80211_rtw_mgmt_frame_register,
-};
-
-static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
- enum nl80211_band band, u8 rf_type)
-{
-
-#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
-#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
-
- ht_cap->ht_supported = true;
-
- ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
-
- /*
- * Maximum length of AMPDU that the STA can receive.
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
- ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-
- /* Minimum MPDU start spacing */
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
-
- ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-
- /*
- * hw->wiphy->bands[NL80211_BAND_2GHZ]
- * rx_mask (RX MCS mask), based on ant_num:
- * if rx_ant == 1: rx_mask[0] = 0xff => MCS0-MCS7
- * if rx_ant == 2: rx_mask[1] = 0xff => MCS8-MCS15
- * if rx_ant >= 3: rx_mask[2] = 0xff
- * if BW_40: rx_mask[4] = 0x01
- * rx_highest: highest supported RX rate
- */
- if (rf_type == RF_1T1R) {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0x00;
- ht_cap->mcs.rx_mask[4] = 0x01;
-
- ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
- } else if ((rf_type == RF_1T2R) || (rf_type == RF_2T2R)) {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
-
- ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
- } else {
- DBG_8723A("%s, error rf_type =%d\n", __func__, rf_type);
- }
-
-}
-
-void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter)
-{
- u8 rf_type;
- struct ieee80211_supported_band *bands;
- struct wireless_dev *pwdev = padapter->rtw_wdev;
- struct wiphy *wiphy = pwdev->wiphy;
-
- rf_type = rtl8723a_get_rf_type(padapter);
-
- DBG_8723A("%s:rf_type =%d\n", __func__, rf_type);
-
- /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
- {
- bands = wiphy->bands[NL80211_BAND_2GHZ];
- if (bands)
- rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- NL80211_BAND_2GHZ,
- rf_type);
- }
-
- /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
- {
- bands = wiphy->bands[NL80211_BAND_5GHZ];
- if (bands)
- rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- NL80211_BAND_5GHZ,
- rf_type);
- }
-}
-
-static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter,
- struct wiphy *wiphy)
-{
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wiphy->max_scan_ssids = RTW_SSID_SCAN_AMOUNT;
- wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- wiphy->max_num_pmkids = RTW_MAX_NUM_PMKIDS;
-
- wiphy->max_remain_on_channel_duration =
- RTW_MAX_REMAIN_ON_CHANNEL_DURATION;
-
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
-#ifdef CONFIG_8723AU_AP_MODE
- BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR) |
-#endif
- 0;
-
-#ifdef CONFIG_8723AU_AP_MODE
- wiphy->mgmt_stypes = rtw_cfg80211_default_mgmt_stypes;
-#endif /* CONFIG_8723AU_AP_MODE */
-
- wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
-
- /*
- wiphy->iface_combinations = &rtw_combinations;
- wiphy->n_iface_combinations = 1;
- */
-
- wiphy->cipher_suites = rtw_cipher_suites;
- wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
-
- /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
- wiphy->bands[NL80211_BAND_2GHZ] =
- rtw_spt_band_alloc(NL80211_BAND_2GHZ);
- /* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
- wiphy->bands[NL80211_BAND_5GHZ] =
- rtw_spt_band_alloc(NL80211_BAND_5GHZ);
-
- wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
- wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
-
- if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
- wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
- else
- wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-}
-
-int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev)
-{
- int ret = 0;
- struct wiphy *wiphy;
- struct wireless_dev *wdev;
- struct rtw_wdev_priv *pwdev_priv;
- struct net_device *pnetdev = padapter->pnetdev;
-
- DBG_8723A("%s(padapter =%p)\n", __func__, padapter);
-
- /* wiphy */
- wiphy = wiphy_new(&rtw_cfg80211_ops, sizeof(struct rtw_wdev_priv));
- if (!wiphy) {
- DBG_8723A("Couldn't allocate wiphy device\n");
- ret = -ENOMEM;
- goto exit;
- }
-
- /* wdev */
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- ret = -ENOMEM;
- goto free_wiphy;
- }
-
- set_wiphy_dev(wiphy, dev);
- rtw_cfg80211_preinit_wiphy(padapter, wiphy);
-
- ret = wiphy_register(wiphy);
- if (ret < 0) {
- DBG_8723A("Couldn't register wiphy device\n");
- goto free_wdev;
- }
-
- wdev->wiphy = wiphy;
- wdev->netdev = pnetdev;
- /* wdev->iftype = NL80211_IFTYPE_STATION; */
- /* for rtw_setopmode_cmd23a() in cfg80211_rtw_change_iface() */
- wdev->iftype = NL80211_IFTYPE_MONITOR;
- padapter->rtw_wdev = wdev;
- pnetdev->ieee80211_ptr = wdev;
-
- /* init pwdev_priv */
- pwdev_priv = wdev_to_priv(wdev);
- pwdev_priv->rtw_wdev = wdev;
- pwdev_priv->pmon_ndev = NULL;
- pwdev_priv->ifname_mon[0] = '\0';
- pwdev_priv->padapter = padapter;
- pwdev_priv->scan_request = NULL;
- spin_lock_init(&pwdev_priv->scan_req_lock);
-
- pwdev_priv->p2p_enabled = false;
-
- if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
- pwdev_priv->power_mgmt = true;
- else
- pwdev_priv->power_mgmt = false;
-
- return ret;
-free_wdev:
- kfree(wdev);
-free_wiphy:
- wiphy_free(wiphy);
-exit:
- return ret;
-}
-
-void rtw_wdev_free(struct wireless_dev *wdev)
-{
- DBG_8723A("%s(wdev =%p)\n", __func__, wdev);
-
- if (!wdev)
- return;
-
- kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
- kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]);
-
- wiphy_free(wdev->wiphy);
-
- kfree(wdev);
-}
-
-void rtw_wdev_unregister(struct wireless_dev *wdev)
-{
- struct rtw_wdev_priv *pwdev_priv;
-
- DBG_8723A("%s(wdev =%p)\n", __func__, wdev);
-
- if (!wdev)
- return;
-
- pwdev_priv = wdev_to_priv(wdev);
-
- rtw_cfg80211_indicate_scan_done(pwdev_priv, true);
-
- if (pwdev_priv->pmon_ndev) {
- DBG_8723A("%s, unregister monitor interface\n", __func__);
- unregister_netdev(pwdev_priv->pmon_ndev);
- }
-
- wiphy_unregister(wdev->wiphy);
-}
diff --git a/drivers/staging/rtl8723au/os_dep/mlme_linux.c b/drivers/staging/rtl8723au/os_dep/mlme_linux.c
deleted file mode 100644
index ca24369f1208..000000000000
--- a/drivers/staging/rtl8723au/os_dep/mlme_linux.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#define _MLME_OSDEP_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <mlme_osdep.h>
-
-static struct rt_pmkid_list backupPMKIDList[NUM_PMKID_CACHE];
-
-void rtw_reset_securitypriv23a(struct rtw_adapter *adapter)
-{
- u8 backupPMKIDIndex = 0;
- u8 backupTKIPCountermeasure = 0x00;
- unsigned long backupTKIPcountermeasure_time = 0;
-
- if (adapter->securitypriv.dot11AuthAlgrthm ==
- dot11AuthAlgrthm_8021X) { /* 802.1x */
- /* We have to backup the PMK information for WiFi PMK
- * Caching test item.
- * Backup the btkip_countermeasure information.
- * When the countermeasure is trigger, the driver have to
- * disconnect with AP for 60 seconds.
- */
- memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
- backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure;
- backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time;
-
- memset((unsigned char *)&adapter->securitypriv, 0,
- sizeof (struct security_priv));
- /* Restore the PMK information to securitypriv structure
- * for the following connection.
- */
- memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- adapter->securitypriv.PMKIDIndex = backupPMKIDIndex;
- adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure;
- adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time;
-
- adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
- } else { /* reset values in securitypriv */
- struct security_priv *psec_priv = &adapter->securitypriv;
-
- /* open system */
- psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- psec_priv->dot11PrivacyAlgrthm = 0;
- psec_priv->dot11PrivacyKeyIndex = 0;
-
- psec_priv->dot118021XGrpPrivacy = 0;
- psec_priv->dot118021XGrpKeyid = 1;
-
- psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
- psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
- }
-}
-
-void rtw_os_indicate_disconnect23a(struct rtw_adapter *adapter)
-{
- /* Do this first, for the tx-broadcast-pkt-after-disconnection issue! */
- netif_carrier_off(adapter->pnetdev);
-
- rtw_cfg80211_indicate_disconnect(adapter);
-
- rtw_reset_securitypriv23a(adapter);
-}
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
deleted file mode 100644
index b8848c25beb4..000000000000
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ /dev/null
@@ -1,852 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _OS_INTFS_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <xmit_osdep.h>
-#include <recv_osdep.h>
-#include <hal_intf.h>
-#include <rtw_version.h>
-
-#include <rtl8723a_hal.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
-MODULE_AUTHOR("Realtek Semiconductor Corp.");
-MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
-MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
-MODULE_VERSION(DRIVERVERSION);
-MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
-MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
-MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
-
-/* module param defaults */
-static int rtw_chip_version;
-static int rtw_rfintfs = HWPI;
-static int rtw_debug = 1;
-
-static int rtw_channel = 1;/* ad-hoc support requirement */
-static int rtw_wireless_mode = WIRELESS_11BG_24N;
-static int rtw_vrtl_carrier_sense = AUTO_VCS;
-static int rtw_vcs_type = RTS_CTS;/* */
-static int rtw_rts_thresh = 2347;/* */
-static int rtw_frag_thresh = 2346;/* */
-static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
-static int rtw_scan_mode = 1;/* active, passive */
-static int rtw_adhoc_tx_pwr = 1;
-static int rtw_soft_ap;
-static int rtw_power_mgnt = 1;
-static int rtw_ips_mode = IPS_NORMAL;
-
-static int rtw_smart_ps = 2;
-
-module_param(rtw_ips_mode, int, 0644);
-MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
-
-static int rtw_long_retry_lmt = 7;
-static int rtw_short_retry_lmt = 7;
-static int rtw_busy_thresh = 40;
-static int rtw_ack_policy = NORMAL_ACK;
-
-static int rtw_acm_method;/* 0:By SW 1:By HW. */
-
-static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
-static int rtw_uapsd_enable;
-
-static int rtw_ht_enable = 1;
-/* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g */
-static int rtw_cbw40_enable = 3;
-static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
-/* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g; default is to enable
- * 2.4GHz only, due to an IOT issue with Buffalo's AP at 5GHz
- */
-static int rtw_rx_stbc = 1;
-static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
-
-/* Use 2-path Tx to transmit MCS0~7 and legacy-mode rates */
-static int rtw_lowrate_two_xmit = 1;
-
-/* int rf_config = RF_1T2R; 1T2R */
-static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
-static int rtw_low_power;
-static int rtw_wifi_spec;
-static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
-
-#ifdef CONFIG_8723AU_BT_COEXIST
-static int rtw_btcoex_enable = 1;
-static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */
-/* 0:Idle, 1:Non-SCO, 2:SCO, 3:From Counter, 4:Busy, 5:OtherBusy */
-static int rtw_bt_sco = 3;
-/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
-static int rtw_bt_ampdu = 1;
-#endif
-
-/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-static int rtw_AcceptAddbaReq = true;
-
-static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
-static int rtw_antdiv_type; /* 0:decide by efuse */
-
-static int rtw_enusbss;/* 0:disable, 1:enable */
-
-static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
-
-static int rtw_hwpwrp_detect; /* HW power pin detect, 0:disable, 1:enable */
-
-static int rtw_hw_wps_pbc = 1;
-
-static int rtw_80211d;
-
-static int rtw_regulatory_id = 0xff;/* Regulatory tab id, 0xff = follow efuse's setting */
-
-module_param(rtw_regulatory_id, int, 0644);
-
-static char *ifname = "wlan%d";
-module_param(ifname, charp, 0644);
-MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
-
-static char *if2name = "wlan%d";
-module_param(if2name, charp, 0644);
-MODULE_PARM_DESC(if2name, "The default name to allocate for second interface");
-
-module_param(rtw_channel_plan, int, 0644);
-module_param(rtw_chip_version, int, 0644);
-module_param(rtw_rfintfs, int, 0644);
-module_param(rtw_channel, int, 0644);
-module_param(rtw_wmm_enable, int, 0644);
-module_param(rtw_vrtl_carrier_sense, int, 0644);
-module_param(rtw_vcs_type, int, 0644);
-module_param(rtw_busy_thresh, int, 0644);
-module_param(rtw_ht_enable, int, 0644);
-module_param(rtw_cbw40_enable, int, 0644);
-module_param(rtw_ampdu_enable, int, 0644);
-module_param(rtw_rx_stbc, int, 0644);
-module_param(rtw_ampdu_amsdu, int, 0644);
-
-module_param(rtw_lowrate_two_xmit, int, 0644);
-
-module_param(rtw_rf_config, int, 0644);
-module_param(rtw_power_mgnt, int, 0644);
-module_param(rtw_smart_ps, int, 0644);
-module_param(rtw_low_power, int, 0644);
-module_param(rtw_wifi_spec, int, 0644);
-
-module_param(rtw_antdiv_cfg, int, 0644);
-
-module_param(rtw_enusbss, int, 0644);
-module_param(rtw_hwpdn_mode, int, 0644);
-module_param(rtw_hwpwrp_detect, int, 0644);
-
-module_param(rtw_hw_wps_pbc, int, 0644);
-
-static uint rtw_max_roaming_times = 2;
-module_param(rtw_max_roaming_times, uint, 0644);
-MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
-
-module_param(rtw_80211d, int, 0644);
-MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
-
-#ifdef CONFIG_8723AU_BT_COEXIST
-module_param(rtw_btcoex_enable, int, 0644);
-MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism");
-#endif
-
-static uint rtw_notch_filter;
-module_param(rtw_notch_filter, uint, 0644);
-MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
-module_param_named(debug, rtw_debug, int, 0444);
-MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
-
-static int netdev_close(struct net_device *pnetdev);
-
-static void loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
-{
- struct registry_priv *registry_par = &padapter->registrypriv;
-
- GlobalDebugLevel23A = rtw_debug;
- registry_par->chip_version = (u8)rtw_chip_version;
- registry_par->rfintfs = (u8)rtw_rfintfs;
- memcpy(registry_par->ssid.ssid, "ANY", 3);
- registry_par->ssid.ssid_len = 3;
- registry_par->channel = (u8)rtw_channel;
- registry_par->wireless_mode = (u8)rtw_wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
- registry_par->vcs_type = (u8)rtw_vcs_type;
- registry_par->rts_thresh = (u16)rtw_rts_thresh;
- registry_par->frag_thresh = (u16)rtw_frag_thresh;
- registry_par->preamble = (u8)rtw_preamble;
- registry_par->scan_mode = (u8)rtw_scan_mode;
- registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
- registry_par->soft_ap = (u8)rtw_soft_ap;
- registry_par->smart_ps = (u8)rtw_smart_ps;
- registry_par->power_mgnt = (u8)rtw_power_mgnt;
- registry_par->ips_mode = (u8)rtw_ips_mode;
- registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
- registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
- registry_par->busy_thresh = (u16)rtw_busy_thresh;
- registry_par->ack_policy = (u8)rtw_ack_policy;
- registry_par->acm_method = (u8)rtw_acm_method;
- /* UAPSD */
- registry_par->wmm_enable = (u8)rtw_wmm_enable;
- registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
- registry_par->ht_enable = (u8)rtw_ht_enable;
- registry_par->cbw40_enable = (u8)rtw_cbw40_enable;
- registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
- registry_par->rx_stbc = (u8)rtw_rx_stbc;
- registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
- registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
- registry_par->rf_config = (u8)rtw_rf_config;
- registry_par->low_power = (u8)rtw_low_power;
- registry_par->wifi_spec = (u8)rtw_wifi_spec;
- registry_par->channel_plan = (u8)rtw_channel_plan;
-#ifdef CONFIG_8723AU_BT_COEXIST
- registry_par->btcoex = (u8)rtw_btcoex_enable;
- registry_par->bt_iso = (u8)rtw_bt_iso;
- registry_par->bt_sco = (u8)rtw_bt_sco;
- registry_par->bt_ampdu = (u8)rtw_bt_ampdu;
-#endif
- registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
- registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
- registry_par->antdiv_type = (u8)rtw_antdiv_type;
-
- /* 0:disable, 1:enable, 2:by EFUSE config */
- registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;
- /* 0:disable, 1:enable */
- registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;
- registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
- registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
- registry_par->enable80211d = (u8)rtw_80211d;
- snprintf(registry_par->ifname, 16, "%s", ifname);
- snprintf(registry_par->if2name, 16, "%s", if2name);
- registry_par->notch_filter = (u8)rtw_notch_filter;
- registry_par->regulatory_tid = (u8)rtw_regulatory_id;
-}
-
-static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
-{
- struct rtw_adapter *padapter = netdev_priv(pnetdev);
- struct sockaddr *addr = p;
-
- if (!padapter->bup)
- ether_addr_copy(padapter->eeprompriv.mac_addr, addr->sa_data);
- return 0;
-}
-
-static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
-{
- struct rtw_adapter *padapter = netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- padapter->stats.tx_packets = pxmitpriv->tx_pkts;
- padapter->stats.rx_packets = precvpriv->rx_pkts;
- padapter->stats.tx_dropped = pxmitpriv->tx_drop;
- padapter->stats.rx_dropped = precvpriv->rx_drop;
- padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
- padapter->stats.rx_bytes = precvpriv->rx_bytes;
-
- return &padapter->stats;
-}
-
-/*
- * AC to queue mapping
- *
- * AC_VO -> queue 0
- * AC_VI -> queue 1
- * AC_BE -> queue 2
- * AC_BK -> queue 3
- */
-static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
-
-/* Given a data frame determine the 802.1p/1d tag to use. */
-static u32 rtw_classify8021d(struct sk_buff *skb)
-{
- u32 dscp;
-
- /* skb->priority values from 256->263 are magic values to
- * directly indicate a specific 802.1d priority. This is used
- * to allow 802.1d priority to be passed directly in from VLAN
- * tags, etc.
- */
- if (skb->priority >= 256 && skb->priority <= 263)
- return skb->priority - 256;
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- dscp = ip_hdr(skb)->tos & 0xfc;
- break;
- default:
- return 0;
- }
- return dscp >> 5;
-}
-
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- struct rtw_adapter *padapter = netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- skb->priority = rtw_classify8021d(skb);
-
- if (pmlmepriv->acm_mask != 0)
- skb->priority = qos_acm23a(pmlmepriv->acm_mask, skb->priority);
- return rtw_1d_to_queue[skb->priority];
-}
-
-u16 rtw_recv_select_queue23a(struct sk_buff *skb)
-{
- struct iphdr *piphdr;
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned int dscp;
- u16 eth_type = get_unaligned_be16(&eth->h_proto);
- u32 priority;
- u8 *pdata = skb->data;
-
- switch (eth_type) {
- case ETH_P_IP:
- piphdr = (struct iphdr *)(pdata + ETH_HLEN);
- dscp = piphdr->tos & 0xfc;
- priority = dscp >> 5;
- break;
- default:
- priority = 0;
- }
- return rtw_1d_to_queue[priority];
-}
-
-static const struct net_device_ops rtw_netdev_ops = {
- .ndo_open = netdev_open23a,
- .ndo_stop = netdev_close,
- .ndo_start_xmit = rtw_xmit23a_entry23a,
- .ndo_select_queue = rtw_select_queue,
- .ndo_set_mac_address = rtw_net_set_mac_address,
- .ndo_get_stats = rtw_net_get_stats,
-};
-
-int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname)
-{
- if (dev_alloc_name(pnetdev, ifname) < 0) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "dev_alloc_name, fail!\n");
- }
- netif_carrier_off(pnetdev);
- return 0;
-}
-
-static const struct device_type wlan_type = {
- .name = "wlan",
-};
-
-struct net_device *rtw_init_netdev23a(struct rtw_adapter *old_padapter)
-{
- struct rtw_adapter *padapter;
- struct net_device *pnetdev;
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "+init_net_dev\n");
-
- pnetdev = alloc_etherdev_mq(sizeof(struct rtw_adapter), 4);
- if (!pnetdev)
- return NULL;
-
- pnetdev->dev.type = &wlan_type;
- padapter = netdev_priv(pnetdev);
- padapter->pnetdev = pnetdev;
-
- DBG_8723A("register rtw_netdev_ops to netdev_ops\n");
- pnetdev->netdev_ops = &rtw_netdev_ops;
-
- pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
-
- loadparam(padapter, pnetdev);
- return pnetdev;
-}
-
-static int rtw_init_default_value(struct rtw_adapter *padapter)
-{
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- /* xmit_priv */
- pxmitpriv->vcs = pregistrypriv->vcs_type;
- /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */
- pxmitpriv->frag_len = pregistrypriv->frag_thresh;
-
- /* mlme_priv */
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
- pmlmepriv->scan_mode = SCAN_ACTIVE;
-
- /* ht_priv */
- pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
-
- /* security_priv */
- psecuritypriv->binstallGrpkey = 0;
-
- /* open system */
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
- psecuritypriv->dot11PrivacyAlgrthm = 0;
-
- psecuritypriv->dot11PrivacyKeyIndex = 0;
-
- psecuritypriv->dot118021XGrpPrivacy = 0;
- psecuritypriv->dot118021XGrpKeyid = 1;
-
- psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
- psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
-
- /* registry_priv */
- rtw_init_registrypriv_dev_network23a(padapter);
- rtw_update_registrypriv_dev_network23a(padapter);
-
- /* hal_priv */
- rtl8723a_init_default_value(padapter);
-
- /* misc. */
- padapter->bReadPortCancel = false;
- padapter->bWritePortCancel = false;
- return _SUCCESS;
-}
-
-int rtw_reset_drv_sw23a(struct rtw_adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-
- /* hal_priv */
- rtl8723a_init_default_value(padapter);
- padapter->bReadPortCancel = false;
- padapter->bWritePortCancel = false;
- pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
-
- padapter->xmitpriv.tx_pkts = 0;
- padapter->recvpriv.rx_pkts = 0;
-
- pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
-
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
-
- rtw_sreset_reset_value(padapter);
- pwrctrlpriv->pwr_state_check_cnts = 0;
-
- /* mlmeextpriv */
- padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
-
- rtw_set_signal_stat_timer(&padapter->recvpriv);
- return _SUCCESS;
-}
-
-int rtw_init_drv_sw23a(struct rtw_adapter *padapter)
-{
- int ret8 = _SUCCESS;
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "+rtw_init_drv_sw23a\n");
-
- if (rtw_init_cmd_priv23a(&padapter->cmdpriv) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "Can't init cmd_priv\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- padapter->cmdpriv.padapter = padapter;
-
- if (rtw_init_evt_priv23a(&padapter->evtpriv) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "Can't init evt_priv\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- if (rtw_init_mlme_priv23a(padapter) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "Can't init mlme_priv\n");
- ret8 = _FAIL;
- goto exit;
- }
-
-
- if (init_mlme_ext_priv23a(padapter) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "Can't init mlme_ext_priv\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- if (_rtw_init_xmit_priv23a(&padapter->xmitpriv, padapter) == _FAIL) {
- DBG_8723A("Can't _rtw_init_xmit_priv23a\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- if (_rtw_init_recv_priv23a(&padapter->recvpriv, padapter) == _FAIL) {
- DBG_8723A("Can't _rtw_init_recv_priv23a\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- if (_rtw_init_sta_priv23a(&padapter->stapriv) == _FAIL) {
- DBG_8723A("Can't _rtw_init_sta_priv23a\n");
- ret8 = _FAIL;
- goto exit;
- }
-
- padapter->stapriv.padapter = padapter;
- padapter->setband = GHZ24_50;
- rtw_init_bcmc_stainfo23a(padapter);
-
- rtw_init_pwrctrl_priv23a(padapter);
-
- ret8 = rtw_init_default_value(padapter);
-
- rtl8723a_init_dm_priv(padapter);
-
- rtw_sreset_init(padapter);
-
-exit:
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "-rtw_init_drv_sw23a\n");
- return ret8;
-}
-
-void rtw_cancel_all_timer23a(struct rtw_adapter *padapter)
-{
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- "+rtw_cancel_all_timer23a\n");
-
- del_timer_sync(&padapter->mlmepriv.assoc_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- "%s:cancel association timer complete!\n", __func__);
-
- del_timer_sync(&padapter->mlmepriv.scan_to_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- "%s:cancel scan_to_timer!\n", __func__);
-
- del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- "%s:cancel dynamic_chk_timer!\n", __func__);
-
- del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
-
- del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer);
- rtw_clear_scan_deny(padapter);
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- "%s:cancel set_scan_deny_timer!\n", __func__);
-
- del_timer_sync(&padapter->recvpriv.signal_stat_timer);
-}
-
-int rtw_free_drv_sw23a(struct rtw_adapter *padapter)
-{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "==>rtw_free_drv_sw23a\n");
-
- free_mlme_ext_priv23a(&padapter->mlmeextpriv);
-
- rtw_free_evt_priv23a(&padapter->evtpriv);
-
- rtw_free_mlme_priv23a(&padapter->mlmepriv);
-
- _rtw_free_xmit_priv23a(&padapter->xmitpriv);
-
- /* will free bcmc_stainfo here */
- _rtw_free_sta_priv23a(&padapter->stapriv);
-
- _rtw_free_recv_priv23a(&padapter->recvpriv);
-
- rtw_free_pwrctrl_priv(padapter);
-
- kfree(padapter->HalData);
- padapter->HalData = NULL;
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "-rtw_free_drv_sw23a\n");
- return _SUCCESS;
-}
-
-static int _rtw_drv_register_netdev(struct rtw_adapter *padapter, char *name)
-{
- struct net_device *pnetdev = padapter->pnetdev;
- int ret = _SUCCESS;
-
- /* alloc netdev name */
- rtw_init_netdev23a_name23a(pnetdev, name);
-
- ether_addr_copy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr);
-
- /* Tell the network stack we exist */
- if (register_netdev(pnetdev)) {
- DBG_8723A("%s(%s): Failed!\n", __func__, pnetdev->name);
- ret = _FAIL;
- goto error_register_netdev;
- }
- DBG_8723A("%s, MAC Address (if%d) = %pM\n",
- __func__, padapter->iface_id + 1, pnetdev->dev_addr);
- return ret;
-
-error_register_netdev:
-
- if (padapter->iface_id > IFACE_ID0) {
- rtw_free_drv_sw23a(padapter);
-
- free_netdev(pnetdev);
- }
- return ret;
-}
-
-int rtw_drv_register_netdev(struct rtw_adapter *if1)
-{
- struct dvobj_priv *dvobj = if1->dvobj;
- int i, status = _SUCCESS;
-
- if (dvobj->iface_nums >= IFACE_ID_MAX) {
- status = _FAIL; /* -EINVAL */
- goto exit;
- }
-
- for (i = 0; i < dvobj->iface_nums; i++) {
- struct rtw_adapter *padapter = dvobj->padapters[i];
-
- if (padapter) {
- char *name;
-
- if (padapter->iface_id == IFACE_ID0)
- name = if1->registrypriv.ifname;
- else if (padapter->iface_id == IFACE_ID1)
- name = if1->registrypriv.if2name;
- else
- name = "wlan%d";
- status = _rtw_drv_register_netdev(padapter, name);
- if (status != _SUCCESS)
- break;
- }
- }
-
-exit:
- return status;
-}
-
-int netdev_open23a(struct net_device *pnetdev)
-{
- struct rtw_adapter *padapter = netdev_priv(pnetdev);
- struct pwrctrl_priv *pwrctrlpriv;
- int ret = 0;
- int status;
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "+871x_drv - dev_open\n");
- DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup);
-
- mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex);
-
- pwrctrlpriv = &padapter->pwrctrlpriv;
-
- if (!padapter->bup) {
- padapter->bDriverStopped = false;
- padapter->bSurpriseRemoved = false;
- padapter->bCardDisableWOHSM = false;
-
- status = rtl8723au_hal_init(padapter);
- if (status == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "rtl871x_hal_init(): Can't init h/w!\n");
- goto netdev_open23a_error;
- }
-
- DBG_8723A("MAC Address = %pM\n", pnetdev->dev_addr);
-
- if (init_hw_mlme_ext23a(padapter) == _FAIL) {
- DBG_8723A("can't init mlme_ext_priv\n");
- goto netdev_open23a_error;
- }
-
- rtl8723au_inirp_init(padapter);
-
- rtw_cfg80211_init_wiphy(padapter);
-
- padapter->bup = true;
- }
- padapter->net_closed = false;
-
- mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
- jiffies + msecs_to_jiffies(2000));
-
- padapter->pwrctrlpriv.bips_processing = false;
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
-
- /* netif_carrier_on(pnetdev); call this func when
- * rtw23a_joinbss_event_cb() returns success */
- if (!rtw_netif_queue_stopped(pnetdev))
- netif_tx_start_all_queues(pnetdev);
- else
- netif_tx_wake_all_queues(pnetdev);
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "-871x_drv - dev_open\n");
- DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup);
-exit:
- mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex);
- return ret;
-
-netdev_open23a_error:
- padapter->bup = false;
-
- netif_carrier_off(pnetdev);
- netif_tx_stop_all_queues(pnetdev);
-
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "-871x_drv - dev_open, fail!\n");
- DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);
-
- ret = -1;
- goto exit;
-}
-
-static int ips_netdrv_open(struct rtw_adapter *padapter)
-{
- int status = _SUCCESS;
-
- padapter->net_closed = false;
- DBG_8723A("===> %s.........\n", __func__);
-
- padapter->bDriverStopped = false;
- padapter->bSurpriseRemoved = false;
- padapter->bCardDisableWOHSM = false;
-
- status = rtl8723au_hal_init(padapter);
- if (status == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "ips_netdrv_open(): Can't init h/w!\n");
- goto netdev_open23a_error;
- }
-
- rtl8723au_inirp_init(padapter);
-
- rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
- mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
- jiffies + msecs_to_jiffies(5000));
-
- return _SUCCESS;
-
-netdev_open23a_error:
- /* padapter->bup = false; */
- DBG_8723A("-ips_netdrv_open - drv_open failure, bup =%d\n",
- padapter->bup);
-
- return _FAIL;
-}
-
-int rtw_ips_pwr_up23a(struct rtw_adapter *padapter)
-{
- int result;
- unsigned long start_time = jiffies;
-
- DBG_8723A("===> rtw_ips_pwr_up23a..............\n");
- rtw_reset_drv_sw23a(padapter);
-
- result = ips_netdrv_open(padapter);
-
- DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n",
- jiffies_to_msecs(jiffies - start_time));
- return result;
-}
-
-void rtw_ips_pwr_down23a(struct rtw_adapter *padapter)
-{
- unsigned long start_time = jiffies;
-
- DBG_8723A("===> rtw_ips_pwr_down23a...................\n");
-
- padapter->bCardDisableWOHSM = true;
- padapter->net_closed = true;
-
- rtw_ips_dev_unload23a(padapter);
- padapter->bCardDisableWOHSM = false;
- DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n",
- jiffies_to_msecs(jiffies - start_time));
-}
-
-void rtw_ips_dev_unload23a(struct rtw_adapter *padapter)
-{
- rtl8723a_fifo_cleanup(padapter);
-
- rtl8723a_usb_intf_stop(padapter);
-
- /* s5. */
- if (!padapter->bSurpriseRemoved)
- rtl8723au_hal_deinit(padapter);
-}
-
-int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal)
-{
- int status;
-
- if (bnormal)
- status = netdev_open23a(pnetdev);
- else
- status = (_SUCCESS == ips_netdrv_open(netdev_priv(pnetdev))) ?
- (0) : (-1);
-
- return status;
-}
-
-static int netdev_close(struct net_device *pnetdev)
-{
- struct rtw_adapter *padapter = netdev_priv(pnetdev);
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "+871x_drv - drv_close\n");
-
- padapter->net_closed = true;
-
- if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
- DBG_8723A("(2)871x_drv - drv_close, bup =%d, "
- "hw_init_completed =%d\n", padapter->bup,
- padapter->hw_init_completed);
-
- /* s1. */
- if (pnetdev) {
- if (!rtw_netif_queue_stopped(pnetdev))
- netif_tx_stop_all_queues(pnetdev);
- }
-
- /* s2. */
- LeaveAllPowerSaveMode23a(padapter);
- rtw_disassoc_cmd23a(padapter, 500, false);
- /* s2-2. indicate disconnect to os */
- rtw_indicate_disconnect23a(padapter);
- /* s2-3. */
- rtw_free_assoc_resources23a(padapter, 1);
- /* s2-4. */
- rtw_free_network_queue23a(padapter);
- }
-
- rtw_scan_abort23a(padapter);
-
- RT_TRACE(_module_os_intfs_c_, _drv_info_, "-871x_drv - drv_close\n");
- DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
-
- return 0;
-}
-
-void rtw_ndev_destructor(struct net_device *ndev)
-{
- DBG_8723A("%s(%s)\n", __func__, ndev->name);
- kfree(ndev->ieee80211_ptr);
- free_netdev(ndev);
-}
-
-void _rtw_init_queue23a(struct rtw_queue *pqueue)
-{
- INIT_LIST_HEAD(&pqueue->queue);
- spin_lock_init(&pqueue->lock);
-}
diff --git a/drivers/staging/rtl8723au/os_dep/recv_linux.c b/drivers/staging/rtl8723au/os_dep/recv_linux.c
deleted file mode 100644
index 084b506ae161..000000000000
--- a/drivers/staging/rtl8723au/os_dep/recv_linux.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RECV_OSDEP_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <wifi.h>
-#include <recv_osdep.h>
-
-#include <osdep_intf.h>
-
-#include <usb_ops.h>
-
-void rtw_handle_tkip_mic_err23a(struct rtw_adapter *padapter, u8 bgroup)
-{
- enum nl80211_key_type key_type = 0;
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- unsigned long cur_time;
-
- if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = jiffies;
- } else {
- cur_time = jiffies;
-
- if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
- psecuritypriv->btkip_countermeasure = true;
- psecuritypriv->last_mic_err_time = 0;
- psecuritypriv->btkip_countermeasure_time = cur_time;
- } else {
- psecuritypriv->last_mic_err_time = jiffies;
- }
- }
-
- if (bgroup)
- key_type |= NL80211_KEYTYPE_GROUP;
- else
- key_type |= NL80211_KEYTYPE_PAIRWISE;
-
- cfg80211_michael_mic_failure(padapter->pnetdev,
- (u8 *)&pmlmepriv->assoc_bssid[0],
- key_type, -1, NULL, GFP_ATOMIC);
-
- memset(&ev, 0x00, sizeof(ev));
- if (bgroup)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- ether_addr_copy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0]);
-
- memset(&wrqu, 0x00, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
-}
-
-int rtw_recv_indicatepkt23a(struct rtw_adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct recv_priv *precvpriv;
- struct sk_buff *skb;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- precvpriv = &padapter->recvpriv;
-
- skb = precv_frame->pkt;
- if (!skb) {
- RT_TRACE(_module_recv_osdep_c_, _drv_err_,
- "rtw_recv_indicatepkt23a():skb == NULL!!!!\n");
- goto _recv_indicatepkt_drop;
- }
-
- RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- "rtw_recv_indicatepkt23a():skb != NULL !!!\n");
- RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- "rtw_recv_indicatepkt23a():precv_frame->hdr.rx_data =%p\n",
- precv_frame->pkt->data);
- RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- "skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
- skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb), skb->len);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
- struct sk_buff *pskb2 = NULL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- int bmcast = is_multicast_ether_addr(pattrib->dst);
-
- /* DBG_8723A("bmcast =%d\n", bmcast); */
-
- if (!ether_addr_equal(pattrib->dst,
- myid(&padapter->eeprompriv))) {
- /* DBG_8723A("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo23a(padapter);
- pskb2 = skb_clone(skb, GFP_ATOMIC);
- } else {
- psta = rtw_get_stainfo23a(pstapriv, pattrib->dst);
- }
-
- if (psta) {
- struct net_device *pnetdev = padapter->pnetdev;
-
- /* DBG_8723A("directly forwarding to the rtw_xmit23a_entry23a\n"); */
-
- /* skb->ip_summed = CHECKSUM_NONE; */
- skb->dev = pnetdev;
- skb_set_queue_mapping(skb, rtw_recv_select_queue23a(skb));
-
- rtw_xmit23a_entry23a(skb, pnetdev);
-
- if (bmcast)
- skb = pskb2;
- else
- goto _recv_indicatepkt_end;
- }
- } else { /* to APself */
- /* DBG_8723A("to APSelf\n"); */
- }
- }
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->dev = padapter->pnetdev;
- skb->protocol = eth_type_trans(skb, padapter->pnetdev);
-
- netif_rx(skb);
-
-_recv_indicatepkt_end:
-
- precv_frame->pkt = NULL; /* set pointer to NULL before rtw_free_recvframe23a() */
-
- rtw_free_recvframe23a(precv_frame);
-
- RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- "rtw_recv_indicatepkt23a :after netif_rx!!!!\n");
- return _SUCCESS;
-
-_recv_indicatepkt_drop:
-
- rtw_free_recvframe23a(precv_frame);
- return _FAIL;
-}
-
-void rtw_init_recv_timer23a(struct recv_reorder_ctrl *preorder_ctrl)
-{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- rtw_reordering_ctrl_timeout_handler23a,
- (unsigned long)preorder_ctrl);
-}
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
deleted file mode 100644
index cf83efffbffd..000000000000
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ /dev/null
@@ -1,627 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _HCI_INTF_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <recv_osdep.h>
-#include <xmit_osdep.h>
-#include <hal_intf.h>
-#include <rtw_version.h>
-#include <osdep_intf.h>
-#include <usb_ops.h>
-#include <rtl8723a_hal.h>
-
-static int rtw_suspend(struct usb_interface *intf, pm_message_t message);
-static int rtw_resume(struct usb_interface *intf);
-static int rtw_drv_init(struct usb_interface *pusb_intf,
- const struct usb_device_id *pdid);
-static void rtw_disconnect(struct usb_interface *pusb_intf);
-
-#define USB_VENDER_ID_REALTEK 0x0BDA
-
-#define RTL8723A_USB_IDS \
- {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x8724, \
- 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */ \
- {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x1724, \
- 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */ \
- {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDER_ID_REALTEK, 0x0724, \
- 0xff, 0xff, 0xff)}, /* 8723AU 1*1 */
-
-static struct usb_device_id rtl8723a_usb_id_tbl[] = {
- RTL8723A_USB_IDS
- {} /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, rtl8723a_usb_id_tbl);
-
-static struct usb_driver rtl8723a_usb_drv = {
- .name = (char *)"rtl8723au",
- .probe = rtw_drv_init,
- .disconnect = rtw_disconnect,
- .id_table = rtl8723a_usb_id_tbl,
- .suspend = rtw_suspend,
- .resume = rtw_resume,
- .reset_resume = rtw_resume,
-};
-
-static struct usb_driver *usb_drv = &rtl8723a_usb_drv;
-
-static int rtw_init_intf_priv(struct dvobj_priv *dvobj)
-{
- mutex_init(&dvobj->usb_vendor_req_mutex);
-
- return _SUCCESS;
-}
-
-static int rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
-{
- mutex_destroy(&dvobj->usb_vendor_req_mutex);
-
- return _SUCCESS;
-}
-
-static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
-{
- struct dvobj_priv *pdvobjpriv;
- struct usb_host_config *phost_conf;
- struct usb_config_descriptor *pconf_desc;
- struct usb_host_interface *phost_iface;
- struct usb_interface_descriptor *piface_desc;
- struct usb_endpoint_descriptor *pendp_desc;
- struct usb_device *pusbd;
- int i, status = _FAIL;
-
- pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
- if (!pdvobjpriv)
- goto exit;
-
- mutex_init(&pdvobjpriv->hw_init_mutex);
- mutex_init(&pdvobjpriv->h2c_fwcmd_mutex);
- mutex_init(&pdvobjpriv->setch_mutex);
- mutex_init(&pdvobjpriv->setbw_mutex);
-
- pdvobjpriv->pusbintf = usb_intf;
- pusbd = interface_to_usbdev(usb_intf);
- pdvobjpriv->pusbdev = pusbd;
- usb_set_intfdata(usb_intf, pdvobjpriv);
-
- pdvobjpriv->RtNumInPipes = 0;
- pdvobjpriv->RtNumOutPipes = 0;
-
- phost_conf = pusbd->actconfig;
- pconf_desc = &phost_conf->desc;
-
- phost_iface = &usb_intf->altsetting[0];
- piface_desc = &phost_iface->desc;
-
- pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
- pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
- pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
-
- for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
- pendp_desc = &phost_iface->endpoint[i].desc;
-
- DBG_8723A("\nusb_endpoint_descriptor(%d):\n", i);
- DBG_8723A("bLength =%x\n", pendp_desc->bLength);
- DBG_8723A("bDescriptorType =%x\n", pendp_desc->bDescriptorType);
- DBG_8723A("bEndpointAddress =%x\n",
- pendp_desc->bEndpointAddress);
- DBG_8723A("wMaxPacketSize =%d\n",
- le16_to_cpu(pendp_desc->wMaxPacketSize));
- DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
-
- if (usb_endpoint_is_bulk_in(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_bulk_in = %x\n",
- usb_endpoint_num(pendp_desc));
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_int_in = %x, Interval = "
- "%x\n", usb_endpoint_num(pendp_desc),
- pendp_desc->bInterval);
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
- DBG_8723A("usb_endpoint_is_bulk_out = %x\n",
- usb_endpoint_num(pendp_desc));
- pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
- usb_endpoint_num(pendp_desc);
- pdvobjpriv->RtNumOutPipes++;
- }
- pdvobjpriv->ep_num[i] = usb_endpoint_num(pendp_desc);
- }
- DBG_8723A("nr_endpoint =%d, in_num =%d, out_num =%d\n\n",
- pdvobjpriv->nr_endpoint, pdvobjpriv->RtNumInPipes,
- pdvobjpriv->RtNumOutPipes);
-
- if (pusbd->speed == USB_SPEED_HIGH) {
- pdvobjpriv->ishighspeed = true;
- DBG_8723A("USB_SPEED_HIGH\n");
- } else {
- pdvobjpriv->ishighspeed = false;
- DBG_8723A("NON USB_SPEED_HIGH\n");
- }
-
- if (rtw_init_intf_priv(pdvobjpriv) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_,
- "Can't INIT rtw_init_intf_priv\n");
- goto free_dvobj;
- }
- /* 3 misc */
- rtw_reset_continual_urb_error(pdvobjpriv);
- usb_get_dev(pusbd);
- status = _SUCCESS;
-free_dvobj:
- if (status != _SUCCESS && pdvobjpriv) {
- usb_set_intfdata(usb_intf, NULL);
- mutex_destroy(&pdvobjpriv->hw_init_mutex);
- mutex_destroy(&pdvobjpriv->h2c_fwcmd_mutex);
- mutex_destroy(&pdvobjpriv->setch_mutex);
- mutex_destroy(&pdvobjpriv->setbw_mutex);
- kfree(pdvobjpriv);
- pdvobjpriv = NULL;
- }
-exit:
- return pdvobjpriv;
-}
-
-static void usb_dvobj_deinit(struct usb_interface *usb_intf)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
-
- usb_set_intfdata(usb_intf, NULL);
- if (dvobj) {
- /* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
- if ((dvobj->NumInterfaces != 2 && dvobj->NumInterfaces != 3) ||
- (dvobj->InterfaceNumber == 1)) {
- if (interface_to_usbdev(usb_intf)->state !=
- USB_STATE_NOTATTACHED) {
- /* If the usb dongle was not unplugged when the module was
- * removed/reinserted, the driver fails the first sitesurvey
- * after the device comes up.
- * Reset the usb port to work around this sitesurvey failure.
- */
- DBG_8723A("usb attached..., try to reset usb device\n");
- usb_reset_device(interface_to_usbdev(usb_intf));
- }
- }
- rtw_deinit_intf_priv(dvobj);
- mutex_destroy(&dvobj->hw_init_mutex);
- mutex_destroy(&dvobj->h2c_fwcmd_mutex);
- mutex_destroy(&dvobj->setch_mutex);
- mutex_destroy(&dvobj->setbw_mutex);
- kfree(dvobj);
- }
- usb_put_dev(interface_to_usbdev(usb_intf));
-}
-
-void rtl8723a_usb_intf_stop(struct rtw_adapter *padapter)
-{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+usb_intf_stop\n");
-
- /* disable_hw_interrupt */
- if (!padapter->bSurpriseRemoved) {
- /* device still exists, so driver can do i/o operation
- * TODO:
- */
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- "SurpriseRemoved == false\n");
- }
-
- /* cancel in irp */
- rtl8723au_inirp_deinit(padapter);
-
- /* cancel out irp */
- rtl8723au_write_port_cancel(padapter);
-
- /* todo:cancel other irps */
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "-usb_intf_stop\n");
-}
-
-static void rtw_dev_unload(struct rtw_adapter *padapter)
-{
- struct submit_ctx *pack_tx_ops = &padapter->xmitpriv.ack_tx_ops;
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_dev_unload\n");
-
- if (padapter->bup) {
- DBG_8723A("===> rtw_dev_unload\n");
-
- padapter->bDriverStopped = true;
- if (padapter->xmitpriv.ack_tx)
- rtw23a_sctx_done_err(&pack_tx_ops,
- RTW_SCTX_DONE_DRV_STOP);
-
- /* s3. */
- rtl8723a_usb_intf_stop(padapter);
-
- /* s4. */
- flush_workqueue(padapter->cmdpriv.wq);
-
- /* s5. */
- if (!padapter->bSurpriseRemoved) {
- rtl8723au_hal_deinit(padapter);
- padapter->bSurpriseRemoved = true;
- }
- padapter->bup = false;
- } else {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- "r871x_dev_unload():padapter->bup == false\n");
- }
- DBG_8723A("<=== rtw_dev_unload\n");
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "-rtw_dev_unload\n");
-}
-
-static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
- struct rtw_adapter *padapter = dvobj->if1;
- struct net_device *pnetdev = padapter->pnetdev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- int ret = 0;
- unsigned long start_time = jiffies;
-
- DBG_8723A("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
-
- if ((!padapter->bup) || (padapter->bDriverStopped) ||
- (padapter->bSurpriseRemoved)) {
- DBG_8723A("padapter->bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n",
- padapter->bup, padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
- goto exit;
- }
- pwrpriv->bInSuspend = true;
- rtw_cancel_all_timer23a(padapter);
- LeaveAllPowerSaveMode23a(padapter);
-
- down(&pwrpriv->lock);
- /* padapter->net_closed = true; */
- /* s1. */
- if (pnetdev) {
- netif_carrier_off(pnetdev);
- netif_tx_stop_all_queues(pnetdev);
- }
-
- /* s2. */
- rtw_disassoc_cmd23a(padapter, 0, false);
-
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- DBG_8723A("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
- __func__, __LINE__,
- pmlmepriv->cur_network.network.Ssid.ssid,
- pmlmepriv->cur_network.network.MacAddress,
- pmlmepriv->cur_network.network.Ssid.ssid_len,
- pmlmepriv->assoc_ssid.ssid_len);
-
- rtw_set_roaming(padapter, 1);
- }
- /* s2-2. indicate disconnect to os */
- rtw_indicate_disconnect23a(padapter);
- /* s2-3. */
- rtw_free_assoc_resources23a(padapter, 1);
- /* s2-4. */
- rtw_free_network_queue23a(padapter);
-
- rtw_dev_unload(padapter);
- up(&pwrpriv->lock);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- rtw_cfg80211_indicate_scan_done(
- wdev_to_priv(padapter->rtw_wdev), true);
-
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
- rtw_indicate_disconnect23a(padapter);
-
-exit:
- DBG_8723A("<=== %s return %d.............. in %dms\n", __func__,
- ret, jiffies_to_msecs(jiffies - start_time));
-
- return ret;
-}
-
-static int rtw_resume(struct usb_interface *pusb_intf)
-{
- struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
- struct rtw_adapter *padapter = dvobj->if1;
- struct net_device *pnetdev;
- struct pwrctrl_priv *pwrpriv = NULL;
- int ret = -1;
- unsigned long start_time = jiffies;
-
- DBG_8723A("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
-
- if (!padapter)
- goto exit;
- pnetdev = padapter->pnetdev;
- pwrpriv = &padapter->pwrctrlpriv;
-
- down(&pwrpriv->lock);
- rtw_reset_drv_sw23a(padapter);
- pwrpriv->bkeepfwalive = false;
-
- DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open23a(pnetdev, true) != 0) {
- up(&pwrpriv->lock);
- goto exit;
- }
-
- netif_device_attach(pnetdev);
- netif_carrier_on(pnetdev);
-
- up(&pwrpriv->lock);
-
- if (padapter->pid[1] != 0) {
- DBG_8723A("pid[1]:%d\n", padapter->pid[1]);
- kill_pid(find_vpid(padapter->pid[1]), SIGUSR2, 1);
- }
-
- rtw23a_roaming(padapter, NULL);
-
- ret = 0;
-exit:
- if (pwrpriv)
- pwrpriv->bInSuspend = false;
- DBG_8723A("<=== %s return %d.............. in %dms\n", __func__,
- ret, jiffies_to_msecs(jiffies - start_time));
-
- return ret;
-}
-
-/*
- * drv_init() - probe a device that is potentially ours
- *
- * notes: drv_init() is called when the bus driver has located a card
- * for us to support.
- * We accept the new device by returning 0.
- */
-static struct rtw_adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
- struct usb_interface *pusb_intf,
- const struct usb_device_id *pdid)
-{
- struct rtw_adapter *padapter = NULL;
- struct net_device *pnetdev = NULL;
- int status = _FAIL;
-
- pnetdev = rtw_init_netdev23a(padapter);
- if (!pnetdev)
- goto free_adapter;
- padapter = netdev_priv(pnetdev);
-
- padapter->dvobj = dvobj;
- padapter->bDriverStopped = true;
- dvobj->if1 = padapter;
- dvobj->padapters[dvobj->iface_nums++] = padapter;
- padapter->iface_id = IFACE_ID0;
-
- rtl8723au_set_hw_type(padapter);
-
- SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
-
- if (rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj)))
- goto free_adapter;
-
- /* step 2. allocate HalData */
- padapter->HalData = kzalloc(sizeof(struct hal_data_8723a), GFP_KERNEL);
- if (!padapter->HalData)
- goto free_wdev;
-
- /* step read_chip_version */
- rtl8723a_read_chip_version(padapter);
-
- /* step usb endpoint mapping */
- if (!rtl8723au_chip_configure(padapter))
- goto free_hal_data;
-
- /* step read efuse/eeprom data and get mac_addr */
- rtl8723a_read_adapter_info(padapter);
-
- /* step 5. */
- if (rtw_init_drv_sw23a(padapter) == _FAIL) {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- "Initialize driver software resource Failed!\n");
- goto free_hal_data;
- }
-
-#ifdef CONFIG_PM
- if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
- dvobj->pusbdev->do_remote_wakeup = 1;
- pusb_intf->needs_remote_wakeup = 1;
- device_init_wakeup(&pusb_intf->dev, 1);
- DBG_8723A("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
- DBG_8723A("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
- device_may_wakeup(&pusb_intf->dev));
- }
-#endif
- /* 2012-07-11 Move here to prevent the 8723AS-VAU BT
- * auto suspend influence
- */
- if (usb_autopm_get_interface(pusb_intf) < 0)
- DBG_8723A("can't get autopm:\n");
-#ifdef CONFIG_8723AU_BT_COEXIST
- padapter->pwrctrlpriv.autopm_cnt = 1;
-#endif
-
- /* If the eeprom mac address is corrupted, assign a random address */
- if (is_broadcast_ether_addr(padapter->eeprompriv.mac_addr) ||
- is_zero_ether_addr(padapter->eeprompriv.mac_addr))
- eth_random_addr(padapter->eeprompriv.mac_addr);
-
- DBG_8723A("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n",
- padapter->bDriverStopped, padapter->bSurpriseRemoved,
- padapter->bup, padapter->hw_init_completed
- );
- status = _SUCCESS;
-
-free_hal_data:
- if (status != _SUCCESS)
- kfree(padapter->HalData);
-free_wdev:
- if (status != _SUCCESS) {
- rtw_wdev_unregister(padapter->rtw_wdev);
- rtw_wdev_free(padapter->rtw_wdev);
- }
-free_adapter:
- if (status != _SUCCESS) {
- if (pnetdev)
- free_netdev(pnetdev);
- padapter = NULL;
- }
- return padapter;
-}
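
The unwind labels in the function above are unusual in that they are reached on success as well, so every label has to re-check status. For contrast, a minimal sketch of the more common kernel unwind idiom, where a failure jumps straight to the cleanup it needs; the foo_* helpers are hypothetical stand-ins, not part of this driver:

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	void *hw;
};

static int foo_setup_hw(struct foo *foo) { return 0; }			/* stand-in */
static void foo_teardown_hw(struct foo *foo) { }			/* stand-in */
static int foo_register(struct foo *foo, struct device *dev) { return 0; }	/* stand-in */

static struct foo *foo_init(struct device *dev)
{
	struct foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	if (foo_setup_hw(foo))
		goto err_free_foo;

	if (foo_register(foo, dev))
		goto err_teardown_hw;

	return foo;			/* success path never touches the labels */

err_teardown_hw:
	foo_teardown_hw(foo);
err_free_foo:
	kfree(foo);
	return NULL;
}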
-
-static void rtw_usb_if1_deinit(struct rtw_adapter *if1)
-{
- struct net_device *pnetdev = if1->pnetdev;
- struct mlme_priv *pmlmepriv = &if1->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- rtw_disassoc_cmd23a(if1, 0, false);
-
-#ifdef CONFIG_8723AU_AP_MODE
- free_mlme_ap_info23a(if1);
-#endif
-
- if (pnetdev)
- unregister_netdev(pnetdev); /* will call netdev_close() */
-
- rtw_cancel_all_timer23a(if1);
-
- rtw_dev_unload(if1);
-
- DBG_8723A("+r871xu_dev_remove, hw_init_completed =%d\n",
- if1->hw_init_completed);
-
- if (if1->rtw_wdev) {
- rtw_wdev_unregister(if1->rtw_wdev);
- rtw_wdev_free(if1->rtw_wdev);
- }
-
-#ifdef CONFIG_8723AU_BT_COEXIST
- if (1 == if1->pwrctrlpriv.autopm_cnt) {
- usb_autopm_put_interface(adapter_to_dvobj(if1)->pusbintf);
- if1->pwrctrlpriv.autopm_cnt--;
- }
-#endif
-
- rtw_free_drv_sw23a(if1);
-
- if (pnetdev)
- free_netdev(pnetdev);
-}
-
-static int rtw_drv_init(struct usb_interface *pusb_intf,
- const struct usb_device_id *pdid)
-{
- struct rtw_adapter *if1 = NULL;
- struct dvobj_priv *dvobj;
- struct usb_device *udev;
- int status = _FAIL;
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n");
-
- /* Initialize dvobj_priv */
- dvobj = usb_dvobj_init(pusb_intf);
- if (!dvobj) {
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- "initialize device object priv Failed!\n");
- goto exit;
- }
-
- udev = dvobj->pusbdev;
- dev_warn(&udev->dev, "WARNING: The rtl8723au driver is deprecated!");
- dev_warn(&udev->dev, "Please use the rtl8xxxu driver for this device!");
-
- if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
- if (!if1) {
- DBG_8723A("rtw_init_primary_adapter Failed!\n");
- goto free_dvobj;
- }
-
- /* dev_alloc_name && register_netdev */
- status = rtw_drv_register_netdev(if1);
- if (status != _SUCCESS)
- goto free_if1;
- RT_TRACE(_module_hci_intfs_c_, _drv_err_,
- "-871x_drv - drv_init, success!\n");
-
- status = _SUCCESS;
-
-free_if1:
- if (status != _SUCCESS && if1)
- rtw_usb_if1_deinit(if1);
-free_dvobj:
- if (status != _SUCCESS)
- usb_dvobj_deinit(pusb_intf);
-exit:
- return status == _SUCCESS ? 0 : -ENODEV;
-}
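
As the drv_init() comment earlier in this file notes, a USB probe callback accepts a device by returning 0 and declines it with a negative errno; rtw_drv_init() maps _FAIL to -ENODEV for exactly that reason. A generic sketch of that contract, not tied to this driver; the demo_* names and the VID/PID pair are illustrative only:

#include <linux/module.h>
#include <linux/usb.h>

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	/* Allocate per-device state, register a netdev, etc. */
	return 0;	/* 0 means "this interface is ours" */
}

static void demo_disconnect(struct usb_interface *intf)
{
	/* Undo everything probe() set up. */
}

static const struct usb_device_id demo_ids[] = {
	{ USB_DEVICE(0x0bda, 0x8724) },	/* illustrative VID/PID only */
	{ }
};
MODULE_DEVICE_TABLE(usb, demo_ids);

static struct usb_driver demo_driver = {
	.name		= "demo_usb",
	.probe		= demo_probe,
	.disconnect	= demo_disconnect,
	.id_table	= demo_ids,
};
module_usb_driver(demo_driver);

MODULE_LICENSE("GPL");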
-
-/* dev_remove() - our device is being removed */
-static void rtw_disconnect(struct usb_interface *pusb_intf)
-{
- struct dvobj_priv *dvobj;
- struct rtw_adapter *padapter;
- struct net_device *pnetdev;
- struct mlme_priv *pmlmepriv;
-
- dvobj = usb_get_intfdata(pusb_intf);
- if (!dvobj)
- return;
-
- padapter = dvobj->if1;
- pnetdev = padapter->pnetdev;
- pmlmepriv = &padapter->mlmepriv;
-
- usb_set_intfdata(pusb_intf, NULL);
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+dev_remove()\n");
-
- rtw_pm_set_ips23a(padapter, IPS_NONE);
- rtw_pm_set_lps23a(padapter, PS_MODE_ACTIVE);
-
- LeaveAllPowerSaveMode23a(padapter);
-
- rtw_usb_if1_deinit(padapter);
-
- usb_dvobj_deinit(pusb_intf);
-
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "-dev_remove()\n");
- DBG_8723A("-r871xu_dev_remove, done\n");
-}
-
-static int __init rtw_drv_entry(void)
-{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_entry\n");
- return usb_register(usb_drv);
-}
-
-static void __exit rtw_drv_halt(void)
-{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_halt\n");
- DBG_8723A("+rtw_drv_halt\n");
-
- usb_deregister(usb_drv);
-
- DBG_8723A("-rtw_drv_halt\n");
-}
-
-module_init(rtw_drv_entry);
-module_exit(rtw_drv_halt);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
deleted file mode 100644
index cf4a50618670..000000000000
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _USB_OPS_LINUX_C_
-
-#include <drv_types.h>
-#include <usb_ops_linux.h>
-#include <rtw_sreset.h>
-
-void rtl8723au_read_port_cancel(struct rtw_adapter *padapter)
-{
- struct recv_buf *precvbuf;
- int i;
-
- precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
-
- DBG_8723A("%s\n", __func__);
-
- padapter->bReadPortCancel = true;
-
- for (i = 0; i < NR_RECVBUFF ; i++) {
- if (precvbuf->purb)
- usb_kill_urb(precvbuf->purb);
- precvbuf++;
- }
- usb_kill_urb(padapter->recvpriv.int_in_urb);
-}
-
-static void usb_write_port23a_complete(struct urb *purb)
-{
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
- struct rtw_adapter *padapter = pxmitbuf->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct hal_data_8723a *phaldata;
- unsigned long irqL;
-
- switch (pxmitbuf->flags) {
- case HIGH_QUEUE_INX:
-#ifdef CONFIG_8723AU_AP_MODE
- rtw_chk_hi_queue_cmd23a(padapter);
-#endif
- break;
- default:
- break;
- }
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
- padapter->bWritePortCancel) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
- padapter->bDriverStopped, padapter->bSurpriseRemoved);
- DBG_8723A("%s(): TX Warning! bDriverStopped(%d) OR "
- "bSurpriseRemoved(%d) bWritePortCancel(%d) "
- "pxmitbuf->ext_tag(%x)\n", __func__,
- padapter->bDriverStopped, padapter->bSurpriseRemoved,
- padapter->bWritePortCancel, pxmitbuf->ext_tag);
-
- goto check_completion;
- }
-
- if (purb->status) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete : purb->status(%d) != 0\n",
- purb->status);
- DBG_8723A("###=> urb_write_port_complete status(%d)\n",
- purb->status);
- if (purb->status == -EPIPE || purb->status == -EPROTO) {
- } else if (purb->status == -EINPROGRESS) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete: EINPROGESS\n");
- goto check_completion;
- } else if (purb->status == -ENOENT) {
- DBG_8723A("%s: -ENOENT\n", __func__);
- goto check_completion;
- } else if (purb->status == -ECONNRESET) {
- DBG_8723A("%s: -ECONNRESET\n", __func__);
- goto check_completion;
- } else if (purb->status == -ESHUTDOWN) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete: ESHUTDOWN\n");
- padapter->bDriverStopped = true;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete:bDriverStopped = true\n");
- goto check_completion;
- } else {
- padapter->bSurpriseRemoved = true;
- DBG_8723A("bSurpriseRemoved = true\n");
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a_complete:bSurpriseRemoved = true\n");
- goto check_completion;
- }
- }
- phaldata = GET_HAL_DATA(padapter);
- phaldata->srestpriv.last_tx_complete_time = jiffies;
-
-check_completion:
- spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
- rtw23a_sctx_done_err(&pxmitbuf->sctx,
- purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
- RTW_SCTX_DONE_SUCCESS);
- spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);
-
- rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
-
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-}
-
-int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
- struct xmit_buf *pxmitbuf)
-{
- struct urb *purb = NULL;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct xmit_frame *pxmitframe;
- struct usb_device *pusbd = pdvobj->pusbdev;
- unsigned long irqL;
- unsigned int pipe, ep_num;
- int status;
- int ret = _FAIL;
-
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "+usb_write_port23a\n");
-
- if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "%s:(padapter->bDriverStopped || padapter->bSurpriseRemoved)!!!\n",
- __func__);
- rtw23a_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
- goto exit;
- }
-
- pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
- spin_lock_irqsave(&pxmitpriv->lock, irqL);
-
- switch (addr) {
- case VO_QUEUE_INX:
- pxmitbuf->flags = VO_QUEUE_INX;
- break;
- case VI_QUEUE_INX:
- pxmitbuf->flags = VI_QUEUE_INX;
- break;
- case BE_QUEUE_INX:
- pxmitbuf->flags = BE_QUEUE_INX;
- break;
- case BK_QUEUE_INX:
- pxmitbuf->flags = BK_QUEUE_INX;
- break;
- case HIGH_QUEUE_INX:
- pxmitbuf->flags = HIGH_QUEUE_INX;
- break;
- default:
- pxmitbuf->flags = MGT_QUEUE_INX;
- break;
- }
-
- spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
-
- purb = pxmitbuf->pxmit_urb[0];
-
- /* translate DMA FIFO addr to pipehandle */
- ep_num = pdvobj->Queue2Pipe[addr];
- pipe = usb_sndbulkpipe(pusbd, ep_num);
-
- usb_fill_bulk_urb(purb, pusbd, pipe,
- pxmitframe->buf_addr, /* pxmitbuf->pbuf */
- cnt, usb_write_port23a_complete,
- pxmitbuf);/* context is pxmitbuf */
-
- status = usb_submit_urb(purb, GFP_ATOMIC);
- if (!status) {
- struct hal_data_8723a *phaldata = GET_HAL_DATA(padapter);
- phaldata->srestpriv.last_tx_time = jiffies;
- } else {
- rtw23a_sctx_done_err(&pxmitbuf->sctx,
- RTW_SCTX_DONE_WRITE_PORT_ERR);
- DBG_8723A("usb_write_port23a, status =%d\n", status);
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
- "usb_write_port23a(): usb_submit_urb, status =%x\n",
- status);
-
- switch (status) {
- case -ENODEV:
- padapter->bDriverStopped = true;
- break;
- default:
- break;
- }
- goto exit;
- }
- ret = _SUCCESS;
- RT_TRACE(_module_hci_ops_os_c_, _drv_err_, "-usb_write_port23a\n");
-
-exit:
- if (ret != _SUCCESS)
- rtw_free_xmitbuf23a(pxmitpriv, pxmitbuf);
-
- return ret;
-}
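
Stripped of the queue bookkeeping, rtl8723au_write_port() is the standard asynchronous bulk-out pattern: fill a URB for a bulk pipe, submit it, and finish in the completion callback. A reduced sketch of that pattern, assuming buf was kmalloc()ed by the caller; all demo_* names are placeholders:

#include <linux/slab.h>
#include <linux/usb.h>

static void demo_write_complete(struct urb *urb)
{
	/* urb->status is 0 on success, e.g. -ENOENT/-ESHUTDOWN on cancel */
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
}

static int demo_bulk_write(struct usb_device *udev, unsigned int ep,
			   void *buf, int len)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
			  buf, len, demo_write_complete, NULL);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_free_urb(urb);	/* completion will not run; caller still owns buf */
	return ret;
}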
-
-void rtl8723au_write_port_cancel(struct rtw_adapter *padapter)
-{
- struct xmit_buf *pxmitbuf;
- int j;
-
- DBG_8723A("%s\n", __func__);
-
- padapter->bWritePortCancel = true;
-
- list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitbuf_list,
- list2) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
- }
- list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitextbuf_list,
- list2) {
- for (j = 0; j < 8; j++) {
- if (pxmitbuf->pxmit_urb[j])
- usb_kill_urb(pxmitbuf->pxmit_urb[j]);
- }
- }
-}
diff --git a/drivers/staging/rtl8723au/os_dep/xmit_linux.c b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
deleted file mode 100644
index 64be72ac38ee..000000000000
--- a/drivers/staging/rtl8723au/os_dep/xmit_linux.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _XMIT_OSDEP_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#include <linux/if_ether.h>
-#include <linux/ip.h>
-#include <wifi.h>
-#include <mlme_osdep.h>
-#include <xmit_osdep.h>
-#include <osdep_intf.h>
-
-int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 alloc_sz)
-{
- int i;
-
- pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
- if (pxmitbuf->pallocated_buf == NULL)
- return _FAIL;
-
- pxmitbuf->pbuf = PTR_ALIGN(pxmitbuf->pallocated_buf, XMITBUF_ALIGN_SZ);
-
- for (i = 0; i < 8; i++) {
- pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb[i]) {
- DBG_8723A("pxmitbuf->pxmit_urb[i]==NULL");
- return _FAIL;
- }
- }
- return _SUCCESS;
-}
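
The allocation above relies on the caller over-sizing alloc_sz so that PTR_ALIGN() can round the usable pointer up to XMITBUF_ALIGN_SZ without running past the buffer. A minimal sketch of that over-allocate-then-align idiom; the 512-byte alignment and the demo_* names are assumptions for illustration:

#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/slab.h>
#include <linux/types.h>

#define DEMO_ALIGN	512

struct demo_buf {
	u8 *raw;	/* pointer to pass to kfree() later */
	u8 *data;	/* aligned pointer handed to the hardware */
};

static int demo_buf_alloc(struct demo_buf *b, size_t payload)
{
	/* Over-allocate by the alignment so the aligned pointer still fits. */
	b->raw = kzalloc(payload + DEMO_ALIGN, GFP_KERNEL);
	if (!b->raw)
		return -ENOMEM;

	b->data = PTR_ALIGN(b->raw, DEMO_ALIGN);
	return 0;
}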
-
-void rtw_os_xmit_resource_free23a(struct rtw_adapter *padapter,
- struct xmit_buf *pxmitbuf)
-{
- int i;
-
- for (i = 0; i < 8; i++)
- usb_free_urb(pxmitbuf->pxmit_urb[i]);
- kfree(pxmitbuf->pallocated_buf);
-}
-
-#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
-
-void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u16 queue;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
- (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
- netif_wake_subqueue(padapter->pnetdev, queue);
- } else {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue))
- netif_wake_subqueue(padapter->pnetdev, queue);
- }
- dev_kfree_skb_any(pkt);
-}
-
-void rtw_os_xmit_complete23a(struct rtw_adapter *padapter,
- struct xmit_frame *pxframe)
-{
- if (pxframe->pkt)
- rtw_os_pkt_complete23a(padapter, pxframe->pkt);
-
- pxframe->pkt = NULL;
-}
-
-void rtw_os_xmit_schedule23a(struct rtw_adapter *padapter)
-{
- struct xmit_priv *pxmitpriv;
-
- if (!padapter)
- return;
- pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- if (rtw_txframes_pending23a(padapter))
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-static void rtw_check_xmit_resource(struct rtw_adapter *padapter,
- struct sk_buff *pkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u16 queue;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- /* No free space for Tx, tx_worker is too slow */
- if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
- netif_stop_subqueue(padapter->pnetdev, queue);
- } else {
- if (pxmitpriv->free_xmitframe_cnt <= 4) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
- netif_stop_subqueue(padapter->pnetdev, queue);
- }
- }
-}
-
-int rtw_xmit23a_entry23a(struct sk_buff *skb, struct net_device *pnetdev)
-{
- struct rtw_adapter *padapter = netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- int res = 0;
-
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "+xmit_entry\n");
-
- if (!rtw_if_up23a(padapter)) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_,
- "rtw_xmit23a_entry23a: rtw_if_up23a fail\n");
- goto drop_packet;
- }
-
- rtw_check_xmit_resource(padapter, skb);
-
- res = rtw_xmit23a(padapter, skb);
- if (res < 0)
- goto drop_packet;
-
- pxmitpriv->tx_pkts++;
- RT_TRACE(_module_xmit_osdep_c_, _drv_info_,
- "rtw_xmit23a_entry23a: tx_pkts=%d\n",
- (u32)pxmitpriv->tx_pkts);
- goto exit;
-
-drop_packet:
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(skb);
- RT_TRACE(_module_xmit_osdep_c_, _drv_notice_,
- "rtw_xmit23a_entry23a: drop, tx_drop=%d\n",
- (u32)pxmitpriv->tx_drop);
-exit:
- return 0;
-}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 0f0cd4a03cd4..f27df0b4cb44 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -63,12 +63,12 @@ static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
- 0xFF, MS_TRANSFER_START | trans_mode);
+ 0xFF, MS_TRANSFER_START | trans_mode);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
rtsx_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
@@ -109,8 +109,8 @@ static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
}
static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
- u8 tpc, u16 sec_cnt, u8 cfg, bool mode_2k,
- int use_sg, void *buf, int buf_len)
+ u8 tpc, u16 sec_cnt, u8 cfg, bool mode_2k,
+ int use_sg, void *buf, int buf_len)
{
int retval;
u8 val, err_code = 0;
@@ -206,7 +206,7 @@ static int ms_write_bytes(struct rtsx_chip *chip,
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD,
MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
@@ -253,7 +253,7 @@ static int ms_write_bytes(struct rtsx_chip *chip,
}
static int ms_read_bytes(struct rtsx_chip *chip,
- u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
+ u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
{
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
@@ -270,12 +270,12 @@ static int ms_read_bytes(struct rtsx_chip *chip,
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
- 0x01, PINGPONG_BUFFER);
+ 0x01, PINGPONG_BUFFER);
rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
- MS_TRANSFER_START | MS_TM_READ_BYTES);
+ MS_TRANSFER_START | MS_TM_READ_BYTES);
rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
- MS_TRANSFER_END, MS_TRANSFER_END);
+ MS_TRANSFER_END, MS_TRANSFER_END);
for (i = 0; i < data_len - 1; i++)
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
@@ -284,7 +284,7 @@ static int ms_read_bytes(struct rtsx_chip *chip,
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
else
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len - 1,
- 0, 0);
+ 0, 0);
retval = rtsx_send_cmd(chip, MS_CARD, 5000);
if (retval < 0) {
@@ -334,8 +334,8 @@ static int ms_read_bytes(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-static int ms_set_rw_reg_addr(struct rtsx_chip *chip,
- u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt)
+static int ms_set_rw_reg_addr(struct rtsx_chip *chip, u8 read_start,
+ u8 read_cnt, u8 write_start, u8 write_cnt)
{
int retval, i;
u8 data[4];
@@ -1417,7 +1417,6 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
return STATUS_SUCCESS;
}
-
static int ms_read_extra_data(struct rtsx_chip *chip,
u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
@@ -1582,7 +1581,6 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-
static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -1667,7 +1665,6 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
return STATUS_SUCCESS;
}
-
static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -1738,7 +1735,6 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
return STATUS_SUCCESS;
}
-
static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -1808,7 +1804,6 @@ ERASE_RTY:
return STATUS_SUCCESS;
}
-
static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
{
if (!extra || (extra_len < MS_EXTRA_SIZE))
@@ -2152,7 +2147,6 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
return STATUS_SUCCESS;
}
-
static int reset_ms(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -2471,7 +2465,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
if (!ms_card->segment)
return 0xFFFF;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
if (segment->l2p_table)
return segment->l2p_table[log_off];
@@ -2488,7 +2482,7 @@ static void ms_set_l2p_tbl(struct rtsx_chip *chip,
if (!ms_card->segment)
return;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
if (segment->l2p_table)
segment->l2p_table[log_off] = phy_blk;
}
@@ -2500,7 +2494,7 @@ static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
int seg_no;
seg_no = (int)phy_blk >> 9;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
segment->free_table[segment->set_index++] = phy_blk;
if (segment->set_index >= MS_FREE_TABLE_CNT)
@@ -2515,7 +2509,7 @@ static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
struct zone_entry *segment;
u16 phy_blk;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
if (segment->unused_blk_cnt <= 0)
return 0xFFFF;
@@ -2544,7 +2538,7 @@ static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
u16 tmp_blk;
seg_no = (int)phy_blk >> 9;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
tmp_blk = segment->l2p_table[log_off];
if (us1 != us2) {
@@ -2608,7 +2602,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
else
table_size = 496;
- segment = &(ms_card->segment[seg_no]);
+ segment = &ms_card->segment[seg_no];
if (!segment->l2p_table) {
segment->l2p_table = vmalloc(table_size * 2);
@@ -2717,7 +2711,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
us2 = extra[0] & 0x10;
(void)ms_arbitrate_l2p(chip, phy_blk,
- log_blk-ms_start_idx[seg_no], us1, us2);
+ log_blk - ms_start_idx[seg_no], us1, us2);
continue;
}
@@ -2809,7 +2803,6 @@ BUILD_FAIL:
return STATUS_FAIL;
}
-
int reset_ms_card(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -2896,7 +2889,6 @@ static int mspro_set_rw_cmd(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-
void mspro_stop_seq_mode(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
@@ -3312,7 +3304,6 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
return STATUS_FAIL;
}
-
static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
u16 log_blk, u8 start_page, u8 end_page,
u8 *buf, unsigned int *index,
@@ -3719,7 +3710,6 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
return STATUS_SUCCESS;
}
-
static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
u16 log_blk, u8 page_off)
{
@@ -3770,7 +3760,7 @@ static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
int ms_delay_write(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
- struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+ struct ms_delay_write_tag *delay_write = &ms_card->delay_write;
int retval;
if (delay_write->delay_write_flag) {
@@ -3816,7 +3806,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
u8 start_page, end_page = 0, page_cnt;
u8 *ptr;
#ifdef MS_DELAY_WRITE
- struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
+ struct ms_delay_write_tag *delay_write = &ms_card->delay_write;
#endif
ms_set_err_code(chip, MS_NO_ERROR);
@@ -3996,7 +3986,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
seg_no++) {
- if (log_blk < ms_start_idx[seg_no+1])
+ if (log_blk < ms_start_idx[seg_no + 1])
break;
}
@@ -4082,13 +4072,12 @@ int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
return retval;
}
-
void ms_free_l2p_tbl(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &chip->ms_card;
int i = 0;
- if (ms_card->segment != NULL) {
+ if (ms_card->segment) {
for (i = 0; i < ms_card->segment_cnt; i++) {
vfree(ms_card->segment[i].l2p_table);
ms_card->segment[i].l2p_table = NULL;
@@ -4313,7 +4302,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_trace(chip);
- goto GetEKBFinish;
+ goto free_buffer;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
@@ -4322,19 +4311,20 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
rtsx_trace(chip);
- goto GetEKBFinish;
+ goto free_buffer;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
rtsx_clear_ms_error(chip);
rtsx_trace(chip);
- return STATUS_FAIL;
+ retval = STATUS_FAIL;
+ goto free_buffer;
}
bufflen = min_t(int, 1052, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf, bufflen, srb);
-GetEKBFinish:
+free_buffer:
kfree(buf);
return retval;
}
@@ -4566,7 +4556,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
- goto GetICVFinish;
+ goto free_buffer;
}
retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
@@ -4575,19 +4565,20 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
rtsx_trace(chip);
- goto GetICVFinish;
+ goto free_buffer;
}
if (check_ms_err(chip)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_clear_ms_error(chip);
rtsx_trace(chip);
- return STATUS_FAIL;
+ retval = STATUS_FAIL;
+ goto free_buffer;
}
bufflen = min_t(int, 1028, scsi_bufflen(srb));
rtsx_stor_set_xfer_buf(buf, bufflen, srb);
-GetICVFinish:
+free_buffer:
kfree(buf);
return retval;
}
@@ -4657,8 +4648,8 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_send_cmd_no_wait(chip);
- retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i*512,
- 512, 0, DMA_TO_DEVICE, 3000);
+ retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i * 512,
+ 512, 0, DMA_TO_DEVICE, 3000);
if ((retval < 0) || check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
@@ -4705,7 +4696,7 @@ SetICVFinish:
void ms_cleanup_work(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
if (CHK_MSPRO(ms_card)) {
if (ms_card->seq_mode) {
@@ -4770,7 +4761,7 @@ int ms_power_off_card3v3(struct rtsx_chip *chip)
int release_ms_card(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
#ifdef MS_DELAY_WRITE
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index d919170f2720..d7686399df97 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -125,7 +125,6 @@
#define Pro_CatagoryReg 0x06
#define Pro_ClassReg 0x07
-
#define Pro_SystemParm 0x10
#define Pro_DataCount1 0x11
#define Pro_DataCount0 0x12
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 25d095a5ade7..5d65a5cdc748 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -34,27 +34,27 @@ MODULE_DESCRIPTION("Realtek PCI-Express card reader rts5208/rts5288 driver");
MODULE_LICENSE("GPL");
static unsigned int delay_use = 1;
-module_param(delay_use, uint, S_IRUGO | S_IWUSR);
+module_param(delay_use, uint, 0644);
MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
static int ss_en;
-module_param(ss_en, int, S_IRUGO | S_IWUSR);
+module_param(ss_en, int, 0644);
MODULE_PARM_DESC(ss_en, "enable selective suspend");
static int ss_interval = 50;
-module_param(ss_interval, int, S_IRUGO | S_IWUSR);
+module_param(ss_interval, int, 0644);
MODULE_PARM_DESC(ss_interval, "Interval to enter ss state in seconds");
static int auto_delink_en;
-module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+module_param(auto_delink_en, int, 0644);
MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
static unsigned char aspm_l0s_l1_en;
-module_param(aspm_l0s_l1_en, byte, S_IRUGO | S_IWUSR);
+module_param(aspm_l0s_l1_en, byte, 0644);
MODULE_PARM_DESC(aspm_l0s_l1_en, "enable device aspm");
static int msi_en;
-module_param(msi_en, int, S_IRUGO | S_IWUSR);
+module_param(msi_en, int, 0644);
MODULE_PARM_DESC(msi_en, "enable msi");
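
The S_IRUGO | S_IWUSR to 0644 changes above are equivalent mode-wise (0444 | 0200 = 0644); checkpatch prefers the plain octal form for module-parameter permissions. A one-parameter sketch, with an assumed parameter name:

#include <linux/moduleparam.h>

static int demo_verbose;
/* 0644: world-readable, owner-writable via /sys/module/<module>/parameters/ */
module_param(demo_verbose, int, 0644);
MODULE_PARM_DESC(demo_verbose, "illustrative verbosity knob");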
static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
@@ -81,14 +81,16 @@ static int slave_alloc(struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
- /* Scatter-gather buffers (all but the last) must have a length
+ /*
+ * Scatter-gather buffers (all but the last) must have a length
* divisible by the bulk maxpacket size. Otherwise a data packet
* would end up being short, causing a premature end to the data
* transfer. Since high-speed bulk pipes have a maxpacket size
* of 512, we'll use that as the scsi device queue's DMA alignment
* mask. Guaranteeing proper alignment of the first buffer will
* have the desired effect because, except at the beginning and
- * the end, scatter-gather buffers follow page boundaries. */
+ * the end, scatter-gather buffers follow page boundaries.
+ */
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
@@ -111,7 +113,6 @@ static int slave_configure(struct scsi_device *sdev)
return 0;
}
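
The rewrapped comment above describes the usual way a SCSI low-level driver enforces buffer alignment: set the request queue's DMA alignment mask to maxpacket - 1. A minimal sketch of that single call for the 512-byte high-speed bulk case (demo_ name is a placeholder):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static int demo_slave_configure(struct scsi_device *sdev)
{
	/* Buffers must be aligned to the 512-byte bulk maxpacket size. */
	blk_queue_dma_alignment(sdev->request_queue, 512 - 1);
	return 0;
}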
-
/***********************************************************************
* /proc/scsi/ functions
***********************************************************************/
@@ -130,7 +131,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
struct rtsx_chip *chip = dev->chip;
/* check for state-transition errors */
- if (chip->srb != NULL) {
+ if (chip->srb) {
dev_err(&dev->pci->dev, "Error: chip->srb = %p\n",
chip->srb);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -186,8 +187,10 @@ static int command_abort(struct scsi_cmnd *srb)
return SUCCESS;
}
-/* This invokes the transport reset mechanism to reset the state of the
- * device */
+/*
+ * This invokes the transport reset mechanism to reset the state of the
+ * device
+ */
static int device_reset(struct scsi_cmnd *srb)
{
int result = 0;
@@ -209,7 +212,6 @@ static int bus_reset(struct scsi_cmnd *srb)
return result < 0 ? FAILED : SUCCESS;
}
-
/*
* this defines our host template, with which we'll allocate hosts
*/
@@ -259,7 +261,6 @@ static struct scsi_host_template rtsx_host_template = {
.module = THIS_MODULE
};
-
static int rtsx_acquire_irq(struct rtsx_dev *dev)
{
struct rtsx_chip *chip = dev->chip;
@@ -282,7 +283,6 @@ static int rtsx_acquire_irq(struct rtsx_dev *dev)
return 0;
}
-
int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val)
{
struct pci_dev *pdev;
@@ -515,7 +515,6 @@ SkipForAbort:
complete_and_exit(&dev->control_exit, 0);
}
-
static int rtsx_polling_thread(void *__dev)
{
struct rtsx_dev *dev = __dev;
@@ -625,7 +624,6 @@ Exit:
return IRQ_HANDLED;
}
-
/* Release all our dynamic resources */
static void rtsx_release_resources(struct rtsx_dev *dev)
{
@@ -660,15 +658,19 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
kfree(dev->chip);
}
-/* First stage of disconnect processing: stop all commands and remove
- * the host */
+/*
+ * First stage of disconnect processing: stop all commands and remove
+ * the host
+ */
static void quiesce_and_remove_host(struct rtsx_dev *dev)
{
struct Scsi_Host *host = rtsx_to_host(dev);
struct rtsx_chip *chip = dev->chip;
- /* Prevent new transfers, stop the current command, and
- * interrupt a SCSI-scan or device-reset delay */
+ /*
+ * Prevent new transfers, stop the current command, and
+ * interrupt a SCSI-scan or device-reset delay
+ */
mutex_lock(&dev->dev_mutex);
scsi_lock(host);
rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
@@ -680,9 +682,11 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
/* Wait some time to let other threads exit */
wait_timeout(100);
- /* queuecommand won't accept any new commands and the control
+ /*
+ * queuecommand won't accept any new commands and the control
* thread won't execute a previously-queued command. If there
- * is such a command pending, complete it with an error. */
+ * is such a command pending, complete it with an error.
+ */
mutex_lock(&dev->dev_mutex);
if (chip->srb) {
chip->srb->result = DID_NO_CONNECT << 16;
@@ -702,8 +706,10 @@ static void release_everything(struct rtsx_dev *dev)
{
rtsx_release_resources(dev);
- /* Drop our reference to the host; the SCSI core will free it
- * when the refcount becomes 0. */
+ /*
+ * Drop our reference to the host; the SCSI core will free it
+ * when the refcount becomes 0.
+ */
scsi_host_put(rtsx_to_host(dev));
}
@@ -942,8 +948,10 @@ static int rtsx_probe(struct pci_dev *pci,
rtsx_init_chip(dev->chip);
- /* set the supported max_lun and max_id for the scsi host
- * NOTE: the minimal value of max_id is 1 */
+ /*
+ * set the supported max_lun and max_id for the scsi host
+ * NOTE: the minimal value of max_id is 1
+ */
host->max_id = 1;
host->max_lun = dev->chip->max_lun;
@@ -994,7 +1002,6 @@ errout:
return err;
}
-
static void rtsx_remove(struct pci_dev *pci)
{
struct rtsx_dev *dev = pci_get_drvdata(pci);
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index 1396263e13e6..e725b10ed087 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -70,14 +70,13 @@
#define rtsx_write_config_byte(chip, where, val) \
pci_write_config_byte((chip)->rtsx->pci, where, val)
-#define wait_timeout_x(task_state, msecs) \
-do { \
- set_current_state((task_state)); \
- schedule_timeout((msecs) * HZ / 1000); \
+#define wait_timeout_x(task_state, msecs) \
+do { \
+ set_current_state((task_state)); \
+ schedule_timeout((msecs) * HZ / 1000); \
} while (0)
#define wait_timeout(msecs) wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
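
wait_timeout_x() converts milliseconds to jiffies by hand with (msecs) * HZ / 1000, which truncates sub-jiffy values to zero when HZ < 1000. An equivalent sketch using msecs_to_jiffies(), which rounds up instead; the function name is illustrative:

#include <linux/jiffies.h>
#include <linux/sched.h>

static inline void demo_wait_timeout(unsigned int msecs)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(msecs));
}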
-
#define STATE_TRANS_NONE 0
#define STATE_TRANS_CMD 1
#define STATE_TRANS_BUF 2
@@ -89,8 +88,6 @@ do { \
#define SCSI_LUN(srb) ((srb)->device->lun)
-typedef unsigned long DELAY_PARA_T;
-
struct rtsx_chip;
struct rtsx_dev {
@@ -131,16 +128,15 @@ struct rtsx_dev {
struct rtsx_chip *chip;
};
-typedef struct rtsx_dev rtsx_dev_t;
-
/* Convert between rtsx_dev and the corresponding Scsi_Host */
static inline struct Scsi_Host *rtsx_to_host(struct rtsx_dev *dev)
{
- return container_of((void *) dev, struct Scsi_Host, hostdata);
+ return container_of((void *)dev, struct Scsi_Host, hostdata);
}
+
static inline struct rtsx_dev *host_to_rtsx(struct Scsi_Host *host)
{
- return (struct rtsx_dev *) host->hostdata;
+ return (struct rtsx_dev *)host->hostdata;
}
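
The rtsx_to_host()/host_to_rtsx() pair works because the driver's private struct lives in the hostdata[] area that scsi_host_alloc() reserves directly behind struct Scsi_Host. A sketch of how such a pair is normally set up; the demo_* names are placeholders:

#include <linux/device.h>
#include <scsi/scsi_host.h>

struct demo_dev {
	struct Scsi_Host *host;
	/* ... driver state ... */
};

static int demo_attach(struct scsi_host_template *tmpl, struct device *parent)
{
	struct Scsi_Host *host;
	struct demo_dev *dev;

	/* Reserve sizeof(*dev) bytes of hostdata behind the Scsi_Host. */
	host = scsi_host_alloc(tmpl, sizeof(*dev));
	if (!host)
		return -ENOMEM;

	dev = (struct demo_dev *)host->hostdata;
	dev->host = host;

	return scsi_add_host(host, parent);
}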
static inline void get_current_time(u8 *timeval_buf, int buf_len)
@@ -165,8 +161,10 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
timeval_buf[7] = (u8)(tv_usec);
}
-/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
- * single queue element srb for write access */
+/*
+ * The scsi_lock() and scsi_unlock() macros protect the sm_state and the
+ * single queue element srb for write access
+ */
#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
#define scsi_lock(host) spin_lock_irq(host->host_lock)
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 231833a3045e..97717744962d 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -631,21 +631,21 @@ void rtsx_init_cards(struct rtsx_chip *chip)
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
{
int retval;
- u8 N = (u8)(clk - 2), min_N, max_N;
+ u8 n = (u8)(clk - 2), min_n, max_n;
u8 mcu_cnt, div, max_div, ssc_depth, ssc_depth_mask;
int sd_vpclk_phase_reset = 0;
if (chip->cur_clk == clk)
return STATUS_SUCCESS;
- min_N = 60;
- max_N = 120;
+ min_n = 60;
+ max_n = 120;
max_div = CLK_DIV_4;
dev_dbg(rtsx_dev(chip), "Switch SSC clock to %dMHz (cur_clk = %d)\n",
clk, chip->cur_clk);
- if ((clk <= 2) || (N > max_N)) {
+ if ((clk <= 2) || (n > max_n)) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -655,15 +655,15 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
mcu_cnt = 7;
div = CLK_DIV_1;
- while ((N < min_N) && (div < max_div)) {
- N = (N + 2) * 2 - 2;
+ while ((n < min_n) && (div < max_div)) {
+ n = (n + 2) * 2 - 2;
div++;
}
- dev_dbg(rtsx_dev(chip), "N = %d, div = %d\n", N, div);
+ dev_dbg(rtsx_dev(chip), "n = %d, div = %d\n", n, div);
if (chip->ssc_en) {
ssc_depth = 0x01;
- N -= 2;
+ n -= 2;
} else {
ssc_depth = 0;
}
@@ -677,7 +677,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
rtsx_add_cmd(chip, WRITE_REG_CMD, CLK_DIV, 0xFF, (div << 4) | mcu_cnt);
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL2, ssc_depth_mask, ssc_depth);
- rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (sd_vpclk_phase_reset) {
rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
@@ -1027,26 +1027,26 @@ int card_share_mode(struct rtsx_chip *chip, int card)
if (CHECK_PID(chip, 0x5208)) {
mask = CARD_SHARE_MASK;
- if (card == SD_CARD)
+ if (card == SD_CARD) {
value = CARD_SHARE_48_SD;
- else if (card == MS_CARD)
+ } else if (card == MS_CARD) {
value = CARD_SHARE_48_MS;
- else if (card == XD_CARD)
+ } else if (card == XD_CARD) {
value = CARD_SHARE_48_XD;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
} else if (CHECK_PID(chip, 0x5288)) {
mask = 0x03;
- if (card == SD_CARD)
+ if (card == SD_CARD) {
value = CARD_SHARE_BAROSSA_SD;
- else if (card == MS_CARD)
+ } else if (card == MS_CARD) {
value = CARD_SHARE_BAROSSA_MS;
- else if (card == XD_CARD)
+ } else if (card == XD_CARD) {
value = CARD_SHARE_BAROSSA_XD;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1065,7 +1065,6 @@ int card_share_mode(struct rtsx_chip *chip, int card)
return STATUS_SUCCESS;
}
-
int select_card(struct rtsx_chip *chip, int card)
{
int retval;
@@ -1073,15 +1072,15 @@ int select_card(struct rtsx_chip *chip, int card)
if (chip->cur_card != card) {
u8 mod;
- if (card == SD_CARD)
+ if (card == SD_CARD) {
mod = SD_MOD_SEL;
- else if (card == MS_CARD)
+ } else if (card == MS_CARD) {
mod = MS_MOD_SEL;
- else if (card == XD_CARD)
+ } else if (card == XD_CARD) {
mod = XD_MOD_SEL;
- else if (card == SPI_CARD)
+ } else if (card == SPI_CARD) {
mod = SPI_MOD_SEL;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index bcc4b666d79f..a10dd6220a7b 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -743,7 +743,7 @@ static inline int check_sd_speed_prior(u32 sd_speed_prior)
int i;
for (i = 0; i < 4; i++) {
- u8 tmp = (u8)(sd_speed_prior >> (i*8));
+ u8 tmp = (u8)(sd_speed_prior >> (i * 8));
if ((tmp < 0x01) || (tmp > 0x04)) {
fake_para = true;
@@ -760,7 +760,7 @@ static inline int check_sd_current_prior(u32 sd_current_prior)
int i;
for (i = 0; i < 4; i++) {
- u8 tmp = (u8)(sd_current_prior >> (i*8));
+ u8 tmp = (u8)(sd_current_prior >> (i * 8));
if (tmp > 0x03) {
fake_para = true;
@@ -2288,7 +2288,7 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
ptr = buf;
reg_addr = PPBUF_BASE2;
- for (i = 0; i < buf_len/256; i++) {
+ for (i = 0; i < buf_len / 256; i++) {
rtsx_init_cmd(chip);
for (j = 0; j < 256; j++)
@@ -2304,10 +2304,10 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
ptr += 256;
}
- if (buf_len%256) {
+ if (buf_len % 256) {
rtsx_init_cmd(chip);
- for (j = 0; j < buf_len%256; j++)
+ for (j = 0; j < buf_len % 256; j++)
rtsx_add_cmd(chip, READ_REG_CMD, reg_addr++, 0, 0);
retval = rtsx_send_cmd(chip, 0, 250);
@@ -2317,7 +2317,7 @@ int rtsx_read_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
}
}
- memcpy(ptr, rtsx_get_cmd_data(chip), buf_len%256);
+ memcpy(ptr, rtsx_get_cmd_data(chip), buf_len % 256);
return STATUS_SUCCESS;
}
@@ -2336,7 +2336,7 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
ptr = buf;
reg_addr = PPBUF_BASE2;
- for (i = 0; i < buf_len/256; i++) {
+ for (i = 0; i < buf_len / 256; i++) {
rtsx_init_cmd(chip);
for (j = 0; j < 256; j++) {
@@ -2352,10 +2352,10 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
}
}
- if (buf_len%256) {
+ if (buf_len % 256) {
rtsx_init_cmd(chip);
- for (j = 0; j < buf_len%256; j++) {
+ for (j = 0; j < buf_len % 256; j++) {
rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
*ptr);
ptr++;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index c08164f3247e..f36642817c6e 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -44,8 +44,10 @@
#define MG_SET_ICV_SLOW
/* HW may miss ERR/CMDNK signal when sampling INT status. */
#define MS_SAMPLE_INT_ERR
- /* HW DO NOT support Wait_INT function during READ_BYTES
- * transfer mode */
+ /*
+ * HW does not support the Wait_INT function
+ * during READ_BYTES transfer mode
+ */
#define READ_BYTES_WAIT_INT
#endif
@@ -101,18 +103,17 @@
#define TRANSPORT_NO_SENSE 2 /* Command failed, no auto-sense */
#define TRANSPORT_ERROR 3 /* Transport bad (i.e. device dead) */
-
-/*-----------------------------------
- Start-Stop-Unit
------------------------------------*/
+/*
+ * Start-Stop-Unit
+ */
#define STOP_MEDIUM 0x00 /* access disable */
#define MAKE_MEDIUM_READY 0x01 /* access enable */
#define UNLOAD_MEDIUM 0x02 /* unload */
#define LOAD_MEDIUM 0x03 /* load */
-/*-----------------------------------
- STANDARD_INQUIRY
------------------------------------*/
+/*
+ * STANDARD_INQUIRY
+ */
#define QULIFIRE 0x00
#define AENC_FNC 0x00
#define TRML_IOP 0x00
@@ -129,17 +130,15 @@
#define PRDCT_REV_LEN 4 /* Product LOT Length */
/* Dynamic flag definitions: used in set_bit() etc. */
-#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
-#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in
- * progress */
-#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect
- * in progress */
+#define RTSX_FLIDX_TRANS_ACTIVE 18 /* 0x00040000 transfer is active */
+#define RTSX_FLIDX_ABORTING 20 /* 0x00100000 abort is in progress */
+#define RTSX_FLIDX_DISCONNECTING 21 /* 0x00200000 disconnect in progress */
+
#define ABORTING_OR_DISCONNECTING ((1UL << US_FLIDX_ABORTING) | \
(1UL << US_FLIDX_DISCONNECTING))
-#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset
- * in progress */
-#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI
- * midlayer timed out */
+
+#define RTSX_FLIDX_RESETTING 22 /* 0x00400000 device reset in progress */
+#define RTSX_FLIDX_TIMED_OUT 23 /* 0x00800000 SCSI midlayer timed out */
#define DRCT_ACCESS_DEV 0x00 /* Direct Access Device */
#define RMB_DISC 0x80 /* The Device is Removable */
@@ -174,9 +173,9 @@
#define FIRST_RESET 0x01
#define USED_EXIST 0x02
-/*-----------------------------------
- SENSE_DATA
------------------------------------*/
+/*
+ * SENSE_DATA
+ */
/*---- valid ----*/
#define SENSE_VALID 0x80 /* Sense data is valid as SCSI2 */
#define SENSE_INVALID 0x00 /* Sense data is invalid as SCSI2 */
@@ -228,7 +227,6 @@
#define ASCQ_LOAD_EJCT_ERR 0x00
#define ASCQ_WRITE_PROTECT 0x00
-
struct sense_data_t {
unsigned char err_code; /* error code */
/* bit7 : valid */
@@ -268,22 +266,22 @@ struct sense_data_t {
#define TRIG_DMA (0x01 << 31)
/* Bus interrupt pending register */
-#define CMD_DONE_INT (1 << 31)
-#define DATA_DONE_INT (1 << 30)
-#define TRANS_OK_INT (1 << 29)
-#define TRANS_FAIL_INT (1 << 28)
-#define XD_INT (1 << 27)
-#define MS_INT (1 << 26)
-#define SD_INT (1 << 25)
-#define GPIO0_INT (1 << 24)
-#define OC_INT (1 << 23)
-#define SD_WRITE_PROTECT (1 << 19)
-#define XD_EXIST (1 << 18)
-#define MS_EXIST (1 << 17)
-#define SD_EXIST (1 << 16)
+#define CMD_DONE_INT BIT(31)
+#define DATA_DONE_INT BIT(30)
+#define TRANS_OK_INT BIT(29)
+#define TRANS_FAIL_INT BIT(28)
+#define XD_INT BIT(27)
+#define MS_INT BIT(26)
+#define SD_INT BIT(25)
+#define GPIO0_INT BIT(24)
+#define OC_INT BIT(23)
+#define SD_WRITE_PROTECT BIT(19)
+#define XD_EXIST BIT(18)
+#define MS_EXIST BIT(17)
+#define SD_EXIST BIT(16)
#define DELINK_INT GPIO0_INT
-#define MS_OC_INT (1 << 23)
-#define SD_OC_INT (1 << 22)
+#define MS_OC_INT BIT(23)
+#define SD_OC_INT BIT(22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
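
The interrupt-bit conversions above swap open-coded shifts for BIT(); in the kernel BIT(n) expands to (1UL << (n)), so the values are unchanged but the shift happens in unsigned long, avoiding the signed overflow that (1 << 31) would cause on a 32-bit int. A small sketch with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CMD_DONE_INT	BIT(31)		/* same value as (1UL << 31) */
#define DEMO_CARD_INT_MASK	(BIT(27) | BIT(26) | BIT(25))

static inline bool demo_card_irq_pending(u32 status)
{
	return status & DEMO_CARD_INT_MASK;
}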
@@ -305,7 +303,6 @@ struct sense_data_t {
#define MS_OC_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
-
#define READ_REG_CMD 0
#define WRITE_REG_CMD 1
#define CHECK_REG_CMD 2
@@ -313,7 +310,6 @@ struct sense_data_t {
#define HOST_TO_DEVICE 0
#define DEVICE_TO_HOST 1
-
#define RTSX_RESV_BUF_LEN 4096
#define HOST_CMDS_BUF_LEN 1024
#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
@@ -332,7 +328,6 @@ struct sense_data_t {
#define XD_FREE_TABLE_CNT 1200
#define MS_FREE_TABLE_CNT 512
-
/* Bit Operation */
#define SET_BIT(data, idx) ((data) |= 1 << (idx))
#define CLR_BIT(data, idx) ((data) &= ~(1 << (idx)))
@@ -618,7 +613,6 @@ struct spi_info {
int spi_clock;
};
-
#ifdef _MSG_TRACE
struct trace_msg_t {
u16 line;
@@ -689,7 +683,7 @@ struct trace_msg_t {
#define CLR_SDIO_IGNORED(chip) ((chip)->sdio_func_exist &= ~SDIO_IGNORED)
struct rtsx_chip {
- rtsx_dev_t *rtsx;
+ struct rtsx_dev *rtsx;
u32 int_reg; /* Bus interrupt pending register */
char max_lun;
@@ -712,9 +706,9 @@ struct rtsx_chip {
int cur_card;
unsigned long need_release; /* need release bit map */
- unsigned long need_reset; /* need reset
- * bit map */
- /* Flag to indicate that this card is just resumed from SS state,
+ unsigned long need_reset; /* need reset bit map */
+ /*
+ * Flag to indicate that this card has just resumed from SS state,
* and needs to be released before being reset
*/
unsigned long need_reinit;
@@ -732,8 +726,10 @@ struct rtsx_chip {
u8 card_ejected; /* card ejected bit map */
u8 card_wp; /* card write protected bit map */
- u8 lun_mc; /* flag to indicate whether to answer
- * MediaChange */
+ u8 lun_mc; /*
+ * flag to indicate whether to answer
+ * MediaChange
+ */
#ifndef LED_AUTO_BLINK
int led_toggle_counter;
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index d2031044ea34..becb4bba166c 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -484,14 +484,14 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
u8 card = get_lun_card(chip, lun);
bool pro_formatter_flag = false;
unsigned char inquiry_buf[] = {
- QULIFIRE|DRCT_ACCESS_DEV,
- RMB_DISC|0x0D,
+ QULIFIRE | DRCT_ACCESS_DEV,
+ RMB_DISC | 0x0D,
0x00,
0x01,
0x1f,
0x02,
0,
- REL_ADR|WBUS_32|WBUS_16|SYNC|LINKED|CMD_QUE|SFT_RE,
+ REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
};
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
@@ -558,7 +558,6 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_GOOD;
}
-
static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
unsigned int lun = SCSI_LUN(srb);
@@ -594,7 +593,6 @@ static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_ERROR;
}
-
static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
int prevent;
@@ -613,7 +611,6 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_GOOD;
}
-
static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct sense_data_t *sense;
@@ -1400,7 +1397,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf_len = 4 + ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) *
TRACE_ITEM_CNT);
- if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
set_sense_type(chip, SCSI_LUN(srb),
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
rtsx_trace(chip);
@@ -1521,7 +1518,7 @@ static int write_host_reg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- unsigned lun = SCSI_LUN(srb);
+ unsigned int lun = SCSI_LUN(srb);
if (srb->cmnd[3] == 1) {
/* Variable Clock */
@@ -1989,8 +1986,8 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- buf[2*i] = (u8)(val >> 8);
- buf[2*i+1] = (u8)val;
+ buf[2 * i] = (u8)(val >> 8);
+ buf[2 * i + 1] = (u8)val;
}
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
@@ -2048,7 +2045,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
for (i = 0; i < len / 2; i++) {
- val = ((u16)buf[2*i] << 8) | buf[2*i+1];
+ val = ((u16)buf[2 * i] << 8) | buf[2 * i + 1];
retval = rtsx_write_phy_register(chip, addr + i, val);
if (retval != STATUS_SUCCESS) {
vfree(buf);
@@ -2604,7 +2601,6 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return result;
}
-
static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
u8 rtsx_status[16];
@@ -3071,18 +3067,18 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[i++] = 0x80;
if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
/* System Information */
- memcpy(buf+i, ms_card->raw_sys_info, 96);
+ memcpy(buf + i, ms_card->raw_sys_info, 96);
} else {
/* Model Name */
- memcpy(buf+i, ms_card->raw_model_name, 48);
+ memcpy(buf + i, ms_card->raw_model_name, 48);
}
rtsx_stor_set_xfer_buf(buf, buf_len, srb);
if (dev_info_id == 0x15)
- scsi_set_resid(srb, scsi_bufflen(srb)-0x3C);
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
else
- scsi_set_resid(srb, scsi_bufflen(srb)-0x6C);
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
kfree(buf);
return STATUS_SUCCESS;
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index 0b6b4d4f1fea..f49bed9ec76a 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -28,8 +28,6 @@
#include "rtsx_chip.h"
#include "rtsx_card.h"
-typedef dma_addr_t ULONG_PTR;
-
static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
{
struct rtsx_dev *dev = chip->rtsx;
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 899bc2079dbe..479137398c3d 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -38,7 +38,6 @@ void rtsx_stor_get_xfer_buf(unsigned char *buffer,
unsigned int buflen, struct scsi_cmnd *srb);
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
-
#define rtsx_init_cmd(chip) ((chip)->ci = 0)
void rtsx_add_cmd(struct rtsx_chip *chip,
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 6219e047557e..b0bbb36f8988 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1428,7 +1428,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
continue;
}
-
if (func_to_switch)
break;
@@ -1437,9 +1436,9 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
func_to_switch);
#ifdef SUPPORT_SD_LOCK
- if ((sd_card->sd_lock_status & SD_SDR_RST)
- && (DDR50_SUPPORT == func_to_switch)
- && (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
+ if ((sd_card->sd_lock_status & SD_SDR_RST) &&
+ (func_to_switch == DDR50_SUPPORT) &&
+ (sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
func_to_switch = SDR50_SUPPORT;
dev_dbg(rtsx_dev(chip), "Using SDR50 instead of DDR50 for SD Lock\n");
}
@@ -2589,16 +2588,12 @@ Switch_Fail:
#endif
retval = sd_prepare_reset(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_dummy_clock(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
int rty_cnt = 0;
@@ -2606,8 +2601,7 @@ Switch_Fail:
for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
@@ -2619,8 +2613,7 @@ Switch_Fail:
dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
func_num);
chip->sd_io = 1;
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
break;
@@ -2638,10 +2631,8 @@ Switch_Fail:
RTY_SD_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
wait_timeout(20);
@@ -2659,10 +2650,8 @@ RTY_SD_RST:
retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
SD_RSP_TYPE_R0, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
wait_timeout(20);
}
@@ -2673,39 +2662,32 @@ RTY_SD_RST:
if (retval != STATUS_SUCCESS) {
if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
sd_set_err_code(chip, SD_NO_CARD);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
j++;
- if (j < 3) {
+ if (j < 3)
goto RTY_SD_RST;
- } else {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ else
+ goto Status_Fail;
}
retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
- if (k < 3) {
+ if (k < 3)
goto RTY_SD_RST;
- } else {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ else
+ goto Status_Fail;
}
i++;
wait_timeout(20);
} while (!(rsp[1] & 0x80) && (i < 255));
- if (i == 255) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (i == 255)
+ goto Status_Fail;
if (hi_cap_flow) {
if (rsp[1] & 0x40)
@@ -2722,26 +2704,20 @@ RTY_SD_RST:
if (support_1v8) {
retval = sd_voltage_switch(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
}
retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
for (i = 0; i < 3; i++) {
retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
SD_RSP_TYPE_R6, rsp, 5);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
sd_card->sd_addr = (u32)rsp[1] << 24;
sd_card->sd_addr += (u32)rsp[2] << 16;
@@ -2751,24 +2727,18 @@ RTY_SD_RST:
}
retval = sd_check_csd(chip, 1);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_select_card(chip, 1);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
#ifdef SUPPORT_SD_LOCK
SD_UNLOCK_ENTRY:
retval = sd_update_lock_status(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
if (sd_card->sd_lock_status & SD_LOCKED) {
sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
@@ -2780,32 +2750,24 @@ SD_UNLOCK_ENTRY:
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
if (support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
switch_bus_width = SD_BUS_WIDTH_4;
} else {
@@ -2814,16 +2776,12 @@ SD_UNLOCK_ENTRY:
retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
if (!(sd_card->raw_csd[4] & 0x40))
sd_dont_switch = true;
@@ -2862,17 +2820,13 @@ SD_UNLOCK_ENTRY:
if (!support_1v8) {
retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
SD_RSP_TYPE_R1, NULL, 0);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
}
#ifdef SUPPORT_SD_LOCK
@@ -2890,10 +2844,8 @@ SD_UNLOCK_ENTRY:
}
retval = sd_set_init_para(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
if (CHK_SD_DDR50(sd_card))
retval = sd_ddr_tuning(chip);
@@ -2902,14 +2854,11 @@ SD_UNLOCK_ENTRY:
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
} else {
retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
try_sdio = false;
sd20_mode = true;
@@ -2930,14 +2879,11 @@ SD_UNLOCK_ENTRY:
retval = sd_read_lba0(chip);
if (retval != STATUS_SUCCESS) {
if (sd20_mode) {
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
} else {
retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
try_sdio = false;
sd20_mode = true;
@@ -2948,10 +2894,8 @@ SD_UNLOCK_ENTRY:
}
retval = sd_check_wp_state(chip);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
@@ -2973,8 +2917,11 @@ SD_UNLOCK_ENTRY:
#endif
return STATUS_SUCCESS;
-}
+Status_Fail:
+ rtsx_trace(chip);
+ return STATUS_FAIL;
+}
static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
{
@@ -3105,7 +3052,6 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
return SWITCH_FAIL;
}
-
static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
{
struct sd_info *sd_card = &(chip->sd_card);
@@ -3230,7 +3176,6 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
return STATUS_SUCCESS;
}
-
static int reset_mmc(struct rtsx_chip *chip)
{
struct sd_info *sd_card = &(chip->sd_card);
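
The rts5208 sd.c hunks above all collapse the repeated `rtsx_trace(chip); return STATUS_FAIL;` pair into one Status_Fail label per function, so the trace happens once at a common exit. A minimal sketch of that shape, assuming the chip type and status macros from this driver; do_step_a() and do_step_b() are hypothetical stand-ins for the sd_* helpers:

/*
 * Sketch only: single trace-and-fail exit, as in reset_sd() above.
 * do_step_a()/do_step_b() are hypothetical placeholders.
 */
static int example_reset(struct rtsx_chip *chip)
{
	int retval;

	retval = do_step_a(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;

	retval = do_step_b(chip);
	if (retval != STATUS_SUCCESS)
		goto status_fail;

	return STATUS_SUCCESS;

status_fail:
	rtsx_trace(chip);	/* trace once, at the shared exit */
	return STATUS_FAIL;
}

The effect is purely structural: every early return still traces and still returns STATUS_FAIL, only the duplication goes away.
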
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index 26eb2a184f91..13c539c83838 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -39,7 +39,8 @@ static int spi_init(struct rtsx_chip *chip)
int retval;
retval = rtsx_write_register(chip, SPI_CONTROL, 0xFF,
- CS_POLARITY_LOW | DTO_MSB_FIRST | SPI_MASTER | SPI_MODE0 | SPI_AUTO);
+ CS_POLARITY_LOW | DTO_MSB_FIRST
+ | SPI_MASTER | SPI_MODE0 | SPI_AUTO);
if (retval) {
rtsx_trace(chip);
return retval;
diff --git a/drivers/staging/rts5208/spi.h b/drivers/staging/rts5208/spi.h
index fc824b5d8d59..c8d2beacd9e5 100644
--- a/drivers/staging/rts5208/spi.h
+++ b/drivers/staging/rts5208/spi.h
@@ -61,5 +61,4 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
int spi_erase_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip);
int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip);
-
#endif /* __REALTEK_RTSX_SPI_H */
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index fc1dfe0991d4..1de02bb98839 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -834,7 +834,6 @@ static int xd_check_data_blank(u8 *redunt)
!= (XD_ECC1_ALL1 | XD_ECC2_ALL1))
return 0;
-
for (i = 0; i < 4; i++) {
if (redunt[RESERVED0 + i] != 0xFF)
return 0;
@@ -938,7 +937,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
dev_dbg(rtsx_dev(chip), "Set unused block to index %d\n",
zone->set_index);
- zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
+ zone->free_table[zone->set_index++] = (u16)(phy_blk & 0x3ff);
if (zone->set_index >= XD_FREE_TABLE_CNT)
zone->set_index = 0;
zone->unused_blk_cnt++;
@@ -1402,7 +1401,6 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
return STATUS_FAIL;
}
-
static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
{
struct xd_info *xd_card = &(chip->xd_card);
@@ -1624,10 +1622,8 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
u8 reg_val, page_cnt;
int zone_no, retval, i;
- if (start_page > end_page) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (start_page > end_page)
+ goto Status_Fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1643,8 +1639,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
}
}
@@ -1679,8 +1674,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
} else {
rtsx_trace(chip);
goto Fail;
@@ -1713,8 +1707,7 @@ Fail:
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
xd_set_err_code(chip, XD_NO_CARD);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
xd_set_err_code(chip, XD_ECC_ERROR);
@@ -1722,8 +1715,7 @@ Fail:
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == NO_NEW_BLK) {
XD_CLR_BAD_OLDBLK(xd_card);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
retval = xd_copy_page(chip, phy_blk, new_blk, 0,
@@ -1737,8 +1729,7 @@ Fail:
XD_CLR_BAD_NEWBLK(xd_card);
}
XD_CLR_BAD_OLDBLK(xd_card);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
xd_erase_block(chip, phy_blk);
@@ -1746,6 +1737,7 @@ Fail:
XD_CLR_BAD_OLDBLK(xd_card);
}
+Status_Fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -1830,7 +1822,6 @@ static int xd_prepare_write(struct rtsx_chip *chip,
return STATUS_SUCCESS;
}
-
static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
u32 new_blk, u32 log_blk, u8 start_page,
u8 end_page, u8 *buf, unsigned int *index,
@@ -1845,10 +1836,8 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
dev_dbg(rtsx_dev(chip), "%s, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n",
__func__, old_blk, new_blk, log_blk);
- if (start_page > end_page) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (start_page > end_page)
+ goto Status_Fail;
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
@@ -1857,10 +1846,8 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
page_addr = (new_blk << xd_card->block_shift) + start_page;
retval = xd_send_cmd(chip, READ1_1);
- if (retval != STATUS_SUCCESS) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
+ if (retval != STATUS_SUCCESS)
+ goto Status_Fail;
rtsx_init_cmd(chip);
@@ -1895,8 +1882,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
if (retval == -ETIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
- rtsx_trace(chip);
- return STATUS_FAIL;
+ goto Status_Fail;
} else {
rtsx_trace(chip);
goto Fail;
@@ -1936,6 +1922,7 @@ Fail:
xd_mark_bad_block(chip, new_blk);
}
+Status_Fail:
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2000,7 +1987,6 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
return STATUS_FAIL;
}
-
if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
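
The xd.c hunks use the same idea but keep two labels, because these functions already had a recovery path: "Fail" does the block-copy/relink work, then falls through to the shared trace-and-fail exit. A sketch under that assumption; transfer_failed() and try_recover() are hypothetical stand-ins for the real checks and recovery code:

/*
 * Sketch of the two-label shape in xd_read_multiple_pages() /
 * xd_write_multiple_pages(). transfer_failed() and try_recover()
 * are hypothetical placeholders.
 */
static int example_xfer(struct rtsx_chip *chip)
{
	int retval;

	retval = xd_send_cmd(chip, READ1_1);
	if (retval != STATUS_SUCCESS)
		goto Status_Fail;	/* nothing to undo yet */

	if (transfer_failed(chip))
		goto Fail;		/* recovery still possible */

	return STATUS_SUCCESS;

Fail:
	try_recover(chip);		/* hypothetical recovery step */
Status_Fail:
	rtsx_trace(chip);
	return STATUS_FAIL;
}
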
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index cc0afeeb68c1..420546d43002 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -92,6 +92,7 @@ struct slic_rcvbuf_info {
u32 lasttime;
u32 lastid;
};
+
/*
* SLIC Handle structure. Used to restrict handle values to
* 32 bits by using an index rather than an address.
@@ -223,8 +224,8 @@ struct mcast_address {
struct slic_iface_stats {
/*
- * Stats
- */
+ * Stats
+ */
u64 xmt_bytes;
u64 xmt_ucast;
u64 xmt_mcast;
@@ -351,10 +352,35 @@ struct base_driver {
uint cardnuminuse[SLIC_MAX_CARDS];
};
-struct slic_shmem {
- volatile u32 isr;
- volatile u32 linkstatus;
- volatile struct slic_stats inicstats;
+struct slic_stats {
+ /* xmit stats */
+ u64 xmit_tcp_bytes;
+ u64 xmit_tcp_segs;
+ u64 xmit_bytes;
+ u64 xmit_collisions;
+ u64 xmit_unicasts;
+ u64 xmit_other_error;
+ u64 xmit_excess_collisions;
+ /* rcv stats */
+ u64 rcv_tcp_bytes;
+ u64 rcv_tcp_segs;
+ u64 rcv_bytes;
+ u64 rcv_unicasts;
+ u64 rcv_other_error;
+ u64 rcv_drops;
+};
+
+struct slic_shmem_data {
+ u32 isr;
+ u32 lnkstatus;
+ struct slic_stats stats;
+};
+
+struct slic_shmemory {
+ dma_addr_t isr_phaddr;
+ dma_addr_t lnkstatus_phaddr;
+ dma_addr_t stats_phaddr;
+ struct slic_shmem_data __iomem *shmem_data;
};
struct slic_upr {
@@ -414,10 +440,9 @@ struct adapter {
u32 intrregistered;
uint isp_initialized;
uint gennumber;
- struct slic_shmem *pshmem;
+ struct slic_shmemory shmem;
dma_addr_t phys_shmem;
- u32 isrcopy;
- __iomem struct slic_regs *slic_regs;
+ void __iomem *regs;
unsigned char state;
unsigned char linkstate;
unsigned char linkspeed;
@@ -444,8 +469,8 @@ struct adapter {
struct slic_cmdqueue cmdq_all;
struct slic_cmdqmem cmdqmem;
/*
- * SLIC Handles
- */
+ * SLIC Handles
+ */
/* Object handles*/
struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1];
/* Free object handles*/
@@ -487,6 +512,34 @@ struct adapter {
struct slicnet_stats slic_stats;
};
+static inline u32 slic_read32(struct adapter *adapter, unsigned int reg)
+{
+ return ioread32(adapter->regs + reg);
+}
+
+static inline void slic_write32(struct adapter *adapter, unsigned int reg,
+ u32 val)
+{
+ iowrite32(val, adapter->regs + reg);
+}
+
+static inline void slic_write64(struct adapter *adapter, unsigned int reg,
+ u32 val, u32 hiaddr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->bit64reglock, flags);
+ slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr);
+ slic_write32(adapter, reg, val);
+ mmiowb();
+ spin_unlock_irqrestore(&adapter->bit64reglock, flags);
+}
+
+static inline void slic_flush_write(struct adapter *adapter)
+{
+ ioread32(adapter->regs + SLIC_REG_HOSTID);
+}
+
#define UPDATE_STATS(largestat, newstat, oldstat) \
{ \
if ((newstat) < (oldstat)) \
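
slic.h now provides slic_read32()/slic_write32()/slic_write64() plus slic_flush_write(), so callers pass a SLIC_REG_* offset instead of dereferencing struct slic_regs fields. A usage sketch built only from the helpers added above; example_ack_interrupt() itself is hypothetical:

/*
 * Sketch only: reading and acknowledging the interrupt status via the
 * new accessors. example_ack_interrupt() is a hypothetical caller.
 */
static void example_ack_interrupt(struct adapter *adapter)
{
	u32 isr;

	isr = slic_read32(adapter, SLIC_REG_ISR);	/* read status */
	slic_write32(adapter, SLIC_REG_ISR, 0);		/* acknowledge */
	slic_flush_write(adapter);			/* post the write */
	(void)isr;
}

slic_flush_write() replaces the old FLUSH/mb() flag by doing a dummy read, which is why several hunks below add an explicit flush after the last write of a sequence.
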
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
index 9723b4a104f7..49cb91aa02bb 100644
--- a/drivers/staging/slicoss/slichw.h
+++ b/drivers/staging/slicoss/slichw.h
@@ -289,224 +289,118 @@ struct slic_rspbuf {
u32 pad2[4];
};
-struct slic_regs {
- u32 slic_reset; /* Reset Register */
- u32 pad0;
-
- u32 slic_icr; /* Interrupt Control Register */
- u32 pad2;
-#define SLIC_ICR 0x0008
-
- u32 slic_isp; /* Interrupt status pointer */
- u32 pad1;
-#define SLIC_ISP 0x0010
-
- u32 slic_isr; /* Interrupt status */
- u32 pad3;
-#define SLIC_ISR 0x0018
-
- u32 slic_hbar; /* Header buffer address reg */
- u32 pad4;
- /*
- * 31-8 - phy addr of set of contiguous hdr buffers
- * 7-0 - number of buffers passed
- * Buffers are 256 bytes long on 256-byte boundaries.
- */
-#define SLIC_HBAR 0x0020
-#define SLIC_HBAR_CNT_MSK 0x000000FF
-
- u32 slic_dbar; /* Data buffer handle & address reg */
- u32 pad5;
-
- /* 4 sets of registers; Buffers are 2K bytes long 2 per 4K page. */
-#define SLIC_DBAR 0x0028
-#define SLIC_DBAR_SIZE 2048
-
- u32 slic_cbar; /* Xmt Cmd buf addr regs.*/
- /*
- * 1 per XMT interface
- * 31-5 - phy addr of host command buffer
- * 4-0 - length of cmd in multiples of 32 bytes
- * Buffers are 32 bytes up to 512 bytes long
- */
-#define SLIC_CBAR 0x0030
-#define SLIC_CBAR_LEN_MSK 0x0000001F
-#define SLIC_CBAR_ALIGN 0x00000020
-
- u32 slic_wcs; /* write control store*/
-#define SLIC_WCS 0x0034
-#define SLIC_WCS_START 0x80000000 /*Start the SLIC (Jump to WCS)*/
-#define SLIC_WCS_COMPARE 0x40000000 /* Compare with value in WCS*/
-
- u32 slic_rbar; /* Response buffer address reg.*/
- u32 pad7;
- /*
- * 31-8 - phy addr of set of contiguous response buffers
- * 7-0 - number of buffers passed
- * Buffers are 32 bytes long on 32-byte boundaries.
- */
-#define SLIC_RBAR 0x0038
-#define SLIC_RBAR_CNT_MSK 0x000000FF
-#define SLIC_RBAR_SIZE 32
-
- u32 slic_stats; /* read statistics (UPR) */
- u32 pad8;
-#define SLIC_RSTAT 0x0040
-
- u32 slic_rlsr; /* read link status */
- u32 pad9;
-#define SLIC_LSTAT 0x0048
-
- u32 slic_wmcfg; /* Write Mac Config */
- u32 pad10;
-#define SLIC_WMCFG 0x0050
-
- u32 slic_wphy; /* Write phy register */
- u32 pad11;
-#define SLIC_WPHY 0x0058
-
- u32 slic_rcbar; /* Rcv Cmd buf addr reg */
- u32 pad12;
-#define SLIC_RCBAR 0x0060
-
- u32 slic_rconfig; /* Read SLIC Config*/
- u32 pad13;
-#define SLIC_RCONFIG 0x0068
-
- u32 slic_intagg; /* Interrupt aggregation time */
- u32 pad14;
-#define SLIC_INTAGG 0x0070
-
- u32 slic_wxcfg; /* Write XMIT config reg*/
- u32 pad16;
-#define SLIC_WXCFG 0x0078
-
- u32 slic_wrcfg; /* Write RCV config reg*/
- u32 pad17;
-#define SLIC_WRCFG 0x0080
-
- u32 slic_wraddral; /* Write rcv addr a low*/
- u32 pad18;
-#define SLIC_WRADDRAL 0x0088
-
- u32 slic_wraddrah; /* Write rcv addr a high*/
- u32 pad19;
-#define SLIC_WRADDRAH 0x0090
-
- u32 slic_wraddrbl; /* Write rcv addr b low*/
- u32 pad20;
-#define SLIC_WRADDRBL 0x0098
-
- u32 slic_wraddrbh; /* Write rcv addr b high*/
- u32 pad21;
-#define SLIC_WRADDRBH 0x00a0
-
- u32 slic_mcastlow; /* Low bits of mcast mask*/
- u32 pad22;
-#define SLIC_MCASTLOW 0x00a8
-
- u32 slic_mcasthigh; /* High bits of mcast mask*/
- u32 pad23;
-#define SLIC_MCASTHIGH 0x00b0
-
- u32 slic_ping; /* Ping the card*/
- u32 pad24;
-#define SLIC_PING 0x00b8
-
- u32 slic_dump_cmd; /* Dump command */
- u32 pad25;
-#define SLIC_DUMP_CMD 0x00c0
-
- u32 slic_dump_data; /* Dump data pointer */
- u32 pad26;
-#define SLIC_DUMP_DATA 0x00c8
-
- u32 slic_pcistatus; /* Read card's pci_status register */
- u32 pad27;
-#define SLIC_PCISTATUS 0x00d0
-
- u32 slic_wrhostid; /* Write hostid field */
- u32 pad28;
-#define SLIC_WRHOSTID 0x00d8
-#define SLIC_RDHOSTID_1GB 0x1554
-#define SLIC_RDHOSTID_2GB 0x1554
-
- u32 slic_low_power; /* Put card in a low power state */
- u32 pad29;
-#define SLIC_LOW_POWER 0x00e0
-
- u32 slic_quiesce; /* force slic into quiescent state
- * before soft reset
- */
- u32 pad30;
-#define SLIC_QUIESCE 0x00e8
-
- u32 slic_reset_iface;/* reset interface queues */
- u32 pad31;
-#define SLIC_RESET_IFACE 0x00f0
-
- u32 slic_addr_upper;/* Bits 63-32 for host i/f addrs */
- u32 pad32;
-#define SLIC_ADDR_UPPER 0x00f8 /*Register is only written when it has changed*/
-
- u32 slic_hbar64; /* 64 bit Header buffer address reg */
- u32 pad33;
-#define SLIC_HBAR64 0x0100
-
- u32 slic_dbar64; /* 64 bit Data buffer handle & address reg */
- u32 pad34;
-#define SLIC_DBAR64 0x0108
-
- u32 slic_cbar64; /* 64 bit Xmt Cmd buf addr regs. */
- u32 pad35;
-#define SLIC_CBAR64 0x0110
-
- u32 slic_rbar64; /* 64 bit Response buffer address reg.*/
- u32 pad36;
-#define SLIC_RBAR64 0x0118
-
- u32 slic_rcbar64; /* 64 bit Rcv Cmd buf addr reg*/
- u32 pad37;
-#define SLIC_RCBAR64 0x0120
-
- u32 slic_stats64; /* read statistics (64 bit UPR) */
- u32 pad38;
-#define SLIC_RSTAT64 0x0128
-
- u32 slic_rcv_wcs; /*Download Gigabit RCV sequencer ucode*/
- u32 pad39;
-#define SLIC_RCV_WCS 0x0130
-#define SLIC_RCVWCS_BEGIN 0x40000000
-#define SLIC_RCVWCS_FINISH 0x80000000
-
- u32 slic_wrvlanid; /* Write VlanId field */
- u32 pad40;
-#define SLIC_WRVLANID 0x0138
-
- u32 slic_read_xf_info; /* Read Transformer info */
- u32 pad41;
-#define SLIC_READ_XF_INFO 0x0140
-
- u32 slic_write_xf_info; /* Write Transformer info */
- u32 pad42;
-#define SLIC_WRITE_XF_INFO 0x0148
-
- u32 RSVD1; /* TOE Only */
- u32 pad43;
-
- u32 RSVD2; /* TOE Only */
- u32 pad44;
-
- u32 RSVD3; /* TOE Only */
- u32 pad45;
-
- u32 RSVD4; /* TOE Only */
- u32 pad46;
-
- u32 slic_ticks_per_sec; /* Write card ticks per second */
- u32 pad47;
-#define SLIC_TICKS_PER_SEC 0x0170
-};
+/* Reset Register */
+#define SLIC_REG_RESET 0x0000
+/* Interrupt Control Register */
+#define SLIC_REG_ICR 0x0008
+/* Interrupt status pointer */
+#define SLIC_REG_ISP 0x0010
+/* Interrupt status */
+#define SLIC_REG_ISR 0x0018
+/*
+ * Header buffer address reg
+ * 31-8 - phy addr of set of contiguous hdr buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 256 bytes long on 256-byte boundaries.
+ */
+#define SLIC_REG_HBAR 0x0020
+/*
+ * Data buffer handle & address reg
+ * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page.
+ */
+#define SLIC_REG_DBAR 0x0028
+/*
+ * Xmt Cmd buf addr regs.
+ * 1 per XMT interface
+ * 31-5 - phy addr of host command buffer
+ * 4-0 - length of cmd in multiples of 32 bytes
+ * Buffers are 32 bytes up to 512 bytes long
+ */
+#define SLIC_REG_CBAR 0x0030
+/* Write control store */
+#define SLIC_REG_WCS 0x0034
+/*
+ * Response buffer address reg.
+ * 31-8 - phy addr of set of contiguous response buffers
+ * 7-0 - number of buffers passed
+ * Buffers are 32 bytes long on 32-byte boundaries.
+ */
+#define SLIC_REG_RBAR 0x0038
+/* Read statistics (UPR) */
+#define SLIC_REG_RSTAT 0x0040
+/* Read link status */
+#define SLIC_REG_LSTAT 0x0048
+/* Write Mac Config */
+#define SLIC_REG_WMCFG 0x0050
+/* Write phy register */
+#define SLIC_REG_WPHY 0x0058
+/* Rcv Cmd buf addr reg */
+#define SLIC_REG_RCBAR 0x0060
+/* Read SLIC Config*/
+#define SLIC_REG_RCONFIG 0x0068
+/* Interrupt aggregation time */
+#define SLIC_REG_INTAGG 0x0070
+/* Write XMIT config reg */
+#define SLIC_REG_WXCFG 0x0078
+/* Write RCV config reg */
+#define SLIC_REG_WRCFG 0x0080
+/* Write rcv addr a low */
+#define SLIC_REG_WRADDRAL 0x0088
+/* Write rcv addr a high */
+#define SLIC_REG_WRADDRAH 0x0090
+/* Write rcv addr b low */
+#define SLIC_REG_WRADDRBL 0x0098
+/* Write rcv addr b high */
+#define SLIC_REG_WRADDRBH 0x00a0
+/* Low bits of mcast mask */
+#define SLIC_REG_MCASTLOW 0x00a8
+/* High bits of mcast mask */
+#define SLIC_REG_MCASTHIGH 0x00b0
+/* Ping the card */
+#define SLIC_REG_PING 0x00b8
+/* Dump command */
+#define SLIC_REG_DUMP_CMD 0x00c0
+/* Dump data pointer */
+#define SLIC_REG_DUMP_DATA 0x00c8
+/* Read card's pci_status register */
+#define SLIC_REG_PCISTATUS 0x00d0
+/* Write hostid field */
+#define SLIC_REG_WRHOSTID 0x00d8
+/* Put card in a low power state */
+#define SLIC_REG_LOW_POWER 0x00e0
+/* Force slic into quiescent state before soft reset */
+#define SLIC_REG_QUIESCE 0x00e8
+/* Reset interface queues */
+#define SLIC_REG_RESET_IFACE 0x00f0
+/*
+ * Register is only written when it has changed.
+ * Bits 63-32 for host i/f addrs.
+ */
+#define SLIC_REG_ADDR_UPPER 0x00f8
+/* 64 bit Header buffer address reg */
+#define SLIC_REG_HBAR64 0x0100
+/* 64 bit Data buffer handle & address reg */
+#define SLIC_REG_DBAR64 0x0108
+/* 64 bit Xmt Cmd buf addr regs. */
+#define SLIC_REG_CBAR64 0x0110
+/* 64 bit Response buffer address reg.*/
+#define SLIC_REG_RBAR64 0x0118
+/* 64 bit Rcv Cmd buf addr reg*/
+#define SLIC_REG_RCBAR64 0x0120
+/* Read statistics (64 bit UPR) */
+#define SLIC_REG_RSTAT64 0x0128
+/* Download Gigabit RCV sequencer ucode */
+#define SLIC_REG_RCV_WCS 0x0130
+/* Write VlanId field */
+#define SLIC_REG_WRVLANID 0x0138
+/* Read Transformer info */
+#define SLIC_REG_READ_XF_INFO 0x0140
+/* Write Transformer info */
+#define SLIC_REG_WRITE_XF_INFO 0x0148
+/* Write card ticks per second */
+#define SLIC_REG_TICKS_PER_SEC 0x0170
+
+#define SLIC_REG_HOSTID 0x1554
enum UPR_REQUEST {
SLIC_UPR_STATS,
@@ -565,85 +459,6 @@ struct slic_pnp_capabilities {
struct slicpm_wakeup_capabilities wakeup_capabilities;
};
-struct xmt_stats {
- u32 xmit_tcp_bytes;
- u32 xmit_tcp_segs;
- u32 xmit_bytes;
- u32 xmit_collisions;
- u32 xmit_unicasts;
- u32 xmit_other_error;
- u32 xmit_excess_collisions;
-};
-
-struct rcv_stats {
- u32 rcv_tcp_bytes;
- u32 rcv_tcp_segs;
- u32 rcv_bytes;
- u32 rcv_unicasts;
- u32 rcv_other_error;
- u32 rcv_drops;
-};
-
-struct xmt_statsgb {
- u64 xmit_tcp_bytes;
- u64 xmit_tcp_segs;
- u64 xmit_bytes;
- u64 xmit_collisions;
- u64 xmit_unicasts;
- u64 xmit_other_error;
- u64 xmit_excess_collisions;
-};
-
-struct rcv_statsgb {
- u64 rcv_tcp_bytes;
- u64 rcv_tcp_segs;
- u64 rcv_bytes;
- u64 rcv_unicasts;
- u64 rcv_other_error;
- u64 rcv_drops;
-};
-
-struct slic_stats {
- union {
- struct {
- struct xmt_stats xmt100;
- struct rcv_stats rcv100;
- } stats_100;
- struct {
- struct xmt_statsgb xmtGB;
- struct rcv_statsgb rcvGB;
- } stats_GB;
- } u;
-};
-
-#define xmit_tcp_segs100 u.stats_100.xmt100.xmit_tcp_segs
-#define xmit_tcp_bytes100 u.stats_100.xmt100.xmit_tcp_bytes
-#define xmit_bytes100 u.stats_100.xmt100.xmit_bytes
-#define xmit_collisions100 u.stats_100.xmt100.xmit_collisions
-#define xmit_unicasts100 u.stats_100.xmt100.xmit_unicasts
-#define xmit_other_error100 u.stats_100.xmt100.xmit_other_error
-#define xmit_excess_collisions100 u.stats_100.xmt100.xmit_excess_collisions
-#define rcv_tcp_segs100 u.stats_100.rcv100.rcv_tcp_segs
-#define rcv_tcp_bytes100 u.stats_100.rcv100.rcv_tcp_bytes
-#define rcv_bytes100 u.stats_100.rcv100.rcv_bytes
-#define rcv_unicasts100 u.stats_100.rcv100.rcv_unicasts
-#define rcv_other_error100 u.stats_100.rcv100.rcv_other_error
-#define rcv_drops100 u.stats_100.rcv100.rcv_drops
-#define xmit_tcp_segs_gb u.stats_GB.xmtGB.xmit_tcp_segs
-#define xmit_tcp_bytes_gb u.stats_GB.xmtGB.xmit_tcp_bytes
-#define xmit_bytes_gb u.stats_GB.xmtGB.xmit_bytes
-#define xmit_collisions_gb u.stats_GB.xmtGB.xmit_collisions
-#define xmit_unicasts_gb u.stats_GB.xmtGB.xmit_unicasts
-#define xmit_other_error_gb u.stats_GB.xmtGB.xmit_other_error
-#define xmit_excess_collisions_gb u.stats_GB.xmtGB.xmit_excess_collisions
-
-#define rcv_tcp_segs_gb u.stats_GB.rcvGB.rcv_tcp_segs
-#define rcv_tcp_bytes_gb u.stats_GB.rcvGB.rcv_tcp_bytes
-#define rcv_bytes_gb u.stats_GB.rcvGB.rcv_bytes
-#define rcv_unicasts_gb u.stats_GB.rcvGB.rcv_unicasts
-#define rcv_other_error_gb u.stats_GB.rcvGB.rcv_other_error
-#define rcv_drops_gb u.stats_GB.rcvGB.rcv_drops
-
struct slic_config_mac {
u8 macaddrA[6];
};
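
With slichw.h reduced to plain SLIC_REG_* offsets, 64-bit addresses are programmed through slic_write64(), which writes the upper half to SLIC_REG_ADDR_UPPER and the lower half to the target register while holding bit64reglock. A sketch of posting a response-buffer address that way; example_post_rbar() and dma_handle are hypothetical, the rest comes from the definitions above:

/*
 * Sketch only: programming a 64-bit DMA address via the new offset map.
 * example_post_rbar() and dma_handle are hypothetical.
 */
static void example_post_rbar(struct adapter *adapter, dma_addr_t dma_handle)
{
	/* upper 32 bits -> SLIC_REG_ADDR_UPPER, lower 32 -> target reg */
	slic_write64(adapter, SLIC_REG_RBAR64,
		     lower_32_bits(dma_handle) | SLIC_RSPQ_BUFSINPAGE,
		     upper_32_bits(dma_handle));
}
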
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index ac126d4f3117..062307ad7fed 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -124,31 +124,10 @@ static const struct pci_device_id slic_pci_tbl[] = {
{ 0 }
};
-static struct ethtool_ops slic_ethtool_ops;
+static const struct ethtool_ops slic_ethtool_ops;
MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
-{
- writel(value, reg);
- if (flush)
- mb();
-}
-
-static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg,
- u32 value, void __iomem *regh, u32 paddrh,
- bool flush)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->bit64reglock, flags);
- writel(paddrh, regh);
- writel(value, reg);
- if (flush)
- mb();
- spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-}
-
static void slic_mcast_set_bit(struct adapter *adapter, char *address)
{
unsigned char crcpoly;
@@ -172,8 +151,6 @@ static void slic_mcast_set_bit(struct adapter *adapter, char *address)
static void slic_mcast_set_mask(struct adapter *adapter)
{
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
-
if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
/*
* Turn on all multicast addresses. We have to do this for
@@ -181,18 +158,17 @@ static void slic_mcast_set_mask(struct adapter *adapter)
* Microcode from having to keep state about the MAC
* configuration.
*/
- slic_reg32_write(&slic_regs->slic_mcastlow, 0xFFFFFFFF, FLUSH);
- slic_reg32_write(&slic_regs->slic_mcasthigh, 0xFFFFFFFF,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF);
+ slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF);
} else {
/*
* Commit our multicast mast to the SLIC by writing to the
* multicast address mask registers
*/
- slic_reg32_write(&slic_regs->slic_mcastlow,
- (u32)(adapter->mcastmask & 0xFFFFFFFF), FLUSH);
- slic_reg32_write(&slic_regs->slic_mcasthigh,
- (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF), FLUSH);
+ slic_write32(adapter, SLIC_REG_MCASTLOW,
+ (u32)(adapter->mcastmask & 0xFFFFFFFF));
+ slic_write32(adapter, SLIC_REG_MCASTHIGH,
+ (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF));
}
}
@@ -208,13 +184,6 @@ static void slic_timer_ping(ulong dev)
add_timer(&adapter->pingtimer);
}
-static void slic_unmap_mmio_space(struct adapter *adapter)
-{
- if (adapter->slic_regs)
- iounmap(adapter->slic_regs);
- adapter->slic_regs = NULL;
-}
-
/*
* slic_link_config
*
@@ -224,7 +193,6 @@ static void slic_unmap_mmio_space(struct adapter *adapter)
static void slic_link_config(struct adapter *adapter,
u32 linkspeed, u32 linkduplex)
{
- u32 __iomem *wphy;
u32 speed;
u32 duplex;
u32 phy_config;
@@ -239,8 +207,6 @@ static void slic_link_config(struct adapter *adapter,
if (linkduplex > LINK_AUTOD)
linkduplex = LINK_AUTOD;
- wphy = &adapter->slic_regs->slic_wphy;
-
if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
/*
@@ -252,7 +218,7 @@ static void slic_link_config(struct adapter *adapter,
phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
/* enable PAUSE frames */
phy_advreg |= PAR_ASYMPAUSE_FIBER;
- slic_reg32_write(wphy, phy_advreg, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
if (linkspeed == LINK_AUTOSPEED) {
/* reset phy, enable auto-neg */
@@ -260,14 +226,17 @@ static void slic_link_config(struct adapter *adapter,
(MIICR_REG_PCR |
(PCR_RESET | PCR_AUTONEG |
PCR_AUTONEG_RST));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
} else { /* forced 1000 Mb FD*/
/*
* power down phy to break link
* this may not work)
*/
phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
+ slic_flush_write(adapter);
/*
* wait, Marvell says 1 sec,
* try to get away with 10 ms
@@ -282,7 +251,8 @@ static void slic_link_config(struct adapter *adapter,
(MIICR_REG_PCR |
(PCR_RESET | PCR_SPEED_1000 |
PCR_DUPLEX_FULL));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
}
} else { /* copper gigabit */
@@ -309,10 +279,10 @@ static void slic_link_config(struct adapter *adapter,
phy_advreg |= PAR_ASYMPAUSE;
/* required by the Cicada PHY */
phy_advreg |= PAR_802_3;
- slic_reg32_write(wphy, phy_advreg, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
/* advertise FD only @1000 Mb */
phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
- slic_reg32_write(wphy, phy_gctlreg, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg);
if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
/*
@@ -321,20 +291,23 @@ static void slic_link_config(struct adapter *adapter,
*/
phy_config =
(MIICR_REG_16 | (MRV_REG16_XOVERON));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
/* reset phy, enable auto-neg */
phy_config =
(MIICR_REG_PCR |
(PCR_RESET | PCR_AUTONEG |
PCR_AUTONEG_RST));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
} else { /* it's a Cicada PHY */
/* enable and restart auto-neg (don't reset) */
phy_config =
(MIICR_REG_PCR |
(PCR_AUTONEG | PCR_AUTONEG_RST));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY,
+ phy_config);
}
}
} else {
@@ -354,13 +327,13 @@ static void slic_link_config(struct adapter *adapter,
* disable auto crossover
*/
phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_config);
}
/* power down phy to break link (this may not work) */
phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
- slic_reg32_write(wphy, phy_config, FLUSH);
-
+ slic_write32(adapter, SLIC_REG_WPHY, phy_config);
+ slic_flush_write(adapter);
/* wait, Marvell says 1 sec, try to get away with 10 ms */
mdelay(10);
@@ -372,11 +345,11 @@ static void slic_link_config(struct adapter *adapter,
*/
phy_config =
(MIICR_REG_PCR | (PCR_RESET | speed | duplex));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_config);
} else { /* it's a Cicada PHY */
/* disable auto-neg, set speed, powerup */
phy_config = (MIICR_REG_PCR | (speed | duplex));
- slic_reg32_write(wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_config);
}
}
}
@@ -386,7 +359,6 @@ static int slic_card_download_gbrcv(struct adapter *adapter)
const struct firmware *fw;
const char *file = "";
int ret;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
u32 codeaddr;
u32 instruction;
int index = 0;
@@ -427,27 +399,28 @@ static int slic_card_download_gbrcv(struct adapter *adapter)
break;
}
/* start download */
- slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_BEGIN, FLUSH);
+ slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
/* download the rcv sequencer ucode */
for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
/* write out instruction address */
- slic_reg32_write(&slic_regs->slic_rcv_wcs, codeaddr, FLUSH);
+ slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr);
instruction = *(u32 *)(fw->data + index);
index += 4;
/* write out the instruction data low addr */
- slic_reg32_write(&slic_regs->slic_rcv_wcs, instruction, FLUSH);
+ slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
instruction = *(u8 *)(fw->data + index);
index++;
/* write out the instruction data high addr */
- slic_reg32_write(&slic_regs->slic_rcv_wcs, (u8)instruction,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
}
/* download finished */
release_firmware(fw);
- slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_FINISH, FLUSH);
+ slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
+ slic_flush_write(adapter);
+
return 0;
}
@@ -462,7 +435,6 @@ static int slic_card_download(struct adapter *adapter)
u32 section;
int thissectionsize;
int codeaddr;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
u32 instruction;
u32 baseaddress;
u32 i;
@@ -506,17 +478,17 @@ static int slic_card_download(struct adapter *adapter)
for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
/* Write out instruction address */
- slic_reg32_write(&slic_regs->slic_wcs,
- baseaddress + codeaddr, FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS,
+ baseaddress + codeaddr);
/* Write out instruction to low addr */
- slic_reg32_write(&slic_regs->slic_wcs,
- instruction, FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS,
+ instruction);
instruction = *(u32 *)(fw->data + index);
index += 4;
/* Write out instruction to high addr */
- slic_reg32_write(&slic_regs->slic_wcs,
- instruction, FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS,
+ instruction);
instruction = *(u32 *)(fw->data + index);
index += 4;
}
@@ -531,27 +503,25 @@ static int slic_card_download(struct adapter *adapter)
for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
/* Write out instruction address */
- slic_reg32_write(&slic_regs->slic_wcs,
- SLIC_WCS_COMPARE | (baseaddress + codeaddr),
- FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS,
+ SLIC_WCS_COMPARE | (baseaddress +
+ codeaddr));
/* Write out instruction to low addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS, instruction);
instruction = *(u32 *)(fw->data + index);
index += 4;
/* Write out instruction to high addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS, instruction);
instruction = *(u32 *)(fw->data + index);
index += 4;
-
}
}
release_firmware(fw);
/* Everything OK, kick off the card */
mdelay(10);
- slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_START, FLUSH);
+ slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START);
+ slic_flush_write(adapter);
/*
* stall for 20 ms, long enough for ucode to init card
* and reach mainloop
@@ -583,19 +553,21 @@ static void slic_adapter_set_hwaddr(struct adapter *adapter)
static void slic_intagg_set(struct adapter *adapter, u32 value)
{
- slic_reg32_write(&adapter->slic_regs->slic_intagg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_INTAGG, value);
adapter->card->loadlevel_current = value;
}
static void slic_soft_reset(struct adapter *adapter)
{
if (adapter->card->state == CARD_UP) {
- slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH);
+ slic_write32(adapter, SLIC_REG_QUIESCE, 0);
+ slic_flush_write(adapter);
mdelay(1);
}
- slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC);
+ slic_flush_write(adapter);
+
mdelay(1);
}
@@ -603,17 +575,16 @@ static void slic_mac_address_config(struct adapter *adapter)
{
u32 value;
u32 value2;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
- slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
- slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WRADDRAL, value);
+ slic_write32(adapter, SLIC_REG_WRADDRBL, value);
value2 = (u32)((adapter->currmacaddr[0] << 8 |
adapter->currmacaddr[1]) & 0xFFFF);
- slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH);
- slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH);
+ slic_write32(adapter, SLIC_REG_WRADDRAH, value2);
+ slic_write32(adapter, SLIC_REG_WRADDRBH, value2);
/*
* Write our multicast mask out to the card. This is done
@@ -626,7 +597,6 @@ static void slic_mac_address_config(struct adapter *adapter)
static void slic_mac_config(struct adapter *adapter)
{
u32 value;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
/* Setup GMAC gaps */
if (adapter->linkspeed == LINK_1000MB) {
@@ -650,7 +620,7 @@ static void slic_mac_config(struct adapter *adapter)
}
/* write mac config */
- slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WMCFG, value);
/* setup mac addresses */
slic_mac_address_config(adapter);
@@ -660,7 +630,6 @@ static void slic_config_set(struct adapter *adapter, bool linkchange)
{
u32 value;
u32 RcrReset;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
if (linkchange) {
/* Setup MAC */
@@ -677,7 +646,7 @@ static void slic_config_set(struct adapter *adapter, bool linkchange)
GXCR_XMTEN | /* Enable transmit */
GXCR_PAUSEEN); /* Enable pause */
- slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WXCFG, value);
/* Setup rcvcfg last */
value = (RcrReset | /* Reset, if linkchange */
@@ -690,7 +659,7 @@ static void slic_config_set(struct adapter *adapter, bool linkchange)
value = (GXCR_RESET | /* Always reset */
GXCR_XMTEN); /* Enable transmit */
- slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WXCFG, value);
/* Setup rcvcfg last */
value = (RcrReset | /* Reset, if linkchange */
@@ -707,7 +676,7 @@ static void slic_config_set(struct adapter *adapter, bool linkchange)
if (adapter->macopts & MAC_PROMISC)
value |= GRCR_RCVALL;
- slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WRCFG, value);
}
/*
@@ -717,24 +686,23 @@ static void slic_config_clear(struct adapter *adapter)
{
u32 value;
u32 phy_config;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
/* Setup xmtcfg */
value = (GXCR_RESET | /* Always reset */
GXCR_PAUSEEN); /* Enable pause */
- slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WXCFG, value);
value = (GRCR_RESET | /* Always reset */
GRCR_CTLEN | /* Enable CTL frames */
GRCR_ADDRAEN | /* Address A enable */
(GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
- slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
+ slic_write32(adapter, SLIC_REG_WRCFG, value);
/* power down phy */
phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
- slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH);
+ slic_write32(adapter, SLIC_REG_WPHY, phy_config);
}
static bool slic_mac_filter(struct adapter *adapter,
@@ -810,13 +778,11 @@ static void slic_timer_load_check(ulong cardaddr)
{
struct sliccard *card = (struct sliccard *)cardaddr;
struct adapter *adapter = card->master;
- u32 __iomem *intagg;
u32 load = card->events;
u32 level = 0;
if ((adapter) && (adapter->state == ADAPT_UP) &&
(card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
- intagg = &adapter->slic_regs->slic_intagg;
if (adapter->devid == SLIC_1GB_DEVICE_ID) {
if (adapter->linkspeed == LINK_1000MB)
level = 100;
@@ -836,7 +802,7 @@ static void slic_timer_load_check(ulong cardaddr)
}
if (card->loadlevel_current != level) {
card->loadlevel_current = level;
- slic_reg32_write(intagg, level, FLUSH);
+ slic_write32(adapter, SLIC_REG_INTAGG, level);
}
} else {
if (load > SLIC_LOAD_5)
@@ -853,7 +819,7 @@ static void slic_timer_load_check(ulong cardaddr)
level = SLIC_INTAGG_0;
if (card->loadlevel_current != level) {
card->loadlevel_current = level;
- slic_reg32_write(intagg, level, FLUSH);
+ slic_write32(adapter, SLIC_REG_INTAGG, level);
}
}
}
@@ -897,7 +863,6 @@ static int slic_upr_queue_request(struct adapter *adapter,
static void slic_upr_start(struct adapter *adapter)
{
struct slic_upr *upr;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
upr = adapter->upr_list;
if (!upr)
@@ -909,31 +874,27 @@ static void slic_upr_start(struct adapter *adapter)
switch (upr->upr_request) {
case SLIC_UPR_STATS:
if (upr->upr_data_h == 0) {
- slic_reg32_write(&slic_regs->slic_stats, upr->upr_data,
- FLUSH);
+ slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data);
} else {
- slic_reg64_write(adapter, &slic_regs->slic_stats64,
- upr->upr_data,
- &slic_regs->slic_addr_upper,
- upr->upr_data_h, FLUSH);
+ slic_write64(adapter, SLIC_REG_RSTAT64, upr->upr_data,
+ upr->upr_data_h);
}
break;
case SLIC_UPR_RLSR:
- slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data,
- &slic_regs->slic_addr_upper, upr->upr_data_h,
- FLUSH);
+ slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data,
+ upr->upr_data_h);
break;
case SLIC_UPR_RCONFIG:
- slic_reg64_write(adapter, &slic_regs->slic_rconfig,
- upr->upr_data, &slic_regs->slic_addr_upper,
- upr->upr_data_h, FLUSH);
+ slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data,
+ upr->upr_data_h);
break;
case SLIC_UPR_PING:
- slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH);
+ slic_write32(adapter, SLIC_REG_PING, 1);
break;
}
+ slic_flush_write(adapter);
}
static int slic_upr_request(struct adapter *adapter,
@@ -961,42 +922,34 @@ err_unlock_irq:
static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
{
- u32 linkstatus = adapter->pshmem->linkstatus;
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ u32 lst = sm_data->lnkstatus;
uint linkup;
unsigned char linkspeed;
unsigned char linkduplex;
if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
- struct slic_shmem *pshmem;
+ dma_addr_t phaddr = sm->lnkstatus_phaddr;
- pshmem = (struct slic_shmem *)(unsigned long)
- adapter->phys_shmem;
-#if BITS_PER_LONG == 64
- slic_upr_queue_request(adapter,
- SLIC_UPR_RLSR,
- SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
- SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
+ slic_upr_queue_request(adapter, SLIC_UPR_RLSR,
+ cpu_to_le32(lower_32_bits(phaddr)),
+ cpu_to_le32(upper_32_bits(phaddr)),
0, 0);
-#else
- slic_upr_queue_request(adapter,
- SLIC_UPR_RLSR,
- (u32)&pshmem->linkstatus,
- SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
-#endif
return;
}
if (adapter->state != ADAPT_UP)
return;
- linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN;
- if (linkstatus & GIG_SPEED_1000)
+ linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN;
+ if (lst & GIG_SPEED_1000)
linkspeed = LINK_1000MB;
- else if (linkstatus & GIG_SPEED_100)
+ else if (lst & GIG_SPEED_100)
linkspeed = LINK_100MB;
else
linkspeed = LINK_10MB;
- if (linkstatus & GIG_FULLDUPLEX)
+ if (lst & GIG_FULLDUPLEX)
linkduplex = LINK_FULLD;
else
linkduplex = LINK_HALFD;
@@ -1016,6 +969,7 @@ static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
/* link has gone from up to down */
if (linkup == LINK_DOWN) {
adapter->linkstate = LINK_DOWN;
+ netif_carrier_off(adapter->netdev);
return;
}
@@ -1027,7 +981,7 @@ static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
/* setup the mac */
slic_config_set(adapter, true);
adapter->linkstate = LINK_UP;
- netif_start_queue(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
}
}
@@ -1047,81 +1001,65 @@ static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
upr->next = NULL;
adapter->upr_busy = 0;
switch (upr->upr_request) {
- case SLIC_UPR_STATS:
- {
- struct slic_stats *slicstats =
- (struct slic_stats *)&adapter->pshmem->inicstats;
- struct slic_stats *newstats = slicstats;
- struct slic_stats *old = &adapter->inicstats_prev;
- struct slicnet_stats *stst = &adapter->slic_stats;
-
- if (isr & ISR_UPCERR) {
- dev_err(&adapter->netdev->dev,
- "SLIC_UPR_STATS command failed isr[%x]\n",
- isr);
-
- break;
- }
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs,
- newstats->xmit_tcp_segs_gb,
- old->xmit_tcp_segs_gb);
-
- UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes,
- newstats->xmit_tcp_bytes_gb,
- old->xmit_tcp_bytes_gb);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs,
- newstats->rcv_tcp_segs_gb,
- old->rcv_tcp_segs_gb);
-
- UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes,
- newstats->rcv_tcp_bytes_gb,
- old->rcv_tcp_bytes_gb);
-
- UPDATE_STATS_GB(stst->iface.xmt_bytes,
- newstats->xmit_bytes_gb,
- old->xmit_bytes_gb);
-
- UPDATE_STATS_GB(stst->iface.xmt_ucast,
- newstats->xmit_unicasts_gb,
- old->xmit_unicasts_gb);
-
- UPDATE_STATS_GB(stst->iface.rcv_bytes,
- newstats->rcv_bytes_gb,
- old->rcv_bytes_gb);
-
- UPDATE_STATS_GB(stst->iface.rcv_ucast,
- newstats->rcv_unicasts_gb,
- old->rcv_unicasts_gb);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors,
- newstats->xmit_collisions_gb,
- old->xmit_collisions_gb);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors,
- newstats->xmit_excess_collisions_gb,
- old->xmit_excess_collisions_gb);
-
- UPDATE_STATS_GB(stst->iface.xmt_errors,
- newstats->xmit_other_error_gb,
- old->xmit_other_error_gb);
-
- UPDATE_STATS_GB(stst->iface.rcv_errors,
- newstats->rcv_other_error_gb,
- old->rcv_other_error_gb);
-
- UPDATE_STATS_GB(stst->iface.rcv_discards,
- newstats->rcv_drops_gb,
- old->rcv_drops_gb);
-
- if (newstats->rcv_drops_gb > old->rcv_drops_gb) {
- adapter->rcv_drops +=
- (newstats->rcv_drops_gb -
- old->rcv_drops_gb);
- }
- memcpy(old, newstats, sizeof(struct slic_stats));
+ case SLIC_UPR_STATS: {
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+ struct slic_stats *stats = &sm_data->stats;
+ struct slic_stats *old = &adapter->inicstats_prev;
+ struct slicnet_stats *stst = &adapter->slic_stats;
+
+ if (isr & ISR_UPCERR) {
+ dev_err(&adapter->netdev->dev,
+ "SLIC_UPR_STATS command failed isr[%x]\n", isr);
break;
}
+
+ UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs,
+ old->xmit_tcp_segs);
+
+ UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes,
+ old->xmit_tcp_bytes);
+
+ UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs,
+ old->rcv_tcp_segs);
+
+ UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes,
+ old->rcv_tcp_bytes);
+
+ UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes,
+ old->xmit_bytes);
+
+ UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts,
+ old->xmit_unicasts);
+
+ UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes,
+ old->rcv_bytes);
+
+ UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts,
+ old->rcv_unicasts);
+
+ UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions,
+ old->xmit_collisions);
+
+ UPDATE_STATS_GB(stst->iface.xmt_errors,
+ stats->xmit_excess_collisions,
+ old->xmit_excess_collisions);
+
+ UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error,
+ old->xmit_other_error);
+
+ UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error,
+ old->rcv_other_error);
+
+ UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops,
+ old->rcv_drops);
+
+ if (stats->rcv_drops > old->rcv_drops)
+ adapter->rcv_drops += (stats->rcv_drops -
+ old->rcv_drops);
+ memcpy_fromio(old, stats, sizeof(*stats));
+ break;
+ }
case SLIC_UPR_RLSR:
slic_link_upr_complete(adapter, isr);
break;
@@ -1186,7 +1124,6 @@ static int slic_rspqueue_init(struct adapter *adapter)
{
int i;
struct slic_rspqueue *rspq = &adapter->rspqueue;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
u32 paddrh = 0;
memset(rspq, 0, sizeof(struct slic_rspqueue));
@@ -1205,14 +1142,12 @@ static int slic_rspqueue_init(struct adapter *adapter)
}
if (paddrh == 0) {
- slic_reg32_write(&slic_regs->slic_rbar,
- (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
- DONT_FLUSH);
+ slic_write32(adapter, SLIC_REG_RBAR,
+ rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE);
} else {
- slic_reg64_write(adapter, &slic_regs->slic_rbar64,
- (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
- &slic_regs->slic_addr_upper,
- paddrh, DONT_FLUSH);
+ slic_write64(adapter, SLIC_REG_RBAR64,
+ rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE,
+ paddrh);
}
}
rspq->offset = 0;
@@ -1233,9 +1168,9 @@ static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
rspq->rspbuf++;
} else {
- slic_reg64_write(adapter, &adapter->slic_regs->slic_rbar64,
- (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE),
- &adapter->slic_regs->slic_addr_upper, 0, DONT_FLUSH);
+ slic_write64(adapter, SLIC_REG_RBAR64,
+ rspq->paddr[rspq->pageindex] |
+ SLIC_RSPQ_BUFSINPAGE, 0);
rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
rspq->offset = 0;
rspq->rspbuf = (struct slic_rspbuf *)
@@ -1569,14 +1504,11 @@ retry_rcvqfill:
}
#endif
if (paddrh == 0) {
- slic_reg32_write(&adapter->slic_regs->slic_hbar,
- (u32)paddrl, DONT_FLUSH);
+ slic_write32(adapter, SLIC_REG_HBAR,
+ (u32)paddrl);
} else {
- slic_reg64_write(adapter,
- &adapter->slic_regs->slic_hbar64,
- paddrl,
- &adapter->slic_regs->slic_addr_upper,
- paddrh, DONT_FLUSH);
+ slic_write64(adapter, SLIC_REG_HBAR64, paddrl,
+ paddrh);
}
if (rcvq->head)
rcvq->tail->next = skb;
@@ -1698,14 +1630,10 @@ static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
}
- if (paddrh == 0) {
- slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl,
- DONT_FLUSH);
- } else {
- slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64,
- paddrl, &adapter->slic_regs->slic_addr_upper,
- paddrh, DONT_FLUSH);
- }
+ if (paddrh == 0)
+ slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl);
+ else
+ slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh);
if (rcvq->head)
rcvq->tail->next = skb;
else
@@ -1728,26 +1656,17 @@ static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
static int slic_link_event_handler(struct adapter *adapter)
{
int status;
- struct slic_shmem *pshmem;
+ struct slic_shmemory *sm = &adapter->shmem;
+ dma_addr_t phaddr = sm->lnkstatus_phaddr;
if (adapter->state != ADAPT_UP) {
/* Adapter is not operational. Ignore. */
return -ENODEV;
}
-
- pshmem = (struct slic_shmem *)(unsigned long)adapter->phys_shmem;
-
-#if BITS_PER_LONG == 64
- status = slic_upr_request(adapter,
- SLIC_UPR_RLSR,
- SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
- SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
- 0, 0);
-#else
+ /* no 4GB wrap guaranteed */
status = slic_upr_request(adapter, SLIC_UPR_RLSR,
- (u32)&pshmem->linkstatus, /* no 4GB wrap guaranteed */
- 0, 0, 0);
-#endif
+ cpu_to_le32(lower_32_bits(phaddr)),
+ cpu_to_le32(upper_32_bits(phaddr)), 0, 0);
return status;
}
@@ -1757,12 +1676,13 @@ static void slic_init_cleanup(struct adapter *adapter)
adapter->intrregistered = 0;
free_irq(adapter->netdev->irq, adapter->netdev);
}
- if (adapter->pshmem) {
- pci_free_consistent(adapter->pcidev,
- sizeof(struct slic_shmem),
- adapter->pshmem, adapter->phys_shmem);
- adapter->pshmem = NULL;
- adapter->phys_shmem = (dma_addr_t)(unsigned long)NULL;
+
+ if (adapter->shmem.shmem_data) {
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
+
+ pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data,
+ sm->isr_phaddr);
}
if (adapter->pingtimerset) {
@@ -2147,13 +2067,16 @@ static irqreturn_t slic_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct adapter *adapter = netdev_priv(dev);
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
u32 isr;
- if ((adapter->pshmem) && (adapter->pshmem->isr)) {
- slic_reg32_write(&adapter->slic_regs->slic_icr,
- ICR_INT_MASK, FLUSH);
- isr = adapter->isrcopy = adapter->pshmem->isr;
- adapter->pshmem->isr = 0;
+ if (sm_data->isr) {
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK);
+ slic_flush_write(adapter);
+
+ isr = sm_data->isr;
+ sm_data->isr = 0;
adapter->num_isrs++;
switch (adapter->card->state) {
case CARD_UP:
@@ -2169,10 +2092,9 @@ static irqreturn_t slic_interrupt(int irq, void *dev_id)
break;
}
- adapter->isrcopy = 0;
adapter->all_reg_writes += 2;
adapter->isr_reg_writes++;
- slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH);
+ slic_write32(adapter, SLIC_REG_ISR, 0);
} else {
adapter->false_interrupts++;
}
@@ -2224,13 +2146,11 @@ static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
}
#endif
if (hcmd->paddrh == 0) {
- slic_reg32_write(&adapter->slic_regs->slic_cbar,
- (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH);
+ slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl |
+ hcmd->cmdsize));
} else {
- slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64,
- (hcmd->paddrl | hcmd->cmdsize),
- &adapter->slic_regs->slic_addr_upper,
- hcmd->paddrh, DONT_FLUSH);
+ slic_write64(adapter, SLIC_REG_CBAR64,
+ hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh);
}
xmit_done:
return NETDEV_TX_OK;
@@ -2290,8 +2210,8 @@ static int slic_if_init(struct adapter *adapter, unsigned long *flags)
{
struct sliccard *card = adapter->card;
struct net_device *dev = adapter->netdev;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
- struct slic_shmem *pshmem;
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
int rc;
/* adapter should be down at this point */
@@ -2335,28 +2255,20 @@ static int slic_if_init(struct adapter *adapter, unsigned long *flags)
adapter->queues_initialized = 1;
}
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
+ slic_flush_write(adapter);
mdelay(1);
if (!adapter->isp_initialized) {
unsigned long flags;
- pshmem = (struct slic_shmem *)(unsigned long)
- adapter->phys_shmem;
-
spin_lock_irqsave(&adapter->bit64reglock, flags);
-
-#if BITS_PER_LONG == 64
- slic_reg32_write(&slic_regs->slic_addr_upper,
- SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp,
- SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
-#else
- slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr,
- FLUSH);
-#endif
+ slic_write32(adapter, SLIC_REG_ADDR_UPPER,
+ cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
+ slic_write32(adapter, SLIC_REG_ISP,
+ cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
spin_unlock_irqrestore(&adapter->bit64reglock, flags);
+
adapter->isp_initialized = 1;
}
@@ -2383,17 +2295,20 @@ static int slic_if_init(struct adapter *adapter, unsigned long *flags)
/*
* clear any pending events, then enable interrupts
*/
- adapter->isrcopy = 0;
- adapter->pshmem->isr = 0;
- slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH);
+ sm_data->isr = 0;
+ slic_write32(adapter, SLIC_REG_ISR, 0);
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON);
slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
+ slic_flush_write(adapter);
+
rc = slic_link_event_handler(adapter);
if (rc) {
/* disable interrupts then clear pending events */
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
- slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
+ slic_write32(adapter, SLIC_REG_ISR, 0);
+ slic_flush_write(adapter);
+
if (adapter->pingtimerset) {
del_timer(&adapter->pingtimer);
adapter->pingtimerset = 0;
@@ -2417,7 +2332,7 @@ static int slic_entry_open(struct net_device *dev)
unsigned long flags;
int status;
- netif_stop_queue(adapter->netdev);
+ netif_carrier_off(dev);
spin_lock_irqsave(&slic_global.driver_lock, flags);
if (!adapter->activated) {
@@ -2440,6 +2355,9 @@ static int slic_entry_open(struct net_device *dev)
spin_unlock:
spin_unlock_irqrestore(&slic_global.driver_lock, flags);
+
+ netif_start_queue(adapter->netdev);
+
return status;
}
@@ -2463,7 +2381,7 @@ static void slic_entry_remove(struct pci_dev *pcidev)
unregister_netdev(dev);
slic_adapter_freeresources(adapter);
- slic_unmap_mmio_space(adapter);
+ iounmap(adapter->regs);
/* free multicast addresses */
mlist = adapter->mcastaddrs;
@@ -2497,7 +2415,6 @@ static int slic_entry_halt(struct net_device *dev)
{
struct adapter *adapter = netdev_priv(dev);
struct sliccard *card = adapter->card;
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
unsigned long flags;
spin_lock_irqsave(&slic_global.driver_lock, flags);
@@ -2507,7 +2424,7 @@ static int slic_entry_halt(struct net_device *dev)
adapter->upr_list = NULL;
adapter->upr_busy = 0;
adapter->devflags_prev = 0;
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
adapter->all_reg_writes++;
adapter->icr_reg_writes++;
slic_config_clear(adapter);
@@ -2517,8 +2434,10 @@ static int slic_entry_halt(struct net_device *dev)
adapter->activated = 0;
}
#ifdef AUTOMATIC_RESET
- slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH);
+ slic_write32(adapter, SLIC_REG_RESET_IFACE, 0);
#endif
+ slic_flush_write(adapter);
+
/*
* Reset the adapter's cmd queues
*/
@@ -2530,6 +2449,9 @@ static int slic_entry_halt(struct net_device *dev)
#endif
spin_unlock_irqrestore(&slic_global.driver_lock, flags);
+
+ netif_carrier_off(dev);
+
return 0;
}
@@ -2661,14 +2583,14 @@ static void slic_config_pci(struct pci_dev *pcidev)
static int slic_card_init(struct sliccard *card, struct adapter *adapter)
{
- __iomem struct slic_regs *slic_regs = adapter->slic_regs;
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data = sm->shmem_data;
struct slic_eeprom *peeprom;
struct oslic_eeprom *pOeeprom;
dma_addr_t phys_config;
u32 phys_configh;
u32 phys_configl;
u32 i = 0;
- struct slic_shmem *pshmem;
int status;
uint macaddrs = card->card_size;
ushort eecodesize;
@@ -2695,27 +2617,26 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
sizeof(struct slic_eeprom),
&phys_config);
- phys_configl = SLIC_GET_ADDR_LOW(phys_config);
- phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
-
if (!peeprom) {
dev_err(&adapter->pcidev->dev,
"Failed to allocate DMA memory for EEPROM.\n");
return -ENOMEM;
}
+ phys_configl = SLIC_GET_ADDR_LOW(phys_config);
+ phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
+
memset(peeprom, 0, sizeof(struct slic_eeprom));
- slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
+ slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
+ slic_flush_write(adapter);
mdelay(1);
- pshmem = (struct slic_shmem *)(unsigned long)
- adapter->phys_shmem;
spin_lock_irqsave(&adapter->bit64reglock, flags);
- slic_reg32_write(&slic_regs->slic_addr_upper,
- SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp,
- SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
+ slic_write32(adapter, SLIC_REG_ADDR_UPPER,
+ cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
+ slic_write32(adapter, SLIC_REG_ISP,
+ cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
spin_unlock_irqrestore(&adapter->bit64reglock, flags);
status = slic_config_get(adapter, phys_configl, phys_configh);
@@ -2726,33 +2647,31 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
}
for (;;) {
- if (adapter->pshmem->isr) {
- if (adapter->pshmem->isr & ISR_UPC) {
- adapter->pshmem->isr = 0;
- slic_reg64_write(adapter,
- &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
- slic_reg32_write(&slic_regs->slic_isr,
- 0, FLUSH);
+ if (sm_data->isr) {
+ if (sm_data->isr & ISR_UPC) {
+ sm_data->isr = 0;
+ slic_write64(adapter, SLIC_REG_ISP, 0,
+ 0);
+ slic_write32(adapter, SLIC_REG_ISR, 0);
+ slic_flush_write(adapter);
slic_upr_request_complete(adapter, 0);
break;
}
- adapter->pshmem->isr = 0;
- slic_reg32_write(&slic_regs->slic_isr,
- 0, FLUSH);
+ sm_data->isr = 0;
+ slic_write32(adapter, SLIC_REG_ISR, 0);
+ slic_flush_write(adapter);
} else {
mdelay(1);
i++;
if (i > 5000) {
dev_err(&adapter->pcidev->dev,
"Fetch of config data timed out.\n");
- slic_reg64_write(adapter,
- &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
+ slic_write64(adapter, SLIC_REG_ISP,
+ 0, 0);
+ slic_flush_write(adapter);
+
status = -EINVAL;
goto card_init_err;
}
@@ -2796,7 +2715,6 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
/* see if the EEPROM is valid by checking it's checksum */
if ((eecodesize <= MAX_EECODE_SIZE) &&
(eecodesize >= MIN_EECODE_SIZE)) {
-
ee_chksum =
*(u16 *)((char *)peeprom + (eecodesize - 2));
/*
@@ -2830,9 +2748,8 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
peeprom, phys_config);
if (!card->config.EepromValid) {
- slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
- &slic_regs->slic_addr_upper,
- 0, FLUSH);
+ slic_write64(adapter, SLIC_REG_ISP, 0, 0);
+ slic_flush_write(adapter);
dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
return -EINVAL;
}
@@ -2896,14 +2813,17 @@ static void slic_init_driver(void)
}
}
-static void slic_init_adapter(struct net_device *netdev,
- struct pci_dev *pcidev,
- const struct pci_device_id *pci_tbl_entry,
- void __iomem *memaddr, int chip_idx)
+static int slic_init_adapter(struct net_device *netdev,
+ struct pci_dev *pcidev,
+ const struct pci_device_id *pci_tbl_entry,
+ void __iomem *memaddr, int chip_idx)
{
ushort index;
struct slic_handle *pslic_handle;
struct adapter *adapter = netdev_priv(netdev);
+ struct slic_shmemory *sm = &adapter->shmem;
+ struct slic_shmem_data *sm_data;
+ dma_addr_t phaddr;
/* adapter->pcidev = pcidev;*/
adapter->vendid = pci_tbl_entry->vendor;
@@ -2912,7 +2832,7 @@ static void slic_init_adapter(struct net_device *netdev,
adapter->busnumber = pcidev->bus->number;
adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
adapter->functionnumber = (pcidev->devfn & 0x7);
- adapter->slic_regs = memaddr;
+ adapter->regs = memaddr;
adapter->irq = pcidev->irq;
adapter->chipid = chip_idx;
adapter->port = 0;
@@ -2932,19 +2852,23 @@ static void slic_init_adapter(struct net_device *netdev,
*/
for (index = 1, pslic_handle = &adapter->slic_handles[1];
index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
-
pslic_handle->token.handle_index = index;
pslic_handle->type = SLIC_HANDLE_FREE;
pslic_handle->next = adapter->pfree_slic_handles;
adapter->pfree_slic_handles = pslic_handle;
}
- adapter->pshmem = (struct slic_shmem *)
- pci_alloc_consistent(adapter->pcidev,
- sizeof(struct slic_shmem),
- &adapter->
- phys_shmem);
- if (adapter->pshmem)
- memset(adapter->pshmem, 0, sizeof(struct slic_shmem));
+ sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data),
+ &phaddr);
+ if (!sm_data)
+ return -ENOMEM;
+
+ sm->shmem_data = sm_data;
+ sm->isr_phaddr = phaddr;
+ sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data,
+ lnkstatus);
+ sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats);
+
+ return 0;
}
static const struct net_device_ops slic_netdev_ops = {
@@ -2964,27 +2888,9 @@ static u32 slic_card_locate(struct adapter *adapter)
struct sliccard *card = slic_global.slic_card;
struct physcard *physcard = slic_global.phys_card;
ushort card_hostid;
- u16 __iomem *hostid_reg;
uint i;
- uint rdhostid_offset = 0;
-
- switch (adapter->devid) {
- case SLIC_2GB_DEVICE_ID:
- rdhostid_offset = SLIC_RDHOSTID_2GB;
- break;
- case SLIC_1GB_DEVICE_ID:
- rdhostid_offset = SLIC_RDHOSTID_1GB;
- break;
- default:
- return -ENODEV;
- }
- hostid_reg =
- (u16 __iomem *)(((u8 __iomem *)(adapter->slic_regs)) +
- rdhostid_offset);
-
- /* read the 16 bit hostid from SRAM */
- card_hostid = (ushort)readw(hostid_reg);
+ card_hostid = slic_read32(adapter, SLIC_REG_HOSTID);
/* Initialize a new card structure if need be */
if (card_hostid == SLIC_HOSTID_DEFAULT) {
@@ -3130,7 +3036,7 @@ static int slic_entry_probe(struct pci_dev *pcidev,
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
- memmapped_ioaddr = ioremap(mmio_start, mmio_len);
+ memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len);
if (!memmapped_ioaddr) {
dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
mmio_len, mmio_start);
@@ -3142,13 +3048,17 @@ static int slic_entry_probe(struct pci_dev *pcidev,
slic_init_driver();
- slic_init_adapter(netdev,
- pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
+ err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr,
+ cards_found);
+ if (err) {
+ dev_err(&pcidev->dev, "failed to init adapter: %i\n", err);
+ goto err_out_unmap;
+ }
err = slic_card_locate(adapter);
if (err) {
dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_out_unmap;
+ goto err_clean_init;
}
card = adapter->card;
@@ -3160,7 +3070,7 @@ static int slic_entry_probe(struct pci_dev *pcidev,
err = slic_card_init(card, adapter);
if (err)
- goto err_out_unmap;
+ goto err_clean_init;
slic_adapter_set_hwaddr(adapter);
@@ -3168,17 +3078,21 @@ static int slic_entry_probe(struct pci_dev *pcidev,
netdev->irq = adapter->irq;
netdev->netdev_ops = &slic_netdev_ops;
+ netif_carrier_off(netdev);
+
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err) {
dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unmap;
+ goto err_clean_init;
}
cards_found++;
return 0;
+err_clean_init:
+ slic_init_cleanup(adapter);
err_out_unmap:
iounmap(memmapped_ioaddr);
err_out_free_netdev:
@@ -3209,7 +3123,7 @@ static void __exit slic_module_cleanup(void)
pci_unregister_driver(&slic_driver);
}
-static struct ethtool_ops slic_ethtool_ops = {
+static const struct ethtool_ops slic_ethtool_ops = {
.get_coalesce = slic_get_coalesce,
.set_coalesce = slic_set_coalesce
};
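
The slicoss hunks above replace the old mapped register struct and shmem pointer with a single DMA-coherent slic_shmem_data block: per-field bus addresses are derived from one base with offsetof(), and 64-bit addresses are split into the two 32-bit register writes via upper_32_bits()/lower_32_bits(). A standalone sketch of that address arithmetic (illustrative types and a made-up base address, not the driver's own):

/* Standalone sketch (not kernel code): deriving per-field DMA addresses
 * from one base with offsetof(), and splitting a 64-bit address into the
 * two 32-bit halves written to SLIC_REG_ADDR_UPPER / SLIC_REG_ISP.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct shmem_data {			/* stand-in for struct slic_shmem_data */
	uint32_t isr;
	uint32_t lnkstatus;
	uint32_t stats[32];
};

int main(void)
{
	uint64_t base = 0x123456789abcull;	/* pretend DMA base address */
	uint64_t isr_addr = base + offsetof(struct shmem_data, isr);
	uint64_t lnk_addr = base + offsetof(struct shmem_data, lnkstatus);
	uint64_t stats_addr = base + offsetof(struct shmem_data, stats);

	/* equivalent of upper_32_bits() / lower_32_bits() */
	uint32_t hi = (uint32_t)(isr_addr >> 32);
	uint32_t lo = (uint32_t)(isr_addr & 0xffffffffu);

	printf("isr=%#llx lnk=%#llx stats=%#llx\n",
	       (unsigned long long)isr_addr,
	       (unsigned long long)lnk_addr,
	       (unsigned long long)stats_addr);
	printf("upper=%#x lower=%#x\n", hi, lo);
	return 0;
}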
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index f80ee776677f..839d6730bde9 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -6,11 +6,9 @@
#include "ddk750_chip.h"
#include "ddk750_power.h"
-/* n / d + 1 / 2 = (2n + d) / 2d */
-#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom)))
#define MHz(x) ((x) * 1000000)
-logical_chip_type_t getChipType(void)
+logical_chip_type_t sm750_get_chip_type(void)
{
unsigned short physicalID;
char physicalRev;
@@ -37,7 +35,7 @@ static unsigned int get_mxclk_freq(void)
unsigned int pll_reg;
unsigned int M, N, OD, POD;
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return MHz(130);
pll_reg = PEEK32(MXCLK_PLL_CTRL);
@@ -60,7 +58,7 @@ static void setChipClock(unsigned int frequency)
unsigned int ulActualMxClk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return;
if (frequency) {
@@ -71,9 +69,10 @@ static void setChipClock(unsigned int frequency)
pll.clockType = MXCLK_PLL;
/*
- * Call calcPllValue() to fill up the other fields for PLL structure.
- * Sometime, the chip cannot set up the exact clock required by User.
- * Return value from calcPllValue() gives the actual possible clock.
+	 * Call calcPllValue() to fill in the other fields of the PLL structure.
+	 * Sometimes the chip cannot set up the exact clock
+	 * required by the user.
+	 * The return value of calcPllValue() gives the actual possible clock.
*/
ulActualMxClk = calcPllValue(frequency, &pll);
@@ -86,18 +85,22 @@ static void setMemoryClock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
- if (getChipType() == SM750LE)
+ /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ * Nothing to set.
+ */
+ if (sm750_get_chip_type() == SM750LE)
return;
if (frequency) {
- /* Set the frequency to the maximum frequency that the DDR Memory can take
- which is 336MHz. */
+ /*
+ * Set the frequency to the maximum frequency
+		 * that the DDR memory can take, which is 336 MHz.
+ */
if (frequency > MHz(336))
frequency = MHz(336);
/* Calculate the divisor */
- divisor = roundedDiv(get_mxclk_freq(), frequency);
+ divisor = DIV_ROUND_CLOSEST(get_mxclk_freq(), frequency);
/* Set the corresponding divisor in the register. */
reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_M2XCLK_MASK;
@@ -133,18 +136,21 @@ static void setMasterClock(unsigned int frequency)
{
unsigned int reg, divisor;
- /* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
- if (getChipType() == SM750LE)
+ /* Cheok_0509: For SM750LE, the memory clock is fixed.
+ * Nothing to set.
+ */
+ if (sm750_get_chip_type() == SM750LE)
return;
if (frequency) {
- /* Set the frequency to the maximum frequency that the SM750 engine can
- run, which is about 190 MHz. */
+ /* Set the frequency to the maximum frequency
+ * that the SM750 engine can run, which is about 190 MHz.
+ */
if (frequency > MHz(190))
frequency = MHz(190);
/* Calculate the divisor */
- divisor = roundedDiv(get_mxclk_freq(), frequency);
+ divisor = DIV_ROUND_CLOSEST(get_mxclk_freq(), frequency);
/* Set the corresponding divisor in the register. */
reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_MCLK_MASK;
@@ -174,7 +180,7 @@ unsigned int ddk750_getVMSize(void)
unsigned int data;
/* sm750le only use 64 mb memory*/
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return SZ_64M;
/* for 750,always use power mode0*/
@@ -213,7 +219,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
setCurrentGate(reg);
- if (getChipType() != SM750LE) {
+ if (sm750_get_chip_type() != SM750LE) {
/* set panel pll and graphic mode via mmio_88 */
reg = PEEK32(VGA_CONFIGURATION);
reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
@@ -236,9 +242,10 @@ int ddk750_initHw(initchip_param_t *pInitParam)
setMasterClock(MHz(pInitParam->masterClock));
- /* Reset the memory controller. If the memory controller is not reset in SM750,
- the system might hang when sw accesses the memory.
- The memory should be resetted after changing the MXCLK.
+ /* Reset the memory controller.
+ * If the memory controller is not reset in SM750,
+ * the system might hang when sw accesses the memory.
+	 * The memory should be reset after changing the MXCLK.
*/
if (pInitParam->resetMemory == 1) {
reg = PEEK32(MISC_CTRL);
@@ -282,24 +289,27 @@ int ddk750_initHw(initchip_param_t *pInitParam)
}
/*
- monk liu @ 4/6/2011:
- re-write the calculatePLL function of ddk750.
- the original version function does not use some mathematics tricks and shortcut
- when it doing the calculation of the best N,M,D combination
- I think this version gives a little upgrade in speed
-
- 750 pll clock formular:
- Request Clock = (Input Clock * M )/(N * X)
-
- Input Clock = 14318181 hz
- X = 2 power D
- D ={0,1,2,3,4,5,6}
- M = {1,...,255}
- N = {2,...,15}
-*/
+ * monk liu @ 4/6/2011:
+ * re-write the calculatePLL function of ddk750.
+ * the original version of the function does not use
+ * some mathematical tricks and shortcuts
+ * when doing the calculation of the best N,M,D combination.
+ * I think this version gives a little upgrade in speed
+ *
+ * 750 pll clock formula:
+ * Request Clock = (Input Clock * M )/(N * X)
+ *
+ * Input Clock = 14318181 hz
+ * X = 2 power D
+ * D ={0,1,2,3,4,5,6}
+ * M = {1,...,255}
+ * N = {2,...,15}
+ */
unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
{
- /* as sm750 register definition, N located in 2,15 and M located in 1,255 */
+ /* as sm750 register definition,
+ * N located in 2,15 and M located in 1,255
+ */
int N, M, X, d;
int mini_diff;
unsigned int RN, quo, rem, fl_quo;
@@ -308,9 +318,11 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
const int max_OD = 3;
int max_d = 6;
- if (getChipType() == SM750LE) {
- /* SM750LE don't have prgrammable PLL and M/N values to work on.
- Just return the requested clock. */
+ if (sm750_get_chip_type() == SM750LE) {
+		/* SM750LE doesn't have
+ * programmable PLL and M/N values to work on.
+ * Just return the requested clock.
+ */
return request_orig;
}
@@ -319,19 +331,23 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
request = request_orig / 1000;
input = pll->inputFreq / 1000;
- /* for MXCLK register , no POD provided, so need be treated differently */
+ /* for MXCLK register,
+	 * no POD is provided, so it needs to be treated differently
+ */
if (pll->clockType == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
- /* RN will not exceed maximum long if @request <= 285 MHZ (for 32bit cpu) */
+ /* RN will not exceed maximum long
+	 * if @request <= 285 MHz (for 32bit cpu)
+ */
RN = N * request;
quo = RN / input;
rem = RN % input;/* rem always small than 14318181 */
fl_quo = (rem * 10000 / input);
for (d = max_d; d >= 0; d--) {
- X = (1 << d);
+ X = BIT(d);
M = quo * X;
M += fl_quo * X / 10000;
/* round step */
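
The chip.c hunks above drop the local roundedDiv() macro, (2n + d) / (2d), in favour of the kernel's DIV_ROUND_CLOSEST() when computing the M2XCLK/MCLK divisors. A standalone check (plain C, example clock values) that the two round-to-nearest forms agree for the unsigned values involved:

/* Standalone check (illustrative, not kernel code): the removed
 * roundedDiv(n, d) = (2n + d) / (2d) and round-to-nearest division in the
 * usual (n + d/2) / d form give the same result for unsigned operands.
 */
#include <stdio.h>

static unsigned int rounded_div(unsigned int num, unsigned int denom)
{
	return (2 * num + denom) / (2 * denom);	/* old roundedDiv() macro */
}

static unsigned int div_round_closest(unsigned int num, unsigned int denom)
{
	return (num + denom / 2) / denom;
}

int main(void)
{
	unsigned int mxclk = 290000000;	/* example MXCLK in Hz */
	unsigned int mem   = 336000000;	/* requested memory clock cap */
	unsigned int n, d;

	printf("divisor old=%u new=%u\n",
	       rounded_div(mxclk, mem), div_round_closest(mxclk, mem));

	/* brute-force agreement check over a small range */
	for (n = 0; n < 1000; n++)
		for (d = 1; d < 50; d++)
			if (rounded_div(n, d) != div_round_closest(n, d))
				printf("mismatch at n=%u d=%u\n", n, d);
	return 0;
}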
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 0891384ef3e5..14357fd1cc6b 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -69,7 +69,7 @@ typedef struct _initchip_param_t {
}
initchip_param_t;
-logical_chip_type_t getChipType(void);
+logical_chip_type_t sm750_get_chip_type(void);
unsigned int calcPllValue(unsigned int request, pll_value_t *pll);
unsigned int formatPllReg(pll_value_t *pPLL);
void ddk750_set_mmio(void __iomem *, unsigned short, char);
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index ca4973ee49e4..4023c476b9e4 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -68,8 +68,10 @@ static void waitNextVerticalSync(int ctrl, int delay)
if (!ctrl) {
/* primary controller */
- /* Do not wait when the Primary PLL is off or display control is already off.
- This will prevent the software to wait forever. */
+ /*
+ * Do not wait when the Primary PLL is off or display control is
+	 * already off. This prevents the software from waiting forever.
+ */
if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
!(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
return;
@@ -88,9 +90,10 @@ static void waitNextVerticalSync(int ctrl, int delay)
}
} else {
-
- /* Do not wait when the Primary PLL is off or display control is already off.
- This will prevent the software to wait forever. */
+ /*
+ * Do not wait when the Primary PLL is off or display control is
+		 * already off. This prevents the software from waiting forever.
+ */
if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
!(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
return;
@@ -134,7 +137,6 @@ static void swPanelPowerSequence(int disp, int delay)
reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
primaryWaitVerticalSync(delay);
-
}
void ddk750_setLogicalDispOut(disp_output_t output)
@@ -159,7 +161,6 @@ void ddk750_setLogicalDispOut(disp_output_t output)
/*se blank off */
reg &= ~CRT_DISPLAY_CTRL_BLANK;
POKE32(CRT_DISPLAY_CTRL, reg);
-
}
if (output & PRI_TP_USAGE) {
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index ca35aa1df9d7..e3fde428c52b 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -2,100 +2,98 @@
#define DDK750_DISPLAY_H__
/* panel path select
- 80000[29:28]
-*/
+ * 80000[29:28]
+ */
#define PNL_2_OFFSET 0
#define PNL_2_MASK (3 << PNL_2_OFFSET)
#define PNL_2_USAGE (PNL_2_MASK << 16)
-#define PNL_2_PRI ((0 << PNL_2_OFFSET)|PNL_2_USAGE)
-#define PNL_2_SEC ((2 << PNL_2_OFFSET)|PNL_2_USAGE)
+#define PNL_2_PRI ((0 << PNL_2_OFFSET) | PNL_2_USAGE)
+#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE)
/* primary timing & plane enable bit
- 1: 80000[8] & 80000[2] on
- 0: both off
-*/
+ * 1: 80000[8] & 80000[2] on
+ * 0: both off
+ */
#define PRI_TP_OFFSET 4
#define PRI_TP_MASK BIT(PRI_TP_OFFSET)
#define PRI_TP_USAGE (PRI_TP_MASK << 16)
-#define PRI_TP_ON ((0x1 << PRI_TP_OFFSET)|PRI_TP_USAGE)
-#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET)|PRI_TP_USAGE)
+#define PRI_TP_ON ((0x1 << PRI_TP_OFFSET) | PRI_TP_USAGE)
+#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
/* panel sequency status
- 80000[27:24]
-*/
+ * 80000[27:24]
+ */
#define PNL_SEQ_OFFSET 6
#define PNL_SEQ_MASK BIT(PNL_SEQ_OFFSET)
#define PNL_SEQ_USAGE (PNL_SEQ_MASK << 16)
-#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET)|PNL_SEQ_USAGE)
-#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET)|PNL_SEQ_USAGE)
+#define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
+#define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
/* dual digital output
- 80000[19]
-*/
+ * 80000[19]
+ */
#define DUAL_TFT_OFFSET 8
#define DUAL_TFT_MASK BIT(DUAL_TFT_OFFSET)
#define DUAL_TFT_USAGE (DUAL_TFT_MASK << 16)
-#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET)|DUAL_TFT_USAGE)
-#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET)|DUAL_TFT_USAGE)
+#define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
+#define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
/* secondary timing & plane enable bit
- 1:80200[8] & 80200[2] on
- 0: both off
-*/
+ * 1:80200[8] & 80200[2] on
+ * 0: both off
+ */
#define SEC_TP_OFFSET 5
#define SEC_TP_MASK BIT(SEC_TP_OFFSET)
#define SEC_TP_USAGE (SEC_TP_MASK << 16)
-#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET)|SEC_TP_USAGE)
-#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET)|SEC_TP_USAGE)
+#define SEC_TP_ON ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE)
+#define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE)
/* crt path select
- 80200[19:18]
-*/
+ * 80200[19:18]
+ */
#define CRT_2_OFFSET 2
#define CRT_2_MASK (3 << CRT_2_OFFSET)
#define CRT_2_USAGE (CRT_2_MASK << 16)
-#define CRT_2_PRI ((0x0 << CRT_2_OFFSET)|CRT_2_USAGE)
-#define CRT_2_SEC ((0x2 << CRT_2_OFFSET)|CRT_2_USAGE)
+#define CRT_2_PRI ((0x0 << CRT_2_OFFSET) | CRT_2_USAGE)
+#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
/* DAC affect both DVI and DSUB
- 4[20]
-*/
+ * 4[20]
+ */
#define DAC_OFFSET 7
#define DAC_MASK BIT(DAC_OFFSET)
#define DAC_USAGE (DAC_MASK << 16)
-#define DAC_ON ((0x0 << DAC_OFFSET)|DAC_USAGE)
-#define DAC_OFF ((0x1 << DAC_OFFSET)|DAC_USAGE)
+#define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE)
+#define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE)
/* DPMS only affect D-SUB head
- 0[31:30]
-*/
+ * 0[31:30]
+ */
#define DPMS_OFFSET 9
#define DPMS_MASK (3 << DPMS_OFFSET)
#define DPMS_USAGE (DPMS_MASK << 16)
-#define DPMS_OFF ((3 << DPMS_OFFSET)|DPMS_USAGE)
-#define DPMS_ON ((0 << DPMS_OFFSET)|DPMS_USAGE)
+#define DPMS_OFF ((3 << DPMS_OFFSET) | DPMS_USAGE)
+#define DPMS_ON ((0 << DPMS_OFFSET) | DPMS_USAGE)
-/*
- LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
- CRT means crt path DSUB
-*/
+/* LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
+ * CRT means crt path DSUB
+ */
typedef enum _disp_output_t {
- do_LCD1_PRI = PNL_2_PRI|PRI_TP_ON|PNL_SEQ_ON|DAC_ON,
- do_LCD1_SEC = PNL_2_SEC|SEC_TP_ON|PNL_SEQ_ON|DAC_ON,
- do_LCD2_PRI = CRT_2_PRI|PRI_TP_ON|DUAL_TFT_ON,
- do_LCD2_SEC = CRT_2_SEC|SEC_TP_ON|DUAL_TFT_ON,
- /*
- do_DSUB_PRI = CRT_2_PRI|PRI_TP_ON|DPMS_ON|DAC_ON,
- do_DSUB_SEC = CRT_2_SEC|SEC_TP_ON|DPMS_ON|DAC_ON,
- */
- do_CRT_PRI = CRT_2_PRI|PRI_TP_ON|DPMS_ON|DAC_ON,
- do_CRT_SEC = CRT_2_SEC|SEC_TP_ON|DPMS_ON|DAC_ON,
+ do_LCD1_PRI = PNL_2_PRI | PRI_TP_ON | PNL_SEQ_ON | DAC_ON,
+ do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON,
+ do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON,
+ do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON,
+ /* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
+ * do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON,
+ */
+ do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON,
+ do_CRT_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON | DAC_ON,
}
disp_output_t;
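
The *_USAGE macros above appear to mirror each field's mask into the upper 16 bits, so a combined disp_output_t value records both a field's setting and the fact that the field was specified (see the output & PRI_TP_USAGE test in ddk750_display.c). A small standalone sketch of that encoding, with the macros copied here only for illustration:

/* Illustrative sketch of the value/usage encoding: low 16 bits carry the
 * field value, high 16 bits carry the field's mask as a "was specified"
 * flag.
 */
#include <stdio.h>

#define PNL_2_OFFSET	0
#define PNL_2_MASK	(3 << PNL_2_OFFSET)
#define PNL_2_USAGE	(PNL_2_MASK << 16)
#define PNL_2_SEC	((2 << PNL_2_OFFSET) | PNL_2_USAGE)

#define PRI_TP_OFFSET	4
#define PRI_TP_MASK	(1 << PRI_TP_OFFSET)
#define PRI_TP_USAGE	(PRI_TP_MASK << 16)
#define PRI_TP_ON	((1 << PRI_TP_OFFSET) | PRI_TP_USAGE)

int main(void)
{
	unsigned int output = PNL_2_SEC | PRI_TP_ON;

	if (output & PNL_2_USAGE)	/* was the panel path specified? */
		printf("panel path select = %u\n",
		       (output & PNL_2_MASK) >> PNL_2_OFFSET);
	if (output & PRI_TP_USAGE)	/* primary timing/plane requested? */
		printf("primary timing/plane = %u\n",
		       (output & PRI_TP_MASK) >> PRI_TP_OFFSET);
	return 0;
}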
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index a4a255007c8d..8252f771ef9e 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -6,9 +6,11 @@
#include "ddk750_sii164.h"
-/* This global variable contains all the supported driver and its corresponding
- function API. Please set the function pointer to NULL whenever the function
- is not supported. */
+/*
+ * This global variable contains all the supported driver and its corresponding
+ * function API. Please set the function pointer to NULL whenever the function
+ * is not supported.
+ */
static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
#ifdef DVI_CTRL_SII164
{
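
The comment above describes a table of supported DVI controllers whose operations are function pointers, set to NULL whenever a controller does not implement them. A minimal standalone sketch of that pattern (names are illustrative, not the driver's own):

/* Function-pointer ops table with NULL-guarded dispatch. */
#include <stdio.h>
#include <stddef.h>

struct dvi_ctrl_ops {
	const char *name;
	int (*init)(void);
	void (*set_power)(int up);	/* optional: may be NULL */
};

static int sii164_init(void)
{
	printf("sii164 init\n");
	return 0;
}

static const struct dvi_ctrl_ops controllers[] = {
	{ .name = "sii164", .init = sii164_init, .set_power = NULL },
};

int main(void)
{
	const struct dvi_ctrl_ops *ops = &controllers[0];

	if (ops->init)
		ops->init();
	if (ops->set_power)		/* skipped: not supported */
		ops->set_power(1);
	return 0;
}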
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 39c3e1cdbc0c..d391c127ead7 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -21,7 +21,7 @@ unsigned char bus_speed_mode
POKE32(GPIO_MUX, value);
/* Enable Hardware I2C power.
- TODO: Check if we need to enable GPIO power?
+ * TODO: Check if we need to enable GPIO power?
*/
enableI2C(1);
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index ccb4e067661a..05b83646c2d5 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -4,15 +4,14 @@
#include "ddk750_mode.h"
#include "ddk750_chip.h"
-/*
- SM750LE only:
- This function takes care extra registers and bit fields required to set
- up a mode in SM750LE
-
- Explanation about Display Control register:
- HW only supports 7 predefined pixel clocks, and clock select is
- in bit 29:27 of Display Control register.
-*/
+/* SM750LE only:
+ * This function takes care of the extra registers and bit fields required
+ * to set up a mode in SM750LE
+ *
+ * Explanation about Display Control register:
+ * HW only supports 7 predefined pixel clocks, and clock select is
+ * in bit 29:27 of Display Control register.
+ */
static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, unsigned long dispControl)
{
unsigned long x, y;
@@ -21,9 +20,9 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
y = pModeParam->vertical_display_end;
/* SM750LE has to set up the top-left and bottom-right
- registers as well.
- Note that normal SM750/SM718 only use those two register for
- auto-centering mode.
+ * registers as well.
+ * Note that normal SM750/SM718 only use those two register for
+ * auto-centering mode.
*/
POKE32(CRT_AUTO_CENTERING_TL, 0);
@@ -33,8 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
/* Assume common fields in dispControl have been properly set before
- calling this function.
- This function only sets the extra fields in dispControl.
+ * calling this function.
+ * This function only sets the extra fields in dispControl.
*/
/* Clear bit 29:27 of display control register */
@@ -63,7 +62,7 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
/* Set bit 14 of display controller */
- dispControl = DISPLAY_CTRL_CLOCK_PHASE;
+ dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
POKE32(CRT_DISPLAY_CTRL, dispControl);
@@ -117,7 +116,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
if (pModeParam->horizontal_sync_polarity)
tmp |= DISPLAY_CTRL_HSYNC_PHASE;
- if (getChipType() == SM750LE) {
+ if (sm750_get_chip_type() == SM750LE) {
displayControlAdjust_SM750LE(pModeParam, tmp);
} else {
reg = PEEK32(CRT_DISPLAY_CTRL) &
@@ -209,7 +208,7 @@ int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
pll.clockType = clock;
uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
- if (getChipType() == SM750LE) {
+ if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
outb_p(0x06, 0x3d5);
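
The one-character change from '=' to '|=' above is a real bug fix: plain assignment would discard every CRT_DISPLAY_CTRL bit accumulated in dispControl earlier in the function, while OR-assignment only adds the clock-phase bit. A tiny standalone illustration (bit positions other than the clock-phase bit are made up):

#include <stdio.h>

#define DISPLAY_CTRL_TIMING		(1u << 8)	/* illustrative */
#define DISPLAY_CTRL_PLANE		(1u << 2)	/* illustrative */
#define DISPLAY_CTRL_CLOCK_PHASE	(1u << 14)

int main(void)
{
	unsigned int ctrl = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
	unsigned int wrong = ctrl, right = ctrl;

	wrong = DISPLAY_CTRL_CLOCK_PHASE;	/* old code: clobbers bits */
	right |= DISPLAY_CTRL_CLOCK_PHASE;	/* patched code: keeps them */

	printf("wrong=%#x right=%#x\n", wrong, right);
	return 0;
}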
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index b3c3791b95bd..7cc6169f884e 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -6,7 +6,7 @@ void ddk750_setDPMS(DPMS_t state)
{
unsigned int value;
- if (getChipType() == SM750LE) {
+ if (sm750_get_chip_type() == SM750LE) {
value = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK;
value |= (state << CRT_DISPLAY_CTRL_DPMS_SHIFT);
POKE32(CRT_DISPLAY_CTRL, value);
@@ -19,7 +19,7 @@ void ddk750_setDPMS(DPMS_t state)
static unsigned int getPowerMode(void)
{
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return 0;
return PEEK32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK;
}
@@ -35,7 +35,7 @@ void setPowerMode(unsigned int powerMode)
control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return;
switch (powerMode) {
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 67f36e71da7e..99a8683e6383 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -174,8 +174,8 @@ long sii164InitChip(
i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
/* De-skew enabled with default 111b value.
- This will fix some artifacts problem in some mode on board 2.2.
- Somehow this fix does not affect board 2.1.
+	 * This fixes some artifact problems in some modes on board 2.2.
+ * Somehow this fix does not affect board 2.1.
*/
if (deskewEnable == 0)
config = SII164_DESKEW_DISABLE;
@@ -344,7 +344,8 @@ void sii164EnableHotPlugDetection(
detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
/* Depending on each DVI controller, need to enable the hot plug based on each
- individual chip design. */
+ * individual chip design.
+ */
if (enableHotPlug != 0)
sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
else
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 8d644a7cba52..72a42330e7a1 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -89,12 +89,12 @@ static void sw_i2c_wait(void)
* always be non-zero,which makes the while loop
* never finish.
* use non-ultimate for loop below is safe
- * */
+ */
/* Change wait algorithm to use PCI bus clock,
- it's more reliable than counter loop ..
- write 0x61 to 0x3ce and read from 0x3cf
- */
+	 * it's more reliable than a counter loop:
+ * write 0x61 to 0x3ce and read from 0x3cf
+ */
int i, tmp;
for (i = 0; i < 600; i++) {
@@ -403,7 +403,7 @@ long sm750_sw_i2c_init(
if ((clk_gpio > 31) || (data_gpio > 31))
return -1;
- if (getChipType() == SM750LE)
+ if (sm750_get_chip_type() == SM750LE)
return sm750le_i2c_init(clk_gpio, data_gpio);
/* Initialize the GPIO pin for the i2c Clock Register */
@@ -501,8 +501,8 @@ long sm750_sw_i2c_write_reg(
sw_i2c_start();
/* Send the device address and read the data. All should return success
- in order for the writing processed to be successful
- */
+	 * in order for the write to be successful
+ */
if ((sw_i2c_write_byte(addr) != 0) ||
(sw_i2c_write_byte(reg) != 0) ||
(sw_i2c_write_byte(data) != 0)) {
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 6ed004e40855..7d90e250142c 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1176,7 +1176,7 @@ static int __init lynxfb_setup(char *options)
else {
strcat(tmp, opt);
tmp += strlen(opt);
- if (options != NULL)
+ if (options)
*tmp++ = ':';
else
*tmp++ = 0;
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index 8e70ce0d6da4..ff31c5c9cc6f 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -147,17 +147,17 @@ struct lynxfb_output {
int dpms;
int paths;
/* which paths(s) this output stands for,for sm750:
- paths=1:means output for panel paths
- paths=2:means output for crt paths
- paths=3:means output for both panel and crt paths
- */
+ * paths=1:means output for panel paths
+ * paths=2:means output for crt paths
+ * paths=3:means output for both panel and crt paths
+ */
int *channel;
/* which channel these outputs linked with,for sm750:
- *channel=0 means primary channel
- *channel=1 means secondary channel
- output->channel ==> &crtc->channel
- */
+ * *channel=0 means primary channel
+ * *channel=1 means secondary channel
+ * output->channel ==> &crtc->channel
+ */
void *priv;
int (*proc_setBLANK)(struct lynxfb_output*, int);
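
The paths comment above encodes the output routing as a small bitmask (1 = panel path, 2 = crt path, 3 = both), which is how hw_sm750_output_setMode tests output->paths. A tiny standalone sketch, with illustrative enum values:

#include <stdio.h>

enum { sm750_panel = 1, sm750_crt = 2 };	/* illustrative values */

int main(void)
{
	int paths = sm750_panel | sm750_crt;	/* paths == 3: both outputs */

	if (paths & sm750_panel)
		printf("drives the panel path\n");
	if (paths & sm750_crt)
		printf("drives the crt path\n");
	return 0;
}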
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 9aa4066ac86d..38adae6b5d83 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -67,7 +67,8 @@ void hw_de_init(struct lynx_accel *accel)
/* set2dformat only be called from setmode functions
* but if you need dual framebuffer driver,need call set2dformat
- * every time you use 2d function */
+ * every time you use 2d function
+ */
void hw_set2dformat(struct lynx_accel *accel, int fmt)
{
@@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel,
if (accel->de_wait() != 0) {
/* int time wait and always busy,seems hardware
- * got something error */
+ * got something error
+ */
pr_debug("De engine always busy\n");
return -1;
}
@@ -152,24 +154,26 @@ unsigned int rop2) /* ROP value */
/* Determine direction of operation */
if (sy < dy) {
/* +----------+
- |S |
- | +----------+
- | | | |
- | | | |
- +---|------+ |
- | D|
- +----------+ */
+ * |S |
+ * | +----------+
+ * | | | |
+ * | | | |
+ * +---|------+ |
+ * | D|
+ * +----------+
+ */
nDirection = BOTTOM_TO_TOP;
} else if (sy > dy) {
/* +----------+
- |D |
- | +----------+
- | | | |
- | | | |
- +---|------+ |
- | S|
- +----------+ */
+ * |D |
+ * | +----------+
+ * | | | |
+ * | | | |
+ * +---|------+ |
+ * | S|
+ * +----------+
+ */
nDirection = TOP_TO_BOTTOM;
} else {
@@ -177,22 +181,24 @@ unsigned int rop2) /* ROP value */
if (sx <= dx) {
/* +------+---+------+
- |S | | D|
- | | | |
- | | | |
- | | | |
- +------+---+------+ */
+ * |S | | D|
+ * | | | |
+ * | | | |
+ * | | | |
+ * +------+---+------+
+ */
nDirection = RIGHT_TO_LEFT;
} else {
/* sx > dx */
/* +------+---+------+
- |D | | S|
- | | | |
- | | | |
- | | | |
- +------+---+------+ */
+ * |D | | S|
+ * | | | |
+ * | | | |
+ * | | | |
+ * +------+---+------+
+ */
nDirection = LEFT_TO_RIGHT;
}
@@ -208,32 +214,36 @@ unsigned int rop2) /* ROP value */
}
/* Note:
- DE_FOREGROUND are DE_BACKGROUND are don't care.
- DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS are set by set deSetTransparency().
+	 * DE_FOREGROUND and DE_BACKGROUND are don't care.
+	 * DE_COLOR_COMPARE and DE_COLOR_COMPARE_MASK
+	 * are set by deSetTransparency().
*/
/* 2D Source Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ * It is an address offset (128 bit aligned)
+ * from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
/* 2D Destination Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ * It is an address offset (128 bit aligned)
+ * from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
/* Program pitch (distance between the 1st points of two adjacent lines).
- Note that input pitch is BYTE value, but the 2D Pitch register uses
- pixel values. Need Byte to pixel conversion.
- */
+ * Note that input pitch is BYTE value, but the 2D Pitch register uses
+ * pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
/* Screen Window width in Pixels.
- 2D engine uses this value to calculate the linear address in frame buffer for a given point.
- */
+ * 2D engine uses this value to calculate the linear address in frame buffer
+ * for a given point.
+ */
write_dpr(accel, DE_WINDOW_WIDTH,
((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
DE_WINDOW_WIDTH_DST_MASK) |
@@ -307,33 +317,37 @@ int hw_imageblit(struct lynx_accel *accel,
return -1;
/* 2D Source Base.
- Use 0 for HOST Blt.
+ * Use 0 for HOST Blt.
*/
write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
/* 2D Destination Base.
- It is an address offset (128 bit aligned) from the beginning of frame buffer.
+ * It is an address offset (128 bit aligned)
+ * from the beginning of frame buffer.
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
/* Program pitch (distance between the 1st points of two adjacent lines).
- Note that input pitch is BYTE value, but the 2D Pitch register uses
- pixel values. Need Byte to pixel conversion.
- */
+ * Note that input pitch is BYTE value, but the 2D Pitch register uses
+ * pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
/* Screen Window width in Pixels.
- 2D engine uses this value to calculate the linear address in frame buffer for a given point.
+ * 2D engine uses this value to calculate the linear address
+ * in frame buffer for a given point.
*/
write_dpr(accel, DE_WINDOW_WIDTH,
((dPitch / bytePerPixel << DE_WINDOW_WIDTH_DST_SHIFT) &
DE_WINDOW_WIDTH_DST_MASK) |
(dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
- /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
- For mono bitmap, use startBit for X_K1. */
+ /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
+ * and Y_K2 field is not used.
+ * For mono bitmap, use startBit for X_K1.
+ */
write_dpr(accel, DE_SOURCE,
(startBit << DE_SOURCE_X_K1_SHIFT) &
DE_SOURCE_X_K1_MONO_MASK); /* dpr00 */
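
The pitch comments above note that the driver receives pitches in bytes while the 2D registers take pixels, and that destination and source pitch share one register. A standalone sketch of that conversion and packing (the shift/mask values are illustrative stand-ins for the DE_PITCH_* macros):

#include <stdio.h>

#define PITCH_DST_SHIFT	16
#define PITCH_DST_MASK	0xffff0000u
#define PITCH_SRC_MASK	0x0000ffffu

int main(void)
{
	unsigned int bpp = 4;			/* 32-bit pixels */
	unsigned int d_pitch = 1024 * bpp;	/* byte pitches */
	unsigned int s_pitch = 800 * bpp;
	unsigned int reg;

	/* divide bytes down to pixels, then pack dst (high) and src (low) */
	reg = ((d_pitch / bpp << PITCH_DST_SHIFT) & PITCH_DST_MASK) |
	      (s_pitch / bpp & PITCH_SRC_MASK);

	printf("DE_PITCH = %#010x (dst=%u px, src=%u px)\n",
	       reg, (reg & PITCH_DST_MASK) >> PITCH_DST_SHIFT,
	       reg & PITCH_SRC_MASK);
	return 0;
}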
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 2daeedd88c30..7dd208caa5eb 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -35,17 +35,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
/* reserve the vidreg space of smi adaptor
- * if you do this, u need to add release region code
+ * if you do this, you need to add release region code
* in lynxfb_remove, or memory will not be mapped again
* successfully
- * */
+ */
ret = pci_request_region(pdev, 1, "sm750fb");
if (ret) {
pr_err("Can not request PCI regions.\n");
goto exit;
}
- /* now map mmio and vidmem*/
+ /* now map mmio and vidmem */
sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start,
sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
@@ -56,7 +56,6 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg);
}
-
sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
@@ -64,10 +63,10 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->vidmem_start = pci_resource_start(pdev, 0);
/* don't use pdev_resource[x].end - resource[x].start to
- * calculate the resource size,its only the maximum available
- * size but not the actual size,use
+	 * calculate the resource size; it's only the maximum available
+	 * size, not the actual size. Using the
+	 * @ddk750_getVMSize function is safer.
- * */
+ */
sm750_dev->vidmem_size = ddk750_getVMSize();
pr_info("video memory phyAddr = %lx, size = %u bytes\n",
sm750_dev->vidmem_start, sm750_dev->vidmem_size);
@@ -86,33 +85,31 @@ exit:
return ret;
}
-
-
int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
{
struct init_status *parm;
parm = &sm750_dev->initParm;
if (parm->chip_clk == 0)
- parm->chip_clk = (getChipType() == SM750LE) ?
+ parm->chip_clk = (sm750_get_chip_type() == SM750LE) ?
DEFAULT_SM750LE_CHIP_CLOCK :
DEFAULT_SM750_CHIP_CLOCK;
if (parm->mem_clk == 0)
parm->mem_clk = parm->chip_clk;
if (parm->master_clk == 0)
- parm->master_clk = parm->chip_clk/3;
+ parm->master_clk = parm->chip_clk / 3;
ddk750_initHw((initchip_param_t *)&sm750_dev->initParm);
- /* for sm718,open pci burst */
+ /* for sm718, open pci burst */
if (sm750_dev->devid == 0x718) {
POKE32(SYSTEM_CTRL,
PEEK32(SYSTEM_CTRL) | SYSTEM_CTRL_PCI_BURST);
}
- if (getChipType() != SM750LE) {
+ if (sm750_get_chip_type() != SM750LE) {
unsigned int val;
- /* does user need CRT ?*/
+ /* does user need CRT? */
if (sm750_dev->nocrt) {
POKE32(MISC_CTRL,
PEEK32(MISC_CTRL) | MISC_CTRL_DAC_POWER_OFF);
@@ -144,19 +141,21 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
POKE32(PANEL_DISPLAY_CTRL, val);
} else {
- /* for 750LE ,no DVI chip initialization makes Monitor no signal */
- /* Set up GPIO for software I2C to program DVI chip in the
- Xilinx SP605 board, in order to have video signal.
+		/* for 750LE, skipping DVI chip initialization
+		 * leaves the monitor with no signal
+ *
+ * Set up GPIO for software I2C to program DVI chip in the
+ * Xilinx SP605 board, in order to have video signal.
*/
sm750_sw_i2c_init(0, 1);
/* Customer may NOT use CH7301 DVI chip, which has to be
- initialized differently.
- */
+ * initialized differently.
+ */
if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
/* The following register values for CH7301 are from
- Chrontel app note and our experiment.
- */
+ * Chrontel app note and our experiment.
+ */
pr_info("yes,CH7301 DVI chip found\n");
sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
@@ -173,7 +172,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
int hw_sm750_output_setMode(struct lynxfb_output *output,
- struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix)
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix)
{
int ret;
disp_output_t dispSet;
@@ -183,8 +183,7 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
dispSet = 0;
channel = *output->channel;
-
- if (getChipType() != SM750LE) {
+ if (sm750_get_chip_type() != SM750LE) {
if (channel == sm750_primary) {
pr_info("primary channel\n");
if (output->paths & sm750_panel)
@@ -198,11 +197,10 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
dispSet |= do_LCD1_SEC;
if (output->paths & sm750_crt)
dispSet |= do_CRT_SEC;
-
}
ddk750_setLogicalDispOut(dispSet);
} else {
- /* just open DISPLAY_CONTROL_750LE register bit 3:0*/
+ /* just open DISPLAY_CONTROL_750LE register bit 3:0 */
u32 reg;
reg = PEEK32(DISPLAY_CONTROL_750LE);
@@ -214,7 +212,8 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
return ret;
}
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc, struct fb_var_screeninfo *var)
+int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var)
{
struct sm750_dev *sm750_dev;
struct lynxfb_par *par = container_of(crtc, struct lynxfb_par, crtc);
@@ -233,19 +232,15 @@ int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc, struct fb_var_screeninfo *
break;
default:
return -EINVAL;
-
}
return 0;
}
-
-/*
- set the controller's mode for @crtc charged with @var and @fix parameters
-*/
+/* set the controller's mode for @crtc charged with @var and @fix parameters */
int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
- struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix)
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix)
{
int ret, fmt;
u32 reg;
@@ -254,7 +249,6 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
struct sm750_dev *sm750_dev;
struct lynxfb_par *par;
-
ret = 0;
par = container_of(crtc, struct lynxfb_par, crtc);
sm750_dev = par->dev;
@@ -278,17 +272,22 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
/* set timing */
modparm.pixel_clock = ps_to_hz(var->pixclock);
- modparm.vertical_sync_polarity = (var->sync & FB_SYNC_HOR_HIGH_ACT) ? POS:NEG;
- modparm.horizontal_sync_polarity = (var->sync & FB_SYNC_VERT_HIGH_ACT) ? POS:NEG;
- modparm.clock_phase_polarity = (var->sync & FB_SYNC_COMP_HIGH_ACT) ? POS:NEG;
+ modparm.vertical_sync_polarity = (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ ? POS : NEG;
+ modparm.horizontal_sync_polarity = (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ ? POS : NEG;
+ modparm.clock_phase_polarity = (var->sync & FB_SYNC_COMP_HIGH_ACT)
+ ? POS : NEG;
modparm.horizontal_display_end = var->xres;
modparm.horizontal_sync_width = var->hsync_len;
modparm.horizontal_sync_start = var->xres + var->right_margin;
- modparm.horizontal_total = var->xres + var->left_margin + var->right_margin + var->hsync_len;
+ modparm.horizontal_total = var->xres + var->left_margin +
+ var->right_margin + var->hsync_len;
modparm.vertical_display_end = var->yres;
modparm.vertical_sync_height = var->vsync_len;
modparm.vertical_sync_start = var->yres + var->lower_margin;
- modparm.vertical_total = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
+ modparm.vertical_total = var->yres + var->upper_margin +
+ var->lower_margin + var->vsync_len;
/* choose pll */
if (crtc->channel != sm750_secondary)
@@ -304,12 +303,14 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
}
if (crtc->channel != sm750_secondary) {
- /* set pitch, offset ,width,start address ,etc... */
+ /* set pitch, offset, width, start address, etc... */
POKE32(PANEL_FB_ADDRESS,
crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,be aware of that */
+		/* crtc->channel is not numerically equal to par->index,
+		 * be aware of that
+ */
reg = ALIGN(reg, crtc->line_pad);
reg = (reg << PANEL_FB_WIDTH_WIDTH_SHIFT) &
PANEL_FB_WIDTH_WIDTH_MASK;
@@ -321,8 +322,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
reg |= (var->xoffset & PANEL_WINDOW_WIDTH_X_MASK);
POKE32(PANEL_WINDOW_WIDTH, reg);
- reg = ((var->yres_virtual - 1) <<
- PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT);
+ reg = (var->yres_virtual - 1) <<
+ PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT;
reg &= PANEL_WINDOW_HEIGHT_HEIGHT_MASK;
reg |= (var->yoffset & PANEL_WINDOW_HEIGHT_Y_MASK);
POKE32(PANEL_WINDOW_HEIGHT, reg);
@@ -341,7 +342,9 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
/* not implemented now */
POKE32(CRT_FB_ADDRESS, crtc->oScreen);
reg = var->xres * (var->bits_per_pixel >> 3);
- /* crtc->channel is not equal to par->index on numeric,be aware of that */
+		/* crtc->channel is not numerically equal to par->index,
+		 * be aware of that
+ */
reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
reg &= CRT_FB_WIDTH_WIDTH_MASK;
reg |= (fix->line_length & CRT_FB_WIDTH_OFFSET_MASK);
@@ -352,20 +355,19 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
reg |= ((var->bits_per_pixel >> 4) &
CRT_DISPLAY_CTRL_FORMAT_MASK);
POKE32(CRT_DISPLAY_CTRL, reg);
-
}
-
exit:
return ret;
}
int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index,
- ushort red, ushort green, ushort blue)
+ ushort red, ushort green, ushort blue)
{
static unsigned int add[] = {PANEL_PALETTE_RAM, CRT_PALETTE_RAM};
- POKE32(add[crtc->channel] + index*4, (red<<16)|(green<<8)|blue);
+ POKE32(add[crtc->channel] + index * 4,
+ (red << 16) | (green << 8) | blue);
return 0;
}
@@ -414,7 +416,9 @@ int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
{
unsigned int dpms, pps, crtdb;
- dpms = pps = crtdb = 0;
+ dpms = 0;
+ pps = 0;
+ crtdb = 0;
switch (blank) {
case FB_BLANK_UNBLANK:
@@ -461,14 +465,13 @@ int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
return 0;
}
-
void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
{
u32 reg;
enable2DEngine(1);
- if (getChipType() == SM750LE) {
+ if (sm750_get_chip_type() == SM750LE) {
reg = PEEK32(DE_STATE1);
reg |= DE_STATE1_DE_ABORT;
POKE32(DE_STATE1, reg);
@@ -509,7 +512,6 @@ int hw_sm750le_deWait(void)
return -1;
}
-
int hw_sm750_deWait(void)
{
int i = 0x10000000;
@@ -529,10 +531,10 @@ int hw_sm750_deWait(void)
}
int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
- const struct fb_var_screeninfo *var,
- const struct fb_info *info)
+ const struct fb_var_screeninfo *var,
+ const struct fb_info *info)
{
- uint32_t total;
+ u32 total;
/* check params */
if ((var->xoffset + var->xres > var->xres_virtual) ||
(var->yoffset + var->yres > var->yres_virtual)) {
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 84989711ae67..58abd1d85105 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -34,7 +34,7 @@ static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
synth_write(buf, bytes);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
- return (ssize_t) nbytes;
+ return (ssize_t)nbytes;
}
static ssize_t speakup_file_read(struct file *fp, char __user *buf,
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 528cbdce4227..e744aa9730ff 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -251,7 +251,7 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
}
cp += sprintf(cp, "0, %d\n", KEY_MAP_VER);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- return (int)(cp-buf);
+ return (int)(cp - buf);
}
/*
@@ -288,8 +288,8 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
cp = spk_s2uchar(cp, cp1);
cp1++;
}
- i = (int)cp1[-2]+1;
- i *= (int)cp1[-1]+1;
+ i = (int)cp1[-2] + 1;
+ i *= (int)cp1[-1] + 1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
i+SHIFT_TBL_SIZE+4 >= sizeof(spk_key_buf)) {
@@ -350,9 +350,9 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
} else {
shut = 0;
}
- if (ch&4)
+ if (ch & 4)
shut |= 0x40;
- if (ch&1)
+ if (ch & 1)
spk_shut_up |= shut;
else
spk_shut_up &= ~shut;
@@ -411,11 +411,13 @@ static ssize_t synth_direct_store(struct kobject *kobj,
int len;
int bytes;
const char *ptr = buf;
+ unsigned long flags;
if (!synth)
return -EPERM;
len = strlen(buf);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
while (len > 0) {
bytes = min_t(size_t, len, 250);
strncpy(tmp, ptr, bytes);
@@ -425,6 +427,7 @@ static ssize_t synth_direct_store(struct kobject *kobj,
ptr += bytes;
len -= bytes;
}
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
@@ -973,11 +976,11 @@ static struct attribute *i18n_attrs[] = {
* created for the attributes with the directory being the name of the
* attribute group.
*/
-static struct attribute_group main_attr_group = {
+static const struct attribute_group main_attr_group = {
.attrs = main_attrs,
};
-static struct attribute_group i18n_attr_group = {
+static const struct attribute_group i18n_attr_group = {
.attrs = i18n_attrs,
.name = "i18n",
};
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 4f462c35fdd9..54b2f3918628 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -18,7 +18,7 @@
#include "serialio.h"
#define MAXSYNTHS 16 /* Max number of synths in array. */
-static struct spk_synth *synths[MAXSYNTHS];
+static struct spk_synth *synths[MAXSYNTHS + 1];
struct spk_synth *synth;
char spk_pitch_buff[32] = "";
static int module_status;
@@ -407,7 +407,7 @@ static int do_synth_init(struct spk_synth *in_synth)
if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
if (synth->attributes.name
- && sysfs_create_group(speakup_kobj, &(synth->attributes)) < 0)
+ && sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
return -ENOMEM;
synth_flags = synth->flags;
wake_up_interruptible_all(&speakup_event);
@@ -429,7 +429,7 @@ void synth_release(void)
del_timer(&thread_timer);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->attributes.name)
- sysfs_remove_group(speakup_kobj, &(synth->attributes));
+ sysfs_remove_group(speakup_kobj, &synth->attributes);
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
spk_stop_serial_interrupt();
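
The synths[] resize above appears to leave room for a terminating NULL entry even when all MAXSYNTHS slots are in use. A standalone sketch of why a NULL-terminated registry needs N + 1 slots (not the speakup code itself):

#include <stdio.h>
#include <stddef.h>

#define MAXSYNTHS 16

struct synth { const char *name; };

static struct synth *synths[MAXSYNTHS + 1];	/* + 1 for the NULL sentinel */

static int register_synth(struct synth *s)
{
	int i;

	for (i = 0; i < MAXSYNTHS; i++) {
		if (!synths[i]) {
			synths[i] = s;	/* synths[MAXSYNTHS] stays NULL */
			return 0;
		}
	}
	return -1;			/* table full */
}

int main(void)
{
	static struct synth a = { "soft" }, b = { "dummy" };
	int i;

	register_synth(&a);
	register_synth(&b);
	for (i = 0; synths[i]; i++)	/* safe even when the table is full */
		printf("synth %d: %s\n", i, synths[i]->name);
	return 0;
}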
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index e1393d2a2b0f..21186e3dc7ad 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -276,7 +276,7 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
u_char *cp;
short mask = spk_punc_info[which].mask;
- if (how&1) {
+ if (how & 1) {
for (cp = (u_char *)spk_punc_info[3].value; *cp; cp++)
spk_chartab[*cp] &= ~mask;
}
@@ -290,14 +290,14 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
if (mask < PUNC) {
if (!(spk_chartab[*cp] & PUNC))
break;
- } else if (spk_chartab[*cp]&B_NUM)
+ } else if (spk_chartab[*cp] & B_NUM)
break;
}
if (*cp)
return -EINVAL;
cp = (u_char *)input;
}
- if (how&2) {
+ if (how & 2) {
for (; *cp; cp++)
if (*cp > SPACE)
spk_chartab[*cp] |= mask;
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index db4e6b28755b..259ef6487959 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -75,28 +75,6 @@ enum channel_clientstate {
/* access channel anytime */
};
-static inline const u8 *
-ULTRA_CHANNELCLI_STRING(u32 state)
-{
- switch (state) {
- case CHANNELCLI_DETACHED:
- return (const u8 *)("DETACHED");
- case CHANNELCLI_DISABLED:
- return (const u8 *)("DISABLED");
- case CHANNELCLI_ATTACHING:
- return (const u8 *)("ATTACHING");
- case CHANNELCLI_ATTACHED:
- return (const u8 *)("ATTACHED");
- case CHANNELCLI_BUSY:
- return (const u8 *)("BUSY");
- case CHANNELCLI_OWNED:
- return (const u8 *)("OWNED");
- default:
- break;
- }
- return (const u8 *)("?");
-}
-
#define SPAR_CHANNEL_SERVER_READY(ch) \
(readl(&(ch)->srv_state) == CHANNELSRV_READY)
@@ -132,22 +110,6 @@ ULTRA_CHANNELCLI_STRING(u32 state)
/* throttling invalid boot channel statetransition error due to busy channel */
#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
-/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorOS: */
-/* throttling invalid guest OS channel statetransition error due to
- * client disabled
- */
-#define ULTRA_CLIERROROS_THROTTLEMSG_DISABLED 0x01
-
-/* throttling invalid guest OS channel statetransition error due to
- * client not attached
- */
-#define ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED 0x02
-
-/* throttling invalid guest OS channel statetransition error due to
- * busy channel
- */
-#define ULTRA_CLIERROROS_THROTTLEMSG_BUSY 0x04
-
/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so
* that windows guest can look at the FeatureFlags in the io channel,
* and configure the windows driver to use interrupts or not based on
@@ -347,148 +309,6 @@ static inline int spar_check_channel_server(uuid_le typeuuid, char *name,
return 1;
}
-/* Given a file pathname <s> (with '/' or '\' separating directory nodes),
- * returns a pointer to the beginning of a node within that pathname such
- * that the number of nodes from that pointer to the end of the string is
- * NOT more than <n>. Note that if the pathname has less than <n> nodes
- * in it, the return pointer will be to the beginning of the string.
- */
-static inline u8 *
-pathname_last_n_nodes(u8 *s, unsigned int n)
-{
- u8 *p = s;
- unsigned int node_count = 0;
-
- while (*p != '\0') {
- if ((*p == '/') || (*p == '\\'))
- node_count++;
- p++;
- }
- if (node_count <= n)
- return s;
- while (n > 0) {
- p--;
- if (p == s)
- break; /* should never happen, unless someone
- * is changing the string while we are
- * looking at it!!
- */
- if ((*p == '/') || (*p == '\\'))
- n--;
- }
- return p + 1;
-}
-
-static inline int
-spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
-{
- struct channel_header __iomem *hdr = ch;
-
- if (readl(&hdr->cli_state_os) == CHANNELCLI_DISABLED) {
- if ((readb(&hdr->cli_error_os)
- & ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) {
- /* we are NOT throttling this message */
- writeb(readb(&hdr->cli_error_os) |
- ULTRA_CLIERROROS_THROTTLEMSG_DISABLED,
- &hdr->cli_error_os);
- /* throttle until acquire successful */
-
- pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED\n",
- id);
- }
- return 0;
- }
- if ((readl(&hdr->cli_state_os) != CHANNELCLI_OWNED) &&
- (readl(&hdr->cli_state_boot) == CHANNELCLI_DISABLED)) {
- /* Our competitor is DISABLED, so we can transition to OWNED */
- pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d)\n",
- id, "cli_state_os",
- ULTRA_CHANNELCLI_STRING(readl(&hdr->cli_state_os)),
- readl(&hdr->cli_state_os),
- ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
- CHANNELCLI_OWNED);
- writel(CHANNELCLI_OWNED, &hdr->cli_state_os);
- mb(); /* required for channel synch */
- }
- if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
- if (readb(&hdr->cli_error_os)) {
- /* we are in an error msg throttling state;
- * come out of it
- */
- pr_info("%s Channel OS client acquire now successful\n",
- id);
- writeb(0, &hdr->cli_error_os);
- }
- return 1;
- }
-
- /* We have to do it the "hard way". We transition to BUSY,
- * and can use the channel iff our competitor has not also
- * transitioned to BUSY.
- */
- if (readl(&hdr->cli_state_os) != CHANNELCLI_ATTACHED) {
- if ((readb(&hdr->cli_error_os)
- & ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) {
- /* we are NOT throttling this message */
- writeb(readb(&hdr->cli_error_os) |
- ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED,
- &hdr->cli_error_os);
- /* throttle until acquire successful */
- pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d))\n",
- id, ULTRA_CHANNELCLI_STRING(
- readl(&hdr->cli_state_os)),
- readl(&hdr->cli_state_os));
- }
- return 0;
- }
- writel(CHANNELCLI_BUSY, &hdr->cli_state_os);
- mb(); /* required for channel synch */
- if (readl(&hdr->cli_state_boot) == CHANNELCLI_BUSY) {
- if ((readb(&hdr->cli_error_os)
- & ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) {
- /* we are NOT throttling this message */
- writeb(readb(&hdr->cli_error_os) |
- ULTRA_CLIERROROS_THROTTLEMSG_BUSY,
- &hdr->cli_error_os);
- /* throttle until acquire successful */
- pr_info("%s Channel StateTransition failed - host OS acquire failed because boot BUSY\n",
- id);
- }
- /* reset busy */
- writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os);
- mb(); /* required for channel synch */
- return 0;
- }
- if (readb(&hdr->cli_error_os)) {
- /* we are in an error msg throttling state; come out of it */
- pr_info("%s Channel OS client acquire now successful\n", id);
- writeb(0, &hdr->cli_error_os);
- }
- return 1;
-}
-
-static inline void
-spar_channel_client_release_os(void __iomem *ch, u8 *id)
-{
- struct channel_header __iomem *hdr = ch;
-
- if (readb(&hdr->cli_error_os)) {
- /* we are in an error msg throttling state; come out of it */
- pr_info("%s Channel OS client error state cleared\n", id);
- writeb(0, &hdr->cli_error_os);
- }
- if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED)
- return;
- if (readl(&hdr->cli_state_os) != CHANNELCLI_BUSY) {
- pr_info("%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d))\n",
- id, ULTRA_CHANNELCLI_STRING(
- readl(&hdr->cli_state_os)),
- readl(&hdr->cli_state_os));
- /* return; */
- }
- writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os); /* release busy */
-}
-
/*
* Routine Description:
* Tries to insert the prebuilt signal pointed to by pSignal into the nth
@@ -569,4 +389,45 @@ unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
u32 queue);
+/*
+ * CHANNEL Guids
+ */
+
+/* {414815ed-c58c-11da-95a9-00e08161165f} */
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x414815ed, 0xc58c, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+static const uuid_le spar_vhba_channel_protocol_uuid =
+ SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
+ "414815ed-c58c-11da-95a9-00e08161165f"
+
+/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+static const uuid_le spar_vnic_channel_protocol_uuid =
+ SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
+ "8cd5994d-c58e-11da-95a9-00e08161165f"
+
+/* {72120008-4AAB-11DC-8530-444553544200} */
+#define SPAR_SIOVM_UUID \
+ UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
+ 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
+static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
+
+/* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} */
+#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
+ 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
+
+static const uuid_le spar_controldirector_channel_protocol_uuid =
+ SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
+
+/* {b4e79625-aede-4eAA-9e11-D3eddcd4504c} */
+#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
+ 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
+
#endif
diff --git a/drivers/staging/unisys/include/channel_guid.h b/drivers/staging/unisys/include/channel_guid.h
deleted file mode 100644
index 17cb499cb53c..000000000000
--- a/drivers/staging/unisys/include/channel_guid.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-/*
- * CHANNEL Guids
- */
-
-/* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x414815ed, 0xc58c, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le spar_vhba_channel_protocol_uuid =
- SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
-#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
- "414815ed-c58c-11da-95a9-00e08161165f"
-
-/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
-#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le spar_vnic_channel_protocol_uuid =
- SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
-#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
- "8cd5994d-c58e-11da-95a9-00e08161165f"
-
-/* {72120008-4AAB-11DC-8530-444553544200} */
-#define SPAR_SIOVM_UUID \
- UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
- 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
-
-/* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} */
-#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
- 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
-
-static const uuid_le spar_controldirector_channel_protocol_uuid =
- SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
-
-/* {b4e79625-aede-4eAA-9e11-D3eddcd4504c} */
-#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
- 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
diff --git a/drivers/staging/unisys/include/diagchannel.h b/drivers/staging/unisys/include/diagchannel.h
deleted file mode 100644
index 6e813c77b97e..000000000000
--- a/drivers/staging/unisys/include/diagchannel.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef _DIAG_CHANNEL_H_
-#define _DIAG_CHANNEL_H_
-
-/* Levels of severity for diagnostic events, in order from lowest severity to
- * highest (i.e. fatal errors are the most severe, and should always be logged,
- * but info events rarely need to be logged except during debugging). The
- * values DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid
- * severity values. They exist merely to dilineate the list, so that future
- * additions won't require changes to the driver (i.e. when checking for
- * out-of-range severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE
- * and DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events
- * but they are valid for controlling the amount of event data. Changes made
- * to the enum, need to be reflected in s-Par.
- */
-enum diag_severity {
- DIAG_SEVERITY_VERBOSE = 0,
- DIAG_SEVERITY_INFO = 1,
- DIAG_SEVERITY_WARNING = 2,
- DIAG_SEVERITY_ERR = 3,
- DIAG_SEVERITY_PRINT = 4,
-};
-
-#endif
diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h
deleted file mode 100644
index b81287f5e2c3..000000000000
--- a/drivers/staging/unisys/include/guestlinuxdebug.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __GUESTLINUXDEBUG_H__
-#define __GUESTLINUXDEBUG_H__
-
-/*
- * This file contains supporting interface for "vmcallinterface.h", particularly
- * regarding adding additional structure and functionality to linux
- * ISSUE_IO_VMCALL_POSTCODE_SEVERITY
- */
-
-/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
-enum driver_pc { /* POSTCODE driver identifier tuples */
- /* visorchipset driver files */
- VISOR_CHIPSET_PC = 0xA0,
- VISOR_CHIPSET_PC_controlvm_c = 0xA1,
- VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
- VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
- VISOR_CHIPSET_PC_file_c = 0xA4,
- VISOR_CHIPSET_PC_parser_c = 0xA5,
- VISOR_CHIPSET_PC_testing_c = 0xA6,
- VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
- VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
- /* visorbus driver files */
- VISOR_BUS_PC = 0xB0,
- VISOR_BUS_PC_businst_attr_c = 0xB1,
- VISOR_BUS_PC_channel_attr_c = 0xB2,
- VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
- VISOR_BUS_PC_visorbus_main_c = 0xB4,
- /* visorclientbus driver files */
- VISOR_CLIENT_BUS_PC = 0xC0,
- VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
- /* virt hba driver files */
- VIRT_HBA_PC = 0xC2,
- VIRT_HBA_PC_virthba_c = 0xC3,
- /* virtpci driver files */
- VIRT_PCI_PC = 0xC4,
- VIRT_PCI_PC_virtpci_c = 0xC5,
- /* virtnic driver files */
- VIRT_NIC_PC = 0xC6,
- VIRT_NIC_P_virtnic_c = 0xC7,
- /* uislib driver files */
- UISLIB_PC = 0xD0,
- UISLIB_PC_uislib_c = 0xD1,
- UISLIB_PC_uisqueue_c = 0xD2,
- UISLIB_PC_uisthread_c = 0xD3,
- UISLIB_PC_uisutils_c = 0xD4,
-};
-
-enum event_pc { /* POSTCODE event identifier tuples */
- ATTACH_PORT_ENTRY_PC = 0x001,
- ATTACH_PORT_FAILURE_PC = 0x002,
- ATTACH_PORT_SUCCESS_PC = 0x003,
- BUS_FAILURE_PC = 0x004,
- BUS_CREATE_ENTRY_PC = 0x005,
- BUS_CREATE_FAILURE_PC = 0x006,
- BUS_CREATE_EXIT_PC = 0x007,
- BUS_CONFIGURE_ENTRY_PC = 0x008,
- BUS_CONFIGURE_FAILURE_PC = 0x009,
- BUS_CONFIGURE_EXIT_PC = 0x00A,
- CHIPSET_INIT_ENTRY_PC = 0x00B,
- CHIPSET_INIT_SUCCESS_PC = 0x00C,
- CHIPSET_INIT_FAILURE_PC = 0x00D,
- CHIPSET_INIT_EXIT_PC = 0x00E,
- CREATE_WORKQUEUE_PC = 0x00F,
- CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
- CONTROLVM_INIT_FAILURE_PC = 0x0A1,
- DEVICE_CREATE_ENTRY_PC = 0x0A2,
- DEVICE_CREATE_FAILURE_PC = 0x0A3,
- DEVICE_CREATE_SUCCESS_PC = 0x0A4,
- DEVICE_CREATE_EXIT_PC = 0x0A5,
- DEVICE_ADD_PC = 0x0A6,
- DEVICE_REGISTER_FAILURE_PC = 0x0A7,
- DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
- DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
- DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
- DRIVER_ENTRY_PC = 0x0AB,
- DRIVER_EXIT_PC = 0x0AC,
- MALLOC_FAILURE_PC = 0x0AD,
- QUEUE_DELAYED_WORK_PC = 0x0AE,
- UISLIB_THREAD_FAILURE_PC = 0x0B7,
- VBUS_CHANNEL_ENTRY_PC = 0x0B8,
- VBUS_CHANNEL_FAILURE_PC = 0x0B9,
- VBUS_CHANNEL_EXIT_PC = 0x0BA,
- VHBA_CREATE_ENTRY_PC = 0x0BB,
- VHBA_CREATE_FAILURE_PC = 0x0BC,
- VHBA_CREATE_EXIT_PC = 0x0BD,
- VHBA_CREATE_SUCCESS_PC = 0x0BE,
- VHBA_COMMAND_HANDLER_PC = 0x0BF,
- VHBA_PROBE_ENTRY_PC = 0x0C0,
- VHBA_PROBE_FAILURE_PC = 0x0C1,
- VHBA_PROBE_EXIT_PC = 0x0C2,
- VNIC_CREATE_ENTRY_PC = 0x0C3,
- VNIC_CREATE_FAILURE_PC = 0x0C4,
- VNIC_CREATE_SUCCESS_PC = 0x0C5,
- VNIC_PROBE_ENTRY_PC = 0x0C6,
- VNIC_PROBE_FAILURE_PC = 0x0C7,
- VNIC_PROBE_EXIT_PC = 0x0C8,
- VPCI_CREATE_ENTRY_PC = 0x0C9,
- VPCI_CREATE_FAILURE_PC = 0x0CA,
- VPCI_CREATE_EXIT_PC = 0x0CB,
- VPCI_PROBE_ENTRY_PC = 0x0CC,
- VPCI_PROBE_FAILURE_PC = 0x0CD,
- VPCI_PROBE_EXIT_PC = 0x0CE,
- CRASH_DEV_ENTRY_PC = 0x0CF,
- CRASH_DEV_EXIT_PC = 0x0D0,
- CRASH_DEV_HADDR_NULL = 0x0D1,
- CRASH_DEV_CONTROLVM_NULL = 0x0D2,
- CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
- CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
- CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
- CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
- CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
- CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
- SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
- SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
- CALLHOME_INIT_FAILURE_PC = 0x0DB
-};
-
-#ifdef __GNUC__
-
-#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
-#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-/* TODO-> Info currently doesn't show, so we set info=warning */
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
-
-/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
- * Please also note that the resulting postcode is in hex, so if you are
- * searching for the __LINE__ number, convert it first to decimal. The line
- * number combined with driver and type of call, will allow you to track down
- * exactly what line an error occurred on, or where the last driver
- * entered/exited from.
- */
-
-/* BASE FUNCTIONS */
-#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
-do { \
- unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
- ((((u64)__LINE__) & 0xFFF) << 32) | \
- (((u64)pc32bit) & 0xFFFFFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
-} while (0)
-
-#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
-do { \
- unsigned long long post_code_temp; \
- post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
- ((((u64)__LINE__) & 0xFFF) << 32) | \
- ((((u64)pc16bit1) & 0xFFFF) << 16) | \
- (((u64)pc16bit2) & 0xFFFF); \
- ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
-} while (0)
-
-/* MOST COMMON */
-#define POSTCODE_LINUX_2(EVENT_PC, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
-
-#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
- POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
-
-#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
- POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
- pc16bit2, severity)
-
-#endif
-#endif
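
For reference, a minimal user-space sketch of the 64-bit value POSTCODE_LINUX_A assembles; the bit positions come straight from the macro above, the sample driver/event codes come from the enums above, and the printf merely stands in for the ISSUE_IO_VMCALL_POSTCODE_SEVERITY hypercall:

        #include <stdint.h>
        #include <stdio.h>

        #define VISOR_BUS_PC             0xB0   /* from enum driver_pc above */
        #define DEVICE_CREATE_FAILURE_PC 0x0A3  /* from enum event_pc above */

        /* Same layout as POSTCODE_LINUX_A: driver pc in bits 63:56, event pc
         * in bits 55:44, source line in bits 43:32, caller value in 31:0.
         */
        static uint64_t pack_postcode(uint64_t drv, uint64_t evt,
                                      uint64_t line, uint64_t val)
        {
                return (drv << 56) | (evt << 44) |
                       ((line & 0xFFF) << 32) | (val & 0xFFFFFFFF);
        }

        int main(void)
        {
                /* e.g. device 5 on bus 2 failed to create at source line 100 */
                uint64_t pc = pack_postcode(VISOR_BUS_PC,
                                            DEVICE_CREATE_FAILURE_PC,
                                            100, (2 << 16) | 5);

                printf("postcode = 0x%016llx\n", (unsigned long long)pc);
                /* prints 0xb00a306400020005; the line number (100 = 0x064)
                 * appears in hex, as the deleted comment warns.
                 */
                return 0;
        }
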
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 5ccf81485d72..cba4433bcd51 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -33,7 +33,6 @@
#include <linux/dma-direction.h>
#include "channel.h"
-#include "channel_guid.h"
#define ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
#define ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
diff --git a/drivers/staging/unisys/include/periodic_work.h b/drivers/staging/unisys/include/periodic_work.h
deleted file mode 100644
index 0b3335a4b206..000000000000
--- a/drivers/staging/unisys/include/periodic_work.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* periodic_work.h
- *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __PERIODIC_WORK_H__
-#define __PERIODIC_WORK_H__
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-/* PERIODIC_WORK an opaque structure to users.
- * Fields are declared only in the implementation .c files.
- */
-struct periodic_work;
-
-struct periodic_work *
-visor_periodic_work_create(ulong jiffy_interval,
- struct workqueue_struct *workqueue,
- void (*workfunc)(void *),
- void *workfuncarg,
- const char *devnam);
-void visor_periodic_work_destroy(struct periodic_work *pw);
-bool visor_periodic_work_nextperiod(struct periodic_work *pw);
-bool visor_periodic_work_start(struct periodic_work *pw);
-bool visor_periodic_work_stop(struct periodic_work *pw);
-
-#endif
diff --git a/drivers/staging/unisys/include/vbushelper.h b/drivers/staging/unisys/include/vbushelper.h
deleted file mode 100644
index f1b6aacb79d7..000000000000
--- a/drivers/staging/unisys/include/vbushelper.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* vbushelper.h
- *
- * Copyright (C) 2011 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __VBUSHELPER_H__
-#define __VBUSHELPER_H__
-
-/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the
- * command line
- */
-
-#define TARGET_HOSTNAME "linuxguest"
-
-static inline void bus_device_info_init(
- struct ultra_vbus_deviceinfo *bus_device_info_ptr,
- const char *dev_type, const char *drv_name,
- const char *ver, const char *ver_tag)
-{
- memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
- snprintf(bus_device_info_ptr->devtype,
- sizeof(bus_device_info_ptr->devtype),
- "%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvname,
- sizeof(bus_device_info_ptr->drvname),
- "%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infostrs,
- sizeof(bus_device_info_ptr->infostrs), "%s\t%s\t%s",
- (ver) ? ver : "unknownVer",
- (ver_tag) ? ver_tag : "unknownVerTag",
- TARGET_HOSTNAME);
-}
-
-#endif
diff --git a/drivers/staging/unisys/include/version.h b/drivers/staging/unisys/include/version.h
deleted file mode 100644
index 83d1da7a2f81..000000000000
--- a/drivers/staging/unisys/include/version.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-/* version.h */
-
-/* Common version/release info needed by all components goes here.
- * (This file must compile cleanly in all environments.)
- * Ultimately, this will be combined with defines generated dynamically as
- * part of the sysgen, and some of the defines below may in fact end up
- * being replaced with dynamically generated ones.
- */
-#ifndef __VERSION_H__
-#define __VERSION_H__
-
-#define SPARVER1 "1"
-#define SPARVER2 "0"
-#define SPARVER3 "0"
-#define SPARVER4 "0"
-
-#define VERSION SPARVER1 "." SPARVER2 "." SPARVER3 "." SPARVER4
-
-/* Here are various version forms needed in Windows environments.
- */
-#define VISOR_PRODUCTVERSION SPARVERCOMMA
-#define VISOR_PRODUCTVERSION_STR SPARVER1 "." SPARVER2 "." SPARVER3 "." \
- SPARVER4
-#define VISOR_OBJECTVERSION_STR SPARVER1 "," SPARVER2 "," SPARVER3 "," \
- SPARVER4
-
-#define COPYRIGHT "Unisys Corporation"
-#define COPYRIGHTDATE "2010 - 2013"
-
-#endif
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 9baf1ec70d01..677627c72c4c 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -34,8 +34,9 @@
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
-#include "periodic_work.h"
#include "channel.h"
struct visor_driver;
@@ -65,8 +66,6 @@ struct visor_channeltype_descriptor {
* struct visor_driver - Information provided by each visor driver when it
* registers with the visorbus driver.
* @name: Name of the visor driver.
- * @version: The numbered version of the driver (x.x.xxx).
- * @vertag: A human readable version string.
* @owner: The module owner.
* @channel_types: Types of channels handled by this driver, ending with
* a zero GUID. Our specialized BUS.match() method knows
@@ -93,12 +92,9 @@ struct visor_channeltype_descriptor {
* @resume: Behaves similar to pause.
* @driver: Private reference to the device driver. For use by bus
* driver only.
- * @version_attr: Private version field. For use by bus driver only.
*/
struct visor_driver {
const char *name;
- const char *version;
- const char *vertag;
struct module *owner;
struct visor_channeltype_descriptor *channel_types;
int (*probe)(struct visor_device *dev);
@@ -111,7 +107,6 @@ struct visor_driver {
/* These fields are for private use by the bus driver only. */
struct device_driver driver;
- struct driver_attribute version_attr;
};
#define to_visor_driver(x) ((x) ? \
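
As a rough sketch, a client driver written against this slimmed-down struct visor_driver now registers along these lines; the field names, the zero-GUID terminator for channel_types and the register/unregister calls are taken from this header, while the driver name, the choice of SPAR_VHBA_CHANNEL_PROTOCOL_UUID, the visor_channeltype_descriptor initializer fields and the callback bodies are illustrative assumptions:

        #include <linux/module.h>
        #include "visorbus.h"

        /* channel type(s) this hypothetical driver claims; list ends with a
         * zero GUID, per the channel_types documentation above
         */
        static struct visor_channeltype_descriptor example_channel_types[] = {
                { .guid = SPAR_VHBA_CHANNEL_PROTOCOL_UUID, .name = "example" },
                { .guid = NULL_UUID_LE, .name = NULL }
        };

        static int example_probe(struct visor_device *dev)
        {
                /* map/validate the channel, start servicing the device */
                return 0;
        }

        static void example_remove(struct visor_device *dev)
        {
                /* undo whatever example_probe() set up */
        }

        static struct visor_driver example_visor_driver = {
                .name = "example_visor_driver",
                .owner = THIS_MODULE,
                .channel_types = example_channel_types,
                .probe = example_probe,
                .remove = example_remove,
        };

        static int __init example_init(void)
        {
                return visorbus_register_visor_driver(&example_visor_driver);
        }
        module_init(example_init);

        static void __exit example_exit(void)
        {
                visorbus_unregister_visor_driver(&example_visor_driver);
        }
        module_exit(example_exit);

        MODULE_LICENSE("GPL");
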
@@ -120,33 +115,33 @@ struct visor_driver {
/**
* struct visor_device - A device type for things "plugged" into the visorbus
* bus
- * visorchannel: Points to the channel that the device is
+ * @visorchannel: Points to the channel that the device is
* associated with.
- * channel_type_guid: Identifies the channel type to the bus driver.
- * device: Device struct meant for use by the bus driver
+ * @channel_type_guid: Identifies the channel type to the bus driver.
+ * @device: Device struct meant for use by the bus driver
* only.
- * list_all: Used by the bus driver to enumerate devices.
- * periodic_work: Device work queue. Private use by bus driver
- * only.
- * being_removed: Indicates that the device is being removed from
+ * @list_all: Used by the bus driver to enumerate devices.
+ * @timer: Timer fired periodically to do interrupt-type
+ * activity.
+ * @being_removed: Indicates that the device is being removed from
* the bus. Private bus driver use only.
- * visordriver_callback_lock: Used by the bus driver to lock when handling
+ * @visordriver_callback_lock: Used by the bus driver to lock when handling
* channel events.
- * pausing: Indicates that a change towards a paused state.
+ * @pausing:                   Indicates that a change towards a paused state
* is in progress. Only modified by the bus driver.
- * resuming: Indicates that a change towards a running state
+ * @resuming: Indicates that a change towards a running state
* is in progress. Only modified by the bus driver.
- * chipset_bus_no: Private field used by the bus driver.
- * chipset_dev_no: Private field used the bus driver.
- * state: Used to indicate the current state of the
+ * @chipset_bus_no: Private field used by the bus driver.
+ * @chipset_dev_no:            Private field used by the bus driver.
+ * @state: Used to indicate the current state of the
* device.
- * inst: Unique GUID for this instance of the device.
- * name: Name of the device.
- * pending_msg_hdr: For private use by bus driver to respond to
+ * @inst: Unique GUID for this instance of the device.
+ * @name: Name of the device.
+ * @pending_msg_hdr: For private use by bus driver to respond to
* hypervisor requests.
- * vbus_hdr_info: A pointer to header info. Private use by bus
+ * @vbus_hdr_info: A pointer to header info. Private use by bus
* driver.
- * partition_uuid: Indicates client partion id. This should be the
+ * @partition_uuid:            Indicates client partition id. This should be the
* same across all visor_devices in the current
* guest. Private use by bus driver only.
*/
@@ -157,9 +152,10 @@ struct visor_device {
/* These fields are for private use by the bus driver only. */
struct device device;
struct list_head list_all;
- struct periodic_work *periodic_work;
+ struct timer_list timer;
+ bool timer_active;
bool being_removed;
- struct semaphore visordriver_callback_lock;
+ struct mutex visordriver_callback_lock;
bool pausing;
bool resuming;
u32 chipset_bus_no;
@@ -174,7 +170,6 @@ struct visor_device {
#define to_visor_device(x) container_of(x, struct visor_device, device)
-#ifndef STANDALONE_CLIENT
int visorbus_register_visor_driver(struct visor_driver *);
void visorbus_unregister_visor_driver(struct visor_driver *);
int visorbus_read_channel(struct visor_device *dev,
@@ -183,50 +178,34 @@ int visorbus_read_channel(struct visor_device *dev,
int visorbus_write_channel(struct visor_device *dev,
unsigned long offset, void *src,
unsigned long nbytes);
-int visorbus_clear_channel(struct visor_device *dev,
- unsigned long offset, u8 ch, unsigned long nbytes);
void visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
-#endif
-/* Note that for visorchannel_create()
- * <channel_bytes> and <guid> arguments may be 0 if we are a channel CLIENT.
- * In this case, the values can simply be read from the channel header.
+/* Levels of severity for diagnostic events, in order from lowest severity to
+ * highest (i.e. fatal errors are the most severe, and should always be logged,
+ * but info events rarely need to be logged except during debugging). The
+ * values DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid
+ * severity values. They exist merely to delineate the list, so that future
+ * additions won't require changes to the driver (i.e. when checking for
+ * out-of-range severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE
+ * and DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events
+ * but they are valid for controlling the amount of event data. Changes made
+ * to the enum need to be reflected in s-Par.
*/
-struct visorchannel *visorchannel_create(u64 physaddr,
- unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid);
-struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
- unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid);
-void visorchannel_destroy(struct visorchannel *channel);
-int visorchannel_read(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
-int visorchannel_write(struct visorchannel *channel, ulong offset,
- void *local, ulong nbytes);
-int visorchannel_clear(struct visorchannel *channel, ulong offset,
- u8 ch, ulong nbytes);
-bool visorchannel_signalremove(struct visorchannel *channel, u32 queue,
- void *msg);
-bool visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
- void *msg);
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
+enum diag_severity {
+ DIAG_SEVERITY_VERBOSE = 0,
+ DIAG_SEVERITY_INFO = 1,
+ DIAG_SEVERITY_WARNING = 2,
+ DIAG_SEVERITY_ERR = 3,
+ DIAG_SEVERITY_PRINT = 4,
+};
-int visorchannel_signalqueue_slots_avail(struct visorchannel *channel,
- u32 queue);
-int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
-u64 visorchannel_get_physaddr(struct visorchannel *channel);
-ulong visorchannel_get_nbytes(struct visorchannel *channel);
-char *visorchannel_id(struct visorchannel *channel, char *s);
-char *visorchannel_zoneid(struct visorchannel *channel, char *s);
-u64 visorchannel_get_clientpartition(struct visorchannel *channel);
-int visorchannel_set_clientpartition(struct visorchannel *channel,
- u64 partition_handle);
+int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+ void *msg);
+int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+ void *msg);
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
uuid_le visorchannel_get_uuid(struct visorchannel *channel);
-char *visorchannel_uuid_id(uuid_le *guid, char *s);
-void visorchannel_debug(struct visorchannel *channel, int num_queues,
- struct seq_file *seq, u32 off);
-void __iomem *visorchannel_get_header(struct visorchannel *channel);
#define BUS_ROOT_DEVICE UINT_MAX
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fc790e7592fc..f3730d8c953e 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -7,6 +7,5 @@ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus.o
visorbus-y := visorbus_main.o
visorbus-y += visorchannel.o
visorbus-y += visorchipset.o
-visorbus-y += periodic_work.o
ccflags-y += -Idrivers/staging/unisys/include
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index 03e36fb6a5a0..f0bfc4ded892 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -482,4 +482,80 @@ struct spar_controlvm_parameters_header {
u32 reserved; /* Natural alignment */
};
+/* General Errors------------------------------------------------------[0-99] */
+#define CONTROLVM_RESP_SUCCESS 0
+#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1
+#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2
+#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5
+
+/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
+#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100
+#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101
+
+/* Maximum Limit----------------------------------------------------[200-299] */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+/* Payload and Parameter Related------------------------------------[400-499] */
+#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
+ * DEVICE_CONFIGURE
+ */
+#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
+#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
+/* Specified[Packet Structure] Value-------------------------------[500-599] */
+#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
+ * BUS_CONFIGURE,
+ * DEVICE_CREATE,
+ * DEVICE_CONFIG
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
+ /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE,
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE
+ */
+/* Partition Driver Callback Interface----------------------[600-699] */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY
+ */
+/* Unable to invoke VIRTPCI callback */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605
+ /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY
+ */
+/* VIRTPCI Callback returned error */
+#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606
+ /* SWITCH_ATTACHEXTPORT,
+ * SWITCH_DETACHEXTPORT
+ * DEVICE_CONFIGURE
+ */
+
+/* generic device callback returned error */
+/* Bus Related------------------------------------------------------[700-799] */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+/* Channel Related--------------------------------------------------[800-899] */
+#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+/* Chipset Shutdown Related---------------------------------------[1000-1099] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+
+/* Chipset Stop Related-------------------------------------------[1100-1199] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101
+
+/* Device Related-------------------------------------------------[1400-1499] */
+#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400
+
#endif /* __CONTROLVMCHANNEL_H__ */
diff --git a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
deleted file mode 100644
index 23ad0ea6c9fc..000000000000
--- a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* controlvmcompletionstatus.c
- *
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-/* Defines for all valid values returned in the response message header
- * completionStatus field. See controlvmchannel.h for description of
- * the header: _CONTROLVM_MESSAGE_HEADER.
- */
-
-#ifndef __CONTROLVMCOMPLETIONSTATUS_H__
-#define __CONTROLVMCOMPLETIONSTATUS_H__
-
-/* General Errors------------------------------------------------------[0-99] */
-#define CONTROLVM_RESP_SUCCESS 0
-#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1
-#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2
-#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3
-#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4
-#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5
-
-/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
-#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100
-#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101
-
-/* Maximum Limit----------------------------------------------------[200-299] */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
-/* Payload and Parameter Related------------------------------------[400-499] */
-#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
- * DEVICE_CONFIGURE
- */
-#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
-#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
-/* Specified[Packet Structure] Value-------------------------------[500-599] */
-#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
- * BUS_CONFIGURE,
- * DEVICE_CREATE,
- * DEVICE_CONFIG
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
- /* DEVICE_CREATE,
- * DEVICE_CONFIGURE,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE,
- * DEVICE_CONFIGURE
- */
-/* Partition Driver Callback Interface----------------------[600-699] */
-#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
-/* Unable to invoke VIRTPCI callback */
-#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605
- /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
-/* VIRTPCI Callback returned error */
-#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606
- /* SWITCH_ATTACHEXTPORT,
- * SWITCH_DETACHEXTPORT
- * DEVICE_CONFIGURE
- */
-
-/* generic device callback returned error */
-/* Bus Related------------------------------------------------------[700-799] */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
-/* Channel Related--------------------------------------------------[800-899] */
-#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
-/* Chipset Shutdown Related---------------------------------------[1000-1099] */
-#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000
-#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
-
-/* Chipset Stop Related-------------------------------------------[1100-1199] */
-#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100
-#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101
-
-/* Device Related-------------------------------------------------[1400-1499] */
-#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400
-
-#endif /* __CONTROLVMCOMPLETIONSTATUS_H__ not defined */
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
deleted file mode 100644
index 98ea7f381a3c..000000000000
--- a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-/* Linux GCC Version (32-bit and 64-bit) */
-static inline unsigned long
-__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
- unsigned long reg_ecx)
-{
- unsigned long result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
-
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
- return result;
-}
-
-static inline unsigned long
-__unisys_extended_vmcall_gnuc(unsigned long long tuple,
- unsigned long long reg_ebx,
- unsigned long long reg_ecx,
- unsigned long long reg_edx)
-{
- unsigned long result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
-
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
- return result;
-}
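
The wrappers above only execute the VMCALL instruction (the 0f 01 c1 byte sequence) when CPUID leaf 1 reports the hypervisor-present bit in ECX bit 31; below is a stand-alone sketch of that same gate, using GCC's cpuid.h rather than the kernel's cpuid() helper:

        #include <stdio.h>
        #include <cpuid.h>

        int main(void)
        {
                unsigned int eax, ebx, ecx, edx;

                if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                        return 1;

                if (ecx & 0x80000000)
                        printf("hypervisor present: a vmcall tuple could be issued\n");
                else
                        printf("bare metal: the wrappers above would return -EPERM\n");
                return 0;
        }
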
diff --git a/drivers/staging/unisys/visorbus/periodic_work.c b/drivers/staging/unisys/visorbus/periodic_work.c
deleted file mode 100644
index 00b152764f84..000000000000
--- a/drivers/staging/unisys/visorbus/periodic_work.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/* periodic_work.c
- *
- * Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-/*
- * Helper functions to schedule periodic work in Linux kernel mode.
- */
-#include <linux/sched.h>
-
-#include "periodic_work.h"
-
-#define MYDRVNAME "periodic_work"
-
-struct periodic_work {
- rwlock_t lock;
- struct delayed_work work;
- void (*workfunc)(void *);
- void *workfuncarg;
- bool is_scheduled;
- bool want_to_stop;
- ulong jiffy_interval;
- struct workqueue_struct *workqueue;
- const char *devnam;
-};
-
-static void periodic_work_func(struct work_struct *work)
-{
- struct periodic_work *pw;
-
- pw = container_of(work, struct periodic_work, work.work);
- (*pw->workfunc)(pw->workfuncarg);
-}
-
-struct periodic_work
-*visor_periodic_work_create(ulong jiffy_interval,
- struct workqueue_struct *workqueue,
- void (*workfunc)(void *),
- void *workfuncarg,
- const char *devnam)
-{
- struct periodic_work *pw;
-
- pw = kzalloc(sizeof(*pw), GFP_KERNEL | __GFP_NORETRY);
- if (!pw)
- return NULL;
-
- rwlock_init(&pw->lock);
- pw->jiffy_interval = jiffy_interval;
- pw->workqueue = workqueue;
- pw->workfunc = workfunc;
- pw->workfuncarg = workfuncarg;
- pw->devnam = devnam;
- return pw;
-}
-EXPORT_SYMBOL_GPL(visor_periodic_work_create);
-
-void visor_periodic_work_destroy(struct periodic_work *pw)
-{
- kfree(pw);
-}
-EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
-
-/** Call this from your periodic work worker function to schedule the next
- * call.
- * If this function returns false, there was a failure and the
- * periodic work is no longer scheduled
- */
-bool visor_periodic_work_nextperiod(struct periodic_work *pw)
-{
- bool rc = false;
-
- write_lock(&pw->lock);
- if (pw->want_to_stop) {
- pw->is_scheduled = false;
- pw->want_to_stop = false;
- rc = true; /* yes, true; see visor_periodic_work_stop() */
- goto unlock;
- } else if (!queue_delayed_work(pw->workqueue, &pw->work,
- pw->jiffy_interval)) {
- pw->is_scheduled = false;
- rc = false;
- goto unlock;
- }
- rc = true;
-unlock:
- write_unlock(&pw->lock);
- return rc;
-}
-EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
-
-/** This function returns true iff new periodic work was actually started.
- * If this function returns false, then no work was started
- * (either because it was already started, or because of a failure).
- */
-bool visor_periodic_work_start(struct periodic_work *pw)
-{
- bool rc = false;
-
- write_lock(&pw->lock);
- if (pw->is_scheduled) {
- rc = false;
- goto unlock;
- }
- if (pw->want_to_stop) {
- rc = false;
- goto unlock;
- }
- INIT_DELAYED_WORK(&pw->work, &periodic_work_func);
- if (!queue_delayed_work(pw->workqueue, &pw->work,
- pw->jiffy_interval)) {
- rc = false;
- goto unlock;
- }
- pw->is_scheduled = true;
- rc = true;
-unlock:
- write_unlock(&pw->lock);
- return rc;
-}
-EXPORT_SYMBOL_GPL(visor_periodic_work_start);
-
-/** This function returns true iff your call actually stopped the periodic
- * work.
- *
- * -- PAY ATTENTION... this is important --
- *
- * NO NO #1
- *
- * Do NOT call this function from some function that is running on the
- * same workqueue as the work you are trying to stop might be running
- * on! If you violate this rule, visor_periodic_work_stop() MIGHT work,
- * but it also MIGHT get hung up in an infinite loop saying
- * "waiting for delayed work...". This will happen if the delayed work
- * you are trying to cancel has been put in the workqueue list, but can't
- * run yet because we are running that same workqueue thread right now.
- *
- * Bottom line: If you need to call visor_periodic_work_stop() from a
- * workitem, be sure the workitem is on a DIFFERENT workqueue than the
- * workitem that you are trying to cancel.
- *
- * If I could figure out some way to check for this "no no" condition in
- * the code, I would. It would have saved me the trouble of writing this
- * long comment. And also, don't think this is some "theoretical" race
- * condition. It is REAL, as I have spent the day chasing it.
- *
- * NO NO #2
- *
- * Take close note of the locks that you own when you call this function.
- * You must NOT own any locks that are needed by the periodic work
- * function that is currently installed. If you DO, a deadlock may result,
- * because stopping the periodic work often involves waiting for the last
- * iteration of the periodic work function to complete. Again, if you hit
- * this deadlock, you will get hung up in an infinite loop saying
- * "waiting for delayed work...".
- */
-bool visor_periodic_work_stop(struct periodic_work *pw)
-{
- bool stopped_something = false;
-
- write_lock(&pw->lock);
- stopped_something = pw->is_scheduled && (!pw->want_to_stop);
- while (pw->is_scheduled) {
- pw->want_to_stop = true;
- if (cancel_delayed_work(&pw->work)) {
- /* We get here if the delayed work was pending as
- * delayed work, but was NOT run.
- */
- WARN_ON(!pw->is_scheduled);
- pw->is_scheduled = false;
- } else {
- /* If we get here, either the delayed work:
- * - was run, OR,
- * - is running RIGHT NOW on another processor, OR,
- * - wasn't even scheduled (there is a miniscule
- * timing window where this could be the case)
- * flush_workqueue() would make sure it is finished
- * executing, but that still isn't very useful, which
- * explains the loop...
- */
- }
- if (pw->is_scheduled) {
- write_unlock(&pw->lock);
- schedule_timeout_interruptible(msecs_to_jiffies(10));
- write_lock(&pw->lock);
- } else {
- pw->want_to_stop = false;
- }
- }
- write_unlock(&pw->lock);
- return stopped_something;
-}
-EXPORT_SYMBOL_GPL(visor_periodic_work_stop);
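
For context, the API deleted here was driven roughly as sketched below; the names, the 100 ms interval and the surrounding driver logic are hypothetical, but the create/start/nextperiod/stop sequence and the rule about stopping from a context other than the workqueue that runs the work follow the comments above:

        #include <linux/workqueue.h>
        #include "periodic_work.h"

        static struct workqueue_struct *example_wq;
        static struct periodic_work *example_pw;

        /* runs once per period; must re-arm itself via _nextperiod() */
        static void example_workfunc(void *ctx)
        {
                /* ... poll the device ... */
                visor_periodic_work_nextperiod(example_pw);
        }

        static int example_start_polling(void)
        {
                example_wq = create_singlethread_workqueue("example_wq");
                if (!example_wq)
                        return -ENOMEM;
                example_pw = visor_periodic_work_create(msecs_to_jiffies(100),
                                                        example_wq,
                                                        example_workfunc,
                                                        NULL, "example");
                if (!example_pw) {
                        destroy_workqueue(example_wq);
                        return -ENOMEM;
                }
                visor_periodic_work_start(example_pw);
                return 0;
        }

        /* must NOT run on example_wq itself, and must not hold any lock that
         * example_workfunc() needs (see the "NO NO" comments above)
         */
        static void example_stop_polling(void)
        {
                visor_periodic_work_stop(example_pw);
                visor_periodic_work_destroy(example_pw);
                destroy_workqueue(example_wq);
        }
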
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 90fa12e62f26..e97917522f6a 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -23,7 +23,6 @@
* the client devices and client drivers for the server end to see.
*/
#include <linux/uuid.h>
-#include "vbusdeviceinfo.h"
#include "channel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
@@ -58,6 +57,216 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
actual_bytes))
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+
+/*
+ * An array of this struct is present in the channel area for each vbus.
+ * (See vbuschannel.h.)
+ * It is filled in by the client side to provide info about the device
+ * and driver from the client's perspective.
+ */
+struct ultra_vbus_deviceinfo {
+ u8 devtype[16]; /* short string identifying the device type */
+ u8 drvname[16]; /* driver .sys file name */
+ u8 infostrs[96]; /* kernel version */
+ u8 reserved[128]; /* pad size to 256 bytes */
+};
+
+/**
+ * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer
+ * @p: destination buffer where chars are written to
+ * @remain: number of bytes that can be written starting at @p
+ * @src: pointer to source buffer
+ * @srcmax: number of valid characters at @src
+ *
+ * Reads chars from the buffer at @src for @srcmax bytes, and writes to
+ * the buffer at @p, which is @remain bytes long, ensuring never to
+ * overflow the buffer at @p, using the following rules:
+ * - printable characters are simply copied from the buffer at @src to the
+ * buffer at @p
+ * - intervening streaks of non-printable characters in the buffer at @src
+ * are replaced with a single space in the buffer at @p
+ * Note that we pay no attention to '\0'-termination.
+ *
+ * Pass @p == NULL and @remain == 0 for this special behavior: in this
+ * case, we simply return the number of bytes that WOULD HAVE been written
+ * to a buffer at @p, had it been infinitely big.
+ *
+ * Return: the number of bytes written to @p (or WOULD HAVE been written to
+ * @p, as described in the previous paragraph)
+ */
+static inline int
+vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
+{
+ int chars = 0;
+ int nonprintable_streak = 0;
+
+ while (srcmax > 0) {
+ if ((*src >= ' ') && (*src < 0x7f)) {
+ if (nonprintable_streak) {
+ if (remain > 0) {
+ *p = ' ';
+ p++;
+ remain--;
+ chars++;
+ } else if (!p) {
+ chars++;
+ }
+ nonprintable_streak = 0;
+ }
+ if (remain > 0) {
+ *p = *src;
+ p++;
+ remain--;
+ chars++;
+ } else if (!p) {
+ chars++;
+ }
+ } else {
+ nonprintable_streak = 1;
+ }
+ src++;
+ srcmax--;
+ }
+ return chars;
+}
+
+#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
+ do { \
+ if (remain <= 0) \
+ break; \
+ *p = ch; \
+ p++; chars++; remain--; \
+ } while (0)
+
+/**
+ * vbuschannel_itoa() - convert non-negative int to string
+ * @p: destination string
+ * @remain: max number of bytes that can be written to @p
+ * @num: input int to convert
+ *
+ * Converts the non-negative value at @num to an ascii decimal string
+ * at @p, writing at most @remain bytes. Note there is NO '\0' termination
+ * written to @p.
+ *
+ * Return: number of bytes written to @p
+ *
+ */
+static inline int
+vbuschannel_itoa(char *p, int remain, int num)
+{
+ int digits = 0;
+ char s[32];
+ int i;
+
+ if (num == 0) {
+ /* '0' is a special case */
+ if (remain <= 0)
+ return 0;
+ *p = '0';
+ return 1;
+ }
+ /* form a backwards decimal ascii string in <s> */
+ while (num > 0) {
+ if (digits >= (int)sizeof(s))
+ return 0;
+ s[digits++] = (num % 10) + '0';
+ num = num / 10;
+ }
+ if (remain < digits) {
+ /* not enough room left at <p> to hold number, so fill with
+ * '?'
+ */
+ for (i = 0; i < remain; i++, p++)
+ *p = '?';
+ return remain;
+ }
+ /* plug in the decimal ascii string representing the number, by */
+ /* reversing the string we just built in <s> */
+ i = digits;
+ while (i > 0) {
+ i--;
+ *p = s[i];
+ p++;
+ }
+ return digits;
+}
+
+/**
+ * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo
+ * to a printable string
+ * @devinfo: the struct ultra_vbus_deviceinfo to format
+ * @p: destination string area
+ * @remain: size of destination string area in bytes
+ * @devix: the device index to be included in the output data, or -1 if no
+ * device index is to be included
+ *
+ * Reads @devinfo and converts its contents to a printable string at @p,
+ * writing at most @remain bytes. Note there is NO '\0' termination
+ * written to @p.
+ *
+ * Return: number of bytes written to @p
+ */
+static inline int
+vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
+ char *p, int remain, int devix)
+{
+ char *psrc;
+ int nsrc, x, i, pad;
+ int chars = 0;
+
+ psrc = &devinfo->devtype[0];
+ nsrc = sizeof(devinfo->devtype);
+ if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
+ return 0;
+
+ /* emit device index */
+ if (devix >= 0) {
+ VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
+ x = vbuschannel_itoa(p, remain, devix);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
+ } else {
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ }
+
+ /* emit device type */
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad device type to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit driver name */
+ psrc = &devinfo->drvname[0];
+ nsrc = sizeof(devinfo->drvname);
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad driver name to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit strings */
+ psrc = &devinfo->infostrs[0];
+ nsrc = sizeof(devinfo->infostrs);
+ x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
+
+ return chars;
+}
+
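
A short sketch of how the helpers above are meant to be combined; the 256-byte buffer and the pr_info() sink are assumptions, while the explicit '\0' termination is the detail the kernel-doc above calls out:

        static void example_dump_devinfo(struct ultra_vbus_deviceinfo *devinfo,
                                         int devix)
        {
                char line[256];
                int n;

                /* returns 0 when devtype holds nothing printable, exactly as
                 * vbuschannel_devinfo_to_string() documents
                 */
                n = vbuschannel_devinfo_to_string(devinfo, line,
                                                  sizeof(line) - 1, devix);
                if (n <= 0)
                        return;
                line[n] = '\0'; /* the helpers never write a terminator */
                pr_info("%s", line); /* e.g. "[0]vhba            visorhba ..." */
        }
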
struct spar_vbus_headerinfo {
u32 struct_bytes; /* size of this struct in bytes */
u32 device_info_struct_bytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
deleted file mode 100644
index abdab4ad0b36..000000000000
--- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- */
-
-#ifndef __VBUSDEVICEINFO_H__
-#define __VBUSDEVICEINFO_H__
-
-#include <linux/types.h>
-
-#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
-
-/* An array of this struct is present in the channel area for each vbus.
- * (See vbuschannel.h.)
- * It is filled in by the client side to provide info about the device
- * and driver from the client's perspective.
- */
-struct ultra_vbus_deviceinfo {
- u8 devtype[16]; /* short string identifying the device type */
- u8 drvname[16]; /* driver .sys file name */
- u8 infostrs[96]; /* sequence of tab-delimited id strings: */
- /* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
- u8 reserved[128]; /* pad size to 256 bytes */
-};
-
-#pragma pack(pop)
-
-/* Reads chars from the buffer at <src> for <srcmax> bytes, and writes to
- * the buffer at <p>, which is <remain> bytes long, ensuring never to
- * overflow the buffer at <p>, using the following rules:
- * - printable characters are simply copied from the buffer at <src> to the
- * buffer at <p>
- * - intervening streaks of non-printable characters in the buffer at <src>
- * are replaced with a single space in the buffer at <p>
- * Note that we pay no attention to '\0'-termination.
- * Returns the number of bytes written to <p>.
- *
- * Pass <p> == NULL and <remain> == 0 for this special behavior. In this
- * case, we simply return the number of bytes that WOULD HAVE been written
- * to a buffer at <p>, had it been infinitely big.
- */
-static inline int
-vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
-{
- int chars = 0;
- int nonprintable_streak = 0;
-
- while (srcmax > 0) {
- if ((*src >= ' ') && (*src < 0x7f)) {
- if (nonprintable_streak) {
- if (remain > 0) {
- *p = ' ';
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- nonprintable_streak = 0;
- }
- if (remain > 0) {
- *p = *src;
- p++;
- remain--;
- chars++;
- } else if (!p) {
- chars++;
- }
- } else {
- nonprintable_streak = 1;
- }
- src++;
- srcmax--;
- }
- return chars;
-}
-
-#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
- do { \
- if (remain <= 0) \
- break; \
- *p = ch; \
- p++; chars++; remain--; \
- } while (0)
-
-/* Converts the non-negative value at <num> to an ascii decimal string
- * at <p>, writing at most <remain> bytes. Note there is NO '\0' termination
- * written to <p>.
- *
- * Returns the number of bytes written to <p>.
- *
- * Note that we create this function because we need to do this operation in
- * an environment-independent way (since we are in a common header file).
- */
-static inline int
-vbuschannel_itoa(char *p, int remain, int num)
-{
- int digits = 0;
- char s[32];
- int i;
-
- if (num == 0) {
- /* '0' is a special case */
- if (remain <= 0)
- return 0;
- *p = '0';
- return 1;
- }
- /* form a backwards decimal ascii string in <s> */
- while (num > 0) {
- if (digits >= (int)sizeof(s))
- return 0;
- s[digits++] = (num % 10) + '0';
- num = num / 10;
- }
- if (remain < digits) {
- /* not enough room left at <p> to hold number, so fill with
- * '?'
- */
- for (i = 0; i < remain; i++, p++)
- *p = '?';
- return remain;
- }
- /* plug in the decimal ascii string representing the number, by */
- /* reversing the string we just built in <s> */
- i = digits;
- while (i > 0) {
- i--;
- *p = s[i];
- p++;
- }
- return digits;
-}
-
-/* Reads <devInfo>, and converts its contents to a printable string at <p>,
- * writing at most <remain> bytes. Note there is NO '\0' termination
- * written to <p>.
- *
- * Pass <devix> >= 0 if you want a device index presented.
- *
- * Returns the number of bytes written to <p>.
- */
-static inline int
-vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
- char *p, int remain, int devix)
-{
- char *psrc;
- int nsrc, x, i, pad;
- int chars = 0;
-
- psrc = &devinfo->devtype[0];
- nsrc = sizeof(devinfo->devtype);
- if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
- return 0;
-
- /* emit device index */
- if (devix >= 0) {
- VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
- x = vbuschannel_itoa(p, remain, devix);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
- } else {
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- }
-
- /* emit device type */
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad device type to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit driver name */
- psrc = &devinfo->drvname[0];
- nsrc = sizeof(devinfo->drvname);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- pad = 15 - x; /* pad driver name to be exactly 15 chars */
- for (i = 0; i < pad; i++)
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
- VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
- /* emit strings */
- psrc = &devinfo->infostrs[0];
- nsrc = sizeof(devinfo->infostrs);
- x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
- p += x;
- remain -= x;
- chars += x;
- VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
-
- return chars;
-}
-
-#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index d32b8980a1cf..fec0a54916fe 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -18,62 +18,22 @@
#include "visorbus.h"
#include "visorbus_private.h"
-#include "version.h"
-#include "periodic_work.h"
-#include "vbuschannel.h"
-#include "guestlinuxdebug.h"
#include "vmcallinterface.h"
#define MYDRVNAME "visorbus"
/* module parameters */
-static int visorbus_debug;
static int visorbus_forcematch;
static int visorbus_forcenomatch;
-static int visorbus_debugref;
-#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
/* Display string that is guaranteed to be no longer than 99 characters */
#define LINESIZE 99
#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
-#define POLLJIFFIES_TESTWORK 100
#define POLLJIFFIES_NORMALCHANNEL 10
static int busreg_rc = -ENODEV; /* stores the result from bus registration */
-static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
-static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
-static void fix_vbus_dev_info(struct visor_device *visordev);
-
-/* BUS type attributes
- *
- * define & implement display of bus attributes under
- * /sys/bus/visorbus.
- *
- */
-
-static ssize_t version_show(struct bus_type *bus, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", VERSION);
-}
-
-static BUS_ATTR_RO(version);
-
-static struct attribute *visorbus_bus_attrs[] = {
- &bus_attr_version.attr,
- NULL,
-};
-
-static const struct attribute_group visorbus_bus_group = {
- .attrs = visorbus_bus_attrs,
-};
-
-static const struct attribute_group *visorbus_bus_groups[] = {
- &visorbus_bus_group,
- NULL,
-};
-
/*
* DEVICE type attributes
*
@@ -106,61 +66,14 @@ static const struct attribute_group *visorbus_dev_groups[] = {
NULL,
};
-/** This describes the TYPE of bus.
- * (Don't confuse this with an INSTANCE of the bus.)
- */
-struct bus_type visorbus_type = {
- .name = "visorbus",
- .match = visorbus_match,
- .uevent = visorbus_uevent,
- .dev_groups = visorbus_dev_groups,
- .bus_groups = visorbus_bus_groups,
-};
-
-static struct delayed_work periodic_work;
-
-/* YES, we need 2 workqueues.
- * The reason is, workitems on the test queue may need to cancel
- * workitems on the other queue. You will be in for trouble if you try to
- * do this with workitems queued on the same workqueue.
- */
-static struct workqueue_struct *periodic_test_workqueue;
-static struct workqueue_struct *periodic_dev_workqueue;
-static long long bus_count; /** number of bus instances */
- /** ever-increasing */
-
-static void chipset_bus_create(struct visor_device *bus_info);
-static void chipset_bus_destroy(struct visor_device *bus_info);
-static void chipset_device_create(struct visor_device *dev_info);
-static void chipset_device_destroy(struct visor_device *dev_info);
-static void chipset_device_pause(struct visor_device *dev_info);
-static void chipset_device_resume(struct visor_device *dev_info);
-
-/** These functions are implemented herein, and are called by the chipset
- * driver to notify us about specific events.
- */
-static struct visorchipset_busdev_notifiers chipset_notifiers = {
- .bus_create = chipset_bus_create,
- .bus_destroy = chipset_bus_destroy,
- .device_create = chipset_device_create,
- .device_destroy = chipset_device_destroy,
- .device_pause = chipset_device_pause,
- .device_resume = chipset_device_resume,
-};
-
-/** These functions are implemented in the chipset driver, and we call them
- * herein when we want to acknowledge a specific event.
- */
-static struct visorchipset_busdev_responders chipset_responders;
-
/* filled in with info about parent chipset driver when we register with it */
static struct ultra_vbus_deviceinfo chipset_driverinfo;
/* filled in with info about this driver, wrt it servicing client busses */
static struct ultra_vbus_deviceinfo clientbus_driverinfo;
-/** list of visor_device structs, linked via .list_all */
+/* list of visor_device structs, linked via .list_all */
static LIST_HEAD(list_all_bus_instances);
-/** list of visor_device structs, linked via .list_all */
+/* list of visor_device structs, linked via .list_all */
static LIST_HEAD(list_all_device_instances);
static int
@@ -177,9 +90,14 @@ visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
return 0;
}
-/* This is called automatically upon adding a visor_device (device_add), or
- * adding a visor_driver (visorbus_register_visor_driver), and returns 1 iff the
- * provided driver can control the specified device.
+/**
+ * visorbus_match() - called automatically upon adding a visor_device
+ * (device_add), or adding a visor_driver
+ * (visorbus_register_visor_driver)
+ * @xdev: struct device for the device being matched
+ * @xdrv: struct device_driver for driver to match device against
+ *
+ * Return: 1 iff the provided driver can control the specified device
*/
static int
visorbus_match(struct device *xdev, struct device_driver *xdrv)
@@ -211,9 +129,22 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
return 0;
}
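
As a hedged aside for readers following the match logic: below is a minimal sketch of the GUID-based matching that the kernel-doc above describes, assuming (as the fix_vbus_dev_info() loop later in this file suggests) that a driver's channel_types array pairs a uuid_le guid with a name and is terminated by a NULL_UUID_LE entry. It is illustrative only, not the function body from this patch.

/* Illustrative sketch only -- not the visorbus_match() body from this patch. */
static int example_guid_match(struct visor_device *dev,
			      struct visor_driver *drv)
{
	uuid_le channel_type = visorchannel_get_uuid(dev->visorchannel);
	int i;

	if (!drv->channel_types)
		return 0;

	/* walk the NULL_UUID_LE-terminated table of supported channel GUIDs */
	for (i = 0; uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE); i++)
		if (!uuid_le_cmp(drv->channel_types[i].guid, channel_type))
			return 1;	/* this driver can control the device */

	return 0;
}
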
-/** This is called when device_unregister() is called for the bus device
- * instance, after all other tasks involved with destroying the device
- * are complete.
+/*
+ * This describes the TYPE of bus.
+ * (Don't confuse this with an INSTANCE of the bus.)
+ */
+struct bus_type visorbus_type = {
+ .name = "visorbus",
+ .match = visorbus_match,
+ .uevent = visorbus_uevent,
+ .dev_groups = visorbus_dev_groups,
+};
+
+/**
+ * visorbus_release_busdevice() - called when device_unregister() is called for
+ * the bus device instance, after all other tasks
+ * involved with destroying the dev are complete
+ * @xdev: struct device for the bus being released
*/
static void
visorbus_release_busdevice(struct device *xdev)
@@ -223,18 +154,16 @@ visorbus_release_busdevice(struct device *xdev)
kfree(dev);
}
-/** This is called when device_unregister() is called for each child
- * device instance.
+/**
+ * visorbus_release_device() - called when device_unregister() is called for
+ * each child device instance
+ * @xdev: struct device for the visor device being released
*/
static void
visorbus_release_device(struct device *xdev)
{
struct visor_device *dev = to_visor_device(xdev);
- if (dev->periodic_work) {
- visor_periodic_work_destroy(dev->periodic_work);
- dev->periodic_work = NULL;
- }
if (dev->visorchannel) {
visorchannel_destroy(dev->visorchannel);
dev->visorchannel = NULL;
@@ -242,9 +171,11 @@ visorbus_release_device(struct device *xdev)
kfree(dev);
}
-/* begin implementation of specific channel attributes to appear under
-* /sys/bus/visorbus<x>/dev<y>/channel
-*/
+/*
+ * begin implementation of specific channel attributes to appear under
+ * /sys/bus/visorbus<x>/dev<y>/channel
+ */
+
static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -349,15 +280,11 @@ static const struct attribute_group *visorbus_channel_groups[] = {
/* end implementation of specific channel attributes */
-/* BUS instance attributes
+/*
+ * BUS instance attributes
*
* define & implement display of bus attributes under
- * /sys/bus/visorbus/busses/visorbus<n>.
- *
- * This is a bit hoaky because the kernel does not yet have the infrastructure
- * to separate bus INSTANCE attributes from bus TYPE attributes...
- * so we roll our own. See businst.c / businst.h.
- *
+ * /sys/bus/visorbus/devices/visorbus<n>.
*/
static ssize_t partition_handle_show(struct device *dev,
@@ -434,8 +361,8 @@ static ssize_t client_bus_info_show(struct device *dev,
if (vdev->name)
partition_name = vdev->name;
shift = snprintf(pos, remain,
- "Client device / client driver info for %s eartition (vbus #%d):\n",
- partition_name, vdev->chipset_dev_no);
+ "Client device / client driver info for %s partition (vbus #%u):\n",
+ partition_name, vdev->chipset_bus_no);
pos += shift;
remain -= shift;
shift = visorchannel_read(channel,
@@ -508,103 +435,48 @@ static const struct attribute_group *visorbus_groups[] = {
NULL
};
-/* DRIVER attributes
- *
- * define & implement display of driver attributes under
- * /sys/bus/visorbus/drivers/<drivername>.
- *
- */
-
-static ssize_t
-DRIVER_ATTR_version(struct device_driver *xdrv, char *buf)
-{
- struct visor_driver *drv = to_visor_driver(xdrv);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", drv->version);
-}
-
-static int
-register_driver_attributes(struct visor_driver *drv)
-{
- struct driver_attribute version =
- __ATTR(version, S_IRUGO, DRIVER_ATTR_version, NULL);
- drv->version_attr = version;
- return driver_create_file(&drv->driver, &drv->version_attr);
-}
-
-static void
-unregister_driver_attributes(struct visor_driver *drv)
-{
- driver_remove_file(&drv->driver, &drv->version_attr);
-}
-
static void
-dev_periodic_work(void *xdev)
+dev_periodic_work(unsigned long __opaque)
{
- struct visor_device *dev = xdev;
+ struct visor_device *dev = (struct visor_device *)__opaque;
struct visor_driver *drv = to_visor_driver(dev->device.driver);
- down(&dev->visordriver_callback_lock);
if (drv->channel_interrupt)
drv->channel_interrupt(dev);
- up(&dev->visordriver_callback_lock);
- if (!visor_periodic_work_nextperiod(dev->periodic_work))
- put_device(&dev->device);
+ mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
}
static void
dev_start_periodic_work(struct visor_device *dev)
{
- if (dev->being_removed)
+ if (dev->being_removed || dev->timer_active)
return;
/* now up by at least 2 */
get_device(&dev->device);
- if (!visor_periodic_work_start(dev->periodic_work))
- put_device(&dev->device);
+ dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
+ add_timer(&dev->timer);
+ dev->timer_active = true;
}
static void
dev_stop_periodic_work(struct visor_device *dev)
{
- if (visor_periodic_work_stop(dev->periodic_work))
- put_device(&dev->device);
-}
-
-/** This is called automatically upon adding a visor_device (device_add), or
- * adding a visor_driver (visorbus_register_visor_driver), but only after
- * visorbus_match has returned 1 to indicate a successful match between
- * driver and device.
- */
-static int
-visordriver_probe_device(struct device *xdev)
-{
- int res;
- struct visor_driver *drv;
- struct visor_device *dev;
-
- drv = to_visor_driver(xdev->driver);
- dev = to_visor_device(xdev);
-
- if (!drv->probe)
- return -ENODEV;
-
- down(&dev->visordriver_callback_lock);
- dev->being_removed = false;
-
- res = drv->probe(dev);
- if (res >= 0) {
- /* success: reference kept via unmatched get_device() */
- get_device(&dev->device);
- fix_vbus_dev_info(dev);
- }
-
- up(&dev->visordriver_callback_lock);
- return res;
+ if (!dev->timer_active)
+ return;
+ del_timer_sync(&dev->timer);
+ dev->timer_active = false;
+ put_device(&dev->device);
}
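
A hedged, self-contained sketch of the classic timer pattern this hunk switches to (init_timer()/mod_timer()/del_timer_sync(), as used before timer_setup() existed). All names below are illustrative and not taken from this patch.

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_poller {
	struct timer_list timer;
	bool active;
};

static void example_poll(unsigned long opaque)
{
	struct example_poller *p = (struct example_poller *)opaque;

	/* ... service the channel here ... */

	/* kernel timers are one-shot: each run re-arms the next poll */
	mod_timer(&p->timer, jiffies + 10);
}

static void example_start(struct example_poller *p)
{
	if (p->active)
		return;
	init_timer(&p->timer);
	p->timer.data = (unsigned long)p;
	p->timer.function = example_poll;
	p->timer.expires = jiffies + 10;
	add_timer(&p->timer);
	p->active = true;
}

static void example_stop(struct example_poller *p)
{
	if (!p->active)
		return;
	del_timer_sync(&p->timer);	/* also waits for a running callback */
	p->active = false;
}
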
-/** This is called when device_unregister() is called for each child device
- * instance, to notify the appropriate visorbus_driver that the device is
- * going away, and to decrease the reference count of the device.
+/**
+ * visordriver_remove_device() - handle visor device going away
+ * @xdev: struct device for the visor device being removed
+ *
+ * This is called when device_unregister() is called for each child device
+ * instance, to notify the appropriate visorbus function driver that the device
+ * is going away, and to decrease the reference count of the device.
+ *
+ * Return: 0 iff successful
*/
static int
visordriver_remove_device(struct device *xdev)
@@ -614,105 +486,44 @@ visordriver_remove_device(struct device *xdev)
dev = to_visor_device(xdev);
drv = to_visor_driver(xdev->driver);
- down(&dev->visordriver_callback_lock);
+ mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = true;
if (drv->remove)
drv->remove(dev);
- up(&dev->visordriver_callback_lock);
+ mutex_unlock(&dev->visordriver_callback_lock);
dev_stop_periodic_work(dev);
put_device(&dev->device);
return 0;
}
-/** A particular type of visor driver calls this function to register
- * the driver. The caller MUST fill in the following fields within the
- * #drv structure:
- * name, version, owner, channel_types, probe, remove
+/**
+ * visorbus_unregister_visor_driver() - unregisters the provided driver
+ * @drv: the driver to unregister
*
- * Here's how the whole Linux bus / driver / device model works.
- *
- * At system start-up, the visorbus kernel module is loaded, which registers
- * visorbus_type as a bus type, using bus_register().
- *
- * All kernel modules that support particular device types on a
- * visorbus bus are loaded. Each of these kernel modules calls
- * visorbus_register_visor_driver() in their init functions, passing a
- * visor_driver struct. visorbus_register_visor_driver() in turn calls
- * register_driver(&visor_driver.driver). This .driver member is
- * initialized with generic methods (like probe), whose sole responsibility
- * is to act as a broker for the real methods, which are within the
- * visor_driver struct. (This is the way the subclass behavior is
- * implemented, since visor_driver is essentially a subclass of the
- * generic driver.) Whenever a driver_register() happens, core bus code in
- * the kernel does (see device_attach() in drivers/base/dd.c):
- *
- * for each dev associated with the bus (the bus that driver is on) that
- * does not yet have a driver
- * if bus.match(dev,newdriver) == yes_matched ** .match specified
- * ** during bus_register().
- * newdriver.probe(dev) ** for visor drivers, this will call
- * ** the generic driver.probe implemented in visorbus.c,
- * ** which in turn calls the probe specified within the
- * ** struct visor_driver (which was specified by the
- * ** actual device driver as part of
- * ** visorbus_register_visor_driver()).
- *
- * The above dance also happens when a new device appears.
- * So the question is, how are devices created within the system?
- * Basically, just call device_add(dev). See pci_bus_add_devices().
- * pci_scan_device() shows an example of how to build a device struct. It
- * returns the newly-created struct to pci_scan_single_device(), who adds it
- * to the list of devices at PCIBUS.devices. That list of devices is what
- * is traversed by pci_bus_add_devices().
- *
- */
-int visorbus_register_visor_driver(struct visor_driver *drv)
-{
- int rc = 0;
-
- if (busreg_rc < 0)
- return -ENODEV; /*can't register on a nonexistent bus*/
-
- drv->driver.name = drv->name;
- drv->driver.bus = &visorbus_type;
- drv->driver.probe = visordriver_probe_device;
- drv->driver.remove = visordriver_remove_device;
- drv->driver.owner = drv->owner;
-
- /* driver_register does this:
- * bus_add_driver(drv)
- * ->if (drv.bus) ** (bus_type) **
- * driver_attach(drv)
- * for each dev with bus type of drv.bus
- * if (!dev.drv) ** no driver assigned yet **
- * if (bus.match(dev,drv)) [visorbus_match]
- * dev.drv = drv
- * if (!drv.probe(dev)) [visordriver_probe_device]
- * dev.drv = NULL
- */
-
- rc = driver_register(&drv->driver);
- if (rc < 0)
- return rc;
- rc = register_driver_attributes(drv);
- if (rc < 0)
- driver_unregister(&drv->driver);
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
-
-/** A particular type of visor driver calls this function to unregister
- * the driver, i.e., within its module_exit function.
+ * A visor function driver calls this function to unregister the driver,
+ * i.e., within its module_exit function.
*/
void
visorbus_unregister_visor_driver(struct visor_driver *drv)
{
- unregister_driver_attributes(drv);
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
+/**
+ * visorbus_read_channel() - reads from the designated channel into
+ * the provided buffer
+ * @dev: the device whose channel is read from
+ * @offset: the offset into the channel at which reading starts
+ * @dest: the destination buffer that is written into from the channel
+ * @nbytes: the number of bytes to read from the channel
+ *
+ * If receiving a message, use the visorchannel_signalremove()
+ * function instead.
+ *
+ * Return: integer indicating success (zero) or failure (non-zero)
+ */
int
visorbus_read_channel(struct visor_device *dev, unsigned long offset,
void *dest, unsigned long nbytes)
@@ -721,6 +532,19 @@ visorbus_read_channel(struct visor_device *dev, unsigned long offset,
}
EXPORT_SYMBOL_GPL(visorbus_read_channel);
+/**
+ * visorbus_write_channel() - writes the provided buffer into the designated
+ * channel
+ * @dev: the device whose channel is written to
+ * @offset: the offset into the channel at which writing starts
+ * @src: the source buffer that is written into the channel
+ * @nbytes: the number of bytes to write into the channel
+ *
+ * If sending a message, use the visorchannel_signalinsert()
+ * function instead.
+ *
+ * Return: integer indicating success (zero) or failure (non-zero)
+ */
int
visorbus_write_channel(struct visor_device *dev, unsigned long offset,
void *src, unsigned long nbytes)
@@ -729,16 +553,13 @@ visorbus_write_channel(struct visor_device *dev, unsigned long offset,
}
EXPORT_SYMBOL_GPL(visorbus_write_channel);
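
A hedged usage sketch of the two accessors documented above, as a hypothetical function driver might use them; the structure, offset and all example_* names are made up for illustration and are not part of any real s-Par channel layout.

#include <linux/types.h>
#include "visorbus.h"

/* hypothetical per-device feature block living somewhere in the channel */
struct example_features {
	u32 flags;
	u32 reserved;
};

#define EXAMPLE_FEATURES_OFFSET 0x200	/* made-up offset */

static int example_toggle_flag(struct visor_device *dev, u32 flag)
{
	struct example_features feat;
	int err;

	err = visorbus_read_channel(dev, EXAMPLE_FEATURES_OFFSET,
				    &feat, sizeof(feat));
	if (err)
		return err;

	feat.flags ^= flag;

	/* write the modified block back at the same offset */
	return visorbus_write_channel(dev, EXAMPLE_FEATURES_OFFSET,
				      &feat, sizeof(feat));
}
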
-int
-visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
- unsigned long nbytes)
-{
- return visorchannel_clear(dev->visorchannel, offset, ch, nbytes);
-}
-EXPORT_SYMBOL_GPL(visorbus_clear_channel);
-
-/** We don't really have a real interrupt, so for now we just call the
- * interrupt function periodically...
+/**
+ * visorbus_enable_channel_interrupts() - enables interrupts on the
+ * designated device
+ * @dev: the device on which to enable interrupts
+ *
+ * Currently we don't yet have a real interrupt, so for now we just call the
+ * interrupt function periodically via a timer.
*/
void
visorbus_enable_channel_interrupts(struct visor_device *dev)
@@ -747,6 +568,11 @@ visorbus_enable_channel_interrupts(struct visor_device *dev)
}
EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
+/**
+ * visorbus_disable_channel_interrupts() - disables interrupts on the
+ * designated device
+ * @dev: the device on which to disable interrupts
+ */
void
visorbus_disable_channel_interrupts(struct visor_device *dev)
{
@@ -754,19 +580,28 @@ visorbus_disable_channel_interrupts(struct visor_device *dev)
}
EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
-/** This is how everything starts from the device end.
- * This function is called when a channel first appears via a ControlVM
- * message. In response, this function allocates a visor_device to
- * correspond to the new channel, and attempts to connect it the appropriate
- * driver. If the appropriate driver is found, the visor_driver.probe()
- * function for that driver will be called, and will be passed the new
- * visor_device that we just created.
+/**
+ * create_visor_device() - create visor device as a result of receiving the
+ * controlvm device_create message for a new device
+ * @dev: a freshly-zeroed struct visor_device, containing only filled-in values
+ * for chipset_bus_no and chipset_dev_no, that will be initialized
+ *
+ * This is how everything starts from the device end.
+ * This function is called when a channel first appears via a ControlVM
+ * message. In response, this function allocates a visor_device to
+ * correspond to the new channel, and attempts to connect it the appropriate
+ * driver. If the appropriate driver is found, the visor_driver.probe()
+ * function for that driver will be called, and will be passed the new
+ * visor_device that we just created.
+ *
+ * It's ok if the appropriate driver is not yet loaded, because in that case
+ * the new device struct will just stick around in the bus' list of devices.
+ * When the appropriate driver calls visorbus_register_visor_driver(), the
+ * visor_driver.probe() for the new driver will be called with the new
+ * device.
*
- * It's ok if the appropriate driver is not yet loaded, because in that case
- * the new device struct will just stick around in the bus' list of devices.
- * When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new
- * device.
+ * Return: 0 if successful, otherwise the negative value returned by
+ * device_add() indicating the reason for failure
*/
static int
create_visor_device(struct visor_device *dev)
@@ -778,33 +613,27 @@ create_visor_device(struct visor_device *dev)
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
POSTCODE_SEVERITY_INFO);
- sema_init(&dev->visordriver_callback_lock, 1); /* unlocked */
+ mutex_init(&dev->visordriver_callback_lock);
dev->device.bus = &visorbus_type;
dev->device.groups = visorbus_channel_groups;
device_initialize(&dev->device);
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
get_device(&dev->device);
- dev->periodic_work =
- visor_periodic_work_create(POLLJIFFIES_NORMALCHANNEL,
- periodic_dev_workqueue,
- dev_periodic_work,
- dev, dev_name(&dev->device));
- if (!dev->periodic_work) {
- POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
- DIAG_SEVERITY_ERR);
- err = -EINVAL;
- goto err_put;
- }
+ init_timer(&dev->timer);
+ dev->timer.data = (unsigned long)(dev);
+ dev->timer.function = dev_periodic_work;
- /* bus_id must be a unique name with respect to this bus TYPE
+ /*
+ * bus_id must be a unique name with respect to this bus TYPE
* (NOT bus instance). That's why we need to include the bus
* number within the name.
*/
dev_set_name(&dev->device, "vbus%u:dev%u",
chipset_bus_no, chipset_dev_no);
- /* device_add does this:
+ /*
+ * device_add does this:
* bus_add_device(dev)
* ->device_attach(dev)
* ->for each driver drv registered on the bus that dev is on
@@ -864,11 +693,20 @@ get_vbus_header_info(struct visorchannel *chan,
return 0;
}
-/* Write the contents of <info> to the struct
- * spar_vbus_channel_protocol.chp_info.
+/**
+ * write_vbus_chp_info() - write the contents of <info> to the struct
+ * spar_vbus_channel_protocol.chp_info
+ * @chan: identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info: contains the information to write
+ *
+ * Writes chipset info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
*/
-
-static int
+static void
write_vbus_chp_info(struct visorchannel *chan,
struct spar_vbus_headerinfo *hdr_info,
struct ultra_vbus_deviceinfo *info)
@@ -876,18 +714,25 @@ write_vbus_chp_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
if (hdr_info->chp_info_offset == 0)
- return -EFAULT;
+ return;
- if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -EFAULT;
- return 0;
+ visorchannel_write(chan, off, info, sizeof(*info));
}
-/* Write the contents of <info> to the struct
- * spar_vbus_channel_protocol.bus_info.
+/**
+ * write_vbus_bus_info() - write the contents of <info> to the struct
+ * spar_vbus_channel_protocol.bus_info
+ * @chan: identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info: contains the information to write
+ *
+ * Writes bus info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
*/
-
-static int
+static void
write_vbus_bus_info(struct visorchannel *chan,
struct spar_vbus_headerinfo *hdr_info,
struct ultra_vbus_deviceinfo *info)
@@ -895,37 +740,46 @@ write_vbus_bus_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
if (hdr_info->bus_info_offset == 0)
- return -EFAULT;
+ return;
- if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -EFAULT;
- return 0;
+ visorchannel_write(chan, off, info, sizeof(*info));
}
-/* Write the contents of <info> to the
- * struct spar_vbus_channel_protocol.dev_info[<devix>].
+/**
+ * write_vbus_dev_info() - write the contents of <info> to the struct
+ * spar_vbus_channel_protocol.dev_info[<devix>]
+ * @chan: identifies the s-Par channel that will be updated
+ * @hdr_info: used to find appropriate channel offset to write data
+ * @info: contains the information to write
+ * @devix: the relative device number (0..n-1) of the device on the bus
+ *
+ * Writes device info into the channel memory to be used for diagnostic
+ * purposes.
+ *
+ * Returns no value since this is debug information and not needed for
+ * device functionality.
*/
-static int
+static void
write_vbus_dev_info(struct visorchannel *chan,
struct spar_vbus_headerinfo *hdr_info,
- struct ultra_vbus_deviceinfo *info, int devix)
+ struct ultra_vbus_deviceinfo *info, unsigned int devix)
{
int off =
(sizeof(struct channel_header) + hdr_info->dev_info_offset) +
(hdr_info->device_info_struct_bytes * devix);
if (hdr_info->dev_info_offset == 0)
- return -EFAULT;
+ return;
- if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -EFAULT;
- return 0;
+ visorchannel_write(chan, off, info, sizeof(*info));
}
-/* For a child device just created on a client bus, fill in
- * information about the driver that is controlling this device into
- * the the appropriate slot within the vbus channel of the bus
- * instance.
+/**
+ * fix_vbus_dev_info() - for a child device just created on a client bus, fill
+ * in information about the driver that is controlling
+ * this device into the appropriate slot within the
+ * vbus channel of the bus instance
+ * @visordev: struct visor_device for the desired device
*/
static void
fix_vbus_dev_info(struct visor_device *visordev)
@@ -933,8 +787,8 @@ fix_vbus_dev_info(struct visor_device *visordev)
int i;
struct visor_device *bdev;
struct visor_driver *visordrv;
- int bus_no = visordev->chipset_bus_no;
- int dev_no = visordev->chipset_dev_no;
+ u32 bus_no = visordev->chipset_bus_no;
+ u32 dev_no = visordev->chipset_dev_no;
struct ultra_vbus_deviceinfo dev_info;
const char *chan_type_name = NULL;
struct spar_vbus_headerinfo *hdr_info;
@@ -942,17 +796,16 @@ fix_vbus_dev_info(struct visor_device *visordev)
if (!visordev->device.driver)
return;
- hdr_info = (struct spar_vbus_headerinfo *)visordev->vbus_hdr_info;
- if (!hdr_info)
- return;
-
bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bdev)
return;
-
+ hdr_info = (struct spar_vbus_headerinfo *)bdev->vbus_hdr_info;
+ if (!hdr_info)
+ return;
visordrv = to_visor_driver(visordev->device.driver);
- /* Within the list of device types (by GUID) that the driver
+ /*
+ * Within the list of device types (by GUID) that the driver
* says it supports, find out which one of those types matches
* the type of this device, so that we can include the device
* type name
@@ -966,20 +819,148 @@ fix_vbus_dev_info(struct visor_device *visordev)
}
}
- bus_device_info_init(&dev_info, chan_type_name,
- visordrv->name, visordrv->version,
- visordrv->vertag);
+ bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
- /* Re-write bus+chipset info, because it is possible that this
- * was previously written by our evil counterpart, virtpci.
- */
+ /*
+ * Re-write bus+chipset info, because it is possible that this
+ * was previously written by our evil counterpart, virtpci.
+ */
write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
write_vbus_bus_info(bdev->visorchannel, hdr_info,
&clientbus_driverinfo);
}
-/** Create a device instance for the visor bus itself.
+/**
+ * visordriver_probe_device() - handle new visor device coming online
+ * @xdev: struct device for the visor device being probed
+ *
+ * This is called automatically upon adding a visor_device (device_add), or
+ * adding a visor_driver (visorbus_register_visor_driver), but only after
+ * visorbus_match() has returned 1 to indicate a successful match between
+ * driver and device.
+ *
+ * If successful, a reference to the device will be held onto via get_device().
+ *
+ * Return: 0 if successful, meaning the function driver's probe() function
+ * was successful with this device, otherwise a negative errno
+ * value indicating failure reason
+ */
+static int
+visordriver_probe_device(struct device *xdev)
+{
+ int res;
+ struct visor_driver *drv;
+ struct visor_device *dev;
+
+ drv = to_visor_driver(xdev->driver);
+ dev = to_visor_device(xdev);
+
+ if (!drv->probe)
+ return -ENODEV;
+
+ mutex_lock(&dev->visordriver_callback_lock);
+ dev->being_removed = false;
+
+ res = drv->probe(dev);
+ if (res >= 0) {
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ fix_vbus_dev_info(dev);
+ }
+
+ mutex_unlock(&dev->visordriver_callback_lock);
+ return res;
+}
+
+/**
+ * visorbus_register_visor_driver() - registers the provided visor driver
+ * for handling one or more visor device
+ * types (channel_types)
+ * @drv: the driver to register
+ *
+ * A visor function driver calls this function to register
+ * the driver. The caller MUST fill in the following fields within the
+ * #drv structure:
+ * name, version, owner, channel_types, probe, remove
+ *
+ * Here's how the whole Linux bus / driver / device model works.
+ *
+ * At system start-up, the visorbus kernel module is loaded, which registers
+ * visorbus_type as a bus type, using bus_register().
+ *
+ * All kernel modules that support particular device types on a
+ * visorbus bus are loaded. Each of these kernel modules calls
+ * visorbus_register_visor_driver() in their init functions, passing a
+ * visor_driver struct. visorbus_register_visor_driver() in turn calls
+ * register_driver(&visor_driver.driver). This .driver member is
+ * initialized with generic methods (like probe), whose sole responsibility
+ * is to act as a broker for the real methods, which are within the
+ * visor_driver struct. (This is the way the subclass behavior is
+ * implemented, since visor_driver is essentially a subclass of the
+ * generic driver.) Whenever a driver_register() happens, core bus code in
+ * the kernel does (see device_attach() in drivers/base/dd.c):
+ *
+ * for each dev associated with the bus (the bus that driver is on) that
+ * does not yet have a driver
+ * if bus.match(dev,newdriver) == yes_matched ** .match specified
+ * ** during bus_register().
+ * newdriver.probe(dev) ** for visor drivers, this will call
+ * ** the generic driver.probe implemented in visorbus.c,
+ * ** which in turn calls the probe specified within the
+ * ** struct visor_driver (which was specified by the
+ * ** actual device driver as part of
+ * ** visorbus_register_visor_driver()).
+ *
+ * The above dance also happens when a new device appears.
+ * So the question is, how are devices created within the system?
+ * Basically, just call device_add(dev). See pci_bus_add_devices().
+ * pci_scan_device() shows an example of how to build a device struct. It
+ * returns the newly-created struct to pci_scan_single_device(), who adds it
+ * to the list of devices at PCIBUS.devices. That list of devices is what
+ * is traversed by pci_bus_add_devices().
+ *
+ * Return: integer indicating success (zero) or failure (non-zero)
+ */
+int visorbus_register_visor_driver(struct visor_driver *drv)
+{
+ int rc = 0;
+
+ if (busreg_rc < 0)
+ return -ENODEV; /*can't register on a nonexistent bus*/
+
+ drv->driver.name = drv->name;
+ drv->driver.bus = &visorbus_type;
+ drv->driver.probe = visordriver_probe_device;
+ drv->driver.remove = visordriver_remove_device;
+ drv->driver.owner = drv->owner;
+
+ /*
+ * driver_register does this:
+ * bus_add_driver(drv)
+ * ->if (drv.bus) ** (bus_type) **
+ * driver_attach(drv)
+ * for each dev with bus type of drv.bus
+ * if (!dev.drv) ** no driver assigned yet **
+ * if (bus.match(dev,drv)) [visorbus_match]
+ * dev.drv = drv
+ * if (!drv.probe(dev)) [visordriver_probe_device]
+ * dev.drv = NULL
+ */
+
+ rc = driver_register(&drv->driver);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
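
For orientation, a hedged sketch of a minimal function driver built on the registration API documented above. The GUID, all example_* names and the channel-type descriptor layout (a guid plus a name, NULL_UUID_LE-terminated) are assumptions for illustration, not code from this patch; the version field is omitted because this patch drops the driver version attribute.

#include <linux/module.h>
#include <linux/uuid.h>
#include "visorbus.h"

/* hypothetical channel-type GUID this driver claims to handle */
#define EXAMPLE_CHANNEL_UUID UUID_LE(0x12345678, 0x9abc, 0xdef0, \
				     0x12, 0x34, 0x56, 0x78, \
				     0x9a, 0xbc, 0xde, 0xf0)

static struct visor_channeltype_descriptor example_channel_types[] = {
	{ EXAMPLE_CHANNEL_UUID, "example" },
	{ NULL_UUID_LE, NULL }
};

static void example_channel_interrupt(struct visor_device *dev)
{
	/* poll the device's channel for new signals here */
}

static int example_probe(struct visor_device *dev)
{
	/* set the device up, then let the bus start the polling timer */
	visorbus_enable_channel_interrupts(dev);
	return 0;
}

static void example_remove(struct visor_device *dev)
{
	visorbus_disable_channel_interrupts(dev);
}

static struct visor_driver example_driver = {
	.name = "example_visor_driver",
	.owner = THIS_MODULE,
	.channel_types = example_channel_types,
	.probe = example_probe,
	.remove = example_remove,
	.channel_interrupt = example_channel_interrupt,
};

static int __init example_init(void)
{
	return visorbus_register_visor_driver(&example_driver);
}

static void __exit example_exit(void)
{
	visorbus_unregister_visor_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
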
+
+/**
+ * create_bus_instance() - create a device instance for the visor bus itself
+ * @dev: struct visor_device indicating the bus instance
+ *
+ * Return: 0 for success, otherwise negative errno value indicating reason for
+ * failure
*/
static int
create_bus_instance(struct visor_device *dev)
@@ -1014,25 +995,26 @@ create_bus_instance(struct visor_device *dev)
} else {
kfree(hdr_info);
}
- bus_count++;
list_add_tail(&dev->list_all, &list_all_bus_instances);
dev_set_drvdata(&dev->device, dev);
return 0;
}
-/** Remove a device instance for the visor bus itself.
+/**
+ * remove_bus_instance() - remove a device instance for the visor bus itself
+ * @dev: struct visor_device identifying the bus to remove
*/
static void
remove_bus_instance(struct visor_device *dev)
{
- /* Note that this will result in the release method for
+ /*
+ * Note that this will result in the release method for
* dev->dev being called, which will call
* visorbus_release_busdevice(). This has something to do with
* the put_device() done in device_unregister(), but I have never
* successfully been able to trace thru the code to see where/how
* release() gets called. But I know it does.
*/
- bus_count--;
if (dev->visorchannel) {
visorchannel_destroy(dev->visorchannel);
dev->visorchannel = NULL;
@@ -1042,8 +1024,11 @@ remove_bus_instance(struct visor_device *dev)
device_unregister(&dev->device);
}
-/** Create and register the one-and-only one instance of
- * the visor bus type (visorbus_type).
+/**
+ * create_bus_type() - create and register the one-and-only one instance of
+ * the visor bus type (visorbus_type)
+ * Return: 0 for success, otherwise negative errno value returned by
+ * bus_register() indicating the reason for failure
*/
static int
create_bus_type(void)
@@ -1052,7 +1037,9 @@ create_bus_type(void)
return busreg_rc;
}
-/** Remove the one-and-only one instance of the visor bus type (visorbus_type).
+/**
+ * remove_bus_type() - remove the one-and-only one instance of the visor bus
+ * type (visorbus_type)
*/
static void
remove_bus_type(void)
@@ -1060,7 +1047,8 @@ remove_bus_type(void)
bus_unregister(&visorbus_type);
}
-/** Remove all child visor bus device instances.
+/**
+ * remove_all_visor_devices() - remove all child visor bus device instances
*/
static void
remove_all_visor_devices(void)
@@ -1075,7 +1063,7 @@ remove_all_visor_devices(void)
}
}
-static void
+void
chipset_bus_create(struct visor_device *dev)
{
int rc;
@@ -1092,19 +1080,17 @@ chipset_bus_create(struct visor_device *dev)
POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no,
POSTCODE_SEVERITY_INFO);
- if (chipset_responders.bus_create)
- (*chipset_responders.bus_create) (dev, rc);
+ bus_create_response(dev, rc);
}
-static void
+void
chipset_bus_destroy(struct visor_device *dev)
{
remove_bus_instance(dev);
- if (chipset_responders.bus_destroy)
- (*chipset_responders.bus_destroy)(dev, 0);
+ bus_destroy_response(dev, 0);
}
-static void
+void
chipset_device_create(struct visor_device *dev_info)
{
int rc;
@@ -1115,8 +1101,7 @@ chipset_device_create(struct visor_device *dev_info)
POSTCODE_SEVERITY_INFO);
rc = create_visor_device(dev_info);
- if (chipset_responders.device_create)
- chipset_responders.device_create(dev_info, rc);
+ device_create_response(dev_info, rc);
if (rc < 0)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
@@ -1126,18 +1111,22 @@ chipset_device_create(struct visor_device *dev_info)
POSTCODE_SEVERITY_INFO);
}
-static void
+void
chipset_device_destroy(struct visor_device *dev_info)
{
remove_visor_device(dev_info);
- if (chipset_responders.device_destroy)
- (*chipset_responders.device_destroy) (dev_info, 0);
+ device_destroy_response(dev_info, 0);
}
-/* This is the callback function specified for a function driver, to
- * be called when a pending "pause device" operation has been
- * completed.
+/**
+ * pause_state_change_complete() - the callback function to be called by a
+ * visorbus function driver when a
+ * pending "pause device" operation has
+ * completed
+ * @dev: struct visor_device identifying the paused device
+ * @status: 0 iff the pause state change completed successfully, otherwise
+ * a negative errno value indicating the reason for failure
*/
static void
pause_state_change_complete(struct visor_device *dev, int status)
@@ -1146,19 +1135,18 @@ pause_state_change_complete(struct visor_device *dev, int status)
return;
dev->pausing = false;
- if (!chipset_responders.device_pause) /* this can never happen! */
- return;
- /* Notify the chipset driver that the pause is complete, which
- * will presumably want to send some sort of response to the
- * initiator.
- */
- (*chipset_responders.device_pause) (dev, status);
+ device_pause_response(dev, status);
}
-/* This is the callback function specified for a function driver, to
- * be called when a pending "resume device" operation has been
- * completed.
+/**
+ * resume_state_change_complete() - the callback function to be called by a
+ * visorbus function driver when a
+ * pending "resume device" operation has
+ * completed
+ * @dev: struct visor_device identifying the resumed device
+ * @status: 0 iff the resume state change completed successfully, otherwise
+ * a negative errno value indicating the reason for failure
*/
static void
resume_state_change_complete(struct visor_device *dev, int status)
@@ -1167,19 +1155,25 @@ resume_state_change_complete(struct visor_device *dev, int status)
return;
dev->resuming = false;
- if (!chipset_responders.device_resume) /* this can never happen! */
- return;
- /* Notify the chipset driver that the resume is complete,
+ /*
+ * Notify the chipset driver that the resume is complete,
* which will presumably want to send some sort of response to
* the initiator.
*/
- (*chipset_responders.device_resume) (dev, status);
+ device_resume_response(dev, status);
}
-/* Tell the subordinate function driver for a specific device to pause
- * or resume that device. Result is returned asynchronously via a
- * callback function.
+/**
+ * initiate_chipset_device_pause_resume() - start a pause or resume operation
+ * for a visor device
+ * @dev: struct visor_device identifying the device being paused or resumed
+ * @is_pause: true to indicate pause operation, false to indicate resume
+ *
+ * Tell the subordinate function driver for a specific device to pause
+ * or resume that device. Success/failure result is returned asynchronously
+ * via a callback function; see pause_state_change_complete() and
+ * resume_state_change_complete().
*/
static void
initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
@@ -1189,9 +1183,9 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
void (*notify_func)(struct visor_device *dev, int response) = NULL;
if (is_pause)
- notify_func = chipset_responders.device_pause;
+ notify_func = device_pause_response;
else
- notify_func = chipset_responders.device_resume;
+ notify_func = device_resume_response;
if (!notify_func)
return;
@@ -1206,7 +1200,8 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
return;
}
- /* Note that even though both drv->pause() and drv->resume
+ /*
+ * Note that even though both drv->pause() and drv->resume
* specify a callback function, it is NOT necessary for us to
* increment our local module usage count. Reason is, there
* is already a linkage dependency between child function
@@ -1246,33 +1241,41 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
}
}
-static void
+/**
+ * chipset_device_pause() - start a pause operation for a visor device
+ * @dev_info: struct visor_device identifying the device being paused
+ *
+ * Tell the subordinate function driver for a specific device to pause
+ * that device. Success/failure result is returned asynchronously
+ * via a callback function; see pause_state_change_complete().
+ */
+void
chipset_device_pause(struct visor_device *dev_info)
{
initiate_chipset_device_pause_resume(dev_info, true);
}
-static void
+/**
+ * chipset_device_resume() - start a resume operation for a visor device
+ * @dev_info: struct visor_device identifying the device being resumed
+ *
+ * Tell the subordinate function driver for a specific device to resume
+ * that device. Success/failure result is returned asynchronously
+ * via a callback function; see resume_state_change_complete().
+ */
+void
chipset_device_resume(struct visor_device *dev_info)
{
initiate_chipset_device_pause_resume(dev_info, false);
}
-struct channel_size_info {
- uuid_le guid;
- unsigned long min_size;
- unsigned long max_size;
-};
-
int
visorbus_init(void)
{
int err;
POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
- bus_device_info_init(&clientbus_driverinfo,
- "clientbus", "visorbus",
- VERSION, NULL);
+ bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
err = create_bus_type();
if (err < 0) {
@@ -1280,19 +1283,7 @@ visorbus_init(void)
goto error;
}
- periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
- if (!periodic_dev_workqueue) {
- POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
- err = -ENOMEM;
- goto error;
- }
-
- /* This enables us to receive notifications when devices appear for
- * which this service partition is to be a server for.
- */
- visorchipset_register_busdev(&chipset_notifiers,
- &chipset_responders,
- &chipset_driverinfo);
+ bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
return 0;
@@ -1306,20 +1297,8 @@ visorbus_exit(void)
{
struct list_head *listentry, *listtmp;
- visorchipset_register_busdev(NULL, NULL, NULL);
remove_all_visor_devices();
- flush_workqueue(periodic_dev_workqueue); /* better not be any work! */
- destroy_workqueue(periodic_dev_workqueue);
- periodic_dev_workqueue = NULL;
-
- if (periodic_test_workqueue) {
- cancel_delayed_work(&periodic_work);
- flush_workqueue(periodic_test_workqueue);
- destroy_workqueue(periodic_test_workqueue);
- periodic_test_workqueue = NULL;
- }
-
list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
struct visor_device *dev = list_entry(listentry,
struct visor_device,
@@ -1329,9 +1308,6 @@ visorbus_exit(void)
remove_bus_type();
}
-module_param_named(debug, visorbus_debug, int, S_IRUGO);
-MODULE_PARM_DESC(visorbus_debug, "1 to debug");
-
module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
MODULE_PARM_DESC(visorbus_forcematch,
"1 to force a successful dev <--> drv match");
@@ -1339,6 +1315,3 @@ MODULE_PARM_DESC(visorbus_forcematch,
module_param_named(forcenomatch, visorbus_forcenomatch, int, S_IRUGO);
MODULE_PARM_DESC(visorbus_forcenomatch,
"1 to force an UNsuccessful dev <--> drv match");
-
-module_param_named(debugref, visorbus_debugref, int, S_IRUGO);
-MODULE_PARM_DESC(visorbus_debugref, "1 to debug reference counting");
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 39edd2018453..15403fb52847 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -1,4 +1,4 @@
-/* visorchipset.h
+/* visorbus_private.h
*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
@@ -14,55 +14,72 @@
* details.
*/
-#ifndef __VISORCHIPSET_H__
-#define __VISORCHIPSET_H__
+#ifndef __VISORBUS_PRIVATE_H__
+#define __VISORBUS_PRIVATE_H__
#include <linux/uuid.h>
+#include <linux/utsname.h>
#include "controlvmchannel.h"
-#include "vbusdeviceinfo.h"
-#include "vbushelper.h"
+#include "vbuschannel.h"
-/* These functions will be called from within visorchipset when certain
- * events happen. (The implementation of these functions is outside of
- * visorchipset.)
+/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the
+ * command line
*/
-struct visorchipset_busdev_notifiers {
- void (*bus_create)(struct visor_device *bus_info);
- void (*bus_destroy)(struct visor_device *bus_info);
- void (*device_create)(struct visor_device *bus_info);
- void (*device_destroy)(struct visor_device *bus_info);
- void (*device_pause)(struct visor_device *bus_info);
- void (*device_resume)(struct visor_device *bus_info);
-};
-/* These functions live inside visorchipset, and will be called to indicate
- * responses to specific events (by code outside of visorchipset).
- * For now, the value for each response is simply either:
- * 0 = it worked
- * -1 = it failed
- */
-struct visorchipset_busdev_responders {
- void (*bus_create)(struct visor_device *p, int response);
- void (*bus_destroy)(struct visor_device *p, int response);
- void (*device_create)(struct visor_device *p, int response);
- void (*device_destroy)(struct visor_device *p, int response);
- void (*device_pause)(struct visor_device *p, int response);
- void (*device_resume)(struct visor_device *p, int response);
-};
+static inline void bus_device_info_init(
+ struct ultra_vbus_deviceinfo *bus_device_info_ptr,
+ const char *dev_type, const char *drv_name)
+{
+ memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
+ snprintf(bus_device_info_ptr->devtype,
+ sizeof(bus_device_info_ptr->devtype),
+ "%s", (dev_type) ? dev_type : "unknownType");
+ snprintf(bus_device_info_ptr->drvname,
+ sizeof(bus_device_info_ptr->drvname),
+ "%s", (drv_name) ? drv_name : "unknownDriver");
+ snprintf(bus_device_info_ptr->infostrs,
+ sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
+ utsname()->release);
+}
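
A hedged usage note for the helper above; the strings and example_* names are illustrative, and visorbus_main.c makes the same call for its clientbus and chipset driverinfo structures.

static struct ultra_vbus_deviceinfo example_driverinfo;

static void example_fill_driverinfo(void)
{
	/* devtype, drvname and a "kernel ver. <release>" string get filled in */
	bus_device_info_init(&example_driverinfo, "examplebus", "example_driver");

	/* NULL arguments fall back to "unknownType" / "unknownDriver" */
	bus_device_info_init(&example_driverinfo, NULL, NULL);
}
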
-/** Register functions (in the bus driver) to get called by visorchipset
- * whenever a bus or device appears for which this guest is to be the
- * client for. visorchipset will fill in <responders>, to indicate
- * functions the bus driver should call to indicate message responses.
- */
-void
-visorchipset_register_busdev(
- struct visorchipset_busdev_notifiers *notifiers,
- struct visorchipset_busdev_responders *responders,
- struct ultra_vbus_deviceinfo *driver_info);
+void chipset_bus_create(struct visor_device *bus_info);
+void chipset_bus_destroy(struct visor_device *bus_info);
+void chipset_device_create(struct visor_device *dev_info);
+void chipset_device_destroy(struct visor_device *dev_info);
+void chipset_device_pause(struct visor_device *dev_info);
+void chipset_device_resume(struct visor_device *dev_info);
+
+void bus_create_response(struct visor_device *p, int response);
+void bus_destroy_response(struct visor_device *p, int response);
+void device_create_response(struct visor_device *p, int response);
+void device_destroy_response(struct visor_device *p, int response);
+void device_resume_response(struct visor_device *p, int response);
+void device_pause_response(struct visor_device *p, int response);
-/* visorbus init and exit functions */
int visorbus_init(void);
void visorbus_exit(void);
+
+/* visorchannel access functions */
+
+struct visorchannel *visorchannel_create(u64 physaddr,
+ unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid);
+struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
+ unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid);
+void visorchannel_destroy(struct visorchannel *channel);
+int visorchannel_read(struct visorchannel *channel, ulong offset,
+ void *local, ulong nbytes);
+int visorchannel_write(struct visorchannel *channel, ulong offset,
+ void *local, ulong nbytes);
+u64 visorchannel_get_physaddr(struct visorchannel *channel);
+ulong visorchannel_get_nbytes(struct visorchannel *channel);
+char *visorchannel_id(struct visorchannel *channel, char *s);
+char *visorchannel_zoneid(struct visorchannel *channel, char *s);
+u64 visorchannel_get_clientpartition(struct visorchannel *channel);
+int visorchannel_set_clientpartition(struct visorchannel *channel,
+ u64 partition_handle);
+char *visorchannel_uuid_id(uuid_le *guid, char *s);
+void __iomem *visorchannel_get_header(struct visorchannel *channel);
#endif
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 43373582cf1d..300a65dc5c6c 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -15,14 +15,13 @@
*/
/*
- * This provides Supervisor channel communication primitives, which are
+ * This provides s-Par channel communication primitives, which are
* independent of the mechanism used to access the channel data.
*/
#include <linux/uuid.h>
#include <linux/io.h>
-#include "version.h"
#include "visorbus.h"
#include "controlvmchannel.h"
@@ -55,110 +54,6 @@ struct visorchannel {
uuid_le inst;
};
-/* Creates the struct visorchannel abstraction for a data area in memory,
- * but does NOT modify this data area.
- */
-static struct visorchannel *
-visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, unsigned long off,
- uuid_le guid, bool needs_lock)
-{
- struct visorchannel *channel;
- int err;
- size_t size = sizeof(struct channel_header);
-
- if (physaddr == 0)
- return NULL;
-
- channel = kzalloc(sizeof(*channel), gfp);
- if (!channel)
- return NULL;
-
- channel->needs_lock = needs_lock;
- spin_lock_init(&channel->insert_lock);
- spin_lock_init(&channel->remove_lock);
-
- /* Video driver constains the efi framebuffer so it will get a
- * conflict resource when requesting its full mem region. Since
- * we are only using the efi framebuffer for video we can ignore
- * this. Remember that we haven't requested it so we don't try to
- * release later on.
- */
- channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
- if (!channel->requested) {
- if (uuid_le_cmp(guid, spar_video_guid)) {
- /* Not the video channel we care about this */
- goto err_destroy_channel;
- }
- }
-
- channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(physaddr, size);
- goto err_destroy_channel;
- }
-
- channel->physaddr = physaddr;
- channel->nbytes = size;
-
- err = visorchannel_read(channel, 0, &channel->chan_hdr,
- sizeof(struct channel_header));
- if (err)
- goto err_destroy_channel;
-
- /* we had better be a CLIENT of this channel */
- if (channel_bytes == 0)
- channel_bytes = (ulong)channel->chan_hdr.size;
- if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
- guid = channel->chan_hdr.chtype;
-
- memunmap(channel->mapped);
- if (channel->requested)
- release_mem_region(channel->physaddr, channel->nbytes);
- channel->mapped = NULL;
- channel->requested = request_mem_region(channel->physaddr,
- channel_bytes, MYDRVNAME);
- if (!channel->requested) {
- if (uuid_le_cmp(guid, spar_video_guid)) {
- /* Different we care about this */
- goto err_destroy_channel;
- }
- }
-
- channel->mapped = memremap(channel->physaddr, channel_bytes,
- MEMREMAP_WB);
- if (!channel->mapped) {
- release_mem_region(channel->physaddr, channel_bytes);
- goto err_destroy_channel;
- }
-
- channel->nbytes = channel_bytes;
- channel->guid = guid;
- return channel;
-
-err_destroy_channel:
- visorchannel_destroy(channel);
- return NULL;
-}
-
-struct visorchannel *
-visorchannel_create(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid)
-{
- return visorchannel_create_guts(physaddr, channel_bytes, gfp, 0, guid,
- false);
-}
-EXPORT_SYMBOL_GPL(visorchannel_create);
-
-struct visorchannel *
-visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
- gfp_t gfp, uuid_le guid)
-{
- return visorchannel_create_guts(physaddr, channel_bytes, gfp, 0, guid,
- true);
-}
-EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
-
void
visorchannel_destroy(struct visorchannel *channel)
{
@@ -171,21 +66,18 @@ visorchannel_destroy(struct visorchannel *channel)
}
kfree(channel);
}
-EXPORT_SYMBOL_GPL(visorchannel_destroy);
u64
visorchannel_get_physaddr(struct visorchannel *channel)
{
return channel->physaddr;
}
-EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
ulong
visorchannel_get_nbytes(struct visorchannel *channel)
{
return channel->nbytes;
}
-EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
char *
visorchannel_uuid_id(uuid_le *guid, char *s)
@@ -193,28 +85,24 @@ visorchannel_uuid_id(uuid_le *guid, char *s)
sprintf(s, "%pUL", guid);
return s;
}
-EXPORT_SYMBOL_GPL(visorchannel_uuid_id);
char *
visorchannel_id(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->guid, s);
}
-EXPORT_SYMBOL_GPL(visorchannel_id);
char *
visorchannel_zoneid(struct visorchannel *channel, char *s)
{
return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}
-EXPORT_SYMBOL_GPL(visorchannel_zoneid);
u64
visorchannel_get_clientpartition(struct visorchannel *channel)
{
return channel->chan_hdr.partition_handle;
}
-EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
int
visorchannel_set_clientpartition(struct visorchannel *channel,
@@ -223,8 +111,13 @@ visorchannel_set_clientpartition(struct visorchannel *channel,
channel->chan_hdr.partition_handle = partition_handle;
return 0;
}
-EXPORT_SYMBOL_GPL(visorchannel_set_clientpartition);
+/**
+ * visorchannel_get_uuid() - queries the UUID of the designated channel
+ * @channel: the channel to query
+ *
+ * Return: the UUID of the provided channel
+ */
uuid_le
visorchannel_get_uuid(struct visorchannel *channel)
{
@@ -243,7 +136,6 @@ visorchannel_read(struct visorchannel *channel, ulong offset,
return 0;
}
-EXPORT_SYMBOL_GPL(visorchannel_read);
int
visorchannel_write(struct visorchannel *channel, ulong offset,
@@ -265,156 +157,125 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
return 0;
}
-EXPORT_SYMBOL_GPL(visorchannel_write);
-
-int
-visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch,
- ulong nbytes)
-{
- int err;
- int bufsize = PAGE_SIZE;
- int written = 0;
- u8 *buf;
-
- buf = (u8 *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- memset(buf, ch, bufsize);
-
- while (nbytes > 0) {
- int thisbytes = bufsize;
-
- if (nbytes < thisbytes)
- thisbytes = nbytes;
- err = visorchannel_write(channel, offset + written,
- buf, thisbytes);
- if (err)
- goto out_free_page;
-
- written += thisbytes;
- nbytes -= thisbytes;
- }
- err = 0;
-
-out_free_page:
- free_page((unsigned long)buf);
- return err;
-}
-EXPORT_SYMBOL_GPL(visorchannel_clear);
void __iomem *
visorchannel_get_header(struct visorchannel *channel)
{
return (void __iomem *)&channel->chan_hdr;
}
-EXPORT_SYMBOL_GPL(visorchannel_get_header);
-/** Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
- * channel header
+/*
+ * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
+ * channel header
*/
#define SIG_QUEUE_OFFSET(chan_hdr, q) \
((chan_hdr)->ch_space_offset + \
((q) * sizeof(struct signal_queue_header)))
-/** Return offset of a specific queue entry (data) from the beginning of a
- * channel header
+/*
+ * Return offset of a specific queue entry (data) from the beginning of a
+ * channel header
*/
#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
(SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
((slot) * (sig_hdr)->signal_size))
-/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
- * into host memory
+/*
+ * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
+ * into host memory
*/
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
- (visorchannel_write(channel, \
- SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +\
- offsetof(struct signal_queue_header, FIELD), \
- &((sig_hdr)->FIELD), \
- sizeof((sig_hdr)->FIELD)) >= 0)
+ visorchannel_write(channel, \
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +\
+ offsetof(struct signal_queue_header, FIELD), \
+ &((sig_hdr)->FIELD), \
+ sizeof((sig_hdr)->FIELD))
-static bool
+static int
sig_read_header(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr)
{
- int err;
-
if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
- return false;
+ return -EINVAL;
/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
- err = visorchannel_read(channel,
- SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
- sig_hdr, sizeof(struct signal_queue_header));
- if (err)
- return false;
-
- return true;
+ return visorchannel_read(channel,
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
+ sig_hdr, sizeof(struct signal_queue_header));
}
-static inline bool
+static inline int
sig_read_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
- int err;
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
sig_hdr, slot);
- err = visorchannel_read(channel, signal_data_offset,
- data, sig_hdr->signal_size);
- if (err)
- return false;
-
- return true;
+ return visorchannel_read(channel, signal_data_offset,
+ data, sig_hdr->signal_size);
}
-static inline bool
+static inline int
sig_write_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
- int err;
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
sig_hdr, slot);
- err = visorchannel_write(channel, signal_data_offset,
- data, sig_hdr->signal_size);
- if (err)
- return false;
-
- return true;
+ return visorchannel_write(channel, signal_data_offset,
+ data, sig_hdr->signal_size);
}
-static bool
+static int
signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
{
struct signal_queue_header sig_hdr;
+ int error;
+
+ error = sig_read_header(channel, queue, &sig_hdr);
+ if (error)
+ return error;
- if (!sig_read_header(channel, queue, &sig_hdr))
- return false;
if (sig_hdr.head == sig_hdr.tail)
- return false; /* no signals to remove */
+ return -EIO; /* no signals to remove */
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
- if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg))
- return false;
+
+ error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
+ if (error)
+ return error;
+
sig_hdr.num_received++;
- /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ /*
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail))
- return false;
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received))
- return false;
- return true;
+
+ error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
+ if (error)
+ return error;
+ error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
+ if (error)
+ return error;
+
+ return 0;
}
-bool
+/**
+ * visorchannel_signalremove() - removes a message from the designated
+ * channel/queue
+ * @channel: the channel the message will be removed from
+ * @queue: the queue the message will be removed from
+ * @msg: the message to remove
+ *
+ * Return: integer error code indicating the status of the removal
+ */
+int
visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
{
- bool rc;
+ int rc;
unsigned long flags;
if (channel->needs_lock) {
@@ -429,6 +290,15 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);
+/**
+ * visorchannel_signalempty() - checks if the designated channel/queue
+ * contains any messages
+ * @channel: the channel to query
+ * @queue: the queue in the channel to query
+ *
+ * Return: true if the designated channel/queue contains no messages
+ * (i.e. is empty), false otherwise
+ */
bool
visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
@@ -439,7 +309,7 @@ visorchannel_signalempty(struct visorchannel *channel, u32 queue)
if (channel->needs_lock)
spin_lock_irqsave(&channel->remove_lock, flags);
- if (!sig_read_header(channel, queue, &sig_hdr))
+ if (sig_read_header(channel, queue, &sig_hdr))
rc = true;
if (sig_hdr.head == sig_hdr.tail)
rc = true;
@@ -450,13 +320,15 @@ visorchannel_signalempty(struct visorchannel *channel, u32 queue)
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);
-static bool
+static int
signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
{
struct signal_queue_header sig_hdr;
+ int error;
- if (!sig_read_header(channel, queue, &sig_hdr))
- return false;
+ error = sig_read_header(channel, queue, &sig_hdr);
+ if (error)
+ return error;
sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
if (sig_hdr.head == sig_hdr.tail) {
@@ -467,169 +339,176 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
num_overflows),
&sig_hdr.num_overflows,
sizeof(sig_hdr.num_overflows));
- return false;
+ return -EIO;
}
- if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg))
- return false;
+ error = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
+ if (error)
+ return error;
sig_hdr.num_sent++;
- /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ /*
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head))
- return false;
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent))
- return false;
- return true;
+ error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
+ if (error)
+ return error;
+ error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
+ if (error)
+ return error;
+
+ return 0;
}
-bool
-visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
+/**
+ * visorchannel_create_guts() - creates the struct visorchannel abstraction
+ * for a data area in memory, but does NOT modify
+ * this data area
+ * @physaddr: physical address of start of channel
+ * @channel_bytes: size of the channel in bytes; this may be 0 if the channel has
+ * already been initialized in memory (which is true for all
+ * channels provided to guest environments by the s-Par
+ * back-end), in which case the actual channel size will be
+ * read from the channel header in memory
+ * @gfp: gfp_t to use when allocating memory for the data struct
+ * @guid:	uuid that identifies channel type; this may be 0 if the channel
+ * has already been initialized in memory (which is true for all
+ * channels provided to guest environments by the s-Par
+ * back-end), in which case the actual channel guid will be
+ * read from the channel header in memory
+ * @needs_lock: must specify true if you have multiple threads of execution
+ * that will be calling visorchannel methods of this
+ * visorchannel at the same time
+ *
+ * Return: pointer to visorchannel that was created if successful,
+ * otherwise NULL
+ */
+static struct visorchannel *
+visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid, bool needs_lock)
{
- bool rc;
- unsigned long flags;
+ struct visorchannel *channel;
+ int err;
+ size_t size = sizeof(struct channel_header);
- if (channel->needs_lock) {
- spin_lock_irqsave(&channel->insert_lock, flags);
- rc = signalinsert_inner(channel, queue, msg);
- spin_unlock_irqrestore(&channel->insert_lock, flags);
- } else {
- rc = signalinsert_inner(channel, queue, msg);
+ if (physaddr == 0)
+ return NULL;
+
+ channel = kzalloc(sizeof(*channel), gfp);
+ if (!channel)
+ return NULL;
+
+ channel->needs_lock = needs_lock;
+ spin_lock_init(&channel->insert_lock);
+ spin_lock_init(&channel->remove_lock);
+
+ /*
+	 * The video driver contains the efi framebuffer, so it will get a
+	 * resource conflict when requesting its full mem region. Since
+	 * we are only using the efi framebuffer for video, we can ignore
+	 * the conflict. Remember that we haven't requested the region, so
+	 * we don't try to release it later on.
+ */
+ channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
+ if (!channel->requested) {
+ if (uuid_le_cmp(guid, spar_video_guid)) {
+			/* Not the video channel; this conflict matters */
+ goto err_destroy_channel;
+ }
}
- return rc;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
+ channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
+ if (!channel->mapped) {
+ release_mem_region(physaddr, size);
+ goto err_destroy_channel;
+ }
-int
-visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue)
-{
- struct signal_queue_header sig_hdr;
- u32 slots_avail, slots_used;
- u32 head, tail;
-
- if (!sig_read_header(channel, queue, &sig_hdr))
- return 0;
- head = sig_hdr.head;
- tail = sig_hdr.tail;
- if (head < tail)
- head = head + sig_hdr.max_slots;
- slots_used = head - tail;
- slots_avail = sig_hdr.max_signals - slots_used;
- return (int)slots_avail;
-}
-EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
+ channel->physaddr = physaddr;
+ channel->nbytes = size;
-int
-visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue)
-{
- struct signal_queue_header sig_hdr;
+ err = visorchannel_read(channel, 0, &channel->chan_hdr,
+ sizeof(struct channel_header));
+ if (err)
+ goto err_destroy_channel;
+
+ /* we had better be a CLIENT of this channel */
+ if (channel_bytes == 0)
+ channel_bytes = (ulong)channel->chan_hdr.size;
+ if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
+ guid = channel->chan_hdr.chtype;
+
+ memunmap(channel->mapped);
+ if (channel->requested)
+ release_mem_region(channel->physaddr, channel->nbytes);
+ channel->mapped = NULL;
+ channel->requested = request_mem_region(channel->physaddr,
+ channel_bytes, MYDRVNAME);
+ if (!channel->requested) {
+ if (uuid_le_cmp(guid, spar_video_guid)) {
+			/* Not the video channel; this conflict matters */
+ goto err_destroy_channel;
+ }
+ }
+
+ channel->mapped = memremap(channel->physaddr, channel_bytes,
+ MEMREMAP_WB);
+ if (!channel->mapped) {
+ release_mem_region(channel->physaddr, channel_bytes);
+ goto err_destroy_channel;
+ }
+
+ channel->nbytes = channel_bytes;
+ channel->guid = guid;
+ return channel;
- if (!sig_read_header(channel, queue, &sig_hdr))
- return 0;
- return (int)sig_hdr.max_signals;
+err_destroy_channel:
+ visorchannel_destroy(channel);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
-static void
-sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq)
+struct visorchannel *
+visorchannel_create(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
{
- seq_printf(seq, "Signal Queue #%d\n", which);
- seq_printf(seq, " VersionId = %lu\n", (ulong)q->version);
- seq_printf(seq, " Type = %lu\n", (ulong)q->chtype);
- seq_printf(seq, " oSignalBase = %llu\n",
- (long long)q->sig_base_offset);
- seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size);
- seq_printf(seq, " MaxSignalSlots = %lu\n",
- (ulong)q->max_slots);
- seq_printf(seq, " MaxSignals = %lu\n", (ulong)q->max_signals);
- seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
- (long long)q->features);
- seq_printf(seq, " NumSignalsSent = %llu\n",
- (long long)q->num_sent);
- seq_printf(seq, " NumSignalsReceived = %llu\n",
- (long long)q->num_received);
- seq_printf(seq, " NumOverflows = %llu\n",
- (long long)q->num_overflows);
- seq_printf(seq, " Head = %lu\n", (ulong)q->head);
- seq_printf(seq, " Tail = %lu\n", (ulong)q->tail);
+ return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
+ false);
}
-void
-visorchannel_debug(struct visorchannel *channel, int num_queues,
- struct seq_file *seq, u32 off)
+struct visorchannel *
+visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes,
+ gfp_t gfp, uuid_le guid)
{
- u64 addr = 0;
- ulong nbytes = 0, nbytes_region = 0;
- struct channel_header hdr;
- struct channel_header *phdr = &hdr;
- int i = 0;
- int errcode = 0;
+ return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid,
+ true);
+}
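/*
 * Illustrative sketch of channel creation (not part of this patch):
 * the _with_lock variant is used when multiple threads may insert or
 * remove signals concurrently.  "example_physaddr" and "example_guid"
 * are placeholders, not values defined by this driver.
 */
static struct visorchannel *example_attach_channel(u64 example_physaddr,
						   uuid_le example_guid)
{
	struct visorchannel *channel;

	/* channel_bytes == 0: the size is read from the channel header */
	channel = visorchannel_create_with_lock(example_physaddr, 0,
						GFP_KERNEL, example_guid);
	if (!channel)
		return NULL;	/* creation failed; nothing to clean up */

	return channel;
}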
- if (!channel)
- return;
+/**
+ * visorchannel_signalinsert() - inserts a message into the designated
+ * channel/queue
+ * @channel: the channel the message will be added to
+ * @queue: the queue the message will be added to
+ * @msg: the message to insert
+ *
+ * Return: 0 on success, or a negative error code if the insertion failed
+ */
+int
+visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
+{
+ int rc;
+ unsigned long flags;
- addr = visorchannel_get_physaddr(channel);
- nbytes_region = visorchannel_get_nbytes(channel);
- errcode = visorchannel_read(channel, off,
- phdr, sizeof(struct channel_header));
- if (errcode < 0) {
- seq_printf(seq,
- "Read of channel header failed with errcode=%d)\n",
- errcode);
- if (off == 0) {
- phdr = &channel->chan_hdr;
- seq_puts(seq, "(following data may be stale)\n");
- } else {
- return;
- }
+ if (channel->needs_lock) {
+ spin_lock_irqsave(&channel->insert_lock, flags);
+ rc = signalinsert_inner(channel, queue, msg);
+ spin_unlock_irqrestore(&channel->insert_lock, flags);
+ } else {
+ rc = signalinsert_inner(channel, queue, msg);
}
- nbytes = (ulong)(phdr->size);
- seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
- addr + off, nbytes, nbytes_region);
- seq_printf(seq, "Type = %pUL\n", &phdr->chtype);
- seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->zone_uuid);
- seq_printf(seq, "Signature = 0x%-16.16Lx\n",
- (long long)phdr->signature);
- seq_printf(seq, "LegacyState = %lu\n", (ulong)phdr->legacy_state);
- seq_printf(seq, "SrvState = %lu\n", (ulong)phdr->srv_state);
- seq_printf(seq, "CliStateBoot = %lu\n", (ulong)phdr->cli_state_boot);
- seq_printf(seq, "CliStateOS = %lu\n", (ulong)phdr->cli_state_os);
- seq_printf(seq, "HeaderSize = %lu\n", (ulong)phdr->header_size);
- seq_printf(seq, "Size = %llu\n", (long long)phdr->size);
- seq_printf(seq, "Features = 0x%-16.16llx\n",
- (long long)phdr->features);
- seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
- (long long)phdr->partition_handle);
- seq_printf(seq, "Handle = 0x%-16.16llx\n",
- (long long)phdr->handle);
- seq_printf(seq, "VersionId = %lu\n", (ulong)phdr->version_id);
- seq_printf(seq, "oChannelSpace = %llu\n",
- (long long)phdr->ch_space_offset);
- if ((phdr->ch_space_offset == 0) || (errcode < 0))
- ;
- else
- for (i = 0; i < num_queues; i++) {
- struct signal_queue_header q;
-
- errcode = visorchannel_read(channel,
- off +
- phdr->ch_space_offset +
- (i * sizeof(q)),
- &q, sizeof(q));
- if (errcode < 0) {
- seq_printf(seq,
- "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n",
- i, addr, errcode);
- continue;
- }
- sigqueue_debug(&q, i, seq);
- }
- seq_printf(seq, "--- End channel @0x%-16.16Lx for 0x%lx bytes ---\n",
- addr + off, nbytes);
+
+ return rc;
}
-EXPORT_SYMBOL_GPL(visorchannel_debug);
+EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index d248c946a13b..59871495ea85 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -25,21 +25,12 @@
#include <linux/uuid.h>
#include <linux/crash_dump.h>
-#include "channel_guid.h"
-#include "controlvmchannel.h"
-#include "controlvmcompletionstatus.h"
-#include "guestlinuxdebug.h"
-#include "periodic_work.h"
-#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
-#define MAX_NAME_SIZE 128
-#define MAX_IP_SIZE 50
-#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -58,9 +49,6 @@
* Module parameters
*/
static int visorchipset_major;
-static int visorchipset_visorbusregwait = 1; /* default is on */
-static unsigned long controlvm_payload_bytes_buffered;
-static u32 dump_vhba_bus;
static int
visorchipset_open(struct inode *inode, struct file *file)
@@ -79,15 +67,15 @@ visorchipset_release(struct inode *inode, struct file *file)
return 0;
}
-/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
-* we switch to slow polling mode. As soon as we get a controlvm
-* message, we switch back to fast polling mode.
-*/
+/*
+ * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
+ * we switch to slow polling mode. As soon as we get a controlvm
+ * message, we switch back to fast polling mode.
+ */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
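/*
 * Condensed sketch of the polling-rate switch described above (the
 * full logic lives in the driver's periodic controlvm work; this is
 * illustrative only):
 */
static void example_adjust_controlvm_poll_rate(void)
{
	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS)))
		/* idle too long: drop back to slow polling */
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	else
		/* a message arrived recently: keep polling fast */
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}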
-static int visorbusregistered;
struct parser_context {
unsigned long allocbytes;
@@ -99,51 +87,36 @@ struct parser_context {
};
static struct delayed_work periodic_controlvm_work;
-static DEFINE_SEMAPHORE(notifier_lock);
static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
-static struct controlvm_message_packet g_devicechangestate_packet;
-
-static LIST_HEAD(bus_info_list);
-static LIST_HEAD(dev_info_list);
static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
u8 *ptr; /* pointer to base address of payload pool */
- u64 offset; /* offset from beginning of controlvm
+ u64 offset; /*
+ * offset from beginning of controlvm
	 * channel to beginning of payload pool
*/
u32 bytes; /* number of bytes in payload pool */
};
static struct visor_controlvm_payload_info controlvm_payload_info;
+static unsigned long controlvm_payload_bytes_buffered;
-/* The following globals are used to handle the scenario where we are unable to
- * offload the payload from a controlvm message due to memory requirements. In
+/*
+ * The following globals are used to handle the scenario where we are unable to
+ * offload the payload from a controlvm message due to memory requirements. In
* this scenario, we simply stash the controlvm message, then attempt to
* process it again the next time controlvm_periodic_work() runs.
*/
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
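/*
 * Condensed sketch of the stash-and-retry pattern described above
 * (illustrative only; "example_read_message" and
 * "example_handle_message" are hypothetical stand-ins for the real
 * read/handle helpers):
 */
static bool example_read_message(struct controlvm_message *msg);
static bool example_handle_message(struct controlvm_message *msg);

static void example_process_or_stash(void)
{
	struct controlvm_message inmsg;

	if (controlvm_pending_msg_valid) {
		/* retry the message we throttled on the previous pass */
		inmsg = controlvm_pending_msg;
		controlvm_pending_msg_valid = false;
	} else if (!example_read_message(&inmsg)) {
		return;		/* nothing to do this pass */
	}

	if (!example_handle_message(&inmsg)) {
		/* payload could not be buffered; try again next pass */
		controlvm_pending_msg = inmsg;
		controlvm_pending_msg_valid = true;
	}
}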
-/* This identifies a data buffer that has been received via a controlvm messages
- * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
- */
-struct putfile_buffer_entry {
- struct list_head next; /* putfile_buffer_entry list */
- struct parser_context *parser_ctx; /* points to input data buffer */
-};
-
-/* List of struct putfile_request *, via next_putfile_request member.
- * Each entry in this list identifies an outstanding TRANSMIT_FILE
- * conversation.
- */
-static LIST_HEAD(putfile_request_list);
-
-/* This describes a buffer and its current state of transfer (e.g., how many
+/*
+ * This describes a buffer and its current state of transfer (e.g., how many
* bytes have already been supplied as putfile data, and how many bytes are
* remaining) for a putfile_request.
*/
@@ -155,8 +128,9 @@ struct putfile_active_buffer {
};
#define PUTFILE_REQUEST_SIG 0x0906101302281211
-/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
- * conversation. Structs of this type are dynamically linked into
+/*
+ * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
+ * conversation. Structs of this type are dynamically linked into
* <Putfile_request_list>.
*/
struct putfile_request {
@@ -168,7 +142,8 @@ struct putfile_request {
/* link to next struct putfile_request */
struct list_head next_putfile_request;
- /* head of putfile_buffer_entry list, which describes the data to be
+ /*
+ * head of putfile_buffer_entry list, which describes the data to be
* supplied as putfile data;
* - this list is added to when controlvm messages come in that supply
* file data
@@ -184,11 +159,13 @@ struct putfile_request {
/* data not yet read within current putfile_buffer_entry */
struct putfile_active_buffer active_buf;
- /* <0 = failed, 0 = in-progress, >0 = successful; */
- /* note that this must be set with req_list_lock, and if you set <0, */
- /* it is your responsibility to also free up all of the other objects */
- /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
- /* before releasing the lock */
+ /*
+ * <0 = failed, 0 = in-progress, >0 = successful;
+ * note that this must be set with req_list_lock, and if you set <0,
+ * it is your responsibility to also free up all of the other objects
+ * in this struct (like input_buffer_list, active_buf.parser_ctx)
+ * before releasing the lock
+ */
int completion_status;
};
@@ -199,289 +176,11 @@ struct parahotplug_request {
struct controlvm_message msg;
};
-static LIST_HEAD(parahotplug_request_list);
-static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
-static void parahotplug_process_list(void);
-
-/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
- * CONTROLVM_REPORTEVENT.
- */
-static struct visorchipset_busdev_notifiers busdev_notifiers;
-
-static void bus_create_response(struct visor_device *p, int response);
-static void bus_destroy_response(struct visor_device *p, int response);
-static void device_create_response(struct visor_device *p, int response);
-static void device_destroy_response(struct visor_device *p, int response);
-static void device_resume_response(struct visor_device *p, int response);
-
-static void visorchipset_device_pause_response(struct visor_device *p,
- int response);
-
-static struct visorchipset_busdev_responders busdev_responders = {
- .bus_create = bus_create_response,
- .bus_destroy = bus_destroy_response,
- .device_create = device_create_response,
- .device_destroy = device_destroy_response,
- .device_pause = visorchipset_device_pause_response,
- .device_resume = device_resume_response,
-};
-
/* info for /dev/visorchipset */
-static dev_t major_dev = -1; /**< indicates major num for device */
+static dev_t major_dev = -1; /* indicates major number for the device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t toolaction_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_RW(toolaction);
-
-static ssize_t boottotool_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t boottotool_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count);
-static DEVICE_ATTR_RW(boottotool);
-
-static ssize_t error_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-static ssize_t error_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_RW(error);
-
-static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_RW(textid);
-
-static ssize_t remaining_steps_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t remaining_steps_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_RW(remaining_steps);
-
-static ssize_t devicedisabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_WO(devicedisabled);
-
-static ssize_t deviceenabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_WO(deviceenabled);
-
-static struct attribute *visorchipset_install_attrs[] = {
- &dev_attr_toolaction.attr,
- &dev_attr_boottotool.attr,
- &dev_attr_error.attr,
- &dev_attr_textid.attr,
- &dev_attr_remaining_steps.attr,
- NULL
-};
-
-static struct attribute_group visorchipset_install_group = {
- .name = "install",
- .attrs = visorchipset_install_attrs
-};
-
-static struct attribute *visorchipset_parahotplug_attrs[] = {
- &dev_attr_devicedisabled.attr,
- &dev_attr_deviceenabled.attr,
- NULL
-};
-
-static struct attribute_group visorchipset_parahotplug_group = {
- .name = "parahotplug",
- .attrs = visorchipset_parahotplug_attrs
-};
-
-static const struct attribute_group *visorchipset_dev_groups[] = {
- &visorchipset_install_group,
- &visorchipset_parahotplug_group,
- NULL
-};
-
-static void visorchipset_dev_release(struct device *dev)
-{
-}
-
-/* /sys/devices/platform/visorchipset */
-static struct platform_device visorchipset_platform_device = {
- .name = "visorchipset",
- .id = -1,
- .dev.groups = visorchipset_dev_groups,
- .dev.release = visorchipset_dev_release,
-};
-
-/* Function prototypes */
-static void controlvm_respond(struct controlvm_message_header *msg_hdr,
- int response);
-static void controlvm_respond_chipset_init(
- struct controlvm_message_header *msg_hdr, int response,
- enum ultra_chipset_feature features);
-static void controlvm_respond_physdev_changestate(
- struct controlvm_message_header *msg_hdr, int response,
- struct spar_segment_state state);
-
-static void parser_done(struct parser_context *ctx);
-
-static struct parser_context *
-parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
-{
- int allocbytes = sizeof(struct parser_context) + bytes;
- struct parser_context *ctx;
-
- if (retry)
- *retry = false;
-
- /*
- * alloc an 0 extra byte to ensure payload is
- * '\0'-terminated
- */
- allocbytes++;
- if ((controlvm_payload_bytes_buffered + bytes)
- > MAX_CONTROLVM_PAYLOAD_BYTES) {
- if (retry)
- *retry = true;
- return NULL;
- }
- ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
- if (!ctx) {
- if (retry)
- *retry = true;
- return NULL;
- }
-
- ctx->allocbytes = allocbytes;
- ctx->param_bytes = bytes;
- ctx->curr = NULL;
- ctx->bytes_remaining = 0;
- ctx->byte_stream = false;
- if (local) {
- void *p;
-
- if (addr > virt_to_phys(high_memory - 1))
- goto err_finish_ctx;
- p = __va((unsigned long)(addr));
- memcpy(ctx->data, p, bytes);
- } else {
- void *mapping = memremap(addr, bytes, MEMREMAP_WB);
-
- if (!mapping)
- goto err_finish_ctx;
- memcpy(ctx->data, mapping, bytes);
- memunmap(mapping);
- }
-
- ctx->byte_stream = true;
- controlvm_payload_bytes_buffered += ctx->param_bytes;
-
- return ctx;
-
-err_finish_ctx:
- parser_done(ctx);
- return NULL;
-}
-
-static uuid_le
-parser_id_get(struct parser_context *ctx)
-{
- struct spar_controlvm_parameters_header *phdr = NULL;
-
- if (!ctx)
- return NULL_UUID_LE;
- phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
- return phdr->id;
-}
-
-/** Describes the state from the perspective of which controlvm messages have
- * been received for a bus or device.
- */
-
-enum PARSER_WHICH_STRING {
- PARSERSTRING_INITIATOR,
- PARSERSTRING_TARGET,
- PARSERSTRING_CONNECTION,
- PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
-};
-
-static void
-parser_param_start(struct parser_context *ctx,
- enum PARSER_WHICH_STRING which_string)
-{
- struct spar_controlvm_parameters_header *phdr = NULL;
-
- if (!ctx)
- return;
-
- phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
- switch (which_string) {
- case PARSERSTRING_INITIATOR:
- ctx->curr = ctx->data + phdr->initiator_offset;
- ctx->bytes_remaining = phdr->initiator_length;
- break;
- case PARSERSTRING_TARGET:
- ctx->curr = ctx->data + phdr->target_offset;
- ctx->bytes_remaining = phdr->target_length;
- break;
- case PARSERSTRING_CONNECTION:
- ctx->curr = ctx->data + phdr->connection_offset;
- ctx->bytes_remaining = phdr->connection_length;
- break;
- case PARSERSTRING_NAME:
- ctx->curr = ctx->data + phdr->name_offset;
- ctx->bytes_remaining = phdr->name_length;
- break;
- default:
- break;
- }
-}
-
-static void parser_done(struct parser_context *ctx)
-{
- if (!ctx)
- return;
- controlvm_payload_bytes_buffered -= ctx->param_bytes;
- kfree(ctx);
-}
-
-static void *
-parser_string_get(struct parser_context *ctx)
-{
- u8 *pscan;
- unsigned long nscan;
- int value_length = -1;
- void *value = NULL;
- int i;
-
- if (!ctx)
- return NULL;
- pscan = ctx->curr;
- nscan = ctx->bytes_remaining;
- if (nscan == 0)
- return NULL;
- if (!pscan)
- return NULL;
- for (i = 0, value_length = -1; i < nscan; i++)
- if (pscan[i] == '\0') {
- value_length = i;
- break;
- }
- if (value_length < 0) /* '\0' was not included in the length */
- value_length = nscan;
- value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
- if (!value)
- return NULL;
- if (value_length > 0)
- memcpy(value, pscan, value_length);
- ((u8 *)(value))[value_length] = '\0';
- return value;
-}
-
-static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -513,6 +212,7 @@ static ssize_t toolaction_store(struct device *dev,
return ret;
return count;
}
+static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
struct device_attribute *attr,
@@ -549,6 +249,7 @@ static ssize_t boottotool_store(struct device *dev,
return ret;
return count;
}
+static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -580,6 +281,7 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
return ret;
return count;
}
+static DEVICE_ATTR_RW(error);
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -612,6 +314,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
return ret;
return count;
}
+static DEVICE_ATTR_RW(textid);
static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -644,6 +347,103 @@ static ssize_t remaining_steps_store(struct device *dev,
return ret;
return count;
}
+static DEVICE_ATTR_RW(remaining_steps);
+
+static uuid_le
+parser_id_get(struct parser_context *ctx)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+ if (!ctx)
+ return NULL_UUID_LE;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ return phdr->id;
+}
+
+/*
+ * Describes the state from the perspective of which controlvm messages have
+ * been received for a bus or device.
+ */
+
+enum PARSER_WHICH_STRING {
+ PARSERSTRING_INITIATOR,
+ PARSERSTRING_TARGET,
+ PARSERSTRING_CONNECTION,
+ PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
+};
+
+static void
+parser_param_start(struct parser_context *ctx,
+ enum PARSER_WHICH_STRING which_string)
+{
+ struct spar_controlvm_parameters_header *phdr = NULL;
+
+ if (!ctx)
+ return;
+
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ switch (which_string) {
+ case PARSERSTRING_INITIATOR:
+ ctx->curr = ctx->data + phdr->initiator_offset;
+ ctx->bytes_remaining = phdr->initiator_length;
+ break;
+ case PARSERSTRING_TARGET:
+ ctx->curr = ctx->data + phdr->target_offset;
+ ctx->bytes_remaining = phdr->target_length;
+ break;
+ case PARSERSTRING_CONNECTION:
+ ctx->curr = ctx->data + phdr->connection_offset;
+ ctx->bytes_remaining = phdr->connection_length;
+ break;
+ case PARSERSTRING_NAME:
+ ctx->curr = ctx->data + phdr->name_offset;
+ ctx->bytes_remaining = phdr->name_length;
+ break;
+ default:
+ break;
+ }
+}
+
+static void parser_done(struct parser_context *ctx)
+{
+ if (!ctx)
+ return;
+ controlvm_payload_bytes_buffered -= ctx->param_bytes;
+ kfree(ctx);
+}
+
+static void *
+parser_string_get(struct parser_context *ctx)
+{
+ u8 *pscan;
+ unsigned long nscan;
+ int value_length = -1;
+ void *value = NULL;
+ int i;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (!pscan)
+ return NULL;
+ for (i = 0, value_length = -1; i < nscan; i++)
+ if (pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ if (value_length < 0) /* '\0' was not included in the length */
+ value_length = nscan;
+ value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+ if (!value)
+ return NULL;
+ if (value_length > 0)
+ memcpy(value, pscan, value_length);
+ ((u8 *)(value))[value_length] = '\0';
+ return value;
+}
struct visor_busdev {
u32 bus_no;
@@ -683,32 +483,36 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
vdev = to_visor_device(dev);
return vdev;
}
-EXPORT_SYMBOL(visorbus_get_device_by_id);
-void
-visorchipset_register_busdev(
- struct visorchipset_busdev_notifiers *notifiers,
- struct visorchipset_busdev_responders *responders,
- struct ultra_vbus_deviceinfo *driver_info)
-{
- down(&notifier_lock);
- if (!notifiers) {
- memset(&busdev_notifiers, 0,
- sizeof(busdev_notifiers));
- visorbusregistered = 0; /* clear flag */
- } else {
- busdev_notifiers = *notifiers;
- visorbusregistered = 1; /* set flag */
+static void
+controlvm_init_response(struct controlvm_message *msg,
+ struct controlvm_message_header *msg_hdr, int response)
+{
+ memset(msg, 0, sizeof(struct controlvm_message));
+ memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
+ msg->hdr.payload_bytes = 0;
+ msg->hdr.payload_vm_offset = 0;
+ msg->hdr.payload_max_bytes = 0;
+ if (response < 0) {
+ msg->hdr.flags.failed = 1;
+ msg->hdr.completion_status = (u32)(-response);
}
- if (responders)
- *responders = busdev_responders;
- if (driver_info)
- bus_device_info_init(driver_info, "chipset", "visorchipset",
- VERSION, NULL);
+}
+
+static void
+controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
+ int response,
+ enum ultra_chipset_feature features)
+{
+ struct controlvm_message outmsg;
- up(&notifier_lock);
+ controlvm_init_response(&outmsg, msg_hdr, response);
+ outmsg.cmd.init_chipset.features = features;
+ if (visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ return;
+ }
}
-EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
static void
chipset_init(struct controlvm_message *inmsg)
@@ -725,14 +529,16 @@ chipset_init(struct controlvm_message *inmsg)
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
- /* Set features to indicate we support parahotplug (if Command
+ /*
+ * Set features to indicate we support parahotplug (if Command
* also supports it).
*/
features =
inmsg->cmd.init_chipset.
features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
- /* Set the "reply" bit so Command knows this is a
+ /*
+ * Set the "reply" bit so Command knows this is a
* features-aware driver.
*/
features |= ULTRA_CHIPSET_FEATURE_REPLY;
@@ -743,21 +549,6 @@ out_respond:
}
static void
-controlvm_init_response(struct controlvm_message *msg,
- struct controlvm_message_header *msg_hdr, int response)
-{
- memset(msg, 0, sizeof(struct controlvm_message));
- memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
- msg->hdr.payload_bytes = 0;
- msg->hdr.payload_vm_offset = 0;
- msg->hdr.payload_max_bytes = 0;
- if (response < 0) {
- msg->hdr.flags.failed = 1;
- msg->hdr.completion_status = (u32)(-response);
- }
-}
-
-static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
struct controlvm_message outmsg;
@@ -766,23 +557,8 @@ controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
if (outmsg.hdr.flags.test_message == 1)
return;
- if (!visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
- return;
- }
-}
-
-static void
-controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
- int response,
- enum ultra_chipset_feature features)
-{
- struct controlvm_message outmsg;
-
- controlvm_init_response(&outmsg, msg_hdr, response);
- outmsg.cmd.init_chipset.features = features;
- if (!visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ if (visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
return;
}
}
@@ -796,8 +572,8 @@ static void controlvm_respond_physdev_changestate(
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.device_change_state.state = state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
- if (!visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ if (visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
return;
}
}
@@ -894,8 +670,8 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
- if (!visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg))
+ if (visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg))
return;
}
@@ -920,20 +696,20 @@ bus_epilog(struct visor_device *bus_info,
{
struct controlvm_message_header *pmsg_hdr = NULL;
- down(&notifier_lock);
-
if (!bus_info) {
- /* relying on a valid passed in response code */
- /* be lazy and re-use msg_hdr for this failure, is this ok?? */
+ /*
+ * relying on a valid passed in response code
+ * be lazy and re-use msg_hdr for this failure, is this ok??
+ */
pmsg_hdr = msg_hdr;
- goto out_respond_and_unlock;
+ goto out_respond;
}
if (bus_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = bus_info->pending_msg_hdr;
- goto out_respond_and_unlock;
+ goto out_respond;
}
if (need_response) {
@@ -942,7 +718,7 @@ bus_epilog(struct visor_device *bus_info,
POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
bus_info->chipset_bus_no,
POSTCODE_SEVERITY_ERR);
- goto out_unlock;
+ return;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -953,25 +729,16 @@ bus_epilog(struct visor_device *bus_info,
if (response == CONTROLVM_RESP_SUCCESS) {
switch (cmd) {
case CONTROLVM_BUS_CREATE:
- if (busdev_notifiers.bus_create) {
- (*busdev_notifiers.bus_create) (bus_info);
- goto out_unlock;
- }
+ chipset_bus_create(bus_info);
break;
case CONTROLVM_BUS_DESTROY:
- if (busdev_notifiers.bus_destroy) {
- (*busdev_notifiers.bus_destroy) (bus_info);
- goto out_unlock;
- }
+ chipset_bus_destroy(bus_info);
break;
}
}
-out_respond_and_unlock:
+out_respond:
bus_responder(cmd, pmsg_hdr, response);
-
-out_unlock:
- up(&notifier_lock);
}
static void
@@ -980,31 +747,29 @@ device_epilog(struct visor_device *dev_info,
struct controlvm_message_header *msg_hdr, int response,
bool need_response, bool for_visorbus)
{
- struct visorchipset_busdev_notifiers *notifiers;
struct controlvm_message_header *pmsg_hdr = NULL;
- notifiers = &busdev_notifiers;
-
- down(&notifier_lock);
if (!dev_info) {
- /* relying on a valid passed in response code */
- /* be lazy and re-use msg_hdr for this failure, is this ok?? */
+ /*
+ * relying on a valid passed in response code
+ * be lazy and re-use msg_hdr for this failure, is this ok??
+ */
pmsg_hdr = msg_hdr;
- goto out_respond_and_unlock;
+ goto out_respond;
}
if (dev_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = dev_info->pending_msg_hdr;
- goto out_respond_and_unlock;
+ goto out_respond;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto out_respond_and_unlock;
+ goto out_respond;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -1015,48 +780,34 @@ device_epilog(struct visor_device *dev_info,
if (response >= 0) {
switch (cmd) {
case CONTROLVM_DEVICE_CREATE:
- if (notifiers->device_create) {
- (*notifiers->device_create) (dev_info);
- goto out_unlock;
- }
+ chipset_device_create(dev_info);
break;
case CONTROLVM_DEVICE_CHANGESTATE:
/* ServerReady / ServerRunning / SegmentStateRunning */
if (state.alive == segment_state_running.alive &&
state.operating ==
segment_state_running.operating) {
- if (notifiers->device_resume) {
- (*notifiers->device_resume) (dev_info);
- goto out_unlock;
- }
+ chipset_device_resume(dev_info);
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
else if (state.alive == segment_state_standby.alive &&
state.operating ==
segment_state_standby.operating) {
- /* technically this is standby case
+ /*
+ * technically this is standby case
* where server is lost
*/
- if (notifiers->device_pause) {
- (*notifiers->device_pause) (dev_info);
- goto out_unlock;
- }
+ chipset_device_pause(dev_info);
}
break;
case CONTROLVM_DEVICE_DESTROY:
- if (notifiers->device_destroy) {
- (*notifiers->device_destroy) (dev_info);
- goto out_unlock;
- }
+ chipset_device_destroy(dev_info);
break;
}
}
-out_respond_and_unlock:
+out_respond:
device_responder(cmd, pmsg_hdr, response);
-
-out_unlock:
- up(&notifier_lock);
}
static void
@@ -1103,10 +854,8 @@ bus_create(struct controlvm_message *inmsg)
goto out_bus_epilog;
}
bus_info->visorchannel = visorchannel;
- if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
- dump_vhba_bus = bus_no;
+ if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
save_crash_message(inmsg, CRASH_BUS);
- }
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
@@ -1303,11 +1052,19 @@ my_device_destroy(struct controlvm_message *inmsg)
inmsg->hdr.flags.response_expected == 1, 1);
}
-/* When provided with the physical address of the controlvm channel
+/**
+ * initialize_controlvm_payload_info() - init controlvm_payload_info struct
+ * @phys_addr: the physical address of controlvm channel
+ * @offset: the offset to payload
+ * @bytes: the size of the payload in bytes
+ * @info:      the struct to be initialized with the payload info
+ *
+ * When provided with the physical address of the controlvm channel
* (phys_addr), the offset to the payload area we need to manage
* (offset), and the size of this payload area (bytes), fills in the
- * controlvm_payload_info struct. Returns true for success or false
- * for failure.
+ * controlvm_payload_info struct.
+ *
+ * Return: CONTROLVM_RESP_SUCCESS on success, or a negative value on failure
*/
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
@@ -1371,95 +1128,14 @@ initialize_controlvm_payload(void)
&controlvm_payload_info);
}
-/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
- * Returns CONTROLVM_RESP_xxx code.
- */
-static int
-visorchipset_chipset_ready(void)
-{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
- return CONTROLVM_RESP_SUCCESS;
-}
-
-static int
-visorchipset_chipset_selftest(void)
-{
- char env_selftest[20];
- char *envp[] = { env_selftest, NULL };
-
- sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
- return CONTROLVM_RESP_SUCCESS;
-}
-
-/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
- * Returns CONTROLVM_RESP_xxx code.
- */
-static int
-visorchipset_chipset_notready(void)
-{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
- return CONTROLVM_RESP_SUCCESS;
-}
-
-static void
-chipset_ready(struct controlvm_message_header *msg_hdr)
-{
- int rc = visorchipset_chipset_ready();
-
- if (rc != CONTROLVM_RESP_SUCCESS)
- rc = -rc;
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, rc);
-}
-
-static void
-chipset_selftest(struct controlvm_message_header *msg_hdr)
-{
- int rc = visorchipset_chipset_selftest();
-
- if (rc != CONTROLVM_RESP_SUCCESS)
- rc = -rc;
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, rc);
-}
-
-static void
-chipset_notready(struct controlvm_message_header *msg_hdr)
-{
- int rc = visorchipset_chipset_notready();
-
- if (rc != CONTROLVM_RESP_SUCCESS)
- rc = -rc;
- if (msg_hdr->flags.response_expected)
- controlvm_respond(msg_hdr, rc);
-}
-
-/* This is your "one-stop" shop for grabbing the next message from the
- * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
- */
-static bool
-read_controlvm_event(struct controlvm_message *msg)
-{
- if (visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg)) {
- /* got a message */
- if (msg->hdr.flags.test_message == 1)
- return false;
- return true;
- }
- return false;
-}
-
/*
- * The general parahotplug flow works as follows. The visorchipset
+ * The general parahotplug flow works as follows. The visorchipset
* driver receives a DEVICE_CHANGESTATE message from Command
- * specifying a physical device to enable or disable. The CONTROLVM
+ * specifying a physical device to enable or disable. The CONTROLVM
* message handler calls parahotplug_process_message, which then adds
* the message to a global list and kicks off a udev event which
* causes a user level script to enable or disable the specified
- * device. The udev script then writes to
+ * device. The udev script then writes to
* /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
* to get called, at which point the appropriate CONTROLVM message is
* retrieved from the list and responded to.
@@ -1467,9 +1143,11 @@ read_controlvm_event(struct controlvm_message *msg)
#define PARAHOTPLUG_TIMEOUT_MS 2000
-/*
- * Generate unique int to match an outstanding CONTROLVM message with a
- * udev script /proc response
+/**
+ * parahotplug_next_id() - generate unique int to match an outstanding CONTROLVM
+ * message with a udev script /proc response
+ *
+ * Return: a unique integer value
*/
static int
parahotplug_next_id(void)
@@ -1479,9 +1157,12 @@ parahotplug_next_id(void)
return atomic_inc_return(&id);
}
-/*
- * Returns the time (in jiffies) when a CONTROLVM message on the list
- * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
+/**
+ * parahotplug_next_expiration() - returns the time (in jiffies) when a
+ * CONTROLVM message on the list should expire
+ * -- PARAHOTPLUG_TIMEOUT_MS in the future
+ *
+ * Return: expected expiration time (in jiffies)
*/
static unsigned long
parahotplug_next_expiration(void)
@@ -1489,9 +1170,13 @@ parahotplug_next_expiration(void)
return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
-/*
- * Create a parahotplug_request, which is basically a wrapper for a
- * CONTROLVM_MESSAGE that we can stick on a list
+/**
+ * parahotplug_request_create() - create a parahotplug_request, which is
+ * basically a wrapper for a CONTROLVM_MESSAGE
+ * that we can stick on a list
+ * @msg: the message to insert in the request
+ *
+ * Return: the request containing the provided message
*/
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
@@ -1509,8 +1194,9 @@ parahotplug_request_create(struct controlvm_message *msg)
return req;
}
-/*
- * Free a parahotplug_request.
+/**
+ * parahotplug_request_destroy() - free a parahotplug_request
+ * @req: the request to deallocate
*/
static void
parahotplug_request_destroy(struct parahotplug_request *req)
@@ -1518,71 +1204,19 @@ parahotplug_request_destroy(struct parahotplug_request *req)
kfree(req);
}
-/*
- * Cause uevent to run the user level script to do the disable/enable
- * specified in (the CONTROLVM message in) the specified
- * parahotplug_request
- */
-static void
-parahotplug_request_kickoff(struct parahotplug_request *req)
-{
- struct controlvm_message_packet *cmd = &req->msg.cmd;
- char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
- env_func[40];
- char *envp[] = {
- env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
- };
-
- sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
- sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
- sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
- cmd->device_change_state.state.active);
- sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
- cmd->device_change_state.bus_no);
- sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
- cmd->device_change_state.dev_no >> 3);
- sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
- cmd->device_change_state.dev_no & 0x7);
-
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
-}
-
-/*
- * Remove any request from the list that's been on there too long and
- * respond with an error.
- */
-static void
-parahotplug_process_list(void)
-{
- struct list_head *pos;
- struct list_head *tmp;
-
- spin_lock(&parahotplug_request_list_lock);
-
- list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- struct parahotplug_request *req =
- list_entry(pos, struct parahotplug_request, list);
-
- if (!time_after_eq(jiffies, req->expiration))
- continue;
-
- list_del(pos);
- if (req->msg.hdr.flags.response_expected)
- controlvm_respond_physdev_changestate(
- &req->msg.hdr,
- CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
- req->msg.cmd.device_change_state.state);
- parahotplug_request_destroy(req);
- }
-
- spin_unlock(&parahotplug_request_list_lock);
-}
+static LIST_HEAD(parahotplug_request_list);
+static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
-/*
+/**
+ * parahotplug_request_complete() - mark request as complete
+ * @id: the id of the request
+ * @active: indicates whether the request is assigned to active partition
+ *
* Called from the /proc handler, which means the user script has
- * finished the enable/disable. Find the matching identifier, and
+ * finished the enable/disable. Find the matching identifier, and
* respond to the CONTROLVM message with success.
+ *
+ * Return: 0 on success or -EINVAL on failure
*/
static int
parahotplug_request_complete(int id, u16 active)
@@ -1597,7 +1231,8 @@ parahotplug_request_complete(int id, u16 active)
struct parahotplug_request *req =
list_entry(pos, struct parahotplug_request, list);
if (req->id == id) {
- /* Found a match. Remove it from the list and
+ /*
+ * Found a match. Remove it from the list and
* respond.
*/
list_del(pos);
@@ -1616,8 +1251,142 @@ parahotplug_request_complete(int id, u16 active)
return -EINVAL;
}
-/*
- * Enables or disables a PCI device by kicking off a udev script
+/**
+ * devicedisabled_store() - disables the hotplug device
+ * @dev: sysfs interface variable not utilized in this function
+ * @attr: sysfs interface variable not utilized in this function
+ * @buf: buffer containing the device id
+ * @count: the size of the buffer
+ *
+ * The parahotplug/devicedisabled interface gets called by our support script
+ * when an SR-IOV device has been shut down. The ID is passed to the script
+ * and then passed back when the device has been removed.
+ *
+ * Return: the size of the buffer for success or negative for error
+ */
+static ssize_t devicedisabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int id;
+ int err;
+
+ if (kstrtouint(buf, 10, &id))
+ return -EINVAL;
+
+ err = parahotplug_request_complete(id, 0);
+ if (err < 0)
+ return err;
+ return count;
+}
+static DEVICE_ATTR_WO(devicedisabled);
+
+/**
+ * deviceenabled_store() - enables the hotplug device
+ * @dev: sysfs interface variable not utilized in this function
+ * @attr: sysfs interface variable not utilized in this function
+ * @buf: buffer containing the device id
+ * @count: the size of the buffer
+ *
+ * The parahotplug/deviceenabled interface gets called by our support script
+ * when an SR-IOV device has been recovered. The ID is passed to the script
+ * and then passed back when the device has been brought back up.
+ *
+ * Return: the size of the buffer for success or negative for error
+ */
+static ssize_t deviceenabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int id;
+
+ if (kstrtouint(buf, 10, &id))
+ return -EINVAL;
+
+ parahotplug_request_complete(id, 1);
+ return count;
+}
+static DEVICE_ATTR_WO(deviceenabled);
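/*
 * Illustrative userspace side of the handshake described above (not
 * part of this patch).  The helper launched by the uevent reads
 * SPAR_PARAHOTPLUG_ID from its environment and, once the device has
 * been re-enabled, writes that id back to the deviceenabled attribute.
 * The sysfs path is inferred from the platform device name and the
 * "parahotplug" attribute group, and is an assumption of this sketch:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		const char *id = getenv("SPAR_PARAHOTPLUG_ID");
 *		FILE *f;
 *
 *		if (!id)
 *			return 1;
 *		f = fopen("/sys/devices/platform/visorchipset"
 *			  "/parahotplug/deviceenabled", "w");
 *		if (!f)
 *			return 1;
 *		fprintf(f, "%s\n", id);
 *		fclose(f);
 *		return 0;
 *	}
 */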
+
+static struct attribute *visorchipset_install_attrs[] = {
+ &dev_attr_toolaction.attr,
+ &dev_attr_boottotool.attr,
+ &dev_attr_error.attr,
+ &dev_attr_textid.attr,
+ &dev_attr_remaining_steps.attr,
+ NULL
+};
+
+static struct attribute_group visorchipset_install_group = {
+ .name = "install",
+ .attrs = visorchipset_install_attrs
+};
+
+static struct attribute *visorchipset_parahotplug_attrs[] = {
+ &dev_attr_devicedisabled.attr,
+ &dev_attr_deviceenabled.attr,
+ NULL
+};
+
+static struct attribute_group visorchipset_parahotplug_group = {
+ .name = "parahotplug",
+ .attrs = visorchipset_parahotplug_attrs
+};
+
+static const struct attribute_group *visorchipset_dev_groups[] = {
+ &visorchipset_install_group,
+ &visorchipset_parahotplug_group,
+ NULL
+};
+
+static void visorchipset_dev_release(struct device *dev)
+{
+}
+
+/* /sys/devices/platform/visorchipset */
+static struct platform_device visorchipset_platform_device = {
+ .name = "visorchipset",
+ .id = -1,
+ .dev.groups = visorchipset_dev_groups,
+ .dev.release = visorchipset_dev_release,
+};
+
+/**
+ * parahotplug_request_kickoff() - initiate parahotplug request
+ * @req: the request to initiate
+ *
+ * Cause uevent to run the user level script to do the disable/enable specified
+ * in the parahotplug_request.
+ */
+static void
+parahotplug_request_kickoff(struct parahotplug_request *req)
+{
+ struct controlvm_message_packet *cmd = &req->msg.cmd;
+ char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
+ env_func[40];
+ char *envp[] = {
+ env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ };
+
+ sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
+ sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
+ sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
+ cmd->device_change_state.state.active);
+ sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
+ cmd->device_change_state.bus_no);
+ sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
+ cmd->device_change_state.dev_no >> 3);
+ sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
+ cmd->device_change_state.dev_no & 0x7);
+
+ kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+}
+
+/**
+ * parahotplug_process_message() - enables or disables a PCI device by kicking
+ * off a udev script
+ * @inmsg: the message indicating whether to enable or disable
*/
static void
parahotplug_process_message(struct controlvm_message *inmsg)
@@ -1630,14 +1399,16 @@ parahotplug_process_message(struct controlvm_message *inmsg)
return;
if (inmsg->cmd.device_change_state.state.active) {
- /* For enable messages, just respond with success
- * right away. This is a bit of a hack, but there are
- * issues with the early enable messages we get (with
- * either the udev script not detecting that the device
- * is up, or not getting called at all). Fortunately
- * the messages that get lost don't matter anyway, as
- * devices are automatically enabled at
- * initialization.
+ /*
+ * For enable messages, just respond with success
+ * right away. This is a bit of a hack, but there are
+ * issues with the early enable messages we get (with
+ * either the udev script not detecting that the device
+ * is up, or not getting called at all). Fortunately
+	 * the messages that get lost don't matter anyway, as
+	 * devices are automatically enabled at
+	 * initialization.
*/
parahotplug_request_kickoff(req);
controlvm_respond_physdev_changestate
@@ -1646,11 +1417,12 @@ parahotplug_process_message(struct controlvm_message *inmsg)
inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
} else {
- /* For disable messages, add the request to the
- * request list before kicking off the udev script. It
- * won't get responded to until the script has
- * indicated it's done.
- */
+ /*
+ * For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
spin_lock(&parahotplug_request_list_lock);
list_add_tail(&req->list, &parahotplug_request_list);
spin_unlock(&parahotplug_request_list_lock);
@@ -1659,113 +1431,77 @@ parahotplug_process_message(struct controlvm_message *inmsg)
}
}
-/* Process a controlvm message.
- * Return result:
- * false - this function will return false only in the case where the
- * controlvm message was NOT processed, but processing must be
- * retried before reading the next controlvm message; a
- * scenario where this can occur is when we need to throttle
- * the allocation of memory in which to copy out controlvm
- * payload data
- * true - processing of the controlvm message completed,
- * either successfully or with an error.
+/**
+ * visorchipset_chipset_ready() - sends chipset_ready action
+ *
+ * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
+ *
+ * Return: CONTROLVM_RESP_SUCCESS
*/
-static bool
-handle_command(struct controlvm_message inmsg, u64 channel_addr)
+static int
+visorchipset_chipset_ready(void)
{
- struct controlvm_message_packet *cmd = &inmsg.cmd;
- u64 parm_addr;
- u32 parm_bytes;
- struct parser_context *parser_ctx = NULL;
- bool local_addr;
- struct controlvm_message ackmsg;
+ kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
- /* create parsing context if necessary */
- local_addr = (inmsg.hdr.flags.test_message == 1);
- if (channel_addr == 0)
- return true;
- parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
- parm_bytes = inmsg.hdr.payload_bytes;
+static int
+visorchipset_chipset_selftest(void)
+{
+ char env_selftest[20];
+ char *envp[] = { env_selftest, NULL };
- /* Parameter and channel addresses within test messages actually lie
- * within our OS-controlled memory. We need to know that, because it
- * makes a difference in how we compute the virtual address.
- */
- if (parm_addr && parm_bytes) {
- bool retry = false;
+ sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
+ kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+ return CONTROLVM_RESP_SUCCESS;
+}
- parser_ctx =
- parser_init_byte_stream(parm_addr, parm_bytes,
- local_addr, &retry);
- if (!parser_ctx && retry)
- return false;
- }
+/**
+ * visorchipset_chipset_notready() - sends chipset_notready action
+ *
+ * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
+ *
+ * Return: CONTROLVM_RESP_SUCCESS
+ */
+static int
+visorchipset_chipset_notready(void)
+{
+ kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
- if (!local_addr) {
- controlvm_init_response(&ackmsg, &inmsg.hdr,
- CONTROLVM_RESP_SUCCESS);
- if (controlvm_channel)
- visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_ACK,
- &ackmsg);
- }
- switch (inmsg.hdr.id) {
- case CONTROLVM_CHIPSET_INIT:
- chipset_init(&inmsg);
- break;
- case CONTROLVM_BUS_CREATE:
- bus_create(&inmsg);
- break;
- case CONTROLVM_BUS_DESTROY:
- bus_destroy(&inmsg);
- break;
- case CONTROLVM_BUS_CONFIGURE:
- bus_configure(&inmsg, parser_ctx);
- break;
- case CONTROLVM_DEVICE_CREATE:
- my_device_create(&inmsg);
- break;
- case CONTROLVM_DEVICE_CHANGESTATE:
- if (cmd->device_change_state.flags.phys_device) {
- parahotplug_process_message(&inmsg);
- } else {
- /* save the hdr and cmd structures for later use */
- /* when sending back the response to Command */
- my_device_changestate(&inmsg);
- g_devicechangestate_packet = inmsg.cmd;
- break;
- }
- break;
- case CONTROLVM_DEVICE_DESTROY:
- my_device_destroy(&inmsg);
- break;
- case CONTROLVM_DEVICE_CONFIGURE:
- /* no op for now, just send a respond that we passed */
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
- break;
- case CONTROLVM_CHIPSET_READY:
- chipset_ready(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_SELFTEST:
- chipset_selftest(&inmsg.hdr);
- break;
- case CONTROLVM_CHIPSET_STOP:
- chipset_notready(&inmsg.hdr);
- break;
- default:
- if (inmsg.hdr.flags.response_expected)
- controlvm_respond
- (&inmsg.hdr,
- -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
- break;
- }
+static void
+chipset_ready(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_ready();
- if (parser_ctx) {
- parser_done(parser_ctx);
- parser_ctx = NULL;
- }
- return true;
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected)
+ controlvm_respond(msg_hdr, rc);
+}
+
+static void
+chipset_selftest(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_selftest();
+
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected)
+ controlvm_respond(msg_hdr, rc);
+}
+
+static void
+chipset_notready(struct controlvm_message_header *msg_hdr)
+{
+ int rc = visorchipset_chipset_notready();
+
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msg_hdr->flags.response_expected)
+ controlvm_respond(msg_hdr, rc);
}
static inline unsigned int
@@ -1796,76 +1532,6 @@ static u64 controlvm_get_channel_address(void)
}
static void
-controlvm_periodic_work(struct work_struct *work)
-{
- struct controlvm_message inmsg;
- bool got_command = false;
- bool handle_command_failed = false;
-
- /* make sure visorbus server is registered for controlvm callbacks */
- if (visorchipset_visorbusregwait && !visorbusregistered)
- goto cleanup;
-
- while (visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_RESPONSE,
- &inmsg))
- ;
- if (!got_command) {
- if (controlvm_pending_msg_valid) {
- /* we throttled processing of a prior
- * msg, so try to process it again
- * rather than reading a new one
- */
- inmsg = controlvm_pending_msg;
- controlvm_pending_msg_valid = false;
- got_command = true;
- } else {
- got_command = read_controlvm_event(&inmsg);
- }
- }
-
- handle_command_failed = false;
- while (got_command && (!handle_command_failed)) {
- most_recent_message_jiffies = jiffies;
- if (handle_command(inmsg,
- visorchannel_get_physaddr
- (controlvm_channel)))
- got_command = read_controlvm_event(&inmsg);
- else {
- /* this is a scenario where throttling
- * is required, but probably NOT an
- * error...; we stash the current
- * controlvm msg so we will attempt to
- * reprocess it on our next loop
- */
- handle_command_failed = true;
- controlvm_pending_msg = inmsg;
- controlvm_pending_msg_valid = true;
- }
- }
-
- /* parahotplug_worker */
- parahotplug_process_list();
-
-cleanup:
-
- if (time_after(jiffies,
- most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
- /* it's been longer than MIN_IDLE_SECONDS since we
- * processed our last controlvm message; slow down the
- * polling
- */
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
- } else {
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
- }
-
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
-}
-
-static void
setup_crash_devices_work_queue(struct work_struct *work)
{
struct controlvm_message local_crash_bus_msg;
@@ -1874,13 +1540,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- /* make sure visorbus is registered for controlvm callbacks */
- if (visorchipset_visorbusregwait && !visorbusregistered) {
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
- return;
- }
-
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* send init chipset msg */
@@ -1958,7 +1617,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
}
-static void
+void
bus_create_response(struct visor_device *bus_info, int response)
{
if (response >= 0)
@@ -1971,7 +1630,7 @@ bus_create_response(struct visor_device *bus_info, int response)
bus_info->pending_msg_hdr = NULL;
}
-static void
+void
bus_destroy_response(struct visor_device *bus_info, int response)
{
bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
@@ -1981,7 +1640,7 @@ bus_destroy_response(struct visor_device *bus_info, int response)
bus_info->pending_msg_hdr = NULL;
}
-static void
+void
device_create_response(struct visor_device *dev_info, int response)
{
if (response >= 0)
@@ -1994,7 +1653,7 @@ device_create_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static void
+void
device_destroy_response(struct visor_device *dev_info, int response)
{
device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
@@ -2004,9 +1663,9 @@ device_destroy_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static void
-visorchipset_device_pause_response(struct visor_device *dev_info,
- int response)
+void
+device_pause_response(struct visor_device *dev_info,
+ int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
dev_info, response,
@@ -2016,7 +1675,7 @@ visorchipset_device_pause_response(struct visor_device *dev_info,
dev_info->pending_msg_hdr = NULL;
}
-static void
+void
device_resume_response(struct visor_device *dev_info, int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
@@ -2027,40 +1686,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-/* The parahotplug/devicedisabled interface gets called by our support script
- * when an SR-IOV device has been shut down. The ID is passed to the script
- * and then passed back when the device has been removed.
- */
-static ssize_t devicedisabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
-
- parahotplug_request_complete(id, 0);
- return count;
-}
-
-/* The parahotplug/deviceenabled interface gets called by our support script
- * when an SR-IOV device has been recovered. The ID is passed to the script
- * and then passed back when the device has been brought back up.
- */
-static ssize_t deviceenabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int id;
-
- if (kstrtouint(buf, 10, &id))
- return -EINVAL;
-
- parahotplug_request_complete(id, 1);
- return count;
-}
-
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -2191,6 +1816,298 @@ visorchipset_file_cleanup(dev_t major_dev)
unregister_chrdev_region(major_dev, 1);
}
+static struct parser_context *
+parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
+{
+ int allocbytes = sizeof(struct parser_context) + bytes;
+ struct parser_context *ctx;
+
+ if (retry)
+ *retry = false;
+
+ /*
+ * allocate an extra byte to ensure the payload is
+ * '\0'-terminated
+ */
+ allocbytes++;
+ if ((controlvm_payload_bytes_buffered + bytes)
+ > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ if (retry)
+ *retry = true;
+ return NULL;
+ }
+ ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
+ if (!ctx) {
+ if (retry)
+ *retry = true;
+ return NULL;
+ }
+
+ ctx->allocbytes = allocbytes;
+ ctx->param_bytes = bytes;
+ ctx->curr = NULL;
+ ctx->bytes_remaining = 0;
+ ctx->byte_stream = false;
+ if (local) {
+ void *p;
+
+ if (addr > virt_to_phys(high_memory - 1))
+ goto err_finish_ctx;
+ p = __va((unsigned long)(addr));
+ memcpy(ctx->data, p, bytes);
+ } else {
+ void *mapping = memremap(addr, bytes, MEMREMAP_WB);
+
+ if (!mapping)
+ goto err_finish_ctx;
+ memcpy(ctx->data, mapping, bytes);
+ memunmap(mapping);
+ }
+
+ ctx->byte_stream = true;
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+
+ return ctx;
+
+err_finish_ctx:
+ parser_done(ctx);
+ return NULL;
+}
+
+/**
+ * handle_command() - process a controlvm message
+ * @inmsg: the message to process
+ * @channel_addr: address of the controlvm channel
+ *
+ * Return:
+ * false - this function will return false only in the case where the
+ * controlvm message was NOT processed, but processing must be
+ * retried before reading the next controlvm message; a
+ * scenario where this can occur is when we need to throttle
+ * the allocation of memory in which to copy out controlvm
+ * payload data
+ * true - processing of the controlvm message completed,
+ * either successfully or with an error
+ */
+static bool
+handle_command(struct controlvm_message inmsg, u64 channel_addr)
+{
+ struct controlvm_message_packet *cmd = &inmsg.cmd;
+ u64 parm_addr;
+ u32 parm_bytes;
+ struct parser_context *parser_ctx = NULL;
+ bool local_addr;
+ struct controlvm_message ackmsg;
+
+ /* create parsing context if necessary */
+ local_addr = (inmsg.hdr.flags.test_message == 1);
+ if (channel_addr == 0)
+ return true;
+ parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
+ parm_bytes = inmsg.hdr.payload_bytes;
+
+ /*
+ * Parameter and channel addresses within test messages actually lie
+ * within our OS-controlled memory. We need to know that, because it
+ * makes a difference in how we compute the virtual address.
+ */
+ if (parm_addr && parm_bytes) {
+ bool retry = false;
+
+ parser_ctx =
+ parser_init_byte_stream(parm_addr, parm_bytes,
+ local_addr, &retry);
+ if (!parser_ctx && retry)
+ return false;
+ }
+
+ if (!local_addr) {
+ controlvm_init_response(&ackmsg, &inmsg.hdr,
+ CONTROLVM_RESP_SUCCESS);
+ if (controlvm_channel)
+ visorchannel_signalinsert(controlvm_channel,
+ CONTROLVM_QUEUE_ACK,
+ &ackmsg);
+ }
+ switch (inmsg.hdr.id) {
+ case CONTROLVM_CHIPSET_INIT:
+ chipset_init(&inmsg);
+ break;
+ case CONTROLVM_BUS_CREATE:
+ bus_create(&inmsg);
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ bus_destroy(&inmsg);
+ break;
+ case CONTROLVM_BUS_CONFIGURE:
+ bus_configure(&inmsg, parser_ctx);
+ break;
+ case CONTROLVM_DEVICE_CREATE:
+ my_device_create(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ if (cmd->device_change_state.flags.phys_device) {
+ parahotplug_process_message(&inmsg);
+ } else {
+ /*
+ * save the hdr and cmd structures for later use
+ * when sending back the response to Command
+ */
+ my_device_changestate(&inmsg);
+ break;
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ my_device_destroy(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CONFIGURE:
+ /* no op for now, just send a response saying we succeeded */
+ if (inmsg.hdr.flags.response_expected)
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ break;
+ case CONTROLVM_CHIPSET_READY:
+ chipset_ready(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_SELFTEST:
+ chipset_selftest(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_STOP:
+ chipset_notready(&inmsg.hdr);
+ break;
+ default:
+ if (inmsg.hdr.flags.response_expected)
+ controlvm_respond
+ (&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ break;
+ }
+
+ if (parser_ctx) {
+ parser_done(parser_ctx);
+ parser_ctx = NULL;
+ }
+ return true;
+}
+
+/**
+ * read_controlvm_event() - retrieves the next message from the
+ * CONTROLVM_QUEUE_EVENT queue in the controlvm
+ * channel
+ * @msg: pointer to the retrieved message
+ *
+ * Return: true if a valid message was retrieved or false otherwise
+ */
+static bool
+read_controlvm_event(struct controlvm_message *msg)
+{
+ if (!visorchannel_signalremove(controlvm_channel,
+ CONTROLVM_QUEUE_EVENT, msg)) {
+ /* got a message */
+ if (msg->hdr.flags.test_message == 1)
+ return false;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * parahotplug_process_list() - remove any requests from the list that have
+ *                              been pending too long and respond with an error
+ */
+static void
+parahotplug_process_list(void)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+
+ spin_lock(&parahotplug_request_list_lock);
+
+ list_for_each_safe(pos, tmp, &parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+
+ if (!time_after_eq(jiffies, req->expiration))
+ continue;
+
+ list_del(pos);
+ if (req->msg.hdr.flags.response_expected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr,
+ CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
+ req->msg.cmd.device_change_state.state);
+ parahotplug_request_destroy(req);
+ }
+
+ spin_unlock(&parahotplug_request_list_lock);
+}
+
+static void
+controlvm_periodic_work(struct work_struct *work)
+{
+ struct controlvm_message inmsg;
+ bool got_command = false;
+ bool handle_command_failed = false;
+
+ while (!visorchannel_signalremove(controlvm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg))
+ ;
+ if (!got_command) {
+ if (controlvm_pending_msg_valid) {
+ /*
+ * we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = controlvm_pending_msg;
+ controlvm_pending_msg_valid = false;
+ got_command = true;
+ } else {
+ got_command = read_controlvm_event(&inmsg);
+ }
+ }
+
+ handle_command_failed = false;
+ while (got_command && (!handle_command_failed)) {
+ most_recent_message_jiffies = jiffies;
+ if (handle_command(inmsg,
+ visorchannel_get_physaddr
+ (controlvm_channel)))
+ got_command = read_controlvm_event(&inmsg);
+ else {
+ /*
+ * this is a scenario where throttling
+ * is required, but probably NOT an
+ * error; stash the current controlvm
+ * msg so we can reprocess it on the
+ * next loop
+ */
+ handle_command_failed = true;
+ controlvm_pending_msg = inmsg;
+ controlvm_pending_msg_valid = true;
+ }
+ }
+
+ /* parahotplug_worker */
+ parahotplug_process_list();
+
+ if (time_after(jiffies,
+ most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+ /*
+ * it's been longer than MIN_IDLE_SECONDS since we
+ * processed our last controlvm message; slow down the
+ * polling
+ */
+ if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ } else {
+ if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ }
+
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+}
+
static int
visorchipset_init(struct acpi_device *acpi_device)
{
@@ -2202,7 +2119,6 @@ visorchipset_init(struct acpi_device *acpi_device)
if (!addr)
goto error;
- memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
controlvm_channel = visorchannel_create_with_lock(addr, 0,
@@ -2341,15 +2257,10 @@ static void exit_unisys(void)
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
"major device number to use for the device node");
-module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_visorbusregwait,
- "1 to have the module wait for the visor bus to register");
module_init(init_unisys);
module_exit(exit_unisys);
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
- VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index c043fa41ceda..86e695d5a441 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -20,11 +20,39 @@
* Virtualization. The VMCALLs are provided by Monitor and used by IO code
* running on IO Partitions.
*/
+static inline unsigned long
+__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
+ unsigned long reg_ecx)
+{
+ unsigned long result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-#ifdef __GNUC__
-#include "iovmcall_gnuc.h"
-#endif /* */
-#include "diagchannel.h"
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -EPERM;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+ return result;
+}
+
+static inline unsigned long
+__unisys_extended_vmcall_gnuc(unsigned long long tuple,
+ unsigned long long reg_ebx,
+ unsigned long long reg_ecx,
+ unsigned long long reg_edx)
+{
+ unsigned long result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -EPERM;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
+ return result;
+}
#ifdef VMCALL_IO_CONTROLVM_ADDR
#undef VMCALL_IO_CONTROLVM_ADDR
@@ -57,7 +85,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
#define VMCALL_SUCCESS 0
#define VMCALL_SUCCESSFUL(result) (result == 0)
-#ifdef __GNUC__
#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \
__unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
@@ -74,7 +101,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
MDS_APPOS, postcode)
-#endif
/* Structures for IO VMCALLs */
@@ -89,4 +115,156 @@ struct vmcall_io_controlvm_addr_params {
u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
} __packed;
+/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
+enum driver_pc { /* POSTCODE driver identifier tuples */
+ /* visorchipset driver files */
+ VISOR_CHIPSET_PC = 0xA0,
+ VISOR_CHIPSET_PC_controlvm_c = 0xA1,
+ VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
+ VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
+ VISOR_CHIPSET_PC_file_c = 0xA4,
+ VISOR_CHIPSET_PC_parser_c = 0xA5,
+ VISOR_CHIPSET_PC_testing_c = 0xA6,
+ VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
+ VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
+ /* visorbus driver files */
+ VISOR_BUS_PC = 0xB0,
+ VISOR_BUS_PC_businst_attr_c = 0xB1,
+ VISOR_BUS_PC_channel_attr_c = 0xB2,
+ VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
+ VISOR_BUS_PC_visorbus_main_c = 0xB4,
+ /* visorclientbus driver files */
+ VISOR_CLIENT_BUS_PC = 0xC0,
+ VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
+ /* virt hba driver files */
+ VIRT_HBA_PC = 0xC2,
+ VIRT_HBA_PC_virthba_c = 0xC3,
+ /* virtpci driver files */
+ VIRT_PCI_PC = 0xC4,
+ VIRT_PCI_PC_virtpci_c = 0xC5,
+ /* virtnic driver files */
+ VIRT_NIC_PC = 0xC6,
+ VIRT_NIC_P_virtnic_c = 0xC7,
+ /* uislib driver files */
+ UISLIB_PC = 0xD0,
+ UISLIB_PC_uislib_c = 0xD1,
+ UISLIB_PC_uisqueue_c = 0xD2,
+ /* 0xD3 RESERVED */
+ UISLIB_PC_uisutils_c = 0xD4,
+};
+
+enum event_pc { /* POSTCODE event identifier tuples */
+ ATTACH_PORT_ENTRY_PC = 0x001,
+ ATTACH_PORT_FAILURE_PC = 0x002,
+ ATTACH_PORT_SUCCESS_PC = 0x003,
+ BUS_FAILURE_PC = 0x004,
+ BUS_CREATE_ENTRY_PC = 0x005,
+ BUS_CREATE_FAILURE_PC = 0x006,
+ BUS_CREATE_EXIT_PC = 0x007,
+ BUS_CONFIGURE_ENTRY_PC = 0x008,
+ BUS_CONFIGURE_FAILURE_PC = 0x009,
+ BUS_CONFIGURE_EXIT_PC = 0x00A,
+ CHIPSET_INIT_ENTRY_PC = 0x00B,
+ CHIPSET_INIT_SUCCESS_PC = 0x00C,
+ CHIPSET_INIT_FAILURE_PC = 0x00D,
+ CHIPSET_INIT_EXIT_PC = 0x00E,
+ CREATE_WORKQUEUE_PC = 0x00F,
+ CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
+ CONTROLVM_INIT_FAILURE_PC = 0x0A1,
+ DEVICE_CREATE_ENTRY_PC = 0x0A2,
+ DEVICE_CREATE_FAILURE_PC = 0x0A3,
+ DEVICE_CREATE_SUCCESS_PC = 0x0A4,
+ DEVICE_CREATE_EXIT_PC = 0x0A5,
+ DEVICE_ADD_PC = 0x0A6,
+ DEVICE_REGISTER_FAILURE_PC = 0x0A7,
+ DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
+ DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
+ DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
+ DRIVER_ENTRY_PC = 0x0AB,
+ DRIVER_EXIT_PC = 0x0AC,
+ MALLOC_FAILURE_PC = 0x0AD,
+ QUEUE_DELAYED_WORK_PC = 0x0AE,
+ /* 0x0B7 RESERVED */
+ VBUS_CHANNEL_ENTRY_PC = 0x0B8,
+ VBUS_CHANNEL_FAILURE_PC = 0x0B9,
+ VBUS_CHANNEL_EXIT_PC = 0x0BA,
+ VHBA_CREATE_ENTRY_PC = 0x0BB,
+ VHBA_CREATE_FAILURE_PC = 0x0BC,
+ VHBA_CREATE_EXIT_PC = 0x0BD,
+ VHBA_CREATE_SUCCESS_PC = 0x0BE,
+ VHBA_COMMAND_HANDLER_PC = 0x0BF,
+ VHBA_PROBE_ENTRY_PC = 0x0C0,
+ VHBA_PROBE_FAILURE_PC = 0x0C1,
+ VHBA_PROBE_EXIT_PC = 0x0C2,
+ VNIC_CREATE_ENTRY_PC = 0x0C3,
+ VNIC_CREATE_FAILURE_PC = 0x0C4,
+ VNIC_CREATE_SUCCESS_PC = 0x0C5,
+ VNIC_PROBE_ENTRY_PC = 0x0C6,
+ VNIC_PROBE_FAILURE_PC = 0x0C7,
+ VNIC_PROBE_EXIT_PC = 0x0C8,
+ VPCI_CREATE_ENTRY_PC = 0x0C9,
+ VPCI_CREATE_FAILURE_PC = 0x0CA,
+ VPCI_CREATE_EXIT_PC = 0x0CB,
+ VPCI_PROBE_ENTRY_PC = 0x0CC,
+ VPCI_PROBE_FAILURE_PC = 0x0CD,
+ VPCI_PROBE_EXIT_PC = 0x0CE,
+ CRASH_DEV_ENTRY_PC = 0x0CF,
+ CRASH_DEV_EXIT_PC = 0x0D0,
+ CRASH_DEV_HADDR_NULL = 0x0D1,
+ CRASH_DEV_CONTROLVM_NULL = 0x0D2,
+ CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
+ CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
+ CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
+ CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
+ CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
+ CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
+ SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
+ SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
+ CALLHOME_INIT_FAILURE_PC = 0x0DB
+};
+
+#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
+#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
+/* TODO-> Info currently doesn't show, so we set info=warning */
+#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
+
+/* Example call: POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+ * Note that the resulting postcode is in hex, so if you are searching for
+ * the __LINE__ number, convert it to decimal first. The line number,
+ * combined with the driver and the type of call, will let you track down
+ * exactly which line an error occurred on, or where the last driver
+ * entered/exited from.
+ */
+
+/* BASE FUNCTIONS */
+#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
+ ((((u64)__LINE__) & 0xFFF) << 32) | \
+ (((u64)pc32bit) & 0xFFFFFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
+ ((((u64)__LINE__) & 0xFFF) << 32) | \
+ ((((u64)pc16bit1) & 0xFFFF) << 16) | \
+ (((u64)pc16bit2) & 0xFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+/* MOST COMMON */
+#define POSTCODE_LINUX_2(EVENT_PC, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
+
+#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
+
+#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
+ POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
+ pc16bit2, severity)
+
#endif /* __IOMONINTF_H__ */
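[Editorial sketch, not part of the patch.] The POSTCODE_LINUX_* macros above pack a driver tuple, an event tuple, __LINE__ and an optional payload into one 64-bit postcode and hand it to the firmware via ISSUE_IO_VMCALL_POSTCODE_SEVERITY(). They expand CURRENT_FILE_PC, which each including .c file is expected to define to one of the driver_pc values; that definition and the helper below are assumptions made only for illustration (the event/severity constants come from the enums above).

        /* Assumed per-file definition, e.g. in visorchipset's main source */
        #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

        static int example_crash_dev_setup(u32 bus_count)
        {
                /* driver tuple, event tuple and __LINE__ form the postcode */
                POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

                if (!bus_count) {
                        /* the 3-arg variant carries a 32-bit payload */
                        POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, bus_count,
                                         POSTCODE_SEVERITY_ERR);
                        return -EINVAL;
                }

                POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
                return 0;
        }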
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 120ba2097e02..5a7a87efed27 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -36,21 +36,6 @@
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT 30
-static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
- void (*visorhba_cmnd_done)
- (struct scsi_cmnd *));
-#ifdef DEF_SCSI_QCMD
-static DEF_SCSI_QCMD(visorhba_queue_command)
-#else
-#define visorhba_queue_command visorhba_queue_command_lck
-#endif
-static int visorhba_probe(struct visor_device *dev);
-static void visorhba_remove(struct visor_device *dev);
-static int visorhba_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-static int visorhba_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
static struct dentry *visorhba_debugfs_dir;
/* GUIDS for HBA channel type supported by this driver */
@@ -62,20 +47,6 @@ static struct visor_channeltype_descriptor visorhba_channel_types[] = {
{ NULL_UUID_LE, NULL }
};
-/* This is used to tell the visor bus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visorhba_driver = {
- .name = "visorhba",
- .owner = THIS_MODULE,
- .channel_types = visorhba_channel_types,
- .probe = visorhba_probe,
- .remove = visorhba_remove,
- .pause = visorhba_pause,
- .resume = visorhba_resume,
- .channel_interrupt = NULL,
-};
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
@@ -364,9 +335,9 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
dev_dbg(&scsidev->sdev_gendev,
"visorhba: initiating type=%d taskmgmt command\n", tasktype);
- if (!visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
+ if (visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp))
goto err_del_scsipending_ent;
/* It can take the Service Partition up to 35 seconds to complete
@@ -567,9 +538,9 @@ visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
}
cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
- if (!visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp))
+ if (visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp))
/* queue must be full and we aren't going to wait */
goto err_del_scsipending_ent;
@@ -580,6 +551,12 @@ err_del_scsipending_ent:
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+#ifdef DEF_SCSI_QCMD
+static DEF_SCSI_QCMD(visorhba_queue_command)
+#else
+#define visorhba_queue_command visorhba_queue_command_lck
+#endif
+
/**
* visorhba_slave_alloc - called when new disk is discovered
* @scsidev: New disk
@@ -950,9 +927,9 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
struct scsi_cmnd *scsicmd;
while (1) {
- if (!visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
+ if (visorchannel_signalremove(devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART,
+ cmdrsp))
break; /* queue empty */
if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
@@ -1186,6 +1163,21 @@ static void visorhba_remove(struct visor_device *dev)
debugfs_remove_recursive(devdata->debugfs_dir);
}
+/* This is used to tell the visor bus driver which types of visor devices
+ * we support, and what functions to call when a visor device that we support
+ * is attached or removed.
+ */
+static struct visor_driver visorhba_driver = {
+ .name = "visorhba",
+ .owner = THIS_MODULE,
+ .channel_types = visorhba_channel_types,
+ .probe = visorhba_probe,
+ .remove = visorhba_remove,
+ .pause = visorhba_pause,
+ .resume = visorhba_resume,
+ .channel_interrupt = NULL,
+};
+
/**
* visorhba_init - driver init routine
*
@@ -1228,4 +1220,4 @@ module_exit(visorhba_exit);
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par hba driver");
+MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index 1bc3d2064080..53dde7c53809 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -17,8 +17,6 @@
#include <linux/types.h>
-#include "ultrainputreport.h"
-
/* Identifies mouse and keyboard activity which is specified by the firmware to
* the host using the cmsimpleinput protocol. @ingroup coretypes
*/
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index d67cd76327c0..6f94b646f7c5 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -29,9 +29,7 @@
#include <linux/kernel.h>
#include <linux/uuid.h>
-#include "version.h"
#include "visorbus.h"
-#include "channel.h"
#include "ultrainputreport.h"
/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
@@ -63,9 +61,10 @@ enum visorinput_device_type {
*/
struct visorinput_devdata {
struct visor_device *dev;
- struct rw_semaphore lock_visor_dev; /* lock for dev */
+ struct mutex lock_visor_dev; /* lock for dev */
struct input_dev *visorinput_dev;
bool paused;
+ bool interrupts_enabled;
unsigned int keycode_table_bytes; /* size of following array */
/* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */
unsigned char keycode_table[0];
@@ -228,7 +227,21 @@ static int visorinput_open(struct input_dev *visorinput_dev)
return -EINVAL;
}
dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__);
+
+ /*
+ * If we're not paused, really enable interrupts.
+ * Regardless of whether we are paused, set a flag indicating
+ * interrupts should be enabled so when we resume, interrupts
+ * will really be enabled.
+ */
+ mutex_lock(&devdata->lock_visor_dev);
+ devdata->interrupts_enabled = true;
+ if (devdata->paused)
+ goto out_unlock;
visorbus_enable_channel_interrupts(devdata->dev);
+
+out_unlock:
+ mutex_unlock(&devdata->lock_visor_dev);
return 0;
}
@@ -243,20 +256,35 @@ static void visorinput_close(struct input_dev *visorinput_dev)
return;
}
dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__);
+
+ /*
+ * If we're not paused, really disable interrupts.
+ * Regardless of whether we are paused, set a flag indicating
+ * interrupts should be disabled so when we resume we will
+ * not re-enable them.
+ */
+
+ mutex_lock(&devdata->lock_visor_dev);
+ devdata->interrupts_enabled = false;
+ if (devdata->paused)
+ goto out_unlock;
visorbus_disable_channel_interrupts(devdata->dev);
+
+out_unlock:
+ mutex_unlock(&devdata->lock_visor_dev);
}
/*
- * register_client_keyboard() initializes and returns a Linux input node that
+ * setup_client_keyboard() initializes and returns a Linux input node that
* we can use to deliver keyboard inputs to Linux. We of course do this when
* we see keyboard inputs coming in on a keyboard channel.
*/
static struct input_dev *
-register_client_keyboard(void *devdata, /* opaque on purpose */
- unsigned char *keycode_table)
+setup_client_keyboard(void *devdata, /* opaque on purpose */
+ unsigned char *keycode_table)
{
- int i, error;
+ int i;
struct input_dev *visorinput_dev;
visorinput_dev = input_allocate_device();
@@ -290,18 +318,12 @@ register_client_keyboard(void *devdata, /* opaque on purpose */
visorinput_dev->close = visorinput_close;
input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */
- error = input_register_device(visorinput_dev);
- if (error) {
- input_free_device(visorinput_dev);
- return NULL;
- }
return visorinput_dev;
}
static struct input_dev *
-register_client_mouse(void *devdata /* opaque on purpose */)
+setup_client_mouse(void *devdata /* opaque on purpose */)
{
- int error;
struct input_dev *visorinput_dev = NULL;
int xres, yres;
struct fb_info *fb0;
@@ -336,13 +358,6 @@ register_client_mouse(void *devdata /* opaque on purpose */)
visorinput_dev->open = visorinput_open;
visorinput_dev->close = visorinput_close;
input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */
-
- error = input_register_device(visorinput_dev);
- if (error) {
- input_free_device(visorinput_dev);
- return NULL;
- }
-
input_set_capability(visorinput_dev, EV_REL, REL_WHEEL);
return visorinput_dev;
@@ -360,9 +375,19 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
devdata = kzalloc(sizeof(*devdata) + extra_bytes, GFP_KERNEL);
if (!devdata)
return NULL;
+ mutex_init(&devdata->lock_visor_dev);
+ mutex_lock(&devdata->lock_visor_dev);
devdata->dev = dev;
/*
+ * visorinput_open() can be called as soon as input_register_device()
+ * happens, and that will enable channel interrupts. Setting paused
+ * prevents us from getting into visorinput_channel_interrupt() prior
+ * to the device structure being totally initialized.
+ */
+ devdata->paused = true;
+
+ /*
* This is an input device in a client guest partition,
* so we need to create whatever input nodes are necessary to
* deliver our inputs to the guest OS.
@@ -374,23 +399,49 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
KEYCODE_TABLE_BYTES);
memcpy(devdata->keycode_table + KEYCODE_TABLE_BYTES,
visorkbd_ext_keycode, KEYCODE_TABLE_BYTES);
- devdata->visorinput_dev = register_client_keyboard
+ devdata->visorinput_dev = setup_client_keyboard
(devdata, devdata->keycode_table);
if (!devdata->visorinput_dev)
goto cleanups_register;
break;
case visorinput_mouse:
- devdata->visorinput_dev = register_client_mouse(devdata);
+ devdata->visorinput_dev = setup_client_mouse(devdata);
if (!devdata->visorinput_dev)
goto cleanups_register;
break;
}
- init_rwsem(&devdata->lock_visor_dev);
+ dev_set_drvdata(&dev->device, devdata);
+ mutex_unlock(&devdata->lock_visor_dev);
+
+ /*
+ * Device struct is completely set up now, with the exception of
+ * visorinput_dev being registered.
+ * We need to unlock before we register the device, because this
+ * can cause an on-stack call of visorinput_open(), which would
+ * deadlock if we had the lock.
+ */
+ if (input_register_device(devdata->visorinput_dev)) {
+ input_free_device(devdata->visorinput_dev);
+ goto err_kfree_devdata;
+ }
+
+ mutex_lock(&devdata->lock_visor_dev);
+ /*
+ * Establish calls to visorinput_channel_interrupt() if that is
+ * the desired state that we've kept track of in interrupts_enabled
+ * while the device was being created.
+ */
+ devdata->paused = false;
+ if (devdata->interrupts_enabled)
+ visorbus_enable_channel_interrupts(dev);
+ mutex_unlock(&devdata->lock_visor_dev);
return devdata;
cleanups_register:
+ mutex_unlock(&devdata->lock_visor_dev);
+err_kfree_devdata:
kfree(devdata);
return NULL;
}
@@ -398,7 +449,6 @@ cleanups_register:
static int
visorinput_probe(struct visor_device *dev)
{
- struct visorinput_devdata *devdata = NULL;
uuid_le guid;
enum visorinput_device_type devtype;
@@ -409,10 +459,9 @@ visorinput_probe(struct visor_device *dev)
devtype = visorinput_keyboard;
else
return -ENODEV;
- devdata = devdata_create(dev, devtype);
- if (!devdata)
+ visorbus_disable_channel_interrupts(dev);
+ if (!devdata_create(dev, devtype))
return -ENOMEM;
- dev_set_drvdata(&dev->device, devdata);
return 0;
}
@@ -431,6 +480,7 @@ visorinput_remove(struct visor_device *dev)
if (!devdata)
return;
+ mutex_lock(&devdata->lock_visor_dev);
visorbus_disable_channel_interrupts(dev);
/*
@@ -438,10 +488,10 @@ visorinput_remove(struct visor_device *dev)
* in visorinput_channel_interrupt()
*/
- down_write(&devdata->lock_visor_dev);
dev_set_drvdata(&dev->device, NULL);
+ mutex_unlock(&devdata->lock_visor_dev);
+
unregister_client_input(devdata->visorinput_dev);
- up_write(&devdata->lock_visor_dev);
kfree(devdata);
}
@@ -529,15 +579,9 @@ visorinput_channel_interrupt(struct visor_device *dev)
if (!devdata)
return;
- down_write(&devdata->lock_visor_dev);
- if (devdata->paused) /* don't touch device/channel when paused */
- goto out_locked;
-
visorinput_dev = devdata->visorinput_dev;
- if (!visorinput_dev)
- goto out_locked;
- while (visorchannel_signalremove(dev->visorchannel, 0, &r)) {
+ while (!visorchannel_signalremove(dev->visorchannel, 0, &r)) {
scancode = r.activity.arg1;
keycode = scancode_to_keycode(scancode);
switch (r.activity.action) {
@@ -611,8 +655,6 @@ visorinput_channel_interrupt(struct visor_device *dev)
break;
}
}
-out_locked:
- up_write(&devdata->lock_visor_dev);
}
static int
@@ -627,16 +669,24 @@ visorinput_pause(struct visor_device *dev,
goto out;
}
- down_write(&devdata->lock_visor_dev);
+ mutex_lock(&devdata->lock_visor_dev);
if (devdata->paused) {
rc = -EBUSY;
goto out_locked;
}
+ if (devdata->interrupts_enabled)
+ visorbus_disable_channel_interrupts(dev);
+
+ /*
+ * due to above, at this time no thread of execution will be
+ * in visorinput_channel_interrupt()
+ */
+
devdata->paused = true;
complete_func(dev, 0);
rc = 0;
out_locked:
- up_write(&devdata->lock_visor_dev);
+ mutex_unlock(&devdata->lock_visor_dev);
out:
return rc;
}
@@ -652,16 +702,25 @@ visorinput_resume(struct visor_device *dev,
rc = -ENODEV;
goto out;
}
- down_write(&devdata->lock_visor_dev);
+ mutex_lock(&devdata->lock_visor_dev);
if (!devdata->paused) {
rc = -EBUSY;
goto out_locked;
}
devdata->paused = false;
complete_func(dev, 0);
+
+ /*
+ * Re-establish calls to visorinput_channel_interrupt() if that is
+ * the desired state that we've kept track of in interrupts_enabled
+ * while the device was paused.
+ */
+ if (devdata->interrupts_enabled)
+ visorbus_enable_channel_interrupts(dev);
+
rc = 0;
out_locked:
- up_write(&devdata->lock_visor_dev);
+ mutex_unlock(&devdata->lock_visor_dev);
out:
return rc;
}
@@ -675,7 +734,6 @@ static struct visor_channeltype_descriptor visorinput_channel_types[] = {
static struct visor_driver visorinput_driver = {
.name = "visorinput",
- .vertag = NULL,
.owner = THIS_MODULE,
.channel_types = visorinput_channel_types,
.probe = visorinput_probe,
@@ -704,8 +762,7 @@ MODULE_DEVICE_TABLE(visorbus, visorinput_channel_types);
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("s-Par human input driver for guest Linux");
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("s-Par human input driver for virtual keyboard/mouse");
MODULE_ALIAS("visorbus:" SPAR_MOUSE_CHANNEL_PROTOCOL_UUID_STR);
MODULE_ALIAS("visorbus:" SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID_STR);
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index a28388d3ddc2..136700756485 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -29,8 +29,6 @@
#include "iochannel.h"
#define VISORNIC_INFINITE_RSP_WAIT 0
-#define VISORNICSOPENMAX 32
-#define MAXDEVICES 16384
/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
* = 163840 bytes
@@ -38,27 +36,6 @@
#define MAX_BUF 163840
#define NAPI_WEIGHT 64
-static int visornic_probe(struct visor_device *dev);
-static void visornic_remove(struct visor_device *dev);
-static int visornic_pause(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-static int visornic_resume(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
-/* DEBUGFS declarations */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
-static ssize_t enable_ints_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos);
-static struct dentry *visornic_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
- .read = info_debugfs_read,
-};
-
-static const struct file_operations debugfs_enable_ints_fops = {
- .write = enable_ints_write,
-};
-
/* GUIDS for director channel type supported by this driver. */
static struct visor_channeltype_descriptor visornic_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
@@ -77,23 +54,6 @@ MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
*/
MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
-/* This is used to tell the visor bus driver which types of visor devices
- * we support, and what functions to call when a visor device that we support
- * is attached or removed.
- */
-static struct visor_driver visornic_driver = {
- .name = "visornic",
- .version = "1.0.0.0",
- .vertag = NULL,
- .owner = THIS_MODULE,
- .channel_types = visornic_channel_types,
- .probe = visornic_probe,
- .remove = visornic_remove,
- .pause = visornic_pause,
- .resume = visornic_resume,
- .channel_interrupt = NULL,
-};
-
struct chanstat {
unsigned long got_rcv;
unsigned long got_enbdisack;
@@ -181,9 +141,6 @@ struct visornic_devdata {
struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};
-static int visornic_poll(struct napi_struct *napi, int budget);
-static void poll_for_irq(unsigned long v);
-
/**
* visor_copy_fragsinfo_from_skb(
* @skb_in: skbuff that we are pulling the frags from
@@ -289,6 +246,10 @@ static ssize_t enable_ints_write(struct file *file,
return count;
}
+static const struct file_operations debugfs_enable_ints_fops = {
+ .write = enable_ints_write,
+};
+
/**
* visornic_serverdown_complete - IOPART went down, pause device
* @work: Work queue it was scheduled on
@@ -425,9 +386,9 @@ post_skb(struct uiscmdrsp *cmdrsp,
if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
cmdrsp->net.type = NET_RCV_POST;
cmdrsp->cmdtype = CMD_NET_TYPE;
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp)) {
+ if (!visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp)) {
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
} else {
@@ -454,9 +415,9 @@ send_enbdis(struct net_device *netdev, int state,
devdata->cmdrsp_rcv->net.enbdis.context = netdev;
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv))
+ if (!visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ devdata->cmdrsp_rcv))
devdata->chstat.sent_enbdis++;
}
@@ -920,8 +881,8 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (!visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART, cmdrsp)) {
+ if (visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART, cmdrsp)) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
@@ -1522,6 +1483,11 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
return bytes_read;
}
+static struct dentry *visornic_debugfs_dir;
+static const struct file_operations debugfs_info_fops = {
+ .read = info_debugfs_read,
+};
+
/**
* send_rcv_posts_if_needed
* @devdata: visornic device
@@ -1573,9 +1539,9 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
static void
drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
{
- while (visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
+ while (!visorchannel_signalremove(devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART,
+ cmdrsp))
;
}
@@ -1586,7 +1552,7 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
*
* Drain the respones queue of any responses from the IO partition.
* Process the responses as we get them.
- * Returns when response queue is empty or when the threadd stops.
+ * Returns when response queue is empty or when the thread stops.
*/
static void
service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
@@ -1599,9 +1565,9 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
/* TODO: CLIENT ACQUIRE -- Don't really need this at the
* moment
*/
- if (!visorchannel_signalremove(devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART,
- cmdrsp))
+ if (visorchannel_signalremove(devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART,
+ cmdrsp))
break; /* queue empty */
switch (cmdrsp->net.type) {
@@ -2061,6 +2027,21 @@ static int visornic_resume(struct visor_device *dev,
return 0;
}
+/* This is used to tell the visor bus driver which types of visor devices
+ * we support, and what functions to call when a visor device that we support
+ * is attached or removed.
+ */
+static struct visor_driver visornic_driver = {
+ .name = "visornic",
+ .owner = THIS_MODULE,
+ .channel_types = visornic_channel_types,
+ .probe = visornic_probe,
+ .remove = visornic_remove,
+ .pause = visornic_pause,
+ .resume = visornic_resume,
+ .channel_interrupt = NULL,
+};
+
/**
* visornic_init - Init function
*
@@ -2115,5 +2096,4 @@ module_exit(visornic_cleanup);
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
-MODULE_VERSION("1.0.0.0");
+MODULE_DESCRIPTION("s-Par NIC driver for virtual network devices");
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
new file mode 100644
index 000000000000..9676fb29075a
--- /dev/null
+++ b/drivers/staging/vc04_services/Kconfig
@@ -0,0 +1,9 @@
+config BCM2708_VCHIQ
+ tristate "Videocore VCHIQ"
+ depends on RASPBERRYPI_FIRMWARE && BROKEN
+ default y
+ help
+ Kernel to VideoCore communication interface for the
+ BCM2708 family of products.
+ Defaults to Y when the Broadcom Videocore services
+ are included in the build, N otherwise.
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
new file mode 100644
index 000000000000..90ab4781df2c
--- /dev/null
+++ b/drivers/staging/vc04_services/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
+
+vchiq-objs := \
+ interface/vchiq_arm/vchiq_core.o \
+ interface/vchiq_arm/vchiq_arm.o \
+ interface/vchiq_arm/vchiq_kern_lib.o \
+ interface/vchiq_arm/vchiq_2835_arm.o \
+ interface/vchiq_arm/vchiq_debugfs.o \
+ interface/vchiq_arm/vchiq_shim.o \
+ interface/vchiq_arm/vchiq_util.o \
+ interface/vchiq_arm/vchiq_connected.o \
+
+ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/staging/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
+
diff --git a/drivers/staging/vc04_services/interface/vchi/connections/connection.h b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
new file mode 100644
index 000000000000..fef6ac34c6d2
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
@@ -0,0 +1,328 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CONNECTION_H_
+#define CONNECTION_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "interface/vchi/vchi_cfg_internal.h"
+#include "interface/vchi/vchi_common.h"
+#include "interface/vchi/message_drivers/message.h"
+
+/******************************************************************************
+ Global defs
+ *****************************************************************************/
+
+// Opaque handle for a connection / service pair
+typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
+
+// opaque handle to the connection state information
+typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
+
+typedef struct vchi_connection_t VCHI_CONNECTION_T;
+
+
+/******************************************************************************
+ API
+ *****************************************************************************/
+
+// Routine to init a connection with a particular low level driver
+typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
+ const VCHI_MESSAGE_DRIVER_T * driver );
+
+// Routine to control CRC enabling at a connection level
+typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
+ VCHI_CRC_CONTROL_T control );
+
+// Routine to create a service
+typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
+ int32_t service_id,
+ uint32_t rx_fifo_size,
+ uint32_t tx_fifo_size,
+ int server,
+ VCHI_CALLBACK_T callback,
+ void *callback_param,
+ int32_t want_crc,
+ int32_t want_unaligned_bulk_rx,
+ int32_t want_unaligned_bulk_tx,
+ VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
+
+// Routine to close a service
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
+
+// Routine to queue a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// scatter-gather (vector) message queueing
+typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// Routine to dequeue a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to peek at a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to hold a message
+typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags,
+ void **message_handle );
+
+// Routine to initialise a received message iterator
+typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ VCHI_MSG_ITER_T *iter,
+ VCHI_FLAGS_T flags );
+
+// Routine to release a held message
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *message_handle );
+
+// Routine to get info on a held message
+typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *message_handle,
+ void **data,
+ int32_t *msg_size,
+ uint32_t *tx_timestamp,
+ uint32_t *rx_timestamp );
+
+// Routine to check whether the iterator has a next message
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ const VCHI_MSG_ITER_T *iter );
+
+// Routine to advance the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter,
+ void **data,
+ uint32_t *msg_size );
+
+// Routine to remove the last message returned by the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter );
+
+// Routine to hold the last message returned by the iterator
+typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
+ VCHI_MSG_ITER_T *iter,
+ void **msg_handle );
+
+// Routine to transmit bulk data
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ const void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle );
+
+// Routine to receive data
+typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle );
+
+// Routine to report if a server is available
+typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
+
+// Routine to report the number of RX slots available
+typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
+
+// Routine to report the RX slot size
+typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
+
+// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
+typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
+ int32_t service,
+ uint32_t length,
+ MESSAGE_TX_CHANNEL_T channel,
+ uint32_t channel_params,
+ uint32_t data_length,
+ uint32_t data_offset);
+
+// Callback to inform a service that a Xon or Xoff message has been received
+typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
+
+// Callback to inform a service that a server available reply message has been received
+typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
+
+// Callback to indicate that bulk auxiliary messages have arrived
+typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
+
+// Callback to indicate that bulk auxiliary messages have arrived
+typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
+
+// Callback with all the connection info you require
+typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
+
+// Callback to inform of a disconnect
+typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
+
+// Callback to inform of a power control request
+typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
+
+// allocate memory suitably aligned for this connection
+typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
+
+// free memory allocated by buffer_allocate
+typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
+
+
+/******************************************************************************
+ System driver struct
+ *****************************************************************************/
+
+struct opaque_vchi_connection_api_t
+{
+ // Routine to init the connection
+ VCHI_CONNECTION_INIT_T init;
+
+ // Connection-level CRC control
+ VCHI_CONNECTION_CRC_CONTROL_T crc_control;
+
+ // Routine to connect to or create service
+ VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
+
+ // Routine to disconnect from a service
+ VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
+
+ // Routine to queue a message
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
+
+ // scatter-gather (vector) message queue
+ VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
+
+ // Routine to dequeue a message
+ VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
+
+ // Routine to peek at a message
+ VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
+
+ // Routine to hold a message
+ VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
+
+ // Routine to initialise a received message iterator
+ VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
+
+ // Routine to release a message
+ VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
+
+ // Routine to get information on a held message
+ VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
+
+ // Routine to check for next message on iterator
+ VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
+
+ // Routine to get next message on iterator
+ VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
+
+ // Routine to remove the last message returned by iterator
+ VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
+
+ // Routine to hold the last message returned by iterator
+ VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
+
+ // Routine to transmit bulk data
+ VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
+
+ // Routine to receive data
+ VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
+
+ // Routine to report the available servers
+ VCHI_CONNECTION_SERVER_PRESENT server_present;
+
+ // Routine to report the number of RX slots available
+ VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
+
+ // Routine to report the RX slot size
+ VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
+
+ // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
+ VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
+
+ // Callback to inform a service that a Xon or Xoff message has been received
+ VCHI_CONNECTION_FLOW_CONTROL flow_control;
+
+ // Callback to inform a service that a server available reply message has been received
+ VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
+
+ // Callback to indicate that bulk auxiliary messages have arrived
+ VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
+
+ // Callback to indicate that a bulk auxiliary message has been transmitted
+ VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
+
+ // Callback to provide information about the connection
+ VCHI_CONNECTION_INFO connection_info;
+
+ // Callback to notify that peer has requested disconnect
+ VCHI_CONNECTION_DISCONNECT disconnect;
+
+ // Callback to notify that peer has requested power change
+ VCHI_CONNECTION_POWER_CONTROL power_control;
+
+ // allocate memory suitably aligned for this connection
+ VCHI_BUFFER_ALLOCATE buffer_allocate;
+
+ // free memory allocated by buffer_allocate
+ VCHI_BUFFER_FREE buffer_free;
+
+};
+
+struct vchi_connection_t {
+ const VCHI_CONNECTION_API_T *api;
+ VCHI_CONNECTION_STATE_T *state;
+#ifdef VCHI_COARSE_LOCKING
+ struct semaphore sem;
+#endif
+};
+
+
+#endif /* CONNECTION_H_ */
+
+/****************************** End of file **********************************/
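[Editorial sketch, not part of the patch.] connection.h defines an ops table (struct opaque_vchi_connection_api_t) of function pointers that a low-level message driver fills in, and struct vchi_connection_t pairs that table with the driver's opaque state. Clients therefore never call a message driver directly; they dispatch through conn->api. The hypothetical helper below shows that dispatch for service_connect, whose parameter list is the typedef above; the fifo sizes, CRC and alignment arguments are placeholder values.

        static int32_t example_open_service(VCHI_CONNECTION_T *conn,
                                            int32_t service_id,
                                            VCHI_CALLBACK_T callback,
                                            void *callback_param,
                                            VCHI_CONNECTION_SERVICE_HANDLE_T *handle)
        {
                /* dispatch through the per-connection ops table */
                return conn->api->service_connect(conn->state, service_id,
                                                  0, 0,   /* rx/tx fifo sizes */
                                                  0,      /* act as client */
                                                  callback, callback_param,
                                                  0,      /* no CRC */
                                                  0, 0,   /* aligned bulk rx/tx */
                                                  handle);
        }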
diff --git a/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
new file mode 100644
index 000000000000..8b3f76735bd4
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
@@ -0,0 +1,204 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VCHI_MESSAGE_H_
+#define _VCHI_MESSAGE_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "interface/vchi/vchi_cfg_internal.h"
+#include "interface/vchi/vchi_common.h"
+
+
+typedef enum message_event_type {
+ MESSAGE_EVENT_NONE,
+ MESSAGE_EVENT_NOP,
+ MESSAGE_EVENT_MESSAGE,
+ MESSAGE_EVENT_SLOT_COMPLETE,
+ MESSAGE_EVENT_RX_BULK_PAUSED,
+ MESSAGE_EVENT_RX_BULK_COMPLETE,
+ MESSAGE_EVENT_TX_COMPLETE,
+ MESSAGE_EVENT_MSG_DISCARDED
+} MESSAGE_EVENT_TYPE_T;
+
+typedef enum vchi_msg_flags
+{
+ VCHI_MSG_FLAGS_NONE = 0x0,
+ VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
+} VCHI_MSG_FLAGS_T;
+
+typedef enum message_tx_channel
+{
+ MESSAGE_TX_CHANNEL_MESSAGE = 0,
+ MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
+} MESSAGE_TX_CHANNEL_T;
+
+// Macros used for cycling through bulk channels
+#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
+#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
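+
+// Worked example (illustrative only, not from the original source): with
+// VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION at its default of 9 (see vchi_cfg.h), the bulk
+// channels are numbered 1..9 and the macros wrap around:
+//   MESSAGE_TX_CHANNEL_BULK_NEXT(1) == 2
+//   MESSAGE_TX_CHANNEL_BULK_NEXT(9) == 1   // wraps back to the first bulk channel
+//   MESSAGE_TX_CHANNEL_BULK_PREV(1) == 9   // wraps back to the last bulk channel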
+
+typedef enum message_rx_channel
+{
+ MESSAGE_RX_CHANNEL_MESSAGE = 0,
+ MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
+} MESSAGE_RX_CHANNEL_T;
+
+// Message receive slot information
+typedef struct rx_msg_slot_info {
+
+ struct rx_msg_slot_info *next;
+ //struct slot_info *prev;
+#if !defined VCHI_COARSE_LOCKING
+ struct semaphore sem;
+#endif
+
+ uint8_t *addr; // base address of slot
+ uint32_t len; // length of slot in bytes
+
+ uint32_t write_ptr; // hardware causes this to advance
+ uint32_t read_ptr; // this module does the reading
+ int active; // is this slot in the hardware dma fifo?
+ uint32_t msgs_parsed; // count how many messages are in this slot
+ uint32_t msgs_released; // how many messages have been released
+ void *state; // connection state information
+ uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
+} RX_MSG_SLOTINFO_T;
+
+// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
+// In particular, it mustn't use addr and len - they're the client buffer, but the message
+// driver will be tasked with sending the aligned core section.
+typedef struct rx_bulk_slotinfo_t {
+ struct rx_bulk_slotinfo_t *next;
+
+ struct semaphore *blocking;
+
+ // needed by DMA
+ void *addr;
+ uint32_t len;
+
+ // needed for the callback
+ void *service;
+ void *handle;
+ VCHI_FLAGS_T flags;
+} RX_BULK_SLOTINFO_T;
+
+
+/* ----------------------------------------------------------------------
+ * each connection driver will have a pool of the following struct.
+ *
+ * the pool will be managed by vchi_qman_*
+ * this means there will be multiple queues (singly-linked lists)
+ * a given struct message_info will be on exactly one of these queues
+ * at any one time
+ * -------------------------------------------------------------------- */
+typedef struct rx_message_info {
+
+ struct message_info *next;
+ //struct message_info *prev;
+
+ uint8_t *addr;
+ uint32_t len;
+ RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
+ uint32_t tx_timestamp;
+ uint32_t rx_timestamp;
+
+} RX_MESSAGE_INFO_T;
+
+typedef struct {
+ MESSAGE_EVENT_TYPE_T type;
+
+ struct {
+ // for messages
+ void *addr; // address of message
+ uint16_t slot_delta; // whether this message indicated slot delta
+ uint32_t len; // length of message
+ RX_MSG_SLOTINFO_T *slot; // slot this message is in
+ int32_t service; // service id this message is destined for
+ uint32_t tx_timestamp; // timestamp from the header
+ uint32_t rx_timestamp; // timestamp when we parsed it
+ } message;
+
+ // FIXME: cleanup slot reporting...
+ RX_MSG_SLOTINFO_T *rx_msg;
+ RX_BULK_SLOTINFO_T *rx_bulk;
+ void *tx_handle;
+ MESSAGE_TX_CHANNEL_T tx_channel;
+
+} MESSAGE_EVENT_T;
+
+
+// callbacks
+typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
+
+typedef struct {
+ VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
+} VCHI_MESSAGE_DRIVER_OPEN_T;
+
+
+// handle to this instance of message driver (as returned by ->open)
+typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
+
+struct opaque_vchi_message_driver_t {
+ VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
+ int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
+ int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
+ int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
+ int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
+ void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
+ int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
+ int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count,
+ void *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
+
+ int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
+ int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
+ void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
+ void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
+ int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
+ int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
+
+ int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
+ int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
+ void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
+ void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
+};
+
+
+#endif // _VCHI_MESSAGE_H_
+
+/****************************** End of file ***********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
new file mode 100644
index 000000000000..1b17e98f7379
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -0,0 +1,378 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_H_
+#define VCHI_H_
+
+#include "interface/vchi/vchi_cfg.h"
+#include "interface/vchi/vchi_common.h"
+#include "interface/vchi/connections/connection.h"
+#include "vchi_mh.h"
+
+
+/******************************************************************************
+ Global defs
+ *****************************************************************************/
+
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
+
+#ifdef USE_VCHIQ_ARM
+#define VCHI_BULK_ALIGNED(x) 1
+#else
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
+#endif
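+
+// Worked example (illustrative only): with VCHI_BULK_ALIGN == 16 and USE_VCHIQ_ARM not defined,
+//   VCHI_BULK_ROUND_UP(0x1003)     == 0x1010
+//   VCHI_BULK_ROUND_DOWN(0x1003)   == 0x1000
+//   VCHI_BULK_ALIGNED(0x1003)      == 0
+//   VCHI_BULK_ALIGN_NBYTES(0x1003) == 13   // bytes needed to reach the next 16-byte boundary
+// When USE_VCHIQ_ARM is defined, VCHI_BULK_ALIGNED() is always 1, so ALIGN_NBYTES() is always 0.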
+
+struct vchi_version {
+ uint32_t version;
+ uint32_t version_min;
+};
+#define VCHI_VERSION(v_) { v_, v_ }
+#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
+
+typedef enum
+{
+ VCHI_VEC_POINTER,
+ VCHI_VEC_HANDLE,
+ VCHI_VEC_LIST
+} VCHI_MSG_VECTOR_TYPE_T;
+
+typedef struct vchi_msg_vector_ex {
+
+ VCHI_MSG_VECTOR_TYPE_T type;
+ union
+ {
+ // a memory handle
+ struct
+ {
+ VCHI_MEM_HANDLE_T handle;
+ uint32_t offset;
+ int32_t vec_len;
+ } handle;
+
+ // an ordinary data pointer
+ struct
+ {
+ const void *vec_base;
+ int32_t vec_len;
+ } ptr;
+
+ // a nested vector list
+ struct
+ {
+ struct vchi_msg_vector_ex *vec;
+ uint32_t vec_len;
+ } list;
+ } u;
+} VCHI_MSG_VECTOR_EX_T;
+
+
+// Construct an entry in a msg vector for a pointer (p) of length (l)
+#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
+
+// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
+#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
+
+// Macros to manipulate 'FOURCC' values
+#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
+#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
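+
+// Example (illustrative only): MAKE_FOURCC("VCHI") places the first character in the most
+// significant byte, giving ('V'<<24)|('C'<<16)|('H'<<8)|'I' == 0x56434849. FOURCC_TO_CHAR()
+// expands to the four comma-separated bytes, suitable for a printf("%c%c%c%c", ...) format.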
+
+
+// Opaque service information
+struct opaque_vchi_service_t;
+
+// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
+// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
+typedef struct
+{
+ struct opaque_vchi_service_t *service;
+ void *message;
+} VCHI_HELD_MSG_T;
+
+
+
+// structure used to provide the information needed to open a server or a client
+typedef struct {
+ struct vchi_version version;
+ int32_t service_id;
+ VCHI_CONNECTION_T *connection;
+ uint32_t rx_fifo_size;
+ uint32_t tx_fifo_size;
+ VCHI_CALLBACK_T callback;
+ void *callback_param;
+ /* client intends to receive bulk transfers of
+ odd lengths or into unaligned buffers */
+ int32_t want_unaligned_bulk_rx;
+ /* client intends to transmit bulk transfers of
+ odd lengths or out of unaligned buffers */
+ int32_t want_unaligned_bulk_tx;
+ /* client wants to check CRCs on (bulk) xfers.
+ Only needs to be set at 1 end - will do both directions. */
+ int32_t want_crc;
+} SERVICE_CREATION_T;
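+
+// Typical client-side setup (a minimal sketch, not from the original source; the "EXMP"
+// fourcc, my_callback, my_state and connections[] are hypothetical):
+//
+//   SERVICE_CREATION_T setup = {
+//       .version        = VCHI_VERSION(1),
+//       .service_id     = MAKE_FOURCC("EXMP"),
+//       .connection     = connections[0],
+//       .rx_fifo_size   = 0,
+//       .tx_fifo_size   = 0,
+//       .callback       = my_callback,
+//       .callback_param = my_state,
+//   };
+//   VCHI_SERVICE_HANDLE_T handle;
+//   int32_t ret = vchi_service_open(instance, &setup, &handle);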
+
+// Opaque handle for a VCHI instance
+typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
+
+// Opaque handle for a server or client
+typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
+
+// Service registration & startup
+typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
+
+typedef struct service_info_tag {
+ const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
+ VCHI_SERVICE_INIT init; /* Service initialisation function */
+ void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
+} SERVICE_INFO_T;
+
+/******************************************************************************
+ Global funcs - implementation is specific to which side you are on (local / remote)
+ *****************************************************************************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
+ const VCHI_MESSAGE_DRIVER_T * low_level);
+
+
+// Routine used to initialise the vchi on both local + remote connections
+extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
+
+extern int32_t vchi_exit( void );
+
+extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
+ const uint32_t num_connections,
+ VCHI_INSTANCE_T instance_handle );
+
+//When this is called, ensure that all services have no data pending.
+//Bulk transfers can remain 'queued'
+extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
+
+// Global control over bulk CRC checking
+extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
+ VCHI_CRC_CONTROL_T control );
+
+// helper functions
+extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
+extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
+extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
+
+
+/******************************************************************************
+ Global service API
+ *****************************************************************************/
+// Routine to create a named service
+extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle );
+
+// Routine to destroy a service
+extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to open a named service
+extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle);
+
+extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
+ short *peer_version );
+
+// Routine to close a named service
+extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to increment ref count on a named service
+extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to decrement ref count on a named service
+extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
+
+// Routine to set a control option for a named service
+extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
+ VCHI_SERVICE_OPTION_T option,
+ int value);
+
+// Routine to send a message across a service
+extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// scatter-gather (vector) and send message
+int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_EX_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// legacy scatter-gather (vector) and send message, only handles pointers
+int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle );
+
+// Routine to receive a msg from a service
+// Dequeue is equivalent to hold, copy into client buffer, release
+extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to look at a message in place.
+// The message is not dequeued, so a subsequent call to peek or dequeue
+// will return the same message.
+extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags );
+
+// Routine to remove a message after it has been read in place with peek
+// The first message on the queue is dequeued.
+extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
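+
+// A minimal peek-then-remove loop (illustrative sketch only, assuming a zero return
+// indicates success; 'process' is a hypothetical consumer):
+//
+//   void *data;
+//   uint32_t size;
+//   while (vchi_msg_peek(handle, &data, &size, VCHI_FLAGS_NONE) == 0) {
+//       process(data, size);
+//       vchi_msg_remove(handle);    // discard the message just examined
+//   }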
+
+// Routine to hold a message in place (without copying it).
+// The message is dequeued and the caller is left holding it; the descriptor is
+// filled in and must be released when the user has finished with the message.
+extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
+ void **data, // } may be NULL, as info can be
+ uint32_t *msg_size, // } obtained from HELD_MSG_T
+ VCHI_FLAGS_T flags,
+ VCHI_HELD_MSG_T *message_descriptor );
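+
+// Hold/release sketch (illustrative only, assuming a zero return indicates success;
+// 'consume' is a hypothetical user function):
+//
+//   VCHI_HELD_MSG_T held;
+//   void *data;
+//   uint32_t size;
+//   if (vchi_msg_hold(handle, &data, &size, VCHI_FLAGS_NONE, &held) == 0) {
+//       consume(data, size);
+//       vchi_held_msg_release(&held);   // hand the slot space back to VCHI
+//   }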
+
+// Initialise an iterator to look through messages in place
+extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_ITER_T *iter,
+ VCHI_FLAGS_T flags );
+
+/******************************************************************************
+ Global service support API - operations on held messages and message iterators
+ *****************************************************************************/
+
+// Routine to get the address of a held message
+extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the size of a held message
+extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the transmit timestamp as written into the header by the peer
+extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
+
+// Routine to get the reception timestamp, written as we parsed the header
+extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
+
+// Routine to release a held message after it has been processed
+extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
+
+// Indicates whether the iterator has a next message.
+extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
+
+// Return the pointer and length for the next message and advance the iterator.
+extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
+ void **data,
+ uint32_t *msg_size );
+
+// Remove the last message returned by vchi_msg_iter_next.
+// Can only be called once after each call to vchi_msg_iter_next.
+extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
+
+// Hold the last message returned by vchi_msg_iter_next.
+// Can only be called once after each call to vchi_msg_iter_next.
+extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
+ VCHI_HELD_MSG_T *message );
+
+// Return information for the next message, and hold it, advancing the iterator.
+extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
+ void **data, // } may be NULL
+ uint32_t *msg_size, // }
+ VCHI_HELD_MSG_T *message );
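+
+// Iterator usage sketch (illustrative only, assuming a zero return indicates success;
+// 'inspect' is a hypothetical reader - messages stay queued while iterating):
+//
+//   VCHI_MSG_ITER_T iter;
+//   void *data;
+//   uint32_t size;
+//   if (vchi_msg_look_ahead(handle, &iter, VCHI_FLAGS_NONE) == 0) {
+//       while (vchi_msg_iter_has_next(&iter) &&
+//              vchi_msg_iter_next(&iter, &data, &size) == 0) {
+//           inspect(data, size);
+//       }
+//   }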
+
+
+/******************************************************************************
+ Global bulk API
+ *****************************************************************************/
+
+// Routine to prepare interface for a transfer from the other side
+extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+
+
+// Prepare interface for a transfer from the other side into relocatable memory.
+int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T h_dst,
+ uint32_t offset,
+ uint32_t data_size,
+ const VCHI_FLAGS_T flags,
+ void * const bulk_handle );
+
+// Routine to queue up data ready for transfer to the other side (once it has signalled that it is ready)
+extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
+ const void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+
+
+/******************************************************************************
+ Configuration plumbing
+ *****************************************************************************/
+
+// function prototypes for the different mid layers (the state info gives the different physical connections)
+extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
+//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
+//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
+
+// declare all message drivers here
+const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T h_src,
+ uint32_t offset,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *transfer_handle );
+#endif /* VCHI_H_ */
+
+/****************************** End of file **********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
new file mode 100644
index 000000000000..26bc2d38d725
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -0,0 +1,224 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_CFG_H_
+#define VCHI_CFG_H_
+
+/****************************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by VCHI
+ * services.
+ ***************************************************************************************/
+
+/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
+/* Really determined by the message driver, and should be available from a run-time call. */
+#ifndef VCHI_BULK_ALIGN
+# if __VCCOREVER__ >= 0x04000000
+# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
+# else
+# define VCHI_BULK_ALIGN 16
+# endif
+#endif
+
+/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
+/* May be less than or greater than VCHI_BULK_ALIGN */
+/* Really determined by the message driver, and should be available from a run-time call. */
+#ifndef VCHI_BULK_GRANULARITY
+# if __VCCOREVER__ >= 0x04000000
+# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
+# else
+# define VCHI_BULK_GRANULARITY 16
+# endif
+#endif
+
+/* The largest possible message to be queued with vchi_msg_queue. */
+#ifndef VCHI_MAX_MSG_SIZE
+# if defined VCHI_LOCAL_HOST_PORT
+# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
+# else
+# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
+# endif
+#endif
+
+/******************************************************************************************
+ * Defines below are system configuration options, and should not be used by VCHI services.
+ *****************************************************************************************/
+
+/* How many connections can we support? A localhost implementation uses 2 connections,
+ * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
+ * driver. */
+#ifndef VCHI_MAX_NUM_CONNECTIONS
+# define VCHI_MAX_NUM_CONNECTIONS 3
+#endif
+
+/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
+ * amount of static memory. */
+#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
+# define VCHI_MAX_SERVICES_PER_CONNECTION 36
+#endif
+
+/* Adjust if using a message driver that supports more logical TX channels */
+#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
+# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
+#endif
+
+/* Adjust if using a message driver that supports more logical RX channels */
+#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
+# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
+#endif
+
+/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
+ * receive queue space, less message headers. */
+#ifndef VCHI_NUM_READ_SLOTS
+# if defined(VCHI_LOCAL_HOST_PORT)
+# define VCHI_NUM_READ_SLOTS 4
+# else
+# define VCHI_NUM_READ_SLOTS 48
+# endif
+#endif
+
+/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
+ * performance. Only define on VideoCore end, talking to host.
+ */
+//#define VCHI_MSG_RX_OVERRUN
+
+/* How many transmit slots do we use. Generally don't need many, as the hardware driver
+ * underneath VCHI will usually have its own buffering. */
+#ifndef VCHI_NUM_WRITE_SLOTS
+# define VCHI_NUM_WRITE_SLOTS 4
+#endif
+
+/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
+ * then it's taking up too much buffer space, and the peer service will be told to stop
+ * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
+ * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
+ * is too high. */
+#ifndef VCHI_XOFF_THRESHOLD
+# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
+#endif
+
+/* After we've sent an XOFF, the peer will be told to resume transmission once the local
+ * service has dequeued/released enough messages that it's now occupying
+ * VCHI_XON_THRESHOLD slots or fewer. */
+#ifndef VCHI_XON_THRESHOLD
+# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
+#endif
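+
+/* Worked example (defaults, illustrative only): with VCHI_NUM_READ_SLOTS at 48, a service is
+ * XOFFed once it holds/queues messages in 24 or more slots, and XONed again once it drops
+ * back to 12 slots or fewer. */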
+
+/* A size below which a bulk transfer omits the handshake completely and always goes
+ * via the message channel, if bulk auxiliary is being sent on that service. (The user
+ * can guarantee this by enabling unaligned transmits).
+ * Not API. */
+#ifndef VCHI_MIN_BULK_SIZE
+# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
+#endif
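+
+/* Worked example (defaults, illustrative only): with VCHI_MAX_MSG_SIZE at 4096, the minimum of
+ * 4096/2 and 4096 gives VCHI_MIN_BULK_SIZE == 2048, so bulk transfers below 2KB skip the
+ * handshake when bulk auxiliary is in use on that service. */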
+
+/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
+ * speed and latency; the smaller the chunk size the better chance of messages and other
+ * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
+ * break transmissions into chunks.
+ */
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
+# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
+#endif
+
+/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
+ * with multiple-line frames. Only use if the receiver can cope. */
+#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
+# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
+#endif
+
+/* How many TX messages can we have pending in our transmit slots. Once exhausted,
+ * vchi_msg_queue will be blocked. */
+#ifndef VCHI_TX_MSG_QUEUE_SIZE
+# define VCHI_TX_MSG_QUEUE_SIZE 256
+#endif
+
+/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
+ * will be suspended until older messages are dequeued/released. */
+#ifndef VCHI_RX_MSG_QUEUE_SIZE
+# define VCHI_RX_MSG_QUEUE_SIZE 256
+#endif
+
+/* Really should be able to cope if we run out of received message descriptors, by
+ * suspending parsing as the comment above says, but we don't. This sweeps the issue
+ * under the carpet. */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+# undef VCHI_RX_MSG_QUEUE_SIZE
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
+#endif
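+
+/* Worked example (defaults, illustrative only): with VCHI_MAX_MSG_SIZE at 4096 and
+ * VCHI_NUM_READ_SLOTS at 48, the bound is (4096/16 + 1) * 48 == 12336, so the default of 256
+ * above is replaced by 12336. */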
+
+/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
+ * will be blocked. */
+#ifndef VCHI_TX_BULK_QUEUE_SIZE
+# define VCHI_TX_BULK_QUEUE_SIZE 64
+#endif
+
+/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
+ * will be blocked. */
+#ifndef VCHI_RX_BULK_QUEUE_SIZE
+# define VCHI_RX_BULK_QUEUE_SIZE 64
+#endif
+
+/* A limit on how many outstanding bulk requests we expect the peer to give us. If
+ * the peer asks for more than this, VCHI will fail and assert. The number is determined
+ * by the peer's hardware - it's the number of outstanding requests that can be queued
+ * on all bulk channels. VC3's MPHI peripheral allows 16. */
+#ifndef VCHI_MAX_PEER_BULK_REQUESTS
+# define VCHI_MAX_PEER_BULK_REQUESTS 32
+#endif
+
+/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+ * transmitter on and off.
+ */
+/*#define VCHI_CCP2TX_MANUAL_POWER*/
+
+#ifndef VCHI_CCP2TX_MANUAL_POWER
+
+/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
+ * negative for no IDLE.
+ */
+# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
+# define VCHI_CCP2TX_IDLE_TIMEOUT 5
+# endif
+
+/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
+ * negative for no OFF.
+ */
+# ifndef VCHI_CCP2TX_OFF_TIMEOUT
+# define VCHI_CCP2TX_OFF_TIMEOUT 1000
+# endif
+
+#endif /* VCHI_CCP2TX_MANUAL_POWER */
+
+#endif /* VCHI_CFG_H_ */
+
+/****************************** End of file **********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg_internal.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg_internal.h
new file mode 100644
index 000000000000..35dcba4837d4
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg_internal.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_CFG_INTERNAL_H_
+#define VCHI_CFG_INTERNAL_H_
+
+/****************************************************************************************
+ * Control optimisation attempts.
+ ***************************************************************************************/
+
+// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
+#define VCHI_COARSE_LOCKING
+
+// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
+// (only relevant if VCHI_COARSE_LOCKING)
+#define VCHI_ELIDE_BLOCK_EXIT_LOCK
+
+// Avoid lock on non-blocking peek
+// (only relevant if VCHI_COARSE_LOCKING)
+#define VCHI_AVOID_PEEK_LOCK
+
+// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
+#define VCHI_MULTIPLE_HANDLER_THREADS
+
+// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
+// our way through the pool of descriptors.
+#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
+
+// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
+#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
+
+// Don't use message descriptors for TX messages that don't need them
+#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
+
+// Nano-locks for multiqueue
+//#define VCHI_MQUEUE_NANOLOCKS
+
+// Lock-free(er) dequeuing
+//#define VCHI_RX_NANOLOCKS
+
+#endif /*VCHI_CFG_INTERNAL_H_*/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
new file mode 100644
index 000000000000..d535a72970d3
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -0,0 +1,175 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_COMMON_H_
+#define VCHI_COMMON_H_
+
+
+//flags used when sending messages (must be bitmapped)
+typedef enum
+{
+ VCHI_FLAGS_NONE = 0x0,
+ VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
+ VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
+ VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
+ VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
+
+ VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
+ VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
+ VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
+ VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
+ VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
+ VCHI_FLAGS_INTERNAL = 0xFF0000
+} VCHI_FLAGS_T;
+
+// constants for vchi_crc_control()
+typedef enum {
+ VCHI_CRC_NOTHING = -1,
+ VCHI_CRC_PER_SERVICE = 0,
+ VCHI_CRC_EVERYTHING = 1,
+} VCHI_CRC_CONTROL_T;
+
+//callback reasons when an event occurs on a service
+typedef enum
+{
+ VCHI_CALLBACK_REASON_MIN,
+
+ //This indicates that there is data available
+ //handle is the msg id that was transmitted with the data
+ // When a message is received and there was no FULL message available previously, send callback
+ // Tasks get kicked by the callback, reset their event and try to read from the fifo until it fails
+ VCHI_CALLBACK_MSG_AVAILABLE,
+ VCHI_CALLBACK_MSG_SENT,
+ VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
+
+ // This indicates that a transfer from the other side has completed
+ VCHI_CALLBACK_BULK_RECEIVED,
+ //This indicates that data queued up to be sent has now gone
+ //handle is the msg id that was used when sending the data
+ VCHI_CALLBACK_BULK_SENT,
+ VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
+ VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
+
+ VCHI_CALLBACK_SERVICE_CLOSED,
+
+ // this side has sent XOFF to peer due to lack of data consumption by service
+ // (suggests the service may need to take some recovery action if it has
+ // been deliberately holding off consuming data)
+ VCHI_CALLBACK_SENT_XOFF,
+ VCHI_CALLBACK_SENT_XON,
+
+ // indicates that a bulk transfer has finished reading the source buffer
+ VCHI_CALLBACK_BULK_DATA_READ,
+
+ // power notification events (currently host side only)
+ VCHI_CALLBACK_PEER_OFF,
+ VCHI_CALLBACK_PEER_SUSPENDED,
+ VCHI_CALLBACK_PEER_ON,
+ VCHI_CALLBACK_PEER_RESUMED,
+ VCHI_CALLBACK_FORCED_POWER_OFF,
+
+#ifdef USE_VCHIQ_ARM
+ // some extra notifications provided by vchiq_arm
+ VCHI_CALLBACK_SERVICE_OPENED,
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
+#endif
+
+ VCHI_CALLBACK_REASON_MAX
+} VCHI_CALLBACK_REASON_T;
+
+// service control options
+typedef enum
+{
+ VCHI_SERVICE_OPTION_MIN,
+
+ VCHI_SERVICE_OPTION_TRACE,
+ VCHI_SERVICE_OPTION_SYNCHRONOUS,
+
+ VCHI_SERVICE_OPTION_MAX
+} VCHI_SERVICE_OPTION_T;
+
+
+//Callback used by all services / bulk transfers
+typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
+ VCHI_CALLBACK_REASON_T reason,
+ void *handle ); //for transmitting msg's only
+
+
+
+/*
+ * Define vector struct for scatter-gather (vector) operations
+ * Vectors can be nested - if a vector element has negative length, then
+ * the data pointer is treated as pointing to another vector array, with
+ * '-vec_len' elements. Thus to append a header onto an existing vector,
+ * you can do this:
+ *
+ * void foo(const VCHI_MSG_VECTOR_T *v, int n)
+ * {
+ * VCHI_MSG_VECTOR_T nv[2];
+ * nv[0].vec_base = my_header;
+ * nv[0].vec_len = sizeof my_header;
+ * nv[1].vec_base = v;
+ * nv[1].vec_len = -n;
+ * ...
+ *
+ */
+typedef struct vchi_msg_vector {
+ const void *vec_base;
+ int32_t vec_len;
+} VCHI_MSG_VECTOR_T;
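+
+/* A fuller version of the sketch above (illustrative only): prepend a header to an existing
+ * vector array v[0..n-1] and send it, e.g. via vchi_msg_queuev() declared in vchi.h
+ * (my_header is a hypothetical header struct):
+ *
+ * VCHI_MSG_VECTOR_T nv[2];
+ * nv[0].vec_base = &my_header;
+ * nv[0].vec_len  = sizeof my_header;
+ * nv[1].vec_base = v;                 // negative length marks a nested vector array
+ * nv[1].vec_len  = -n;
+ * vchi_msg_queuev(handle, nv, 2, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
+ */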
+
+// Opaque type for a connection API
+typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
+
+// Opaque type for a message driver
+typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
+
+
+// Iterator structure for reading ahead through received message queue. Allocated by client,
+// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
+// Iterates over messages in the queue at the instant of the call to vchi_msg_look_ahead -
+// will not proceed to messages received since. Behaviour is undefined if an iterator
+// is used again after messages for that service are removed/dequeued by any
+// means other than vchi_msg_iter_... calls on the iterator itself.
+typedef struct {
+ struct opaque_vchi_service_t *service;
+ void *last;
+ void *next;
+ void *remove;
+} VCHI_MSG_ITER_T;
+
+
+#endif // VCHI_COMMON_H_
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_mh.h b/drivers/staging/vc04_services/interface/vchi/vchi_mh.h
new file mode 100644
index 000000000000..198bd076b666
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_mh.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHI_MH_H_
+#define VCHI_MH_H_
+
+#include <linux/types.h>
+
+typedef int32_t VCHI_MEM_HANDLE_T;
+#define VCHI_MEM_HANDLE_INVALID 0
+
+#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
new file mode 100644
index 000000000000..ad398bae6ee4
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_VCHIQ_H
+#define VCHIQ_VCHIQ_H
+
+#include "vchiq_if.h"
+#include "vchiq_util.h"
+
+#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835.h
new file mode 100644
index 000000000000..7ea5c64d5343
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_2835_H
+#define VCHIQ_2835_H
+
+#include "vchiq_pagelist.h"
+
+#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
+#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
+
+#endif /* VCHIQ_2835_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
new file mode 100644
index 000000000000..c29040fdf9a7
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -0,0 +1,586 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <asm/pgtable.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
+
+#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
+
+#include "vchiq_arm.h"
+#include "vchiq_2835.h"
+#include "vchiq_connected.h"
+#include "vchiq_killable.h"
+
+#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
+
+#define BELL0 0x00
+#define BELL2 0x08
+
+typedef struct vchiq_2835_state_struct {
+ int inited;
+ VCHIQ_ARM_STATE_T arm_state;
+} VCHIQ_2835_ARM_STATE_T;
+
+static void __iomem *g_regs;
+static unsigned int g_cache_line_size = CACHE_LINE_SIZE; /* default; overridden by the "cache-line-size" DT property below */
+static unsigned int g_fragments_size;
+static char *g_fragments_base;
+static char *g_free_fragments;
+static struct semaphore g_free_fragments_sema;
+static unsigned long g_virt_to_bus_offset;
+
+extern int vchiq_arm_log_level;
+
+static DEFINE_SEMAPHORE(g_free_fragments_mutex);
+
+static irqreturn_t
+vchiq_doorbell_irq(int irq, void *dev_id);
+
+static int
+create_pagelist(char __user *buf, size_t count, unsigned short type,
+ struct task_struct *task, PAGELIST_T ** ppagelist);
+
+static void
+free_pagelist(PAGELIST_T *pagelist, int actual);
+
+int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+ VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
+ struct resource *res;
+ void *slot_mem;
+ dma_addr_t slot_phys;
+ u32 channelbase;
+ int slot_mem_size, frag_mem_size;
+ int err, irq, i;
+
+ g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
+
+ (void)of_property_read_u32(dev->of_node, "cache-line-size",
+ &g_cache_line_size);
+ g_fragments_size = 2 * g_cache_line_size;
+
+ /* Allocate space for the channels in coherent memory */
+ slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
+ frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
+
+ slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
+ &slot_phys, GFP_KERNEL);
+ if (!slot_mem) {
+ dev_err(dev, "could not allocate DMA memory\n");
+ return -ENOMEM;
+ }
+
+ WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);
+
+ vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
+ if (!vchiq_slot_zero)
+ return -EINVAL;
+
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
+ (int)slot_phys + slot_mem_size;
+ vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
+ MAX_FRAGMENTS;
+
+ g_fragments_base = (char *)slot_mem + slot_mem_size;
+ slot_mem_size += frag_mem_size;
+
+ g_free_fragments = g_fragments_base;
+ for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
+ *(char **)&g_fragments_base[i*g_fragments_size] =
+ &g_fragments_base[(i + 1)*g_fragments_size];
+ }
+ *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
+ sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
+
+ if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(g_regs))
+ return PTR_ERR(g_regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get IRQ\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
+ "VCHIQ doorbell", state);
+ if (err) {
+ dev_err(dev, "failed to register irq=%d\n", irq);
+ return err;
+ }
+
+ /* Send the base address of the slots to VideoCore */
+ channelbase = slot_phys;
+ err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
+ &channelbase, sizeof(channelbase));
+ if (err || channelbase) {
+ dev_err(dev, "failed to set channelbase\n");
+ return err ? : -ENXIO;
+ }
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq_init - done (slots %x, phys %pad)",
+ (unsigned int)vchiq_slot_zero, &slot_phys);
+
+ vchiq_call_connected_callbacks();
+
+ return 0;
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_init_state(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
+ status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
+ if(status != VCHIQ_SUCCESS)
+ {
+ ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
+ }
+ return status;
+}
+
+VCHIQ_ARM_STATE_T*
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
+{
+ if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
+ {
+ BUG();
+ }
+ return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
+}
+
+void
+remote_event_signal(REMOTE_EVENT_T *event)
+{
+ wmb();
+
+ event->fired = 1;
+
+ dsb(); /* data barrier operation */
+
+ if (event->armed)
+ writel(0, g_regs + BELL2); /* trigger vc interrupt */
+}
+
+int
+vchiq_copy_from_user(void *dst, const void *src, int size)
+{
+ if ((uint32_t)src < TASK_SIZE) {
+ return copy_from_user(dst, src, size);
+ } else {
+ memcpy(dst, src, size);
+ return 0;
+ }
+}
+
+VCHIQ_STATUS_T
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
+ void *offset, int size, int dir)
+{
+ PAGELIST_T *pagelist;
+ int ret;
+
+ WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
+
+ ret = create_pagelist((char __user *)offset, size,
+ (dir == VCHIQ_BULK_RECEIVE)
+ ? PAGELIST_READ
+ : PAGELIST_WRITE,
+ current,
+ &pagelist);
+ if (ret != 0)
+ return VCHIQ_ERROR;
+
+ bulk->handle = memhandle;
+ bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
+
+ /* Store the pagelist address in remote_data, which isn't used by the
+ slave. */
+ bulk->remote_data = pagelist;
+
+ return VCHIQ_SUCCESS;
+}
+
+void
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
+{
+ if (bulk && bulk->remote_data && bulk->actual)
+ free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
+}
+
+void
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
+{
+ /*
+ * This should only be called on the master (VideoCore) side, but
+ * provide an implementation to avoid the need for ifdefery.
+ */
+ BUG();
+}
+
+void
+vchiq_dump_platform_state(void *dump_context)
+{
+ char buf[80];
+ int len;
+ len = snprintf(buf, sizeof(buf),
+ " Platform: 2835 (VC master)");
+ vchiq_dump(dump_context, buf, len + 1);
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_suspend(VCHIQ_STATE_T *state)
+{
+ return VCHIQ_ERROR;
+}
+
+VCHIQ_STATUS_T
+vchiq_platform_resume(VCHIQ_STATE_T *state)
+{
+ return VCHIQ_SUCCESS;
+}
+
+void
+vchiq_platform_paused(VCHIQ_STATE_T *state)
+{
+}
+
+void
+vchiq_platform_resumed(VCHIQ_STATE_T *state)
+{
+}
+
+int
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
+{
+ return 1; /* autosuspend not supported - videocore always wanted */
+}
+
+int
+vchiq_platform_use_suspend_timer(void)
+{
+ return 0;
+}
+
+void
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
+{
+ vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
+}
+
+void
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
+{
+ (void)state;
+}
+
+/*
+ * Local functions
+ */
+
+static irqreturn_t
+vchiq_doorbell_irq(int irq, void *dev_id)
+{
+ VCHIQ_STATE_T *state = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int status;
+
+ /* Read (and clear) the doorbell */
+ status = readl(g_regs + BELL0);
+
+ if (status & 0x4) { /* Was the doorbell rung? */
+ remote_event_pollall(state);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/* There is a potential problem with partial cache lines (pages?)
+** at the ends of the block when reading. If the CPU accessed anything in
+** the same line (page?) then it may have pulled old data into the cache,
+** obscuring the new data underneath. We can solve this by transferring the
+** partial cache lines separately, and allowing the ARM to copy into the
+** cached area.
+
+** N.B. This implementation plays slightly fast and loose with the Linux
+** driver programming rules, e.g. its use of dmac_map_area instead of
+** dma_map_single, but it isn't a multi-platform driver and it benefits
+** from increased speed as a result.
+*/
+
+static int
+create_pagelist(char __user *buf, size_t count, unsigned short type,
+ struct task_struct *task, PAGELIST_T ** ppagelist)
+{
+ PAGELIST_T *pagelist;
+ struct page **pages;
+ unsigned long *addrs;
+ unsigned int num_pages, offset, i;
+ char *addr, *base_addr, *next_addr;
+ int run, addridx, actual_pages;
+ unsigned long *need_release;
+
+ offset = (unsigned int)buf & (PAGE_SIZE - 1);
+ num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ *ppagelist = NULL;
+
+ /* Allocate enough storage to hold the page pointers and the page
+ ** list
+ */
+ pagelist = kmalloc(sizeof(PAGELIST_T) +
+ (num_pages * sizeof(unsigned long)) +
+ sizeof(unsigned long) +
+ (num_pages * sizeof(pages[0])),
+ GFP_KERNEL);
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "create_pagelist - %x", (unsigned int)pagelist);
+ if (!pagelist)
+ return -ENOMEM;
+
+ addrs = pagelist->addrs;
+ need_release = (unsigned long *)(addrs + num_pages);
+ pages = (struct page **)(addrs + num_pages + 1);
+
+ if (is_vmalloc_addr(buf)) {
+ int dir = (type == PAGELIST_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ unsigned long length = count;
+ unsigned int off = offset;
+
+ for (actual_pages = 0; actual_pages < num_pages;
+ actual_pages++) {
+ struct page *pg = vmalloc_to_page(buf + (actual_pages *
+ PAGE_SIZE));
+ size_t bytes = PAGE_SIZE - off;
+
+ if (bytes > length)
+ bytes = length;
+ pages[actual_pages] = pg;
+ dmac_map_area(page_address(pg) + off, bytes, dir);
+ length -= bytes;
+ off = 0;
+ }
+ *need_release = 0; /* do not try and release vmalloc pages */
+ } else {
+ down_read(&task->mm->mmap_sem);
+ actual_pages = get_user_pages(task, task->mm,
+ (unsigned long)buf & ~(PAGE_SIZE - 1),
+ num_pages,
+ (type == PAGELIST_READ) /*Write */ ,
+ 0 /*Force */ ,
+ pages,
+ NULL /*vmas */);
+ up_read(&task->mm->mmap_sem);
+
+ if (actual_pages != num_pages) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "create_pagelist - only %d/%d pages locked",
+ actual_pages,
+ num_pages);
+
+ /* This is probably due to the process being killed */
+ while (actual_pages > 0)
+ {
+ actual_pages--;
+ page_cache_release(pages[actual_pages]);
+ }
+ kfree(pagelist);
+ if (actual_pages == 0)
+ actual_pages = -ENOMEM;
+ return actual_pages;
+ }
+ *need_release = 1; /* release user pages */
+ }
+
+ pagelist->length = count;
+ pagelist->type = type;
+ pagelist->offset = offset;
+
+ /* Group the pages into runs of contiguous pages */
+
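+ /*
+ * Each addrs[] entry packs a page-aligned base address together with
+ * the number of additional contiguous pages in its low-order bits.
+ */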
+ base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
+ next_addr = base_addr + PAGE_SIZE;
+ addridx = 0;
+ run = 0;
+
+ for (i = 1; i < num_pages; i++) {
+ addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
+ if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
+ next_addr += PAGE_SIZE;
+ run++;
+ } else {
+ addrs[addridx] = (unsigned long)base_addr + run;
+ addridx++;
+ base_addr = addr;
+ next_addr = addr + PAGE_SIZE;
+ run = 0;
+ }
+ }
+
+ addrs[addridx] = (unsigned long)base_addr + run;
+ addridx++;
+
+ /* Partial cache lines (fragments) require special measures */
+ if ((type == PAGELIST_READ) &&
+ ((pagelist->offset & (g_cache_line_size - 1)) ||
+ ((pagelist->offset + pagelist->length) &
+ (g_cache_line_size - 1)))) {
+ char *fragments;
+
+ if (down_interruptible(&g_free_fragments_sema) != 0) {
+ kfree(pagelist);
+ return -EINTR;
+ }
+
+ WARN_ON(g_free_fragments == NULL);
+
+ down(&g_free_fragments_mutex);
+ fragments = g_free_fragments;
+ WARN_ON(fragments == NULL);
+ g_free_fragments = *(char **) g_free_fragments;
+ up(&g_free_fragments_mutex);
+ pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
+ (fragments - g_fragments_base) / g_fragments_size;
+ }
+
+ dmac_flush_range(pagelist, addrs + num_pages);
+
+ *ppagelist = pagelist;
+
+ return 0;
+}
+
+static void
+free_pagelist(PAGELIST_T *pagelist, int actual)
+{
+ unsigned long *need_release;
+ struct page **pages;
+ unsigned int num_pages, i;
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
+
+ num_pages =
+ (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ need_release = (unsigned long *)(pagelist->addrs + num_pages);
+ pages = (struct page **)(pagelist->addrs + num_pages + 1);
+
+ /* Deal with any partial cache lines (fragments) */
+ if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
+ char *fragments = g_fragments_base +
+ (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
+ g_fragments_size;
+ int head_bytes, tail_bytes;
+ head_bytes = (g_cache_line_size - pagelist->offset) &
+ (g_cache_line_size - 1);
+ tail_bytes = (pagelist->offset + actual) &
+ (g_cache_line_size - 1);
+
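+ /*
+ * The fragment buffer holds the partial leading cache line at
+ * offset 0 and the partial trailing cache line at offset
+ * g_cache_line_size; copy each back into the user pages.
+ */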
+ if ((actual >= 0) && (head_bytes != 0)) {
+ if (head_bytes > actual)
+ head_bytes = actual;
+
+ memcpy((char *)page_address(pages[0]) +
+ pagelist->offset,
+ fragments,
+ head_bytes);
+ }
+ if ((actual >= 0) && (head_bytes < actual) &&
+ (tail_bytes != 0)) {
+ memcpy((char *)page_address(pages[num_pages - 1]) +
+ ((pagelist->offset + actual) &
+ (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
+ fragments + g_cache_line_size,
+ tail_bytes);
+ }
+
+ down(&g_free_fragments_mutex);
+ *(char **)fragments = g_free_fragments;
+ g_free_fragments = fragments;
+ up(&g_free_fragments_mutex);
+ up(&g_free_fragments_sema);
+ }
+
+ if (*need_release) {
+ unsigned int length = pagelist->length;
+ unsigned int offset = pagelist->offset;
+
+ for (i = 0; i < num_pages; i++) {
+ struct page *pg = pages[i];
+
+ if (pagelist->type != PAGELIST_WRITE) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (bytes > length)
+ bytes = length;
+ dmac_unmap_area(page_address(pg) + offset,
+ bytes, DMA_FROM_DEVICE);
+ length -= bytes;
+ offset = 0;
+ set_page_dirty(pg);
+ }
+ page_cache_release(pg);
+ }
+ }
+
+ kfree(pagelist);
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
new file mode 100644
index 000000000000..e11c0e07471b
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -0,0 +1,2903 @@
+/**
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bug.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#include "vchiq_core.h"
+#include "vchiq_ioctl.h"
+#include "vchiq_arm.h"
+#include "vchiq_debugfs.h"
+#include "vchiq_killable.h"
+
+#define DEVICE_NAME "vchiq"
+
+/* Override the default prefix, which would be vchiq_arm (from the filename) */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX DEVICE_NAME "."
+
+#define VCHIQ_MINOR 0
+
+/* Some per-instance constants */
+#define MAX_COMPLETIONS 16
+#define MAX_SERVICES 64
+#define MAX_ELEMENTS 8
+#define MSG_QUEUE_SIZE 64
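+/* MAX_COMPLETIONS and MSG_QUEUE_SIZE must be powers of two - the insert and
+** remove indices are free-running and are wrapped with an AND of (size - 1).
+*/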
+
+#define KEEPALIVE_VER 1
+#define KEEPALIVE_VER_MIN KEEPALIVE_VER
+
+/* Run time control of log level, based on KERN_XXX level. */
+int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
+
+#define SUSPEND_TIMER_TIMEOUT_MS 100
+#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
+
+#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
+static const char *const suspend_state_names[] = {
+ "VC_SUSPEND_FORCE_CANCELED",
+ "VC_SUSPEND_REJECTED",
+ "VC_SUSPEND_FAILED",
+ "VC_SUSPEND_IDLE",
+ "VC_SUSPEND_REQUESTED",
+ "VC_SUSPEND_IN_PROGRESS",
+ "VC_SUSPEND_SUSPENDED"
+};
+#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
+static const char *const resume_state_names[] = {
+ "VC_RESUME_FAILED",
+ "VC_RESUME_IDLE",
+ "VC_RESUME_REQUESTED",
+ "VC_RESUME_IN_PROGRESS",
+ "VC_RESUME_RESUMED"
+};
+/* The number of times we allow force suspend to timeout before actually
+** _forcing_ suspend. This is to cater for SW which fails to release vchiq
+** correctly - we don't want to prevent ARM suspend indefinitely in this case.
+*/
+#define FORCE_SUSPEND_FAIL_MAX 8
+
+/* The time in ms allowed for videocore to go idle when force suspend has been
+ * requested */
+#define FORCE_SUSPEND_TIMEOUT_MS 200
+
+
+static void suspend_timer_callback(unsigned long context);
+
+
+typedef struct user_service_struct {
+ VCHIQ_SERVICE_T *service;
+ void *userdata;
+ VCHIQ_INSTANCE_T instance;
+ char is_vchi;
+ char dequeue_pending;
+ char close_pending;
+ int message_available_pos;
+ int msg_insert;
+ int msg_remove;
+ struct semaphore insert_event;
+ struct semaphore remove_event;
+ struct semaphore close_event;
+ VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
+} USER_SERVICE_T;
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance_struct {
+ VCHIQ_STATE_T *state;
+ VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
+ int completion_insert;
+ int completion_remove;
+ struct semaphore insert_event;
+ struct semaphore remove_event;
+ struct mutex completion_mutex;
+
+ int connected;
+ int closing;
+ int pid;
+ int mark;
+ int use_close_delivered;
+ int trace;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+
+ VCHIQ_DEBUGFS_NODE_T debugfs_node;
+};
+
+typedef struct dump_context_struct {
+ char __user *buf;
+ size_t actual;
+ size_t space;
+ loff_t offset;
+} DUMP_CONTEXT_T;
+
+static struct cdev vchiq_cdev;
+static dev_t vchiq_devid;
+static VCHIQ_STATE_T g_state;
+static struct class *vchiq_class;
+static struct device *vchiq_dev;
+static DEFINE_SPINLOCK(msg_queue_spinlock);
+
+static const char *const ioctl_names[] = {
+ "CONNECT",
+ "SHUTDOWN",
+ "CREATE_SERVICE",
+ "REMOVE_SERVICE",
+ "QUEUE_MESSAGE",
+ "QUEUE_BULK_TRANSMIT",
+ "QUEUE_BULK_RECEIVE",
+ "AWAIT_COMPLETION",
+ "DEQUEUE_MESSAGE",
+ "GET_CLIENT_ID",
+ "GET_CONFIG",
+ "CLOSE_SERVICE",
+ "USE_SERVICE",
+ "RELEASE_SERVICE",
+ "SET_SERVICE_OPTION",
+ "DUMP_PHYS_MEM",
+ "LIB_VERSION",
+ "CLOSE_DELIVERED"
+};
+
+vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
+ (VCHIQ_IOC_MAX + 1));
+
+static void
+dump_phys_mem(void *virt_addr, uint32_t num_bytes);
+
+/****************************************************************************
+*
+* add_completion
+*
+***************************************************************************/
+
+static VCHIQ_STATUS_T
+add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
+ void *bulk_userdata)
+{
+ VCHIQ_COMPLETION_DATA_T *completion;
+ DEBUG_INITIALISE(g_state.local)
+
+ while (instance->completion_insert ==
+ (instance->completion_remove + MAX_COMPLETIONS)) {
+ /* Out of space - wait for the client */
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_log_trace(vchiq_arm_log_level,
+ "add_completion - completion queue full");
+ DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
+ if (down_interruptible(&instance->remove_event) != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback interrupted");
+ return VCHIQ_RETRY;
+ } else if (instance->closing) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback closing");
+ return VCHIQ_ERROR;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ }
+
+ completion =
+ &instance->completions[instance->completion_insert &
+ (MAX_COMPLETIONS - 1)];
+
+ completion->header = header;
+ completion->reason = reason;
+ /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
+ completion->service_userdata = user_service->service;
+ completion->bulk_userdata = bulk_userdata;
+
+ if (reason == VCHIQ_SERVICE_CLOSED) {
+ /* Take an extra reference, to be held until
+ this CLOSED notification is delivered. */
+ lock_service(user_service->service);
+ if (instance->use_close_delivered)
+ user_service->close_pending = 1;
+ }
+
+ /* A write barrier is needed here to ensure that the entire completion
+ record is written out before the insert point. */
+ wmb();
+
+ if (reason == VCHIQ_MESSAGE_AVAILABLE)
+ user_service->message_available_pos =
+ instance->completion_insert;
+ instance->completion_insert++;
+
+ up(&instance->insert_event);
+
+ return VCHIQ_SUCCESS;
+}
+
+/****************************************************************************
+*
+* service_callback
+*
+***************************************************************************/
+
+static VCHIQ_STATUS_T
+service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
+ VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
+{
+ /* How do we ensure the callback goes to the right client?
+ ** The service_user data points to a USER_SERVICE_T record containing
+ ** the original callback and the user state structure, which contains a
+ ** circular buffer for completion records.
+ */
+ USER_SERVICE_T *user_service;
+ VCHIQ_SERVICE_T *service;
+ VCHIQ_INSTANCE_T instance;
+ DEBUG_INITIALISE(g_state.local)
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+ service = handle_to_service(handle);
+ BUG_ON(!service);
+ user_service = (USER_SERVICE_T *)service->base.userdata;
+ instance = user_service->instance;
+
+ if (!instance || instance->closing)
+ return VCHIQ_SUCCESS;
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "service_callback - service %lx(%d,%p), reason %d, header %lx, "
+ "instance %lx, bulk_userdata %lx",
+ (unsigned long)user_service,
+ service->localport, user_service->userdata,
+ reason, (unsigned long)header,
+ (unsigned long)instance, (unsigned long)bulk_userdata);
+
+ if (header && user_service->is_vchi) {
+ spin_lock(&msg_queue_spinlock);
+ while (user_service->msg_insert ==
+ (user_service->msg_remove + MSG_QUEUE_SIZE)) {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
+ vchiq_log_trace(vchiq_arm_log_level,
+ "service_callback - msg queue full");
+ /* If there is no MESSAGE_AVAILABLE in the completion
+ ** queue, add one
+ */
+ if ((user_service->message_available_pos -
+ instance->completion_remove) < 0) {
+ VCHIQ_STATUS_T status;
+ vchiq_log_info(vchiq_arm_log_level,
+ "Inserting extra MESSAGE_AVAILABLE");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ status = add_completion(instance, reason,
+ NULL, user_service, bulk_userdata);
+ if (status != VCHIQ_SUCCESS) {
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return status;
+ }
+ }
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ if (down_interruptible(&user_service->remove_event)
+ != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback interrupted");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return VCHIQ_RETRY;
+ } else if (instance->closing) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback closing");
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ return VCHIQ_ERROR;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ spin_lock(&msg_queue_spinlock);
+ }
+
+ user_service->msg_queue[user_service->msg_insert &
+ (MSG_QUEUE_SIZE - 1)] = header;
+ user_service->msg_insert++;
+ spin_unlock(&msg_queue_spinlock);
+
+ up(&user_service->insert_event);
+
+ /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
+ ** there is a MESSAGE_AVAILABLE in the completion queue then
+ ** bypass the completion queue.
+ */
+ if (((user_service->message_available_pos -
+ instance->completion_remove) >= 0) ||
+ user_service->dequeue_pending) {
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ user_service->dequeue_pending = 0;
+ return VCHIQ_SUCCESS;
+ }
+
+ header = NULL;
+ }
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+ return add_completion(instance, reason, header, user_service,
+ bulk_userdata);
+}
+
+/****************************************************************************
+*
+* user_service_free
+*
+***************************************************************************/
+static void
+user_service_free(void *userdata)
+{
+ kfree(userdata);
+}
+
+/****************************************************************************
+*
+* close_delivered
+*
+***************************************************************************/
+static void close_delivered(USER_SERVICE_T *user_service)
+{
+ vchiq_log_info(vchiq_arm_log_level,
+ "close_delivered(handle=%x)",
+ user_service->service->handle);
+
+ if (user_service->close_pending) {
+ /* Allow the underlying service to be culled */
+ unlock_service(user_service->service);
+
+ /* Wake the user-thread blocked in close_ or remove_service */
+ up(&user_service->close_event);
+
+ user_service->close_pending = 0;
+ }
+}
+
+/****************************************************************************
+*
+* vchiq_ioctl
+*
+***************************************************************************/
+static long
+vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ VCHIQ_INSTANCE_T instance = file->private_data;
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ VCHIQ_SERVICE_T *service = NULL;
+ long ret = 0;
+ int i, rc;
+ DEBUG_INITIALISE(g_state.local)
+
+ vchiq_log_trace(vchiq_arm_log_level,
+ "vchiq_ioctl - instance %x, cmd %s, arg %lx",
+ (unsigned int)instance,
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+
+ switch (cmd) {
+ case VCHIQ_IOC_SHUTDOWN:
+ if (!instance->connected)
+ break;
+
+ /* Remove all services */
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i)) != NULL) {
+ status = vchiq_remove_service(service->handle);
+ unlock_service(service);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+ service = NULL;
+
+ if (status == VCHIQ_SUCCESS) {
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ up(&instance->insert_event);
+ }
+
+ break;
+
+ case VCHIQ_IOC_CONNECT:
+ if (instance->connected) {
+ ret = -EINVAL;
+ break;
+ }
+ rc = mutex_lock_interruptible(&instance->state->mutex);
+ if (rc != 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq: connect: could not lock mutex for "
+ "state %d: %d",
+ instance->state->id, rc);
+ ret = -EINTR;
+ break;
+ }
+ status = vchiq_connect_internal(instance->state, instance);
+ mutex_unlock(&instance->state->mutex);
+
+ if (status == VCHIQ_SUCCESS)
+ instance->connected = 1;
+ else
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq: could not connect: %d", status);
+ break;
+
+ case VCHIQ_IOC_CREATE_SERVICE: {
+ VCHIQ_CREATE_SERVICE_T args;
+ USER_SERVICE_T *user_service = NULL;
+ void *userdata;
+ int srvstate;
+
+ if (copy_from_user
+ (&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+
+ user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
+ if (!user_service) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (args.is_open) {
+ if (!instance->connected) {
+ ret = -ENOTCONN;
+ kfree(user_service);
+ break;
+ }
+ srvstate = VCHIQ_SRVSTATE_OPENING;
+ } else {
+ srvstate =
+ instance->connected ?
+ VCHIQ_SRVSTATE_LISTENING :
+ VCHIQ_SRVSTATE_HIDDEN;
+ }
+
+ userdata = args.params.userdata;
+ args.params.callback = service_callback;
+ args.params.userdata = user_service;
+ service = vchiq_add_service_internal(
+ instance->state,
+ &args.params, srvstate,
+ instance, user_service_free);
+
+ if (service != NULL) {
+ user_service->service = service;
+ user_service->userdata = userdata;
+ user_service->instance = instance;
+ user_service->is_vchi = (args.is_vchi != 0);
+ user_service->dequeue_pending = 0;
+ user_service->close_pending = 0;
+ user_service->message_available_pos =
+ instance->completion_remove - 1;
+ user_service->msg_insert = 0;
+ user_service->msg_remove = 0;
+ sema_init(&user_service->insert_event, 0);
+ sema_init(&user_service->remove_event, 0);
+ sema_init(&user_service->close_event, 0);
+
+ if (args.is_open) {
+ status = vchiq_open_service_internal
+ (service, instance->pid);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_remove_service(service->handle);
+ service = NULL;
+ ret = (status == VCHIQ_RETRY) ?
+ -EINTR : -EIO;
+ break;
+ }
+ }
+
+ if (copy_to_user((void __user *)
+ &(((VCHIQ_CREATE_SERVICE_T __user *)
+ arg)->handle),
+ (const void *)&service->handle,
+ sizeof(service->handle)) != 0) {
+ ret = -EFAULT;
+ vchiq_remove_service(service->handle);
+ }
+
+ service = NULL;
+ } else {
+ ret = -EEXIST;
+ kfree(user_service);
+ }
+ } break;
+
+ case VCHIQ_IOC_CLOSE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ /* close_pending is false on first entry, and when the
+ wait in vchiq_close_service has been interrupted. */
+ if (!user_service->close_pending) {
+ status = vchiq_close_service(service->handle);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+
+ /* close_pending is true once the underlying service
+ has been closed until the client library calls the
+ CLOSE_DELIVERED ioctl, signalling close_event. */
+ if (user_service->close_pending &&
+ down_interruptible(&user_service->close_event))
+ status = VCHIQ_RETRY;
+ }
+ else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_REMOVE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ /* close_pending is false on first entry, and when the
+ wait in vchiq_close_service has been interrupted. */
+ if (!user_service->close_pending) {
+ status = vchiq_remove_service(service->handle);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+
+ /* close_pending is true once the underlying service
+ has been closed until the client library calls the
+ CLOSE_DELIVERED ioctl, signalling close_event. */
+ if (user_service->close_pending &&
+ down_interruptible(&user_service->close_event))
+ status = VCHIQ_RETRY;
+ }
+ else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_USE_SERVICE:
+ case VCHIQ_IOC_RELEASE_SERVICE: {
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+
+ service = find_service_for_instance(instance, handle);
+ if (service != NULL) {
+ status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ vchiq_use_service_internal(service) :
+ vchiq_release_service_internal(service);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s: cmd %s returned error %d for "
+ "service %c%c%c%c:%03d",
+ __func__,
+ (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "VCHIQ_IOC_USE_SERVICE" :
+ "VCHIQ_IOC_RELEASE_SERVICE",
+ status,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->client_id);
+ ret = -EINVAL;
+ }
+ } else
+ ret = -EINVAL;
+ } break;
+
+ case VCHIQ_IOC_QUEUE_MESSAGE: {
+ VCHIQ_QUEUE_MESSAGE_T args;
+ if (copy_from_user
+ (&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+
+ service = find_service_for_instance(instance, args.handle);
+
+ if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
+ /* Copy elements into kernel space */
+ VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
+ if (copy_from_user(elements, args.elements,
+ args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
+ status = vchiq_queue_message
+ (args.handle,
+ elements, args.count);
+ else
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ } break;
+
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
+ VCHIQ_QUEUE_BULK_TRANSFER_T args;
+ struct bulk_waiter_node *waiter = NULL;
+ VCHIQ_BULK_DIR_T dir =
+ (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+ if (copy_from_user
+ (&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
+ waiter = kzalloc(sizeof(struct bulk_waiter_node),
+ GFP_KERNEL);
+ if (!waiter) {
+ ret = -ENOMEM;
+ break;
+ }
+ args.userdata = &waiter->bulk_waiter;
+ } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
+ struct list_head *pos;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each(pos, &instance->bulk_waiter_list) {
+ if (list_entry(pos, struct bulk_waiter_node,
+ list)->pid == current->pid) {
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ break;
+ }
+
+ }
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ if (!waiter) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "no bulk_waiter found for pid %d",
+ current->pid);
+ ret = -ESRCH;
+ break;
+ }
+ vchiq_log_info(vchiq_arm_log_level,
+ "found bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->pid);
+ args.userdata = &waiter->bulk_waiter;
+ }
+ status = vchiq_bulk_transfer
+ (args.handle,
+ VCHI_MEM_HANDLE_INVALID,
+ args.data, args.size,
+ args.userdata, args.mode,
+ dir);
+ if (!waiter)
+ break;
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ if (waiter->bulk_waiter.bulk) {
+ /* Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ waiter->bulk_waiter.bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ kfree(waiter);
+ } else {
+ const VCHIQ_BULK_MODE_T mode_waiting =
+ VCHIQ_BULK_MODE_WAITING;
+ waiter->pid = current->pid;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ vchiq_log_info(vchiq_arm_log_level,
+ "saved bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->pid);
+
+ if (copy_to_user((void __user *)
+ &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
+ arg)->mode),
+ (const void *)&mode_waiting,
+ sizeof(mode_waiting)) != 0)
+ ret = -EFAULT;
+ }
+ } break;
+
+ case VCHIQ_IOC_AWAIT_COMPLETION: {
+ VCHIQ_AWAIT_COMPLETION_T args;
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ if (!instance->connected) {
+ ret = -ENOTCONN;
+ break;
+ }
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&instance->completion_mutex);
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ while ((instance->completion_remove ==
+ instance->completion_insert)
+ && !instance->closing) {
+ int rc;
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ mutex_unlock(&instance->completion_mutex);
+ rc = down_interruptible(&instance->insert_event);
+ mutex_lock(&instance->completion_mutex);
+ if (rc != 0) {
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ vchiq_log_info(vchiq_arm_log_level,
+ "AWAIT_COMPLETION interrupted");
+ ret = -EINTR;
+ break;
+ }
+ }
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ /* A read memory barrier is needed to stop prefetch of a stale
+ ** completion record
+ */
+ rmb();
+
+ if (ret == 0) {
+ int msgbufcount = args.msgbufcount;
+ for (ret = 0; ret < args.count; ret++) {
+ VCHIQ_COMPLETION_DATA_T *completion;
+ VCHIQ_SERVICE_T *service;
+ USER_SERVICE_T *user_service;
+ VCHIQ_HEADER_T *header;
+ if (instance->completion_remove ==
+ instance->completion_insert)
+ break;
+ completion = &instance->completions[
+ instance->completion_remove &
+ (MAX_COMPLETIONS - 1)];
+
+ service = completion->service_userdata;
+ user_service = service->base.userdata;
+ completion->service_userdata =
+ user_service->userdata;
+
+ header = completion->header;
+ if (header) {
+ void __user *msgbuf;
+ int msglen;
+
+ msglen = header->size +
+ sizeof(VCHIQ_HEADER_T);
+ /* This must be a VCHIQ-style service */
+ if (args.msgbufsize < msglen) {
+ vchiq_log_error(
+ vchiq_arm_log_level,
+ "header %x: msgbufsize"
+ " %x < msglen %x",
+ (unsigned int)header,
+ args.msgbufsize,
+ msglen);
+ WARN(1, "invalid message "
+ "size\n");
+ if (ret == 0)
+ ret = -EMSGSIZE;
+ break;
+ }
+ if (msgbufcount <= 0)
+ /* Stall here for lack of a
+ ** buffer for the message. */
+ break;
+ /* Get the pointer from user space */
+ msgbufcount--;
+ if (copy_from_user(&msgbuf,
+ (const void __user *)
+ &args.msgbufs[msgbufcount],
+ sizeof(msgbuf)) != 0) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Copy the message to user space */
+ if (copy_to_user(msgbuf, header,
+ msglen) != 0) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Now it has been copied, the message
+ ** can be released. */
+ vchiq_release_message(service->handle,
+ header);
+
+ /* The completion must point to the
+ ** msgbuf. */
+ completion->header = msgbuf;
+ }
+
+ if ((completion->reason ==
+ VCHIQ_SERVICE_CLOSED) &&
+ !instance->use_close_delivered)
+ unlock_service(service);
+
+ if (copy_to_user((void __user *)(
+ (size_t)args.buf +
+ ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
+ completion,
+ sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ instance->completion_remove++;
+ }
+
+ if (msgbufcount != args.msgbufcount) {
+ if (copy_to_user((void __user *)
+ &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
+ msgbufcount,
+ &msgbufcount,
+ sizeof(msgbufcount)) != 0) {
+ ret = -EFAULT;
+ }
+ }
+ }
+
+ if (ret != 0)
+ up(&instance->remove_event);
+ mutex_unlock(&instance->completion_mutex);
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ } break;
+
+ case VCHIQ_IOC_DEQUEUE_MESSAGE: {
+ VCHIQ_DEQUEUE_MESSAGE_T args;
+ USER_SERVICE_T *user_service;
+ VCHIQ_HEADER_T *header;
+
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ if (copy_from_user
+ (&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+ user_service = (USER_SERVICE_T *)service->base.userdata;
+ if (user_service->is_vchi == 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_lock(&msg_queue_spinlock);
+ if (user_service->msg_remove == user_service->msg_insert) {
+ if (!args.blocking) {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ ret = -EWOULDBLOCK;
+ break;
+ }
+ user_service->dequeue_pending = 1;
+ do {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ if (down_interruptible(
+ &user_service->insert_event) != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "DEQUEUE_MESSAGE interrupted");
+ ret = -EINTR;
+ break;
+ }
+ spin_lock(&msg_queue_spinlock);
+ } while (user_service->msg_remove ==
+ user_service->msg_insert);
+
+ if (ret)
+ break;
+ }
+
+ BUG_ON((int)(user_service->msg_insert -
+ user_service->msg_remove) < 0);
+
+ header = user_service->msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&msg_queue_spinlock);
+
+ up(&user_service->remove_event);
+ if (header == NULL)
+ ret = -ENOTCONN;
+ else if (header->size <= args.bufsize) {
+ /* Copy to user space if msgbuf is not NULL */
+ if ((args.buf == NULL) ||
+ (copy_to_user((void __user *)args.buf,
+ header->data,
+ header->size) == 0)) {
+ ret = header->size;
+ vchiq_release_message(
+ service->handle,
+ header);
+ } else
+ ret = -EFAULT;
+ } else {
+ vchiq_log_error(vchiq_arm_log_level,
+ "header %x: bufsize %x < size %x",
+ (unsigned int)header, args.bufsize,
+ header->size);
+ WARN(1, "invalid size\n");
+ ret = -EMSGSIZE;
+ }
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ } break;
+
+ case VCHIQ_IOC_GET_CLIENT_ID: {
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+
+ ret = vchiq_get_client_id(handle);
+ } break;
+
+ case VCHIQ_IOC_GET_CONFIG: {
+ VCHIQ_GET_CONFIG_T args;
+ VCHIQ_CONFIG_T config;
+
+ if (copy_from_user(&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ if (args.config_size > sizeof(config)) {
+ ret = -EINVAL;
+ break;
+ }
+ status = vchiq_get_config(instance, args.config_size, &config);
+ if (status == VCHIQ_SUCCESS) {
+ if (copy_to_user((void __user *)args.pconfig,
+ &config, args.config_size) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ } break;
+
+ case VCHIQ_IOC_SET_SERVICE_OPTION: {
+ VCHIQ_SET_SERVICE_OPTION_T args;
+
+ if (copy_from_user(
+ &args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+
+ service = find_service_for_instance(instance, args.handle);
+ if (!service) {
+ ret = -EINVAL;
+ break;
+ }
+
+ status = vchiq_set_service_option(
+ args.handle, args.option, args.value);
+ } break;
+
+ case VCHIQ_IOC_DUMP_PHYS_MEM: {
+ VCHIQ_DUMP_MEM_T args;
+
+ if (copy_from_user
+ (&args, (const void __user *)arg,
+ sizeof(args)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ dump_phys_mem(args.virt_addr, args.num_bytes);
+ } break;
+
+ case VCHIQ_IOC_LIB_VERSION: {
+ unsigned int lib_version = (unsigned int)arg;
+
+ if (lib_version < VCHIQ_VERSION_MIN)
+ ret = -EINVAL;
+ else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
+ instance->use_close_delivered = 1;
+ } break;
+
+ case VCHIQ_IOC_CLOSE_DELIVERED: {
+ VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+
+ service = find_closed_service_for_instance(instance, handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ close_delivered(user_service);
+ }
+ else
+ ret = -EINVAL;
+ } break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ if (service)
+ unlock_service(service);
+
+ if (ret == 0) {
+ if (status == VCHIQ_ERROR)
+ ret = -EIO;
+ else if (status == VCHIQ_RETRY)
+ ret = -EINTR;
+ }
+
+ if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
+ (ret != -EWOULDBLOCK))
+ vchiq_log_info(vchiq_arm_log_level,
+ " ioctl instance %lx, cmd %s -> status %d, %ld",
+ (unsigned long)instance,
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] :
+ "<invalid>",
+ status, ret);
+ else
+ vchiq_log_trace(vchiq_arm_log_level,
+ " ioctl instance %lx, cmd %s -> status %d, %ld",
+ (unsigned long)instance,
+ (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] :
+ "<invalid>",
+ status, ret);
+
+ return ret;
+}
+
+/****************************************************************************
+*
+* vchiq_open
+*
+***************************************************************************/
+
+static int
+vchiq_open(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+ vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
+ switch (dev) {
+ case VCHIQ_MINOR: {
+ int ret;
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ VCHIQ_INSTANCE_T instance;
+
+ if (!state) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "vchiq has no connection to VideoCore");
+ return -ENOTCONN;
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+
+ instance->state = state;
+ instance->pid = current->tgid;
+
+ ret = vchiq_debugfs_add_instance(instance);
+ if (ret != 0) {
+ kfree(instance);
+ return ret;
+ }
+
+ sema_init(&instance->insert_event, 0);
+ sema_init(&instance->remove_event, 0);
+ mutex_init(&instance->completion_mutex);
+ mutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ file->private_data = instance;
+ } break;
+
+ default:
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unknown minor device: %d", dev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+*
+* vchiq_release
+*
+***************************************************************************/
+
+static int
+vchiq_release(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+ int ret = 0;
+ switch (dev) {
+ case VCHIQ_MINOR: {
+ VCHIQ_INSTANCE_T instance = file->private_data;
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq_release: instance=%lx",
+ (unsigned long)instance);
+
+ if (!state) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Ensure videocore is awake to allow termination. */
+ vchiq_use_internal(instance->state, NULL,
+ USE_TYPE_VCHIQ);
+
+ mutex_lock(&instance->completion_mutex);
+
+ /* Wake the completion thread and ask it to exit */
+ instance->closing = 1;
+ up(&instance->insert_event);
+
+ mutex_unlock(&instance->completion_mutex);
+
+ /* Wake the slot handler if the completion queue is full. */
+ up(&instance->remove_event);
+
+ /* Mark all services for termination... */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ USER_SERVICE_T *user_service = service->base.userdata;
+
+ /* Wake the slot handler if the msg queue is full. */
+ up(&user_service->remove_event);
+
+ vchiq_terminate_service_internal(service);
+ unlock_service(service);
+ }
+
+ /* ...and wait for them to die */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance, &i))
+ != NULL) {
+ USER_SERVICE_T *user_service = service->base.userdata;
+
+ down(&service->remove_event);
+
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+
+ spin_lock(&msg_queue_spinlock);
+
+ while (user_service->msg_remove !=
+ user_service->msg_insert) {
+ VCHIQ_HEADER_T *header = user_service->
+ msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&msg_queue_spinlock);
+
+ if (header)
+ vchiq_release_message(
+ service->handle,
+ header);
+ spin_lock(&msg_queue_spinlock);
+ }
+
+ spin_unlock(&msg_queue_spinlock);
+
+ unlock_service(service);
+ }
+
+ /* Release any closed services */
+ while (instance->completion_remove !=
+ instance->completion_insert) {
+ VCHIQ_COMPLETION_DATA_T *completion;
+ VCHIQ_SERVICE_T *service;
+ completion = &instance->completions[
+ instance->completion_remove &
+ (MAX_COMPLETIONS - 1)];
+ service = completion->service_userdata;
+ if (completion->reason == VCHIQ_SERVICE_CLOSED)
+ {
+ USER_SERVICE_T *user_service =
+ service->base.userdata;
+
+ /* Wake any blocked user-thread */
+ if (instance->use_close_delivered)
+ up(&user_service->close_event);
+ unlock_service(service);
+ }
+ instance->completion_remove++;
+ }
+
+ /* Release the PEER service count. */
+ vchiq_release_internal(instance->state, NULL);
+
+ {
+ struct list_head *pos, *next;
+ list_for_each_safe(pos, next,
+ &instance->bulk_waiter_list) {
+ struct bulk_waiter_node *waiter;
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ vchiq_log_info(vchiq_arm_log_level,
+ "bulk_waiter - cleaned up %x "
+ "for pid %d",
+ (unsigned int)waiter, waiter->pid);
+ kfree(waiter);
+ }
+ }
+
+ vchiq_debugfs_remove_instance(instance);
+
+ kfree(instance);
+ file->private_data = NULL;
+ } break;
+
+ default:
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unknown minor device: %d", dev);
+ ret = -ENXIO;
+ }
+
+out:
+ return ret;
+}
+
+/****************************************************************************
+*
+* vchiq_dump
+*
+***************************************************************************/
+
+void
+vchiq_dump(void *dump_context, const char *str, int len)
+{
+ DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
+
+ if (context->actual < context->space) {
+ int copy_bytes;
+ if (context->offset > 0) {
+ int skip_bytes = min(len, (int)context->offset);
+ str += skip_bytes;
+ len -= skip_bytes;
+ context->offset -= skip_bytes;
+ if (context->offset > 0)
+ return;
+ }
+ copy_bytes = min(len, (int)(context->space - context->actual));
+ if (copy_bytes == 0)
+ return;
+ if (copy_to_user(context->buf + context->actual, str,
+ copy_bytes))
+ context->actual = -EFAULT;
+ context->actual += copy_bytes;
+ len -= copy_bytes;
+
+ /* If the terminating NUL is included in the length, then it
+ ** marks the end of a line and should be replaced with a
+ ** newline. */
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
+ char cr = '\n';
+ if (copy_to_user(context->buf + context->actual - 1,
+ &cr, 1))
+ context->actual = -EFAULT;
+ }
+ }
+}
+
+/****************************************************************************
+*
+* vchiq_dump_platform_instances
+*
+***************************************************************************/
+
+void
+vchiq_dump_platform_instances(void *dump_context)
+{
+ VCHIQ_STATE_T *state = vchiq_get_state();
+ char buf[80];
+ int len;
+ int i;
+
+ /* There is no list of instances, so instead scan all services,
+ marking those that have been dumped. */
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ VCHIQ_INSTANCE_T instance;
+
+ if (service && (service->base.callback == service_callback)) {
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
+ }
+ }
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ VCHIQ_INSTANCE_T instance;
+
+ if (service && (service->base.callback == service_callback)) {
+ instance = service->instance;
+ if (instance && !instance->mark) {
+ len = snprintf(buf, sizeof(buf),
+ "Instance %x: pid %d,%s completions "
+ "%d/%d",
+ (unsigned int)instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ instance->mark = 1;
+ }
+ }
+ }
+}
+
+/****************************************************************************
+*
+* vchiq_dump_platform_service_state
+*
+***************************************************************************/
+
+void
+vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
+{
+ USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), " instance %x",
+ (unsigned int)service->instance);
+
+ if ((service->base.callback == service_callback) &&
+ user_service->is_vchi) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ ", %d/%d messages",
+ user_service->msg_insert - user_service->msg_remove,
+ MSG_QUEUE_SIZE);
+
+ if (user_service->dequeue_pending)
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " (dequeue pending)");
+ }
+
+ vchiq_dump(dump_context, buf, len + 1);
+}
+
+/****************************************************************************
+*
+* dump_phys_mem
+*
+***************************************************************************/
+
+static void
+dump_phys_mem(void *virt_addr, uint32_t num_bytes)
+{
+ int rc;
+ uint8_t *end_virt_addr = virt_addr + num_bytes;
+ int num_pages;
+ int offset;
+ int end_offset;
+ int page_idx;
+ int prev_idx;
+ struct page *page;
+ struct page **pages;
+ uint8_t *kmapped_virt_ptr;
+
+ /* Align virt_addr and end_virt_addr to 16-byte boundaries. */
+
+ virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
+ end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
+ ~0x0fuL);
+
+ offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
+ end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
+
+ num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+ if (pages == NULL) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unable to allocate memory for %d pages\n",
+ num_pages);
+ return;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ rc = get_user_pages(current, /* task */
+ current->mm, /* mm */
+ (unsigned long)virt_addr, /* start */
+ num_pages, /* len */
+ 0, /* write */
+ 0, /* force */
+ pages, /* pages (array of page pointers) */
+ NULL); /* vmas */
+ up_read(&current->mm->mmap_sem);
+
+ prev_idx = -1;
+ page = NULL;
+
+ while (offset < end_offset) {
+
+ int page_offset = offset % PAGE_SIZE;
+ page_idx = offset / PAGE_SIZE;
+
+ if (page_idx != prev_idx) {
+
+ if (page != NULL)
+ kunmap(page);
+ page = pages[page_idx];
+ kmapped_virt_ptr = kmap(page);
+
+ prev_idx = page_idx;
+ }
+
+ if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
+ vchiq_log_dump_mem("ph",
+ (uint32_t)(unsigned long)&kmapped_virt_ptr[
+ page_offset],
+ &kmapped_virt_ptr[page_offset], 16);
+
+ offset += 16;
+ }
+ if (page != NULL)
+ kunmap(page);
+
+ for (page_idx = 0; page_idx < num_pages; page_idx++)
+ page_cache_release(pages[page_idx]);
+
+ kfree(pages);
+}
+
+/****************************************************************************
+*
+* vchiq_read
+*
+***************************************************************************/
+
+static ssize_t
+vchiq_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ DUMP_CONTEXT_T context;
+ context.buf = buf;
+ context.actual = 0;
+ context.space = count;
+ context.offset = *ppos;
+
+ vchiq_dump_state(&context, &g_state);
+
+ *ppos += context.actual;
+
+ return context.actual;
+}
+
+VCHIQ_STATE_T *
+vchiq_get_state(void)
+{
+
+ if (g_state.remote == NULL)
+ printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
+ else if (g_state.remote->initialised != 1)
+ printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
+ __func__, g_state.remote->initialised);
+
+ return ((g_state.remote != NULL) &&
+ (g_state.remote->initialised == 1)) ? &g_state : NULL;
+}
+
+static const struct file_operations
+vchiq_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vchiq_ioctl,
+ .open = vchiq_open,
+ .release = vchiq_release,
+ .read = vchiq_read
+};
+
+/*
+ * Autosuspend related functionality
+ */
+
+int
+vchiq_videocore_wanted(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ if (!arm_state) {
+ /* autosuspend not supported - always return wanted */
+ return 1;
+ } else if (arm_state->blocked_count) {
+ return 1;
+ } else if (!arm_state->videocore_use_count) {
+ /* usage count zero - check for override unless we're forcing */
+ if (arm_state->resume_blocked)
+ return 0;
+ else
+ return vchiq_platform_videocore_wanted(state);
+ } else {
+ /* non-zero usage count - videocore still required */
+ return 1;
+ }
+}
+
+static VCHIQ_STATUS_T
+vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header,
+ VCHIQ_SERVICE_HANDLE_T service_user,
+ void *bulk_user)
+{
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s callback reason %d", __func__, reason);
+ return 0;
+}
+
+static int
+vchiq_keepalive_thread_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ VCHIQ_STATUS_T status;
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_SERVICE_HANDLE_T ka_handle;
+
+ VCHIQ_SERVICE_PARAMS_T params = {
+ .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
+ .callback = vchiq_keepalive_vchiq_callback,
+ .version = KEEPALIVE_VER,
+ .version_min = KEEPALIVE_VER_MIN
+ };
+
+ status = vchiq_initialise(&instance);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_initialise failed %d", __func__, status);
+ goto exit;
+ }
+
+ status = vchiq_connect(instance);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_connect failed %d", __func__, status);
+ goto shutdown;
+ }
+
+ status = vchiq_add_service(instance, &params, &ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_open_service failed %d", __func__, status);
+ goto shutdown;
+ }
+
+ while (1) {
+ long rc = 0, uc = 0;
+ if (wait_for_completion_interruptible(&arm_state->ka_evt)
+ != 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+ flush_signals(current);
+ continue;
+ }
+
+ /* read and clear counters. Do release_count then use_count to
+ * prevent getting more releases than uses */
+ rc = atomic_xchg(&arm_state->ka_release_count, 0);
+ uc = atomic_xchg(&arm_state->ka_use_count, 0);
+
+ /* Call use/release service the requisite number of times.
+ * Process use before release so use counts don't go negative */
+ while (uc--) {
+ atomic_inc(&arm_state->ka_use_ack_count);
+ status = vchiq_use_service(ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_use_service error %d",
+ __func__, status);
+ }
+ }
+ while (rc--) {
+ status = vchiq_release_service(ka_handle);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s vchiq_release_service error %d",
+ __func__, status);
+ }
+ }
+ }
+
+shutdown:
+ vchiq_shutdown(instance);
+exit:
+ return 0;
+}
+
+
+
+VCHIQ_STATUS_T
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (arm_state) {
+ rwlock_init(&arm_state->susp_res_lock);
+
+ init_completion(&arm_state->ka_evt);
+ atomic_set(&arm_state->ka_use_count, 0);
+ atomic_set(&arm_state->ka_use_ack_count, 0);
+ atomic_set(&arm_state->ka_release_count, 0);
+
+ init_completion(&arm_state->vc_suspend_complete);
+
+ init_completion(&arm_state->vc_resume_complete);
+ /* Initialise to 'done' state. We only want to block on resume
+ * completion while videocore is suspended. */
+ set_resume_state(arm_state, VC_RESUME_RESUMED);
+
+ init_completion(&arm_state->resume_blocker);
+ /* Initialise to 'done' state. We only want to block on this
+ * completion while resume is blocked */
+ complete_all(&arm_state->resume_blocker);
+
+ init_completion(&arm_state->blocked_blocker);
+ /* Initialise to 'done' state. We only want to block on this
+ * completion while things are waiting on the resume blocker */
+ complete_all(&arm_state->blocked_blocker);
+
+ arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
+ arm_state->suspend_timer_running = 0;
+ init_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer.data = (unsigned long)(state);
+ arm_state->suspend_timer.function = suspend_timer_callback;
+
+ arm_state->first_connect = 0;
+
+ }
+ return status;
+}
+
+/*
+** Functions to modify the state variables;
+** set_suspend_state
+** set_resume_state
+**
+** There are more state variables than we might like, so ensure they remain in
+** step. Suspend and resume state are maintained separately, since most of
+** these state machines can operate independently. However, there are a few
+** states where state transitions in one state machine cause a reset to the
+** other state machine. In addition, there are some completion events which
+** need to occur on state machine reset and end-state(s), so these are also
+** dealt with in these functions.
+**
+** In all states we set the state variable according to the input, but in some
+** cases we perform additional steps outlined below;
+**
+** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
+** The suspend completion is completed after any suspend
+** attempt. When we reset the state machine we also reset
+** the completion. This reset occurs when videocore is
+** resumed, and also if we initiate suspend after a suspend
+** failure.
+**
+** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
+** suspend - ie from this point on we must try to suspend
+** before resuming can occur. We therefore also reset the
+** resume state machine to VC_RESUME_IDLE in this state.
+**
+** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
+** complete_all on the suspend completion to notify
+** anything waiting for suspend to happen.
+**
+** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
+** initiate resume, so no need to alter resume state.
+** We call complete_all on the suspend completion to notify
+** of suspend rejection.
+**
+** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
+** suspend completion and reset the resume state machine.
+**
+** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
+** resume completion is in its 'done' state whenever
+** videocore is running. Therefore, the VC_RESUME_IDLE state
+** implies that videocore is suspended.
+** Hence, any thread which needs to wait until videocore is
+** running can wait on this completion - it will only block
+** if videocore is suspended.
+**
+** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
+** Call complete_all on the resume completion to unblock
+** any threads waiting for resume. Also reset the suspend
+** state machine to its idle state.
+**
+** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
+*/
+
+void
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_suspend_status new_state)
+{
+ /* set the state in all cases */
+ arm_state->vc_suspend_state = new_state;
+
+ /* state specific additional actions */
+ switch (new_state) {
+ case VC_SUSPEND_FORCE_CANCELED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_REJECTED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_FAILED:
+ complete_all(&arm_state->vc_suspend_complete);
+ arm_state->vc_resume_state = VC_RESUME_RESUMED;
+ complete_all(&arm_state->vc_resume_complete);
+ break;
+ case VC_SUSPEND_IDLE:
+ reinit_completion(&arm_state->vc_suspend_complete);
+ break;
+ case VC_SUSPEND_REQUESTED:
+ break;
+ case VC_SUSPEND_IN_PROGRESS:
+ set_resume_state(arm_state, VC_RESUME_IDLE);
+ break;
+ case VC_SUSPEND_SUSPENDED:
+ complete_all(&arm_state->vc_suspend_complete);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+void
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_resume_status new_state)
+{
+ /* set the state in all cases */
+ arm_state->vc_resume_state = new_state;
+
+ /* state specific additional actions */
+ switch (new_state) {
+ case VC_RESUME_FAILED:
+ break;
+ case VC_RESUME_IDLE:
+ reinit_completion(&arm_state->vc_resume_complete);
+ break;
+ case VC_RESUME_REQUESTED:
+ break;
+ case VC_RESUME_IN_PROGRESS:
+ break;
+ case VC_RESUME_RESUMED:
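+ /* Videocore is running again; wake any resume waiters and
+ * return the suspend state machine to idle. */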
+ complete_all(&arm_state->vc_resume_complete);
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+
+/* should be called with the write lock held */
+inline void
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
+{
+ del_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer.expires = jiffies +
+ msecs_to_jiffies(arm_state->suspend_timer_timeout);
+ add_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer_running = 1;
+}
+
+/* should be called with the write lock held */
+static inline void
+stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
+{
+ if (arm_state->suspend_timer_running) {
+ del_timer(&arm_state->suspend_timer);
+ arm_state->suspend_timer_running = 0;
+ }
+}
+
+static inline int
+need_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
+ (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
+ vchiq_videocore_wanted(state);
+}
+
+static int
+block_resume(VCHIQ_ARM_STATE_T *arm_state)
+{
+ int status = VCHIQ_SUCCESS;
+ const unsigned long timeout_val =
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
+ int resume_count = 0;
+
+ /* Allow any threads which were blocked by the last force suspend to
+ * complete if they haven't already. Only give this one shot; if
+ * blocked_count is incremented after blocked_blocker is completed
+ * (which only happens when blocked_count hits 0) then those threads
+ * will have to wait until next time around */
+ if (arm_state->blocked_count) {
+ reinit_completion(&arm_state->blocked_blocker);
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
+ "blocked clients", __func__);
+ if (wait_for_completion_interruptible_timeout(
+ &arm_state->blocked_blocker, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+ "previously blocked clients failed" , __func__);
+ status = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
+ "clients resumed", __func__);
+ write_lock_bh(&arm_state->susp_res_lock);
+ }
+
+ /* We need to wait for resume to complete if it's in process */
+ while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
+ arm_state->vc_resume_state > VC_RESUME_IDLE) {
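+ /* Give up if resume has not completed after two waits */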
+ if (resume_count > 1) {
+ status = VCHIQ_ERROR;
+ vchiq_log_error(vchiq_susp_log_level, "%s waited too "
+ "many times for resume" , __func__);
+ goto out;
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
+ __func__);
+ if (wait_for_completion_interruptible_timeout(
+ &arm_state->vc_resume_complete, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+ "resume failed (%s)", __func__,
+ resume_state_names[arm_state->vc_resume_state +
+ VC_RESUME_NUM_OFFSET]);
+ status = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
+ write_lock_bh(&arm_state->susp_res_lock);
+ resume_count++;
+ }
+ reinit_completion(&arm_state->resume_blocker);
+ arm_state->resume_blocked = 1;
+
+out:
+ return status;
+}
+
+static inline void
+unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
+{
+ complete_all(&arm_state->resume_blocker);
+ arm_state->resume_blocked = 0;
+}
+
+/* Initiate suspend via slot handler. Should be called with the write lock
+ * held */
+VCHIQ_STATUS_T
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ status = VCHIQ_SUCCESS;
+
+
+ switch (arm_state->vc_suspend_state) {
+ case VC_SUSPEND_REQUESTED:
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
+ "requested", __func__);
+ break;
+ case VC_SUSPEND_IN_PROGRESS:
+ vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
+ "progress", __func__);
+ break;
+
+ default:
+ /* We don't expect to be in other states, so log but continue
+ * anyway */
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s unexpected suspend state %s", __func__,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ /* fall through */
+ case VC_SUSPEND_REJECTED:
+ case VC_SUSPEND_FAILED:
+ /* Ensure any idle state actions have been run */
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+ /* fall through */
+ case VC_SUSPEND_IDLE:
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: suspending", __func__);
+ set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
+ /* kick the slot handler thread to initiate suspend */
+ request_poll(state, NULL, 0);
+ break;
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
+ return status;
+}
+
+void
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int susp = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
+ arm_state->vc_resume_state == VC_RESUME_RESUMED) {
+ set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
+ susp = 1;
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (susp)
+ vchiq_platform_suspend(state);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+}
+
+
+static void
+output_timeout_error(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ char service_err[50] = "";
+ int vc_use_count = arm_state->videocore_use_count;
+ int active_services = state->unused_service;
+ int i;
+
+ if (!arm_state->videocore_use_count) {
+ snprintf(service_err, 50, " Videocore usecount is 0");
+ goto output_msg;
+ }
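+ /* Report the first service still holding a use count as the likely
+ * reason the videocore use count has not reached zero */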
+ for (i = 0; i < active_services; i++) {
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
+ if (service_ptr && service_ptr->service_use_count &&
+ (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ snprintf(service_err, 50, " %c%c%c%c(%d) service has "
+ "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
+ service_ptr->base.fourcc),
+ service_ptr->client_id,
+ service_ptr->service_use_count,
+ service_ptr->service_use_count ==
+ vc_use_count ? "" : " (+ more)");
+ break;
+ }
+ }
+
+output_msg:
+ vchiq_log_error(vchiq_susp_log_level,
+ "timed out waiting for vc suspend (%d).%s",
+ arm_state->autosuspend_override, service_err);
+
+}
+
+/* Try to get videocore into suspended state, regardless of autosuspend state.
+** We don't actually force suspend, since videocore may get into a bad state
+** if we force suspend at a bad time. Instead, we wait for autosuspend to
+** determine a good point to suspend. If this doesn't happen within 100ms we
+** report failure.
+**
+** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
+** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
+*/
+VCHIQ_STATUS_T
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ long rc = 0;
+ int repeat = -1;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+
+ status = block_resume(arm_state);
+ if (status != VCHIQ_SUCCESS)
+ goto unlock;
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
+ /* Already suspended - just block resume and exit */
+ vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
+ __func__);
+ status = VCHIQ_SUCCESS;
+ goto unlock;
+ } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
+ /* initiate suspend immediately in the case that we're waiting
+ * for the timeout */
+ stop_suspend_timer(arm_state);
+ if (!vchiq_videocore_wanted(state)) {
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore "
+ "idle, initiating suspend", __func__);
+ status = vchiq_arm_vcsuspend(state);
+ } else if (arm_state->autosuspend_override <
+ FORCE_SUSPEND_FAIL_MAX) {
+ vchiq_log_info(vchiq_susp_log_level, "%s letting "
+ "videocore go idle", __func__);
+ status = VCHIQ_SUCCESS;
+ } else {
+ vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
+ "many times - attempting suspend", __func__);
+ status = vchiq_arm_vcsuspend(state);
+ }
+ } else {
+ vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
+ "in progress - wait for completion", __func__);
+ status = VCHIQ_SUCCESS;
+ }
+
+ /* Wait for suspend to happen due to system idle (not forced) */
+ if (status != VCHIQ_SUCCESS)
+ goto unblock_resume;
+
+ do {
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ rc = wait_for_completion_interruptible_timeout(
+ &arm_state->vc_suspend_complete,
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (rc < 0) {
+ vchiq_log_warning(vchiq_susp_log_level, "%s "
+ "interrupted waiting for suspend", __func__);
+ status = VCHIQ_ERROR;
+ goto unblock_resume;
+ } else if (rc == 0) {
+ if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
+ /* Repeat timeout once if in progress */
+ if (repeat < 0) {
+ repeat = 1;
+ continue;
+ }
+ }
+ arm_state->autosuspend_override++;
+ output_timeout_error(state);
+
+ status = VCHIQ_RETRY;
+ goto unblock_resume;
+ }
+ } while (0 < (repeat--));
+
+ /* Check and report state in case we need to abort ARM suspend */
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
+ status = VCHIQ_RETRY;
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s videocore suspend failed (state %s)", __func__,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ /* Reset the state only if it's still in an error state.
+ * Something could have already initiated another suspend. */
+ if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+
+ goto unblock_resume;
+ }
+
+ /* successfully suspended - unlock and exit */
+ goto unlock;
+
+unblock_resume:
+ /* all error states need to unblock resume before exit */
+ unblock_resume(arm_state);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
+ return status;
+}
+
+void
+vchiq_check_suspend(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
+ arm_state->first_connect &&
+ !vchiq_videocore_wanted(state)) {
+ vchiq_arm_vcsuspend(state);
+ }
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+}
+
+
+int
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int resume = 0;
+ int ret = -1;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ unblock_resume(arm_state);
+ resume = vchiq_check_resume(state);
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (resume) {
+ if (wait_for_completion_interruptible(
+ &arm_state->vc_resume_complete) < 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+ /* failed, cannot accurately derive suspend
+ * state, so exit early. */
+ goto out;
+ }
+ }
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: Videocore remains suspended", __func__);
+ } else {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: Videocore resumed", __func__);
+ ret = 0;
+ }
+ read_unlock_bh(&arm_state->susp_res_lock);
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+/* This function should be called with the write lock held */
+int
+vchiq_check_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int resume = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (need_resume(state)) {
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
+ request_poll(state, NULL, 0);
+ resume = 1;
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return resume;
+}
+
+void
+vchiq_platform_check_resume(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int res = 0;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->wake_address == 0) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: already awake", __func__);
+ goto unlock;
+ }
+ if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s: already resuming", __func__);
+ goto unlock;
+ }
+
+ if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
+ set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
+ res = 1;
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s: not resuming (resume state %s)", __func__,
+ resume_state_names[arm_state->vc_resume_state +
+ VC_RESUME_NUM_OFFSET]);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (res)
+ vchiq_platform_resume(state);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
+ return;
+
+}
+
+
+
+VCHIQ_STATUS_T
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ enum USE_TYPE_E use_type)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ char entity[16];
+ int *entity_uc;
+ int local_uc, local_entity_uc;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (use_type == USE_TYPE_VCHIQ) {
+ sprintf(entity, "VCHIQ: ");
+ entity_uc = &arm_state->peer_use_count;
+ } else if (service) {
+ sprintf(entity, "%c%c%c%c:%03d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ vchiq_log_error(vchiq_susp_log_level, "%s null service "
+ "ptr", __func__);
+ ret = VCHIQ_ERROR;
+ goto out;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ while (arm_state->resume_blocked) {
+ /* If we call 'use' while force suspend is waiting for suspend,
+ * then we're about to block the thread which the force is
+ * waiting to complete, so we're bound to just time out. In this
+ * case, set the suspend state such that the wait will be
+ * canceled, so we can complete as quickly as possible. */
+ if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
+ VC_SUSPEND_IDLE) {
+ set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
+ break;
+ }
+ /* If suspend is already in progress then we need to block */
+ if (!try_wait_for_completion(&arm_state->resume_blocker)) {
+ /* Indicate that there are threads waiting on the resume
+ * blocker. These need to be allowed to complete before
+ * a _second_ call to force suspend can complete,
+ * otherwise low priority threads might never actually
+ * continue */
+ arm_state->blocked_count++;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
+ "blocked - waiting...", __func__, entity);
+ if (wait_for_completion_killable(
+ &arm_state->resume_blocker) != 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s %s "
+ "wait for resume blocker interrupted",
+ __func__, entity);
+ ret = VCHIQ_ERROR;
+ write_lock_bh(&arm_state->susp_res_lock);
+ arm_state->blocked_count--;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
+ "unblocked", __func__, entity);
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (--arm_state->blocked_count == 0)
+ complete_all(&arm_state->blocked_blocker);
+ }
+ }
+
+ stop_suspend_timer(arm_state);
+
+ local_uc = ++arm_state->videocore_use_count;
+ local_entity_uc = ++(*entity_uc);
+
+ /* If there's a pending request which hasn't yet been serviced then
+ * just clear it. If we're past VC_SUSPEND_REQUESTED state then
+ * vc_resume_complete will block until we either resume or fail to
+ * suspend */
+ if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
+ set_suspend_state(arm_state, VC_SUSPEND_IDLE);
+
+ if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
+ set_resume_state(arm_state, VC_RESUME_REQUESTED);
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, local_entity_uc, local_uc);
+ request_poll(state, NULL, 0);
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc, local_uc);
+
+
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ /* Completion is in a done state when we're not suspended, so this won't
+ * block for the non-suspended case. */
+ if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
+ vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
+ __func__, entity);
+ if (wait_for_completion_killable(
+ &arm_state->vc_resume_complete) != 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
+ "resume interrupted", __func__, entity);
+ ret = VCHIQ_ERROR;
+ goto out;
+ }
+ vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
+ entity);
+ }
+
+ if (ret == VCHIQ_SUCCESS) {
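+ /* Send any pending remote 'use active' acknowledgements to
+ * videocore now; if a send fails, return the remaining count
+ * to the counter. */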
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
+ while (ack_cnt && (status == VCHIQ_SUCCESS)) {
+ /* Send the use notify to videocore */
+ status = vchiq_send_remote_use_active(state);
+ if (status == VCHIQ_SUCCESS)
+ ack_cnt--;
+ else
+ atomic_add(ack_cnt,
+ &arm_state->ka_use_ack_count);
+ }
+ }
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ char entity[16];
+ int *entity_uc;
+ int local_uc, local_entity_uc;
+
+ if (!arm_state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ if (service) {
+ sprintf(entity, "%c%c%c%c:%03d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id);
+ entity_uc = &service->service_use_count;
+ } else {
+ sprintf(entity, "PEER: ");
+ entity_uc = &arm_state->peer_use_count;
+ }
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (!arm_state->videocore_use_count || !(*entity_uc)) {
+ /* Don't use BUG_ON - don't allow user thread to crash kernel */
+ WARN_ON(!arm_state->videocore_use_count);
+ WARN_ON(!(*entity_uc));
+ ret = VCHIQ_ERROR;
+ goto unlock;
+ }
+ local_uc = --arm_state->videocore_use_count;
+ local_entity_uc = --(*entity_uc);
+
+ if (!vchiq_videocore_wanted(state)) {
+ if (vchiq_platform_use_suspend_timer() &&
+ !arm_state->resume_blocked) {
+ /* Only use the timer if we're not trying to force
+ * suspend (=> resume_blocked) */
+ start_suspend_timer(arm_state);
+ } else {
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s %s count %d, state count %d - suspending",
+ __func__, entity, *entity_uc,
+ arm_state->videocore_use_count);
+ vchiq_arm_vcsuspend(state);
+ }
+ } else
+ vchiq_log_trace(vchiq_susp_log_level,
+ "%s %s count %d, state count %d",
+ __func__, entity, *entity_uc,
+ arm_state->videocore_use_count);
+
+unlock:
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+out:
+ vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
+ return ret;
+}
+
+void
+vchiq_on_remote_use(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ atomic_inc(&arm_state->ka_use_count);
+ complete(&arm_state->ka_evt);
+}
+
+void
+vchiq_on_remote_release(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+ atomic_inc(&arm_state->ka_release_count);
+ complete(&arm_state->ka_evt);
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
+{
+ return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
+}
+
+VCHIQ_STATUS_T
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
+{
+ return vchiq_release_internal(service->state, service);
+}
+
+VCHIQ_DEBUGFS_NODE_T *
+vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
+{
+ return &instance->debugfs_node;
+}
+
+int
+vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int use_count = 0, i;
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i)) != NULL) {
+ use_count += service->service_use_count;
+ unlock_service(service);
+ }
+ return use_count;
+}
+
+int
+vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
+{
+ return instance->pid;
+}
+
+int
+vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
+{
+ return instance->trace;
+}
+
+void
+vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
+{
+ VCHIQ_SERVICE_T *service;
+ int i;
+ i = 0;
+ while ((service = next_service_by_instance(instance->state,
+ instance, &i)) != NULL) {
+ service->trace = trace;
+ unlock_service(service);
+ }
+ instance->trace = (trace != 0);
+}
+
+static void suspend_timer_callback(unsigned long context)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ if (!arm_state)
+ goto out;
+ vchiq_log_info(vchiq_susp_log_level,
+ "%s - suspend timer expired - check suspend", __func__);
+ vchiq_check_suspend(state);
+out:
+ return;
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_use_internal(service->state, service,
+ USE_TYPE_SERVICE_NO_RESUME);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_use_internal(service->state, service,
+ USE_TYPE_SERVICE);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+VCHIQ_STATUS_T
+vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ if (service) {
+ ret = vchiq_release_internal(service->state, service);
+ unlock_service(service);
+ }
+ return ret;
+}
+
+void
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ int i, j = 0;
+ /* Only dump 64 services */
+ static const int local_max_services = 64;
+ /* If there's more than 64 services, only dump ones with
+ * non-zero counts */
+ int only_nonzero = 0;
+ static const char *nz = "<-- preventing suspend";
+
+ enum vc_suspend_status vc_suspend_state;
+ enum vc_resume_status vc_resume_state;
+ int peer_count;
+ int vc_use_count;
+ int active_services;
+ struct service_data_struct {
+ int fourcc;
+ int clientid;
+ int use_count;
+ } service_data[local_max_services];
+
+ if (!arm_state)
+ return;
+
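+ /* Snapshot the suspend/resume state and per-service use counts under
+ * the read lock; logging happens after the lock is dropped */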
+ read_lock_bh(&arm_state->susp_res_lock);
+ vc_suspend_state = arm_state->vc_suspend_state;
+ vc_resume_state = arm_state->vc_resume_state;
+ peer_count = arm_state->peer_use_count;
+ vc_use_count = arm_state->videocore_use_count;
+ active_services = state->unused_service;
+ if (active_services > local_max_services)
+ only_nonzero = 1;
+
+ for (i = 0; (i < active_services) && (j < local_max_services); i++) {
+ VCHIQ_SERVICE_T *service_ptr = state->services[i];
+ if (!service_ptr)
+ continue;
+
+ if (only_nonzero && !service_ptr->service_use_count)
+ continue;
+
+ if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
+ service_data[j].fourcc = service_ptr->base.fourcc;
+ service_data[j].clientid = service_ptr->client_id;
+ service_data[j++].use_count = service_ptr->
+ service_use_count;
+ }
+ }
+
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ vchiq_log_warning(vchiq_susp_log_level,
+ "-- Videcore suspend state: %s --",
+ suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
+ vchiq_log_warning(vchiq_susp_log_level,
+ "-- Videcore resume state: %s --",
+ resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
+
+ if (only_nonzero)
+ vchiq_log_warning(vchiq_susp_log_level, "Too many active "
+ "services (%d). Only dumping up to first %d services "
+ "with non-zero use-count", active_services,
+ local_max_services);
+
+ for (i = 0; i < j; i++) {
+ vchiq_log_warning(vchiq_susp_log_level,
+ "----- %c%c%c%c:%d service count %d %s",
+ VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
+ service_data[i].clientid,
+ service_data[i].use_count,
+ service_data[i].use_count ? nz : "");
+ }
+ vchiq_log_warning(vchiq_susp_log_level,
+ "----- VCHIQ use count count %d", peer_count);
+ vchiq_log_warning(vchiq_susp_log_level,
+ "--- Overall vchiq instance use count %d", vc_use_count);
+
+ vchiq_dump_platform_use_state(state);
+}
+
+VCHIQ_STATUS_T
+vchiq_check_service(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_ARM_STATE_T *arm_state;
+ VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+
+ if (!service || !service->state)
+ goto out;
+
+ vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
+
+ arm_state = vchiq_platform_get_arm_state(service->state);
+
+ read_lock_bh(&arm_state->susp_res_lock);
+ if (service->service_use_count)
+ ret = VCHIQ_SUCCESS;
+ read_unlock_bh(&arm_state->susp_res_lock);
+
+ if (ret == VCHIQ_ERROR) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s ERROR - %c%c%c%c:%d service count %d, "
+ "state count %d, videocore suspend state %s", __func__,
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->client_id, service->service_use_count,
+ arm_state->videocore_use_count,
+ suspend_state_names[arm_state->vc_suspend_state +
+ VC_SUSPEND_NUM_OFFSET]);
+ vchiq_dump_service_use_state(service->state);
+ }
+out:
+ return ret;
+}
+
+/* stub functions */
+void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
+{
+ (void)state;
+}
+
+void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
+{
+ VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
+ get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
+ write_lock_bh(&arm_state->susp_res_lock);
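+ /* On the first connection, create and start the keepalive thread */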
+ if (!arm_state->first_connect) {
+ char threadname[10];
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(
+ &vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (arm_state->ka_thread == NULL) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "vchiq: FATAL: couldn't create thread %s",
+ threadname);
+ } else {
+ wake_up_process(arm_state->ka_thread);
+ }
+ } else
+ write_unlock_bh(&arm_state->susp_res_lock);
+ }
+}
+
+static int vchiq_probe(struct platform_device *pdev)
+{
+ struct device_node *fw_node;
+ struct rpi_firmware *fw;
+ int err;
+ void *ptr_err;
+
+ fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
+/* Remove comment when booting without Device Tree is no longer supported
+ if (!fw_node) {
+ dev_err(&pdev->dev, "Missing firmware node\n");
+ return -ENOENT;
+ }
+*/
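+ /* The firmware driver may not have probed yet; rpi_firmware_get()
+ * returns NULL in that case, so defer probing. */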
+ fw = rpi_firmware_get(fw_node);
+ if (!fw)
+ return -EPROBE_DEFER;
+
+ platform_set_drvdata(pdev, fw);
+
+ /* create debugfs entries */
+ err = vchiq_debugfs_init();
+ if (err != 0)
+ goto failed_debugfs_init;
+
+ err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
+ if (err != 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unable to allocate device number");
+ goto failed_alloc_chrdev;
+ }
+ cdev_init(&vchiq_cdev, &vchiq_fops);
+ vchiq_cdev.owner = THIS_MODULE;
+ err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
+ if (err != 0) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "Unable to register device");
+ goto failed_cdev_add;
+ }
+
+ /* create sysfs entries */
+ vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
+ ptr_err = vchiq_class;
+ if (IS_ERR(ptr_err))
+ goto failed_class_create;
+
+ vchiq_dev = device_create(vchiq_class, NULL,
+ vchiq_devid, NULL, "vchiq");
+ ptr_err = vchiq_dev;
+ if (IS_ERR(ptr_err))
+ goto failed_device_create;
+
+ err = vchiq_platform_init(pdev, &g_state);
+ if (err != 0)
+ goto failed_platform_init;
+
+ vchiq_log_info(vchiq_arm_log_level,
+ "vchiq: initialised - version %d (min %d), device %d.%d",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN,
+ MAJOR(vchiq_devid), MINOR(vchiq_devid));
+
+ return 0;
+
+failed_platform_init:
+ device_destroy(vchiq_class, vchiq_devid);
+failed_device_create:
+ class_destroy(vchiq_class);
+failed_class_create:
+ cdev_del(&vchiq_cdev);
+ err = PTR_ERR(ptr_err);
+failed_cdev_add:
+ unregister_chrdev_region(vchiq_devid, 1);
+failed_alloc_chrdev:
+ vchiq_debugfs_deinit();
+failed_debugfs_init:
+ vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
+ return err;
+}
+
+static int vchiq_remove(struct platform_device *pdev)
+{
+ device_destroy(vchiq_class, vchiq_devid);
+ class_destroy(vchiq_class);
+ cdev_del(&vchiq_cdev);
+ unregister_chrdev_region(vchiq_devid, 1);
+
+ return 0;
+}
+
+static const struct of_device_id vchiq_of_match[] = {
+ { .compatible = "brcm,bcm2835-vchiq", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vchiq_of_match);
+
+static struct platform_driver vchiq_driver = {
+ .driver = {
+ .name = "bcm2835_vchiq",
+ .owner = THIS_MODULE,
+ .of_match_table = vchiq_of_match,
+ },
+ .probe = vchiq_probe,
+ .remove = vchiq_remove,
+};
+module_platform_driver(vchiq_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
new file mode 100644
index 000000000000..9740e1afbc9d
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -0,0 +1,220 @@
+/**
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_ARM_H
+#define VCHIQ_ARM_H
+
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+#include "vchiq_core.h"
+#include "vchiq_debugfs.h"
+
+
+enum vc_suspend_status {
+ VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
+ VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
+ VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
+ VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
+ VC_SUSPEND_REQUESTED, /* User has requested suspend */
+ VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
+ VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
+};
+
+enum vc_resume_status {
+ VC_RESUME_FAILED = -1, /* Videocore resume failed */
+ VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
+ VC_RESUME_REQUESTED, /* User has requested resume */
+ VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
+ VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
+};
+
+
+enum USE_TYPE_E {
+ USE_TYPE_SERVICE,
+ USE_TYPE_SERVICE_NO_RESUME,
+ USE_TYPE_VCHIQ
+};
+
+
+
+typedef struct vchiq_arm_state_struct {
+ /* Keepalive-related data */
+ struct task_struct *ka_thread;
+ struct completion ka_evt;
+ atomic_t ka_use_count;
+ atomic_t ka_use_ack_count;
+ atomic_t ka_release_count;
+
+ struct completion vc_suspend_complete;
+ struct completion vc_resume_complete;
+
+ rwlock_t susp_res_lock;
+ enum vc_suspend_status vc_suspend_state;
+ enum vc_resume_status vc_resume_state;
+
+ unsigned int wake_address;
+
+ struct timer_list suspend_timer;
+ int suspend_timer_timeout;
+ int suspend_timer_running;
+
+ /* Global use count for videocore.
+ ** This is equal to the sum of the use counts for all services. When
+ ** this hits zero the videocore suspend procedure will be initiated.
+ */
+ int videocore_use_count;
+
+ /* Use count to track requests from videocore peer.
+ ** This use count is not associated with a service, so needs to be
+ ** tracked separately with the state.
+ */
+ int peer_use_count;
+
+ /* Flag to indicate whether resume is blocked. This happens when the
+ ** ARM is suspending
+ */
+ struct completion resume_blocker;
+ int resume_blocked;
+ struct completion blocked_blocker;
+ int blocked_count;
+
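+ /* Count of force-suspend timeouts. Once this reaches
+ ** FORCE_SUSPEND_FAIL_MAX, vchiq_arm_force_suspend will attempt suspend
+ ** even though videocore is still wanted.
+ */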
+ int autosuspend_override;
+
+ /* Flag to indicate that the first vchiq connect has made it through.
+ ** This means that both sides should be fully ready, and we should
+ ** be able to suspend after this point.
+ */
+ int first_connect;
+
+ unsigned long long suspend_start_time;
+ unsigned long long sleep_start_time;
+ unsigned long long resume_start_time;
+ unsigned long long last_wake_time;
+
+} VCHIQ_ARM_STATE_T;
+
+extern int vchiq_arm_log_level;
+extern int vchiq_susp_log_level;
+
+int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATE_T *
+vchiq_get_state(void);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_vcresume(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
+
+extern int
+vchiq_check_resume(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_check_suspend(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_STATUS_T
+vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_STATUS_T
+vchiq_check_service(VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_suspend(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_platform_use_suspend_timer(void);
+
+extern void
+vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
+
+extern VCHIQ_ARM_STATE_T*
+vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
+
+extern int
+vchiq_videocore_wanted(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ enum USE_TYPE_E use_type);
+extern VCHIQ_STATUS_T
+vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_DEBUGFS_NODE_T *
+vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
+
+extern int
+vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
+
+extern int
+vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
+
+extern int
+vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
+
+extern void
+vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
+
+extern void
+set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_suspend_status new_state);
+
+extern void
+set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
+ enum vc_resume_status new_state);
+
+extern void
+start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
+
+
+#endif /* VCHIQ_ARM_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_build_info.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_build_info.h
new file mode 100644
index 000000000000..df645813bdae
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_build_info.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+const char *vchiq_get_build_hostname(void);
+const char *vchiq_get_build_version(void);
+const char *vchiq_get_build_time(void);
+const char *vchiq_get_build_date(void);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h
new file mode 100644
index 000000000000..d2797db702f9
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h
@@ -0,0 +1,69 @@
+/**
+ * Copyright (c) 2010-2014 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CFG_H
+#define VCHIQ_CFG_H
+
+#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
+/* The version of VCHIQ - change with any non-trivial change */
+#define VCHIQ_VERSION 8
+/* The minimum compatible version - update to match VCHIQ_VERSION with any
+** incompatible change */
+#define VCHIQ_VERSION_MIN 3
+
+/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */
+#define VCHIQ_VERSION_LIB_VERSION 7
+
+/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */
+#define VCHIQ_VERSION_CLOSE_DELIVERED 7
+
+/* The version that made it safe to use SYNCHRONOUS mode */
+#define VCHIQ_VERSION_SYNCHRONOUS_MODE 8
+
+#define VCHIQ_MAX_STATES 1
+#define VCHIQ_MAX_SERVICES 4096
+#define VCHIQ_MAX_SLOTS 128
+#define VCHIQ_MAX_SLOTS_PER_SIDE 64
+
+#define VCHIQ_NUM_CURRENT_BULKS 32
+#define VCHIQ_NUM_SERVICE_BULKS 4
+
+#ifndef VCHIQ_ENABLE_DEBUG
+#define VCHIQ_ENABLE_DEBUG 1
+#endif
+
+#ifndef VCHIQ_ENABLE_STATS
+#define VCHIQ_ENABLE_STATS 1
+#endif
+
+#endif /* VCHIQ_CFG_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
new file mode 100644
index 000000000000..5efc62ffb2f5
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -0,0 +1,120 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_connected.h"
+#include "vchiq_core.h"
+#include "vchiq_killable.h"
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#define MAX_CALLBACKS 10
+
+static int g_connected;
+static int g_num_deferred_callbacks;
+static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
+static int g_once_init;
+static struct mutex g_connected_mutex;
+
+/****************************************************************************
+*
+* Function to initialize our lock.
+*
+***************************************************************************/
+
+static void connected_init(void)
+{
+ if (!g_once_init) {
+ mutex_init(&g_connected_mutex);
+ g_once_init = 1;
+ }
+}
+
+/****************************************************************************
+*
+* This function is used to defer client initialization until the vchiq stack
+* has been connected. If the stack is already connected, the callback is
+* made immediately; otherwise it is deferred until
+* vchiq_call_connected_callbacks is called.
+*
+***************************************************************************/
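+
+/* Example registration (illustrative; my_connected_cb is a hypothetical
+ * client callback, not defined here):
+ *
+ *     static void my_connected_cb(void);
+ *     ...
+ *     vchiq_add_connected_callback(my_connected_cb);
+ */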
+
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
+{
+ connected_init();
+
+ if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ return;
+
+ if (g_connected)
+ /* We're already connected. Call the callback immediately. */
+ callback();
+ else {
+ if (g_num_deferred_callbacks >= MAX_CALLBACKS)
+ vchiq_log_error(vchiq_core_log_level,
+ "There already %d callback registered - "
+ "please increase MAX_CALLBACKS",
+ g_num_deferred_callbacks);
+ else {
+ g_deferred_callback[g_num_deferred_callbacks] =
+ callback;
+ g_num_deferred_callbacks++;
+ }
+ }
+ mutex_unlock(&g_connected_mutex);
+}
+
+/****************************************************************************
+*
+* This function is called by the vchiq stack once it has been connected to
+* the videocore and clients can start to use the stack.
+*
+***************************************************************************/
+
+void vchiq_call_connected_callbacks(void)
+{
+ int i;
+
+ connected_init();
+
+ if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+ return;
+
+ for (i = 0; i < g_num_deferred_callbacks; i++)
+ g_deferred_callback[i]();
+
+ g_num_deferred_callbacks = 0;
+ g_connected = 1;
+ mutex_unlock(&g_connected_mutex);
+}
+EXPORT_SYMBOL(vchiq_add_connected_callback);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
new file mode 100644
index 000000000000..863b3e335c1a
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
@@ -0,0 +1,50 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CONNECTED_H
+#define VCHIQ_CONNECTED_H
+
+/* ---- Include Files ----------------------------------------------------- */
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
+
+/* ---- Variable Externs ------------------------------------------------- */
+
+/* ---- Function Prototypes ---------------------------------------------- */
+
+void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
+void vchiq_call_connected_callbacks(void);
+
+#endif /* VCHIQ_CONNECTED_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
new file mode 100644
index 000000000000..2c98da4307df
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -0,0 +1,3934 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_core.h"
+#include "vchiq_killable.h"
+
+#define VCHIQ_SLOT_HANDLER_STACK 8192
+
+#define HANDLE_STATE_SHIFT 12
+
+#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
+#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
+#define SLOT_INDEX_FROM_DATA(state, data) \
+ (((unsigned int)((char *)data - (char *)state->slot_data)) / \
+ VCHIQ_SLOT_SIZE)
+#define SLOT_INDEX_FROM_INFO(state, info) \
+ ((unsigned int)(info - state->slot_info))
+#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
+ ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
+
+#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
+
+#define SRVTRACE_LEVEL(srv) \
+ (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
+#define SRVTRACE_ENABLED(srv, lev) \
+ (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
+
+struct vchiq_open_payload {
+ int fourcc;
+ int client_id;
+ short version;
+ short version_min;
+};
+
+struct vchiq_openack_payload {
+ short version;
+};
+
+enum {
+ QMFLAGS_IS_BLOCKING = (1 << 0),
+ QMFLAGS_NO_MUTEX_LOCK = (1 << 1),
+ QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2)
+};
+
+/* we require this for consistency between endpoints */
+vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
+vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
+vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
+
+/* Run time control of log level, based on KERN_XXX level. */
+int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
+int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
+
+static atomic_t pause_bulks_count = ATOMIC_INIT(0);
+
+static DEFINE_SPINLOCK(service_spinlock);
+DEFINE_SPINLOCK(bulk_waiter_spinlock);
+DEFINE_SPINLOCK(quota_spinlock);
+
+VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
+static unsigned int handle_seq;
+
+static const char *const srvstate_names[] = {
+ "FREE",
+ "HIDDEN",
+ "LISTENING",
+ "OPENING",
+ "OPEN",
+ "OPENSYNC",
+ "CLOSESENT",
+ "CLOSERECVD",
+ "CLOSEWAIT",
+ "CLOSED"
+};
+
+static const char *const reason_names[] = {
+ "SERVICE_OPENED",
+ "SERVICE_CLOSED",
+ "MESSAGE_AVAILABLE",
+ "BULK_TRANSMIT_DONE",
+ "BULK_RECEIVE_DONE",
+ "BULK_TRANSMIT_ABORTED",
+ "BULK_RECEIVE_ABORTED"
+};
+
+static const char *const conn_state_names[] = {
+ "DISCONNECTED",
+ "CONNECTING",
+ "CONNECTED",
+ "PAUSING",
+ "PAUSE_SENT",
+ "PAUSED",
+ "RESUMING",
+ "PAUSE_TIMEOUT",
+ "RESUME_TIMEOUT"
+};
+
+
+static void
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
+
+static const char *msg_type_str(unsigned int msg_type)
+{
+ switch (msg_type) {
+ case VCHIQ_MSG_PADDING: return "PADDING";
+ case VCHIQ_MSG_CONNECT: return "CONNECT";
+ case VCHIQ_MSG_OPEN: return "OPEN";
+ case VCHIQ_MSG_OPENACK: return "OPENACK";
+ case VCHIQ_MSG_CLOSE: return "CLOSE";
+ case VCHIQ_MSG_DATA: return "DATA";
+ case VCHIQ_MSG_BULK_RX: return "BULK_RX";
+ case VCHIQ_MSG_BULK_TX: return "BULK_TX";
+ case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
+ case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
+ case VCHIQ_MSG_PAUSE: return "PAUSE";
+ case VCHIQ_MSG_RESUME: return "RESUME";
+ case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
+ case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
+ }
+ return "???";
+}
+
+static inline void
+vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
+{
+ vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
+ service->srvstate = newstate;
+}
+
+VCHIQ_SERVICE_T *
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service;
+
+ spin_lock(&service_spinlock);
+ service = handle_to_service(handle);
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->handle == handle)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+find_service_by_port(VCHIQ_STATE_T *state, int localport)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
+ spin_lock(&service_spinlock);
+ service = state->services[localport];
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+ }
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid port %d", localport);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle) {
+ VCHIQ_SERVICE_T *service;
+
+ spin_lock(&service_spinlock);
+ service = handle_to_service(handle);
+ if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->handle == handle) &&
+ (service->instance == instance)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle) {
+ VCHIQ_SERVICE_T *service;
+
+ spin_lock(&service_spinlock);
+ service = handle_to_service(handle);
+ if (service &&
+ ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
+ (service->handle == handle) &&
+ (service->instance == instance)) {
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ } else
+ service = NULL;
+ spin_unlock(&service_spinlock);
+
+ if (!service)
+ vchiq_log_info(vchiq_core_log_level,
+ "Invalid service handle 0x%x", handle);
+
+ return service;
+}
+
+VCHIQ_SERVICE_T *
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
+ int *pidx)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ int idx = *pidx;
+
+ spin_lock(&service_spinlock);
+ while (idx < state->unused_service) {
+ VCHIQ_SERVICE_T *srv = state->services[idx++];
+ if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (srv->instance == instance)) {
+ service = srv;
+ BUG_ON(service->ref_count == 0);
+ service->ref_count++;
+ break;
+ }
+ }
+ spin_unlock(&service_spinlock);
+
+ *pidx = idx;
+
+ return service;
+}
+
+void
+lock_service(VCHIQ_SERVICE_T *service)
+{
+ spin_lock(&service_spinlock);
+ BUG_ON(!service || (service->ref_count == 0));
+ if (service)
+ service->ref_count++;
+ spin_unlock(&service_spinlock);
+}
+
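+/* Drop a reference on a service. When the count reaches zero the service
+** must already be in the FREE state; it is then removed from the state's
+** service table, its userdata terminator (if any) is called and the
+** structure is freed. */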
+void
+unlock_service(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+ spin_lock(&service_spinlock);
+ BUG_ON(!service || (service->ref_count == 0));
+ if (service && service->ref_count) {
+ service->ref_count--;
+ if (!service->ref_count) {
+ BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+ state->services[service->localport] = NULL;
+ } else
+ service = NULL;
+ }
+ spin_unlock(&service_spinlock);
+
+ if (service && service->userdata_term)
+ service->userdata_term(service->base.userdata);
+
+ kfree(service);
+}
+
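+/* Simple accessors for clients holding a service handle. Note that only
+** vchiq_get_client_id takes (and drops) a reference on the service. */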
+int
+vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ int id;
+
+ id = service ? service->client_id : 0;
+ if (service)
+ unlock_service(service);
+
+ return id;
+}
+
+void *
+vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
+
+ return service ? service->base.userdata : NULL;
+}
+
+int
+vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_SERVICE_T *service = handle_to_service(handle);
+
+ return service ? service->base.fourcc : 0;
+}
+
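+/* Flag a service as closing. The recycle mutex (and, unless called from the
+** slot handler while a PAUSE is in flight, the slot mutex) is taken and
+** released to synchronise with other threads, and any thread blocked on the
+** service's quota is woken. */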
+static void
+mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
+{
+ VCHIQ_STATE_T *state = service->state;
+ VCHIQ_SERVICE_QUOTA_T *service_quota;
+
+ service->closing = 1;
+
+ /* Synchronise with other threads. */
+ mutex_lock(&state->recycle_mutex);
+ mutex_unlock(&state->recycle_mutex);
+ if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
+ /* If we're pausing then the slot_mutex is held until resume
+ * by the slot handler. Therefore don't try to acquire this
+ * mutex if we're the slot handler and in the pause sent state.
+ * We don't need to in this case anyway. */
+ mutex_lock(&state->slot_mutex);
+ mutex_unlock(&state->slot_mutex);
+ }
+
+ /* Unblock any sending thread. */
+ service_quota = &state->service_quotas[service->localport];
+ up(&service_quota->quota_event);
+}
+
+static void
+mark_service_closing(VCHIQ_SERVICE_T *service)
+{
+ mark_service_closing_internal(service, 0);
+}
+
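+/* Deliver an event to the service's callback. A VCHIQ_ERROR return from the
+** callback is logged and converted to VCHIQ_SUCCESS; VCHIQ_RETRY is passed
+** back to the caller. */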
+static inline VCHIQ_STATUS_T
+make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, void *bulk_userdata)
+{
+ VCHIQ_STATUS_T status;
+ vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
+ service->state->id, service->localport, reason_names[reason],
+ (unsigned int)header, (unsigned int)bulk_userdata);
+ status = service->base.callback(reason, header, service->handle,
+ bulk_userdata);
+ if (status == VCHIQ_ERROR) {
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: ignoring ERROR from callback to service %x",
+ service->state->id, service->handle);
+ status = VCHIQ_SUCCESS;
+ }
+ return status;
+}
+
+inline void
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
+{
+ VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+ vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
+ conn_state_names[oldstate],
+ conn_state_names[newstate]);
+ state->conn_state = newstate;
+ vchiq_platform_conn_state_changed(state, oldstate, newstate);
+}
+
+static inline void
+remote_event_create(REMOTE_EVENT_T *event)
+{
+ event->armed = 0;
+ /* Don't clear the 'fired' flag because it may already have been set
+ ** by the other side. */
+ sema_init(event->event, 0);
+}
+
+static inline void
+remote_event_destroy(REMOTE_EVENT_T *event)
+{
+ (void)event;
+}
+
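+/* Wait for a remote event. The 'armed' flag tells the peer that a signal is
+** needed, while 'fired' records a signal that arrived before the wait.
+** Returns 0 if the wait was interrupted, 1 otherwise. */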
+static inline int
+remote_event_wait(REMOTE_EVENT_T *event)
+{
+ if (!event->fired) {
+ event->armed = 1;
+ dsb();
+ if (!event->fired) {
+ if (down_interruptible(event->event) != 0) {
+ event->armed = 0;
+ return 0;
+ }
+ }
+ event->armed = 0;
+ wmb();
+ }
+
+ event->fired = 0;
+ return 1;
+}
+
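+/* Wake a local waiter on an event without signalling the peer. */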
+static inline void
+remote_event_signal_local(REMOTE_EVENT_T *event)
+{
+ event->armed = 0;
+ up(event->event);
+}
+
+static inline void
+remote_event_poll(REMOTE_EVENT_T *event)
+{
+ if (event->fired && event->armed)
+ remote_event_signal_local(event);
+}
+
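+/* Poll all of the local events, waking any waiter whose event has fired
+** while it was armed. */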
+void
+remote_event_pollall(VCHIQ_STATE_T *state)
+{
+ remote_event_poll(&state->local->sync_trigger);
+ remote_event_poll(&state->local->sync_release);
+ remote_event_poll(&state->local->trigger);
+ remote_event_poll(&state->local->recycle);
+}
+
+/* Round up message sizes so that any space at the end of a slot is always big
+** enough for a header. This relies on header size being a power of two, which
+** has been verified earlier by a static assertion. */
+
+static inline unsigned int
+calc_stride(unsigned int size)
+{
+ /* Allow room for the header */
+ size += sizeof(VCHIQ_HEADER_T);
+
+ /* Round up */
+ return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
+ - 1);
+}
+
+/* Called by the slot handler thread */
+static VCHIQ_SERVICE_T *
+get_listening_service(VCHIQ_STATE_T *state, int fourcc)
+{
+ int i;
+
+ WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ if (service &&
+ (service->public_fourcc == fourcc) &&
+ ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
+ (service->remoteport == VCHIQ_PORT_FREE)))) {
+ lock_service(service);
+ return service;
+ }
+ }
+
+ return NULL;
+}
+
+/* Called by the slot handler thread */
+static VCHIQ_SERVICE_T *
+get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
+{
+ int i;
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
+ && (service->remoteport == port)) {
+ lock_service(service);
+ return service;
+ }
+ }
+ return NULL;
+}
+
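+/* Record a poll request for a service by atomically setting the relevant
+** bits in its poll_flags and in the state's poll_services bitmap, then wake
+** the slot handler so that the request is processed. */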
+inline void
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
+{
+ uint32_t value;
+
+ if (service) {
+ do {
+ value = atomic_read(&service->poll_flags);
+ } while (atomic_cmpxchg(&service->poll_flags, value,
+ value | (1 << poll_type)) != value);
+
+ do {
+ value = atomic_read(&state->poll_services[
+ service->localport>>5]);
+ } while (atomic_cmpxchg(
+ &state->poll_services[service->localport>>5],
+ value, value | (1 << (service->localport & 0x1f)))
+ != value);
+ }
+
+ state->poll_needed = 1;
+ wmb();
+
+ /* ... and ensure the slot handler runs. */
+ remote_event_signal_local(&state->local->trigger);
+}
+
+/* Called from queue_message, by the slot handler and application threads,
+** with slot_mutex held */
+static VCHIQ_HEADER_T *
+reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
+{
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ int tx_pos = state->local_tx_pos;
+ int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
+
+ if (space > slot_space) {
+ VCHIQ_HEADER_T *header;
+ /* Fill the remaining space with padding */
+ WARN_ON(state->tx_data == NULL);
+ header = (VCHIQ_HEADER_T *)
+ (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
+ header->msgid = VCHIQ_MSGID_PADDING;
+ header->size = slot_space - sizeof(VCHIQ_HEADER_T);
+
+ tx_pos += slot_space;
+ }
+
+ /* If necessary, get the next slot. */
+ if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
+ int slot_index;
+
+ /* If there is no free slot... */
+
+ if (down_trylock(&state->slot_available_event) != 0) {
+ /* ...wait for one. */
+
+ VCHIQ_STATS_INC(state, slot_stalls);
+
+ /* But first, flush through the last slot. */
+ state->local_tx_pos = tx_pos;
+ local->tx_pos = tx_pos;
+ remote_event_signal(&state->remote->trigger);
+
+ if (!is_blocking ||
+ (down_interruptible(
+ &state->slot_available_event) != 0))
+ return NULL; /* No space available */
+ }
+
+ BUG_ON(tx_pos ==
+ (state->slot_queue_available * VCHIQ_SLOT_SIZE));
+
+ slot_index = local->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
+ VCHIQ_SLOT_QUEUE_MASK];
+ state->tx_data =
+ (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ }
+
+ state->local_tx_pos = tx_pos + space;
+
+ return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
+}
+
+/* Called by the recycle thread. */
+static void
+process_free_queue(VCHIQ_STATE_T *state)
+{
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+ int slot_queue_available;
+
+ /* Use a read memory barrier to ensure that any state that may have
+ ** been modified by another thread is not masked by stale prefetched
+ ** values. */
+ rmb();
+
+ /* Find slots which have been freed by the other side, and return them
+ ** to the available queue. */
+ slot_queue_available = state->slot_queue_available;
+
+ while (slot_queue_available != local->slot_queue_recycle) {
+ unsigned int pos;
+ int slot_index = local->slot_queue[slot_queue_available++ &
+ VCHIQ_SLOT_QUEUE_MASK];
+ char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
+ int data_found = 0;
+
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
+ state->id, slot_index, (unsigned int)data,
+ local->slot_queue_recycle, slot_queue_available);
+
+ /* Initialise the bitmask for services which have used this
+ ** slot */
+ BITSET_ZERO(service_found);
+
+ pos = 0;
+
+ while (pos < VCHIQ_SLOT_SIZE) {
+ VCHIQ_HEADER_T *header =
+ (VCHIQ_HEADER_T *)(data + pos);
+ int msgid = header->msgid;
+ if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
+ int port = VCHIQ_MSG_SRCPORT(msgid);
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[port];
+ int count;
+ spin_lock(&quota_spinlock);
+ count = service_quota->message_use_count;
+ if (count > 0)
+ service_quota->message_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count == service_quota->message_quota)
+ /* Signal the service that it
+ ** has dropped below its quota
+ */
+ up(&service_quota->quota_event);
+ else if (count == 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "service %d "
+ "message_use_count=%d "
+ "(header %x, msgid %x, "
+ "header->msgid %x, "
+ "header->size %x)",
+ port,
+ service_quota->
+ message_use_count,
+ (unsigned int)header, msgid,
+ header->msgid,
+ header->size);
+ WARN(1, "invalid message use count\n");
+ }
+ if (!BITSET_IS_SET(service_found, port)) {
+ /* Set the found bit for this service */
+ BITSET_SET(service_found, port);
+
+ spin_lock(&quota_spinlock);
+ count = service_quota->slot_use_count;
+ if (count > 0)
+ service_quota->slot_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+
+ if (count > 0) {
+ /* Signal the service in case
+ ** it has dropped below its
+ ** quota */
+ up(&service_quota->quota_event);
+ vchiq_log_trace(
+ vchiq_core_log_level,
+ "%d: pfq:%d %x@%x - "
+ "slot_use->%d",
+ state->id, port,
+ header->size,
+ (unsigned int)header,
+ count - 1);
+ } else {
+ vchiq_log_error(
+ vchiq_core_log_level,
+ "service %d "
+ "slot_use_count"
+ "=%d (header %x"
+ ", msgid %x, "
+ "header->msgid"
+ " %x, header->"
+ "size %x)",
+ port, count,
+ (unsigned int)header,
+ msgid,
+ header->msgid,
+ header->size);
+ WARN(1, "bad slot use count\n");
+ }
+ }
+
+ data_found = 1;
+ }
+
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "pfq - pos %x: header %x, msgid %x, "
+ "header->msgid %x, header->size %x",
+ pos, (unsigned int)header, msgid,
+ header->msgid, header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+
+ if (data_found) {
+ int count;
+ spin_lock(&quota_spinlock);
+ count = state->data_use_count;
+ if (count > 0)
+ state->data_use_count =
+ count - 1;
+ spin_unlock(&quota_spinlock);
+ if (count == state->data_quota)
+ up(&state->data_quota_event);
+ }
+
+ state->slot_queue_available = slot_queue_available;
+ up(&state->slot_available_event);
+ }
+}
+
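+/* Queue a message in the transmit slot ring. Space is reserved (padding out
+** the current slot if the message does not fit), quotas are enforced for
+** DATA messages, the element data is copied in, and the peer is signalled. */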
+/* Called by the slot handler and application threads */
+static VCHIQ_STATUS_T
+queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ int msgid, const VCHIQ_ELEMENT_T *elements,
+ int count, int size, int flags)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
+ VCHIQ_HEADER_T *header;
+ int type = VCHIQ_MSG_TYPE(msgid);
+
+ unsigned int stride;
+
+ local = state->local;
+
+ stride = calc_stride(size);
+
+ WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
+
+ if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
+ (mutex_lock_interruptible(&state->slot_mutex) != 0))
+ return VCHIQ_RETRY;
+
+ if (type == VCHIQ_MSG_DATA) {
+ int tx_end_index;
+
+ BUG_ON(!service);
+ BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
+
+ if (service->closing) {
+ /* The service has been closed */
+ mutex_unlock(&state->slot_mutex);
+ return VCHIQ_ERROR;
+ }
+
+ service_quota = &state->service_quotas[service->localport];
+
+ spin_lock(&quota_spinlock);
+
+ /* Ensure this service doesn't use more than its quota of
+ ** messages or slots */
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+
+ /* Ensure data messages don't use more than their quota of
+ ** slots */
+ while ((tx_end_index != state->previous_data_index) &&
+ (state->data_use_count == state->data_quota)) {
+ VCHIQ_STATS_INC(state, data_stalls);
+ spin_unlock(&quota_spinlock);
+ mutex_unlock(&state->slot_mutex);
+
+ if (down_interruptible(&state->data_quota_event)
+ != 0)
+ return VCHIQ_RETRY;
+
+ mutex_lock(&state->slot_mutex);
+ spin_lock(&quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+ if ((tx_end_index == state->previous_data_index) ||
+ (state->data_use_count < state->data_quota)) {
+ /* Pass the signal on to other waiters */
+ up(&state->data_quota_event);
+ break;
+ }
+ }
+
+ while ((service_quota->message_use_count ==
+ service_quota->message_quota) ||
+ ((tx_end_index != service_quota->previous_tx_index) &&
+ (service_quota->slot_use_count ==
+ service_quota->slot_quota))) {
+ spin_unlock(&quota_spinlock);
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: qm:%d %s,%x - quota stall "
+ "(msg %d, slot %d)",
+ state->id, service->localport,
+ msg_type_str(type), size,
+ service_quota->message_use_count,
+ service_quota->slot_use_count);
+ VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
+ mutex_unlock(&state->slot_mutex);
+ if (down_interruptible(&service_quota->quota_event)
+ != 0)
+ return VCHIQ_RETRY;
+ if (service->closing)
+ return VCHIQ_ERROR;
+ if (mutex_lock_interruptible(&state->slot_mutex) != 0)
+ return VCHIQ_RETRY;
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
+ /* The service has been closed */
+ mutex_unlock(&state->slot_mutex);
+ return VCHIQ_ERROR;
+ }
+ spin_lock(&quota_spinlock);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
+ state->local_tx_pos + stride - 1);
+ }
+
+ spin_unlock(&quota_spinlock);
+ }
+
+ header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
+
+ if (!header) {
+ if (service)
+ VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
+ /* In the event of a failure, return the mutex to the
+ state it was in */
+ if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
+ mutex_unlock(&state->slot_mutex);
+ return VCHIQ_RETRY;
+ }
+
+ if (type == VCHIQ_MSG_DATA) {
+ int i, pos;
+ int tx_end_index;
+ int slot_use_count;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: qm %s@%x,%x (%d->%d)",
+ state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+
+ BUG_ON(!service);
+ BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
+
+ for (i = 0, pos = 0; i < (unsigned int)count;
+ pos += elements[i++].size)
+ if (elements[i].size) {
+ if (vchiq_copy_from_user
+ (header->data + pos, elements[i].data,
+ (size_t) elements[i].size) !=
+ VCHIQ_SUCCESS) {
+ mutex_unlock(&state->slot_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
+ error_count);
+ return VCHIQ_ERROR;
+ }
+ if (i == 0) {
+ if (SRVTRACE_ENABLED(service,
+ VCHIQ_LOG_INFO))
+ vchiq_log_dump_mem("Sent", 0,
+ header->data + pos,
+ min(64u,
+ elements[0].size));
+ }
+ }
+
+ spin_lock(&quota_spinlock);
+ service_quota->message_use_count++;
+
+ tx_end_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
+
+ /* If this transmission can't fit in the last slot used by any
+ ** service, the data_use_count must be increased. */
+ if (tx_end_index != state->previous_data_index) {
+ state->previous_data_index = tx_end_index;
+ state->data_use_count++;
+ }
+
+ /* If this isn't the same slot last used by this service,
+ ** the service's slot_use_count must be increased. */
+ if (tx_end_index != service_quota->previous_tx_index) {
+ service_quota->previous_tx_index = tx_end_index;
+ slot_use_count = ++service_quota->slot_use_count;
+ } else {
+ slot_use_count = 0;
+ }
+
+ spin_unlock(&quota_spinlock);
+
+ if (slot_use_count)
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
+ state->id, service->localport,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
+ slot_use_count, header);
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: qm %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+ if (size != 0) {
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
+ memcpy(header->data, elements[0].data,
+ elements[0].size);
+ }
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->msgid = msgid;
+ header->size = size;
+
+ {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid),
+ size);
+ }
+
+ /* Make sure the new header is visible to the peer. */
+ wmb();
+
+ /* Make the new tx_pos visible to the peer. */
+ local->tx_pos = state->local_tx_pos;
+ wmb();
+
+ if (service && (type == VCHIQ_MSG_CLOSE))
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
+
+ if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
+ mutex_unlock(&state->slot_mutex);
+
+ remote_event_signal(&state->remote->trigger);
+
+ return VCHIQ_SUCCESS;
+}
+
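+/* Queue a message in the dedicated synchronous slot. The call waits for the
+** peer to release the slot before writing, and the sync mutex is released
+** on completion unless a PAUSE message is being sent. */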
+/* Called by the slot handler and application threads */
+static VCHIQ_STATUS_T
+queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
+ int msgid, const VCHIQ_ELEMENT_T *elements,
+ int count, int size, int is_blocking)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_HEADER_T *header;
+
+ local = state->local;
+
+ if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
+ (mutex_lock_interruptible(&state->sync_mutex) != 0))
+ return VCHIQ_RETRY;
+
+ remote_event_wait(&local->sync_release);
+
+ rmb();
+
+ header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
+ local->slot_sync);
+
+ {
+ int oldmsgid = header->msgid;
+ if (oldmsgid != VCHIQ_MSGID_PADDING)
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: qms - msgid %x, not PADDING",
+ state->id, oldmsgid);
+ }
+
+ if (service) {
+ int i, pos;
+
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+
+ for (i = 0, pos = 0; i < (unsigned int)count;
+ pos += elements[i++].size)
+ if (elements[i].size) {
+ if (vchiq_copy_from_user
+ (header->data + pos, elements[i].data,
+ (size_t) elements[i].size) !=
+ VCHIQ_SUCCESS) {
+ mutex_unlock(&state->sync_mutex);
+ VCHIQ_SERVICE_STATS_INC(service,
+ error_count);
+ return VCHIQ_ERROR;
+ }
+ if (i == 0) {
+ if (vchiq_sync_log_level >=
+ VCHIQ_LOG_TRACE)
+ vchiq_log_dump_mem("Sent Sync",
+ 0, header->data + pos,
+ min(64u,
+ elements[0].size));
+ }
+ }
+
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
+ } else {
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: qms %s@%x,%x (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ (unsigned int)header, size,
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid));
+ if (size != 0) {
+ WARN_ON(!((count == 1) && (size == elements[0].size)));
+ memcpy(header->data, elements[0].data,
+ elements[0].size);
+ }
+ VCHIQ_STATS_INC(state, ctrl_tx_count);
+ }
+
+ header->size = size;
+ header->msgid = msgid;
+
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+
+ vchiq_log_trace(vchiq_sync_log_level,
+ "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid),
+ size);
+ }
+
+ /* Make sure the new header is visible to the peer. */
+ wmb();
+
+ remote_event_signal(&state->remote->sync_trigger);
+
+ if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
+ mutex_unlock(&state->sync_mutex);
+
+ return VCHIQ_SUCCESS;
+}
+
+static inline void
+claim_slot(VCHIQ_SLOT_INFO_T *slot)
+{
+ slot->use_count++;
+}
+
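+/* Release a claim on a slot. A claimed message header is unmarked to prevent
+** a double release. When the release count catches up with the use count the
+** slot is appended to the remote recycle queue and the peer is signalled. */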
+static void
+release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
+{
+ int release_count;
+
+ mutex_lock(&state->recycle_mutex);
+
+ if (header) {
+ int msgid = header->msgid;
+ if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
+ (service && service->closing)) {
+ mutex_unlock(&state->recycle_mutex);
+ return;
+ }
+
+ /* Rewrite the message header to prevent a double
+ ** release */
+ header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
+ }
+
+ release_count = slot_info->release_count;
+ slot_info->release_count = ++release_count;
+
+ if (release_count == slot_info->use_count) {
+ int slot_queue_recycle;
+ /* Add to the freed queue */
+
+ /* A read barrier is necessary here to prevent speculative
+ ** fetches of remote->slot_queue_recycle from overtaking the
+ ** mutex. */
+ rmb();
+
+ slot_queue_recycle = state->remote->slot_queue_recycle;
+ state->remote->slot_queue[slot_queue_recycle &
+ VCHIQ_SLOT_QUEUE_MASK] =
+ SLOT_INDEX_FROM_INFO(state, slot_info);
+ state->remote->slot_queue_recycle = slot_queue_recycle + 1;
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: release_slot %d - recycle->%x",
+ state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
+
+ /* A write barrier is necessary, but remote_event_signal
+ ** contains one. */
+ remote_event_signal(&state->remote->recycle);
+ }
+
+ mutex_unlock(&state->recycle_mutex);
+}
+
+/* Called by the slot handler - don't hold the bulk mutex */
+static VCHIQ_STATUS_T
+notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
+ int retry_poll)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: nb:%d %cx - p=%x rn=%x r=%x",
+ service->state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->process, queue->remote_notify, queue->remove);
+
+ if (service->state->is_master) {
+ while (queue->remote_notify != queue->process) {
+ VCHIQ_BULK_T *bulk =
+ &queue->bulks[BULK_INDEX(queue->remote_notify)];
+ int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
+ int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
+ service->remoteport);
+ VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
+ /* Only reply to non-dummy bulk requests */
+ if (bulk->remote_data) {
+ status = queue_message(service->state, NULL,
+ msgid, &element, 1, 4, 0);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+ queue->remote_notify++;
+ }
+ } else {
+ queue->remote_notify = queue->process;
+ }
+
+ if (status == VCHIQ_SUCCESS) {
+ while (queue->remove != queue->remote_notify) {
+ VCHIQ_BULK_T *bulk =
+ &queue->bulks[BULK_INDEX(queue->remove)];
+
+ /* Only generate callbacks for non-dummy bulk
+ ** requests, and non-terminated services */
+ if (bulk->data && service->instance) {
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
+ if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_tx_bytes,
+ bulk->actual);
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service,
+ bulk_rx_bytes,
+ bulk->actual);
+ }
+ } else {
+ VCHIQ_SERVICE_STATS_INC(service,
+ bulk_aborted_count);
+ }
+ if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ struct bulk_waiter *waiter;
+ spin_lock(&bulk_waiter_spinlock);
+ waiter = bulk->userdata;
+ if (waiter) {
+ waiter->actual = bulk->actual;
+ up(&waiter->event);
+ }
+ spin_unlock(&bulk_waiter_spinlock);
+ } else if (bulk->mode ==
+ VCHIQ_BULK_MODE_CALLBACK) {
+ VCHIQ_REASON_T reason = (bulk->dir ==
+ VCHIQ_BULK_TRANSMIT) ?
+ ((bulk->actual ==
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
+ VCHIQ_BULK_TRANSMIT_ABORTED :
+ VCHIQ_BULK_TRANSMIT_DONE) :
+ ((bulk->actual ==
+ VCHIQ_BULK_ACTUAL_ABORTED) ?
+ VCHIQ_BULK_RECEIVE_ABORTED :
+ VCHIQ_BULK_RECEIVE_DONE);
+ status = make_service_callback(service,
+ reason, NULL, bulk->userdata);
+ if (status == VCHIQ_RETRY)
+ break;
+ }
+ }
+
+ queue->remove++;
+ up(&service->bulk_remove_event);
+ }
+ if (!retry_poll)
+ status = VCHIQ_SUCCESS;
+ }
+
+ if (status == VCHIQ_RETRY)
+ request_poll(service->state, service,
+ (queue == &service->bulk_tx) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+
+ return status;
+}
+
+/* Called by the slot handler thread */
+static void
+poll_services(VCHIQ_STATE_T *state)
+{
+ int group, i;
+
+ for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
+ uint32_t flags;
+ flags = atomic_xchg(&state->poll_services[group], 0);
+ for (i = 0; flags; i++) {
+ if (flags & (1 << i)) {
+ VCHIQ_SERVICE_T *service =
+ find_service_by_port(state,
+ (group<<5) + i);
+ uint32_t service_flags;
+ flags &= ~(1 << i);
+ if (!service)
+ continue;
+ service_flags =
+ atomic_xchg(&service->poll_flags, 0);
+ if (service_flags &
+ (1 << VCHIQ_POLL_REMOVE)) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: ps - remove %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+
+ /* Make it look like a client, because
+ it must be removed and not left in
+ the LISTENING state. */
+ service->public_fourcc =
+ VCHIQ_FOURCC_INVALID;
+
+ if (vchiq_close_service_internal(
+ service, 0/*!close_recvd*/) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service,
+ VCHIQ_POLL_REMOVE);
+ } else if (service_flags &
+ (1 << VCHIQ_POLL_TERMINATE)) {
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: ps - terminate %d<->%d",
+ state->id, service->localport,
+ service->remoteport);
+ if (vchiq_close_service_internal(
+ service, 0/*!close_recvd*/) !=
+ VCHIQ_SUCCESS)
+ request_poll(state, service,
+ VCHIQ_POLL_TERMINATE);
+ }
+ if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
+ notify_bulks(service,
+ &service->bulk_tx,
+ 1/*retry_poll*/);
+ if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
+ notify_bulks(service,
+ &service->bulk_rx,
+ 1/*retry_poll*/);
+ unlock_service(service);
+ }
+ }
+ }
+}
+
+/* Called by the slot handler or application threads, holding the bulk mutex. */
+static int
+resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
+{
+ VCHIQ_STATE_T *state = service->state;
+ int resolved = 0;
+ int rc;
+
+ while ((queue->process != queue->local_insert) &&
+ (queue->process != queue->remote_insert)) {
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: rb:%d %cx - li=%x ri=%x p=%x",
+ state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->local_insert, queue->remote_insert,
+ queue->process);
+
+ WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
+ WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
+
+ rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
+ if (rc != 0)
+ break;
+
+ vchiq_transfer_bulk(bulk);
+ mutex_unlock(&state->bulk_transfer_mutex);
+
+ if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
+ const char *header = (queue == &service->bulk_tx) ?
+ "Send Bulk to" : "Recv Bulk from";
+ if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "%s %c%c%c%c d:%d len:%d %x<->%x",
+ header,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ (unsigned int)bulk->data,
+ (unsigned int)bulk->remote_data);
+ else
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
+ " rx len:%d %x<->%x",
+ header,
+ VCHIQ_FOURCC_AS_4CHARS(
+ service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ bulk->remote_size,
+ (unsigned int)bulk->data,
+ (unsigned int)bulk->remote_data);
+ }
+
+ vchiq_complete_bulk(bulk);
+ queue->process++;
+ resolved++;
+ }
+ return resolved;
+}
+
+/* Called with the bulk_mutex held */
+static void
+abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
+{
+ int is_tx = (queue == &service->bulk_tx);
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: aob:%d %cx - li=%x ri=%x p=%x",
+ service->state->id, service->localport, is_tx ? 't' : 'r',
+ queue->local_insert, queue->remote_insert, queue->process);
+
+ WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
+ WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
+
+ while ((queue->process != queue->local_insert) ||
+ (queue->process != queue->remote_insert)) {
+ VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
+
+ if (queue->process == queue->remote_insert) {
+ /* fabricate a matching dummy bulk */
+ bulk->remote_data = NULL;
+ bulk->remote_size = 0;
+ queue->remote_insert++;
+ }
+
+ if (queue->process != queue->local_insert) {
+ vchiq_complete_bulk(bulk);
+
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
+ "rx len:%d",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->remoteport,
+ bulk->size,
+ bulk->remote_size);
+ } else {
+ /* fabricate a matching dummy bulk */
+ bulk->data = NULL;
+ bulk->size = 0;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+ bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
+ VCHIQ_BULK_RECEIVE;
+ queue->local_insert++;
+ }
+
+ queue->process++;
+ }
+}
+
+/* Called from the slot handler thread */
+static void
+pause_bulks(VCHIQ_STATE_T *state)
+{
+ if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
+ WARN_ON_ONCE(1);
+ atomic_set(&pause_bulks_count, 1);
+ return;
+ }
+
+ /* Block bulk transfers from all services */
+ mutex_lock(&state->bulk_transfer_mutex);
+}
+
+/* Called from the slot handler thread */
+static void
+resume_bulks(VCHIQ_STATE_T *state)
+{
+ int i;
+ if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
+ WARN_ON_ONCE(1);
+ atomic_set(&pause_bulks_count, 0);
+ return;
+ }
+
+ /* Allow bulk transfers from all services */
+ mutex_unlock(&state->bulk_transfer_mutex);
+
+ if (state->deferred_bulks == 0)
+ return;
+
+ /* Deal with any bulks which had to be deferred because the state was
+ * paused. Don't try to match the number of deferred bulks, in case
+ * something has closed the service in the interim - just process all
+ * bulk queues for all services. */
+ vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
+ __func__, state->deferred_bulks);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = state->services[i];
+ int resolved_rx = 0;
+ int resolved_tx = 0;
+ if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
+ continue;
+
+ mutex_lock(&service->bulk_mutex);
+ resolved_rx = resolve_bulks(service, &service->bulk_rx);
+ resolved_tx = resolve_bulks(service, &service->bulk_tx);
+ mutex_unlock(&service->bulk_mutex);
+ if (resolved_rx)
+ notify_bulks(service, &service->bulk_rx, 1);
+ if (resolved_tx)
+ notify_bulks(service, &service->bulk_tx, 1);
+ }
+ state->deferred_bulks = 0;
+}
+
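+/* Handle an incoming OPEN request: find a listening service with a matching
+** fourcc, check the version range, acknowledge with an OPENACK (or reject
+** with a CLOSE) and notify the local service that it has been opened.
+** Returns 0 if a reply could not be sent and the message must be retried. */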
+static int
+parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
+{
+ VCHIQ_SERVICE_T *service = NULL;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ msgid = header->msgid;
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+ if (size >= sizeof(struct vchiq_open_payload)) {
+ const struct vchiq_open_payload *payload =
+ (struct vchiq_open_payload *)header->data;
+ unsigned int fourcc;
+
+ fourcc = payload->fourcc;
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPEN@%x (%d->'%c%c%c%c')",
+ state->id, (unsigned int)header,
+ localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc));
+
+ service = get_listening_service(state, fourcc);
+
+ if (service) {
+ /* A matching service exists */
+ short version = payload->version;
+ short version_min = payload->version_min;
+ if ((service->version < version_min) ||
+ (version < service->version_min)) {
+ /* Version mismatch */
+ vchiq_loud_error_header();
+ vchiq_loud_error("%d: service %d (%c%c%c%c) "
+ "version mismatch - local (%d, min %d)"
+ " vs. remote (%d, min %d)",
+ state->id, service->localport,
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ service->version, service->version_min,
+ version, version_min);
+ vchiq_loud_error_footer();
+ unlock_service(service);
+ service = NULL;
+ goto fail_open;
+ }
+ service->peer_version = version;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ struct vchiq_openack_payload ack_payload = {
+ service->version
+ };
+ VCHIQ_ELEMENT_T body = {
+ &ack_payload,
+ sizeof(ack_payload)
+ };
+
+ if (state->version_common <
+ VCHIQ_VERSION_SYNCHRONOUS_MODE)
+ service->sync = 0;
+
+ /* Acknowledge the OPEN */
+ if (service->sync &&
+ (state->version_common >=
+ VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
+ if (queue_message_sync(state, NULL,
+ VCHIQ_MAKE_MSG(
+ VCHIQ_MSG_OPENACK,
+ service->localport,
+ remoteport),
+ &body, 1, sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ } else {
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(
+ VCHIQ_MSG_OPENACK,
+ service->localport,
+ remoteport),
+ &body, 1, sizeof(ack_payload),
+ 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+ }
+
+ /* The service is now open */
+ vchiq_set_service_state(service,
+ service->sync ? VCHIQ_SRVSTATE_OPENSYNC
+ : VCHIQ_SRVSTATE_OPEN);
+ }
+
+ service->remoteport = remoteport;
+ service->client_id = ((int *)header->data)[1];
+ if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
+ NULL, NULL) == VCHIQ_RETRY) {
+ /* Bail out if not ready */
+ service->remoteport = VCHIQ_PORT_FREE;
+ goto bail_not_ready;
+ }
+
+ /* Success - the message has been dealt with */
+ unlock_service(service);
+ return 1;
+ }
+ }
+
+fail_open:
+ /* No available service, or an invalid request - send a CLOSE */
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
+ NULL, 0, 0, 0) == VCHIQ_RETRY)
+ goto bail_not_ready;
+
+ return 1;
+
+bail_not_ready:
+ if (service)
+ unlock_service(service);
+
+ return 0;
+}
+
+/* Called by the slot handler thread */
+static void
+parse_rx_slots(VCHIQ_STATE_T *state)
+{
+ VCHIQ_SHARED_STATE_T *remote = state->remote;
+ VCHIQ_SERVICE_T *service = NULL;
+ int tx_pos;
+ DEBUG_INITIALISE(state->local)
+
+ tx_pos = remote->tx_pos;
+
+ while (state->rx_pos != tx_pos) {
+ VCHIQ_HEADER_T *header;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (!state->rx_data) {
+ int rx_index;
+ WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
+ rx_index = remote->slot_queue[
+ SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
+ VCHIQ_SLOT_QUEUE_MASK];
+ state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
+ rx_index);
+ state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
+
+ /* Initialise use_count to one, and increment
+ ** release_count at the end of the slot to avoid
+ ** releasing the slot prematurely. */
+ state->rx_info->use_count = 1;
+ state->rx_info->release_count = 0;
+ }
+
+ header = (VCHIQ_HEADER_T *)(state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ DEBUG_VALUE(PARSE_HEADER, (int)header);
+ msgid = header->msgid;
+ DEBUG_VALUE(PARSE_MSGID, msgid);
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ if (type != VCHIQ_MSG_DATA)
+ VCHIQ_STATS_INC(state, ctrl_rx_count);
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ case VCHIQ_MSG_CLOSE:
+ case VCHIQ_MSG_DATA:
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX:
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ service = find_service_by_port(state, localport);
+ if ((!service ||
+ ((service->remoteport != remoteport) &&
+ (service->remoteport != VCHIQ_PORT_FREE))) &&
+ (localport == 0) &&
+ (type == VCHIQ_MSG_CLOSE)) {
+ /* This could be a CLOSE from a client which
+ hadn't yet received the OPENACK - look for
+ the connected service */
+ if (service)
+ unlock_service(service);
+ service = get_connected_service(state,
+ remoteport);
+ if (service)
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) - "
+ "found connected service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ service->localport);
+ }
+
+ if (!service) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) - "
+ "invalid/closed service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport, localport);
+ goto skip_message;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ vchiq_log_info(SRVTRACE_LEVEL(service),
+ "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
+ "len:%d",
+ msg_type_str(type), type,
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
+ min(64, size));
+ }
+
+ if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
+ > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "header %x (msgid %x) - size %x too big for "
+ "slot",
+ (unsigned int)header, (unsigned int)msgid,
+ (unsigned int)size);
+ WARN(1, "oversized for slot\n");
+ }
+
+ switch (type) {
+ case VCHIQ_MSG_OPEN:
+ WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
+ if (!parse_open(state, header))
+ goto bail_not_ready;
+ break;
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
+ state->id, (unsigned int)header, size,
+ remoteport, localport, service->peer_version);
+ if (service->srvstate ==
+ VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_OPEN);
+ up(&service->remove_event);
+ } else
+ vchiq_log_error(vchiq_core_log_level,
+ "OPENACK received in state %s",
+ srvstate_names[service->srvstate]);
+ break;
+ case VCHIQ_MSG_CLOSE:
+ WARN_ON(size != 0); /* There should be no data */
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CLOSE@%x (%d->%d)",
+ state->id, (unsigned int)header,
+ remoteport, localport);
+
+ mark_service_closing_internal(service, 1);
+
+ if (vchiq_close_service_internal(service,
+ 1/*close_recvd*/) == VCHIQ_RETRY)
+ goto bail_not_ready;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "Close Service %c%c%c%c s:%u d:%d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->localport,
+ service->remoteport);
+ break;
+ case VCHIQ_MSG_DATA:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs DATA@%x,%x (%d->%d)",
+ state->id, (unsigned int)header, size,
+ remoteport, localport);
+
+ if ((service->remoteport == remoteport)
+ && (service->srvstate ==
+ VCHIQ_SRVSTATE_OPEN)) {
+ header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
+ claim_slot(state->rx_info);
+ DEBUG_TRACE(PARSE_LINE);
+ if (make_service_callback(service,
+ VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
+ size);
+ } else {
+ VCHIQ_STATS_INC(state, error_count);
+ }
+ break;
+ case VCHIQ_MSG_CONNECT:
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs CONNECT@%x",
+ state->id, (unsigned int)header);
+ state->version_common = ((VCHIQ_SLOT_ZERO_T *)
+ state->slot_data)->version;
+ up(&state->connect);
+ break;
+ case VCHIQ_MSG_BULK_RX:
+ case VCHIQ_MSG_BULK_TX: {
+ VCHIQ_BULK_QUEUE_T *queue;
+ WARN_ON(!state->is_master);
+ queue = (type == VCHIQ_MSG_BULK_RX) ?
+ &service->bulk_tx : &service->bulk_rx;
+ if ((service->remoteport == remoteport)
+ && (service->srvstate ==
+ VCHIQ_SRVSTATE_OPEN)) {
+ VCHIQ_BULK_T *bulk;
+ int resolved = 0;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (mutex_lock_interruptible(
+ &service->bulk_mutex) != 0) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+
+ WARN_ON(!(queue->remote_insert < queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS));
+ bulk = &queue->bulks[
+ BULK_INDEX(queue->remote_insert)];
+ bulk->remote_data =
+ (void *)((int *)header->data)[0];
+ bulk->remote_size = ((int *)header->data)[1];
+ wmb();
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) %x@%x",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ bulk->remote_size,
+ (unsigned int)bulk->remote_data);
+
+ queue->remote_insert++;
+
+ if (atomic_read(&pause_bulks_count)) {
+ state->deferred_bulks++;
+ vchiq_log_info(vchiq_core_log_level,
+ "%s: deferring bulk (%d)",
+ __func__,
+ state->deferred_bulks);
+ if (state->conn_state !=
+ VCHIQ_CONNSTATE_PAUSE_SENT)
+ vchiq_log_error(
+ vchiq_core_log_level,
+ "%s: bulks paused in "
+ "unexpected state %s",
+ __func__,
+ conn_state_names[
+ state->conn_state]);
+ } else if (state->conn_state ==
+ VCHIQ_CONNSTATE_CONNECTED) {
+ DEBUG_TRACE(PARSE_LINE);
+ resolved = resolve_bulks(service,
+ queue);
+ }
+
+ mutex_unlock(&service->bulk_mutex);
+ if (resolved)
+ notify_bulks(service, queue,
+ 1/*retry_poll*/);
+ }
+ } break;
+ case VCHIQ_MSG_BULK_RX_DONE:
+ case VCHIQ_MSG_BULK_TX_DONE:
+ WARN_ON(state->is_master);
+ if ((service->remoteport == remoteport)
+ && (service->srvstate !=
+ VCHIQ_SRVSTATE_FREE)) {
+ VCHIQ_BULK_QUEUE_T *queue;
+ VCHIQ_BULK_T *bulk;
+
+ queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ &service->bulk_rx : &service->bulk_tx;
+
+ DEBUG_TRACE(PARSE_LINE);
+ if (mutex_lock_interruptible(
+ &service->bulk_mutex) != 0) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+ if ((int)(queue->remote_insert -
+ queue->local_insert) >= 0) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) "
+ "unexpected (ri=%d,li=%d)",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ queue->remote_insert,
+ queue->local_insert);
+ mutex_unlock(&service->bulk_mutex);
+ break;
+ }
+
+ BUG_ON(queue->process == queue->local_insert);
+ BUG_ON(queue->process != queue->remote_insert);
+
+ bulk = &queue->bulks[
+ BULK_INDEX(queue->remote_insert)];
+ bulk->actual = *(int *)header->data;
+ queue->remote_insert++;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: prs %s@%x (%d->%d) %x@%x",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport,
+ bulk->actual, (unsigned int)bulk->data);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs:%d %cx li=%x ri=%x p=%x",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ?
+ 'r' : 't',
+ queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ DEBUG_TRACE(PARSE_LINE);
+ WARN_ON(queue->process == queue->local_insert);
+ vchiq_complete_bulk(bulk);
+ queue->process++;
+ mutex_unlock(&service->bulk_mutex);
+ DEBUG_TRACE(PARSE_LINE);
+ notify_bulks(service, queue, 1/*retry_poll*/);
+ DEBUG_TRACE(PARSE_LINE);
+ }
+ break;
+ case VCHIQ_MSG_PADDING:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PADDING@%x,%x",
+ state->id, (unsigned int)header, size);
+ break;
+ case VCHIQ_MSG_PAUSE:
+ /* If initiated, signal the application thread */
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs PAUSE@%x,%x",
+ state->id, (unsigned int)header, size);
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: PAUSE received in state PAUSED",
+ state->id);
+ break;
+ }
+ if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
+ /* Send a PAUSE in response */
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
+ NULL, 0, 0, QMFLAGS_NO_MUTEX_UNLOCK)
+ == VCHIQ_RETRY)
+ goto bail_not_ready;
+ if (state->is_master)
+ pause_bulks(state);
+ }
+ /* At this point slot_mutex is held */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
+ vchiq_platform_paused(state);
+ break;
+ case VCHIQ_MSG_RESUME:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: prs RESUME@%x,%x",
+ state->id, (unsigned int)header, size);
+ /* Release the slot mutex */
+ mutex_unlock(&state->slot_mutex);
+ if (state->is_master)
+ resume_bulks(state);
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_resumed(state);
+ break;
+
+ case VCHIQ_MSG_REMOTE_USE:
+ vchiq_on_remote_use(state);
+ break;
+ case VCHIQ_MSG_REMOTE_RELEASE:
+ vchiq_on_remote_release(state);
+ break;
+ case VCHIQ_MSG_REMOTE_USE_ACTIVE:
+ vchiq_on_remote_use_active(state);
+ break;
+
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: prs invalid msgid %x@%x,%x",
+ state->id, msgid, (unsigned int)header, size);
+ WARN(1, "invalid message\n");
+ break;
+ }
+
+skip_message:
+ if (service) {
+ unlock_service(service);
+ service = NULL;
+ }
+
+ state->rx_pos += calc_stride(size);
+
+ DEBUG_TRACE(PARSE_LINE);
+ /* Perform some housekeeping when the end of the slot is
+ ** reached. */
+ if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
+ /* Remove the extra reference count. */
+ release_slot(state, state->rx_info, NULL, NULL);
+ state->rx_data = NULL;
+ }
+ }
+
+bail_not_ready:
+ if (service)
+ unlock_service(service);
+}
+
+/* Called by the slot handler thread */
+static int
+slot_handler_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ DEBUG_INITIALISE(local)
+
+ while (1) {
+ DEBUG_COUNT(SLOT_HANDLER_COUNT);
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ remote_event_wait(&local->trigger);
+
+ rmb();
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ if (state->poll_needed) {
+ /* Check if we need to suspend - may change our
+ * conn_state */
+ vchiq_platform_check_suspend(state);
+
+ state->poll_needed = 0;
+
+ /* Handle service polling and other rare conditions here
+ ** out of the mainline code */
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Poll the services as requested */
+ poll_services(state);
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSING:
+ if (state->is_master)
+ pause_bulks(state);
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
+ NULL, 0, 0,
+ QMFLAGS_NO_MUTEX_UNLOCK)
+ != VCHIQ_RETRY) {
+ vchiq_set_conn_state(state,
+ VCHIQ_CONNSTATE_PAUSE_SENT);
+ } else {
+ if (state->is_master)
+ resume_bulks(state);
+ /* Retry later */
+ state->poll_needed = 1;
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSED:
+ vchiq_platform_resume(state);
+ break;
+
+ case VCHIQ_CONNSTATE_RESUMING:
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
+ NULL, 0, 0, QMFLAGS_NO_MUTEX_LOCK)
+ != VCHIQ_RETRY) {
+ if (state->is_master)
+ resume_bulks(state);
+ vchiq_set_conn_state(state,
+ VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_resumed(state);
+ } else {
+ /* This should really be impossible,
+ ** since the PAUSE should have flushed
+ ** through outstanding messages. */
+ vchiq_log_error(vchiq_core_log_level,
+ "Failed to send RESUME "
+ "message");
+ BUG();
+ }
+ break;
+
+ case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
+ case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
+ vchiq_platform_handle_timeout(state);
+ break;
+ default:
+ break;
+ }
+
+
+ }
+
+ DEBUG_TRACE(SLOT_HANDLER_LINE);
+ parse_rx_slots(state);
+ }
+ return 0;
+}
+
+
+/* Called by the recycle thread */
+static int
+recycle_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
+
+ while (1) {
+ remote_event_wait(&local->recycle);
+
+ process_free_queue(state);
+ }
+ return 0;
+}
+
+
+/* Called by the sync thread */
+static int
+sync_func(void *v)
+{
+ VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
+ VCHIQ_SHARED_STATE_T *local = state->local;
+ VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+
+ while (1) {
+ VCHIQ_SERVICE_T *service;
+ int msgid, size;
+ int type;
+ unsigned int localport, remoteport;
+
+ remote_event_wait(&local->sync_trigger);
+
+ rmb();
+
+ msgid = header->msgid;
+ size = header->size;
+ type = VCHIQ_MSG_TYPE(msgid);
+ localport = VCHIQ_MSG_DSTPORT(msgid);
+ remoteport = VCHIQ_MSG_SRCPORT(msgid);
+
+ service = find_service_by_port(state, localport);
+
+ if (!service) {
+ vchiq_log_error(vchiq_sync_log_level,
+ "%d: sf %s@%x (%d->%d) - "
+ "invalid/closed service %d",
+ state->id, msg_type_str(type),
+ (unsigned int)header,
+ remoteport, localport, localport);
+ release_message_sync(state, header);
+ continue;
+ }
+
+ if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
+ int svc_fourcc;
+
+ svc_fourcc = service
+ ? service->base.fourcc
+ : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ vchiq_log_trace(vchiq_sync_log_level,
+ "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
+ msg_type_str(type),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
+ if (size > 0)
+ vchiq_log_dump_mem("Rcvd", 0, header->data,
+ min(64, size));
+ }
+
+ switch (type) {
+ case VCHIQ_MSG_OPENACK:
+ if (size >= sizeof(struct vchiq_openack_payload)) {
+ const struct vchiq_openack_payload *payload =
+ (struct vchiq_openack_payload *)
+ header->data;
+ service->peer_version = payload->version;
+ }
+ vchiq_log_info(vchiq_sync_log_level,
+ "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
+ state->id, (unsigned int)header, size,
+ remoteport, localport, service->peer_version);
+ if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
+ service->remoteport = remoteport;
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_OPENSYNC);
+ service->sync = 1;
+ up(&service->remove_event);
+ }
+ release_message_sync(state, header);
+ break;
+
+ case VCHIQ_MSG_DATA:
+ vchiq_log_trace(vchiq_sync_log_level,
+ "%d: sf DATA@%x,%x (%d->%d)",
+ state->id, (unsigned int)header, size,
+ remoteport, localport);
+
+ if ((service->remoteport == remoteport) &&
+ (service->srvstate ==
+ VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (make_service_callback(service,
+ VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY)
+ vchiq_log_error(vchiq_sync_log_level,
+ "synchronous callback to "
+ "service %d returns "
+ "VCHIQ_RETRY",
+ localport);
+ }
+ break;
+
+ default:
+ vchiq_log_error(vchiq_sync_log_level,
+ "%d: sf unexpected msgid %x@%x,%x",
+ state->id, msgid, (unsigned int)header, size);
+ release_message_sync(state, header);
+ break;
+ }
+
+ unlock_service(service);
+ }
+
+ return 0;
+}
+
+
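+/* Reset a bulk queue to its empty state. */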
+static void
+init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
+{
+ queue->local_insert = 0;
+ queue->remote_insert = 0;
+ queue->process = 0;
+ queue->remote_notify = 0;
+ queue->remove = 0;
+}
+
+
+inline const char *
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
+{
+ return conn_state_names[conn_state];
+}
+
+
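+/* Carve a shared memory region into slots. The base address is rounded up to
+** a slot boundary, slot zero holds the shared control state, and the
+** remaining data slots are split evenly between the master and slave sides.
+** Returns NULL if the region is too small to be useful. */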
+VCHIQ_SLOT_ZERO_T *
+vchiq_init_slots(void *mem_base, int mem_size)
+{
+ int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
+ VCHIQ_SLOT_ZERO_T *slot_zero =
+ (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
+ int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
+ int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
+
+ /* Ensure there is enough memory to run an absolutely minimal system */
+ num_slots -= first_data_slot;
+
+ if (num_slots < 4) {
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_init_slots - insufficient memory %x bytes",
+ mem_size);
+ return NULL;
+ }
+
+ memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
+
+ slot_zero->magic = VCHIQ_MAGIC;
+ slot_zero->version = VCHIQ_VERSION;
+ slot_zero->version_min = VCHIQ_VERSION_MIN;
+ slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
+ slot_zero->slot_size = VCHIQ_SLOT_SIZE;
+ slot_zero->max_slots = VCHIQ_MAX_SLOTS;
+ slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
+
+ slot_zero->master.slot_sync = first_data_slot;
+ slot_zero->master.slot_first = first_data_slot + 1;
+ slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
+ slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
+ slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
+ slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
+
+ return slot_zero;
+}
+
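+/* Initialise a VCHIQ state for one side of the connection. The shared
+** slot_zero area is validated against the local build (magic, versions and
+** slot geometry), the local events, mutexes and quotas are set up, and the
+** worker threads are created. */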
+VCHIQ_STATUS_T
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
+ int is_master)
+{
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_STATUS_T status;
+ char threadname[10];
+ static int id;
+ int i;
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "%s: slot_zero = 0x%08lx, is_master = %d",
+ __func__, (unsigned long)slot_zero, is_master);
+
+ /* Check the input configuration */
+
+ if (slot_zero->magic != VCHIQ_MAGIC) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Invalid VCHIQ magic value found.");
+ vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
+ (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if (slot_zero->version < VCHIQ_VERSION_MIN) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
+ vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
+ "(minimum %d)",
+ (unsigned int)slot_zero, slot_zero->version,
+ VCHIQ_VERSION_MIN);
+ vchiq_loud_error("Restart with a newer VideoCore image.");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if (VCHIQ_VERSION < slot_zero->version_min) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("Incompatible VCHIQ versions found.");
+ vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
+ "minimum %d)",
+ (unsigned int)slot_zero, VCHIQ_VERSION,
+ slot_zero->version_min);
+ vchiq_loud_error("Restart with a newer kernel.");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
+ (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
+ (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
+ (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
+ vchiq_loud_error_header();
+ if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
+ vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
+ "(expected %x)",
+ (unsigned int)slot_zero,
+ slot_zero->slot_zero_size,
+ sizeof(VCHIQ_SLOT_ZERO_T));
+ if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
+ vchiq_loud_error("slot_zero=%x: slot_size=%d "
+ "(expected %d",
+ (unsigned int)slot_zero, slot_zero->slot_size,
+ VCHIQ_SLOT_SIZE);
+ if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
+ vchiq_loud_error("slot_zero=%x: max_slots=%d "
+ "(expected %d)",
+ (unsigned int)slot_zero, slot_zero->max_slots,
+ VCHIQ_MAX_SLOTS);
+ if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
+ vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
+ "(expected %d)",
+ (unsigned int)slot_zero,
+ slot_zero->max_slots_per_side,
+ VCHIQ_MAX_SLOTS_PER_SIDE);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ if (VCHIQ_VERSION < slot_zero->version)
+ slot_zero->version = VCHIQ_VERSION;
+
+ if (is_master) {
+ local = &slot_zero->master;
+ remote = &slot_zero->slave;
+ } else {
+ local = &slot_zero->slave;
+ remote = &slot_zero->master;
+ }
+
+ if (local->initialised) {
+ vchiq_loud_error_header();
+ if (remote->initialised)
+ vchiq_loud_error("local state has already been "
+ "initialised");
+ else
+ vchiq_loud_error("master/slave mismatch - two %ss",
+ is_master ? "master" : "slave");
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+
+ memset(state, 0, sizeof(VCHIQ_STATE_T));
+
+ state->id = id++;
+ state->is_master = is_master;
+
+	/* Initialize shared state pointers */
+
+ state->local = local;
+ state->remote = remote;
+ state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
+
+	/* Initialize events and mutexes */
+
+ sema_init(&state->connect, 0);
+ mutex_init(&state->mutex);
+ sema_init(&state->trigger_event, 0);
+ sema_init(&state->recycle_event, 0);
+ sema_init(&state->sync_trigger_event, 0);
+ sema_init(&state->sync_release_event, 0);
+
+ mutex_init(&state->slot_mutex);
+ mutex_init(&state->recycle_mutex);
+ mutex_init(&state->sync_mutex);
+ mutex_init(&state->bulk_transfer_mutex);
+
+ sema_init(&state->slot_available_event, 0);
+ sema_init(&state->slot_remove_event, 0);
+ sema_init(&state->data_quota_event, 0);
+
+ state->slot_queue_available = 0;
+
+ for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[i];
+ sema_init(&service_quota->quota_event, 0);
+ }
+
+ for (i = local->slot_first; i <= local->slot_last; i++) {
+ local->slot_queue[state->slot_queue_available++] = i;
+ up(&state->slot_available_event);
+ }
+
+ state->default_slot_quota = state->slot_queue_available/2;
+ state->default_message_quota =
+ min((unsigned short)(state->default_slot_quota * 256),
+ (unsigned short)~0);
+
+ state->previous_data_index = -1;
+ state->data_use_count = 0;
+ state->data_quota = state->slot_queue_available - 1;
+
+ local->trigger.event = &state->trigger_event;
+ remote_event_create(&local->trigger);
+ local->tx_pos = 0;
+
+ local->recycle.event = &state->recycle_event;
+ remote_event_create(&local->recycle);
+ local->slot_queue_recycle = state->slot_queue_available;
+
+ local->sync_trigger.event = &state->sync_trigger_event;
+ remote_event_create(&local->sync_trigger);
+
+ local->sync_release.event = &state->sync_release_event;
+ remote_event_create(&local->sync_release);
+
+ /* At start-of-day, the slot is empty and available */
+ ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
+ = VCHIQ_MSGID_PADDING;
+ remote_event_signal_local(&local->sync_release);
+
+ local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
+
+ status = vchiq_platform_init_state(state);
+
+	/* Bring up the slot handler thread */
+ snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
+ state->slot_handler_thread = kthread_create(&slot_handler_func,
+ (void *)state,
+ threadname);
+
+ if (state->slot_handler_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->slot_handler_thread, -19);
+ wake_up_process(state->slot_handler_thread);
+
+ snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
+ state->recycle_thread = kthread_create(&recycle_func,
+ (void *)state,
+ threadname);
+ if (state->recycle_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->recycle_thread, -19);
+ wake_up_process(state->recycle_thread);
+
+ snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
+ state->sync_thread = kthread_create(&sync_func,
+ (void *)state,
+ threadname);
+ if (state->sync_thread == NULL) {
+ vchiq_loud_error_header();
+ vchiq_loud_error("couldn't create thread %s", threadname);
+ vchiq_loud_error_footer();
+ return VCHIQ_ERROR;
+ }
+ set_user_nice(state->sync_thread, -20);
+ wake_up_process(state->sync_thread);
+
+ BUG_ON(state->id >= VCHIQ_MAX_STATES);
+ vchiq_states[state->id] = state;
+
+ /* Indicate readiness to the other side */
+ local->initialised = 1;
+
+ return status;
+}
+
+/* Called from application thread when a client or server service is created. */
+VCHIQ_SERVICE_T *
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
+ VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
+{
+ VCHIQ_SERVICE_T *service;
+
+ service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
+ if (service) {
+ service->base.fourcc = params->fourcc;
+ service->base.callback = params->callback;
+ service->base.userdata = params->userdata;
+ service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
+ service->ref_count = 1;
+ service->srvstate = VCHIQ_SRVSTATE_FREE;
+ service->userdata_term = userdata_term;
+ service->localport = VCHIQ_PORT_FREE;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
+ VCHIQ_FOURCC_INVALID : params->fourcc;
+ service->client_id = 0;
+ service->auto_close = 1;
+ service->sync = 0;
+ service->closing = 0;
+ service->trace = 0;
+ atomic_set(&service->poll_flags, 0);
+ service->version = params->version;
+ service->version_min = params->version_min;
+ service->state = state;
+ service->instance = instance;
+ service->service_use_count = 0;
+ init_bulk_queue(&service->bulk_tx);
+ init_bulk_queue(&service->bulk_rx);
+ sema_init(&service->remove_event, 0);
+ sema_init(&service->bulk_remove_event, 0);
+ mutex_init(&service->bulk_mutex);
+ memset(&service->stats, 0, sizeof(service->stats));
+ } else {
+ vchiq_log_error(vchiq_core_log_level,
+ "Out of memory");
+ }
+
+ if (service) {
+ VCHIQ_SERVICE_T **pservice = NULL;
+ int i;
+
+ /* Although it is perfectly possible to use service_spinlock
+ ** to protect the creation of services, it is overkill as it
+ ** disables interrupts while the array is searched.
+ ** The only danger is of another thread trying to create a
+ ** service - service deletion is safe.
+ ** Therefore it is preferable to use state->mutex which,
+ ** although slower to claim, doesn't block interrupts while
+ ** it is held.
+ */
+
+ mutex_lock(&state->mutex);
+
+ /* Prepare to use a previously unused service */
+ if (state->unused_service < VCHIQ_MAX_SERVICES)
+ pservice = &state->services[state->unused_service];
+
+ if (srvstate == VCHIQ_SRVSTATE_OPENING) {
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+ if (!srv) {
+ pservice = &state->services[i];
+ break;
+ }
+ }
+ } else {
+ for (i = (state->unused_service - 1); i >= 0; i--) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+ if (!srv)
+ pservice = &state->services[i];
+ else if ((srv->public_fourcc == params->fourcc)
+ && ((srv->instance != instance) ||
+ (srv->base.callback !=
+ params->callback))) {
+					/* There is another server using this
+					** fourcc with a different instance
+					** or callback. */
+ pservice = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pservice) {
+ service->localport = (pservice - state->services);
+ if (!handle_seq)
+ handle_seq = VCHIQ_MAX_STATES *
+ VCHIQ_MAX_SERVICES;
+ service->handle = handle_seq |
+ (state->id * VCHIQ_MAX_SERVICES) |
+ service->localport;
+ handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
+ *pservice = service;
+ if (pservice == &state->services[state->unused_service])
+ state->unused_service++;
+ }
+
+ mutex_unlock(&state->mutex);
+
+ if (!pservice) {
+ kfree(service);
+ service = NULL;
+ }
+ }
+
+ if (service) {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &state->service_quotas[service->localport];
+ service_quota->slot_quota = state->default_slot_quota;
+ service_quota->message_quota = state->default_message_quota;
+ if (service_quota->slot_use_count == 0)
+ service_quota->previous_tx_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
+ - 1;
+
+ /* Bring this service online */
+ vchiq_set_service_state(service, srvstate);
+
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s Service %c%c%c%c SrcPort:%d",
+ (srvstate == VCHIQ_SRVSTATE_OPENING)
+ ? "Open" : "Add",
+ VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
+ service->localport);
+ }
+
+ /* Don't unlock the service - leave it with a ref_count of 1. */
+
+ return service;
+}
+
+VCHIQ_STATUS_T
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
+{
+ struct vchiq_open_payload payload = {
+ service->base.fourcc,
+ client_id,
+ service->version,
+ service->version_min
+ };
+ VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ service->client_id = client_id;
+ vchiq_use_service_internal(service);
+ status = queue_message(service->state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
+ &body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING);
+ if (status == VCHIQ_SUCCESS) {
+ /* Wait for the ACK/NAK */
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ vchiq_release_service_internal(service);
+ } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
+ (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
+ if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: osi - srvstate = %s (ref %d)",
+ service->state->id,
+ srvstate_names[service->srvstate],
+ service->ref_count);
+ status = VCHIQ_ERROR;
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ vchiq_release_service_internal(service);
+ }
+ }
+ return status;
+}
+
+static void
+release_service_messages(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+ int slot_last = state->remote->slot_last;
+ int i;
+
+ /* Release any claimed messages aimed at this service */
+
+ if (service->sync) {
+ VCHIQ_HEADER_T *header =
+ (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
+ state->remote->slot_sync);
+ if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
+ release_message_sync(state, header);
+
+ return;
+ }
+
+ for (i = state->remote->slot_first; i <= slot_last; i++) {
+ VCHIQ_SLOT_INFO_T *slot_info =
+ SLOT_INFO_FROM_INDEX(state, i);
+ if (slot_info->release_count != slot_info->use_count) {
+ char *data =
+ (char *)SLOT_DATA_FROM_INDEX(state, i);
+ unsigned int pos, end;
+
+ end = VCHIQ_SLOT_SIZE;
+ if (data == state->rx_data)
+ /* This buffer is still being read from - stop
+ ** at the current read position */
+ end = state->rx_pos & VCHIQ_SLOT_MASK;
+
+ pos = 0;
+
+ while (pos < end) {
+ VCHIQ_HEADER_T *header =
+ (VCHIQ_HEADER_T *)(data + pos);
+ int msgid = header->msgid;
+ int port = VCHIQ_MSG_DSTPORT(msgid);
+ if ((port == service->localport) &&
+ (msgid & VCHIQ_MSGID_CLAIMED)) {
+ vchiq_log_info(vchiq_core_log_level,
+ " fsi - hdr %x",
+ (unsigned int)header);
+ release_slot(state, slot_info, header,
+ NULL);
+ }
+ pos += calc_stride(header->size);
+ if (pos > VCHIQ_SLOT_SIZE) {
+ vchiq_log_error(vchiq_core_log_level,
+ "fsi - pos %x: header %x, "
+ "msgid %x, header->msgid %x, "
+ "header->size %x",
+ pos, (unsigned int)header,
+ msgid, header->msgid,
+ header->size);
+ WARN(1, "invalid slot position\n");
+ }
+ }
+ }
+ }
+}
+
+static int
+do_abort_bulks(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATUS_T status;
+
+ /* Abort any outstanding bulk transfers */
+ if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
+ return 0;
+ abort_outstanding_bulks(service, &service->bulk_tx);
+ abort_outstanding_bulks(service, &service->bulk_rx);
+ mutex_unlock(&service->bulk_mutex);
+
+ status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
+ if (status == VCHIQ_SUCCESS)
+ status = notify_bulks(service, &service->bulk_rx,
+ 0/*!retry_poll*/);
+ return (status == VCHIQ_SUCCESS);
+}
+
+static VCHIQ_STATUS_T
+close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
+{
+ VCHIQ_STATUS_T status;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+ int newstate;
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (is_server) {
+ if (service->auto_close) {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ newstate = VCHIQ_SRVSTATE_LISTENING;
+ } else
+ newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
+ } else
+ newstate = VCHIQ_SRVSTATE_CLOSED;
+ vchiq_set_service_state(service, newstate);
+ break;
+ case VCHIQ_SRVSTATE_LISTENING:
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "close_service_complete(%x) called in state %s",
+ service->handle, srvstate_names[service->srvstate]);
+ WARN(1, "close_service_complete in unexpected state\n");
+ return VCHIQ_ERROR;
+ }
+
+ status = make_service_callback(service,
+ VCHIQ_SERVICE_CLOSED, NULL, NULL);
+
+ if (status != VCHIQ_RETRY) {
+ int uc = service->service_use_count;
+ int i;
+ /* Complete the close process */
+ for (i = 0; i < uc; i++)
+ /* cater for cases where close is forced and the
+			** client may not close all its handles */
+ vchiq_release_service_internal(service);
+
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
+ vchiq_free_service_internal(service);
+ else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
+ if (is_server)
+ service->closing = 0;
+
+ up(&service->remove_event);
+ }
+ } else
+ vchiq_set_service_state(service, failstate);
+
+ return status;
+}
+
+/* Called by the slot handler */
+VCHIQ_STATUS_T
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
+{
+ VCHIQ_STATE_T *state = service->state;
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+
+ vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
+ service->state->id, service->localport, close_recvd,
+ srvstate_names[service->srvstate]);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ if (close_recvd)
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_close_service_internal(1) called "
+ "in state %s",
+ srvstate_names[service->srvstate]);
+ else if (is_server) {
+ if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+ status = VCHIQ_ERROR;
+ } else {
+ service->client_id = 0;
+ service->remoteport = VCHIQ_PORT_FREE;
+ if (service->srvstate ==
+ VCHIQ_SRVSTATE_CLOSEWAIT)
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ }
+ up(&service->remove_event);
+ } else
+ vchiq_free_service_internal(service);
+ break;
+ case VCHIQ_SRVSTATE_OPENING:
+ if (close_recvd) {
+ /* The open was rejected - tell the user */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_CLOSEWAIT);
+ up(&service->remove_event);
+ } else {
+ /* Shutdown mid-open - let the other side know */
+ status = queue_message(state, service,
+ VCHIQ_MAKE_MSG
+ (VCHIQ_MSG_CLOSE,
+ service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
+ NULL, 0, 0, 0);
+ }
+ break;
+
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ mutex_lock(&state->sync_mutex);
+ /* Drop through */
+
+ case VCHIQ_SRVSTATE_OPEN:
+ if (state->is_master || close_recvd) {
+ if (!do_abort_bulks(service))
+ status = VCHIQ_RETRY;
+ }
+
+ release_service_messages(service);
+
+ if (status == VCHIQ_SUCCESS)
+ status = queue_message(state, service,
+ VCHIQ_MAKE_MSG
+ (VCHIQ_MSG_CLOSE,
+ service->localport,
+ VCHIQ_MSG_DSTPORT(service->remoteport)),
+ NULL, 0, 0, QMFLAGS_NO_MUTEX_UNLOCK);
+
+ if (status == VCHIQ_SUCCESS) {
+ if (!close_recvd) {
+ /* Change the state while the mutex is
+ still held */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_CLOSESENT);
+ mutex_unlock(&state->slot_mutex);
+ if (service->sync)
+ mutex_unlock(&state->sync_mutex);
+ break;
+ }
+ } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
+ mutex_unlock(&state->sync_mutex);
+ break;
+ } else
+ break;
+
+ /* Change the state while the mutex is still held */
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
+ mutex_unlock(&state->slot_mutex);
+ if (service->sync)
+ mutex_unlock(&state->sync_mutex);
+
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSESENT:
+ if (!close_recvd)
+ /* This happens when a process is killed mid-close */
+ break;
+
+ if (!state->is_master) {
+ if (!do_abort_bulks(service)) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+ }
+
+ if (status == VCHIQ_SUCCESS)
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ case VCHIQ_SRVSTATE_CLOSERECVD:
+ if (!close_recvd && is_server)
+ /* Force into LISTENING mode */
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ status = close_service_complete(service,
+ VCHIQ_SRVSTATE_CLOSERECVD);
+ break;
+
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_close_service_internal(%d) called in state %s",
+ close_recvd, srvstate_names[service->srvstate]);
+ break;
+ }
+
+ return status;
+}
+
+/* Called from the application process upon process death */
+void
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+
+ vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
+ state->id, service->localport, service->remoteport);
+
+ mark_service_closing(service);
+
+ /* Mark the service for removal by the slot handler */
+ request_poll(state, service, VCHIQ_POLL_REMOVE);
+}
+
+/* Called from the slot handler */
+void
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
+{
+ VCHIQ_STATE_T *state = service->state;
+
+ vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
+ state->id, service->localport);
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPENING:
+ case VCHIQ_SRVSTATE_CLOSED:
+ case VCHIQ_SRVSTATE_HIDDEN:
+ case VCHIQ_SRVSTATE_LISTENING:
+ case VCHIQ_SRVSTATE_CLOSEWAIT:
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "%d: fsi - (%d) in state %s",
+ state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ return;
+ }
+
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
+
+ up(&service->remove_event);
+
+ /* Release the initial lock */
+ unlock_service(service);
+}
+
+VCHIQ_STATUS_T
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+ /* Find all services registered to this client and enable them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
+ vchiq_set_service_state(service,
+ VCHIQ_SRVSTATE_LISTENING);
+ unlock_service(service);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
+ if (queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
+ 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
+ return VCHIQ_RETRY;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
+ if (down_interruptible(&state->connect) != 0)
+ return VCHIQ_RETRY;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ up(&state->connect);
+ }
+
+ return VCHIQ_SUCCESS;
+}
+
+VCHIQ_STATUS_T
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_SERVICE_T *service;
+ int i;
+
+	/* Find all services registered to this client and remove them. */
+ i = 0;
+ while ((service = next_service_by_instance(state, instance,
+ &i)) != NULL) {
+ (void)vchiq_remove_service(service->handle);
+ unlock_service(service);
+ }
+
+ return VCHIQ_SUCCESS;
+}
+
+VCHIQ_STATUS_T
+vchiq_pause_internal(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ switch (state->conn_state) {
+ case VCHIQ_CONNSTATE_CONNECTED:
+ /* Request a pause */
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
+ request_poll(state, NULL, 0);
+ break;
+ default:
+ vchiq_log_error(vchiq_core_log_level,
+ "vchiq_pause_internal in state %s\n",
+ conn_state_names[state->conn_state]);
+ status = VCHIQ_ERROR;
+ VCHIQ_STATS_INC(state, error_count);
+ break;
+ }
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_resume_internal(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
+ request_poll(state, NULL, 0);
+ } else {
+ status = VCHIQ_ERROR;
+ VCHIQ_STATS_INC(state, error_count);
+ }
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ /* Unregister the service */
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (!service)
+ return VCHIQ_ERROR;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: close_service:%d",
+ service->state->id, service->localport);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
+ unlock_service(service);
+ return VCHIQ_ERROR;
+ }
+
+ mark_service_closing(service);
+
+ if (current == service->state->slot_handler_thread) {
+ status = vchiq_close_service_internal(service,
+ 0/*!close_recvd*/);
+ BUG_ON(status == VCHIQ_RETRY);
+ } else {
+ /* Mark the service for termination by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
+ }
+
+ while (1) {
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: close_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if ((status == VCHIQ_SUCCESS) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
+ (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
+ status = VCHIQ_ERROR;
+
+ unlock_service(service);
+
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ /* Unregister the service */
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
+ if (!service)
+ return VCHIQ_ERROR;
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: remove_service:%d",
+ service->state->id, service->localport);
+
+ if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
+ unlock_service(service);
+ return VCHIQ_ERROR;
+ }
+
+ mark_service_closing(service);
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (current == service->state->slot_handler_thread)) {
+ /* Make it look like a client, because it must be removed and
+ not left in the LISTENING state. */
+ service->public_fourcc = VCHIQ_FOURCC_INVALID;
+
+ status = vchiq_close_service_internal(service,
+ 0/*!close_recvd*/);
+ BUG_ON(status == VCHIQ_RETRY);
+ } else {
+ /* Mark the service for removal by the slot handler */
+ request_poll(service->state, service, VCHIQ_POLL_REMOVE);
+ }
+ while (1) {
+ if (down_interruptible(&service->remove_event) != 0) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+
+ if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
+ (service->srvstate == VCHIQ_SRVSTATE_OPEN))
+ break;
+
+ vchiq_log_warning(vchiq_core_log_level,
+ "%d: remove_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
+ }
+
+ if ((status == VCHIQ_SUCCESS) &&
+ (service->srvstate != VCHIQ_SRVSTATE_FREE))
+ status = VCHIQ_ERROR;
+
+ unlock_service(service);
+
+ return status;
+}
+
+
+/* This function may be called by kernel threads or user threads.
+ * User threads may receive VCHIQ_RETRY to indicate that a signal has been
+ * received and the call should be retried after being returned to user
+ * context.
+ * When called in blocking mode, the userdata field points to a bulk_waiter
+ * structure.
+ */
+VCHIQ_STATUS_T
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_BULK_QUEUE_T *queue;
+ VCHIQ_BULK_T *bulk;
+ VCHIQ_STATE_T *state;
+ struct bulk_waiter *bulk_waiter = NULL;
+ const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+ const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ if (!service ||
+ (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
+ ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
+ goto error_exit;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ bulk_waiter = (struct bulk_waiter *)userdata;
+ sema_init(&bulk_waiter->event, 0);
+ bulk_waiter->actual = 0;
+ bulk_waiter->bulk = NULL;
+ break;
+ case VCHIQ_BULK_MODE_WAITING:
+ bulk_waiter = (struct bulk_waiter *)userdata;
+ bulk = bulk_waiter->bulk;
+ goto waiting;
+ default:
+ goto error_exit;
+ }
+
+ state = service->state;
+
+ queue = (dir == VCHIQ_BULK_TRANSMIT) ?
+ &service->bulk_tx : &service->bulk_rx;
+
+ if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+ do {
+ mutex_unlock(&service->bulk_mutex);
+ if (down_interruptible(&service->bulk_remove_event)
+ != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+ if (mutex_lock_interruptible(&service->bulk_mutex)
+ != 0) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+ }
+ } while (queue->local_insert == queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS);
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+ bulk->mode = mode;
+ bulk->dir = dir;
+ bulk->userdata = userdata;
+ bulk->size = size;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+ if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
+ VCHIQ_SUCCESS)
+ goto unlock_error_exit;
+
+ wmb();
+
+ vchiq_log_info(vchiq_core_log_level,
+ "%d: bt (%d->%d) %cx %x@%x %x",
+ state->id,
+ service->localport, service->remoteport, dir_char,
+ size, (unsigned int)bulk->data, (unsigned int)userdata);
+
+ /* The slot mutex must be held when the service is being closed, so
+ claim it here to ensure that isn't happening */
+ if (mutex_lock_interruptible(&state->slot_mutex) != 0) {
+ status = VCHIQ_RETRY;
+ goto cancel_bulk_error_exit;
+ }
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto unlock_both_error_exit;
+
+ if (state->is_master) {
+ queue->local_insert++;
+ if (resolve_bulks(service, queue))
+ request_poll(state, service,
+ (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+ } else {
+ int payload[2] = { (int)bulk->data, bulk->size };
+ VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
+
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport, service->remoteport),
+ &element, 1, sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
+ if (status != VCHIQ_SUCCESS) {
+ goto unlock_both_error_exit;
+ }
+ queue->local_insert++;
+ }
+
+ mutex_unlock(&state->slot_mutex);
+ mutex_unlock(&service->bulk_mutex);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%d: bt:%d %cx li=%x ri=%x p=%x",
+ state->id,
+ service->localport, dir_char,
+ queue->local_insert, queue->remote_insert, queue->process);
+
+waiting:
+ unlock_service(service);
+
+ status = VCHIQ_SUCCESS;
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+ if (down_interruptible(&bulk_waiter->event) != 0)
+ status = VCHIQ_RETRY;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = VCHIQ_ERROR;
+ }
+
+ return status;
+
+unlock_both_error_exit:
+ mutex_unlock(&state->slot_mutex);
+cancel_bulk_error_exit:
+ vchiq_complete_bulk(bulk);
+unlock_error_exit:
+ mutex_unlock(&service->bulk_mutex);
+
+error_exit:
+ if (service)
+ unlock_service(service);
+ return status;
+}
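+
+/* Illustrative sketch only (not part of the original patch): a kernel-side
+ * caller using the blocking mode described above. In
+ * VCHIQ_BULK_MODE_BLOCKING the userdata argument must point to a
+ * struct bulk_waiter, which vchiq_bulk_transfer initialises itself; the
+ * handle and buffer here are hypothetical.
+ */
+#if 0
+static VCHIQ_STATUS_T example_blocking_receive(VCHIQ_SERVICE_HANDLE_T handle,
+	void *buf, int size)
+{
+	struct bulk_waiter waiter;
+
+	/* Returns VCHIQ_RETRY if interrupted by a signal */
+	return vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID, buf, size,
+		&waiter, VCHIQ_BULK_MODE_BLOCKING, VCHIQ_BULK_RECEIVE);
+}
+#endif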
+
+VCHIQ_STATUS_T
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ const VCHIQ_ELEMENT_T *elements, unsigned int count)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ unsigned int size = 0;
+ unsigned int i;
+
+ if (!service ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS))
+ goto error_exit;
+
+ for (i = 0; i < (unsigned int)count; i++) {
+ if (elements[i].size) {
+ if (elements[i].data == NULL) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+ size += elements[i].size;
+ }
+ }
+
+ if (size > VCHIQ_MAX_MSG_SIZE) {
+ VCHIQ_SERVICE_STATS_INC(service, error_count);
+ goto error_exit;
+ }
+
+ switch (service->srvstate) {
+ case VCHIQ_SRVSTATE_OPEN:
+ status = queue_message(service->state, service,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
+ service->localport,
+ service->remoteport),
+ elements, count, size, 1);
+ break;
+ case VCHIQ_SRVSTATE_OPENSYNC:
+ status = queue_message_sync(service->state, service,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
+ service->localport,
+ service->remoteport),
+ elements, count, size, 1);
+ break;
+ default:
+ status = VCHIQ_ERROR;
+ break;
+ }
+
+error_exit:
+ if (service)
+ unlock_service(service);
+
+ return status;
+}
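+
+/* Illustrative sketch only (not part of the original patch): queueing a
+ * single-element message on an open service. The handle and payload are
+ * hypothetical; a VCHIQ_ELEMENT_T simply pairs a data pointer with its size.
+ */
+#if 0
+static VCHIQ_STATUS_T example_send(VCHIQ_SERVICE_HANDLE_T handle)
+{
+	static const char payload[] = "ping";
+	VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
+
+	return vchiq_queue_message(handle, &element, 1);
+}
+#endif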
+
+void
+vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_STATE_T *state;
+ int slot_index;
+
+ if (!service)
+ return;
+
+ state = service->state;
+ remote = state->remote;
+
+ slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
+
+ if ((slot_index >= remote->slot_first) &&
+ (slot_index <= remote->slot_last)) {
+ int msgid = header->msgid;
+ if (msgid & VCHIQ_MSGID_CLAIMED) {
+ VCHIQ_SLOT_INFO_T *slot_info =
+ SLOT_INFO_FROM_INDEX(state, slot_index);
+
+ release_slot(state, slot_info, header, service);
+ }
+ } else if (slot_index == remote->slot_sync)
+ release_message_sync(state, header);
+
+ unlock_service(service);
+}
+
+static void
+release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
+{
+ header->msgid = VCHIQ_MSGID_PADDING;
+ wmb();
+ remote_event_signal(&state->remote->sync_release);
+}
+
+VCHIQ_STATUS_T
+vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+
+ if (!service ||
+ (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
+ !peer_version)
+ goto exit;
+ *peer_version = service->peer_version;
+ status = VCHIQ_SUCCESS;
+
+exit:
+ if (service)
+ unlock_service(service);
+ return status;
+}
+
+VCHIQ_STATUS_T
+vchiq_get_config(VCHIQ_INSTANCE_T instance,
+ int config_size, VCHIQ_CONFIG_T *pconfig)
+{
+ VCHIQ_CONFIG_T config;
+
+ (void)instance;
+
+ config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
+ config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
+ config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
+ config.max_services = VCHIQ_MAX_SERVICES;
+ config.version = VCHIQ_VERSION;
+ config.version_min = VCHIQ_VERSION_MIN;
+
+ if (config_size > sizeof(VCHIQ_CONFIG_T))
+ return VCHIQ_ERROR;
+
+ memcpy(pconfig, &config,
+ min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
+
+ return VCHIQ_SUCCESS;
+}
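+
+/* Illustrative sketch only (not part of the original patch): querying the
+ * configuration. The instance argument is unused by this implementation, so
+ * a hypothetical caller simply sizes the buffer it passes in.
+ */
+#if 0
+static void example_query_config(VCHIQ_INSTANCE_T instance)
+{
+	VCHIQ_CONFIG_T config;
+
+	if (vchiq_get_config(instance, sizeof(config), &config) ==
+		VCHIQ_SUCCESS)
+		vchiq_log_info(vchiq_core_log_level, "max msg size %d",
+			config.max_msg_size);
+}
+#endif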
+
+VCHIQ_STATUS_T
+vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHIQ_SERVICE_OPTION_T option, int value)
+{
+ VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+
+ if (service) {
+ switch (option) {
+ case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
+ service->auto_close = value;
+ status = VCHIQ_SUCCESS;
+ break;
+
+ case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[
+ service->localport];
+ if (value == 0)
+ value = service->state->default_slot_quota;
+ if ((value >= service_quota->slot_use_count) &&
+ (value < (unsigned short)~0)) {
+ service_quota->slot_quota = value;
+ if ((value >= service_quota->slot_use_count) &&
+ (service_quota->message_quota >=
+ service_quota->message_use_count)) {
+ /* Signal the service that it may have
+ ** dropped below its quota */
+ up(&service_quota->quota_event);
+ }
+ status = VCHIQ_SUCCESS;
+ }
+ } break;
+
+ case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[
+ service->localport];
+ if (value == 0)
+ value = service->state->default_message_quota;
+ if ((value >= service_quota->message_use_count) &&
+ (value < (unsigned short)~0)) {
+ service_quota->message_quota = value;
+ if ((value >=
+ service_quota->message_use_count) &&
+ (service_quota->slot_quota >=
+ service_quota->slot_use_count))
+ /* Signal the service that it may have
+ ** dropped below its quota */
+ up(&service_quota->quota_event);
+ status = VCHIQ_SUCCESS;
+ }
+ } break;
+
+ case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
+ if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+ (service->srvstate ==
+ VCHIQ_SRVSTATE_LISTENING)) {
+ service->sync = value;
+ status = VCHIQ_SUCCESS;
+ }
+ break;
+
+ case VCHIQ_SERVICE_OPTION_TRACE:
+ service->trace = value;
+ status = VCHIQ_SUCCESS;
+ break;
+
+ default:
+ break;
+ }
+ unlock_service(service);
+ }
+
+ return status;
+}
+
+void
+vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
+ VCHIQ_SHARED_STATE_T *shared, const char *label)
+{
+ static const char *const debug_names[] = {
+ "<entries>",
+ "SLOT_HANDLER_COUNT",
+ "SLOT_HANDLER_LINE",
+ "PARSE_LINE",
+ "PARSE_HEADER",
+ "PARSE_MSGID",
+ "AWAIT_COMPLETION_LINE",
+ "DEQUEUE_MESSAGE_LINE",
+ "SERVICE_CALLBACK_LINE",
+ "MSG_QUEUE_FULL_COUNT",
+ "COMPLETION_QUEUE_FULL_COUNT"
+ };
+ int i;
+
+ char buf[80];
+ int len;
+ len = snprintf(buf, sizeof(buf),
+ " %s: slots %d-%d tx_pos=%x recycle=%x",
+ label, shared->slot_first, shared->slot_last,
+ shared->tx_pos, shared->slot_queue_recycle);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Slots claimed:");
+ vchiq_dump(dump_context, buf, len + 1);
+
+ for (i = shared->slot_first; i <= shared->slot_last; i++) {
+ VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
+ if (slot_info.use_count != slot_info.release_count) {
+ len = snprintf(buf, sizeof(buf),
+ " %d: %d/%d", i, slot_info.use_count,
+ slot_info.release_count);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+ }
+
+ for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
+ len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
+ debug_names[i], shared->debug[i], shared->debug[i]);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+}
+
+void
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
+{
+ char buf[80];
+ int len;
+ int i;
+
+ len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
+ conn_state_names[state->conn_state]);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " tx_pos=%x(@%x), rx_pos=%x(@%x)",
+ state->local->tx_pos,
+ (uint32_t)state->tx_data +
+ (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->rx_pos,
+ (uint32_t)state->rx_data +
+ (state->rx_pos & VCHIQ_SLOT_MASK));
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Version: %d (min %d)",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ if (VCHIQ_ENABLE_STATS) {
+ len = snprintf(buf, sizeof(buf),
+ " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
+ "error_count=%d",
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+ state->stats.error_count);
+ vchiq_dump(dump_context, buf, len + 1);
+ }
+
+ len = snprintf(buf, sizeof(buf),
+ " Slots: %d available (%d data), %d recyclable, %d stalls "
+ "(%d data)",
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+ state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+ state->data_quota - state->data_use_count,
+ state->local->slot_queue_recycle - state->slot_queue_available,
+ state->stats.slot_stalls, state->stats.data_stalls);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ vchiq_dump_platform_state(dump_context);
+
+ vchiq_dump_shared_state(dump_context, state, state->local, "Local");
+ vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
+
+ vchiq_dump_platform_instances(dump_context);
+
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
+
+ if (service) {
+ vchiq_dump_service_state(dump_context, service);
+ unlock_service(service);
+ }
+ }
+}
+
+void
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
+{
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
+ service->localport, srvstate_names[service->srvstate],
+		service->ref_count - 1); /* Don't include the lock just taken */
+
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
+ char remoteport[30];
+ VCHIQ_SERVICE_QUOTA_T *service_quota =
+ &service->state->service_quotas[service->localport];
+ int fourcc = service->base.fourcc;
+ int tx_pending, rx_pending;
+ if (service->remoteport != VCHIQ_PORT_FREE) {
+ int len2 = snprintf(remoteport, sizeof(remoteport),
+ "%d", service->remoteport);
+ if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
+ snprintf(remoteport + len2,
+ sizeof(remoteport) - len2,
+ " (client %x)", service->client_id);
+ } else
+ strcpy(remoteport, "n/a");
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
+ VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ remoteport,
+ service_quota->message_use_count,
+ service_quota->message_quota,
+ service_quota->slot_use_count,
+ service_quota->slot_quota);
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ tx_pending = service->bulk_tx.local_insert -
+ service->bulk_tx.remote_insert;
+
+ rx_pending = service->bulk_rx.local_insert -
+ service->bulk_rx.remote_insert;
+
+ len = snprintf(buf, sizeof(buf),
+ " Bulk: tx_pending=%d (size %d),"
+ " rx_pending=%d (size %d)",
+ tx_pending,
+ tx_pending ? service->bulk_tx.bulks[
+ BULK_INDEX(service->bulk_tx.remove)].size : 0,
+ rx_pending,
+ rx_pending ? service->bulk_rx.bulks[
+ BULK_INDEX(service->bulk_rx.remove)].size : 0);
+
+ if (VCHIQ_ENABLE_STATS) {
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Ctrl: tx_count=%d, tx_bytes=%llu, "
+ "rx_count=%d, rx_bytes=%llu",
+ service->stats.ctrl_tx_count,
+ service->stats.ctrl_tx_bytes,
+ service->stats.ctrl_rx_count,
+ service->stats.ctrl_rx_bytes);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " Bulk: tx_count=%d, tx_bytes=%llu, "
+ "rx_count=%d, rx_bytes=%llu",
+ service->stats.bulk_tx_count,
+ service->stats.bulk_tx_bytes,
+ service->stats.bulk_rx_count,
+ service->stats.bulk_rx_bytes);
+ vchiq_dump(dump_context, buf, len + 1);
+
+ len = snprintf(buf, sizeof(buf),
+ " %d quota stalls, %d slot stalls, "
+ "%d bulk stalls, %d aborted, %d errors",
+ service->stats.quota_stalls,
+ service->stats.slot_stalls,
+ service->stats.bulk_stalls,
+ service->stats.bulk_aborted_count,
+ service->stats.error_count);
+ }
+ }
+
+ vchiq_dump(dump_context, buf, len + 1);
+
+ if (service->srvstate != VCHIQ_SRVSTATE_FREE)
+ vchiq_dump_platform_service_state(dump_context, service);
+}
+
+
+void
+vchiq_loud_error_header(void)
+{
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level, "=====");
+}
+
+void
+vchiq_loud_error_footer(void)
+{
+ vchiq_log_error(vchiq_core_log_level, "=====");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+ vchiq_log_error(vchiq_core_log_level,
+ "============================================================"
+ "================");
+}
+
+
+VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
+{
+ VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
+ status = queue_message(state, NULL,
+ VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
+ NULL, 0, 0, 0);
+ return status;
+}
+
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
+ size_t numBytes)
+{
+ const uint8_t *mem = (const uint8_t *)voidMem;
+ size_t offset;
+ char lineBuf[100];
+ char *s;
+
+ while (numBytes > 0) {
+ s = lineBuf;
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < numBytes)
+ s += snprintf(s, 4, "%02x ", mem[offset]);
+ else
+ s += snprintf(s, 4, " ");
+ }
+
+ for (offset = 0; offset < 16; offset++) {
+ if (offset < numBytes) {
+ uint8_t ch = mem[offset];
+
+ if ((ch < ' ') || (ch > '~'))
+ ch = '.';
+ *s++ = (char)ch;
+ }
+ }
+ *s++ = '\0';
+
+ if ((label != NULL) && (*label != '\0'))
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
+ "%s: %08x: %s", label, addr, lineBuf);
+ else
+ vchiq_log_trace(VCHIQ_LOG_TRACE,
+ "%08x: %s", addr, lineBuf);
+
+ addr += 16;
+ mem += 16;
+ if (numBytes > 16)
+ numBytes -= 16;
+ else
+ numBytes = 0;
+ }
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
new file mode 100644
index 000000000000..9be484c776d0
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -0,0 +1,712 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_CORE_H
+#define VCHIQ_CORE_H
+
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+
+#include "vchiq_cfg.h"
+
+#include "vchiq.h"
+
+/* Run time control of log level, based on KERN_XXX level. */
+#define VCHIQ_LOG_DEFAULT 4
+#define VCHIQ_LOG_ERROR 3
+#define VCHIQ_LOG_WARNING 4
+#define VCHIQ_LOG_INFO 6
+#define VCHIQ_LOG_TRACE 7
+
+#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
+
+#ifndef vchiq_log_error
+#define vchiq_log_error(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_ERROR) \
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_warning
+#define vchiq_log_warning(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_WARNING) \
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_info
+#define vchiq_log_info(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_INFO) \
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
+#ifndef vchiq_log_trace
+#define vchiq_log_trace(cat, fmt, ...) \
+ do { if (cat >= VCHIQ_LOG_TRACE) \
+ printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
+#endif
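+
+/* For example (illustrative note): with a category level left at
+ * VCHIQ_LOG_DEFAULT (4), vchiq_log_error and vchiq_log_warning messages are
+ * printed while vchiq_log_info and vchiq_log_trace are suppressed, since
+ * each macro compares the category level against its own threshold. */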
+
+#define vchiq_loud_error(...) \
+ vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
+
+#ifndef vchiq_static_assert
+#define vchiq_static_assert(cond) __attribute__((unused)) \
+ extern int vchiq_static_assert[(cond) ? 1 : -1]
+#endif
+
+#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
+
+/* Ensure that the slot size and maximum number of slots are powers of 2 */
+vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
+vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
+
+#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
+#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
+#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
+ VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
+
+#define VCHIQ_MSG_PADDING 0 /* - */
+#define VCHIQ_MSG_CONNECT 1 /* - */
+#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
+#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
+#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
+#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
+#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
+#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
+#define VCHIQ_MSG_PAUSE 10 /* - */
+#define VCHIQ_MSG_RESUME 11 /* - */
+#define VCHIQ_MSG_REMOTE_USE 12 /* - */
+#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
+#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
+
+#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
+#define VCHIQ_PORT_FREE 0x1000
+#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
+#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
+ ((type<<24) | (srcport<<12) | (dstport<<0))
+#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
+#define VCHIQ_MSG_SRCPORT(msgid) \
+ (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
+#define VCHIQ_MSG_DSTPORT(msgid) \
+ ((unsigned short)msgid & 0xfff)
+
+#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
+ ((fourcc) >> 24) & 0xff, \
+ ((fourcc) >> 16) & 0xff, \
+ ((fourcc) >> 8) & 0xff, \
+ (fourcc) & 0xff
+
+/* Ensure the fields are wide enough */
+vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
+ == 0);
+vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
+vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
+ (unsigned int)VCHIQ_PORT_FREE);
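+
+/* Worked example (illustrative only, not part of the original header): an
+ * OPEN message from local port 3 to port 0 packs the type into bits 31..24
+ * and the source/destination ports into bits 23..12 and 11..0, and the
+ * accessor macros above recover each field. */
+vchiq_static_assert(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0) == 0x02003000);
+vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0))
+	== VCHIQ_MSG_OPEN);
+vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, 3, 0))
+	== 3);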
+
+#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
+#define VCHIQ_MSGID_CLAIMED 0x40000000
+
+#define VCHIQ_FOURCC_INVALID 0x00000000
+#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
+
+#define VCHIQ_BULK_ACTUAL_ABORTED -1
+
+typedef uint32_t BITSET_T;
+
+vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
+
+#define BITSET_SIZE(b) ((b + 31) >> 5)
+#define BITSET_WORD(b) (b >> 5)
+#define BITSET_BIT(b) (1 << (b & 31))
+#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
+#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
+#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
+#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
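+
+/* Illustrative sketch only (not part of the original header): bit 40 of a
+ * bitset lives in word 1, bit 8, so the helpers above operate on bs[1] with
+ * the mask (1 << 8). The array name below is hypothetical.
+ */
+#if 0
+{
+	BITSET_T poll_bits[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+
+	BITSET_ZERO(poll_bits);
+	BITSET_SET(poll_bits, 40);		/* poll_bits[1] |= 0x100 */
+	if (BITSET_IS_SET(poll_bits, 40))
+		BITSET_CLR(poll_bits, 40);	/* poll_bits[1] &= ~0x100 */
+}
+#endif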
+
+#if VCHIQ_ENABLE_STATS
+#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
+ (service->stats. stat += addend)
+#else
+#define VCHIQ_STATS_INC(state, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
+#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
+#endif
+
+enum {
+ DEBUG_ENTRIES,
+#if VCHIQ_ENABLE_DEBUG
+ DEBUG_SLOT_HANDLER_COUNT,
+ DEBUG_SLOT_HANDLER_LINE,
+ DEBUG_PARSE_LINE,
+ DEBUG_PARSE_HEADER,
+ DEBUG_PARSE_MSGID,
+ DEBUG_AWAIT_COMPLETION_LINE,
+ DEBUG_DEQUEUE_MESSAGE_LINE,
+ DEBUG_SERVICE_CALLBACK_LINE,
+ DEBUG_MSG_QUEUE_FULL_COUNT,
+ DEBUG_COMPLETION_QUEUE_FULL_COUNT,
+#endif
+ DEBUG_MAX
+};
+
+#if VCHIQ_ENABLE_DEBUG
+
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
+#define DEBUG_TRACE(d) \
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
+#define DEBUG_VALUE(d, v) \
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
+#define DEBUG_COUNT(d) \
+ do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
+
+#else /* VCHIQ_ENABLE_DEBUG */
+
+#define DEBUG_INITIALISE(local)
+#define DEBUG_TRACE(d)
+#define DEBUG_VALUE(d, v)
+#define DEBUG_COUNT(d)
+
+#endif /* VCHIQ_ENABLE_DEBUG */
+
+typedef enum {
+ VCHIQ_CONNSTATE_DISCONNECTED,
+ VCHIQ_CONNSTATE_CONNECTING,
+ VCHIQ_CONNSTATE_CONNECTED,
+ VCHIQ_CONNSTATE_PAUSING,
+ VCHIQ_CONNSTATE_PAUSE_SENT,
+ VCHIQ_CONNSTATE_PAUSED,
+ VCHIQ_CONNSTATE_RESUMING,
+ VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
+ VCHIQ_CONNSTATE_RESUME_TIMEOUT
+} VCHIQ_CONNSTATE_T;
+
+enum {
+ VCHIQ_SRVSTATE_FREE,
+ VCHIQ_SRVSTATE_HIDDEN,
+ VCHIQ_SRVSTATE_LISTENING,
+ VCHIQ_SRVSTATE_OPENING,
+ VCHIQ_SRVSTATE_OPEN,
+ VCHIQ_SRVSTATE_OPENSYNC,
+ VCHIQ_SRVSTATE_CLOSESENT,
+ VCHIQ_SRVSTATE_CLOSERECVD,
+ VCHIQ_SRVSTATE_CLOSEWAIT,
+ VCHIQ_SRVSTATE_CLOSED
+};
+
+enum {
+ VCHIQ_POLL_TERMINATE,
+ VCHIQ_POLL_REMOVE,
+ VCHIQ_POLL_TXNOTIFY,
+ VCHIQ_POLL_RXNOTIFY,
+ VCHIQ_POLL_COUNT
+};
+
+typedef enum {
+ VCHIQ_BULK_TRANSMIT,
+ VCHIQ_BULK_RECEIVE
+} VCHIQ_BULK_DIR_T;
+
+typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
+
+typedef struct vchiq_bulk_struct {
+ short mode;
+ short dir;
+ void *userdata;
+ VCHI_MEM_HANDLE_T handle;
+ void *data;
+ int size;
+ void *remote_data;
+ int remote_size;
+ int actual;
+} VCHIQ_BULK_T;
+
+typedef struct vchiq_bulk_queue_struct {
+ int local_insert; /* Where to insert the next local bulk */
+ int remote_insert; /* Where to insert the next remote bulk (master) */
+ int process; /* Bulk to transfer next */
+ int remote_notify; /* Bulk to notify the remote client of next (mstr) */
+ int remove; /* Bulk to notify the local client of, and remove,
+ ** next */
+ VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
+} VCHIQ_BULK_QUEUE_T;
+
+typedef struct remote_event_struct {
+ int armed;
+ int fired;
+ struct semaphore *event;
+} REMOTE_EVENT_T;
+
+typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
+
+typedef struct vchiq_state_struct VCHIQ_STATE_T;
+
+typedef struct vchiq_slot_struct {
+ char data[VCHIQ_SLOT_SIZE];
+} VCHIQ_SLOT_T;
+
+typedef struct vchiq_slot_info_struct {
+ /* Use two counters rather than one to avoid the need for a mutex. */
+ short use_count;
+ short release_count;
+} VCHIQ_SLOT_INFO_T;
+
+typedef struct vchiq_service_struct {
+ VCHIQ_SERVICE_BASE_T base;
+ VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int ref_count;
+ int srvstate;
+ VCHIQ_USERDATA_TERM_T userdata_term;
+ unsigned int localport;
+ unsigned int remoteport;
+ int public_fourcc;
+ int client_id;
+ char auto_close;
+ char sync;
+ char closing;
+ char trace;
+ atomic_t poll_flags;
+ short version;
+ short version_min;
+ short peer_version;
+
+ VCHIQ_STATE_T *state;
+ VCHIQ_INSTANCE_T instance;
+
+ int service_use_count;
+
+ VCHIQ_BULK_QUEUE_T bulk_tx;
+ VCHIQ_BULK_QUEUE_T bulk_rx;
+
+ struct semaphore remove_event;
+ struct semaphore bulk_remove_event;
+ struct mutex bulk_mutex;
+
+ struct service_stats_struct {
+ int quota_stalls;
+ int slot_stalls;
+ int bulk_stalls;
+ int error_count;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int bulk_tx_count;
+ int bulk_rx_count;
+ int bulk_aborted_count;
+ uint64_t ctrl_tx_bytes;
+ uint64_t ctrl_rx_bytes;
+ uint64_t bulk_tx_bytes;
+ uint64_t bulk_rx_bytes;
+ } stats;
+} VCHIQ_SERVICE_T;
+
+/* The quota information is outside VCHIQ_SERVICE_T so that it can be
+ statically allocated, since for accounting reasons a service's slot
+ usage is carried over between users of the same port number.
+ */
+typedef struct vchiq_service_quota_struct {
+ unsigned short slot_quota;
+ unsigned short slot_use_count;
+ unsigned short message_quota;
+ unsigned short message_use_count;
+ struct semaphore quota_event;
+ int previous_tx_index;
+} VCHIQ_SERVICE_QUOTA_T;
+
+typedef struct vchiq_shared_state_struct {
+
+ /* A non-zero value here indicates that the content is valid. */
+ int initialised;
+
+ /* The first and last (inclusive) slots allocated to the owner. */
+ int slot_first;
+ int slot_last;
+
+ /* The slot allocated to synchronous messages from the owner. */
+ int slot_sync;
+
+	/* Signalling this event indicates that the owner's slot handler
+	** thread should run. */
+ REMOTE_EVENT_T trigger;
+
+ /* Indicates the byte position within the stream where the next message
+ ** will be written. The least significant bits are an index into the
+ ** slot. The next bits are the index of the slot in slot_queue. */
+ int tx_pos;
+
+ /* This event should be signalled when a slot is recycled. */
+ REMOTE_EVENT_T recycle;
+
+ /* The slot_queue index where the next recycled slot will be written. */
+ int slot_queue_recycle;
+
+ /* This event should be signalled when a synchronous message is sent. */
+ REMOTE_EVENT_T sync_trigger;
+
+ /* This event should be signalled when a synchronous message has been
+ ** released. */
+ REMOTE_EVENT_T sync_release;
+
+ /* A circular buffer of slot indexes. */
+ int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
+
+ /* Debugging state */
+ int debug[DEBUG_MAX];
+} VCHIQ_SHARED_STATE_T;
+
+typedef struct vchiq_slot_zero_struct {
+ int magic;
+ short version;
+ short version_min;
+ int slot_zero_size;
+ int slot_size;
+ int max_slots;
+ int max_slots_per_side;
+ int platform_data[2];
+ VCHIQ_SHARED_STATE_T master;
+ VCHIQ_SHARED_STATE_T slave;
+ VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
+} VCHIQ_SLOT_ZERO_T;
+
+struct vchiq_state_struct {
+ int id;
+ int initialised;
+ VCHIQ_CONNSTATE_T conn_state;
+ int is_master;
+ short version_common;
+
+ VCHIQ_SHARED_STATE_T *local;
+ VCHIQ_SHARED_STATE_T *remote;
+ VCHIQ_SLOT_T *slot_data;
+
+ unsigned short default_slot_quota;
+ unsigned short default_message_quota;
+
+ /* Event indicating connect message received */
+ struct semaphore connect;
+
+ /* Mutex protecting services */
+ struct mutex mutex;
+ VCHIQ_INSTANCE_T *instance;
+
+ /* Processes incoming messages */
+ struct task_struct *slot_handler_thread;
+
+ /* Processes recycled slots */
+ struct task_struct *recycle_thread;
+
+ /* Processes synchronous messages */
+ struct task_struct *sync_thread;
+
+ /* Local implementation of the trigger remote event */
+ struct semaphore trigger_event;
+
+ /* Local implementation of the recycle remote event */
+ struct semaphore recycle_event;
+
+ /* Local implementation of the sync trigger remote event */
+ struct semaphore sync_trigger_event;
+
+ /* Local implementation of the sync release remote event */
+ struct semaphore sync_release_event;
+
+ char *tx_data;
+ char *rx_data;
+ VCHIQ_SLOT_INFO_T *rx_info;
+
+ struct mutex slot_mutex;
+
+ struct mutex recycle_mutex;
+
+ struct mutex sync_mutex;
+
+ struct mutex bulk_transfer_mutex;
+
+ /* Indicates the byte position within the stream from where the next
+ ** message will be read. The least significant bits are an index into
+	** the slot. The next bits are the index of the slot in
+ ** remote->slot_queue. */
+ int rx_pos;
+
+ /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
+ from remote->tx_pos. */
+ int local_tx_pos;
+
+ /* The slot_queue index of the slot to become available next. */
+ int slot_queue_available;
+
+ /* A flag to indicate if any poll has been requested */
+ int poll_needed;
+
+	/* The index of the previous slot used for data messages. */
+ int previous_data_index;
+
+ /* The number of slots occupied by data messages. */
+ unsigned short data_use_count;
+
+ /* The maximum number of slots to be occupied by data messages. */
+ unsigned short data_quota;
+
+ /* An array of bit sets indicating which services must be polled. */
+ atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+
+ /* The number of the first unused service */
+ int unused_service;
+
+ /* Signalled when a free slot becomes available. */
+ struct semaphore slot_available_event;
+
+ struct semaphore slot_remove_event;
+
+ /* Signalled when a free data slot becomes available. */
+ struct semaphore data_quota_event;
+
+ /* Incremented when there are bulk transfers which cannot be processed
+ * whilst paused and must be processed on resume */
+ int deferred_bulks;
+
+ struct state_stats_struct {
+ int slot_stalls;
+ int data_stalls;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int error_count;
+ } stats;
+
+ VCHIQ_SERVICE_T *services[VCHIQ_MAX_SERVICES];
+ VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
+ VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
+
+ VCHIQ_PLATFORM_STATE_T platform_state;
+};
+
+struct bulk_waiter {
+ VCHIQ_BULK_T *bulk;
+ struct semaphore event;
+ int actual;
+};
+
+extern spinlock_t bulk_waiter_spinlock;
+
+extern int vchiq_core_log_level;
+extern int vchiq_core_msg_log_level;
+extern int vchiq_sync_log_level;
+
+extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
+
+extern const char *
+get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
+
+extern VCHIQ_SLOT_ZERO_T *
+vchiq_init_slots(void *mem_base, int mem_size);
+
+extern VCHIQ_STATUS_T
+vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
+ int is_master);
+
+extern VCHIQ_STATUS_T
+vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_SERVICE_T *
+vchiq_add_service_internal(VCHIQ_STATE_T *state,
+ const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
+ VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
+
+extern VCHIQ_STATUS_T
+vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
+
+extern VCHIQ_STATUS_T
+vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
+
+extern void
+vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_STATUS_T
+vchiq_pause_internal(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_resume_internal(VCHIQ_STATE_T *state);
+
+extern void
+remote_event_pollall(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
+
+extern void
+vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_loud_error_header(void);
+
+extern void
+vchiq_loud_error_footer(void);
+
+extern void
+request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
+
+static inline VCHIQ_SERVICE_T *
+handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
+{
+ VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
+ (VCHIQ_MAX_STATES - 1)];
+ if (!state)
+ return NULL;
+
+ return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
+}
+
+extern VCHIQ_SERVICE_T *
+find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_SERVICE_T *
+find_service_by_port(VCHIQ_STATE_T *state, int localport);
+
+extern VCHIQ_SERVICE_T *
+find_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_SERVICE_T *
+find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
+ VCHIQ_SERVICE_HANDLE_T handle);
+
+extern VCHIQ_SERVICE_T *
+next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
+ int *pidx);
+
+extern void
+lock_service(VCHIQ_SERVICE_T *service);
+
+extern void
+unlock_service(VCHIQ_SERVICE_T *service);
+
+/* The following functions are called from vchiq_core, and external
+** implementations must be provided. */
+
+extern VCHIQ_STATUS_T
+vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
+ VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
+
+extern void
+vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
+
+extern void
+vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
+
+extern VCHIQ_STATUS_T
+vchiq_copy_from_user(void *dst, const void *src, int size);
+
+extern void
+remote_event_signal(REMOTE_EVENT_T *event);
+
+void
+vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_paused(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_resume(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_resumed(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_dump(void *dump_context, const char *str, int len);
+
+extern void
+vchiq_dump_platform_state(void *dump_context);
+
+extern void
+vchiq_dump_platform_instances(void *dump_context);
+
+extern void
+vchiq_dump_platform_service_state(void *dump_context,
+ VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
+
+extern VCHIQ_STATUS_T
+vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_on_remote_use(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_on_remote_release(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_platform_init_state(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_check_service(VCHIQ_SERVICE_T *service);
+
+extern void
+vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_use(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_release(VCHIQ_STATE_T *state);
+
+extern VCHIQ_STATUS_T
+vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
+ VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
+
+extern void
+vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
+
+extern void
+vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
+
+
+extern void
+vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
+ size_t numBytes);
+
+#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
new file mode 100644
index 000000000000..7e032130d967
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -0,0 +1,383 @@
+/**
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/debugfs.h>
+#include "vchiq_core.h"
+#include "vchiq_arm.h"
+#include "vchiq_debugfs.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/****************************************************************************
+*
+* log category entries
+*
+***************************************************************************/
+#define DEBUGFS_WRITE_BUF_SIZE 256
+
+#define VCHIQ_LOG_ERROR_STR "error"
+#define VCHIQ_LOG_WARNING_STR "warning"
+#define VCHIQ_LOG_INFO_STR "info"
+#define VCHIQ_LOG_TRACE_STR "trace"
+
+
+/* Top-level debug info */
+struct vchiq_debugfs_info {
+ /* Global 'vchiq' debugfs entry used by all instances */
+ struct dentry *vchiq_cfg_dir;
+
+ /* one entry per client process */
+ struct dentry *clients;
+
+ /* log categories */
+ struct dentry *log_categories;
+};
+
+static struct vchiq_debugfs_info debugfs_info;
+
+/* Log category debugfs entries */
+struct vchiq_debugfs_log_entry {
+ const char *name;
+ int *plevel;
+ struct dentry *dir;
+};
+
+static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
+ { "core", &vchiq_core_log_level },
+ { "msg", &vchiq_core_msg_log_level },
+ { "sync", &vchiq_sync_log_level },
+ { "susp", &vchiq_susp_log_level },
+ { "arm", &vchiq_arm_log_level },
+};
+static int n_log_entries = ARRAY_SIZE(vchiq_debugfs_log_entries);
+
+
+static struct dentry *vchiq_clients_top(void);
+static struct dentry *vchiq_debugfs_top(void);
+
+static int debugfs_log_show(struct seq_file *f, void *offset)
+{
+ int *levp = f->private;
+ char *log_value = NULL;
+
+ switch (*levp) {
+ case VCHIQ_LOG_ERROR:
+ log_value = VCHIQ_LOG_ERROR_STR;
+ break;
+ case VCHIQ_LOG_WARNING:
+ log_value = VCHIQ_LOG_WARNING_STR;
+ break;
+ case VCHIQ_LOG_INFO:
+ log_value = VCHIQ_LOG_INFO_STR;
+ break;
+ case VCHIQ_LOG_TRACE:
+ log_value = VCHIQ_LOG_TRACE_STR;
+ break;
+ default:
+ break;
+ }
+
+ seq_printf(f, "%s\n", log_value ? log_value : "(null)");
+
+ return 0;
+}
+
+static int debugfs_log_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_log_show, inode->i_private);
+}
+
+static int debugfs_log_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *f = (struct seq_file *)file->private_data;
+ int *levp = f->private;
+ char kbuf[DEBUGFS_WRITE_BUF_SIZE + 1];
+
+ memset(kbuf, 0, DEBUGFS_WRITE_BUF_SIZE + 1);
+ if (count >= DEBUGFS_WRITE_BUF_SIZE)
+ count = DEBUGFS_WRITE_BUF_SIZE;
+
+ if (copy_from_user(kbuf, buffer, count) != 0)
+ return -EFAULT;
+ kbuf[count] = 0;
+
+ if (strncmp("error", kbuf, strlen("error")) == 0)
+ *levp = VCHIQ_LOG_ERROR;
+ else if (strncmp("warning", kbuf, strlen("warning")) == 0)
+ *levp = VCHIQ_LOG_WARNING;
+ else if (strncmp("info", kbuf, strlen("info")) == 0)
+ *levp = VCHIQ_LOG_INFO;
+ else if (strncmp("trace", kbuf, strlen("trace")) == 0)
+ *levp = VCHIQ_LOG_TRACE;
+ else
+ *levp = VCHIQ_LOG_DEFAULT;
+
+ *ppos += count;
+
+ return count;
+}
+
+static const struct file_operations debugfs_log_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_log_open,
+ .write = debugfs_log_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
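
As debugfs_log_write() above shows, each log-category file accepts the strings "error", "warning", "info" or "trace" (anything else falls back to VCHIQ_LOG_DEFAULT), and the directories created later in this file place the entries under <debugfs>/vchiq/log/. A minimal user-space sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug; the function name is illustrative:

/* Illustrative only: raise the "core" category to "info" from user space.
 * The debugfs mount point is an assumption, not set by this driver. */
#include <fcntl.h>
#include <unistd.h>

static int vchiq_set_core_log_info(void)
{
	int fd = open("/sys/kernel/debug/vchiq/log/core", O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, "info", 4) == 4)
		ret = 0;
	close(fd);
	return ret;
}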
+
+/* create an entry under <debugfs>/vchiq/log for each log category */
+static int vchiq_debugfs_create_log_entries(struct dentry *top)
+{
+ struct dentry *dir;
+ size_t i;
+ int ret = 0;
+ dir = debugfs_create_dir("log", vchiq_debugfs_top());
+ if (!dir)
+ return -ENOMEM;
+ debugfs_info.log_categories = dir;
+
+ for (i = 0; i < n_log_entries; i++) {
+ void *levp = (void *)vchiq_debugfs_log_entries[i].plevel;
+ dir = debugfs_create_file(vchiq_debugfs_log_entries[i].name,
+ 0644,
+ debugfs_info.log_categories,
+ levp,
+ &debugfs_log_fops);
+ if (!dir) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ vchiq_debugfs_log_entries[i].dir = dir;
+ }
+ return ret;
+}
+
+static int debugfs_usecount_show(struct seq_file *f, void *offset)
+{
+ VCHIQ_INSTANCE_T instance = f->private;
+ int use_count;
+
+ use_count = vchiq_instance_get_use_count(instance);
+ seq_printf(f, "%d\n", use_count);
+
+ return 0;
+}
+
+static int debugfs_usecount_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_usecount_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_usecount_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_usecount_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int debugfs_trace_show(struct seq_file *f, void *offset)
+{
+ VCHIQ_INSTANCE_T instance = f->private;
+ int trace;
+
+ trace = vchiq_instance_get_trace(instance);
+ seq_printf(f, "%s\n", trace ? "Y" : "N");
+
+ return 0;
+}
+
+static int debugfs_trace_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_trace_show, inode->i_private);
+}
+
+static int debugfs_trace_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *f = (struct seq_file *)file->private_data;
+ VCHIQ_INSTANCE_T instance = f->private;
+ char firstchar;
+
+ if (copy_from_user(&firstchar, buffer, 1) != 0)
+ return -EFAULT;
+
+ switch (firstchar) {
+ case 'Y':
+ case 'y':
+ case '1':
+ vchiq_instance_set_trace(instance, 1);
+ break;
+ case 'N':
+ case 'n':
+ case '0':
+ vchiq_instance_set_trace(instance, 0);
+ break;
+ default:
+ break;
+ }
+
+ *ppos += count;
+
+ return count;
+}
+
+static const struct file_operations debugfs_trace_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_trace_open,
+ .write = debugfs_trace_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* add an instance (process) to the debugfs entries */
+int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+{
+ char pidstr[16];
+ struct dentry *top, *use_count, *trace;
+ struct dentry *clients = vchiq_clients_top();
+
+ snprintf(pidstr, sizeof(pidstr), "%d",
+ vchiq_instance_get_pid(instance));
+
+ top = debugfs_create_dir(pidstr, clients);
+ if (!top)
+ goto fail_top;
+
+ use_count = debugfs_create_file("use_count",
+ 0444, top,
+ instance,
+ &debugfs_usecount_fops);
+ if (!use_count)
+ goto fail_use_count;
+
+ trace = debugfs_create_file("trace",
+ 0644, top,
+ instance,
+ &debugfs_trace_fops);
+ if (!trace)
+ goto fail_trace;
+
+ vchiq_instance_get_debugfs_node(instance)->dentry = top;
+
+ return 0;
+
+fail_trace:
+ debugfs_remove(use_count);
+fail_use_count:
+ debugfs_remove(top);
+fail_top:
+ return -ENOMEM;
+}
+
+void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_DEBUGFS_NODE_T *node = vchiq_instance_get_debugfs_node(instance);
+ debugfs_remove_recursive(node->dentry);
+}
+
+
+int vchiq_debugfs_init(void)
+{
+ BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
+
+ debugfs_info.vchiq_cfg_dir = debugfs_create_dir("vchiq", NULL);
+ if (debugfs_info.vchiq_cfg_dir == NULL)
+ goto fail;
+
+ debugfs_info.clients = debugfs_create_dir("clients",
+ vchiq_debugfs_top());
+ if (!debugfs_info.clients)
+ goto fail;
+
+ if (vchiq_debugfs_create_log_entries(vchiq_debugfs_top()) != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ vchiq_debugfs_deinit();
+ vchiq_log_error(vchiq_arm_log_level,
+ "%s: failed to create debugfs directory",
+ __func__);
+
+ return -ENOMEM;
+}
+
+/* remove all the debugfs entries */
+void vchiq_debugfs_deinit(void)
+{
+ debugfs_remove_recursive(vchiq_debugfs_top());
+}
+
+static struct dentry *vchiq_clients_top(void)
+{
+ return debugfs_info.clients;
+}
+
+static struct dentry *vchiq_debugfs_top(void)
+{
+ BUG_ON(debugfs_info.vchiq_cfg_dir == NULL);
+ return debugfs_info.vchiq_cfg_dir;
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int vchiq_debugfs_init(void)
+{
+ return 0;
+}
+
+void vchiq_debugfs_deinit(void)
+{
+}
+
+int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+{
+ return 0;
+}
+
+void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
new file mode 100644
index 000000000000..4d6a3788e9c5
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -0,0 +1,52 @@
+/**
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_DEBUGFS_H
+#define VCHIQ_DEBUGFS_H
+
+#include "vchiq_core.h"
+
+typedef struct vchiq_debugfs_node_struct
+{
+ struct dentry *dentry;
+} VCHIQ_DEBUGFS_NODE_T;
+
+int vchiq_debugfs_init(void);
+
+void vchiq_debugfs_deinit(void);
+
+int vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
+
+void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
+
+#endif /* VCHIQ_DEBUGFS_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion
new file mode 100644
index 000000000000..9f5b6344b9b7
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion
@@ -0,0 +1,87 @@
+#!/usr/bin/perl -w
+
+use strict;
+
+#
+# Generate a version from available information
+#
+
+my $prefix = shift @ARGV;
+my $root = shift @ARGV;
+
+
+if ( not defined $root ) {
+ die "usage: $0 prefix root-dir\n";
+}
+
+if ( ! -d $root ) {
+ die "root directory $root not found\n";
+}
+
+my $version = "unknown";
+my $tainted = "";
+
+if ( -d "$root/.git" ) {
+ # attempt to work out git version. only do so
+ # on a linux build host, as cygwin builds are
+ # already slow enough
+
+ if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
+ if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
+ $version = "no git version";
+ }
+ else {
+ $version = <F>;
+ $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
+ $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
+ }
+
+ if (open(G, "git --git-dir $root/.git status --porcelain|")) {
+ $tainted = <G>;
+ $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
+ $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
+ if (length $tainted) {
+ $version = join ' ', $version, "(tainted)";
+ }
+ else {
+ $version = join ' ', $version, "(clean)";
+ }
+ }
+ }
+}
+
+my $hostname = `hostname`;
+$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
+$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
+
+
+print STDERR "Version $version\n";
+print <<EOF;
+#include "${prefix}_build_info.h"
+#include <linux/broadcom/vc_debug_sym.h>
+
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
+VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
+
+const char *vchiq_get_build_hostname( void )
+{
+ return vchiq_build_hostname;
+}
+
+const char *vchiq_get_build_version( void )
+{
+ return vchiq_build_version;
+}
+
+const char *vchiq_get_build_date( void )
+{
+ return vchiq_build_date;
+}
+
+const char *vchiq_get_build_time( void )
+{
+ return vchiq_build_time;
+}
+EOF
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
new file mode 100644
index 000000000000..8067bbe7ce8d
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -0,0 +1,189 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_IF_H
+#define VCHIQ_IF_H
+
+#include "interface/vchi/vchi_mh.h"
+
+#define VCHIQ_SERVICE_HANDLE_INVALID 0
+
+#define VCHIQ_SLOT_SIZE 4096
+#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
+#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
+
+#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
+ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
+#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
+#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
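
For illustration, VCHIQ_MAKE_FOURCC simply packs four characters into an int, most significant first; a hypothetical service identifier could be built like this (the name below is an assumption, not one of the driver's services):

/* Illustrative only:
 * ('e' << 24) | ('c' << 16) | ('h' << 8) | 'o' == 0x6563686f */
#define EXAMPLE_ECHO_FOURCC	VCHIQ_MAKE_FOURCC('e', 'c', 'h', 'o')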
+
+typedef enum {
+ VCHIQ_SERVICE_OPENED, /* service, -, - */
+ VCHIQ_SERVICE_CLOSED, /* service, -, - */
+ VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
+ VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
+} VCHIQ_REASON_T;
+
+typedef enum {
+ VCHIQ_ERROR = -1,
+ VCHIQ_SUCCESS = 0,
+ VCHIQ_RETRY = 1
+} VCHIQ_STATUS_T;
+
+typedef enum {
+ VCHIQ_BULK_MODE_CALLBACK,
+ VCHIQ_BULK_MODE_BLOCKING,
+ VCHIQ_BULK_MODE_NOCALLBACK,
+ VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
+} VCHIQ_BULK_MODE_T;
+
+typedef enum {
+ VCHIQ_SERVICE_OPTION_AUTOCLOSE,
+ VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
+ VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
+ VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
+ VCHIQ_SERVICE_OPTION_TRACE
+} VCHIQ_SERVICE_OPTION_T;
+
+typedef struct vchiq_header_struct {
+ /* The message identifier - opaque to applications. */
+ int msgid;
+
+ /* Size of message data. */
+ unsigned int size;
+
+ char data[0]; /* message */
+} VCHIQ_HEADER_T;
+
+typedef struct {
+ const void *data;
+ unsigned int size;
+} VCHIQ_ELEMENT_T;
+
+typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
+
+typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
+ VCHIQ_SERVICE_HANDLE_T, void *);
+
+typedef struct vchiq_service_base_struct {
+ int fourcc;
+ VCHIQ_CALLBACK_T callback;
+ void *userdata;
+} VCHIQ_SERVICE_BASE_T;
+
+typedef struct vchiq_service_params_struct {
+ int fourcc;
+ VCHIQ_CALLBACK_T callback;
+ void *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+} VCHIQ_SERVICE_PARAMS_T;
+
+typedef struct vchiq_config_struct {
+ unsigned int max_msg_size;
+ unsigned int bulk_threshold; /* The message size above which it
+ is better to use a bulk transfer
+ (<= max_msg_size) */
+ unsigned int max_outstanding_bulks;
+ unsigned int max_services;
+ short version; /* The version of VCHIQ */
+ short version_min; /* The minimum compatible version of VCHIQ */
+} VCHIQ_CONFIG_T;
+
+typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
+typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
+
+extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
+extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
+extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
+extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *pservice);
+extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *pservice);
+extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
+ VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
+
+extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
+ const VCHIQ_ELEMENT_T *elements, unsigned int count);
+extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
+ VCHIQ_HEADER_T *header);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+ const void *data, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
+ const void *offset, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
+ VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
+ void *offset, unsigned int size, void *userdata);
+extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+ const void *data, unsigned int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ void *data, unsigned int size, void *userdata,
+ VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
+ VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
+ void *userdata, VCHIQ_BULK_MODE_T mode);
+extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
+ VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
+ void *userdata, VCHIQ_BULK_MODE_T mode);
+extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
+extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
+extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
+extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
+ int config_size, VCHIQ_CONFIG_T *pconfig);
+extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
+ VCHIQ_SERVICE_OPTION_T option, int value);
+
+extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
+ VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
+extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
+
+extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
+ void *ptr, size_t num_bytes);
+
+extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
+ short *peer_version);
+
+#endif /* VCHIQ_IF_H */
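
Taken together, the prototypes above imply the kernel-side calling sequence: initialise an instance, connect it, open a service with a callback, then queue messages and release them when the callback reports VCHIQ_MESSAGE_AVAILABLE. A minimal sketch under those assumptions; the fourcc, the callback body and the simplified error handling are illustrative, not taken from the driver:

/* Illustrative only: open a service and send one small message. */
static VCHIQ_STATUS_T example_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T example_send(void)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('e', 'c', 'h', 'o'),
		.callback    = example_callback,
		.userdata    = NULL,
		.version     = 1,
		.version_min = 1,
	};
	const char msg[] = "hello";
	VCHIQ_ELEMENT_T element = { msg, sizeof(msg) };
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;
	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS)
		goto out;
	status = vchiq_open_service(instance, &params, &handle);
	if (status != VCHIQ_SUCCESS)
		goto out;

	status = vchiq_queue_message(handle, &element, 1);

	vchiq_close_service(handle);
out:
	vchiq_shutdown(instance);
	return status;
}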
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
new file mode 100644
index 000000000000..6137ae9de1c1
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -0,0 +1,131 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_IOCTLS_H
+#define VCHIQ_IOCTLS_H
+
+#include <linux/ioctl.h>
+#include "vchiq_if.h"
+
+#define VCHIQ_IOC_MAGIC 0xc4
+#define VCHIQ_INVALID_HANDLE (~0)
+
+typedef struct {
+ VCHIQ_SERVICE_PARAMS_T params;
+ int is_open;
+ int is_vchi;
+ unsigned int handle; /* OUT */
+} VCHIQ_CREATE_SERVICE_T;
+
+typedef struct {
+ unsigned int handle;
+ unsigned int count;
+ const VCHIQ_ELEMENT_T *elements;
+} VCHIQ_QUEUE_MESSAGE_T;
+
+typedef struct {
+ unsigned int handle;
+ void *data;
+ unsigned int size;
+ void *userdata;
+ VCHIQ_BULK_MODE_T mode;
+} VCHIQ_QUEUE_BULK_TRANSFER_T;
+
+typedef struct {
+ VCHIQ_REASON_T reason;
+ VCHIQ_HEADER_T *header;
+ void *service_userdata;
+ void *bulk_userdata;
+} VCHIQ_COMPLETION_DATA_T;
+
+typedef struct {
+ unsigned int count;
+ VCHIQ_COMPLETION_DATA_T *buf;
+ unsigned int msgbufsize;
+ unsigned int msgbufcount; /* IN/OUT */
+ void **msgbufs;
+} VCHIQ_AWAIT_COMPLETION_T;
+
+typedef struct {
+ unsigned int handle;
+ int blocking;
+ unsigned int bufsize;
+ void *buf;
+} VCHIQ_DEQUEUE_MESSAGE_T;
+
+typedef struct {
+ unsigned int config_size;
+ VCHIQ_CONFIG_T *pconfig;
+} VCHIQ_GET_CONFIG_T;
+
+typedef struct {
+ unsigned int handle;
+ VCHIQ_SERVICE_OPTION_T option;
+ int value;
+} VCHIQ_SET_SERVICE_OPTION_T;
+
+typedef struct {
+ void *virt_addr;
+ size_t num_bytes;
+} VCHIQ_DUMP_MEM_T;
+
+#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
+#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
+#define VCHIQ_IOC_CREATE_SERVICE \
+ _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
+#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
+#define VCHIQ_IOC_QUEUE_MESSAGE \
+ _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
+ _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
+ _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
+#define VCHIQ_IOC_AWAIT_COMPLETION \
+ _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
+#define VCHIQ_IOC_DEQUEUE_MESSAGE \
+ _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
+#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
+#define VCHIQ_IOC_GET_CONFIG \
+ _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
+#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
+#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
+#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
+#define VCHIQ_IOC_SET_SERVICE_OPTION \
+ _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
+#define VCHIQ_IOC_DUMP_PHYS_MEM \
+ _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
+#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16)
+#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17)
+#define VCHIQ_IOC_MAX 17
+
+#endif
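
These ioctls are issued against the character device registered by vchiq_arm; the /dev/vchiq node name used below is the driver's customary one but is an assumption as far as this header is concerned, as is the availability of these headers to user space. A minimal sketch querying the configuration:

/* Illustrative only: fetch the VCHIQ configuration from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"	/* pulls in vchiq_if.h for the types */

static int print_vchiq_config(void)
{
	VCHIQ_CONFIG_T config;
	VCHIQ_GET_CONFIG_T args = {
		.config_size = sizeof(config),
		.pconfig = &config,
	};
	int fd = open("/dev/vchiq", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VCHIQ_IOC_GET_CONFIG, &args) != 0) {
		close(fd);
		return -1;
	}
	printf("version %d (min %d), max_msg_size %u\n",
	       config.version, config.version_min, config.max_msg_size);
	return close(fd);
}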
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
new file mode 100644
index 000000000000..25e7011edc50
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -0,0 +1,458 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "vchiq_core.h"
+#include "vchiq_arm.h"
+#include "vchiq_killable.h"
+
+/* ---- Public Variables ------------------------------------------------- */
+
+/* ---- Private Constants and Types -------------------------------------- */
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance_struct {
+ VCHIQ_STATE_T *state;
+
+ int connected;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+};
+
+static VCHIQ_STATUS_T
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, VCHIQ_BULK_DIR_T dir);
+
+/****************************************************************************
+*
+* vchiq_initialise
+*
+***************************************************************************/
+#define VCHIQ_INIT_RETRIES 10
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_STATE_T *state;
+ VCHIQ_INSTANCE_T instance = NULL;
+ int i;
+
+ vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
+
+ /* VideoCore may not be ready due to boot up timing.  It may never
+ ** be ready if kernel and firmware are mismatched, so don't block
+ ** forever. */
+ for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
+ state = vchiq_get_state();
+ if (state)
+ break;
+ udelay(500);
+ }
+ if (i == VCHIQ_INIT_RETRIES) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s: videocore not initialized\n", __func__);
+ goto failed;
+ } else if (i > 0) {
+ vchiq_log_warning(vchiq_core_log_level,
+ "%s: videocore initialized after %d retries\n", __func__, i);
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s: error allocating vchiq instance\n", __func__);
+ goto failed;
+ }
+
+ instance->connected = 0;
+ instance->state = state;
+ mutex_init(&instance->bulk_waiter_list_mutex);
+ INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+ *instanceOut = instance;
+
+ status = VCHIQ_SUCCESS;
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_initialise);
+
+/****************************************************************************
+*
+* vchiq_shutdown
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ if (mutex_lock_interruptible(&state->mutex) != 0)
+ return VCHIQ_RETRY;
+
+ /* Remove all services */
+ status = vchiq_shutdown_internal(state, instance);
+
+ mutex_unlock(&state->mutex);
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ if (status == VCHIQ_SUCCESS) {
+ struct list_head *pos, *next;
+ list_for_each_safe(pos, next,
+ &instance->bulk_waiter_list) {
+ struct bulk_waiter_node *waiter;
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ vchiq_log_info(vchiq_arm_log_level,
+ "bulk_waiter - cleaned up %x "
+ "for pid %d",
+ (unsigned int)waiter, waiter->pid);
+ kfree(waiter);
+ }
+ kfree(instance);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_shutdown);
+
+/****************************************************************************
+*
+* vchiq_is_connected
+*
+***************************************************************************/
+
+int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
+{
+ return instance->connected;
+}
+
+/****************************************************************************
+*
+* vchiq_connect
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ if (mutex_lock_interruptible(&state->mutex) != 0) {
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s: call to mutex_lock failed", __func__);
+ status = VCHIQ_RETRY;
+ goto failed;
+ }
+ status = vchiq_connect_internal(state, instance);
+
+ if (status == VCHIQ_SUCCESS)
+ instance->connected = 1;
+
+ mutex_unlock(&state->mutex);
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_connect);
+
+/****************************************************************************
+*
+* vchiq_add_service
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_add_service(
+ VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *phandle)
+{
+ VCHIQ_STATUS_T status;
+ VCHIQ_STATE_T *state = instance->state;
+ VCHIQ_SERVICE_T *service = NULL;
+ int srvstate;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ srvstate = vchiq_is_connected(instance)
+ ? VCHIQ_SRVSTATE_LISTENING
+ : VCHIQ_SRVSTATE_HIDDEN;
+
+ service = vchiq_add_service_internal(
+ state,
+ params,
+ srvstate,
+ instance,
+ NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ status = VCHIQ_SUCCESS;
+ } else
+ status = VCHIQ_ERROR;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_add_service);
+
+/****************************************************************************
+*
+* vchiq_open_service
+*
+***************************************************************************/
+
+VCHIQ_STATUS_T vchiq_open_service(
+ VCHIQ_INSTANCE_T instance,
+ const VCHIQ_SERVICE_PARAMS_T *params,
+ VCHIQ_SERVICE_HANDLE_T *phandle)
+{
+ VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ VCHIQ_STATE_T *state = instance->state;
+ VCHIQ_SERVICE_T *service = NULL;
+
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p) called", __func__, instance);
+
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+ if (!vchiq_is_connected(instance))
+ goto failed;
+
+ service = vchiq_add_service_internal(state,
+ params,
+ VCHIQ_SRVSTATE_OPENING,
+ instance,
+ NULL);
+
+ if (service) {
+ *phandle = service->handle;
+ status = vchiq_open_service_internal(service, current->pid);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_remove_service(service->handle);
+ *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+ }
+ }
+
+failed:
+ vchiq_log_trace(vchiq_core_log_level,
+ "%s(%p): returning %d", __func__, instance, status);
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_open_service);
+
+VCHIQ_STATUS_T
+vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
+ const void *data, unsigned int size, void *userdata)
+{
+ return vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
+}
+EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
+
+VCHIQ_STATUS_T
+vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, void *userdata)
+{
+ return vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
+}
+EXPORT_SYMBOL(vchiq_queue_bulk_receive);
+
+VCHIQ_STATUS_T
+vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+{
+ VCHIQ_STATUS_T status;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ status = vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
+ mode, VCHIQ_BULK_TRANSMIT);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ status = vchiq_blocking_bulk_transfer(handle,
+ (void *)data, size, VCHIQ_BULK_TRANSMIT);
+ break;
+ default:
+ return VCHIQ_ERROR;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_bulk_transmit);
+
+VCHIQ_STATUS_T
+vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+{
+ VCHIQ_STATUS_T status;
+
+ switch (mode) {
+ case VCHIQ_BULK_MODE_NOCALLBACK:
+ case VCHIQ_BULK_MODE_CALLBACK:
+ status = vchiq_bulk_transfer(handle,
+ VCHI_MEM_HANDLE_INVALID, data, size, userdata,
+ mode, VCHIQ_BULK_RECEIVE);
+ break;
+ case VCHIQ_BULK_MODE_BLOCKING:
+ status = vchiq_blocking_bulk_transfer(handle,
+ (void *)data, size, VCHIQ_BULK_RECEIVE);
+ break;
+ default:
+ return VCHIQ_ERROR;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(vchiq_bulk_receive);
+
+static VCHIQ_STATUS_T
+vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
+ unsigned int size, VCHIQ_BULK_DIR_T dir)
+{
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_SERVICE_T *service;
+ VCHIQ_STATUS_T status;
+ struct bulk_waiter_node *waiter = NULL;
+ struct list_head *pos;
+
+ service = find_service_by_handle(handle);
+ if (!service)
+ return VCHIQ_ERROR;
+
+ instance = service->instance;
+
+ unlock_service(service);
+
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each(pos, &instance->bulk_waiter_list) {
+ if (list_entry(pos, struct bulk_waiter_node,
+ list)->pid == current->pid) {
+ waiter = list_entry(pos,
+ struct bulk_waiter_node,
+ list);
+ list_del(pos);
+ break;
+ }
+ }
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+
+ if (waiter) {
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+ if (bulk) {
+ /* This thread has an outstanding bulk transfer. */
+ if ((bulk->data != data) ||
+ (bulk->size != size)) {
+ /* This is not a retry of the previous one.
+ ** Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ }
+ }
+
+ if (!waiter) {
+ waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
+ if (!waiter) {
+ vchiq_log_error(vchiq_core_log_level,
+ "%s - out of memory", __func__);
+ return VCHIQ_ERROR;
+ }
+ }
+
+ status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
+ data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
+ dir);
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+ if (bulk) {
+ /* Cancel the signal when the transfer
+ ** completes. */
+ spin_lock(&bulk_waiter_spinlock);
+ bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ kfree(waiter);
+ } else {
+ waiter->pid = current->pid;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ vchiq_log_info(vchiq_arm_log_level,
+ "saved bulk_waiter %x for pid %d",
+ (unsigned int)waiter, current->pid);
+ }
+
+ return status;
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
new file mode 100644
index 000000000000..335446e05476
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
@@ -0,0 +1,69 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_KILLABLE_H
+#define VCHIQ_KILLABLE_H
+
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+
+#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTRAP) | sigmask(SIGSTOP) | sigmask(SIGCONT))
+
+static inline int __must_check down_interruptible_killable(struct semaphore *sem)
+{
+ /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
+ int ret;
+ sigset_t blocked, oldset;
+ siginitsetinv(&blocked, SHUTDOWN_SIGS);
+ sigprocmask(SIG_SETMASK, &blocked, &oldset);
+ ret = down_interruptible(sem);
+ sigprocmask(SIG_SETMASK, &oldset, NULL);
+ return ret;
+}
+#define down_interruptible down_interruptible_killable
+
+
+static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
+{
+ /* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
+ int ret;
+ sigset_t blocked, oldset;
+ siginitsetinv(&blocked, SHUTDOWN_SIGS);
+ sigprocmask(SIG_SETMASK, &blocked, &oldset);
+ ret = mutex_lock_interruptible(lock);
+ sigprocmask(SIG_SETMASK, &oldset, NULL);
+ return ret;
+}
+#define mutex_lock_interruptible mutex_lock_interruptible_killable
+
+#endif
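
Because of the #define overrides above, any file that includes this header transparently gets the killable variants: plain-looking calls to down_interruptible() or mutex_lock_interruptible() block every signal except the SHUTDOWN_SIGS set for the duration of the wait. A small sketch of the effect; the function name is illustrative:

/* Illustrative only: with vchiq_killable.h included, this is really
 * down_interruptible_killable(), so e.g. SIGALRM cannot interrupt it. */
static int wait_killable_only(struct semaphore *event)
{
	return down_interruptible(event);
}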
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
new file mode 100644
index 000000000000..d02e7764bd0d
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_MEMDRV_H
+#define VCHIQ_MEMDRV_H
+
+/* ---- Include Files ----------------------------------------------------- */
+
+#include <linux/kernel.h>
+#include "vchiq_if.h"
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+typedef struct {
+ void *armSharedMemVirt;
+ dma_addr_t armSharedMemPhys;
+ size_t armSharedMemSize;
+
+ void *vcSharedMemVirt;
+ dma_addr_t vcSharedMemPhys;
+ size_t vcSharedMemSize;
+} VCHIQ_SHARED_MEM_INFO_T;
+
+/* ---- Variable Externs ------------------------------------------------- */
+
+/* ---- Function Prototypes ---------------------------------------------- */
+
+void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
+
+VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
+
+VCHIQ_STATUS_T vchiq_userdrv_create_instance(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+VCHIQ_STATUS_T vchiq_userdrv_suspend(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+VCHIQ_STATUS_T vchiq_userdrv_resume(
+ const VCHIQ_PLATFORM_DATA_T * platform_data);
+
+#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
new file mode 100644
index 000000000000..54a3ecec69ef
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -0,0 +1,58 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_PAGELIST_H
+#define VCHIQ_PAGELIST_H
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define CACHE_LINE_SIZE 32
+#define PAGELIST_WRITE 0
+#define PAGELIST_READ 1
+#define PAGELIST_READ_WITH_FRAGMENTS 2
+
+typedef struct pagelist_struct {
+ unsigned long length;
+ unsigned short type;
+ unsigned short offset;
+ unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
+ pages at consecutive addresses. */
+} PAGELIST_T;
+
+typedef struct fragments_struct {
+ char headbuf[CACHE_LINE_SIZE];
+ char tailbuf[CACHE_LINE_SIZE];
+} FRAGMENTS_T;
+
+#endif /* VCHIQ_PAGELIST_H */
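
The note against addrs[] above describes a simple run-length encoding: each entry holds a page-aligned bus address in its upper bits and, in the 12 least-significant bits, the number of further pages that follow contiguously. A sketch of how such a list could be walked; the helper, and passing the entry count separately, are illustrative assumptions rather than driver code:

/* Illustrative only: sum the pages covered by a run-length-encoded
 * addrs[] array.  entry & ~(PAGE_SIZE - 1) is the bus address of the
 * first page in a run; the 12 LSBs count the extra pages after it. */
static unsigned long pagelist_total_pages(const PAGELIST_T *pagelist,
					  unsigned int num_entries)
{
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		unsigned long entry = pagelist->addrs[i];

		total += (entry & (PAGE_SIZE - 1UL)) + 1;
	}
	return total;
}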
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
new file mode 100644
index 000000000000..8072ff613636
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -0,0 +1,860 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "interface/vchi/vchi.h"
+#include "vchiq.h"
+#include "vchiq_core.h"
+
+#include "vchiq_util.h"
+
+#include <linux/stddef.h>
+
+#define vchiq_status_to_vchi(status) ((int32_t)(status))
+
+typedef struct {
+ VCHIQ_SERVICE_HANDLE_T handle;
+
+ VCHIU_QUEUE_T queue;
+
+ VCHI_CALLBACK_T callback;
+ void *callback_param;
+} SHIM_SERVICE_T;
+
+/* ----------------------------------------------------------------------
+ * return pointer to the mphi message driver function table
+ * -------------------------------------------------------------------- */
+const VCHI_MESSAGE_DRIVER_T *
+vchi_mphi_message_driver_func_table(void)
+{
+ return NULL;
+}
+
+/* ----------------------------------------------------------------------
+ * return a pointer to the 'single' connection driver fops
+ * -------------------------------------------------------------------- */
+const VCHI_CONNECTION_API_T *
+single_get_func_table(void)
+{
+ return NULL;
+}
+
+VCHI_CONNECTION_T *vchi_create_connection(
+ const VCHI_CONNECTION_API_T *function_table,
+ const VCHI_MESSAGE_DRIVER_T *low_level)
+{
+ (void)function_table;
+ (void)low_level;
+ return NULL;
+}
+
+/***********************************************************
+ * Name: vchi_msg_peek
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * void **data,
+ * uint32_t *msg_size,
+ * VCHI_FLAGS_T flags
+ *
+ * Description: Routine to return a pointer to the current message (to allow in
+ * place processing). The message can be removed using
+ * vchi_msg_remove when you're finished
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_peek(&service->queue);
+
+ *data = header->data;
+ *msg_size = header->size;
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_peek);
+
+/***********************************************************
+ * Name: vchi_msg_remove
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ *
+ * Description: Routine to remove a message (after it has been read with
+ * vchi_msg_peek)
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ vchiq_release_message(service->handle, header);
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_remove);
+
+/***********************************************************
+ * Name: vchi_msg_queue
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * const void *data,
+ * uint32_t data_size,
+ * VCHI_FLAGS_T flags,
+ * void *msg_handle,
+ *
+ * Description: Thin wrapper to queue a message onto a connection
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+ const void *data,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *msg_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_ELEMENT_T element = {data, data_size};
+ VCHIQ_STATUS_T status;
+
+ (void)msg_handle;
+
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+
+ status = vchiq_queue_message(service->handle, &element, 1);
+
+ /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_queue_message(service->handle, &element, 1);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_msg_queue);
+
+/***********************************************************
+ * Name: vchi_bulk_queue_receive
+ *
+ * Arguments: VCHI_BULK_HANDLE_T handle,
+ * void *data_dst,
+ * const uint32_t data_size,
+ * VCHI_FLAGS_T flags
+ * void *bulk_handle
+ *
+ * Description: Routine to setup a rcv buffer
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
+ void *data_dst,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_BULK_MODE_T mode;
+ VCHIQ_STATUS_T status;
+
+ switch ((int)flags) {
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ WARN_ON(!service->callback);
+ mode = VCHIQ_BULK_MODE_CALLBACK;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
+ mode = VCHIQ_BULK_MODE_BLOCKING;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ case VCHI_FLAGS_NONE:
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
+ break;
+ default:
+ WARN(1, "unsupported message\n");
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
+ }
+
+ status = vchiq_bulk_receive(service->handle, data_dst, data_size,
+ bulk_handle, mode);
+
+ /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_bulk_receive(service->handle, data_dst,
+ data_size, bulk_handle, mode);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_bulk_queue_receive);
+
+/***********************************************************
+ * Name: vchi_bulk_queue_transmit
+ *
+ * Arguments: VCHI_BULK_HANDLE_T handle,
+ * const void *data_src,
+ * uint32_t data_size,
+ * VCHI_FLAGS_T flags,
+ * void *bulk_handle
+ *
+ * Description: Routine to transmit some data
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
+ const void *data_src,
+ uint32_t data_size,
+ VCHI_FLAGS_T flags,
+ void *bulk_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_BULK_MODE_T mode;
+ VCHIQ_STATUS_T status;
+
+ switch ((int)flags) {
+ case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
+ | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ WARN_ON(!service->callback);
+ mode = VCHIQ_BULK_MODE_CALLBACK;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
+ case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
+ mode = VCHIQ_BULK_MODE_BLOCKING;
+ break;
+ case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
+ case VCHI_FLAGS_NONE:
+ mode = VCHIQ_BULK_MODE_NOCALLBACK;
+ break;
+ default:
+ WARN(1, "unsupported message\n");
+ return vchiq_status_to_vchi(VCHIQ_ERROR);
+ }
+
+ status = vchiq_bulk_transmit(service->handle, data_src, data_size,
+ bulk_handle, mode);
+
+ /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
+ ** implement a retry mechanism since this function is supposed
+ ** to block until queued
+ */
+ while (status == VCHIQ_RETRY) {
+ msleep(1);
+ status = vchiq_bulk_transmit(service->handle, data_src,
+ data_size, bulk_handle, mode);
+ }
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_bulk_queue_transmit);
+
+/***********************************************************
+ * Name: vchi_msg_dequeue
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void *data,
+ * uint32_t max_data_size_to_read,
+ * uint32_t *actual_msg_size
+ * VCHI_FLAGS_T flags
+ *
+ * Description: Routine to dequeue a message into the supplied buffer
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
+ void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size,
+ VCHI_FLAGS_T flags)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ memcpy(data, header->data, header->size < max_data_size_to_read ?
+ header->size : max_data_size_to_read);
+
+ *actual_msg_size = header->size;
+
+ vchiq_release_message(service->handle, header);
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_dequeue);
+
+/***********************************************************
+ * Name: vchi_msg_queuev
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * VCHI_MSG_VECTOR_T *vector,
+ * uint32_t count,
+ * VCHI_FLAGS_T flags,
+ * void *msg_handle
+ *
+ * Description: Thin wrapper to queue a message onto a connection
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+
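+/* These asserts allow vchi_msg_queuev() to pass a VCHI_MSG_VECTOR_T array
+ * straight to vchiq_queue_message() as an array of VCHIQ_ELEMENT_Ts.
+ */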
+vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
+ offsetof(VCHIQ_ELEMENT_T, data));
+vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
+ offsetof(VCHIQ_ELEMENT_T, size));
+
+int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
+ VCHI_MSG_VECTOR_T *vector,
+ uint32_t count,
+ VCHI_FLAGS_T flags,
+ void *msg_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
+ (void)msg_handle;
+
+ WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+
+ return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
+ (const VCHIQ_ELEMENT_T *)vector, count));
+}
+EXPORT_SYMBOL(vchi_msg_queuev);
+
+/***********************************************************
+ * Name: vchi_held_msg_release
+ *
+ * Arguments: VCHI_HELD_MSG_T *message
+ *
+ * Description: Routine to release a held message (after it has been read with
+ * vchi_msg_hold)
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
+{
+ vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
+ (VCHIQ_HEADER_T *)message->message);
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_held_msg_release);
+
+/***********************************************************
+ * Name: vchi_msg_hold
+ *
+ * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * void **data,
+ * uint32_t *msg_size,
+ * VCHI_FLAGS_T flags,
+ * VCHI_HELD_MSG_T *message_handle
+ *
+ * Description: Routine to return a pointer to the current message (to allow
+ * in place processing). The message is dequeued - don't forget
+ * to release the message using vchi_held_msg_release when you're
+ * finished.
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
+ void **data,
+ uint32_t *msg_size,
+ VCHI_FLAGS_T flags,
+ VCHI_HELD_MSG_T *message_handle)
+{
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_HEADER_T *header;
+
+ WARN_ON((flags != VCHI_FLAGS_NONE) &&
+ (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
+
+ if (flags == VCHI_FLAGS_NONE)
+ if (vchiu_queue_is_empty(&service->queue))
+ return -1;
+
+ header = vchiu_queue_pop(&service->queue);
+
+ *data = header->data;
+ *msg_size = header->size;
+
+ message_handle->service =
+ (struct opaque_vchi_service_t *)service->handle;
+ message_handle->message = header;
+
+ return 0;
+}
+EXPORT_SYMBOL(vchi_msg_hold);
+
+/***********************************************************
+ * Name: vchi_initialise
+ *
+ * Arguments: VCHI_INSTANCE_T *instance_handle
+ *
+ * Description: Initialises the hardware but does not transmit anything.
+ * When run as a Host App this will be called twice, hence the need
+ * to malloc the state information.
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+
+int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
+{
+ VCHIQ_INSTANCE_T instance;
+ VCHIQ_STATUS_T status;
+
+ status = vchiq_initialise(&instance);
+
+ *instance_handle = (VCHI_INSTANCE_T)instance;
+
+ return vchiq_status_to_vchi(status);
+}
+EXPORT_SYMBOL(vchi_initialise);
+
+/***********************************************************
+ * Name: vchi_connect
+ *
+ * Arguments: VCHI_CONNECTION_T **connections
+ * const uint32_t num_connections
+ * VCHI_INSTANCE_T instance_handle)
+ *
+ * Description: Starts the command service on each connection,
+ * causing INIT messages to be pinged back and forth
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+int32_t vchi_connect(VCHI_CONNECTION_T **connections,
+ const uint32_t num_connections,
+ VCHI_INSTANCE_T instance_handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+
+ (void)connections;
+ (void)num_connections;
+
+ return vchiq_connect(instance);
+}
+EXPORT_SYMBOL(vchi_connect);
+
+/***********************************************************
+ * Name: vchi_disconnect
+ *
+ * Arguments: VCHI_INSTANCE_T instance_handle
+ *
+ * Description: Stops the command service on each connection,
+ * causing DE-INIT messages to be pinged back and forth
+ *
+ * Returns: 0 if successful, failure otherwise
+ *
+ ***********************************************************/
+int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+
+ return vchiq_status_to_vchi(vchiq_shutdown(instance));
+}
+EXPORT_SYMBOL(vchi_disconnect);
+
+/***********************************************************
+ * Name: vchi_service_open
+ * Name: vchi_service_create
+ *
+ * Arguments: VCHI_INSTANCE_T *instance_handle
+ * SERVICE_CREATION_T *setup,
+ * VCHI_SERVICE_HANDLE_T *handle
+ *
+ * Description: Routine to open a service
+ *
+ * Returns: int32_t - success == 0
+ *
+ ***********************************************************/
+
+static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
+ VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
+{
+ SHIM_SERVICE_T *service =
+ (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
+
+ if (!service->callback)
+ goto release;
+
+ switch (reason) {
+ case VCHIQ_MESSAGE_AVAILABLE:
+ vchiu_queue_push(&service->queue, header);
+
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_MSG_AVAILABLE, NULL);
+
+ goto done;
+
+ case VCHIQ_BULK_TRANSMIT_DONE:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_SENT, bulk_user);
+ break;
+
+ case VCHIQ_BULK_RECEIVE_DONE:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
+ break;
+
+ case VCHIQ_SERVICE_CLOSED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_SERVICE_CLOSED, NULL);
+ break;
+
+ case VCHIQ_SERVICE_OPENED:
+ /* No equivalent VCHI reason */
+ break;
+
+ case VCHIQ_BULK_TRANSMIT_ABORTED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
+ bulk_user);
+ break;
+
+ case VCHIQ_BULK_RECEIVE_ABORTED:
+ service->callback(service->callback_param,
+ VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
+ bulk_user);
+ break;
+
+ default:
+ WARN(1, "not supported\n");
+ break;
+ }
+
+release:
+ vchiq_release_message(service->handle, header);
+done:
+ return VCHIQ_SUCCESS;
+}
+
+static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
+ SERVICE_CREATION_T *setup)
+{
+ SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
+
+ (void)instance;
+
+ if (service) {
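+ /* vchiu_queue_init() returns non-zero on success */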
+ if (vchiu_queue_init(&service->queue, 64)) {
+ service->callback = setup->callback;
+ service->callback_param = setup->callback_param;
+ } else {
+ kfree(service);
+ service = NULL;
+ }
+ }
+
+ return service;
+}
+
+static void service_free(SHIM_SERVICE_T *service)
+{
+ if (service) {
+ vchiu_queue_delete(&service->queue);
+ kfree(service);
+ }
+}
+
+int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
+
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
+
+ if (service) {
+ VCHIQ_SERVICE_PARAMS_T params;
+ VCHIQ_STATUS_T status;
+
+ memset(&params, 0, sizeof(params));
+ params.fourcc = setup->service_id;
+ params.callback = shim_callback;
+ params.userdata = service;
+ params.version = setup->version.version;
+ params.version_min = setup->version.version_min;
+
+ status = vchiq_open_service(instance, &params,
+ &service->handle);
+ if (status != VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ *handle = NULL;
+ }
+ }
+
+ return service ? 0 : -1;
+}
+EXPORT_SYMBOL(vchi_service_open);
+
+int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
+ SERVICE_CREATION_T *setup,
+ VCHI_SERVICE_HANDLE_T *handle)
+{
+ VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ SHIM_SERVICE_T *service = service_alloc(instance, setup);
+
+ *handle = (VCHI_SERVICE_HANDLE_T)service;
+
+ if (service) {
+ VCHIQ_SERVICE_PARAMS_T params;
+ VCHIQ_STATUS_T status;
+
+ memset(&params, 0, sizeof(params));
+ params.fourcc = setup->service_id;
+ params.callback = shim_callback;
+ params.userdata = service;
+ params.version = setup->version.version;
+ params.version_min = setup->version.version_min;
+ status = vchiq_add_service(instance, &params, &service->handle);
+
+ if (status != VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ *handle = NULL;
+ }
+ }
+
+ return service ? 0 : -1;
+}
+EXPORT_SYMBOL(vchi_service_create);
+
+int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
+ if (status == VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ }
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_close);
+
+int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+ if (status == VCHIQ_SUCCESS) {
+ service_free(service);
+ service = NULL;
+ }
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_destroy);
+
+int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
+ VCHI_SERVICE_OPTION_T option,
+ int value)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ VCHIQ_SERVICE_OPTION_T vchiq_option;
+ switch (option) {
+ case VCHI_SERVICE_OPTION_TRACE:
+ vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
+ break;
+ case VCHI_SERVICE_OPTION_SYNCHRONOUS:
+ vchiq_option = VCHIQ_SERVICE_OPTION_SYNCHRONOUS;
+ break;
+ default:
+ service = NULL;
+ break;
+ }
+ if (service) {
+ VCHIQ_STATUS_T status =
+ vchiq_set_service_option(service->handle,
+ vchiq_option,
+ value);
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_set_option);
+
+int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
+ if (service) {
+ VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
+
+ ret = vchiq_status_to_vchi(status);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(vchi_get_peer_version);
+
+/* ----------------------------------------------------------------------
+ * read a uint32_t from buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+uint32_t
+vchi_readbuf_uint32(const void *_ptr)
+{
+ const unsigned char *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
+}
+
+/* ----------------------------------------------------------------------
+ * write a uint32_t to buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+void
+vchi_writebuf_uint32(void *_ptr, uint32_t value)
+{
+ unsigned char *ptr = _ptr;
+ ptr[0] = (unsigned char)((value >> 0) & 0xFF);
+ ptr[1] = (unsigned char)((value >> 8) & 0xFF);
+ ptr[2] = (unsigned char)((value >> 16) & 0xFF);
+ ptr[3] = (unsigned char)((value >> 24) & 0xFF);
+}
+
+/* ----------------------------------------------------------------------
+ * read a uint16_t from buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+uint16_t
+vchi_readbuf_uint16(const void *_ptr)
+{
+ const unsigned char *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8);
+}
+
+/* ----------------------------------------------------------------------
+ * write a uint16_t into the buffer.
+ * network format is defined to be little endian
+ * -------------------------------------------------------------------- */
+void
+vchi_writebuf_uint16(void *_ptr, uint16_t value)
+{
+ unsigned char *ptr = _ptr;
+ ptr[0] = (value >> 0) & 0xFF;
+ ptr[1] = (value >> 8) & 0xFF;
+}
+
+/***********************************************************
+ * Name: vchi_service_use
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ *
+ * Description: Routine to increment refcount on a service
+ *
+ * Returns: void
+ *
+ ***********************************************************/
+int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service)
+ ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_use);
+
+/***********************************************************
+ * Name: vchi_service_release
+ *
+ * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ *
+ * Description: Routine to decrement refcount on a service
+ *
+ * Returns: void
+ *
+ ***********************************************************/
+int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
+{
+ int32_t ret = -1;
+ SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ if (service)
+ ret = vchiq_status_to_vchi(
+ vchiq_release_service(service->handle));
+ return ret;
+}
+EXPORT_SYMBOL(vchi_service_release);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
new file mode 100644
index 000000000000..384acb8d2eae
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -0,0 +1,156 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "vchiq_util.h"
+#include "vchiq_killable.h"
+
+static inline int is_pow2(int i)
+{
+ return i && !(i & (i - 1));
+}
+
+int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
+{
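+ /* Read and write indices wrap by masking with (size - 1), so the
+ * queue size must be a power of 2.
+ */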
+ WARN_ON(!is_pow2(size));
+
+ queue->size = size;
+ queue->read = 0;
+ queue->write = 0;
+ queue->initialized = 1;
+
+ sema_init(&queue->pop, 0);
+ sema_init(&queue->push, 0);
+
+ queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
+ if (!queue->storage) {
+ vchiu_queue_delete(queue);
+ return 0;
+ }
+ return 1;
+}
+
+void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
+{
+ kfree(queue->storage);
+}
+
+int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
+{
+ return queue->read == queue->write;
+}
+
+int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
+{
+ return queue->write == queue->read + queue->size;
+}
+
+void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
+{
+ if (!queue->initialized)
+ return;
+
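+ /* Block while the queue is full; if the wait is interrupted by a
+ * signal, flush the signal and retry.
+ */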
+ while (queue->write == queue->read + queue->size) {
+ if (down_interruptible(&queue->pop) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ /*
+ * Write to queue->storage must be visible after read from
+ * queue->read
+ */
+ smp_mb();
+
+ queue->storage[queue->write & (queue->size - 1)] = header;
+
+ /*
+ * Write to queue->storage must be visible before write to
+ * queue->write
+ */
+ smp_wmb();
+
+ queue->write++;
+
+ up(&queue->push);
+}
+
+VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
+{
+ while (queue->write == queue->read) {
+ if (down_interruptible(&queue->push) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ up(&queue->push); /* We haven't removed anything from the queue. */
+
+ /*
+ * Read from queue->storage must be visible after read from
+ * queue->write
+ */
+ smp_rmb();
+
+ return queue->storage[queue->read & (queue->size - 1)];
+}
+
+VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
+{
+ VCHIQ_HEADER_T *header;
+
+ while (queue->write == queue->read) {
+ if (down_interruptible(&queue->push) != 0) {
+ flush_signals(current);
+ }
+ }
+
+ /*
+ * Read from queue->storage must be visible after read from
+ * queue->write
+ */
+ smp_rmb();
+
+ header = queue->storage[queue->read & (queue->size - 1)];
+
+ /*
+ * Read from queue->storage must be visible before write to
+ * queue->read
+ */
+ smp_mb();
+
+ queue->read++;
+
+ up(&queue->pop);
+
+ return header;
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
new file mode 100644
index 000000000000..4055d4bf9f74
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
@@ -0,0 +1,82 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VCHIQ_UTIL_H
+#define VCHIQ_UTIL_H
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/time.h> /* for time_t */
+#include <linux/slab.h>
+
+#include "vchiq_if.h"
+
+typedef struct {
+ int size;
+ int read;
+ int write;
+ int initialized;
+
+ struct semaphore pop;
+ struct semaphore push;
+
+ VCHIQ_HEADER_T **storage;
+} VCHIU_QUEUE_T;
+
+extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
+extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
+
+extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
+extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
+
+extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
+
+extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
+extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
+
+#endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c
new file mode 100644
index 000000000000..b6bfa21155e4
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "vchiq_build_info.h"
+#include <linux/broadcom/vc_debug_sym.h>
+
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_hostname, "dc4-arm-01");
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)");
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_time, __TIME__);
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_date, __DATE__);
+
+const char *vchiq_get_build_hostname(void)
+{
+ return vchiq_build_hostname;
+}
+
+const char *vchiq_get_build_version(void)
+{
+ return vchiq_build_version;
+}
+
+const char *vchiq_get_build_date(void)
+{
+ return vchiq_build_date;
+}
+
+const char *vchiq_get_build_time(void)
+{
+ return vchiq_build_time;
+}
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 28a45689e2f4..8e66a520266c 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -466,23 +466,23 @@ static void __exit pio2_exit(void)
/* These are required for each board */
MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
-module_param_array(bus, int, &bus_num, S_IRUGO);
+module_param_array(bus, int, &bus_num, 0444);
MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
-module_param_array(base, long, &base_num, S_IRUGO);
+module_param_array(base, long, &base_num, 0444);
MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
-module_param_array(vector, int, &vector_num, S_IRUGO);
+module_param_array(vector, int, &vector_num, 0444);
MODULE_PARM_DESC(level, "VME IRQ Level");
-module_param_array(level, int, &level_num, S_IRUGO);
+module_param_array(level, int, &level_num, 0444);
MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
-module_param_array(variant, charp, &variant_num, S_IRUGO);
+module_param_array(variant, charp, &variant_num, 0444);
/* This is for debugging */
MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
-module_param(loopback, bool, S_IRUGO);
+module_param(loopback, bool, 0444);
MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index b95883bc68fe..5dd430f8f921 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -773,7 +773,7 @@ static void __exit vme_user_exit(void)
}
MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
-module_param_array(bus, int, &bus_num, 0);
+module_param_array(bus, int, &bus_num, 0000);
MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 654d072bdc28..de503a316e71 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1729,7 +1729,7 @@ BBuGetFrameTime(
unsigned int uFrameTime;
unsigned int uPreamble;
unsigned int uTmp;
- unsigned int uRateIdx = (unsigned int) wRate;
+ unsigned int uRateIdx = (unsigned int)wRate;
unsigned int uRate = 0;
if (uRateIdx > RATE_54M)
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index afb1e8bde975..dbcea4434725 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -619,7 +619,7 @@ CARDvSafeResetRx(
static unsigned short CARDwGetCCKControlRate(struct vnt_private *priv,
unsigned short wRateIdx)
{
- unsigned int ui = (unsigned int) wRateIdx;
+ unsigned int ui = (unsigned int)wRateIdx;
while (ui > RATE_1M) {
if (priv->basic_rates & ((u32)0x1 << ui))
@@ -645,7 +645,7 @@ static unsigned short CARDwGetCCKControlRate(struct vnt_private *priv,
static unsigned short CARDwGetOFDMControlRate(struct vnt_private *priv,
unsigned short wRateIdx)
{
- unsigned int ui = (unsigned int) wRateIdx;
+ unsigned int ui = (unsigned int)wRateIdx;
pr_debug("BASIC RATE: %X\n", priv->basic_rates);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index b7d43a5622ba..029a8df4ca1c 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -193,7 +193,8 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
MACvRegBitsOn(priv->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
/* TX_PE will reserve 3 us for MAX2829 A mode only,
- it is for better TX throughput */
+ * it is for better TX throughput
+ */
if (priv->byRFType == RF_AIROHA7230)
RFbAL7230SelectChannelPostProcess(priv, priv->byCurrentCh,
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 494164045a0f..f109eeac358d 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -113,10 +113,10 @@ DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
/* BasebandType[] baseband type selected
- 0: indicate 802.11a type
- 1: indicate 802.11b type
- 2: indicate 802.11g type
-*/
+ * 0: indicate 802.11a type
+ * 1: indicate 802.11b type
+ * 2: indicate 802.11g type
+ */
#define BBP_TYPE_MIN 0
#define BBP_TYPE_MAX 2
#define BBP_TYPE_DEF 2
@@ -477,7 +477,7 @@ static bool device_init_rings(struct vnt_private *priv)
CB_MAX_BUF_SIZE,
&priv->tx_bufs_dma0,
GFP_ATOMIC);
- if (priv->tx0_bufs == NULL) {
+ if (!priv->tx0_bufs) {
dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
dma_free_coherent(&priv->pcid->dev,
@@ -735,7 +735,7 @@ static bool device_alloc_rx_buf(struct vnt_private *priv,
struct vnt_rd_info *rd_info = rd->rd_info;
rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
- if (rd_info->skb == NULL)
+ if (!rd_info->skb)
return false;
rd_info->skb_dma =
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index ffcaf25fdd8b..e161d5d9aebb 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -31,16 +31,6 @@
#include "key.h"
#include "mac.h"
-int vnt_key_init_table(struct vnt_private *priv)
-{
- u32 i;
-
- for (i = 0; i < MAX_KEY_TABLE; i++)
- MACvDisableKeyEntry(priv, i);
-
- return 0;
-}
-
static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
struct ieee80211_key_conf *key, u32 key_type, u32 mode,
bool onfly_latch)
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index 261f8181d410..d72719741a56 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -61,8 +61,6 @@
struct vnt_private;
-int vnt_key_init_table(struct vnt_private *);
-
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, struct ieee80211_key_conf *key);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index bc8ca981a629..7d6e7464ae51 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -52,7 +52,7 @@
/*--------------------- Export Functions --------------------------*/
-/*+
+/*
*
* Routine Description:
* Enable hw power saving functions
@@ -60,7 +60,7 @@
* Return Value:
* None.
*
- -*/
+ */
void
PSvEnablePowerSaving(
@@ -104,7 +104,7 @@ PSvEnablePowerSaving(
pr_debug("PS:Power Saving Mode Enable...\n");
}
-/*+
+/*
*
* Routine Description:
* Disable hw power saving functions
@@ -112,7 +112,7 @@ PSvEnablePowerSaving(
* Return Value:
* None.
*
- -*/
+ */
void
PSvDisablePowerSaving(
@@ -134,7 +134,7 @@ PSvDisablePowerSaving(
}
-/*+
+/*
*
* Routine Description:
* Check if Next TBTT must wake up
@@ -142,7 +142,7 @@ PSvDisablePowerSaving(
* Return Value:
* None.
*
- -*/
+ */
bool
PSbIsNextTBTTWakeUp(
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index ae10da21ddd0..447882c7a6be 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -169,7 +169,8 @@ static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
};
/* 40MHz reference frequency
- * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.*/
+ * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
+ */
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
@@ -463,7 +464,8 @@ static bool s_bAL7230Init(struct vnt_private *priv)
}
/* Need to Pull PLLON low when writing channel registers through
- * 3-wire interface */
+ * 3-wire interface
+ */
static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
void __iomem *dwIoBase = priv->PortOffset;
@@ -873,7 +875,8 @@ bool RFbRawSetPower(
case RF_AIROHA7230:
/* 0x080F1B00 for 3 wire control TxGain(D10)
- * and 0x31 as TX Gain value */
+ * and 0x31 as TX Gain value
+ */
dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) |
(BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW;
@@ -886,7 +889,7 @@ bool RFbRawSetPower(
return ret;
}
-/*+
+/*
*
* Routine Description:
* Translate RSSI to dBm
@@ -900,7 +903,7 @@ bool RFbRawSetPower(
*
* Return Value: none
*
- -*/
+ */
void
RFvRSSITodBm(
struct vnt_private *priv,
@@ -927,7 +930,8 @@ RFvRSSITodBm(
}
/* Post processing for the 11b/g and 11a.
- * for save time on changing Reg2,3,5,7,10,12,15 */
+ * for save time on changing Reg2,3,5,7,10,12,15
+ */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
u16 byOldChannel,
u16 byNewChannel)
@@ -938,7 +942,8 @@ bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
/* if change between 11 b/g and 11a need to update the following
* register
- * Channel Index 1~14 */
+ * Channel Index 1~14
+ */
if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
/* Change from 2.4G to 5G [Reg] */
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index e4c3165ae027..7e69bc99d60f 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -64,8 +64,10 @@
/*--------------------- Static Functions --------------------------*/
/*--------------------- Static Definitions -------------------------*/
-#define CRITICAL_PACKET_LEN 256 /* if packet size < 256 -> in-direct send
- packet size >= 256 -> direct send */
+/* if packet size < 256 -> indirect send
+ * packet size >= 256 -> direct send
+ */
+#define CRITICAL_PACKET_LEN 256
static const unsigned short wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, /* Long Preamble */
@@ -158,11 +160,11 @@ static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
[rate % MAX_RATE]);
}
-/*byPktType : PK_TYPE_11A 0
- PK_TYPE_11B 1
- PK_TYPE_11GB 2
- PK_TYPE_11GA 3
-*/
+/* byPktType : PK_TYPE_11A 0
+ * PK_TYPE_11B 1
+ * PK_TYPE_11GB 2
+ * PK_TYPE_11GA 3
+ */
static
unsigned int
s_uGetTxRsvTime(
@@ -502,7 +504,7 @@ s_uFillDataHead(
)
{
- if (pTxDataHead == NULL)
+ if (!pTxDataHead)
return 0;
@@ -646,17 +648,20 @@ s_vFillRTSHead(
{
unsigned int uRTSFrameLen = 20;
- if (pvRTS == NULL)
+ if (!pvRTS)
return;
if (bDisCRC) {
- /* When CRCDIS bit is on, H/W forgot to generate FCS for RTS frame,
- in this case we need to decrease its length by 4. */
+ /* When CRCDIS bit is on, H/W forgot to generate FCS for
+ * RTS frame, in this case we need to decrease its length by 4.
+ */
uRTSFrameLen -= 4;
}
- /* Note: So far RTSHead doesn't appear in ATIM & Beacom DMA, so we don't need to take them into account.
- Otherwise, we need to modify codes for them. */
+ /* Note: So far RTSHead doesn't appear in ATIM & Beacon DMA,
+ * so we don't need to take them into account.
+ * Otherwise, we need to modify the code for them.
+ */
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
struct vnt_rts_g *buf = pvRTS;
@@ -838,12 +843,13 @@ s_vFillCTSHead(
{
unsigned int uCTSFrameLen = 14;
- if (pvCTS == NULL)
+ if (!pvCTS)
return;
if (bDisCRC) {
- /* When CRCDIS bit is on, H/W forgot to generate FCS for CTS frame,
- in this case we need to decrease its length by 4. */
+ /* When CRCDIS bit is on, H/W forgot to generate FCS for
+ * CTS frame, in this case we need to decrease its length by 4.
+ */
uCTSFrameLen -= 4;
}
@@ -915,7 +921,7 @@ s_vFillCTSHead(
}
}
-/*+
+/*
*
* Description:
* Generate FIFO control for MAC & Baseband controller
@@ -937,7 +943,8 @@ s_vFillCTSHead(
* Return Value: none
*
-
- * unsigned int cbFrameSize, Hdr+Payload+FCS */
+ * unsigned int cbFrameSize, Hdr+Payload+FCS
+ */
static
void
s_vGenerateTxParameter(
@@ -972,8 +979,8 @@ s_vGenerateTxParameter(
return;
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (pvRTS != NULL) { /* RTS_need
- Fill RsvTime */
+ if (pvRTS != NULL) { /* RTS_need */
+ /* Fill RsvTime */
struct vnt_rrv_time_rts *buf = pvRrvTime;
buf->rts_rrv_time_aa = s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate);
@@ -1002,7 +1009,7 @@ s_vGenerateTxParameter(
/* Fill RTS */
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
- } else if (pvRTS == NULL) {/* RTS_needless, non PCF mode */
+ } else if (!pvRTS) {/* RTS_needless, non PCF mode */
struct vnt_rrv_time_ab *buf = pvRrvTime;
buf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK);
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 807a5809b5d9..7cc13874f8f1 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -84,10 +84,10 @@ struct vnt_phy_field {
} __packed;
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate);
+ unsigned int frame_length, u16 tx_rate);
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
void vnt_set_short_slot_time(struct vnt_private *);
void vnt_set_vga_gain_offset(struct vnt_private *, u8);
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index a382fc6aa9d3..53b469c71dc2 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -46,10 +46,11 @@
#include "key.h"
#include "usbpipe.h"
-/* const u16 cwRXBCNTSFOff[MAX_RATE] =
- {17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3}; */
+/* const u16 cw_rxbcntsf_off[MAX_RATE] =
+ * {17, 34, 96, 192, 34, 23, 17, 11, 8, 5, 4, 3};
+ */
-static const u16 cwRXBCNTSFOff[MAX_RATE] = {
+static const u16 cw_rxbcntsf_off[MAX_RATE] = {
192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3
};
@@ -65,7 +66,6 @@ static const u16 cwRXBCNTSFOff[MAX_RATE] = {
*/
void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
{
-
if (connection_channel > CB_MAX_CHANNEL || !connection_channel)
return;
@@ -76,10 +76,10 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL, 0xb0);
vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
- connection_channel, 0, 0, NULL);
+ connection_channel, 0, 0, NULL);
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
- (u8)(connection_channel | 0x80));
+ (u8)(connection_channel | 0x80));
}
/*
@@ -126,11 +126,11 @@ static u16 vnt_get_ofdm_rate(struct vnt_private *priv, u16 rate_idx)
u16 ui = rate_idx;
dev_dbg(&priv->usb->dev, "%s basic rate: %d\n",
- __func__, priv->basic_rates);
+ __func__, priv->basic_rates);
if (!vnt_ofdm_min_rate(priv)) {
dev_dbg(&priv->usb->dev, "%s (NO OFDM) %d\n",
- __func__, rate_idx);
+ __func__, rate_idx);
if (rate_idx > RATE_24M)
rate_idx = RATE_24M;
return rate_idx;
@@ -139,7 +139,7 @@ static u16 vnt_get_ofdm_rate(struct vnt_private *priv, u16 rate_idx)
while (ui > RATE_11M) {
if (priv->basic_rates & (1 << ui)) {
dev_dbg(&priv->usb->dev, "%s rate: %d\n",
- __func__, ui);
+ __func__, ui);
return ui;
}
ui--;
@@ -165,9 +165,8 @@ static u16 vnt_get_ofdm_rate(struct vnt_private *priv, u16 rate_idx)
*
*/
static void vnt_calculate_ofdm_rate(u16 rate, u8 bb_type,
- u8 *tx_rate, u8 *rsv_time)
+ u8 *tx_rate, u8 *rsv_time)
{
-
switch (rate) {
case RATE_6M:
if (bb_type == BB_TYPE_11A) {
@@ -267,20 +266,20 @@ void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
int i;
/*RSPINF_b_1*/
- vnt_get_phy_field(priv, 14,
- vnt_get_cck_rate(priv, RATE_1M), PK_TYPE_11B, &phy[0]);
+ vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_1M),
+ PK_TYPE_11B, &phy[0]);
/*RSPINF_b_2*/
- vnt_get_phy_field(priv, 14,
- vnt_get_cck_rate(priv, RATE_2M), PK_TYPE_11B, &phy[1]);
+ vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_2M),
+ PK_TYPE_11B, &phy[1]);
/*RSPINF_b_5*/
- vnt_get_phy_field(priv, 14,
- vnt_get_cck_rate(priv, RATE_5M), PK_TYPE_11B, &phy[2]);
+ vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_5M),
+ PK_TYPE_11B, &phy[2]);
/*RSPINF_b_11*/
- vnt_get_phy_field(priv, 14,
- vnt_get_cck_rate(priv, RATE_11M), PK_TYPE_11B, &phy[3]);
+ vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_11M),
+ PK_TYPE_11B, &phy[3]);
/*RSPINF_a_6*/
vnt_calculate_ofdm_rate(RATE_6M, bb_type, &tx_rate[0], &rsv_time[0]);
@@ -299,19 +298,19 @@ void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
/*RSPINF_a_36*/
vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_36M),
- bb_type, &tx_rate[5], &rsv_time[5]);
+ bb_type, &tx_rate[5], &rsv_time[5]);
/*RSPINF_a_48*/
vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_48M),
- bb_type, &tx_rate[6], &rsv_time[6]);
+ bb_type, &tx_rate[6], &rsv_time[6]);
/*RSPINF_a_54*/
vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[7], &rsv_time[7]);
+ bb_type, &tx_rate[7], &rsv_time[7]);
/*RSPINF_a_72*/
vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[8], &rsv_time[8]);
+ bb_type, &tx_rate[8], &rsv_time[8]);
put_unaligned(phy[0].len, (u16 *)&data[0]);
data[2] = phy[0].signal;
@@ -334,8 +333,8 @@ void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
data[16 + i * 2 + 1] = rsv_time[i];
}
- vnt_control_out(priv, MESSAGE_TYPE_WRITE,
- MAC_REG_RSPINF_B_1, MESSAGE_REQUEST_MACREG, 34, &data[0]);
+ vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1,
+ MESSAGE_REQUEST_MACREG, 34, &data[0]);
}
/*
@@ -429,12 +428,12 @@ void vnt_update_ifs(struct vnt_private *priv)
data[3] = (u8)priv->slot;
vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_SIFS,
- MESSAGE_REQUEST_MACREG, 4, &data[0]);
+ MESSAGE_REQUEST_MACREG, 4, &data[0]);
max_min |= 0xa0;
vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0,
- MESSAGE_REQUEST_MACREG, 1, &max_min);
+ MESSAGE_REQUEST_MACREG, 1, &max_min);
}
void vnt_update_top_rates(struct vnt_private *priv)
@@ -478,7 +477,6 @@ int vnt_ofdm_min_rate(struct vnt_private *priv)
u8 vnt_get_pkt_type(struct vnt_private *priv)
{
-
if (priv->bb_type == BB_TYPE_11A || priv->bb_type == BB_TYPE_11B)
return (u8)priv->bb_type;
else if (vnt_ofdm_min_rate(priv))
@@ -506,7 +504,7 @@ u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
u64 tsf_offset = 0;
u16 rx_bcn_offset;
- rx_bcn_offset = cwRXBCNTSFOff[rx_rate % MAX_RATE];
+ rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE];
tsf2 += (u64)rx_bcn_offset;
@@ -531,7 +529,7 @@ u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
*
*/
void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
- u64 time_stamp, u64 local_tsf)
+ u64 time_stamp, u64 local_tsf)
{
u64 tsf_offset = 0;
u8 data[8];
@@ -548,8 +546,9 @@ void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
data[7] = (u8)(tsf_offset >> 56);
vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TSF, 0, 8, data);
+ MESSAGE_REQUEST_TSF, 0, 8, data);
}
+
/*
* Description: Read NIC TSF counter
* Get local TSF counter
@@ -565,7 +564,6 @@ void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
*/
bool vnt_get_current_tsf(struct vnt_private *priv, u64 *current_tsf)
{
-
*current_tsf = priv->current_tsf;
return true;
@@ -584,7 +582,6 @@ bool vnt_get_current_tsf(struct vnt_private *priv, u64 *current_tsf)
*/
bool vnt_clear_current_tsf(struct vnt_private *priv)
{
-
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
priv->current_tsf = 0;
@@ -657,7 +654,7 @@ void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
data[7] = (u8)(next_tbtt >> 56);
vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TBTT, 0, 8, data);
+ MESSAGE_REQUEST_TBTT, 0, 8, data);
}
/*
@@ -676,7 +673,7 @@ void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
*
*/
void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
- u16 beacon_interval)
+ u16 beacon_interval)
{
u8 data[8];
@@ -721,7 +718,7 @@ int vnt_radio_power_off(struct vnt_private *priv)
case RF_VT3226D0:
case RF_VT3342A0:
vnt_mac_reg_bits_off(priv, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
+ (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
break;
}
@@ -762,7 +759,7 @@ int vnt_radio_power_on(struct vnt_private *priv)
case RF_VT3226D0:
case RF_VT3342A0:
vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
+ (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
break;
}
@@ -795,7 +792,7 @@ void vnt_set_bss_mode(struct vnt_private *priv)
priv->bb_vga[0] = 0x20;
vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
+ 0xe7, priv->bb_vga[0]);
}
priv->bb_vga[2] = 0x10;
@@ -805,7 +802,7 @@ void vnt_set_bss_mode(struct vnt_private *priv)
priv->bb_vga[0] = 0x1c;
vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
+ 0xe7, priv->bb_vga[0]);
}
priv->bb_vga[2] = 0x0;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 6019aac8bdd5..655f0002f880 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -34,7 +34,7 @@
#include "rf.h"
int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
- unsigned long bytes_received)
+ unsigned long bytes_received)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_supported_band *sband;
@@ -46,7 +46,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
__le64 *tsf_time;
u32 frame_size;
int ii, r;
- u8 *rx_sts, *rx_rate, *sq, *sq_3;
+ u8 *rx_rate, *sq, *sq_3;
u32 wbk_status;
u8 *skb_data;
u16 *pay_load_len;
@@ -75,22 +75,21 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
skb_data = (u8 *)skb->data;
- rx_sts = skb_data+4;
- rx_rate = skb_data+5;
+ rx_rate = skb_data + 5;
/* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */
/* -8TSF - 4RSR - 4SQ3 - ?Padding */
/* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */
- pay_load_len = (u16 *) (skb_data + 6);
+ pay_load_len = (u16 *)(skb_data + 6);
/*Fix hardware bug => PLCP_Length error */
if (((bytes_received - (*pay_load_len)) > 27) ||
- ((bytes_received - (*pay_load_len)) < 24) ||
- (bytes_received < (*pay_load_len))) {
+ ((bytes_received - (*pay_load_len)) < 24) ||
+ (bytes_received < (*pay_load_len))) {
dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
- *pay_load_len);
+ *pay_load_len);
return false;
}
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index 5a92bd86cee2..ff1850c4a927 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -29,6 +29,6 @@
#include "device.h"
int vnt_rx_data(struct vnt_private *, struct vnt_rcb *,
- unsigned long bytes_received);
+ unsigned long bytes_received);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ac4fecb30d0e..0594828bdabf 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -440,10 +440,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
/* allocate URBs */
tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_context->urb) {
- dev_err(&priv->usb->dev, "alloc tx urb failed\n");
+ if (!tx_context->urb)
goto free_tx;
- }
tx_context->in_use = false;
}
@@ -462,10 +460,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
/* allocate URBs */
rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rcb->urb) {
- dev_err(&priv->usb->dev, "Failed to alloc rx urb\n");
+ if (!rcb->urb)
goto free_rx_tx;
- }
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
if (!rcb->skb)
@@ -479,10 +475,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
}
priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!priv->interrupt_urb) {
- dev_err(&priv->usb->dev, "Failed to alloc int urb\n");
+ if (!priv->interrupt_urb)
goto free_rx_tx;
- }
priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
if (!priv->int_buf.data_buf) {
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index f546553de66f..e9b6b21f7422 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -28,8 +28,9 @@
* vnt_control_in_u8 - Read one byte from MEM/BB/MAC/EEPROM
*
* Revision History:
- * 04-05-2004 Jerry Chen: Initial release
- * 11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,ControlvMaskByte
+ * 04-05-2004 Jerry Chen: Initial release
+ * 11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,
+ * ControlvMaskByte
*
*/
diff --git a/drivers/staging/wilc1000/TODO b/drivers/staging/wilc1000/TODO
index ec93b2ee0b08..ae61b55f14fd 100644
--- a/drivers/staging/wilc1000/TODO
+++ b/drivers/staging/wilc1000/TODO
@@ -3,7 +3,6 @@ TODO:
- remove OS wrapper functions
- remove custom debug and tracing functions
- rework comments and function headers(also coding style)
-- replace all semaphores with mutexes or completions
- Move handling for each individual members of 'union message_body' out
into a separate 'struct work_struct' and completely remove the multiplexer
that is currently part of host_if_work(), allowing movement of the
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 076e06ac0d66..cff16984167b 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -70,11 +70,11 @@ enum connect_status {
CONNECT_STS_FORCE_16_BIT = 0xFFFF
};
-typedef struct {
+struct tstrRSSI {
u8 u8Full;
u8 u8Index;
s8 as8RSSI[NUM_RSSI];
-} tstrRSSI;
+};
struct network_info {
s8 rssi;
@@ -93,7 +93,7 @@ struct network_info {
u8 *ies;
u16 ies_len;
void *join_params;
- tstrRSSI str_rssi;
+ struct tstrRSSI str_rssi;
u64 tsf_hi;
};
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f524fcd214..78f5613e9467 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3279,7 +3279,6 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
int wilc_hif_set_cfg(struct wilc_vif *vif,
struct cfg_param_attr *cfg_param)
{
- int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -3293,9 +3292,7 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
msg.body.cfg_info = *cfg_param;
msg.vif = vif;
- result = wilc_enqueue_cmd(&msg);
-
- return result;
+ return wilc_enqueue_cmd(&msg);
}
static void GetPeriodicRSSI(unsigned long arg)
@@ -3329,7 +3326,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
init_completion(&hif_wait_response);
- hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL);
+ hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
if (!hif_drv) {
result = -ENOMEM;
goto _fail_;
@@ -3878,7 +3875,7 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
pu8IEs = ptstrNetworkInfo->ies;
u16IEsLen = ptstrNetworkInfo->ies_len;
- pNewJoinBssParam = kzalloc(sizeof(struct join_bss_param), GFP_KERNEL);
+ pNewJoinBssParam = kzalloc(sizeof(*pNewJoinBssParam), GFP_KERNEL);
if (pNewJoinBssParam) {
pNewJoinBssParam->dtim_period = ptstrNetworkInfo->dtim_period;
pNewJoinBssParam->beacon_period = ptstrNetworkInfo->beacon_period;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 32215110d597..6370a5efe343 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
#include <linux/completion.h>
static int dev_state_ev_handler(struct notifier_block *this,
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index fcbc95d19d8e..802bb1d5e207 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -29,7 +29,7 @@ static struct dentry *wilc_dir;
#define ERR BIT(3)
#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
-atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
/*
@@ -102,35 +102,16 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
static int __init wilc_debugfs_init(void)
{
int i;
-
- struct dentry *debugfs_files;
struct wilc_debugfs_info_t *info;
wilc_dir = debugfs_create_dir("wilc_wifi", NULL);
- if (wilc_dir == ERR_PTR(-ENODEV)) {
- /* it's not error. the debugfs is just not being enabled. */
- printk("ERR, kernel has built without debugfs support\n");
- return 0;
- }
-
- if (!wilc_dir) {
- printk("ERR, debugfs create dir\n");
- return -1;
- }
-
for (i = 0; i < ARRAY_SIZE(debugfs_info); i++) {
info = &debugfs_info[i];
- debugfs_files = debugfs_create_file(info->name,
- info->perm,
- wilc_dir,
- &info->data,
- &info->fops);
-
- if (!debugfs_files) {
- printk("ERR fail to create the debugfs file, %s\n", info->name);
- debugfs_remove_recursive(wilc_dir);
- return -1;
- }
+ debugfs_create_file(info->name,
+ info->perm,
+ wilc_dir,
+ &info->data,
+ &info->fops);
}
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 22cf4b7857e5..f08cf6d9e1af 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -410,8 +410,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (len2 > ARRAY_SIZE(wb)) {
dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
len2, ARRAY_SIZE(wb));
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
/* zero spi write buffers. */
for (wix = len; wix < len2; wix++)
@@ -420,8 +419,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
dev_err(&spi->dev, "Failed cmd write, bus error...\n");
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
/**
@@ -442,8 +440,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, rsp);
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
/**
@@ -453,8 +450,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (rsp != 0x00) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
rsp);
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ)
@@ -481,8 +477,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (retry <= 0) {
dev_err(&spi->dev,
"Error, data read response (%02x)\n", rsp);
- result = N_RESET;
- return result;
+ return N_RESET;
}
if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ)) {
@@ -497,8 +492,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
} else {
dev_err(&spi->dev,
"buffer overrun when reading data.\n");
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
if (!g_spi.crc_off) {
@@ -510,8 +504,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
crc[1] = rb[rix++];
} else {
dev_err(&spi->dev, "buffer overrun when reading crc.\n");
- result = N_FAIL;
- return result;
+ return N_FAIL;
}
}
} else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
@@ -551,7 +544,6 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
}
}
-
ix += nbytes;
sz -= nbytes;
}
@@ -587,7 +579,6 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (result == N_FAIL)
break;
-
/**
* Read bytes
**/
@@ -687,7 +678,6 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
sz -= nbytes;
} while (sz);
-
return result;
}
@@ -850,7 +840,6 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
static int isinit;
if (isinit) {
-
if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return 0;
@@ -871,7 +860,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/* Read failed. Try with CRC off. This might happen when module
* is removed but chip isn't reset */
g_spi.crc_off = 1;
- dev_err(&spi->dev, "Failed internal read protocol with CRC on, retyring with CRC off...\n");
+ dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n");
if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
/* Read failed with both CRC on and off, something went bad */
dev_err(&spi->dev,
@@ -890,7 +879,6 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
g_spi.crc_off = 1;
}
-
/**
* make sure can read back chip id correctly
**/
@@ -931,14 +919,10 @@ static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
*size = tmp;
}
-
-
_fail_:
return ret;
}
-
-
static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -993,7 +977,6 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
}
*int_status = tmp;
-
}
_fail_:
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 2c2e8aca8305..60d8b055bb2f 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -2200,7 +2200,7 @@ static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
return ret;
}
-static struct cfg80211_ops wilc_cfg80211_ops = {
+static const struct cfg80211_ops wilc_cfg80211_ops = {
.set_monitor_channel = set_channel,
.scan = scan,
.connect = connect,
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 5cc6a82d8081..ec6b1674cf38 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -131,7 +131,7 @@ struct wilc_priv {
struct wilc_wfi_key *wilc_gtk[MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
u8 wilc_groupkey;
- /* semaphores */
+ /* mutexes */
struct mutex scan_req_lock;
/* */
bool gbAutoRateAdjusted;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 19a580939dfc..bc5ad20af0a3 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -621,9 +621,9 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (!ret)
break;
- if ((reg & 0x1) == 0) {
+ if ((reg & 0x1) == 0)
break;
- }
+
counter++;
if (counter > 200) {
counter = 0;
@@ -1001,8 +1001,7 @@ int wilc_wlan_start(struct wilc *wilc)
ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
if (!ret) {
release_bus(wilc, RELEASE_ONLY);
- ret = -EIO;
- return ret;
+ return -EIO;
}
reg = 0;
if (wilc->io_type == HIF_SDIO && wilc->dev_irq_num)
@@ -1034,8 +1033,7 @@ int wilc_wlan_start(struct wilc *wilc)
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
if (!ret) {
release_bus(wilc, RELEASE_ONLY);
- ret = -EIO;
- return ret;
+ return -EIO;
}
wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT);
@@ -1043,8 +1041,7 @@ int wilc_wlan_start(struct wilc *wilc)
ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
if (!ret) {
release_bus(wilc, RELEASE_ONLY);
- ret = -EIO;
- return ret;
+ return -EIO;
}
wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 30e5312ee87e..de6c4ddbf45a 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -192,7 +192,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
-/*time for expiring the semaphores of cfg packets*/
+/*time for expiring the completion of cfg packets*/
#define CFG_PKTS_TIMEOUT 2000
/********************************************
*
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 410bfc034319..439ac6f8d533 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -10,7 +10,6 @@
#ifndef WILC_WLAN_IF_H
#define WILC_WLAN_IF_H
-#include <linux/semaphore.h>
#include <linux/netdevice.h>
/********************************************
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index f46dfe6b24e8..182b2d564627 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -35,7 +35,7 @@ static const u32 prism2_cipher_suites[PRISM2_NUM_CIPHER_SUITES] = {
/* prism2 device private data */
struct prism2_wiphy_private {
- wlandevice_t *wlandev;
+ struct wlandevice *wlandev;
struct ieee80211_supported_band band;
struct ieee80211_channel channels[ARRAY_SIZE(prism2_channels)];
@@ -69,11 +69,11 @@ static int prism2_result2err(int prism2_result)
return err;
}
-static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
+static int prism2_domibset_uint32(struct wlandevice *wlandev, u32 did, u32 data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_uint32_t *mibitem =
- (p80211item_uint32_t *)&msg.mibattribute.data;
+ struct p80211item_uint32 *mibitem =
+ (struct p80211item_uint32 *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -82,12 +82,12 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
return p80211req_dorequest(wlandev, (u8 *)&msg);
}
-static int prism2_domibset_pstr32(wlandevice_t *wlandev,
+static int prism2_domibset_pstr32(struct wlandevice *wlandev,
u32 did, u8 len, const u8 *data)
{
struct p80211msg_dot11req_mibset msg;
- p80211item_pstr32_t *mibitem =
- (p80211item_pstr32_t *)&msg.mibattribute.data;
+ struct p80211item_pstr32 *mibitem =
+ (struct p80211item_pstr32 *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
@@ -103,7 +103,7 @@ static int prism2_change_virtual_intf(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
u32 data;
int result;
int err = 0;
@@ -144,12 +144,15 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
u32 did;
int err = 0;
int result = 0;
+ if (key_index >= NUM_WEPKEYS)
+ return -EINVAL;
+
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -160,27 +163,7 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
goto exit;
/* send key to driver */
- switch (key_index) {
- case 0:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
- break;
-
- case 1:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
- break;
-
- case 2:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
- break;
-
- case 3:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
- break;
-
- default:
- err = -EINVAL;
- goto exit;
- }
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(key_index + 1);
result = prism2_domibset_pstr32(wlandev, did,
params->key_len, params->key);
@@ -205,7 +188,7 @@ static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*))
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
struct key_params params;
int len;
@@ -233,7 +216,7 @@ static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
u32 did;
int err = 0;
int result = 0;
@@ -242,36 +225,13 @@ static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
* a key, so we will cheat by setting the key to a bogus value
*/
- /* send key to driver */
- switch (key_index) {
- case 0:
- did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
- break;
-
- case 1:
- did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
- break;
-
- case 2:
- did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
- break;
-
- case 3:
- did =
- DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
- break;
-
- default:
- err = -EINVAL;
- goto exit;
- }
+ if (key_index >= NUM_WEPKEYS)
+ return -EINVAL;
+ /* send key to driver */
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(key_index + 1);
result = prism2_domibset_pstr32(wlandev, did, 13, "0000000000000");
-exit:
if (result)
err = -EFAULT;
@@ -281,7 +241,7 @@ exit:
static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_index, bool unicast, bool multicast)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
int err = 0;
int result = 0;
@@ -299,13 +259,13 @@ static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
struct p80211msg_lnxreq_commsquality quality;
int result;
memset(sinfo, 0, sizeof(*sinfo));
- if ((wlandev == NULL) || (wlandev->msdstate != WLAN_MSD_RUNNING))
+ if (!wlandev || (wlandev->msdstate != WLAN_MSD_RUNNING))
return -EOPNOTSUPP;
/* build request message */
@@ -314,7 +274,7 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
quality.dbm.status = P80211ENUM_msgitem_status_data_ok;
/* send message to nsd */
- if (wlandev->mlmerequest == NULL)
+ if (!wlandev->mlmerequest)
return -EOPNOTSUPP;
result = wlandev->mlmerequest(wlandev, (struct p80211msg *)&quality);
@@ -334,7 +294,7 @@ static int prism2_scan(struct wiphy *wiphy,
{
struct net_device *dev;
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
- wlandevice_t *wlandev;
+ struct wlandevice *wlandev;
struct p80211msg_dot11req_scan msg1;
struct p80211msg_dot11req_scan_results msg2;
struct cfg80211_bss *bss;
@@ -374,7 +334,7 @@ static int prism2_scan(struct wiphy *wiphy,
msg1.scantype.data = P80211ENUM_scantype_active;
msg1.ssid.data.len = request->ssids->ssid_len;
memcpy(msg1.ssid.data.data,
- request->ssids->ssid, request->ssids->ssid_len);
+ request->ssids->ssid, request->ssids->ssid_len);
} else {
msg1.scantype.data = 0;
}
@@ -451,7 +411,7 @@ exit:
static int prism2_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
- wlandevice_t *wlandev = priv->wlandev;
+ struct wlandevice *wlandev = priv->wlandev;
u32 data;
int result;
int err = 0;
@@ -493,7 +453,7 @@ exit:
static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
struct ieee80211_channel *channel = sme->channel;
struct p80211msg_lnxreq_autojoin msg_join;
u32 did;
@@ -516,11 +476,11 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
/* Set the authorization */
if ((sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) ||
- ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && !is_wep))
- msg_join.authtype.data = P80211ENUM_authalg_opensystem;
+ ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && !is_wep))
+ msg_join.authtype.data = P80211ENUM_authalg_opensystem;
else if ((sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY) ||
- ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && is_wep))
- msg_join.authtype.data = P80211ENUM_authalg_sharedkey;
+ ((sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) && is_wep))
+ msg_join.authtype.data = P80211ENUM_authalg_sharedkey;
else
netdev_warn(dev,
"Unhandled authorisation type for connect (%d)\n",
@@ -529,6 +489,11 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
/* Set the encryption - we only support wep */
if (is_wep) {
if (sme->key) {
+ if (sme->key_idx >= NUM_WEPKEYS) {
+ err = -EINVAL;
+ goto exit;
+ }
+
result = prism2_domibset_uint32(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
sme->key_idx);
@@ -536,28 +501,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
goto exit;
/* send key to driver */
- switch (sme->key_idx) {
- case 0:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0;
- break;
-
- case 1:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1;
- break;
-
- case 2:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2;
- break;
-
- case 3:
- did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3;
- break;
-
- default:
- err = -EINVAL;
- goto exit;
- }
-
+ did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(
+ sme->key_idx + 1);
result = prism2_domibset_pstr32(wlandev,
did, sme->key_len,
(u8 *)sme->key);
@@ -618,7 +563,7 @@ exit:
static int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
struct p80211msg_lnxreq_autojoin msg_join;
int result;
int err = 0;
@@ -652,7 +597,7 @@ static int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
- wlandevice_t *wlandev = priv->wlandev;
+ struct wlandevice *wlandev = priv->wlandev;
u32 data;
int result;
int err = 0;
@@ -679,13 +624,13 @@ static int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm)
{
struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
- wlandevice_t *wlandev = priv->wlandev;
+ struct wlandevice *wlandev = priv->wlandev;
struct p80211msg_dot11req_mibget msg;
- p80211item_uint32_t *mibitem;
+ struct p80211item_uint32 *mibitem;
int result;
int err = 0;
- mibitem = (p80211item_uint32_t *)&msg.mibattribute.data;
+ mibitem = (struct p80211item_uint32 *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibget;
mibitem->did =
DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
@@ -704,7 +649,7 @@ exit:
}
/* Interface callback functions, passing data back up to the cfg80211 layer */
-void prism2_connect_result(wlandevice_t *wlandev, u8 failed)
+void prism2_connect_result(struct wlandevice *wlandev, u8 failed)
{
u16 status = failed ?
WLAN_STATUS_UNSPECIFIED_FAILURE : WLAN_STATUS_SUCCESS;
@@ -713,13 +658,13 @@ void prism2_connect_result(wlandevice_t *wlandev, u8 failed)
NULL, 0, NULL, 0, status, GFP_KERNEL);
}
-void prism2_disconnected(wlandevice_t *wlandev)
+void prism2_disconnected(struct wlandevice *wlandev)
{
cfg80211_disconnected(wlandev->netdev, 0, NULL,
- 0, false, GFP_KERNEL);
+ 0, false, GFP_KERNEL);
}
-void prism2_roamed(wlandevice_t *wlandev)
+void prism2_roamed(struct wlandevice *wlandev)
{
cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid,
NULL, 0, NULL, 0, GFP_KERNEL);
@@ -744,7 +689,7 @@ static const struct cfg80211_ops prism2_usb_cfg_ops = {
};
/* Functions to create/free wiphy interface */
-static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
+static struct wiphy *wlan_create_wiphy(struct device *dev, struct wlandevice *wlandev)
{
struct wiphy *wiphy;
struct prism2_wiphy_private *priv;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index cec6d0ba3b65..43c299c3b631 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1,57 +1,57 @@
/* hfa384x.h
-*
-* Defines the constants and data structures for the hfa384x
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* [Implementation and usage notes]
-*
-* [References]
-* CW10 Programmer's Manual v1.5
-* IEEE 802.11 D10.0
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Defines the constants and data structures for the hfa384x
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * [Implementation and usage notes]
+ *
+ * [References]
+ * CW10 Programmer's Manual v1.5
+ * IEEE 802.11 D10.0
+ *
+ * --------------------------------------------------------------------
+ */
#ifndef _HFA384x_H
#define _HFA384x_H
@@ -63,7 +63,7 @@
/*--- Mins & Maxs -----------------------------------*/
#define HFA384x_PORTID_MAX ((u16)7)
-#define HFA384x_NUMPORTS_MAX ((u16)(HFA384x_PORTID_MAX+1))
+#define HFA384x_NUMPORTS_MAX ((u16)(HFA384x_PORTID_MAX + 1))
#define HFA384x_PDR_LEN_MAX ((u16)512) /* in bytes, from EK */
#define HFA384x_PDA_RECS_MAX ((u16)200) /* a guess */
#define HFA384x_PDA_LEN_MAX ((u16)1024) /* in bytes, from EK*/
@@ -110,20 +110,21 @@
#define HFA384x_ADDR_FLAT_CMD_OFF_MASK (0x0000ffff)
/* Mask bits for discarding unwanted pieces in AUX format
- 16-bit address parts */
+ * 16-bit address parts
+ */
#define HFA384x_ADDR_AUX_PAGE_MASK (0xffff)
#define HFA384x_ADDR_AUX_OFF_MASK (0x007f)
/* Make a 32-bit flat address from AUX format 16-bit page and offset */
#define HFA384x_ADDR_AUX_MKFLAT(p, o) \
- ((((u32)(((u16)(p))&HFA384x_ADDR_AUX_PAGE_MASK)) << 7) | \
- ((u32)(((u16)(o))&HFA384x_ADDR_AUX_OFF_MASK)))
+ ((((u32)(((u16)(p)) & HFA384x_ADDR_AUX_PAGE_MASK)) << 7) | \
+ ((u32)(((u16)(o)) & HFA384x_ADDR_AUX_OFF_MASK)))
/* Make CMD format offset and page from a 32-bit flat address */
#define HFA384x_ADDR_CMD_MKPAGE(f) \
- ((u16)((((u32)(f))&HFA384x_ADDR_FLAT_CMD_PAGE_MASK)>>16))
+ ((u16)((((u32)(f)) & HFA384x_ADDR_FLAT_CMD_PAGE_MASK) >> 16))
#define HFA384x_ADDR_CMD_MKOFF(f) \
- ((u16)(((u32)(f))&HFA384x_ADDR_FLAT_CMD_OFF_MASK))
+ ((u16)(((u32)(f)) & HFA384x_ADDR_FLAT_CMD_OFF_MASK))
/*--- Controller Memory addresses -------------------*/
#define HFA3842_PDA_BASE (0x007f0000UL)
@@ -173,11 +174,12 @@
#define HFA384x_CMD_ERR ((u16)(0x7F))
/*--- Programming Modes --------------------------
- MODE 0: Disable programming
- MODE 1: Enable volatile memory programming
- MODE 2: Enable non-volatile memory programming
- MODE 3: Program non-volatile memory section
---------------------------------------------------*/
+ * MODE 0: Disable programming
+ * MODE 1: Enable volatile memory programming
+ * MODE 2: Enable non-volatile memory programming
+ * MODE 3: Program non-volatile memory section
+ *-------------------------------------------------
+ */
#define HFA384x_PROGMODE_DISABLE ((u16)0x00)
#define HFA384x_PROGMODE_RAM ((u16)0x01)
#define HFA384x_PROGMODE_NV ((u16)0x02)
@@ -185,8 +187,9 @@
/*--- Record ID Constants --------------------------*/
/*--------------------------------------------------------------------
-Configuration RIDs: Network Parameters, Static Configuration Entities
---------------------------------------------------------------------*/
+ * Configuration RIDs: Network Parameters, Static Configuration Entities
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_CNFPORTTYPE ((u16)0xFC00)
#define HFA384x_RID_CNFOWNMACADDR ((u16)0xFC01)
#define HFA384x_RID_CNFDESIREDSSID ((u16)0xFC02)
@@ -195,17 +198,19 @@ Configuration RIDs: Network Parameters, Static Configuration Entities
#define HFA384x_RID_CNFMAXDATALEN ((u16)0xFC07)
/*--------------------------------------------------------------------
-Configuration RID lengths: Network Params, Static Config Entities
- This is the length of JUST the DATA part of the RID (does not
- include the len or code fields)
---------------------------------------------------------------------*/
+ * Configuration RID lengths: Network Params, Static Config Entities
+ * This is the length of JUST the DATA part of the RID (does not
+ * include the len or code fields)
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_CNFOWNMACADDR_LEN ((u16)6)
#define HFA384x_RID_CNFDESIREDSSID_LEN ((u16)34)
#define HFA384x_RID_CNFOWNSSID_LEN ((u16)34)
/*--------------------------------------------------------------------
-Configuration RIDs: Network Parameters, Dynamic Configuration Entities
---------------------------------------------------------------------*/
+ * Configuration RIDs: Network Parameters, Dynamic Configuration Entities
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_CREATEIBSS ((u16)0xFC81)
#define HFA384x_RID_FRAGTHRESH ((u16)0xFC82)
#define HFA384x_RID_RTSTHRESH ((u16)0xFC83)
@@ -213,8 +218,9 @@ Configuration RIDs: Network Parameters, Dynamic Configuration Entities
#define HFA384x_RID_PROMISCMODE ((u16)0xFC85)
/*----------------------------------------------------------------------
-Information RIDs: NIC Information
---------------------------------------------------------------------*/
+ * Information RIDs: NIC Information
+ *----------------------------------------------------------------------
+ */
#define HFA384x_RID_MAXLOADTIME ((u16)0xFD00)
#define HFA384x_RID_DOWNLOADBUFFER ((u16)0xFD01)
#define HFA384x_RID_PRIIDENTITY ((u16)0xFD02)
@@ -230,15 +236,17 @@ Information RIDs: NIC Information
#define HFA384x_RID_STA_CFIACTRANGES ((u16)0xFD23)
/*----------------------------------------------------------------------
-Information RID Lengths: NIC Information
- This is the length of JUST the DATA part of the RID (does not
- include the len or code fields)
---------------------------------------------------------------------*/
+ * Information RID Lengths: NIC Information
+ * This is the length of JUST the DATA part of the RID (does not
+ * include the len or code fields)
+ *---------------------------------------------------------------------
+ */
#define HFA384x_RID_NICSERIALNUMBER_LEN ((u16)12)
/*--------------------------------------------------------------------
-Information RIDs: MAC Information
---------------------------------------------------------------------*/
+ * Information RIDs: MAC Information
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_PORTSTATUS ((u16)0xFD40)
#define HFA384x_RID_CURRENTSSID ((u16)0xFD41)
#define HFA384x_RID_CURRENTBSSID ((u16)0xFD42)
@@ -250,23 +258,26 @@ Information RIDs: MAC Information
#define HFA384x_RID_DBMCOMMSQUALITY ((u16)0xFD51)
/*--------------------------------------------------------------------
-Information RID Lengths: MAC Information
- This is the length of JUST the DATA part of the RID (does not
- include the len or code fields)
---------------------------------------------------------------------*/
+ * Information RID Lengths: MAC Information
+ * This is the length of JUST the DATA part of the RID (does not
+ * include the len or code fields)
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_DBMCOMMSQUALITY_LEN \
- ((u16) sizeof(hfa384x_dbmcommsquality_t))
+ ((u16)sizeof(struct hfa384x_dbmcommsquality))
#define HFA384x_RID_JOINREQUEST_LEN \
- ((u16)sizeof(hfa384x_JoinRequest_data_t))
+ ((u16)sizeof(struct hfa384x_JoinRequest_data))
/*--------------------------------------------------------------------
-Information RIDs: Modem Information
---------------------------------------------------------------------*/
+ * Information RIDs: Modem Information
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_CURRENTCHANNEL ((u16)0xFDC1)
/*--------------------------------------------------------------------
-API ENHANCEMENTS (NOT ALREADY IMPLEMENTED)
---------------------------------------------------------------------*/
+ * API ENHANCEMENTS (NOT ALREADY IMPLEMENTED)
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RID_CNFWEPDEFAULTKEYID ((u16)0xFC23)
#define HFA384x_RID_CNFWEPDEFAULTKEY0 ((u16)0xFC24)
#define HFA384x_RID_CNFWEPDEFAULTKEY1 ((u16)0xFC25)
@@ -290,8 +301,9 @@ API ENHANCEMENTS (NOT ALREADY IMPLEMENTED)
#define HFA384x_RID_CNFWEP128DEFAULTKEY_LEN ((u16)14)
/*--------------------------------------------------------------------
-PD Record codes
---------------------------------------------------------------------*/
+ * PD Record codes
+ *--------------------------------------------------------------------
+ */
#define HFA384x_PDR_PCB_PARTNUM ((u16)0x0001)
#define HFA384x_PDR_PDAVER ((u16)0x0002)
#define HFA384x_PDR_NIC_SERIAL ((u16)0x0003)
@@ -355,31 +367,32 @@ struct hfa384x_bytestr {
u8 data[0];
} __packed;
-typedef struct hfa384x_bytestr32 {
+struct hfa384x_bytestr32 {
u16 len;
u8 data[32];
-} __packed hfa384x_bytestr32_t;
+} __packed;
/*--------------------------------------------------------------------
-Configuration Record Structures:
- Network Parameters, Static Configuration Entities
---------------------------------------------------------------------*/
+ * Configuration Record Structures:
+ * Network Parameters, Static Configuration Entities
+ *--------------------------------------------------------------------
+ */
/*-- Hardware/Firmware Component Information ----------*/
-typedef struct hfa384x_compident {
+struct hfa384x_compident {
u16 id;
u16 variant;
u16 major;
u16 minor;
-} __packed hfa384x_compident_t;
+} __packed;
-typedef struct hfa384x_caplevel {
+struct hfa384x_caplevel {
u16 role;
u16 id;
u16 variant;
u16 bottom;
u16 top;
-} __packed hfa384x_caplevel_t;
+} __packed;
/*-- Configuration Record: cnfAuthentication --*/
#define HFA384x_CNFAUTHENTICATION_OPENSYSTEM 0x0001
@@ -387,77 +400,81 @@ typedef struct hfa384x_caplevel {
#define HFA384x_CNFAUTHENTICATION_LEAP 0x0004
/*--------------------------------------------------------------------
-Configuration Record Structures:
- Network Parameters, Dynamic Configuration Entities
---------------------------------------------------------------------*/
+ * Configuration Record Structures:
+ * Network Parameters, Dynamic Configuration Entities
+ *--------------------------------------------------------------------
+ */
#define HFA384x_CREATEIBSS_JOINCREATEIBSS 0
/*-- Configuration Record: HostScanRequest (data portion only) --*/
-typedef struct hfa384x_HostScanRequest_data {
+struct hfa384x_HostScanRequest_data {
u16 channelList;
u16 txRate;
- hfa384x_bytestr32_t ssid;
-} __packed hfa384x_HostScanRequest_data_t;
+ struct hfa384x_bytestr32 ssid;
+} __packed;
/*-- Configuration Record: JoinRequest (data portion only) --*/
-typedef struct hfa384x_JoinRequest_data {
+struct hfa384x_JoinRequest_data {
u8 bssid[WLAN_BSSID_LEN];
u16 channel;
-} __packed hfa384x_JoinRequest_data_t;
+} __packed;
/*-- Configuration Record: authenticateStation (data portion only) --*/
-typedef struct hfa384x_authenticateStation_data {
+struct hfa384x_authenticateStation_data {
u8 address[ETH_ALEN];
u16 status;
u16 algorithm;
-} __packed hfa384x_authenticateStation_data_t;
+} __packed;
/*-- Configuration Record: WPAData (data portion only) --*/
-typedef struct hfa384x_WPAData {
+struct hfa384x_WPAData {
u16 datalen;
u8 data[0]; /* max 80 */
-} __packed hfa384x_WPAData_t;
+} __packed;
/*--------------------------------------------------------------------
-Information Record Structures: NIC Information
---------------------------------------------------------------------*/
+ * Information Record Structures: NIC Information
+ *--------------------------------------------------------------------
+ */
/*-- Information Record: DownLoadBuffer --*/
/* NOTE: The page and offset are in AUX format */
-typedef struct hfa384x_downloadbuffer {
+struct hfa384x_downloadbuffer {
u16 page;
u16 offset;
u16 len;
-} __packed hfa384x_downloadbuffer_t;
+} __packed;
/*--------------------------------------------------------------------
-Information Record Structures: NIC Information
---------------------------------------------------------------------*/
+ * Information Record Structures: NIC Information
+ *--------------------------------------------------------------------
+ */
#define HFA384x_PSTATUS_CONN_IBSS ((u16)3)
/*-- Information Record: commsquality --*/
-typedef struct hfa384x_commsquality {
+struct hfa384x_commsquality {
u16 CQ_currBSS;
u16 ASL_currBSS;
u16 ANL_currFC;
-} __packed hfa384x_commsquality_t;
+} __packed;
/*-- Information Record: dmbcommsquality --*/
-typedef struct hfa384x_dbmcommsquality {
+struct hfa384x_dbmcommsquality {
u16 CQdbm_currBSS;
u16 ASLdbm_currBSS;
u16 ANLdbm_currFC;
-} __packed hfa384x_dbmcommsquality_t;
+} __packed;
/*--------------------------------------------------------------------
-FRAME STRUCTURES: Communication Frames
-----------------------------------------------------------------------
-Communication Frames: Transmit Frames
---------------------------------------------------------------------*/
+ * FRAME STRUCTURES: Communication Frames
+ *--------------------------------------------------------------------
+ * Communication Frames: Transmit Frames
+ *--------------------------------------------------------------------
+ */
/*-- Communication Frame: Transmit Frame Structure --*/
-typedef struct hfa384x_tx_frame {
+struct hfa384x_tx_frame {
u16 status;
u16 reserved1;
u16 reserved2;
@@ -482,10 +499,11 @@ typedef struct hfa384x_tx_frame {
u8 dest_addr[6];
u8 src_addr[6];
u16 data_length; /* big endian format */
-} __packed hfa384x_tx_frame_t;
+} __packed;
/*--------------------------------------------------------------------
-Communication Frames: Field Masks for Transmit Frames
---------------------------------------------------------------------*/
+ * Communication Frames: Field Masks for Transmit Frames
+ *--------------------------------------------------------------------
+ */
/*-- Status Field --*/
#define HFA384x_TXSTATUS_ACKERR ((u16)BIT(5))
#define HFA384x_TXSTATUS_FORMERR ((u16)BIT(3))
@@ -499,16 +517,17 @@ Communication Frames: Field Masks for Transmit Frames
#define HFA384x_TX_TXEX ((u16)BIT(2))
#define HFA384x_TX_TXOK ((u16)BIT(1))
/*--------------------------------------------------------------------
-Communication Frames: Test/Get/Set Field Values for Transmit Frames
---------------------------------------------------------------------*/
+ * Communication Frames: Test/Get/Set Field Values for Transmit Frames
+ *--------------------------------------------------------------------
+ */
/*-- Status Field --*/
#define HFA384x_TXSTATUS_ISERROR(v) \
- (((u16)(v))&\
- (HFA384x_TXSTATUS_ACKERR|HFA384x_TXSTATUS_FORMERR|\
- HFA384x_TXSTATUS_DISCON|HFA384x_TXSTATUS_AGEDERR|\
+ (((u16)(v)) & \
+ (HFA384x_TXSTATUS_ACKERR | HFA384x_TXSTATUS_FORMERR | \
+ HFA384x_TXSTATUS_DISCON | HFA384x_TXSTATUS_AGEDERR | \
HFA384x_TXSTATUS_RETRYERR))
-#define HFA384x_TX_SET(v, m, s) ((((u16)(v))<<((u16)(s)))&((u16)(m)))
+#define HFA384x_TX_SET(v, m, s) ((((u16)(v)) << ((u16)(s))) & ((u16)(m)))
#define HFA384x_TX_MACPORT_SET(v) HFA384x_TX_SET(v, HFA384x_TX_MACPORT, 8)
#define HFA384x_TX_STRUCTYPE_SET(v) HFA384x_TX_SET(v, \
@@ -516,10 +535,11 @@ Communication Frames: Test/Get/Set Field Values for Transmit Frames
#define HFA384x_TX_TXEX_SET(v) HFA384x_TX_SET(v, HFA384x_TX_TXEX, 2)
#define HFA384x_TX_TXOK_SET(v) HFA384x_TX_SET(v, HFA384x_TX_TXOK, 1)
/*--------------------------------------------------------------------
-Communication Frames: Receive Frames
---------------------------------------------------------------------*/
+ * Communication Frames: Receive Frames
+ *--------------------------------------------------------------------
+ */
/*-- Communication Frame: Receive Frame Structure --*/
-typedef struct hfa384x_rx_frame {
+struct hfa384x_rx_frame {
/*-- MAC rx descriptor (hfa384x byte order) --*/
u16 status;
u32 time;
@@ -544,10 +564,11 @@ typedef struct hfa384x_rx_frame {
u8 dest_addr[6];
u8 src_addr[6];
u16 data_length; /* IEEE? (big endian) format */
-} __packed hfa384x_rx_frame_t;
+} __packed;
/*--------------------------------------------------------------------
-Communication Frames: Field Masks for Receive Frames
---------------------------------------------------------------------*/
+ * Communication Frames: Field Masks for Receive Frames
+ *--------------------------------------------------------------------
+ */
/*-- Status Fields --*/
#define HFA384x_RXSTATUS_MACPORT ((u16)(BIT(10) | \
@@ -555,17 +576,19 @@ Communication Frames: Field Masks for Receive Frames
BIT(8)))
#define HFA384x_RXSTATUS_FCSERR ((u16)BIT(0))
/*--------------------------------------------------------------------
-Communication Frames: Test/Get/Set Field Values for Receive Frames
---------------------------------------------------------------------*/
+ * Communication Frames: Test/Get/Set Field Values for Receive Frames
+ *--------------------------------------------------------------------
+ */
#define HFA384x_RXSTATUS_MACPORT_GET(value) ((u16)((((u16)(value)) \
& HFA384x_RXSTATUS_MACPORT) >> 8))
#define HFA384x_RXSTATUS_ISFCSERR(value) ((u16)(((u16)(value)) \
& HFA384x_RXSTATUS_FCSERR))
/*--------------------------------------------------------------------
- FRAME STRUCTURES: Information Types and Information Frame Structures
-----------------------------------------------------------------------
-Information Types
---------------------------------------------------------------------*/
+ * FRAME STRUCTURES: Information Types and Information Frame Structures
+ *--------------------------------------------------------------------
+ * Information Types
+ *--------------------------------------------------------------------
+ */
#define HFA384x_IT_HANDOVERADDR ((u16)0xF000UL)
#define HFA384x_IT_COMMTALLIES ((u16)0xF100UL)
#define HFA384x_IT_SCANRESULTS ((u16)0xF101UL)
@@ -580,13 +603,14 @@ Information Types
#define HFA384x_IT_MICFAILURE ((u16)0xF206UL)
/*--------------------------------------------------------------------
-Information Frames Structures
-----------------------------------------------------------------------
-Information Frames: Notification Frame Structures
---------------------------------------------------------------------*/
+ * Information Frames Structures
+ *--------------------------------------------------------------------
+ * Information Frames: Notification Frame Structures
+ *--------------------------------------------------------------------
+ */
/*-- Inquiry Frame, Diagnose: Communication Tallies --*/
-typedef struct hfa384x_CommTallies16 {
+struct hfa384x_CommTallies16 {
u16 txunicastframes;
u16 txmulticastframes;
u16 txfragments;
@@ -608,9 +632,9 @@ typedef struct hfa384x_CommTallies16 {
u16 rxdiscardswepundecr;
u16 rxmsginmsgfrag;
u16 rxmsginbadmsgfrag;
-} __packed hfa384x_CommTallies16_t;
+} __packed;
-typedef struct hfa384x_CommTallies32 {
+struct hfa384x_CommTallies32 {
u32 txunicastframes;
u32 txmulticastframes;
u32 txfragments;
@@ -632,62 +656,62 @@ typedef struct hfa384x_CommTallies32 {
u32 rxdiscardswepundecr;
u32 rxmsginmsgfrag;
u32 rxmsginbadmsgfrag;
-} __packed hfa384x_CommTallies32_t;
+} __packed;
/*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/
-typedef struct hfa384x_ScanResultSub {
+struct hfa384x_ScanResultSub {
u16 chid;
u16 anl;
u16 sl;
u8 bssid[WLAN_BSSID_LEN];
u16 bcnint;
u16 capinfo;
- hfa384x_bytestr32_t ssid;
+ struct hfa384x_bytestr32 ssid;
u8 supprates[10]; /* 802.11 info element */
u16 proberesp_rate;
-} __packed hfa384x_ScanResultSub_t;
+} __packed;
-typedef struct hfa384x_ScanResult {
+struct hfa384x_ScanResult {
u16 rsvd;
u16 scanreason;
- hfa384x_ScanResultSub_t result[HFA384x_SCANRESULT_MAX];
-} __packed hfa384x_ScanResult_t;
+ struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX];
+} __packed;
/*-- Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/
-typedef struct hfa384x_ChInfoResultSub {
+struct hfa384x_ChInfoResultSub {
u16 chid;
u16 anl;
u16 pnl;
u16 active;
-} __packed hfa384x_ChInfoResultSub_t;
+} __packed;
#define HFA384x_CHINFORESULT_BSSACTIVE BIT(0)
#define HFA384x_CHINFORESULT_PCFACTIVE BIT(1)
-typedef struct hfa384x_ChInfoResult {
+struct hfa384x_ChInfoResult {
u16 scanchannels;
- hfa384x_ChInfoResultSub_t result[HFA384x_CHINFORESULT_MAX];
-} __packed hfa384x_ChInfoResult_t;
+ struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX];
+} __packed;
/*-- Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
-typedef struct hfa384x_HScanResultSub {
+struct hfa384x_HScanResultSub {
u16 chid;
u16 anl;
u16 sl;
u8 bssid[WLAN_BSSID_LEN];
u16 bcnint;
u16 capinfo;
- hfa384x_bytestr32_t ssid;
+ struct hfa384x_bytestr32 ssid;
u8 supprates[10]; /* 802.11 info element */
u16 proberesp_rate;
u16 atim;
-} __packed hfa384x_HScanResultSub_t;
+} __packed;
-typedef struct hfa384x_HScanResult {
+struct hfa384x_HScanResult {
u16 nresult;
u16 rsvd;
- hfa384x_HScanResultSub_t result[HFA384x_HSCANRESULT_MAX];
-} __packed hfa384x_HScanResult_t;
+ struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX];
+} __packed;
/*-- Unsolicited Frame, MAC Mgmt: LinkStatus --*/
@@ -699,9 +723,9 @@ typedef struct hfa384x_HScanResult {
#define HFA384x_LINK_AP_INRANGE ((u16)5)
#define HFA384x_LINK_ASSOCFAIL ((u16)6)
-typedef struct hfa384x_LinkStatus {
+struct hfa384x_LinkStatus {
u16 linkstatus;
-} __packed hfa384x_LinkStatus_t;
+} __packed;
/*-- Unsolicited Frame, MAC Mgmt: AssociationStatus (--*/
@@ -709,56 +733,57 @@ typedef struct hfa384x_LinkStatus {
#define HFA384x_ASSOCSTATUS_REASSOC ((u16)2)
#define HFA384x_ASSOCSTATUS_AUTHFAIL ((u16)5)
-typedef struct hfa384x_AssocStatus {
+struct hfa384x_AssocStatus {
u16 assocstatus;
u8 sta_addr[ETH_ALEN];
/* old_ap_addr is only valid if assocstatus == 2 */
u8 old_ap_addr[ETH_ALEN];
u16 reason;
u16 reserved;
-} __packed hfa384x_AssocStatus_t;
+} __packed;
/*-- Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/
-typedef struct hfa384x_AuthRequest {
+struct hfa384x_AuthRequest {
u8 sta_addr[ETH_ALEN];
u16 algorithm;
-} __packed hfa384x_AuthReq_t;
+} __packed;
/*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
-typedef struct hfa384x_PSUserCount {
+struct hfa384x_PSUserCount {
u16 usercnt;
-} __packed hfa384x_PSUserCount_t;
+} __packed;
-typedef struct hfa384x_KeyIDChanged {
+struct hfa384x_KeyIDChanged {
u8 sta_addr[ETH_ALEN];
u16 keyid;
-} __packed hfa384x_KeyIDChanged_t;
+} __packed;
/*-- Collection of all Inf frames ---------------*/
-typedef union hfa384x_infodata {
- hfa384x_CommTallies16_t commtallies16;
- hfa384x_CommTallies32_t commtallies32;
- hfa384x_ScanResult_t scanresult;
- hfa384x_ChInfoResult_t chinforesult;
- hfa384x_HScanResult_t hscanresult;
- hfa384x_LinkStatus_t linkstatus;
- hfa384x_AssocStatus_t assocstatus;
- hfa384x_AuthReq_t authreq;
- hfa384x_PSUserCount_t psusercnt;
- hfa384x_KeyIDChanged_t keyidchanged;
-} __packed hfa384x_infodata_t;
-
-typedef struct hfa384x_InfFrame {
+union hfa384x_infodata {
+ struct hfa384x_CommTallies16 commtallies16;
+ struct hfa384x_CommTallies32 commtallies32;
+ struct hfa384x_ScanResult scanresult;
+ struct hfa384x_ChInfoResult chinforesult;
+ struct hfa384x_HScanResult hscanresult;
+ struct hfa384x_LinkStatus linkstatus;
+ struct hfa384x_AssocStatus assocstatus;
+ struct hfa384x_AuthRequest authreq;
+ struct hfa384x_PSUserCount psusercnt;
+ struct hfa384x_KeyIDChanged keyidchanged;
+} __packed;
+
+struct hfa384x_InfFrame {
u16 framelen;
u16 infotype;
- hfa384x_infodata_t info;
-} __packed hfa384x_InfFrame_t;
+ union hfa384x_infodata info;
+} __packed;
/*--------------------------------------------------------------------
-USB Packet structures and constants.
---------------------------------------------------------------------*/
+ * USB Packet structures and constants.
+ *--------------------------------------------------------------------
+ */
/* Should be sent to the bulkout endpoint */
#define HFA384x_USB_TXFRM 0
@@ -783,143 +808,140 @@ USB Packet structures and constants.
/*------------------------------------*/
/* Request (bulk OUT) packet contents */
-typedef struct hfa384x_usb_txfrm {
- hfa384x_tx_frame_t desc;
+struct hfa384x_usb_txfrm {
+ struct hfa384x_tx_frame desc;
u8 data[WLAN_DATA_MAXLEN];
-} __packed hfa384x_usb_txfrm_t;
+} __packed;
-typedef struct hfa384x_usb_cmdreq {
+struct hfa384x_usb_cmdreq {
u16 type;
u16 cmd;
u16 parm0;
u16 parm1;
u16 parm2;
u8 pad[54];
-} __packed hfa384x_usb_cmdreq_t;
+} __packed;
-typedef struct hfa384x_usb_wridreq {
+struct hfa384x_usb_wridreq {
u16 type;
u16 frmlen;
u16 rid;
u8 data[HFA384x_RIDDATA_MAXLEN];
-} __packed hfa384x_usb_wridreq_t;
+} __packed;
-typedef struct hfa384x_usb_rridreq {
+struct hfa384x_usb_rridreq {
u16 type;
u16 frmlen;
u16 rid;
u8 pad[58];
-} __packed hfa384x_usb_rridreq_t;
+} __packed;
-typedef struct hfa384x_usb_wmemreq {
+struct hfa384x_usb_wmemreq {
u16 type;
u16 frmlen;
u16 offset;
u16 page;
u8 data[HFA384x_USB_RWMEM_MAXLEN];
-} __packed hfa384x_usb_wmemreq_t;
+} __packed;
-typedef struct hfa384x_usb_rmemreq {
+struct hfa384x_usb_rmemreq {
u16 type;
u16 frmlen;
u16 offset;
u16 page;
u8 pad[56];
-} __packed hfa384x_usb_rmemreq_t;
+} __packed;
/*------------------------------------*/
/* Response (bulk IN) packet contents */
-typedef struct hfa384x_usb_rxfrm {
- hfa384x_rx_frame_t desc;
+struct hfa384x_usb_rxfrm {
+ struct hfa384x_rx_frame desc;
u8 data[WLAN_DATA_MAXLEN];
-} __packed hfa384x_usb_rxfrm_t;
+} __packed;
-typedef struct hfa384x_usb_infofrm {
+struct hfa384x_usb_infofrm {
u16 type;
- hfa384x_InfFrame_t info;
-} __packed hfa384x_usb_infofrm_t;
+ struct hfa384x_InfFrame info;
+} __packed;
-typedef struct hfa384x_usb_statusresp {
+struct hfa384x_usb_statusresp {
u16 type;
u16 status;
u16 resp0;
u16 resp1;
u16 resp2;
-} __packed hfa384x_usb_cmdresp_t;
-
-typedef hfa384x_usb_cmdresp_t hfa384x_usb_wridresp_t;
+} __packed;
-typedef struct hfa384x_usb_rridresp {
+struct hfa384x_usb_rridresp {
u16 type;
u16 frmlen;
u16 rid;
u8 data[HFA384x_RIDDATA_MAXLEN];
-} __packed hfa384x_usb_rridresp_t;
-
-typedef hfa384x_usb_cmdresp_t hfa384x_usb_wmemresp_t;
+} __packed;
-typedef struct hfa384x_usb_rmemresp {
+struct hfa384x_usb_rmemresp {
u16 type;
u16 frmlen;
u8 data[HFA384x_USB_RWMEM_MAXLEN];
-} __packed hfa384x_usb_rmemresp_t;
+} __packed;
-typedef struct hfa384x_usb_bufavail {
+struct hfa384x_usb_bufavail {
u16 type;
u16 frmlen;
-} __packed hfa384x_usb_bufavail_t;
+} __packed;
-typedef struct hfa384x_usb_error {
+struct hfa384x_usb_error {
u16 type;
u16 errortype;
-} __packed hfa384x_usb_error_t;
+} __packed;
/*----------------------------------------------------------*/
/* Unions for packaging all the known packet types together */
-typedef union hfa384x_usbout {
+union hfa384x_usbout {
__le16 type;
- hfa384x_usb_txfrm_t txfrm;
- hfa384x_usb_cmdreq_t cmdreq;
- hfa384x_usb_wridreq_t wridreq;
- hfa384x_usb_rridreq_t rridreq;
- hfa384x_usb_wmemreq_t wmemreq;
- hfa384x_usb_rmemreq_t rmemreq;
-} __packed hfa384x_usbout_t;
-
-typedef union hfa384x_usbin {
+ struct hfa384x_usb_txfrm txfrm;
+ struct hfa384x_usb_cmdreq cmdreq;
+ struct hfa384x_usb_wridreq wridreq;
+ struct hfa384x_usb_rridreq rridreq;
+ struct hfa384x_usb_wmemreq wmemreq;
+ struct hfa384x_usb_rmemreq rmemreq;
+} __packed;
+
+union hfa384x_usbin {
__le16 type;
- hfa384x_usb_rxfrm_t rxfrm;
- hfa384x_usb_txfrm_t txfrm;
- hfa384x_usb_infofrm_t infofrm;
- hfa384x_usb_cmdresp_t cmdresp;
- hfa384x_usb_wridresp_t wridresp;
- hfa384x_usb_rridresp_t rridresp;
- hfa384x_usb_wmemresp_t wmemresp;
- hfa384x_usb_rmemresp_t rmemresp;
- hfa384x_usb_bufavail_t bufavail;
- hfa384x_usb_error_t usberror;
+ struct hfa384x_usb_rxfrm rxfrm;
+ struct hfa384x_usb_txfrm txfrm;
+ struct hfa384x_usb_infofrm infofrm;
+ struct hfa384x_usb_statusresp cmdresp;
+ struct hfa384x_usb_statusresp wridresp;
+ struct hfa384x_usb_rridresp rridresp;
+ struct hfa384x_usb_statusresp wmemresp;
+ struct hfa384x_usb_rmemresp rmemresp;
+ struct hfa384x_usb_bufavail bufavail;
+ struct hfa384x_usb_error usberror;
u8 boguspad[3000];
-} __packed hfa384x_usbin_t;
+} __packed;
/*--------------------------------------------------------------------
-PD record structures.
---------------------------------------------------------------------*/
+ * PD record structures.
+ *--------------------------------------------------------------------
+ */
-typedef struct hfa384x_pdr_pcb_partnum {
+struct hfa384x_pdr_pcb_partnum {
u8 num[8];
-} __packed hfa384x_pdr_pcb_partnum_t;
+} __packed;
-typedef struct hfa384x_pdr_pcb_tracenum {
+struct hfa384x_pdr_pcb_tracenum {
u8 num[8];
-} __packed hfa384x_pdr_pcb_tracenum_t;
+} __packed;
-typedef struct hfa384x_pdr_nic_serial {
+struct hfa384x_pdr_nic_serial {
u8 num[12];
-} __packed hfa384x_pdr_nic_serial_t;
+} __packed;
-typedef struct hfa384x_pdr_mkk_measurements {
+struct hfa384x_pdr_mkk_measurements {
double carrier_freq;
double occupied_band;
double power_density;
@@ -935,192 +957,193 @@ typedef struct hfa384x_pdr_mkk_measurements {
double rx_spur_f2;
double rx_spur_l1;
double rx_spur_l2;
-} __packed hfa384x_pdr_mkk_measurements_t;
+} __packed;
-typedef struct hfa384x_pdr_nic_ramsize {
+struct hfa384x_pdr_nic_ramsize {
u8 size[12]; /* units of KB */
-} __packed hfa384x_pdr_nic_ramsize_t;
+} __packed;
-typedef struct hfa384x_pdr_mfisuprange {
+struct hfa384x_pdr_mfisuprange {
u16 id;
u16 variant;
u16 bottom;
u16 top;
-} __packed hfa384x_pdr_mfisuprange_t;
+} __packed;
-typedef struct hfa384x_pdr_cfisuprange {
+struct hfa384x_pdr_cfisuprange {
u16 id;
u16 variant;
u16 bottom;
u16 top;
-} __packed hfa384x_pdr_cfisuprange_t;
+} __packed;
-typedef struct hfa384x_pdr_nicid {
+struct hfa384x_pdr_nicid {
u16 id;
u16 variant;
u16 major;
u16 minor;
-} __packed hfa384x_pdr_nicid_t;
+} __packed;
-typedef struct hfa384x_pdr_refdac_measurements {
+struct hfa384x_pdr_refdac_measurements {
u16 value[0];
-} __packed hfa384x_pdr_refdac_measurements_t;
+} __packed;
-typedef struct hfa384x_pdr_vgdac_measurements {
+struct hfa384x_pdr_vgdac_measurements {
u16 value[0];
-} __packed hfa384x_pdr_vgdac_measurements_t;
+} __packed;
-typedef struct hfa384x_pdr_level_comp_measurements {
+struct hfa384x_pdr_level_comp_measurements {
u16 value[0];
-} __packed hfa384x_pdr_level_compc_measurements_t;
+} __packed;
-typedef struct hfa384x_pdr_mac_address {
+struct hfa384x_pdr_mac_address {
u8 addr[6];
-} __packed hfa384x_pdr_mac_address_t;
+} __packed;
-typedef struct hfa384x_pdr_mkk_callname {
+struct hfa384x_pdr_mkk_callname {
u8 callname[8];
-} __packed hfa384x_pdr_mkk_callname_t;
+} __packed;
-typedef struct hfa384x_pdr_regdomain {
+struct hfa384x_pdr_regdomain {
u16 numdomains;
u16 domain[5];
-} __packed hfa384x_pdr_regdomain_t;
+} __packed;
-typedef struct hfa384x_pdr_allowed_channel {
+struct hfa384x_pdr_allowed_channel {
u16 ch_bitmap;
-} __packed hfa384x_pdr_allowed_channel_t;
+} __packed;
-typedef struct hfa384x_pdr_default_channel {
+struct hfa384x_pdr_default_channel {
u16 channel;
-} __packed hfa384x_pdr_default_channel_t;
+} __packed;
-typedef struct hfa384x_pdr_privacy_option {
+struct hfa384x_pdr_privacy_option {
u16 available;
-} __packed hfa384x_pdr_privacy_option_t;
+} __packed;
-typedef struct hfa384x_pdr_temptype {
+struct hfa384x_pdr_temptype {
u16 type;
-} __packed hfa384x_pdr_temptype_t;
+} __packed;
-typedef struct hfa384x_pdr_refdac_setup {
+struct hfa384x_pdr_refdac_setup {
u16 ch_value[14];
-} __packed hfa384x_pdr_refdac_setup_t;
+} __packed;
-typedef struct hfa384x_pdr_vgdac_setup {
+struct hfa384x_pdr_vgdac_setup {
u16 ch_value[14];
-} __packed hfa384x_pdr_vgdac_setup_t;
+} __packed;
-typedef struct hfa384x_pdr_level_comp_setup {
+struct hfa384x_pdr_level_comp_setup {
u16 ch_value[14];
-} __packed hfa384x_pdr_level_comp_setup_t;
+} __packed;
-typedef struct hfa384x_pdr_trimdac_setup {
+struct hfa384x_pdr_trimdac_setup {
u16 trimidac;
u16 trimqdac;
-} __packed hfa384x_pdr_trimdac_setup_t;
+} __packed;
-typedef struct hfa384x_pdr_ifr_setting {
+struct hfa384x_pdr_ifr_setting {
u16 value[3];
-} __packed hfa384x_pdr_ifr_setting_t;
+} __packed;
-typedef struct hfa384x_pdr_rfr_setting {
+struct hfa384x_pdr_rfr_setting {
u16 value[3];
-} __packed hfa384x_pdr_rfr_setting_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_baseline {
+struct hfa384x_pdr_hfa3861_baseline {
u16 value[50];
-} __packed hfa384x_pdr_hfa3861_baseline_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_shadow {
+struct hfa384x_pdr_hfa3861_shadow {
u32 value[32];
-} __packed hfa384x_pdr_hfa3861_shadow_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_ifrf {
+struct hfa384x_pdr_hfa3861_ifrf {
u32 value[20];
-} __packed hfa384x_pdr_hfa3861_ifrf_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_chcalsp {
+struct hfa384x_pdr_hfa3861_chcalsp {
u16 value[14];
-} __packed hfa384x_pdr_hfa3861_chcalsp_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_chcali {
+struct hfa384x_pdr_hfa3861_chcali {
u16 value[17];
-} __packed hfa384x_pdr_hfa3861_chcali_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_nic_config {
+struct hfa384x_pdr_hfa3861_nic_config {
u16 config_bitmap;
-} __packed hfa384x_pdr_nic_config_t;
+} __packed;
-typedef struct hfa384x_pdr_hfo_delay {
+struct hfa384x_pdr_hfo_delay {
u8 hfo_delay;
-} __packed hfa384x_hfo_delay_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_manf_testsp {
+struct hfa384x_pdr_hfa3861_manf_testsp {
u16 value[30];
-} __packed hfa384x_pdr_hfa3861_manf_testsp_t;
+} __packed;
-typedef struct hfa384x_pdr_hfa3861_manf_testi {
+struct hfa384x_pdr_hfa3861_manf_testi {
u16 value[30];
-} __packed hfa384x_pdr_hfa3861_manf_testi_t;
+} __packed;
-typedef struct hfa384x_end_of_pda {
+struct hfa384x_pdr_end_of_pda {
u16 crc;
-} __packed hfa384x_pdr_end_of_pda_t;
+} __packed;
-typedef struct hfa384x_pdrec {
+struct hfa384x_pdrec {
u16 len; /* in words */
u16 code;
union pdr {
- hfa384x_pdr_pcb_partnum_t pcb_partnum;
- hfa384x_pdr_pcb_tracenum_t pcb_tracenum;
- hfa384x_pdr_nic_serial_t nic_serial;
- hfa384x_pdr_mkk_measurements_t mkk_measurements;
- hfa384x_pdr_nic_ramsize_t nic_ramsize;
- hfa384x_pdr_mfisuprange_t mfisuprange;
- hfa384x_pdr_cfisuprange_t cfisuprange;
- hfa384x_pdr_nicid_t nicid;
- hfa384x_pdr_refdac_measurements_t refdac_measurements;
- hfa384x_pdr_vgdac_measurements_t vgdac_measurements;
- hfa384x_pdr_level_compc_measurements_t level_compc_measurements;
- hfa384x_pdr_mac_address_t mac_address;
- hfa384x_pdr_mkk_callname_t mkk_callname;
- hfa384x_pdr_regdomain_t regdomain;
- hfa384x_pdr_allowed_channel_t allowed_channel;
- hfa384x_pdr_default_channel_t default_channel;
- hfa384x_pdr_privacy_option_t privacy_option;
- hfa384x_pdr_temptype_t temptype;
- hfa384x_pdr_refdac_setup_t refdac_setup;
- hfa384x_pdr_vgdac_setup_t vgdac_setup;
- hfa384x_pdr_level_comp_setup_t level_comp_setup;
- hfa384x_pdr_trimdac_setup_t trimdac_setup;
- hfa384x_pdr_ifr_setting_t ifr_setting;
- hfa384x_pdr_rfr_setting_t rfr_setting;
- hfa384x_pdr_hfa3861_baseline_t hfa3861_baseline;
- hfa384x_pdr_hfa3861_shadow_t hfa3861_shadow;
- hfa384x_pdr_hfa3861_ifrf_t hfa3861_ifrf;
- hfa384x_pdr_hfa3861_chcalsp_t hfa3861_chcalsp;
- hfa384x_pdr_hfa3861_chcali_t hfa3861_chcali;
- hfa384x_pdr_nic_config_t nic_config;
- hfa384x_hfo_delay_t hfo_delay;
- hfa384x_pdr_hfa3861_manf_testsp_t hfa3861_manf_testsp;
- hfa384x_pdr_hfa3861_manf_testi_t hfa3861_manf_testi;
- hfa384x_pdr_end_of_pda_t end_of_pda;
+ struct hfa384x_pdr_pcb_partnum pcb_partnum;
+ struct hfa384x_pdr_pcb_tracenum pcb_tracenum;
+ struct hfa384x_pdr_nic_serial nic_serial;
+ struct hfa384x_pdr_mkk_measurements mkk_measurements;
+ struct hfa384x_pdr_nic_ramsize nic_ramsize;
+ struct hfa384x_pdr_mfisuprange mfisuprange;
+ struct hfa384x_pdr_cfisuprange cfisuprange;
+ struct hfa384x_pdr_nicid nicid;
+ struct hfa384x_pdr_refdac_measurements refdac_measurements;
+ struct hfa384x_pdr_vgdac_measurements vgdac_measurements;
+ struct hfa384x_pdr_level_comp_measurements level_compc_measurements;
+ struct hfa384x_pdr_mac_address mac_address;
+ struct hfa384x_pdr_mkk_callname mkk_callname;
+ struct hfa384x_pdr_regdomain regdomain;
+ struct hfa384x_pdr_allowed_channel allowed_channel;
+ struct hfa384x_pdr_default_channel default_channel;
+ struct hfa384x_pdr_privacy_option privacy_option;
+ struct hfa384x_pdr_temptype temptype;
+ struct hfa384x_pdr_refdac_setup refdac_setup;
+ struct hfa384x_pdr_vgdac_setup vgdac_setup;
+ struct hfa384x_pdr_level_comp_setup level_comp_setup;
+ struct hfa384x_pdr_trimdac_setup trimdac_setup;
+ struct hfa384x_pdr_ifr_setting ifr_setting;
+ struct hfa384x_pdr_rfr_setting rfr_setting;
+ struct hfa384x_pdr_hfa3861_baseline hfa3861_baseline;
+ struct hfa384x_pdr_hfa3861_shadow hfa3861_shadow;
+ struct hfa384x_pdr_hfa3861_ifrf hfa3861_ifrf;
+ struct hfa384x_pdr_hfa3861_chcalsp hfa3861_chcalsp;
+ struct hfa384x_pdr_hfa3861_chcali hfa3861_chcali;
+ struct hfa384x_pdr_hfa3861_nic_config nic_config;
+ struct hfa384x_pdr_hfo_delay hfo_delay;
+ struct hfa384x_pdr_hfa3861_manf_testsp hfa3861_manf_testsp;
+ struct hfa384x_pdr_hfa3861_manf_testi hfa3861_manf_testi;
+ struct hfa384x_pdr_end_of_pda end_of_pda;
} data;
-} __packed hfa384x_pdrec_t;
+} __packed;
#ifdef __KERNEL__
/*--------------------------------------------------------------------
---- MAC state structure, argument to all functions --
---- Also, a collection of support types --
---------------------------------------------------------------------*/
-typedef struct hfa384x_statusresult {
+ * --- MAC state structure, argument to all functions --
+ * --- Also, a collection of support types --
+ *--------------------------------------------------------------------
+ */
+struct hfa384x_cmdresult {
u16 status;
u16 resp0;
u16 resp1;
u16 resp2;
-} hfa384x_cmdresult_t;
+};
/* USB Control Exchange (CTLX):
* A queue of the structure below is maintained for all of the
@@ -1129,11 +1152,11 @@ typedef struct hfa384x_statusresult {
/* The following hfa384x_* structures are arguments to
* the usercb() for the different CTLX types.
*/
-typedef struct hfa384x_rridresult {
+struct hfa384x_rridresult {
u16 rid;
const void *riddata;
unsigned int riddata_len;
-} hfa384x_rridresult_t;
+};
enum ctlx_state {
CTLX_START = 0, /* Start state, not queued */
@@ -1156,12 +1179,12 @@ typedef void (*ctlx_cmdcb_t) (struct hfa384x *, const struct hfa384x_usbctlx *);
typedef void (*ctlx_usercb_t) (struct hfa384x *hw,
void *ctlxresult, void *usercb_data);
-typedef struct hfa384x_usbctlx {
+struct hfa384x_usbctlx {
struct list_head list;
size_t outbufsize;
- hfa384x_usbout_t outbuf; /* pkt buf for OUT */
- hfa384x_usbin_t inbuf; /* pkt buf for IN(a copy) */
+ union hfa384x_usbout outbuf; /* pkt buf for OUT */
+ union hfa384x_usbin inbuf; /* pkt buf for IN(a copy) */
CTLX_STATE state; /* Tracks running state */
@@ -1173,25 +1196,25 @@ typedef struct hfa384x_usbctlx {
void *usercb_data; /* at CTLX completion */
int variant; /* Identifies cmd variant */
-} hfa384x_usbctlx_t;
+};
-typedef struct hfa384x_usbctlxq {
+struct hfa384x_usbctlxq {
spinlock_t lock;
struct list_head pending;
struct list_head active;
struct list_head completing;
struct list_head reapable;
-} hfa384x_usbctlxq_t;
+};
-typedef struct hfa484x_metacmd {
+struct hfa384x_metacmd {
u16 cmd;
u16 parm0;
u16 parm1;
u16 parm2;
- hfa384x_cmdresult_t result;
-} hfa384x_metacmd_t;
+ struct hfa384x_cmdresult result;
+};
#define MAX_GRP_ADDR 32
#define WLAN_COMMENT_MAX 80 /* Max. length of user comment string. */
@@ -1218,15 +1241,15 @@ struct prism2sta_accesslist {
u8 addr1[WLAN_ACCESS_MAX][ETH_ALEN];
};
-typedef struct hfa384x {
+struct hfa384x {
/* USB support data */
struct usb_device *usb;
struct urb rx_urb;
struct sk_buff *rx_urb_skb;
struct urb tx_urb;
struct urb ctlx_urb;
- hfa384x_usbout_t txbuff;
- hfa384x_usbctlxq_t ctlxq;
+ union hfa384x_usbout txbuff;
+ struct hfa384x_usbctlxq ctlxq;
struct timer_list reqtimer;
struct timer_list resptimer;
@@ -1265,20 +1288,20 @@ typedef struct hfa384x {
/* Download support */
unsigned int dlstate;
- hfa384x_downloadbuffer_t bufinfo;
+ struct hfa384x_downloadbuffer bufinfo;
u16 dltimeout;
int scanflag; /* to signal scan complete */
int join_ap; /* are we joined to a specific ap */
int join_retries; /* number of join retries till we fail */
- hfa384x_JoinRequest_data_t joinreq; /* join request saved data */
+ struct hfa384x_JoinRequest_data joinreq; /* join request saved data */
- wlandevice_t *wlandev;
+ struct wlandevice *wlandev;
/* Timer to allow for the deferred processing of linkstatus messages */
struct work_struct link_bh;
struct work_struct commsqual_bh;
- hfa384x_commsquality_t qual;
+ struct hfa384x_commsquality qual;
struct timer_list commsqual_timer;
u16 link_status;
@@ -1297,92 +1320,93 @@ typedef struct hfa384x {
int dbmadjust;
/* Group Addresses - right now, there are up to a total
- of MAX_GRP_ADDR group addresses */
+ * of MAX_GRP_ADDR group addresses
+ */
u8 dot11_grp_addr[MAX_GRP_ADDR][ETH_ALEN];
unsigned int dot11_grpcnt;
/* Component Identities */
- hfa384x_compident_t ident_nic;
- hfa384x_compident_t ident_pri_fw;
- hfa384x_compident_t ident_sta_fw;
- hfa384x_compident_t ident_ap_fw;
+ struct hfa384x_compident ident_nic;
+ struct hfa384x_compident ident_pri_fw;
+ struct hfa384x_compident ident_sta_fw;
+ struct hfa384x_compident ident_ap_fw;
u16 mm_mods;
/* Supplier compatibility ranges */
- hfa384x_caplevel_t cap_sup_mfi;
- hfa384x_caplevel_t cap_sup_cfi;
- hfa384x_caplevel_t cap_sup_pri;
- hfa384x_caplevel_t cap_sup_sta;
- hfa384x_caplevel_t cap_sup_ap;
+ struct hfa384x_caplevel cap_sup_mfi;
+ struct hfa384x_caplevel cap_sup_cfi;
+ struct hfa384x_caplevel cap_sup_pri;
+ struct hfa384x_caplevel cap_sup_sta;
+ struct hfa384x_caplevel cap_sup_ap;
/* Actor compatibility ranges */
- hfa384x_caplevel_t cap_act_pri_cfi; /*
- * pri f/w to controller
- * interface
- */
+ struct hfa384x_caplevel cap_act_pri_cfi; /*
+ * pri f/w to controller
+ * interface
+ */
- hfa384x_caplevel_t cap_act_sta_cfi; /*
- * sta f/w to controller
- * interface
- */
+ struct hfa384x_caplevel cap_act_sta_cfi; /*
+ * sta f/w to controller
+ * interface
+ */
- hfa384x_caplevel_t cap_act_sta_mfi; /* sta f/w to modem interface */
+ struct hfa384x_caplevel cap_act_sta_mfi; /* sta f/w to modem interface */
- hfa384x_caplevel_t cap_act_ap_cfi; /*
+ struct hfa384x_caplevel cap_act_ap_cfi; /*
* ap f/w to controller
* interface
*/
- hfa384x_caplevel_t cap_act_ap_mfi; /* ap f/w to modem interface */
+ struct hfa384x_caplevel cap_act_ap_mfi; /* ap f/w to modem interface */
u32 psusercount; /* Power save user count. */
- hfa384x_CommTallies32_t tallies; /* Communication tallies. */
+ struct hfa384x_CommTallies32 tallies; /* Communication tallies. */
u8 comment[WLAN_COMMENT_MAX + 1]; /* User comment */
/* Channel Info request results (AP only) */
struct {
atomic_t done;
u8 count;
- hfa384x_ChInfoResult_t results;
+ struct hfa384x_ChInfoResult results;
} channel_info;
- hfa384x_InfFrame_t *scanresults;
+ struct hfa384x_InfFrame *scanresults;
struct prism2sta_authlist authlist; /* Authenticated station list. */
unsigned int accessmode; /* Access mode. */
struct prism2sta_accesslist allow; /* Allowed station list. */
struct prism2sta_accesslist deny; /* Denied station list. */
-} hfa384x_t;
+};
-void hfa384x_create(hfa384x_t *hw, struct usb_device *usb);
-void hfa384x_destroy(hfa384x_t *hw);
+void hfa384x_create(struct hfa384x *hw, struct usb_device *usb);
+void hfa384x_destroy(struct hfa384x *hw);
int
-hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis);
-int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport);
-int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport);
-int hfa384x_drvr_flashdl_enable(hfa384x_t *hw);
-int hfa384x_drvr_flashdl_disable(hfa384x_t *hw);
-int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len);
-int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len);
-int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr);
-int hfa384x_drvr_ramdl_disable(hfa384x_t *hw);
-int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len);
-int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len);
-int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len);
-
-static inline int hfa384x_drvr_getconfig16(hfa384x_t *hw, u16 rid, void *val)
+hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis);
+int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport);
+int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport);
+int hfa384x_drvr_flashdl_enable(struct hfa384x *hw);
+int hfa384x_drvr_flashdl_disable(struct hfa384x *hw);
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
+int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
+int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr);
+int hfa384x_drvr_ramdl_disable(struct hfa384x *hw);
+int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
+int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len);
+int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
+
+static inline int hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
{
int result = 0;
result = hfa384x_drvr_getconfig(hw, rid, val, sizeof(u16));
if (result == 0)
- *((u16 *) val) = le16_to_cpu(*((u16 *) val));
+ *((u16 *)val) = le16_to_cpu(*((u16 *)val));
return result;
}
-static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val)
+static inline int hfa384x_drvr_setconfig16(struct hfa384x *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
@@ -1390,13 +1414,13 @@ static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val)
}
int
-hfa384x_drvr_setconfig_async(hfa384x_t *hw,
+hfa384x_drvr_setconfig_async(struct hfa384x *hw,
u16 rid,
void *buf,
u16 len, ctlx_usercb_t usercb, void *usercb_data);
static inline int
-hfa384x_drvr_setconfig16_async(hfa384x_t *hw, u16 rid, u16 val)
+hfa384x_drvr_setconfig16_async(struct hfa384x *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
@@ -1404,21 +1428,21 @@ hfa384x_drvr_setconfig16_async(hfa384x_t *hw, u16 rid, u16 val)
NULL, NULL);
}
-int hfa384x_drvr_start(hfa384x_t *hw);
-int hfa384x_drvr_stop(hfa384x_t *hw);
+int hfa384x_drvr_start(struct hfa384x *hw);
+int hfa384x_drvr_stop(struct hfa384x *hw);
int
-hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
+hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep);
-void hfa384x_tx_timeout(wlandevice_t *wlandev);
+void hfa384x_tx_timeout(struct wlandevice *wlandev);
-int hfa384x_cmd_initialize(hfa384x_t *hw);
-int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport);
-int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport);
-int hfa384x_cmd_allocate(hfa384x_t *hw, u16 len);
-int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable);
+int hfa384x_cmd_initialize(struct hfa384x *hw);
+int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport);
+int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport);
+int hfa384x_cmd_allocate(struct hfa384x *hw, u16 len);
+int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable);
int
-hfa384x_cmd_download(hfa384x_t *hw,
+hfa384x_cmd_download(struct hfa384x *hw,
u16 mode, u16 lowaddr, u16 highaddr, u16 codelen);
#endif /*__KERNEL__ */
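
The hfa384x.h hunks above all apply one pattern: the packed wire-format typedefs are dropped in favour of plain struct tags, in line with the kernel coding-style advice against adding struct typedefs, and the old response aliases (wridresp, wmemresp) now use struct hfa384x_usb_statusresp directly. A minimal before/after sketch of the conversion, using names taken from the hunks above (illustrative only; it assumes the kernel's u16 and __packed definitions):

	/* Before: the typedef hides the struct tag behind a _t alias. */
	typedef struct hfa384x_usb_bufavail {
		u16 type;
		u16 frmlen;
	} __packed hfa384x_usb_bufavail_t;

	hfa384x_usb_bufavail_t bufavail;	/* declaration via the alias */

	/* After: the tag is used directly and the alias disappears. */
	struct hfa384x_usb_bufavail {
		u16 type;
		u16 frmlen;
	} __packed;

	struct hfa384x_usb_bufavail bufavail;	/* declaration via the tag */
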
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 337810750f2b..6a107f8a06e2 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -154,13 +154,13 @@ static void dbprint_urb(struct urb *urb);
#endif
static void
-hfa384x_int_rxmonitor(wlandevice_t *wlandev, hfa384x_usb_rxfrm_t *rxfrm);
+hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm);
static void hfa384x_usb_defer(struct work_struct *data);
-static int submit_rx_urb(hfa384x_t *hw, gfp_t flags);
+static int submit_rx_urb(struct hfa384x *hw, gfp_t flags);
-static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t flags);
+static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t flags);
/*---------------------------------------------------*/
/* Callbacks */
@@ -169,19 +169,19 @@ static void hfa384x_ctlxout_callback(struct urb *urb);
static void hfa384x_usbin_callback(struct urb *urb);
static void
-hfa384x_usbin_txcompl(wlandevice_t *wlandev, hfa384x_usbin_t *usbin);
+hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
-static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb);
+static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
-static void hfa384x_usbin_info(wlandevice_t *wlandev, hfa384x_usbin_t *usbin);
+static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
-static void hfa384x_usbin_ctlx(hfa384x_t *hw, hfa384x_usbin_t *usbin,
+static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status);
/*---------------------------------------------------*/
/* Functions to support the prism2 usb command queue */
-static void hfa384x_usbctlxq_run(hfa384x_t *hw);
+static void hfa384x_usbctlxq_run(struct hfa384x *hw);
static void hfa384x_usbctlx_reqtimerfn(unsigned long data);
@@ -193,42 +193,42 @@ static void hfa384x_usbctlx_completion_task(unsigned long data);
static void hfa384x_usbctlx_reaper_task(unsigned long data);
-static int hfa384x_usbctlx_submit(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
+static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
-static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
+static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
struct usbctlx_completor {
int (*complete)(struct usbctlx_completor *);
};
static int
-hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
- hfa384x_usbctlx_t *ctlx,
+hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor);
static int
-unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
+unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
-static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx);
+static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx);
static int
-usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp,
- hfa384x_cmdresult_t *result);
+usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
+ struct hfa384x_cmdresult *result);
static void
-usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp,
- hfa384x_rridresult_t *result);
+usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
+ struct hfa384x_rridresult *result);
/*---------------------------------------------------*/
/* Low level req/resp CTLX formatters and submitters */
static int
-hfa384x_docmd(hfa384x_t *hw,
+hfa384x_docmd(struct hfa384x *hw,
enum cmd_mode mode,
- hfa384x_metacmd_t *cmd,
+ struct hfa384x_metacmd *cmd,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
-hfa384x_dorrid(hfa384x_t *hw,
+hfa384x_dorrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
@@ -236,7 +236,7 @@ hfa384x_dorrid(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
-hfa384x_dowrid(hfa384x_t *hw,
+hfa384x_dowrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
@@ -244,7 +244,7 @@ hfa384x_dowrid(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
-hfa384x_dormem(hfa384x_t *hw,
+hfa384x_dormem(struct hfa384x *hw,
enum cmd_mode mode,
u16 page,
u16 offset,
@@ -253,7 +253,7 @@ hfa384x_dormem(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
-hfa384x_dowmem(hfa384x_t *hw,
+hfa384x_dowmem(struct hfa384x *hw,
enum cmd_mode mode,
u16 page,
u16 offset,
@@ -278,9 +278,9 @@ static inline const char *ctlxstr(CTLX_STATE s)
return ctlx_str[s];
};
-static inline hfa384x_usbctlx_t *get_active_ctlx(hfa384x_t *hw)
+static inline struct hfa384x_usbctlx *get_active_ctlx(struct hfa384x *hw)
{
- return list_entry(hw->ctlxq.active.next, hfa384x_usbctlx_t, list);
+ return list_entry(hw->ctlxq.active.next, struct hfa384x_usbctlx, list);
}
#ifdef DEBUG_USB
@@ -322,12 +322,12 @@ void dbprint_urb(struct urb *urb)
* Call context:
* Any
----------------------------------------------------------------*/
-static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
+static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
{
struct sk_buff *skb;
int result;
- skb = dev_alloc_skb(sizeof(hfa384x_usbin_t));
+ skb = dev_alloc_skb(sizeof(union hfa384x_usbin));
if (!skb) {
result = -ENOMEM;
goto done;
@@ -336,7 +336,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
/* Post the IN urb */
usb_fill_bulk_urb(&hw->rx_urb, hw->usb,
hw->endp_in,
- skb->data, sizeof(hfa384x_usbin_t),
+ skb->data, sizeof(union hfa384x_usbin),
hfa384x_usbin_callback, hw->wlandev);
hw->rx_urb_skb = skb;
@@ -384,7 +384,7 @@ done:
* Call context:
* Any
----------------------------------------------------------------*/
-static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags)
+static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
{
struct net_device *netdev = hw->wlandev->netdev;
int result;
@@ -429,7 +429,7 @@ static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags)
----------------------------------------------------------------*/
static void hfa384x_usb_defer(struct work_struct *data)
{
- hfa384x_t *hw = container_of(data, struct hfa384x, usb_work);
+ struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
struct net_device *netdev = hw->wlandev->netdev;
/* Don't bother trying to reset anything if the plug
@@ -503,7 +503,7 @@ static void hfa384x_usb_defer(struct work_struct *data)
/*----------------------------------------------------------------
* hfa384x_create
*
-* Sets up the hfa384x_t data structure for use. Note this
+* Sets up the struct hfa384x data structure for use. Note this
* does _not_ initialize the actual hardware, just the data structures
* we use to keep track of its state.
*
@@ -521,9 +521,9 @@ static void hfa384x_usb_defer(struct work_struct *data)
* Call context:
* process
----------------------------------------------------------------*/
-void hfa384x_create(hfa384x_t *hw, struct usb_device *usb)
+void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(hfa384x_t));
+ memset(hw, 0, sizeof(struct hfa384x));
hw->usb = usb;
/* set up the endpoints */
@@ -592,7 +592,7 @@ void hfa384x_create(hfa384x_t *hw, struct usb_device *usb)
* Call context:
* process
----------------------------------------------------------------*/
-void hfa384x_destroy(hfa384x_t *hw)
+void hfa384x_destroy(struct hfa384x *hw)
{
struct sk_buff *skb;
@@ -608,9 +608,9 @@ void hfa384x_destroy(hfa384x_t *hw)
dev_kfree_skb(skb);
}
-static hfa384x_usbctlx_t *usbctlx_alloc(void)
+static struct hfa384x_usbctlx *usbctlx_alloc(void)
{
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
ctlx = kzalloc(sizeof(*ctlx),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
@@ -621,8 +621,8 @@ static hfa384x_usbctlx_t *usbctlx_alloc(void)
}
static int
-usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp,
- hfa384x_cmdresult_t *result)
+usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
+ struct hfa384x_cmdresult *result)
{
result->status = le16_to_cpu(cmdresp->status);
result->resp0 = le16_to_cpu(cmdresp->resp0);
@@ -636,8 +636,8 @@ usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp,
}
static void
-usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp,
- hfa384x_rridresult_t *result)
+usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
+ struct hfa384x_rridresult *result)
{
result->rid = le16_to_cpu(rridresp->rid);
result->riddata = rridresp->data;
@@ -647,13 +647,13 @@ usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp,
/*----------------------------------------------------------------
* Completor object:
* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that returns a hfa384x_cmdresult_t structure.
+* when processing a CTLX that returns a struct hfa384x_cmdresult structure.
----------------------------------------------------------------*/
struct usbctlx_cmd_completor {
struct usbctlx_completor head;
- const hfa384x_usb_cmdresp_t *cmdresp;
- hfa384x_cmdresult_t *result;
+ const struct hfa384x_usb_statusresp *cmdresp;
+ struct hfa384x_cmdresult *result;
};
static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
@@ -667,9 +667,9 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
static inline struct usbctlx_completor *init_cmd_completor(
struct usbctlx_cmd_completor
*completor,
- const hfa384x_usb_cmdresp_t
+ const struct hfa384x_usb_statusresp
*cmdresp,
- hfa384x_cmdresult_t *result)
+ struct hfa384x_cmdresult *result)
{
completor->head.complete = usbctlx_cmd_completor_fn;
completor->cmdresp = cmdresp;
@@ -685,7 +685,7 @@ static inline struct usbctlx_completor *init_cmd_completor(
struct usbctlx_rrid_completor {
struct usbctlx_completor head;
- const hfa384x_usb_rridresp_t *rridresp;
+ const struct hfa384x_usb_rridresp *rridresp;
void *riddata;
unsigned int riddatalen;
};
@@ -693,7 +693,7 @@ struct usbctlx_rrid_completor {
static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
{
struct usbctlx_rrid_completor *complete;
- hfa384x_rridresult_t rridresult;
+ struct hfa384x_rridresult rridresult;
complete = (struct usbctlx_rrid_completor *)head;
usbctlx_get_rridresult(complete->rridresp, &rridresult);
@@ -713,7 +713,7 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
static inline struct usbctlx_completor *init_rrid_completor(
struct usbctlx_rrid_completor
*completor,
- const hfa384x_usb_rridresp_t
+ const struct hfa384x_usb_rridresp
*rridresp,
void *riddata,
unsigned int riddatalen)
@@ -744,7 +744,7 @@ static inline struct usbctlx_completor *init_rrid_completor(
struct usbctlx_rmem_completor {
struct usbctlx_completor head;
- const hfa384x_usb_rmemresp_t *rmemresp;
+ const struct hfa384x_usb_rmemresp *rmemresp;
void *data;
unsigned int len;
};
@@ -762,7 +762,7 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
static inline struct usbctlx_completor *init_rmem_completor(
struct usbctlx_rmem_completor
*completor,
- hfa384x_usb_rmemresp_t
+ struct hfa384x_usb_rmemresp
*rmemresp,
void *data,
unsigned int len)
@@ -795,10 +795,10 @@ static inline struct usbctlx_completor *init_rmem_completor(
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
+static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx)
{
if (ctlx->usercb) {
- hfa384x_cmdresult_t cmdresult;
+ struct hfa384x_cmdresult cmdresult;
if (ctlx->state != CTLX_COMPLETE) {
memset(&cmdresult, 0, sizeof(cmdresult));
@@ -812,21 +812,21 @@ static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
}
}
-static inline int hfa384x_docmd_wait(hfa384x_t *hw, hfa384x_metacmd_t *cmd)
+static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd)
{
return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
}
static inline int
-hfa384x_docmd_async(hfa384x_t *hw,
- hfa384x_metacmd_t *cmd,
+hfa384x_docmd_async(struct hfa384x *hw,
+ struct hfa384x_metacmd *cmd,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
return hfa384x_docmd(hw, DOASYNC, cmd, cmdcb, usercb, usercb_data);
}
static inline int
-hfa384x_dorrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
+hfa384x_dorrid_wait(struct hfa384x *hw, u16 rid, void *riddata,
unsigned int riddatalen)
{
return hfa384x_dorrid(hw, DOWAIT,
@@ -834,7 +834,7 @@ hfa384x_dorrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
}
static inline int
-hfa384x_dorrid_async(hfa384x_t *hw,
+hfa384x_dorrid_async(struct hfa384x *hw,
u16 rid, void *riddata, unsigned int riddatalen,
ctlx_cmdcb_t cmdcb,
ctlx_usercb_t usercb, void *usercb_data)
@@ -845,7 +845,7 @@ hfa384x_dorrid_async(hfa384x_t *hw,
}
static inline int
-hfa384x_dowrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
+hfa384x_dowrid_wait(struct hfa384x *hw, u16 rid, void *riddata,
unsigned int riddatalen)
{
return hfa384x_dowrid(hw, DOWAIT,
@@ -853,7 +853,7 @@ hfa384x_dowrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
}
static inline int
-hfa384x_dowrid_async(hfa384x_t *hw,
+hfa384x_dowrid_async(struct hfa384x *hw,
u16 rid, void *riddata, unsigned int riddatalen,
ctlx_cmdcb_t cmdcb,
ctlx_usercb_t usercb, void *usercb_data)
@@ -864,7 +864,7 @@ hfa384x_dowrid_async(hfa384x_t *hw,
}
static inline int
-hfa384x_dormem_wait(hfa384x_t *hw,
+hfa384x_dormem_wait(struct hfa384x *hw,
u16 page, u16 offset, void *data, unsigned int len)
{
return hfa384x_dormem(hw, DOWAIT,
@@ -872,7 +872,7 @@ hfa384x_dormem_wait(hfa384x_t *hw,
}
static inline int
-hfa384x_dormem_async(hfa384x_t *hw,
+hfa384x_dormem_async(struct hfa384x *hw,
u16 page, u16 offset, void *data, unsigned int len,
ctlx_cmdcb_t cmdcb,
ctlx_usercb_t usercb, void *usercb_data)
@@ -883,7 +883,7 @@ hfa384x_dormem_async(hfa384x_t *hw,
}
static inline int
-hfa384x_dowmem_wait(hfa384x_t *hw,
+hfa384x_dowmem_wait(struct hfa384x *hw,
u16 page, u16 offset, void *data, unsigned int len)
{
return hfa384x_dowmem(hw, DOWAIT,
@@ -891,7 +891,7 @@ hfa384x_dowmem_wait(hfa384x_t *hw,
}
static inline int
-hfa384x_dowmem_async(hfa384x_t *hw,
+hfa384x_dowmem_async(struct hfa384x *hw,
u16 page,
u16 offset,
void *data,
@@ -923,11 +923,11 @@ hfa384x_dowmem_async(hfa384x_t *hw,
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_cmd_initialize(hfa384x_t *hw)
+int hfa384x_cmd_initialize(struct hfa384x *hw)
{
int result = 0;
int i;
- hfa384x_metacmd_t cmd;
+ struct hfa384x_metacmd cmd;
cmd.cmd = HFA384x_CMDCODE_INIT;
cmd.parm0 = 0;
@@ -969,9 +969,9 @@ int hfa384x_cmd_initialize(hfa384x_t *hw)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
+int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
{
- hfa384x_metacmd_t cmd;
+ struct hfa384x_metacmd cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) |
HFA384x_CMD_MACPORT_SET(macport);
@@ -1002,9 +1002,9 @@ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
+int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
{
- hfa384x_metacmd_t cmd;
+ struct hfa384x_metacmd cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) |
HFA384x_CMD_MACPORT_SET(macport);
@@ -1044,9 +1044,9 @@ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
+int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
{
- hfa384x_metacmd_t cmd;
+ struct hfa384x_metacmd cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) |
HFA384x_CMD_AINFO_SET(enable);
@@ -1095,10 +1095,10 @@ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
+int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
u16 highaddr, u16 codelen)
{
- hfa384x_metacmd_t cmd;
+ struct hfa384x_metacmd cmd;
pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n",
mode, lowaddr, highaddr, codelen);
@@ -1136,7 +1136,7 @@ int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
+int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis)
{
int result;
@@ -1173,8 +1173,8 @@ int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
* Call context:
* process
----------------------------------------------------------------*/
-static int hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
- hfa384x_usbctlx_t *ctlx,
+static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx,
struct usbctlx_completor *completor)
{
unsigned long flags;
@@ -1289,13 +1289,13 @@ cleanup:
* process
----------------------------------------------------------------*/
static int
-hfa384x_docmd(hfa384x_t *hw,
+hfa384x_docmd(struct hfa384x *hw,
enum cmd_mode mode,
- hfa384x_metacmd_t *cmd,
+ struct hfa384x_metacmd *cmd,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
int result;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
ctlx = usbctlx_alloc();
if (!ctlx) {
@@ -1377,7 +1377,7 @@ done:
* process (DOWAIT or DOASYNC)
----------------------------------------------------------------*/
static int
-hfa384x_dorrid(hfa384x_t *hw,
+hfa384x_dorrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
@@ -1385,7 +1385,7 @@ hfa384x_dorrid(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
int result;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
ctlx = usbctlx_alloc();
if (!ctlx) {
@@ -1458,7 +1458,7 @@ done:
* process (DOWAIT or DOASYNC)
----------------------------------------------------------------*/
static int
-hfa384x_dowrid(hfa384x_t *hw,
+hfa384x_dowrid(struct hfa384x *hw,
enum cmd_mode mode,
u16 rid,
void *riddata,
@@ -1466,7 +1466,7 @@ hfa384x_dowrid(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
int result;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
ctlx = usbctlx_alloc();
if (!ctlx) {
@@ -1497,7 +1497,7 @@ hfa384x_dowrid(hfa384x_t *hw,
kfree(ctlx);
} else if (mode == DOWAIT) {
struct usbctlx_cmd_completor completor;
- hfa384x_cmdresult_t wridresult;
+ struct hfa384x_cmdresult wridresult;
result = hfa384x_usbctlx_complete_sync(hw,
ctlx,
@@ -1545,7 +1545,7 @@ done:
* process (DOWAIT or DOASYNC)
----------------------------------------------------------------*/
static int
-hfa384x_dormem(hfa384x_t *hw,
+hfa384x_dormem(struct hfa384x *hw,
enum cmd_mode mode,
u16 page,
u16 offset,
@@ -1554,7 +1554,7 @@ hfa384x_dormem(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
int result;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
ctlx = usbctlx_alloc();
if (!ctlx) {
@@ -1636,7 +1636,7 @@ done:
* process (DOWAIT or DOASYNC)
----------------------------------------------------------------*/
static int
-hfa384x_dowmem(hfa384x_t *hw,
+hfa384x_dowmem(struct hfa384x *hw,
enum cmd_mode mode,
u16 page,
u16 offset,
@@ -1645,7 +1645,7 @@ hfa384x_dowmem(hfa384x_t *hw,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
int result;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len);
@@ -1679,7 +1679,7 @@ hfa384x_dowmem(hfa384x_t *hw,
kfree(ctlx);
} else if (mode == DOWAIT) {
struct usbctlx_cmd_completor completor;
- hfa384x_cmdresult_t wmemresult;
+ struct hfa384x_cmdresult wmemresult;
result = hfa384x_usbctlx_complete_sync(hw,
ctlx,
@@ -1715,7 +1715,7 @@ done:
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport)
+int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1753,7 +1753,7 @@ int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport)
+int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
{
int result = 0;
@@ -1790,7 +1790,7 @@ int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_enable(hfa384x_t *hw)
+int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
{
int result = 0;
int i;
@@ -1849,7 +1849,7 @@ int hfa384x_drvr_flashdl_enable(hfa384x_t *hw)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_disable(hfa384x_t *hw)
+int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED)
@@ -1894,7 +1894,7 @@ int hfa384x_drvr_flashdl_disable(hfa384x_t *hw)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
{
int result = 0;
u32 dlbufaddr;
@@ -2032,7 +2032,7 @@ exit_proc:
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len)
+int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dorrid_wait(hw, rid, buf, len);
}
@@ -2061,7 +2061,7 @@ int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len)
* process
----------------------------------------------------------------*/
int
-hfa384x_drvr_setconfig_async(hfa384x_t *hw,
+hfa384x_drvr_setconfig_async(struct hfa384x *hw,
u16 rid,
void *buf,
u16 len, ctlx_usercb_t usercb, void *usercb_data)
@@ -2088,7 +2088,7 @@ hfa384x_drvr_setconfig_async(hfa384x_t *hw,
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_ramdl_disable(hfa384x_t *hw)
+int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
{
/* Check that we're already in the download state */
if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED)
@@ -2128,7 +2128,7 @@ int hfa384x_drvr_ramdl_disable(hfa384x_t *hw)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr)
+int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
{
int result = 0;
u16 lowaddr;
@@ -2196,7 +2196,7 @@ int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
+int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
{
int result = 0;
int nwrites;
@@ -2276,7 +2276,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
* Call context:
* process or non-card interrupt.
----------------------------------------------------------------*/
-int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
+int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
{
int result = 0;
u16 *pda = buf;
@@ -2386,7 +2386,7 @@ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len)
+int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
return hfa384x_dowrid_wait(hw, rid, buf, len);
}
@@ -2411,7 +2411,7 @@ int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len)
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_start(hfa384x_t *hw)
+int hfa384x_drvr_start(struct hfa384x *hw)
{
int result, result1, result2;
u16 status;
@@ -2512,7 +2512,7 @@ done:
* Call context:
* process
----------------------------------------------------------------*/
-int hfa384x_drvr_stop(hfa384x_t *hw)
+int hfa384x_drvr_stop(struct hfa384x *hw)
{
int i;
@@ -2562,11 +2562,11 @@ int hfa384x_drvr_stop(hfa384x_t *hw)
* Call context:
* interrupt
----------------------------------------------------------------*/
-int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb,
+int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
- int usbpktlen = sizeof(hfa384x_tx_frame_t);
+ int usbpktlen = sizeof(struct hfa384x_tx_frame);
int result;
int ret;
char *ptr;
@@ -2656,9 +2656,9 @@ exit:
return result;
}
-void hfa384x_tx_timeout(wlandevice_t *wlandev)
+void hfa384x_tx_timeout(struct wlandevice *wlandev)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -2681,7 +2681,7 @@ void hfa384x_tx_timeout(wlandevice_t *wlandev)
* Tasklet to delete dead CTLX objects
*
* Arguments:
-* data ptr to a hfa384x_t
+* data ptr to a struct hfa384x
*
* Returns:
*
@@ -2690,8 +2690,8 @@ void hfa384x_tx_timeout(wlandevice_t *wlandev)
----------------------------------------------------------------*/
static void hfa384x_usbctlx_reaper_task(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *)data;
- hfa384x_usbctlx_t *ctlx, *temp;
+ struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -2713,7 +2713,7 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
* Tasklet to call completion handlers for returned CTLXs
*
* Arguments:
-* data ptr to hfa384x_t
+* data ptr to struct hfa384x
*
* Returns:
* Nothing
@@ -2723,8 +2723,8 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
----------------------------------------------------------------*/
static void hfa384x_usbctlx_completion_task(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *)data;
- hfa384x_usbctlx_t *ctlx, *temp;
+ struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
int reap = 0;
@@ -2787,7 +2787,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
* next command on the queue is run afterwards.
*
* Arguments:
-* hw ptr to the hfa384x_t structure
+* hw ptr to the struct hfa384x structure
* ctlx ptr to a CTLX structure
*
* Returns:
@@ -2797,8 +2797,8 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
* Call context:
* Either process or interrupt, but presumably interrupt
----------------------------------------------------------------*/
-static int unlocked_usbctlx_cancel_async(hfa384x_t *hw,
- hfa384x_usbctlx_t *ctlx)
+static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
+ struct hfa384x_usbctlx *ctlx)
{
int ret;
@@ -2836,7 +2836,7 @@ static int unlocked_usbctlx_cancel_async(hfa384x_t *hw,
* tasklet is scheduled.
*
* Arguments:
-* hw ptr to a hfa384x_t structure
+* hw ptr to a struct hfa384x structure
* ctlx ptr to a ctlx structure
*
* Returns:
@@ -2847,7 +2847,7 @@ static int unlocked_usbctlx_cancel_async(hfa384x_t *hw,
* Call context:
* Either, assume interrupt
----------------------------------------------------------------*/
-static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx)
+static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
{
/* Timers have been stopped, and ctlx should be in
* a terminal state. Retire it from the "active"
@@ -2876,7 +2876,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx)
* Checks to see if the head item is running. If not, starts it.
*
* Arguments:
-* hw ptr to hfa384x_t
+* hw ptr to struct hfa384x
*
* Returns:
* nothing
@@ -2886,7 +2886,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx)
* Call context:
* any
----------------------------------------------------------------*/
-static void hfa384x_usbctlxq_run(hfa384x_t *hw)
+static void hfa384x_usbctlxq_run(struct hfa384x *hw)
{
unsigned long flags;
@@ -2905,12 +2905,12 @@ static void hfa384x_usbctlxq_run(hfa384x_t *hw)
goto unlock;
while (!list_empty(&hw->ctlxq.pending)) {
- hfa384x_usbctlx_t *head;
+ struct hfa384x_usbctlx *head;
int result;
/* This is the first pending command */
head = list_entry(hw->ctlxq.pending.next,
- hfa384x_usbctlx_t, list);
+ struct hfa384x_usbctlx, list);
/* We need to split this off to avoid a race condition */
list_move_tail(&head->list, &hw->ctlxq.active);
@@ -2988,9 +2988,9 @@ unlock:
----------------------------------------------------------------*/
static void hfa384x_usbin_callback(struct urb *urb)
{
- wlandevice_t *wlandev = urb->context;
- hfa384x_t *hw;
- hfa384x_usbin_t *usbin = (hfa384x_usbin_t *)urb->transfer_buffer;
+ struct wlandevice *wlandev = urb->context;
+ struct hfa384x *hw;
+ union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer;
struct sk_buff *skb = NULL;
int result;
int urb_status;
@@ -3154,7 +3154,7 @@ exit:
* queue and our state updated accordingly.
*
* Arguments:
-* hw ptr to hfa384x_t
+* hw ptr to struct hfa384x
* usbin ptr to USB IN packet
* urb_status status of this Bulk-In URB
*
@@ -3166,10 +3166,10 @@ exit:
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_usbin_ctlx(hfa384x_t *hw, hfa384x_usbin_t *usbin,
+static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
int urb_status)
{
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
int run_queue = 0;
unsigned long flags;
@@ -3285,8 +3285,8 @@ unlock:
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_usbin_txcompl(wlandevice_t *wlandev,
- hfa384x_usbin_t *usbin)
+static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
+ union hfa384x_usbin *usbin)
{
u16 status;
@@ -3316,10 +3316,10 @@ static void hfa384x_usbin_txcompl(wlandevice_t *wlandev,
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb)
+static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
- hfa384x_usbin_t *usbin = (hfa384x_usbin_t *)skb->data;
- hfa384x_t *hw = wlandev->priv;
+ union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
+ struct hfa384x *hw = wlandev->priv;
int hdrlen;
struct p80211_rxmeta *rxmeta;
u16 data_len;
@@ -3346,7 +3346,7 @@ static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb)
hdrlen = p80211_headerlen(fc);
/* Pull off the descriptor */
- skb_pull(skb, sizeof(hfa384x_rx_frame_t));
+ skb_pull(skb, sizeof(struct hfa384x_rx_frame));
/* Now shunt the header block up against the data block
* with an "overlapping" copy
@@ -3416,17 +3416,17 @@ static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb)
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
- hfa384x_usb_rxfrm_t *rxfrm)
+static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
+ struct hfa384x_usb_rxfrm *rxfrm)
{
- hfa384x_rx_frame_t *rxdesc = &(rxfrm->desc);
+ struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc);
unsigned int hdrlen = 0;
unsigned int datalen = 0;
unsigned int skblen = 0;
u8 *datap;
u16 fc;
struct sk_buff *skb;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
/* Remember the status, time, and data_len fields are in host order */
/* Figure out how big the frame is */
@@ -3517,7 +3517,8 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
* Call context:
* interrupt
----------------------------------------------------------------*/
-static void hfa384x_usbin_info(wlandevice_t *wlandev, hfa384x_usbin_t *usbin)
+static void hfa384x_usbin_info(struct wlandevice *wlandev,
+ union hfa384x_usbin *usbin)
{
usbin->infofrm.info.framelen =
le16_to_cpu(usbin->infofrm.info.framelen);
@@ -3542,7 +3543,7 @@ static void hfa384x_usbin_info(wlandevice_t *wlandev, hfa384x_usbin_t *usbin)
----------------------------------------------------------------*/
static void hfa384x_usbout_callback(struct urb *urb)
{
- wlandevice_t *wlandev = urb->context;
+ struct wlandevice *wlandev = urb->context;
#ifdef DEBUG_USB
dbprint_urb(urb);
@@ -3556,7 +3557,7 @@ static void hfa384x_usbout_callback(struct urb *urb)
case -EPIPE:
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
netdev_warn(hw->wlandev->netdev,
"%s tx pipe stalled: requesting reset\n",
@@ -3572,7 +3573,7 @@ static void hfa384x_usbout_callback(struct urb *urb)
case -ETIMEDOUT:
case -EILSEQ:
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
if (!test_and_set_bit
(THROTTLE_TX, &hw->usb_flags) &&
@@ -3617,11 +3618,11 @@ static void hfa384x_usbout_callback(struct urb *urb)
----------------------------------------------------------------*/
static void hfa384x_ctlxout_callback(struct urb *urb)
{
- hfa384x_t *hw = urb->context;
+ struct hfa384x *hw = urb->context;
int delete_resptimer = 0;
int timer_ok = 1;
int run_queue = 0;
- hfa384x_usbctlx_t *ctlx;
+ struct hfa384x_usbctlx *ctlx;
unsigned long flags;
pr_debug("urb->status=%d\n", urb->status);
@@ -3736,7 +3737,7 @@ delresp:
* URB containing a Prism2.x XXX_Request was never called.
*
* Arguments:
-* data a ptr to the hfa384x_t
+* data a ptr to the struct hfa384x
*
* Returns:
* nothing
@@ -3748,7 +3749,7 @@ delresp:
----------------------------------------------------------------*/
static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *)data;
+ struct hfa384x *hw = (struct hfa384x *)data;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3765,7 +3766,7 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
*/
hw->ctlx_urb.transfer_flags |= URB_ASYNC_UNLINK;
if (usb_unlink_urb(&hw->ctlx_urb) == -EINPROGRESS) {
- hfa384x_usbctlx_t *ctlx = get_active_ctlx(hw);
+ struct hfa384x_usbctlx *ctlx = get_active_ctlx(hw);
ctlx->state = CTLX_REQ_FAILED;
@@ -3794,7 +3795,7 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
* URB containing a Prism2.x XXX_Response was never called.
*
* Arguments:
-* data a ptr to the hfa384x_t
+* data a ptr to the struct hfa384x
*
* Returns:
* nothing
@@ -3806,7 +3807,7 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
----------------------------------------------------------------*/
static void hfa384x_usbctlx_resptimerfn(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *)data;
+ struct hfa384x *hw = (struct hfa384x *)data;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3817,7 +3818,7 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
* adapter has been unplugged ...
*/
if (!list_empty(&hw->ctlxq.active)) {
- hfa384x_usbctlx_t *ctlx = get_active_ctlx(hw);
+ struct hfa384x_usbctlx *ctlx = get_active_ctlx(hw);
if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0) {
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
@@ -3845,7 +3846,7 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
----------------------------------------------------------------*/
static void hfa384x_usb_throttlefn(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *)data;
+ struct hfa384x *hw = (struct hfa384x *)data;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3885,7 +3886,7 @@ static void hfa384x_usb_throttlefn(unsigned long data)
* Call context:
* process or interrupt
----------------------------------------------------------------*/
-static int hfa384x_usbctlx_submit(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx)
+static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
{
unsigned long flags;
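
The hfa384x_usb.c hunks above are the matching call-site updates. One detail worth noting: list_entry() expands to container_of(), which takes a type name as an argument, so the rename has to reach into those macro invocations and into sizeof() expressions as well as into ordinary declarations. A sketch of the converted helper, as it appears in the get_active_ctlx() hunk above:

	static inline struct hfa384x_usbctlx *get_active_ctlx(struct hfa384x *hw)
	{
		/* container_of()/list_entry() need the full C type name of
		 * the containing structure, so hfa384x_usbctlx_t becomes
		 * struct hfa384x_usbctlx inside the macro argument too.
		 */
		return list_entry(hw->ctlxq.active.next,
				  struct hfa384x_usbctlx, list);
	}
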
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 6354036ffb42..0247cbc29145 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -104,7 +104,7 @@ static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
-int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
+int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb, union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
@@ -232,7 +232,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
}
/* jkriegl: from orinoco, modified */
-static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
+static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
struct p80211_rxmeta *rxmeta)
{
int i;
@@ -274,10 +274,10 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
* May be called in interrupt or non-interrupt context
*----------------------------------------------------------------
*/
-int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
+int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
struct sk_buff *skb)
{
- netdevice_t *netdev = wlandev->netdev;
+ struct net_device *netdev = wlandev->netdev;
u16 fc;
unsigned int payload_length;
unsigned int payload_offset;
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index 0ccfba1294de..b0d3567ca0ad 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -155,22 +155,9 @@
#define DIDmib_dot11smt_dot11WEPDefaultKeysTable \
(P80211DID_MKSECTION(1) | \
P80211DID_MKGROUP(4))
-#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0 \
- (P80211DID_MKSECTION(1) | \
- P80211DID_MKGROUP(4) | \
- P80211DID_MKITEM(1) | 0x0c000000)
-#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1 \
- (P80211DID_MKSECTION(1) | \
- P80211DID_MKGROUP(4) | \
- P80211DID_MKITEM(2) | 0x0c000000)
-#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2 \
- (P80211DID_MKSECTION(1) | \
- P80211DID_MKGROUP(4) | \
- P80211DID_MKITEM(3) | 0x0c000000)
-#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3 \
- (P80211DID_MKSECTION(1) | \
- P80211DID_MKGROUP(4) | \
- P80211DID_MKITEM(4) | 0x0c000000)
+#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(_i) \
+ (DIDmib_dot11smt_dot11WEPDefaultKeysTable | \
+ P80211DID_MKITEM(_i) | 0x0c000000)
#define DIDmib_dot11smt_dot11PrivacyTable \
(P80211DID_MKSECTION(1) | \
P80211DID_MKGROUP(6))
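
The p80211metadef.h change above is a different kind of cleanup: four per-key WEP default-key DID macros collapse into one parameterized macro. Because P80211DID_MKITEM() carried the only per-key difference, _key(1) through _key(4) reproduce the values of the removed Key0 through Key3 definitions. A small usage sketch (the variable name is hypothetical; the macros are the ones shown in the hunk above):

	/* The index is 1-based: _key(1) yields the old ...DefaultKey0 DID. */
	u32 did = DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(1);

	/* expands to:
	 *   P80211DID_MKSECTION(1) | P80211DID_MKGROUP(4) |
	 *   P80211DID_MKITEM(1)    | 0x0c000000
	 * which is exactly what ...dot11WEPDefaultKey0 used to expand to.
	 */
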
diff --git a/drivers/staging/wlan-ng/p80211metastruct.h b/drivers/staging/wlan-ng/p80211metastruct.h
index c501162c3020..850d897fc163 100644
--- a/drivers/staging/wlan-ng/p80211metastruct.h
+++ b/drivers/staging/wlan-ng/p80211metastruct.h
@@ -51,221 +51,221 @@ struct p80211msg_dot11req_mibget {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_unk392_t mibattribute;
- p80211item_uint32_t resultcode;
+ struct p80211item_unk392 mibattribute;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_dot11req_mibset {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_unk392_t mibattribute;
- p80211item_uint32_t resultcode;
+ struct p80211item_unk392 mibattribute;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_dot11req_scan {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t bsstype;
- p80211item_pstr6_t bssid;
+ struct p80211item_uint32 bsstype;
+ struct p80211item_pstr6 bssid;
u8 pad_0C[1];
- p80211item_pstr32_t ssid;
+ struct p80211item_pstr32 ssid;
u8 pad_1D[3];
- p80211item_uint32_t scantype;
- p80211item_uint32_t probedelay;
- p80211item_pstr14_t channellist;
+ struct p80211item_uint32 scantype;
+ struct p80211item_uint32 probedelay;
+ struct p80211item_pstr14 channellist;
u8 pad_2C[1];
- p80211item_uint32_t minchanneltime;
- p80211item_uint32_t maxchanneltime;
- p80211item_uint32_t resultcode;
- p80211item_uint32_t numbss;
- p80211item_uint32_t append;
+ struct p80211item_uint32 minchanneltime;
+ struct p80211item_uint32 maxchanneltime;
+ struct p80211item_uint32 resultcode;
+ struct p80211item_uint32 numbss;
+ struct p80211item_uint32 append;
} __packed;
struct p80211msg_dot11req_scan_results {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t bssindex;
- p80211item_uint32_t resultcode;
- p80211item_uint32_t signal;
- p80211item_uint32_t noise;
- p80211item_pstr6_t bssid;
+ struct p80211item_uint32 bssindex;
+ struct p80211item_uint32 resultcode;
+ struct p80211item_uint32 signal;
+ struct p80211item_uint32 noise;
+ struct p80211item_pstr6 bssid;
u8 pad_3C[1];
- p80211item_pstr32_t ssid;
+ struct p80211item_pstr32 ssid;
u8 pad_4D[3];
- p80211item_uint32_t bsstype;
- p80211item_uint32_t beaconperiod;
- p80211item_uint32_t dtimperiod;
- p80211item_uint32_t timestamp;
- p80211item_uint32_t localtime;
- p80211item_uint32_t fhdwelltime;
- p80211item_uint32_t fhhopset;
- p80211item_uint32_t fhhoppattern;
- p80211item_uint32_t fhhopindex;
- p80211item_uint32_t dschannel;
- p80211item_uint32_t cfpcount;
- p80211item_uint32_t cfpperiod;
- p80211item_uint32_t cfpmaxduration;
- p80211item_uint32_t cfpdurremaining;
- p80211item_uint32_t ibssatimwindow;
- p80211item_uint32_t cfpollable;
- p80211item_uint32_t cfpollreq;
- p80211item_uint32_t privacy;
- p80211item_uint32_t capinfo;
- p80211item_uint32_t basicrate1;
- p80211item_uint32_t basicrate2;
- p80211item_uint32_t basicrate3;
- p80211item_uint32_t basicrate4;
- p80211item_uint32_t basicrate5;
- p80211item_uint32_t basicrate6;
- p80211item_uint32_t basicrate7;
- p80211item_uint32_t basicrate8;
- p80211item_uint32_t supprate1;
- p80211item_uint32_t supprate2;
- p80211item_uint32_t supprate3;
- p80211item_uint32_t supprate4;
- p80211item_uint32_t supprate5;
- p80211item_uint32_t supprate6;
- p80211item_uint32_t supprate7;
- p80211item_uint32_t supprate8;
+ struct p80211item_uint32 bsstype;
+ struct p80211item_uint32 beaconperiod;
+ struct p80211item_uint32 dtimperiod;
+ struct p80211item_uint32 timestamp;
+ struct p80211item_uint32 localtime;
+ struct p80211item_uint32 fhdwelltime;
+ struct p80211item_uint32 fhhopset;
+ struct p80211item_uint32 fhhoppattern;
+ struct p80211item_uint32 fhhopindex;
+ struct p80211item_uint32 dschannel;
+ struct p80211item_uint32 cfpcount;
+ struct p80211item_uint32 cfpperiod;
+ struct p80211item_uint32 cfpmaxduration;
+ struct p80211item_uint32 cfpdurremaining;
+ struct p80211item_uint32 ibssatimwindow;
+ struct p80211item_uint32 cfpollable;
+ struct p80211item_uint32 cfpollreq;
+ struct p80211item_uint32 privacy;
+ struct p80211item_uint32 capinfo;
+ struct p80211item_uint32 basicrate1;
+ struct p80211item_uint32 basicrate2;
+ struct p80211item_uint32 basicrate3;
+ struct p80211item_uint32 basicrate4;
+ struct p80211item_uint32 basicrate5;
+ struct p80211item_uint32 basicrate6;
+ struct p80211item_uint32 basicrate7;
+ struct p80211item_uint32 basicrate8;
+ struct p80211item_uint32 supprate1;
+ struct p80211item_uint32 supprate2;
+ struct p80211item_uint32 supprate3;
+ struct p80211item_uint32 supprate4;
+ struct p80211item_uint32 supprate5;
+ struct p80211item_uint32 supprate6;
+ struct p80211item_uint32 supprate7;
+ struct p80211item_uint32 supprate8;
} __packed;
struct p80211msg_dot11req_start {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_pstr32_t ssid;
+ struct p80211item_pstr32 ssid;
u8 pad_12D[3];
- p80211item_uint32_t bsstype;
- p80211item_uint32_t beaconperiod;
- p80211item_uint32_t dtimperiod;
- p80211item_uint32_t cfpperiod;
- p80211item_uint32_t cfpmaxduration;
- p80211item_uint32_t fhdwelltime;
- p80211item_uint32_t fhhopset;
- p80211item_uint32_t fhhoppattern;
- p80211item_uint32_t dschannel;
- p80211item_uint32_t ibssatimwindow;
- p80211item_uint32_t probedelay;
- p80211item_uint32_t cfpollable;
- p80211item_uint32_t cfpollreq;
- p80211item_uint32_t basicrate1;
- p80211item_uint32_t basicrate2;
- p80211item_uint32_t basicrate3;
- p80211item_uint32_t basicrate4;
- p80211item_uint32_t basicrate5;
- p80211item_uint32_t basicrate6;
- p80211item_uint32_t basicrate7;
- p80211item_uint32_t basicrate8;
- p80211item_uint32_t operationalrate1;
- p80211item_uint32_t operationalrate2;
- p80211item_uint32_t operationalrate3;
- p80211item_uint32_t operationalrate4;
- p80211item_uint32_t operationalrate5;
- p80211item_uint32_t operationalrate6;
- p80211item_uint32_t operationalrate7;
- p80211item_uint32_t operationalrate8;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 bsstype;
+ struct p80211item_uint32 beaconperiod;
+ struct p80211item_uint32 dtimperiod;
+ struct p80211item_uint32 cfpperiod;
+ struct p80211item_uint32 cfpmaxduration;
+ struct p80211item_uint32 fhdwelltime;
+ struct p80211item_uint32 fhhopset;
+ struct p80211item_uint32 fhhoppattern;
+ struct p80211item_uint32 dschannel;
+ struct p80211item_uint32 ibssatimwindow;
+ struct p80211item_uint32 probedelay;
+ struct p80211item_uint32 cfpollable;
+ struct p80211item_uint32 cfpollreq;
+ struct p80211item_uint32 basicrate1;
+ struct p80211item_uint32 basicrate2;
+ struct p80211item_uint32 basicrate3;
+ struct p80211item_uint32 basicrate4;
+ struct p80211item_uint32 basicrate5;
+ struct p80211item_uint32 basicrate6;
+ struct p80211item_uint32 basicrate7;
+ struct p80211item_uint32 basicrate8;
+ struct p80211item_uint32 operationalrate1;
+ struct p80211item_uint32 operationalrate2;
+ struct p80211item_uint32 operationalrate3;
+ struct p80211item_uint32 operationalrate4;
+ struct p80211item_uint32 operationalrate5;
+ struct p80211item_uint32 operationalrate6;
+ struct p80211item_uint32 operationalrate7;
+ struct p80211item_uint32 operationalrate8;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_lnxreq_ifstate {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t ifstate;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 ifstate;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_lnxreq_wlansniff {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t enable;
- p80211item_uint32_t channel;
- p80211item_uint32_t prismheader;
- p80211item_uint32_t wlanheader;
- p80211item_uint32_t keepwepflags;
- p80211item_uint32_t stripfcs;
- p80211item_uint32_t packet_trunc;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 enable;
+ struct p80211item_uint32 channel;
+ struct p80211item_uint32 prismheader;
+ struct p80211item_uint32 wlanheader;
+ struct p80211item_uint32 keepwepflags;
+ struct p80211item_uint32 stripfcs;
+ struct p80211item_uint32 packet_trunc;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_lnxreq_hostwep {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t resultcode;
- p80211item_uint32_t decrypt;
- p80211item_uint32_t encrypt;
+ struct p80211item_uint32 resultcode;
+ struct p80211item_uint32 decrypt;
+ struct p80211item_uint32 encrypt;
} __packed;
struct p80211msg_lnxreq_commsquality {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t resultcode;
- p80211item_uint32_t dbm;
- p80211item_uint32_t link;
- p80211item_uint32_t level;
- p80211item_uint32_t noise;
- p80211item_uint32_t txrate;
+ struct p80211item_uint32 resultcode;
+ struct p80211item_uint32 dbm;
+ struct p80211item_uint32 link;
+ struct p80211item_uint32 level;
+ struct p80211item_uint32 noise;
+ struct p80211item_uint32 txrate;
} __packed;
struct p80211msg_lnxreq_autojoin {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_pstr32_t ssid;
+ struct p80211item_pstr32 ssid;
u8 pad_19D[3];
- p80211item_uint32_t authtype;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 authtype;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_p2req_readpda {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_unk1024_t pda;
- p80211item_uint32_t resultcode;
+ struct p80211item_unk1024 pda;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_p2req_ramdl_state {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t enable;
- p80211item_uint32_t exeaddr;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 enable;
+ struct p80211item_uint32 exeaddr;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_p2req_ramdl_write {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t addr;
- p80211item_uint32_t len;
- p80211item_unk4096_t data;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 addr;
+ struct p80211item_uint32 len;
+ struct p80211item_unk4096 data;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_p2req_flashdl_state {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t enable;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 enable;
+ struct p80211item_uint32 resultcode;
} __packed;
struct p80211msg_p2req_flashdl_write {
u32 msgcode;
u32 msglen;
u8 devname[WLAN_DEVNAMELEN_MAX];
- p80211item_uint32_t addr;
- p80211item_uint32_t len;
- p80211item_unk4096_t data;
- p80211item_uint32_t resultcode;
+ struct p80211item_uint32 addr;
+ struct p80211item_uint32 len;
+ struct p80211item_unk4096 data;
+ struct p80211item_uint32 resultcode;
} __packed;
#endif
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 90cc8cdcf969..825a63a7c0e3 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -91,17 +91,17 @@
#include "cfg80211.c"
/* netdevice method functions */
-static int p80211knetdev_init(netdevice_t *netdev);
-static int p80211knetdev_open(netdevice_t *netdev);
-static int p80211knetdev_stop(netdevice_t *netdev);
+static int p80211knetdev_init(struct net_device *netdev);
+static int p80211knetdev_open(struct net_device *netdev);
+static int p80211knetdev_stop(struct net_device *netdev);
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
- netdevice_t *netdev);
-static void p80211knetdev_set_multicast_list(netdevice_t *dev);
-static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr,
+ struct net_device *netdev);
+static void p80211knetdev_set_multicast_list(struct net_device *dev);
+static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
-static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr);
-static void p80211knetdev_tx_timeout(netdevice_t *netdev);
-static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc);
+static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
+static void p80211knetdev_tx_timeout(struct net_device *netdev);
+static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
int wlan_watchdog = 5000;
module_param(wlan_watchdog, int, 0644);
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
* Returns:
* nothing
----------------------------------------------------------------*/
-static int p80211knetdev_init(netdevice_t *netdev)
+static int p80211knetdev_init(struct net_device *netdev)
{
/* Called in response to register_netdev */
/* This is usually the probe function, but the probe has */
@@ -146,10 +146,10 @@ static int p80211knetdev_init(netdevice_t *netdev)
* Returns:
* zero on success, non-zero otherwise
----------------------------------------------------------------*/
-static int p80211knetdev_open(netdevice_t *netdev)
+static int p80211knetdev_open(struct net_device *netdev)
{
int result = 0; /* success */
- wlandevice_t *wlandev = netdev->ml_priv;
+ struct wlandevice *wlandev = netdev->ml_priv;
/* Check to make sure the MSD is running */
if (wlandev->msdstate != WLAN_MSD_RUNNING)
@@ -181,10 +181,10 @@ static int p80211knetdev_open(netdevice_t *netdev)
* Returns:
* zero on success, non-zero otherwise
----------------------------------------------------------------*/
-static int p80211knetdev_stop(netdevice_t *netdev)
+static int p80211knetdev_stop(struct net_device *netdev)
{
int result = 0;
- wlandevice_t *wlandev = netdev->ml_priv;
+ struct wlandevice *wlandev = netdev->ml_priv;
if (wlandev->close)
result = wlandev->close(wlandev);
@@ -208,7 +208,7 @@ static int p80211knetdev_stop(netdevice_t *netdev)
* Side effects:
*
----------------------------------------------------------------*/
-void p80211netdev_rx(wlandevice_t *wlandev, struct sk_buff *skb)
+void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
{
/* Enqueue for post-irq processing */
skb_queue_tail(&wlandev->nsd_rxq, skb);
@@ -227,11 +227,11 @@ void p80211netdev_rx(wlandevice_t *wlandev, struct sk_buff *skb)
* CONV_TO_ETHER_FAILED if conversion failed
* CONV_TO_ETHER_SKIPPED if frame is ignored
*/
-static int p80211_convert_to_ether(wlandevice_t *wlandev, struct sk_buff *skb)
+static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_hdr_a3 *hdr;
- hdr = (struct p80211_hdr_a3 *) skb->data;
+ hdr = (struct p80211_hdr_a3 *)skb->data;
if (p80211_rx_typedrop(wlandev, hdr->fc))
return CONV_TO_ETHER_SKIPPED;
@@ -265,9 +265,9 @@ static int p80211_convert_to_ether(wlandevice_t *wlandev, struct sk_buff *skb)
*/
static void p80211netdev_rx_bh(unsigned long arg)
{
- wlandevice_t *wlandev = (wlandevice_t *) arg;
+ struct wlandevice *wlandev = (struct wlandevice *)arg;
struct sk_buff *skb = NULL;
- netdevice_t *dev = wlandev->netdev;
+ struct net_device *dev = wlandev->netdev;
 /* Let's empty our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
@@ -318,11 +318,11 @@ static void p80211netdev_rx_bh(unsigned long arg)
* zero on success, non-zero on failure.
----------------------------------------------------------------*/
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
- netdevice_t *netdev)
+ struct net_device *netdev)
{
int result = 0;
int txresult = -1;
- wlandevice_t *wlandev = netdev->ml_priv;
+ struct wlandevice *wlandev = netdev->ml_priv;
union p80211_hdr p80211_hdr;
struct p80211_metawep p80211_wep;
@@ -446,9 +446,9 @@ failed:
* Returns:
* nothing
----------------------------------------------------------------*/
-static void p80211knetdev_set_multicast_list(netdevice_t *dev)
+static void p80211knetdev_set_multicast_list(struct net_device *dev)
{
- wlandevice_t *wlandev = dev->ml_priv;
+ struct wlandevice *wlandev = dev->ml_priv;
/* TODO: real multicast support as well */
@@ -459,7 +459,7 @@ static void p80211knetdev_set_multicast_list(netdevice_t *dev)
#ifdef SIOCETHTOOL
-static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
+static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr)
{
u32 ethcmd;
struct ethtool_drvinfo info;
@@ -531,11 +531,11 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
* Process thread (ioctl caller). TODO: SMP support may require
* locks.
----------------------------------------------------------------*/
-static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd)
+static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int result = 0;
- struct p80211ioctl_req *req = (struct p80211ioctl_req *) ifr;
- wlandevice_t *wlandev = dev->ml_priv;
+ struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
+ struct wlandevice *wlandev = dev->ml_priv;
u8 *msgbuf;
netdev_dbg(dev, "rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);
@@ -610,13 +610,13 @@ bail:
*
* by: Collin R. Mulliner <collin@mulliner.org>
----------------------------------------------------------------*/
-static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
+static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *new_addr = addr;
struct p80211msg_dot11req_mibset dot11req;
- p80211item_unk392_t *mibattr;
- p80211item_pstr6_t *macaddr;
- p80211item_uint32_t *resultcode;
+ struct p80211item_unk392 *mibattr;
+ struct p80211item_pstr6 *macaddr;
+ struct p80211item_uint32 *resultcode;
int result;
/* If we're running, we don't allow MAC address changes */
@@ -625,7 +625,7 @@ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
/* Set up some convenience pointers. */
mibattr = &dot11req.mibattribute;
- macaddr = (p80211item_pstr6_t *) &mibattr->data;
+ macaddr = (struct p80211item_pstr6 *)&mibattr->data;
resultcode = &dot11req.resultcode;
/* Set up a dot11req_mibset */
@@ -633,7 +633,7 @@ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
dot11req.msgcode = DIDmsg_dot11req_mibset;
dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset);
memcpy(dot11req.devname,
- ((wlandevice_t *) dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
+ ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
/* Set up the mibattribute argument */
mibattr->did = DIDmsg_dot11req_mibset_mibattribute;
@@ -653,7 +653,7 @@ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
resultcode->data = 0;
/* now fire the request */
- result = p80211req_dorequest(dev->ml_priv, (u8 *) &dot11req);
+ result = p80211req_dorequest(dev->ml_priv, (u8 *)&dot11req);
/* If the request wasn't successful, report an error and don't
* change the netdev address
@@ -669,7 +669,7 @@ static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
return result;
}
-static int wlan_change_mtu(netdevice_t *dev, int new_mtu)
+static int wlan_change_mtu(struct net_device *dev, int new_mtu)
{
/* 2312 is max 802.11 payload, 20 is overhead, (ether + llc +snap)
and another 8 for wep. */
@@ -717,10 +717,10 @@ static const struct net_device_ops p80211_netdev_ops = {
* compiled drivers, this function will be called in the
* context of the kernel startup code.
----------------------------------------------------------------*/
-int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
+int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
{
int result = 0;
- netdevice_t *netdev;
+ struct net_device *netdev;
struct wiphy *wiphy;
struct wireless_dev *wdev;
@@ -783,7 +783,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
* compiled drivers, this function will be called in the
* context of the kernel startup code.
----------------------------------------------------------------*/
-void wlan_unsetup(wlandevice_t *wlandev)
+void wlan_unsetup(struct wlandevice *wlandev)
{
struct wireless_dev *wdev;
@@ -817,7 +817,7 @@ void wlan_unsetup(wlandevice_t *wlandev)
* Call Context:
* Can be either interrupt or not.
----------------------------------------------------------------*/
-int register_wlandev(wlandevice_t *wlandev)
+int register_wlandev(struct wlandevice *wlandev)
{
return register_netdev(wlandev->netdev);
}
@@ -839,7 +839,7 @@ int register_wlandev(wlandevice_t *wlandev)
* Call Context:
* Can be either interrupt or not.
----------------------------------------------------------------*/
-int unregister_wlandev(wlandevice_t *wlandev)
+int unregister_wlandev(struct wlandevice *wlandev)
{
struct sk_buff *skb;
@@ -882,7 +882,7 @@ int unregister_wlandev(wlandevice_t *wlandev)
* Call context:
* Usually interrupt.
----------------------------------------------------------------*/
-void p80211netdev_hwremoved(wlandevice_t *wlandev)
+void p80211netdev_hwremoved(struct wlandevice *wlandev)
{
wlandev->hwremoved = 1;
if (wlandev->state == WLAN_DEVICE_OPEN)
@@ -912,7 +912,7 @@ void p80211netdev_hwremoved(wlandevice_t *wlandev)
* Call context:
* interrupt
----------------------------------------------------------------*/
-static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc)
+static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
{
u16 ftype;
u16 fstype;
@@ -1071,9 +1071,9 @@ static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc)
return drop;
}
-static void p80211knetdev_tx_timeout(netdevice_t *netdev)
+static void p80211knetdev_tx_timeout(struct net_device *netdev)
{
- wlandevice_t *wlandev = netdev->ml_priv;
+ struct wlandevice *wlandev = netdev->ml_priv;
if (wlandev->tx_timeout) {
wlandev->tx_timeout(wlandev);
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 820a0e20a941..1e6a774fc7c5 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -57,9 +57,6 @@
#include <linux/wireless.h>
#include <linux/netdevice.h>
-#undef netdevice_t
-typedef struct net_device netdevice_t;
-
#define WLAN_RELEASE "0.3.0-staging"
#define WLAN_DEVICE_CLOSED 0
@@ -101,7 +98,7 @@ typedef struct net_device netdevice_t;
#define P80211_NSDCAP_NOSCAN 0x200 /* nsd can scan */
/* Received frame statistics */
-typedef struct p80211_frmrx_t {
+struct p80211_frmrx {
u32 mgmt;
u32 assocreq;
u32 assocresp;
@@ -135,10 +132,10 @@ typedef struct p80211_frmrx_t {
u32 data_unknown;
u32 decrypt;
u32 decrypt_err;
-} p80211_frmrx_t;
+};
/* called by /proc/net/wireless */
-struct iw_statistics *p80211wext_get_wireless_stats(netdevice_t *dev);
+struct iw_statistics *p80211wext_get_wireless_stats(struct net_device *dev);
/* wireless extensions' ioctls */
extern struct iw_handler_def p80211wext_handler_def;
@@ -157,7 +154,7 @@ extern int wlan_watchdog;
extern int wlan_wext_write;
/* WLAN device type */
-typedef struct wlandevice {
+struct wlandevice {
void *priv; /* private data for MSD */
/* Subsystem State */
@@ -186,12 +183,12 @@ typedef struct wlandevice {
struct p80211_metawep *p80211_wep);
int (*mlmerequest)(struct wlandevice *wlandev, struct p80211msg *msg);
int (*set_multicast_list)(struct wlandevice *wlandev,
- netdevice_t *dev);
+ struct net_device *dev);
void (*tx_timeout)(struct wlandevice *wlandev);
/* 802.11 State */
u8 bssid[WLAN_BSSID_LEN];
- p80211pstr32_t ssid;
+ struct p80211pstr32 ssid;
u32 macmode;
int linkstatus;
@@ -206,7 +203,7 @@ typedef struct wlandevice {
/* netlink socket */
/* queue for indications waiting for cmd completion */
/* Linux netdevice and support */
- netdevice_t *netdev; /* ptr to linux netdevice */
+ struct net_device *netdev; /* ptr to linux netdevice */
/* Rx bottom half */
struct tasklet_struct rx_bh;
@@ -214,7 +211,7 @@ typedef struct wlandevice {
struct sk_buff_head nsd_rxq;
/* 802.11 device statistics */
- struct p80211_frmrx_t rx;
+ struct p80211_frmrx rx;
struct iw_statistics wstats;
@@ -222,19 +219,19 @@ typedef struct wlandevice {
u8 spy_number;
char spy_address[IW_MAX_SPY][ETH_ALEN];
struct iw_quality spy_stat[IW_MAX_SPY];
-} wlandevice_t;
+};
/* WEP stuff */
-int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen);
-int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
- u8 *iv, u8 *icv);
-int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
+int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen);
+int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
u8 *iv, u8 *icv);
-
-int wlan_setup(wlandevice_t *wlandev, struct device *physdev);
-void wlan_unsetup(wlandevice_t *wlandev);
-int register_wlandev(wlandevice_t *wlandev);
-int unregister_wlandev(wlandevice_t *wlandev);
-void p80211netdev_rx(wlandevice_t *wlandev, struct sk_buff *skb);
-void p80211netdev_hwremoved(wlandevice_t *wlandev);
+int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len,
+ int keynum, u8 *iv, u8 *icv);
+
+int wlan_setup(struct wlandevice *wlandev, struct device *physdev);
+void wlan_unsetup(struct wlandevice *wlandev);
+int register_wlandev(struct wlandevice *wlandev);
+int unregister_wlandev(struct wlandevice *wlandev);
+void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb);
+void p80211netdev_hwremoved(struct wlandevice *wlandev);
#endif
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 4b84b568f6ca..d43e85b5d49b 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -72,11 +72,26 @@
#include "p80211metastruct.h"
#include "p80211req.h"
-static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg);
-static void p80211req_mibset_mibget(wlandevice_t *wlandev,
+static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg);
+static void p80211req_mibset_mibget(struct wlandevice *wlandev,
struct p80211msg_dot11req_mibget *mib_msg,
int isget);
+static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
+ int isget, u32 flag)
+{
+ if (isget) {
+ if (wlandev->hostwep & flag)
+ *data = P80211ENUM_truth_true;
+ else
+ *data = P80211ENUM_truth_false;
+ } else {
+ wlandev->hostwep &= ~flag;
+ if (*data == P80211ENUM_truth_true)
+ wlandev->hostwep |= flag;
+ }
+}
+
/*----------------------------------------------------------------
* p80211req_dorequest
*
@@ -93,9 +108,9 @@ static void p80211req_mibset_mibget(wlandevice_t *wlandev,
* Potentially blocks the caller, so it's a good idea to
* not call this function from an interrupt context.
----------------------------------------------------------------*/
-int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
+int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
{
- struct p80211msg *msg = (struct p80211msg *) msgbuf;
+ struct p80211msg *msg = (struct p80211msg *)msgbuf;
/* Check to make sure the MSD is running */
if (!((wlandev->msdstate == WLAN_MSD_HWPRESENT &&
@@ -149,13 +164,13 @@ int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
* Call context:
* Process thread
----------------------------------------------------------------*/
-static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg)
+static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg)
{
switch (msg->msgcode) {
case DIDmsg_lnxreq_hostwep:{
struct p80211msg_lnxreq_hostwep *req =
- (struct p80211msg_lnxreq_hostwep *) msg;
+ (struct p80211msg_lnxreq_hostwep *)msg;
wlandev->hostwep &=
~(HOSTWEP_DECRYPT | HOSTWEP_ENCRYPT);
if (req->decrypt.data == P80211ENUM_truth_true)
@@ -169,44 +184,34 @@ static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg)
case DIDmsg_dot11req_mibset:{
int isget = (msg->msgcode == DIDmsg_dot11req_mibget);
struct p80211msg_dot11req_mibget *mib_msg =
- (struct p80211msg_dot11req_mibget *) msg;
+ (struct p80211msg_dot11req_mibget *)msg;
p80211req_mibset_mibget(wlandev, mib_msg, isget);
break;
}
} /* switch msg->msgcode */
}
-static void p80211req_mibset_mibget(wlandevice_t *wlandev,
+static void p80211req_mibset_mibget(struct wlandevice *wlandev,
struct p80211msg_dot11req_mibget *mib_msg,
int isget)
{
- p80211itemd_t *mibitem = (p80211itemd_t *) mib_msg->mibattribute.data;
- p80211pstrd_t *pstr = (p80211pstrd_t *) mibitem->data;
- u8 *key = mibitem->data + sizeof(p80211pstrd_t);
+ struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
+ struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
+ u8 *key = mibitem->data + sizeof(struct p80211pstrd);
switch (mibitem->did) {
- case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0:{
- if (!isget)
- wep_change_key(wlandev, 0, key, pstr->len);
- break;
- }
- case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1:{
- if (!isget)
- wep_change_key(wlandev, 1, key, pstr->len);
- break;
- }
- case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2:{
- if (!isget)
- wep_change_key(wlandev, 2, key, pstr->len);
- break;
- }
- case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3:{
+ case DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(1):
+ case DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(2):
+ case DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(3):
+ case DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(4):
if (!isget)
- wep_change_key(wlandev, 3, key, pstr->len);
- break;
- }
+ wep_change_key(wlandev,
+ P80211DID_ITEM(mibitem->did) - 1,
+ key, pstr->len);
+ break;
+
case DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID:{
- u32 *data = (u32 *) mibitem->data;
+ u32 *data = (u32 *)mibitem->data;
if (isget) {
*data = wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK;
@@ -217,33 +222,17 @@ static void p80211req_mibset_mibget(wlandevice_t *wlandev,
break;
}
case DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked:{
- u32 *data = (u32 *) mibitem->data;
+ u32 *data = (u32 *)mibitem->data;
- if (isget) {
- if (wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
- *data = P80211ENUM_truth_true;
- else
- *data = P80211ENUM_truth_false;
- } else {
- wlandev->hostwep &= ~(HOSTWEP_PRIVACYINVOKED);
- if (*data == P80211ENUM_truth_true)
- wlandev->hostwep |= HOSTWEP_PRIVACYINVOKED;
- }
+ p80211req_handle_action(wlandev, data, isget,
+ HOSTWEP_PRIVACYINVOKED);
break;
}
case DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted:{
- u32 *data = (u32 *) mibitem->data;
+ u32 *data = (u32 *)mibitem->data;
- if (isget) {
- if (wlandev->hostwep & HOSTWEP_EXCLUDEUNENCRYPTED)
- *data = P80211ENUM_truth_true;
- else
- *data = P80211ENUM_truth_false;
- } else {
- wlandev->hostwep &= ~(HOSTWEP_EXCLUDEUNENCRYPTED);
- if (*data == P80211ENUM_truth_true)
- wlandev->hostwep |= HOSTWEP_EXCLUDEUNENCRYPTED;
- }
+ p80211req_handle_action(wlandev, data, isget,
+ HOSTWEP_EXCLUDEUNENCRYPTED);
break;
}
}
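The new p80211req_handle_action() helper factors out the two copy-pasted get/set blocks for the privacy flags: on a get it reports whether the flag is set as a truth value, on a set it clears the flag and re-sets it only when the supplied data is "true". A minimal user-space sketch of those semantics, with placeholder values for the truth enum and the flag bit (the real definitions come from p80211types.h and p80211netdev.h), is:

#include <stdio.h>

#define P80211ENUM_truth_false   0
#define P80211ENUM_truth_true    1
#define HOSTWEP_PRIVACYINVOKED   0x10	/* placeholder bit value */

/* Stand-in for the driver's wlandevice; only hostwep matters here. */
struct wlandevice {
	unsigned int hostwep;
};

/* Mirrors the helper added in p80211req.c. */
static void handle_action(struct wlandevice *wlandev, unsigned int *data,
			  int isget, unsigned int flag)
{
	if (isget) {
		*data = (wlandev->hostwep & flag) ?
			P80211ENUM_truth_true : P80211ENUM_truth_false;
	} else {
		wlandev->hostwep &= ~flag;
		if (*data == P80211ENUM_truth_true)
			wlandev->hostwep |= flag;
	}
}

int main(void)
{
	struct wlandevice dev = { .hostwep = 0 };
	unsigned int data = P80211ENUM_truth_true;

	handle_action(&dev, &data, 0, HOSTWEP_PRIVACYINVOKED);	/* set */
	handle_action(&dev, &data, 1, HOSTWEP_PRIVACYINVOKED);	/* get */
	printf("privacy invoked: %u\n", data);			/* prints 1 */
	return 0;
}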
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index a95a45a6814d..8d3054c22a05 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -48,6 +48,6 @@
#ifndef _LINUX_P80211REQ_H
#define _LINUX_P80211REQ_H
-int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf);
+int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf);
#endif
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 8cb4fc6448a0..263ef2ddb197 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -1,58 +1,59 @@
-/* p80211types.h
-*
-* Macros, constants, types, and funcs for p80211 data types
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-* The contents of this file are subject to the Mozilla Public
-* License Version 1.1 (the "License"); you may not use this file
-* except in compliance with the License. You may obtain a copy of
-* the License at http://www.mozilla.org/MPL/
-*
-* Software distributed under the License is distributed on an "AS
-* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-* implied. See the License for the specific language governing
-* rights and limitations under the License.
-*
-* Alternatively, the contents of this file may be used under the
-* terms of the GNU Public License version 2 (the "GPL"), in which
-* case the provisions of the GPL are applicable instead of the
-* above. If you wish to allow the use of your version of this file
-* only under the terms of the GPL and not to allow others to use
-* your version of this file under the MPL, indicate your decision
-* by deleting the provisions above and replace them with the notice
-* and other provisions required by the GPL. If you do not delete
-* the provisions above, a recipient may use your version of this
-* file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares some of the constants and types used in various
-* parts of the linux-wlan system.
-*
-* Notes:
-* - Constant values are always in HOST byte order.
-*
-* All functions and statics declared here are implemented in p80211types.c
-* --------------------------------------------------------------------
-*/
+/*
+ * p80211types.h
+ *
+ * Macros, constants, types, and funcs for p80211 data types
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ * The contents of this file are subject to the Mozilla Public
+ * License Version 1.1 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of
+ * the License at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS
+ * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * rights and limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU Public License version 2 (the "GPL"), in which
+ * case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete
+ * the provisions above, a recipient may use your version of this
+ * file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares some of the constants and types used in various
+ * parts of the linux-wlan system.
+ *
+ * Notes:
+ * - Constant values are always in HOST byte order.
+ *
+ * All functions and statics declared here are implemented in p80211types.c
+ * --------------------------------------------------------------------
+ */
#ifndef _P80211TYPES_H
#define _P80211TYPES_H
@@ -123,18 +124,18 @@
#define MKENUMNAME(name) p80211enum_ ## name
/*----------------------------------------------------------------
-* The following constants and macros are used to construct and
-* deconstruct the Data ID codes. The coding is as follows:
-*
-* ...rwtnnnnnnnniiiiiiggggggssssss s - Section
-* g - Group
-* i - Item
-* n - Index
-* t - Table flag
-* w - Write flag
-* r - Read flag
-* . - Unused
-*/
+ * The following constants and macros are used to construct and
+ * deconstruct the Data ID codes. The coding is as follows:
+ *
+ * ...rwtnnnnnnnniiiiiiggggggssssss s - Section
+ * g - Group
+ * i - Item
+ * n - Index
+ * t - Table flag
+ * w - Write flag
+ * r - Read flag
+ * . - Unused
+ */
#define P80211DID_LSB_SECTION (0)
#define P80211DID_LSB_GROUP (6)
@@ -200,138 +201,138 @@
/* The following structure types are used for the representation */
/* of ENUMint type metadata. */
-typedef struct p80211enumpair {
+struct p80211enumpair {
u32 val;
char *name;
-} p80211enumpair_t;
+};
-typedef struct p80211enum {
+struct p80211enum {
int nitems;
- p80211enumpair_t *list;
-} p80211enum_t;
+ struct p80211enumpair *list;
+};
/*----------------------------------------------------------------*/
/* The following structure types are used to store data items in */
/* messages. */
/* Template pascal string */
-typedef struct p80211pstr {
+struct p80211pstr {
u8 len;
-} __packed p80211pstr_t;
+} __packed;
-typedef struct p80211pstrd {
+struct p80211pstrd {
u8 len;
u8 data[0];
-} __packed p80211pstrd_t;
+} __packed;
/* Maximum pascal string */
-typedef struct p80211pstr255 {
+struct p80211pstr255 {
u8 len;
u8 data[MAXLEN_PSTR255];
-} __packed p80211pstr255_t;
+} __packed;
/* pascal string for macaddress and bssid */
-typedef struct p80211pstr6 {
+struct p80211pstr6 {
u8 len;
u8 data[MAXLEN_PSTR6];
-} __packed p80211pstr6_t;
+} __packed;
/* pascal string for channel list */
-typedef struct p80211pstr14 {
+struct p80211pstr14 {
u8 len;
u8 data[MAXLEN_PSTR14];
-} __packed p80211pstr14_t;
+} __packed;
/* pascal string for ssid */
-typedef struct p80211pstr32 {
+struct p80211pstr32 {
u8 len;
u8 data[MAXLEN_PSTR32];
-} __packed p80211pstr32_t;
+} __packed;
/* MAC address array */
-typedef struct p80211macarray {
+struct p80211macarray {
u32 cnt;
u8 data[1][MAXLEN_PSTR6];
-} __packed p80211macarray_t;
+} __packed;
/* prototype template */
-typedef struct p80211item {
+struct p80211item {
u32 did;
u16 status;
u16 len;
-} __packed p80211item_t;
+} __packed;
/* prototype template w/ data item */
-typedef struct p80211itemd {
+struct p80211itemd {
u32 did;
u16 status;
u16 len;
u8 data[0];
-} __packed p80211itemd_t;
+} __packed;
/* message data item for int, BOUNDEDINT, ENUMINT */
-typedef struct p80211item_uint32 {
+struct p80211item_uint32 {
u32 did;
u16 status;
u16 len;
u32 data;
-} __packed p80211item_uint32_t;
+} __packed;
/* message data item for OCTETSTR, DISPLAYSTR */
-typedef struct p80211item_pstr6 {
+struct p80211item_pstr6 {
u32 did;
u16 status;
u16 len;
- p80211pstr6_t data;
-} __packed p80211item_pstr6_t;
+ struct p80211pstr6 data;
+} __packed;
/* message data item for OCTETSTR, DISPLAYSTR */
-typedef struct p80211item_pstr14 {
+struct p80211item_pstr14 {
u32 did;
u16 status;
u16 len;
- p80211pstr14_t data;
-} __packed p80211item_pstr14_t;
+ struct p80211pstr14 data;
+} __packed;
/* message data item for OCTETSTR, DISPLAYSTR */
-typedef struct p80211item_pstr32 {
+struct p80211item_pstr32 {
u32 did;
u16 status;
u16 len;
- p80211pstr32_t data;
-} __packed p80211item_pstr32_t;
+ struct p80211pstr32 data;
+} __packed;
/* message data item for OCTETSTR, DISPLAYSTR */
-typedef struct p80211item_pstr255 {
+struct p80211item_pstr255 {
u32 did;
u16 status;
u16 len;
- p80211pstr255_t data;
-} __packed p80211item_pstr255_t;
+ struct p80211pstr255 data;
+} __packed;
/* message data item for UNK 392, namely mib items */
-typedef struct p80211item_unk392 {
+struct p80211item_unk392 {
u32 did;
u16 status;
u16 len;
u8 data[MAXLEN_MIBATTRIBUTE];
-} __packed p80211item_unk392_t;
+} __packed;
/* message data item for UNK 1025, namely p2 pdas */
-typedef struct p80211item_unk1024 {
+struct p80211item_unk1024 {
u32 did;
u16 status;
u16 len;
u8 data[1024];
-} __packed p80211item_unk1024_t;
+} __packed;
/* message data item for UNK 4096, namely p2 download chunks */
-typedef struct p80211item_unk4096 {
+struct p80211item_unk4096 {
u32 did;
u16 status;
u16 len;
u8 data[4096];
-} __packed p80211item_unk4096_t;
+} __packed;
struct catlistitem;
@@ -351,25 +352,25 @@ typedef u32(*p80211_valid_t) (struct catlistitem *, u32 did, u8 *itembuf);
/* The following are the external declarations */
/* for all enumerations */
-extern p80211enum_t MKENUMNAME(truth);
-extern p80211enum_t MKENUMNAME(ifstate);
-extern p80211enum_t MKENUMNAME(powermgmt);
-extern p80211enum_t MKENUMNAME(bsstype);
-extern p80211enum_t MKENUMNAME(authalg);
-extern p80211enum_t MKENUMNAME(phytype);
-extern p80211enum_t MKENUMNAME(temptype);
-extern p80211enum_t MKENUMNAME(regdomain);
-extern p80211enum_t MKENUMNAME(ccamode);
-extern p80211enum_t MKENUMNAME(diversity);
-extern p80211enum_t MKENUMNAME(scantype);
-extern p80211enum_t MKENUMNAME(resultcode);
-extern p80211enum_t MKENUMNAME(reason);
-extern p80211enum_t MKENUMNAME(status);
-extern p80211enum_t MKENUMNAME(msgcode);
-extern p80211enum_t MKENUMNAME(msgitem_status);
-
-extern p80211enum_t MKENUMNAME(lnxroam_reason);
-
-extern p80211enum_t MKENUMNAME(p2preamble);
+extern struct p80211enum MKENUMNAME(truth);
+extern struct p80211enum MKENUMNAME(ifstate);
+extern struct p80211enum MKENUMNAME(powermgmt);
+extern struct p80211enum MKENUMNAME(bsstype);
+extern struct p80211enum MKENUMNAME(authalg);
+extern struct p80211enum MKENUMNAME(phytype);
+extern struct p80211enum MKENUMNAME(temptype);
+extern struct p80211enum MKENUMNAME(regdomain);
+extern struct p80211enum MKENUMNAME(ccamode);
+extern struct p80211enum MKENUMNAME(diversity);
+extern struct p80211enum MKENUMNAME(scantype);
+extern struct p80211enum MKENUMNAME(resultcode);
+extern struct p80211enum MKENUMNAME(reason);
+extern struct p80211enum MKENUMNAME(status);
+extern struct p80211enum MKENUMNAME(msgcode);
+extern struct p80211enum MKENUMNAME(msgitem_status);
+
+extern struct p80211enum MKENUMNAME(lnxroam_reason);
+
+extern struct p80211enum MKENUMNAME(p2preamble);
#endif /* _P80211TYPES_H */
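The DID layout described in the comment above (6-bit section, group and item fields starting at the LSB) is what lets the mibset handler in p80211req.c recover the WEP key number via P80211DID_ITEM(mibitem->did) - 1. A standalone decode sketch, again with illustrative shift and mask values rather than the driver's exact constants, is:

#include <stdio.h>

/* Field offsets follow the layout comment: 6-bit section, 6-bit group,
 * 6-bit item from the LSB.  Treat the exact constants as assumptions;
 * the driver defines its own in p80211types.h. */
#define LSB_SECTION 0
#define LSB_GROUP   6
#define LSB_ITEM    12

#define MKSECTION(s) ((unsigned int)(s) << LSB_SECTION)
#define MKGROUP(g)   ((unsigned int)(g) << LSB_GROUP)
#define MKITEM(i)    ((unsigned int)(i) << LSB_ITEM)
#define DID_ITEM(d)  (((d) >> LSB_ITEM) & 0x3f)

int main(void)
{
	/* Build a WEP default-key DID the way the new _key() macro does... */
	unsigned int did = MKSECTION(1) | MKGROUP(4) | MKITEM(3) | 0x0c000000;

	/* ...and recover the 1-based key number, then the 0-based index
	 * that wep_change_key() expects. */
	printf("item=%u keyindex=%u\n", DID_ITEM(did), DID_ITEM(did) - 1);
	return 0;
}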
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 22c79703e328..23b183738037 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -119,7 +119,7 @@ static const u32 wep_crc32_table[256] = {
/* keylen in bytes! */
-int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen)
+int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
{
if (keylen < 0)
return -1;
@@ -143,7 +143,7 @@ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen)
* 4-byte IV at start of buffer, 4-byte ICV at end of buffer.
* if successful, buf start is payload begin, length -= 8;
*/
-int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
+int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
u8 *iv, u8 *icv)
{
u32 i, j, k, crc, keylen;
@@ -217,7 +217,7 @@ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
}
/* encrypts in-place. */
-int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
+int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
u8 *iv, u8 *icv)
{
u32 i, j, k, crc, keylen;
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 56bffd93c982..96aa21188669 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -96,16 +96,16 @@ struct s3inforec {
u16 len;
u16 type;
union {
- hfa384x_compident_t version;
- hfa384x_caplevel_t compat;
+ struct hfa384x_compident version;
+ struct hfa384x_caplevel compat;
u16 buildseq;
- hfa384x_compident_t platform;
+ struct hfa384x_compident platform;
} info;
};
struct pda {
u8 buf[HFA384x_PDA_LEN_MAX];
- hfa384x_pdrec_t *rec[HFA384x_PDA_RECS_MAX];
+ struct hfa384x_pdrec *rec[HFA384x_PDA_RECS_MAX];
unsigned int nrec;
};
@@ -152,22 +152,22 @@ static struct imgchunk fchunk[CHUNKS_MAX];
/* PDA, built from [card|newfile]+[addfile1+addfile2...] */
static struct pda pda;
-static hfa384x_compident_t nicid;
-static hfa384x_caplevel_t rfid;
-static hfa384x_caplevel_t macid;
-static hfa384x_caplevel_t priid;
+static struct hfa384x_compident nicid;
+static struct hfa384x_caplevel rfid;
+static struct hfa384x_caplevel macid;
+static struct hfa384x_caplevel priid;
/*================================================================*/
/* Local Function Declarations */
static int prism2_fwapply(const struct ihex_binrec *rfptr,
-wlandevice_t *wlandev);
+struct wlandevice *wlandev);
static int read_fwfile(const struct ihex_binrec *rfptr);
static int mkimage(struct imgchunk *clist, unsigned int *ccnt);
-static int read_cardpda(struct pda *pda, wlandevice_t *wlandev);
+static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
static int mkpdrlist(struct pda *pda);
@@ -177,7 +177,7 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
struct s3crcrec *s3crc, unsigned int ns3crc);
-static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
+static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks);
static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
@@ -201,7 +201,7 @@ static int validate_identity(void);
* 0 - success
* ~0 - failure
----------------------------------------------------------------*/
-static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
+static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
{
const struct firmware *fw_entry = NULL;
@@ -239,11 +239,11 @@ static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
* ~0 - failure
----------------------------------------------------------------*/
static int prism2_fwapply(const struct ihex_binrec *rfptr,
- wlandevice_t *wlandev)
+ struct wlandevice *wlandev)
{
signed int result = 0;
struct p80211msg_dot11req_mibget getmsg;
- p80211itemd_t *item;
+ struct p80211itemd *item;
u32 *data;
/* Initialize the data structures */
@@ -266,7 +266,7 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* clear the pda and add an initial END record */
memset(&pda, 0, sizeof(pda));
- pda.rec[0] = (hfa384x_pdrec_t *) pda.buf;
+ pda.rec[0] = (struct hfa384x_pdrec *)pda.buf;
pda.rec[0]->len = cpu_to_le16(2); /* len in words */
pda.rec[0]->code = cpu_to_le16(HFA384x_PDR_END_OF_PDA);
pda.nrec = 1;
@@ -293,11 +293,11 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
getmsg.resultcode.did = DIDmsg_dot11req_mibget_resultcode;
getmsg.resultcode.status = P80211ENUM_msgitem_status_no_value;
- item = (p80211itemd_t *) getmsg.mibattribute.data;
+ item = (struct p80211itemd *)getmsg.mibattribute.data;
item->did = DIDmib_p2_p2NIC_p2PRISupRange;
item->status = P80211ENUM_msgitem_status_no_value;
- data = (u32 *) item->data;
+ data = (u32 *)item->data;
/* DIDmsg_dot11req_mibget */
prism2mgmt_mibset_mibget(wlandev, &getmsg);
@@ -592,14 +592,14 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
----------------------------------------------------------------*/
static int mkpdrlist(struct pda *pda)
{
- u16 *pda16 = (u16 *) pda->buf;
+ u16 *pda16 = (u16 *)pda->buf;
int curroff; /* in 'words' */
pda->nrec = 0;
curroff = 0;
while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
- pda->rec[pda->nrec] = (hfa384x_pdrec_t *) &(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
HFA384x_PDR_NICID) {
@@ -638,7 +638,7 @@ static int mkpdrlist(struct pda *pda)
curroff, pda->nrec);
return 1;
}
- pda->rec[pda->nrec] = (hfa384x_pdrec_t *) &(pda16[curroff]);
+ pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
(pda->nrec)++;
return 0;
}
@@ -766,7 +766,7 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
* 0 - success
* ~0 - failure (probably an errno)
----------------------------------------------------------------*/
-static int read_cardpda(struct pda *pda, wlandevice_t *wlandev)
+static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
{
int result = 0;
struct p80211msg_p2req_readpda *msg;
@@ -879,8 +879,8 @@ static int read_fwfile(const struct ihex_binrec *record)
addr = be32_to_cpu(record->addr);
/* Point into data for different word lengths */
- ptr32 = (u32 *) record->data;
- ptr16 = (u16 *) record->data;
+ ptr32 = (u32 *)record->data;
+ ptr16 = (u16 *)record->data;
/* parse what was an S3 srec and put it in the right array */
switch (addr) {
@@ -954,7 +954,7 @@ static int read_fwfile(const struct ihex_binrec *record)
default: /* Data record */
s3data[ns3data].addr = addr;
s3data[ns3data].len = len;
- s3data[ns3data].data = (uint8_t *) record->data;
+ s3data[ns3data].data = (uint8_t *)record->data;
ns3data++;
if (ns3data == S3DATA_MAX) {
pr_err("S3 datarec limit reached - aborting\n");
@@ -982,7 +982,7 @@ static int read_fwfile(const struct ihex_binrec *record)
* 0 success
* ~0 failure
----------------------------------------------------------------*/
-static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
+static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
unsigned int nfchunks)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index d8ed9a05789c..170de1c9eac4 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -113,16 +113,16 @@
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
-int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_dot11req_scan *msg = msgp;
u16 roamingmode, word;
int i, timeout;
int istmpenable = 0;
- hfa384x_HostScanRequest_data_t scanreq;
+ struct hfa384x_HostScanRequest_data scanreq;
/* gatekeeper check */
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
@@ -292,7 +292,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
result = hfa384x_drvr_setconfig(hw,
HFA384x_RID_HOSTSCAN, &scanreq,
- sizeof(hfa384x_HostScanRequest_data_t));
+ sizeof(struct hfa384x_HostScanRequest_data));
if (result) {
netdev_err(wlandev->netdev,
"setconfig(SCANREQUEST) failed. result=%d\n",
@@ -366,12 +366,12 @@ exit:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
-int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_dot11req_scan_results *req;
- hfa384x_t *hw = wlandev->priv;
- hfa384x_HScanResultSub_t *item = NULL;
+ struct hfa384x *hw = wlandev->priv;
+ struct hfa384x_HScanResultSub *item = NULL;
int count;
@@ -525,15 +525,15 @@ exit:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
-int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_dot11req_start *msg = msgp;
- p80211pstrd_t *pstr;
+ struct p80211pstrd *pstr;
u8 bytebuf[80];
- struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *)bytebuf;
u16 word;
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -558,7 +558,7 @@ int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
/*** STATION ***/
/* Set the REQUIRED config items */
/* SSID */
- pstr = (p80211pstrd_t *) &(msg->ssid.data);
+ pstr = (struct p80211pstrd *)&(msg->ssid.data);
prism2mgmt_pstr2bytestr(p2bytestr, pstr);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFOWNSSID,
bytebuf, HFA384x_RID_CNFOWNSSID_LEN);
@@ -685,9 +685,7 @@ failed:
msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
done:
- result = 0;
-
- return result;
+ return 0;
}
/*----------------------------------------------------------------
@@ -708,9 +706,9 @@ done:
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
-int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_p2req_readpda *msg = msgp;
int result;
@@ -774,9 +772,9 @@ int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp)
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
-int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_p2req_ramdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
@@ -829,9 +827,9 @@ int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp)
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
-int prism2mgmt_ramdl_write(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_p2req_ramdl_write *msg = msgp;
u32 addr;
u32 len;
@@ -890,10 +888,10 @@ int prism2mgmt_ramdl_write(wlandevice_t *wlandev, void *msgp)
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
-int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_p2req_flashdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
@@ -961,9 +959,9 @@ int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp)
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
-int prism2mgmt_flashdl_write(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct p80211msg_p2req_flashdl_write *msg = msgp;
u32 addr;
u32 len;
@@ -1021,16 +1019,16 @@ int prism2mgmt_flashdl_write(wlandevice_t *wlandev, void *msgp)
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
-int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
int result = 0;
u16 reg;
u16 port_type;
struct p80211msg_lnxreq_autojoin *msg = msgp;
- p80211pstrd_t *pstr;
+ struct p80211pstrd *pstr;
u8 bytebuf[256];
- struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
+ struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *)bytebuf;
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -1054,7 +1052,7 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
/* Set the ssid */
memset(bytebuf, 0, 256);
- pstr = (p80211pstrd_t *) &(msg->ssid.data);
+ pstr = (struct p80211pstrd *)&(msg->ssid.data);
prism2mgmt_pstr2bytestr(p2bytestr, pstr);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
bytebuf,
@@ -1092,12 +1090,12 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
-int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
+int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_lnxreq_wlansniff *msg = msgp;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
u16 word;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index e6472034da33..cc1ac7a60dfe 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -63,43 +63,44 @@
extern int prism2_reset_holdtime;
extern int prism2_reset_settletime;
-u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate);
+u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
-void prism2sta_ev_info(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf);
-void prism2sta_ev_txexc(wlandevice_t *wlandev, u16 status);
-void prism2sta_ev_tx(wlandevice_t *wlandev, u16 status);
-void prism2sta_ev_alloc(wlandevice_t *wlandev);
+void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf);
+void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
+void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
+void prism2sta_ev_alloc(struct wlandevice *wlandev);
-int prism2mgmt_mibset_mibget(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_start(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_ramdl_write(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_flashdl_write(wlandevice_t *wlandev, void *msgp);
-int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp);
+int prism2mgmt_mibset_mibget(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_start(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp);
+int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp);
/*---------------------------------------------------------------
* conversion functions going between wlan message data types and
* Prism2 data types
---------------------------------------------------------------*/
/* byte area conversion functions*/
-void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len);
+void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
/* byte string conversion functions*/
void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
- p80211pstrd_t *pstr);
+ struct p80211pstrd *pstr);
void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
- p80211pstrd_t *pstr);
+ struct p80211pstrd *pstr);
/* functions to convert Group Addresses */
-void prism2mgmt_get_grpaddr(u32 did, p80211pstrd_t *pstr, hfa384x_t *priv);
+void prism2mgmt_get_grpaddr(u32 did, struct p80211pstrd *pstr,
+ struct hfa384x *priv);
int prism2mgmt_set_grpaddr(u32 did,
- u8 *prism2buf, p80211pstrd_t *pstr,
- hfa384x_t *priv);
+ u8 *prism2buf, struct p80211pstrd *pstr,
+ struct hfa384x *priv);
int prism2mgmt_get_grpaddr_index(u32 did);
void prism2sta_processing_defer(struct work_struct *data);
@@ -108,8 +109,8 @@ void prism2sta_commsqual_defer(struct work_struct *data);
void prism2sta_commsqual_timer(unsigned long data);
/* Interface callback functions, passing data back up to the cfg80211 layer */
-void prism2_connect_result(wlandevice_t *wlandev, u8 failed);
-void prism2_disconnected(wlandevice_t *wlandev);
-void prism2_roamed(wlandevice_t *wlandev);
+void prism2_connect_result(struct wlandevice *wlandev, u8 failed);
+void prism2_disconnected(struct wlandevice *wlandev);
+void prism2_roamed(struct wlandevice *wlandev);
#endif
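
With the prototypes in prism2mgmt.h now taking plain struct pointers, the header only needs the struct tags to be in scope; an incomplete (forward) declaration is sufficient for pointer parameters. The sketch below works under that assumption: the u8/u32 typedefs stand in for <linux/types.h>, and the real header presumably picks up the tags from its existing includes rather than declaring them like this.

/* Header-style sketch: forward declarations are enough for pointer parameters. */
#include <stdint.h>

typedef uint8_t u8;             /* stand-ins for the kernel's u8/u32 */
typedef uint32_t u32;

struct wlandevice;              /* incomplete types, fine for pointers */
struct hfa384x;
struct p80211pstrd;

u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
void prism2mgmt_get_grpaddr(u32 did, struct p80211pstrd *pstr,
                            struct hfa384x *priv);
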
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index fe914b1f904b..63ab6bc88654 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -87,80 +87,80 @@ struct mibrec {
u16 parm3;
int (*func)(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
};
static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_uint32(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
static int prism2mib_flag(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_privacyinvoked(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_excludeunencrypted(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data);
static int prism2mib_priv(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data);
static struct mibrec mibtab[] = {
/* dot11smt MIB's */
- {DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0,
+ {DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(1),
F_STA | F_WRITE,
HFA384x_RID_CNFWEPDEFAULTKEY0, 0, 0,
prism2mib_wepdefaultkey},
- {DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1,
+ {DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(2),
F_STA | F_WRITE,
HFA384x_RID_CNFWEPDEFAULTKEY1, 0, 0,
prism2mib_wepdefaultkey},
- {DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2,
+ {DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(3),
F_STA | F_WRITE,
HFA384x_RID_CNFWEPDEFAULTKEY2, 0, 0,
prism2mib_wepdefaultkey},
- {DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3,
+ {DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(4),
F_STA | F_WRITE,
HFA384x_RID_CNFWEPDEFAULTKEY3, 0, 0,
prism2mib_wepdefaultkey},
@@ -237,36 +237,36 @@ static struct mibrec mibtab[] = {
{0, 0, 0, 0, 0, NULL}
};
-/*----------------------------------------------------------------
-* prism2mgmt_mibset_mibget
-*
-* Set the value of a mib item.
-*
-* Arguments:
-* wlandev wlan device structure
-* msgp ptr to msg buffer
-*
-* Returns:
-* 0 success and done
-* <0 success, but we're waiting for something to finish.
-* >0 an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-* process thread (usually)
-* interrupt
-----------------------------------------------------------------*/
-
-int prism2mgmt_mibset_mibget(wlandevice_t *wlandev, void *msgp)
+/*
+ * prism2mgmt_mibset_mibget
+ *
+ * Set the value of a mib item.
+ *
+ * Arguments:
+ * wlandev wlan device structure
+ * msgp ptr to msg buffer
+ *
+ * Returns:
+ * 0 success and done
+ * <0 success, but we're waiting for something to finish.
+ * >0 an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ * process thread (usually)
+ * interrupt
+ */
+
+int prism2mgmt_mibset_mibget(struct wlandevice *wlandev, void *msgp)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
int result, isget;
struct mibrec *mib;
u16 which;
struct p80211msg_dot11req_mibset *msg = msgp;
- p80211itemd_t *mibitem;
+ struct p80211itemd *mibitem;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
msg->resultcode.data = P80211ENUM_resultcode_success;
@@ -284,7 +284,7 @@ int prism2mgmt_mibset_mibget(wlandevice_t *wlandev, void *msgp)
** MIB table.
*/
- mibitem = (p80211itemd_t *) msg->mibattribute.data;
+ mibitem = (struct p80211itemd *)msg->mibattribute.data;
for (mib = mibtab; mib->did != 0; mib++)
if (mib->did == mibitem->did && (mib->flag & which))
@@ -346,40 +346,40 @@ done:
return 0;
}
-/*----------------------------------------------------------------
-* prism2mib_bytearea2pstr
-*
-* Get/set pstr data to/from a byte area.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Number of bytes of RID data.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_bytearea2pstr
+ *
+ * Get/set pstr data to/from a byte area.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Number of bytes of RID data.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_bytearea2pstr(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
- p80211pstrd_t *pstr = data;
+ struct p80211pstrd *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
if (isget) {
@@ -396,41 +396,41 @@ static int prism2mib_bytearea2pstr(struct mibrec *mib,
return result;
}
-/*----------------------------------------------------------------
-* prism2mib_uint32
-*
-* Get/set uint32 data.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Not used.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_uint32
+ *
+ * Get/set uint32 data.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Not used.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_uint32(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
u32 *uint32 = data;
u8 bytebuf[MIB_TMP_MAXLEN];
- u16 *wordbuf = (u16 *) bytebuf;
+ u16 *wordbuf = (u16 *)bytebuf;
if (isget) {
result = hfa384x_drvr_getconfig16(hw, mib->parm1, wordbuf);
@@ -443,41 +443,41 @@ static int prism2mib_uint32(struct mibrec *mib,
return result;
}
-/*----------------------------------------------------------------
-* prism2mib_flag
-*
-* Get/set a flag.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Bit to get/set.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_flag
+ *
+ * Get/set a flag.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Bit to get/set.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_flag(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
u32 *uint32 = data;
u8 bytebuf[MIB_TMP_MAXLEN];
- u16 *wordbuf = (u16 *) bytebuf;
+ u16 *wordbuf = (u16 *)bytebuf;
u32 flags;
result = hfa384x_drvr_getconfig16(hw, mib->parm1, wordbuf);
@@ -500,40 +500,40 @@ static int prism2mib_flag(struct mibrec *mib,
return result;
}
-/*----------------------------------------------------------------
-* prism2mib_wepdefaultkey
-*
-* Get/set WEP default keys.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Number of bytes of RID data.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_wepdefaultkey
+ *
+ * Get/set WEP default keys.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Number of bytes of RID data.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_wepdefaultkey(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
int result;
- p80211pstrd_t *pstr = data;
+ struct p80211pstrd *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
u16 len;
@@ -550,35 +550,35 @@ static int prism2mib_wepdefaultkey(struct mibrec *mib,
return result;
}
-/*----------------------------------------------------------------
-* prism2mib_privacyinvoked
-*
-* Get/set the dot11PrivacyInvoked value.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Bit value for PrivacyInvoked flag.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_privacyinvoked
+ *
+ * Get/set the dot11PrivacyInvoked value.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Bit value for PrivacyInvoked flag.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_privacyinvoked(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
@@ -592,35 +592,35 @@ static int prism2mib_privacyinvoked(struct mibrec *mib,
return prism2mib_flag(mib, isget, wlandev, hw, msg, data);
}
-/*----------------------------------------------------------------
-* prism2mib_excludeunencrypted
-*
-* Get/set the dot11ExcludeUnencrypted value.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Bit value for ExcludeUnencrypted flag.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_excludeunencrypted
+ *
+ * Get/set the dot11ExcludeUnencrypted value.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Bit value for ExcludeUnencrypted flag.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_excludeunencrypted(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
@@ -628,35 +628,35 @@ static int prism2mib_excludeunencrypted(struct mibrec *mib,
return prism2mib_flag(mib, isget, wlandev, hw, msg, data);
}
-/*----------------------------------------------------------------
-* prism2mib_fragmentationthreshold
-*
-* Get/set the fragmentation threshold.
-*
-* MIB record parameters:
-* parm1 Prism2 RID value.
-* parm2 Not used.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_fragmentationthreshold
+ *
+ * Get/set the fragmentation threshold.
+ *
+ * MIB record parameters:
+ * parm1 Prism2 RID value.
+ * parm2 Not used.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
@@ -674,47 +674,47 @@ static int prism2mib_fragmentationthreshold(struct mibrec *mib,
return prism2mib_uint32(mib, isget, wlandev, hw, msg, data);
}
-/*----------------------------------------------------------------
-* prism2mib_priv
-*
-* Get/set values in the "priv" data structure.
-*
-* MIB record parameters:
-* parm1 Not used.
-* parm2 Not used.
-* parm3 Not used.
-*
-* Arguments:
-* mib MIB record.
-* isget MIBGET/MIBSET flag.
-* wlandev wlan device structure.
-* priv "priv" structure.
-* hw "hw" structure.
-* msg Message structure.
-* data Data buffer.
-*
-* Returns:
-* 0 - Success.
-* ~0 - Error.
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mib_priv
+ *
+ * Get/set values in the "priv" data structure.
+ *
+ * MIB record parameters:
+ * parm1 Not used.
+ * parm2 Not used.
+ * parm3 Not used.
+ *
+ * Arguments:
+ * mib MIB record.
+ * isget MIBGET/MIBSET flag.
+ * wlandev wlan device structure.
+ * priv "priv" structure.
+ * hw "hw" structure.
+ * msg Message structure.
+ * data Data buffer.
+ *
+ * Returns:
+ * 0 - Success.
+ * ~0 - Error.
+ *
+ */
static int prism2mib_priv(struct mibrec *mib,
int isget,
- wlandevice_t *wlandev,
- hfa384x_t *hw,
+ struct wlandevice *wlandev,
+ struct hfa384x *hw,
struct p80211msg_dot11req_mibset *msg, void *data)
{
- p80211pstrd_t *pstr = data;
+ struct p80211pstrd *pstr = data;
switch (mib->did) {
case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
- hfa384x_WPAData_t wpa;
+ struct hfa384x_WPAData wpa;
if (isget) {
hfa384x_drvr_getconfig(hw,
HFA384x_RID_CNFWPADATA,
- (u8 *) &wpa,
+ (u8 *)&wpa,
sizeof(wpa));
pstr->len = le16_to_cpu(wpa.datalen);
memcpy(pstr->data, wpa.data, pstr->len);
@@ -724,7 +724,7 @@ static int prism2mib_priv(struct mibrec *mib,
hfa384x_drvr_setconfig(hw,
HFA384x_RID_CNFWPADATA,
- (u8 *) &wpa,
+ (u8 *)&wpa,
sizeof(wpa));
}
break;
@@ -736,67 +736,67 @@ static int prism2mib_priv(struct mibrec *mib,
return 0;
}
-/*----------------------------------------------------------------
-* prism2mgmt_pstr2bytestr
-*
-* Convert the pstr data in the WLAN message structure into an hfa384x
-* byte string format.
-*
-* Arguments:
-* bytestr hfa384x byte string data type
-* pstr wlan message data
-*
-* Returns:
-* Nothing
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mgmt_pstr2bytestr
+ *
+ * Convert the pstr data in the WLAN message structure into an hfa384x
+ * byte string format.
+ *
+ * Arguments:
+ * bytestr hfa384x byte string data type
+ * pstr wlan message data
+ *
+ * Returns:
+ * Nothing
+ *
+ */
void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
- p80211pstrd_t *pstr)
+ struct p80211pstrd *pstr)
{
- bytestr->len = cpu_to_le16((u16) (pstr->len));
+ bytestr->len = cpu_to_le16((u16)(pstr->len));
memcpy(bytestr->data, pstr->data, pstr->len);
}
-/*----------------------------------------------------------------
-* prism2mgmt_bytestr2pstr
-*
-* Convert the data in an hfa384x byte string format into a
-* pstr in the WLAN message.
-*
-* Arguments:
-* bytestr hfa384x byte string data type
-* msg wlan message
-*
-* Returns:
-* Nothing
-*
-----------------------------------------------------------------*/
+/*
+ * prism2mgmt_bytestr2pstr
+ *
+ * Convert the data in an hfa384x byte string format into a
+ * pstr in the WLAN message.
+ *
+ * Arguments:
+ * bytestr hfa384x byte string data type
+ * msg wlan message
+ *
+ * Returns:
+ * Nothing
+ *
+ */
void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
- p80211pstrd_t *pstr)
+ struct p80211pstrd *pstr)
{
- pstr->len = (u8) (le16_to_cpu((u16) (bytestr->len)));
+ pstr->len = (u8)(le16_to_cpu((u16)(bytestr->len)));
memcpy(pstr->data, bytestr->data, pstr->len);
}
-/*----------------------------------------------------------------
-* prism2mgmt_bytearea2pstr
-*
-* Convert the data in an hfa384x byte area format into a pstr
-* in the WLAN message.
-*
-* Arguments:
-* bytearea hfa384x byte area data type
-* msg wlan message
-*
-* Returns:
-* Nothing
-*
-----------------------------------------------------------------*/
-
-void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len)
+/*
+ * prism2mgmt_bytearea2pstr
+ *
+ * Convert the data in an hfa384x byte area format into a pstr
+ * in the WLAN message.
+ *
+ * Arguments:
+ * bytearea hfa384x byte area data type
+ * msg wlan message
+ *
+ * Returns:
+ * Nothing
+ *
+ */
+
+void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len)
{
- pstr->len = (u8) len;
+ pstr->len = (u8)len;
memcpy(pstr->data, bytearea, len);
}
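
The mibtab change above swaps the four hand-numbered DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0..3 constants for one parameterized macro taking a 1-based item index, so key(1) still pairs with the ...WEPDEFAULTKEY0 RID. The macro itself is defined in p80211metadef.h and is not part of this hunk; the sketch below only illustrates the idea, and its base value and bit encoding are made up, not the real DID layout.

/* Hypothetical encoding, for illustration of the parameterized-macro idea only. */
#include <stdio.h>

#define DIDMIB_TABLE_BASE       0x01010000u     /* made-up table base */
#define DIDMIB_ITEM(n)          ((unsigned int)(n) << 8)

#define DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(n) \
        (DIDMIB_TABLE_BASE | DIDMIB_ITEM(n))

int main(void)
{
        /* key(1)..key(4) replace the old DefaultKey0..DefaultKey3 names. */
        for (int n = 1; n <= 4; n++)
                printf("key(%d) -> 0x%08x\n", n,
                       DIDmib_dot11smt_dot11WEPDefaultKeysTable_key(n));
        return 0;
}
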
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 64f90722b01b..e1b4a94292ff 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -81,7 +81,7 @@
#include "prism2mgmt.h"
static char *dev_info = "prism2_usb";
-static wlandevice_t *create_wlan(void);
+static struct wlandevice *create_wlan(void);
int prism2_reset_holdtime = 30; /* Reset hold time in ms */
int prism2_reset_settletime = 100; /* Reset settle time in ms */
@@ -98,37 +98,38 @@ MODULE_PARM_DESC(prism2_reset_settletime, "reset settle time in ms");
MODULE_LICENSE("Dual MPL/GPL");
-static int prism2sta_open(wlandevice_t *wlandev);
-static int prism2sta_close(wlandevice_t *wlandev);
-static void prism2sta_reset(wlandevice_t *wlandev);
-static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
+static int prism2sta_open(struct wlandevice *wlandev);
+static int prism2sta_close(struct wlandevice *wlandev);
+static void prism2sta_reset(struct wlandevice *wlandev);
+static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep);
-static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg);
-static int prism2sta_getcardinfo(wlandevice_t *wlandev);
-static int prism2sta_globalsetup(wlandevice_t *wlandev);
-static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev);
-
-static void prism2sta_inf_handover(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_tallies(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_authreq(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
-static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf);
+static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg);
+static int prism2sta_getcardinfo(struct wlandevice *wlandev);
+static int prism2sta_globalsetup(struct wlandevice *wlandev);
+static int prism2sta_setmulticast(struct wlandevice *wlandev,
+ struct net_device *dev);
+
+static void prism2sta_inf_handover(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_tallies(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_authreq(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
+static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf);
/*
* prism2sta_open
@@ -151,7 +152,7 @@ static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
* Call context:
* process thread
*/
-static int prism2sta_open(wlandevice_t *wlandev)
+static int prism2sta_open(struct wlandevice *wlandev)
{
/* We don't currently have to do anything else.
* The setup of the MAC should be subsequently completed via
@@ -185,7 +186,7 @@ static int prism2sta_open(wlandevice_t *wlandev)
* Call context:
* process thread
*/
-static int prism2sta_close(wlandevice_t *wlandev)
+static int prism2sta_close(struct wlandevice *wlandev)
{
/* We don't currently have to do anything else.
* Higher layers know we're not ready from dev->start==0 and
@@ -213,7 +214,7 @@ static int prism2sta_close(wlandevice_t *wlandev)
* Call context:
* process thread
*/
-static void prism2sta_reset(wlandevice_t *wlandev)
+static void prism2sta_reset(struct wlandevice *wlandev)
{
}
@@ -238,11 +239,11 @@ static void prism2sta_reset(wlandevice_t *wlandev)
* Call context:
* process thread
*/
-static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
+static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
/* If necessary, set the 802.11 WEP bit */
if ((wlandev->hostwep & (HOSTWEP_PRIVACYINVOKED | HOSTWEP_ENCRYPT)) ==
@@ -277,9 +278,9 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
* Call context:
* process thread
*/
-static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
+static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
int result = 0;
@@ -337,7 +338,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
struct p80211msg_lnxreq_ifstate *ifstatemsg;
pr_debug("Received mlme ifstate request\n");
- ifstatemsg = (struct p80211msg_lnxreq_ifstate *) msg;
+ ifstatemsg = (struct p80211msg_lnxreq_ifstate *)msg;
result =
prism2sta_ifstate(wlandev,
ifstatemsg->ifstate.data);
@@ -360,7 +361,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
pr_debug("Received commsquality request\n");
- qualmsg = (struct p80211msg_lnxreq_commsquality *) msg;
+ qualmsg = (struct p80211msg_lnxreq_commsquality *)msg;
qualmsg->link.status =
P80211ENUM_msgitem_status_data_ok;
@@ -407,9 +408,9 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
* process thread (usually)
* interrupt
*/
-u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
+u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
u32 result;
result = P80211ENUM_resultcode_implementation_failure;
@@ -580,10 +581,10 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
* Call context:
* Either.
*/
-static int prism2sta_getcardinfo(wlandevice_t *wlandev)
+static int prism2sta_getcardinfo(struct wlandevice *wlandev)
{
int result = 0;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
u16 temp;
u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN];
@@ -592,7 +593,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* NIC identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICIDENTITY,
&hw->ident_nic,
- sizeof(hfa384x_compident_t));
+ sizeof(struct hfa384x_compident));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve NICIDENTITY\n");
goto failed;
@@ -611,7 +612,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Primary f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
&hw->ident_pri_fw,
- sizeof(hfa384x_compident_t));
+ sizeof(struct hfa384x_compident));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve PRIIDENTITY\n");
goto failed;
@@ -630,7 +631,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Station (Secondary?) f/w identity */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
&hw->ident_sta_fw,
- sizeof(hfa384x_compident_t));
+ sizeof(struct hfa384x_compident));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve STAIDENTITY\n");
goto failed;
@@ -651,7 +652,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* strip out the 'special' variant bits */
hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15));
- hw->ident_sta_fw.variant &= ~((u16) (BIT(14) | BIT(15)));
+ hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15)));
if (hw->ident_sta_fw.id == 0x1f) {
netdev_info(wlandev->netdev,
@@ -670,7 +671,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, Modem supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_MFISUPRANGE,
&hw->cap_sup_mfi,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve MFISUPRANGE\n");
goto failed;
@@ -694,7 +695,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, Controller supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
&hw->cap_sup_cfi,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve CFISUPRANGE\n");
goto failed;
@@ -718,7 +719,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, Primary f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
&hw->cap_sup_pri,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve PRISUPRANGE\n");
goto failed;
@@ -742,7 +743,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, Station f/w supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
&hw->cap_sup_sta,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve STASUPRANGE\n");
goto failed;
@@ -774,7 +775,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, primary f/w actor, CFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRI_CFIACTRANGES,
&hw->cap_act_pri_cfi,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve PRI_CFIACTRANGES\n");
goto failed;
@@ -798,7 +799,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, sta f/w actor, CFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
&hw->cap_act_sta_cfi,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve STA_CFIACTRANGES\n");
goto failed;
@@ -822,7 +823,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
/* Compatibility range, sta f/w actor, MFI supplier */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
&hw->cap_act_sta_mfi,
- sizeof(hfa384x_caplevel_t));
+ sizeof(struct hfa384x_caplevel));
if (result) {
netdev_err(wlandev->netdev, "Failed to retrieve STA_MFIACTRANGES\n");
goto failed;
@@ -909,19 +910,20 @@ done:
* Call context:
* process thread
*/
-static int prism2sta_globalsetup(wlandevice_t *wlandev)
+static int prism2sta_globalsetup(struct wlandevice *wlandev)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
/* Set the maximum frame size */
return hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN,
WLAN_DATA_MAXLEN);
}
-static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev)
+static int prism2sta_setmulticast(struct wlandevice *wlandev,
+ struct net_device *dev)
{
int result = 0;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
u16 promisc;
@@ -959,8 +961,8 @@ exit:
* Call context:
* interrupt
*/
-static void prism2sta_inf_handover(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_handover(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
pr_debug("received infoframe:HANDOVER (unhandled)\n");
}
@@ -982,10 +984,10 @@ static void prism2sta_inf_handover(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_tallies(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_tallies(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
u16 *src16;
u32 *dst;
u32 *src32;
@@ -997,15 +999,15 @@ static void prism2sta_inf_tallies(wlandevice_t *wlandev,
* record length of the info record.
*/
- cnt = sizeof(hfa384x_CommTallies32_t) / sizeof(u32);
+ cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32);
if (inf->framelen > 22) {
- dst = (u32 *) &hw->tallies;
- src32 = (u32 *) &inf->info.commtallies32;
+ dst = (u32 *)&hw->tallies;
+ src32 = (u32 *)&inf->info.commtallies32;
for (i = 0; i < cnt; i++, dst++, src32++)
*dst += le32_to_cpu(*src32);
} else {
- dst = (u32 *) &hw->tallies;
- src16 = (u16 *) &inf->info.commtallies16;
+ dst = (u32 *)&hw->tallies;
+ src16 = (u16 *)&inf->info.commtallies16;
for (i = 0; i < cnt; i++, dst++, src16++)
*dst += le16_to_cpu(*src16);
}
@@ -1028,21 +1030,20 @@ static void prism2sta_inf_tallies(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
-
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
int nbss;
- hfa384x_ScanResult_t *sr = &(inf->info.scanresult);
+ struct hfa384x_ScanResult *sr = &(inf->info.scanresult);
int i;
- hfa384x_JoinRequest_data_t joinreq;
+ struct hfa384x_JoinRequest_data joinreq;
int result;
/* Get the number of results, first in bytes, then in results */
nbss = (inf->framelen * sizeof(u16)) -
sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
- nbss /= sizeof(hfa384x_ScanResultSub_t);
+ nbss /= sizeof(struct hfa384x_ScanResultSub);
/* Print em */
pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
@@ -1084,10 +1085,10 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
int nbss;
nbss = (inf->framelen - 3) / 32;
@@ -1098,7 +1099,7 @@ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
kfree(hw->scanresults);
- hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
+ hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC);
if (nbss == 0)
nbss = -1;
@@ -1125,18 +1126,18 @@ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
unsigned int i, n;
hw->channel_info.results.scanchannels =
le16_to_cpu(inf->info.chinforesult.scanchannels);
for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
- hfa384x_ChInfoResultSub_t *result;
- hfa384x_ChInfoResultSub_t *chinforesult;
+ struct hfa384x_ChInfoResultSub *result;
+ struct hfa384x_ChInfoResultSub *chinforesult;
int chan;
if (!(hw->channel_info.results.scanchannels & (1 << i)))
@@ -1170,18 +1171,18 @@ static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
void prism2sta_processing_defer(struct work_struct *data)
{
- hfa384x_t *hw = container_of(data, struct hfa384x, link_bh);
- wlandevice_t *wlandev = hw->wlandev;
- hfa384x_bytestr32_t ssid;
+ struct hfa384x *hw = container_of(data, struct hfa384x, link_bh);
+ struct wlandevice *wlandev = hw->wlandev;
+ struct hfa384x_bytestr32 ssid;
int result;
/* First let's process the auth frames */
{
struct sk_buff *skb;
- hfa384x_InfFrame_t *inf;
+ struct hfa384x_InfFrame *inf;
while ((skb = skb_dequeue(&hw->authq))) {
- inf = (hfa384x_InfFrame_t *) skb->data;
+ inf = (struct hfa384x_InfFrame *)skb->data;
prism2sta_inf_authreq_defer(wlandev, inf);
}
@@ -1256,8 +1257,8 @@ void prism2sta_processing_defer(struct work_struct *data)
return;
}
prism2mgmt_bytestr2pstr(
- (struct hfa384x_bytestr *) &ssid,
- (p80211pstrd_t *) &wlandev->ssid);
+ (struct hfa384x_bytestr *)&ssid,
+ (struct p80211pstrd *)&wlandev->ssid);
/* Collect the port status */
result = hfa384x_drvr_getconfig16(hw,
@@ -1337,8 +1338,8 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
- (p80211pstrd_t *) &wlandev->ssid);
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
+ (struct p80211pstrd *)&wlandev->ssid);
hw->link_status = HFA384x_LINK_CONNECTED;
netif_carrier_on(wlandev->netdev);
@@ -1390,7 +1391,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Disable Transmits, Ignore receives of data frames
*/
if (hw->join_ap && --hw->join_retries > 0) {
- hfa384x_JoinRequest_data_t joinreq;
+ struct hfa384x_JoinRequest_data joinreq;
joinreq = hw->joinreq;
/* Send the join request */
@@ -1438,10 +1439,10 @@ void prism2sta_processing_defer(struct work_struct *data)
* Call context:
* interrupt
*/
-static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus);
@@ -1466,11 +1467,11 @@ static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
- hfa384x_AssocStatus_t rec;
+ struct hfa384x *hw = wlandev->priv;
+ struct hfa384x_AssocStatus rec;
int i;
memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
@@ -1527,10 +1528,10 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
* interrupt
*
*/
-static void prism2sta_inf_authreq(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_authreq(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
struct sk_buff *skb;
skb = dev_alloc_skb(sizeof(*inf));
@@ -1542,11 +1543,11 @@ static void prism2sta_inf_authreq(wlandevice_t *wlandev,
}
}
-static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
- hfa384x_authenticateStation_data_t rec;
+ struct hfa384x *hw = wlandev->priv;
+ struct hfa384x_authenticateStation_data rec;
int i, added, result, cnt;
u8 *addr;
@@ -1716,10 +1717,10 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
- hfa384x_InfFrame_t *inf)
+static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
+ struct hfa384x_InfFrame *inf)
{
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
hw->psusercount = le16_to_cpu(inf->info.psusercnt.usercnt);
}
@@ -1741,7 +1742,7 @@ static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
* Call context:
* interrupt
*/
-void prism2sta_ev_info(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf)
+void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
{
inf->infotype = le16_to_cpu(inf->infotype);
/* Dispatch */
@@ -1808,7 +1809,7 @@ void prism2sta_ev_info(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf)
* Call context:
* interrupt
*/
-void prism2sta_ev_txexc(wlandevice_t *wlandev, u16 status)
+void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status)
{
pr_debug("TxExc status=0x%x.\n", status);
}
@@ -1829,7 +1830,7 @@ void prism2sta_ev_txexc(wlandevice_t *wlandev, u16 status)
* Call context:
* interrupt
*/
-void prism2sta_ev_tx(wlandevice_t *wlandev, u16 status)
+void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status)
{
pr_debug("Tx Complete, status=0x%04x\n", status);
/* update linux network stats */
@@ -1852,7 +1853,7 @@ void prism2sta_ev_tx(wlandevice_t *wlandev, u16 status)
* Call context:
* interrupt
*/
-void prism2sta_ev_alloc(wlandevice_t *wlandev)
+void prism2sta_ev_alloc(struct wlandevice *wlandev)
{
netif_wake_queue(wlandev->netdev);
}
@@ -1860,14 +1861,14 @@ void prism2sta_ev_alloc(wlandevice_t *wlandev)
/*
* create_wlan
*
-* Called at module init time. This creates the wlandevice_t structure
+* Called at module init time. This creates the struct wlandevice structure
* and initializes it with relevant bits.
*
* Arguments:
* none
*
* Returns:
-* the created wlandevice_t structure.
+* the created struct wlandevice structure.
*
* Side effects:
* also allocates the priv/hw structures.
@@ -1876,14 +1877,14 @@ void prism2sta_ev_alloc(wlandevice_t *wlandev)
* process thread
*
*/
-static wlandevice_t *create_wlan(void)
+static struct wlandevice *create_wlan(void)
{
- wlandevice_t *wlandev = NULL;
- hfa384x_t *hw = NULL;
+ struct wlandevice *wlandev = NULL;
+ struct hfa384x *hw = NULL;
/* Alloc our structures */
- wlandev = kzalloc(sizeof(wlandevice_t), GFP_KERNEL);
- hw = kzalloc(sizeof(hfa384x_t), GFP_KERNEL);
+ wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL);
+ hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL);
if (!wlandev || !hw) {
kfree(wlandev);
@@ -1913,11 +1914,11 @@ static wlandevice_t *create_wlan(void)
void prism2sta_commsqual_defer(struct work_struct *data)
{
- hfa384x_t *hw = container_of(data, struct hfa384x, commsqual_bh);
- wlandevice_t *wlandev = hw->wlandev;
- hfa384x_bytestr32_t ssid;
+ struct hfa384x *hw = container_of(data, struct hfa384x, commsqual_bh);
+ struct wlandevice *wlandev = hw->wlandev;
+ struct hfa384x_bytestr32 ssid;
struct p80211msg_dot11req_mibget msg;
- p80211item_uint32_t *mibitem = (p80211item_uint32_t *)
+ struct p80211item_uint32 *mibitem = (struct p80211item_uint32 *)
&msg.mibattribute.data;
int result = 0;
@@ -1950,7 +1951,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
/* Get the signal rate */
msg.msgcode = DIDmsg_dot11req_mibget;
mibitem->did = DIDmib_p2_p2MAC_p2CurrentTxRate;
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg);
if (result) {
pr_debug("get signal rate failed, result = %d\n",
@@ -1993,8 +1994,8 @@ void prism2sta_commsqual_defer(struct work_struct *data)
HFA384x_RID_CURRENTSSID, result);
return;
}
- prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *) &ssid,
- (p80211pstrd_t *) &wlandev->ssid);
+ prism2mgmt_bytestr2pstr((struct hfa384x_bytestr *)&ssid,
+ (struct p80211pstrd *)&wlandev->ssid);
/* Reschedule timer */
mod_timer(&hw->commsqual_timer, jiffies + HZ);
@@ -2002,7 +2003,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
void prism2sta_commsqual_timer(unsigned long data)
{
- hfa384x_t *hw = (hfa384x_t *) data;
+ struct hfa384x *hw = (struct hfa384x *)data;
schedule_work(&hw->commsqual_bh);
}
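
prism2sta_inf_tallies() above accumulates little-endian counters from the info frame into host-order u32 totals, picking the 16- or 32-bit source layout by frame length and deriving the field count from sizeof the 32-bit tallies struct. Here is a userspace analogue of that loop; it assumes glibc's <endian.h> le16toh()/le32toh() in place of the kernel's le16_to_cpu()/le32_to_cpu(), and a two-field stand-in for the real hfa384x tallies structs.

/* Userspace sketch of the tallies accumulation, not driver code. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct tallies32 {                      /* stand-in for hfa384x_CommTallies32 */
        uint32_t txframes;
        uint32_t rxframes;
};

static void add_tallies32(uint32_t *dst, const uint32_t *src32, size_t cnt)
{
        for (size_t i = 0; i < cnt; i++)
                dst[i] += le32toh(src32[i]);    /* counters arrive little endian */
}

static void add_tallies16(uint32_t *dst, const uint16_t *src16, size_t cnt)
{
        for (size_t i = 0; i < cnt; i++)
                dst[i] += le16toh(src16[i]);    /* short variant widens to u32 */
}

int main(void)
{
        uint32_t totals[2] = { 0, 0 };
        uint32_t wire32[2] = { htole32(3), htole32(5) };
        uint16_t wire16[2] = { htole16(1), htole16(2) };
        size_t cnt = sizeof(struct tallies32) / sizeof(uint32_t);

        add_tallies32(totals, wire32, cnt);     /* long frame: 32-bit counters */
        add_tallies16(totals, wire16, cnt);     /* short frame: 16-bit counters */
        printf("tx=%u rx=%u\n", (unsigned)totals[0], (unsigned)totals[1]);
        return 0;
}
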
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b26d09ff840c..bfb6b0a6528d 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -47,11 +47,11 @@ static const struct usb_device_id usb_prism_tbl[] = {
PRISM_DEV(0x0bb2, 0x0302, "Ambit Microsystems Corp."),
PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter"),
PRISM_DEV(0x0543, 0x0f01,
- "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)"),
+ "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)"),
PRISM_DEV(0x067c, 0x1022,
- "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter"),
+ "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter"),
PRISM_DEV(0x049f, 0x0033,
- "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter"),
+ "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter"),
{ } /* terminator */
};
MODULE_DEVICE_TABLE(usb, usb_prism_tbl);
@@ -61,8 +61,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
{
struct usb_device *dev;
- wlandevice_t *wlandev = NULL;
- hfa384x_t *hw = NULL;
+ struct wlandevice *wlandev = NULL;
+ struct hfa384x *hw = NULL;
int result = 0;
dev = interface_to_usbdev(interface);
@@ -74,7 +74,7 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
}
hw = wlandev->priv;
- if (wlan_setup(wlandev, &(interface->dev)) != 0) {
+ if (wlan_setup(wlandev, &interface->dev) != 0) {
dev_err(&interface->dev, "wlan_setup() failed.\n");
result = -EIO;
goto failed;
@@ -87,7 +87,7 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
/* Register the wlandev, this gets us a name and registers the
* linux netdevice.
*/
- SET_NETDEV_DEV(wlandev->netdev, &(interface->dev));
+ SET_NETDEV_DEV(wlandev->netdev, &interface->dev);
/* Do a chip-level reset on the MAC */
if (prism2_doreset) {
@@ -134,15 +134,15 @@ done:
static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
- wlandevice_t *wlandev;
+ struct wlandevice *wlandev;
- wlandev = (wlandevice_t *)usb_get_intfdata(interface);
- if (wlandev != NULL) {
+ wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ if (wlandev) {
LIST_HEAD(cleanlist);
- hfa384x_usbctlx_t *ctlx, *temp;
+ struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
- hfa384x_t *hw = wlandev->priv;
+ struct hfa384x *hw = wlandev->priv;
if (!hw)
goto exit;
@@ -216,12 +216,12 @@ exit:
#ifdef CONFIG_PM
static int prism2sta_suspend(struct usb_interface *interface,
- pm_message_t message)
+ pm_message_t message)
{
- hfa384x_t *hw = NULL;
- wlandevice_t *wlandev;
+ struct hfa384x *hw = NULL;
+ struct wlandevice *wlandev;
- wlandev = (wlandevice_t *)usb_get_intfdata(interface);
+ wlandev = (struct wlandevice *)usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
@@ -241,10 +241,10 @@ static int prism2sta_suspend(struct usb_interface *interface,
static int prism2sta_resume(struct usb_interface *interface)
{
int result = 0;
- hfa384x_t *hw = NULL;
- wlandevice_t *wlandev;
+ struct hfa384x *hw = NULL;
+ struct wlandevice *wlandev;
- wlandev = (wlandevice_t *)usb_get_intfdata(interface);
+ wlandev = (struct wlandevice *)usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
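
The prism2usb.c hunks are pure style cleanups: "if (ptr != NULL)" becomes "if (ptr)", and "&(interface->dev)" loses its redundant parentheses because -> binds tighter than unary &. A tiny sketch of both, using a mock usb_interface whose dev member is an int purely for illustration (in the kernel it is a struct device):

/* Equivalent spellings; the second form of each pair is preferred kernel style. */
#include <stdio.h>

struct usb_interface { int dev; };      /* mock, not the real structure */

int main(void)
{
        struct usb_interface intf = { .dev = 42 };
        struct usb_interface *interface = &intf;
        int *a = NULL, *b = NULL;

        if (interface != NULL)          /* old style */
                a = &(interface->dev);
        if (interface)                  /* new style, same truth value */
                b = &interface->dev;    /* '->' binds tighter than '&' */

        printf("%d\n", a == b);         /* prints 1 */
        return 0;
}
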
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index d56ef1425f6b..0c78491ff5a1 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -62,7 +62,6 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
unsigned short RefreshRateTableIndex = 0;
- int Clock;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
@@ -73,9 +72,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- Clock = XGI_VCLKData[ClockIndex].CLOCK * 1000;
-
- return Clock;
+ return XGI_VCLKData[ClockIndex].CLOCK * 1000;
}
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
@@ -1227,7 +1224,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
unsigned int vtotal = 0;
unsigned int drate = 0, hrate = 0;
int found_mode = 0;
- int refresh_rate, search_idx;
+ int search_idx;
if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
vtotal = var->upper_margin + var->yres + var->lower_margin
@@ -1263,10 +1260,6 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
xgifb_info->refresh_rate = 60;
}
- /* Calculation wrong for 1024x600 - force it to 60Hz */
- if ((var->xres == 1024) && (var->yres == 600))
- refresh_rate = 60;
-
search_idx = 0;
while ((XGIbios_mode[search_idx].mode_no != 0) &&
(XGIbios_mode[search_idx].xres <= var->xres)) {
@@ -2085,7 +2078,7 @@ static int __init xgifb_init(void)
{
char *option = NULL;
- if (forcecrt2type != NULL)
+ if (forcecrt2type)
XGIfb_search_crt2type(forcecrt2type);
if (fb_get_options("xgifb", &option))
return -ENODEV;
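
The XGI_main_26.c hunks drop a temporary that was only assigned and returned, and delete the 1024x600 special case because the refresh_rate it wrote was never read afterwards. A sketch of the first cleanup, with a made-up clock table standing in for XGI_VCLKData:

/* Sketch of "return the expression directly"; table values are invented. */
#include <stdio.h>

static const int clock_khz[] = { 25175, 40000, 65000 };

static int dclock_old(int idx)
{
        int clock;

        clock = clock_khz[idx] * 1000;  /* temporary adds nothing */
        return clock;
}

static int dclock_new(int idx)
{
        return clock_khz[idx] * 1000;   /* same result, one line */
}

int main(void)
{
        printf("%d %d\n", dclock_old(1), dclock_new(1));
        return 0;
}
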
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 50c8ea4f5ab7..d8010c5c1a70 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -214,7 +214,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & TVSetPAL) &&
(modeflag & NoSupportSimuTV) &&
(pVBInfo->VBInfo & SetInSlaveMode) &&
- (!(pVBInfo->VBInfo & SetNotSimuMode)))
+ !(pVBInfo->VBInfo & SetNotSimuMode))
return 0;
}
@@ -1647,7 +1647,6 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
-
unsigned short index, modeflag;
unsigned char tempal;
@@ -1655,7 +1654,7 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
if ((pVBInfo->SetFlag & ProgrammingCRT2) &&
- (!(pVBInfo->LCDInfo & EnableScalingLCD))) { /* {LCDA/LCDB} */
+ !(pVBInfo->LCDInfo & EnableScalingLCD)) { /* {LCDA/LCDB} */
index = XGI_GetLCDCapPtr(pVBInfo);
tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
@@ -1678,7 +1677,6 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
if (!(modeflag & Charx8Dot))
tempal = TVCLKBASE_315 +
HiTVTextVCLK;
-
}
return tempal;
}
@@ -1716,7 +1714,7 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
{
if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
- if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
(pVBInfo->SetFlag & ProgrammingCRT2)) {
*di_0 = XGI_VBVCLKData[tempal].Part4_A;
*di_1 = XGI_VBVCLKData[tempal].Part4_B;
@@ -1741,8 +1739,8 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
for (i = 0; i < 4; i++) {
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
(unsigned short) (0x10 * i));
- if ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
- && (!(pVBInfo->VBInfo & SetInSlaveMode))) {
+ if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
+ !(pVBInfo->VBInfo & SetInSlaveMode)) {
xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
} else {
@@ -1915,7 +1913,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
}
}
- if (pVBInfo->VBType & (VB_SIS301LV|VB_SIS302LV|VB_XGI301C)) {
+ if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
if (temp & SetYPbPr) {
/* shampoo add for new scratch */
temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
@@ -1986,7 +1984,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
}
if (!(tempbx & DisableCRT2Display)) {
- if ((!(tempbx & DriverMode)) || (!(modeflag & CRT2Mode))) {
+ if (!(tempbx & DriverMode) || !(modeflag & CRT2Mode)) {
if (!(tempbx & XGI_SetCRT2ToLCDA))
tempbx |= (SetInSlaveMode | SetSimuScanMode);
}
@@ -2132,7 +2130,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
if ((pVBInfo->LCDResInfo == Panel_1400x1050) &&
(pVBInfo->VBInfo & SetCRT2ToLCD) && (resinfo == 9) &&
- (!(tempbx & EnableScalingLCD)))
+ !(tempbx & EnableScalingLCD))
/*
* set to center in 1280x1024 LCDB
* for Panel_1400x1050
@@ -2245,7 +2243,6 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
/* CR B4[1] */
xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
}
temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
@@ -2274,7 +2271,6 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
/* CR B4[1] */
xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
}
xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
@@ -2291,7 +2287,6 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
if (pXGIHWDE->jChipType == XG21) {
if (pVBInfo->IF_DEF_LVDS == 1) {
@@ -2310,7 +2305,6 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
/* DVO/DVI signal on */
XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
}
-
}
if (pXGIHWDE->jChipType == XG27) {
@@ -2330,7 +2324,6 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
/* DVO/DVI signal on */
XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
}
-
}
}
@@ -2338,7 +2331,6 @@ void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
-
if (pXGIHWDE->jChipType == XG21) {
if (pVBInfo->IF_DEF_LVDS == 1) {
/* LVDS backlight off */
@@ -2455,7 +2447,6 @@ exit:
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
-
if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
@@ -3922,7 +3913,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVision))) {
+ !(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
/* Set Vertical Scaling */
Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
@@ -3932,7 +3923,7 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
}
if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- (!(pVBInfo->VBInfo & SetCRT2ToHiVision)))
+ !(pVBInfo->VBInfo & SetCRT2ToHiVision))
/* Enable V.Scaling */
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
else
@@ -4132,7 +4123,6 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
if (pVBInfo->VGAHDE > 800)
xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
-
}
temp = 0x0036;
@@ -4141,9 +4131,9 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
| TVSetYPbPr525p | TVSetYPbPr750p
| TVSetHiVision))) {
temp |= 0x0001;
- if ((pVBInfo->VBInfo & SetInSlaveMode)
- && (!(pVBInfo->TVInfo
- & TVSimuMode)))
+ if ((pVBInfo->VBInfo & SetInSlaveMode) &&
+ !(pVBInfo->TVInfo
+ & TVSimuMode))
temp &= (~0x0001);
}
}
@@ -4389,7 +4379,6 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
(value << 2) & 0x7C);
for (temp = 0, value = 0; temp < 3; temp++) {
-
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
xgifb_reg_set(pVBInfo->P3c4,
0x2B, xgifb_info->lvds_data.VCLKData1);
@@ -4409,7 +4398,6 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
inb(pVBInfo->P3da); /* reset 3da */
}
-
}
/* --------------------------------------------------------------------- */
@@ -4476,7 +4464,7 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
- ((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
+ (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
@@ -4587,7 +4575,7 @@ static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
| VB_SIS302LV | VB_XGI301C)) {
- if ((!(pVBInfo->VBInfo & SetInSlaveMode)) || (pVBInfo->TVInfo
+ if (!(pVBInfo->VBInfo & SetInSlaveMode) || (pVBInfo->TVInfo
& TVSimuMode)) {
*tempbx += 8;
*tempcl += 1;
@@ -4988,8 +4976,8 @@ reg_and_or:
if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
| XGI_SetCRT2ToLCDA)) {
tempah &= (~0x08);
- if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
- & SetInSlaveMode))) {
+ if ((pVBInfo->ModeType == ModeVGA) && !(pVBInfo->VBInfo
+ & SetInSlaveMode)) {
tempah |= 0x010;
}
tempah |= 0x080;
@@ -5416,7 +5404,6 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
if (HwDeviceExtension->jChipType >= XG21) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
if (temp & 0xA0) {
-
if (HwDeviceExtension->jChipType == XG27)
XGI_SetXG27CRTC(RefreshRateTableIndex, pVBInfo);
else
@@ -5486,7 +5473,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
- (!(pVBInfo->VBInfo & SwitchCRT2))) {
+ !(pVBInfo->VBInfo & SwitchCRT2)) {
XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
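
The xgifb hunks above only drop redundant parentheses and stray blank lines; since logical NOT binds tighter than the bitwise AND inside it, the inner parentheses around each mask test are the ones that matter. A small standalone C sketch (with made-up flag values, purely for illustration) showing the two spellings are equivalent:

#include <stdio.h>

#define DriverMode 0x0400	/* hypothetical values, for illustration only */
#define CRT2Mode   0x0800

int main(void)
{
	unsigned short tempbx = 0, modeflag = CRT2Mode;

	/* '!' binds tighter than '&', so the inner parentheses are required;
	 * the outer ones removed by the hunks above were redundant. */
	int with_parens    = (!(tempbx & DriverMode)) || (!(modeflag & CRT2Mode));
	int without_parens =  !(tempbx & DriverMode)  ||  !(modeflag & CRT2Mode);

	printf("%d %d\n", with_parens, without_parens);	/* prints "1 1" */
	return 0;
}
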
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 0ae0b131abfc..2fb1bf1a26c5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -24,6 +24,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"
@@ -72,15 +73,6 @@ out:
return wr_waitp->ret;
}
-/* Returns whether a CPL status conveys negative advice.
- */
-static int cxgbit_is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
@@ -623,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_close_con_req *req;
- unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+ u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_close_con_req *)__skb_put(skb, len);
- memset(req, 0, len);
-
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- csk->tid));
- req->rsvd = 0;
+ cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+ NULL, NULL);
cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
__skb_queue_tail(&csk->txq, skb);
@@ -662,9 +647,8 @@ static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
- struct cpl_abort_req *req;
- unsigned int len = roundup(sizeof(*req), 16);
struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_abort_req), 16);
pr_debug("%s: csk %p tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
@@ -675,15 +659,9 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
cxgbit_send_tx_flowc_wr(csk);
skb = __skb_dequeue(&csk->skbq);
- req = (struct cpl_abort_req *)__skb_put(skb, len);
- memset(req, 0, len);
+ cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
+ csk->com.cdev, cxgbit_abort_arp_failure);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
- csk->tid));
- req->cmd = CPL_ABORT_SEND_RST;
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
@@ -789,109 +767,6 @@ void _cxgbit_free_csk(struct kref *kref)
kfree(csk);
}
-static void
-cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
- __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
- __be16 *peer_port)
-{
- u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
- __func__,
- ntohl(ip->saddr), ntohl(ip->daddr),
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
- __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr,
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
-
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-}
-
-static int
-cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
-{
- u8 i;
-
- egress_dev = cxgbit_get_real_dev(egress_dev);
- for (i = 0; i < cdev->lldi.nports; i++)
- if (cdev->lldi.ports[i] == egress_dev)
- return 1;
- return 0;
-}
-
-static struct dst_entry *
-cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
-{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
-out:
- return dst;
-}
-
-static struct dst_entry *
-cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
- local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!cxgbit_our_interface(cdev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
-}
-
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
unsigned int linkspeed;
@@ -1072,21 +947,14 @@ int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
- struct cpl_tid_release *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_tid_release *)__skb_put(skb, len);
- memset(req, 0, len);
-
- INIT_TP_WR(req, tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
- CPL_TID_RELEASE, tid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ cxgb_mk_tid_release(skb, len, tid, 0);
cxgbit_ofld_send(cdev, skb);
}
@@ -1108,20 +976,6 @@ cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
return ret < 0 ? ret : 0;
}
-static void
-cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
-{
- unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ? round_up(TCPOLEN_TIMESTAMP,
- 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
-}
-
static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
if (csk->com.state != CSK_STATE_ESTABLISHED) {
@@ -1140,22 +994,18 @@ static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_rx_data_ack *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -1;
- req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
- memset(req, 0, len);
+ credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ RX_CREDITS_V(csk->rx_credits);
- set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- csk->tid));
- req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
- RX_CREDITS_V(csk->rx_credits));
+ cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
+ credit_dack);
csk->rx_credits = 0;
@@ -1210,15 +1060,6 @@ out:
return -ENOMEM;
}
-static u32 cxgbit_compute_wscale(u32 win)
-{
- u32 wscale = 0;
-
- while (wscale < 14 && (65535 << wscale) < win)
- wscale++;
- return wscale;
-}
-
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
@@ -1246,10 +1087,10 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
csk->tid));
- cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
- req->tcpopt.tstamp,
- (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
- wscale = cxgbit_compute_wscale(csk->rcv_win);
+ cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(csk->rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
@@ -1340,8 +1181,8 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
- cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
- &local_port, &peer_port);
+ cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
+ peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
@@ -1350,21 +1191,23 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
- *(__be32 *)peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
+ *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
"lport %d rport %d peer_mss %d\n"
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &cnp->com.local_addr)->sin6_scope_id);
+ dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
+ local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
}
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
@@ -1795,16 +1638,15 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_abort_req_rss *hdr = cplhdr(skb);
unsigned int tid = GET_TID(hdr);
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
bool release = false;
bool wakeup_thread = false;
- unsigned int len = roundup(sizeof(*rpl), 16);
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, tid, csk->com.state);
- if (cxgbit_is_neg_adv(hdr->status)) {
+ if (cxgb_is_neg_adv(hdr->status)) {
pr_err("%s: got neg advise %d on tid %u\n",
__func__, hdr->status, tid);
goto rel_skb;
@@ -1839,14 +1681,8 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
cxgbit_send_tx_flowc_wr(csk);
rpl_skb = __skb_dequeue(&csk->skbq);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-
- rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
- memset(rpl, 0, len);
- INIT_TP_WR(rpl, csk->tid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
- rpl->cmd = CPL_ABORT_NO_RST;
+ cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
cxgbit_ofld_send(csk->com.cdev, rpl_skb);
if (wakeup_thread) {
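
The cxgbit_cm.c changes above replace several open-coded helpers with the shared libcxgb ones (cxgb_compute_wscale(), cxgb_best_mtu(), cxgb_find_route()/cxgb_find_route6() and the cxgb_mk_*() CPL builders). For reference, the window-scale computation that the removed cxgbit_compute_wscale() performed, and that cxgb_compute_wscale() now provides, picks the smallest TCP window-scale shift (capped at 14) that lets the 16-bit window field cover the requested receive window. A minimal userspace sketch of that computation:

#include <stdio.h>
#include <stdint.h>

/* Smallest shift such that (65535 << wscale) covers win; capped at 14,
 * mirroring the helper removed in the hunk above. */
static uint32_t compute_wscale(uint32_t win)
{
	uint32_t wscale = 0;

	while (wscale < 14 && (65535U << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	/* A 256 KiB receive window needs a shift of 3: 65535 << 2 falls
	 * just short of 262144. */
	printf("wscale(262144) = %u\n", compute_wscale(262144));
	return 0;
}
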
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 27dd11aff934..ad26b9372f10 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -652,6 +652,9 @@ static struct iscsit_transport cxgbit_transport = {
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = true,
.add = cxgbit_uld_add,
.state_change = cxgbit_uld_state_change,
.lro_rx_handler = cxgbit_uld_lro_rx_handler,
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 51e0d32883ba..a23fa5ed1d67 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -800,7 +800,7 @@ out_free_file:
return retval;
}
-static struct file_operations ptmx_fops;
+static struct file_operations ptmx_fops __ro_after_init;
static void __init unix98_pty_init(void)
{
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 1a16feac9a36..a697a8585ddc 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -31,6 +31,11 @@ struct uart_8250_dma {
struct dma_chan *rxchan;
struct dma_chan *txchan;
+ /* Device address base for DMA operations */
+ phys_addr_t rx_dma_addr;
+ phys_addr_t tx_dma_addr;
+
+ /* DMA address of the buffer in memory */
dma_addr_t rx_addr;
dma_addr_t tx_addr;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index dcf43f66404f..240a361b674f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -639,7 +639,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
{
char match[] = "uart"; /* 8250-specific earlycon name */
unsigned char iotype;
- unsigned long addr;
+ resource_size_t addr;
int i;
if (strncmp(name, match, 4) != 0)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 3590d012001f..fdbddbc6375d 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -142,7 +142,7 @@ void serial8250_rx_dma_flush(struct uart_8250_port *p)
if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_complete(p);
- dmaengine_terminate_all(dma->rxchan);
+ dmaengine_terminate_async(dma->rxchan);
}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
@@ -150,6 +150,10 @@ EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
int serial8250_request_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
+ phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
+ dma->rx_dma_addr : p->port.mapbase;
+ phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
+ dma->tx_dma_addr : p->port.mapbase;
dma_cap_mask_t mask;
struct dma_slave_caps caps;
int ret;
@@ -157,11 +161,11 @@ int serial8250_request_dma(struct uart_8250_port *p)
/* Default slave configuration parameters */
dma->rxconf.direction = DMA_DEV_TO_MEM;
dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma->rxconf.src_addr = p->port.mapbase + UART_RX;
+ dma->rxconf.src_addr = rx_dma_addr + UART_RX;
dma->txconf.direction = DMA_MEM_TO_DEV;
dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma->txconf.dst_addr = p->port.mapbase + UART_TX;
+ dma->txconf.dst_addr = tx_dma_addr + UART_TX;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -247,14 +251,14 @@ void serial8250_release_dma(struct uart_8250_port *p)
return;
/* Release RX resources */
- dmaengine_terminate_all(dma->rxchan);
+ dmaengine_terminate_sync(dma->rxchan);
dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
dma->rx_addr);
dma_release_channel(dma->rxchan);
dma->rxchan = NULL;
/* Release TX resources */
- dmaengine_terminate_all(dma->txchan);
+ dmaengine_terminate_sync(dma->txchan);
dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_release_channel(dma->txchan);
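
The rx_dma_addr/tx_dma_addr fields added to struct uart_8250_dma in the 8250.h hunk earlier are consumed here: serial8250_request_dma() now lets a glue driver supply a device-side DMA address that differs from the MMIO mapbase, falling back to the mapbase when no override is set (the new 8250_lpss driver further below uses this for Quark, which wants 0xfffff000). A tiny sketch of that selection, with made-up addresses:

#include <stdio.h>

typedef unsigned long long phys_addr_t;

int main(void)
{
	phys_addr_t mapbase = 0xfedc6000;	/* hypothetical MMIO base */
	phys_addr_t rx_dma_addr = 0;		/* 0 means "no override" */

	/* Same selection the hunk adds in serial8250_request_dma(). */
	phys_addr_t rx = rx_dma_addr ? rx_dma_addr : mapbase;

	printf("RX slave address: %#llx\n", rx);
	return 0;
}
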
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 5c0c123565ad..459d726f9d59 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -365,18 +365,19 @@ static int dw8250_probe(struct platform_device *pdev)
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
struct uart_port *p = &uart.port;
+ struct device *dev = &pdev->dev;
struct dw8250_data *data;
int err;
u32 val;
if (!regs) {
- dev_err(&pdev->dev, "no registers defined\n");
+ dev_err(dev, "no registers defined\n");
return -EINVAL;
}
if (irq < 0) {
if (irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "cannot get irq\n");
+ dev_err(dev, "cannot get irq\n");
return irq;
}
@@ -387,16 +388,16 @@ static int dw8250_probe(struct platform_device *pdev)
p->pm = dw8250_do_pm;
p->type = PORT_8250;
p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
- p->dev = &pdev->dev;
+ p->dev = dev;
p->iotype = UPIO_MEM;
p->serial_in = dw8250_serial_in;
p->serial_out = dw8250_serial_out;
- p->membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
if (!p->membase)
return -ENOMEM;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -404,57 +405,57 @@ static int dw8250_probe(struct platform_device *pdev)
data->usr_reg = DW_UART_USR;
p->private_data = data;
- data->uart_16550_compatible = device_property_read_bool(p->dev,
+ data->uart_16550_compatible = device_property_read_bool(dev,
"snps,uart-16550-compatible");
- err = device_property_read_u32(p->dev, "reg-shift", &val);
+ err = device_property_read_u32(dev, "reg-shift", &val);
if (!err)
p->regshift = val;
- err = device_property_read_u32(p->dev, "reg-io-width", &val);
+ err = device_property_read_u32(dev, "reg-io-width", &val);
if (!err && val == 4) {
p->iotype = UPIO_MEM32;
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
}
- if (device_property_read_bool(p->dev, "dcd-override")) {
+ if (device_property_read_bool(dev, "dcd-override")) {
/* Always report DCD as active */
data->msr_mask_on |= UART_MSR_DCD;
data->msr_mask_off |= UART_MSR_DDCD;
}
- if (device_property_read_bool(p->dev, "dsr-override")) {
+ if (device_property_read_bool(dev, "dsr-override")) {
/* Always report DSR as active */
data->msr_mask_on |= UART_MSR_DSR;
data->msr_mask_off |= UART_MSR_DDSR;
}
- if (device_property_read_bool(p->dev, "cts-override")) {
+ if (device_property_read_bool(dev, "cts-override")) {
/* Always report CTS as active */
data->msr_mask_on |= UART_MSR_CTS;
data->msr_mask_off |= UART_MSR_DCTS;
}
- if (device_property_read_bool(p->dev, "ri-override")) {
+ if (device_property_read_bool(dev, "ri-override")) {
/* Always report Ring indicator as inactive */
data->msr_mask_off |= UART_MSR_RI;
data->msr_mask_off |= UART_MSR_TERI;
}
/* Always ask for fixed clock rate from a property. */
- device_property_read_u32(p->dev, "clock-frequency", &p->uartclk);
+ device_property_read_u32(dev, "clock-frequency", &p->uartclk);
/* If there is separate baudclk, get the rate from it. */
- data->clk = devm_clk_get(&pdev->dev, "baudclk");
+ data->clk = devm_clk_get(dev, "baudclk");
if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
- data->clk = devm_clk_get(&pdev->dev, NULL);
+ data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR_OR_NULL(data->clk)) {
err = clk_prepare_enable(data->clk);
if (err)
- dev_warn(&pdev->dev, "could not enable optional baudclk: %d\n",
+ dev_warn(dev, "could not enable optional baudclk: %d\n",
err);
else
p->uartclk = clk_get_rate(data->clk);
@@ -462,24 +463,24 @@ static int dw8250_probe(struct platform_device *pdev)
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
- dev_err(&pdev->dev, "clock rate not defined\n");
+ dev_err(dev, "clock rate not defined\n");
return -EINVAL;
}
- data->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) {
+ data->pclk = devm_clk_get(dev, "apb_pclk");
+ if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_clk;
}
if (!IS_ERR(data->pclk)) {
err = clk_prepare_enable(data->pclk);
if (err) {
- dev_err(&pdev->dev, "could not enable apb_pclk\n");
+ dev_err(dev, "could not enable apb_pclk\n");
goto err_clk;
}
}
- data->rst = devm_reset_control_get_optional(&pdev->dev, NULL);
+ data->rst = devm_reset_control_get_optional(dev, NULL);
if (IS_ERR(data->rst) && PTR_ERR(data->rst) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_pclk;
@@ -511,8 +512,8 @@ static int dw8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
return 0;
@@ -624,6 +625,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "APMC0D08", 0},
{ "AMD0020", 0 },
{ "AMDI0020", 0 },
+ { "HISI0031", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
new file mode 100644
index 000000000000..886fcf37f291
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -0,0 +1,378 @@
+/*
+ * 8250_lpss.c - Driver for UART on Intel Braswell and various other Intel SoCs
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rational.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma/dw.h>
+
+#include "8250.h"
+
+#define PCI_DEVICE_ID_INTEL_QRK_UARTx 0x0936
+
+#define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a
+#define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c
+
+#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
+#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+
+#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
+
+/* Intel LPSS specific registers */
+
+#define BYT_PRV_CLK 0x800
+#define BYT_PRV_CLK_EN BIT(0)
+#define BYT_PRV_CLK_M_VAL_SHIFT 1
+#define BYT_PRV_CLK_N_VAL_SHIFT 16
+#define BYT_PRV_CLK_UPDATE BIT(31)
+
+#define BYT_TX_OVF_INT 0x820
+#define BYT_TX_OVF_INT_MASK BIT(1)
+
+struct lpss8250;
+
+struct lpss8250_board {
+ unsigned long freq;
+ unsigned int base_baud;
+ int (*setup)(struct lpss8250 *, struct uart_port *p);
+ void (*exit)(struct lpss8250 *);
+};
+
+struct lpss8250 {
+ int line;
+ struct lpss8250_board *board;
+
+ /* DMA parameters */
+ struct uart_8250_dma dma;
+ struct dw_dma_chip dma_chip;
+ struct dw_dma_slave dma_param;
+ u8 dma_maxburst;
+};
+
+static void byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = tty_termios_baud_rate(termios);
+ struct lpss8250 *lpss = p->private_data;
+ unsigned long fref = lpss->board->freq, fuart = baud * 16;
+ unsigned long w = BIT(15) - 1;
+ unsigned long m, n;
+ u32 reg;
+
+ /* Gracefully handle the B0 case: fall back to B9600 */
+ fuart = fuart ? fuart : 9600 * 16;
+
+ /* Get Fuart closer to Fref */
+ fuart *= rounddown_pow_of_two(fref / fuart);
+
+ /*
+ * For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the
+ * dividers must be adjusted.
+ *
+ * uartclk = (m / n) * 100 MHz, where m <= n
+ */
+ rational_best_approximation(fuart, fref, w, w, &m, &n);
+ p->uartclk = fuart;
+
+ /* Reset the clock */
+ reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT);
+ writel(reg, p->membase + BYT_PRV_CLK);
+ reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
+ writel(reg, p->membase + BYT_PRV_CLK);
+
+ p->status &= ~UPSTAT_AUTOCTS;
+ if (termios->c_cflag & CRTSCTS)
+ p->status |= UPSTAT_AUTOCTS;
+
+ serial8250_do_set_termios(p, termios, old);
+}
+
+static unsigned int byt_get_mctrl(struct uart_port *port)
+{
+ unsigned int ret = serial8250_do_get_mctrl(port);
+
+ /* Force DCD and DSR signals to permanently be reported as active */
+ ret |= TIOCM_CAR | TIOCM_DSR;
+
+ return ret;
+}
+
+static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
+{
+ struct dw_dma_slave *param = &lpss->dma_param;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct pci_dev *pdev = to_pci_dev(port->dev);
+ unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_UART1:
+ case PCI_DEVICE_ID_INTEL_BSW_UART1:
+ case PCI_DEVICE_ID_INTEL_BDW_UART1:
+ param->src_id = 3;
+ param->dst_id = 2;
+ break;
+ case PCI_DEVICE_ID_INTEL_BYT_UART2:
+ case PCI_DEVICE_ID_INTEL_BSW_UART2:
+ case PCI_DEVICE_ID_INTEL_BDW_UART2:
+ param->src_id = 5;
+ param->dst_id = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ param->dma_dev = &dma_dev->dev;
+ param->m_master = 0;
+ param->p_master = 1;
+
+ /* TODO: Detect FIFO size automatically for DesignWare 8250 */

+ port->fifosize = 64;
+ up->tx_loadsz = 64;
+
+ lpss->dma_maxburst = 16;
+
+ port->set_termios = byt_set_termios;
+ port->get_mctrl = byt_get_mctrl;
+
+ /* Disable TX counter interrupts */
+ writel(BYT_TX_OVF_INT_MASK, port->membase + BYT_TX_OVF_INT);
+
+ return 0;
+}
+
+#ifdef CONFIG_SERIAL_8250_DMA
+static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
+ .nr_channels = 2,
+ .is_private = true,
+ .is_nollp = true,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 4095,
+ .nr_masters = 1,
+ .data_width = {4},
+};
+
+static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
+{
+ struct uart_8250_dma *dma = &lpss->dma;
+ struct dw_dma_chip *chip = &lpss->dma_chip;
+ struct dw_dma_slave *param = &lpss->dma_param;
+ struct pci_dev *pdev = to_pci_dev(port->dev);
+ int ret;
+
+ chip->dev = &pdev->dev;
+ chip->irq = pdev->irq;
+ chip->regs = pci_ioremap_bar(pdev, 1);
+ chip->pdata = &qrk_serial_dma_pdata;
+
+ /* Falling back to PIO mode if DMA probing fails */
+ ret = dw_dma_probe(chip);
+ if (ret)
+ return;
+
+ /* Special DMA address for UART */
+ dma->rx_dma_addr = 0xfffff000;
+ dma->tx_dma_addr = 0xfffff000;
+
+ param->dma_dev = &pdev->dev;
+ param->src_id = 0;
+ param->dst_id = 1;
+ param->hs_polarity = true;
+
+ lpss->dma_maxburst = 8;
+}
+
+static void qrk_serial_exit_dma(struct lpss8250 *lpss)
+{
+ struct dw_dma_slave *param = &lpss->dma_param;
+
+ if (!param->dma_dev)
+ return;
+ dw_dma_remove(&lpss->dma_chip);
+}
+#else /* CONFIG_SERIAL_8250_DMA */
+static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) {}
+static void qrk_serial_exit_dma(struct lpss8250 *lpss) {}
+#endif /* !CONFIG_SERIAL_8250_DMA */
+
+static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
+{
+ struct pci_dev *pdev = to_pci_dev(port->dev);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ port->irq = pci_irq_vector(pdev, 0);
+
+ qrk_serial_setup_dma(lpss, port);
+ return 0;
+}
+
+static void qrk_serial_exit(struct lpss8250 *lpss)
+{
+ qrk_serial_exit_dma(lpss);
+}
+
+static bool lpss8250_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *dws = param;
+
+ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = dws;
+ return true;
+}
+
+static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port)
+{
+ struct uart_8250_dma *dma = &lpss->dma;
+ struct dw_dma_slave *rx_param, *tx_param;
+ struct device *dev = port->port.dev;
+
+ if (!lpss->dma_param.dma_dev)
+ return 0;
+
+ rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
+ if (!rx_param)
+ return -ENOMEM;
+
+ tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL);
+ if (!tx_param)
+ return -ENOMEM;
+
+ *rx_param = lpss->dma_param;
+ dma->rxconf.src_maxburst = lpss->dma_maxburst;
+
+ *tx_param = lpss->dma_param;
+ dma->txconf.dst_maxburst = lpss->dma_maxburst;
+
+ dma->fn = lpss8250_dma_filter;
+ dma->rx_param = rx_param;
+ dma->tx_param = tx_param;
+
+ port->dma = dma;
+ return 0;
+}
+
+static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct uart_8250_port uart;
+ struct lpss8250 *lpss;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
+ if (!lpss)
+ return -ENOMEM;
+
+ lpss->board = (struct lpss8250_board *)id->driver_data;
+
+ memset(&uart, 0, sizeof(struct uart_8250_port));
+
+ uart.port.dev = &pdev->dev;
+ uart.port.irq = pdev->irq;
+ uart.port.private_data = lpss;
+ uart.port.type = PORT_16550A;
+ uart.port.iotype = UPIO_MEM;
+ uart.port.regshift = 2;
+ uart.port.uartclk = lpss->board->base_baud * 16;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.capabilities = UART_CAP_FIFO | UART_CAP_AFE;
+ uart.port.mapbase = pci_resource_start(pdev, 0);
+ uart.port.membase = pcim_iomap(pdev, 0, 0);
+ if (!uart.port.membase)
+ return -ENOMEM;
+
+ ret = lpss->board->setup(lpss, &uart.port);
+ if (ret)
+ return ret;
+
+ ret = lpss8250_dma_setup(lpss, &uart);
+ if (ret)
+ goto err_exit;
+
+ ret = serial8250_register_8250_port(&uart);
+ if (ret < 0)
+ goto err_exit;
+
+ lpss->line = ret;
+
+ pci_set_drvdata(pdev, lpss);
+ return 0;
+
+err_exit:
+ if (lpss->board->exit)
+ lpss->board->exit(lpss);
+ return ret;
+}
+
+static void lpss8250_remove(struct pci_dev *pdev)
+{
+ struct lpss8250 *lpss = pci_get_drvdata(pdev);
+
+ if (lpss->board->exit)
+ lpss->board->exit(lpss);
+
+ serial8250_unregister_port(lpss->line);
+}
+
+static const struct lpss8250_board byt_board = {
+ .freq = 100000000,
+ .base_baud = 2764800,
+ .setup = byt_serial_setup,
+};
+
+static const struct lpss8250_board qrk_board = {
+ .freq = 44236800,
+ .base_baud = 2764800,
+ .setup = qrk_serial_setup,
+ .exit = qrk_serial_exit,
+};
+
+#define LPSS_DEVICE(id, board) { PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&board }
+
+static const struct pci_device_id pci_ids[] = {
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_QRK_UARTx, qrk_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BYT_UART1, byt_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BYT_UART2, byt_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BSW_UART1, byt_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BSW_UART2, byt_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BDW_UART1, byt_board),
+ LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BDW_UART2, byt_board),
+ { },
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver lpss8250_pci_driver = {
+ .name = "8250_lpss",
+ .id_table = pci_ids,
+ .probe = lpss8250_probe,
+ .remove = lpss8250_remove,
+};
+
+module_pci_driver(lpss8250_pci_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel LPSS UART driver");
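
The byt_set_termios() implementation in this new file (moved here from 8250_pci.c, whose copy is removed further below) programs a fractional divider so that uartclk = (M / N) * 100 MHz lands on baud * 16, first scaling Fuart toward Fref by a power of two and then letting rational_best_approximation() pick the 15-bit M/N pair. A standalone sketch of the same idea; the kernel uses lib/rational.c, while the bounded Stern-Brocot mediant walk below is only an illustration, not the exact algorithm:

#include <stdio.h>

/* Walk the Stern-Brocot tree toward num/den, keeping the last mediant
 * whose terms still fit the bound; stops early on an exact hit. */
static void best_ratio(unsigned long num, unsigned long den,
		       unsigned long bound,
		       unsigned long *m, unsigned long *n)
{
	unsigned long lo_n = 0, lo_d = 1;	/* 0/1 */
	unsigned long hi_n = 1, hi_d = 0;	/* 1/0, i.e. infinity */

	*m = 1;
	*n = 1;
	for (;;) {
		unsigned long mid_n = lo_n + hi_n;
		unsigned long mid_d = lo_d + hi_d;

		if (mid_n > bound || mid_d > bound)
			break;
		*m = mid_n;
		*n = mid_d;
		if ((unsigned long long)mid_n * den ==
		    (unsigned long long)num * mid_d)
			break;				/* exact */
		if ((unsigned long long)mid_n * den <
		    (unsigned long long)num * mid_d) {
			lo_n = mid_n;			/* mediant too small */
			lo_d = mid_d;
		} else {
			hi_n = mid_n;			/* mediant too large */
			hi_d = mid_d;
		}
	}
}

int main(void)
{
	unsigned long fref = 100000000, baud = 115200;
	unsigned long fuart = baud * 16, scale = 1, m, n;

	/* "Get Fuart closer to Fref" by the largest power of two. */
	while (scale * 2 <= fref / fuart)
		scale *= 2;
	fuart *= scale;

	best_ratio(fuart, fref, (1UL << 15) - 1, &m, &n);
	printf("M=%lu N=%lu -> uartclk=%llu\n",
	       m, n, (unsigned long long)fref * m / n);
	return 0;
}
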
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 20c5db2f4264..39c2324484dd 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -99,27 +99,27 @@ static int dnv_handle_irq(struct uart_port *p)
struct uart_8250_port *up = up_to_u8250p(p);
unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
u32 status;
- int ret = IRQ_NONE;
+ int ret = 0;
int err;
if (fisr & BIT(2)) {
err = hsu_dma_get_status(&mid->dma_chip, 1, &status);
if (err > 0) {
serial8250_rx_dma_flush(up);
- ret |= IRQ_HANDLED;
+ ret |= 1;
} else if (err == 0)
ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status);
}
if (fisr & BIT(1)) {
err = hsu_dma_get_status(&mid->dma_chip, 0, &status);
if (err > 0)
- ret |= IRQ_HANDLED;
+ ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status);
}
if (fisr & BIT(0))
ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
- return ret;
+ return IRQ_RETVAL(ret);
}
#define DNV_DMA_CHAN_OFFSET 0x80
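
The dnv_handle_irq() change above stops OR-ing irqreturn_t enum values directly: the sub-handlers it calls report "handled" as plain 0/1 ints, so the function now accumulates those into an int and converts once at the end with IRQ_RETVAL(), which maps any non-zero value to IRQ_HANDLED. A small userspace sketch of the pattern (the typedef and macro below are stand-ins for the kernel definitions):

#include <stdio.h>

typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;
#define IRQ_RETVAL(x)	((x) ? IRQ_HANDLED : IRQ_NONE)

/* Hypothetical sub-handlers reporting "handled" as 0/1 ints, the way
 * hsu_dma_do_irq() and serial8250_handle_irq() are used in the hunk above. */
static int handle_dma(void)  { return 1; }
static int handle_uart(void) { return 0; }

int main(void)
{
	int ret = 0;

	ret |= handle_dma();
	ret |= handle_uart();

	/* One conversion at the end instead of OR-ing enum values. */
	printf("irq return: %d\n", IRQ_RETVAL(ret));
	return 0;
}
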
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 3611ec9bb4fa..ce0cc471bfc3 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -62,7 +62,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 0xffff,
- port->uartclk / 16);
+ port->uartclk);
if (baud <= 115200) {
serial_port_out(port, UART_MTK_HIGHS, 0x0);
@@ -76,10 +76,6 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
quot = DIV_ROUND_UP(port->uartclk, 4 * baud);
} else {
serial_port_out(port, UART_MTK_HIGHS, 0x3);
-
- /* Set to highest baudrate supported */
- if (baud >= 1152000)
- baud = 921600;
quot = DIV_ROUND_UP(port->uartclk, 256 * baud);
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 38963d7bcf84..7a8b5fc81a19 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -195,6 +195,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
switch (port_type) {
case PORT_8250 ... PORT_MAX_8250:
{
+ u32 tx_threshold;
struct uart_8250_port port8250;
memset(&port8250, 0, sizeof(port8250));
port8250.port = port;
@@ -202,6 +203,12 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
if (port.fifosize)
port8250.capabilities = UART_CAP_FIFO;
+ /* Check for TX FIFO threshold & set tx_loadsz */
+ if ((of_property_read_u32(ofdev->dev.of_node, "tx-threshold",
+ &tx_threshold) == 0) &&
+ (tx_threshold < port.fifosize))
+ port8250.tx_loadsz = port.fifosize - tx_threshold;
+
if (of_property_read_bool(ofdev->dev.of_node,
"auto-flow-control"))
port8250.capabilities |= UART_CAP_AFE;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index bc51b32b2774..b98c1578f45a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -21,14 +21,10 @@
#include <linux/serial_core.h>
#include <linux/8250_pci.h>
#include <linux/bitops.h>
-#include <linux/rational.h>
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <linux/dmaengine.h>
-#include <linux/platform_data/dma-dw.h>
-
#include "8250.h"
/*
@@ -1349,160 +1345,6 @@ ce4100_serial_setup(struct serial_private *priv,
return ret;
}
-#define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a
-#define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c
-
-#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
-#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
-
-#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
-#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
-
-#define BYT_PRV_CLK 0x800
-#define BYT_PRV_CLK_EN (1 << 0)
-#define BYT_PRV_CLK_M_VAL_SHIFT 1
-#define BYT_PRV_CLK_N_VAL_SHIFT 16
-#define BYT_PRV_CLK_UPDATE (1 << 31)
-
-#define BYT_TX_OVF_INT 0x820
-#define BYT_TX_OVF_INT_MASK (1 << 1)
-
-static void
-byt_set_termios(struct uart_port *p, struct ktermios *termios,
- struct ktermios *old)
-{
- unsigned int baud = tty_termios_baud_rate(termios);
- unsigned long fref = 100000000, fuart = baud * 16;
- unsigned long w = BIT(15) - 1;
- unsigned long m, n;
- u32 reg;
-
- /* Gracefully handle the B0 case: fall back to B9600 */
- fuart = fuart ? fuart : 9600 * 16;
-
- /* Get Fuart closer to Fref */
- fuart *= rounddown_pow_of_two(fref / fuart);
-
- /*
- * For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the
- * dividers must be adjusted.
- *
- * uartclk = (m / n) * 100 MHz, where m <= n
- */
- rational_best_approximation(fuart, fref, w, w, &m, &n);
- p->uartclk = fuart;
-
- /* Reset the clock */
- reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT);
- writel(reg, p->membase + BYT_PRV_CLK);
- reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
- writel(reg, p->membase + BYT_PRV_CLK);
-
- p->status &= ~UPSTAT_AUTOCTS;
- if (termios->c_cflag & CRTSCTS)
- p->status |= UPSTAT_AUTOCTS;
-
- serial8250_do_set_termios(p, termios, old);
-}
-
-static bool byt_dma_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_slave *dws = param;
-
- if (dws->dma_dev != chan->device->dev)
- return false;
-
- chan->private = dws;
- return true;
-}
-
-static unsigned int
-byt_get_mctrl(struct uart_port *port)
-{
- unsigned int ret = serial8250_do_get_mctrl(port);
-
- /* Force DCD and DSR signals to permanently be reported as active. */
- ret |= TIOCM_CAR | TIOCM_DSR;
-
- return ret;
-}
-
-static int
-byt_serial_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- struct pci_dev *pdev = priv->dev;
- struct device *dev = port->port.dev;
- struct uart_8250_dma *dma;
- struct dw_dma_slave *tx_param, *rx_param;
- struct pci_dev *dma_dev;
- int ret;
-
- dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
- if (!dma)
- return -ENOMEM;
-
- tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL);
- if (!tx_param)
- return -ENOMEM;
-
- rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
- if (!rx_param)
- return -ENOMEM;
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_BYT_UART1:
- case PCI_DEVICE_ID_INTEL_BSW_UART1:
- case PCI_DEVICE_ID_INTEL_BDW_UART1:
- rx_param->src_id = 3;
- tx_param->dst_id = 2;
- break;
- case PCI_DEVICE_ID_INTEL_BYT_UART2:
- case PCI_DEVICE_ID_INTEL_BSW_UART2:
- case PCI_DEVICE_ID_INTEL_BDW_UART2:
- rx_param->src_id = 5;
- tx_param->dst_id = 4;
- break;
- default:
- return -EINVAL;
- }
-
- rx_param->m_master = 0;
- rx_param->p_master = 1;
-
- dma->rxconf.src_maxburst = 16;
-
- tx_param->m_master = 0;
- tx_param->p_master = 1;
-
- dma->txconf.dst_maxburst = 16;
-
- dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
- rx_param->dma_dev = &dma_dev->dev;
- tx_param->dma_dev = &dma_dev->dev;
-
- dma->fn = byt_dma_filter;
- dma->rx_param = rx_param;
- dma->tx_param = tx_param;
-
- ret = pci_default_setup(priv, board, port, idx);
- port->port.iotype = UPIO_MEM;
- port->port.type = PORT_16550A;
- port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
- port->port.set_termios = byt_set_termios;
- port->port.get_mctrl = byt_get_mctrl;
- port->port.fifosize = 64;
- port->tx_loadsz = 64;
- port->dma = dma;
- port->capabilities = UART_CAP_FIFO | UART_CAP_AFE;
-
- /* Disable Tx counter interrupts */
- writel(BYT_TX_OVF_INT_MASK, port->port.membase + BYT_TX_OVF_INT);
-
- return ret;
-}
-
static int
pci_omegapci_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1741,6 +1583,19 @@ static int pci_eg20t_init(struct pci_dev *dev)
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+#define UART_EXAR_MPIOINT_7_0 0x8f /* MPIOINT[7:0] */
+#define UART_EXAR_MPIOLVL_7_0 0x90 /* MPIOLVL[7:0] */
+#define UART_EXAR_MPIO3T_7_0 0x91 /* MPIO3T[7:0] */
+#define UART_EXAR_MPIOINV_7_0 0x92 /* MPIOINV[7:0] */
+#define UART_EXAR_MPIOSEL_7_0 0x93 /* MPIOSEL[7:0] */
+#define UART_EXAR_MPIOOD_7_0 0x94 /* MPIOOD[7:0] */
+#define UART_EXAR_MPIOINT_15_8 0x95 /* MPIOINT[15:8] */
+#define UART_EXAR_MPIOLVL_15_8 0x96 /* MPIOLVL[15:8] */
+#define UART_EXAR_MPIO3T_15_8 0x97 /* MPIO3T[15:8] */
+#define UART_EXAR_MPIOINV_15_8 0x98 /* MPIOINV[15:8] */
+#define UART_EXAR_MPIOSEL_15_8 0x99 /* MPIOSEL[15:8] */
+#define UART_EXAR_MPIOOD_15_8 0x9a /* MPIOOD[15:8] */
+
static int
pci_xr17c154_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1783,18 +1638,18 @@ pci_xr17v35x_setup(struct serial_private *priv,
* Setup Multipurpose Input/Output pins.
*/
if (idx == 0) {
- writeb(0x00, p + 0x8f); /*MPIOINT[7:0]*/
- writeb(0x00, p + 0x90); /*MPIOLVL[7:0]*/
- writeb(0x00, p + 0x91); /*MPIO3T[7:0]*/
- writeb(0x00, p + 0x92); /*MPIOINV[7:0]*/
- writeb(0x00, p + 0x93); /*MPIOSEL[7:0]*/
- writeb(0x00, p + 0x94); /*MPIOOD[7:0]*/
- writeb(0x00, p + 0x95); /*MPIOINT[15:8]*/
- writeb(0x00, p + 0x96); /*MPIOLVL[15:8]*/
- writeb(0x00, p + 0x97); /*MPIO3T[15:8]*/
- writeb(0x00, p + 0x98); /*MPIOINV[15:8]*/
- writeb(0x00, p + 0x99); /*MPIOSEL[15:8]*/
- writeb(0x00, p + 0x9a); /*MPIOOD[15:8]*/
+ writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
+ writeb(0x00, p + UART_EXAR_MPIO3T_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOINV_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOSEL_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOOD_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOINT_15_8);
+ writeb(0x00, p + UART_EXAR_MPIOLVL_15_8);
+ writeb(0x00, p + UART_EXAR_MPIO3T_15_8);
+ writeb(0x00, p + UART_EXAR_MPIOINV_15_8);
+ writeb(0x00, p + UART_EXAR_MPIOSEL_15_8);
+ writeb(0x00, p + UART_EXAR_MPIOOD_15_8);
}
writeb(0x00, p + UART_EXAR_8XMODE);
writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
@@ -1830,20 +1685,20 @@ pci_fastcom335_setup(struct serial_private *priv,
switch (priv->dev->device) {
case PCI_DEVICE_ID_COMMTECH_4222PCI335:
case PCI_DEVICE_ID_COMMTECH_4224PCI335:
- writeb(0x78, p + 0x90); /* MPIOLVL[7:0] */
- writeb(0x00, p + 0x92); /* MPIOINV[7:0] */
- writeb(0x00, p + 0x93); /* MPIOSEL[7:0] */
+ writeb(0x78, p + UART_EXAR_MPIOLVL_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOINV_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOSEL_7_0);
break;
case PCI_DEVICE_ID_COMMTECH_2324PCI335:
case PCI_DEVICE_ID_COMMTECH_2328PCI335:
- writeb(0x00, p + 0x90); /* MPIOLVL[7:0] */
- writeb(0xc0, p + 0x92); /* MPIOINV[7:0] */
- writeb(0xc0, p + 0x93); /* MPIOSEL[7:0] */
+ writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
+ writeb(0xc0, p + UART_EXAR_MPIOINV_7_0);
+ writeb(0xc0, p + UART_EXAR_MPIOSEL_7_0);
break;
}
- writeb(0x00, p + 0x8f); /* MPIOINT[7:0] */
- writeb(0x00, p + 0x91); /* MPIO3T[7:0] */
- writeb(0x00, p + 0x94); /* MPIOOD[7:0] */
+ writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
+ writeb(0x00, p + UART_EXAR_MPIO3T_7_0);
+ writeb(0x00, p + UART_EXAR_MPIOOD_7_0);
}
writeb(0x00, p + UART_EXAR_8XMODE);
writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
@@ -1934,7 +1789,6 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
-#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936
#define PCI_VENDOR_ID_SUNIX 0x1fd4
#define PCI_DEVICE_ID_SUNIX_1999 0x1999
@@ -2078,48 +1932,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = kt_serial_setup,
},
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BYT_UART1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BYT_UART2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BSW_UART1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BSW_UART2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = byt_serial_setup,
- },
/*
* ITE
*/
@@ -2992,8 +2804,6 @@ enum pci_board_num_t {
pbn_ADDIDATA_PCIe_4_3906250,
pbn_ADDIDATA_PCIe_8_3906250,
pbn_ce4100_1_115200,
- pbn_byt,
- pbn_qrk,
pbn_omegapci,
pbn_NETMOS9900_2s_115200,
pbn_brcm_trumanage,
@@ -3769,18 +3579,6 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
- [pbn_byt] = {
- .flags = FL_BASE0,
- .num_ports = 1,
- .base_baud = 2764800,
- .reg_shift = 2,
- },
- [pbn_qrk] = {
- .flags = FL_BASE0,
- .num_ports = 1,
- .base_baud = 2764800,
- .reg_shift = 2,
- },
[pbn_omegapci] = {
.flags = FL_BASE0,
.num_ports = 8,
@@ -3892,6 +3690,15 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(INTEL, 0x081d), },
{ PCI_VDEVICE(INTEL, 0x1191), },
{ PCI_VDEVICE(INTEL, 0x19d8), },
+
+ /* Intel platforms with DesignWare UART */
+ { PCI_VDEVICE(INTEL, 0x0936), },
+ { PCI_VDEVICE(INTEL, 0x0f0a), },
+ { PCI_VDEVICE(INTEL, 0x0f0c), },
+ { PCI_VDEVICE(INTEL, 0x228a), },
+ { PCI_VDEVICE(INTEL, 0x228c), },
+ { PCI_VDEVICE(INTEL, 0x9ce3), },
+ { PCI_VDEVICE(INTEL, 0x9ce4), },
};
/*
@@ -5659,41 +5466,8 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ce4100_1_115200 },
- /* Intel BayTrail */
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART1,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT_UART2,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW_UART1,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW_UART2,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
-
- /* Intel Broadwell */
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
- PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
- pbn_byt },
/*
- * Intel Quark x1000
- */
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_qrk },
- /*
* Cronyx Omega PCI
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index bdfa659b9606..1bfb6fdbaa20 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -178,7 +178,7 @@ static const struct serial8250_config uart_config[] = {
.fifo_size = 16,
.tx_loadsz = 16,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
- .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ .flags = UART_CAP_FIFO /* | UART_CAP_AFE */,
},
[PORT_U6_16550A] = {
.name = "U6_16550A",
@@ -585,11 +585,11 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put);
*/
int serial8250_em485_init(struct uart_8250_port *p)
{
- if (p->em485 != NULL)
+ if (p->em485)
return 0;
p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
- if (p->em485 == NULL)
+ if (!p->em485)
return -ENOMEM;
setup_timer(&p->em485->stop_tx_timer,
@@ -619,7 +619,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_init);
*/
void serial8250_em485_destroy(struct uart_8250_port *p)
{
- if (p->em485 == NULL)
+ if (!p->em485)
return;
del_timer(&p->em485->start_tx_timer);
@@ -1402,10 +1402,8 @@ static void serial8250_stop_rx(struct uart_port *port)
static void __do_stop_tx_rs485(struct uart_8250_port *p)
{
- if (!p->em485)
- return;
-
serial8250_em485_rts_after_send(p);
+
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
@@ -1414,12 +1412,8 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_fifos(p);
- serial8250_rpm_get(p);
-
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
-
- serial8250_rpm_put(p);
}
}
@@ -1429,6 +1423,7 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg)
struct uart_8250_em485 *em485 = p->em485;
unsigned long flags;
+ serial8250_rpm_get(p);
spin_lock_irqsave(&p->port.lock, flags);
if (em485 &&
em485->active_timer == &em485->stop_tx_timer) {
@@ -1436,15 +1431,13 @@ static void serial8250_em485_handle_stop_tx(unsigned long arg)
em485->active_timer = NULL;
}
spin_unlock_irqrestore(&p->port.lock, flags);
+ serial8250_rpm_put(p);
}
static void __stop_tx_rs485(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
- if (!em485)
- return;
-
/*
* __do_stop_tx_rs485 is going to set RTS according to config
* AND flush RX FIFO if required.
@@ -1475,7 +1468,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
unsigned char lsr = serial_in(p, UART_LSR);
/*
* To provide required timing and allow FIFO transfer,
- * __stop_tx_rs485 must be called only when both FIFO and
+ * __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. It is for device driver to enable
* interrupt on TEMT.
*/
@@ -1484,9 +1477,10 @@ static inline void __stop_tx(struct uart_8250_port *p)
del_timer(&em485->start_tx_timer);
em485->active_timer = NULL;
+
+ __stop_tx_rs485(p);
}
__do_stop_tx(p);
- __stop_tx_rs485(p);
}
static void serial8250_stop_tx(struct uart_port *port)
@@ -1876,6 +1870,30 @@ static int exar_handle_irq(struct uart_port *port)
return ret;
}
+/*
+ * Newer 16550 compatible parts such as the SC16C650 & Altera 16550 Soft IP
+ * have a programmable TX threshold that triggers the THRE interrupt in
+ * the IIR register. In this case, the THRE interrupt indicates the FIFO
+ * has space available. Load it up with tx_loadsz bytes.
+ */
+static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int iir = serial_port_in(port, UART_IIR);
+
+ /* TX Threshold IRQ triggered so load up FIFO */
+ if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+ serial8250_tx_chars(up);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ iir = serial_port_in(port, UART_IIR);
+ return serial8250_handle_irq(port, iir);
+}
+
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -1988,6 +2006,7 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
if (--tmout == 0)
break;
udelay(1);
+ touch_nmi_watchdog();
}
/* Wait up to 1s for flow control if necessary */
@@ -2164,6 +2183,25 @@ int serial8250_do_startup(struct uart_port *port)
serial_port_out(port, UART_LCR, 0);
}
+ /*
+ * For the Altera 16550 variants, set TX threshold trigger level.
+ */
+ if (((port->type == PORT_ALTR_16550_F32) ||
+ (port->type == PORT_ALTR_16550_F64) ||
+ (port->type == PORT_ALTR_16550_F128)) && (port->fifosize > 1)) {
+ /* Bounds checking of TX threshold (valid 0 to fifosize-2) */
+ if ((up->tx_loadsz < 2) || (up->tx_loadsz > port->fifosize)) {
+ pr_err("ttyS%d TX FIFO Threshold errors, skipping\n",
+ serial_index(port));
+ } else {
+ serial_port_out(port, UART_ALTR_AFR,
+ UART_ALTR_EN_TXFIFO_LW);
+ serial_port_out(port, UART_ALTR_TX_LOW,
+ port->fifosize - up->tx_loadsz);
+ port->handle_irq = serial8250_tx_threshold_handle_irq;
+ }
+ }
+
if (port->irq) {
unsigned char iir1;
/*
@@ -2499,8 +2537,6 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- unsigned int tolerance = port->uartclk / 100;
-
/*
* Ask the core to calculate the divisor for us.
* Allow 1% tolerance at the upper limit so uart clks marginally
@@ -2509,7 +2545,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
return uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 0xffff,
- (port->uartclk + tolerance) / 16);
+ port->uartclk);
}
void
@@ -2546,12 +2582,9 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
/*
* MCR-based auto flow control. When AFE is enabled, RTS will be
* deasserted when the receive FIFO contains more characters than
- * the trigger, or the MCR RTS bit is cleared. In the case where
- * the remote UART is not using CTS auto flow control, we must
- * have sufficient FIFO entries for the latency of the remote
- * UART to respond. IOW, at least 32 bytes of FIFO.
+ * the trigger, or the MCR RTS bit is cleared.
*/
- if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
+ if (up->capabilities & UART_CAP_AFE) {
up->mcr &= ~UART_MCR_AFE;
if (termios->c_cflag & CRTSCTS)
up->mcr |= UART_MCR_AFE;
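
Two of the hunks above cooperate on the new TX-threshold support: the 8250_of.c change earlier reads a "tx-threshold" property and sets tx_loadsz = fifosize - tx_threshold, and serial8250_do_startup() above then programs UART_ALTR_TX_LOW with fifosize - tx_loadsz and installs serial8250_tx_threshold_handle_irq(), which refills the FIFO when the THRE interrupt reports that much free space. A worked example of the arithmetic with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical Altera 16550 instance: 64-byte FIFO, with the THRE
	 * interrupt expected to fire while 16 bytes are still queued. */
	unsigned int fifosize = 64, tx_threshold = 16;

	unsigned int tx_loadsz   = fifosize - tx_threshold;	/* 8250_of.c  */
	unsigned int altr_tx_low = fifosize - tx_loadsz;	/* do_startup */

	/* Each THRE interrupt may then safely load tx_loadsz = 48 bytes. */
	printf("tx_loadsz=%u UART_ALTR_TX_LOW=%u\n", tx_loadsz, altr_tx_low);
	return 0;
}
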
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 7c6f7afca5dd..899834776b36 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -120,7 +120,6 @@ config SERIAL_8250_PCI
tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
default SERIAL_8250
- select RATIONAL
help
This builds standard PCI serial support. You may be able to
disable this feature if you only need legacy serial support.
@@ -402,6 +401,21 @@ config SERIAL_8250_INGENIC
If you have a system using an Ingenic SoC and wish to make use of
its UARTs, say Y to this option. If unsure, say N.
+config SERIAL_8250_LPSS
+ tristate "Support for serial ports on Intel LPSS platforms" if EXPERT
+ default SERIAL_8250
+ depends on SERIAL_8250 && PCI
+ depends on X86 || COMPILE_TEST
+ select DW_DMAC_CORE if SERIAL_8250_DMA
+ select DW_DMAC_PCI if (SERIAL_8250_DMA && X86_INTEL_LPSS)
+ select RATIONAL
+ help
+ Selecting this option will enable handling of the extra features
+ present on the UART found on various Intel platforms such as:
+ - Intel Baytrail SoC
+ - Intel Braswell SoC
+ - Intel Quark X1000 SoC
+
config SERIAL_8250_MID
tristate "Support for serial ports on Intel MID platforms" if EXPERT
default SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 367d403d28d5..276c6fb60337 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
+obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 518db24a5b36..c7831407a882 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1380,7 +1380,7 @@ config SERIAL_IFX6X60
config SERIAL_PCH_UART
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
select SERIAL_CORE
help
This driver is for PCH(Platform controller Hub) UART of Intel EG20T
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 32df2a0cb060..e409d7dac7ab 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -280,7 +280,7 @@ static int altera_jtaguart_verify_port(struct uart_port *port,
/*
* Define the basic serial functions we support.
*/
-static struct uart_ops altera_jtaguart_ops = {
+static const struct uart_ops altera_jtaguart_ops = {
.tx_empty = altera_jtaguart_tx_empty,
.get_mctrl = altera_jtaguart_get_mctrl,
.set_mctrl = altera_jtaguart_set_mctrl,
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 61b607f2488e..820a74208696 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -404,7 +404,7 @@ static void altera_uart_poll_put_char(struct uart_port *port, unsigned char c)
/*
* Define the basic serial functions we support.
*/
-static struct uart_ops altera_uart_ops = {
+static const struct uart_ops altera_uart_ops = {
.tx_empty = altera_uart_tx_empty,
.get_mctrl = altera_uart_get_mctrl,
.set_mctrl = altera_uart_set_mctrl,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a9e213387a7..e2c33b9528d8 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -93,6 +93,10 @@ static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
struct vendor_data {
const u16 *reg_offset;
unsigned int ifls;
+ unsigned int fr_busy;
+ unsigned int fr_dsr;
+ unsigned int fr_cts;
+ unsigned int fr_ri;
bool access_32b;
bool oversampling;
bool dma_threshold;
@@ -111,6 +115,10 @@ static unsigned int get_fifosize_arm(struct amba_device *dev)
static struct vendor_data vendor_arm = {
.reg_offset = pl011_std_offsets,
.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+ .fr_busy = UART01x_FR_BUSY,
+ .fr_dsr = UART01x_FR_DSR,
+ .fr_cts = UART01x_FR_CTS,
+ .fr_ri = UART011_FR_RI,
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
@@ -121,6 +129,10 @@ static struct vendor_data vendor_arm = {
static struct vendor_data vendor_sbsa = {
.reg_offset = pl011_std_offsets,
+ .fr_busy = UART01x_FR_BUSY,
+ .fr_dsr = UART01x_FR_DSR,
+ .fr_cts = UART01x_FR_CTS,
+ .fr_ri = UART011_FR_RI,
.access_32b = true,
.oversampling = false,
.dma_threshold = false,
@@ -164,6 +176,10 @@ static unsigned int get_fifosize_st(struct amba_device *dev)
static struct vendor_data vendor_st = {
.reg_offset = pl011_st_offsets,
.ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+ .fr_busy = UART01x_FR_BUSY,
+ .fr_dsr = UART01x_FR_DSR,
+ .fr_cts = UART01x_FR_CTS,
+ .fr_ri = UART011_FR_RI,
.oversampling = true,
.dma_threshold = true,
.cts_event_workaround = true,
@@ -188,11 +204,20 @@ static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
[REG_DMACR] = ZX_UART011_DMACR,
};
-static struct vendor_data vendor_zte __maybe_unused = {
+static unsigned int get_fifosize_zte(struct amba_device *dev)
+{
+ return 16;
+}
+
+static struct vendor_data vendor_zte = {
.reg_offset = pl011_zte_offsets,
.access_32b = true,
.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .get_fifosize = get_fifosize_arm,
+ .fr_busy = ZX_UART01x_FR_BUSY,
+ .fr_dsr = ZX_UART01x_FR_DSR,
+ .fr_cts = ZX_UART01x_FR_CTS,
+ .fr_ri = ZX_UART011_FR_RI,
+ .get_fifosize = get_fifosize_zte,
};
/* Deals with DMA transactions */
@@ -1167,7 +1192,7 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
return;
/* Disable RX and TX DMA */
- while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
+ while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
spin_lock_irq(&uap->port.lock);
@@ -1416,11 +1441,12 @@ static void pl011_modem_status(struct uart_amba_port *uap)
if (delta & UART01x_FR_DCD)
uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
- if (delta & UART01x_FR_DSR)
+ if (delta & uap->vendor->fr_dsr)
uap->port.icount.dsr++;
- if (delta & UART01x_FR_CTS)
- uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
+ if (delta & uap->vendor->fr_cts)
+ uart_handle_cts_change(&uap->port,
+ status & uap->vendor->fr_cts);
wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
@@ -1493,7 +1519,8 @@ static unsigned int pl011_tx_empty(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int status = pl011_read(uap, REG_FR);
- return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
+ return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
+ 0 : TIOCSER_TEMT;
}
static unsigned int pl011_get_mctrl(struct uart_port *port)
@@ -1508,9 +1535,9 @@ static unsigned int pl011_get_mctrl(struct uart_port *port)
result |= tiocmbit
TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
- TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
- TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
- TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
+ TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
+ TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
+ TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
return result;
}
@@ -2191,7 +2218,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the TCR
*/
- while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
+ while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
if (!uap->vendor->always_enabled)
pl011_write(old_cr, uap, REG_CR);
@@ -2555,7 +2582,8 @@ static int sbsa_uart_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot obtain irq\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "cannot obtain irq\n");
return ret;
}
uap->port.irq = ret;
@@ -2622,6 +2650,11 @@ static struct amba_id pl011_ids[] = {
.mask = 0x00ffffff,
.data = &vendor_st,
},
+ {
+ .id = AMBA_LINUX_ID(0x00, 0x1, 0xffe),
+ .mask = 0x00ffffff,
+ .data = &vendor_zte,
+ },
{ 0, 0 },
};
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 3a1de5c87cb4..5ac06fcaa9c6 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -464,7 +464,7 @@ static int arc_serial_poll_getchar(struct uart_port *port)
}
#endif
-static struct uart_ops arc_serial_pops = {
+static const struct uart_ops arc_serial_pops = {
.tx_empty = arc_serial_tx_empty,
.set_mctrl = arc_serial_set_mctrl,
.get_mctrl = arc_serial_get_mctrl,
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2eaa18ddef61..fd8aa1f4ba78 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,7 @@ struct atmel_uart_port {
u32 rts_low;
bool ms_irq_enabled;
u32 rtor; /* address of receiver timeout register if it exists */
+ bool has_frac_baudrate;
bool has_hw_timer;
struct timer_list uart_timer;
@@ -1634,8 +1635,8 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
if (np) {
/* DMA/PDC usage specification */
- if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
- if (of_get_property(np, "dmas", NULL)) {
+ if (of_property_read_bool(np, "atmel,use-dma-rx")) {
+ if (of_property_read_bool(np, "dmas")) {
atmel_port->use_dma_rx = true;
atmel_port->use_pdc_rx = false;
} else {
@@ -1647,8 +1648,8 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
atmel_port->use_pdc_rx = false;
}
- if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
- if (of_get_property(np, "dmas", NULL)) {
+ if (of_property_read_bool(np, "atmel,use-dma-tx")) {
+ if (of_property_read_bool(np, "dmas")) {
atmel_port->use_dma_tx = true;
atmel_port->use_pdc_tx = false;
} else {
@@ -1745,6 +1746,11 @@ static void atmel_get_ip_name(struct uart_port *port)
dbgu_uart = 0x44424755; /* DBGU */
new_uart = 0x55415254; /* UART */
+ /*
+ * Only USART devices from the at91sam9260 SoC implement fractional
+ * baudrate.
+ */
+ atmel_port->has_frac_baudrate = false;
atmel_port->has_hw_timer = false;
if (name == new_uart) {
@@ -1753,6 +1759,7 @@ static void atmel_get_ip_name(struct uart_port *port)
atmel_port->rtor = ATMEL_UA_RTOR;
} else if (name == usart) {
dev_dbg(port->dev, "Usart\n");
+ atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
atmel_port->rtor = ATMEL_US_RTOR;
} else if (name == dbgu_uart) {
@@ -1764,6 +1771,7 @@ static void atmel_get_ip_name(struct uart_port *port)
case 0x302:
case 0x10213:
dev_dbg(port->dev, "This version is usart\n");
+ atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
atmel_port->rtor = ATMEL_US_RTOR;
break;
@@ -1929,6 +1937,9 @@ static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /* Disable modem control lines interrupts */
+ atmel_disable_ms(port);
+
/* Disable interrupts at device level */
atmel_uart_writel(port, ATMEL_US_IDR, -1);
@@ -1979,8 +1990,6 @@ static void atmel_shutdown(struct uart_port *port)
*/
free_irq(port->irq, port);
- atmel_port->ms_irq_enabled = false;
-
atmel_flush_buffer(port);
}
@@ -2025,8 +2034,9 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
- unsigned int old_mode, mode, imr, quot, baud;
+ unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
/* save the current mode register */
mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
@@ -2036,12 +2046,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
ATMEL_US_PAR | ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
- quot = uart_get_divisor(port, baud);
-
- if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
- quot /= 8;
- mode |= ATMEL_US_USCLKS_MCK_DIV8;
- }
/* byte size */
switch (termios->c_cflag & CSIZE) {
@@ -2160,7 +2164,31 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_CR, rts_state);
}
- /* set the baud rate */
+ /*
+ * Set the baud rate:
+ * Fractional baudrate allows to setup output frequency more
+ * accurately. This feature is enabled only when using normal mode.
+ * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
+ * Currently, OVER is always set to 0 so we get
+ * baudrate = selected clock / (16 * (CD + FP / 8))
+ * then
+ * 8 * CD + FP = selected clock / (2 * baudrate)
+ */
+ if (atmel_port->has_frac_baudrate &&
+ (mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_NORMAL) {
+ div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
+ cd = div >> 3;
+ fp = div & ATMEL_US_FP_MASK;
+ } else {
+ cd = uart_get_divisor(port, baud);
+ }
+
+ if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
+ cd /= 8;
+ mode |= ATMEL_US_USCLKS_MCK_DIV8;
+ }
+ quot = cd | fp << ATMEL_US_FP_OFFSET;
+
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
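The divisor math added above can be checked in isolation. The following is a minimal, hedged C sketch of the normal-mode fractional computation described in the comment; the 3-bit FP field, its 0x7 mask and bit-16 position in BRGR are assumptions mirroring the usual ATMEL_US_FP_* definitions, not values taken from this diff.

#include <stdio.h>

#define FP_MASK   0x7u   /* assumed: 3-bit fractional part */
#define FP_OFFSET 16u    /* assumed: BRGR bit position of FP */

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

/* Compute a BRGR value for a USART in normal mode (OVER = 0). */
static unsigned int brgr_for(unsigned int uartclk, unsigned int baud)
{
	/* 8 * CD + FP = uartclk / (2 * baud), per the comment above */
	unsigned int div = div_round_closest(uartclk, baud * 2);
	unsigned int cd = div >> 3;
	unsigned int fp = div & FP_MASK;

	return cd | (fp << FP_OFFSET);
}

int main(void)
{
	/* 132.096 MHz peripheral clock at 115200 baud: CD = 71, FP = 5 */
	printf("BRGR = 0x%x\n", brgr_for(132096000, 115200));
	return 0;
}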
@@ -2292,7 +2320,7 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
}
#endif
-static struct uart_ops atmel_pops = {
+static const struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
.get_mctrl = atmel_get_mctrl,
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 5108fab953aa..583c9a0c7ecc 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -631,7 +631,7 @@ static int bcm_uart_verify_port(struct uart_port *port,
}
/* serial core callbacks */
-static struct uart_ops bcm_uart_ops = {
+static const struct uart_ops bcm_uart_ops = {
.tx_empty = bcm_uart_tx_empty,
.get_mctrl = bcm_uart_get_mctrl,
.set_mctrl = bcm_uart_set_mctrl,
diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-arm-semihost.c
index 383db10fbb49..6bbeb699777c 100644
--- a/drivers/tty/serial/earlycon-arm-semihost.c
+++ b/drivers/tty/serial/earlycon-arm-semihost.c
@@ -53,7 +53,8 @@ static void smh_write(struct console *con, const char *s, unsigned n)
uart_console_write(&dev->port, s, n, smh_putc);
}
-int __init early_smh_setup(struct earlycon_device *device, const char *opt)
+static int
+__init early_smh_setup(struct earlycon_device *device, const char *opt)
{
device->con->write = smh_write;
return 0;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 067783f0523c..c3651540e1ba 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -21,6 +21,7 @@
#include <linux/sizes.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/acpi.h>
#ifdef CONFIG_FIX_EARLYCON_MEM
#include <asm/fixmap.h>
@@ -38,7 +39,7 @@ static struct earlycon_device early_console_dev = {
.con = &early_con,
};
-static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
+static void __iomem * __init earlycon_map(resource_size_t paddr, size_t size)
{
void __iomem *base;
#ifdef CONFIG_FIX_EARLYCON_MEM
@@ -49,8 +50,7 @@ static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
base = ioremap(paddr, size);
#endif
if (!base)
- pr_err("%s: Couldn't map 0x%llx\n", __func__,
- (unsigned long long)paddr);
+ pr_err("%s: Couldn't map %pa\n", __func__, &paddr);
return base;
}
@@ -92,7 +92,7 @@ static int __init parse_options(struct earlycon_device *device, char *options)
{
struct uart_port *port = &device->port;
int length;
- unsigned long addr;
+ resource_size_t addr;
if (uart_parse_earlycon(options, &port->iotype, &addr, &options))
return -EINVAL;
@@ -199,6 +199,14 @@ int __init setup_earlycon(char *buf)
return -ENOENT;
}
+/*
+ * When CONFIG_ACPI_SPCR_TABLE is defined, "earlycon" without parameters on
+ * the command line does not start DT earlycon immediately; instead it defers
+ * starting it until the DT/ACPI decision is made. At that time, parse_spcr()
+ * is called if ACPI is enabled, otherwise early_init_dt_scan_chosen_stdout().
+ */
+bool earlycon_init_is_deferred __initdata;
+
/* early_param wrapper for setup_earlycon() */
static int __init param_setup_earlycon(char *buf)
{
@@ -208,8 +216,14 @@ static int __init param_setup_earlycon(char *buf)
* Just 'earlycon' is a valid param for devicetree earlycons;
* don't generate a warning from parse_early_params() in that case
*/
- if (!buf || !buf[0])
- return 0;
+ if (!buf || !buf[0]) {
+ if (IS_ENABLED(CONFIG_ACPI_SPCR_TABLE)) {
+ earlycon_init_is_deferred = true;
+ return 0;
+ } else {
+ return early_init_dt_scan_chosen_stdout();
+ }
+ }
err = setup_earlycon(buf);
if (err == -ENOENT || err == -EALREADY)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 7f95f782a485..de9d5107c00a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -224,7 +224,8 @@
#define UARTWATER_TXWATER_OFF 0
#define UARTWATER_RXWATER_OFF 16
-#define FSL_UART_RX_DMA_BUFFER_SIZE 64
+/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
+#define DMA_RX_TIMEOUT (10)
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
@@ -243,18 +244,18 @@ struct lpuart_port {
struct dma_chan *dma_rx_chan;
struct dma_async_tx_descriptor *dma_tx_desc;
struct dma_async_tx_descriptor *dma_rx_desc;
- dma_addr_t dma_tx_buf_bus;
- dma_addr_t dma_rx_buf_bus;
dma_cookie_t dma_tx_cookie;
dma_cookie_t dma_rx_cookie;
- unsigned char *dma_tx_buf_virt;
- unsigned char *dma_rx_buf_virt;
unsigned int dma_tx_bytes;
unsigned int dma_rx_bytes;
- int dma_tx_in_progress;
- int dma_rx_in_progress;
+ bool dma_tx_in_progress;
unsigned int dma_rx_timeout;
struct timer_list lpuart_timer;
+ struct scatterlist rx_sgl, tx_sgl[2];
+ struct circ_buf rx_ring;
+ int rx_dma_rng_buf_len;
+ unsigned int dma_tx_nents;
+ wait_queue_head_t dma_wait;
};
static const struct of_device_id lpuart_dt_ids[] = {
@@ -270,7 +271,6 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
/* Forward declare this for the dma callbacks*/
static void lpuart_dma_tx_complete(void *arg);
-static void lpuart_dma_rx_complete(void *arg);
static u32 lpuart32_read(void __iomem *addr)
{
@@ -316,141 +316,103 @@ static void lpuart32_stop_rx(struct uart_port *port)
lpuart32_write(temp & ~UARTCTRL_RE, port->membase + UARTCTRL);
}
-static void lpuart_copy_rx_to_tty(struct lpuart_port *sport,
- struct tty_port *tty, int count)
+static void lpuart_dma_tx(struct lpuart_port *sport)
{
- int copied;
-
- sport->port.icount.rx += count;
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ struct scatterlist *sgl = sport->tx_sgl;
+ struct device *dev = sport->port.dev;
+ int ret;
- if (!tty) {
- dev_err(sport->port.dev, "No tty port\n");
+ if (sport->dma_tx_in_progress)
return;
- }
- dma_sync_single_for_cpu(sport->port.dev, sport->dma_rx_buf_bus,
- FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
- copied = tty_insert_flip_string(tty,
- ((unsigned char *)(sport->dma_rx_buf_virt)), count);
+ sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
- if (copied != count) {
- WARN_ON(1);
- dev_err(sport->port.dev, "RxData copy to tty layer failed\n");
+ if (xmit->tail < xmit->head) {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
+ } else {
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(sgl + 1, xmit->buf, xmit->head);
}
- dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
- FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-}
-
-static void lpuart_pio_tx(struct lpuart_port *sport)
-{
- struct circ_buf *xmit = &sport->port.state->xmit;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- while (!uart_circ_empty(xmit) &&
- readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size) {
- writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- sport->port.icount.tx++;
+ ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(dev, "DMA mapping error for TX.\n");
+ return;
}
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
-
- if (uart_circ_empty(xmit))
- writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
- sport->port.membase + UARTCR5);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
-
-static int lpuart_dma_tx(struct lpuart_port *sport, unsigned long count)
-{
- struct circ_buf *xmit = &sport->port.state->xmit;
- dma_addr_t tx_bus_addr;
-
- dma_sync_single_for_device(sport->port.dev, sport->dma_tx_buf_bus,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
- sport->dma_tx_bytes = count & ~(sport->txfifo_size - 1);
- tx_bus_addr = sport->dma_tx_buf_bus + xmit->tail;
- sport->dma_tx_desc = dmaengine_prep_slave_single(sport->dma_tx_chan,
- tx_bus_addr, sport->dma_tx_bytes,
+ sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+ sport->dma_tx_nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
-
if (!sport->dma_tx_desc) {
- dev_err(sport->port.dev, "Not able to get desc for tx\n");
- return -EIO;
+ dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dev_err(dev, "Cannot prepare TX slave DMA!\n");
+ return;
}
sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
sport->dma_tx_desc->callback_param = sport;
- sport->dma_tx_in_progress = 1;
+ sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
dma_async_issue_pending(sport->dma_tx_chan);
- return 0;
-}
-
-static void lpuart_prepare_tx(struct lpuart_port *sport)
-{
- struct circ_buf *xmit = &sport->port.state->xmit;
- unsigned long count = CIRC_CNT_TO_END(xmit->head,
- xmit->tail, UART_XMIT_SIZE);
-
- if (!count)
- return;
-
- if (count < sport->txfifo_size)
- writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_TDMAS,
- sport->port.membase + UARTCR5);
- else {
- writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
- sport->port.membase + UARTCR5);
- lpuart_dma_tx(sport, count);
- }
}
static void lpuart_dma_tx_complete(void *arg)
{
struct lpuart_port *sport = arg;
+ struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
- async_tx_ack(sport->dma_tx_desc);
-
spin_lock_irqsave(&sport->port.lock, flags);
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
- sport->dma_tx_in_progress = 0;
+
+ sport->port.icount.tx += sport->dma_tx_bytes;
+ sport->dma_tx_in_progress = false;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
- lpuart_prepare_tx(sport);
+ if (waitqueue_active(&sport->dma_wait)) {
+ wake_up(&sport->dma_wait);
+ return;
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+ lpuart_dma_tx(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
-static int lpuart_dma_rx(struct lpuart_port *sport)
+static int lpuart_dma_tx_request(struct uart_port *port)
{
- dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
- FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- sport->dma_rx_desc = dmaengine_prep_slave_single(sport->dma_rx_chan,
- sport->dma_rx_buf_bus, FSL_UART_RX_DMA_BUFFER_SIZE,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_slave_config dma_tx_sconfig = {};
+ int ret;
- if (!sport->dma_rx_desc) {
- dev_err(sport->port.dev, "Not able to get desc for rx\n");
- return -EIO;
- }
+ dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+ dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_tx_sconfig.dst_maxburst = 1;
+ dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
- sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
- sport->dma_rx_desc->callback_param = sport;
- sport->dma_rx_in_progress = 1;
- sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
- dma_async_issue_pending(sport->dma_rx_chan);
+ if (ret) {
+ dev_err(sport->port.dev,
+ "DMA slave config failed, err = %d\n", ret);
+ return ret;
+ }
return 0;
}
@@ -458,75 +420,17 @@ static int lpuart_dma_rx(struct lpuart_port *sport)
static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+
if (sport->lpuart_dma_tx_use) {
+ if (sport->dma_tx_in_progress) {
+ dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
+ sport->dma_tx_nents, DMA_TO_DEVICE);
+ sport->dma_tx_in_progress = false;
+ }
dmaengine_terminate_all(sport->dma_tx_chan);
- sport->dma_tx_in_progress = 0;
}
}
-static void lpuart_dma_rx_complete(void *arg)
-{
- struct lpuart_port *sport = arg;
- struct tty_port *port = &sport->port.state->port;
- unsigned long flags;
-
- async_tx_ack(sport->dma_rx_desc);
- mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- sport->dma_rx_in_progress = 0;
- lpuart_copy_rx_to_tty(sport, port, FSL_UART_RX_DMA_BUFFER_SIZE);
- tty_flip_buffer_push(port);
- lpuart_dma_rx(sport);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
-
-static void lpuart_timer_func(unsigned long data)
-{
- struct lpuart_port *sport = (struct lpuart_port *)data;
- struct tty_port *port = &sport->port.state->port;
- struct dma_tx_state state;
- unsigned long flags;
- unsigned char temp;
- int count;
-
- del_timer(&sport->lpuart_timer);
- dmaengine_pause(sport->dma_rx_chan);
- dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state);
- dmaengine_terminate_all(sport->dma_rx_chan);
- count = FSL_UART_RX_DMA_BUFFER_SIZE - state.residue;
- async_tx_ack(sport->dma_rx_desc);
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- sport->dma_rx_in_progress = 0;
- lpuart_copy_rx_to_tty(sport, port, count);
- tty_flip_buffer_push(port);
- temp = readb(sport->port.membase + UARTCR5);
- writeb(temp & ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
-
-static inline void lpuart_prepare_rx(struct lpuart_port *sport)
-{
- unsigned long flags;
- unsigned char temp;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
- add_timer(&sport->lpuart_timer);
-
- lpuart_dma_rx(sport);
- temp = readb(sport->port.membase + UARTCR5);
- writeb(temp | UARTCR5_RDMAS, sport->port.membase + UARTCR5);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
-
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -580,8 +484,8 @@ static void lpuart_start_tx(struct uart_port *port)
writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
if (sport->lpuart_dma_tx_use) {
- if (!uart_circ_empty(xmit) && !sport->dma_tx_in_progress)
- lpuart_prepare_tx(sport);
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+ lpuart_dma_tx(sport);
} else {
if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
lpuart_transmit_buffer(sport);
@@ -600,6 +504,29 @@ static void lpuart32_start_tx(struct uart_port *port)
lpuart32_transmit_buffer(sport);
}
+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int lpuart_tx_empty(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ unsigned char sr1 = readb(port->membase + UARTSR1);
+ unsigned char sfifo = readb(port->membase + UARTSFIFO);
+
+ if (sport->dma_tx_in_progress)
+ return 0;
+
+ if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT)
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static unsigned int lpuart32_tx_empty(struct uart_port *port)
+{
+ return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
+ TIOCSER_TEMT : 0;
+}
+
static irqreturn_t lpuart_txint(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
@@ -766,23 +693,15 @@ out:
static irqreturn_t lpuart_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned char sts, crdma;
+ unsigned char sts;
sts = readb(sport->port.membase + UARTSR1);
- crdma = readb(sport->port.membase + UARTCR5);
- if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) {
- if (sport->lpuart_dma_rx_use)
- lpuart_prepare_rx(sport);
- else
- lpuart_rxint(irq, dev_id);
- }
- if (sts & UARTSR1_TDRE && !(crdma & UARTCR5_TDMAS)) {
- if (sport->lpuart_dma_tx_use)
- lpuart_pio_tx(sport);
- else
- lpuart_txint(irq, dev_id);
- }
+ if (sts & UARTSR1_RDRF)
+ lpuart_rxint(irq, dev_id);
+
+ if (sts & UARTSR1_TDRE)
+ lpuart_txint(irq, dev_id);
return IRQ_HANDLED;
}
@@ -807,17 +726,241 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* return TIOCSER_TEMT when transmitter is not busy */
-static unsigned int lpuart_tx_empty(struct uart_port *port)
+static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
- return (readb(port->membase + UARTSR1) & UARTSR1_TC) ?
- TIOCSER_TEMT : 0;
+ struct tty_port *port = &sport->port.state->port;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+ struct circ_buf *ring = &sport->rx_ring;
+ unsigned long flags;
+ int count = 0;
+ unsigned char sr;
+
+ sr = readb(sport->port.membase + UARTSR1);
+
+ if (sr & (UARTSR1_PE | UARTSR1_FE)) {
+ /* Read DR to clear the error flags */
+ readb(sport->port.membase + UARTDR);
+
+ if (sr & UARTSR1_PE)
+ sport->port.icount.parity++;
+ else if (sr & UARTSR1_FE)
+ sport->port.icount.frame++;
+ }
+
+ async_tx_ack(sport->dma_rx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ dmastat = dmaengine_tx_status(sport->dma_rx_chan,
+ sport->dma_rx_cookie,
+ &state);
+
+ if (dmastat == DMA_ERROR) {
+ dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+ }
+
+ /* CPU claims ownership of RX DMA buffer */
+ dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+
+ /*
+ * ring->head points to the end of data already written by the DMA.
+ * ring->tail points to the beginning of data to be read by the
+ * framework.
+ * The current transfer size should not be larger than the dma buffer
+ * length.
+ */
+ ring->head = sport->rx_sgl.length - state.residue;
+ BUG_ON(ring->head > sport->rx_sgl.length);
+ /*
+ * At this point ring->head may point to the first byte right after the
+ * last byte of the dma buffer:
+ * 0 <= ring->head <= sport->rx_sgl.length
+ *
+ * However ring->tail must always point inside the dma buffer:
+ * 0 <= ring->tail <= sport->rx_sgl.length - 1
+ *
+ * Since we use a ring buffer, we have to handle the case
+ * where head is lower than tail. In such a case, we first read from
+ * tail to the end of the buffer then reset tail.
+ */
+ if (ring->head < ring->tail) {
+ count = sport->rx_sgl.length - ring->tail;
+
+ tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ ring->tail = 0;
+ sport->port.icount.rx += count;
+ }
+
+ /* Finally we read data from tail to head */
+ if (ring->tail < ring->head) {
+ count = ring->head - ring->tail;
+ tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ /* Wrap ring->head if needed */
+ if (ring->head >= sport->rx_sgl.length)
+ ring->head = 0;
+ ring->tail = ring->head;
+ sport->port.icount.rx += count;
+ }
+
+ dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ tty_flip_buffer_push(port);
+ mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
}
-static unsigned int lpuart32_tx_empty(struct uart_port *port)
+static void lpuart_dma_rx_complete(void *arg)
{
- return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
- TIOCSER_TEMT : 0;
+ struct lpuart_port *sport = arg;
+
+ lpuart_copy_rx_to_tty(sport);
+}
+
+static void lpuart_timer_func(unsigned long data)
+{
+ struct lpuart_port *sport = (struct lpuart_port *)data;
+
+ lpuart_copy_rx_to_tty(sport);
+}
+
+static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+{
+ struct dma_slave_config dma_rx_sconfig = {};
+ struct circ_buf *ring = &sport->rx_ring;
+ int ret, nent;
+ int bits, baud;
+ struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+ struct ktermios *termios = &tty->termios;
+
+ baud = tty_get_baud_rate(tty);
+
+ bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
+ if (termios->c_cflag & PARENB)
+ bits++;
+
+ /*
+ * Calculate the size of one DMA ring buffer to keep latency below
+ * 10ms at any baud rate.
+ */
+ sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
+ sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
+ if (sport->rx_dma_rng_buf_len < 16)
+ sport->rx_dma_rng_buf_len = 16;
+
+ ring->buf = kmalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
+ if (!ring->buf) {
+ dev_err(sport->port.dev, "Ring buf alloc failed\n");
+ return -ENOMEM;
+ }
+
+ sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
+ sg_set_buf(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
+ nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+
+ if (!nent) {
+ dev_err(sport->port.dev, "DMA Rx mapping error\n");
+ return -EINVAL;
+ }
+
+ dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
+ dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_rx_sconfig.src_maxburst = 1;
+ dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "DMA Rx slave config failed, err = %d\n", ret);
+ return ret;
+ }
+
+ sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+ sg_dma_address(&sport->rx_sgl),
+ sport->rx_sgl.length,
+ sport->rx_sgl.length / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!sport->dma_rx_desc) {
+ dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
+ return -EFAULT;
+ }
+
+ sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
+ sport->dma_rx_desc->callback_param = sport;
+ sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
+ dma_async_issue_pending(sport->dma_rx_chan);
+
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
+ sport->port.membase + UARTCR5);
+
+ return 0;
+}
+
+static void lpuart_dma_rx_free(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+
+ if (sport->dma_rx_chan)
+ dmaengine_terminate_all(sport->dma_rx_chan);
+
+ dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ kfree(sport->rx_ring.buf);
+ sport->rx_ring.tail = 0;
+ sport->rx_ring.head = 0;
+ sport->dma_rx_desc = NULL;
+ sport->dma_rx_cookie = -EINVAL;
+}
+
+static int lpuart_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+
+ u8 modem = readb(sport->port.membase + UARTMODEM) &
+ ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
+ writeb(modem, sport->port.membase + UARTMODEM);
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Enable auto RS-485 RTS mode */
+ modem |= UARTMODEM_TXRTSE;
+
+ /*
+ * RTS needs to be logic HIGH either during transfer _or_ after
+ * transfer; other variants are not supported by the hardware.
+ */
+
+ if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND)))
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+
+ if (rs485->flags & SER_RS485_RTS_ON_SEND &&
+ rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+ /*
+ * The hardware defaults to RTS logic HIGH during transfer.
+ * Switch polarity in case RTS shall be logic HIGH
+ * after transfer.
+ * Note: UART is assumed to be active high.
+ */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem |= UARTMODEM_TXRTSPOL;
+ }
+
+ /* Store the new configuration */
+ sport->port.rs485 = *rs485;
+
+ writeb(modem, sport->port.membase + UARTMODEM);
+ return 0;
}
static unsigned int lpuart_get_mctrl(struct uart_port *port)
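The copy logic introduced in lpuart_copy_rx_to_tty() above follows a common two-step pattern for draining a ring buffer fed by cyclic DMA: flush the wrapped chunk from tail to the end of the buffer first, then the linear chunk up to head. Below is a minimal, self-contained C sketch of that pattern; the rx_ring type, push() consumer and example data are hypothetical stand-ins, not driver API.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical ring fed by cyclic DMA: head is the write position reported
 * by the DMA engine (buffer length minus residue), tail is the read position. */
struct rx_ring {
	unsigned char *buf;
	size_t len;
	size_t head;
	size_t tail;
};

/* Stand-in for tty_insert_flip_string(): just emit the bytes. */
static size_t push(const unsigned char *data, size_t count)
{
	return fwrite(data, 1, count, stdout);
}

/* Drain everything between tail and head, handling wrap-around first. */
static size_t ring_drain(struct rx_ring *r)
{
	size_t copied = 0;

	if (r->head < r->tail) {            /* data wraps past the end */
		copied += push(r->buf + r->tail, r->len - r->tail);
		r->tail = 0;
	}
	if (r->tail < r->head) {            /* remaining linear chunk */
		copied += push(r->buf + r->tail, r->head - r->tail);
		if (r->head >= r->len)      /* head may sit one past the end */
			r->head = 0;
		r->tail = r->head;
	}
	return copied;
}

int main(void)
{
	unsigned char buf[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	struct rx_ring r = { .buf = buf, .len = 8, .head = 2, .tail = 6 };

	ring_drain(&r);   /* prints "GHAB": wrapped chunk first, then the rest */
	putchar('\n');
	return 0;
}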
@@ -853,17 +996,22 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned char temp;
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
- temp = readb(port->membase + UARTMODEM) &
+ /* Make sure RXRTSE bit is not set when RS485 is enabled */
+ if (!(sport->port.rs485.flags & SER_RS485_ENABLED)) {
+ temp = readb(sport->port.membase + UARTMODEM) &
~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODEM_RXRTSE;
+ if (mctrl & TIOCM_RTS)
+ temp |= UARTMODEM_RXRTSE;
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODEM_TXCTSE;
+ if (mctrl & TIOCM_CTS)
+ temp |= UARTMODEM_TXCTSE;
- writeb(temp, port->membase + UARTMODEM);
+ writeb(temp, port->membase + UARTMODEM);
+ }
}
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -921,13 +1069,16 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
sport->port.membase + UARTPFIFO);
- /* explicitly clear RDRF */
- readb(sport->port.membase + UARTSR1);
-
/* flush Tx and Rx FIFO */
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
sport->port.membase + UARTCFIFO);
+ /* explicitly clear RDRF */
+ if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
+ readb(sport->port.membase + UARTDR);
+ writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ }
+
writeb(0, sport->port.membase + UARTTWFIFO);
writeb(1, sport->port.membase + UARTRWFIFO);
@@ -960,110 +1111,12 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
lpuart32_write(ctrl_saved, sport->port.membase + UARTCTRL);
}
-static int lpuart_dma_tx_request(struct uart_port *port)
-{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
- struct dma_slave_config dma_tx_sconfig;
- dma_addr_t dma_bus;
- unsigned char *dma_buf;
- int ret;
-
- dma_bus = dma_map_single(sport->dma_tx_chan->device->dev,
- sport->port.state->xmit.buf,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
-
- if (dma_mapping_error(sport->dma_tx_chan->device->dev, dma_bus)) {
- dev_err(sport->port.dev, "dma_map_single tx failed\n");
- return -ENOMEM;
- }
-
- dma_buf = sport->port.state->xmit.buf;
- dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
- dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma_tx_sconfig.dst_maxburst = sport->txfifo_size;
- dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
- ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
-
- if (ret < 0) {
- dev_err(sport->port.dev,
- "Dma slave config failed, err = %d\n", ret);
- return ret;
- }
-
- sport->dma_tx_buf_virt = dma_buf;
- sport->dma_tx_buf_bus = dma_bus;
- sport->dma_tx_in_progress = 0;
-
- return 0;
-}
-
-static int lpuart_dma_rx_request(struct uart_port *port)
+static void rx_dma_timer_init(struct lpuart_port *sport)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
- struct dma_slave_config dma_rx_sconfig;
- dma_addr_t dma_bus;
- unsigned char *dma_buf;
- int ret;
-
- dma_buf = devm_kzalloc(sport->port.dev,
- FSL_UART_RX_DMA_BUFFER_SIZE, GFP_KERNEL);
-
- if (!dma_buf) {
- dev_err(sport->port.dev, "Dma rx alloc failed\n");
- return -ENOMEM;
- }
-
- dma_bus = dma_map_single(sport->dma_rx_chan->device->dev, dma_buf,
- FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
-
- if (dma_mapping_error(sport->dma_rx_chan->device->dev, dma_bus)) {
- dev_err(sport->port.dev, "dma_map_single rx failed\n");
- return -ENOMEM;
- }
-
- dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
- dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma_rx_sconfig.src_maxburst = 1;
- dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
-
- if (ret < 0) {
- dev_err(sport->port.dev,
- "Dma slave config failed, err = %d\n", ret);
- return ret;
- }
-
- sport->dma_rx_buf_virt = dma_buf;
- sport->dma_rx_buf_bus = dma_bus;
- sport->dma_rx_in_progress = 0;
-
- return 0;
-}
-
-static void lpuart_dma_tx_free(struct uart_port *port)
-{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- dma_unmap_single(sport->port.dev, sport->dma_tx_buf_bus,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
-
- sport->dma_tx_buf_bus = 0;
- sport->dma_tx_buf_virt = NULL;
-}
-
-static void lpuart_dma_rx_free(struct uart_port *port)
-{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus,
- FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
-
- sport->dma_rx_buf_bus = 0;
- sport->dma_rx_buf_virt = NULL;
+ setup_timer(&sport->lpuart_timer, lpuart_timer_func,
+ (unsigned long)sport);
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
}
static int lpuart_startup(struct uart_port *port)
@@ -1084,22 +1137,6 @@ static int lpuart_startup(struct uart_port *port)
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
- if (sport->dma_rx_chan && !lpuart_dma_rx_request(port)) {
- sport->lpuart_dma_rx_use = true;
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
- } else
- sport->lpuart_dma_rx_use = false;
-
-
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
- sport->lpuart_dma_tx_use = true;
- temp = readb(port->membase + UARTCR5);
- temp &= ~UARTCR5_RDMAS;
- writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
- } else
- sport->lpuart_dma_tx_use = false;
-
ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
DRIVER_NAME, sport);
if (ret)
@@ -1113,7 +1150,29 @@ static int lpuart_startup(struct uart_port *port)
temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
writeb(temp, sport->port.membase + UARTCR2);
+ if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
+ /* set Rx DMA timeout */
+ sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
+ if (!sport->dma_rx_timeout)
+ sport->dma_rx_timeout = 1;
+
+ sport->lpuart_dma_rx_use = true;
+ rx_dma_timer_init(sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
+ }
+
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ temp = readb(port->membase + UARTCR5);
+ writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
+
return 0;
}
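The startup path above relies on lpuart_start_rx_dma() sizing the Rx ring to hold roughly twice the data that can arrive within the 10 ms DMA timeout, rounded down to a power of two with a 16-byte floor. A hedged standalone C sketch of that calculation follows; the helper names, parameter set and example values are illustrative only (the driver uses fls() and its own constants).

#include <stdio.h>

/* Round down to the nearest power of two (stand-in for the fls()-based code). */
static unsigned int rounddown_pow_of_two(unsigned int x)
{
	unsigned int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

/* Size the Rx DMA ring for a given baud rate, bits per character and
 * DMA timeout in milliseconds. */
static unsigned int rx_ring_len(unsigned int baud, unsigned int bits,
				unsigned int timeout_ms)
{
	unsigned int len = (timeout_ms * baud / bits / 1000) * 2;

	len = rounddown_pow_of_two(len);
	return len < 16 ? 16 : len;
}

int main(void)
{
	/* 115200 baud, 10 bits per character, 10 ms timeout: 230 -> 128 bytes */
	printf("%u\n", rx_ring_len(115200, 10, 10));
	return 0;
}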
@@ -1170,12 +1229,19 @@ static void lpuart_shutdown(struct uart_port *port)
devm_free_irq(port->dev, port->irq, sport);
if (sport->lpuart_dma_rx_use) {
- lpuart_dma_rx_free(&sport->port);
del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
}
- if (sport->lpuart_dma_tx_use)
- lpuart_dma_tx_free(&sport->port);
+ if (sport->lpuart_dma_tx_use) {
+ if (wait_event_interruptible(sport->dma_wait,
+ !sport->dma_tx_in_progress) != false) {
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ }
+
+ lpuart_stop_tx(port);
+ }
}
static void lpuart32_shutdown(struct uart_port *port)
@@ -1203,13 +1269,14 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned char cr1, old_cr1, old_cr2, cr4, bdh, modem;
+ unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
unsigned int sbr, brfa;
cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
old_cr2 = readb(sport->port.membase + UARTCR2);
+ cr3 = readb(sport->port.membase + UARTCR3);
cr4 = readb(sport->port.membase + UARTCR4);
bdh = readb(sport->port.membase + UARTBDH);
modem = readb(sport->port.membase + UARTMODEM);
@@ -1240,6 +1307,13 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
cr1 |= UARTCR1_M;
}
+ /*
+ * When auto RS-485 RTS mode is enabled,
+ * hardware flow control needs to be disabled.
+ */
+ if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ termios->c_cflag &= ~CRTSCTS;
+
if (termios->c_cflag & CRTSCTS) {
modem |= (UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
} else {
@@ -1257,7 +1331,10 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & PARENB)) {
if (termios->c_cflag & CMSPAR) {
cr1 &= ~UARTCR1_PE;
- cr1 |= UARTCR1_M;
+ if (termios->c_cflag & PARODD)
+ cr3 |= UARTCR3_T8;
+ else
+ cr3 &= ~UARTCR3_T8;
} else {
cr1 |= UARTCR1_PE;
if ((termios->c_cflag & CSIZE) == CS8)
@@ -1297,17 +1374,6 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- if (sport->lpuart_dma_rx_use) {
- /* Calculate delay for 1.5 DMA buffers */
- sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
- FSL_UART_RX_DMA_BUFFER_SIZE * 3 /
- sport->rxfifo_size / 2;
- dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
- sport->dma_rx_timeout * 1000 / HZ, sport->port.timeout);
- if (sport->dma_rx_timeout < msecs_to_jiffies(20))
- sport->dma_rx_timeout = msecs_to_jiffies(20);
- }
-
/* wait for the transmit engine to complete */
while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
barrier();
@@ -1325,12 +1391,31 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
writeb(cr4 | brfa, sport->port.membase + UARTCR4);
writeb(bdh, sport->port.membase + UARTBDH);
writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
+ writeb(cr3, sport->port.membase + UARTCR3);
writeb(cr1, sport->port.membase + UARTCR1);
writeb(modem, sport->port.membase + UARTMODEM);
/* restore control register */
writeb(old_cr2, sport->port.membase + UARTCR2);
+ /*
+ * If a new baud rate is set, we also need to update the ring buffer
+ * length according to the selected baud rate and restart the Rx DMA path.
+ */
+ if (old) {
+ if (sport->lpuart_dma_rx_use) {
+ del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+ if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
+ sport->lpuart_dma_rx_use = true;
+ rx_dma_timer_init(sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
+ }
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -1494,7 +1579,7 @@ static int lpuart_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
-static struct uart_ops lpuart_pops = {
+static const struct uart_ops lpuart_pops = {
.tx_empty = lpuart_tx_empty,
.set_mctrl = lpuart_set_mctrl,
.get_mctrl = lpuart_get_mctrl,
@@ -1513,7 +1598,7 @@ static struct uart_ops lpuart_pops = {
.flush_buffer = lpuart_flush_buffer,
};
-static struct uart_ops lpuart32_pops = {
+static const struct uart_ops lpuart32_pops = {
.tx_empty = lpuart32_tx_empty,
.set_mctrl = lpuart32_set_mctrl,
.get_mctrl = lpuart32_get_mctrl,
@@ -1843,6 +1928,8 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
+ sport->port.rs485_config = lpuart_config_rs485;
+
sport->clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->clk)) {
ret = PTR_ERR(sport->clk);
@@ -1883,6 +1970,12 @@ static int lpuart_probe(struct platform_device *pdev)
dev_info(sport->port.dev, "DMA rx channel request failed, "
"operating without rx DMA\n");
+ if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) {
+ sport->port.rs485.flags |= SER_RS485_ENABLED;
+ sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
+ writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
+ }
+
return 0;
}
@@ -1923,6 +2016,32 @@ static int lpuart_suspend(struct device *dev)
uart_suspend_port(&lpuart_reg, &sport->port);
+ if (sport->lpuart_dma_rx_use) {
+ /*
+ * During suspend the EDMA driver will forcefully release any
+ * non-idle DMA channels. If port wakeup is enabled, the port is
+ * a console port, or 'no_console_suspend' is set, the Rx DMA
+ * cannot resume as expected, hence gracefully release the
+ * Rx DMA path before suspend and restart it on resume.
+ */
+ if (sport->port.irq_wake) {
+ del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+ /* Disable Rx DMA to use UART port as wakeup source */
+ writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_RDMAS,
+ sport->port.membase + UARTCR5);
+ }
+
+ if (sport->lpuart_dma_tx_use) {
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ }
+
+ if (sport->port.suspended && !sport->port.irq_wake)
+ clk_disable_unprepare(sport->clk);
+
return 0;
}
@@ -1931,6 +2050,9 @@ static int lpuart_resume(struct device *dev)
struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
+ if (sport->port.suspended && !sport->port.irq_wake)
+ clk_prepare_enable(sport->clk);
+
if (sport->lpuart32) {
lpuart32_setup_watermark(sport);
temp = lpuart32_read(sport->port.membase + UARTCTRL);
@@ -1944,6 +2066,26 @@ static int lpuart_resume(struct device *dev)
writeb(temp, sport->port.membase + UARTCR2);
}
+ if (sport->lpuart_dma_rx_use) {
+ if (sport->port.irq_wake) {
+ if (!lpuart_start_rx_dma(sport)) {
+ sport->lpuart_dma_rx_use = true;
+ rx_dma_timer_init(sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
+ }
+ }
+ }
+
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ writeb(readb(sport->port.membase + UARTCR5) |
+ UARTCR5_TDMAS, sport->port.membase + UARTCR5);
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+
uart_resume_port(&lpuart_reg, &sport->port);
return 0;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0df2b1c091ae..a70356dad1b7 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -190,6 +190,7 @@
enum imx_uart_type {
IMX1_UART,
IMX21_UART,
+ IMX53_UART,
IMX6Q_UART,
};
@@ -222,6 +223,9 @@ struct imx_port {
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
+ struct circ_buf rx_ring;
+ unsigned int rx_periods;
+ dma_cookie_t rx_cookie;
unsigned int tx_bytes;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
@@ -244,6 +248,10 @@ static struct imx_uart_data imx_uart_devdata[] = {
.uts_reg = IMX21_UTS,
.devtype = IMX21_UART,
},
+ [IMX53_UART] = {
+ .uts_reg = IMX21_UTS,
+ .devtype = IMX53_UART,
+ },
[IMX6Q_UART] = {
.uts_reg = IMX21_UTS,
.devtype = IMX6Q_UART,
@@ -258,6 +266,9 @@ static const struct platform_device_id imx_uart_devtype[] = {
.name = "imx21-uart",
.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
}, {
+ .name = "imx53-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX53_UART],
+ }, {
.name = "imx6q-uart",
.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
}, {
@@ -268,6 +279,7 @@ MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
static const struct of_device_id imx_uart_dt_ids[] = {
{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
+ { .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
{ /* sentinel */ }
@@ -289,6 +301,11 @@ static inline int is_imx21_uart(struct imx_port *sport)
return sport->devdata->devtype == IMX21_UART;
}
+static inline int is_imx53_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX53_UART;
+}
+
static inline int is_imx6q_uart(struct imx_port *sport)
{
return sport->devdata->devtype == IMX6Q_UART;
@@ -701,6 +718,7 @@ out:
return IRQ_HANDLED;
}
+static void clear_rx_errors(struct imx_port *sport);
static int start_rx_dma(struct imx_port *sport);
/*
* If the RXFIFO is filled with some data, and then we
@@ -726,6 +744,11 @@ static void imx_dma_rxint(struct imx_port *sport)
temp &= ~(UCR2_ATEN);
writel(temp, sport->port.membase + UCR2);
+ /* disable the rx errors interrupts */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~UCR4_OREN;
+ writel(temp, sport->port.membase + UCR4);
+
/* tell the DMA to receive the data. */
start_rx_dma(sport);
}
@@ -740,12 +763,13 @@ static unsigned int imx_get_hwmctrl(struct imx_port *sport)
{
unsigned int tmp = TIOCM_DSR;
unsigned usr1 = readl(sport->port.membase + USR1);
+ unsigned usr2 = readl(sport->port.membase + USR2);
if (usr1 & USR1_RTSS)
tmp |= TIOCM_CTS;
/* in DCE mode DCDIN is always 0 */
- if (!(usr1 & USR2_DCDIN))
+ if (!(usr2 & USR2_DCDIN))
tmp |= TIOCM_CAR;
if (sport->dte_mode)
@@ -932,30 +956,6 @@ static void imx_timeout(unsigned long data)
}
#define RX_BUF_SIZE (PAGE_SIZE)
-static void imx_rx_dma_done(struct imx_port *sport)
-{
- unsigned long temp;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- /* re-enable interrupts to get notified when new symbols are incoming */
- temp = readl(sport->port.membase + UCR1);
- temp |= UCR1_RRDYEN;
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR2);
- temp |= UCR2_ATEN;
- writel(temp, sport->port.membase + UCR2);
-
- sport->dma_is_rxing = 0;
-
- /* Is the shutdown waiting for us? */
- if (waitqueue_active(&sport->dma_wait))
- wake_up(&sport->dma_wait);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
@@ -972,43 +972,76 @@ static void dma_rx_callback(void *data)
struct scatterlist *sgl = &sport->rx_sgl;
struct tty_port *port = &sport->port.state->port;
struct dma_tx_state state;
+ struct circ_buf *rx_ring = &sport->rx_ring;
enum dma_status status;
- unsigned int count;
-
- /* unmap it first */
- dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
+ unsigned int w_bytes = 0;
+ unsigned int r_bytes;
+ unsigned int bd_size;
status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
- count = RX_BUF_SIZE - state.residue;
- dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
+ if (status == DMA_ERROR) {
+ dev_err(sport->port.dev, "DMA transaction error.\n");
+ clear_rx_errors(sport);
+ return;
+ }
+
+ if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
+
+ /*
+ * The state-residue variable represents the empty space
+ * relative to the entire buffer. Taking this into account,
+ * the head is always calculated as the total buffer length
+ * minus the DMA transaction residue. The UART script in the
+ * SDMA firmware jumps to the next buffer descriptor once a
+ * DMA transaction is finalized (IMX53 RM - A.4.1.2.4).
+ * Consequently, the tail is always at the beginning of the
+ * buffer descriptor that contains the head.
+ */
+
+ /* Calculate the head */
+ rx_ring->head = sg_dma_len(sgl) - state.residue;
+
+ /* Calculate the tail. */
+ bd_size = sg_dma_len(sgl) / sport->rx_periods;
+ rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
+
+ if (rx_ring->head <= sg_dma_len(sgl) &&
+ rx_ring->head > rx_ring->tail) {
+
+ /* Move data from tail to head */
+ r_bytes = rx_ring->head - rx_ring->tail;
- if (count) {
- if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
- int bytes = tty_insert_flip_string(port, sport->rx_buf,
- count);
+ /* CPU claims ownership of RX DMA buffer */
+ dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
+ DMA_FROM_DEVICE);
- if (bytes != count)
+ w_bytes = tty_insert_flip_string(port,
+ sport->rx_buf + rx_ring->tail, r_bytes);
+
+ /* UART retrieves ownership of RX DMA buffer */
+ dma_sync_sg_for_device(sport->port.dev, sgl, 1,
+ DMA_FROM_DEVICE);
+
+ if (w_bytes != r_bytes)
sport->port.icount.buf_overrun++;
+
+ sport->port.icount.rx += w_bytes;
+ } else {
+ WARN_ON(rx_ring->head > sg_dma_len(sgl));
+ WARN_ON(rx_ring->head <= rx_ring->tail);
}
- tty_flip_buffer_push(port);
- sport->port.icount.rx += count;
}
- /*
- * Restart RX DMA directly if more data is available in order to skip
- * the roundtrip through the IRQ handler. If there is some data already
- * in the FIFO, DMA needs to be restarted soon anyways.
- *
- * Otherwise stop the DMA and reactivate FIFO IRQs to restart DMA once
- * data starts to arrive again.
- */
- if (readl(sport->port.membase + USR2) & USR2_RDR)
- start_rx_dma(sport);
- else
- imx_rx_dma_done(sport);
+ if (w_bytes) {
+ tty_flip_buffer_push(port);
+ dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
+ }
}
+/* RX DMA buffer periods */
+#define RX_DMA_PERIODS 4
+
static int start_rx_dma(struct imx_port *sport)
{
struct scatterlist *sgl = &sport->rx_sgl;
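The head/tail arithmetic in dma_rx_callback() above derives the head from the residue reported by the DMA engine and snaps the tail to the start of the buffer descriptor that contains the head. The sketch below illustrates just that arithmetic; the buffer length, period count and residue are arbitrary example values, and it assumes head > 0 (as in the callback, which only pushes data when something arrived).

#include <stdio.h>

/*
 * Sketch of the imx Rx ring bookkeeping: the cyclic DMA buffer is split
 * into 'periods' equal buffer descriptors; 'residue' is the empty space
 * reported by the DMA engine relative to the whole buffer.
 */
static void calc_head_tail(unsigned int buf_len, unsigned int periods,
			   unsigned int residue,
			   unsigned int *head, unsigned int *tail)
{
	unsigned int bd_size = buf_len / periods;

	*head = buf_len - residue;                 /* end of DMA-written data */
	*tail = ((*head - 1) / bd_size) * bd_size; /* start of the BD holding head */
}

int main(void)
{
	unsigned int head, tail;

	/* 4096-byte buffer, 4 periods (1024-byte BDs), 1000 bytes still empty */
	calc_head_tail(4096, 4, 1000, &head, &tail);
	printf("head=%u tail=%u\n", head, tail); /* head=3096, tail=3072 */
	return 0;
}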
@@ -1017,14 +1050,21 @@ static int start_rx_dma(struct imx_port *sport)
struct dma_async_tx_descriptor *desc;
int ret;
+ sport->rx_ring.head = 0;
+ sport->rx_ring.tail = 0;
+ sport->rx_periods = RX_DMA_PERIODS;
+
sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for RX.\n");
return -EINVAL;
}
- desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
+
+ desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
+ sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+
if (!desc) {
dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
dev_err(dev, "We cannot prepare for the RX slave dma!\n");
@@ -1034,11 +1074,36 @@ static int start_rx_dma(struct imx_port *sport)
desc->callback_param = sport;
dev_dbg(dev, "RX: prepare for the DMA.\n");
- dmaengine_submit(desc);
+ sport->rx_cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
return 0;
}
+static void clear_rx_errors(struct imx_port *sport)
+{
+ unsigned int status_usr1, status_usr2;
+
+ status_usr1 = readl(sport->port.membase + USR1);
+ status_usr2 = readl(sport->port.membase + USR2);
+
+ if (status_usr2 & USR2_BRCD) {
+ sport->port.icount.brk++;
+ writel(USR2_BRCD, sport->port.membase + USR2);
+ } else if (status_usr1 & USR1_FRAMERR) {
+ sport->port.icount.frame++;
+ writel(USR1_FRAMERR, sport->port.membase + USR1);
+ } else if (status_usr1 & USR1_PARITYERR) {
+ sport->port.icount.parity++;
+ writel(USR1_PARITYERR, sport->port.membase + USR1);
+ }
+
+ if (status_usr2 & USR2_ORE) {
+ sport->port.icount.overrun++;
+ writel(USR2_ORE, sport->port.membase + USR2);
+ }
+
+}
+
#define TXTL_DEFAULT 2 /* reset default */
#define RXTL_DEFAULT 1 /* reset default */
#define TXTL_DMA 8 /* DMA burst setting */
@@ -1058,14 +1123,16 @@ static void imx_setup_ufcr(struct imx_port *sport,
static void imx_uart_dma_exit(struct imx_port *sport)
{
if (sport->dma_chan_rx) {
+ dmaengine_terminate_sync(sport->dma_chan_rx);
dma_release_channel(sport->dma_chan_rx);
sport->dma_chan_rx = NULL;
-
+ sport->rx_cookie = -EINVAL;
kfree(sport->rx_buf);
sport->rx_buf = NULL;
}
if (sport->dma_chan_tx) {
+ dmaengine_terminate_sync(sport->dma_chan_tx);
dma_release_channel(sport->dma_chan_tx);
sport->dma_chan_tx = NULL;
}
@@ -1103,6 +1170,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
ret = -ENOMEM;
goto err;
}
+ sport->rx_ring.buf = sport->rx_buf;
/* Prepare for TX : */
sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
@@ -1201,8 +1269,7 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
/* Can we enable the DMA support? */
- if (is_imx6q_uart(sport) && !uart_console(port) &&
- !sport->dma_is_inited)
+ if (!uart_console(port) && !sport->dma_is_inited)
imx_uart_dma_init(sport);
spin_lock_irqsave(&sport->port.lock, flags);
@@ -1283,17 +1350,11 @@ static void imx_shutdown(struct uart_port *port)
unsigned long flags;
if (sport->dma_is_enabled) {
- int ret;
+ sport->dma_is_rxing = 0;
+ sport->dma_is_txing = 0;
+ dmaengine_terminate_sync(sport->dma_chan_tx);
+ dmaengine_terminate_sync(sport->dma_chan_rx);
- /* We have to wait for the DMA to finish. */
- ret = wait_event_interruptible(sport->dma_wait,
- !sport->dma_is_rxing && !sport->dma_is_txing);
- if (ret != 0) {
- sport->dma_is_rxing = 0;
- sport->dma_is_txing = 0;
- dmaengine_terminate_all(sport->dma_chan_tx);
- dmaengine_terminate_all(sport->dma_chan_rx);
- }
spin_lock_irqsave(&sport->port.lock, flags);
imx_stop_tx(port);
imx_stop_rx(port);
@@ -1690,7 +1751,7 @@ static int imx_rs485_config(struct uart_port *port,
return 0;
}
-static struct uart_ops imx_pops = {
+static const struct uart_ops imx_pops = {
.tx_empty = imx_tx_empty,
.set_mctrl = imx_set_mctrl,
.get_mctrl = imx_get_mctrl,
@@ -2077,8 +2138,10 @@ static int serial_imx_probe(struct platform_device *pdev)
/* For register access, we only need to enable the ipg clock. */
ret = clk_prepare_enable(sport->clk_ipg);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
return ret;
+ }
/* Disable interrupts before requesting them */
reg = readl_relaxed(sport->port.membase + UCR1);
@@ -2095,18 +2158,26 @@ static int serial_imx_probe(struct platform_device *pdev)
if (txirq > 0) {
ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0,
dev_name(&pdev->dev), sport);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rx irq: %d\n",
+ ret);
return ret;
+ }
ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0,
dev_name(&pdev->dev), sport);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request tx irq: %d\n",
+ ret);
return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
dev_name(&pdev->dev), sport);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
return ret;
+ }
}
imx_ports[sport->port.line] = sport;
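
As a side note to the imx changes above, the driver now stores the cookie returned by dmaengine_submit() in sport->rx_cookie. Below is a minimal sketch, not taken from the patch, of how such a cookie is typically used together with dmaengine_tx_status() to see how far a cyclic RX transfer has progressed; the names my_rx and my_rx_bytes_avail are hypothetical.

#include <linux/dmaengine.h>

struct my_rx {
	struct dma_chan *chan;	/* slave channel used for RX */
	dma_cookie_t cookie;	/* saved from dmaengine_submit() */
};

/* How many bytes of a cyclic buffer of "buf_len" bytes are filled? */
static unsigned int my_rx_bytes_avail(struct my_rx *rx, unsigned int buf_len)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(rx->chan, rx->cookie, &state);
	if (status != DMA_IN_PROGRESS)
		return 0;

	/* residue is the number of bytes the engine has not written yet */
	return buf_len - state.residue;
}
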
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index c5ddfe542451..ec7d8383900f 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -346,7 +346,7 @@ static void jsm_config_port(struct uart_port *port, int flags)
port->type = PORT_JSM;
}
-static struct uart_ops jsm_ops = {
+static const struct uart_ops jsm_ops = {
.tx_empty = jsm_tty_tx_empty,
.set_mctrl = jsm_tty_set_mctrl,
.get_mctrl = jsm_tty_get_mctrl,
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 5c4c280b3207..ace82645b123 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -712,7 +712,7 @@ static void max3100_break_ctl(struct uart_port *port, int break_state)
dev_dbg(&s->spi->dev, "%s\n", __func__);
}
-static struct uart_ops max3100_ops = {
+static const struct uart_ops max3100_ops = {
.tx_empty = max3100_tx_empty,
.set_mctrl = max3100_set_mctrl,
.get_mctrl = max3100_get_mctrl,
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 9360801df3c4..8a3e92638e10 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1329,9 +1329,9 @@ static int max310x_spi_probe(struct spi_device *spi)
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct max310x_devtype *)id_entry->driver_data;
- flags = IRQF_TRIGGER_FALLING;
}
+ flags = IRQF_TRIGGER_FALLING;
regcfg.max_register = devtype->nr * 0x20 - 1;
regmap = devm_regmap_init_spi(spi, &regcfg);
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index a44290e9b5a8..e72ea61c70db 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -775,7 +775,7 @@ static int men_z135_verify_port(struct uart_port *port,
return -EINVAL;
}
-static struct uart_ops men_z135_ops = {
+static const struct uart_ops men_z135_ops = {
.tx_empty = men_z135_tx_empty,
.set_mctrl = men_z135_set_mctrl,
.get_mctrl = men_z135_get_mctrl,
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index eb54e5c77ead..770454e0dfa3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1317,7 +1317,7 @@ static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
mxs_clr(AUART_LINECTRL_BRK, s, REG_LINECTRL);
}
-static struct uart_ops mxs_auart_ops = {
+static const struct uart_ops mxs_auart_ops = {
.tx_empty = mxs_auart_tx_empty,
.start_tx = mxs_auart_start_tx,
.stop_tx = mxs_auart_stop_tx,
@@ -1510,10 +1510,7 @@ static int mxs_get_clks(struct mxs_auart_port *s,
if (!is_asm9260_auart(s)) {
s->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(s->clk))
- return PTR_ERR(s->clk);
-
- return 0;
+ return PTR_ERR_OR_ZERO(s->clk);
}
s->clk = devm_clk_get(s->dev, "mod");
@@ -1537,16 +1534,20 @@ static int mxs_get_clks(struct mxs_auart_port *s,
err = clk_set_rate(s->clk, clk_get_rate(s->clk_ahb));
if (err) {
dev_err(s->dev, "Failed to set rate!\n");
- return err;
+ goto disable_clk_ahb;
}
err = clk_prepare_enable(s->clk);
if (err) {
dev_err(s->dev, "Failed to enable clk!\n");
- return err;
+ goto disable_clk_ahb;
}
return 0;
+
+disable_clk_ahb:
+ clk_disable_unprepare(s->clk_ahb);
+ return err;
}
/*
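
The mxs-auart hunks above replace early returns with a goto-based unwind so that clk_ahb is disabled again on failure. A minimal sketch of that unwind pattern follows, assuming two clocks that were already obtained (names are placeholders, not the driver's).

#include <linux/clk.h>

static int my_clks_enable(struct clk *ahb, struct clk *mod)
{
	int err;

	err = clk_prepare_enable(ahb);
	if (err)
		return err;

	err = clk_set_rate(mod, clk_get_rate(ahb));
	if (err)
		goto disable_ahb;

	err = clk_prepare_enable(mod);
	if (err)
		goto disable_ahb;

	return 0;

disable_ahb:
	/* undo only what was enabled before the failure */
	clk_disable_unprepare(ahb);
	return err;
}
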
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index ea4ffc2ebb2f..d391650b82e7 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -31,6 +31,7 @@
#include <linux/dmi.h>
#include <linux/nmi.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
@@ -1603,7 +1604,7 @@ static void pch_uart_put_poll_char(struct uart_port *port,
}
#endif /* CONFIG_CONSOLE_POLL */
-static struct uart_ops pch_uart_ops = {
+static const struct uart_ops pch_uart_ops = {
.tx_empty = pch_uart_tx_empty,
.set_mctrl = pch_uart_set_mctrl,
.get_mctrl = pch_uart_get_mctrl,
@@ -1826,6 +1827,10 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
priv->trigger_level = 1;
priv->fcr = 0;
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &user_uartclk);
+
#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
pch_uart_ports[board->line_no] = priv;
#endif
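
For reference, a minimal sketch (names hypothetical) of the optional device-tree read done above: of_property_read_u32() returns an error and leaves the output untouched when the property is absent, so a preloaded default survives.

#include <linux/of.h>

static u32 my_uartclk(struct device_node *np, u32 fallback)
{
	u32 freq = fallback;

	if (np)
		of_property_read_u32(np, "clock-frequency", &freq);

	return freq;
}
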
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index ae2095a66708..f44615fa474d 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1577,7 +1577,7 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 2ae4fcee1814..a04acef1cb20 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -102,7 +102,7 @@ struct s3c24xx_uart_port {
struct s3c24xx_uart_dma *dma;
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
struct notifier_block freq_transition;
#endif
};
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index f36e6df2fa90..a9d94f7cf683 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1205,6 +1205,10 @@ static int sc16is7xx_probe(struct device *dev,
}
#endif
+ /* reset device, purging any pending irq / data */
+ regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+ SC16IS7XX_IOCONTROL_SRESET_BIT);
+
for (i = 0; i < devtype->nr_uart; ++i) {
s->p[i].line = i;
/* Initialize port data */
@@ -1234,6 +1238,22 @@ static int sc16is7xx_probe(struct device *dev,
init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
+
+ /* Enable EFR */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+ regcache_cache_bypass(s->regmap, true);
+
+ /* Enable write access to enhanced features */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT);
+
+ regcache_cache_bypass(s->regmap, false);
+
+ /* Restore access to general registers */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
+
/* Go to suspend mode */
sc16is7xx_power(&s->p[i].port, 0);
}
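
The EFR enable sequence added above relies on regmap cache bypass so that the temporary LCR/EFR values written during configuration mode are not recorded in the register cache. A minimal sketch of that bypass pattern, with placeholder register arguments:

#include <linux/regmap.h>

static int my_write_uncached(struct regmap *map, unsigned int reg,
			     unsigned int val)
{
	int ret;

	/* writes done while bypass is on go straight to the hardware */
	regcache_cache_bypass(map, true);
	ret = regmap_write(map, reg, val);
	regcache_cache_bypass(map, false);

	return ret;
}
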
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 9fc15335c8c5..6e4f63627479 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -235,18 +235,9 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
if (tty_port_initialized(port))
return 0;
- /*
- * Set the TTY IO error marker - we will only clear this
- * once we have successfully opened the port.
- */
- set_bit(TTY_IO_ERROR, &tty->flags);
-
retval = uart_port_startup(tty, state, init_hw);
- if (!retval) {
- tty_port_set_initialized(port, 1);
- clear_bit(TTY_IO_ERROR, &tty->flags);
- } else if (retval > 0)
- retval = 0;
+ if (retval)
+ set_bit(TTY_IO_ERROR, &tty->flags);
return retval;
}
@@ -972,8 +963,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
}
uart_change_speed(tty, state, NULL);
}
- } else
+ } else {
retval = uart_startup(tty, state, 1);
+ if (retval > 0)
+ retval = 0;
+ }
exit:
return retval;
}
@@ -1139,6 +1133,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, 1);
+ if (ret > 0)
+ ret = 0;
}
out:
mutex_unlock(&port->mutex);
@@ -1465,7 +1461,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port;
- struct uart_port *uport;
if (!state) {
struct uart_driver *drv = tty->driver->driver_state;
@@ -1481,56 +1476,36 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
port = &state->port;
pr_debug("uart_close(%d) called\n", tty->index);
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
+ tty_port_close(tty->port, tty, filp);
+}
- mutex_lock(&port->mutex);
- uport = uart_port_check(state);
+static void uart_tty_port_shutdown(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = uart_port_check(state);
/*
* At this point, we stop accepting input. To do this, we
* disable the receive line status interrupts.
*/
- if (tty_port_initialized(port) &&
- !WARN(!uport, "detached port still initialized!\n")) {
- spin_lock_irq(&uport->lock);
- uport->ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- uart_wait_until_sent(tty, uport->timeout);
- }
-
- uart_shutdown(tty, state);
- tty_port_tty_set(port, NULL);
+ if (WARN(!uport, "detached port still initialized!\n"))
+ return;
- spin_lock_irq(&port->lock);
+ spin_lock_irq(&uport->lock);
+ uport->ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
- if (port->blocked_open) {
- spin_unlock_irq(&port->lock);
- if (port->close_delay)
- msleep_interruptible(jiffies_to_msecs(port->close_delay));
- spin_lock_irq(&port->lock);
- } else if (uport && !uart_console(uport)) {
- spin_unlock_irq(&port->lock);
- uart_change_pm(state, UART_PM_STATE_OFF);
- spin_lock_irq(&port->lock);
- }
- spin_unlock_irq(&port->lock);
- tty_port_set_active(port, 0);
+ uart_port_shutdown(port);
/*
- * Wake up anyone trying to open this port.
+ * It's possible for shutdown to be called after suspend if we get
+ * a DCD drop (hangup) at just the right time. Clear suspended bit so
+ * we don't try to resume a port that has been shut down.
*/
- wake_up_interruptible(&port->open_wait);
+ tty_port_set_suspended(port, 0);
- mutex_unlock(&port->mutex);
+ uart_change_pm(state, UART_PM_STATE_OFF);
- tty_ldisc_flush(tty);
- tty->closing = 0;
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
@@ -1711,52 +1686,31 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
struct uart_driver *drv = tty->driver->driver_state;
int retval, line = tty->index;
struct uart_state *state = drv->state + line;
- struct tty_port *port = &state->port;
- struct uart_port *uport;
- pr_debug("uart_open(%d) called\n", line);
+ tty->driver_data = state;
- spin_lock_irq(&port->lock);
- ++port->count;
- spin_unlock_irq(&port->lock);
+ retval = tty_port_open(&state->port, tty, filp);
+ if (retval > 0)
+ retval = 0;
- /*
- * We take the semaphore here to guarantee that we won't be re-entered
- * while allocating the state structure, or while we request any IRQs
- * that the driver may need. This also has the nice side-effect that
- * it delays the action of uart_hangup, so we can guarantee that
- * state->port.tty will always contain something reasonable.
- */
- if (mutex_lock_interruptible(&port->mutex)) {
- retval = -ERESTARTSYS;
- goto end;
- }
+ return retval;
+}
+
+static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
uport = uart_port_check(state);
- if (!uport || uport->flags & UPF_DEAD) {
- retval = -ENXIO;
- goto err_unlock;
- }
+ if (!uport || uport->flags & UPF_DEAD)
+ return -ENXIO;
- tty->driver_data = state;
- uport->state = state;
port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty_port_tty_set(port, tty);
/*
* Start up the serial port.
*/
- retval = uart_startup(tty, state, 0);
-
- /*
- * If we succeeded, wait until the port is ready.
- */
-err_unlock:
- mutex_unlock(&port->mutex);
- if (retval == 0)
- retval = tty_port_block_til_ready(port, tty, filp);
-end:
- return retval;
+ return uart_startup(tty, state, 0);
}
static const char *uart_type(struct uart_port *port)
@@ -1940,7 +1894,7 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
*
* Returns 0 on success or -EINVAL on failure
*/
-int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
{
if (strncmp(p, "mmio,", 5) == 0) {
@@ -1968,7 +1922,11 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
return -EINVAL;
}
- *addr = simple_strtoul(p, NULL, 0);
+ /*
+ * Before you replace it with kstrtoull(), think about the options
+ * separator (',') that it will not tolerate.
+ */
+ *addr = simple_strtoull(p, NULL, 0);
p = strchr(p, ',');
if (p)
p++;
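
To illustrate the comment above: for an earlycon option string such as "0x101f1000,115200n8" (address made up for the example), simple_strtoull() parses the leading number and stops at the ',' separator, whereas kstrtoull() would reject the whole string. A tiny sketch:

#include <linux/kernel.h>
#include <linux/types.h>

static resource_size_t my_parse_earlycon_addr(const char *p)
{
	/* "0x101f1000,115200n8" -> 0x101f1000; parsing stops at ',' */
	return simple_strtoull(p, NULL, 0);
}
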
@@ -2470,6 +2428,8 @@ static const struct tty_operations uart_ops = {
static const struct tty_port_operations uart_port_ops = {
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
+ .activate = uart_port_activate,
+ .shutdown = uart_tty_port_shutdown,
};
/**
@@ -2786,6 +2746,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
+ port->console = uart_console(uport);
+
/*
* If this port is a console, then the spinlock is already
* initialised.
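
The serial_core hunks above move open/close bookkeeping into the tty_port helpers; the driver-visible part shrinks to the .activate and .shutdown callbacks. A minimal sketch of that model, with hypothetical names, for comparison:

#include <linux/tty.h>

static int my_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* power up and start the hardware; a nonzero return aborts open() */
	return 0;
}

static void my_port_shutdown(struct tty_port *port)
{
	/* called once the last user is gone: quiesce the hardware */
}

static const struct tty_port_operations my_port_ops = {
	.activate = my_port_activate,
	.shutdown = my_port_shutdown,
};
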
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index d86eee38aae6..4b26252c2885 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2533,7 +2533,7 @@ static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
-static struct uart_ops sci_uart_ops = {
+static const struct uart_ops sci_uart_ops = {
.tx_empty = sci_tx_empty,
.set_mctrl = sci_set_mctrl,
.get_mctrl = sci_get_mctrl,
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 2d78cb3627ae..379e5bd37df9 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -639,7 +639,7 @@ static void asc_put_poll_char(struct uart_port *port, unsigned char c)
/*---------------------------------------------------------------------*/
-static struct uart_ops asc_uart_ops = {
+static const struct uart_ops asc_uart_ops = {
.tx_empty = asc_tx_empty,
.set_mctrl = asc_set_mctrl,
.get_mctrl = asc_get_mctrl,
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index f89d1f79be18..033856287ca2 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) Maxime Coquelin 2015
- * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * Gerald Baeza <gerald.baeza@st.com>
* License terms: GNU General Public License (GPL), version 2
*
* Inspired by st-asc.c from STMicroelectronics (c)
@@ -10,120 +11,31 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
-#include <linux/serial.h>
+#include <linux/clk.h>
#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/serial_core.h>
-#include <linux/clk.h>
-
-#define DRIVER_NAME "stm32-usart"
-
-/* Register offsets */
-#define USART_SR 0x00
-#define USART_DR 0x04
-#define USART_BRR 0x08
-#define USART_CR1 0x0c
-#define USART_CR2 0x10
-#define USART_CR3 0x14
-#define USART_GTPR 0x18
-
-/* USART_SR */
-#define USART_SR_PE BIT(0)
-#define USART_SR_FE BIT(1)
-#define USART_SR_NF BIT(2)
-#define USART_SR_ORE BIT(3)
-#define USART_SR_IDLE BIT(4)
-#define USART_SR_RXNE BIT(5)
-#define USART_SR_TC BIT(6)
-#define USART_SR_TXE BIT(7)
-#define USART_SR_LBD BIT(8)
-#define USART_SR_CTS BIT(9)
-#define USART_SR_ERR_MASK (USART_SR_LBD | USART_SR_ORE | \
- USART_SR_FE | USART_SR_PE)
-/* Dummy bits */
-#define USART_SR_DUMMY_RX BIT(16)
-
-/* USART_DR */
-#define USART_DR_MASK GENMASK(8, 0)
-
-/* USART_BRR */
-#define USART_BRR_DIV_F_MASK GENMASK(3, 0)
-#define USART_BRR_DIV_M_MASK GENMASK(15, 4)
-#define USART_BRR_DIV_M_SHIFT 4
-
-/* USART_CR1 */
-#define USART_CR1_SBK BIT(0)
-#define USART_CR1_RWU BIT(1)
-#define USART_CR1_RE BIT(2)
-#define USART_CR1_TE BIT(3)
-#define USART_CR1_IDLEIE BIT(4)
-#define USART_CR1_RXNEIE BIT(5)
-#define USART_CR1_TCIE BIT(6)
-#define USART_CR1_TXEIE BIT(7)
-#define USART_CR1_PEIE BIT(8)
-#define USART_CR1_PS BIT(9)
-#define USART_CR1_PCE BIT(10)
-#define USART_CR1_WAKE BIT(11)
-#define USART_CR1_M BIT(12)
-#define USART_CR1_UE BIT(13)
-#define USART_CR1_OVER8 BIT(15)
-#define USART_CR1_IE_MASK GENMASK(8, 4)
-
-/* USART_CR2 */
-#define USART_CR2_ADD_MASK GENMASK(3, 0)
-#define USART_CR2_LBDL BIT(5)
-#define USART_CR2_LBDIE BIT(6)
-#define USART_CR2_LBCL BIT(8)
-#define USART_CR2_CPHA BIT(9)
-#define USART_CR2_CPOL BIT(10)
-#define USART_CR2_CLKEN BIT(11)
-#define USART_CR2_STOP_2B BIT(13)
-#define USART_CR2_STOP_MASK GENMASK(13, 12)
-#define USART_CR2_LINEN BIT(14)
-
-/* USART_CR3 */
-#define USART_CR3_EIE BIT(0)
-#define USART_CR3_IREN BIT(1)
-#define USART_CR3_IRLP BIT(2)
-#define USART_CR3_HDSEL BIT(3)
-#define USART_CR3_NACK BIT(4)
-#define USART_CR3_SCEN BIT(5)
-#define USART_CR3_DMAR BIT(6)
-#define USART_CR3_DMAT BIT(7)
-#define USART_CR3_RTSE BIT(8)
-#define USART_CR3_CTSE BIT(9)
-#define USART_CR3_CTSIE BIT(10)
-#define USART_CR3_ONEBIT BIT(11)
-
-/* USART_GTPR */
-#define USART_GTPR_PSC_MASK GENMASK(7, 0)
-#define USART_GTPR_GT_MASK GENMASK(15, 8)
-
-#define DRIVER_NAME "stm32-usart"
-#define STM32_SERIAL_NAME "ttyS"
-#define STM32_MAX_PORTS 6
-
-struct stm32_port {
- struct uart_port port;
- struct clk *clk;
- bool hw_flow_control;
-};
+#include <linux/serial.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty_flip.h>
+#include <linux/tty.h>
-static struct stm32_port stm32_ports[STM32_MAX_PORTS];
-static struct uart_driver stm32_usart_driver;
+#include "stm32-usart.h"
static void stm32_stop_tx(struct uart_port *port);
+static void stm32_transmit_chars(struct uart_port *port);
static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
@@ -148,19 +60,64 @@ static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
writel_relaxed(val, port->membase + reg);
}
-static void stm32_receive_chars(struct uart_port *port)
+static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
+ bool threaded)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ enum dma_status status;
+ struct dma_tx_state state;
+
+ *sr = readl_relaxed(port->membase + ofs->isr);
+
+ if (threaded && stm32_port->rx_ch) {
+ status = dmaengine_tx_status(stm32_port->rx_ch,
+ stm32_port->rx_ch->cookie,
+ &state);
+ if ((status == DMA_IN_PROGRESS) &&
+ (*last_res != state.residue))
+ return 1;
+ else
+ return 0;
+ } else if (*sr & USART_SR_RXNE) {
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned long
+stm32_get_char(struct uart_port *port, u32 *sr, int *last_res)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ unsigned long c;
+
+ if (stm32_port->rx_ch) {
+ c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
+ if ((*last_res) == 0)
+ *last_res = RX_BUF_L;
+ return c;
+ } else {
+ return readl_relaxed(port->membase + ofs->rdr);
+ }
+}
+
+static void stm32_receive_chars(struct uart_port *port, bool threaded)
{
struct tty_port *tport = &port->state->port;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
u32 sr;
char flag;
+ static int last_res = RX_BUF_L;
if (port->irq_wake)
pm_wakeup_event(tport->tty->dev, 0);
- while ((sr = readl_relaxed(port->membase + USART_SR)) & USART_SR_RXNE) {
+ while (stm32_pending_rx(port, &sr, &last_res, threaded)) {
sr |= USART_SR_DUMMY_RX;
- c = readl_relaxed(port->membase + USART_DR);
+ c = stm32_get_char(port, &sr, &last_res);
flag = TTY_NORMAL;
port->icount.rx++;
@@ -170,6 +127,10 @@ static void stm32_receive_chars(struct uart_port *port)
if (uart_handle_break(port))
continue;
} else if (sr & USART_SR_ORE) {
+ if (ofs->icr != UNDEF_REG)
+ writel_relaxed(USART_ICR_ORECF,
+ port->membase +
+ ofs->icr);
port->icount.overrun++;
} else if (sr & USART_SR_PE) {
port->icount.parity++;
@@ -197,14 +158,138 @@ static void stm32_receive_chars(struct uart_port *port)
spin_lock(&port->lock);
}
+static void stm32_tx_dma_complete(void *arg)
+{
+ struct uart_port *port = arg;
+ struct stm32_port *stm32port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ unsigned int isr;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TC),
+ 10, 100000);
+
+ if (ret)
+ dev_err(port->dev, "terminal count not set\n");
+
+ if (ofs->icr == UNDEF_REG)
+ stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+ else
+ stm32_set_bits(port, ofs->icr, USART_CR_TC);
+
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32port->tx_dma_busy = false;
+
+ /* Let's see if we have pending data to send */
+ stm32_transmit_chars(port);
+}
+
+static void stm32_transmit_chars_pio(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int isr;
+ int ret;
+
+ if (stm32_port->tx_dma_busy) {
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32_port->tx_dma_busy = false;
+ }
+
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TXE),
+ 10, 100);
+
+ if (ret)
+ dev_err(port->dev, "tx empty not set\n");
+
+ stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
+
+ writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+}
+
+static void stm32_transmit_chars_dma(struct uart_port *port)
+{
+ struct stm32_port *stm32port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_async_tx_descriptor *desc = NULL;
+ dma_cookie_t cookie;
+ unsigned int count, i;
+
+ if (stm32port->tx_dma_busy)
+ return;
+
+ stm32port->tx_dma_busy = true;
+
+ count = uart_circ_chars_pending(xmit);
+
+ if (count > TX_BUF_L)
+ count = TX_BUF_L;
+
+ if (xmit->tail < xmit->head) {
+ memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
+ } else {
+ size_t one = UART_XMIT_SIZE - xmit->tail;
+ size_t two;
+
+ if (one > count)
+ one = count;
+ two = count - one;
+
+ memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
+ if (two)
+ memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
+ }
+
+ desc = dmaengine_prep_slave_single(stm32port->tx_ch,
+ stm32port->tx_dma_buf,
+ count,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+
+ if (!desc) {
+ for (i = count; i > 0; i--)
+ stm32_transmit_chars_pio(port);
+ return;
+ }
+
+ desc->callback = stm32_tx_dma_complete;
+ desc->callback_param = port;
+
+ /* Push current DMA TX transaction in the pending queue */
+ cookie = dmaengine_submit(desc);
+
+ /* Issue pending DMA TX requests */
+ dma_async_issue_pending(stm32port->tx_ch);
+
+ stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+ stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
+}
+
static void stm32_transmit_chars(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
- writel_relaxed(port->x_char, port->membase + USART_DR);
+ if (stm32_port->tx_dma_busy)
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
+ if (stm32_port->tx_dma_busy)
+ stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
}
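
stm32_tx_dma_complete() above waits for the transfer-complete flag with readl_relaxed_poll_timeout_atomic(). A minimal sketch of how that helper is used, with a placeholder register and bit:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_SR_TC	BIT(6)	/* placeholder transfer-complete bit */

static int my_wait_tx_complete(void __iomem *isr_reg)
{
	u32 isr;

	/* poll every 10 us, give up after 100 ms (returns -ETIMEDOUT then) */
	return readl_relaxed_poll_timeout_atomic(isr_reg, isr,
						 isr & MY_SR_TC, 10, 100000);
}
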
@@ -218,9 +303,10 @@ static void stm32_transmit_chars(struct uart_port *port)
return;
}
- writel_relaxed(xmit->buf[xmit->tail], port->membase + USART_DR);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
+ if (stm32_port->tx_ch)
+ stm32_transmit_chars_dma(port);
+ else
+ stm32_transmit_chars_pio(port);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -232,34 +318,60 @@ static void stm32_transmit_chars(struct uart_port *port)
static irqreturn_t stm32_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
spin_lock(&port->lock);
- sr = readl_relaxed(port->membase + USART_SR);
+ sr = readl_relaxed(port->membase + ofs->isr);
- if (sr & USART_SR_RXNE)
- stm32_receive_chars(port);
+ if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
+ stm32_receive_chars(port, false);
- if (sr & USART_SR_TXE)
+ if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
stm32_transmit_chars(port);
spin_unlock(&port->lock);
+ if (stm32_port->rx_ch)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+
+ spin_lock(&port->lock);
+
+ if (stm32_port->rx_ch)
+ stm32_receive_chars(port, true);
+
+ spin_unlock(&port->lock);
+
return IRQ_HANDLED;
}
static unsigned int stm32_tx_empty(struct uart_port *port)
{
- return readl_relaxed(port->membase + USART_SR) & USART_SR_TXE;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
}
static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
- stm32_set_bits(port, USART_CR3, USART_CR3_RTSE);
+ stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
else
- stm32_clr_bits(port, USART_CR3, USART_CR3_RTSE);
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
}
static unsigned int stm32_get_mctrl(struct uart_port *port)
@@ -271,7 +383,10 @@ static unsigned int stm32_get_mctrl(struct uart_port *port)
/* Transmit stop */
static void stm32_stop_tx(struct uart_port *port)
{
- stm32_clr_bits(port, USART_CR1, USART_CR1_TXEIE);
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
/* There are probably characters waiting to be transmitted. */
@@ -282,33 +397,40 @@ static void stm32_start_tx(struct uart_port *port)
if (uart_circ_empty(xmit))
return;
- stm32_set_bits(port, USART_CR1, USART_CR1_TXEIE | USART_CR1_TE);
+ stm32_transmit_chars(port);
}
/* Throttle the remote when input buffer is about to overflow. */
static void stm32_throttle(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- stm32_clr_bits(port, USART_CR1, USART_CR1_RXNEIE);
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_RXNEIE);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_unthrottle(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- stm32_set_bits(port, USART_CR1, USART_CR1_RXNEIE);
+ stm32_set_bits(port, ofs->cr1, USART_CR1_RXNEIE);
spin_unlock_irqrestore(&port->lock, flags);
}
/* Receive stop */
static void stm32_stop_rx(struct uart_port *port)
{
- stm32_clr_bits(port, USART_CR1, USART_CR1_RXNEIE);
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_RXNEIE);
}
/* Handle breaks - ignored by us */
@@ -318,26 +440,34 @@ static void stm32_break_ctl(struct uart_port *port, int break_state)
static int stm32_startup(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const char *name = to_platform_device(port->dev)->name;
u32 val;
int ret;
- ret = request_irq(port->irq, stm32_interrupt, 0, name, port);
+ ret = request_threaded_irq(port->irq, stm32_interrupt,
+ stm32_threaded_interrupt,
+ IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
val = USART_CR1_RXNEIE | USART_CR1_TE | USART_CR1_RE;
- stm32_set_bits(port, USART_CR1, val);
+ stm32_set_bits(port, ofs->cr1, val);
return 0;
}
static void stm32_shutdown(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
u32 val;
val = USART_CR1_TXEIE | USART_CR1_RXNEIE | USART_CR1_TE | USART_CR1_RE;
- stm32_set_bits(port, USART_CR1, val);
+ val |= BIT(cfg->uart_enable_bit);
+ stm32_clr_bits(port, ofs->cr1, val);
free_irq(port->irq, port);
}
@@ -346,6 +476,8 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
unsigned int baud;
u32 usartdiv, mantissa, fraction, oversampling;
tcflag_t cflag = termios->c_cflag;
@@ -360,9 +492,10 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
spin_lock_irqsave(&port->lock, flags);
/* Stop serial port and reset value */
- writel_relaxed(0, port->membase + USART_CR1);
+ writel_relaxed(0, port->membase + ofs->cr1);
- cr1 = USART_CR1_TE | USART_CR1_RE | USART_CR1_UE | USART_CR1_RXNEIE;
+ cr1 = USART_CR1_TE | USART_CR1_RE | USART_CR1_RXNEIE;
+ cr1 |= BIT(cfg->uart_enable_bit);
cr2 = 0;
cr3 = 0;
@@ -371,8 +504,12 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
if (cflag & PARENB) {
cr1 |= USART_CR1_PCE;
- if ((cflag & CSIZE) == CS8)
- cr1 |= USART_CR1_M;
+ if ((cflag & CSIZE) == CS8) {
+ if (cfg->has_7bits_data)
+ cr1 |= USART_CR1_M0;
+ else
+ cr1 |= USART_CR1_M;
+ }
}
if (cflag & PARODD)
@@ -394,15 +531,15 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
*/
if (usartdiv < 16) {
oversampling = 8;
- stm32_set_bits(port, USART_CR1, USART_CR1_OVER8);
+ stm32_set_bits(port, ofs->cr1, USART_CR1_OVER8);
} else {
oversampling = 16;
- stm32_clr_bits(port, USART_CR1, USART_CR1_OVER8);
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
}
mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
fraction = usartdiv % oversampling;
- writel_relaxed(mantissa | fraction, port->membase + USART_BRR);
+ writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
uart_update_timeout(port, cflag, baud);
@@ -430,9 +567,12 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= USART_SR_DUMMY_RX;
- writel_relaxed(cr3, port->membase + USART_CR3);
- writel_relaxed(cr2, port->membase + USART_CR2);
- writel_relaxed(cr1, port->membase + USART_CR1);
+ if (stm32_port->rx_ch)
+ cr3 |= USART_CR3_DMAR;
+
+ writel_relaxed(cr3, port->membase + ofs->cr3);
+ writel_relaxed(cr2, port->membase + ofs->cr2);
+ writel_relaxed(cr1, port->membase + ofs->cr1);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -469,6 +609,8 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
{
struct stm32_port *stm32port = container_of(port,
struct stm32_port, port);
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32port->info->cfg;
unsigned long flags = 0;
switch (state) {
@@ -477,7 +619,7 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
break;
case UART_PM_STATE_OFF:
spin_lock_irqsave(&port->lock, flags);
- stm32_clr_bits(port, USART_CR1, USART_CR1_UE);
+ stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
clk_disable_unprepare(stm32port->clk);
break;
@@ -539,8 +681,6 @@ static int stm32_init_port(struct stm32_port *stm32port,
if (!stm32port->port.uartclk)
ret = -EINVAL;
- clk_disable_unprepare(stm32port->clk);
-
return ret;
}
@@ -560,30 +700,162 @@ static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
return NULL;
stm32_ports[id].hw_flow_control = of_property_read_bool(np,
- "auto-flow-control");
+ "st,hw-flow-ctrl");
stm32_ports[id].port.line = id;
return &stm32_ports[id];
}
#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
- { .compatible = "st,stm32-usart", },
- { .compatible = "st,stm32-uart", },
+ { .compatible = "st,stm32-usart", .data = &stm32f4_info},
+ { .compatible = "st,stm32-uart", .data = &stm32f4_info},
+ { .compatible = "st,stm32f7-usart", .data = &stm32f7_info},
+ { .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
{},
};
MODULE_DEVICE_TABLE(of, stm32_match);
#endif
-static int stm32_serial_probe(struct platform_device *pdev)
+static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
+ struct platform_device *pdev)
{
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ struct uart_port *port = &stm32port->port;
+ struct device *dev = &pdev->dev;
+ struct dma_slave_config config;
+ struct dma_async_tx_descriptor *desc = NULL;
+ dma_cookie_t cookie;
int ret;
+
+ /* Request DMA RX channel */
+ stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
+ if (!stm32port->rx_ch) {
+ dev_info(dev, "rx dma alloc failed\n");
+ return -ENODEV;
+ }
+ stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
+ &stm32port->rx_dma_buf,
+ GFP_KERNEL);
+ if (!stm32port->rx_buf) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
+ /* Configure DMA channel */
+ memset(&config, 0, sizeof(config));
+ config.src_addr = port->mapbase + ofs->rdr;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ ret = dmaengine_slave_config(stm32port->rx_ch, &config);
+ if (ret < 0) {
+ dev_err(dev, "rx dma channel config failed\n");
+ ret = -ENODEV;
+ goto config_err;
+ }
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
+ stm32port->rx_dma_buf,
+ RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "rx dma prep cyclic failed\n");
+ ret = -ENODEV;
+ goto config_err;
+ }
+
+ /* No callback as dma buffer is drained on usart interrupt */
+ desc->callback = NULL;
+ desc->callback_param = NULL;
+
+ /* Push current DMA transaction in the pending queue */
+ cookie = dmaengine_submit(desc);
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(stm32port->rx_ch);
+
+ return 0;
+
+config_err:
+ dma_free_coherent(&pdev->dev,
+ RX_BUF_L, stm32port->rx_buf,
+ stm32port->rx_dma_buf);
+
+alloc_err:
+ dma_release_channel(stm32port->rx_ch);
+ stm32port->rx_ch = NULL;
+
+ return ret;
+}
+
+static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
+ struct platform_device *pdev)
+{
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ struct uart_port *port = &stm32port->port;
+ struct device *dev = &pdev->dev;
+ struct dma_slave_config config;
+ int ret;
+
+ stm32port->tx_dma_busy = false;
+
+ /* Request DMA TX channel */
+ stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
+ if (!stm32port->tx_ch) {
+ dev_info(dev, "tx dma alloc failed\n");
+ return -ENODEV;
+ }
+ stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
+ &stm32port->tx_dma_buf,
+ GFP_KERNEL);
+ if (!stm32port->tx_buf) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
+ /* Configure DMA channel */
+ memset(&config, 0, sizeof(config));
+ config.dst_addr = port->mapbase + ofs->tdr;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ ret = dmaengine_slave_config(stm32port->tx_ch, &config);
+ if (ret < 0) {
+ dev_err(dev, "tx dma channel config failed\n");
+ ret = -ENODEV;
+ goto config_err;
+ }
+
+ return 0;
+
+config_err:
+ dma_free_coherent(&pdev->dev,
+ TX_BUF_L, stm32port->tx_buf,
+ stm32port->tx_dma_buf);
+
+alloc_err:
+ dma_release_channel(stm32port->tx_ch);
+ stm32port->tx_ch = NULL;
+
+ return ret;
+}
+
+static int stm32_serial_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
struct stm32_port *stm32port;
+ int ret;
stm32port = stm32_of_get_stm32_port(pdev);
if (!stm32port)
return -ENODEV;
+ match = of_match_device(stm32_match, &pdev->dev);
+ if (match && match->data)
+ stm32port->info = (struct stm32_usart_info *)match->data;
+ else
+ return -EINVAL;
+
ret = stm32_init_port(stm32port, pdev);
if (ret)
return ret;
@@ -592,6 +864,14 @@ static int stm32_serial_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = stm32_of_dma_rx_probe(stm32port, pdev);
+ if (ret)
+ dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
+
+ ret = stm32_of_dma_tx_probe(stm32port, pdev);
+ if (ret)
+ dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
+
platform_set_drvdata(pdev, &stm32port->port);
return 0;
@@ -600,6 +880,30 @@ static int stm32_serial_probe(struct platform_device *pdev)
static int stm32_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+
+ if (stm32_port->rx_ch)
+ dma_release_channel(stm32_port->rx_ch);
+
+ if (stm32_port->rx_dma_buf)
+ dma_free_coherent(&pdev->dev,
+ RX_BUF_L, stm32_port->rx_buf,
+ stm32_port->rx_dma_buf);
+
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ if (stm32_port->tx_ch)
+ dma_release_channel(stm32_port->tx_ch);
+
+ if (stm32_port->tx_dma_buf)
+ dma_free_coherent(&pdev->dev,
+ TX_BUF_L, stm32_port->tx_buf,
+ stm32_port->tx_dma_buf);
+
+ clk_disable_unprepare(stm32_port->clk);
return uart_remove_one_port(&stm32_usart_driver, port);
}
@@ -608,15 +912,21 @@ static int stm32_serial_remove(struct platform_device *pdev)
#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_console_putchar(struct uart_port *port, int ch)
{
- while (!(readl_relaxed(port->membase + USART_SR) & USART_SR_TXE))
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
cpu_relax();
- writel_relaxed(ch, port->membase + USART_DR);
+ writel_relaxed(ch, port->membase + ofs->tdr);
}
static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
{
struct uart_port *port = &stm32_ports[co->index].port;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
unsigned long flags;
u32 old_cr1, new_cr1;
int locked = 1;
@@ -629,15 +939,16 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
else
spin_lock(&port->lock);
- /* Save and disable interrupts */
- old_cr1 = readl_relaxed(port->membase + USART_CR1);
+ /* Save and disable interrupts, enable the transmitter */
+ old_cr1 = readl_relaxed(port->membase + ofs->cr1);
new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
- writel_relaxed(new_cr1, port->membase + USART_CR1);
+ new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
+ writel_relaxed(new_cr1, port->membase + ofs->cr1);
uart_console_write(port, s, cnt, stm32_console_putchar);
/* Restore interrupt state */
- writel_relaxed(old_cr1, port->membase + USART_CR1);
+ writel_relaxed(old_cr1, port->membase + ofs->cr1);
if (locked)
spin_unlock(&port->lock);
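
The stm32 driver above now registers a primary and a threaded handler with request_threaded_irq(): the primary handler does the quick register work and returns IRQ_WAKE_THREAD when the slower DMA-buffer drain should run in the thread. A minimal sketch of that split, with placeholder names:

#include <linux/interrupt.h>

static irqreturn_t my_uart_irq(int irq, void *data)
{
	/* quick work in hard-irq context (read/clear status) */
	return IRQ_WAKE_THREAD;		/* defer to my_uart_thread_irq() */
}

static irqreturn_t my_uart_thread_irq(int irq, void *data)
{
	/* slower work in process context (drain RX buffer, push to tty) */
	return IRQ_HANDLED;
}

static int my_request_uart_irq(int irq, void *dev)
{
	return request_threaded_irq(irq, my_uart_irq, my_uart_thread_irq,
				    IRQF_NO_SUSPEND, "my-uart", dev);
}
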
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
new file mode 100644
index 000000000000..41d974923102
--- /dev/null
+++ b/drivers/tty/serial/stm32-usart.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * Gerald Baeza <gerald_baeza@yahoo.fr>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#define DRIVER_NAME "stm32-usart"
+
+struct stm32_usart_offsets {
+ u8 cr1;
+ u8 cr2;
+ u8 cr3;
+ u8 brr;
+ u8 gtpr;
+ u8 rtor;
+ u8 rqr;
+ u8 isr;
+ u8 icr;
+ u8 rdr;
+ u8 tdr;
+};
+
+struct stm32_usart_config {
+ u8 uart_enable_bit; /* USART_CR1_UE */
+ bool has_7bits_data;
+};
+
+struct stm32_usart_info {
+ struct stm32_usart_offsets ofs;
+ struct stm32_usart_config cfg;
+};
+
+#define UNDEF_REG 0xff
+
+/* Register offsets */
+struct stm32_usart_info stm32f4_info = {
+ .ofs = {
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ },
+ .cfg = {
+ .uart_enable_bit = 13,
+ .has_7bits_data = false,
+ }
+};
+
+struct stm32_usart_info stm32f7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ }
+};
+
+/* USART_SR (F4) / USART_ISR (F7) */
+#define USART_SR_PE BIT(0)
+#define USART_SR_FE BIT(1)
+#define USART_SR_NF BIT(2)
+#define USART_SR_ORE BIT(3)
+#define USART_SR_IDLE BIT(4)
+#define USART_SR_RXNE BIT(5)
+#define USART_SR_TC BIT(6)
+#define USART_SR_TXE BIT(7)
+#define USART_SR_LBD BIT(8)
+#define USART_SR_CTSIF BIT(9)
+#define USART_SR_CTS BIT(10) /* F7 */
+#define USART_SR_RTOF BIT(11) /* F7 */
+#define USART_SR_EOBF BIT(12) /* F7 */
+#define USART_SR_ABRE BIT(14) /* F7 */
+#define USART_SR_ABRF BIT(15) /* F7 */
+#define USART_SR_BUSY BIT(16) /* F7 */
+#define USART_SR_CMF BIT(17) /* F7 */
+#define USART_SR_SBKF BIT(18) /* F7 */
+#define USART_SR_TEACK BIT(21) /* F7 */
+#define USART_SR_ERR_MASK (USART_SR_LBD | USART_SR_ORE | \
+ USART_SR_FE | USART_SR_PE)
+/* Dummy bits */
+#define USART_SR_DUMMY_RX BIT(16)
+
+/* USART_ICR (F7) */
+#define USART_CR_TC BIT(6)
+
+/* USART_DR */
+#define USART_DR_MASK GENMASK(8, 0)
+
+/* USART_BRR */
+#define USART_BRR_DIV_F_MASK GENMASK(3, 0)
+#define USART_BRR_DIV_M_MASK GENMASK(15, 4)
+#define USART_BRR_DIV_M_SHIFT 4
+
+/* USART_CR1 */
+#define USART_CR1_SBK BIT(0)
+#define USART_CR1_RWU BIT(1) /* F4 */
+#define USART_CR1_RE BIT(2)
+#define USART_CR1_TE BIT(3)
+#define USART_CR1_IDLEIE BIT(4)
+#define USART_CR1_RXNEIE BIT(5)
+#define USART_CR1_TCIE BIT(6)
+#define USART_CR1_TXEIE BIT(7)
+#define USART_CR1_PEIE BIT(8)
+#define USART_CR1_PS BIT(9)
+#define USART_CR1_PCE BIT(10)
+#define USART_CR1_WAKE BIT(11)
+#define USART_CR1_M BIT(12)
+#define USART_CR1_M0 BIT(12) /* F7 */
+#define USART_CR1_MME BIT(13) /* F7 */
+#define USART_CR1_CMIE BIT(14) /* F7 */
+#define USART_CR1_OVER8 BIT(15)
+#define USART_CR1_DEDT_MASK GENMASK(20, 16) /* F7 */
+#define USART_CR1_DEAT_MASK GENMASK(25, 21) /* F7 */
+#define USART_CR1_RTOIE BIT(26) /* F7 */
+#define USART_CR1_EOBIE BIT(27) /* F7 */
+#define USART_CR1_M1 BIT(28) /* F7 */
+#define USART_CR1_IE_MASK (GENMASK(8, 4) | BIT(14) | BIT(26) | BIT(27))
+
+/* USART_CR2 */
+#define USART_CR2_ADD_MASK GENMASK(3, 0) /* F4 */
+#define USART_CR2_ADDM7 BIT(4) /* F7 */
+#define USART_CR2_LBDL BIT(5)
+#define USART_CR2_LBDIE BIT(6)
+#define USART_CR2_LBCL BIT(8)
+#define USART_CR2_CPHA BIT(9)
+#define USART_CR2_CPOL BIT(10)
+#define USART_CR2_CLKEN BIT(11)
+#define USART_CR2_STOP_2B BIT(13)
+#define USART_CR2_STOP_MASK GENMASK(13, 12)
+#define USART_CR2_LINEN BIT(14)
+#define USART_CR2_SWAP BIT(15) /* F7 */
+#define USART_CR2_RXINV BIT(16) /* F7 */
+#define USART_CR2_TXINV BIT(17) /* F7 */
+#define USART_CR2_DATAINV BIT(18) /* F7 */
+#define USART_CR2_MSBFIRST BIT(19) /* F7 */
+#define USART_CR2_ABREN BIT(20) /* F7 */
+#define USART_CR2_ABRMOD_MASK GENMASK(22, 21) /* F7 */
+#define USART_CR2_RTOEN BIT(23) /* F7 */
+#define USART_CR2_ADD_F7_MASK GENMASK(31, 24) /* F7 */
+
+/* USART_CR3 */
+#define USART_CR3_EIE BIT(0)
+#define USART_CR3_IREN BIT(1)
+#define USART_CR3_IRLP BIT(2)
+#define USART_CR3_HDSEL BIT(3)
+#define USART_CR3_NACK BIT(4)
+#define USART_CR3_SCEN BIT(5)
+#define USART_CR3_DMAR BIT(6)
+#define USART_CR3_DMAT BIT(7)
+#define USART_CR3_RTSE BIT(8)
+#define USART_CR3_CTSE BIT(9)
+#define USART_CR3_CTSIE BIT(10)
+#define USART_CR3_ONEBIT BIT(11)
+#define USART_CR3_OVRDIS BIT(12) /* F7 */
+#define USART_CR3_DDRE BIT(13) /* F7 */
+#define USART_CR3_DEM BIT(14) /* F7 */
+#define USART_CR3_DEP BIT(15) /* F7 */
+#define USART_CR3_SCARCNT_MASK GENMASK(19, 17) /* F7 */
+
+/* USART_GTPR */
+#define USART_GTPR_PSC_MASK GENMASK(7, 0)
+#define USART_GTPR_GT_MASK GENMASK(15, 8)
+
+/* USART_RTOR */
+#define USART_RTOR_RTO_MASK GENMASK(23, 0) /* F7 */
+#define USART_RTOR_BLEN_MASK GENMASK(31, 24) /* F7 */
+
+/* USART_RQR */
+#define USART_RQR_ABRRQ BIT(0) /* F7 */
+#define USART_RQR_SBKRQ BIT(1) /* F7 */
+#define USART_RQR_MMRQ BIT(2) /* F7 */
+#define USART_RQR_RXFRQ BIT(3) /* F7 */
+#define USART_RQR_TXFRQ BIT(4) /* F7 */
+
+/* USART_ICR */
+#define USART_ICR_PECF BIT(0) /* F7 */
+#define USART_ICR_FFECF BIT(1) /* F7 */
+#define USART_ICR_NCF BIT(2) /* F7 */
+#define USART_ICR_ORECF BIT(3) /* F7 */
+#define USART_ICR_IDLECF BIT(4) /* F7 */
+#define USART_ICR_TCCF BIT(6) /* F7 */
+#define USART_ICR_LBDCF BIT(8) /* F7 */
+#define USART_ICR_CTSCF BIT(9) /* F7 */
+#define USART_ICR_RTOCF BIT(11) /* F7 */
+#define USART_ICR_EOBCF BIT(12) /* F7 */
+#define USART_ICR_CMCF BIT(17) /* F7 */
+
+#define STM32_SERIAL_NAME "ttyS"
+#define STM32_MAX_PORTS 6
+
+#define RX_BUF_L 200 /* dma rx buffer length */
+#define RX_BUF_P RX_BUF_L /* dma rx buffer period */
+#define TX_BUF_L 200 /* dma tx buffer length */
+
+struct stm32_port {
+ struct uart_port port;
+ struct clk *clk;
+ struct stm32_usart_info *info;
+ struct dma_chan *rx_ch; /* dma rx channel */
+ dma_addr_t rx_dma_buf; /* dma rx buffer bus address */
+ unsigned char *rx_buf; /* dma rx buffer cpu address */
+ struct dma_chan *tx_ch; /* dma tx channel */
+ dma_addr_t tx_dma_buf; /* dma tx buffer bus address */
+ unsigned char *tx_buf; /* dma tx buffer cpu address */
+ bool tx_dma_busy; /* dma tx busy */
+ bool hw_flow_control;
+};
+
+static struct stm32_port stm32_ports[STM32_MAX_PORTS];
+static struct uart_driver stm32_usart_driver;
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 512c162634a3..5da7fe40e391 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -394,7 +394,7 @@ static int timbuart_verify_port(struct uart_port *port,
return -EINVAL;
}
-static struct uart_ops timbuart_ops = {
+static const struct uart_ops timbuart_ops = {
.tx_empty = timbuart_tx_empty,
.set_mctrl = timbuart_set_mctrl,
.get_mctrl = timbuart_get_mctrl,
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 05089b6c2f30..817bb0d3f326 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -387,7 +387,7 @@ static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
}
#endif
-static struct uart_ops ulite_ops = {
+static const struct uart_ops ulite_ops = {
.tx_empty = ulite_tx_empty,
.set_mctrl = ulite_set_mctrl,
.get_mctrl = ulite_get_mctrl,
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 23cfc5e16b45..6b85adce0ac9 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -118,7 +118,7 @@ struct vt8500_port {
* have been allocated as we can't use pdev->id in
* devicetree
*/
-static unsigned long vt8500_ports_in_use;
+static DECLARE_BITMAP(vt8500_ports_in_use, VT8500_MAX_PORTS);
static inline void vt8500_write(struct uart_port *port, unsigned int val,
unsigned int off)
@@ -663,15 +663,15 @@ static int vt8500_serial_probe(struct platform_device *pdev)
if (port < 0) {
/* calculate the port id */
- port = find_first_zero_bit(&vt8500_ports_in_use,
- sizeof(vt8500_ports_in_use));
+ port = find_first_zero_bit(vt8500_ports_in_use,
+ VT8500_MAX_PORTS);
}
if (port >= VT8500_MAX_PORTS)
return -ENODEV;
/* reserve the port id */
- if (test_and_set_bit(port, &vt8500_ports_in_use)) {
+ if (test_and_set_bit(port, vt8500_ports_in_use)) {
/* port already in use - shouldn't really happen */
return -EBUSY;
}
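
The vt8500 change above switches from a bare unsigned long to DECLARE_BITMAP() so that find_first_zero_bit() is given a size in bits rather than sizeof() in bytes. A minimal sketch of the corrected pattern, with hypothetical names:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_MAX_PORTS	6

static DECLARE_BITMAP(my_ports_in_use, MY_MAX_PORTS);

/* returns a free port id, or -EBUSY when all ports are taken */
static int my_claim_port(void)
{
	int id = find_first_zero_bit(my_ports_in_use, MY_MAX_PORTS);

	if (id >= MY_MAX_PORTS || test_and_set_bit(id, my_ports_in_use))
		return -EBUSY;

	return id;
}
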
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 9ca1a4d1b66a..f37edaa5ac75 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IMR 0x10 /* Interrupt Mask */
#define CDNS_UART_ISR 0x14 /* Interrupt Status */
#define CDNS_UART_BAUDGEN 0x18 /* Baud Rate Generator */
-#define CDNS_UART_RXTOUT 0x1C /* RX Timeout */
+#define CDNS_UART_RXTOUT 0x1C /* RX Timeout */
#define CDNS_UART_RXWM 0x20 /* RX FIFO Trigger Level */
#define CDNS_UART_MODEMCR 0x24 /* Modem Control */
#define CDNS_UART_MODEMSR 0x28 /* Modem Status */
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IRRX_PWIDTH 0x3C /* IR Min Received Pulse Width */
#define CDNS_UART_IRTX_PWIDTH 0x40 /* IR Transmitted pulse Width */
#define CDNS_UART_TXWM 0x44 /* TX FIFO Trigger Level */
+#define CDNS_UART_RXBS 0x48 /* RX FIFO byte status register */
/* Control Register Bit Definitions */
#define CDNS_UART_CR_STOPBRK 0x00000100 /* Stop TX break */
@@ -79,6 +80,9 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_CR_TXRST 0x00000002 /* TX logic reset */
#define CDNS_UART_CR_RXRST 0x00000001 /* RX logic reset */
#define CDNS_UART_CR_RST_TO 0x00000040 /* Restart Timeout Counter */
+#define CDNS_UART_RXBS_PARITY 0x00000001 /* Parity error status */
+#define CDNS_UART_RXBS_FRAMING 0x00000002 /* Framing error status */
+#define CDNS_UART_RXBS_BRK 0x00000004 /* Break error status */
/*
* Mode Register:
@@ -126,13 +130,27 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
-#define CDNS_UART_RX_IRQS (CDNS_UART_IXR_PARITY | CDNS_UART_IXR_FRAMING | \
- CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_RXTRIG | \
+ /*
+ * Do not enable the parity error interrupt for the following
+ * reason: when it is enabled, each Rx parity error always
+ * results in two events, first the parity error interrupt
+ * itself and then a normal Rx interrupt carrying the incoming
+ * data. Keeping the parity error interrupt disabled gives
+ * better handling of parity error events: a parity error
+ * still shows up as an Rx interrupt with the parity error bit
+ * set in the ISR register, and is handled in the desired way.
+ */
+
+#define CDNS_UART_RX_IRQS (CDNS_UART_IXR_FRAMING | \
+ CDNS_UART_IXR_OVERRUN | \
+ CDNS_UART_IXR_RXTRIG | \
CDNS_UART_IXR_TOUT)
/* Goes in read_status_mask for break detection as the HW doesn't do it*/
-#define CDNS_UART_IXR_BRK 0x80000000
+#define CDNS_UART_IXR_BRK 0x00002000
+#define CDNS_UART_RXBS_SUPPORT BIT(1)
/*
* Modem Control register:
* The read/write Modem Control register controls the interface with the modem
@@ -172,46 +190,66 @@ struct cdns_uart {
struct clk *pclk;
unsigned int baud;
struct notifier_block clk_rate_change_nb;
+ u32 quirks;
+};
+struct cdns_platform_data {
+ u32 quirks;
};
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
clk_rate_change_nb);
-static void cdns_uart_handle_rx(struct uart_port *port, unsigned int isrstatus)
+/**
+ * cdns_uart_handle_rx - Handle the received bytes along with Rx errors.
+ * @dev_id: Id of the UART port
+ * @isrstatus: The interrupt status register value as read
+ * Return: None
+ */
+static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
{
- /*
- * There is no hardware break detection, so we interpret framing
- * error with all-zeros data as a break sequence. Most of the time,
- * there's another non-zero byte at the end of the sequence.
- */
- if (isrstatus & CDNS_UART_IXR_FRAMING) {
- while (!(readl(port->membase + CDNS_UART_SR) &
- CDNS_UART_SR_RXEMPTY)) {
- if (!readl(port->membase + CDNS_UART_FIFO)) {
+ struct uart_port *port = (struct uart_port *)dev_id;
+ struct cdns_uart *cdns_uart = port->private_data;
+ unsigned int data;
+ unsigned int rxbs_status = 0;
+ unsigned int status_mask;
+ unsigned int framerrprocessed = 0;
+ char status = TTY_NORMAL;
+ bool is_rxbs_support;
+
+ is_rxbs_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
+
+ while ((readl(port->membase + CDNS_UART_SR) &
+ CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
+ if (is_rxbs_support)
+ rxbs_status = readl(port->membase + CDNS_UART_RXBS);
+ data = readl(port->membase + CDNS_UART_FIFO);
+ port->icount.rx++;
+ /*
+ * There is no hardware break detection in Zynq, so we interpret
+ * framing error with all-zeros data as a break sequence.
+ * Most of the time, there's another non-zero byte at the
+ * end of the sequence.
+ */
+ if (!is_rxbs_support && (isrstatus & CDNS_UART_IXR_FRAMING)) {
+ if (!data) {
port->read_status_mask |= CDNS_UART_IXR_BRK;
- isrstatus &= ~CDNS_UART_IXR_FRAMING;
+ framerrprocessed = 1;
+ continue;
}
}
- writel(CDNS_UART_IXR_FRAMING, port->membase + CDNS_UART_ISR);
- }
-
- /* drop byte with parity error if IGNPAR specified */
- if (isrstatus & port->ignore_status_mask & CDNS_UART_IXR_PARITY)
- isrstatus &= ~(CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT);
-
- isrstatus &= port->read_status_mask;
- isrstatus &= ~port->ignore_status_mask;
-
- if (!(isrstatus & (CDNS_UART_IXR_TOUT | CDNS_UART_IXR_RXTRIG)))
- return;
-
- while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)) {
- u32 data;
- char status = TTY_NORMAL;
+ if (is_rxbs_support && (rxbs_status & CDNS_UART_RXBS_BRK)) {
+ port->icount.brk++;
+ status = TTY_BREAK;
+ if (uart_handle_break(port))
+ continue;
+ }
- data = readl(port->membase + CDNS_UART_FIFO);
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
+ status_mask = port->read_status_mask;
+ status_mask &= ~port->ignore_status_mask;
- /* Non-NULL byte after BREAK is garbage (99%) */
- if (data && (port->read_status_mask & CDNS_UART_IXR_BRK)) {
+ if (data &&
+ (port->read_status_mask & CDNS_UART_IXR_BRK)) {
port->read_status_mask &= ~CDNS_UART_IXR_BRK;
port->icount.brk++;
if (uart_handle_break(port))
@@ -221,57 +259,83 @@ static void cdns_uart_handle_rx(struct uart_port *port, unsigned int isrstatus)
if (uart_handle_sysrq_char(port, data))
continue;
- port->icount.rx++;
-
- if (isrstatus & CDNS_UART_IXR_PARITY) {
- port->icount.parity++;
- status = TTY_PARITY;
- } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
- port->icount.frame++;
- status = TTY_FRAME;
- } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
+ if (is_rxbs_support) {
+ if ((rxbs_status & CDNS_UART_RXBS_PARITY)
+ && (status_mask & CDNS_UART_IXR_PARITY)) {
+ port->icount.parity++;
+ status = TTY_PARITY;
+ }
+ if ((rxbs_status & CDNS_UART_RXBS_FRAMING)
+ && (status_mask & CDNS_UART_IXR_FRAMING)) {
+ port->icount.frame++;
+ status = TTY_FRAME;
+ }
+ } else {
+ if (isrstatus & CDNS_UART_IXR_PARITY) {
+ port->icount.parity++;
+ status = TTY_PARITY;
+ }
+ if ((isrstatus & CDNS_UART_IXR_FRAMING) &&
+ !framerrprocessed) {
+ port->icount.frame++;
+ status = TTY_FRAME;
+ }
+ }
+ if (isrstatus & CDNS_UART_IXR_OVERRUN) {
port->icount.overrun++;
+ tty_insert_flip_char(&port->state->port, 0,
+ TTY_OVERRUN);
}
-
- uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
- data, status);
+ tty_insert_flip_char(&port->state->port, data, status);
+ isrstatus = 0;
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
}
-static void cdns_uart_handle_tx(struct uart_port *port)
+/**
+ * cdns_uart_handle_tx - Handle the bytes to be Txed.
+ * @dev_id: Id of the UART port
+ * Return: None
+ */
+static void cdns_uart_handle_tx(void *dev_id)
{
+ struct uart_port *port = (struct uart_port *)dev_id;
unsigned int numbytes;
if (uart_circ_empty(&port->state->xmit)) {
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
- return;
- }
-
- numbytes = port->fifosize;
- while (numbytes && !uart_circ_empty(&port->state->xmit) &&
- !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
- /*
- * Get the data from the UART circular buffer
- * and write it to the cdns_uart's TX_FIFO
- * register.
- */
- writel(port->state->xmit.buf[port->state->xmit.tail],
- port->membase + CDNS_UART_FIFO);
- port->icount.tx++;
-
- /*
- * Adjust the tail of the UART buffer and wrap
- * the buffer if it reaches limit.
- */
- port->state->xmit.tail =
- (port->state->xmit.tail + 1) & (UART_XMIT_SIZE - 1);
+ } else {
+ numbytes = port->fifosize;
+ while (numbytes && !uart_circ_empty(&port->state->xmit) &&
+ !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+ /*
+ * Get the data from the UART circular buffer
+ * and write it to the cdns_uart's TX_FIFO
+ * register.
+ */
+ writel(
+ port->state->xmit.buf[port->state->xmit.
+ tail], port->membase + CDNS_UART_FIFO);
+
+ port->icount.tx++;
+
+ /*
+ * Adjust the tail of the UART buffer and wrap
+ * the buffer if it reaches limit.
+ */
+ port->state->xmit.tail =
+ (port->state->xmit.tail + 1) &
+ (UART_XMIT_SIZE - 1);
+
+ numbytes--;
+ }
- numbytes--;
+ if (uart_circ_chars_pending(
+ &port->state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
}
-
- if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
}
/**
@@ -284,27 +348,24 @@ static void cdns_uart_handle_tx(struct uart_port *port)
static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
- unsigned long flags;
unsigned int isrstatus;
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock(&port->lock);
/* Read the interrupt status register to determine which
- * interrupt(s) is/are active.
+ * interrupt(s) is/are active and clear them.
*/
isrstatus = readl(port->membase + CDNS_UART_ISR);
-
- if (isrstatus & CDNS_UART_RX_IRQS)
- cdns_uart_handle_rx(port, isrstatus);
-
- if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY)
- cdns_uart_handle_tx(port);
-
writel(isrstatus, port->membase + CDNS_UART_ISR);
- /* be sure to release the lock and tty before leaving */
- spin_unlock_irqrestore(&port->lock, flags);
+ if (isrstatus & CDNS_UART_IXR_TXEMPTY) {
+ cdns_uart_handle_tx(dev_id);
+ isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
+ }
+ if (isrstatus & CDNS_UART_IXR_MASK)
+ cdns_uart_handle_rx(dev_id, isrstatus);
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
@@ -653,6 +714,10 @@ static void cdns_uart_set_termios(struct uart_port *port,
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
+ while (readl(port->membase + CDNS_UART_CR) &
+ (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
+ cpu_relax();
+
/*
* Clear the RX disable and TX disable bits and then set the TX enable
* bit and RX enable bit to enable the transmitter and receiver.
@@ -736,10 +801,14 @@ static void cdns_uart_set_termios(struct uart_port *port,
*/
static int cdns_uart_startup(struct uart_port *port)
{
+ struct cdns_uart *cdns_uart = port->private_data;
+ bool is_brk_support;
int ret;
unsigned long flags;
unsigned int status = 0;
+ is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
+
spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX */
@@ -752,6 +821,10 @@ static int cdns_uart_startup(struct uart_port *port)
writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
port->membase + CDNS_UART_CR);
+ while (readl(port->membase + CDNS_UART_CR) &
+ (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
+ cpu_relax();
+
/*
* Clear the RX disable bit and then set the RX enable bit to enable
* the receiver.
@@ -794,7 +867,11 @@ static int cdns_uart_startup(struct uart_port *port)
}
/* Set the Interrupt Registers with desired interrupts */
- writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER);
+ if (is_brk_support)
+ writel(CDNS_UART_RX_IRQS | CDNS_UART_IXR_BRK,
+ port->membase + CDNS_UART_IER);
+ else
+ writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER);
return 0;
}
@@ -993,7 +1070,7 @@ static void cdns_uart_pm(struct uart_port *port, unsigned int state,
}
}
-static struct uart_ops cdns_uart_ops = {
+static const struct uart_ops cdns_uart_ops = {
.set_mctrl = cdns_uart_set_mctrl,
.get_mctrl = cdns_uart_get_mctrl,
.start_tx = cdns_uart_start_tx,
@@ -1088,9 +1165,34 @@ static void __init cdns_early_write(struct console *con, const char *s,
static int __init cdns_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- if (!device->port.membase)
+ struct uart_port *port = &device->port;
+
+ if (!port->membase)
return -ENODEV;
+ /* initialise control register */
+ writel(CDNS_UART_CR_TX_EN|CDNS_UART_CR_TXRST|CDNS_UART_CR_RXRST,
+ port->membase + CDNS_UART_CR);
+
+ /* only set baud if specified on command line - otherwise
+ * assume it has been initialized by a boot loader.
+ */
+ if (device->baud) {
+ u32 cd = 0, bdiv = 0;
+ u32 mr;
+ int div8;
+
+ cdns_uart_calc_baud_divs(port->uartclk, device->baud,
+ &bdiv, &cd, &div8);
+ mr = CDNS_UART_MR_PARITY_NONE;
+ if (div8)
+ mr |= CDNS_UART_MR_CLKSEL;
+
+ writel(mr, port->membase + CDNS_UART_MR);
+ writel(cd, port->membase + CDNS_UART_BAUDGEN);
+ writel(bdiv, port->membase + CDNS_UART_BAUDDIV);
+ }
+
device->con->write = cdns_early_write;
return 0;
@@ -1328,6 +1430,18 @@ static int cdns_uart_resume(struct device *device)
static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
cdns_uart_resume);
+static const struct cdns_platform_data zynqmp_uart_def = {
+ .quirks = CDNS_UART_RXBS_SUPPORT, };
+
+/* Match table for of_platform binding */
+static const struct of_device_id cdns_uart_of_match[] = {
+ { .compatible = "xlnx,xuartps", },
+ { .compatible = "cdns,uart-r1p8", },
+ { .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
+
/**
* cdns_uart_probe - Platform driver probe
* @pdev: Pointer to the platform device structure
@@ -1340,12 +1454,20 @@ static int cdns_uart_probe(struct platform_device *pdev)
struct uart_port *port;
struct resource *res;
struct cdns_uart *cdns_uart_data;
+ const struct of_device_id *match;
cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
GFP_KERNEL);
if (!cdns_uart_data)
return -ENOMEM;
+ match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ const struct cdns_platform_data *data = match->data;
+
+ cdns_uart_data->quirks = data->quirks;
+ }
+
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(cdns_uart_data->pclk)) {
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
@@ -1471,14 +1593,6 @@ static int cdns_uart_remove(struct platform_device *pdev)
return rc;
}
-/* Match table for of_platform binding */
-static const struct of_device_id cdns_uart_of_match[] = {
- { .compatible = "xlnx,xuartps", },
- { .compatible = "cdns,uart-r1p8", },
- {}
-};
-MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
-
static struct platform_driver cdns_uart_platform_driver = {
.probe = cdns_uart_probe,
.remove = cdns_uart_remove,
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 2705ca960e92..06fb39c1d6dd 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1312,12 +1312,12 @@ static int vc_t416_color(struct vc_data *vc, int i,
if (i > vc->vc_npar)
return i;
- if (vc->vc_par[i] == 5 && i < vc->vc_npar) {
- /* 256 colours -- ubiquitous */
+ if (vc->vc_par[i] == 5 && i + 1 <= vc->vc_npar) {
+ /* 256 colours */
i++;
rgb_from_256(vc->vc_par[i], &c);
- } else if (vc->vc_par[i] == 2 && i <= vc->vc_npar + 3) {
- /* 24 bit -- extremely rare */
+ } else if (vc->vc_par[i] == 2 && i + 3 <= vc->vc_npar) {
+ /* 24 bit */
c.r = vc->vc_par[i + 1];
c.g = vc->vc_par[i + 2];
c.b = vc->vc_par[i + 3];
@@ -1415,6 +1415,11 @@ static void csi_m(struct vc_data *vc)
(vc->vc_color & 0x0f);
break;
default:
+ if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) {
+ if (vc->vc_par[i] < 100)
+ vc->vc_intensity = 2;
+ vc->vc_par[i] -= 60;
+ }
if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
vc->vc_color = color_table[vc->vc_par[i] - 30]
| (vc->vc_color & 0xf0);
@@ -3182,11 +3187,11 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
pr_info("Console: switching ");
if (!deflt)
- printk("consoles %d-%d ", first+1, last+1);
+ printk(KERN_CONT "consoles %d-%d ", first+1, last+1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
- printk("to %s %s %dx%d\n",
+ printk(KERN_CONT "to %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
desc, vc->vc_cols, vc->vc_rows);
@@ -3195,7 +3200,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
update_screen(vc);
}
} else
- printk("to %s\n", desc);
+ printk(KERN_CONT "to %s\n", desc);
retval = 0;
err:
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 915facbf552e..e1134a4d97f3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
++uiomem;
}
- priv->dmem_region_start = i;
+ priv->dmem_region_start = uiomem - &uioinfo->mem[0];
priv->num_dmem_regions = pdata->num_dynamic_regions;
for (i = 0; i < pdata->num_dynamic_regions; ++i) {
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 8689dcba5201..644e978cbd3e 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -152,7 +152,8 @@ source "drivers/usb/gadget/Kconfig"
config USB_LED_TRIG
bool "USB LED Triggers"
- depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
+ depends on LEDS_CLASS && LEDS_TRIGGERS
+ select USB_COMMON
help
This option adds LED triggers for USB host and/or gadget activity.
@@ -160,4 +161,25 @@ config USB_LED_TRIG
LEDs and you want to use them as activity indicators for USB host or
gadget.
+config USB_ULPI_BUS
+ tristate "USB ULPI PHY interface support"
+ select USB_COMMON
+ help
+ UTMI+ Low Pin Interface (ULPI) is a specification for a commonly used
+ USB 2.0 PHY interface. The ULPI specification defines a standard set
+ of registers that can be used to detect the vendor and product which
+ allows ULPI to be handled as a bus. This module is the driver for that
+ bus.
+
+ The ULPI interfaces (the buses) are registered by the drivers for USB
+ controllers which support ULPI register access and have a ULPI PHY
+ attached to them. The ULPI PHY drivers themselves are normal PHY
+ drivers.
+
+ ULPI PHYs often provide functions such as ADP sensing/probing (OTG
+ protocol) and USB charger detection.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ulpi.
+
endif # USB_SUPPORT
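For orientation, the help text above describes the ULPI bus that the relocated USB_ULPI_BUS option enables. A minimal sketch of a PHY driver registering against that bus could look roughly as follows; it assumes the usual <linux/ulpi/driver.h> interface (struct ulpi_driver, ulpi_register_driver()/ulpi_unregister_driver()), and the foo_* names are purely illustrative, not part of this patch.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/ulpi/driver.h>

static int foo_ulpi_probe(struct ulpi *ulpi)
{
	/* The ULPI core reads the vendor/product IDs before probing us. */
	dev_info(&ulpi->dev, "found PHY, vendor %04x product %04x\n",
		 ulpi->id.vendor, ulpi->id.product);
	return 0;
}

static void foo_ulpi_remove(struct ulpi *ulpi)
{
	/* Nothing to tear down in this sketch. */
}

static struct ulpi_driver foo_ulpi_driver = {
	.probe	= foo_ulpi_probe,
	.remove	= foo_ulpi_remove,
	.driver	= {
		.name	= "foo_ulpi",
	},
};

static int __init foo_ulpi_init(void)
{
	/* ulpi_register_driver() is expected to supply THIS_MODULE here. */
	return ulpi_register_driver(&foo_ulpi_driver);
}
module_init(foo_ulpi_init);

static void __exit foo_ulpi_exit(void)
{
	ulpi_unregister_driver(&foo_ulpi_driver);
}
module_exit(foo_ulpi_exit);

MODULE_LICENSE("GPL v2");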
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 0a866e90b49c..f9fe86b6f7b5 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1139,10 +1139,8 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
/* instance init */
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance) {
- usb_dbg(usbatm_instance, "cxacru_bind: no memory for instance data\n");
+ if (!instance)
return -ENOMEM;
- }
instance->usbatm = usbatm_instance;
instance->modem_type = (struct cxacru_modem_type *) id->driver_info;
@@ -1168,13 +1166,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
}
instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!instance->rcv_urb) {
- usb_dbg(usbatm_instance, "cxacru_bind: no memory for rcv_urb\n");
ret = -ENOMEM;
goto fail;
}
instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!instance->snd_urb) {
- usb_dbg(usbatm_instance, "cxacru_bind: no memory for snd_urb\n");
ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 0270d1312f83..5083eb5b0d5e 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -817,7 +817,6 @@ static int speedtch_bind(struct usbatm_data *usbatm,
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
- usb_err(usbatm, "%s: no memory for instance data!\n", __func__);
ret = -ENOMEM;
goto fail_release;
}
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4333dc576a12..df67815f74e6 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2196,17 +2196,12 @@ static int uea_boot(struct uea_softc *sc)
load_XILINX_firmware(sc);
intr = kmalloc(size, GFP_KERNEL);
- if (!intr) {
- uea_err(INS_TO_USBDEV(sc),
- "cannot allocate interrupt package\n");
+ if (!intr)
goto err0;
- }
sc->urb_int = usb_alloc_urb(0, GFP_KERNEL);
- if (!sc->urb_int) {
- uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n");
+ if (!sc->urb_int)
goto err1;
- }
usb_fill_int_urb(sc->urb_int, sc->usb_dev,
usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
@@ -2561,10 +2556,8 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
}
sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL);
- if (!sc) {
- uea_err(usb, "uea_init: not enough memory !\n");
+ if (!sc)
return -ENOMEM;
- }
sc->usb_dev = usb;
usbatm->driver_data = sc;
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index db322d9ccb6e..4dec9df8764b 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -819,7 +819,6 @@ static int usbatm_atm_open(struct atm_vcc *vcc)
new = kzalloc(sizeof(struct usbatm_vcc_data), GFP_KERNEL);
if (!new) {
- atm_err(instance, "%s: no memory for vcc_data!\n", __func__);
ret = -ENOMEM;
goto fail;
}
@@ -1032,10 +1031,8 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
/* instance init */
instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL);
- if (!instance) {
- dev_err(dev, "%s: no memory for instance data!\n", __func__);
+ if (!instance)
return -ENOMEM;
- }
/* public fields */
@@ -1141,7 +1138,6 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
urb = usb_alloc_urb(iso_packets, GFP_KERNEL);
if (!urb) {
- dev_err(dev, "%s: no memory for urb %d!\n", __func__, i);
error = -ENOMEM;
goto fail_unbind;
}
@@ -1151,7 +1147,6 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
/* zero the tx padding to avoid leaking information */
buffer = kzalloc(channel->buf_size, GFP_KERNEL);
if (!buffer) {
- dev_err(dev, "%s: no memory for buffer %d!\n", __func__, i);
error = -ENOMEM;
goto fail_unbind;
}
@@ -1182,7 +1177,6 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
instance->cell_buf = kmalloc(instance->rx_channel.stride, GFP_KERNEL);
if (!instance->cell_buf) {
- dev_err(dev, "%s: no memory for cell buffer!\n", __func__);
error = -ENOMEM;
goto fail_unbind;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index dedc33e589f4..099179457f60 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -140,6 +140,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
if (of_find_property(np, "disable-over-current", NULL))
data->disable_oc = 1;
+ if (of_find_property(np, "over-current-active-high", NULL))
+ data->oc_polarity = 1;
+
if (of_find_property(np, "external-vbus-divider", NULL))
data->evdo = 1;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 635717e9354a..409aa5ca8dda 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -17,6 +17,7 @@ struct imx_usbmisc_data {
int index;
unsigned int disable_oc:1; /* over current detect disabled */
+ unsigned int oc_polarity:1; /* over current polarity if oc enabled */
unsigned int evdo:1; /* set external vbus divider option */
};
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 053bac9d983c..96ae69502c86 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -81,12 +81,15 @@ static int ehci_ci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct ci_hdrc *ci = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int ret;
ret = ehci_setup(hcd);
if (ret)
return ret;
+ ehci->need_io_watchdog = 0;
+
ci_platform_configure(ci);
return ret;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b93356834bb5..661f43fe0f9e 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -59,7 +59,7 @@ ctrl_endpt_in_desc = {
*/
static inline int hw_ep_bit(int num, int dir)
{
- return num + (dir ? 16 : 0);
+ return num + ((dir == TX) ? 16 : 0);
}
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
@@ -121,9 +121,8 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
*/
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
- hw_ep_flush(ci, num, dir);
hw_write(ci, OP_ENDPTCTRL + num,
- dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+ (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
return 0;
}
@@ -139,7 +138,7 @@ static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
u32 mask, data;
- if (dir) {
+ if (dir == TX) {
mask = ENDPTCTRL_TXT; /* type */
data = type << __ffs(mask);
@@ -171,7 +170,7 @@ static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
*/
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
- u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+ u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
}
@@ -188,6 +187,9 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
int n = hw_ep_bit(num, dir);
+ /* Synchronize before ep prime */
+ wmb();
+
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
@@ -218,8 +220,8 @@ static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
do {
enum ci_hw_regs reg = OP_ENDPTCTRL + num;
- u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
- u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+ u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+ u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
/* data toggle - reserved for EP0 but it's in ESS */
hw_write(ci, reg, mask_xs|mask_xr,
@@ -348,8 +350,7 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
if (node == NULL)
return -ENOMEM;
- node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC,
- &node->dma);
+ node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
if (node->ptr == NULL) {
kfree(node);
return -ENOMEM;
@@ -506,8 +507,6 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
}
- wmb(); /* synchronize before ep prime */
-
ret = hw_ep_prime(ci, hwep->num, hwep->dir,
hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
@@ -534,9 +533,6 @@ static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
hwep->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
- /* Synchronize before ep prime */
- wmb();
-
return hw_ep_prime(ci, hwep->num, hwep->dir,
hwep->type == USB_ENDPOINT_XFER_CONTROL);
}
@@ -590,7 +586,7 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
}
if (remaining_length) {
- if (hwep->dir) {
+ if (hwep->dir == TX) {
hwreq->req.status = -EPROTO;
break;
}
@@ -1051,9 +1047,9 @@ __acquires(ci->lock)
if (req.wLength != 0)
break;
num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
+ dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
+ if (dir == TX)
num += ci->hw_ep_max / 2;
if (!ci->ci_hw_ep[num].wedge) {
spin_unlock(&ci->lock);
@@ -1103,9 +1099,9 @@ __acquires(ci->lock)
if (req.wLength != 0)
break;
num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
+ dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
+ if (dir == TX)
num += ci->hw_ep_max / 2;
spin_unlock(&ci->lock);
@@ -1680,12 +1676,10 @@ static int init_eps(struct ci_hdrc *ci)
usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
INIT_LIST_HEAD(&hwep->qh.queue);
- hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
- &hwep->qh.dma);
+ hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
+ &hwep->qh.dma);
if (hwep->qh.ptr == NULL)
retval = -ENOMEM;
- else
- memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
/*
* set up shorthands for ep0 out and in endpoints,
@@ -1999,7 +1993,7 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO;
- rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
+ rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index ab8b027e8cc8..20d02a5e418d 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -56,6 +56,7 @@
#define MX6_BM_NON_BURST_SETTING BIT(1)
#define MX6_BM_OVER_CUR_DIS BIT(7)
+#define MX6_BM_OVER_CUR_POLARITY BIT(8)
#define MX6_BM_WAKEUP_ENABLE BIT(10)
#define MX6_BM_ID_WAKEUP BIT(16)
#define MX6_BM_VBUS_WAKEUP BIT(17)
@@ -266,11 +267,14 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
spin_lock_irqsave(&usbmisc->lock, flags);
+ reg = readl(usbmisc->base + data->index * 4);
if (data->disable_oc) {
- reg = readl(usbmisc->base + data->index * 4);
- writel(reg | MX6_BM_OVER_CUR_DIS,
- usbmisc->base + data->index * 4);
+ reg |= MX6_BM_OVER_CUR_DIS;
+ } else if (data->oc_polarity == 1) {
+ /* High active */
+ reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
}
+ writel(reg, usbmisc->base + data->index * 4);
/* SoC non-burst setting */
reg = readl(usbmisc->base + data->index * 4);
@@ -365,10 +369,14 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
return -EINVAL;
spin_lock_irqsave(&usbmisc->lock, flags);
+ reg = readl(usbmisc->base);
if (data->disable_oc) {
- reg = readl(usbmisc->base);
- writel(reg | MX6_BM_OVER_CUR_DIS, usbmisc->base);
+ reg |= MX6_BM_OVER_CUR_DIS;
+ } else if (data->oc_polarity == 1) {
+ /* High active */
+ reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
}
+ writel(reg, usbmisc->base);
reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
@@ -492,6 +500,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
.compatible = "fsl,imx6ul-usbmisc",
.data = &imx6sx_usbmisc_ops,
},
+ {
+ .compatible = "fsl,imx7d-usbmisc",
+ .data = &imx7d_usbmisc_ops,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0f3f62e81e5b..78f0f85bebdc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -368,17 +368,17 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
if (!test_and_clear_bit(index, &acm->read_urbs_free))
return 0;
- dev_vdbg(&acm->data->dev, "%s - urb %d\n", __func__, index);
-
res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
if (res != -EPERM) {
dev_err(&acm->data->dev,
- "%s - usb_submit_urb failed: %d\n",
- __func__, res);
+ "urb %d failed submission with %d\n",
+ index, res);
}
set_bit(index, &acm->read_urbs_free);
return res;
+ } else {
+ dev_vdbg(&acm->data->dev, "submitted urb %d\n", index);
}
return 0;
@@ -415,8 +415,9 @@ static void acm_read_bulk_callback(struct urb *urb)
unsigned long flags;
int status = urb->status;
- dev_vdbg(&acm->data->dev, "%s - urb %d, len %d\n", __func__,
- rb->index, urb->actual_length);
+ dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
+ rb->index, urb->actual_length,
+ status);
if (!acm->dev) {
set_bit(rb->index, &acm->read_urbs_free);
@@ -426,8 +427,6 @@ static void acm_read_bulk_callback(struct urb *urb)
if (status) {
set_bit(rb->index, &acm->read_urbs_free);
- dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
- __func__, status);
if ((status != -ENOENT) || (urb->actual_length == 0))
return;
}
@@ -462,8 +461,7 @@ static void acm_write_bulk(struct urb *urb)
int status = urb->status;
if (status || (urb->actual_length != urb->transfer_buffer_length))
- dev_vdbg(&acm->data->dev, "%s - len %d/%d, status %d\n",
- __func__,
+ dev_vdbg(&acm->data->dev, "wrote len %d/%d, status %d\n",
urb->actual_length,
urb->transfer_buffer_length,
status);
@@ -478,8 +476,6 @@ static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
- dev_vdbg(&acm->data->dev, "%s\n", __func__);
-
tty_port_tty_wakeup(&acm->port);
}
@@ -492,8 +488,6 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
struct acm *acm;
int retval;
- dev_dbg(tty->dev, "%s\n", __func__);
-
acm = acm_get_by_minor(tty->index);
if (!acm)
return -ENODEV;
@@ -515,8 +509,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
- dev_dbg(tty->dev, "%s\n", __func__);
-
return tty_port_open(&acm->port, tty, filp);
}
@@ -545,8 +537,6 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
int retval = -ENODEV;
int i;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
-
mutex_lock(&acm->mutex);
if (acm->disconnected)
goto disconnected;
@@ -607,8 +597,6 @@ static void acm_port_destruct(struct tty_port *port)
{
struct acm *acm = container_of(port, struct acm, port);
- dev_dbg(&acm->control->dev, "%s\n", __func__);
-
acm_release_minor(acm);
usb_put_intf(acm->control);
kfree(acm->country_codes);
@@ -622,8 +610,6 @@ static void acm_port_shutdown(struct tty_port *port)
struct acm_wb *wb;
int i;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
-
/*
* Need to grab write_lock to prevent race with resume, but no need to
* hold it due to the tty-port initialised flag.
@@ -654,21 +640,21 @@ static void acm_port_shutdown(struct tty_port *port)
static void acm_tty_cleanup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+
tty_port_put(&acm->port);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+
tty_port_hangup(&acm->port);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+
tty_port_close(&acm->port, tty, filp);
}
@@ -684,7 +670,7 @@ static int acm_tty_write(struct tty_struct *tty,
if (!count)
return 0;
- dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count);
+ dev_vdbg(&acm->data->dev, "%d bytes from tty layer\n", count);
spin_lock_irqsave(&acm->write_lock, flags);
wbn = acm_wb_alloc(acm);
@@ -701,7 +687,7 @@ static int acm_tty_write(struct tty_struct *tty,
}
count = (count > acm->writesize) ? acm->writesize : count;
- dev_vdbg(&acm->data->dev, "%s - write %d\n", __func__, count);
+ dev_vdbg(&acm->data->dev, "writing %d bytes\n", count);
memcpy(wb->buf, buf, count);
wb->len = count;
@@ -1193,6 +1179,9 @@ static int acm_probe(struct usb_interface *intf,
return -EINVAL;
}
+ if (!intf->cur_altsetting)
+ return -EINVAL;
+
if (!buflen) {
if (intf->cur_altsetting->endpoint &&
intf->cur_altsetting->endpoint->extralen &&
@@ -1246,6 +1235,8 @@ static int acm_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "no interfaces\n");
return -ENODEV;
}
+ if (!data_interface->cur_altsetting || !control_interface->cur_altsetting)
+ return -ENODEV;
if (data_intf_num != call_intf_num)
dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
@@ -1533,8 +1524,6 @@ static void stop_data_traffic(struct acm *acm)
{
int i;
- dev_dbg(&acm->control->dev, "%s\n", __func__);
-
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
@@ -1551,8 +1540,6 @@ static void acm_disconnect(struct usb_interface *intf)
struct tty_struct *tty;
int i;
- dev_dbg(&intf->dev, "%s\n", __func__);
-
/* sibling interface is already cleaning up */
if (!acm)
return;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 337948c42110..0a6369510f2d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,6 +58,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_SUSPENDING 8
#define WDM_RESETTING 9
#define WDM_OVERFLOW 10
+#define WDM_DRAIN_ON_OPEN 11
#define WDM_MAX 16
@@ -154,6 +155,9 @@ static void wdm_out_callback(struct urb *urb)
wake_up(&desc->wait);
}
+/* forward declaration */
+static int service_outstanding_interrupt(struct wdm_device *desc);
+
static void wdm_in_callback(struct urb *urb)
{
struct wdm_device *desc = urb->context;
@@ -167,18 +171,18 @@ static void wdm_in_callback(struct urb *urb)
switch (status) {
case -ENOENT:
dev_dbg(&desc->intf->dev,
- "nonzero urb status received: -ENOENT");
+ "nonzero urb status received: -ENOENT\n");
goto skip_error;
case -ECONNRESET:
dev_dbg(&desc->intf->dev,
- "nonzero urb status received: -ECONNRESET");
+ "nonzero urb status received: -ECONNRESET\n");
goto skip_error;
case -ESHUTDOWN:
dev_dbg(&desc->intf->dev,
- "nonzero urb status received: -ESHUTDOWN");
+ "nonzero urb status received: -ESHUTDOWN\n");
goto skip_error;
case -EPIPE:
- dev_err(&desc->intf->dev,
+ dev_dbg(&desc->intf->dev,
"nonzero urb status received: -EPIPE\n");
break;
default:
@@ -188,7 +192,13 @@ static void wdm_in_callback(struct urb *urb)
}
}
- desc->rerr = status;
+ /*
+ * only set a new error if there is no previous error.
+ * Errors are only cleared during read/open
+ */
+ if (desc->rerr == 0)
+ desc->rerr = status;
+
if (length + desc->length > desc->wMaxCommand) {
/* The buffer would overflow */
set_bit(WDM_OVERFLOW, &desc->flags);
@@ -200,10 +210,40 @@ static void wdm_in_callback(struct urb *urb)
desc->reslength = length;
}
}
+
+ /*
+ * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
+ * If desc->resp_count is unset, then the urb was submitted
+ * without a prior notification. If the device returned any
+ * data, then this implies that it had messages queued without
+ * notifying us. Continue reading until that queue is flushed.
+ */
+ if (!desc->resp_count) {
+ if (!length) {
+ /* do not propagate the expected -EPIPE */
+ desc->rerr = 0;
+ goto unlock;
+ }
+ dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
+ set_bit(WDM_RESPONDING, &desc->flags);
+ usb_submit_urb(desc->response, GFP_ATOMIC);
+ }
+
skip_error:
+ set_bit(WDM_READ, &desc->flags);
wake_up(&desc->wait);
- set_bit(WDM_READ, &desc->flags);
+ if (desc->rerr) {
+ /*
+ * Since there was an error, userspace may decide to not read
+ * any data after poll'ing.
+ * We should respond to further attempts from the device to send
+ * data, so that we can get unstuck.
+ */
+ service_outstanding_interrupt(desc);
+ }
+
+unlock:
spin_unlock(&desc->iuspin);
}
@@ -244,18 +284,18 @@ static void wdm_int_callback(struct urb *urb)
switch (dr->bNotificationType) {
case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
dev_dbg(&desc->intf->dev,
- "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+ "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d\n",
le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
break;
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
dev_dbg(&desc->intf->dev,
- "NOTIFY_NETWORK_CONNECTION %s network",
+ "NOTIFY_NETWORK_CONNECTION %s network\n",
dr->wValue ? "connected to" : "disconnected from");
goto exit;
case USB_CDC_NOTIFY_SPEED_CHANGE:
- dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)",
+ dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)\n",
urb->actual_length);
goto exit;
default:
@@ -274,8 +314,7 @@ static void wdm_int_callback(struct urb *urb)
&& !test_bit(WDM_DISCONNECTING, &desc->flags)
&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
rv = usb_submit_urb(desc->response, GFP_ATOMIC);
- dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
- __func__, rv);
+ dev_dbg(&desc->intf->dev, "submit response URB %d\n", rv);
}
spin_unlock(&desc->iuspin);
if (rv < 0) {
@@ -417,7 +456,7 @@ static ssize_t wdm_write
rv = usb_translate_errors(rv);
goto out_free_mem_pm;
} else {
- dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+ dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d\n",
le16_to_cpu(req->wIndex));
}
@@ -436,17 +475,14 @@ out_free_mem:
}
/*
- * clear WDM_READ flag and possibly submit the read urb if resp_count
- * is non-zero.
+ * Submit the read urb if resp_count is non-zero.
*
* Called with desc->iuspin locked
*/
-static int clear_wdm_read_flag(struct wdm_device *desc)
+static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
- clear_bit(WDM_READ, &desc->flags);
-
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
goto out;
@@ -537,8 +573,9 @@ retry:
}
if (!desc->reslength) { /* zero length read */
- dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
- rv = clear_wdm_read_flag(desc);
+ dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
+ clear_bit(WDM_READ, &desc->flags);
+ rv = service_outstanding_interrupt(desc);
spin_unlock_irq(&desc->iuspin);
if (rv < 0)
goto err;
@@ -563,8 +600,10 @@ retry:
desc->length -= cntr;
/* in case we had outstanding data */
- if (!desc->length)
- clear_wdm_read_flag(desc);
+ if (!desc->length) {
+ clear_bit(WDM_READ, &desc->flags);
+ service_outstanding_interrupt(desc);
+ }
spin_unlock_irq(&desc->iuspin);
rv = cntr;
@@ -647,6 +686,17 @@ static int wdm_open(struct inode *inode, struct file *file)
dev_err(&desc->intf->dev,
"Error submitting int urb - %d\n", rv);
rv = usb_translate_errors(rv);
+ } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
+ /*
+ * Some devices keep pending messages queued
+ * without resending notifications. We must
+ * flush the message queue before we can
+ * assume a one-to-one relationship between
+ * notifications and messages in the queue
+ */
+ dev_dbg(&desc->intf->dev, "draining queued data\n");
+ set_bit(WDM_RESPONDING, &desc->flags);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
}
} else {
rv = 0;
@@ -673,7 +723,7 @@ static int wdm_release(struct inode *inode, struct file *file)
if (!desc->count) {
if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
- dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
+ dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
kill_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
@@ -753,7 +803,8 @@ static void wdm_rxwork(struct work_struct *work)
/* --- hotplug --- */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
- u16 bufsize, int (*manage_power)(struct usb_interface *, int))
+ u16 bufsize, int (*manage_power)(struct usb_interface *, int),
+ bool drain_on_open)
{
int rv = -ENOMEM;
struct wdm_device *desc;
@@ -840,6 +891,68 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
desc->manage_power = manage_power;
+ /*
+ * "drain_on_open" enables a hack to work around a firmware
+ * issue observed on network functions, in particular MBIM
+ * functions.
+ *
+ * Quoting section 7 of the CDC-WMC r1.1 specification:
+ *
+ * "The firmware shall interpret GetEncapsulatedResponse as a
+ * request to read response bytes. The firmware shall send
+ * the next wLength bytes from the response. The firmware
+ * shall allow the host to retrieve data using any number of
+ * GetEncapsulatedResponse requests. The firmware shall
+ * return a zero-length reply if there are no data bytes
+ * available.
+ *
+ * The firmware shall send ResponseAvailable notifications
+ * periodically, using any appropriate algorithm, to inform
+ * the host that there is data available in the reply
+ * buffer. The firmware is allowed to send ResponseAvailable
+ * notifications even if there is no data available, but
+ * this will obviously reduce overall performance."
+ *
+ * These requirements, although they make sense, are
+ * often not implemented by network functions. Some firmwares
+ * will queue data indefinitely, without ever resending a
+ * notification. The result is that the driver and firmware
+ * loses "syncronization" if the driver ever fails to respond
+ * to a single notification, something which easily can happen
+ * on release(). When this happens, the driver will appear to
+ * never receive notifications for the most current data. Each
+ * notification will only cause a single read, which returns
+ * the oldest data in the firmware's queue.
+ *
+ * The "drain_on_open" hack resolves the situation by draining
+ * data from the firmware until none is returned, without a
+ * prior notification.
+ *
+ * This will inevitably race with the firmware, risking that
+ * we read data from the device before handling the associated
+ * notification. To make things worse, some of the devices
+ * needing the hack do not implement the "return zero if no
+ * data is available" requirement either. Instead they return
+ * an error on the subsequent read in this case. This means
+ * that "winning" the race can cause an unexpected EIO to
+ * userspace.
+ *
+ * "winning" the race is more likely on resume() than on
+ * open(), and the unexpected error is more harmful in the
+ * middle of an open session. The hack is therefore only
+ * applied on open(), and not on resume() where it logically
+ * would be equally necessary. So we define open() as the only
+ * driver <-> device "synchronization point". Should we happen
+ * to lose a notification after open(), then synchronization
+ * will be lost until release().
+ *
+ * The hack should not be enabled for CDC WDM devices
+ * conforming to the CDC-WMC r1.1 specification. This is
+ * ensured by setting drain_on_open to false in wdm_probe().
+ */
+ if (drain_on_open)
+ set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
+
spin_lock(&wdm_device_list_lock);
list_add(&desc->device_list, &wdm_device_list);
spin_unlock(&wdm_device_list_lock);
@@ -893,7 +1006,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err;
ep = &iface->endpoint[0].desc;
- rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
+ rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false);
err:
return rv;
@@ -925,7 +1038,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
{
int rv = -EINVAL;
- rv = wdm_create(intf, ep, bufsize, manage_power);
+ rv = wdm_create(intf, ep, bufsize, manage_power, true);
if (rv < 0)
goto err;
@@ -967,7 +1080,7 @@ static void wdm_disconnect(struct usb_interface *intf)
if (!desc->count)
cleanup(desc);
else
- dev_dbg(&intf->dev, "%s: %d open files - postponing cleanup\n", __func__, desc->count);
+ dev_dbg(&intf->dev, "%d open files - postponing cleanup\n", desc->count);
mutex_unlock(&wdm_mutex);
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 917a55c4480d..a6c1fae7d52a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -141,6 +141,7 @@ static void usbtmc_delete(struct kref *kref)
struct usbtmc_device_data *data = to_usbtmc_data(kref);
usb_put_dev(data->usb_dev);
+ kfree(data);
}
static int usbtmc_open(struct inode *inode, struct file *filp)
@@ -1379,7 +1380,7 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1467,10 +1468,8 @@ static int usbtmc_probe(struct usb_interface *intf,
if (data->iin_ep_present) {
/* allocate int urb */
data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->iin_urb) {
- dev_err(&intf->dev, "Failed to allocate int urb\n");
+ if (!data->iin_urb)
goto error_register;
- }
/* will reference data in int urb */
kref_get(&data->kref);
@@ -1478,10 +1477,8 @@ static int usbtmc_probe(struct usb_interface *intf,
/* allocate buffer for interrupt in */
data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
GFP_KERNEL);
- if (!data->iin_buffer) {
- dev_err(&intf->dev, "Failed to allocate int buf\n");
+ if (!data->iin_buffer)
goto error_register;
- }
/* fill interrupt urb */
usb_fill_int_urb(data->iin_urb, data->usb_dev,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 01c0c0477a9e..8b317702d761 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -21,13 +21,13 @@
int ulpi_read(struct ulpi *ulpi, u8 addr)
{
- return ulpi->ops->read(ulpi->ops, addr);
+ return ulpi->ops->read(ulpi->dev.parent, addr);
}
EXPORT_SYMBOL_GPL(ulpi_read);
int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val)
{
- return ulpi->ops->write(ulpi->ops, addr, val);
+ return ulpi->ops->write(ulpi->dev.parent, addr, val);
}
EXPORT_SYMBOL_GPL(ulpi_write);
@@ -127,16 +127,17 @@ static struct device_type ulpi_dev_type = {
*
* Registers a driver with the ULPI bus.
*/
-int ulpi_register_driver(struct ulpi_driver *drv)
+int __ulpi_register_driver(struct ulpi_driver *drv, struct module *module)
{
if (!drv->probe)
return -EINVAL;
+ drv->driver.owner = module;
drv->driver.bus = &ulpi_bus;
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(ulpi_register_driver);
+EXPORT_SYMBOL_GPL(__ulpi_register_driver);
/**
* ulpi_unregister_driver - unregister a driver with the ULPI bus
@@ -156,6 +157,8 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
{
int ret;
+ ulpi->dev.parent = dev; /* needed early for ops */
+
/* Test the interface */
ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa);
if (ret < 0)
@@ -174,7 +177,6 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
ulpi->id.product = ulpi_read(ulpi, ULPI_PRODUCT_ID_LOW);
ulpi->id.product |= ulpi_read(ulpi, ULPI_PRODUCT_ID_HIGH) << 8;
- ulpi->dev.parent = dev;
ulpi->dev.bus = &ulpi_bus;
ulpi->dev.type = &ulpi_dev_type;
dev_set_name(&ulpi->dev, "%s.ulpi", dev_name(dev));
@@ -201,7 +203,8 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
* Allocates and registers a ULPI device and an interface for it. Called from
* the USB controller that provides the ULPI interface.
*/
-struct ulpi *ulpi_register_interface(struct device *dev, struct ulpi_ops *ops)
+struct ulpi *ulpi_register_interface(struct device *dev,
+ const struct ulpi_ops *ops)
{
struct ulpi *ulpi;
int ret;
@@ -211,7 +214,6 @@ struct ulpi *ulpi_register_interface(struct device *dev, struct ulpi_ops *ops)
return ERR_PTR(-ENOMEM);
ulpi->ops = ops;
- ops->dev = dev;
ret = ulpi_register(dev, ulpi);
if (ret) {
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index dd280108758f..0e5a889742b3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -83,23 +83,10 @@ config USB_OTG_FSM
Implements OTG Finite State Machine as specified in On-The-Go
and Embedded Host Supplement to the USB Revision 2.0 Specification.
-config USB_ULPI_BUS
- tristate "USB ULPI PHY interface support"
- depends on USB_SUPPORT
+config USB_LEDS_TRIGGER_USBPORT
+ tristate "USB port LED trigger"
+ depends on USB && LEDS_TRIGGERS
help
- UTMI+ Low Pin Interface (ULPI) is specification for a commonly used
- USB 2.0 PHY interface. The ULPI specification defines a standard set
- of registers that can be used to detect the vendor and product which
- allows ULPI to be handled as a bus. This module is the driver for that
- bus.
-
- The ULPI interfaces (the buses) are registered by the drivers for USB
- controllers which support ULPI register access and have ULPI PHY
- attached to them. The ULPI PHY drivers themselves are normal PHY
- drivers.
-
- ULPI PHYs provide often functions such as ADP sensing/probing (OTG
- protocol) and USB charger detection.
-
- To compile this driver as a module, choose M here: the module will
- be called ulpi.
+ This driver allows LEDs to be controlled by USB events. Enabling this
+ trigger allows specifying a list of USB ports that should turn on the
+ LED when a USB device gets connected.
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 9780877010b4..b99b871c4b9d 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -5,9 +5,12 @@
usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
-usbcore-y += port.o of.o
+usbcore-y += port.o
+usbcore-$(CONFIG_OF) += of.o
usbcore-$(CONFIG_PCI) += hcd-pci.o
usbcore-$(CONFIG_ACPI) += usb-acpi.o
obj-$(CONFIG_USB) += usbcore.o
+
+obj-$(CONFIG_USB_LEDS_TRIGGER_USBPORT) += ledtrig-usbport.o
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d2e3f655c26f..479e223f9cff 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -46,6 +46,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>
+#include <linux/usb/otg.h>
#include "usb.h"
@@ -2517,10 +2518,8 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
struct usb_hcd *hcd;
hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
- if (!hcd) {
- dev_dbg (dev, "hcd alloc failed\n");
+ if (!hcd)
return NULL;
- }
if (primary_hcd == NULL) {
hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
GFP_KERNEL);
@@ -3033,7 +3032,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+#if IS_ENABLED(CONFIG_USB_MON)
const struct usb_mon_operations *mon_ops;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1d5fc32d06d0..cbb146736f57 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1823,10 +1823,8 @@ descriptor_error:
dev_info(&intf->dev, "USB hub found\n");
hub = kzalloc(sizeof(*hub), GFP_KERNEL);
- if (!hub) {
- dev_dbg(&intf->dev, "couldn't kmalloc hub struct\n");
+ if (!hub)
return -ENOMEM;
- }
kref_init(&hub->kref);
hub->intfdev = &intf->dev;
@@ -3106,7 +3104,7 @@ static int usb_disable_remote_wakeup(struct usb_device *udev)
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
+ USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -5337,11 +5335,10 @@ static int descriptors_changed(struct usb_device *udev,
}
buf = kmalloc(len, GFP_NOIO);
- if (buf == NULL) {
- dev_err(&udev->dev, "no mem to re-read configs after reset\n");
+ if (!buf)
/* assume the worst */
return 1;
- }
+
for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
new file mode 100644
index 000000000000..3ed5162677ad
--- /dev/null
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -0,0 +1,314 @@
+/*
+ * USB port LED trigger
+ *
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+struct usbport_trig_data {
+ struct led_classdev *led_cdev;
+ struct list_head ports;
+ struct notifier_block nb;
+ int count; /* Number of connected matching devices */
+};
+
+struct usbport_trig_port {
+ struct usbport_trig_data *data;
+ struct usb_device *hub;
+ int portnum;
+ char *port_name;
+ bool observed;
+ struct device_attribute attr;
+ struct list_head list;
+};
+
+/***************************************
+ * Helpers
+ ***************************************/
+
+/**
+ * usbport_trig_usb_dev_observed - Check if dev is connected to observed port
+ */
+static bool usbport_trig_usb_dev_observed(struct usbport_trig_data *usbport_data,
+ struct usb_device *usb_dev)
+{
+ struct usbport_trig_port *port;
+
+ if (!usb_dev->parent)
+ return false;
+
+ list_for_each_entry(port, &usbport_data->ports, list) {
+ if (usb_dev->parent == port->hub &&
+ usb_dev->portnum == port->portnum)
+ return port->observed;
+ }
+
+ return false;
+}
+
+static int usbport_trig_usb_dev_check(struct usb_device *usb_dev, void *data)
+{
+ struct usbport_trig_data *usbport_data = data;
+
+ if (usbport_trig_usb_dev_observed(usbport_data, usb_dev))
+ usbport_data->count++;
+
+ return 0;
+}
+
+/**
+ * usbport_trig_update_count - Recalculate the number of connected matching devices
+ */
+static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
+{
+ struct led_classdev *led_cdev = usbport_data->led_cdev;
+
+ usbport_data->count = 0;
+ usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
+ led_cdev->brightness_set(led_cdev,
+ usbport_data->count ? LED_FULL : LED_OFF);
+}
+
+/***************************************
+ * Device attr
+ ***************************************/
+
+static ssize_t usbport_trig_port_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbport_trig_port *port = container_of(attr,
+ struct usbport_trig_port,
+ attr);
+
+ return sprintf(buf, "%d\n", port->observed) + 1;
+}
+
+static ssize_t usbport_trig_port_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct usbport_trig_port *port = container_of(attr,
+ struct usbport_trig_port,
+ attr);
+
+ if (!strcmp(buf, "0") || !strcmp(buf, "0\n"))
+ port->observed = 0;
+ else if (!strcmp(buf, "1") || !strcmp(buf, "1\n"))
+ port->observed = 1;
+ else
+ return -EINVAL;
+
+ usbport_trig_update_count(port->data);
+
+ return size;
+}
+
+static struct attribute *ports_attrs[] = {
+ NULL,
+};
+static const struct attribute_group ports_group = {
+ .name = "ports",
+ .attrs = ports_attrs,
+};
+
+/***************************************
+ * Adding & removing ports
+ ***************************************/
+
+static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
+ struct usb_device *usb_dev,
+ const char *hub_name, int portnum)
+{
+ struct led_classdev *led_cdev = usbport_data->led_cdev;
+ struct usbport_trig_port *port;
+ size_t len;
+ int err;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ port->data = usbport_data;
+ port->hub = usb_dev;
+ port->portnum = portnum;
+
+ len = strlen(hub_name) + 8;
+ port->port_name = kzalloc(len, GFP_KERNEL);
+ if (!port->port_name) {
+ err = -ENOMEM;
+ goto err_free_port;
+ }
+ snprintf(port->port_name, len, "%s-port%d", hub_name, portnum);
+
+ port->attr.attr.name = port->port_name;
+ port->attr.attr.mode = S_IRUSR | S_IWUSR;
+ port->attr.show = usbport_trig_port_show;
+ port->attr.store = usbport_trig_port_store;
+
+ err = sysfs_add_file_to_group(&led_cdev->dev->kobj, &port->attr.attr,
+ ports_group.name);
+ if (err)
+ goto err_free_port_name;
+
+ list_add_tail(&port->list, &usbport_data->ports);
+
+ return 0;
+
+err_free_port_name:
+ kfree(port->port_name);
+err_free_port:
+ kfree(port);
+err_out:
+ return err;
+}
+
+static int usbport_trig_add_usb_dev_ports(struct usb_device *usb_dev,
+ void *data)
+{
+ struct usbport_trig_data *usbport_data = data;
+ int i;
+
+ for (i = 1; i <= usb_dev->maxchild; i++)
+ usbport_trig_add_port(usbport_data, usb_dev,
+ dev_name(&usb_dev->dev), i);
+
+ return 0;
+}
+
+static void usbport_trig_remove_port(struct usbport_trig_data *usbport_data,
+ struct usbport_trig_port *port)
+{
+ struct led_classdev *led_cdev = usbport_data->led_cdev;
+
+ list_del(&port->list);
+ sysfs_remove_file_from_group(&led_cdev->dev->kobj, &port->attr.attr,
+ ports_group.name);
+ kfree(port->port_name);
+ kfree(port);
+}
+
+static void usbport_trig_remove_usb_dev_ports(struct usbport_trig_data *usbport_data,
+ struct usb_device *usb_dev)
+{
+ struct usbport_trig_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
+ if (port->hub == usb_dev)
+ usbport_trig_remove_port(usbport_data, port);
+ }
+}
+
+/***************************************
+ * Init, exit, etc.
+ ***************************************/
+
+static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct usbport_trig_data *usbport_data =
+ container_of(nb, struct usbport_trig_data, nb);
+ struct led_classdev *led_cdev = usbport_data->led_cdev;
+ struct usb_device *usb_dev = data;
+ bool observed;
+
+ observed = usbport_trig_usb_dev_observed(usbport_data, usb_dev);
+
+ switch (action) {
+ case USB_DEVICE_ADD:
+ usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
+ if (observed && usbport_data->count++ == 0)
+ led_cdev->brightness_set(led_cdev, LED_FULL);
+ return NOTIFY_OK;
+ case USB_DEVICE_REMOVE:
+ usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
+ if (observed && --usbport_data->count == 0)
+ led_cdev->brightness_set(led_cdev, LED_OFF);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void usbport_trig_activate(struct led_classdev *led_cdev)
+{
+ struct usbport_trig_data *usbport_data;
+ int err;
+
+ usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
+ if (!usbport_data)
+ return;
+ usbport_data->led_cdev = led_cdev;
+
+ /* List of ports */
+ INIT_LIST_HEAD(&usbport_data->ports);
+ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
+ if (err)
+ goto err_free;
+ usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
+
+ /* Notifications */
+ usbport_data->nb.notifier_call = usbport_trig_notify;
+ led_cdev->trigger_data = usbport_data;
+ usb_register_notify(&usbport_data->nb);
+
+ led_cdev->activated = true;
+ return;
+
+err_free:
+ kfree(usbport_data);
+}
+
+static void usbport_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct usbport_trig_data *usbport_data = led_cdev->trigger_data;
+ struct usbport_trig_port *port, *tmp;
+
+ if (!led_cdev->activated)
+ return;
+
+ list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
+ usbport_trig_remove_port(usbport_data, port);
+ }
+
+ usb_unregister_notify(&usbport_data->nb);
+
+ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
+
+ kfree(usbport_data);
+
+ led_cdev->activated = false;
+}
+
+static struct led_trigger usbport_led_trigger = {
+ .name = "usbport",
+ .activate = usbport_trig_activate,
+ .deactivate = usbport_trig_deactivate,
+};
+
+static int __init usbport_trig_init(void)
+{
+ return led_trigger_register(&usbport_led_trigger);
+}
+
+static void __exit usbport_trig_exit(void)
+{
+ led_trigger_unregister(&usbport_led_trigger);
+}
+
+module_init(usbport_trig_init);
+module_exit(usbport_trig_exit);
+
+MODULE_AUTHOR("Rafał Miłecki <rafal@milecki.pl>");
+MODULE_DESCRIPTION("USB port trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0406a59f0551..3a4707746157 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1760,17 +1760,14 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
GFP_NOIO);
- if (!new_interfaces) {
- dev_err(&dev->dev, "Out of memory\n");
+ if (!new_interfaces)
return -ENOMEM;
- }
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
GFP_NOIO);
if (!new_interfaces[n]) {
- dev_err(&dev->dev, "Out of memory\n");
ret = -ENOMEM;
free_interfaces:
while (--n >= 0)
@@ -1862,7 +1859,12 @@ free_interfaces:
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
+ /*
+ * Please refer to usb_alloc_dev() to see why we set
+ * dma_mask and dma_pfn_offset.
+ */
intf->dev.dma_mask = dev->dev.dma_mask;
+ intf->dev.dma_pfn_offset = dev->dev.dma_pfn_offset;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
intf->minor = -1;
device_initialize(&intf->dev);
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 2289700c31d6..3de4f8873984 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -18,6 +18,7 @@
*/
#include <linux/of.h>
+#include <linux/usb/of.h>
/**
* usb_of_get_child_node - Find the device node match port number
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index a95b0c989c21..085049d37d7a 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -38,7 +38,7 @@ static struct usb_device_id whitelist_table[] = {
{ USB_DEVICE(0x0525, 0xa4a2), },
#endif
-#if defined(CONFIG_USB_TEST) || defined(CONFIG_USB_TEST_MODULE)
+#if IS_ENABLED(CONFIG_USB_TEST)
/* gadget zero, for testing */
{ USB_DEVICE(0x0525, 0xa4a0), },
#endif
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c601e25b609f..a9039696476e 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -68,10 +68,8 @@ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
urb = kmalloc(sizeof(struct urb) +
iso_packets * sizeof(struct usb_iso_packet_descriptor),
mem_flags);
- if (!urb) {
- printk(KERN_ERR "alloc_urb: kmalloc failed\n");
+ if (!urb)
return NULL;
- }
usb_init_urb(urb);
return urb;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5e80697ef952..592151461017 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -440,7 +440,18 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
dev->dev.groups = usb_device_groups;
+ /*
+ * Fake a dma_mask/offset for the USB device:
+ * We cannot really use the dma-mapping API (dma_alloc_* and
+ * dma_map_*) for USB devices but instead need to use
+ * usb_alloc_coherent and pass data in 'urb's, but some subsystems
+ * manually look into the mask/offset pair to determine whether
+ * they need bounce buffers.
+ * Note: calling dma_set_mask() on a USB device would set the
+ * mask for the entire HCD, so don't do that.
+ */
dev->dev.dma_mask = bus->controller->dma_mask;
+ dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
set_dev_node(&dev->dev, dev_to_node(bus->controller));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
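The comment above notes that some subsystems inspect the dma_mask/offset pair to decide whether a bounce buffer is needed. As a rough illustration only (this is not the kernel's dma_capable() helper; the mask and addresses below are arbitrary), the core of such a check is whether an address fits under the device's mask:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* True if every bit of 'addr' is covered by 'mask', i.e. the device can
 * address this location directly and no bounce buffer is required. */
static bool addr_fits_mask(uint64_t addr, uint64_t mask)
{
	return (addr & ~mask) == 0;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;  /* a 32-bit-capable device */

	printf("%d\n", addr_fits_mask(0x0000000080000000ULL, mask32)); /* 1 */
	printf("%d\n", addr_fits_mask(0x0000000180000000ULL, mask32)); /* 0: bounce */
	return 0;
}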
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4135a5ff67ca..fa9b26b91507 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -238,6 +238,77 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
return ret;
}
+/**
+ * dwc2_wait_for_mode() - Waits for the controller mode.
+ * @hsotg: Programming view of the DWC_otg controller.
+ * @host_mode: If true, waits for host mode, otherwise device mode.
+ */
+static void dwc2_wait_for_mode(struct dwc2_hsotg *hsotg,
+ bool host_mode)
+{
+ ktime_t start;
+ ktime_t end;
+ unsigned int timeout = 110;
+
+ dev_vdbg(hsotg->dev, "Waiting for %s mode\n",
+ host_mode ? "host" : "device");
+
+ start = ktime_get();
+
+ while (1) {
+ s64 ms;
+
+ if (dwc2_is_host_mode(hsotg) == host_mode) {
+ dev_vdbg(hsotg->dev, "%s mode set\n",
+ host_mode ? "Host" : "Device");
+ break;
+ }
+
+ end = ktime_get();
+ ms = ktime_to_ms(ktime_sub(end, start));
+
+ if (ms >= (s64)timeout) {
+ dev_warn(hsotg->dev, "%s: Couldn't set %s mode\n",
+ __func__, host_mode ? "host" : "device");
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ }
+}
+
+/**
+ * dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce
+ * filter is enabled.
+ */
+static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
+{
+ u32 gsnpsid;
+ u32 ghwcfg4;
+
+ if (!dwc2_hw_is_otg(hsotg))
+ return false;
+
+ /* Check if core configuration includes the IDDIG filter. */
+ ghwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+ if (!(ghwcfg4 & GHWCFG4_IDDIG_FILT_EN))
+ return false;
+
+ /*
+ * Check if the IDDIG debounce filter is bypassed. Available
+ * in core version >= 3.10a.
+ */
+ gsnpsid = dwc2_readl(hsotg->regs + GSNPSID);
+ if (gsnpsid >= DWC2_CORE_REV_3_10a) {
+ u32 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+
+ if (gotgctl & GOTGCTL_DBNCE_FLTR_BYPASS)
+ return false;
+ }
+
+ return true;
+}
+
/*
* Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
@@ -246,9 +317,30 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
u32 greset;
int count = 0;
+ bool wait_for_host_mode = false;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
+ /*
+ * If the current mode is host, either due to the force mode
+ * bit being set (which persists after core reset) or the
+ * connector id pin, a core soft reset will temporarily reset
+ * the mode to device. A delay from the IDDIG debounce filter
+ * will occur before going back to host mode.
+ *
+ * Determine whether we will go back into host mode after a
+ * reset and account for this delay after the reset.
+ */
+ if (dwc2_iddig_filter_enabled(hsotg)) {
+ u32 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ u32 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+
+ if (!(gotgctl & GOTGCTL_CONID_B) ||
+ (gusbcfg & GUSBCFG_FORCEHOSTMODE)) {
+ wait_for_host_mode = true;
+ }
+ }
+
/* Core Soft Reset */
greset = dwc2_readl(hsotg->regs + GRSTCTL);
greset |= GRSTCTL_CSFTRST;
@@ -277,6 +369,9 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg)
}
} while (!(greset & GRSTCTL_AHBIDLE));
+ if (wait_for_host_mode)
+ dwc2_wait_for_mode(hsotg, true);
+
return 0;
}
@@ -300,9 +395,9 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg)
* Checks are done in this function to determine whether doing a force
* would be valid or not.
*
- * If a force is done, it requires a 25ms delay to take effect.
- *
- * Returns true if the mode was forced.
+ * If a force is done, it requires an IDDIG debounce filter delay if
+ * the filter is configured and enabled. We poll the current mode of
+ * the controller to account for this delay.
*/
static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
{
@@ -337,12 +432,18 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
gusbcfg |= set;
dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
- msleep(25);
+ dwc2_wait_for_mode(hsotg, host);
return true;
}
-/*
- * Clears the force mode bits.
+/**
+ * dwc2_clear_force_mode() - Clears the force mode bits.
+ *
+ * After clearing the bits, wait up to 100 ms to account for any
+ * potential IDDIG filter delay. We can't know if we expect this delay
+ * or not because the value of the connector ID status is affected by
+ * the force mode. We only need to call this once during probe if
+ * dr_mode == OTG.
*/
static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
{
@@ -353,11 +454,8 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
- /*
- * NOTE: This long sleep is _very_ important, otherwise the core will
- * not stay in host mode after a connector ID change!
- */
- msleep(25);
+ if (dwc2_iddig_filter_enabled(hsotg))
+ usleep_range(100000, 110000);
}
/*
@@ -380,12 +478,6 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
__func__, hsotg->dr_mode);
break;
}
-
- /*
- * NOTE: This is required for some rockchip soc based
- * platforms.
- */
- msleep(50);
}
/*
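The dwc2_wait_for_mode() helper added above replaces fixed msleep() delays with a bounded poll of the current controller mode. A minimal user-space sketch of the same poll-with-timeout pattern, assuming a hypothetical condition_met() callback and using clock_gettime()/nanosleep() in place of ktime and usleep_range():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

/* Poll until cond() is true or timeout_ms has elapsed; returns true on
 * success.  Mirrors the poll-sleep-recheck loop used above. */
static bool wait_for(bool (*cond)(void), long timeout_ms)
{
	struct timespec start, nap = { .tv_nsec = 1000 * 1000 }; /* ~1 ms */

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!cond()) {
		if (elapsed_ms(&start) >= timeout_ms)
			return false;
		nanosleep(&nap, NULL);
	}
	return true;
}

static bool condition_met(void) { return true; }  /* stand-in condition */

int main(void)
{
	printf("ready: %d\n", wait_for(condition_met, 110));
	return 0;
}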
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d64551243789..aad4107ef927 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -259,13 +259,6 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
-/*
- * Gadget periodic tx fifo sizes as used by legacy driver
- * EP0 is not included
- */
-#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
- 768, 0, 0, 0, 0, 0, 0, 0}
-
/* Gadget ep0 states */
enum dwc2_ep0_state {
DWC2_EP0_SETUP,
@@ -890,6 +883,7 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_2_92a 0x4f54292a
#define DWC2_CORE_REV_2_94a 0x4f54294a
#define DWC2_CORE_REV_3_00a 0x4f54300a
+#define DWC2_CORE_REV_3_10a 0x4f54310a
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index af46adfae41c..4cd6403a7566 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -186,9 +186,10 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
- unsigned int ep;
+ unsigned int fifo;
unsigned int addr;
int timeout;
+ u32 dptxfsizn;
u32 val;
/* Reset fifo map if not correctly cleared during previous session */
@@ -216,16 +217,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
- for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
- if (!hsotg->g_tx_fifo_sz[ep])
- continue;
- val = addr;
- val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
- WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
- "insufficient fifo memory");
- addr += hsotg->g_tx_fifo_sz[ep];
+ for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
+ dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
+
+ val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
+ addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
+
+ if (addr > hsotg->fifo_mem)
+ break;
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
}
/*
@@ -388,7 +389,8 @@ static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
return -ENOSPC;
}
} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
- can_write = dwc2_readl(hsotg->regs + DTXFSTS(hs_ep->index));
+ can_write = dwc2_readl(hsotg->regs +
+ DTXFSTS(hs_ep->fifo_index));
can_write &= 0xffff;
can_write *= 4;
@@ -2432,7 +2434,7 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
if (!hsotg->dedicated_fifos)
return;
- size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4;
+ size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
if (size < ep->fifo_size)
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
@@ -3041,22 +3043,11 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
break;
}
- /* If fifo is already allocated for this ep */
- if (hs_ep->fifo_index) {
- size = hs_ep->ep.maxpacket * hs_ep->mc;
- /* If bigger fifo is required deallocate current one */
- if (size > hs_ep->fifo_size) {
- hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
- hs_ep->fifo_index = 0;
- hs_ep->fifo_size = 0;
- }
- }
-
/*
* if the hardware has dedicated fifos, we must give each IN EP
* a unique tx-fifo even if it is non-periodic.
*/
- if (dir_in && hsotg->dedicated_fifos && !hs_ep->fifo_index) {
+ if (dir_in && hsotg->dedicated_fifos) {
u32 fifo_index = 0;
u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket*hs_ep->mc;
@@ -3129,10 +3120,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
spin_lock_irqsave(&hsotg->lock, flags);
- hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
- hs_ep->fifo_index = 0;
- hs_ep->fifo_size = 0;
-
ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
ctrl &= ~DXEPCTL_EPENA;
ctrl &= ~DXEPCTL_USBACTEP;
@@ -3147,6 +3134,10 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
/* terminate all requests with shutdown */
kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
+ hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
+ hs_ep->fifo_index = 0;
+ hs_ep->fifo_size = 0;
+
spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
@@ -3475,8 +3466,11 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
spin_lock_irqsave(&hsotg->lock, flags);
- dwc2_hsotg_init(hsotg);
- dwc2_hsotg_core_init_disconnected(hsotg, false);
+ if (dwc2_hw_is_device(hsotg)) {
+ dwc2_hsotg_init(hsotg);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ }
+
hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -3813,36 +3807,10 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
struct device_node *np = hsotg->dev->of_node;
- u32 len = 0;
- u32 i = 0;
/* Enable dma if requested in device tree */
hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
- /*
- * Register TX periodic fifo size per endpoint.
- * EP0 is excluded since it has no fifo configuration.
- */
- if (!of_find_property(np, "g-tx-fifo-size", &len))
- goto rx_fifo;
-
- len /= sizeof(u32);
-
- /* Read tx fifo sizes other than ep0 */
- if (of_property_read_u32_array(np, "g-tx-fifo-size",
- &hsotg->g_tx_fifo_sz[1], len))
- goto rx_fifo;
-
- /* Add ep0 */
- len++;
-
- /* Make remaining TX fifos unavailable */
- if (len < MAX_EPS_CHANNELS) {
- for (i = len; i < MAX_EPS_CHANNELS; i++)
- hsotg->g_tx_fifo_sz[i] = 0;
- }
-
-rx_fifo:
/* Register RX fifo size */
of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
@@ -3864,13 +3832,10 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
- int i;
- u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Initialize to legacy fifo configuration values */
hsotg->g_rx_fifo_sz = 2048;
hsotg->g_np_g_tx_fifo_sz = 1024;
- memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
/* Device tree specific probe */
dwc2_hsotg_of_probe(hsotg);
@@ -3888,9 +3853,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->g_np_g_tx_fifo_sz);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
- for (i = 0; i < MAX_EPS_CHANNELS; i++)
- dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
- hsotg->g_tx_fifo_sz[i]);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
@@ -3908,17 +3870,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
- if (!hsotg->ctrl_buff) {
- dev_err(dev, "failed to allocate ctrl request buff\n");
+ if (!hsotg->ctrl_buff)
return -ENOMEM;
- }
hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
- if (!hsotg->ep0_buff) {
- dev_err(dev, "failed to allocate ctrl reply buff\n");
+ if (!hsotg->ep0_buff)
return -ENOMEM;
- }
ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
dev_name(hsotg->dev), hsotg);
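The reworked dwc2_hsotg_init_fifo() above derives each TX FIFO's depth from the DPTXFSIZN registers and packs the FIFOs back to back, stopping once the accumulated start address would exceed the available FIFO RAM. A small standalone sketch of that address-accumulation logic (an array stands in for the registers; all numbers are invented):

#include <stdio.h>

int main(void)
{
	/* Pretend per-FIFO depths read back from hardware (in words). */
	unsigned int depth[] = { 256, 256, 768, 768, 512 };
	unsigned int fifo_mem = 2048;   /* total FIFO RAM available */
	unsigned int addr = 1024;       /* RX + non-periodic TX already placed */

	for (unsigned int i = 0; i < sizeof(depth) / sizeof(depth[0]); i++) {
		unsigned int start = addr;

		addr += depth[i];
		if (addr > fifo_mem) {
			printf("FIFO%u does not fit, stopping\n", i + 1);
			break;
		}
		printf("FIFO%u: start %u, depth %u\n", i + 1, start, depth[i]);
	}
	return 0;
}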
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2df3d04d26f5..df5a06578005 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5040,7 +5040,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
/* Create new workqueue and init work */
retval = -ENOMEM;
- hsotg->wq_otg = create_singlethread_workqueue("dwc2");
+ hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0);
if (!hsotg->wq_otg) {
dev_err(hsotg->dev, "Failed to create workqueue\n");
goto error2;
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index efc3bcde2822..91058441e62a 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -48,6 +48,7 @@
#define GOTGCTL_ASESVLD (1 << 18)
#define GOTGCTL_DBNC_SHORT (1 << 17)
#define GOTGCTL_CONID_B (1 << 16)
+#define GOTGCTL_DBNCE_FLTR_BYPASS (1 << 15)
#define GOTGCTL_DEVHNPEN (1 << 11)
#define GOTGCTL_HSTSETHNPEN (1 << 10)
#define GOTGCTL_HNPREQ (1 << 9)
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 530959a8a6d1..8e1728b39a49 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -182,6 +182,38 @@ static const struct dwc2_core_params params_ltq = {
.hibernation = -1,
};
+static const struct dwc2_core_params params_amlogic = {
+ .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .otg_ver = -1,
+ .dma_enable = 1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = DWC2_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 500,
+ .host_perio_tx_fifo_size = 500,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = 16,
+ .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = 1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -486,6 +518,8 @@ static const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
{ .compatible = "snps,dwc2", .data = NULL },
{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+ { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
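The of_device_id additions above bind the new params_amlogic table to two compatible strings. The lookup itself is just a string match over a sentinel-terminated table; a toy version for illustration (struct layout and table contents are made up):

#include <stdio.h>
#include <string.h>

struct core_params { int host_channels; };

static const struct core_params params_a = { .host_channels = 16 };
static const struct core_params params_b = { .host_channels = 8 };

struct match_entry {
	const char *compatible;
	const struct core_params *data;
};

static const struct match_entry match_table[] = {
	{ "vendor,soc-a-usb", &params_a },
	{ "vendor,soc-b-usb", &params_b },
	{ NULL, NULL },                   /* sentinel terminates the table */
};

static const struct core_params *lookup(const char *compatible)
{
	for (const struct match_entry *m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;                      /* no per-SoC table: autodetect */
}

int main(void)
{
	const struct core_params *p = lookup("vendor,soc-a-usb");

	printf("host_channels = %d\n", p ? p->host_channels : -1);
	return 0;
}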
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index a64ce1c94d6d..b97cde76914d 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,7 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
- select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 35d092456bec..7287a763cd0c 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -49,6 +49,57 @@
#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
+/**
+ * dwc3_get_dr_mode - Validates and sets dr_mode
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_get_dr_mode(struct dwc3 *dwc)
+{
+ enum usb_dr_mode mode;
+ struct device *dev = dwc->dev;
+ unsigned int hw_mode;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
+
+ mode = dwc->dr_mode;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
+ switch (hw_mode) {
+ case DWC3_GHWPARAMS0_MODE_GADGET:
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
+ dev_err(dev,
+ "Controller does not support host mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_PERIPHERAL;
+ break;
+ case DWC3_GHWPARAMS0_MODE_HOST:
+ if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
+ dev_err(dev,
+ "Controller does not support device mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_HOST;
+ break;
+ default:
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ if (mode != dwc->dr_mode) {
+ dev_warn(dev,
+ "Configuration mismatch. dr_mode forced to %s\n",
+ mode == USB_DR_MODE_HOST ? "host" : "gadget");
+
+ dwc->dr_mode = mode;
+ }
+
+ return 0;
+}
+
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
u32 reg;
@@ -448,6 +499,9 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->dis_u3_susphy_quirk)
reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+ if (dwc->dis_del_phy_power_chg_quirk)
+ reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
+
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
@@ -485,6 +539,23 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
break;
}
+ switch (dwc->hsphy_mode) {
+ case USBPHY_INTERFACE_MODE_UTMI:
+ reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
+ DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
+ reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
+ DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
+ break;
+ case USBPHY_INTERFACE_MODE_UTMIW:
+ reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
+ DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
+ reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
+ DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
+ break;
+ default:
+ break;
+ }
+
/*
* Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
* '0' during coreConsultant configuration. So default value will
@@ -500,6 +571,9 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->dis_enblslpm_quirk)
reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ if (dwc->dis_u2_freeclk_exists_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
+
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
return 0;
@@ -666,6 +740,32 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+ break;
+ default:
+ dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
+ break;
+ }
+
+ /*
+ * ENDXFER polling is available on version 3.10a and later of
+ * the DWC_usb3 controller. It is NOT available in the
+ * DWC_usb31 controller.
+ */
+ if (!dwc3_is_usb31(dwc) && dwc->revision >= DWC3_REVISION_310A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+ reg |= DWC3_GUCTL2_RST_ACTBITLATER;
+ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ }
+
return 0;
err4:
@@ -763,7 +863,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -772,7 +871,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -781,7 +879,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_OTG:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
ret = dwc3_host_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -888,6 +985,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->maximum_speed = usb_get_maximum_speed(dev);
dwc->dr_mode = usb_get_dr_mode(dev);
+ dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
dwc->has_lpm_erratum = device_property_read_bool(dev,
"snps,has-lpm-erratum");
@@ -924,6 +1022,10 @@ static int dwc3_probe(struct platform_device *pdev)
"snps,dis_enblslpm_quirk");
dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
"snps,dis_rxdet_inp3_quirk");
+ dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
+ "snps,dis-u2-freeclk-exists-quirk");
+ dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
+ "snps,dis-del-phy-power-chg-quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -972,17 +1074,9 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
- if (IS_ENABLED(CONFIG_USB_DWC3_HOST) &&
- (dwc->dr_mode == USB_DR_MODE_OTG ||
- dwc->dr_mode == USB_DR_MODE_UNKNOWN))
- dwc->dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET) &&
- (dwc->dr_mode == USB_DR_MODE_OTG ||
- dwc->dr_mode == USB_DR_MODE_UNKNOWN))
- dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
-
- if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
- dwc->dr_mode = USB_DR_MODE_OTG;
+ ret = dwc3_get_dr_mode(dwc);
+ if (ret)
+ goto err3;
ret = dwc3_alloc_scratch_buffers(dwc);
if (ret)
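The new dwc3_get_dr_mode() above centralises the dr_mode fallback rules: an unset mode defaults to OTG, a device-only or host-only core forces the matching mode (and errors out if the kernel configuration cannot drive it), and a dual-role core falls back to whatever role is compiled in. A compact standalone sketch of that decision table (booleans stand in for the Kconfig options; the enum names are invented):

#include <stdio.h>

enum dr_mode { MODE_UNKNOWN, MODE_HOST, MODE_PERIPHERAL, MODE_OTG };
enum hw_mode { HW_GADGET, HW_HOST, HW_DRD };

/* Returns the resolved mode, or MODE_UNKNOWN if the hardware and the
 * kernel configuration are incompatible. */
static enum dr_mode resolve(enum dr_mode requested, enum hw_mode hw,
			    int host_only, int gadget_only)
{
	enum dr_mode mode = (requested == MODE_UNKNOWN) ? MODE_OTG : requested;

	switch (hw) {
	case HW_GADGET:
		if (host_only)
			return MODE_UNKNOWN;   /* core cannot do host mode */
		mode = MODE_PERIPHERAL;
		break;
	case HW_HOST:
		if (gadget_only)
			return MODE_UNKNOWN;   /* core cannot do device mode */
		mode = MODE_HOST;
		break;
	default:                               /* dual-role core */
		if (host_only)
			mode = MODE_HOST;
		else if (gadget_only)
			mode = MODE_PERIPHERAL;
		break;
	}
	return mode;
}

int main(void)
{
	printf("%d\n", resolve(MODE_UNKNOWN, HW_DRD, 0, 0)); /* 3: stays OTG */
	printf("%d\n", resolve(MODE_OTG, HW_HOST, 0, 0));    /* 1: forced to host */
	return 0;
}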
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 45d6de5107c7..6b60e42626a2 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -109,6 +109,7 @@
#define DWC3_GPRTBIMAP_HS1 0xc184
#define DWC3_GPRTBIMAP_FS0 0xc188
#define DWC3_GPRTBIMAP_FS1 0xc18c
+#define DWC3_GUCTL2 0xc19c
#define DWC3_VER_NUMBER 0xc1a0
#define DWC3_VER_TYPE 0xc1a4
@@ -199,9 +200,18 @@
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30)
#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
#define DWC3_GUSB2PHYCFG_ULPI_UTMI (1 << 4)
#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
+#define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3)
+#define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1)
+#define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10)
+#define DWC3_GUSB2PHYCFG_USBTRDTIM_MASK DWC3_GUSB2PHYCFG_USBTRDTIM(0xf)
+#define USBTRDTIM_UTMI_8_BIT 9
+#define USBTRDTIM_UTMI_16_BIT 5
+#define UTMI_PHYIF_16_BIT 1
+#define UTMI_PHYIF_8_BIT 0
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ (1 << 25)
@@ -235,7 +245,10 @@
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
/* Global HWPARAMS0 Register */
-#define DWC3_GHWPARAMS0_USB3_MODE(n) ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MODE(n) ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MODE_GADGET 0
+#define DWC3_GHWPARAMS0_MODE_HOST 1
+#define DWC3_GHWPARAMS0_MODE_DRD 2
#define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7)
#define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3)
#define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff)
@@ -279,6 +292,9 @@
#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+/* Global User Control Register 2 */
+#define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14)
+
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -685,6 +701,8 @@ struct dwc3_hwparams {
* @request: struct usb_request to be transferred
* @list: a list_head used for request queueing
* @dep: struct dwc3_ep owning this request
+ * @sg: pointer to first incomplete sg
+ * @num_pending_sgs: count of pending sgs
* @first_trb_index: index to first trb used by this request
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
@@ -697,7 +715,9 @@ struct dwc3_request {
struct usb_request request;
struct list_head list;
struct dwc3_ep *dep;
+ struct scatterlist *sg;
+ unsigned num_pending_sgs;
u8 first_trb_index;
u8 epnum;
struct dwc3_trb *trb;
@@ -743,6 +763,9 @@ struct dwc3_scratchpad_array {
* @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
* @dr_mode: requested mode of operation
+ * @hsphy_mode: UTMI phy mode, one of following:
+ * - USBPHY_INTERFACE_MODE_UTMI
+ * - USBPHY_INTERFACE_MODE_UTMIW
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
* @usb2_generic_phy: pointer to USB2 PHY
@@ -799,6 +822,11 @@ struct dwc3_scratchpad_array {
* @dis_u2_susphy_quirk: set if we disable usb2 suspend phy
* @dis_enblslpm_quirk: set if we clear enblslpm in GUSB2PHYCFG,
* disabling the suspend signal to the PHY.
+ * @dis_u2_freeclk_exists_quirk: set if we clear u2_freeclk_exists
+ * in GUSB2PHYCFG, specifying that the USB2 PHY doesn't
+ * provide a free-running PHY clock.
+ * @dis_del_phy_power_chg_quirk: set if we disable delay phy power
+ * change quirk.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
@@ -845,6 +873,7 @@ struct dwc3 {
size_t regs_size;
enum usb_dr_mode dr_mode;
+ enum usb_phy_interface hsphy_mode;
u32 fladj;
u32 irq_gadget;
@@ -880,6 +909,8 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_300A 0x5533300a
+#define DWC3_REVISION_310A 0x5533310a
/*
* NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
@@ -942,6 +973,8 @@ struct dwc3 {
unsigned dis_u2_susphy_quirk:1;
unsigned dis_enblslpm_quirk:1;
unsigned dis_rxdet_inp3_quirk:1;
+ unsigned dis_u2_freeclk_exists_quirk:1;
+ unsigned dis_del_phy_power_chg_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
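The new GUSB2PHYCFG_PHYIF/USBTRDTIM macros above follow the usual clear-mask-then-OR-in-value pattern for updating a register field. A generic, self-contained illustration of that read-modify-write idiom (the register value and field layout here are invented, not the DWC3 bit positions):

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT	10
#define FIELD_WIDTH	4
#define FIELD_MASK	(((1u << FIELD_WIDTH) - 1) << FIELD_SHIFT)
#define FIELD_VAL(n)	(((uint32_t)(n) << FIELD_SHIFT) & FIELD_MASK)

/* Clear the old field contents, then OR in the new value. */
static uint32_t set_field(uint32_t reg, unsigned int val)
{
	reg &= ~FIELD_MASK;
	reg |= FIELD_VAL(val);
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffffu;

	printf("0x%08x\n", set_field(reg, 5)); /* field bits now hold 5 */
	return 0;
}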
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index e56d59b19a0e..fe414e7a9c78 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -36,36 +36,25 @@ struct dwc3_of_simple {
int num_clocks;
};
-static int dwc3_of_simple_probe(struct platform_device *pdev)
+static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
{
- struct dwc3_of_simple *simple;
- struct device *dev = &pdev->dev;
+ struct device *dev = simple->dev;
struct device_node *np = dev->of_node;
-
- unsigned int count;
- int ret;
int i;
- simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
- if (!simple)
- return -ENOMEM;
+ simple->num_clocks = count;
- count = of_clk_get_parent_count(np);
if (!count)
- return -ENOENT;
-
- simple->num_clocks = count;
+ return 0;
simple->clks = devm_kcalloc(dev, simple->num_clocks,
sizeof(struct clk *), GFP_KERNEL);
if (!simple->clks)
return -ENOMEM;
- platform_set_drvdata(pdev, simple);
- simple->dev = dev;
-
for (i = 0; i < simple->num_clocks; i++) {
struct clk *clk;
+ int ret;
clk = of_clk_get(np, i);
if (IS_ERR(clk)) {
@@ -88,6 +77,29 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
simple->clks[i] = clk;
}
+ return 0;
+}
+
+static int dwc3_of_simple_probe(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ int ret;
+ int i;
+
+ simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
+ if (!simple)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, simple);
+ simple->dev = dev;
+
+ ret = dwc3_of_simple_clk_init(simple, of_clk_get_parent_count(np));
+ if (ret)
+ return ret;
+
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
for (i = 0; i < simple->num_clocks; i++) {
@@ -112,7 +124,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
int i;
for (i = 0; i < simple->num_clocks; i++) {
- clk_unprepare(simple->clks[i]);
+ clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
}
@@ -162,7 +174,9 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "qcom,dwc3" },
+ { .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
+ { .compatible = "cavium,octeon-7130-usb-uctl" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 122e64df2f4d..07cc8929f271 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -174,15 +174,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
- int i;
- if (req->started) {
- i = 0;
- do {
- dwc3_ep_inc_deq(dep);
- } while(++i < req->request.num_mapped_sgs);
- req->started = false;
- }
+ req->started = false;
list_del(&req->list);
req->trb = NULL;
@@ -348,7 +341,8 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
* IN transfers due to a mishandled error condition. Synopsys
* STAR 9000614252.
*/
- if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
+ (dwc->gadget.speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
memset(&params, 0, sizeof(params));
@@ -490,7 +484,8 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
params.param0 |= DWC3_DEPCFG_ACTION_INIT;
}
- params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
+ if (usb_endpoint_xfer_control(desc))
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
@@ -764,6 +759,8 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
kfree(req);
}
+static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
+
/**
* dwc3_prepare_one_trb - setup one TRB from one request
* @dep: endpoint for which this request is prepared
@@ -771,15 +768,13 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, dma_addr_t dma,
- unsigned length, unsigned last, unsigned chain, unsigned node)
+ unsigned length, unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
- dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
+ dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
dep->name, req, (unsigned long long) dma,
- length, last ? " last" : "",
- chain ? " chain" : "");
-
+ length, chain ? " chain" : "");
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -826,12 +821,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
/* always enable Continue on Short Packet */
trb->ctrl |= DWC3_TRB_CTRL_CSP;
- if (!req->request.no_interrupt && !chain)
+ if ((!req->request.no_interrupt && !chain) ||
+ (dwc3_calc_trbs_left(dep) == 0))
trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
- if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
- trb->ctrl |= DWC3_TRB_CTRL_LST;
-
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -856,12 +849,12 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
*/
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
- if (!index)
- index = DWC3_TRB_NUM - 2;
- else
- index = dep->trb_enqueue - 1;
+ u8 tmp = index;
- return &dep->trb_pool[index];
+ if (!tmp)
+ tmp = DWC3_TRB_NUM - 1;
+
+ return &dep->trb_pool[tmp - 1];
}
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
@@ -894,65 +887,42 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
}
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned int trbs_left,
- unsigned int more_coming)
+ struct dwc3_request *req)
{
- struct usb_request *request = &req->request;
- struct scatterlist *sg = request->sg;
+ struct scatterlist *sg = req->sg;
struct scatterlist *s;
- unsigned int last = false;
unsigned int length;
dma_addr_t dma;
int i;
- for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ for_each_sg(sg, s, req->num_pending_sgs, i) {
unsigned chain = true;
length = sg_dma_len(s);
dma = sg_dma_address(s);
- if (sg_is_last(s)) {
- if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
- !more_coming)
- last = true;
-
- chain = false;
- }
-
- if (!trbs_left--)
- last = true;
-
- if (last)
+ if (sg_is_last(s))
chain = false;
dwc3_prepare_one_trb(dep, req, dma, length,
- last, chain, i);
+ chain, i);
- if (last)
+ if (!dwc3_calc_trbs_left(dep))
break;
}
}
static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned int trbs_left,
- unsigned int more_coming)
+ struct dwc3_request *req)
{
- unsigned int last = false;
unsigned int length;
dma_addr_t dma;
dma = req->request.dma;
length = req->request.length;
- if (!trbs_left)
- last = true;
-
- /* Is this the last request? */
- if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
- last = true;
-
dwc3_prepare_one_trb(dep, req, dma, length,
- last, false, 0);
+ false, 0);
}
/*
@@ -966,26 +936,19 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
struct dwc3_request *req, *n;
- unsigned int more_coming;
- u32 trbs_left;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
- trbs_left = dwc3_calc_trbs_left(dep);
- if (!trbs_left)
+ if (!dwc3_calc_trbs_left(dep))
return;
- more_coming = dep->allocated_requests - dep->queued_requests;
-
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
- if (req->request.num_mapped_sgs > 0)
- dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
- more_coming);
+ if (req->num_pending_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req);
else
- dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
- more_coming);
+ dwc3_prepare_one_trb_linear(dep, req);
- if (!trbs_left)
+ if (!dwc3_calc_trbs_left(dep))
return;
}
}
@@ -1101,93 +1064,29 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
trace_dwc3_ep_queue(req);
- /*
- * We only add to our list of requests now and
- * start consuming the list once we get XferNotReady
- * IRQ.
- *
- * That way, we avoid doing anything that we don't need
- * to do now and defer it until the point we receive a
- * particular token from the Host side.
- *
- * This will also avoid Host cancelling URBs due to too
- * many NAKs.
- */
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->direction);
if (ret)
return ret;
- list_add_tail(&req->list, &dep->pending_list);
-
- /*
- * If there are no pending requests and the endpoint isn't already
- * busy, we will just start the request straight away.
- *
- * This will save one IRQ (XFER_NOT_READY) and possibly make it a
- * little bit faster.
- */
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- !usb_endpoint_xfer_int(dep->endpoint.desc)) {
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- goto out;
- }
-
- /*
- * There are a few special cases:
- *
- * 1. XferNotReady with empty list of requests. We need to kick the
- * transfer here in that situation, otherwise we will be NAKing
- * forever. If we get XferNotReady before gadget driver has a
- * chance to queue a request, we will ACK the IRQ but won't be
- * able to receive the data until the next request is queued.
- * The following code is handling exactly that.
- *
- */
- if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- /*
- * If xfernotready is already elapsed and it is a case
- * of isoc transfer, then issue END TRANSFER, so that
- * you can receive xfernotready again and can have
- * notion of current microframe.
- */
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if (list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
- }
- return 0;
- }
-
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (!ret)
- dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ req->sg = req->request.sg;
+ req->num_pending_sgs = req->request.num_mapped_sgs;
- goto out;
- }
+ list_add_tail(&req->list, &dep->pending_list);
- /*
- * 2. XferInProgress on Isoc EP with an active transfer. We need to
- * kick the transfer here after queuing a request, otherwise the
- * core may not see the modified TRB(s).
- */
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- (dep->flags & DWC3_EP_BUSY) &&
- !(dep->flags & DWC3_EP_MISSED_ISOC)) {
- WARN_ON_ONCE(!dep->resource_index);
- ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
- goto out;
+ dep->flags & DWC3_EP_PENDING_REQUEST) {
+ if (list_empty(&dep->started_list)) {
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ dep->flags = DWC3_EP_ENABLED;
+ }
+ return 0;
}
- /*
- * 4. Stream Capable Bulk Endpoints. We need to start the transfer
- * right away, otherwise host will not know we have streams to be
- * handled.
- */
- if (dep->stream_capable)
- ret = __dwc3_gadget_kick_transfer(dep, 0);
+ if (!dwc3_calc_trbs_left(dep))
+ return 0;
-out:
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (ret && ret != -EBUSY)
dwc3_trace(trace_dwc3_gadget,
"%s: failed to kick transfers",
@@ -1963,6 +1862,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int trb_status;
dep->queued_requests--;
+ dwc3_ep_inc_deq(dep);
trace_dwc3_complete_trb(dep, trb);
/*
@@ -1982,6 +1882,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
count = trb->size & DWC3_TRB_SIZE_MASK;
+ req->request.actual += count;
if (dep->direction) {
if (count) {
@@ -2021,48 +1922,51 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
if (s_pkt && !chain)
return 1;
- if ((event->status & DEPEVT_STATUS_LST) &&
- (trb->ctrl & (DWC3_TRB_CTRL_LST |
- DWC3_TRB_CTRL_HWO)))
- return 1;
+
if ((event->status & DEPEVT_STATUS_IOC) &&
(trb->ctrl & DWC3_TRB_CTRL_IOC))
return 1;
+
return 0;
}
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
- struct dwc3_request *req;
+ struct dwc3_request *req, *n;
struct dwc3_trb *trb;
- unsigned int slot;
- unsigned int i;
- int count = 0;
+ bool ioc = false;
int ret;
- do {
+ list_for_each_entry_safe(req, n, &dep->started_list, list) {
+ unsigned length;
+ unsigned actual;
int chain;
- req = next_request(&dep->started_list);
- if (WARN_ON_ONCE(!req))
- return 1;
-
- chain = req->request.num_mapped_sgs > 0;
- i = 0;
- do {
- slot = req->first_trb_index + i;
- if (slot == DWC3_TRB_NUM - 1)
- slot++;
- slot %= DWC3_TRB_NUM;
- trb = &dep->trb_pool[slot];
- count += trb->size & DWC3_TRB_SIZE_MASK;
+ length = req->request.length;
+ chain = req->num_pending_sgs > 0;
+ if (chain) {
+ struct scatterlist *sg = req->sg;
+ struct scatterlist *s;
+ unsigned int pending = req->num_pending_sgs;
+ unsigned int i;
+ for_each_sg(sg, s, pending, i) {
+ trb = &dep->trb_pool[dep->trb_dequeue];
+
+ req->sg = sg_next(s);
+ req->num_pending_sgs--;
+
+ ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+ event, status, chain);
+ if (ret)
+ break;
+ }
+ } else {
+ trb = &dep->trb_pool[dep->trb_dequeue];
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
event, status, chain);
- if (ret)
- break;
- } while (++i < req->request.num_mapped_sgs);
+ }
/*
* We assume here we will always receive the entire data block
@@ -2071,12 +1975,21 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
* should receive and we simply bounce the request back to the
* gadget driver for further processing.
*/
- req->request.actual += req->request.length - count;
+ actual = length - req->request.actual;
+ req->request.actual = actual;
+
+ if (ret && chain && (actual < length) && req->num_pending_sgs)
+ return __dwc3_gadget_kick_transfer(dep, 0);
+
dwc3_gadget_giveback(dep, req, status);
- if (ret)
+ if (ret) {
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ ioc = true;
break;
- } while (1);
+ }
+ }
/*
* Our endpoint might get disabled by another thread during
@@ -2103,10 +2016,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
}
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
- if ((event->status & DEPEVT_STATUS_IOC) &&
- (trb->ctrl & DWC3_TRB_CTRL_IOC))
- return 0;
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc)
+ return 0;
+
return 1;
}
@@ -2322,6 +2234,18 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
*
* - Issue EndTransfer WITH CMDIOC bit set
* - Wait 100us
+ *
+ * As of IP version 3.10a of the DWC_usb3 IP, the controller
+ * supports a mode to work around the above limitation. The
+ * software can poll the CMDACT bit in the DEPCMD register
+ * after issuing an EndTransfer command. This mode is enabled
+ * by writing GUCTL2[14]. This polling is already done in the
+ * dwc3_send_gadget_ep_cmd() function so if the mode is
+ * enabled, the EndTransfer command will have completed upon
+ * returning from this function and we don't need to delay for
+ * 100us.
+ *
+ * This mode is NOT available on the DWC_usb31 IP.
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
@@ -2333,7 +2257,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
WARN_ON_ONCE(ret);
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
- udelay(100);
+
+ if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
+ udelay(100);
}
static void dwc3_stop_active_transfers(struct dwc3 *dwc)
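Much of the gadget.c rework above hinges on dwc3_calc_trbs_left() and on advancing the dequeue index as each TRB completes. The ring bookkeeping behind that is ordinary circular-buffer arithmetic; the standalone sketch below is a generic analogue with an invented ring size, not the driver's exact formula (in the real pool the reserved slot corresponds to the link TRB):

#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two */

struct ring {
	unsigned int enq;	/* producer (enqueue) index */
	unsigned int deq;	/* consumer (dequeue) index */
};

/* Free slots in a classic circular buffer: one slot stays unused so a
 * full ring can be told apart from an empty one. */
static unsigned int slots_left(const struct ring *r)
{
	return (r->deq - r->enq - 1) & (RING_SIZE - 1);
}

int main(void)
{
	struct ring r = { .enq = 0, .deq = 0 };

	printf("empty: %u free\n", slots_left(&r));              /* 7 */
	r.enq = 7;                       /* produced 7 entries */
	printf("nearly full: %u free\n", slots_left(&r));        /* 0 */
	r.deq = 3;                       /* consumed 3 entries */
	printf("after completions: %u free\n", slots_left(&r));  /* 3 */
	return 0;
}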
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index ec004c6d76f2..bd86f84f3790 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -35,9 +35,9 @@ static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
return -ETIMEDOUT;
}
-static int dwc3_ulpi_read(struct ulpi_ops *ops, u8 addr)
+static int dwc3_ulpi_read(struct device *dev, u8 addr)
{
- struct dwc3 *dwc = dev_get_drvdata(ops->dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
int ret;
@@ -53,9 +53,9 @@ static int dwc3_ulpi_read(struct ulpi_ops *ops, u8 addr)
return DWC3_GUSB2PHYACC_DATA(reg);
}
-static int dwc3_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val)
+static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
{
- struct dwc3 *dwc = dev_get_drvdata(ops->dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
@@ -65,7 +65,7 @@ static int dwc3_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val)
return dwc3_ulpi_busyloop(dwc);
}
-static struct ulpi_ops dwc3_ulpi_ops = {
+static const struct ulpi_ops dwc3_ulpi_ops = {
.read = dwc3_ulpi_read,
.write = dwc3_ulpi_write,
};
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 3c3f31ceece7..8ad203296079 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -209,25 +209,6 @@ config USB_F_PRINTER
config USB_F_TCM
tristate
-choice
- tristate "USB Gadget Drivers"
- default USB_ETH
- help
- A Linux "Gadget Driver" talks to the USB Peripheral Controller
- driver through the abstract "gadget" API. Some other operating
- systems call these "client" drivers, of which "class drivers"
- are a subset (implementing a USB device class specification).
- A gadget driver implements one or more USB functions using
- the peripheral hardware.
-
- Gadget drivers are hardware-neutral, or "platform independent",
- except that they sometimes must understand quirks or limitations
- of the particular controllers they work with. For example, when
- a controller doesn't support alternate configurations or provide
- enough of the right types of endpoints, the gadget driver might
- not be able work with that controller, or might need to implement
- a less common variant of a device class protocol.
-
# this first set of drivers all depend on bulk-capable hardware.
config USB_CONFIGFS
@@ -439,6 +420,7 @@ config USB_CONFIGFS_F_HID
config USB_CONFIGFS_F_UVC
bool "USB Webcam function"
depends on USB_CONFIGFS
+ depends on VIDEO_V4L2
depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
select USB_F_UVC
@@ -475,6 +457,25 @@ config USB_CONFIGFS_F_TCM
Both protocols can work on USB2.0 and USB3.0.
UAS utilizes the USB 3.0 feature called streams support.
+choice
+ tristate "USB Gadget Drivers"
+ default USB_ETH
+ help
+ A Linux "Gadget Driver" talks to the USB Peripheral Controller
+ driver through the abstract "gadget" API. Some other operating
+ systems call these "client" drivers, of which "class drivers"
+ are a subset (implementing a USB device class specification).
+ A gadget driver implements one or more USB functions using
+ the peripheral hardware.
+
+ Gadget drivers are hardware-neutral, or "platform independent",
+ except that they sometimes must understand quirks or limitations
+ of the particular controllers they work with. For example, when
+ a controller doesn't support alternate configurations or provide
+ enough of the right types of endpoints, the gadget driver might
+ not be able to work with that controller, or might need to implement
+ a less common variant of a device class protocol.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ebe6af7976e..32176f779861 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1893,17 +1893,21 @@ unknown:
/* functions always handle their interfaces and endpoints...
* punt other recipients (other, WUSB, ...) to the current
* configuration code.
- *
- * REVISIT it could make sense to let the composite device
- * take such requests too, if that's ever needed: to work
- * in config 0, etc.
*/
if (cdev->config) {
list_for_each_entry(f, &cdev->config->functions, list)
- if (f->req_match && f->req_match(f, ctrl))
+ if (f->req_match &&
+ f->req_match(f, ctrl, false))
goto try_fun_setup;
- f = NULL;
+ } else {
+ struct usb_configuration *c;
+ list_for_each_entry(c, &cdev->configs, list)
+ list_for_each_entry(f, &c->functions, list)
+ if (f->req_match &&
+ f->req_match(f, ctrl, true))
+ goto try_fun_setup;
}
+ f = NULL;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f9237fe2be05..3984787f8e97 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1211,8 +1211,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
list_move_tail(&f->list, &cfg->func_list);
if (f->unbind) {
- dev_err(&gi->cdev.gadget->dev, "unbind function"
- " '%s'/%p\n", f->name, f);
+ dev_dbg(&gi->cdev.gadget->dev,
+ "unbind function '%s'/%p\n",
+ f->name, f);
f->unbind(c, f);
}
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5c8429f23a89..0aeed85bb5cb 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -98,6 +98,9 @@ static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
const struct usb_ctrlrequest *);
+static bool ffs_func_req_match(struct usb_function *,
+ const struct usb_ctrlrequest *,
+ bool config0);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);
@@ -2243,7 +2246,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
FUNCTIONFS_HAS_SS_DESC |
FUNCTIONFS_HAS_MS_OS_DESC |
FUNCTIONFS_VIRTUAL_ADDR |
- FUNCTIONFS_EVENTFD)) {
+ FUNCTIONFS_EVENTFD |
+ FUNCTIONFS_ALL_CTRL_RECIP |
+ FUNCTIONFS_CONFIG0_SETUP)) {
ret = -ENOSYS;
goto error;
}
@@ -3094,8 +3099,9 @@ static int ffs_func_setup(struct usb_function *f,
* handle them. All other either handled by composite or
* passed to usb_configuration->setup() (if one is set). No
* matter, we will handle requests directed to endpoint here
- * as well (as it's straightforward) but what to do with any
- * other request?
+ * as well (as it's straightforward). Other request recipient
+ * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
+ * is being used.
*/
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
@@ -3116,7 +3122,10 @@ static int ffs_func_setup(struct usb_function *f,
break;
default:
- return -EOPNOTSUPP;
+ if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
+ ret = le16_to_cpu(creq->wIndex);
+ else
+ return -EOPNOTSUPP;
}
spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
@@ -3128,6 +3137,28 @@ static int ffs_func_setup(struct usb_function *f,
return 0;
}
+static bool ffs_func_req_match(struct usb_function *f,
+ const struct usb_ctrlrequest *creq,
+ bool config0)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+
+ if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
+ return false;
+
+ switch (creq->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ return ffs_func_revmap_intf(func,
+ le16_to_cpu(creq->wIndex)) >= 0;
+ case USB_RECIP_ENDPOINT:
+ return ffs_func_revmap_ep(func,
+ le16_to_cpu(creq->wIndex)) >= 0;
+ default:
+ return (bool) (func->ffs->user_flags &
+ FUNCTIONFS_ALL_CTRL_RECIP);
+ }
+}
+
static void ffs_func_suspend(struct usb_function *f)
{
ENTER();
@@ -3378,6 +3409,7 @@ static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
func->function.set_alt = ffs_func_set_alt;
func->function.disable = ffs_func_disable;
func->function.setup = ffs_func_setup;
+ func->function.req_match = ffs_func_req_match;
func->function.suspend = ffs_func_suspend;
func->function.resume = ffs_func_resume;
func->function.free_func = ffs_free;
@@ -3470,6 +3502,11 @@ static void _ffs_free_dev(struct ffs_dev *dev)
list_del(&dev->entry);
if (dev->name_allocated)
kfree(dev->name);
+
+ /* Clear the private_data pointer to stop incorrect dev access */
+ if (dev->ffs_data)
+ dev->ffs_data->private_data = NULL;
+
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
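The new ffs_func_req_match() above keys off the recipient bits of bmRequestType to decide whether a control request belongs to this function. Per the USB 2.0 specification those are the low five bits (0 = device, 1 = interface, 2 = endpoint, 3 = other); a tiny standalone decoder for illustration:

#include <stdint.h>
#include <stdio.h>

#define USB_RECIP_MASK		0x1f
#define USB_RECIP_DEVICE	0x00
#define USB_RECIP_INTERFACE	0x01
#define USB_RECIP_ENDPOINT	0x02
#define USB_RECIP_OTHER		0x03

static const char *recipient(uint8_t bmRequestType)
{
	switch (bmRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:    return "device";
	case USB_RECIP_INTERFACE: return "interface";
	case USB_RECIP_ENDPOINT:  return "endpoint";
	case USB_RECIP_OTHER:     return "other";
	default:                  return "reserved";
	}
}

int main(void)
{
	printf("%s\n", recipient(0x81));	/* IN | standard | interface */
	printf("%s\n", recipient(0x02));	/* OUT | standard | endpoint */
	return 0;
}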
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 51980c50546d..e2966f87c860 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -365,7 +365,7 @@ static int f_hidg_open(struct inode *inode, struct file *fd)
static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
@@ -617,14 +617,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
/* preallocate request and buffer */
status = -ENOMEM;
- hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
+ hidg->req = alloc_ep_req(hidg->in_ep, hidg->report_length);
if (!hidg->req)
goto fail;
- hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
- if (!hidg->req->buf)
- goto fail;
-
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
@@ -677,11 +673,8 @@ fail_free_descs:
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
- if (hidg->req != NULL) {
- kfree(hidg->req->buf);
- if (hidg->in_ep != NULL)
- usb_ep_free_request(hidg->in_ep, hidg->req);
- }
+ if (hidg->req != NULL)
+ free_ep_req(hidg->in_ep, hidg->req);
return status;
}
@@ -809,11 +802,21 @@ end:
CONFIGFS_ATTR(f_hid_opts_, report_desc);
+static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
+
+ return sprintf(page, "%d:%d\n", major, opts->minor);
+}
+
+CONFIGFS_ATTR_RO(f_hid_opts_, dev);
+
static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_subclass,
&f_hid_opts_attr_protocol,
&f_hid_opts_attr_report_length,
&f_hid_opts_attr_report_desc,
+ &f_hid_opts_attr_dev,
NULL,
};
@@ -910,8 +913,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
/* disable/free request and end point */
usb_ep_disable(hidg->in_ep);
- kfree(hidg->req->buf);
- usb_ep_free_request(hidg->in_ep, hidg->req);
+ free_ep_req(hidg->in_ep, hidg->req);
usb_free_all_descriptors(f);
}
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 3a9f8f9c77bd..e70093835e14 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -308,9 +308,7 @@ static void disable_loopback(struct f_loopback *loop)
static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_loopback *loop = ep->driver_data;
-
- return alloc_ep_req(ep, len, loop->buflen);
+ return alloc_ep_req(ep, len);
}
static int alloc_requests(struct usb_composite_dev *cdev,
@@ -333,7 +331,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
if (!in_req)
goto fail;
- out_req = lb_alloc_ep_req(loop->out_ep, 0);
+ out_req = lb_alloc_ep_req(loop->out_ep, loop->buflen);
if (!out_req)
goto fail_in;
@@ -593,13 +591,9 @@ DECLARE_USB_FUNCTION(Loopback, loopback_alloc_instance, loopback_alloc);
int __init lb_modinit(void)
{
- int ret;
-
- ret = usb_function_register(&Loopbackusb_func);
- if (ret)
- return ret;
- return ret;
+ return usb_function_register(&Loopbackusb_func);
}
+
void __exit lb_modexit(void)
{
usb_function_unregister(&Loopbackusb_func);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 2505117e88e8..8f3659b65f53 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -311,11 +311,7 @@ struct fsg_common {
/* Gadget's private data. */
void *private_data;
- /*
- * Vendor (8 chars), product (16 chars), release (4
- * hexadecimal digits) and NUL byte
- */
- char inquiry_string[8 + 16 + 4 + 1];
+ char inquiry_string[INQUIRY_STRING_LEN];
struct kref ref;
};
@@ -1107,7 +1103,12 @@ static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
buf[5] = 0; /* No special options */
buf[6] = 0;
buf[7] = 0;
- memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
+ if (curlun->inquiry_string[0])
+ memcpy(buf + 8, curlun->inquiry_string,
+ sizeof(curlun->inquiry_string));
+ else
+ memcpy(buf + 8, common->inquiry_string,
+ sizeof(common->inquiry_string));
return 36;
}
@@ -3209,12 +3210,27 @@ static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
CONFIGFS_ATTR(fsg_lun_opts_, nofua);
+static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
+ char *page)
+{
+ return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
+}
+
+static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
+}
+
+CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
+
static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_attr_file,
&fsg_lun_opts_attr_ro,
&fsg_lun_opts_attr_removable,
&fsg_lun_opts_attr_cdrom,
&fsg_lun_opts_attr_nofua,
+ &fsg_lun_opts_attr_inquiry_string,
NULL,
};
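The removed comment above documents where the old buffer size came from: an 8-character vendor, a 16-character product, 4 hex digits of release and a terminating NUL, i.e. 8 + 16 + 4 + 1 = 29 bytes. The INQUIRY_STRING_LEN macro that replaces it presumably encodes the same value, though its definition lives outside this hunk. A quick standalone check of how such a string is assembled (the names and release number below are invented):

#include <stdio.h>

#define INQUIRY_STRING_LEN (8 + 16 + 4 + 1)	/* assumed to match the macro */

int main(void)
{
	char inquiry_string[INQUIRY_STRING_LEN];

	/* %-8.8s and %-16.16s pad or truncate to the fixed SCSI INQUIRY
	 * field widths, then 4 hex digits of release follow. */
	snprintf(inquiry_string, sizeof(inquiry_string), "%-8.8s%-16.16s%04x",
		 "Linux", "File-Stor Gadget", 0x0404);

	printf("len=%zu \"%s\"\n", sizeof(inquiry_string) - 1, inquiry_string);
	return 0;
}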
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index b6a9918eaefb..d3902313b8ac 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -100,6 +100,7 @@ struct fsg_lun_config {
char removable;
char cdrom;
char nofua;
+ char inquiry_string[INQUIRY_STRING_LEN];
};
struct fsg_config {
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 58fc199a18ec..a5719f271bf0 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -51,6 +51,19 @@ static const char f_midi_longname[] = "MIDI Gadget";
*/
#define MAX_PORTS 16
+/* MIDI message states */
+enum {
+ STATE_INITIAL = 0, /* pseudo state */
+ STATE_1PARAM,
+ STATE_2PARAM_1,
+ STATE_2PARAM_2,
+ STATE_SYSEX_0,
+ STATE_SYSEX_1,
+ STATE_SYSEX_2,
+ STATE_REAL_TIME,
+ STATE_FINISHED, /* pseudo state */
+};
+
/*
* This is a gadget, and the IN/OUT naming is from the host's perspective.
* USB -> OUT endpoint -> rawmidi
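The state enum above and the rewritten f_midi_transmit_byte() later in this file's diff form a byte-at-a-time state machine that turns a raw MIDI stream into 4-byte USB-MIDI event packets: cable number and Code Index Number (CIN) in byte 0, up to three MIDI bytes after it. The heavily simplified standalone sketch below handles only complete channel-voice messages and single-byte real-time messages, which is enough to show the packet layout; it is not the driver's full SysEx handling.

#include <stdint.h>
#include <stdio.h>

/* Pack one complete channel-voice message (status + two data bytes) into
 * a USB-MIDI event packet.  For channel-voice messages the CIN equals the
 * high nibble of the status byte (USB MIDI 1.0, table 4-1). */
static void pack_voice(uint8_t cable, uint8_t status, uint8_t d1, uint8_t d2,
		       uint8_t out[4])
{
	out[0] = (uint8_t)((cable << 4) | (status >> 4));	/* cable | CIN */
	out[1] = status;
	out[2] = d1;
	out[3] = d2;
}

/* Real-time messages (0xf8..0xff) use CIN 0xf: a single MIDI byte. */
static void pack_realtime(uint8_t cable, uint8_t status, uint8_t out[4])
{
	out[0] = (uint8_t)((cable << 4) | 0x0f);
	out[1] = status;
	out[2] = 0;
	out[3] = 0;
}

int main(void)
{
	uint8_t pkt[4];

	pack_voice(0, 0x90, 60, 100, pkt);	/* note-on, middle C */
	printf("%02x %02x %02x %02x\n", pkt[0], pkt[1], pkt[2], pkt[3]);

	pack_realtime(0, 0xf8, pkt);		/* timing clock */
	printf("%02x %02x %02x %02x\n", pkt[0], pkt[1], pkt[2], pkt[3]);
	return 0;
}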
@@ -61,13 +74,6 @@ struct gmidi_in_port {
int active;
uint8_t cable;
uint8_t state;
-#define STATE_UNKNOWN 0
-#define STATE_1PARAM 1
-#define STATE_2PARAM_1 2
-#define STATE_2PARAM_2 3
-#define STATE_SYSEX_0 4
-#define STATE_SYSEX_1 5
-#define STATE_SYSEX_2 6
uint8_t data[2];
};
@@ -205,7 +211,7 @@ static struct usb_gadget_strings *midi_strings[] = {
static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static const uint8_t f_midi_cin_length[] = {
@@ -299,6 +305,19 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
}
}
+static void f_midi_drop_out_substreams(struct f_midi *midi)
+{
+ unsigned int i;
+
+ for (i = 0; i < midi->in_ports; i++) {
+ struct gmidi_in_port *port = midi->in_ports_array + i;
+ struct snd_rawmidi_substream *substream = port->substream;
+
+ if (port->active && substream)
+ snd_rawmidi_drop_output(substream);
+ }
+}
+
static int f_midi_start_ep(struct f_midi *midi,
struct usb_function *f,
struct usb_ep *ep)
@@ -360,9 +379,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- midi_alloc_ep_req(midi->out_ep,
- max_t(unsigned, midi->buflen,
- bulk_out_desc.wMaxPacketSize));
+ midi_alloc_ep_req(midi->out_ep, midi->buflen);
+
if (req == NULL)
return -ENOMEM;
@@ -397,6 +415,8 @@ static void f_midi_disable(struct usb_function *f)
/* release IN requests */
while (kfifo_get(&midi->in_req_fifo, &req))
free_ep_req(midi->in_ep, req);
+
+ f_midi_drop_out_substreams(midi);
}
static int f_midi_snd_free(struct snd_device *device)
@@ -404,130 +424,166 @@ static int f_midi_snd_free(struct snd_device *device)
return 0;
}
-static void f_midi_transmit_packet(struct usb_request *req, uint8_t p0,
- uint8_t p1, uint8_t p2, uint8_t p3)
-{
- unsigned length = req->length;
- u8 *buf = (u8 *)req->buf + length;
-
- buf[0] = p0;
- buf[1] = p1;
- buf[2] = p2;
- buf[3] = p3;
- req->length = length + 4;
-}
-
/*
* Converts MIDI commands to USB MIDI packets.
*/
static void f_midi_transmit_byte(struct usb_request *req,
struct gmidi_in_port *port, uint8_t b)
{
- uint8_t p0 = port->cable << 4;
+ uint8_t p[4] = { port->cable << 4, 0, 0, 0 };
+ uint8_t next_state = STATE_INITIAL;
+
+ switch (b) {
+ case 0xf8 ... 0xff:
+ /* System Real-Time Messages */
+ p[0] |= 0x0f;
+ p[1] = b;
+ next_state = port->state;
+ port->state = STATE_REAL_TIME;
+ break;
- if (b >= 0xf8) {
- f_midi_transmit_packet(req, p0 | 0x0f, b, 0, 0);
- } else if (b >= 0xf0) {
+ case 0xf7:
+ /* End of SysEx */
+ switch (port->state) {
+ case STATE_SYSEX_0:
+ p[0] |= 0x05;
+ p[1] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ case STATE_SYSEX_1:
+ p[0] |= 0x06;
+ p[1] = port->data[0];
+ p[2] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ case STATE_SYSEX_2:
+ p[0] |= 0x07;
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ default:
+ /* Ignore byte */
+ next_state = port->state;
+ port->state = STATE_INITIAL;
+ }
+ break;
+
+ case 0xf0 ... 0xf6:
+ /* System Common Messages */
+ port->data[0] = port->data[1] = 0;
+ port->state = STATE_INITIAL;
switch (b) {
case 0xf0:
port->data[0] = b;
- port->state = STATE_SYSEX_1;
+ port->data[1] = 0;
+ next_state = STATE_SYSEX_1;
break;
case 0xf1:
case 0xf3:
port->data[0] = b;
- port->state = STATE_1PARAM;
+ next_state = STATE_1PARAM;
break;
case 0xf2:
port->data[0] = b;
- port->state = STATE_2PARAM_1;
+ next_state = STATE_2PARAM_1;
break;
case 0xf4:
case 0xf5:
- port->state = STATE_UNKNOWN;
+ next_state = STATE_INITIAL;
break;
case 0xf6:
- f_midi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0);
- port->state = STATE_UNKNOWN;
- break;
- case 0xf7:
- switch (port->state) {
- case STATE_SYSEX_0:
- f_midi_transmit_packet(req,
- p0 | 0x05, 0xf7, 0, 0);
- break;
- case STATE_SYSEX_1:
- f_midi_transmit_packet(req,
- p0 | 0x06, port->data[0], 0xf7, 0);
- break;
- case STATE_SYSEX_2:
- f_midi_transmit_packet(req,
- p0 | 0x07, port->data[0],
- port->data[1], 0xf7);
- break;
- }
- port->state = STATE_UNKNOWN;
+ p[0] |= 0x05;
+ p[1] = 0xf6;
+ next_state = STATE_FINISHED;
break;
}
- } else if (b >= 0x80) {
+ break;
+
+ case 0x80 ... 0xef:
+ /*
+ * Channel Voice Messages, Channel Mode Messages
+ * and Control Change Messages.
+ */
port->data[0] = b;
+ port->data[1] = 0;
+ port->state = STATE_INITIAL;
if (b >= 0xc0 && b <= 0xdf)
- port->state = STATE_1PARAM;
+ next_state = STATE_1PARAM;
else
- port->state = STATE_2PARAM_1;
- } else { /* b < 0x80 */
+ next_state = STATE_2PARAM_1;
+ break;
+
+ case 0x00 ... 0x7f:
+ /* Message parameters */
switch (port->state) {
case STATE_1PARAM:
- if (port->data[0] < 0xf0) {
- p0 |= port->data[0] >> 4;
- } else {
- p0 |= 0x02;
- port->state = STATE_UNKNOWN;
- }
- f_midi_transmit_packet(req, p0, port->data[0], b, 0);
+ if (port->data[0] < 0xf0)
+ p[0] |= port->data[0] >> 4;
+ else
+ p[0] |= 0x02;
+
+ p[1] = port->data[0];
+ p[2] = b;
+ /* This is to allow Running Status messages */
+ next_state = STATE_1PARAM;
break;
case STATE_2PARAM_1:
port->data[1] = b;
- port->state = STATE_2PARAM_2;
+ next_state = STATE_2PARAM_2;
break;
case STATE_2PARAM_2:
- if (port->data[0] < 0xf0) {
- p0 |= port->data[0] >> 4;
- port->state = STATE_2PARAM_1;
- } else {
- p0 |= 0x03;
- port->state = STATE_UNKNOWN;
- }
- f_midi_transmit_packet(req,
- p0, port->data[0], port->data[1], b);
+ if (port->data[0] < 0xf0)
+ p[0] |= port->data[0] >> 4;
+ else
+ p[0] |= 0x03;
+
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = b;
+ /* This is to allow Running Status messages */
+ next_state = STATE_2PARAM_1;
break;
case STATE_SYSEX_0:
port->data[0] = b;
- port->state = STATE_SYSEX_1;
+ next_state = STATE_SYSEX_1;
break;
case STATE_SYSEX_1:
port->data[1] = b;
- port->state = STATE_SYSEX_2;
+ next_state = STATE_SYSEX_2;
break;
case STATE_SYSEX_2:
- f_midi_transmit_packet(req,
- p0 | 0x04, port->data[0], port->data[1], b);
- port->state = STATE_SYSEX_0;
+ p[0] |= 0x04;
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = b;
+ next_state = STATE_SYSEX_0;
break;
}
+ break;
}
-}
-static void f_midi_drop_out_substreams(struct f_midi *midi)
-{
- unsigned int i;
+ /* States where we have to write into the USB request */
+ if (next_state == STATE_FINISHED ||
+ port->state == STATE_SYSEX_2 ||
+ port->state == STATE_1PARAM ||
+ port->state == STATE_2PARAM_2 ||
+ port->state == STATE_REAL_TIME) {
- for (i = 0; i < midi->in_ports; i++) {
- struct gmidi_in_port *port = midi->in_ports_array + i;
- struct snd_rawmidi_substream *substream = port->substream;
- if (port->active && substream)
- snd_rawmidi_drop_output(substream);
+ unsigned int length = req->length;
+ u8 *buf = (u8 *)req->buf + length;
+
+ memcpy(buf, p, sizeof(p));
+ req->length = length + sizeof(p);
+
+ if (next_state == STATE_FINISHED) {
+ next_state = STATE_INITIAL;
+ port->data[0] = port->data[1] = 0;
+ }
}
+
+ port->state = next_state;
}
static int f_midi_do_transmit(struct f_midi *midi, struct usb_ep *ep)
@@ -642,7 +698,7 @@ static int f_midi_in_open(struct snd_rawmidi_substream *substream)
VDBG(midi, "%s()\n", __func__);
port = midi->in_ports_array + substream->number;
port->substream = substream;
- port->state = STATE_UNKNOWN;
+ port->state = STATE_INITIAL;
return 0;
}
@@ -1123,7 +1179,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 512;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 97f0a9bc84df..639603722709 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -90,7 +90,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
return 19 * 64 * 1 * 1000 * 8;
@@ -333,6 +335,76 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_ncm_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS)
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ncm_notify_comp_desc = {
+ .bLength = sizeof(ss_ncm_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_ncm_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_ncm_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ncm_bulk_comp_desc = {
+ .bLength = sizeof(ss_ncm_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ss_ncm_notify_desc,
+ (struct usb_descriptor_header *) &ss_ncm_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ss_ncm_in_desc,
+ (struct usb_descriptor_header *) &ss_ncm_bulk_comp_desc,
+ (struct usb_descriptor_header *) &ss_ncm_out_desc,
+ (struct usb_descriptor_header *) &ss_ncm_bulk_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -852,6 +924,8 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
*/
ncm->port.is_zlp_ok =
gadget_is_zlp_supported(cdev->gadget);
+ ncm->port.no_skb_reserve =
+ gadget_avoids_skb_reserve(cdev->gadget);
ncm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ncm\n");
net = gether_connect(&ncm->port);
@@ -1431,8 +1505,13 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ ss_ncm_in_desc.bEndpointAddress = fs_ncm_in_desc.bEndpointAddress;
+ ss_ncm_out_desc.bEndpointAddress = fs_ncm_out_desc.bEndpointAddress;
+ ss_ncm_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL, NULL);
+ ncm_ss_function, NULL);
if (status)
goto fail;
@@ -1450,6 +1529,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
ncm->port.in_ep->name, ncm->port.out_ep->name,
ncm->notify->name);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64706a789580..0de36cda6e41 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -889,13 +889,17 @@ static void printer_soft_reset(struct printer_dev *dev)
/*-------------------------------------------------------------------------*/
static bool gprinter_req_match(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
+ const struct usb_ctrlrequest *ctrl,
+ bool config0)
{
struct printer_dev *dev = func_to_printer(f);
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ if (config0)
+ return false;
+
if ((ctrl->bRequestType & USB_RECIP_MASK) != USB_RECIP_INTERFACE ||
(ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS)
return false;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index df0189ddfdd5..8784fa12ea2c 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -293,9 +293,7 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
{
- struct f_sourcesink *ss = ep->driver_data;
-
- return alloc_ep_req(ep, len, ss->buflen);
+ return alloc_ep_req(ep, len);
}
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
@@ -606,7 +604,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
} else {
ep = is_in ? ss->in_ep : ss->out_ep;
qlen = ss->bulk_qlen;
- size = 0;
+ size = ss->buflen;
}
for (i = 0; i < qlen; i++) {
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5dee04..27ed51b5082f 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,6 +258,13 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
v4l2_event_queue(&uvc->vdev, &v4l2_event);
+ /* Pass additional setup data to userspace */
+ if (uvc->event_setup_out && uvc->event_length) {
+ uvc->control_req->length = uvc->event_length;
+ return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
+ uvc->control_req, GFP_ATOMIC);
+ }
+
return 0;
}
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 990df221c629..8fbf6861690d 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -369,6 +369,12 @@ ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf)
}
EXPORT_SYMBOL_GPL(fsg_show_removable);
+ssize_t fsg_show_inquiry_string(struct fsg_lun *curlun, char *buf)
+{
+ return sprintf(buf, "%s\n", curlun->inquiry_string);
+}
+EXPORT_SYMBOL_GPL(fsg_show_inquiry_string);
+
/*
* The caller must hold fsg->filesem for reading when calling this function.
*/
@@ -499,4 +505,22 @@ ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
}
EXPORT_SYMBOL_GPL(fsg_store_removable);
+ssize_t fsg_store_inquiry_string(struct fsg_lun *curlun, const char *buf,
+ size_t count)
+{
+ const size_t len = min(count, sizeof(curlun->inquiry_string));
+
+ if (len == 0 || buf[0] == '\n') {
+ curlun->inquiry_string[0] = 0;
+ } else {
+ snprintf(curlun->inquiry_string,
+ sizeof(curlun->inquiry_string), "%-28s", buf);
+ if (curlun->inquiry_string[len-1] == '\n')
+ curlun->inquiry_string[len-1] = ' ';
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fsg_store_inquiry_string);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index c3544e61da66..e69848994cb4 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -88,6 +88,12 @@ do { \
#define ASC(x) ((u8) ((x) >> 8))
#define ASCQ(x) ((u8) (x))
+/*
+ * Vendor (8 chars), product (16 chars), release (4 hexadecimal digits) and NUL
+ * byte
+ */
+#define INQUIRY_STRING_LEN ((size_t) (8 + 16 + 4 + 1))
+
struct fsg_lun {
struct file *filp;
loff_t file_length;
@@ -112,6 +118,7 @@ struct fsg_lun {
struct device dev;
const char *name; /* "lun.name" */
const char **name_pfx; /* "function.name" */
+ char inquiry_string[INQUIRY_STRING_LEN];
};
static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
@@ -210,6 +217,7 @@ ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
char *buf);
+ssize_t fsg_show_inquiry_string(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf);
ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem,
@@ -221,5 +229,7 @@ ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem,
const char *buf, size_t count);
ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf,
size_t count);
+ssize_t fsg_store_inquiry_string(struct fsg_lun *curlun, const char *buf,
+ size_t count);
#endif /* USB_STORAGE_COMMON_H */
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 5f562c1ec795..9c8c9ed1dc9e 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -82,6 +82,7 @@ struct eth_dev {
#define WORK_RX_MEMORY 0
bool zlp;
+ bool no_skb_reserve;
u8 host_mac[ETH_ALEN];
u8 dev_mac[ETH_ALEN];
};
@@ -233,7 +234,8 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
* but on at least one, checksumming fails otherwise. Note:
* RNDIS headers involve variable numbers of LE32 values.
*/
- skb_reserve(skb, NET_IP_ALIGN);
+ if (likely(!dev->no_skb_reserve))
+ skb_reserve(skb, NET_IP_ALIGN);
req->buf = skb->data;
req->length = size;
@@ -569,7 +571,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->complete = tx_complete;
/* NCM requires no zlp if transfer is dwNtbInMaxSize */
- if (dev->port_usb->is_fixed &&
+ if (dev->port_usb &&
+ dev->port_usb->is_fixed &&
length == dev->port_usb->fixed_in_len &&
(length % in->maxpacket) == 0)
req->zero = 0;
@@ -1063,6 +1066,7 @@ struct net_device *gether_connect(struct gether *link)
if (result == 0) {
dev->zlp = link->is_zlp_ok;
+ dev->no_skb_reserve = link->no_skb_reserve;
DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
dev->header_len = link->header_len;
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index c77145bd6b5b..81d94a7ae4b4 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -64,6 +64,7 @@ struct gether {
struct usb_ep *out_ep;
bool is_zlp_ok;
+ bool no_skb_reserve;
u16 cdc_filter;
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index fc2ac150f5ff..0bf39c3ccdb1 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -47,7 +47,7 @@ static char *id = SNDRV_DEFAULT_STR1;
module_param(id, charp, S_IRUGO);
MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter.");
-static unsigned int buflen = 256;
+static unsigned int buflen = 512;
module_param(buflen, uint, S_IRUGO);
MODULE_PARM_DESC(buflen, "MIDI buffer length");
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 4bc7eea8bfc8..18839732c840 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -12,14 +12,16 @@
*/
#include "u_f.h"
+#include <linux/usb/ch9.h>
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req) {
- req->length = len ?: default_len;
+ req->length = usb_endpoint_dir_out(ep->desc) ?
+ usb_ep_align(ep, len) : len;
req->buf = kmalloc(req->length, GFP_ATOMIC);
if (!req->buf) {
usb_ep_free_request(ep, req);
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 4247cc098a89..7d53a4773d1a 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -47,8 +47,21 @@
struct usb_ep;
struct usb_request;
-/* Requests allocated via alloc_ep_req() must be freed by free_ep_req(). */
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);
+/**
+ * alloc_ep_req - allocate a usb_request and its data buffer for the
+ * given endpoint.
+ *
+ * @ep: the endpoint to allocate a usb_request
+ * @len: suggested size of the usb_request's buffer
+ *
+ * In case @ep direction is OUT, the @len will be aligned to ep's
+ * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use
+ * the usb_request's length (req->length) to refer to the allocated buffer size.
+ * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
+ */
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
+
+/* Frees a usb_request previously allocated by alloc_ep_req() */
static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 40c04bb25f2f..9483489080f6 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -107,10 +107,8 @@ int usb_ep_enable(struct usb_ep *ep)
goto out;
ret = ep->ops->enable(ep, ep->desc);
- if (ret) {
- ret = ret;
+ if (ret)
goto out;
- }
ep->enabled = true;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 8bb011ea78f7..4fff51b8a18e 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -421,10 +421,8 @@ static int qe_ep_rxbd_update(struct qe_ep *ep)
bd = ep->rxbase;
ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
- if (ep->rxframe == NULL) {
- dev_err(ep->udc->dev, "malloc rxframe failed\n");
+ if (!ep->rxframe)
return -ENOMEM;
- }
qe_frame_init(ep->rxframe);
@@ -435,9 +433,7 @@ static int qe_ep_rxbd_update(struct qe_ep *ep)
size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
- if (ep->rxbuffer == NULL) {
- dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
- size);
+ if (!ep->rxbuffer) {
kfree(ep->rxframe);
return -ENOMEM;
}
@@ -668,10 +664,8 @@ static int qe_ep_init(struct qe_udc *udc,
if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
- if (ep->txframe == NULL) {
- dev_err(udc->dev, "malloc txframe failed\n");
+ if (!ep->txframe)
goto en_done2;
- }
qe_frame_init(ep->txframe);
}
@@ -2344,10 +2338,8 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
u32 offset;
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
- if (udc == NULL) {
- dev_err(&ofdev->dev, "malloc udc failed\n");
+ if (!udc)
goto cleanup;
- }
udc->dev = &ofdev->dev;
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index d2205d9e0c8b..5107987bd353 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1767,8 +1767,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* alloc, and start init */
dev = kzalloc (sizeof *dev, GFP_KERNEL);
- if (dev == NULL){
- pr_debug("enomem %s\n", pci_name(pdev));
+ if (!dev) {
retval = -ENOMEM;
goto err;
}
@@ -1839,6 +1838,8 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
if (dev)
goku_remove (pdev);
+ /* gadget_release is not registered yet, kfree explicitly */
+ kfree(dev);
return retval;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 614ab951a4ae..61c938c36d88 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -589,7 +589,7 @@ static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
ep = container_of(_ep, struct net2280_ep, ep);
if (!_ep || !_req) {
- dev_err(&ep->dev->pdev->dev, "%s: Inavlid ep=%p or req=%p\n",
+ dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
__func__, _ep, _req);
return;
}
@@ -1137,8 +1137,10 @@ dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
done(ep, req, status);
}
-static void scan_dma_completions(struct net2280_ep *ep)
+static int scan_dma_completions(struct net2280_ep *ep)
{
+ int num_completed = 0;
+
/* only look at descriptors that were "naturally" retired,
* so fifo and list head state won't matter
*/
@@ -1166,6 +1168,7 @@ static void scan_dma_completions(struct net2280_ep *ep)
break;
/* single transfer mode */
dma_done(ep, req, tmp, 0);
+ num_completed++;
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) &&
@@ -1194,7 +1197,10 @@ static void scan_dma_completions(struct net2280_ep *ep)
}
}
dma_done(ep, req, tmp, 0);
+ num_completed++;
}
+
+ return num_completed;
}
static void restart_dma(struct net2280_ep *ep)
@@ -1567,6 +1573,44 @@ static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
return ep;
}
+ /* USB3380: Only first four endpoints have DMA channels. Allocate
+ * slower interrupt endpoints from PIO hw endpoints, to allow bulk/isoc
+ * endpoints to use DMA hw endpoints.
+ */
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep2in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep4in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ !usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep1out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep3out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
+ usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep1in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep3in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
+ !usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep2out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep4out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
/* USB3380: use same address for usb and hardware endpoints */
snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
usb_endpoint_dir_in(desc) ? "in" : "out");
@@ -2547,8 +2591,11 @@ static void handle_ep_small(struct net2280_ep *ep)
/* manual DMA queue advance after short OUT */
if (likely(ep->dma)) {
if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
- u32 count;
+ struct net2280_request *stuck_req = NULL;
int stopped = ep->stopped;
+ int num_completed;
+ int stuck = 0;
+ u32 count;
/* TRANSFERRED works around OUT_DONE erratum 0112.
* we expect (N <= maxpacket) bytes; host wrote M.
@@ -2560,7 +2607,7 @@ static void handle_ep_small(struct net2280_ep *ep)
/* any preceding dma transfers must finish.
* dma handles (M >= N), may empty the queue
*/
- scan_dma_completions(ep);
+ num_completed = scan_dma_completions(ep);
if (unlikely(list_empty(&ep->queue) ||
ep->out_overflow)) {
req = NULL;
@@ -2580,6 +2627,31 @@ static void handle_ep_small(struct net2280_ep *ep)
req = NULL;
break;
}
+
+ /* Escape loop if no dma transfers completed
+ * after a few retries.
+ */
+ if (num_completed == 0) {
+ if (stuck_req == req &&
+ readl(&ep->dma->dmadesc) !=
+ req->td_dma && stuck++ > 5) {
+ count = readl(
+ &ep->dma->dmacount);
+ count &= DMA_BYTE_COUNT_MASK;
+ req = NULL;
+ ep_dbg(ep->dev, "%s escape stuck %d, count %u\n",
+ ep->ep.name, stuck,
+ count);
+ break;
+ } else if (stuck_req != req) {
+ stuck_req = req;
+ stuck = 0;
+ }
+ } else {
+ stuck_req = NULL;
+ stuck = 0;
+ }
+
udelay(1);
}
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 9b7d39484ed3..a8709f9e5648 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2875,7 +2875,7 @@ bad_on_1710:
xceiv = NULL;
/* "udc" is now valid */
pullup_disable(udc);
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
udc->gadget.is_otg = (config->otg != 0);
#endif
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index ad140aa00132..7fa60f5b7ae4 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -33,6 +33,7 @@
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
#include "pxa27x_udc.h"
@@ -1655,6 +1656,37 @@ static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -EOPNOTSUPP;
}
+/**
+ * pxa_udc_phy_event - Called by phy upon VBus event
+ * @nb: notifier block
+ * @action: phy action, either VBus connect or VBus disconnect
+ * @data: the usb_gadget structure in pxa_udc
+ *
+ * Called by the USB Phy when a cable connect or disconnect is sensed.
+ *
+ * Returns NOTIFY_OK when the event is handled, NOTIFY_DONE otherwise
+ */
+static int pxa_udc_phy_event(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct usb_gadget *gadget = data;
+
+ switch (action) {
+ case USB_EVENT_VBUS:
+ usb_gadget_vbus_connect(gadget);
+ return NOTIFY_OK;
+ case USB_EVENT_NONE:
+ usb_gadget_vbus_disconnect(gadget);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block pxa27x_udc_phy = {
+ .notifier_call = pxa_udc_phy_event,
+};
+
static int pxa27x_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int pxa27x_udc_stop(struct usb_gadget *g);
@@ -2432,7 +2464,14 @@ static int pxa_udc_probe(struct platform_device *pdev)
return udc->irq;
udc->dev = &pdev->dev;
- udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (of_have_populated_dt()) {
+ udc->transceiver =
+ devm_usb_get_phy_by_phandle(udc->dev, "phys", 0);
+ if (IS_ERR(udc->transceiver))
+ return PTR_ERR(udc->transceiver);
+ } else {
+ udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ }
if (IS_ERR(udc->gpiod)) {
dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
@@ -2465,14 +2504,20 @@ static int pxa_udc_probe(struct platform_device *pdev)
goto err;
}
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_register_notifier(udc->transceiver, &pxa27x_udc_phy);
retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (retval)
- goto err;
+ goto err_add_gadget;
pxa_init_debugfs(udc);
if (should_enable_udc(udc))
udc_enable(udc);
return 0;
+
+err_add_gadget:
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
err:
clk_unprepare(udc->clk);
return retval;
@@ -2489,6 +2534,8 @@ static int pxa_udc_remove(struct platform_device *_dev)
usb_del_gadget_udc(&udc->gadget);
pxa_cleanup_debugfs(udc);
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
usb_put_phy(udc->transceiver);
udc->transceiver = NULL;
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index f8bf290f1894..588e2531b8b8 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -973,10 +973,8 @@ static struct usb_request *xudc_ep_alloc_request(struct usb_ep *_ep,
udc = ep->udc;
req = kzalloc(sizeof(*req), gfp_flags);
- if (!req) {
- dev_err(udc->dev, "%s:not enough memory", __func__);
+ if (!req)
return NULL;
- }
req->ep = ep;
INIT_LIST_HEAD(&req->queue);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2e710a4cca52..0b80cee30da4 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -472,7 +472,7 @@ config USB_OHCI_HCD_AT91
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
- depends on (ARCH_OMAP3 || ARCH_OMAP4)
+ depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
default y
---help---
Enables support for the on-chip OHCI controller on
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 172ef17911aa..5f425c89faf1 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
@@ -34,6 +35,9 @@ MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
MODULE_LICENSE("GPL");
+/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
+#define USB_BCMA_CLKCTLST_USB_CLK_REQ 0x00000100
+
struct bcma_hcd_device {
struct bcma_device *core;
struct platform_device *ehci_dev;
@@ -165,44 +169,80 @@ static void bcma_hcd_init_chip_mips(struct bcma_device *dev)
}
}
-static void bcma_hcd_init_chip_arm_phy(struct bcma_device *dev)
+/**
+ * bcma_hcd_usb20_old_arm_init - Initialize old USB 2.0 controller on ARM
+ *
+ * Old USB 2.0 core is identified as BCMA_CORE_USB20_HOST and was introduced
+ * long before Northstar devices. It seems some cheaper chipsets like BCM53573
+ * still use it.
+ * Initialization of this old core differs between MIPS and ARM.
+ */
+static int bcma_hcd_usb20_old_arm_init(struct bcma_hcd_device *usb_dev)
{
- struct bcma_device *arm_core;
- void __iomem *dmu;
-
- arm_core = bcma_find_core(dev->bus, BCMA_CORE_ARMCA9);
- if (!arm_core) {
- dev_err(&dev->dev, "can not find ARM Cortex A9 ihost core\n");
- return;
+ struct bcma_device *core = usb_dev->core;
+ struct device *dev = &core->dev;
+ struct bcma_device *pmu_core;
+
+ usleep_range(10000, 20000);
+ if (core->id.rev < 5)
+ return 0;
+
+ pmu_core = bcma_find_core(core->bus, BCMA_CORE_PMU);
+ if (!pmu_core) {
+ dev_err(dev, "Could not find PMU core\n");
+ return -ENOENT;
}
- dmu = ioremap_nocache(arm_core->addr_s[0], 0x1000);
- if (!dmu) {
- dev_err(&dev->dev, "can not map ARM Cortex A9 ihost core\n");
- return;
- }
-
- /* Unlock DMU PLL settings */
- iowrite32(0x0000ea68, dmu + 0x180);
-
- /* Write USB 2.0 PLL control setting */
- iowrite32(0x00dd10c3, dmu + 0x164);
+ /* Take USB core out of reset */
+ bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK | BCMA_IOCTL_FGC);
+ usleep_range(100, 200);
+ bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ usleep_range(100, 200);
+ bcma_awrite32(core, BCMA_RESET_CTL, 0);
+ usleep_range(100, 200);
+ bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK);
+ usleep_range(100, 200);
+
+ /* Enable Misc PLL */
+ bcma_write32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT |
+ BCMA_CLKCTLST_HQCLKREQ |
+ USB_BCMA_CLKCTLST_USB_CLK_REQ);
+ usleep_range(100, 200);
+
+ bcma_write32(core, 0x510, 0xc7f85000);
+ bcma_write32(core, 0x510, 0xc7f85003);
+ usleep_range(300, 600);
+
+ /* Program USB PHY PLL parameters */
+ bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x6);
+ bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x005360c1);
+ usleep_range(100, 200);
+ bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x7);
+ bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x0);
+ usleep_range(100, 200);
+ bcma_set32(pmu_core, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+ usleep_range(100, 200);
+
+ bcma_write32(core, 0x510, 0x7f8d007);
+ udelay(1000);
+
+ /* Take controller out of reset */
+ bcma_write32(core, 0x200, 0x4ff);
+ usleep_range(25, 50);
+ bcma_write32(core, 0x200, 0x6ff);
+ usleep_range(25, 50);
+ bcma_write32(core, 0x200, 0x7ff);
+ usleep_range(25, 50);
+
+ of_platform_default_populate(dev->of_node, NULL, dev);
- /* Lock DMU PLL settings */
- iowrite32(0x00000000, dmu + 0x180);
-
- iounmap(dmu);
+ return 0;
}
-static void bcma_hcd_init_chip_arm_hc(struct bcma_device *dev)
+static void bcma_hcd_usb20_ns_init_hc(struct bcma_device *dev)
{
u32 val;
- /*
- * Delay after PHY initialized to ensure HC is ready to be configured
- */
- usleep_range(1000, 2000);
-
/* Set packet buffer OUT threshold */
val = bcma_read32(dev, 0x94);
val &= 0xffff;
@@ -213,20 +253,33 @@ static void bcma_hcd_init_chip_arm_hc(struct bcma_device *dev)
val = bcma_read32(dev, 0x9c);
val |= 1;
bcma_write32(dev, 0x9c, val);
+
+ /*
+ * Broadcom initializes PHY and then waits to ensure HC is ready to be
+ * configured. In our case the order is reversed: we have just initialized
+ * the controller and let the HCD initialize the PHY, so wait (sleep) now.
+ */
+ usleep_range(1000, 2000);
}
-static void bcma_hcd_init_chip_arm(struct bcma_device *dev)
+/**
+ * bcma_hcd_usb20_ns_init - Initialize Northstar USB 2.0 controller
+ */
+static int bcma_hcd_usb20_ns_init(struct bcma_hcd_device *bcma_hcd)
{
- bcma_core_enable(dev, 0);
+ struct bcma_device *core = bcma_hcd->core;
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
+ struct device *dev = &core->dev;
- if (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4707 ||
- dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM53018) {
- if (dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4707 ||
- dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4708)
- bcma_hcd_init_chip_arm_phy(dev);
+ bcma_core_enable(core, 0);
- bcma_hcd_init_chip_arm_hc(dev);
- }
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018)
+ bcma_hcd_usb20_ns_init_hc(core);
+
+ of_platform_default_populate(dev->of_node, NULL, dev);
+
+ return 0;
}
static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
@@ -299,16 +352,7 @@ static int bcma_hcd_usb20_init(struct bcma_hcd_device *usb_dev)
if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
- switch (dev->id.id) {
- case BCMA_CORE_NS_USB20:
- bcma_hcd_init_chip_arm(dev);
- break;
- case BCMA_CORE_USB20_HOST:
- bcma_hcd_init_chip_mips(dev);
- break;
- default:
- return -ENODEV;
- }
+ bcma_hcd_init_chip_mips(dev);
/* In AI chips EHCI is addrspace 0, OHCI is 1 */
ohci_addr = dev->addr_s[0];
@@ -338,6 +382,18 @@ err_unregister_ohci_dev:
return err;
}
+static int bcma_hcd_usb30_init(struct bcma_hcd_device *bcma_hcd)
+{
+ struct bcma_device *core = bcma_hcd->core;
+ struct device *dev = &core->dev;
+
+ bcma_core_enable(core, 0);
+
+ of_platform_default_populate(dev->of_node, NULL, dev);
+
+ return 0;
+}
+
static int bcma_hcd_probe(struct bcma_device *core)
{
int err;
@@ -357,14 +413,24 @@ static int bcma_hcd_probe(struct bcma_device *core)
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
+ if (IS_ENABLED(CONFIG_ARM))
+ err = bcma_hcd_usb20_old_arm_init(usb_dev);
+ else if (IS_ENABLED(CONFIG_MIPS))
+ err = bcma_hcd_usb20_init(usb_dev);
+ else
+ err = -ENOTSUPP;
+ break;
case BCMA_CORE_NS_USB20:
- err = bcma_hcd_usb20_init(usb_dev);
- if (err)
- return err;
+ err = bcma_hcd_usb20_ns_init(usb_dev);
+ break;
+ case BCMA_CORE_NS_USB30:
+ err = bcma_hcd_usb30_init(usb_dev);
break;
default:
return -ENODEV;
}
+ if (err)
+ return err;
bcma_set_drvdata(core, usb_dev);
return 0;
@@ -416,6 +482,7 @@ static int bcma_hcd_resume(struct bcma_device *dev)
static const struct bcma_device_id bcma_hcd_table[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB20, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB30, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 6816b8c371d0..876dca4fc216 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -38,7 +38,7 @@
#include "ehci.h"
#define DRIVER_DESC "EHCI generic platform driver"
-#define EHCI_MAX_CLKS 3
+#define EHCI_MAX_CLKS 4
#define EHCI_MAX_RSTS 3
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0960f41f945a..55a0ae6f2d74 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -310,10 +310,8 @@ static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
/* allocate memory for SCC data structure */
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
- if (!usb) {
- fhci_err(fhci, "no memory for SCC data struct\n");
+ if (!usb)
return NULL;
- }
usb->fhci = fhci;
usb->hc_list = fhci->hc_list;
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 1044b0f9d656..f07ccb25bc24 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -222,23 +222,17 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
pdata->controller_ver = usb_get_ver_info(np);
/* Activate Erratum by reading property in device tree */
- if (of_get_property(np, "fsl,usb-erratum-a007792", NULL))
- pdata->has_fsl_erratum_a007792 = 1;
- else
- pdata->has_fsl_erratum_a007792 = 0;
- if (of_get_property(np, "fsl,usb-erratum-a005275", NULL))
- pdata->has_fsl_erratum_a005275 = 1;
- else
- pdata->has_fsl_erratum_a005275 = 0;
+ pdata->has_fsl_erratum_a007792 =
+ of_property_read_bool(np, "fsl,usb-erratum-a007792");
+ pdata->has_fsl_erratum_a005275 =
+ of_property_read_bool(np, "fsl,usb-erratum-a005275");
/*
* Determine whether phy_clk_valid needs to be checked
* by reading property in device tree
*/
- if (of_get_property(np, "phy-clk-valid", NULL))
- pdata->check_phy_clk_valid = 1;
- else
- pdata->check_phy_clk_valid = 0;
+ pdata->check_phy_clk_valid =
+ of_property_read_bool(np, "phy-clk-valid");
if (pdata->have_sysif_regs) {
if (pdata->controller_ver == FSL_USB_VER_NONE) {
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 2f7690092a7f..369869a29ebd 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1856,15 +1856,11 @@ max3421_probe(struct spi_device *spi)
INIT_LIST_HEAD(&max3421_hcd->ep_list);
max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
- if (!max3421_hcd->tx) {
- dev_err(&spi->dev, "failed to kmalloc tx buffer\n");
+ if (!max3421_hcd->tx)
goto error;
- }
max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
- if (!max3421_hcd->rx) {
- dev_err(&spi->dev, "failed to kmalloc rx buffer\n");
+ if (!max3421_hcd->rx)
goto error;
- }
max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
"max3421_spi_thread");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index d177372bb357..5b5880c0ae19 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -21,8 +21,11 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <soc/at91/atmel-sfr.h>
#include "ohci.h"
@@ -51,6 +54,7 @@ struct ohci_at91_priv {
struct clk *hclk;
bool clocked;
bool wakeup; /* Saved wake-up state for resume */
+ struct regmap *sfr_regmap;
};
/* interface and function clocks; sometimes also an AHB clock */
@@ -134,6 +138,17 @@ static void at91_stop_hc(struct platform_device *pdev)
static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
+static struct regmap *at91_dt_syscon_sfr(void)
+{
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap))
+ regmap = NULL;
+
+ return regmap;
+}
+
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -197,6 +212,10 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
goto err;
}
+ ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
+ if (!ohci_at91->sfr_regmap)
+ dev_warn(dev, "failed to find sfr node\n");
+
board = hcd->self.controller->platform_data;
ohci = hcd_to_ohci(hcd);
ohci->num_ports = board->ports;
@@ -282,6 +301,28 @@ static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
return length;
}
+static int ohci_at91_port_suspend(struct regmap *regmap, u8 set)
+{
+ u32 regval;
+ int ret;
+
+ if (!regmap)
+ return 0;
+
+ ret = regmap_read(regmap, AT91_SFR_OHCIICR, &regval);
+ if (ret)
+ return ret;
+
+ if (set)
+ regval |= AT91_OHCIICR_USB_SUSPEND;
+ else
+ regval &= ~AT91_OHCIICR_USB_SUSPEND;
+
+ regmap_write(regmap, AT91_SFR_OHCIICR, regval);
+
+ return 0;
+}
+
/*
* Look at the control requests to the root hub and see if we need to override.
*/
@@ -289,6 +330,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
+ struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
struct usb_hub_descriptor *desc;
int ret = -EINVAL;
u32 *data = (u32 *)buf;
@@ -301,7 +343,8 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (typeReq) {
case SetPortFeature:
- if (wValue == USB_PORT_FEAT_POWER) {
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
if (valid_port(wIndex)) {
ohci_at91_usb_set_power(pdata, wIndex, 1);
@@ -309,6 +352,15 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
goto out;
+
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
+ if (valid_port(wIndex)) {
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap,
+ 1);
+ return 0;
+ }
+ break;
}
break;
@@ -342,6 +394,16 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
ohci_at91_usb_set_power(pdata, wIndex, 0);
return 0;
}
+ break;
+
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
+ if (valid_port(wIndex)) {
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap,
+ 0);
+ return 0;
+ }
+ break;
}
break;
}
@@ -599,6 +661,8 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
if (ohci_at91->wakeup)
enable_irq_wake(hcd->irq);
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
+
ret = ohci_suspend(hcd, ohci_at91->wakeup);
if (ret) {
if (ohci_at91->wakeup)
@@ -638,6 +702,9 @@ ohci_hcd_at91_drv_resume(struct device *dev)
at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
+
+ ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
return 0;
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index de7c68602a7e..495c1454b9e8 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -36,7 +36,6 @@
#include <mach/mux.h>
#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <mach/usb.h>
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 2ac266d692a2..3a9ea32508df 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -13,9 +13,7 @@
* This file is licenced under the GPL.
*/
-#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <mach/assabet.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
@@ -127,7 +125,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
if (machine_is_xp860() ||
- machine_has_neponset() ||
+ machine_is_assabet() ||
machine_is_pfs168() ||
machine_is_badge4())
usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index a7de8e8bb458..5d3d914ab4fb 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -601,11 +601,8 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
GFP_KERNEL);
- if (!uhci->frame_cpu) {
- dev_err(uhci_dev(uhci),
- "unable to allocate memory for frame pointers\n");
+ if (!uhci->frame_cpu)
goto err_alloc_frame_cpu;
- }
uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
sizeof(struct uhci_td), 16, 0);
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index e36372393bb1..ad8eb575c30a 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -65,7 +65,7 @@ int whc_init(struct whc *whc)
init_waitqueue_head(&whc->cmd_wq);
init_waitqueue_head(&whc->async_list_wq);
init_waitqueue_head(&whc->periodic_list_wq);
- whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
+ whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
if (whc->workqueue == NULL) {
ret = -ENOMEM;
goto error;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 0f53ae0f464e..a59fafb4b329 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1033,7 +1033,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
- dev_err(&pdev->dev, "failed to allocate PHY array\n");
err = -ENOMEM;
goto put_padctl;
}
@@ -1117,6 +1116,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->hcd);
if (!xhci->shared_hcd) {
dev_err(&pdev->dev, "failed to create shared HCD\n");
+ err = -ENOMEM;
goto remove_usb2;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 01d96c9b3a75..1a4ca02729c2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -295,10 +295,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
xhci->msix_entries =
kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
GFP_KERNEL);
- if (!xhci->msix_entries) {
- xhci_err(xhci, "Failed to allocate MSI-X entries\n");
+ if (!xhci->msix_entries)
return -ENOMEM;
- }
for (i = 0; i < xhci->msix_count; i++) {
xhci->msix_entries[i].entry = i;
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index eb8f8d37cd95..47b357760afc 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -240,6 +240,12 @@ config USB_HSIC_USB3503
help
This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+config USB_HSIC_USB4604
+ tristate "USB4604 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
+
config USB_LINK_LAYER_TEST
tristate "USB Link Layer Test driver"
help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 3d79faaad2fb..3d1992750da4 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
obj-$(CONFIG_USB_YUREX) += yurex.o
obj-$(CONFIG_USB_HSIC_USB3503) += usb3503.o
+obj-$(CONFIG_USB_HSIC_USB4604) += usb4604.o
obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
obj-$(CONFIG_UCSI) += ucsi.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 3071c0ef909b..564268fca07a 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -672,8 +672,7 @@ static int adu_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev) {
retval = -ENOMEM;
goto exit;
}
@@ -710,7 +709,6 @@ static int adu_probe(struct usb_interface *interface,
dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL);
if (!dev->read_buffer_primary) {
- dev_err(&interface->dev, "Couldn't allocate read_buffer_primary\n");
retval = -ENOMEM;
goto error;
}
@@ -723,7 +721,6 @@ static int adu_probe(struct usb_interface *interface,
dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL);
if (!dev->read_buffer_secondary) {
- dev_err(&interface->dev, "Couldn't allocate read_buffer_secondary\n");
retval = -ENOMEM;
goto error;
}
@@ -735,29 +732,21 @@ static int adu_probe(struct usb_interface *interface,
memset(dev->read_buffer_secondary + (3 * in_end_size), 'h', in_end_size);
dev->interrupt_in_buffer = kmalloc(in_end_size, GFP_KERNEL);
- if (!dev->interrupt_in_buffer) {
- dev_err(&interface->dev, "Couldn't allocate interrupt_in_buffer\n");
+ if (!dev->interrupt_in_buffer)
goto error;
- }
/* debug code prime the buffer */
memset(dev->interrupt_in_buffer, 'i', in_end_size);
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_in_urb) {
- dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n");
+ if (!dev->interrupt_in_urb)
goto error;
- }
dev->interrupt_out_buffer = kmalloc(out_end_size, GFP_KERNEL);
- if (!dev->interrupt_out_buffer) {
- dev_err(&interface->dev, "Couldn't allocate interrupt_out_buffer\n");
+ if (!dev->interrupt_out_buffer)
goto error;
- }
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_out_urb) {
- dev_err(&interface->dev, "Couldn't allocate interrupt_out_urb\n");
+ if (!dev->interrupt_out_urb)
goto error;
- }
if (!usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number,
sizeof(dev->serial_number))) {
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index a0a3827b4aff..da5ff401a354 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -85,7 +85,6 @@ struct appledisplay {
};
static atomic_t count_displays = ATOMIC_INIT(0);
-static struct workqueue_struct *wq;
static void appledisplay_complete(struct urb *urb)
{
@@ -122,7 +121,7 @@ static void appledisplay_complete(struct urb *urb)
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
- queue_delayed_work(wq, &pdata->work, 0);
+ schedule_delayed_work(&pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
@@ -239,7 +238,6 @@ static int appledisplay_probe(struct usb_interface *iface,
pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
if (!pdata) {
retval = -ENOMEM;
- dev_err(&iface->dev, "Out of memory\n");
goto error;
}
@@ -253,8 +251,6 @@ static int appledisplay_probe(struct usb_interface *iface,
pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
if (!pdata->msgdata) {
retval = -ENOMEM;
- dev_err(&iface->dev,
- "Allocating buffer for control messages failed\n");
goto error;
}
@@ -262,7 +258,6 @@ static int appledisplay_probe(struct usb_interface *iface,
pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pdata->urb) {
retval = -ENOMEM;
- dev_err(&iface->dev, "Allocating URB failed\n");
goto error;
}
@@ -344,7 +339,7 @@ static void appledisplay_disconnect(struct usb_interface *iface)
if (pdata) {
usb_kill_urb(pdata->urb);
- cancel_delayed_work(&pdata->work);
+ cancel_delayed_work_sync(&pdata->work);
backlight_device_unregister(pdata->bd);
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
@@ -365,19 +360,11 @@ static struct usb_driver appledisplay_driver = {
static int __init appledisplay_init(void)
{
- wq = create_singlethread_workqueue("appledisplay");
- if (!wq) {
- printk(KERN_ERR "appledisplay: Could not create work queue\n");
- return -ENOMEM;
- }
-
return usb_register(&appledisplay_driver);
}
static void __exit appledisplay_exit(void)
{
- flush_workqueue(wq);
- destroy_workqueue(wq);
usb_deregister(&appledisplay_driver);
}
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 402b94dd2531..5c93a888c40e 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -79,7 +79,6 @@ static int vendor_command(struct cypress *dev, unsigned char request,
/* allocate some memory for the i/o buffer*/
iobuf = kzalloc(CYPRESS_MAX_REQSIZE, GFP_KERNEL);
if (!iobuf) {
- dev_err(&dev->udev->dev, "Out of memory!\n");
retval = -ENOMEM;
goto error;
}
@@ -208,10 +207,8 @@ static int cypress_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory!\n");
+ if (!dev)
goto error_mem;
- }
dev->udev = usb_get_dev(interface_to_usbdev(interface));
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 9bab1a33bc16..9d8bb8dacdcd 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -101,10 +101,8 @@ static ssize_t set_brightness(struct device *dev, struct device_attribute *attr,
int retval;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
cytherm->brightness = simple_strtoul(buf, NULL, 10);
@@ -148,10 +146,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char
int temp, sign;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
/* read temperature */
retval = vendor_command(cytherm->udev, READ_RAM, TEMP, 0, buffer, 8);
@@ -192,10 +188,8 @@ static ssize_t show_button(struct device *dev, struct device_attribute *attr, ch
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
/* check button */
retval = vendor_command(cytherm->udev, READ_RAM, BUTTON, 0, buffer, 8);
@@ -230,10 +224,8 @@ static ssize_t show_port0(struct device *dev, struct device_attribute *attr, cha
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
retval = vendor_command(cytherm->udev, READ_PORT, 0, 0, buffer, 8);
if (retval)
@@ -257,10 +249,8 @@ static ssize_t set_port0(struct device *dev, struct device_attribute *attr, cons
int tmp;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
tmp = simple_strtoul(buf, NULL, 10);
@@ -290,10 +280,8 @@ static ssize_t show_port1(struct device *dev, struct device_attribute *attr, cha
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
retval = vendor_command(cytherm->udev, READ_PORT, 1, 0, buffer, 8);
if (retval)
@@ -317,10 +305,8 @@ static ssize_t set_port1(struct device *dev, struct device_attribute *attr, cons
int tmp;
buffer = kmalloc(8, GFP_KERNEL);
- if (!buffer) {
- dev_err(&cytherm->udev->dev, "out of memory\n");
+ if (!buffer)
return 0;
- }
tmp = simple_strtoul(buf, NULL, 10);
@@ -351,10 +337,8 @@ static int cytherm_probe(struct usb_interface *interface,
int retval = -ENOMEM;
dev = kzalloc (sizeof(struct usb_cytherm), GFP_KERNEL);
- if (dev == NULL) {
- dev_err (&interface->dev, "Out of memory\n");
+ if (!dev)
goto error_mem;
- }
dev->udev = usb_get_dev(udev);
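The appledisplay, cypress and cytherm hunks above all drop driver-local "out of memory" messages on allocation failure. A minimal sketch of the idiom these conversions converge on is shown below (struct example_state and example_alloc are invented names, not from the patch): kmalloc()/kzalloc() already log a warning with a backtrace when they fail, so the caller only needs to propagate -ENOMEM.

/*
 * Illustrative sketch only: allocation failure handling without a
 * redundant dev_err(), since the allocator has already warned.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct example_state {
        void *buf;
};

static int example_alloc(struct example_state *st, size_t len)
{
        st->buf = kzalloc(len, GFP_KERNEL);
        if (!st->buf)
                return -ENOMEM; /* no extra message needed here */

        return 0;
}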
diff --git a/drivers/usb/misc/ezusb.c b/drivers/usb/misc/ezusb.c
index 947811bc8126..837208f14f86 100644
--- a/drivers/usb/misc/ezusb.c
+++ b/drivers/usb/misc/ezusb.c
@@ -22,7 +22,7 @@ struct ezusb_fx_type {
unsigned short max_internal_adress;
};
-static struct ezusb_fx_type ezusb_fx1 = {
+static const struct ezusb_fx_type ezusb_fx1 = {
.cpucs_reg = 0x7F92,
.max_internal_adress = 0x1B3F,
};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9b5b3b2281ca..9a82f8308ad7 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -61,9 +61,6 @@ module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware,
"true to distrust firmware power/overcurrent setup");
extern struct platform_driver u132_platform_driver;
-static struct workqueue_struct *status_queue;
-static struct workqueue_struct *command_queue;
-static struct workqueue_struct *respond_queue;
/*
* ftdi_module_lock exists to protect access to global variables
*
@@ -228,56 +225,56 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi)
static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ if (!schedule_delayed_work(&ftdi->status_work, delta))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+ if (schedule_delayed_work(&ftdi->status_work, delta))
kref_get(&ftdi->kref);
}
static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
{
- if (cancel_delayed_work(&ftdi->status_work))
+ if (cancel_delayed_work_sync(&ftdi->status_work))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ if (!schedule_delayed_work(&ftdi->command_work, delta))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+ if (schedule_delayed_work(&ftdi->command_work, delta))
kref_get(&ftdi->kref);
}
static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
{
- if (cancel_delayed_work(&ftdi->command_work))
+ if (cancel_delayed_work_sync(&ftdi->command_work))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
unsigned int delta)
{
- if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ if (!schedule_delayed_work(&ftdi->respond_work, delta))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
{
- if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+ if (schedule_delayed_work(&ftdi->respond_work, delta))
kref_get(&ftdi->kref);
}
static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
{
- if (cancel_delayed_work(&ftdi->respond_work))
+ if (cancel_delayed_work_sync(&ftdi->respond_work))
kref_put(&ftdi->kref, ftdi_elan_delete);
}
@@ -785,11 +782,8 @@ static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
return 0;
total_size = ftdi_elan_total_command_size(ftdi, command_size);
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- dev_err(&ftdi->udev->dev, "could not get a urb to write %d commands totaling %d bytes to the Uxxx\n",
- command_size, total_size);
+ if (!urb)
return -ENOMEM;
- }
buf = usb_alloc_coherent(ftdi->udev, total_size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
@@ -1948,10 +1942,8 @@ static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi)
int I = 257;
int i = 0;
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- dev_err(&ftdi->udev->dev, "could not alloc a urb for flush sequence\n");
+ if (!urb)
return -ENOMEM;
- }
buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for flush sequence\n");
@@ -1988,10 +1980,8 @@ static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi)
int I = 4;
int i = 0;
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- dev_err(&ftdi->udev->dev, "could not get a urb for the reset sequence\n");
+ if (!urb)
return -ENOMEM;
- }
buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for the reset sequence\n");
@@ -2740,7 +2730,6 @@ static int ftdi_elan_probe(struct usb_interface *interface,
ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
ftdi->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!ftdi->bulk_in_buffer) {
- dev_err(&ftdi->udev->dev, "Could not allocate bulk_in_buffer\n");
retval = -ENOMEM;
goto error;
}
@@ -2823,9 +2812,6 @@ static void ftdi_elan_disconnect(struct usb_interface *interface)
ftdi->initialized = 0;
ftdi->registered = 0;
}
- flush_workqueue(status_queue);
- flush_workqueue(command_queue);
- flush_workqueue(respond_queue);
ftdi->disconnected += 1;
usb_set_intfdata(interface, NULL);
dev_info(&ftdi->udev->dev, "USB FTDI U132 host controller interface now disconnected\n");
@@ -2845,31 +2831,12 @@ static int __init ftdi_elan_init(void)
pr_info("driver %s\n", ftdi_elan_driver.name);
mutex_init(&ftdi_module_lock);
INIT_LIST_HEAD(&ftdi_static_list);
- status_queue = create_singlethread_workqueue("ftdi-status-control");
- if (!status_queue)
- goto err_status_queue;
- command_queue = create_singlethread_workqueue("ftdi-command-engine");
- if (!command_queue)
- goto err_command_queue;
- respond_queue = create_singlethread_workqueue("ftdi-respond-engine");
- if (!respond_queue)
- goto err_respond_queue;
result = usb_register(&ftdi_elan_driver);
if (result) {
- destroy_workqueue(status_queue);
- destroy_workqueue(command_queue);
- destroy_workqueue(respond_queue);
pr_err("usb_register failed. Error number %d\n", result);
}
return result;
-err_respond_queue:
- destroy_workqueue(command_queue);
-err_command_queue:
- destroy_workqueue(status_queue);
-err_status_queue:
- pr_err("%s couldn't create workqueue\n", ftdi_elan_driver.name);
- return -ENOMEM;
}
static void __exit ftdi_elan_exit(void)
@@ -2882,15 +2849,7 @@ static void __exit ftdi_elan_exit(void)
ftdi_status_cancel_work(ftdi);
ftdi_command_cancel_work(ftdi);
ftdi_response_cancel_work(ftdi);
- } flush_workqueue(status_queue);
- destroy_workqueue(status_queue);
- status_queue = NULL;
- flush_workqueue(command_queue);
- destroy_workqueue(command_queue);
- command_queue = NULL;
- flush_workqueue(respond_queue);
- destroy_workqueue(respond_queue);
- respond_queue = NULL;
+ }
}
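The ftdi-elan (and appledisplay) hunks above retire private single-threaded workqueues in favour of the system workqueue. The reference counting hinges on the return values: schedule_delayed_work() reports whether the work was actually queued, and cancel_delayed_work_sync() reports whether a pending item was cancelled. A hedged sketch of that pairing follows; struct demo_dev and the demo_* helpers are illustrative only.

/*
 * Delayed work on the system workqueue, with a kref mirroring whether
 * an item is pending.  Names are invented for illustration.
 */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
        struct kref kref;
        struct delayed_work work;
};

static void demo_release(struct kref *kref)
{
        kfree(container_of(kref, struct demo_dev, kref));
}

/* Take a reference only if the work was actually queued. */
static void demo_queue(struct demo_dev *d, unsigned long delay)
{
        if (schedule_delayed_work(&d->work, delay))
                kref_get(&d->kref);
}

/* Drop the pending item's reference once it is known to be cancelled. */
static void demo_cancel(struct demo_dev *d)
{
        if (cancel_delayed_work_sync(&d->work))
                kref_put(&d->kref, demo_release);
}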
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 5105397e62fc..2975e80b7a56 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -366,7 +366,6 @@ static int idmouse_probe(struct usb_interface *interface,
kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
- dev_err(&interface->dev, "Unable to allocate input buffer.\n");
idmouse_delete(dev);
return -ENOMEM;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 1950e87b4219..095778ff984d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -278,7 +278,7 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer,
dev = file->private_data;
/* verify that the device wasn't unplugged */
- if (dev == NULL || !dev->present)
+ if (!dev || !dev->present)
return -ENODEV;
dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n",
@@ -413,8 +413,6 @@ static ssize_t iowarrior_write(struct file *file,
int_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!int_out_urb) {
retval = -ENOMEM;
- dev_dbg(&dev->interface->dev,
- "Unable to allocate urb\n");
goto error_no_urb;
}
buf = usb_alloc_coherent(dev->udev, dev->report_size,
@@ -482,9 +480,8 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
int io_res; /* checks for bytes read/written and copy_to/from_user results */
dev = file->private_data;
- if (dev == NULL) {
+ if (!dev)
return -ENODEV;
- }
buffer = kzalloc(dev->report_size, GFP_KERNEL);
if (!buffer)
@@ -654,9 +651,8 @@ static int iowarrior_release(struct inode *inode, struct file *file)
int retval = 0;
dev = file->private_data;
- if (dev == NULL) {
+ if (!dev)
return -ENODEV;
- }
dev_dbg(&dev->interface->dev, "minor %d\n", dev->minor);
@@ -766,10 +762,8 @@ static int iowarrior_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev)
return retval;
- }
mutex_init(&dev->mutex);
@@ -812,15 +806,11 @@ static int iowarrior_probe(struct usb_interface *interface,
/* create the urb and buffer for reading */
dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->int_in_urb) {
- dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n");
+ if (!dev->int_in_urb)
goto error;
- }
dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL);
- if (!dev->int_in_buffer) {
- dev_err(&interface->dev, "Couldn't allocate int_in_buffer\n");
+ if (!dev->int_in_buffer)
goto error;
- }
usb_fill_int_urb(dev->int_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->int_in_endpoint->bEndpointAddress),
@@ -831,10 +821,8 @@ static int iowarrior_probe(struct usb_interface *interface,
dev->read_queue =
kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER),
GFP_KERNEL);
- if (!dev->read_queue) {
- dev_err(&interface->dev, "Couldn't allocate read_queue\n");
+ if (!dev->read_queue)
goto error;
- }
/* Get the serial-number of the chip */
memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial));
usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial,
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index cce22ff1c2eb..9ca595632f17 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -658,10 +658,8 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&intf->dev, "Out of memory\n");
+ if (!dev)
goto exit;
- }
mutex_init(&dev->mutex);
spin_lock_init(&dev->rbsl);
dev->intf = intf;
@@ -674,10 +672,8 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
(le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_LD_COM3LAB)) &&
(le16_to_cpu(udev->descriptor.bcdDevice) <= 0x103)) {
buffer = kmalloc(256, GFP_KERNEL);
- if (buffer == NULL) {
- dev_err(&intf->dev, "Couldn't allocate string buffer\n");
+ if (!buffer)
goto error;
- }
/* usb_string makes SETUP+STALL to leave always ControlReadLoop */
usb_string(udev, 255, buffer, 256);
kfree(buffer);
@@ -704,32 +700,22 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL);
- if (!dev->ring_buffer) {
- dev_err(&intf->dev, "Couldn't allocate ring_buffer\n");
+ if (!dev->ring_buffer)
goto error;
- }
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
- if (!dev->interrupt_in_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_in_buffer\n");
+ if (!dev->interrupt_in_buffer)
goto error;
- }
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_in_urb) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_in_urb\n");
+ if (!dev->interrupt_in_urb)
goto error;
- }
dev->interrupt_out_endpoint_size = dev->interrupt_out_endpoint ? usb_endpoint_maxp(dev->interrupt_out_endpoint) :
udev->descriptor.bMaxPacketSize0;
dev->interrupt_out_buffer = kmalloc(write_buffer_size*dev->interrupt_out_endpoint_size, GFP_KERNEL);
- if (!dev->interrupt_out_buffer) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_out_buffer\n");
+ if (!dev->interrupt_out_buffer)
goto error;
- }
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_out_urb) {
- dev_err(&intf->dev, "Couldn't allocate interrupt_out_urb\n");
+ if (!dev->interrupt_out_urb)
goto error;
- }
dev->interrupt_in_interval = min_interrupt_in_interval > dev->interrupt_in_endpoint->bInterval ? min_interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
if (dev->interrupt_out_endpoint)
dev->interrupt_out_interval = min_interrupt_out_interval > dev->interrupt_out_endpoint->bInterval ? min_interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 7771be3ac178..c8fbe7b739a0 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -817,10 +817,8 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(idev, "Out of memory\n");
+ if (!dev)
goto exit;
- }
mutex_init(&dev->lock);
@@ -871,51 +869,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
}
dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
- if (!dev->read_buffer) {
- dev_err(idev, "Couldn't allocate read_buffer\n");
+ if (!dev->read_buffer)
goto error;
- }
dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
- if (!dev->interrupt_in_buffer) {
- dev_err(idev, "Couldn't allocate interrupt_in_buffer\n");
+ if (!dev->interrupt_in_buffer)
goto error;
- }
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_in_urb) {
- dev_err(idev, "Couldn't allocate interrupt_in_urb\n");
+ if (!dev->interrupt_in_urb)
goto error;
- }
dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
- if (!dev->interrupt_out_buffer) {
- dev_err(idev, "Couldn't allocate interrupt_out_buffer\n");
+ if (!dev->interrupt_out_buffer)
goto error;
- }
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->interrupt_out_urb) {
- dev_err(idev, "Couldn't allocate interrupt_out_urb\n");
+ if (!dev->interrupt_out_urb)
goto error;
- }
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
- /* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
-
- if (retval) {
- /* something prevented us from registering this driver */
- dev_err(idev, "Not able to get a minor for this device.\n");
- usb_set_intfdata (interface, NULL);
- goto error;
- }
- dev->minor = interface->minor;
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
- USB_MAJOR, dev->minor);
-
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
@@ -936,6 +906,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
+ /* we can register the device now, as it is ready */
+ usb_set_intfdata (interface, dev);
+
+ retval = usb_register_dev (interface, &tower_class);
+
+ if (retval) {
+ /* something prevented us from registering this driver */
+ dev_err(idev, "Not able to get a minor for this device.\n");
+ usb_set_intfdata (interface, NULL);
+ goto error;
+ }
+ dev->minor = interface->minor;
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+ USB_MAJOR, dev->minor);
exit:
return retval;
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 86b4e4b2ab9a..77176511658f 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -34,8 +34,6 @@ struct lvs_rh {
struct usb_hub_descriptor descriptor;
/* urb for polling interrupt pipe */
struct urb *urb;
- /* LVS RH work queue */
- struct workqueue_struct *rh_queue;
/* LVH RH work */
struct work_struct rh_work;
/* RH port status */
@@ -247,10 +245,8 @@ static ssize_t get_dev_desc_store(struct device *dev,
int ret;
descriptor = kmalloc(sizeof(*descriptor), GFP_KERNEL);
- if (!descriptor) {
- dev_err(dev, "failed to allocate descriptor memory\n");
+ if (!descriptor)
return -ENOMEM;
- }
udev = create_lvs_device(intf);
if (!udev) {
@@ -355,7 +351,7 @@ static void lvs_rh_irq(struct urb *urb)
{
struct lvs_rh *lvs = urb->context;
- queue_work(lvs->rh_queue, &lvs->rh_work);
+ schedule_work(&lvs->rh_work);
}
static int lvs_rh_probe(struct usb_interface *intf,
@@ -397,24 +393,15 @@ static int lvs_rh_probe(struct usb_interface *intf,
/* submit urb to poll interrupt endpoint */
lvs->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!lvs->urb) {
- dev_err(&intf->dev, "couldn't allocate lvs urb\n");
+ if (!lvs->urb)
return -ENOMEM;
- }
-
- lvs->rh_queue = create_singlethread_workqueue("lvs_rh_queue");
- if (!lvs->rh_queue) {
- dev_err(&intf->dev, "couldn't create workqueue\n");
- ret = -ENOMEM;
- goto free_urb;
- }
INIT_WORK(&lvs->rh_work, lvs_rh_work);
ret = sysfs_create_group(&intf->dev.kobj, &lvs_attr_group);
if (ret < 0) {
dev_err(&intf->dev, "Failed to create sysfs node %d\n", ret);
- goto destroy_queue;
+ goto free_urb;
}
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
@@ -432,8 +419,6 @@ static int lvs_rh_probe(struct usb_interface *intf,
sysfs_remove:
sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
-destroy_queue:
- destroy_workqueue(lvs->rh_queue);
free_urb:
usb_free_urb(lvs->urb);
return ret;
@@ -444,7 +429,7 @@ static void lvs_rh_disconnect(struct usb_interface *intf)
struct lvs_rh *lvs = usb_get_intfdata(intf);
sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
- destroy_workqueue(lvs->rh_queue);
+ flush_work(&lvs->rh_work);
usb_free_urb(lvs->urb);
}
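The lvstest conversion above keeps its ordering guarantee by flushing the specific work item in disconnect instead of destroying a private queue. A short sketch of that shape, with hypothetical demo_* names, might look like this:

/*
 * Work scheduled from atomic (URB completion) context onto the system
 * workqueue, then flushed on disconnect so no handler runs afterwards.
 */
#include <linux/workqueue.h>

struct demo_rh {
        struct work_struct rh_work;
};

static void demo_rh_work(struct work_struct *w)
{
        /* port-status handling would go here */
}

static void demo_setup(struct demo_rh *rh)
{
        INIT_WORK(&rh->rh_work, demo_rh_work);
}

static void demo_irq(struct demo_rh *rh)       /* e.g. an URB completion */
{
        schedule_work(&rh->rh_work);            /* safe in atomic context */
}

static void demo_disconnect(struct demo_rh *rh)
{
        flush_work(&rh->rh_work);               /* wait for queued handler */
}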
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 02abfcdfbf7b..05bd39d62568 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3084,7 +3084,6 @@ static int sisusb_probe(struct usb_interface *intf,
/* Allocate URBs */
sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL);
if (!sisusb->sisurbin) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
retval = -ENOMEM;
goto error_3;
}
@@ -3093,8 +3092,6 @@ static int sisusb_probe(struct usb_interface *intf,
for (i = 0; i < sisusb->numobufs; i++) {
sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!sisusb->sisurbout[i]) {
- dev_err(&sisusb->sisusb_dev->dev,
- "Failed to allocate URBs\n");
retval = -ENOMEM;
goto error_4;
}
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 4145314a515b..9795457723d8 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -95,8 +95,7 @@ static int tv_probe(struct usb_interface *interface,
int retval;
dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev) {
retval = -ENOMEM;
goto error;
}
diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c
new file mode 100644
index 000000000000..e9f37fb746ac
--- /dev/null
+++ b/drivers/usb/misc/usb4604.c
@@ -0,0 +1,175 @@
+/*
+ * Driver for the SMSC USB4604 USB HSIC 4-port 2.0 hub controller
+ * Based on usb3503 driver
+ *
+ * Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
+ * Copyright (c) 2016 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+
+enum usb4604_mode {
+ USB4604_MODE_UNKNOWN,
+ USB4604_MODE_HUB,
+ USB4604_MODE_STANDBY,
+};
+
+struct usb4604 {
+ enum usb4604_mode mode;
+ struct device *dev;
+ struct gpio_desc *gpio_reset;
+};
+
+static void usb4604_reset(struct usb4604 *hub, int state)
+{
+ gpiod_set_value_cansleep(hub->gpio_reset, state);
+
+ /* Wait for i2c logic to come up */
+ if (state)
+ msleep(250);
+}
+
+static int usb4604_connect(struct usb4604 *hub)
+{
+ struct device *dev = hub->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+ u8 connect_cmd[] = { 0xaa, 0x55, 0x00 };
+
+ usb4604_reset(hub, 1);
+
+ err = i2c_master_send(client, connect_cmd, ARRAY_SIZE(connect_cmd));
+ if (err < 0) {
+ usb4604_reset(hub, 0);
+ return err;
+ }
+
+ hub->mode = USB4604_MODE_HUB;
+ dev_dbg(dev, "switched to HUB mode\n");
+
+ return 0;
+}
+
+static int usb4604_switch_mode(struct usb4604 *hub, enum usb4604_mode mode)
+{
+ struct device *dev = hub->dev;
+ int err = 0;
+
+ switch (mode) {
+ case USB4604_MODE_HUB:
+ err = usb4604_connect(hub);
+ break;
+
+ case USB4604_MODE_STANDBY:
+ usb4604_reset(hub, 0);
+ dev_dbg(dev, "switched to STANDBY mode\n");
+ break;
+
+ default:
+ dev_err(dev, "unknown mode is requested\n");
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int usb4604_probe(struct usb4604 *hub)
+{
+ struct device *dev = hub->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *gpio;
+ u32 mode = USB4604_MODE_HUB;
+
+ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+ hub->gpio_reset = gpio;
+
+ if (of_property_read_u32(np, "initial-mode", &hub->mode))
+ hub->mode = mode;
+
+ return usb4604_switch_mode(hub, hub->mode);
+}
+
+static int usb4604_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct usb4604 *hub;
+
+ hub = devm_kzalloc(&i2c->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, hub);
+ hub->dev = &i2c->dev;
+
+ return usb4604_probe(hub);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int usb4604_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb4604 *hub = i2c_get_clientdata(client);
+
+ usb4604_switch_mode(hub, USB4604_MODE_STANDBY);
+
+ return 0;
+}
+
+static int usb4604_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb4604 *hub = i2c_get_clientdata(client);
+
+ usb4604_switch_mode(hub, hub->mode);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(usb4604_i2c_pm_ops, usb4604_i2c_suspend,
+ usb4604_i2c_resume);
+
+static const struct i2c_device_id usb4604_id[] = {
+ { "usb4604", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, usb4604_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id usb4604_of_match[] = {
+ { .compatible = "smsc,usb4604" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, usb4604_of_match);
+#endif
+
+static struct i2c_driver usb4604_i2c_driver = {
+ .driver = {
+ .name = "usb4604",
+ .pm = &usb4604_i2c_pm_ops,
+ .of_match_table = of_match_ptr(usb4604_of_match),
+ },
+ .probe = usb4604_i2c_probe,
+ .id_table = usb4604_id,
+};
+module_i2c_driver(usb4604_i2c_driver);
+
+MODULE_DESCRIPTION("USB4604 USB HUB driver");
+MODULE_LICENSE("GPL v2");
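The new usb4604 driver binds through the I2C core via the id and of_match tables above. On a board without device tree, an equivalent client could in principle be instantiated from board code; the sketch below is purely hypothetical, and both the bus number (1) and slave address (0x2d) are assumptions, not taken from the patch.

/*
 * Hypothetical board-file registration of a "usb4604" i2c_client.
 */
#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info usb4604_board_info __initdata = {
        I2C_BOARD_INFO("usb4604", 0x2d),        /* address is an assumption */
};

static int __init board_usb4604_init(void)
{
        /* bus number 1 is an assumption for this sketch */
        return i2c_register_board_info(1, &usb4604_board_info, 1);
}
postcore_initcall(board_usb4604_init);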
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 1184390508e9..9f48419abc46 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -321,10 +321,8 @@ static int lcd_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev)
goto error;
- }
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
init_usb_anchor(&dev->submitted);
@@ -351,11 +349,8 @@ static int lcd_probe(struct usb_interface *interface,
dev->bulk_in_size = buffer_size;
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!dev->bulk_in_buffer) {
- dev_err(&interface->dev,
- "Could not allocate bulk_in_buffer\n");
+ if (!dev->bulk_in_buffer)
goto error;
- }
}
if (!dev->bulk_out_endpointAddr &&
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 1fe6b73c22f3..a0ba5298160c 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -128,10 +128,8 @@ static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
return;
buffer = kzalloc(MAXLEN, mf);
- if (!buffer) {
- dev_err(&mydev->udev->dev, "out of memory\n");
+ if (!buffer)
return;
- }
/* The device is right to left, where as you write left to right */
for (i = 0; i < mydev->textlength; i++)
@@ -346,10 +344,8 @@ static int sevseg_probe(struct usb_interface *interface,
int rc = -ENOMEM;
mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL);
- if (mydev == NULL) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!mydev)
goto error_mem;
- }
mydev->udev = usb_get_dev(udev);
mydev->intf = interface;
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index bbd029c9c725..356d312add57 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -150,10 +150,8 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
if (!usbdev)
return NULL;
rq = kzalloc(sizeof(struct uss720_async_request), mem_flags);
- if (!rq) {
- dev_err(&usbdev->dev, "submit_async_request out of memory\n");
+ if (!rq)
return NULL;
- }
kref_init(&rq->ref_count);
INIT_LIST_HEAD(&rq->asynclist);
init_completion(&rq->compl);
@@ -162,7 +160,6 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
rq->urb = usb_alloc_urb(0, mem_flags);
if (!rq->urb) {
kref_put(&rq->ref_count, destroy_async);
- dev_err(&usbdev->dev, "submit_async_request out of memory\n");
return NULL;
}
rq->dr = kmalloc(sizeof(*rq->dr), mem_flags);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 343fa6ff9f4b..54e53ac4c08f 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -200,10 +200,8 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev)
goto error;
- }
kref_init(&dev->kref);
mutex_init(&dev->io_mutex);
spin_lock_init(&dev->lock);
@@ -231,17 +229,13 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* allocate control URB */
dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->cntl_urb) {
- dev_err(&interface->dev, "Could not allocate control URB\n");
+ if (!dev->cntl_urb)
goto error;
- }
/* allocate buffer for control req */
dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
- if (!dev->cntl_req) {
- dev_err(&interface->dev, "Could not allocate cntl_req\n");
+ if (!dev->cntl_req)
goto error;
- }
/* allocate buffer for control msg */
dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
@@ -269,10 +263,8 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* allocate interrupt URB */
dev->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->urb) {
- dev_err(&interface->dev, "Could not allocate URB\n");
+ if (!dev->urb)
goto error;
- }
/* allocate buffer for interrupt in */
dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 73cfa13fc0dc..72a2a5040848 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -82,7 +82,7 @@ config USB_MUSB_DA8XX
tristate "DA8xx/OMAP-L1x"
depends on ARCH_DAVINCI_DA8XX
depends on NOP_USB_XCEIV
- depends on BROKEN
+ select PHY_DA8XX_USB
config USB_MUSB_TUSB6010
tristate "TUSB6010"
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index c41fe588d14d..50ca8052bc8e 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -474,10 +474,8 @@ static int am35x_probe(struct platform_device *pdev)
int ret = -ENOMEM;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
goto err0;
- }
phy_clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(phy_clk)) {
@@ -512,8 +510,10 @@ static int am35x_probe(struct platform_device *pdev)
pdata->platform_ops = &am35x_ops;
glue->phy = usb_phy_generic_register();
- if (IS_ERR(glue->phy))
+ if (IS_ERR(glue->phy)) {
+ ret = PTR_ERR(glue->phy);
goto err7;
+ }
platform_set_drvdata(pdev, glue);
pinfo = am35x_dev_info;
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index b03d3b867fca..210b7e43a6fd 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -30,13 +30,11 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
-#include <mach/da8xx.h>
-#include <linux/platform_data/usb-davinci.h>
-
#include "musb_core.h"
/*
@@ -80,61 +78,15 @@
#define DA8XX_MENTOR_CORE_OFFSET 0x400
-#define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
-
struct da8xx_glue {
struct device *dev;
struct platform_device *musb;
- struct platform_device *phy;
+ struct platform_device *usb_phy;
struct clk *clk;
+ struct phy *phy;
};
/*
- * REVISIT (PM): we should be able to keep the PHY in low power mode most
- * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
- * and, when in host mode, autosuspending idle root ports... PHY_PLLON
- * (overriding SUSPENDM?) then likely needs to stay off.
- */
-
-static inline void phy_on(void)
-{
- u32 cfgchip2 = __raw_readl(CFGCHIP2);
-
- /*
- * Start the on-chip PHY and its PLL.
- */
- cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN);
- cfgchip2 |= CFGCHIP2_PHY_PLLON;
- __raw_writel(cfgchip2, CFGCHIP2);
-
- pr_info("Waiting for USB PHY clock good...\n");
- while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
- cpu_relax();
-}
-
-static inline void phy_off(void)
-{
- u32 cfgchip2 = __raw_readl(CFGCHIP2);
-
- /*
- * Ensure that USB 1.1 reference clock is not being sourced from
- * USB 2.0 PHY. Otherwise do not power down the PHY.
- */
- if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) &&
- (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) {
- pr_warning("USB 1.1 clocked from USB 2.0 PHY -- "
- "can't power it down\n");
- return;
- }
-
- /*
- * Power down the on-chip PHY.
- */
- cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN;
- __raw_writel(cfgchip2, CFGCHIP2);
-}
-
-/*
* Because we don't set CTRL.UINT, it's "important" to:
* - not read/write INTRUSB/INTRUSBE (except during
* initial setup, as a workaround);
@@ -385,29 +337,29 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
{
- u32 cfgchip2 = __raw_readl(CFGCHIP2);
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
+ enum phy_mode phy_mode;
- cfgchip2 &= ~CFGCHIP2_OTGMODE;
switch (musb_mode) {
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
- cfgchip2 |= CFGCHIP2_FORCE_HOST;
+ phy_mode = PHY_MODE_USB_HOST;
break;
case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
- cfgchip2 |= CFGCHIP2_FORCE_DEVICE;
+ phy_mode = PHY_MODE_USB_DEVICE;
break;
case MUSB_OTG: /* Don't override the VBUS/ID comparators */
- cfgchip2 |= CFGCHIP2_NO_OVERRIDE;
+ phy_mode = PHY_MODE_USB_OTG;
break;
default:
- dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode);
+ return -EINVAL;
}
- __raw_writel(cfgchip2, CFGCHIP2);
- return 0;
+ return phy_set_mode(glue->phy, phy_mode);
}
static int da8xx_musb_init(struct musb *musb)
{
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
void __iomem *reg_base = musb->ctrl_base;
u32 rev;
int ret = -ENODEV;
@@ -425,32 +377,56 @@ static int da8xx_musb_init(struct musb *musb)
goto fail;
}
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable clock\n");
+ goto fail;
+ }
+
setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
/* Reset the controller */
musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
/* Start the on-chip PHY and its PLL. */
- phy_on();
+ ret = phy_init(glue->phy);
+ if (ret) {
+ dev_err(glue->dev, "Failed to init phy.\n");
+ goto err_phy_init;
+ }
+
+ ret = phy_power_on(glue->phy);
+ if (ret) {
+ dev_err(glue->dev, "Failed to power on phy.\n");
+ goto err_phy_power_on;
+ }
msleep(5);
/* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
- pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n",
- rev, __raw_readl(CFGCHIP2),
+ pr_debug("DA8xx OTG revision %08x, control %02x\n", rev,
musb_readb(reg_base, DA8XX_USB_CTRL_REG));
musb->isr = da8xx_musb_interrupt;
return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+err_phy_init:
+ clk_disable_unprepare(glue->clk);
fail:
return ret;
}
static int da8xx_musb_exit(struct musb *musb)
{
+ struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
+
del_timer_sync(&otg_workaround);
- phy_off();
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ clk_disable_unprepare(glue->clk);
usb_put_phy(musb->xceiv);
@@ -486,30 +462,25 @@ static int da8xx_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct platform_device *musb;
struct da8xx_glue *glue;
struct platform_device_info pinfo;
struct clk *clk;
+ int ret;
- int ret = -ENOMEM;
-
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
- goto err0;
- }
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
- clk = clk_get(&pdev->dev, "usb20");
+ clk = devm_clk_get(&pdev->dev, "usb20");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(clk);
- goto err3;
+ return PTR_ERR(clk);
}
- ret = clk_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock\n");
- goto err4;
+ glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
+ if (IS_ERR(glue->phy)) {
+ dev_err(&pdev->dev, "failed to get phy\n");
+ return PTR_ERR(glue->phy);
}
glue->dev = &pdev->dev;
@@ -517,10 +488,11 @@ static int da8xx_probe(struct platform_device *pdev)
pdata->platform_ops = &da8xx_ops;
- glue->phy = usb_phy_generic_register();
- if (IS_ERR(glue->phy)) {
- ret = PTR_ERR(glue->phy);
- goto err5;
+ glue->usb_phy = usb_phy_generic_register();
+ ret = PTR_ERR_OR_ZERO(glue->usb_phy);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register usb_phy\n");
+ return ret;
}
platform_set_drvdata(pdev, glue);
@@ -544,28 +516,13 @@ static int da8xx_probe(struct platform_device *pdev)
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
- glue->musb = musb = platform_device_register_full(&pinfo);
- if (IS_ERR(musb)) {
- ret = PTR_ERR(musb);
+ glue->musb = platform_device_register_full(&pinfo);
+ ret = PTR_ERR_OR_ZERO(glue->musb);
+ if (ret) {
dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
- goto err6;
+ usb_phy_generic_unregister(glue->usb_phy);
}
- return 0;
-
-err6:
- usb_phy_generic_unregister(glue->phy);
-
-err5:
- clk_disable(clk);
-
-err4:
- clk_put(clk);
-
-err3:
- kfree(glue);
-
-err0:
return ret;
}
@@ -574,10 +531,7 @@ static int da8xx_remove(struct platform_device *pdev)
struct da8xx_glue *glue = platform_get_drvdata(pdev);
platform_device_unregister(glue->musb);
- usb_phy_generic_unregister(glue->phy);
- clk_disable(glue->clk);
- clk_put(glue->clk);
- kfree(glue);
+ usb_phy_generic_unregister(glue->usb_phy);
return 0;
}
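The da8xx glue above stops poking CFGCHIP2 directly and drives the PHY through the generic PHY framework instead. The usual ordering is devm_phy_get(), then phy_init() and phy_power_on() on the way up, with the mirror image on teardown or error. A sketch of that sequence, using invented demo_* names and the "usb-phy" lookup string from the hunk, follows.

/*
 * Generic-PHY bring-up/teardown sketch; error paths unwind in reverse.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int demo_phy_bringup(struct device *dev, struct phy **out)
{
        struct phy *phy;
        int ret;

        phy = devm_phy_get(dev, "usb-phy");
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        ret = phy_init(phy);
        if (ret)
                return ret;

        ret = phy_power_on(phy);
        if (ret) {
                phy_exit(phy);          /* undo phy_init() on error */
                return ret;
        }

        *out = phy;
        return 0;
}

static void demo_phy_teardown(struct phy *phy)
{
        phy_power_off(phy);
        phy_exit(phy);
}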
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 74fc3069cb42..27dadc0d9114 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1448,7 +1448,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
{
u8 reg;
char *type;
- char aInfo[90], aRevision[32], aDate[12];
+ char aInfo[90];
void __iomem *mbase = musb->mregs;
int status = 0;
int i;
@@ -1482,7 +1482,6 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
- aDate[0] = 0;
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
@@ -1497,11 +1496,10 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
/* log release info */
musb->hwvers = musb_read_hwvers(mbase);
- snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
- MUSB_HWVERS_MINOR(musb->hwvers),
- (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
- pr_debug("%s: %sHDRC RTL version %s %s\n",
- musb_driver_name, type, aRevision, aDate);
+ pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
+ musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
+ MUSB_HWVERS_MINOR(musb->hwvers),
+ (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
/* configure ep0 */
musb_configure_ep0(musb);
@@ -1831,11 +1829,80 @@ static const struct attribute_group musb_attr_group = {
.attrs = musb_attributes,
};
+#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
+ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
+
+/*
+ * Check the musb devctl session bit to determine if we want to
+ * allow PM runtime for the device. In general, we want to keep things
+ * active when the session bit is set except after host disconnect.
+ *
+ * Only called from musb_irq_work. If this ever needs to get called
+ * elsewhere, proper locking must be implemented for musb->session.
+ */
+static void musb_pm_runtime_check_session(struct musb *musb)
+{
+ u8 devctl, s;
+ int error;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ /* Handle session status quirks first */
+ s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
+ MUSB_DEVCTL_HR;
+ switch (devctl & ~s) {
+ case MUSB_QUIRK_B_INVALID_VBUS_91:
+ if (!musb->session && !musb->quirk_invalid_vbus) {
+ musb->quirk_invalid_vbus = true;
+ musb_dbg(musb,
+ "First invalid vbus, assume no session");
+ return;
+ }
+ break;
+ case MUSB_QUIRK_A_DISCONNECT_19:
+ if (!musb->session)
+ break;
+ musb_dbg(musb, "Allow PM on possible host mode disconnect");
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ musb->session = false;
+ return;
+ default:
+ break;
+ }
+
+ /* No need to do anything if session has not changed */
+ s = devctl & MUSB_DEVCTL_SESSION;
+ if (s == musb->session)
+ return;
+
+ /* Block PM or allow PM? */
+ if (s) {
+ musb_dbg(musb, "Block PM on active session: %02x", devctl);
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0)
+ dev_err(musb->controller, "Could not enable: %i\n",
+ error);
+ } else {
+ musb_dbg(musb, "Allow PM with no session: %02x", devctl);
+ musb->quirk_invalid_vbus = false;
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+
+ musb->session = s;
+}
+
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work);
+ musb_pm_runtime_check_session(musb);
+
if (musb->xceiv->otg->state != musb->xceiv_old_state) {
musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
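musb_pm_runtime_check_session() above toggles runtime PM based on the DEVCTL session bit: hold a usage count while a session is active, hand the device back to autosuspend when it goes away. The core pairing is sketched below; demo_set_session() and its bool argument are invented for illustration.

/*
 * Runtime-PM block/allow pairing keyed on an "active" condition.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static void demo_set_session(struct device *dev, bool active)
{
        int error;

        if (active) {
                error = pm_runtime_get_sync(dev);       /* blocks autosuspend */
                if (error < 0)
                        dev_err(dev, "could not resume: %i\n", error);
        } else {
                pm_runtime_mark_last_busy(dev);         /* restart idle timer */
                pm_runtime_put_autosuspend(dev);        /* may suspend later */
        }
}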
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b55a776b03eb..2cb88a498f8a 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -378,6 +378,8 @@ struct musb {
u8 min_power; /* vbus for periph, in mA/2 */
int port_mode; /* MUSB_PORT_MODE_* */
+ bool session;
+ bool quirk_invalid_vbus;
bool is_host;
int a_wait_bcon; /* VBUS timeout in msecs */
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 2537179636db..0f17d2140db6 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -145,43 +145,6 @@ static const struct debugfs_reg32 dsps_musb_regs[] = {
{ "mode", 0xe8 },
};
-static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
- struct device *dev = musb->controller;
- struct dsps_glue *glue = dev_get_drvdata(dev->parent);
-
- if (timeout == 0)
- timeout = jiffies + msecs_to_jiffies(3);
-
- /* Never idle if active, or when VBUS timeout is not set as host */
- if (musb->is_active || (musb->a_wait_bcon == 0 &&
- musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
- dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&glue->timer);
- glue->last_timer = jiffies;
- return;
- }
- if (musb->port_mode != MUSB_PORT_MODE_DUAL_ROLE)
- return;
-
- if (!musb->g.dev.driver)
- return;
-
- if (time_after(glue->last_timer, timeout) &&
- timer_pending(&glue->timer)) {
- dev_dbg(musb->controller,
- "Longer idle timer already pending, ignoring...\n");
- return;
- }
- glue->last_timer = timeout;
-
- dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- jiffies_to_msecs(timeout - jiffies));
- mod_timer(&glue->timer, timeout);
-}
-
/**
* dsps_musb_enable - enable interrupts
*/
@@ -206,7 +169,6 @@ static void dsps_musb_enable(struct musb *musb)
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies +
msecs_to_jiffies(wrp->poll_timeout));
- dsps_musb_try_idle(musb, 0);
}
/**
@@ -236,6 +198,11 @@ static void otg_timer(unsigned long _musb)
u8 devctl;
unsigned long flags;
int skip_session = 0;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0)
+ dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
/*
* We poll because DSPS IP's won't expose several OTG-critical
@@ -247,6 +214,10 @@ static void otg_timer(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(wrp->poll_timeout));
+ break;
case OTG_STATE_A_WAIT_BCON:
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
@@ -275,6 +246,9 @@ static void otg_timer(unsigned long _musb)
break;
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static irqreturn_t dsps_interrupt(int irq, void *hci)
@@ -338,7 +312,8 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
MUSB_HST_MODE(musb);
musb->xceiv->otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- del_timer(&glue->timer);
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(wrp->poll_timeout));
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -358,11 +333,17 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
if (musb->int_tx || musb->int_rx || musb->int_usb)
ret |= musb_interrupt(musb);
- /* Poll for ID change in OTG port mode */
- if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
- musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+ /* Poll for ID change and connect */
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_B_IDLE:
+ case OTG_STATE_A_WAIT_BCON:
mod_timer(&glue->timer, jiffies +
msecs_to_jiffies(wrp->poll_timeout));
+ break;
+ default:
+ break;
+ }
+
out:
spin_unlock_irqrestore(&musb->lock, flags);
@@ -461,6 +442,9 @@ static int dsps_musb_init(struct musb *musb)
musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
}
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(glue->wrp->poll_timeout));
+
return dsps_musb_dbg_init(musb, glue);
}
@@ -620,7 +604,6 @@ static struct musb_platform_ops dsps_ops = {
.enable = dsps_musb_enable,
.disable = dsps_musb_disable,
- .try_idle = dsps_musb_try_idle,
.set_mode = dsps_musb_set_mode,
.recover = dsps_musb_recover,
};
@@ -784,6 +767,8 @@ static int dsps_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, glue);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -795,11 +780,15 @@ static int dsps_probe(struct platform_device *pdev)
if (ret)
goto err3;
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err3:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
err2:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -811,7 +800,8 @@ static int dsps_remove(struct platform_device *pdev)
platform_device_unregister(glue->musb);
/* disable usbss clocks */
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
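The dsps probe path above opts into autosuspend with a 200 ms delay and brackets its hardware setup with a get_sync/put_autosuspend pair. A balanced sketch of that probe/remove shape is below; the demo_* names are invented and the delay is simply the value used in the hunk.

/*
 * Autosuspend-aware probe and remove, kept symmetric for clarity.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 200);

        ret = pm_runtime_get_sync(&pdev->dev);  /* power up for init */
        if (ret < 0)
                goto err;

        /* ... hardware initialisation would run here ... */

        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);
        return 0;

err:
        pm_runtime_put_sync(&pdev->dev);        /* get_sync still took a ref */
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}

static int demo_remove(struct platform_device *pdev)
{
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}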
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6d1e975e9605..bff4869a57cd 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1964,6 +1964,9 @@ static int musb_gadget_stop(struct usb_gadget *g)
* that currently misbehaves.
*/
+ /* Force check of devctl register for PM runtime */
+ schedule_work(&musb->irq_work);
+
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index fe08e776fec3..61b5f1c3c5bc 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -245,6 +245,7 @@ void musb_root_disconnect(struct musb *musb)
usb_otg_state_string(musb->xceiv->otg->state));
}
}
+EXPORT_SYMBOL_GPL(musb_root_disconnect);
/*---------------------------------------------------------------------*/
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 0b4cec940386..1ab6973d4f61 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -49,9 +49,6 @@ struct omap2430_glue {
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
- bool cable_connected;
- bool enabled;
- bool powered;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
@@ -141,45 +138,6 @@ static inline void omap2430_low_level_init(struct musb *musb)
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
-/*
- * We can get multiple cable events so we need to keep track
- * of the power state. Only keep power enabled if USB cable is
- * connected and a gadget is started.
- */
-static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
-{
- struct device *dev = musb->controller;
- struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- bool power_up;
- int res;
-
- if (glue->enabled != enabled)
- glue->enabled = enabled;
-
- if (glue->cable_connected != cable)
- glue->cable_connected = cable;
-
- power_up = glue->enabled && glue->cable_connected;
- if (power_up == glue->powered) {
- dev_warn(musb->controller, "power state already %i\n",
- power_up);
- return;
- }
-
- glue->powered = power_up;
-
- if (power_up) {
- res = pm_runtime_get_sync(musb->controller);
- if (res < 0) {
- dev_err(musb->controller, "could not enable: %i", res);
- glue->powered = false;
- }
- } else {
- pm_runtime_mark_last_busy(musb->controller);
- pm_runtime_put_autosuspend(musb->controller);
- }
-}
-
static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
@@ -203,21 +161,15 @@ static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
- struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
+ struct musb_hdrc_platform_data *pdata =
+ dev_get_platdata(musb->controller);
struct omap_musb_board_data *data = pdata->board_data;
struct usb_otg *otg = musb->xceiv->otg;
- bool cable_connected;
-
- cable_connected = ((glue->status == MUSB_ID_GROUND) ||
- (glue->status == MUSB_VBUS_VALID));
-
- if (cable_connected)
- omap2430_set_power(musb, glue->enabled, cable_connected);
+ pm_runtime_get_sync(musb->controller);
switch (glue->status) {
case MUSB_ID_GROUND:
- dev_dbg(dev, "ID GND\n");
+ dev_dbg(musb->controller, "ID GND\n");
otg->default_a = true;
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
@@ -230,7 +182,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
break;
case MUSB_VBUS_VALID:
- dev_dbg(dev, "VBUS Connect\n");
+ dev_dbg(musb->controller, "VBUS Connect\n");
otg->default_a = false;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
@@ -240,7 +192,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
case MUSB_ID_FLOAT:
case MUSB_VBUS_OFF:
- dev_dbg(dev, "VBUS Disconnect\n");
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
if (musb->gadget_driver)
@@ -253,12 +205,10 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
USB_MODE_DISCONNECT);
break;
default:
- dev_dbg(dev, "ID float\n");
+ dev_dbg(musb->controller, "ID float\n");
}
-
- if (!cable_connected)
- omap2430_set_power(musb, glue->enabled, cable_connected);
-
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
}
@@ -376,8 +326,6 @@ static void omap2430_musb_enable(struct musb *musb)
if (!WARN_ON(!musb->phy))
phy_power_on(musb->phy);
- omap2430_set_power(musb, true, glue->cable_connected);
-
switch (glue->status) {
case MUSB_ID_GROUND:
@@ -419,8 +367,6 @@ static void omap2430_musb_disable(struct musb *musb)
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
-
- omap2430_set_power(musb, false, glue->cable_connected);
}
static int omap2430_musb_exit(struct musb *musb)
@@ -571,7 +517,7 @@ static int omap2430_probe(struct platform_device *pdev)
pm_runtime_enable(glue->dev);
pm_runtime_use_autosuspend(glue->dev);
- pm_runtime_set_autosuspend_delay(glue->dev, 500);
+ pm_runtime_set_autosuspend_delay(glue->dev, 100);
ret = platform_device_add(musb);
if (ret) {
@@ -591,11 +537,9 @@ err0:
static int omap2430_remove(struct platform_device *pdev)
{
struct omap2430_glue *glue = platform_get_drvdata(pdev);
- struct musb *musb = glue_to_musb(glue);
pm_runtime_get_sync(glue->dev);
platform_device_unregister(glue->musb);
- omap2430_set_power(musb, false, false);
pm_runtime_put_sync(glue->dev);
pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c6ee16660572..1408245be18e 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -74,6 +74,7 @@
#define SUNXI_MUSB_FL_HAS_SRAM 5
#define SUNXI_MUSB_FL_HAS_RESET 6
#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
+#define SUNXI_MUSB_FL_PHY_MODE_PEND 8
/* Our read/write methods need access and do not get passed in a musb ref :| */
static struct musb *sunxi_musb;
@@ -87,6 +88,7 @@ struct sunxi_glue {
struct phy *phy;
struct platform_device *usb_phy;
struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
unsigned long flags;
struct work_struct work;
struct extcon_dev *extcon;
@@ -140,6 +142,9 @@ static void sunxi_musb_work(struct work_struct *work)
clear_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
}
}
+
+ if (test_and_clear_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags))
+ phy_set_mode(glue->phy, glue->phy_mode);
}
static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
@@ -341,6 +346,50 @@ static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
{
}
+static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ enum phy_mode new_mode;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ break;
+ default:
+ dev_err(musb->controller->parent,
+ "Error requested mode not supported by this kernel\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_PORT_MODE_DUAL_ROLE) {
+ dev_err(musb->controller->parent,
+ "Error changing modes is only supported in dual role mode\n");
+ return -EINVAL;
+ }
+
+ if (musb->port1_status & USB_PORT_STAT_ENABLE)
+ musb_root_disconnect(musb);
+
+ /*
+ * phy_set_mode may sleep, and we're called with a spinlock held,
+ * so let sunxi_musb_work deal with it.
+ */
+ glue->phy_mode = new_mode;
+ set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+ schedule_work(&glue->work);
+
+ return 0;
+}
+
/*
* sunxi musb register layout
* 0x00 - 0x17 fifo regs, 1 long per fifo
@@ -568,6 +617,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.writew = sunxi_musb_writew,
.dma_init = sunxi_musb_dma_controller_create,
.dma_exit = sunxi_musb_dma_controller_destroy,
+ .set_mode = sunxi_musb_set_mode,
.set_vbus = sunxi_musb_set_vbus,
.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
.post_root_reset_end = sunxi_musb_post_root_reset_end,
@@ -614,21 +664,28 @@ static int sunxi_musb_probe(struct platform_device *pdev)
return -EINVAL;
}
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
memset(&pdata, 0, sizeof(pdata));
switch (usb_get_dr_mode(&pdev->dev)) {
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
case USB_DR_MODE_HOST:
pdata.mode = MUSB_PORT_MODE_HOST;
+ glue->phy_mode = PHY_MODE_USB_HOST;
break;
#endif
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
case USB_DR_MODE_PERIPHERAL:
pdata.mode = MUSB_PORT_MODE_GADGET;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
break;
#endif
#ifdef CONFIG_USB_MUSB_DUAL_ROLE
case USB_DR_MODE_OTG:
pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
+ glue->phy_mode = PHY_MODE_USB_OTG;
break;
#endif
default:
@@ -638,10 +695,6 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pdata.platform_ops = &sunxi_musb_ops;
pdata.config = &sunxi_musb_hdrc_config;
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
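sunxi_musb_set_mode() above runs under the musb spinlock, so it cannot call the sleeping phy_set_mode() directly; it records the target mode, sets a flag bit, and lets the existing work item apply it in process context. A generic sketch of that defer-to-work pattern, with invented demo_* names, follows.

/*
 * Atomic context records the request; process context applies it.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_FL_MODE_PEND       0

struct demo_glue {
        unsigned long flags;
        int pending_mode;
        struct work_struct work;
};

/* May be called with a spinlock held: only flag the request. */
static void demo_request_mode(struct demo_glue *g, int mode)
{
        g->pending_mode = mode;
        set_bit(DEMO_FL_MODE_PEND, &g->flags);
        schedule_work(&g->work);
}

/* Runs in process context, so sleeping calls are allowed here. */
static void demo_work(struct work_struct *w)
{
        struct demo_glue *g = container_of(w, struct demo_glue, work);

        if (test_and_clear_bit(DEMO_FL_MODE_PEND, &g->flags))
                pr_info("would apply mode %d (sleeping call goes here)\n",
                        g->pending_mode);
}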
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 0c912d3950a5..a03caf4b1327 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1248,7 +1248,7 @@ static void ab8500_usb_set_ab8500_tuning_values(struct ab8500_usb *ab)
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78);
if (err < 0)
- dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n",
+ dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
err);
/* Switch to normal mode/disable Bank 0x12 access */
@@ -1290,7 +1290,7 @@ static void ab8500_usb_set_ab8505_tuning_values(struct ab8500_usb *ab)
0xFC, 0x80);
if (err < 0)
- dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n",
+ dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
err);
/* Switch to normal mode/disable Bank 0x12 access */
@@ -1321,7 +1321,7 @@ static void ab8500_usb_set_ab8540_tuning_values(struct ab8500_usb *ab)
err = abx500_set_register_interruptible(ab->dev,
AB8540_DEBUG, AB8500_USB_PHY_TUNE3, 0x90);
if (err < 0)
- dev_err(ab->dev, "Failed to set PHY_TUNE3 regester ret=%d\n",
+ dev_err(ab->dev, "Failed to set PHY_TUNE3 register ret=%d\n",
err);
}
@@ -1351,7 +1351,7 @@ static void ab8500_usb_set_ab9540_tuning_values(struct ab8500_usb *ab)
err = abx500_set_register_interruptible(ab->dev,
AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x80);
if (err < 0)
- dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n",
+ dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
err);
/* Switch to normal mode/disable Bank 0x12 access */
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 427efb5eebae..8311ba2968cd 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -118,8 +118,6 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
status = USB_EVENT_VBUS;
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
- if (otg->gadget)
- usb_gadget_vbus_connect(otg->gadget);
/* drawing a "unit load" is *always* OK, except for OTG */
nop_set_vbus_draw(nop, 100);
@@ -129,8 +127,6 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
} else {
nop_set_vbus_draw(nop, 0);
- if (otg->gadget)
- usb_gadget_vbus_disconnect(otg->gadget);
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
@@ -191,7 +187,8 @@ static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
otg->gadget = gadget;
if (otg->state == OTG_STATE_B_PERIPHERAL)
- usb_gadget_vbus_connect(gadget);
+ atomic_notifier_call_chain(&otg->usb_phy->notifier,
+ USB_EVENT_VBUS, otg->gadget);
else
otg->state = OTG_STATE_B_IDLE;
return 0;
@@ -326,6 +323,8 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
gpiod_to_irq(nop->gpiod_vbus), err);
return err;
}
+ nop->phy.otg->state = gpiod_get_value(nop->gpiod_vbus) ?
+ OTG_STATE_B_PERIPHERAL : OTG_STATE_B_IDLE;
}
nop->phy.init = usb_gen_phy_init;
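The phy-generic hunks above stop calling usb_gadget_vbus_connect()/disconnect() directly; VBUS changes are now delivered only through the PHY's atomic notifier chain (with the gadget pointer as notifier data), and the initial OTG state is seeded from the current GPIO level at probe time. A minimal sketch of how a consumer might react to those events, assuming it registers with usb_register_notifier(); the function and notifier names below are illustrative, not part of the patch:

    /* Illustrative consumer -- not part of this patch. */
    static int example_vbus_notify(struct notifier_block *nb,
                                   unsigned long event, void *data)
    {
            struct usb_gadget *gadget = data;

            if (!gadget)
                    return NOTIFY_DONE;
            if (event == USB_EVENT_VBUS)
                    usb_gadget_vbus_connect(gadget);
            else if (event == USB_EVENT_NONE)
                    usb_gadget_vbus_disconnect(gadget);
            return NOTIFY_OK;
    }
    /* ... usb_register_notifier(phy, &example_nb); */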
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 00bfea01be65..0e2f1a36d315 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -27,6 +27,7 @@
#define DRIVER_NAME "mxs_phy"
#define HW_USBPHY_PWD 0x00
+#define HW_USBPHY_TX 0x10
#define HW_USBPHY_CTRL 0x30
#define HW_USBPHY_CTRL_SET 0x34
#define HW_USBPHY_CTRL_CLR 0x38
@@ -38,6 +39,10 @@
#define HW_USBPHY_IP_SET 0x94
#define HW_USBPHY_IP_CLR 0x98
+#define GM_USBPHY_TX_TXCAL45DP(x) (((x) & 0xf) << 16)
+#define GM_USBPHY_TX_TXCAL45DN(x) (((x) & 0xf) << 8)
+#define GM_USBPHY_TX_D_CAL(x) (((x) & 0xf) << 0)
+
#define BM_USBPHY_CTRL_SFTRST BIT(31)
#define BM_USBPHY_CTRL_CLKGATE BIT(30)
#define BM_USBPHY_CTRL_OTG_ID_VALUE BIT(27)
@@ -115,6 +120,12 @@
*/
#define MXS_PHY_NEED_IP_FIX BIT(3)
+/* Minimum and maximum values for device tree entries */
+#define MXS_PHY_TX_CAL45_MIN 30
+#define MXS_PHY_TX_CAL45_MAX 55
+#define MXS_PHY_TX_D_CAL_MIN 79
+#define MXS_PHY_TX_D_CAL_MAX 119
+
struct mxs_phy_data {
unsigned int flags;
};
@@ -164,6 +175,8 @@ struct mxs_phy {
const struct mxs_phy_data *data;
struct regmap *regmap_anatop;
int port_id;
+ u32 tx_reg_set;
+ u32 tx_reg_mask;
};
static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
@@ -185,6 +198,20 @@ static void mxs_phy_clock_switch_delay(void)
usleep_range(300, 400);
}
+static void mxs_phy_tx_init(struct mxs_phy *mxs_phy)
+{
+ void __iomem *base = mxs_phy->phy.io_priv;
+ u32 phytx;
+
+ /* Update TX register if there is anything to write */
+ if (mxs_phy->tx_reg_mask) {
+ phytx = readl(base + HW_USBPHY_TX);
+ phytx &= ~mxs_phy->tx_reg_mask;
+ phytx |= mxs_phy->tx_reg_set;
+ writel(phytx, base + HW_USBPHY_TX);
+ }
+}
+
static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
{
int ret;
@@ -214,6 +241,8 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
+ mxs_phy_tx_init(mxs_phy);
+
return 0;
}
@@ -459,6 +488,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
int ret;
const struct of_device_id *of_id;
struct device_node *np = pdev->dev.of_node;
+ u32 val;
of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
if (!of_id)
@@ -491,6 +521,37 @@ static int mxs_phy_probe(struct platform_device *pdev)
}
}
+ /* Precompute which bits of the TX register are to be updated, if any */
+ if (!of_property_read_u32(np, "fsl,tx-cal-45-dn-ohms", &val) &&
+ val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
+ /* Scale to a 4-bit value */
+ val = (MXS_PHY_TX_CAL45_MAX - val) * 0xF
+ / (MXS_PHY_TX_CAL45_MAX - MXS_PHY_TX_CAL45_MIN);
+ mxs_phy->tx_reg_mask |= GM_USBPHY_TX_TXCAL45DN(~0);
+ mxs_phy->tx_reg_set |= GM_USBPHY_TX_TXCAL45DN(val);
+ }
+
+ if (!of_property_read_u32(np, "fsl,tx-cal-45-dp-ohms", &val) &&
+ val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
+ /* Scale to a 4-bit value. */
+ val = (MXS_PHY_TX_CAL45_MAX - val) * 0xF
+ / (MXS_PHY_TX_CAL45_MAX - MXS_PHY_TX_CAL45_MIN);
+ mxs_phy->tx_reg_mask |= GM_USBPHY_TX_TXCAL45DP(~0);
+ mxs_phy->tx_reg_set |= GM_USBPHY_TX_TXCAL45DP(val);
+ }
+
+ if (!of_property_read_u32(np, "fsl,tx-d-cal", &val) &&
+ val >= MXS_PHY_TX_D_CAL_MIN && val <= MXS_PHY_TX_D_CAL_MAX) {
+ /* Scale to a 4-bit value, biasing the rounding upwards by
+ * adding 2/3 of the denominator before dividing.
+ */

+ val = ((MXS_PHY_TX_D_CAL_MAX - val) * 0xF
+ + (MXS_PHY_TX_D_CAL_MAX - MXS_PHY_TX_D_CAL_MIN) * 2/3)
+ / (MXS_PHY_TX_D_CAL_MAX - MXS_PHY_TX_D_CAL_MIN);
+ mxs_phy->tx_reg_mask |= GM_USBPHY_TX_D_CAL(~0);
+ mxs_phy->tx_reg_set |= GM_USBPHY_TX_D_CAL(val);
+ }
+
ret = of_alias_get_id(np, "usbphy");
if (ret < 0)
dev_dbg(&pdev->dev, "failed to get alias id, errno %d\n", ret);
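The device-tree properties above are mapped onto 4-bit fields of HW_USBPHY_TX, and mxs_phy_tx_init() then applies the precomputed mask/value pair on every PHY initialisation. Worked examples of the integer scaling, with input values chosen purely for illustration:

    fsl,tx-cal-45-dp-ohms = <45>:
        val = (55 - 45) * 0xF / (55 - 30) = 150 / 25 = 6
    fsl,tx-d-cal = <79> (the minimum):
        val = ((119 - 79) * 0xF + (119 - 79) * 2/3) / (119 - 79)
            = (600 + 26) / 40 = 15
    fsl,tx-d-cal = <119> (the maximum):
        val = (0 * 0xF + 26) / 40 = 0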
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index ac67bab9124c..012a37aa3e0d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -482,6 +482,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a7796",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3,
+ },
+ {
.compatible = "renesas,rcar-gen2-usbhs",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index c4c64740a3e7..5bc7a6138855 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -335,7 +335,6 @@ static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
if (!buf) {
usb_ep_free_request(&dcp->ep, req);
- dev_err(dev, "recip data allocation fail\n");
return;
}
@@ -1062,14 +1061,11 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
int ret;
gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL);
- if (!gpriv) {
- dev_err(dev, "Could not allocate gadget priv\n");
+ if (!gpriv)
return -ENOMEM;
- }
uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL);
if (!uep) {
- dev_err(dev, "Could not allocate ep\n");
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
@@ -1106,6 +1102,8 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.max_speed = USB_SPEED_HIGH;
+ gpriv->gadget.quirk_avoids_skb_reserve = usbhs_get_dparam(priv,
+ has_usb_dmac);
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 3bf0b72eb359..165e81bfd93a 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -166,14 +166,10 @@ static struct usbhsh_request *usbhsh_ureq_alloc(struct usbhsh_hpriv *hpriv,
gfp_t mem_flags)
{
struct usbhsh_request *ureq;
- struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct device *dev = usbhs_priv_to_dev(priv);
ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
- if (!ureq) {
- dev_err(dev, "ureq alloc fail\n");
+ if (!ureq)
return NULL;
- }
usbhs_pkt_init(&ureq->pkt);
ureq->urb = urb;
@@ -388,10 +384,8 @@ static int usbhsh_endpoint_attach(struct usbhsh_hpriv *hpriv,
unsigned long flags;
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
- if (!uep) {
- dev_err(dev, "usbhsh_ep alloc fail\n");
+ if (!uep)
return -ENOMEM;
- }
/******************** spin lock ********************/
usbhs_lock(priv, flags);
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c238772b9e9e..9396a8c14af8 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -804,10 +804,8 @@ int usbhs_pipe_probe(struct usbhs_priv *priv)
}
info->pipe = kzalloc(sizeof(struct usbhs_pipe) * pipe_size, GFP_KERNEL);
- if (!info->pipe) {
- dev_err(dev, "Could not allocate pipe\n");
+ if (!info->pipe)
return -ENOMEM;
- }
info->size = pipe_size;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 4d6a5c672a3d..54a4de0efdba 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 4f7e072e4e00..e49ad0c63ad8 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -30,12 +30,12 @@
#include <linux/usb/ezusb.h>
/* make a simple define to handle if we are compiling keyspan_pda or xircom support */
-#if defined(CONFIG_USB_SERIAL_KEYSPAN_PDA) || defined(CONFIG_USB_SERIAL_KEYSPAN_PDA_MODULE)
+#if IS_ENABLED(CONFIG_USB_SERIAL_KEYSPAN_PDA)
#define KEYSPAN
#else
#undef KEYSPAN
#endif
-#if defined(CONFIG_USB_SERIAL_XIRCOM) || defined(CONFIG_USB_SERIAL_XIRCOM_MODULE)
+#if IS_ENABLED(CONFIG_USB_SERIAL_XIRCOM)
#define XIRCOM
#else
#undef XIRCOM
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 07b4bf01061d..a8b9bdba314f 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -179,23 +179,23 @@
/* Config struct */
struct ti_uart_config {
- __u16 wBaudRate;
- __u16 wFlags;
- __u8 bDataBits;
- __u8 bParity;
- __u8 bStopBits;
+ __be16 wBaudRate;
+ __be16 wFlags;
+ u8 bDataBits;
+ u8 bParity;
+ u8 bStopBits;
char cXon;
char cXoff;
- __u8 bUartMode;
+ u8 bUartMode;
} __packed;
/* Get port status */
struct ti_port_status {
- __u8 bCmdCode;
- __u8 bModuleId;
- __u8 bErrorCode;
- __u8 bMSR;
- __u8 bLSR;
+ u8 bCmdCode;
+ u8 bModuleId;
+ u8 bErrorCode;
+ u8 bMSR;
+ u8 bLSR;
} __packed;
/* Purge modes */
@@ -218,12 +218,12 @@ struct ti_port_status {
#define TI_RW_DATA_DOUBLE_WORD 0x04
struct ti_write_data_bytes {
- __u8 bAddrType;
- __u8 bDataType;
- __u8 bDataCounter;
+ u8 bAddrType;
+ u8 bDataType;
+ u8 bDataCounter;
__be16 wBaseAddrHi;
__be16 wBaseAddrLo;
- __u8 bData[0];
+ u8 bData[0];
} __packed;
struct ti_read_data_request {
@@ -258,7 +258,7 @@ struct ti_interrupt {
/* Firmware image header */
struct ti_firmware_header {
__le16 wLength;
- __u8 bCheckSum;
+ u8 bCheckSum;
} __packed;
/* UART addresses */
@@ -276,9 +276,6 @@ struct ti_firmware_header {
#define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
-/* supported setserial flags */
-#define TI_SET_SERIAL_FLAGS 0
-
/* read urb states */
#define TI_READ_URB_RUNNING 0
#define TI_READ_URB_STOPPING 1
@@ -288,11 +285,10 @@ struct ti_firmware_header {
struct ti_port {
int tp_is_open;
- __u8 tp_msr;
- __u8 tp_shadow_mcr;
- __u8 tp_uart_mode; /* 232 or 485 modes */
+ u8 tp_msr;
+ u8 tp_shadow_mcr;
+ u8 tp_uart_mode; /* 232 or 485 modes */
unsigned int tp_uart_base_addr;
- int tp_flags;
struct ti_device *tp_tdev;
struct usb_serial_port *tp_port;
spinlock_t tp_lock;
@@ -306,7 +302,6 @@ struct ti_device {
struct usb_serial *td_serial;
int td_is_3410;
bool td_rs485_only;
- int td_urb_error;
};
static int ti_startup(struct usb_serial *serial);
@@ -343,7 +338,7 @@ static int ti_get_serial_info(struct ti_port *tport,
struct serial_struct __user *ret_arg);
static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport,
struct serial_struct __user *new_arg);
-static void ti_handle_new_msr(struct ti_port *tport, __u8 msr);
+static void ti_handle_new_msr(struct ti_port *tport, u8 msr);
static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty);
static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty);
@@ -354,7 +349,7 @@ static int ti_command_in_sync(struct ti_device *tdev, __u8 command,
__u16 moduleid, __u16 value, __u8 *data, int size);
static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev,
- unsigned long addr, __u8 mask, __u8 byte);
+ unsigned long addr, u8 mask, u8 byte);
static int ti_download_firmware(struct ti_device *tdev);
@@ -647,12 +642,11 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
struct urb *urb;
int port_number;
int status;
- __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINUOUS |
- TI_PIPE_TIMEOUT_ENABLE |
- (TI_TRANSFER_TIMEOUT << 2));
+ u16 open_settings;
- if (tport == NULL)
- return -ENODEV;
+ open_settings = (TI_PIPE_MODE_CONTINUOUS |
+ TI_PIPE_TIMEOUT_ENABLE |
+ (TI_TRANSFER_TIMEOUT << 2));
dev = port->serial->dev;
tdev = tport->tp_tdev;
@@ -686,7 +680,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
ti_set_termios(tty, port, &tty->termios);
- dev_dbg(&port->dev, "%s - sending TI_OPEN_PORT\n", __func__);
status = ti_command_out_sync(tdev, TI_OPEN_PORT,
(__u8)(TI_UART1_PORT + port_number), open_settings, NULL, 0);
if (status) {
@@ -695,7 +688,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
- dev_dbg(&port->dev, "%s - sending TI_START_PORT\n", __func__);
status = ti_command_out_sync(tdev, TI_START_PORT,
(__u8)(TI_UART1_PORT + port_number), 0, NULL, 0);
if (status) {
@@ -704,7 +696,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
- dev_dbg(&port->dev, "%s - sending TI_PURGE_PORT\n", __func__);
status = ti_command_out_sync(tdev, TI_PURGE_PORT,
(__u8)(TI_UART1_PORT + port_number), TI_PURGE_INPUT, NULL, 0);
if (status) {
@@ -728,7 +719,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
ti_set_termios(tty, port, &tty->termios);
- dev_dbg(&port->dev, "%s - sending TI_OPEN_PORT (2)\n", __func__);
status = ti_command_out_sync(tdev, TI_OPEN_PORT,
(__u8)(TI_UART1_PORT + port_number), open_settings, NULL, 0);
if (status) {
@@ -737,7 +727,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
- dev_dbg(&port->dev, "%s - sending TI_START_PORT (2)\n", __func__);
status = ti_command_out_sync(tdev, TI_START_PORT,
(__u8)(TI_UART1_PORT + port_number), 0, NULL, 0);
if (status) {
@@ -747,7 +736,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
}
/* start read urb */
- dev_dbg(&port->dev, "%s - start read urb\n", __func__);
urb = port->read_urb;
if (!urb) {
dev_err(&port->dev, "%s - no read urb\n", __func__);
@@ -773,7 +761,6 @@ unlink_int_urb:
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_lock:
mutex_unlock(&tdev->td_open_close_lock);
- dev_dbg(&port->dev, "%s - exit %d\n", __func__, status);
return status;
}
@@ -789,8 +776,6 @@ static void ti_close(struct usb_serial_port *port)
tdev = usb_get_serial_data(port->serial);
tport = usb_get_serial_port_data(port);
- if (tdev == NULL || tport == NULL)
- return;
tport->tp_is_open = 0;
@@ -803,7 +788,6 @@ static void ti_close(struct usb_serial_port *port)
port_number = port->port_number;
- dev_dbg(&port->dev, "%s - sending TI_CLOSE_PORT\n", __func__);
status = ti_command_out_sync(tdev, TI_CLOSE_PORT,
(__u8)(TI_UART1_PORT + port_number), 0, NULL, 0);
if (status)
@@ -830,11 +814,10 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
struct ti_port *tport = usb_get_serial_port_data(port);
if (count == 0) {
- dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
- if (tport == NULL || !tport->tp_is_open)
+ if (!tport->tp_is_open)
return -ENODEV;
count = kfifo_in_locked(&port->write_fifo, data, count,
@@ -852,9 +835,6 @@ static int ti_write_room(struct tty_struct *tty)
int room = 0;
unsigned long flags;
- if (tport == NULL)
- return 0;
-
spin_lock_irqsave(&tport->tp_lock, flags);
room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
@@ -871,9 +851,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
int chars = 0;
unsigned long flags;
- if (tport == NULL)
- return 0;
-
spin_lock_irqsave(&tport->tp_lock, flags);
chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
@@ -900,9 +877,6 @@ static void ti_throttle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
- if (tport == NULL)
- return;
-
if (I_IXOFF(tty) || C_CRTSCTS(tty))
ti_stop_read(tport, tty);
@@ -915,9 +889,6 @@ static void ti_unthrottle(struct tty_struct *tty)
struct ti_port *tport = usb_get_serial_port_data(port);
int status;
- if (tport == NULL)
- return;
-
if (I_IXOFF(tty) || C_CRTSCTS(tty)) {
status = ti_restart_read(tport, tty);
if (status)
@@ -932,16 +903,11 @@ static int ti_ioctl(struct tty_struct *tty,
struct usb_serial_port *port = tty->driver_data;
struct ti_port *tport = usb_get_serial_port_data(port);
- if (tport == NULL)
- return -ENODEV;
-
switch (cmd) {
case TIOCGSERIAL:
- dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
return ti_get_serial_info(tport,
(struct serial_struct __user *)arg);
case TIOCSSERIAL:
- dev_dbg(&port->dev, "%s - TIOCSSERIAL\n", __func__);
return ti_set_serial_info(tty, tport,
(struct serial_struct __user *)arg);
}
@@ -959,6 +925,8 @@ static void ti_set_termios(struct tty_struct *tty,
int status;
int port_number = port->port_number;
unsigned int mcr;
+ u16 wbaudrate;
+ u16 wflags = 0;
cflag = tty->termios.c_cflag;
iflag = tty->termios.c_iflag;
@@ -967,21 +935,16 @@ static void ti_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "%s - old clfag %08x, old iflag %08x\n", __func__,
old_termios->c_cflag, old_termios->c_iflag);
- if (tport == NULL)
- return;
-
config = kmalloc(sizeof(*config), GFP_KERNEL);
if (!config)
return;
- config->wFlags = 0;
-
/* these flags must be set */
- config->wFlags |= TI_UART_ENABLE_MS_INTS;
- config->wFlags |= TI_UART_ENABLE_AUTO_START_DMA;
- config->bUartMode = (__u8)(tport->tp_uart_mode);
+ wflags |= TI_UART_ENABLE_MS_INTS;
+ wflags |= TI_UART_ENABLE_AUTO_START_DMA;
+ config->bUartMode = tport->tp_uart_mode;
- switch (cflag & CSIZE) {
+ switch (C_CSIZE(tty)) {
case CS5:
config->bDataBits = TI_UART_5_DATA_BITS;
break;
@@ -1000,29 +963,29 @@ static void ti_set_termios(struct tty_struct *tty,
/* CMSPAR isn't supported by this driver */
tty->termios.c_cflag &= ~CMSPAR;
- if (cflag & PARENB) {
- if (cflag & PARODD) {
- config->wFlags |= TI_UART_ENABLE_PARITY_CHECKING;
+ if (C_PARENB(tty)) {
+ if (C_PARODD(tty)) {
+ wflags |= TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_ODD_PARITY;
} else {
- config->wFlags |= TI_UART_ENABLE_PARITY_CHECKING;
+ wflags |= TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_EVEN_PARITY;
}
} else {
- config->wFlags &= ~TI_UART_ENABLE_PARITY_CHECKING;
+ wflags &= ~TI_UART_ENABLE_PARITY_CHECKING;
config->bParity = TI_UART_NO_PARITY;
}
- if (cflag & CSTOPB)
+ if (C_CSTOPB(tty))
config->bStopBits = TI_UART_2_STOP_BITS;
else
config->bStopBits = TI_UART_1_STOP_BITS;
- if (cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
/* RTS flow control must be off to drop RTS for baud rate B0 */
- if ((cflag & CBAUD) != B0)
- config->wFlags |= TI_UART_ENABLE_RTS_IN;
- config->wFlags |= TI_UART_ENABLE_CTS_OUT;
+ if ((C_BAUD(tty)) != B0)
+ wflags |= TI_UART_ENABLE_RTS_IN;
+ wflags |= TI_UART_ENABLE_CTS_OUT;
} else {
ti_restart_read(tport, tty);
}
@@ -1032,34 +995,34 @@ static void ti_set_termios(struct tty_struct *tty,
config->cXoff = STOP_CHAR(tty);
if (I_IXOFF(tty))
- config->wFlags |= TI_UART_ENABLE_X_IN;
+ wflags |= TI_UART_ENABLE_X_IN;
else
ti_restart_read(tport, tty);
if (I_IXON(tty))
- config->wFlags |= TI_UART_ENABLE_X_OUT;
+ wflags |= TI_UART_ENABLE_X_OUT;
}
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
if (tport->tp_tdev->td_is_3410)
- config->wBaudRate = (__u16)((923077 + baud/2) / baud);
+ wbaudrate = (923077 + baud/2) / baud;
else
- config->wBaudRate = (__u16)((461538 + baud/2) / baud);
+ wbaudrate = (461538 + baud/2) / baud;
/* FIXME: Should calculate resulting baud here and report it back */
- if ((cflag & CBAUD) != B0)
+ if ((C_BAUD(tty)) != B0)
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev,
"%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
- __func__, baud, config->wBaudRate, config->wFlags,
+ __func__, baud, wbaudrate, wflags,
config->bDataBits, config->bParity, config->bStopBits,
config->cXon, config->cXoff, config->bUartMode);
- cpu_to_be16s(&config->wBaudRate);
- cpu_to_be16s(&config->wFlags);
+ config->wBaudRate = cpu_to_be16(wbaudrate);
+ config->wFlags = cpu_to_be16(wflags);
status = ti_command_out_sync(tport->tp_tdev, TI_SET_CONFIG,
(__u8)(TI_UART1_PORT + port_number), 0, (__u8 *)config,
@@ -1071,7 +1034,7 @@ static void ti_set_termios(struct tty_struct *tty,
/* SET_CONFIG asserts RTS and DTR, reset them correctly */
mcr = tport->tp_shadow_mcr;
/* if baud rate is B0, clear RTS and DTR */
- if ((cflag & CBAUD) == B0)
+ if (C_BAUD(tty) == B0)
mcr &= ~(TI_MCR_DTR | TI_MCR_RTS);
status = ti_set_mcr(tport, mcr);
if (status)
@@ -1092,9 +1055,6 @@ static int ti_tiocmget(struct tty_struct *tty)
unsigned int mcr;
unsigned long flags;
- if (tport == NULL)
- return -ENODEV;
-
spin_lock_irqsave(&tport->tp_lock, flags);
msr = tport->tp_msr;
mcr = tport->tp_shadow_mcr;
@@ -1122,9 +1082,6 @@ static int ti_tiocmset(struct tty_struct *tty,
unsigned int mcr;
unsigned long flags;
- if (tport == NULL)
- return -ENODEV;
-
spin_lock_irqsave(&tport->tp_lock, flags);
mcr = tport->tp_shadow_mcr;
@@ -1155,9 +1112,6 @@ static void ti_break(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s - state = %d\n", __func__, break_state);
- if (tport == NULL)
- return;
-
status = ti_write_byte(port, tport->tp_tdev,
tport->tp_uart_base_addr + TI_UART_OFFSET_LCR,
TI_LCR_BREAK, break_state == -1 ? TI_LCR_BREAK : 0);
@@ -1189,7 +1143,7 @@ static void ti_interrupt_callback(struct urb *urb)
int function;
int status = urb->status;
int retval;
- __u8 msr;
+ u8 msr;
switch (status) {
case 0:
@@ -1198,11 +1152,9 @@ static void ti_interrupt_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status);
- tdev->td_urb_error = 1;
return;
default:
dev_err(dev, "%s - nonzero urb status, %d\n", __func__, status);
- tdev->td_urb_error = 1;
goto exit;
}
@@ -1275,12 +1227,10 @@ static void ti_bulk_in_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status);
- tport->tp_tdev->td_urb_error = 1;
return;
default:
dev_err(dev, "%s - nonzero urb status, %d\n",
__func__, status);
- tport->tp_tdev->td_urb_error = 1;
}
if (status == -EPIPE)
@@ -1335,12 +1285,10 @@ static void ti_bulk_out_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb shutting down, %d\n", __func__, status);
- tport->tp_tdev->td_urb_error = 1;
return;
default:
dev_err_console(port, "%s - nonzero urb status, %d\n",
__func__, status);
- tport->tp_tdev->td_urb_error = 1;
}
/* send any buffered data */
@@ -1490,7 +1438,6 @@ static int ti_get_serial_info(struct ti_port *tport,
ret_serial.type = PORT_16550A;
ret_serial.line = port->minor;
ret_serial.port = port->port_number;
- ret_serial.flags = tport->tp_flags;
ret_serial.xmit_fifo_size = kfifo_size(&port->write_fifo);
ret_serial.baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
ret_serial.closing_wait = cwait;
@@ -1515,14 +1462,13 @@ static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport,
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
- tport->tp_flags = new_serial.flags & TI_SET_SERIAL_FLAGS;
tport->tp_port->port.closing_wait = cwait;
return 0;
}
-static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
+static void ti_handle_new_msr(struct ti_port *tport, u8 msr)
{
struct async_icount *icount;
struct tty_struct *tty;
@@ -1634,8 +1580,8 @@ static int ti_command_in_sync(struct ti_device *tdev, __u8 command,
static int ti_write_byte(struct usb_serial_port *port,
- struct ti_device *tdev, unsigned long addr,
- __u8 mask, __u8 byte)
+ struct ti_device *tdev, unsigned long addr,
+ u8 mask, u8 byte)
{
int status;
unsigned int size;
@@ -1679,11 +1625,10 @@ static int ti_do_download(struct usb_device *dev, int pipe,
int len;
for (pos = sizeof(struct ti_firmware_header); pos < size; pos++)
- cs = (__u8)(cs + buffer[pos]);
+ cs = (u8)(cs + buffer[pos]);
header = (struct ti_firmware_header *)buffer;
- header->wLength = cpu_to_le16((__u16)(size
- - sizeof(struct ti_firmware_header)));
+ header->wLength = cpu_to_le16(size - sizeof(*header));
header->bCheckSum = cs;
dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
@@ -1701,7 +1646,7 @@ static int ti_download_firmware(struct ti_device *tdev)
{
int status;
int buffer_size;
- __u8 *buffer;
+ u8 *buffer;
struct usb_device *dev = tdev->td_serial->dev;
unsigned int pipe = usb_sndbulkpipe(dev,
tdev->td_serial->port[0]->bulk_out_endpointAddress);
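In the termios rework above the divisor is computed in CPU byte order and converted once with cpu_to_be16(), which is exactly what the new __be16 struct members document for sparse. Worked examples of the divisor arithmetic, with baud rates chosen for illustration:

    TI3410 at 115200 baud: wbaudrate = (923077 + 115200/2) / 115200
                                     = 980677 / 115200 = 8
    TI5052 at 9600 baud:   wbaudrate = (461538 + 9600/2) / 9600
                                     = 466338 / 9600 = 48

    config->wBaudRate = cpu_to_be16(wbaudrate); /* sent big-endian on the wire */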
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 1d8b03c81030..878b4b8761f5 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -939,10 +939,8 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
len = min(sectors, blocksize) * (pagesize + 64);
buffer = kmalloc(len, GFP_NOIO);
- if (buffer == NULL) {
- printk(KERN_WARNING "alauda_read_data: Out of memory\n");
+ if (!buffer)
return USB_STOR_TRANSPORT_ERROR;
- }
/* Figure out the initial LBA and page */
lba = address >> blockshift;
@@ -1033,18 +1031,15 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
len = min(sectors, blocksize) * pagesize;
buffer = kmalloc(len, GFP_NOIO);
- if (buffer == NULL) {
- printk(KERN_WARNING "alauda_write_data: Out of memory\n");
+ if (!buffer)
return USB_STOR_TRANSPORT_ERROR;
- }
/*
* We also need a temporary block buffer, where we read in the old data,
* overwrite parts with the new data, and manipulate the redundancy data
*/
blockbuffer = kmalloc((pagesize + 64) * blocksize, GFP_NOIO);
- if (blockbuffer == NULL) {
- printk(KERN_WARNING "alauda_write_data: Out of memory\n");
+ if (!blockbuffer) {
kfree(buffer);
return USB_STOR_TRANSPORT_ERROR;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 33eb923df892..8cd2926fb1fe 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -296,6 +296,14 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
+ /* Some even totally fail to indicate a cache */
+ if (us->fflags & US_FL_ALWAYS_SYNC) {
+ /* don't read caching information */
+ sdev->skip_ms_page_8 = 1;
+ sdev->skip_ms_page_3f = 1;
+ /* assume sync is needed */
+ sdev->wce_default_on = 1;
+ }
} else {
/*
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index c5797fa2125e..3aeaa536c44f 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -766,10 +766,8 @@ sddr09_read_data(struct us_data *us,
len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
buffer = kmalloc(len, GFP_NOIO);
- if (buffer == NULL) {
- printk(KERN_WARNING "sddr09_read_data: Out of memory\n");
+ if (!buffer)
return -ENOMEM;
- }
// This could be made much more efficient by checking for
// contiguous LBA's. Another exercise left to the student.
@@ -1004,10 +1002,8 @@ sddr09_write_data(struct us_data *us,
pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);
blocklen = (pagelen << info->blockshift);
blockbuffer = kmalloc(blocklen, GFP_NOIO);
- if (!blockbuffer) {
- printk(KERN_WARNING "sddr09_write_data: Out of memory\n");
+ if (!blockbuffer)
return -ENOMEM;
- }
/*
* Since we don't write the user data directly to the device,
@@ -1017,8 +1013,7 @@ sddr09_write_data(struct us_data *us,
len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
buffer = kmalloc(len, GFP_NOIO);
- if (buffer == NULL) {
- printk(KERN_WARNING "sddr09_write_data: Out of memory\n");
+ if (!buffer) {
kfree(blockbuffer);
return -ENOMEM;
}
@@ -1241,8 +1236,7 @@ sddr09_read_map(struct us_data *us) {
alloc_blocks = min(numblocks, SDDR09_READ_MAP_BUFSZ >> CONTROL_SHIFT);
alloc_len = (alloc_blocks << CONTROL_SHIFT);
buffer = kmalloc(alloc_len, GFP_NOIO);
- if (buffer == NULL) {
- printk(KERN_WARNING "sddr09_read_map: out of memory\n");
+ if (!buffer) {
result = -1;
goto done;
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index aa3539238848..af3c7eecff91 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -338,6 +338,13 @@ UNUSUAL_DEV( 0x046b, 0xff40, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_WP_DETECT),
+/* Reported by Egbert Eich <eich@suse.com> */
+UNUSUAL_DEV( 0x0480, 0xd010, 0x0100, 0x9999,
+ "Toshiba",
+ "External USB 3.0",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_ALWAYS_SYNC),
+
/* Patch submitted by Philipp Friedrich <philipp@void.at> */
UNUSUAL_DEV( 0x0482, 0x0100, 0x0100, 0x0100,
"Kyocera",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index ef2d8cde6ef7..2cba13a532cd 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -498,7 +498,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS |
+ US_FL_ALWAYS_SYNC);
p = quirks;
while (*p) {
@@ -581,6 +582,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
+ case 'y':
+ f |= US_FL_ALWAYS_SYNC;
+ break;
/* Ignore unrecognized flag characters */
}
}
@@ -794,10 +798,8 @@ static int usb_stor_acquire_resources(struct us_data *us)
struct task_struct *th;
us->current_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!us->current_urb) {
- usb_stor_dbg(us, "URB allocation failed\n");
+ if (!us->current_urb)
return -ENOMEM;
- }
/*
* Just before we start our control thread, initialize
@@ -1070,17 +1072,17 @@ int usb_stor_probe2(struct us_data *us)
result = usb_stor_acquire_resources(us);
if (result)
goto BadDevice;
+ usb_autopm_get_interface_no_resume(us->pusb_intf);
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
dev_name(&us->pusb_intf->dev));
result = scsi_add_host(us_to_host(us), dev);
if (result) {
dev_warn(dev,
"Unable to add the scsi host\n");
- goto BadDevice;
+ goto HostAddErr;
}
/* Submit the delayed_work for SCSI-device scanning */
- usb_autopm_get_interface_no_resume(us->pusb_intf);
set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
if (delay_use > 0)
@@ -1090,6 +1092,8 @@ int usb_stor_probe2(struct us_data *us)
return 0;
/* We come here if there are any problems */
+HostAddErr:
+ usb_autopm_put_interface_no_suspend(us->pusb_intf);
BadDevice:
usb_stor_dbg(us, "storage_probe() failed\n");
release_everything(us);
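With the new 'y' flag character parsed by usb_stor_adjust_quirks(), US_FL_ALWAYS_SYNC can also be requested at runtime for devices not (yet) listed in unusual_devs.h, using the usual usb-storage quirks syntax, for example on the kernel command line:

    usb-storage.quirks=0480:d010:y

or, before plugging the device, via the writable module parameter (standard location, shown for illustration):

    echo 0480:d010:y > /sys/module/usb_storage/parameters/quirks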
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 545d09b8081d..5133a0792eb0 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -499,10 +499,8 @@ static int skel_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&interface->dev, "Out of memory\n");
+ if (!dev)
goto error;
- }
kref_init(&dev->kref);
sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
mutex_init(&dev->io_mutex);
@@ -526,17 +524,11 @@ static int skel_probe(struct usb_interface *interface,
dev->bulk_in_size = buffer_size;
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!dev->bulk_in_buffer) {
- dev_err(&interface->dev,
- "Could not allocate bulk_in_buffer\n");
+ if (!dev->bulk_in_buffer)
goto error;
- }
dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->bulk_in_urb) {
- dev_err(&interface->dev,
- "Could not allocate bulk_in_urb\n");
+ if (!dev->bulk_in_urb)
goto error;
- }
}
if (!dev->bulk_out_endpointAddr &&
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index 17646b25343f..eeefa29f8aa2 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -1,6 +1,7 @@
config USBIP_CORE
tristate "USB/IP support"
- depends on USB_COMMON && NET
+ depends on NET
+ select USB_COMMON
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
@@ -24,6 +25,27 @@ config USBIP_VHCI_HCD
To compile this driver as a module, choose M here: the
module will be called vhci-hcd.
+config USBIP_VHCI_HC_PORTS
+ int "Number of ports per USB/IP virtual host controller"
+ range 1 31
+ default 8
+ depends on USBIP_VHCI_HCD
+ ---help---
+ To increase the number of ports available to the USB/IP
+ virtual host controller driver, this defines the number of
+ ports per USB/IP virtual host controller.
+
+config USBIP_VHCI_NR_HCS
+ int "Number of USB/IP virtual host controllers"
+ range 1 128
+ default 1
+ depends on USBIP_VHCI_HCD
+ ---help---
+ To increase the number of ports available to the USB/IP
+ virtual host controller driver, this defines the number of
+ USB/IP virtual host controllers, as if additional physical
+ host controllers were being added.
+
config USBIP_HOST
tristate "Host driver"
depends on USBIP_CORE && USB
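The two new options scale the virtual USB/IP ports in two dimensions, and the later vhci changes size everything from them, so the total number of attachable devices is simply their product. An illustrative .config fragment (not part of the patch):

    CONFIG_USBIP_VHCI_HC_PORTS=15
    CONFIG_USBIP_VHCI_NR_HCS=4
    # -> 4 vhci_hcd platform devices, 4 * 15 = 60 importable ports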
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 2df63e305722..191b176ffedf 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -461,7 +461,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
priv->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->urb) {
- dev_err(&udev->dev, "malloc urb\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
index a863a98a91ce..88b71c4e068f 100644
--- a/drivers/usb/usbip/vhci.h
+++ b/drivers/usb/usbip/vhci.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015 Nobuo Iwata
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -72,13 +73,25 @@ struct vhci_unlink {
};
/* Number of supported ports. Value has an upperbound of USB_MAXCHILDREN */
-#define VHCI_NPORTS 8
+#ifdef CONFIG_USBIP_VHCI_HC_PORTS
+#define VHCI_HC_PORTS CONFIG_USBIP_VHCI_HC_PORTS
+#else
+#define VHCI_HC_PORTS 8
+#endif
+
+#ifdef CONFIG_USBIP_VHCI_NR_HCS
+#define VHCI_NR_HCS CONFIG_USBIP_VHCI_NR_HCS
+#else
+#define VHCI_NR_HCS 1
+#endif
+
+#define MAX_STATUS_NAME 16
/* for usb_bus.hcpriv */
struct vhci_hcd {
spinlock_t lock;
- u32 port_status[VHCI_NPORTS];
+ u32 port_status[VHCI_HC_PORTS];
unsigned resuming:1;
unsigned long re_timeout;
@@ -90,14 +103,19 @@ struct vhci_hcd {
* wIndex shows the port number and begins from 1.
* But, the index of this array begins from 0.
*/
- struct vhci_device vdev[VHCI_NPORTS];
+ struct vhci_device vdev[VHCI_HC_PORTS];
};
-extern struct vhci_hcd *the_controller;
-extern const struct attribute_group dev_attr_group;
+extern int vhci_num_controllers;
+extern struct platform_device **vhci_pdevs;
+extern struct attribute_group vhci_attr_group;
/* vhci_hcd.c */
-void rh_port_connect(int rhport, enum usb_device_speed speed);
+void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed);
+
+/* vhci_sysfs.c */
+int vhci_init_attr_group(void);
+void vhci_finish_attr_group(void);
/* vhci_rx.c */
struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum);
@@ -106,9 +124,14 @@ int vhci_rx_loop(void *data);
/* vhci_tx.c */
int vhci_tx_loop(void *data);
-static inline struct vhci_device *port_to_vdev(__u32 port)
+static inline __u32 port_to_rhport(__u32 port)
+{
+ return port % VHCI_HC_PORTS;
+}
+
+static inline int port_to_pdev_nr(__u32 port)
{
- return &the_controller->vdev[port];
+ return port / VHCI_HC_PORTS;
}
static inline struct vhci_hcd *hcd_to_vhci(struct usb_hcd *hcd)
@@ -116,14 +139,25 @@ static inline struct vhci_hcd *hcd_to_vhci(struct usb_hcd *hcd)
return (struct vhci_hcd *) (hcd->hcd_priv);
}
+static inline struct device *hcd_dev(struct usb_hcd *hcd)
+{
+ return (hcd)->self.controller;
+}
+
+static inline const char *hcd_name(struct usb_hcd *hcd)
+{
+ return (hcd)->self.bus_name;
+}
+
static inline struct usb_hcd *vhci_to_hcd(struct vhci_hcd *vhci)
{
return container_of((void *) vhci, struct usb_hcd, hcd_priv);
}
-static inline struct device *vhci_dev(struct vhci_hcd *vhci)
+static inline struct vhci_hcd *vdev_to_vhci(struct vhci_device *vdev)
{
- return vhci_to_hcd(vhci)->self.controller;
+ return container_of(
+ (void *)(vdev - vdev->rhport), struct vhci_hcd, vdev);
}
#endif /* __USBIP_VHCI_H */
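The new inline helpers define the mapping between a global port number (as exposed through sysfs) and a (controller, root-hub port) pair, while vdev_to_vhci() walks back from a vhci_device to its containing vhci_hcd by pointer arithmetic over the vdev[] array. A quick worked example with the default VHCI_HC_PORTS of 8:

    global port 11: port_to_pdev_nr(11) = 11 / 8 = 1  (second controller)
                    port_to_rhport(11)  = 11 % 8 = 3  (its fourth root-hub port)

    /* vdev_to_vhci(): vdev - vdev->rhport points at vdev[0], so
     * container_of() on the embedded array recovers the vhci_hcd. */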
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 2e0450bec1b1..03eccf29ace0 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015-2016 Nobuo Iwata
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +57,9 @@ static int vhci_get_frame_number(struct usb_hcd *hcd);
static const char driver_name[] = "vhci_hcd";
static const char driver_desc[] = "USB/IP Virtual Host Controller";
-struct vhci_hcd *the_controller;
+int vhci_num_controllers = VHCI_NR_HCS;
+
+struct platform_device **vhci_pdevs;
static const char * const bit_desc[] = {
"CONNECTION", /*0*/
@@ -119,47 +122,59 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
pr_debug("\n");
}
-void rh_port_connect(int rhport, enum usb_device_speed speed)
+void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed)
{
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ int rhport = vdev->rhport;
+ u32 status;
unsigned long flags;
usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
+
+ status = vhci->port_status[rhport];
- the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
- | (1 << USB_PORT_FEAT_C_CONNECTION);
+ status |= USB_PORT_STAT_CONNECTION | (1 << USB_PORT_FEAT_C_CONNECTION);
switch (speed) {
case USB_SPEED_HIGH:
- the_controller->port_status[rhport] |= USB_PORT_STAT_HIGH_SPEED;
+ status |= USB_PORT_STAT_HIGH_SPEED;
break;
case USB_SPEED_LOW:
- the_controller->port_status[rhport] |= USB_PORT_STAT_LOW_SPEED;
+ status |= USB_PORT_STAT_LOW_SPEED;
break;
default:
break;
}
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ vhci->port_status[rhport] = status;
+
+ spin_unlock_irqrestore(&vhci->lock, flags);
- usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
+ usb_hcd_poll_rh_status(vhci_to_hcd(vhci));
}
-static void rh_port_disconnect(int rhport)
+static void rh_port_disconnect(struct vhci_device *vdev)
{
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ int rhport = vdev->rhport;
+ u32 status;
unsigned long flags;
usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
+
+ status = vhci->port_status[rhport];
+
+ status &= ~USB_PORT_STAT_CONNECTION;
+ status |= (1 << USB_PORT_FEAT_C_CONNECTION);
- the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
- the_controller->port_status[rhport] |=
- (1 << USB_PORT_FEAT_C_CONNECTION);
+ vhci->port_status[rhport] = status;
- spin_unlock_irqrestore(&the_controller->lock, flags);
- usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
+ spin_unlock_irqrestore(&vhci->lock, flags);
+ usb_hcd_poll_rh_status(vhci_to_hcd(vhci));
}
#define PORT_C_MASK \
@@ -188,7 +203,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
int changed = 0;
unsigned long flags;
- retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
+ retval = DIV_ROUND_UP(VHCI_HC_PORTS + 1, 8);
memset(buf, 0, retval);
vhci = hcd_to_vhci(hcd);
@@ -200,7 +215,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
}
/* check pseudo status register for each port */
- for (rhport = 0; rhport < VHCI_NPORTS; rhport++) {
+ for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
if ((vhci->port_status[rhport] & PORT_C_MASK)) {
/* The status of a port has been changed, */
usbip_dbg_vhci_rh("port %d status changed\n", rhport);
@@ -225,7 +240,7 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
- desc->bNbrPorts = VHCI_NPORTS;
+ desc->bNbrPorts = VHCI_HC_PORTS;
desc->u.hs.DeviceRemovable[0] = 0xff;
desc->u.hs.DeviceRemovable[1] = 0xff;
}
@@ -238,7 +253,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
int rhport;
unsigned long flags;
- u32 prev_port_status[VHCI_NPORTS];
+ u32 prev_port_status[VHCI_HC_PORTS];
if (!HCD_HW_ACCESSIBLE(hcd))
return -ETIMEDOUT;
@@ -249,7 +264,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
*/
usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
wIndex);
- if (wIndex > VHCI_NPORTS)
+ if (wIndex > VHCI_HC_PORTS)
pr_err("invalid port number %d\n", wIndex);
rhport = ((__u8)(wIndex & 0x00ff)) - 1;
@@ -315,7 +330,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
- if (wIndex > VHCI_NPORTS || wIndex < 1) {
+ if (wIndex > VHCI_HC_PORTS || wIndex < 1) {
pr_err("invalid port number %d\n", wIndex);
retval = -EPIPE;
}
@@ -416,14 +431,27 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
static struct vhci_device *get_vdev(struct usb_device *udev)
{
- int i;
+ struct platform_device *pdev;
+ struct usb_hcd *hcd;
+ struct vhci_hcd *vhci;
+ int pdev_nr, rhport;
if (!udev)
return NULL;
- for (i = 0; i < VHCI_NPORTS; i++)
- if (the_controller->vdev[i].udev == udev)
- return port_to_vdev(i);
+ for (pdev_nr = 0; pdev_nr < vhci_num_controllers; pdev_nr++) {
+ pdev = *(vhci_pdevs + pdev_nr);
+ if (pdev == NULL)
+ continue;
+ hcd = platform_get_drvdata(pdev);
+ if (hcd == NULL)
+ continue;
+ vhci = hcd_to_vhci(hcd);
+ for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
+ if (vhci->vdev[rhport].udev == udev)
+ return &vhci->vdev[rhport];
+ }
+ }
return NULL;
}
@@ -432,6 +460,7 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
unsigned long flags;
if (!vdev) {
@@ -447,7 +476,7 @@ static void vhci_tx_urb(struct urb *urb)
spin_lock_irqsave(&vdev->priv_lock, flags);
- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
+ priv->seqnum = atomic_inc_return(&vhci->seqnum);
if (priv->seqnum == 0xffff)
dev_info(&urb->dev->dev, "seqnum max\n");
@@ -465,7 +494,9 @@ static void vhci_tx_urb(struct urb *urb)
static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
+ struct vhci_hcd *vhci = hcd_to_vhci(hcd);
struct device *dev = &urb->dev->dev;
+ u8 portnum = urb->dev->portnum;
int ret = 0;
struct vhci_device *vdev;
unsigned long flags;
@@ -473,26 +504,30 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
+ if (portnum > VHCI_HC_PORTS) {
+ pr_err("invalid port number %d\n", portnum);
+ return -ENODEV;
+ }
+ vdev = &vhci->vdev[portnum-1];
+
/* patch to usb_sg_init() is in 2.5.60 */
BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
if (urb->status != -EINPROGRESS) {
dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return urb->status;
}
- vdev = port_to_vdev(urb->dev->portnum-1);
-
/* refuse enqueue for dead connection */
spin_lock(&vdev->ud.lock);
if (vdev->ud.status == VDEV_ST_NULL ||
vdev->ud.status == VDEV_ST_ERROR) {
dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return -ENODEV;
}
spin_unlock(&vdev->ud.lock);
@@ -565,17 +600,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
out:
vhci_tx_urb(urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return 0;
no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
if (!ret)
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
- urb, urb->status);
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
return ret;
}
@@ -627,19 +661,20 @@ no_need_unlink:
*/
static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
+ struct vhci_hcd *vhci = hcd_to_vhci(hcd);
struct vhci_priv *priv;
struct vhci_device *vdev;
unsigned long flags;
pr_info("dequeue a urb %p\n", urb);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
priv = urb->hcpriv;
if (!priv) {
/* URB was never linked! or will be soon given back by
* vhci_rx. */
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return -EIDRM;
}
@@ -648,7 +683,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret) {
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return ret;
}
}
@@ -676,10 +711,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
- urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
+ usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
+ spin_lock_irqsave(&vhci->lock, flags);
} else {
/* tcp connection is alive */
@@ -691,12 +725,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
if (!unlink) {
spin_unlock(&vdev->priv_lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
+ unlink->seqnum = atomic_inc_return(&vhci->seqnum);
if (unlink->seqnum == 0xffff)
pr_info("seqnum max\n");
@@ -712,7 +746,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
spin_unlock(&vdev->priv_lock);
}
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
@@ -720,10 +754,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ struct usb_hcd *hcd = vhci_to_hcd(vhci);
struct vhci_unlink *unlink, *tmp;
unsigned long flags;
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
@@ -752,24 +788,23 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
urb->status = -ENODEV;
- usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
list_del(&unlink->list);
spin_unlock(&vdev->priv_lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
- urb->status);
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->priv_lock);
kfree(unlink);
}
spin_unlock(&vdev->priv_lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
}
/*
@@ -827,7 +862,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
* is actually given back by vhci_rx after receiving its return pdu.
*
*/
- rh_port_disconnect(vdev->rhport);
+ rh_port_disconnect(vdev);
pr_info("disconnect device\n");
}
@@ -866,7 +901,7 @@ static void vhci_device_unusable(struct usbip_device *ud)
static void vhci_device_init(struct vhci_device *vdev)
{
- memset(vdev, 0, sizeof(*vdev));
+ memset(vdev, 0, sizeof(struct vhci_device));
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
@@ -887,17 +922,34 @@ static void vhci_device_init(struct vhci_device *vdev)
usbip_start_eh(&vdev->ud);
}
+static int hcd_name_to_id(const char *name)
+{
+ char *c;
+ long val;
+ int ret;
+
+ c = strchr(name, '.');
+ if (c == NULL)
+ return 0;
+
+ ret = kstrtol(c+1, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
static int vhci_start(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci = hcd_to_vhci(hcd);
- int rhport;
+ int id, rhport;
int err = 0;
usbip_dbg_vhci_hc("enter vhci_start\n");
/* initialize private data of usb_hcd */
- for (rhport = 0; rhport < VHCI_NPORTS; rhport++) {
+ for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
struct vhci_device *vdev = &vhci->vdev[rhport];
vhci_device_init(vdev);
@@ -910,11 +962,26 @@ static int vhci_start(struct usb_hcd *hcd)
hcd->power_budget = 0; /* no limit */
hcd->uses_new_polling = 1;
+ id = hcd_name_to_id(hcd_name(hcd));
+ if (id < 0) {
+ pr_err("invalid vhci name %s\n", hcd_name(hcd));
+ return -EINVAL;
+ }
+
/* vhci_hcd is now ready to be controlled through sysfs */
- err = sysfs_create_group(&vhci_dev(vhci)->kobj, &dev_attr_group);
- if (err) {
- pr_err("create sysfs files\n");
- return err;
+ if (id == 0) {
+ err = vhci_init_attr_group();
+ if (err) {
+ pr_err("init attr group\n");
+ return err;
+ }
+ err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
+ if (err) {
+ pr_err("create sysfs files\n");
+ vhci_finish_attr_group();
+ return err;
+ }
+ pr_info("created sysfs %s\n", hcd_name(hcd));
}
return 0;
@@ -923,15 +990,19 @@ static int vhci_start(struct usb_hcd *hcd)
static void vhci_stop(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci = hcd_to_vhci(hcd);
- int rhport = 0;
+ int id, rhport;
usbip_dbg_vhci_hc("stop VHCI controller\n");
/* 1. remove the userland interface of vhci_hcd */
- sysfs_remove_group(&vhci_dev(vhci)->kobj, &dev_attr_group);
+ id = hcd_name_to_id(hcd_name(hcd));
+ if (id == 0) {
+ sysfs_remove_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
+ vhci_finish_attr_group();
+ }
/* 2. shutdown all the ports of vhci_hcd */
- for (rhport = 0; rhport < VHCI_NPORTS; rhport++) {
+ for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
struct vhci_device *vdev = &vhci->vdev[rhport];
usbip_event_add(&vdev->ud, VDEV_EVENT_REMOVED);
@@ -1025,9 +1096,6 @@ static int vhci_hcd_probe(struct platform_device *pdev)
}
hcd->has_tt = 1;
- /* this is private data for vhci_hcd */
- the_controller = hcd_to_vhci(hcd);
-
/*
* Finish generic HCD structure initialization and register.
* Call the driver's reset() and start() routines.
@@ -1036,7 +1104,6 @@ static int vhci_hcd_probe(struct platform_device *pdev)
if (ret != 0) {
pr_err("usb_add_hcd failed %d\n", ret);
usb_put_hcd(hcd);
- the_controller = NULL;
return ret;
}
@@ -1059,7 +1126,6 @@ static int vhci_hcd_remove(struct platform_device *pdev)
*/
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
- the_controller = NULL;
return 0;
}
@@ -1070,21 +1136,24 @@ static int vhci_hcd_remove(struct platform_device *pdev)
static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd;
- int rhport = 0;
+ struct vhci_hcd *vhci;
+ int rhport;
int connected = 0;
int ret = 0;
unsigned long flags;
hcd = platform_get_drvdata(pdev);
+ if (!hcd)
+ return 0;
+ vhci = hcd_to_vhci(hcd);
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
- for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
- if (the_controller->port_status[rhport] &
- USB_PORT_STAT_CONNECTION)
+ for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++)
+ if (vhci->port_status[rhport] & USB_PORT_STAT_CONNECTION)
connected += 1;
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
if (connected > 0) {
dev_info(&pdev->dev,
@@ -1106,6 +1175,8 @@ static int vhci_hcd_resume(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata(pdev);
+ if (!hcd)
+ return 0;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
@@ -1129,52 +1200,78 @@ static struct platform_driver vhci_driver = {
},
};
-/*
- * The VHCI 'device' is 'virtual'; not a real plug&play hardware.
- * We need to add this virtual device as a platform device arbitrarily:
- * 1. platform_device_register()
- */
-static void the_pdev_release(struct device *dev)
+static int add_platform_device(int id)
{
+ struct platform_device *pdev;
+ int dev_nr;
+
+ if (id == 0)
+ dev_nr = -1;
+ else
+ dev_nr = id;
+
+ pdev = platform_device_register_simple(driver_name, dev_nr, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ *(vhci_pdevs + id) = pdev;
+ return 0;
}
-static struct platform_device the_pdev = {
- /* should be the same name as driver_name */
- .name = driver_name,
- .id = -1,
- .dev = {
- .release = the_pdev_release,
- },
-};
+static void del_platform_devices(void)
+{
+ struct platform_device *pdev;
+ int i;
+
+ for (i = 0; i < vhci_num_controllers; i++) {
+ pdev = *(vhci_pdevs + i);
+ if (pdev != NULL)
+ platform_device_unregister(pdev);
+ *(vhci_pdevs + i) = NULL;
+ }
+ sysfs_remove_link(&platform_bus.kobj, driver_name);
+}
static int __init vhci_hcd_init(void)
{
- int ret;
+ int i, ret;
if (usb_disabled())
return -ENODEV;
+ if (vhci_num_controllers < 1)
+ vhci_num_controllers = 1;
+
+ vhci_pdevs = kcalloc(vhci_num_controllers, sizeof(void *), GFP_KERNEL);
+ if (vhci_pdevs == NULL)
+ return -ENOMEM;
+
ret = platform_driver_register(&vhci_driver);
if (ret)
goto err_driver_register;
- ret = platform_device_register(&the_pdev);
- if (ret)
- goto err_platform_device_register;
+ for (i = 0; i < vhci_num_controllers; i++) {
+ ret = add_platform_device(i);
+ if (ret)
+ goto err_platform_device_register;
+ }
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return ret;
err_platform_device_register:
+ del_platform_devices();
platform_driver_unregister(&vhci_driver);
err_driver_register:
+ kfree(vhci_pdevs);
return ret;
}
static void __exit vhci_hcd_exit(void)
{
- platform_device_unregister(&the_pdev);
+ del_platform_devices();
platform_driver_unregister(&vhci_driver);
+ kfree(vhci_pdevs);
}
module_init(vhci_hcd_init);
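Because every controller is now its own platform device, the sysfs attribute group is created only once, on the controller whose name parses to id 0. add_platform_device() registers the first device with id -1 (so its name carries no numeric suffix) and the rest with their index; hcd_name_to_id() parses that back out. Assuming the hcd name follows the platform device name, the mapping looks like this:

    "vhci_hcd"   -> no '.' in the name -> id 0  (first controller)
    "vhci_hcd.1" -> kstrtol() on "1"   -> id 1
    "vhci_hcd.3" -> kstrtol() on "3"   -> id 3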
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index d656e0edc3d5..fc2d319e2360 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -70,6 +70,7 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
static void vhci_recv_ret_submit(struct vhci_device *vdev,
struct usbip_header *pdu)
{
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
unsigned long flags;
@@ -81,7 +82,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
if (!urb) {
pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
pr_info("max seqnum %d\n",
- atomic_read(&the_controller->seqnum));
+ atomic_read(&vhci->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
@@ -105,11 +106,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock_irqsave(&the_controller->lock, flags);
- usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
+ usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
+ spin_unlock_irqrestore(&vhci->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+ usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
usbip_dbg_vhci_rx("Leave\n");
}
@@ -142,6 +143,7 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
static void vhci_recv_ret_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
+ struct vhci_hcd *vhci = vdev_to_vhci(vdev);
struct vhci_unlink *unlink;
struct urb *urb;
unsigned long flags;
@@ -174,12 +176,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
- usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
+ usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
+ spin_unlock_irqrestore(&vhci->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
- urb->status);
+ usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
}
kfree(unlink);
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 5b5462eb1665..c404017c1b5a 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015-2016 Nobuo Iwata
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +21,8 @@
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/net.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "usbip_common.h"
#include "vhci.h"
@@ -27,106 +30,190 @@
/* TODO: refine locking ?*/
/* Sysfs entry to show port status */
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
- char *out)
+static ssize_t status_show_vhci(int pdev_nr, char *out)
{
+ struct platform_device *pdev = *(vhci_pdevs + pdev_nr);
+ struct vhci_hcd *vhci;
char *s = out;
int i = 0;
unsigned long flags;
- BUG_ON(!the_controller || !out);
+ if (!pdev || !out) {
+ usbip_dbg_vhci_sysfs("show status error\n");
+ return 0;
+ }
+
+ vhci = hcd_to_vhci(platform_get_drvdata(pdev));
- spin_lock_irqsave(&the_controller->lock, flags);
+ spin_lock_irqsave(&vhci->lock, flags);
/*
* output example:
- * prt sta spd dev socket local_busid
- * 000 004 000 000 c5a7bb80 1-2.3
- * 001 004 000 000 d8cee980 2-3.4
+ * port sta spd dev socket local_busid
+ * 0000 004 000 00000000 c5a7bb80 1-2.3
+ * 0001 004 000 00000000 d8cee980 2-3.4
*
* IP address can be retrieved from a socket pointer address by looking
* up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
* port number and its peer IP address.
*/
- out += sprintf(out,
- "prt sta spd bus dev socket local_busid\n");
-
- for (i = 0; i < VHCI_NPORTS; i++) {
- struct vhci_device *vdev = port_to_vdev(i);
+ for (i = 0; i < VHCI_HC_PORTS; i++) {
+ struct vhci_device *vdev = &vhci->vdev[i];
spin_lock(&vdev->ud.lock);
- out += sprintf(out, "%03u %03u ", i, vdev->ud.status);
+ out += sprintf(out, "%04u %03u ",
+ (pdev_nr * VHCI_HC_PORTS) + i,
+ vdev->ud.status);
if (vdev->ud.status == VDEV_ST_USED) {
out += sprintf(out, "%03u %08x ",
- vdev->speed, vdev->devid);
- out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
- out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
+ vdev->speed, vdev->devid);
+ out += sprintf(out, "%16p %s",
+ vdev->ud.tcp_socket,
+ dev_name(&vdev->udev->dev));
} else {
- out += sprintf(out, "000 000 000 0000000000000000 0-0");
+ out += sprintf(out, "000 00000000 ");
+ out += sprintf(out, "0000000000000000 0-0");
}
out += sprintf(out, "\n");
spin_unlock(&vdev->ud.lock);
}
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
+
+ return out - s;
+}
+
+static ssize_t status_show_not_ready(int pdev_nr, char *out)
+{
+ char *s = out;
+ int i = 0;
+
+ for (i = 0; i < VHCI_HC_PORTS; i++) {
+ out += sprintf(out, "%04u %03u ",
+ (pdev_nr * VHCI_HC_PORTS) + i,
+ VDEV_ST_NOTASSIGNED);
+ out += sprintf(out, "000 00000000 0000000000000000 0-0");
+ out += sprintf(out, "\n");
+ }
+ return out - s;
+}
+
+static int status_name_to_id(const char *name)
+{
+ char *c;
+ long val;
+ int ret;
+
+ c = strchr(name, '.');
+ if (c == NULL)
+ return 0;
+ ret = kstrtol(c+1, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *out)
+{
+ char *s = out;
+ int pdev_nr;
+
+ out += sprintf(out,
+ "port sta spd dev socket local_busid\n");
+
+ pdev_nr = status_name_to_id(attr->attr.name);
+ if (pdev_nr < 0)
+ out += status_show_not_ready(pdev_nr, out);
+ else
+ out += status_show_vhci(pdev_nr, out);
+
+ return out - s;
+}
+
+static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
+ char *out)
+{
+ char *s = out;
+
+ out += sprintf(out, "%d\n", VHCI_HC_PORTS * vhci_num_controllers);
return out - s;
}
-static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RO(nports);
/* Sysfs entry to shutdown a virtual connection */
-static int vhci_port_disconnect(__u32 rhport)
+static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport)
{
- struct vhci_device *vdev;
+ struct vhci_device *vdev = &vhci->vdev[rhport];
unsigned long flags;
usbip_dbg_vhci_sysfs("enter\n");
/* lock */
- spin_lock_irqsave(&the_controller->lock, flags);
-
- vdev = port_to_vdev(rhport);
-
+ spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
+
if (vdev->ud.status == VDEV_ST_NULL) {
pr_err("not connected %d\n", vdev->ud.status);
/* unlock */
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return -EINVAL;
}
/* unlock */
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
return 0;
}
+static int valid_port(__u32 pdev_nr, __u32 rhport)
+{
+ if (pdev_nr >= vhci_num_controllers) {
+ pr_err("pdev %u\n", pdev_nr);
+ return 0;
+ }
+ if (rhport >= VHCI_HC_PORTS) {
+ pr_err("rhport %u\n", rhport);
+ return 0;
+ }
+ return 1;
+}
+
static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int err;
- __u32 rhport = 0;
+ __u32 port = 0, pdev_nr = 0, rhport = 0;
+ struct usb_hcd *hcd;
+ int ret;
- if (sscanf(buf, "%u", &rhport) != 1)
+ if (kstrtoint(buf, 10, &port) < 0)
return -EINVAL;
- /* check rhport */
- if (rhport >= VHCI_NPORTS) {
- dev_err(dev, "invalid port %u\n", rhport);
+ pdev_nr = port_to_pdev_nr(port);
+ rhport = port_to_rhport(port);
+
+ if (!valid_port(pdev_nr, rhport))
return -EINVAL;
+
+ hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+ if (hcd == NULL) {
+ dev_err(dev, "port is not ready %u\n", port);
+ return -EAGAIN;
}
- err = vhci_port_disconnect(rhport);
- if (err < 0)
+ ret = vhci_port_disconnect(hcd_to_vhci(hcd), rhport);
+ if (ret < 0)
return -EINVAL;
usbip_dbg_vhci_sysfs("Leave\n");
@@ -135,16 +222,12 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
-/* Sysfs entry to establish a virtual connection */
-static int valid_args(__u32 rhport, enum usb_device_speed speed)
+static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
{
- /* check rhport */
- if (rhport >= VHCI_NPORTS) {
- pr_err("port %u\n", rhport);
- return -EINVAL;
+ if (!valid_port(pdev_nr, rhport)) {
+ return 0;
}
- /* check speed */
switch (speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
@@ -154,12 +237,13 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
default:
pr_err("Failed attach request for unsupported USB speed: %s\n",
usb_speed_string(speed));
- return -EINVAL;
+ return 0;
}
- return 0;
+ return 1;
}
+/* Sysfs entry to establish a virtual connection */
/*
* To start a new USB/IP attachment, a userland program needs to setup a TCP
* connection and then write its socket descriptor with remote device
@@ -174,10 +258,12 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct vhci_device *vdev;
struct socket *socket;
int sockfd = 0;
- __u32 rhport = 0, devid = 0, speed = 0;
+ __u32 port = 0, pdev_nr = 0, rhport = 0, devid = 0, speed = 0;
+ struct usb_hcd *hcd;
+ struct vhci_hcd *vhci;
+ struct vhci_device *vdev;
int err;
unsigned long flags;
@@ -187,16 +273,28 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
* @devid: unique device identifier in a remote host
* @speed: usb device speed in a remote host
*/
- if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 4)
+ if (sscanf(buf, "%u %u %u %u", &port, &sockfd, &devid, &speed) != 4)
return -EINVAL;
+ pdev_nr = port_to_pdev_nr(port);
+ rhport = port_to_rhport(port);
- usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
- rhport, sockfd, devid, speed);
+ usbip_dbg_vhci_sysfs("port(%u) pdev(%d) rhport(%u)\n",
+ port, pdev_nr, rhport);
+ usbip_dbg_vhci_sysfs("sockfd(%u) devid(%u) speed(%u)\n",
+ sockfd, devid, speed);
/* check received parameters */
- if (valid_args(rhport, speed) < 0)
+ if (!valid_args(pdev_nr, rhport, speed))
return -EINVAL;
+ hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+ if (hcd == NULL) {
+ dev_err(dev, "port %d is not ready\n", port);
+ return -EAGAIN;
+ }
+ vhci = hcd_to_vhci(hcd);
+ vdev = &vhci->vdev[rhport];
+
/* Extract socket from fd. */
socket = sockfd_lookup(sockfd, &err);
if (!socket)
@@ -205,14 +303,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
/* now need lock until setting vdev status as used */
/* begin a lock */
- spin_lock_irqsave(&the_controller->lock, flags);
- vdev = port_to_vdev(rhport);
+ spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
if (vdev->ud.status != VDEV_ST_NULL) {
/* end of the lock */
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
sockfd_put(socket);
@@ -220,9 +317,10 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- dev_info(dev,
- "rhport(%u) sockfd(%d) devid(%u) speed(%u) speed_str(%s)\n",
- rhport, sockfd, devid, speed, usb_speed_string(speed));
+ dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
+ pdev_nr, rhport, sockfd);
+ dev_info(dev, "devid(%u) speed(%u) speed_str(%s)\n",
+ devid, speed, usb_speed_string(speed));
vdev->devid = devid;
vdev->speed = speed;
@@ -230,26 +328,92 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->ud.status = VDEV_ST_NOTASSIGNED;
spin_unlock(&vdev->ud.lock);
- spin_unlock_irqrestore(&the_controller->lock, flags);
+ spin_unlock_irqrestore(&vhci->lock, flags);
/* end the lock */
vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
- rh_port_connect(rhport, speed);
+ rh_port_connect(vdev, speed);
return count;
}
static DEVICE_ATTR(attach, S_IWUSR, NULL, store_attach);
-static struct attribute *dev_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_detach.attr,
- &dev_attr_attach.attr,
- &dev_attr_usbip_debug.attr,
- NULL,
+#define MAX_STATUS_NAME 16
+
+struct status_attr {
+ struct device_attribute attr;
+ char name[MAX_STATUS_NAME+1];
};
-const struct attribute_group dev_attr_group = {
- .attrs = dev_attrs,
+static struct status_attr *status_attrs;
+
+static void set_status_attr(int id)
+{
+ struct status_attr *status;
+
+ status = status_attrs + id;
+ if (id == 0)
+ strcpy(status->name, "status");
+ else
+ snprintf(status->name, MAX_STATUS_NAME+1, "status.%d", id);
+ status->attr.attr.name = status->name;
+ status->attr.attr.mode = S_IRUGO;
+ status->attr.show = status_show;
+}
+
+static int init_status_attrs(void)
+{
+ int id;
+
+ status_attrs = kcalloc(vhci_num_controllers, sizeof(struct status_attr),
+ GFP_KERNEL);
+ if (status_attrs == NULL)
+ return -ENOMEM;
+
+ for (id = 0; id < vhci_num_controllers; id++)
+ set_status_attr(id);
+
+ return 0;
+}
+
+static void finish_status_attrs(void)
+{
+ kfree(status_attrs);
+}
+
+struct attribute_group vhci_attr_group = {
+ .attrs = NULL,
};
+
+int vhci_init_attr_group(void)
+{
+ struct attribute **attrs;
+ int ret, i;
+
+ attrs = kcalloc((vhci_num_controllers + 5), sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (attrs == NULL)
+ return -ENOMEM;
+
+ ret = init_status_attrs();
+ if (ret) {
+ kfree(attrs);
+ return ret;
+ }
+ *attrs = &dev_attr_nports.attr;
+ *(attrs + 1) = &dev_attr_detach.attr;
+ *(attrs + 2) = &dev_attr_attach.attr;
+ *(attrs + 3) = &dev_attr_usbip_debug.attr;
+ for (i = 0; i < vhci_num_controllers; i++)
+ *(attrs + i + 4) = &((status_attrs + i)->attr.attr);
+ vhci_attr_group.attrs = attrs;
+ return 0;
+}
+
+void vhci_finish_attr_group(void)
+{
+ finish_status_attrs();
+ kfree(vhci_attr_group.attrs);
+}
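
For illustration only, a note on the reworked interface: vhci now exposes nports, one status file per controller (status, status.1, ...), and globally numbered ports, where port = pdev_nr * VHCI_HC_PORTS + rhport as printed by status_show_vhci() above and parsed back by store_attach()/store_detach(). A minimal userspace sketch under stated assumptions; the sysfs path, the VHCI_HC_PORTS value and the helper name are illustrative guesses, not taken from this patch:

	#include <stdio.h>

	#define VHCI_HC_PORTS 8	/* assumed ports per vhci_hcd controller */

	/* Hypothetical helper: writes "port sockfd devid speed" in the format
	 * store_attach() expects; the sysfs path below is an assumption. */
	static int vhci_attach(int pdev_nr, int rhport, int sockfd,
			       unsigned int devid, unsigned int speed)
	{
		unsigned int port = pdev_nr * VHCI_HC_PORTS + rhport;
		FILE *f = fopen("/sys/devices/platform/vhci_hcd.0/attach", "w");

		if (!f)
			return -1;
		fprintf(f, "%u %d %u %u", port, sockfd, devid, speed);
		return fclose(f);
	}
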
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 8994a13819ab..7091848df6c8 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -450,7 +450,7 @@ static void vudc_shutdown(struct usbip_device *ud)
if (ud->tcp_socket)
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
- if (ud->tcp_tx) {
+ if (ud->tcp_rx) {
kthread_stop_put(ud->tcp_rx);
ud->tcp_rx = NULL;
}
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index 344bd9473475..e429b59f6f8a 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -142,7 +142,7 @@ static int v_recv_cmd_submit(struct vudc *udc,
urb_p->urb->status = -EINPROGRESS;
/* FIXME: more pipe setup to please usbip_common */
- urb_p->urb->pipe &= ~(11 << 30);
+ urb_p->urb->pipe &= ~(3 << 30);
switch (urb_p->ep->type) {
case USB_ENDPOINT_XFER_BULK:
urb_p->urb->pipe |= (PIPE_BULK << 30);
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index da1b872918b5..fb70cbef0671 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -610,8 +610,7 @@ static int cbaf_probe(struct usb_interface *iface,
cbaf->usb_iface = usb_get_intf(iface);
result = cbaf_check(cbaf);
if (result < 0) {
- dev_err(dev, "This device is not WUSB-CBAF compliant"
- "and is not supported yet.\n");
+ dev_err(dev, "This device is not WUSB-CBAF compliant and is not supported yet.\n");
goto error_check;
}
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 33acd1599e99..79b2b628066d 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -229,10 +229,8 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
dst_buf = kzalloc(dst_size, GFP_KERNEL);
- if (dst_buf == NULL) {
- printk(KERN_ERR "E: can't alloc destination buffer\n");
+ if (!dst_buf)
goto error_dst_buf;
- }
memset(iv, 0, sizeof(iv));
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index b66faaf3e842..8c9421b69da0 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -374,10 +374,8 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_keydvt_out keydvt_out;
hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
- if (hs == NULL) {
- dev_err(dev, "can't allocate handshake data\n");
+ if (!hs)
goto error_kzalloc;
- }
/* We need to turn encryption before beginning the 4way
* hshake (WUSB1.0[.3.2.2]) */
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index 60a10d21947d..ed4622279c63 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -271,16 +271,11 @@ int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
epd = &iface->cur_altsetting->endpoint[0].desc;
wa->nep_buffer_size = 1024;
wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
- if (wa->nep_buffer == NULL) {
- dev_err(dev,
- "Unable to allocate notification's read buffer\n");
+ if (!wa->nep_buffer)
goto error_nep_buffer;
- }
wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->nep_urb == NULL) {
- dev_err(dev, "Unable to allocate notification URB\n");
+ if (wa->nep_urb == NULL)
goto error_urb_alloc;
- }
usb_fill_int_urb(wa->nep_urb, usb_dev,
usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
wa->nep_buffer, wa->nep_buffer_size,
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 69af4fd9e072..167fcc71f5f6 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -2865,10 +2865,8 @@ int wa_dti_start(struct wahc *wa)
goto out;
wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL) {
- dev_err(dev, "Can't allocate DTI URB\n");
+ if (wa->dti_urb == NULL)
goto error_dti_urb_alloc;
- }
usb_fill_bulk_urb(
wa->dti_urb, wa->usb_dev,
usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0257f35cfb9d..0aa6c3c29d17 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -701,10 +701,8 @@ static int hwarc_neep_init(struct uwb_rc *rc)
goto error_rd_buffer;
}
hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (hwarc->neep_urb == NULL) {
- dev_err(dev, "Unable to allocate notification URB\n");
+ if (hwarc->neep_urb == NULL)
goto error_urb_alloc;
- }
usb_fill_int_urb(hwarc->neep_urb, usb_dev,
usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
hwarc->rd_buffer, PAGE_SIZE,
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 688691d9058d..65d4a3015542 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -70,7 +70,7 @@ static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
/*
* Lengths of PCIe/PCI-X Extended Config Capabilities
- * 0: Removed or masked from the user visible capabilty list
+ * 0: Removed or masked from the user visible capability list
* FF: Variable length
*/
static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
@@ -355,7 +355,7 @@ static int alloc_perm_bits(struct perm_bits *perm, int size)
* ignore whether a read/write exceeds the defined capability
* structure. We can do this because:
* - Standard config space is already dword aligned
- * - Capabilities are all dword alinged (bits 0:1 of next reserved)
+ * - Capabilities are all dword aligned (bits 0:1 of next reserved)
* - Express capabilities defined as dword aligned
*/
size = round_up(size, 4);
@@ -804,6 +804,40 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+ offset + PCI_EXP_DEVCTL);
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports PCIe
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use a PCIe FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+ u32 cap;
+ int ret;
+
+ *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+ ret = pci_user_read_config_dword(vdev->pdev,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ return count;
+}
+
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
@@ -811,26 +845,64 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
return -ENOMEM;
+ perm->writefn = vfio_exp_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Allow writes to device control fields (includes FLR!)
- * but not to devctl_phantom which could confuse IOMMU
- * or to the ARI bit in devctl2 which is set at probe time
+ * Allow writes to device control fields, except devctl_phantom,
+ * which could confuse IOMMU, and the ARI bit in devctl2, which
+ * is set at probe time. FLR gets virtualized via our writefn.
*/
- p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+ p_setw(perm, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
+static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports AF
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use an AF FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & PCI_AF_CTRL_FLR) {
+ u8 cap;
+ int ret;
+
+ *ctrl &= ~PCI_AF_CTRL_FLR;
+
+ ret = pci_user_read_config_byte(vdev->pdev,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ return count;
+}
+
/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
return -ENOMEM;
+ perm->writefn = vfio_af_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
- p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+ p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
return 0;
}
@@ -1516,10 +1588,10 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
* space which tracks reads and writes to bits that we emulate for
* the user. Initial values filled from device.
*
- * Using shared stuct perm_bits between all vfio-pci devices saves
+ * Using shared struct perm_bits between all vfio-pci devices saves
* us from allocating cfg_size buffers for virt and write for every
* device. We could remove vconfig and allocate individual buffers
- * for each area requring emulated bits, but the array of pointers
+ * for each area requiring emulated bits, but the array of pointers
* would be comparable in size (at least for standard config space).
*/
int vfio_config_init(struct vfio_pci_device *vdev)
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 152b43822ef1..c2e60893cd09 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -250,6 +250,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
+ unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
if (!is_irq_none(vdev))
@@ -259,35 +260,13 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!vdev->ctx)
return -ENOMEM;
- if (msix) {
- int i;
-
- vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!vdev->msix) {
- kfree(vdev->ctx);
- return -ENOMEM;
- }
-
- for (i = 0; i < nvec; i++)
- vdev->msix[i].entry = i;
-
- ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
- if (ret < nvec) {
- if (ret > 0)
- pci_disable_msix(pdev);
- kfree(vdev->msix);
- kfree(vdev->ctx);
- return ret;
- }
- } else {
- ret = pci_enable_msi_range(pdev, 1, nvec);
- if (ret < nvec) {
- if (ret > 0)
- pci_disable_msi(pdev);
- kfree(vdev->ctx);
- return ret;
- }
+ /* return the number of supported vectors if we can't get all: */
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+ kfree(vdev->ctx);
+ return ret;
}
vdev->num_ctx = nvec;
@@ -315,7 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
- irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
+ irq = pci_irq_vector(pdev, vector);
if (vdev->ctx[vector].trigger) {
free_irq(irq, vdev->ctx[vector].trigger);
@@ -408,11 +387,14 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
- if (msix) {
- pci_disable_msix(vdev->pdev);
- kfree(vdev->msix);
- } else
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
+
+ /*
+ * Both disable paths above use pci_intx_for_msi() to clear DisINTx
+ * via their shutdown paths. Restore for NoINTx devices.
+ */
+ if (vdev->nointx)
+ pci_intx(pdev, 0);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
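
For reference, a hedged sketch (not vfio code) of the generic pci_alloc_irq_vectors() pattern that replaces the open-coded MSI/MSI-X setup above; handler and name strings are placeholders, and vfio itself still requires all nvec vectors and wires the interrupts to eventfds separately:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	/* Placeholder handler, for illustration only. */
	static irqreturn_t example_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_setup_irqs(struct pci_dev *pdev, int nvec)
	{
		int i, nr, ret;

		/* One call covers MSI and MSI-X; returns the vector count. */
		nr = pci_alloc_irq_vectors(pdev, 1, nvec,
					   PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (nr < 0)
			return nr;

		for (i = 0; i < nr; i++) {
			/* pci_irq_vector() maps vector index to Linux IRQ. */
			ret = request_irq(pci_irq_vector(pdev, i),
					  example_handler, 0, "example", pdev);
			if (ret)
				goto err;
		}
		return 0;

	err:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), pdev);
		pci_free_irq_vectors(pdev);
		return ret;
	}
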
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 2128de86c80d..f561ac1c78a0 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -72,7 +72,6 @@ struct vfio_pci_device {
struct perm_bits *msi_perm;
spinlock_t irqlock;
struct mutex igate;
- struct msix_entry *msix;
struct vfio_pci_irq_ctx *ctx;
int num_ctx;
int irq_type;
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index d4030d0c38e9..bcd419cfd79c 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -56,7 +56,7 @@ static void xmdio_write(void *ioaddr, unsigned int mmd,
iowrite32(value, ioaddr + ((mmd_address & 0xff) << 2));
}
-int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
+static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
{
struct vfio_platform_region *xgmac_regs = &vdev->regions[0];
struct vfio_platform_region *xpcs_regs = &vdev->regions[1];
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index e3d3d948e661..49e5df6e8f29 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -57,7 +57,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
writel(value, ioaddr + XGMAC_CONTROL);
}
-int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
+static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
{
struct vfio_platform_region *reg = &vdev->regions[0];
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 1cf2d462b53d..d78142830754 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -73,7 +73,7 @@ static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}
-int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
const char **extra_dbg)
{
#ifdef CONFIG_ACPI
@@ -95,7 +95,7 @@ int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
#endif
}
-bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
+static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
{
#ifdef CONFIG_ACPI
struct device *dev = vdev->device;
@@ -637,7 +637,7 @@ static const struct vfio_device_ops vfio_platform_ops = {
.mmap = vfio_platform_mmap,
};
-int vfio_platform_of_probe(struct vfio_platform_device *vdev,
+static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
struct device *dev)
{
int ret;
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 97fb2f8fa930..3cc98c07dcd3 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -322,18 +322,7 @@ static struct miscdevice vhost_test_misc = {
"vhost-test",
&vhost_test_fops,
};
-
-static int vhost_test_init(void)
-{
- return misc_register(&vhost_test_misc);
-}
-module_init(vhost_test_init);
-
-static void vhost_test_exit(void)
-{
- misc_deregister(&vhost_test_misc);
-}
-module_exit(vhost_test_exit);
+module_misc_device(vhost_test_misc);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b2b366bb0f97..12614006211e 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -55,7 +55,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
dev_err(pb->dev, "failed to enable power supply\n");
if (pb->enable_gpio)
- gpiod_set_value(pb->enable_gpio, 1);
+ gpiod_set_value_cansleep(pb->enable_gpio, 1);
pwm_enable(pb->pwm);
pb->enabled = true;
@@ -70,7 +70,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
pwm_disable(pb->pwm);
if (pb->enable_gpio)
- gpiod_set_value(pb->enable_gpio, 0);
+ gpiod_set_value_cansleep(pb->enable_gpio, 0);
regulator_disable(pb->power_supply);
pb->enabled = false;
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 83742d806391..97067597e6bf 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -163,7 +163,6 @@ MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
static struct i2c_driver tosa_bl_driver = {
.driver = {
.name = "tosa-bl",
- .owner = THIS_MODULE,
.pm = &tosa_bl_pm_ops,
},
.probe = tosa_bl_probe,
diff --git a/drivers/vme/bridges/Kconfig b/drivers/vme/bridges/Kconfig
index f6d854584906..f6ddc3715401 100644
--- a/drivers/vme/bridges/Kconfig
+++ b/drivers/vme/bridges/Kconfig
@@ -13,3 +13,11 @@ config VME_TSI148
help
If you say Y here you get support for the Tundra TSI148 VME bridge
chip.
+
+config VME_FAKE
+ tristate "Fake"
+ help
+ If you say Y here you get support for the fake VME bridge. This
+ provides a virtualised VME Bus for devices with no VME bridge. This
+ is mainly useful for VME development (in the absence of VME
+ hardware).
diff --git a/drivers/vme/bridges/Makefile b/drivers/vme/bridges/Makefile
index 59638afcd502..b074542495c5 100644
--- a/drivers/vme/bridges/Makefile
+++ b/drivers/vme/bridges/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
+obj-$(CONFIG_VME_FAKE) += vme_fake.o
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 9f2c834e43e0..6b5ee896af63 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -47,6 +47,8 @@ static const struct pci_device_id ca91cx42_ids[] = {
{ },
};
+MODULE_DEVICE_TABLE(pci, ca91cx42_ids);
+
static struct pci_driver ca91cx42_driver = {
.name = driver_name,
.id_table = ca91cx42_ids,
@@ -69,7 +71,7 @@ static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
for (i = 0; i < 4; i++) {
if (stat & CA91CX42_LINT_LM[i]) {
/* We only enable interrupts if the callback is set */
- bridge->lm_callback[i](i);
+ bridge->lm_callback[i](bridge->lm_data[i]);
serviced |= CA91CX42_LINT_LM[i];
}
}
@@ -1410,7 +1412,7 @@ static int ca91cx42_lm_get(struct vme_lm_resource *lm,
* Callback will be passed the monitor triggered.
*/
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(int))
+ void (*callback)(void *), void *data)
{
u32 lm_ctl, tmp;
struct ca91cx42_driver *bridge;
@@ -1438,6 +1440,7 @@ static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
/* Attach callback */
bridge->lm_callback[monitor] = callback;
+ bridge->lm_data[monitor] = data;
/* Enable Location Monitor interrupt */
tmp = ioread32(bridge->base + LINT_EN);
@@ -1477,6 +1480,7 @@ static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
+ bridge->lm_data[monitor] = NULL;
/* If all location monitors disabled, disable global Location Monitor */
if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
diff --git a/drivers/vme/bridges/vme_ca91cx42.h b/drivers/vme/bridges/vme_ca91cx42.h
index d54119e59d55..f35c9f5348a9 100644
--- a/drivers/vme/bridges/vme_ca91cx42.h
+++ b/drivers/vme/bridges/vme_ca91cx42.h
@@ -43,7 +43,8 @@ struct ca91cx42_driver {
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t mbox_queue;
- void (*lm_callback[4])(int); /* Called in interrupt handler */
+ void (*lm_callback[4])(void *); /* Called in interrupt handler */
+ void *lm_data[4];
void *crcsr_kernel;
dma_addr_t crcsr_bus;
struct mutex vme_rmw; /* Only one RMW cycle at a time */
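
For illustration, not part of this patch: with the location monitor callback now taking a void * cookie instead of the monitor index, a VME consumer driver would register per-device context roughly as sketched below. This assumes the core vme_lm_attach() wrapper gains the matching data argument elsewhere in this series; struct my_card and the handler are hypothetical names:

	#include <linux/vme.h>
	#include <linux/workqueue.h>

	struct my_card {			/* hypothetical consumer state */
		struct vme_resource *lm_res;
		struct work_struct lm_work;
	};

	/* Called by the bridge when the monitored VME address is hit. */
	static void my_lm_handler(void *data)
	{
		struct my_card *card = data;

		schedule_work(&card->lm_work);
	}

	static int my_card_attach_lm(struct my_card *card)
	{
		/* Monitor 0; the card pointer is handed back to the handler. */
		return vme_lm_attach(card->lm_res, 0, my_lm_handler, card);
	}
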
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
new file mode 100644
index 000000000000..30b3acc93833
--- /dev/null
+++ b/drivers/vme/bridges/vme_fake.c
@@ -0,0 +1,1306 @@
+/*
+ * Fake VME bridge support.
+ *
+ * This driver provides a fake VME bridge chip; this enables debugging of the
+ * VME framework in the absence of a VME system.
+ *
+ * This driver has to do a number of things in software that would be driven
+ * by hardware if it were available, so it also results in extra overhead at
+ * times when compared with driving actual hardware.
+ *
+ * Author: Martyn Welch <martyn@welches.me.uk>
+ * Copyright (c) 2014 Martyn Welch
+ *
+ * Based on vme_tsi148.c:
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Based on work by Tom Armistead and Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/vme.h>
+
+#include "../vme_bridge.h"
+
+/*
+ * Define the number of each that the fake driver supports.
+ */
+#define FAKE_MAX_MASTER 8 /* Max Master Windows */
+#define FAKE_MAX_SLAVE 8 /* Max Slave Windows */
+
+/* Structures to hold information normally held in device registers */
+struct fake_slave_window {
+ int enabled;
+ unsigned long long vme_base;
+ unsigned long long size;
+ void *buf_base;
+ u32 aspace;
+ u32 cycle;
+};
+
+struct fake_master_window {
+ int enabled;
+ unsigned long long vme_base;
+ unsigned long long size;
+ u32 aspace;
+ u32 cycle;
+ u32 dwidth;
+};
+
+/* Structure used to hold driver specific information */
+struct fake_driver {
+ struct vme_bridge *parent;
+ struct fake_slave_window slaves[FAKE_MAX_SLAVE];
+ struct fake_master_window masters[FAKE_MAX_MASTER];
+ u32 lm_enabled;
+ unsigned long long lm_base;
+ u32 lm_aspace;
+ u32 lm_cycle;
+ void (*lm_callback[4])(void *);
+ void *lm_data[4];
+ struct tasklet_struct int_tasklet;
+ int int_level;
+ int int_statid;
+ void *crcsr_kernel;
+ dma_addr_t crcsr_bus;
+ /* Only one VME interrupt can be generated at a time, provide locking */
+ struct mutex vme_int;
+};
+
+/* Module parameter */
+static int geoid;
+
+static const char driver_name[] = "vme_fake";
+
+static struct vme_bridge *exit_pointer;
+
+static struct device *vme_root;
+
+/*
+ * Call the VME bus interrupt callback if provided.
+ */
+static void fake_VIRQ_tasklet(unsigned long data)
+{
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = (struct vme_bridge *) data;
+ bridge = fake_bridge->driver_priv;
+
+ vme_irq_handler(fake_bridge, bridge->int_level, bridge->int_statid);
+}
+
+/*
+ * Configure VME interrupt
+ */
+static void fake_irq_set(struct vme_bridge *fake_bridge, int level,
+ int state, int sync)
+{
+ /* Nothing to do */
+}
+
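+/*
+ * There is no real PCI bus or DMA behind the fake bridge: "bus" addresses
+ * handed to the VME core are plain kernel pointers, converted back and
+ * forth by the two helpers below.
+ */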
+static void *fake_pci_to_ptr(dma_addr_t addr)
+{
+ return (void *)(uintptr_t)addr;
+}
+
+static dma_addr_t fake_ptr_to_pci(void *addr)
+{
+ return (dma_addr_t)(uintptr_t)addr;
+}
+
+/*
+ * Generate a VME bus interrupt at the requested level & vector. Wait for
+ * interrupt to be acked.
+ */
+static int fake_irq_generate(struct vme_bridge *fake_bridge, int level,
+ int statid)
+{
+ struct fake_driver *bridge;
+
+ bridge = fake_bridge->driver_priv;
+
+ mutex_lock(&bridge->vme_int);
+
+ bridge->int_level = level;
+
+ bridge->int_statid = statid;
+
+ /*
+ * Schedule tasklet to run VME handler to emulate normal VME interrupt
+ * handler behaviour.
+ */
+ tasklet_schedule(&bridge->int_tasklet);
+
+ mutex_unlock(&bridge->vme_int);
+
+ return 0;
+}
+
+/*
+ * Initialize a slave window with the requested attributes.
+ */
+static int fake_slave_set(struct vme_slave_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
+{
+ unsigned int i, granularity = 0;
+ unsigned long long vme_bound;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = image->parent;
+ bridge = fake_bridge->driver_priv;
+
+ i = image->number;
+
+ switch (aspace) {
+ case VME_A16:
+ granularity = 0x10;
+ break;
+ case VME_A24:
+ granularity = 0x1000;
+ break;
+ case VME_A32:
+ granularity = 0x10000;
+ break;
+ case VME_A64:
+ granularity = 0x10000;
+ break;
+ case VME_CRCSR:
+ case VME_USER1:
+ case VME_USER2:
+ case VME_USER3:
+ case VME_USER4:
+ default:
+ pr_err("Invalid address space\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Bound address is a valid address for the window, adjust
+ * accordingly
+ */
+ vme_bound = vme_base + size - granularity;
+
+ if (vme_base & (granularity - 1)) {
+ pr_err("Invalid VME base alignment\n");
+ return -EINVAL;
+ }
+ if (vme_bound & (granularity - 1)) {
+ pr_err("Invalid VME bound alignment\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&image->mtx);
+
+ bridge->slaves[i].enabled = enabled;
+ bridge->slaves[i].vme_base = vme_base;
+ bridge->slaves[i].size = size;
+ bridge->slaves[i].buf_base = fake_pci_to_ptr(buf_base);
+ bridge->slaves[i].aspace = aspace;
+ bridge->slaves[i].cycle = cycle;
+
+ mutex_unlock(&image->mtx);
+
+ return 0;
+}
+
+/*
+ * Get slave window configuration.
+ */
+static int fake_slave_get(struct vme_slave_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
+{
+ unsigned int i;
+ struct fake_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ mutex_lock(&image->mtx);
+
+ *enabled = bridge->slaves[i].enabled;
+ *vme_base = bridge->slaves[i].vme_base;
+ *size = bridge->slaves[i].size;
+ *buf_base = fake_ptr_to_pci(bridge->slaves[i].buf_base);
+ *aspace = bridge->slaves[i].aspace;
+ *cycle = bridge->slaves[i].cycle;
+
+ mutex_unlock(&image->mtx);
+
+ return 0;
+}
+
+/*
+ * Set the attributes of an outbound window.
+ */
+static int fake_master_set(struct vme_master_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size,
+ u32 aspace, u32 cycle, u32 dwidth)
+{
+ int retval = 0;
+ unsigned int i;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = image->parent;
+
+ bridge = fake_bridge->driver_priv;
+
+ /* Verify input data */
+ if (vme_base & 0xFFFF) {
+ pr_err("Invalid VME Window alignment\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ if (size & 0xFFFF) {
+ pr_err("Invalid size alignment\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ if ((size == 0) && (enabled != 0)) {
+ pr_err("Size must be non-zero for enabled windows\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ /* Setup data width */
+ switch (dwidth) {
+ case VME_D8:
+ case VME_D16:
+ case VME_D32:
+ break;
+ default:
+ pr_err("Invalid data width\n");
+ retval = -EINVAL;
+ goto err_dwidth;
+ }
+
+ /* Setup address space */
+ switch (aspace) {
+ case VME_A16:
+ case VME_A24:
+ case VME_A32:
+ case VME_A64:
+ case VME_CRCSR:
+ case VME_USER1:
+ case VME_USER2:
+ case VME_USER3:
+ case VME_USER4:
+ break;
+ default:
+ pr_err("Invalid address space\n");
+ retval = -EINVAL;
+ goto err_aspace;
+ }
+
+ spin_lock(&image->lock);
+
+ i = image->number;
+
+ bridge->masters[i].enabled = enabled;
+ bridge->masters[i].vme_base = vme_base;
+ bridge->masters[i].size = size;
+ bridge->masters[i].aspace = aspace;
+ bridge->masters[i].cycle = cycle;
+ bridge->masters[i].dwidth = dwidth;
+
+ spin_unlock(&image->lock);
+
+ return 0;
+
+err_aspace:
+err_dwidth:
+err_window:
+ return retval;
+
+}
+
+/*
+ * Set the attributes of an outbound window.
+ */
+static int __fake_master_get(struct vme_master_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
+{
+ unsigned int i;
+ struct fake_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ *enabled = bridge->masters[i].enabled;
+ *vme_base = bridge->masters[i].vme_base;
+ *size = bridge->masters[i].size;
+ *aspace = bridge->masters[i].aspace;
+ *cycle = bridge->masters[i].cycle;
+ *dwidth = bridge->masters[i].dwidth;
+
+ return 0;
+}
+
+
+static int fake_master_get(struct vme_master_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
+{
+ int retval;
+
+ spin_lock(&image->lock);
+
+ retval = __fake_master_get(image, enabled, vme_base, size, aspace,
+ cycle, dwidth);
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+
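+/*
+ * Emulate location monitor hardware in software: every emulated VME access
+ * (fake_vmeread*/fake_vmewrite*) is checked against the configured monitor
+ * base, address space and cycle type, and a hit within a monitor's 8-byte
+ * slot invokes the attached callback with its private data.
+ */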
+static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
+ u32 aspace, u32 cycle)
+{
+ struct vme_bridge *fake_bridge;
+ unsigned long long lm_base;
+ u32 lm_aspace, lm_cycle;
+ int i;
+ struct vme_lm_resource *lm;
+ struct list_head *pos = NULL, *n;
+
+ /* Get vme_bridge */
+ fake_bridge = bridge->parent;
+
+ /* Loop through each location monitor resource */
+ list_for_each_safe(pos, n, &fake_bridge->lm_resources) {
+ lm = list_entry(pos, struct vme_lm_resource, list);
+
+ /* If disabled, we're done */
+ if (bridge->lm_enabled == 0)
+ return;
+
+ lm_base = bridge->lm_base;
+ lm_aspace = bridge->lm_aspace;
+ lm_cycle = bridge->lm_cycle;
+
+ /* First make sure that the cycle and address space match */
+ if ((lm_aspace == aspace) && (lm_cycle == cycle)) {
+ for (i = 0; i < lm->monitors; i++) {
+ /* Each location monitor covers 8 bytes */
+ if (((lm_base + (8 * i)) <= addr) &&
+ ((lm_base + (8 * i) + 8) > addr)) {
+ if (bridge->lm_callback[i] != NULL)
+ bridge->lm_callback[i](
+ bridge->lm_data[i]);
+ }
+ }
+ }
+ }
+}
+
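+/*
+ * Emulated reads and writes below are resolved against the configured slave
+ * windows; an access that misses every window reads back as all ones and a
+ * missed write is silently dropped (no bus error is modelled).
+ */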
+static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
+ u32 aspace, u32 cycle)
+{
+ u8 retval = 0xff;
+ int i;
+ unsigned long long start, end, offset;
+ u8 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ if ((addr >= start) && (addr < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u8 *)(bridge->slaves[i].buf_base + offset);
+ retval = *loc;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+ return retval;
+}
+
+static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
+ u32 aspace, u32 cycle)
+{
+ u16 retval = 0xffff;
+ int i;
+ unsigned long long start, end, offset;
+ u16 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if ((addr >= start) && ((addr + 1) < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u16 *)(bridge->slaves[i].buf_base + offset);
+ retval = *loc;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+ return retval;
+}
+
+static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
+ u32 aspace, u32 cycle)
+{
+ u32 retval = 0xffffffff;
+ int i;
+ unsigned long long start, end, offset;
+ u32 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if ((addr >= start) && ((addr + 3) < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u32 *)(bridge->slaves[i].buf_base + offset);
+ retval = *loc;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+ return retval;
+}
+
+static ssize_t fake_master_read(struct vme_master_resource *image, void *buf,
+ size_t count, loff_t offset)
+{
+ int retval;
+ u32 aspace, cycle, dwidth;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *priv;
+ int i;
+ unsigned long long addr;
+ unsigned int done = 0;
+ unsigned int count32;
+
+ fake_bridge = image->parent;
+
+ priv = fake_bridge->driver_priv;
+
+ i = image->number;
+
+ addr = (unsigned long long)priv->masters[i].vme_base + offset;
+ aspace = priv->masters[i].aspace;
+ cycle = priv->masters[i].cycle;
+ dwidth = priv->masters[i].dwidth;
+
+ spin_lock(&image->lock);
+
+ /* The following code handles VME address alignment. We cannot use
+ * memcpy_xxx here because it may cut data transfers into 8-bit
+ * cycles when D16 or D32 cycles are required on the VME bus.
+ * On the other hand, the bridge itself assures that the maximum data
+ * cycle configured for the transfer is used and splits it
+ * automatically for non-aligned addresses, so we don't want the
+ * overhead of needlessly forcing small transfers for the entire cycle.
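+ *
+ * For example, with D32 width a 7-byte read starting at an odd address ends
+ * up as one 8-bit, one 16-bit and one 32-bit VME cycle below.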
+ */
+ if (addr & 0x1) {
+ *(u8 *)buf = fake_vmeread8(priv, addr, aspace, cycle);
+ done += 1;
+ if (done == count)
+ goto out;
+ }
+ if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+ if ((addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ *(u8 *)(buf + done) = fake_vmeread8(priv,
+ addr + done, aspace, cycle);
+ done += 1;
+ goto out;
+ } else {
+ *(u16 *)(buf + done) = fake_vmeread16(priv,
+ addr + done, aspace, cycle);
+ done += 2;
+ }
+ }
+ }
+
+ if (dwidth == VME_D32) {
+ count32 = (count - done) & ~0x3;
+ while (done < count32) {
+ *(u32 *)(buf + done) = fake_vmeread32(priv, addr + done,
+ aspace, cycle);
+ done += 4;
+ }
+ } else if (dwidth == VME_D16) {
+ count32 = (count - done) & ~0x3;
+ while (done < count32) {
+ *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done,
+ aspace, cycle);
+ done += 2;
+ }
+ } else if (dwidth == VME_D8) {
+ count32 = (count - done);
+ while (done < count32) {
+ *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done,
+ aspace, cycle);
+ done += 1;
+ }
+
+ }
+
+ if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+ if ((count - done) & 0x2) {
+ *(u16 *)(buf + done) = fake_vmeread16(priv, addr + done,
+ aspace, cycle);
+ done += 2;
+ }
+ }
+ if ((count - done) & 0x1) {
+ *(u8 *)(buf + done) = fake_vmeread8(priv, addr + done, aspace,
+ cycle);
+ done += 1;
+ }
+
+out:
+ retval = count;
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
+ unsigned long long addr, u32 aspace, u32 cycle)
+{
+ int i;
+ unsigned long long start, end, offset;
+ u8 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if ((addr >= start) && (addr < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u8 *)((void *)bridge->slaves[i].buf_base + offset);
+ *loc = *buf;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+}
+
+static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
+ unsigned long long addr, u32 aspace, u32 cycle)
+{
+ int i;
+ unsigned long long start, end, offset;
+ u16 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if ((addr >= start) && ((addr + 1) < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u16 *)((void *)bridge->slaves[i].buf_base + offset);
+ *loc = *buf;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+}
+
+static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
+ unsigned long long addr, u32 aspace, u32 cycle)
+{
+ int i;
+ unsigned long long start, end, offset;
+ u32 *loc;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ if (aspace != bridge->slaves[i].aspace)
+ continue;
+
+ if (cycle != bridge->slaves[i].cycle)
+ continue;
+
+ start = bridge->slaves[i].vme_base;
+ end = bridge->slaves[i].vme_base + bridge->slaves[i].size;
+
+ if ((addr >= start) && ((addr + 3) < end)) {
+ offset = addr - bridge->slaves[i].vme_base;
+ loc = (u32 *)((void *)bridge->slaves[i].buf_base + offset);
+ *loc = *buf;
+
+ break;
+ }
+ }
+
+ fake_lm_check(bridge, addr, aspace, cycle);
+
+}
+
+static ssize_t fake_master_write(struct vme_master_resource *image, void *buf,
+ size_t count, loff_t offset)
+{
+ int retval = 0;
+ u32 aspace, cycle, dwidth;
+ unsigned long long addr;
+ int i;
+ unsigned int done = 0;
+ unsigned int count32;
+
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = image->parent;
+
+ bridge = fake_bridge->driver_priv;
+
+ i = image->number;
+
+ addr = bridge->masters[i].vme_base + offset;
+ aspace = bridge->masters[i].aspace;
+ cycle = bridge->masters[i].cycle;
+ dwidth = bridge->masters[i].dwidth;
+
+ spin_lock(&image->lock);
+
+ /* Here we apply the same strategy we use in the master_read
+ * function in order to ensure the correct cycles.
+ */
+ if (addr & 0x1) {
+ fake_vmewrite8(bridge, (u8 *)buf, addr, aspace, cycle);
+ done += 1;
+ if (done == count)
+ goto out;
+ }
+
+ if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+ if ((addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ fake_vmewrite8(bridge, (u8 *)(buf + done),
+ addr + done, aspace, cycle);
+ done += 1;
+ goto out;
+ } else {
+ fake_vmewrite16(bridge, (u16 *)(buf + done),
+ addr + done, aspace, cycle);
+ done += 2;
+ }
+ }
+ }
+
+ if (dwidth == VME_D32) {
+ count32 = (count - done) & ~0x3;
+ while (done < count32) {
+ fake_vmewrite32(bridge, (u32 *)(buf + done),
+ addr + done, aspace, cycle);
+ done += 4;
+ }
+ } else if (dwidth == VME_D16) {
+ count32 = (count - done) & ~0x3;
+ while (done < count32) {
+ fake_vmewrite16(bridge, (u16 *)(buf + done),
+ addr + done, aspace, cycle);
+ done += 2;
+ }
+ } else if (dwidth == VME_D8) {
+ count32 = (count - done);
+ while (done < count32) {
+ fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done,
+ aspace, cycle);
+ done += 1;
+ }
+
+ }
+
+ if ((dwidth == VME_D16) || (dwidth == VME_D32)) {
+ if ((count - done) & 0x2) {
+ fake_vmewrite16(bridge, (u16 *)(buf + done),
+ addr + done, aspace, cycle);
+ done += 2;
+ }
+ }
+
+ if ((count - done) & 0x1) {
+ fake_vmewrite8(bridge, (u8 *)(buf + done), addr + done, aspace,
+ cycle);
+ done += 1;
+ }
+
+out:
+ retval = count;
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+/*
+ * Perform an RMW cycle on the VME bus.
+ *
+ * Requires a previously configured master window, returns final value.
+ */
+static unsigned int fake_master_rmw(struct vme_master_resource *image,
+ unsigned int mask, unsigned int compare, unsigned int swap,
+ loff_t offset)
+{
+ u32 tmp, base;
+ u32 aspace, cycle;
+ int i;
+ struct fake_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ /* Find the PCI address that maps to the desired VME address */
+ i = image->number;
+
+ base = bridge->masters[i].vme_base;
+ aspace = bridge->masters[i].aspace;
+ cycle = bridge->masters[i].cycle;
+
+ /* Lock image */
+ spin_lock(&image->lock);
+
+ /* Read existing value */
+ tmp = fake_vmeread32(bridge, base + offset, aspace, cycle);
+
+ /* Perform check */
+ if ((tmp && mask) == (compare && mask)) {
+ tmp = tmp | (mask | swap);
+ tmp = tmp & (~mask | swap);
+
+ /* Write back */
+ fake_vmewrite32(bridge, &tmp, base + offset, aspace, cycle);
+ }
+
+ /* Unlock image */
+ spin_unlock(&image->lock);
+
+ return tmp;
+}
+
+/*
+ * All 4 location monitors reside at the same base - this is therefore a
+ * system wide configuration.
+ *
+ * This does not enable the location monitor - that should be done when the
+ * first
+ * callback is attached and disabled when the last callback is removed.
+ */
+static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+ u32 aspace, u32 cycle)
+{
+ int i;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = lm->parent;
+
+ bridge = fake_bridge->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* If we already have a callback attached, we can't move it! */
+ for (i = 0; i < lm->monitors; i++) {
+ if (bridge->lm_callback[i] != NULL) {
+ mutex_unlock(&lm->mtx);
+ pr_err("Location monitor callback attached, can't reset\n");
+ return -EBUSY;
+ }
+ }
+
+ switch (aspace) {
+ case VME_A16:
+ case VME_A24:
+ case VME_A32:
+ case VME_A64:
+ break;
+ default:
+ mutex_unlock(&lm->mtx);
+ pr_err("Invalid address space\n");
+ return -EINVAL;
+ }
+
+ bridge->lm_base = lm_base;
+ bridge->lm_aspace = aspace;
+ bridge->lm_cycle = cycle;
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/* Get configuration of the location monitor and return whether it is enabled
+ * or disabled.
+ */
+static int fake_lm_get(struct vme_lm_resource *lm,
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+{
+ struct fake_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ *lm_base = bridge->lm_base;
+ *aspace = bridge->lm_aspace;
+ *cycle = bridge->lm_cycle;
+
+ mutex_unlock(&lm->mtx);
+
+ return bridge->lm_enabled;
+}
+
+/*
+ * Attach a callback to a specific location monitor.
+ *
+ * Callback will be passed the monitor triggered.
+ */
+static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
+ void (*callback)(void *), void *data)
+{
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = lm->parent;
+
+ bridge = fake_bridge->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* Ensure that the location monitor is configured - need PGM or DATA */
+ if (bridge->lm_cycle == 0) {
+ mutex_unlock(&lm->mtx);
+ pr_err("Location monitor not properly configured\n");
+ return -EINVAL;
+ }
+
+ /* Check that a callback isn't already attached */
+ if (bridge->lm_callback[monitor] != NULL) {
+ mutex_unlock(&lm->mtx);
+ pr_err("Existing callback attached\n");
+ return -EBUSY;
+ }
+
+ /* Attach callback */
+ bridge->lm_callback[monitor] = callback;
+ bridge->lm_data[monitor] = data;
+
+ /* Ensure that global Location Monitor Enable set */
+ bridge->lm_enabled = 1;
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/*
+ * Detach a callback function from a specific location monitor.
+ */
+static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
+{
+ u32 tmp;
+ int i;
+ struct fake_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* Detach callback */
+ bridge->lm_callback[monitor] = NULL;
+ bridge->lm_data[monitor] = NULL;
+
+ /* If all location monitors disabled, disable global Location Monitor */
+ tmp = 0;
+ for (i = 0; i < lm->monitors; i++) {
+ if (bridge->lm_callback[i] != NULL)
+ tmp = 1;
+ }
+
+ if (tmp == 0)
+ bridge->lm_enabled = 0;
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/*
+ * Determine Geographical Addressing
+ */
+static int fake_slot_get(struct vme_bridge *fake_bridge)
+{
+ return geoid;
+}
+
+static void *fake_alloc_consistent(struct device *parent, size_t size,
+ dma_addr_t *dma)
+{
+ void *alloc = kmalloc(size, GFP_KERNEL);
+
+ if (alloc != NULL)
+ *dma = fake_ptr_to_pci(alloc);
+
+ return alloc;
+}
+
+static void fake_free_consistent(struct device *parent, size_t size,
+ void *vaddr, dma_addr_t dma)
+{
+ kfree(vaddr);
+/*
+ dma_free_coherent(parent, size, vaddr, dma);
+*/
+}
+
+/*
+ * Configure CR/CSR space
+ *
+ * Access to the CR/CSR can be configured at power-up. The location of the
+ * CR/CSR registers in the CR/CSR address space is determined by the board's
+ * geographic address.
+ *
+ * Each board has a 512kB window, with the highest 4kB being used for the
+ * board's registers; this means there is a fixed-length 508kB window which
+ * must be mapped onto PCI memory.
+ */
+static int fake_crcsr_init(struct vme_bridge *fake_bridge)
+{
+ u32 vstat;
+ struct fake_driver *bridge;
+
+ bridge = fake_bridge->driver_priv;
+
+ /* Allocate mem for CR/CSR image */
+ bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
+ bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
+ if (bridge->crcsr_kernel == NULL)
+ return -ENOMEM;
+
+ vstat = fake_slot_get(fake_bridge);
+
+ pr_info("CR/CSR Offset: %d\n", vstat);
+
+ return 0;
+}
+
+static void fake_crcsr_exit(struct vme_bridge *fake_bridge)
+{
+ struct fake_driver *bridge;
+
+ bridge = fake_bridge->driver_priv;
+
+ kfree(bridge->crcsr_kernel);
+}
+
+
+static int __init fake_init(void)
+{
+ int retval, i;
+ struct list_head *pos = NULL, *n;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *fake_device;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ struct vme_lm_resource *lm;
+
+ /* We need a fake parent device */
+ vme_root = __root_device_register("vme", THIS_MODULE);
+
+ /* If we want to support more than one bridge at some point, we need to
+ * dynamically allocate this so we get one per device.
+ */
+ fake_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
+ if (fake_bridge == NULL) {
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ fake_device = kzalloc(sizeof(struct fake_driver), GFP_KERNEL);
+ if (fake_device == NULL) {
+ retval = -ENOMEM;
+ goto err_driver;
+ }
+
+ fake_bridge->driver_priv = fake_device;
+
+ fake_bridge->parent = vme_root;
+
+ fake_device->parent = fake_bridge;
+
+ /* Initialize wait queues & mutual exclusion flags */
+ mutex_init(&fake_device->vme_int);
+ mutex_init(&fake_bridge->irq_mtx);
+ tasklet_init(&fake_device->int_tasklet, fake_VIRQ_tasklet,
+ (unsigned long) fake_bridge);
+
+ strcpy(fake_bridge->name, driver_name);
+
+ /* Add master windows to list */
+ INIT_LIST_HEAD(&fake_bridge->master_resources);
+ for (i = 0; i < FAKE_MAX_MASTER; i++) {
+ master_image = kmalloc(sizeof(struct vme_master_resource),
+ GFP_KERNEL);
+ if (master_image == NULL) {
+ retval = -ENOMEM;
+ goto err_master;
+ }
+ master_image->parent = fake_bridge;
+ spin_lock_init(&master_image->lock);
+ master_image->locked = 0;
+ master_image->number = i;
+ master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
+ VME_A64;
+ master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
+ VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
+ VME_PROG | VME_DATA;
+ master_image->width_attr = VME_D16 | VME_D32;
+ memset(&master_image->bus_resource, 0,
+ sizeof(struct resource));
+ master_image->kern_base = NULL;
+ list_add_tail(&master_image->list,
+ &fake_bridge->master_resources);
+ }
+
+ /* Add slave windows to list */
+ INIT_LIST_HEAD(&fake_bridge->slave_resources);
+ for (i = 0; i < FAKE_MAX_SLAVE; i++) {
+ slave_image = kmalloc(sizeof(struct vme_slave_resource),
+ GFP_KERNEL);
+ if (slave_image == NULL) {
+ retval = -ENOMEM;
+ goto err_slave;
+ }
+ slave_image->parent = fake_bridge;
+ mutex_init(&slave_image->mtx);
+ slave_image->locked = 0;
+ slave_image->number = i;
+ slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
+ VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
+ VME_USER3 | VME_USER4;
+ slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
+ VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
+ VME_PROG | VME_DATA;
+ list_add_tail(&slave_image->list,
+ &fake_bridge->slave_resources);
+ }
+
+ /* Add location monitor to list */
+ INIT_LIST_HEAD(&fake_bridge->lm_resources);
+ lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
+ if (lm == NULL) {
+ pr_err("Failed to allocate memory for location monitor resource structure\n");
+ retval = -ENOMEM;
+ goto err_lm;
+ }
+ lm->parent = fake_bridge;
+ mutex_init(&lm->mtx);
+ lm->locked = 0;
+ lm->number = 1;
+ lm->monitors = 4;
+ list_add_tail(&lm->list, &fake_bridge->lm_resources);
+
+ fake_bridge->slave_get = fake_slave_get;
+ fake_bridge->slave_set = fake_slave_set;
+ fake_bridge->master_get = fake_master_get;
+ fake_bridge->master_set = fake_master_set;
+ fake_bridge->master_read = fake_master_read;
+ fake_bridge->master_write = fake_master_write;
+ fake_bridge->master_rmw = fake_master_rmw;
+ fake_bridge->irq_set = fake_irq_set;
+ fake_bridge->irq_generate = fake_irq_generate;
+ fake_bridge->lm_set = fake_lm_set;
+ fake_bridge->lm_get = fake_lm_get;
+ fake_bridge->lm_attach = fake_lm_attach;
+ fake_bridge->lm_detach = fake_lm_detach;
+ fake_bridge->slot_get = fake_slot_get;
+ fake_bridge->alloc_consistent = fake_alloc_consistent;
+ fake_bridge->free_consistent = fake_free_consistent;
+
+ pr_info("Board is%s the VME system controller\n",
+ (geoid == 1) ? "" : " not");
+
+ pr_info("VME geographical address is set to %d\n", geoid);
+
+ retval = fake_crcsr_init(fake_bridge);
+ if (retval) {
+ pr_err("CR/CSR configuration failed.\n");
+ goto err_crcsr;
+ }
+
+ retval = vme_register_bridge(fake_bridge);
+ if (retval != 0) {
+ pr_err("Chip Registration failed.\n");
+ goto err_reg;
+ }
+
+ exit_pointer = fake_bridge;
+
+ return 0;
+
+err_reg:
+ fake_crcsr_exit(fake_bridge);
+err_crcsr:
+err_lm:
+ /* resources are stored in a linked list */
+ list_for_each_safe(pos, n, &fake_bridge->lm_resources) {
+ lm = list_entry(pos, struct vme_lm_resource, list);
+ list_del(pos);
+ kfree(lm);
+ }
+err_slave:
+ /* resources are stored in a linked list */
+ list_for_each_safe(pos, n, &fake_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+err_master:
+ /* resources are stored in a linked list */
+ list_for_each_safe(pos, n, &fake_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ kfree(fake_device);
+err_driver:
+ kfree(fake_bridge);
+err_struct:
+ return retval;
+
+}
+
+
+static void __exit fake_exit(void)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmplist;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ int i;
+ struct vme_bridge *fake_bridge;
+ struct fake_driver *bridge;
+
+ fake_bridge = exit_pointer;
+
+ bridge = fake_bridge->driver_priv;
+
+ pr_debug("Driver is being unloaded.\n");
+
+ /*
+ * Shutdown all inbound and outbound windows.
+ */
+ for (i = 0; i < FAKE_MAX_MASTER; i++)
+ bridge->masters[i].enabled = 0;
+
+ for (i = 0; i < FAKE_MAX_SLAVE; i++)
+ bridge->slaves[i].enabled = 0;
+
+ /*
+ * Shutdown Location monitor.
+ */
+ bridge->lm_enabled = 0;
+
+ vme_unregister_bridge(fake_bridge);
+
+ fake_crcsr_exit(fake_bridge);
+ /* resources are stored in a linked list */
+ list_for_each_safe(pos, tmplist, &fake_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+
+ /* resources are stored in a linked list */
+ list_for_each_safe(pos, tmplist, &fake_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ kfree(fake_bridge->driver_priv);
+
+ kfree(fake_bridge);
+
+ root_device_unregister(vme_root);
+}
+
+
+MODULE_PARM_DESC(geoid, "Set geographical addressing");
+module_param(geoid, int, 0);
+
+MODULE_DESCRIPTION("Fake VME bridge driver");
+MODULE_LICENSE("GPL");
+
+module_init(fake_init);
+module_exit(fake_exit);
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 4bc5d451ec6c..fc1b634b969a 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -50,6 +50,8 @@ static const struct pci_device_id tsi148_ids[] = {
{ },
};
+MODULE_DEVICE_TABLE(pci, tsi148_ids);
+
static struct pci_driver tsi148_driver = {
.name = driver_name,
.id_table = tsi148_ids,
@@ -102,7 +104,7 @@ static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_LMS[i]) {
/* We only enable interrupts if the callback is set */
- bridge->lm_callback[i](i);
+ bridge->lm_callback[i](bridge->lm_data[i]);
serviced |= TSI148_LCSR_INTC_LMC[i];
}
}
@@ -2047,7 +2049,7 @@ static int tsi148_lm_get(struct vme_lm_resource *lm,
* Callback will be passed the monitor triggered.
*/
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(int))
+ void (*callback)(void *), void *data)
{
u32 lm_ctl, tmp;
struct vme_bridge *tsi148_bridge;
@@ -2077,6 +2079,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
/* Attach callback */
bridge->lm_callback[monitor] = callback;
+ bridge->lm_data[monitor] = data;
/* Enable Location Monitor interrupt */
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
@@ -2124,6 +2127,7 @@ static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
+ bridge->lm_data[monitor] = NULL;
/* If all location monitors disabled, disable global Location Monitor */
if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
diff --git a/drivers/vme/bridges/vme_tsi148.h b/drivers/vme/bridges/vme_tsi148.h
index f5ed14382a8d..0935d85d32ec 100644
--- a/drivers/vme/bridges/vme_tsi148.h
+++ b/drivers/vme/bridges/vme_tsi148.h
@@ -38,7 +38,8 @@ struct tsi148_driver {
void __iomem *base; /* Base Address of device registers */
wait_queue_head_t dma_queue[2];
wait_queue_head_t iack_queue;
- void (*lm_callback[4])(int); /* Called in interrupt handler */
+ void (*lm_callback[4])(void *); /* Called in interrupt handler */
+ void *lm_data[4];
void *crcsr_kernel;
dma_addr_t crcsr_bus;
struct vme_master_resource *flush_image;
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 37ac0a58e59a..15b64076bc26 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -13,8 +13,8 @@
* option) any later version.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -39,7 +39,6 @@ static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);
-static void __exit vme_exit(void);
static int __init vme_init(void);
static struct vme_dev *dev_to_vme_dev(struct device *dev)
@@ -1321,7 +1320,7 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
EXPORT_SYMBOL(vme_lm_get);
int vme_lm_attach(struct vme_resource *resource, int monitor,
- void (*callback)(int))
+ void (*callback)(void *), void *data)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1338,7 +1337,7 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
return -EINVAL;
}
- return bridge->lm_attach(lm, monitor, callback);
+ return bridge->lm_attach(lm, monitor, callback, data);
}
EXPORT_SYMBOL(vme_lm_attach);
@@ -1622,25 +1621,10 @@ static int vme_bus_probe(struct device *dev)
return retval;
}
-static int vme_bus_remove(struct device *dev)
-{
- int retval = -ENODEV;
- struct vme_driver *driver;
- struct vme_dev *vdev = dev_to_vme_dev(dev);
-
- driver = dev->platform_data;
-
- if (driver->remove != NULL)
- retval = driver->remove(vdev);
-
- return retval;
-}
-
struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
- .remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
@@ -1648,11 +1632,4 @@ static int __init vme_init(void)
{
return bus_register(&vme_bus_type);
}
-
-static void __exit vme_exit(void)
-{
- bus_unregister(&vme_bus_type);
-}
-
subsys_initcall(vme_init);
-module_exit(vme_exit);
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h
index cb8246fd97be..2662e916b96a 100644
--- a/drivers/vme/vme_bridge.h
+++ b/drivers/vme/vme_bridge.h
@@ -160,7 +160,8 @@ struct vme_bridge {
int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
u32 *);
- int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
+ int (*lm_attach)(struct vme_lm_resource *, int,
+ void (*callback)(void *), void *);
int (*lm_detach) (struct vme_lm_resource *, int);
/* CR/CSR space functions */
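The VME location monitor rework above changes the lm_attach callback from void (*)(int) to void (*)(void *) plus a caller-supplied data pointer. A minimal sketch of how a VME device driver would use the updated API follows; the example_dev structure and the A16 address are assumptions for illustration, not code from this series:

/*
 * Illustrative sketch only -- not part of this patch. "example_dev" is a
 * hypothetical driver structure; the vme_lm_*() calls are the in-tree VME
 * API with the reworked callback signature shown above.
 */
#include <linux/device.h>
#include <linux/vme.h>

struct example_dev {
	struct device *dev;
	struct vme_resource *lm;
};

static void example_lm_callback(void *data)
{
	struct example_dev *edev = data;

	/* The callback now receives driver data rather than a monitor index */
	dev_info(edev->dev, "location monitor triggered\n");
}

static int example_setup_lm(struct vme_dev *vdev, struct example_dev *edev)
{
	int ret;

	edev->lm = vme_lm_request(vdev);
	if (!edev->lm)
		return -ENODEV;

	/* Watch an (assumed) A16 address for program and data cycles */
	ret = vme_lm_set(edev->lm, 0x1000, VME_A16,
			 VME_USER | VME_PROG | VME_DATA);
	if (ret)
		goto err_free;

	/* Attach to monitor 0; edev is passed back to the callback */
	ret = vme_lm_attach(edev->lm, 0, example_lm_callback, edev);
	if (ret)
		goto err_free;

	return 0;

err_free:
	vme_lm_free(edev->lm);
	return ret;
}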
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 581a300fd6cd..82611f197b0a 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -66,7 +66,7 @@ struct w1_therm_family_data {
/* return the address of the refcnt in the family data */
#define THERM_REFCNT(family_data) \
- (&((struct w1_therm_family_data*)family_data)->refcnt)
+ (&((struct w1_therm_family_data *)family_data)->refcnt)
static int w1_therm_add_slave(struct w1_slave *sl)
{
@@ -81,7 +81,8 @@ static int w1_therm_add_slave(struct w1_slave *sl)
static void w1_therm_remove_slave(struct w1_slave *sl)
{
int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
- while(refcnt) {
+
+ while (refcnt) {
msleep(1000);
refcnt = atomic_read(THERM_REFCNT(sl->family_data));
}
@@ -151,8 +152,7 @@ static struct w1_family w1_therm_family_DS1825 = {
.fops = &w1_therm_fops,
};
-struct w1_therm_family_converter
-{
+struct w1_therm_family_converter {
u8 broken;
u16 reserved;
struct w1_family *f;
@@ -293,7 +293,7 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
uint8_t precision_bits;
uint8_t mask = 0x60;
- if(val > 12 || val < 9) {
+ if (val > 12 || val < 9) {
pr_warn("Unsupported precision\n");
return -1;
}
@@ -336,7 +336,8 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
/* read values to only alter precision bits */
w1_write_8(dev, W1_READ_SCRATCHPAD);
- if ((count = w1_read_block(dev, rom, 9)) != 9)
+ count = w1_read_block(dev, rom, 9);
+ if (count != 9)
dev_warn(device, "w1_read_block() returned %u instead of 9.\n", count);
crc = w1_calc_crc8(rom, 8);
@@ -366,6 +367,7 @@ post_unlock:
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
s16 t = le16_to_cpup((__le16 *)rom);
+
return t*1000/16;
}
@@ -415,7 +417,7 @@ static ssize_t w1_slave_store(struct device *device,
for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
if (w1_therm_families[i].f->fid == sl->family->fid) {
/* zero value indicates to write current configuration to eeprom */
- if (0 == val)
+ if (val == 0)
ret = w1_therm_families[i].eeprom(device);
else
ret = w1_therm_families[i].precision(device, val);
@@ -439,8 +441,7 @@ static ssize_t w1_slave_show(struct device *device,
if (ret != 0)
goto post_unlock;
- if(!sl->family_data)
- {
+ if (!sl->family_data) {
ret = -ENODEV;
goto pre_unlock;
}
@@ -495,7 +496,8 @@ static ssize_t w1_slave_show(struct device *device,
if (!w1_reset_select_slave(sl)) {
w1_write_8(dev, W1_READ_SCRATCHPAD);
- if ((count = w1_read_block(dev, rom, 9)) != 9) {
+ count = w1_read_block(dev, rom, 9);
+ if (count != 9) {
dev_warn(device, "w1_read_block() "
"returned %u instead of 9.\n",
count);
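For context on the w1_DS18B20_convert_temp() hunk above: the DS18B20 reports temperature as a signed 16-bit count of 1/16 degC steps, so multiplying by 1000 and dividing by 16 yields millidegrees. A worked example using the datasheet's +25.0625 degC code (an assumed sample value, not data from this patch):

#include <linux/types.h>

/* Worked example of the conversion used in w1_DS18B20_convert_temp() */
static int example_ds18b20_to_millidegrees(void)
{
	s16 raw = 0x0191;		/* 401 counts of 1/16 degC (datasheet sample) */

	return raw * 1000 / 16;		/* 401000 / 16 = 25062 mdegC, ~25.06 degC */
}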
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index bb34362e930a..e213c678bbfe 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -53,8 +53,8 @@ int w1_max_slave_ttl = 10;
module_param_named(timeout, w1_timeout, int, 0);
MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
module_param_named(timeout_us, w1_timeout_us, int, 0);
-MODULE_PARM_DESC(timeout, "time in microseconds between automatic slave"
- " searches");
+MODULE_PARM_DESC(timeout_us,
+ "time in microseconds between automatic slave searches");
/* A search stops when w1_max_slave_count devices have been found in that
* search. The next search will start over and detect the same set of devices
* on a static 1-wire bus. Memory is not allocated based on this number, just
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 68952d9ccf83..99ebf6ea3de6 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -666,10 +666,8 @@ static int usb_pcwd_probe(struct usb_interface *interface,
/* allocate the urb's */
usb_pcwd->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!usb_pcwd->intr_urb) {
- pr_err("Out of memory\n");
+ if (!usb_pcwd->intr_urb)
goto error;
- }
/* initialise the intr urb's */
usb_fill_int_urb(usb_pcwd->intr_urb, udev, pipe,
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index d5dbdb9d24d8..9ecfcdcdd6d6 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1314,9 +1314,6 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
if (!VALID_EVTCHN(evtchn))
return -1;
- if (!xen_support_evtchn_rebind())
- return -1;
-
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
@@ -1650,20 +1647,15 @@ void xen_callback_vector(void)
{
int rc;
uint64_t callback_via;
- if (xen_have_vector_callback) {
- callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
- rc = xen_set_callback_via(callback_via);
- if (rc) {
- pr_err("Request for Xen HVM callback vector failed\n");
- xen_have_vector_callback = 0;
- return;
- }
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
- /* in the restore case the vector has already been allocated */
- if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
- xen_hvm_callback_vector);
- }
+
+ callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+ rc = xen_set_callback_via(callback_via);
+ BUG_ON(rc);
+ pr_info("Xen HVM callback vector for event delivery is enabled\n");
+ /* in the restore case the vector has already been allocated */
+ if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+ xen_hvm_callback_vector);
}
#else
void xen_callback_vector(void) {}
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 266c2c733039..7ef27c6ed72f 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -418,30 +418,18 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
return ret;
}
-static int evtchn_fifo_cpu_notification(struct notifier_block *self,
- unsigned long action,
- void *hcpu)
+static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
- int cpu = (long)hcpu;
- int ret = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- if (!per_cpu(cpu_control_block, cpu))
- ret = evtchn_fifo_alloc_control_block(cpu);
- break;
- case CPU_DEAD:
- __evtchn_fifo_handle_events(cpu, true);
- break;
- default:
- break;
- }
- return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
+ if (!per_cpu(cpu_control_block, cpu))
+ return evtchn_fifo_alloc_control_block(cpu);
+ return 0;
}
-static struct notifier_block evtchn_fifo_cpu_notifier = {
- .notifier_call = evtchn_fifo_cpu_notification,
-};
+static int xen_evtchn_cpu_dead(unsigned int cpu)
+{
+ __evtchn_fifo_handle_events(cpu, true);
+ return 0;
+}
int __init xen_evtchn_fifo_init(void)
{
@@ -456,7 +444,9 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
- register_cpu_notifier(&evtchn_fifo_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+ "CPUHP_XEN_EVTCHN_PREPARE",
+ xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
out:
put_cpu();
return ret;
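The events_fifo.c hunk above converts the CPU notifier to the hotplug state machine but does not check the return value of cpuhp_setup_state_nocalls(). Purely as an illustrative sketch (not a change proposed by this patch), the same registration with the error reported, reusing the callbacks defined above:

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int example_register_evtchn_hotplug(void)
{
	int ret;

	/* prepare runs before a CPU comes up; dead runs after it goes down */
	ret = cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
					"CPUHP_XEN_EVTCHN_PREPARE",
					xen_evtchn_cpu_prepare,
					xen_evtchn_cpu_dead);
	if (ret < 0)
		pr_warn("xen: failed to set up evtchn hotplug state: %d\n", ret);

	return ret;
}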
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index cf9666680c8c..b59c9455aae1 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,7 +42,6 @@
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
-static uint64_t callback_via;
static unsigned long alloc_xen_mmio(unsigned long len)
{
@@ -55,51 +54,6 @@ static unsigned long alloc_xen_mmio(unsigned long len)
return addr;
}
-static uint64_t get_callback_via(struct pci_dev *pdev)
-{
- u8 pin;
- int irq;
-
- irq = pdev->irq;
- if (irq < 16)
- return irq; /* ISA IRQ */
-
- pin = pdev->pin;
-
- /* We don't know the GSI. Specify the PCI INTx line instead. */
- return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
- ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
- ((uint64_t)pdev->bus->number << 16) |
- ((uint64_t)(pdev->devfn & 0xff) << 8) |
- ((uint64_t)(pin - 1) & 3);
-}
-
-static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
-{
- xen_hvm_evtchn_do_upcall();
- return IRQ_HANDLED;
-}
-
-static int xen_allocate_irq(struct pci_dev *pdev)
-{
- return request_irq(pdev->irq, do_hvm_evtchn_intr,
- IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
- "xen-platform-pci", pdev);
-}
-
-static int platform_pci_resume(struct pci_dev *pdev)
-{
- int err;
- if (xen_have_vector_callback)
- return 0;
- err = xen_set_callback_via(callback_via);
- if (err) {
- dev_err(&pdev->dev, "platform_pci_resume failure!\n");
- return err;
- }
- return 0;
-}
-
static int platform_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -138,21 +92,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
- if (!xen_have_vector_callback) {
- ret = xen_allocate_irq(pdev);
- if (ret) {
- dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
- goto out;
- }
- callback_via = get_callback_via(pdev);
- ret = xen_set_callback_via(callback_via);
- if (ret) {
- dev_warn(&pdev->dev, "Unable to set the evtchn callback "
- "err=%d\n", ret);
- goto out;
- }
- }
-
max_nr_gframes = gnttab_max_grant_frames();
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -184,9 +123,6 @@ static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
-#ifdef CONFIG_PM
- .resume_early = platform_pci_resume,
-#endif
};
static int __init platform_pci_init(void)
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 6881b3ceb675..84106f9c456c 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -215,7 +215,7 @@ static const struct attribute_group xen_compilation_group = {
.attrs = xen_compile_attrs,
};
-static int __init xen_compilation_init(void)
+static int __init xen_sysfs_compilation_init(void)
{
return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
}
@@ -341,7 +341,7 @@ static const struct attribute_group xen_properties_group = {
.attrs = xen_properties_attrs,
};
-static int __init xen_properties_init(void)
+static int __init xen_sysfs_properties_init(void)
{
return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
}
@@ -455,7 +455,7 @@ static const struct attribute_group xen_pmu_group = {
.attrs = xen_pmu_attrs,
};
-static int __init xen_pmu_init(void)
+static int __init xen_sysfs_pmu_init(void)
{
return sysfs_create_group(hypervisor_kobj, &xen_pmu_group);
}
@@ -474,18 +474,18 @@ static int __init hyper_sysfs_init(void)
ret = xen_sysfs_version_init();
if (ret)
goto version_out;
- ret = xen_compilation_init();
+ ret = xen_sysfs_compilation_init();
if (ret)
goto comp_out;
ret = xen_sysfs_uuid_init();
if (ret)
goto uuid_out;
- ret = xen_properties_init();
+ ret = xen_sysfs_properties_init();
if (ret)
goto prop_out;
#ifdef CONFIG_XEN_HAVE_VPMU
if (xen_initial_domain()) {
- ret = xen_pmu_init();
+ ret = xen_sysfs_pmu_init();
if (ret) {
sysfs_remove_group(hypervisor_kobj,
&xen_properties_group);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 258b7c325649..6331a95691a4 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -25,6 +25,8 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
+#define PCISTUB_DRIVER_NAME "pciback"
+
static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
@@ -149,13 +151,10 @@ static inline void pcistub_device_put(struct pcistub_device *psdev)
kref_put(&psdev->kref, pcistub_device_release);
}
-static struct pcistub_device *pcistub_device_find(int domain, int bus,
- int slot, int func)
+static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
+ int slot, int func)
{
- struct pcistub_device *psdev = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&pcistub_devices_lock, flags);
+ struct pcistub_device *psdev;
list_for_each_entry(psdev, &pcistub_devices, dev_list) {
if (psdev->dev != NULL
@@ -163,15 +162,25 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
&& bus == psdev->dev->bus->number
&& slot == PCI_SLOT(psdev->dev->devfn)
&& func == PCI_FUNC(psdev->dev->devfn)) {
- pcistub_device_get(psdev);
- goto out;
+ return psdev;
}
}
- /* didn't find it */
- psdev = NULL;
+ return NULL;
+}
+
+static struct pcistub_device *pcistub_device_find(int domain, int bus,
+ int slot, int func)
+{
+ struct pcistub_device *psdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ psdev = pcistub_device_find_locked(domain, bus, slot, func);
+ if (psdev)
+ pcistub_device_get(psdev);
-out:
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return psdev;
}
@@ -207,16 +216,9 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
spin_lock_irqsave(&pcistub_devices_lock, flags);
- list_for_each_entry(psdev, &pcistub_devices, dev_list) {
- if (psdev->dev != NULL
- && domain == pci_domain_nr(psdev->dev->bus)
- && bus == psdev->dev->bus->number
- && slot == PCI_SLOT(psdev->dev->devfn)
- && func == PCI_FUNC(psdev->dev->devfn)) {
- found_dev = pcistub_device_get_pci_dev(pdev, psdev);
- break;
- }
- }
+ psdev = pcistub_device_find_locked(domain, bus, slot, func);
+ if (psdev)
+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return found_dev;
@@ -478,15 +480,48 @@ static int __init pcistub_init_devices_late(void)
return 0;
}
-static int pcistub_seize(struct pci_dev *dev)
+static void pcistub_device_id_add_list(struct pcistub_device_id *new,
+ int domain, int bus, unsigned int devfn)
+{
+ struct pcistub_device_id *pci_dev_id;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+
+ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
+ if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
+ pci_dev_id->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ new->domain = domain;
+ new->bus = bus;
+ new->devfn = devfn;
+ list_add_tail(&new->slot_list, &pcistub_device_ids);
+ }
+
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ if (found)
+ kfree(new);
+}
+
+static int pcistub_seize(struct pci_dev *dev,
+ struct pcistub_device_id *pci_dev_id)
{
struct pcistub_device *psdev;
unsigned long flags;
int err = 0;
psdev = pcistub_device_alloc(dev);
- if (!psdev)
+ if (!psdev) {
+ kfree(pci_dev_id);
return -ENOMEM;
+ }
spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -507,8 +542,12 @@ static int pcistub_seize(struct pci_dev *dev)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- if (err)
+ if (err) {
+ kfree(pci_dev_id);
pcistub_device_put(psdev);
+ } else if (pci_dev_id)
+ pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
+ dev->bus->number, dev->devfn);
return err;
}
@@ -517,11 +556,16 @@ static int pcistub_seize(struct pci_dev *dev)
* other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err = 0;
+ int err = 0, match;
+ struct pcistub_device_id *pci_dev_id = NULL;
dev_dbg(&dev->dev, "probing...\n");
- if (pcistub_match(dev)) {
+ match = pcistub_match(dev);
+
+ if ((dev->driver_override &&
+ !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
+ match) {
if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
&& dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
@@ -532,8 +576,16 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto out;
}
+ if (!match) {
+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
+ if (!pci_dev_id) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
dev_info(&dev->dev, "seizing device\n");
- err = pcistub_seize(dev);
+ err = pcistub_seize(dev, pci_dev_id);
} else
/* Didn't find the device */
err = -ENODEV;
@@ -945,7 +997,7 @@ static const struct pci_error_handlers xen_pcibk_error_handler = {
static struct pci_driver xen_pcibk_pci_driver = {
/* The name should be xen_pciback, but until the tools are updated
* we will keep it as pciback. */
- .name = "pciback",
+ .name = PCISTUB_DRIVER_NAME,
.id_table = pcistub_ids,
.probe = pcistub_probe,
.remove = pcistub_remove,
@@ -1012,7 +1064,6 @@ static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
struct pcistub_device_id *pci_dev_id;
- unsigned long flags;
int rc = 0, devfn = PCI_DEVFN(slot, func);
if (slot < 0) {
@@ -1042,16 +1093,10 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
if (!pci_dev_id)
return -ENOMEM;
- pci_dev_id->domain = domain;
- pci_dev_id->bus = bus;
- pci_dev_id->devfn = devfn;
-
pr_debug("wants to seize %04x:%02x:%02x.%d\n",
domain, bus, slot, func);
- spin_lock_irqsave(&device_ids_lock, flags);
- list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
- spin_unlock_irqrestore(&device_ids_lock, flags);
+ pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);
return 0;
}
diff --git a/fs/Kconfig b/fs/Kconfig
index 3ef62bad8f2b..4bd03a2b0518 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -200,6 +200,9 @@ config HUGETLBFS
config HUGETLB_PAGE
def_bool HUGETLBFS
+config ARCH_HAS_GIGANTIC_PAGE
+ bool
+
source "fs/configfs/Kconfig"
source "fs/efivarfs/Kconfig"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index c7efddf6e038..4c09d93d9569 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -89,7 +89,7 @@ config BINFMT_SCRIPT
config BINFMT_FLAT
bool "Kernel support for flat binaries"
- depends on !MMU || M68K
+ depends on !MMU || ARM || M68K
depends on !FRV || BROKEN
help
Support uClinux FLAT format binaries.
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 7ef637d7f3a5..1e9d2f84e5b5 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -461,8 +461,8 @@ static void afs_callback_updater(struct work_struct *work)
*/
int __init afs_callback_update_init(void)
{
- afs_callback_update_worker =
- create_singlethread_workqueue("kafs_callbackd");
+ afs_callback_update_worker = alloc_ordered_workqueue("kafs_callbackd",
+ WQ_MEM_RECLAIM);
return afs_callback_update_worker ? 0 : -ENOMEM;
}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 85737e96ab8b..2037e7a77a37 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -17,19 +17,12 @@
#include "internal.h"
#include "afs_cm.h"
-#if 0
-struct workqueue_struct *afs_cm_workqueue;
-#endif /* 0 */
-
-static int afs_deliver_cb_init_call_back_state(struct afs_call *,
- struct sk_buff *, bool);
-static int afs_deliver_cb_init_call_back_state3(struct afs_call *,
- struct sk_buff *, bool);
-static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
-static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
-static int afs_deliver_cb_probe_uuid(struct afs_call *, struct sk_buff *, bool);
-static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *,
- struct sk_buff *, bool);
+static int afs_deliver_cb_init_call_back_state(struct afs_call *);
+static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
+static int afs_deliver_cb_probe(struct afs_call *);
+static int afs_deliver_cb_callback(struct afs_call *);
+static int afs_deliver_cb_probe_uuid(struct afs_call *);
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *);
static void afs_cm_destructor(struct afs_call *);
/*
@@ -134,7 +127,7 @@ static void afs_cm_destructor(struct afs_call *call)
* received. The step number here must match the final number in
* afs_deliver_cb_callback().
*/
- if (call->unmarshall == 6) {
+ if (call->unmarshall == 5) {
ASSERT(call->server && call->count && call->request);
afs_break_callbacks(call->server, call->count, call->request);
}
@@ -168,27 +161,27 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
/*
* deliver request data to a CB.CallBack call
*/
-static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
- bool last)
+static int afs_deliver_cb_callback(struct afs_call *call)
{
+ struct sockaddr_rxrpc srx;
struct afs_callback *cb;
struct afs_server *server;
- struct in_addr addr;
__be32 *bp;
u32 tmp;
int ret, loop;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
case 0:
+ rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
call->offset = 0;
call->unmarshall++;
/* extract the FID array and its count in two steps */
case 1:
_debug("extract FID count");
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -205,8 +198,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
case 2:
_debug("extract FID array");
- ret = afs_extract_data(call, skb, last, call->buffer,
- call->count * 3 * 4);
+ ret = afs_extract_data(call, call->buffer,
+ call->count * 3 * 4, true);
if (ret < 0)
return ret;
@@ -232,7 +225,7 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
/* extract the callback array and its count in two steps */
case 3:
_debug("extract CB count");
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -242,13 +235,11 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
return -EBADMSG;
call->offset = 0;
call->unmarshall++;
- if (tmp == 0)
- goto empty_cb_array;
case 4:
_debug("extract CB array");
- ret = afs_extract_data(call, skb, last, call->request,
- call->count * 3 * 4);
+ ret = afs_extract_data(call, call->buffer,
+ call->count * 3 * 4, false);
if (ret < 0)
return ret;
@@ -261,15 +252,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
cb->type = ntohl(*bp++);
}
- empty_cb_array:
call->offset = 0;
call->unmarshall++;
- case 5:
- ret = afs_data_complete(call, skb, last);
- if (ret < 0)
- return ret;
-
/* Record that the message was unmarshalled successfully so
* that the call destructor can know do the callback breaking
* work, even if the final ACK isn't received.
@@ -278,17 +263,15 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
* updated also.
*/
call->unmarshall++;
- case 6:
+ case 5:
break;
}
-
call->state = AFS_CALL_REPLYING;
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- memcpy(&addr, &ip_hdr(skb)->saddr, 4);
- server = afs_find_server(&addr);
+ server = afs_find_server(&srx);
if (!server)
return -ENOTCONN;
call->server = server;
@@ -315,17 +298,17 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
/*
* deliver request data to a CB.InitCallBackState call
*/
-static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
- struct sk_buff *skb,
- bool last)
+static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
+ struct sockaddr_rxrpc srx;
struct afs_server *server;
- struct in_addr addr;
int ret;
- _enter(",{%u},%d", skb->len, last);
+ _enter("");
+
+ rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
- ret = afs_data_complete(call, skb, last);
+ ret = afs_extract_data(call, NULL, 0, false);
if (ret < 0)
return ret;
@@ -334,8 +317,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- memcpy(&addr, &ip_hdr(skb)->saddr, 4);
- server = afs_find_server(&addr);
+ server = afs_find_server(&srx);
if (!server)
return -ENOTCONN;
call->server = server;
@@ -348,27 +330,68 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
/*
* deliver request data to a CB.InitCallBackState3 call
*/
-static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
- struct sk_buff *skb,
- bool last)
+static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
{
+ struct sockaddr_rxrpc srx;
struct afs_server *server;
- struct in_addr addr;
+ struct afs_uuid *r;
+ unsigned loop;
+ __be32 *b;
+ int ret;
+
+ _enter("");
+
+ rxrpc_kernel_get_peer(afs_socket, call->rxcall, &srx);
+
+ _enter("{%u}", call->unmarshall);
- _enter(",{%u},%d", skb->len, last);
+ switch (call->unmarshall) {
+ case 0:
+ call->offset = 0;
+ call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL);
+ if (!call->buffer)
+ return -ENOMEM;
+ call->unmarshall++;
- /* There are some arguments that we ignore */
- afs_data_consumed(call, skb);
- if (!last)
- return -EAGAIN;
+ case 1:
+ _debug("extract UUID");
+ ret = afs_extract_data(call, call->buffer,
+ 11 * sizeof(__be32), false);
+ switch (ret) {
+ case 0: break;
+ case -EAGAIN: return 0;
+ default: return ret;
+ }
+
+ _debug("unmarshall UUID");
+ call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+ if (!call->request)
+ return -ENOMEM;
+
+ b = call->buffer;
+ r = call->request;
+ r->time_low = ntohl(b[0]);
+ r->time_mid = ntohl(b[1]);
+ r->time_hi_and_version = ntohl(b[2]);
+ r->clock_seq_hi_and_reserved = ntohl(b[3]);
+ r->clock_seq_low = ntohl(b[4]);
+
+ for (loop = 0; loop < 6; loop++)
+ r->node[loop] = ntohl(b[loop + 5]);
+
+ call->offset = 0;
+ call->unmarshall++;
+
+ case 2:
+ break;
+ }
/* no unmarshalling required */
call->state = AFS_CALL_REPLYING;
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- memcpy(&addr, &ip_hdr(skb)->saddr, 4);
- server = afs_find_server(&addr);
+ server = afs_find_server(&srx);
if (!server)
return -ENOTCONN;
call->server = server;
@@ -393,14 +416,13 @@ static void SRXAFSCB_Probe(struct work_struct *work)
/*
* deliver request data to a CB.Probe call
*/
-static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
- bool last)
+static int afs_deliver_cb_probe(struct afs_call *call)
{
int ret;
- _enter(",{%u},%d", skb->len, last);
+ _enter("");
- ret = afs_data_complete(call, skb, last);
+ ret = afs_extract_data(call, NULL, 0, false);
if (ret < 0)
return ret;
@@ -426,7 +448,6 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
_enter("");
-
if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0)
reply.match = htonl(0);
else
@@ -439,19 +460,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
/*
* deliver request data to a CB.ProbeUuid call
*/
-static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
- bool last)
+static int afs_deliver_cb_probe_uuid(struct afs_call *call)
{
struct afs_uuid *r;
unsigned loop;
__be32 *b;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
-
- ret = afs_data_complete(call, skb, last);
- if (ret < 0)
- return ret;
+ _enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
case 0:
@@ -463,8 +479,8 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
case 1:
_debug("extract UUID");
- ret = afs_extract_data(call, skb, last, call->buffer,
- 11 * sizeof(__be32));
+ ret = afs_extract_data(call, call->buffer,
+ 11 * sizeof(__be32), false);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
@@ -491,16 +507,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
call->unmarshall++;
case 2:
- _debug("trailer");
- if (skb->len != 0)
- return -EBADMSG;
break;
}
- ret = afs_data_complete(call, skb, last);
- if (ret < 0)
- return ret;
-
call->state = AFS_CALL_REPLYING;
INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
@@ -574,14 +583,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
/*
* deliver request data to a CB.TellMeAboutYourself call
*/
-static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
{
int ret;
- _enter(",{%u},%d", skb->len, last);
+ _enter("");
- ret = afs_data_complete(call, skb, last);
+ ret = afs_extract_data(call, NULL, 0, false);
if (ret < 0)
return ret;
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index d91a9c9cfbd0..3191dff2c156 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -36,8 +36,8 @@ static int afs_init_lock_manager(void)
if (!afs_lock_manager) {
mutex_lock(&afs_lock_manager_mutex);
if (!afs_lock_manager) {
- afs_lock_manager =
- create_singlethread_workqueue("kafs_lockd");
+ afs_lock_manager = alloc_workqueue("kafs_lockd",
+ WQ_MEM_RECLAIM, 0);
if (!afs_lock_manager)
ret = -ENOMEM;
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 9312b92e54be..96f4d764d1a6 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -235,16 +235,15 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
/*
* deliver reply data to an FS.FetchStatus
*/
-static int afs_deliver_fs_fetch_status(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_fetch_status(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter(",,%u", last);
+ _enter("");
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -307,8 +306,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
/*
* deliver reply data to an FS.FetchData
*/
-static int afs_deliver_fs_fetch_data(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
@@ -316,7 +314,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
void *buffer;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
case 0:
@@ -332,7 +330,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
* client) */
case 1:
_debug("extract data length (MSW)");
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -347,7 +345,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
/* extract the returned data length */
case 2:
_debug("extract data length");
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -363,10 +361,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
_debug("extract data");
if (call->count > 0) {
page = call->reply3;
- buffer = kmap_atomic(page);
- ret = afs_extract_data(call, skb, last, buffer,
- call->count);
- kunmap_atomic(buffer);
+ buffer = kmap(page);
+ ret = afs_extract_data(call, buffer,
+ call->count, true);
+ kunmap(buffer);
if (ret < 0)
return ret;
}
@@ -376,8 +374,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
/* extract the metadata */
case 4:
- ret = afs_extract_data(call, skb, last, call->buffer,
- (21 + 3 + 6) * 4);
+ ret = afs_extract_data(call, call->buffer,
+ (21 + 3 + 6) * 4, false);
if (ret < 0)
return ret;
@@ -391,18 +389,15 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
call->unmarshall++;
case 5:
- ret = afs_data_complete(call, skb, last);
- if (ret < 0)
- return ret;
break;
}
if (call->count < PAGE_SIZE) {
_debug("clear");
page = call->reply3;
- buffer = kmap_atomic(page);
+ buffer = kmap(page);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap_atomic(buffer);
+ kunmap(buffer);
}
_leave(" = 0 [done]");
@@ -515,13 +510,12 @@ int afs_fs_fetch_data(struct afs_server *server,
/*
* deliver reply data to an FS.GiveUpCallBacks
*/
-static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_give_up_callbacks(struct afs_call *call)
{
- _enter(",{%u},%d", skb->len, last);
+ _enter("");
/* shouldn't be any reply data */
- return afs_data_complete(call, skb, last);
+ return afs_extract_data(call, NULL, 0, false);
}
/*
@@ -599,16 +593,15 @@ int afs_fs_give_up_callbacks(struct afs_server *server,
/*
* deliver reply data to an FS.CreateFile or an FS.MakeDir
*/
-static int afs_deliver_fs_create_vnode(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_create_vnode(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -696,16 +689,15 @@ int afs_fs_create(struct afs_server *server,
/*
* deliver reply data to an FS.RemoveFile or FS.RemoveDir
*/
-static int afs_deliver_fs_remove(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_remove(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -777,16 +769,15 @@ int afs_fs_remove(struct afs_server *server,
/*
* deliver reply data to an FS.Link
*/
-static int afs_deliver_fs_link(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_link(struct afs_call *call)
{
struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -863,16 +854,15 @@ int afs_fs_link(struct afs_server *server,
/*
* deliver reply data to an FS.Symlink
*/
-static int afs_deliver_fs_symlink(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_symlink(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -968,16 +958,15 @@ int afs_fs_symlink(struct afs_server *server,
/*
* deliver reply data to an FS.Rename
*/
-static int afs_deliver_fs_rename(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_rename(struct afs_call *call)
{
struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -1072,16 +1061,15 @@ int afs_fs_rename(struct afs_server *server,
/*
* deliver reply data to an FS.StoreData
*/
-static int afs_deliver_fs_store_data(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_store_data(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter(",,%u", last);
+ _enter("");
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -1251,17 +1239,16 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
/*
* deliver reply data to an FS.StoreStatus
*/
-static int afs_deliver_fs_store_status(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_store_status(struct afs_call *call)
{
afs_dataversion_t *store_version;
struct afs_vnode *vnode = call->reply;
const __be32 *bp;
int ret;
- _enter(",,%u", last);
+ _enter("");
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
@@ -1443,14 +1430,13 @@ int afs_fs_setattr(struct afs_server *server, struct key *key,
/*
* deliver reply data to an FS.GetVolumeStatus
*/
-static int afs_deliver_fs_get_volume_status(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_get_volume_status(struct afs_call *call)
{
const __be32 *bp;
char *p;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
case 0:
@@ -1460,8 +1446,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the returned status record */
case 1:
_debug("extract status");
- ret = afs_extract_data(call, skb, last, call->buffer,
- 12 * 4);
+ ret = afs_extract_data(call, call->buffer,
+ 12 * 4, true);
if (ret < 0)
return ret;
@@ -1472,7 +1458,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the volume name length */
case 2:
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -1487,8 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 3:
_debug("extract volname");
if (call->count > 0) {
- ret = afs_extract_data(call, skb, last, call->reply3,
- call->count);
+ ret = afs_extract_data(call, call->reply3,
+ call->count, true);
if (ret < 0)
return ret;
}
@@ -1508,8 +1494,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
call->count = 4 - (call->count & 3);
case 4:
- ret = afs_extract_data(call, skb, last, call->buffer,
- call->count);
+ ret = afs_extract_data(call, call->buffer,
+ call->count, true);
if (ret < 0)
return ret;
@@ -1519,7 +1505,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the offline message length */
case 5:
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -1534,8 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 6:
_debug("extract offline");
if (call->count > 0) {
- ret = afs_extract_data(call, skb, last, call->reply3,
- call->count);
+ ret = afs_extract_data(call, call->reply3,
+ call->count, true);
if (ret < 0)
return ret;
}
@@ -1555,8 +1541,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
call->count = 4 - (call->count & 3);
case 7:
- ret = afs_extract_data(call, skb, last, call->buffer,
- call->count);
+ ret = afs_extract_data(call, call->buffer,
+ call->count, true);
if (ret < 0)
return ret;
@@ -1566,7 +1552,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
/* extract the message of the day length */
case 8:
- ret = afs_extract_data(call, skb, last, &call->tmp, 4);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
@@ -1581,8 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
case 9:
_debug("extract motd");
if (call->count > 0) {
- ret = afs_extract_data(call, skb, last, call->reply3,
- call->count);
+ ret = afs_extract_data(call, call->reply3,
+ call->count, true);
if (ret < 0)
return ret;
}
@@ -1595,26 +1581,17 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
call->unmarshall++;
/* extract the message of the day padding */
- if ((call->count & 3) == 0) {
- call->unmarshall++;
- goto no_motd_padding;
- }
- call->count = 4 - (call->count & 3);
+ call->count = (4 - (call->count & 3)) & 3;
case 10:
- ret = afs_extract_data(call, skb, last, call->buffer,
- call->count);
+ ret = afs_extract_data(call, call->buffer,
+ call->count, false);
if (ret < 0)
return ret;
call->offset = 0;
call->unmarshall++;
- no_motd_padding:
-
case 11:
- ret = afs_data_complete(call, skb, last);
- if (ret < 0)
- return ret;
break;
}
@@ -1685,15 +1662,14 @@ int afs_fs_get_volume_status(struct afs_server *server,
/*
* deliver reply data to an FS.SetLock, FS.ExtendLock or FS.ReleaseLock
*/
-static int afs_deliver_fs_xxxx_lock(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
{
const __be32 *bp;
int ret;
- _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+ _enter("{%u}", call->unmarshall);
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index df976b2a7f40..5497c8496055 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -13,13 +13,13 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/skbuff.h>
#include <linux/rxrpc.h>
#include <linux/key.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/fscache.h>
#include <linux/backing-dev.h>
+#include <net/af_rxrpc.h>
#include "afs.h"
#include "afs_vl.h"
@@ -56,7 +56,7 @@ struct afs_mount_params {
*/
struct afs_wait_mode {
/* RxRPC received message notification */
- void (*rx_wakeup)(struct afs_call *call);
+ rxrpc_notify_rx_t notify_rx;
/* synchronous call waiter and call dispatched notification */
int (*wait)(struct afs_call *call);
@@ -75,10 +75,8 @@ struct afs_call {
const struct afs_call_type *type; /* type of call */
const struct afs_wait_mode *wait_mode; /* completion wait mode */
wait_queue_head_t waitq; /* processes awaiting completion */
- void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
struct work_struct async_work; /* asynchronous work processor */
struct work_struct work; /* actual work processor */
- struct sk_buff_head rx_queue; /* received packets */
struct rxrpc_call *rxcall; /* RxRPC call handle */
struct key *key; /* security for this call */
struct afs_server *server; /* server affected by incoming CM call */
@@ -92,6 +90,7 @@ struct afs_call {
void *reply4; /* reply buffer (fourth part) */
pgoff_t first; /* first page in mapping to deal with */
pgoff_t last; /* last page in mapping to deal with */
+ size_t offset; /* offset into received data store */
enum { /* call state */
AFS_CALL_REQUESTING, /* request is being sent for outgoing call */
AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */
@@ -99,21 +98,18 @@ struct afs_call {
AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */
AFS_CALL_REPLYING, /* replying to incoming call */
AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */
- AFS_CALL_COMPLETE, /* successfully completed */
- AFS_CALL_BUSY, /* server was busy */
- AFS_CALL_ABORTED, /* call was aborted */
- AFS_CALL_ERROR, /* call failed due to error */
+ AFS_CALL_COMPLETE, /* Completed or failed */
} state;
int error; /* error code */
+ u32 abort_code; /* Remote abort ID or 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
- unsigned reply_size; /* current size of reply */
unsigned first_offset; /* offset into mapping[first] */
unsigned last_to; /* amount of mapping[last] */
- unsigned offset; /* offset into received data store */
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
+ bool need_attention; /* T if RxRPC poked us */
u16 service_id; /* RxRPC service ID to call */
__be16 port; /* target UDP port */
__be32 operation_ID; /* operation ID for an incoming call */
@@ -128,8 +124,7 @@ struct afs_call_type {
/* deliver request or reply data to an call
* - returning an error will cause the call to be aborted
*/
- int (*deliver)(struct afs_call *call, struct sk_buff *skb,
- bool last);
+ int (*deliver)(struct afs_call *call);
/* map an abort code to an error number */
int (*abort_to_error)(u32 abort_code);
@@ -607,29 +602,22 @@ extern void afs_proc_cell_remove(struct afs_cell *);
/*
* rxrpc.c
*/
+extern struct socket *afs_socket;
+
extern int afs_open_socket(void);
extern void afs_close_socket(void);
-extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
const struct afs_wait_mode *);
extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
size_t, size_t);
extern void afs_flat_call_destructor(struct afs_call *);
-extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
-extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
- size_t);
+extern int afs_extract_data(struct afs_call *, void *, size_t, bool);
-static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
- bool last)
+static inline int afs_transfer_reply(struct afs_call *call)
{
- if (skb->len > 0)
- return -EBADMSG;
- afs_data_consumed(call, skb);
- if (!last)
- return -EAGAIN;
- return 0;
+ return afs_extract_data(call, call->buffer, call->reply_max, false);
}
/*
@@ -654,7 +642,7 @@ do { \
extern struct afs_server *afs_lookup_server(struct afs_cell *,
const struct in_addr *);
-extern struct afs_server *afs_find_server(const struct in_addr *);
+extern struct afs_server *afs_find_server(const struct sockaddr_rxrpc *);
extern void afs_put_server(struct afs_server *);
extern void __exit afs_purge_servers(void);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 35de0c04729f..0b187ef3b5b7 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/sched.h>
+#include <linux/random.h>
#include "internal.h"
MODULE_DESCRIPTION("AFS Client File System");
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 14d04c848465..59bdaa7527b6 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -16,34 +16,36 @@
#include "internal.h"
#include "afs_cm.h"
-static struct socket *afs_socket; /* my RxRPC socket */
+struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
+static struct afs_call *afs_spare_incoming_call;
static atomic_t afs_outstanding_calls;
-static atomic_t afs_outstanding_skbs;
-static void afs_wake_up_call_waiter(struct afs_call *);
+static void afs_free_call(struct afs_call *);
+static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_wait_for_call_to_complete(struct afs_call *);
-static void afs_wake_up_async_call(struct afs_call *);
+static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct afs_call *);
-static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
-static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
+static void afs_process_async_call(struct work_struct *);
+static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
+static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
+static int afs_deliver_cm_op_id(struct afs_call *);
/* synchronous call management */
const struct afs_wait_mode afs_sync_call = {
- .rx_wakeup = afs_wake_up_call_waiter,
+ .notify_rx = afs_wake_up_call_waiter,
.wait = afs_wait_for_call_to_complete,
};
/* asynchronous call management */
const struct afs_wait_mode afs_async_call = {
- .rx_wakeup = afs_wake_up_async_call,
+ .notify_rx = afs_wake_up_async_call,
.wait = afs_dont_wait_for_call_to_complete,
};
/* asynchronous incoming call management */
static const struct afs_wait_mode afs_async_incoming_call = {
- .rx_wakeup = afs_wake_up_async_call,
+ .notify_rx = afs_wake_up_async_call,
};
/* asynchronous incoming call initial processing */
@@ -53,17 +55,9 @@ static const struct afs_call_type afs_RXCMxxxx = {
.abort_to_error = afs_abort_to_error,
};
-static void afs_collect_incoming_call(struct work_struct *);
+static void afs_charge_preallocation(struct work_struct *);
-static struct sk_buff_head afs_incoming_calls;
-static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
-
-static void afs_async_workfn(struct work_struct *work)
-{
- struct afs_call *call = container_of(work, struct afs_call, async_work);
-
- call->async_workfn(call);
-}
+static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);
static int afs_wait_atomic_t(atomic_t *p)
{
@@ -83,10 +77,8 @@ int afs_open_socket(void)
_enter("");
- skb_queue_head_init(&afs_incoming_calls);
-
ret = -ENOMEM;
- afs_async_calls = create_singlethread_workqueue("kafsd");
+ afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
if (!afs_async_calls)
goto error_0;
@@ -110,13 +102,15 @@ int afs_open_socket(void)
if (ret < 0)
goto error_2;
+ rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
+ afs_rx_discard_new_call);
+
ret = kernel_listen(socket, INT_MAX);
if (ret < 0)
goto error_2;
- rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
-
afs_socket = socket;
+ afs_charge_preallocation(NULL);
_leave(" = 0");
return 0;
@@ -136,52 +130,28 @@ void afs_close_socket(void)
{
_enter("");
+ if (afs_spare_incoming_call) {
+ atomic_inc(&afs_outstanding_calls);
+ afs_free_call(afs_spare_incoming_call);
+ afs_spare_incoming_call = NULL;
+ }
+
+ _debug("outstanding %u", atomic_read(&afs_outstanding_calls));
wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
_debug("no outstanding calls");
+ flush_workqueue(afs_async_calls);
+ kernel_sock_shutdown(afs_socket, SHUT_RDWR);
+ flush_workqueue(afs_async_calls);
sock_release(afs_socket);
_debug("dework");
destroy_workqueue(afs_async_calls);
-
- ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
_leave("");
}
/*
- * Note that the data in a socket buffer is now consumed.
- */
-void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
-{
- if (!skb) {
- _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
- dump_stack();
- } else {
- _debug("DLVR %p{%u} [%d]",
- skb, skb->mark, atomic_read(&afs_outstanding_skbs));
- rxrpc_kernel_data_consumed(call->rxcall, skb);
- }
-}
-
-/*
- * free a socket buffer
- */
-static void afs_free_skb(struct sk_buff *skb)
-{
- if (!skb) {
- _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
- dump_stack();
- } else {
- _debug("FREE %p{%u} [%d]",
- skb, skb->mark, atomic_read(&afs_outstanding_skbs));
- if (atomic_dec_return(&afs_outstanding_skbs) == -1)
- BUG();
- rxrpc_kernel_free_skb(skb);
- }
-}
-
-/*
* free a call
*/
static void afs_free_call(struct afs_call *call)
@@ -191,7 +161,6 @@ static void afs_free_call(struct afs_call *call)
ASSERTCMP(call->rxcall, ==, NULL);
ASSERT(!work_pending(&call->async_work));
- ASSERT(skb_queue_empty(&call->rx_queue));
ASSERT(call->type->name != NULL);
kfree(call->request);
@@ -207,7 +176,7 @@ static void afs_free_call(struct afs_call *call)
static void afs_end_call_nofree(struct afs_call *call)
{
if (call->rxcall) {
- rxrpc_kernel_end_call(call->rxcall);
+ rxrpc_kernel_end_call(afs_socket, call->rxcall);
call->rxcall = NULL;
}
if (call->type->destructor)
@@ -227,7 +196,7 @@ static void afs_end_call(struct afs_call *call)
* allocate a call with flat request and reply buffers
*/
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
- size_t request_size, size_t reply_size)
+ size_t request_size, size_t reply_max)
{
struct afs_call *call;
@@ -241,7 +210,7 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
call->type = type;
call->request_size = request_size;
- call->reply_max = reply_size;
+ call->reply_max = reply_max;
if (request_size) {
call->request = kmalloc(request_size, GFP_NOFS);
@@ -249,14 +218,13 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
goto nomem_free;
}
- if (reply_size) {
- call->buffer = kmalloc(reply_size, GFP_NOFS);
+ if (reply_max) {
+ call->buffer = kmalloc(reply_max, GFP_NOFS);
if (!call->buffer)
goto nomem_free;
}
init_waitqueue_head(&call->waitq);
- skb_queue_head_init(&call->rx_queue);
return call;
nomem_free:
@@ -325,8 +293,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
* returns from sending the request */
if (first + loop >= last)
call->state = AFS_CALL_AWAIT_REPLY;
- ret = rxrpc_kernel_send_data(call->rxcall, msg,
- to - offset);
+ ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+ msg, to - offset);
kunmap(pages[loop]);
if (ret < 0)
break;
@@ -354,7 +322,6 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct msghdr msg;
struct kvec iov[1];
int ret;
- struct sk_buff *skb;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -366,8 +333,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
atomic_read(&afs_outstanding_calls));
call->wait_mode = wait_mode;
- call->async_workfn = afs_process_async_call;
- INIT_WORK(&call->async_work, afs_async_workfn);
+ INIT_WORK(&call->async_work, afs_process_async_call);
memset(&srx, 0, sizeof(srx));
srx.srx_family = AF_RXRPC;
@@ -380,7 +346,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
/* create a call */
rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
- (unsigned long) call, gfp);
+ (unsigned long) call, gfp,
+ wait_mode->notify_rx);
call->key = NULL;
if (IS_ERR(rxcall)) {
ret = PTR_ERR(rxcall);
@@ -406,7 +373,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
* request */
if (!call->send_pages)
call->state = AFS_CALL_AWAIT_REPLY;
- ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
+ ret = rxrpc_kernel_send_data(afs_socket, rxcall,
+ &msg, call->request_size);
if (ret < 0)
goto error_do_abort;
@@ -421,9 +389,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
return wait_mode->wait(call);
error_do_abort:
- rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
- while ((skb = skb_dequeue(&call->rx_queue)))
- afs_free_skb(skb);
+ rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
error_kill_call:
afs_end_call(call);
_leave(" = %d", ret);
@@ -431,140 +397,77 @@ error_kill_call:
}
/*
- * Handles intercepted messages that were arriving in the socket's Rx queue.
- *
- * Called from the AF_RXRPC call processor in waitqueue process context. For
- * each call, it is guaranteed this will be called in order of packet to be
- * delivered.
- */
-static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
- struct sk_buff *skb)
-{
- struct afs_call *call = (struct afs_call *) user_call_ID;
-
- _enter("%p,,%u", call, skb->mark);
-
- _debug("ICPT %p{%u} [%d]",
- skb, skb->mark, atomic_read(&afs_outstanding_skbs));
-
- ASSERTCMP(sk, ==, afs_socket->sk);
- atomic_inc(&afs_outstanding_skbs);
-
- if (!call) {
- /* its an incoming call for our callback service */
- skb_queue_tail(&afs_incoming_calls, skb);
- queue_work(afs_wq, &afs_collect_incoming_call_work);
- } else {
- /* route the messages directly to the appropriate call */
- skb_queue_tail(&call->rx_queue, skb);
- call->wait_mode->rx_wakeup(call);
- }
-
- _leave("");
-}
-
-/*
* deliver messages to a call
*/
static void afs_deliver_to_call(struct afs_call *call)
{
- struct sk_buff *skb;
- bool last;
u32 abort_code;
int ret;
- _enter("");
-
- while ((call->state == AFS_CALL_AWAIT_REPLY ||
- call->state == AFS_CALL_AWAIT_OP_ID ||
- call->state == AFS_CALL_AWAIT_REQUEST ||
- call->state == AFS_CALL_AWAIT_ACK) &&
- (skb = skb_dequeue(&call->rx_queue))) {
- switch (skb->mark) {
- case RXRPC_SKB_MARK_DATA:
- _debug("Rcv DATA");
- last = rxrpc_kernel_is_data_last(skb);
- ret = call->type->deliver(call, skb, last);
- switch (ret) {
- case -EAGAIN:
- if (last) {
- _debug("short data");
- goto unmarshal_error;
- }
- break;
- case 0:
- ASSERT(last);
- if (call->state == AFS_CALL_AWAIT_REPLY)
- call->state = AFS_CALL_COMPLETE;
- break;
- case -ENOTCONN:
- abort_code = RX_CALL_DEAD;
- goto do_abort;
- case -ENOTSUPP:
- abort_code = RX_INVALID_OPERATION;
- goto do_abort;
- default:
- unmarshal_error:
- abort_code = RXGEN_CC_UNMARSHAL;
- if (call->state != AFS_CALL_AWAIT_REPLY)
- abort_code = RXGEN_SS_UNMARSHAL;
- do_abort:
- rxrpc_kernel_abort_call(call->rxcall,
- abort_code);
- call->error = ret;
- call->state = AFS_CALL_ERROR;
- break;
+ _enter("%s", call->type->name);
+
+ while (call->state == AFS_CALL_AWAIT_REPLY ||
+ call->state == AFS_CALL_AWAIT_OP_ID ||
+ call->state == AFS_CALL_AWAIT_REQUEST ||
+ call->state == AFS_CALL_AWAIT_ACK
+ ) {
+ if (call->state == AFS_CALL_AWAIT_ACK) {
+ size_t offset = 0;
+ ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
+ NULL, 0, &offset, false,
+ &call->abort_code);
+ if (ret == -EINPROGRESS || ret == -EAGAIN)
+ return;
+ if (ret == 1) {
+ call->state = AFS_CALL_COMPLETE;
+ goto done;
}
- break;
- case RXRPC_SKB_MARK_FINAL_ACK:
- _debug("Rcv ACK");
- call->state = AFS_CALL_COMPLETE;
- break;
- case RXRPC_SKB_MARK_BUSY:
- _debug("Rcv BUSY");
- call->error = -EBUSY;
- call->state = AFS_CALL_BUSY;
- break;
- case RXRPC_SKB_MARK_REMOTE_ABORT:
- abort_code = rxrpc_kernel_get_abort_code(skb);
- call->error = call->type->abort_to_error(abort_code);
- call->state = AFS_CALL_ABORTED;
- _debug("Rcv ABORT %u -> %d", abort_code, call->error);
- break;
- case RXRPC_SKB_MARK_LOCAL_ABORT:
- abort_code = rxrpc_kernel_get_abort_code(skb);
- call->error = call->type->abort_to_error(abort_code);
- call->state = AFS_CALL_ABORTED;
- _debug("Loc ABORT %u -> %d", abort_code, call->error);
- break;
- case RXRPC_SKB_MARK_NET_ERROR:
- call->error = -rxrpc_kernel_get_error_number(skb);
- call->state = AFS_CALL_ERROR;
- _debug("Rcv NET ERROR %d", call->error);
- break;
- case RXRPC_SKB_MARK_LOCAL_ERROR:
- call->error = -rxrpc_kernel_get_error_number(skb);
- call->state = AFS_CALL_ERROR;
- _debug("Rcv LOCAL ERROR %d", call->error);
- break;
- default:
- BUG();
- break;
+ return;
}
- afs_free_skb(skb);
- }
-
- /* make sure the queue is empty if the call is done with (we might have
- * aborted the call early because of an unmarshalling error) */
- if (call->state >= AFS_CALL_COMPLETE) {
- while ((skb = skb_dequeue(&call->rx_queue)))
- afs_free_skb(skb);
- if (call->incoming)
- afs_end_call(call);
+ ret = call->type->deliver(call);
+ switch (ret) {
+ case 0:
+ if (call->state == AFS_CALL_AWAIT_REPLY)
+ call->state = AFS_CALL_COMPLETE;
+ goto done;
+ case -EINPROGRESS:
+ case -EAGAIN:
+ goto out;
+ case -ENOTCONN:
+ abort_code = RX_CALL_DEAD;
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ abort_code, -ret, "KNC");
+ goto do_abort;
+ case -ENOTSUPP:
+ abort_code = RX_INVALID_OPERATION;
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ abort_code, -ret, "KIV");
+ goto do_abort;
+ case -ENODATA:
+ case -EBADMSG:
+ case -EMSGSIZE:
+ default:
+ abort_code = RXGEN_CC_UNMARSHAL;
+ if (call->state != AFS_CALL_AWAIT_REPLY)
+ abort_code = RXGEN_SS_UNMARSHAL;
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ abort_code, EBADMSG, "KUM");
+ goto do_abort;
+ }
}
+done:
+ if (call->state == AFS_CALL_COMPLETE && call->incoming)
+ afs_end_call(call);
+out:
_leave("");
+ return;
+
+do_abort:
+ call->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ goto done;
}
/*
@@ -572,7 +475,7 @@ static void afs_deliver_to_call(struct afs_call *call)
*/
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
- struct sk_buff *skb;
+ const char *abort_why;
int ret;
DECLARE_WAITQUEUE(myself, current);
@@ -584,15 +487,18 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
set_current_state(TASK_INTERRUPTIBLE);
/* deliver any messages that are in the queue */
- if (!skb_queue_empty(&call->rx_queue)) {
+ if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
+ call->need_attention = false;
__set_current_state(TASK_RUNNING);
afs_deliver_to_call(call);
continue;
}
+ abort_why = "KWC";
ret = call->error;
- if (call->state >= AFS_CALL_COMPLETE)
+ if (call->state == AFS_CALL_COMPLETE)
break;
+ abort_why = "KWI";
ret = -EINTR;
if (signal_pending(current))
break;
@@ -605,9 +511,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
/* kill the call */
if (call->state < AFS_CALL_COMPLETE) {
_debug("call incomplete");
- rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
- while ((skb = skb_dequeue(&call->rx_queue)))
- afs_free_skb(skb);
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ RX_CALL_DEAD, -ret, abort_why);
}
_debug("call complete");
@@ -619,17 +524,24 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
/*
* wake up a waiting call
*/
-static void afs_wake_up_call_waiter(struct afs_call *call)
+static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
+ unsigned long call_user_ID)
{
+ struct afs_call *call = (struct afs_call *)call_user_ID;
+
+ call->need_attention = true;
wake_up(&call->waitq);
}
/*
* wake up an asynchronous call
*/
-static void afs_wake_up_async_call(struct afs_call *call)
+static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
+ unsigned long call_user_ID)
{
- _enter("");
+ struct afs_call *call = (struct afs_call *)call_user_ID;
+
+ call->need_attention = true;
queue_work(afs_async_calls, &call->async_work);
}
@@ -647,8 +559,10 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
/*
* delete an asynchronous call
*/
-static void afs_delete_async_call(struct afs_call *call)
+static void afs_delete_async_call(struct work_struct *work)
{
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
+
_enter("");
afs_free_call(call);
@@ -658,17 +572,19 @@ static void afs_delete_async_call(struct afs_call *call)
/*
* perform processing on an asynchronous call
- * - on a multiple-thread workqueue this work item may try to run on several
- * CPUs at the same time
*/
-static void afs_process_async_call(struct afs_call *call)
+static void afs_process_async_call(struct work_struct *work)
{
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
+
_enter("");
- if (!skb_queue_empty(&call->rx_queue))
+ if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
+ call->need_attention = false;
afs_deliver_to_call(call);
+ }
- if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
+ if (call->state == AFS_CALL_COMPLETE && call->wait_mode) {
if (call->wait_mode->async_complete)
call->wait_mode->async_complete(call->reply,
call->error);
@@ -679,122 +595,93 @@ static void afs_process_async_call(struct afs_call *call)
/* we can't just delete the call because the work item may be
* queued */
- call->async_workfn = afs_delete_async_call;
+ call->async_work.func = afs_delete_async_call;
queue_work(afs_async_calls, &call->async_work);
}
_leave("");
}
-/*
- * Empty a socket buffer into a flat reply buffer.
- */
-int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
+static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
{
- size_t len = skb->len;
-
- if (len > call->reply_max - call->reply_size) {
- _leave(" = -EBADMSG [%zu > %u]",
- len, call->reply_max - call->reply_size);
- return -EBADMSG;
- }
+ struct afs_call *call = (struct afs_call *)user_call_ID;
- if (len > 0) {
- if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
- len) < 0)
- BUG();
- call->reply_size += len;
- }
-
- afs_data_consumed(call, skb);
- if (!last)
- return -EAGAIN;
-
- if (call->reply_size != call->reply_max) {
- _leave(" = -EBADMSG [%u != %u]",
- call->reply_size, call->reply_max);
- return -EBADMSG;
- }
- return 0;
+ call->rxcall = rxcall;
}
/*
- * accept the backlog of incoming calls
+ * Charge the incoming call preallocation.
*/
-static void afs_collect_incoming_call(struct work_struct *work)
+static void afs_charge_preallocation(struct work_struct *work)
{
- struct rxrpc_call *rxcall;
- struct afs_call *call = NULL;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&afs_incoming_calls))) {
- _debug("new call");
-
- /* don't need the notification */
- afs_free_skb(skb);
+ struct afs_call *call = afs_spare_incoming_call;
+ for (;;) {
if (!call) {
call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
- if (!call) {
- rxrpc_kernel_reject_call(afs_socket);
- return;
- }
+ if (!call)
+ break;
- call->async_workfn = afs_process_async_call;
- INIT_WORK(&call->async_work, afs_async_workfn);
+ INIT_WORK(&call->async_work, afs_process_async_call);
call->wait_mode = &afs_async_incoming_call;
call->type = &afs_RXCMxxxx;
init_waitqueue_head(&call->waitq);
- skb_queue_head_init(&call->rx_queue);
call->state = AFS_CALL_AWAIT_OP_ID;
-
- _debug("CALL %p{%s} [%d]",
- call, call->type->name,
- atomic_read(&afs_outstanding_calls));
- atomic_inc(&afs_outstanding_calls);
}
- rxcall = rxrpc_kernel_accept_call(afs_socket,
- (unsigned long) call);
- if (!IS_ERR(rxcall)) {
- call->rxcall = rxcall;
- call = NULL;
- }
+ if (rxrpc_kernel_charge_accept(afs_socket,
+ afs_wake_up_async_call,
+ afs_rx_attach,
+ (unsigned long)call,
+ GFP_KERNEL) < 0)
+ break;
+ call = NULL;
}
+ afs_spare_incoming_call = call;
+}
+
+/*
+ * Discard a preallocated call when a socket is shut down.
+ */
+static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
+ unsigned long user_call_ID)
+{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
- if (call)
- afs_free_call(call);
+ atomic_inc(&afs_outstanding_calls);
+ call->rxcall = NULL;
+ afs_free_call(call);
+}
+
+/*
+ * Notification of an incoming call.
+ */
+static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
+ unsigned long user_call_ID)
+{
+ atomic_inc(&afs_outstanding_calls);
+ queue_work(afs_wq, &afs_charge_preallocation_work);
}
/*
* Grab the operation ID from an incoming cache manager call. The socket
* buffer is discarded on error or if we don't yet have sufficient data.
*/
-static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
- bool last)
+static int afs_deliver_cm_op_id(struct afs_call *call)
{
- size_t len = skb->len;
- void *oibuf = (void *) &call->operation_ID;
+ int ret;
- _enter("{%u},{%zu},%d", call->offset, len, last);
+ _enter("{%zu}", call->offset);
ASSERTCMP(call->offset, <, 4);
/* the operation ID forms the first four bytes of the request data */
- len = min_t(size_t, len, 4 - call->offset);
- if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0)
- BUG();
- if (!pskb_pull(skb, len))
- BUG();
- call->offset += len;
-
- if (call->offset < 4) {
- afs_data_consumed(call, skb);
- _leave(" = -EAGAIN");
- return -EAGAIN;
- }
+ ret = afs_extract_data(call, &call->operation_ID, 4, true);
+ if (ret < 0)
+ return ret;
call->state = AFS_CALL_AWAIT_REQUEST;
+ call->offset = 0;
/* ask the cache manager to route the call (it'll change the call type
* if successful) */
@@ -803,7 +690,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
/* pass responsibility for the remainder of this message off to the
* cache manager op */
- return call->type->deliver(call, skb, last);
+ return call->type->deliver(call);
}
/*
@@ -823,14 +710,15 @@ void afs_send_empty_reply(struct afs_call *call)
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
+ switch (rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, 0)) {
case 0:
_leave(" [replied]");
return;
case -ENOMEM:
_debug("oom");
- rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ RX_USER_ABORT, ENOMEM, "KOO");
default:
afs_end_call(call);
_leave(" [error]");
@@ -859,7 +747,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
+ n = rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, len);
if (n >= 0) {
/* Success */
_leave(" [replied]");
@@ -868,7 +756,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
if (n == -ENOMEM) {
_debug("oom");
- rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
+ rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ RX_USER_ABORT, ENOMEM, "KOO");
}
afs_end_call(call);
_leave(" [error]");
@@ -877,25 +766,40 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
/*
* Extract a piece of data from the received data socket buffers.
*/
-int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
- bool last, void *buf, size_t count)
+int afs_extract_data(struct afs_call *call, void *buf, size_t count,
+ bool want_more)
{
- size_t len = skb->len;
+ int ret;
- _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count);
+ _enter("{%s,%zu},,%zu,%d",
+ call->type->name, call->offset, count, want_more);
- ASSERTCMP(call->offset, <, count);
+ ASSERTCMP(call->offset, <=, count);
- len = min_t(size_t, len, count - call->offset);
- if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 ||
- !pskb_pull(skb, len))
- BUG();
- call->offset += len;
+ ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
+ buf, count, &call->offset,
+ want_more, &call->abort_code);
+ if (ret == 0 || ret == -EAGAIN)
+ return ret;
- if (call->offset < count) {
- afs_data_consumed(call, skb);
- _leave(" = -EAGAIN");
- return -EAGAIN;
+ if (ret == 1) {
+ switch (call->state) {
+ case AFS_CALL_AWAIT_REPLY:
+ call->state = AFS_CALL_COMPLETE;
+ break;
+ case AFS_CALL_AWAIT_REQUEST:
+ call->state = AFS_CALL_REPLYING;
+ break;
+ default:
+ break;
+ }
+ return 0;
}
- return 0;
+
+ if (ret == -ECONNABORTED)
+ call->error = call->type->abort_to_error(call->abort_code);
+ else
+ call->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ return ret;
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index f342acf3547d..d4066ab7dd55 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -178,13 +178,18 @@ server_in_two_cells:
/*
* look up a server by its IP address
*/
-struct afs_server *afs_find_server(const struct in_addr *_addr)
+struct afs_server *afs_find_server(const struct sockaddr_rxrpc *srx)
{
struct afs_server *server = NULL;
struct rb_node *p;
- struct in_addr addr = *_addr;
+ struct in_addr addr = srx->transport.sin.sin_addr;
- _enter("%pI4", &addr.s_addr);
+ _enter("{%d,%pI4}", srx->transport.family, &addr.s_addr);
+
+ if (srx->transport.family != AF_INET) {
+ WARN(true, "AFS does not yes support non-IPv4 addresses\n");
+ return NULL;
+ }
read_lock(&afs_servers_lock);
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index f94d1abdc3eb..94bcd97d22b8 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -58,17 +58,16 @@ static int afs_vl_abort_to_error(u32 abort_code)
/*
* deliver reply data to a VL.GetEntryByXXX call
*/
-static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
- struct sk_buff *skb, bool last)
+static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call)
{
struct afs_cache_vlocation *entry;
__be32 *bp;
u32 tmp;
int loop, ret;
- _enter(",,%u", last);
+ _enter("");
- ret = afs_transfer_reply(call, skb, last);
+ ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 52976785a32c..45a86396fd2d 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -594,8 +594,8 @@ static void afs_vlocation_reaper(struct work_struct *work)
*/
int __init afs_vlocation_update_init(void)
{
- afs_vlocation_update_worker =
- create_singlethread_workqueue("kafs_vlupdated");
+ afs_vlocation_update_worker = alloc_workqueue("kafs_vlupdated",
+ WQ_MEM_RECLAIM, 0);
return afs_vlocation_update_worker ? 0 : -ENOMEM;
}
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 431fd7ee3488..e44271dfceb6 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -431,8 +431,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
memcpy(&wq->name, &qstr, sizeof(struct qstr));
wq->dev = autofs4_get_dev(sbi);
wq->ino = autofs4_get_ino(sbi);
- wq->uid = current_uid();
- wq->gid = current_gid();
+ wq->uid = current_real_cred()->uid;
+ wq->gid = current_real_cred()->gid;
wq->pid = pid;
wq->tgid = tgid;
wq->status = -EINTR; /* Status return if interrupted */
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 7da05b159ade..bfe9f9994935 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -789,7 +789,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
* Will be set to real fs blocksize later.
*
* Linux 2.4.10 and later refuse to read blocks smaller than
- * the hardsect size for the device. But we also need to read at
+ * the logical block size for the device. But we also need to read at
* least 1k to get the second 512 bytes of the volume.
* -WD 10-26-01
*/
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 08ae99343d92..376e4e426324 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -180,9 +180,6 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct file *file = iocb->ki_filp;
struct inode *inode = bdev_file_inode(file);
- if (IS_DAX(inode))
- return dax_do_io(iocb, inode, iter, blkdev_get_block,
- NULL, DIO_SKIP_DIO_COUNT);
return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
blkdev_get_block, NULL, NULL,
DIO_SKIP_DIO_COUNT);
@@ -302,14 +299,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
error = sb->s_op->thaw_super(sb);
else
error = thaw_super(sb);
- if (error) {
+ if (error)
bdev->bd_fsfreeze_count++;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return error;
- }
out:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
+ return error;
}
EXPORT_SYMBOL(thaw_bdev);
@@ -1275,7 +1269,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- bdev->bd_inode->i_flags = 0;
if (!partno) {
ret = -ENXIO;
@@ -1303,11 +1296,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
}
}
- if (!ret) {
+ if (!ret)
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
- if (!bdev_dax_capable(bdev))
- bdev->bd_inode->i_flags &= ~S_DAX;
- }
/*
* If the device is invalidated, rescan partition
@@ -1342,8 +1332,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
goto out_clear;
}
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
- if (!bdev_dax_capable(bdev))
- bdev->bd_inode->i_flags &= ~S_DAX;
}
} else {
if (bdev->bd_contains == bdev) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e6811c42e41e..ca01106795ea 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8412,7 +8412,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
if (!bio)
return -ENOMEM;
- bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
+ bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio));
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
@@ -8450,7 +8450,8 @@ next_block:
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
- bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
+ bio_set_op_attrs(bio, bio_op(orig_bio),
+ bio_flags(orig_bio));
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index f47c7483863b..8415d4f8d1a1 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -38,27 +38,6 @@ coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
static ssize_t
-coda_file_splice_read(struct file *coda_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- ssize_t (*splice_read)(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
- struct coda_file_info *cfi;
- struct file *host_file;
-
- cfi = CODA_FTOC(coda_file);
- BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
- host_file = cfi->cfi_container;
-
- splice_read = host_file->f_op->splice_read;
- if (!splice_read)
- splice_read = default_file_splice_read;
-
- return splice_read(host_file, ppos, pipe, count, flags);
-}
-
-static ssize_t
coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *coda_file = iocb->ki_filp;
@@ -225,6 +204,6 @@ const struct file_operations coda_file_operations = {
.open = coda_open,
.release = coda_release,
.fsync = coda_fsync,
- .splice_read = coda_file_splice_read,
+ .splice_read = generic_file_splice_read,
};
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c502c116924c..61057b7dbddb 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -28,7 +28,6 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
-#include <linux/ecryptfs.h>
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -128,11 +127,11 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
EXPORT_SYMBOL(fscrypt_get_ctx);
/**
- * fscrypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
*/
-static void fscrypt_complete(struct crypto_async_request *req, int res)
+static void page_crypt_complete(struct crypto_async_request *req, int res)
{
struct fscrypt_completion_result *ecr = req->data;
@@ -170,7 +169,7 @@ static int do_page_crypto(struct inode *inode,
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- fscrypt_complete, &ecr);
+ page_crypt_complete, &ecr);
BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
memcpy(xts_tweak, &index, sizeof(index));
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 5d6d49113efa..9a28133ac3b8 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -10,21 +10,16 @@
* This has not yet undergone a rigorous security audit.
*/
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/fscrypto.h>
-static u32 size_round_up(size_t size, size_t blksize)
-{
- return ((size + blksize - 1) / blksize) * blksize;
-}
-
/**
- * dir_crypt_complete() -
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
*/
-static void dir_crypt_complete(struct crypto_async_request *req, int res)
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
{
struct fscrypt_completion_result *ecr = req->data;
@@ -35,11 +30,11 @@ static void dir_crypt_complete(struct crypto_async_request *req, int res)
}
/**
- * fname_encrypt() -
+ * fname_encrypt() - encrypt a filename
*
- * This function encrypts the input filename, and returns the length of the
- * ciphertext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory to oname string.
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
*/
static int fname_encrypt(struct inode *inode,
const struct qstr *iname, struct fscrypt_str *oname)
@@ -60,10 +55,9 @@ static int fname_encrypt(struct inode *inode,
if (iname->len <= 0 || iname->len > lim)
return -EIO;
- ciphertext_len = (iname->len < FS_CRYPTO_BLOCK_SIZE) ?
- FS_CRYPTO_BLOCK_SIZE : iname->len;
- ciphertext_len = size_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
+ ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
+ ciphertext_len = round_up(ciphertext_len, padding);
+ ciphertext_len = min(ciphertext_len, lim);
if (ciphertext_len <= sizeof(buf)) {
workbuf = buf;
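Aside (illustration only, not part of the diff): the max()/round_up()/min() sequence just above computes the same padded length as the old size_round_up() chain. Assuming FS_CRYPTO_BLOCK_SIZE is the 16-byte AES block size and the default 4-byte padding, a 3-byte name gives max(3, 16) = 16, round_up(16, 4) = 16, and min(16, lim) leaves 16 provided lim is at least 16 — exactly what ((16 + 3) / 4) * 4 produced before.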
@@ -84,7 +78,7 @@ static int fname_encrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- dir_crypt_complete, &ecr);
+ fname_crypt_complete, &ecr);
/* Copy the input */
memcpy(workbuf, iname->name, iname->len);
@@ -105,20 +99,22 @@ static int fname_encrypt(struct inode *inode,
}
kfree(alloc_buf);
skcipher_request_free(req);
- if (res < 0)
+ if (res < 0) {
printk_ratelimited(KERN_ERR
"%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
oname->len = ciphertext_len;
- return res;
+ return 0;
}
-/*
- * fname_decrypt()
- * This function decrypts the input filename, and returns
- * the length of the plaintext.
- * Errors are returned as negative numbers.
- * We trust the caller to allocate sufficient memory to oname string.
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
*/
static int fname_decrypt(struct inode *inode,
const struct fscrypt_str *iname,
@@ -146,7 +142,7 @@ static int fname_decrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- dir_crypt_complete, &ecr);
+ fname_crypt_complete, &ecr);
/* Initialize IV */
memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -168,7 +164,7 @@ static int fname_decrypt(struct inode *inode,
}
oname->len = strnlen(oname->name, iname->len);
- return oname->len;
+ return 0;
}
static const char *lookup_table =
@@ -231,9 +227,8 @@ u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
if (ci)
padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
- if (ilen < FS_CRYPTO_BLOCK_SIZE)
- ilen = FS_CRYPTO_BLOCK_SIZE;
- return size_round_up(ilen, padding);
+ ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+ return round_up(ilen, padding);
}
EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
@@ -279,6 +274,10 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
/**
* fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
* space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
*/
int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
@@ -287,13 +286,12 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
{
const struct qstr qname = FSTR_TO_QSTR(iname);
char buf[24];
- int ret;
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
oname->name[iname->len - 1] = '.';
oname->len = iname->len;
- return oname->len;
+ return 0;
}
if (iname->len < FS_CRYPTO_BLOCK_SIZE)
@@ -303,9 +301,9 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
return fname_decrypt(inode, iname, oname);
if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
- ret = digest_encode(iname->name, iname->len, oname->name);
- oname->len = ret;
- return ret;
+ oname->len = digest_encode(iname->name, iname->len,
+ oname->name);
+ return 0;
}
if (hash) {
memcpy(buf, &hash, 4);
@@ -315,15 +313,18 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
}
memcpy(buf + 8, iname->name + iname->len - 16, 16);
oname->name[0] = '_';
- ret = digest_encode(buf, 24, oname->name + 1);
- oname->len = ret + 1;
- return ret + 1;
+ oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+ return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
/**
* fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
* space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
*/
int fscrypt_fname_usr_to_disk(struct inode *inode,
const struct qstr *iname,
@@ -333,7 +334,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
oname->name[0] = '.';
oname->name[iname->len - 1] = '.';
oname->len = iname->len;
- return oname->len;
+ return 0;
}
if (inode->i_crypt_info)
return fname_encrypt(inode, iname, oname);
@@ -367,10 +368,10 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (dir->i_crypt_info) {
ret = fscrypt_fname_alloc_buffer(dir, iname->len,
&fname->crypto_buf);
- if (ret < 0)
+ if (ret)
return ret;
ret = fname_encrypt(dir, iname, &fname->crypto_buf);
- if (ret < 0)
+ if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
fname->disk_name.len = fname->crypto_buf.len;
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1ac263eddc4e..82f0285f5d08 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -8,11 +8,8 @@
* Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
*/
-#include <keys/encrypted-type.h>
#include <keys/user-type.h>
-#include <linux/random.h>
#include <linux/scatterlist.h>
-#include <uapi/linux/keyctl.h>
#include <linux/fscrypto.h>
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
@@ -139,6 +136,38 @@ out:
return res;
}
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+ const char **cipher_str_ret, int *keysize_ret)
+{
+ if (S_ISREG(inode->i_mode)) {
+ if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+ *cipher_str_ret = "xts(aes)";
+ *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported contents encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_data_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+ *cipher_str_ret = "cts(cbc(aes))";
+ *keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported filenames encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_filename_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
+ (inode->i_mode & S_IFMT), inode->i_ino);
+ return -ENOKEY;
+}
+
static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
@@ -155,8 +184,8 @@ int get_crypt_info(struct inode *inode)
struct fscrypt_context ctx;
struct crypto_skcipher *ctfm;
const char *cipher_str;
+ int keysize;
u8 raw_key[FS_MAX_KEY_SIZE];
- u8 mode;
int res;
res = fscrypt_initialize();
@@ -179,13 +208,19 @@ retry:
if (res < 0) {
if (!fscrypt_dummy_context_enabled(inode))
return res;
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
ctx.flags = 0;
} else if (res != sizeof(ctx)) {
return -EINVAL;
}
- res = 0;
+
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
@@ -198,27 +233,11 @@ retry:
crypt_info->ci_keyring_key = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
- if (S_ISREG(inode->i_mode))
- mode = crypt_info->ci_data_mode;
- else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- mode = crypt_info->ci_filename_mode;
- else
- BUG();
-
- switch (mode) {
- case FS_ENCRYPTION_MODE_AES_256_XTS:
- cipher_str = "xts(aes)";
- break;
- case FS_ENCRYPTION_MODE_AES_256_CTS:
- cipher_str = "cts(cbc(aes))";
- break;
- default:
- printk_once(KERN_WARNING
- "%s: unsupported key mode %d (ino %u)\n",
- __func__, mode, (unsigned) inode->i_ino);
- res = -ENOKEY;
+
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ if (res)
goto out;
- }
+
if (fscrypt_dummy_context_enabled(inode)) {
memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
goto got_key;
@@ -253,7 +272,7 @@ got_key:
crypt_info->ci_ctfm = ctfm;
crypto_skcipher_clear_flags(ctfm, ~0);
crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
+ res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
if (res)
goto out;
diff --git a/fs/dax.c b/fs/dax.c
index 993dc6fe0416..014defd2e744 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -31,6 +31,8 @@
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
+#include <linux/iomap.h>
+#include "internal.h"
/*
* We use lowest available bit in exceptional entry for locking, other two
@@ -580,14 +582,13 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
return VM_FAULT_LOCKED;
}
-static int copy_user_bh(struct page *to, struct inode *inode,
- struct buffer_head *bh, unsigned long vaddr)
+static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
+ struct page *to, unsigned long vaddr)
{
struct blk_dax_ctl dax = {
- .sector = to_sector(bh, inode),
- .size = bh->b_size,
+ .sector = sector,
+ .size = size,
};
- struct block_device *bdev = bh->b_bdev;
void *vto;
if (dax_map_atomic(bdev, &dax) < 0)
@@ -790,14 +791,13 @@ int dax_writeback_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_insert_mapping(struct address_space *mapping,
- struct buffer_head *bh, void **entryp,
- struct vm_area_struct *vma, struct vm_fault *vmf)
+ struct block_device *bdev, sector_t sector, size_t size,
+ void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
unsigned long vaddr = (unsigned long)vmf->virtual_address;
- struct block_device *bdev = bh->b_bdev;
struct blk_dax_ctl dax = {
- .sector = to_sector(bh, mapping->host),
- .size = bh->b_size,
+ .sector = sector,
+ .size = size,
};
void *ret;
void *entry = *entryp;
@@ -868,7 +868,8 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (vmf->cow_page) {
struct page *new_page = vmf->cow_page;
if (buffer_written(&bh))
- error = copy_user_bh(new_page, inode, &bh, vaddr);
+ error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
+ bh.b_size, new_page, vaddr);
else
clear_user_highpage(new_page, vaddr);
if (error)
@@ -898,7 +899,8 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
/* Filesystem should not return unwritten buffers to us! */
WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
- error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
+ error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
+ bh.b_size, &entry, vma, vmf);
unlock_entry:
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
@@ -1034,7 +1036,7 @@ int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if (!write && !buffer_mapped(&bh)) {
spinlock_t *ptl;
pmd_t entry;
- struct page *zero_page = get_huge_zero_page();
+ struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
if (unlikely(!zero_page)) {
dax_pmd_dbg(&bh, address, "no zero page");
@@ -1241,3 +1243,229 @@ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
+
+#ifdef CONFIG_FS_IOMAP
+static loff_t
+iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iov_iter *iter = data;
+ loff_t end = pos + length, done = 0;
+ ssize_t ret = 0;
+
+ if (iov_iter_rw(iter) == READ) {
+ end = min(end, i_size_read(inode));
+ if (pos >= end)
+ return 0;
+
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+ return iov_iter_zero(min(length, end - pos), iter);
+ }
+
+ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+ return -EIO;
+
+ while (pos < end) {
+ unsigned offset = pos & (PAGE_SIZE - 1);
+ struct blk_dax_ctl dax = { 0 };
+ ssize_t map_len;
+
+ dax.sector = iomap->blkno +
+ (((pos & PAGE_MASK) - iomap->offset) >> 9);
+ dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
+ map_len = dax_map_atomic(iomap->bdev, &dax);
+ if (map_len < 0) {
+ ret = map_len;
+ break;
+ }
+
+ dax.addr += offset;
+ map_len -= offset;
+ if (map_len > end - pos)
+ map_len = end - pos;
+
+ if (iov_iter_rw(iter) == WRITE)
+ map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
+ else
+ map_len = copy_to_iter(dax.addr, map_len, iter);
+ dax_unmap_atomic(iomap->bdev, &dax);
+ if (map_len <= 0) {
+ ret = map_len ? map_len : -EFAULT;
+ break;
+ }
+
+ pos += map_len;
+ length -= map_len;
+ done += map_len;
+ }
+
+ return done ? done : ret;
+}
+
+/**
+ * iomap_dax_rw - Perform I/O to a DAX file
+ * @iocb: The control block for this I/O
+ * @iter: The addresses to do I/O from or to
+ * @ops: iomap ops passed from the file system
+ *
+ * This function performs read and write operations to directly mapped
+ * persistent memory. The caller needs to take care of read/write exclusion
+ * and evicting any page cache pages in the region under I/O.
+ */
+ssize_t
+iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ struct iomap_ops *ops)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
+ loff_t pos = iocb->ki_pos, ret = 0, done = 0;
+ unsigned flags = 0;
+
+ if (iov_iter_rw(iter) == WRITE)
+ flags |= IOMAP_WRITE;
+
+ /*
+ * Yes, even DAX files can have page cache attached to them: A zeroed
+ * page is inserted into the pagecache when we have to serve a write
+ * fault on a hole. It should never be dirtied and can simply be
+ * dropped from the pagecache once we get real data for the page.
+ *
+ * XXX: This is racy against mmap, and there's nothing we can do about
+ * it. We'll eventually need to shift this down even further so that
+ * we can check if we allocated blocks over a hole first.
+ */
+ if (mapping->nrpages) {
+ ret = invalidate_inode_pages2_range(mapping,
+ pos >> PAGE_SHIFT,
+ (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
+ WARN_ON_ONCE(ret);
+ }
+
+ while (iov_iter_count(iter)) {
+ ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
+ iter, iomap_dax_actor);
+ if (ret <= 0)
+ break;
+ pos += ret;
+ done += ret;
+ }
+
+ iocb->ki_pos += done;
+ return done ? done : ret;
+}
+EXPORT_SYMBOL_GPL(iomap_dax_rw);
+
+/**
+ * iomap_dax_fault - handle a page fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ * @ops: iomap ops passed from the file system
+ *
+ * When a page fault occurs, filesystems may call this helper in their fault
+ * or mkwrite handler for DAX files. Assumes the caller has done all the
+ * necessary locking for the page fault to proceed successfully.
+ */
+int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops)
+{
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ struct inode *inode = mapping->host;
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
+ sector_t sector;
+ struct iomap iomap = { 0 };
+ unsigned flags = 0;
+ int error, major = 0;
+ void *entry;
+
+ /*
+ * Check whether offset isn't beyond end of file now. Caller is supposed
+ * to hold locks serializing us with truncate / punch hole so this is
+ * a reliable test.
+ */
+ if (pos >= i_size_read(inode))
+ return VM_FAULT_SIGBUS;
+
+ entry = grab_mapping_entry(mapping, vmf->pgoff);
+ if (IS_ERR(entry)) {
+ error = PTR_ERR(entry);
+ goto out;
+ }
+
+ if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
+ flags |= IOMAP_WRITE;
+
+ /*
+ * Note that we don't bother to use iomap_apply here: DAX requires
+ * the file system block size to be equal to the page size, which means
+ * that we never have to deal with more than a single extent here.
+ */
+ error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+ if (error)
+ goto unlock_entry;
+ if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
+ error = -EIO; /* fs corruption? */
+ goto unlock_entry;
+ }
+
+ sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);
+
+ if (vmf->cow_page) {
+ switch (iomap.type) {
+ case IOMAP_HOLE:
+ case IOMAP_UNWRITTEN:
+ clear_user_highpage(vmf->cow_page, vaddr);
+ break;
+ case IOMAP_MAPPED:
+ error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
+ vmf->cow_page, vaddr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ break;
+ }
+
+ if (error)
+ goto unlock_entry;
+ if (!radix_tree_exceptional_entry(entry)) {
+ vmf->page = entry;
+ return VM_FAULT_LOCKED;
+ }
+ vmf->entry = entry;
+ return VM_FAULT_DAX_LOCKED;
+ }
+
+ switch (iomap.type) {
+ case IOMAP_MAPPED:
+ if (iomap.flags & IOMAP_F_NEW) {
+ count_vm_event(PGMAJFAULT);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ major = VM_FAULT_MAJOR;
+ }
+ error = dax_insert_mapping(mapping, iomap.bdev, sector,
+ PAGE_SIZE, &entry, vma, vmf);
+ break;
+ case IOMAP_UNWRITTEN:
+ case IOMAP_HOLE:
+ if (!(vmf->flags & FAULT_FLAG_WRITE))
+ return dax_load_hole(mapping, entry, vmf);
+ /*FALLTHRU*/
+ default:
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ break;
+ }
+
+ unlock_entry:
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ out:
+ if (error == -ENOMEM)
+ return VM_FAULT_OOM | major;
+ /* -EBUSY is fine, somebody else faulted on the same PTE */
+ if (error < 0 && error != -EBUSY)
+ return VM_FAULT_SIGBUS | major;
+ return VM_FAULT_NOPAGE | major;
+}
+EXPORT_SYMBOL_GPL(iomap_dax_fault);
+#endif /* CONFIG_FS_IOMAP */
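Aside (illustration only, not part of the diff): both iomap_dax_actor() and iomap_dax_fault() above convert a byte position into a 512-byte sector with blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9). With made-up numbers — pos = 0x3000 inside a mapping whose iomap->offset is 0x2000 and whose iomap->blkno is 800 — that is 800 + ((0x3000 - 0x2000) >> 9) = 800 + 8 = 808, i.e. the faulting page sits eight sectors (4 KiB) into the extent.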
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 592059f88e04..354e2ab62031 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -97,9 +97,6 @@ EXPORT_SYMBOL_GPL(debugfs_use_file_finish);
#define F_DENTRY(filp) ((filp)->f_path.dentry)
-#define REAL_FOPS_DEREF(dentry) \
- ((const struct file_operations *)(dentry)->d_fsdata)
-
static int open_proxy_open(struct inode *inode, struct file *filp)
{
const struct dentry *dentry = F_DENTRY(filp);
@@ -112,7 +109,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
goto out;
}
- real_fops = REAL_FOPS_DEREF(dentry);
+ real_fops = debugfs_real_fops(filp);
real_fops = fops_get(real_fops);
if (!real_fops) {
/* Huh? Module did not clean up after itself at exit? */
@@ -143,7 +140,7 @@ static ret_type full_proxy_ ## name(proto) \
{ \
const struct dentry *dentry = F_DENTRY(filp); \
const struct file_operations *real_fops = \
- REAL_FOPS_DEREF(dentry); \
+ debugfs_real_fops(filp); \
int srcu_idx; \
ret_type r; \
\
@@ -176,7 +173,7 @@ static unsigned int full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
const struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry);
+ const struct file_operations *real_fops = debugfs_real_fops(filp);
int srcu_idx;
unsigned int r = 0;
@@ -193,7 +190,7 @@ static unsigned int full_proxy_poll(struct file *filp,
static int full_proxy_release(struct inode *inode, struct file *filp)
{
const struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry);
+ const struct file_operations *real_fops = debugfs_real_fops(filp);
const struct file_operations *proxy_fops = filp->f_op;
int r = 0;
@@ -209,7 +206,7 @@ static int full_proxy_release(struct inode *inode, struct file *filp)
replace_fops(filp, d_inode(dentry)->i_fop);
kfree((void *)proxy_fops);
fops_put(real_fops);
- return 0;
+ return r;
}
static void __full_proxy_fops_init(struct file_operations *proxy_fops,
@@ -241,7 +238,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
goto out;
}
- real_fops = REAL_FOPS_DEREF(dentry);
+ real_fops = debugfs_real_fops(filp);
real_fops = fops_get(real_fops);
if (!real_fops) {
/* Huh? Module did not cleanup after itself at exit? */
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index bba52634b995..b3e8443a1f47 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -19,8 +19,4 @@ extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
extern const struct file_operations debugfs_full_proxy_file_operations;
-struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
-
#endif /* _DEBUGFS_INTERNAL_H_ */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 79a5941c2474..442d1a7e671b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -272,13 +272,8 @@ static int mknod_ptmx(struct super_block *sb)
struct dentry *root = sb->s_root;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
- kuid_t root_uid;
- kgid_t root_gid;
-
- root_uid = make_kuid(current_user_ns(), 0);
- root_gid = make_kgid(current_user_ns(), 0);
- if (!uid_valid(root_uid) || !gid_valid(root_gid))
- return -EINVAL;
+ kuid_t ptmx_uid = current_fsuid();
+ kgid_t ptmx_gid = current_fsgid();
inode_lock(d_inode(root));
@@ -309,8 +304,8 @@ static int mknod_ptmx(struct super_block *sb)
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
- inode->i_uid = root_uid;
- inode->i_gid = root_gid;
+ inode->i_uid = ptmx_uid;
+ inode->i_gid = ptmx_gid;
d_add(dentry, inode);
@@ -336,7 +331,6 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
- sync_filesystem(sb);
err = parse_mount_options(data, PARSE_REMOUNT, opts);
/*
@@ -395,6 +389,7 @@ static int
devpts_fill_super(struct super_block *s, void *data, int silent)
{
struct inode *inode;
+ int error;
s->s_iflags &= ~SB_I_NODEV;
s->s_blocksize = 1024;
@@ -403,10 +398,16 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &devpts_sops;
s->s_time_gran = 1;
+ error = -ENOMEM;
s->s_fs_info = new_pts_fs_info(s);
if (!s->s_fs_info)
goto fail;
+ error = parse_mount_options(data, PARSE_MOUNT, &DEVPTS_SB(s)->mount_opts);
+ if (error)
+ goto fail;
+
+ error = -ENOMEM;
inode = new_inode(s);
if (!inode)
goto fail;
@@ -418,13 +419,21 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
set_nlink(inode, 2);
s->s_root = d_make_root(inode);
- if (s->s_root)
- return 0;
+ if (!s->s_root) {
+ pr_err("get root dentry failed\n");
+ goto fail;
+ }
- pr_err("get root dentry failed\n");
+ error = mknod_ptmx(s);
+ if (error)
+ goto fail_dput;
+ return 0;
+fail_dput:
+ dput(s->s_root);
+ s->s_root = NULL;
fail:
- return -ENOMEM;
+ return error;
}
/*
@@ -436,43 +445,15 @@ fail:
static struct dentry *devpts_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- int error;
- struct pts_mount_opts opts;
- struct super_block *s;
-
- error = parse_mount_options(data, PARSE_MOUNT, &opts);
- if (error)
- return ERR_PTR(error);
-
- s = sget(fs_type, NULL, set_anon_super, flags, NULL);
- if (IS_ERR(s))
- return ERR_CAST(s);
-
- if (!s->s_root) {
- error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
- if (error)
- goto out_undo_sget;
- s->s_flags |= MS_ACTIVE;
- }
-
- memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
-
- error = mknod_ptmx(s);
- if (error)
- goto out_undo_sget;
-
- return dget(s->s_root);
-
-out_undo_sget:
- deactivate_locked_super(s);
- return ERR_PTR(error);
+ return mount_nodev(fs_type, flags, data, devpts_fill_super);
}
static void devpts_kill_sb(struct super_block *sb)
{
struct pts_fs_info *fsi = DEVPTS_SB(sb);
- ida_destroy(&fsi->allocated_ptys);
+ if (fsi)
+ ida_destroy(&fsi->allocated_ptys);
kfree(fsi);
kill_litter_super(sb);
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7c3ce73cb617..fb9aa16a7727 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -246,6 +246,9 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
if ((dio->op == REQ_OP_READ) &&
((offset + transferred) > dio->i_size))
transferred = dio->i_size - offset;
+ /* ignore EFAULT if some IO has been done */
+ if (unlikely(ret == -EFAULT) && transferred)
+ ret = 0;
}
if (ret == 0)
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index c634874e12d9..36bea5adcaba 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,5 +1,6 @@
config EXT2_FS
tristate "Second extended fs support"
+ select FS_IOMAP if FS_DAX
help
Ext2 is a standard Linux file system for hard disks.
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 06af2f92226c..37e2be784ac7 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -814,6 +814,7 @@ extern const struct file_operations ext2_file_operations;
/* inode.c */
extern const struct address_space_operations ext2_aops;
extern const struct address_space_operations ext2_nobh_aops;
+extern struct iomap_ops ext2_iomap_ops;
/* namei.c */
extern const struct inode_operations ext2_dir_inode_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 5efeefe17abb..0ca363d1341c 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -22,11 +22,59 @@
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
+#include <linux/iomap.h>
+#include <linux/uio.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
#ifdef CONFIG_FS_DAX
+static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ ssize_t ret;
+
+ if (!iov_iter_count(to))
+ return 0; /* skip atime */
+
+ inode_lock_shared(inode);
+ ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+ inode_unlock_shared(inode);
+
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
+
+static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out_unlock;
+ ret = file_remove_privs(file);
+ if (ret)
+ goto out_unlock;
+ ret = file_update_time(file);
+ if (ret)
+ goto out_unlock;
+
+ ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+ if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
+ i_size_write(inode, iocb->ki_pos);
+ mark_inode_dirty(inode);
+ }
+
+out_unlock:
+ inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ return ret;
+}
+
/*
* The lock ordering for ext2 DAX fault paths is:
*
@@ -51,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
down_read(&ei->dax_sem);
- ret = dax_fault(vma, vmf, ext2_get_block);
+ ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -156,14 +204,28 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
}
-/*
- * We have mostly NULL's here: the current defaults are ok for
- * the ext2 filesystem.
- */
+static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(iocb->ki_filp->f_mapping->host))
+ return ext2_dax_read_iter(iocb, to);
+#endif
+ return generic_file_read_iter(iocb, to);
+}
+
+static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(iocb->ki_filp->f_mapping->host))
+ return ext2_dax_write_iter(iocb, from);
+#endif
+ return generic_file_write_iter(iocb, from);
+}
+
const struct file_operations ext2_file_operations = {
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
+ .read_iter = ext2_file_read_iter,
+ .write_iter = ext2_file_write_iter,
.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
@@ -172,6 +234,7 @@ const struct file_operations ext2_file_operations = {
.open = dquot_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
+ .get_unmapped_area = thp_get_unmapped_area,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
};
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index efe5fb21c533..04e73a99902b 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -465,6 +465,11 @@ struct inode *ext2_new_inode(struct inode *dir, umode_t mode,
for (i = 0; i < sbi->s_groups_count; i++) {
gdp = ext2_get_group_desc(sb, group, &bh2);
+ if (!gdp) {
+ if (++group == sbi->s_groups_count)
+ group = 0;
+ continue;
+ }
brelse(bitmap_bh);
bitmap_bh = read_inode_bitmap(sb, group);
if (!bitmap_bh) {
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index d5c7d09919f3..1e72d425fd3b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -32,6 +32,7 @@
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
+#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
@@ -618,7 +619,7 @@ static void ext2_splice_branch(struct inode *inode,
*/
static int ext2_get_blocks(struct inode *inode,
sector_t iblock, unsigned long maxblocks,
- struct buffer_head *bh_result,
+ u32 *bno, bool *new, bool *boundary,
int create)
{
int err = -EIO;
@@ -644,7 +645,6 @@ static int ext2_get_blocks(struct inode *inode,
/* Simplest case - block found, no allocation needed */
if (!partial) {
first_block = le32_to_cpu(chain[depth - 1].key);
- clear_buffer_new(bh_result); /* What's this do? */
count++;
/*map more blocks*/
while (count < maxblocks && count <= blocks_to_boundary) {
@@ -699,7 +699,6 @@ static int ext2_get_blocks(struct inode *inode,
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
- clear_buffer_new(bh_result);
goto got_it;
}
}
@@ -733,6 +732,16 @@ static int ext2_get_blocks(struct inode *inode,
}
if (IS_DAX(inode)) {
+ int i;
+
+ /*
+ * We must unmap blocks before zeroing so that writeback cannot
+ * overwrite zeros with stale data from block device page cache.
+ */
+ for (i = 0; i < count; i++) {
+ unmap_underlying_metadata(inode->i_sb->s_bdev,
+ le32_to_cpu(chain[depth-1].key) + i);
+ }
/*
* block must be initialised before we put it in the tree
* so that it's not found by another thread before it's
@@ -745,15 +754,16 @@ static int ext2_get_blocks(struct inode *inode,
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
- } else
- set_buffer_new(bh_result);
+ } else {
+ *new = true;
+ }
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
got_it:
- map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+ *bno = le32_to_cpu(chain[depth-1].key);
if (count > blocks_to_boundary)
- set_buffer_boundary(bh_result);
+ *boundary = true;
err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */
@@ -765,19 +775,82 @@ cleanup:
return err;
}
-int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
+int ext2_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
{
unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
- int ret = ext2_get_blocks(inode, iblock, max_blocks,
- bh_result, create);
- if (ret > 0) {
- bh_result->b_size = (ret << inode->i_blkbits);
- ret = 0;
+ bool new = false, boundary = false;
+ u32 bno;
+ int ret;
+
+ ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary,
+ create);
+ if (ret <= 0)
+ return ret;
+
+ map_bh(bh_result, inode->i_sb, bno);
+ bh_result->b_size = (ret << inode->i_blkbits);
+ if (new)
+ set_buffer_new(bh_result);
+ if (boundary)
+ set_buffer_boundary(bh_result);
+ return 0;
+
+}
+
+#ifdef CONFIG_FS_DAX
+static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned flags, struct iomap *iomap)
+{
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned long first_block = offset >> blkbits;
+ unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
+ bool new = false, boundary = false;
+ u32 bno;
+ int ret;
+
+ ret = ext2_get_blocks(inode, first_block, max_blocks,
+ &bno, &new, &boundary, flags & IOMAP_WRITE);
+ if (ret < 0)
+ return ret;
+
+ iomap->flags = 0;
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = (u64)first_block << blkbits;
+
+ if (ret == 0) {
+ iomap->type = IOMAP_HOLE;
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->length = 1 << blkbits;
+ } else {
+ iomap->type = IOMAP_MAPPED;
+ iomap->blkno = (sector_t)bno << (blkbits - 9);
+ iomap->length = (u64)ret << blkbits;
+ iomap->flags |= IOMAP_F_MERGED;
}
- return ret;
+ if (new)
+ iomap->flags |= IOMAP_F_NEW;
+ return 0;
}
+static int
+ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
+{
+ if (iomap->type == IOMAP_MAPPED &&
+ written < length &&
+ (flags & IOMAP_WRITE))
+ ext2_write_failed(inode->i_mapping, offset + length);
+ return 0;
+}
+
+struct iomap_ops ext2_iomap_ops = {
+ .iomap_begin = ext2_iomap_begin,
+ .iomap_end = ext2_iomap_end,
+};
+#endif /* CONFIG_FS_DAX */
+
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -863,11 +936,10 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t offset = iocb->ki_pos;
ssize_t ret;
- if (IS_DAX(inode))
- ret = dax_do_io(iocb, inode, iter, ext2_get_block, NULL,
- DIO_LOCKING);
- else
- ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
+ if (WARN_ON_ONCE(IS_DAX(inode)))
+ return -EIO;
+
+ ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
if (ret < 0 && iov_iter_rw(iter) == WRITE)
ext2_write_failed(mapping, offset + count);
return ret;
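
In the new ext2_iomap_begin() above, one ext2_get_blocks() call is translated into either a hole or a single contiguous mapped extent: iomap->blkno is expressed in 512-byte sectors (hence the << (blkbits - 9)), IOMAP_F_MERGED records that the extent was stitched together from individual indirect-block pointers, and IOMAP_F_NEW marks freshly allocated blocks. The sketch below only shows how a consumer might read back such a mapping; it assumes the struct iomap fields used in these hunks and is not code from the patch:

static void report_extent(const struct iomap *iomap)
{
	if (iomap->type == IOMAP_HOLE) {
		pr_info("hole at %llu, %llu bytes\n",
			(unsigned long long)iomap->offset,
			(unsigned long long)iomap->length);
		return;
	}
	/* blkno is in 512-byte sectors; convert to a byte address for printing */
	pr_info("offset %llu -> byte %llu, %llu bytes%s\n",
		(unsigned long long)iomap->offset,
		(unsigned long long)iomap->blkno << 9,
		(unsigned long long)iomap->length,
		(iomap->flags & IOMAP_F_NEW) ? " (new)" : "");
}
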
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 67415e0e6af0..e8b365000d73 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -260,11 +260,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
0, 0, &de_name, &fstr);
+ de_name = fstr;
fstr.len = save_len;
- if (err < 0)
+ if (err)
goto errout;
if (!dir_emit(ctx,
- fstr.name, err,
+ de_name.name, de_name.len,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type)))
goto done;
@@ -627,7 +628,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
int buf_size)
{
struct ext4_dir_entry_2 *de;
- int nlen, rlen;
+ int rlen;
unsigned int offset = 0;
char *top;
@@ -637,7 +638,6 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
if (ext4_check_dir_entry(dir, NULL, de, bh,
buf, buf_size, offset))
return -EFSCORRUPTED;
- nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ea31931386ec..282a51b07c57 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -262,6 +262,9 @@ struct ext4_io_submit {
(s)->s_first_ino)
#endif
#define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits)))
+#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
+ ((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
+ blkbits))
/* Translate a block number to a cluster number */
#define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
@@ -1117,9 +1120,15 @@ struct ext4_inode_info {
#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
#define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
-#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
-#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
-#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+#define EXT4_MOUNT_QUOTA 0x40000 /* Some quota option set */
+#define EXT4_MOUNT_USRQUOTA 0x80000 /* "old" user quota,
+ * enable enforcement for hidden
+ * quota files */
+#define EXT4_MOUNT_GRPQUOTA 0x100000 /* "old" group quota, enable
+ * enforcement for hidden quota
+ * files */
+#define EXT4_MOUNT_PRJQUOTA 0x200000 /* Enable project quota
+ * enforcement */
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
@@ -1636,26 +1645,6 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
* Feature set definitions
*/
-/* Use the ext4_{has,set,clear}_feature_* helpers; these will be removed */
-#define EXT4_HAS_COMPAT_FEATURE(sb,mask) \
- ((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0)
-#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask) \
- ((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0)
-#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask) \
- ((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0)
-#define EXT4_SET_COMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT4_SET_INCOMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT4_CLEAR_COMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT4_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT4_CLEAR_INCOMPAT_FEATURE(sb,mask) \
- EXT4_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
#define EXT4_FEATURE_COMPAT_DIR_PREALLOC 0x0001
#define EXT4_FEATURE_COMPAT_IMAGIC_INODES 0x0002
#define EXT4_FEATURE_COMPAT_HAS_JOURNAL 0x0004
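
The EXT4_MAX_BLOCKS() macro introduced above replaces two open-coded computations that appear later in extents.c and file.c, where the deleted comments noted that len alone cannot be converted to a block count. A concrete case makes the alignment obvious: with a 4096-byte block size, offset = 3072 and len = 2048 touch bytes 3072..5119, i.e. parts of blocks 0 and 1. A plain-integer restatement of the same computation (an illustrative helper, not the macro itself):

static unsigned long blocks_spanned(unsigned long long offset,
				    unsigned long long len,
				    unsigned int blkbits)
{
	unsigned long long last = (offset + len + (1ULL << blkbits) - 1) >> blkbits;

	/* offset = 3072, len = 2048, blkbits = 12:
	 * last = 8192 >> 12 = 2, first = 3072 >> 12 = 0  ->  2 blocks,
	 * whereas len >> blkbits alone would give 0.
	 */
	return last - (offset >> blkbits);
}
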
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index d7ccb7f51dfc..c930a0110fb4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4679,6 +4679,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
unsigned int credits;
loff_t epos;
+ BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
map.m_lblk = offset;
map.m_len = len;
/*
@@ -4693,13 +4694,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
- /*
- * We can only call ext_depth() on extent based inodes
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- depth = ext_depth(inode);
- else
- depth = -1;
+ depth = ext_depth(inode);
retry:
while (ret >= 0 && len) {
@@ -4966,13 +4961,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
trace_ext4_fallocate_enter(inode, offset, len, mode);
lblk = offset >> blkbits;
- /*
- * We can't just convert len to max_blocks because
- * If blocksize = 4096 offset = 3072 and len = 2048
- */
- max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- - lblk;
+ max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
@@ -5035,12 +5025,8 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
- /*
- * We can't just convert len to max_blocks because
- * If blocksize = 4096 offset = 3072 and len = 2048
- */
- max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
- map.m_lblk);
+ max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+
/*
* This is somewhat ugly but the idea is clear: When transaction is
* reserved, everything goes into it. Otherwise we rather start several
@@ -5734,6 +5720,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
+ } else {
+ ext4_ext_drop_refs(path);
+ kfree(path);
}
ret = ext4_es_remove_extent(inode, offset_lblk,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 261ac3734c58..36d49cfbf2dc 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -91,9 +91,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(iocb->ki_filp);
- struct blk_plug plug;
int o_direct = iocb->ki_flags & IOCB_DIRECT;
int unaligned_aio = 0;
int overwrite = 0;
@@ -134,18 +132,16 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (o_direct) {
size_t length = iov_iter_count(from);
loff_t pos = iocb->ki_pos;
- blk_start_plug(&plug);
/* check whether we do a DIO overwrite or not */
if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
- !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+ pos + length <= i_size_read(inode)) {
struct ext4_map_blocks map;
unsigned int blkbits = inode->i_blkbits;
int err, len;
map.m_lblk = pos >> blkbits;
- map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
- - map.m_lblk;
+ map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
len = map.m_len;
err = ext4_map_blocks(NULL, inode, &map, 0);
@@ -171,8 +167,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret > 0)
ret = generic_write_sync(iocb, ret);
- if (o_direct)
- blk_finish_plug(&plug);
return ret;
@@ -703,6 +697,7 @@ const struct file_operations ext4_file_operations = {
.open = ext4_file_open,
.release = ext4_release_file,
.fsync = ext4_sync_file,
+ .get_unmapped_area = thp_get_unmapped_area,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5c4372512ef7..88effb1053c7 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -61,6 +61,13 @@ static int ext4_sync_parent(struct inode *inode)
break;
iput(inode);
inode = next;
+ /*
+ * The directory inode may have gone through rmdir by now. But
+ * the inode itself and its blocks are still allocated (we hold
+ * a reference to the inode so it didn't go through
+ * ext4_evict_inode()) and so we are safe to flush metadata
+ * blocks and the inode.
+ */
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
@@ -107,7 +114,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!journal) {
ret = __generic_file_fsync(file, start, end, datasync);
- if (!ret && !hlist_empty(&inode->i_dentry))
+ if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
goto issue_flush;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9e66cd1d7b78..170421edfdfe 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -802,7 +802,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
} else
inode_init_owner(inode, dir, mode);
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+ if (ext4_has_feature_project(sb) &&
ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
ei->i_projid = EXT4_I(dir)->i_projid;
else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c6ea25a190f8..cd918823b352 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -647,11 +647,19 @@ found:
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
- * use them before they are really zeroed.
+ * use them before they are really zeroed. We also have to
+ * unmap metadata before zeroing as otherwise writeback can
+ * overwrite zeros with stale data from block device.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
+ ext4_lblk_t i;
+
+ for (i = 0; i < map->m_len; i++) {
+ unmap_underlying_metadata(inode->i_sb->s_bdev,
+ map->m_pblk + i);
+ }
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
@@ -1649,6 +1657,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
+ if (page_mapped(page))
+ clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
}
@@ -3526,35 +3536,31 @@ out:
static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
- int unlocked = 0;
- struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
ssize_t ret;
- if (ext4_should_dioread_nolock(inode)) {
- /*
- * Nolock dioread optimization may be dynamically disabled
- * via ext4_inode_block_unlocked_dio(). Check inode's state
- * while holding extra i_dio_count ref.
- */
- inode_dio_begin(inode);
- smp_mb();
- if (unlikely(ext4_test_inode_state(inode,
- EXT4_STATE_DIOREAD_LOCK)))
- inode_dio_end(inode);
- else
- unlocked = 1;
- }
+ /*
+ * Shared inode_lock is enough for us - it protects against concurrent
+ * writes & truncates and since we take care of writing back page cache,
+ * we are protected against page writeback as well.
+ */
+ inode_lock_shared(inode);
if (IS_DAX(inode)) {
- ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
- NULL, unlocked ? 0 : DIO_LOCKING);
+ ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
} else {
+ size_t count = iov_iter_count(iter);
+
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+ iocb->ki_pos + count);
+ if (ret)
+ goto out_unlock;
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, ext4_dio_get_block,
- NULL, NULL,
- unlocked ? 0 : DIO_LOCKING);
+ NULL, NULL, 0);
}
- if (unlocked)
- inode_dio_end(inode);
+out_unlock:
+ inode_unlock_shared(inode);
return ret;
}
@@ -3890,7 +3896,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
}
/*
- * ext4_punch_hole: punches a hole in a file by releaseing the blocks
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
* @inode: File inode
@@ -3919,7 +3925,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
@@ -4414,7 +4420,7 @@ static inline void ext4_iget_extra_inode(struct inode *inode,
int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
+ if (!ext4_has_feature_project(inode->i_sb))
return -EOPNOTSUPP;
*projid = EXT4_I(inode)->i_projid;
return 0;
@@ -4481,7 +4487,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+ if (ext4_has_feature_project(sb) &&
EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
@@ -4814,14 +4820,14 @@ static int ext4_do_update_inode(handle_t *handle,
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
- if (!ei->i_dtime) {
+ if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ } else {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(i_gid));
- } else {
- raw_inode->i_uid_high = 0;
- raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
@@ -4885,8 +4891,7 @@ static int ext4_do_update_inode(handle_t *handle,
}
}
- BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+ BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
i_projid != EXT4_DEF_PROJID);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
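
The ext4_direct_IO_read() rewrite above replaces the dynamic dioread-nolock/i_dio_count dance with a plain shared inode lock plus an explicit page-cache flush before the block-device direct I/O. The shape of that pattern, simplified and with DAX left out (dio_read_pattern() is a hypothetical name, and the end offset here uses the conventional inclusive form rather than copying the hunk):

static ssize_t dio_read_pattern(struct kiocb *iocb, struct iov_iter *iter,
				get_block_t get_block)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	/* shared lock: excludes writers and truncate, allows concurrent readers */
	inode_lock_shared(inode);
	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
					   iocb->ki_pos + count - 1);
	if (!ret)
		ret = blockdev_direct_IO(iocb, inode, iter, get_block);
	inode_unlock_shared(inode);
	return ret;
}
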
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 1bb7df5e4536..bf5ae8ebbc97 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -19,8 +19,6 @@
#include "ext4_jbd2.h"
#include "ext4.h"
-#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
-
/**
* Swap memory between @a and @b for @len bytes.
*
@@ -310,8 +308,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
struct ext4_inode *raw_inode;
struct dquot *transfer_to[MAXQUOTAS] = { };
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_PROJECT)) {
+ if (!ext4_has_feature_project(sb)) {
if (projid != EXT4_DEF_PROJID)
return -EOPNOTSUPP;
else
@@ -772,6 +769,9 @@ resizefs_out:
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct fscrypt_policy policy;
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&policy,
(struct fscrypt_policy __user *)arg,
sizeof(policy)))
@@ -842,8 +842,7 @@ resizefs_out:
ext4_get_inode_flags(ei);
fa.fsx_xflags = ext4_iflags_to_xflags(ei->i_flags & EXT4_FL_USER_VISIBLE);
- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_RO_COMPAT_PROJECT)) {
+ if (ext4_has_feature_project(inode->i_sb)) {
fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
EXT4_I(inode)->i_projid);
}
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index a920c5d29fac..6fc14def0c70 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
+ if (ext4_encrypted_inode(orig_inode) ||
+ ext4_encrypted_inode(donor_inode)) {
+ ext4_msg(orig_inode->i_sb, KERN_ERR,
+ "Online defrag not supported for encrypted files");
+ return -EOPNOTSUPP;
+ }
+
/* Protect orig and donor inodes against a truncate */
lock_two_nondirectories(orig_inode, donor_inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 34c0142caf6a..c344b819cffa 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -639,7 +639,7 @@ static struct stats dx_show_leaf(struct inode *dir,
res = fscrypt_fname_alloc_buffer(
dir, len,
&fname_crypto_str);
- if (res < 0)
+ if (res)
printk(KERN_WARNING "Error "
"allocating crypto "
"buffer--skipping "
@@ -647,7 +647,7 @@ static struct stats dx_show_leaf(struct inode *dir,
res = fscrypt_fname_disk_to_usr(dir,
0, 0, &de_name,
&fname_crypto_str);
- if (res < 0) {
+ if (res) {
printk(KERN_WARNING "Error "
"converting filename "
"from disk to usr"
@@ -1011,7 +1011,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
err = fscrypt_fname_disk_to_usr(dir, hinfo->hash,
hinfo->minor_hash, &de_name,
&fname_crypto_str);
- if (err < 0) {
+ if (err) {
count = err;
goto errout;
}
@@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
- bh = bh2;
retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (retval)
goto out_frames;
- retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
+ retval = ext4_handle_dirty_dirent_node(handle, dir, bh2);
if (retval)
goto out_frames;
- de = do_split(handle,dir, &bh, frame, &fname->hinfo);
+ de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
if (IS_ERR(de)) {
retval = PTR_ERR(de);
goto out_frames;
}
- dx_release(frames);
- retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
- brelse(bh);
- return retval;
+ retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
out_frames:
/*
* Even if the block split failed, we have to properly write
* out all the changes we did so far. Otherwise we can end up
* with corrupted filesystem.
*/
- ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ ext4_mark_inode_dirty(handle, dir);
dx_release(frames);
+ brelse(bh2);
return retval;
}
@@ -3144,7 +3142,7 @@ static int ext4_symlink(struct inode *dir,
istr.name = (const unsigned char *) symname;
istr.len = len;
err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
- if (err < 0)
+ if (err)
goto err_drop_inode;
sd->len = cpu_to_le16(ostr.len);
disk_link.name = (char *) sd;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index a6132a730967..b4cbee936cf8 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -405,14 +405,12 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
{
struct page *data_page = NULL;
struct inode *inode = page->mapping->host;
- unsigned block_start, blocksize;
+ unsigned block_start;
struct buffer_head *bh, *head;
int ret = 0;
int nr_submitted = 0;
int nr_to_submit = 0;
- blocksize = 1 << inode->i_blkbits;
-
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3ec8708989ca..6db81fbcbaa6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -78,6 +78,8 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
+static struct inode *ext4_get_journal_inode(struct super_block *sb,
+ unsigned int journal_inum);
/*
* Lock ordering
@@ -1267,7 +1269,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
- Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
+ Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
Opt_lazytime, Opt_nolazytime,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
@@ -1327,6 +1329,7 @@ static const match_table_t tokens = {
{Opt_noquota, "noquota"},
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
+ {Opt_prjquota, "prjquota"},
{Opt_barrier, "barrier=%u"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
@@ -1546,8 +1549,11 @@ static const struct mount_opts {
MOPT_SET | MOPT_Q},
{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
MOPT_SET | MOPT_Q},
+ {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
+ MOPT_SET | MOPT_Q},
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
- EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q},
+ EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
+ MOPT_CLEAR | MOPT_Q},
{Opt_usrjquota, 0, MOPT_Q},
{Opt_grpjquota, 0, MOPT_Q},
{Opt_offusrjquota, 0, MOPT_Q},
@@ -1836,13 +1842,17 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
#ifdef CONFIG_QUOTA
- if (ext4_has_feature_quota(sb) &&
- (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
- ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
- "mount options ignored.");
- clear_opt(sb, USRQUOTA);
- clear_opt(sb, GRPQUOTA);
- } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ /*
+ * We do the test below only for project quotas. 'usrquota' and
+ * 'grpquota' mount options are allowed even without quota feature
+ * to support legacy quotas in quota files.
+ */
+ if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
+ ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
+ "Cannot enable project quota enforcement.");
+ return 0;
+ }
+ if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@@ -2741,7 +2751,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
sb = elr->lr_super;
ngroups = EXT4_SB(sb)->s_groups_count;
- sb_start_write(sb);
for (group = elr->lr_next_group; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp) {
@@ -2768,8 +2777,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
elr->lr_next_sched = jiffies + elr->lr_timeout;
elr->lr_next_group = group + 1;
}
- sb_end_write(sb);
-
return ret;
}
@@ -2834,19 +2841,43 @@ cont_thread:
mutex_unlock(&eli->li_list_mtx);
goto exit_thread;
}
-
list_for_each_safe(pos, n, &eli->li_request_list) {
+ int err = 0;
+ int progress = 0;
elr = list_entry(pos, struct ext4_li_request,
lr_request);
- if (time_after_eq(jiffies, elr->lr_next_sched)) {
- if (ext4_run_li_request(elr) != 0) {
- /* error, remove the lazy_init job */
- ext4_remove_li_request(elr);
- continue;
+ if (time_before(jiffies, elr->lr_next_sched)) {
+ if (time_before(elr->lr_next_sched, next_wakeup))
+ next_wakeup = elr->lr_next_sched;
+ continue;
+ }
+ if (down_read_trylock(&elr->lr_super->s_umount)) {
+ if (sb_start_write_trylock(elr->lr_super)) {
+ progress = 1;
+ /*
+ * We hold sb->s_umount, sb can not
+ * be removed from the list, it is
+ * now safe to drop li_list_mtx
+ */
+ mutex_unlock(&eli->li_list_mtx);
+ err = ext4_run_li_request(elr);
+ sb_end_write(elr->lr_super);
+ mutex_lock(&eli->li_list_mtx);
+ n = pos->next;
}
+ up_read((&elr->lr_super->s_umount));
+ }
+ /* error, remove the lazy_init job */
+ if (err) {
+ ext4_remove_li_request(elr);
+ continue;
+ }
+ if (!progress) {
+ elr->lr_next_sched = jiffies +
+ (prandom_u32()
+ % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
}
-
if (time_before(elr->lr_next_sched, next_wakeup))
next_wakeup = elr->lr_next_sched;
}
@@ -3179,6 +3210,8 @@ int ext4_calculate_overhead(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ struct inode *j_inode;
+ unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
char *buf = (char *) get_zeroed_page(GFP_NOFS);
@@ -3209,10 +3242,23 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
- /* Add the internal journal blocks as well */
+
+ /*
+ * Add the internal journal blocks whether the journal has been
+ * loaded or not
+ */
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
-
+ else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
+ j_inode = ext4_get_journal_inode(sb, j_inum);
+ if (j_inode) {
+ j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
+ overhead += EXT4_NUM_B2C(sbi, j_blocks);
+ iput(j_inode);
+ } else {
+ ext4_msg(sb, KERN_ERR, "can't get journal size");
+ }
+ }
sbi->s_overhead = overhead;
smp_wmb();
free_page((unsigned long) buf);
@@ -4208,18 +4254,16 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
write_unlock(&journal->j_state_lock);
}
-static journal_t *ext4_get_journal(struct super_block *sb,
- unsigned int journal_inum)
+static struct inode *ext4_get_journal_inode(struct super_block *sb,
+ unsigned int journal_inum)
{
struct inode *journal_inode;
- journal_t *journal;
-
- BUG_ON(!ext4_has_feature_journal(sb));
-
- /* First, test for the existence of a valid inode on disk. Bad
- * things happen if we iget() an unused inode, as the subsequent
- * iput() will try to delete it. */
+ /*
+ * Test for the existence of a valid inode on disk. Bad things
+ * happen if we iget() an unused inode, as the subsequent iput()
+ * will try to delete it.
+ */
journal_inode = ext4_iget(sb, journal_inum);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
@@ -4239,6 +4283,20 @@ static journal_t *ext4_get_journal(struct super_block *sb,
iput(journal_inode);
return NULL;
}
+ return journal_inode;
+}
+
+static journal_t *ext4_get_journal(struct super_block *sb,
+ unsigned int journal_inum)
+{
+ struct inode *journal_inode;
+ journal_t *journal;
+
+ BUG_ON(!ext4_has_feature_journal(sb));
+
+ journal_inode = ext4_get_journal_inode(sb, journal_inum);
+ if (!journal_inode)
+ return NULL;
journal = jbd2_journal_init_inode(journal_inode);
if (!journal) {
@@ -5250,12 +5308,18 @@ static int ext4_enable_quotas(struct super_block *sb)
le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
};
+ bool quota_mopt[EXT4_MAXQUOTAS] = {
+ test_opt(sb, USRQUOTA),
+ test_opt(sb, GRPQUOTA),
+ test_opt(sb, PRJQUOTA),
+ };
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
for (type = 0; type < EXT4_MAXQUOTAS; type++) {
if (qf_inums[type]) {
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
- DQUOT_USAGE_ENABLED);
+ DQUOT_USAGE_ENABLED |
+ (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 4d83d9e05f2e..fdf1c6154745 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -30,7 +30,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
char *caddr, *paddr = NULL;
struct fscrypt_str cstr, pstr;
struct fscrypt_symlink_data *sd;
- loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
int res;
u32 max_size = inode->i_sb->s_blocksize;
@@ -49,7 +48,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
if (IS_ERR(cpage))
return ERR_CAST(cpage);
caddr = page_address(cpage);
- caddr[size] = 0;
}
/* Symlink is encrypted */
@@ -65,16 +63,14 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
if (res)
goto errout;
+ paddr = pstr.name;
res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
- if (res < 0)
+ if (res)
goto errout;
- paddr = pstr.name;
-
/* Null-terminate the name */
- if (res <= pstr.len)
- paddr[res] = '\0';
+ paddr[pstr.len] = '\0';
if (cpage)
put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
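
Several hunks in this series (ext4_readdir, dx_show_leaf, htree_dirblock_to_tree and the symlink code above) switch to the newer fscrypt_fname_disk_to_usr() convention: the function now returns 0 or a negative error, and the decoded length is reported in the output fscrypt_str rather than in the return value, which is why the null terminator moves to paddr[pstr.len]. A hedged sketch of that calling pattern (decode_name() is a hypothetical wrapper):

static int decode_name(struct inode *inode, struct fscrypt_str *disk,
		       struct fscrypt_str *out)
{
	int err = fscrypt_fname_alloc_buffer(inode, disk->len, out);

	if (err)
		return err;
	err = fscrypt_fname_disk_to_usr(inode, 0, 0, disk, out);
	if (err) {
		fscrypt_fname_free_buffer(out);
		return err;
	}
	out->name[out->len] = '\0';	/* length now comes from out->len */
	return 0;
}
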
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 2eb935ca5d9e..c15d63389957 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -199,6 +199,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
}
while (!IS_LAST_ENTRY(entry)) {
+ if (entry->e_value_block != 0)
+ return -EFSCORRUPTED;
if (entry->e_value_size != 0 &&
(value_start + le16_to_cpu(entry->e_value_offs) <
(void *)e + sizeof(__u32) ||
@@ -641,7 +643,7 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
size_t *min_offs, void *base, int *total)
{
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
- if (!last->e_value_block && last->e_value_size) {
+ if (last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < *min_offs)
*min_offs = offs;
@@ -661,7 +663,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
/* Compute min_offs and last. */
last = s->first;
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
- if (!last->e_value_block && last->e_value_size) {
+ if (last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
min_offs = offs;
@@ -669,7 +671,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
}
free = min_offs - ((void *)last - s->base) - sizeof(__u32);
if (!s->not_found) {
- if (!s->here->e_value_block && s->here->e_value_size) {
+ if (s->here->e_value_size) {
size_t size = le32_to_cpu(s->here->e_value_size);
free += EXT4_XATTR_SIZE(size);
}
@@ -691,7 +693,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
s->here->e_name_len = name_len;
memcpy(s->here->e_name, i->name, name_len);
} else {
- if (!s->here->e_value_block && s->here->e_value_size) {
+ if (s->here->e_value_size) {
void *first_val = s->base + min_offs;
size_t offs = le16_to_cpu(s->here->e_value_offs);
void *val = s->base + offs;
@@ -725,8 +727,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
last = s->first;
while (!IS_LAST_ENTRY(last)) {
size_t o = le16_to_cpu(last->e_value_offs);
- if (!last->e_value_block &&
- last->e_value_size && o < offs)
+ if (last->e_value_size && o < offs)
last->e_value_offs =
cpu_to_le16(o + size);
last = EXT4_XATTR_NEXT(last);
@@ -1318,18 +1319,19 @@ retry:
*/
static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
int value_offs_shift, void *to,
- void *from, size_t n, int blocksize)
+ void *from, size_t n)
{
struct ext4_xattr_entry *last = entry;
int new_offs;
+ /* We always shift xattr headers further thus offsets get lower */
+ BUG_ON(value_offs_shift > 0);
+
/* Adjust the value offsets of the entries */
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
- if (!last->e_value_block && last->e_value_size) {
+ if (last->e_value_size) {
new_offs = le16_to_cpu(last->e_value_offs) +
value_offs_shift;
- BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
- > blocksize);
last->e_value_offs = cpu_to_le16(new_offs);
}
}
@@ -1338,6 +1340,141 @@ static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
}
/*
+ * Move xattr pointed to by 'entry' from inode into external xattr block
+ */
+static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ struct ext4_inode *raw_inode,
+ struct ext4_xattr_entry *entry)
+{
+ struct ext4_xattr_ibody_find *is = NULL;
+ struct ext4_xattr_block_find *bs = NULL;
+ char *buffer = NULL, *b_entry_name = NULL;
+ size_t value_offs, value_size;
+ struct ext4_xattr_info i = {
+ .value = NULL,
+ .value_len = 0,
+ .name_index = entry->e_name_index,
+ };
+ struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ int error;
+
+ value_offs = le16_to_cpu(entry->e_value_offs);
+ value_size = le32_to_cpu(entry->e_value_size);
+
+ is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+ buffer = kmalloc(value_size, GFP_NOFS);
+ b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+ if (!is || !bs || !buffer || !b_entry_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ is->s.not_found = -ENODATA;
+ bs->s.not_found = -ENODATA;
+ is->iloc.bh = NULL;
+ bs->bh = NULL;
+
+ /* Save the entry name and the entry value */
+ memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
+ memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+ b_entry_name[entry->e_name_len] = '\0';
+ i.name = b_entry_name;
+
+ error = ext4_get_inode_loc(inode, &is->iloc);
+ if (error)
+ goto out;
+
+ error = ext4_xattr_ibody_find(inode, &i, is);
+ if (error)
+ goto out;
+
+ /* Remove the chosen entry from the inode */
+ error = ext4_xattr_ibody_set(handle, inode, &i, is);
+ if (error)
+ goto out;
+
+ i.name = b_entry_name;
+ i.value = buffer;
+ i.value_len = value_size;
+ error = ext4_xattr_block_find(inode, &i, bs);
+ if (error)
+ goto out;
+
+ /* Add entry which was removed from the inode into the block */
+ error = ext4_xattr_block_set(handle, inode, &i, bs);
+ if (error)
+ goto out;
+ error = 0;
+out:
+ kfree(b_entry_name);
+ kfree(buffer);
+ if (is)
+ brelse(is->iloc.bh);
+ kfree(is);
+ kfree(bs);
+
+ return error;
+}
+
+static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
+ struct ext4_inode *raw_inode,
+ int isize_diff, size_t ifree,
+ size_t bfree, int *total_ino)
+{
+ struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ struct ext4_xattr_entry *small_entry;
+ struct ext4_xattr_entry *entry;
+ struct ext4_xattr_entry *last;
+ unsigned int entry_size; /* EA entry size */
+ unsigned int total_size; /* EA entry size + value size */
+ unsigned int min_total_size;
+ int error;
+
+ while (isize_diff > ifree) {
+ entry = NULL;
+ small_entry = NULL;
+ min_total_size = ~0U;
+ last = IFIRST(header);
+ /* Find the entry best suited to be pushed into EA block */
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ total_size =
+ EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
+ EXT4_XATTR_LEN(last->e_name_len);
+ if (total_size <= bfree &&
+ total_size < min_total_size) {
+ if (total_size + ifree < isize_diff) {
+ small_entry = last;
+ } else {
+ entry = last;
+ min_total_size = total_size;
+ }
+ }
+ }
+
+ if (entry == NULL) {
+ if (small_entry == NULL)
+ return -ENOSPC;
+ entry = small_entry;
+ }
+
+ entry_size = EXT4_XATTR_LEN(entry->e_name_len);
+ total_size = entry_size +
+ EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
+ error = ext4_xattr_move_to_block(handle, inode, raw_inode,
+ entry);
+ if (error)
+ return error;
+
+ *total_ino -= entry_size;
+ ifree += total_size;
+ bfree -= total_size;
+ }
+
+ return 0;
+}
+
+/*
* Expand an inode by new_extra_isize bytes when EAs are present.
* Returns 0 on success or negative error number on failure.
*/
@@ -1345,14 +1482,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle)
{
struct ext4_xattr_ibody_header *header;
- struct ext4_xattr_entry *entry, *last, *first;
struct buffer_head *bh = NULL;
- struct ext4_xattr_ibody_find *is = NULL;
- struct ext4_xattr_block_find *bs = NULL;
- char *buffer = NULL, *b_entry_name = NULL;
- size_t min_offs, free;
+ size_t min_offs;
+ size_t ifree, bfree;
int total_ino;
- void *base, *start, *end;
+ void *base, *end;
int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
int isize_diff; /* How much do we need to grow i_extra_isize */
@@ -1368,34 +1502,24 @@ retry:
goto out;
header = IHDR(inode, raw_inode);
- entry = IFIRST(header);
/*
* Check if enough free space is available in the inode to shift the
* entries ahead by new_extra_isize.
*/
- base = start = entry;
+ base = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
min_offs = end - base;
- last = entry;
total_ino = sizeof(struct ext4_xattr_ibody_header);
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
- free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
- if (free >= isize_diff) {
- entry = IFIRST(header);
- ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
- - new_extra_isize, (void *)raw_inode +
- EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
- (void *)header, total_ino,
- inode->i_sb->s_blocksize);
- EXT4_I(inode)->i_extra_isize = new_extra_isize;
- goto out;
- }
+ ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
+ if (ifree >= isize_diff)
+ goto shift;
/*
* Enough free space isn't available in the inode, check if
@@ -1413,146 +1537,44 @@ retry:
goto cleanup;
}
base = BHDR(bh);
- first = BFIRST(bh);
end = bh->b_data + bh->b_size;
min_offs = end - base;
- free = ext4_xattr_free_space(first, &min_offs, base, NULL);
- if (free < isize_diff) {
+ bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
+ NULL);
+ if (bfree + ifree < isize_diff) {
if (!tried_min_extra_isize && s_min_extra_isize) {
tried_min_extra_isize++;
new_extra_isize = s_min_extra_isize;
brelse(bh);
goto retry;
}
- error = -1;
+ error = -ENOSPC;
goto cleanup;
}
} else {
- free = inode->i_sb->s_blocksize;
+ bfree = inode->i_sb->s_blocksize;
}
- while (isize_diff > 0) {
- size_t offs, size, entry_size;
- struct ext4_xattr_entry *small_entry = NULL;
- struct ext4_xattr_info i = {
- .value = NULL,
- .value_len = 0,
- };
- unsigned int total_size; /* EA entry size + value size */
- unsigned int shift_bytes; /* No. of bytes to shift EAs by? */
- unsigned int min_total_size = ~0U;
-
- is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
- bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
- if (!is || !bs) {
- error = -ENOMEM;
- goto cleanup;
- }
-
- is->s.not_found = -ENODATA;
- bs->s.not_found = -ENODATA;
- is->iloc.bh = NULL;
- bs->bh = NULL;
-
- last = IFIRST(header);
- /* Find the entry best suited to be pushed into EA block */
- entry = NULL;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
- total_size =
- EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
- EXT4_XATTR_LEN(last->e_name_len);
- if (total_size <= free && total_size < min_total_size) {
- if (total_size < isize_diff) {
- small_entry = last;
- } else {
- entry = last;
- min_total_size = total_size;
- }
- }
- }
-
- if (entry == NULL) {
- if (small_entry) {
- entry = small_entry;
- } else {
- if (!tried_min_extra_isize &&
- s_min_extra_isize) {
- tried_min_extra_isize++;
- new_extra_isize = s_min_extra_isize;
- kfree(is); is = NULL;
- kfree(bs); bs = NULL;
- brelse(bh);
- goto retry;
- }
- error = -1;
- goto cleanup;
- }
- }
- offs = le16_to_cpu(entry->e_value_offs);
- size = le32_to_cpu(entry->e_value_size);
- entry_size = EXT4_XATTR_LEN(entry->e_name_len);
- i.name_index = entry->e_name_index,
- buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
- b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
- if (!buffer || !b_entry_name) {
- error = -ENOMEM;
- goto cleanup;
+ error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
+ isize_diff, ifree, bfree,
+ &total_ino);
+ if (error) {
+ if (error == -ENOSPC && !tried_min_extra_isize &&
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
+ brelse(bh);
+ goto retry;
}
- /* Save the entry name and the entry value */
- memcpy(buffer, (void *)IFIRST(header) + offs,
- EXT4_XATTR_SIZE(size));
- memcpy(b_entry_name, entry->e_name, entry->e_name_len);
- b_entry_name[entry->e_name_len] = '\0';
- i.name = b_entry_name;
-
- error = ext4_get_inode_loc(inode, &is->iloc);
- if (error)
- goto cleanup;
-
- error = ext4_xattr_ibody_find(inode, &i, is);
- if (error)
- goto cleanup;
-
- /* Remove the chosen entry from the inode */
- error = ext4_xattr_ibody_set(handle, inode, &i, is);
- if (error)
- goto cleanup;
- total_ino -= entry_size;
-
- entry = IFIRST(header);
- if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
- shift_bytes = isize_diff;
- else
- shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
- /* Adjust the offsets and shift the remaining entries ahead */
- ext4_xattr_shift_entries(entry, -shift_bytes,
- (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
- EXT4_I(inode)->i_extra_isize + shift_bytes,
- (void *)header, total_ino, inode->i_sb->s_blocksize);
-
- isize_diff -= shift_bytes;
- EXT4_I(inode)->i_extra_isize += shift_bytes;
- header = IHDR(inode, raw_inode);
-
- i.name = b_entry_name;
- i.value = buffer;
- i.value_len = size;
- error = ext4_xattr_block_find(inode, &i, bs);
- if (error)
- goto cleanup;
-
- /* Add entry which was removed from the inode into the block */
- error = ext4_xattr_block_set(handle, inode, &i, bs);
- if (error)
- goto cleanup;
- kfree(b_entry_name);
- kfree(buffer);
- b_entry_name = NULL;
- buffer = NULL;
- brelse(is->iloc.bh);
- kfree(is);
- kfree(bs);
+ goto cleanup;
}
+shift:
+ /* Adjust the offsets and shift the remaining entries ahead */
+ ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
+ - new_extra_isize, (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
+ (void *)header, total_ino);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
brelse(bh);
out:
ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
@@ -1560,12 +1582,6 @@ out:
return 0;
cleanup:
- kfree(b_entry_name);
- kfree(buffer);
- if (is)
- brelse(is->iloc.bh);
- kfree(is);
- kfree(bs);
brelse(bh);
/*
* We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
@@ -1734,7 +1750,7 @@ static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
*name++;
}
- if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+ if (entry->e_value_size != 0) {
__le32 *value = (__le32 *)((char *)header +
le16_to_cpu(entry->e_value_offs));
for (n = (le32_to_cpu(entry->e_value_size) +
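
With the expansion loop above factored into ext4_xattr_make_inode_space(), the selection policy becomes easier to see: on each pass it prefers the smallest in-inode attribute that both fits into the external block's free space and, together with what is already free in the inode, covers the remaining deficit; if no single entry is big enough, it falls back to the best merely-fitting candidate and loops again. The standalone sketch below restates that choice over a plain array of entry sizes; it is an illustration of the policy, not the kernel code:

/* Returns the index to evict, or -1 when nothing fits (the -ENOSPC case). */
static int pick_victim(const unsigned int *size, int n,
		       unsigned int deficit, unsigned int ifree,
		       unsigned int bfree)
{
	unsigned int best_size = ~0U;
	int best = -1, small = -1, i;

	for (i = 0; i < n; i++) {
		if (size[i] > bfree || size[i] >= best_size)
			continue;
		if (size[i] + ifree < deficit) {
			small = i;		/* fits, but not enough on its own */
		} else {
			best = i;
			best_size = size[i];
		}
	}
	return best >= 0 ? best : small;
}
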
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 4dcc9e28dc5c..1e29630f49c1 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -109,14 +109,16 @@ fail:
return ERR_PTR(-EINVAL);
}
-static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
+ const struct posix_acl *acl, size_t *size)
{
struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry;
int i;
- f2fs_acl = f2fs_kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
- sizeof(struct f2fs_acl_entry), GFP_NOFS);
+ f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
+ acl->a_count * sizeof(struct f2fs_acl_entry),
+ GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) {
- value = f2fs_kmalloc(retval, GFP_F2FS_ZERO);
+ value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
@@ -230,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
}
if (acl) {
- value = f2fs_acl_to_disk(acl, &size);
+ value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) {
clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value);
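
The f2fs changes above thread a struct f2fs_sb_info through f2fs_kmalloc() and time_to_inject() so fault injection is configured and accounted per mounted filesystem rather than globally. The wrapper typically looks like the sketch below; FAULT_KMALLOC is assumed here to be the relevant fault class from f2fs's fault-injection support and is not defined by these hunks:

#ifdef CONFIG_F2FS_FAULT_INJECTION
static inline void *f2fs_kmalloc_sketch(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC))	/* per-sb rate and counters */
		return NULL;
	return kmalloc(size, flags);
}
#endif
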
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index b2334d11dae8..2c685185c24d 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -41,7 +41,6 @@ extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
-#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f94d01e7d001..7e9b504bd8b2 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -28,7 +28,7 @@ struct kmem_cache *inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
sbi->sb->s_flags |= MS_RDONLY;
if (!end_io)
f2fs_flush_merged_bios(sbi);
@@ -267,7 +267,6 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
- struct blk_plug plug;
long diff, written;
/* collect a number of dirty meta pages and write together */
@@ -280,9 +279,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
- blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
- blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
@@ -388,6 +385,9 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -491,7 +491,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
spin_lock(&im->ino_lock);
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_ORPHAN)) {
+ if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
return -ENOSPC;
}
@@ -531,8 +531,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
+ struct node_info ni;
+ int err = acquire_orphan_inode(sbi);
+
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return err;
+ }
- inode = f2fs_iget(sbi->sb, ino);
+ __add_ino_entry(sbi, ino, ORPHAN_INO);
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/*
* there should be a bug that we can't find the entry
@@ -546,6 +558,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
+
+ get_node_info(sbi, ino, &ni);
+
+ /* ENOMEM was fully retried in f2fs_evict_inode. */
+ if (ni.blk_addr != NULL_ADDR) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return -EIO;
+ }
+ __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
}
@@ -554,7 +578,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j;
int err;
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+ if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -578,7 +602,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
- clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
+ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
return 0;
}
@@ -639,45 +663,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
- block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ unsigned long long *version)
{
- struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
- struct f2fs_checkpoint *cp_block;
- unsigned long long cur_version = 0, pre_version = 0;
- size_t crc_offset;
+ size_t crc_offset = 0;
__u32 crc = 0;
- /* Read the 1st cp block in this CP pack */
- cp_page_1 = get_meta_page(sbi, cp_addr);
+ *cp_page = get_meta_page(sbi, cp_addr);
+ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
- /* get the version number */
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp1;
+ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+ if (crc_offset >= blk_size) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid crc_offset: %zu", crc_offset);
+ return -EINVAL;
+ }
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
- goto invalid_cp1;
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+ + crc_offset)));
+ if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+ return -EINVAL;
+ }
- pre_version = cur_cp_version(cp_block);
+ *version = cur_cp_version(*cp_block);
+ return 0;
+}
- /* Read the 2nd cp block in this CP pack */
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
- cp_page_2 = get_meta_page(sbi, cp_addr);
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct f2fs_checkpoint *cp_block = NULL;
+ unsigned long long cur_version = 0, pre_version = 0;
+ int err;
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp2;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_1, version);
+ if (err)
+ goto invalid_cp1;
+ pre_version = *version;
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset))
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_2, version);
+ if (err)
goto invalid_cp2;
-
- cur_version = cur_cp_version(cp_block);
+ cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
@@ -972,10 +1006,40 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
finish_wait(&sbi->cp_wait, &wait);
}
+static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+
+ spin_lock(&sbi->cp_lock);
+
+ if (cpc->reason == CP_UMOUNT)
+ __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+ if (cpc->reason == CP_FASTBOOT)
+ __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+
+ if (orphan_num)
+ __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
+ /* set this flag to activate crc|cp_ver for recovery */
+ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
+ spin_unlock(&sbi->cp_lock);
+}
+
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
@@ -984,19 +1048,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
- bool invalidate = false;
struct super_block *sb = sbi->sb;
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
u64 kbytes_written;
- /*
- * This avoids to conduct wrong roll-forward operations and uses
- * metapages, so should be called prior to sync_meta_pages below.
- */
- if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
- invalidate = true;
-
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
@@ -1036,10 +1091,12 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
+ spin_lock(&sbi->cp_lock);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
- set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
- clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ spin_unlock(&sbi->cp_lock);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
@@ -1054,23 +1111,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_payload_blks + data_sum_blocks +
orphan_blocks);
- if (cpc->reason == CP_UMOUNT)
- set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-
- if (cpc->reason == CP_FASTBOOT)
- set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
-
- if (orphan_num)
- set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ /* update ckpt flag for checkpoint */
+ update_ckpt_flags(sbi, cpc);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
@@ -1137,14 +1179,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
- /*
- * invalidate meta page which is used temporarily for zeroing out
- * block at the end of warm node chain.
- */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
- discard_blk);
-
release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi)))
@@ -1152,6 +1186,17 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
+ clear_sbi_flag(sbi, SBI_NEED_CP);
+
+ /*
+ * redirty the superblock if metadata such as node pages or the inode
+ * cache was updated while writing the checkpoint.
+ */
+ if (get_pages(sbi, F2FS_DIRTY_NODES) ||
+ get_pages(sbi, F2FS_DIRTY_IMETA))
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
return 0;
}
@@ -1190,6 +1235,18 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_merged_bios(sbi);
+ /* this is the case of multiple fstrims without any changes */
+ if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
+ f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
+ f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
+ f2fs_bug_on(sbi, prefree_segments(sbi));
+ flush_sit_entries(sbi, cpc);
+ clear_prefree_segments(sbi, cpc);
+ f2fs_wait_all_discard_bio(sbi);
+ unblock_operations(sbi);
+ goto out;
+ }
+
/*
* update checkpoint pack index
* Increase the version number so that
@@ -1205,6 +1262,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc);
+ f2fs_wait_all_discard_bio(sbi);
+
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
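
The checkpoint.c changes above funnel every read-modify-write of the packed ckpt_flags word through a new cp_lock, with bare __set_ckpt_flags()/__clear_ckpt_flags() helpers that assume the lock is already held. As a rough userspace illustration of that locking pattern only (the struct and function names below are invented for this sketch, not the f2fs API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of a checkpoint flags word guarded by one lock. */
struct model_checkpoint {
	uint32_t ckpt_flags;          /* stands in for the on-disk LE field */
};

struct model_sbi {
	struct model_checkpoint ckpt;
	pthread_mutex_t cp_lock;      /* plays the role of sbi->cp_lock */
};

/* Unlocked helpers: callers must hold cp_lock, as with __set_ckpt_flags(). */
static void model_set_flag(struct model_checkpoint *cp, uint32_t f)
{
	cp->ckpt_flags |= f;
}

static void model_clear_flag(struct model_checkpoint *cp, uint32_t f)
{
	cp->ckpt_flags &= ~f;
}

/* Locked wrapper mirroring the set_ckpt_flags()/clear_ckpt_flags() shape. */
static void model_set_locked(struct model_sbi *sbi, uint32_t f)
{
	pthread_mutex_lock(&sbi->cp_lock);
	model_set_flag(&sbi->ckpt, f);
	pthread_mutex_unlock(&sbi->cp_lock);
}

int main(void)
{
	struct model_sbi sbi = { .ckpt = { 0 } };

	pthread_mutex_init(&sbi.cp_lock, NULL);
	model_set_locked(&sbi, 1u << 0);            /* e.g. an "umount" flag */

	/* several flags updated atomically, as in update_ckpt_flags() */
	pthread_mutex_lock(&sbi.cp_lock);
	model_clear_flag(&sbi.ckpt, 1u << 0);
	model_set_flag(&sbi.ckpt, 1u << 1);
	pthread_mutex_unlock(&sbi.cp_lock);

	printf("flags = 0x%x\n", (unsigned)sbi.ckpt.ckpt_flags);
	pthread_mutex_destroy(&sbi.cp_lock);
	return 0;
}
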
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ccb401eebc11..0d0177c9149c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -34,6 +34,11 @@ static void f2fs_read_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
+ bio->bi_error = -EIO;
+#endif
+
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private);
@@ -626,11 +631,13 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret = 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
- map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
- map.m_next_pgofs = NULL;
+ map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ map.m_len = 0;
- if (f2fs_encrypted_inode(inode))
- return 0;
+ map.m_next_pgofs = NULL;
if (iocb->ki_flags & IOCB_DIRECT) {
ret = f2fs_convert_inline_inode(inode);
@@ -672,6 +679,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
bool allocated = false;
block_t blkaddr;
+ if (!maxblocks)
+ return 0;
+
map->m_len = 0;
map->m_flags = 0;
@@ -783,6 +793,7 @@ skip:
err = reserve_new_blocks(&dn, prealloc);
if (err)
goto sync_out;
+ allocated = dn.node_changed;
map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
@@ -966,8 +977,8 @@ out:
return ret;
}
-struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
+static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
@@ -1284,7 +1295,7 @@ write:
if (!wbc->for_reclaim)
need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0))
+ else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
@@ -1344,6 +1355,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled;
int range_whole = 0;
int tag;
+ int nwritten = 0;
pagevec_init(&pvec, 0);
@@ -1418,6 +1430,8 @@ continue_unlock:
done_index = page->index + 1;
done = 1;
break;
+ } else {
+ nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -1439,6 +1453,10 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
+ NULL, 0, DATA, WRITE);
+
return ret;
}
@@ -1480,7 +1498,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
* if some pages were truncated, we cannot guarantee its mapping->host
* to detect pending bios.
*/
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
remove_dirty_inode(inode);
return ret;
@@ -1518,8 +1535,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
* we already allocated all the blocks, so we don't need to get
* the block addresses when there is no need to fill the page.
*/
- if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
- len == PAGE_SIZE)
+ if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
return 0;
if (f2fs_has_inline_data(inode) ||
@@ -1616,7 +1632,7 @@ repeat:
if (err)
goto fail;
- if (need_balance && has_not_enough_free_secs(sbi, 0)) {
+ if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
@@ -1633,22 +1649,12 @@ repeat:
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- if (len == PAGE_SIZE)
- goto out_update;
- if (PageUptodate(page))
- goto out_clear;
-
- if ((pos & PAGE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_SIZE - 1);
- unsigned end = start + len;
-
- /* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_SIZE);
- goto out_update;
- }
+ if (len == PAGE_SIZE || PageUptodate(page))
+ return 0;
if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
} else {
struct bio *bio;
@@ -1676,11 +1682,6 @@ repeat:
goto fail;
}
}
-out_update:
- if (!PageUptodate(page))
- SetPageUptodate(page);
-out_clear:
- clear_cold_data(page);
return 0;
fail:
@@ -1698,11 +1699,26 @@ static int f2fs_write_end(struct file *file,
trace_f2fs_write_end(inode, pos, len, copied);
+ /*
+ * This should come from len == PAGE_SIZE, and we expect copied to
+ * be PAGE_SIZE. Otherwise, we treat it as a zero-length copy and
+ * let generic_perform_write() retry the copy by returning copied=0.
+ */
+ if (!PageUptodate(page)) {
+ if (unlikely(copied != PAGE_SIZE))
+ copied = 0;
+ else
+ SetPageUptodate(page);
+ }
+ if (!copied)
+ goto unlock_out;
+
set_page_dirty(page);
+ clear_cold_data(page);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
-
+unlock_out:
f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
@@ -1873,6 +1889,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
+int f2fs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc, extra_count;
+ struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+ bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
+ BUG_ON(PageWriteback(page));
+
+ /* migrating an atomic written page is safe with the inmem_lock held */
+ if (atomic_written && !mutex_trylock(&fi->inmem_lock))
+ return -EAGAIN;
+
+ /*
+ * A reference is expected if PagePrivate is set when the mapping is
+ * moved; however, F2FS breaks this rule to maintain dirty page counts
+ * when truncating pages. Adjusting 'extra_count' here makes it work.
+ */
+ extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+ rc = migrate_page_move_mapping(mapping, newpage,
+ page, NULL, mode, extra_count);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+ if (atomic_written)
+ mutex_unlock(&fi->inmem_lock);
+ return rc;
+ }
+
+ if (atomic_written) {
+ struct inmem_pages *cur;
+ list_for_each_entry(cur, &fi->inmem_pages, list)
+ if (cur->page == page) {
+ cur->page = newpage;
+ break;
+ }
+ mutex_unlock(&fi->inmem_lock);
+ put_page(page);
+ get_page(newpage);
+ }
+
+ if (PagePrivate(page))
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+
+ migrate_page_copy(newpage, page);
+
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
@@ -1885,4 +1953,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
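
The f2fs_preallocate_blocks() hunk above now derives the mapping length from the end position of the write rather than from the byte count alone, and clamps it so it cannot underflow. A minimal standalone sketch of that arithmetic, assuming 4KB blocks and using helper names invented here:

#include <stdint.h>
#include <stdio.h>

#define BLK_SHIFT 12                   /* assume 4KB blocks */

/*
 * The mapping length is the block index of (pos + count) minus the
 * block index of pos, clamped to zero when the whole write fits into
 * the first block.
 */
static uint64_t bytes_to_blk(uint64_t bytes)
{
	return bytes >> BLK_SHIFT;
}

static uint64_t prealloc_blocks(uint64_t pos, uint64_t count)
{
	uint64_t start = bytes_to_blk(pos);
	uint64_t end = bytes_to_blk(pos + count);

	return end > start ? end - start : 0;
}

int main(void)
{
	/* spans two whole blocks starting at block 1 */
	printf("%llu\n", (unsigned long long)prealloc_blocks(4096, 8192));
	/* small write inside block 0: nothing extra to map */
	printf("%llu\n", (unsigned long long)prealloc_blocks(100, 200));
	return 0;
}
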
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index badd407bb622..fb245bd302e4 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -45,6 +45,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+ si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
@@ -54,6 +55,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
+ si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr);
@@ -154,7 +156,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
- si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ if (f2fs_discard_en(sbi))
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -228,8 +232,13 @@ static int stat_show(struct seq_file *s, void *v)
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
- seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
- si->utilization, si->valid_count);
+ if (test_opt(si->sbi, DISCARD))
+ seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
+ si->utilization, si->valid_count, si->discard_blks);
+ else
+ seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
+ si->utilization, si->valid_count);
+
seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n",
@@ -311,6 +320,8 @@ static int stat_show(struct seq_file *s, void *v)
si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4lld in %4d\n",
si->ndirty_meta, si->meta_pages);
+ seq_printf(s, " - imeta: %4lld\n",
+ si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d\n",
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 9054aeac8015..12b5836a1033 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
-unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR,
@@ -172,7 +172,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
int max_slots;
f2fs_hash_t namehash;
- namehash = f2fs_dentry_hash(&name);
+ if (fname->hash)
+ namehash = cpu_to_le32(fname->hash);
+ else
+ namehash = f2fs_dentry_hash(&name);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -212,31 +215,17 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
return de;
}
-/*
- * Find an entry in the specified directory with the wanted name.
- * It returns the page where the entry was found (as a parameter - res_page),
- * and the entry itself. Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
- */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- const struct qstr *child, struct page **res_page)
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
- struct fscrypt_name fname;
- int err;
-
- err = fscrypt_setup_filename(dir, child, 1, &fname);
- if (err) {
- *res_page = ERR_PTR(err);
- return NULL;
- }
if (f2fs_has_inline_dentry(dir)) {
*res_page = NULL;
- de = find_in_inline_dir(dir, &fname, res_page);
+ de = find_in_inline_dir(dir, fname, res_page);
goto out;
}
@@ -256,11 +245,35 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
for (level = 0; level < max_depth; level++) {
*res_page = NULL;
- de = find_in_level(dir, level, &fname, res_page);
+ de = find_in_level(dir, level, fname, res_page);
if (de || IS_ERR(*res_page))
break;
}
out:
+ return de;
+}
+
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page)
+{
+ struct f2fs_dir_entry *de = NULL;
+ struct fscrypt_name fname;
+ int err;
+
+ err = fscrypt_setup_filename(dir, child, 1, &fname);
+ if (err) {
+ *res_page = ERR_PTR(err);
+ return NULL;
+ }
+
+ de = __f2fs_find_entry(dir, &fname, res_page);
+
fscrypt_free_filename(&fname);
return de;
}
@@ -375,7 +388,8 @@ static int make_empty_dir(struct inode *inode,
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *name, struct page *dpage)
+ const struct qstr *new_name, const struct qstr *orig_name,
+ struct page *dpage)
{
struct page *page;
int err;
@@ -400,7 +414,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, name, page);
+ err = f2fs_init_security(inode, dir, orig_name, page);
if (err)
goto put_error;
@@ -417,8 +431,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page);
}
- if (name)
- init_dent_inode(name, page);
+ if (new_name)
+ init_dent_inode(new_name, page);
/*
* This file should be checkpointed during fsync.
@@ -496,7 +510,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
de->ino = cpu_to_le32(ino);
set_de_type(de, mode);
for (i = 0; i < slots; i++) {
- test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+ __set_bit_le(bit_pos + i, (void *)d->bitmap);
/* avoid wrong garbage data for readdir */
if (i)
(de + i)->name_len = 0;
@@ -504,6 +518,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
}
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
@@ -530,7 +545,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
start:
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_DIR_DEPTH))
+ if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
return -ENOSPC;
#endif
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
@@ -569,7 +584,8 @@ add_dentry:
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, new_name, NULL);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -599,6 +615,26 @@ fail:
return err;
}
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct qstr new_name;
+ int err = -EAGAIN;
+
+ new_name.name = fname_name(fname);
+ new_name.len = fname_len(fname);
+
+ if (f2fs_has_inline_dentry(dir))
+ err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+ if (err == -EAGAIN)
+ err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ return err;
+}
+
/*
* Caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
@@ -607,24 +643,15 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
struct fscrypt_name fname;
- struct qstr new_name;
int err;
err = fscrypt_setup_filename(dir, name, 0, &fname);
if (err)
return err;
- new_name.name = fname_name(&fname);
- new_name.len = fname_len(&fname);
-
- err = -EAGAIN;
- if (f2fs_has_inline_dentry(dir))
- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
- if (err == -EAGAIN)
- err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
+ err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
fscrypt_free_filename(&fname);
- f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
}
@@ -634,7 +661,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, NULL, NULL);
+ page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -786,19 +813,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int ret;
+ int err;
- de_name.name = f2fs_kmalloc(de_name.len, GFP_NOFS);
- if (!de_name.name)
- return false;
-
- memcpy(de_name.name, d->filename[bit_pos], de_name.len);
-
- ret = fscrypt_fname_disk_to_usr(d->inode,
+ err = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0,
&de_name, fstr);
- kfree(de_name.name);
- if (ret < 0)
+ if (err)
return true;
de_name = *fstr;
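
The dir.c refactor splits the lookup into a core that takes an already-prepared fscrypt_name (reusing a precomputed hash when one is present) and a thin wrapper that handles setup and teardown. A toy userspace rendering of that split, with a stand-in hash function and made-up names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Name descriptor that may carry a precomputed hash (0 = not cached). */
struct toy_fname {
	const char *name;
	size_t len;
	uint32_t hash;
};

static uint32_t toy_hash(const char *s, size_t len)
{
	uint32_t h = 2166136261u;           /* FNV-1a, for illustration */

	while (len--)
		h = (h ^ (uint8_t)*s++) * 16777619u;
	return h ? h : 1;
}

/* Core: reuse the cached hash when the caller already has one. */
static uint32_t toy_find_entry(const struct toy_fname *fname)
{
	uint32_t h = fname->hash ? fname->hash
				 : toy_hash(fname->name, fname->len);

	/* a real lookup would walk hash buckets here */
	return h;
}

/* Wrapper: prepare the descriptor, call the core, clean up. */
static uint32_t toy_lookup(const char *name)
{
	struct toy_fname fname = { name, strlen(name), 0 };

	return toy_find_entry(&fname);
}

int main(void)
{
	printf("bucket hash = %u\n", (unsigned)toy_lookup("example.txt"));
	return 0;
}
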
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 14f5fe2b841e..9e8de18a168a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -46,6 +46,8 @@ enum {
FAULT_BLOCK,
FAULT_DIR_DEPTH,
FAULT_EVICT_INODE,
+ FAULT_IO,
+ FAULT_CHECKPOINT,
FAULT_MAX,
};
@@ -55,40 +57,8 @@ struct f2fs_fault_info {
unsigned int inject_type;
};
-extern struct f2fs_fault_info f2fs_fault;
extern char *fault_name[FAULT_MAX];
-#define IS_FAULT_SET(type) (f2fs_fault.inject_type & (1 << (type)))
-
-static inline bool time_to_inject(int type)
-{
- if (!f2fs_fault.inject_rate)
- return false;
- if (type == FAULT_KMALLOC && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_PAGE_ALLOC && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_ALLOC_NID && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_ORPHAN && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_BLOCK && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type))
- return false;
- else if (type == FAULT_EVICT_INODE && !IS_FAULT_SET(type))
- return false;
-
- atomic_inc(&f2fs_fault.inject_ops);
- if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
- atomic_set(&f2fs_fault.inject_ops, 0);
- printk("%sF2FS-fs : inject %s in %pF\n",
- KERN_INFO,
- fault_name[type],
- __builtin_return_address(0));
- return true;
- }
- return false;
-}
+#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
#endif
/*
@@ -158,7 +128,7 @@ enum {
CP_DISCARD,
};
-#define DEF_BATCHED_TRIM_SECTIONS 32
+#define DEF_BATCHED_TRIM_SECTIONS 2
#define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
@@ -211,6 +181,13 @@ struct discard_entry {
int len; /* # of consecutive blocks of the discard */
};
+struct bio_entry {
+ struct list_head list;
+ struct bio *bio;
+ struct completion event;
+ int error;
+};
+
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
struct list_head list; /* list head */
@@ -645,6 +622,7 @@ struct f2fs_sm_info {
/* for small discard management */
struct list_head discard_list; /* 4KB discard list */
+ struct list_head wait_list; /* linked with issued discard bio */
int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */
@@ -748,6 +726,7 @@ enum {
SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
+ SBI_NEED_CP, /* need to checkpoint */
};
enum {
@@ -765,7 +744,7 @@ struct f2fs_sb_info {
struct proc_dir_entry *s_proc; /* proc entry */
struct f2fs_super_block *raw_super; /* raw super block pointer */
int valid_super_block; /* valid super block no */
- int s_flag; /* flags for sbi */
+ unsigned long s_flag; /* flags for sbi */
#ifdef CONFIG_F2FS_FS_ENCRYPTION
u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
@@ -785,6 +764,7 @@ struct f2fs_sb_info {
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
@@ -892,8 +872,37 @@ struct f2fs_sb_info {
/* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver;
+
+ /* For fault injection */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info fault_info;
+#endif
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (!ffi->inject_rate)
+ return false;
+
+ if (!IS_FAULT_SET(ffi, type))
+ return false;
+
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ printk("%sF2FS-fs : inject %s in %pF\n",
+ KERN_INFO,
+ fault_name[type],
+ __builtin_return_address(0));
+ return true;
+ }
+ return false;
+}
+#endif
+
/* For write statistics. Suppose sector size is 512 bytes,
* and the return value is in kbytes. s is of struct f2fs_sb_info.
*/
@@ -1034,17 +1043,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
- return sbi->s_flag & (0x01 << type);
+ return test_bit(type, &sbi->s_flag);
}
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag |= (0x01 << type);
+ set_bit(type, &sbi->s_flag);
}
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag &= ~(0x01 << type);
+ clear_bit(type, &sbi->s_flag);
}
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -1052,26 +1061,57 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
-static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+
return ckpt_flags & f;
}
-static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
+}
+
+static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ spin_lock(&sbi->cp_lock);
+ __set_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
+
+static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
+static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
+{
+ spin_lock(&sbi->cp_lock);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
+
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
+{
+ struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+
+ return blk_queue_discard(q);
+}
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
@@ -1110,8 +1150,8 @@ static inline bool __remain_node_summaries(int reason)
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
- return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||
- is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG));
+ return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
+ is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}
/*
@@ -1151,7 +1191,7 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t diff;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_BLOCK))
+ if (time_to_inject(sbi, FAULT_BLOCK))
return false;
#endif
/*
@@ -1193,6 +1233,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
percpu_counter_inc(&sbi->nr_pages[count_type]);
+
+ if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES)
+ return;
+
set_sbi_flag(sbi, SBI_IS_DIRTY);
}
@@ -1243,6 +1287,11 @@ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
return sbi->total_valid_block_count;
}
+static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
+{
+ return sbi->discard_blks;
+}
+
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1376,7 +1425,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
if (page)
return page;
- if (time_to_inject(FAULT_PAGE_ALLOC))
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
return NULL;
#endif
if (!for_write)
@@ -1804,7 +1853,7 @@ static inline int f2fs_readonly(struct super_block *sb)
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
- return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
static inline bool is_dot_dotdot(const struct qstr *str)
@@ -1827,10 +1876,11 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
return S_ISREG(inode->i_mode);
}
-static inline void *f2fs_kmalloc(size_t size, gfp_t flags)
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_KMALLOC))
+ if (time_to_inject(sbi, FAULT_KMALLOC))
return NULL;
#endif
return kmalloc(size, flags);
@@ -1885,6 +1935,7 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
*/
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
+struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
int update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
@@ -1900,7 +1951,6 @@ struct dentry *f2fs_get_parent(struct dentry *child);
/*
* dir.c
*/
-extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t);
unsigned char get_de_type(struct f2fs_dir_entry *);
struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
@@ -1910,10 +1960,12 @@ bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
void do_make_empty_dir(struct inode *, struct inode *,
struct f2fs_dentry_ptr *);
struct page *init_inode_metadata(struct inode *, struct inode *,
- const struct qstr *, struct page *);
+ const struct qstr *, const struct qstr *, struct page *);
void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int);
void f2fs_drop_nlink(struct inode *, struct inode *);
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
+ struct page **);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
@@ -1924,7 +1976,9 @@ int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int);
int f2fs_add_regular_entry(struct inode *, const struct qstr *,
- struct inode *, nid_t, umode_t);
+ const struct qstr *, struct inode *, nid_t, umode_t);
+int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
+ nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
@@ -2010,9 +2064,9 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
+void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -2095,6 +2149,10 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
void f2fs_set_page_dirty_nobuffers(struct page *);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t);
+#ifdef CONFIG_MIGRATION
+int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
+ enum migrate_mode);
+#endif
/*
* gc.c
@@ -2123,13 +2181,14 @@ struct f2fs_stat_info {
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node;
- s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, inmem_pages;
+ s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ s64 inmem_pages;
unsigned int ndirty_dirs, ndirty_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization;
int bg_gc, wb_bios;
int inline_xattr, inline_inode, inline_dir, orphans;
- unsigned int valid_count, valid_node_count, valid_inode_count;
+ unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
@@ -2294,8 +2353,8 @@ bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
struct fscrypt_name *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
-int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
- nid_t, umode_t);
+int f2fs_add_inline_entry(struct inode *, const struct qstr *,
+ const struct qstr *, struct inode *, nid_t, umode_t);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
struct inode *, struct inode *);
bool f2fs_empty_inline_dir(struct inode *);
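
The header changes above move fault injection from a single global f2fs_fault into a per-superblock f2fs_fault_info and reduce time_to_inject() to a rate counter gated by a type bitmask. A compilable userspace model of that counter, with names chosen for this sketch rather than taken from the kernel:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum fault_type { FAULT_KMALLOC, FAULT_PAGE_ALLOC, FAULT_IO, FAULT_MAX };

struct fault_info {
	unsigned int inject_rate;    /* fire once every inject_rate calls */
	unsigned int inject_type;    /* bitmask of enum fault_type */
	unsigned int inject_ops;     /* calls seen since the last hit */
};

/* One instance per "filesystem"; the decision is rate * type mask. */
static bool time_to_inject(struct fault_info *ffi, enum fault_type type)
{
	if (!ffi->inject_rate)
		return false;
	if (!(ffi->inject_type & (1u << type)))
		return false;

	if (++ffi->inject_ops >= ffi->inject_rate) {
		ffi->inject_ops = 0;
		return true;
	}
	return false;
}

int main(void)
{
	struct fault_info ffi = { .inject_rate = 3,
				  .inject_type = 1u << FAULT_IO };

	for (int i = 0; i < 9; i++)
		if (time_to_inject(&ffi, FAULT_IO))
			printf("inject FAULT_IO at call %d\n", i);
	return 0;
}
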
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 28f4f4cbb8d8..f8b4fe05385d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -135,7 +135,7 @@ static inline bool need_do_checkpoint(struct inode *inode)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
+ else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
need_cp = true;
else if (file_wrong_pino(inode))
need_cp = true;
@@ -523,7 +523,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
if (cache_only) {
- page = f2fs_grab_cache_page(mapping, index, false);
+ page = find_lock_page(mapping, index);
if (page && PageUptodate(page))
goto truncate_out;
f2fs_put_page(page, 1);
@@ -1454,7 +1454,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ unsigned int flags;
unsigned int oldflags;
int ret;
@@ -1954,7 +1954,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* avoid defragment running in SSR mode when free section are allocated
* intensively
*/
- if (has_not_enough_free_secs(sbi, sec_num)) {
+ if (has_not_enough_free_secs(sbi, 0, sec_num)) {
err = -EAGAIN;
goto out;
}
@@ -2085,6 +2085,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
return -EOPNOTSUPP;
+ if (src == dst) {
+ if (pos_in == pos_out)
+ return 0;
+ if (pos_out > pos_in && pos_out < pos_in + len)
+ return -EINVAL;
+ }
+
inode_lock(src);
if (src != dst) {
if (!inode_trylock(dst)) {
@@ -2136,8 +2143,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
- ret = __exchange_data_block(src, dst, pos_in,
- pos_out, len >> F2FS_BLKSIZE_BITS, false);
+ ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
+ pos_out >> F2FS_BLKSIZE_BITS,
+ len >> F2FS_BLKSIZE_BITS, false);
if (!ret) {
if (dst_max_i_size)
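
The new f2fs_move_file_range() check rejects a same-file move whose destination begins inside the source range and short-circuits the trivial pos_in == pos_out case. A small standalone restatement of that range test (the helper name and return codes here are arbitrary):

#include <stdint.h>
#include <stdio.h>

/*
 * A move onto itself is a no-op (returns 1), a destination starting
 * inside the source range would corrupt data (returns -1), anything
 * else is safe to proceed with (returns 0).
 */
static int check_same_file_move(uint64_t pos_in, uint64_t pos_out,
				uint64_t len)
{
	if (pos_in == pos_out)
		return 1;
	if (pos_out > pos_in && pos_out < pos_in + len)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_same_file_move(0, 0, 4096));      /* 1  */
	printf("%d\n", check_same_file_move(0, 2048, 4096));   /* -1 */
	printf("%d\n", check_same_file_move(8192, 0, 4096));   /* 0  */
	return 0;
}
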
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 8f7fa326ce95..93985c64d8a8 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -47,6 +47,11 @@ static int gc_thread_func(void *data)
continue;
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -96,7 +101,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0;
- gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+ gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) {
err = -ENOMEM;
goto out;
@@ -270,7 +275,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int secno, max_cost, last_victim;
+ unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi);
unsigned int nsearched = 0;
@@ -280,7 +285,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO;
- p.min_cost = max_cost = get_max_cost(sbi, &p);
+ p.min_cost = get_max_cost(sbi, &p);
if (p.max_search == 0)
goto out;
@@ -423,10 +428,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
- bool initial = true;
struct f2fs_summary *entry;
block_t start_addr;
int off;
+ int phase = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -439,16 +444,24 @@ next_step:
struct node_info ni;
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
- if (initial) {
+ if (phase == 0) {
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
ra_node_page(sbi, nid);
continue;
}
+
+ /* phase == 2 */
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
continue;
@@ -469,10 +482,8 @@ next_step:
stat_inc_node_blk_count(sbi, 1, gc_type);
}
- if (initial) {
- initial = false;
+ if (++phase < 3)
goto next_step;
- }
}
/*
@@ -706,16 +717,23 @@ next_step:
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
block_t start_bidx;
+ nid_t nid = le32_to_cpu(entry->nid);
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
- ra_node_page(sbi, le32_to_cpu(entry->nid));
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
+ ra_node_page(sbi, nid);
continue;
}
@@ -723,14 +741,14 @@ next_step:
if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
continue;
- if (phase == 1) {
+ if (phase == 2) {
ra_node_page(sbi, dni.ino);
continue;
}
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
- if (phase == 2) {
+ if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
@@ -756,7 +774,7 @@ next_step:
continue;
}
- /* phase 3 */
+ /* phase 4 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -789,7 +807,7 @@ next_step:
}
}
- if (++phase < 4)
+ if (++phase < 5)
goto next_step;
}
@@ -815,7 +833,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + sbi->segs_per_sec;
- int seg_freed = 0;
+ int sec_freed = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
@@ -834,8 +852,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
for (segno = start_segno; segno < end_segno; segno++) {
- if (get_valid_blocks(sbi, segno, 1) == 0)
- continue;
+ if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
/* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi),
@@ -861,7 +880,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
gc_type);
stat_inc_seg_count(sbi, type, gc_type);
-
+next:
f2fs_put_page(sum_page, 0);
}
@@ -871,22 +890,20 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
blk_finish_plug(&plug);
- if (gc_type == FG_GC) {
- while (start_segno < end_segno)
- if (get_valid_blocks(sbi, start_segno++, 1) == 0)
- seg_freed++;
- }
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+ sec_freed = 1;
stat_inc_call_count(sbi->stat_info);
- return seg_freed;
+ return sec_freed;
}
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC;
- int sec_freed = 0, seg_freed;
+ int sec_freed = 0;
int ret = -EINVAL;
struct cp_control cpc;
struct gc_inode_list gc_list = {
@@ -905,7 +922,7 @@ gc_more:
goto stop;
}
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
gc_type = FG_GC;
/*
* If there is no victim and no prefree segment but still not
@@ -914,10 +931,14 @@ gc_more:
*/
if (__get_victim(sbi, &segno, gc_type) ||
prefree_segments(sbi)) {
- write_checkpoint(sbi, &cpc);
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
segno = NULL_SEGNO;
- } else if (has_not_enough_free_secs(sbi, 0)) {
- write_checkpoint(sbi, &cpc);
+ } else if (has_not_enough_free_secs(sbi, 0, 0)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
}
}
@@ -925,20 +946,19 @@ gc_more:
goto stop;
ret = 0;
- seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
-
- if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
+ if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
+ gc_type == FG_GC)
sec_freed++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed))
+ if (has_not_enough_free_secs(sbi, sec_freed, 0))
goto gc_more;
if (gc_type == FG_GC)
- write_checkpoint(sbi, &cpc);
+ ret = write_checkpoint(sbi, &cpc);
}
stop:
mutex_unlock(&sbi->gc_mutex);
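
The GC hunks above split each segment walk into extra readahead phases: NAT blocks are prefetched first, node pages next, and only the final pass moves data. A skeleton of that multi-pass structure, with stubbed prefetch/move steps standing in for the real work:

#include <stdio.h>

enum { PHASE_RA_NAT, PHASE_RA_NODE, PHASE_MOVE, NR_PHASES };

static void prefetch_nat(int entry)  { printf("ra NAT  for %d\n", entry); }
static void prefetch_node(int entry) { printf("ra node for %d\n", entry); }
static void move_block(int entry)    { printf("move    %d\n", entry); }

/* Scan the same entries once per phase; early phases only issue readahead. */
static void gc_segment(const int *entries, int nr)
{
	for (int phase = 0; phase < NR_PHASES; phase++) {
		for (int i = 0; i < nr; i++) {
			if (phase == PHASE_RA_NAT) {
				prefetch_nat(entries[i]);
				continue;
			}
			if (phase == PHASE_RA_NODE) {
				prefetch_node(entries[i]);
				continue;
			}
			/* PHASE_MOVE: the block actually gets migrated here */
			move_block(entries[i]);
		}
	}
}

int main(void)
{
	int victims[] = { 10, 11, 12 };

	gc_segment(victims, 3);
	return 0;
}
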
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index ccea8735de59..34234d84a38b 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -424,7 +424,7 @@ static int f2fs_add_inline_entries(struct inode *dir,
ino = le32_to_cpu(de->ino);
fake_mode = get_de_type(de) << S_SHIFT;
- err = f2fs_add_regular_entry(dir, &new_name, NULL,
+ err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
ino, fake_mode);
if (err)
goto punch_dentry_pages;
@@ -445,8 +445,8 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *backup_dentry;
int err;
- backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry),
- GFP_F2FS_ZERO);
+ backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
+ sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
if (!backup_dentry) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
@@ -488,17 +488,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
f2fs_hash_t name_hash;
- size_t namelen = name->len;
struct f2fs_inline_dentry *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(namelen);
+ int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL;
int err = 0;
@@ -519,18 +519,21 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, ipage);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
+ if (f2fs_encrypted_inode(dir))
+ file_set_enc_name(inode);
}
f2fs_wait_on_page_writeback(ipage, NODE, true);
- name_hash = f2fs_dentry_hash(name);
+ name_hash = f2fs_dentry_hash(new_name);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
- f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
@@ -563,7 +566,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
inline_dentry = inline_data_addr(page);
bit_pos = dentry - inline_dentry->dentry;
for (i = 0; i < slots; i++)
- test_and_clear_bit_le(bit_pos + i,
+ __clear_bit_le(bit_pos + i,
&inline_dentry->dentry_bitmap);
set_page_dirty(page);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9ac5efc15347..d7369895a78a 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include "f2fs.h"
@@ -234,6 +235,20 @@ bad_inode:
return ERR_PTR(ret);
}
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+retry:
+ inode = f2fs_iget(sb, ino);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ }
+ return inode;
+}
+
int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
@@ -354,7 +369,7 @@ void f2fs_evict_inode(struct inode *inode)
goto no_delete;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_EVICT_INODE))
+ if (time_to_inject(sbi, FAULT_EVICT_INODE))
goto no_delete;
#endif
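
f2fs_iget_retry() above loops only on -ENOMEM, backing off via congestion_wait() before trying again, and returns every other error unchanged. A userspace approximation of the same retry shape, where try_load() is a stand-in for the real lookup and nanosleep() plays the role of the back-off:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

static void *try_load(unsigned long ino, int *err);

/* Retry only the transient out-of-memory case after a short back-off. */
static void *load_retry(unsigned long ino)
{
	struct timespec backoff = { 0, 20 * 1000 * 1000 };  /* ~20ms */
	void *obj;
	int err;

	for (;;) {
		obj = try_load(ino, &err);
		if (obj || err != -ENOMEM)
			return obj;
		nanosleep(&backoff, NULL);
	}
}

/* Trivial stub so the sketch links: succeed on the second attempt. */
static void *try_load(unsigned long ino, int *err)
{
	static int calls;
	static char object;

	(void)ino;
	if (calls++ == 0) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = 0;
	return &object;
}

int main(void)
{
	printf("loaded inode 7: %p\n", load_retry(7));
	return 0;
}
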
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 73fa356f8fbb..5625b879c98a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -91,18 +91,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
+ int i;
/*
* filename format of multimedia file should be defined as:
- * "filename + '.' + extension".
+ * "filename + '.' + extension + (optional: '.' + temp extension)".
*/
if (slen < sublen + 2)
return 0;
- if (s[slen - sublen - 1] != '.')
- return 0;
+ for (i = 1; i < slen - sublen; i++) {
+ if (s[i] != '.')
+ continue;
+ if (!strncasecmp(s + i + 1, sub, sublen))
+ return 1;
+ }
- return !strncasecmp(s + slen - sublen, sub, sublen);
+ return 0;
}
/*
@@ -449,7 +454,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
ostr.name = sd->encrypted_path;
ostr.len = disk_link.len;
err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
- if (err < 0)
+ if (err)
goto err_out;
sd->len = cpu_to_le16(ostr.len);
@@ -1010,7 +1015,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
struct fscrypt_symlink_data *sd;
- loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
u32 max_size = inode->i_sb->s_blocksize;
int res;
@@ -1025,7 +1029,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
if (IS_ERR(cpage))
return ERR_CAST(cpage);
caddr = page_address(cpage);
- caddr[size] = 0;
/* Symlink is encrypted */
sd = (struct fscrypt_symlink_data *)caddr;
@@ -1048,7 +1051,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
goto errout;
res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
- if (res < 0)
+ if (res)
goto errout;
/* this is broken symlink case */
@@ -1060,7 +1063,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
paddr = pstr.name;
/* Null-terminate the name */
- paddr[res] = '\0';
+ paddr[pstr.len] = '\0';
put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
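
The relaxed is_multimedia_file() above now treats any '.' in the name as a possible extension boundary, so a name like "video.mp4.part" still counts as a multimedia file. A plain C restatement of that scan (the helper name is invented for this sketch):

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Return 1 if 'ext' appears after any '.' that is not the last byte. */
static int matches_extension(const char *s, const char *ext)
{
	size_t slen = strlen(s), extlen = strlen(ext);

	if (slen < extlen + 2)
		return 0;

	for (size_t i = 1; i < slen - extlen; i++) {
		if (s[i] != '.')
			continue;
		if (!strncasecmp(s + i + 1, ext, extlen))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", matches_extension("movie.mp4", "mp4"));       /* 1 */
	printf("%d\n", matches_extension("movie.mp4.part", "mp4"));  /* 1 */
	printf("%d\n", matches_extension("notes.txt", "mp4"));       /* 0 */
	return 0;
}
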
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index f75d197d5beb..883103593f33 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -54,8 +54,6 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
if (excess_cached_nats(sbi))
res = false;
- if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
- res = false;
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false;
@@ -1314,6 +1312,7 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
+ int nwritten = 0;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1387,7 +1386,10 @@ continue_unlock:
unlock_page(page);
f2fs_put_page(last_page, 0);
break;
+ } else {
+ nwritten++;
}
+
if (page == last_page) {
f2fs_put_page(page, 0);
marked = true;
@@ -1409,6 +1411,9 @@ continue_unlock:
unlock_page(last_page);
goto retry;
}
+
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
return ret ? -EIO: 0;
}
@@ -1418,6 +1423,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
struct pagevec pvec;
int step = 0;
int nwritten = 0;
+ int ret = 0;
pagevec_init(&pvec, 0);
@@ -1438,7 +1444,8 @@ next_step:
if (unlikely(f2fs_cp_error(sbi))) {
pagevec_release(&pvec);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/*
@@ -1489,6 +1496,8 @@ continue_unlock:
if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
unlock_page(page);
+ else
+ nwritten++;
if (--wbc->nr_to_write == 0)
break;
@@ -1506,7 +1515,10 @@ continue_unlock:
step++;
goto next_step;
}
- return nwritten;
+out:
+ if (nwritten)
+ f2fs_submit_merged_bio(sbi, NODE, WRITE);
+ return ret;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
@@ -1672,6 +1684,9 @@ const struct address_space_operations f2fs_node_aops = {
.set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -1838,7 +1853,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct free_nid *i = NULL;
retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_ALLOC_NID))
+ if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
#endif
if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
@@ -2015,10 +2030,12 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
-
+retry:
ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
- if (!ipage)
- return -ENOMEM;
+ if (!ipage) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
/* Should not use this inode from free nid list */
remove_free_nid(NM_I(sbi), ino);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index fc7684554b1a..868bec65e51c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -229,6 +229,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline __u64 cpver_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.next_blkaddr);
+}
+
static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
@@ -259,40 +290,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
struct f2fs_node *rn = F2FS_NODE(page);
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
- rn->footer.cp_ver = ckpt->checkpoint_ver;
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ rn->footer.cp_ver = cpu_to_le64(cp_ver);
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline nid_t ino_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.ino);
-}
-
-static inline nid_t nid_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.nid);
-}
-
-static inline unsigned int ofs_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- unsigned flag = le32_to_cpu(rn->footer.flag);
- return flag >> OFFSET_BIT_SHIFT;
-}
-
-static inline unsigned long long cpver_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le64_to_cpu(rn->footer.cp_ver);
-}
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = cur_cp_version(ckpt);
-static inline block_t next_blkaddr_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.next_blkaddr);
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ return cpu_to_le64(cp_ver) == cpver_of_node(page);
}
/*
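
The node.h hunks have fill_node_footer_blkaddr() and is_recoverable_dnode() fold the checkpoint CRC into the upper 32 bits of the version stamped in each node footer when CP_CRC_RECOVERY_FLAG is set, so roll-forward recovery can reject nodes written against a different checkpoint image. A compact model of that packing and comparison, using field and function names invented for this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_ckpt {
	uint64_t version;
	uint32_t crc;
	bool crc_recovery;
};

/* Version as it would be stamped into a node footer. */
static uint64_t footer_ver(const struct model_ckpt *ckpt)
{
	uint64_t ver = ckpt->version;

	if (ckpt->crc_recovery)
		ver |= (uint64_t)ckpt->crc << 32;
	return ver;
}

/* A node is recoverable only when both version and CRC still match. */
static bool is_recoverable(const struct model_ckpt *ckpt, uint64_t node_ver)
{
	return footer_ver(ckpt) == node_ver;
}

int main(void)
{
	struct model_ckpt ckpt = { .version = 42, .crc = 0xdeadbeef,
				   .crc_recovery = true };
	uint64_t stamped = footer_ver(&ckpt);

	printf("match: %d\n", is_recoverable(&ckpt, stamped));     /* 1 */
	ckpt.crc = 0x12345678;                /* checkpoint rewritten */
	printf("match: %d\n", is_recoverable(&ckpt, stamped));     /* 0 */
	return 0;
}
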
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9e652d5a659b..2fc84a991325 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -68,15 +68,17 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL;
}
-static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
- struct inode *inode)
+static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
+ struct list_head *head, nid_t ino)
{
+ struct inode *inode;
struct fsync_inode_entry *entry;
- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
- if (!entry)
- return NULL;
+ inode = f2fs_iget_retry(sbi->sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
entry->inode = inode;
list_add_tail(&entry->list, head);
@@ -96,48 +98,41 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct qstr name;
+ struct fscrypt_name fname;
struct page *page;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
int err = 0;
+ char *name;
entry = get_fsync_inode(dir_list, pino);
if (!entry) {
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
- }
-
- entry = add_fsync_inode(dir_list, dir);
- if (!entry) {
- err = -ENOMEM;
- iput(dir);
+ entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
+ if (IS_ERR(entry)) {
+ dir = ERR_CAST(entry);
+ err = PTR_ERR(entry);
goto out;
}
}
dir = entry->inode;
- if (file_enc_name(inode))
- return 0;
+ memset(&fname, 0, sizeof(struct fscrypt_name));
+ fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname.disk_name.name = raw_inode->i_name;
- name.len = le32_to_cpu(raw_inode->i_namelen);
- name.name = raw_inode->i_name;
-
- if (unlikely(name.len > F2FS_NAME_LEN)) {
+ if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
goto out;
}
retry:
- de = f2fs_find_entry(dir, &name, &page);
+ de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
if (de) {
- einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
+ einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
err = PTR_ERR(einode);
@@ -156,18 +151,24 @@ retry:
} else if (IS_ERR(page)) {
err = PTR_ERR(page);
} else {
- err = __f2fs_add_link(dir, &name, inode,
+ err = __f2fs_do_add_link(dir, &fname, inode,
inode->i_ino, inode->i_mode);
}
+ if (err == -ENOMEM)
+ goto retry;
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
out:
+ if (file_enc_name(inode))
+ name = "<encrypted>";
+ else
+ name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), raw_inode->i_name,
+ __func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
@@ -223,9 +224,7 @@ static bool is_same_inode(struct inode *inode, struct page *ipage)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
- struct inode *inode;
struct page *page = NULL;
block_t blkaddr;
int err = 0;
@@ -242,7 +241,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page))
+ if (!is_recoverable_dnode(page))
break;
if (!is_fsync_dnode(page))
@@ -263,23 +262,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- inode = f2fs_iget(sbi->sb, ino_of_node(page));
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ entry = add_fsync_inode(sbi, head, ino_of_node(page));
+ if (IS_ERR(entry)) {
+ err = PTR_ERR(entry);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
-
- /* add this fsync inode to the list */
- entry = add_fsync_inode(head, inode);
- if (!entry) {
- err = -ENOMEM;
- iput(inode);
- break;
- }
}
entry->blkaddr = blkaddr;
@@ -363,7 +354,7 @@ got_it:
if (ino != dn->inode->i_ino) {
/* Deallocate previous index in the node page */
- inode = f2fs_iget(sbi->sb, ino);
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
} else {
@@ -431,10 +422,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
-
+retry_dn:
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_dn;
+ }
goto out;
+ }
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
@@ -485,11 +481,16 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (err)
goto err;
}
-
+retry_prev:
/* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_prev;
+ }
goto err;
+ }
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
@@ -514,7 +515,6 @@ out:
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *dir_list)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
@@ -534,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page)) {
+ if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
break;
}
@@ -626,38 +626,20 @@ out:
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err) {
- bool invalidate = false;
-
- if (test_opt(sbi, LFS)) {
- update_meta_page(sbi, NULL, blkaddr);
- invalidate = true;
- } else if (discard_next_dnode(sbi, blkaddr)) {
- invalidate = true;
- }
-
- /* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
- sync_meta_pages(sbi, META, LONG_MAX);
+ if (err)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
- /* invalidate temporary meta page */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi),
- blkaddr, blkaddr);
+ /* let's drop all the directory inodes for clean checkpoint */
+ destroy_fsync_dnodes(&dir_list);
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- mutex_unlock(&sbi->cp_mutex);
- } else if (need_writecp) {
+ if (!err && need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
- mutex_unlock(&sbi->cp_mutex);
err = write_checkpoint(sbi, &cpc);
- } else {
- mutex_unlock(&sbi->cp_mutex);
}
- destroy_fsync_dnodes(&dir_list);
kmem_cache_destroy(fsync_entry_slab);
return ret ? ret: err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a46296f57b02..fc886f008449 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -26,6 +26,7 @@
#define __reverse_ffz(x) __reverse_ffs(~(x))
static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *bio_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
@@ -344,6 +345,11 @@ int commit_inmem_pages(struct inode *inode)
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
if (!need)
return;
@@ -355,7 +361,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0)) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
f2fs_gc(sbi, false);
}
@@ -580,6 +586,74 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
+static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
+ struct bio *bio)
+{
+ struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+ struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+
+ INIT_LIST_HEAD(&be->list);
+ be->bio = bio;
+ init_completion(&be->event);
+ list_add_tail(&be->list, wait_list);
+
+ return be;
+}
+
+void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
+{
+ struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+ struct bio_entry *be, *tmp;
+
+ list_for_each_entry_safe(be, tmp, wait_list, list) {
+ struct bio *bio = be->bio;
+ int err;
+
+ wait_for_completion_io(&be->event);
+ err = be->error;
+ if (err == -EOPNOTSUPP)
+ err = 0;
+
+ if (err)
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Issue discard failed, ret: %d", err);
+
+ bio_put(bio);
+ list_del(&be->list);
+ kmem_cache_free(bio_entry_slab, be);
+ }
+}
+
+static void f2fs_submit_bio_wait_endio(struct bio *bio)
+{
+ struct bio_entry *be = (struct bio_entry *)bio->bi_private;
+
+ be->error = bio->bi_error;
+ complete(&be->event);
+}
+
+/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
+int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct bio *bio = NULL;
+ int err;
+
+ err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
+ &bio);
+ if (!err && bio) {
+ struct bio_entry *be = __add_bio_entry(sbi, bio);
+
+ bio->bi_private = be;
+ bio->bi_end_io = f2fs_submit_bio_wait_endio;
+ bio->bi_opf |= REQ_SYNC;
+ submit_bio(bio);
+ }
+
+ return err;
+}
+
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
block_t blkstart, block_t blklen)
{
@@ -597,29 +671,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
sbi->discard_blks--;
}
trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
- return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
-}
-
-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
-{
- int err = -EOPNOTSUPP;
-
- if (test_opt(sbi, DISCARD)) {
- struct seg_entry *se = get_seg_entry(sbi,
- GET_SEGNO(sbi, blkaddr));
- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- if (f2fs_test_bit(offset, se->discard_map))
- return false;
-
- err = f2fs_issue_discard(sbi, blkaddr, 1);
- }
-
- if (err) {
- update_meta_page(sbi, NULL, blkaddr);
- return true;
- }
- return false;
+ return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
}
static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -660,7 +712,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
bool force = (cpc->reason == CP_DISCARD);
int i;
- if (se->valid_blocks == max_blocks)
+ if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
return;
if (!force) {
@@ -719,11 +771,14 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct list_head *head = &(SM_I(sbi)->discard_list);
struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct blk_plug plug;
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason == CP_DISCARD);
+ blk_start_plug(&plug);
+
mutex_lock(&dirty_i->seglist_lock);
while (1) {
@@ -772,6 +827,8 @@ skip:
SM_I(sbi)->nr_discards -= entry->len;
kmem_cache_free(discard_entry_slab, entry);
}
+
+ blk_finish_plug(&plug);
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
@@ -818,12 +875,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (del > 0) {
if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (!f2fs_test_and_set_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
} else {
if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (f2fs_test_and_clear_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
@@ -1202,7 +1261,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+ if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
return v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
@@ -1277,6 +1336,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (end <= MAIN_BLKADDR(sbi))
goto out;
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Found FS corruption, run fsck to fix.");
+ goto out;
+ }
+
/* start/end segment number in main_area */
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
@@ -1301,6 +1366,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
mutex_lock(&sbi->gc_mutex);
err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
+ if (err)
+ break;
+
+ schedule();
}
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
@@ -1391,7 +1460,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
/* direct_io'ed data is aligned to the segment for better performance */
if (direct_io && curseg->next_blkoff &&
- !has_not_enough_free_secs(sbi, 0))
+ !has_not_enough_free_secs(sbi, 0, 0))
__allocate_new_segments(sbi, type);
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1589,11 +1658,9 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
{
struct page *cpage;
- if (blkaddr == NEW_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return;
- f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
-
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
if (cpage) {
f2fs_wait_on_page_writeback(cpage, DATA, true);
@@ -1739,7 +1806,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
int type = CURSEG_HOT_DATA;
int err;
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
int npages = npages_for_summary_flush(sbi, true);
if (npages >= 2)
@@ -1836,7 +1903,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
write_compacted_summaries(sbi, start_blk);
else
write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
@@ -2127,12 +2194,16 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
- !sit_i->sentries[start].ckpt_valid_map ||
- !sit_i->sentries[start].discard_map)
+ !sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
+
+ if (f2fs_discard_en(sbi)) {
+ sit_i->sentries[start].discard_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
+ }
}
sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -2239,6 +2310,8 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
struct f2fs_journal *journal = curseg->journal;
+ struct seg_entry *se;
+ struct f2fs_sit_entry sit;
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
@@ -2251,41 +2324,58 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
end = (start_blk + readed) * sit_i->sents_per_block;
for (; start < end && start < MAIN_SEGS(sbi); start++) {
- struct seg_entry *se = &sit_i->sentries[start];
struct f2fs_sit_block *sit_blk;
- struct f2fs_sit_entry sit;
struct page *page;
- down_read(&curseg->journal_rwsem);
- for (i = 0; i < sits_in_cursum(journal); i++) {
- if (le32_to_cpu(segno_in_journal(journal, i))
- == start) {
- sit = sit_in_journal(journal, i);
- up_read(&curseg->journal_rwsem);
- goto got_it;
- }
- }
- up_read(&curseg->journal_rwsem);
-
+ se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
-got_it:
+
check_block_count(sbi, start, &sit);
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
- memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
-
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += sbi->blocks_per_seg -
+ se->valid_blocks;
}
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
}
start_blk += readed;
} while (start_blk < sit_blk_cnt);
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
+ unsigned int old_valid_blocks;
+
+ start = le32_to_cpu(segno_in_journal(journal, i));
+ se = &sit_i->sentries[start];
+ sit = sit_in_journal(journal, i);
+
+ old_valid_blocks = se->valid_blocks;
+
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks -
+ se->valid_blocks;
+ }
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks - old_valid_blocks;
+ }
+ up_read(&curseg->journal_rwsem);
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -2427,6 +2517,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
INIT_LIST_HEAD(&sm_info->discard_list);
+ INIT_LIST_HEAD(&sm_info->wait_list);
sm_info->nr_discards = 0;
sm_info->max_discards = 0;
@@ -2570,10 +2661,15 @@ int __init create_segment_manager_caches(void)
if (!discard_entry_slab)
goto fail;
+ bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
+ sizeof(struct bio_entry));
+ if (!bio_entry_slab)
+ goto destroy_discard_entry;
+
sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
sizeof(struct sit_entry_set));
if (!sit_entry_set_slab)
- goto destory_discard_entry;
+ goto destroy_bio_entry;
inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
sizeof(struct inmem_pages));
@@ -2583,7 +2679,9 @@ int __init create_segment_manager_caches(void)
destroy_sit_entry_set:
kmem_cache_destroy(sit_entry_set_slab);
-destory_discard_entry:
+destroy_bio_entry:
+ kmem_cache_destroy(bio_entry_slab);
+destroy_discard_entry:
kmem_cache_destroy(discard_entry_slab);
fail:
return -ENOMEM;
@@ -2592,6 +2690,7 @@ fail:
void destroy_segment_manager_caches(void)
{
kmem_cache_destroy(sit_entry_set_slab);
+ kmem_cache_destroy(bio_entry_slab);
kmem_cache_destroy(discard_entry_slab);
kmem_cache_destroy(inmem_entry_slab);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index b33f73ec60a4..fecb856ad874 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -479,7 +479,8 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
reserved_sections(sbi) + 1);
}
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ int freed, int needed)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
@@ -489,8 +490,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi));
+ return (free_sections(sbi) + freed) <=
+ (node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
}
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
@@ -587,8 +588,8 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+ || blk_addr >= MAX_BLKADDR(sbi));
}
/*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7f863a645ab1..6132b4ce4e4c 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -40,7 +40,6 @@ static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION
-struct f2fs_fault_info f2fs_fault;
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
@@ -50,16 +49,21 @@ char *fault_name[FAULT_MAX] = {
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_IO] = "IO error",
+ [FAULT_CHECKPOINT] = "checkpoint error",
};
-static void f2fs_build_fault_attr(unsigned int rate)
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned int rate)
{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
if (rate) {
- atomic_set(&f2fs_fault.inject_ops, 0);
- f2fs_fault.inject_rate = rate;
- f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
+ atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_rate = rate;
+ ffi->inject_type = (1 << FAULT_MAX) - 1;
} else {
- memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info));
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
}
#endif
@@ -87,6 +91,7 @@ enum {
Opt_inline_xattr,
Opt_inline_data,
Opt_inline_dentry,
+ Opt_noinline_dentry,
Opt_flush_merge,
Opt_noflush_merge,
Opt_nobarrier,
@@ -118,6 +123,7 @@ static match_table_t f2fs_tokens = {
{Opt_inline_xattr, "inline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
+ {Opt_noinline_dentry, "noinline_dentry"},
{Opt_flush_merge, "flush_merge"},
{Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
@@ -167,7 +173,7 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
#ifdef CONFIG_F2FS_FAULT_INJECTION
else if (struct_type == FAULT_INFO_RATE ||
struct_type == FAULT_INFO_TYPE)
- return (unsigned char *)&f2fs_fault;
+ return (unsigned char *)&sbi->fault_info;
#endif
return NULL;
}
@@ -312,6 +318,10 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(dirty_nats_ratio),
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ ATTR_LIST(inject_rate),
+ ATTR_LIST(inject_type),
+#endif
ATTR_LIST(lifetime_write_kbytes),
NULL,
};
@@ -327,22 +337,6 @@ static struct kobj_type f2fs_ktype = {
.release = f2fs_sb_release,
};
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-/* sysfs for f2fs fault injection */
-static struct kobject f2fs_fault_inject;
-
-static struct attribute *f2fs_fault_attrs[] = {
- ATTR_LIST(inject_rate),
- ATTR_LIST(inject_type),
- NULL
-};
-
-static struct kobj_type f2fs_fault_ktype = {
- .default_attrs = f2fs_fault_attrs,
- .sysfs_ops = &f2fs_attr_ops,
-};
-#endif
-
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -370,10 +364,6 @@ static int parse_options(struct super_block *sb, char *options)
char *p, *name;
int arg = 0;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(0);
-#endif
-
if (!options)
return 0;
@@ -488,6 +478,9 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_inline_dentry:
set_opt(sbi, INLINE_DENTRY);
break;
+ case Opt_noinline_dentry:
+ clear_opt(sbi, INLINE_DENTRY);
+ break;
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
@@ -533,7 +526,7 @@ static int parse_options(struct super_block *sb, char *options)
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(arg);
+ f2fs_build_fault_attr(sbi, arg);
#else
f2fs_msg(sb, KERN_INFO,
"FAULT_INJECTION was not selected");
@@ -730,7 +723,7 @@ static void f2fs_put_super(struct super_block *sb)
* clean checkpoint again.
*/
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -878,6 +871,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
+ else
+ seq_puts(seq, ",noinline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
if (test_opt(sbi, NOBARRIER))
@@ -946,7 +941,7 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%d|%-3u|", se->type,
get_valid_blocks(sbi, i, 1));
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
- seq_printf(seq, "%x ", se->cur_valid_map[j]);
+ seq_printf(seq, " %.2x", se->cur_valid_map[j]);
seq_putc(seq, '\n');
}
return 0;
@@ -975,6 +970,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
@@ -991,6 +987,10 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_F2FS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0);
+#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@@ -1001,6 +1001,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info ffi = sbi->fault_info;
+#endif
/*
* Save the old mount options in case we
@@ -1096,6 +1099,9 @@ restore_gc:
restore_opts:
sbi->mount_opt = org_mount_opt;
sbi->active_logs = active_logs;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ sbi->fault_info = ffi;
+#endif
return err;
}
@@ -1469,6 +1475,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
mutex_init(&sbi->umount_mutex);
mutex_init(&sbi->wio_mutex[NODE]);
mutex_init(&sbi->wio_mutex[DATA]);
+ spin_lock_init(&sbi->cp_lock);
#ifdef CONFIG_F2FS_FS_ENCRYPTION
memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
@@ -1810,7 +1817,7 @@ try_onemore:
* previous checkpoint was not done by clean system shutdown.
*/
if (bdev_read_only(sb->s_bdev) &&
- !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
goto free_kobj;
}
@@ -1818,6 +1825,9 @@ try_onemore:
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
+ if (!retry)
+ goto skip_recovery;
+
err = recover_fsync_data(sbi, false);
if (err < 0) {
need_fsck = true;
@@ -1835,7 +1845,7 @@ try_onemore:
goto free_kobj;
}
}
-
+skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -1879,7 +1889,9 @@ free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_node_inode:
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
mutex_lock(&sbi->umount_mutex);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
@@ -1978,16 +1990,6 @@ static int __init init_f2fs_fs(void)
err = -ENOMEM;
goto free_extent_cache;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_fault_inject.kset = f2fs_kset;
- f2fs_build_fault_attr(0);
- err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype,
- NULL, "fault_injection");
- if (err) {
- f2fs_fault_inject.kset = NULL;
- goto free_kset;
- }
-#endif
err = register_shrinker(&f2fs_shrinker_info);
if (err)
goto free_kset;
@@ -2006,10 +2008,6 @@ free_filesystem:
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
free_kset:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (f2fs_fault_inject.kset)
- kobject_put(&f2fs_fault_inject);
-#endif
kset_unregister(f2fs_kset);
free_extent_cache:
destroy_extent_cache();
@@ -2031,9 +2029,6 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- kobject_put(&f2fs_fault_inject);
-#endif
kset_unregister(f2fs_kset);
destroy_extent_cache();
destroy_checkpoint_caches();
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index c8898b5148eb..1f74876233b6 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -217,18 +217,20 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
return entry;
}
-static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+ void **base_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
size_t size = PAGE_SIZE, inline_size = 0;
void *txattr_addr;
+ int err;
inline_size = inline_xattr_size(inode);
txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
if (!txattr_addr)
- return NULL;
+ return -ENOMEM;
/* read from inline xattr */
if (inline_size) {
@@ -239,8 +241,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
inline_addr = inline_xattr_addr(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
goto fail;
+ }
inline_addr = inline_xattr_addr(page);
}
memcpy(txattr_addr, inline_addr, inline_size);
@@ -254,8 +258,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
/* The inode already has an extended attribute block. */
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage))
+ if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
goto fail;
+ }
xattr_addr = page_address(xpage);
memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
@@ -269,10 +275,11 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
header->h_refcount = cpu_to_le32(1);
}
- return txattr_addr;
+ *base_addr = txattr_addr;
+ return 0;
fail:
kzfree(txattr_addr);
- return NULL;
+ return err;
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
@@ -366,9 +373,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
entry = __find_xattr(base_addr, index, len, name);
if (IS_XATTR_LAST_ENTRY(entry)) {
@@ -402,9 +409,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
int error = 0;
size_t rest = buffer_size;
- base_addr = read_all_xattrs(inode, NULL);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, NULL, &base_addr);
+ if (error)
+ return error;
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
@@ -463,9 +470,9 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
/* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name);
@@ -548,6 +555,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
f2fs_mark_inode_dirty_sync(inode);
+ if (!error && S_ISDIR(inode->i_mode))
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
kzfree(base_addr);
return error;
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 1b2f6c2c3aaf..76f09ce7e5b2 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -1,5 +1,6 @@
config FUSE_FS
tristate "FUSE (Filesystem in Userspace) support"
+ select FS_POSIX_ACL
help
With FUSE it is possible to implement a fully functional filesystem
in a userspace program.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index e95eeb445e58..60da84a86dab 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
-fuse-objs := dev.o dir.o file.o inode.o control.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
new file mode 100644
index 000000000000..ec85765502f1
--- /dev/null
+++ b/fs/fuse/acl.c
@@ -0,0 +1,99 @@
+/*
+ * FUSE: Filesystem in Userspace
+ * Copyright (C) 2016 Canonical Ltd. <seth.forshee@canonical.com>
+ *
+ * This program can be distributed under the terms of the GNU GPL.
+ * See the file COPYING.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
+
+struct posix_acl *fuse_get_acl(struct inode *inode, int type)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ int size;
+ const char *name;
+ void *value = NULL;
+ struct posix_acl *acl;
+
+ if (!fc->posix_acl || fc->no_getxattr)
+ return NULL;
+
+ if (type == ACL_TYPE_ACCESS)
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
+ else if (type == ACL_TYPE_DEFAULT)
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ value = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ size = fuse_getxattr(inode, name, value, PAGE_SIZE);
+ if (size > 0)
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ else if ((size == 0) || (size == -ENODATA) ||
+ (size == -EOPNOTSUPP && fc->no_getxattr))
+ acl = NULL;
+ else if (size == -ERANGE)
+ acl = ERR_PTR(-E2BIG);
+ else
+ acl = ERR_PTR(size);
+
+ kfree(value);
+ return acl;
+}
+
+int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ const char *name;
+ int ret;
+
+ if (!fc->posix_acl || fc->no_setxattr)
+ return -EOPNOTSUPP;
+
+ if (type == ACL_TYPE_ACCESS)
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
+ else if (type == ACL_TYPE_DEFAULT)
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
+ else
+ return -EINVAL;
+
+ if (acl) {
+ /*
+ * Fuse userspace is responsible for updating access
+ * permissions in the inode, if needed. fuse_setxattr
+ * invalidates the inode attributes, which will force
+ * them to be refreshed the next time they are used,
+ * and it also updates i_ctime.
+ */
+ size_t size = posix_acl_xattr_size(acl->a_count);
+ void *value;
+
+ if (size > PAGE_SIZE)
+ return -E2BIG;
+
+ value = kmalloc(size, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+ if (ret < 0) {
+ kfree(value);
+ return ret;
+ }
+
+ ret = fuse_setxattr(inode, name, value, size, 0);
+ kfree(value);
+ } else {
+ ret = fuse_removexattr(inode, name);
+ }
+ forget_all_cached_acls(inode);
+ fuse_invalidate_attr(inode);
+
+ return ret;
+}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a94d2ed81ab4..70ea57c7b6bb 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -728,7 +728,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
struct pipe_buffer *buf = cs->pipebufs;
if (!cs->write) {
- err = buf->ops->confirm(cs->pipe, buf);
+ err = pipe_buf_confirm(cs->pipe, buf);
if (err)
return err;
@@ -767,7 +767,6 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
cs->len = err;
cs->offset = off;
cs->pg = page;
- cs->offset = off;
iov_iter_advance(cs->iter, err);
}
@@ -828,7 +827,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
fuse_copy_finish(cs);
- err = buf->ops->confirm(cs->pipe, buf);
+ err = pipe_buf_confirm(cs->pipe, buf);
if (err)
return err;
@@ -841,7 +840,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (cs->len != PAGE_SIZE)
goto out_fallback;
- if (buf->ops->steal(cs->pipe, buf) != 0)
+ if (pipe_buf_steal(cs->pipe, buf) != 0)
goto out_fallback;
newpage = buf->page;
@@ -1342,9 +1341,8 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
- int ret;
+ int total, ret;
int page_nr = 0;
- int do_wakeup = 0;
struct pipe_buffer *bufs;
struct fuse_copy_state cs;
struct fuse_dev *fud = fuse_get_dev(in);
@@ -1363,52 +1361,23 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (ret < 0)
goto out;
- ret = 0;
- pipe_lock(pipe);
-
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
- goto out_unlock;
- }
-
if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
ret = -EIO;
- goto out_unlock;
+ goto out;
}
- while (page_nr < cs.nr_segs) {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
-
- buf->page = bufs[page_nr].page;
- buf->offset = bufs[page_nr].offset;
- buf->len = bufs[page_nr].len;
+ for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
/*
* Need to be careful about this. Having buf->ops in module
* code can Oops if the buffer persists after module unload.
*/
- buf->ops = &nosteal_pipe_buf_ops;
-
- pipe->nrbufs++;
- page_nr++;
- ret += buf->len;
-
- if (pipe->files)
- do_wakeup = 1;
- }
-
-out_unlock:
- pipe_unlock(pipe);
-
- if (do_wakeup) {
- smp_mb();
- if (waitqueue_active(&pipe->wait))
- wake_up_interruptible(&pipe->wait);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ bufs[page_nr].ops = &nosteal_pipe_buf_ops;
+ ret = add_to_pipe(pipe, &bufs[page_nr++]);
+ if (unlikely(ret < 0))
+ break;
}
-
+ if (total)
+ ret = total;
out:
for (; page_nr < cs.nr_segs; page_nr++)
put_page(bufs[page_nr].page);
@@ -1993,7 +1962,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
- ibuf->ops->get(pipe, ibuf);
+ pipe_buf_get(pipe, ibuf);
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
@@ -2015,10 +1984,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
- for (idx = 0; idx < nbuf; idx++) {
- struct pipe_buffer *buf = &bufs[idx];
- buf->ops->release(pipe, buf);
- }
+ for (idx = 0; idx < nbuf; idx++)
+ pipe_buf_release(pipe, &bufs[idx]);
+
out:
kfree(bufs);
return ret;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c47b7780ce37..f7c84ab835ca 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -13,6 +13,8 @@
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
{
@@ -37,47 +39,39 @@ static void fuse_advise_use_readdirplus(struct inode *dir)
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
}
-#if BITS_PER_LONG >= 64
+union fuse_dentry {
+ u64 time;
+ struct rcu_head rcu;
+};
+
static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
{
- entry->d_time = time;
+ ((union fuse_dentry *) entry->d_fsdata)->time = time;
}
static inline u64 fuse_dentry_time(struct dentry *entry)
{
- return entry->d_time;
-}
-#else
-/*
- * On 32 bit archs store the high 32 bits of time in d_fsdata
- */
-static void fuse_dentry_settime(struct dentry *entry, u64 time)
-{
- entry->d_time = time;
- entry->d_fsdata = (void *) (unsigned long) (time >> 32);
-}
-
-static u64 fuse_dentry_time(struct dentry *entry)
-{
- return (u64) entry->d_time +
- ((u64) (unsigned long) entry->d_fsdata << 32);
+ return ((union fuse_dentry *) entry->d_fsdata)->time;
}
-#endif
/*
* FUSE caches dentries and attributes with separate timeout. The
* time in jiffies until the dentry/attributes are valid is stored in
- * dentry->d_time and fuse_inode->i_time respectively.
+ * dentry->d_fsdata and fuse_inode->i_time respectively.
*/
/*
* Calculate the time in jiffies until a dentry/attributes are valid
*/
-static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
+static u64 time_to_jiffies(u64 sec, u32 nsec)
{
if (sec || nsec) {
- struct timespec ts = {sec, nsec};
- return get_jiffies_64() + timespec_to_jiffies(&ts);
+ struct timespec64 ts = {
+ sec,
+ max_t(u32, nsec, NSEC_PER_SEC - 1)
+ };
+
+ return get_jiffies_64() + timespec64_to_jiffies(&ts);
} else
return 0;
}
@@ -243,6 +237,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
goto invalid;
+ forget_all_cached_acls(inode);
fuse_change_attributes(inode, &outarg.attr,
entry_attr_timeout(&outarg),
attr_version);
@@ -272,8 +267,23 @@ static int invalid_nodeid(u64 nodeid)
return !nodeid || nodeid == FUSE_ROOT_ID;
}
+static int fuse_dentry_init(struct dentry *dentry)
+{
+ dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL);
+
+ return dentry->d_fsdata ? 0 : -ENOMEM;
+}
+static void fuse_dentry_release(struct dentry *dentry)
+{
+ union fuse_dentry *fd = dentry->d_fsdata;
+
+ kfree_rcu(fd, rcu);
+}
+
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
+ .d_init = fuse_dentry_init,
+ .d_release = fuse_dentry_release,
};
int fuse_valid_type(int m)
@@ -634,7 +644,7 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
return create_new_entry(fc, &args, dir, entry, S_IFLNK);
}
-static inline void fuse_update_ctime(struct inode *inode)
+void fuse_update_ctime(struct inode *inode)
{
if (!IS_NOCMTIME(inode)) {
inode->i_ctime = current_fs_time(inode->i_sb);
@@ -917,6 +927,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
if (time_before64(fi->i_time, get_jiffies_64())) {
r = true;
+ forget_all_cached_acls(inode);
err = fuse_do_getattr(inode, stat, file);
} else {
r = false;
@@ -1017,7 +1028,7 @@ int fuse_allow_current_process(struct fuse_conn *fc)
{
const struct cred *cred;
- if (fc->flags & FUSE_ALLOW_OTHER)
+ if (fc->allow_other)
return 1;
cred = current_cred();
@@ -1064,6 +1075,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
+ forget_all_cached_acls(inode);
return fuse_do_getattr(inode, NULL, NULL);
}
@@ -1092,7 +1104,7 @@ static int fuse_permission(struct inode *inode, int mask)
/*
* If attributes are needed, refresh them before proceeding
*/
- if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
+ if (fc->default_permissions ||
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1105,7 +1117,7 @@ static int fuse_permission(struct inode *inode, int mask)
}
}
- if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
+ if (fc->default_permissions) {
err = generic_permission(inode, mask);
/* If permission is denied, try to refresh file
@@ -1233,6 +1245,7 @@ retry:
fi->nlookup++;
spin_unlock(&fc->lock);
+ forget_all_cached_acls(inode);
fuse_change_attributes(inode, &o->attr,
entry_attr_timeout(o),
attr_version);
@@ -1605,7 +1618,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
int err;
bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
- if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
err = inode_change_ok(inode, attr);
@@ -1702,172 +1715,75 @@ error:
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
struct inode *inode = d_inode(entry);
-
- if (!fuse_allow_current_process(get_fuse_conn(inode)))
- return -EACCES;
-
- if (attr->ia_valid & ATTR_FILE)
- return fuse_do_setattr(inode, attr, attr->ia_file);
- else
- return fuse_do_setattr(inode, attr, NULL);
-}
-
-static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
- struct kstat *stat)
-{
- struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
+ struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
+ int ret;
- if (!fuse_allow_current_process(fc))
+ if (!fuse_allow_current_process(get_fuse_conn(inode)))
return -EACCES;
- return fuse_update_attributes(inode, stat, NULL, NULL);
-}
-
-static int fuse_setxattr(struct dentry *unused, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
-{
- struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- struct fuse_setxattr_in inarg;
- int err;
-
- if (fc->no_setxattr)
- return -EOPNOTSUPP;
+ if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
+ attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
+ ATTR_MODE);
- memset(&inarg, 0, sizeof(inarg));
- inarg.size = size;
- inarg.flags = flags;
- args.in.h.opcode = FUSE_SETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 3;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
- args.in.args[2].size = size;
- args.in.args[2].value = value;
- err = fuse_simple_request(fc, &args);
- if (err == -ENOSYS) {
- fc->no_setxattr = 1;
- err = -EOPNOTSUPP;
- }
- if (!err) {
- fuse_invalidate_attr(inode);
- fuse_update_ctime(inode);
+ /*
+ * The only sane way to reliably kill suid/sgid is to do it in
+ * the userspace filesystem
+ *
+ * This should be done on write(), truncate() and chown().
+ */
+ if (!fc->handle_killpriv) {
+ int kill;
+
+ /*
+ * ia_mode calculation may have used stale i_mode.
+ * Refresh and recalculate.
+ */
+ ret = fuse_do_getattr(inode, NULL, file);
+ if (ret)
+ return ret;
+
+ attr->ia_mode = inode->i_mode;
+ kill = should_remove_suid(entry);
+ if (kill & ATTR_KILL_SUID) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISUID;
+ }
+ if (kill & ATTR_KILL_SGID) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISGID;
+ }
+ }
}
- return err;
-}
-
-static ssize_t fuse_getxattr(struct dentry *entry, struct inode *inode,
- const char *name, void *value, size_t size)
-{
- struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- struct fuse_getxattr_in inarg;
- struct fuse_getxattr_out outarg;
- ssize_t ret;
+ if (!attr->ia_valid)
+ return 0;
- if (fc->no_getxattr)
- return -EOPNOTSUPP;
+ ret = fuse_do_setattr(inode, attr, file);
+ if (!ret) {
+ /*
+ * If filesystem supports acls it may have updated acl xattrs in
+ * the filesystem, so forget cached acls for the inode.
+ */
+ if (fc->posix_acl)
+ forget_all_cached_acls(inode);
- memset(&inarg, 0, sizeof(inarg));
- inarg.size = size;
- args.in.h.opcode = FUSE_GETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
- /* This is really two different operations rolled into one */
- args.out.numargs = 1;
- if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = value;
- } else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
- }
- ret = fuse_simple_request(fc, &args);
- if (!ret && !size)
- ret = outarg.size;
- if (ret == -ENOSYS) {
- fc->no_getxattr = 1;
- ret = -EOPNOTSUPP;
+ /* Directory mode changed, may need to revalidate access */
+ if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
+ fuse_invalidate_entry_cache(entry);
}
return ret;
}
-static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
+static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
+ struct kstat *stat)
{
struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- struct fuse_getxattr_in inarg;
- struct fuse_getxattr_out outarg;
- ssize_t ret;
if (!fuse_allow_current_process(fc))
return -EACCES;
- if (fc->no_listxattr)
- return -EOPNOTSUPP;
-
- memset(&inarg, 0, sizeof(inarg));
- inarg.size = size;
- args.in.h.opcode = FUSE_LISTXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- /* This is really two different operations rolled into one */
- args.out.numargs = 1;
- if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = list;
- } else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
- }
- ret = fuse_simple_request(fc, &args);
- if (!ret && !size)
- ret = outarg.size;
- if (ret == -ENOSYS) {
- fc->no_listxattr = 1;
- ret = -EOPNOTSUPP;
- }
- return ret;
-}
-
-static int fuse_removexattr(struct dentry *entry, const char *name)
-{
- struct inode *inode = d_inode(entry);
- struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- int err;
-
- if (fc->no_removexattr)
- return -EOPNOTSUPP;
-
- args.in.h.opcode = FUSE_REMOVEXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = strlen(name) + 1;
- args.in.args[0].value = name;
- err = fuse_simple_request(fc, &args);
- if (err == -ENOSYS) {
- fc->no_removexattr = 1;
- err = -EOPNOTSUPP;
- }
- if (!err) {
- fuse_invalidate_attr(inode);
- fuse_update_ctime(inode);
- }
- return err;
+ return fuse_update_attributes(inode, stat, NULL, NULL);
}
static const struct inode_operations fuse_dir_inode_operations = {
@@ -1884,10 +1800,12 @@ static const struct inode_operations fuse_dir_inode_operations = {
.mknod = fuse_mknod,
.permission = fuse_permission,
.getattr = fuse_getattr,
- .setxattr = fuse_setxattr,
- .getxattr = fuse_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = fuse_listxattr,
- .removexattr = fuse_removexattr,
+ .removexattr = generic_removexattr,
+ .get_acl = fuse_get_acl,
+ .set_acl = fuse_set_acl,
};
static const struct file_operations fuse_dir_operations = {
@@ -1905,10 +1823,12 @@ static const struct inode_operations fuse_common_inode_operations = {
.setattr = fuse_setattr,
.permission = fuse_permission,
.getattr = fuse_getattr,
- .setxattr = fuse_setxattr,
- .getxattr = fuse_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = fuse_listxattr,
- .removexattr = fuse_removexattr,
+ .removexattr = generic_removexattr,
+ .get_acl = fuse_get_acl,
+ .set_acl = fuse_set_acl,
};
static const struct inode_operations fuse_symlink_inode_operations = {
@@ -1916,10 +1836,10 @@ static const struct inode_operations fuse_symlink_inode_operations = {
.get_link = fuse_get_link,
.readlink = generic_readlink,
.getattr = fuse_getattr,
- .setxattr = fuse_setxattr,
- .getxattr = fuse_getxattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = fuse_listxattr,
- .removexattr = fuse_removexattr,
+ .removexattr = generic_removexattr,
};
void fuse_init_common(struct inode *inode)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3988b43c2f5a..b7beb67bf005 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2326,49 +2326,6 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
return retval;
}
-static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
- unsigned int nr_segs, size_t bytes, bool to_user)
-{
- struct iov_iter ii;
- int page_idx = 0;
-
- if (!bytes)
- return 0;
-
- iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes);
-
- while (iov_iter_count(&ii)) {
- struct page *page = pages[page_idx++];
- size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
- void *kaddr;
-
- kaddr = kmap(page);
-
- while (todo) {
- char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
- size_t iov_len = ii.iov->iov_len - ii.iov_offset;
- size_t copy = min(todo, iov_len);
- size_t left;
-
- if (!to_user)
- left = copy_from_user(kaddr, uaddr, copy);
- else
- left = copy_to_user(uaddr, kaddr, copy);
-
- if (unlikely(left))
- return -EFAULT;
-
- iov_iter_advance(&ii, copy);
- todo -= copy;
- kaddr += copy;
- }
-
- kunmap(page);
- }
-
- return 0;
-}
-
/*
* CUSE servers compiled on 32bit broke on 64bit kernels because the
* ABI was defined to be 'struct iovec' which is different on 32bit
@@ -2520,8 +2477,9 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
struct iovec *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
- size_t in_size, out_size, transferred;
- int err;
+ size_t in_size, out_size, transferred, c;
+ int err, i;
+ struct iov_iter ii;
#if BITS_PER_LONG == 32
inarg.flags |= FUSE_IOCTL_32BIT;
@@ -2603,10 +2561,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
req->in.args[1].size = in_size;
req->in.argpages = 1;
- err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
- false);
- if (err)
- goto out;
+ err = -EFAULT;
+ iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
+ c = copy_page_from_iter(pages[i], 0, PAGE_SIZE, &ii);
+ if (c != PAGE_SIZE && iov_iter_count(&ii))
+ goto out;
+ }
}
req->out.numargs = 2;
@@ -2672,7 +2633,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
if (transferred > inarg.out_size)
goto out;
- err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
+ err = -EFAULT;
+ iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
+ c = copy_page_to_iter(pages[i], 0, PAGE_SIZE, &ii);
+ if (c != PAGE_SIZE && iov_iter_count(&ii))
+ goto out;
+ }
+ err = 0;
out:
if (req)
fuse_put_request(fc, req);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d98d8cc84def..24ada5dc4dae 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -23,6 +23,7 @@
#include <linux/poll.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/xattr.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -36,15 +37,6 @@
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
-/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
- module will check permissions based on the file mode. Otherwise no
- permission checking is done in the kernel */
-#define FUSE_DEFAULT_PERMISSIONS (1 << 0)
-
-/** If the FUSE_ALLOW_OTHER flag is given, then not only the user
- doing the mount will be allowed to access the filesystem */
-#define FUSE_ALLOW_OTHER (1 << 1)
-
/** Number of page pointers embedded in fuse_req */
#define FUSE_REQ_INLINE_PAGES 1
@@ -469,9 +461,6 @@ struct fuse_conn {
/** The group id for this mount */
kgid_t group_id;
- /** The fuse mount flags for this mount */
- unsigned flags;
-
/** Maximum read size */
unsigned max_read;
@@ -547,6 +536,9 @@ struct fuse_conn {
/** allow parallel lookups and readdir (default is serialized) */
unsigned parallel_dirops:1;
+ /** handle fs handles killing suid/sgid/cap on write/chown/trunc */
+ unsigned handle_killpriv:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
@@ -624,6 +616,15 @@ struct fuse_conn {
/** Is lseek not implemented by fs? */
unsigned no_lseek:1;
+ /** Does the filesystem support posix acls? */
+ unsigned posix_acl:1;
+
+ /** Check permissions based on the file mode or not? */
+ unsigned default_permissions:1;
+
+ /** Allow other than the mounter user to access the filesystem ? */
+ unsigned allow_other:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -902,6 +903,8 @@ int fuse_allow_current_process(struct fuse_conn *fc);
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);
+void fuse_update_ctime(struct inode *inode);
+
int fuse_update_attributes(struct inode *inode, struct kstat *stat,
struct file *file, bool *refreshed);
@@ -966,4 +969,17 @@ void fuse_set_initialized(struct fuse_conn *fc);
void fuse_unlock_inode(struct inode *inode);
void fuse_lock_inode(struct inode *inode);
+int fuse_setxattr(struct inode *inode, const char *name, const void *value,
+ size_t size, int flags);
+ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
+ size_t size);
+ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
+int fuse_removexattr(struct inode *inode, const char *name);
+extern const struct xattr_handler *fuse_xattr_handlers[];
+extern const struct xattr_handler *fuse_acl_xattr_handlers[];
+
+struct posix_acl;
+struct posix_acl *fuse_get_acl(struct inode *inode, int type);
+int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4e05b51120f4..17141099f2e7 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -20,6 +20,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/exportfs.h>
+#include <linux/posix_acl.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -66,7 +67,8 @@ struct fuse_mount_data {
unsigned rootmode_present:1;
unsigned user_id_present:1;
unsigned group_id_present:1;
- unsigned flags;
+ unsigned default_permissions:1;
+ unsigned allow_other:1;
unsigned max_read;
unsigned blksize;
};
@@ -192,7 +194,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
* check in may_delete().
*/
fi->orig_i_mode = inode->i_mode;
- if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ if (!fc->default_permissions)
inode->i_mode &= ~S_ISVTX;
fi->orig_ino = attr->ino;
@@ -340,6 +342,7 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
return -ENOENT;
fuse_invalidate_attr(inode);
+ forget_all_cached_acls(inode);
if (offset >= 0) {
pg_start = offset >> PAGE_SHIFT;
if (len <= 0)
@@ -532,11 +535,11 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
break;
case OPT_DEFAULT_PERMISSIONS:
- d->flags |= FUSE_DEFAULT_PERMISSIONS;
+ d->default_permissions = 1;
break;
case OPT_ALLOW_OTHER:
- d->flags |= FUSE_ALLOW_OTHER;
+ d->allow_other = 1;
break;
case OPT_MAX_READ:
@@ -570,9 +573,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",user_id=%u", from_kuid_munged(&init_user_ns, fc->user_id));
seq_printf(m, ",group_id=%u", from_kgid_munged(&init_user_ns, fc->group_id));
- if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
+ if (fc->default_permissions)
seq_puts(m, ",default_permissions");
- if (fc->flags & FUSE_ALLOW_OTHER)
+ if (fc->allow_other)
seq_puts(m, ",allow_other");
if (fc->max_read != ~0)
seq_printf(m, ",max_read=%u", fc->max_read);
@@ -910,8 +913,15 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->writeback_cache = 1;
if (arg->flags & FUSE_PARALLEL_DIROPS)
fc->parallel_dirops = 1;
+ if (arg->flags & FUSE_HANDLE_KILLPRIV)
+ fc->handle_killpriv = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
+ if ((arg->flags & FUSE_POSIX_ACL)) {
+ fc->default_permissions = 1;
+ fc->posix_acl = 1;
+ fc->sb->s_xattr = fuse_acl_xattr_handlers;
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -941,7 +951,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
- FUSE_PARALLEL_DIROPS;
+ FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1071,6 +1081,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_magic = FUSE_SUPER_MAGIC;
sb->s_op = &fuse_super_operations;
+ sb->s_xattr = fuse_xattr_handlers;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_export_op = &fuse_export_operations;
@@ -1109,7 +1120,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->dont_mask = 1;
sb->s_flags |= MS_POSIXACL;
- fc->flags = d.flags;
+ fc->default_permissions = d.default_permissions;
+ fc->allow_other = d.allow_other;
fc->user_id = d.user_id;
fc->group_id = d.group_id;
fc->max_read = max_t(unsigned, 4096, d.max_read);
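The fuse_conn changes above replace the old flags word with per-feature bitfields and make FUSE_POSIX_ACL imply default_permissions. A standalone sketch of that negotiation pattern follows; flag values and names are illustrative, not the kernel's.

/*
 * Standalone sketch, not kernel code: folding a negotiated INIT flags word
 * into per-feature bitfields. Flag values and names are illustrative.
 */
#include <stdio.h>

#define INIT_HANDLE_KILLPRIV (1u << 0)
#define INIT_POSIX_ACL       (1u << 1)

struct conn {
        unsigned handle_killpriv:1;
        unsigned posix_acl:1;
        unsigned default_permissions:1;
};

static void process_init_flags(struct conn *c, unsigned flags)
{
        if (flags & INIT_HANDLE_KILLPRIV)
                c->handle_killpriv = 1;
        if (flags & INIT_POSIX_ACL) {
                /* as in the hunk above: ACL support forces mode-based checks */
                c->default_permissions = 1;
                c->posix_acl = 1;
        }
}

int main(void)
{
        struct conn c = { 0 };

        process_init_flags(&c, INIT_POSIX_ACL);
        printf("posix_acl=%u default_permissions=%u\n",
               c.posix_acl, c.default_permissions);
        return 0;
}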
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
new file mode 100644
index 000000000000..3caac46b08b0
--- /dev/null
+++ b/fs/fuse/xattr.c
@@ -0,0 +1,211 @@
+/*
+ * FUSE: Filesystem in Userspace
+ * Copyright (C) 2001-2016 Miklos Szeredi <miklos@szeredi.hu>
+ *
+ * This program can be distributed under the terms of the GNU GPL.
+ * See the file COPYING.
+ */
+
+#include "fuse_i.h"
+
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+
+int fuse_setxattr(struct inode *inode, const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ FUSE_ARGS(args);
+ struct fuse_setxattr_in inarg;
+ int err;
+
+ if (fc->no_setxattr)
+ return -EOPNOTSUPP;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.size = size;
+ inarg.flags = flags;
+ args.in.h.opcode = FUSE_SETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 3;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
+ args.in.args[2].size = size;
+ args.in.args[2].value = value;
+ err = fuse_simple_request(fc, &args);
+ if (err == -ENOSYS) {
+ fc->no_setxattr = 1;
+ err = -EOPNOTSUPP;
+ }
+ if (!err) {
+ fuse_invalidate_attr(inode);
+ fuse_update_ctime(inode);
+ }
+ return err;
+}
+
+ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
+ size_t size)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ FUSE_ARGS(args);
+ struct fuse_getxattr_in inarg;
+ struct fuse_getxattr_out outarg;
+ ssize_t ret;
+
+ if (fc->no_getxattr)
+ return -EOPNOTSUPP;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.size = size;
+ args.in.h.opcode = FUSE_GETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
+ /* This is really two different operations rolled into one */
+ args.out.numargs = 1;
+ if (size) {
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = value;
+ } else {
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
+ if (ret == -ENOSYS) {
+ fc->no_getxattr = 1;
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int fuse_verify_xattr_list(char *list, size_t size)
+{
+ size_t origsize = size;
+
+ while (size) {
+ size_t thislen = strnlen(list, size);
+
+ if (!thislen || thislen == size)
+ return -EIO;
+
+ size -= thislen + 1;
+ list += thislen + 1;
+ }
+
+ return origsize;
+}
+
+ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
+{
+ struct inode *inode = d_inode(entry);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ FUSE_ARGS(args);
+ struct fuse_getxattr_in inarg;
+ struct fuse_getxattr_out outarg;
+ ssize_t ret;
+
+ if (!fuse_allow_current_process(fc))
+ return -EACCES;
+
+ if (fc->no_listxattr)
+ return -EOPNOTSUPP;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.size = size;
+ args.in.h.opcode = FUSE_LISTXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ /* This is really two different operations rolled into one */
+ args.out.numargs = 1;
+ if (size) {
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = list;
+ } else {
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
+ if (ret > 0 && size)
+ ret = fuse_verify_xattr_list(list, ret);
+ if (ret == -ENOSYS) {
+ fc->no_listxattr = 1;
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+int fuse_removexattr(struct inode *inode, const char *name)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ FUSE_ARGS(args);
+ int err;
+
+ if (fc->no_removexattr)
+ return -EOPNOTSUPP;
+
+ args.in.h.opcode = FUSE_REMOVEXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = strlen(name) + 1;
+ args.in.args[0].value = name;
+ err = fuse_simple_request(fc, &args);
+ if (err == -ENOSYS) {
+ fc->no_removexattr = 1;
+ err = -EOPNOTSUPP;
+ }
+ if (!err) {
+ fuse_invalidate_attr(inode);
+ fuse_update_ctime(inode);
+ }
+ return err;
+}
+
+static int fuse_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *value, size_t size)
+{
+ return fuse_getxattr(inode, name, value, size);
+}
+
+static int fuse_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
+{
+ if (!value)
+ return fuse_removexattr(inode, name);
+
+ return fuse_setxattr(inode, name, value, size, flags);
+}
+
+static const struct xattr_handler fuse_xattr_handler = {
+ .prefix = "",
+ .get = fuse_xattr_get,
+ .set = fuse_xattr_set,
+};
+
+const struct xattr_handler *fuse_xattr_handlers[] = {
+ &fuse_xattr_handler,
+ NULL
+};
+
+const struct xattr_handler *fuse_acl_xattr_handlers[] = {
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+ &fuse_xattr_handler,
+ NULL
+};
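fuse_xattr_handlers[] and fuse_acl_xattr_handlers[] above are NULL-terminated tables that the VFS walks to find a handler whose prefix matches the attribute name, with the empty prefix acting as a catch-all. A minimal user-space sketch of that dispatch pattern follows; the names are illustrative, not kernel API.

/*
 * Standalone sketch, not kernel code: a NULL-terminated handler table matched
 * by name prefix, with "" as the catch-all, mirroring the shape of
 * fuse_xattr_handlers[] above.
 */
#include <stdio.h>
#include <string.h>

struct handler {
        const char *prefix;
        void (*get)(const char *name);
};

static void acl_get(const char *name)     { printf("acl handler: %s\n", name); }
static void generic_get(const char *name) { printf("generic handler: %s\n", name); }

static const struct handler acl_handler     = { "system.posix_acl_", acl_get };
static const struct handler default_handler = { "",                  generic_get };

/* more specific handlers first, catch-all last, NULL terminates the table */
static const struct handler *handlers[] = { &acl_handler, &default_handler, NULL };

static void dispatch(const char *name)
{
        const struct handler **h;

        for (h = handlers; *h; h++) {
                if (!strncmp(name, (*h)->prefix, strlen((*h)->prefix))) {
                        (*h)->get(name);
                        return;
                }
        }
}

int main(void)
{
        dispatch("system.posix_acl_access");  /* picked up by the ACL handler */
        dispatch("user.comment");             /* falls through to the catch-all */
        return 0;
}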
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 82df36886938..5a6f52ea2722 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -187,7 +187,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
ClearPageChecked(page);
if (!page_has_buffers(page)) {
create_empty_buffers(page, inode->i_sb->s_blocksize,
- (1 << BH_Dirty)|(1 << BH_Uptodate));
+ BIT(BH_Dirty)|BIT(BH_Uptodate));
}
gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
}
@@ -1147,6 +1147,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (!page_has_buffers(page))
return 0;
+ /*
+ * From xfs_vm_releasepage: mm accommodates an old ext3 case where
+ * clean pages might not have had the dirty bit cleared. Thus, it can
+ * send actual dirty pages to ->releasepage() via shrink_active_list().
+ *
+ * As a workaround, we skip pages that contain dirty buffers below.
+ * Once ->releasepage isn't called on dirty pages anymore, we can warn
+ * on dirty buffers like we used to here again.
+ */
+
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
head = bh = page_buffers(page);
@@ -1156,8 +1166,8 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bd = bh->b_private;
if (bd && bd->bd_tr)
goto cannot_release;
- if (buffer_pinned(bh) || buffer_dirty(bh))
- goto not_possible;
+ if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
+ goto cannot_release;
bh = bh->b_this_page;
} while(bh != head);
spin_unlock(&sdp->sd_ail_lock);
@@ -1180,9 +1190,6 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
return try_to_free_buffers(page);
-not_possible: /* Should never happen */
- WARN_ON(buffer_dirty(bh));
- WARN_ON(buffer_pinned(bh));
cannot_release:
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6e2bec1cd289..645721f3ff00 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -82,8 +82,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
}
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits,
- (1 << BH_Uptodate));
+ create_empty_buffers(page, BIT(inode->i_blkbits),
+ BIT(BH_Uptodate));
bh = page_buffers(page);
@@ -690,7 +690,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
BUG_ON(!dblock);
BUG_ON(!new);
- bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
+ bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
ret = gfs2_block_map(inode, lblock, &bh, create);
*extlen = bh.b_size >> inode->i_blkbits;
*dblock = bh.b_blocknr;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index fcb59b23f1e3..db8fbeb62483 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -351,7 +351,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
if (hc)
return hc;
- hsize = 1 << ip->i_depth;
+ hsize = BIT(ip->i_depth);
hsize *= sizeof(__be64);
if (hsize != i_size_read(&ip->i_inode)) {
gfs2_consist_inode(ip);
@@ -819,8 +819,8 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf;
- unsigned hsize = 1 << ip->i_depth;
- unsigned index;
+ unsigned int hsize = BIT(ip->i_depth);
+ unsigned int index;
u64 ln;
if (hsize * sizeof(u64) != i_size_read(inode)) {
gfs2_consist_inode(ip);
@@ -932,7 +932,7 @@ static int dir_make_exhash(struct inode *inode)
return -ENOSPC;
bn = bh->b_blocknr;
- gfs2_assert(sdp, dip->i_entries < (1 << 16));
+ gfs2_assert(sdp, dip->i_entries < BIT(16));
leaf->lf_entries = cpu_to_be16(dip->i_entries);
/* Copy dirents */
@@ -1041,7 +1041,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
bn = nbh->b_blocknr;
/* Compute the start and len of leaf pointers in the hash table. */
- len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
+ len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1;
if (!half_len) {
pr_warn("i_depth %u lf_depth %u index %u\n",
@@ -1163,7 +1163,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
int x;
int error = 0;
- hsize = 1 << dip->i_depth;
+ hsize = BIT(dip->i_depth);
hsize_bytes = hsize * sizeof(__be64);
hc = gfs2_dir_get_hash_table(dip);
@@ -1539,7 +1539,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
int error = 0;
unsigned depth = 0;
- hsize = 1 << dip->i_depth;
+ hsize = BIT(dip->i_depth);
hash = gfs2_dir_offset2hash(ctx->pos);
index = hash >> (32 - dip->i_depth);
@@ -1558,7 +1558,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
if (error)
break;
- len = 1 << (dip->i_depth - depth);
+ len = BIT(dip->i_depth - depth);
index = (index & ~(len - 1)) + len;
}
@@ -2113,7 +2113,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
u64 leaf_no;
int error = 0, last;
- hsize = 1 << dip->i_depth;
+ hsize = BIT(dip->i_depth);
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
@@ -2126,7 +2126,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
if (error)
goto out;
leaf = (struct gfs2_leaf *)bh->b_data;
- len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
+ len = BIT(dip->i_depth - be16_to_cpu(leaf->lf_depth));
next_index = (index & ~(len - 1)) + len;
last = ((next_index >= hsize) ? 1 : 0);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 320e65e61938..e23ff70b3435 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -395,9 +395,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
- /* Update file times before taking page lock */
- file_update_time(vma->vm_file);
-
ret = gfs2_rsqa_alloc(ip);
if (ret)
goto out;
@@ -409,6 +406,9 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_uninit;
+ /* Update file times before taking page lock */
+ file_update_time(vma->vm_file);
+
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -954,30 +954,6 @@ out_uninit:
return ret;
}
-static ssize_t gfs2_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
-{
- struct inode *inode = in->f_mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_holder gh;
- int ret;
-
- inode_lock(inode);
-
- ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
-
- gfs2_glock_dq_uninit(&gh);
- inode_unlock(inode);
-
- return generic_file_splice_read(in, ppos, pipe, len, flags);
-}
-
-
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
@@ -1140,7 +1116,7 @@ const struct file_operations gfs2_file_fops = {
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
- .splice_read = gfs2_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
@@ -1168,7 +1144,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
- .splice_read = gfs2_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = generic_setlease,
.fallocate = gfs2_fallocate,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3a90b2b5b9bb..14cbf60167a7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -69,7 +69,7 @@ static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
#define GFS2_GL_HASH_SHIFT 15
-#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
+#define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)
static struct rhashtable_params ht_parms = {
.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
@@ -1781,7 +1781,13 @@ int __init gfs2_glock_init(void)
return -ENOMEM;
}
- register_shrinker(&glock_shrinker);
+ ret = register_shrinker(&glock_shrinker);
+ if (ret) {
+ destroy_workqueue(gfs2_delete_workqueue);
+ destroy_workqueue(glock_workqueue);
+ rhashtable_destroy(&gl_hash_table);
+ return ret;
+ }
return 0;
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index e4da0ecd3285..fb3a810b506f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -187,6 +187,10 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
}
gfs2_set_iop(inode);
+
+ inode->i_atime.tv_sec = 0;
+ inode->i_atime.tv_nsec = 0;
+
unlock_new_inode(inode);
}
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 7710dfd3af35..aace8ce34a18 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -85,7 +85,7 @@ static inline int gfs2_check_internal_file_size(struct inode *inode,
u64 size = i_size_read(inode);
if (size < minsize || size > maxsize)
goto err;
- if (size & ((1 << inode->i_blkbits) - 1))
+ if (size & (BIT(inode->i_blkbits) - 1))
goto err;
return 0;
err:
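The gfs2 hunks above replace open-coded 1 << x shifts with the kernel's BIT() helper, which shifts an unsigned long and so avoids signed-overflow surprises for high bit numbers. A minimal sketch of the equivalence, assuming the usual (1UL << (nr)) definition:

/* Standalone sketch: BIT() assuming the usual (1UL << (nr)) definition. */
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

int main(void)
{
        unsigned int depth = 4;

        /* old style: hsize = 1 << depth; */
        unsigned long hsize = BIT(depth);

        printf("hsize = %lu\n", hsize);        /* 16 */
        printf("BIT(31) = %lu\n", BIT(31));    /* no signed 1 << 31 overflow */
        return 0;
}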
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 74fd0139e6c2..67d1fc4668f7 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -145,7 +145,9 @@ static int __init init_gfs2_fs(void)
if (!gfs2_qadata_cachep)
goto fail;
- register_shrinker(&gfs2_qd_shrinker);
+ error = register_shrinker(&gfs2_qd_shrinker);
+ if (error)
+ goto fail;
error = register_filesystem(&gfs2_fs_type);
if (error)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 950b8be68e41..373639a59782 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -216,23 +216,26 @@ static void gfs2_meta_read_endio(struct bio *bio)
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
int num)
{
- struct buffer_head *bh = bhs[0];
- struct bio *bio;
- int i;
-
- if (!num)
- return;
-
- bio = bio_alloc(GFP_NOIO, num);
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_bdev = bh->b_bdev;
- for (i = 0; i < num; i++) {
- bh = bhs[i];
- bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+ while (num > 0) {
+ struct buffer_head *bh = *bhs;
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_NOIO, num);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+ while (num > 0) {
+ bh = *bhs;
+ if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
+ BUG_ON(bio->bi_iter.bi_size == 0);
+ break;
+ }
+ bhs++;
+ num--;
+ }
+ bio->bi_end_io = gfs2_meta_read_endio;
+ bio_set_op_attrs(bio, op, op_flags);
+ submit_bio(bio);
}
- bio->bi_end_io = gfs2_meta_read_endio;
- bio_set_op_attrs(bio, op, op_flags);
- submit_bio(bio);
}
/**
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ef1e1822977f..ff72ac6439c8 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -58,7 +58,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
- gt->gt_max_readahead = 1 << 18;
+ gt->gt_max_readahead = BIT(18);
gt->gt_complain_secs = 10;
}
@@ -284,7 +284,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT;
- sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
+ sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64);
sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
@@ -302,7 +302,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
/* Compute maximum reservation required to add an entry to a directory */
- hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
+ hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
sdp->sd_jbsize);
ind_blocks = 0;
@@ -1089,7 +1089,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT;
- sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
+ sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 77930ca25303..8af2dfa09236 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -75,7 +75,7 @@
#include "util.h"
#define GFS2_QD_HASH_SHIFT 12
-#define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT)
+#define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
@@ -384,7 +384,7 @@ static int bh_get(struct gfs2_quota_data *qd)
block = qd->qd_slot / sdp->sd_qc_per_block;
offset = qd->qd_slot % sdp->sd_qc_per_block;
- bh_map.b_size = 1 << ip->i_inode.i_blkbits;
+ bh_map.b_size = BIT(ip->i_inode.i_blkbits);
error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
if (error)
goto fail;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 3a7e60bb39f8..e3ee387a6dfe 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -359,7 +359,7 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
u64 size = i_size_read(jd->jd_inode);
- if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30))
+ if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
return -EIO;
jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4ea71eba40a5..7337cac29e9e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i];
- bool rsv_on_error;
u32 hash;
/*
@@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
* cache (remove_huge_page) BEFORE removing the
* region/reserve map (hugetlb_unreserve_pages). In
* rare out of memory conditions, removal of the
- * region/reserve map could fail. Before free'ing
- * the page, note PagePrivate which is used in case
- * of error.
+ * region/reserve map could fail. Correspondingly,
+ * the subpool and global reserve usage count may need
+ * to be adjusted.
*/
- rsv_on_error = !PagePrivate(page);
+ VM_BUG_ON(PagePrivate(page));
remove_huge_page(page);
freed++;
if (!truncate_op) {
if (unlikely(hugetlb_unreserve_pages(inode,
next, next + 1, 1)))
- hugetlb_fix_reserve_counts(inode,
- rsv_on_error);
+ hugetlb_fix_reserve_counts(inode);
}
unlock_page(page);
diff --git a/fs/internal.h b/fs/internal.h
index ba0737649d4a..859178692ce4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -12,6 +12,7 @@
struct super_block;
struct file_system_type;
struct iomap;
+struct iomap_ops;
struct linux_binprm;
struct path;
struct mount;
@@ -164,3 +165,13 @@ extern struct dentry_operations ns_dentry_operations;
extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/*
+ * iomap support:
+ */
+typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
+ void *data, struct iomap *iomap);
+
+loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap_ops *ops, void *data,
+ iomap_actor_t actor);
diff --git a/fs/iomap.c b/fs/iomap.c
index 706270f21b35..013d1d36fbbf 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -27,9 +27,6 @@
#include <linux/dax.h>
#include "internal.h"
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap);
-
/*
* Execute an iomap write on a segment of the mapping that spans a
* contiguous range of pages that have identical block mapping state.
@@ -41,7 +38,7 @@ typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
* resources they require in the iomap_begin call, and release them in the
* iomap_end call.
*/
-static loff_t
+loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
@@ -252,6 +249,88 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+static struct page *
+__iomap_read_page(struct inode *inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+
+ page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
+ if (IS_ERR(page))
+ return page;
+ if (!PageUptodate(page)) {
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+ return page;
+}
+
+static loff_t
+iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ long status = 0;
+ ssize_t written = 0;
+
+ do {
+ struct page *page, *rpage;
+ unsigned long offset; /* Offset into pagecache page */
+ unsigned long bytes; /* Bytes to write to page */
+
+ offset = (pos & (PAGE_SIZE - 1));
+ bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
+
+ rpage = __iomap_read_page(inode, pos);
+ if (IS_ERR(rpage))
+ return PTR_ERR(rpage);
+
+ status = iomap_write_begin(inode, pos, bytes,
+ AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
+ &page, iomap);
+ put_page(rpage);
+ if (unlikely(status))
+ return status;
+
+ WARN_ON_ONCE(!PageUptodate(page));
+
+ status = iomap_write_end(inode, pos, bytes, bytes, page);
+ if (unlikely(status <= 0)) {
+ if (WARN_ON_ONCE(status == 0))
+ return -EIO;
+ return status;
+ }
+
+ cond_resched();
+
+ pos += status;
+ written += status;
+ length -= status;
+
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ } while (length);
+
+ return written;
+}
+
+int
+iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+ struct iomap_ops *ops)
+{
+ loff_t ret;
+
+ while (len) {
+ ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
+ iomap_dirty_actor);
+ if (ret <= 0)
+ return ret;
+ pos += ret;
+ len -= ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_file_dirty);
+
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
unsigned bytes, struct iomap *iomap)
{
@@ -430,6 +509,8 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
if (iomap->flags & IOMAP_F_MERGED)
flags |= FIEMAP_EXTENT_MERGED;
+ if (iomap->flags & IOMAP_F_SHARED)
+ flags |= FIEMAP_EXTENT_SHARED;
return fiemap_fill_next_extent(fi, iomap->offset,
iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
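iomap_file_dirty() above follows the iomap apply/actor pattern: the caller loops over a byte range while iomap_apply() hands each contiguous mapping to an actor callback that returns how many bytes it consumed. A standalone sketch of that control flow follows, with simplified types and a fixed 4096-byte chunk standing in for the real per-extent mapping lookup.

/*
 * Standalone sketch, not kernel code: the apply/actor loop behind
 * iomap_file_dirty(). Types and sizes are simplified.
 */
#include <stdio.h>

typedef long (*actor_t)(long pos, long len, void *data);

static long apply(long pos, long len, void *data, actor_t actor)
{
        long chunk = len < 4096 ? len : 4096;   /* one "mapping" at a time */

        return actor(pos, chunk, data);
}

static long dirty_actor(long pos, long len, void *data)
{
        (void)data;
        printf("dirtying [%ld, %ld)\n", pos, pos + len);
        return len;                             /* bytes actually handled */
}

int main(void)
{
        long pos = 0, len = 10000, ret;

        while (len) {
                ret = apply(pos, len, NULL, dirty_actor);
                if (ret <= 0)
                        return 1;
                pos += ret;
                len -= ret;
        }
        return 0;
}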
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 46261a6f902d..927da4956a89 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1090,11 +1090,15 @@ static void jbd2_stats_proc_exit(journal_t *journal)
* very few fields yet: that has to wait until we have created the
* journal structures from scratch, or loaded them from disk. */
-static journal_t * journal_init_common (void)
+static journal_t *journal_init_common(struct block_device *bdev,
+ struct block_device *fs_dev,
+ unsigned long long start, int len, int blocksize)
{
static struct lock_class_key jbd2_trans_commit_key;
journal_t *journal;
int err;
+ struct buffer_head *bh;
+ int n;
journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
@@ -1131,6 +1135,32 @@ static journal_t * journal_init_common (void)
lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
&jbd2_trans_commit_key, 0);
+ /* journal descriptor can store up to n blocks -bzzz */
+ journal->j_blocksize = blocksize;
+ journal->j_dev = bdev;
+ journal->j_fs_dev = fs_dev;
+ journal->j_blk_offset = start;
+ journal->j_maxlen = len;
+ n = journal->j_blocksize / sizeof(journal_block_tag_t);
+ journal->j_wbufsize = n;
+ journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!journal->j_wbuf) {
+ kfree(journal);
+ return NULL;
+ }
+
+ bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
+ if (!bh) {
+ pr_err("%s: Cannot get buffer for journal superblock\n",
+ __func__);
+ kfree(journal->j_wbuf);
+ kfree(journal);
+ return NULL;
+ }
+ journal->j_sb_buffer = bh;
+ journal->j_superblock = (journal_superblock_t *)bh->b_data;
+
return journal;
}
@@ -1157,51 +1187,21 @@ static journal_t * journal_init_common (void)
* range of blocks on an arbitrary block device.
*
*/
-journal_t * jbd2_journal_init_dev(struct block_device *bdev,
+journal_t *jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int blocksize)
{
- journal_t *journal = journal_init_common();
- struct buffer_head *bh;
- int n;
+ journal_t *journal;
+ journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
if (!journal)
return NULL;
- /* journal descriptor can store up to n blocks -bzzz */
- journal->j_blocksize = blocksize;
- journal->j_dev = bdev;
- journal->j_fs_dev = fs_dev;
- journal->j_blk_offset = start;
- journal->j_maxlen = len;
bdevname(journal->j_dev, journal->j_devname);
strreplace(journal->j_devname, '/', '!');
jbd2_stats_proc_init(journal);
- n = journal->j_blocksize / sizeof(journal_block_tag_t);
- journal->j_wbufsize = n;
- journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
- if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
- __func__);
- goto out_err;
- }
-
- bh = __getblk(journal->j_dev, start, journal->j_blocksize);
- if (!bh) {
- printk(KERN_ERR
- "%s: Cannot get buffer for journal superblock\n",
- __func__);
- goto out_err;
- }
- journal->j_sb_buffer = bh;
- journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
-out_err:
- kfree(journal->j_wbuf);
- jbd2_stats_proc_exit(journal);
- kfree(journal);
- return NULL;
}
/**
@@ -1212,67 +1212,36 @@ out_err:
* the journal. The inode must exist already, must support bmap() and
* must have all data blocks preallocated.
*/
-journal_t * jbd2_journal_init_inode (struct inode *inode)
+journal_t *jbd2_journal_init_inode(struct inode *inode)
{
- struct buffer_head *bh;
- journal_t *journal = journal_init_common();
+ journal_t *journal;
char *p;
- int err;
- int n;
unsigned long long blocknr;
+ blocknr = bmap(inode, 0);
+ if (!blocknr) {
+ pr_err("%s: Cannot locate journal superblock\n",
+ __func__);
+ return NULL;
+ }
+
+ jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
+ inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
+ inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
+
+ journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
+ blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
+ inode->i_sb->s_blocksize);
if (!journal)
return NULL;
- journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
journal->j_inode = inode;
bdevname(journal->j_dev, journal->j_devname);
p = strreplace(journal->j_devname, '/', '!');
sprintf(p, "-%lu", journal->j_inode->i_ino);
- jbd_debug(1,
- "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
- journal, inode->i_sb->s_id, inode->i_ino,
- (long long) inode->i_size,
- inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
-
- journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
- journal->j_blocksize = inode->i_sb->s_blocksize;
jbd2_stats_proc_init(journal);
- /* journal descriptor can store up to n blocks -bzzz */
- n = journal->j_blocksize / sizeof(journal_block_tag_t);
- journal->j_wbufsize = n;
- journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
- if (!journal->j_wbuf) {
- printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
- __func__);
- goto out_err;
- }
-
- err = jbd2_journal_bmap(journal, 0, &blocknr);
- /* If that failed, give up */
- if (err) {
- printk(KERN_ERR "%s: Cannot locate journal superblock\n",
- __func__);
- goto out_err;
- }
-
- bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
- if (!bh) {
- printk(KERN_ERR
- "%s: Cannot get buffer for journal superblock\n",
- __func__);
- goto out_err;
- }
- journal->j_sb_buffer = bh;
- journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
return journal;
-out_err:
- kfree(journal->j_wbuf);
- jbd2_stats_proc_exit(journal);
- kfree(journal);
- return NULL;
}
/*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index b5bc3e249163..3d8246a9faa4 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -159,6 +159,7 @@ static void wait_transaction_locked(journal_t *journal)
read_unlock(&journal->j_state_lock);
if (need_to_start)
jbd2_log_start_commit(journal, tid);
+ jbd2_might_wait_for_commit(journal);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
}
@@ -182,8 +183,6 @@ static int add_transaction_credits(journal_t *journal, int blocks,
int needed;
int total = blocks + rsv_blocks;
- jbd2_might_wait_for_commit(journal);
-
/*
* If the current transaction is locked down for commit, wait
* for the lock to be released.
@@ -214,6 +213,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
if (atomic_read(&journal->j_reserved_credits) + total >
journal->j_max_transaction_buffers) {
read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + total <=
journal->j_max_transaction_buffers);
@@ -238,6 +238,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
write_lock(&journal->j_state_lock);
if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
__jbd2_log_wait_for_space(journal);
@@ -255,6 +256,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
sub_reserved_credits(journal, rsv_blocks);
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + rsv_blocks
<= journal->j_max_transaction_buffers / 2);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 2e58978d6f45..4d973524c887 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2893,8 +2893,7 @@ restart:
* on anon_list2. Let's check.
*/
if (!list_empty(&TxAnchor.anon_list2)) {
- list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
- INIT_LIST_HEAD(&TxAnchor.anon_list2);
+ list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
goto restart;
}
TXN_UNLOCK();
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 90b3bc21e9b0..bd9b641ada2c 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -379,8 +379,14 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
* cached in meta-data cache, and not written out
* by txCommit();
*/
- filemap_fdatawait(ipbmap->i_mapping);
- filemap_write_and_wait(ipbmap->i_mapping);
+ rc = filemap_fdatawait(ipbmap->i_mapping);
+ if (rc)
+ goto error_out;
+
+ rc = filemap_write_and_wait(ipbmap->i_mapping);
+ if (rc)
+ goto error_out;
+
diWriteSpecial(ipbmap, 0);
newPage = nPages; /* first new page number */
diff --git a/fs/locks.c b/fs/locks.c
index 133fb2543d21..90ec67108b22 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2603,9 +2603,20 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
struct inode *inode = NULL;
unsigned int fl_pid;
- if (fl->fl_nspid)
- fl_pid = pid_vnr(fl->fl_nspid);
- else
+ if (fl->fl_nspid) {
+ struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
+
+ /* Don't let fl_pid change based on who is reading the file */
+ fl_pid = pid_nr_ns(fl->fl_nspid, proc_pidns);
+
+ /*
+ * If there isn't a fl_pid, skip the lock entirely: don't
+ * display who is waiting on it, whether we are called from
+ * locks_show or from __show_fd_info.
+ */
+ if (fl_pid == 0)
+ return;
+ } else
fl_pid = fl->fl_pid;
if (fl->fl_file != NULL)
@@ -2677,9 +2688,13 @@ static int locks_show(struct seq_file *f, void *v)
{
struct locks_iterator *iter = f->private;
struct file_lock *fl, *bfl;
+ struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
fl = hlist_entry(v, struct file_lock, fl_link);
+ if (fl->fl_nspid && !pid_nr_ns(fl->fl_nspid, proc_pidns))
+ return 0;
+
lock_get_status(f, fl, iter->li_pos, "");
list_for_each_entry(bfl, &fl->fl_block, fl_block)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index eccda3a02de6..c5bd19ffa326 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -366,7 +366,11 @@ struct mb_cache *mb_cache_create(int bucket_bits)
cache->c_shrink.count_objects = mb_cache_count;
cache->c_shrink.scan_objects = mb_cache_scan;
cache->c_shrink.seeks = DEFAULT_SEEKS;
- register_shrinker(&cache->c_shrink);
+ if (register_shrinker(&cache->c_shrink)) {
+ kfree(cache->c_hash);
+ kfree(cache);
+ goto err_out;
+ }
INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
diff --git a/fs/mount.h b/fs/mount.h
index 14db05d424f7..d2e25d7b64b3 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -10,9 +10,12 @@ struct mnt_namespace {
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
+ unsigned int mounts; /* # of mounts in the namespace */
+ unsigned int pending_mounts;
};
struct mnt_pcp {
diff --git a/fs/namespace.c b/fs/namespace.c
index 7bb2cda3bfef..db1b5a38864e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -27,6 +27,9 @@
#include "pnode.h"
#include "internal.h"
+/* Maximum number of mounts in a mount namespace */
+unsigned int sysctl_mount_max __read_mostly = 100000;
+
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
@@ -899,6 +902,9 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
list_splice(&head, n->list.prev);
+ n->mounts += n->pending_mounts;
+ n->pending_mounts = 0;
+
attach_shadowed(mnt, parent, shadows);
touch_mnt_namespace(n);
}
@@ -1419,11 +1425,16 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
propagate_umount(&tmp_list);
while (!list_empty(&tmp_list)) {
+ struct mnt_namespace *ns;
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
- __touch_mnt_namespace(p->mnt_ns);
+ ns = p->mnt_ns;
+ if (ns) {
+ ns->mounts--;
+ __touch_mnt_namespace(ns);
+ }
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
@@ -1840,6 +1851,28 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
return 0;
}
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
+{
+ unsigned int max = READ_ONCE(sysctl_mount_max);
+ unsigned int mounts = 0, old, pending, sum;
+ struct mount *p;
+
+ for (p = mnt; p; p = next_mnt(p, mnt))
+ mounts++;
+
+ old = ns->mounts;
+ pending = ns->pending_mounts;
+ sum = old + pending;
+ if ((old > sum) ||
+ (pending > sum) ||
+ (max < sum) ||
+ (mounts > (max - sum)))
+ return -ENOSPC;
+
+ ns->pending_mounts = pending + mounts;
+ return 0;
+}
+
/*
* @source_mnt : mount tree to be attached
* @nd : place the mount tree @source_mnt is attached
@@ -1909,10 +1942,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
struct path *parent_path)
{
HLIST_HEAD(tree_list);
+ struct mnt_namespace *ns = dest_mnt->mnt_ns;
struct mount *child, *p;
struct hlist_node *n;
int err;
+ /* Is there space to add these mounts to the mount namespace? */
+ if (!parent_path) {
+ err = count_mounts(ns, source_mnt);
+ if (err)
+ goto out;
+ }
+
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
@@ -1949,11 +1990,13 @@ static int attach_recursive_mnt(struct mount *source_mnt,
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+ child->mnt_parent->mnt_ns->pending_mounts = 0;
umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
out:
+ ns->pending_mounts = 0;
return err;
}
@@ -2719,9 +2762,20 @@ dput_out:
return retval;
}
+static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
+}
+
+static void dec_mnt_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
+}
+
static void free_mnt_ns(struct mnt_namespace *ns)
{
ns_free_inum(&ns->ns);
+ dec_mnt_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
kfree(ns);
}
@@ -2738,14 +2792,22 @@ static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
struct mnt_namespace *new_ns;
+ struct ucounts *ucounts;
int ret;
+ ucounts = inc_mnt_namespaces(user_ns);
+ if (!ucounts)
+ return ERR_PTR(-ENOSPC);
+
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
- if (!new_ns)
+ if (!new_ns) {
+ dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
+ }
ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
+ dec_mnt_namespaces(ucounts);
return ERR_PTR(ret);
}
new_ns->ns.ops = &mntns_operations;
@@ -2756,6 +2818,9 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
+ new_ns->ucounts = ucounts;
+ new_ns->mounts = 0;
+ new_ns->pending_mounts = 0;
return new_ns;
}
@@ -2805,6 +2870,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
q = new;
while (p) {
q->mnt_ns = new_ns;
+ new_ns->mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
@@ -2843,6 +2909,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
new_ns->root = mnt;
+ new_ns->mounts++;
list_add(&mnt->mnt_list, &new_ns->list);
} else {
mntput(m);
@@ -3348,10 +3415,16 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
return 0;
}
+static struct user_namespace *mntns_owner(struct ns_common *ns)
+{
+ return to_mnt_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
.type = CLONE_NEWNS,
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
+ .owner = mntns_owner,
};
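count_mounts() above guards the new per-namespace mount limit with an overflow-safe comparison rather than a plain old + pending + mounts <= max test. A standalone sketch of the same check follows; the function and variable names are illustrative.

/*
 * Standalone sketch, not kernel code: the overflow-safe capacity check used
 * by count_mounts() above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool would_exceed(unsigned int old, unsigned int pending,
                         unsigned int wanted, unsigned int max)
{
        unsigned int sum = old + pending;

        return (old > sum) ||            /* old + pending wrapped around */
               (pending > sum) ||
               (max < sum) ||            /* already over the limit */
               (wanted > (max - sum));   /* the new mounts do not fit */
}

int main(void)
{
        printf("%d\n", would_exceed(90000, 5000, 6000, 100000)); /* 1: over limit */
        printf("%d\n", would_exceed(10, 0, 5, 100000));          /* 0: fits */
        return 0;
}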
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index ca699ddc11c1..2efbdde36c3e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -182,29 +182,6 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
}
EXPORT_SYMBOL_GPL(nfs_file_read);
-ssize_t
-nfs_file_splice_read(struct file *filp, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct inode *inode = file_inode(filp);
- ssize_t res;
-
- dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
- filp, (unsigned long) count, (unsigned long long) *ppos);
-
- nfs_start_io_read(inode);
- res = nfs_revalidate_mapping(inode, filp->f_mapping);
- if (!res) {
- res = generic_file_splice_read(filp, ppos, pipe, count, flags);
- if (res > 0)
- nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
- }
- nfs_end_io_read(inode);
- return res;
-}
-EXPORT_SYMBOL_GPL(nfs_file_splice_read);
-
int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
@@ -871,7 +848,7 @@ const struct file_operations nfs_file_operations = {
.fsync = nfs_file_fsync,
.lock = nfs_lock,
.flock = nfs_flock,
- .splice_read = nfs_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 74935a19e4bf..4b308a1487a5 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -365,8 +365,6 @@ int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *)
int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
loff_t nfs_file_llseek(struct file *, loff_t, int);
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
-ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
- size_t, unsigned int);
int nfs_file_mmap(struct file *, struct vm_area_struct *);
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
int nfs_file_release(struct inode *, struct file *);
@@ -681,11 +679,11 @@ unsigned int nfs_page_length(struct page *page)
loff_t i_size = i_size_read(page_file_mapping(page)->host);
if (i_size > 0) {
- pgoff_t page_index = page_file_index(page);
+ pgoff_t index = page_index(page);
pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
- if (page_index < end_index)
+ if (index < end_index)
return PAGE_SIZE;
- if (page_index == end_index)
+ if (index == end_index)
return ((i_size - 1) & ~PAGE_MASK) + 1;
}
return 0;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index d085ad794884..89a77950e0b0 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -248,7 +248,7 @@ const struct file_operations nfs4_file_operations = {
.fsync = nfs_file_fsync,
.lock = nfs_lock,
.flock = nfs_flock,
- .splice_read = nfs_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 174dd4cf5747..965db474f4b0 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
if (page) {
- req->wb_index = page_file_index(page);
+ req->wb_index = page_index(page);
get_page(page);
}
req->wb_offset = offset;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 572e5b3b06f1..defc9233e985 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page)
int error;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
- page, PAGE_SIZE, page_file_index(page));
+ page, PAGE_SIZE, page_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3a6724c6eb5f..53211838f72a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -151,7 +151,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
end_index = (i_size - 1) >> PAGE_SHIFT;
- if (i_size > 0 && page_file_index(page) < end_index)
+ if (i_size > 0 && page_index(page) < end_index)
goto out;
end = page_file_offset(page) + ((loff_t)offset+count);
if (i_size >= end)
@@ -603,7 +603,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
{
int ret;
- nfs_pageio_cond_complete(pgio, page_file_index(page));
+ nfs_pageio_cond_complete(pgio, page_index(page));
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
launder);
if (ret == -EAGAIN) {
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 9d46a0bdd9f9..62469c60be23 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -55,10 +55,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
goto oom;
for (i = 0; i < rqgi->ngroups; i++) {
- if (gid_eq(GLOBAL_ROOT_GID, GROUP_AT(rqgi, i)))
- GROUP_AT(gi, i) = exp->ex_anon_gid;
+ if (gid_eq(GLOBAL_ROOT_GID, rqgi->gid[i]))
+ gi->gid[i] = exp->ex_anon_gid;
else
- GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
+ gi->gid[i] = rqgi->gid[i];
}
} else {
gi = get_group_info(rqgi);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a204d7e109d4..39bfaba9c99c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1903,7 +1903,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
if (g1->ngroups != g2->ngroups)
return false;
for (i=0; i<g1->ngroups; i++)
- if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
+ if (!gid_eq(g1->gid[i], g2->gid[i]))
return false;
return true;
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index a64313868d3a..7ebfca6a1427 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -49,12 +49,12 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
* enough to fit in "count". Return an error pointer if the count
* is not large enough.
*
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
- BUG_ON(!mutex_is_locked(&group->notification_mutex));
+ assert_spin_locked(&group->notification_lock);
pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
@@ -64,7 +64,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
if (FAN_EVENT_METADATA_LEN > count)
return ERR_PTR(-EINVAL);
- /* held the notification_mutex the whole time, so this is the
+ /* held the notification_lock the whole time, so this is the
* same event we peeked above */
return fsnotify_remove_first_event(group);
}
@@ -147,7 +147,7 @@ static struct fanotify_perm_event_info *dequeue_event(
{
struct fanotify_perm_event_info *event, *return_e = NULL;
- spin_lock(&group->fanotify_data.access_lock);
+ spin_lock(&group->notification_lock);
list_for_each_entry(event, &group->fanotify_data.access_list,
fae.fse.list) {
if (event->fd != fd)
@@ -157,7 +157,7 @@ static struct fanotify_perm_event_info *dequeue_event(
return_e = event;
break;
}
- spin_unlock(&group->fanotify_data.access_lock);
+ spin_unlock(&group->notification_lock);
pr_debug("%s: found return_re=%p\n", __func__, return_e);
@@ -244,10 +244,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
return ret;
}
@@ -268,9 +268,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
kevent = get_one_event(group, count);
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
if (IS_ERR(kevent)) {
ret = PTR_ERR(kevent);
@@ -309,10 +309,10 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
wake_up(&group->fanotify_data.access_waitq);
break;
}
- spin_lock(&group->fanotify_data.access_lock);
+ spin_lock(&group->notification_lock);
list_add_tail(&kevent->list,
&group->fanotify_data.access_list);
- spin_unlock(&group->fanotify_data.access_lock);
+ spin_unlock(&group->notification_lock);
#endif
}
buf += ret;
@@ -371,7 +371,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
* Process all permission events on access_list and notification queue
* and simulate reply from userspace.
*/
- spin_lock(&group->fanotify_data.access_lock);
+ spin_lock(&group->notification_lock);
list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
fae.fse.list) {
pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -380,22 +380,22 @@ static int fanotify_release(struct inode *ignored, struct file *file)
list_del_init(&event->fae.fse.list);
event->response = FAN_ALLOW;
}
- spin_unlock(&group->fanotify_data.access_lock);
/*
* Destroy all non-permission events. For permission events just
* dequeue them and set the response. They will be freed once the
* response is consumed and fanotify_get_response() returns.
*/
- mutex_lock(&group->notification_mutex);
while (!fsnotify_notify_queue_is_empty(group)) {
fsn_event = fsnotify_remove_first_event(group);
- if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+ if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
+ spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, fsn_event);
- else
+ spin_lock(&group->notification_lock);
+ } else
FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
}
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
/* Response for all permission events is set, wake up waiters */
wake_up(&group->fanotify_data.access_waitq);
@@ -421,10 +421,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
switch (cmd) {
case FIONREAD:
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
list_for_each_entry(fsn_event, &group->notification_list, list)
send_len += FAN_EVENT_METADATA_LEN;
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
ret = put_user(send_len, (int __user *) p);
break;
}
@@ -765,7 +765,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
event_f_flags |= O_LARGEFILE;
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- spin_lock_init(&group->fanotify_data.access_lock);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
diff --git a/fs/notify/group.c b/fs/notify/group.c
index b47f7cfdcaa4..fbe3cbebec16 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
{
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
group->shutdown = true;
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
}
/*
@@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
atomic_set(&group->refcnt, 1);
atomic_set(&group->num_marks, 0);
- mutex_init(&group->notification_mutex);
+ spin_lock_init(&group->notification_lock);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
group->max_events = UINT_MAX;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index b8d08d0d0a4d..69d1ea3d292a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
return ret;
}
@@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
* enough to fit in "count". Return an error pointer if
* not large enough.
*
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
@@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
if (event_size > count)
return ERR_PTR(-EINVAL);
- /* held the notification_mutex the whole time, so this is the
+ /* held the notification_lock the whole time, so this is the
* same event we peeked above */
fsnotify_remove_first_event(group);
@@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
kevent = get_one_event(group, count);
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
@@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case FIONREAD:
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
list_for_each_entry(fsn_event, &group->notification_list,
list) {
send_len += sizeof(struct inotify_event);
send_len += round_event_name_len(fsn_event);
}
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
ret = put_user(send_len, (int __user *) p);
break;
}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index e455e83ceeeb..66f85c651c52 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
- BUG_ON(!mutex_is_locked(&group->notification_mutex));
+ assert_spin_locked(&group->notification_lock);
return list_empty(&group->notification_list) ? true : false;
}
@@ -73,8 +73,17 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
/* Overflow events are per-group and we don't want to free them */
if (!event || event->mask == FS_Q_OVERFLOW)
return;
- /* If the event is still queued, we have a problem... */
- WARN_ON(!list_empty(&event->list));
+ /*
+ * If the event is still queued, we have a problem... Do an unreliable
+ * lockless check first to avoid locking in the common case. The
+ * locking may be necessary for permission events which got removed
+ * from the list by a different CPU than the one freeing the event.
+ */
+ if (!list_empty(&event->list)) {
+ spin_lock(&group->notification_lock);
+ WARN_ON(!list_empty(&event->list));
+ spin_unlock(&group->notification_lock);
+ }
group->ops->free_event(event);
}
@@ -95,10 +104,10 @@ int fsnotify_add_event(struct fsnotify_group *group,
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
if (group->shutdown) {
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
return 2;
}
@@ -106,7 +115,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
ret = 2;
/* Queue overflow event only if it isn't already queued */
if (!list_empty(&group->overflow_event->list)) {
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
return ret;
}
event = group->overflow_event;
@@ -116,7 +125,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
if (!list_empty(list) && merge) {
ret = merge(list, event);
if (ret) {
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
return ret;
}
}
@@ -124,7 +133,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
queue:
group->q_len++;
list_add_tail(&event->list, list);
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
wake_up(&group->notification_waitq);
kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
@@ -139,7 +148,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
struct fsnotify_event *event;
- BUG_ON(!mutex_is_locked(&group->notification_mutex));
+ assert_spin_locked(&group->notification_lock);
pr_debug("%s: group=%p\n", __func__, group);
@@ -161,7 +170,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
*/
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
- BUG_ON(!mutex_is_locked(&group->notification_mutex));
+ assert_spin_locked(&group->notification_lock);
return list_first_entry(&group->notification_list,
struct fsnotify_event, list);
@@ -175,12 +184,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
{
struct fsnotify_event *event;
- mutex_lock(&group->notification_mutex);
+ spin_lock(&group->notification_lock);
while (!fsnotify_notify_queue_is_empty(group)) {
event = fsnotify_remove_first_event(group);
+ spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, event);
+ spin_lock(&group->notification_lock);
}
- mutex_unlock(&group->notification_mutex);
+ spin_unlock(&group->notification_lock);
}
/*
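The hunks above convert group->notification_mutex into group->notification_lock, a spinlock, so the notification queue can be manipulated from contexts that must not sleep. A spinlock cannot be held across a call that might re-take the same lock, so fsnotify_flush_notify() now releases notification_lock around each fsnotify_destroy_event() call (which may itself take the lock in its WARN_ON path). A minimal sketch of that pattern, paraphrasing the flush hunk above rather than adding anything new:

	/*
	 * Sketch: pop events while holding the spinlock, but drop it
	 * before handing each event to fsnotify_destroy_event().
	 */
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);	/* may take the lock */
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);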
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 8f20d6016e20..30bb10034120 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -5,11 +5,16 @@
#include <linux/magic.h>
#include <linux/ktime.h>
#include <linux/seq_file.h>
+#include <linux/user_namespace.h>
+#include <linux/nsfs.h>
static struct vfsmount *nsfs_mnt;
+static long ns_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
static const struct file_operations ns_file_operations = {
.llseek = no_llseek,
+ .unlocked_ioctl = ns_ioctl,
};
static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
@@ -44,22 +49,14 @@ static void nsfs_evict(struct inode *inode)
ns->ops->put(ns);
}
-void *ns_get_path(struct path *path, struct task_struct *task,
- const struct proc_ns_operations *ns_ops)
+static void *__ns_get_path(struct path *path, struct ns_common *ns)
{
- struct vfsmount *mnt = mntget(nsfs_mnt);
+ struct vfsmount *mnt = nsfs_mnt;
struct qstr qname = { .name = "", };
struct dentry *dentry;
struct inode *inode;
- struct ns_common *ns;
unsigned long d;
-again:
- ns = ns_ops->get(task);
- if (!ns) {
- mntput(mnt);
- return ERR_PTR(-ENOENT);
- }
rcu_read_lock();
d = atomic_long_read(&ns->stashed);
if (!d)
@@ -68,17 +65,16 @@ again:
if (!lockref_get_not_dead(&dentry->d_lockref))
goto slow;
rcu_read_unlock();
- ns_ops->put(ns);
+ ns->ops->put(ns);
got_it:
- path->mnt = mnt;
+ path->mnt = mntget(mnt);
path->dentry = dentry;
return NULL;
slow:
rcu_read_unlock();
inode = new_inode_pseudo(mnt->mnt_sb);
if (!inode) {
- ns_ops->put(ns);
- mntput(mnt);
+ ns->ops->put(ns);
return ERR_PTR(-ENOMEM);
}
inode->i_ino = ns->inum;
@@ -91,21 +87,96 @@ slow:
dentry = d_alloc_pseudo(mnt->mnt_sb, &qname);
if (!dentry) {
iput(inode);
- mntput(mnt);
return ERR_PTR(-ENOMEM);
}
d_instantiate(dentry, inode);
- dentry->d_fsdata = (void *)ns_ops;
+ dentry->d_fsdata = (void *)ns->ops;
d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
if (d) {
d_delete(dentry); /* make sure ->d_prune() does nothing */
dput(dentry);
cpu_relax();
- goto again;
+ return ERR_PTR(-EAGAIN);
}
goto got_it;
}
+void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct ns_common *ns;
+ void *ret;
+
+again:
+ ns = ns_ops->get(task);
+ if (!ns)
+ return ERR_PTR(-ENOENT);
+
+ ret = __ns_get_path(path, ns);
+ if (IS_ERR(ret) && PTR_ERR(ret) == -EAGAIN)
+ goto again;
+ return ret;
+}
+
+static int open_related_ns(struct ns_common *ns,
+ struct ns_common *(*get_ns)(struct ns_common *ns))
+{
+ struct path path = {};
+ struct file *f;
+ void *err;
+ int fd;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ while (1) {
+ struct ns_common *relative;
+
+ relative = get_ns(ns);
+ if (IS_ERR(relative)) {
+ put_unused_fd(fd);
+ return PTR_ERR(relative);
+ }
+
+ err = __ns_get_path(&path, relative);
+ if (IS_ERR(err) && PTR_ERR(err) == -EAGAIN)
+ continue;
+ break;
+ }
+ if (IS_ERR(err)) {
+ put_unused_fd(fd);
+ return PTR_ERR(err);
+ }
+
+ f = dentry_open(&path, O_RDONLY, current_cred());
+ path_put(&path);
+ if (IS_ERR(f)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(f);
+ } else
+ fd_install(fd, f);
+
+ return fd;
+}
+
+static long ns_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct ns_common *ns = get_proc_ns(file_inode(filp));
+
+ switch (ioctl) {
+ case NS_GET_USERNS:
+ return open_related_ns(ns, ns_get_owner);
+ case NS_GET_PARENT:
+ if (!ns->ops->get_parent)
+ return -EINVAL;
+ return open_related_ns(ns, ns->ops->get_parent);
+ default:
+ return -ENOTTY;
+ }
+}
+
int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops)
{
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1d67fcbf7160..8abab16b4602 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -2104,7 +2104,7 @@ int o2net_start_listening(struct o2nm_node *node)
BUG_ON(o2net_listen_sock != NULL);
mlog(ML_KTHREAD, "starting o2net thread...\n");
- o2net_wq = create_singlethread_workqueue("o2net");
+ o2net_wq = alloc_ordered_workqueue("o2net", WQ_MEM_RECLAIM);
if (o2net_wq == NULL) {
mlog(ML_ERROR, "unable to launch o2net thread\n");
return -ENOMEM; /* ? */
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 533bd524e41e..733e4e79c8e2 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1904,7 +1904,7 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
}
snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
- dlm->dlm_worker = create_singlethread_workqueue(wq_name);
+ dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
if (!dlm->dlm_worker) {
status = -ENOMEM;
mlog_errno(status);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index ef474cdd6404..354cdf9714aa 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -646,7 +646,7 @@ static int __init init_dlmfs_fs(void)
}
cleanup_inode = 1;
- user_dlm_worker = create_singlethread_workqueue("user_dlm");
+ user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
if (!user_dlm_worker) {
status = -ENOMEM;
goto bail;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 0b055bfb8e86..8f91639f8364 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2321,36 +2321,6 @@ out_mutex:
return ret;
}
-static ssize_t ocfs2_file_splice_read(struct file *in,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- int ret = 0, lock_level = 0;
- struct inode *inode = file_inode(in);
-
- trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- in->f_path.dentry->d_name.len,
- in->f_path.dentry->d_name.name, len);
-
- /*
- * See the comment in ocfs2_file_read_iter()
- */
- ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
- if (ret < 0) {
- mlog_errno(ret);
- goto bail;
- }
- ocfs2_inode_unlock(inode, lock_level);
-
- ret = generic_file_splice_read(in, ppos, pipe, len, flags);
-
-bail:
- return ret;
-}
-
static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
struct iov_iter *to)
{
@@ -2509,7 +2479,7 @@ const struct file_operations ocfs2_fops = {
#endif
.lock = ocfs2_lock,
.flock = ocfs2_flock,
- .splice_read = ocfs2_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
};
@@ -2554,7 +2524,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
.compat_ioctl = ocfs2_compat_ioctl,
#endif
.flock = ocfs2_flock,
- .splice_read = ocfs2_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
};
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 50cc55047443..5af68fcdf9d3 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -123,8 +123,6 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
#define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
#define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
-extern struct kmem_cache *ocfs2_inode_cache;
-
extern const struct address_space_operations ocfs2_aops;
extern const struct ocfs2_caching_operations ocfs2_inode_caching_ops;
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index f8f5fc5e6c05..0b58abcf1c6d 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -1314,8 +1314,6 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
-
DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 603b28d6f008..f56fe39fab04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2329,7 +2329,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
cleancache_init_shared_fs(sb);
- osb->ocfs2_wq = create_singlethread_workqueue("ocfs2_wq");
+ osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
if (!osb->ocfs2_wq) {
status = -ENOMEM;
mlog_errno(status);
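The ocfs2 hunks above replace create_singlethread_workqueue() with alloc_ordered_workqueue() or alloc_workqueue(), passing WQ_MEM_RECLAIM in every case so the workqueue keeps a rescuer thread and can make forward progress under memory pressure; the ordered variant preserves the old one-item-at-a-time behaviour where it still matters. A minimal sketch of the two call styles, with placeholder names not taken from the diff:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *ordered_wq;
	static struct workqueue_struct *parallel_wq;

	static int example_wq_init(void)
	{
		/* One work item at a time, like the old singlethread wq, plus a rescuer. */
		ordered_wq = alloc_ordered_workqueue("example_ordered", WQ_MEM_RECLAIM);

		/* Items may run concurrently; max_active == 0 selects the default limit. */
		parallel_wq = alloc_workqueue("example_wq-%s", WQ_MEM_RECLAIM, 0, "vol0");

		if (!ordered_wq || !parallel_wq)
			return -ENOMEM;
		return 0;
	}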
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 00235bf644dc..1e8fe844e69f 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,7 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
}
}
- dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
ret = 1;
out_release_op:
op_release(new_op);
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index a287a66d94e3..516ffb4dc9a0 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -11,14 +11,19 @@
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"
+#include "orangefs-debugfs.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
/* this file implements the /dev/pvfs2-req device node */
+uint32_t orangefs_userspace_version;
+
static int open_access_count;
+static DEFINE_MUTEX(devreq_mutex);
+
#define DUMP_DEVICE_ERROR() \
do { \
gossip_err("*****************************************************\n");\
@@ -43,7 +48,7 @@ static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
int index = hash_func(op->tag, hash_table_size);
- list_add_tail(&op->list, &htable_ops_in_progress[index]);
+ list_add_tail(&op->list, &orangefs_htable_ops_in_progress[index]);
}
/*
@@ -57,20 +62,20 @@ static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
index = hash_func(tag, hash_table_size);
- spin_lock(&htable_ops_in_progress_lock);
+ spin_lock(&orangefs_htable_ops_in_progress_lock);
list_for_each_entry_safe(op,
next,
- &htable_ops_in_progress[index],
+ &orangefs_htable_ops_in_progress[index],
list) {
if (op->tag == tag && !op_state_purged(op) &&
!op_state_given_up(op)) {
list_del_init(&op->list);
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
return op;
}
}
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
return NULL;
}
@@ -276,11 +281,11 @@ restart:
if (ret != 0)
goto error;
- spin_lock(&htable_ops_in_progress_lock);
+ spin_lock(&orangefs_htable_ops_in_progress_lock);
spin_lock(&cur_op->lock);
if (unlikely(op_state_given_up(cur_op))) {
spin_unlock(&cur_op->lock);
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
complete(&cur_op->waitq);
goto restart;
}
@@ -298,7 +303,7 @@ restart:
current->comm);
orangefs_devreq_add_op(cur_op);
spin_unlock(&cur_op->lock);
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
/* The client only asks to read one size buffer. */
return MAX_DEV_REQ_UPSIZE;
@@ -387,6 +392,13 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
return -EPROTO;
}
+ if (!orangefs_userspace_version) {
+ orangefs_userspace_version = head.version;
+ } else if (orangefs_userspace_version != head.version) {
+ gossip_err("Error: userspace version changes\n");
+ return -EPROTO;
+ }
+
/* remove the op from the in progress hash table */
op = orangefs_devreq_remove_op(head.tag);
if (!op) {
@@ -527,6 +539,7 @@ static int orangefs_devreq_release(struct inode *inode, struct file *file)
gossip_debug(GOSSIP_DEV_DEBUG,
"pvfs2-client-core: device close complete\n");
open_access_count = 0;
+ orangefs_userspace_version = 0;
mutex_unlock(&devreq_mutex);
return 0;
}
@@ -576,8 +589,6 @@ static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
struct ORANGEFS_dev_map_desc user_desc;
int ret = 0;
- struct dev_mask_info_s mask_info = { 0 };
- struct dev_mask2_info_s mask2_info = { 0, 0 };
int upstream_kmod = 1;
struct orangefs_sb_info_s *orangefs_sb;
@@ -619,7 +630,7 @@ static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
* all of the remounts are serviced (to avoid ops between
* mounts to fail)
*/
- ret = mutex_lock_interruptible(&request_mutex);
+ ret = mutex_lock_interruptible(&orangefs_request_mutex);
if (ret < 0)
return ret;
gossip_debug(GOSSIP_DEV_DEBUG,
@@ -654,7 +665,7 @@ static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
gossip_debug(GOSSIP_DEV_DEBUG,
"%s: priority remount complete\n",
__func__);
- mutex_unlock(&request_mutex);
+ mutex_unlock(&orangefs_request_mutex);
return ret;
case ORANGEFS_DEV_UPSTREAM:
@@ -668,134 +679,11 @@ static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
return ret;
case ORANGEFS_DEV_CLIENT_MASK:
- ret = copy_from_user(&mask2_info,
- (void __user *)arg,
- sizeof(struct dev_mask2_info_s));
-
- if (ret != 0)
- return -EIO;
-
- client_debug_mask.mask1 = mask2_info.mask1_value;
- client_debug_mask.mask2 = mask2_info.mask2_value;
-
- pr_info("%s: client debug mask has been been received "
- ":%llx: :%llx:\n",
- __func__,
- (unsigned long long)client_debug_mask.mask1,
- (unsigned long long)client_debug_mask.mask2);
-
- return ret;
-
+ return orangefs_debugfs_new_client_mask((void __user *)arg);
case ORANGEFS_DEV_CLIENT_STRING:
- ret = copy_from_user(&client_debug_array_string,
- (void __user *)arg,
- ORANGEFS_MAX_DEBUG_STRING_LEN);
- /*
- * The real client-core makes an effort to ensure
- * that actual strings that aren't too long to fit in
- * this buffer is what we get here. We're going to use
- * string functions on the stuff we got, so we'll make
- * this extra effort to try and keep from
- * flowing out of this buffer when we use the string
- * functions, even if somehow the stuff we end up
- * with here is garbage.
- */
- client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
- '\0';
-
- if (ret != 0) {
- pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
- __func__);
- return -EIO;
- }
-
- pr_info("%s: client debug array string has been received.\n",
- __func__);
-
- if (!help_string_initialized) {
-
- /* Free the "we don't know yet" default string... */
- kfree(debug_help_string);
-
- /* build a proper debug help string */
- if (orangefs_prepare_debugfs_help_string(0)) {
- gossip_err("%s: no debug help string \n",
- __func__);
- return -EIO;
- }
-
- /* Replace the boilerplate boot-time debug-help file. */
- debugfs_remove(help_file_dentry);
-
- help_file_dentry =
- debugfs_create_file(
- ORANGEFS_KMOD_DEBUG_HELP_FILE,
- 0444,
- debug_dir,
- debug_help_string,
- &debug_help_fops);
-
- if (!help_file_dentry) {
- gossip_err("%s: debugfs_create_file failed for"
- " :%s:!\n",
- __func__,
- ORANGEFS_KMOD_DEBUG_HELP_FILE);
- return -EIO;
- }
- }
-
- debug_mask_to_string(&client_debug_mask, 1);
-
- debugfs_remove(client_debug_dentry);
-
- orangefs_client_debug_init();
-
- help_string_initialized++;
-
- return ret;
-
+ return orangefs_debugfs_new_client_string((void __user *)arg);
case ORANGEFS_DEV_DEBUG:
- ret = copy_from_user(&mask_info,
- (void __user *)arg,
- sizeof(mask_info));
-
- if (ret != 0)
- return -EIO;
-
- if (mask_info.mask_type == KERNEL_MASK) {
- if ((mask_info.mask_value == 0)
- && (kernel_mask_set_mod_init)) {
- /*
- * the kernel debug mask was set when the
- * kernel module was loaded; don't override
- * it if the client-core was started without
- * a value for ORANGEFS_KMODMASK.
- */
- return 0;
- }
- debug_mask_to_string(&mask_info.mask_value,
- mask_info.mask_type);
- gossip_debug_mask = mask_info.mask_value;
- pr_info("%s: kernel debug mask has been modified to "
- ":%s: :%llx:\n",
- __func__,
- kernel_debug_string,
- (unsigned long long)gossip_debug_mask);
- } else if (mask_info.mask_type == CLIENT_MASK) {
- debug_mask_to_string(&mask_info.mask_value,
- mask_info.mask_type);
- pr_info("%s: client debug mask has been modified to"
- ":%s: :%llx:\n",
- __func__,
- client_debug_string,
- llu(mask_info.mask_value));
- } else {
- gossip_lerr("Invalid mask type....\n");
- return -EINVAL;
- }
-
- return ret;
-
+ return orangefs_debugfs_new_debug((void __user *)arg);
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h
index 66b99210f1f9..3b8923f8bf21 100644
--- a/fs/orangefs/downcall.h
+++ b/fs/orangefs/downcall.h
@@ -83,7 +83,10 @@ struct orangefs_listxattr_response {
};
struct orangefs_param_response {
- __s64 value;
+ union {
+ __s64 value64;
+ __s32 value32[2];
+ } u;
};
#define PERF_COUNT_BUF_SIZE 4096
@@ -98,6 +101,11 @@ struct orangefs_fs_key_response {
char fs_key[FS_KEY_BUF_SIZE];
};
+/* 2.9.6 */
+struct orangefs_features_response {
+ __u64 features;
+};
+
struct orangefs_downcall_s {
__s32 type;
__s32 status;
@@ -119,6 +127,7 @@ struct orangefs_downcall_s {
struct orangefs_param_response param;
struct orangefs_perf_count_response perf_count;
struct orangefs_fs_key_response fs_key;
+ struct orangefs_features_response features;
} resp;
};
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 526040e09f78..3386886596d6 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -14,6 +14,32 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
+static int flush_racache(struct inode *inode)
+{
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ struct orangefs_kernel_op_s *new_op;
+ int ret;
+
+ gossip_debug(GOSSIP_UTILS_DEBUG,
+ "%s: %pU: Handle is %pU | fs_id %d\n", __func__,
+ get_khandle_from_ino(inode), &orangefs_inode->refn.khandle,
+ orangefs_inode->refn.fs_id);
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);
+ if (!new_op)
+ return -ENOMEM;
+ new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;
+
+ ret = service_operation(new_op, "orangefs_flush_racache",
+ get_interruptible_flag(inode));
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: got return value of %d\n",
+ __func__, ret);
+
+ op_release(new_op);
+ return ret;
+}
+
/*
* Copy to client-core's address space from the buffers specified
* by the iovec upto total_size bytes.
@@ -386,7 +412,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
size_t bufmap_size;
ssize_t ret = -EINVAL;
- g_orangefs_stats.reads++;
+ orangefs_stats.reads++;
bufmap_size = orangefs_bufmap_size_query();
if (count > bufmap_size) {
@@ -427,7 +453,7 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter
gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
- g_orangefs_stats.reads++;
+ orangefs_stats.reads++;
rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
iocb->ki_pos = pos;
@@ -488,7 +514,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
}
iocb->ki_pos = pos;
- g_orangefs_stats.writes++;
+ orangefs_stats.writes++;
out:
@@ -591,15 +617,24 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
orangefs_flush_inode(inode);
/*
- * remove all associated inode pages from the page cache and mmap
+ * remove all associated inode pages from the page cache and
* readahead cache (if any); this forces an expensive refresh of
* data for the next caller of mmap (or 'get_block' accesses)
*/
if (file->f_path.dentry->d_inode &&
file->f_path.dentry->d_inode->i_mapping &&
- mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
+ mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {
+ if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "calling flush_racache on %pU\n",
+ get_khandle_from_ino(inode));
+ flush_racache(inode);
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "flush_racache finished\n");
+ }
truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
0);
+ }
return 0;
}
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 62c525936ee8..35269e31de92 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,7 +72,7 @@ static int orangefs_create(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -183,7 +183,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
if (IS_ERR(inode)) {
@@ -322,7 +322,7 @@ static int orangefs_symlink(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -386,7 +386,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c
index b6edbe9fb309..aa3830b741c7 100644
--- a/fs/orangefs/orangefs-cache.c
+++ b/fs/orangefs/orangefs-cache.c
@@ -73,8 +73,8 @@ char *get_opname_string(struct orangefs_kernel_op_s *new_op)
return "OP_STATFS";
else if (type == ORANGEFS_VFS_OP_TRUNCATE)
return "OP_TRUNCATE";
- else if (type == ORANGEFS_VFS_OP_MMAP_RA_FLUSH)
- return "OP_MMAP_RA_FLUSH";
+ else if (type == ORANGEFS_VFS_OP_RA_FLUSH)
+ return "OP_RA_FLUSH";
else if (type == ORANGEFS_VFS_OP_FS_MOUNT)
return "OP_FS_MOUNT";
else if (type == ORANGEFS_VFS_OP_FS_UMOUNT)
@@ -97,6 +97,8 @@ char *get_opname_string(struct orangefs_kernel_op_s *new_op)
return "OP_FSYNC";
else if (type == ORANGEFS_VFS_OP_FSKEY)
return "OP_FSKEY";
+ else if (type == ORANGEFS_VFS_OP_FEATURES)
+ return "OP_FEATURES";
}
return "OP_UNKNOWN?";
}
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 1714a737d556..9b24107c82a8 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -43,36 +43,35 @@
#include "protocol.h"
#include "orangefs-kernel.h"
-static int orangefs_debug_disabled = 1;
-
-static int orangefs_debug_help_open(struct inode *, struct file *);
+#define DEBUG_HELP_STRING_SIZE 4096
+#define HELP_STRING_UNINITIALIZED \
+ "Client Debug Keywords are unknown until the first time\n" \
+ "the client is started after boot.\n"
+#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help"
+#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug"
+#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug"
+#define ORANGEFS_VERBOSE "verbose"
+#define ORANGEFS_ALL "all"
-const struct file_operations debug_help_fops = {
- .open = orangefs_debug_help_open,
- .read = seq_read,
- .release = seq_release,
- .llseek = seq_lseek,
+/*
+ * An array of client_debug_mask will be built to hold debug keyword/mask
+ * values fetched from userspace.
+ */
+struct client_debug_mask {
+ char *keyword;
+ __u64 mask1;
+ __u64 mask2;
};
+static int orangefs_kernel_debug_init(void);
+
+static int orangefs_debug_help_open(struct inode *, struct file *);
static void *help_start(struct seq_file *, loff_t *);
static void *help_next(struct seq_file *, void *, loff_t *);
static void help_stop(struct seq_file *, void *);
static int help_show(struct seq_file *, void *);
-static const struct seq_operations help_debug_ops = {
- .start = help_start,
- .next = help_next,
- .stop = help_stop,
- .show = help_show,
-};
-
-/*
- * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and
- * ORANGEFS_KMOD_DEBUG_FILE.
- */
-static DEFINE_MUTEX(orangefs_debug_lock);
-
-int orangefs_debug_open(struct inode *, struct file *);
+static int orangefs_debug_open(struct inode *, struct file *);
static ssize_t orangefs_debug_read(struct file *,
char __user *,
@@ -84,6 +83,43 @@ static ssize_t orangefs_debug_write(struct file *,
size_t,
loff_t *);
+static int orangefs_prepare_cdm_array(char *);
+static void debug_mask_to_string(void *, int);
+static void do_k_string(void *, int);
+static void do_c_string(void *, int);
+static int keyword_is_amalgam(char *);
+static int check_amalgam_keyword(void *, int);
+static void debug_string_to_mask(char *, void *, int);
+static void do_c_mask(int, char *, struct client_debug_mask **);
+static void do_k_mask(int, char *, __u64 **);
+
+static char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none";
+static char *debug_help_string;
+static char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+static char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+
+static struct dentry *help_file_dentry;
+static struct dentry *client_debug_dentry;
+static struct dentry *debug_dir;
+
+static unsigned int kernel_mask_set_mod_init;
+static int orangefs_debug_disabled = 1;
+static int help_string_initialized;
+
+static const struct seq_operations help_debug_ops = {
+ .start = help_start,
+ .next = help_next,
+ .stop = help_stop,
+ .show = help_show,
+};
+
+const struct file_operations debug_help_fops = {
+ .open = orangefs_debug_help_open,
+ .read = seq_read,
+ .release = seq_release,
+ .llseek = seq_lseek,
+};
+
static const struct file_operations kernel_debug_fops = {
.open = orangefs_debug_open,
.read = orangefs_debug_read,
@@ -91,15 +127,55 @@ static const struct file_operations kernel_debug_fops = {
.llseek = generic_file_llseek,
};
+static int client_all_index;
+static int client_verbose_index;
+
+static struct client_debug_mask *cdm_array;
+static int cdm_element_count;
+
+static struct client_debug_mask client_debug_mask;
+
+/*
+ * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and
+ * ORANGEFS_KMOD_DEBUG_FILE.
+ */
+static DEFINE_MUTEX(orangefs_debug_lock);
+
/*
* initialize kmod debug operations, create orangefs debugfs dir and
* ORANGEFS_KMOD_DEBUG_HELP_FILE.
*/
-int orangefs_debugfs_init(void)
+int orangefs_debugfs_init(int debug_mask)
{
-
int rc = -ENOMEM;
+ /* convert input debug mask to a 64-bit unsigned integer */
+ orangefs_gossip_debug_mask = (unsigned long long)debug_mask;
+
+ /*
+ * set the kernel's gossip debug string; invalid mask values will
+ * be ignored.
+ */
+ debug_mask_to_string(&orangefs_gossip_debug_mask, 0);
+
+ /* remove any invalid values from the mask */
+ debug_string_to_mask(kernel_debug_string, &orangefs_gossip_debug_mask,
+ 0);
+
+ /*
+ * if the mask has a non-zero value, then indicate that the mask
+ * was set when the kernel module was loaded. The orangefs dev ioctl
+ * command will look at this boolean to determine if the kernel's
+ * debug mask should be overwritten when the client-core is started.
+ */
+ if (orangefs_gossip_debug_mask != 0)
+ kernel_mask_set_mod_init = true;
+
+ pr_info("%s: called with debug mask: :%s: :%llx:\n",
+ __func__,
+ kernel_debug_string,
+ (unsigned long long)orangefs_gossip_debug_mask);
+
debug_dir = debugfs_create_dir("orangefs", NULL);
if (!debug_dir) {
pr_info("%s: debugfs_create_dir failed.\n", __func__);
@@ -117,13 +193,58 @@ int orangefs_debugfs_init(void)
}
orangefs_debug_disabled = 0;
+
+ rc = orangefs_kernel_debug_init();
+
+out:
+
+ return rc;
+}
+
+/*
+ * initialize the kernel-debug file.
+ */
+static int orangefs_kernel_debug_init(void)
+{
+ int rc = -ENOMEM;
+ struct dentry *ret;
+ char *k_buffer = NULL;
+
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+ k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+ if (!k_buffer)
+ goto out;
+
+ if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ strcpy(k_buffer, kernel_debug_string);
+ strcat(k_buffer, "\n");
+ } else {
+ strcpy(k_buffer, "none\n");
+ pr_info("%s: overflow 1!\n", __func__);
+ }
+
+ ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE,
+ 0444,
+ debug_dir,
+ k_buffer,
+ &kernel_debug_fops);
+ if (!ret) {
+ pr_info("%s: failed to create %s.\n",
+ __func__,
+ ORANGEFS_KMOD_DEBUG_FILE);
+ goto out;
+ }
+
rc = 0;
out:
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
return rc;
}
+
void orangefs_debugfs_cleanup(void)
{
debugfs_remove_recursive(debug_dir);
@@ -196,49 +317,6 @@ static int help_show(struct seq_file *m, void *v)
}
/*
- * initialize the kernel-debug file.
- */
-int orangefs_kernel_debug_init(void)
-{
- int rc = -ENOMEM;
- struct dentry *ret;
- char *k_buffer = NULL;
-
- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
-
- k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
- if (!k_buffer)
- goto out;
-
- if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
- strcpy(k_buffer, kernel_debug_string);
- strcat(k_buffer, "\n");
- } else {
- strcpy(k_buffer, "none\n");
- pr_info("%s: overflow 1!\n", __func__);
- }
-
- ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE,
- 0444,
- debug_dir,
- k_buffer,
- &kernel_debug_fops);
- if (!ret) {
- pr_info("%s: failed to create %s.\n",
- __func__,
- ORANGEFS_KMOD_DEBUG_FILE);
- goto out;
- }
-
- rc = 0;
-
-out:
-
- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
- return rc;
-}
-
-/*
* initialize the client-debug file.
*/
int orangefs_client_debug_init(void)
@@ -282,7 +360,7 @@ out:
}
/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
-int orangefs_debug_open(struct inode *inode, struct file *file)
+static int orangefs_debug_open(struct inode *inode, struct file *file)
{
int rc = -ENODEV;
@@ -384,8 +462,8 @@ static ssize_t orangefs_debug_write(struct file *file,
*/
if (!strcmp(file->f_path.dentry->d_name.name,
ORANGEFS_KMOD_DEBUG_FILE)) {
- debug_string_to_mask(buf, &gossip_debug_mask, 0);
- debug_mask_to_string(&gossip_debug_mask, 0);
+ debug_string_to_mask(buf, &orangefs_gossip_debug_mask, 0);
+ debug_mask_to_string(&orangefs_gossip_debug_mask, 0);
debug_string = kernel_debug_string;
gossip_debug(GOSSIP_DEBUGFS_DEBUG,
"New kernel debug string is %s\n",
@@ -452,3 +530,546 @@ out:
kfree(buf);
return rc;
}
+
+/*
+ * After obtaining a string representation of the client's debug
+ * keywords and their associated masks, this function is called to build an
+ * array of these values.
+ */
+static int orangefs_prepare_cdm_array(char *debug_array_string)
+{
+ int i;
+ int rc = -EINVAL;
+ char *cds_head = NULL;
+ char *cds_delimiter = NULL;
+ int keyword_len = 0;
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+ /*
+ * figure out how many elements the cdm_array needs.
+ */
+ for (i = 0; i < strlen(debug_array_string); i++)
+ if (debug_array_string[i] == '\n')
+ cdm_element_count++;
+
+ if (!cdm_element_count) {
+ pr_info("No elements in client debug array string!\n");
+ goto out;
+ }
+
+ cdm_array =
+ kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
+ GFP_KERNEL);
+ if (!cdm_array) {
+ pr_info("malloc failed for cdm_array!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cds_head = debug_array_string;
+
+ for (i = 0; i < cdm_element_count; i++) {
+ cds_delimiter = strchr(cds_head, '\n');
+ *cds_delimiter = '\0';
+
+ keyword_len = strcspn(cds_head, " ");
+
+ cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
+ if (!cdm_array[i].keyword) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sscanf(cds_head,
+ "%s %llx %llx",
+ cdm_array[i].keyword,
+ (unsigned long long *)&(cdm_array[i].mask1),
+ (unsigned long long *)&(cdm_array[i].mask2));
+
+ if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
+ client_verbose_index = i;
+
+ if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
+ client_all_index = i;
+
+ cds_head = cds_delimiter + 1;
+ }
+
+ rc = cdm_element_count;
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+
+out:
+
+ return rc;
+
+}
+
+/*
+ * /sys/kernel/debug/orangefs/debug-help can be catted to
+ * see all the available kernel and client debug keywords.
+ *
+ * When the kernel boots, we have no idea what keywords the
+ * client supports, nor their associated masks.
+ *
+ * We pass through this function once at boot and stamp a
+ * boilerplate "we don't know" message for the client in the
+ * debug-help file. We pass through here again when the client
+ * starts and then we can fill out the debug-help file fully.
+ *
+ * The client might be restarted any number of times between
+ * reboots, we only build the debug-help file the first time.
+ */
+int orangefs_prepare_debugfs_help_string(int at_boot)
+{
+ int rc = -EINVAL;
+ int i;
+ int byte_count = 0;
+ char *client_title = "Client Debug Keywords:\n";
+ char *kernel_title = "Kernel Debug Keywords:\n";
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+ if (at_boot) {
+ byte_count += strlen(HELP_STRING_UNINITIALIZED);
+ client_title = HELP_STRING_UNINITIALIZED;
+ } else {
+ /*
+ * fill the client keyword/mask array and remember
+ * how many elements there were.
+ */
+ cdm_element_count =
+ orangefs_prepare_cdm_array(client_debug_array_string);
+ if (cdm_element_count <= 0)
+ goto out;
+
+ /* Count the bytes destined for debug_help_string. */
+ byte_count += strlen(client_title);
+
+ for (i = 0; i < cdm_element_count; i++) {
+ byte_count += strlen(cdm_array[i].keyword + 2);
+ if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+ pr_info("%s: overflow 1!\n", __func__);
+ goto out;
+ }
+ }
+
+ gossip_debug(GOSSIP_UTILS_DEBUG,
+ "%s: cdm_element_count:%d:\n",
+ __func__,
+ cdm_element_count);
+ }
+
+ byte_count += strlen(kernel_title);
+ for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+ byte_count +=
+ strlen(s_kmod_keyword_mask_map[i].keyword + 2);
+ if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+ pr_info("%s: overflow 2!\n", __func__);
+ goto out;
+ }
+ }
+
+ /* build debug_help_string. */
+ debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+ if (!debug_help_string) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ strcat(debug_help_string, client_title);
+
+ if (!at_boot) {
+ for (i = 0; i < cdm_element_count; i++) {
+ strcat(debug_help_string, "\t");
+ strcat(debug_help_string, cdm_array[i].keyword);
+ strcat(debug_help_string, "\n");
+ }
+ }
+
+ strcat(debug_help_string, "\n");
+ strcat(debug_help_string, kernel_title);
+
+ for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+ strcat(debug_help_string, "\t");
+ strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
+ strcat(debug_help_string, "\n");
+ }
+
+ rc = 0;
+
+out:
+
+ return rc;
+
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ */
+static void debug_mask_to_string(void *mask, int type)
+{
+ int i;
+ int len = 0;
+ char *debug_string;
+ int element_count = 0;
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+ if (type) {
+ debug_string = client_debug_string;
+ element_count = cdm_element_count;
+ } else {
+ debug_string = kernel_debug_string;
+ element_count = num_kmod_keyword_mask_map;
+ }
+
+ memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+
+ /*
+ * Some keywords, like "all" or "verbose", are amalgams of
+ * numerous other keywords. Make a special check for those
+ * before grinding through the whole mask only to find out
+ * later...
+ */
+ if (check_amalgam_keyword(mask, type))
+ goto out;
+
+ /* Build the debug string. */
+ for (i = 0; i < element_count; i++)
+ if (type)
+ do_c_string(mask, i);
+ else
+ do_k_string(mask, i);
+
+ len = strlen(debug_string);
+
+ if ((len) && (type))
+ client_debug_string[len - 1] = '\0';
+ else if (len)
+ kernel_debug_string[len - 1] = '\0';
+ else if (type)
+ strcpy(client_debug_string, "none");
+ else
+ strcpy(kernel_debug_string, "none");
+
+out:
+gossip_debug(GOSSIP_UTILS_DEBUG, "%s: string:%s:\n", __func__, debug_string);
+
+ return;
+
+}
+
+static void do_k_string(void *k_mask, int index)
+{
+ __u64 *mask = (__u64 *) k_mask;
+
+ if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword))
+ goto out;
+
+ if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
+ if ((strlen(kernel_debug_string) +
+ strlen(s_kmod_keyword_mask_map[index].keyword))
+ < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
+ strcat(kernel_debug_string,
+ s_kmod_keyword_mask_map[index].keyword);
+ strcat(kernel_debug_string, ",");
+ } else {
+ gossip_err("%s: overflow!\n", __func__);
+ strcpy(kernel_debug_string, ORANGEFS_ALL);
+ goto out;
+ }
+ }
+
+out:
+
+ return;
+}
+
+static void do_c_string(void *c_mask, int index)
+{
+ struct client_debug_mask *mask = (struct client_debug_mask *) c_mask;
+
+ if (keyword_is_amalgam(cdm_array[index].keyword))
+ goto out;
+
+ if ((mask->mask1 & cdm_array[index].mask1) ||
+ (mask->mask2 & cdm_array[index].mask2)) {
+ if ((strlen(client_debug_string) +
+ strlen(cdm_array[index].keyword) + 1)
+ < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
+ strcat(client_debug_string,
+ cdm_array[index].keyword);
+ strcat(client_debug_string, ",");
+ } else {
+ gossip_err("%s: overflow!\n", __func__);
+ strcpy(client_debug_string, ORANGEFS_ALL);
+ goto out;
+ }
+ }
+out:
+ return;
+}
+
+static int keyword_is_amalgam(char *keyword)
+{
+ int rc = 0;
+
+ if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE)))
+ rc = 1;
+
+ return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ *
+ * return 1 if we found an amalgam.
+ */
+static int check_amalgam_keyword(void *mask, int type)
+{
+ __u64 *k_mask;
+ struct client_debug_mask *c_mask;
+ int k_all_index = num_kmod_keyword_mask_map - 1;
+ int rc = 0;
+
+ if (type) {
+ c_mask = (struct client_debug_mask *) mask;
+
+ if ((c_mask->mask1 == cdm_array[client_all_index].mask1) &&
+ (c_mask->mask2 == cdm_array[client_all_index].mask2)) {
+ strcpy(client_debug_string, ORANGEFS_ALL);
+ rc = 1;
+ goto out;
+ }
+
+ if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) &&
+ (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) {
+ strcpy(client_debug_string, ORANGEFS_VERBOSE);
+ rc = 1;
+ goto out;
+ }
+
+ } else {
+ k_mask = (__u64 *) mask;
+
+ if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) {
+ strcpy(kernel_debug_string, ORANGEFS_ALL);
+ rc = 1;
+ goto out;
+ }
+ }
+
+out:
+
+ return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ */
+static void debug_string_to_mask(char *debug_string, void *mask, int type)
+{
+ char *unchecked_keyword;
+ int i;
+ char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL);
+ char *original_pointer;
+ int element_count = 0;
+ struct client_debug_mask *c_mask = NULL;
+ __u64 *k_mask = NULL;
+
+ gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+ if (type) {
+ c_mask = (struct client_debug_mask *)mask;
+ element_count = cdm_element_count;
+ } else {
+ k_mask = (__u64 *)mask;
+ *k_mask = 0;
+ element_count = num_kmod_keyword_mask_map;
+ }
+
+ original_pointer = strsep_fodder;
+ while ((unchecked_keyword = strsep(&strsep_fodder, ",")))
+ if (strlen(unchecked_keyword)) {
+ for (i = 0; i < element_count; i++)
+ if (type)
+ do_c_mask(i,
+ unchecked_keyword,
+ &c_mask);
+ else
+ do_k_mask(i,
+ unchecked_keyword,
+ &k_mask);
+ }
+
+ kfree(original_pointer);
+}
+
+static void do_c_mask(int i, char *unchecked_keyword,
+ struct client_debug_mask **sane_mask)
+{
+
+ if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) {
+ (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1;
+ (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2;
+ }
+}
+
+static void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask)
+{
+
+ if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword))
+ **sane_mask = (**sane_mask) |
+ s_kmod_keyword_mask_map[i].mask_val;
+}
+
+int orangefs_debugfs_new_client_mask(void __user *arg)
+{
+ struct dev_mask2_info_s mask2_info = {0};
+ int ret;
+
+ ret = copy_from_user(&mask2_info,
+ (void __user *)arg,
+ sizeof(struct dev_mask2_info_s));
+
+ if (ret != 0)
+ return -EIO;
+
+ client_debug_mask.mask1 = mask2_info.mask1_value;
+ client_debug_mask.mask2 = mask2_info.mask2_value;
+
+ pr_info("%s: client debug mask has been been received "
+ ":%llx: :%llx:\n",
+ __func__,
+ (unsigned long long)client_debug_mask.mask1,
+ (unsigned long long)client_debug_mask.mask2);
+
+ return ret;
+}
+
+int orangefs_debugfs_new_client_string(void __user *arg)
+{
+ int ret;
+
+ ret = copy_from_user(&client_debug_array_string,
+ (void __user *)arg,
+ ORANGEFS_MAX_DEBUG_STRING_LEN);
+ if (ret != 0)
+ return -EIO;
+
+ /*
+ * The real client-core makes an effort to ensure
+ * that actual strings that aren't too long to fit in
+ * this buffer is what we get here. We're going to use
+ * string functions on the stuff we got, so we'll make
+ * this extra effort to try and keep from
+ * flowing out of this buffer when we use the string
+ * functions, even if somehow the stuff we end up
+ * with here is garbage.
+ */
+ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
+ '\0';
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
+ return -EIO;
+ }
+
+ pr_info("%s: client debug array string has been received.\n",
+ __func__);
+
+ if (!help_string_initialized) {
+
+ /* Free the "we don't know yet" default string... */
+ kfree(debug_help_string);
+
+ /* build a proper debug help string */
+ if (orangefs_prepare_debugfs_help_string(0)) {
+ gossip_err("%s: no debug help string \n",
+ __func__);
+ return -EIO;
+ }
+
+ /* Replace the boilerplate boot-time debug-help file. */
+ debugfs_remove(help_file_dentry);
+
+ help_file_dentry =
+ debugfs_create_file(
+ ORANGEFS_KMOD_DEBUG_HELP_FILE,
+ 0444,
+ debug_dir,
+ debug_help_string,
+ &debug_help_fops);
+
+ if (!help_file_dentry) {
+ gossip_err("%s: debugfs_create_file failed for"
+ " :%s:!\n",
+ __func__,
+ ORANGEFS_KMOD_DEBUG_HELP_FILE);
+ return -EIO;
+ }
+ }
+
+ debug_mask_to_string(&client_debug_mask, 1);
+
+ debugfs_remove(client_debug_dentry);
+
+ orangefs_client_debug_init();
+
+ help_string_initialized++;
+
+ return ret;
+}
+
+int orangefs_debugfs_new_debug(void __user *arg)
+{
+ struct dev_mask_info_s mask_info = {0};
+ int ret;
+
+ ret = copy_from_user(&mask_info,
+ (void __user *)arg,
+ sizeof(mask_info));
+
+ if (ret != 0)
+ return -EIO;
+
+ if (mask_info.mask_type == KERNEL_MASK) {
+ if ((mask_info.mask_value == 0)
+ && (kernel_mask_set_mod_init)) {
+ /*
+ * the kernel debug mask was set when the
+ * kernel module was loaded; don't override
+ * it if the client-core was started without
+ * a value for ORANGEFS_KMODMASK.
+ */
+ return 0;
+ }
+ debug_mask_to_string(&mask_info.mask_value,
+ mask_info.mask_type);
+ orangefs_gossip_debug_mask = mask_info.mask_value;
+ pr_info("%s: kernel debug mask has been modified to "
+ ":%s: :%llx:\n",
+ __func__,
+ kernel_debug_string,
+ (unsigned long long)orangefs_gossip_debug_mask);
+ } else if (mask_info.mask_type == CLIENT_MASK) {
+ debug_mask_to_string(&mask_info.mask_value,
+ mask_info.mask_type);
+ pr_info("%s: client debug mask has been modified to"
+ ":%s: :%llx:\n",
+ __func__,
+ client_debug_string,
+ llu(mask_info.mask_value));
+ } else {
+ gossip_lerr("Invalid mask type....\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h
index e4828c0e3ef9..803517269ba6 100644
--- a/fs/orangefs/orangefs-debugfs.h
+++ b/fs/orangefs/orangefs-debugfs.h
@@ -1,3 +1,7 @@
-int orangefs_debugfs_init(void);
-int orangefs_kernel_debug_init(void);
+int orangefs_debugfs_init(int);
void orangefs_debugfs_cleanup(void);
+int orangefs_client_debug_init(void);
+int orangefs_prepare_debugfs_help_string(int);
+int orangefs_debugfs_new_client_mask(void __user *);
+int orangefs_debugfs_new_client_string(void __user *);
+int orangefs_debugfs_new_debug(void __user *);
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
index 9eac9d9a3f3a..a3d84ffee905 100644
--- a/fs/orangefs/orangefs-dev-proto.h
+++ b/fs/orangefs/orangefs-dev-proto.h
@@ -28,7 +28,7 @@
#define ORANGEFS_VFS_OP_RENAME 0xFF00000A
#define ORANGEFS_VFS_OP_STATFS 0xFF00000B
#define ORANGEFS_VFS_OP_TRUNCATE 0xFF00000C
-#define ORANGEFS_VFS_OP_MMAP_RA_FLUSH 0xFF00000D
+#define ORANGEFS_VFS_OP_RA_FLUSH 0xFF00000D
#define ORANGEFS_VFS_OP_FS_MOUNT 0xFF00000E
#define ORANGEFS_VFS_OP_FS_UMOUNT 0xFF00000F
#define ORANGEFS_VFS_OP_GETXATTR 0xFF000010
@@ -41,6 +41,10 @@
#define ORANGEFS_VFS_OP_FSYNC 0xFF00EE01
#define ORANGEFS_VFS_OP_FSKEY 0xFF00EE02
#define ORANGEFS_VFS_OP_READDIRPLUS 0xFF00EE03
+#define ORANGEFS_VFS_OP_FEATURES 0xFF00EE05 /* 2.9.6 */
+
+/* features is a 64-bit unsigned bitmask */
+#define ORANGEFS_FEATURE_READAHEAD 1
/*
* Misc constants. Please retain them as multiples of 8!
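ORANGEFS_VFS_OP_FEATURES (2.9.6 and later) lets the kernel ask the client-core which optional operations it supports; the reply is the 64-bit bitmask kept in orangefs_features, and callers test a bit before issuing the matching upcall, as the file.c hunk above does before ORANGEFS_VFS_OP_RA_FLUSH. A minimal sketch of that gate, reusing only names introduced in this series:

	/* Only 2.9.6+ client-cores understand the readahead-flush upcall. */
	if (orangefs_features & ORANGEFS_FEATURE_READAHEAD)
		flush_racache(inode);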
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 633c07a6e3d8..0a82048f3aaf 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -100,16 +100,6 @@ enum orangefs_vfs_op_states {
};
/*
- * An array of client_debug_mask will be built to hold debug keyword/mask
- * values fetched from userspace.
- */
-struct client_debug_mask {
- char *keyword;
- __u64 mask1;
- __u64 mask2;
-};
-
-/*
* orangefs kernel memory related flags
*/
@@ -119,29 +109,6 @@ struct client_debug_mask {
#define ORANGEFS_CACHE_CREATE_FLAGS 0
#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */
-/* these functions are defined in orangefs-utils.c */
-int orangefs_prepare_cdm_array(char *debug_array_string);
-int orangefs_prepare_debugfs_help_string(int);
-
-/* defined in orangefs-debugfs.c */
-int orangefs_client_debug_init(void);
-
-void debug_string_to_mask(char *, void *, int);
-void do_c_mask(int, char *, struct client_debug_mask **);
-void do_k_mask(int, char *, __u64 **);
-
-void debug_mask_to_string(void *, int);
-void do_k_string(void *, int);
-void do_c_string(void *, int);
-int check_amalgam_keyword(void *, int);
-int keyword_is_amalgam(char *);
-
-/*these variables are defined in orangefs-mod.c */
-extern char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
-extern char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
-extern char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
-extern unsigned int kernel_mask_set_mod_init;
-
extern int orangefs_init_acl(struct inode *inode, struct inode *dir);
extern const struct xattr_handler *orangefs_xattr_handlers[];
@@ -331,7 +298,7 @@ struct orangefs_stats {
unsigned long writes;
};
-extern struct orangefs_stats g_orangefs_stats;
+extern struct orangefs_stats orangefs_stats;
/*
* NOTE: See Documentation/filesystems/porting for information
@@ -447,6 +414,8 @@ void purge_waiting_ops(void);
/*
* defined in super.c
*/
+extern uint64_t orangefs_features;
+
struct dentry *orangefs_mount(struct file_system_type *fst,
int flags,
const char *devname,
@@ -506,6 +475,8 @@ ssize_t orangefs_inode_read(struct inode *inode,
/*
* defined in devorangefs-req.c
*/
+extern uint32_t orangefs_userspace_version;
+
int orangefs_dev_init(void);
void orangefs_dev_cleanup(void);
int is_daemon_in_service(void);
@@ -543,20 +514,18 @@ bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op);
int orangefs_normalize_to_errno(__s32 error_code);
-extern struct mutex devreq_mutex;
-extern struct mutex request_mutex;
-extern int debug;
+extern struct mutex orangefs_request_mutex;
extern int op_timeout_secs;
extern int slot_timeout_secs;
-extern int dcache_timeout_msecs;
-extern int getattr_timeout_msecs;
+extern int orangefs_dcache_timeout_msecs;
+extern int orangefs_getattr_timeout_msecs;
extern struct list_head orangefs_superblocks;
extern spinlock_t orangefs_superblocks_lock;
extern struct list_head orangefs_request_list;
extern spinlock_t orangefs_request_list_lock;
extern wait_queue_head_t orangefs_request_list_waitq;
-extern struct list_head *htable_ops_in_progress;
-extern spinlock_t htable_ops_in_progress_lock;
+extern struct list_head *orangefs_htable_ops_in_progress;
+extern spinlock_t orangefs_htable_ops_in_progress_lock;
extern int hash_table_size;
extern const struct address_space_operations orangefs_address_operations;
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index e9fd5755c05f..2e5b03065f34 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -21,34 +21,17 @@
* global variables declared here
*/
-/* array of client debug keyword/mask values */
-struct client_debug_mask *cdm_array;
-int cdm_element_count;
-
-char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none";
-char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
-char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
-
-char *debug_help_string;
-int help_string_initialized;
-struct dentry *help_file_dentry;
-struct dentry *client_debug_dentry;
-struct dentry *debug_dir;
-int client_verbose_index;
-int client_all_index;
-struct orangefs_stats g_orangefs_stats;
+struct orangefs_stats orangefs_stats;
/* the size of the hash tables for ops in progress */
int hash_table_size = 509;
static ulong module_parm_debug_mask;
-__u64 gossip_debug_mask;
-struct client_debug_mask client_debug_mask = { NULL, 0, 0 };
-unsigned int kernel_mask_set_mod_init; /* implicitly false */
+__u64 orangefs_gossip_debug_mask;
int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS;
int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS;
-int dcache_timeout_msecs = 50;
-int getattr_timeout_msecs = 50;
+int orangefs_dcache_timeout_msecs = 50;
+int orangefs_getattr_timeout_msecs = 50;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ORANGEFS Development Team");
@@ -71,20 +54,17 @@ module_param(module_parm_debug_mask, ulong, 0644);
module_param(op_timeout_secs, int, 0);
module_param(slot_timeout_secs, int, 0);
-/* synchronizes the request device file */
-DEFINE_MUTEX(devreq_mutex);
-
/*
* Blocks non-priority requests from being queued for servicing. This
* could be used for protecting the request list data structure, but
* for now it's only being used to stall the op addition to the request
* list
*/
-DEFINE_MUTEX(request_mutex);
+DEFINE_MUTEX(orangefs_request_mutex);
/* hash table for storing operations waiting for matching downcall */
-struct list_head *htable_ops_in_progress;
-DEFINE_SPINLOCK(htable_ops_in_progress_lock);
+struct list_head *orangefs_htable_ops_in_progress;
+DEFINE_SPINLOCK(orangefs_htable_ops_in_progress_lock);
/* list for queueing upcall operations */
LIST_HEAD(orangefs_request_list);
@@ -100,32 +80,6 @@ static int __init orangefs_init(void)
int ret = -1;
__u32 i = 0;
- /* convert input debug mask to a 64-bit unsigned integer */
- gossip_debug_mask = (unsigned long long) module_parm_debug_mask;
-
- /*
- * set the kernel's gossip debug string; invalid mask values will
- * be ignored.
- */
- debug_mask_to_string(&gossip_debug_mask, 0);
-
- /* remove any invalid values from the mask */
- debug_string_to_mask(kernel_debug_string, &gossip_debug_mask, 0);
-
- /*
- * if the mask has a non-zero value, then indicate that the mask
- * was set when the kernel module was loaded. The orangefs dev ioctl
- * command will look at this boolean to determine if the kernel's
- * debug mask should be overwritten when the client-core is started.
- */
- if (gossip_debug_mask != 0)
- kernel_mask_set_mod_init = true;
-
- pr_info("%s: called with debug mask: :%s: :%llx:\n",
- __func__,
- kernel_debug_string,
- (unsigned long long)gossip_debug_mask);
-
ret = bdi_init(&orangefs_backing_dev_info);
if (ret)
@@ -146,9 +100,9 @@ static int __init orangefs_init(void)
if (ret < 0)
goto cleanup_op;
- htable_ops_in_progress =
+ orangefs_htable_ops_in_progress =
kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL);
- if (!htable_ops_in_progress) {
+ if (!orangefs_htable_ops_in_progress) {
gossip_err("Failed to initialize op hashtable");
ret = -ENOMEM;
goto cleanup_inode;
@@ -156,7 +110,7 @@ static int __init orangefs_init(void)
/* initialize a doubly linked at each hash table index */
for (i = 0; i < hash_table_size; i++)
- INIT_LIST_HEAD(&htable_ops_in_progress[i]);
+ INIT_LIST_HEAD(&orangefs_htable_ops_in_progress[i]);
ret = fsid_key_table_initialize();
if (ret < 0)
@@ -179,14 +133,10 @@ static int __init orangefs_init(void)
if (ret)
goto cleanup_key_table;
- ret = orangefs_debugfs_init();
+ ret = orangefs_debugfs_init(module_parm_debug_mask);
if (ret)
goto debugfs_init_failed;
- ret = orangefs_kernel_debug_init();
- if (ret)
- goto kernel_debug_init_failed;
-
ret = orangefs_sysfs_init();
if (ret)
goto sysfs_init_failed;
@@ -214,8 +164,6 @@ cleanup_device:
sysfs_init_failed:
-kernel_debug_init_failed:
-
debugfs_init_failed:
orangefs_debugfs_cleanup();
@@ -223,7 +171,7 @@ cleanup_key_table:
fsid_key_table_finalize();
cleanup_progress_table:
- kfree(htable_ops_in_progress);
+ kfree(orangefs_htable_ops_in_progress);
cleanup_inode:
orangefs_inode_cache_finalize();
@@ -250,12 +198,12 @@ static void __exit orangefs_exit(void)
orangefs_dev_cleanup();
BUG_ON(!list_empty(&orangefs_request_list));
for (i = 0; i < hash_table_size; i++)
- BUG_ON(!list_empty(&htable_ops_in_progress[i]));
+ BUG_ON(!list_empty(&orangefs_htable_ops_in_progress[i]));
orangefs_inode_cache_finalize();
op_cache_finalize();
- kfree(htable_ops_in_progress);
+ kfree(orangefs_htable_ops_in_progress);
bdi_destroy(&orangefs_backing_dev_info);
@@ -274,10 +222,10 @@ void purge_inprogress_ops(void)
struct orangefs_kernel_op_s *op;
struct orangefs_kernel_op_s *next;
- spin_lock(&htable_ops_in_progress_lock);
+ spin_lock(&orangefs_htable_ops_in_progress_lock);
list_for_each_entry_safe(op,
next,
- &htable_ops_in_progress[i],
+ &orangefs_htable_ops_in_progress[i],
list) {
set_op_state_purged(op);
gossip_debug(GOSSIP_DEV_DEBUG,
@@ -287,7 +235,7 @@ void purge_inprogress_ops(void)
op->op_state,
current->comm);
}
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
}
}
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index 375708c2db87..a799546a67f7 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -73,6 +73,24 @@
* Description:
* Time getattr is valid in milliseconds.
*
+ * What: /sys/fs/orangefs/readahead_count
+ * Date: Aug 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Readahead cache buffer count.
+ *
+ * What: /sys/fs/orangefs/readahead_size
+ * Date: Aug 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Readahead cache buffer size.
+ *
+ * What: /sys/fs/orangefs/readahead_count_size
+ * Date: Aug 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Readahead cache buffer count and size.
+ *
* What: /sys/fs/orangefs/acache/...
* Date: Jun 2015
* Contact: Martin Brandenburg <martin@omnibond.com>
@@ -121,159 +139,34 @@
#define PC_KOBJ_ID "pc"
#define STATS_KOBJ_ID "stats"
-struct orangefs_obj {
- struct kobject kobj;
- int op_timeout_secs;
- int perf_counter_reset;
- int perf_history_size;
- int perf_time_interval_secs;
- int slot_timeout_secs;
- int dcache_timeout_msecs;
- int getattr_timeout_msecs;
-};
-
-struct acache_orangefs_obj {
- struct kobject kobj;
- int hard_limit;
- int reclaim_percentage;
- int soft_limit;
- int timeout_msecs;
-};
-
-struct capcache_orangefs_obj {
- struct kobject kobj;
- int hard_limit;
- int reclaim_percentage;
- int soft_limit;
- int timeout_secs;
-};
-
-struct ccache_orangefs_obj {
- struct kobject kobj;
- int hard_limit;
- int reclaim_percentage;
- int soft_limit;
- int timeout_secs;
-};
-
-struct ncache_orangefs_obj {
- struct kobject kobj;
- int hard_limit;
- int reclaim_percentage;
- int soft_limit;
- int timeout_msecs;
-};
-
-struct pc_orangefs_obj {
- struct kobject kobj;
- char *acache;
- char *capcache;
- char *ncache;
-};
-
-struct stats_orangefs_obj {
- struct kobject kobj;
- int reads;
- int writes;
-};
+/*
+ * Every item calls orangefs_attr_show and orangefs_attr_store through
+ * orangefs_sysfs_ops. They look at the orangefs_attributes further below to
+ * call one of sysfs_int_show, sysfs_int_store, sysfs_service_op_show, or
+ * sysfs_service_op_store.
+ */
struct orangefs_attribute {
struct attribute attr;
- ssize_t (*show)(struct orangefs_obj *orangefs_obj,
+ ssize_t (*show)(struct kobject *kobj,
struct orangefs_attribute *attr,
char *buf);
- ssize_t (*store)(struct orangefs_obj *orangefs_obj,
+ ssize_t (*store)(struct kobject *kobj,
struct orangefs_attribute *attr,
const char *buf,
size_t count);
};
-struct acache_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct acache_orangefs_obj *acache_orangefs_obj,
- struct acache_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct acache_orangefs_obj *acache_orangefs_obj,
- struct acache_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-struct capcache_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct capcache_orangefs_obj *capcache_orangefs_obj,
- struct capcache_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct capcache_orangefs_obj *capcache_orangefs_obj,
- struct capcache_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-struct ccache_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct ccache_orangefs_obj *ccache_orangefs_obj,
- struct ccache_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct ccache_orangefs_obj *ccache_orangefs_obj,
- struct ccache_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-struct ncache_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct ncache_orangefs_obj *ncache_orangefs_obj,
- struct ncache_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct ncache_orangefs_obj *ncache_orangefs_obj,
- struct ncache_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-struct pc_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct pc_orangefs_obj *pc_orangefs_obj,
- struct pc_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct pc_orangefs_obj *pc_orangefs_obj,
- struct pc_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
-struct stats_orangefs_attribute {
- struct attribute attr;
- ssize_t (*show)(struct stats_orangefs_obj *stats_orangefs_obj,
- struct stats_orangefs_attribute *attr,
- char *buf);
- ssize_t (*store)(struct stats_orangefs_obj *stats_orangefs_obj,
- struct stats_orangefs_attribute *attr,
- const char *buf,
- size_t count);
-};
-
static ssize_t orangefs_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct orangefs_attribute *attribute;
- struct orangefs_obj *orangefs_obj;
- int rc;
attribute = container_of(attr, struct orangefs_attribute, attr);
- orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(orangefs_obj, attribute, buf);
-
-out:
- return rc;
+ if (!attribute->show)
+ return -EIO;
+ return attribute->show(kobj, attribute, buf);
}
static ssize_t orangefs_attr_store(struct kobject *kobj,
@@ -282,24 +175,15 @@ static ssize_t orangefs_attr_store(struct kobject *kobj,
size_t len)
{
struct orangefs_attribute *attribute;
- struct orangefs_obj *orangefs_obj;
- int rc;
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "orangefs_attr_store: start\n");
+ if (!strcmp(kobj->name, PC_KOBJ_ID) ||
+ !strcmp(kobj->name, STATS_KOBJ_ID))
+ return -EPERM;
attribute = container_of(attr, struct orangefs_attribute, attr);
- orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
-
- if (!attribute->store) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->store(orangefs_obj, attribute, buf, len);
-
-out:
- return rc;
+ if (!attribute->store)
+ return -EIO;
+ return attribute->store(kobj, attribute, buf, len);
}
static const struct sysfs_ops orangefs_sysfs_ops = {
@@ -307,402 +191,58 @@ static const struct sysfs_ops orangefs_sysfs_ops = {
.store = orangefs_attr_store,
};
-static ssize_t acache_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct acache_orangefs_attribute *attribute;
- struct acache_orangefs_obj *acache_orangefs_obj;
- int rc;
-
- attribute = container_of(attr, struct acache_orangefs_attribute, attr);
- acache_orangefs_obj =
- container_of(kobj, struct acache_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(acache_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static ssize_t acache_orangefs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct acache_orangefs_attribute *attribute;
- struct acache_orangefs_obj *acache_orangefs_obj;
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "acache_orangefs_attr_store: start\n");
-
- attribute = container_of(attr, struct acache_orangefs_attribute, attr);
- acache_orangefs_obj =
- container_of(kobj, struct acache_orangefs_obj, kobj);
-
- if (!attribute->store) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->store(acache_orangefs_obj, attribute, buf, len);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops acache_orangefs_sysfs_ops = {
- .show = acache_orangefs_attr_show,
- .store = acache_orangefs_attr_store,
-};
-
-static ssize_t capcache_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct capcache_orangefs_attribute *attribute;
- struct capcache_orangefs_obj *capcache_orangefs_obj;
- int rc;
-
- attribute =
- container_of(attr, struct capcache_orangefs_attribute, attr);
- capcache_orangefs_obj =
- container_of(kobj, struct capcache_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(capcache_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static ssize_t capcache_orangefs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct capcache_orangefs_attribute *attribute;
- struct capcache_orangefs_obj *capcache_orangefs_obj;
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "capcache_orangefs_attr_store: start\n");
-
- attribute =
- container_of(attr, struct capcache_orangefs_attribute, attr);
- capcache_orangefs_obj =
- container_of(kobj, struct capcache_orangefs_obj, kobj);
-
- if (!attribute->store) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->store(capcache_orangefs_obj, attribute, buf, len);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops capcache_orangefs_sysfs_ops = {
- .show = capcache_orangefs_attr_show,
- .store = capcache_orangefs_attr_store,
-};
-
-static ssize_t ccache_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ccache_orangefs_attribute *attribute;
- struct ccache_orangefs_obj *ccache_orangefs_obj;
- int rc;
-
- attribute =
- container_of(attr, struct ccache_orangefs_attribute, attr);
- ccache_orangefs_obj =
- container_of(kobj, struct ccache_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(ccache_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static ssize_t ccache_orangefs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ccache_orangefs_attribute *attribute;
- struct ccache_orangefs_obj *ccache_orangefs_obj;
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "ccache_orangefs_attr_store: start\n");
-
- attribute =
- container_of(attr, struct ccache_orangefs_attribute, attr);
- ccache_orangefs_obj =
- container_of(kobj, struct ccache_orangefs_obj, kobj);
-
- if (!attribute->store) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->store(ccache_orangefs_obj, attribute, buf, len);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops ccache_orangefs_sysfs_ops = {
- .show = ccache_orangefs_attr_show,
- .store = ccache_orangefs_attr_store,
-};
-
-static ssize_t ncache_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct ncache_orangefs_attribute *attribute;
- struct ncache_orangefs_obj *ncache_orangefs_obj;
- int rc;
-
- attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
- ncache_orangefs_obj =
- container_of(kobj, struct ncache_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(ncache_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static ssize_t ncache_orangefs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ncache_orangefs_attribute *attribute;
- struct ncache_orangefs_obj *ncache_orangefs_obj;
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "ncache_orangefs_attr_store: start\n");
-
- attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
- ncache_orangefs_obj =
- container_of(kobj, struct ncache_orangefs_obj, kobj);
-
- if (!attribute->store) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->store(ncache_orangefs_obj, attribute, buf, len);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops ncache_orangefs_sysfs_ops = {
- .show = ncache_orangefs_attr_show,
- .store = ncache_orangefs_attr_store,
-};
-
-static ssize_t pc_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct pc_orangefs_attribute *attribute;
- struct pc_orangefs_obj *pc_orangefs_obj;
- int rc;
-
- attribute = container_of(attr, struct pc_orangefs_attribute, attr);
- pc_orangefs_obj =
- container_of(kobj, struct pc_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(pc_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops pc_orangefs_sysfs_ops = {
- .show = pc_orangefs_attr_show,
-};
-
-static ssize_t stats_orangefs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct stats_orangefs_attribute *attribute;
- struct stats_orangefs_obj *stats_orangefs_obj;
- int rc;
-
- attribute = container_of(attr, struct stats_orangefs_attribute, attr);
- stats_orangefs_obj =
- container_of(kobj, struct stats_orangefs_obj, kobj);
-
- if (!attribute->show) {
- rc = -EIO;
- goto out;
- }
-
- rc = attribute->show(stats_orangefs_obj, attribute, buf);
-
-out:
- return rc;
-}
-
-static const struct sysfs_ops stats_orangefs_sysfs_ops = {
- .show = stats_orangefs_attr_show,
-};
-
-static void orangefs_release(struct kobject *kobj)
-{
- struct orangefs_obj *orangefs_obj;
-
- orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
- kfree(orangefs_obj);
-}
-
-static void acache_orangefs_release(struct kobject *kobj)
-{
- struct acache_orangefs_obj *acache_orangefs_obj;
-
- acache_orangefs_obj =
- container_of(kobj, struct acache_orangefs_obj, kobj);
- kfree(acache_orangefs_obj);
-}
-
-static void capcache_orangefs_release(struct kobject *kobj)
-{
- struct capcache_orangefs_obj *capcache_orangefs_obj;
-
- capcache_orangefs_obj =
- container_of(kobj, struct capcache_orangefs_obj, kobj);
- kfree(capcache_orangefs_obj);
-}
-
-static void ccache_orangefs_release(struct kobject *kobj)
-{
- struct ccache_orangefs_obj *ccache_orangefs_obj;
-
- ccache_orangefs_obj =
- container_of(kobj, struct ccache_orangefs_obj, kobj);
- kfree(ccache_orangefs_obj);
-}
-
-static void ncache_orangefs_release(struct kobject *kobj)
-{
- struct ncache_orangefs_obj *ncache_orangefs_obj;
-
- ncache_orangefs_obj =
- container_of(kobj, struct ncache_orangefs_obj, kobj);
- kfree(ncache_orangefs_obj);
-}
-
-static void pc_orangefs_release(struct kobject *kobj)
-{
- struct pc_orangefs_obj *pc_orangefs_obj;
-
- pc_orangefs_obj =
- container_of(kobj, struct pc_orangefs_obj, kobj);
- kfree(pc_orangefs_obj);
-}
-
-static void stats_orangefs_release(struct kobject *kobj)
-{
- struct stats_orangefs_obj *stats_orangefs_obj;
-
- stats_orangefs_obj =
- container_of(kobj, struct stats_orangefs_obj, kobj);
- kfree(stats_orangefs_obj);
-}
-
-static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr)
+static ssize_t sysfs_int_show(struct kobject *kobj,
+ struct orangefs_attribute *attr, char *buf)
{
int rc = -EIO;
- struct orangefs_attribute *orangefs_attr;
- struct stats_orangefs_attribute *stats_orangefs_attr;
- gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj_id);
+ gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n",
+ kobj->name);
- if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
- orangefs_attr = (struct orangefs_attribute *)attr;
-
- if (!strcmp(orangefs_attr->attr.name, "op_timeout_secs")) {
+ if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "op_timeout_secs")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%d\n",
op_timeout_secs);
goto out;
- } else if (!strcmp(orangefs_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"slot_timeout_secs")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%d\n",
slot_timeout_secs);
goto out;
- } else if (!strcmp(orangefs_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"dcache_timeout_msecs")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%d\n",
- dcache_timeout_msecs);
+ orangefs_dcache_timeout_msecs);
goto out;
- } else if (!strcmp(orangefs_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"getattr_timeout_msecs")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%d\n",
- getattr_timeout_msecs);
+ orangefs_getattr_timeout_msecs);
goto out;
} else {
goto out;
}
- } else if (!strcmp(kobj_id, STATS_KOBJ_ID)) {
- stats_orangefs_attr = (struct stats_orangefs_attribute *)attr;
-
- if (!strcmp(stats_orangefs_attr->attr.name, "reads")) {
+ } else if (!strcmp(kobj->name, STATS_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "reads")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%lu\n",
- g_orangefs_stats.reads);
+ orangefs_stats.reads);
goto out;
- } else if (!strcmp(stats_orangefs_attr->attr.name, "writes")) {
+ } else if (!strcmp(attr->attr.name, "writes")) {
rc = scnprintf(buf,
PAGE_SIZE,
"%lu\n",
- g_orangefs_stats.writes);
+ orangefs_stats.writes);
goto out;
} else {
goto out;
@@ -714,45 +254,13 @@ out:
return rc;
}
-static ssize_t int_orangefs_show(struct orangefs_obj *orangefs_obj,
- struct orangefs_attribute *attr,
- char *buf)
-{
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "int_orangefs_show:start attr->attr.name:%s:\n",
- attr->attr.name);
-
- rc = sysfs_int_show(ORANGEFS_KOBJ_ID, buf, (void *) attr);
-
- return rc;
-}
-
-static ssize_t int_stats_show(struct stats_orangefs_obj *stats_orangefs_obj,
- struct stats_orangefs_attribute *attr,
- char *buf)
-{
- int rc;
-
- gossip_debug(GOSSIP_SYSFS_DEBUG,
- "int_stats_show:start attr->attr.name:%s:\n",
- attr->attr.name);
-
- rc = sysfs_int_show(STATS_KOBJ_ID, buf, (void *) attr);
-
- return rc;
-}
-
-static ssize_t int_store(struct orangefs_obj *orangefs_obj,
- struct orangefs_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t sysfs_int_store(struct kobject *kobj,
+ struct orangefs_attribute *attr, const char *buf, size_t count)
{
int rc = 0;
gossip_debug(GOSSIP_SYSFS_DEBUG,
- "int_store: start attr->attr.name:%s: buf:%s:\n",
+ "sysfs_int_store: start attr->attr.name:%s: buf:%s:\n",
attr->attr.name, buf);
if (!strcmp(attr->attr.name, "op_timeout_secs")) {
@@ -762,10 +270,10 @@ static ssize_t int_store(struct orangefs_obj *orangefs_obj,
rc = kstrtoint(buf, 0, &slot_timeout_secs);
goto out;
} else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) {
- rc = kstrtoint(buf, 0, &dcache_timeout_msecs);
+ rc = kstrtoint(buf, 0, &orangefs_dcache_timeout_msecs);
goto out;
} else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) {
- rc = kstrtoint(buf, 0, &getattr_timeout_msecs);
+ rc = kstrtoint(buf, 0, &orangefs_getattr_timeout_msecs);
goto out;
} else {
goto out;
@@ -783,24 +291,19 @@ out:
/*
* obtain attribute values from userspace with a service operation.
*/
-static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr)
+static ssize_t sysfs_service_op_show(struct kobject *kobj,
+ struct orangefs_attribute *attr, char *buf)
{
struct orangefs_kernel_op_s *new_op = NULL;
int rc = 0;
char *ser_op_type = NULL;
- struct orangefs_attribute *orangefs_attr;
- struct acache_orangefs_attribute *acache_attr;
- struct capcache_orangefs_attribute *capcache_attr;
- struct ccache_orangefs_attribute *ccache_attr;
- struct ncache_orangefs_attribute *ncache_attr;
- struct pc_orangefs_attribute *pc_attr;
__u32 op_alloc_type;
gossip_debug(GOSSIP_SYSFS_DEBUG,
"sysfs_service_op_show: id:%s:\n",
- kobj_id);
+ kobj->name);
- if (strcmp(kobj_id, PC_KOBJ_ID))
+ if (strcmp(kobj->name, PC_KOBJ_ID))
op_alloc_type = ORANGEFS_VFS_OP_PARAM;
else
op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT;
@@ -818,124 +321,135 @@ static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr)
goto out;
}
- if (strcmp(kobj_id, PC_KOBJ_ID))
+ if (strcmp(kobj->name, PC_KOBJ_ID))
new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET;
- if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
- orangefs_attr = (struct orangefs_attribute *)attr;
+ if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) {
+ /* Drop unsupported requests first. */
+ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) &&
+ (!strcmp(attr->attr.name, "readahead_count") ||
+ !strcmp(attr->attr.name, "readahead_size") ||
+ !strcmp(attr->attr.name, "readahead_count_size"))) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (!strcmp(orangefs_attr->attr.name, "perf_history_size"))
+ if (!strcmp(attr->attr.name, "perf_history_size"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
- else if (!strcmp(orangefs_attr->attr.name,
+ else if (!strcmp(attr->attr.name,
"perf_time_interval_secs"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS;
- else if (!strcmp(orangefs_attr->attr.name,
+ else if (!strcmp(attr->attr.name,
"perf_counter_reset"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_PERF_RESET;
- } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
- acache_attr = (struct acache_orangefs_attribute *)attr;
+ else if (!strcmp(attr->attr.name,
+ "readahead_count"))
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT;
+
+ else if (!strcmp(attr->attr.name,
+ "readahead_size"))
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE;
- if (!strcmp(acache_attr->attr.name, "timeout_msecs"))
+ else if (!strcmp(attr->attr.name,
+ "readahead_count_size"))
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE;
+ } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "timeout_msecs"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
- if (!strcmp(acache_attr->attr.name, "hard_limit"))
+ if (!strcmp(attr->attr.name, "hard_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
- if (!strcmp(acache_attr->attr.name, "soft_limit"))
+ if (!strcmp(attr->attr.name, "soft_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
- if (!strcmp(acache_attr->attr.name, "reclaim_percentage"))
+ if (!strcmp(attr->attr.name, "reclaim_percentage"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE;
- } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
- capcache_attr = (struct capcache_orangefs_attribute *)attr;
-
- if (!strcmp(capcache_attr->attr.name, "timeout_secs"))
+ } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "timeout_secs"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
- if (!strcmp(capcache_attr->attr.name, "hard_limit"))
+ if (!strcmp(attr->attr.name, "hard_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
- if (!strcmp(capcache_attr->attr.name, "soft_limit"))
+ if (!strcmp(attr->attr.name, "soft_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
- if (!strcmp(capcache_attr->attr.name, "reclaim_percentage"))
+ if (!strcmp(attr->attr.name, "reclaim_percentage"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE;
- } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
- ccache_attr = (struct ccache_orangefs_attribute *)attr;
-
- if (!strcmp(ccache_attr->attr.name, "timeout_secs"))
+ } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "timeout_secs"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
- if (!strcmp(ccache_attr->attr.name, "hard_limit"))
+ if (!strcmp(attr->attr.name, "hard_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
- if (!strcmp(ccache_attr->attr.name, "soft_limit"))
+ if (!strcmp(attr->attr.name, "soft_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
- if (!strcmp(ccache_attr->attr.name, "reclaim_percentage"))
+ if (!strcmp(attr->attr.name, "reclaim_percentage"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE;
- } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
- ncache_attr = (struct ncache_orangefs_attribute *)attr;
-
- if (!strcmp(ncache_attr->attr.name, "timeout_msecs"))
+ } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "timeout_msecs"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
- if (!strcmp(ncache_attr->attr.name, "hard_limit"))
+ if (!strcmp(attr->attr.name, "hard_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
- if (!strcmp(ncache_attr->attr.name, "soft_limit"))
+ if (!strcmp(attr->attr.name, "soft_limit"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
- if (!strcmp(ncache_attr->attr.name, "reclaim_percentage"))
+ if (!strcmp(attr->attr.name, "reclaim_percentage"))
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE;
- } else if (!strcmp(kobj_id, PC_KOBJ_ID)) {
- pc_attr = (struct pc_orangefs_attribute *)attr;
-
- if (!strcmp(pc_attr->attr.name, ACACHE_KOBJ_ID))
+ } else if (!strcmp(kobj->name, PC_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, ACACHE_KOBJ_ID))
new_op->upcall.req.perf_count.type =
ORANGEFS_PERF_COUNT_REQUEST_ACACHE;
- if (!strcmp(pc_attr->attr.name, CAPCACHE_KOBJ_ID))
+ if (!strcmp(attr->attr.name, CAPCACHE_KOBJ_ID))
new_op->upcall.req.perf_count.type =
ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE;
- if (!strcmp(pc_attr->attr.name, NCACHE_KOBJ_ID))
+ if (!strcmp(attr->attr.name, NCACHE_KOBJ_ID))
new_op->upcall.req.perf_count.type =
ORANGEFS_PERF_COUNT_REQUEST_NCACHE;
} else {
gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n",
- kobj_id);
+ kobj->name);
rc = -EINVAL;
goto out;
}
- if (strcmp(kobj_id, PC_KOBJ_ID))
+ if (strcmp(kobj->name, PC_KOBJ_ID))
ser_op_type = "orangefs_param";
else
ser_op_type = "orangefs_perf_count";
@@ -948,11 +462,18 @@ static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr)
out:
if (!rc) {
- if (strcmp(kobj_id, PC_KOBJ_ID)) {
- rc = scnprintf(buf,
- PAGE_SIZE,
- "%d\n",
- (int)new_op->downcall.resp.param.value);
+ if (strcmp(kobj->name, PC_KOBJ_ID)) {
+ if (new_op->upcall.req.param.op ==
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE) {
+ rc = scnprintf(buf, PAGE_SIZE, "%d %d\n",
+ (int)new_op->downcall.resp.param.u.
+ value32[0],
+ (int)new_op->downcall.resp.param.u.
+ value32[1]);
+ } else {
+ rc = scnprintf(buf, PAGE_SIZE, "%d\n",
+ (int)new_op->downcall.resp.param.u.value64);
+ }
} else {
rc = scnprintf(
buf,
@@ -968,77 +489,6 @@ out:
}
-static ssize_t service_orangefs_show(struct orangefs_obj *orangefs_obj,
- struct orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(ORANGEFS_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
-static ssize_t
- service_acache_show(struct acache_orangefs_obj *acache_orangefs_obj,
- struct acache_orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(ACACHE_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
-static ssize_t service_capcache_show(struct capcache_orangefs_obj
- *capcache_orangefs_obj,
- struct capcache_orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(CAPCACHE_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
-static ssize_t service_ccache_show(struct ccache_orangefs_obj
- *ccache_orangefs_obj,
- struct ccache_orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(CCACHE_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
-static ssize_t
- service_ncache_show(struct ncache_orangefs_obj *ncache_orangefs_obj,
- struct ncache_orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(NCACHE_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
-static ssize_t
- service_pc_show(struct pc_orangefs_obj *pc_orangefs_obj,
- struct pc_orangefs_attribute *attr,
- char *buf)
-{
- int rc = 0;
-
- rc = sysfs_service_op_show(PC_KOBJ_ID, buf, (void *)attr);
-
- return rc;
-}
-
/*
* pass attribute values back to userspace with a service operation.
*
@@ -1050,20 +500,16 @@ static ssize_t
* We want to return 1 if we think everything went OK, and
* EINVAL if not.
*/
-static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
+static ssize_t sysfs_service_op_store(struct kobject *kobj,
+ struct orangefs_attribute *attr, const char *buf, size_t count)
{
struct orangefs_kernel_op_s *new_op = NULL;
int val = 0;
int rc = 0;
- struct orangefs_attribute *orangefs_attr;
- struct acache_orangefs_attribute *acache_attr;
- struct capcache_orangefs_attribute *capcache_attr;
- struct ccache_orangefs_attribute *ccache_attr;
- struct ncache_orangefs_attribute *ncache_attr;
gossip_debug(GOSSIP_SYSFS_DEBUG,
"sysfs_service_op_store: id:%s:\n",
- kobj_id);
+ kobj->name);
new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
if (!new_op)
@@ -1079,16 +525,29 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
}
/*
- * The value we want to send back to userspace is in buf.
+ * The value we want to send back to userspace is in buf, unless there
+ * are two parameters, which are handled specially below.
*/
- rc = kstrtoint(buf, 0, &val);
- if (rc)
- goto out;
+ if (strcmp(kobj->name, ORANGEFS_KOBJ_ID) ||
+ strcmp(attr->attr.name, "readahead_count_size")) {
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ goto out;
+ }
- if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
- orangefs_attr = (struct orangefs_attribute *)attr;
+ new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
+
+ if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) {
+ /* Drop unsupported requests first. */
+ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) &&
+ (!strcmp(attr->attr.name, "readahead_count") ||
+ !strcmp(attr->attr.name, "readahead_size") ||
+ !strcmp(attr->attr.name, "readahead_count_size"))) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) {
+ if (!strcmp(attr->attr.name, "perf_history_size")) {
if (val > 0) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
@@ -1096,7 +555,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(orangefs_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"perf_time_interval_secs")) {
if (val > 0) {
new_op->upcall.req.param.op =
@@ -1105,7 +564,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(orangefs_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"perf_counter_reset")) {
if ((val == 0) || (val == 1)) {
new_op->upcall.req.param.op =
@@ -1114,12 +573,55 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
+ } else if (!strcmp(attr->attr.name,
+ "readahead_count")) {
+ if ((val >= 0)) {
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT;
+ } else {
+ rc = 0;
+ goto out;
+ }
+ } else if (!strcmp(attr->attr.name,
+ "readahead_size")) {
+ if ((val >= 0)) {
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE;
+ } else {
+ rc = 0;
+ goto out;
+ }
+ } else if (!strcmp(attr->attr.name,
+ "readahead_count_size")) {
+ int val1, val2;
+ rc = sscanf(buf, "%d %d", &val1, &val2);
+ if (rc < 2) {
+ rc = 0;
+ goto out;
+ }
+ if ((val1 >= 0) && (val2 >= 0)) {
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE;
+ } else {
+ rc = 0;
+ goto out;
+ }
+ new_op->upcall.req.param.u.value32[0] = val1;
+ new_op->upcall.req.param.u.value32[1] = val2;
+ goto value_set;
+ } else if (!strcmp(attr->attr.name,
+ "perf_counter_reset")) {
+ if ((val > 0)) {
+ new_op->upcall.req.param.op =
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE;
+ } else {
+ rc = 0;
+ goto out;
+ }
}
- } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
- acache_attr = (struct acache_orangefs_attribute *)attr;
-
- if (!strcmp(acache_attr->attr.name, "hard_limit")) {
+ } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "hard_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
@@ -1127,7 +629,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(acache_attr->attr.name, "soft_limit")) {
+ } else if (!strcmp(attr->attr.name, "soft_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
@@ -1135,7 +637,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(acache_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"reclaim_percentage")) {
if ((val > -1) && (val < 101)) {
new_op->upcall.req.param.op =
@@ -1144,7 +646,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(acache_attr->attr.name, "timeout_msecs")) {
+ } else if (!strcmp(attr->attr.name, "timeout_msecs")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
@@ -1154,10 +656,8 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
}
}
- } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
- capcache_attr = (struct capcache_orangefs_attribute *)attr;
-
- if (!strcmp(capcache_attr->attr.name, "hard_limit")) {
+ } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "hard_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
@@ -1165,7 +665,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(capcache_attr->attr.name, "soft_limit")) {
+ } else if (!strcmp(attr->attr.name, "soft_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
@@ -1173,7 +673,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(capcache_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"reclaim_percentage")) {
if ((val > -1) && (val < 101)) {
new_op->upcall.req.param.op =
@@ -1182,7 +682,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(capcache_attr->attr.name, "timeout_secs")) {
+ } else if (!strcmp(attr->attr.name, "timeout_secs")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
@@ -1192,10 +692,8 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
}
}
- } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
- ccache_attr = (struct ccache_orangefs_attribute *)attr;
-
- if (!strcmp(ccache_attr->attr.name, "hard_limit")) {
+ } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "hard_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
@@ -1203,7 +701,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ccache_attr->attr.name, "soft_limit")) {
+ } else if (!strcmp(attr->attr.name, "soft_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
@@ -1211,7 +709,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ccache_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"reclaim_percentage")) {
if ((val > -1) && (val < 101)) {
new_op->upcall.req.param.op =
@@ -1220,7 +718,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ccache_attr->attr.name, "timeout_secs")) {
+ } else if (!strcmp(attr->attr.name, "timeout_secs")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
@@ -1230,10 +728,8 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
}
}
- } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
- ncache_attr = (struct ncache_orangefs_attribute *)attr;
-
- if (!strcmp(ncache_attr->attr.name, "hard_limit")) {
+ } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) {
+ if (!strcmp(attr->attr.name, "hard_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
@@ -1241,7 +737,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ncache_attr->attr.name, "soft_limit")) {
+ } else if (!strcmp(attr->attr.name, "soft_limit")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
@@ -1249,7 +745,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ncache_attr->attr.name,
+ } else if (!strcmp(attr->attr.name,
"reclaim_percentage")) {
if ((val > -1) && (val < 101)) {
new_op->upcall.req.param.op =
@@ -1258,7 +754,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc = 0;
goto out;
}
- } else if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) {
+ } else if (!strcmp(attr->attr.name, "timeout_msecs")) {
if (val > -1) {
new_op->upcall.req.param.op =
ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
@@ -1270,14 +766,13 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
} else {
gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n",
- kobj_id);
+ kobj->name);
rc = -EINVAL;
goto out;
}
- new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
-
- new_op->upcall.req.param.value = val;
+ new_op->upcall.req.param.u.value64 = val;
+value_set:
/*
* The service_operation will return a errno return code on
@@ -1290,7 +785,7 @@ static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
rc);
rc = 0;
} else {
- rc = 1;
+ rc = count;
}
out:
@@ -1302,127 +797,56 @@ out:
return rc;
}
-static ssize_t
- service_orangefs_store(struct orangefs_obj *orangefs_obj,
- struct orangefs_attribute *attr,
- const char *buf,
- size_t count)
-{
- int rc = 0;
-
- rc = sysfs_service_op_store(ORANGEFS_KOBJ_ID, buf, (void *) attr);
-
- /* rc should have an errno value if the service_op went bad. */
- if (rc == 1)
- rc = count;
-
- return rc;
-}
-
-static ssize_t
- service_acache_store(struct acache_orangefs_obj *acache_orangefs_obj,
- struct acache_orangefs_attribute *attr,
- const char *buf,
- size_t count)
-{
- int rc = 0;
-
- rc = sysfs_service_op_store(ACACHE_KOBJ_ID, buf, (void *) attr);
-
- /* rc should have an errno value if the service_op went bad. */
- if (rc == 1)
- rc = count;
-
- return rc;
-}
-
-static ssize_t
- service_capcache_store(struct capcache_orangefs_obj
- *capcache_orangefs_obj,
- struct capcache_orangefs_attribute *attr,
- const char *buf,
- size_t count)
-{
- int rc = 0;
-
- rc = sysfs_service_op_store(CAPCACHE_KOBJ_ID, buf, (void *) attr);
-
- /* rc should have an errno value if the service_op went bad. */
- if (rc == 1)
- rc = count;
-
- return rc;
-}
-
-static ssize_t service_ccache_store(struct ccache_orangefs_obj
- *ccache_orangefs_obj,
- struct ccache_orangefs_attribute *attr,
- const char *buf,
- size_t count)
-{
- int rc = 0;
-
- rc = sysfs_service_op_store(CCACHE_KOBJ_ID, buf, (void *) attr);
-
- /* rc should have an errno value if the service_op went bad. */
- if (rc == 1)
- rc = count;
-
- return rc;
-}
-
-static ssize_t
- service_ncache_store(struct ncache_orangefs_obj *ncache_orangefs_obj,
- struct ncache_orangefs_attribute *attr,
- const char *buf,
- size_t count)
-{
- int rc = 0;
-
- rc = sysfs_service_op_store(NCACHE_KOBJ_ID, buf, (void *) attr);
-
- /* rc should have an errno value if the service_op went bad. */
- if (rc == 1)
- rc = count;
-
- return rc;
-}
-
static struct orangefs_attribute op_timeout_secs_attribute =
- __ATTR(op_timeout_secs, 0664, int_orangefs_show, int_store);
+ __ATTR(op_timeout_secs, 0664, sysfs_int_show, sysfs_int_store);
static struct orangefs_attribute slot_timeout_secs_attribute =
- __ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store);
+ __ATTR(slot_timeout_secs, 0664, sysfs_int_show, sysfs_int_store);
static struct orangefs_attribute dcache_timeout_msecs_attribute =
- __ATTR(dcache_timeout_msecs, 0664, int_orangefs_show, int_store);
+ __ATTR(dcache_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store);
static struct orangefs_attribute getattr_timeout_msecs_attribute =
- __ATTR(getattr_timeout_msecs, 0664, int_orangefs_show, int_store);
+ __ATTR(getattr_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store);
+
+static struct orangefs_attribute readahead_count_attribute =
+ __ATTR(readahead_count, 0664, sysfs_service_op_show,
+ sysfs_service_op_store);
+
+static struct orangefs_attribute readahead_size_attribute =
+ __ATTR(readahead_size, 0664, sysfs_service_op_show,
+ sysfs_service_op_store);
+
+static struct orangefs_attribute readahead_count_size_attribute =
+ __ATTR(readahead_count_size, 0664, sysfs_service_op_show,
+ sysfs_service_op_store);
static struct orangefs_attribute perf_counter_reset_attribute =
__ATTR(perf_counter_reset,
0664,
- service_orangefs_show,
- service_orangefs_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct orangefs_attribute perf_history_size_attribute =
__ATTR(perf_history_size,
0664,
- service_orangefs_show,
- service_orangefs_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct orangefs_attribute perf_time_interval_secs_attribute =
__ATTR(perf_time_interval_secs,
0664,
- service_orangefs_show,
- service_orangefs_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct attribute *orangefs_default_attrs[] = {
&op_timeout_secs_attribute.attr,
&slot_timeout_secs_attribute.attr,
&dcache_timeout_msecs_attribute.attr,
&getattr_timeout_msecs_attribute.attr,
+ &readahead_count_attribute.attr,
+ &readahead_size_attribute.attr,
+ &readahead_count_size_attribute.attr,
&perf_counter_reset_attribute.attr,
&perf_history_size_attribute.attr,
&perf_time_interval_secs_attribute.attr,
@@ -1431,33 +855,32 @@ static struct attribute *orangefs_default_attrs[] = {
static struct kobj_type orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .release = orangefs_release,
.default_attrs = orangefs_default_attrs,
};
-static struct acache_orangefs_attribute acache_hard_limit_attribute =
+static struct orangefs_attribute acache_hard_limit_attribute =
__ATTR(hard_limit,
0664,
- service_acache_show,
- service_acache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct acache_orangefs_attribute acache_reclaim_percent_attribute =
+static struct orangefs_attribute acache_reclaim_percent_attribute =
__ATTR(reclaim_percentage,
0664,
- service_acache_show,
- service_acache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct acache_orangefs_attribute acache_soft_limit_attribute =
+static struct orangefs_attribute acache_soft_limit_attribute =
__ATTR(soft_limit,
0664,
- service_acache_show,
- service_acache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct acache_orangefs_attribute acache_timeout_msecs_attribute =
+static struct orangefs_attribute acache_timeout_msecs_attribute =
__ATTR(timeout_msecs,
0664,
- service_acache_show,
- service_acache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct attribute *acache_orangefs_default_attrs[] = {
&acache_hard_limit_attribute.attr,
@@ -1468,34 +891,33 @@ static struct attribute *acache_orangefs_default_attrs[] = {
};
static struct kobj_type acache_orangefs_ktype = {
- .sysfs_ops = &acache_orangefs_sysfs_ops,
- .release = acache_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = acache_orangefs_default_attrs,
};
-static struct capcache_orangefs_attribute capcache_hard_limit_attribute =
+static struct orangefs_attribute capcache_hard_limit_attribute =
__ATTR(hard_limit,
0664,
- service_capcache_show,
- service_capcache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct capcache_orangefs_attribute capcache_reclaim_percent_attribute =
+static struct orangefs_attribute capcache_reclaim_percent_attribute =
__ATTR(reclaim_percentage,
0664,
- service_capcache_show,
- service_capcache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct capcache_orangefs_attribute capcache_soft_limit_attribute =
+static struct orangefs_attribute capcache_soft_limit_attribute =
__ATTR(soft_limit,
0664,
- service_capcache_show,
- service_capcache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct capcache_orangefs_attribute capcache_timeout_secs_attribute =
+static struct orangefs_attribute capcache_timeout_secs_attribute =
__ATTR(timeout_secs,
0664,
- service_capcache_show,
- service_capcache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct attribute *capcache_orangefs_default_attrs[] = {
&capcache_hard_limit_attribute.attr,
@@ -1506,34 +928,33 @@ static struct attribute *capcache_orangefs_default_attrs[] = {
};
static struct kobj_type capcache_orangefs_ktype = {
- .sysfs_ops = &capcache_orangefs_sysfs_ops,
- .release = capcache_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = capcache_orangefs_default_attrs,
};
-static struct ccache_orangefs_attribute ccache_hard_limit_attribute =
+static struct orangefs_attribute ccache_hard_limit_attribute =
__ATTR(hard_limit,
0664,
- service_ccache_show,
- service_ccache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ccache_orangefs_attribute ccache_reclaim_percent_attribute =
+static struct orangefs_attribute ccache_reclaim_percent_attribute =
__ATTR(reclaim_percentage,
0664,
- service_ccache_show,
- service_ccache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ccache_orangefs_attribute ccache_soft_limit_attribute =
+static struct orangefs_attribute ccache_soft_limit_attribute =
__ATTR(soft_limit,
0664,
- service_ccache_show,
- service_ccache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ccache_orangefs_attribute ccache_timeout_secs_attribute =
+static struct orangefs_attribute ccache_timeout_secs_attribute =
__ATTR(timeout_secs,
0664,
- service_ccache_show,
- service_ccache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct attribute *ccache_orangefs_default_attrs[] = {
&ccache_hard_limit_attribute.attr,
@@ -1544,34 +965,33 @@ static struct attribute *ccache_orangefs_default_attrs[] = {
};
static struct kobj_type ccache_orangefs_ktype = {
- .sysfs_ops = &ccache_orangefs_sysfs_ops,
- .release = ccache_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = ccache_orangefs_default_attrs,
};
-static struct ncache_orangefs_attribute ncache_hard_limit_attribute =
+static struct orangefs_attribute ncache_hard_limit_attribute =
__ATTR(hard_limit,
0664,
- service_ncache_show,
- service_ncache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ncache_orangefs_attribute ncache_reclaim_percent_attribute =
+static struct orangefs_attribute ncache_reclaim_percent_attribute =
__ATTR(reclaim_percentage,
0664,
- service_ncache_show,
- service_ncache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ncache_orangefs_attribute ncache_soft_limit_attribute =
+static struct orangefs_attribute ncache_soft_limit_attribute =
__ATTR(soft_limit,
0664,
- service_ncache_show,
- service_ncache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
-static struct ncache_orangefs_attribute ncache_timeout_msecs_attribute =
+static struct orangefs_attribute ncache_timeout_msecs_attribute =
__ATTR(timeout_msecs,
0664,
- service_ncache_show,
- service_ncache_store);
+ sysfs_service_op_show,
+ sysfs_service_op_store);
static struct attribute *ncache_orangefs_default_attrs[] = {
&ncache_hard_limit_attribute.attr,
@@ -1582,27 +1002,26 @@ static struct attribute *ncache_orangefs_default_attrs[] = {
};
static struct kobj_type ncache_orangefs_ktype = {
- .sysfs_ops = &ncache_orangefs_sysfs_ops,
- .release = ncache_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = ncache_orangefs_default_attrs,
};
-static struct pc_orangefs_attribute pc_acache_attribute =
+static struct orangefs_attribute pc_acache_attribute =
__ATTR(acache,
0664,
- service_pc_show,
+ sysfs_service_op_show,
NULL);
-static struct pc_orangefs_attribute pc_capcache_attribute =
+static struct orangefs_attribute pc_capcache_attribute =
__ATTR(capcache,
0664,
- service_pc_show,
+ sysfs_service_op_show,
NULL);
-static struct pc_orangefs_attribute pc_ncache_attribute =
+static struct orangefs_attribute pc_ncache_attribute =
__ATTR(ncache,
0664,
- service_pc_show,
+ sysfs_service_op_show,
NULL);
static struct attribute *pc_orangefs_default_attrs[] = {
@@ -1613,21 +1032,20 @@ static struct attribute *pc_orangefs_default_attrs[] = {
};
static struct kobj_type pc_orangefs_ktype = {
- .sysfs_ops = &pc_orangefs_sysfs_ops,
- .release = pc_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = pc_orangefs_default_attrs,
};
-static struct stats_orangefs_attribute stats_reads_attribute =
+static struct orangefs_attribute stats_reads_attribute =
__ATTR(reads,
0664,
- int_stats_show,
+ sysfs_int_show,
NULL);
-static struct stats_orangefs_attribute stats_writes_attribute =
+static struct orangefs_attribute stats_writes_attribute =
__ATTR(writes,
0664,
- int_stats_show,
+ sysfs_int_show,
NULL);
static struct attribute *stats_orangefs_default_attrs[] = {
@@ -1637,18 +1055,17 @@ static struct attribute *stats_orangefs_default_attrs[] = {
};
static struct kobj_type stats_orangefs_ktype = {
- .sysfs_ops = &stats_orangefs_sysfs_ops,
- .release = stats_orangefs_release,
+ .sysfs_ops = &orangefs_sysfs_ops,
.default_attrs = stats_orangefs_default_attrs,
};
-static struct orangefs_obj *orangefs_obj;
-static struct acache_orangefs_obj *acache_orangefs_obj;
-static struct capcache_orangefs_obj *capcache_orangefs_obj;
-static struct ccache_orangefs_obj *ccache_orangefs_obj;
-static struct ncache_orangefs_obj *ncache_orangefs_obj;
-static struct pc_orangefs_obj *pc_orangefs_obj;
-static struct stats_orangefs_obj *stats_orangefs_obj;
+static struct kobject *orangefs_obj;
+static struct kobject *acache_orangefs_obj;
+static struct kobject *capcache_orangefs_obj;
+static struct kobject *ccache_orangefs_obj;
+static struct kobject *ncache_orangefs_obj;
+static struct kobject *pc_orangefs_obj;
+static struct kobject *stats_orangefs_obj;
int orangefs_sysfs_init(void)
{
@@ -1661,7 +1078,7 @@ int orangefs_sysfs_init(void)
if (!orangefs_obj)
goto out;
- rc = kobject_init_and_add(&orangefs_obj->kobj,
+ rc = kobject_init_and_add(orangefs_obj,
&orangefs_ktype,
fs_kobj,
ORANGEFS_KOBJ_ID);
@@ -1669,7 +1086,7 @@ int orangefs_sysfs_init(void)
if (rc)
goto ofs_obj_bail;
- kobject_uevent(&orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/acache. */
acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL);
@@ -1678,15 +1095,15 @@ int orangefs_sysfs_init(void)
goto ofs_obj_bail;
}
- rc = kobject_init_and_add(&acache_orangefs_obj->kobj,
+ rc = kobject_init_and_add(acache_orangefs_obj,
&acache_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
ACACHE_KOBJ_ID);
if (rc)
goto acache_obj_bail;
- kobject_uevent(&acache_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(acache_orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/capcache. */
capcache_orangefs_obj =
@@ -1696,14 +1113,14 @@ int orangefs_sysfs_init(void)
goto acache_obj_bail;
}
- rc = kobject_init_and_add(&capcache_orangefs_obj->kobj,
+ rc = kobject_init_and_add(capcache_orangefs_obj,
&capcache_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
CAPCACHE_KOBJ_ID);
if (rc)
goto capcache_obj_bail;
- kobject_uevent(&capcache_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(capcache_orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/ccache. */
ccache_orangefs_obj =
@@ -1713,14 +1130,14 @@ int orangefs_sysfs_init(void)
goto capcache_obj_bail;
}
- rc = kobject_init_and_add(&ccache_orangefs_obj->kobj,
+ rc = kobject_init_and_add(ccache_orangefs_obj,
&ccache_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
CCACHE_KOBJ_ID);
if (rc)
goto ccache_obj_bail;
- kobject_uevent(&ccache_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(ccache_orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/ncache. */
ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL);
@@ -1729,15 +1146,15 @@ int orangefs_sysfs_init(void)
goto ccache_obj_bail;
}
- rc = kobject_init_and_add(&ncache_orangefs_obj->kobj,
+ rc = kobject_init_and_add(ncache_orangefs_obj,
&ncache_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
NCACHE_KOBJ_ID);
if (rc)
goto ncache_obj_bail;
- kobject_uevent(&ncache_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(ncache_orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/perf_counters. */
pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL);
@@ -1746,15 +1163,15 @@ int orangefs_sysfs_init(void)
goto ncache_obj_bail;
}
- rc = kobject_init_and_add(&pc_orangefs_obj->kobj,
+ rc = kobject_init_and_add(pc_orangefs_obj,
&pc_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
"perf_counters");
if (rc)
goto pc_obj_bail;
- kobject_uevent(&pc_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(pc_orangefs_obj, KOBJ_ADD);
/* create /sys/fs/orangefs/stats. */
stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL);
@@ -1763,37 +1180,31 @@ int orangefs_sysfs_init(void)
goto pc_obj_bail;
}
- rc = kobject_init_and_add(&stats_orangefs_obj->kobj,
+ rc = kobject_init_and_add(stats_orangefs_obj,
&stats_orangefs_ktype,
- &orangefs_obj->kobj,
+ orangefs_obj,
STATS_KOBJ_ID);
if (rc)
goto stats_obj_bail;
- kobject_uevent(&stats_orangefs_obj->kobj, KOBJ_ADD);
+ kobject_uevent(stats_orangefs_obj, KOBJ_ADD);
goto out;
stats_obj_bail:
- kobject_put(&stats_orangefs_obj->kobj);
-
+ kobject_put(stats_orangefs_obj);
pc_obj_bail:
- kobject_put(&pc_orangefs_obj->kobj);
-
+ kobject_put(pc_orangefs_obj);
ncache_obj_bail:
- kobject_put(&ncache_orangefs_obj->kobj);
-
+ kobject_put(ncache_orangefs_obj);
ccache_obj_bail:
- kobject_put(&ccache_orangefs_obj->kobj);
-
+ kobject_put(ccache_orangefs_obj);
capcache_obj_bail:
- kobject_put(&capcache_orangefs_obj->kobj);
-
+ kobject_put(capcache_orangefs_obj);
acache_obj_bail:
- kobject_put(&acache_orangefs_obj->kobj);
-
+ kobject_put(acache_orangefs_obj);
ofs_obj_bail:
- kobject_put(&orangefs_obj->kobj);
+ kobject_put(orangefs_obj);
out:
return rc;
}
@@ -1801,13 +1212,11 @@ out:
void orangefs_sysfs_exit(void)
{
gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n");
-
- kobject_put(&acache_orangefs_obj->kobj);
- kobject_put(&capcache_orangefs_obj->kobj);
- kobject_put(&ccache_orangefs_obj->kobj);
- kobject_put(&ncache_orangefs_obj->kobj);
- kobject_put(&pc_orangefs_obj->kobj);
- kobject_put(&stats_orangefs_obj->kobj);
-
- kobject_put(&orangefs_obj->kobj);
+ kobject_put(acache_orangefs_obj);
+ kobject_put(capcache_orangefs_obj);
+ kobject_put(ccache_orangefs_obj);
+ kobject_put(ncache_orangefs_obj);
+ kobject_put(pc_orangefs_obj);
+ kobject_put(stats_orangefs_obj);
+ kobject_put(orangefs_obj);
}
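
With the seven per-kobject attribute and sysfs_ops variants collapsed into the single struct orangefs_attribute above, every kobject shares orangefs_sysfs_ops, and the helpers branch on kobj->name and attr->attr.name. A hypothetical illustration of what that buys: adding one more tunable only needs an attribute declaration and a slot in the relevant default-attribute list. The name below is invented for the example and is not part of the commit.

	/* Hypothetical extra tunable, routed through the shared helpers;
	 * sysfs_service_op_show()/_store() would look it up by name. */
	static struct orangefs_attribute example_tunable_attribute =
		__ATTR(example_tunable, 0664, sysfs_service_op_show,
		       sysfs_service_op_store);

The readahead_count_size attribute added above follows the same route; its store handler scans two integers from the written buffer ("%d %d") into value32[0]/value32[1], which is why the generic kstrtoint() path is skipped for that attribute.
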
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index d13c7291fd05..06af81f71e10 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -50,7 +50,7 @@ __s32 fsid_of_op(struct orangefs_kernel_op_s *op)
case ORANGEFS_VFS_OP_TRUNCATE:
fsid = op->upcall.req.truncate.refn.fs_id;
break;
- case ORANGEFS_VFS_OP_MMAP_RA_FLUSH:
+ case ORANGEFS_VFS_OP_RA_FLUSH:
fsid = op->upcall.req.ra_cache_flush.refn.fs_id;
break;
case ORANGEFS_VFS_OP_FS_UMOUNT:
@@ -347,7 +347,8 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes);
- orangefs_inode->getattr_time = jiffies + getattr_timeout_msecs*HZ/1000;
+ orangefs_inode->getattr_time = jiffies +
+ orangefs_getattr_timeout_msecs*HZ/1000;
ret = 0;
out:
op_release(new_op);
@@ -656,401 +657,3 @@ __s32 ORANGEFS_util_translate_mode(int mode)
return ret;
}
#undef NUM_MODES
-
-/*
- * After obtaining a string representation of the client's debug
- * keywords and their associated masks, this function is called to build an
- * array of these values.
- */
-int orangefs_prepare_cdm_array(char *debug_array_string)
-{
- int i;
- int rc = -EINVAL;
- char *cds_head = NULL;
- char *cds_delimiter = NULL;
- int keyword_len = 0;
-
- gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
-
- /*
- * figure out how many elements the cdm_array needs.
- */
- for (i = 0; i < strlen(debug_array_string); i++)
- if (debug_array_string[i] == '\n')
- cdm_element_count++;
-
- if (!cdm_element_count) {
- pr_info("No elements in client debug array string!\n");
- goto out;
- }
-
- cdm_array =
- kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
- GFP_KERNEL);
- if (!cdm_array) {
- pr_info("malloc failed for cdm_array!\n");
- rc = -ENOMEM;
- goto out;
- }
-
- cds_head = debug_array_string;
-
- for (i = 0; i < cdm_element_count; i++) {
- cds_delimiter = strchr(cds_head, '\n');
- *cds_delimiter = '\0';
-
- keyword_len = strcspn(cds_head, " ");
-
- cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
- if (!cdm_array[i].keyword) {
- rc = -ENOMEM;
- goto out;
- }
-
- sscanf(cds_head,
- "%s %llx %llx",
- cdm_array[i].keyword,
- (unsigned long long *)&(cdm_array[i].mask1),
- (unsigned long long *)&(cdm_array[i].mask2));
-
- if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
- client_verbose_index = i;
-
- if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
- client_all_index = i;
-
- cds_head = cds_delimiter + 1;
- }
-
- rc = cdm_element_count;
-
- gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
-
-out:
-
- return rc;
-
-}
-
-/*
- * /sys/kernel/debug/orangefs/debug-help can be catted to
- * see all the available kernel and client debug keywords.
- *
- * When the kernel boots, we have no idea what keywords the
- * client supports, nor their associated masks.
- *
- * We pass through this function once at boot and stamp a
- * boilerplate "we don't know" message for the client in the
- * debug-help file. We pass through here again when the client
- * starts and then we can fill out the debug-help file fully.
- *
- * The client might be restarted any number of times between
- * reboots, we only build the debug-help file the first time.
- */
-int orangefs_prepare_debugfs_help_string(int at_boot)
-{
- int rc = -EINVAL;
- int i;
- int byte_count = 0;
- char *client_title = "Client Debug Keywords:\n";
- char *kernel_title = "Kernel Debug Keywords:\n";
-
- gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
-
- if (at_boot) {
- byte_count += strlen(HELP_STRING_UNINITIALIZED);
- client_title = HELP_STRING_UNINITIALIZED;
- } else {
- /*
- * fill the client keyword/mask array and remember
- * how many elements there were.
- */
- cdm_element_count =
- orangefs_prepare_cdm_array(client_debug_array_string);
- if (cdm_element_count <= 0)
- goto out;
-
- /* Count the bytes destined for debug_help_string. */
- byte_count += strlen(client_title);
-
- for (i = 0; i < cdm_element_count; i++) {
- byte_count += strlen(cdm_array[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 1!\n", __func__);
- goto out;
- }
- }
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "%s: cdm_element_count:%d:\n",
- __func__,
- cdm_element_count);
- }
-
- byte_count += strlen(kernel_title);
- for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- byte_count +=
- strlen(s_kmod_keyword_mask_map[i].keyword + 2);
- if (byte_count >= DEBUG_HELP_STRING_SIZE) {
- pr_info("%s: overflow 2!\n", __func__);
- goto out;
- }
- }
-
- /* build debug_help_string. */
- debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
- if (!debug_help_string) {
- rc = -ENOMEM;
- goto out;
- }
-
- strcat(debug_help_string, client_title);
-
- if (!at_boot) {
- for (i = 0; i < cdm_element_count; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, cdm_array[i].keyword);
- strcat(debug_help_string, "\n");
- }
- }
-
- strcat(debug_help_string, "\n");
- strcat(debug_help_string, kernel_title);
-
- for (i = 0; i < num_kmod_keyword_mask_map; i++) {
- strcat(debug_help_string, "\t");
- strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
- strcat(debug_help_string, "\n");
- }
-
- rc = 0;
-
-out:
-
- return rc;
-
-}
-
-/*
- * kernel = type 0
- * client = type 1
- */
-void debug_mask_to_string(void *mask, int type)
-{
- int i;
- int len = 0;
- char *debug_string;
- int element_count = 0;
-
- gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
-
- if (type) {
- debug_string = client_debug_string;
- element_count = cdm_element_count;
- } else {
- debug_string = kernel_debug_string;
- element_count = num_kmod_keyword_mask_map;
- }
-
- memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
-
- /*
- * Some keywords, like "all" or "verbose", are amalgams of
- * numerous other keywords. Make a special check for those
- * before grinding through the whole mask only to find out
- * later...
- */
- if (check_amalgam_keyword(mask, type))
- goto out;
-
- /* Build the debug string. */
- for (i = 0; i < element_count; i++)
- if (type)
- do_c_string(mask, i);
- else
- do_k_string(mask, i);
-
- len = strlen(debug_string);
-
- if ((len) && (type))
- client_debug_string[len - 1] = '\0';
- else if (len)
- kernel_debug_string[len - 1] = '\0';
- else if (type)
- strcpy(client_debug_string, "none");
- else
- strcpy(kernel_debug_string, "none");
-
-out:
-gossip_debug(GOSSIP_UTILS_DEBUG, "%s: string:%s:\n", __func__, debug_string);
-
- return;
-
-}
-
-void do_k_string(void *k_mask, int index)
-{
- __u64 *mask = (__u64 *) k_mask;
-
- if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword))
- goto out;
-
- if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
- if ((strlen(kernel_debug_string) +
- strlen(s_kmod_keyword_mask_map[index].keyword))
- < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
- strcat(kernel_debug_string,
- s_kmod_keyword_mask_map[index].keyword);
- strcat(kernel_debug_string, ",");
- } else {
- gossip_err("%s: overflow!\n", __func__);
- strcpy(kernel_debug_string, ORANGEFS_ALL);
- goto out;
- }
- }
-
-out:
-
- return;
-}
-
-void do_c_string(void *c_mask, int index)
-{
- struct client_debug_mask *mask = (struct client_debug_mask *) c_mask;
-
- if (keyword_is_amalgam(cdm_array[index].keyword))
- goto out;
-
- if ((mask->mask1 & cdm_array[index].mask1) ||
- (mask->mask2 & cdm_array[index].mask2)) {
- if ((strlen(client_debug_string) +
- strlen(cdm_array[index].keyword) + 1)
- < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
- strcat(client_debug_string,
- cdm_array[index].keyword);
- strcat(client_debug_string, ",");
- } else {
- gossip_err("%s: overflow!\n", __func__);
- strcpy(client_debug_string, ORANGEFS_ALL);
- goto out;
- }
- }
-out:
- return;
-}
-
-int keyword_is_amalgam(char *keyword)
-{
- int rc = 0;
-
- if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE)))
- rc = 1;
-
- return rc;
-}
-
-/*
- * kernel = type 0
- * client = type 1
- *
- * return 1 if we found an amalgam.
- */
-int check_amalgam_keyword(void *mask, int type)
-{
- __u64 *k_mask;
- struct client_debug_mask *c_mask;
- int k_all_index = num_kmod_keyword_mask_map - 1;
- int rc = 0;
-
- if (type) {
- c_mask = (struct client_debug_mask *) mask;
-
- if ((c_mask->mask1 == cdm_array[client_all_index].mask1) &&
- (c_mask->mask2 == cdm_array[client_all_index].mask2)) {
- strcpy(client_debug_string, ORANGEFS_ALL);
- rc = 1;
- goto out;
- }
-
- if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) &&
- (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) {
- strcpy(client_debug_string, ORANGEFS_VERBOSE);
- rc = 1;
- goto out;
- }
-
- } else {
- k_mask = (__u64 *) mask;
-
- if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) {
- strcpy(kernel_debug_string, ORANGEFS_ALL);
- rc = 1;
- goto out;
- }
- }
-
-out:
-
- return rc;
-}
-
-/*
- * kernel = type 0
- * client = type 1
- */
-void debug_string_to_mask(char *debug_string, void *mask, int type)
-{
- char *unchecked_keyword;
- int i;
- char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL);
- char *original_pointer;
- int element_count = 0;
- struct client_debug_mask *c_mask;
- __u64 *k_mask;
-
- gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
-
- if (type) {
- c_mask = (struct client_debug_mask *)mask;
- element_count = cdm_element_count;
- } else {
- k_mask = (__u64 *)mask;
- *k_mask = 0;
- element_count = num_kmod_keyword_mask_map;
- }
-
- original_pointer = strsep_fodder;
- while ((unchecked_keyword = strsep(&strsep_fodder, ",")))
- if (strlen(unchecked_keyword)) {
- for (i = 0; i < element_count; i++)
- if (type)
- do_c_mask(i,
- unchecked_keyword,
- &c_mask);
- else
- do_k_mask(i,
- unchecked_keyword,
- &k_mask);
- }
-
- kfree(original_pointer);
-}
-
-void do_c_mask(int i,
- char *unchecked_keyword,
- struct client_debug_mask **sane_mask)
-{
-
- if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) {
- (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1;
- (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2;
- }
-}
-
-void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask)
-{
-
- if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword))
- **sane_mask = (**sane_mask) |
- s_kmod_keyword_mask_map[i].mask_val;
-}
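
Note (not part of the patch): the helpers removed above translate a comma-separated keyword string into mask bits by walking a keyword/mask table with strsep(). A minimal userspace sketch of that translation, with a made-up table standing in for s_kmod_keyword_mask_map:

#define _DEFAULT_SOURCE		/* for strsep()/strdup() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct keyword_mask_map {
	const char *keyword;
	uint64_t mask_val;
};

/* Invented keywords, purely for the demo. */
static const struct keyword_mask_map kmod_map[] = {
	{ "super", 1ULL << 0 },
	{ "inode", 1ULL << 1 },
	{ "file",  1ULL << 2 },
};

static uint64_t debug_string_to_mask(const char *debug_string)
{
	char *dup = strdup(debug_string);	/* kstrdup() in the kernel */
	char *cursor = dup;
	char *keyword;
	uint64_t mask = 0;
	size_t i;

	if (!dup)
		return 0;

	/* Same strsep() walk as the removed debug_string_to_mask(). */
	while ((keyword = strsep(&cursor, ",")) != NULL) {
		if (!*keyword)
			continue;
		for (i = 0; i < sizeof(kmod_map) / sizeof(kmod_map[0]); i++)
			if (!strcmp(kmod_map[i].keyword, keyword))
				mask |= kmod_map[i].mask_val;
	}

	free(dup);
	return mask;
}

int main(void)
{
	/* "super,file" sets bits 0 and 2, i.e. prints 0x5. */
	printf("0x%llx\n",
	       (unsigned long long)debug_string_to_mask("super,file"));
	return 0;
}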
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 3d7418c728f5..971307ad69be 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -4,26 +4,6 @@
#include <linux/slab.h>
#include <linux/ioctl.h>
-extern struct client_debug_mask *cdm_array;
-extern char *debug_help_string;
-extern int help_string_initialized;
-extern struct dentry *debug_dir;
-extern struct dentry *help_file_dentry;
-extern struct dentry *client_debug_dentry;
-extern const struct file_operations debug_help_fops;
-extern int client_all_index;
-extern int client_verbose_index;
-extern int cdm_element_count;
-#define DEBUG_HELP_STRING_SIZE 4096
-#define HELP_STRING_UNINITIALIZED \
- "Client Debug Keywords are unknown until the first time\n" \
- "the client is started after boot.\n"
-#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help"
-#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug"
-#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug"
-#define ORANGEFS_VERBOSE "verbose"
-#define ORANGEFS_ALL "all"
-
/* pvfs2-config.h ***********************************************************/
#define ORANGEFS_VERSION_MAJOR 2
#define ORANGEFS_VERSION_MINOR 9
@@ -426,13 +406,12 @@ do { \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
#else
-extern __u64 gossip_debug_mask;
-extern struct client_debug_mask client_debug_mask;
+extern __u64 orangefs_gossip_debug_mask;
/* try to avoid function call overhead by checking masks in macro */
#define gossip_debug(mask, fmt, ...) \
do { \
- if (gossip_debug_mask & (mask)) \
+ if (orangefs_gossip_debug_mask & (mask)) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
#endif /* GOSSIP_DISABLE_DEBUG */
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index b9da9a0281c9..c48859f16e7b 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -33,6 +33,7 @@ static const match_table_t tokens = {
{ Opt_err, NULL }
};
+uint64_t orangefs_features;
static int parse_mount_options(struct super_block *sb, char *options,
int silent)
@@ -249,6 +250,19 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
}
op_release(new_op);
+
+ if (orangefs_userspace_version >= 20906) {
+ new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
+ if (!new_op)
+ return -ENOMEM;
+ new_op->upcall.req.features.features = 0;
+ ret = service_operation(new_op, "orangefs_features", 0);
+ orangefs_features = new_op->downcall.resp.features.features;
+ op_release(new_op);
+ } else {
+ orangefs_features = 0;
+ }
+
return ret;
}
@@ -492,6 +506,19 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks);
spin_unlock(&orangefs_superblocks_lock);
op_release(new_op);
+
+ if (orangefs_userspace_version >= 20906) {
+ new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
+ if (!new_op)
+ return ERR_PTR(-ENOMEM);
+ new_op->upcall.req.features.features = 0;
+ ret = service_operation(new_op, "orangefs_features", 0);
+ orangefs_features = new_op->downcall.resp.features.features;
+ op_release(new_op);
+ } else {
+ orangefs_features = 0;
+ }
+
return dget(sb->s_root);
free_op:
@@ -530,8 +557,8 @@ void orangefs_kill_sb(struct super_block *sb)
* make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
* gets completed before we free the dang thing.
*/
- mutex_lock(&request_mutex);
- mutex_unlock(&request_mutex);
+ mutex_lock(&orangefs_request_mutex);
+ mutex_unlock(&orangefs_request_mutex);
/* free the orangefs superblock private data */
kfree(ORANGEFS_SB(sb));
diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h
index 001b20239407..af0b0e36d559 100644
--- a/fs/orangefs/upcall.h
+++ b/fs/orangefs/upcall.h
@@ -98,7 +98,7 @@ struct orangefs_truncate_request_s {
__s64 size;
};
-struct orangefs_mmap_ra_cache_flush_request_s {
+struct orangefs_ra_cache_flush_request_s {
struct orangefs_object_kref refn;
};
@@ -179,12 +179,18 @@ enum orangefs_param_request_op {
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT = 23,
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE = 24,
ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES = 25,
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE = 26,
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT = 27,
+ ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE = 28,
};
struct orangefs_param_request_s {
enum orangefs_param_request_type type;
enum orangefs_param_request_op op;
- __s64 value;
+ union {
+ __s64 value64;
+ __s32 value32[2];
+ } u;
char s_value[ORANGEFS_MAX_DEBUG_STRING_LEN];
};
@@ -204,6 +210,11 @@ struct orangefs_fs_key_request_s {
__s32 __pad1;
};
+/* 2.9.6 */
+struct orangefs_features_request_s {
+ __u64 features;
+};
+
struct orangefs_upcall_s {
__s32 type;
__u32 uid;
@@ -228,7 +239,7 @@ struct orangefs_upcall_s {
struct orangefs_rename_request_s rename;
struct orangefs_statfs_request_s statfs;
struct orangefs_truncate_request_s truncate;
- struct orangefs_mmap_ra_cache_flush_request_s ra_cache_flush;
+ struct orangefs_ra_cache_flush_request_s ra_cache_flush;
struct orangefs_fs_mount_request_s fs_mount;
struct orangefs_fs_umount_request_s fs_umount;
struct orangefs_getxattr_request_s getxattr;
@@ -240,6 +251,7 @@ struct orangefs_upcall_s {
struct orangefs_param_request_s param;
struct orangefs_perf_count_request_s perf_count;
struct orangefs_fs_key_request_s fs_key;
+ struct orangefs_features_request_s features;
} req;
};
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index 31635bc303fe..abcfa3fa9992 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -87,9 +87,9 @@ retry_servicing:
*/
if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
if (flags & ORANGEFS_OP_INTERRUPTIBLE)
- ret = mutex_lock_interruptible(&request_mutex);
+ ret = mutex_lock_interruptible(&orangefs_request_mutex);
else
- ret = mutex_lock_killable(&request_mutex);
+ ret = mutex_lock_killable(&orangefs_request_mutex);
/*
* check to see if we were interrupted while waiting for
* mutex
@@ -129,7 +129,7 @@ retry_servicing:
spin_unlock(&orangefs_request_list_lock);
if (!(flags & ORANGEFS_OP_NO_MUTEX))
- mutex_unlock(&request_mutex);
+ mutex_unlock(&orangefs_request_mutex);
ret = wait_for_matching_downcall(op, timeout,
flags & ORANGEFS_OP_INTERRUPTIBLE);
@@ -272,9 +272,9 @@ static void
} else if (op_state_in_progress(op)) {
/* op must be removed from the in progress htable */
spin_unlock(&op->lock);
- spin_lock(&htable_ops_in_progress_lock);
+ spin_lock(&orangefs_htable_ops_in_progress_lock);
list_del_init(&op->list);
- spin_unlock(&htable_ops_in_progress_lock);
+ spin_unlock(&orangefs_htable_ops_in_progress_lock);
gossip_debug(GOSSIP_WAIT_DEBUG,
"Interrupted: Removed op %p"
" from htable_ops_in_progress\n",
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 43fdc2765aea..db37a0e02d32 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -105,6 +105,13 @@ retry:
goto retry;
}
+ error = security_inode_copy_up_xattr(name);
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
+ if (error == 1) {
+ error = 0;
+ continue; /* Discard */
+ }
error = vfs_setxattr(new, name, value, size, 0);
if (error)
break;
@@ -248,6 +255,8 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
struct dentry *upper = NULL;
umode_t mode = stat->mode;
int err;
+ const struct cred *old_creds = NULL;
+ struct cred *new_creds = NULL;
newdentry = ovl_lookup_temp(workdir, dentry);
err = PTR_ERR(newdentry);
@@ -260,10 +269,23 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
if (IS_ERR(upper))
goto out1;
+ err = security_inode_copy_up(dentry, &new_creds);
+ if (err < 0)
+ goto out2;
+
+ if (new_creds)
+ old_creds = override_creds(new_creds);
+
/* Can't properly set mode on creation because of the umask */
stat->mode &= S_IFMT;
err = ovl_create_real(wdir, newdentry, stat, link, NULL, true);
stat->mode = mode;
+
+ if (new_creds) {
+ revert_creds(old_creds);
+ put_cred(new_creds);
+ }
+
if (err)
goto out2;
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 1560fdc09a5f..b0ffa1d1677e 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -489,6 +489,15 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
if (override_cred) {
override_cred->fsuid = inode->i_uid;
override_cred->fsgid = inode->i_gid;
+ if (!hardlink) {
+ err = security_dentry_create_files_as(dentry,
+ stat->mode, &dentry->d_name, old_cred,
+ override_cred);
+ if (err) {
+ put_cred(override_cred);
+ goto out_revert_creds;
+ }
+ }
put_cred(override_creds(override_cred));
put_cred(override_cred);
@@ -499,6 +508,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
err = ovl_create_over_whiteout(dentry, inode, stat,
link, hardlink);
}
+out_revert_creds:
revert_creds(old_cred);
if (!err) {
struct inode *realinode = d_inode(ovl_dentry_upper(dentry));
diff --git a/fs/pipe.c b/fs/pipe.c
index 4ebe6b2e5217..4fc422f0dea8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -267,7 +267,6 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (bufs) {
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
- const struct pipe_buf_operations *ops = buf->ops;
size_t chars = buf->len;
size_t written;
int error;
@@ -275,7 +274,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (chars > total_len)
chars = total_len;
- error = ops->confirm(pipe, buf);
+ error = pipe_buf_confirm(pipe, buf);
if (error) {
if (!ret)
ret = error;
@@ -299,8 +298,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
}
if (!buf->len) {
- buf->ops = NULL;
- ops->release(pipe, buf);
+ pipe_buf_release(pipe, buf);
curbuf = (curbuf + 1) & (pipe->buffers - 1);
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
@@ -383,11 +381,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
(pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + lastbuf;
- const struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
- if (ops->can_merge && offset + chars <= PAGE_SIZE) {
- ret = ops->confirm(pipe, buf);
+ if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
+ ret = pipe_buf_confirm(pipe, buf);
if (ret)
goto out;
@@ -664,7 +661,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
- buf->ops->release(pipe, buf);
+ pipe_buf_release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
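
Note (not part of the patch): the open-coded buf->ops->confirm()/->release() calls above become pipe_buf_confirm()/pipe_buf_release() wrappers whose definitions are not shown in this hunk. The standalone sketch below assumes they are thin forwarders, with pipe_buf_release() clearing ->ops before calling the release hook the way pipe_read() used to open-code it; the structs are stripped-down mocks, and the real helpers also take struct pipe_inode_info:

#include <stdio.h>
#include <stddef.h>

struct pipe_buffer;

struct pipe_buf_operations {
	int (*confirm)(struct pipe_buffer *buf);
	void (*release)(struct pipe_buffer *buf);
};

struct pipe_buffer {
	const struct pipe_buf_operations *ops;
	size_t len;
};

static int pipe_buf_confirm(struct pipe_buffer *buf)
{
	return buf->ops->confirm(buf);
}

/* Clear ->ops first so the buffer cannot be released twice, then call
 * the original release hook. */
static void pipe_buf_release(struct pipe_buffer *buf)
{
	const struct pipe_buf_operations *ops = buf->ops;

	buf->ops = NULL;
	ops->release(buf);
}

static int demo_confirm(struct pipe_buffer *buf)
{
	(void)buf;
	return 0;	/* contents are always "valid" in the demo */
}

static void demo_release(struct pipe_buffer *buf)
{
	(void)buf;
	puts("buffer released");
}

static const struct pipe_buf_operations demo_ops = {
	.confirm = demo_confirm,
	.release = demo_release,
};

int main(void)
{
	struct pipe_buffer buf = { .ops = &demo_ops, .len = 0 };

	if (!pipe_buf_confirm(&buf) && !buf.len)
		pipe_buf_release(&buf);
	return 0;
}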
diff --git a/fs/pnode.c b/fs/pnode.c
index 99899705b105..234a9ac49958 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m)
read_sequnlock_excl(&mount_lock);
}
hlist_add_head(&child->mnt_hash, list);
- return 0;
+ return count_mounts(m->mnt_ns, child);
}
/*
diff --git a/fs/pnode.h b/fs/pnode.h
index 0fcdbe7ca648..550f5a8b4fcf 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -52,4 +52,5 @@ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
const struct path *root);
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 88c7de12197b..89600fd5963d 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -186,51 +186,45 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
task_unlock(p);
rcu_read_unlock();
- seq_printf(m,
- "State:\t%s\n"
- "Tgid:\t%d\n"
- "Ngid:\t%d\n"
- "Pid:\t%d\n"
- "PPid:\t%d\n"
- "TracerPid:\t%d\n"
- "Uid:\t%d\t%d\t%d\t%d\n"
- "Gid:\t%d\t%d\t%d\t%d\n"
- "FDSize:\t%d\nGroups:\t",
- get_task_state(p),
- tgid, ngid, pid_nr_ns(pid, ns), ppid, tpid,
- from_kuid_munged(user_ns, cred->uid),
- from_kuid_munged(user_ns, cred->euid),
- from_kuid_munged(user_ns, cred->suid),
- from_kuid_munged(user_ns, cred->fsuid),
- from_kgid_munged(user_ns, cred->gid),
- from_kgid_munged(user_ns, cred->egid),
- from_kgid_munged(user_ns, cred->sgid),
- from_kgid_munged(user_ns, cred->fsgid),
- max_fds);
-
+ seq_printf(m, "State:\t%s", get_task_state(p));
+
+ seq_put_decimal_ull(m, "\nTgid:\t", tgid);
+ seq_put_decimal_ull(m, "\nNgid:\t", ngid);
+ seq_put_decimal_ull(m, "\nPid:\t", pid_nr_ns(pid, ns));
+ seq_put_decimal_ull(m, "\nPPid:\t", ppid);
+ seq_put_decimal_ull(m, "\nTracerPid:\t", tpid);
+ seq_put_decimal_ull(m, "\nUid:\t", from_kuid_munged(user_ns, cred->uid));
+ seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->euid));
+ seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->suid));
+ seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->fsuid));
+ seq_put_decimal_ull(m, "\nGid:\t", from_kgid_munged(user_ns, cred->gid));
+ seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->egid));
+ seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->sgid));
+ seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->fsgid));
+ seq_put_decimal_ull(m, "\nFDSize:\t", max_fds);
+
+ seq_puts(m, "\nGroups:\t");
group_info = cred->group_info;
for (g = 0; g < group_info->ngroups; g++)
- seq_printf(m, "%d ",
- from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
+ seq_put_decimal_ull(m, g ? " " : "",
+ from_kgid_munged(user_ns, group_info->gid[g]));
put_cred(cred);
+ /* Trailing space shouldn't have been added in the first place. */
+ seq_putc(m, ' ');
#ifdef CONFIG_PID_NS
seq_puts(m, "\nNStgid:");
for (g = ns->level; g <= pid->level; g++)
- seq_printf(m, "\t%d",
- task_tgid_nr_ns(p, pid->numbers[g].ns));
+ seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns));
seq_puts(m, "\nNSpid:");
for (g = ns->level; g <= pid->level; g++)
- seq_printf(m, "\t%d",
- task_pid_nr_ns(p, pid->numbers[g].ns));
+ seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns));
seq_puts(m, "\nNSpgid:");
for (g = ns->level; g <= pid->level; g++)
- seq_printf(m, "\t%d",
- task_pgrp_nr_ns(p, pid->numbers[g].ns));
+ seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns));
seq_puts(m, "\nNSsid:");
for (g = ns->level; g <= pid->level; g++)
- seq_printf(m, "\t%d",
- task_session_nr_ns(p, pid->numbers[g].ns));
+ seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns));
#endif
seq_putc(m, '\n');
}
@@ -299,11 +293,12 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
unlock_task_sighand(p, &flags);
}
- seq_printf(m, "Threads:\t%d\n", num_threads);
- seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
+ seq_put_decimal_ull(m, "Threads:\t", num_threads);
+ seq_put_decimal_ull(m, "\nSigQ:\t", qsize);
+ seq_put_decimal_ull(m, "/", qlim);
/* render them all */
- render_sigset_t(m, "SigPnd:\t", &pending);
+ render_sigset_t(m, "\nSigPnd:\t", &pending);
render_sigset_t(m, "ShdPnd:\t", &shpending);
render_sigset_t(m, "SigBlk:\t", &blocked);
render_sigset_t(m, "SigIgn:\t", &ignored);
@@ -348,17 +343,17 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
- seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
+ seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
+ seq_putc(m, '\n');
#endif
}
static inline void task_context_switch_counts(struct seq_file *m,
struct task_struct *p)
{
- seq_printf(m, "voluntary_ctxt_switches:\t%lu\n"
- "nonvoluntary_ctxt_switches:\t%lu\n",
- p->nvcsw,
- p->nivcsw);
+ seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw);
+ seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw);
+ seq_putc(m, '\n');
}
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
@@ -490,41 +485,41 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
start_time = nsec_to_clock_t(task->real_start_time);
seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
- seq_put_decimal_ll(m, ' ', ppid);
- seq_put_decimal_ll(m, ' ', pgid);
- seq_put_decimal_ll(m, ' ', sid);
- seq_put_decimal_ll(m, ' ', tty_nr);
- seq_put_decimal_ll(m, ' ', tty_pgrp);
- seq_put_decimal_ull(m, ' ', task->flags);
- seq_put_decimal_ull(m, ' ', min_flt);
- seq_put_decimal_ull(m, ' ', cmin_flt);
- seq_put_decimal_ull(m, ' ', maj_flt);
- seq_put_decimal_ull(m, ' ', cmaj_flt);
- seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime));
- seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime));
- seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime));
- seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime));
- seq_put_decimal_ll(m, ' ', priority);
- seq_put_decimal_ll(m, ' ', nice);
- seq_put_decimal_ll(m, ' ', num_threads);
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ull(m, ' ', start_time);
- seq_put_decimal_ull(m, ' ', vsize);
- seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
- seq_put_decimal_ull(m, ' ', rsslim);
- seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
- seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
- seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
- seq_put_decimal_ull(m, ' ', esp);
- seq_put_decimal_ull(m, ' ', eip);
+ seq_put_decimal_ll(m, " ", ppid);
+ seq_put_decimal_ll(m, " ", pgid);
+ seq_put_decimal_ll(m, " ", sid);
+ seq_put_decimal_ll(m, " ", tty_nr);
+ seq_put_decimal_ll(m, " ", tty_pgrp);
+ seq_put_decimal_ull(m, " ", task->flags);
+ seq_put_decimal_ull(m, " ", min_flt);
+ seq_put_decimal_ull(m, " ", cmin_flt);
+ seq_put_decimal_ull(m, " ", maj_flt);
+ seq_put_decimal_ull(m, " ", cmaj_flt);
+ seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
+ seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
+ seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
+ seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+ seq_put_decimal_ll(m, " ", priority);
+ seq_put_decimal_ll(m, " ", nice);
+ seq_put_decimal_ll(m, " ", num_threads);
+ seq_put_decimal_ull(m, " ", 0);
+ seq_put_decimal_ull(m, " ", start_time);
+ seq_put_decimal_ull(m, " ", vsize);
+ seq_put_decimal_ull(m, " ", mm ? get_mm_rss(mm) : 0);
+ seq_put_decimal_ull(m, " ", rsslim);
+ seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->start_code : 1) : 0);
+ seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->end_code : 1) : 0);
+ seq_put_decimal_ull(m, " ", (permitted && mm) ? mm->start_stack : 0);
+ seq_put_decimal_ull(m, " ", esp);
+ seq_put_decimal_ull(m, " ", eip);
/* The signal information here is obsolete.
* It must be decimal for Linux 2.0 compatibility.
* Use /proc/#/status for real-time signals.
*/
- seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
+ seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL);
+ seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL);
+ seq_put_decimal_ull(m, " ", sigign.sig[0] & 0x7fffffffUL);
+ seq_put_decimal_ull(m, " ", sigcatch.sig[0] & 0x7fffffffUL);
/*
* We used to output the absolute kernel address, but that's an
@@ -538,31 +533,31 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
else
seq_puts(m, " 0");
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ll(m, ' ', task->exit_signal);
- seq_put_decimal_ll(m, ' ', task_cpu(task));
- seq_put_decimal_ull(m, ' ', task->rt_priority);
- seq_put_decimal_ull(m, ' ', task->policy);
- seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
- seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
- seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
+ seq_put_decimal_ull(m, " ", 0);
+ seq_put_decimal_ull(m, " ", 0);
+ seq_put_decimal_ll(m, " ", task->exit_signal);
+ seq_put_decimal_ll(m, " ", task_cpu(task));
+ seq_put_decimal_ull(m, " ", task->rt_priority);
+ seq_put_decimal_ull(m, " ", task->policy);
+ seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task));
+ seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime));
+ seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime));
if (mm && permitted) {
- seq_put_decimal_ull(m, ' ', mm->start_data);
- seq_put_decimal_ull(m, ' ', mm->end_data);
- seq_put_decimal_ull(m, ' ', mm->start_brk);
- seq_put_decimal_ull(m, ' ', mm->arg_start);
- seq_put_decimal_ull(m, ' ', mm->arg_end);
- seq_put_decimal_ull(m, ' ', mm->env_start);
- seq_put_decimal_ull(m, ' ', mm->env_end);
+ seq_put_decimal_ull(m, " ", mm->start_data);
+ seq_put_decimal_ull(m, " ", mm->end_data);
+ seq_put_decimal_ull(m, " ", mm->start_brk);
+ seq_put_decimal_ull(m, " ", mm->arg_start);
+ seq_put_decimal_ull(m, " ", mm->arg_end);
+ seq_put_decimal_ull(m, " ", mm->env_start);
+ seq_put_decimal_ull(m, " ", mm->env_end);
} else
- seq_printf(m, " 0 0 0 0 0 0 0");
+ seq_puts(m, " 0 0 0 0 0 0 0");
if (permitted)
- seq_put_decimal_ll(m, ' ', task->exit_code);
+ seq_put_decimal_ll(m, " ", task->exit_code);
else
- seq_put_decimal_ll(m, ' ', 0);
+ seq_puts(m, " 0");
seq_putc(m, '\n');
if (mm)
@@ -598,13 +593,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
* seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
* size, resident, shared, text, data);
*/
- seq_put_decimal_ull(m, 0, size);
- seq_put_decimal_ull(m, ' ', resident);
- seq_put_decimal_ull(m, ' ', shared);
- seq_put_decimal_ull(m, ' ', text);
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ull(m, ' ', data);
- seq_put_decimal_ull(m, ' ', 0);
+ seq_put_decimal_ull(m, "", size);
+ seq_put_decimal_ull(m, " ", resident);
+ seq_put_decimal_ull(m, " ", shared);
+ seq_put_decimal_ull(m, " ", text);
+ seq_put_decimal_ull(m, " ", 0);
+ seq_put_decimal_ull(m, " ", data);
+ seq_put_decimal_ull(m, " ", 0);
seq_putc(m, '\n');
return 0;
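
Note (not part of the patch): the conversions above depend on seq_put_decimal_ull()/seq_put_decimal_ll() now taking a string prefix rather than a single-character delimiter, so each field's label rides along with its value. A userspace stand-in for the pattern, with FILE * playing the role of the seq_file:

#include <stdio.h>

static void put_decimal_ull(FILE *m, const char *delim, unsigned long long num)
{
	fputs(delim, m);
	fprintf(m, "%llu", num);
}

int main(void)
{
	/* Mirrors the converted task_state() output: every call carries
	 * its own "\nLabel:\t" prefix. */
	fputs("State:\tR (running)", stdout);
	put_decimal_ull(stdout, "\nTgid:\t", 1234);
	put_decimal_ull(stdout, "\nPid:\t", 1234);
	put_decimal_ull(stdout, "\nPPid:\t", 1);
	putchar('\n');
	return 0;
}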
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3b792ab3c0dc..dc7fe5f3a53c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2280,16 +2280,27 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
if (!p)
return -ESRCH;
- if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
- task_lock(p);
- if (slack_ns == 0)
- p->timer_slack_ns = p->default_timer_slack_ns;
- else
- p->timer_slack_ns = slack_ns;
- task_unlock(p);
- } else
- count = -EPERM;
+ if (p != current) {
+ if (!capable(CAP_SYS_NICE)) {
+ count = -EPERM;
+ goto out;
+ }
+
+ err = security_task_setscheduler(p);
+ if (err) {
+ count = err;
+ goto out;
+ }
+ }
+
+ task_lock(p);
+ if (slack_ns == 0)
+ p->timer_slack_ns = p->default_timer_slack_ns;
+ else
+ p->timer_slack_ns = slack_ns;
+ task_unlock(p);
+out:
put_task_struct(p);
return count;
@@ -2299,19 +2310,28 @@ static int timerslack_ns_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
struct task_struct *p;
- int err = 0;
+ int err = 0;
p = get_proc_task(inode);
if (!p)
return -ESRCH;
- if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
- task_lock(p);
- seq_printf(m, "%llu\n", p->timer_slack_ns);
- task_unlock(p);
- } else
- err = -EPERM;
+ if (p != current) {
+
+ if (!capable(CAP_SYS_NICE)) {
+ err = -EPERM;
+ goto out;
+ }
+ err = security_task_getscheduler(p);
+ if (err)
+ goto out;
+ }
+ task_lock(p);
+ seq_printf(m, "%llu\n", p->timer_slack_ns);
+ task_unlock(p);
+
+out:
put_task_struct(p);
return err;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index c633476616e0..bca66d83a765 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -390,6 +390,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
atomic_set(&ent->count, 1);
spin_lock_init(&ent->pde_unload_lock);
INIT_LIST_HEAD(&ent->pde_openers);
+ proc_set_user(ent, (*parent)->uid, (*parent)->gid);
+
out:
return ent;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index b9a8c813e5e6..8a428498d6b2 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -23,6 +23,25 @@ void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}
+static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
+{
+ char v[32];
+ static const char blanks[7] = {' ', ' ', ' ', ' ',' ', ' ', ' '};
+ int len;
+
+ len = num_to_str(v, sizeof(v), num << (PAGE_SHIFT - 10));
+
+ seq_write(m, s, 16);
+
+ if (len > 0) {
+ if (len < 8)
+ seq_write(m, blanks, 8 - len);
+
+ seq_write(m, v, len);
+ }
+ seq_write(m, " kB\n", 4);
+}
+
static int meminfo_proc_show(struct seq_file *m, void *v)
{
struct sysinfo i;
@@ -32,10 +51,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
unsigned long pages[NR_LRU_LISTS];
int lru;
-/*
- * display in kilobytes.
- */
-#define K(x) ((x) << (PAGE_SHIFT - 10))
si_meminfo(&i);
si_swapinfo(&i);
committed = percpu_counter_read_positive(&vm_committed_as);
@@ -50,136 +65,100 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
available = si_mem_available();
- /*
- * Tagged format, for easy grepping and expansion.
- */
- seq_printf(m,
- "MemTotal: %8lu kB\n"
- "MemFree: %8lu kB\n"
- "MemAvailable: %8lu kB\n"
- "Buffers: %8lu kB\n"
- "Cached: %8lu kB\n"
- "SwapCached: %8lu kB\n"
- "Active: %8lu kB\n"
- "Inactive: %8lu kB\n"
- "Active(anon): %8lu kB\n"
- "Inactive(anon): %8lu kB\n"
- "Active(file): %8lu kB\n"
- "Inactive(file): %8lu kB\n"
- "Unevictable: %8lu kB\n"
- "Mlocked: %8lu kB\n"
-#ifdef CONFIG_HIGHMEM
- "HighTotal: %8lu kB\n"
- "HighFree: %8lu kB\n"
- "LowTotal: %8lu kB\n"
- "LowFree: %8lu kB\n"
-#endif
-#ifndef CONFIG_MMU
- "MmapCopy: %8lu kB\n"
-#endif
- "SwapTotal: %8lu kB\n"
- "SwapFree: %8lu kB\n"
- "Dirty: %8lu kB\n"
- "Writeback: %8lu kB\n"
- "AnonPages: %8lu kB\n"
- "Mapped: %8lu kB\n"
- "Shmem: %8lu kB\n"
- "Slab: %8lu kB\n"
- "SReclaimable: %8lu kB\n"
- "SUnreclaim: %8lu kB\n"
- "KernelStack: %8lu kB\n"
- "PageTables: %8lu kB\n"
-#ifdef CONFIG_QUICKLIST
- "Quicklists: %8lu kB\n"
-#endif
- "NFS_Unstable: %8lu kB\n"
- "Bounce: %8lu kB\n"
- "WritebackTmp: %8lu kB\n"
- "CommitLimit: %8lu kB\n"
- "Committed_AS: %8lu kB\n"
- "VmallocTotal: %8lu kB\n"
- "VmallocUsed: %8lu kB\n"
- "VmallocChunk: %8lu kB\n"
-#ifdef CONFIG_MEMORY_FAILURE
- "HardwareCorrupted: %5lu kB\n"
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- "AnonHugePages: %8lu kB\n"
- "ShmemHugePages: %8lu kB\n"
- "ShmemPmdMapped: %8lu kB\n"
-#endif
-#ifdef CONFIG_CMA
- "CmaTotal: %8lu kB\n"
- "CmaFree: %8lu kB\n"
-#endif
- ,
- K(i.totalram),
- K(i.freeram),
- K(available),
- K(i.bufferram),
- K(cached),
- K(total_swapcache_pages()),
- K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]),
- K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
- K(pages[LRU_ACTIVE_ANON]),
- K(pages[LRU_INACTIVE_ANON]),
- K(pages[LRU_ACTIVE_FILE]),
- K(pages[LRU_INACTIVE_FILE]),
- K(pages[LRU_UNEVICTABLE]),
- K(global_page_state(NR_MLOCK)),
+ show_val_kb(m, "MemTotal: ", i.totalram);
+ show_val_kb(m, "MemFree: ", i.freeram);
+ show_val_kb(m, "MemAvailable: ", available);
+ show_val_kb(m, "Buffers: ", i.bufferram);
+ show_val_kb(m, "Cached: ", cached);
+ show_val_kb(m, "SwapCached: ", total_swapcache_pages());
+ show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] +
+ pages[LRU_ACTIVE_FILE]);
+ show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] +
+ pages[LRU_INACTIVE_FILE]);
+ show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]);
+ show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]);
+ show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]);
+ show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);
+ show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]);
+ show_val_kb(m, "Mlocked: ", global_page_state(NR_MLOCK));
+
#ifdef CONFIG_HIGHMEM
- K(i.totalhigh),
- K(i.freehigh),
- K(i.totalram-i.totalhigh),
- K(i.freeram-i.freehigh),
+ show_val_kb(m, "HighTotal: ", i.totalhigh);
+ show_val_kb(m, "HighFree: ", i.freehigh);
+ show_val_kb(m, "LowTotal: ", i.totalram - i.totalhigh);
+ show_val_kb(m, "LowFree: ", i.freeram - i.freehigh);
#endif
+
#ifndef CONFIG_MMU
- K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
+ show_val_kb(m, "MmapCopy: ",
+ (unsigned long)atomic_long_read(&mmap_pages_allocated));
#endif
- K(i.totalswap),
- K(i.freeswap),
- K(global_node_page_state(NR_FILE_DIRTY)),
- K(global_node_page_state(NR_WRITEBACK)),
- K(global_node_page_state(NR_ANON_MAPPED)),
- K(global_node_page_state(NR_FILE_MAPPED)),
- K(i.sharedram),
- K(global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE)),
- K(global_page_state(NR_SLAB_RECLAIMABLE)),
- K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
- global_page_state(NR_KERNEL_STACK_KB),
- K(global_page_state(NR_PAGETABLE)),
+
+ show_val_kb(m, "SwapTotal: ", i.totalswap);
+ show_val_kb(m, "SwapFree: ", i.freeswap);
+ show_val_kb(m, "Dirty: ",
+ global_node_page_state(NR_FILE_DIRTY));
+ show_val_kb(m, "Writeback: ",
+ global_node_page_state(NR_WRITEBACK));
+ show_val_kb(m, "AnonPages: ",
+ global_node_page_state(NR_ANON_MAPPED));
+ show_val_kb(m, "Mapped: ",
+ global_node_page_state(NR_FILE_MAPPED));
+ show_val_kb(m, "Shmem: ", i.sharedram);
+ show_val_kb(m, "Slab: ",
+ global_page_state(NR_SLAB_RECLAIMABLE) +
+ global_page_state(NR_SLAB_UNRECLAIMABLE));
+
+ show_val_kb(m, "SReclaimable: ",
+ global_page_state(NR_SLAB_RECLAIMABLE));
+ show_val_kb(m, "SUnreclaim: ",
+ global_page_state(NR_SLAB_UNRECLAIMABLE));
+ seq_printf(m, "KernelStack: %8lu kB\n",
+ global_page_state(NR_KERNEL_STACK_KB));
+ show_val_kb(m, "PageTables: ",
+ global_page_state(NR_PAGETABLE));
#ifdef CONFIG_QUICKLIST
- K(quicklist_total_size()),
+ show_val_kb(m, "Quicklists: ", quicklist_total_size());
#endif
- K(global_node_page_state(NR_UNSTABLE_NFS)),
- K(global_page_state(NR_BOUNCE)),
- K(global_node_page_state(NR_WRITEBACK_TEMP)),
- K(vm_commit_limit()),
- K(committed),
- (unsigned long)VMALLOC_TOTAL >> 10,
- 0ul, // used to be vmalloc 'used'
- 0ul // used to be vmalloc 'largest_chunk'
+
+ show_val_kb(m, "NFS_Unstable: ",
+ global_node_page_state(NR_UNSTABLE_NFS));
+ show_val_kb(m, "Bounce: ",
+ global_page_state(NR_BOUNCE));
+ show_val_kb(m, "WritebackTmp: ",
+ global_node_page_state(NR_WRITEBACK_TEMP));
+ show_val_kb(m, "CommitLimit: ", vm_commit_limit());
+ show_val_kb(m, "Committed_AS: ", committed);
+ seq_printf(m, "VmallocTotal: %8lu kB\n",
+ (unsigned long)VMALLOC_TOTAL >> 10);
+ show_val_kb(m, "VmallocUsed: ", 0ul);
+ show_val_kb(m, "VmallocChunk: ", 0ul);
+
#ifdef CONFIG_MEMORY_FAILURE
- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+ seq_printf(m, "HardwareCorrupted: %5lu kB\n",
+ atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10));
#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- , K(global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR)
- , K(global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR)
- , K(global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR)
+ show_val_kb(m, "AnonHugePages: ",
+ global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR);
+ show_val_kb(m, "ShmemHugePages: ",
+ global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
+ show_val_kb(m, "ShmemPmdMapped: ",
+ global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
#endif
+
#ifdef CONFIG_CMA
- , K(totalcma_pages)
- , K(global_page_state(NR_FREE_CMA_PAGES))
+ show_val_kb(m, "CmaTotal: ", totalcma_pages);
+ show_val_kb(m, "CmaFree: ",
+ global_page_state(NR_FREE_CMA_PAGES));
#endif
- );
hugetlb_report_meminfo(m);
arch_report_meminfo(m);
return 0;
-#undef K
}
static int meminfo_proc_open(struct inode *inode, struct file *file)
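
Note (not part of the patch): show_val_kb() added above writes the label as a fixed 16-byte field, right-aligns the value in an 8-column field, and appends " kB\n". A userspace reproduction of that layout, with snprintf() standing in for num_to_str() and PAGE_SHIFT assumed to be 12:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the demo */

static void show_val_kb(FILE *m, const char *s, unsigned long num)
{
	char v[32];
	int len = snprintf(v, sizeof(v), "%lu", num << (PAGE_SHIFT - 10));

	/* The kernel pads the label literal and uses seq_write(m, s, 16);
	 * %-16s produces the same 16-byte field here. */
	fprintf(m, "%-16s", s);
	if (len > 0 && len < 8)
		fprintf(m, "%*s", 8 - len, "");
	fputs(v, m);
	fputs(" kB\n", m);
}

int main(void)
{
	/* 262144 pages * 4 KiB = 1 GiB, printed as 1048576 kB. */
	show_val_kb(stdout, "MemTotal:", 262144);
	show_val_kb(stdout, "MemFree:", 131072);
	return 0;
}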
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index c8bbc68cdb05..7ae6b1da7cab 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -21,6 +21,7 @@
#include <linux/bitops.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
+#include <linux/uidgid.h>
#include <net/net_namespace.h>
#include <linux/seq_file.h>
@@ -185,6 +186,8 @@ const struct file_operations proc_net_operations = {
static __net_init int proc_net_ns_init(struct net *net)
{
struct proc_dir_entry *netd, *net_statd;
+ kuid_t uid;
+ kgid_t gid;
int err;
err = -ENOMEM;
@@ -199,6 +202,16 @@ static __net_init int proc_net_ns_init(struct net *net)
netd->parent = &proc_root;
memcpy(netd->name, "net", 4);
+ uid = make_kuid(net->user_ns, 0);
+ if (!uid_valid(uid))
+ uid = netd->uid;
+
+ gid = make_kgid(net->user_ns, 0);
+ if (!gid_valid(gid))
+ gid = netd->gid;
+
+ proc_set_user(netd, uid, gid);
+
err = -EEXIST;
net_statd = proc_net_mkdir(net, "stat", netd);
if (!net_statd)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 1b93650dda2f..71025b9e2a4e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -72,7 +72,7 @@ static DEFINE_SPINLOCK(sysctl_lock);
static void drop_sysctl_table(struct ctl_table_header *header);
static int sysctl_follow_link(struct ctl_table_header **phead,
- struct ctl_table **pentry, struct nsproxy *namespaces);
+ struct ctl_table **pentry);
static int insert_links(struct ctl_table_header *head);
static void put_links(struct ctl_table_header *header);
@@ -319,11 +319,11 @@ static void sysctl_head_finish(struct ctl_table_header *head)
}
static struct ctl_table_set *
-lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces)
+lookup_header_set(struct ctl_table_root *root)
{
struct ctl_table_set *set = &root->default_set;
if (root->lookup)
- set = root->lookup(root, namespaces);
+ set = root->lookup(root);
return set;
}
@@ -430,6 +430,7 @@ static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, i
static struct inode *proc_sys_make_inode(struct super_block *sb,
struct ctl_table_header *head, struct ctl_table *table)
{
+ struct ctl_table_root *root = head->root;
struct inode *inode;
struct proc_inode *ei;
@@ -457,6 +458,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (is_empty_dir(head))
make_empty_dir_inode(inode);
}
+
+ if (root->set_ownership)
+ root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+
out:
return inode;
}
@@ -491,7 +496,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
goto out;
if (S_ISLNK(p->mode)) {
- ret = sysctl_follow_link(&h, &p, current->nsproxy);
+ ret = sysctl_follow_link(&h, &p);
err = ERR_PTR(ret);
if (ret)
goto out;
@@ -659,7 +664,7 @@ static bool proc_sys_link_fill_cache(struct file *file,
if (S_ISLNK(table->mode)) {
/* It is not an error if we can not follow the link ignore it */
- int err = sysctl_follow_link(&head, &table, current->nsproxy);
+ int err = sysctl_follow_link(&head, &table);
if (err)
goto out;
}
@@ -976,7 +981,7 @@ static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir)
}
static int sysctl_follow_link(struct ctl_table_header **phead,
- struct ctl_table **pentry, struct nsproxy *namespaces)
+ struct ctl_table **pentry)
{
struct ctl_table_header *head;
struct ctl_table_root *root;
@@ -988,7 +993,7 @@ static int sysctl_follow_link(struct ctl_table_header **phead,
ret = 0;
spin_lock(&sysctl_lock);
root = (*pentry)->data;
- set = lookup_header_set(root, namespaces);
+ set = lookup_header_set(root);
dir = xlate_dir(set, (*phead)->parent);
if (IS_ERR(dir))
ret = PTR_ERR(dir);
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 7907e456ac4f..d700c42b3572 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -115,17 +115,16 @@ static int show_stat(struct seq_file *p, void *v)
}
sum += arch_irq_stat();
- seq_puts(p, "cpu ");
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
+ seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
seq_putc(p, '\n');
for_each_online_cpu(i) {
@@ -141,23 +140,23 @@ static int show_stat(struct seq_file *p, void *v)
guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
seq_printf(p, "cpu%d", i);
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
- seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
+ seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
seq_putc(p, '\n');
}
- seq_printf(p, "intr %llu", (unsigned long long)sum);
+ seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
/* sum again ? it could be updated? */
for_each_irq_nr(j)
- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
+ seq_put_decimal_ull(p, " ", kstat_irqs_usr(j));
seq_printf(p,
"\nctxt %llu\n"
@@ -171,10 +170,10 @@ static int show_stat(struct seq_file *p, void *v)
nr_running(),
nr_iowait());
- seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
+ seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);
for (i = 0; i < NR_SOFTIRQS; i++)
- seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
+ seq_put_decimal_ull(p, " ", per_softirq_sums[i]);
seq_putc(p, '\n');
return 0;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f6fa99eca515..6909582ce5e5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -147,7 +147,7 @@ m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
if (m->count < m->size) /* vma is copied successfully */
- m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
+ m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}
static void *m_start(struct seq_file *m, loff_t *ppos)
@@ -175,8 +175,10 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
priv->tail_vma = get_gate_vma(mm);
if (last_addr) {
- vma = find_vma(mm, last_addr);
- if (vma && (vma = m_next_vma(priv, vma)))
+ vma = find_vma(mm, last_addr - 1);
+ if (vma && vma->vm_start <= last_addr)
+ vma = m_next_vma(priv, vma);
+ if (vma)
return vma;
}
@@ -1070,7 +1072,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
mmu_notifier_invalidate_range_start(mm, 0, -1);
}
- walk_page_range(0, ~0UL, &clear_refs_walk);
+ walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1);
flush_tlb_mm(mm);
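
Note (not part of the patch): the /proc/<pid>/maps change above makes the cached seq_file position the end of the last printed VMA, and m_start() resumes at the first VMA that begins at or after it even if the printed VMA has since been unmapped. A userspace sketch of that resume logic over a fixed VMA array (the kernel looks up the VMA with find_vma() on the mm):

#include <stdio.h>
#include <stddef.h>

struct vma {
	unsigned long vm_start, vm_end;
};

static const struct vma vmas[] = {
	{ 0x1000, 0x2000 },
	{ 0x3000, 0x5000 },
	{ 0x7000, 0x8000 },
};

#define NR_VMAS (sizeof(vmas) / sizeof(vmas[0]))

/* First VMA with vm_end > addr, like the kernel's find_vma(). */
static const struct vma *find_vma(unsigned long addr)
{
	size_t i;

	for (i = 0; i < NR_VMAS; i++)
		if (vmas[i].vm_end > addr)
			return &vmas[i];
	return NULL;
}

static const struct vma *m_next_vma(const struct vma *vma)
{
	return vma + 1 < vmas + NR_VMAS ? vma + 1 : NULL;
}

static const struct vma *resume(unsigned long last_addr)
{
	const struct vma *vma = find_vma(last_addr - 1);

	/* If this is the VMA that was already printed, step past it. */
	if (vma && vma->vm_start <= last_addr)
		vma = m_next_vma(vma);
	return vma;
}

int main(void)
{
	/* After printing [0x3000,0x5000), last_addr is 0x5000 and the
	 * walk resumes at [0x7000,0x8000). */
	const struct vma *vma = resume(0x5000);

	if (vma)
		printf("resume at %#lx-%#lx\n", vma->vm_start, vma->vm_end);
	return 0;
}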
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 16ecca5b72d8..14984d902a99 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -623,6 +623,40 @@ static int pstore_write_compat(enum pstore_type_id type,
size, psi);
}
+static int pstore_write_buf_user_compat(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char __user *buf,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ unsigned long flags = 0;
+ size_t i, bufsize = size;
+ long ret = 0;
+
+ if (unlikely(!access_ok(VERIFY_READ, buf, size)))
+ return -EFAULT;
+ if (bufsize > psinfo->bufsize)
+ bufsize = psinfo->bufsize;
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ for (i = 0; i < size; ) {
+ size_t c = min(size - i, bufsize);
+
+ ret = __copy_from_user(psinfo->buf, buf + i, c);
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = psi->write_buf(type, reason, id, part, psinfo->buf,
+ compressed, c, psi);
+ if (unlikely(ret < 0))
+ break;
+ i += c;
+ }
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ return unlikely(ret < 0) ? ret : size;
+}
+
/*
* platform specific persistent storage driver registers with
* us here. If pstore is already mounted, call the platform
@@ -645,6 +679,8 @@ int pstore_register(struct pstore_info *psi)
if (!psi->write)
psi->write = pstore_write_compat;
+ if (!psi->write_buf_user)
+ psi->write_buf_user = pstore_write_buf_user_compat;
psinfo = psi;
mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
@@ -659,13 +695,14 @@ int pstore_register(struct pstore_info *psi)
if (pstore_is_mounted())
pstore_get_records(0);
- pstore_register_kmsg();
-
- if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ if (psi->flags & PSTORE_FLAGS_DMESG)
+ pstore_register_kmsg();
+ if (psi->flags & PSTORE_FLAGS_CONSOLE)
pstore_register_console();
+ if (psi->flags & PSTORE_FLAGS_FTRACE)
pstore_register_ftrace();
+ if (psi->flags & PSTORE_FLAGS_PMSG)
pstore_register_pmsg();
- }
if (pstore_update_ms >= 0) {
pstore_timer.expires = jiffies +
@@ -689,12 +726,14 @@ EXPORT_SYMBOL_GPL(pstore_register);
void pstore_unregister(struct pstore_info *psi)
{
- if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ if (psi->flags & PSTORE_FLAGS_PMSG)
pstore_unregister_pmsg();
+ if (psi->flags & PSTORE_FLAGS_FTRACE)
pstore_unregister_ftrace();
+ if (psi->flags & PSTORE_FLAGS_CONSOLE)
pstore_unregister_console();
- }
- pstore_unregister_kmsg();
+ if (psi->flags & PSTORE_FLAGS_DMESG)
+ pstore_unregister_kmsg();
free_buf_for_compression();
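
Note (not part of the patch): pstore_write_buf_user_compat() added above drains the user buffer through psinfo->buf in bufsize-sized chunks while holding buf_lock. A userspace sketch of just the chunking loop, with memcpy() standing in for __copy_from_user() and a printf() for the backend ->write_buf() call:

#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 8	/* pretend psinfo->bufsize is tiny */

static char bounce[BOUNCE_SIZE];

static long write_user_compat(const char *buf, size_t size)
{
	size_t i, c;

	for (i = 0; i < size; i += c) {
		c = size - i < BOUNCE_SIZE ? size - i : BOUNCE_SIZE;
		memcpy(bounce, buf + i, c);		/* __copy_from_user() */
		printf("write_buf: %zu bytes\n", c);	/* psi->write_buf() */
	}
	return (long)size;
}

int main(void)
{
	const char msg[] = "hello from userspace pmsg";

	return write_user_compat(msg, sizeof(msg) - 1) < 0;
}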
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
index 7de20cd3797f..78f6176c020f 100644
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -19,48 +19,25 @@
#include "internal.h"
static DEFINE_MUTEX(pmsg_lock);
-#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
static ssize_t write_pmsg(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- size_t i, buffer_size;
- char *buffer;
+ u64 id;
+ int ret;
if (!count)
return 0;
+ /* check outside lock, page in any data. write_buf_user also checks */
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
- buffer_size = count;
- if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
- buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
- buffer = vmalloc(buffer_size);
- if (!buffer)
- return -ENOMEM;
-
mutex_lock(&pmsg_lock);
- for (i = 0; i < count; ) {
- size_t c = min(count - i, buffer_size);
- u64 id;
- long ret;
-
- ret = __copy_from_user(buffer, buf + i, c);
- if (unlikely(ret != 0)) {
- mutex_unlock(&pmsg_lock);
- vfree(buffer);
- return -EFAULT;
- }
- psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
- psinfo);
-
- i += c;
- }
-
+ ret = psinfo->write_buf_user(PSTORE_TYPE_PMSG, 0, &id, 0, buf, 0, count,
+ psinfo);
mutex_unlock(&pmsg_lock);
- vfree(buffer);
- return count;
+ return ret ? ret : count;
}
static const struct file_operations pmsg_fops = {
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 7a034d62cf8c..6ad831b9d1b8 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -331,6 +331,24 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
return 0;
}
+static int notrace ramoops_pstore_write_buf_user(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char __user *buf,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ if (type == PSTORE_TYPE_PMSG) {
+ struct ramoops_context *cxt = psi->data;
+
+ if (!cxt->mprz)
+ return -ENOMEM;
+ return persistent_ram_write_user(cxt->mprz, buf, size);
+ }
+
+ return -EINVAL;
+}
+
static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
struct timespec time, struct pstore_info *psi)
{
@@ -369,6 +387,7 @@ static struct ramoops_context oops_cxt = {
.open = ramoops_pstore_open,
.read = ramoops_pstore_read,
.write_buf = ramoops_pstore_write_buf,
+ .write_buf_user = ramoops_pstore_write_buf_user,
.erase = ramoops_pstore_erase,
},
};
@@ -377,13 +396,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
{
int i;
- cxt->max_dump_cnt = 0;
if (!cxt->przs)
return;
- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
+ for (i = 0; i < cxt->max_dump_cnt; i++)
persistent_ram_free(cxt->przs[i]);
+
kfree(cxt->przs);
+ cxt->max_dump_cnt = 0;
}
static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
@@ -408,7 +428,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
GFP_KERNEL);
if (!cxt->przs) {
dev_err(dev, "failed to initialize a prz array for dumps\n");
- goto fail_prz;
+ goto fail_mem;
}
for (i = 0; i < cxt->max_dump_cnt; i++) {
@@ -419,6 +439,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
err = PTR_ERR(cxt->przs[i]);
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
cxt->record_size, (unsigned long long)*paddr, err);
+
+ while (i > 0) {
+ i--;
+ persistent_ram_free(cxt->przs[i]);
+ }
goto fail_prz;
}
*paddr += cxt->record_size;
@@ -426,7 +451,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
return 0;
fail_prz:
- ramoops_free_przs(cxt);
+ kfree(cxt->przs);
+fail_mem:
+ cxt->max_dump_cnt = 0;
return err;
}
@@ -608,12 +635,20 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
- spin_lock_init(&cxt->pstore.buf_lock);
if (!cxt->pstore.buf) {
pr_err("cannot allocate pstore buffer\n");
err = -ENOMEM;
goto fail_clear;
}
+ spin_lock_init(&cxt->pstore.buf_lock);
+
+ cxt->pstore.flags = PSTORE_FLAGS_DMESG;
+ if (cxt->console_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
+ if (cxt->ftrace_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
+ if (cxt->pmsg_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
err = pstore_register(&cxt->pstore);
if (err) {
@@ -659,7 +694,6 @@ static int ramoops_remove(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
pstore_unregister(&cxt->pstore);
- cxt->max_dump_cnt = 0;
kfree(cxt->pstore.buf);
cxt->pstore.bufsize = 0;
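
Note (not part of the patch): the ramoops_init_przs() change above unwinds a partial allocation by freeing only the zones created so far before releasing the array itself. A userspace sketch of that unwind pattern:

#include <stdio.h>
#include <stdlib.h>

static int init_przs(void ***przs_out, size_t count, size_t fail_at)
{
	void **przs = calloc(count, sizeof(*przs));
	size_t i;

	if (!przs)
		return -1;

	for (i = 0; i < count; i++) {
		/* Simulate persistent_ram_new() failing at index fail_at. */
		przs[i] = i == fail_at ? NULL : malloc(16);
		if (!przs[i]) {
			while (i > 0) {		/* same unwind as the patch */
				i--;
				free(przs[i]);
			}
			free(przs);
			return -1;
		}
	}

	*przs_out = przs;
	return 0;
}

int main(void)
{
	void **przs;

	if (init_przs(&przs, 4, 2))
		puts("allocation failed, earlier zones freed");
	return 0;
}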
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 76c3f80efdfa..3975deec02f8 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -17,15 +17,16 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
+#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <linux/pstore_ram.h>
#include <asm/page.h>
struct persistent_ram_buffer {
@@ -47,43 +48,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
return atomic_read(&prz->buffer->start);
}
-/* increase and wrap the start pointer, returning the old value */
-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
-{
- int old;
- int new;
-
- do {
- old = atomic_read(&prz->buffer->start);
- new = old + a;
- while (unlikely(new >= prz->buffer_size))
- new -= prz->buffer_size;
- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
-
- return old;
-}
-
-/* increase the size counter until it hits the max size */
-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
-{
- size_t old;
- size_t new;
-
- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
- return;
-
- do {
- old = atomic_read(&prz->buffer->size);
- new = old + a;
- if (new > prz->buffer_size)
- new = prz->buffer_size;
- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
-}
-
static DEFINE_RAW_SPINLOCK(buffer_lock);
/* increase and wrap the start pointer, returning the old value */
-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
int old;
int new;
@@ -103,7 +71,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
}
/* increase the size counter until it hits the max size */
-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
size_t old;
size_t new;
@@ -124,9 +92,6 @@ exit:
raw_spin_unlock_irqrestore(&buffer_lock, flags);
}
-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
-
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
uint8_t *data, size_t len, uint8_t *ecc)
{
@@ -299,10 +264,20 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
const void *s, unsigned int start, unsigned int count)
{
struct persistent_ram_buffer *buffer = prz->buffer;
- memcpy(buffer->data + start, s, count);
+ memcpy_toio(buffer->data + start, s, count);
persistent_ram_update_ecc(prz, start, count);
}
+static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
+ -EFAULT : 0;
+ persistent_ram_update_ecc(prz, start, count);
+ return ret;
+}
+
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
struct persistent_ram_buffer *buffer = prz->buffer;
@@ -322,8 +297,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
}
prz->old_log_size = size;
- memcpy(prz->old_log, &buffer->data[start], size - start);
- memcpy(prz->old_log + size - start, &buffer->data[0], start);
+ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
+ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
@@ -356,6 +331,38 @@ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
return count;
}
+int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int count)
+{
+ int rem, ret = 0, c = count;
+ size_t start;
+
+ if (unlikely(!access_ok(VERIFY_READ, s, count)))
+ return -EFAULT;
+ if (unlikely(c > prz->buffer_size)) {
+ s += c - prz->buffer_size;
+ c = prz->buffer_size;
+ }
+
+ buffer_size_add(prz, c);
+
+ start = buffer_start_add(prz, c);
+
+ rem = prz->buffer_size - start;
+ if (unlikely(rem < c)) {
+ ret = persistent_ram_update_user(prz, s, start, rem);
+ s += rem;
+ c -= rem;
+ start = 0;
+ }
+ if (likely(!ret))
+ ret = persistent_ram_update_user(prz, s, start, c);
+
+ persistent_ram_update_header_ecc(prz);
+
+ return unlikely(ret) ? ret : count;
+}
+
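The new persistent_ram_write_user() above truncates oversized writes to the newest buffer_size bytes and splits a write that runs past the end of the buffer into a tail copy followed by a copy from offset 0. A minimal stand-alone sketch of that wrap-around logic (plain user-space C with hypothetical names, not pstore code):

#include <stddef.h>
#include <string.h>

struct ring {
    char *data;
    size_t size;    /* total capacity */
    size_t start;   /* next write offset */
};

static void ring_write(struct ring *r, const char *src, size_t count)
{
    size_t rem;

    if (count > r->size) {          /* oversized write: keep only the newest bytes */
        src += count - r->size;
        count = r->size;
    }
    rem = r->size - r->start;
    if (rem < count) {              /* wraps: copy the tail, then restart at offset 0 */
        memcpy(r->data + r->start, src, rem);
        src += rem;
        count -= rem;
        r->start = 0;
    }
    memcpy(r->data + r->start, src, count);
    r->start = (r->start + count) % r->size;
}

int main(void)
{
    char buf[8] = { 0 };
    struct ring r = { buf, sizeof(buf), 0 };

    ring_write(&r, "abcdef", 6);
    ring_write(&r, "ghij", 4);      /* wraps: "gh" fills the end, "ij" overwrites the oldest bytes */
    return 0;
}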
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
return prz->old_log_size;
@@ -426,9 +433,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
return NULL;
}
- buffer_start_add = buffer_start_add_locked;
- buffer_size_add = buffer_size_add_locked;
-
if (memtype)
va = ioremap(start, size);
else
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 35df08ee9c97..2d445425aad7 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -341,6 +341,7 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
struct qc_state state;
int ret;
+ memset(&state, 0, sizeof (struct qc_state));
ret = sb->s_qcop->get_state(sb, &state);
if (ret < 0)
return ret;
@@ -365,17 +366,19 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
- if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+
+ /* Inodes may be allocated even if inactive; copy out if present */
+ if (state.s_state[USRQUOTA].ino) {
fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
}
- if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+ if (state.s_state[GRPQUOTA].ino) {
fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
}
- if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+ if (state.s_state[PRJQUOTA].ino) {
/*
* Q_XGETQSTAT doesn't have room for both group and project
* quotas. So, allow the project quota values to be copied out
@@ -411,6 +414,7 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
struct qc_state state;
int ret;
+ memset(&state, 0, sizeof (struct qc_state));
ret = sb->s_qcop->get_state(sb, &state);
if (ret < 0)
return ret;
@@ -435,17 +439,19 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
- if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+
+ /* Inodes may be allocated even if inactive; copy out if present */
+ if (state.s_state[USRQUOTA].ino) {
fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
}
- if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+ if (state.s_state[GRPQUOTA].ino) {
fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
}
- if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+ if (state.s_state[PRJQUOTA].ino) {
fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
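The memset() added above matters because a ->get_state() implementation is free to fill in only the fields it knows about; without zero-initialisation, the untouched fields of the on-stack qc_state would be copied out as stack garbage. A tiny stand-alone illustration of the pattern (hypothetical names, not the quota code):

#include <stdio.h>
#include <string.h>

struct qstate {
    long ino;
    long blocks;
    int flags;
};

/* Stands in for a ->get_state() that only knows about inode numbers. */
static void fill_state(struct qstate *s)
{
    s->ino = 42;
}

int main(void)
{
    struct qstate s;

    memset(&s, 0, sizeof(s));   /* fields the filler skips read back as 0 */
    fill_state(&s);
    printf("ino=%ld blocks=%ld flags=%d\n", s.ino, s.blocks, s.flags);
    return 0;
}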
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 7a4a85a6821e..74d5ddd26296 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -190,7 +190,15 @@ static int remove_save_link_only(struct super_block *s,
static int reiserfs_quota_on_mount(struct super_block *, int);
#endif
-/* look for uncompleted unlinks and truncates and complete them */
+/*
+ * Look for uncompleted unlinks and truncates and complete them
+ *
+ * Called with the superblock write locked. If quotas are enabled, we have to
+ * release and retake the lock around dquot_quota_on_mount(); otherwise it
+ * proceeds to schedule_on_each_cpu() in invalidate_bdev() and deadlocks
+ * waiting for the per-cpu worklets to complete flush_async_commits(), which
+ * in turn waits for the superblock write lock.
+ */
static int finish_unfinished(struct super_block *s)
{
INITIALIZE_PATH(path);
@@ -237,7 +245,9 @@ static int finish_unfinished(struct super_block *s)
quota_enabled[i] = 0;
continue;
}
+ reiserfs_write_unlock(s);
ret = reiserfs_quota_on_mount(s, i);
+ reiserfs_write_lock(s);
if (ret < 0)
reiserfs_warning(s, "reiserfs-2500",
"cannot turn on journaled "
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 6dc4296eed62..368bfb92b115 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -679,11 +679,11 @@ EXPORT_SYMBOL(seq_puts);
/*
* A helper routine for putting decimal numbers without the rich formatting of
* printf(); only 'unsigned long long' is supported.
- * This routine will put one byte delimiter + number into seq_file.
+ * This routine will put the delimiter string followed by the number into seq_file.
* This routine is very quick when you show lots of numbers.
* In usual cases, it will be better to use seq_printf(). It's easier to read.
*/
-void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num)
{
int len;
@@ -691,8 +691,15 @@ void seq_put_decimal_ull(struct seq_file *m, char delimiter,
if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */
goto overflow;
- if (delimiter)
- m->buf[m->count++] = delimiter;
+ len = strlen(delimiter);
+ if (m->count + len >= m->size)
+ goto overflow;
+
+ memcpy(m->buf + m->count, delimiter, len);
+ m->count += len;
+
+ if (m->count + 1 >= m->size)
+ goto overflow;
if (num < 10) {
m->buf[m->count++] = num + '0';
@@ -702,6 +709,7 @@ void seq_put_decimal_ull(struct seq_file *m, char delimiter,
len = num_to_str(m->buf + m->count, m->size - m->count, num);
if (!len)
goto overflow;
+
m->count += len;
return;
@@ -710,19 +718,42 @@ overflow:
}
EXPORT_SYMBOL(seq_put_decimal_ull);
-void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num)
+void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num)
{
+ int len;
+
+ if (m->count + 3 >= m->size) /* delimiter, sign and at least one digit */
+ goto overflow;
+
+ len = strlen(delimiter);
+ if (m->count + len >= m->size)
+ goto overflow;
+
+ memcpy(m->buf + m->count, delimiter, len);
+ m->count += len;
+
+ if (m->count + 2 >= m->size)
+ goto overflow;
+
if (num < 0) {
- if (m->count + 3 >= m->size) {
- seq_set_overflow(m);
- return;
- }
- if (delimiter)
- m->buf[m->count++] = delimiter;
+ m->buf[m->count++] = '-';
num = -num;
- delimiter = '-';
}
- seq_put_decimal_ull(m, delimiter, num);
+
+ if (num < 10) {
+ m->buf[m->count++] = num + '0';
+ return;
+ }
+
+ len = num_to_str(m->buf + m->count, m->size - m->count, num);
+ if (!len)
+ goto overflow;
+
+ m->count += len;
+ return;
+
+overflow:
+ seq_set_overflow(m);
}
EXPORT_SYMBOL(seq_put_decimal_ll);
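The interface change above swaps the single-character delimiter for an arbitrary delimiter string and bounds-checks each piece before appending it. A rough user-space analogue of that append-or-overflow logic (hypothetical names; it does not touch seq_file internals):

#include <stdio.h>
#include <string.h>

struct buf {
    char data[64];
    size_t count;
    int overflow;
};

static void put_decimal_ull(struct buf *m, const char *delim,
                            unsigned long long num)
{
    size_t len = strlen(delim);
    int n;

    if (m->count + len + 1 >= sizeof(m->data)) {    /* delimiter + at least one digit */
        m->overflow = 1;
        return;
    }
    memcpy(m->data + m->count, delim, len);
    m->count += len;

    n = snprintf(m->data + m->count, sizeof(m->data) - m->count, "%llu", num);
    if (n < 0 || (size_t)n >= sizeof(m->data) - m->count) {
        m->overflow = 1;
        return;
    }
    m->count += (size_t)n;
}

int main(void)
{
    struct buf b = { .count = 0, .overflow = 0 };

    put_decimal_ull(&b, "", 1);
    put_decimal_ull(&b, " ", 23);
    put_decimal_ull(&b, " ", 456);
    printf("%.*s overflow=%d\n", (int)b.count, b.data, b.overflow);
    return 0;
}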
diff --git a/fs/splice.c b/fs/splice.c
index dd9bf7e410d2..aa38901a4f10 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -183,82 +183,39 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
- int ret, do_wakeup, page_nr;
+ int ret = 0, page_nr = 0;
if (!spd_pages)
return 0;
- ret = 0;
- do_wakeup = 0;
- page_nr = 0;
-
- pipe_lock(pipe);
-
- for (;;) {
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
- break;
- }
-
- if (pipe->nrbufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
-
- buf->page = spd->pages[page_nr];
- buf->offset = spd->partial[page_nr].offset;
- buf->len = spd->partial[page_nr].len;
- buf->private = spd->partial[page_nr].private;
- buf->ops = spd->ops;
- if (spd->flags & SPLICE_F_GIFT)
- buf->flags |= PIPE_BUF_FLAG_GIFT;
-
- pipe->nrbufs++;
- page_nr++;
- ret += buf->len;
-
- if (pipe->files)
- do_wakeup = 1;
+ if (unlikely(!pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+ }
- if (!--spd->nr_pages)
- break;
- if (pipe->nrbufs < pipe->buffers)
- continue;
+ while (pipe->nrbufs < pipe->buffers) {
+ int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ struct pipe_buffer *buf = pipe->bufs + newbuf;
- break;
- }
+ buf->page = spd->pages[page_nr];
+ buf->offset = spd->partial[page_nr].offset;
+ buf->len = spd->partial[page_nr].len;
+ buf->private = spd->partial[page_nr].private;
+ buf->ops = spd->ops;
- if (spd->flags & SPLICE_F_NONBLOCK) {
- if (!ret)
- ret = -EAGAIN;
- break;
- }
+ pipe->nrbufs++;
+ page_nr++;
+ ret += buf->len;
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
+ if (!--spd->nr_pages)
break;
- }
-
- if (do_wakeup) {
- smp_mb();
- if (waitqueue_active(&pipe->wait))
- wake_up_interruptible_sync(&pipe->wait);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
-
- pipe->waiting_writers++;
- pipe_wait(pipe);
- pipe->waiting_writers--;
}
- pipe_unlock(pipe);
-
- if (do_wakeup)
- wakeup_pipe_readers(pipe);
+ if (!ret)
+ ret = -EAGAIN;
+out:
while (page_nr < spd_pages)
spd->spd_release(spd, page_nr++);
@@ -266,6 +223,26 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
}
EXPORT_SYMBOL_GPL(splice_to_pipe);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+{
+ int ret;
+
+ if (unlikely(!pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ } else if (pipe->nrbufs == pipe->buffers) {
+ ret = -EAGAIN;
+ } else {
+ int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ pipe->bufs[newbuf] = *buf;
+ pipe->nrbufs++;
+ return buf->len;
+ }
+ pipe_buf_release(pipe, buf);
+ return ret;
+}
+EXPORT_SYMBOL(add_to_pipe);
+
void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
put_page(spd->pages[i]);
@@ -303,207 +280,6 @@ void splice_shrink_spd(struct splice_pipe_desc *spd)
kfree(spd->partial);
}
-static int
-__generic_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
-{
- struct address_space *mapping = in->f_mapping;
- unsigned int loff, nr_pages, req_pages;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct page *page;
- pgoff_t index, end_index;
- loff_t isize;
- int error, page_nr;
- struct splice_pipe_desc spd = {
- .pages = pages,
- .partial = partial,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .flags = flags,
- .ops = &page_cache_pipe_buf_ops,
- .spd_release = spd_release_page,
- };
-
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
- index = *ppos >> PAGE_SHIFT;
- loff = *ppos & ~PAGE_MASK;
- req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
- nr_pages = min(req_pages, spd.nr_pages_max);
-
- /*
- * Lookup the (hopefully) full range of pages we need.
- */
- spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
- index += spd.nr_pages;
-
- /*
- * If find_get_pages_contig() returned fewer pages than we needed,
- * readahead/allocate the rest and fill in the holes.
- */
- if (spd.nr_pages < nr_pages)
- page_cache_sync_readahead(mapping, &in->f_ra, in,
- index, req_pages - spd.nr_pages);
-
- error = 0;
- while (spd.nr_pages < nr_pages) {
- /*
- * Page could be there, find_get_pages_contig() breaks on
- * the first hole.
- */
- page = find_get_page(mapping, index);
- if (!page) {
- /*
- * page didn't exist, allocate one.
- */
- page = page_cache_alloc_cold(mapping);
- if (!page)
- break;
-
- error = add_to_page_cache_lru(page, mapping, index,
- mapping_gfp_constraint(mapping, GFP_KERNEL));
- if (unlikely(error)) {
- put_page(page);
- if (error == -EEXIST)
- continue;
- break;
- }
- /*
- * add_to_page_cache() locks the page, unlock it
- * to avoid convoluting the logic below even more.
- */
- unlock_page(page);
- }
-
- spd.pages[spd.nr_pages++] = page;
- index++;
- }
-
- /*
- * Now loop over the map and see if we need to start IO on any
- * pages, fill in the partial map, etc.
- */
- index = *ppos >> PAGE_SHIFT;
- nr_pages = spd.nr_pages;
- spd.nr_pages = 0;
- for (page_nr = 0; page_nr < nr_pages; page_nr++) {
- unsigned int this_len;
-
- if (!len)
- break;
-
- /*
- * this_len is the max we'll use from this page
- */
- this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
- page = spd.pages[page_nr];
-
- if (PageReadahead(page))
- page_cache_async_readahead(mapping, &in->f_ra, in,
- page, index, req_pages - page_nr);
-
- /*
- * If the page isn't uptodate, we may need to start io on it
- */
- if (!PageUptodate(page)) {
- lock_page(page);
-
- /*
- * Page was truncated, or invalidated by the
- * filesystem. Redo the find/create, but this time the
- * page is kept locked, so there's no chance of another
- * race with truncate/invalidate.
- */
- if (!page->mapping) {
- unlock_page(page);
-retry_lookup:
- page = find_or_create_page(mapping, index,
- mapping_gfp_mask(mapping));
-
- if (!page) {
- error = -ENOMEM;
- break;
- }
- put_page(spd.pages[page_nr]);
- spd.pages[page_nr] = page;
- }
- /*
- * page was already under io and is now done, great
- */
- if (PageUptodate(page)) {
- unlock_page(page);
- goto fill_it;
- }
-
- /*
- * need to read in the page
- */
- error = mapping->a_ops->readpage(in, page);
- if (unlikely(error)) {
- /*
- * Re-lookup the page
- */
- if (error == AOP_TRUNCATED_PAGE)
- goto retry_lookup;
-
- break;
- }
- }
-fill_it:
- /*
- * i_size must be checked after PageUptodate.
- */
- isize = i_size_read(mapping->host);
- end_index = (isize - 1) >> PAGE_SHIFT;
- if (unlikely(!isize || index > end_index))
- break;
-
- /*
- * if this is the last page, see if we need to shrink
- * the length and stop
- */
- if (end_index == index) {
- unsigned int plen;
-
- /*
- * max good bytes in this page
- */
- plen = ((isize - 1) & ~PAGE_MASK) + 1;
- if (plen <= loff)
- break;
-
- /*
- * force quit after adding this page
- */
- this_len = min(this_len, plen - loff);
- len = this_len;
- }
-
- spd.partial[page_nr].offset = loff;
- spd.partial[page_nr].len = this_len;
- len -= this_len;
- loff = 0;
- spd.nr_pages++;
- index++;
- }
-
- /*
- * Release any pages at the end, if we quit early. 'page_nr' is how far
- * we got, 'nr_pages' is how many pages are in the map.
- */
- while (page_nr < nr_pages)
- put_page(spd.pages[page_nr++]);
- in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
-
- if (spd.nr_pages)
- error = splice_to_pipe(pipe, &spd);
-
- splice_shrink_spd(&spd);
- return error;
-}
-
/**
* generic_file_splice_read - splice data from file to a pipe
* @in: file to splice from
@@ -514,39 +290,53 @@ fill_it:
*
* Description:
* Will read pages from given file and fill them into a pipe. Can be
- * used as long as the address_space operations for the source implements
- * a readpage() hook.
+ * used as long as the file has a more or less sane ->read_iter().
*
*/
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
- loff_t isize, left;
- int ret;
-
- if (IS_DAX(in->f_mapping->host))
- return default_file_splice_read(in, ppos, pipe, len, flags);
+ struct iov_iter to;
+ struct kiocb kiocb;
+ loff_t isize;
+ int idx, ret;
isize = i_size_read(in->f_mapping->host);
if (unlikely(*ppos >= isize))
return 0;
- left = isize - *ppos;
- if (unlikely(left < len))
- len = left;
-
- ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
+ iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
+ idx = to.idx;
+ init_sync_kiocb(&kiocb, in);
+ kiocb.ki_pos = *ppos;
+ ret = in->f_op->read_iter(&kiocb, &to);
if (ret > 0) {
- *ppos += ret;
+ *ppos = kiocb.ki_pos;
file_accessed(in);
+ } else if (ret < 0) {
+ if (WARN_ON(to.idx != idx || to.iov_offset)) {
+ /*
+ * a bogus ->read_iter() has copied something and still
+ * returned an error instead of a short read.
+ */
+ to.idx = idx;
+ to.iov_offset = 0;
+ iov_iter_advance(&to, 0); /* to free what was emitted */
+ }
+ /*
+ * callers of ->splice_read() expect -EAGAIN on
+ * "can't put anything in there", rather than -EFAULT.
+ */
+ if (ret == -EFAULT)
+ ret = -EAGAIN;
}
return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
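For context, this is the path a user program exercises when it splices a regular file into a pipe; the rewritten generic_file_splice_read() now services it through ->read_iter() instead of the removed page-cache walk. A minimal user-space sketch (the input path is only an example):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int pipefd[2];
    int fd = open("/etc/hostname", O_RDONLY);
    ssize_t n;

    if (fd < 0 || pipe(pipefd) < 0)
        return 1;
    n = splice(fd, NULL, pipefd[1], NULL, 4096, 0);
    if (n > 0)
        fprintf(stderr, "spliced %zd bytes into the pipe\n", n);
    close(fd);
    close(pipefd[0]);
    close(pipefd[1]);
    return 0;
}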
-static const struct pipe_buf_operations default_pipe_buf_ops = {
+const struct pipe_buf_operations default_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = generic_pipe_buf_release,
@@ -570,7 +360,7 @@ const struct pipe_buf_operations nosteal_pipe_buf_ops = {
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);
-static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+static ssize_t kernel_readv(struct file *file, const struct kvec *vec,
unsigned long vlen, loff_t offset)
{
mm_segment_t old_fs;
@@ -602,102 +392,70 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
}
EXPORT_SYMBOL(kernel_write);
-ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
+ struct kvec *vec, __vec[PIPE_DEF_BUFFERS];
+ struct iov_iter to;
+ struct page **pages;
unsigned int nr_pages;
- unsigned int nr_freed;
- size_t offset;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
+ size_t offset, dummy, copied = 0;
ssize_t res;
- size_t this_len;
- int error;
int i;
- struct splice_pipe_desc spd = {
- .pages = pages,
- .partial = partial,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .flags = flags,
- .ops = &default_pipe_buf_ops,
- .spd_release = spd_release_page,
- };
- if (splice_grow_spd(pipe, &spd))
+ if (pipe->nrbufs == pipe->buffers)
+ return -EAGAIN;
+
+ /*
+ * Try to keep page boundaries matching to source pagecache ones -
+ * it probably won't be much help, but...
+ */
+ offset = *ppos & ~PAGE_MASK;
+
+ iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len + offset);
+
+ res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &dummy);
+ if (res <= 0)
return -ENOMEM;
- res = -ENOMEM;
+ nr_pages = res / PAGE_SIZE;
+
vec = __vec;
- if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
- vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
- if (!vec)
- goto shrink_ret;
+ if (nr_pages > PIPE_DEF_BUFFERS) {
+ vec = kmalloc(nr_pages * sizeof(struct kvec), GFP_KERNEL);
+ if (unlikely(!vec)) {
+ res = -ENOMEM;
+ goto out;
+ }
}
- offset = *ppos & ~PAGE_MASK;
- nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
- struct page *page;
+ pipe->bufs[to.idx].offset = offset;
+ pipe->bufs[to.idx].len -= offset;
- page = alloc_page(GFP_USER);
- error = -ENOMEM;
- if (!page)
- goto err;
-
- this_len = min_t(size_t, len, PAGE_SIZE - offset);
- vec[i].iov_base = (void __user *) page_address(page);
+ for (i = 0; i < nr_pages; i++) {
+ size_t this_len = min_t(size_t, len, PAGE_SIZE - offset);
+ vec[i].iov_base = page_address(pages[i]) + offset;
vec[i].iov_len = this_len;
- spd.pages[i] = page;
- spd.nr_pages++;
len -= this_len;
offset = 0;
}
- res = kernel_readv(in, vec, spd.nr_pages, *ppos);
- if (res < 0) {
- error = res;
- goto err;
- }
-
- error = 0;
- if (!res)
- goto err;
-
- nr_freed = 0;
- for (i = 0; i < spd.nr_pages; i++) {
- this_len = min_t(size_t, vec[i].iov_len, res);
- spd.partial[i].offset = 0;
- spd.partial[i].len = this_len;
- if (!this_len) {
- __free_page(spd.pages[i]);
- spd.pages[i] = NULL;
- nr_freed++;
- }
- res -= this_len;
- }
- spd.nr_pages -= nr_freed;
-
- res = splice_to_pipe(pipe, &spd);
- if (res > 0)
+ res = kernel_readv(in, vec, nr_pages, *ppos);
+ if (res > 0) {
+ copied = res;
*ppos += res;
+ }
-shrink_ret:
if (vec != __vec)
kfree(vec);
- splice_shrink_spd(&spd);
+out:
+ for (i = 0; i < nr_pages; i++)
+ put_page(pages[i]);
+ kvfree(pages);
+ iov_iter_advance(&to, copied); /* truncates and discards */
return res;
-
-err:
- for (i = 0; i < spd.nr_pages; i++)
- __free_page(spd.pages[i]);
-
- res = error;
- goto shrink_ret;
}
-EXPORT_SYMBOL(default_file_splice_read);
/*
* Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
@@ -757,13 +515,12 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
while (pipe->nrbufs) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
- const struct pipe_buf_operations *ops = buf->ops;
sd->len = buf->len;
if (sd->len > sd->total_len)
sd->len = sd->total_len;
- ret = buf->ops->confirm(pipe, buf);
+ ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
@@ -783,8 +540,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
sd->total_len -= ret;
if (!buf->len) {
- buf->ops = NULL;
- ops->release(pipe, buf);
+ pipe_buf_release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
@@ -1003,7 +759,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
if (idx == pipe->buffers - 1)
idx = -1;
- ret = buf->ops->confirm(pipe, buf);
+ ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
@@ -1030,11 +786,9 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
while (ret) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
if (ret >= buf->len) {
- const struct pipe_buf_operations *ops = buf->ops;
ret -= buf->len;
buf->len = 0;
- buf->ops = NULL;
- ops->release(pipe, buf);
+ pipe_buf_release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files)
@@ -1273,10 +1027,8 @@ out_release:
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
- if (buf->ops) {
- buf->ops->release(pipe, buf);
- buf->ops = NULL;
- }
+ if (buf->ops)
+ pipe_buf_release(pipe, buf);
}
if (!bytes)
@@ -1342,6 +1094,20 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
}
EXPORT_SYMBOL(do_splice_direct);
+static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
+{
+ while (pipe->nrbufs == pipe->buffers) {
+ if (flags & SPLICE_F_NONBLOCK)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ pipe->waiting_writers++;
+ pipe_wait(pipe);
+ pipe->waiting_writers--;
+ }
+ return 0;
+}
+
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
@@ -1424,8 +1190,13 @@ static long do_splice(struct file *in, loff_t __user *off_in,
offset = in->f_pos;
}
- ret = do_splice_to(in, &offset, opipe, len, flags);
-
+ pipe_lock(opipe);
+ ret = wait_for_space(opipe, flags);
+ if (!ret)
+ ret = do_splice_to(in, &offset, opipe, len, flags);
+ pipe_unlock(opipe);
+ if (ret > 0)
+ wakeup_pipe_readers(opipe);
if (!off_in)
in->f_pos = offset;
else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
@@ -1437,106 +1208,50 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return -EINVAL;
}
-/*
- * Map an iov into an array of pages and offset/length tupples. With the
- * partial_page structure, we can map several non-contiguous ranges into
- * our ones pages[] map instead of splitting that operation into pieces.
- * Could easily be exported as a generic helper for other users, in which
- * case one would probably want to add a 'max_nr_pages' parameter as well.
- */
-static int get_iovec_page_array(const struct iovec __user *iov,
- unsigned int nr_vecs, struct page **pages,
- struct partial_page *partial, bool aligned,
- unsigned int pipe_buffers)
+static int iter_to_pipe(struct iov_iter *from,
+ struct pipe_inode_info *pipe,
+ unsigned flags)
{
- int buffers = 0, error = 0;
-
- while (nr_vecs) {
- unsigned long off, npages;
- struct iovec entry;
- void __user *base;
- size_t len;
- int i;
-
- error = -EFAULT;
- if (copy_from_user(&entry, iov, sizeof(entry)))
- break;
-
- base = entry.iov_base;
- len = entry.iov_len;
-
- /*
- * Sanity check this iovec. 0 read succeeds.
- */
- error = 0;
- if (unlikely(!len))
- break;
- error = -EFAULT;
- if (!access_ok(VERIFY_READ, base, len))
- break;
-
- /*
- * Get this base offset and number of pages, then map
- * in the user pages.
- */
- off = (unsigned long) base & ~PAGE_MASK;
-
- /*
- * If asked for alignment, the offset must be zero and the
- * length a multiple of the PAGE_SIZE.
- */
- error = -EINVAL;
- if (aligned && (off || len & ~PAGE_MASK))
- break;
-
- npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (npages > pipe_buffers - buffers)
- npages = pipe_buffers - buffers;
-
- error = get_user_pages_fast((unsigned long)base, npages,
- 0, &pages[buffers]);
-
- if (unlikely(error <= 0))
+ struct pipe_buffer buf = {
+ .ops = &user_page_pipe_buf_ops,
+ .flags = flags
+ };
+ size_t total = 0;
+ int ret = 0;
+ bool failed = false;
+
+ while (iov_iter_count(from) && !failed) {
+ struct page *pages[16];
+ ssize_t copied;
+ size_t start;
+ int n;
+
+ copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
+ if (copied <= 0) {
+ ret = copied;
break;
-
- /*
- * Fill this contiguous range into the partial page map.
- */
- for (i = 0; i < error; i++) {
- const int plen = min_t(size_t, len, PAGE_SIZE - off);
-
- partial[buffers].offset = off;
- partial[buffers].len = plen;
-
- off = 0;
- len -= plen;
- buffers++;
}
- /*
- * We didn't complete this iov, stop here since it probably
- * means we have to move some of this into a pipe to
- * be able to continue.
- */
- if (len)
- break;
-
- /*
- * Don't continue if we mapped fewer pages than we asked for,
- * or if we mapped the max number of pages that we have
- * room for.
- */
- if (error < npages || buffers == pipe_buffers)
- break;
-
- nr_vecs--;
- iov++;
+ for (n = 0; copied; n++, start = 0) {
+ int size = min_t(int, copied, PAGE_SIZE - start);
+ if (!failed) {
+ buf.page = pages[n];
+ buf.offset = start;
+ buf.len = size;
+ ret = add_to_pipe(pipe, &buf);
+ if (unlikely(ret < 0)) {
+ failed = true;
+ } else {
+ iov_iter_advance(from, ret);
+ total += ret;
+ }
+ } else {
+ put_page(pages[n]);
+ }
+ copied -= size;
+ }
}
-
- if (buffers)
- return buffers;
-
- return error;
+ return total ? total : ret;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
@@ -1590,38 +1305,36 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
* as splice-from-memory, where the regular splice is splice-from-file (or
* to file). In both cases the output is a pipe, naturally.
*/
-static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+static long vmsplice_to_pipe(struct file *file, const struct iovec __user *uiov,
unsigned long nr_segs, unsigned int flags)
{
struct pipe_inode_info *pipe;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct splice_pipe_desc spd = {
- .pages = pages,
- .partial = partial,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .flags = flags,
- .ops = &user_page_pipe_buf_ops,
- .spd_release = spd_release_page,
- };
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct iov_iter from;
long ret;
+ unsigned buf_flag = 0;
+
+ if (flags & SPLICE_F_GIFT)
+ buf_flag = PIPE_BUF_FLAG_GIFT;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
- spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
- spd.partial, false,
- spd.nr_pages_max);
- if (spd.nr_pages <= 0)
- ret = spd.nr_pages;
- else
- ret = splice_to_pipe(pipe, &spd);
+ ret = import_iovec(WRITE, uiov, nr_segs,
+ ARRAY_SIZE(iovstack), &iov, &from);
+ if (ret < 0)
+ return ret;
- splice_shrink_spd(&spd);
+ pipe_lock(pipe);
+ ret = wait_for_space(pipe, flags);
+ if (!ret)
+ ret = iter_to_pipe(&from, pipe, buf_flag);
+ pipe_unlock(pipe);
+ if (ret > 0)
+ wakeup_pipe_readers(pipe);
+ kfree(iov);
return ret;
}
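vmsplice_to_pipe() above is the backend of vmsplice(2): the user iovecs are imported with import_iovec() and their pages pushed into the pipe by iter_to_pipe(). A small user-space sketch of the call (illustrative only):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    static char msg[] = "hello from vmsplice\n";
    struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) - 1 };
    int pipefd[2];
    char out[64];
    ssize_t n;

    if (pipe(pipefd) < 0)
        return 1;
    n = vmsplice(pipefd[1], &iov, 1, 0);    /* push the user pages into the pipe */
    if (n > 0)
        n = read(pipefd[0], out, sizeof(out));
    if (n > 0)
        n = write(STDOUT_FILENO, out, (size_t)n);
    return n > 0 ? 0 : 1;
}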
@@ -1876,7 +1589,7 @@ retry:
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- ibuf->ops->get(ipipe, ibuf);
+ pipe_buf_get(ipipe, ibuf);
*obuf = *ibuf;
/*
@@ -1948,7 +1661,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- ibuf->ops->get(ipipe, ibuf);
+ pipe_buf_get(ipipe, ibuf);
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index dc1358b5ec95..ac2de0ed69ad 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -233,8 +233,8 @@ void sysfs_remove_group(struct kobject *kobj,
kn = kernfs_find_and_get(parent, grp->name);
if (!kn) {
WARN(!kn, KERN_WARNING
- "sysfs group %p not found for kobject '%s'\n",
- grp, kobject_name(kobj));
+ "sysfs group '%s' not found for kobject '%s'\n",
+ grp->name, kobject_name(kobj));
return;
}
} else {
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 632570617327..e855bf8d74b4 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -94,7 +94,7 @@ static int udf_adinicb_write_begin(struct file *file,
return -ENOMEM;
*pagep = page;
- if (!PageUptodate(page) && len != PAGE_SIZE)
+ if (!PageUptodate(page))
__udf_adinicb_readpage(page);
return 0;
}
@@ -105,11 +105,25 @@ static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return 0;
}
+static int udf_adinicb_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = page->mapping->host;
+ loff_t last_pos = pos + copied;
+ if (last_pos > inode->i_size)
+ i_size_write(inode, last_pos);
+ set_page_dirty(page);
+ unlock_page(page);
+ put_page(page);
+ return copied;
+}
+
const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
.write_begin = udf_adinicb_write_begin,
- .write_end = simple_write_end,
+ .write_end = udf_adinicb_write_end,
.direct_IO = udf_adinicb_direct_IO,
};
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index fc593c869493..584e87e11cb6 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -52,6 +52,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_inode_fork.o \
xfs_inode_buf.o \
xfs_log_rlimit.o \
+ xfs_ag_resv.o \
xfs_rmap.o \
xfs_rmap_btree.o \
xfs_sb.o \
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
new file mode 100644
index 000000000000..e3ae0f2b4294
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_trans.h"
+#include "xfs_bit.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ag_resv.h"
+#include "xfs_trans_space.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_btree.h"
+
+/*
+ * Per-AG Block Reservations
+ *
+ * For some kinds of allocation group metadata structures, it is advantageous
+ * to reserve a small number of blocks in each AG so that future expansions of
+ * that data structure do not encounter ENOSPC because errors during a btree
+ * split cause the filesystem to go offline.
+ *
+ * Prior to the introduction of reflink, this wasn't an issue because the free
+ * space btrees maintain a reserve of space (the AGFL) to handle any expansion
+ * that may be necessary; and allocations of other metadata (inodes, BMBT,
+ * dir/attr) aren't restricted to a single AG. However, with reflink it is
+ * possible to allocate all the space in an AG, have subsequent reflink/CoW
+ * activity expand the refcount btree, and discover that there's no space left
+ * to handle that expansion. Since we can calculate the maximum size of the
+ * refcount btree, we can reserve space for it and avoid ENOSPC.
+ *
+ * Handling per-AG reservations consists of four changes to the allocator's
+ * behavior: First, because these reservations are always needed, we decrease
+ * the ag_max_usable counter to reflect the size of the AG after the reserved
+ * blocks are taken. Second, the reservations must be reflected in the
+ * fdblocks count to maintain proper accounting. Third, each AG must maintain
+ * its own reserved block counter so that we can calculate the amount of space
+ * that must remain free to maintain the reservations. Fourth, the "remaining
+ * reserved blocks" count must be used when calculating the length of the
+ * longest free extent in an AG and to clamp maxlen in the per-AG allocation
+ * functions. In other words, we maintain a virtual allocation via in-core
+ * accounting tricks so that we don't have to clean up after a crash. :)
+ *
+ * Reserved blocks can be managed by passing one of the enum xfs_ag_resv_type
+ * values via struct xfs_alloc_arg or directly to the xfs_free_extent
+ * function. It might seem a little funny to maintain a reservoir of blocks
+ * to feed another reservoir, but the AGFL only holds enough blocks to get
+ * through the next transaction. The per-AG reservation is to ensure (we
+ * hope) that each AG never runs out of blocks. Each data structure wanting
+ * to use the reservation system should update ask/used in xfs_ag_resv_init.
+ */
+
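As a rough model of the ask/used/reserved bookkeeping described above (a stand-alone sketch with simplified names, not the XFS implementation): a reservation starts out holding ask - used blocks, allocations drain it before touching the general pool, and frees refill it up to the original ask.

#include <assert.h>

struct ag_resv {
    unsigned long ar_asked;     /* blocks we asked to hold back */
    unsigned long ar_reserved;  /* blocks still held back */
};

static void resv_init(struct ag_resv *r, unsigned long ask, unsigned long used)
{
    if (used > ask)
        ask = used;
    r->ar_asked = ask;
    r->ar_reserved = ask - used;
}

/* Allocation consumes the reservation first; the remainder is charged to
 * the ordinary free-space pool (not modelled here). */
static unsigned long resv_alloc(struct ag_resv *r, unsigned long len)
{
    unsigned long from_resv = len < r->ar_reserved ? len : r->ar_reserved;

    r->ar_reserved -= from_resv;
    return len - from_resv;
}

/* Freed blocks refill the reservation up to ar_asked; the rest return to
 * the pool. */
static unsigned long resv_free(struct ag_resv *r, unsigned long len)
{
    unsigned long room = r->ar_asked - r->ar_reserved;
    unsigned long to_resv = len < room ? len : room;

    r->ar_reserved += to_resv;
    return len - to_resv;
}

int main(void)
{
    struct ag_resv r;

    resv_init(&r, 100, 10);                 /* 90 blocks held back */
    assert(resv_alloc(&r, 30) == 0);        /* entirely from the reservation */
    assert(r.ar_reserved == 60);
    assert(resv_free(&r, 50) == 10);        /* 40 refill, 10 back to the pool */
    assert(r.ar_reserved == 100);
    return 0;
}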
+/*
+ * Are we critically low on blocks? For now we'll define that as the number
+ * of blocks we can get our hands on being less than 10% of what we reserved
+ * or less than some arbitrary number (maximum btree height).
+ */
+bool
+xfs_ag_resv_critical(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ xfs_extlen_t avail;
+ xfs_extlen_t orig;
+
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ avail = pag->pagf_freeblks - pag->pag_agfl_resv.ar_reserved;
+ orig = pag->pag_meta_resv.ar_asked;
+ break;
+ case XFS_AG_RESV_AGFL:
+ avail = pag->pagf_freeblks + pag->pagf_flcount -
+ pag->pag_meta_resv.ar_reserved;
+ orig = pag->pag_agfl_resv.ar_asked;
+ break;
+ default:
+ ASSERT(0);
+ return false;
+ }
+
+ trace_xfs_ag_resv_critical(pag, type, avail);
+
+ /* Critically low if less than 10% or max btree height remains. */
+ return avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS;
+}
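As a worked example with made-up numbers: if ar_asked is 400 blocks and only 35 remain available, then 35 is below 10% of 400, so the AG is flagged as critically low regardless of the XFS_BTREE_MAXLEVELS floor.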
+
+/*
+ * How many blocks are reserved but not used, and therefore must not be
+ * allocated away?
+ */
+xfs_extlen_t
+xfs_ag_resv_needed(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ xfs_extlen_t len;
+
+ len = pag->pag_meta_resv.ar_reserved + pag->pag_agfl_resv.ar_reserved;
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ case XFS_AG_RESV_AGFL:
+ len -= xfs_perag_resv(pag, type)->ar_reserved;
+ break;
+ case XFS_AG_RESV_NONE:
+ /* empty */
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ trace_xfs_ag_resv_needed(pag, type, len);
+
+ return len;
+}
+
+/* Clean out a reservation */
+static int
+__xfs_ag_resv_free(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ struct xfs_ag_resv *resv;
+ xfs_extlen_t oldresv;
+ int error;
+
+ trace_xfs_ag_resv_free(pag, type, 0);
+
+ resv = xfs_perag_resv(pag, type);
+ pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+ /*
+ * AGFL blocks are always considered "free", so whatever
+ * was reserved at mount time must be given back at umount.
+ */
+ if (type == XFS_AG_RESV_AGFL)
+ oldresv = resv->ar_orig_reserved;
+ else
+ oldresv = resv->ar_reserved;
+ error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
+ resv->ar_reserved = 0;
+ resv->ar_asked = 0;
+
+ if (error)
+ trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
+ error, _RET_IP_);
+ return error;
+}
+
+/* Free a per-AG reservation. */
+int
+xfs_ag_resv_free(
+ struct xfs_perag *pag)
+{
+ int error;
+ int err2;
+
+ error = __xfs_ag_resv_free(pag, XFS_AG_RESV_AGFL);
+ err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
+ if (err2 && !error)
+ error = err2;
+ return error;
+}
+
+static int
+__xfs_ag_resv_init(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type,
+ xfs_extlen_t ask,
+ xfs_extlen_t used)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_ag_resv *resv;
+ int error;
+
+ resv = xfs_perag_resv(pag, type);
+ if (used > ask)
+ ask = used;
+ resv->ar_asked = ask;
+ resv->ar_reserved = resv->ar_orig_reserved = ask - used;
+ mp->m_ag_max_usable -= ask;
+
+ trace_xfs_ag_resv_init(pag, type, ask);
+
+ error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
+ if (error)
+ trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
+ error, _RET_IP_);
+
+ return error;
+}
+
+/* Create a per-AG block reservation. */
+int
+xfs_ag_resv_init(
+ struct xfs_perag *pag)
+{
+ xfs_extlen_t ask;
+ xfs_extlen_t used;
+ int error = 0;
+
+ /* Create the metadata reservation. */
+ if (pag->pag_meta_resv.ar_asked == 0) {
+ ask = used = 0;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error)
+ goto out;
+ }
+
+ /* Create the AGFL metadata reservation */
+ if (pag->pag_agfl_resv.ar_asked == 0) {
+ ask = used = 0;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_AGFL, ask, used);
+ if (error)
+ goto out;
+ }
+
+out:
+ return error;
+}
+
+/* Allocate a block from the reservation. */
+void
+xfs_ag_resv_alloc_extent(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type,
+ struct xfs_alloc_arg *args)
+{
+ struct xfs_ag_resv *resv;
+ xfs_extlen_t len;
+ uint field;
+
+ trace_xfs_ag_resv_alloc_extent(pag, type, args->len);
+
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ case XFS_AG_RESV_AGFL:
+ resv = xfs_perag_resv(pag, type);
+ break;
+ default:
+ ASSERT(0);
+ /* fall through */
+ case XFS_AG_RESV_NONE:
+ field = args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
+ XFS_TRANS_SB_FDBLOCKS;
+ xfs_trans_mod_sb(args->tp, field, -(int64_t)args->len);
+ return;
+ }
+
+ len = min_t(xfs_extlen_t, args->len, resv->ar_reserved);
+ resv->ar_reserved -= len;
+ if (type == XFS_AG_RESV_AGFL)
+ return;
+ /* Allocations of reserved blocks only need on-disk sb updates... */
+ xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS, -(int64_t)len);
+ /* ...but non-reserved blocks need in-core and on-disk updates. */
+ if (args->len > len)
+ xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_FDBLOCKS,
+ -((int64_t)args->len - len));
+}
+
+/* Free a block to the reservation. */
+void
+xfs_ag_resv_free_extent(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type,
+ struct xfs_trans *tp,
+ xfs_extlen_t len)
+{
+ xfs_extlen_t leftover;
+ struct xfs_ag_resv *resv;
+
+ trace_xfs_ag_resv_free_extent(pag, type, len);
+
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ case XFS_AG_RESV_AGFL:
+ resv = xfs_perag_resv(pag, type);
+ break;
+ default:
+ ASSERT(0);
+ /* fall through */
+ case XFS_AG_RESV_NONE:
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
+ return;
+ }
+
+ leftover = min_t(xfs_extlen_t, len, resv->ar_asked - resv->ar_reserved);
+ resv->ar_reserved += leftover;
+ if (type == XFS_AG_RESV_AGFL)
+ return;
+ /* Freeing into the reserved pool only requires on-disk update... */
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, len);
+ /* ...but freeing beyond that requires in-core and on-disk update. */
+ if (len > leftover)
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len - leftover);
+}
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
new file mode 100644
index 000000000000..8d6c687deef3
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_AG_RESV_H__
+#define __XFS_AG_RESV_H__
+
+int xfs_ag_resv_free(struct xfs_perag *pag);
+int xfs_ag_resv_init(struct xfs_perag *pag);
+
+bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
+xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
+ enum xfs_ag_resv_type type);
+
+void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
+ struct xfs_alloc_arg *args);
+void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
+ struct xfs_trans *tp, xfs_extlen_t len);
+
+#endif /* __XFS_AG_RESV_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 05b5243d89f6..ca75dc90ebe0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -37,6 +37,7 @@
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_ag_resv.h"
struct workqueue_struct *xfs_alloc_wq;
@@ -74,14 +75,8 @@ xfs_prealloc_blocks(
* extents need to be actually allocated. To get around this, we explicitly set
* aside a few blocks which will not be reserved in delayed allocation.
*
- * When rmap is disabled, we need to reserve 4 fsbs _per AG_ for the freelist
- * and 4 more to handle a potential split of the file's bmap btree.
- *
- * When rmap is enabled, we must also be able to handle two rmap btree inserts
- * to record both the file data extent and a new bmbt block. The bmbt block
- * might not be in the same AG as the file data extent. In the worst case
- * the bmap btree splits multiple levels and all the new blocks come from
- * different AGs, so set aside enough to handle rmap btree splits in all AGs.
+ * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
+ * potential split of the file's bmap btree.
*/
unsigned int
xfs_alloc_set_aside(
@@ -90,8 +85,6 @@ xfs_alloc_set_aside(
unsigned int blocks;
blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
- blocks += mp->m_sb.sb_agcount * mp->m_rmap_maxlevels;
return blocks;
}
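With XFS_ALLOC_AGFL_RESERVE defined as 4 (see the xfs_alloc.h hunk below), a filesystem with, say, 8 AGs now sets aside 4 + 8 * 4 = 36 blocks; the extra rmap-btree allowance is no longer added here.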
@@ -265,7 +258,7 @@ xfs_alloc_compute_diff(
xfs_agblock_t wantbno, /* target starting block */
xfs_extlen_t wantlen, /* target length */
xfs_extlen_t alignment, /* target alignment */
- char userdata, /* are we allocating data? */
+ int datatype, /* are we allocating data? */
xfs_agblock_t freebno, /* freespace's starting block */
xfs_extlen_t freelen, /* freespace's length */
xfs_agblock_t *newbnop) /* result: best start block from free */
@@ -276,6 +269,7 @@ xfs_alloc_compute_diff(
xfs_extlen_t newlen1=0; /* length with newbno1 */
xfs_extlen_t newlen2=0; /* length with newbno2 */
xfs_agblock_t wantend; /* end of target extent */
+ bool userdata = xfs_alloc_is_userdata(datatype);
ASSERT(freelen >= wantlen);
freeend = freebno + freelen;
@@ -680,12 +674,29 @@ xfs_alloc_ag_vextent(
xfs_alloc_arg_t *args) /* argument structure for allocation */
{
int error=0;
+ xfs_extlen_t reservation;
+ xfs_extlen_t oldmax;
ASSERT(args->minlen > 0);
ASSERT(args->maxlen > 0);
ASSERT(args->minlen <= args->maxlen);
ASSERT(args->mod < args->prod);
ASSERT(args->alignment > 0);
+
+ /*
+ * Clamp maxlen to the amount of free space minus any reservations
+ * that have been made.
+ */
+ oldmax = args->maxlen;
+ reservation = xfs_ag_resv_needed(args->pag, args->resv);
+ if (args->maxlen > args->pag->pagf_freeblks - reservation)
+ args->maxlen = args->pag->pagf_freeblks - reservation;
+ if (args->maxlen == 0) {
+ args->agbno = NULLAGBLOCK;
+ args->maxlen = oldmax;
+ return 0;
+ }
+
/*
* Branch to correct routine based on the type.
*/
@@ -705,12 +716,14 @@ xfs_alloc_ag_vextent(
/* NOTREACHED */
}
+ args->maxlen = oldmax;
+
if (error || args->agbno == NULLAGBLOCK)
return error;
ASSERT(args->len >= args->minlen);
ASSERT(args->len <= args->maxlen);
- ASSERT(!args->wasfromfl || !args->isfl);
+ ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
ASSERT(args->agbno % args->alignment == 0);
/* if not file data, insert new block into the reverse map btree */
@@ -732,12 +745,7 @@ xfs_alloc_ag_vextent(
args->agbno, args->len));
}
- if (!args->isfl) {
- xfs_trans_mod_sb(args->tp, args->wasdel ?
- XFS_TRANS_SB_RES_FDBLOCKS :
- XFS_TRANS_SB_FDBLOCKS,
- -((long)(args->len)));
- }
+ xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
XFS_STATS_INC(args->mp, xs_allocx);
XFS_STATS_ADD(args->mp, xs_allocb, args->len);
@@ -917,7 +925,7 @@ xfs_alloc_find_best_extent(
sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment,
- args->userdata, *sbnoa,
+ args->datatype, *sbnoa,
*slena, &new);
/*
@@ -1101,7 +1109,7 @@ restart:
if (args->len < blen)
continue;
ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->userdata, ltbnoa,
+ args->alignment, args->datatype, ltbnoa,
ltlena, &ltnew);
if (ltnew != NULLAGBLOCK &&
(args->len > blen || ltdiff < bdiff)) {
@@ -1254,7 +1262,7 @@ restart:
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->userdata, ltbnoa,
+ args->alignment, args->datatype, ltbnoa,
ltlena, &ltnew);
error = xfs_alloc_find_best_extent(args,
@@ -1271,7 +1279,7 @@ restart:
args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
xfs_alloc_fix_len(args);
gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->userdata, gtbnoa,
+ args->alignment, args->datatype, gtbnoa,
gtlena, &gtnew);
error = xfs_alloc_find_best_extent(args,
@@ -1331,7 +1339,7 @@ restart:
}
rlen = args->len;
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
- args->userdata, ltbnoa, ltlena, &ltnew);
+ args->datatype, ltbnoa, ltlena, &ltnew);
ASSERT(ltnew >= ltbno);
ASSERT(ltnew + rlen <= ltbnoa + ltlena);
ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
@@ -1583,6 +1591,7 @@ xfs_alloc_ag_vextent_small(
int *stat) /* status: 0-freelist, 1-normal/none */
{
struct xfs_owner_info oinfo;
+ struct xfs_perag *pag;
int error;
xfs_agblock_t fbno;
xfs_extlen_t flen;
@@ -1600,7 +1609,8 @@ xfs_alloc_ag_vextent_small(
* to respect minleft even when pulling from the
* freelist.
*/
- else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
+ else if (args->minlen == 1 && args->alignment == 1 &&
+ args->resv != XFS_AG_RESV_AGFL &&
(be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
> args->minleft)) {
error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
@@ -1608,9 +1618,9 @@ xfs_alloc_ag_vextent_small(
goto error0;
if (fbno != NULLAGBLOCK) {
xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
- args->userdata);
+ xfs_alloc_allow_busy_reuse(args->datatype));
- if (args->userdata) {
+ if (xfs_alloc_is_userdata(args->datatype)) {
xfs_buf_t *bp;
bp = xfs_btree_get_bufs(args->mp, args->tp,
@@ -1629,13 +1639,18 @@ xfs_alloc_ag_vextent_small(
/*
* If we're feeding an AGFL block to something that
* doesn't live in the free space, we need to clear
- * out the OWN_AG rmap.
+ * out the OWN_AG rmap and add the block back to
+ * the AGFL per-AG reservation.
*/
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
error = xfs_rmap_free(args->tp, args->agbp, args->agno,
fbno, 1, &oinfo);
if (error)
goto error0;
+ pag = xfs_perag_get(args->mp, args->agno);
+ xfs_ag_resv_free_extent(pag, XFS_AG_RESV_AGFL,
+ args->tp, 1);
+ xfs_perag_put(pag);
*stat = 0;
return 0;
@@ -1683,7 +1698,7 @@ xfs_free_ag_extent(
xfs_agblock_t bno,
xfs_extlen_t len,
struct xfs_owner_info *oinfo,
- int isfl)
+ enum xfs_ag_resv_type type)
{
xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
@@ -1911,21 +1926,22 @@ xfs_free_ag_extent(
*/
pag = xfs_perag_get(mp, agno);
error = xfs_alloc_update_counters(tp, pag, agbp, len);
+ xfs_ag_resv_free_extent(pag, type, tp, len);
xfs_perag_put(pag);
if (error)
goto error0;
- if (!isfl)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
XFS_STATS_INC(mp, xs_freex);
XFS_STATS_ADD(mp, xs_freeb, len);
- trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
+ trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
+ haveleft, haveright);
return 0;
error0:
- trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
+ trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
+ -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
@@ -1950,21 +1966,43 @@ xfs_alloc_compute_maxlevels(
}
/*
- * Find the length of the longest extent in an AG.
+ * Find the length of the longest extent in an AG. The 'need' parameter
+ * specifies how much space we're going to need for the AGFL and the
+ * 'reserved' parameter tells us how many blocks in this AG are reserved for
+ * other callers.
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(
struct xfs_mount *mp,
struct xfs_perag *pag,
- xfs_extlen_t need)
+ xfs_extlen_t need,
+ xfs_extlen_t reserved)
{
xfs_extlen_t delta = 0;
+ /*
+ * If the AGFL needs a recharge, we'll have to subtract that from the
+ * longest extent.
+ */
if (need > pag->pagf_flcount)
delta = need - pag->pagf_flcount;
+ /*
+ * If we cannot maintain others' reservations with space from the
+ * not-longest freesp extents, we'll have to subtract /that/ from
+ * the longest extent too.
+ */
+ if (pag->pagf_freeblks - pag->pagf_longest < reserved)
+ delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
+
+ /*
+ * If the longest extent is long enough to satisfy all the
+ * reservations and AGFL rules in place, we can return this extent.
+ */
if (pag->pagf_longest > delta)
return pag->pagf_longest - delta;
+
+ /* Otherwise, let the caller try for 1 block if there's space. */
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
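A worked example with hypothetical counters: pagf_longest = 100, pagf_freeblks = 120, pagf_flcount = 2, need = 6, reserved = 30. The AGFL recharge contributes delta = 6 - 2 = 4, the reservation shortfall adds 30 - (120 - 100) = 10 more, so the usable longest extent is 100 - 14 = 86 blocks.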
@@ -2004,20 +2042,24 @@ xfs_alloc_space_available(
{
struct xfs_perag *pag = args->pag;
xfs_extlen_t longest;
+ xfs_extlen_t reservation; /* blocks that are still reserved */
int available;
if (flags & XFS_ALLOC_FLAG_FREEING)
return true;
+ reservation = xfs_ag_resv_needed(pag, args->resv);
+
/* do we have enough contiguous free space for the allocation? */
- longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free);
+ longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
+ reservation);
if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
return false;
- /* do have enough free space remaining for the allocation? */
+ /* do we have enough free space remaining for the allocation? */
available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
- min_free - args->total);
- if (available < (int)args->minleft)
+ reservation - min_free - args->total);
+ if (available < (int)args->minleft || available <= 0)
return false;
return true;
@@ -2058,7 +2100,7 @@ xfs_alloc_fix_freelist(
* somewhere else if we are not being asked to try harder at this
* point
*/
- if (pag->pagf_metadata && args->userdata &&
+ if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
(flags & XFS_ALLOC_FLAG_TRYLOCK)) {
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
goto out_agbp_relse;
@@ -2124,7 +2166,7 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
- &targs.oinfo, 1);
+ &targs.oinfo, XFS_AG_RESV_AGFL);
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
@@ -2135,7 +2177,7 @@ xfs_alloc_fix_freelist(
targs.mp = mp;
targs.agbp = agbp;
targs.agno = args->agno;
- targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
+ targs.alignment = targs.minlen = targs.prod = 1;
targs.type = XFS_ALLOCTYPE_THIS_AG;
targs.pag = pag;
error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
@@ -2146,6 +2188,7 @@ xfs_alloc_fix_freelist(
while (pag->pagf_flcount < need) {
targs.agbno = 0;
targs.maxlen = need - pag->pagf_flcount;
+ targs.resv = XFS_AG_RESV_AGFL;
/* Allocate as many blocks as possible at once. */
error = xfs_alloc_ag_vextent(&targs);
@@ -2633,7 +2676,7 @@ xfs_alloc_vextent(
* Try near allocation first, then anywhere-in-ag after
* the first a.g. fails.
*/
- if ((args->userdata & XFS_ALLOC_INITIAL_USER_DATA) &&
+ if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
(mp->m_flags & XFS_MOUNT_32BITINODES)) {
args->fsbno = XFS_AGB_TO_FSB(mp,
((mp->m_agfrotor / rotorstep) %
@@ -2766,7 +2809,7 @@ xfs_alloc_vextent(
#endif
/* Zero the extent if we were asked to do so */
- if (args->userdata & XFS_ALLOC_USERDATA_ZERO) {
+ if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
error = xfs_zero_extent(args->ip, args->fsbno, args->len);
if (error)
goto error0;
@@ -2825,7 +2868,8 @@ xfs_free_extent(
struct xfs_trans *tp, /* transaction pointer */
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len, /* length of extent */
- struct xfs_owner_info *oinfo) /* extent owner */
+ struct xfs_owner_info *oinfo, /* extent owner */
+ enum xfs_ag_resv_type type) /* block reservation type */
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_buf *agbp;
@@ -2834,6 +2878,7 @@ xfs_free_extent(
int error;
ASSERT(len != 0);
+ ASSERT(type != XFS_AG_RESV_AGFL);
if (XFS_TEST_ERROR(false, mp,
XFS_ERRTAG_FREE_EXTENT,
@@ -2851,7 +2896,7 @@ xfs_free_extent(
agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
err);
- error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, 0);
+ error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
if (error)
goto err;
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 6fe2d6b7cfe9..7c404a6b0ae3 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -85,20 +85,33 @@ typedef struct xfs_alloc_arg {
xfs_extlen_t len; /* output: actual size of extent */
xfs_alloctype_t type; /* allocation type XFS_ALLOCTYPE_... */
xfs_alloctype_t otype; /* original allocation type */
+ int datatype; /* mask defining data type treatment */
char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */
- char isfl; /* set if is freelist blocks - !acctg */
- char userdata; /* mask defining userdata treatment */
xfs_fsblock_t firstblock; /* io first block allocated */
struct xfs_owner_info oinfo; /* owner of blocks being allocated */
+ enum xfs_ag_resv_type resv; /* block reservation to use */
} xfs_alloc_arg_t;
/*
- * Defines for userdata
+ * Defines for datatype
*/
#define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
+#define XFS_ALLOC_NOBUSY (1 << 3)/* Busy extents not allowed */
+
+static inline bool
+xfs_alloc_is_userdata(int datatype)
+{
+ return (datatype & ~XFS_ALLOC_NOBUSY) != 0;
+}
+
+static inline bool
+xfs_alloc_allow_busy_reuse(int datatype)
+{
+ return (datatype & XFS_ALLOC_NOBUSY) == 0;
+}
/* freespace limit calculations */
#define XFS_ALLOC_AGFL_RESERVE 4
@@ -106,7 +119,8 @@ unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
- struct xfs_perag *pag, xfs_extlen_t need);
+ struct xfs_perag *pag, xfs_extlen_t need,
+ xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
struct xfs_perag *pag);
@@ -184,7 +198,8 @@ xfs_free_extent(
struct xfs_trans *tp, /* transaction pointer */
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len, /* length of extent */
- struct xfs_owner_info *oinfo);/* extent owner */
+ struct xfs_owner_info *oinfo, /* extent owner */
+ enum xfs_ag_resv_type type); /* block reservation type */
int /* error */
xfs_alloc_lookup_ge(
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b060bca93402..9d7f61d36645 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -47,6 +47,7 @@
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
+#include "xfs_ag_resv.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -1388,7 +1389,7 @@ xfs_bmap_search_multi_extents(
* Else, *lastxp will be set to the index of the found
* entry; *gotp will contain the entry.
*/
-STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
+xfs_bmbt_rec_host_t * /* pointer to found extent entry */
xfs_bmap_search_extents(
xfs_inode_t *ip, /* incore inode pointer */
xfs_fileoff_t bno, /* block number searched for */
@@ -3347,7 +3348,8 @@ xfs_bmap_adjacent(
mp = ap->ip->i_mount;
nullfb = *ap->firstblock == NULLFSBLOCK;
- rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
+ rt = XFS_IS_REALTIME_INODE(ap->ip) &&
+ xfs_alloc_is_userdata(ap->datatype);
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
/*
* If allocating at eof, and there's a previous real block,
@@ -3501,7 +3503,8 @@ xfs_bmap_longest_free_extent(
}
longest = xfs_alloc_longest_free_extent(mp, pag,
- xfs_alloc_min_freelist(mp, pag));
+ xfs_alloc_min_freelist(mp, pag),
+ xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (*blen < longest)
*blen = longest;
@@ -3622,7 +3625,7 @@ xfs_bmap_btalloc(
{
xfs_mount_t *mp; /* mount point structure */
xfs_alloctype_t atype = 0; /* type for allocation routines */
- xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_extlen_t align = 0; /* minimum allocation alignment */
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
xfs_agnumber_t ag;
xfs_alloc_arg_t args;
@@ -3645,7 +3648,8 @@ xfs_bmap_btalloc(
else if (mp->m_dalign)
stripe_align = mp->m_dalign;
- align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
+ if (xfs_alloc_is_userdata(ap->datatype))
+ align = xfs_get_extsz_hint(ap->ip);
if (unlikely(align)) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 0, ap->eof, 0, ap->conv,
@@ -3658,7 +3662,8 @@ xfs_bmap_btalloc(
nullfb = *ap->firstblock == NULLFSBLOCK;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
if (nullfb) {
- if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
+ if (xfs_alloc_is_userdata(ap->datatype) &&
+ xfs_inode_is_filestream(ap->ip)) {
ag = xfs_filestream_lookup_ag(ap->ip);
ag = (ag != NULLAGNUMBER) ? ag : 0;
ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
@@ -3698,7 +3703,8 @@ xfs_bmap_btalloc(
* enough for the request. If one isn't found, then adjust
* the minimum allocation size to the largest space found.
*/
- if (ap->userdata && xfs_inode_is_filestream(ap->ip))
+ if (xfs_alloc_is_userdata(ap->datatype) &&
+ xfs_inode_is_filestream(ap->ip))
error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
else
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
@@ -3781,9 +3787,9 @@ xfs_bmap_btalloc(
}
args.minleft = ap->minleft;
args.wasdel = ap->wasdel;
- args.isfl = 0;
- args.userdata = ap->userdata;
- if (ap->userdata & XFS_ALLOC_USERDATA_ZERO)
+ args.resv = XFS_AG_RESV_NONE;
+ args.datatype = ap->datatype;
+ if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
args.ip = ap->ip;
error = xfs_alloc_vextent(&args);
@@ -3877,7 +3883,8 @@ STATIC int
xfs_bmap_alloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
- if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
+ if (XFS_IS_REALTIME_INODE(ap->ip) &&
+ xfs_alloc_is_userdata(ap->datatype))
return xfs_bmap_rtalloc(ap);
return xfs_bmap_btalloc(ap);
}
@@ -4074,7 +4081,7 @@ xfs_bmapi_read(
return 0;
}
-STATIC int
+int
xfs_bmapi_reserve_delalloc(
struct xfs_inode *ip,
xfs_fileoff_t aoff,
@@ -4170,91 +4177,6 @@ out_unreserve_quota:
return error;
}
-/*
- * Map file blocks to filesystem blocks, adding delayed allocations as needed.
- */
-int
-xfs_bmapi_delay(
- struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t bno, /* starting file offs. mapped */
- xfs_filblks_t len, /* length to map in file */
- struct xfs_bmbt_irec *mval, /* output: map values */
- int *nmap, /* i/o: mval size/count */
- int flags) /* XFS_BMAPI_... */
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- struct xfs_bmbt_irec got; /* current file extent record */
- struct xfs_bmbt_irec prev; /* previous file extent record */
- xfs_fileoff_t obno; /* old block number (offset) */
- xfs_fileoff_t end; /* end of mapped file region */
- xfs_extnum_t lastx; /* last useful extent number */
- int eof; /* we've hit the end of extents */
- int n = 0; /* current extent index */
- int error = 0;
-
- ASSERT(*nmap >= 1);
- ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
- ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- XFS_STATS_INC(mp, xs_blk_mapw);
-
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- return error;
- }
-
- xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
- end = bno + len;
- obno = bno;
-
- while (bno < end && n < *nmap) {
- if (eof || got.br_startoff > bno) {
- error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
- &prev, &lastx, eof);
- if (error) {
- if (n == 0) {
- *nmap = 0;
- return error;
- }
- break;
- }
- }
-
- /* set up the extent map to return. */
- xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
- xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
-
- /* If we're done, stop now. */
- if (bno >= end || n >= *nmap)
- break;
-
- /* Else go on to the next record. */
- prev = got;
- if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
- else
- eof = 1;
- }
-
- *nmap = n;
- return 0;
-}
-
-
static int
xfs_bmapi_allocate(
struct xfs_bmalloca *bma)
@@ -4287,15 +4209,21 @@ xfs_bmapi_allocate(
}
/*
- * Indicate if this is the first user data in the file, or just any
- * user data. And if it is userdata, indicate whether it needs to
- * be initialised to zero during allocation.
+ * Set the data type being allocated. For the data fork, the first data
+ * in the file is treated differently to all other allocations. For the
+ * attribute fork, we only need to ensure the allocated range is not on
+ * the busy list.
*/
if (!(bma->flags & XFS_BMAPI_METADATA)) {
- bma->userdata = (bma->offset == 0) ?
- XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
+ bma->datatype = XFS_ALLOC_NOBUSY;
+ if (whichfork == XFS_DATA_FORK) {
+ if (bma->offset == 0)
+ bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+ else
+ bma->datatype |= XFS_ALLOC_USERDATA;
+ }
if (bma->flags & XFS_BMAPI_ZERO)
- bma->userdata |= XFS_ALLOC_USERDATA_ZERO;
+ bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
}
bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
@@ -4565,7 +4493,7 @@ xfs_bmapi_write(
bma.tp = tp;
bma.ip = ip;
bma.total = total;
- bma.userdata = 0;
+ bma.datatype = 0;
bma.dfops = dfops;
bma.firstblock = firstblock;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 254034f96941..8395f6e8cf7d 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -54,7 +54,7 @@ struct xfs_bmalloca {
bool wasdel; /* replacing a delayed allocation */
bool aeof; /* allocated space at eof */
bool conv; /* overwriting unwritten extents */
- char userdata;/* userdata mask */
+ int datatype;/* data type being allocated */
int flags;
};
@@ -181,9 +181,6 @@ int xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
xfs_filblks_t len, struct xfs_bmbt_irec *mval,
int *nmap, int flags);
-int xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
- xfs_filblks_t len, struct xfs_bmbt_irec *mval,
- int *nmap, int flags);
int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_fsblock_t *firstblock, xfs_extlen_t total,
@@ -202,5 +199,12 @@ int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
struct xfs_defer_ops *dfops, enum shift_direction direction,
int num_exts);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
+struct xfs_bmbt_rec_host *
+ xfs_bmap_search_extents(struct xfs_inode *ip, xfs_fileoff_t bno,
+ int fork, int *eofp, xfs_extnum_t *lastxp,
+ struct xfs_bmbt_irec *gotp, struct xfs_bmbt_irec *prevp);
+int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, xfs_fileoff_t aoff,
+ xfs_filblks_t len, struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *prev, xfs_extnum_t *lastx, int eof);
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 08569792fe20..aa1752f918b8 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -2070,7 +2070,7 @@ __xfs_btree_updkeys(
struct xfs_buf *bp0,
bool force_all)
{
- union xfs_btree_bigkey key; /* keys from current level */
+ union xfs_btree_key key; /* keys from current level */
union xfs_btree_key *lkey; /* keys from the next level up */
union xfs_btree_key *hkey;
union xfs_btree_key *nlkey; /* keys from the next level up */
@@ -2086,7 +2086,7 @@ __xfs_btree_updkeys(
trace_xfs_btree_updkeys(cur, level, bp0);
- lkey = (union xfs_btree_key *)&key;
+ lkey = &key;
hkey = xfs_btree_high_key_from_key(cur, lkey);
xfs_btree_get_keys(cur, block, lkey);
for (level++; level < cur->bc_nlevels; level++) {
@@ -3226,7 +3226,7 @@ xfs_btree_insrec(
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_ptr nptr; /* new block ptr */
struct xfs_btree_cur *ncur; /* new btree cursor */
- union xfs_btree_bigkey nkey; /* new block key */
+ union xfs_btree_key nkey; /* new block key */
union xfs_btree_key *lkey;
int optr; /* old key/record index */
int ptr; /* key/record index */
@@ -3241,7 +3241,7 @@ xfs_btree_insrec(
XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);
ncur = NULL;
- lkey = (union xfs_btree_key *)&nkey;
+ lkey = &nkey;
/*
* If we have an external root pointer, and we've made it to the
@@ -3444,14 +3444,14 @@ xfs_btree_insert(
union xfs_btree_ptr nptr; /* new block number (split result) */
struct xfs_btree_cur *ncur; /* new cursor (split result) */
struct xfs_btree_cur *pcur; /* previous level's cursor */
- union xfs_btree_bigkey bkey; /* key of block to insert */
+ union xfs_btree_key bkey; /* key of block to insert */
union xfs_btree_key *key;
union xfs_btree_rec rec; /* record to insert */
level = 0;
ncur = NULL;
pcur = cur;
- key = (union xfs_btree_key *)&bkey;
+ key = &bkey;
xfs_btree_set_ptr_null(cur, &nptr);
@@ -4797,3 +4797,50 @@ xfs_btree_query_range(
return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
fn, priv);
}
+
+/*
+ * Calculate the number of blocks needed to store a given number of records
+ * in a short-format (per-AG metadata) btree.
+ */
+xfs_extlen_t
+xfs_btree_calc_size(
+ struct xfs_mount *mp,
+ uint *limits,
+ unsigned long long len)
+{
+ int level;
+ int maxrecs;
+ xfs_extlen_t rval;
+
+ maxrecs = limits[0];
+ for (level = 0, rval = 0; len > 1; level++) {
+ len += maxrecs - 1;
+ do_div(len, maxrecs);
+ maxrecs = limits[1];
+ rval += len;
+ }
+ return rval;
+}
+
+int
+xfs_btree_count_blocks_helper(
+ struct xfs_btree_cur *cur,
+ int level,
+ void *data)
+{
+ xfs_extlen_t *blocks = data;
+ (*blocks)++;
+
+ return 0;
+}
+
+/* Count the blocks in a btree and return the result in *blocks. */
+int
+xfs_btree_count_blocks(
+ struct xfs_btree_cur *cur,
+ xfs_extlen_t *blocks)
+{
+ *blocks = 0;
+ return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
+ blocks);
+}
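
To make the estimate above concrete, here is a rough worked example as a standalone sketch. The fanout limits 16 and 8 are made up; the loop reproduces the ceil-divide-per-level logic of xfs_btree_calc_size rather than calling it:

#include <stdio.h>

/* Standalone re-implementation of the per-level block estimate, for illustration only. */
static unsigned long long btree_calc_size(const unsigned int *limits,
					  unsigned long long len)
{
	unsigned int maxrecs = limits[0];	/* leaf fanout first */
	unsigned long long rval = 0;

	while (len > 1) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks on this level */
		maxrecs = limits[1];			/* node fanout above the leaves */
		rval += len;
	}
	return rval;
}

int main(void)
{
	unsigned int limits[2] = { 16, 8 };	/* hypothetical leaf/node fanout */

	/* 63 leaf blocks + 8 node blocks + 1 root = 72 blocks */
	printf("%llu\n", btree_calc_size(limits, 1000));
	return 0;
}

For 1000 records this yields 63 leaf blocks, 8 node blocks above them and a single root, i.e. 72 blocks in total.
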
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 04d0865e5e6d..3f8556a5c2ad 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -37,30 +37,18 @@ union xfs_btree_ptr {
__be64 l; /* long form ptr */
};
-union xfs_btree_key {
- struct xfs_bmbt_key bmbt;
- xfs_bmdr_key_t bmbr; /* bmbt root block */
- xfs_alloc_key_t alloc;
- struct xfs_inobt_key inobt;
- struct xfs_rmap_key rmap;
-};
-
/*
- * In-core key that holds both low and high keys for overlapped btrees.
- * The two keys are packed next to each other on disk, so do the same
- * in memory. Preserve the existing xfs_btree_key as a single key to
- * avoid the mental model breakage that would happen if we passed a
- * bigkey into a function that operates on a single key.
+ * The in-core btree key. Overlapping btrees actually store two keys
+ * per pointer, so we reserve enough memory to hold both. The __*bigkey
+ * items should never be accessed directly.
*/
-union xfs_btree_bigkey {
+union xfs_btree_key {
struct xfs_bmbt_key bmbt;
xfs_bmdr_key_t bmbr; /* bmbt root block */
xfs_alloc_key_t alloc;
struct xfs_inobt_key inobt;
- struct {
- struct xfs_rmap_key rmap;
- struct xfs_rmap_key rmap_hi;
- };
+ struct xfs_rmap_key rmap;
+ struct xfs_rmap_key __rmap_bigkey[2];
};
union xfs_btree_rec {
@@ -513,6 +501,8 @@ bool xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
unsigned long len);
+xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
+ unsigned long long len);
/* return codes */
#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
@@ -529,4 +519,6 @@ typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
xfs_btree_visit_blocks_fn fn, void *data);
+int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
+
#endif /* __XFS_BTREE_H__ */
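
The rewritten union xfs_btree_key above notes that overlapping btrees store two keys per pointer and that the union is now sized to hold both. A toy illustration of that sizing trick, with invented structures that only mimic the shape of the real ones:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_rmap_key {			/* stand-in for struct xfs_rmap_key */
	uint64_t rm_startblock;
	uint64_t rm_owner;
	uint64_t rm_offset;
};

union toy_btree_key {
	struct toy_rmap_key rmap;		/* what normal callers touch */
	struct toy_rmap_key __rmap_bigkey[2];	/* sizing only: low key + high key */
};

/* The two-element array guarantees room for both keys back to back. */
static_assert(sizeof(union toy_btree_key) >= 2 * sizeof(struct toy_rmap_key),
	      "union must hold a low and a high key");

int main(void)
{
	printf("one key: %zu bytes, union: %zu bytes\n",
	       sizeof(struct toy_rmap_key), sizeof(union toy_btree_key));
	return 0;
}
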
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index c221d0ecd52e..613c5cf19436 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -81,6 +81,10 @@
* - For each work item attached to the log intent item,
* * Perform the described action.
* * Attach the work item to the log done item.
+ * * If the result of doing the work was -EAGAIN, ->finish work
+ * wants a new transaction. See the "Requesting a Fresh
+ * Transaction while Finishing Deferred Work" section below for
+ * details.
*
* The key here is that we must log an intent item for all pending
* work items every time we roll the transaction, and that we must log
@@ -88,6 +92,34 @@
* we can perform complex remapping operations, chaining intent items
* as needed.
*
+ * Requesting a Fresh Transaction while Finishing Deferred Work
+ *
+ * If ->finish_item decides that it needs a fresh transaction to
+ * finish the work, it must ask its caller (xfs_defer_finish) for a
+ * continuation. The most likely cause of this circumstance is the
+ * refcount adjust functions deciding that they've logged enough items
+ * to be at risk of exceeding the transaction reservation.
+ *
+ * To get a fresh transaction, we want to log the existing log done
+ * item to prevent the log intent item from replaying, immediately log
+ * a new log intent item with the unfinished work items, roll the
+ * transaction, and re-call ->finish_item wherever it left off. The
+ * log done item and the new log intent item must be in the same
+ * transaction or atomicity cannot be guaranteed; defer_finish ensures
+ * that this happens.
+ *
+ * This requires some coordination between ->finish_item and
+ * defer_finish. Upon deciding to request a new transaction,
+ * ->finish_item should update the current work item to reflect the
+ * unfinished work. Next, it should reset the log done item's list
+ * count to the number of items finished, and return -EAGAIN.
+ * defer_finish sees the -EAGAIN, logs the new log intent item
+ * with the remaining work items, and leaves the xfs_defer_pending
+ * item at the head of the dop_work queue. Then it rolls the
+ * transaction and picks up processing where it left off. Note that
+ * ->finish_item must be careful to leave enough transaction
+ * reservation to fit the new log intent item.
+ *
* This is an example of remapping the extent (E, E+B) into file X at
* offset A and dealing with the extent (C, C+B) already being mapped
* there:
@@ -104,21 +136,26 @@
* | Intent to add rmap (X, E, A, B) |
* +-------------------------------------------------+
* | Reduce refcount for extent (C, B) | t2
- * | Done reducing refcount for extent (C, B) |
+ * | Done reducing refcount for extent (C, 9) |
+ * | Intent to reduce refcount for extent (C+9, B-9) |
+ * | (ran out of space after 9 refcount updates) |
+ * +-------------------------------------------------+
+ * | Reduce refcount for extent (C+9, B-9) | t3
+ * | Done reducing refcount for extent (C+9, B-9) |
* | Increase refcount for extent (E, B) |
* | Done increasing refcount for extent (E, B) |
* | Intent to free extent (C, B) |
* | Intent to free extent (F, 1) (refcountbt block) |
* | Intent to remove rmap (F, 1, REFC) |
* +-------------------------------------------------+
- * | Remove rmap (X, C, A, B) | t3
+ * | Remove rmap (X, C, A, B) | t4
* | Done removing rmap (X, C, A, B) |
* | Add rmap (X, E, A, B) |
* | Done adding rmap (X, E, A, B) |
* | Remove rmap (F, 1, REFC) |
* | Done removing rmap (F, 1, REFC) |
* +-------------------------------------------------+
- * | Free extent (C, B) | t4
+ * | Free extent (C, B) | t5
* | Done freeing extent (C, B) |
* | Free extent (D, 1) |
* | Done freeing extent (D, 1) |
@@ -141,6 +178,9 @@
* - Intent to free extent (C, B)
* - Intent to free extent (F, 1) (refcountbt block)
* - Intent to remove rmap (F, 1, REFC)
+ *
+ * Note that the continuation requested between t2 and t3 is likely to
+ * reoccur.
*/
static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
@@ -323,7 +363,16 @@ xfs_defer_finish(
dfp->dfp_count--;
error = dfp->dfp_type->finish_item(*tp, dop, li,
dfp->dfp_done, &state);
- if (error) {
+ if (error == -EAGAIN) {
+ /*
+ * Caller wants a fresh transaction;
+ * put the work item back on the list
+ * and jump out.
+ */
+ list_add(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+ break;
+ } else if (error) {
/*
* Clean up after ourselves and jump out.
* xfs_defer_cancel will take care of freeing
@@ -335,9 +384,25 @@ xfs_defer_finish(
goto out;
}
}
- /* Done with the dfp, free it. */
- list_del(&dfp->dfp_list);
- kmem_free(dfp);
+ if (error == -EAGAIN) {
+ /*
+ * Caller wants a fresh transaction, so log a
+ * new log intent item to replace the old one
+ * and roll the transaction. See "Requesting
+ * a Fresh Transaction while Finishing
+ * Deferred Work" above.
+ */
+ dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
+ dfp->dfp_count);
+ dfp->dfp_done = NULL;
+ list_for_each(li, &dfp->dfp_work)
+ dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
+ li);
+ } else {
+ /* Done with the dfp, free it. */
+ list_del(&dfp->dfp_list);
+ kmem_free(dfp);
+ }
if (cleanup_fn)
cleanup_fn(*tp, state, error);
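
The control flow described in the comment block above (finish what fits, trim the work item, fix up the done item's count, return -EAGAIN, let the caller re-log and roll) can be mimicked in a few lines of userspace C. Everything below is invented for illustration; only the shape of the loop corresponds to what xfs_defer_finish now does with the -EAGAIN return:

#include <errno.h>
#include <stdio.h>

struct work { int remaining; };

/*
 * Toy ->finish_item: process at most "budget" units per transaction and
 * report -EAGAIN when work is left over, mirroring the continuation
 * protocol described above.
 */
static int finish_item(struct work *wi, int budget, int *done)
{
	*done = 0;
	while (wi->remaining && budget--) {
		wi->remaining--;		/* one unit of real work */
		(*done)++;
	}
	return wi->remaining ? -EAGAIN : 0;
}

int main(void)
{
	struct work wi = { .remaining = 23 };
	int xact = 1, done;

	/* The caller plays the role of xfs_defer_finish: on -EAGAIN it would
	 * log a new intent for the remaining work and roll the transaction. */
	while (finish_item(&wi, 9, &done) == -EAGAIN) {
		printf("t%d: finished %d, %d left, rolling\n",
		       xact++, done, wi.remaining);
	}
	printf("t%d: finished %d, all done\n", xact, done);
	return 0;
}
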
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 31ca2208c03d..eab68ae2e011 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -132,7 +132,7 @@ xfs_inobt_free_block(
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
- &oinfo);
+ &oinfo, XFS_AG_RESV_NONE);
}
STATIC int
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index a6eed43fa7cd..fc5eef85d61e 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -647,9 +647,17 @@ struct xfs_rui_log_format {
__uint16_t rui_size; /* size of this item */
__uint32_t rui_nextents; /* # extents to free */
__uint64_t rui_id; /* rui identifier */
- struct xfs_map_extent rui_extents[1]; /* array of extents to rmap */
+ struct xfs_map_extent rui_extents[]; /* array of extents to rmap */
};
+static inline size_t
+xfs_rui_log_format_sizeof(
+ unsigned int nr)
+{
+ return sizeof(struct xfs_rui_log_format) +
+ nr * sizeof(struct xfs_map_extent);
+}
+
/*
* This is the structure used to lay out an rud log item in the
* log. The rud_extents array is a variable size array whose
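
Replacing the one-element rui_extents array with a flexible array member changes what sizeof() reports, which is why the sizing helper is added alongside it. A standalone toy showing the same allocation pattern (the structure below is invented and much simpler than the real log format):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_extent { unsigned long long start, len; };

struct toy_log_format {
	unsigned short		type;
	unsigned short		size;
	unsigned int		nextents;
	struct toy_extent	extents[];	/* flexible array member */
};

static size_t toy_log_format_sizeof(unsigned int nr)
{
	return sizeof(struct toy_log_format) + nr * sizeof(struct toy_extent);
}

int main(void)
{
	unsigned int nr = 4;
	struct toy_log_format *f = malloc(toy_log_format_sizeof(nr));

	if (!f)
		return 1;
	memset(f, 0, toy_log_format_sizeof(nr));
	f->nextents = nr;
	f->extents[nr - 1].len = 1;	/* last slot is within the allocation */
	printf("header %zu bytes, total %zu bytes\n",
	       sizeof(*f), toy_log_format_sizeof(nr));
	free(f);
	return 0;
}
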
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 7575cfc3ad15..4a28fa91e3b1 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -200,7 +200,7 @@ xfs_setfilesize_trans_alloc(
* Update on-disk file size now that data has been written to disk.
*/
STATIC int
-xfs_setfilesize(
+__xfs_setfilesize(
struct xfs_inode *ip,
struct xfs_trans *tp,
xfs_off_t offset,
@@ -225,6 +225,23 @@ xfs_setfilesize(
return xfs_trans_commit(tp);
}
+int
+xfs_setfilesize(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ size_t size)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ return __xfs_setfilesize(ip, tp, offset, size);
+}
+
STATIC int
xfs_setfilesize_ioend(
struct xfs_ioend *ioend,
@@ -247,7 +264,7 @@ xfs_setfilesize_ioend(
return error;
}
- return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+ return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
@@ -1336,13 +1353,12 @@ xfs_end_io_direct_write(
{
struct inode *inode = file_inode(iocb->ki_filp);
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
uintptr_t flags = (uintptr_t)private;
int error = 0;
trace_xfs_end_io_direct_write(ip, offset, size);
- if (XFS_FORCED_SHUTDOWN(mp))
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
if (size <= 0)
@@ -1380,14 +1396,9 @@ xfs_end_io_direct_write(
error = xfs_iomap_write_unwritten(ip, offset, size);
} else if (flags & XFS_DIO_FLAG_APPEND) {
- struct xfs_trans *tp;
-
trace_xfs_end_io_direct_write_append(ip, offset, size);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
- &tp);
- if (!error)
- error = xfs_setfilesize(ip, tp, offset, size);
+ error = xfs_setfilesize(ip, offset, size);
}
return error;
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index bf2d9a141a73..1950e3bca2ac 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,6 +62,7 @@ int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
int xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private);
+int xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
extern void xfs_count_page_state(struct page *, int *, int *);
extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 4ece4f2ffc72..e827d657c314 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -182,7 +182,7 @@ xfs_bmap_rtalloc(
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
/* Zero the extent if we were asked to do so */
- if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
+ if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
if (error)
return error;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index e455f9098d49..2975cb2319f4 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -865,7 +865,7 @@ xfs_buf_item_log_segment(
*/
if (bit) {
end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
- mask = ((1 << (end_bit - bit)) - 1) << bit;
+ mask = ((1U << (end_bit - bit)) - 1) << bit;
*wordp |= mask;
wordp++;
bits_set = end_bit - bit;
@@ -888,7 +888,7 @@ xfs_buf_item_log_segment(
*/
end_bit = bits_to_set - bits_set;
if (end_bit) {
- mask = (1 << end_bit) - 1;
+ mask = (1U << end_bit) - 1;
*wordp |= mask;
}
}
@@ -1095,7 +1095,8 @@ xfs_buf_iodone_callback_error(
bp->b_last_error != bp->b_error) {
bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
bp->b_last_error = bp->b_error;
- if (cfg->retry_timeout && !bp->b_first_retry_time)
+ if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
+ !bp->b_first_retry_time)
bp->b_first_retry_time = jiffies;
xfs_buf_ioerror(bp, 0);
@@ -1111,7 +1112,7 @@ xfs_buf_iodone_callback_error(
if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
++bp->b_retries > cfg->max_retries)
goto permanent_error;
- if (cfg->retry_timeout &&
+ if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
goto permanent_error;
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index c263e079273e..162dc186cf04 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -384,7 +384,7 @@ restart:
* If this is a metadata allocation, try to reuse the busy
* extent instead of trimming the allocation.
*/
- if (!args->userdata &&
+ if (!xfs_alloc_is_userdata(args->datatype) &&
!(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
if (!xfs_extent_busy_update_extent(args->mp, args->pag,
busyp, fbno, flen,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index e612a0233710..26acfbb331ae 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -269,6 +269,8 @@ xfs_file_dio_aio_read(
return -EINVAL;
}
+ file_accessed(iocb->ki_filp);
+
/*
* Locking is a bit tricky here. If we take an exclusive lock for direct
* IO, we effectively serialise all new concurrent read IO to this file
@@ -323,7 +325,6 @@ xfs_file_dio_aio_read(
}
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
- file_accessed(iocb->ki_filp);
return ret;
}
@@ -332,10 +333,7 @@ xfs_file_dax_read(
struct kiocb *iocb,
struct iov_iter *to)
{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
- struct iov_iter data = *to;
+ struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host);
size_t count = iov_iter_count(to);
ssize_t ret = 0;
@@ -345,11 +343,7 @@ xfs_file_dax_read(
return 0; /* skip atime */
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
- ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
- if (ret > 0) {
- iocb->ki_pos += ret;
- iov_iter_advance(to, ret);
- }
+ ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
file_accessed(iocb->ki_filp);
@@ -399,45 +393,6 @@ xfs_file_read_iter(
return ret;
}
-STATIC ssize_t
-xfs_file_splice_read(
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t count,
- unsigned int flags)
-{
- struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
- ssize_t ret;
-
- XFS_STATS_INC(ip->i_mount, xs_read_calls);
-
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- trace_xfs_file_splice_read(ip, count, *ppos);
-
- /*
- * DAX inodes cannot ues the page cache for splice, so we have to push
- * them through the VFS IO path. This means it goes through
- * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
- * cannot lock the splice operation at this level for DAX inodes.
- */
- if (IS_DAX(VFS_I(ip))) {
- ret = default_file_splice_read(infilp, ppos, pipe, count,
- flags);
- goto out;
- }
-
- xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
- ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-out:
- if (ret > 0)
- XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
- return ret;
-}
-
/*
* Zero any on disk space between the current EOF and the new, larger EOF.
*
@@ -711,70 +666,32 @@ xfs_file_dax_write(
struct kiocb *iocb,
struct iov_iter *from)
{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- ssize_t ret = 0;
- int unaligned_io = 0;
- int iolock;
- struct iov_iter data;
+ int iolock = XFS_IOLOCK_EXCL;
+ ssize_t ret, error = 0;
+ size_t count;
+ loff_t pos;
- /* "unaligned" here means not aligned to a filesystem block */
- if ((iocb->ki_pos & mp->m_blockmask) ||
- ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
- unaligned_io = 1;
- iolock = XFS_IOLOCK_EXCL;
- } else if (mapping->nrpages) {
- iolock = XFS_IOLOCK_EXCL;
- } else {
- iolock = XFS_IOLOCK_SHARED;
- }
xfs_rw_ilock(ip, iolock);
-
ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
- /*
- * Yes, even DAX files can have page cache attached to them: A zeroed
- * page is inserted into the pagecache when we have to serve a write
- * fault on a hole. It should never be dirtied and can simply be
- * dropped from the pagecache once we get real data for the page.
- *
- * XXX: This is racy against mmap, and there's nothing we can do about
- * it. dax_do_io() should really do this invalidation internally as
- * it will know if we've allocated over a holei for this specific IO and
- * if so it needs to update the mapping tree and invalidate existing
- * PTEs over the newly allocated range. Remove this invalidation when
- * dax_do_io() is fixed up.
- */
- if (mapping->nrpages) {
- loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+ pos = iocb->ki_pos;
+ count = iov_iter_count(from);
- ret = invalidate_inode_pages2_range(mapping,
- iocb->ki_pos >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
- }
+ trace_xfs_file_dax_write(ip, count, pos);
- if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
- xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
- iolock = XFS_IOLOCK_SHARED;
+ ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+ if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
+ i_size_write(inode, iocb->ki_pos);
+ error = xfs_setfilesize(ip, pos, ret);
}
- trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
-
- data = *from;
- ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
- xfs_end_io_direct_write, 0);
- if (ret > 0) {
- iocb->ki_pos += ret;
- iov_iter_advance(from, ret);
- }
out:
xfs_rw_iunlock(ip, iolock);
- return ret;
+ return error ? error : ret;
}
STATIC ssize_t
@@ -1513,7 +1430,7 @@ xfs_filemap_page_mkwrite(
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+ ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
} else {
ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
ret = block_page_mkwrite_return(ret);
@@ -1547,7 +1464,7 @@ xfs_filemap_fault(
* changes to xfs_get_blocks_direct() to map unwritten extent
* ioend for conversion on read-only mappings.
*/
- ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+ ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
} else
ret = filemap_fault(vma, vmf);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1652,7 +1569,7 @@ const struct file_operations xfs_file_operations = {
.llseek = xfs_file_llseek,
.read_iter = xfs_file_read_iter,
.write_iter = xfs_file_write_iter,
- .splice_read = xfs_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
@@ -1662,6 +1579,7 @@ const struct file_operations xfs_file_operations = {
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
+ .get_unmapped_area = thp_get_unmapped_area,
.fallocate = xfs_file_fallocate,
};
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 4a33a3304369..043ca3808ea2 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -30,6 +30,7 @@
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
+#include "xfs_ag_resv.h"
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
@@ -198,7 +199,8 @@ xfs_filestream_pick_ag(
}
longest = xfs_alloc_longest_free_extent(mp, pag,
- xfs_alloc_min_freelist(mp, pag));
+ xfs_alloc_min_freelist(mp, pag),
+ xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (((minlen && longest >= minlen) ||
(!minlen && pag->pagf_freeblks >= minfree)) &&
(!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
@@ -369,7 +371,8 @@ xfs_filestream_new_ag(
struct xfs_mount *mp = ip->i_mount;
xfs_extlen_t minlen = ap->length;
xfs_agnumber_t startag = 0;
- int flags, err = 0;
+ int flags = 0;
+ int err = 0;
struct xfs_mru_cache_elem *mru;
*agp = NULLAGNUMBER;
@@ -385,8 +388,10 @@ xfs_filestream_new_ag(
startag = (item->ag + 1) % mp->m_sb.sb_agcount;
}
- flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
- (ap->dfops->dop_low ? XFS_PICK_LOWSPACE : 0);
+ if (xfs_alloc_is_userdata(ap->datatype))
+ flags |= XFS_PICK_USERDATA;
+ if (ap->dfops->dop_low)
+ flags |= XFS_PICK_LOWSPACE;
err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 0b7f986745c1..94ac06f3d908 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -553,7 +553,7 @@ xfs_growfs_data_private(
error = xfs_free_extent(tp,
XFS_AGB_TO_FSB(mp, agno,
be32_to_cpu(agf->agf_length) - new),
- new, &oinfo);
+ new, &oinfo, XFS_AG_RESV_NONE);
if (error)
goto error0;
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index fb39a66914dd..65b2e3f85f52 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1414,6 +1414,16 @@ xfs_inode_set_eofblocks_tag(
struct xfs_perag *pag;
int tagged;
+ /*
+ * Don't bother locking the AG and looking up in the radix trees
+ * if we already know that we have the tag set.
+ */
+ if (ip->i_flags & XFS_IEOFBLOCKS)
+ return;
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags |= XFS_IEOFBLOCKS;
+ spin_unlock(&ip->i_flags_lock);
+
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
trace_xfs_inode_set_eofblocks_tag(ip);
@@ -1449,6 +1459,10 @@ xfs_inode_clear_eofblocks_tag(
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags &= ~XFS_IEOFBLOCKS;
+ spin_unlock(&ip->i_flags_lock);
+
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
trace_xfs_inode_clear_eofblocks_tag(ip);
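
The comment added above explains the point of the new XFS_IEOFBLOCKS flag: skip the per-AG lock and radix-tree lookup when the inode already knows the tag is set. The same unlocked-hint-plus-locked-update pattern in a userspace toy (the types, names and pthread spinlock are stand-ins, not the kernel's primitives):

#include <pthread.h>
#include <stdio.h>

#define TOY_IEOFBLOCKS	(1u << 10)

struct toy_inode {
	pthread_spinlock_t	flags_lock;
	unsigned int		flags;
};

static void toy_set_eofblocks_tag(struct toy_inode *ip)
{
	/* unlocked fast path: tag already known to be set, nothing to do */
	if (ip->flags & TOY_IEOFBLOCKS)
		return;

	pthread_spin_lock(&ip->flags_lock);
	ip->flags |= TOY_IEOFBLOCKS;
	pthread_spin_unlock(&ip->flags_lock);

	/* ... the expensive per-AG radix tree tagging would happen here ... */
	puts("tagging AG radix tree");
}

int main(void)
{
	struct toy_inode ip = { .flags = 0 };

	pthread_spin_init(&ip.flags_lock, PTHREAD_PROCESS_PRIVATE);
	toy_set_eofblocks_tag(&ip);	/* does the work once */
	toy_set_eofblocks_tag(&ip);	/* second call takes the fast path */
	pthread_spin_destroy(&ip.flags_lock);
	return 0;
}
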
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index e1a411e08f00..8f30d2533b48 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -216,6 +216,7 @@ xfs_get_initial_prid(struct xfs_inode *dp)
#define __XFS_IPINNED_BIT 8 /* wakeup key for zero pin count */
#define XFS_IPINNED (1 << __XFS_IPINNED_BIT)
#define XFS_IDONTCACHE (1 << 9) /* don't cache the inode long term */
+#define XFS_IEOFBLOCKS (1 << 10)/* has the preallocblocks tag set */
/*
* Per-lifetime flags need to be reset when re-using a reclaimable inode during
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2af0dda1c978..c08253e11545 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2016 Christoph Hellwig.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -42,17 +43,40 @@
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
-#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
-STATIC int
-xfs_iomap_eof_align_last_fsb(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- xfs_extlen_t extsize,
- xfs_fileoff_t *last_fsb)
+void
+xfs_bmbt_to_iomap(
+ struct xfs_inode *ip,
+ struct iomap *iomap,
+ struct xfs_bmbt_irec *imap)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (imap->br_startblock == HOLESTARTBLOCK) {
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->type = IOMAP_HOLE;
+ } else if (imap->br_startblock == DELAYSTARTBLOCK) {
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->type = IOMAP_DELALLOC;
+ } else {
+ iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
+ if (imap->br_state == XFS_EXT_UNWRITTEN)
+ iomap->type = IOMAP_UNWRITTEN;
+ else
+ iomap->type = IOMAP_MAPPED;
+ }
+ iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+ iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+ iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
+}
+
+static xfs_extlen_t
+xfs_eof_alignment(
+ struct xfs_inode *ip,
+ xfs_extlen_t extsize)
{
- xfs_extlen_t align = 0;
- int eof, error;
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_extlen_t align = 0;
if (!XFS_IS_REALTIME_INODE(ip)) {
/*
@@ -83,8 +107,21 @@ xfs_iomap_eof_align_last_fsb(
align = extsize;
}
+ return align;
+}
+
+STATIC int
+xfs_iomap_eof_align_last_fsb(
+ struct xfs_inode *ip,
+ xfs_extlen_t extsize,
+ xfs_fileoff_t *last_fsb)
+{
+ xfs_extlen_t align = xfs_eof_alignment(ip, extsize);
+
if (align) {
xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
+ int eof, error;
+
error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
if (error)
return error;
@@ -154,7 +191,7 @@ xfs_iomap_write_direct(
*/
ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
XFS_IFEXTENTS);
- error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
+ error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
if (error)
goto out_unlock;
} else {
@@ -274,130 +311,6 @@ out_trans_cancel:
goto out_unlock;
}
-/*
- * If the caller is doing a write at the end of the file, then extend the
- * allocation out to the file system's write iosize. We clean up any extra
- * space left over when the file is closed in xfs_inactive().
- *
- * If we find we already have delalloc preallocation beyond EOF, don't do more
- * preallocation as it it not needed.
- */
-STATIC int
-xfs_iomap_eof_want_preallocate(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- xfs_off_t offset,
- size_t count,
- xfs_bmbt_irec_t *imap,
- int nimaps,
- int *prealloc)
-{
- xfs_fileoff_t start_fsb;
- xfs_filblks_t count_fsb;
- int n, error, imaps;
- int found_delalloc = 0;
-
- *prealloc = 0;
- if (offset + count <= XFS_ISIZE(ip))
- return 0;
-
- /*
- * If the file is smaller than the minimum prealloc and we are using
- * dynamic preallocation, don't do any preallocation at all as it is
- * likely this is the only write to the file that is going to be done.
- */
- if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
- XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
- return 0;
-
- /*
- * If there are any real blocks past eof, then don't
- * do any speculative allocation.
- */
- start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
- count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- while (count_fsb > 0) {
- imaps = nimaps;
- error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
- 0);
- if (error)
- return error;
- for (n = 0; n < imaps; n++) {
- if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
- (imap[n].br_startblock != DELAYSTARTBLOCK))
- return 0;
- start_fsb += imap[n].br_blockcount;
- count_fsb -= imap[n].br_blockcount;
-
- if (imap[n].br_startblock == DELAYSTARTBLOCK)
- found_delalloc = 1;
- }
- }
- if (!found_delalloc)
- *prealloc = 1;
- return 0;
-}
-
-/*
- * Determine the initial size of the preallocation. We are beyond the current
- * EOF here, but we need to take into account whether this is a sparse write or
- * an extending write when determining the preallocation size. Hence we need to
- * look up the extent that ends at the current write offset and use the result
- * to determine the preallocation size.
- *
- * If the extent is a hole, then preallocation is essentially disabled.
- * Otherwise we take the size of the preceeding data extent as the basis for the
- * preallocation size. If the size of the extent is greater than half the
- * maximum extent length, then use the current offset as the basis. This ensures
- * that for large files the preallocation size always extends to MAXEXTLEN
- * rather than falling short due to things like stripe unit/width alignment of
- * real extents.
- */
-STATIC xfs_fsblock_t
-xfs_iomap_eof_prealloc_initial_size(
- struct xfs_mount *mp,
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_bmbt_irec_t *imap,
- int nimaps)
-{
- xfs_fileoff_t start_fsb;
- int imaps = 1;
- int error;
-
- ASSERT(nimaps >= imaps);
-
- /* if we are using a specific prealloc size, return now */
- if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
- return 0;
-
- /* If the file is small, then use the minimum prealloc */
- if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
- return 0;
-
- /*
- * As we write multiple pages, the offset will always align to the
- * start of a page and hence point to a hole at EOF. i.e. if the size is
- * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
- * will return FSB 1. Hence if there are blocks in the file, we want to
- * point to the block prior to the EOF block and not the hole that maps
- * directly at @offset.
- */
- start_fsb = XFS_B_TO_FSB(mp, offset);
- if (start_fsb)
- start_fsb--;
- error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
- if (error)
- return 0;
-
- ASSERT(imaps == 1);
- if (imap[0].br_startblock == HOLESTARTBLOCK)
- return 0;
- if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
- return imap[0].br_blockcount << 1;
- return XFS_B_TO_FSB(mp, offset);
-}
-
STATIC bool
xfs_quota_need_throttle(
struct xfs_inode *ip,
@@ -459,27 +372,76 @@ xfs_quota_calc_throttle(
}
/*
+ * If we are doing a write at the end of the file and there are no allocations
+ * past this one, then extend the allocation out to the file system's write
+ * iosize.
+ *
* If we don't have a user specified preallocation size, dynamically increase
- * the preallocation size as the size of the file grows. Cap the maximum size
+ * the preallocation size as the size of the file grows. Cap the maximum size
* at a single extent or less if the filesystem is near full. The closer the
* filesystem is to full, the smaller the maximum preallocation.
+ *
+ * As an exception we don't do any preallocation at all if the file is smaller
+ * than the minimum preallocation and we are using the default dynamic
+ * preallocation scheme, as it is likely this is the only write to the file that
+ * is going to be done.
+ *
+ * We clean up any extra space left over when the file is closed in
+ * xfs_inactive().
*/
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
- struct xfs_mount *mp,
struct xfs_inode *ip,
- xfs_off_t offset,
- struct xfs_bmbt_irec *imap,
- int nimaps)
+ loff_t offset,
+ loff_t count,
+ xfs_extnum_t idx,
+ struct xfs_bmbt_irec *prev)
{
- xfs_fsblock_t alloc_blocks = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
int shift = 0;
int64_t freesp;
xfs_fsblock_t qblocks;
int qshift = 0;
+ xfs_fsblock_t alloc_blocks = 0;
+
+ if (offset + count <= XFS_ISIZE(ip))
+ return 0;
- alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
- imap, nimaps);
+ if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
+ (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
+ return 0;
+
+ /*
+ * If an explicit allocsize is set, the file is small, or we
+ * are writing behind a hole, then use the minimum prealloc:
+ */
+ if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
+ XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
+ idx == 0 ||
+ prev->br_startoff + prev->br_blockcount < offset_fsb)
+ return mp->m_writeio_blocks;
+
+ /*
+ * Determine the initial size of the preallocation. We are beyond the
+ * current EOF here, but we need to take into account whether this is
+ * a sparse write or an extending write when determining the
+ * preallocation size. Hence we need to look up the extent that ends
+ * at the current write offset and use the result to determine the
+ * preallocation size.
+ *
+ * If the extent is a hole, then preallocation is essentially disabled.
+ * Otherwise we take the size of the preceding data extent as the basis
+ * for the preallocation size. If the size of the extent is greater than
+ * half the maximum extent length, then use the current offset as the
+ * basis. This ensures that for large files the preallocation size
+ * always extends to MAXEXTLEN rather than falling short due to things
+ * like stripe unit/width alignment of real extents.
+ */
+ if (prev->br_blockcount <= (MAXEXTLEN >> 1))
+ alloc_blocks = prev->br_blockcount << 1;
+ else
+ alloc_blocks = XFS_B_TO_FSB(mp, offset);
if (!alloc_blocks)
goto check_writeio;
qblocks = alloc_blocks;
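
A rough worked example of the initial guess computed above: if the data extent ending at the write offset is 64 blocks long (and 64 is no more than half the maximum extent length), the guess is 128 blocks; once the preceding extent exceeds half of MAXEXTLEN, the basis switches to the file offset converted to blocks. A standalone sketch of just that decision, with made-up constants standing in for MAXEXTLEN and the byte-to-block conversion:

#include <stdio.h>

#define TOY_MAXEXTLEN	(1u << 21)	/* stand-in for MAXEXTLEN */
#define TOY_BLOCKSIZE	4096u		/* stand-in for the fs block size */

static unsigned long long prealloc_guess(unsigned long long prev_blockcount,
					 unsigned long long offset_bytes)
{
	if (prev_blockcount <= (TOY_MAXEXTLEN >> 1))
		return prev_blockcount << 1;		/* double the previous extent */
	return offset_bytes / TOY_BLOCKSIZE;		/* ~XFS_B_TO_FSB(mp, offset) */
}

int main(void)
{
	/* small preceding extent: the guess doubles it */
	printf("%llu\n", prealloc_guess(64, 10ULL << 20));		/* 128 */
	/* huge preceding extent: the guess is based on the file offset */
	printf("%llu\n", prealloc_guess(TOY_MAXEXTLEN, 1ULL << 30));	/* 262144 */
	return 0;
}
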
@@ -550,120 +512,145 @@ xfs_iomap_prealloc_size(
*/
while (alloc_blocks && alloc_blocks >= freesp)
alloc_blocks >>= 4;
-
check_writeio:
if (alloc_blocks < mp->m_writeio_blocks)
alloc_blocks = mp->m_writeio_blocks;
-
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
mp->m_writeio_blocks);
-
return alloc_blocks;
}
-int
-xfs_iomap_write_delay(
- xfs_inode_t *ip,
- xfs_off_t offset,
- size_t count,
- xfs_bmbt_irec_t *ret_imap)
+static int
+xfs_file_iomap_begin_delay(
+ struct inode *inode,
+ loff_t offset,
+ loff_t count,
+ unsigned flags,
+ struct iomap *iomap)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t last_fsb;
- xfs_off_t aligned_offset;
- xfs_fileoff_t ioalign;
- xfs_extlen_t extsz;
- int nimaps;
- xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
- int prealloc;
- int error;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- /*
- * Make sure that the dquots are there. This doesn't hold
- * the ilock across a disk read.
- */
- error = xfs_qm_dqattach_locked(ip, 0);
- if (error)
- return error;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t maxbytes_fsb =
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+ xfs_fileoff_t end_fsb, orig_end_fsb;
+ int error = 0, eof = 0;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec prev;
+ xfs_extnum_t idx;
- extsz = xfs_get_extsz_hint(ip);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ ASSERT(!XFS_IS_REALTIME_INODE(ip));
+ ASSERT(!xfs_get_extsz_hint(ip));
- error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
- imap, XFS_WRITE_IMAPS, &prealloc);
- if (error)
- return error;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
-retry:
- if (prealloc) {
- xfs_fsblock_t alloc_blocks;
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ error = -EFSCORRUPTED;
+ goto out_unlock;
+ }
- alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
- XFS_WRITE_IMAPS);
+ XFS_STATS_INC(mp, xs_blk_mapw);
- aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
- ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
- last_fsb = ioalign + alloc_blocks;
- } else {
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_unlock;
}
- if (prealloc || extsz) {
- error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
- if (error)
- return error;
+ xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
+ &got, &prev);
+ if (!eof && got.br_startoff <= offset_fsb) {
+ trace_xfs_iomap_found(ip, offset, count, 0, &got);
+ goto done;
}
+ error = xfs_qm_dqattach_locked(ip, 0);
+ if (error)
+ goto out_unlock;
+
/*
- * Make sure preallocation does not create extents beyond the range we
- * actually support in this filesystem.
+ * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
+ * to keep the chunks of work done somewhat symmetric with the
+ * work writeback does. This is a completely arbitrary number pulled
+ * out of thin air as a best guess for initial testing.
+ *
+ * Note that the value needs to be less than 32 bits wide until
+ * the lower level functions are updated.
*/
- if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
- last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+ count = min_t(loff_t, count, 1024 * PAGE_SIZE);
+ end_fsb = orig_end_fsb =
+ min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+
+ if (eof) {
+ xfs_fsblock_t prealloc_blocks;
- ASSERT(last_fsb > offset_fsb);
+ prealloc_blocks =
+ xfs_iomap_prealloc_size(ip, offset, count, idx, &prev);
+ if (prealloc_blocks) {
+ xfs_extlen_t align;
+ xfs_off_t end_offset;
- nimaps = XFS_WRITE_IMAPS;
- error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
- imap, &nimaps, XFS_BMAPI_ENTIRE);
+ end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
+ end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
+ prealloc_blocks;
+
+ align = xfs_eof_alignment(ip, 0);
+ if (align)
+ end_fsb = roundup_64(end_fsb, align);
+
+ end_fsb = min(end_fsb, maxbytes_fsb);
+ ASSERT(end_fsb > offset_fsb);
+ }
+ }
+
+retry:
+ error = xfs_bmapi_reserve_delalloc(ip, offset_fsb,
+ end_fsb - offset_fsb, &got,
+ &prev, &idx, eof);
switch (error) {
case 0:
+ break;
case -ENOSPC:
case -EDQUOT:
- break;
- default:
- return error;
- }
-
- /*
- * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
- * without EOF preallocation.
- */
- if (nimaps == 0) {
+ /* retry without any preallocation */
trace_xfs_delalloc_enospc(ip, offset, count);
- if (prealloc) {
- prealloc = 0;
- error = 0;
+ if (end_fsb != orig_end_fsb) {
+ end_fsb = orig_end_fsb;
goto retry;
}
- return error ? error : -ENOSPC;
+ /*FALLTHRU*/
+ default:
+ goto out_unlock;
}
- if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_alert_fsblock_zero(ip, &imap[0]);
-
/*
* Tag the inode as speculatively preallocated so we can reclaim this
* space on demand, if necessary.
*/
- if (prealloc)
+ if (end_fsb != orig_end_fsb)
xfs_inode_set_eofblocks_tag(ip);
- *ret_imap = imap[0];
- return 0;
+ trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
+done:
+ if (isnullstartblock(got.br_startblock))
+ got.br_startblock = DELAYSTARTBLOCK;
+
+ if (!got.br_startblock) {
+ error = xfs_alert_fsblock_zero(ip, &got);
+ if (error)
+ goto out_unlock;
+ }
+
+ xfs_bmbt_to_iomap(ip, iomap, &got);
+
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
}
/*
@@ -947,37 +934,13 @@ error_on_bmapi_transaction:
return error;
}
-void
-xfs_bmbt_to_iomap(
- struct xfs_inode *ip,
- struct iomap *iomap,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_mount *mp = ip->i_mount;
-
- if (imap->br_startblock == HOLESTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->type = IOMAP_HOLE;
- } else if (imap->br_startblock == DELAYSTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->type = IOMAP_DELALLOC;
- } else {
- iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
- if (imap->br_state == XFS_EXT_UNWRITTEN)
- iomap->type = IOMAP_UNWRITTEN;
- else
- iomap->type = IOMAP_MAPPED;
- }
- iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
- iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
-}
-
-static inline bool imap_needs_alloc(struct xfs_bmbt_irec *imap, int nimaps)
+static inline bool imap_needs_alloc(struct inode *inode,
+ struct xfs_bmbt_irec *imap, int nimaps)
{
return !nimaps ||
imap->br_startblock == HOLESTARTBLOCK ||
- imap->br_startblock == DELAYSTARTBLOCK;
+ imap->br_startblock == DELAYSTARTBLOCK ||
+ (IS_DAX(inode) && ISUNWRITTEN(imap));
}
static int
@@ -993,11 +956,18 @@ xfs_file_iomap_begin(
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0;
+ unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if ((flags & IOMAP_WRITE) &&
+ !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
+ return xfs_file_iomap_begin_delay(inode, offset, length, flags,
+ iomap);
+ }
+
+ lockmode = xfs_ilock_data_map_shared(ip);
ASSERT(offset <= mp->m_super->s_maxbytes);
if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -1008,11 +978,11 @@ xfs_file_iomap_begin(
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, XFS_BMAPI_ENTIRE);
if (error) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, lockmode);
return error;
}
- if ((flags & IOMAP_WRITE) && imap_needs_alloc(&imap, nimaps)) {
+ if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
/*
* We cap the maximum length we map here to MAX_WRITEBACK_PAGES
* pages to keep the chunks of work done somewhat symmetric with
* the work writeback does. This is a completely arbitrary number
* pulled out of thin air as a best guess for initial testing.
*
* Note that the value needs to be less than 32 bits wide until
* the lower level functions are updated.
* the lower level functions are updated.
*/
length = min_t(loff_t, length, 1024 * PAGE_SIZE);
- if (xfs_get_extsz_hint(ip)) {
- /*
- * xfs_iomap_write_direct() expects the shared lock. It
- * is unlocked on return.
- */
- xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
- error = xfs_iomap_write_direct(ip, offset, length, &imap,
- nimaps);
- } else {
- error = xfs_iomap_write_delay(ip, offset, length, &imap);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
-
+ /*
+ * xfs_iomap_write_direct() expects the shared lock. It
+ * is unlocked on return.
+ */
+ if (lockmode == XFS_ILOCK_EXCL)
+ xfs_ilock_demote(ip, lockmode);
+ error = xfs_iomap_write_direct(ip, offset, length, &imap,
+ nimaps);
if (error)
return error;
+ iomap->flags = IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
} else {
ASSERT(nimaps);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, lockmode);
trace_xfs_iomap_found(ip, offset, length, 0, &imap);
}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fb8aca3d69ab..6498be485932 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -25,8 +25,6 @@ struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
-int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *);
int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
struct xfs_bmbt_irec *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 765f084759b5..2b6eec52178e 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -413,7 +413,8 @@ struct xlog {
/* log record crc error injection factor */
uint32_t l_badcrc_factor;
#endif
-
+ /* log recovery lsn tracking (for buffer submission */
+ xfs_lsn_t l_recovery_lsn;
};
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index e8638fd2c0c3..846483d56949 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -44,6 +44,7 @@
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"
+#include "xfs_buf_item.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -381,6 +382,15 @@ xlog_recover_iodone(
SHUTDOWN_META_IO_ERROR);
}
}
+
+ /*
+ * On v5 supers, a bli could be attached to update the metadata LSN.
+ * Clean it up.
+ */
+ if (bp->b_fspriv)
+ xfs_buf_item_relse(bp);
+ ASSERT(bp->b_fspriv == NULL);
+
bp->b_iodone = NULL;
xfs_buf_ioend(bp);
}
@@ -2360,12 +2370,14 @@ static void
xlog_recover_validate_buf_type(
struct xfs_mount *mp,
struct xfs_buf *bp,
- xfs_buf_log_format_t *buf_f)
+ xfs_buf_log_format_t *buf_f,
+ xfs_lsn_t current_lsn)
{
struct xfs_da_blkinfo *info = bp->b_addr;
__uint32_t magic32;
__uint16_t magic16;
__uint16_t magicda;
+ char *warnmsg = NULL;
/*
* We can only do post recovery validation on items on CRC enabled
@@ -2404,31 +2416,27 @@ xlog_recover_validate_buf_type(
bp->b_ops = &xfs_rmapbt_buf_ops;
break;
default:
- xfs_warn(mp, "Bad btree block magic!");
- ASSERT(0);
+ warnmsg = "Bad btree block magic!";
break;
}
break;
case XFS_BLFT_AGF_BUF:
if (magic32 != XFS_AGF_MAGIC) {
- xfs_warn(mp, "Bad AGF block magic!");
- ASSERT(0);
+ warnmsg = "Bad AGF block magic!";
break;
}
bp->b_ops = &xfs_agf_buf_ops;
break;
case XFS_BLFT_AGFL_BUF:
if (magic32 != XFS_AGFL_MAGIC) {
- xfs_warn(mp, "Bad AGFL block magic!");
- ASSERT(0);
+ warnmsg = "Bad AGFL block magic!";
break;
}
bp->b_ops = &xfs_agfl_buf_ops;
break;
case XFS_BLFT_AGI_BUF:
if (magic32 != XFS_AGI_MAGIC) {
- xfs_warn(mp, "Bad AGI block magic!");
- ASSERT(0);
+ warnmsg = "Bad AGI block magic!";
break;
}
bp->b_ops = &xfs_agi_buf_ops;
@@ -2438,8 +2446,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
if (magic16 != XFS_DQUOT_MAGIC) {
- xfs_warn(mp, "Bad DQUOT block magic!");
- ASSERT(0);
+ warnmsg = "Bad DQUOT block magic!";
break;
}
bp->b_ops = &xfs_dquot_buf_ops;
@@ -2451,16 +2458,14 @@ xlog_recover_validate_buf_type(
break;
case XFS_BLFT_DINO_BUF:
if (magic16 != XFS_DINODE_MAGIC) {
- xfs_warn(mp, "Bad INODE block magic!");
- ASSERT(0);
+ warnmsg = "Bad INODE block magic!";
break;
}
bp->b_ops = &xfs_inode_buf_ops;
break;
case XFS_BLFT_SYMLINK_BUF:
if (magic32 != XFS_SYMLINK_MAGIC) {
- xfs_warn(mp, "Bad symlink block magic!");
- ASSERT(0);
+ warnmsg = "Bad symlink block magic!";
break;
}
bp->b_ops = &xfs_symlink_buf_ops;
@@ -2468,8 +2473,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DIR_BLOCK_BUF:
if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
magic32 != XFS_DIR3_BLOCK_MAGIC) {
- xfs_warn(mp, "Bad dir block magic!");
- ASSERT(0);
+ warnmsg = "Bad dir block magic!";
break;
}
bp->b_ops = &xfs_dir3_block_buf_ops;
@@ -2477,8 +2481,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DIR_DATA_BUF:
if (magic32 != XFS_DIR2_DATA_MAGIC &&
magic32 != XFS_DIR3_DATA_MAGIC) {
- xfs_warn(mp, "Bad dir data magic!");
- ASSERT(0);
+ warnmsg = "Bad dir data magic!";
break;
}
bp->b_ops = &xfs_dir3_data_buf_ops;
@@ -2486,8 +2489,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DIR_FREE_BUF:
if (magic32 != XFS_DIR2_FREE_MAGIC &&
magic32 != XFS_DIR3_FREE_MAGIC) {
- xfs_warn(mp, "Bad dir3 free magic!");
- ASSERT(0);
+ warnmsg = "Bad dir3 free magic!";
break;
}
bp->b_ops = &xfs_dir3_free_buf_ops;
@@ -2495,8 +2497,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DIR_LEAF1_BUF:
if (magicda != XFS_DIR2_LEAF1_MAGIC &&
magicda != XFS_DIR3_LEAF1_MAGIC) {
- xfs_warn(mp, "Bad dir leaf1 magic!");
- ASSERT(0);
+ warnmsg = "Bad dir leaf1 magic!";
break;
}
bp->b_ops = &xfs_dir3_leaf1_buf_ops;
@@ -2504,8 +2505,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DIR_LEAFN_BUF:
if (magicda != XFS_DIR2_LEAFN_MAGIC &&
magicda != XFS_DIR3_LEAFN_MAGIC) {
- xfs_warn(mp, "Bad dir leafn magic!");
- ASSERT(0);
+ warnmsg = "Bad dir leafn magic!";
break;
}
bp->b_ops = &xfs_dir3_leafn_buf_ops;
@@ -2513,8 +2513,7 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_DA_NODE_BUF:
if (magicda != XFS_DA_NODE_MAGIC &&
magicda != XFS_DA3_NODE_MAGIC) {
- xfs_warn(mp, "Bad da node magic!");
- ASSERT(0);
+ warnmsg = "Bad da node magic!";
break;
}
bp->b_ops = &xfs_da3_node_buf_ops;
@@ -2522,24 +2521,21 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_ATTR_LEAF_BUF:
if (magicda != XFS_ATTR_LEAF_MAGIC &&
magicda != XFS_ATTR3_LEAF_MAGIC) {
- xfs_warn(mp, "Bad attr leaf magic!");
- ASSERT(0);
+ warnmsg = "Bad attr leaf magic!";
break;
}
bp->b_ops = &xfs_attr3_leaf_buf_ops;
break;
case XFS_BLFT_ATTR_RMT_BUF:
if (magic32 != XFS_ATTR3_RMT_MAGIC) {
- xfs_warn(mp, "Bad attr remote magic!");
- ASSERT(0);
+ warnmsg = "Bad attr remote magic!";
break;
}
bp->b_ops = &xfs_attr3_rmt_buf_ops;
break;
case XFS_BLFT_SB_BUF:
if (magic32 != XFS_SB_MAGIC) {
- xfs_warn(mp, "Bad SB block magic!");
- ASSERT(0);
+ warnmsg = "Bad SB block magic!";
break;
}
bp->b_ops = &xfs_sb_buf_ops;
@@ -2556,6 +2552,40 @@ xlog_recover_validate_buf_type(
xfs_blft_from_flags(buf_f));
break;
}
+
+ /*
+ * Nothing else to do in the case of a NULL current LSN as this means
+ * the buffer is more recent than the change in the log and will be
+ * skipped.
+ */
+ if (current_lsn == NULLCOMMITLSN)
+ return;
+
+ if (warnmsg) {
+		xfs_warn(mp, "%s", warnmsg);
+ ASSERT(0);
+ }
+
+ /*
+ * We must update the metadata LSN of the buffer as it is written out to
+ * ensure that older transactions never replay over this one and corrupt
+ * the buffer. This can occur if log recovery is interrupted at some
+ * point after the current transaction completes, at which point a
+ * subsequent mount starts recovery from the beginning.
+ *
+ * Write verifiers update the metadata LSN from log items attached to
+ * the buffer. Therefore, initialize a bli purely to carry the LSN to
+ * the verifier. We'll clean it up in our ->iodone() callback.
+ */
+ if (bp->b_ops) {
+ struct xfs_buf_log_item *bip;
+
+ ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
+ bp->b_iodone = xlog_recover_iodone;
+ xfs_buf_item_init(bp, mp);
+ bip = bp->b_fspriv;
+ bip->bli_item.li_lsn = current_lsn;
+ }
}
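
The comment block just above describes the ordering rule this hunk enforces: a buffer whose metadata LSN is at or beyond the log item's LSN is skipped, and a buffer that is replayed gets stamped with the current LSN when it is written back. The standalone sketch below shows that compare-and-stamp rule in isolation; lsn_t, NULL_LSN, struct buf and lsn_cmp() are illustrative stand-ins, not the kernel's types.

/* Illustrative sketch only: simplified LSN compare/stamp logic, not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t lsn_t;                  /* stand-in for xfs_lsn_t */
#define NULL_LSN ((lsn_t)-1)            /* stand-in for NULLCOMMITLSN */

struct buf {
	lsn_t meta_lsn;                 /* LSN last written into the buffer */
	int   replayed;
};

static int lsn_cmp(lsn_t a, lsn_t b)    /* <0, 0, >0 like XFS_LSN_CMP */
{
	return (a > b) - (a < b);
}

/* Replay a logged change only if the buffer is older than the log item. */
static void recover_buf(struct buf *bp, lsn_t item_lsn)
{
	if (bp->meta_lsn != NULL_LSN && lsn_cmp(bp->meta_lsn, item_lsn) >= 0)
		return;                 /* buffer already newer: skip */

	bp->replayed = 1;
	/* Stamp the current LSN so older items can never replay over it. */
	bp->meta_lsn = item_lsn;
}

int main(void)
{
	struct buf b = { .meta_lsn = 100, .replayed = 0 };

	recover_buf(&b, 90);            /* skipped: 100 >= 90 */
	printf("after lsn 90:  replayed=%d meta_lsn=%lld\n",
	       b.replayed, (long long)b.meta_lsn);

	recover_buf(&b, 120);           /* replayed and stamped with 120 */
	printf("after lsn 120: replayed=%d meta_lsn=%lld\n",
	       b.replayed, (long long)b.meta_lsn);
	return 0;
}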
/*
@@ -2569,7 +2599,8 @@ xlog_recover_do_reg_buffer(
struct xfs_mount *mp,
xlog_recover_item_t *item,
struct xfs_buf *bp,
- xfs_buf_log_format_t *buf_f)
+ xfs_buf_log_format_t *buf_f,
+ xfs_lsn_t current_lsn)
{
int i;
int bit;
@@ -2642,7 +2673,7 @@ xlog_recover_do_reg_buffer(
/* Shouldn't be any more regions */
ASSERT(i == item->ri_total);
- xlog_recover_validate_buf_type(mp, bp, buf_f);
+ xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}
/*
@@ -2685,7 +2716,7 @@ xlog_recover_do_dquot_buffer(
if (log->l_quotaoffs_flag & type)
return false;
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
return true;
}
@@ -2773,7 +2804,8 @@ xlog_recover_buffer_pass2(
*/
lsn = xlog_recover_get_buf_lsn(mp, bp);
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
- xlog_recover_validate_buf_type(mp, bp, buf_f);
+ trace_xfs_log_recover_buf_skip(log, buf_f);
+ xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
goto out_release;
}
@@ -2789,7 +2821,7 @@ xlog_recover_buffer_pass2(
if (!dirty)
goto out_release;
} else {
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
}
/*
@@ -3846,14 +3878,13 @@ STATIC int
xlog_recover_commit_trans(
struct xlog *log,
struct xlog_recover *trans,
- int pass)
+ int pass,
+ struct list_head *buffer_list)
{
int error = 0;
- int error2;
int items_queued = 0;
struct xlog_recover_item *item;
struct xlog_recover_item *next;
- LIST_HEAD (buffer_list);
LIST_HEAD (ra_list);
LIST_HEAD (done_list);
@@ -3876,7 +3907,7 @@ xlog_recover_commit_trans(
items_queued++;
if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
error = xlog_recover_items_pass2(log, trans,
- &buffer_list, &ra_list);
+ buffer_list, &ra_list);
list_splice_tail_init(&ra_list, &done_list);
items_queued = 0;
}
@@ -3894,15 +3925,14 @@ out:
if (!list_empty(&ra_list)) {
if (!error)
error = xlog_recover_items_pass2(log, trans,
- &buffer_list, &ra_list);
+ buffer_list, &ra_list);
list_splice_tail_init(&ra_list, &done_list);
}
if (!list_empty(&done_list))
list_splice_init(&done_list, &trans->r_itemq);
- error2 = xfs_buf_delwri_submit(&buffer_list);
- return error ? error : error2;
+ return error;
}
STATIC void
@@ -4085,7 +4115,8 @@ xlog_recovery_process_trans(
char *dp,
unsigned int len,
unsigned int flags,
- int pass)
+ int pass,
+ struct list_head *buffer_list)
{
int error = 0;
bool freeit = false;
@@ -4109,7 +4140,8 @@ xlog_recovery_process_trans(
error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
break;
case XLOG_COMMIT_TRANS:
- error = xlog_recover_commit_trans(log, trans, pass);
+ error = xlog_recover_commit_trans(log, trans, pass,
+ buffer_list);
/* success or fail, we are now done with this transaction. */
freeit = true;
break;
@@ -4191,10 +4223,12 @@ xlog_recover_process_ophdr(
struct xlog_op_header *ohead,
char *dp,
char *end,
- int pass)
+ int pass,
+ struct list_head *buffer_list)
{
struct xlog_recover *trans;
unsigned int len;
+ int error;
/* Do we understand who wrote this op? */
if (ohead->oh_clientid != XFS_TRANSACTION &&
@@ -4221,8 +4255,39 @@ xlog_recover_process_ophdr(
return 0;
}
+ /*
+ * The recovered buffer queue is drained only once we know that all
+ * recovery items for the current LSN have been processed. This is
+ * required because:
+ *
+ * - Buffer write submission updates the metadata LSN of the buffer.
+ * - Log recovery skips items with a metadata LSN >= the current LSN of
+ * the recovery item.
+ * - Separate recovery items against the same metadata buffer can share
+ * a current LSN. I.e., consider that the LSN of a recovery item is
+ * defined as the starting LSN of the first record in which its
+ * transaction appears, that a record can hold multiple transactions,
+ * and/or that a transaction can span multiple records.
+ *
+ * In other words, we are allowed to submit a buffer from log recovery
+ * once per current LSN. Otherwise, we may incorrectly skip recovery
+ * items and cause corruption.
+ *
+ * We don't know up front whether buffers are updated multiple times per
+ * LSN. Therefore, track the current LSN of each commit log record as it
+ * is processed and drain the queue when it changes. Use commit records
+ * because they are ordered correctly by the logging code.
+ */
+ if (log->l_recovery_lsn != trans->r_lsn &&
+ ohead->oh_flags & XLOG_COMMIT_TRANS) {
+ error = xfs_buf_delwri_submit(buffer_list);
+ if (error)
+ return error;
+ log->l_recovery_lsn = trans->r_lsn;
+ }
+
return xlog_recovery_process_trans(log, trans, dp, len,
- ohead->oh_flags, pass);
+ ohead->oh_flags, pass, buffer_list);
}
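
The block comment above states the constraint behind threading buffer_list through the callers: buffers queued during recovery may only be submitted once per current LSN, so the queue is drained whenever the commit record's LSN changes. Below is a minimal, self-contained sketch of that drain-on-LSN-change pattern; the queue counter and submit_queued() helper are simplified placeholders rather than the kernel's delwri machinery.

/* Illustrative sketch only: drain a pending-buffer queue whenever the
 * commit-record LSN changes, mirroring the rule described above. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t lsn_t;

static int pending;                     /* queued, not-yet-written buffers */
static lsn_t recovery_lsn = -1;

static void submit_queued(void)
{
	if (pending) {
		printf("submitting %d buffer(s)\n", pending);
		pending = 0;
	}
}

static void process_commit_record(lsn_t lsn, int nbuffers)
{
	/* New LSN: everything queued for the previous LSN must hit disk
	 * before items at this LSN may consult buffer metadata LSNs. */
	if (recovery_lsn != lsn) {
		submit_queued();
		recovery_lsn = lsn;
	}
	pending += nbuffers;    /* buffers touched while replaying this record */
}

int main(void)
{
	process_commit_record(10, 2);
	process_commit_record(10, 1);   /* same LSN: keep queueing */
	process_commit_record(20, 3);   /* LSN changed: drain first */
	submit_queued();                /* final drain at the end of the pass */
	return 0;
}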
/*
@@ -4240,7 +4305,8 @@ xlog_recover_process_data(
struct hlist_head rhash[],
struct xlog_rec_header *rhead,
char *dp,
- int pass)
+ int pass,
+ struct list_head *buffer_list)
{
struct xlog_op_header *ohead;
char *end;
@@ -4254,6 +4320,7 @@ xlog_recover_process_data(
if (xlog_header_check_recover(log->l_mp, rhead))
return -EIO;
+ trace_xfs_log_recover_record(log, rhead, pass);
while ((dp < end) && num_logops) {
ohead = (struct xlog_op_header *)dp;
@@ -4262,7 +4329,7 @@ xlog_recover_process_data(
/* errors will abort recovery */
error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
- dp, end, pass);
+ dp, end, pass, buffer_list);
if (error)
return error;
@@ -4685,7 +4752,8 @@ xlog_recover_process(
struct hlist_head rhash[],
struct xlog_rec_header *rhead,
char *dp,
- int pass)
+ int pass,
+ struct list_head *buffer_list)
{
int error;
__le32 crc;
@@ -4732,7 +4800,8 @@ xlog_recover_process(
if (error)
return error;
- return xlog_recover_process_data(log, rhash, rhead, dp, pass);
+ return xlog_recover_process_data(log, rhash, rhead, dp, pass,
+ buffer_list);
}
STATIC int
@@ -4793,9 +4862,11 @@ xlog_do_recovery_pass(
char *offset;
xfs_buf_t *hbp, *dbp;
int error = 0, h_size, h_len;
+ int error2 = 0;
int bblks, split_bblks;
int hblks, split_hblks, wrapped_hblks;
struct hlist_head rhash[XLOG_RHASH_SIZE];
+ LIST_HEAD (buffer_list);
ASSERT(head_blk != tail_blk);
rhead_blk = 0;
@@ -4981,7 +5052,7 @@ xlog_do_recovery_pass(
}
error = xlog_recover_process(log, rhash, rhead, offset,
- pass);
+ pass, &buffer_list);
if (error)
goto bread_err2;
@@ -5012,7 +5083,8 @@ xlog_do_recovery_pass(
if (error)
goto bread_err2;
- error = xlog_recover_process(log, rhash, rhead, offset, pass);
+ error = xlog_recover_process(log, rhash, rhead, offset, pass,
+ &buffer_list);
if (error)
goto bread_err2;
@@ -5025,10 +5097,17 @@ xlog_do_recovery_pass(
bread_err1:
xlog_put_bp(hbp);
+ /*
+ * Submit buffers that have been added from the last record processed,
+ * regardless of error status.
+ */
+ if (!list_empty(&buffer_list))
+ error2 = xfs_buf_delwri_submit(&buffer_list);
+
if (error && first_bad)
*first_bad = rhead_blk;
- return error;
+ return error ? error : error2;
}
/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index faeead671f9f..56e85a6c85c7 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -934,6 +934,20 @@ xfs_mountfs(
}
/*
+ * Now the log is fully replayed, we can transition to full read-only
+ * mode for read-only mounts. This will sync all the metadata and clean
+ * the log so that the recovery we just performed does not have to be
+ * replayed again on the next mount.
+ *
+ * We use the same quiesce mechanism as the rw->ro remount, as they are
+ * semantically identical operations.
+ */
+ if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
+ XFS_MOUNT_RDONLY) {
+ xfs_quiesce_attr(mp);
+ }
+
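
The hunk above quiesces read-only mounts after log replay, gated on RDONLY being set while NORECOVERY is clear. A tiny sketch of that mask test, with made-up flag values rather than the XFS_MOUNT_* ones, is below.

/* Illustrative sketch only: "read-only and recovery was actually run". */
#include <stdio.h>

#define MOUNT_RDONLY      (1u << 0)
#define MOUNT_NORECOVERY  (1u << 1)

static int should_quiesce(unsigned int flags)
{
	/* True only when RDONLY is set and NORECOVERY is clear. */
	return (flags & (MOUNT_RDONLY | MOUNT_NORECOVERY)) == MOUNT_RDONLY;
}

int main(void)
{
	printf("rw mount:             %d\n", should_quiesce(0));
	printf("ro mount:             %d\n", should_quiesce(MOUNT_RDONLY));
	printf("ro, norecovery mount: %d\n",
	       should_quiesce(MOUNT_RDONLY | MOUNT_NORECOVERY));
	return 0;
}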
+ /*
* Complete the quota initialisation, post-log-replay component.
*/
if (quotamount) {
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b36676cde103..041d9493e798 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -57,10 +57,16 @@ enum {
#define XFS_ERR_RETRY_FOREVER -1
+/*
+ * Although retry_timeout is in jiffies which is normally an unsigned long,
+ * we limit the retry timeout to 86400 seconds, or one day. So even a
+ * signed 32-bit long is sufficient for a HZ value up to 24855. Making it
+ * signed lets us store the special "-1" value, meaning retry forever.
+ */
struct xfs_error_cfg {
struct xfs_kobj kobj;
int max_retries;
- unsigned long retry_timeout; /* in jiffies, 0 = no timeout */
+ long retry_timeout; /* in jiffies, -1 = infinite */
};
typedef struct xfs_mount {
@@ -325,6 +331,22 @@ xfs_mp_fail_writes(struct xfs_mount *mp)
}
#endif
+/* per-AG block reservation data structures */
+enum xfs_ag_resv_type {
+ XFS_AG_RESV_NONE = 0,
+ XFS_AG_RESV_METADATA,
+ XFS_AG_RESV_AGFL,
+};
+
+struct xfs_ag_resv {
+ /* number of blocks originally reserved here */
+ xfs_extlen_t ar_orig_reserved;
+ /* number of blocks reserved here */
+ xfs_extlen_t ar_reserved;
+ /* number of blocks originally asked for */
+ xfs_extlen_t ar_asked;
+};
+
/*
* Per-ag incore structure, copies of information in agf and agi, to improve the
* performance of allocation group selection.
@@ -372,8 +394,28 @@ typedef struct xfs_perag {
/* for rcu-safe freeing */
struct rcu_head rcu_head;
int pagb_count; /* pagb slots in use */
+
+ /* Blocks reserved for all kinds of metadata. */
+ struct xfs_ag_resv pag_meta_resv;
+ /* Blocks reserved for just AGFL-based metadata. */
+ struct xfs_ag_resv pag_agfl_resv;
} xfs_perag_t;
+static inline struct xfs_ag_resv *
+xfs_perag_resv(
+ struct xfs_perag *pag,
+ enum xfs_ag_resv_type type)
+{
+ switch (type) {
+ case XFS_AG_RESV_METADATA:
+ return &pag->pag_meta_resv;
+ case XFS_AG_RESV_AGFL:
+ return &pag->pag_agfl_resv;
+ default:
+ return NULL;
+ }
+}
+
extern void xfs_uuid_table_free(void);
extern int xfs_log_sbcount(xfs_mount_t *);
extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
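
The xfs_perag_resv() helper added above simply dispatches on the reservation type to one of two structs embedded in the per-AG object. A compilable toy version of the same dispatch pattern, with generic names standing in for the XFS ones, looks like this:

/* Illustrative sketch only: select one of several reservation structs
 * embedded in a per-AG object, keyed by reservation type. */
#include <stdio.h>

enum resv_type { RESV_NONE = 0, RESV_METADATA, RESV_AGFL };

struct resv {
	unsigned int orig_reserved;     /* blocks originally reserved */
	unsigned int reserved;          /* blocks currently reserved */
	unsigned int asked;             /* blocks originally asked for */
};

struct perag {
	struct resv meta_resv;          /* metadata reservation */
	struct resv agfl_resv;          /* AGFL reservation */
};

static struct resv *perag_resv(struct perag *pag, enum resv_type type)
{
	switch (type) {
	case RESV_METADATA:
		return &pag->meta_resv;
	case RESV_AGFL:
		return &pag->agfl_resv;
	default:
		return NULL;            /* RESV_NONE has no backing struct */
	}
}

int main(void)
{
	struct perag pag = { { 0, 0, 0 }, { 0, 0, 0 } };
	struct resv *r = perag_resv(&pag, RESV_METADATA);

	if (r) {
		r->asked = 128;
		r->reserved = r->orig_reserved = 100;
	}
	printf("metadata: asked %u reserved %u\n",
	       pag.meta_resv.asked, pag.meta_resv.reserved);
	printf("RESV_NONE maps to %p\n", (void *)perag_resv(&pag, RESV_NONE));
	return 0;
}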
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 2500f28689d5..0432a459871c 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -51,28 +51,16 @@ xfs_rui_item_free(
kmem_zone_free(xfs_rui_zone, ruip);
}
-/*
- * This returns the number of iovecs needed to log the given rui item.
- * We only need 1 iovec for an rui item. It just logs the rui_log_format
- * structure.
- */
-static inline int
-xfs_rui_item_sizeof(
- struct xfs_rui_log_item *ruip)
-{
- return sizeof(struct xfs_rui_log_format) +
- (ruip->rui_format.rui_nextents - 1) *
- sizeof(struct xfs_map_extent);
-}
-
STATIC void
xfs_rui_item_size(
struct xfs_log_item *lip,
int *nvecs,
int *nbytes)
{
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+
*nvecs += 1;
- *nbytes += xfs_rui_item_sizeof(RUI_ITEM(lip));
+ *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}
/*
@@ -97,7 +85,7 @@ xfs_rui_item_format(
ruip->rui_format.rui_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
- xfs_rui_item_sizeof(ruip));
+ xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}
/*
@@ -205,16 +193,12 @@ xfs_rui_init(
{
struct xfs_rui_log_item *ruip;
- uint size;
ASSERT(nextents > 0);
- if (nextents > XFS_RUI_MAX_FAST_EXTENTS) {
- size = (uint)(sizeof(struct xfs_rui_log_item) +
- ((nextents - 1) * sizeof(struct xfs_map_extent)));
- ruip = kmem_zalloc(size, KM_SLEEP);
- } else {
+ if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
+ ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
+ else
ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
- }
xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
ruip->rui_format.rui_nextents = nextents;
@@ -239,14 +223,12 @@ xfs_rui_copy_format(
uint len;
src_rui_fmt = buf->i_addr;
- len = sizeof(struct xfs_rui_log_format) +
- (src_rui_fmt->rui_nextents - 1) *
- sizeof(struct xfs_map_extent);
+ len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);
if (buf->i_len != len)
return -EFSCORRUPTED;
- memcpy((char *)dst_rui_fmt, (char *)src_rui_fmt, len);
+ memcpy(dst_rui_fmt, src_rui_fmt, len);
return 0;
}
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
index aefcc3a318a5..340c968e1f9c 100644
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -70,6 +70,14 @@ struct xfs_rui_log_item {
struct xfs_rui_log_format rui_format;
};
+static inline size_t
+xfs_rui_log_item_sizeof(
+ unsigned int nr)
+{
+ return offsetof(struct xfs_rui_log_item, rui_format) +
+ xfs_rui_log_format_sizeof(nr);
+}
+
/*
* This is the "rmap update done" log item. It is used to log the fact that
* some rmapbt updates mentioned in an earlier rui item have been performed.
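
The xfs_rui_log_item_sizeof() helper added above sizes a log item whose format structure ends in a variable number of extents, using offsetof() so the format header is counted exactly once. A standalone sketch of that sizing pattern follows; all names are hypothetical, and embedding a struct that ends in a flexible array member relies on the same GNU C extension the kernel uses.

/* Illustrative sketch only: sizing a log item that embeds a format struct
 * ending in a variable-length extent array (hypothetical names). */
#include <stddef.h>
#include <stdio.h>

struct map_extent { unsigned long long start, len; };

struct rui_format {
	unsigned int nextents;
	struct map_extent extents[];    /* flexible array member */
};

struct rui_item {
	int li_type;                    /* other log-item bookkeeping */
	struct rui_format format;       /* variable-size part, kept last
					 * (GNU C extension) */
};

static size_t rui_format_sizeof(unsigned int nr)
{
	return sizeof(struct rui_format) + nr * sizeof(struct map_extent);
}

static size_t rui_item_sizeof(unsigned int nr)
{
	/* offsetof() gives the bytes before the embedded format struct;
	 * rui_format_sizeof() then counts the format header and the
	 * extents exactly once. */
	return offsetof(struct rui_item, format) + rui_format_sizeof(nr);
}

int main(void)
{
	printf("item with 1 extent:   %zu bytes\n", rui_item_sizeof(1));
	printf("item with 16 extents: %zu bytes\n", rui_item_sizeof(16));
	return 0;
}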
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index fd6be45b3a1e..2d092f9577ca 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1137,7 +1137,7 @@ xfs_restore_resvblks(struct xfs_mount *mp)
* Note: xfs_log_quiesce() stops background log work - the callers must ensure
* it is started again when appropriate.
*/
-static void
+void
xfs_quiesce_attr(
struct xfs_mount *mp)
{
@@ -1782,9 +1782,8 @@ xfs_init_zones(void)
if (!xfs_rud_zone)
goto out_destroy_icreate_zone;
- xfs_rui_zone = kmem_zone_init((sizeof(struct xfs_rui_log_item) +
- ((XFS_RUI_MAX_FAST_EXTENTS - 1) *
- sizeof(struct xfs_map_extent))),
+ xfs_rui_zone = kmem_zone_init(
+ xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
"xfs_rui_item");
if (!xfs_rui_zone)
goto out_destroy_rud_zone;
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 529bce9fc37e..b6418abd85ad 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -61,6 +61,7 @@ struct xfs_mount;
struct xfs_buftarg;
struct block_device;
+extern void xfs_quiesce_attr(struct xfs_mount *mp);
extern void xfs_flush_inodes(struct xfs_mount *mp);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 79cfd3fc5324..5f8d55d29a11 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -393,9 +393,15 @@ max_retries_show(
struct kobject *kobject,
char *buf)
{
+ int retries;
struct xfs_error_cfg *cfg = to_error_cfg(kobject);
- return snprintf(buf, PAGE_SIZE, "%d\n", cfg->max_retries);
+ if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
+ retries = -1;
+ else
+ retries = cfg->max_retries;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", retries);
}
static ssize_t
@@ -415,7 +421,10 @@ max_retries_store(
if (val < -1)
return -EINVAL;
- cfg->max_retries = val;
+ if (val == -1)
+ cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+ else
+ cfg->max_retries = val;
return count;
}
XFS_SYSFS_ATTR_RW(max_retries);
@@ -425,10 +434,15 @@ retry_timeout_seconds_show(
struct kobject *kobject,
char *buf)
{
+ int timeout;
struct xfs_error_cfg *cfg = to_error_cfg(kobject);
- return snprintf(buf, PAGE_SIZE, "%ld\n",
- jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC);
+ if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
+ timeout = -1;
+ else
+ timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
}
static ssize_t
@@ -445,11 +459,16 @@ retry_timeout_seconds_store(
if (ret)
return ret;
- /* 1 day timeout maximum */
- if (val < 0 || val > 86400)
+ /* 1 day timeout maximum, -1 means infinite */
+ if (val < -1 || val > 86400)
return -EINVAL;
- cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
+ if (val == -1)
+ cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+ else {
+ cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
+ ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
+ }
return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
@@ -519,18 +538,19 @@ struct xfs_error_init {
static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
{ .name = "default",
.max_retries = XFS_ERR_RETRY_FOREVER,
- .retry_timeout = 0,
+ .retry_timeout = XFS_ERR_RETRY_FOREVER,
},
{ .name = "EIO",
.max_retries = XFS_ERR_RETRY_FOREVER,
- .retry_timeout = 0,
+ .retry_timeout = XFS_ERR_RETRY_FOREVER,
},
{ .name = "ENOSPC",
.max_retries = XFS_ERR_RETRY_FOREVER,
- .retry_timeout = 0,
+ .retry_timeout = XFS_ERR_RETRY_FOREVER,
},
{ .name = "ENODEV",
- .max_retries = 0,
+ .max_retries = 0, /* We can't recover from devices disappearing */
+ .retry_timeout = 0,
},
};
@@ -561,7 +581,10 @@ xfs_error_sysfs_init_class(
goto out_error;
cfg->max_retries = init[i].max_retries;
- cfg->retry_timeout = msecs_to_jiffies(
+ if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
+ cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+ else
+ cfg->retry_timeout = msecs_to_jiffies(
init[i].retry_timeout * MSEC_PER_SEC);
}
return 0;
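
The sysfs changes above fold "retry forever" into a -1 sentinel stored in retry_timeout rather than overloading zero, and reject values outside [-1, 86400]. A small standalone sketch of the store/show round-trip follows; HZ and the helper names are simplified stand-ins for the real jiffies conversion and sysfs handlers.

/* Illustrative sketch only: a retry timeout where -1 means "retry forever";
 * the jiffies conversion is simulated with HZ = 100. */
#include <stdio.h>

#define HZ                 100
#define ERR_RETRY_FOREVER  (-1L)
#define MAX_TIMEOUT_SECS   86400L       /* one day cap */

static long retry_timeout;              /* in "jiffies", -1 = forever */

static int store_timeout_seconds(long val)
{
	if (val < -1 || val > MAX_TIMEOUT_SECS)
		return -1;              /* -EINVAL in the real interface */

	if (val == -1)
		retry_timeout = ERR_RETRY_FOREVER;
	else
		retry_timeout = val * HZ;
	return 0;
}

static long show_timeout_seconds(void)
{
	if (retry_timeout == ERR_RETRY_FOREVER)
		return -1;
	return retry_timeout / HZ;
}

int main(void)
{
	store_timeout_seconds(300);
	printf("300s  -> shows %ld\n", show_timeout_seconds());

	store_timeout_seconds(-1);
	printf("-1    -> shows %ld (forever)\n", show_timeout_seconds());

	printf("90000 -> store returns %d (rejected)\n",
	       store_timeout_seconds(90000));
	return 0;
}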
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index d303a665dba9..16093c7dacde 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1170,7 +1170,6 @@ DEFINE_RW_EVENT(xfs_file_dax_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
-DEFINE_RW_EVENT(xfs_file_splice_read);
DECLARE_EVENT_CLASS(xfs_page_class,
TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
@@ -1570,14 +1569,15 @@ TRACE_EVENT(xfs_agf,
TRACE_EVENT(xfs_free_extent,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_extlen_t len, bool isfl, int haveleft, int haveright),
- TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
+ xfs_extlen_t len, enum xfs_ag_resv_type resv, int haveleft,
+ int haveright),
+ TP_ARGS(mp, agno, agbno, len, resv, haveleft, haveright),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
- __field(int, isfl)
+ __field(int, resv)
__field(int, haveleft)
__field(int, haveright)
),
@@ -1586,16 +1586,16 @@ TRACE_EVENT(xfs_free_extent,
__entry->agno = agno;
__entry->agbno = agbno;
__entry->len = len;
- __entry->isfl = isfl;
+ __entry->resv = resv;
__entry->haveleft = haveleft;
__entry->haveright = haveright;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
+ TP_printk("dev %d:%d agno %u agbno %u len %u resv %d %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
__entry->len,
- __entry->isfl,
+ __entry->resv,
__entry->haveleft ?
(__entry->haveright ? "both" : "left") :
(__entry->haveright ? "right" : "none"))
@@ -1622,8 +1622,8 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__field(short, otype)
__field(char, wasdel)
__field(char, wasfromfl)
- __field(char, isfl)
- __field(char, userdata)
+ __field(int, resv)
+ __field(int, datatype)
__field(xfs_fsblock_t, firstblock)
),
TP_fast_assign(
@@ -1643,14 +1643,14 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__entry->otype = args->otype;
__entry->wasdel = args->wasdel;
__entry->wasfromfl = args->wasfromfl;
- __entry->isfl = args->isfl;
- __entry->userdata = args->userdata;
+ __entry->resv = args->resv;
+ __entry->datatype = args->datatype;
__entry->firstblock = args->firstblock;
),
TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
"prod %u minleft %u total %u alignment %u minalignslop %u "
- "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
- "userdata %d firstblock 0x%llx",
+ "len %u type %s otype %s wasdel %d wasfromfl %d resv %d "
+ "datatype 0x%x firstblock 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
@@ -1667,8 +1667,8 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
__entry->wasdel,
__entry->wasfromfl,
- __entry->isfl,
- __entry->userdata,
+ __entry->resv,
+ __entry->datatype,
(unsigned long long)__entry->firstblock)
)
@@ -1984,6 +1984,29 @@ DEFINE_EVENT(xfs_swap_extent_class, name, \
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
+TRACE_EVENT(xfs_log_recover_record,
+ TP_PROTO(struct xlog *log, struct xlog_rec_header *rhead, int pass),
+ TP_ARGS(log, rhead, pass),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_lsn_t, lsn)
+ __field(int, len)
+ __field(int, num_logops)
+ __field(int, pass)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->lsn = be64_to_cpu(rhead->h_lsn);
+ __entry->len = be32_to_cpu(rhead->h_len);
+ __entry->num_logops = be32_to_cpu(rhead->h_num_logops);
+ __entry->pass = pass;
+ ),
+ TP_printk("dev %d:%d lsn 0x%llx len 0x%x num_logops 0x%x pass %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->lsn, __entry->len, __entry->num_logops,
+ __entry->pass)
+)
+
DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
TP_PROTO(struct xlog *log, struct xlog_recover *trans,
struct xlog_recover_item *item, int pass),
@@ -1992,6 +2015,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
__field(dev_t, dev)
__field(unsigned long, item)
__field(xlog_tid_t, tid)
+ __field(xfs_lsn_t, lsn)
__field(int, type)
__field(int, pass)
__field(int, count)
@@ -2001,15 +2025,17 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
__entry->dev = log->l_mp->m_super->s_dev;
__entry->item = (unsigned long)item;
__entry->tid = trans->r_log_tid;
+ __entry->lsn = trans->r_lsn;
__entry->type = ITEM_TYPE(item);
__entry->pass = pass;
__entry->count = item->ri_cnt;
__entry->total = item->ri_total;
),
- TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
- "item region count/total %d/%d",
+ TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item 0x%p, "
+ "item type %s item region count/total %d/%d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tid,
+ __entry->lsn,
__entry->pass,
(void *)__entry->item,
__print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
@@ -2068,6 +2094,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
+DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_skip);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
@@ -2558,6 +2585,60 @@ DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
+/* per-AG reservation */
+DECLARE_EVENT_CLASS(xfs_ag_resv_class,
+ TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type resv,
+ xfs_extlen_t len),
+ TP_ARGS(pag, resv, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, resv)
+ __field(xfs_extlen_t, freeblks)
+ __field(xfs_extlen_t, flcount)
+ __field(xfs_extlen_t, reserved)
+ __field(xfs_extlen_t, asked)
+ __field(xfs_extlen_t, len)
+ ),
+ TP_fast_assign(
+ struct xfs_ag_resv *r = xfs_perag_resv(pag, resv);
+
+ __entry->dev = pag->pag_mount->m_super->s_dev;
+ __entry->agno = pag->pag_agno;
+ __entry->resv = resv;
+ __entry->freeblks = pag->pagf_freeblks;
+ __entry->flcount = pag->pagf_flcount;
+ __entry->reserved = r ? r->ar_reserved : 0;
+ __entry->asked = r ? r->ar_asked : 0;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d agno %u resv %d freeblks %u flcount %u resv %u ask %u len %u\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->resv,
+ __entry->freeblks,
+ __entry->flcount,
+ __entry->reserved,
+ __entry->asked,
+ __entry->len)
+)
+#define DEFINE_AG_RESV_EVENT(name) \
+DEFINE_EVENT(xfs_ag_resv_class, name, \
+ TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type type, \
+ xfs_extlen_t len), \
+ TP_ARGS(pag, type, len))
+
+/* per-AG reservation tracepoints */
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_init);
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_free);
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_alloc_extent);
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
+DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
+
+DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
+DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 5f3d33d16e67..70f42ea86dfb 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -217,7 +217,7 @@ undo_log:
undo_blocks:
if (blocks > 0) {
- xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
+ xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
@@ -318,7 +318,6 @@ xfs_trans_mod_sb(
* in-core superblock's counter. This should only
* be applied to the on-disk superblock.
*/
- ASSERT(delta < 0);
tp->t_res_fdblocks_delta += delta;
if (xfs_sb_version_haslazysbcount(&mp->m_sb))
flags &= ~XFS_TRANS_SB_DIRTY;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 459ddec137a4..ab438647592a 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -79,7 +79,8 @@ xfs_trans_free_extent(
trace_xfs_bmap_free_deferred(tp->t_mountp, agno, 0, agbno, ext_len);
- error = xfs_free_extent(tp, start_block, ext_len, oinfo);
+ error = xfs_free_extent(tp, start_block, ext_len, oinfo,
+ XFS_AG_RESV_NONE);
/*
* Mark the transaction dirty, even on error. This ensures the
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index ea62245fee26..62900938f26d 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -147,6 +147,7 @@ __xfs_xattr_put_listent(
arraytop = context->count + prefix_len + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
+ context->seen_enough = 1;
return 0;
}
offset = (char *)context->alist + context->count;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index d4458b6dbfb4..c4f8fd2fd384 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -800,6 +800,9 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#endif
#endif
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 24563970ff7b..3e42bcdd014b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -454,6 +454,12 @@
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;
+#define CPUIDLE_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
+ *(.cpuidle.text) \
+ VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 85b467b3a207..6cb4e90f81fc 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_MPLL 4
#define CLK_FOUT_BPLL 5
#define CLK_FOUT_KPLL 6
+#define CLK_FOUT_EPLL 7
/* gate for special clocks (sclk) */
#define CLK_SCLK_UART0 128
@@ -55,6 +56,8 @@
#define CLK_MMC0 351
#define CLK_MMC1 352
#define CLK_MMC2 353
+#define CLK_PDMA0 362
+#define CLK_PDMA1 363
#define CLK_USBH20 365
#define CLK_USBD300 366
#define CLK_USBD301 367
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 17ab8394bec7..6fd21c291416 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -214,6 +214,9 @@
#define CLK_MOUT_SW_ACLK400 651
#define CLK_MOUT_USER_ACLK300_GSCL 652
#define CLK_MOUT_SW_ACLK300_GSCL 653
+#define CLK_MOUT_MCLK_CDREX 654
+#define CLK_MOUT_BPLL 655
+#define CLK_MOUT_MX_MSPLL_CCORE 656
/* divider clocks */
#define CLK_DOUT_PIXEL 768
@@ -239,8 +242,14 @@
#define CLK_DOUT_ACLK300_DISP1 788
#define CLK_DOUT_ACLK300_GSCL 789
#define CLK_DOUT_ACLK400_DISP1 790
+#define CLK_DOUT_PCLK_CDREX 791
+#define CLK_DOUT_SCLK_CDREX 792
+#define CLK_DOUT_ACLK_CDREX1 793
+#define CLK_DOUT_CCLK_DREX0 794
+#define CLK_DOUT_CLK2X_PHY0 795
+#define CLK_DOUT_PCLK_CORE_MEM 796
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 791
+#define CLK_NR_CLKS 797
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h
index c66fc405a79a..842cdc0adff1 100644
--- a/include/dt-bindings/clock/exynos5440.h
+++ b/include/dt-bindings/clock/exynos5440.h
@@ -14,6 +14,8 @@
#define CLK_XTAL 1
#define CLK_ARM_CLK 2
+#define CLK_CPLLA 3
+#define CLK_CPLLB 4
#define CLK_SPI_BAUD 16
#define CLK_PB0_250 17
#define CLK_PR0_250 18
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
new file mode 100644
index 000000000000..31751482d13c
--- /dev/null
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -0,0 +1,66 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+
+#define CLKID_AO_REMOTE 0
+#define CLKID_AO_I2C_MASTER 1
+#define CLKID_AO_I2C_SLAVE 2
+#define CLKID_AO_UART1 3
+#define CLKID_AO_UART2 4
+#define CLKID_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index f889d80246cb..baade6f429d0 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -6,7 +6,23 @@
#define __GXBB_CLKC_H
#define CLKID_CPUCLK 1
+#define CLKID_HDMI_PLL 2
+#define CLKID_FCLK_DIV2 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4 6
#define CLKID_CLK81 12
+#define CLKID_MPLL2 15
+#define CLKID_SPI 34
+#define CLKID_I2C 22
#define CLKID_ETH 36
+#define CLKID_USB0 50
+#define CLKID_USB1 51
+#define CLKID_USB 55
+#define CLKID_USB1_DDR_BRIDGE 64
+#define CLKID_USB0_DDR_BRIDGE 65
+#define CLKID_AO_I2C 93
+#define CLKID_SD_EMMC_A 94
+#define CLKID_SD_EMMC_B 95
+#define CLKID_SD_EMMC_C 96
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h
index f4b7478e23c8..d382fc71aa83 100644
--- a/include/dt-bindings/clock/imx5-clock.h
+++ b/include/dt-bindings/clock/imx5-clock.h
@@ -201,6 +201,19 @@
#define IMX5_CLK_STEP_SEL 189
#define IMX5_CLK_CPU_PODF_SEL 190
#define IMX5_CLK_ARM 191
-#define IMX5_CLK_END 192
+#define IMX5_CLK_FIRI_PRED 192
+#define IMX5_CLK_FIRI_SEL 193
+#define IMX5_CLK_FIRI_PODF 194
+#define IMX5_CLK_FIRI_SERIAL_GATE 195
+#define IMX5_CLK_FIRI_IPG_GATE 196
+#define IMX5_CLK_CSI0_MCLK1_PRED 197
+#define IMX5_CLK_CSI0_MCLK1_SEL 198
+#define IMX5_CLK_CSI0_MCLK1_PODF 199
+#define IMX5_CLK_CSI0_MCLK1_GATE 200
+#define IMX5_CLK_IEEE1588_PRED 201
+#define IMX5_CLK_IEEE1588_SEL 202
+#define IMX5_CLK_IEEE1588_PODF 203
+#define IMX5_CLK_IEEE1588_GATE 204
+#define IMX5_CLK_END 205
#endif /* __DT_BINDINGS_CLOCK_IMX5_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 29050337d9d5..da59fd9cdb5e 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -269,6 +269,8 @@
#define IMX6QDL_CLK_PRG0_APB 256
#define IMX6QDL_CLK_PRG1_APB 257
#define IMX6QDL_CLK_PRE_AXI 258
-#define IMX6QDL_CLK_END 259
+#define IMX6QDL_CLK_MLB_SEL 259
+#define IMX6QDL_CLK_MLB_PODF 260
+#define IMX6QDL_CLK_END 261
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/maxim,max77620.h b/include/dt-bindings/clock/maxim,max77620.h
new file mode 100644
index 000000000000..82aba2849681
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77620.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding clock constants for the Maxim 77620 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77620_CLK_32K_OUT0 0
+
+/* Total number of clocks. */
+#define MAX77620_CLKS_NUM (MAX77620_CLK_32K_OUT0 + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 595a58d0969a..a55ff8c9b30f 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -22,6 +22,4 @@
#define CLKID_MPEG_SEL 14
#define CLKID_MPEG_DIV 15
-#define CLK_NR_CLKS (CLKID_MPEG_DIV + 1)
-
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
new file mode 100644
index 000000000000..2062c67e2e51
--- /dev/null
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT2701_H
+#define _DT_BINDINGS_CLK_MT2701_H
+
+/* TOPCKGEN */
+#define CLK_TOP_SYSPLL 1
+#define CLK_TOP_SYSPLL_D2 2
+#define CLK_TOP_SYSPLL_D3 3
+#define CLK_TOP_SYSPLL_D5 4
+#define CLK_TOP_SYSPLL_D7 5
+#define CLK_TOP_SYSPLL1_D2 6
+#define CLK_TOP_SYSPLL1_D4 7
+#define CLK_TOP_SYSPLL1_D8 8
+#define CLK_TOP_SYSPLL1_D16 9
+#define CLK_TOP_SYSPLL2_D2 10
+#define CLK_TOP_SYSPLL2_D4 11
+#define CLK_TOP_SYSPLL2_D8 12
+#define CLK_TOP_SYSPLL3_D2 13
+#define CLK_TOP_SYSPLL3_D4 14
+#define CLK_TOP_SYSPLL4_D2 15
+#define CLK_TOP_SYSPLL4_D4 16
+#define CLK_TOP_UNIVPLL 17
+#define CLK_TOP_UNIVPLL_D2 18
+#define CLK_TOP_UNIVPLL_D3 19
+#define CLK_TOP_UNIVPLL_D5 20
+#define CLK_TOP_UNIVPLL_D7 21
+#define CLK_TOP_UNIVPLL_D26 22
+#define CLK_TOP_UNIVPLL_D52 23
+#define CLK_TOP_UNIVPLL_D108 24
+#define CLK_TOP_USB_PHY48M 25
+#define CLK_TOP_UNIVPLL1_D2 26
+#define CLK_TOP_UNIVPLL1_D4 27
+#define CLK_TOP_UNIVPLL1_D8 28
+#define CLK_TOP_UNIVPLL2_D2 29
+#define CLK_TOP_UNIVPLL2_D4 30
+#define CLK_TOP_UNIVPLL2_D8 31
+#define CLK_TOP_UNIVPLL2_D16 32
+#define CLK_TOP_UNIVPLL2_D32 33
+#define CLK_TOP_UNIVPLL3_D2 34
+#define CLK_TOP_UNIVPLL3_D4 35
+#define CLK_TOP_UNIVPLL3_D8 36
+#define CLK_TOP_MSDCPLL 37
+#define CLK_TOP_MSDCPLL_D2 38
+#define CLK_TOP_MSDCPLL_D4 39
+#define CLK_TOP_MSDCPLL_D8 40
+#define CLK_TOP_MMPLL 41
+#define CLK_TOP_MMPLL_D2 42
+#define CLK_TOP_DMPLL 43
+#define CLK_TOP_DMPLL_D2 44
+#define CLK_TOP_DMPLL_D4 45
+#define CLK_TOP_DMPLL_X2 46
+#define CLK_TOP_TVDPLL 47
+#define CLK_TOP_TVDPLL_D2 48
+#define CLK_TOP_TVDPLL_D4 49
+#define CLK_TOP_TVD2PLL 50
+#define CLK_TOP_TVD2PLL_D2 51
+#define CLK_TOP_HADDS2PLL_98M 52
+#define CLK_TOP_HADDS2PLL_294M 53
+#define CLK_TOP_HADDS2_FB 54
+#define CLK_TOP_MIPIPLL_D2 55
+#define CLK_TOP_MIPIPLL_D4 56
+#define CLK_TOP_HDMIPLL 57
+#define CLK_TOP_HDMIPLL_D2 58
+#define CLK_TOP_HDMIPLL_D3 59
+#define CLK_TOP_HDMI_SCL_RX 60
+#define CLK_TOP_HDMI_0_PIX340M 61
+#define CLK_TOP_HDMI_0_DEEP340M 62
+#define CLK_TOP_HDMI_0_PLL340M 63
+#define CLK_TOP_AUD1PLL_98M 64
+#define CLK_TOP_AUD2PLL_90M 65
+#define CLK_TOP_AUDPLL 66
+#define CLK_TOP_AUDPLL_D4 67
+#define CLK_TOP_AUDPLL_D8 68
+#define CLK_TOP_AUDPLL_D16 69
+#define CLK_TOP_AUDPLL_D24 70
+#define CLK_TOP_ETHPLL_500M 71
+#define CLK_TOP_VDECPLL 72
+#define CLK_TOP_VENCPLL 73
+#define CLK_TOP_MIPIPLL 74
+#define CLK_TOP_ARMPLL_1P3G 75
+
+#define CLK_TOP_MM_SEL 76
+#define CLK_TOP_DDRPHYCFG_SEL 77
+#define CLK_TOP_MEM_SEL 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_CAMTG_SEL 80
+#define CLK_TOP_MFG_SEL 81
+#define CLK_TOP_VDEC_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_MSDC30_0_SEL 84
+#define CLK_TOP_USB20_SEL 85
+#define CLK_TOP_SPI0_SEL 86
+#define CLK_TOP_UART_SEL 87
+#define CLK_TOP_AUDINTBUS_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_MSDC30_2_SEL 90
+#define CLK_TOP_MSDC30_1_SEL 91
+#define CLK_TOP_DPI1_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_SCP_SEL 94
+#define CLK_TOP_PMICSPI_SEL 95
+#define CLK_TOP_APLL_SEL 96
+#define CLK_TOP_HDMI_SEL 97
+#define CLK_TOP_TVE_SEL 98
+#define CLK_TOP_EMMC_HCLK_SEL 99
+#define CLK_TOP_NFI2X_SEL 100
+#define CLK_TOP_RTC_SEL 101
+#define CLK_TOP_OSD_SEL 102
+#define CLK_TOP_NR_SEL 103
+#define CLK_TOP_DI_SEL 104
+#define CLK_TOP_FLASH_SEL 105
+#define CLK_TOP_ASM_M_SEL 106
+#define CLK_TOP_ASM_I_SEL 107
+#define CLK_TOP_INTDIR_SEL 108
+#define CLK_TOP_HDMIRX_BIST_SEL 109
+#define CLK_TOP_ETHIF_SEL 110
+#define CLK_TOP_MS_CARD_SEL 111
+#define CLK_TOP_ASM_H_SEL 112
+#define CLK_TOP_SPI1_SEL 113
+#define CLK_TOP_CMSYS_SEL 114
+#define CLK_TOP_MSDC30_3_SEL 115
+#define CLK_TOP_HDMIRX26_24_SEL 116
+#define CLK_TOP_AUD2DVD_SEL 117
+#define CLK_TOP_8BDAC_SEL 118
+#define CLK_TOP_SPI2_SEL 119
+#define CLK_TOP_AUD_MUX1_SEL 120
+#define CLK_TOP_AUD_MUX2_SEL 121
+#define CLK_TOP_AUDPLL_MUX_SEL 122
+#define CLK_TOP_AUD_K1_SRC_SEL 123
+#define CLK_TOP_AUD_K2_SRC_SEL 124
+#define CLK_TOP_AUD_K3_SRC_SEL 125
+#define CLK_TOP_AUD_K4_SRC_SEL 126
+#define CLK_TOP_AUD_K5_SRC_SEL 127
+#define CLK_TOP_AUD_K6_SRC_SEL 128
+#define CLK_TOP_PADMCLK_SEL 129
+#define CLK_TOP_AUD_EXTCK1_DIV 130
+#define CLK_TOP_AUD_EXTCK2_DIV 131
+#define CLK_TOP_AUD_MUX1_DIV 132
+#define CLK_TOP_AUD_MUX2_DIV 133
+#define CLK_TOP_AUD_K1_SRC_DIV 134
+#define CLK_TOP_AUD_K2_SRC_DIV 135
+#define CLK_TOP_AUD_K3_SRC_DIV 136
+#define CLK_TOP_AUD_K4_SRC_DIV 137
+#define CLK_TOP_AUD_K5_SRC_DIV 138
+#define CLK_TOP_AUD_K6_SRC_DIV 139
+#define CLK_TOP_AUD_I2S1_MCLK 140
+#define CLK_TOP_AUD_I2S2_MCLK 141
+#define CLK_TOP_AUD_I2S3_MCLK 142
+#define CLK_TOP_AUD_I2S4_MCLK 143
+#define CLK_TOP_AUD_I2S5_MCLK 144
+#define CLK_TOP_AUD_I2S6_MCLK 145
+#define CLK_TOP_AUD_48K_TIMING 146
+#define CLK_TOP_AUD_44K_TIMING 147
+
+#define CLK_TOP_32K_INTERNAL 148
+#define CLK_TOP_32K_EXTERNAL 149
+#define CLK_TOP_CLK26M_D8 150
+#define CLK_TOP_8BDAC 151
+#define CLK_TOP_WBG_DIG_416M 152
+#define CLK_TOP_DPI 153
+#define CLK_TOP_HDMITX_CLKDIG_CTS 154
+#define CLK_TOP_DSI0_LNTC_DSI 155
+#define CLK_TOP_AUD_EXT1 156
+#define CLK_TOP_AUD_EXT2 157
+#define CLK_TOP_NFI1X_PAD 158
+#define CLK_TOP_NR 159
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 1
+#define CLK_APMIXED_MAINPLL 2
+#define CLK_APMIXED_UNIVPLL 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_AUD1PLL 7
+#define CLK_APMIXED_TRGPLL 8
+#define CLK_APMIXED_ETHPLL 9
+#define CLK_APMIXED_VDECPLL 10
+#define CLK_APMIXED_HADDS2PLL 11
+#define CLK_APMIXED_AUD2PLL 12
+#define CLK_APMIXED_TVD2PLL 13
+#define CLK_APMIXED_NR 14
+
+/* DDRPHY */
+
+#define CLK_DDRPHY_VENCPLL 1
+#define CLK_DDRPHY_NR 2
+
+/* INFRACFG */
+
+#define CLK_INFRA_DBG 1
+#define CLK_INFRA_SMI 2
+#define CLK_INFRA_QAXI_CM4 3
+#define CLK_INFRA_AUD_SPLIN_B 4
+#define CLK_INFRA_AUDIO 5
+#define CLK_INFRA_EFUSE 6
+#define CLK_INFRA_L2C_SRAM 7
+#define CLK_INFRA_M4U 8
+#define CLK_INFRA_CONNMCU 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_RAMBUFIF 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CEC 14
+#define CLK_INFRA_IRRX 15
+#define CLK_INFRA_PMICSPI 16
+#define CLK_INFRA_PMICWRAP 17
+#define CLK_INFRA_DDCCI 18
+#define CLK_INFRA_CLK_13M 19
+#define CLK_INFRA_NR 20
+
+/* PERICFG */
+
+#define CLK_PERI_NFI 1
+#define CLK_PERI_THERM 2
+#define CLK_PERI_PWM1 3
+#define CLK_PERI_PWM2 4
+#define CLK_PERI_PWM3 5
+#define CLK_PERI_PWM4 6
+#define CLK_PERI_PWM5 7
+#define CLK_PERI_PWM6 8
+#define CLK_PERI_PWM7 9
+#define CLK_PERI_PWM 10
+#define CLK_PERI_USB0 11
+#define CLK_PERI_USB1 12
+#define CLK_PERI_AP_DMA 13
+#define CLK_PERI_MSDC30_0 14
+#define CLK_PERI_MSDC30_1 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_3 17
+#define CLK_PERI_MSDC50_3 18
+#define CLK_PERI_NLI 19
+#define CLK_PERI_UART0 20
+#define CLK_PERI_UART1 21
+#define CLK_PERI_UART2 22
+#define CLK_PERI_UART3 23
+#define CLK_PERI_BTIF 24
+#define CLK_PERI_I2C0 25
+#define CLK_PERI_I2C1 26
+#define CLK_PERI_I2C2 27
+#define CLK_PERI_I2C3 28
+#define CLK_PERI_AUXADC 29
+#define CLK_PERI_SPI0 30
+#define CLK_PERI_ETH 31
+#define CLK_PERI_USB0_MCU 32
+
+#define CLK_PERI_USB1_MCU 33
+#define CLK_PERI_USB_SLV 34
+#define CLK_PERI_GCPU 35
+#define CLK_PERI_NFI_ECC 36
+#define CLK_PERI_NFI_PAD 37
+#define CLK_PERI_FLASH 38
+#define CLK_PERI_HOST89_INT 39
+#define CLK_PERI_HOST89_SPI 40
+#define CLK_PERI_HOST89_DVD 41
+#define CLK_PERI_SPI1 42
+#define CLK_PERI_SPI2 43
+#define CLK_PERI_FCI 44
+
+#define CLK_PERI_UART0_SEL 45
+#define CLK_PERI_UART1_SEL 46
+#define CLK_PERI_UART2_SEL 47
+#define CLK_PERI_UART3_SEL 48
+#define CLK_PERI_NR 49
+
+/* AUDIO */
+
+#define CLK_AUD_AFE 1
+#define CLK_AUD_LRCK_DETECT 2
+#define CLK_AUD_I2S 3
+#define CLK_AUD_APLL_TUNER 4
+#define CLK_AUD_HDMI 5
+#define CLK_AUD_SPDF 6
+#define CLK_AUD_SPDF2 7
+#define CLK_AUD_APLL 8
+#define CLK_AUD_TML 9
+#define CLK_AUD_AHB_IDLE_EXT 10
+#define CLK_AUD_AHB_IDLE_INT 11
+
+#define CLK_AUD_I2SIN1 12
+#define CLK_AUD_I2SIN2 13
+#define CLK_AUD_I2SIN3 14
+#define CLK_AUD_I2SIN4 15
+#define CLK_AUD_I2SIN5 16
+#define CLK_AUD_I2SIN6 17
+#define CLK_AUD_I2SO1 18
+#define CLK_AUD_I2SO2 19
+#define CLK_AUD_I2SO3 20
+#define CLK_AUD_I2SO4 21
+#define CLK_AUD_I2SO5 22
+#define CLK_AUD_I2SO6 23
+#define CLK_AUD_ASRCI1 24
+#define CLK_AUD_ASRCI2 25
+#define CLK_AUD_ASRCO1 26
+#define CLK_AUD_ASRCO2 27
+#define CLK_AUD_ASRC11 28
+#define CLK_AUD_ASRC12 29
+#define CLK_AUD_HDMIRX 30
+#define CLK_AUD_INTDIR 31
+#define CLK_AUD_A1SYS 32
+#define CLK_AUD_A2SYS 33
+#define CLK_AUD_AFE_CONN 34
+#define CLK_AUD_AFE_PCMIF 35
+#define CLK_AUD_AFE_MRGIF 36
+
+#define CLK_AUD_MMIF_UL1 37
+#define CLK_AUD_MMIF_UL2 38
+#define CLK_AUD_MMIF_UL3 39
+#define CLK_AUD_MMIF_UL4 40
+#define CLK_AUD_MMIF_UL5 41
+#define CLK_AUD_MMIF_UL6 42
+#define CLK_AUD_MMIF_DL1 43
+#define CLK_AUD_MMIF_DL2 44
+#define CLK_AUD_MMIF_DL3 45
+#define CLK_AUD_MMIF_DL4 46
+#define CLK_AUD_MMIF_DL5 47
+#define CLK_AUD_MMIF_DL6 48
+#define CLK_AUD_MMIF_DLMCH 49
+#define CLK_AUD_MMIF_ARB1 50
+#define CLK_AUD_MMIF_AWB1 51
+#define CLK_AUD_MMIF_AWB2 52
+#define CLK_AUD_MMIF_DAI 53
+
+#define CLK_AUD_DMIC1 54
+#define CLK_AUD_DMIC2 55
+#define CLK_AUD_ASRCI3 56
+#define CLK_AUD_ASRCI4 57
+#define CLK_AUD_ASRCI5 58
+#define CLK_AUD_ASRCI6 59
+#define CLK_AUD_ASRCO3 60
+#define CLK_AUD_ASRCO4 61
+#define CLK_AUD_ASRCO5 62
+#define CLK_AUD_ASRCO6 63
+#define CLK_AUD_MEM_ASRC1 64
+#define CLK_AUD_MEM_ASRC2 65
+#define CLK_AUD_MEM_ASRC3 66
+#define CLK_AUD_MEM_ASRC4 67
+#define CLK_AUD_MEM_ASRC5 68
+#define CLK_AUD_DSD_ENC 69
+#define CLK_AUD_ASRC_BRG 70
+#define CLK_AUD_NR 71
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_CMDQ 3
+#define CLK_MM_MUTEX 4
+#define CLK_MM_DISP_COLOR 5
+#define CLK_MM_DISP_BLS 6
+#define CLK_MM_DISP_WDMA 7
+#define CLK_MM_DISP_RDMA 8
+#define CLK_MM_DISP_OVL 9
+#define CLK_MM_MDP_TDSHP 10
+#define CLK_MM_MDP_WROT 11
+#define CLK_MM_MDP_WDMA 12
+#define CLK_MM_MDP_RSZ1 13
+#define CLK_MM_MDP_RSZ0 14
+#define CLK_MM_MDP_RDMA 15
+#define CLK_MM_MDP_BLS_26M 16
+#define CLK_MM_CAM_MDP 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_MUTEX_32K 19
+#define CLK_MM_DISP_RDMA1 20
+#define CLK_MM_DISP_UFOE 21
+
+#define CLK_MM_DSI_ENGINE 22
+#define CLK_MM_DSI_DIG 23
+#define CLK_MM_DPI_DIGL 24
+#define CLK_MM_DPI_ENGINE 25
+#define CLK_MM_DPI1_DIGL 26
+#define CLK_MM_DPI1_ENGINE 27
+#define CLK_MM_TVE_OUTPUT 28
+#define CLK_MM_TVE_INPUT 29
+#define CLK_MM_HDMI_PIXEL 30
+#define CLK_MM_HDMI_PLL 31
+#define CLK_MM_HDMI_AUDIO 32
+#define CLK_MM_HDMI_SPDIF 33
+#define CLK_MM_TVE_FMM 34
+#define CLK_MM_NR 35
+
+/* IMGSYS */
+
+#define CLK_IMG_SMI_COMM 1
+#define CLK_IMG_RESZ 2
+#define CLK_IMG_JPGDEC_SMI 3
+#define CLK_IMG_JPGDEC 4
+#define CLK_IMG_VENC_LT 5
+#define CLK_IMG_VENC 6
+#define CLK_IMG_NR 7
+
+/* VDEC */
+
+#define CLK_VDEC_CKGEN 1
+#define CLK_VDEC_LARB 2
+#define CLK_VDEC_NR 3
+
+/* HIFSYS */
+
+#define CLK_HIFSYS_USB0PHY 1
+#define CLK_HIFSYS_USB1PHY 2
+#define CLK_HIFSYS_PCIE0 3
+#define CLK_HIFSYS_PCIE1 4
+#define CLK_HIFSYS_PCIE2 5
+#define CLK_HIFSYS_NR 6
+
+/* ETHSYS */
+#define CLK_ETHSYS_HSDMA 1
+#define CLK_ETHSYS_ESW 2
+#define CLK_ETHSYS_GP2 3
+#define CLK_ETHSYS_GP1 4
+#define CLK_ETHSYS_PCM 5
+#define CLK_ETHSYS_GDMA 6
+#define CLK_ETHSYS_I2S 7
+#define CLK_ETHSYS_CRYPTO 8
+#define CLK_ETHSYS_NR 9
+
+/* BDP */
+
+#define CLK_BDP_BRG_BA 1
+#define CLK_BDP_BRG_DRAM 2
+#define CLK_BDP_LARB_DRAM 3
+#define CLK_BDP_WR_VDI_PXL 4
+#define CLK_BDP_WR_VDI_DRAM 5
+#define CLK_BDP_WR_B 6
+#define CLK_BDP_DGI_IN 7
+#define CLK_BDP_DGI_OUT 8
+#define CLK_BDP_FMT_MAST_27 9
+#define CLK_BDP_FMT_B 10
+#define CLK_BDP_OSD_B 11
+#define CLK_BDP_OSD_DRAM 12
+#define CLK_BDP_OSD_AGENT 13
+#define CLK_BDP_OSD_PXL 14
+#define CLK_BDP_RLE_B 15
+#define CLK_BDP_RLE_AGENT 16
+#define CLK_BDP_RLE_DRAM 17
+#define CLK_BDP_F27M 18
+#define CLK_BDP_F27M_VDOUT 19
+#define CLK_BDP_F27_74_74 20
+#define CLK_BDP_F2FS 21
+#define CLK_BDP_F2FS74_148 22
+#define CLK_BDP_FB 23
+#define CLK_BDP_VDO_DRAM 24
+#define CLK_BDP_VDO_2FS 25
+#define CLK_BDP_VDO_B 26
+#define CLK_BDP_WR_DI_PXL 27
+#define CLK_BDP_WR_DI_DRAM 28
+#define CLK_BDP_WR_DI_B 29
+#define CLK_BDP_NR_PXL 30
+#define CLK_BDP_NR_DRAM 31
+#define CLK_BDP_NR_B 32
+
+#define CLK_BDP_RX_F 33
+#define CLK_BDP_RX_X 34
+#define CLK_BDP_RXPDT 35
+#define CLK_BDP_RX_CSCL_N 36
+#define CLK_BDP_RX_CSCL 37
+#define CLK_BDP_RX_DDCSCL_N 38
+#define CLK_BDP_RX_DDCSCL 39
+#define CLK_BDP_RX_VCO 40
+#define CLK_BDP_RX_DP 41
+#define CLK_BDP_RX_P 42
+#define CLK_BDP_RX_M 43
+#define CLK_BDP_RX_PLL 44
+#define CLK_BDP_BRG_RT_B 45
+#define CLK_BDP_BRG_RT_DRAM 46
+#define CLK_BDP_LARBRT_DRAM 47
+#define CLK_BDP_TMDS_SYN 48
+#define CLK_BDP_HDMI_MON 49
+#define CLK_BDP_NR 50
+
+#endif /* _DT_BINDINGS_CLK_MT2701_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h b/include/dt-bindings/clock/qcom,gcc-mdm9615.h
new file mode 100644
index 000000000000..9ab2c4087120
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MDM_GCC_9615_H
+#define _DT_BINDINGS_CLK_MDM_GCC_9615_H
+
+#define AFAB_CLK_SRC 0
+#define AFAB_CORE_CLK 1
+#define SFAB_MSS_Q6_SW_A_CLK 2
+#define SFAB_MSS_Q6_FW_A_CLK 3
+#define QDSS_STM_CLK 4
+#define SCSS_A_CLK 5
+#define SCSS_H_CLK 6
+#define SCSS_XO_SRC_CLK 7
+#define AFAB_EBI1_CH0_A_CLK 8
+#define AFAB_EBI1_CH1_A_CLK 9
+#define AFAB_AXI_S0_FCLK 10
+#define AFAB_AXI_S1_FCLK 11
+#define AFAB_AXI_S2_FCLK 12
+#define AFAB_AXI_S3_FCLK 13
+#define AFAB_AXI_S4_FCLK 14
+#define SFAB_CORE_CLK 15
+#define SFAB_AXI_S0_FCLK 16
+#define SFAB_AXI_S1_FCLK 17
+#define SFAB_AXI_S2_FCLK 18
+#define SFAB_AXI_S3_FCLK 19
+#define SFAB_AXI_S4_FCLK 20
+#define SFAB_AHB_S0_FCLK 21
+#define SFAB_AHB_S1_FCLK 22
+#define SFAB_AHB_S2_FCLK 23
+#define SFAB_AHB_S3_FCLK 24
+#define SFAB_AHB_S4_FCLK 25
+#define SFAB_AHB_S5_FCLK 26
+#define SFAB_AHB_S6_FCLK 27
+#define SFAB_AHB_S7_FCLK 28
+#define QDSS_AT_CLK_SRC 29
+#define QDSS_AT_CLK 30
+#define QDSS_TRACECLKIN_CLK_SRC 31
+#define QDSS_TRACECLKIN_CLK 32
+#define QDSS_TSCTR_CLK_SRC 33
+#define QDSS_TSCTR_CLK 34
+#define SFAB_ADM0_M0_A_CLK 35
+#define SFAB_ADM0_M1_A_CLK 36
+#define SFAB_ADM0_M2_H_CLK 37
+#define ADM0_CLK 38
+#define ADM0_PBUS_CLK 39
+#define MSS_XPU_CLK 40
+#define IMEM0_A_CLK 41
+#define QDSS_H_CLK 42
+#define PCIE_A_CLK 43
+#define PCIE_AUX_CLK 44
+#define PCIE_PHY_REF_CLK 45
+#define PCIE_H_CLK 46
+#define SFAB_CLK_SRC 47
+#define MAHB0_CLK 48
+#define Q6SW_CLK_SRC 49
+#define Q6SW_CLK 50
+#define Q6FW_CLK_SRC 51
+#define Q6FW_CLK 52
+#define SFAB_MSS_M_A_CLK 53
+#define SFAB_USB3_M_A_CLK 54
+#define SFAB_LPASS_Q6_A_CLK 55
+#define SFAB_AFAB_M_A_CLK 56
+#define AFAB_SFAB_M0_A_CLK 57
+#define AFAB_SFAB_M1_A_CLK 58
+#define SFAB_SATA_S_H_CLK 59
+#define DFAB_CLK_SRC 60
+#define DFAB_CLK 61
+#define SFAB_DFAB_M_A_CLK 62
+#define DFAB_SFAB_M_A_CLK 63
+#define DFAB_SWAY0_H_CLK 64
+#define DFAB_SWAY1_H_CLK 65
+#define DFAB_ARB0_H_CLK 66
+#define DFAB_ARB1_H_CLK 67
+#define PPSS_H_CLK 68
+#define PPSS_PROC_CLK 69
+#define PPSS_TIMER0_CLK 70
+#define PPSS_TIMER1_CLK 71
+#define PMEM_A_CLK 72
+#define DMA_BAM_H_CLK 73
+#define SIC_H_CLK 74
+#define SPS_TIC_H_CLK 75
+#define SLIMBUS_H_CLK 76
+#define SLIMBUS_XO_SRC_CLK 77
+#define CFPB_2X_CLK_SRC 78
+#define CFPB_CLK 79
+#define CFPB0_H_CLK 80
+#define CFPB1_H_CLK 81
+#define CFPB2_H_CLK 82
+#define SFAB_CFPB_M_H_CLK 83
+#define CFPB_MASTER_H_CLK 84
+#define SFAB_CFPB_S_H_CLK 85
+#define CFPB_SPLITTER_H_CLK 86
+#define TSIF_H_CLK 87
+#define TSIF_INACTIVITY_TIMERS_CLK 88
+#define TSIF_REF_SRC 89
+#define TSIF_REF_CLK 90
+#define CE1_H_CLK 91
+#define CE1_CORE_CLK 92
+#define CE1_SLEEP_CLK 93
+#define CE2_H_CLK 94
+#define CE2_CORE_CLK 95
+#define SFPB_H_CLK_SRC 97
+#define SFPB_H_CLK 98
+#define SFAB_SFPB_M_H_CLK 99
+#define SFAB_SFPB_S_H_CLK 100
+#define RPM_PROC_CLK 101
+#define RPM_BUS_H_CLK 102
+#define RPM_SLEEP_CLK 103
+#define RPM_TIMER_CLK 104
+#define RPM_MSG_RAM_H_CLK 105
+#define PMIC_ARB0_H_CLK 106
+#define PMIC_ARB1_H_CLK 107
+#define PMIC_SSBI2_SRC 108
+#define PMIC_SSBI2_CLK 109
+#define SDC1_H_CLK 110
+#define SDC2_H_CLK 111
+#define SDC3_H_CLK 112
+#define SDC4_H_CLK 113
+#define SDC5_H_CLK 114
+#define SDC1_SRC 115
+#define SDC2_SRC 116
+#define SDC3_SRC 117
+#define SDC4_SRC 118
+#define SDC5_SRC 119
+#define SDC1_CLK 120
+#define SDC2_CLK 121
+#define SDC3_CLK 122
+#define SDC4_CLK 123
+#define SDC5_CLK 124
+#define DFAB_A2_H_CLK 125
+#define USB_HS1_H_CLK 126
+#define USB_HS1_XCVR_SRC 127
+#define USB_HS1_XCVR_CLK 128
+#define USB_HSIC_H_CLK 129
+#define USB_HSIC_XCVR_FS_SRC 130
+#define USB_HSIC_XCVR_FS_CLK 131
+#define USB_HSIC_SYSTEM_CLK_SRC 132
+#define USB_HSIC_SYSTEM_CLK 133
+#define CFPB0_C0_H_CLK 134
+#define CFPB0_C1_H_CLK 135
+#define CFPB0_D0_H_CLK 136
+#define CFPB0_D1_H_CLK 137
+#define USB_FS1_H_CLK 138
+#define USB_FS1_XCVR_FS_SRC 139
+#define USB_FS1_XCVR_FS_CLK 140
+#define USB_FS1_SYSTEM_CLK 141
+#define USB_FS2_H_CLK 142
+#define USB_FS2_XCVR_FS_SRC 143
+#define USB_FS2_XCVR_FS_CLK 144
+#define USB_FS2_SYSTEM_CLK 145
+#define GSBI_COMMON_SIM_SRC 146
+#define GSBI1_H_CLK 147
+#define GSBI2_H_CLK 148
+#define GSBI3_H_CLK 149
+#define GSBI4_H_CLK 150
+#define GSBI5_H_CLK 151
+#define GSBI6_H_CLK 152
+#define GSBI7_H_CLK 153
+#define GSBI8_H_CLK 154
+#define GSBI9_H_CLK 155
+#define GSBI10_H_CLK 156
+#define GSBI11_H_CLK 157
+#define GSBI12_H_CLK 158
+#define GSBI1_UART_SRC 159
+#define GSBI1_UART_CLK 160
+#define GSBI2_UART_SRC 161
+#define GSBI2_UART_CLK 162
+#define GSBI3_UART_SRC 163
+#define GSBI3_UART_CLK 164
+#define GSBI4_UART_SRC 165
+#define GSBI4_UART_CLK 166
+#define GSBI5_UART_SRC 167
+#define GSBI5_UART_CLK 168
+#define GSBI6_UART_SRC 169
+#define GSBI6_UART_CLK 170
+#define GSBI7_UART_SRC 171
+#define GSBI7_UART_CLK 172
+#define GSBI8_UART_SRC 173
+#define GSBI8_UART_CLK 174
+#define GSBI9_UART_SRC 175
+#define GSBI9_UART_CLK 176
+#define GSBI10_UART_SRC 177
+#define GSBI10_UART_CLK 178
+#define GSBI11_UART_SRC 179
+#define GSBI11_UART_CLK 180
+#define GSBI12_UART_SRC 181
+#define GSBI12_UART_CLK 182
+#define GSBI1_QUP_SRC 183
+#define GSBI1_QUP_CLK 184
+#define GSBI2_QUP_SRC 185
+#define GSBI2_QUP_CLK 186
+#define GSBI3_QUP_SRC 187
+#define GSBI3_QUP_CLK 188
+#define GSBI4_QUP_SRC 189
+#define GSBI4_QUP_CLK 190
+#define GSBI5_QUP_SRC 191
+#define GSBI5_QUP_CLK 192
+#define GSBI6_QUP_SRC 193
+#define GSBI6_QUP_CLK 194
+#define GSBI7_QUP_SRC 195
+#define GSBI7_QUP_CLK 196
+#define GSBI8_QUP_SRC 197
+#define GSBI8_QUP_CLK 198
+#define GSBI9_QUP_SRC 199
+#define GSBI9_QUP_CLK 200
+#define GSBI10_QUP_SRC 201
+#define GSBI10_QUP_CLK 202
+#define GSBI11_QUP_SRC 203
+#define GSBI11_QUP_CLK 204
+#define GSBI12_QUP_SRC 205
+#define GSBI12_QUP_CLK 206
+#define GSBI1_SIM_CLK 207
+#define GSBI2_SIM_CLK 208
+#define GSBI3_SIM_CLK 209
+#define GSBI4_SIM_CLK 210
+#define GSBI5_SIM_CLK 211
+#define GSBI6_SIM_CLK 212
+#define GSBI7_SIM_CLK 213
+#define GSBI8_SIM_CLK 214
+#define GSBI9_SIM_CLK 215
+#define GSBI10_SIM_CLK 216
+#define GSBI11_SIM_CLK 217
+#define GSBI12_SIM_CLK 218
+#define USB_HSIC_HSIC_CLK_SRC 219
+#define USB_HSIC_HSIC_CLK 220
+#define USB_HSIC_HSIO_CAL_CLK 221
+#define SPDM_CFG_H_CLK 222
+#define SPDM_MSTR_H_CLK 223
+#define SPDM_FF_CLK_SRC 224
+#define SPDM_FF_CLK 225
+#define SEC_CTRL_CLK 226
+#define SEC_CTRL_ACC_CLK_SRC 227
+#define SEC_CTRL_ACC_CLK 228
+#define TLMM_H_CLK 229
+#define TLMM_CLK 230
+#define SFAB_MSS_S_H_CLK 231
+#define MSS_SLP_CLK 232
+#define MSS_Q6SW_JTAG_CLK 233
+#define MSS_Q6FW_JTAG_CLK 234
+#define MSS_S_H_CLK 235
+#define MSS_CXO_SRC_CLK 236
+#define SATA_H_CLK 237
+#define SATA_CLK_SRC 238
+#define SATA_RXOOB_CLK 239
+#define SATA_PMALIVE_CLK 240
+#define SATA_PHY_REF_CLK 241
+#define TSSC_CLK_SRC 242
+#define TSSC_CLK 243
+#define PDM_SRC 244
+#define PDM_CLK 245
+#define GP0_SRC 246
+#define GP0_CLK 247
+#define GP1_SRC 248
+#define GP1_CLK 249
+#define GP2_SRC 250
+#define GP2_CLK 251
+#define MPM_CLK 252
+#define EBI1_CLK_SRC 253
+#define EBI1_CH0_CLK 254
+#define EBI1_CH1_CLK 255
+#define EBI1_2X_CLK 256
+#define EBI1_CH0_DQ_CLK 257
+#define EBI1_CH1_DQ_CLK 258
+#define EBI1_CH0_CA_CLK 259
+#define EBI1_CH1_CA_CLK 260
+#define EBI1_XO_CLK 261
+#define SFAB_SMPSS_S_H_CLK 262
+#define PRNG_SRC 263
+#define PRNG_CLK 264
+#define PXO_SRC 265
+#define LPASS_CXO_CLK 266
+#define LPASS_PXO_CLK 267
+#define SPDM_CY_PORT0_CLK 268
+#define SPDM_CY_PORT1_CLK 269
+#define SPDM_CY_PORT2_CLK 270
+#define SPDM_CY_PORT3_CLK 271
+#define SPDM_CY_PORT4_CLK 272
+#define SPDM_CY_PORT5_CLK 273
+#define SPDM_CY_PORT6_CLK 274
+#define SPDM_CY_PORT7_CLK 275
+#define PLL0 276
+#define PLL0_VOTE 277
+#define PLL3 278
+#define PLL3_VOTE 279
+#define PLL4_VOTE 280
+#define PLL5 281
+#define PLL5_VOTE 282
+#define PLL6 283
+#define PLL6_VOTE 284
+#define PLL7_VOTE 285
+#define PLL8 286
+#define PLL8_VOTE 287
+#define PLL9 288
+#define PLL10 289
+#define PLL11 290
+#define PLL12 291
+#define PLL13 292
+#define PLL14 293
+#define PLL14_VOTE 294
+#define USB_HS3_H_CLK 295
+#define USB_HS3_XCVR_SRC 296
+#define USB_HS3_XCVR_CLK 297
+#define USB_HS4_H_CLK 298
+#define USB_HS4_XCVR_SRC 299
+#define USB_HS4_XCVR_CLK 300
+#define SATA_PHY_CFG_CLK 301
+#define SATA_A_CLK 302
+#define CE3_SRC 303
+#define CE3_CORE_CLK 304
+#define CE3_H_CLK 305
+#define USB_HS1_SYSTEM_CLK_SRC 306
+#define USB_HS1_SYSTEM_CLK 307
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
index 6f814db11c7e..1828723eb621 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -335,6 +335,11 @@
#define GCC_MSMPU_BCR 98
#define GCC_MSS_Q6_BCR 99
#define GCC_QREFS_VBG_CAL_BCR 100
+#define GCC_PCIE_PHY_COM_BCR 101
+#define GCC_PCIE_PHY_COM_NOCSR_BCR 102
+#define GCC_USB3_PHY_BCR 103
+#define GCC_USB3PHY_PHY_BCR 104
+
/* Indexes for GDSCs */
#define AGGRE0_NOC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
new file mode 100644
index 000000000000..cac963a2fdcb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
+#define _DT_BINDINGS_CLK_LCC_MDM9615_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SLIMBUS_SRC 9
+#define AUDIO_SLIMBUS_CLK 10
+#define SPS_SLIMBUS_CLK 11
+#define CODEC_I2S_MIC_OSR_SRC 12
+#define CODEC_I2S_MIC_OSR_CLK 13
+#define CODEC_I2S_MIC_DIV_CLK 14
+#define CODEC_I2S_MIC_BIT_DIV_CLK 15
+#define CODEC_I2S_MIC_BIT_CLK 16
+#define SPARE_I2S_MIC_OSR_SRC 17
+#define SPARE_I2S_MIC_OSR_CLK 18
+#define SPARE_I2S_MIC_DIV_CLK 19
+#define SPARE_I2S_MIC_BIT_DIV_CLK 20
+#define SPARE_I2S_MIC_BIT_CLK 21
+#define CODEC_I2S_SPKR_OSR_SRC 22
+#define CODEC_I2S_SPKR_OSR_CLK 23
+#define CODEC_I2S_SPKR_DIV_CLK 24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
+#define CODEC_I2S_SPKR_BIT_CLK 26
+#define SPARE_I2S_SPKR_OSR_SRC 27
+#define SPARE_I2S_SPKR_OSR_CLK 28
+#define SPARE_I2S_SPKR_DIV_CLK 29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
+#define SPARE_I2S_SPKR_BIT_CLK 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
index 7d3a7fa1a1bd..5abc445ad815 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
@@ -298,5 +298,6 @@
#define FD_GDSC 12
#define MDSS_GDSC 13
#define GPU_GX_GDSC 14
+#define MMAGIC_BIMC_GDSC 15
#endif
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 5128f4d94f44..3cd813896d08 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -25,6 +25,9 @@
#define R7S72100_CLK_SCIF6 1
#define R7S72100_CLK_SCIF7 0
+/* MSTP7 */
+#define R7S72100_CLK_ETHER 4
+
/* MSTP9 */
#define R7S72100_CLK_I2C0 7
#define R7S72100_CLK_I2C1 6
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index a3491ba2f6ec..9d02f5317c7c 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -67,6 +67,7 @@
#define R8A7794_CLK_IRQC 7
/* MSTP5 */
+#define R8A7794_CLK_AUDIO_DMAC0 2
#define R8A7794_CLK_PWM 23
/* MSTP7 */
@@ -107,6 +108,30 @@
#define R8A7794_CLK_I2C1 30
#define R8A7794_CLK_I2C0 31
+/* MSTP10 */
+#define R8A7794_CLK_SSI_ALL 5
+#define R8A7794_CLK_SSI9 6
+#define R8A7794_CLK_SSI8 7
+#define R8A7794_CLK_SSI7 8
+#define R8A7794_CLK_SSI6 9
+#define R8A7794_CLK_SSI5 10
+#define R8A7794_CLK_SSI4 11
+#define R8A7794_CLK_SSI3 12
+#define R8A7794_CLK_SSI2 13
+#define R8A7794_CLK_SSI1 14
+#define R8A7794_CLK_SSI0 15
+#define R8A7794_CLK_SCU_ALL 17
+#define R8A7794_CLK_SCU_DVC1 18
+#define R8A7794_CLK_SCU_DVC0 19
+#define R8A7794_CLK_SCU_CTU1_MIX1 20
+#define R8A7794_CLK_SCU_CTU0_MIX0 21
+#define R8A7794_CLK_SCU_SRC6 25
+#define R8A7794_CLK_SCU_SRC5 26
+#define R8A7794_CLK_SCU_SRC4 27
+#define R8A7794_CLK_SCU_SRC3 28
+#define R8A7794_CLK_SCU_SRC2 29
+#define R8A7794_CLK_SCU_SRC1 30
+
/* MSTP11 */
#define R8A7794_CLK_SCIFA3 6
#define R8A7794_CLK_SCIFA4 7
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 50a44cffb070..220a60f20d3b 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -131,12 +131,15 @@
#define SCLK_DPHY_RX0_CFG 165
#define SCLK_RMII_SRC 166
#define SCLK_PCIEPHY_REF100M 167
+#define SCLK_DDRC 168
#define DCLK_VOP0 180
#define DCLK_VOP1 181
#define DCLK_VOP0_DIV 182
#define DCLK_VOP1_DIV 183
#define DCLK_M0_PERILP 184
+#define DCLK_VOP0_FRAC 185
+#define DCLK_VOP1_FRAC 186
#define FCLK_CM0S 190
diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h
new file mode 100644
index 000000000000..4482530fb6f5
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-a31-ccu.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_
+#define _DT_BINDINGS_CLK_SUN6I_A31_H_
+
+#define CLK_PLL_PERIPH 10
+
+#define CLK_CPU 18
+
+#define CLK_AHB1_MIPIDSI 23
+#define CLK_AHB1_SS 24
+#define CLK_AHB1_DMA 25
+#define CLK_AHB1_MMC0 26
+#define CLK_AHB1_MMC1 27
+#define CLK_AHB1_MMC2 28
+#define CLK_AHB1_MMC3 29
+#define CLK_AHB1_NAND1 30
+#define CLK_AHB1_NAND0 31
+#define CLK_AHB1_SDRAM 32
+#define CLK_AHB1_EMAC 33
+#define CLK_AHB1_TS 34
+#define CLK_AHB1_HSTIMER 35
+#define CLK_AHB1_SPI0 36
+#define CLK_AHB1_SPI1 37
+#define CLK_AHB1_SPI2 38
+#define CLK_AHB1_SPI3 39
+#define CLK_AHB1_OTG 40
+#define CLK_AHB1_EHCI0 41
+#define CLK_AHB1_EHCI1 42
+#define CLK_AHB1_OHCI0 43
+#define CLK_AHB1_OHCI1 44
+#define CLK_AHB1_OHCI2 45
+#define CLK_AHB1_VE 46
+#define CLK_AHB1_LCD0 47
+#define CLK_AHB1_LCD1 48
+#define CLK_AHB1_CSI 49
+#define CLK_AHB1_HDMI 50
+#define CLK_AHB1_BE0 51
+#define CLK_AHB1_BE1 52
+#define CLK_AHB1_FE0 53
+#define CLK_AHB1_FE1 54
+#define CLK_AHB1_MP 55
+#define CLK_AHB1_GPU 56
+#define CLK_AHB1_DEU0 57
+#define CLK_AHB1_DEU1 58
+#define CLK_AHB1_DRC0 59
+#define CLK_AHB1_DRC1 60
+
+#define CLK_APB1_CODEC 61
+#define CLK_APB1_SPDIF 62
+#define CLK_APB1_DIGITAL_MIC 63
+#define CLK_APB1_PIO 64
+#define CLK_APB1_DAUDIO0 65
+#define CLK_APB1_DAUDIO1 66
+
+#define CLK_APB2_I2C0 67
+#define CLK_APB2_I2C1 68
+#define CLK_APB2_I2C2 69
+#define CLK_APB2_I2C3 70
+#define CLK_APB2_UART0 71
+#define CLK_APB2_UART1 72
+#define CLK_APB2_UART2 73
+#define CLK_APB2_UART3 74
+#define CLK_APB2_UART4 75
+#define CLK_APB2_UART5 76
+
+#define CLK_NAND0 77
+#define CLK_NAND1 78
+#define CLK_MMC0 79
+#define CLK_MMC0_SAMPLE 80
+#define CLK_MMC0_OUTPUT 81
+#define CLK_MMC1 82
+#define CLK_MMC1_SAMPLE 83
+#define CLK_MMC1_OUTPUT 84
+#define CLK_MMC2 85
+#define CLK_MMC2_SAMPLE 86
+#define CLK_MMC2_OUTPUT 87
+#define CLK_MMC3 88
+#define CLK_MMC3_SAMPLE 89
+#define CLK_MMC3_OUTPUT 90
+#define CLK_TS 91
+#define CLK_SS 92
+#define CLK_SPI0 93
+#define CLK_SPI1 94
+#define CLK_SPI2 95
+#define CLK_SPI3 96
+#define CLK_DAUDIO0 97
+#define CLK_DAUDIO1 98
+#define CLK_SPDIF 99
+#define CLK_USB_PHY0 100
+#define CLK_USB_PHY1 101
+#define CLK_USB_PHY2 102
+#define CLK_USB_OHCI0 103
+#define CLK_USB_OHCI1 104
+#define CLK_USB_OHCI2 105
+
+#define CLK_DRAM_VE 110
+#define CLK_DRAM_CSI_ISP 111
+#define CLK_DRAM_TS 112
+#define CLK_DRAM_DRC0 113
+#define CLK_DRAM_DRC1 114
+#define CLK_DRAM_DEU0 115
+#define CLK_DRAM_DEU1 116
+#define CLK_DRAM_FE0 117
+#define CLK_DRAM_FE1 118
+#define CLK_DRAM_BE0 119
+#define CLK_DRAM_BE1 120
+#define CLK_DRAM_MP 121
+
+#define CLK_BE0 122
+#define CLK_BE1 123
+#define CLK_FE0 124
+#define CLK_FE1 125
+#define CLK_MP 126
+#define CLK_LCD0_CH0 127
+#define CLK_LCD1_CH0 128
+#define CLK_LCD0_CH1 129
+#define CLK_LCD1_CH1 130
+#define CLK_CSI0_SCLK 131
+#define CLK_CSI0_MCLK 132
+#define CLK_CSI1_MCLK 133
+#define CLK_VE 134
+#define CLK_CODEC 135
+#define CLK_AVS 136
+#define CLK_DIGITAL_MIC 137
+#define CLK_HDMI 138
+#define CLK_HDMI_DDC 139
+#define CLK_PS 140
+
+#define CLK_MIPI_DSI 143
+#define CLK_MIPI_DSI_DPHY 144
+#define CLK_MIPI_CSI_DPHY 145
+#define CLK_IEP_DRC0 146
+#define CLK_IEP_DRC1 147
+#define CLK_IEP_DEU0 148
+#define CLK_IEP_DEU1 149
+#define CLK_GPU_CORE 150
+#define CLK_GPU_MEMORY 151
+#define CLK_GPU_HYD 152
+#define CLK_ATS 153
+#define CLK_TRACE 154
+
+#define CLK_OUT_A 155
+#define CLK_OUT_B 156
+#define CLK_OUT_C 157
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_A31_H_ */
diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
new file mode 100644
index 000000000000..f8222b6b2cc3
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
+#define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
+
+#define CLK_CPUX 18
+
+#define CLK_BUS_MIPI_DSI 23
+#define CLK_BUS_SS 24
+#define CLK_BUS_DMA 25
+#define CLK_BUS_MMC0 26
+#define CLK_BUS_MMC1 27
+#define CLK_BUS_MMC2 28
+#define CLK_BUS_NAND 29
+#define CLK_BUS_DRAM 30
+#define CLK_BUS_HSTIMER 31
+#define CLK_BUS_SPI0 32
+#define CLK_BUS_SPI1 33
+#define CLK_BUS_OTG 34
+#define CLK_BUS_EHCI 35
+#define CLK_BUS_OHCI 36
+#define CLK_BUS_VE 37
+#define CLK_BUS_LCD 38
+#define CLK_BUS_CSI 39
+#define CLK_BUS_DE_BE 40
+#define CLK_BUS_DE_FE 41
+#define CLK_BUS_GPU 42
+#define CLK_BUS_MSGBOX 43
+#define CLK_BUS_SPINLOCK 44
+#define CLK_BUS_DRC 45
+#define CLK_BUS_SAT 46
+#define CLK_BUS_CODEC 47
+#define CLK_BUS_PIO 48
+#define CLK_BUS_I2S0 49
+#define CLK_BUS_I2S1 50
+#define CLK_BUS_I2C0 51
+#define CLK_BUS_I2C1 52
+#define CLK_BUS_I2C2 53
+#define CLK_BUS_UART0 54
+#define CLK_BUS_UART1 55
+#define CLK_BUS_UART2 56
+#define CLK_BUS_UART3 57
+#define CLK_BUS_UART4 58
+#define CLK_NAND 59
+#define CLK_MMC0 60
+#define CLK_MMC0_SAMPLE 61
+#define CLK_MMC0_OUTPUT 62
+#define CLK_MMC1 63
+#define CLK_MMC1_SAMPLE 64
+#define CLK_MMC1_OUTPUT 65
+#define CLK_MMC2 66
+#define CLK_MMC2_SAMPLE 67
+#define CLK_MMC2_OUTPUT 68
+#define CLK_SS 69
+#define CLK_SPI0 70
+#define CLK_SPI1 71
+#define CLK_I2S0 72
+#define CLK_I2S1 73
+#define CLK_USB_PHY0 74
+#define CLK_USB_PHY1 75
+#define CLK_USB_HSIC 76
+#define CLK_USB_HSIC_12M 77
+#define CLK_USB_OHCI 78
+
+#define CLK_DRAM_VE 80
+#define CLK_DRAM_CSI 81
+#define CLK_DRAM_DRC 82
+#define CLK_DRAM_DE_FE 83
+#define CLK_DRAM_DE_BE 84
+#define CLK_DE_BE 85
+#define CLK_DE_FE 86
+#define CLK_LCD_CH0 87
+#define CLK_LCD_CH1 88
+#define CLK_CSI_SCLK 89
+#define CLK_CSI_MCLK 90
+#define CLK_VE 91
+#define CLK_AC_DIG 92
+#define CLK_AC_DIG_4X 93
+#define CLK_AVS 94
+
+#define CLK_DSI_SCLK 96
+#define CLK_DSI_DPHY 97
+#define CLK_DRC 98
+#define CLK_GPU 99
+#define CLK_ATS 100
+
+#endif /* _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ */
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
new file mode 100644
index 000000000000..822d52385080
--- /dev/null
+++ b/include/dt-bindings/clock/zx296718-clock.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2015 - 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_CLOCK_ZX296718_H
+#define __DT_BINDINGS_CLOCK_ZX296718_H
+
+/* PLL */
+#define ZX296718_PLL_CPU 1
+#define ZX296718_PLL_MAC 2
+#define ZX296718_PLL_MM0 3
+#define ZX296718_PLL_MM1 4
+#define ZX296718_PLL_VGA 5
+#define ZX296718_PLL_DDR 6
+#define ZX296718_PLL_AUDIO 7
+#define ZX296718_PLL_HSIC 8
+#define CPU_DBG_GATE 9
+#define A72_GATE 10
+#define CPU_PERI_GATE 11
+#define A53_GATE 12
+#define DDR1_GATE 13
+#define DDR0_GATE 14
+#define SD1_WCLK 15
+#define SD1_AHB 16
+#define SD0_WCLK 17
+#define SD0_AHB 18
+#define EMMC_WCLK 19
+#define EMMC_NAND_AXI 20
+#define NAND_WCLK 21
+#define EMMC_NAND_AHB 22
+#define LSP1_148M5 23
+#define LSP1_99M 24
+#define LSP1_24M 25
+#define LSP0_74M25 26
+#define LSP0_32K 27
+#define LSP0_148M5 28
+#define LSP0_99M 29
+#define LSP0_24M 30
+#define DEMUX_AXI 31
+#define DEMUX_APB 32
+#define DEMUX_148M5 33
+#define DEMUX_108M 34
+#define AUDIO_APB 35
+#define AUDIO_99M 36
+#define AUDIO_24M 37
+#define AUDIO_16M384 38
+#define AUDIO_32K 39
+#define WDT_WCLK 40
+#define TIMER_WCLK 41
+#define VDE_ACLK 42
+#define VCE_ACLK 43
+#define HDE_ACLK 44
+#define GPU_ACLK 45
+#define SAPPU_ACLK 46
+#define SAPPU_WCLK 47
+#define VOU_ACLK 48
+#define VOU_MAIN_WCLK 49
+#define VOU_AUX_WCLK 50
+#define VOU_PPU_WCLK 51
+#define MIPI_CFG_CLK 52
+#define VGA_I2C_WCLK 53
+#define MIPI_REF_CLK 54
+#define HDMI_OSC_CEC 55
+#define HDMI_OSC_CLK 56
+#define HDMI_XCLK 57
+#define VIU_M0_ACLK 58
+#define VIU_M1_ACLK 59
+#define VIU_WCLK 60
+#define VIU_JPEG_WCLK 61
+#define VIU_CFG_CLK 62
+#define TS_SYS_WCLK 63
+#define TS_SYS_108M 64
+#define USB20_HCLK 65
+#define USB20_PHY_CLK 66
+#define USB21_HCLK 67
+#define USB21_PHY_CLK 68
+#define GMAC_RMIICLK 69
+#define GMAC_PCLK 70
+#define GMAC_ACLK 71
+#define GMAC_RFCLK 72
+#define TEMPSENSOR_GATE 73
+
+#define TOP_NR_CLKS 74
+
+
+#define LSP0_TIMER3_PCLK 1
+#define LSP0_TIMER3_WCLK 2
+#define LSP0_TIMER4_PCLK 3
+#define LSP0_TIMER4_WCLK 4
+#define LSP0_TIMER5_PCLK 5
+#define LSP0_TIMER5_WCLK 6
+#define LSP0_UART3_PCLK 7
+#define LSP0_UART3_WCLK 8
+#define LSP0_UART1_PCLK 9
+#define LSP0_UART1_WCLK 10
+#define LSP0_UART2_PCLK 11
+#define LSP0_UART2_WCLK 12
+#define LSP0_SPIFC0_PCLK 13
+#define LSP0_SPIFC0_WCLK 14
+#define LSP0_I2C4_PCLK 15
+#define LSP0_I2C4_WCLK 16
+#define LSP0_I2C5_PCLK 17
+#define LSP0_I2C5_WCLK 18
+#define LSP0_SSP0_PCLK 19
+#define LSP0_SSP0_WCLK 20
+#define LSP0_SSP1_PCLK 21
+#define LSP0_SSP1_WCLK 22
+#define LSP0_USIM_PCLK 23
+#define LSP0_USIM_WCLK 24
+#define LSP0_GPIO_PCLK 25
+#define LSP0_GPIO_WCLK 26
+#define LSP0_I2C3_PCLK 27
+#define LSP0_I2C3_WCLK 28
+
+#define LSP0_NR_CLKS 29
+
+
+#define LSP1_UART4_PCLK 1
+#define LSP1_UART4_WCLK 2
+#define LSP1_UART5_PCLK 3
+#define LSP1_UART5_WCLK 4
+#define LSP1_PWM_PCLK 5
+#define LSP1_PWM_WCLK 6
+#define LSP1_I2C2_PCLK 7
+#define LSP1_I2C2_WCLK 8
+#define LSP1_SSP2_PCLK 9
+#define LSP1_SSP2_WCLK 10
+#define LSP1_SSP3_PCLK 11
+#define LSP1_SSP3_WCLK 12
+#define LSP1_SSP4_PCLK 13
+#define LSP1_SSP4_WCLK 14
+#define LSP1_USIM1_PCLK 15
+#define LSP1_USIM1_WCLK 16
+
+#define LSP1_NR_CLKS 17
+
+
+#define AUDIO_I2S0_WCLK 1
+#define AUDIO_I2S0_PCLK 2
+#define AUDIO_I2S1_WCLK 3
+#define AUDIO_I2S1_PCLK 4
+#define AUDIO_I2S2_WCLK 5
+#define AUDIO_I2S2_PCLK 6
+#define AUDIO_I2S3_WCLK 7
+#define AUDIO_I2S3_PCLK 8
+#define AUDIO_I2C0_WCLK 9
+#define AUDIO_I2C0_PCLK 10
+#define AUDIO_SPDIF0_WCLK 11
+#define AUDIO_SPDIF0_PCLK 12
+#define AUDIO_SPDIF1_WCLK 13
+#define AUDIO_SPDIF1_PCLK 14
+#define AUDIO_TIMER_WCLK 15
+#define AUDIO_TIMER_PCLK 16
+#define AUDIO_TDM_WCLK 17
+#define AUDIO_TDM_PCLK 18
+#define AUDIO_TS_PCLK 19
+
+#define AUDIO_NR_CLKS 20
+
+#endif
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
index 13a9d4bf2662..54aef5e21763 100644
--- a/include/dt-bindings/mfd/qcom-rpm.h
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -147,6 +147,28 @@
#define QCOM_RPM_SMB208_S1b 137
#define QCOM_RPM_SMB208_S2a 138
#define QCOM_RPM_SMB208_S2b 139
+#define QCOM_RPM_PM8018_SMPS1 140
+#define QCOM_RPM_PM8018_SMPS2 141
+#define QCOM_RPM_PM8018_SMPS3 142
+#define QCOM_RPM_PM8018_SMPS4 143
+#define QCOM_RPM_PM8018_SMPS5 144
+#define QCOM_RPM_PM8018_LDO1 145
+#define QCOM_RPM_PM8018_LDO2 146
+#define QCOM_RPM_PM8018_LDO3 147
+#define QCOM_RPM_PM8018_LDO4 148
+#define QCOM_RPM_PM8018_LDO5 149
+#define QCOM_RPM_PM8018_LDO6 150
+#define QCOM_RPM_PM8018_LDO7 151
+#define QCOM_RPM_PM8018_LDO8 152
+#define QCOM_RPM_PM8018_LDO9 153
+#define QCOM_RPM_PM8018_LDO10 154
+#define QCOM_RPM_PM8018_LDO11 155
+#define QCOM_RPM_PM8018_LDO12 156
+#define QCOM_RPM_PM8018_LDO13 157
+#define QCOM_RPM_PM8018_LDO14 158
+#define QCOM_RPM_PM8018_LVS1 159
+#define QCOM_RPM_PM8018_NCP 160
+#define QCOM_RPM_VOLTAGE_CORNER 161
/*
* Constants used to select force mode for regulators.
diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h
new file mode 100644
index 000000000000..e98942dc0d44
--- /dev/null
+++ b/include/dt-bindings/mfd/stm32f4-rcc.h
@@ -0,0 +1,98 @@
+/*
+ * This header provides constants for the STM32F4 RCC IP
+ */
+
+#ifndef _DT_BINDINGS_MFD_STM32F4_RCC_H
+#define _DT_BINDINGS_MFD_STM32F4_RCC_H
+
+/* AHB1 */
+#define STM32F4_RCC_AHB1_GPIOA 0
+#define STM32F4_RCC_AHB1_GPIOB 1
+#define STM32F4_RCC_AHB1_GPIOC 2
+#define STM32F4_RCC_AHB1_GPIOD 3
+#define STM32F4_RCC_AHB1_GPIOE 4
+#define STM32F4_RCC_AHB1_GPIOF 5
+#define STM32F4_RCC_AHB1_GPIOG 6
+#define STM32F4_RCC_AHB1_GPIOH 7
+#define STM32F4_RCC_AHB1_GPIOI 8
+#define STM32F4_RCC_AHB1_GPIOJ 9
+#define STM32F4_RCC_AHB1_GPIOK 10
+#define STM32F4_RCC_AHB1_CRC 12
+#define STM32F4_RCC_AHB1_DMA1 21
+#define STM32F4_RCC_AHB1_DMA2 22
+#define STM32F4_RCC_AHB1_DMA2D 23
+#define STM32F4_RCC_AHB1_ETHMAC 25
+#define STM32F4_RCC_AHB1_OTGHS 29
+
+#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8))
+#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit + (0x30 * 8))
+
+
+/* AHB2 */
+#define STM32F4_RCC_AHB2_DCMI 0
+#define STM32F4_RCC_AHB2_CRYP 4
+#define STM32F4_RCC_AHB2_HASH 5
+#define STM32F4_RCC_AHB2_RNG 6
+#define STM32F4_RCC_AHB2_OTGFS 7
+
+#define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8))
+#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + (0x34 * 8))
+
+/* AHB3 */
+#define STM32F4_RCC_AHB3_FMC 0
+
+#define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8))
+#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + (0x38 * 8))
+
+/* APB1 */
+#define STM32F4_RCC_APB1_TIM2 0
+#define STM32F4_RCC_APB1_TIM3 1
+#define STM32F4_RCC_APB1_TIM4 2
+#define STM32F4_RCC_APB1_TIM5 3
+#define STM32F4_RCC_APB1_TIM6 4
+#define STM32F4_RCC_APB1_TIM7 5
+#define STM32F4_RCC_APB1_TIM12 6
+#define STM32F4_RCC_APB1_TIM13 7
+#define STM32F4_RCC_APB1_TIM14 8
+#define STM32F4_RCC_APB1_WWDG 11
+#define STM32F4_RCC_APB1_SPI2 14
+#define STM32F4_RCC_APB1_SPI3 15
+#define STM32F4_RCC_APB1_UART2 17
+#define STM32F4_RCC_APB1_UART3 18
+#define STM32F4_RCC_APB1_UART4 19
+#define STM32F4_RCC_APB1_UART5 20
+#define STM32F4_RCC_APB1_I2C1 21
+#define STM32F4_RCC_APB1_I2C2 22
+#define STM32F4_RCC_APB1_I2C3 23
+#define STM32F4_RCC_APB1_CAN1 25
+#define STM32F4_RCC_APB1_CAN2 26
+#define STM32F4_RCC_APB1_PWR 28
+#define STM32F4_RCC_APB1_DAC 29
+#define STM32F4_RCC_APB1_UART7 30
+#define STM32F4_RCC_APB1_UART8 31
+
+#define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8))
+#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + (0x40 * 8))
+
+/* APB2 */
+#define STM32F4_RCC_APB2_TIM1 0
+#define STM32F4_RCC_APB2_TIM8 1
+#define STM32F4_RCC_APB2_USART1 4
+#define STM32F4_RCC_APB2_USART6 5
+#define STM32F4_RCC_APB2_ADC 8
+#define STM32F4_RCC_APB2_SDIO 11
+#define STM32F4_RCC_APB2_SPI1 12
+#define STM32F4_RCC_APB2_SPI4 13
+#define STM32F4_RCC_APB2_SYSCFG 14
+#define STM32F4_RCC_APB2_TIM9 16
+#define STM32F4_RCC_APB2_TIM10 17
+#define STM32F4_RCC_APB2_TIM11 18
+#define STM32F4_RCC_APB2_SPI5 20
+#define STM32F4_RCC_APB2_SPI6 21
+#define STM32F4_RCC_APB2_SAI1 22
+#define STM32F4_RCC_APB2_LTDC 26
+
+#define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8))
+#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + (0x44 * 8))
+
+#endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */
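The STM32F4_*_RESET()/_CLOCK() helpers above fold a peripheral's bit into a single flat index: the RCC register offset (in bytes) is multiplied by 8 to turn it into a bit offset, and the peripheral's bit position is added. A minimal sketch of the resulting numbers, derived purely from the constants defined above (the include path and the printf harness are illustrative, not part of the binding):

        #include <stdio.h>
        #include "dt-bindings/mfd/stm32f4-rcc.h"        /* illustrative include path */

        int main(void)
        {
                /* GPIOA is bit 0 of the AHB1 registers:
                 * reset index = 0 + 0x10 * 8 = 128, clock index = 0 + 0x30 * 8 = 384 */
                printf("AHB1 GPIOA reset line: %d\n", STM32F4_AHB1_RESET(GPIOA));
                printf("AHB1 GPIOA clock gate: %d\n", STM32F4_AHB1_CLOCK(GPIOA));

                /* UART4 is bit 19 of the APB1 registers:
                 * clock index = 19 + 0x40 * 8 = 531 */
                printf("APB1 UART4 clock gate: %d\n", STM32F4_APB1_CLOCK(UART4));
                return 0;
        }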
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644
index 000000000000..2383dd20ff43
--- /dev/null
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -0,0 +1,21 @@
+/*
+ * Device Tree constants for Microsemi VSC8531 PHY
+ *
+ * Author: Nagaraju Lakkaraju
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _DT_BINDINGS_MSCC_VSC8531_H
+#define _DT_BINDINGS_MSCC_VSC8531_H
+
+/* MAC interface edge rate control: VDDMAC in millivolts */
+#define MSCC_VDDMAC_3300 3300
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_MAX 4
+#define MSCC_SLOWDOWN_MAX 8
+
+#endif
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
new file mode 100644
index 000000000000..6276eb785e2b
--- /dev/null
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -0,0 +1,57 @@
+/*
+ * Samsung's Exynos pinctrl bindings
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+
+#define EXYNOS_PIN_PULL_NONE 0
+#define EXYNOS_PIN_PULL_DOWN 1
+#define EXYNOS_PIN_PULL_UP 3
+
+#define S3C64XX_PIN_PULL_NONE 0
+#define S3C64XX_PIN_PULL_DOWN 1
+#define S3C64XX_PIN_PULL_UP 2
+
+/* Pin function in power down mode */
+#define EXYNOS_PIN_PDN_OUT0 0
+#define EXYNOS_PIN_PDN_OUT1 1
+#define EXYNOS_PIN_PDN_INPUT 2
+#define EXYNOS_PIN_PDN_PREV 3
+
+/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */
+#define EXYNOS4_PIN_DRV_LV1 0
+#define EXYNOS4_PIN_DRV_LV2 2
+#define EXYNOS4_PIN_DRV_LV3 1
+#define EXYNOS4_PIN_DRV_LV4 3
+
+/* Drive strengths for Exynos5260 */
+#define EXYNOS5260_PIN_DRV_LV1 0
+#define EXYNOS5260_PIN_DRV_LV2 1
+#define EXYNOS5260_PIN_DRV_LV4 2
+#define EXYNOS5260_PIN_DRV_LV6 3
+
+/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
+#define EXYNOS5420_PIN_DRV_LV1 0
+#define EXYNOS5420_PIN_DRV_LV2 1
+#define EXYNOS5420_PIN_DRV_LV3 2
+#define EXYNOS5420_PIN_DRV_LV4 3
+
+#define EXYNOS_PIN_FUNC_INPUT 0
+#define EXYNOS_PIN_FUNC_OUTPUT 1
+#define EXYNOS_PIN_FUNC_2 2
+#define EXYNOS_PIN_FUNC_3 3
+#define EXYNOS_PIN_FUNC_4 4
+#define EXYNOS_PIN_FUNC_5 5
+#define EXYNOS_PIN_FUNC_6 6
+#define EXYNOS_PIN_FUNC_F 0xf
+
+#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */
diff --git a/include/dt-bindings/reset/gxbb-aoclkc.h b/include/dt-bindings/reset/gxbb-aoclkc.h
new file mode 100644
index 000000000000..9e3fd60c309c
--- /dev/null
+++ b/include/dt-bindings/reset/gxbb-aoclkc.h
@@ -0,0 +1,66 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK
+#define DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK
+
+#define RESET_AO_REMOTE 0
+#define RESET_AO_I2C_MASTER 1
+#define RESET_AO_I2C_SLAVE 2
+#define RESET_AO_UART1 3
+#define RESET_AO_UART2 4
+#define RESET_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h
new file mode 100644
index 000000000000..aaf03057f755
--- /dev/null
+++ b/include/dt-bindings/reset/mt2701-resets.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015 MediaTek, Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2701
+#define _DT_BINDINGS_RESET_CONTROLLER_MT2701
+
+/* INFRACFG resets */
+#define MT2701_INFRA_EMI_REG_RST 0
+#define MT2701_INFRA_DRAMC0_A0_RST 1
+#define MT2701_INFRA_FHCTL_RST 2
+#define MT2701_INFRA_APCIRQ_EINT_RST 3
+#define MT2701_INFRA_APXGPT_RST 4
+#define MT2701_INFRA_SCPSYS_RST 5
+#define MT2701_INFRA_KP_RST 6
+#define MT2701_INFRA_PMIC_WRAP_RST 7
+#define MT2701_INFRA_MIPI_RST 8
+#define MT2701_INFRA_IRRX_RST 9
+#define MT2701_INFRA_CEC_RST 10
+#define MT2701_INFRA_EMI_RST 32
+#define MT2701_INFRA_DRAMC0_RST 34
+#define MT2701_INFRA_TRNG_RST 37
+#define MT2701_INFRA_SYSIRQ_RST 38
+
+/* PERICFG resets */
+#define MT2701_PERI_UART0_SW_RST 0
+#define MT2701_PERI_UART1_SW_RST 1
+#define MT2701_PERI_UART2_SW_RST 2
+#define MT2701_PERI_UART3_SW_RST 3
+#define MT2701_PERI_GCPU_SW_RST 5
+#define MT2701_PERI_BTIF_SW_RST 6
+#define MT2701_PERI_PWM_SW_RST 8
+#define MT2701_PERI_AUXADC_SW_RST 10
+#define MT2701_PERI_DMA_SW_RST 11
+#define MT2701_PERI_NFI_SW_RST 14
+#define MT2701_PERI_NLI_SW_RST 15
+#define MT2701_PERI_THERM_SW_RST 16
+#define MT2701_PERI_MSDC2_SW_RST 17
+#define MT2701_PERI_MSDC0_SW_RST 19
+#define MT2701_PERI_MSDC1_SW_RST 20
+#define MT2701_PERI_I2C0_SW_RST 22
+#define MT2701_PERI_I2C1_SW_RST 23
+#define MT2701_PERI_I2C2_SW_RST 24
+#define MT2701_PERI_I2C3_SW_RST 25
+#define MT2701_PERI_USB_SW_RST 28
+#define MT2701_PERI_ETH_SW_RST 29
+#define MT2701_PERI_SPI0_SW_RST 33
+
+/* TOPRGU resets */
+#define MT2701_TOPRGU_INFRA_RST 0
+#define MT2701_TOPRGU_MM_RST 1
+#define MT2701_TOPRGU_MFG_RST 2
+#define MT2701_TOPRGU_ETHDMA_RST 3
+#define MT2701_TOPRGU_VDEC_RST 4
+#define MT2701_TOPRGU_VENC_IMG_RST 5
+#define MT2701_TOPRGU_DDRPHY_RST 6
+#define MT2701_TOPRGU_MD_RST 7
+#define MT2701_TOPRGU_INFRA_AO_RST 8
+#define MT2701_TOPRGU_CONN_RST 9
+#define MT2701_TOPRGU_APMIXED_RST 10
+#define MT2701_TOPRGU_HIFSYS_RST 11
+#define MT2701_TOPRGU_CONN_MCU_RST 12
+#define MT2701_TOPRGU_BDP_DISP_RST 13
+
+/* HIFSYS resets */
+#define MT2701_HIFSYS_UHOST0_RST 3
+#define MT2701_HIFSYS_UHOST1_RST 4
+#define MT2701_HIFSYS_UPHY0_RST 21
+#define MT2701_HIFSYS_UPHY1_RST 22
+#define MT2701_HIFSYS_PCIE0_RST 24
+#define MT2701_HIFSYS_PCIE1_RST 25
+#define MT2701_HIFSYS_PCIE2_RST 26
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */
diff --git a/include/dt-bindings/reset/qcom,gcc-mdm9615.h b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
new file mode 100644
index 000000000000..7f86e9a59df4
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_GCC_MDM9615_H
+#define _DT_BINDINGS_RESET_GCC_MDM9615_H
+
+#define SFAB_MSS_Q6_SW_RESET 0
+#define SFAB_MSS_Q6_FW_RESET 1
+#define QDSS_STM_RESET 2
+#define AFAB_SMPSS_S_RESET 3
+#define AFAB_SMPSS_M1_RESET 4
+#define AFAB_SMPSS_M0_RESET 5
+#define AFAB_EBI1_CH0_RESET 6
+#define AFAB_EBI1_CH1_RESET 7
+#define SFAB_ADM0_M0_RESET 8
+#define SFAB_ADM0_M1_RESET 9
+#define SFAB_ADM0_M2_RESET 10
+#define ADM0_C2_RESET 11
+#define ADM0_C1_RESET 12
+#define ADM0_C0_RESET 13
+#define ADM0_PBUS_RESET 14
+#define ADM0_RESET 15
+#define QDSS_CLKS_SW_RESET 16
+#define QDSS_POR_RESET 17
+#define QDSS_TSCTR_RESET 18
+#define QDSS_HRESET_RESET 19
+#define QDSS_AXI_RESET 20
+#define QDSS_DBG_RESET 21
+#define PCIE_A_RESET 22
+#define PCIE_AUX_RESET 23
+#define PCIE_H_RESET 24
+#define SFAB_PCIE_M_RESET 25
+#define SFAB_PCIE_S_RESET 26
+#define SFAB_MSS_M_RESET 27
+#define SFAB_USB3_M_RESET 28
+#define SFAB_RIVA_M_RESET 29
+#define SFAB_LPASS_RESET 30
+#define SFAB_AFAB_M_RESET 31
+#define AFAB_SFAB_M0_RESET 32
+#define AFAB_SFAB_M1_RESET 33
+#define SFAB_SATA_S_RESET 34
+#define SFAB_DFAB_M_RESET 35
+#define DFAB_SFAB_M_RESET 36
+#define DFAB_SWAY0_RESET 37
+#define DFAB_SWAY1_RESET 38
+#define DFAB_ARB0_RESET 39
+#define DFAB_ARB1_RESET 40
+#define PPSS_PROC_RESET 41
+#define PPSS_RESET 42
+#define DMA_BAM_RESET 43
+#define SPS_TIC_H_RESET 44
+#define SLIMBUS_H_RESET 45
+#define SFAB_CFPB_M_RESET 46
+#define SFAB_CFPB_S_RESET 47
+#define TSIF_H_RESET 48
+#define CE1_H_RESET 49
+#define CE1_CORE_RESET 50
+#define CE1_SLEEP_RESET 51
+#define CE2_H_RESET 52
+#define CE2_CORE_RESET 53
+#define SFAB_SFPB_M_RESET 54
+#define SFAB_SFPB_S_RESET 55
+#define RPM_PROC_RESET 56
+#define PMIC_SSBI2_RESET 57
+#define SDC1_RESET 58
+#define SDC2_RESET 59
+#define SDC3_RESET 60
+#define SDC4_RESET 61
+#define SDC5_RESET 62
+#define DFAB_A2_RESET 63
+#define USB_HS1_RESET 64
+#define USB_HSIC_RESET 65
+#define USB_FS1_XCVR_RESET 66
+#define USB_FS1_RESET 67
+#define USB_FS2_XCVR_RESET 68
+#define USB_FS2_RESET 69
+#define GSBI1_RESET 70
+#define GSBI2_RESET 71
+#define GSBI3_RESET 72
+#define GSBI4_RESET 73
+#define GSBI5_RESET 74
+#define GSBI6_RESET 75
+#define GSBI7_RESET 76
+#define GSBI8_RESET 77
+#define GSBI9_RESET 78
+#define GSBI10_RESET 79
+#define GSBI11_RESET 80
+#define GSBI12_RESET 81
+#define SPDM_RESET 82
+#define TLMM_H_RESET 83
+#define SFAB_MSS_S_RESET 84
+#define MSS_SLP_RESET 85
+#define MSS_Q6SW_JTAG_RESET 86
+#define MSS_Q6FW_JTAG_RESET 87
+#define MSS_RESET 88
+#define SATA_H_RESET 89
+#define SATA_RXOOB_RESE 90
+#define SATA_PMALIVE_RESET 91
+#define SATA_SFAB_M_RESET 92
+#define TSSC_RESET 93
+#define PDM_RESET 94
+#define MPM_H_RESET 95
+#define MPM_RESET 96
+#define SFAB_SMPSS_S_RESET 97
+#define PRNG_RESET 98
+#define RIVA_RESET 99
+#define USB_HS3_RESET 100
+#define USB_HS4_RESET 101
+#define CE3_RESET 102
+#define PCIE_EXT_PCI_RESET 103
+#define PCIE_PHY_RESET 104
+#define PCIE_PCI_RESET 105
+#define PCIE_POR_RESET 106
+#define PCIE_HCLK_RESET 107
+#define PCIE_ACLK_RESET 108
+#define CE3_H_RESET 109
+#define SFAB_CE3_M_RESET 110
+#define SFAB_CE3_S_RESET 111
+#define SATA_RESET 112
+#define CE3_SLEEP_RESET 113
+#define GSS_SLP_RESET 114
+#define GSS_RESET 115
+
+#endif
diff --git a/include/dt-bindings/reset/sun6i-a31-ccu.h b/include/dt-bindings/reset/sun6i-a31-ccu.h
new file mode 100644
index 000000000000..fbff365ed6e1
--- /dev/null
+++ b/include/dt-bindings/reset/sun6i-a31-ccu.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN6I_A31_H_
+#define _DT_BINDINGS_RST_SUN6I_A31_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_PHY2 2
+
+#define RST_AHB1_MIPI_DSI 3
+#define RST_AHB1_SS 4
+#define RST_AHB1_DMA 5
+#define RST_AHB1_MMC0 6
+#define RST_AHB1_MMC1 7
+#define RST_AHB1_MMC2 8
+#define RST_AHB1_MMC3 9
+#define RST_AHB1_NAND1 10
+#define RST_AHB1_NAND0 11
+#define RST_AHB1_SDRAM 12
+#define RST_AHB1_EMAC 13
+#define RST_AHB1_TS 14
+#define RST_AHB1_HSTIMER 15
+#define RST_AHB1_SPI0 16
+#define RST_AHB1_SPI1 17
+#define RST_AHB1_SPI2 18
+#define RST_AHB1_SPI3 19
+#define RST_AHB1_OTG 20
+#define RST_AHB1_EHCI0 21
+#define RST_AHB1_EHCI1 22
+#define RST_AHB1_OHCI0 23
+#define RST_AHB1_OHCI1 24
+#define RST_AHB1_OHCI2 25
+#define RST_AHB1_VE 26
+#define RST_AHB1_LCD0 27
+#define RST_AHB1_LCD1 28
+#define RST_AHB1_CSI 29
+#define RST_AHB1_HDMI 30
+#define RST_AHB1_BE0 31
+#define RST_AHB1_BE1 32
+#define RST_AHB1_FE0 33
+#define RST_AHB1_FE1 34
+#define RST_AHB1_MP 35
+#define RST_AHB1_GPU 36
+#define RST_AHB1_DEU0 37
+#define RST_AHB1_DEU1 38
+#define RST_AHB1_DRC0 39
+#define RST_AHB1_DRC1 40
+#define RST_AHB1_LVDS 41
+
+#define RST_APB1_CODEC 42
+#define RST_APB1_SPDIF 43
+#define RST_APB1_DIGITAL_MIC 44
+#define RST_APB1_DAUDIO0 45
+#define RST_APB1_DAUDIO1 46
+#define RST_APB2_I2C0 47
+#define RST_APB2_I2C1 48
+#define RST_APB2_I2C2 49
+#define RST_APB2_I2C3 50
+#define RST_APB2_UART0 51
+#define RST_APB2_UART1 52
+#define RST_APB2_UART2 53
+#define RST_APB2_UART3 54
+#define RST_APB2_UART4 55
+#define RST_APB2_UART5 56
+
+#endif /* _DT_BINDINGS_RST_SUN6I_A31_H_ */
diff --git a/include/dt-bindings/reset/sun8i-a23-a33-ccu.h b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
new file mode 100644
index 000000000000..6121f2b0cd0a
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+#define _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+#define RST_MBUS 3
+#define RST_BUS_MIPI_DSI 4
+#define RST_BUS_SS 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MMC0 7
+#define RST_BUS_MMC1 8
+#define RST_BUS_MMC2 9
+#define RST_BUS_NAND 10
+#define RST_BUS_DRAM 11
+#define RST_BUS_HSTIMER 12
+#define RST_BUS_SPI0 13
+#define RST_BUS_SPI1 14
+#define RST_BUS_OTG 15
+#define RST_BUS_EHCI 16
+#define RST_BUS_OHCI 17
+#define RST_BUS_VE 18
+#define RST_BUS_LCD 19
+#define RST_BUS_CSI 20
+#define RST_BUS_DE_BE 21
+#define RST_BUS_DE_FE 22
+#define RST_BUS_GPU 23
+#define RST_BUS_MSGBOX 24
+#define RST_BUS_SPINLOCK 25
+#define RST_BUS_DRC 26
+#define RST_BUS_SAT 27
+#define RST_BUS_LVDS 28
+#define RST_BUS_CODEC 29
+#define RST_BUS_I2S0 30
+#define RST_BUS_I2S1 31
+#define RST_BUS_I2C0 32
+#define RST_BUS_I2C1 33
+#define RST_BUS_I2C2 34
+#define RST_BUS_UART0 35
+#define RST_BUS_UART1 36
+#define RST_BUS_UART2 37
+#define RST_BUS_UART3 38
+#define RST_BUS_UART4 39
+
+#endif /* _DT_BINDINGS_RST_SUN8I_A23_A33_H_ */
diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h
new file mode 100644
index 000000000000..ae7c867e736a
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,boot-mode.h
@@ -0,0 +1,15 @@
+#ifndef __ROCKCHIP_BOOT_MODE_H
+#define __ROCKCHIP_BOOT_MODE_H
+
+/* high 24 bits are the tag, low 8 bits are the type */
+#define REBOOT_FLAG 0x5242C300
+/* normal boot */
+#define BOOT_NORMAL (REBOOT_FLAG + 0)
+/* enter bootloader rockusb mode */
+#define BOOT_BL_DOWNLOAD (REBOOT_FLAG + 1)
+/* enter recovery */
+#define BOOT_RECOVERY (REBOOT_FLAG + 3)
+/* enter fastboot mode */
+#define BOOT_FASTBOOT (REBOOT_FLAG + 9)
+
+#endif
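As the comment above says, a reboot-mode value packs a 24-bit magic tag with an 8-bit type. A small sketch of how a consumer could validate and decode such a value; the mask names and helper functions below are illustrative and not part of this binding:

        #include <stdbool.h>
        #include <stdint.h>
        #include "dt-bindings/soc/rockchip,boot-mode.h"  /* illustrative include path */

        #define BOOT_TAG_MASK   0xffffff00u     /* high 24 bits: magic tag */
        #define BOOT_TYPE_MASK  0x000000ffu     /* low 8 bits: boot type */

        static bool boot_mode_valid(uint32_t mode)
        {
                /* BOOT_NORMAL .. BOOT_FASTBOOT all share the 0x5242C3xx tag */
                return (mode & BOOT_TAG_MASK) == REBOOT_FLAG;
        }

        static uint32_t boot_mode_type(uint32_t mode)
        {
                /* 0 = normal, 1 = loader, 3 = recovery, 9 = fastboot */
                return mode & BOOT_TYPE_MASK;
        }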
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 19b698ef3336..002f0922cd92 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -20,9 +20,11 @@
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
+#include <linux/static_key.h>
#include <linux/types.h>
#include <kvm/iodev.h>
#include <linux/list.h>
+#include <linux/jump_label.h>
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
@@ -49,6 +51,9 @@ struct vgic_global {
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
+ /* GICV mapping */
+ void __iomem *vcpu_base_va;
+
/* virtual control interface mapping */
void __iomem *vctrl_base;
@@ -63,6 +68,9 @@ struct vgic_global {
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
+
+ /* GIC system register CPU interface */
+ struct static_key_false gicv3_cpuif;
};
extern struct vgic_global kvm_vgic_global_state;
@@ -217,7 +225,6 @@ struct vgic_v2_cpu_if {
};
struct vgic_v3_cpu_if {
-#ifdef CONFIG_KVM_ARM_VGIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
@@ -227,7 +234,6 @@ struct vgic_v3_cpu_if {
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
-#endif
};
struct vgic_cpu {
@@ -265,6 +271,8 @@ struct vgic_cpu {
bool lpis_enabled;
};
+extern struct static_key_false vgic_v2_cpuif_trap;
+
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
@@ -294,13 +302,7 @@ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_KVM_ARM_VGIC_V3
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
-#else
-static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
-{
-}
-#endif
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
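The two static_key_false objects added above (gicv3_cpuif in struct vgic_global and the exported vgic_v2_cpuif_trap key) are jump-label keys, so checking them on a hot path costs a patched branch rather than a load and compare. A hedged sketch of the usual consumption pattern via static_branch_unlikely() from <linux/jump_label.h>; the helper below is illustrative and not part of this patch:

        #include <linux/jump_label.h>
        #include <linux/types.h>
        #include <kvm/arm_vgic.h>

        /* Illustrative: returns true once the key has been flipped with
         * static_branch_enable(), e.g. when a GICv3 CPU interface is probed. */
        static bool example_use_gicv3_cpuif(void)
        {
                return static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif);
        }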
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 19e650c940b6..94afcb2c384c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1099,4 +1099,10 @@ extern bool acpi_has_watchdog(void);
static inline bool acpi_has_watchdog(void) { return false; }
#endif
+#ifdef CONFIG_ACPI_SPCR_TABLE
+int parse_spcr(bool earlycon);
+#else
+static inline int parse_spcr(bool earlycon) { return 0; }
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 164049357e5c..04602cbe85dc 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -63,7 +63,7 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
}
#endif
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 3d8dcdd1aeae..d143c13bed26 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -53,8 +53,14 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
+ AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
+/* This is used to generate a pseudo-ID for an AMBA device */
+#define AMBA_LINUX_ID(conf, rev, part) \
+ (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
+ AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
+
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
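AMBA_LINUX_ID() above packs a pseudo peripheral ID in the usual AMBA layout: configuration in bits 31:24, revision in bits 23:20, the unofficial 0xfe Linux vendor code in bits 19:12 and the part number in bits 11:0. A small compile-time check of that packing, using only the macro itself (kernel context assumed; the example values are arbitrary):

        #include <linux/amba/bus.h>
        #include <linux/bug.h>
        #include <linux/compiler.h>

        static void __maybe_unused amba_linux_id_example(void)
        {
                /* conf = 0, rev = 1, part = 0x041:
                 * (1 << 20) | (0xfe << 12) | 0x041 == 0x001fe041 */
                BUILD_BUG_ON(AMBA_LINUX_ID(0, 1, 0x041) != 0x001fe041);
        }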
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index d76a19ba2cff..ad0965e21a5e 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -104,6 +104,15 @@
#define UART01x_FR_CTS 0x001
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
+/*
+ * Some bits of the Flag Register on ZTE devices are at different
+ * positions from the standard ones.
+ */
+#define ZX_UART01x_FR_BUSY 0x100
+#define ZX_UART01x_FR_DSR 0x008
+#define ZX_UART01x_FR_CTS 0x002
+#define ZX_UART011_FR_RI 0x001
+
#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
#define UART011_CR_OUT2 0x2000 /* OUT2 */
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 2b08e79f5100..09751d349963 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -22,6 +22,20 @@
#include <linux/types.h>
+/*
+ * This is mainly used to communicate information back and forth
+ * between SVM and the IOMMU when setting up and tearing down posted
+ * interrupts.
+ */
+struct amd_iommu_pi_data {
+ u32 ga_tag;
+ u32 prev_ga_tag;
+ u64 base;
+ bool is_guest_mode;
+ struct vcpu_data *vcpu_data;
+ void *ir_data;
+};
+
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
@@ -168,11 +182,34 @@ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb);
-
-#else
+#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
-#endif
+#endif /* CONFIG_AMD_IOMMU */
+
+#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP)
+
+/* IOMMU AVIC Function */
+extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
+
+extern int
+amd_iommu_update_ga(int cpu, bool is_run, void *data);
+
+#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+
+static inline int
+amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+ return 0;
+}
+
+static inline int
+amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
#endif /* _ASM_X86_AMD_IOMMU_H */
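The GA ("guest virtual APIC") log interface declared above is a plain callback registration: amd_iommu_register_ga_log_notifier() takes an int (*)(u32) that receives the ga_tag recorded by the IOMMU, and amd_iommu_update_ga() lets the caller refresh posted-interrupt data when a vCPU moves. A minimal registration sketch built only from those prototypes; the handler name and its body are illustrative:

        #include <linux/amd-iommu.h>
        #include <linux/init.h>
        #include <linux/printk.h>

        /* Illustrative handler: a real user (e.g. KVM's AVIC code) would look
         * up the vCPU associated with this ga_tag and kick it. */
        static int example_ga_log_notifier(u32 ga_tag)
        {
                pr_debug("GA log event for tag 0x%x\n", ga_tag);
                return 0;
        }

        static int __init example_init(void)
        {
                return amd_iommu_register_ga_log_notifier(example_ga_log_notifier);
        }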
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 5a4d664af87a..bd2560502f3c 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -118,6 +118,8 @@
#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
+#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */
+#define ATMEL_US_FP_MASK 0x7
#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
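ATMEL_US_FP_OFFSET/ATMEL_US_FP_MASK above expose the fractional part of the baud rate generator next to the 16-bit clock divider CD. Assuming the usual datasheet formula for 16x oversampling, baud = clk / (16 * (CD + FP/8)), a divider pair can be derived as in the sketch below (an illustration under that assumption, not code from this patch):

        #include <linux/atmel_serial.h>
        #include <linux/kernel.h>
        #include <linux/types.h>

        /* Split clk / (16 * baud) into an integer divider CD and a 1/8-step
         * fractional part FP, then pack both for the BRGR register. */
        static u32 example_brgr(unsigned long clk, unsigned int baud)
        {
                unsigned long div8 = DIV_ROUND_CLOSEST(clk, 2 * baud);  /* 8*CD + FP */
                u32 cd = div8 >> 3;
                u32 fp = div8 & ATMEL_US_FP_MASK;

                return (cd & ATMEL_US_CD) | (fp << ATMEL_US_FP_OFFSET);
        }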
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3db25df396cb..8eeedb2db924 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4709 0
#define BCMA_CHIP_ID_BCM47094 53030
#define BCMA_CHIP_ID_BCM53018 53018
+#define BCMA_CHIP_ID_BCM53573 53573
+#define BCMA_PKG_ID_BCM53573 0
+#define BCMA_PKG_ID_BCM47189 1
/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index ebd5c1fcdea4..9986f8288d01 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -10,6 +10,7 @@
#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
+#define BCMA_CLKCTLST_HQCLKREQ 0x00000040 /* HQ Clock */
#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */
#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */
@@ -23,6 +24,7 @@
#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
/* Agent registers (common for every core) */
+#define BCMA_OOB_SEL_OUT_A30 0x0100
#define BCMA_IOCTL 0x0408 /* IO control */
#define BCMA_IOCTL_CLK 0x0001
#define BCMA_IOCTL_FGC 0x0002
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23ddf4b46a9b..97cb48f03dc7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -1,6 +1,4 @@
/*
- * 2.5 block I/O model
- *
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
*
* This program is free software; you can redistribute it and/or modify
@@ -461,6 +459,7 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *,
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * The FIELD_{GET,PREP} macros take a shifted mask as their first
+ * parameter and extract the base mask and shift amount from it.
+ * The mask must be a compile-time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: 32bit value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+#endif
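
For orientation, a minimal sketch of how a driver might use the new FIELD_PREP()/FIELD_GET() helpers; the CTRL_* register layout and the two functions below are hypothetical and only illustrate the shifted-mask convention described in the header comment above.

#include <linux/bitfield.h>
#include <linux/bitops.h>	/* GENMASK(), BIT() */
#include <linux/types.h>

/* Hypothetical control-register layout, for illustration only. */
#define CTRL_MODE	GENMASK(2, 0)	/* 3-bit mode field */
#define CTRL_ENABLE	BIT(3)		/* single enable bit */
#define CTRL_DIVIDER	GENMASK(15, 8)	/* 8-bit clock divider */

static u32 ctrl_pack(u8 mode, bool enable, u8 div)
{
	/* FIELD_PREP() shifts each value into its field; OR the fields together. */
	return FIELD_PREP(CTRL_MODE, mode) |
	       FIELD_PREP(CTRL_ENABLE, enable) |
	       FIELD_PREP(CTRL_DIVIDER, div);
}

static u8 ctrl_get_divider(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down to bit 0. */
	return FIELD_GET(CTRL_DIVIDER, reg);
}
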
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 299e76b59fe9..a83c822c35c2 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -65,16 +65,6 @@ static inline int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
-static inline int get_count_order(unsigned int count)
-{
- int order;
-
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
-
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
@@ -191,6 +181,32 @@ static inline unsigned fls_long(unsigned long l)
return fls64(l);
}
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+/**
+ * get_count_order_long - get order after rounding @l up to power of 2
+ * @l: parameter
+ *
+ * It is the same as get_count_order(), but with a long type parameter
+ */
+static inline int get_count_order_long(unsigned long l)
+{
+ if (l == 0UL)
+ return -1;
+ else if (l & (l - 1UL))
+ return (int)fls_long(l);
+ else
+ return (int)fls_long(l) - 1;
+}
+
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
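
get_count_order_long() returns the order of its argument rounded up to the next power of two, or -1 for zero. A few values worked out from the definition above, plus a hypothetical wrapper showing the intended use:

#include <linux/bitops.h>

/*
 * get_count_order_long(0) -> -1   (no valid order)
 * get_count_order_long(1) ->  0   (1 == 2^0)
 * get_count_order_long(4) ->  2   (4 == 2^2)
 * get_count_order_long(5) ->  3   (rounded up to 8 == 2^3)
 */
static unsigned long round_up_to_pow2(unsigned long n)	/* hypothetical helper */
{
	if (n == 0)
		return 1;
	return 1UL << get_count_order_long(n);
}
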
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 10648e300c93..cbdbf34de5b6 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -45,7 +45,7 @@ struct blkcg {
spinlock_t lock;
struct radix_tree_root blkg_tree;
- struct blkcg_gq *blkg_hint;
+ struct blkcg_gq __rcu *blkg_hint;
struct hlist_head blkg_list;
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
new file mode 100644
index 000000000000..6ab595259112
--- /dev/null
+++ b/include/linux/blk-mq-pci.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_BLK_MQ_PCI_H
+#define _LINUX_BLK_MQ_PCI_H
+
+struct blk_mq_tag_set;
+struct pci_dev;
+
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+
+#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e43bbffb5b7a..535ab2e13d2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -2,31 +2,19 @@
#define BLK_MQ_H
#include <linux/blkdev.h>
+#include <linux/sbitmap.h>
struct blk_mq_tags;
struct blk_flush_queue;
-struct blk_mq_cpu_notifier {
- struct list_head list;
- void *data;
- int (*notify)(void *data, unsigned long action, unsigned int cpu);
-};
-
-struct blk_mq_ctxmap {
- unsigned int size;
- unsigned int bits_per_word;
- struct blk_align_bitmap *map;
-};
-
struct blk_mq_hw_ctx {
struct {
spinlock_t lock;
struct list_head dispatch;
+ unsigned long state; /* BLK_MQ_S_* flags */
} ____cacheline_aligned_in_smp;
- unsigned long state; /* BLK_MQ_S_* flags */
- struct delayed_work run_work;
- struct delayed_work delay_work;
+ struct work_struct run_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
@@ -38,10 +26,10 @@ struct blk_mq_hw_ctx {
void *driver_data;
- struct blk_mq_ctxmap ctx_map;
+ struct sbitmap ctx_map;
- unsigned int nr_ctx;
struct blk_mq_ctx **ctxs;
+ unsigned int nr_ctx;
atomic_t wait_index;
@@ -49,7 +37,7 @@ struct blk_mq_hw_ctx {
unsigned long queued;
unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 10
+#define BLK_MQ_MAX_DISPATCH_ORDER 7
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int numa_node;
@@ -57,14 +45,18 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
- struct blk_mq_cpu_notifier cpu_notifier;
+ struct delayed_work delay_work;
+
+ struct hlist_node cpuhp_dead;
struct kobject kobj;
+ unsigned long poll_considered;
unsigned long poll_invoked;
unsigned long poll_success;
};
struct blk_mq_tag_set {
+ unsigned int *mq_map;
struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth; /* max hw supported */
@@ -88,7 +80,6 @@ struct blk_mq_queue_data {
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -102,6 +93,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
struct blk_mq_ops {
@@ -111,11 +103,6 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
- * Map to specific hardware queue
- */
- map_queue_fn *map_queue;
-
- /*
* Called on request timeout
*/
timeout_fn *timeout;
@@ -147,6 +134,8 @@ struct blk_mq_ops {
init_request_fn *init_request;
exit_request_fn *exit_request;
reinit_request_fn *reinit_request;
+
+ map_queues_fn *map_queues;
};
enum {
@@ -158,6 +147,7 @@ enum {
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
+ BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -178,8 +168,8 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
-int blk_mq_register_disk(struct gendisk *);
-void blk_mq_unregister_disk(struct gendisk *);
+int blk_mq_register_dev(struct device *, struct request_queue *);
+void blk_mq_unregister_dev(struct device *, struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
@@ -201,7 +191,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -220,8 +209,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
@@ -232,6 +219,7 @@ void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);
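
With the per-request ->map_queue() hook gone, a driver describes its CPU-to-hardware-queue mapping once via the new ->map_queues() callback on the tag set; PCI drivers can delegate to blk_mq_pci_map_queues() from the new <linux/blk-mq-pci.h>. A sketch under the assumption of a hypothetical driver ("mydrv") that keeps its pci_dev in the tag set's driver_data:

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct mydrv {				/* hypothetical per-device state */
	struct pci_dev *pdev;
};

/* Wired up as .map_queues in the driver's blk_mq_ops. */
static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct mydrv *drv = set->driver_data;

	return blk_mq_pci_map_queues(set, drv->pdev);
}
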
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 436f43f87da9..cd395ecec99d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -16,7 +16,6 @@ struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
-typedef void (bio_destructor_t) (struct bio *);
#ifdef CONFIG_BLOCK
/*
@@ -89,14 +88,22 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
-#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS)
+#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
+#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
-#define bio_set_op_attrs(bio, op, op_flags) do { \
- WARN_ON(op >= (1 << REQ_OP_BITS)); \
- (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \
- (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
- (bio)->bi_opf |= op_flags; \
+#define bio_set_op_attrs(bio, op, op_flags) do { \
+ if (__builtin_constant_p(op)) \
+ BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
+ else \
+ WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
+ if (__builtin_constant_p(op_flags)) \
+ BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
+ else \
+ WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
+ (bio)->bi_opf = bio_flags(bio); \
+ (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
+ (bio)->bi_opf |= (op_flags); \
} while (0)
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
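
The reworked bio_set_op_attrs() turns out-of-range op or flag values into a build error when the arguments are compile-time constants, and into a one-shot runtime warning otherwise. A trivial call site (the function name is hypothetical) looks unchanged:

#include <linux/blk_types.h>

static void mydrv_prep_read_bio(struct bio *bio)
{
	/* REQ_OP_READ is constant, so the range check happens at build time. */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
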
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e79055c8b577..c47c358ba052 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -449,7 +449,7 @@ struct request_queue {
struct list_head requeue_list;
spinlock_t requeue_lock;
- struct work_struct requeue_work;
+ struct delayed_work requeue_work;
struct mutex sysfs_lock;
@@ -1440,8 +1440,8 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
return bio_will_gap(req->q, bio, req->bio);
}
-struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
index e44b88ba552b..225bdb7daec7 100644
--- a/include/linux/blockgroup_lock.h
+++ b/include/linux/blockgroup_lock.h
@@ -10,28 +10,10 @@
#include <linux/cache.h>
#ifdef CONFIG_SMP
-
-/*
- * We want a power-of-two. Is there a better way than this?
- */
-
-#if NR_CPUS >= 32
-#define NR_BG_LOCKS 128
-#elif NR_CPUS >= 16
-#define NR_BG_LOCKS 64
-#elif NR_CPUS >= 8
-#define NR_BG_LOCKS 32
-#elif NR_CPUS >= 4
-#define NR_BG_LOCKS 16
-#elif NR_CPUS >= 2
-#define NR_BG_LOCKS 8
+#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
-#define NR_BG_LOCKS 4
-#endif
-
-#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
-#endif /* CONFIG_SMP */
+#endif
struct bgl_lock {
spinlock_t lock;
@@ -49,14 +31,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl)
spin_lock_init(&bgl->locks[i].lock);
}
-/*
- * The accessor is a macro so we can embed a blockgroup_lock into different
- * superblock types
- */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
- return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
+ return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}
#endif
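
The new closed-form NR_BG_LOCKS expression reproduces the #if/#elif ladder it replaces; working the arithmetic through (ilog2() is the floor of log2):

/*
 * NR_BG_LOCKS = 4 << ilog2(min(NR_CPUS, 32)):
 *
 *   NR_CPUS = 1       ->  4 << 0 =   4
 *   NR_CPUS = 2..3    ->  4 << 1 =   8
 *   NR_CPUS = 4..7    ->  4 << 2 =  16
 *   NR_CPUS = 8..15   ->  4 << 3 =  32
 *   NR_CPUS = 16..31  ->  4 << 4 =  64
 *   NR_CPUS >= 32     ->  4 << 5 = 128
 */
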
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index f9be32691718..962164d36506 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
+#include <asm/processor.h>
/*
* simple boot-time physical memory area allocator.
@@ -119,6 +120,10 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
@@ -180,10 +185,6 @@ static inline void * __init memblock_virt_alloc_nopanic(
NUMA_NO_NODE);
}
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11134238417d..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
+
+ /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+ * elem value. We only allow this if we can statically verify that
+ * access from this register are going to fall within the size of the
+ * map element.
+ */
+ PTR_TO_MAP_VALUE_ADJ,
};
struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
-
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+ /* Just some arbitrary values so we can safely do math without overflowing and
+ * are obviously wrong for any sort of memory access.
+ */
+#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ /*
+ * Used to determine if any memory access using this register will
+ * result in a bad access.
+ */
+ u64 min_value, max_value;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+ u32 id;
+ u16 off;
+ u16 range;
+ };
+
+ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+ * PTR_TO_MAP_VALUE_OR_NULL
+ */
+ struct bpf_map *map_ptr;
+ };
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID, /* nothing was stored in this stack slot */
+ STACK_SPILL, /* register spilled into stack */
+ STACK_MISC /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+ struct bpf_prog *prog; /* eBPF program being verified */
+ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+ int stack_size; /* number of states to be processed */
+ struct bpf_verifier_state cur_state; /* current verifier state */
+ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+ void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ bool seen_direct_write;
+ bool varlen_map_value_access;
+ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
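
bpf_analyzer() together with bpf_ext_analyzer_ops lets an external user (for example a hardware-offload backend) observe each instruction as the verifier walks it. A minimal sketch; the callback body and private state are made up:

#include <linux/bpf_verifier.h>

struct my_analyzer_priv {		/* hypothetical analyzer state */
	unsigned int insn_count;
};

static int my_insn_hook(struct bpf_verifier_env *env,
			int insn_idx, int prev_insn_idx)
{
	struct my_analyzer_priv *priv = env->analyzer_priv;

	priv->insn_count++;
	return 0;	/* a non-zero return is treated as a verification error */
}

static const struct bpf_ext_analyzer_ops my_analyzer_ops = {
	.insn_hook	= my_insn_hook,
};

static int my_check_prog(struct bpf_prog *prog, struct my_analyzer_priv *priv)
{
	return bpf_analyzer(prog, &my_analyzer_ops, priv);
}
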
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 984f73b719a9..440a72164a11 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
+/**
+ * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ struct css_set *cset = task_css_set(task);
+
+ return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
+}
+
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -557,6 +574,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
+struct cgroup;
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +592,11 @@ static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ return true;
+}
#endif /* !CONFIG_CGROUPS */
/*
@@ -621,6 +644,7 @@ struct cgroup_namespace {
atomic_t count;
struct ns_common ns;
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct css_set *root_cset;
};
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a39c0c530778..af596381fa0f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -772,7 +772,7 @@ struct clk_onecell_data {
};
struct clk_hw_onecell_data {
- size_t num;
+ unsigned int num;
struct clk_hw *hws[];
};
@@ -780,6 +780,18 @@ extern struct of_device_id __clk_of_table;
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+/*
+ * Use this macro when you have a driver that requires two initialization
+ * routines, one at of_clk_init(), and one at platform device probe
+ */
+#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
+ static void name##_of_clk_init_driver(struct device_node *np) \
+ { \
+ of_node_clear_flag(np, OF_POPULATED); \
+ fn(np); \
+ } \
+ OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
+
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -842,7 +854,7 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(struct device_node *np)
{
return 0;
}
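
CLK_OF_DECLARE_DRIVER() is meant for clock providers that register some clocks at of_clk_init() time and the rest from a platform driver probe; clearing OF_POPULATED in the generated wrapper is what lets the same node be probed again later. A sketch with a made-up compatible string:

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>

static void __init myclk_early_init(struct device_node *np)
{
	/* register only the clocks needed before the platform bus is up */
}
CLK_OF_DECLARE_DRIVER(myclk, "vendor,myclk", myclk_early_init);

/* A regular platform_driver matching "vendor,myclk" then handles the rest. */
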
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index d4e106b5dc27..0d8415820fc3 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -6,8 +6,10 @@
* Lower value means higher priority, analogically to reclaim priority.
*/
enum compact_priority {
+ COMPACT_PRIO_SYNC_FULL,
+ MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
COMPACT_PRIO_SYNC_LIGHT,
- MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
COMPACT_PRIO_ASYNC,
INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
@@ -49,14 +51,37 @@ enum compact_result {
COMPACT_CONTENDED,
/*
- * direct compaction partially compacted a zone and there might be
- * suitable pages
+ * direct compaction terminated after concluding that the allocation
+ * should now succeed
*/
- COMPACT_PARTIAL,
+ COMPACT_SUCCESS,
};
struct alloc_context; /* in mm/internal.h */
+/*
+ * Number of free order-0 pages that should be available above given watermark
+ * to make sure compaction has reasonable chance of not running out of free
+ * pages that it needs to isolate as migration target during its work.
+ */
+static inline unsigned long compact_gap(unsigned int order)
+{
+ /*
+ * Although all the isolations for migration are temporary, compaction
+ * free scanner may have up to 1 << order pages on its list and then
+ * try to split an (order - 1) free page. At that point, a gap of
+ * 1 << order might not be enough, so it's safer to require twice that
+ * amount. Note that the number of pages on the list is also
+ * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
+ * that the migrate scanner can have isolated on migrate list, and free
+ * scanner is only invoked when the number of isolated free pages is
+ * lower than that. But it's not worth to complicate the formula here
+ * as a bigger gap for higher orders than strictly necessary can also
+ * improve chances of compaction success.
+ */
+ return 2UL << order;
+}
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -70,7 +95,6 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
const struct alloc_context *ac, enum compact_priority prio);
-extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags, int classzone_idx);
@@ -89,7 +113,7 @@ static inline bool compaction_made_progress(enum compact_result result)
* that the compaction successfully isolated and migrated some
* pageblocks.
*/
- if (result == COMPACT_PARTIAL)
+ if (result == COMPACT_SUCCESS)
return true;
return false;
@@ -154,10 +178,6 @@ extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
#else
-static inline void compact_pgdat(pg_data_t *pgdat, int order)
-{
-}
-
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
diff --git a/include/linux/console.h b/include/linux/console.h
index d530c4627e54..3672809234a7 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,6 +173,12 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
+#ifdef CONFIG_OF
+extern void console_set_by_of(void);
+#else
+static inline void console_set_by_of(void) {}
+#endif
+
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 385d62e64abb..2a5982c37dfb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -232,8 +232,9 @@ struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*trace_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode);
- void (*disable)(struct coresight_device *csdev);
+ struct perf_event *event, u32 mode);
+ void (*disable)(struct coresight_device *csdev,
+ struct perf_event *event);
};
struct coresight_ops {
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7572d9e9dced..b886dc17f2f3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -231,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
+/* Attach to any functions which should be considered cpuidle. */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
+bool cpu_in_idle(unsigned long pc);
+
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7b6c446ee17f..9b207a8c5af3 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -43,6 +43,8 @@ enum cpuhp_state {
CPUHP_CPUIDLE_COUPLED_PREPARE,
CPUHP_POWERPC_PMAC_PREPARE,
CPUHP_POWERPC_MMU_CTX_PREPARE,
+ CPUHP_XEN_PREPARE,
+ CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_NOTIFY_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
@@ -114,6 +116,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 257db64562e5..f0e70a1bb3ac 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -26,15 +26,10 @@ struct inode;
/*
* COW Supplementary groups list
*/
-#define NGROUPS_SMALL 32
-#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(kgid_t)))
-
struct group_info {
atomic_t usage;
int ngroups;
- int nblocks;
- kgid_t small_block[NGROUPS_SMALL];
- kgid_t *blocks[0];
+ kgid_t gid[0];
};
/**
@@ -88,10 +83,6 @@ extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
- ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
-
/*
* The security context of a task
*
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9c6dc7704043..add6c4bc568f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -6,13 +6,19 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+struct iomap_ops;
+
/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 1438e2322d5c..4d3f0d1aec73 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,6 +45,23 @@ extern struct dentry *arch_debugfs_dir;
extern struct srcu_struct debugfs_srcu;
+/**
+ * debugfs_real_fops - getter for the real file operation
+ * @filp: a pointer to a struct file
+ *
+ * Must only be called under the protection established by
+ * debugfs_use_file_start().
+ */
+static inline const struct file_operations *debugfs_real_fops(struct file *filp)
+ __must_hold(&debugfs_srcu)
+{
+ /*
+ * Neither the pointer to the struct file_operations, nor its
+ * contents ever change -- srcu_dereference() is not needed here.
+ */
+ return filp->f_path.dentry->d_fsdata;
+}
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
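
debugfs_real_fops() is intended for code already sitting behind the debugfs file-removal protection. A sketch of a proxy-style ->read() using it, assuming the debugfs_use_file_start()/debugfs_use_file_finish() pair that goes with debugfs_srcu; the wrapper name is hypothetical:

#include <linux/debugfs.h>
#include <linux/fs.h>

static ssize_t mydrv_proxy_read(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	const struct file_operations *real_fops;
	int srcu_idx;
	ssize_t ret;

	ret = debugfs_use_file_start(filp->f_path.dentry, &srcu_idx);
	if (likely(!ret)) {
		real_fops = debugfs_real_fops(filp);
		ret = real_fops->read(filp, buf, count, ppos);
	}
	debugfs_use_file_finish(srcu_idx);
	return ret;
}
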
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91acfce74a22..ef7962e84444 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE 3
#define dm_sector_div64(x, y)( \
{ \
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index fe8cb610deac..c7d844f09c3a 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
+extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr);
+
+extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction);
+
extern void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
int direction);
@@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
{
}
+static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap_resource(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+}
+
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dc69df04abc1..0f90eb5e3c6b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -95,6 +95,12 @@ struct dma_map_ops {
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
unsigned long attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
@@ -258,6 +264,41 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
debug_dma_unmap_page(dev, addr, size, dir, false);
}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ /* Don't allow RAM to be mapped */
+ BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
+
+ addr = phys_addr;
+ if (ops->map_resource)
+ addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+ debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+
+ return addr;
+}
+
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_resource)
+ ops->unmap_resource(dev, addr, size, dir, attrs);
+ debug_dma_unmap_resource(dev, addr, size, dir);
+}
+
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
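
dma_map_resource()/dma_unmap_resource() map a physical MMIO resource (never RAM, hence the pfn_valid() BUG_ON) so that another device's DMA engine can target it. A sketch mapping a hypothetical 4-byte peripheral FIFO register:

#include <linux/dma-mapping.h>

static dma_addr_t mydrv_map_fifo(struct device *dma_dev, phys_addr_t fifo_phys)
{
	/* attrs == 0; the DMA engine only writes towards the device FIFO */
	return dma_map_resource(dma_dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
}

static void mydrv_unmap_fifo(struct device *dma_dev, dma_addr_t fifo_dma)
{
	dma_unmap_resource(dma_dev, fifo_dma, 4, DMA_TO_DEVICE, 0);
}
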
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index f2e538aaddad..ccfd0c3777df 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -40,8 +40,13 @@ struct dw_dma_chip {
};
/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
+#else
+static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
+static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
+#endif /* CONFIG_DW_DMAC_CORE */
/* DMA API extensions */
struct dw_desc;
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index aaff68efba5d..197eec63e501 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -41,8 +41,7 @@ struct hsu_dma_chip {
/* Export to the internal users */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
u32 *status);
-irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
- u32 status);
+int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status);
/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
@@ -53,10 +52,10 @@ static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
{
return 0;
}
-static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
- unsigned short nr, u32 status)
+static inline int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 status)
{
- return IRQ_NONE;
+ return 0;
}
static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; }
static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 30de0197263a..cc535a478bae 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0, /* SUCCESS */
+ DMA_TRANS_READ_FAILED, /* Source DMA read failed */
+ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
+ DMA_TRANS_ABORTED, /* Op never submitted / aborted */
+};
+
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+ const struct dmaengine_result *result);
+
struct dmaengine_unmap_data {
u8 map_cnt;
u8 to_cnt;
@@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
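
The new ->callback_result lets a descriptor's completion handler see why the transfer finished, and how many bytes were left, instead of using the bare ->callback. A client-side sketch; the request structure and names are hypothetical:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

struct mydrv_req {			/* hypothetical request context */
	struct completion done;
};

static void mydrv_dma_done(void *param, const struct dmaengine_result *res)
{
	struct mydrv_req *req = param;

	/* residue = bytes not transferred when result != DMA_TRANS_NOERROR */
	if (res->result != DMA_TRANS_NOERROR)
		pr_warn("DMA error %d, residue %u\n", res->result, res->residue);
	complete(&req->done);
}

static dma_cookie_t mydrv_submit(struct dma_async_tx_descriptor *desc,
				 struct mydrv_req *req)
{
	desc->callback_result = mydrv_dma_done;	/* preferred over ->callback */
	desc->callback_param  = req;
	return dmaengine_submit(desc);
}
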
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 61004413dc64..b871c0cb1f02 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -29,6 +29,15 @@
#include <linux/device.h>
/*
+ * Define the type of supported external connectors
+ */
+#define EXTCON_TYPE_USB BIT(0) /* USB connector */
+#define EXTCON_TYPE_CHG BIT(1) /* Charger connector */
+#define EXTCON_TYPE_JACK BIT(2) /* Jack connector */
+#define EXTCON_TYPE_DISP BIT(3) /* Display connector */
+#define EXTCON_TYPE_MISC BIT(4) /* Miscellaneous connector */
+
+/*
* Define the unique id of supported external connectors
*/
#define EXTCON_NONE 0
@@ -44,6 +53,7 @@
#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */
#define EXTCON_CHG_USB_FAST 9
#define EXTCON_CHG_USB_SLOW 10
+#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
/* Jack external connector */
#define EXTCON_JACK_MICROPHONE 20
@@ -60,6 +70,8 @@
#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
+#define EXTCON_DISP_DP 44 /* Display Port */
+#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -68,6 +80,85 @@
#define EXTCON_NUM 63
+/*
+ * Define the property of supported external connectors.
+ *
+ * When adding the new extcon property, they *must* have
+ * the type/value/default information. Also, you *have to*
+ * modify the EXTCON_PROP_[type]_START/END definitions
+ * which define the range of the supported properties
+ * for each extcon type.
+ *
+ * The naming style of property
+ * : EXTCON_PROP_[type]_[property name]
+ *
+ * EXTCON_PROP_USB_[property name] : USB property
+ * EXTCON_PROP_CHG_[property name] : Charger property
+ * EXTCON_PROP_JACK_[property name] : Jack property
+ * EXTCON_PROP_DISP_[property name] : Display property
+ */
+
+/*
+ * Properties of EXTCON_TYPE_USB.
+ *
+ * - EXTCON_PROP_USB_VBUS
+ * @type: integer (intval)
+ * @value: 0 (low) or 1 (high)
+ * @default: 0 (low)
+ * - EXTCON_PROP_USB_TYPEC_POLARITY
+ * @type: integer (intval)
+ * @value: 0 (normal) or 1 (flip)
+ * @default: 0 (normal)
+ * - EXTCON_PROP_USB_SS (SuperSpeed)
+ * @type: integer (intval)
+ * @value: 0 (USB/USB2) or 1 (USB3)
+ * @default: 0 (USB/USB2)
+ *
+ */
+#define EXTCON_PROP_USB_VBUS 0
+#define EXTCON_PROP_USB_TYPEC_POLARITY 1
+#define EXTCON_PROP_USB_SS 2
+
+#define EXTCON_PROP_USB_MIN 0
+#define EXTCON_PROP_USB_MAX 2
+#define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1)
+
+/* Properties of EXTCON_TYPE_CHG. */
+#define EXTCON_PROP_CHG_MIN 50
+#define EXTCON_PROP_CHG_MAX 50
+#define EXTCON_PROP_CHG_CNT (EXTCON_PROP_CHG_MAX - EXTCON_PROP_CHG_MIN + 1)
+
+/* Properties of EXTCON_TYPE_JACK. */
+#define EXTCON_PROP_JACK_MIN 100
+#define EXTCON_PROP_JACK_MAX 100
+#define EXTCON_PROP_JACK_CNT (EXTCON_PROP_JACK_MAX - EXTCON_PROP_JACK_MIN + 1)
+
+/*
+ * Properties of EXTCON_TYPE_DISP.
+ *
+ * - EXTCON_PROP_DISP_HPD (Hot Plug Detect)
+ * @type: integer (intval)
+ * @value: 0 (no hpd) or 1 (hpd)
+ * @default: 0 (no hpd)
+ *
+ */
+#define EXTCON_PROP_DISP_HPD 150
+
+/* Properties of EXTCON_TYPE_DISP. */
+#define EXTCON_PROP_DISP_MIN 150
+#define EXTCON_PROP_DISP_MAX 151
+#define EXTCON_PROP_DISP_CNT (EXTCON_PROP_DISP_MAX - EXTCON_PROP_DISP_MIN + 1)
+
+/*
+ * Define the type of property's value.
+ *
+ * Define the property's value as a union type, because each property
+ * may need a different data type to store it.
+ */
+union extcon_property_value {
+ int intval; /* type : integer (intval) */
+};
+
struct extcon_cable;
/**
@@ -150,26 +241,42 @@ extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/*
- * get/set/update_state access the 32b encoded state value, which represents
- * states of all possible cables of the multistate port. For example, if one
- * calls extcon_set_state(edev, 0x7), it may mean that all the three cables
- * are attached to the port.
+ * get/set_state access each bit of the 32b encoded state value.
+ * They are used to access the status of each cable based on the cable id.
*/
-static inline u32 extcon_get_state(struct extcon_dev *edev)
-{
- return edev->state;
-}
+extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
+extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool cable_state);
+extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool cable_state);
+/*
+ * Synchronize the state and property data for a specific external connector.
+ */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_state(struct extcon_dev *edev, u32 state);
-extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
+/*
+ * get/set_property access the property value of each external connector.
+ * They are used to access the property of each cable based on the property id.
+ */
+extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val);
+extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
/*
- * get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
+ * get/set_property_capability set the capability of the property for each
+ * external connector. They are used to set the capability of the property
+ * of each external connector based on the id and property.
*/
-extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
+extern int extcon_get_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
+extern int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
/*
* Following APIs are to monitor every action of a notifier.
@@ -232,30 +339,57 @@ static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
-static inline u32 extcon_get_state(struct extcon_dev *edev)
+
+static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
+{
+ return 0;
+}
+
+static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
+{
+ return 0;
+}
+
+static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
{
return 0;
}
-static inline int extcon_set_state(struct extcon_dev *edev, u32 state)
+static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
return 0;
}
-static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
- u32 state)
+static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val)
+{
+ return 0;
+}
+static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
-static inline int extcon_get_cable_state_(struct extcon_dev *edev,
- unsigned int id)
+static inline int extcon_set_property_sync(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
-static inline int extcon_set_cable_state_(struct extcon_dev *edev,
- unsigned int id, bool cable_state)
+static inline int extcon_get_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
{
return 0;
}
@@ -320,4 +454,15 @@ static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
{
return -EINVAL;
}
+
+static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
+{
+ return extcon_get_state(edev, id);
+}
+
+static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
+{
+ return extcon_set_state_sync(edev, id, cable_state);
+}
#endif /* __LINUX_EXTCON_H__ */
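
Under the reworked API an extcon provider publishes state per connector id and attaches typed properties to it: the property capability is declared once at probe time, and a property is set before the state change is synced. A sketch for a USB Type-C orientation report; the driver functions are hypothetical:

#include <linux/extcon.h>

/* At probe: declare which properties this connector can carry. */
static int mydrv_extcon_init(struct extcon_dev *edev)
{
	return extcon_set_property_capability(edev, EXTCON_USB,
					      EXTCON_PROP_USB_TYPEC_POLARITY);
}

/* On attach: set the property, then publish the state change. */
static void mydrv_report_attach(struct extcon_dev *edev, bool flipped)
{
	union extcon_property_value pol = { .intval = flipped };

	extcon_set_property(edev, EXTCON_USB,
			    EXTCON_PROP_USB_TYPEC_POLARITY, pol);
	extcon_set_state_sync(edev, EXTCON_USB, true);
}
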
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index ac85f2061351..a0e03b13b449 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -20,8 +20,8 @@
/**
* struct adc_jack_cond - condition to use an extcon state
- * @state: the corresponding extcon state (if 0, this struct
* denotes the last adc_jack_cond element among the array)
+ * @id: the unique id of each external connector
* @min_adc: min adc value for this condition
* @max_adc: max adc value for this condition
*
@@ -33,7 +33,7 @@
* because when no adc_jack_cond is met, state = 0 is automatically chosen.
*/
struct adc_jack_cond {
- u32 state; /* extcon state value. 0 if invalid */
+ unsigned int id;
u32 min_adc;
u32 max_adc;
};
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 4c02c6521fef..422630b8e588 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -100,6 +100,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
bpf_size; \
})
+#define BPF_SIZEOF(type) \
+ ({ \
+ const int __size = bytes_to_bpf_size(sizeof(type)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define BPF_FIELD_SIZEOF(type, field) \
+ ({ \
+ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a) \
+ (__force t) \
+ (__force \
+ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
+ (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n) \
+ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
+ u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...) \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ { \
+ return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ } \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
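
The BPF_CALL_x() family generates the five-u64-register calling-convention glue so helper bodies can be written with natural C types. A sketch of defining and describing a made-up two-argument helper:

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/filter.h>

/* Expands to u64 bpf_double_it(u64 r1, ..., u64 r5) plus a typed inner body. */
BPF_CALL_2(bpf_double_it, u32, value, u64, flags)
{
	if (flags)
		return -EINVAL;
	return value * 2;
}

static const struct bpf_func_proto bpf_double_it_proto = {
	.func		= bpf_double_it,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
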
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
new file mode 100644
index 000000000000..8e953c6f394a
--- /dev/null
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MESON_SM_FW_H_
+#define _MESON_SM_FW_H_
+
+enum {
+ SM_EFUSE_READ,
+ SM_EFUSE_WRITE,
+ SM_EFUSE_USER_MAX,
+};
+
+struct meson_sm_firmware;
+
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 arg4);
+
+#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 901e25d495cc..b04883e74579 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2794,8 +2794,6 @@ extern void block_sync_page(struct page *page);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
-extern ssize_t default_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index 76cff18bb032..ff8b11b26f31 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -111,23 +111,6 @@ struct fscrypt_completion_result {
struct fscrypt_completion_result ecr = { \
COMPLETION_INITIALIZER((ecr).completion), 0 }
-static inline int fscrypt_key_size(int mode)
-{
- switch (mode) {
- case FS_ENCRYPTION_MODE_AES_256_XTS:
- return FS_AES_256_XTS_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_GCM:
- return FS_AES_256_GCM_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_CBC:
- return FS_AES_256_CBC_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_CTS:
- return FS_AES_256_CTS_KEY_SIZE;
- default:
- BUG();
- }
- return 0;
-}
-
#define FS_FNAME_NUM_SCATTER_ENTRIES 4
#define FS_CRYPTO_BLOCK_SIZE 16
#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
@@ -202,13 +185,6 @@ static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
}
-static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
-{
- if (size == fscrypt_key_size(mode))
- return size;
- return 0;
-}
-
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
if (str->len == 1 && str->name[0] == '.')
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7268ed076be8..79467b239fcf 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -135,7 +135,7 @@ struct fsnotify_group {
const struct fsnotify_ops *ops; /* how this group handles things */
/* needed to send notification to userspace */
- struct mutex notification_mutex; /* protect the notification_list */
+ spinlock_t notification_lock; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
@@ -177,7 +177,6 @@ struct fsnotify_group {
struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* allows a group to block waiting for a userspace response */
- spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6f93ac46e7f0..b3d34d3e0e7e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -794,7 +794,9 @@ struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
+#ifdef CONFIG_FUNCTION_PROFILER
unsigned long long subtime;
+#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index dca7bf8cffe2..4ec2c9b205f2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -3,11 +3,34 @@
#ifdef CONFIG_FTRACE_NMI_ENTER
-extern void ftrace_nmi_enter(void);
-extern void ftrace_nmi_exit(void);
+extern void arch_ftrace_nmi_enter(void);
+extern void arch_ftrace_nmi_exit(void);
#else
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
+static inline void arch_ftrace_nmi_enter(void) { }
+static inline void arch_ftrace_nmi_exit(void) { }
#endif
+#ifdef CONFIG_HWLAT_TRACER
+extern bool trace_hwlat_callback_enabled;
+extern void trace_hwlat_callback(bool enter);
+#endif
+
+static inline void ftrace_nmi_enter(void)
+{
+#ifdef CONFIG_HWLAT_TRACER
+ if (trace_hwlat_callback_enabled)
+ trace_hwlat_callback(true);
+#endif
+ arch_ftrace_nmi_enter();
+}
+
+static inline void ftrace_nmi_exit(void)
+{
+ arch_ftrace_nmi_exit();
+#ifdef CONFIG_HWLAT_TRACER
+ if (trace_hwlat_callback_enabled)
+ trace_hwlat_callback(false);
+#endif
+}
+
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 50882e09289b..1f0be7213e6d 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -3,7 +3,6 @@
#include <linux/device.h>
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
@@ -16,6 +15,7 @@ struct of_phandle_args;
struct device_node;
struct seq_file;
struct gpio_device;
+struct module;
#ifdef CONFIG_GPIOLIB
@@ -112,6 +112,10 @@ enum single_ended_mode {
* initialization, provided by GPIO driver
* @irq_parent: GPIO IRQ chip parent/bank linux irq number,
* provided by GPIO driver
+ * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
+ * bits set to one
+ * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
+ * be included in IRQ domain of the chip
* @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
@@ -190,6 +194,8 @@ struct gpio_chip {
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
int irq_parent;
+ bool irq_need_valid_mask;
+ unsigned long *irq_valid_mask;
struct lock_class_key *lock_key;
#endif
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index c02b5ce6c5cd..dd85f3503410 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -236,6 +236,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct work_struct work;
};
/* Convert from hid unit expo to regular exponent */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 75b66eccc692..b2ec82712baa 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -837,7 +837,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
*/
static inline void hid_device_io_start(struct hid_device *hid) {
if (hid->io_started) {
- dev_warn(&hid->dev, "io already started");
+ dev_warn(&hid->dev, "io already started\n");
return;
}
hid->io_started = true;
@@ -857,7 +857,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
*/
static inline void hid_device_io_stop(struct hid_device *hid) {
if (!hid->io_started) {
- dev_warn(&hid->dev, "io already stopped");
+ dev_warn(&hid->dev, "io already stopped\n");
return;
}
hid->io_started = false;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6f14de45b5ce..9b9f65d99873 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -87,6 +87,10 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
extern unsigned long transparent_hugepage_flags;
+extern unsigned long thp_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
@@ -152,8 +156,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return is_huge_zero_page(pmd_page(pmd));
}
-struct page *get_huge_zero_page(void);
-void put_huge_zero_page(void);
+struct page *mm_get_huge_zero_page(struct mm_struct *mm);
+void mm_put_huge_zero_page(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
@@ -169,6 +173,9 @@ void put_huge_zero_page(void);
static inline void prep_transhuge_page(struct page *page) {}
#define transparent_hugepage_flags 0UL
+
+#define thp_get_unmapped_area NULL
+
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
@@ -213,9 +220,9 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
-static inline void put_huge_zero_page(void)
+static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
- BUILD_BUG();
+ return;
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c26d4638f665..48c76d612d40 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -90,7 +90,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
@@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
-#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define dissolve_free_huge_pages(s, e) 0
#define hugepage_migration_supported(h) false
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 09354f6c1d63..9d2f8bde7d12 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -14,9 +14,341 @@
#ifndef _HWMON_H_
#define _HWMON_H_
+#include <linux/bitops.h>
+
struct device;
struct attribute_group;
+enum hwmon_sensor_types {
+ hwmon_chip,
+ hwmon_temp,
+ hwmon_in,
+ hwmon_curr,
+ hwmon_power,
+ hwmon_energy,
+ hwmon_humidity,
+ hwmon_fan,
+ hwmon_pwm,
+};
+
+enum hwmon_chip_attributes {
+ hwmon_chip_temp_reset_history,
+ hwmon_chip_in_reset_history,
+ hwmon_chip_curr_reset_history,
+ hwmon_chip_power_reset_history,
+ hwmon_chip_register_tz,
+ hwmon_chip_update_interval,
+ hwmon_chip_alarms,
+};
+
+#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
+#define HWMON_C_IN_RESET_HISTORY BIT(hwmon_chip_in_reset_history)
+#define HWMON_C_CURR_RESET_HISTORY BIT(hwmon_chip_curr_reset_history)
+#define HWMON_C_POWER_RESET_HISTORY BIT(hwmon_chip_power_reset_history)
+#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
+#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
+#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+
+enum hwmon_temp_attributes {
+ hwmon_temp_input = 0,
+ hwmon_temp_type,
+ hwmon_temp_lcrit,
+ hwmon_temp_lcrit_hyst,
+ hwmon_temp_min,
+ hwmon_temp_min_hyst,
+ hwmon_temp_max,
+ hwmon_temp_max_hyst,
+ hwmon_temp_crit,
+ hwmon_temp_crit_hyst,
+ hwmon_temp_emergency,
+ hwmon_temp_emergency_hyst,
+ hwmon_temp_alarm,
+ hwmon_temp_lcrit_alarm,
+ hwmon_temp_min_alarm,
+ hwmon_temp_max_alarm,
+ hwmon_temp_crit_alarm,
+ hwmon_temp_emergency_alarm,
+ hwmon_temp_fault,
+ hwmon_temp_offset,
+ hwmon_temp_label,
+ hwmon_temp_lowest,
+ hwmon_temp_highest,
+ hwmon_temp_reset_history,
+};
+
+#define HWMON_T_INPUT BIT(hwmon_temp_input)
+#define HWMON_T_TYPE BIT(hwmon_temp_type)
+#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
+#define HWMON_T_LCRIT_HYST BIT(hwmon_temp_lcrit_hyst)
+#define HWMON_T_MIN BIT(hwmon_temp_min)
+#define HWMON_T_MIN_HYST BIT(hwmon_temp_min_hyst)
+#define HWMON_T_MAX BIT(hwmon_temp_max)
+#define HWMON_T_MAX_HYST BIT(hwmon_temp_max_hyst)
+#define HWMON_T_CRIT BIT(hwmon_temp_crit)
+#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
+#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
+#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
+#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
+#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
+#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
+#define HWMON_T_FAULT BIT(hwmon_temp_fault)
+#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
+#define HWMON_T_LABEL BIT(hwmon_temp_label)
+#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
+#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
+#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+
+enum hwmon_in_attributes {
+ hwmon_in_input,
+ hwmon_in_min,
+ hwmon_in_max,
+ hwmon_in_lcrit,
+ hwmon_in_crit,
+ hwmon_in_average,
+ hwmon_in_lowest,
+ hwmon_in_highest,
+ hwmon_in_reset_history,
+ hwmon_in_label,
+ hwmon_in_alarm,
+ hwmon_in_min_alarm,
+ hwmon_in_max_alarm,
+ hwmon_in_lcrit_alarm,
+ hwmon_in_crit_alarm,
+};
+
+#define HWMON_I_INPUT BIT(hwmon_in_input)
+#define HWMON_I_MIN BIT(hwmon_in_min)
+#define HWMON_I_MAX BIT(hwmon_in_max)
+#define HWMON_I_LCRIT BIT(hwmon_in_lcrit)
+#define HWMON_I_CRIT BIT(hwmon_in_crit)
+#define HWMON_I_AVERAGE BIT(hwmon_in_average)
+#define HWMON_I_LOWEST BIT(hwmon_in_lowest)
+#define HWMON_I_HIGHEST BIT(hwmon_in_highest)
+#define HWMON_I_RESET_HISTORY BIT(hwmon_in_reset_history)
+#define HWMON_I_LABEL BIT(hwmon_in_label)
+#define HWMON_I_ALARM BIT(hwmon_in_alarm)
+#define HWMON_I_MIN_ALARM BIT(hwmon_in_min_alarm)
+#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
+#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
+#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+
+enum hwmon_curr_attributes {
+ hwmon_curr_input,
+ hwmon_curr_min,
+ hwmon_curr_max,
+ hwmon_curr_lcrit,
+ hwmon_curr_crit,
+ hwmon_curr_average,
+ hwmon_curr_lowest,
+ hwmon_curr_highest,
+ hwmon_curr_reset_history,
+ hwmon_curr_label,
+ hwmon_curr_alarm,
+ hwmon_curr_min_alarm,
+ hwmon_curr_max_alarm,
+ hwmon_curr_lcrit_alarm,
+ hwmon_curr_crit_alarm,
+};
+
+#define HWMON_C_INPUT BIT(hwmon_curr_input)
+#define HWMON_C_MIN BIT(hwmon_curr_min)
+#define HWMON_C_MAX BIT(hwmon_curr_max)
+#define HWMON_C_LCRIT BIT(hwmon_curr_lcrit)
+#define HWMON_C_CRIT BIT(hwmon_curr_crit)
+#define HWMON_C_AVERAGE BIT(hwmon_curr_average)
+#define HWMON_C_LOWEST BIT(hwmon_curr_lowest)
+#define HWMON_C_HIGHEST BIT(hwmon_curr_highest)
+#define HWMON_C_RESET_HISTORY BIT(hwmon_curr_reset_history)
+#define HWMON_C_LABEL BIT(hwmon_curr_label)
+#define HWMON_C_ALARM BIT(hwmon_curr_alarm)
+#define HWMON_C_MIN_ALARM BIT(hwmon_curr_min_alarm)
+#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
+#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
+#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+
+enum hwmon_power_attributes {
+ hwmon_power_average,
+ hwmon_power_average_interval,
+ hwmon_power_average_interval_max,
+ hwmon_power_average_interval_min,
+ hwmon_power_average_highest,
+ hwmon_power_average_lowest,
+ hwmon_power_average_max,
+ hwmon_power_average_min,
+ hwmon_power_input,
+ hwmon_power_input_highest,
+ hwmon_power_input_lowest,
+ hwmon_power_reset_history,
+ hwmon_power_accuracy,
+ hwmon_power_cap,
+ hwmon_power_cap_hyst,
+ hwmon_power_cap_max,
+ hwmon_power_cap_min,
+ hwmon_power_max,
+ hwmon_power_crit,
+ hwmon_power_label,
+ hwmon_power_alarm,
+ hwmon_power_cap_alarm,
+ hwmon_power_max_alarm,
+ hwmon_power_crit_alarm,
+};
+
+#define HWMON_P_AVERAGE BIT(hwmon_power_average)
+#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
+#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
+#define HWMON_P_AVERAGE_INTERVAL_MIN BIT(hwmon_power_average_interval_min)
+#define HWMON_P_AVERAGE_HIGHEST BIT(hwmon_power_average_highest)
+#define HWMON_P_AVERAGE_LOWEST BIT(hwmon_power_average_lowest)
+#define HWMON_P_AVERAGE_MAX BIT(hwmon_power_average_max)
+#define HWMON_P_AVERAGE_MIN BIT(hwmon_power_average_min)
+#define HWMON_P_INPUT BIT(hwmon_power_input)
+#define HWMON_P_INPUT_HIGHEST BIT(hwmon_power_input_highest)
+#define HWMON_P_INPUT_LOWEST BIT(hwmon_power_input_lowest)
+#define HWMON_P_RESET_HISTORY BIT(hwmon_power_reset_history)
+#define HWMON_P_ACCURACY BIT(hwmon_power_accuracy)
+#define HWMON_P_CAP BIT(hwmon_power_cap)
+#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
+#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
+#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_CRIT BIT(hwmon_power_crit)
+#define HWMON_P_LABEL BIT(hwmon_power_label)
+#define HWMON_P_ALARM BIT(hwmon_power_alarm)
+#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+
+enum hwmon_energy_attributes {
+ hwmon_energy_input,
+ hwmon_energy_label,
+};
+
+#define HWMON_E_INPUT BIT(hwmon_energy_input)
+#define HWMON_E_LABEL BIT(hwmon_energy_label)
+
+enum hwmon_humidity_attributes {
+ hwmon_humidity_input,
+ hwmon_humidity_label,
+ hwmon_humidity_min,
+ hwmon_humidity_min_hyst,
+ hwmon_humidity_max,
+ hwmon_humidity_max_hyst,
+ hwmon_humidity_alarm,
+ hwmon_humidity_fault,
+};
+
+#define HWMON_H_INPUT BIT(hwmon_humidity_input)
+#define HWMON_H_LABEL BIT(hwmon_humidity_label)
+#define HWMON_H_MIN BIT(hwmon_humidity_min)
+#define HWMON_H_MIN_HYST BIT(hwmon_humidity_min_hyst)
+#define HWMON_H_MAX BIT(hwmon_humidity_max)
+#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
+#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
+#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+
+enum hwmon_fan_attributes {
+ hwmon_fan_input,
+ hwmon_fan_label,
+ hwmon_fan_min,
+ hwmon_fan_max,
+ hwmon_fan_div,
+ hwmon_fan_pulses,
+ hwmon_fan_target,
+ hwmon_fan_alarm,
+ hwmon_fan_min_alarm,
+ hwmon_fan_max_alarm,
+ hwmon_fan_fault,
+};
+
+#define HWMON_F_INPUT BIT(hwmon_fan_input)
+#define HWMON_F_LABEL BIT(hwmon_fan_label)
+#define HWMON_F_MIN BIT(hwmon_fan_min)
+#define HWMON_F_MAX BIT(hwmon_fan_max)
+#define HWMON_F_DIV BIT(hwmon_fan_div)
+#define HWMON_F_PULSES BIT(hwmon_fan_pulses)
+#define HWMON_F_TARGET BIT(hwmon_fan_target)
+#define HWMON_F_ALARM BIT(hwmon_fan_alarm)
+#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
+#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
+#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+
+enum hwmon_pwm_attributes {
+ hwmon_pwm_input,
+ hwmon_pwm_enable,
+ hwmon_pwm_mode,
+ hwmon_pwm_freq,
+};
+
+#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
+#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
+#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
+#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+
+/**
+ * struct hwmon_ops - hwmon device operations
+ * @is_visible: Callback to return attribute visibility. Mandatory.
+ * Parameters are:
+ * @const void *drvdata:
+ * Pointer to driver-private data structure passed
+ * as argument to hwmon_device_register_with_info().
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * The function returns the file permissions.
+ * If the return value is 0, no attribute will be created.
+ * @read: Read callback. Optional. If not provided, attributes
+ * will not be readable.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @val: Pointer to returned value
+ * The function returns 0 on success or a negative error number.
+ * @write: Write callback. Optional. If not provided, attributes
+ * will not be writable.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @val: Value to write
+ * The function returns 0 on success or a negative error number.
+ */
+struct hwmon_ops {
+ umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel);
+ int (*read)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val);
+ int (*write)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val);
+};
+
+/**
+ * Channel information
+ * @type: Channel type.
+ * @config: Pointer to NULL-terminated list of channel parameters.
+ * Use for per-channel attributes.
+ */
+struct hwmon_channel_info {
+ enum hwmon_sensor_types type;
+ const u32 *config;
+};
+
+/**
+ * Chip configuration
+ * @ops: Pointer to hwmon operations.
+ * @info: Null-terminated list of channel information.
+ */
+struct hwmon_chip_info {
+ const struct hwmon_ops *ops;
+ const struct hwmon_channel_info **info;
+};
+
struct device *hwmon_device_register(struct device *dev);
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
@@ -26,6 +358,16 @@ struct device *
devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
const struct attribute_group **groups);
+struct device *
+hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
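For orientation, a rough sketch of how a driver would feed the info-based registration API added above; the foo_* identifiers and the foo_read_temp_mcelsius() helper are invented for illustration:

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return (type == hwmon_temp && attr == hwmon_temp_input) ? 0444 : 0;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	/* foo_read_temp_mcelsius() is a stand-in for the chip access code. */
	*val = foo_read_temp_mcelsius(dev_get_drvdata(dev), channel);
	return 0;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};

static const u32 foo_temp_config[] = {
	HWMON_T_INPUT,	/* temp1 */
	HWMON_T_INPUT,	/* temp2 */
	0		/* terminator */
};

static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp,
	.config = foo_temp_config,
};

static const struct hwmon_channel_info *foo_info[] = {
	&foo_temp,
	NULL
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

/* In probe():
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", drvdata,
 *							  &foo_chip_info, NULL);
 */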
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b10954a66939..6824556d37ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -674,6 +674,11 @@ enum hv_signal_policy {
HV_SIGNAL_POLICY_EXPLICIT,
};
+enum hv_numa_policy {
+ HV_BALANCED = 0,
+ HV_LOCALIZED,
+};
+
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -701,9 +706,6 @@ struct vmbus_device {
};
struct vmbus_channel {
- /* Unique channel id */
- int id;
-
struct list_head listentry;
struct hv_device *device_obj;
@@ -850,6 +852,43 @@ struct vmbus_channel {
* ring lock to preserve the current behavior.
*/
bool acquire_ring_lock;
+ /*
+ * For performance critical channels (storage, networking
+ * etc.), Hyper-V has a mechanism to enhance the throughput
+ * at the expense of latency:
+ * When the host is to be signaled, we just set a bit in a shared page
+ * and this bit will be inspected by the hypervisor within a certain
+ * window and if the bit is set, the host will be signaled. The window
+ * of time is the monitor latency - currently around 100 usecs. This
+ * mechanism improves throughput by:
+ *
+ * A) Making the host more efficient - each time it wakes up,
+ * potentially it will process more packets. The
+ * monitor latency allows a batch to build up.
+ * B) By deferring the hypercall to signal, we will also minimize
+ * the interrupts.
+ *
+ * Clearly, these optimizations improve throughput at the expense of
+ * latency. Furthermore, since the channel is shared for both
+ * control and data messages, control messages currently suffer
+ * unnecessary latency, adversely impacting performance and boot
+ * time. To fix this issue, permit tagging the channel as being
+ * in "low latency" mode. In this mode, we will bypass the monitor
+ * mechanism.
+ */
+ bool low_latency;
+
+ /*
+ * NUMA distribution policy:
+ * We support two policies:
+ * 1) Balanced: Here all performance critical channels are
+ * distributed evenly amongst all the NUMA nodes.
+ * This policy will be the default policy.
+ * 2) Localized: All channels of a given instance of a
+ * performance critical service will be assigned CPUs
+ * within a selected NUMA node.
+ */
+ enum hv_numa_policy affinity_policy;
};
@@ -870,6 +909,12 @@ static inline void set_channel_signal_state(struct vmbus_channel *c,
c->signal_policy = policy;
}
+static inline void set_channel_affinity_state(struct vmbus_channel *c,
+ enum hv_numa_policy policy)
+{
+ c->affinity_policy = policy;
+}
+
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
c->batched_reading = state;
@@ -891,6 +936,16 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
+static inline void set_low_latency_mode(struct vmbus_channel *c)
+{
+ c->low_latency = true;
+}
+
+static inline void clear_low_latency_mode(struct vmbus_channel *c)
+{
+ c->low_latency = false;
+}
+
void vmbus_onmessage(void *context);
int vmbus_request_offers(void);
@@ -1114,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
+{
+ const struct kobject *kobj = &device_obj->device.kobj;
+
+ return kobj->name;
+}
+
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
@@ -1257,6 +1319,27 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
+ * Linux doesn't support these 3 devices: the first two are for
+ * Automatic Virtual Machine Activation, and the third is for
+ * Remote Desktop Virtualization.
+ * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
+ * {3375baf4-9e15-4b30-b765-67acb10d607b}
+ * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ */
+
+#define HV_AVMA1_GUID \
+ .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+
+#define HV_AVMA2_GUID \
+ .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+
+#define HV_RDV_GUID \
+ .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+
+/*
* Common header for Hyper-V ICs
*/
@@ -1344,6 +1427,15 @@ struct ictimesync_data {
u8 flags;
} __packed;
+struct ictimesync_ref_data {
+ u64 parenttime;
+ u64 vmreferencetime;
+ u8 flags;
+ char leapflags;
+ char stratum;
+ u8 reserved[3];
+} __packed;
+
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
@@ -1357,6 +1449,9 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
+void hv_event_tasklet_disable(struct vmbus_channel *channel);
+void hv_event_tasklet_enable(struct vmbus_channel *channel);
+
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
/*
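Illustrative only: a performance-critical VSC might tag its channel along these lines when the offer arrives (foo_vsc_on_offer() is a made-up name; the two setters come from this header):

static void foo_vsc_on_offer(struct vmbus_channel *channel)
{
	/* Skip the monitor page mechanism so requests are signaled
	 * immediately instead of waiting out the ~100 us monitor latency. */
	set_low_latency_mode(channel);

	/* Keep all channels of this service instance on CPUs of one
	 * NUMA node. */
	set_channel_affinity_state(channel, HV_LOCALIZED);
}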
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index d4c1d12f900d..bd74d5706f3b 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -32,7 +32,9 @@
struct i2c_mux_core {
struct i2c_adapter *parent;
struct device *dev;
- bool mux_locked;
+ unsigned int mux_locked:1;
+ unsigned int arbitrator:1;
+ unsigned int gate:1;
void *priv;
@@ -51,7 +53,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
int (*deselect)(struct i2c_mux_core *, u32));
/* flags for i2c_mux_alloc */
-#define I2C_MUX_LOCKED BIT(0)
+#define I2C_MUX_LOCKED BIT(0)
+#define I2C_MUX_ARBITRATOR BIT(1)
+#define I2C_MUX_GATE BIT(2)
static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
{
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index fffdc270ca18..6422eef428c4 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -427,6 +427,20 @@ struct i2c_algorithm {
};
/**
+ * struct i2c_lock_operations - represent I2C locking operations
+ * @lock_bus: Get exclusive access to an I2C bus segment
+ * @trylock_bus: Try to get exclusive access to an I2C bus segment
+ * @unlock_bus: Release exclusive access to an I2C bus segment
+ *
+ * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
+ */
+struct i2c_lock_operations {
+ void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
+};
+
+/**
* struct i2c_timings - I2C timing information
* @bus_freq_hz: the bus frequency in Hz
* @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification
@@ -536,6 +550,7 @@ struct i2c_adapter {
void *algo_data;
/* data fields that are valid for all devices */
+ const struct i2c_lock_operations *lock_ops;
struct rt_mutex bus_lock;
struct rt_mutex mux_lock;
@@ -552,10 +567,6 @@ struct i2c_adapter {
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
-
- void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
- int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
- void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -597,7 +608,21 @@ int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
static inline void
i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
- adapter->lock_bus(adapter, flags);
+ adapter->lock_ops->lock_bus(adapter, flags);
+}
+
+/**
+ * i2c_trylock_bus - Try to get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER tries to lock the root i2c adapter,
+ * I2C_LOCK_SEGMENT tries to lock only this branch in the adapter tree
+ *
+ * Return: true if the I2C bus segment is locked, false otherwise
+ */
+static inline int
+i2c_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ return adapter->lock_ops->trylock_bus(adapter, flags);
}
/**
@@ -609,7 +634,7 @@ i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
static inline void
i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
- adapter->unlock_bus(adapter, flags);
+ adapter->lock_ops->unlock_bus(adapter, flags);
}
static inline void
@@ -673,6 +698,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap,
extern struct i2c_adapter *i2c_get_adapter(int nr);
extern void i2c_put_adapter(struct i2c_adapter *adap);
+extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
@@ -766,4 +792,13 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
}
#endif /* CONFIG_OF */
+#if IS_ENABLED(CONFIG_ACPI)
+u32 i2c_acpi_find_bus_speed(struct device *dev);
+#else
+static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+
#endif /* _LINUX_I2C_H */
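A sketch of how the three callbacks can now be grouped into one shared table instead of three per-adapter pointers; the foo_* names are illustrative, only struct i2c_lock_operations and the bus_lock rt_mutex come from this header:

static void foo_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	rt_mutex_lock(&adapter->bus_lock);
}

static int foo_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	return rt_mutex_trylock(&adapter->bus_lock);
}

static void foo_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	rt_mutex_unlock(&adapter->bus_lock);
}

static const struct i2c_lock_operations foo_lock_ops = {
	.lock_bus =    foo_lock_bus,
	.trylock_bus = foo_trylock_bus,
	.unlock_bus =  foo_unlock_bus,
};

/* At registration time the adapter is pointed at one table:
 *	adap->lock_ops = &foo_lock_ops;
 * and i2c_lock_bus()/i2c_trylock_bus()/i2c_unlock_bus() dispatch through it.
 */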
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dcb89e3515db..c6587c01d951 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -45,6 +45,7 @@ struct br_ip_list {
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
+#define BR_MCAST_FLOOD BIT(11)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
+ __be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5f6ce6b578c..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
}
#endif
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+ switch (ethertype) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
- if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
- veth->h_vlan_proto != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(veth->h_vlan_proto))
return -EINVAL;
*vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
* ETH_HLEN otherwise
*/
- if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (eth_type_vlan(type)) {
if (vlan_depth) {
if (WARN_ON(vlan_depth < VLAN_HLEN))
return 0;
@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
- } while (type == htons(ETH_P_8021Q) ||
- type == htons(ETH_P_8021AD));
+ } while (eth_type_vlan(type));
}
if (depth)
@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
- likely(skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)))
+ likely(!eth_type_vlan(skb->protocol)))
return false;
return true;
@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
- if (likely(protocol != htons(ETH_P_8021Q) &&
- protocol != htons(ETH_P_8021AD)))
+ if (likely(!eth_type_vlan(protocol)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
- if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(protocol))
return false;
return true;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 3d672f72e7ec..9edccfba1ffb 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -165,6 +165,18 @@ struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer);
/**
+ * iio_channel_cb_get_iio_dev() - get access to the underlying device.
+ * @cb_buffer: The callback buffer from which we want the device
+ * information.
+ *
+ * This function allows one to obtain information about the device.
+ * The primary aim is to allow drivers that are consuming a device to query
+ * things like current trigger.
+ */
+struct iio_dev
+*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer);
+
+/**
* iio_read_channel_raw() - read from a given channel
* @chan: The channel being queried.
* @val: Value read back.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 854e2dad1e0d..b4a0679e4a49 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -483,6 +483,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
+ * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -523,6 +524,7 @@ struct iio_dev {
bool scan_timestamp;
unsigned scan_index_timestamp;
struct iio_trigger *trig;
+ bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -642,6 +644,7 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
}
void iio_device_free(struct iio_dev *indio_dev);
+int devm_iio_device_match(struct device *dev, void *res, void *data);
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 1c9e028e0d4a..4f1154f7a33c 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -56,6 +56,9 @@ struct iio_trigger_ops {
* @subirqs: [INTERN] information about the 'child' irqs.
* @pool: [INTERN] bitmap of irqs currently in use.
* @pool_lock: [INTERN] protection of the irq pool.
+ * @attached_own_device:[INTERN] if we are using our own device as trigger,
+ * i.e. if we registered a poll function to the same
+ * device as the one providing the trigger.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -73,6 +76,7 @@ struct iio_trigger {
struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
+ bool attached_own_device;
};
@@ -125,12 +129,27 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
**/
int iio_trigger_register(struct iio_trigger *trig_info);
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info);
+
/**
* iio_trigger_unregister() - unregister a trigger from the core
* @trig_info: trigger to be unregistered
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
+void devm_iio_trigger_unregister(struct device *dev,
+ struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_set_immutable() - set an immutable trigger on destination
+ *
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
+ *
+ **/
+int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
+
/**
* iio_trigger_poll() - called on a trigger occurring
* @trig: trigger which occurred
@@ -145,6 +164,13 @@ irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
+/**
+ * iio_trigger_using_own() - tells us if the device uses its own HW trigger
+ * @indio_dev: device to check
+ */
+bool iio_trigger_using_own(struct iio_dev *indio_dev);
+
+
#else
struct iio_trigger;
struct iio_trigger_ops;
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index f72f70d5a97b..30145616773d 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -12,4 +12,12 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
+int devm_iio_triggered_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ const struct iio_buffer_setup_ops *ops);
+void devm_iio_triggered_buffer_cleanup(struct device *dev,
+ struct iio_dev *indio_dev);
+
#endif
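A probe-time sketch of the devm variant, assuming a hypothetical I2C driver (foo_state and foo_trigger_handler are invented; iio_pollfunc_store_time is the stock timestamp pollfunc):

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(struct foo_state));
	if (!indio_dev)
		return -ENOMEM;

	/* Buffer and pollfunc are released automatically on driver detach,
	 * so remove() no longer needs iio_triggered_buffer_cleanup(). */
	ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
					      iio_pollfunc_store_time,
					      foo_trigger_handler, NULL);
	if (ret)
		return ret;

	return devm_iio_device_register(&client->dev, indio_dev);
}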
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ const struct nlmsghdr *unlh, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
+ struct user_namespace *user_ns, bool net_admin);
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/init.h b/include/linux/init.h
index 6935d02474aa..5a3321a7909b 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -41,22 +41,11 @@
discard it in modules) */
#define __init __section(.init.text) __cold notrace
#define __initdata __section(.init.data)
-#define __initconst __constsection(.init.rodata)
+#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
#define __exit_call __used __section(.exitcall.exit)
/*
- * Some architecture have tool chains which do not handle rodata attributes
- * correctly. For those disable special sections for const, so that other
- * architectures can annotate correctly.
- */
-#ifdef CONFIG_BROKEN_RODATA
-#define __constsection(x)
-#else
-#define __constsection(x) __section(x)
-#endif
-
-/*
* modpost check for section mismatches during the kernel build.
* A section mismatch happens when there are references from a
* code or data section to an init section (both code or data).
@@ -75,7 +64,7 @@
*/
#define __ref __section(.ref.text) noinline
#define __refdata __section(.ref.data)
-#define __refconst __constsection(.ref.rodata)
+#define __refconst __section(.ref.rodata)
#ifdef MODULE
#define __exitused
@@ -88,10 +77,10 @@
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace
#define __meminitdata __section(.meminit.data)
-#define __meminitconst __constsection(.meminit.rodata)
+#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
#define __memexitdata __section(.memexit.data)
-#define __memexitconst __constsection(.memexit.rodata)
+#define __memexitconst __section(.memexit.rodata)
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3d70ece10313..e63e288dee83 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -22,6 +22,8 @@ struct vm_fault;
* Flags for iomap mappings:
*/
#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x02 /* block shared with another file */
+#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
/*
* Magic value for blkno:
@@ -64,6 +66,8 @@ struct iomap_ops {
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
struct iomap_ops *ops);
+int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+ struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index beb9ce1c2c23..8c1239020d79 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -7,7 +7,6 @@
/*
* Gives us 8 prio classes with 13-bits of data for each class
*/
-#define IOPRIO_BITS (16)
#define IOPRIO_CLASS_SHIFT (13)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index d10e54f03c09..848e5796400e 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -58,6 +58,7 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct ns_common ns;
};
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5fdc55312334..589d14e970ad 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -150,15 +150,19 @@ static inline u64 get_jiffies_64(void)
/* time_is_before_jiffies(a) return true if a is before jiffies */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
/* time_is_after_jiffies(a) return true if a is after jiffies */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
* Have the 32 bit jiffies value wrap 5 minutes after boot
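A minimal usage sketch for the new 64-bit variants; foo_deadline and the foo_* helpers are hypothetical, the point is that a u64 deadline never wraps:

/* "foo_deadline" is a caller-maintained 64-bit jiffies value. */
static u64 foo_deadline;

static void foo_arm(unsigned int timeout_ms)
{
	foo_deadline = get_jiffies_64() + msecs_to_jiffies(timeout_ms);
}

static bool foo_expired(void)
{
	/* True once the stored deadline lies in the past. */
	return time_is_before_jiffies64(foo_deadline);
}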
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index c2ce155d83cc..f282d4e87258 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -20,7 +20,7 @@
* line that had no enclosing \n). Only to be used by core/arch code
* during early bootup (a continued line is not SMP-safe otherwise).
*/
-#define KERN_CONT ""
+#define KERN_CONT KERN_SOH "c"
/* integer equivalents of KERN_<LEVEL> */
#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 74fd6f05bc5b..bc6ed52a39b9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -733,17 +733,25 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* strict type-checking.. See the
* "unnecessary" pointer comparison.
*/
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-#define max(x, y) ({ \
- typeof(x) _max1 = (x); \
- typeof(y) _max2 = (y); \
- (void) (&_max1 == &_max2); \
- _max1 > _max2 ? _max1 : _max2; })
+#define __min(t1, t2, min1, min2, x, y) ({ \
+ t1 min1 = (x); \
+ t2 min2 = (y); \
+ (void) (&min1 == &min2); \
+ min1 < min2 ? min1 : min2; })
+#define min(x, y) \
+ __min(typeof(x), typeof(y), \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define __max(t1, t2, max1, max2, x, y) ({ \
+ t1 max1 = (x); \
+ t2 max2 = (y); \
+ (void) (&max1 == &max2); \
+ max1 > max2 ? max1 : max2; })
+#define max(x, y) \
+ __max(typeof(x), typeof(y), \
+ __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
+ x, y)
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)
@@ -775,15 +783,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
*
* Or not use min/max/clamp at all, of course.
*/
-#define min_t(type, x, y) ({ \
- type __min1 = (x); \
- type __min2 = (y); \
- __min1 < __min2 ? __min1: __min2; })
-
-#define max_t(type, x, y) ({ \
- type __max1 = (x); \
- type __max2 = (y); \
- __max1 > __max2 ? __max1: __max2; })
+#define min_t(type, x, y) \
+ __min(type, type, \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define max_t(type, x, y) \
+ __max(type, type, \
+ __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
+ x, y)
/**
* clamp_t - return a value clamped to a given range using a given type
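The switch to __UNIQUE_ID is about nesting: min3(a, b, c) expands to min((typeof(a))min(a, b), c), and with the old definition both expansions declared _min1/_min2, so the inner pair shadowed the outer one and -Wshadow builds complained. A tiny sketch of the effect (illustrative, not from the patch; foo_smallest() is a made-up name):

static int foo_smallest(int a, int b, int c)
{
	/* Each expansion of min() now declares temporaries whose names come
	 * from __UNIQUE_ID (prefix plus a __COUNTER__ value), so the inner
	 * expansion inside min3() no longer redeclares the identifiers of
	 * the outer one and -Wshadow stays quiet. */
	return min3(a, b, c);	/* expands to min((typeof(a))min(a, b), c) */
}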
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3ffc69ebe967..0fb7ffb1775f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -238,6 +238,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9c28b4d4c90b..01c0b9cc3915 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -265,6 +265,7 @@ struct kvm_vcpu {
#endif
bool preempted;
struct kvm_vcpu_arch arch;
+ struct dentry *debugfs_dentry;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -749,6 +750,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+bool kvm_arch_has_vcpu_debugfs(void);
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 8a3b5d29602f..ddfcb2df3656 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -359,6 +359,11 @@ struct led_platform_data {
struct led_info *leds;
};
+struct gpio_desc;
+typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+
/* For the leds-gpio driver */
struct gpio_led {
const char *name;
@@ -382,9 +387,7 @@ struct gpio_led_platform_data {
#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
#define GPIO_LED_BLINK 2 /* Please, blink */
- int (*gpio_blink_set)(struct gpio_desc *desc, int state,
- unsigned long *delay_on,
- unsigned long *delay_off);
+ gpio_blink_set_t gpio_blink_set;
};
#ifdef CONFIG_NEW_LEDS
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ba78b8306674..d190786e4ad8 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -352,7 +352,10 @@ struct nvm_dev {
/* Backend device */
struct request_queue *q;
+ struct device dev;
+ struct device *parent_dev;
char name[DISK_NAME_LEN];
+ void *private_data;
struct mutex mlock;
spinlock_t lock;
@@ -524,9 +527,9 @@ extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
-extern int nvm_register(struct request_queue *, char *,
- struct nvm_dev_ops *);
-extern void nvm_unregister(char *);
+extern struct nvm_dev *nvm_alloc_dev(int);
+extern int nvm_register(struct nvm_dev *);
+extern void nvm_unregister(struct nvm_dev *);
void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
@@ -575,11 +578,14 @@ extern int nvm_dev_factory(struct nvm_dev *, int flags);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
-static inline int nvm_register(struct request_queue *q, char *disk_name,
- struct nvm_dev_ops *ops)
+static inline struct nvm_dev *nvm_alloc_dev(int node)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int nvm_register(struct nvm_dev *dev)
{
return -EINVAL;
}
-static inline void nvm_unregister(char *disk_name) {}
+static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* LIGHTNVM.H */
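The registration handoff now revolves around a core-allocated nvm_dev that the backing driver fills in. A sketch from the driver's side (foo_register_lightnvm(), foo_nvm_dev_ops and the priv pointer are hypothetical):

static int foo_register_lightnvm(struct request_queue *q, char *disk_name,
				 int node, void *priv)
{
	struct nvm_dev *ndev;

	ndev = nvm_alloc_dev(node);
	if (!ndev)
		return -ENOMEM;

	/* The driver now fills in what it used to pass as arguments. */
	ndev->q = q;
	ndev->ops = &foo_nvm_dev_ops;		/* hypothetical ops table */
	ndev->private_data = priv;
	strlcpy(ndev->name, disk_name, DISK_NAME_LEN);

	return nvm_register(ndev);		/* old form: nvm_register(q, disk_name, ops) */
}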
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a93a0b23dc8d..9072f04db616 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -116,6 +116,9 @@ int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
+void arch_klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj);
+
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eabe0138eb06..c1458fede1f9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -16,6 +16,8 @@ struct lockdep_map;
extern int prove_locking;
extern int lock_stat;
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -29,8 +31,6 @@ extern int lock_stat;
*/
#define XXX_LOCK_USAGE_STATES (1+3*4)
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index ffb9c9da4f39..e58e577117b6 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -59,6 +59,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
#define LSM_AUDIT_DATA_IOCTL_OP 11
+#define LSM_AUDIT_DATA_FILE 12
union {
struct path path;
struct dentry *dentry;
@@ -75,6 +76,7 @@ struct common_audit_data {
#endif
char *kmod_name;
struct lsm_ioctlop_audit *op;
+ struct file *file;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 101bf19c0f41..558adfa5c8a8 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -151,6 +151,16 @@
* @name name of the last path component used to create file
* @ctx pointer to place the pointer to the resulting context in.
* @ctxlen point to place the length of the resulting context.
+ * @dentry_create_files_as:
+ * Compute a context for a dentry as the inode is not yet available
+ * and set that context in passed in creds so that new files are
+ * created using that context. Context is calculated using the
+ * passed in creds and not the creds of the caller.
+ * @dentry dentry to use in calculating the context.
+ * @mode mode used to determine resource type.
+ * @name name of the last path component used to create file
+ * @old creds which should be used for context calculation
+ * @new creds to modify
*
*
* Security hooks for inode operations.
@@ -401,6 +411,23 @@
* @inode contains a pointer to the inode.
* @secid contains a pointer to the location where result will be saved.
* In case of failure, @secid will be set to zero.
+ * @inode_copy_up:
+ * A file is about to be copied up from lower layer to upper layer of
+ * overlay filesystem. Security module can prepare a set of new creds
+ * and modify as need be and return new creds. Caller will switch to
+ * new creds temporarily to create new file and release newly allocated
+ * creds.
+ * @src indicates the union dentry of file that is being copied up.
+ * @new pointer to pointer to return newly allocated creds.
+ * Returns 0 on success or a negative error code on error.
+ * @inode_copy_up_xattr:
+ * Filter the xattrs being copied up when a unioned file is copied
+ * up from a lower layer to the union/overlay layer.
+ * @name indicates the name of the xattr.
+ * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
+ * security module does not know about attribute or a negative error code
+ * to abort the copy up. Note that the caller is responsible for reading
+ * and writing the xattrs as this hook is merely a filter.
*
* Security hooks for file operations
*
@@ -1358,6 +1385,10 @@ union security_list_options {
int (*dentry_init_security)(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
+ int (*dentry_create_files_as)(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new);
#ifdef CONFIG_SECURITY_PATH
@@ -1425,6 +1456,8 @@ union security_list_options {
int (*inode_listsecurity)(struct inode *inode, char *buffer,
size_t buffer_size);
void (*inode_getsecid)(struct inode *inode, u32 *secid);
+ int (*inode_copy_up)(struct dentry *src, struct cred **new);
+ int (*inode_copy_up_xattr)(const char *name);
int (*file_permission)(struct file *file, int mask);
int (*file_alloc_security)(struct file *file);
@@ -1455,7 +1488,6 @@ union security_list_options {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
- int (*kernel_module_from_file)(struct file *file);
int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -1656,6 +1688,7 @@ struct security_hook_heads {
struct list_head sb_clone_mnt_opts;
struct list_head sb_parse_opts_str;
struct list_head dentry_init_security;
+ struct list_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
struct list_head path_unlink;
struct list_head path_mkdir;
@@ -1696,6 +1729,8 @@ struct security_hook_heads {
struct list_head inode_setsecurity;
struct list_head inode_listsecurity;
struct list_head inode_getsecid;
+ struct list_head inode_copy_up;
+ struct list_head inode_copy_up_xattr;
struct list_head file_permission;
struct list_head file_alloc_security;
struct list_head file_free_security;
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index d610232762e3..2931aa43dab1 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -11,6 +11,8 @@
#ifndef __LINUX_MBUS_H
#define __LINUX_MBUS_H
+#include <linux/errno.h>
+
struct resource;
struct mbus_dram_target_info
@@ -55,6 +57,8 @@ struct mbus_dram_target_info
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
@@ -64,14 +68,24 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
{
return NULL;
}
+static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size,
+ u8 *target, u8 *attr)
+{
+ /*
+ * On all ARM32 MVEBU platforms with MBus support, this stub
+ * function will not get called. The real function from the
+ * MBus driver is called instead. ARM64 MVEBU platforms like
+ * the Armada 3700 could use the mv_xor device driver which calls
+ * into this function
+ */
+ return -EINVAL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
-int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
- u8 *attr);
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
unsigned int attribute,
phys_addr_t base, size_t size,
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index ead13d233a97..4097ac9ea13a 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -41,15 +41,17 @@ struct mcb_bus {
char name[CHAMELEON_FILENAME_LEN + 1];
int (*get_irq)(struct mcb_device *dev);
};
-#define to_mcb_bus(b) container_of((b), struct mcb_bus, dev)
+
+static inline struct mcb_bus *to_mcb_bus(struct device *dev)
+{
+ return container_of(dev, struct mcb_bus, dev);
+}
/**
* struct mcb_device - MEN Chameleon Bus device
*
- * @bus_list: internal list handling for bus code
* @dev: device in kernel representation
* @bus: mcb bus the device is plugged to
- * @subordinate: subordinate MCBus in case of bridge
* @is_added: flag to check if device is added to bus
* @driver: associated mcb_driver
* @id: mcb device id
@@ -62,10 +64,8 @@ struct mcb_bus {
* @memory: memory resource
*/
struct mcb_device {
- struct list_head bus_list;
struct device dev;
struct mcb_bus *bus;
- struct mcb_bus *subordinate;
bool is_added;
struct mcb_driver *driver;
u16 id;
@@ -76,8 +76,13 @@ struct mcb_device {
int rev;
struct resource irq;
struct resource mem;
+ struct device *dma_dev;
};
-#define to_mcb_device(x) container_of((x), struct mcb_device, dev)
+
+static inline struct mcb_device *to_mcb_device(struct device *dev)
+{
+ return container_of(dev, struct mcb_device, dev);
+}
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -95,7 +100,11 @@ struct mcb_driver {
void (*remove)(struct mcb_device *mdev);
void (*shutdown)(struct mcb_device *mdev);
};
-#define to_mcb_driver(x) container_of((x), struct mcb_driver, driver)
+
+static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct mcb_driver, driver);
+}
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2925da23505d..5b759c9acf97 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -328,6 +328,7 @@ phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5d8ca6e02e39..61d20c17f3b7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -366,6 +366,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+int mem_cgroup_scan_tasks(struct mem_cgroup *,
+ int (*)(struct task_struct *, void *), void *);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -446,6 +448,8 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
void mem_cgroup_handle_over_high(void);
+unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -639,6 +643,12 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
+static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*fn)(struct task_struct *, void *), void *arg)
+{
+ return 0;
+}
+
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
@@ -669,6 +679,11 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
return 0;
}
+static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -758,13 +773,13 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-void sock_update_memcg(struct sock *sk);
-void sock_release_memcg(struct sock *sk);
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+void mem_cgroup_sk_alloc(struct sock *sk);
+void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
@@ -777,6 +792,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
}
#else
#define mem_cgroup_sockets_enabled 0
+static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
+static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index d409ceb2231e..c118a7ec94d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -350,7 +350,7 @@ static inline int pm80x_dev_suspend(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- set_bit((1 << irq), &chip->wu_flag);
+ set_bit(irq, &chip->wu_flag);
return 0;
}
@@ -362,7 +362,7 @@ static inline int pm80x_dev_resume(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- clear_bit((1 << irq), &chip->wu_flag);
+ clear_bit(irq, &chip->wu_flag);
return 0;
}
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 9475fee2bfc5..d33c245e75ca 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -63,6 +63,8 @@ enum ab8500_version {
#define AB8500_STE_TEST 0x14
#define AB8500_OTP_EMUL 0x15
+#define AB8500_DEBUG_FIELD_LAST 0x16
+
/*
* Interrupts
* Values used to index into array ab8500_irq_regoffset[] defined in
diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h
new file mode 100644
index 000000000000..3c148f196b9f
--- /dev/null
+++ b/include/linux/mfd/ac100.h
@@ -0,0 +1,178 @@
+/*
+ * Functions and registers to access AC100 codec / RTC combo IC.
+ *
+ * Copyright (C) 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_AC100_H
+#define __LINUX_MFD_AC100_H
+
+#include <linux/regmap.h>
+
+struct ac100_dev {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/* Audio codec related registers */
+#define AC100_CHIP_AUDIO_RST 0x00
+#define AC100_PLL_CTRL1 0x01
+#define AC100_PLL_CTRL2 0x02
+#define AC100_SYSCLK_CTRL 0x03
+#define AC100_MOD_CLK_ENA 0x04
+#define AC100_MOD_RST_CTRL 0x05
+#define AC100_I2S_SR_CTRL 0x06
+
+/* I2S1 interface */
+#define AC100_I2S1_CLK_CTRL 0x10
+#define AC100_I2S1_SND_OUT_CTRL 0x11
+#define AC100_I2S1_SND_IN_CTRL 0x12
+#define AC100_I2S1_MXR_SRC 0x13
+#define AC100_I2S1_VOL_CTRL1 0x14
+#define AC100_I2S1_VOL_CTRL2 0x15
+#define AC100_I2S1_VOL_CTRL3 0x16
+#define AC100_I2S1_VOL_CTRL4 0x17
+#define AC100_I2S1_MXR_GAIN 0x18
+
+/* I2S2 interface */
+#define AC100_I2S2_CLK_CTRL 0x20
+#define AC100_I2S2_SND_OUT_CTRL 0x21
+#define AC100_I2S2_SND_IN_CTRL 0x22
+#define AC100_I2S2_MXR_SRC 0x23
+#define AC100_I2S2_VOL_CTRL1 0x24
+#define AC100_I2S2_VOL_CTRL2 0x25
+#define AC100_I2S2_VOL_CTRL3 0x26
+#define AC100_I2S2_VOL_CTRL4 0x27
+#define AC100_I2S2_MXR_GAIN 0x28
+
+/* I2S3 interface */
+#define AC100_I2S3_CLK_CTRL 0x30
+#define AC100_I2S3_SND_OUT_CTRL 0x31
+#define AC100_I2S3_SND_IN_CTRL 0x32
+#define AC100_I2S3_SIG_PATH_CTRL 0x33
+
+/* ADC digital controls */
+#define AC100_ADC_DIG_CTRL 0x40
+#define AC100_ADC_VOL_CTRL 0x41
+
+/* HMIC plug sensing / key detection */
+#define AC100_HMIC_CTRL1 0x44
+#define AC100_HMIC_CTRL2 0x45
+#define AC100_HMIC_STATUS 0x46
+
+/* DAC digital controls */
+#define AC100_DAC_DIG_CTRL 0x48
+#define AC100_DAC_VOL_CTRL 0x49
+#define AC100_DAC_MXR_SRC 0x4c
+#define AC100_DAC_MXR_GAIN 0x4d
+
+/* Analog controls */
+#define AC100_ADC_APC_CTRL 0x50
+#define AC100_ADC_SRC 0x51
+#define AC100_ADC_SRC_BST_CTRL 0x52
+#define AC100_OUT_MXR_DAC_A_CTRL 0x53
+#define AC100_OUT_MXR_SRC 0x54
+#define AC100_OUT_MXR_SRC_BST 0x55
+#define AC100_HPOUT_CTRL 0x56
+#define AC100_ERPOUT_CTRL 0x57
+#define AC100_SPKOUT_CTRL 0x58
+#define AC100_LINEOUT_CTRL 0x59
+
+/* ADC digital audio processing (high pass filter & auto gain control */
+#define AC100_ADC_DAP_L_STA 0x80
+#define AC100_ADC_DAP_R_STA 0x81
+#define AC100_ADC_DAP_L_CTRL 0x82
+#define AC100_ADC_DAP_R_CTRL 0x83
+#define AC100_ADC_DAP_L_T_L 0x84 /* Left Target Level */
+#define AC100_ADC_DAP_R_T_L 0x85 /* Right Target Level */
+#define AC100_ADC_DAP_L_H_A_C 0x86 /* Left High Avg. Coef */
+#define AC100_ADC_DAP_L_L_A_C 0x87 /* Left Low Avg. Coef */
+#define AC100_ADC_DAP_R_H_A_C 0x88 /* Right High Avg. Coef */
+#define AC100_ADC_DAP_R_L_A_C 0x89 /* Right Low Avg. Coef */
+#define AC100_ADC_DAP_L_D_T 0x8a /* Left Decay Time */
+#define AC100_ADC_DAP_L_A_T 0x8b /* Left Attack Time */
+#define AC100_ADC_DAP_R_D_T 0x8c /* Right Decay Time */
+#define AC100_ADC_DAP_R_A_T 0x8d /* Right Attack Time */
+#define AC100_ADC_DAP_N_TH 0x8e /* Noise Threshold */
+#define AC100_ADC_DAP_L_H_N_A_C 0x8f /* Left High Noise Avg. Coef */
+#define AC100_ADC_DAP_L_L_N_A_C 0x90 /* Left Low Noise Avg. Coef */
+#define AC100_ADC_DAP_R_H_N_A_C 0x91 /* Right High Noise Avg. Coef */
+#define AC100_ADC_DAP_R_L_N_A_C 0x92 /* Right Low Noise Avg. Coef */
+#define AC100_ADC_DAP_H_HPF_C 0x93 /* High High-Pass-Filter Coef */
+#define AC100_ADC_DAP_L_HPF_C 0x94 /* Low High-Pass-Filter Coef */
+#define AC100_ADC_DAP_OPT 0x95 /* AGC Optimum */
+
+/* DAC digital audio processing (high pass filter & dynamic range control) */
+#define AC100_DAC_DAP_CTRL 0xa0
+#define AC100_DAC_DAP_H_HPF_C 0xa1 /* High High-Pass-Filter Coef */
+#define AC100_DAC_DAP_L_HPF_C 0xa2 /* Low High-Pass-Filter Coef */
+#define AC100_DAC_DAP_L_H_E_A_C 0xa3 /* Left High Energy Avg Coef */
+#define AC100_DAC_DAP_L_L_E_A_C 0xa4 /* Left Low Energy Avg Coef */
+#define AC100_DAC_DAP_R_H_E_A_C 0xa5 /* Right High Energy Avg Coef */
+#define AC100_DAC_DAP_R_L_E_A_C 0xa6 /* Right Low Energy Avg Coef */
+#define AC100_DAC_DAP_H_G_D_T_C 0xa7 /* High Gain Delay Time Coef */
+#define AC100_DAC_DAP_L_G_D_T_C 0xa8 /* Low Gain Delay Time Coef */
+#define AC100_DAC_DAP_H_G_A_T_C 0xa9 /* High Gain Attack Time Coef */
+#define AC100_DAC_DAP_L_G_A_T_C 0xaa /* Low Gain Attack Time Coef */
+#define AC100_DAC_DAP_H_E_TH 0xab /* High Energy Threshold */
+#define AC100_DAC_DAP_L_E_TH 0xac /* Low Energy Threshold */
+#define AC100_DAC_DAP_H_G_K 0xad /* High Gain K parameter */
+#define AC100_DAC_DAP_L_G_K 0xae /* Low Gain K parameter */
+#define AC100_DAC_DAP_H_G_OFF 0xaf /* High Gain offset */
+#define AC100_DAC_DAP_L_G_OFF 0xb0 /* Low Gain offset */
+#define AC100_DAC_DAP_OPT 0xb1 /* DRC optimum */
+
+/* Digital audio processing enable */
+#define AC100_ADC_DAP_ENA 0xb4
+#define AC100_DAC_DAP_ENA 0xb5
+
+/* SRC control */
+#define AC100_SRC1_CTRL1 0xb8
+#define AC100_SRC1_CTRL2 0xb9
+#define AC100_SRC1_CTRL3 0xba
+#define AC100_SRC1_CTRL4 0xbb
+#define AC100_SRC2_CTRL1 0xbc
+#define AC100_SRC2_CTRL2 0xbd
+#define AC100_SRC2_CTRL3 0xbe
+#define AC100_SRC2_CTRL4 0xbf
+
+/* RTC clk control */
+#define AC100_CLK32K_ANALOG_CTRL 0xc0
+#define AC100_CLKOUT_CTRL1 0xc1
+#define AC100_CLKOUT_CTRL2 0xc2
+#define AC100_CLKOUT_CTRL3 0xc3
+
+/* RTC module */
+#define AC100_RTC_RST 0xc6
+#define AC100_RTC_CTRL 0xc7
+#define AC100_RTC_SEC 0xc8 /* second */
+#define AC100_RTC_MIN 0xc9 /* minute */
+#define AC100_RTC_HOU 0xca /* hour */
+#define AC100_RTC_WEE 0xcb /* weekday */
+#define AC100_RTC_DAY 0xcc /* day */
+#define AC100_RTC_MON 0xcd /* month */
+#define AC100_RTC_YEA 0xce /* year */
+#define AC100_RTC_UPD 0xcf /* update trigger */
+
+/* RTC alarm */
+#define AC100_ALM_INT_ENA 0xd0
+#define AC100_ALM_INT_STA 0xd1
+#define AC100_ALM_SEC 0xd8
+#define AC100_ALM_MIN 0xd9
+#define AC100_ALM_HOU 0xda
+#define AC100_ALM_WEE 0xdb
+#define AC100_ALM_DAY 0xdc
+#define AC100_ALM_MON 0xdd
+#define AC100_ALM_YEA 0xde
+#define AC100_ALM_UPD 0xdf
+
+/* RTC general purpose register 0 ~ 15 */
+#define AC100_RTC_GP(x) (0xe0 + (x))
+
+#endif /* __LINUX_MFD_AC100_H */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 58ab4c0fe761..b31b3be7f8c9 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -13,6 +13,7 @@
#ifndef _WM_ARIZONA_CORE_H
#define _WM_ARIZONA_CORE_H
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
@@ -21,6 +22,12 @@
#define ARIZONA_MAX_CORE_SUPPLIES 2
+enum {
+ ARIZONA_MCLK1,
+ ARIZONA_MCLK2,
+ ARIZONA_NUM_MCLK
+};
+
enum arizona_type {
WM5102 = 1,
WM5110 = 2,
@@ -139,6 +146,8 @@ struct arizona {
struct mutex clk_lock;
int clk32k_ref;
+ struct clk *mclk[ARIZONA_NUM_MCLK];
+
bool ctrlif_error;
struct snd_soc_dapm_context *dapm;
@@ -182,7 +191,4 @@ int cs47l24_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
int wm8998_patch(struct arizona *arizona);
-extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
- bool mandatory);
-
#endif
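
A sketch of how a child driver might use the new mclk[] handles, assuming the core has populated them (e.g. via clk_get() during MFD probe) and that the clocks are optional; the helper name is made up for illustration:

	static int arizona_enable_mclk1(struct arizona *arizona)
	{
		if (!arizona->mclk[ARIZONA_MCLK1])
			return 0;	/* MCLK1 is assumed optional */

		return clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
	}
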
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 0be4982f08fe..fec597fb34cb 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP806_ID,
AXP809_ID,
NR_AXP20X_VARIANTS,
};
@@ -91,6 +92,30 @@ enum {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP806_STARTUP_SRC 0x00
+#define AXP806_CHIP_ID 0x03
+#define AXP806_PWR_OUT_CTRL1 0x10
+#define AXP806_PWR_OUT_CTRL2 0x11
+#define AXP806_DCDCA_V_CTRL 0x12
+#define AXP806_DCDCB_V_CTRL 0x13
+#define AXP806_DCDCC_V_CTRL 0x14
+#define AXP806_DCDCD_V_CTRL 0x15
+#define AXP806_DCDCE_V_CTRL 0x16
+#define AXP806_ALDO1_V_CTRL 0x17
+#define AXP806_ALDO2_V_CTRL 0x18
+#define AXP806_ALDO3_V_CTRL 0x19
+#define AXP806_DCDC_MODE_CTRL1 0x1a
+#define AXP806_DCDC_MODE_CTRL2 0x1b
+#define AXP806_DCDC_FREQ_CTRL 0x1c
+#define AXP806_BLDO1_V_CTRL 0x20
+#define AXP806_BLDO2_V_CTRL 0x21
+#define AXP806_BLDO3_V_CTRL 0x22
+#define AXP806_BLDO4_V_CTRL 0x23
+#define AXP806_CLDO1_V_CTRL 0x24
+#define AXP806_CLDO2_V_CTRL 0x25
+#define AXP806_CLDO3_V_CTRL 0x26
+#define AXP806_VREF_TEMP_WARN_L 0xf3
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -266,6 +291,26 @@ enum {
};
enum {
+ AXP806_DCDCA = 0,
+ AXP806_DCDCB,
+ AXP806_DCDCC,
+ AXP806_DCDCD,
+ AXP806_DCDCE,
+ AXP806_ALDO1,
+ AXP806_ALDO2,
+ AXP806_ALDO3,
+ AXP806_BLDO1,
+ AXP806_BLDO2,
+ AXP806_BLDO3,
+ AXP806_BLDO4,
+ AXP806_CLDO1,
+ AXP806_CLDO2,
+ AXP806_CLDO3,
+ AXP806_SW,
+ AXP806_REG_ID_MAX,
+};
+
+enum {
AXP809_DCDC1 = 0,
AXP809_DCDC2,
AXP809_DCDC3,
@@ -414,6 +459,21 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp806_irqs {
+ AXP806_IRQ_DIE_TEMP_HIGH_LV1,
+ AXP806_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP806_IRQ_DCDCA_V_LOW,
+ AXP806_IRQ_DCDCB_V_LOW,
+ AXP806_IRQ_DCDCC_V_LOW,
+ AXP806_IRQ_DCDCD_V_LOW,
+ AXP806_IRQ_DCDCE_V_LOW,
+ AXP806_IRQ_PWROK_LONG,
+ AXP806_IRQ_PWROK_SHORT,
+ AXP806_IRQ_WAKEUP,
+ AXP806_IRQ_PWROK_FALL,
+ AXP806_IRQ_PWROK_RISE,
+};
+
enum axp809_irqs {
AXP809_IRQ_ACIN_OVER_V = 1,
AXP809_IRQ_ACIN_PLUGIN,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index d641a18abacb..76f7ef4d3a0d 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -109,6 +109,10 @@ struct cros_ec_command {
* should check msg.result for the EC's result code.
* @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
+ * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
+ * @event_notifier: interrupt event notifier for transport devices.
+ * @event_data: raw payload transferred with the MKBP event.
+ * @event_size: size in bytes of the event data.
*/
struct cros_ec_device {
@@ -137,6 +141,11 @@ struct cros_ec_device {
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
struct mutex lock;
+ bool mkbp_event_supported;
+ struct blocking_notifier_head event_notifier;
+
+ struct ec_response_get_next_event event_data;
+ int event_size;
};
/* struct cros_ec_platform - ChromeOS EC platform information
@@ -269,6 +278,15 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
*/
int cros_ec_query_all(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
+ *
+ * @ec_dev: Device to fetch event from
+ *
+ * Returns: 0 on success, Linux error number on failure
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev);
+
/* sysfs stuff */
extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
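
A rough sketch of how a client driver might consume MKBP events through the new notifier, assuming the core has already filled event_data via cros_ec_get_next_event() before notifying; the function and variable names are illustrative, not taken from this patch:

	static struct notifier_block my_nb;

	static int my_mkbp_notify(struct notifier_block *nb, unsigned long unused,
				  void *_ec)
	{
		struct cros_ec_device *ec = _ec;

		if (ec->event_data.event_type == EC_MKBP_EVENT_KEY_MATRIX) {
			/* ec->event_size bytes of ec->event_data.data are valid here */
		}

		return NOTIFY_OK;
	}

	static void my_register(struct cros_ec_device *ec)
	{
		my_nb.notifier_call = my_mkbp_notify;
		blocking_notifier_chain_register(&ec->event_notifier, &my_nb);
	}
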
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 7e7a8d4b4551..76728ff37d01 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1793,6 +1793,40 @@ struct ec_result_keyscan_seq_ctrl {
};
} __packed;
+/*
+ * Command for retrieving the next pending MKBP event from the EC device
+ *
+ * The device replies with UNAVAILABLE if there aren't any pending events.
+ */
+#define EC_CMD_GET_NEXT_EVENT 0x67
+
+enum ec_mkbp_event {
+ /* Keyboard matrix changed. The event data is the new matrix state. */
+ EC_MKBP_EVENT_KEY_MATRIX = 0,
+
+ /* New host event. The event data is 4 bytes of host event flags. */
+ EC_MKBP_EVENT_HOST_EVENT = 1,
+
+ /* New Sensor FIFO data. The event data is fifo_info structure. */
+ EC_MKBP_EVENT_SENSOR_FIFO = 2,
+
+ /* Number of MKBP events */
+ EC_MKBP_EVENT_COUNT,
+};
+
+union ec_response_get_next_data {
+ uint8_t key_matrix[13];
+
+ /* Unaligned */
+ uint32_t host_event;
+} __packed;
+
+struct ec_response_get_next_event {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data data;
+} __packed;
+
/*****************************************************************************/
/* Temperature sensor commands */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 621af82123c6..f3ae65db4c86 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 612383bd80ae..8a125701ef7b 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 2e0ba6d5fbc3..5d42859cb441 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 0bd69446bb05..7ba67b55b312 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -538,7 +538,6 @@ int db8500_prcmu_get_arm_opp(void);
int db8500_prcmu_set_ape_opp(u8 opp);
int db8500_prcmu_get_ape_opp(void);
int db8500_prcmu_request_ape_opp_100_voltage(bool enable);
-int db8500_prcmu_set_ddr_opp(u8 opp);
int db8500_prcmu_get_ddr_opp(void);
u32 db8500_prcmu_read(unsigned int reg);
@@ -594,11 +593,6 @@ static inline int prcmu_release_usb_wakeup_state(void)
return 0;
}
-static inline int db8500_prcmu_set_ddr_opp(u8 opp)
-{
- return 0;
-}
-
static inline int db8500_prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 5d374601404c..2e2c6a63a065 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -269,10 +269,6 @@ unsigned long prcmu_clock_rate(u8 clock);
long prcmu_round_clock_rate(u8 clock, unsigned long rate);
int prcmu_set_clock_rate(u8 clock, unsigned long rate);
-static inline int prcmu_set_ddr_opp(u8 opp)
-{
- return db8500_prcmu_set_ddr_opp(opp);
-}
static inline int prcmu_get_ddr_opp(void)
{
return db8500_prcmu_get_ddr_opp();
@@ -489,11 +485,6 @@ static inline int prcmu_get_arm_opp(void)
return ARM_100_OPP;
}
-static inline int prcmu_set_ddr_opp(u8 opp)
-{
- return 0;
-}
-
static inline int prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
new file mode 100644
index 000000000000..edbec8350a49
--- /dev/null
+++ b/include/linux/mfd/lp873x.h
@@ -0,0 +1,268 @@
+/*
+ * Functions to access LP873X power management chip.
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_LP873X_H
+#define __LINUX_MFD_LP873X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/* LP873x chip id list */
+#define LP873X 0x00
+
+/* All register addresses */
+#define LP873X_REG_DEV_REV 0X00
+#define LP873X_REG_OTP_REV 0X01
+#define LP873X_REG_BUCK0_CTRL_1 0X02
+#define LP873X_REG_BUCK0_CTRL_2 0X03
+#define LP873X_REG_BUCK1_CTRL_1 0X04
+#define LP873X_REG_BUCK1_CTRL_2 0X05
+#define LP873X_REG_BUCK0_VOUT 0X06
+#define LP873X_REG_BUCK1_VOUT 0X07
+#define LP873X_REG_LDO0_CTRL 0X08
+#define LP873X_REG_LDO1_CTRL 0X09
+#define LP873X_REG_LDO0_VOUT 0X0A
+#define LP873X_REG_LDO1_VOUT 0X0B
+#define LP873X_REG_BUCK0_DELAY 0X0C
+#define LP873X_REG_BUCK1_DELAY 0X0D
+#define LP873X_REG_LDO0_DELAY 0X0E
+#define LP873X_REG_LDO1_DELAY 0X0F
+#define LP873X_REG_GPO_DELAY 0X10
+#define LP873X_REG_GPO2_DELAY 0X11
+#define LP873X_REG_GPO_CTRL 0X12
+#define LP873X_REG_CONFIG 0X13
+#define LP873X_REG_PLL_CTRL 0X14
+#define LP873X_REG_PGOOD_CTRL1 0X15
+#define LP873X_REG_PGOOD_CTRL2 0X16
+#define LP873X_REG_PG_FAULT 0X17
+#define LP873X_REG_RESET 0X18
+#define LP873X_REG_INT_TOP_1 0X19
+#define LP873X_REG_INT_TOP_2 0X1A
+#define LP873X_REG_INT_BUCK 0X1B
+#define LP873X_REG_INT_LDO 0X1C
+#define LP873X_REG_TOP_STAT 0X1D
+#define LP873X_REG_BUCK_STAT 0X1E
+#define LP873X_REG_LDO_STAT 0x1F
+#define LP873X_REG_TOP_MASK_1 0x20
+#define LP873X_REG_TOP_MASK_2 0x21
+#define LP873X_REG_BUCK_MASK 0x22
+#define LP873X_REG_LDO_MASK 0x23
+#define LP873X_REG_SEL_I_LOAD 0x24
+#define LP873X_REG_I_LOAD_2 0x25
+#define LP873X_REG_I_LOAD_1 0x26
+
+#define LP873X_REG_MAX LP873X_REG_I_LOAD_1
+
+/* Register field definitions */
+#define LP873X_DEV_REV_DEV_ID 0xC0
+#define LP873X_DEV_REV_ALL_LAYER 0x30
+#define LP873X_DEV_REV_METAL_LAYER 0x0F
+
+#define LP873X_OTP_REV_OTP_ID 0xFF
+
+#define LP873X_BUCK0_CTRL_1_BUCK0_FPWM BIT(3)
+#define LP873X_BUCK0_CTRL_1_BUCK0_RDIS_EN BIT(2)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN_PIN_CTRL BIT(1)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN BIT(0)
+
+#define LP873X_BUCK0_CTRL_2_BUCK0_ILIM 0x38
+#define LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE 0x07
+
+#define LP873X_BUCK1_CTRL_1_BUCK1_FPWM BIT(3)
+#define LP873X_BUCK1_CTRL_1_BUCK1_RDIS_EN BIT(2)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN_PIN_CTRL BIT(1)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN BIT(0)
+
+#define LP873X_BUCK1_CTRL_2_BUCK1_ILIM 0x38
+#define LP873X_BUCK1_CTRL_2_BUCK1_SLEW_RATE 0x07
+
+#define LP873X_BUCK0_VOUT_BUCK0_VSET 0xFF
+
+#define LP873X_BUCK1_VOUT_BUCK1_VSET 0xFF
+
+#define LP873X_LDO0_CTRL_LDO0_RDIS_EN BIT(2)
+#define LP873X_LDO0_CTRL_LDO0_EN_PIN_CTRL BIT(1)
+#define LP873X_LDO0_CTRL_LDO0_EN BIT(0)
+
+#define LP873X_LDO1_CTRL_LDO1_RDIS_EN BIT(2)
+#define LP873X_LDO1_CTRL_LDO1_EN_PIN_CTRL BIT(1)
+#define LP873X_LDO1_CTRL_LDO1_EN BIT(0)
+
+#define LP873X_LDO0_VOUT_LDO0_VSET 0x1F
+
+#define LP873X_LDO1_VOUT_LDO1_VSET 0x1F
+
+#define LP873X_BUCK0_DELAY_BUCK0_SD_DELAY 0xF0
+#define LP873X_BUCK0_DELAY_BUCK0_SU_DELAY 0x0F
+
+#define LP873X_BUCK1_DELAY_BUCK1_SD_DELAY 0xF0
+#define LP873X_BUCK1_DELAY_BUCK1_SU_DELAY 0x0F
+
+#define LP873X_LDO0_DELAY_LDO0_SD_DELAY 0xF0
+#define LP873X_LDO0_DELAY_LDO0_SU_DELAY 0x0F
+
+#define LP873X_LDO1_DELAY_LDO1_SD_DELAY 0xF0
+#define LP873X_LDO1_DELAY_LDO1_SU_DELAY 0x0F
+
+#define LP873X_GPO_DELAY_GPO_SD_DELAY 0xF0
+#define LP873X_GPO_DELAY_GPO_SU_DELAY 0x0F
+
+#define LP873X_GPO2_DELAY_GPO2_SD_DELAY 0xF0
+#define LP873X_GPO2_DELAY_GPO2_SU_DELAY 0x0F
+
+#define LP873X_GPO_CTRL_GPO2_OD BIT(6)
+#define LP873X_GPO_CTRL_GPO2_EN_PIN_CTRL BIT(5)
+#define LP873X_GPO_CTRL_GPO2_EN BIT(4)
+#define LP873X_GPO_CTRL_GPO_OD BIT(2)
+#define LP873X_GPO_CTRL_GPO_EN_PIN_CTRL BIT(1)
+#define LP873X_GPO_CTRL_GPO_EN BIT(0)
+
+#define LP873X_CONFIG_SU_DELAY_SEL BIT(6)
+#define LP873X_CONFIG_SD_DELAY_SEL BIT(5)
+#define LP873X_CONFIG_CLKIN_PIN_SEL BIT(4)
+#define LP873X_CONFIG_CLKIN_PD BIT(3)
+#define LP873X_CONFIG_EN_PD BIT(2)
+#define LP873X_CONFIG_TDIE_WARN_LEVEL BIT(1)
+#define LP873X_EN_SPREAD_SPEC BIT(0)
+
+#define LP873X_PLL_CTRL_EN_PLL BIT(6)
+#define LP873X_EXT_CLK_FREQ 0x1F
+
+#define LP873X_PGOOD_CTRL1_PGOOD_POL BIT(7)
+#define LP873X_PGOOD_CTRL1_PGOOD_OD BIT(6)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOW_LDO BIT(5)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOWN_BUCK BIT(4)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO1 BIT(3)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO0 BIT(2)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK1 BIT(1)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK0 BIT(0)
+
+#define LP873X_PGOOD_CTRL2_EN_PGOOD_TWARN BIT(2)
+#define LP873X_PGOOD_CTRL2_EN_PG_FAULT_GATE BIT(1)
+#define LP873X_PGOOD_CTRL2_PGOOD_MODE BIT(0)
+
+#define LP873X_PG_FAULT_PG_FAULT_LDO1 BIT(3)
+#define LP873X_PG_FAULT_PG_FAULT_LDO0 BIT(2)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK1 BIT(1)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK0 BIT(0)
+
+#define LP873X_RESET_SW_RESET BIT(0)
+
+#define LP873X_INT_TOP_1_PGOOD_INT BIT(7)
+#define LP873X_INT_TOP_1_LDO_INT BIT(6)
+#define LP873X_INT_TOP_1_BUCK_INT BIT(5)
+#define LP873X_INT_TOP_1_SYNC_CLK_INT BIT(4)
+#define LP873X_INT_TOP_1_TDIE_SD_INT BIT(3)
+#define LP873X_INT_TOP_1_TDIE_WARN_INT BIT(2)
+#define LP873X_INT_TOP_1_OVP_INT BIT(1)
+#define LP873X_INT_TOP_1_I_MEAS_INT BIT(0)
+
+#define LP873X_INT_TOP_2_RESET_REG_INT BIT(0)
+
+#define LP873X_INT_BUCK_BUCK1_PG_INT BIT(6)
+#define LP873X_INT_BUCK_BUCK1_SC_INT BIT(5)
+#define LP873X_INT_BUCK_BUCK1_ILIM_INT BIT(4)
+#define LP873X_INT_BUCK_BUCK0_PG_INT BIT(2)
+#define LP873X_INT_BUCK_BUCK0_SC_INT BIT(1)
+#define LP873X_INT_BUCK_BUCK0_ILIM_INT BIT(0)
+
+#define LP873X_INT_LDO_LDO1_PG_INT BIT(6)
+#define LP873X_INT_LDO_LDO1_SC_INT BIT(5)
+#define LP873X_INT_LDO_LDO1_ILIM_INT BIT(4)
+#define LP873X_INT_LDO_LDO0_PG_INT BIT(2)
+#define LP873X_INT_LDO_LDO0_SC_INT BIT(1)
+#define LP873X_INT_LDO_LDO0_ILIM_INT BIT(0)
+
+#define LP873X_TOP_STAT_PGOOD_STAT BIT(7)
+#define LP873X_TOP_STAT_SYNC_CLK_STAT BIT(4)
+#define LP873X_TOP_STAT_TDIE_SD_STAT BIT(3)
+#define LP873X_TOP_STAT_TDIE_WARN_STAT BIT(2)
+#define LP873X_TOP_STAT_OVP_STAT BIT(1)
+
+#define LP873X_BUCK_STAT_BUCK1_STAT BIT(7)
+#define LP873X_BUCK_STAT_BUCK1_PG_STAT BIT(6)
+#define LP873X_BUCK_STAT_BUCK1_ILIM_STAT BIT(4)
+#define LP873X_BUCK_STAT_BUCK0_STAT BIT(3)
+#define LP873X_BUCK_STAT_BUCK0_PG_STAT BIT(2)
+#define LP873X_BUCK_STAT_BUCK0_ILIM_STAT BIT(0)
+
+#define LP873X_LDO_STAT_LDO1_STAT BIT(7)
+#define LP873X_LDO_STAT_LDO1_PG_STAT BIT(6)
+#define LP873X_LDO_STAT_LDO1_ILIM_STAT BIT(4)
+#define LP873X_LDO_STAT_LDO0_STAT BIT(3)
+#define LP873X_LDO_STAT_LDO0_PG_STAT BIT(2)
+#define LP873X_LDO_STAT_LDO0_ILIM_STAT BIT(0)
+
+#define LP873X_TOP_MASK_1_PGOOD_INT_MASK BIT(7)
+#define LP873X_TOP_MASK_1_SYNC_CLK_MASK BIT(4)
+#define LP873X_TOP_MASK_1_TDIE_WARN_MASK BIT(2)
+#define LP873X_TOP_MASK_1_I_MEAS_MASK BIT(0)
+
+#define LP873X_TOP_MASK_2_RESET_REG_MASK BIT(0)
+
+#define LP873X_BUCK_MASK_BUCK1_PGF_MASK BIT(7)
+#define LP873X_BUCK_MASK_BUCK1_PGR_MASK BIT(6)
+#define LP873X_BUCK_MASK_BUCK1_ILIM_MASK BIT(4)
+#define LP873X_BUCK_MASK_BUCK0_PGF_MASK BIT(3)
+#define LP873X_BUCK_MASK_BUCK0_PGR_MASK BIT(2)
+#define LP873X_BUCK_MASK_BUCK0_ILIM_MASK BIT(0)
+
+#define LP873X_LDO_MASK_LDO1_PGF_MASK BIT(7)
+#define LP873X_LDO_MASK_LDO1_PGR_MASK BIT(6)
+#define LP873X_LDO_MASK_LDO1_ILIM_MASK BIT(4)
+#define LP873X_LDO_MASK_LDO0_PGF_MASK BIT(3)
+#define LP873X_LDO_MASK_LDO0_PGR_MASK BIT(2)
+#define LP873X_LDO_MASK_LDO0_ILIM_MASK BIT(0)
+
+#define LP873X_SEL_I_LOAD_CURRENT_BUCK_SELECT BIT(0)
+
+#define LP873X_I_LOAD_2_BUCK_LOAD_CURRENT BIT(0)
+
+#define LP873X_I_LOAD_1_BUCK_LOAD_CURRENT 0xFF
+
+#define LP873X_MAX_REG_ID LP873X_LDO_1
+
+/* Number of step-down converters available */
+#define LP873X_NUM_BUCK 2
+/* Number of LDO voltage regulators available */
+#define LP873X_NUM_LDO 2
+/* Number of total regulators available */
+#define LP873X_NUM_REGULATOR (LP873X_NUM_BUCK + LP873X_NUM_LDO)
+
+enum lp873x_regulator_id {
+ /* BUCK's */
+ LP873X_BUCK_0,
+ LP873X_BUCK_1,
+ /* LDOs */
+ LP873X_LDO_0,
+ LP873X_LDO_1,
+};
+
+/**
+ * struct lp873x - state holder for the lp873x driver
+ * @dev: struct device pointer for MFD device
+ * @rev: revision of the lp873x
+ * @lock: lock guarding the data structure
+ * @regmap: register map of the lp873x PMIC
+ *
+ * Device data may be used to access the LP873X chip
+ */
+struct lp873x {
+ struct device *dev;
+ u8 rev;
+ struct regmap *regmap;
+};
+#endif /* __LINUX_MFD_LP873X_H */
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index f01c1fae4d84..df75234f979d 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Samsung Electrnoics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index ccfaf952c31b..d81b52bb8bee 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Samsung Electrnoics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 441b6ee72691..6d435a3c06bc 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -1,11 +1,15 @@
/*
- * rk808.h for Rockchip RK808
+ * Register definitions for Rockchip's RK808/RK818 PMIC
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <zyw@rock-chips.com>
* Author: Zhang Qing <zhangqing@rock-chips.com>
*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ *
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -16,8 +20,8 @@
* more details.
*/
-#ifndef __LINUX_REGULATOR_rk808_H
-#define __LINUX_REGULATOR_rk808_H
+#ifndef __LINUX_REGULATOR_RK808_H
+#define __LINUX_REGULATOR_RK808_H
#include <linux/regulator/machine.h>
#include <linux/regmap.h>
@@ -28,7 +32,7 @@
#define RK808_DCDC1 0 /* (0+RK808_START) */
#define RK808_LDO1 4 /* (4+RK808_START) */
-#define RK808_NUM_REGULATORS 14
+#define RK808_NUM_REGULATORS 14
enum rk808_reg {
RK808_ID_DCDC1,
@@ -65,6 +69,8 @@ enum rk808_reg {
#define RK808_RTC_INT_REG 0x12
#define RK808_RTC_COMP_LSB_REG 0x13
#define RK808_RTC_COMP_MSB_REG 0x14
+#define RK808_ID_MSB 0x17
+#define RK808_ID_LSB 0x18
#define RK808_CLK32OUT_REG 0x20
#define RK808_VB_MON_REG 0x21
#define RK808_THERMAL_REG 0x22
@@ -115,7 +121,92 @@ enum rk808_reg {
#define RK808_INT_STS_MSK_REG2 0x4f
#define RK808_IO_POL_REG 0x50
-/* IRQ Definitions */
+/* RK818 */
+#define RK818_DCDC1 0
+#define RK818_LDO1 4
+#define RK818_NUM_REGULATORS 17
+
+enum rk818_reg {
+ RK818_ID_DCDC1,
+ RK818_ID_DCDC2,
+ RK818_ID_DCDC3,
+ RK818_ID_DCDC4,
+ RK818_ID_BOOST,
+ RK818_ID_LDO1,
+ RK818_ID_LDO2,
+ RK818_ID_LDO3,
+ RK818_ID_LDO4,
+ RK818_ID_LDO5,
+ RK818_ID_LDO6,
+ RK818_ID_LDO7,
+ RK818_ID_LDO8,
+ RK818_ID_LDO9,
+ RK818_ID_SWITCH,
+ RK818_ID_HDMI_SWITCH,
+ RK818_ID_OTG_SWITCH,
+};
+
+#define RK818_DCDC_EN_REG 0x23
+#define RK818_LDO_EN_REG 0x24
+#define RK818_SLEEP_SET_OFF_REG1 0x25
+#define RK818_SLEEP_SET_OFF_REG2 0x26
+#define RK818_DCDC_UV_STS_REG 0x27
+#define RK818_DCDC_UV_ACT_REG 0x28
+#define RK818_LDO_UV_STS_REG 0x29
+#define RK818_LDO_UV_ACT_REG 0x2a
+#define RK818_DCDC_PG_REG 0x2b
+#define RK818_LDO_PG_REG 0x2c
+#define RK818_VOUT_MON_TDB_REG 0x2d
+#define RK818_BUCK1_CONFIG_REG 0x2e
+#define RK818_BUCK1_ON_VSEL_REG 0x2f
+#define RK818_BUCK1_SLP_VSEL_REG 0x30
+#define RK818_BUCK2_CONFIG_REG 0x32
+#define RK818_BUCK2_ON_VSEL_REG 0x33
+#define RK818_BUCK2_SLP_VSEL_REG 0x34
+#define RK818_BUCK3_CONFIG_REG 0x36
+#define RK818_BUCK4_CONFIG_REG 0x37
+#define RK818_BUCK4_ON_VSEL_REG 0x38
+#define RK818_BUCK4_SLP_VSEL_REG 0x39
+#define RK818_BOOST_CONFIG_REG 0x3a
+#define RK818_LDO1_ON_VSEL_REG 0x3b
+#define RK818_LDO1_SLP_VSEL_REG 0x3c
+#define RK818_LDO2_ON_VSEL_REG 0x3d
+#define RK818_LDO2_SLP_VSEL_REG 0x3e
+#define RK818_LDO3_ON_VSEL_REG 0x3f
+#define RK818_LDO3_SLP_VSEL_REG 0x40
+#define RK818_LDO4_ON_VSEL_REG 0x41
+#define RK818_LDO4_SLP_VSEL_REG 0x42
+#define RK818_LDO5_ON_VSEL_REG 0x43
+#define RK818_LDO5_SLP_VSEL_REG 0x44
+#define RK818_LDO6_ON_VSEL_REG 0x45
+#define RK818_LDO6_SLP_VSEL_REG 0x46
+#define RK818_LDO7_ON_VSEL_REG 0x47
+#define RK818_LDO7_SLP_VSEL_REG 0x48
+#define RK818_LDO8_ON_VSEL_REG 0x49
+#define RK818_LDO8_SLP_VSEL_REG 0x4a
+#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54
+#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55
+#define RK818_DEVCTRL_REG 0x4b
+#define RK818_INT_STS_REG1 0X4c
+#define RK818_INT_STS_MSK_REG1 0x4d
+#define RK818_INT_STS_REG2 0x4e
+#define RK818_INT_STS_MSK_REG2 0x4f
+#define RK818_IO_POL_REG 0x50
+#define RK818_H5V_EN_REG 0x52
+#define RK818_SLEEP_SET_OFF_REG3 0x53
+#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54
+#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55
+#define RK818_BOOST_CTRL_REG 0x56
+#define RK818_DCDC_ILMAX 0x90
+#define RK818_USB_CTRL_REG 0xa1
+
+#define RK818_H5V_EN BIT(0)
+#define RK818_REF_RDY_CTRL BIT(1)
+#define RK818_USB_ILIM_SEL_MASK 0xf
+#define RK818_USB_ILMIN_2000MA 0x7
+#define RK818_USB_CHG_SD_VSEL_MASK 0x70
+
+/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
#define RK808_IRQ_PWRON 2
@@ -137,6 +228,43 @@ enum rk808_reg {
#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0)
#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1)
+/* RK818 IRQ Definitions */
+#define RK818_IRQ_VOUT_LO 0
+#define RK818_IRQ_VB_LO 1
+#define RK818_IRQ_PWRON 2
+#define RK818_IRQ_PWRON_LP 3
+#define RK818_IRQ_HOTDIE 4
+#define RK818_IRQ_RTC_ALARM 5
+#define RK818_IRQ_RTC_PERIOD 6
+#define RK818_IRQ_USB_OV 7
+#define RK818_IRQ_PLUG_IN 8
+#define RK818_IRQ_PLUG_OUT 9
+#define RK818_IRQ_CHG_OK 10
+#define RK818_IRQ_CHG_TE 11
+#define RK818_IRQ_CHG_TS1 12
+#define RK818_IRQ_TS2 13
+#define RK818_IRQ_CHG_CVTLIM 14
+#define RK818_IRQ_DISCHG_ILIM 15
+
+#define RK818_IRQ_VOUT_LO_MSK BIT(0)
+#define RK818_IRQ_VB_LO_MSK BIT(1)
+#define RK818_IRQ_PWRON_MSK BIT(2)
+#define RK818_IRQ_PWRON_LP_MSK BIT(3)
+#define RK818_IRQ_HOTDIE_MSK BIT(4)
+#define RK818_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK818_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK818_IRQ_USB_OV_MSK BIT(7)
+#define RK818_IRQ_PLUG_IN_MSK BIT(0)
+#define RK818_IRQ_PLUG_OUT_MSK BIT(1)
+#define RK818_IRQ_CHG_OK_MSK BIT(2)
+#define RK818_IRQ_CHG_TE_MSK BIT(3)
+#define RK818_IRQ_CHG_TS1_MSK BIT(4)
+#define RK818_IRQ_TS2_MSK BIT(5)
+#define RK818_IRQ_CHG_CVTLIM_MSK BIT(6)
+#define RK818_IRQ_DISCHG_ILIM_MSK BIT(7)
+
+#define RK818_NUM_IRQ 16
+
#define RK808_VBAT_LOW_2V8 0x00
#define RK808_VBAT_LOW_2V9 0x01
#define RK808_VBAT_LOW_3V0 0x02
@@ -191,9 +319,17 @@ enum {
BOOST_ILMIN_250MA,
};
+enum {
+ RK808_ID = 0x0000,
+ RK818_ID = 0x8181,
+};
+
struct rk808 {
- struct i2c_client *i2c;
- struct regmap_irq_chip_data *irq_data;
- struct regmap *regmap;
+ struct i2c_client *i2c;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ long variant;
+ const struct regmap_config *regmap_cfg;
+ const struct regmap_irq_chip *regmap_irq_chip;
};
-#endif /* __LINUX_REGULATOR_rk808_H */
+#endif /* __LINUX_REGULATOR_RK808_H */
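
A sketch of how a cell driver might branch on the new variant field, which the core is presumably expected to fill from the chip ID registers (RK808_ID/RK818_ID); the helper shown is hypothetical:

	static int rk8xx_num_regulators(struct rk808 *rk808)
	{
		switch (rk808->variant) {
		case RK808_ID:
			return RK808_NUM_REGULATORS;
		case RK818_ID:
			return RK818_NUM_REGULATORS;
		default:
			return -EINVAL;
		}
	}
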
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index de748bc7525e..4a827af17e59 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -26,6 +26,7 @@ enum stmpe_partnum {
STMPE610,
STMPE801,
STMPE811,
+ STMPE1600,
STMPE1601,
STMPE1801,
STMPE2401,
@@ -39,22 +40,42 @@ enum stmpe_partnum {
*/
enum {
STMPE_IDX_CHIP_ID,
+ STMPE_IDX_SYS_CTRL,
+ STMPE_IDX_SYS_CTRL2,
STMPE_IDX_ICR_LSB,
STMPE_IDX_IER_LSB,
+ STMPE_IDX_IER_MSB,
STMPE_IDX_ISR_LSB,
STMPE_IDX_ISR_MSB,
STMPE_IDX_GPMR_LSB,
+ STMPE_IDX_GPMR_CSB,
+ STMPE_IDX_GPMR_MSB,
STMPE_IDX_GPSR_LSB,
+ STMPE_IDX_GPSR_CSB,
+ STMPE_IDX_GPSR_MSB,
STMPE_IDX_GPCR_LSB,
+ STMPE_IDX_GPCR_CSB,
+ STMPE_IDX_GPCR_MSB,
STMPE_IDX_GPDR_LSB,
+ STMPE_IDX_GPDR_CSB,
+ STMPE_IDX_GPDR_MSB,
+ STMPE_IDX_GPEDR_LSB,
+ STMPE_IDX_GPEDR_CSB,
STMPE_IDX_GPEDR_MSB,
STMPE_IDX_GPRER_LSB,
+ STMPE_IDX_GPRER_CSB,
+ STMPE_IDX_GPRER_MSB,
STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPFER_CSB,
+ STMPE_IDX_GPFER_MSB,
STMPE_IDX_GPPUR_LSB,
STMPE_IDX_GPPDR_LSB,
STMPE_IDX_GPAFR_U_MSB,
STMPE_IDX_IEGPIOR_LSB,
+ STMPE_IDX_IEGPIOR_CSB,
+ STMPE_IDX_IEGPIOR_MSB,
STMPE_IDX_ISGPIOR_LSB,
+ STMPE_IDX_ISGPIOR_CSB,
STMPE_IDX_ISGPIOR_MSB,
STMPE_IDX_MAX,
};
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 76f30f940c70..c28ff21ca4d2 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -43,8 +43,10 @@
#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
#define EXYNOS5_PHY_ENABLE BIT(0)
-
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
+#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
+#define EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR BIT(28)
+
#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 1c88231496d3..4ccda8969639 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -73,6 +73,7 @@
#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
#define TPS65217_PPATH_USB_CURRENT_MASK 0x03
+#define TPS65217_INT_RESERVEDM BIT(7)
#define TPS65217_INT_PBM BIT(6)
#define TPS65217_INT_ACM BIT(5)
#define TPS65217_INT_USBM BIT(4)
@@ -233,6 +234,13 @@ struct tps65217_bl_pdata {
int dft_brightness;
};
+enum tps65217_irq_type {
+ TPS65217_IRQ_PB,
+ TPS65217_IRQ_AC,
+ TPS65217_IRQ_USB,
+ TPS65217_NUM_IRQ
+};
+
/**
* struct tps65217_board - packages regulator init data
* @tps65217_regulator_data: regulator initialization values
@@ -258,6 +266,10 @@ struct tps65217 {
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
+ struct irq_domain *irq_domain;
+ struct mutex irq_lock;
+ u8 irq_mask;
+ int irq;
};
static inline struct tps65217 *dev_to_tps65217(struct device *dev)
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index 7fdf5326f34e..d1db9527fab5 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -63,6 +63,11 @@
#define TPS65218_CHIPID_CHIP_MASK 0xF8
#define TPS65218_CHIPID_REV_MASK 0x07
+#define TPS65218_REV_1_0 0x0
+#define TPS65218_REV_1_1 0x1
+#define TPS65218_REV_2_0 0x2
+#define TPS65218_REV_2_1 0x3
+
#define TPS65218_INT1_VPRG BIT(5)
#define TPS65218_INT1_AC BIT(4)
#define TPS65218_INT1_PB BIT(3)
@@ -267,6 +272,7 @@ struct tps_info {
struct tps65218 {
struct device *dev;
unsigned int id;
+ u8 rev;
struct mutex tps_lock; /* lock guarding the data structure */
/* IRQ Data */
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 36795a1be479..a2e88761c09f 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -168,7 +168,7 @@
#define TWL6040_VIBROCDET 0x20
#define TWL6040_TSHUTDET 0x40
-#define TWL6040_CELLS 3
+#define TWL6040_CELLS 4
#define TWL6040_REV_ES1_0 0x00
#define TWL6040_REV_ES1_1 0x01 /* Rev ES1.1 and ES1.2 */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 543037465973..722698a43d79 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -3,6 +3,7 @@
#include <linux/major.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/device.h>
/*
* These allocations are managed by device@lanana.org. If you use an
@@ -70,6 +71,13 @@ struct miscdevice {
extern int misc_register(struct miscdevice *misc);
extern void misc_deregister(struct miscdevice *misc);
+/*
+ * Helper macro for drivers that don't do anything special in their module
+ * init / exit calls. This helps eliminate boilerplate code.
+ */
+#define module_misc_device(__misc_device) \
+ module_driver(__misc_device, misc_register, misc_deregister)
+
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
"-" __stringify(minor))
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+ u8 qos, __be16 proto);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
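
The extra proto argument lets a caller pick the VLAN protocol for the VF; a hedged sketch of a converted call site, assuming 802.1Q by default and 802.1ad when an S-VLAN is requested (the wrapper is hypothetical):

	static int my_set_vf_vlan(struct mlx4_dev *dev, int port, int vf,
				  u16 vlan, u8 qos, bool svlan)
	{
		__be16 proto = svlan ? cpu_to_be16(ETH_P_8021AD) :
				       cpu_to_be16(ETH_P_8021Q);

		return mlx4_set_vf_vlan(dev, port, vf, vlan, qos, proto);
	}
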
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..f6a164297358 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -71,7 +71,8 @@ enum {
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
- MLX4_FLAG_BONDED = 1 << 7
+ MLX4_FLAG_BONDED = 1 << 7,
+ MLX4_FLAG_SECURE_HOST = 1 << 8,
};
enum {
@@ -221,6 +222,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
+ MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
+ MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
};
enum {
@@ -448,6 +451,7 @@ enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
+ MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP = 0x17,
};
/* Port mgmt change event handling */
@@ -459,6 +463,11 @@ enum {
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
+union sl2vl_tbl_to_u64 {
+ u8 sl8[8];
+ u64 sl64;
+};
+
enum {
MLX4_DEVICE_STATE_UP = 1 << 0,
MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
@@ -945,6 +954,9 @@ struct mlx4_eqe {
__be32 block_ptr;
__be32 tbl_entries_mask;
} __packed tbl_change_info;
+ struct {
+ u8 sl2vl_table[8];
+ } __packed sl2vl_tbl_change_info;
} params;
} __packed port_mgmt_change;
struct {
@@ -1371,6 +1383,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
enum { /* fl */
MLX4_FL_CV = 1 << 6,
+ MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
+ MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};
enum { /* param3 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen);
+ u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out);
+ u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz);
+ u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
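
These prototypes now take raw command layouts (u32 arrays built with the MLX5_SET()/MLX5_GET() macros from mlx5_ifc.h) instead of the removed mailbox structs. A rough sketch of the pattern, shown here for a destroy command and not claiming to match the in-tree implementation exactly:

	static int my_destroy_cq(struct mlx5_core_dev *dev, u32 cqn)
	{
		u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
		u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};

		MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
		MLX5_SET(destroy_cq_in, in, cqn, cqn);
		return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	}
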
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..77c141797152 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -198,19 +198,6 @@ enum {
};
enum {
- MLX5_ACCESS_MODE_PA = 0,
- MLX5_ACCESS_MODE_MTT = 1,
- MLX5_ACCESS_MODE_KLM = 2
-};
-
-enum {
- MLX5_MKEY_REMOTE_INVAL = 1 << 24,
- MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
- MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
-};
-
-enum {
MLX5_EN_RD = (u64)1,
MLX5_EN_WR = (u64)2
};
@@ -411,33 +398,6 @@ enum {
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
-struct mlx5_inbox_hdr {
- __be16 opcode;
- u8 rsvd[4];
- __be16 opmod;
-};
-
-struct mlx5_outbox_hdr {
- u8 status;
- u8 rsvd[3];
- __be32 syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[24];
- u8 intapin;
- u8 rsvd1[13];
- __be16 vsd_vendor_id;
- u8 vsd[208];
- u8 vsd_psid[16];
-};
-
enum mlx5_odp_transport_cap_bits {
MLX5_ODP_SUPPORT_SEND = 1 << 31,
MLX5_ODP_SUPPORT_RECV = 1 << 30,
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
char reserved2[0xe4];
};
-struct mlx5_cmd_init_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-
struct health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
struct mlx5_cqe64 cqe64;
};
-struct mlx5_srq_ctx {
- u8 state_log_sz;
- u8 rsvd0[3];
- __be32 flags_xrcd;
- __be32 pgoff_cqn;
- u8 rsvd1[4];
- u8 log_pg_sz;
- u8 rsvd2[7];
- __be32 pd;
- __be16 lwm;
- __be16 wqe_cnt;
- u8 rsvd3[8];
- __be64 db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_srqn;
- u8 rsvd0[4];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[208];
- __be64 pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[32];
- __be64 pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- __be16 rsvd;
- __be16 lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cq_context {
- u8 status;
- u8 cqe_sz_flags;
- u8 st;
- u8 rsvd3;
- u8 rsvd4[6];
- __be16 page_offset;
- __be32 log_sz_usr_page;
- __be16 cq_period;
- __be16 cq_max_count;
- __be16 rsvd20;
- __be16 c_eqn;
- u8 log_pg_sz;
- u8 rsvd25[7];
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_counter;
- __be32 producer_counter;
- u8 rsvd48[8];
- __be64 db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_cqn;
- u8 rsvdx[4];
- struct mlx5_cq_context ctx;
- u8 rsvd6[192];
- __be64 pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_cq_context ctx;
- u8 rsvd6[16];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- __be32 field_select;
- struct mlx5_cq_context ctx;
- u8 rsvd[192];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_eq_context {
- u8 status;
- u8 ec_oi;
- u8 st;
- u8 rsvd2[7];
- __be16 page_pffset;
- __be32 log_sz_usr_page;
- u8 rsvd3[7];
- u8 intr;
- u8 log_page_size;
- u8 rsvd4[15];
- __be32 consumer_counter;
- __be32 produser_counter;
- u8 rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 input_eqn;
- u8 rsvd1[4];
- struct mlx5_eq_context ctx;
- u8 rsvd2[8];
- __be64 events_mask;
- u8 rsvd3[176];
- __be64 pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[3];
- u8 eq_number;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be64 mask;
- u8 mu;
- u8 rsvd0[2];
- u8 eqn;
- u8 rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_eq_context ctx;
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
};
enum {
- MLX5_MKEY_STATUS_FREE = 1 << 6,
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
u8 rsvd4[4];
};
-struct mlx5_query_special_ctxs_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 dump_fill_mkey;
- __be32 reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_mkey_index;
- __be32 flags;
- struct mlx5_mkey_seg seg;
- u8 rsvd1[16];
- __be32 xlat_oct_act_size;
- __be32 rsvd2;
- u8 rsvd3[168];
- __be64 pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be16 remote_lid;
- u8 rsvd0;
- u8 port;
- u8 rsvd1[4];
- u8 data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- u8 data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 register_id;
- __be32 arg;
- __be32 data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 data[0];
-};
-
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
-struct mlx5_allocate_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 npsv_pd;
- __be32 rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 psv_number;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
@@ -1381,6 +964,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,10 +49,6 @@
#include <linux/mlx5/srq.h>
enum {
- MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
-enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
};
struct mlx5_eswitch;
+struct mlx5_lag;
struct mlx5_rl_entry {
u32 rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
+ struct mlx5_lag *lag;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out);
+ u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -826,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -865,7 +865,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
@@ -930,6 +930,8 @@ enum {
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
+ int (*attach)(struct mlx5_core_dev *dev, void *context);
+ void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
void * (*get_dev)(void *context);
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
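
mlx5_core_create_mkey() now takes a create_mkey_in layout built with MLX5_SET() rather than the removed mlx5_create_mkey_mbox_in struct; a rough sketch of a converted caller, with the fields reduced to a minimum and the wrapper name made up for illustration:

	static int my_create_pa_mkey(struct mlx5_core_dev *dev, u32 pdn,
				     struct mlx5_core_mkey *mkey)
	{
		int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
		void *mkc;
		u32 *in;
		int err;

		in = kzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
		MLX5_SET(mkc, mkc, lr, 1);
		MLX5_SET(mkc, mkc, pd, pdn);

		err = mlx5_core_create_mkey(dev, mkey, in, inlen);
		kfree(in);
		return err;
	}
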
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e036d6030867..93ebc5e21334 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX,
};
struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d1f9a581aca8..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -174,6 +174,12 @@ enum {
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_SET_WOL_ROL = 0x830,
MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
+ MLX5_CMD_OP_CREATE_LAG = 0x840,
+ MLX5_CMD_OP_MODIFY_LAG = 0x841,
+ MLX5_CMD_OP_QUERY_LAG = 0x842,
+ MLX5_CMD_OP_DESTROY_LAG = 0x843,
+ MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
+ MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -212,6 +218,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
MLX5_CMD_OP_MAX
};
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 reserved_at_7[0x19];
+ u8 encap[0x1];
+ u8 decap[0x1];
+ u8 reserved_at_9[0x17];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
- u8 reserved_at_1[0x1ff];
+ u8 nic_rx_multi_path_tirs_fts[0x1];
+ u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
+ u8 reserved_at_3[0x1fd];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
- u8 reserved_at_20[0x7e0];
+ u8 vxlan_encap_decap[0x1];
+ u8 nvgre_encap_decap[0x1];
+ u8 reserved_at_22[0x9];
+ u8 log_max_encap_headers[0x5];
+ u8 reserved_2b[0x6];
+ u8 max_encap_header_size[0xa];
+
+ u8 reserved_40[0x7c0];
+
};
struct mlx5_ifc_qos_cap_bits {
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x3];
+ u8 reserved_at_183[0x1];
+ u8 modify_rq_counter_set_id[0x1];
+ u8 reserved_at_185[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x10];
+
+ u8 reserved_at_270[0xb];
+ u8 lag_master[0x1];
+ u8 num_lag_ports[0x4];
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
@@ -1904,7 +1929,7 @@ enum {
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
- u8 reserved_at_4[0x4];
+ u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3e0[0x8];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x40];
+ u8 reserved_at_400[0x8];
+ u8 deth_sqpn[0x18];
+
+ u8 reserved_at_420[0x20];
u8 reserved_at_440[0x8];
u8 last_acked_psn[0x18];
@@ -2064,6 +2092,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+ MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
};
struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 reserved_at_c0[0x140];
+ u8 encap_id[0x20];
+
+ u8 reserved_at_e0[0x120];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
};
struct mlx5_ifc_tisc_bits {
- u8 reserved_at_0[0xc];
+ u8 strict_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1[0x3];
+ u8 lag_tx_port_affinity[0x04];
+
+ u8 reserved_at_8[0x4];
u8 prio[0x4];
u8 reserved_at_10[0x10];
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x180];
+ u8 reserved_at_180[0x200];
struct mlx5_ifc_wq_bits wq;
};
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
+ u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
};
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_encap_header_in_bits {
+ u8 reserved_at_0[0x5];
+ u8 header_type[0x3];
+ u8 reserved_at_8[0xe];
+ u8 encap_header_size[0xa];
+
+ u8 reserved_at_20[0x10];
+ u8 encap_header[2][0x8];
+
+ u8 more_encap_header[0][0x8];
+};
+
+struct mlx5_ifc_query_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
struct mlx5_ifc_modify_tis_bitmask_bits {
u8 reserved_at_0[0x20];
- u8 reserved_at_20[0x1f];
+ u8 reserved_at_20[0x1d];
+ u8 lag_tx_port_affinity[0x1];
+ u8 strict_lag_tx_port_affinity[0x1];
u8 prio[0x1];
};
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x16];
u8 node_guid[0x1];
u8 port_guid[0x1];
- u8 reserved_at_18[0x1];
+ u8 min_inline[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_a0[0x20];
- u8 reserved_at_c0[0x4];
+ u8 encap_en[0x1];
+ u8 decap_en[0x1];
+ u8 reserved_at_c2[0x2];
u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_at_d0[0x8];
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -7563,7 +7690,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
};
enum {
- MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
};
struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7602,7 +7730,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7710,4 +7841,134 @@ struct mlx5_ifc_dcbx_param_bits {
u8 error[0x8];
u8 reserved_at_a0[0x160];
};
+
+struct mlx5_ifc_lagc_bits {
+ u8 reserved_at_0[0x1d];
+ u8 lag_state[0x3];
+
+ u8 reserved_at_20[0x14];
+ u8 tx_remap_affinity_2[0x4];
+ u8 reserved_at_38[0x4];
+ u8 tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
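/*
 * Usage sketch (illustrative, not part of the patch): the new CREATE_LAG
 * command and its lag context are driven through the MLX5_SET()/MLX5_ADDR_OF()
 * accessors generated from the *_bits layouts above; mlx5_cmd_exec() is
 * assumed to take raw u32 in/out buffers as elsewhere in this series.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int my_cmd_create_lag(struct mlx5_core_dev *dev, u8 port1_affinity,
			     u8 port2_affinity)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, ctx, tx_remap_affinity_1, port1_affinity);
	MLX5_SET(lagc, ctx, tx_remap_affinity_2, port2_affinity);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}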
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e3012cc64b8a..b3065acd20b4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,6 +61,39 @@ enum mlx5_an_status {
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_1000BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+ u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
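/*
 * Usage sketch (illustrative, not part of the patch): ethtool-style code can
 * query the operational Ethernet protocol bitmap with the new helper and test
 * individual mlx5e_link_mode bits via MLX5E_PROT_MASK().
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>

static bool my_link_is_100g_cr4(struct mlx5_core_dev *dev, u8 local_port)
{
	u32 eth_proto_oper = 0;

	if (mlx5_query_port_eth_proto_oper(dev, &eth_proto_oper, local_port))
		return false;

	return eth_proto_oper & MLX5E_PROT_MASK(MLX5E_100GBASE_CR4);
}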
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
};
enum {
- MLX5_NON_ZERO_RQ = 0 << 24,
- MLX5_SRQ_RQ = 1 << 24,
- MLX5_CRQ_RQ = 2 << 24,
- MLX5_ZERO_LEN_RQ = 3 << 24
+ MLX5_NON_ZERO_RQ = 0x0,
+ MLX5_SRQ_RQ = 0x1,
+ MLX5_CRQ_RQ = 0x2,
+ MLX5_ZERO_LEN_RQ = 0x3
};
+/* TODO REM */
enum {
/* params1 */
MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
};
enum {
- MLX5_QP_LAT_SENSITIVE = 1 << 28,
- MLX5_QP_BLOCK_MCAST = 1 << 30,
- MLX5_QP_ENABLE_SIG = 1 << 31,
-};
-
-enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
u8 rmac[6];
};
+/* FIXME: use mlx5_ifc.h qpc */
struct mlx5_qp_context {
__be32 flags;
__be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
u8 rsvd1[24];
};
-struct mlx5_create_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_qpn;
- u8 rsvd0[4];
- __be32 opt_param_mask;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd3[16];
- __be64 pas[0];
-};
-
-struct mlx5_create_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_modify_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
- __be32 optparam;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
-};
-
-struct mlx5_modify_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[4];
-};
-
-struct mlx5_query_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd1[8];
- __be32 optparam;
- u8 rsvd0[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
- __be64 pas[0];
-};
-
-struct mlx5_conf_sqp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[3];
- u8 type;
-};
-
-struct mlx5_conf_sqp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
-struct mlx5_page_fault_resume_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 flags_qpn;
- u8 reserved[4];
-};
-
-struct mlx5_page_fault_resume_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
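/*
 * Usage sketch (illustrative, not part of the patch): with the mailbox
 * structures gone, callers build CREATE_QP input through the mlx5_ifc
 * accessors and hand mlx5_core_create_qp() a plain u32 array.  Only a couple
 * of qpc fields are shown; a real caller also sizes the buffer for the PAS
 * list and fills the remaining context.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

static int my_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			u32 pdn, u32 cqn)
{
	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, pd, pdn);
	MLX5_SET(qpc, qpc, cqn_snd, cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, cqn);

	return mlx5_core_create_qp(dev, qp, in, sizeof(in));
}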
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f14534f0c90..e9caec6a51e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -126,7 +126,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
-#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
/*
* Linux kernel virtual memory manager primitives.
@@ -1048,28 +1048,16 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-/*
- * Return the pagecache index of the passed page. Regular pagecache pages
- * use ->index whereas swapcache pages use ->private
- */
-static inline pgoff_t page_index(struct page *page)
-{
- if (unlikely(PageSwapCache(page)))
- return page_private(page);
- return page->index;
-}
-
extern pgoff_t __page_file_index(struct page *page);
/*
- * Return the file index of the page. Regular pagecache pages use ->index
- * whereas swapcache pages use swp_offset(->private)
+ * Return the pagecache index of the passed page. Regular pagecache pages
+ * use ->index whereas swapcache pages use swp_offset(->private)
*/
-static inline pgoff_t page_file_index(struct page *page)
+static inline pgoff_t page_index(struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_index(page);
-
return page->index;
}
@@ -1197,10 +1185,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
- * we walk over the current vma or not. A positive returned
+ * we walk over the current vma or not. Returning 0
* value means "do page table walk over the current vma,"
* and a negative one means "abort current page table walk
- * right now." 0 means "skip the current vma."
+ * right now." 1 means "skip the current vma."
* @mm: mm_struct representing the target process of page table walk
* @vma: vma currently walked (NULL if walking outside vmas)
* @private: private data for callbacks' usage
@@ -1529,7 +1517,7 @@ static inline int pte_devmap(pte_t pte)
}
#endif
-int vma_wants_writenotify(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
@@ -1924,10 +1912,12 @@ extern void show_mem(unsigned int flags);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
+#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
+extern unsigned long arch_reserved_kernel_pages(void);
+#endif
-extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
- const char *fmt, ...);
+extern __printf(2, 3)
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
@@ -1977,8 +1967,14 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
+ struct vm_area_struct *expand);
+static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+{
+ return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
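/*
 * Illustrative note (not part of the patch) on the PAGE_ALIGNED() change
 * above: the added parentheses matter when the argument is an expression,
 * because a cast binds tighter than '+'.
 */
#include <linux/mm.h>

struct my_rec { char pad[64]; };

static bool my_rec_page_aligned(struct my_rec *p, unsigned long n)
{
	/* old macro: IS_ALIGNED((unsigned long)p + n, PAGE_SIZE)
	 *            -> n is added as a raw byte count
	 * new macro: IS_ALIGNED((unsigned long)(p + n), PAGE_SIZE)
	 *            -> advances by n * sizeof(struct my_rec), as intended
	 */
	return PAGE_ALIGNED(p + n);
}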
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 903200f4ec41..4a8acedf4b7d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -515,9 +515,7 @@ struct mm_struct {
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
-#ifdef CONFIG_MMU
struct work_struct async_put_work;
-#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d8673ca968ba..73fad83acbcb 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -292,6 +292,7 @@ struct mmc_card {
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
u32 raw_scr[2]; /* raw card SCR */
+ u32 raw_ssr[16]; /* raw card SSR */
struct mmc_cid cid; /* card identification */
struct mmc_csd csd; /* card specific */
struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b01e77de1a74..2b953eb8ceae 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -55,6 +55,9 @@ struct mmc_command {
#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+/* Can be used by core to poll after switch to MMC HS mode */
+#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE)
+
#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
/*
@@ -133,8 +136,12 @@ struct mmc_request {
struct mmc_command *stop;
struct completion completion;
+ struct completion cmd_completion;
void (*done)(struct mmc_request *);/* completion function */
struct mmc_host *host;
+
+ /* Allow other commands during this ongoing data transfer or busy wait */
+ bool cap_cmd_during_tfr;
};
struct mmc_card;
@@ -146,6 +153,9 @@ extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
struct mmc_async_req *, int *);
extern int mmc_interrupt_hpi(struct mmc_card *);
extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
+extern void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq);
+extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
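/*
 * Usage sketch (illustrative, not part of the patch): a requester marks the
 * request with cap_cmd_during_tfr; mmc_wait_for_req() then returns once the
 * command phase completes (signalled by the host via mmc_command_done()), so
 * other commands such as CMD13 polling can be sent while the data transfer is
 * still running, and the new helpers finish the request.  Requires a host
 * advertising MMC_CAP_CMD_DURING_TFR.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static int my_transfer_with_polling(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	mrq->cap_cmd_during_tfr = true;
	mmc_wait_for_req(host, mrq);		/* returns after the command */

	while (!mmc_is_req_done(host, mrq)) {
		/* ... e.g. mmc_wait_for_cmd(host, &status_cmd, 0) ... */
	}

	mmc_wait_for_req_done(host, mrq);	/* data/stop fully completed */
	return mrq->cmd->error;
}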
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 83b0edfce471..f5af2bd35e7f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -17,6 +17,7 @@
#include <linux/scatterlist.h>
#include <linux/mmc/core.h>
#include <linux/dmaengine.h>
+#include <linux/reset.h>
#define MAX_MCI_SLOTS 2
@@ -259,6 +260,7 @@ struct dw_mci_board {
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
+ struct reset_control *rstc;
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aa4bfbf129e4..0b2439441cc8 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -281,6 +281,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
@@ -382,6 +383,9 @@ struct mmc_host {
struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */
+ /* Ongoing data transfer that allows commands during transfer */
+ struct mmc_request *ongoing_mrq;
+
#ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request;
#endif
@@ -418,6 +422,7 @@ int mmc_power_restore_host(struct mmc_host *host);
void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
+void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 54a594d49733..1172cce949a4 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -96,4 +96,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
+extern unsigned int sysctl_mount_max;
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index b9f0ff4d489c..cd0c8bd0a1de 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -25,6 +25,7 @@
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
+#include <linux/fs.h>
#include <uapi/linux/net.h>
@@ -128,6 +129,9 @@ struct page;
struct sockaddr;
struct msghdr;
struct module;
+struct sk_buff;
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
+ unsigned int, size_t);
struct proto_ops {
int family;
@@ -186,6 +190,8 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+ int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8d79d4ebcfe..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <linux/hashtable.h>
struct netpoll_info;
struct device;
@@ -788,6 +789,7 @@ enum {
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_MATCHALL,
+ TC_SETUP_CLSBPF,
};
struct tc_cls_u32_offload;
@@ -799,6 +801,7 @@ struct tc_to_netdev {
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
+ struct tc_cls_bpf_offload *cls_bpf;
};
};
@@ -923,6 +926,14 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
+ * bool (*ndo_has_offload_stats)(int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+ * void *attr_data)
+ * Get statistics for offload operations by attr_id. Write it into the
+ * attr_data pointer.
+ *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
@@ -935,7 +946,8 @@ struct netdev_xdp {
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
- * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
+ * u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1030,7 +1042,7 @@ struct netdev_xdp {
* Deletes the FDB entry from dev corresponding to addr.
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
- * int idx)
+ * int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
@@ -1154,6 +1166,10 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1172,7 +1188,8 @@ struct net_device_ops {
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
- int queue, u16 vlan, u8 qos);
+ int queue, u16 vlan,
+ u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
@@ -1262,7 +1279,7 @@ struct net_device_ops {
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -1561,8 +1578,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @offload_fwd_mark: Offload device fwding mark
- *
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1784,7 +1799,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct list_head nf_hooks_ingress;
+ struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -1800,6 +1815,9 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
@@ -1810,9 +1828,6 @@ struct net_device {
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto __rcu *egress_cl_list;
#endif
-#ifdef CONFIG_NET_SWITCHDEV
- u32 offload_fwd_mark;
-#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
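/*
 * Usage sketch (illustrative, not part of the patch): a driver wiring up the
 * two new offload-stats hooks.  IFLA_OFFLOAD_XSTATS_CPU_HIT is assumed to be
 * the attr_id requested by the rtnetlink core; my_* names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static bool my_has_offload_stats(int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int my_get_offload_stats(int attr_id, const struct net_device *dev,
				void *attr_data)
{
	struct rtnl_link_stats64 *stats = attr_data;

	if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return -EINVAL;

	memset(stats, 0, sizeof(*stats));	/* driver fills from HW counters */
	return 0;
}

/* in the driver's net_device_ops:
 *	.ndo_has_offload_stats	= my_has_offload_stats,
 *	.ndo_get_offload_stats	= my_get_offload_stats,
 */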
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
struct net_device *out;
struct sock *sk;
struct net *net;
- struct list_head *hook_list;
+ struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
+typedef unsigned int nf_hookfn(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+struct nf_hook_ops {
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_hook_entry {
+ struct nf_hook_entry __rcu *next;
+ struct nf_hook_ops ops;
+ const struct nf_hook_ops *orig_ops;
+};
+
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct list_head *hook_list,
+ struct nf_hook_entry *hook_entry,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->out = outdev;
p->sk = sk;
p->net = net;
- p->hook_list = hook_list;
+ RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
-typedef unsigned int nf_hookfn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state);
-
-struct nf_hook_ops {
- struct list_head list;
- /* User fills in from here down. */
- nf_hookfn *hook;
- struct net_device *dev;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
- /* Hooks are ordered in ascending priority. */
- int priority;
-};
struct nf_sockopt_ops {
struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list;
+ struct nf_hook_entry *hook_head;
+ int ret = 1;
#ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
return 1;
#endif
- hook_list = &net->nf.hooks[pf][hook];
-
- if (!list_empty(hook_list)) {
+ rcu_read_lock();
+ hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_list, hook, thresh,
+ nf_hook_state_init(&state, hook_head, hook, thresh,
pf, indev, outdev, sk, net, okfn);
- return nf_hook_slow(skb, &state);
+
+ ret = nf_hook_slow(skb, &state);
}
- return 1;
+ rcu_read_unlock();
+
+ return ret;
}
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
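/*
 * Usage sketch (illustrative, not part of the patch): hook registration is
 * unchanged by the list_head -> singly linked RCU conversion above; only the
 * traversal in nf_hook_thresh() now walks nf_hook_entry chains under
 * rcu_read_lock().
 */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
	.hook     = my_hookfn,
	.pf       = NFPROTO_IPV4,
	.hooknum  = NF_INET_LOCAL_IN,
	.priority = NF_IP_PRI_FIRST,
};

/* nf_register_hook(&my_ops) at init, nf_unregister_hook(&my_ops) at exit */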
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
- unsigned int searched;
unsigned int found;
- unsigned int new;
unsigned int invalid;
unsigned int ignore;
- unsigned int delete;
- unsigned int delete_list;
unsigned int insert;
unsigned int insert_failed;
unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __be16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __be16 payload_len; /* size of ppp payload, not inc. gre header */
- __be16 call_id; /* peer's call_id for this session */
- __be32 seq; /* sequence number. Present if S==1 */
- __be32 ack; /* seq number of highest packet received by */
- /* sender in this session */
-};
+#include <net/gre.h>
+#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
- return !list_empty(&skb->dev->nf_hooks_ingress);
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
}
+/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
+ struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
- nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
- NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
- skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
static inline void nf_hook_ingress_init(struct net_device *dev)
{
- INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
}
#else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 4630eeae18e0..a78c35cff1ae 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -35,21 +35,34 @@ static inline void hardlockup_detector_disable(void) {}
* base function. Return whether such support was available,
* to allow calling code to fall back to some other mechanism:
*/
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_all_cpu_backtrace(true);
-
+ arch_trigger_cpumask_backtrace(cpu_online_mask, false);
return true;
}
+
static inline bool trigger_allbutself_cpu_backtrace(void)
{
- arch_trigger_all_cpu_backtrace(false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ return true;
+}
+
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+ arch_trigger_cpumask_backtrace(mask, false);
+ return true;
+}
+
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
return true;
}
/* generic implementation */
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -62,6 +75,14 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
{
return false;
}
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+ return false;
+}
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+ return false;
+}
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
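/*
 * Usage sketch (illustrative, not part of the patch): callers that previously
 * could only dump all CPUs (or all but self) can now target a cpumask or a
 * single CPU, with a graceful fallback when the architecture lacks
 * arch_trigger_cpumask_backtrace().
 */
#include <linux/nmi.h>
#include <linux/printk.h>

static void my_dump_stuck_cpu(int cpu)
{
	if (!trigger_single_cpu_backtrace(cpu))
		pr_warn("NMI backtrace unavailable, cpu%d not dumped\n", cpu);
}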
diff --git a/include/linux/of.h b/include/linux/of.h
index 3d9ff8e9d803..299aeb192727 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -291,20 +291,24 @@ extern int of_property_count_elems_of_size(const struct device_node *np,
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
-extern int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz);
-extern int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz);
-extern int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values,
- size_t sz);
+extern int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max);
+extern int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max);
+extern int of_property_read_variable_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz_min,
+ size_t sz_max);
extern int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value);
-extern int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values,
- size_t sz);
+extern int of_property_read_variable_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz_min,
+ size_t sz_max);
extern int of_property_read_string(const struct device_node *np,
const char *propname,
@@ -380,6 +384,122 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * dts entry of array should be like:
+ * property = /bits/ 8 <0x50 0x60 0x70>;
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * dts entry of array should be like:
+ * property = /bits/ 16 <0x5000 0x6000 0x7000>;
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
/*
* struct property *prop;
* const __be32 *p;
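/*
 * Usage sketch (illustrative, not part of the patch): the fixed-size wrappers
 * above preserve the old contract -- read exactly sz elements or fail -- while
 * drivers that accept a range can call the new *_variable_* primitives
 * directly.  The property name below is a hypothetical binding, e.g.
 * my-prop = <1 2 3>;
 */
#include <linux/of.h>

static int my_parse_triplet(const struct device_node *np, u32 vals[3])
{
	return of_property_read_u32_array(np, "my-prop", vals, 3);
}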
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 26c3302ae58f..4341f32516d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/errno.h>
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -66,6 +67,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
+extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
@@ -94,6 +96,7 @@ extern void early_get_first_memblock_info(void *, phys_addr_t *);
extern u64 of_flat_dt_translate_address(unsigned long node);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
+static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 092186c62ff4..3f87ea5b8bee 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -61,8 +61,6 @@ static inline int of_mm_gpiochip_add(struct device_node *np,
}
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-extern int of_gpiochip_add(struct gpio_chip *gc);
-extern void of_gpiochip_remove(struct gpio_chip *gc);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags);
@@ -86,9 +84,6 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
return -ENOSYS;
}
-static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
-static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
-
#endif /* CONFIG_OF_GPIO */
/**
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 1d99b61adc65..290081620b3e 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -297,6 +297,7 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
+#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
@@ -355,4 +356,22 @@ static inline int omap_lcd_dma_running(void)
}
#endif
+#else /* CONFIG_ARCH_OMAP */
+
+static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
+{
+ return NULL;
+}
+
+static inline int omap_request_dma(int dev_id, const char *dev_name,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data, int *dma_ch)
+{
+ return -ENODEV;
+}
+
+static inline void omap_free_dma(int ch) { }
+
+#endif /* CONFIG_ARCH_OMAP */
+
#endif /* __LINUX_OMAP_DMA_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 9e9d79e8efa5..35d0fd7a4948 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -29,8 +29,8 @@ struct gpmc_nand_regs;
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
#else
-static inline gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
- int cs)
+static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+ int cs)
{
return NULL;
}
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5bc0457ee3a8..b4e36e92bc87 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -34,23 +34,11 @@ struct oom_control {
* for display purposes.
*/
const int order;
-};
-
-/*
- * Types of limitations to the nodes from which allocations may occur
- */
-enum oom_constraint {
- CONSTRAINT_NONE,
- CONSTRAINT_CPUSET,
- CONSTRAINT_MEMORY_POLICY,
- CONSTRAINT_MEMCG,
-};
-enum oom_scan_t {
- OOM_SCAN_OK, /* scan thread and find its badness */
- OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */
- OOM_SCAN_ABORT, /* abort the iteration and return */
- OOM_SCAN_SELECT, /* always select this thread first */
+ /* Used by oom implementation, do not set */
+ unsigned long totalpages;
+ struct task_struct *chosen;
+ unsigned long chosen_points;
};
extern struct mutex oom_lock;
@@ -70,45 +58,27 @@ static inline bool oom_task_origin(const struct task_struct *p)
return p->signal->oom_flag_origin;
}
-extern void mark_oom_victim(struct task_struct *tsk);
-
-#ifdef CONFIG_MMU
-extern void wake_oom_reaper(struct task_struct *tsk);
-#else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline bool tsk_is_oom_victim(struct task_struct * tsk)
{
+ return tsk->signal->oom_mm;
}
-#endif
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
-extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
- unsigned int points, unsigned long totalpages,
- const char *message);
-
-extern void check_panic_on_oom(struct oom_control *oc,
- enum oom_constraint constraint);
-
-extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
- struct task_struct *task);
-
extern bool out_of_memory(struct oom_control *oc);
-extern void exit_oom_victim(struct task_struct *tsk);
+extern void exit_oom_victim(void);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
-extern bool oom_killer_disabled;
-extern bool oom_killer_disable(void);
+extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-bool task_will_free_mem(struct task_struct *task);
-
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
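/*
 * Usage sketch (illustrative, not part of the patch): oom_killer_disable() now
 * takes a timeout and waits for already selected OOM victims to exit, so a
 * suspend-style caller can bail out instead of blocking forever.  The 20s
 * value is a placeholder.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/oom.h>

static int my_quiesce_oom_killer(void)
{
	if (!oom_killer_disable(msecs_to_jiffies(20 * 1000)))
		return -EBUSY;		/* victims did not exit in time */

	/* ... freeze tasks, suspend devices, later oom_killer_enable() ... */
	return 0;
}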
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 03f2a3e7d76d..9298c393ddaa 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -7,6 +7,8 @@
struct pglist_data;
struct page_ext_operations {
+ size_t offset;
+ size_t size;
bool (*need)(void);
void (*init)(void);
};
@@ -42,12 +44,6 @@ enum page_ext_flags {
*/
struct page_ext {
unsigned long flags;
-#ifdef CONFIG_PAGE_OWNER
- unsigned int order;
- gfp_t gfp_mask;
- int last_migrate_reason;
- depot_stack_handle_t handle;
-#endif
};
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
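/*
 * Usage sketch (illustrative, not part of the patch): with the per-client
 * fields dropped from struct page_ext, a client declares how much extra space
 * it needs via .size and later locates its data at .offset, which the core
 * fills in during initialization.  All my_* names are hypothetical.
 */
#include <linux/gfp.h>
#include <linux/page_ext.h>

struct my_ext_data {
	unsigned int order;
	gfp_t gfp_mask;
};

static bool my_ext_need(void) { return true; }
static void my_ext_init(void) { }

static struct page_ext_operations my_ext_ops = {
	.size = sizeof(struct my_ext_data),
	.need = my_ext_need,
	.init = my_ext_init,
};

static struct my_ext_data *my_ext_data(struct page_ext *page_ext)
{
	return (void *)page_ext + my_ext_ops.offset;
}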
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 30583ab0ffb1..2be728d156b5 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,6 +14,8 @@ extern void __split_page_owner(struct page *page, unsigned int order);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(struct page *page);
+extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ pg_data_t *pgdat, struct zone *zone);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 01e84436cddf..794dbcb91084 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -25,6 +25,8 @@ enum mapping_flags {
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+ /* writeback related tags are not used */
+ AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
};
static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -64,6 +66,16 @@ static inline int mapping_exiting(struct address_space *mapping)
return test_bit(AS_EXITING, &mapping->flags);
}
+static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
+{
+ set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
+}
+
+static inline int mapping_use_writeback_tags(struct address_space *mapping)
+{
+ return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
@@ -396,7 +408,7 @@ static inline loff_t page_offset(struct page *page)
static inline loff_t page_file_offset(struct page *page)
{
- return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
+ return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
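/*
 * Usage sketch (illustrative, not part of the patch): an address_space whose
 * pages are never looked up by writeback tag (the swap cache is the intended
 * user) opts out once, and tag-maintenance paths check the accessor before
 * touching the radix tree tags.
 */
#include <linux/pagemap.h>

static void my_init_no_tag_mapping(struct address_space *mapping)
{
	mapping_set_no_writeback_tags(mapping);
}

static bool my_should_tag(struct address_space *mapping)
{
	return mapping_use_writeback_tags(mapping);
}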
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7cc0acba8939..0e49f70dbd9b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -187,8 +187,9 @@ enum pci_irq_reroute_variant {
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
- PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
- PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
+ PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+ PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
+ PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
};
/* These values come from the PCI Express Spec */
@@ -268,6 +269,9 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
+#ifdef CONFIG_PCIEAER
+ u16 aer_cap; /* AER capability offset */
+#endif
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -308,6 +312,9 @@ struct pci_dev {
powered on/off by the
corresponding bridge */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
+ unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
+ controlled exclusively by
+ user sysfs */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -367,6 +374,12 @@ struct pci_dev {
int rom_attr_enabled; /* has display of the rom attribute been enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+
+#ifdef CONFIG_PCIE_PTM
+ unsigned int ptm_root:1;
+ unsigned int ptm_enabled:1;
+ u8 ptm_granularity;
+#endif
#ifdef CONFIG_PCI_MSI
const struct attribute_group **msi_irq_groups;
#endif
@@ -1368,9 +1381,11 @@ static inline bool pcie_aspm_support_enabled(void) { return false; }
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
bool pci_aer_available(void);
+int pci_aer_init(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline bool pci_aer_available(void) { return false; }
+static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -1402,6 +1417,13 @@ static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
#endif
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
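/*
 * Usage sketch (illustrative, not part of the patch): an endpoint driver asks
 * the core to enable Precision Time Measurement up the hierarchy and may read
 * back the effective clock granularity.
 */
#include <linux/pci.h>

static void my_try_enable_ptm(struct pci_dev *pdev)
{
	u8 granularity;

	if (pci_enable_ptm(pdev, &granularity))
		dev_info(&pdev->dev, "PTM not available\n");
	else
		dev_info(&pdev->dev, "PTM enabled, granularity %u\n", granularity);
}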
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5c5362584aba..060d0ede88df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -690,6 +690,10 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
+#ifdef CONFIG_BPF_SYSCALL
+ perf_overflow_handler_t orig_overflow_handler;
+ struct bpf_prog *prog;
+#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -802,6 +806,11 @@ struct perf_output_handle {
int page;
};
+struct bpf_perf_event_data_kern {
+ struct pt_regs *regs;
+ struct perf_sample_data *data;
+};
+
#ifdef CONFIG_CGROUP_PERF
/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
+ return "trgmii";
default:
return "unknown";
}
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index f08b67238b58..ee1bed7dbfc6 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -36,6 +36,7 @@ enum phy_mode {
* @power_on: powering on the phy
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
+ * @reset: resetting the phy
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -44,6 +45,7 @@ struct phy_ops {
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
int (*set_mode)(struct phy *phy, enum phy_mode mode);
+ int (*reset)(struct phy *phy);
struct module *owner;
};
@@ -136,6 +138,7 @@ int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
int phy_set_mode(struct phy *phy, enum phy_mode mode);
+int phy_reset(struct phy *phy);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
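The new .reset op and its phy_reset() wrapper give consumers a generic way to ask the provider for a PHY reset. A hedged sketch of both sides; the foo_* provider is hypothetical and its hardware access is elided:

#include <linux/module.h>
#include <linux/phy/phy.h>

/* Provider side: implement the new hook (hardware access elided). */
static int foo_phy_reset(struct phy *phy)
{
	/* toggle a controller-specific reset bit here */
	return 0;
}

static const struct phy_ops foo_phy_ops = {
	.reset	= foo_phy_reset,
	.owner	= THIS_MODULE,
};

/* Consumer side: request a reset through the generic PHY API. */
static int foo_recover_link(struct phy *phy)
{
	return phy_reset(phy);
}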
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 918b117a7cd3..34cce96741bc 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -40,6 +40,7 @@ struct pid_namespace {
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct work_struct proc_work;
kgid_t pid_gid;
int hide_pid;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 24f5470d3944..e7497c9dde7f 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -66,15 +66,10 @@ struct pipe_inode_info {
*
* ->confirm()
* ->steal()
- * ...
- * ->map()
- * ...
- * ->unmap()
*
- * That is, ->map() must be called on a confirmed buffer,
- * same goes for ->steal(). See below for the meaning of each
- * operation. Also see kerneldoc in fs/pipe.c for the pipe
- * and generic variants of these hooks.
+ * That is, ->steal() must be called on a confirmed buffer.
+ * See below for the meaning of each operation. Also see kerneldoc
+ * in fs/pipe.c for the pipe and generic variants of these hooks.
*/
struct pipe_buf_operations {
/*
@@ -115,6 +110,53 @@ struct pipe_buf_operations {
void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
+/**
+ * pipe_buf_get - get a reference to a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to get a reference to
+ */
+static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ buf->ops->get(pipe, buf);
+}
+
+/**
+ * pipe_buf_release - put a reference to a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to put a reference to
+ */
+static inline void pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ const struct pipe_buf_operations *ops = buf->ops;
+
+ buf->ops = NULL;
+ ops->release(pipe, buf);
+}
+
+/**
+ * pipe_buf_confirm - verify contents of the pipe buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to confirm
+ */
+static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return buf->ops->confirm(pipe, buf);
+}
+
+/**
+ * pipe_buf_steal - attempt to take ownership of a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to attempt to steal
+ */
+static inline int pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return buf->ops->steal(pipe, buf);
+}
+
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
@@ -129,7 +171,6 @@ extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
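The four wrappers above replace open-coded buf->ops->... calls at the call sites. A hedged sketch of a consumer using them, assuming the curbuf/nrbufs ring layout this header uses in this era; the function itself and its locking context are hypothetical:

#include <linux/pipe_fs_i.h>

/* Hypothetical consumer of one buffer, called with the pipe mutex held. */
static int foo_consume_one(struct pipe_inode_info *pipe)
{
	struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
	int ret;

	ret = pipe_buf_confirm(pipe, buf);	/* was buf->ops->confirm(pipe, buf) */
	if (ret)
		return ret;

	/* ... copy data out of buf->page here ... */

	pipe_buf_release(pipe, buf);		/* was buf->ops->release(pipe, buf) */
	pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
	pipe->nrbufs--;
	return 0;
}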
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index d15d8ba8cc24..5f0e11e7354c 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -23,6 +23,7 @@
* @dst_id: dst request line
* @m_master: memory master for transfers on allocated channel
* @p_master: peripheral master for transfers on allocated channel
* @hs_polarity: set active low polarity of handshake interface
*/
struct dw_dma_slave {
struct device *dma_dev;
@@ -30,6 +31,7 @@ struct dw_dma_slave {
u8 dst_id;
u8 m_master;
u8 p_master;
+ bool hs_polarity;
};
/**
@@ -38,6 +40,7 @@ struct dw_dma_slave {
* @is_private: The device channels should be marked as private and not for use
* by the general purpose DMA channel allocator.
* @is_memcpy: The device channels do support memory-to-memory transfers.
* @is_nollp: The device channels do not support multi-block transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -49,6 +52,7 @@ struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
bool is_memcpy;
+ bool is_nollp;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 0c72886030ef..422d4504dbac 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -28,7 +28,7 @@ struct sram_platdata {
int granularity;
};
-#ifdef CONFIG_ARM
+#ifdef CONFIG_MMP_SRAM
extern struct gen_pool *sram_get_gpool(char *pool_name);
#else
static inline struct gen_pool *sram_get_gpool(char *pool_name)
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
index 89ba1b0c90e4..4f9aba405e96 100644
--- a/include/linux/platform_data/dma-s3c24xx.h
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -30,16 +30,22 @@ struct s3c24xx_dma_channel {
u16 chansel;
};
+struct dma_slave_map;
+
/**
* struct s3c24xx_dma_platdata - platform specific settings
* @num_phy_channels: number of physical channels
* @channels: array of virtual channel descriptions
* @num_channels: number of virtual channels
+ * @slave_map: dma slave map matching table
+ * @slavecnt: number of elements in slave_map
*/
struct s3c24xx_dma_platdata {
int num_phy_channels;
struct s3c24xx_dma_channel *channels;
int num_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
};
struct dma_chan;
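The new slave_map/slavecnt fields carry a dmaengine filter-free lookup table. A hedged sketch of the kind of table a board file might supply, assuming the generic struct dma_slave_map (devname/slave/param) from linux/dmaengine.h; the device names and request-line numbers are illustrative only:

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-s3c24xx.h>

/* Illustrative request-line numbers; real boards use their DMACH_* enums. */
#define FOO_DMACH_SDI		13
#define FOO_DMACH_UART0_TX	19

static const struct dma_slave_map foo_dma_slave_map[] = {
	{ "s3c2440-sdi",    "rx-tx", (void *)FOO_DMACH_SDI },
	{ "s3c2440-uart.0", "tx",    (void *)FOO_DMACH_UART0_TX },
};

static struct s3c24xx_dma_platdata foo_dma_platdata = {
	/* num_phy_channels, channels and num_channels set up as before */
	.slave_map = foo_dma_slave_map,
	.slavecnt  = ARRAY_SIZE(foo_dma_slave_map),
};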
diff --git a/include/linux/mfd/htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h
index b4201c971367..b4201c971367 100644
--- a/include/linux/mfd/htc-egpio.h
+++ b/include/linux/platform_data/gpio-htc-egpio.h
diff --git a/include/linux/platform_data/gpio-lpc32xx.h b/include/linux/platform_data/gpio-lpc32xx.h
deleted file mode 100644
index a544e962a818..000000000000
--- a/include/linux/platform_data/gpio-lpc32xx.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_GPIO_LPC32XX_H
-#define __MACH_GPIO_LPC32XX_H
-
-/*
- * Note!
- * Muxed GP pins need to be setup to the GP state in the board level
- * code prior to using this driver.
- * GPI pins : 28xP3 group
- * GPO pins : 24xP3 group
- * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group
- */
-
-#define LPC32XX_GPIO_P0_MAX 8
-#define LPC32XX_GPIO_P1_MAX 24
-#define LPC32XX_GPIO_P2_MAX 13
-#define LPC32XX_GPIO_P3_MAX 6
-#define LPC32XX_GPI_P3_MAX 29
-#define LPC32XX_GPO_P3_MAX 24
-
-#define LPC32XX_GPIO_P0_GRP 0
-#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
-#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
-#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
-#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
-#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
-
-/*
- * A specific GPIO can be selected with this macro
- * ie, GPIO_05 can be selected with LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5)
- * See the LPC32x0 User's guide for GPIO group numbers
- */
-#define LPC32XX_GPIO(x, y) ((x) + (y))
-
-#endif /* __MACH_GPIO_LPC32XX_H */
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
index bfbd12b41162..71a1b2399c48 100644
--- a/include/linux/platform_data/remoteproc-omap.h
+++ b/include/linux/platform_data/remoteproc-omap.h
@@ -39,9 +39,9 @@ struct omap_rproc_pdata {
const char *firmware;
const char *mbox_name;
const struct rproc_ops *ops;
- int (*device_enable) (struct platform_device *pdev);
- int (*device_shutdown) (struct platform_device *pdev);
- void(*set_bootaddr)(u32);
+ int (*device_enable)(struct platform_device *pdev);
+ int (*device_shutdown)(struct platform_device *pdev);
+ void (*set_bootaddr)(u32);
};
#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
index 6b750c1a45fa..b04be59f914c 100644
--- a/include/linux/power/bq24735-charger.h
+++ b/include/linux/power/bq24735-charger.h
@@ -28,10 +28,6 @@ struct bq24735_platform {
const char *name;
- int status_gpio;
- int status_gpio_active_low;
- bool status_gpio_valid;
-
bool ext_control;
char **supplied_to;
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index b50c0492629d..e30deb046156 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -58,6 +58,7 @@ struct bq27xxx_device_info {
unsigned long last_update;
struct delayed_work work;
struct power_supply *bat;
+ struct list_head list;
struct mutex lock;
u8 *regs;
};
diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h
index 2b0a9d9ff57e..519b8b43239a 100644
--- a/include/linux/power/sbs-battery.h
+++ b/include/linux/power/sbs-battery.h
@@ -26,17 +26,13 @@
/**
* struct sbs_platform_data - platform data for sbs devices
- * @battery_detect: GPIO which is used to detect battery presence
- * @battery_detect_present: gpio state when battery is present (0 / 1)
* @i2c_retry_count: # of times to retry on i2c IO failure
* @poll_retry_count: # of times to retry looking for new status after
* external change notification
*/
struct sbs_platform_data {
- int battery_detect;
- int battery_detect_present;
- int i2c_retry_count;
- int poll_retry_count;
+ u32 i2c_retry_count;
+ u32 poll_retry_count;
};
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 696a56be7d3e..eac1af8502bb 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -16,6 +16,7 @@ static inline int printk_get_level(const char *buffer)
switch (buffer[1]) {
case '0' ... '7':
case 'd': /* KERN_DEFAULT */
+ case 'c': /* KERN_CONT */
return buffer[1];
}
}
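The new 'c' case means printk_get_level() now treats KERN_CONT ("\001" "c") as a recognised level prefix rather than falling through to the default. A minimal sketch of the calling pattern that produces such a prefix; the function and message are illustrative:

#include <linux/printk.h>

static void foo_report(int value)
{
	pr_info("foo: value is");		/* opens the line at KERN_INFO */
	printk(KERN_CONT " %d\n", value);	/* "\001c" prefix, now matched by printk_get_level() */
}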
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index de0e7719d4c5..12cb8bd81d2d 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -18,6 +18,8 @@ struct proc_ns_operations {
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+ struct user_namespace *(*owner)(struct ns_common *ns);
+ struct ns_common *(*get_parent)(struct ns_common *ns);
};
extern const struct proc_ns_operations netns_operations;
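The two new callbacks let the nsfs layer query ownership and walk up hierarchical namespaces. A hedged sketch of what an .owner implementation could look like; the foo_namespace structure and its fields are hypothetical, only the callback signature comes from the hunk above:

#include <linux/kernel.h>
#include <linux/proc_ns.h>
#include <linux/user_namespace.h>

/* Hypothetical namespace type embedding the common header. */
struct foo_namespace {
	struct ns_common	ns;
	struct user_namespace	*user_ns;
};

static struct user_namespace *foons_owner(struct ns_common *ns)
{
	return container_of(ns, struct foo_namespace, ns)->user_ns;
}

/* wired up next to .get/.put/.install in the proc_ns_operations table */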
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 899e95e84400..92013cc9cc8c 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,12 +22,13 @@
#ifndef _LINUX_PSTORE_H
#define _LINUX_PSTORE_H
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
/* types */
enum pstore_type_id {
@@ -68,13 +69,21 @@ struct pstore_info {
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, const char *buf, bool compressed,
size_t size, struct pstore_info *psi);
+ int (*write_buf_user)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, const char __user *buf,
+ bool compressed, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
int count, struct timespec time,
struct pstore_info *psi);
void *data;
};
-#define PSTORE_FLAGS_FRAGILE 1
+#define PSTORE_FLAGS_DMESG (1 << 0)
+#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG
+#define PSTORE_FLAGS_CONSOLE (1 << 1)
+#define PSTORE_FLAGS_FTRACE (1 << 2)
+#define PSTORE_FLAGS_PMSG (1 << 3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
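PSTORE_FLAGS_FRAGILE is kept as an alias for the dmesg bit, and the new per-front-end bits let a backend declare which records it handles. A hedged sketch of a backend registration; whether struct pstore_info exposes a flags field for these bits is not visible in this hunk, so treat that as an assumption:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pstore.h>

static struct pstore_info foo_pstore_info = {
	.owner	= THIS_MODULE,
	.name	= "foo",
	/* assumed: a flags field on pstore_info consumes these bits */
	.flags	= PSTORE_FLAGS_DMESG | PSTORE_FLAGS_CONSOLE,
	/* .open/.read/.write/.erase hooks omitted */
};

static int __init foo_pstore_init(void)
{
	return pstore_register(&foo_pstore_info);
}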
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4660aaa3195e..c668c861c96c 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -17,11 +17,12 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
+#include <linux/compiler.h>
#include <linux/device.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/init.h>
struct persistent_ram_buffer;
struct rs_control;
@@ -59,7 +60,9 @@ void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
+ unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int count);
void persistent_ram_save_old(struct persistent_ram_zone *prz);
size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
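persistent_ram_write_user() mirrors persistent_ram_write() but takes a __user pointer, so callers no longer need to bounce data through a kernel buffer first. A hedged sketch of the two call sites; obtaining the persistent_ram_zone handle (normally via persistent_ram_new()) is assumed to happen elsewhere:

#include <linux/pstore_ram.h>
#include <linux/uaccess.h>

/* Kernel-space record, e.g. console output. */
static int foo_log_kernel(struct persistent_ram_zone *prz,
			  const char *buf, unsigned int count)
{
	return persistent_ram_write(prz, buf, count);
}

/* Record copied straight from user space, e.g. a pmsg-style write. */
static int foo_log_user(struct persistent_ram_zone *prz,
			const char __user *buf, unsigned int count)
{
	return persistent_ram_write_user(prz, buf, count);
}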
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
*
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
*/
extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
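The added kernel-doc spells out three distinct outcomes, so callers have to check both IS_ERR() and NULL. A hedged sketch of driver-side handling; the foo_priv structure is hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

struct foo_priv {				/* hypothetical driver state */
	struct device		*dev;
	struct ptp_clock_info	ptp_info;
	struct ptp_clock	*ptp;
};

static int foo_register_phc(struct foo_priv *priv)
{
	priv->ptp = ptp_clock_register(&priv->ptp_info, priv->dev);
	if (IS_ERR(priv->ptp))			/* genuine failure */
		return PTR_ERR(priv->ptp);

	if (!priv->ptp)				/* PHC support compiled out: run without it */
		dev_info(priv->dev, "PTP clock support not available\n");

	return 0;
}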
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 2a097d176ba9..2d6f0c39ed68 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -83,7 +83,6 @@
#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
-#ifdef CONFIG_ARCH_PXA
#define RX_THRESH_DFLT 8
#define TX_THRESH_DFLT 8
@@ -95,19 +94,16 @@
#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
-#else
-
-#define RX_THRESH_DFLT 2
-#define TX_THRESH_DFLT 2
+#define RX_THRESH_CE4100_DFLT 2
+#define TX_THRESH_CE4100_DFLT 2
-#define SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
-#define SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
+#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
-#define SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
-#define SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
-#endif
+#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
+#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 40c0ada01806..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -5,28 +5,77 @@
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
+#ifndef _COMMON_HSI_H
+#define _COMMON_HSI_H
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+ (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val)); \
+ } while (0)
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
-#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE 16
#define USTORM_QZONE_SIZE 8
#define XSTORM_QZONE_SIZE 8
#define YSTORM_QZONE_SIZE 0
#define PSTORM_QZONE_SIZE 0
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 5
+#define FW_REVISION_VERSION 10
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -83,6 +132,20 @@
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
+/* Clock values */
+#define MASTER_CLK_FREQ_E4 (375e6)
+#define STORM_CLK_FREQ_E4 (1000e6)
+#define CLK25M_CLK_FREQ_E4 (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+
/*****************/
/* CDU CONSTANTS */
/*****************/
@@ -90,6 +153,8 @@
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
/*****************/
/* DQ CONSTANTS */
/*****************/
@@ -115,6 +180,11 @@
#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -159,13 +229,16 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -178,9 +251,45 @@
#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
/* UCM agg counter flag selection (FW) */
-#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
-#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
-
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -214,15 +323,17 @@
*/
#define CM_TX_PQ_BASE 0x200
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
-#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
#define QM_BYTE_CRD_REG_WIDTH 24
-#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
-#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
-#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
@@ -287,6 +398,17 @@
/* PXP CONSTANTS */
/*****************/
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
@@ -334,6 +456,52 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+/* PF BAR */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -399,6 +567,20 @@
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/******************/
+/* SDMs CONSTANTS */
+/******************/
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
#define SDM_COMP_TYPE_NONE 0
#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -424,6 +606,8 @@
/* PRS CONSTANTS */
/*****************/
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
+
/* Async data KCQ CQE */
struct async_data {
__le32 cid;
@@ -440,20 +624,6 @@ struct coalescing_timeset {
#define COALESCING_TIMESET_VALID_SHIFT 7
};
-struct common_prs_pf_msg_info {
- __le32 value;
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
-};
-
struct common_queue_zone {
__le16 ring_drv_data_consumer;
__le16 reserved;
@@ -473,6 +643,19 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
+struct iscsi_eqe_data {
+ __le32 cid;
+ __le16 conn_id;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -488,8 +671,10 @@ struct initial_cleanup_eqe_data {
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct regpair roce_handle;
};
/* Event Ring Entry */
@@ -616,6 +801,52 @@ enum db_dest {
MAX_DB_DEST
};
+/* Enum of doorbell DPM types */
+enum db_dpm_type {
+ DPM_LEGACY,
+ DPM_ROCE,
+ DPM_L2_INLINE,
+ DPM_L2_BD,
+ MAX_DB_DPM_TYPE
+};
+
+/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
+struct db_l2_dpm_data {
+ __le16 icid;
+ __le16 bd_prod;
+ __le32 params;
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
+struct db_l2_dpm_sge {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 bitfields;
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
@@ -627,6 +858,49 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in PWM mode */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/* Parameters to RoCE firmware, passed in EDPM doorbell */
+struct db_roce_dpm_params {
+ __le32 params;
+#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+};
+
+/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+struct db_roce_dpm_data {
+ __le16 icid;
+ __le16 prod_val;
+ struct db_roce_dpm_params params;
+};
+
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
@@ -764,6 +1038,19 @@ struct pxp_ptt_entry {
struct pxp_pretend_cmd pretend;
};
+/* VF Zone A Permission Register. */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
/* RSS hash type */
struct rdif_task_context {
__le32 initial_ref_tag;
@@ -831,6 +1118,7 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
@@ -942,7 +1230,7 @@ struct tdif_task_context {
};
struct timers_context {
- __le32 logical_client0;
+ __le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
@@ -951,7 +1239,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
- __le32 logical_client1;
+ __le32 logical_client_1;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
@@ -960,7 +1248,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
- __le32 logical_client2;
+ __le32 logical_client_2;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
@@ -978,3 +1266,4 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
};
#endif /* __COMMON_HSI__ */
+#endif
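With the dma_addr_t helpers now living in common_hsi.h (they are dropped from qed_chain.h further below), any qed component can split and recombine 64-bit DMA addresses. A hedged sketch of both directions, assuming only the regpair lo/hi layout these macros already rely on:

#include <linux/qed/common_hsi.h>

/* Split a DMA address into a little-endian regpair. */
static void foo_fill_regpair(struct regpair *rp, dma_addr_t addr)
{
	DMA_REGPAIR_LE(*rp, addr);		/* fills rp->hi / rp->lo */
}

/* Recombine the two halves into a dma_addr_t (pass by value: the macro
 * does not parenthesize its argument before the member access). */
static dma_addr_t foo_read_regpair(struct regpair rp)
{
	return HILO_DMA_REGPAIR(rp);
}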
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index b5ebc697d05f..1aa0727c4136 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -13,9 +13,12 @@
/* ETH FW CONSTANTS */
/********************/
#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 0
-#define ETH_CACHE_LINE_SIZE 64
+#define ETH_HSI_VER_MINOR 10
+
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
@@ -24,15 +27,25 @@
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
@@ -59,6 +72,8 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+/* Control frame check constants */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
struct eth_tx_1st_bd_flags {
u8 bitfields;
@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
/* The parsing information data for the first tx bd of a given packet. */
struct eth_tx_data_1st_bd {
- __le16 vlan;
- u8 nbds;
- struct eth_tx_1st_bd_flags bd_flags;
- __le16 bitfields;
+ __le16 vlan;
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
- __le16 tunn_ip_size;
+ __le16 tunn_ip_size;
__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
+/* Firmware data for L2-EDPM packet. */
+struct eth_edpm_fw_data {
+ struct eth_tx_data_1st_bd data_1st_bd;
+ struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
struct eth_fast_path_cqe_fw_debug {
- u8 reserved0;
- u8 reserved1;
__le16 reserved2;
};
@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};
+/* PMD flow control bits */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
+
/* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe {
u8 type;
@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[7];
+ u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug;
u8 reserved1[3];
- u8 flags;
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-continue ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_cont_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
- u8 reserved[5];
- u8 reserved1;
- __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
+ u8 reserved1;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-end ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_end_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 total_packet_len;
- u8 num_of_bds;
- u8 end_reason;
- __le16 num_of_coalesced_segs;
- __le32 ts_delta;
- __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
- u8 reserved1[3];
- u8 reserved2;
- __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 total_packet_len;
+ u8 num_of_bds;
+ u8 end_reason;
+ __le16 num_of_coalesced_segs;
+ __le32 ts_delta;
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved1;
+ u8 reserved2;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-start ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_start_cqe {
- u8 type;
- u8 bitfields;
+ u8 type;
+ u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
- __le16 seg_len;
+ __le16 seg_len;
struct parsing_and_err_flags pars_flags;
- __le16 vlan_tag;
- __le32 rss_hash;
- __le16 len_on_first_bd;
- u8 placement_offset;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
- u8 tpa_agg_index;
- u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ u8 tpa_agg_index;
+ u8 header_len;
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
struct eth_fast_path_cqe_fw_debug fw_debug;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
u8 reserved[25];
__le16 echo;
u8 reserved1;
- u8 flags;
-/* for PMD mode - valid indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
-/* for PMD mode - valid toggle indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* union for all ETH Rx CQE types */
@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
enum eth_rx_tunn_type {
ETH_RX_NO_TUNN,
ETH_RX_TUNN_GENEVE,
@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
- __le16 lso_mss;
- __le16 bitfields;
+ __le16 lso_mss;
+ __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
- u8 tunn_l4_hdr_start_offset_w;
- u8 tunn_hdr_size_w;
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
@@ -355,10 +382,10 @@ struct eth_tx_bd {
};
union eth_tx_bd_types {
- struct eth_tx_1st_bd first_bd;
- struct eth_tx_2nd_bd second_bd;
- struct eth_tx_3rd_bd third_bd;
- struct eth_tx_bd reg_bd;
+ struct eth_tx_1st_bd first_bd;
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_bd reg_bd;
};
/* Mstorm Queue Zone */
@@ -389,8 +416,8 @@ struct eth_db_data {
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 bd_prod;
+ u8 agg_flags;
+ __le16 bd_prod;
};
#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b3c0feb15ae9..8f64b1223c2f 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -688,8 +688,7 @@ union iscsi_cqe {
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
- ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
- ,
+ ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
+ ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED8,
RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID
};
@@ -883,6 +883,16 @@ union iscsi_seq_num {
__le16 r2t_sn;
};
+struct iscsi_spe_conn_mac_update {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ u8 reserved0[2];
+};
+
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
};
-struct ooo_opaque {
- __le32 cid;
- u8 drop_isle;
- u8 drop_size;
- u8 ooo_opcode;
- u8 ooo_isle;
-};
-
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7e441bdeabdc..72d88cf3ca25 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -16,19 +16,6 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
-/* dma_addr_t manip */
-#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
-#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
-#define DMA_REGPAIR_LE(x, val) do { \
- (x).hi = DMA_HI_LE((val)); \
- (x).lo = DMA_LO_LE((val)); \
- } while (0)
-
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
-#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
-
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4475a9d8ae15..33c24ebc9b7f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
u8 port_mac[ETH_ALEN];
u8 num_vlan_filters;
+
+ /* Legacy VF - this affects the datapath, so qede has to know */
+ bool is_legacy;
};
struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d6c4177df7cb..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#define QED_ROCE_PROTOCOL_INDEX (3)
+
#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
- bool rdma_supported;
-
u32 flash_size;
u8 mf_mode;
bool tx_switching;
+ bool rdma_supported;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_CNQ,
};
enum qed_protocol {
@@ -276,6 +278,21 @@ enum qed_protocol {
QED_PROTOCOL_ISCSI,
};
+enum qed_link_mode_bits {
+ QED_LM_FIBRE_BIT = BIT(0),
+ QED_LM_Autoneg_BIT = BIT(1),
+ QED_LM_Asym_Pause_BIT = BIT(2),
+ QED_LM_Pause_BIT = BIT(3),
+ QED_LM_1000baseT_Half_BIT = BIT(4),
+ QED_LM_1000baseT_Full_BIT = BIT(5),
+ QED_LM_10000baseKR_Full_BIT = BIT(6),
+ QED_LM_25000baseKR_Full_BIT = BIT(7),
+ QED_LM_40000baseLR4_Full_BIT = BIT(8),
+ QED_LM_50000baseKR2_Full_BIT = BIT(9),
+ QED_LM_100000baseKR4_Full_BIT = BIT(10),
+ QED_LM_COUNT = 11
+};
+
struct qed_link_params {
bool link_up;
@@ -303,9 +320,11 @@ struct qed_link_params {
struct qed_link_output {
bool link_up;
- u32 supported_caps; /* In SUPPORTED defs */
- u32 advertised_caps; /* In ADVERTISED defs */
- u32 lp_caps; /* In ADVERTISED defs */
+ /* In QED_LM_* defs */
+ u32 supported_caps;
+ u32 advertised_caps;
+ u32 lp_caps;
+
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
@@ -438,6 +457,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+
+ int (*dbg_all_data_size) (struct qed_dev *cdev);
+
/**
* @brief can_link_change - can the instance change the link or not
*
@@ -606,8 +629,9 @@ enum DP_MODULE {
QED_MSG_SP = 0x100000,
QED_MSG_STORAGE = 0x200000,
QED_MSG_CXT = 0x800000,
+ QED_MSG_LL2 = 0x1000000,
QED_MSG_ILT = 0x2000000,
- QED_MSG_ROCE = 0x4000000,
+ QED_MSG_RDMA = 0x4000000,
QED_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
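qed_link_output now reports capabilities in the driver-private QED_LM_* layout instead of the old SUPPORTED_*/ADVERTISED_* defines, so the ethernet driver has to translate before handing them to ethtool. A minimal hedged sketch of testing one bit; the full translation table is omitted and the function name is made up:

#include <linux/qed/qed_if.h>

/* Did the link partner advertise 25G KR, per the new bit layout? */
static bool foo_lp_has_25g(const struct qed_link_output *link)
{
	return !!(link->lp_caps & QED_LM_25000baseKR_Full_BIT);
}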
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_IF_H
+#define _QED_LL2_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_ll2_stats {
+ u64 gsi_invalid_hdr;
+ u64 gsi_invalid_pkt_length;
+ u64 gsi_unsupported_pkt_typ;
+ u64 gsi_crcchksm_error;
+
+ u64 packet_too_big_discard;
+ u64 no_buff_discard;
+
+ u64 rcv_ucast_bytes;
+ u64 rcv_mcast_bytes;
+ u64 rcv_bcast_bytes;
+ u64 rcv_ucast_pkts;
+ u64 rcv_mcast_pkts;
+ u64 rcv_bcast_pkts;
+
+ u64 sent_ucast_bytes;
+ u64 sent_mcast_bytes;
+ u64 sent_bcast_bytes;
+ u64 sent_ucast_pkts;
+ u64 sent_mcast_pkts;
+ u64 sent_bcast_pkts;
+};
+
+#define QED_LL2_UNUSED_HANDLE (0xff)
+
+struct qed_ll2_cb_ops {
+ int (*rx_cb)(void *, struct sk_buff *, u32, u32);
+ int (*tx_cb)(void *, struct sk_buff *, bool);
+};
+
+struct qed_ll2_params {
+ u16 mtu;
+ bool drop_ttl0_packets;
+ bool rx_vlan_stripping;
+ u8 tx_tc;
+ bool frags_mapped;
+ u8 ll2_mac_address[ETH_ALEN];
+};
+
+struct qed_ll2_ops {
+/**
+ * @brief start - initializes ll2
+ *
+ * @param cdev
+ * @param params - protocol driver configuration for the ll2.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+/**
+ * @brief stop - stops the ll2
+ *
+ * @param cdev
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*stop)(struct qed_dev *cdev);
+
+/**
+ * @brief start_xmit - transmits an skb over the ll2 interface
+ *
+ * @param cdev
+ * @param skb
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+
+/**
+ * @brief register_cb_ops - protocol driver registers the callbacks for Rx/Tx
+ * packets. Should be called before `start'.
+ *
+ * @param cdev
+ * @param cookie - to be passed to the callback functions.
+ * @param ops - the callback functions to register for Rx / Tx.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ void (*register_cb_ops)(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie);
+
+/**
+ * @brief get LL2 related statistics
+ *
+ * @param cdev
+ * @param stats - pointer to struct that would be filled with stats
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+};
+
+#ifdef CONFIG_QED_LL2
+int qed_ll2_alloc_if(struct qed_dev *);
+void qed_ll2_dealloc_if(struct qed_dev *);
+#else
+static const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = NULL,
+ .stop = NULL,
+ .start_xmit = NULL,
+ .register_cb_ops = NULL,
+ .get_stats = NULL,
+};
+
+static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+}
+#endif
+#endif
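A hedged sketch of how a protocol driver might drive this new interface: register the Rx/Tx callbacks first (as the kernel-doc above requires), then start the connection. How the qed_ll2_ops table and qed_dev handle are obtained from the qed core is assumed and not shown:

#include <linux/etherdevice.h>
#include <linux/qed/qed_ll2_if.h>

static int foo_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	/* hand the frame to the protocol layer here (hypothetical) */
	dev_kfree_skb_any(skb);
	return 0;
}

static int foo_ll2_tx_done(void *cookie, struct sk_buff *skb, bool success)
{
	dev_kfree_skb_any(skb);
	return 0;
}

static const struct qed_ll2_cb_ops foo_ll2_cb_ops = {
	.rx_cb = foo_ll2_rx,
	.tx_cb = foo_ll2_tx_done,
};

static int foo_ll2_bring_up(const struct qed_ll2_ops *ops, struct qed_dev *cdev,
			    void *cookie, const u8 *mac)
{
	struct qed_ll2_params params = {
		.mtu			= 1500,
		.drop_ttl0_packets	= true,
		.rx_vlan_stripping	= true,
	};

	ether_addr_copy(params.ll2_mac_address, mac);

	ops->register_cb_ops(cdev, &foo_ll2_cb_ops, cookie);	/* before start() */
	return ops->start(cdev, &params);
}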
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_IF_H
+#define _QED_ROCE_IF_H
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+ /* Light L2 TX Destination to the Network */
+ QED_ROCE_LL2_TX_DEST_NW,
+
+ /* Light L2 TX Destination to the Loopback */
+ QED_ROCE_LL2_TX_DEST_LB,
+ QED_ROCE_LL2_TX_DEST_MAX
+};
+
+#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
+
+/* rdma interface */
+
+enum qed_roce_qp_state {
+ QED_ROCE_QP_STATE_RESET,
+ QED_ROCE_QP_STATE_INIT,
+ QED_ROCE_QP_STATE_RTR,
+ QED_ROCE_QP_STATE_RTS,
+ QED_ROCE_QP_STATE_SQD,
+ QED_ROCE_QP_STATE_ERR,
+ QED_ROCE_QP_STATE_SQE
+};
+
+enum qed_rdma_tid_type {
+ QED_RDMA_TID_REGISTERED_MR,
+ QED_RDMA_TID_FMR,
+ QED_RDMA_TID_MW_TYPE1,
+ QED_RDMA_TID_MW_TYPE2A
+};
+
+struct qed_rdma_events {
+ void *context;
+ void (*affiliated_event)(void *context, u8 fw_event_code,
+ void *fw_handle);
+ void (*unaffiliated_event)(void *context, u8 event_code);
+};
+
+struct qed_rdma_device {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+
+ u64 node_guid;
+ u64 sys_image_guid;
+
+ u8 max_cnq;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u16 max_inline;
+ u32 max_wqe;
+ u32 max_srq_wqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_srq;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u16 max_srq_wr;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ /* Ability to support RNR-NAK generation */
+
+#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
+#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
+ /* Ability to support shutdown port */
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
+ /* Ability to support port active event */
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
+ /* Ability to support port change event */
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
+ /* Ability to support system image GUID */
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
+ /* Ability to support a bad P_Key counter */
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
+ /* Ability to support atomic operations */
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
+ /* Ability to support modifying the maximum number of
+ * outstanding work requests per QP
+ */
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
+ /* Ability to support automatic path migration */
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
+ /* Ability to support the base memory management extensions */
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
+ /* Ability to support multiple page sizes per memory region */
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
+ /* Ability to support block list physical buffer list */
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
+ /* Ability to support zero based virtual addresses */
+#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
+#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
+ /* Ability to support local invalidate fencing */
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
+ /* Ability to support loopback on QP */
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+enum qed_port_state {
+ QED_RDMA_PORT_UP,
+ QED_RDMA_PORT_DOWN,
+};
+
+enum qed_roce_capability {
+ QED_ROCE_V1 = 1 << 0,
+ QED_ROCE_V2 = 1 << 1,
+};
+
+struct qed_rdma_port {
+ enum qed_port_state port_state;
+ int link_speed;
+ u64 max_msg_size;
+ u8 source_gid_table_len;
+ void *source_gid_table_ptr;
+ u8 pkey_table_len;
+ void *pkey_table_ptr;
+ u32 pkey_bad_counter;
+ enum qed_roce_capability capability;
+};
+
+struct qed_rdma_cnq_params {
+ u8 num_pbl_pages;
+ u64 pbl_ptr;
+};
+
+/* The CQ Mode affects the CQ doorbell transaction size.
+ * 64/32 bit machines should configure to 32/16 bits respectively.
+ */
+enum qed_rdma_cq_mode {
+ QED_RDMA_CQ_MODE_16_BITS,
+ QED_RDMA_CQ_MODE_32_BITS,
+};
+
+struct qed_roce_dcqcn_params {
+ u8 notification_point;
+ u8 reaction_point;
+
+ /* fields for notification point */
+ u32 cnp_send_timeout;
+
+ /* fields for reaction point */
+ u32 rl_bc_rate;
+ u16 rl_max_rate;
+ u16 rl_r_ai;
+ u16 rl_r_hai;
+ u16 dcqcn_g;
+ u32 dcqcn_k_us;
+ u32 dcqcn_timeout_us;
+};
+
+struct qed_rdma_start_in_params {
+ struct qed_rdma_events *events;
+ struct qed_rdma_cnq_params cnq_pbl_list[128];
+ u8 desired_cnq;
+ enum qed_rdma_cq_mode cq_mode;
+ struct qed_roce_dcqcn_params dcqcn_params;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 iwarp_flags;
+};
+
+struct qed_rdma_add_user_out_params {
+ u16 dpi;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+};
+
+enum roce_mode {
+ ROCE_V1,
+ ROCE_V2_IPV4,
+ ROCE_V2_IPV6,
+ MAX_ROCE_MODE
+};
+
+union qed_gid {
+ u8 bytes[16];
+ u16 words[8];
+ u32 dwords[4];
+ u64 qwords[2];
+ u32 ipv4_addr;
+};
+
+struct qed_rdma_register_tid_in_params {
+ u32 itid;
+ enum qed_rdma_tid_type tid_type;
+ u8 key;
+ u16 pd;
+ bool local_read;
+ bool local_write;
+ bool remote_read;
+ bool remote_write;
+ bool remote_atomic;
+ bool mw_bind;
+ u64 pbl_ptr;
+ bool pbl_two_level;
+ u8 pbl_page_size_log;
+ u8 page_size_log;
+ u32 fbo;
+ u64 length;
+ u64 vaddr;
+ bool zbva;
+ bool phy_mr;
+ bool dma_mr;
+
+ bool dif_enabled;
+ u64 dif_error_addr;
+ u64 dif_runt_addr;
+};
+
+struct qed_rdma_create_cq_in_params {
+ u32 cq_handle_lo;
+ u32 cq_handle_hi;
+ u32 cq_size;
+ u16 dpi;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+ u8 cnq_id;
+ u16 int_timeout;
+};
+
+struct qed_rdma_create_srq_in_params {
+ u64 pbl_base_addr;
+ u64 prod_pair_addr;
+ u16 num_pages;
+ u16 pd_id;
+ u16 page_size;
+};
+
+struct qed_rdma_destroy_cq_in_params {
+ u16 icid;
+};
+
+struct qed_rdma_destroy_cq_out_params {
+ u16 num_cq_notif;
+};
+
+struct qed_rdma_create_qp_in_params {
+ u32 qp_handle_lo;
+ u32 qp_handle_hi;
+ u32 qp_handle_async_lo;
+ u32 qp_handle_async_hi;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+ u16 pd;
+ u16 dpi;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ u64 sq_pbl_ptr;
+ u8 max_sq_sges;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ u64 rq_pbl_ptr;
+ u16 srq_id;
+ u8 stats_queue;
+};
+
+struct qed_rdma_create_qp_out_params {
+ u32 qp_id;
+ u16 icid;
+ void *rq_pbl_virt;
+ dma_addr_t rq_pbl_phys;
+ void *sq_pbl_virt;
+ dma_addr_t sq_pbl_phys;
+};
+
+struct qed_rdma_modify_qp_in_params {
+ u32 modify_flags;
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
+
+ enum qed_roce_qp_state new_state;
+ u16 pkey;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ u32 dest_qp;
+ bool lb_indication;
+ u16 mtu;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u32 flow_label;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u16 udp_src_port;
+
+ u16 vlan_id;
+
+ u32 rq_psn;
+ u32 sq_psn;
+ u8 max_rd_atomic_resp;
+ u8 max_rd_atomic_req;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool use_local_mac;
+ enum roce_mode roce_mode;
+};
+
+struct qed_rdma_query_qp_out_params {
+ enum qed_roce_qp_state state;
+ u32 rq_psn;
+ u32 sq_psn;
+ bool draining;
+ u16 mtu;
+ u32 dest_qp;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u32 flow_label;
+ u8 hop_limit_ttl;
+ u8 traffic_class_tos;
+ u32 timeout;
+ u8 rnr_retry;
+ u8 retry_cnt;
+ u8 min_rnr_nak_timer;
+ u16 pkey_index;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ bool sqd_async;
+};
+
+struct qed_rdma_create_srq_out_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_destroy_srq_in_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_modify_srq_in_params {
+ u32 wqe_limit;
+ u16 srq_id;
+};
+
+struct qed_rdma_stats_out_params {
+ u64 sent_bytes;
+ u64 sent_pkts;
+ u64 rcv_bytes;
+ u64 rcv_pkts;
+};
+
+struct qed_rdma_counters_out_params {
+ u64 pd_count;
+ u64 max_pd;
+ u64 dpi_count;
+ u64 max_dpi;
+ u64 cq_count;
+ u64 max_cq;
+ u64 qp_count;
+ u64 max_qp;
+ u64 tid_count;
+ u64 max_tid;
+};
+
+#define QED_ROCE_TX_HEAD_FAILURE (1)
+#define QED_ROCE_TX_FRAG_FAILURE (2)
+
+struct qed_roce_ll2_header {
+ void *vaddr;
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_packet {
+ struct qed_roce_ll2_header header;
+ int n_seg;
+ struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+ int roce_mode;
+ enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+ int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+ u16 vlan_id;
+ u8 smac[ETH_ALEN];
+ int rc;
+};
+
+struct qed_roce_ll2_cbs {
+ void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+ void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+ u16 max_rx_buffers;
+ u16 max_tx_buffers;
+ u16 mtu;
+ u8 mac_address[ETH_ALEN];
+ struct qed_roce_ll2_cbs cbs;
+ void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+ u8 handle;
+ struct qed_roce_ll2_cbs cbs;
+ u8 mac_address[ETH_ALEN];
+ void *cb_cookie;
+
+ /* Lock to protect ll2 */
+ struct mutex lock;
+};
+
+enum qed_rdma_type {
+ QED_RDMA_TYPE_ROCE,
+};
+
+struct qed_dev_rdma_info {
+ struct qed_dev_info common;
+ enum qed_rdma_type rdma_type;
+};
+
+struct qed_rdma_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info);
+ void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
+
+ int (*rdma_init)(struct qed_dev *dev,
+ struct qed_rdma_start_in_params *iparams);
+
+ int (*rdma_add_user)(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *oparams);
+
+ void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
+ int (*rdma_stop)(void *rdma_cxt);
+ struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
+ struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
+ int (*rdma_get_start_sb)(struct qed_dev *cdev);
+ int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
+ void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
+ int (*rdma_get_rdma_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+ int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
+ int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
+ void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_create_cq)(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid);
+ int (*rdma_destroy_cq)(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *iparams,
+ struct qed_rdma_destroy_cq_out_params *oparams);
+ struct qed_rdma_qp *
+ (*rdma_create_qp)(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *iparams,
+ struct qed_rdma_create_qp_out_params *oparams);
+
+ int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *iparams);
+
+ int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *oparams);
+ int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+ int
+ (*rdma_register_tid)(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *iparams);
+ int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+ void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*roce_ll2_start)(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params);
+ int (*roce_ll2_stop)(struct qed_dev *cdev);
+ int (*roce_ll2_tx)(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *packet,
+ struct qed_roce_ll2_tx_params *params);
+ int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw);
+ int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address);
+ int (*roce_ll2_stats)(struct qed_dev *cdev,
+ struct qed_ll2_stats *stats);
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void);
+
+#endif
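
For orientation, a minimal sketch of how an upper-layer RDMA driver might consume the qed_rdma_ops interface declared above; the example_* names, the pr_info reporting and the error handling are illustrative assumptions rather than the real qedr flow:

static int example_query_rdma_caps(struct qed_dev *cdev)
{
	const struct qed_rdma_ops *qed_ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info dev_info;
	struct qed_rdma_device *attr;
	void *rdma_cxt;
	int rc;

	/* Basic device information, including the RDMA type (RoCE) */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		return rc;

	/* Per-device RDMA context used by the remaining callbacks */
	rdma_cxt = qed_ops->rdma_get_rdma_ctx(cdev);

	attr = qed_ops->rdma_query_device(rdma_cxt);
	pr_info("qed rdma: max_qp=%u max_cq=%u max_mr=%u\n",
		attr->max_qp, attr->max_cq, attr->max_mr);

	return 0;
}
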
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef QEDE_ROCE_H
+#define QEDE_ROCE_H
+
+struct qedr_dev;
+struct qed_dev;
+struct qede_dev;
+
+enum qede_roce_event {
+ QEDE_UP,
+ QEDE_DOWN,
+ QEDE_CHANGE_ADDR,
+ QEDE_CLOSE
+};
+
+struct qede_roce_event_work {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ enum qede_roce_event event;
+};
+
+struct qedr_driver {
+ unsigned char name[32];
+
+ struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
+ struct net_device *);
+
+ void (*remove)(struct qedr_dev *);
+ void (*notify)(struct qedr_dev *, enum qede_roce_event);
+};
+
+/* APIs for the RoCE driver to register callback handlers,
+ * which will be invoked when the device is added, removed, brought up or down
+ */
+int qede_roce_register_driver(struct qedr_driver *drv);
+void qede_roce_unregister_driver(struct qedr_driver *drv);
+
+bool qede_roce_supported(struct qede_dev *dev);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+int qede_roce_dev_add(struct qede_dev *dev);
+void qede_roce_dev_event_open(struct qede_dev *dev);
+void qede_roce_dev_event_close(struct qede_dev *dev);
+void qede_roce_dev_remove(struct qede_dev *dev);
+void qede_roce_event_changeaddr(struct qede_dev *qedr);
+#else
+static inline int qede_roce_dev_add(struct qede_dev *dev)
+{
+ return 0;
+}
+
+static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
+static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+#endif
+#endif
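
A hedged sketch of how an RDMA driver could plug into qede via the qedr_driver interface above; the example_* callbacks are empty placeholders, and only the registration calls come from this header:

static struct qedr_dev *example_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	/* allocate and initialise the RoCE device state here */
	return NULL;
}

static void example_remove(struct qedr_dev *dev)
{
	/* tear down the RoCE device state here */
}

static void example_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	/* react to QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR or QEDE_CLOSE */
}

static struct qedr_driver example_qedr_driver = {
	.name	= "example_qedr",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

/* module init/exit would then call:
 *	qede_roce_register_driver(&example_qedr_driver);
 *	qede_roce_unregister_driver(&example_qedr_driver);
 */
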
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index accba0e6b704..dc3889d1bbe6 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -11,6 +11,14 @@
#define TCP_INVALID_TIMEOUT_VAL -1
+struct ooo_opaque {
+ __le32 cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+};
+
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
@@ -18,14 +26,10 @@ enum tcp_connect_mode {
};
struct tcp_init_params {
- __le32 max_cwnd;
- __le16 dup_ack_threshold;
+ __le32 two_msl_timer;
__le16 tx_sws_timer;
- __le16 min_rto;
- __le16 min_rto_rt;
- __le16 max_rto;
u8 maxfinrt;
- u8 reserved[1];
+ u8 reserved[9];
};
enum tcp_ip_version {
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 4c45105dece3..52b97db93830 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-struct radix_tree_node *radix_tree_replace_clear_tags(
- struct radix_tree_root *root,
- unsigned long index, void *entry);
+void radix_tree_clear_tags(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void **slot);
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
void **results, unsigned long first_index,
unsigned int max_items);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index a0118d5929a9..4d57bbaaa1bf 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -102,7 +102,11 @@ extern const struct raid6_calls raid6_altivec8;
extern const struct raid6_calls raid6_avx2x1;
extern const struct raid6_calls raid6_avx2x2;
extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_avx512x1;
+extern const struct raid6_calls raid6_avx512x2;
+extern const struct raid6_calls raid6_avx512x4;
extern const struct raid6_calls raid6_tilegx8;
+extern const struct raid6_calls raid6_s390vx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -115,6 +119,8 @@ struct raid6_recov_calls {
extern const struct raid6_recov_calls raid6_recov_intx1;
extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
+extern const struct raid6_recov_calls raid6_recov_avx512;
+extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 2c12cc5af744..9adc7b21903d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -241,9 +241,9 @@ typedef void (*regmap_unlock)(void *);
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
*
- * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * @read_flag_mask: Mask to be set in the top bytes of the register when doing
* a read.
- * @write_flag_mask: Mask to be set in the top byte of the register when doing
+ * @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
* empty the regmap_bus default masks are used.
* @use_single_rw: If set, converts the bulk read and write operations into
@@ -299,8 +299,8 @@ struct regmap_config {
const void *reg_defaults_raw;
unsigned int num_reg_defaults_raw;
- u8 read_flag_mask;
- u8 write_flag_mask;
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
bool use_single_rw;
bool can_multi_write;
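
Since read_flag_mask and write_flag_mask are widened from u8 to unsigned long above, a bus driver can now place its read/write flag outside the top byte of the register address. A hedged sketch of such a configuration (register width, value width and flag position are illustrative assumptions):

static const struct regmap_config example_regmap_config = {
	.reg_bits	 = 16,
	.val_bits	 = 8,
	/* read flag carried in bit 15 of the 16-bit register address */
	.read_flag_mask	 = 0x8000,
	.write_flag_mask = 0x0000,
};
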
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index cae500b2c1d7..692108222271 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -140,8 +140,6 @@ struct regulator;
*
* @supply: The name of the supply. Initialised by the user before
* using the bulk regulator APIs.
- * @optional: The supply should be considered optional. Initialised by the user
- * before using the bulk regulator APIs.
* @consumer: The regulator consumer for the supply. This will be managed
* by the bulk API.
*
@@ -151,7 +149,6 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
- bool optional;
struct regulator *consumer;
/* private: Internal use */
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fcfa40a6692c..37b532410528 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -113,10 +113,14 @@ struct regulator_linear_range {
* stabilise after being enabled, in microseconds.
* @set_ramp_delay: Set the ramp delay for the regulator. The driver should
* select ramp delay equal to or less than(closest) ramp_delay.
+ * @set_voltage_time: Time taken for the regulator output voltage
+ * to stabilise after being set to a new value, in microseconds.
+ * The function receives the from and to voltages as input and
+ * should return the worst case.
* @set_voltage_time_sel: Time taken for the regulator voltage output voltage
* to stabilise after being set to a new value, in microseconds.
- * The function provides the from and to voltage selector, the
- * function should return the worst case.
+ * The function receives the from and to voltage selectors as
+ * input and should return the worst case.
* @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
@@ -168,6 +172,8 @@ struct regulator_ops {
/* Time taken to enable or set voltage on the regulator */
int (*enable_time) (struct regulator_dev *);
int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
+ int (*set_voltage_time) (struct regulator_dev *, int old_uV,
+ int new_uV);
int (*set_voltage_time_sel) (struct regulator_dev *,
unsigned int old_selector,
unsigned int new_selector);
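
A hedged sketch of a driver implementing the new set_voltage_time op documented above; the 10 mV/us slew rate is an illustrative assumption:

static int example_set_voltage_time(struct regulator_dev *rdev,
				    int old_uV, int new_uV)
{
	int delta_uV = abs(new_uV - old_uV);

	/* worst-case ramp time in microseconds at an assumed 10 mV/us slew */
	return DIV_ROUND_UP(delta_uV, 10000);
}

static const struct regulator_ops example_regulator_ops = {
	.set_voltage_time = example_set_voltage_time,
	/* remaining ops omitted in this sketch */
};
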
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 1c457a8dd5a6..930023b7c825 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -118,7 +118,7 @@ enum fw_resource_type {
RSC_LAST = 4,
};
-#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF)
+#define FW_RSC_ADDR_ANY (-1)
/**
* struct fw_rsc_carveout - physically contiguous memory request
@@ -241,7 +241,7 @@ struct fw_rsc_trace {
* @notifyid is a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
- * @reserved: reserved (must be zero)
+ * @pa: physical address
*
* This descriptor is not a resource entry by itself; it is part of the
* vdev resource type (see below).
@@ -255,7 +255,7 @@ struct fw_rsc_vdev_vring {
u32 align;
u32 num;
u32 notifyid;
- u32 reserved;
+ u32 pa;
} __packed;
/**
@@ -409,7 +409,6 @@ enum rproc_crash_type {
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
- * @table_csum: checksum of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@@ -435,14 +434,14 @@ struct rproc {
struct idr notifyids;
int index;
struct work_struct crash_handler;
- unsigned crash_cnt;
+ unsigned int crash_cnt;
struct completion crash_comp;
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
struct resource_table *cached_table;
- u32 table_csum;
bool has_iommu;
+ bool auto_boot;
};
/* we currently support only two vrings per rvdev */
@@ -489,11 +488,12 @@ struct rproc_vdev {
struct rproc *rproc_get_by_phandle(phandle phandle);
struct rproc *rproc_alloc(struct device *dev, const char *name,
- const struct rproc_ops *ops,
- const char *firmware, int len);
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
void rproc_put(struct rproc *rproc);
int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
+void rproc_free(struct rproc *rproc);
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
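
A hedged sketch of the allocation flow with the additions above: rproc_free() now undoes rproc_alloc(), and the new auto_boot flag controls whether the core boots the processor when it is registered. Device, ops and firmware names are illustrative assumptions:

static int example_register_rproc(struct device *dev,
				  const struct rproc_ops *ops)
{
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(dev, "example-rproc", ops, "example-fw.elf", 0);
	if (!rproc)
		return -ENOMEM;

	rproc->auto_boot = true;	/* new field added in this series */

	ret = rproc_add(rproc);
	if (ret)
		rproc_free(rproc);	/* new counterpart of rproc_alloc() */

	return ret;
}
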
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
@@ -53,6 +53,11 @@ struct rhash_head {
struct rhash_head __rcu *next;
};
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
+ bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
};
/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
* @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
};
/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
* @ht: Table to iterate through
* @p: Current pointer
+ * @list: Current hash list pointer
* @walker: Associated rhashtable walker
* @slot: Current slot
* @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
- struct rhashtable_walker *walker;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
};
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj);
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+ struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+ for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rhlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+ for (pos = list; pos && rht_entry(tpos, pos, member); \
+ pos = rcu_dereference_raw(pos->next))
+
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+ return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in the hash if there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params, bool rhlist)
+{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
- struct bucket_table *tbl, *new_tbl;
+ struct rhash_head __rcu **pprev;
+ struct bucket_table *tbl;
struct rhash_head *head;
spinlock_t *lock;
- unsigned int elasticity;
unsigned int hash;
- int err;
+ int elasticity;
+ void *data;
-restart:
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
-
- if (tbl->rehash <= hash)
- break;
-
- spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- }
-
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
-
- err = PTR_ERR(tbl);
- goto out;
- }
-
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
-
- if (unlikely(rht_grow_above_100(ht, tbl))) {
+ if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
- if (err)
- return err;
-
- goto restart;
+ return rhashtable_insert_slow(ht, key, obj);
}
- err = -EEXIST;
elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head)))))
+ struct rhlist_head *plist;
+ struct rhlist_head *list;
+
+ elasticity--;
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ data = rht_obj(ht, head);
+
+ if (!rhlist)
goto out;
- if (!--elasticity)
- goto slow_path;
+
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
+
+ goto good;
}
- err = 0;
+ if (elasticity <= 0)
+ goto slow_path;
+
+ data = ERR_PTR(-E2BIG);
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ goto slow_path;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
@@ -648,11 +752,14 @@ slow_path:
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
+good:
+ data = NULL;
+
out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err;
+ return data;
}
/**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
+ void *ret;
+
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+ struct rhltable *hlt, const void *key, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+ params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+ key += params.key_offset;
+
+ return rhltable_insert_key(hlt, key, list, params);
}
/**
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
+ void *ret;
BUG_ON(ht->p.obj_hashfn);
- return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
- params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
+ void *ret;
+
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ ret = __rhashtable_insert_fast(ht, key, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ * @key: the pointer to the key
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
+ return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
+ struct rhash_head *obj, const struct rhashtable_params params,
+ bool rhlist)
{
struct rhash_head __rcu **pprev;
struct rhash_head *he;
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
pprev = &tbl->buckets[hash];
rht_for_each(he, tbl, hash) {
+ struct rhlist_head *list;
+
+ list = container_of(he, struct rhlist_head, rhead);
+
if (he != obj) {
+ struct rhlist_head __rcu **lpprev;
+
pprev = &he->next;
- continue;
+
+ if (!rhlist)
+ continue;
+
+ do {
+ lpprev = &list->next;
+ list = rht_dereference_bucket(list->next,
+ tbl, hash);
+ } while (list && obj != &list->rhead);
+
+ if (!list)
+ continue;
+
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ RCU_INIT_POINTER(*lpprev, list);
+ err = 0;
+ break;
}
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
+ obj = rht_dereference_bucket(obj->next, tbl, hash);
+ err = 1;
+
+ if (rhlist) {
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ if (list) {
+ RCU_INIT_POINTER(list->rhead.next, obj);
+ obj = &list->rhead;
+ err = 0;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, obj);
break;
}
spin_unlock_bh(lock);
+ if (err > 0) {
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+ err = 0;
+ }
+
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+ rhlist)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
- if (err)
- goto out;
-
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
-
-out:
rcu_read_unlock();
return err;
}
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
return err;
}
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+ struct rhashtable_iter *iter, gfp_t gfp)
+{
+ rhashtable_walk_enter(ht, iter);
+ return 0;
+}
+
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+ struct rhashtable_iter *iter)
+{
+ return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+ void (*free_fn)(void *ptr,
+ void *arg),
+ void *arg)
+{
+ return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+ return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
#endif /* _LINUX_RHASHTABLE_H */
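
To tie the new rhltable/rhlist API together, a hedged usage sketch: a table keyed by an int that tolerates duplicate keys, assumed to have been set up with rhltable_init(). The struct layout, parameters and key choice are illustrative assumptions:

struct example_obj {
	int key;
	struct rhlist_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	     = sizeof(int),
	.key_offset	     = offsetof(struct example_obj, key),
	.head_offset	     = offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};

static void example_use(struct rhltable *hlt, struct example_obj *obj)
{
	struct rhlist_head *list, *pos;
	struct example_obj *cur;

	/* duplicates of obj->key may already be present; all are kept */
	rhltable_insert(hlt, &obj->node, example_params);

	rcu_read_lock();
	list = rhltable_lookup(hlt, &obj->key, example_params);
	rhl_for_each_entry_rcu(cur, pos, list, node)
		pr_info("found key %d\n", cur->key);
	rcu_read_unlock();

	rhltable_remove(hlt, &obj->node, example_params);
}
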
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index ada50ff36da0..452d393cc8dd 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -41,65 +41,27 @@
#include <linux/kref.h>
#include <linux/mutex.h>
-/* The feature bitmap for virtio rpmsg */
-#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
+struct rpmsg_device;
+struct rpmsg_endpoint;
+struct rpmsg_device_ops;
+struct rpmsg_endpoint_ops;
/**
- * struct rpmsg_hdr - common header for all rpmsg messages
- * @src: source address
+ * struct rpmsg_channel_info - channel info representation
+ * @name: name of service
+ * @src: local address
* @dst: destination address
- * @reserved: reserved for future use
- * @len: length of payload (in bytes)
- * @flags: message flags
- * @data: @len bytes of message payload data
- *
- * Every message sent(/received) on the rpmsg bus begins with this header.
*/
-struct rpmsg_hdr {
+struct rpmsg_channel_info {
+ char name[RPMSG_NAME_SIZE];
u32 src;
u32 dst;
- u32 reserved;
- u16 len;
- u16 flags;
- u8 data[0];
-} __packed;
-
-/**
- * struct rpmsg_ns_msg - dynamic name service announcement message
- * @name: name of remote service that is published
- * @addr: address of remote service that is published
- * @flags: indicates whether service is created or destroyed
- *
- * This message is sent across to publish a new service, or announce
- * about its removal. When we receive these messages, an appropriate
- * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
- * or ->remove() handler of the appropriate rpmsg driver will be invoked
- * (if/as-soon-as one is registered).
- */
-struct rpmsg_ns_msg {
- char name[RPMSG_NAME_SIZE];
- u32 addr;
- u32 flags;
-} __packed;
-
-/**
- * enum rpmsg_ns_flags - dynamic name service announcement flags
- *
- * @RPMSG_NS_CREATE: a new remote service was just created
- * @RPMSG_NS_DESTROY: a known remote service was just destroyed
- */
-enum rpmsg_ns_flags {
- RPMSG_NS_CREATE = 0,
- RPMSG_NS_DESTROY = 1,
};
-#define RPMSG_ADDR_ANY 0xFFFFFFFF
-
-struct virtproc_info;
-
/**
- * rpmsg_channel - devices that belong to the rpmsg bus are called channels
- * @vrp: the remote processor this channel belongs to
+ * rpmsg_device - device that belongs to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
* @src: local address
@@ -107,17 +69,18 @@ struct virtproc_info;
* @ept: the rpmsg endpoint of this channel
* @announce: if set, rpmsg will announce the creation/removal of this channel
*/
-struct rpmsg_channel {
- struct virtproc_info *vrp;
+struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
bool announce;
+
+ const struct rpmsg_device_ops *ops;
};
-typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
+typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
@@ -143,12 +106,14 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
* create additional endpoints by themselves (see rpmsg_create_ept()).
*/
struct rpmsg_endpoint {
- struct rpmsg_channel *rpdev;
+ struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
struct mutex cb_lock;
u32 addr;
void *priv;
+
+ const struct rpmsg_endpoint_ops *ops;
};
/**
@@ -162,20 +127,19 @@ struct rpmsg_endpoint {
struct rpmsg_driver {
struct device_driver drv;
const struct rpmsg_device_id *id_table;
- int (*probe)(struct rpmsg_channel *dev);
- void (*remove)(struct rpmsg_channel *dev);
- void (*callback)(struct rpmsg_channel *, void *, int, void *, u32);
+ int (*probe)(struct rpmsg_device *dev);
+ void (*remove)(struct rpmsg_device *dev);
+ int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
-int register_rpmsg_device(struct rpmsg_channel *dev);
-void unregister_rpmsg_device(struct rpmsg_channel *dev);
+int register_rpmsg_device(struct rpmsg_device *dev);
+void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
-struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
- rpmsg_rx_cb_t cb, void *priv, u32 addr);
-int
-rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo);
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_rpmsg_driver(drv) \
@@ -193,156 +157,14 @@ rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
module_driver(__rpmsg_driver, register_rpmsg_driver, \
unregister_rpmsg_driver)
-/**
- * rpmsg_send() - send a message across to the remote processor
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len on the @rpdev channel.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source and destination addresses.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
-{
- u32 src = rpdev->src, dst = rpdev->dst;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_sendto() - send a message across to the remote processor, specify dst
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- * @dst: destination address
- *
- * This function sends @data of length @len to the remote @dst address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source address.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
-{
- u32 src = rpdev->src;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @rpdev: the rpmsg channel
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
- void *data, int len)
-{
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_send() - send a message across to the remote processor
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len on the @rpdev channel.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source and destination addresses.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
-{
- u32 src = rpdev->src, dst = rpdev->dst;
+int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
-/**
- * rpmsg_sendto() - send a message across to the remote processor, specify dst
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- * @dst: destination address
- *
- * This function sends @data of length @len to the remote @dst address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source address.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
-{
- u32 src = rpdev->src;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
-/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @rpdev: the rpmsg channel
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
- void *data, int len)
-{
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
+int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
#endif /* _LINUX_RPMSG_H */
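
A hedged sketch of a client written against the reworked API above: messages are now sent on a struct rpmsg_endpoint rather than on the channel itself, with rpdev->ept as the default endpoint. The example_* names and the channel name are illustrative assumptions:

static int example_cb(struct rpmsg_device *rpdev, void *data, int len,
		      void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;
}

static int example_probe(struct rpmsg_device *rpdev)
{
	static char msg[] = "hello";

	/* send on the default endpoint created for this channel */
	return rpmsg_send(rpdev->ept, msg, sizeof(msg));
}

static const struct rpmsg_device_id example_id_table[] = {
	{ .name = "rpmsg-example" },
	{ },
};

static struct rpmsg_driver example_rpmsg_client = {
	.drv.name = "rpmsg_example",
	.id_table = example_id_table,
	.probe	  = example_probe,
	.callback = example_cb,
};

/* registered at module init with register_rpmsg_driver(&example_rpmsg_client) */
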
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2daece8979f7..57e54847b0b9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -105,7 +105,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
new file mode 100644
index 000000000000..f017fd6e69c4
--- /dev/null
+++ b/include/linux/sbitmap.h
@@ -0,0 +1,373 @@
+/*
+ * Fast and scalable bitmaps.
+ *
+ * Copyright (C) 2016 Facebook
+ * Copyright (C) 2013-2014 Jens Axboe
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_SCALE_BITMAP_H
+#define __LINUX_SCALE_BITMAP_H
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ * struct sbitmap_word - Word in a &struct sbitmap.
+ */
+struct sbitmap_word {
+ /**
+ * @word: The bitmap word itself.
+ */
+ unsigned long word;
+
+ /**
+ * @depth: Number of bits being used in @word.
+ */
+ unsigned long depth;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct sbitmap - Scalable bitmap.
+ *
+ * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
+ * trades off higher memory usage for better scalability.
+ */
+struct sbitmap {
+ /**
+ * @depth: Number of bits used in the whole bitmap.
+ */
+ unsigned int depth;
+
+ /**
+ * @shift: log2(number of bits used per word)
+ */
+ unsigned int shift;
+
+ /**
+ * @map_nr: Number of words (cachelines) being used for the bitmap.
+ */
+ unsigned int map_nr;
+
+ /**
+ * @map: Allocated bitmap.
+ */
+ struct sbitmap_word *map;
+};
+
+#define SBQ_WAIT_QUEUES 8
+#define SBQ_WAKE_BATCH 8
+
+/**
+ * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
+ */
+struct sbq_wait_state {
+ /**
+ * @wait_cnt: Number of frees remaining before we wake up.
+ */
+ atomic_t wait_cnt;
+
+ /**
+ * @wait: Wait queue.
+ */
+ wait_queue_head_t wait;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
+ * bits.
+ *
+ * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
+ * avoid contention on the wait queue spinlock. This ensures that we don't hit a
+ * scalability wall when we run out of free bits and have to start putting tasks
+ * to sleep.
+ */
+struct sbitmap_queue {
+ /**
+ * @sb: Scalable bitmap.
+ */
+ struct sbitmap sb;
+
+ /**
+ * @alloc_hint: Cache of last successfully allocated or freed bit.
+ *
+ * This is per-cpu, which allows multiple users to stick to different
+ * cachelines until the map is exhausted.
+ */
+ unsigned int __percpu *alloc_hint;
+
+ /**
+ * @wake_batch: Number of bits which must be freed before we wake up any
+ * waiters.
+ */
+ unsigned int wake_batch;
+
+ /**
+ * @wake_index: Next wait queue in @ws to wake up.
+ */
+ atomic_t wake_index;
+
+ /**
+ * @ws: Wait queues.
+ */
+ struct sbq_wait_state *ws;
+
+ /**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+};
+
+/**
+ * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
+ * @sb: Bitmap to initialize.
+ * @depth: Number of bits to allocate.
+ * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
+ * given, a good default is chosen.
+ * @flags: Allocation flags.
+ * @node: Memory node to allocate on.
+ *
+ * Return: Zero on success or negative errno on failure.
+ */
+int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ gfp_t flags, int node);
+
+/**
+ * sbitmap_free() - Free memory used by a &struct sbitmap.
+ * @sb: Bitmap to free.
+ */
+static inline void sbitmap_free(struct sbitmap *sb)
+{
+ kfree(sb->map);
+ sb->map = NULL;
+}
+
+/**
+ * sbitmap_resize() - Resize a &struct sbitmap.
+ * @sb: Bitmap to resize.
+ * @depth: New number of bits to resize to.
+ *
+ * Doesn't reallocate anything. It's up to the caller to ensure that the new
+ * depth doesn't exceed the depth that the sb was initialized with.
+ */
+void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
+
+/**
+ * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+
+/**
+ * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: true if any bit in the bitmap is set, false otherwise.
+ */
+bool sbitmap_any_bit_set(const struct sbitmap *sb);
+
+/**
+ * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
+ * sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: true if any bit in the bitmap is clear, false otherwise.
+ */
+bool sbitmap_any_bit_clear(const struct sbitmap *sb);
+
+typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
+
+/**
+ * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @sb: Bitmap to iterate over.
+ * @fn: Callback. Should return true to continue or false to break early.
+ * @data: Pointer to pass to callback.
+ *
+ * This is inline even though it's non-trivial so that the function calls to the
+ * callback will hopefully get optimized away.
+ */
+static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
+ void *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ struct sbitmap_word *word = &sb->map[i];
+ unsigned int off, nr;
+
+ if (!word->word)
+ continue;
+
+ nr = 0;
+ off = i << sb->shift;
+ while (1) {
+ nr = find_next_bit(&word->word, word->depth, nr);
+ if (nr >= word->depth)
+ break;
+
+ if (!fn(sb, off + nr, data))
+ return;
+
+ nr++;
+ }
+ }
+}
+
+#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
+#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
+
+static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
+ unsigned int bitnr)
+{
+ return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
+}
+
+/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */
+
+static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
+/**
+ * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
+ * memory node.
+ * @sbq: Bitmap queue to initialize.
+ * @depth: See sbitmap_init_node().
+ * @shift: See sbitmap_init_node().
+ * @round_robin: See sbitmap_get().
+ * @flags: Allocation flags.
+ * @node: Memory node to allocate on.
+ *
+ * Return: Zero on success or negative errno on failure.
+ */
+int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ int shift, bool round_robin, gfp_t flags, int node);
+
+/**
+ * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to free.
+ */
+static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
+{
+ kfree(sbq->ws);
+ free_percpu(sbq->alloc_hint);
+ sbitmap_free(&sbq->sb);
+}
+
+/**
+ * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to resize.
+ * @depth: New number of bits to resize to.
+ *
+ * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
+ * some extra work on the &struct sbitmap_queue, so it's not safe to just
+ * resize the underlying &struct sbitmap.
+ */
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
+
+/**
+ * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue with preemption already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get(struct sbitmap_queue *sbq);
+
+/**
+ * sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ * sbitmap_queue_clear()).
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
+ unsigned int *cpu)
+{
+ int nr;
+
+ *cpu = get_cpu();
+ nr = __sbitmap_queue_get(sbq);
+ put_cpu();
+ return nr;
+}
+
+/**
+ * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @nr: Bit number to free.
+ * @cpu: CPU the bit was allocated on.
+ */
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+ unsigned int cpu);
+
+static inline int sbq_index_inc(int index)
+{
+ return (index + 1) & (SBQ_WAIT_QUEUES - 1);
+}
+
+static inline void sbq_index_atomic_inc(atomic_t *index)
+{
+ int old = atomic_read(index);
+ int new = sbq_index_inc(old);
+ atomic_cmpxchg(index, old, new);
+}
+
+/**
+ * sbq_wait_ptr() - Get the next wait queue to use for a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to wait on.
+ * @wait_index: A counter per "user" of @sbq.
+ */
+static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
+ atomic_t *wait_index)
+{
+ struct sbq_wait_state *ws;
+
+ ws = &sbq->ws[atomic_read(wait_index)];
+ sbq_index_atomic_inc(wait_index);
+ return ws;
+}
+
+/**
+ * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to wake up.
+ */
+void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
+
+#endif /* __LINUX_SCALE_BITMAP_H */
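The sbitmap_queue documentation above describes a scalable tag allocator: per-cpu allocation hints plus multiple wait queues with batched wakeups. A minimal usage sketch, assuming a queue already set up with sbitmap_queue_init_node(); the function below is hypothetical caller code, not part of this patch:

static int example_run_with_tag(struct sbitmap_queue *tags)
{
	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(tags, &cpu);	/* returns -1 when no bit is free */
	if (tag < 0)
		return -EBUSY;

	/* ... use @tag to index a request/command table ... */

	sbitmap_queue_clear(tags, tag, cpu);	/* free the bit and wake any waiters */
	return 0;
}

A caller that must not fail would typically wait on one of the wait queues returned by sbq_wait_ptr() and then retry the allocation.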
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7543a476178b..348f51b0ec92 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -524,8 +524,9 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
-#define MMF_OOM_REAPED 21 /* mm has been already reaped */
-#define MMF_OOM_NOT_REAPABLE 22 /* mm couldn't be reaped */
+#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
+#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
+#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
@@ -673,7 +674,6 @@ struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
- atomic_t oom_victims; /* # of TIF_MEDIE threads in this thread group */
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -806,6 +806,8 @@ struct signal_struct {
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
+ struct mm_struct *oom_mm; /* recorded mm when the thread group got
+ * killed by the oom killer */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
@@ -2876,6 +2878,20 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+static inline void mmdrop_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ __mmdrop(mm);
+}
+
+static inline void mmdrop_async(struct mm_struct *mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+ INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
static inline bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
diff --git a/include/linux/security.h b/include/linux/security.h
index 7831cd57bcf7..c2125e9093e8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -242,6 +242,10 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
+int security_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
@@ -282,6 +286,8 @@ int security_inode_getsecurity(struct inode *inode, const char *name, void **buf
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
+int security_inode_copy_up(struct dentry *src, struct cred **new);
+int security_inode_copy_up_xattr(const char *name);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@@ -307,7 +313,6 @@ void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_kernel_module_from_file(struct file *file);
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -598,6 +603,14 @@ static inline int security_dentry_init_security(struct dentry *dentry,
return -EOPNOTSUPP;
}
+static inline int security_dentry_create_files_as(struct dentry *dentry,
+ int mode, struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ return 0;
+}
+
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
@@ -758,6 +771,16 @@ static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
*secid = 0;
}
+static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ return 0;
+}
+
+static inline int security_inode_copy_up_xattr(const char *name)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int security_file_permission(struct file *file, int mask)
{
return 0;
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index f3d45dd42695..e305b66a9fb9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -117,9 +117,9 @@ __printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
void seq_puts(struct seq_file *m, const char *s);
-void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num);
-void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num);
void seq_escape(struct seq_file *m, const char *s, const char *esc);
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 2f44e2013654..344201437017 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -367,14 +367,21 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
-extern int setup_earlycon(char *buf);
extern int of_setup_earlycon(const struct earlycon_id *match,
unsigned long node,
const char *options);
+#ifdef CONFIG_SERIAL_EARLYCON
+extern bool earlycon_init_is_deferred __initdata;
+int setup_earlycon(char *buf);
+#else
+static const bool earlycon_init_is_deferred;
+static inline int setup_earlycon(char *buf) { return 0; }
+#endif
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
-int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int *flow);
@@ -412,7 +419,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if (tty->stopped || port->hw_stopped)
+ if ((tty && tty->stopped) || port->hw_stopped)
return 1;
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0f665cb26b50..601258f6e621 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
- * @offload_fwd_mark: fwding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -677,13 +676,23 @@ struct sk_buff {
*/
kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
+
+/* if you move the cloned bit around, you must also adapt these constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CLONED_MASK (1 << 7)
+#else
+#define CLONED_MASK 1
+#endif
+#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+
+ __u8 __cloned_offset[0];
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ __unused:1; /* one bit hole */
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
@@ -730,7 +739,10 @@ struct sk_buff {
__u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
- /* 3 or 5 bit hole */
+#ifdef CONFIG_NET_SWITCHDEV
+ __u8 offload_fwd_mark:1;
+#endif
+ /* 2, 4 or 5 bit hole */
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -757,14 +769,9 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
- union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
-#endif
-#ifdef CONFIG_NET_SWITCHDEV
- __u32 offload_fwd_mark;
+ __u32 secmark;
#endif
- };
union {
__u32 mark;
@@ -2295,7 +2302,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
if (unlikely(skb_is_nonlinear(skb))) {
WARN_ON(1);
@@ -2305,6 +2312,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ __skb_set_length(skb, len);
+}
+
void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -2335,6 +2347,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
BUG_ON(err);
}
+static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int diff = len - skb->len;
+
+ if (skb_tailroom(skb) < diff) {
+ int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
+ GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+ __skb_set_length(skb, len);
+ return 0;
+}
+
/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
@@ -2386,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
+void skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2938,6 +2966,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
return __pskb_trim(skb, len);
}
+static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ __skb_trim(skb, len);
+ return 0;
+}
+
+static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __skb_grow(skb, len);
+}
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3021,15 +3064,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-ssize_t skb_socket_splice(struct sock *sk,
- struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags,
- ssize_t (*splice_cb)(struct sock *,
- struct pipe_inode_info *,
- struct splice_pipe_desc *));
+ unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3042,6 +3079,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3726,6 +3764,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+static inline void skb_gso_reset(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
+}
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
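The __skb_grow_rcsum()/__skb_trim_rcsum() helpers added in the skbuff.h hunk above resize a packet while dropping CHECKSUM_COMPLETE, since the stored checksum no longer matches the payload after the length change. A hedged sketch of a caller padding a linear skb to a minimum length (hypothetical helper, not part of this patch):

static int example_pad_to_min(struct sk_buff *skb, unsigned int min_len)
{
	unsigned int old_len = skb->len;
	int err;

	if (old_len >= min_len)
		return 0;

	err = __skb_grow_rcsum(skb, min_len);	/* expands tailroom if needed */
	if (err)
		return err;

	memset(skb->data + old_len, 0, min_len - old_len);	/* zero the new tail */
	return 0;
}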
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index cbb0f06c41b2..f148e0ffbec7 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -55,11 +55,16 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
const char *name,
qcom_smd_cb_t cb);
+void qcom_smd_close_channel(struct qcom_smd_channel *channel);
void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node);
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+
#else
static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
@@ -83,14 +88,20 @@ qcom_smd_open_channel(struct qcom_smd_channel *channel,
return NULL;
}
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
{
/* This shouldn't be possible */
WARN_ON(1);
return NULL;
}
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -104,6 +115,20 @@ static inline int qcom_smd_send(struct qcom_smd_channel *channel,
return -ENXIO;
}
+static inline struct qcom_smd_edge *
+qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
#endif
#define module_qcom_smd_driver(__smd_driver) \
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 072cb2aa2413..4b743ac35396 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -312,6 +312,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @flags: other constraints relevant to this driver
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+ * @max_message_size: function that returns the max message size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
@@ -442,10 +444,11 @@ struct spi_master {
#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
/*
- * on some hardware transfer size may be constrained
+ * on some hardware transfer / message size may be constrained
* the limit may depend on device transfer settings
*/
size_t (*max_transfer_size)(struct spi_device *spi);
+ size_t (*max_message_size)(struct spi_device *spi);
/* I/O mutex */
struct mutex io_mutex;
@@ -905,12 +908,26 @@ extern int spi_async_locked(struct spi_device *spi,
struct spi_message *message);
static inline size_t
-spi_max_transfer_size(struct spi_device *spi)
+spi_max_message_size(struct spi_device *spi)
{
struct spi_master *master = spi->master;
- if (!master->max_transfer_size)
+ if (!master->max_message_size)
return SIZE_MAX;
- return master->max_transfer_size(spi);
+ return master->max_message_size(spi);
+}
+
+static inline size_t
+spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ size_t tr_max = SIZE_MAX;
+ size_t msg_max = spi_max_message_size(spi);
+
+ if (master->max_transfer_size)
+ tr_max = master->max_transfer_size(spi);
+
+ /* transfer size limit must not be greater than message size limit */
+ return min(tr_max, msg_max);
}
/*---------------------------------------------------------------------------*/
@@ -980,6 +997,30 @@ extern int spi_bus_lock(struct spi_master *master);
extern int spi_bus_unlock(struct spi_master *master);
/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+ unsigned int num_xfers)
+{
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+ return spi_sync(spi, &msg);
+}
+
+/**
* spi_write - SPI synchronous write
* @spi: device to which data will be written
* @buf: data buffer
@@ -998,11 +1039,8 @@ spi_write(struct spi_device *spi, const void *buf, size_t len)
.tx_buf = buf,
.len = len,
};
- struct spi_message m;
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- return spi_sync(spi, &m);
+ return spi_sync_transfer(spi, &t, 1);
}
/**
@@ -1024,35 +1062,8 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
.rx_buf = buf,
.len = len,
};
- struct spi_message m;
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- return spi_sync(spi, &m);
-}
-/**
- * spi_sync_transfer - synchronous SPI data transfer
- * @spi: device with which data will be exchanged
- * @xfers: An array of spi_transfers
- * @num_xfers: Number of items in the xfer array
- * Context: can sleep
- *
- * Does a synchronous SPI data transfer of the given spi_transfer array.
- *
- * For more specific semantics see spi_sync().
- *
- * Return: Return: zero on success, else a negative error code.
- */
-static inline int
-spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
- unsigned int num_xfers)
-{
- struct spi_message msg;
-
- spi_message_init_with_transfers(&msg, xfers, num_xfers);
-
- return spi_sync(spi, &msg);
+ return spi_sync_transfer(spi, &t, 1);
}
/* this copies txbuf and rxbuf data; for small transfers only! */
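spi_sync_transfer() now sits above spi_write()/spi_read() so both can be expressed through it, and spi_max_transfer_size() is clamped by the new per-message limit. A hedged sketch of a caller building a two-transfer message (hypothetical driver code; real drivers should use DMA-safe buffers):

static int example_cmd_read(struct spi_device *spi, const u8 *cmd,
			    void *buf, size_t len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = 1 },	/* command byte */
		{ .rx_buf = buf, .len = len },	/* response */
	};

	if (len > spi_max_transfer_size(spi))
		return -EMSGSIZE;		/* controller limit exceeded */

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}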
diff --git a/include/linux/splice.h b/include/linux/splice.h
index da2751d3b93d..00a21166e268 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -72,6 +72,8 @@ extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
struct splice_desc *, splice_actor *);
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
struct splice_pipe_desc *);
+extern ssize_t add_to_pipe(struct pipe_inode_info *,
+ struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
splice_direct_actor *);
@@ -83,4 +85,5 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+extern const struct pipe_buf_operations default_pipe_buf_ops;
#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4a529c984a3f..a56523cefb9b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -191,6 +191,11 @@ struct percpu_cluster {
unsigned int next; /* Likely next allocation offset */
};
+struct swap_cluster_list {
+ struct swap_cluster_info head;
+ struct swap_cluster_info tail;
+};
+
/*
* The in-memory structure used to track swap areas.
*/
@@ -203,8 +208,7 @@ struct swap_info_struct {
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_info free_cluster_head; /* free cluster list head */
- struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
+ struct swap_cluster_list free_clusters; /* free clusters list */
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
@@ -235,8 +239,7 @@ struct swap_info_struct {
* first.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
- struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+ struct swap_cluster_list discard_clusters; /* discard clusters list */
};
/* linux/mm/workingset.c */
@@ -257,7 +260,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
- VM_BUG_ON(!workingset_node_pages(node));
+ VM_WARN_ON_ONCE(!workingset_node_pages(node));
node->count--;
}
@@ -273,7 +276,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
- VM_BUG_ON(!workingset_node_shadows(node));
+ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a4f7203a9017..adf4e51cf597 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
+#include <linux/uidgid.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
@@ -157,8 +158,10 @@ struct ctl_table_set {
struct ctl_table_root {
struct ctl_table_set default_set;
- struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
- struct nsproxy *namespaces);
+ struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
+ void (*set_ownership)(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid);
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index dd8de82cf5b5..9fba9dd33544 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -5,6 +5,26 @@
#include <linux/blkdev.h>
/*
+ * A T10 PI-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+enum t10_dif_type {
+ T10_PI_TYPE0_PROTECTION = 0x0,
+ T10_PI_TYPE1_PROTECTION = 0x1,
+ T10_PI_TYPE2_PROTECTION = 0x2,
+ T10_PI_TYPE3_PROTECTION = 0x3,
+};
+
+/*
* T10 Protection Information tuple.
*/
struct t10_pi_tuple {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 unused;
+ u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:7;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
- struct rtt_meas {
- u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
- } rtt_min[3];
+ struct minmax rtt_min;
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ struct skb_mstamp first_tx_mstamp; /* start of window send phase */
+ struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 75b4aaf31a9d..b5ebe6dca404 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -13,6 +13,7 @@
#include <uapi/linux/uio.h>
struct page;
+struct pipe_inode_info;
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
@@ -23,6 +24,7 @@ enum {
ITER_IOVEC = 0,
ITER_KVEC = 2,
ITER_BVEC = 4,
+ ITER_PIPE = 8,
};
struct iov_iter {
@@ -33,8 +35,12 @@ struct iov_iter {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ struct pipe_inode_info *pipe;
+ };
+ union {
+ unsigned long nr_segs;
+ int idx;
};
- unsigned long nr_segs;
};
/*
@@ -64,7 +70,7 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
}
#define iov_for_each(iov, iter, start) \
- if (!((start).type & ITER_BVEC)) \
+ if (!((start).type & (ITER_BVEC | ITER_PIPE))) \
for (iter = (start); \
(iter).count && \
((iov = iov_iter_iovec(&(iter))), 1); \
@@ -94,6 +100,8 @@ void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
+void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
+ size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -109,7 +117,7 @@ static inline size_t iov_iter_count(struct iov_iter *i)
static inline bool iter_is_iovec(struct iov_iter *i)
{
- return !(i->type & (ITER_BVEC | ITER_KVEC));
+ return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}
/*
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
index 388f6e08b9d4..a7af21a55248 100644
--- a/include/linux/ulpi/driver.h
+++ b/include/linux/ulpi/driver.h
@@ -15,7 +15,7 @@ struct ulpi_ops;
*/
struct ulpi {
struct ulpi_device_id id;
- struct ulpi_ops *ops;
+ const struct ulpi_ops *ops;
struct device dev;
};
@@ -47,7 +47,11 @@ struct ulpi_driver {
#define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver)
-int ulpi_register_driver(struct ulpi_driver *drv);
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define ulpi_register_driver(drv) __ulpi_register_driver(drv, THIS_MODULE)
+int __ulpi_register_driver(struct ulpi_driver *drv, struct module *module);
void ulpi_unregister_driver(struct ulpi_driver *drv);
#define module_ulpi_driver(__ulpi_driver) \
diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h
index 4de8ab491038..a2011a919eb6 100644
--- a/include/linux/ulpi/interface.h
+++ b/include/linux/ulpi/interface.h
@@ -4,20 +4,19 @@
#include <linux/types.h>
struct ulpi;
+struct device;
/**
* struct ulpi_ops - ULPI register access
- * @dev: the interface provider
* @read: read operation for ULPI register access
* @write: write operation for ULPI register access
*/
struct ulpi_ops {
- struct device *dev;
- int (*read)(struct ulpi_ops *ops, u8 addr);
- int (*write)(struct ulpi_ops *ops, u8 addr, u8 val);
+ int (*read)(struct device *dev, u8 addr);
+ int (*write)(struct device *dev, u8 addr, u8 val);
};
-struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *);
+struct ulpi *ulpi_register_interface(struct device *, const struct ulpi_ops *);
void ulpi_unregister_interface(struct ulpi *);
#endif /* __LINUX_ULPI_INTERFACE_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2b81b24eb5aa..4616a49a1c2e 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -220,7 +220,8 @@ struct usb_function {
int (*setup)(struct usb_function *,
const struct usb_ctrlrequest *);
bool (*req_match)(struct usb_function *,
- const struct usb_ctrlrequest *);
+ const struct usb_ctrlrequest *,
+ bool config0);
void (*suspend)(struct usb_function *);
void (*resume)(struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 612dbdfa388e..8e81f9eb95e4 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -346,6 +346,8 @@ struct usb_gadget_ops {
* or B-Peripheral wants to take host role.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
+ * @quirk_avoids_skb_reserve: udc/platform wants to avoid skb_reserve() in
+ * u_ether.c to improve performance.
* @is_selfpowered: if the gadget is self-powered.
* @deactivated: True if gadget is deactivated - in deactivated state it cannot
* be connected.
@@ -398,6 +400,7 @@ struct usb_gadget {
unsigned quirk_altset_not_supp:1;
unsigned quirk_stall_not_supp:1;
unsigned quirk_zlp_not_supp:1;
+ unsigned quirk_avoids_skb_reserve:1;
unsigned is_selfpowered:1;
unsigned deactivated:1;
unsigned connected:1;
@@ -418,8 +421,20 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
/**
+ * usb_ep_align - returns @len aligned to ep's maxpacketsize.
+ * @ep: the endpoint whose maxpacketsize is used to align @len
+ * @len: buffer length to align to @ep's maxpacketsize
+ *
+ * This helper is used to align buffer's size to an ep's maxpacketsize.
+ */
+static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
+{
+ return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+}
+
+/**
* usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
- * requires quirk_ep_out_aligned_size, otherwise reguens len.
+ * requires quirk_ep_out_aligned_size, otherwise returns len.
* @g: controller to check for quirk
* @ep: the endpoint whose maxpacketsize is used to align @len
* @len: buffer size's length to align to @ep's maxpacketsize
@@ -430,8 +445,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
static inline size_t
usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
{
- return !g->quirk_ep_out_aligned_size ? len :
- round_up(len, (size_t)ep->desc->wMaxPacketSize);
+ return g->quirk_ep_out_aligned_size ? usb_ep_align(ep, len) : len;
}
/**
@@ -463,6 +477,16 @@ static inline int gadget_is_zlp_supported(struct usb_gadget *g)
}
/**
+ * gadget_avoids_skb_reserve - return true iff the hardware would like to avoid
+ * skb_reserve to improve performance.
+ * @g: controller to check for quirk
+ */
+static inline int gadget_avoids_skb_reserve(struct usb_gadget *g)
+{
+ return g->quirk_avoids_skb_reserve;
+}
+
+/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
* @g: controller that might support both high and full speeds
*/
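usb_ep_align() and the quirk-aware usb_ep_align_maybe() above let gadget function drivers round OUT buffer sizes up to wMaxPacketSize only on controllers that need it. A minimal sketch of an allocation helper using them (hypothetical function-driver code, not part of this patch):

static struct usb_request *example_alloc_out_req(struct usb_gadget *g,
						 struct usb_ep *ep, size_t len)
{
	struct usb_request *req;

	len = usb_ep_align_maybe(g, ep, len);	/* no-op unless the quirk is set */

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return NULL;

	req->buf = kmalloc(len, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}
	req->length = len;
	return req;
}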
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 245f57dbbb61..0aae1b2ee931 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -81,6 +81,8 @@
/* Sets max_sectors to 240 */ \
US_FLAG(NO_REPORT_LUNS, 0x10000000) \
/* Cannot handle REPORT_LUNS */ \
+ US_FLAG(ALWAYS_SYNC, 0x20000000) \
+ /* lies about caching, so always sync */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 9217169c64cb..eb209d4523f5 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -22,6 +22,19 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+struct ucounts;
+
+enum ucount_type {
+ UCOUNT_USER_NAMESPACES,
+ UCOUNT_PID_NAMESPACES,
+ UCOUNT_UTS_NAMESPACES,
+ UCOUNT_IPC_NAMESPACES,
+ UCOUNT_NET_NAMESPACES,
+ UCOUNT_MNT_NAMESPACES,
+ UCOUNT_CGROUP_NAMESPACES,
+ UCOUNT_COUNTS,
+};
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -39,10 +52,30 @@ struct user_namespace {
struct key *persistent_keyring_register;
struct rw_semaphore persistent_keyring_register_sem;
#endif
+ struct work_struct work;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+#endif
+ struct ucounts *ucounts;
+ int ucount_max[UCOUNT_COUNTS];
+};
+
+struct ucounts {
+ struct hlist_node node;
+ struct user_namespace *ns;
+ kuid_t uid;
+ atomic_t count;
+ atomic_t ucount[UCOUNT_COUNTS];
};
extern struct user_namespace init_user_ns;
+bool setup_userns_sysctls(struct user_namespace *ns);
+void retire_userns_sysctls(struct user_namespace *ns);
+struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
+void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
+
#ifdef CONFIG_USER_NS
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -54,12 +87,12 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
extern int create_user_ns(struct cred *new);
extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
-extern void free_user_ns(struct user_namespace *ns);
+extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
if (ns && atomic_dec_and_test(&ns->count))
- free_user_ns(ns);
+ __put_user_ns(ns);
}
struct seq_operations;
@@ -73,6 +106,8 @@ extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t,
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
extern bool current_in_userns(const struct user_namespace *target_ns);
+
+struct ns_common *ns_get_owner(struct ns_common *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -106,6 +141,11 @@ static inline bool current_in_userns(const struct user_namespace *target_ns)
{
return true;
}
+
+static inline struct ns_common *ns_get_owner(struct ns_common *ns)
+{
+ return ERR_PTR(-EPERM);
+}
#endif
#endif /* _LINUX_USER_H */
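The ucount API above attaches per-user creation limits to each namespace type. A hedged sketch of how a namespace-creation path might charge and release such a count (hypothetical code; the header does not spell out the failure convention, so the NULL-on-limit assumption below is exactly that, an assumption):

static struct ucounts *example_charge(struct user_namespace *ns, kuid_t uid)
{
	struct ucounts *ucounts;

	ucounts = inc_ucount(ns, uid, UCOUNT_UTS_NAMESPACES);
	if (!ucounts)			/* assumed: limit in ucount_max[] reached */
		return ERR_PTR(-ENOSPC);
	return ucounts;
}

/* and on teardown: dec_ucount(ucounts, UCOUNT_UTS_NAMESPACES); */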
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 5093f58ae192..60f0bb83b313 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -24,6 +24,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct ns_common ns;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 71e4a6dec5ac..ea6095deba20 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -166,7 +166,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
-int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
+int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *);
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
+/*
+ * win_minmax.h: windowed min/max tracker by Kathleen Nichols.
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include <linux/types.h>
+
+/* A single data point for our parameterized min-max tracker */
+struct minmax_sample {
+ u32 t; /* time measurement was taken */
+ u32 v; /* value measured */
+};
+
+/* State for the parameterized min-max tracker */
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+static inline u32 minmax_get(const struct minmax *m)
+{
+ return m->s[0].v;
+}
+
+static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ m->s[2] = m->s[1] = m->s[0] = val;
+ return m->s[0].v;
+}
+
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
+
+#endif
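A short sketch of the windowed tracker in use, in the spirit of the tcp_sock rtt_min conversion further down (hypothetical caller code; @win and the timestamps just have to use the same time units, e.g. jiffies):

static struct minmax rtt_min;

static void example_seed(u32 now, u32 first_rtt_us)
{
	minmax_reset(&rtt_min, now, first_rtt_us);	/* seed all three samples */
}

static u32 example_sample(u32 win, u32 now, u32 rtt_us)
{
	minmax_running_min(&rtt_min, win, now, rtt_us);
	return minmax_get(&rtt_min);			/* current windowed minimum */
}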
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26cc1df280d6..fc6e22186405 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -442,6 +442,7 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fc1e16c25a29..797100e10010 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -319,7 +319,6 @@ void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
-void throttle_vm_writeout(gfp_t gfp_mask);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9826d3a9464c..f2d072787947 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
#ifndef _ADDRCONF_H
#define _ADDRCONF_H
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 7b0f88699b25..1061a472a3e3 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,42 +12,39 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
-#include <linux/skbuff.h>
#include <linux/rxrpc.h>
+struct key;
+struct sock;
+struct socket;
struct rxrpc_call;
-/*
- * the mark applied to socket buffers that may be intercepted
- */
-enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
-};
+typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
+typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
-typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
- struct sk_buff *);
-void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+void rxrpc_kernel_new_call_notification(struct socket *,
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
struct sockaddr_rxrpc *,
struct key *,
unsigned long,
- gfp_t);
-int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
-void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
-void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-void rxrpc_kernel_end_call(struct rxrpc_call *);
-bool rxrpc_kernel_is_data_last(struct sk_buff *);
-u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_free_skb(struct sk_buff *);
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
-int rxrpc_kernel_reject_call(struct socket *);
+ gfp_t,
+ rxrpc_notify_rx_t);
+int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
+ struct msghdr *, size_t);
+int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
+ void *, size_t, size_t *, bool, u32 *);
+void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, const char *);
+void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
+void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
+ struct sockaddr_rxrpc *);
+int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t);
#endif /* _NET_RXRPC_H */
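The af_rxrpc changes replace sk_buff interception with per-call notification hooks. A hedged sketch of a kernel user wiring up the prototypes above (hypothetical AFS-style code; exactly when the callback fires is not defined in this header):

static void example_notify_rx(struct sock *sk, struct rxrpc_call *call,
			      unsigned long call_user_ID)
{
	/* typically: queue work that calls rxrpc_kernel_recv_data() on @call */
}

static struct rxrpc_call *example_begin_call(struct socket *sock,
					     struct sockaddr_rxrpc *srx,
					     struct key *key,
					     unsigned long user_call_ID)
{
	return rxrpc_kernel_begin_call(sock, srx, key, user_call_ID,
				       GFP_KERNEL, example_notify_rx);
}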
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
#include <net/sock.h>
#include <linux/seq_file.h>
-#define BT_SUBSYS_VERSION "2.21"
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
int hci_sock_test_flag(struct sock *sk, int nr);
unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
int hci_sock_init(void);
void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
#define HCI_SDIO 6
#define HCI_SPI 7
#define HCI_I2C 8
+#define HCI_SMD 9
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
HCI_MGMT_INDEX_EVENTS,
HCI_MGMT_UNCONF_INDEX_EVENTS,
HCI_MGMT_EXT_INDEX_EVENTS,
- HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
+#endif
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
-void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
#define HCI_MON_VENDOR_DIAG 11
#define HCI_MON_SYSTEM_NOTE 12
#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __u16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
struct mgmt_ev_advertising_removed {
__u8 instance;
} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index beb7610d64e9..fe78f02a242e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,7 +5,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -593,6 +593,8 @@ struct survey_info {
s8 noise;
};
+#define CFG80211_MAX_WEP_KEYS 4
+
/**
* struct cfg80211_crypto_settings - Crypto settings
* @wpa_versions: indicates which, if any, WPA versions are enabled
@@ -610,6 +612,9 @@ struct survey_info {
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -621,6 +626,8 @@ struct cfg80211_crypto_settings {
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
+ struct key_params *wep_keys;
+ int wep_tx_key;
};
/**
@@ -676,6 +683,18 @@ struct cfg80211_acl_data {
struct mac_address mac_addrs[];
};
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ } control[NUM_NL80211_BANDS];
+};
+
/**
* struct cfg80211_ap_settings - AP configuration
*
@@ -700,6 +719,7 @@ struct cfg80211_acl_data {
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
+ * @beacon_rate: bitrate to be used for beacons
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -719,6 +739,7 @@ struct cfg80211_ap_settings {
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
bool pbss;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -1351,6 +1372,7 @@ struct mesh_config {
* @beacon_interval: beacon interval to use
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
*
* These parameters are fixed when the mesh is created.
*/
@@ -1371,6 +1393,7 @@ struct mesh_setup {
u16 beacon_interval;
int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -2010,17 +2033,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_DYN_ACK = 1 << 5,
};
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
- */
-struct cfg80211_bitrate_mask {
- struct {
- u32 legacy;
- u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
- u16 vht_mcs[NL80211_VHT_NSS_MAX];
- enum nl80211_txrate_gi gi;
- } control[NUM_NL80211_BANDS];
-};
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -2302,6 +2314,98 @@ struct cfg80211_qos_map {
};
/**
+ * struct cfg80211_nan_conf - NAN configuration
+ *
+ * This struct defines NAN configuration parameters
+ *
+ * @master_pref: master preference (1 - 255)
+ * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf
+ */
+struct cfg80211_nan_conf {
+ u8 master_pref;
+ u8 dual;
+};
+
+/**
+ * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
+ * configuration
+ *
+ * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
+ * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation
+ */
+enum cfg80211_nan_conf_changes {
+ CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
+ CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1),
+};
+
+/**
+ * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
+ *
+ * @filter: the content of the filter
+ * @len: the length of the filter
+ */
+struct cfg80211_nan_func_filter {
+ const u8 *filter;
+ u8 len;
+};
+
+/**
+ * struct cfg80211_nan_func - a NAN function
+ *
+ * @type: &enum nl80211_nan_function_type
+ * @service_id: the service ID of the function
+ * @publish_type: &nl80211_nan_publish_type
+ * @close_range: if true, the range should be limited. Threshold is
+ * implementation specific.
+ * @publish_bcast: if true, the solicited publish should be broadcast
+ * @subscribe_active: if true, the subscribe is active
+ * @followup_id: the instance ID for follow up
+ * @followup_reqid: the requestor instance ID for follow up
+ * @followup_dest: MAC address of the recipient of the follow up
+ * @ttl: time to live counter in DW.
+ * @serv_spec_info: Service Specific Info
+ * @serv_spec_info_len: Service Specific Info length
+ * @srf_include: if true, SRF is inclusive
+ * @srf_bf: Bloom Filter
+ * @srf_bf_len: Bloom Filter length
+ * @srf_bf_idx: Bloom Filter index
+ * @srf_macs: SRF MAC addresses
+ * @srf_num_macs: number of MAC addresses in SRF
+ * @rx_filters: rx filters that are matched with corresponding peer's tx_filter
+ * @tx_filters: filters that should be transmitted in the SDF.
+ * @num_rx_filters: length of &rx_filters.
+ * @num_tx_filters: length of &tx_filters.
+ * @instance_id: driver allocated id of the function.
+ * @cookie: unique NAN function identifier.
+ */
+struct cfg80211_nan_func {
+ enum nl80211_nan_function_type type;
+ u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
+ u8 publish_type;
+ bool close_range;
+ bool publish_bcast;
+ bool subscribe_active;
+ u8 followup_id;
+ u8 followup_reqid;
+ struct mac_address followup_dest;
+ u32 ttl;
+ const u8 *serv_spec_info;
+ u8 serv_spec_info_len;
+ bool srf_include;
+ const u8 *srf_bf;
+ u8 srf_bf_len;
+ u8 srf_bf_idx;
+ struct mac_address *srf_macs;
+ int srf_num_macs;
+ struct cfg80211_nan_func_filter *rx_filters;
+ struct cfg80211_nan_func_filter *tx_filters;
+ u8 num_tx_filters;
+ u8 num_rx_filters;
+ u8 instance_id;
+ u64 cookie;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2432,7 +2536,8 @@ struct cfg80211_qos_map {
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
- * @disconnect: Disconnect from the BSS/ESS.
+ * @disconnect: Disconnect from the BSS/ESS. Once done, call
+ * cfg80211_disconnected().
* (invoked with the wireless_dev mutex held)
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
@@ -2588,6 +2693,19 @@ struct cfg80211_qos_map {
* and returning to the base channel for communication with the AP.
* @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
* peers must be on the base channel when the call completes.
+ * @start_nan: Start the NAN interface.
+ * @stop_nan: Stop the NAN interface.
+ * @add_nan_func: Add a NAN function. Returns negative value on failure.
+ * On success, ownership of @nan_func is transferred to the driver and
+ * it may be accessed outside of the scope of this function. The driver
+ * should free the @nan_func when no longer needed by calling
+ * cfg80211_free_nan_func().
+ * On success the driver should assign an instance_id in the
+ * provided @nan_func.
+ * @del_nan_func: Delete a NAN function.
+ * @nan_change_conf: changes NAN configuration. The changed parameters must
+ * be specified in @changes (using &enum cfg80211_nan_conf_changes);
+ * all other parameters must be ignored.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2853,6 +2971,17 @@ struct cfg80211_ops {
void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
+ int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf);
+ void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
+ int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie);
+ int (*nan_change_conf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf,
+ u32 changes);
};
/*
@@ -2899,6 +3028,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
+ * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
+ * before connection.
*/
enum wiphy_flags {
/* use hole at 0 */
@@ -2924,6 +3055,7 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
+ WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
};
/**
@@ -3301,6 +3433,8 @@ struct wiphy_iftype_ext_capab {
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
+ *
+ * @cookie_counter: unique generic cookie counter, used to identify objects.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3430,6 +3564,8 @@ struct wiphy {
u32 bss_select_support;
+ u64 cookie_counter;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3610,6 +3746,7 @@ struct cfg80211_cached_keys;
* beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @p2p_started: true if this is a P2P Device that has been started
+ * @nan_started: true if this is a NAN interface that has been started
* @cac_started: true if DFS channel availability check has been started
* @cac_start_time: timestamp (jiffies) when the dfs state was entered.
* @cac_time_ms: CAC time in ms
@@ -3641,7 +3778,7 @@ struct wireless_dev {
struct mutex mtx;
- bool use_4addr, p2p_started;
+ bool use_4addr, p2p_started, nan_started;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -3955,6 +4092,34 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_ie_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE where the byte array should match.
+ * If match_len is zero, this must also be set to zero.
+ * Otherwise this must be set to 2 or more, because the first
+ * byte is the element id, which is already compared to eid, and
+ * the second byte is the IE length.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match, or a pointer to the first
+ * byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset);
+
+/**
* cfg80211_find_ie - find information element in data
*
* @eid: element ID
@@ -3969,7 +4134,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
+static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
+}
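Editor's note: the new matching helper can replace open-coded loops that scan for an element whose payload begins with a known byte pattern. A minimal sketch, assuming a caller that wants an SSID element; the example_ name is illustrative and not part of this patch:

/* Find an SSID element whose payload starts with the given bytes.
 * match_offset is 2 because the match begins right after the 2-byte
 * element header (ID + length).
 */
static const u8 *example_find_ssid_ie(const u8 *ies, int len,
                                      const u8 *ssid, u8 ssid_len)
{
        return cfg80211_find_ie_match(WLAN_EID_SSID, ies, len,
                                      ssid, ssid_len, 2);
}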
/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
@@ -5518,6 +5686,67 @@ wiphy_ext_feature_isset(struct wiphy *wiphy,
return (ft_byte & BIT(ftidx % 8)) != 0;
}
+/**
+ * cfg80211_free_nan_func - free NAN function
+ * @f: NAN function that should be freed
+ *
+ * Frees the NAN function and all of its allocated members.
+ */
+void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
+
+/**
+ * struct cfg80211_nan_match_params - NAN match parameters
+ * @type: the type of the function that triggered a match. If it is
+ * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
+ * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
+ * result.
+ * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
+ * @inst_id: the local instance id
+ * @peer_inst_id: the instance id of the peer's function
+ * @addr: the MAC address of the peer
+ * @info_len: the length of the &info
+ * @info: the Service Specific Info from the peer (if any)
+ * @cookie: unique identifier of the corresponding function
+ */
+struct cfg80211_nan_match_params {
+ enum nl80211_nan_function_type type;
+ u8 inst_id;
+ u8 peer_inst_id;
+ const u8 *addr;
+ u8 info_len;
+ const u8 *info;
+ u64 cookie;
+};
+
+/**
+ * cfg80211_nan_match - report a match for a NAN function.
+ * @wdev: the wireless device reporting the match
+ * @match: match notification parameters
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function had a match. This
+ * can be a subscribe that had a match or a solicited publish that
+ * was sent. It can also be a follow-up that was received.
+ */
+void cfg80211_nan_match(struct wireless_dev *wdev,
+ struct cfg80211_nan_match_params *match, gfp_t gfp);
+
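Editor's note: a driver receiving a NAN discovery result would typically fill the match parameters and forward them to cfg80211. A minimal sketch under that assumption; the example_ name and the choice of GFP_KERNEL are illustrative:

static void example_report_nan_publish_match(struct wireless_dev *wdev,
                                             const u8 *peer_addr,
                                             u8 local_inst_id,
                                             u8 peer_inst_id, u64 cookie)
{
        struct cfg80211_nan_match_params match = {
                .type = NL80211_NAN_FUNC_PUBLISH,   /* discovery result */
                .inst_id = local_inst_id,
                .peer_inst_id = peer_inst_id,
                .addr = peer_addr,
                .cookie = cookie,
        };

        cfg80211_nan_match(wdev, &match, GFP_KERNEL);
}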
+/**
+ * cfg80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * @wdev: the wireless device reporting the match
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @cookie: unique NAN function identifier
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function has terminated.
+ */
+void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ u64 cookie, gfp_t gfp);
+
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c99ffe8cef3c..211bd3c37028 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -50,7 +50,6 @@ struct devlink_sb_pool_info {
};
struct devlink_ops {
- size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2217a3f817f8..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
+ DSA_TAG_PROTO_QCA,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -142,6 +143,7 @@ struct dsa_port {
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
+ u8 stp_state;
};
struct dsa_switch {
@@ -165,9 +167,9 @@ struct dsa_switch {
struct dsa_chip_data *cd;
/*
- * The used switch driver.
+ * The switch operations.
*/
- struct dsa_switch_driver *drv;
+ struct dsa_switch_ops *ops;
/*
* An array of which element [a] indicates which port on this
@@ -234,19 +236,21 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct switchdev_trans;
struct switchdev_obj;
struct switchdev_obj_port_fdb;
+struct switchdev_obj_port_mdb;
struct switchdev_obj_port_vlan;
-struct dsa_switch_driver {
+struct dsa_switch_ops {
struct list_head list;
- enum dsa_tag_protocol tag_protocol;
-
/*
* Probing and setup.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv);
+
+ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds);
+
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -336,6 +340,7 @@ struct dsa_switch_driver {
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
/*
* VLAN support
@@ -368,17 +373,27 @@ struct dsa_switch_driver {
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj));
+
+ /*
+ * Multicast database
+ */
+ int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ void (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ int (*port_mdb_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+ int (*port_mdb_dump)(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_mdb *mdb,
+ int (*cb)(struct switchdev_obj *obj));
};
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
+void register_switch_driver(struct dsa_switch_ops *type);
+void unregister_switch_driver(struct dsa_switch_ops *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
-static inline void *ds_to_priv(struct dsa_switch *ds)
-{
- return ds->priv;
-}
-
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
return dst->rcv != NULL;
@@ -386,4 +401,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds);
+int dsa_switch_resume(struct dsa_switch *ds);
+#else
+static inline int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ return 0;
+}
+static inline int dsa_switch_resume(struct dsa_switch *ds)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
#endif
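Editor's note: with dsa_switch_driver renamed to dsa_switch_ops and the tag_protocol field replaced by a callback, a driver now reports its tagging protocol at runtime. A minimal sketch; the example_ names are illustrative, DSA_TAG_PROTO_QCA is the value added above:

static enum dsa_tag_protocol example_get_tag_protocol(struct dsa_switch *ds)
{
        return DSA_TAG_PROTO_QCA;
}

static struct dsa_switch_ops example_switch_ops = {
        .get_tag_protocol = example_get_tag_protocol,
        /* .probe, .setup, .port_fdb_add, ... as before */
};

/* On module init: register_switch_driver(&example_switch_ops); */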
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 5db9f5910428..6965c8f68ade 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -112,12 +112,13 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
return &dst->u.tun_info;
}
-static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
+static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
+ __be32 daddr,
+ __u8 tos, __u8 ttl,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
{
- const struct iphdr *iph = ip_hdr(skb);
struct metadata_dst *tun_dst;
tun_dst = tun_rx_dst(md_size);
@@ -125,17 +126,30 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
return NULL;
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ saddr, daddr, tos, ttl,
0, 0, 0, tunnel_id, flags);
return tun_dst;
}
-static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ flags, tunnel_id, md_size);
+}
+
+static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 tos, __u8 ttl,
+ __be32 label,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@@ -150,14 +164,26 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
info->key.tp_src = 0;
info->key.tp_dst = 0;
- info->key.u.ipv6.src = ip6h->saddr;
- info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.u.ipv6.src = *saddr;
+ info->key.u.ipv6.dst = *daddr;
- info->key.tos = ipv6_get_dsfield(ip6h);
- info->key.ttl = ip6h->hop_limit;
- info->key.label = ip6_flowlabel(ip6h);
+ info->key.tos = tos;
+ info->key.ttl = ttl;
+ info->key.label = label;
return tun_dst;
}
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
+ ipv6_get_dsfield(ip6h), ip6h->hop_limit,
+ ip6_flowlabel(ip6h), flags, tunnel_id,
+ md_size);
+}
#endif /* __NET_DST_METADATA_H */
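Editor's note: splitting out __ip_tun_set_dst() lets a collect_md tunnel build the metadata dst from addresses it parsed itself instead of from the skb headers. A minimal sketch under that assumption; the example_ name, the TOS/TTL values and the zero option size are illustrative:

static struct metadata_dst *example_collect_md(__be32 saddr, __be32 daddr,
                                               __be64 tun_id)
{
        /* No per-tunnel metadata options, hence md_size == 0. */
        return __ip_tun_set_dst(saddr, daddr, 0, 64, TUNNEL_KEY, tun_id, 0);
}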
diff --git a/include/net/flow.h b/include/net/flow.h
index d47ef4bb5423..035aa7716967 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,8 +34,7 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_L3MDEV_SRC 0x04
-#define FLOWI_FLAG_SKIP_NH_OIF 0x08
+#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
struct flowi_tunnel flowic_tun_key;
};
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d3d60dccd19f..d9534927d93b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -32,8 +32,13 @@ struct flow_dissector_key_basic {
};
struct flow_dissector_key_tags {
- u32 vlan_id:12,
- flow_label:20;
+ u32 flow_label;
+};
+
+struct flow_dissector_key_vlan {
+ u16 vlan_id:12,
+ vlan_priority:3;
+ u16 padding;
};
struct flow_dissector_key_keyid {
@@ -119,7 +124,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
- FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
@@ -148,6 +153,7 @@ struct flow_keys {
#define FLOW_KEYS_HASH_START_FIELD basic
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
@@ -177,7 +183,7 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
-static inline bool flow_keys_have_l4(struct flow_keys *keys)
+static inline bool flow_keys_have_l4(const struct flow_keys *keys)
{
return (keys->ports.ports || keys->tags.flow_label);
}
diff --git a/include/net/fq.h b/include/net/fq.h
index 268b49049c37..6d8521a30c5c 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -72,9 +72,12 @@ struct fq {
u32 flows_cnt;
u32 perturbation;
u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
u32 quantum;
u32 backlog;
u32 overlimit;
+ u32 overmemory;
u32 collisions;
};
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 163f3ed0f05a..4e6131cd3f43 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -29,6 +29,7 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
tin->backlog_packets--;
flow->backlog -= skb->len;
fq->backlog--;
+ fq->memory_usage -= skb->truesize;
if (flow->backlog == 0) {
list_del_init(&flow->backlogchain);
@@ -154,6 +155,7 @@ static void fq_tin_enqueue(struct fq *fq,
flow->backlog += skb->len;
tin->backlog_bytes += skb->len;
tin->backlog_packets++;
+ fq->memory_usage += skb->truesize;
fq->backlog++;
fq_recalc_backlog(fq, tin, flow);
@@ -166,7 +168,7 @@ static void fq_tin_enqueue(struct fq *fq,
__skb_queue_tail(&flow->queue, skb);
- if (fq->backlog > fq->limit) {
+ if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -181,6 +183,8 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
+ if (fq->memory_usage > fq->memory_limit)
+ fq->overmemory++;
}
}
@@ -251,6 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
fq->perturbation = prandom_u32();
fq->quantum = 300;
fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
if (!fq->flows)
diff --git a/include/net/gre.h b/include/net/gre.h
index 73ea256eb7d7..d25d836c129b 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -7,7 +7,15 @@
struct gre_base_hdr {
__be16 flags;
__be16 protocol;
-};
+} __packed;
+
+struct gre_full_hdr {
+ struct gre_base_hdr fixed_header;
+ __be16 csum;
+ __be16 reserved1;
+ __be32 key;
+ __be32 seq;
+} __packed;
#define GRE_HEADER_SECTION 4
#define GREPROTO_CISCO 0
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0fd9476c538..ba07b9d8ed63 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -190,6 +190,10 @@ struct ieee80211_radiotap_header {
* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
*
* Contains VHT information about this frame.
+ *
+ * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable
+ *
+ * Contains timestamp information for this frame.
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -214,6 +218,7 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_MCS = 19,
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_TIMESTAMP = 22,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -321,6 +326,22 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
+/* For IEEE80211_RADIOTAP_TIMESTAMP */
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
+
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02
+
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 1c8b6820b694..515352c6280a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -201,6 +201,7 @@ struct inet6_dev {
struct ipv6_devstat stats;
struct timer_list rs_timer;
+ __s32 rs_interval; /* in jiffies */
__u8 rs_probes;
__u8 addr_gen_mode;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[64 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
+ u64 icsk_ca_priv[88 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..bc43c0fcae12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -219,6 +219,29 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
+#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
+#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
+
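Editor's note: the batch macros walk every possible CPU once and accumulate all listed counters, rather than walking the CPUs once per counter. A minimal sketch of a caller; the table shape mirrors the kernel's { name, entry } MIB tables, and the entries and names chosen here are illustrative:

static const struct {
        const char *name;
        int entry;
} example_tcp_list[] = {
        { "InSegs",  TCP_MIB_INSEGS },
        { "OutSegs", TCP_MIB_OUTSEGS },
        { NULL, 0 },    /* the macros stop at a NULL name */
};

static void example_fill_tcp_mib(struct net *net, unsigned long *buff)
{
        /* buff[] must have one zeroed slot per non-NULL table entry. */
        snmp_get_cpu_field_batch(buff, example_tcp_list,
                                 net->mib.tcp_statistics);
}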
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
}
void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 43a5a0e4524c..20ed9699fcd4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
struct in6_addr laddr; /* local tunnel end-point address */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7d4a72e75f33..b9314b48e39f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,7 @@
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
struct fib_config {
u8 fc_dst_len;
@@ -122,6 +123,7 @@ struct fib_info {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
#endif
+ unsigned int fib_offload_cnt;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
@@ -173,6 +175,18 @@ struct fib_result_nl {
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+static inline void fib_info_offload_inc(struct fib_info *fi)
+{
+ fi->fib_offload_cnt++;
+ fi->fib_flags |= RTNH_F_OFFLOAD;
+}
+
+static inline void fib_info_offload_dec(struct fib_info *fi)
+{
+ if (--fi->fib_offload_cnt == 0)
+ fi->fib_flags &= ~RTNH_F_OFFLOAD;
+}
+
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -185,6 +199,33 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
FIB_RES_SADDR(net, res))
+struct fib_notifier_info {
+ struct net *net;
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 tb_id;
+ u32 nlflags;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+};
+
+int register_fib_notifier(struct notifier_block *nb);
+int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
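Editor's note: consumers such as switchdev drivers register a notifier block and receive the info structs defined above. A minimal sketch; the example_ names are illustrative and error handling is omitted:

static int example_fib_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
        switch (event) {
        case FIB_EVENT_ENTRY_ADD:
        case FIB_EVENT_ENTRY_DEL: {
                struct fib_entry_notifier_info *fen_info = ptr;

                pr_debug("%s route /%d in table %u\n",
                         event == FIB_EVENT_ENTRY_ADD ? "add" : "del",
                         fen_info->dst_len, fen_info->tb_id);
                break;
        }
        default:
                /* FIB_EVENT_RULE_* carry only struct fib_notifier_info. */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_fib_nb = {
        .notifier_call = example_fib_event,
};

/* On driver init: register_fib_notifier(&example_fib_nb); */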
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
@@ -196,13 +237,12 @@ struct fib_table {
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct fib_table *, struct fib_config *);
-int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
-int fib_table_flush(struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
-void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -315,7 +355,6 @@ static inline int fib_num_tclassid_users(struct net *net)
}
#endif
int fib_unmerge(struct net *net);
-void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a5e7035fb93f..59557c07904b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -222,6 +222,25 @@ static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
+static inline __be64 key32_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)key;
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static inline __be32 tunnel_id_to_key32(__be64 tun_id)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
+
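Editor's note: these helpers centralize the byte-order handling for converting a 32-bit tunnel key (such as a GRE key) to and from the 64-bit tunnel id used by the tunnel metadata code. A minimal sketch of the round trip; the example_ name is illustrative:

static bool example_key_roundtrip(__be32 key)
{
        __be64 tun_id = key32_to_tunnel_id(key);

        return tunnel_id_to_key32(tun_id) == key;
}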
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -236,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const u8 proto);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2840b5825dcc..2a8965819db0 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/strparser.h>
#include <uapi/linux/kcm.h>
extern unsigned int kcm_net_id;
@@ -21,16 +22,8 @@ extern unsigned int kcm_net_id;
#define KCM_STATS_INCR(stat) ((stat)++)
struct kcm_psock_stats {
- unsigned long long rx_msgs;
- unsigned long long rx_bytes;
unsigned long long tx_msgs;
unsigned long long tx_bytes;
- unsigned int rx_aborts;
- unsigned int rx_mem_fail;
- unsigned int rx_need_more_hdr;
- unsigned int rx_msg_too_big;
- unsigned int rx_msg_timeouts;
- unsigned int rx_bad_hdr_len;
unsigned long long reserved;
unsigned long long unreserved;
unsigned int tx_aborts;
@@ -64,13 +57,6 @@ struct kcm_tx_msg {
struct sk_buff *last_skb;
};
-struct kcm_rx_msg {
- int full_len;
- int accum_len;
- int offset;
- int early_eaten;
-};
-
/* Socket structure for KCM client sockets */
struct kcm_sock {
struct sock sk;
@@ -87,6 +73,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
+ u32 tx_stopped : 1;
/* Don't use bit fields here, these are set under different locks */
bool tx_wait;
@@ -104,11 +91,11 @@ struct bpf_prog;
/* Structure for an attached lower socket */
struct kcm_psock {
struct sock *sk;
+ struct strparser strp;
struct kcm_mux *mux;
int index;
u32 tx_stopped : 1;
- u32 rx_stopped : 1;
u32 done : 1;
u32 unattaching : 1;
@@ -121,18 +108,12 @@ struct kcm_psock {
struct kcm_psock_stats stats;
/* Receive */
- struct sk_buff *rx_skb_head;
- struct sk_buff **rx_skb_nextp;
- struct sk_buff *ready_rx_msg;
struct list_head psock_ready_list;
- struct work_struct rx_work;
- struct delayed_work rx_delayed_work;
struct bpf_prog *bpf_prog;
struct kcm_sock *rx_kcm;
unsigned long long saved_rx_bytes;
unsigned long long saved_rx_msgs;
- struct timer_list rx_msg_timer;
- unsigned int rx_need_bytes;
+ struct sk_buff *ready_rx_msg;
/* Transmit */
struct kcm_sock *tx_kcm;
@@ -146,6 +127,7 @@ struct kcm_net {
struct mutex mutex;
struct kcm_psock_stats aggregate_psock_stats;
struct kcm_mux_stats aggregate_mux_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
struct list_head mux_list;
int count;
};
@@ -163,6 +145,7 @@ struct kcm_mux {
struct kcm_mux_stats stats;
struct kcm_psock_stats aggregate_psock_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
/* Receive */
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -190,14 +173,6 @@ static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
/* Save psock statistics in the mux when psock is being unattached. */
#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
- SAVE_PSOCK_STATS(rx_msgs);
- SAVE_PSOCK_STATS(rx_bytes);
- SAVE_PSOCK_STATS(rx_aborts);
- SAVE_PSOCK_STATS(rx_mem_fail);
- SAVE_PSOCK_STATS(rx_need_more_hdr);
- SAVE_PSOCK_STATS(rx_msg_too_big);
- SAVE_PSOCK_STATS(rx_msg_timeouts);
- SAVE_PSOCK_STATS(rx_bad_hdr_len);
SAVE_PSOCK_STATS(tx_msgs);
SAVE_PSOCK_STATS(tx_bytes);
SAVE_PSOCK_STATS(reserved);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e90095091aa0..b220dabeab45 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,7 @@
#ifndef _NET_L3MDEV_H_
#define _NET_L3MDEV_H_
+#include <net/dst.h>
#include <net/fib_rules.h>
/**
@@ -18,30 +19,24 @@
*
* @l3mdev_fib_table: Get FIB table id to use for lookups
*
- * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ * @l3mdev_l3_rcv: Hook in L3 receive path
*
- * @l3mdev_get_saddr: Get source address for a flow
+ * @l3mdev_l3_out: Hook in L3 output path
*
- * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
+ * @l3mdev_link_scope_lookup: IPv6 lookup for linklocal and mcast destinations
*/
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
struct sk_buff *skb, u16 proto);
-
- /* IPv4 ops */
- struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
- const struct flowi4 *fl4);
- int (*l3mdev_get_saddr)(struct net_device *dev,
- struct flowi4 *fl4);
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
+ struct sock *sk, struct sk_buff *skb,
+ u16 proto);
/* IPv6 ops */
- struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
struct flowi6 *fl6);
- int (*l3mdev_get_saddr6)(struct net_device *dev,
- const struct sock *sk,
- struct flowi6 *fl6);
};
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -49,6 +44,8 @@ struct l3mdev_ops {
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+void l3mdev_update_flow(struct net *net, struct flowi *fl);
+
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
@@ -80,7 +77,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
{
/* netdev_master_upper_dev_get_rcu calls
* list_first_or_null_rcu to walk the upper dev list.
@@ -89,7 +86,7 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
* typecast to remove the const
*/
struct net_device *dev = (struct net_device *)_dev;
- const struct net_device *master;
+ struct net_device *master;
if (!dev)
return NULL;
@@ -104,26 +101,6 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
-/* get index of an interface to use for FIB lookups. For devices
- * enslaved to an L3 master device FIB lookups are based on the
- * master index
- */
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
-}
-
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- int oif;
-
- rcu_read_lock();
- oif = l3mdev_fib_oif_rcu(dev);
- rcu_read_unlock();
-
- return oif;
-}
-
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -137,39 +114,7 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
return tb_id;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
- return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
-
- return NULL;
-}
-
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- struct net_device *dev;
- bool rc = false;
-
- if (ifindex == 0)
- return false;
-
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev)
- rc = netif_is_l3_master(dev);
-
- rcu_read_unlock();
-
- return rc;
-}
-
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
-
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6);
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
static inline
struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -199,6 +144,34 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
return l3mdev_l3_rcv(skb, AF_INET6);
}
+static inline
+struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ if (netif_is_l3_slave(dev)) {
+ struct net_device *master;
+
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master && master->l3mdev_ops->l3mdev_l3_out)
+ skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ skb, proto);
+ }
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET6);
+}
#else
static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
@@ -216,20 +189,11 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
}
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-
static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
return 0;
@@ -243,43 +207,32 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
return 0;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
+static inline
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
{
return NULL;
}
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- return false;
-}
-
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
-{
- return 0;
-}
-
static inline
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
}
-static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- return 0;
+ return skb;
}
static inline
-struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
static inline
-struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
@@ -290,6 +243,10 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+static inline
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+}
#endif
#endif /* _NET_L3MDEV_H_ */
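Editor's note: the new l3mdev_l3_out()/l3mdev_ip_out() hooks give an L3 master device (e.g. VRF) a chance to rewrite or consume locally generated traffic. A minimal sketch of how an output path might use it; the surrounding example_ function is illustrative, not the actual call site:

static int example_output(struct net *net, struct sock *sk,
                          struct sk_buff *skb)
{
        /* Let an l3mdev master see the skb before it is sent. */
        skb = l3mdev_ip_out(sk, skb);
        if (unlikely(!skb))
                return 0;       /* consumed by the l3mdev device */

        return ip_local_out(net, sk, skb);
}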
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index e9f116e29c22..ea3f80f58fd6 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -13,6 +13,13 @@
/* lw tunnel state flags */
#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+
+enum {
+ LWTUNNEL_XMIT_DONE,
+ LWTUNNEL_XMIT_CONTINUE,
+};
+
struct lwtunnel_state {
__u16 type;
@@ -21,6 +28,7 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
int len;
+ __u16 headroom;
__u8 data[0];
};
@@ -34,6 +42,7 @@ struct lwtunnel_encap_ops {
struct lwtunnel_state *lwtstate);
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
};
#ifdef CONFIG_LWTUNNEL
@@ -75,6 +84,24 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_XMIT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ if (lwtunnel_xmit_redirect(lwtstate) && lwtstate->headroom < mtu)
+ return lwtstate->headroom;
+
+ return 0;
+}
+
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
@@ -90,6 +117,7 @@ struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int lwtunnel_input(struct sk_buff *skb);
+int lwtunnel_xmit(struct sk_buff *skb);
#else
@@ -117,6 +145,17 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ return 0;
+}
+
static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
@@ -170,6 +209,11 @@ static inline int lwtunnel_input(struct sk_buff *skb)
return -EOPNOTSUPP;
}
+static inline int lwtunnel_xmit(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_LWTUNNEL */
#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cca510a585c3..a810dfcb83c2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
* frame (PS-Poll or uAPSD).
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
*
* These flags are used in tx_info->control.flags.
*/
@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
IEEE80211_TX_CTRL_AMSDU = BIT(3),
+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
};
/*
@@ -1735,6 +1737,9 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @max_rx_aggregation_subframes: maximum number of frames in a single A-MPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by the driver.
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
@@ -1775,6 +1780,7 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u8 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2014,6 +2020,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
* skbs, needed for zero-copy software A-MSDU.
*
+ * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low-ack
+ * events via ieee80211_report_low_ack() based on its own algorithm. For
+ * such drivers, the mac80211 packet-loss mechanism is not triggered and
+ * the driver depends entirely on firmware events for station kickout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2054,6 +2065,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_USES_RSS,
IEEE80211_HW_TX_AMSDU,
IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_REPORTS_LOW_ACK,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2141,6 +2153,14 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
+ * @radiotap_timestamp: Information for the radiotap timestamp field; if the
+ * 'units_pos' member is set to a non-negative value it must be set to
+ * a combination of an IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and an
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
+ * field will be added and populated from the &struct ieee80211_rx_status
+ * device_timestamp. If the 'accuracy' member is non-negative, it's put
+ * into the accuracy radiotap field and the accuracy known flag is set.
+ *
* @netdev_features: netdev features to be set in each netdev created
* from this HW. Note that not all features are usable with mac80211,
* other features will be rejected during HW registration.
@@ -2159,6 +2179,8 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ * @max_nan_de_entries: maximum number of NAN DE functions supported by the
+ * device.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2184,11 +2206,16 @@ struct ieee80211_hw {
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
+ struct {
+ int units_pos;
+ s16 accuracy;
+ } radiotap_timestamp;
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ u8 max_nan_de_entries;
};
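Editor's note: a driver that can report per-frame device timestamps opts in by filling the new radiotap_timestamp members before registering the hardware. A minimal sketch; the example_ name and the chosen values are illustrative:

static void example_enable_radiotap_timestamp(struct ieee80211_hw *hw)
{
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
                IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
        hw->radiotap_timestamp.accuracy = 22;   /* device specific, in usecs */
}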
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -3085,11 +3112,8 @@ enum ieee80211_reconfig_type {
*
* @sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a station is added to mac80211's station list. This callback
- * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
- * conditional. This callback can sleep.
- *
- * @sta_remove_debugfs: Remove the debugfs files which were added using
- * @sta_add_debugfs. This callback can sleep.
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
* associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
@@ -3147,6 +3171,12 @@ enum ieee80211_reconfig_type {
* required function.
* The callback can sleep.
*
+ * @offset_tsf: Offset the TSF timer by the specified value in the
+ * firmware/hardware. Preferred over @set_tsf as it avoids the delay
+ * between calling set_tsf() and the hardware getting programmed, which
+ * will show up as TSF delay. This is not a required function.
+ * The callback can sleep.
+ *
* @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
* with other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
@@ -3401,6 +3431,21 @@ enum ieee80211_reconfig_type {
* synchronization which is needed in case driver has in its RSS queues
* pending frames that were received prior to the control path action
* currently taken (e.g. disassociation) but are not processed yet.
+ *
+ * @start_nan: join an existing NAN cluster, or create a new one.
+ * @stop_nan: leave the NAN cluster.
+ * @nan_change_conf: change NAN configuration. The data in cfg80211_nan_conf
+ * contains the full new configuration and @changes specifies which
+ * parameters are changed with respect to the last NAN config.
+ * The driver gets both the full configuration and the changed parameters since
+ * some devices may need the full configuration while others need only the
+ * changed parameters.
+ * @add_nan_func: Add a NAN function. Returns 0 on success. The data in
+ * cfg80211_nan_func must not be referenced outside the scope of
+ * this call.
+ * @del_nan_func: Remove a NAN function. The driver must call
+ * ieee80211_nan_func_terminated() with
+ * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3485,10 +3530,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
- void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -3516,6 +3557,8 @@ struct ieee80211_ops {
u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 tsf);
+ void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -3640,6 +3683,21 @@ struct ieee80211_ops {
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
void (*sync_rx_queues)(struct ieee80211_hw *hw);
+
+ int (*start_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf);
+ int (*stop_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*nan_change_conf)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf, u32 changes);
+ int (*add_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 instance_id);
};
/**
@@ -5713,4 +5771,36 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
unsigned long *frame_cnt,
unsigned long *byte_cnt);
+
+/**
+ * ieee80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * This function is used to notify mac80211 about NAN function termination.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ gfp_t gfp);
+
+/**
+ * ieee80211_nan_func_match - notify about NAN function match event.
+ *
+ * This function is used to notify mac80211 about a NAN function match. The
+ * cookie inside the match struct will be assigned by mac80211.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @match: match event information
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_match(struct ieee80211_vif *vif,
+ struct cfg80211_nan_match_params *match,
+ gfp_t gfp);
+
#endif /* MAC80211_H */
diff --git a/include/net/mpls.h b/include/net/mpls.h
index 5b3b5addfb08..1dbc669b770e 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -19,21 +19,18 @@
#define MPLS_HLEN 4
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
static inline bool eth_p_mpls(__be16 eth_type)
{
return eth_type == htons(ETH_P_MPLS_UC) ||
eth_type == htons(ETH_P_MPLS_MC);
}
-/*
- * For non-MPLS skbs this will correspond to the network header.
- * For MPLS skbs it will be before the network_header as the MPLS
- * label stack lies between the end of the mac header and the network
- * header. That is, for MPLS skbs the end of the mac header
- * is the top of the MPLS label stack.
- */
-static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
- return skb_mac_header(skb) + skb->mac_len;
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
}
#endif
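Editor's note: with mpls_hdr() returning a typed pointer at the network header, extracting fields from the top shim entry becomes a plain mask-and-shift. A minimal sketch using the MPLS_LS_* constants from <uapi/linux/mpls.h>; the example_ name is illustrative:

static u32 example_mpls_top_label(const struct sk_buff *skb)
{
        u32 entry = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);

        return (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
}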
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 1dbf42f79750..68680baac0fd 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -31,6 +31,7 @@ struct ncsi_dev {
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd));
int ncsi_start_dev(struct ncsi_dev *nd);
+void ncsi_stop_dev(struct ncsi_dev *nd);
void ncsi_unregister_dev(struct ncsi_dev *nd);
#else /* !CONFIG_NET_NCSI */
static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
@@ -44,6 +45,10 @@ static inline int ncsi_start_dev(struct ncsi_dev *nd)
return -ENOTTY;
}
+static inline void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+}
+
static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
{
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 0933c7455a30..fc4f757107df 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -60,6 +60,7 @@ struct net {
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ struct ucounts *ucounts;
spinlock_t nsid_lock;
struct idr netns_ids;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index e8d1448425a7..0b0c35c37125 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -15,6 +15,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
void nf_bridge_update_protocol(struct sk_buff *skb);
+int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *));
+
static inline struct nf_bridge_info *
nf_bridge_info_get(const struct sk_buff *skb)
{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 445b019c2078..50418052a520 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -42,7 +42,6 @@ union nf_conntrack_expect_proto {
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <linux/timer.h>
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_CT_ASSERT(x) WARN_ON(!(x))
@@ -73,7 +72,7 @@ struct nf_conn_help {
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
struct nf_conn {
- /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
+ /* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
*
* Hint, SKB address this struct and refcnt via skb->nfct and
@@ -96,8 +95,8 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- /* Timer function; drops refcnt when it goes off. */
- struct timer_list timeout;
+ /* jiffies32 when this ct is considered dead */
+ u32 timeout;
possible_net_t ct_net;
@@ -220,21 +219,14 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
-bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb, int do_acct);
-
/* kill conntrack and do accounting */
-static inline bool nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb)
-{
- return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
-}
+bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb);
/* kill conntrack without accounting */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
- return __nf_ct_kill_acct(ct, 0, NULL, 0);
+ return nf_ct_delete(ct, 0, 0);
}
/* These are for NAT. Icky. */
@@ -291,21 +283,55 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+#define nfct_time_stamp ((u32)(jiffies))
+
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- long timeout = (long)ct->timeout.expires - (long)jiffies;
+ s32 timeout = ct->timeout - nfct_time_stamp;
return timeout > 0 ? timeout : 0;
}
+static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+{
+ return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+}
+
+/* use after obtaining a reference count */
+static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+{
+ return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+ !nf_ct_is_dying(ct);
+}
+
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);
+
+extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
+extern seqcount_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
+/* must be called with rcu read lock held */
+static inline void
+nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+{
+ struct hlist_nulls_head *hptr;
+ unsigned int sequence, hsz;
+
+ do {
+ sequence = read_seqcount_begin(&nf_conntrack_generation);
+ hsz = nf_conntrack_htable_size;
+ hptr = nf_conntrack_hash;
+ } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+ *hash = hptr;
+ *hsize = hsz;
+}
+
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags);
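A hedged sketch of how readers are expected to consume the new timeout representation and the resizable hash table; it assumes the caller already holds a reference on the conntrack entry, and it only reports what it finds rather than walking the buckets.

#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack.h>

static void ct_inspect(const struct nf_conn *ct)
{
	struct hlist_nulls_head *hash;
	unsigned int hsize;

	/* The u32 jiffies32 timeout is compared via a signed difference,
	 * so these checks stay correct across jiffies wraparound.
	 */
	if (nf_ct_should_gc(ct))
		pr_debug("ct expired %u jiffies ago\n",
			 nfct_time_stamp - ct->timeout);
	else
		pr_debug("ct expires in %lu jiffies\n", nf_ct_expires(ct));

	/* Resize-safe snapshot of the hash; rcu read lock required,
	 * as noted above nf_conntrack_get_ht().
	 */
	rcu_read_lock();
	nf_conntrack_get_ht(&hash, &hsize);
	pr_debug("conntrack hash %p with %u buckets\n", hash, hsize);
	rcu_read_unlock();
}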
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 79d7ac5c9740..62e17d1319ff 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -51,8 +51,6 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
-void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize);
-
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net,
@@ -83,7 +81,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
#define CONNTRACK_LOCKS 1024
-extern struct hlist_nulls_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index fa36447371c6..12d967b58726 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,12 +12,19 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>
+enum nf_ct_ecache_state {
+ NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
+ NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
+ NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
+};
+
struct nf_conntrack_ecache {
- unsigned long cache; /* bitops want long */
- unsigned long missed; /* missed events */
- u16 ctmask; /* bitmask of ct events to be delivered */
- u16 expmask; /* bitmask of expect events to be delivered */
- u32 portid; /* netlink portid of destroyer */
+ unsigned long cache; /* bitops want long */
+ unsigned long missed; /* missed events */
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 portid; /* netlink portid of destroyer */
+ enum nf_ct_ecache_state state; /* ecache state */
};
static inline struct nf_conntrack_ecache *
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index cdc920b4c4c2..8992e4229da9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -63,10 +63,6 @@ struct nf_conntrack_l3proto {
size_t nla_size;
-#ifdef CONFIG_SYSCTL
- const char *ctl_table_path;
-#endif /* CONFIG_SYSCTL */
-
/* Init l3proto pernet data */
int (*init_net)(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1a5fb36f165f..de629f1520df 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -134,14 +134,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
-static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
-{
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- kfree(pn->ctl_compat_table);
- pn->ctl_compat_table = NULL;
-#endif
-}
-
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 83d855ba6af1..309cd267be4f 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -2,15 +2,10 @@
#define _NF_LOG_H
#include <linux/netfilter.h>
+#include <linux/netfilter/nf_log.h>
-/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will
- * disappear once iptables is replaced with pkttables. Please DO NOT use them
- * for any new code! */
-#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
-#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
-#define NF_LOG_IPOPT 0x04 /* Log IP options */
-#define NF_LOG_UID 0x08 /* Log UID owning local socket */
-#define NF_LOG_MASK 0x0f
+/* Log tcp sequence, tcp options, ip options and uid owning local socket */
+#define NF_LOG_DEFAULT_MASK 0x0f
/* This flag indicates that copy_len field in nf_loginfo is set */
#define NF_LOG_F_COPY_LEN 0x1
@@ -60,8 +55,7 @@ struct nf_logger {
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
-void nf_log_set(struct net *net, u_int8_t pf,
- const struct nf_logger *logger);
+int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
void nf_log_unset(struct net *net, const struct nf_logger *logger);
int nf_log_bind_pf(struct net *net, u_int8_t pf,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 0dbce55437f2..2280cfe86c56 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -11,7 +11,6 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
- struct nf_hook_ops *elem;
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -22,10 +21,10 @@ struct nf_queue_entry {
/* Packet queuing */
struct nf_queue_handler {
- int (*outfn)(struct nf_queue_entry *entry,
- unsigned int queuenum);
- void (*nf_hook_drop)(struct net *net,
- struct nf_hook_ops *ops);
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ const struct nf_hook_entry *hooks);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
@@ -41,23 +40,19 @@ static inline void init_hashrandom(u32 *jhash_initval)
*jhash_initval = prandom_u32();
}
-static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
{
- const struct iphdr *iph = ip_hdr(skb);
-
/* packets in either direction go into same queue */
if ((__force u32)iph->saddr < (__force u32)iph->daddr)
return jhash_3words((__force u32)iph->saddr,
- (__force u32)iph->daddr, iph->protocol, jhash_initval);
+ (__force u32)iph->daddr, iph->protocol, initval);
return jhash_3words((__force u32)iph->daddr,
- (__force u32)iph->saddr, iph->protocol, jhash_initval);
+ (__force u32)iph->saddr, iph->protocol, initval);
}
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
u32 a, b, c;
if ((__force u32)ip6h->saddr.s6_addr32[3] <
@@ -75,20 +70,50 @@ static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
else
c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash_3words(a, b, c, jhash_initval);
+ return jhash_3words(a, b, c, initval);
+}
+
+static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
+{
+ struct ipv6hdr *ip6h, _ip6h;
+ struct iphdr *iph, _iph;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ iph = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*iph), &_iph);
+ if (iph)
+ return hash_v4(iph, initval);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*ip6h), &_ip6h);
+ if (ip6h)
+ return hash_v6(ip6h, initval);
+ break;
+ }
+
+ return 0;
}
-#endif
static inline u32
nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
- u32 jhash_initval)
+ u32 initval)
{
- if (family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
- else if (family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
-#endif
+ switch (family) {
+ case NFPROTO_IPV4:
+ queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_IPV6:
+ queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_BRIDGE:
+ queue += reciprocal_scale(hash_bridge(skb, initval),
+ queues_total);
+ break;
+ }
return queue;
}
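A brief sketch of how an NFQUEUE-style target can use the rewritten helpers to spread flows across a queue range; the initval is expected to come from init_hashrandom() (or net_get_random_once() in real code) and is passed in here to keep the sketch self-contained.

#include <net/netfilter/nf_queue.h>

static u32 pick_queue(const struct sk_buff *skb, u8 family,
		      u16 first_queue, u16 queues_total, u32 initval)
{
	if (queues_total <= 1)
		return first_queue;

	/* hash_v4/v6/bridge hash both directions of a flow to the same
	 * value; reciprocal_scale() maps it into [0, queues_total) and
	 * nfqueue_hash() offsets the result by first_queue.
	 */
	return nfqueue_hash(skb, first_queue, queues_total, family, initval);
}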
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f2f13399ce44..5031e072567b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -19,6 +19,7 @@ struct nft_pktinfo {
const struct net_device *out;
u8 pf;
u8 hook;
+ bool tprot_set;
u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
@@ -36,6 +37,23 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->pf = pkt->xt.family = state->pf;
}
+static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ pkt->tprot_set = false;
+ pkt->tprot = 0;
+ pkt->xt.thoff = 0;
+ pkt->xt.fragoff = 0;
+}
+
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
/**
* struct nft_verdict - nf_tables verdict
*
@@ -127,6 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
+unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
@@ -251,7 +270,8 @@ struct nft_set_ops {
int (*insert)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext);
void (*activate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
deleted file mode 100644
index 511fb79f6dad..000000000000
--- a/include/net/netfilter/nf_tables_bridge.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _NET_NF_TABLES_BRIDGE_H
-#define _NET_NF_TABLES_BRIDGE_H
-
-int nft_bridge_iphdr_validate(struct sk_buff *skb);
-int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
-
-#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a9060dd99db7..00f4f6b1b1ba 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -28,6 +28,9 @@ extern const struct nft_expr_ops nft_cmp_fast_ops;
int nft_cmp_module_init(void);
void nft_cmp_module_exit(void);
+int nft_range_module_init(void);
+void nft_range_module_exit(void);
+
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ca6ef6bf775e..25e33aee91e7 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -14,11 +14,53 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
ip = ip_hdr(pkt->skb);
+ pkt->tprot_set = true;
pkt->tprot = ip->protocol;
pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
+static inline int
+__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+ &_iph);
+ if (!iph)
+ return -1;
+
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+}
+
+static inline void
+nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
extern struct nft_af_info nft_af_ipv4;
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 8ad39a6a5fe1..d150b5066201 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -4,7 +4,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline int
+static inline void
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -15,15 +15,64 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
- /* If malformed, drop it */
+ if (protohdr < 0) {
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+ return;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+}
+
+static inline int
+__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+ &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
if (protohdr < 0)
return -1;
+ pkt->tprot_set = true;
pkt->tprot = protohdr;
pkt->xt.thoff = thoff;
pkt->xt.fragoff = frag_off;
return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline void
+nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
}
extern struct nft_af_info nft_af_ipv6;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 38b1a80517f0..e469e85de3f9 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -15,10 +15,6 @@ struct nf_proto_net {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_header;
struct ctl_table *ctl_table;
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- struct ctl_table_header *ctl_compat_header;
- struct ctl_table *ctl_compat_table;
-#endif
#endif
unsigned int users;
};
@@ -58,10 +54,6 @@ struct nf_ip_net {
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- struct ctl_table_header *ctl_table_header;
- struct ctl_table *ctl_table;
-#endif
};
struct ct_pcpu {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d061ffeb1e71..7adf4386ac8f 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,7 +40,6 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table __rcu *fib_local;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 36d723579af2..58487b1cc99a 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,6 +16,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 24cd3949a9a4..27bb9633c69d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -11,7 +11,7 @@
struct ctl_table_header;
struct xfrm_policy_hash {
- struct hlist_head *table;
+ struct hlist_head __rcu *table;
unsigned int hmask;
u8 dbits4;
u8 sbits4;
@@ -38,14 +38,12 @@ struct netns_xfrm {
* mode. Also, it can be used by ah/esp icmp error handler to find
* offending SA.
*/
- struct hlist_head *state_bydst;
- struct hlist_head *state_bysrc;
- struct hlist_head *state_byspi;
+ struct hlist_head __rcu *state_bydst;
+ struct hlist_head __rcu *state_bysrc;
+ struct hlist_head __rcu *state_byspi;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
- struct hlist_head state_gc_list;
- struct work_struct state_gc_work;
struct list_head policy_all;
struct hlist_head *policy_byidx;
@@ -73,7 +71,7 @@ struct netns_xfrm {
struct dst_ops xfrm6_dst_ops;
#endif
spinlock_t xfrm_state_lock;
- rwlock_t xfrm_policy_lock;
+ spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
/* flow cache part */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index c99508d426cc..767b03a3fe67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -69,17 +69,19 @@ struct tcf_exts {
int police;
};
-static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
+static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
- WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
+ if (!exts->actions)
+ return -ENOMEM;
#endif
exts->action = action;
exts->police = police;
+ return 0;
}
/**
@@ -121,7 +123,7 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- list_add(&a->list, actions);
+ list_add_tail(&a->list, actions);
}
#endif
}
@@ -484,4 +486,20 @@ struct tc_cls_matchall_offload {
unsigned long cookie;
};
+enum tc_clsbpf_command {
+ TC_CLSBPF_ADD,
+ TC_CLSBPF_REPLACE,
+ TC_CLSBPF_DESTROY,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ const char *name;
+ bool exts_integrated;
+ u32 gen_flags;
+};
+
#endif
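Because tcf_exts_init() can now fail, classifier code has to check its return value instead of relying on the removed WARN_ON(); a minimal hedged sketch (the action/police attribute numbers are passed in rather than tied to a specific classifier):

#include <net/pkt_cls.h>

static int mycls_init_exts(struct tcf_exts *exts, int action, int police)
{
	int err = tcf_exts_init(exts, action, police);

	if (err < 0)
		return err;	/* -ENOMEM: actions array could not be allocated */

	/* On any later failure the caller must call tcf_exts_destroy(exts)
	 * so the actions array is freed again.
	 */
	return 0;
}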
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7caa99b482c6..cd334c9584e9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,8 +90,8 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
-void qdisc_list_add(struct Qdisc *q);
-void qdisc_list_del(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/pptp.h b/include/net/pptp.h
new file mode 100644
index 000000000000..92e9f1fe2628
--- /dev/null
+++ b/include/net/pptp.h
@@ -0,0 +1,23 @@
+#ifndef _NET_PPTP_H
+#define _NET_PPTP_H
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ struct gre_base_hdr gre_hd;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
+} __packed;
+
+
+#endif
diff --git a/include/net/route.h b/include/net/route.h
index ad777d79af94..0429d47cad25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,7 +29,6 @@
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
-#include <net/l3mdev.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -285,15 +284,6 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
sport, dport, sk);
- if (!src && oif) {
- int rc;
-
- rc = l3mdev_get_saddr(net, oif, fl4);
- if (rc < 0)
- return ERR_PTR(rc);
-
- src = fl4->saddr;
- }
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 909aff2db2b3..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
u16 data[];
};
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -61,7 +69,7 @@ struct Qdisc {
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
- struct list_head list;
+ struct hlist_node hash;
u32 handle;
u32 parent;
void *u32_node;
@@ -76,7 +84,7 @@ struct Qdisc {
* For performance sake on SMP, we put highly modified fields at the end
*/
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct sk_buff_head q;
+ struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
@@ -592,7 +600,7 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch)
static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
- qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
+ this_cpu_inc(sch->cpu_qstats->drops);
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct qdisc_skb_head *qh)
{
- __skb_queue_tail(list, skb);
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = qh->head;
if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
}
return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff_head *list,
+ struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- return skb_peek(&sch->q);
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
}
/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
return skb;
}
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
/*
* We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- if (!skb_queue_empty(list)) {
- rtnl_kfree_skbs(list->next, list->prev);
- __skb_queue_head_init(list);
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
}
}
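The qdisc_skb_head conversion replaces sk_buff_head's doubly linked list with a bare head/tail singly linked list. The invariants are easiest to see in a small userspace model of the same bookkeeping (the node type and names are invented for the demo; the kernel code above operates on sk_buffs):

#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };
struct toy_head { struct node *head, *tail; unsigned int qlen; };

/* Mirrors __qdisc_enqueue_tail(): append at tail, bump qlen. */
static void toy_enqueue_tail(struct toy_head *qh, struct node *n)
{
	n->next = NULL;
	if (qh->tail)
		qh->tail->next = n;
	else
		qh->head = n;
	qh->tail = n;
	qh->qlen++;
}

/* Mirrors __qdisc_dequeue_head(): pop head, clear tail when empty. */
static struct node *toy_dequeue_head(struct toy_head *qh)
{
	struct node *n = qh->head;

	if (n) {
		qh->head = n->next;
		if (!qh->head)
			qh->tail = NULL;
		n->next = NULL;
		qh->qlen--;
	}
	return n;
}

int main(void)
{
	struct toy_head q = { NULL, NULL, 0 };
	struct node a, b;

	toy_enqueue_tail(&q, &a);
	toy_enqueue_tail(&q, &b);
	assert(toy_dequeue_head(&q) == &a && q.qlen == 1);
	assert(toy_dequeue_head(&q) == &b && q.qlen == 0 && !q.tail);
	return 0;
}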
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
#endif
/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
+#define SCTP_PAD4(s) (((s)+3)&~3)
/* Truncate to the previous multiple of 4. */
-#define WORD_TRUNC(s) ((s)&~3)
+#define SCTP_TRUNC4(s) ((s)&~3)
/*
* Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
- pos.v += WORD_ROUND(ntohs(pos.p->length)))
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
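The renamed rounding macros behave exactly like the old WORD_ROUND/WORD_TRUNC; a tiny standalone check (macro bodies copied from above so it builds outside the kernel):

#include <assert.h>

#define SCTP_PAD4(s)   (((s) + 3) & ~3)
#define SCTP_TRUNC4(s) ((s) & ~3)

int main(void)
{
	assert(SCTP_PAD4(0) == 0 && SCTP_PAD4(5) == 8 && SCTP_PAD4(8) == 8);
	assert(SCTP_TRUNC4(5) == 4 && SCTP_TRUNC4(8) == 8);
	return 0;
}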
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index bafe2a0ab908..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
}
/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
-/* RFC 1982 - Serial Number Arithmetic
- *
- * 2. Comparison
- * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
- * in all other cases, s1 is not equal to s2.
- *
- * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
- * and
- *
- * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
- *
- * s1 is said to be greater than s2 if, and only if, s1 is not equal to
- * s2, and
- *
- * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
- */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
- * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
- */
-
-enum {
- TSN_SIGN_BIT = (1<<31)
-};
-
-static inline int TSN_lt(__u32 s, __u32 t)
-{
- return ((s) - (t)) & TSN_SIGN_BIT;
-}
-
-static inline int TSN_lte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
-}
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
/* Compare two SSNs */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on Stream Sequence Numbers in this document
- * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
- * SERIAL_BITS = 16.
- */
-enum {
- SSN_SIGN_BIT = (1<<15)
-};
-
-static inline int SSN_lt(__u16 s, __u16 t)
-{
- return ((s) - (t)) & SSN_SIGN_BIT;
-}
-
-static inline int SSN_lte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
-}
-
-/*
- * ADDIP 3.1.1
- * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
- * Numbers wrap back to 0 after reaching 4294967295.
- */
-enum {
- ADDIP_SERIAL_SIGN_BIT = (1<<31)
-};
-
-static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
-}
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
/* Check VTAG of the packet matches the sender's own tag. */
static inline int
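The rewritten TSN/SSN comparisons are plain RFC 1982 serial-number arithmetic: subtract as unsigned, then reinterpret the difference in the signed type of the same width. A standalone check of that behaviour (the typecheck() guards are kernel-only and dropped here):

#include <assert.h>
#include <stdint.h>

#define TSN_lt(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
#define SSN_lt(a, b)  ((int16_t)((uint16_t)((a) - (b))) < 0)

int main(void)
{
	assert(TSN_lt(1, 2) && !TSN_lt(2, 1));
	assert(TSN_lt(0xffffffffu, 0));		/* correct across the 32-bit wrap */
	assert(SSN_lt(0xffff, 0));		/* and across the 16-bit wrap */
	return 0;
}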
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ced0df374e60..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,6 +537,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1069,7 +1070,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1077,7 +1078,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
/* Uncork and flush an outqueue. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 8741988e6880..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1020,7 +1020,6 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
- void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
@@ -1114,6 +1113,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk)
sk_stream_memory_free(sk);
}
+static inline int sk_under_cgroup_hierarchy(struct sock *sk,
+ struct cgroup *ancestor)
+{
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
+ ancestor);
+#else
+ return -ENOTSUPP;
+#endif
+}
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
@@ -1232,8 +1241,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
return sk->sk_prot->hash(sk);
}
-void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
-
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
diff --git a/include/net/strparser.h b/include/net/strparser.h
new file mode 100644
index 000000000000..0c28ad97c52f
--- /dev/null
+++ b/include/net/strparser.h
@@ -0,0 +1,142 @@
+/*
+ * Stream Parser
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __NET_STRPARSER_H_
+#define __NET_STRPARSER_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#define STRP_STATS_ADD(stat, count) ((stat) += (count))
+#define STRP_STATS_INCR(stat) ((stat)++)
+
+struct strp_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+};
+
+struct strp_aggr_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+ unsigned int rx_aborts;
+ unsigned int rx_interrupted;
+ unsigned int rx_unrecov_intr;
+};
+
+struct strparser;
+
+/* Callbacks are called with lock held for the attached socket */
+struct strp_callbacks {
+ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock_done)(struct strparser *strp, int err);
+ void (*abort_parser)(struct strparser *strp, int err);
+};
+
+struct strp_rx_msg {
+ int full_len;
+ int offset;
+};
+
+static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
+{
+ return (struct strp_rx_msg *)((void *)skb->cb +
+ offsetof(struct qdisc_skb_cb, data));
+}
+
+/* Structure for an attached lower socket */
+struct strparser {
+ struct sock *sk;
+
+ u32 rx_stopped : 1;
+ u32 rx_paused : 1;
+ u32 rx_aborted : 1;
+ u32 rx_interrupted : 1;
+ u32 rx_unrecov_intr : 1;
+
+ struct sk_buff **rx_skb_nextp;
+ struct timer_list rx_msg_timer;
+ struct sk_buff *rx_skb_head;
+ unsigned int rx_need_bytes;
+ struct delayed_work rx_delayed_work;
+ struct work_struct rx_work;
+ struct strp_stats stats;
+ struct strp_callbacks cb;
+};
+
+/* Must be called with lock held for attached socket */
+static inline void strp_pause(struct strparser *strp)
+{
+ strp->rx_paused = 1;
+}
+
+/* May be called without holding lock for attached socket */
+void strp_unpause(struct strparser *strp);
+
+static inline void save_strp_stats(struct strparser *strp,
+ struct strp_aggr_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when the psock is detached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
+ strp->stats._stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+#undef SAVE_PSOCK_STATS
+
+ if (strp->rx_aborted)
+ agg_stats->rx_aborts++;
+ if (strp->rx_interrupted)
+ agg_stats->rx_interrupted++;
+ if (strp->rx_unrecov_intr)
+ agg_stats->rx_unrecov_intr++;
+}
+
+static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
+ struct strp_aggr_stats *agg_stats)
+{
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+ SAVE_PSOCK_STATS(rx_aborts);
+ SAVE_PSOCK_STATS(rx_interrupted);
+ SAVE_PSOCK_STATS(rx_unrecov_intr);
+#undef SAVE_PSOCK_STATS
+
+}
+
+void strp_done(struct strparser *strp);
+void strp_stop(struct strparser *strp);
+void strp_check_rcv(struct strparser *strp);
+int strp_init(struct strparser *strp, struct sock *csk,
+ struct strp_callbacks *cb);
+void strp_data_ready(struct strparser *strp);
+
+#endif /* __NET_STRPARSER_H_ */
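A hedged sketch of attaching the new stream parser to a lower TCP socket, assuming a made-up framing of a 2-byte big-endian length prefix followed by the payload. Only entry points declared in this header are used; skb_copy_bits() is the usual way to peek at the header bytes, and the receive callback here simply drops the extracted message.

#include <net/strparser.h>

static int demo_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = strp_rx_msg(skb);
	__be16 len;

	if (skb_copy_bits(skb, rxm->offset, &len, sizeof(len)))
		return 0;			/* header not complete yet */

	return (int)(sizeof(len) + ntohs(len));	/* full message length */
}

static void demo_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	kfree_skb(skb);				/* one complete message */
}

static int demo_attach(struct strparser *strp, struct sock *csk)
{
	struct strp_callbacks cb = {
		.parse_msg = demo_parse_msg,
		.rcv_msg = demo_rcv_msg,
	};

	return strp_init(strp, csk, &cb);
}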
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 62f6a967a1b7..eba80c4fc56f 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -68,7 +68,6 @@ struct switchdev_attr {
enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_UNDEFINED,
SWITCHDEV_OBJ_ID_PORT_VLAN,
- SWITCHDEV_OBJ_ID_IPV4_FIB,
SWITCHDEV_OBJ_ID_PORT_FDB,
SWITCHDEV_OBJ_ID_PORT_MDB,
};
@@ -92,21 +91,6 @@ struct switchdev_obj_port_vlan {
#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
container_of(obj, struct switchdev_obj_port_vlan, obj)
-/* SWITCHDEV_OBJ_ID_IPV4_FIB */
-struct switchdev_obj_ipv4_fib {
- struct switchdev_obj obj;
- u32 dst;
- int dst_len;
- struct fib_info *fi;
- u8 tos;
- u8 type;
- u32 nlflags;
- u32 tb_id;
-};
-
-#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
- container_of(obj, struct switchdev_obj_ipv4_fib, obj)
-
/* SWITCHDEV_OBJ_ID_PORT_FDB */
struct switchdev_obj_port_fdb {
struct switchdev_obj obj;
@@ -209,11 +193,6 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
int switchdev_port_bridge_dellink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
-int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
-void switchdev_fib_ipv4_abort(struct fib_info *fi);
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
u16 vid, u16 nlm_flags);
@@ -222,7 +201,7 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
u16 vid);
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
- struct net_device *filter_dev, int idx);
+ struct net_device *filter_dev, int *idx);
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -304,25 +283,6 @@ static inline int switchdev_port_bridge_dellink(struct net_device *dev,
return -EOPNOTSUPP;
}
-static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
-{
- return 0;
-}
-
-static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
-{
- return 0;
-}
-
-static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
-{
-}
-
static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
@@ -342,15 +302,9 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx)
-{
- return idx;
-}
-
-static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining)
+ int *idx)
{
+ return *idx;
}
static inline bool switchdev_port_same_parent_id(struct net_device *a,
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
void ife_release_meta_gen(struct tcf_meta_info *mi);
int register_ife_op(struct tcf_meta_ops *mops);
int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..644a2116b47b
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+ struct rcu_head rcu;
+ u64 flags; /*up to 64 types of operations; extend if needed */
+ u8 eth_dst[ETH_ALEN];
+ u16 eth_type;
+ u8 eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+ struct tc_action common;
+ struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..253f8da6c2a6
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_TUNNEL_KEY_H
+#define __NET_TC_TUNNEL_KEY_H
+
+#include <net/act_api.h>
+
+struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+ int action;
+ struct metadata_dst *tcft_enc_metadata;
+};
+
+struct tcf_tunnel_key {
+ struct tc_action common;
+ struct tcf_tunnel_key_params __rcu *params;
+};
+
+#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+
+#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index e29f52e8bdf1..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
#define __NET_TC_VLAN_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
@@ -20,7 +21,32 @@ struct tcf_vlan {
int tcfv_action;
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
+ u8 tcfv_push_prio;
};
#define to_vlan(a) ((struct tcf_vlan *)a)
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_proto;
+}
+
#endif /* __NET_TC_VLAN_H */
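A sketch of how a driver's classifier-offload path might consume the new vlan action accessors. The TCA_VLAN_ACT_* values come from the uapi header included above; struct my_hw and the hw_vlan_* hooks are hypothetical driver code.

#include <net/tc_act/tc_vlan.h>

static int offload_vlan_action(struct my_hw *hw, const struct tc_action *a)
{
	if (!is_tcf_vlan(a))
		return -EOPNOTSUPP;

	switch (tcf_vlan_action(a)) {
	case TCA_VLAN_ACT_POP:
		return hw_vlan_pop(hw);		/* hypothetical hw hook */
	case TCA_VLAN_ACT_PUSH:
		return hw_vlan_push(hw, tcf_vlan_push_vid(a),
				    tcf_vlan_push_proto(a));
	default:
		return -EOPNOTSUPP;
	}
}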
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7717302cab91..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -227,10 +227,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TFO_SERVER_COOKIE_NOT_REQD 0x200
/* Force enable TFO on all listeners, i.e., not requiring the
- * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ * TCP_FASTOPEN socket option.
*/
#define TFO_SERVER_WO_SOCKOPT1 0x400
-#define TFO_SERVER_WO_SOCKOPT2 0x800
extern struct inet_timewait_death_row tcp_death_row;
@@ -534,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
@@ -604,8 +605,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
-typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
- unsigned int, size_t);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
@@ -643,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
@@ -674,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
- return tp->rtt_min[0].rtt;
+ return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
@@ -766,8 +765,16 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
- /* There is space for up to 20 bytes */
- __u32 in_flight;/* Bytes in flight when packet sent */
+ /* There is space for up to 24 bytes */
+ __u32 in_flight:30, /* Bytes in flight at transmit */
+ is_app_limited:1, /* cwnd not fully used? */
+ unused:1;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ struct skb_mstamp first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ struct skb_mstamp delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -863,6 +870,27 @@ struct ack_sample {
u32 in_flight;
};
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered "delivered" over an interval of time "interval_us".
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally choose to consult this sample when
+ * setting cwnd and pacing rate.
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -887,6 +915,14 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+ /* suggest number of segments for each skb to transmit (optional) */
+ u32 (*tso_segs_goal)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
@@ -949,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
@@ -1164,6 +1208,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
@@ -1853,6 +1898,8 @@ static inline int tcp_inq(struct sock *sk)
return answ;
}
+int tcp_peek_len(struct socket *sock);
+
static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
u16 segs_in;
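To make the new rate-sample plumbing concrete, here is a hedged sketch of the optional cong_control hook consuming a struct rate_sample the way the comment above describes: skip invalid samples, turn delivered packets over interval_us into a bandwidth estimate, and feed it into the pacing rate. Real modules keep far more state; only fields shown in this header are used.

#include <linux/math64.h>
#include <net/tcp.h>

static void demo_cong_control(struct sock *sk, const struct rate_sample *rs)
{
	u64 rate;

	if (rs->delivered <= 0 || rs->interval_us <= 0)
		return;			/* invalid sample, per the comment */

	/* delivered counts packets; scale by MSS to get bytes per second */
	rate = (u64)rs->delivered * tcp_sk(sk)->mss_cache * USEC_PER_SEC;
	rate = div_u64(rate, rs->interval_us);

	if (!rs->is_app_limited || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = min_t(u64, rate, sk->sk_max_pacing_rate);
}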
diff --git a/include/net/udp.h b/include/net/udp.h
index 8894d7144189..ea53a87d880f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -251,6 +251,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b96d0360c095..0255613a54a4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -350,24 +350,6 @@ static inline __be32 vxlan_vni_field(__be32 vni)
#endif
}
-static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be32)tun_id;
-#else
- return (__force __be32)((__force u64)tun_id >> 32);
-#endif
-}
-
-static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be64)vni;
-#else
- return (__force __be64)((u64)(__force u32)vni << 32);
-#endif
-}
-
static inline size_t vxlan_rco_start(__be32 vni_field)
{
return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 17934312eecb..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -187,7 +187,7 @@ struct xfrm_state {
struct xfrm_replay_state_esn *preplay_esn;
/* The functions for replay detection. */
- struct xfrm_replay *repl;
+ const struct xfrm_replay *repl;
/* internal flag that only holds state for delayed aevent at the
* moment
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
new file mode 100644
index 000000000000..408439fe911e
--- /dev/null
+++ b/include/rdma/ib_hdrs.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef IB_HDRS_H
+#define IB_HDRS_H
+
+#include <linux/types.h>
+#include <asm/unaligned.h>
+#include <rdma/ib_verbs.h>
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+#define IB_BTH_REQ_ACK BIT(31)
+#define IB_BTH_SOLICITED BIT(23)
+#define IB_BTH_MIG_REQ BIT(22)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
+
+struct ib_reth {
+ __be64 vaddr; /* potentially unaligned */
+ __be32 rkey;
+ __be32 length;
+} __packed;
+
+struct ib_atomic_eth {
+ __be64 vaddr; /* potentially unaligned */
+ __be32 rkey;
+ __be64 swap_data; /* potentially unaligned */
+ __be64 compare_data; /* potentially unaligned */
+} __packed;
+
+union ib_ehdrs {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be64 atomic_ack_eth; /* potentially unaligned */
+ } __packed at;
+ __be32 imm_data;
+ __be32 aeth;
+ __be32 ieth;
+ struct ib_atomic_eth atomic_eth;
+} __packed;
+
+struct ib_other_headers {
+ __be32 bth[3];
+ union ib_ehdrs u;
+} __packed;
+
+struct ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct ib_other_headers oth;
+ } l;
+ struct ib_other_headers oth;
+ } u;
+} __packed;
+
+/* accessors for unaligned __be64 items */
+
+static inline u64 ib_u64_get(__be64 *p)
+{
+ return get_unaligned_be64(p);
+}
+
+static inline void ib_u64_put(u64 val, __be64 *p)
+{
+ put_unaligned_be64(val, p);
+}
+
+static inline u64 get_ib_reth_vaddr(struct ib_reth *reth)
+{
+ return ib_u64_get(&reth->vaddr);
+}
+
+static inline void put_ib_reth_vaddr(u64 val, struct ib_reth *reth)
+{
+ ib_u64_put(val, &reth->vaddr);
+}
+
+static inline u64 get_ib_ateth_vaddr(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->vaddr);
+}
+
+static inline void put_ib_ateth_vaddr(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->vaddr);
+}
+
+static inline u64 get_ib_ateth_swap(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->swap_data);
+}
+
+static inline void put_ib_ateth_swap(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->swap_data);
+}
+
+static inline u64 get_ib_ateth_compare(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->compare_data);
+}
+
+static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->compare_data);
+}
+
+#endif /* IB_HDRS_H */
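
[Editor's note] The accessors above exist because the 64-bit fields in these packed wire structures may sit on 4-byte boundaries; get_unaligned_be64()/put_unaligned_be64() keep the loads and stores safe on strict-alignment architectures. A minimal sketch of how a consumer might use them follows; the function name and calling context are hypothetical, not part of this patch.

	#include <rdma/ib_hdrs.h>

	static void example_parse_reth(struct ib_other_headers *ohdr)
	{
		struct ib_reth *reth = &ohdr->u.rc.reth;
		u64 vaddr = get_ib_reth_vaddr(reth);	/* unaligned-safe 64-bit read */
		u32 rkey = be32_to_cpu(reth->rkey);
		u32 len = be32_to_cpu(reth->length);

		pr_debug("RETH vaddr=0x%llx rkey=0x%x len=%u\n", vaddr, rkey, len);
	}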
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e1f96737c2a1..5ad43a487745 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -261,6 +261,16 @@ struct ib_odp_caps {
} per_transport_caps;
};
+struct ib_rss_caps {
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ u32 supported_qpts;
+ u32 max_rwq_indirection_tables;
+ u32 max_rwq_indirection_table_size;
+};
+
enum ib_cq_creation_flags {
IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
@@ -318,6 +328,8 @@ struct ib_device_attr {
struct ib_odp_caps odp_caps;
uint64_t timestamp_mask;
uint64_t hca_core_clock; /* in KHZ */
+ struct ib_rss_caps rss_caps;
+ u32 max_wq_type_rq;
};
enum ib_mtu {
@@ -525,9 +537,11 @@ enum ib_device_modify_flags {
IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};
+#define IB_DEVICE_NODE_DESC_MAX 64
+
struct ib_device_modify {
u64 sys_image_guid;
- char node_desc[64];
+ char node_desc[IB_DEVICE_NODE_DESC_MAX];
};
enum ib_port_modify_flags {
@@ -1370,10 +1384,17 @@ struct ib_udata {
struct ib_pd {
u32 local_dma_lkey;
+ u32 flags;
struct ib_device *device;
struct ib_uobject *uobject;
atomic_t usecnt; /* count all resources */
- struct ib_mr *local_mr;
+
+ u32 unsafe_global_rkey;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct ib_mr *__internal_mr;
};
struct ib_xrcd {
@@ -1604,6 +1625,8 @@ struct ib_flow_eth_filter {
u8 src_mac[6];
__be16 ether_type;
__be16 vlan_tag;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_eth {
@@ -1616,6 +1639,8 @@ struct ib_flow_spec_eth {
struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ib {
@@ -1625,9 +1650,22 @@ struct ib_flow_spec_ib {
struct ib_flow_ib_filter mask;
};
+/* IPv4 header flags */
+enum ib_ipv4_flags {
+ IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
+ IB_IPV4_MORE_FRAG = 0x4 /* All fragmented packets except the
+ last have this flag set */
+};
+
struct ib_flow_ipv4_filter {
__be32 src_ip;
__be32 dst_ip;
+ u8 proto;
+ u8 tos;
+ u8 ttl;
+ u8 flags;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ipv4 {
@@ -1640,6 +1678,12 @@ struct ib_flow_spec_ipv4 {
struct ib_flow_ipv6_filter {
u8 src_ip[16];
u8 dst_ip[16];
+ __be32 flow_label;
+ u8 next_hdr;
+ u8 traffic_class;
+ u8 hop_limit;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ipv6 {
@@ -1652,6 +1696,8 @@ struct ib_flow_spec_ipv6 {
struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_tcp_udp {
@@ -1739,6 +1785,14 @@ struct ib_dma_mapping_ops {
void (*unmap_sg)(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction);
+ int (*map_sg_attrs)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+ void (*unmap_sg_attrs)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct ib_device *dev,
u64 dma_handle,
size_t size,
@@ -2033,7 +2087,7 @@ struct ib_device {
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
- char node_desc[64];
+ char node_desc[IB_DEVICE_NODE_DESC_MAX];
__be64 node_guid;
u32 local_dma_lkey;
u16 is_switch:1;
@@ -2497,8 +2551,23 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
-struct ib_pd *ib_alloc_pd(struct ib_device *device);
+enum ib_pd_flags {
+ /*
+ * Create a memory registration for all memory in the system and place
+ * the rkey for it into pd->unsafe_global_rkey. This can be used by
+ * ULPs to avoid the overhead of dynamic MRs.
+ *
+ * This flag is generally considered unsafe and must only be used in
+ * extremely trusted environments. Every use of it will log a warning
+ * in the kernel log.
+ */
+ IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
+};
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+ const char *caller);
+#define ib_alloc_pd(device, flags) \
+ __ib_alloc_pd((device), (flags), __func__)
void ib_dealloc_pd(struct ib_pd *pd);
/**
@@ -2852,18 +2921,6 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
}
/**
- * ib_get_dma_mr - Returns a memory region for system memory that is
- * usable for DMA.
- * @pd: The protection domain associated with the memory region.
- * @mr_access_flags: Specifies the memory access rights.
- *
- * Note that the ib_dma_*() functions defined below must be used
- * to create/destroy addresses used with the Lkey or Rkey returned
- * by ib_get_dma_mr().
- */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-/**
* ib_dma_mapping_error - check a DMA addr for error
* @dev: The device for which the dma_addr was created
* @dma_addr: The DMA address to check
@@ -3000,8 +3057,12 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
+ if (dev->dma_ops)
+ return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
+ dma_attrs);
+ else
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3009,7 +3070,12 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ if (dev->dma_ops)
+ return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
+ dma_attrs);
+ else
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
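
[Editor's note] For context on the ib_alloc_pd() change: callers now pass a flags word, and IB_PD_UNSAFE_GLOBAL_RKEY replaces the per-ULP ib_get_dma_mr() pattern removed above. A hedged sketch of a ULP opting in; the function name is hypothetical, and in normal code the flags should simply be 0.

	static int example_setup_pd(struct ib_device *dev)
	{
		struct ib_pd *pd = ib_alloc_pd(dev, IB_PD_UNSAFE_GLOBAL_RKEY);

		if (IS_ERR(pd))
			return PTR_ERR(pd);
		/* pd->unsafe_global_rkey now stands in for the old DMA MR rkey */
		return 0;
	}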
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index bd34d0b56bf7..2c5183ef0243 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -466,6 +466,25 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
}
/**
+ * rvt_get_qp - get a QP reference
+ * @qp - the QP to hold
+ */
+static inline void rvt_get_qp(struct rvt_qp *qp)
+{
+ atomic_inc(&qp->refcount);
+}
+
+/**
+ * rvt_put_qp - release a QP reference
+ * @qp - the QP to release
+ */
+static inline void rvt_put_qp(struct rvt_qp *qp)
+{
+ if (qp && atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
* rvt_qp_wqe_reserve - reserve operation
* @qp - the rvt qp
* @wqe - the send wqe
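
[Editor's note] The new rvt_get_qp()/rvt_put_qp() helpers wrap the QP refcount so that the final put wakes anyone sleeping on qp->wait. A hedged sketch of the intended pattern; the surrounding function is hypothetical.

	#include <rdma/rdmavt_qp.h>

	static void example_defer_qp_work(struct rvt_qp *qp)
	{
		rvt_get_qp(qp);		/* pin the QP across the async path */
		/* ... queue work that dereferences qp ... */
		rvt_put_qp(qp);		/* last put wakes waiters on qp->wait */
	}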
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b2017440b765..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
*/
struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */
+#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
__be32 cid; /* connection and channel ID */
#define RXRPC_MAXCALLS 4 /* max active calls per conn */
@@ -33,8 +34,6 @@ struct rxrpc_wire_header {
#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
__be32 callNumber; /* call ID (0 for connection-level packets) */
-#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
-
__be32 seq; /* sequence number of pkt in call stream */
__be32 serial; /* serial number of pkt sent to network */
@@ -92,10 +91,14 @@ struct rxrpc_wire_header {
struct rxrpc_jumbo_header {
uint8_t flags; /* packet flags (as per rxrpc_header) */
uint8_t pad;
- __be16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ union {
+ __be16 _rsvd; /* reserved */
+ __be16 cksum; /* kerberos security checksum */
+ };
};
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
+#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
/*****************************************************************************/
/*
@@ -120,6 +123,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
@@ -130,6 +134,13 @@ struct rxrpc_ackpacket {
} __packed;
+/* Some ACKs refer to specific packets and some are general and can be updated. */
+#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
+ (1 << RXRPC_ACK_PING_RESPONSE) | \
+ (1 << RXRPC_ACK_DELAY) | \
+ (1 << RXRPC_ACK_IDLE))
+
+
/*
* ACK packets can have a further piece of information tagged on the end
*/
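
[Editor's note] RXRPC_ACK_UPDATEABLE marks the ACK reasons that describe general call state (and so may be overwritten by a later proposal) rather than a specific packet. A hedged sketch of the kind of check the mask enables; the helper name is hypothetical.

	#include <rxrpc/packet.h>

	static bool example_ack_is_updateable(u8 reason)
	{
		return reason < RXRPC_ACK__INVALID &&
		       (RXRPC_ACK_UPDATEABLE & (1 << reason));
	}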
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
index 2f9bb984a4df..506ea8ffda19 100644
--- a/include/soc/at91/atmel-sfr.h
+++ b/include/soc/at91/atmel-sfr.h
@@ -13,6 +13,20 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SFR_H
#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
+#define AT91_SFR_DDRCFG 0x04 /* DDR Configuration Register */
+/* 0x08 ~ 0x0c: Reserved */
+#define AT91_SFR_OHCIICR 0x10 /* OHCI INT Configuration Register */
+#define AT91_SFR_OHCIISR 0x14 /* OHCI INT Status Register */
#define AT91_SFR_I2SCLKSEL 0x90 /* I2SC Register */
+/* Field definitions */
+#define AT91_OHCIICR_SUSPEND_A BIT(8)
+#define AT91_OHCIICR_SUSPEND_B BIT(9)
+#define AT91_OHCIICR_SUSPEND_C BIT(10)
+
+#define AT91_OHCIICR_USB_SUSPEND (AT91_OHCIICR_SUSPEND_A | \
+ AT91_OHCIICR_SUSPEND_B | \
+ AT91_OHCIICR_SUSPEND_C)
+
+
#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
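
[Editor's note] The OHCIICR suspend bits are normally poked through the SFR syscon regmap. A hedged sketch, assuming the caller has already looked up that regmap; the function name and handle are hypothetical.

	#include <linux/regmap.h>
	#include <soc/at91/atmel-sfr.h>

	static int example_ohci_suspend_ports(struct regmap *sfr)
	{
		/* suspend ports A, B and C in one read-modify-write */
		return regmap_update_bits(sfr, AT91_SFR_OHCIICR,
					  AT91_OHCIICR_USB_SUSPEND,
					  AT91_OHCIICR_USB_SUSPEND);
	}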
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
new file mode 100644
index 000000000000..7e28092c4d3d
--- /dev/null
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __SOC_ROCKCHIP_SIP_H
+#define __SOC_ROCKCHIP_SIP_H
+
+#define ROCKCHIP_SIP_DRAM_FREQ 0x82000008
+#define ROCKCHIP_SIP_CONFIG_DRAM_INIT 0x00
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE 0x01
+#define ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE 0x02
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_AT_SR 0x03
+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_BW 0x04
+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05
+#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07
+
+#endif
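
[Editor's note] These function IDs are intended for SMC calls into the secure firmware, with the DRAM sub-command passed in one of the argument registers. A hedged sketch using the generic ARM SMCCC helper; the wrapper name and the register that carries the result are assumptions, not something this header defines.

	#include <linux/arm-smccc.h>
	#include <soc/rockchip/rockchip_sip.h>

	static unsigned long example_dram_get_rate(void)
	{
		struct arm_smccc_res res;

		arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
			      ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE,
			      0, 0, 0, 0, &res);
		return res.a0;	/* assumed: rate reported in the first result register */
	}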
diff --git a/include/sound/da7219.h b/include/sound/da7219.h
index 02876acdc840..409ef1397fd3 100644
--- a/include/sound/da7219.h
+++ b/include/sound/da7219.h
@@ -34,6 +34,8 @@ enum da7219_mic_amp_in_sel {
struct da7219_aad_pdata;
struct da7219_pdata {
+ bool wakeup_source;
+
/* Mic */
enum da7219_micbias_voltage micbias_lvl;
enum da7219_mic_amp_in_sel mic_amp_in_sel;
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ff1aecf325e8..0013063db7f2 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -89,6 +89,19 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_SD_BDLPL 0x18
#define AZX_REG_SD_BDLPU 0x1c
+/* GTS registers */
+#define AZX_REG_LLCH 0x14
+
+#define AZX_REG_GTS_BASE 0x520
+
+#define AZX_REG_GTSCC (AZX_REG_GTS_BASE + 0x00)
+#define AZX_REG_WALFCC (AZX_REG_GTS_BASE + 0x04)
+#define AZX_REG_TSCCL (AZX_REG_GTS_BASE + 0x08)
+#define AZX_REG_TSCCU (AZX_REG_GTS_BASE + 0x0C)
+#define AZX_REG_LLPFOC (AZX_REG_GTS_BASE + 0x14)
+#define AZX_REG_LLPCL (AZX_REG_GTS_BASE + 0x18)
+#define AZX_REG_LLPCU (AZX_REG_GTS_BASE + 0x1C)
+
/* Haswell/Broadwell display HD-A controller Extended Mode registers */
#define AZX_REG_HSW_EM4 0x100c
#define AZX_REG_HSW_EM5 0x1010
@@ -242,6 +255,29 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
/* Interval used to calculate the iterating register offset */
#define AZX_DRSM_INTERVAL 0x08
+/* Global time synchronization registers */
+#define GTSCC_TSCCD_MASK 0x80000000
+#define GTSCC_TSCCD_SHIFT BIT(31)
+#define GTSCC_TSCCI_MASK 0x20
+#define GTSCC_CDMAS_DMA_DIR_SHIFT 4
+
+#define WALFCC_CIF_MASK 0x1FF
+#define WALFCC_FN_SHIFT 9
+#define HDA_CLK_CYCLES_PER_FRAME 512
+
+/*
+ * An error occurs near frame "rollover". The clocks-in-frame value indicates
+ * whether this error may have occurred. Here we use the value of 10; please
+ * see the errata for the exact number [<10].
+ */
+#define HDA_MAX_CYCLE_VALUE 499
+#define HDA_MAX_CYCLE_OFFSET 10
+#define HDA_MAX_CYCLE_READ_RETRY 10
+
+#define TSCCU_CCU_SHIFT 32
+#define LLPC_CCU_SHIFT 32
+
+
/*
* helpers to read the stream position
*/
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 93e63c56f48f..56004ec8d441 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -245,6 +245,12 @@ struct hdac_rb {
/*
* HD-audio bus base driver
+ *
+ * @ppcap: pp capabilities pointer
+ * @spbcap: SPIB capabilities pointer
+ * @mlcap: MultiLink capabilities pointer
+ * @gtscap: gts capabilities pointer
+ * @drsmcap: dma resume capabilities pointer
*/
struct hdac_bus {
struct device *dev;
@@ -256,6 +262,12 @@ struct hdac_bus {
void __iomem *remap_addr;
int irq;
+ void __iomem *ppcap;
+ void __iomem *spbcap;
+ void __iomem *mlcap;
+ void __iomem *gtscap;
+ void __iomem *drsmcap;
+
/* codec linked list */
struct list_head codec_list;
unsigned int num_codecs;
@@ -335,6 +347,7 @@ static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
+int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus);
int snd_hdac_link_power(struct hdac_device *codec, bool enable);
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index b9593b201599..8660a7f10851 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -8,11 +8,6 @@
*
* @bus: hdac bus
* @num_streams: streams supported
- * @ppcap: pp capabilities pointer
- * @spbcap: SPIB capabilities pointer
- * @mlcap: MultiLink capabilities pointer
- * @gtscap: gts capabilities pointer
- * @drsmcap: dma resume capabilities pointer
* @hlink_list: link list of HDA links
* @lock: lock for link mgmt
* @cmd_dma_state: state of cmd DMAs: CORB and RIRB
@@ -22,12 +17,6 @@ struct hdac_ext_bus {
int num_streams;
int idx;
- void __iomem *ppcap;
- void __iomem *spbcap;
- void __iomem *mlcap;
- void __iomem *gtscap;
- void __iomem *drsmcap;
-
struct list_head hlink_list;
struct mutex lock;
@@ -54,7 +43,6 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
-int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *sbus);
void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
diff --git a/include/sound/l3.h b/include/sound/l3.h
index 423a08f0f1b0..1471da22adad 100644
--- a/include/sound/l3.h
+++ b/include/sound/l3.h
@@ -2,9 +2,15 @@
#define _L3_H_ 1
struct l3_pins {
- void (*setdat)(int);
- void (*setclk)(int);
- void (*setmode)(int);
+ void (*setdat)(struct l3_pins *, int);
+ void (*setclk)(struct l3_pins *, int);
+ void (*setmode)(struct l3_pins *, int);
+
+ int gpio_data;
+ int gpio_clk;
+ int gpio_mode;
+ int use_gpios;
+
int data_hold;
int data_setup;
int clock_high;
@@ -13,6 +19,9 @@ struct l3_pins {
int mode_setup;
};
+struct device;
+
int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len);
+int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap);
#endif
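
[Editor's note] With the callbacks now taking the l3_pins themselves, a machine driver can either supply its own setdat/setclk/setmode or fill the gpio_* fields and let l3_set_gpio_ops() install GPIO-backed ones. A hedged sketch of the latter; the caller, register address and data value are placeholders.

	#include <sound/l3.h>

	static int example_l3_init(struct device *dev, struct l3_pins *adap)
	{
		u8 data = 0;	/* placeholder register value */
		int ret;

		ret = l3_set_gpio_ops(dev, adap);	/* install GPIO setdat/setclk/setmode */
		if (ret)
			return ret;
		return l3_write(adap, 0x14, &data, 1);	/* 0x14 is a placeholder address */
	}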
diff --git a/include/sound/rt5660.h b/include/sound/rt5660.h
new file mode 100644
index 000000000000..065f83a24db6
--- /dev/null
+++ b/include/sound/rt5660.h
@@ -0,0 +1,31 @@
+/*
+ * linux/sound/rt5660.h -- Platform data for RT5660
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5660_H
+#define __LINUX_SND_RT5660_H
+
+enum rt5660_dmic1_data_pin {
+ RT5660_DMIC1_NULL,
+ RT5660_DMIC1_DATA_GPIO2,
+ RT5660_DMIC1_DATA_IN1P,
+};
+
+struct rt5660_platform_data {
+ /* IN1 & IN3 can optionally be differential */
+ bool in1_diff;
+ bool in3_diff;
+ bool use_ldo2;
+ bool poweroff_codec_in_suspend;
+
+ enum rt5660_dmic1_data_pin dmic1_data_pin;
+};
+
+#endif
diff --git a/include/sound/s3c24xx_uda134x.h b/include/sound/s3c24xx_uda134x.h
index 33df4cb909d3..ffaf1f098c8e 100644
--- a/include/sound/s3c24xx_uda134x.h
+++ b/include/sound/s3c24xx_uda134x.h
@@ -7,7 +7,6 @@ struct s3c24xx_uda134x_platform_data {
int l3_clk;
int l3_mode;
int l3_data;
- void (*power) (int);
int model;
};
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 86088aed9002..fd6412551145 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -27,10 +27,45 @@ int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
+__printf(3, 4)
int asoc_simple_card_set_dailink_name(struct device *dev,
struct snd_soc_dai_link *dai_link,
const char *fmt, ...);
int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
char *prefix);
+#define asoc_simple_card_parse_clk_cpu(node, dai_link, simple_dai) \
+ asoc_simple_card_parse_clk(node, dai_link->cpu_of_node, simple_dai)
+#define asoc_simple_card_parse_clk_codec(node, dai_link, simple_dai) \
+ asoc_simple_card_parse_clk(node, dai_link->codec_of_node, simple_dai)
+int asoc_simple_card_parse_clk(struct device_node *node,
+ struct device_node *dai_of_node,
+ struct asoc_simple_dai *simple_dai);
+
+#define asoc_simple_card_parse_cpu(node, dai_link, \
+ list_name, cells_name, is_single_link) \
+ asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \
+ &dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
+#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
+ asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name, list_name, cells_name, NULL)
+#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
+ asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \
+ NULL, list_name, cells_name, NULL)
+int asoc_simple_card_parse_dai(struct device_node *node,
+ struct device_node **endpoint_np,
+ const char **dai_name,
+ const char *list_name,
+ const char *cells_name,
+ int *is_single_links);
+
+int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
+ struct asoc_simple_dai *simple_dai);
+
+int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link);
+void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
+ int is_single_links);
+
+int asoc_simple_card_clean_reference(struct snd_soc_card *card);
+
#endif /* __SIMPLE_CARD_CORE_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 6144882cc96a..4f1c784e44f6 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -898,14 +898,6 @@ struct snd_soc_codec_driver {
int (*resume)(struct snd_soc_codec *);
struct snd_soc_component_driver component_driver;
- /* Default control and setup, added after probe() is run */
- const struct snd_kcontrol_new *controls;
- int num_controls;
- const struct snd_soc_dapm_widget *dapm_widgets;
- int num_dapm_widgets;
- const struct snd_soc_dapm_route *dapm_routes;
- int num_dapm_routes;
-
/* codec wide operations */
int (*set_sysclk)(struct snd_soc_codec *codec,
int clk_id, int source, unsigned int freq, int dir);
@@ -1547,17 +1539,6 @@ static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platfo
return snd_soc_component_get_drvdata(&platform->component);
}
-static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
- void *data)
-{
- dev_set_drvdata(rtd->dev, data);
-}
-
-static inline void *snd_soc_pcm_get_drvdata(struct snd_soc_pcm_runtime *rtd)
-{
- return dev_get_drvdata(rtd->dev);
-}
-
static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
{
INIT_LIST_HEAD(&card->codec_dev_list);
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index df97d1966468..3677ebb928d5 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -22,67 +22,39 @@
*
*/
-/*
- * TLV structure is right behind the struct snd_ctl_tlv:
- * unsigned int type - see SNDRV_CTL_TLVT_*
- * unsigned int length
- * .... data aligned to sizeof(unsigned int), use
- * block_length = (length + (sizeof(unsigned int) - 1)) &
- * ~(sizeof(unsigned int) - 1)) ....
- */
-
#include <uapi/sound/tlv.h>
-#define TLV_ITEM(type, ...) \
- (type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__
-#define TLV_LENGTH(...) \
- ((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+/* For historical reasons, these macros are aliases to the ones in UAPI. */
+#define TLV_ITEM SNDRV_CTL_TLVD_ITEM
+#define TLV_LENGTH SNDRV_CTL_TLVD_LENGTH
+
+#define TLV_CONTAINER_ITEM SNDRV_CTL_TLVD_CONTAINER_ITEM
+#define DECLARE_TLV_CONTAINER SNDRV_CTL_TLVD_DECLARE_CONTAINER
-#define TLV_CONTAINER_ITEM(...) \
- TLV_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
-#define DECLARE_TLV_CONTAINER(name, ...) \
- unsigned int name[] = { TLV_CONTAINER_ITEM(__VA_ARGS__) }
+#define TLV_DB_SCALE_MASK SNDRV_CTL_TLVD_DB_SCALE_MASK
+#define TLV_DB_SCALE_MUTE SNDRV_CTL_TLVD_DB_SCALE_MUTE
+#define TLV_DB_SCALE_ITEM SNDRV_CTL_TLVD_DB_SCALE_ITEM
+#define DECLARE_TLV_DB_SCALE SNDRV_CTL_TLVD_DECLARE_DB_SCALE
-#define TLV_DB_SCALE_MASK 0xffff
-#define TLV_DB_SCALE_MUTE 0x10000
-#define TLV_DB_SCALE_ITEM(min, step, mute) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_SCALE, \
- (min), \
- ((step) & TLV_DB_SCALE_MASK) | \
- ((mute) ? TLV_DB_SCALE_MUTE : 0))
-#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
- unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
+#define TLV_DB_MINMAX_ITEM SNDRV_CTL_TLVD_DB_MINMAX_ITEM
+#define TLV_DB_MINMAX_MUTE_ITEM SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM
+#define DECLARE_TLV_DB_MINMAX SNDRV_CTL_TLVD_DECLARE_DB_MINMAX
+#define DECLARE_TLV_DB_MINMAX_MUTE SNDRV_CTL_TLVD_DECLARE_DB_MINMAX_MUTE
-/* dB scale specified with min/max values instead of step */
-#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
-#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX_MUTE, (min_dB), (max_dB))
-#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
-#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) }
+#define TLV_DB_LINEAR_ITEM SNDRV_CTL_TLVD_DB_LINEAR_ITEM
+#define DECLARE_TLV_DB_LINEAR SNDRV_CTL_TLVD_DECLARE_DB_LINEAR
-/* linear volume between min_dB and max_dB (.01dB unit) */
-#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
-#define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
+#define TLV_DB_RANGE_ITEM SNDRV_CTL_TLVD_DB_RANGE_ITEM
+#define DECLARE_TLV_DB_RANGE SNDRV_CTL_TLVD_DECLARE_DB_RANGE
-/* dB range container:
- * Items in dB range container must be ordered by their values and by their
- * dB values. This implies that larger values must correspond with larger
- * dB values (which is also required for all other mixer controls).
+#define TLV_DB_GAIN_MUTE SNDRV_CTL_TLVD_DB_GAIN_MUTE
+
+/*
+ * The below assumes that each item TLV is 4 words like DB_SCALE or LINEAR.
+ * This is old-fashioned and was obsoleted by commit bf1d1c9b6179 ("ALSA: tlv:
+ * add DECLARE_TLV_DB_RANGE()").
*/
-/* Each item is: <min> <max> <TLV> */
-#define TLV_DB_RANGE_ITEM(...) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
-#define DECLARE_TLV_DB_RANGE(name, ...) \
- unsigned int name[] = { TLV_DB_RANGE_ITEM(__VA_ARGS__) }
-/* The below assumes that each item TLV is 4 words like DB_SCALE or LINEAR */
-#define TLV_DB_RANGE_HEAD(num) \
+#define TLV_DB_RANGE_HEAD(num) \
SNDRV_CTL_TLVT_DB_RANGE, 6 * (num) * sizeof(unsigned int)
-#define TLV_DB_GAIN_MUTE -9999999
-
#endif /* __SOUND_TLV_H */
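
[Editor's note] Because the kernel-side macros are now thin aliases for the UAPI ones, existing drivers keep compiling unchanged. A typical (hypothetical) volume-control TLV, for illustration:

	#include <sound/tlv.h>

	/* -63.50 dB minimum, 0.50 dB steps, step 0 mutes */
	static const DECLARE_TLV_DB_SCALE(example_out_tlv, -6350, 50, 1);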
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c2ba402ab256..cbdb90b6b308 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -13,7 +13,7 @@
EM( COMPACT_SKIPPED, "skipped") \
EM( COMPACT_DEFERRED, "deferred") \
EM( COMPACT_CONTINUE, "continue") \
- EM( COMPACT_PARTIAL, "partial") \
+ EM( COMPACT_SUCCESS, "success") \
EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
EM( COMPACT_COMPLETE, "complete") \
EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ff95fd02116f..903a09165bb1 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -58,16 +58,12 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
-#define show_bio_type(op, op_flags) show_bio_op(op), \
- show_bio_op_flags(op_flags), show_bio_extra(op_flags)
-
-#define show_bio_op(op) \
- __print_symbolic(op, \
- { READ, "READ" }, \
- { WRITE, "WRITE" })
+#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \
+ show_bio_extra(op_flags)
#define show_bio_op_flags(flags) \
__print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
+ { 0, "WRITE" }, \
{ REQ_RAHEAD, "READAHEAD" }, \
{ READ_SYNC, "READ_SYNC" }, \
{ WRITE_SYNC, "WRITE_SYNC" }, \
@@ -754,12 +750,12 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
(unsigned long long)__entry->old_blkaddr,
(unsigned long long)__entry->new_blkaddr,
- show_bio_type(__entry->op, __entry->op_flags),
+ show_bio_type(__entry->op_flags),
show_block_type(__entry->type))
);
@@ -806,9 +802,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u",
+ TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
show_dev(__entry),
- show_bio_type(__entry->op, __entry->op_flags),
+ show_bio_type(__entry->op_flags),
show_block_type(__entry->type),
(unsigned long long)__entry->sector,
__entry->size)
diff --git a/include/trace/events/intel_ish.h b/include/trace/events/intel_ish.h
new file mode 100644
index 000000000000..92f7d5b23177
--- /dev/null
+++ b/include/trace/events/intel_ish.h
@@ -0,0 +1,30 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_ish
+
+#if !defined(_TRACE_INTEL_ISH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_ISH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ishtp_dump,
+
+ TP_PROTO(const char *message),
+
+ TP_ARGS(message),
+
+ TP_STRUCT__entry(
+ __string(message, message)
+ ),
+
+ TP_fast_assign(
+ __assign_str(message, message);
+ ),
+
+ TP_printk("%s", __get_str(message))
+);
+
+
+#endif /* _TRACE_INTEL_ISH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
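
[Editor's note] The TRACE_EVENT() above generates a trace_ishtp_dump() call that the ISH driver can use for free-form debug strings. A hedged sketch of a call site; the message and function are hypothetical, and CREATE_TRACE_POINTS must be defined in exactly one compilation unit.

	#define CREATE_TRACE_POINTS
	#include <trace/events/intel_ish.h>

	static void example_report(void)
	{
		trace_ishtp_dump("example: firmware reset requested");
	}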
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index f95f25e786ef..1c41b74581f7 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -75,7 +75,7 @@ TRACE_EVENT(irq_handler_entry,
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
- * @action->handler scuccessully handled this irq. Otherwise, the irq might be
+ * @action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
new file mode 100644
index 000000000000..0383e5e9a0f3
--- /dev/null
+++ b/include/trace/events/rxrpc.h
@@ -0,0 +1,625 @@
+/* AF_RXRPC tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rxrpc
+
+#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RXRPC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rxrpc_conn,
+ TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(conn, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("C=%p %s u=%d sp=%pSR",
+ __entry->conn,
+ rxrpc_conn_traces[__entry->op],
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_client,
+ TP_PROTO(struct rxrpc_connection *conn, int channel,
+ enum rxrpc_client_trace op),
+
+ TP_ARGS(conn, channel, op),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(u32, cid )
+ __field(int, channel )
+ __field(int, usage )
+ __field(enum rxrpc_client_trace, op )
+ __field(enum rxrpc_conn_cache_state, cs )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->channel = channel;
+ __entry->usage = atomic_read(&conn->usage);
+ __entry->op = op;
+ __entry->cid = conn->proto.cid;
+ __entry->cs = conn->cache_state;
+ ),
+
+ TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ __entry->conn,
+ __entry->channel,
+ rxrpc_client_traces[__entry->op],
+ rxrpc_conn_cache_states[__entry->cs],
+ __entry->cid,
+ __entry->usage)
+ );
+
+TRACE_EVENT(rxrpc_call,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ int usage, const void *where, const void *aux),
+
+ TP_ARGS(call, op, usage, where, aux),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ __field(const void *, aux )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ __entry->aux = aux;
+ ),
+
+ TP_printk("c=%p %s u=%d sp=%pSR a=%p",
+ __entry->call,
+ rxrpc_call_traces[__entry->op],
+ __entry->usage,
+ __entry->where,
+ __entry->aux)
+ );
+
+TRACE_EVENT(rxrpc_skb,
+ TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
+ int usage, int mod_count, const void *where),
+
+ TP_ARGS(skb, op, usage, mod_count, where),
+
+ TP_STRUCT__entry(
+ __field(struct sk_buff *, skb )
+ __field(enum rxrpc_skb_trace, op )
+ __field(int, usage )
+ __field(int, mod_count )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->mod_count = mod_count;
+ __entry->where = where;
+ ),
+
+ TP_printk("s=%p %s u=%d m=%d p=%pSR",
+ __entry->skb,
+ rxrpc_skb_traces[__entry->op],
+ __entry->usage,
+ __entry->mod_count,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_rx_packet,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_rx_done,
+ TP_PROTO(int result, int abort_code),
+
+ TP_ARGS(result, abort_code),
+
+ TP_STRUCT__entry(
+ __field(int, result )
+ __field(int, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->abort_code = abort_code;
+ ),
+
+ TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_abort,
+ TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
+ int abort_code, int error),
+
+ TP_ARGS(why, cid, call_id, seq, abort_code, error),
+
+ TP_STRUCT__entry(
+ __array(char, why, 4 )
+ __field(u32, cid )
+ __field(u32, call_id )
+ __field(rxrpc_seq_t, seq )
+ __field(int, abort_code )
+ __field(int, error )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->why, why, 4);
+ __entry->cid = cid;
+ __entry->call_id = call_id;
+ __entry->abort_code = abort_code;
+ __entry->error = error;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ __entry->cid, __entry->call_id, __entry->seq,
+ __entry->abort_code, __entry->error, __entry->why)
+ );
+
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_transmit_trace, why )
+ __field(rxrpc_seq_t, tx_hard_ack )
+ __field(rxrpc_seq_t, tx_top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->tx_top = call->tx_top;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_transmit_traces[__entry->why],
+ __entry->tx_hard_ack + 1,
+ __entry->tx_top - __entry->tx_hard_ack)
+ );
+
+TRACE_EVENT(rxrpc_rx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+
+ TP_ARGS(call, first, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, first )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_ack_names[__entry->reason],
+ __entry->first,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_tx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+
+ TP_ARGS(call, seq, serial, flags, retrans, lose),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(bool, retrans )
+ __field(bool, lose )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->retrans = retrans;
+ __entry->lose = lose;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->retrans ? " *RETRANS*" : "",
+ __entry->lose ? " *LOSE*" : "")
+ );
+
+TRACE_EVENT(rxrpc_tx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
+ u8 reason, u8 n_acks),
+
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, ack_first )
+ __field(rxrpc_serial_t, ack_serial )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_first = ack_first;
+ __entry->ack_serial = ack_serial;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ __entry->call,
+ __entry->serial,
+ rxrpc_ack_names[__entry->reason],
+ __entry->ack_first,
+ __entry->ack_serial,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_receive,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
+ rxrpc_serial_t serial, rxrpc_seq_t seq),
+
+ TP_ARGS(call, why, serial, seq),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_receive_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->seq = seq;
+ __entry->hard_ack = call->rx_hard_ack;
+ __entry->top = call->rx_top;
+ ),
+
+ TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ __entry->call,
+ rxrpc_receive_traces[__entry->why],
+ __entry->serial,
+ __entry->seq,
+ __entry->hard_ack,
+ __entry->top)
+ );
+
+TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+ rxrpc_seq_t seq, unsigned int offset, unsigned int len,
+ int ret),
+
+ TP_ARGS(call, why, seq, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_recvmsg_trace, why )
+ __field(rxrpc_seq_t, seq )
+ __field(unsigned int, offset )
+ __field(unsigned int, len )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ __entry->call,
+ rxrpc_recvmsg_traces[__entry->why],
+ __entry->seq,
+ __entry->offset,
+ __entry->len,
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_rtt_tx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
+ rxrpc_serial_t send_serial),
+
+ TP_ARGS(call, why, send_serial),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_tx_trace, why )
+ __field(rxrpc_serial_t, send_serial )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ ),
+
+ TP_printk("c=%p %s sr=%08x",
+ __entry->call,
+ rxrpc_rtt_tx_traces[__entry->why],
+ __entry->send_serial)
+ );
+
+TRACE_EVENT(rxrpc_rtt_rx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ s64 rtt, u8 nr, s64 avg),
+
+ TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_rx_trace, why )
+ __field(u8, nr )
+ __field(rxrpc_serial_t, send_serial )
+ __field(rxrpc_serial_t, resp_serial )
+ __field(s64, rtt )
+ __field(u64, avg )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ __entry->resp_serial = resp_serial;
+ __entry->rtt = rtt;
+ __entry->nr = nr;
+ __entry->avg = avg;
+ ),
+
+ TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ __entry->call,
+ rxrpc_rtt_rx_traces[__entry->why],
+ __entry->send_serial,
+ __entry->resp_serial,
+ __entry->rtt,
+ __entry->nr,
+ __entry->avg)
+ );
+
+TRACE_EVENT(rxrpc_timer,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ ktime_t now, unsigned long now_j),
+
+ TP_ARGS(call, why, now, now_j),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_timer_trace, why )
+ __field_struct(ktime_t, now )
+ __field_struct(ktime_t, expire_at )
+ __field_struct(ktime_t, ack_at )
+ __field_struct(ktime_t, resend_at )
+ __field(unsigned long, now_j )
+ __field(unsigned long, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->expire_at = call->expire_at;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->now_j = now_j;
+ __entry->timer = call->timer.expires;
+ ),
+
+ TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ __entry->call,
+ rxrpc_timer_traces[__entry->why],
+ ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+ __entry->timer - __entry->now_j)
+ );
+
+TRACE_EVENT(rxrpc_rx_lose,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_propose_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool immediate,
+ bool background, enum rxrpc_propose_ack_outcome outcome),
+
+ TP_ARGS(call, why, ack_reason, serial, immediate, background,
+ outcome),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_propose_ack_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, ack_reason )
+ __field(bool, immediate )
+ __field(bool, background )
+ __field(enum rxrpc_propose_ack_outcome, outcome )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->immediate = immediate;
+ __entry->background = background;
+ __entry->outcome = outcome;
+ ),
+
+ TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ __entry->call,
+ rxrpc_propose_ack_traces[__entry->why],
+ rxrpc_ack_names[__entry->ack_reason],
+ __entry->serial,
+ __entry->immediate,
+ __entry->background,
+ rxrpc_propose_ack_outcomes[__entry->outcome])
+ );
+
+TRACE_EVENT(rxrpc_retransmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
+ s64 expiry),
+
+ TP_ARGS(call, seq, annotation, expiry),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(u8, annotation )
+ __field(s64, expiry )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->annotation = annotation;
+ __entry->expiry = expiry;
+ ),
+
+ TP_printk("c=%p q=%x a=%02x xp=%lld",
+ __entry->call,
+ __entry->seq,
+ __entry->annotation,
+ __entry->expiry)
+ );
+
+TRACE_EVENT(rxrpc_congest,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+
+ TP_ARGS(call, summary, ack_serial, change),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_congest_change, change )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ __field(rxrpc_seq_t, lowest_nak )
+ __field(rxrpc_serial_t, ack_serial )
+ __field_struct(struct rxrpc_ack_summary, sum )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->change = change;
+ __entry->hard_ack = call->tx_hard_ack;
+ __entry->top = call->tx_top;
+ __entry->lowest_nak = call->acks_lowest_nak;
+ __entry->ack_serial = ack_serial;
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+ TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ rxrpc_ack_names[__entry->sum.ack_reason],
+ __entry->hard_ack,
+ rxrpc_congest_modes[__entry->sum.mode],
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+ __entry->sum.nr_acks, __entry->sum.nr_nacks,
+ __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
+ __entry->sum.nr_rot_new_acks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
+ rxrpc_congest_changes[__entry->change],
+ __entry->sum.retrans_timeo ? " rTxTo" : "")
+ );
+
+#endif /* _TRACE_RXRPC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 185f8ea2702f..d0352a971ebd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -71,6 +71,7 @@ header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpf_common.h
+header-y += bpf_perf_event.h
header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 82e8aa59446b..208df7b44e90 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -329,9 +329,11 @@ enum {
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
+#define AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND 0x00000008
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
- AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
+ AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH | \
+ AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 0fbf6fd4711b..734fe83ab645 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -23,6 +23,42 @@
#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
/**
+ * enum batadv_tt_client_flags - TT client specific flags
+ * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
+ * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
+ * update telling its new real location has not been received/sent yet
+ * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
+ * This information is used by the "AP Isolation" feature
+ * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
+ * information is used by the Extended Isolation feature
+ * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
+ * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
+ * not been announced yet
+ * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
+ * in the table for one more originator interval for consistency purposes
+ * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
+ * the network but no node has announced it yet
+ *
+ * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
+ * Bits from 8 to 15 are called _local flags_ because they are used for local
+ * computations only.
+ *
+ * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
+ * the other nodes in the network. To achieve this goal these flags are included
+ * in the TT CRC computation.
+ */
+enum batadv_tt_client_flags {
+ BATADV_TT_CLIENT_DEL = (1 << 0),
+ BATADV_TT_CLIENT_ROAM = (1 << 1),
+ BATADV_TT_CLIENT_WIFI = (1 << 4),
+ BATADV_TT_CLIENT_ISOLA = (1 << 5),
+ BATADV_TT_CLIENT_NOPURGE = (1 << 8),
+ BATADV_TT_CLIENT_NEW = (1 << 9),
+ BATADV_TT_CLIENT_PENDING = (1 << 10),
+ BATADV_TT_CLIENT_TEMP = (1 << 11),
+};
+
+/**
* enum batadv_nl_attrs - batman-adv netlink attributes
*
* @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors
@@ -40,6 +76,26 @@
* @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run
* @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session
* @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment
+ * @BATADV_ATTR_ACTIVE: Flag indicating if the hard interface is active
+ * @BATADV_ATTR_TT_ADDRESS: Client MAC address
+ * @BATADV_ATTR_TT_TTVN: Translation table version
+ * @BATADV_ATTR_TT_LAST_TTVN: Previous translation table version
+ * @BATADV_ATTR_TT_CRC32: CRC32 over translation table
+ * @BATADV_ATTR_TT_VID: VLAN ID
+ * @BATADV_ATTR_TT_FLAGS: Translation table client flags
+ * @BATADV_ATTR_FLAG_BEST: Flags indicating entry is the best
+ * @BATADV_ATTR_LAST_SEEN_MSECS: Time in milliseconds since last seen
+ * @BATADV_ATTR_NEIGH_ADDRESS: Neighbour MAC address
+ * @BATADV_ATTR_TQ: TQ to neighbour
+ * @BATADV_ATTR_THROUGHPUT: Estimated throughput to Neighbour
+ * @BATADV_ATTR_BANDWIDTH_UP: Reported uplink bandwidth
+ * @BATADV_ATTR_BANDWIDTH_DOWN: Reported downlink bandwidth
+ * @BATADV_ATTR_ROUTER: Gateway router MAC address
+ * @BATADV_ATTR_BLA_OWN: Flag indicating own originator
+ * @BATADV_ATTR_BLA_ADDRESS: Bridge loop avoidance claim MAC address
+ * @BATADV_ATTR_BLA_VID: BLA VLAN ID
+ * @BATADV_ATTR_BLA_BACKBONE: BLA gateway originator MAC address
+ * @BATADV_ATTR_BLA_CRC: BLA CRC
* @__BATADV_ATTR_AFTER_LAST: internal use
* @NUM_BATADV_ATTR: total number of batadv_nl_attrs available
* @BATADV_ATTR_MAX: highest attribute number currently defined
@@ -60,6 +116,26 @@ enum batadv_nl_attrs {
BATADV_ATTR_TPMETER_BYTES,
BATADV_ATTR_TPMETER_COOKIE,
BATADV_ATTR_PAD,
+ BATADV_ATTR_ACTIVE,
+ BATADV_ATTR_TT_ADDRESS,
+ BATADV_ATTR_TT_TTVN,
+ BATADV_ATTR_TT_LAST_TTVN,
+ BATADV_ATTR_TT_CRC32,
+ BATADV_ATTR_TT_VID,
+ BATADV_ATTR_TT_FLAGS,
+ BATADV_ATTR_FLAG_BEST,
+ BATADV_ATTR_LAST_SEEN_MSECS,
+ BATADV_ATTR_NEIGH_ADDRESS,
+ BATADV_ATTR_TQ,
+ BATADV_ATTR_THROUGHPUT,
+ BATADV_ATTR_BANDWIDTH_UP,
+ BATADV_ATTR_BANDWIDTH_DOWN,
+ BATADV_ATTR_ROUTER,
+ BATADV_ATTR_BLA_OWN,
+ BATADV_ATTR_BLA_ADDRESS,
+ BATADV_ATTR_BLA_VID,
+ BATADV_ATTR_BLA_BACKBONE,
+ BATADV_ATTR_BLA_CRC,
/* add attributes above here, update the policy in netlink.c */
__BATADV_ATTR_AFTER_LAST,
NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST,
@@ -73,6 +149,15 @@ enum batadv_nl_attrs {
* @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device
* @BATADV_CMD_TP_METER: Start a tp meter session
* @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session
+ * @BATADV_CMD_GET_ROUTING_ALGOS: Query the list of routing algorithms.
+ * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces
+ * @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations
+ * @BATADV_CMD_GET_TRANSTABLE_GLOBAL: Query list of global translations
+ * @BATADV_CMD_GET_ORIGINATORS: Query list of originators
+ * @BATADV_CMD_GET_NEIGHBORS: Query list of neighbours
+ * @BATADV_CMD_GET_GATEWAYS: Query list of gateways
+ * @BATADV_CMD_GET_BLA_CLAIM: Query list of bridge loop avoidance claims
+ * @BATADV_CMD_GET_BLA_BACKBONE: Query list of bridge loop avoidance backbones
* @__BATADV_CMD_AFTER_LAST: internal use
* @BATADV_CMD_MAX: highest used command number
*/
@@ -81,6 +166,15 @@ enum batadv_nl_commands {
BATADV_CMD_GET_MESH_INFO,
BATADV_CMD_TP_METER,
BATADV_CMD_TP_METER_CANCEL,
+ BATADV_CMD_GET_ROUTING_ALGOS,
+ BATADV_CMD_GET_HARDIFS,
+ BATADV_CMD_GET_TRANSTABLE_LOCAL,
+ BATADV_CMD_GET_TRANSTABLE_GLOBAL,
+ BATADV_CMD_GET_ORIGINATORS,
+ BATADV_CMD_GET_NEIGHBORS,
+ BATADV_CMD_GET_GATEWAYS,
+ BATADV_CMD_GET_BLA_CLAIM,
+ BATADV_CMD_GET_BLA_BACKBONE,
/* add new commands above here */
__BATADV_CMD_AFTER_LAST,
BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9e5fc168c8a3..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -95,6 +95,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
+ BPF_PROG_TYPE_PERF_EVENT,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -375,6 +376,56 @@ enum bpf_func_id {
*/
BPF_FUNC_probe_write_user,
+ /**
+ * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_current_task_under_cgroup,
+
+ /**
+ * bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size,
+ * to be used e.g. with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_tail,
+
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len are part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};
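
[Editor's note] The helper documentation above is what a program author sees; in restricted C the helper is reached through its BPF_FUNC_* id. A hedged sketch in the style of the in-tree samples; the function-pointer shim and the program name are the samples' convention, not something this header provides.

	#include <linux/bpf.h>

	static int (*bpf_skb_pull_data)(void *skb, int len) =
		(void *) BPF_FUNC_skb_pull_data;

	int example_cls_prog(struct __sk_buff *skb)
	{
		/* make at least the Ethernet header directly readable */
		if (bpf_skb_pull_data(skb, 14))
			return 0;	/* verdict values depend on the attach point */
		/* ... direct packet access via skb->data / skb->data_end ... */
		return 0;
	}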
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+ struct pt_regs regs;
+ __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
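
[Editor's note] struct bpf_perf_event_data is the context handed to BPF_PROG_TYPE_PERF_EVENT programs. A hedged sketch of a minimal program body; the function name and threshold are hypothetical, and loader/section glue is omitted.

	#include <linux/bpf_perf_event.h>

	int example_perf_prog(struct bpf_perf_event_data *ctx)
	{
		/* both the sampled registers and the period are visible here */
		return ctx->sample_period > 4096;
	}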
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b8f38e84d93a..099a4200732c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1362,7 +1362,14 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
- ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
+
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1371,7 +1378,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 27e17363263a..42fa977e3b14 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -108,6 +108,10 @@
*
* 7.25
* - add FUSE_PARALLEL_DIROPS
+ *
+ * 7.26
+ * - add FUSE_HANDLE_KILLPRIV
+ * - add FUSE_POSIX_ACL
*/
#ifndef _LINUX_FUSE_H
@@ -143,7 +147,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 25
+#define FUSE_KERNEL_MINOR_VERSION 26
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -238,6 +242,8 @@ struct fuse_file_lock {
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
* FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
* FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
+ * FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
+ * FUSE_POSIX_ACL: filesystem supports posix acls
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -258,6 +264,8 @@ struct fuse_file_lock {
#define FUSE_WRITEBACK_CACHE (1 << 16)
#define FUSE_NO_OPEN_SUPPORT (1 << 17)
#define FUSE_PARALLEL_DIROPS (1 << 18)
+#define FUSE_HANDLE_KILLPRIV (1 << 19)
+#define FUSE_POSIX_ACL (1 << 20)
/**
* CUSE INIT request/reply flags
diff --git a/include/uapi/linux/hsi/hsi_char.h b/include/uapi/linux/hsi/hsi_char.h
index 76160b4f455d..c00a463d55f9 100644
--- a/include/uapi/linux/hsi/hsi_char.h
+++ b/include/uapi/linux/hsi/hsi_char.h
@@ -20,10 +20,11 @@
* 02110-1301 USA
*/
-
#ifndef __HSI_CHAR_H
#define __HSI_CHAR_H
+#include <linux/types.h>
+
#define HSI_CHAR_MAGIC 'k'
#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
@@ -48,16 +49,16 @@
#define HSC_ARB_PRIO 1
struct hsc_rx_config {
- uint32_t mode;
- uint32_t flow;
- uint32_t channels;
+ __u32 mode;
+ __u32 flow;
+ __u32 channels;
};
struct hsc_tx_config {
- uint32_t mode;
- uint32_t channels;
- uint32_t speed;
- uint32_t arb_mode;
+ __u32 mode;
+ __u32 channels;
+ __u32 speed;
+ __u32 arb_mode;
};
#endif /* __HSI_CHAR_H */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c186f64fffca..ab92bca6d448 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -140,7 +140,7 @@ struct bridge_vlan_xstats {
__u64 tx_bytes;
__u64 tx_packets;
__u16 vid;
- __u16 pad1;
+ __u16 flags;
__u32 pad2;
};
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a1b5202c5f6b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -318,6 +318,7 @@ enum {
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
+ IFLA_BRPORT_MCAST_FLOOD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -463,6 +464,7 @@ enum {
enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};
@@ -617,7 +619,7 @@ enum {
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -629,6 +631,7 @@ enum {
IFLA_VF_TRUST, /* Trust VF */
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
__IFLA_VF_MAX,
};
@@ -645,6 +648,22 @@ struct ifla_vf_vlan {
__u32 qos;
};
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
+
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -825,6 +844,7 @@ enum {
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
__IFLA_STATS_MAX,
};
@@ -844,6 +864,14 @@ enum {
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
/* XDP section */
enum {
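[Illustrative sketch, not part of the patch] Filling the new struct ifla_vf_vlan_info for an 802.1ad (QinQ) outer tag. The netlink nesting under IFLA_VF_VLAN_LIST and the rtnetlink request around it are omitted; the helper name and values are examples.

#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static void fill_vf_vlan_info(struct ifla_vf_vlan_info *info,
			      __u32 vf, __u16 vid, __u8 qos)
{
	info->vf = vf;
	info->vlan = vid;			/* 0 disables VLAN filtering */
	info->qos = qos;
	info->vlan_proto = htons(ETH_P_8021AD);	/* outer (S-tag) protocol */
}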
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 777b6cdb1b7b..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -27,9 +27,23 @@
#define GRE_SEQ __cpu_to_be16(0x1000)
#define GRE_STRICT __cpu_to_be16(0x0800)
#define GRE_REC __cpu_to_be16(0x0700)
-#define GRE_FLAGS __cpu_to_be16(0x00F8)
+#define GRE_ACK __cpu_to_be16(0x0080)
+#define GRE_FLAGS __cpu_to_be16(0x0078)
#define GRE_VERSION __cpu_to_be16(0x0007)
+#define GRE_IS_CSUM(f) ((f) & GRE_CSUM)
+#define GRE_IS_ROUTING(f) ((f) & GRE_ROUTING)
+#define GRE_IS_KEY(f) ((f) & GRE_KEY)
+#define GRE_IS_SEQ(f) ((f) & GRE_SEQ)
+#define GRE_IS_STRICT(f) ((f) & GRE_STRICT)
+#define GRE_IS_REC(f) ((f) & GRE_REC)
+#define GRE_IS_ACK(f) ((f) & GRE_ACK)
+
+#define GRE_VERSION_0 __cpu_to_be16(0x0000)
+#define GRE_VERSION_1 __cpu_to_be16(0x0001)
+#define GRE_PROTO_PPP __cpu_to_be16(0x880b)
+#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff)
+
struct ip_tunnel_parm {
char name[IFNAMSIZ];
int link;
@@ -60,6 +74,7 @@ enum {
IFLA_IPTUN_ENCAP_FLAGS,
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
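[Illustrative sketch, not part of the patch] Using the new GRE flag helpers to recognise PPTP-style (version 1) GRE. The struct below is a hypothetical local view of the generic GRE base header, not something defined by this patch.

#include <linux/if_tunnel.h>

struct gre_base_hdr_example {	/* hypothetical, for illustration only */
	__be16 flags;
	__be16 protocol;
};

static int is_pptp_gre(const struct gre_base_hdr_example *gre)
{
	if ((gre->flags & GRE_VERSION) != GRE_VERSION_1)
		return 0;
	/* PPTP uses protocol 0x880b and always carries a key (call ID). */
	return gre->protocol == GRE_PROTO_PPP && GRE_IS_KEY(gre->flags);
}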
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index abbd1dc5d683..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -73,6 +73,7 @@ enum {
INET_DIAG_BC_S_COND,
INET_DIAG_BC_D_COND,
INET_DIAG_BC_DEV_COND, /* u32 ifindex */
+ INET_DIAG_BC_MARK_COND,
};
struct inet_diag_hostcond {
@@ -82,6 +83,11 @@ struct inet_diag_hostcond {
__be32 addr[0];
};
+struct inet_diag_markcond {
+ __u32 mark;
+ __u32 mask;
+};
+
/* Base info structure. It contains socket identity (addrs/ports/cookie)
* and, alas, the information shown by netstat. */
struct inet_diag_msg {
@@ -117,6 +123,8 @@ enum {
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
+ INET_DIAG_MARK,
+ INET_DIAG_BBRINFO,
__INET_DIAG_MAX,
};
@@ -150,8 +158,20 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+/* INET_DIAG_BBRINFO */
+
+struct tcp_bbr_info {
+ /* u64 bw: max-filtered BW (app throughput) estimate in bytes per second: */
+ __u32 bbr_bw_lo; /* lower 32 bits of bw */
+ __u32 bbr_bw_hi; /* upper 32 bits of bw */
+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
+};
+
union tcp_cc_info {
struct tcpvegas_info vegas;
struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
};
#endif /* _UAPI_INET_DIAG_H_ */
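[Illustrative sketch, not part of the patch] Appending an INET_DIAG bytecode operation that matches sockets by fwmark with the new struct inet_diag_markcond. The enclosing INET_DIAG_REQ_BYTECODE attribute and the jump offsets of a real filter program are simplified assumptions.

#include <linux/inet_diag.h>
#include <string.h>

static unsigned int append_mark_cond(void *buf, __u32 mark, __u32 mask)
{
	struct inet_diag_bc_op *op = buf;
	struct inet_diag_markcond *cond = (void *)(op + 1);
	unsigned int len = sizeof(*op) + sizeof(*cond);

	memset(op, 0, len);
	op->code = INET_DIAG_BC_MARK_COND;
	op->yes = len;			/* next op if the mark matches */
	op->no = len + 4;		/* assumed: a 4-byte jump/abort follows */

	cond->mark = mark;
	cond->mask = mask;
	return len;
}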
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index c51494119817..e794f7bee22f 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -248,6 +248,7 @@ struct input_mask {
#define BUS_SPI 0x1C
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
+#define BUS_INTEL_ISHTP 0x1F
/*
* MT_TOOL types
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 395876060f50..8c2772340c3f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -177,6 +177,7 @@ enum {
DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 237fac4bc17b..15d8510cdae0 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -48,6 +48,7 @@
#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
/* Basic mode status register. */
#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
new file mode 100644
index 000000000000..8be21e02387d
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -0,0 +1,12 @@
+#ifndef _NETFILTER_NF_LOG_H
+#define _NETFILTER_NF_LOG_H
+
+#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
+#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
+#define NF_LOG_IPOPT 0x04 /* Log IP options */
+#define NF_LOG_UID 0x08 /* Log UID owning local socket */
+#define NF_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
+#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
+#define NF_LOG_MASK 0x2f
+
+#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c674ba2563b7..c6c4477c136b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,35 @@ enum nft_cmp_attributes {
};
#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
+/**
+ * enum nft_range_ops - nf_tables range operator
+ *
+ * @NFT_RANGE_EQ: equal
+ * @NFT_RANGE_NEQ: not equal
+ */
+enum nft_range_ops {
+ NFT_RANGE_EQ,
+ NFT_RANGE_NEQ,
+};
+
+/**
+ * enum nft_range_attributes - nf_tables range expression netlink attributes
+ *
+ * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
+ * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_range_attributes {
+ NFTA_RANGE_UNSPEC,
+ NFTA_RANGE_SREG,
+ NFTA_RANGE_OP,
+ NFTA_RANGE_FROM_DATA,
+ NFTA_RANGE_TO_DATA,
+ __NFTA_RANGE_MAX
+};
+#define NFTA_RANGE_MAX (__NFTA_RANGE_MAX - 1)
+
enum nft_lookup_flags {
NFT_LOOKUP_F_INV = (1 << 0),
};
@@ -575,6 +604,10 @@ enum nft_dynset_ops {
NFT_DYNSET_OP_UPDATE,
};
+enum nft_dynset_flags {
+ NFT_DYNSET_F_INV = (1 << 0),
+};
+
/**
* enum nft_dynset_attributes - dynset expression attributes
*
@@ -585,6 +618,7 @@ enum nft_dynset_ops {
* @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_DYNSET_FLAGS: flags (NLA_U32)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -596,6 +630,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
+ NFTA_DYNSET_FLAGS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -724,6 +759,28 @@ enum nft_meta_keys {
};
/**
+ * enum nft_hash_attributes - nf_tables hash expression netlink attributes
+ *
+ * @NFTA_HASH_SREG: source register (NLA_U32)
+ * @NFTA_HASH_DREG: destination register (NLA_U32)
+ * @NFTA_HASH_LEN: source data length (NLA_U32)
+ * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
+ * @NFTA_HASH_SEED: seed value (NLA_U32)
+ * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
+ */
+enum nft_hash_attributes {
+ NFTA_HASH_UNSPEC,
+ NFTA_HASH_SREG,
+ NFTA_HASH_DREG,
+ NFTA_HASH_LEN,
+ NFTA_HASH_MODULUS,
+ NFTA_HASH_SEED,
+ NFTA_HASH_OFFSET,
+ __NFTA_HASH_MAX,
+};
+#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
+
+/**
* enum nft_meta_attributes - nf_tables meta expression netlink attributes
*
* @NFTA_META_DREG: destination register (NLA_U32)
@@ -866,12 +923,14 @@ enum nft_log_attributes {
* @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
* @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
* @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
+ * @NFTA_QUEUE_SREG_QNUM: source register of queue number (NLA_U32: nft_registers)
*/
enum nft_queue_attributes {
NFTA_QUEUE_UNSPEC,
NFTA_QUEUE_NUM,
NFTA_QUEUE_TOTAL,
NFTA_QUEUE_FLAGS,
+ NFTA_QUEUE_SREG_QNUM,
__NFTA_QUEUE_MAX
};
#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
@@ -880,6 +939,25 @@ enum nft_queue_attributes {
#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
#define NFT_QUEUE_FLAG_MASK 0x03
+enum nft_quota_flags {
+ NFT_QUOTA_F_INV = (1 << 0),
+};
+
+/**
+ * enum nft_quota_attributes - nf_tables quota expression netlink attributes
+ *
+ * @NFTA_QUOTA_BYTES: quota in bytes (NLA_U16)
+ * @NFTA_QUOTA_FLAGS: flags (NLA_U32)
+ */
+enum nft_quota_attributes {
+ NFTA_QUOTA_UNSPEC,
+ NFTA_QUOTA_BYTES,
+ NFTA_QUOTA_FLAGS,
+ NFTA_QUOTA_PAD,
+ __NFTA_QUOTA_MAX
+};
+#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
+
/**
* enum nft_reject_types - nf_tables reject expression reject types
*
@@ -1051,7 +1129,7 @@ enum nft_gen_attributes {
* @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32)
* @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32)
*/
-enum nft_trace_attibutes {
+enum nft_trace_attributes {
NFTA_TRACE_UNSPEC,
NFTA_TRACE_TABLE,
NFTA_TRACE_CHAIN,
@@ -1082,4 +1160,30 @@ enum nft_trace_types {
__NFT_TRACETYPE_MAX
};
#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1)
+
+/**
+ * enum nft_ng_attributes - nf_tables number generator expression netlink attributes
+ *
+ * @NFTA_NG_DREG: destination register (NLA_U32)
+ * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
+ * @NFTA_NG_TYPE: operation type (NLA_U32)
+ * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
+ */
+enum nft_ng_attributes {
+ NFTA_NG_UNSPEC,
+ NFTA_NG_DREG,
+ NFTA_NG_MODULUS,
+ NFTA_NG_TYPE,
+ NFTA_NG_OFFSET,
+ __NFTA_NG_MAX
+};
+#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
+
+enum nft_ng_types {
+ NFT_NG_INCREMENTAL,
+ NFT_NG_RANDOM,
+ __NFT_NG_MAX
+};
+#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
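[Illustrative sketch, not part of the patch] Filling the data portion of an nft "hash" expression with libmnl, using the new NFTA_HASH_* attributes. The surrounding NFTA_EXPR_NAME/NFTA_EXPR_DATA nesting, rule construction, and the chosen register, modulus, seed and offset values are assumptions for illustration.

#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nf_tables.h>

static void put_hash_expr_data(struct nlmsghdr *nlh)
{
	/* nft netlink attribute payloads are big endian. */
	mnl_attr_put_u32(nlh, NFTA_HASH_SREG, htonl(NFT_REG_1));
	mnl_attr_put_u32(nlh, NFTA_HASH_DREG, htonl(NFT_REG_1));
	mnl_attr_put_u32(nlh, NFTA_HASH_LEN, htonl(4));		/* hash 4 bytes */
	mnl_attr_put_u32(nlh, NFTA_HASH_MODULUS, htonl(16));	/* result in [0,16) */
	mnl_attr_put_u32(nlh, NFTA_HASH_SEED, htonl(0xdeadbeef));
	mnl_attr_put_u32(nlh, NFTA_HASH_OFFSET, htonl(100));	/* then add 100 */
}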
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 9df789709abe..6deb8867c5fc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -231,13 +231,13 @@ enum ctattr_secctx {
enum ctattr_stats_cpu {
CTA_STATS_UNSPEC,
- CTA_STATS_SEARCHED,
+ CTA_STATS_SEARCHED, /* no longer used */
CTA_STATS_FOUND,
- CTA_STATS_NEW,
+ CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
CTA_STATS_IGNORE,
- CTA_STATS_DELETE,
- CTA_STATS_DELETE_LIST,
+ CTA_STATS_DELETE, /* no longer used */
+ CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
CTA_STATS_INSERT_FAILED,
CTA_STATS_DROP,
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 6db90372f09c..3efc0ca18345 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -6,6 +6,7 @@
/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
+#define XT_HASHLIMIT_SCALE_v2 1000000llu
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
* seconds, or one packet every 59 hours.
*/
@@ -63,6 +64,20 @@ struct hashlimit_cfg1 {
__u8 srcmask, dstmask;
};
+struct hashlimit_cfg2 {
+ __u64 avg; /* Average secs between packets * scale */
+ __u64 burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+
+ /* user specified */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
+
+ __u8 srcmask, dstmask;
+};
+
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -71,4 +86,12 @@ struct xt_hashlimit_mtinfo1 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
+struct xt_hashlimit_mtinfo2 {
+ char name[NAME_MAX];
+ struct hashlimit_cfg2 cfg;
+
+ /* Used internally by the kernel */
+ struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_HASHLIMIT_H */
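[Illustrative sketch, not part of the patch] How the new XT_HASHLIMIT_SCALE_v2 constant relates a packets-per-second limit to the 'avg' field of struct hashlimit_cfg2, which holds the average interval between packets in 1/XT_HASHLIMIT_SCALE_v2 second units. The helper name is made up for illustration.

#include <linux/netfilter/xt_hashlimit.h>

/* e.g. 1000 pkt/s -> avg = 1000000 / 1000 = 1000 scale units between packets */
static __u64 pps_to_avg_v2(__u64 packets_per_sec)
{
	return packets_per_sec ? XT_HASHLIMIT_SCALE_v2 / packets_per_sec : 0;
}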
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 220694151434..56368e9b4622 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -48,6 +48,7 @@
#define NL80211_MULTICAST_GROUP_REG "regulatory"
#define NL80211_MULTICAST_GROUP_MLME "mlme"
#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
+#define NL80211_MULTICAST_GROUP_NAN "nan"
#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
/**
@@ -838,6 +839,41 @@
* not running. The driver indicates the status of the scan through
* cfg80211_scan_done().
*
+ * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
+ * %NL80211_ATTR_WDEV interface. This interface must have been previously
+ * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
+ * NAN interface will create or join a cluster. This command must have a
+ * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
+ * %NL80211_ATTR_NAN_DUAL attributes.
+ * After this command NAN functions can be added.
+ * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
+ * its %NL80211_ATTR_WDEV interface.
+ * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
+ * with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this
+ * operation returns the strictly positive and unique instance id
+ * (%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE)
+ * of the function upon success.
+ * Since instance IDs can be re-used, this cookie is the right
+ * way to identify the function. This will avoid races when a termination
+ * event is handled by the user space after it has already added a new
+ * function that got the same instance id from the kernel as the one
+ * which just terminated.
+ * This cookie may be used in NAN events even before the command
+ * returns, so userspace shouldn't process NAN events until it processes
+ * the response to this command.
+ * Look at %NL80211_ATTR_SOCKET_OWNER as well.
+ * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie.
+ * This command is also used as a notification sent when a NAN function is
+ * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
+ * and %NL80211_ATTR_COOKIE attributes.
+ * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
+ * must be operational (%NL80211_CMD_START_NAN was executed).
+ * It must contain at least one of the following attributes:
+ * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
+ * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
+ * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
+ * %NL80211_ATTR_COOKIE.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1026,6 +1062,13 @@ enum nl80211_commands {
NL80211_CMD_ABORT_SCAN,
+ NL80211_CMD_START_NAN,
+ NL80211_CMD_STOP_NAN,
+ NL80211_CMD_ADD_NAN_FUNCTION,
+ NL80211_CMD_DEL_NAN_FUNCTION,
+ NL80211_CMD_CHANGE_NAN_CONFIG,
+ NL80211_CMD_NAN_MATCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1343,7 +1386,13 @@ enum nl80211_commands {
* enum nl80211_band value is used as the index (nla_type() of the nested
* data. If a band is not included, it will be configured to allow all
* rates based on negotiated supported rates information. This attribute
- * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
+ * and joining mesh networks (not IBSS yet). In the latter case, it must
+ * specify just a single bitrate, which is to be used for the beacon.
+ * The driver must also specify support for this with the extended
+ * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1733,6 +1782,12 @@ enum nl80211_commands {
* regulatory indoor configuration would be owned by the netlink socket
* that configured the indoor setting, and the indoor operation would be
* cleared when the socket is closed.
+ * If set during NAN interface creation, the interface will be destroyed
+ * if the socket is closed just like any other interface. Moreover, only
+ * the netlink socket that created the interface will be allowed to add
+ * and remove functions. NAN notifications will be sent in unicast to that
+ * socket. Without this attribute, any socket can add functions and the
+ * notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1867,6 +1922,21 @@ enum nl80211_commands {
* @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
* used to pull the stored data for mesh peer in power save state.
*
+ * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
+ * Also, values 1 and 255 are reserved for certification purposes and
+ * should not be used during a normal device operation.
+ * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
+ * &enum nl80211_nan_dual_band_conf). This attribute is used with
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG.
+ * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
+ * &enum nl80211_nan_func_attributes for description of this nested
+ * attribute.
+ * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
+ * See &enum nl80211_nan_match_attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2261,6 +2331,11 @@ enum nl80211_attrs {
NL80211_ATTR_MESH_PEER_AID,
+ NL80211_ATTR_NAN_MASTER_PREF,
+ NL80211_ATTR_NAN_DUAL,
+ NL80211_ATTR_NAN_FUNC,
+ NL80211_ATTR_NAN_MATCH,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2339,6 +2414,7 @@ enum nl80211_attrs {
* commands to create and destroy one
* @NL80211_IFTYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
+ * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -2359,6 +2435,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_GO,
NL80211_IFTYPE_P2P_DEVICE,
NL80211_IFTYPE_OCB,
+ NL80211_IFTYPE_NAN,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -4551,6 +4628,12 @@ enum nl80211_feature_flags {
* (if available).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
+ * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
+ * configuration (AP/mesh) with HT rates.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
+ * configuration (AP/mesh) with VHT rates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4562,6 +4645,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SCAN_START_TIME,
NL80211_EXT_FEATURE_BSS_PARENT_TSF,
NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4855,4 +4941,186 @@ enum nl80211_bss_select_attr {
NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
};
+/**
+ * enum nl80211_nan_dual_band_conf - NAN dual band configuration
+ *
+ * Defines the NAN dual band mode of operation
+ *
+ * @NL80211_NAN_BAND_DEFAULT: device default mode
+ * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode
+ * @NL80211_NAN_BAND_5GHZ: 5GHz mode
+ */
+enum nl80211_nan_dual_band_conf {
+ NL80211_NAN_BAND_DEFAULT = 1 << 0,
+ NL80211_NAN_BAND_2GHZ = 1 << 1,
+ NL80211_NAN_BAND_5GHZ = 1 << 2,
+};
+
+/**
+ * enum nl80211_nan_function_type - NAN function type
+ *
+ * Defines the function type of a NAN function
+ *
+ * @NL80211_NAN_FUNC_PUBLISH: function is publish
+ * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe
+ * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up
+ */
+enum nl80211_nan_function_type {
+ NL80211_NAN_FUNC_PUBLISH,
+ NL80211_NAN_FUNC_SUBSCRIBE,
+ NL80211_NAN_FUNC_FOLLOW_UP,
+
+ /* keep last */
+ __NL80211_NAN_FUNC_TYPE_AFTER_LAST,
+ NL80211_NAN_FUNC_MAX_TYPE = __NL80211_NAN_FUNC_TYPE_AFTER_LAST - 1,
+};
+
+/**
+ * enum nl80211_nan_publish_type - NAN publish tx type
+ *
+ * Defines how to send publish Service Discovery Frames
+ *
+ * @NL80211_NAN_SOLICITED_PUBLISH: publish function is solicited
+ * @NL80211_NAN_UNSOLICITED_PUBLISH: publish function is unsolicited
+ */
+enum nl80211_nan_publish_type {
+ NL80211_NAN_SOLICITED_PUBLISH = 1 << 0,
+ NL80211_NAN_UNSOLICITED_PUBLISH = 1 << 1,
+};
+
+/**
+ * enum nl80211_nan_func_term_reason - NAN functions termination reason
+ *
+ * Defines termination reasons of a NAN function
+ *
+ * @NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST: requested by user
+ * @NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED: timeout
+ * @NL80211_NAN_FUNC_TERM_REASON_ERROR: errored
+ */
+enum nl80211_nan_func_term_reason {
+ NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST,
+ NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED,
+ NL80211_NAN_FUNC_TERM_REASON_ERROR,
+};
+
+#define NL80211_NAN_FUNC_SERVICE_ID_LEN 6
+#define NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN 0xff
+#define NL80211_NAN_FUNC_SRF_MAX_LEN 0xff
+
+/**
+ * enum nl80211_nan_func_attributes - NAN function attributes
+ * @__NL80211_NAN_FUNC_INVALID: invalid
+ * @NL80211_NAN_FUNC_TYPE: &enum nl80211_nan_function_type (u8).
+ * @NL80211_NAN_FUNC_SERVICE_ID: 6 bytes of the service ID hash as
+ * specified in NAN spec. This is a binary attribute.
+ * @NL80211_NAN_FUNC_PUBLISH_TYPE: relevant if the function's type is
+ * publish. Defines the transmission type for the publish Service Discovery
+ * Frame, see &enum nl80211_nan_publish_type. Its type is u8.
+ * @NL80211_NAN_FUNC_PUBLISH_BCAST: relevant if the function is a solicited
+ * publish. Should the solicited publish Service Discovery Frame be sent to
+ * the NAN Broadcast address. This is a flag.
+ * @NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE: relevant if the function's type is
+ * subscribe. Is the subscribe active. This is a flag.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_ID: relevant if the function's type is follow up.
+ * The instance ID for the follow up Service Discovery Frame. This is u8.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
+ * is follow up. This is a u8.
+ * The requestor instance ID for the follow up Service Discovery Frame.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
+ * follow up Service Discovery Frame. This is a binary attribute.
+ * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
+ * close range. The range itself (RSSI) is defined by the device.
+ * This is a flag.
+ * @NL80211_NAN_FUNC_TTL: strictly positive number of DWs this function should
+ * stay active. If not present infinite TTL is assumed. This is a u32.
+ * @NL80211_NAN_FUNC_SERVICE_INFO: array of bytes describing the service
+ * specific info. This is a binary attribute.
+ * @NL80211_NAN_FUNC_SRF: Service Receive Filter. This is a nested attribute.
+ * See &enum nl80211_nan_srf_attributes.
+ * @NL80211_NAN_FUNC_RX_MATCH_FILTER: Receive Matching filter. This is a nested
+ * attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_TX_MATCH_FILTER: Transmit Matching filter. This is a
+ * nested attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_INSTANCE_ID: The instance ID of the function.
+ * Its type is u8 and it cannot be 0.
+ * @NL80211_NAN_FUNC_TERM_REASON: NAN function termination reason.
+ * See &enum nl80211_nan_func_term_reason.
+ *
+ * @NUM_NL80211_NAN_FUNC_ATTR: internal
+ * @NL80211_NAN_FUNC_ATTR_MAX: highest NAN function attribute
+ */
+enum nl80211_nan_func_attributes {
+ __NL80211_NAN_FUNC_INVALID,
+ NL80211_NAN_FUNC_TYPE,
+ NL80211_NAN_FUNC_SERVICE_ID,
+ NL80211_NAN_FUNC_PUBLISH_TYPE,
+ NL80211_NAN_FUNC_PUBLISH_BCAST,
+ NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE,
+ NL80211_NAN_FUNC_FOLLOW_UP_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_DEST,
+ NL80211_NAN_FUNC_CLOSE_RANGE,
+ NL80211_NAN_FUNC_TTL,
+ NL80211_NAN_FUNC_SERVICE_INFO,
+ NL80211_NAN_FUNC_SRF,
+ NL80211_NAN_FUNC_RX_MATCH_FILTER,
+ NL80211_NAN_FUNC_TX_MATCH_FILTER,
+ NL80211_NAN_FUNC_INSTANCE_ID,
+ NL80211_NAN_FUNC_TERM_REASON,
+
+ /* keep last */
+ NUM_NL80211_NAN_FUNC_ATTR,
+ NL80211_NAN_FUNC_ATTR_MAX = NUM_NL80211_NAN_FUNC_ATTR - 1
+};
+
+/**
+ * enum nl80211_nan_srf_attributes - NAN Service Response filter attributes
+ * @__NL80211_NAN_SRF_INVALID: invalid
+ * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF is set.
+ * This is a flag.
+ * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
+ * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
+ * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
+ * &NL80211_NAN_SRF_BF is present. This is a u8.
+ * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
+ * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
+ * attribute. Each nested attribute is a MAC address.
+ * @NUM_NL80211_NAN_SRF_ATTR: internal
+ * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
+ */
+enum nl80211_nan_srf_attributes {
+ __NL80211_NAN_SRF_INVALID,
+ NL80211_NAN_SRF_INCLUDE,
+ NL80211_NAN_SRF_BF,
+ NL80211_NAN_SRF_BF_IDX,
+ NL80211_NAN_SRF_MAC_ADDRS,
+
+ /* keep last */
+ NUM_NL80211_NAN_SRF_ATTR,
+ NL80211_NAN_SRF_ATTR_MAX = NUM_NL80211_NAN_SRF_ATTR - 1,
+};
+
+/**
+ * enum nl80211_nan_match_attributes - NAN match attributes
+ * @__NL80211_NAN_MATCH_INVALID: invalid
+ * @NL80211_NAN_MATCH_FUNC_LOCAL: the local function that had the
+ * match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ * @NL80211_NAN_MATCH_FUNC_PEER: the peer function
+ * that caused the match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ *
+ * @NUM_NL80211_NAN_MATCH_ATTR: internal
+ * @NL80211_NAN_MATCH_ATTR_MAX: highest NAN match attribute
+ */
+enum nl80211_nan_match_attributes {
+ __NL80211_NAN_MATCH_INVALID,
+ NL80211_NAN_MATCH_FUNC_LOCAL,
+ NL80211_NAN_MATCH_FUNC_PEER,
+
+ /* keep last */
+ NUM_NL80211_NAN_MATCH_ATTR,
+ NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1
+};
+
#endif /* __LINUX_NL80211_H */
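[Illustrative sketch, not part of the patch] What a userspace request for the new NL80211_CMD_START_NAN might look like with libnl-3 generic netlink helpers. Socket setup, family-id resolution, send/ACK handling and error checks are omitted; the wdev id and attribute values are examples.

#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static struct nl_msg *build_start_nan(int nl80211_family_id, uint64_t wdev_id)
{
	struct nl_msg *msg = nlmsg_alloc();

	if (!msg)
		return NULL;

	genlmsg_put(msg, 0, 0, nl80211_family_id, 0, 0,
		    NL80211_CMD_START_NAN, 0);
	nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id);
	/* master preference: 1 and 255 are reserved for certification */
	nla_put_u8(msg, NL80211_ATTR_NAN_MASTER_PREF, 2);
	nla_put_u8(msg, NL80211_ATTR_NAN_DUAL, NL80211_NAN_BAND_DEFAULT);
	return msg;
}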
diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h
new file mode 100644
index 000000000000..3af617230d1b
--- /dev/null
+++ b/include/uapi/linux/nsfs.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_NSFS_H
+#define __LINUX_NSFS_H
+
+#include <linux/ioctl.h>
+
+#define NSIO 0xb7
+
+/* Returns a file descriptor that refers to an owning user namespace */
+#define NS_GET_USERNS _IO(NSIO, 0x1)
+/* Returns a file descriptor that refers to a parent namespace */
+#define NS_GET_PARENT _IO(NSIO, 0x2)
+
+#endif /* __LINUX_NSFS_H */
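[Illustrative sketch, not part of the patch] Obtaining the owning user namespace of a namespace file with the new NS_GET_USERNS ioctl; path and error handling are simplified.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nsfs.h>

static int open_owning_userns(const char *ns_path)
{
	int fd = open(ns_path, O_RDONLY);	/* e.g. "/proc/self/ns/net" */
	int userns_fd;

	if (fd < 0)
		return -1;

	userns_fd = ioctl(fd, NS_GET_USERNS);	/* new fd, or -1 with errno set */
	close(fd);
	return userns_fd;
}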
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 54c3b4f4aceb..59ed3992c760 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -605,13 +605,13 @@ struct ovs_action_push_mpls {
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
* (but it will not be set in the 802.1Q header that is pushed).
*
- * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
- * values are those that the kernel module also parses as 802.1Q headers, to
- * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
- * from having surprising results.
+ * The @vlan_tpid value is typically %ETH_P_8021Q or %ETH_P_8021AD.
+ * The only acceptable TPID values are those that the kernel module also parses
+ * as 802.1Q or 802.1AD headers, to prevent %OVS_ACTION_ATTR_PUSH_VLAN followed
+ * by %OVS_ACTION_ATTR_POP_VLAN from having surprising results.
*/
struct ovs_action_push_vlan {
- __be16 vlan_tpid; /* 802.1Q TPID. */
+ __be16 vlan_tpid; /* 802.1Q or 802.1ad TPID. */
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
@@ -721,9 +721,10 @@ enum ovs_nat_attr {
* is copied from the value to the packet header field, rest of the bits are
* left unchanged. The non-masked value bits must be passed in as zeroes.
* Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
- * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
- * packet.
- * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q or 802.1ad header
+ * onto the packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q or 802.1ad header
+ * from the packet.
* @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 404095124ae2..d812172d1d7b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -671,7 +671,8 @@
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DPC
+#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -964,4 +965,13 @@
#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+/* Precision Time Measurement */
+#define PCI_PTM_CAP 0x04 /* PTM Capability */
+#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */
+#define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */
+#define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */
+#define PCI_PTM_CTRL 0x08 /* PTM Control */
+#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */
+#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */
+
#endif /* LINUX_PCI_REGS_H */
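[Illustrative sketch, not part of the patch] How a kernel-side helper could use the new PTM register definitions to enable PTM on a device; this mirrors the usual extended-capability access pattern and is not the actual PTM enablement code.

#include <linux/pci.h>

static int example_enable_ptm(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	u32 cap, ctrl;

	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
	if (!(cap & PCI_PTM_CAP_REQ))
		return -EINVAL;

	pci_read_config_dword(dev, pos + PCI_PTM_CTRL, &ctrl);
	ctrl |= PCI_PTM_CTRL_ENABLE;
	pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
	return 0;
}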
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d1c1ccaba787..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
__TCA_BPF_MAX,
};
@@ -428,6 +429,24 @@ enum {
TCA_FLOWER_KEY_UDP_DST, /* be16 */
TCA_FLOWER_FLAGS,
+ TCA_FLOWER_KEY_VLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
__TCA_FQ_MAX
};
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
- __u32 pad;
+ __u32 unthrottle_latency_ns;
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 1e5ac4e776da..b4c04842a8c0 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -376,5 +376,13 @@
#define UART_EXAR_TXTRG 0x0a /* Tx FIFO trigger level write-only */
#define UART_EXAR_RXTRG 0x0b /* Rx FIFO trigger level write-only */
+/*
+ * These are definitions for the Altera ALTR_16550_F32/F64/F128
+ * Normalized from 0x100 to 0x40 because of shift by 2 (32 bit regs).
+ */
+#define UART_ALTR_AFR 0x40 /* Additional Features Register */
+#define UART_ALTR_EN_TXFIFO_LW 0x01 /* Enable the TX FIFO Low Watermark */
+#define UART_ALTR_TX_LOW 0x41 /* Tx FIFO Low Watermark */
+
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 25a9ad8bcef1..e7a31f830690 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -235,6 +235,7 @@ enum
LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
+ LINUX_MIB_TCPMD5FAILURE, /* TCPMD5Failure */
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
#define IFE_META_HASHID 2
#define IFE_META_PRIO 3
#define IFE_META_QMAP 4
+#define IFE_META_TCINDEX 5
/*Can be overridden at runtime by module option*/
-#define __IFE_META_MAX 5
+#define __IFE_META_MAX 6
#define IFE_META_MAX (__IFE_META_MAX - 1)
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..10fc07da6c69
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __LINUX_TC_SKBMOD_H
+#define __LINUX_TC_SKBMOD_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBMOD 15
+
+#define SKBMOD_F_DMAC 0x1
+#define SKBMOD_F_SMAC 0x2
+#define SKBMOD_F_ETYPE 0x4
+#define SKBMOD_F_SWAPMAC 0x8
+
+struct tc_skbmod {
+ tc_gen;
+ __u64 flags;
+};
+
+enum {
+ TCA_SKBMOD_UNSPEC,
+ TCA_SKBMOD_TM,
+ TCA_SKBMOD_PARMS,
+ TCA_SKBMOD_DMAC,
+ TCA_SKBMOD_SMAC,
+ TCA_SKBMOD_ETYPE,
+ TCA_SKBMOD_PAD,
+ __TCA_SKBMOD_MAX
+};
+#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..890106ff16e6
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_TUNNEL_KEY_H
+#define __LINUX_TC_TUNNEL_KEY_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_TUNNEL_KEY 17
+
+#define TCA_TUNNEL_KEY_ACT_SET 1
+#define TCA_TUNNEL_KEY_ACT_RELEASE 2
+
+struct tc_tunnel_key {
+ tc_gen;
+ int t_action;
+};
+
+enum {
+ TCA_TUNNEL_KEY_UNSPEC,
+ TCA_TUNNEL_KEY_TM,
+ TCA_TUNNEL_KEY_PARMS,
+ TCA_TUNNEL_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
+ TCA_TUNNEL_KEY_PAD,
+ __TCA_TUNNEL_KEY_MAX,
+};
+
+#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 31151ff6264f..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
struct tc_vlan {
tc_gen;
@@ -29,6 +30,7 @@ enum {
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
+ TCA_VLAN_PUSH_VLAN_PRIORITY,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 5f3f6d09fb79..f9edd20fe9ba 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -59,6 +59,9 @@ enum {
TIPC_NL_MON_SET,
TIPC_NL_MON_GET,
TIPC_NL_MON_PEER_GET,
+ TIPC_NL_PEER_REMOVE,
+ TIPC_NL_BEARER_ADD,
+ TIPC_NL_UDP_GET_REMOTEIP,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -98,6 +101,7 @@ enum {
TIPC_NLA_UDP_UNSPEC,
TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+ TIPC_NLA_UDP_MULTI_REMOTEIP, /* flag */
__TIPC_NLA_UDP_MAX,
TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 108dd7997014..acc63697a0cc 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -21,6 +21,8 @@ enum functionfs_flags {
FUNCTIONFS_HAS_MS_OS_DESC = 8,
FUNCTIONFS_VIRTUAL_ADDR = 16,
FUNCTIONFS_EVENTFD = 32,
+ FUNCTIONFS_ALL_CTRL_RECIP = 64,
+ FUNCTIONFS_CONFIG0_SETUP = 128,
};
/* Descriptor of a non-audio endpoint */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
XFRMA_TFCPAD, /* __u32 */
- XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 4edb0f2b4f9f..f14ab7ff5fee 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -7,3 +7,10 @@ header-y += rdma_netlink.h
header-y += rdma_user_cm.h
header-y += hfi/
header-y += rdma_user_rxe.h
+header-y += cxgb3-abi.h
+header-y += cxgb4-abi.h
+header-y += mlx4-abi.h
+header-y += mlx5-abi.h
+header-y += mthca-abi.h
+header-y += nes-abi.h
+header-y += ocrdma-abi.h
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/include/uapi/rdma/cxgb3-abi.h
index a277c31fcaf7..48a19bda071b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -29,8 +29,10 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __IWCH_USER_H__
-#define __IWCH_USER_H__
+#ifndef CXGB3_ABI_USER_H
+#define CXGB3_ABI_USER_H
+
+#include <linux/types.h>
#define IWCH_UVERBS_ABI_VERSION 1
@@ -71,4 +73,4 @@ struct iwch_create_qp_resp {
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
-#endif
+#endif /* CXGB3_ABI_USER_H */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/include/uapi/rdma/cxgb4-abi.h
index 295f422b9a3a..472b15990894 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -29,8 +29,10 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __C4IW_USER_H__
-#define __C4IW_USER_H__
+#ifndef CXGB4_ABI_USER_H
+#define CXGB4_ABI_USER_H
+
+#include <linux/types.h>
#define C4IW_UVERBS_ABI_VERSION 3
@@ -51,9 +53,8 @@ struct c4iw_create_cq_resp {
__u32 reserved; /* explicit padding (optional for i386) */
};
-
enum {
- C4IW_QPF_ONCHIP = (1<<0)
+ C4IW_QPF_ONCHIP = (1 << 0)
};
struct c4iw_create_qp_resp {
@@ -77,4 +78,4 @@ struct c4iw_alloc_ucontext_resp {
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
-#endif
+#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7f035f4b53b0..25225ebbc7d5 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -224,6 +224,17 @@ struct ib_uverbs_odp_caps {
__u32 reserved;
};
+struct ib_uverbs_rss_caps {
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+ __u32 max_rwq_indirection_tables;
+ __u32 max_rwq_indirection_table_size;
+ __u32 reserved;
+};
+
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
@@ -232,6 +243,9 @@ struct ib_uverbs_ex_query_device_resp {
__u64 timestamp_mask;
__u64 hca_core_clock; /* in KHZ */
__u64 device_cap_flags_ex;
+ struct ib_uverbs_rss_caps rss_caps;
+ __u32 max_wq_type_rq;
+ __u32 reserved;
};
struct ib_uverbs_query_port {
@@ -834,6 +848,10 @@ struct ib_uverbs_flow_spec_eth {
struct ib_uverbs_flow_ipv4_filter {
__be32 src_ip;
__be32 dst_ip;
+ __u8 proto;
+ __u8 tos;
+ __u8 ttl;
+ __u8 flags;
};
struct ib_uverbs_flow_spec_ipv4 {
@@ -868,8 +886,13 @@ struct ib_uverbs_flow_spec_tcp_udp {
};
struct ib_uverbs_flow_ipv6_filter {
- __u8 src_ip[16];
- __u8 dst_ip[16];
+ __u8 src_ip[16];
+ __u8 dst_ip[16];
+ __be32 flow_label;
+ __u8 next_hdr;
+ __u8 traffic_class;
+ __u8 hop_limit;
+ __u8 reserved;
};
struct ib_uverbs_flow_spec_ipv6 {
diff --git a/drivers/infiniband/hw/mlx4/user.h b/include/uapi/rdma/mlx4-abi.h
index 07e6769ef43b..af431752655c 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -31,8 +31,8 @@
* SOFTWARE.
*/
-#ifndef MLX4_IB_USER_H
-#define MLX4_IB_USER_H
+#ifndef MLX4_ABI_USER_H
+#define MLX4_ABI_USER_H
#include <linux/types.h>
@@ -104,4 +104,4 @@ struct mlx4_ib_create_qp {
__u8 reserved[5];
};
-#endif /* MLX4_IB_USER_H */
+#endif /* MLX4_ABI_USER_H */
diff --git a/drivers/infiniband/hw/mlx5/user.h b/include/uapi/rdma/mlx5-abi.h
index 188dac4301b5..f5d0f4e83b59 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -30,13 +30,11 @@
* SOFTWARE.
*/
-#ifndef MLX5_IB_USER_H
-#define MLX5_IB_USER_H
+#ifndef MLX5_ABI_USER_H
+#define MLX5_ABI_USER_H
#include <linux/types.h>
-#include "mlx5_ib.h"
-
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
@@ -50,7 +48,6 @@ enum {
MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
-
/* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
@@ -121,10 +118,17 @@ struct mlx5_ib_tso_caps {
__u32 supported_qpts;
};
+struct mlx5_ib_rss_caps {
+ __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct mlx5_ib_tso_caps tso_caps;
+ struct mlx5_ib_rss_caps rss_caps;
};
struct mlx5_ib_create_cq {
@@ -242,40 +246,4 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
-
-static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
- struct mlx5_ib_create_qp *ucmd,
- int inlen,
- u32 *user_index)
-{
- u8 cqe_version = ucontext->cqe_version;
-
- if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
- return 0;
-
- if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
- !!cqe_version))
- return -EINVAL;
-
- return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
-}
-
-static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
- struct mlx5_ib_create_srq *ucmd,
- int inlen,
- u32 *user_index)
-{
- u8 cqe_version = ucontext->cqe_version;
-
- if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
- return 0;
-
- if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
- !!cqe_version))
- return -EINVAL;
-
- return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
-}
-#endif /* MLX5_IB_USER_H */
+#endif /* MLX5_ABI_USER_H */
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
new file mode 100644
index 000000000000..bcbf4ff2f6d1
--- /dev/null
+++ b/include/uapi/rdma/mthca-abi.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MTHCA_ABI_USER_H
+#define MTHCA_ABI_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MTHCA_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct mthca_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 uarc_size;
+};
+
+struct mthca_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+/*
+ * Mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to:
+ */
+#define MTHCA_MR_DMASYNC 0x1
+
+struct mthca_reg_mr {
+ __u32 mr_attrs;
+ __u32 reserved;
+};
+
+struct mthca_create_cq {
+ __u32 lkey;
+ __u32 pdn;
+ __u64 arm_db_page;
+ __u64 set_db_page;
+ __u32 arm_db_index;
+ __u32 set_db_index;
+};
+
+struct mthca_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mthca_resize_cq {
+ __u32 lkey;
+ __u32 reserved;
+};
+
+struct mthca_create_srq {
+ __u32 lkey;
+ __u32 db_index;
+ __u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mthca_create_qp {
+ __u32 lkey;
+ __u32 reserved;
+ __u64 sq_db_page;
+ __u64 rq_db_page;
+ __u32 sq_db_index;
+ __u32 rq_db_index;
+};
+#endif /* MTHCA_ABI_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/include/uapi/rdma/nes-abi.h
index 529c421bb15c..6eb3734394a2 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -34,8 +34,8 @@
*
*/
-#ifndef NES_USER_H
-#define NES_USER_H
+#ifndef NES_ABI_USER_H
+#define NES_ABI_USER_H
#include <linux/types.h>
@@ -111,4 +111,4 @@ struct nes_create_qp_resp {
__u32 nes_drv_opt;
};
-#endif /* NES_USER_H */
+#endif /* NES_ABI_USER_H */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/include/uapi/rdma/ocrdma-abi.h
index 430b1350fe96..9f28191bef4d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -40,110 +40,112 @@
* Costa Mesa, CA 92626
*/
-#ifndef __OCRDMA_ABI_H__
-#define __OCRDMA_ABI_H__
+#ifndef OCRDMA_ABI_USER_H
+#define OCRDMA_ABI_USER_H
+
+#include <linux/types.h>
#define OCRDMA_ABI_VERSION 2
#define OCRDMA_BE_ROCE_ABI_VERSION 1
/* user kernel communication data structures. */
struct ocrdma_alloc_ucontext_resp {
- u32 dev_id;
- u32 wqe_size;
- u32 max_inline_data;
- u32 dpp_wqe_size;
- u64 ah_tbl_page;
- u32 ah_tbl_len;
- u32 rqe_size;
- u8 fw_ver[32];
+ __u32 dev_id;
+ __u32 wqe_size;
+ __u32 max_inline_data;
+ __u32 dpp_wqe_size;
+ __u64 ah_tbl_page;
+ __u32 ah_tbl_len;
+ __u32 rqe_size;
+ __u8 fw_ver[32];
/* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
+ __u64 rsvd1;
+ __u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
- u64 rsvd1;
+ __u64 rsvd1;
};
struct ocrdma_alloc_pd_uresp {
- u32 id;
- u32 dpp_enabled;
- u32 dpp_page_addr_hi;
- u32 dpp_page_addr_lo;
- u64 rsvd1;
+ __u32 id;
+ __u32 dpp_enabled;
+ __u32 dpp_page_addr_hi;
+ __u32 dpp_page_addr_lo;
+ __u64 rsvd1;
};
struct ocrdma_create_cq_ureq {
- u32 dpp_cq;
- u32 rsvd; /* pad */
+ __u32 dpp_cq;
+ __u32 rsvd; /* pad */
};
#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
- u32 cq_id;
- u32 page_size;
- u32 num_pages;
- u32 max_hw_cqe;
- u64 page_addr[MAX_CQ_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 phase_change;
+ __u32 cq_id;
+ __u32 page_size;
+ __u32 num_pages;
+ __u32 max_hw_cqe;
+ __u64 page_addr[MAX_CQ_PAGES];
+ __u64 db_page_addr;
+ __u32 db_page_size;
+ __u32 phase_change;
/* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
+ __u64 rsvd1;
+ __u64 rsvd2;
};
#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8
struct ocrdma_create_qp_ureq {
- u8 enable_dpp_cq;
- u8 rsvd;
- u16 dpp_cq_id;
- u32 rsvd1; /* pad */
+ __u8 enable_dpp_cq;
+ __u8 rsvd;
+ __u16 dpp_cq_id;
+ __u32 rsvd1; /* pad */
};
struct ocrdma_create_qp_uresp {
- u16 qp_id;
- u16 sq_dbid;
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 sq_page_size;
- u32 rq_page_size;
- u32 num_sq_pages;
- u32 num_rq_pages;
- u64 sq_page_addr[MAX_QP_PAGES];
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 dpp_credit;
- u32 dpp_offset;
- u32 num_wqe_allocated;
- u32 num_rqe_allocated;
- u32 db_sq_offset;
- u32 db_rq_offset;
- u32 db_shift;
- u64 rsvd[11];
+ __u16 qp_id;
+ __u16 sq_dbid;
+ __u16 rq_dbid;
+ __u16 resv0; /* pad */
+ __u32 sq_page_size;
+ __u32 rq_page_size;
+ __u32 num_sq_pages;
+ __u32 num_rq_pages;
+ __u64 sq_page_addr[MAX_QP_PAGES];
+ __u64 rq_page_addr[MAX_QP_PAGES];
+ __u64 db_page_addr;
+ __u32 db_page_size;
+ __u32 dpp_credit;
+ __u32 dpp_offset;
+ __u32 num_wqe_allocated;
+ __u32 num_rqe_allocated;
+ __u32 db_sq_offset;
+ __u32 db_rq_offset;
+ __u32 db_shift;
+ __u64 rsvd[11];
} __packed;
struct ocrdma_create_srq_uresp {
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 resv1;
+ __u16 rq_dbid;
+ __u16 resv0; /* pad */
+ __u32 resv1;
- u32 rq_page_size;
- u32 num_rq_pages;
+ __u32 rq_page_size;
+ __u32 num_rq_pages;
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
+ __u64 rq_page_addr[MAX_QP_PAGES];
+ __u64 db_page_addr;
- u32 db_page_size;
- u32 num_rqe_allocated;
- u32 db_rq_offset;
- u32 db_shift;
+ __u32 db_page_size;
+ __u32 num_rqe_allocated;
+ __u32 db_rq_offset;
+ __u32 db_shift;
- u64 rsvd2;
- u64 rsvd3;
+ __u64 rsvd2;
+ __u64 rsvd3;
};
-#endif /* __OCRDMA_ABI_H__ */
+#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 2302f3ce5f86..6bf1f8a022b1 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -39,19 +39,28 @@ struct dk_cxlflash_hdr {
* at this time, this provides future flexibility.
*/
#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
+#define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL
/*
- * Notes:
- * -----
+ * General Notes:
+ * -------------
* The 'context_id' field of all ioctl structures contains the context
* identifier for a context in the lower 32-bits (upper 32-bits are not
* to be used when identifying a context to the AFU). That said, the value
* in its entirety (all 64-bits) is to be treated as an opaque cookie and
* should be presented as such when issuing ioctls.
+ */
+
+/*
+ * DK_CXLFLASH_ATTACH Notes:
+ * ------------------------
+ * Read/write access permissions are specified via the O_RDONLY, O_WRONLY,
+ * and O_RDWR flags defined in the fcntl.h header file.
*
- * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
- * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
- * the fcntl.h header file.
+ * A valid adapter file descriptor (fd >= 0) is only returned on the initial
+ * attach (successful) of a context. When a context is shared (reused), the user
+ * is expected to already 'know' the adapter file descriptor associated with the
+ * context.
*/
#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index 691984cb0b91..9578d8bdbf31 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -13,3 +13,4 @@ header-y += sb16_csp.h
header-y += sfnt_info.h
header-y += tlv.h
header-y += usb_stream.h
+header-y += snd_sst_tokens.h
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index e4701a3c6331..33d00a4ce656 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -83,7 +83,7 @@
#define SND_SOC_TPLG_NUM_TEXTS 16
/* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION 0x4
+#define SND_SOC_TPLG_ABI_VERSION 0x5
/* Max size of TLV data */
#define SND_SOC_TPLG_TLV_SIZE 32
@@ -105,7 +105,8 @@
#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
#define SND_SOC_TPLG_TYPE_BACKEND_LINK 10
#define SND_SOC_TPLG_TYPE_PDATA 11
-#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
+#define SND_SOC_TPLG_TYPE_BE_DAI 12
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_BE_DAI
/* vendor block IDs - please add new vendor types to end */
#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -124,6 +125,11 @@
#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
+/* BE DAI flags */
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES (1 << 0)
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -251,6 +257,7 @@ struct snd_soc_tplg_stream_caps {
__le32 period_size_max; /* max period size bytes */
__le32 buffer_size_min; /* min buffer size bytes */
__le32 buffer_size_max; /* max buffer size bytes */
+ __le32 sig_bits; /* number of bits of content */
} __attribute__((packed));
/*
@@ -285,6 +292,8 @@ struct snd_soc_tplg_manifest {
__le32 graph_elems; /* number of graph elements */
__le32 pcm_elems; /* number of PCM elements */
__le32 dai_link_elems; /* number of DAI link elements */
+ __le32 be_dai_elems; /* number of BE DAI elements */
+ __le32 reserved[20]; /* reserved for new ABI element types */
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -450,4 +459,26 @@ struct snd_soc_tplg_link_config {
struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs for playback and capture */
__le32 num_streams; /* number of streams */
} __attribute__((packed));
+
+/*
+ * Describes SW/FW specific features of BE DAI.
+ *
+ * File block representation for BE DAI :-
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_be_dai | N |
+ * +-----------------------------------+-----+
+ */
+struct snd_soc_tplg_be_dai {
+ __le32 size; /* in bytes of this structure */
+ char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
+ __le32 dai_id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ struct snd_soc_tplg_stream_caps caps[2]; /* playback and capture for DAI */
+ __le32 flag_mask; /* bitmask of flags to configure */
+ __le32 flags; /* SND_SOC_TPLG_DAI_FLGBIT_* */
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
#endif
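The file-block comment above says a BE DAI block is one snd_soc_tplg_hdr followed by N snd_soc_tplg_be_dai entries. A minimal user-space sketch of walking such a block, assuming hdr->count carries N, the entries carry no trailing private data, and a little-endian host (all three are assumptions, not stated in this patch):

/* Sketch only: iterate fixed-size BE DAI entries behind one block header.
 * Entries with priv data would have to be advanced by dai->size instead
 * of sizeof(*dai).  Little-endian host assumed for the __le32 fields. */
#include <stdio.h>
#include <sound/asound.h>
#include <sound/asoc.h>

static void dump_be_dais(const struct snd_soc_tplg_hdr *hdr)
{
	const struct snd_soc_tplg_be_dai *dai =
		(const struct snd_soc_tplg_be_dai *)(hdr + 1);
	unsigned int i;

	for (i = 0; i < hdr->count; i++, dai++)
		printf("BE DAI %u: %s playback=%u capture=%u flags=%#x\n",
		       dai->dai_id, dai->dai_name,
		       dai->playback, dai->capture, dai->flags);
}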
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 609cadb8739d..be353a78c303 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -106,9 +106,10 @@ enum {
SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */
SNDRV_HWDEP_IFACE_FW_DIGI00X, /* Digidesign Digi 002/003 family */
SNDRV_HWDEP_IFACE_FW_TASCAM, /* TASCAM FireWire series */
+ SNDRV_HWDEP_IFACE_LINE6, /* Line6 USB processors */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_TASCAM
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_LINE6
};
struct snd_hwdep_info {
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
new file mode 100644
index 000000000000..1ee2e943d66a
--- /dev/null
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -0,0 +1,214 @@
+/*
+ * snd_sst_tokens.h - Intel SST tokens definition
+ *
+ * Copyright (C) 2016 Intel Corp
+ * Author: Shreyas NC <shreyas.nc@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __SND_SST_TOKENS_H__
+#define __SND_SST_TOKENS_H__
+
+/**
+ * %SKL_TKN_UUID: Module UUID
+ *
+ * %SKL_TKN_U8_BLOCK_TYPE:	Type of the private data block. Can be:
+ *				tuples, bytes, shorts and words
+ *
+ * %SKL_TKN_U8_IN_PIN_TYPE: Input pin type,
+ *				homogeneous=0, heterogeneous=1
+ *
+ * %SKL_TKN_U8_OUT_PIN_TYPE: Output pin type,
+ *				homogeneous=0, heterogeneous=1
+ * %SKL_TKN_U8_DYN_IN_PIN: Configure Input pin dynamically
+ * if true
+ *
+ * %SKL_TKN_U8_DYN_OUT_PIN: Configure Output pin dynamically
+ * if true
+ *
+ * %SKL_TKN_U8_IN_QUEUE_COUNT: Store the number of Input pins
+ *
+ * %SKL_TKN_U8_OUT_QUEUE_COUNT: Store the number of Output pins
+ *
+ * %SKL_TKN_U8_TIME_SLOT: TDM slot number
+ *
+ * %SKL_TKN_U8_CORE_ID:	Stores module affinity value. Can take
+ * the values:
+ * SKL_AFFINITY_CORE_0 = 0,
+ * SKL_AFFINITY_CORE_1,
+ * SKL_AFFINITY_CORE_MAX
+ *
+ * %SKL_TKN_U8_MOD_TYPE: Module type value.
+ *
+ * %SKL_TKN_U8_CONN_TYPE: Module connection type can be a FE,
+ *				BE or NONE as defined:
+ * SKL_PIPE_CONN_TYPE_NONE = 0,
+ * SKL_PIPE_CONN_TYPE_FE = 1 (HOST_DMA)
+ * SKL_PIPE_CONN_TYPE_BE = 2 (LINK_DMA)
+ *
+ * %SKL_TKN_U8_DEV_TYPE: Type of device to which the module is
+ * connected
+ * Can take the values:
+ * SKL_DEVICE_BT = 0x0,
+ * SKL_DEVICE_DMIC = 0x1,
+ * SKL_DEVICE_I2S = 0x2,
+ * SKL_DEVICE_SLIMBUS = 0x3,
+ * SKL_DEVICE_HDALINK = 0x4,
+ * SKL_DEVICE_HDAHOST = 0x5,
+ * SKL_DEVICE_NONE
+ *
+ * %SKL_TKN_U8_HW_CONN_TYPE: Connection type of the HW to which the
+ * module is connected
+ * SKL_CONN_NONE = 0,
+ * SKL_CONN_SOURCE = 1,
+ * SKL_CONN_SINK = 2
+ *
+ * %SKL_TKN_U16_PIN_INST_ID: Stores the pin instance id
+ *
+ * %SKL_TKN_U16_MOD_INST_ID:	Stores the module instance id
+ *
+ * %SKL_TKN_U32_MAX_MCPS: Module max mcps value
+ *
+ * %SKL_TKN_U32_MEM_PAGES: Module resource pages
+ *
+ * %SKL_TKN_U32_OBS: Stores Output Buffer size
+ *
+ * %SKL_TKN_U32_IBS: Stores input buffer size
+ *
+ * %SKL_TKN_U32_VBUS_ID: Module VBUS_ID. PDM=0, SSP0=0,
+ * SSP1=1,SSP2=2,
+ * SSP3=3, SSP4=4,
+ * SSP5=5, SSP6=6,INVALID
+ *
+ * %SKL_TKN_U32_PARAMS_FIXUP: Module Params fixup mask
+ * %SKL_TKN_U32_CONVERTER: Module params converter mask
+ * %SKL_TKN_U32_PIPE_ID: Stores the pipe id
+ *
+ * %SKL_TKN_U32_PIPE_CONN_TYPE: Type of the token to which the pipe is
+ *				connected. It can be
+ * SKL_PIPE_CONN_TYPE_NONE = 0,
+ * SKL_PIPE_CONN_TYPE_FE = 1 (HOST_DMA),
+ * SKL_PIPE_CONN_TYPE_BE = 2 (LINK_DMA),
+ *
+ * %SKL_TKN_U32_PIPE_PRIORITY: Pipe priority value
+ * %SKL_TKN_U32_PIPE_MEM_PGS: Pipe resource pages
+ *
+ * %SKL_TKN_U32_DIR_PIN_COUNT: Value for the direction to set input/output
+ * formats and the pin count.
+ * The first 4 bits have the direction
+ * value and the next 4 have
+ * the pin count value.
+ * SKL_DIR_IN = 0, SKL_DIR_OUT = 1.
+ * The input and output formats
+ * share the same set of tokens
+ * with the distinction between input
+ * and output made by reading direction
+ * token.
+ *
+ * %SKL_TKN_U32_FMT_CH: Supported channel count
+ *
+ * %SKL_TKN_U32_FMT_FREQ: Supported frequency/sample rate
+ *
+ * %SKL_TKN_U32_FMT_BIT_DEPTH: Supported container size
+ *
+ * %SKL_TKN_U32_FMT_SAMPLE_SIZE: Number of samples in the container
+ *
+ * %SKL_TKN_U32_FMT_CH_CONFIG: Supported channel configurations for the
+ * input/output.
+ *
+ * %SKL_TKN_U32_FMT_INTERLEAVE: Interleaving style which can be per
+ * channel or per sample. The values can be :
+ * SKL_INTERLEAVING_PER_CHANNEL = 0,
+ * SKL_INTERLEAVING_PER_SAMPLE = 1,
+ *
+ * %SKL_TKN_U32_FMT_SAMPLE_TYPE:
+ * Specifies the sample type. Can take the
+ * values: SKL_SAMPLE_TYPE_INT_MSB = 0,
+ * SKL_SAMPLE_TYPE_INT_LSB = 1,
+ * SKL_SAMPLE_TYPE_INT_SIGNED = 2,
+ * SKL_SAMPLE_TYPE_INT_UNSIGNED = 3,
+ * SKL_SAMPLE_TYPE_FLOAT = 4
+ *
+ * %SKL_TKN_U32_CH_MAP: Channel map values
+ * %SKL_TKN_U32_MOD_SET_PARAMS: It can take these values:
+ * SKL_PARAM_DEFAULT, SKL_PARAM_INIT,
+ * SKL_PARAM_SET, SKL_PARAM_BIND
+ *
+ * %SKL_TKN_U32_MOD_PARAM_ID: ID of the module params
+ *
+ * %SKL_TKN_U32_CAPS_SET_PARAMS:
+ * Set params value
+ *
+ * %SKL_TKN_U32_CAPS_PARAMS_ID: Params ID
+ *
+ * %SKL_TKN_U32_CAPS_SIZE: Caps size
+ *
+ * %SKL_TKN_U32_PROC_DOMAIN: Specify processing domain
+ *
+ * %SKL_TKN_U32_LIB_COUNT: Specifies the number of libraries
+ *
+ * %SKL_TKN_STR_LIB_NAME: Specifies the library name
+ *
+ * module_id and loadable flags don't have tokens as these values will be
+ * read from the DSP FW manifest
+ */
+enum SKL_TKNS {
+ SKL_TKN_UUID = 1,
+ SKL_TKN_U8_NUM_BLOCKS,
+ SKL_TKN_U8_BLOCK_TYPE,
+ SKL_TKN_U8_IN_PIN_TYPE,
+ SKL_TKN_U8_OUT_PIN_TYPE,
+ SKL_TKN_U8_DYN_IN_PIN,
+ SKL_TKN_U8_DYN_OUT_PIN,
+ SKL_TKN_U8_IN_QUEUE_COUNT,
+ SKL_TKN_U8_OUT_QUEUE_COUNT,
+ SKL_TKN_U8_TIME_SLOT,
+ SKL_TKN_U8_CORE_ID,
+ SKL_TKN_U8_MOD_TYPE,
+ SKL_TKN_U8_CONN_TYPE,
+ SKL_TKN_U8_DEV_TYPE,
+ SKL_TKN_U8_HW_CONN_TYPE,
+ SKL_TKN_U16_MOD_INST_ID,
+ SKL_TKN_U16_BLOCK_SIZE,
+ SKL_TKN_U32_MAX_MCPS,
+ SKL_TKN_U32_MEM_PAGES,
+ SKL_TKN_U32_OBS,
+ SKL_TKN_U32_IBS,
+ SKL_TKN_U32_VBUS_ID,
+ SKL_TKN_U32_PARAMS_FIXUP,
+ SKL_TKN_U32_CONVERTER,
+ SKL_TKN_U32_PIPE_ID,
+ SKL_TKN_U32_PIPE_CONN_TYPE,
+ SKL_TKN_U32_PIPE_PRIORITY,
+ SKL_TKN_U32_PIPE_MEM_PGS,
+ SKL_TKN_U32_DIR_PIN_COUNT,
+ SKL_TKN_U32_FMT_CH,
+ SKL_TKN_U32_FMT_FREQ,
+ SKL_TKN_U32_FMT_BIT_DEPTH,
+ SKL_TKN_U32_FMT_SAMPLE_SIZE,
+ SKL_TKN_U32_FMT_CH_CONFIG,
+ SKL_TKN_U32_FMT_INTERLEAVE,
+ SKL_TKN_U32_FMT_SAMPLE_TYPE,
+ SKL_TKN_U32_FMT_CH_MAP,
+ SKL_TKN_U32_PIN_MOD_ID,
+ SKL_TKN_U32_PIN_INST_ID,
+ SKL_TKN_U32_MOD_SET_PARAMS,
+ SKL_TKN_U32_MOD_PARAM_ID,
+ SKL_TKN_U32_CAPS_SET_PARAMS,
+ SKL_TKN_U32_CAPS_PARAMS_ID,
+ SKL_TKN_U32_CAPS_SIZE,
+ SKL_TKN_U32_PROC_DOMAIN,
+ SKL_TKN_U32_LIB_COUNT,
+ SKL_TKN_STR_LIB_NAME,
+ SKL_TKN_MAX = SKL_TKN_STR_LIB_NAME,
+};
+
+#endif
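These tokens reach the driver as topology vendor tuples. A hedged kernel-side sketch of how a parser might consume a few of the word-sized tokens; struct my_module_cfg and its fields are invented for illustration, while snd_soc_tplg_vendor_value_elem and the SKL_TKN_* names come from the UAPI headers:

/* Sketch: map selected word tuples onto a made-up private config. */
struct my_module_cfg {
	u32 max_mcps;
	u32 obs;
	u32 ibs;
	u32 pipe_id;
};

static int my_parse_word_token(struct my_module_cfg *cfg,
			       const struct snd_soc_tplg_vendor_value_elem *elem)
{
	switch (le32_to_cpu(elem->token)) {
	case SKL_TKN_U32_MAX_MCPS:
		cfg->max_mcps = le32_to_cpu(elem->value);
		break;
	case SKL_TKN_U32_OBS:
		cfg->obs = le32_to_cpu(elem->value);
		break;
	case SKL_TKN_U32_IBS:
		cfg->ibs = le32_to_cpu(elem->value);
		break;
	case SKL_TKN_U32_PIPE_ID:
		cfg->pipe_id = le32_to_cpu(elem->value);
		break;
	default:
		return -EINVAL;	/* unknown/unsupported token */
	}
	return 0;
}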
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index ffc4f203146c..b4df440c015b 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -28,4 +28,73 @@
#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
+/*
+ * TLV structure is right behind the struct snd_ctl_tlv:
+ * unsigned int type - see SNDRV_CTL_TLVT_*
+ * unsigned int length
+ * .... data aligned to sizeof(unsigned int), use
+ * block_length = (length + (sizeof(unsigned int) - 1)) &
+ * ~(sizeof(unsigned int) - 1) ....
+ */
+#define SNDRV_CTL_TLVD_ITEM(type, ...) \
+ (type), SNDRV_CTL_TLVD_LENGTH(__VA_ARGS__), __VA_ARGS__
+#define SNDRV_CTL_TLVD_LENGTH(...) \
+ ((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+
+#define SNDRV_CTL_TLVD_CONTAINER_ITEM(...) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
+#define SNDRV_CTL_TLVD_DECLARE_CONTAINER(name, ...) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_CONTAINER_ITEM(__VA_ARGS__) \
+ }
+
+#define SNDRV_CTL_TLVD_DB_SCALE_MASK 0xffff
+#define SNDRV_CTL_TLVD_DB_SCALE_MUTE 0x10000
+#define SNDRV_CTL_TLVD_DB_SCALE_ITEM(min, step, mute) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_SCALE, \
+ (min), \
+ ((step) & SNDRV_CTL_TLVD_DB_SCALE_MASK) | \
+ ((mute) ? SNDRV_CTL_TLVD_DB_SCALE_MUTE : 0))
+#define SNDRV_CTL_TLVD_DECLARE_DB_SCALE(name, min, step, mute) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_SCALE_ITEM(min, step, mute) \
+ }
+
+/* dB scale specified with min/max values instead of step */
+#define SNDRV_CTL_TLVD_DB_MINMAX_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_MINMAX_MUTE, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_MINMAX_ITEM(min_dB, max_dB) \
+ }
+#define SNDRV_CTL_TLVD_DECLARE_DB_MINMAX_MUTE(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ }
+
+/* linear volume between min_dB and max_dB (.01dB unit) */
+#define SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
+ }
+
+/* dB range container:
+ * Items in dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond with larger
+ * dB values (which is also required for all other mixer controls).
+ */
+/* Each item is: <min> <max> <TLV> */
+#define SNDRV_CTL_TLVD_DB_RANGE_ITEM(...) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
+#define SNDRV_CTL_TLVD_DECLARE_DB_RANGE(name, ...) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_RANGE_ITEM(__VA_ARGS__) \
+ }
+
+#define SNDRV_CTL_TLVD_DB_GAIN_MUTE -9999999
+
#endif
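For reference, the new declaration helpers expand to the unsigned int array layout described in the comment above; a hedged usage sketch with made-up control values (dB numbers are in 0.01 dB units):

/* -50.00 dB minimum, 1.00 dB steps, mute supported at the minimum value */
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(db_scale_example, -5000, 100, 1);

/* dB range container: each item is <min> <max> <TLV>, ordered ascending */
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(db_range_example,
	0,  7, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-5000, 100, 1),
	8, 15, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-1000,  50, 0));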
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 0c0e3ef4c45d..f0f0252cff9a 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -38,8 +38,7 @@ extern enum xen_domain_type xen_domain_type;
*/
#include <xen/features.h>
#define xen_pvh_domain() (xen_pv_domain() && \
- xen_feature(XENFEAT_auto_translated_physmap) && \
- xen_have_vector_callback)
+ xen_feature(XENFEAT_auto_translated_physmap))
#else
#define xen_pvh_domain() (0)
#endif
diff --git a/init/Kconfig b/init/Kconfig
index 3b9a47fe843b..d7fc22639665 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2118,12 +2118,6 @@ config PADATA
depends on SMP
bool
-# Can be selected by architectures with broken toolchains
-# that get confused by correct const<->read_only section
-# mappings
-config BROKEN_RODATA
- bool
-
config ASN1
tristate
help
diff --git a/ipc/namespace.c b/ipc/namespace.c
index d87e6baa1323..0abdea496493 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -16,39 +16,61 @@
#include "util.h"
+static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
+}
+
+static void dec_ipc_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
+}
+
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
struct ipc_namespace *old_ns)
{
struct ipc_namespace *ns;
+ struct ucounts *ucounts;
int err;
+ err = -ENOSPC;
+ ucounts = inc_ipc_namespaces(user_ns);
+ if (!ucounts)
+ goto fail;
+
+ err = -ENOMEM;
ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
if (ns == NULL)
- return ERR_PTR(-ENOMEM);
+ goto fail_dec;
err = ns_alloc_inum(&ns->ns);
- if (err) {
- kfree(ns);
- return ERR_PTR(err);
- }
+ if (err)
+ goto fail_free;
ns->ns.ops = &ipcns_operations;
atomic_set(&ns->count, 1);
ns->user_ns = get_user_ns(user_ns);
+ ns->ucounts = ucounts;
err = mq_init_ns(ns);
- if (err) {
- put_user_ns(ns->user_ns);
- ns_free_inum(&ns->ns);
- kfree(ns);
- return ERR_PTR(err);
- }
+ if (err)
+ goto fail_put;
sem_init_ns(ns);
msg_init_ns(ns);
shm_init_ns(ns);
return ns;
+
+fail_put:
+ put_user_ns(ns->user_ns);
+ ns_free_inum(&ns->ns);
+fail_free:
+ kfree(ns);
+fail_dec:
+ dec_ipc_namespaces(ucounts);
+fail:
+ return ERR_PTR(err);
}
struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -96,6 +118,7 @@ static void free_ipc_ns(struct ipc_namespace *ns)
msg_exit_ns(ns);
shm_exit_ns(ns);
+ dec_ipc_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
kfree(ns);
@@ -165,10 +188,16 @@ static int ipcns_install(struct nsproxy *nsproxy, struct ns_common *new)
return 0;
}
+static struct user_namespace *ipcns_owner(struct ns_common *ns)
+{
+ return to_ipc_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.type = CLONE_NEWIPC,
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
+ .owner = ipcns_owner,
};
diff --git a/kernel/Makefile b/kernel/Makefile
index e2ec54e2b952..eb26e12c6c2a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = fork.o exec_domain.o panic.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \
notifier.o ksysfs.o cred.o reboot.o \
- async.o range.o smpboot.o
+ async.o range.o smpboot.o ucount.o
obj-$(CONFIG_MULTIUSER) += groups.o
diff --git a/kernel/audit.c b/kernel/audit.c
index a8a91bd2b2a9..f1ca11613379 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -877,6 +877,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
}
if (s.mask & AUDIT_STATUS_PID) {
+ /* NOTE: we are using task_tgid_vnr() below because
+ * the s.pid value is relative to the namespace
+ * of the caller; at present this doesn't matter
+ * much since you can really only run auditd
+ * from the initial pid namespace, but something
+ * to keep in mind if this changes */
int new_pid = s.pid;
pid_t requesting_pid = task_tgid_vnr(current);
@@ -1917,7 +1923,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
" euid=%u suid=%u fsuid=%u"
" egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
task_ppid_nr(tsk),
- task_pid_nr(tsk),
+ task_tgid_nr(tsk),
from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
from_kuid(&init_user_ns, cred->uid),
from_kgid(&init_user_ns, cred->gid),
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 5abf1dc1f91c..2cd5256dbff7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -457,7 +457,7 @@ static int audit_filter_rules(struct task_struct *tsk,
switch (f->type) {
case AUDIT_PID:
- pid = task_pid_nr(tsk);
+ pid = task_tgid_nr(tsk);
result = audit_comparator(pid, f->op, f->val);
break;
case AUDIT_PPID:
@@ -1993,7 +1993,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
loginuid = from_kuid(&init_user_ns, kloginuid),
tty = audit_get_tty(current);
- audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+ audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
oldloginuid, loginuid, tty ? tty_name(tty) : "(none)",
@@ -2220,7 +2220,7 @@ void __audit_ptrace(struct task_struct *t)
{
struct audit_context *context = current->audit_context;
- context->target_pid = task_pid_nr(t);
+ context->target_pid = task_tgid_nr(t);
context->target_auid = audit_get_loginuid(t);
context->target_uid = task_uid(t);
context->target_sessionid = audit_get_sessionid(t);
@@ -2245,7 +2245,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
if (audit_pid && t->tgid == audit_pid) {
if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
- audit_sig_pid = task_pid_nr(tsk);
+ audit_sig_pid = task_tgid_nr(tsk);
if (uid_valid(tsk->loginuid))
audit_sig_uid = tsk->loginuid;
else
@@ -2345,7 +2345,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
void __audit_log_capset(const struct cred *new, const struct cred *old)
{
struct audit_context *context = current->audit_context;
- context->capset.pid = task_pid_nr(current);
+ context->capset.pid = task_tgid_nr(current);
context->capset.cap.effective = new->cap_effective;
context->capset.cap.inheritable = new->cap_effective;
context->capset.cap.permitted = new->cap_permitted;
@@ -2377,7 +2377,7 @@ static void audit_log_task(struct audit_buffer *ab)
from_kgid(&init_user_ns, gid),
sessionid);
audit_log_task_context(ab);
- audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+ audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_d_path_exe(ab, current->mm);
}
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 633a650d7aeb..a2ac051c342f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -538,7 +538,7 @@ static int __init register_perf_event_array_map(void)
}
late_initcall(register_perf_event_array_map);
-#ifdef CONFIG_SOCK_CGROUP_DATA
+#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
struct file *map_file /* not used */,
int fd)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 03fd23d4d587..aa6d98154106 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1018,7 +1018,7 @@ void bpf_user_rnd_init_once(void)
prandom_init_once(&bpf_user_rnd_state);
}
-u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_user_rnd_u32)
{
/* Should someone ever have the rather unwise idea to use some
* of the registers passed into this function, then note that
@@ -1031,7 +1031,7 @@ u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
state = &get_cpu_var(bpf_user_rnd_state);
res = prandom_u32_state(state);
- put_cpu_var(state);
+ put_cpu_var(bpf_user_rnd_state);
return res;
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1ea3afba1a4f..39918402e6e9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -16,6 +16,7 @@
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
+#include <linux/filter.h>
/* If kernel subsystem is allowing eBPF programs to call this function,
* inside its own verifier_ops->get_func_proto() callback it should return
@@ -26,48 +27,32 @@
* if program is allowed to access maps, so check rcu_read_lock_held in
* all three functions.
*/
-static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
- /* verifier checked that R1 contains a valid pointer to bpf_map
- * and R2 points to a program stack and map->key_size bytes were
- * initialized
- */
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
- void *value;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
- value = map->ops->map_lookup_elem(map, key);
-
- /* lookup() returns either pointer to element value or NULL
- * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
- */
- return (unsigned long) value;
+ return (unsigned long) map->ops->map_lookup_elem(map, key);
}
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
.func = bpf_map_lookup_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
};
-static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+ void *, value, u64, flags)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
- void *value = (void *) (unsigned long) r3;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
- return map->ops->map_update_elem(map, key, value, r4);
+ return map->ops->map_update_elem(map, key, value, flags);
}
const struct bpf_func_proto bpf_map_update_elem_proto = {
.func = bpf_map_update_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
@@ -75,19 +60,16 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
return map->ops->map_delete_elem(map, key);
}
const struct bpf_func_proto bpf_map_delete_elem_proto = {
.func = bpf_map_delete_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
@@ -99,7 +81,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_smp_processor_id)
{
return smp_processor_id();
}
@@ -110,7 +92,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_ktime_get_ns)
{
/* NMI safe access to clock monotonic */
return ktime_get_mono_fast_ns();
@@ -122,11 +104,11 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_pid_tgid)
{
struct task_struct *task = current;
- if (!task)
+ if (unlikely(!task))
return -EINVAL;
return (u64) task->tgid << 32 | task->pid;
@@ -138,18 +120,18 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_uid_gid)
{
struct task_struct *task = current;
kuid_t uid;
kgid_t gid;
- if (!task)
+ if (unlikely(!task))
return -EINVAL;
current_uid_gid(&uid, &gid);
return (u64) from_kgid(&init_user_ns, gid) << 32 |
- from_kuid(&init_user_ns, uid);
+ from_kuid(&init_user_ns, uid);
}
const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
@@ -158,10 +140,9 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
struct task_struct *task = current;
- char *buf = (char *) (long) r1;
if (unlikely(!task))
goto err_clear;
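The BPF_CALL_x() wrappers used above hide the raw u64 r1..r5 calling convention behind typed parameters. A minimal sketch of a hypothetical helper written with the same pattern (the helper itself is invented; only the macro and struct bpf_func_proto are from the kernel):

/* Hypothetical helper: returns its argument doubled.  BPF_CALL_1 generates
 * the u64 (*)(u64, u64, u64, u64, u64) entry point the interpreter/JIT
 * expects, while the body sees one typed parameter. */
BPF_CALL_1(bpf_double_example, u64, x)
{
	return x * 2;
}

static const struct bpf_func_proto bpf_double_example_proto = {
	.func		= bpf_double_example,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};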
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index bf4495fcd25d..732ae16d12b7 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -116,10 +116,9 @@ free_smap:
return ERR_PTR(err);
}
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
+ u64, flags)
{
- struct pt_regs *regs = (struct pt_regs *) (long) r1;
- struct bpf_map *map = (struct bpf_map *) (long) r2;
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct perf_callchain_entry *trace;
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index daea765d72e6..99a7e5b388f2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
@@ -126,76 +127,16 @@
* are set to NOT_INIT to indicate that they are no longer readable.
*/
-struct reg_state {
- enum bpf_reg_type type;
- union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u32 id;
- u16 off;
- u16 range;
- };
-
- /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
- * PTR_TO_MAP_VALUE_OR_NULL
- */
- struct bpf_map *map_ptr;
- };
-};
-
-enum bpf_stack_slot_type {
- STACK_INVALID, /* nothing was stored in this stack slot */
- STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
-};
-
-#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
-
-/* state of the program:
- * type of all registers and stack info
- */
-struct verifier_state {
- struct reg_state regs[MAX_BPF_REG];
- u8 stack_slot_type[MAX_BPF_STACK];
- struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
-};
-
-/* linked list of verifier states used to prune search */
-struct verifier_state_list {
- struct verifier_state state;
- struct verifier_state_list *next;
-};
-
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
-struct verifier_stack_elem {
+struct bpf_verifier_stack_elem {
/* verifer state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
- struct verifier_state st;
+ struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
- struct verifier_stack_elem *next;
-};
-
-#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
-
-/* single container for all structs
- * one verifier_env per bpf_check() call
- */
-struct verifier_env {
- struct bpf_prog *prog; /* eBPF program being verified */
- struct verifier_stack_elem *head; /* stack of verifier states to be processed */
- int stack_size; /* number of states to be processed */
- struct verifier_state cur_state; /* current verifier state */
- struct verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
- u32 used_map_cnt; /* number of used maps */
- u32 id_gen; /* used to generate unique reg IDs */
- bool allow_ptr_leaks;
+ struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 65536
@@ -204,6 +145,7 @@ struct verifier_env {
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
+ bool pkt_access;
int regno;
int access_size;
};
@@ -240,6 +182,7 @@ static const char * const reg_type_str[] = {
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
+ [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj",
[FRAME_PTR] = "fp",
[PTR_TO_STACK] = "fp",
[CONST_IMM] = "imm",
@@ -247,9 +190,9 @@ static const char * const reg_type_str[] = {
[PTR_TO_PACKET_END] = "pkt_end",
};
-static void print_verifier_state(struct verifier_state *state)
+static void print_verifier_state(struct bpf_verifier_state *state)
{
- struct reg_state *reg;
+ struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
@@ -267,10 +210,17 @@ static void print_verifier_state(struct verifier_state *state)
else if (t == UNKNOWN_VALUE && reg->imm)
verbose("%lld", reg->imm);
else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
- t == PTR_TO_MAP_VALUE_OR_NULL)
+ t == PTR_TO_MAP_VALUE_OR_NULL ||
+ t == PTR_TO_MAP_VALUE_ADJ)
verbose("(ks=%d,vs=%d)",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
+ if (reg->min_value != BPF_REGISTER_MIN_RANGE)
+ verbose(",min_value=%llu",
+ (unsigned long long)reg->min_value);
+ if (reg->max_value != BPF_REGISTER_MAX_RANGE)
+ verbose(",max_value=%llu",
+ (unsigned long long)reg->max_value);
}
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] == STACK_SPILL)
@@ -425,9 +375,9 @@ static void print_bpf_insn(struct bpf_insn *insn)
}
}
-static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
+static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
{
- struct verifier_stack_elem *elem;
+ struct bpf_verifier_stack_elem *elem;
int insn_idx;
if (env->head == NULL)
@@ -444,12 +394,12 @@ static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
return insn_idx;
}
-static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
- int prev_insn_idx)
+static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
{
- struct verifier_stack_elem *elem;
+ struct bpf_verifier_stack_elem *elem;
- elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
+ elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
@@ -475,13 +425,15 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
-static void init_reg_state(struct reg_state *regs)
+static void init_reg_state(struct bpf_reg_state *regs)
{
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
regs[i].type = NOT_INIT;
regs[i].imm = 0;
+ regs[i].min_value = BPF_REGISTER_MIN_RANGE;
+ regs[i].max_value = BPF_REGISTER_MAX_RANGE;
}
/* frame pointer */
@@ -491,20 +443,26 @@ static void init_reg_state(struct reg_state *regs)
regs[BPF_REG_1].type = PTR_TO_CTX;
}
-static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
{
BUG_ON(regno >= MAX_BPF_REG);
regs[regno].type = UNKNOWN_VALUE;
regs[regno].imm = 0;
}
+static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
+{
+ regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+ regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
-static int check_reg_arg(struct reg_state *regs, u32 regno,
+static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
enum reg_arg_type t)
{
if (regno >= MAX_BPF_REG) {
@@ -564,8 +522,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
-static int check_stack_write(struct verifier_state *state, int off, int size,
- int value_regno)
+static int check_stack_write(struct bpf_verifier_state *state, int off,
+ int size, int value_regno)
{
int i;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -590,7 +548,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
} else {
/* regular write of data into stack */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
- (struct reg_state) {};
+ (struct bpf_reg_state) {};
for (i = 0; i < size; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
@@ -598,7 +556,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
return 0;
}
-static int check_stack_read(struct verifier_state *state, int off, int size,
+static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
int value_regno)
{
u8 *slot_type;
@@ -639,7 +597,7 @@ static int check_stack_read(struct verifier_state *state, int off, int size,
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
-static int check_map_access(struct verifier_env *env, u32 regno, int off,
+static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
@@ -654,24 +612,31 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off,
#define MAX_PACKET_OFF 0xffff
-static bool may_write_pkt_data(enum bpf_prog_type type)
+static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
+ const struct bpf_call_arg_meta *meta)
{
- switch (type) {
+ switch (env->prog->type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
+ if (meta)
+ return meta->pkt_access;
+
+ env->seen_direct_write = true;
return true;
default:
return false;
}
}
-static int check_packet_access(struct verifier_env *env, u32 regno, int off,
+static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *reg = &regs[regno];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *reg = &regs[regno];
off += reg->off;
- if (off < 0 || off + size > reg->range) {
+ if (off < 0 || size <= 0 || off + size > reg->range) {
verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
@@ -680,9 +645,13 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
}
/* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
+ /* for analyzer ctx accesses are already validated and converted */
+ if (env->analyzer_ops)
+ return 0;
+
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
@@ -695,7 +664,7 @@ static int check_ctx_access(struct verifier_env *env, int off, int size,
return -EACCES;
}
-static bool is_pointer_value(struct verifier_env *env, int regno)
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
if (env->allow_ptr_leaks)
return false;
@@ -709,28 +678,19 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
}
}
-static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
- int off, int size)
+static int check_ptr_alignment(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, int off, int size)
{
- if (reg->type != PTR_TO_PACKET) {
+ if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
if (off % size != 0) {
- verbose("misaligned access off %d size %d\n", off, size);
+ verbose("misaligned access off %d size %d\n",
+ off, size);
return -EACCES;
} else {
return 0;
}
}
- switch (env->prog->type) {
- case BPF_PROG_TYPE_SCHED_CLS:
- case BPF_PROG_TYPE_SCHED_ACT:
- case BPF_PROG_TYPE_XDP:
- break;
- default:
- verbose("verifier is misconfigured\n");
- return -EACCES;
- }
-
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
/* misaligned access to packet is ok on x86,arm,arm64 */
return 0;
@@ -741,7 +701,8 @@ static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
}
/* skb->data is NET_IP_ALIGN-ed */
- if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
+ if (reg->type == PTR_TO_PACKET &&
+ (NET_IP_ALIGN + reg->off + off) % size != 0) {
verbose("misaligned packet access off %d+%d+%d size %d\n",
NET_IP_ALIGN, reg->off, off, size);
return -EACCES;
@@ -755,12 +716,12 @@ static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *reg = &state->regs[regno];
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *reg = &state->regs[regno];
int size, err = 0;
if (reg->type == PTR_TO_STACK)
@@ -774,12 +735,52 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
if (err)
return err;
- if (reg->type == PTR_TO_MAP_VALUE) {
+ if (reg->type == PTR_TO_MAP_VALUE ||
+ reg->type == PTR_TO_MAP_VALUE_ADJ) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into map\n", value_regno);
return -EACCES;
}
+
+ /* If we adjusted the register to this map value at all then we
+ * need to change off and size to min_value and max_value
+ * respectively to make sure our theoretical access will be
+ * safe.
+ */
+ if (reg->type == PTR_TO_MAP_VALUE_ADJ) {
+ if (log_level)
+ print_verifier_state(state);
+ env->varlen_map_value_access = true;
+ /* The minimum value is only important with signed
+ * comparisons where we can't assume the floor of a
+ * value is 0. If we are using signed variables for our
+			 * indexes we need to make sure that whatever we use
+ * will have a set floor within our range.
+ */
+ if ((s64)reg->min_value < 0) {
+ verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+ regno);
+ return -EACCES;
+ }
+ err = check_map_access(env, regno, reg->min_value + off,
+ size);
+ if (err) {
+ verbose("R%d min value is outside of the array range\n",
+ regno);
+ return err;
+ }
+
+ /* If we haven't set a max value then we need to bail
+ * since we can't be sure we won't do bad things.
+ */
+ if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
+ verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
+ regno);
+ return -EACCES;
+ }
+ off += reg->max_value;
+ }
err = check_map_access(env, regno, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown_value(state->regs, value_regno);
@@ -795,9 +796,8 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
err = check_ctx_access(env, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value(state->regs, value_regno);
- if (env->allow_ptr_leaks)
- /* note that reg.[id|off|range] == 0 */
- state->regs[value_regno].type = reg_type;
+ /* note that reg.[id|off|range] == 0 */
+ state->regs[value_regno].type = reg_type;
}
} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
@@ -817,7 +817,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
err = check_stack_read(state, off, size, value_regno);
}
} else if (state->regs[regno].type == PTR_TO_PACKET) {
- if (t == BPF_WRITE && !may_write_pkt_data(env->prog->type)) {
+ if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL)) {
verbose("cannot write into packet\n");
return -EACCES;
}
@@ -846,9 +846,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
return err;
}
-static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
@@ -882,12 +882,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized
*/
-static int check_stack_boundary(struct verifier_env *env, int regno,
+static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs;
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *regs = state->regs;
int off, i;
if (regs[regno].type != PTR_TO_STACK) {
@@ -926,18 +926,18 @@ static int check_stack_boundary(struct verifier_env *env, int regno,
return 0;
}
-static int check_func_arg(struct verifier_env *env, u32 regno,
+static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
- struct reg_state *reg = env->cur_state.regs + regno;
- enum bpf_reg_type expected_type;
+ struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
+ enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
- if (reg->type == NOT_INIT) {
+ if (type == NOT_INIT) {
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
@@ -950,16 +950,29 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
return 0;
}
+ if (type == PTR_TO_PACKET && !may_access_direct_pkt_data(env, meta)) {
+ verbose("helper access to the packet is not allowed\n");
+ return -EACCES;
+ }
+
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
+ if (type != PTR_TO_PACKET && type != expected_type)
+ goto err_type;
} else if (arg_type == ARG_CONST_STACK_SIZE ||
arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
expected_type = CONST_IMM;
+ if (type != expected_type)
+ goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
+ if (type != expected_type)
+ goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
+ if (type != expected_type)
+ goto err_type;
} else if (arg_type == ARG_PTR_TO_STACK ||
arg_type == ARG_PTR_TO_RAW_STACK) {
expected_type = PTR_TO_STACK;
@@ -967,20 +980,16 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
* passed in as argument, it's a CONST_IMM type. Final test
* happens during stack boundary checking.
*/
- if (reg->type == CONST_IMM && reg->imm == 0)
- expected_type = CONST_IMM;
+ if (type == CONST_IMM && reg->imm == 0)
+ /* final test in check_stack_boundary() */;
+ else if (type != PTR_TO_PACKET && type != expected_type)
+ goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
} else {
verbose("unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
- if (reg->type != expected_type) {
- verbose("R%d type=%s expected=%s\n", regno,
- reg_type_str[reg->type], reg_type_str[expected_type]);
- return -EACCES;
- }
-
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
meta->map_ptr = reg->map_ptr;
@@ -998,8 +1007,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
verbose("invalid map_ptr to access map->key\n");
return -EACCES;
}
- err = check_stack_boundary(env, regno, meta->map_ptr->key_size,
- false, NULL);
+ if (type == PTR_TO_PACKET)
+ err = check_packet_access(env, regno, 0,
+ meta->map_ptr->key_size);
+ else
+ err = check_stack_boundary(env, regno,
+ meta->map_ptr->key_size,
+ false, NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
@@ -1009,9 +1023,13 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
verbose("invalid map_ptr to access map->value\n");
return -EACCES;
}
- err = check_stack_boundary(env, regno,
- meta->map_ptr->value_size,
- false, NULL);
+ if (type == PTR_TO_PACKET)
+ err = check_packet_access(env, regno, 0,
+ meta->map_ptr->value_size);
+ else
+ err = check_stack_boundary(env, regno,
+ meta->map_ptr->value_size,
+ false, NULL);
} else if (arg_type == ARG_CONST_STACK_SIZE ||
arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);
@@ -1025,11 +1043,18 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
return -EACCES;
}
- err = check_stack_boundary(env, regno - 1, reg->imm,
- zero_size_allowed, meta);
+ if (regs[regno - 1].type == PTR_TO_PACKET)
+ err = check_packet_access(env, regno - 1, 0, reg->imm);
+ else
+ err = check_stack_boundary(env, regno - 1, reg->imm,
+ zero_size_allowed, meta);
}
return err;
+err_type:
+ verbose("R%d type=%s expected=%s\n", regno,
+ reg_type_str[type], reg_type_str[expected_type]);
+ return -EACCES;
}
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
@@ -1053,7 +1078,8 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
- if (func_id != BPF_FUNC_skb_under_cgroup)
+ if (func_id != BPF_FUNC_skb_under_cgroup &&
+ func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
default:
@@ -1075,6 +1101,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
+ case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
@@ -1108,10 +1135,10 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
return count > 1 ? -EINVAL : 0;
}
-static void clear_all_pkt_pointers(struct verifier_env *env)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs, *reg;
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
@@ -1131,12 +1158,12 @@ static void clear_all_pkt_pointers(struct verifier_env *env)
}
}
-static int check_call(struct verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id)
{
- struct verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *state = &env->cur_state;
const struct bpf_func_proto *fn = NULL;
- struct reg_state *regs = state->regs;
- struct reg_state *reg;
+ struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *reg;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
@@ -1164,6 +1191,7 @@ static int check_call(struct verifier_env *env, int func_id)
changes_data = bpf_helper_changes_skb_data(fn->func);
memset(&meta, 0, sizeof(meta));
+ meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
@@ -1214,6 +1242,7 @@ static int check_call(struct verifier_env *env, int func_id)
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+ regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
@@ -1238,12 +1267,13 @@ static int check_call(struct verifier_env *env, int func_id)
return 0;
}
-static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
+static int check_packet_ptr_add(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
- struct reg_state *src_reg = &regs[insn->src_reg];
- struct reg_state tmp_reg;
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state tmp_reg;
s32 imm;
if (BPF_SRC(insn->code) == BPF_K) {
@@ -1311,10 +1341,10 @@ add_imm:
return 0;
}
-static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
+static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
u8 opcode = BPF_OP(insn->code);
s64 imm_log2;
@@ -1324,7 +1354,7 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
*/
if (BPF_SRC(insn->code) == BPF_X) {
- struct reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
dst_reg->imm && opcode == BPF_ADD) {
@@ -1413,11 +1443,12 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
+static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
- struct reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
@@ -1433,10 +1464,110 @@ static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
+static void check_reg_overflow(struct bpf_reg_state *reg)
+{
+ if (reg->max_value > BPF_REGISTER_MAX_RANGE)
+ reg->max_value = BPF_REGISTER_MAX_RANGE;
+ if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+ reg->min_value = BPF_REGISTER_MIN_RANGE;
+}
+
+static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
+{
+ struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
+ u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+ bool min_set = false, max_set = false;
+ u8 opcode = BPF_OP(insn->code);
+
+ dst_reg = &regs[insn->dst_reg];
+ if (BPF_SRC(insn->code) == BPF_X) {
+ check_reg_overflow(&regs[insn->src_reg]);
+ min_val = regs[insn->src_reg].min_value;
+ max_val = regs[insn->src_reg].max_value;
+
+ /* If the source register is a random pointer then the
+ * min_value/max_value values represent the range of the known
+ * accesses into that value, not the actual min/max value of the
+ * register itself. In this case we have to reset the reg range
+ * values so we know it is not safe to look at.
+ */
+ if (regs[insn->src_reg].type != CONST_IMM &&
+ regs[insn->src_reg].type != UNKNOWN_VALUE) {
+ min_val = BPF_REGISTER_MIN_RANGE;
+ max_val = BPF_REGISTER_MAX_RANGE;
+ }
+ } else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
+ (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
+ min_val = max_val = insn->imm;
+ min_set = max_set = true;
+ }
+
+ /* We don't know anything about what was done to this register, mark it
+ * as unknown.
+ */
+ if (min_val == BPF_REGISTER_MIN_RANGE &&
+ max_val == BPF_REGISTER_MAX_RANGE) {
+ reset_reg_range_values(regs, insn->dst_reg);
+ return;
+ }
+
+ switch (opcode) {
+ case BPF_ADD:
+ dst_reg->min_value += min_val;
+ dst_reg->max_value += max_val;
+ break;
+ case BPF_SUB:
+ dst_reg->min_value -= min_val;
+ dst_reg->max_value -= max_val;
+ break;
+ case BPF_MUL:
+ dst_reg->min_value *= min_val;
+ dst_reg->max_value *= max_val;
+ break;
+ case BPF_AND:
+ /* & is special since it could end up with 0 bits set. */
+ dst_reg->min_value &= min_val;
+ dst_reg->max_value = max_val;
+ break;
+ case BPF_LSH:
+ /* Gotta have special overflow logic here, if we're shifting
+ * more than MAX_RANGE then just assume we have an invalid
+ * range.
+ */
+ if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ else
+ dst_reg->min_value <<= min_val;
+
+ if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
+ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+ else
+ dst_reg->max_value <<= max_val;
+ break;
+ case BPF_RSH:
+ dst_reg->min_value >>= min_val;
+ dst_reg->max_value >>= max_val;
+ break;
+ case BPF_MOD:
+ /* % is special since it is an unsigned modulus, so the floor
+ * will always be 0.
+ */
+ dst_reg->min_value = 0;
+ dst_reg->max_value = max_val - 1;
+ break;
+ default:
+ reset_reg_range_values(regs, insn->dst_reg);
+ break;
+ }
+
+ check_reg_overflow(dst_reg);
+}
+
/* check validity of 32-bit and 64-bit arithmetic operations */
-static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
+static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs, *dst_reg;
+ struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
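The min/max tracking added in adjust_reg_min_max_vals() above exists so that a program which bounds a value before using it as an offset into a map value can be proven safe. A hedged BPF C sketch of the pattern this targets (my_map, the 64-byte value size, and the usual bpf_helpers.h declaration of bpf_map_lookup_elem() are assumptions, not taken from this patch):

/* Sketch: the comparison gives idx a known max_value, so the later
 * variable access into the map value (PTR_TO_MAP_VALUE_ADJ) stays in
 * bounds from the verifier's point of view. */
static __always_inline int read_bounded(void *key)
{
	char *value = bpf_map_lookup_elem(&my_map, key);
	__u32 idx;

	if (!value)
		return 0;

	idx = *(__u32 *)value;
	if (idx >= 60)		/* map value assumed to be 64 bytes */
		return 0;
	return value[idx];
}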
@@ -1496,6 +1627,11 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
+ /* we are setting our register to something new, we need to
+ * reset its range values.
+ */
+ reset_reg_range_values(regs, insn->dst_reg);
+
if (BPF_SRC(insn->code) == BPF_X) {
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
@@ -1517,6 +1653,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
*/
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = insn->imm;
+ regs[insn->dst_reg].max_value = insn->imm;
+ regs[insn->dst_reg].min_value = insn->imm;
}
} else if (opcode > BPF_END) {
@@ -1569,6 +1707,9 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
dst_reg = &regs[insn->dst_reg];
+ /* first we want to adjust our ranges. */
+ adjust_reg_min_max_vals(env, insn);
+
/* pattern match 'bpf_add Rx, imm' instruction */
if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
@@ -1603,28 +1744,58 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
return -EACCES;
}
- /* mark dest operand */
- mark_reg_unknown_value(regs, insn->dst_reg);
+ /* If we did pointer math on a map value then just set it to our
+ * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or
+ * loads to this register appropriately, otherwise just mark the
+ * register as unknown.
+ */
+ if (env->allow_ptr_leaks &&
+ (dst_reg->type == PTR_TO_MAP_VALUE ||
+ dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
+ dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
+ else
+ mark_reg_unknown_value(regs, insn->dst_reg);
}
return 0;
}
-static void find_good_pkt_pointers(struct verifier_env *env,
- struct reg_state *dst_reg)
+static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+ struct bpf_reg_state *dst_reg)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs, *reg;
+ struct bpf_reg_state *regs = state->regs, *reg;
int i;
- /* r2 = r3;
- * r2 += 8
- * if (r2 > pkt_end) goto somewhere
- * r2 == dst_reg, pkt_end == src_reg,
- * r2=pkt(id=n,off=8,r=0)
- * r3=pkt(id=n,off=0,r=0)
- * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
- * so that range of bytes [r3, r3 + 8) is safe to access
+
+ /* LLVM can generate two kind of checks:
+ *
+ * Type 1:
+ *
+ * r2 = r3;
+ * r2 += 8;
+ * if (r2 > pkt_end) goto <handle exception>
+ * <access okay>
+ *
+ * Where:
+ * r2 == dst_reg, pkt_end == src_reg
+ * r2=pkt(id=n,off=8,r=0)
+ * r3=pkt(id=n,off=0,r=0)
+ *
+ * Type 2:
+ *
+ * r2 = r3;
+ * r2 += 8;
+ * if (pkt_end >= r2) goto <access okay>
+ * <handle exception>
+ *
+ * Where:
+ * pkt_end == dst_reg, r2 == src_reg
+ * r2=pkt(id=n,off=8,r=0)
+ * r3=pkt(id=n,off=0,r=0)
+ *
+ * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
+ * so that range of bytes [r3, r3 + 8) is safe to access.
*/
+
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
regs[i].range = dst_reg->off;
@@ -1638,11 +1809,109 @@ static void find_good_pkt_pointers(struct verifier_env *env,
}
}
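/* The two shapes above come from restricted-C programs doing direct packet
 * access; a minimal sketch of source that compiles (with clang -target bpf)
 * into the "type 1" pattern. Field names are from struct __sk_buff; the
 * function body and return values are illustrative only.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>

static int sample_pkt_access(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)	/* r2 = r3 + 14; if (r2 > pkt_end) */
		return 0;
	/* here [data, data + 14) is known safe to read */
	return eth->h_proto;
}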
-static int check_cond_jmp_op(struct verifier_env *env,
+/* Adjusts the register min/max values in the case that the dst_reg is the
+ * variable register that we are working on, and src_reg is a constant or we're
+ * simply doing a BPF_K check.
+ */
+static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ struct bpf_reg_state *false_reg, u64 val,
+ u8 opcode)
+{
+ switch (opcode) {
+ case BPF_JEQ:
+ /* If this is false then we know nothing Jon Snow, but if it is
+ * true then we know for sure.
+ */
+ true_reg->max_value = true_reg->min_value = val;
+ break;
+ case BPF_JNE:
+ /* If this is true we know nothing Jon Snow, but if it is false
+ * we know the value for sure.
+ */
+ false_reg->max_value = false_reg->min_value = val;
+ break;
+ case BPF_JGT:
+ /* Unsigned comparison, the minimum value is 0. */
+ false_reg->min_value = 0;
+ case BPF_JSGT:
+ /* If this is false then we know the maximum val is val,
+ * otherwise we know the min val is val+1.
+ */
+ false_reg->max_value = val;
+ true_reg->min_value = val + 1;
+ break;
+ case BPF_JGE:
+ /* Unsigned comparison, the minimum value is 0. */
+ false_reg->min_value = 0;
+ case BPF_JSGE:
+ /* If this is false then we know the maximum value is val - 1,
+ * otherwise we know the minimum value is val.
+ */
+ false_reg->max_value = val - 1;
+ true_reg->min_value = val;
+ break;
+ default:
+ break;
+ }
+
+ check_reg_overflow(false_reg);
+ check_reg_overflow(true_reg);
+}
+
+/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
+ * is the variable reg.
+ */
+static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ struct bpf_reg_state *false_reg, u64 val,
+ u8 opcode)
+{
+ switch (opcode) {
+ case BPF_JEQ:
+ /* If this is false then we know nothing Jon Snow, but if it is
+ * true then we know for sure.
+ */
+ true_reg->max_value = true_reg->min_value = val;
+ break;
+ case BPF_JNE:
+ /* If this is true we know nothing Jon Snow, but if it is false
+ * we know the value for sure.
+ */
+ false_reg->max_value = false_reg->min_value = val;
+ break;
+ case BPF_JGT:
+ /* Unsigned comparison, the minimum value is 0. */
+ true_reg->min_value = 0;
+ case BPF_JSGT:
+ /*
+ * If this is false then val <= the register; if it is
+ * true then the register < val.
+ */
+ false_reg->min_value = val;
+ true_reg->max_value = val - 1;
+ break;
+ case BPF_JGE:
+ /* Unsigned comparison, the minimum value is 0. */
+ true_reg->min_value = 0;
+ case BPF_JSGE:
+ /* If this is false then the constant < register; if it is true then
+ * the register <= constant.
+ */
+ false_reg->min_value = val + 1;
+ true_reg->max_value = val;
+ break;
+ default:
+ break;
+ }
+
+ check_reg_overflow(false_reg);
+ check_reg_overflow(true_reg);
+}
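/* Worked example of the true/false branch tracking above for an unsigned
 * BPF_JGT against an immediate, e.g. "if (r1 > 10)": in the taken branch
 * r1 is at least 11, in the fall-through branch it lies in [0, 10].
 * Userspace sketch with illustrative names, not the kernel structs:
 */
#include <stdint.h>

struct range { int64_t min, max; };

static void set_min_max_jgt(struct range *true_r, struct range *false_r,
			    uint64_t val)
{
	/* unsigned '>' is false: reg <= val, so the range becomes [0, val] */
	false_r->min = 0;
	false_r->max = val;
	/* unsigned '>' is true: reg > val, so the minimum becomes val + 1 */
	true_r->min = val + 1;
}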
+
+static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
- struct reg_state *regs = env->cur_state.regs, *dst_reg;
- struct verifier_state *other_branch;
+ struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
+ struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -1704,7 +1973,24 @@ static int check_cond_jmp_op(struct verifier_env *env,
if (!other_branch)
return -EFAULT;
- /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
+ /* detect if we are comparing against a constant value so we can adjust
+ * our min/max values for our dst register.
+ */
+ if (BPF_SRC(insn->code) == BPF_X) {
+ if (regs[insn->src_reg].type == CONST_IMM)
+ reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ dst_reg, regs[insn->src_reg].imm,
+ opcode);
+ else if (dst_reg->type == CONST_IMM)
+ reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
+ &regs[insn->src_reg], dst_reg->imm,
+ opcode);
+ } else {
+ reg_set_min_max(&other_branch->regs[insn->dst_reg],
+ dst_reg, insn->imm, opcode);
+ }
+
+ /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
@@ -1723,13 +2009,17 @@ static int check_cond_jmp_op(struct verifier_env *env,
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- find_good_pkt_pointers(env, dst_reg);
+ find_good_pkt_pointers(this_branch, dst_reg);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
+ dst_reg->type == PTR_TO_PACKET_END &&
+ regs[insn->src_reg].type == PTR_TO_PACKET) {
+ find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
}
if (log_level)
- print_verifier_state(&env->cur_state);
+ print_verifier_state(this_branch);
return 0;
}
@@ -1742,9 +2032,9 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
}
/* verify BPF_LD_IMM64 instruction */
-static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
+static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
@@ -1760,9 +2050,19 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
- if (insn->src_reg == 0)
- /* generic move 64-bit immediate into a register */
+ if (insn->src_reg == 0) {
+ /* generic move 64-bit immediate into a register,
+ * only analyzer needs to collect the ld_imm value.
+ */
+ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
+
+ if (!env->analyzer_ops)
+ return 0;
+
+ regs[insn->dst_reg].type = CONST_IMM;
+ regs[insn->dst_reg].imm = imm;
return 0;
+ }
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
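/* BPF_LD_IMM64 spans two instructions, so the hunk above rebuilds the
 * 64-bit constant from two 32-bit imm halves. Standalone illustration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t lo = 0x11223344;	/* insn->imm       */
	int32_t hi = 0x55667788;	/* (insn + 1)->imm */
	uint64_t imm = ((uint64_t)hi << 32) | (uint32_t)lo;

	printf("0x%llx\n", (unsigned long long)imm);	/* 0x5566778811223344 */
	return 0;
}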
@@ -1799,11 +2099,11 @@ static bool may_access_skb(enum bpf_prog_type type)
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
-static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
u8 mode = BPF_MODE(insn->code);
- struct reg_state *reg;
+ struct bpf_reg_state *reg;
int i, err;
if (!may_access_skb(env->prog->type)) {
@@ -1889,7 +2189,7 @@ enum {
BRANCH = 2,
};
-#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)
+#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
@@ -1900,7 +2200,7 @@ static int *insn_state;
* w - next instruction
* e - edge
*/
-static int push_insn(int t, int w, int e, struct verifier_env *env)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
@@ -1941,7 +2241,7 @@ static int push_insn(int t, int w, int e, struct verifier_env *env)
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
-static int check_cfg(struct verifier_env *env)
+static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -2050,7 +2350,8 @@ err_free:
/* the following conditions reduce the number of explored insns
* from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
*/
-static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
+static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+ struct bpf_reg_state *cur)
{
if (old->id != cur->id)
return false;
@@ -2125,9 +2426,11 @@ static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
+static bool states_equal(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
{
- struct reg_state *rold, *rcur;
+ struct bpf_reg_state *rold, *rcur;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
@@ -2137,6 +2440,13 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
if (memcmp(rold, rcur, sizeof(*rold)) == 0)
continue;
+ /* If the ranges were not the same, but everything else was and
+ * we didn't do a variable access into a map then we are a-ok.
+ */
+ if (!env->varlen_map_value_access &&
+ rold->type == rcur->type && rold->imm == rcur->imm)
+ continue;
+
if (rold->type == NOT_INIT ||
(rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
continue;
@@ -2167,9 +2477,9 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
* the same, check that stored pointers types
* are the same as well.
* Ex: explored safe path could have stored
- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8}
* but current path has stored:
- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
@@ -2180,10 +2490,10 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
return true;
}
-static int is_state_visited(struct verifier_env *env, int insn_idx)
+static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
- struct verifier_state_list *new_sl;
- struct verifier_state_list *sl;
+ struct bpf_verifier_state_list *new_sl;
+ struct bpf_verifier_state_list *sl;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -2193,7 +2503,7 @@ static int is_state_visited(struct verifier_env *env, int insn_idx)
return 0;
while (sl != STATE_LIST_MARK) {
- if (states_equal(&sl->state, &env->cur_state))
+ if (states_equal(env, &sl->state, &env->cur_state))
/* reached equivalent register/stack state,
* prune the search
*/
@@ -2207,7 +2517,7 @@ static int is_state_visited(struct verifier_env *env, int insn_idx)
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
*/
- new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
+ new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
if (!new_sl)
return -ENOMEM;
@@ -2218,11 +2528,20 @@ static int is_state_visited(struct verifier_env *env, int insn_idx)
return 0;
}
-static int do_check(struct verifier_env *env)
+static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
+{
+ if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
+ return 0;
+
+ return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
+}
+
+static int do_check(struct bpf_verifier_env *env)
{
- struct verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
- struct reg_state *regs = state->regs;
+ struct bpf_reg_state *regs = state->regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
@@ -2230,6 +2549,7 @@ static int do_check(struct verifier_env *env)
init_reg_state(regs);
insn_idx = 0;
+ env->varlen_map_value_access = false;
for (;;) {
struct bpf_insn *insn;
u8 class;
@@ -2276,13 +2596,17 @@ static int do_check(struct verifier_env *env)
print_bpf_insn(insn);
}
+ err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
+ if (err)
+ return err;
+
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
- enum bpf_reg_type src_reg_type;
+ enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
@@ -2306,21 +2630,25 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
- if (BPF_SIZE(insn->code) != BPF_W) {
+ reset_reg_range_values(regs, insn->dst_reg);
+ if (BPF_SIZE(insn->code) != BPF_W &&
+ BPF_SIZE(insn->code) != BPF_DW) {
insn_idx++;
continue;
}
- if (insn->imm == 0) {
+ prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
+
+ if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
- * use reserved 'imm' field to mark this insn
+ * save type to validate intersecting paths
*/
- insn->imm = src_reg_type;
+ *prev_src_type = src_reg_type;
- } else if (src_reg_type != insn->imm &&
+ } else if (src_reg_type != *prev_src_type &&
(src_reg_type == PTR_TO_CTX ||
- insn->imm == PTR_TO_CTX)) {
+ *prev_src_type == PTR_TO_CTX)) {
/* Abuser program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
@@ -2333,7 +2661,7 @@ static int do_check(struct verifier_env *env)
}
} else if (class == BPF_STX) {
- enum bpf_reg_type dst_reg_type;
+ enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
@@ -2361,11 +2689,13 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
- if (insn->imm == 0) {
- insn->imm = dst_reg_type;
- } else if (dst_reg_type != insn->imm &&
+ prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
+
+ if (*prev_dst_type == NOT_INIT) {
+ *prev_dst_type = dst_reg_type;
+ } else if (dst_reg_type != *prev_dst_type &&
(dst_reg_type == PTR_TO_CTX ||
- insn->imm == PTR_TO_CTX)) {
+ *prev_dst_type == PTR_TO_CTX)) {
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
@@ -2471,6 +2801,7 @@ process_bpf_exit:
verbose("invalid BPF_LD mode\n");
return -EINVAL;
}
+ reset_reg_range_values(regs, insn->dst_reg);
} else {
verbose("unknown insn class %d\n", class);
return -EINVAL;
@@ -2483,14 +2814,28 @@ process_bpf_exit:
return 0;
}
+static int check_map_prog_compatibility(struct bpf_map *map,
+ struct bpf_prog *prog)
+
+{
+ if (prog->type == BPF_PROG_TYPE_PERF_EVENT &&
+ (map->map_type == BPF_MAP_TYPE_HASH ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_HASH) &&
+ (map->map_flags & BPF_F_NO_PREALLOC)) {
+ verbose("perf_event programs can only use preallocated hash map\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
-static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
- int i, j;
+ int i, j, err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
@@ -2534,6 +2879,12 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return PTR_ERR(map);
}
+ err = check_map_prog_compatibility(map, env->prog);
+ if (err) {
+ fdput(f);
+ return err;
+ }
+
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
@@ -2577,7 +2928,7 @@ next_insn:
}
/* drop refcnt of maps used by the rejected program */
-static void release_maps(struct verifier_env *env)
+static void release_maps(struct bpf_verifier_env *env)
{
int i;
@@ -2586,7 +2937,7 @@ static void release_maps(struct verifier_env *env)
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
-static void convert_pseudo_ld_imm64(struct verifier_env *env)
+static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -2600,62 +2951,74 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
-static int convert_ctx_accesses(struct verifier_env *env)
+static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
- struct bpf_insn *insn = env->prog->insnsi;
- int insn_cnt = env->prog->len;
- struct bpf_insn insn_buf[16];
+ const struct bpf_verifier_ops *ops = env->prog->aux->ops;
+ const int insn_cnt = env->prog->len;
+ struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
- int i;
+ int i, cnt, delta = 0;
- if (!env->prog->aux->ops->convert_ctx_access)
+ if (ops->gen_prologue) {
+ cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
+ env->prog);
+ if (cnt >= ARRAY_SIZE(insn_buf)) {
+ verbose("bpf verifier is misconfigured\n");
+ return -EINVAL;
+ } else if (cnt) {
+ new_prog = bpf_patch_insn_single(env->prog, 0,
+ insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ delta += cnt - 1;
+ }
+ }
+
+ if (!ops->convert_ctx_access)
return 0;
- for (i = 0; i < insn_cnt; i++, insn++) {
- u32 insn_delta, cnt;
+ insn = env->prog->insnsi + delta;
- if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
- else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
+ else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
- if (insn->imm != PTR_TO_CTX) {
- /* clear internal mark */
- insn->imm = 0;
+ if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
continue;
- }
- cnt = env->prog->aux->ops->
- convert_ctx_access(type, insn->dst_reg, insn->src_reg,
- insn->off, insn_buf, env->prog);
+ cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
+ insn->off, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
- new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
+ new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
+ cnt);
if (!new_prog)
return -ENOMEM;
- insn_delta = cnt - 1;
+ delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
- insn = new_prog->insnsi + i + insn_delta;
-
- insn_cnt += insn_delta;
- i += insn_delta;
+ insn = new_prog->insnsi + i + delta;
}
return 0;
}
-static void free_states(struct verifier_env *env)
+static void free_states(struct bpf_verifier_env *env)
{
- struct verifier_state_list *sl, *sln;
+ struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
@@ -2678,19 +3041,24 @@ static void free_states(struct verifier_env *env)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
char __user *log_ubuf = NULL;
- struct verifier_env *env;
+ struct bpf_verifier_env *env;
int ret = -EINVAL;
if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
return -E2BIG;
- /* 'struct verifier_env' can be global, but since it's not small,
+ /* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
- env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
+ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
+ env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
+ (*prog)->len);
+ ret = -ENOMEM;
+ if (!env->insn_aux_data)
+ goto err_free_env;
env->prog = *prog;
/* grab the mutex to protect few globals used by verifier */
@@ -2709,12 +3077,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
/* log_* values have to be sane */
if (log_size < 128 || log_size > UINT_MAX >> 8 ||
log_level == 0 || log_ubuf == NULL)
- goto free_env;
+ goto err_unlock;
ret = -ENOMEM;
log_buf = vmalloc(log_size);
if (!log_buf)
- goto free_env;
+ goto err_unlock;
} else {
log_level = 0;
}
@@ -2724,7 +3092,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
goto skip_full_check;
env->explored_states = kcalloc(env->prog->len,
- sizeof(struct verifier_state_list *),
+ sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
@@ -2783,14 +3151,67 @@ skip_full_check:
free_log_buf:
if (log_level)
vfree(log_buf);
-free_env:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
*prog = env->prog;
+err_unlock:
+ mutex_unlock(&bpf_verifier_lock);
+ vfree(env->insn_aux_data);
+err_free_env:
kfree(env);
+ return ret;
+}
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv)
+{
+ struct bpf_verifier_env *env;
+ int ret;
+
+ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
+ prog->len);
+ ret = -ENOMEM;
+ if (!env->insn_aux_data)
+ goto err_free_env;
+ env->prog = prog;
+ env->analyzer_ops = ops;
+ env->analyzer_priv = priv;
+
+ /* grab the mutex to protect few globals used by verifier */
+ mutex_lock(&bpf_verifier_lock);
+
+ log_level = 0;
+
+ env->explored_states = kcalloc(env->prog->len,
+ sizeof(struct bpf_verifier_state_list *),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!env->explored_states)
+ goto skip_full_check;
+
+ ret = check_cfg(env);
+ if (ret < 0)
+ goto skip_full_check;
+
+ env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
+ ret = do_check(env);
+
+skip_full_check:
+ while (pop_stack(env, NULL) >= 0);
+ free_states(env);
+
mutex_unlock(&bpf_verifier_lock);
+ vfree(env->insn_aux_data);
+err_free_env:
+ kfree(env);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_analyzer);
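/* Sketch of how an in-kernel client of the new bpf_analyzer() entry point
 * could look: supply a bpf_ext_analyzer_ops with an insn_hook and walk the
 * program through the verifier without the user-space log plumbing. The
 * ops struct and hook signature come from the hunks above; the callback
 * body, names and the priv argument are hypothetical.
 */
static int sample_insn_hook(struct bpf_verifier_env *env,
			    int insn_idx, int prev_insn_idx)
{
	/* inspect env->cur_state here; returning non-zero aborts do_check() */
	return 0;
}

static const struct bpf_ext_analyzer_ops sample_analyzer_ops = {
	.insn_hook	= sample_insn_hook,
};

/* ... with an already-loaded prog:
 *	err = bpf_analyzer(prog, &sample_analyzer_ops, priv);
 */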
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9ba28310eab6..44066158f0d1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6328,6 +6328,16 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd)
/* cgroup namespaces */
+static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES);
+}
+
+static void dec_cgroup_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES);
+}
+
static struct cgroup_namespace *alloc_cgroup_ns(void)
{
struct cgroup_namespace *new_ns;
@@ -6349,6 +6359,7 @@ static struct cgroup_namespace *alloc_cgroup_ns(void)
void free_cgroup_ns(struct cgroup_namespace *ns)
{
put_css_set(ns->root_cset);
+ dec_cgroup_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
kfree(ns);
@@ -6360,6 +6371,7 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
struct cgroup_namespace *old_ns)
{
struct cgroup_namespace *new_ns;
+ struct ucounts *ucounts;
struct css_set *cset;
BUG_ON(!old_ns);
@@ -6373,6 +6385,10 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
+ ucounts = inc_cgroup_namespaces(user_ns);
+ if (!ucounts)
+ return ERR_PTR(-ENOSPC);
+
/* It is not safe to take cgroup_mutex here */
spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
@@ -6382,10 +6398,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
new_ns = alloc_cgroup_ns();
if (IS_ERR(new_ns)) {
put_css_set(cset);
+ dec_cgroup_namespaces(ucounts);
return new_ns;
}
new_ns->user_ns = get_user_ns(user_ns);
+ new_ns->ucounts = ucounts;
new_ns->root_cset = cset;
return new_ns;
@@ -6436,12 +6454,18 @@ static void cgroupns_put(struct ns_common *ns)
put_cgroup_ns(to_cg_ns(ns));
}
+static struct user_namespace *cgroupns_owner(struct ns_common *ns)
+{
+ return to_cg_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations cgroupns_operations = {
.name = "cgroup",
.type = CLONE_NEWCGROUP,
.get = cgroupns_get,
.put = cgroupns_put,
.install = cgroupns_install,
+ .owner = cgroupns_owner,
};
static __init int cgroup_namespaces_init(void)
diff --git a/arch/x86/configs/kvm_guest.config b/kernel/configs/kvm_guest.config
index 9906505c998a..8d9643767142 100644
--- a/arch/x86/configs/kvm_guest.config
+++ b/kernel/configs/kvm_guest.config
@@ -29,3 +29,4 @@ CONFIG_NET_9P_VIRTIO=y
CONFIG_SCSI_LOWLEVEL=y
CONFIG_SCSI_VIRTIO=y
CONFIG_VIRTIO_INPUT=y
+CONFIG_DRM_VIRTIO_GPU=y
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c0d263f6bc5..c6e47e97b33f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7079,7 +7079,7 @@ static int __perf_event_overflow(struct perf_event *event,
irq_work_queue(&event->pending);
}
- event->overflow_handler(event, data, regs);
+ READ_ONCE(event->overflow_handler)(event, data, regs);
if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
@@ -7694,11 +7694,83 @@ static void perf_event_free_filter(struct perf_event *event)
ftrace_profile_free_filter(event);
}
+#ifdef CONFIG_BPF_SYSCALL
+static void bpf_overflow_handler(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct bpf_perf_event_data_kern ctx = {
+ .data = data,
+ .regs = regs,
+ };
+ int ret = 0;
+
+ preempt_disable();
+ if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
+ goto out;
+ rcu_read_lock();
+ ret = BPF_PROG_RUN(event->prog, (void *)&ctx);
+ rcu_read_unlock();
+out:
+ __this_cpu_dec(bpf_prog_active);
+ preempt_enable();
+ if (!ret)
+ return;
+
+ event->orig_overflow_handler(event, data, regs);
+}
+
+static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
+{
+ struct bpf_prog *prog;
+
+ if (event->overflow_handler_context)
+ /* hw breakpoint or kernel counter */
+ return -EINVAL;
+
+ if (event->prog)
+ return -EEXIST;
+
+ prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ event->prog = prog;
+ event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
+ WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
+ return 0;
+}
+
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+ struct bpf_prog *prog = event->prog;
+
+ if (!prog)
+ return;
+
+ WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
+ event->prog = NULL;
+ bpf_prog_put(prog);
+}
+#else
+static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
+{
+ return -EOPNOTSUPP;
+}
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+}
+#endif
+
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
bool is_kprobe, is_tracepoint;
struct bpf_prog *prog;
+ if (event->attr.type == PERF_TYPE_HARDWARE ||
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ return perf_event_set_bpf_handler(event, prog_fd);
+
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;
@@ -7739,6 +7811,8 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
{
struct bpf_prog *prog;
+ perf_event_free_bpf_handler(event);
+
if (!event->tp_event)
return;
@@ -9055,6 +9129,19 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
+ if (overflow_handler == bpf_overflow_handler) {
+ struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
+
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto err_ns;
+ }
+ event->prog = prog;
+ event->orig_overflow_handler =
+ parent_event->orig_overflow_handler;
+ }
+#endif
}
if (overflow_handler) {
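/* User-space sketch of what the perf hunks above enable: attaching a
 * loaded BPF_PROG_TYPE_PERF_EVENT program to a hardware sampling event via
 * the existing PERF_EVENT_IOC_SET_BPF ioctl. prog_fd is assumed to come
 * from a prior bpf(BPF_PROG_LOAD, ...) call; error handling is minimal.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_bpf_to_cycles(int prog_fd, int cpu)
{
	struct perf_event_attr attr;
	int ev_fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;

	ev_fd = syscall(__NR_perf_event_open, &attr, -1 /* any pid */, cpu,
			-1 /* no group */, 0);
	if (ev_fd < 0)
		return -1;

	if (ioctl(ev_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		close(ev_fd);
		return -1;
	}
	return ev_fd;
}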
diff --git a/kernel/exit.c b/kernel/exit.c
index 1e1d913914c0..9d68c45ebbe3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -511,7 +511,7 @@ static void exit_mm(struct task_struct *tsk)
mm_update_next_owner(mm);
mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
- exit_oom_victim(tsk);
+ exit_oom_victim();
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index c060c7e7c247..6d42242485cb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -359,6 +359,12 @@ static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
sched_autogroup_exit(sig);
+ /*
+ * __mmdrop is not safe to call from softirq context on x86 due to
+ * pgd_dtor so postpone it to the async context
+ */
+ if (sig->oom_mm)
+ mmdrop_async(sig->oom_mm);
kmem_cache_free(signal_cachep, sig);
}
@@ -418,6 +424,7 @@ int arch_task_struct_size __read_mostly;
void __init fork_init(void)
{
+ int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
@@ -437,6 +444,10 @@ void __init fork_init(void)
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
+
+ for (i = 0; i < UCOUNT_COUNTS; i++) {
+ init_user_ns.ucount_max[i] = max_threads/2;
+ }
}
int __weak arch_dup_task_struct(struct task_struct *dst,
@@ -843,6 +854,7 @@ static inline void __mmput(struct mm_struct *mm)
ksm_exit(mm);
khugepaged_exit(mm); /* must run before exit_mmap */
exit_mmap(mm);
+ mm_put_huge_zero_page(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
@@ -851,6 +863,7 @@ static inline void __mmput(struct mm_struct *mm)
}
if (mm->binfmt)
module_put(mm->binfmt->module);
+ set_bit(MMF_OOM_SKIP, &mm->flags);
mmdrop(mm);
}
diff --git a/kernel/groups.c b/kernel/groups.c
index 74d431d25251..2fcadd66a8fd 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -7,55 +7,31 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/user_namespace.h>
+#include <linux/vmalloc.h>
#include <asm/uaccess.h>
struct group_info *groups_alloc(int gidsetsize)
{
- struct group_info *group_info;
- int nblocks;
- int i;
-
- nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
- /* Make sure we always allocate at least one indirect block pointer */
- nblocks = nblocks ? : 1;
- group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
- if (!group_info)
+ struct group_info *gi;
+ unsigned int len;
+
+ len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
+ gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
+ if (!gi)
+ gi = __vmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_HIGHMEM, PAGE_KERNEL);
+ if (!gi)
return NULL;
- group_info->ngroups = gidsetsize;
- group_info->nblocks = nblocks;
- atomic_set(&group_info->usage, 1);
-
- if (gidsetsize <= NGROUPS_SMALL)
- group_info->blocks[0] = group_info->small_block;
- else {
- for (i = 0; i < nblocks; i++) {
- kgid_t *b;
- b = (void *)__get_free_page(GFP_USER);
- if (!b)
- goto out_undo_partial_alloc;
- group_info->blocks[i] = b;
- }
- }
- return group_info;
-out_undo_partial_alloc:
- while (--i >= 0) {
- free_page((unsigned long)group_info->blocks[i]);
- }
- kfree(group_info);
- return NULL;
+ atomic_set(&gi->usage, 1);
+ gi->ngroups = gidsetsize;
+ return gi;
}
EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
- if (group_info->blocks[0] != group_info->small_block) {
- int i;
- for (i = 0; i < group_info->nblocks; i++)
- free_page((unsigned long)group_info->blocks[i]);
- }
- kfree(group_info);
+ kvfree(group_info);
}
EXPORT_SYMBOL(groups_free);
@@ -70,7 +46,7 @@ static int groups_to_user(gid_t __user *grouplist,
for (i = 0; i < count; i++) {
gid_t gid;
- gid = from_kgid_munged(user_ns, GROUP_AT(group_info, i));
+ gid = from_kgid_munged(user_ns, group_info->gid[i]);
if (put_user(gid, grouplist+i))
return -EFAULT;
}
@@ -95,7 +71,7 @@ static int groups_from_user(struct group_info *group_info,
if (!gid_valid(kgid))
return -EINVAL;
- GROUP_AT(group_info, i) = kgid;
+ group_info->gid[i] = kgid;
}
return 0;
}
@@ -115,15 +91,14 @@ static void groups_sort(struct group_info *group_info)
for (base = 0; base < max; base++) {
int left = base;
int right = left + stride;
- kgid_t tmp = GROUP_AT(group_info, right);
+ kgid_t tmp = group_info->gid[right];
- while (left >= 0 && gid_gt(GROUP_AT(group_info, left), tmp)) {
- GROUP_AT(group_info, right) =
- GROUP_AT(group_info, left);
+ while (left >= 0 && gid_gt(group_info->gid[left], tmp)) {
+ group_info->gid[right] = group_info->gid[left];
right = left;
left -= stride;
}
- GROUP_AT(group_info, right) = tmp;
+ group_info->gid[right] = tmp;
}
stride /= 3;
}
@@ -141,9 +116,9 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- if (gid_gt(grp, GROUP_AT(group_info, mid)))
+ if (gid_gt(grp, group_info->gid[mid]))
left = mid + 1;
- else if (gid_lt(grp, GROUP_AT(group_info, mid)))
+ else if (gid_lt(grp, group_info->gid[mid]))
right = mid;
else
return 1;
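/* The rewrite above flattens group_info into one flexible array, so a
 * group is now plain gi->gid[i] instead of GROUP_AT()'s two-level block
 * lookup. A reduced userspace sketch of the new shape (kgid_t shrunk to an
 * int, kvfree() becomes free()):
 */
#include <stdlib.h>

struct flat_group_info {
	int ngroups;
	int gid[];			/* flexible array, one entry per group */
};

static struct flat_group_info *flat_groups_alloc(int gidsetsize)
{
	struct flat_group_info *gi;

	gi = malloc(sizeof(*gi) + sizeof(int) * gidsetsize);
	if (!gi)
		return NULL;
	gi->ngroups = gidsetsize;
	return gi;
}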
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 8bbe50704621..af4643873e71 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -274,7 +274,6 @@ static int klp_write_object_relocations(struct module *pmod,
objname = klp_is_module(obj) ? obj->name : "vmlinux";
- module_disable_ro(pmod);
/* For each klp relocation section */
for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
sec = pmod->klp_info->sechdrs + i;
@@ -309,7 +308,6 @@ static int klp_write_object_relocations(struct module *pmod,
break;
}
- module_enable_ro(pmod, true);
return ret;
}
@@ -547,9 +545,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
list_prev_entry(patch, list)->state == KLP_DISABLED)
return -EBUSY;
- pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
- add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
-
pr_notice("enabling patch '%s'\n", patch->mod->name);
klp_for_each_object(patch, obj) {
@@ -763,6 +758,12 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
func->old_sympos ? func->old_sympos : 1);
}
+/* Arches may override this to finish any remaining arch-specific tasks */
+void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+}
+
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_object *obj)
@@ -770,9 +771,15 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
+ module_disable_ro(patch->mod);
ret = klp_write_object_relocations(patch->mod, obj);
- if (ret)
+ if (ret) {
+ module_enable_ro(patch->mod, true);
return ret;
+ }
+
+ arch_klp_init_object_loaded(patch, obj);
+ module_enable_ro(patch->mod, true);
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
diff --git a/kernel/module.c b/kernel/module.c
index 529efae9f481..f57dd63186e6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1149,6 +1149,8 @@ static size_t module_flags_taint(struct module *mod, char *buf)
buf[l++] = 'C';
if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
buf[l++] = 'E';
+ if (mod->taints & (1 << TAINT_LIVEPATCH))
+ buf[l++] = 'K';
/*
* TAINT_FORCED_RMMOD: could be added.
* TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
@@ -2792,14 +2794,17 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l
}
#ifdef CONFIG_LIVEPATCH
-static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
- mod->klp = get_modinfo(info, "livepatch") ? true : false;
+ if (get_modinfo(info, "livepatch")) {
+ mod->klp = true;
+ add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+ }
return 0;
}
#else /* !CONFIG_LIVEPATCH */
-static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
if (get_modinfo(info, "livepatch")) {
pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
@@ -2969,7 +2974,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
"is unknown, you have been warned.\n", mod->name);
}
- err = find_livepatch_modinfo(mod, info);
+ err = check_modinfo_livepatch(mod, info);
if (err)
return err;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index a65ba137fd15..df9e8e9e0be7 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -79,23 +79,36 @@ static void proc_cleanup_work(struct work_struct *work)
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
+static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
+}
+
+static void dec_pid_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
+}
+
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
struct pid_namespace *parent_pid_ns)
{
struct pid_namespace *ns;
unsigned int level = parent_pid_ns->level + 1;
+ struct ucounts *ucounts;
int i;
int err;
- if (level > MAX_PID_NS_LEVEL) {
- err = -EINVAL;
+ err = -ENOSPC;
+ if (level > MAX_PID_NS_LEVEL)
+ goto out;
+ ucounts = inc_pid_namespaces(user_ns);
+ if (!ucounts)
goto out;
- }
err = -ENOMEM;
ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
if (ns == NULL)
- goto out;
+ goto out_dec;
ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!ns->pidmap[0].page)
@@ -114,6 +127,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
ns->level = level;
ns->parent = get_pid_ns(parent_pid_ns);
ns->user_ns = get_user_ns(user_ns);
+ ns->ucounts = ucounts;
ns->nr_hashed = PIDNS_HASH_ADDING;
INIT_WORK(&ns->proc_work, proc_cleanup_work);
@@ -129,6 +143,8 @@ out_free_map:
kfree(ns->pidmap[0].page);
out_free:
kmem_cache_free(pid_ns_cachep, ns);
+out_dec:
+ dec_pid_namespaces(ucounts);
out:
return ERR_PTR(err);
}
@@ -146,6 +162,7 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
ns_free_inum(&ns->ns);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
+ dec_pid_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
call_rcu(&ns->rcu, delayed_free_pidns);
}
@@ -388,12 +405,37 @@ static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
return 0;
}
+static struct ns_common *pidns_get_parent(struct ns_common *ns)
+{
+ struct pid_namespace *active = task_active_pid_ns(current);
+ struct pid_namespace *pid_ns, *p;
+
+ /* See if the parent is in the current namespace */
+ pid_ns = p = to_pid_ns(ns)->parent;
+ for (;;) {
+ if (!p)
+ return ERR_PTR(-EPERM);
+ if (p == active)
+ break;
+ p = p->parent;
+ }
+
+ return &get_pid_ns(pid_ns)->ns;
+}
+
+static struct user_namespace *pidns_owner(struct ns_common *ns)
+{
+ return to_pid_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations pidns_operations = {
.name = "pid",
.type = CLONE_NEWPID,
.get = pidns_get,
.put = pidns_put,
.install = pidns_install,
+ .owner = pidns_owner,
+ .get_parent = pidns_get_parent,
};
static __init int pid_namespaces_init(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 8f27d5a8adf6..2fba066e125f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -144,23 +144,12 @@ int freeze_processes(void)
/*
* Now that the whole userspace is frozen we need to disable
* the OOM killer to disallow any further interference with
- * killable tasks.
+ * killable tasks. There is no guarantee oom victims will ever
+ * reach a point where they go away, so we have to wait with a timeout.
*/
- if (!error && !oom_killer_disable())
+ if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
error = -EBUSY;
- /*
- * There is a hard to fix race between oom_reaper kernel thread
- * and oom_killer_disable. oom_reaper calls exit_oom_victim
- * before the victim reaches exit_mm so try to freeze all the tasks
- * again and catch such a left over task.
- */
- if (!error) {
- pr_info("Double checking all user space processes after OOM killer disable... ");
- error = try_to_freeze_tasks(true);
- pr_cont("\n");
- }
-
if (error)
thaw_processes();
return error;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index eea6dbc2d8cf..d5e397315473 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -253,6 +253,17 @@ static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
+#ifdef CONFIG_OF
+static bool of_specified_console;
+
+void console_set_by_of(void)
+{
+ of_specified_console = true;
+}
+#else
+# define of_specified_console false
+#endif
+
/* Flag: console code may call schedule() */
static int console_may_schedule;
@@ -655,11 +666,8 @@ static ssize_t msg_print_ext_header(char *buf, size_t size,
* better readable output. 'c' in the record flags mark the first
* fragment of a line, '+' the following.
*/
- if (msg->flags & LOG_CONT && !(prev_flags & LOG_CONT))
- cont = 'c';
- else if ((msg->flags & LOG_CONT) ||
- ((prev_flags & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
- cont = '+';
+ if (msg->flags & LOG_CONT)
+ cont = (prev_flags & LOG_CONT) ? '+' : 'c';
return scnprintf(buf, size, "%u,%llu,%llu,%c;",
(msg->facility << 3) | msg->level, seq, ts_usec, cont);
@@ -786,6 +794,8 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
return ret;
}
+static void cont_flush(void);
+
static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -801,6 +811,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
if (ret)
return ret;
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
while (user->seq == log_next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
@@ -863,6 +874,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
return -ESPIPE;
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
switch (whence) {
case SEEK_SET:
/* the first record */
@@ -901,6 +913,7 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
poll_wait(file, &log_wait, wait);
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
if (user->seq < log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < log_first_seq)
@@ -1287,6 +1300,7 @@ static int syslog_print(char __user *buf, int size)
size_t skip;
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
@@ -1346,6 +1360,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return -ENOMEM;
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
if (buf) {
u64 next_seq;
u64 seq;
@@ -1507,6 +1522,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
raw_spin_lock_irq(&logbuf_lock);
+ cont_flush();
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
@@ -1643,35 +1659,33 @@ static struct cont {
bool flushed:1; /* buffer sealed and committed */
} cont;
-static void cont_flush(enum log_flags flags)
+static void cont_flush(void)
{
if (cont.flushed)
return;
if (cont.len == 0)
return;
-
if (cont.cons) {
/*
* If a fragment of this line was directly flushed to the
* console; wait for the console to pick up the rest of the
* line. LOG_NOCONS suppresses a duplicated output.
*/
- log_store(cont.facility, cont.level, flags | LOG_NOCONS,
+ log_store(cont.facility, cont.level, cont.flags | LOG_NOCONS,
cont.ts_nsec, NULL, 0, cont.buf, cont.len);
- cont.flags = flags;
cont.flushed = true;
} else {
/*
* If no fragment of this line ever reached the console,
* just submit it to the store and free the buffer.
*/
- log_store(cont.facility, cont.level, flags, 0,
+ log_store(cont.facility, cont.level, cont.flags, 0,
NULL, 0, cont.buf, cont.len);
cont.len = 0;
}
}
-static bool cont_add(int facility, int level, const char *text, size_t len)
+static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
{
if (cont.len && cont.flushed)
return false;
@@ -1682,7 +1696,7 @@ static bool cont_add(int facility, int level, const char *text, size_t len)
* the line gets too long, split it up in separate records.
*/
if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
- cont_flush(LOG_CONT);
+ cont_flush();
return false;
}
@@ -1691,7 +1705,7 @@ static bool cont_add(int facility, int level, const char *text, size_t len)
cont.level = level;
cont.owner = current;
cont.ts_nsec = local_clock();
- cont.flags = 0;
+ cont.flags = flags;
cont.cons = 0;
cont.flushed = false;
}
@@ -1699,8 +1713,15 @@ static bool cont_add(int facility, int level, const char *text, size_t len)
memcpy(cont.buf + cont.len, text, len);
cont.len += len;
+ /* The original flags come from the first line,
+ * but later continuations can add a newline.
+ */
+ if (flags & LOG_NEWLINE) {
+ cont.flags |= LOG_NEWLINE;
+ cont_flush();
+ }
+
if (cont.len > (sizeof(cont.buf) * 80) / 100)
- cont_flush(LOG_CONT);
+ cont_flush();
return true;
}
@@ -1733,6 +1754,31 @@ static size_t cont_print_text(char *text, size_t size)
return textlen;
}
+static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
+{
+ /*
+ * If an earlier line was buffered, and we're a continuation
+ * write from the same process, try to add it to the buffer.
+ */
+ if (cont.len) {
+ if (cont.owner == current && (lflags & LOG_CONT)) {
+ if (cont_add(facility, level, lflags, text, text_len))
+ return text_len;
+ }
+ /* Otherwise, make sure it's flushed */
+ cont_flush();
+ }
+
+ /* If it doesn't end in a newline, try to buffer the current line */
+ if (!(lflags & LOG_NEWLINE)) {
+ if (cont_add(facility, level, lflags, text, text_len))
+ return text_len;
+ }
+
+ /* Store it in the record log */
+ return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
+}
+
asmlinkage int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args)
@@ -1819,10 +1865,9 @@ asmlinkage int vprintk_emit(int facility, int level,
/* strip kernel syslog prefix and extract log level or control flags */
if (facility == 0) {
- int kern_level = printk_get_level(text);
+ int kern_level;
- if (kern_level) {
- const char *end_of_header = printk_skip_level(text);
+ while ((kern_level = printk_get_level(text)) != 0) {
switch (kern_level) {
case '0' ... '7':
if (level == LOGLEVEL_DEFAULT)
@@ -1830,14 +1875,13 @@ asmlinkage int vprintk_emit(int facility, int level,
/* fallthrough */
case 'd': /* KERN_DEFAULT */
lflags |= LOG_PREFIX;
+ break;
+ case 'c': /* KERN_CONT */
+ lflags |= LOG_CONT;
}
- /*
- * No need to check length here because vscnprintf
- * put '\0' at the end of the string. Only valid and
- * newly printed level is detected.
- */
- text_len -= end_of_header - text;
- text = (char *)end_of_header;
+
+ text_len -= 2;
+ text += 2;
}
}
@@ -1847,45 +1891,7 @@ asmlinkage int vprintk_emit(int facility, int level,
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
- if (!(lflags & LOG_NEWLINE)) {
- /*
- * Flush the conflicting buffer. An earlier newline was missing,
- * or another task also prints continuation lines.
- */
- if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
- cont_flush(LOG_NEWLINE);
-
- /* buffer line if possible, otherwise store it right away */
- if (cont_add(facility, level, text, text_len))
- printed_len += text_len;
- else
- printed_len += log_store(facility, level,
- lflags | LOG_CONT, 0,
- dict, dictlen, text, text_len);
- } else {
- bool stored = false;
-
- /*
- * If an earlier newline was missing and it was the same task,
- * either merge it with the current buffer and flush, or if
- * there was a race with interrupts (prefix == true) then just
- * flush it out and store this line separately.
- * If the preceding printk was from a different task and missed
- * a newline, flush and append the newline.
- */
- if (cont.len) {
- if (cont.owner == current && !(lflags & LOG_PREFIX))
- stored = cont_add(facility, level, text,
- text_len);
- cont_flush(LOG_NEWLINE);
- }
-
- if (stored)
- printed_len += text_len;
- else
- printed_len += log_store(facility, level, lflags, 0,
- dict, dictlen, text, text_len);
- }
+ printed_len += log_output(facility, level, lflags, dict, dictlen, text, text_len);
logbuf_cpu = UINT_MAX;
raw_spin_unlock(&logbuf_lock);
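/* With the reworked continuation handling above, KERN_CONT fragments from
 * the same task are buffered and merged into one record, flushed once a
 * newline arrives. Illustrative kernel-style usage ("mydev" is made up):
 */
#include <linux/printk.h>

static void example_probe_msg(void)
{
	printk(KERN_INFO "mydev: probing");
	printk(KERN_CONT " ... done\n");	/* merged into the line above */
}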
@@ -2647,7 +2653,7 @@ void register_console(struct console *newcon)
* didn't select a console we take the first one
* that registers here.
*/
- if (preferred_console < 0) {
+ if (preferred_console < 0 && !of_specified_console) {
if (newcon->index < 0)
newcon->index = 0;
if (newcon->setup == NULL ||
@@ -3029,6 +3035,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
dumper->active = true;
raw_spin_lock_irqsave(&logbuf_lock, flags);
+ cont_flush();
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
dumper->next_seq = log_next_seq;
@@ -3119,6 +3126,7 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
bool ret;
raw_spin_lock_irqsave(&logbuf_lock, flags);
+ cont_flush();
ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
@@ -3161,6 +3169,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
goto out;
raw_spin_lock_irqsave(&logbuf_lock, flags);
+ cont_flush();
if (dumper->cur_seq < log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = log_first_seq;
diff --git a/kernel/relay.c b/kernel/relay.c
index fc9b4a4af463..9988f5cc2d46 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1108,51 +1108,23 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf,
return end_pos;
}
-/*
- * subbuf_read_actor - read up to one subbuf's worth of data
- */
-static int subbuf_read_actor(size_t read_start,
- struct rchan_buf *buf,
- size_t avail,
- read_descriptor_t *desc)
-{
- void *from;
- int ret = 0;
-
- from = buf->start + read_start;
- ret = avail;
- if (copy_to_user(desc->arg.buf, from, avail)) {
- desc->error = -EFAULT;
- ret = 0;
- }
- desc->arg.data += ret;
- desc->written += ret;
- desc->count -= ret;
-
- return ret;
-}
-
-typedef int (*subbuf_actor_t) (size_t read_start,
- struct rchan_buf *buf,
- size_t avail,
- read_descriptor_t *desc);
-
-/*
- * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
- */
-static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
- subbuf_actor_t subbuf_actor,
- read_descriptor_t *desc)
+static ssize_t relay_file_read(struct file *filp,
+ char __user *buffer,
+ size_t count,
+ loff_t *ppos)
{
struct rchan_buf *buf = filp->private_data;
size_t read_start, avail;
+ size_t written = 0;
int ret;
- if (!desc->count)
+ if (!count)
return 0;
inode_lock(file_inode(filp));
do {
+ void *from;
+
if (!relay_file_read_avail(buf, *ppos))
break;
@@ -1161,32 +1133,22 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
if (!avail)
break;
- avail = min(desc->count, avail);
- ret = subbuf_actor(read_start, buf, avail, desc);
- if (desc->error < 0)
+ avail = min(count, avail);
+ from = buf->start + read_start;
+ ret = avail;
+ if (copy_to_user(buffer, from, avail))
break;
- if (ret) {
- relay_file_read_consume(buf, read_start, ret);
- *ppos = relay_file_read_end_pos(buf, read_start, ret);
- }
- } while (desc->count && ret);
- inode_unlock(file_inode(filp));
+ buffer += ret;
+ written += ret;
+ count -= ret;
- return desc->written;
-}
+ relay_file_read_consume(buf, read_start, ret);
+ *ppos = relay_file_read_end_pos(buf, read_start, ret);
+ } while (count);
+ inode_unlock(file_inode(filp));
-static ssize_t relay_file_read(struct file *filp,
- char __user *buffer,
- size_t count,
- loff_t *ppos)
-{
- read_descriptor_t desc;
- desc.written = 0;
- desc.count = count;
- desc.arg.buf = buffer;
- desc.error = 0;
- return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, &desc);
+ return written;
}
static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 9fb873cfc75c..1d8718d5300d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -16,6 +16,9 @@
#include "sched.h"
+/* Linker adds these: start and end of __cpuidle functions */
+extern char __cpuidle_text_start[], __cpuidle_text_end[];
+
/**
* sched_idle_set_state - Record idle state for the current CPU.
* @idle_state: State to record.
@@ -53,7 +56,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
__setup("hlt", cpu_idle_nopoll_setup);
#endif
-static inline int cpu_idle_poll(void)
+static noinline int __cpuidle cpu_idle_poll(void)
{
rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id());
@@ -84,7 +87,7 @@ void __weak arch_cpu_idle(void)
*
* To use when the cpuidle framework cannot be used.
*/
-void default_idle_call(void)
+void __cpuidle default_idle_call(void)
{
if (current_clr_polling_and_test()) {
local_irq_enable();
@@ -271,6 +274,12 @@ static void cpu_idle_loop(void)
}
}
+bool cpu_in_idle(unsigned long pc)
+{
+ return pc >= (unsigned long)__cpuidle_text_start &&
+ pc < (unsigned long)__cpuidle_text_end;
+}
+
void cpu_startup_entry(enum cpuhp_state state)
{
/*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a13bbdaab47d..a43775c6646c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,6 +65,7 @@
#include <linux/sched/sysctl.h>
#include <linux/kexec.h>
#include <linux/bpf.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -1838,6 +1839,14 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
+ {
+ .procname = "mount-max",
+ .data = &sysctl_mount_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ },
{ }
};
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e07fb093f819..37dec7e3db43 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -403,8 +403,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
- now += clocksource_delta(tkr->read(tkr->clock),
- tkr->cycle_last, tkr->mask);
+ now += timekeeping_delta_to_ns(tkr,
+ clocksource_delta(
+ tkr->read(tkr->clock),
+ tkr->cycle_last,
+ tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ba3326785ca4..2a96b063d659 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -216,6 +216,41 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
+config HWLAT_TRACER
+ bool "Tracer to detect hardware latencies (like SMIs)"
+ select GENERIC_TRACER
+ help
+ This tracer, when enabled, will create one or more kernel threads,
+ depending on what the cpumask file is set to, with each thread
+ spinning in a loop looking for interruptions caused by
+ something other than the kernel. For example, if a
+ System Management Interrupt (SMI) takes a noticeable amount of
+ time, this tracer will detect it. This is useful for testing
+ whether a system is reliable for Real Time tasks.
+
+ Some files are created in the tracing directory when this
+ is enabled:
+
+ hwlat_detector/width - time in usecs for how long to spin for
+ hwlat_detector/window - time in usecs between the start of each
+ iteration
+
+ A kernel thread is created that will spin with interrupts disabled
+ for "width" microseconds in every "widow" cycle. It will not spin
+ for "window - width" microseconds, where the system can
+ continue to operate.
+
+ The output will appear in the trace and trace_pipe files.
+
+ When the tracer is not running, it has no effect on the system,
+ but when it is running, it can cause the system to be
+ periodically non-responsive. Do not run this tracer on a
+ production system.
+
+ To enable this tracer, echo "hwlat" into the current_tracer
+ file. Every time a latency is greater than tracing_thresh, it will
+ be recorded into the ring buffer.
+
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d0a1617b52b4..992ab9d99f35 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b20438fdb029..5dcb99281259 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -8,6 +9,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
+#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
@@ -59,11 +61,9 @@ unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
-static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
- void *dst = (void *) (long) r1;
- int ret, size = (int) r2;
- void *unsafe_ptr = (void *) (long) r3;
+ int ret;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
@@ -81,12 +81,9 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
.arg3_type = ARG_ANYTHING,
};
-static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+ u32, size)
{
- void *unsafe_ptr = (void *) (long) r1;
- void *src = (void *) (long) r2;
- int size = (int) r3;
-
/*
* Ensure we're in user context which is safe for the helper to
* run. This helper has no business in a kthread.
@@ -128,9 +125,9 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
* limited trace_printk()
* only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
*/
-static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+ u64, arg2, u64, arg3)
{
- char *fmt = (char *) (long) r1;
bool str_seen = false;
int mod[3] = {};
int fmt_cnt = 0;
@@ -176,16 +173,16 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
switch (fmt_cnt) {
case 1:
- unsafe_addr = r3;
- r3 = (long) buf;
+ unsafe_addr = arg1;
+ arg1 = (long) buf;
break;
case 2:
- unsafe_addr = r4;
- r4 = (long) buf;
+ unsafe_addr = arg2;
+ arg2 = (long) buf;
break;
case 3:
- unsafe_addr = r5;
- r5 = (long) buf;
+ unsafe_addr = arg3;
+ arg3 = (long) buf;
break;
}
buf[0] = 0;
@@ -207,9 +204,9 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
}
return __trace_printk(1/* fake ip will not be printed */, fmt,
- mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
- mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
- mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+ mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
+ mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
+ mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
@@ -231,9 +228,8 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
return &bpf_trace_printk_proto;
}
-static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
struct bpf_array *array = container_of(map, struct bpf_array, map);
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
@@ -310,11 +306,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
return 0;
}
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ u64, flags, void *, data, u64, size)
{
- struct pt_regs *regs = (struct pt_regs *)(long) r1;
- struct bpf_map *map = (struct bpf_map *)(long) r2;
- void *data = (void *)(long) r4;
struct perf_raw_record raw = {
.frag = {
.size = size,
@@ -365,7 +359,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
return __bpf_perf_event_output(regs, map, flags, &raw);
}
-static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_task)
{
return (long) current;
}
@@ -376,6 +370,31 @@ static const struct bpf_func_proto bpf_get_current_task_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct cgroup *cgrp;
+
+ if (unlikely(in_interrupt()))
+ return -EINVAL;
+ if (unlikely(idx >= array->map.max_entries))
+ return -E2BIG;
+
+ cgrp = READ_ONCE(array->ptrs[idx]);
+ if (unlikely(!cgrp))
+ return -EAGAIN;
+
+ return task_under_cgroup_hierarchy(current, cgrp);
+}
+
+static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+ .func = bpf_current_task_under_cgroup,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -407,6 +426,10 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
return &bpf_perf_event_read_proto;
case BPF_FUNC_probe_write_user:
return bpf_get_probe_write_proto();
+ case BPF_FUNC_current_task_under_cgroup:
+ return &bpf_current_task_under_cgroup_proto;
+ case BPF_FUNC_get_prandom_u32:
+ return &bpf_get_prandom_u32_proto;
default:
return NULL;
}
@@ -447,16 +470,17 @@ static struct bpf_prog_type_list kprobe_tl = {
.type = BPF_PROG_TYPE_KPROBE,
};
-static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
+ u64, flags, void *, data, u64, size)
{
+ struct pt_regs *regs = *(struct pt_regs **)tp_buff;
+
/*
* r1 points to perf tracepoint buffer where first 8 bytes are hidden
* from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
- * from there and call the same bpf_perf_event_output() helper
+ * from there and call the same bpf_perf_event_output() helper inline.
*/
- u64 ctx = *(long *)(uintptr_t)r1;
-
- return bpf_perf_event_output(ctx, r2, index, r4, size);
+ return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
@@ -470,11 +494,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg5_type = ARG_CONST_STACK_SIZE,
};
-static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
+ u64, flags)
{
- u64 ctx = *(long *)(uintptr_t)r1;
+ struct pt_regs *regs = *(struct pt_regs **)tp_buff;
- return bpf_get_stackid(ctx, r2, r3, r4, r5);
+ /*
+ * Same comment as in bpf_perf_event_output_tp(), only that this time
+ * the other helper's function body cannot be inlined due to being
+ * external, thus we need to call the raw helper function.
+ */
+ return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
}
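The hunks above replace helpers that hand-unpacked five raw u64 registers with BPF_CALL_n() definitions taking typed parameters, which is also what lets bpf_perf_event_output_tp() call the typed ____bpf_perf_event_output() body inline. The stand-alone macro below is only a hypothetical sketch of that pattern for illustration; it is not the kernel's actual BPF_CALL_2() from <linux/filter.h>.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in; the real BPF_CALL_2() in <linux/filter.h> differs. */
#define SKETCH_BPF_CALL_2(name, t1, a1, t2, a2)				\
	static uint64_t ____##name(t1 a1, t2 a2);			\
	static uint64_t name(uint64_t r1, uint64_t r2, uint64_t r3,	\
			     uint64_t r4, uint64_t r5)			\
	{								\
		/* Cast the raw BPF registers to typed parameters. */	\
		return ____##name((t1)(uintptr_t)r1, (t2)r2);		\
	}								\
	static uint64_t ____##name(t1 a1, t2 a2)

struct demo_map { uint32_t max_entries; };

/* The helper body sees typed arguments, like the converted helpers above. */
SKETCH_BPF_CALL_2(demo_index_ok, struct demo_map *, map, uint64_t, idx)
{
	return idx < map->max_entries;
}

int main(void)
{
	struct demo_map m = { .max_entries = 4 };

	/* The outer entry point still has the raw five-register signature. */
	printf("%llu\n", (unsigned long long)
	       demo_index_ok((uint64_t)(uintptr_t)&m, 2, 0, 0, 0));
	return 0;
}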
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
@@ -520,10 +551,69 @@ static struct bpf_prog_type_list tracepoint_tl = {
.type = BPF_PROG_TYPE_TRACEPOINT,
};
+static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
+{
+ if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
+ if (size != sizeof(u64))
+ return false;
+ } else {
+ if (size != sizeof(long))
+ return false;
+ }
+ return true;
+}
+
+static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (ctx_off) {
+ case offsetof(struct bpf_perf_event_data, sample_period):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+ data), dst_reg, src_reg,
+ offsetof(struct bpf_perf_event_data_kern, data));
+ *insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
+ offsetof(struct perf_sample_data, period));
+ break;
+ default:
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+ regs), dst_reg, src_reg,
+ offsetof(struct bpf_perf_event_data_kern, regs));
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
+ break;
+ }
+
+ return insn - insn_buf;
+}
+
+static const struct bpf_verifier_ops perf_event_prog_ops = {
+ .get_func_proto = tp_prog_func_proto,
+ .is_valid_access = pe_prog_is_valid_access,
+ .convert_ctx_access = pe_prog_convert_ctx_access,
+};
+
+static struct bpf_prog_type_list perf_event_tl = {
+ .ops = &perf_event_prog_ops,
+ .type = BPF_PROG_TYPE_PERF_EVENT,
+};
+
static int __init register_kprobe_prog_ops(void)
{
bpf_register_prog_type(&kprobe_tl);
bpf_register_prog_type(&tracepoint_tl);
+ bpf_register_prog_type(&perf_event_tl);
return 0;
}
late_initcall(register_kprobe_prog_ops);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 84752c8e28b5..2050a7652a86 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -872,7 +872,13 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
+ int index = trace->depth;
+
function_profile_call(trace->func, 0, NULL, NULL);
+
+ if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
+ current->ret_stack[index].subtime = 0;
+
return 1;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 37824d98ae71..8696ce6bf2f6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1047,7 +1047,7 @@ void disable_trace_on_warning(void)
*
* Shows real state of the ring buffer if it is enabled or not.
*/
-static int tracer_tracing_is_on(struct trace_array *tr)
+int tracer_tracing_is_on(struct trace_array *tr)
{
if (tr->trace_buffer.buffer)
return ring_buffer_record_is_on(tr->trace_buffer.buffer);
@@ -4969,7 +4969,7 @@ out:
return ret;
}
-#ifdef CONFIG_TRACER_MAX_TRACE
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
@@ -5892,7 +5892,7 @@ static const struct file_operations tracing_thresh_fops = {
.llseek = generic_file_llseek,
};
-#ifdef CONFIG_TRACER_MAX_TRACE
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
@@ -7222,7 +7222,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
create_trace_options_dir(tr);
-#ifdef CONFIG_TRACER_MAX_TRACE
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tr->max_latency, &tracing_max_lat_fops);
#endif
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f783df416726..fd24b1f9ac43 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -38,6 +38,7 @@ enum trace_type {
TRACE_USER_STACK,
TRACE_BLK,
TRACE_BPUTS,
+ TRACE_HWLAT,
__TRACE_LAST_TYPE,
};
@@ -213,6 +214,8 @@ struct trace_array {
*/
struct trace_buffer max_buffer;
bool allocated_snapshot;
+#endif
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
unsigned long max_latency;
#endif
struct trace_pid_list __rcu *filtered_pids;
@@ -326,6 +329,7 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
+ IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
@@ -571,6 +575,7 @@ void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
+int tracer_tracing_is_on(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
umode_t mode,
struct dentry *parent,
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 5c30efcda5e6..d1cc37e78f99 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -322,3 +322,30 @@ FTRACE_ENTRY(branch, trace_branch,
FILTER_OTHER
);
+
+FTRACE_ENTRY(hwlat, hwlat_entry,
+
+ TRACE_HWLAT,
+
+ F_STRUCT(
+ __field( u64, duration )
+ __field( u64, outer_duration )
+ __field( u64, nmi_total_ts )
+ __field_struct( struct timespec, timestamp )
+ __field_desc( long, timestamp, tv_sec )
+ __field_desc( long, timestamp, tv_nsec )
+ __field( unsigned int, nmi_count )
+ __field( unsigned int, seqnum )
+ ),
+
+ F_printk("cnt:%u\tts:%010lu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
+ __entry->seqnum,
+ __entry->tv_sec,
+ __entry->tv_nsec,
+ __entry->duration,
+ __entry->outer_duration,
+ __entry->nmi_total_ts,
+ __entry->nmi_count),
+
+ FILTER_OTHER
+);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index a975571cde24..6721a1e89f39 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1028,6 +1028,7 @@ static struct event_command trigger_traceon_cmd = {
static struct event_command trigger_traceoff_cmd = {
.name = "traceoff",
.trigger_type = ETT_TRACE_ONOFF,
+ .flags = EVENT_CMD_FL_POST_TRIGGER,
.func = event_trigger_callback,
.reg = register_trigger,
.unreg = unregister_trigger,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0cbe38a844fa..4e480e870474 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -170,7 +170,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
- current->ret_stack[index].subtime = 0;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
current->ret_stack[index].fp = frame_pointer;
#endif
@@ -1183,6 +1182,11 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
trace_seq_puts(s, "/* ");
switch (iter->ent->type) {
+ case TRACE_BPUTS:
+ ret = trace_print_bputs_msg_only(iter);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ break;
case TRACE_BPRINT:
ret = trace_print_bprintk_msg_only(iter);
if (ret != TRACE_TYPE_HANDLED)
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
new file mode 100644
index 000000000000..b97286c48735
--- /dev/null
+++ b/kernel/trace/trace_hwlat.c
@@ -0,0 +1,633 @@
+/*
+ * trace_hwlat.c - A simple Hardware Latency detector.
+ *
+ * Use this tracer to detect large system latencies induced by the behavior of
+ * certain underlying system hardware or firmware, independent of Linux itself.
+ * The code was developed originally to detect the presence of SMIs on Intel
+ * and AMD systems, although there is no dependency upon x86 herein.
+ *
+ * The classical example usage of this tracer is in detecting the presence of
+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
+ * LPC (or other device) to generate a special interrupt under certain
+ * circumstances, for example, upon expiration of a special SMI timer device,
+ * due to certain external thermal readings, on certain I/O address accesses,
+ * and other situations. An SMI hits a special CPU pin, triggers a special
+ * SMI mode (complete with special memory map), and the OS is unaware.
+ *
+ * Although certain hardware-induced latencies are necessary (for example,
+ * a modern system often requires an SMI handler for correct thermal control
+ * and remote management), they can wreak havoc upon any OS-level performance
+ * guarantees toward low latency, especially when the OS is not even made
+ * aware of the presence of these interrupts. For this reason, we need a
+ * somewhat brute force mechanism to detect these interrupts. In this case,
+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
+ * sampling the built-in CPU timer, looking for discontiguous readings.
+ *
+ * WARNING: This implementation necessarily introduces latencies. Therefore,
+ * you should NEVER use this tracer while running in a production
+ * environment requiring any kind of low-latency performance
+ * guarantee(s).
+ *
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
+ *
+ * Includes useful feedback from Clark Williams <clark@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
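The brute-force approach described in the comment above can be illustrated, very roughly, by the stand-alone user-space sketch below. It is not part of this file and, unlike get_sample() further down, it cannot disable interrupts, so it will also pick up ordinary preemption and scheduler noise rather than only hardware/firmware stalls.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	const uint64_t width_ns  = 500000000ULL;	/* sample for 0.5s   */
	const uint64_t thresh_ns = 10000ULL;		/* report gaps >10us */
	uint64_t start = now_ns(), t1, t2 = start, max_gap = 0;

	do {
		t1 = now_ns();	/* look for a discontinuity between ... */
		t2 = now_ns();	/* ... two back-to-back clock readings  */
		if (t2 - t1 > max_gap)
			max_gap = t2 - t1;
	} while (t2 - start < width_ns);

	if (max_gap > thresh_ns)
		printf("largest gap seen: %llu ns\n",
		       (unsigned long long)max_gap);
	return 0;
}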
+#include <linux/kthread.h>
+#include <linux/tracefs.h>
+#include <linux/uaccess.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include "trace.h"
+
+static struct trace_array *hwlat_trace;
+
+#define U64STR_SIZE 22 /* 20 digits max */
+
+#define BANNER "hwlat_detector: "
+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
+
+/* sampling thread */
+static struct task_struct *hwlat_kthread;
+
+static struct dentry *hwlat_sample_width; /* sample width us */
+static struct dentry *hwlat_sample_window; /* sample window us */
+
+/* Save the previous tracing_thresh value */
+static unsigned long save_tracing_thresh;
+
+/* NMI timestamp counters */
+static u64 nmi_ts_start;
+static u64 nmi_total_ts;
+static int nmi_count;
+static int nmi_cpu;
+
+/* Tells NMIs to call back to the hwlat tracer to record timestamps */
+bool trace_hwlat_callback_enabled;
+
+/* If the user changed threshold, remember it */
+static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;
+
+/* Individual latency samples are stored here when detected. */
+struct hwlat_sample {
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* delta */
+ u64 outer_duration; /* delta (outer loop) */
+ u64 nmi_total_ts; /* Total time spent in NMIs */
+ struct timespec timestamp; /* wall time */
+ int nmi_count; /* # NMIs during this sample */
+};
+
+/* keep the global state somewhere. */
+static struct hwlat_data {
+
+ struct mutex lock; /* protect changes */
+
+ u64 count; /* total since reset */
+
+ u64 sample_window; /* total sampling window (on+off) */
+ u64 sample_width; /* active sampling portion of window */
+
+} hwlat_data = {
+ .sample_window = DEFAULT_SAMPLE_WINDOW,
+ .sample_width = DEFAULT_SAMPLE_WIDTH,
+};
+
+static void trace_hwlat_sample(struct hwlat_sample *sample)
+{
+ struct trace_array *tr = hwlat_trace;
+ struct trace_event_call *call = &event_hwlat;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct hwlat_entry *entry;
+ unsigned long flags;
+ int pc;
+
+ pc = preempt_count();
+ local_save_flags(flags);
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
+ flags, pc);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->seqnum = sample->seqnum;
+ entry->duration = sample->duration;
+ entry->outer_duration = sample->outer_duration;
+ entry->timestamp = sample->timestamp;
+ entry->nmi_total_ts = sample->nmi_total_ts;
+ entry->nmi_count = sample->nmi_count;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ __buffer_unlock_commit(buffer, event);
+}
+
+/* Macros to encapsulate the time capturing infrastructure */
+#define time_type u64
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+#define init_time(a, b) (a = b)
+#define time_u64(a) a
+
+void trace_hwlat_callback(bool enter)
+{
+ if (smp_processor_id() != nmi_cpu)
+ return;
+
+ /*
+ * Currently trace_clock_local() calls sched_clock() and the
+ * generic version is not NMI safe.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
+ if (enter)
+ nmi_ts_start = time_get();
+ else
+ nmi_total_ts = time_get() - nmi_ts_start;
+ }
+
+ if (enter)
+ nmi_count++;
+}
+
+/**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+ * hardware-induced latency. Called with interrupts disabled and with
+ * hwlat_data.lock held.
+ */
+static int get_sample(void)
+{
+ struct trace_array *tr = hwlat_trace;
+ time_type start, t1, t2, last_t2;
+ s64 diff, total, last_total = 0;
+ u64 sample = 0;
+ u64 thresh = tracing_thresh;
+ u64 outer_sample = 0;
+ int ret = -1;
+
+ do_div(thresh, NSEC_PER_USEC); /* modifies thresh in place */
+
+ nmi_cpu = smp_processor_id();
+ nmi_total_ts = 0;
+ nmi_count = 0;
+ /* Make sure NMIs see this first */
+ barrier();
+
+ trace_hwlat_callback_enabled = true;
+
+ init_time(last_t2, 0);
+ start = time_get(); /* start timestamp */
+
+ do {
+
+ t1 = time_get(); /* we'll look for a discontinuity */
+ t2 = time_get();
+
+ if (time_u64(last_t2)) {
+ /* Check the delta from outer loop (t2 to next t1) */
+ diff = time_to_us(time_sub(t1, last_t2));
+ /* This shouldn't happen */
+ if (diff < 0) {
+ pr_err(BANNER "time running backwards\n");
+ goto out;
+ }
+ if (diff > outer_sample)
+ outer_sample = diff;
+ }
+ last_t2 = t2;
+
+ total = time_to_us(time_sub(t2, start)); /* sample width */
+
+ /* Check for possible overflows */
+ if (total < last_total) {
+ pr_err("Time total overflowed\n");
+ break;
+ }
+ last_total = total;
+
+ /* This checks the inner loop (t1 to t2) */
+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+ if (diff < 0) {
+ pr_err(BANNER "time running backwards\n");
+ goto out;
+ }
+
+ if (diff > sample)
+ sample = diff; /* only want highest value */
+
+ } while (total <= hwlat_data.sample_width);
+
+ barrier(); /* finish the sampling above before NMIs see the disable below */
+ trace_hwlat_callback_enabled = false;
+ barrier(); /* Make sure nmi_total_ts is no longer updated */
+
+ ret = 0;
+
+ /* If we exceed the threshold value, we have found a hardware latency */
+ if (sample > thresh || outer_sample > thresh) {
+ struct hwlat_sample s;
+
+ ret = 1;
+
+ /* We read in microseconds */
+ if (nmi_total_ts)
+ do_div(nmi_total_ts, NSEC_PER_USEC);
+
+ hwlat_data.count++;
+ s.seqnum = hwlat_data.count;
+ s.duration = sample;
+ s.outer_duration = outer_sample;
+ s.timestamp = CURRENT_TIME;
+ s.nmi_total_ts = nmi_total_ts;
+ s.nmi_count = nmi_count;
+ trace_hwlat_sample(&s);
+
+ /* Keep a running maximum ever recorded hardware latency */
+ if (sample > tr->max_latency)
+ tr->max_latency = sample;
+ }
+
+out:
+ return ret;
+}
+
+static struct cpumask save_cpumask;
+static bool disable_migrate;
+
+static void move_to_next_cpu(void)
+{
+ static struct cpumask *current_mask;
+ int next_cpu;
+
+ if (disable_migrate)
+ return;
+
+ /* Just pick the first CPU on first iteration */
+ if (!current_mask) {
+ current_mask = &save_cpumask;
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ put_online_cpus();
+ next_cpu = cpumask_first(current_mask);
+ goto set_affinity;
+ }
+
+ /*
+ * If for some reason the user modifies the CPU affinity
+ * of this thread, then stop migrating for the duration
+ * of the current test.
+ */
+ if (!cpumask_equal(current_mask, &current->cpus_allowed))
+ goto disable;
+
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ next_cpu = cpumask_next(smp_processor_id(), current_mask);
+ put_online_cpus();
+
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(current_mask);
+
+ set_affinity:
+ if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
+ goto disable;
+
+ cpumask_clear(current_mask);
+ cpumask_set_cpu(next_cpu, current_mask);
+
+ sched_setaffinity(0, current_mask);
+ return;
+
+ disable:
+ disable_migrate = true;
+}
+
+/*
+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+ * disable interrupts, which does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus preempting).
+ * Obviously this should never be used in production environments.
+ *
+ * Currently this runs on whichever CPU it was scheduled on, but most
+ * real-world hardware latency situations occur across several CPUs;
+ * we might later generalize this if we find there are any actual
+ * systems with alternate SMI delivery or other hardware latencies.
+ */
+static int kthread_fn(void *data)
+{
+ u64 interval;
+
+ while (!kthread_should_stop()) {
+
+ move_to_next_cpu();
+
+ local_irq_disable();
+ get_sample();
+ local_irq_enable();
+
+ mutex_lock(&hwlat_data.lock);
+ interval = hwlat_data.sample_window - hwlat_data.sample_width;
+ mutex_unlock(&hwlat_data.lock);
+
+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+
+ /* Always sleep for at least 1ms */
+ if (interval < 1)
+ interval = 1;
+
+ if (msleep_interruptible(interval))
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * start_kthread - Kick off the hardware latency sampling/detector kthread
+ *
+ * This starts the kernel thread that will sit and sample the CPU timestamp
+ * counter (TSC or similar) and look for potential hardware latencies.
+ */
+static int start_kthread(struct trace_array *tr)
+{
+ struct task_struct *kthread;
+
+ kthread = kthread_create(kthread_fn, NULL, "hwlatd");
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ return -ENOMEM;
+ }
+ hwlat_kthread = kthread;
+ wake_up_process(kthread);
+
+ return 0;
+}
+
+/**
+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static void stop_kthread(void)
+{
+ if (!hwlat_kthread)
+ return;
+ kthread_stop(hwlat_kthread);
+ hwlat_kthread = NULL;
+}
+
+/*
+ * hwlat_read - Wrapper read function for reading both window and width
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a generic read implementation for the global state
+ * "hwlat_data" structure filesystem entries.
+ */
+static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[U64STR_SIZE];
+ u64 *entry = filp->private_data;
+ u64 val;
+ int len;
+
+ if (!entry)
+ return -EFAULT;
+
+ if (cnt > sizeof(buf))
+ cnt = sizeof(buf);
+
+ val = *entry;
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/**
+ * hwlat_width_write - Write function for "width" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in @file
+ *
+ * This function provides a write implementation for the "width" interface
+ * to the hardware latency detector. It can be used to configure
+ * for how many microseconds of the total window we will actively sample
+ * for any hardware-induced latency periods. Obviously, it is not possible
+ * to sample constantly and have the system respond to a sample reader, or,
+ * worse, without having the system appear to have gone out to lunch. It
+ * is enforced that width is less than the total window size.
+ */
+static ssize_t
+hwlat_width_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ u64 val;
+ int err;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&hwlat_data.lock);
+ if (val < hwlat_data.sample_window)
+ hwlat_data.sample_width = val;
+ else
+ err = -EINVAL;
+ mutex_unlock(&hwlat_data.lock);
+
+ if (err)
+ return err;
+
+ return cnt;
+}
+
+/**
+ * hwlat_window_write - Write function for "window" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in @file
+ *
+ * This function provides a write implementation for the "window" interface
+ * to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to write a new total window size. It
+ * is enforced that any value written must be greater than the sample width
+ * size, or an error results.
+ */
+static ssize_t
+hwlat_window_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ u64 val;
+ int err;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&hwlat_data.lock);
+ if (hwlat_data.sample_width < val)
+ hwlat_data.sample_window = val;
+ else
+ err = -EINVAL;
+ mutex_unlock(&hwlat_data.lock);
+
+ if (err)
+ return err;
+
+ return cnt;
+}
+
+static const struct file_operations width_fops = {
+ .open = tracing_open_generic,
+ .read = hwlat_read,
+ .write = hwlat_width_write,
+};
+
+static const struct file_operations window_fops = {
+ .open = tracing_open_generic,
+ .read = hwlat_read,
+ .write = hwlat_window_write,
+};
+
+/**
+ * init_tracefs - A function to initialize the tracefs interface files
+ *
+ * This function creates entries in tracefs for "hwlat_detector".
+ * It creates the hwlat_detector directory in the tracing directory,
+ * and within that directory are the width and window files to
+ * change and view those values.
+ */
+static int init_tracefs(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *top_dir;
+
+ d_tracer = tracing_init_dentry();
+ if (IS_ERR(d_tracer))
+ return -ENOMEM;
+
+ top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
+ if (!top_dir)
+ return -ENOMEM;
+
+ hwlat_sample_window = tracefs_create_file("window", 0640,
+ top_dir,
+ &hwlat_data.sample_window,
+ &window_fops);
+ if (!hwlat_sample_window)
+ goto err;
+
+ hwlat_sample_width = tracefs_create_file("width", 0644,
+ top_dir,
+ &hwlat_data.sample_width,
+ &width_fops);
+ if (!hwlat_sample_width)
+ goto err;
+
+ return 0;
+
+ err:
+ tracefs_remove_recursive(top_dir);
+ return -ENOMEM;
+}
+
+static void hwlat_tracer_start(struct trace_array *tr)
+{
+ int err;
+
+ err = start_kthread(tr);
+ if (err)
+ pr_err(BANNER "Cannot start hwlat kthread\n");
+}
+
+static void hwlat_tracer_stop(struct trace_array *tr)
+{
+ stop_kthread();
+}
+
+static bool hwlat_busy;
+
+static int hwlat_tracer_init(struct trace_array *tr)
+{
+ /* Only allow one instance to enable this */
+ if (hwlat_busy)
+ return -EBUSY;
+
+ hwlat_trace = tr;
+
+ disable_migrate = false;
+ hwlat_data.count = 0;
+ tr->max_latency = 0;
+ save_tracing_thresh = tracing_thresh;
+
+ /* tracing_thresh is in nsecs, we speak in usecs */
+ if (!tracing_thresh)
+ tracing_thresh = last_tracing_thresh;
+
+ if (tracer_tracing_is_on(tr))
+ hwlat_tracer_start(tr);
+
+ hwlat_busy = true;
+
+ return 0;
+}
+
+static void hwlat_tracer_reset(struct trace_array *tr)
+{
+ stop_kthread();
+
+ /* the tracing threshold is static between runs */
+ last_tracing_thresh = tracing_thresh;
+
+ tracing_thresh = save_tracing_thresh;
+ hwlat_busy = false;
+}
+
+static struct tracer hwlat_tracer __read_mostly =
+{
+ .name = "hwlat",
+ .init = hwlat_tracer_init,
+ .reset = hwlat_tracer_reset,
+ .start = hwlat_tracer_start,
+ .stop = hwlat_tracer_stop,
+ .allow_instances = true,
+};
+
+__init static int init_hwlat_tracer(void)
+{
+ int ret;
+
+ mutex_init(&hwlat_data.lock);
+
+ ret = register_tracer(&hwlat_tracer);
+ if (ret)
+ return ret;
+
+ init_tracefs();
+
+ return 0;
+}
+late_initcall(init_hwlat_tracer);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 0bb9cf2d53e6..3fc20422c166 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1098,6 +1098,71 @@ static struct trace_event trace_user_stack_event = {
.funcs = &trace_user_stack_funcs,
};
+/* TRACE_HWLAT */
+static enum print_line_t
+trace_hwlat_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct hwlat_entry *field;
+
+ trace_assign_type(field, entry);
+
+ trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld",
+ field->seqnum,
+ field->duration,
+ field->outer_duration,
+ field->timestamp.tv_sec,
+ field->timestamp.tv_nsec);
+
+ if (field->nmi_count) {
+ /*
+ * The generic sched_clock() is not NMI safe, thus
+ * we only record the count and not the time.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
+ trace_seq_printf(s, " nmi-total:%llu",
+ field->nmi_total_ts);
+ trace_seq_printf(s, " nmi-count:%u",
+ field->nmi_count);
+ }
+
+ trace_seq_putc(s, '\n');
+
+ return trace_handle_return(s);
+}
+
+
+static enum print_line_t
+trace_hwlat_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct hwlat_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%llu %lld %ld %09ld %u\n",
+ field->duration,
+ field->outer_duration,
+ field->timestamp.tv_sec,
+ field->timestamp.tv_nsec,
+ field->seqnum);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_hwlat_funcs = {
+ .trace = trace_hwlat_print,
+ .raw = trace_hwlat_raw,
+};
+
+static struct trace_event trace_hwlat_event = {
+ .type = TRACE_HWLAT,
+ .funcs = &trace_hwlat_funcs,
+};
+
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
@@ -1233,6 +1298,7 @@ static struct trace_event *events[] __initdata = {
&trace_bputs_event,
&trace_bprint_event,
&trace_print_event,
+ &trace_hwlat_event,
NULL
};
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b2b6efc083a4..5e10395da88e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -610,8 +610,7 @@ static int perf_sysenter_enable(struct trace_event_call *call)
if (!sys_perf_refcount_enter)
ret = register_trace_sys_enter(perf_syscall_enter, NULL);
if (ret) {
- pr_info("event trace: Could not activate"
- "syscall entry trace point");
+ pr_info("event trace: Could not activate syscall entry trace point");
} else {
set_bit(num, enabled_perf_enter_syscalls);
sys_perf_refcount_enter++;
@@ -682,8 +681,7 @@ static int perf_sysexit_enable(struct trace_event_call *call)
if (!sys_perf_refcount_exit)
ret = register_trace_sys_exit(perf_syscall_exit, NULL);
if (ret) {
- pr_info("event trace: Could not activate"
- "syscall exit trace point");
+ pr_info("event trace: Could not activate syscall exit trace point");
} else {
set_bit(num, enabled_perf_exit_syscalls);
sys_perf_refcount_exit++;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7a687320f867..0913693caf6e 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -431,10 +431,6 @@ static int create_trace_uprobe(int argc, char **argv)
pr_info("Probe point is not specified.\n");
return -EINVAL;
}
- if (isdigit(argv[1][0])) {
- pr_info("probe point must be have a filename.\n");
- return -EINVAL;
- }
arg = strchr(argv[1], ':');
if (!arg) {
ret = -EINVAL;
diff --git a/kernel/ucount.c b/kernel/ucount.c
new file mode 100644
index 000000000000..9d20d5dd298a
--- /dev/null
+++ b/kernel/ucount.c
@@ -0,0 +1,235 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/user_namespace.h>
+
+#define UCOUNTS_HASHTABLE_BITS 10
+static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
+static DEFINE_SPINLOCK(ucounts_lock);
+
+#define ucounts_hashfn(ns, uid) \
+ hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
+ UCOUNTS_HASHTABLE_BITS)
+#define ucounts_hashentry(ns, uid) \
+ (ucounts_hashtable + ucounts_hashfn(ns, uid))
+
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_set *
+set_lookup(struct ctl_table_root *root)
+{
+ return &current_user_ns()->set;
+}
+
+static int set_is_seen(struct ctl_table_set *set)
+{
+ return &current_user_ns()->set == set;
+}
+
+static int set_permissions(struct ctl_table_header *head,
+ struct ctl_table *table)
+{
+ struct user_namespace *user_ns =
+ container_of(head->set, struct user_namespace, set);
+ int mode;
+
+ /* Allow users with CAP_SYS_RESOURCE unrestrained access */
+ if (ns_capable(user_ns, CAP_SYS_RESOURCE))
+ mode = (table->mode & S_IRWXU) >> 6;
+ else
+ /* Allow all others at most read-only access */
+ mode = table->mode & S_IROTH;
+ return (mode << 6) | (mode << 3) | mode;
+}
+
+static struct ctl_table_root set_root = {
+ .lookup = set_lookup,
+ .permissions = set_permissions,
+};
+
+static int zero = 0;
+static int int_max = INT_MAX;
+#define UCOUNT_ENTRY(name) \
+ { \
+ .procname = name, \
+ .maxlen = sizeof(int), \
+ .mode = 0644, \
+ .proc_handler = proc_dointvec_minmax, \
+ .extra1 = &zero, \
+ .extra2 = &int_max, \
+ }
+static struct ctl_table user_table[] = {
+ UCOUNT_ENTRY("max_user_namespaces"),
+ UCOUNT_ENTRY("max_pid_namespaces"),
+ UCOUNT_ENTRY("max_uts_namespaces"),
+ UCOUNT_ENTRY("max_ipc_namespaces"),
+ UCOUNT_ENTRY("max_net_namespaces"),
+ UCOUNT_ENTRY("max_mnt_namespaces"),
+ UCOUNT_ENTRY("max_cgroup_namespaces"),
+ { }
+};
+#endif /* CONFIG_SYSCTL */
+
+bool setup_userns_sysctls(struct user_namespace *ns)
+{
+#ifdef CONFIG_SYSCTL
+ struct ctl_table *tbl;
+ setup_sysctl_set(&ns->set, &set_root, set_is_seen);
+ tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
+ if (tbl) {
+ int i;
+ for (i = 0; i < UCOUNT_COUNTS; i++) {
+ tbl[i].data = &ns->ucount_max[i];
+ }
+ ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
+ }
+ if (!ns->sysctls) {
+ kfree(tbl);
+ retire_sysctl_set(&ns->set);
+ return false;
+ }
+#endif
+ return true;
+}
+
+void retire_userns_sysctls(struct user_namespace *ns)
+{
+#ifdef CONFIG_SYSCTL
+ struct ctl_table *tbl;
+
+ tbl = ns->sysctls->ctl_table_arg;
+ unregister_sysctl_table(ns->sysctls);
+ retire_sysctl_set(&ns->set);
+ kfree(tbl);
+#endif
+}
+
+static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
+{
+ struct ucounts *ucounts;
+
+ hlist_for_each_entry(ucounts, hashent, node) {
+ if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
+ return ucounts;
+ }
+ return NULL;
+}
+
+static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
+{
+ struct hlist_head *hashent = ucounts_hashentry(ns, uid);
+ struct ucounts *ucounts, *new;
+
+ spin_lock(&ucounts_lock);
+ ucounts = find_ucounts(ns, uid, hashent);
+ if (!ucounts) {
+ spin_unlock(&ucounts_lock);
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->ns = ns;
+ new->uid = uid;
+ atomic_set(&new->count, 0);
+
+ spin_lock(&ucounts_lock);
+ ucounts = find_ucounts(ns, uid, hashent);
+ if (ucounts) {
+ kfree(new);
+ } else {
+ hlist_add_head(&new->node, hashent);
+ ucounts = new;
+ }
+ }
+ if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+ ucounts = NULL;
+ spin_unlock(&ucounts_lock);
+ return ucounts;
+}
+
+static void put_ucounts(struct ucounts *ucounts)
+{
+ if (atomic_dec_and_test(&ucounts->count)) {
+ spin_lock(&ucounts_lock);
+ hlist_del_init(&ucounts->node);
+ spin_unlock(&ucounts_lock);
+
+ kfree(ucounts);
+ }
+}
+
+static inline bool atomic_inc_below(atomic_t *v, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c >= u))
+ return false;
+ old = atomic_cmpxchg(v, c, c+1);
+ if (likely(old == c))
+ return true;
+ c = old;
+ }
+}
+
+struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
+ enum ucount_type type)
+{
+ struct ucounts *ucounts, *iter, *bad;
+ struct user_namespace *tns;
+ ucounts = get_ucounts(ns, uid);
+ for (iter = ucounts; iter; iter = tns->ucounts) {
+ int max;
+ tns = iter->ns;
+ max = READ_ONCE(tns->ucount_max[type]);
+ if (!atomic_inc_below(&iter->ucount[type], max))
+ goto fail;
+ }
+ return ucounts;
+fail:
+ bad = iter;
+ for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
+ atomic_dec(&iter->ucount[type]);
+
+ put_ucounts(ucounts);
+ return NULL;
+}
+
+void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
+{
+ struct ucounts *iter;
+ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
+ int dec = atomic_dec_if_positive(&iter->ucount[type]);
+ WARN_ON_ONCE(dec < 0);
+ }
+ put_ucounts(ucounts);
+}
+
+static __init int user_namespace_sysctl_init(void)
+{
+#ifdef CONFIG_SYSCTL
+ static struct ctl_table_header *user_header;
+ static struct ctl_table empty[1];
+ /*
+ * It is necessary to register the user directory in the
+ * default set so that registrations in the child sets work
+ * properly.
+ */
+ user_header = register_sysctl("user", empty);
+ BUG_ON(!user_header);
+ BUG_ON(!setup_userns_sysctls(&init_user_ns));
+#endif
+ return 0;
+}
+subsys_initcall(user_namespace_sysctl_init);
+
+
diff --git a/kernel/uid16.c b/kernel/uid16.c
index d58cc4d8f0d1..cc40793464e3 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -117,7 +117,7 @@ static int groups16_to_user(old_gid_t __user *grouplist,
kgid_t kgid;
for (i = 0; i < group_info->ngroups; i++) {
- kgid = GROUP_AT(group_info, i);
+ kgid = group_info->gid[i];
group = high2lowgid(from_kgid_munged(user_ns, kgid));
if (put_user(group, grouplist+i))
return -EFAULT;
@@ -142,7 +142,7 @@ static int groups16_from_user(struct group_info *group_info,
if (!gid_valid(kgid))
return -EINVAL;
- GROUP_AT(group_info, i) = kgid;
+ group_info->gid[i] = kgid;
}
return 0;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 68f594212759..86b7854fec8e 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -29,6 +29,17 @@ static DEFINE_MUTEX(userns_state_mutex);
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *map);
+static void free_user_ns(struct work_struct *work);
+
+static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
+{
+ return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
+}
+
+static void dec_user_namespaces(struct ucounts *ucounts)
+{
+ return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
+}
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
@@ -62,10 +73,16 @@ int create_user_ns(struct cred *new)
struct user_namespace *ns, *parent_ns = new->user_ns;
kuid_t owner = new->euid;
kgid_t group = new->egid;
- int ret;
+ struct ucounts *ucounts;
+ int ret, i;
+ ret = -ENOSPC;
if (parent_ns->level > 32)
- return -EUSERS;
+ goto fail;
+
+ ucounts = inc_user_namespaces(parent_ns, owner);
+ if (!ucounts)
+ goto fail;
/*
* Verify that we can not violate the policy of which files
@@ -73,26 +90,27 @@ int create_user_ns(struct cred *new)
* by verifing that the root directory is at the root of the
* mount namespace which allows all files to be accessed.
*/
+ ret = -EPERM;
if (current_chrooted())
- return -EPERM;
+ goto fail_dec;
/* The creator needs a mapping in the parent user namespace
* or else we won't be able to reasonably tell userspace who
* created a user_namespace.
*/
+ ret = -EPERM;
if (!kuid_has_mapping(parent_ns, owner) ||
!kgid_has_mapping(parent_ns, group))
- return -EPERM;
+ goto fail_dec;
+ ret = -ENOMEM;
ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
- return -ENOMEM;
+ goto fail_dec;
ret = ns_alloc_inum(&ns->ns);
- if (ret) {
- kmem_cache_free(user_ns_cachep, ns);
- return ret;
- }
+ if (ret)
+ goto fail_free;
ns->ns.ops = &userns_operations;
atomic_set(&ns->count, 1);
@@ -101,18 +119,37 @@ int create_user_ns(struct cred *new)
ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
+ INIT_WORK(&ns->work, free_user_ns);
+ for (i = 0; i < UCOUNT_COUNTS; i++) {
+ ns->ucount_max[i] = INT_MAX;
+ }
+ ns->ucounts = ucounts;
/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
mutex_lock(&userns_state_mutex);
ns->flags = parent_ns->flags;
mutex_unlock(&userns_state_mutex);
- set_cred_user_ns(new, ns);
-
#ifdef CONFIG_PERSISTENT_KEYRINGS
init_rwsem(&ns->persistent_keyring_register_sem);
#endif
+ ret = -ENOMEM;
+ if (!setup_userns_sysctls(ns))
+ goto fail_keyring;
+
+ set_cred_user_ns(new, ns);
return 0;
+fail_keyring:
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
+ ns_free_inum(&ns->ns);
+fail_free:
+ kmem_cache_free(user_ns_cachep, ns);
+fail_dec:
+ dec_user_namespaces(ucounts);
+fail:
+ return ret;
}
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
@@ -135,21 +172,30 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
return err;
}
-void free_user_ns(struct user_namespace *ns)
+static void free_user_ns(struct work_struct *work)
{
- struct user_namespace *parent;
+ struct user_namespace *parent, *ns =
+ container_of(work, struct user_namespace, work);
do {
+ struct ucounts *ucounts = ns->ucounts;
parent = ns->parent;
+ retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
key_put(ns->persistent_keyring_register);
#endif
ns_free_inum(&ns->ns);
kmem_cache_free(user_ns_cachep, ns);
+ dec_user_namespaces(ucounts);
ns = parent;
} while (atomic_dec_and_test(&parent->count));
}
-EXPORT_SYMBOL(free_user_ns);
+
+void __put_user_ns(struct user_namespace *ns)
+{
+ schedule_work(&ns->work);
+}
+EXPORT_SYMBOL(__put_user_ns);
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
@@ -1004,12 +1050,37 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
return commit_creds(cred);
}
+struct ns_common *ns_get_owner(struct ns_common *ns)
+{
+ struct user_namespace *my_user_ns = current_user_ns();
+ struct user_namespace *owner, *p;
+
+ /* See if the owner is in the current user namespace */
+ owner = p = ns->ops->owner(ns);
+ for (;;) {
+ if (!p)
+ return ERR_PTR(-EPERM);
+ if (p == my_user_ns)
+ break;
+ p = p->parent;
+ }
+
+ return &get_user_ns(owner)->ns;
+}
+
+static struct user_namespace *userns_owner(struct ns_common *ns)
+{
+ return to_user_ns(ns)->parent;
+}
+
const struct proc_ns_operations userns_operations = {
.name = "user",
.type = CLONE_NEWUSER,
.get = userns_get,
.put = userns_put,
.install = userns_install,
+ .owner = userns_owner,
+ .get_parent = ns_get_owner,
};
static __init int user_namespaces_init(void)
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 831ea7108232..6976cd47dcf6 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -17,6 +17,16 @@
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
+static struct ucounts *inc_uts_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_UTS_NAMESPACES);
+}
+
+static void dec_uts_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_UTS_NAMESPACES);
+}
+
static struct uts_namespace *create_uts_ns(void)
{
struct uts_namespace *uts_ns;
@@ -36,18 +46,24 @@ static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
struct uts_namespace *old_ns)
{
struct uts_namespace *ns;
+ struct ucounts *ucounts;
int err;
+ err = -ENOSPC;
+ ucounts = inc_uts_namespaces(user_ns);
+ if (!ucounts)
+ goto fail;
+
+ err = -ENOMEM;
ns = create_uts_ns();
if (!ns)
- return ERR_PTR(-ENOMEM);
+ goto fail_dec;
err = ns_alloc_inum(&ns->ns);
- if (err) {
- kfree(ns);
- return ERR_PTR(err);
- }
+ if (err)
+ goto fail_free;
+ ns->ucounts = ucounts;
ns->ns.ops = &utsns_operations;
down_read(&uts_sem);
@@ -55,6 +71,13 @@ static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
ns->user_ns = get_user_ns(user_ns);
up_read(&uts_sem);
return ns;
+
+fail_free:
+ kfree(ns);
+fail_dec:
+ dec_uts_namespaces(ucounts);
+fail:
+ return ERR_PTR(err);
}
/*
@@ -85,6 +108,7 @@ void free_uts_ns(struct kref *kref)
struct uts_namespace *ns;
ns = container_of(kref, struct uts_namespace, kref);
+ dec_uts_namespaces(ns->ucounts);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
kfree(ns);
@@ -130,10 +154,16 @@ static int utsns_install(struct nsproxy *nsproxy, struct ns_common *new)
return 0;
}
+static struct user_namespace *utsns_owner(struct ns_common *ns)
+{
+ return to_uts_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
+ .owner = utsns_owner,
};
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ef071ca73fc3..bd81f0390277 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2974,6 +2974,31 @@ bool flush_delayed_work(struct delayed_work *dwork)
}
EXPORT_SYMBOL(flush_delayed_work);
+static bool __cancel_work(struct work_struct *work, bool is_dwork)
+{
+ unsigned long flags;
+ int ret;
+
+ do {
+ ret = try_to_grab_pending(work, is_dwork, &flags);
+ } while (unlikely(ret == -EAGAIN));
+
+ if (unlikely(ret < 0))
+ return false;
+
+ set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+ local_irq_restore(flags);
+ return ret;
+}
+
+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+ return __cancel_work(work, false);
+}
+
/**
* cancel_delayed_work - cancel a delayed work
* @dwork: delayed_work to cancel
@@ -2992,20 +3017,7 @@ EXPORT_SYMBOL(flush_delayed_work);
*/
bool cancel_delayed_work(struct delayed_work *dwork)
{
- unsigned long flags;
- int ret;
-
- do {
- ret = try_to_grab_pending(&dwork->work, true, &flags);
- } while (unlikely(ret == -EAGAIN));
-
- if (unlikely(ret < 0))
- return false;
-
- set_work_pool_and_clear_pending(&dwork->work,
- get_work_pool_id(&dwork->work));
- local_irq_restore(flags);
- return ret;
+ return __cancel_work(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work);
diff --git a/lib/Kconfig b/lib/Kconfig
index d79909dc01ec..260a80e313b9 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -457,9 +457,6 @@ config NLATTR
config GENERIC_ATOMIC64
bool
-config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- def_bool y if GENERIC_ATOMIC64
-
config LRU_CACHE
tristate
@@ -550,4 +547,7 @@ config STACKDEPOT
bool
select STACKTRACE
+config SBITMAP
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cab7405f48d2..39d07e754822 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,7 +305,7 @@ config DEBUG_SECTION_MISMATCH
a larger kernel).
- Run the section mismatch analysis for each module/built-in.o file.
When we run the section mismatch analysis on vmlinux.o, we
- lose valueble information about where the mismatch was
+ lose valuable information about where the mismatch was
introduced.
Running the analysis for each module/built-in.o file
tells where the mismatch happens much closer to the
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 39494af9a84a..bc6e651df68c 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,6 +1,9 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
+config ARCH_WANTS_UBSAN_NO_NULL
+ def_bool n
+
config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -34,3 +37,11 @@ config UBSAN_ALIGNMENT
This option enables detection of unaligned memory accesses.
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
+
+config UBSAN_NULL
+ bool "Enable checking of null pointers"
+ depends on UBSAN
+ default y if !ARCH_WANTS_UBSAN_NO_NULL
+ help
+ This option enables detection of memory accesses via a
+ null pointer.
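For reference, the kind of access this option is meant to catch looks like the minimal user-space example below. The -fsanitize=null flag shown is the GCC/Clang user-space equivalent and is given only for illustration; it is not how the kernel build wires this Kconfig option up.

/* Build with: gcc -fsanitize=null -O0 null_demo.c */
#include <stdio.h>

struct point { int x; };

int main(void)
{
	struct point *p = (struct point *)0;

	/*
	 * With -fsanitize=null, UBSAN reports a "member access within null
	 * pointer" error here at run time (the program may still crash after
	 * the report, since the load itself is not prevented).
	 */
	printf("%d\n", p->x);
	return 0;
}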
diff --git a/lib/Makefile b/lib/Makefile
index 5dc77a8ec297..f3ca8c0ab634 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,7 +22,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
+ earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -227,3 +227,5 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
+
+obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index dbb369145dda..46042901130f 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -213,7 +213,6 @@ static __init void test_atomic64(void)
r += one;
BUG_ON(v.counter != r);
-#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
INIT(onestwos);
BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
r -= one;
@@ -226,9 +225,6 @@ static __init void test_atomic64(void)
INIT(-one);
BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
BUG_ON(v.counter != r);
-#else
-#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
-#endif
INIT(onestwos);
BUG_ON(!atomic64_inc_not_zero(&v));
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 06f02f6aecd2..8971370bfb16 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -44,6 +44,7 @@ enum {
dma_debug_page,
dma_debug_sg,
dma_debug_coherent,
+ dma_debug_resource,
};
enum map_err_types {
@@ -151,8 +152,9 @@ static const char *const maperr2str[] = {
[MAP_ERR_CHECKED] = "dma map error checked",
};
-static const char *type2name[4] = { "single", "page",
- "scather-gather", "coherent" };
+static const char *type2name[5] = { "single", "page",
+ "scather-gather", "coherent",
+ "resource" };
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
@@ -400,6 +402,9 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
+ if (entry->type == dma_debug_resource)
+ return __pfn_to_phys(entry->pfn) + entry->offset;
+
return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
@@ -1519,6 +1524,49 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
}
EXPORT_SYMBOL(debug_dma_free_coherent);
+void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+ int direction, dma_addr_t dma_addr)
+{
+ struct dma_debug_entry *entry;
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ entry = dma_entry_alloc();
+ if (!entry)
+ return;
+
+ entry->type = dma_debug_resource;
+ entry->dev = dev;
+ entry->pfn = PHYS_PFN(addr);
+ entry->offset = offset_in_page(addr);
+ entry->size = size;
+ entry->dev_addr = dma_addr;
+ entry->direction = direction;
+ entry->map_err_type = MAP_ERR_NOT_CHECKED;
+
+ add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_resource);
+
+void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ struct dma_debug_entry ref = {
+ .type = dma_debug_resource,
+ .dev = dev,
+ .dev_addr = dma_addr,
+ .size = size,
+ .direction = direction,
+ };
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_resource);
+
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7e3138cfc8c9..48b8c27acabb 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -3,8 +3,11 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/splice.h>
#include <net/checksum.h>
+#define PIPE_PARANOIA /* for now */
+
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
size_t left; \
size_t wanted = n; \
@@ -290,6 +293,93 @@ done:
return wanted - bytes;
}
+#ifdef PIPE_PARANOIA
+static bool sanity(const struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ int next = pipe->curbuf + pipe->nrbufs;
+ if (i->iov_offset) {
+ struct pipe_buffer *p;
+ if (unlikely(!pipe->nrbufs))
+ goto Bad; // pipe must be non-empty
+ if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
+ goto Bad; // must be at the last buffer...
+
+ p = &pipe->bufs[idx];
+ if (unlikely(p->offset + p->len != i->iov_offset))
+ goto Bad; // ... at the end of segment
+ } else {
+ if (idx != (next & (pipe->buffers - 1)))
+ goto Bad; // must be right after the last buffer
+ }
+ return true;
+Bad:
+ printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
+ printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
+ pipe->curbuf, pipe->nrbufs, pipe->buffers);
+ for (idx = 0; idx < pipe->buffers; idx++)
+ printk(KERN_ERR "[%p %p %d %d]\n",
+ pipe->bufs[idx].ops,
+ pipe->bufs[idx].page,
+ pipe->bufs[idx].offset,
+ pipe->bufs[idx].len);
+ WARN_ON(1);
+ return false;
+}
+#else
+#define sanity(i) true
+#endif
+
+static inline int next_idx(int idx, struct pipe_inode_info *pipe)
+{
+ return (idx + 1) & (pipe->buffers - 1);
+}
+
+static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ struct pipe_buffer *buf;
+ size_t off;
+ int idx;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ if (!sanity(i))
+ return 0;
+
+ off = i->iov_offset;
+ idx = i->idx;
+ buf = &pipe->bufs[idx];
+ if (off) {
+ if (offset == off && buf->page == page) {
+ /* merge with the last one */
+ buf->len += bytes;
+ i->iov_offset += bytes;
+ goto out;
+ }
+ idx = next_idx(idx, pipe);
+ buf = &pipe->bufs[idx];
+ }
+ if (idx == pipe->curbuf && pipe->nrbufs)
+ return 0;
+ pipe->nrbufs++;
+ buf->ops = &page_cache_pipe_buf_ops;
+ get_page(buf->page = page);
+ buf->offset = offset;
+ buf->len = bytes;
+ i->iov_offset = offset + bytes;
+ i->idx = idx;
+out:
+ i->count -= bytes;
+ return bytes;
+}
+
/*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* bytes. For each iovec, fault in each page that constitutes the iovec.
@@ -356,9 +446,98 @@ static void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_atomic(addr);
}
+static inline bool allocated(struct pipe_buffer *buf)
+{
+ return buf->ops == &default_pipe_buf_ops;
+}
+
+static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
+{
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
+ idx = next_idx(idx, i->pipe);
+ off = 0;
+ }
+ *idxp = idx;
+ *offp = off;
+}
+
+static size_t push_pipe(struct iov_iter *i, size_t size,
+ int *idxp, size_t *offp)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t off;
+ int idx;
+ ssize_t left;
+
+ if (unlikely(size > i->count))
+ size = i->count;
+ if (unlikely(!size))
+ return 0;
+
+ left = size;
+ data_start(i, &idx, &off);
+ *idxp = idx;
+ *offp = off;
+ if (off) {
+ left -= PAGE_SIZE - off;
+ if (left <= 0) {
+ pipe->bufs[idx].len += size;
+ return size;
+ }
+ pipe->bufs[idx].len = PAGE_SIZE;
+ idx = next_idx(idx, pipe);
+ }
+ while (idx != pipe->curbuf || !pipe->nrbufs) {
+ struct page *page = alloc_page(GFP_USER);
+ if (!page)
+ break;
+ pipe->nrbufs++;
+ pipe->bufs[idx].ops = &default_pipe_buf_ops;
+ pipe->bufs[idx].page = page;
+ pipe->bufs[idx].offset = 0;
+ if (left <= PAGE_SIZE) {
+ pipe->bufs[idx].len = left;
+ return size;
+ }
+ pipe->bufs[idx].len = PAGE_SIZE;
+ left -= PAGE_SIZE;
+ idx = next_idx(idx, pipe);
+ }
+ return size - left;
+}
+
+static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= bytes;
+ return bytes;
+}
+
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
+ if (unlikely(i->type & ITER_PIPE))
+ return copy_pipe_to_iter(addr, bytes, i);
iterate_and_advance(i, bytes, v,
__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
v.iov_len),
@@ -374,6 +553,10 @@ EXPORT_SYMBOL(copy_to_iter);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
iterate_and_advance(i, bytes, v,
__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
v.iov_len),
@@ -389,6 +572,10 @@ EXPORT_SYMBOL(copy_from_iter);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
iterate_and_advance(i, bytes, v,
__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
@@ -409,14 +596,20 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
- } else
+ } else if (likely(!(i->type & ITER_PIPE)))
return copy_page_to_iter_iovec(page, offset, bytes, i);
+ else
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
@@ -427,8 +620,34 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
+static size_t pipe_zero(size_t bytes, struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ memzero_page(pipe->bufs[idx].page, off, chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk;
+ n -= chunk;
+ }
+ i->count -= bytes;
+ return bytes;
+}
+
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
__clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
@@ -443,6 +662,11 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(i->type & ITER_PIPE)) {
+ kunmap_atomic(kaddr);
+ WARN_ON(1);
+ return 0;
+ }
iterate_all_kinds(i, bytes, v,
__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
@@ -455,8 +679,49 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+static void pipe_advance(struct iov_iter *i, size_t size)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ struct pipe_buffer *buf;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+
+ if (unlikely(i->count < size))
+ size = i->count;
+
+ if (size) {
+ if (off) /* make it relative to the beginning of buffer */
+ size += off - pipe->bufs[idx].offset;
+ while (1) {
+ buf = &pipe->bufs[idx];
+ if (size <= buf->len)
+ break;
+ size -= buf->len;
+ idx = next_idx(idx, pipe);
+ }
+ buf->len = size;
+ i->idx = idx;
+ off = i->iov_offset = buf->offset + size;
+ }
+ if (off)
+ idx = next_idx(idx, pipe);
+ if (pipe->nrbufs) {
+ int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ /* [curbuf,unused) is in use. Free [idx,unused) */
+ while (idx != unused) {
+ pipe_buf_release(pipe, &pipe->bufs[idx]);
+ idx = next_idx(idx, pipe);
+ pipe->nrbufs--;
+ }
+ }
+}
+
void iov_iter_advance(struct iov_iter *i, size_t size)
{
+ if (unlikely(i->type & ITER_PIPE)) {
+ pipe_advance(i, size);
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -466,6 +731,8 @@ EXPORT_SYMBOL(iov_iter_advance);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
+ if (unlikely(i->type & ITER_PIPE))
+ return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
else if (i->type & ITER_BVEC)
@@ -501,6 +768,19 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_bvec);
+void iov_iter_pipe(struct iov_iter *i, int direction,
+ struct pipe_inode_info *pipe,
+ size_t count)
+{
+ BUG_ON(direction != ITER_PIPE);
+ i->type = direction;
+ i->pipe = pipe;
+ i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ i->iov_offset = 0;
+ i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_pipe);
+
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
@@ -509,6 +789,11 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
if (!size)
return 0;
+ if (unlikely(i->type & ITER_PIPE)) {
+ if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+ return size | i->iov_offset;
+ return size;
+ }
iterate_all_kinds(i, size, v,
(res |= (unsigned long)v.iov_base | v.iov_len, 0),
res |= v.bv_offset | v.bv_len,
@@ -525,6 +810,11 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
if (!size)
return 0;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return ~0U;
+ }
+
iterate_all_kinds(i, size, v,
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
(size != v.iov_len ? size : 0), 0),
@@ -537,6 +827,47 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
+static inline size_t __pipe_get_pages(struct iov_iter *i,
+ size_t maxsize,
+ struct page **pages,
+ int idx,
+ size_t *start)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n = push_pipe(i, maxsize, &idx, start);
+ if (!n)
+ return -EFAULT;
+
+ maxsize = n;
+ n += *start;
+ while (n >= PAGE_SIZE) {
+ get_page(*pages++ = pipe->bufs[idx].page);
+ idx = next_idx(idx, pipe);
+ n -= PAGE_SIZE;
+ }
+
+ return maxsize;
+}
+
+static ssize_t pipe_get_pages(struct iov_iter *i,
+ struct page **pages, size_t maxsize, unsigned maxpages,
+ size_t *start)
+{
+ unsigned npages;
+ size_t capacity;
+ int idx;
+
+ if (!sanity(i))
+ return -EFAULT;
+
+ data_start(i, &idx, start);
+ /* some of this one + all after this one */
+ npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
+ capacity = min(npages,maxpages) * PAGE_SIZE - *start;
+
+ return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
@@ -547,6 +878,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (!maxsize)
return 0;
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_get_pages(i, pages, maxsize, maxpages, start);
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -582,6 +915,37 @@ static struct page **get_pages_array(size_t n)
return p;
}
+static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ size_t *start)
+{
+ struct page **p;
+ size_t n;
+ int idx;
+ int npages;
+
+ if (!sanity(i))
+ return -EFAULT;
+
+ data_start(i, &idx, start);
+ /* some of this one + all after this one */
+ npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
+ n = npages * PAGE_SIZE - *start;
+ if (maxsize > n)
+ maxsize = n;
+ else
+ npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
+ p = get_pages_array(npages);
+ if (!p)
+ return -ENOMEM;
+ n = __pipe_get_pages(i, maxsize, p, idx, start);
+ if (n > 0)
+ *pages = p;
+ else
+ kvfree(p);
+ return n;
+}
+
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
@@ -594,6 +958,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (!maxsize)
return 0;
+ if (unlikely(i->type & ITER_PIPE))
+ return pipe_get_pages_alloc(i, pages, maxsize, start);
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -635,6 +1001,10 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_from_user(v.iov_base,
@@ -673,6 +1043,10 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1); /* for now */
+ return 0;
+ }
iterate_and_advance(i, bytes, v, ({
int err = 0;
next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
@@ -712,7 +1086,20 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
if (!size)
return 0;
- iterate_all_kinds(i, size, v, ({
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t off;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ data_start(i, &idx, &off);
+ /* some of this one + all after this one */
+ npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
+ if (npages >= maxpages)
+ return maxpages;
+ } else iterate_all_kinds(i, size, v, ({
unsigned long p = (unsigned long)v.iov_base;
npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- p / PAGE_SIZE;
@@ -737,6 +1124,10 @@ EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
+ if (unlikely(new->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return NULL;
+ }
if (new->type & ITER_BVEC)
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
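/*
 * Standalone illustration (not kernel code) of the power-of-two ring
 * arithmetic that next_idx(), data_start() and push_pipe() rely on above:
 * pipe->buffers is a power of two, so masking with (buffers - 1) wraps the
 * slot index around the buffer ring.
 */
#include <stdio.h>

int main(void)
{
	unsigned int buffers = 16;	/* stands in for pipe->buffers */
	unsigned int idx = 14;		/* stands in for the pipe slot index */
	int i;

	for (i = 0; i < 4; i++) {
		printf("slot %u\n", idx);
		idx = (idx + 1) & (buffers - 1);	/* same step as next_idx() */
	}
	return 0;	/* visits slots 14, 15, 0, 1 */
}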
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 26caf51cc238..75554754eadf 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -16,21 +16,23 @@
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
+#include <linux/cpu.h>
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;
/*
- * When raise() is called it will be is passed a pointer to the
+ * When raise() is called it will be passed a pointer to the
* backtrace_mask. Architectures that call nmi_cpu_backtrace()
* directly from their raise() functions may rely on the mask
* they are passed being updated as a side effect of this call.
*/
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self,
void (*raise)(cpumask_t *mask))
{
int i, this_cpu = get_cpu();
@@ -44,13 +46,22 @@ void nmi_trigger_all_cpu_backtrace(bool include_self,
return;
}
- cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
- if (!include_self)
+ cpumask_copy(to_cpumask(backtrace_mask), mask);
+ if (exclude_self)
cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+ /*
+ * Don't try to send an NMI to this cpu; it may work on some
+ * architectures, but on others it may not, and we'll get
+ * information at least as useful just by doing a dump_stack() here.
+ * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
+ */
+ if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
+ nmi_cpu_backtrace(NULL);
+
if (!cpumask_empty(to_cpumask(backtrace_mask))) {
- pr_info("Sending NMI to %s CPUs:\n",
- (include_self ? "all" : "other"));
+ pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
+ this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
raise(to_cpumask(backtrace_mask));
}
@@ -77,11 +88,16 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- pr_warn("NMI backtrace for cpu %d\n", cpu);
- if (regs)
- show_regs(regs);
- else
- dump_stack();
+ if (regs && cpu_in_idle(instruction_pointer(regs))) {
+ pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
+ cpu, instruction_pointer(regs));
+ } else {
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+ }
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
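/*
 * Hedged sketch (not part of the patch) of how an architecture wires up the
 * renamed helper, following the x86 pattern; raise_backtrace_ipi() is a
 * placeholder for the arch-specific routine that sends the backtrace NMI/IPI.
 */
static void raise_backtrace_ipi(cpumask_t *mask)
{
	/* send the backtrace NMI/IPI to every CPU still set in *mask */
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}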
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 91f0727e3cad..8e6d552c40dd 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_delete);
-struct radix_tree_node *radix_tree_replace_clear_tags(
- struct radix_tree_root *root,
- unsigned long index, void *entry)
+void radix_tree_clear_tags(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void **slot)
{
- struct radix_tree_node *node;
- void **slot;
-
- __radix_tree_lookup(root, index, &node, &slot);
-
if (node) {
unsigned int tag, offset = get_slot_offset(node, slot);
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
/* Clear root node tags */
root->gfp_mask &= __GFP_BITS_MASK;
}
-
- radix_tree_replace_slot(slot, entry);
- return node;
}
/**
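/*
 * Sketch of the caller-side pattern that replaces the removed
 * radix_tree_replace_clear_tags() helper: lookup, tag clearing and slot
 * replacement are now separate steps done by the caller (with the tree
 * appropriately locked); replace_and_clear() is an illustrative name.
 */
static void replace_and_clear(struct radix_tree_root *root,
			      unsigned long index, void *new_entry)
{
	struct radix_tree_node *node;
	void **slot;

	__radix_tree_lookup(root, index, &node, &slot);
	if (!slot)
		return;
	radix_tree_clear_tags(root, node, slot);
	radix_tree_replace_slot(slot, new_entry);
}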
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 0a7e494b2bcd..f01b1cb04f91 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -3,3 +3,4 @@ altivec*.c
int*.c
tables.c
neon?.c
+s390vx?.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 3b10a48fa040..3057011f5599 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -3,10 +3,11 @@ obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
int8.o int16.o int32.o
-raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
+raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
+raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
@@ -116,6 +117,11 @@ $(obj)/tilegx8.c: UNROLL := 8
$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
+targets += s390vx8.c
+$(obj)/s390vx8.c: UNROLL := 8
+$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
quiet_cmd_mktable = TABLE $@
cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 975c6e0434bd..7857049fd7d3 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -49,6 +49,10 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx2x1,
&raid6_avx2x2,
#endif
+#ifdef CONFIG_AS_AVX512
+ &raid6_avx512x1,
+ &raid6_avx512x2,
+#endif
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
&raid6_sse2x1,
@@ -59,6 +63,11 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx2x2,
&raid6_avx2x4,
#endif
+#ifdef CONFIG_AS_AVX512
+ &raid6_avx512x1,
+ &raid6_avx512x2,
+ &raid6_avx512x4,
+#endif
#endif
#ifdef CONFIG_ALTIVEC
&raid6_altivec1,
@@ -69,6 +78,9 @@ const struct raid6_calls * const raid6_algos[] = {
#if defined(CONFIG_TILEGX)
&raid6_tilegx8,
#endif
+#if defined(CONFIG_S390)
+ &raid6_s390vx8,
+#endif
&raid6_intx1,
&raid6_intx2,
&raid6_intx4,
@@ -89,12 +101,18 @@ void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
+#ifdef CONFIG_AS_AVX512
+ &raid6_recov_avx512,
+#endif
#ifdef CONFIG_AS_AVX2
&raid6_recov_avx2,
#endif
#ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3,
#endif
+#ifdef CONFIG_S390
+ &raid6_recov_s390xc,
+#endif
&raid6_recov_intx1,
NULL
};
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
new file mode 100644
index 000000000000..f524a7972006
--- /dev/null
+++ b/lib/raid6/avx512.c
@@ -0,0 +1,569 @@
+/* -*- linux-c -*- --------------------------------------------------------
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Author: Megha Dey <megha.dey@linux.intel.com>
+ *
+ * Based on avx2.c: Copyright 2012 Yuanhan Liu All Rights Reserved
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * AVX512 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#ifdef CONFIG_AS_AVX512
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static const struct raid6_avx512_constants {
+ u64 x1d[8];
+} raid6_avx512_constants __aligned(512) = {
+ { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
+};
+
+static int raid6_have_avx512(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512DQ);
+}
+
+static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0; d < bytes; d += 64) {
+ asm volatile("prefetchnta %0\n\t"
+ "vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
+ "prefetchnta %1\n\t"
+ "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
+ "vmovdqa64 %1,%%zmm6"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
+ for (z = z0-2; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
+ "vmovdqa64 %0,%%zmm6"
+ :
+ : "m" (dptr[z][d]));
+ }
+ asm volatile("vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
+ "vmovntdq %%zmm2,%0\n\t"
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
+ "vmovntdq %%zmm4,%1\n\t"
+ "vpxorq %%zmm4,%%zmm4,%%zmm4"
+ :
+ : "m" (p[d]), "m" (q[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ : : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 64) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm2\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2"
+ :
+ : "m" (dptr[z0][d]), "m" (p[d]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4"
+ :
+ : "m" (dptr[z][d]));
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4"
+ :
+ : );
+ }
+ asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
+ /* Don't use movntdq for r/w memory area < cache line */
+ "vmovdqa64 %%zmm4,%0\n\t"
+ "vmovdqa64 %%zmm2,%1"
+ :
+ : "m" (q[d]), "m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx512x1 = {
+ raid6_avx5121_gen_syndrome,
+ raid6_avx5121_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x1",
+ 1 /* Has cache hints */
+};
+
+/*
+ * Unrolled-by-2 AVX512 implementation
+ */
+static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ /* We uniformly assume a single prefetch covers at least 64 bytes */
+ for (d = 0; d < bytes; d += 128) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
+ "vmovdqa64 %1,%%zmm3\n\t" /* P[1] */
+ "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
+ "vmovdqa64 %%zmm3,%%zmm6" /* Q[1] */
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]));
+ for (z = z0-1; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vmovntdq %%zmm4,%2\n\t"
+ "vmovntdq %%zmm6,%3"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
+ "m" (q[d+64]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ : : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 128) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm6\n\t"
+ "vmovdqa64 %2,%%zmm2\n\t"
+ "vmovdqa64 %3,%%zmm3\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm3,%%zmm3"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
+ "m" (p[d]), "m" (p[d+64]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : );
+ }
+ asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
+ "vpxorq %1,%%zmm6,%%zmm6\n\t"
+ /* Don't use movntdq for r/w
+ * memory area < cache line
+ */
+ "vmovdqa64 %%zmm4,%0\n\t"
+ "vmovdqa64 %%zmm6,%1\n\t"
+ "vmovdqa64 %%zmm2,%2\n\t"
+ "vmovdqa64 %%zmm3,%3"
+ :
+ : "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
+ "m" (p[d+64]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx512x2 = {
+ raid6_avx5122_gen_syndrome,
+ raid6_avx5122_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x2",
+ 1 /* Has cache hints */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 AVX512 implementation
+ */
+static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1\n\t" /* Zero temp */
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t" /* P[0] */
+ "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t" /* P[1] */
+ "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t" /* Q[0] */
+ "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t" /* Q[1] */
+ "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t" /* P[2] */
+ "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t" /* P[3] */
+ "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t" /* Q[2] */
+ "vpxorq %%zmm14,%%zmm14,%%zmm14" /* Q[3] */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0; d < bytes; d += 256) {
+ for (z = z0; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "prefetchnta %2\n\t"
+ "prefetchnta %3\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm1,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm1,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vmovdqa64 %2,%%zmm13\n\t"
+ "vmovdqa64 %3,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm15,%%zmm11,%%zmm11\n"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
+ "m" (dptr[z][d+128]), "m" (dptr[z][d+192]));
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t"
+ "vmovntdq %%zmm10,%2\n\t"
+ "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t"
+ "vmovntdq %%zmm11,%3\n\t"
+ "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t"
+ "vmovntdq %%zmm4,%4\n\t"
+ "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vmovntdq %%zmm6,%5\n\t"
+ "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vmovntdq %%zmm12,%6\n\t"
+ "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vmovntdq %%zmm14,%7\n\t"
+ "vpxorq %%zmm14,%%zmm14,%%zmm14"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
+ "m" (q[d+128]), "m" (q[d+192]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ :: "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 256) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm6\n\t"
+ "vmovdqa64 %2,%%zmm12\n\t"
+ "vmovdqa64 %3,%%zmm14\n\t"
+ "vmovdqa64 %4,%%zmm2\n\t"
+ "vmovdqa64 %5,%%zmm3\n\t"
+ "vmovdqa64 %6,%%zmm10\n\t"
+ "vmovdqa64 %7,%%zmm11\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm12,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm14,%%zmm11,%%zmm11"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
+ "m" (dptr[z0][d+128]), "m" (dptr[z0][d+192]),
+ "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
+ "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
+ "prefetchnta %0\n\t"
+ "prefetchnta %2\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%Zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vmovdqa64 %2,%%zmm13\n\t"
+ "vmovdqa64 %3,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm15,%%zmm11,%%zmm11\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
+ "m" (dptr[z][d+128]),
+ "m" (dptr[z][d+192]));
+ }
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ :
+ : "m" (q[d]), "m" (q[d+128]));
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
+ "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : );
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vmovntdq %%zmm10,%2\n\t"
+ "vmovntdq %%zmm11,%3\n\t"
+ "vpxorq %4,%%zmm4,%%zmm4\n\t"
+ "vpxorq %5,%%zmm6,%%zmm6\n\t"
+ "vpxorq %6,%%zmm12,%%zmm12\n\t"
+ "vpxorq %7,%%zmm14,%%zmm14\n\t"
+ "vmovntdq %%zmm4,%4\n\t"
+ "vmovntdq %%zmm6,%5\n\t"
+ "vmovntdq %%zmm12,%6\n\t"
+ "vmovntdq %%zmm14,%7"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
+ "m" (q[d+128]), "m" (q[d+192]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+const struct raid6_calls raid6_avx512x4 = {
+ raid6_avx5124_gen_syndrome,
+ raid6_avx5124_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x4",
+ 1 /* Has cache hints */
+};
+#endif
+
+#endif /* CONFIG_AS_AVX512 */
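/*
 * Plain-C rendering (illustration only) of what the recurring
 * vpcmpgtb/vpmovm2b/vpaddb/vpandq/vpxorq sequence above computes for each
 * byte of the running Q syndrome: a GF(2^8) multiplication by 2 with the
 * RAID-6 polynomial 0x1d.  vpcmpgtb against zero flags bytes whose high bit
 * is set, vpaddb doubles every byte, and the flagged bytes get 0x1d folded
 * back in with XOR.
 */
static inline unsigned char raid6_gf_times2(unsigned char v)
{
	return (unsigned char)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
}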
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
new file mode 100644
index 000000000000..625aafa33b61
--- /dev/null
+++ b/lib/raid6/recov_avx512.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Author: Megha Dey <megha.dey@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#ifdef CONFIG_AS_AVX512
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_avx512(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512DQ);
+}
+
+static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /* zmm0 = x0f[16] */
+ asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa64 %0, %%zmm1\n\t"
+ "vmovdqa64 %1, %%zmm9\n\t"
+ "vmovdqa64 %2, %%zmm0\n\t"
+ "vmovdqa64 %3, %%zmm8\n\t"
+ "vpxorq %4, %%zmm1, %%zmm1\n\t"
+ "vpxorq %5, %%zmm9, %%zmm9\n\t"
+ "vpxorq %6, %%zmm0, %%zmm0\n\t"
+ "vpxorq %7, %%zmm8, %%zmm8"
+ :
+ : "m" (q[0]), "m" (q[64]), "m" (p[0]),
+ "m" (p[64]), "m" (dq[0]), "m" (dq[64]),
+ "m" (dp[0]), "m" (dp[64]));
+
+ /*
+ * 1 = dq[0] ^ q[0]
+ * 9 = dq[64] ^ q[64]
+ * 0 = dp[0] ^ p[0]
+ * 8 = dp[64] ^ p[64]
+ */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm5"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
+ "vpsraw $4, %%zmm9, %%zmm12\n\t"
+ "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
+ "vpandq %%zmm7, %%zmm9, %%zmm9\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
+ "vpshufb %%zmm9, %%zmm4, %%zmm14\n\t"
+ "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm12, %%zmm5, %%zmm15\n\t"
+ "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
+ "vpxorq %%zmm14, %%zmm15, %%zmm15\n\t"
+ "vpxorq %%zmm4, %%zmm5, %%zmm5"
+ :
+ : );
+
+ /*
+ * 5 = qx[0]
+ * 15 = qx[64]
+ */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm1\n\t"
+ "vpsraw $4, %%zmm0, %%zmm2\n\t"
+ "vpsraw $4, %%zmm8, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm8, %%zmm14\n\t"
+ "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpshufb %%zmm14, %%zmm4, %%zmm12\n\t"
+ "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm13\n\t"
+ "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm4, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm12, %%zmm13, %%zmm13"
+ :
+ : "m" (pbmul[0]), "m" (pbmul[16]));
+
+ /*
+ * 1 = pbmul[px[0]]
+ * 13 = pbmul[px[64]]
+ */
+ asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm15, %%zmm13, %%zmm13"
+ :
+ : );
+
+ /*
+ * 1 = db = DQ
+ * 13 = db[64] = DQ[64]
+ */
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm13,%1\n\t"
+ "vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
+ "vpxorq %%zmm13, %%zmm8, %%zmm8"
+ :
+ : "m" (dq[0]), "m" (dq[64]));
+
+ asm volatile("vmovdqa64 %%zmm0, %0\n\t"
+ "vmovdqa64 %%zmm8, %1"
+ :
+ : "m" (dp[0]), "m" (dp[64]));
+
+ bytes -= 128;
+ p += 128;
+ q += 128;
+ dp += 128;
+ dq += 128;
+#else
+ asm volatile("vmovdqa64 %0, %%zmm1\n\t"
+ "vmovdqa64 %1, %%zmm0\n\t"
+ "vpxorq %2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %3, %%zmm0, %%zmm0"
+ :
+ : "m" (*q), "m" (*p), "m"(*dq), "m" (*dp));
+
+ /* 1 = dq ^ q; 0 = dp ^ p */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm5"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ /*
+ * 1 = dq ^ q
+ * 3 = dq ^ q >> 4
+ */
+ asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
+ "vpxorq %%zmm4, %%zmm5, %%zmm5"
+ :
+ : );
+
+ /* 5 = qx */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm1"
+ :
+ : "m" (pbmul[0]), "m" (pbmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm0, %%zmm2\n\t"
+ "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
+ "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm4, %%zmm1, %%zmm1"
+ :
+ : );
+
+ /* 1 = pbmul[px] */
+ asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
+ /* 1 = db = DQ */
+ "vmovdqa64 %%zmm1, %0\n\t"
+ :
+ : "m" (dq[0]));
+
+ asm volatile("vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
+ "vmovdqa64 %%zmm0, %0"
+ :
+ : "m" (dp[0]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa64 %0, %%zmm3\n\t"
+ "vmovdqa64 %1, %%zmm8\n\t"
+ "vpxorq %2, %%zmm3, %%zmm3\n\t"
+ "vpxorq %3, %%zmm8, %%zmm8"
+ :
+ : "m" (dq[0]), "m" (dq[64]), "m" (q[0]),
+ "m" (q[64]));
+
+ /*
+ * 3 = q[0] ^ dq[0]
+ * 8 = q[64] ^ dq[64]
+ */
+ asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
+ "vmovapd %%zmm0, %%zmm13\n\t"
+ "vbroadcasti64x2 %1, %%zmm1\n\t"
+ "vmovapd %%zmm1, %%zmm14"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
+ "vpsraw $4, %%zmm8, %%zmm12\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm8, %%zmm8\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
+ "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
+ "vpshufb %%zmm8, %%zmm13, %%zmm13\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
+ "vpshufb %%zmm12, %%zmm14, %%zmm14\n\t"
+ "vpxorq %%zmm0, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm13, %%zmm14, %%zmm14"
+ :
+ : );
+
+ /*
+ * 1 = qmul[q[0] ^ dq[0]]
+ * 14 = qmul[q[64] ^ dq[64]]
+ */
+ asm volatile("vmovdqa64 %0, %%zmm2\n\t"
+ "vmovdqa64 %1, %%zmm12\n\t"
+ "vpxorq %%zmm1, %%zmm2, %%zmm2\n\t"
+ "vpxorq %%zmm14, %%zmm12, %%zmm12"
+ :
+ : "m" (p[0]), "m" (p[64]));
+
+ /*
+ * 2 = p[0] ^ qmul[q[0] ^ dq[0]]
+ * 12 = p[64] ^ qmul[q[64] ^ dq[64]]
+ */
+
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm14, %1\n\t"
+ "vmovdqa64 %%zmm2, %2\n\t"
+ "vmovdqa64 %%zmm12,%3"
+ :
+ : "m" (dq[0]), "m" (dq[64]), "m" (p[0]),
+ "m" (p[64]));
+
+ bytes -= 128;
+ p += 128;
+ q += 128;
+ dq += 128;
+#else
+ asm volatile("vmovdqa64 %0, %%zmm3\n\t"
+ "vpxorq %1, %%zmm3, %%zmm3"
+ :
+ : "m" (dq[0]), "m" (q[0]));
+
+ /* 3 = q ^ dq */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
+ "vbroadcasti64x2 %1, %%zmm1"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm0, %%zmm1, %%zmm1"
+ :
+ : );
+
+ /* 1 = qmul[q ^ dq] */
+
+ asm volatile("vmovdqa64 %0, %%zmm2\n\t"
+ "vpxorq %%zmm1, %%zmm2, %%zmm2"
+ :
+ : "m" (p[0]));
+
+ /* 2 = p ^ qmul[q ^ dq] */
+
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm2, %1"
+ :
+ : "m" (dq[0]), "m" (p[0]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_avx512 = {
+ .data2 = raid6_2data_recov_avx512,
+ .datap = raid6_datap_recov_avx512,
+ .valid = raid6_has_avx512,
+#ifdef CONFIG_X86_64
+ .name = "avx512x2",
+#else
+ .name = "avx512x1",
+#endif
+ .priority = 3,
+};
+
+#else
+#warning "your version of binutils lacks AVX512 support"
+#endif
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid6/recov_s390xc.c
new file mode 100644
index 000000000000..b042dac826cc
--- /dev/null
+++ b/lib/raid6/recov_s390xc.c
@@ -0,0 +1,116 @@
+/*
+ * RAID-6 data recovery in dual failure mode based on the XC instruction.
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/export.h>
+#include <linux/raid/pq.h>
+
+static inline void xor_block(u8 *p1, u8 *p2)
+{
+ typedef struct { u8 _[256]; } addrtype;
+
+ asm volatile(
+ " xc 0(256,%[p1]),0(%[p2])\n"
+ : "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2),
+ [p1] "a" (p1), [p2] "a" (p2) : "cc");
+}
+
+/* Recover two failed data blocks. */
+static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ int i;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ Use the dead data pages as temporary storage for
+ delta p and delta q */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
+
+ /* Now do it... */
+ while (bytes) {
+ xor_block(dp, p);
+ xor_block(dq, q);
+ for (i = 0; i < 256; i++)
+ dq[i] = pbmul[dp[i]] ^ qmul[dq[i]];
+ xor_block(dp, dq);
+ p += 256;
+ q += 256;
+ dp += 256;
+ dq += 256;
+ bytes -= 256;
+ }
+}
+
+/* Recover failure of one data block plus the P block */
+static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ int i;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ Use the dead data page as temporary storage for delta q */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ /* Now do it... */
+ while (bytes) {
+ xor_block(dq, q);
+ for (i = 0; i < 256; i++)
+ dq[i] = qmul[dq[i]];
+ xor_block(p, dq);
+ p += 256;
+ q += 256;
+ dq += 256;
+ bytes -= 256;
+ }
+}
+
+
+const struct raid6_recov_calls raid6_recov_s390xc = {
+ .data2 = raid6_2data_recov_s390xc,
+ .datap = raid6_datap_recov_s390xc,
+ .valid = NULL,
+ .name = "s390xc",
+ .priority = 1,
+};
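/*
 * Plain-C equivalent (illustration only) of xor_block() above: the XC
 * instruction XORs the 256-byte block at p2 into the block at p1 in place.
 */
static void xor_block_c(unsigned char *p1, const unsigned char *p2)
{
	int i;

	for (i = 0; i < 256; i++)
		p1[i] ^= p2[i];
}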
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
new file mode 100644
index 000000000000..7b45191a655f
--- /dev/null
+++ b/lib/raid6/s390vx.uc
@@ -0,0 +1,168 @@
+/*
+ * raid6_vx$#.c
+ *
+ * $#-way unrolled RAID6 gen/xor functions for s390
+ * based on the vector facility
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This file is postprocessed using unroll.awk.
+ */
+
+#include <linux/raid/pq.h>
+#include <asm/fpu/api.h>
+
+asm(".include \"asm/vx-insn.h\"\n");
+
+#define NSIZE 16
+
+static inline void LOAD_CONST(void)
+{
+ asm volatile("VREPIB %v24,7");
+ asm volatile("VREPIB %v25,0x1d");
+}
+
+/*
+ * The SHLBYTE() operation shifts each of the 16 bytes in
+ * vector register y left by 1 bit and stores the result in
+ * vector register x.
+ */
+static inline void SHLBYTE(int x, int y)
+{
+ asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
+}
+
+/*
+ * For each of the 16 bytes in the vector register y the MASK()
+ * operation returns 0xFF if the high bit of the byte is 1,
+ * or 0x00 if the high bit is 0. The result is stored in vector
+ * register x.
+ */
+static inline void MASK(int x, int y)
+{
+ asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
+}
+
+static inline void AND(int x, int y, int z)
+{
+ asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
+}
+
+static inline void XOR(int x, int y, int z)
+{
+ asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
+}
+
+static inline void LOAD_DATA(int x, int n, u8 *ptr)
+{
+ typedef struct { u8 _[16*n]; } addrtype;
+ register addrtype *__ptr asm("1") = (addrtype *) ptr;
+
+ asm volatile ("VLM %2,%3,0,%r1"
+ : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+}
+
+static inline void STORE_DATA(int x, int n, u8 *ptr)
+{
+ typedef struct { u8 _[16*n]; } addrtype;
+ register addrtype *__ptr asm("1") = (addrtype *) ptr;
+
+ asm volatile ("VSTM %2,%3,0,1"
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+}
+
+static inline void COPY_VEC(int x, int y)
+{
+ asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
+}
+
+static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ struct kernel_fpu vxstate;
+ u8 **dptr, *p, *q;
+ int d, z, z0;
+
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ LOAD_CONST();
+
+ dptr = (u8 **) ptrs;
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ for (d = 0; d < bytes; d += $#*NSIZE) {
+ LOAD_DATA(0,$#,&dptr[z0][d]);
+ COPY_VEC(8+$$,0+$$);
+ for (z = z0 - 1; z >= 0; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ LOAD_DATA(16,$#,&dptr[z][d]);
+ XOR(0+$$,0+$$,16+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+ STORE_DATA(0,$#,&p[d]);
+ STORE_DATA(8,$#,&q[d]);
+ }
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
+}
+
+static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ struct kernel_fpu vxstate;
+ u8 **dptr, *p, *q;
+ int d, z, z0;
+
+ dptr = (u8 **) ptrs;
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ LOAD_CONST();
+
+ for (d = 0; d < bytes; d += $#*NSIZE) {
+ /* P/Q data pages */
+ LOAD_DATA(0,$#,&dptr[z0][d]);
+ COPY_VEC(8+$$,0+$$);
+ for (z = z0 - 1; z >= start; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ LOAD_DATA(16,$#,&dptr[z][d]);
+ XOR(0+$$,0+$$,16+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+ /* P/Q left side optimization */
+ for (z = start - 1; z >= 0; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+ LOAD_DATA(16,$#,&p[d]);
+ XOR(16+$$,16+$$,0+$$);
+ STORE_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,$#,&q[d]);
+ XOR(16+$$,16+$$,8+$$);
+ STORE_DATA(16,$#,&q[d]);
+ }
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
+}
+
+static int raid6_s390vx$#_valid(void)
+{
+ return MACHINE_HAS_VX;
+}
+
+const struct raid6_calls raid6_s390vx$# = {
+ raid6_s390vx$#_gen_syndrome,
+ raid6_s390vx$#_xor_syndrome,
+ raid6_s390vx$#_valid,
+ "vx128x$#",
+ 1
+};
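/*
 * How the $# and $$ markers above are expanded (a sketch of the unroll.awk
 * postprocessing wired up by the raid6 Makefile rule earlier in this patch,
 * shown for the 8-way s390vx8.c build): "$#" becomes the unroll factor and
 * each line containing "$$" is emitted once per unroll index, e.g.
 *
 *	for (d = 0; d < bytes; d += $#*NSIZE)  ->  for (d = 0; d < bytes; d += 8*NSIZE)
 *	COPY_VEC(8+$$,0+$$);                   ->  COPY_VEC(8+0,0+0);
 *	                                           COPY_VEC(8+1,0+1);
 *	                                           ... through COPY_VEC(8+7,0+7);
 */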
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 29090f3db677..2c7b60edea04 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -32,10 +32,13 @@ ifeq ($(ARCH),arm64)
endif
ifeq ($(IS_X86),yes)
- OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
+ OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+ CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
index 3bebbabdb510..b07f4d8e6b03 100644
--- a/lib/raid6/test/test.c
+++ b/lib/raid6/test/test.c
@@ -21,12 +21,13 @@
#define NDISKS 16 /* Including P and Q */
-const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
struct raid6_calls raid6_call;
char *dataptrs[NDISKS];
-char data[NDISKS][PAGE_SIZE];
-char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
+char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static void makedata(int start, int stop)
{
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
index 8fe9d9662abb..834d268a4b05 100644
--- a/lib/raid6/x86.h
+++ b/lib/raid6/x86.h
@@ -46,6 +46,16 @@ static inline void kernel_fpu_end(void)
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ (9*32+17) /* AVX-512 DQ (Double/Quad granular)
+ * Instructions
+ */
+#define X86_FEATURE_AVX512BW (9*32+30) /* AVX-512 BW (Byte/Word granular)
+ * Instructions
+ */
+#define X86_FEATURE_AVX512VL (9*32+31) /* AVX-512 VL (128/256 Vector Length)
+ * Extensions
+ */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
/* Should work well enough on modern CPUs for testing */
diff --git a/lib/random32.c b/lib/random32.c
index 69ed593aab07..915982b304bb 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -81,7 +81,7 @@ u32 prandom_u32(void)
u32 res;
res = prandom_u32_state(state);
- put_cpu_var(state);
+ put_cpu_var(net_rand_state);
return res;
}
@@ -128,7 +128,7 @@ void prandom_bytes(void *buf, size_t bytes)
struct rnd_state *state = &get_cpu_var(net_rand_state);
prandom_bytes_state(state, buf, bytes);
- put_cpu_var(state);
+ put_cpu_var(net_rand_state);
}
EXPORT_SYMBOL(prandom_bytes);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 56054e541a0f..32d0ad058380 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -378,22 +378,8 @@ static void rht_deferred_worker(struct work_struct *work)
schedule_work(&ht->run_work);
}
-static bool rhashtable_check_elasticity(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash)
-{
- unsigned int elasticity = ht->elasticity;
- struct rhash_head *head;
-
- rht_for_each(head, tbl, hash)
- if (!--elasticity)
- return true;
-
- return false;
-}
-
-int rhashtable_insert_rehash(struct rhashtable *ht,
- struct bucket_table *tbl)
+static int rhashtable_insert_rehash(struct rhashtable *ht,
+ struct bucket_table *tbl)
{
struct bucket_table *old_tbl;
struct bucket_table *new_tbl;
@@ -439,61 +425,172 @@ fail:
return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *tbl)
+static void *rhashtable_lookup_one(struct rhashtable *ht,
+ struct bucket_table *tbl, unsigned int hash,
+ const void *key, struct rhash_head *obj)
{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct rhash_head __rcu **pprev;
struct rhash_head *head;
- unsigned int hash;
- int err;
+ int elasticity;
- tbl = rhashtable_last_table(ht, tbl);
- hash = head_hashfn(ht, tbl, obj);
- spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
+ elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
+ rht_for_each(head, tbl, hash) {
+ struct rhlist_head *list;
+ struct rhlist_head *plist;
- err = -EEXIST;
- if (key && rhashtable_lookup_fast(ht, key, ht->p))
- goto exit;
+ elasticity--;
+ if (!key ||
+ (ht->p.obj_cmpfn ?
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto exit;
+ if (!ht->rhlist)
+ return rht_obj(ht, head);
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
+
+ return NULL;
+ }
+
+ if (elasticity <= 0)
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash,
+ struct rhash_head *obj,
+ void *data)
+{
+ struct bucket_table *new_tbl;
+ struct rhash_head *head;
+
+ if (!IS_ERR_OR_NULL(data))
+ return ERR_PTR(-EEXIST);
+
+ if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
- err = -EAGAIN;
- if (rhashtable_check_elasticity(ht, tbl, hash) ||
- rht_grow_above_100(ht, tbl))
- goto exit;
+ new_tbl = rcu_dereference(tbl->future_tbl);
+ if (new_tbl)
+ return new_tbl;
- err = 0;
+ if (PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
+
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ return ERR_PTR(-E2BIG);
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ return ERR_PTR(-EAGAIN);
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (ht->rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
-exit:
- spin_unlock(rht_bucket_lock(tbl, hash));
+ return NULL;
+}
- if (err == 0)
- return NULL;
- else if (err == -EAGAIN)
- return tbl;
- else
- return ERR_PTR(err);
+static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ struct bucket_table *new_tbl;
+ struct bucket_table *tbl;
+ unsigned int hash;
+ spinlock_t *lock;
+ void *data;
+
+ tbl = rcu_dereference(ht->tbl);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rcu_dereference(tbl->future_tbl);
+ }
+
+ data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+ new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+ if (PTR_ERR(new_tbl) != -EEXIST)
+ data = ERR_CAST(new_tbl);
+
+ while (!IS_ERR_OR_NULL(new_tbl)) {
+ tbl = new_tbl;
+ hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+ spin_lock_nested(rht_bucket_lock(tbl, hash),
+ SINGLE_DEPTH_NESTING);
+
+ data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+ new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+ if (PTR_ERR(new_tbl) != -EEXIST)
+ data = ERR_CAST(new_tbl);
+
+ spin_unlock(rht_bucket_lock(tbl, hash));
+ }
+
+ spin_unlock_bh(lock);
+
+ if (PTR_ERR(data) == -EAGAIN)
+ data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
+ -EAGAIN);
+
+ return data;
+}
+
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ void *data;
+
+ do {
+ rcu_read_lock();
+ data = rhashtable_try_insert(ht, key, obj);
+ rcu_read_unlock();
+ } while (PTR_ERR(data) == -EAGAIN);
+
+ return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
* @ht: Table to walk over
* @iter: Hash table Iterator
- * @gfp: GFP flags for allocations
*
* This function prepares a hash table walk.
*
@@ -508,30 +605,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* This function may sleep so you must not call it from interrupt
* context or with spin locks held.
*
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
*/
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
iter->ht = ht;
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
- iter->walker = kmalloc(sizeof(*iter->walker), gfp);
- if (!iter->walker)
- return -ENOMEM;
-
spin_lock(&ht->lock);
- iter->walker->tbl =
+ iter->walker.tbl =
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
- list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+ list_add(&iter->walker.list, &iter->walker.tbl->walkers);
spin_unlock(&ht->lock);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
/**
* rhashtable_walk_exit - Free an iterator
@@ -542,10 +631,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
spin_lock(&iter->ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
spin_unlock(&iter->ht->lock);
- kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -571,12 +659,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
rcu_read_lock();
spin_lock(&ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker->tbl) {
- iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+ if (!iter->walker.tbl) {
+ iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
@@ -598,12 +686,17 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
*/
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
- struct bucket_table *tbl = iter->walker->tbl;
+ struct bucket_table *tbl = iter->walker.tbl;
+ struct rhlist_head *list = iter->list;
struct rhashtable *ht = iter->ht;
struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
if (p) {
- p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
goto next;
}
@@ -611,6 +704,18 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
int skip = iter->skip;
rht_for_each_rcu(p, tbl, iter->slot) {
+ if (rhlist) {
+ list = container_of(p, struct rhlist_head,
+ rhead);
+ do {
+ if (!skip)
+ goto next;
+ skip--;
+ list = rcu_dereference(list->next);
+ } while (list);
+
+ continue;
+ }
if (!skip)
break;
skip--;
@@ -620,7 +725,8 @@ next:
if (!rht_is_a_nulls(p)) {
iter->skip++;
iter->p = p;
- return rht_obj(ht, p);
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
}
iter->skip = 0;
@@ -631,8 +737,8 @@ next:
/* Ensure we see any new tables. */
smp_rmb();
- iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (iter->walker->tbl) {
+ iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (iter->walker.tbl) {
iter->slot = 0;
iter->skip = 0;
return ERR_PTR(-EAGAIN);
@@ -652,7 +758,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
__releases(RCU)
{
struct rhashtable *ht;
- struct bucket_table *tbl = iter->walker->tbl;
+ struct bucket_table *tbl = iter->walker.tbl;
if (!tbl)
goto out;
@@ -661,9 +767,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
spin_lock(&ht->lock);
if (tbl->rehash < tbl->size)
- list_add(&iter->walker->list, &tbl->walkers);
+ list_add(&iter->walker.list, &tbl->walkers);
else
- iter->walker->tbl = NULL;
+ iter->walker.tbl = NULL;
spin_unlock(&ht->lock);
iter->p = NULL;
@@ -809,6 +915,48 @@ int rhashtable_init(struct rhashtable *ht,
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
+ * rhltable_init - initialize a new hash list table
+ * @hlt: hash list table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash list table.
+ *
+ * See documentation for rhashtable_init.
+ */
+int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+{
+ int err;
+
+ /* No rhlist NULLs marking for now. */
+ if (params->nulls_base)
+ return -EINVAL;
+
+ err = rhashtable_init(&hlt->ht, params);
+ hlt->ht.rhlist = true;
+ return err;
+}
+EXPORT_SYMBOL_GPL(rhltable_init);
+
+static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
+{
+ struct rhlist_head *list;
+
+ if (!ht->rhlist) {
+ free_fn(rht_obj(ht, obj), arg);
+ return;
+ }
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ do {
+ obj = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ free_fn(rht_obj(ht, obj), arg);
+ } while (list);
+}
+
+/**
* rhashtable_free_and_destroy - free elements and destroy hash table
* @ht: the hash table to destroy
* @free_fn: callback to release resources of element
@@ -845,7 +993,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
pos = next,
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL)
- free_fn(rht_obj(ht, pos), arg);
+ rhashtable_free_one(ht, pos, free_fn, arg);
}
}
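
For orientation, a minimal sketch of how the reworked rhashtable API above fits together: the rhlist interface added here plus the allocation-free rhashtable_walk_enter() iterator. The struct my_obj layout, my_params values and helper names are illustrative assumptions, not part of this patch.

#include <linux/rhashtable.h>

/* Hypothetical object type; duplicates per key are allowed, hence rhlist_head. */
struct my_obj {
        u32 key;
        struct rhlist_head node;
};

static const struct rhashtable_params my_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct my_obj, key),
        .head_offset    = offsetof(struct my_obj, node),
        .automatic_shrinking = true,
};

static int setup_my_table(struct rhltable *hlt)
{
        /* rhltable_init() is the new entry point for duplicate-friendly tables. */
        return rhltable_init(hlt, &my_params);
}

static void walk_my_table(struct rhltable *hlt)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        /* New in this patch: walk_enter() cannot fail, so no GFP flags or error path. */
        rhashtable_walk_enter(&hlt->ht, &iter);

        if (rhashtable_walk_start(&iter) == -EAGAIN) {
                /* The table was resized since walk_enter(); the walk is still valid. */
        }

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize hit mid-walk */
                        break;
                }
                /* use obj->key here */
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);    /* still required after walk_enter() */
}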
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
new file mode 100644
index 000000000000..2cecf05c82fd
--- /dev/null
+++ b/lib/sbitmap.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2016 Facebook
+ * Copyright (C) 2013-2014 Jens Axboe
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <linux/random.h>
+#include <linux/sbitmap.h>
+
+int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ gfp_t flags, int node)
+{
+ unsigned int bits_per_word;
+ unsigned int i;
+
+ if (shift < 0) {
+ shift = ilog2(BITS_PER_LONG);
+ /*
+ * If the bitmap is small, shrink the number of bits per word so
+ * we spread over a few cachelines, at least. If there are fewer
+ * than 4 bits, just forget about it; it's not going to work
+ * optimally anyway.
+ */
+ if (depth >= 4) {
+ while ((4U << shift) > depth)
+ shift--;
+ }
+ }
+ bits_per_word = 1U << shift;
+ if (bits_per_word > BITS_PER_LONG)
+ return -EINVAL;
+
+ sb->shift = shift;
+ sb->depth = depth;
+ sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+
+ if (depth == 0) {
+ sb->map = NULL;
+ return 0;
+ }
+
+ sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ if (!sb->map)
+ return -ENOMEM;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ sb->map[i].depth = min(depth, bits_per_word);
+ depth -= sb->map[i].depth;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sbitmap_init_node);
+
+void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
+{
+ unsigned int bits_per_word = 1U << sb->shift;
+ unsigned int i;
+
+ sb->depth = depth;
+ sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ sb->map[i].depth = min(depth, bits_per_word);
+ depth -= sb->map[i].depth;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_resize);
+
+static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
+ bool wrap)
+{
+ unsigned int orig_hint = hint;
+ int nr;
+
+ while (1) {
+ nr = find_next_zero_bit(&word->word, word->depth, hint);
+ if (unlikely(nr >= word->depth)) {
+ /*
+ * We started with an offset, and we didn't reset the
+ * offset to 0 in a failure case, so start from 0 to
+ * exhaust the map.
+ */
+ if (orig_hint && hint && wrap) {
+ hint = orig_hint = 0;
+ continue;
+ }
+ return -1;
+ }
+
+ if (!test_and_set_bit(nr, &word->word))
+ break;
+
+ hint = nr + 1;
+ if (hint >= word->depth - 1)
+ hint = 0;
+ }
+
+ return nr;
+}
+
+int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
+{
+ unsigned int i, index;
+ int nr = -1;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = __sbitmap_get_word(&sb->map[index],
+ SB_NR_TO_BIT(sb, alloc_hint),
+ !round_robin);
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ index++;
+ alloc_hint = index << sb->shift;
+
+ if (index >= sb->map_nr) {
+ index = 0;
+ alloc_hint = 0;
+ }
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get);
+
+bool sbitmap_any_bit_set(const struct sbitmap *sb)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ if (sb->map[i].word)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
+
+bool sbitmap_any_bit_clear(const struct sbitmap *sb)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ const struct sbitmap_word *word = &sb->map[i];
+ unsigned long ret;
+
+ ret = find_first_zero_bit(&word->word, word->depth);
+ if (ret < word->depth)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
+
+unsigned int sbitmap_weight(const struct sbitmap *sb)
+{
+ unsigned int i, weight = 0;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ const struct sbitmap_word *word = &sb->map[i];
+
+ weight += bitmap_weight(&word->word, word->depth);
+ }
+ return weight;
+}
+EXPORT_SYMBOL_GPL(sbitmap_weight);
+
+static unsigned int sbq_calc_wake_batch(unsigned int depth)
+{
+ unsigned int wake_batch;
+
+ /*
+ * For each batch, we wake up one queue. We need to make sure that our
+ * batch size is small enough that the full depth of the bitmap is
+ * enough to wake up all of the queues.
+ */
+ wake_batch = SBQ_WAKE_BATCH;
+ if (wake_batch > depth / SBQ_WAIT_QUEUES)
+ wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);
+
+ return wake_batch;
+}
+
+int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ int shift, bool round_robin, gfp_t flags, int node)
+{
+ int ret;
+ int i;
+
+ ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
+ if (ret)
+ return ret;
+
+ sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+ if (!sbq->alloc_hint) {
+ sbitmap_free(&sbq->sb);
+ return -ENOMEM;
+ }
+
+ if (depth && !round_robin) {
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
+ }
+
+ sbq->wake_batch = sbq_calc_wake_batch(depth);
+ atomic_set(&sbq->wake_index, 0);
+
+ sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
+ if (!sbq->ws) {
+ free_percpu(sbq->alloc_hint);
+ sbitmap_free(&sbq->sb);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ init_waitqueue_head(&sbq->ws[i].wait);
+ atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
+ }
+
+ sbq->round_robin = round_robin;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
+
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+{
+ sbq->wake_batch = sbq_calc_wake_batch(depth);
+ sbitmap_resize(&sbq->sb, depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
+
+int __sbitmap_queue_get(struct sbitmap_queue *sbq)
+{
+ unsigned int hint, depth;
+ int nr;
+
+ hint = this_cpu_read(*sbq->alloc_hint);
+ depth = READ_ONCE(sbq->sb.depth);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? prandom_u32() % depth : 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+ nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
+
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sbq->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sbq->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+
+static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
+{
+ int i, wake_index;
+
+ wake_index = atomic_read(&sbq->wake_index);
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[wake_index];
+
+ if (waitqueue_active(&ws->wait)) {
+ int o = atomic_read(&sbq->wake_index);
+
+ if (wake_index != o)
+ atomic_cmpxchg(&sbq->wake_index, o, wake_index);
+ return ws;
+ }
+
+ wake_index = sbq_index_inc(wake_index);
+ }
+
+ return NULL;
+}
+
+static void sbq_wake_up(struct sbitmap_queue *sbq)
+{
+ struct sbq_wait_state *ws;
+ int wait_cnt;
+
+ /* Ensure that the wait list checks occur after clear_bit(). */
+ smp_mb();
+
+ ws = sbq_wake_ptr(sbq);
+ if (!ws)
+ return;
+
+ wait_cnt = atomic_dec_return(&ws->wait_cnt);
+ if (unlikely(wait_cnt < 0))
+ wait_cnt = atomic_inc_return(&ws->wait_cnt);
+ if (wait_cnt == 0) {
+ atomic_add(sbq->wake_batch, &ws->wait_cnt);
+ sbq_index_atomic_inc(&sbq->wake_index);
+ wake_up(&ws->wait);
+ }
+}
+
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+ unsigned int cpu)
+{
+ sbitmap_clear_bit(&sbq->sb, nr);
+ sbq_wake_up(sbq);
+ if (likely(!sbq->round_robin && nr < sbq->sb.depth))
+ *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
+
+void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
+{
+ int i, wake_index;
+
+ /*
+ * Make sure all changes prior to this are visible from other CPUs.
+ */
+ smp_mb();
+ wake_index = atomic_read(&sbq->wake_index);
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[wake_index];
+
+ if (waitqueue_active(&ws->wait))
+ wake_up(&ws->wait);
+
+ wake_index = sbq_index_inc(wake_index);
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
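
A brief usage sketch of the sbitmap_queue half of the new library, roughly the tag-allocation pattern it is built for. The my_tags name, the depth of 128 and the cpu bookkeeping are illustrative assumptions; only functions shown in the file above are relied on.

#include <linux/sbitmap.h>

static struct sbitmap_queue my_tags;

static int my_tags_setup(void)
{
        /* 128 tags, default word granularity (shift < 0), no round-robin. */
        return sbitmap_queue_init_node(&my_tags, 128, -1, false,
                                       GFP_KERNEL, NUMA_NO_NODE);
}

static int my_tag_get(void)
{
        /* Returns a free bit number, or -1 if the map is currently full. */
        return __sbitmap_queue_get(&my_tags);
}

static void my_tag_put(int tag, unsigned int cpu)
{
        /*
         * @cpu should be the CPU the tag was allocated on so the per-cpu
         * alloc hint stays useful; clearing also wakes a waiter batch.
         */
        sbitmap_queue_clear(&my_tags, tag, cpu);
}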
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 93f45011a59d..94346b4d8984 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5485,6 +5485,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_proto = htons(ETH_P_IP);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
new file mode 100644
index 000000000000..c8420d404926
--- /dev/null
+++ b/lib/win_minmax.c
@@ -0,0 +1,98 @@
+/**
+ * lib/win_minmax.c: windowed min/max tracker
+ *
+ * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
+ * value of a data stream over some fixed time interval. (E.g.,
+ * the minimum RTT over the past five minutes.) It uses constant
+ * space and constant time per update yet almost always delivers
+ * the same minimum as an implementation that has to keep all the
+ * data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of
+ * the n'th best >= n-1'th best. It also makes sure that the three
+ * values are widely separated in the time window since that bounds
+ * the worst-case error when the data is monotonically increasing
+ * over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because
+ * it has no value - the new min is <= everything else in the window
+ * by definition and it's the most recent. So we restart fresh on
+ * every new min and overwrite the 2nd & 3rd choices. The same property
+ * holds for 2nd & 3rd best.
+ */
+#include <linux/module.h>
+#include <linux/win_minmax.h>
+
+/* As time advances, update the 1st, 2nd, and 3rd choices. */
+static u32 minmax_subwin_update(struct minmax *m, u32 win,
+ const struct minmax_sample *val)
+{
+ u32 dt = val->t - m->s[0].t;
+
+ if (unlikely(dt > win)) {
+ /*
+ * Passed the entire window without a new val, so promote the
+ * 2nd choice to best and the 3rd choice to 2nd, and record the
+ * new val as the 3rd choice. We may have to iterate this since
+ * our 2nd choice may also be outside the window (we checked on
+ * entry that the third choice was in the window).
+ */
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ if (unlikely(val->t - m->s[0].t > win)) {
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ }
+ } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
+ /*
+ * We've passed a quarter of the window without a new val
+ * so take a 2nd choice from the 2nd quarter of the window.
+ */
+ m->s[2] = m->s[1] = *val;
+ } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
+ /*
+ * We've passed half the window without finding a new val
+ * so take a 3rd choice from the last half of the window.
+ */
+ m->s[2] = *val;
+ }
+ return m->s[0].v;
+}
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice max. */
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v >= m->s[0].v) || /* found new max? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v >= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v >= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
+EXPORT_SYMBOL(minmax_running_max);
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice min. */
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v <= m->s[0].v) || /* found new min? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v <= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v <= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
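
A small sketch of how the tracker above is meant to be driven; the RTT framing, the 10-second window and the helper names are illustrative assumptions (minmax_reset() and minmax_get() are the inline helpers from <linux/win_minmax.h>).

#include <linux/ktime.h>                /* USEC_PER_SEC */
#include <linux/win_minmax.h>

static struct minmax rtt_min;

static void rtt_min_init(u32 now_us, u32 first_rtt_us)
{
        /* Seed all three choices with the first sample. */
        minmax_reset(&rtt_min, now_us, first_rtt_us);
}

static void rtt_min_update(u32 now_us, u32 rtt_us)
{
        /* Track the minimum over a sliding 10 s window, times in microseconds. */
        minmax_running_min(&rtt_min, 10 * USEC_PER_SEC, now_us, rtt_us);
}

static u32 rtt_min_us(void)
{
        return minmax_get(&rtt_min);    /* lowest sample still in the window */
}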
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0aa7dda52402..a869f84f44d3 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -11,15 +11,12 @@
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
-#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/io.h>
-
-#include <asm/processor.h>
+#include <linux/bootmem.h>
#include "internal.h"
@@ -712,7 +709,7 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
void *ptr;
if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:
/* do not panic in alloc_bootmem_bdata() */
@@ -738,9 +735,6 @@ again:
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
@@ -812,10 +806,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
}
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
/**
* __alloc_bootmem_low - allocate low boot memory
* @size: size of the request in bytes
diff --git a/mm/compaction.c b/mm/compaction.c
index 9affb2908304..0409a4ad6ea1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -997,8 +997,12 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
#ifdef CONFIG_COMPACTION
/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
+static bool suitable_migration_target(struct compact_control *cc,
+ struct page *page)
{
+ if (cc->ignore_block_suitable)
+ return true;
+
/* If the page is a large free page, then disallow migration */
if (PageBuddy(page)) {
/*
@@ -1083,7 +1087,7 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Check the block is suitable for migration */
- if (!suitable_migration_target(page))
+ if (!suitable_migration_target(cc, page))
continue;
/* If isolation recently failed, do not retry */
@@ -1316,7 +1320,7 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
return COMPACT_CONTINUE;
/* Compaction run is not finished if the watermark is not met */
- watermark = low_wmark_pages(zone);
+ watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
cc->alloc_flags))
@@ -1329,13 +1333,13 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
/* Job done if page is free of the right migratetype */
if (!list_empty(&area->free_list[migratetype]))
- return COMPACT_PARTIAL;
+ return COMPACT_SUCCESS;
#ifdef CONFIG_CMA
/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
if (migratetype == MIGRATE_MOVABLE &&
!list_empty(&area->free_list[MIGRATE_CMA]))
- return COMPACT_PARTIAL;
+ return COMPACT_SUCCESS;
#endif
/*
* Job done if allocation would steal freepages from
@@ -1343,7 +1347,7 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
*/
if (find_suitable_fallback(area, order, migratetype,
true, &can_steal) != -1)
- return COMPACT_PARTIAL;
+ return COMPACT_SUCCESS;
}
return COMPACT_NO_SUITABLE_PAGE;
@@ -1367,7 +1371,7 @@ static enum compact_result compact_finished(struct zone *zone,
* compaction_suitable: Is this suitable to run compaction on this zone now?
* Returns
* COMPACT_SKIPPED - If there are too few free pages for compaction
- * COMPACT_PARTIAL - If the allocation would succeed without compaction
+ * COMPACT_SUCCESS - If the allocation would succeed without compaction
* COMPACT_CONTINUE - If compaction should run now
*/
static enum compact_result __compaction_suitable(struct zone *zone, int order,
@@ -1375,46 +1379,41 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
int classzone_idx,
unsigned long wmark_target)
{
- int fragindex;
unsigned long watermark;
if (is_via_compact_memory(order))
return COMPACT_CONTINUE;
- watermark = low_wmark_pages(zone);
+ watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
/*
* If watermarks for high-order allocation are already met, there
* should be no need for compaction at all.
*/
if (zone_watermark_ok(zone, order, watermark, classzone_idx,
alloc_flags))
- return COMPACT_PARTIAL;
+ return COMPACT_SUCCESS;
/*
- * Watermarks for order-0 must be met for compaction. Note the 2UL.
- * This is because during migration, copies of pages need to be
- * allocated and for a short time, the footprint is higher
+ * Watermarks for order-0 must be met for compaction to be able to
+ * isolate free pages for migration targets. This means that the
+ * watermark and alloc_flags have to match, or be more pessimistic than
+ * the check in __isolate_free_page(). We don't use the direct
+ * compactor's alloc_flags, as they are not relevant for freepage
+ * isolation. We however do use the direct compactor's classzone_idx to
+ * skip over zones where lowmem reserves would prevent allocation even
+ * if compaction succeeds.
+ * For costly orders, we require low watermark instead of min for
+ * compaction to proceed to increase its chances.
+ * ALLOC_CMA is used, as pages in CMA pageblocks are considered
+ * suitable migration targets.
*/
- watermark += (2UL << order);
+ watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
+ low_wmark_pages(zone) : min_wmark_pages(zone);
+ watermark += compact_gap(order);
if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
- alloc_flags, wmark_target))
+ ALLOC_CMA, wmark_target))
return COMPACT_SKIPPED;
- /*
- * fragmentation index determines if allocation failures are due to
- * low memory or external fragmentation
- *
- * index of -1000 would imply allocations might succeed depending on
- * watermarks, but we already failed the high-order watermark check
- * index towards 0 implies failure is due to lack of memory
- * index towards 1000 implies failure is due to fragmentation
- *
- * Only compact if a failure would be due to fragmentation.
- */
- fragindex = fragmentation_index(zone, order);
- if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
- return COMPACT_NOT_SUITABLE_ZONE;
-
return COMPACT_CONTINUE;
}
@@ -1423,9 +1422,32 @@ enum compact_result compaction_suitable(struct zone *zone, int order,
int classzone_idx)
{
enum compact_result ret;
+ int fragindex;
ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
zone_page_state(zone, NR_FREE_PAGES));
+ /*
+ * fragmentation index determines if allocation failures are due to
+ * low memory or external fragmentation
+ *
+ * index of -1000 would imply allocations might succeed depending on
+ * watermarks, but we already failed the high-order watermark check
+ * index towards 0 implies failure is due to lack of memory
+ * index towards 1000 implies failure is due to fragmentation
+ *
+ * Only compact if a failure would be due to fragmentation. Also
+ * ignore fragindex for non-costly orders where the alternative to
+ * a successful reclaim/compaction is OOM. Fragindex and the
+ * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
+ * excessive compaction for costly orders, but they should not come
+ * at the expense of system stability.
+ */
+ if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
+ fragindex = fragmentation_index(zone, order);
+ if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+ ret = COMPACT_NOT_SUITABLE_ZONE;
+ }
+
trace_mm_compaction_suitable(zone, order, ret);
if (ret == COMPACT_NOT_SUITABLE_ZONE)
ret = COMPACT_SKIPPED;
@@ -1458,8 +1480,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
compact_result = __compaction_suitable(zone, order, alloc_flags,
ac_classzone_idx(ac), available);
- if (compact_result != COMPACT_SKIPPED &&
- compact_result != COMPACT_NOT_SUITABLE_ZONE)
+ if (compact_result != COMPACT_SKIPPED)
return true;
}
@@ -1477,7 +1498,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
/* Compaction is likely to fail */
- if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
+ if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
return ret;
/* huh, compaction_suitable is returning something unexpected */
@@ -1492,23 +1513,29 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
/*
* Setup to move all movable pages to the end of the zone. Used cached
- * information on where the scanners should start but check that it
- * is initialised by ensuring the values are within zone boundaries.
+ * information on where the scanners should start (unless we explicitly
+ * want to compact the whole zone), but check that it is initialised
+ * by ensuring the values are within zone boundaries.
*/
- cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
- cc->free_pfn = zone->compact_cached_free_pfn;
- if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
- cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
- zone->compact_cached_free_pfn = cc->free_pfn;
- }
- if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
+ if (cc->whole_zone) {
cc->migrate_pfn = start_pfn;
- zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
- zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
- }
+ cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
+ } else {
+ cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
+ cc->free_pfn = zone->compact_cached_free_pfn;
+ if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
+ cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
+ zone->compact_cached_free_pfn = cc->free_pfn;
+ }
+ if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
+ cc->migrate_pfn = start_pfn;
+ zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+ zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+ }
- if (cc->migrate_pfn == start_pfn)
- cc->whole_zone = true;
+ if (cc->migrate_pfn == start_pfn)
+ cc->whole_zone = true;
+ }
cc->last_migrated_pfn = 0;
@@ -1638,6 +1665,9 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.alloc_flags = alloc_flags,
.classzone_idx = classzone_idx,
.direct_compaction = true,
+ .whole_zone = (prio == MIN_COMPACT_PRIORITY),
+ .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
+ .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -1683,7 +1713,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
ac->nodemask) {
enum compact_result status;
- if (compaction_deferred(zone, order)) {
+ if (prio > MIN_COMPACT_PRIORITY
+ && compaction_deferred(zone, order)) {
rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
continue;
}
@@ -1692,9 +1723,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
alloc_flags, ac_classzone_idx(ac));
rc = max(status, rc);
- /* If a normal allocation would succeed, stop compacting */
- if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
- ac_classzone_idx(ac), alloc_flags)) {
+ /* The allocation should succeed, stop compacting */
+ if (status == COMPACT_SUCCESS) {
/*
* We think the allocation will succeed in this zone,
* but it is not certain, hence the false. The caller
@@ -1730,10 +1760,18 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
/* Compact all zones within a node */
-static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
+static void compact_node(int nid)
{
+ pg_data_t *pgdat = NODE_DATA(nid);
int zoneid;
struct zone *zone;
+ struct compact_control cc = {
+ .order = -1,
+ .mode = MIGRATE_SYNC,
+ .ignore_skip_hint = true,
+ .whole_zone = true,
+ };
+
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -1741,60 +1779,19 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (!populated_zone(zone))
continue;
- cc->nr_freepages = 0;
- cc->nr_migratepages = 0;
- cc->zone = zone;
- INIT_LIST_HEAD(&cc->freepages);
- INIT_LIST_HEAD(&cc->migratepages);
-
- /*
- * When called via /proc/sys/vm/compact_memory
- * this makes sure we compact the whole zone regardless of
- * cached scanner positions.
- */
- if (is_via_compact_memory(cc->order))
- __reset_isolation_suitable(zone);
-
- if (is_via_compact_memory(cc->order) ||
- !compaction_deferred(zone, cc->order))
- compact_zone(zone, cc);
-
- VM_BUG_ON(!list_empty(&cc->freepages));
- VM_BUG_ON(!list_empty(&cc->migratepages));
+ cc.nr_freepages = 0;
+ cc.nr_migratepages = 0;
+ cc.zone = zone;
+ INIT_LIST_HEAD(&cc.freepages);
+ INIT_LIST_HEAD(&cc.migratepages);
- if (is_via_compact_memory(cc->order))
- continue;
+ compact_zone(zone, &cc);
- if (zone_watermark_ok(zone, cc->order,
- low_wmark_pages(zone), 0, 0))
- compaction_defer_reset(zone, cc->order, false);
+ VM_BUG_ON(!list_empty(&cc.freepages));
+ VM_BUG_ON(!list_empty(&cc.migratepages));
}
}
-void compact_pgdat(pg_data_t *pgdat, int order)
-{
- struct compact_control cc = {
- .order = order,
- .mode = MIGRATE_ASYNC,
- };
-
- if (!order)
- return;
-
- __compact_pgdat(pgdat, &cc);
-}
-
-static void compact_node(int nid)
-{
- struct compact_control cc = {
- .order = -1,
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- };
-
- __compact_pgdat(NODE_DATA(nid), &cc);
-}
-
/* Compact all nodes in the system */
static void compact_nodes(void)
{
@@ -1900,8 +1897,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
.ignore_skip_hint = true,
};
- bool success = false;
-
trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
cc.classzone_idx);
count_vm_event(KCOMPACTD_WAKE);
@@ -1930,9 +1925,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
return;
status = compact_zone(zone, &cc);
- if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
- cc.classzone_idx, 0)) {
- success = true;
+ if (status == COMPACT_SUCCESS) {
compaction_defer_reset(zone, cc.order, false);
} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
/*
diff --git a/mm/debug.c b/mm/debug.c
index 74c7cae4f683..9feb699c5d25 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,6 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
+ /*
+ * Avoid VM_BUG_ON() in page_mapcount().
+ * page->_mapcount space in struct page is used by sl[aou]b pages to
+ * encode their own info.
+ */
int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
diff --git a/mm/filemap.c b/mm/filemap.c
index 2d0986a64f1f..2f7b7783bd6b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
static void page_cache_tree_delete(struct address_space *mapping,
struct page *page, void *shadow)
{
- struct radix_tree_node *node;
int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
- if (shadow) {
- mapping->nrexceptional += nr;
- /*
- * Make sure the nrexceptional update is committed before
- * the nrpages update so that final truncate racing
- * with reclaim does not see both counters 0 at the
- * same time and miss a shadow entry.
- */
- smp_wmb();
- }
- mapping->nrpages -= nr;
-
for (i = 0; i < nr; i++) {
- node = radix_tree_replace_clear_tags(&mapping->page_tree,
- page->index + i, shadow);
+ struct radix_tree_node *node;
+ void **slot;
+
+ __radix_tree_lookup(&mapping->page_tree, page->index + i,
+ &node, &slot);
+
+ radix_tree_clear_tags(&mapping->page_tree, node, slot);
+
if (!node) {
VM_BUG_ON_PAGE(nr != 1, page);
- return;
+ /*
+ * We need a node to properly account shadow
+ * entries. Don't plant any without. XXX
+ */
+ shadow = NULL;
}
+ radix_tree_replace_slot(slot, shadow);
+
+ if (!node)
+ break;
+
workingset_node_pages_dec(node);
if (shadow)
workingset_node_shadows_inc(node);
@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
&node->private_list);
}
}
+
+ if (shadow) {
+ mapping->nrexceptional += nr;
+ /*
+ * Make sure the nrexceptional update is committed before
+ * the nrpages update so that final truncate racing
+ * with reclaim does not see both counters 0 at the
+ * same time and miss a shadow entry.
+ */
+ smp_wmb();
+ }
+ mapping->nrpages -= nr;
}
/*
@@ -619,7 +633,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
__delete_from_page_cache(old, NULL);
error = page_cache_tree_insert(mapping, new, NULL);
BUG_ON(error);
- mapping->nrpages++;
/*
* hugetlb pages do not participate in page cache accounting.
@@ -1674,6 +1687,10 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
unsigned int prev_offset;
int error = 0;
+ if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
+ return -EINVAL;
+ iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
+
index = *ppos >> PAGE_SHIFT;
prev_index = ra->prev_pos >> PAGE_SHIFT;
prev_offset = ra->prev_pos & (PAGE_SIZE-1);
@@ -1708,7 +1725,9 @@ find_page:
* wait_on_page_locked is used to avoid unnecessarily
* serialisations and why it's safe.
*/
- wait_on_page_locked_killable(page);
+ error = wait_on_page_locked_killable(page);
+ if (unlikely(error))
+ goto readpage_error;
if (PageUptodate(page))
goto page_ok;
@@ -1910,16 +1929,18 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
+ struct iov_iter data = *iter;
loff_t size;
size = i_size_read(inode);
retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count - 1);
- if (!retval) {
- struct iov_iter data = *iter;
- retval = mapping->a_ops->direct_IO(iocb, &data);
- }
+ if (retval < 0)
+ goto out;
+ file_accessed(file);
+
+ retval = mapping->a_ops->direct_IO(iocb, &data);
if (retval > 0) {
iocb->ki_pos += retval;
iov_iter_advance(iter, retval);
@@ -1935,10 +1956,8 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* DAX files, so don't bother trying.
*/
if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
- IS_DAX(inode)) {
- file_accessed(file);
+ IS_DAX(inode))
goto out;
- }
}
retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 283583fcb1e7..cdcd25cb30fe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -59,7 +59,7 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
-struct page *get_huge_zero_page(void)
+static struct page *get_huge_zero_page(void)
{
struct page *zero_page;
retry:
@@ -86,7 +86,7 @@ retry:
return READ_ONCE(huge_zero_page);
}
-void put_huge_zero_page(void)
+static void put_huge_zero_page(void)
{
/*
* Counter should never go to zero here. Only shrinker can put
@@ -95,6 +95,26 @@ void put_huge_zero_page(void)
BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
+struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+{
+ if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+ return READ_ONCE(huge_zero_page);
+
+ if (!get_huge_zero_page())
+ return NULL;
+
+ if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+ put_huge_zero_page();
+
+ return READ_ONCE(huge_zero_page);
+}
+
+void mm_put_huge_zero_page(struct mm_struct *mm)
+{
+ if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+ put_huge_zero_page();
+}
+
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -469,6 +489,49 @@ void prep_transhuge_page(struct page *page)
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
+unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
+ loff_t off, unsigned long flags, unsigned long size)
+{
+ unsigned long addr;
+ loff_t off_end = off + len;
+ loff_t off_align = round_up(off, size);
+ unsigned long len_pad;
+
+ if (off_end <= off_align || (off_end - off_align) < size)
+ return 0;
+
+ len_pad = len + size;
+ if (len_pad < len || (off + len_pad) < off)
+ return 0;
+
+ addr = current->mm->get_unmapped_area(filp, 0, len_pad,
+ off >> PAGE_SHIFT, flags);
+ if (IS_ERR_VALUE(addr))
+ return 0;
+
+ addr += (off - addr) & (size - 1);
+ return addr;
+}
+
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ loff_t off = (loff_t)pgoff << PAGE_SHIFT;
+
+ if (addr)
+ goto out;
+ if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
+ goto out;
+
+ addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
+ if (addr)
+ return addr;
+
+ out:
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
+
static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
gfp_t gfp)
{
@@ -601,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
pgtable = pte_alloc_one(vma->vm_mm, haddr);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
- zero_page = get_huge_zero_page();
+ zero_page = mm_get_huge_zero_page(vma->vm_mm);
if (unlikely(!zero_page)) {
pte_free(vma->vm_mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
@@ -623,10 +686,8 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
}
} else
spin_unlock(fe->ptl);
- if (!set) {
+ if (!set)
pte_free(vma->vm_mm, pgtable);
- put_huge_zero_page();
- }
return ret;
}
gfp = alloc_hugepage_direct_gfpmask(vma);
@@ -780,7 +841,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* since we already have a zero page to copy. It just takes a
* reference.
*/
- zero_page = get_huge_zero_page();
+ zero_page = mm_get_huge_zero_page(dst_mm);
set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
zero_page);
ret = 0;
@@ -1038,7 +1099,6 @@ alloc:
update_mmu_cache_pmd(vma, fe->address, fe->pmd);
if (!page) {
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
- put_huge_zero_page();
} else {
VM_BUG_ON_PAGE(!PageHead(page), page);
page_remove_rmap(page, true);
@@ -1499,7 +1559,6 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
}
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
- put_huge_zero_page();
}
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
@@ -1522,8 +1581,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
if (!vma_is_anonymous(vma)) {
_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
- if (is_huge_zero_pmd(_pmd))
- put_huge_zero_page();
if (vma_is_dax(vma))
return;
page = pmd_page(_pmd);
@@ -1563,7 +1620,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
} else {
- entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
entry = maybe_mkwrite(entry, vma);
if (!write)
entry = pte_wrprotect(entry);
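
A worked example of the alignment arithmetic in __thp_get_unmapped_area() above, with made-up numbers: for a DAX file offset off = 0x2ab000 and PMD_SIZE = 0x200000, the kernel asks for len + PMD_SIZE bytes so there is room to slide. If get_unmapped_area() returns the 2 MB-aligned addr = 0x7f1234600000, then (off - addr) & (PMD_SIZE - 1) = 0xab000, and the returned address becomes 0x7f12346ab000; addr and off are now congruent modulo PMD_SIZE, so faults on the mapping can be served with PMD-sized pages.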
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 87e11d8ad536..ec49d9ef1eef 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -567,13 +567,13 @@ retry:
* appear as a "reserved" entry instead of simply dangling with incorrect
* counts.
*/
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
+void hugetlb_fix_reserve_counts(struct inode *inode)
{
struct hugepage_subpool *spool = subpool_inode(inode);
long rsv_adjust;
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
- if (restore_reserve && rsv_adjust) {
+ if (rsv_adjust) {
struct hstate *h = hstate_inode(inode);
hugetlb_acct_memory(h, 1);
@@ -1022,7 +1022,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-#if (defined(CONFIG_X86_64) || defined(CONFIG_S390)) && \
+#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
defined(CONFIG_CMA))
static void destroy_compound_gigantic_page(struct page *page,
@@ -1437,38 +1437,61 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
/*
* Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
*/
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
{
+ int rc = 0;
+
spin_lock(&hugetlb_lock);
if (PageHuge(page) && !page_count(page)) {
- struct hstate *h = page_hstate(page);
- int nid = page_to_nid(page);
- list_del(&page->lru);
+ struct page *head = compound_head(page);
+ struct hstate *h = page_hstate(head);
+ int nid = page_to_nid(head);
+ if (h->free_huge_pages - h->resv_huge_pages == 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ list_del(&head->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
h->max_huge_pages--;
- update_and_free_page(h, page);
+ update_and_free_page(h, head);
}
+out:
spin_unlock(&hugetlb_lock);
+ return rc;
}
/*
* Dissolve free hugepages in a given pfn range. Used by memory hotplug to
* make specified memory blocks removable from the system.
- * Note that start_pfn should aligned with (minimum) hugepage size.
+ * Note that this will dissolve a free gigantic hugepage completely, if any
+ * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
*/
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
+ struct page *page;
+ int rc = 0;
if (!hugepages_supported())
- return;
+ return rc;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
+ page = pfn_to_page(pfn);
+ if (PageHuge(page) && !page_count(page)) {
+ rc = dissolve_free_huge_page(page);
+ if (rc)
+ break;
+ }
+ }
- VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
- for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
- dissolve_free_huge_page(pfn_to_page(pfn));
+ return rc;
}
/*
diff --git a/mm/internal.h b/mm/internal.h
index 1501304f87a4..537ac9951f5f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -178,8 +178,9 @@ struct compact_control {
unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
enum migrate_mode mode; /* Async or sync migration mode */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
+ bool ignore_block_suitable; /* Scan blocks considered unsuitable */
bool direct_compaction; /* False from kcompactd or /proc/... */
- bool whole_zone; /* Whole zone has been scanned */
+ bool whole_zone; /* Whole zone should/has been scanned */
int order; /* order a direct compactor needs */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
const unsigned int alloc_flags; /* alloc flags of a direct compactor */
diff --git a/mm/ksm.c b/mm/ksm.c
index 5048083b60f2..9ae6011a41f8 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -299,7 +299,12 @@ static inline void free_rmap_item(struct rmap_item *rmap_item)
static inline struct stable_node *alloc_stable_node(void)
{
- return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
+ /*
+ * The allocation can take too long with GFP_KERNEL when memory is under
+ * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
+ * grants access to memory reserves, helping to avoid this problem.
+ */
+ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}
static inline void free_stable_node(struct stable_node *stable_node)
diff --git a/mm/memblock.c b/mm/memblock.c
index 483197ef613f..c8dfa430342b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1438,6 +1438,11 @@ phys_addr_t __init_memblock memblock_phys_mem_size(void)
return memblock.memory.total_size;
}
+phys_addr_t __init_memblock memblock_reserved_size(void)
+{
+ return memblock.reserved.total_size;
+}
+
phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
unsigned long pages = 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4be518d4e68a..ae052b5e3315 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -921,6 +921,43 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
iter = mem_cgroup_iter(NULL, iter, NULL))
/**
+ * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
+ * @memcg: hierarchy root
+ * @fn: function to call for each task
+ * @arg: argument passed to @fn
+ *
+ * This function iterates over tasks attached to @memcg or to any of its
+ * descendants and calls @fn for each task. If @fn returns a non-zero
+ * value, the function breaks the iteration loop and returns the value.
+ * Otherwise, it will iterate over all tasks and return 0.
+ *
+ * This function must not be called for the root memory cgroup.
+ */
+int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*fn)(struct task_struct *, void *), void *arg)
+{
+ struct mem_cgroup *iter;
+ int ret = 0;
+
+ BUG_ON(memcg == root_mem_cgroup);
+
+ for_each_mem_cgroup_tree(iter, memcg) {
+ struct css_task_iter it;
+ struct task_struct *task;
+
+ css_task_iter_start(&iter->css, &it);
+ while (!ret && (task = css_task_iter_next(&it)))
+ ret = fn(task, arg);
+ css_task_iter_end(&it);
+ if (ret) {
+ mem_cgroup_iter_break(memcg, iter);
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
* mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
* @page: the page
* @zone: zone of the page
@@ -1178,7 +1215,7 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
/*
* Return the memory (and swap, if configured) limit for a memcg.
*/
-static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
unsigned long limit;
@@ -1205,79 +1242,12 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
.gfp_mask = gfp_mask,
.order = order,
};
- struct mem_cgroup *iter;
- unsigned long chosen_points = 0;
- unsigned long totalpages;
- unsigned int points = 0;
- struct task_struct *chosen = NULL;
+ bool ret;
mutex_lock(&oom_lock);
-
- /*
- * If current has a pending SIGKILL or is exiting, then automatically
- * select it. The goal is to allow it to allocate so that it may
- * quickly exit and free its memory.
- */
- if (task_will_free_mem(current)) {
- mark_oom_victim(current);
- wake_oom_reaper(current);
- goto unlock;
- }
-
- check_panic_on_oom(&oc, CONSTRAINT_MEMCG);
- totalpages = mem_cgroup_get_limit(memcg) ? : 1;
- for_each_mem_cgroup_tree(iter, memcg) {
- struct css_task_iter it;
- struct task_struct *task;
-
- css_task_iter_start(&iter->css, &it);
- while ((task = css_task_iter_next(&it))) {
- switch (oom_scan_process_thread(&oc, task)) {
- case OOM_SCAN_SELECT:
- if (chosen)
- put_task_struct(chosen);
- chosen = task;
- chosen_points = ULONG_MAX;
- get_task_struct(chosen);
- /* fall through */
- case OOM_SCAN_CONTINUE:
- continue;
- case OOM_SCAN_ABORT:
- css_task_iter_end(&it);
- mem_cgroup_iter_break(memcg, iter);
- if (chosen)
- put_task_struct(chosen);
- /* Set a dummy value to return "true". */
- chosen = (void *) 1;
- goto unlock;
- case OOM_SCAN_OK:
- break;
- };
- points = oom_badness(task, memcg, NULL, totalpages);
- if (!points || points < chosen_points)
- continue;
- /* Prefer thread group leaders for display purposes */
- if (points == chosen_points &&
- thread_group_leader(chosen))
- continue;
-
- if (chosen)
- put_task_struct(chosen);
- chosen = task;
- chosen_points = points;
- get_task_struct(chosen);
- }
- css_task_iter_end(&it);
- }
-
- if (chosen) {
- points = chosen_points * 1000 / totalpages;
- oom_kill_process(&oc, chosen, points, totalpages,
- "Memory cgroup out of memory");
- }
-unlock:
+ ret = out_of_memory(&oc);
mutex_unlock(&oom_lock);
- return chosen;
+ return ret;
}
#if MAX_NUMNODES > 1
@@ -1600,7 +1570,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
if (!memcg)
return false;
- if (!handle || oom_killer_disabled)
+ if (!handle)
goto cleanup;
owait.memcg = memcg;
@@ -2969,16 +2939,16 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
/*
* The active flag needs to be written after the static_key
* update. This is what guarantees that the socket activation
- * function is the last one to run. See sock_update_memcg() for
- * details, and note that we don't mark any socket as belonging
- * to this memcg until that flag is up.
+ * function is the last one to run. See mem_cgroup_sk_alloc()
+ * for details, and note that we don't mark any socket as
+ * belonging to this memcg until that flag is up.
*
* We need to do this, because static_keys will span multiple
* sites, but we can't control their order. If we mark a socket
* as accounted, but the accounting functions are not patched in
* yet, we'll lose accounting.
*
- * We never race with the readers in sock_update_memcg(),
+ * We never race with the readers in mem_cgroup_sk_alloc(),
* because when this value change, the code to process it is not
* patched in yet.
*/
@@ -4092,11 +4062,13 @@ static DEFINE_IDR(mem_cgroup_idr);
static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
{
+ VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
atomic_add(n, &memcg->id.ref);
}
static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
+ VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
if (atomic_sub_and_test(n, &memcg->id.ref)) {
idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg->id.id = 0;
@@ -4285,8 +4257,10 @@ fail:
static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
/* Online state pins memcg ID, memcg ID pins CSS */
- mem_cgroup_id_get(mem_cgroup_from_css(css));
+ atomic_set(&memcg->id.ref, 1);
css_get(css);
return 0;
}
@@ -4434,7 +4408,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
* Because lookup_swap_cache() updates some statistics counter,
* we call find_get_page() with swapper_space directly.
*/
- page = find_get_page(swap_address_space(ent), ent.val);
+ page = find_get_page(swap_address_space(ent), swp_offset(ent));
if (do_memsw_account())
entry->val = ent.val;
@@ -4472,7 +4446,8 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
swp_entry_t swp = radix_to_swp_entry(page);
if (do_memsw_account())
*entry = swp;
- page = find_get_page(swap_address_space(swp), swp.val);
+ page = find_get_page(swap_address_space(swp),
+ swp_offset(swp));
}
} else
page = find_get_page(mapping, pgoff);
@@ -4707,7 +4682,8 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
.mm = mm,
};
down_read(&mm->mmap_sem);
- walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
+ walk_page_range(0, mm->highest_vm_end,
+ &mem_cgroup_count_precharge_walk);
up_read(&mm->mmap_sem);
precharge = mc.precharge;
@@ -4995,7 +4971,8 @@ retry:
* When we have consumed all precharges and failed in doing
* additional charge, the page walk just aborts.
*/
- walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
+ walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
+
up_read(&mc.mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
@@ -5674,11 +5651,15 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);
-void sock_update_memcg(struct sock *sk)
+void mem_cgroup_sk_alloc(struct sock *sk)
{
struct mem_cgroup *memcg;
- /* Socket cloning can throw us here with sk_cgrp already
+ if (!mem_cgroup_sockets_enabled)
+ return;
+
+ /*
+ * Socket cloning can throw us here with sk_memcg already
* filled. It won't however, necessarily happen from
* process context. So the test for root memcg given
* the current task's memcg won't help us in this case.
@@ -5703,12 +5684,11 @@ void sock_update_memcg(struct sock *sk)
out:
rcu_read_unlock();
}
-EXPORT_SYMBOL(sock_update_memcg);
-void sock_release_memcg(struct sock *sk)
+void mem_cgroup_sk_free(struct sock *sk)
{
- WARN_ON(!sk->sk_memcg);
- css_put(&sk->sk_memcg->css);
+ if (sk->sk_memcg)
+ css_put(&sk->sk_memcg->css);
}
/**
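Note on the two VM_BUG_ONs added above: they assert that the memcg ID reference count is only manipulated while it is live, and css_online now seeds it directly with atomic_set(). As a rough, self-contained userspace model of that "online seeds the ref, the last put releases the ID" pattern, the sketch below uses C11 atomics; struct obj, id_table and the obj_id_* helpers are stand-ins invented for the example, corresponding only loosely to memcg->id.ref, mem_cgroup_idr and the mem_cgroup_id_* functions.

/*
 * Rough userspace model of "the ID pins the object, the last put
 * releases the ID". Names and the table are invented for illustration.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
        atomic_int ref;         /* counts ID users, like memcg->id.ref */
        int id;                 /* slot in a lookup table, like mem_cgroup_idr */
};

static struct obj *id_table[16];

static void obj_id_get_many(struct obj *o, int n)
{
        assert(atomic_load(&o->ref) > 0);       /* mirrors the first VM_BUG_ON */
        atomic_fetch_add(&o->ref, n);
}

static void obj_id_put_many(struct obj *o, int n)
{
        assert(atomic_load(&o->ref) >= n);      /* mirrors the second VM_BUG_ON */
        if (atomic_fetch_sub(&o->ref, n) == n) {
                id_table[o->id] = NULL;         /* like idr_remove() */
                o->id = 0;
                printf("last ID reference dropped, ID released\n");
        }
}

int main(void)
{
        struct obj o = { .id = 3 };

        id_table[o.id] = &o;
        atomic_store(&o.ref, 1);        /* like the atomic_set() in css_online */
        obj_id_get_many(&o, 2);
        obj_id_put_many(&o, 3);
        return 0;
}

The assert() calls play the role of the new VM_BUG_ONs: taking a reference on a dead ID, or putting more references than are held, is treated as a bug rather than something to handle at runtime.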
diff --git a/mm/memory.c b/mm/memory.c
index f1a68049edff..fc1987dfd8cc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1649,10 +1649,14 @@ EXPORT_SYMBOL(vm_insert_pfn_prot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn)
{
+ pgprot_t pgprot = vma->vm_page_prot;
+
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
+ if (track_pfn_insert(vma, &pgprot, pfn))
+ return -EINVAL;
/*
* If we don't have pte special, then we have to use the pfn_valid()
@@ -1670,9 +1674,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
* result in pfn_t_has_page() == false.
*/
page = pfn_to_page(pfn_t_to_pfn(pfn));
- return insert_page(vma, addr, page, vma->vm_page_prot);
+ return insert_page(vma, addr, page, pgprot);
}
- return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+ return insert_pfn(vma, addr, pfn, pgprot);
}
EXPORT_SYMBOL(vm_insert_mixed);
@@ -3658,6 +3662,19 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
mem_cgroup_oom_synchronize(false);
}
+ /*
+ * This mm has already been reaped by the oom reaper and so the
+ * refault cannot be trusted in general. Anonymous refaults would
+ * lose data and, e.g., get a zero page instead. This is especially
+ * a problem for use_mm() because regular tasks will just die and
+ * the corrupted data will not be visible anywhere, while a kthread
+ * will outlive the oom victim and potentially propagate the data
+ * further.

+ */
+ if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
+ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
+ ret = VM_FAULT_SIGBUS;
+
return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9d29ba0f7192..962927309b6e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1945,7 +1945,9 @@ repeat:
* dissolve free hugepages in the memory block before doing offlining
* actually in order to make hugetlbfs's object counting consistent.
*/
- dissolve_free_huge_pages(start_pfn, end_pfn);
+ ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+ if (ret)
+ goto failed_removal;
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2da72a5b6ecc..ad1c96ac313c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1749,7 +1749,7 @@ unsigned int mempolicy_slab_node(void)
*/
struct zonelist *zonelist;
enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
- zonelist = &NODE_DATA(node)->node_zonelists[0];
+ zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes);
return z->zone ? z->zone->node : node;
diff --git a/mm/migrate.c b/mm/migrate.c
index f7ee04a5ae27..99250aee1ac1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -234,7 +234,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
goto unlock;
get_page(new);
- pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(*ptep))
pte = pte_mksoft_dirty(pte);
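The READ_ONCE() here pairs with the WRITE_ONCE() that the mm/mmap.c hunk further down adds to vma_set_page_prot(): the writer computes the final vm_page_prot first and publishes it with a single store, so a lockless rmap walker never observes a half-updated value. A minimal userspace sketch of that publish/consume pattern is shown below, using C11 atomics in place of the kernel macros (which compile to volatile accesses, not atomics); the *_model names are invented for the illustration.

/*
 * Sketch of the WRITE_ONCE()/READ_ONCE() pairing: one store publishes a
 * fully computed value, one load consumes it, so no torn value is seen.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long page_prot;         /* stands in for vma->vm_page_prot */

static void vma_set_page_prot_model(unsigned long newprot)
{
        /* compute the final value first, then publish it in a single store */
        atomic_store_explicit(&page_prot, newprot, memory_order_relaxed);
}

static unsigned long rmap_walker_model(void)
{
        /* lockless reader: a single load, taken without any lock */
        return atomic_load_explicit(&page_prot, memory_order_relaxed);
}

int main(void)
{
        vma_set_page_prot_model(0x25);
        printf("lockless reader sees %#lx\n", rmap_walker_model());
        return 0;
}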
diff --git a/mm/mincore.c b/mm/mincore.c
index c0b5ba965200..bfb866435478 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -66,7 +66,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
*/
if (radix_tree_exceptional_entry(page)) {
swp_entry_t swp = radix_to_swp_entry(page);
- page = find_get_page(swap_address_space(swp), swp.val);
+ page = find_get_page(swap_address_space(swp),
+ swp_offset(swp));
}
} else
page = find_get_page(mapping, pgoff);
@@ -150,7 +151,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
#ifdef CONFIG_SWAP
*vec = mincore_page(swap_address_space(entry),
- entry.val);
+ swp_offset(entry));
#else
WARN_ON(1);
*vec = 1;
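Both this hunk and the earlier mm/memcontrol.c ones switch the swap cache lookups from entry.val to swp_offset(entry): a swp_entry_t packs a swap device type and a page offset into one word, and once swap_address_space(entry) has already selected the per-device mapping, only the offset is a meaningful page index. The standalone sketch below models that encoding; the 58-bit split and the unsigned long long representation are assumptions for the example, not the kernel's exact layout.

/*
 * Simplified model of a swp_entry_t: type in the high bits, offset in
 * the low bits. The bit split here is illustrative only.
 */
#include <stdio.h>

typedef struct { unsigned long long val; } swp_entry_t;

#define SWP_OFFSET_BITS 58ULL
#define SWP_OFFSET_MASK ((1ULL << SWP_OFFSET_BITS) - 1)

static swp_entry_t swp_entry(unsigned int type, unsigned long long offset)
{
        swp_entry_t e = { ((unsigned long long)type << SWP_OFFSET_BITS) |
                          (offset & SWP_OFFSET_MASK) };
        return e;
}

static unsigned int swp_type(swp_entry_t e)
{
        return e.val >> SWP_OFFSET_BITS;
}

static unsigned long long swp_offset(swp_entry_t e)
{
        return e.val & SWP_OFFSET_MASK;
}

int main(void)
{
        swp_entry_t e = swp_entry(1, 42);

        /* entry.val mixes type and offset; only the offset is the page
         * index used within the per-device swap cache */
        printf("val=%#llx type=%u offset=%llu\n",
               e.val, swp_type(e), swp_offset(e));
        return 0;
}

Running it shows that val mixes both fields while swp_offset() recovers the page index actually used for the cache lookup.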
diff --git a/mm/mlock.c b/mm/mlock.c
index 14645be06e30..145a4258ddbc 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -516,6 +516,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
int nr_pages;
int ret = 0;
int lock = !!(newflags & VM_LOCKED);
+ vm_flags_t old_flags = vma->vm_flags;
if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
@@ -550,6 +551,8 @@ success:
nr_pages = (end - start) >> PAGE_SHIFT;
if (!lock)
nr_pages = -nr_pages;
+ else if (old_flags & VM_LOCKED)
+ nr_pages = 0;
mm->locked_vm += nr_pages;
/*
@@ -617,6 +620,45 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
return error;
}
+/*
+ * Go through the vma areas and sum the size of the mlocked
+ * vma pages.
+ * Note: the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
+ * is also counted.
+ * Return value: number of previously mlocked pages
+ */
+static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+ unsigned long start, size_t len)
+{
+ struct vm_area_struct *vma;
+ int count = 0;
+
+ if (mm == NULL)
+ mm = current->mm;
+
+ vma = find_vma(mm, start);
+ if (vma == NULL)
+ vma = mm->mmap;
+
+ for (; vma ; vma = vma->vm_next) {
+ if (start >= vma->vm_end)
+ continue;
+ if (start + len <= vma->vm_start)
+ break;
+ if (vma->vm_flags & VM_LOCKED) {
+ if (start > vma->vm_start)
+ count -= (start - vma->vm_start);
+ if (start + len < vma->vm_end) {
+ count += start + len - vma->vm_start;
+ break;
+ }
+ count += vma->vm_end - vma->vm_start;
+ }
+ }
+
+ return count >> PAGE_SHIFT;
+}
+
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
unsigned long locked;
@@ -639,6 +681,16 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
return -EINTR;
locked += current->mm->locked_vm;
+ if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
+ /*
+ * It is possible that the regions requested intersect with
+ * previously mlocked areas; that already-locked part of "mm->locked_vm"
+ * should not be counted towards the new mlock increment. So check
+ * and adjust the locked count if necessary.
+ */
+ locked -= count_mm_mlocked_page_nr(current->mm,
+ start, len);
+ }
/* check against resource limits */
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
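The check added to do_mlock() above subtracts the pages of the request that are already VM_LOCKED before comparing against the RLIMIT_MEMLOCK limit, so overlapping mlock() calls are not charged twice. A standalone sketch of that overlap accounting follows; the struct range array stands in for the VMA list walked by count_mm_mlocked_page_nr(), and the numbers are arbitrary.

/*
 * Count how many bytes of the request [start, start + len) are already
 * inside locked ranges. Ranges stand in for VM_LOCKED vmas.
 */
#include <stdio.h>

struct range { unsigned long start, end; };     /* an already-locked area */

static unsigned long already_locked(const struct range *locked, int n,
                                    unsigned long start, unsigned long len)
{
        unsigned long end = start + len, sum = 0;

        for (int i = 0; i < n; i++) {
                unsigned long s = locked[i].start > start ? locked[i].start : start;
                unsigned long e = locked[i].end < end ? locked[i].end : end;

                if (s < e)              /* overlap between request and locked area */
                        sum += e - s;
        }
        return sum;
}

int main(void)
{
        struct range locked[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };

        printf("already locked in request: %#lx bytes\n",
               already_locked(locked, 2, 0x2000, 0x5000));
        return 0;
}

For a request [0x2000, 0x7000) over locked ranges [0x1000, 0x3000) and [0x5000, 0x6000) it reports 0x2000 bytes already locked, which is the amount the kernel change deducts from the new charge.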
diff --git a/mm/mmap.c b/mm/mmap.c
index 7a0707a48047..1af87c14183d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -116,13 +116,15 @@ static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
void vma_set_page_prot(struct vm_area_struct *vma)
{
unsigned long vm_flags = vma->vm_flags;
+ pgprot_t vm_page_prot;
- vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
- if (vma_wants_writenotify(vma)) {
+ vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+ if (vma_wants_writenotify(vma, vm_page_prot)) {
vm_flags &= ~VM_SHARED;
- vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
- vm_flags);
+ vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
}
+ /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
+ WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
/*
@@ -400,15 +402,9 @@ static inline void vma_rb_insert(struct vm_area_struct *vma,
rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
-static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
+static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
/*
- * All rb_subtree_gap values must be consistent prior to erase,
- * with the possible exception of the vma being erased.
- */
- validate_mm_rb(root, vma);
-
- /*
* Note rb_erase_augmented is a fairly large inline function,
* so make sure we instantiate it only once with our desired
* augmented rbtree callbacks.
@@ -416,6 +412,32 @@ static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
+static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
+ struct rb_root *root,
+ struct vm_area_struct *ignore)
+{
+ /*
+ * All rb_subtree_gap values must be consistent prior to erase,
+ * with the possible exception of the "next" vma being erased if
+ * next->vm_start was reduced.
+ */
+ validate_mm_rb(root, ignore);
+
+ __vma_rb_erase(vma, root);
+}
+
+static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
+ struct rb_root *root)
+{
+ /*
+ * All rb_subtree_gap values must be consistent prior to erase,
+ * with the possible exception of the vma being erased.
+ */
+ validate_mm_rb(root, vma);
+
+ __vma_rb_erase(vma, root);
+}
+
/*
* vma has some anon_vma assigned, and is already inserted on that
* anon_vma's interval trees.
@@ -599,14 +621,25 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
mm->map_count++;
}
-static inline void
-__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
+static __always_inline void __vma_unlink_common(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct vm_area_struct *prev,
+ bool has_prev,
+ struct vm_area_struct *ignore)
{
struct vm_area_struct *next;
- vma_rb_erase(vma, &mm->mm_rb);
- prev->vm_next = next = vma->vm_next;
+ vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
+ next = vma->vm_next;
+ if (has_prev)
+ prev->vm_next = next;
+ else {
+ prev = vma->vm_prev;
+ if (prev)
+ prev->vm_next = next;
+ else
+ mm->mmap = next;
+ }
if (next)
next->vm_prev = prev;
@@ -614,6 +647,13 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
vmacache_invalidate(mm);
}
+static inline void __vma_unlink_prev(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ struct vm_area_struct *prev)
+{
+ __vma_unlink_common(mm, vma, prev, true, vma);
+}
+
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
@@ -621,11 +661,12 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
* are necessary. The "insert" vma (if any) is to be inserted
* before we drop the necessary locks.
*/
-int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
+ struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root *root = NULL;
struct anon_vma *anon_vma = NULL;
@@ -641,9 +682,38 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
/*
* vma expands, overlapping all the next, and
* perhaps the one after too (mprotect case 6).
+ * The only other cases that gets here are
+ * case 1, case 7 and case 8.
*/
- remove_next = 1 + (end > next->vm_end);
- end = next->vm_end;
+ if (next == expand) {
+ /*
+ * The only case where we don't expand "vma"
+ * and we expand "next" instead is case 8.
+ */
+ VM_WARN_ON(end != next->vm_end);
+ /*
+ * remove_next == 3 means we're
+ * removing "vma" and that to do so we
+ * swapped "vma" and "next".
+ */
+ remove_next = 3;
+ VM_WARN_ON(file != next->vm_file);
+ swap(vma, next);
+ } else {
+ VM_WARN_ON(expand != vma);
+ /*
+ * case 1, 6, 7, remove_next == 2 is case 6,
+ * remove_next == 1 is case 1 or 7.
+ */
+ remove_next = 1 + (end > next->vm_end);
+ VM_WARN_ON(remove_next == 2 &&
+ end != next->vm_next->vm_end);
+ VM_WARN_ON(remove_next == 1 &&
+ end != next->vm_end);
+ /* trim end to next, for case 6 first pass */
+ end = next->vm_end;
+ }
+
exporter = next;
importer = vma;
@@ -651,7 +721,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
* If next doesn't have anon_vma, import from vma after
* next, if the vma overlaps with it.
*/
- if (remove_next == 2 && next && !next->anon_vma)
+ if (remove_next == 2 && !next->anon_vma)
exporter = next->vm_next;
} else if (end > next->vm_start) {
@@ -662,6 +732,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
exporter = next;
importer = vma;
+ VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
* vma shrinks, and !insert tells it's not
@@ -671,6 +742,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
exporter = vma;
importer = next;
+ VM_WARN_ON(expand != importer);
}
/*
@@ -688,7 +760,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
}
}
again:
- vma_adjust_trans_huge(vma, start, end, adjust_next);
+ vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
@@ -714,8 +786,8 @@ again:
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
- VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
- anon_vma != next->anon_vma, next);
+ VM_WARN_ON(adjust_next && next->anon_vma &&
+ anon_vma != next->anon_vma);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
@@ -755,7 +827,19 @@ again:
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
*/
- __vma_unlink(mm, next, vma);
+ if (remove_next != 3)
+ __vma_unlink_prev(mm, next, vma);
+ else
+ /*
+ * vma is not before next if they've been
+ * swapped.
+ *
+ * pre-swap() next->vm_start was reduced so
+ * tell validate_mm_rb to ignore pre-swap()
+ * "next" (which is stored in post-swap()
+ * "vma").
+ */
+ __vma_unlink_common(mm, next, NULL, false, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
@@ -807,7 +891,27 @@ again:
* we must remove another next too. It would clutter
* up the code too much to do both in one go.
*/
- next = vma->vm_next;
+ if (remove_next != 3) {
+ /*
+ * If "next" was removed and vma->vm_end was
+ * expanded (up) over it, in turn
+ * "next->vm_prev->vm_end" changed and the
+ * "vma->vm_next" gap must be updated.
+ */
+ next = vma->vm_next;
+ } else {
+ /*
+ * For the scope of the comment "next" and
+ * "vma" considered pre-swap(): if "vma" was
+ * removed, next->vm_start was expanded (down)
+ * over it and the "next" gap must be updated.
+ * Because of the swap() the post-swap() "vma"
+ * actually points to pre-swap() "next"
+ * (post-swap() "next" as opposed is now a
+ * dangling pointer).
+ */
+ next = vma;
+ }
if (remove_next == 2) {
remove_next = 1;
end = next->vm_end;
@@ -815,8 +919,28 @@ again:
}
else if (next)
vma_gap_update(next);
- else
- mm->highest_vm_end = end;
+ else {
+ /*
+ * If remove_next == 2 we obviously can't
+ * reach this path.
+ *
+ * If remove_next == 3 we can't reach this
+ * path because pre-swap() next is always not
+ * NULL. pre-swap() "next" is not being
+ * removed and its next->vm_end is not altered
+ * (and furthermore "end" already matches
+ * next->vm_end in remove_next == 3).
+ *
+ * We reach this only in the remove_next == 1
+ * case if the "next" vma that was removed was
+ * the highest vma of the mm. However in such
+ * case next->vm_end == "end" and the extended
+ * "vma" has vma->vm_end == next->vm_end so
+ * mm->highest_vm_end doesn't need any update
+ * in remove_next == 1 case.
+ */
+ VM_WARN_ON(mm->highest_vm_end != end);
+ }
}
if (insert && file)
uprobe_mmap(insert);
@@ -936,13 +1060,24 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
* cannot merge might become might become might become
* PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or
* mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or
- * mremap move: PPPPNNNNNNNN 8
+ * mremap move: PPPPXXXXXXXX 8
* AAAA
* PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
* might become case 1 below case 2 below case 3 below
*
- * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
- * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
+ * It is important for case 8 that the vma NNNN overlapping the
+ * region AAAA is never going to be extended over XXXX. Instead XXXX must
+ * be extended in region AAAA and NNNN must be removed. This way in
+ * all cases where vma_merge succeeds, the moment vma_adjust drops the
+ * rmap_locks, the properties of the merged vma will be already
+ * correct for the whole merged range. Some of those properties like
+ * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
+ * be correct for the whole merged range immediately after the
+ * rmap_locks are released. Otherwise if XXXX would be removed and
+ * NNNN would be extended over the XXXX range, remove_migration_ptes
+ * or other rmap walkers (if working on addresses beyond the "end"
+ * parameter) may establish ptes with the wrong permissions of NNNN
+ * instead of the right permissions of XXXX.
*/
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
@@ -967,9 +1102,14 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
else
next = mm->mmap;
area = next;
- if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ if (area && area->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
+ /* verify some invariants that must be enforced by the caller */
+ VM_WARN_ON(prev && addr <= prev->vm_start);
+ VM_WARN_ON(area && end > area->vm_end);
+ VM_WARN_ON(addr >= end);
+
/*
* Can it merge with the predecessor?
*/
@@ -990,11 +1130,12 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) {
/* cases 1, 6 */
- err = vma_adjust(prev, prev->vm_start,
- next->vm_end, prev->vm_pgoff, NULL);
+ err = __vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL,
+ prev);
} else /* cases 2, 5, 7 */
- err = vma_adjust(prev, prev->vm_start,
- end, prev->vm_pgoff, NULL);
+ err = __vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL, prev);
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1010,11 +1151,18 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx)) {
if (prev && addr < prev->vm_end) /* case 4 */
- err = vma_adjust(prev, prev->vm_start,
- addr, prev->vm_pgoff, NULL);
- else /* cases 3, 8 */
- err = vma_adjust(area, addr, next->vm_end,
- next->vm_pgoff - pglen, NULL);
+ err = __vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL, next);
+ else { /* cases 3, 8 */
+ err = __vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL, next);
+ /*
+ * In case 3 area is already equal to next and
+ * this is a noop, but in case 8 "area" has
+ * been removed and next was expanded over it.
+ */
+ area = next;
+ }
if (err)
return NULL;
khugepaged_enter_vma_merge(area, vm_flags);
@@ -1386,7 +1534,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
* to the private version (using protection_map[] without the
* VM_SHARED bit).
*/
-int vma_wants_writenotify(struct vm_area_struct *vma)
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
vm_flags_t vm_flags = vma->vm_flags;
const struct vm_operations_struct *vm_ops = vma->vm_ops;
@@ -1401,8 +1549,8 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
/* The open routine did something to the protections that pgprot_modify
* won't preserve? */
- if (pgprot_val(vma->vm_page_prot) !=
- pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
+ if (pgprot_val(vm_page_prot) !=
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
/* Do we need to track softdirty? */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a4830f0325fe..ec91dfd3f900 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -304,6 +304,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
vma->vm_userfaultfd_ctx);
if (*pprev) {
vma = *pprev;
+ VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
goto success;
}
@@ -327,7 +328,7 @@ success:
* held in write mode.
*/
vma->vm_flags = newflags;
- dirty_accountable = vma_wants_writenotify(vma);
+ dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
vma_set_page_prot(vma);
change_protection(vma, start, end, vma->vm_page_prot,
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index bd05a70f44b9..ba609b684d7a 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -11,18 +11,21 @@
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <asm/bug.h>
#include <asm/io.h>
-#include <asm/processor.h>
#include "internal.h"
+#ifndef CONFIG_HAVE_MEMBLOCK
+#error CONFIG_HAVE_MEMBLOCK not defined
+#endif
+
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
@@ -134,6 +137,11 @@ static unsigned long __init free_low_memory_core_early(void)
for_each_reserved_mem_region(i, &start, &end)
reserve_bootmem_region(start, end);
+ /*
+ * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
+ * because in some case like Node0 doesn't have RAM installed
+ * low ram will be on Node1
+ */
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
NULL)
count += __free_memory_core(start, end);
@@ -191,11 +199,6 @@ unsigned long __init free_all_bootmem(void)
reset_all_zones_managed_pages();
- /*
- * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
- * because in some case like Node0 doesn't have RAM installed
- * low ram will be on Node1
- */
pages = free_low_memory_core_early();
totalram_pages += pages;
@@ -395,9 +398,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
return __alloc_bootmem_node(pgdat, size, align, goal);
}
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
/**
* __alloc_bootmem_low - allocate low boot memory
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d53a9aa00977..ec9f11d4f094 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -132,6 +132,11 @@ static inline bool is_sysrq_oom(struct oom_control *oc)
return oc->order == -1;
}
+static inline bool is_memcg_oom(struct oom_control *oc)
+{
+ return oc->memcg != NULL;
+}
+
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask)
@@ -181,7 +186,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
*/
adj = (long)p->signal->oom_score_adj;
if (adj == OOM_SCORE_ADJ_MIN ||
- test_bit(MMF_OOM_REAPED, &p->mm->flags) ||
+ test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
in_vfork(p)) {
task_unlock(p);
return 0;
@@ -213,12 +218,17 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
return points > 0 ? points : 1;
}
+enum oom_constraint {
+ CONSTRAINT_NONE,
+ CONSTRAINT_CPUSET,
+ CONSTRAINT_MEMORY_POLICY,
+ CONSTRAINT_MEMCG,
+};
+
/*
* Determine the type of allocation constraint.
*/
-#ifdef CONFIG_NUMA
-static enum oom_constraint constrained_alloc(struct oom_control *oc,
- unsigned long *totalpages)
+static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
struct zone *zone;
struct zoneref *z;
@@ -226,8 +236,16 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc,
bool cpuset_limited = false;
int nid;
+ if (is_memcg_oom(oc)) {
+ oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
+ return CONSTRAINT_MEMCG;
+ }
+
/* Default to all available memory */
- *totalpages = totalram_pages + total_swap_pages;
+ oc->totalpages = totalram_pages + total_swap_pages;
+
+ if (!IS_ENABLED(CONFIG_NUMA))
+ return CONSTRAINT_NONE;
if (!oc->zonelist)
return CONSTRAINT_NONE;
@@ -246,9 +264,9 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc,
*/
if (oc->nodemask &&
!nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
- *totalpages = total_swap_pages;
+ oc->totalpages = total_swap_pages;
for_each_node_mask(nid, *oc->nodemask)
- *totalpages += node_spanned_pages(nid);
+ oc->totalpages += node_spanned_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
}
@@ -259,98 +277,84 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc,
cpuset_limited = true;
if (cpuset_limited) {
- *totalpages = total_swap_pages;
+ oc->totalpages = total_swap_pages;
for_each_node_mask(nid, cpuset_current_mems_allowed)
- *totalpages += node_spanned_pages(nid);
+ oc->totalpages += node_spanned_pages(nid);
return CONSTRAINT_CPUSET;
}
return CONSTRAINT_NONE;
}
-#else
-static enum oom_constraint constrained_alloc(struct oom_control *oc,
- unsigned long *totalpages)
-{
- *totalpages = totalram_pages + total_swap_pages;
- return CONSTRAINT_NONE;
-}
-#endif
-enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
- struct task_struct *task)
+static int oom_evaluate_task(struct task_struct *task, void *arg)
{
+ struct oom_control *oc = arg;
+ unsigned long points;
+
if (oom_unkillable_task(task, NULL, oc->nodemask))
- return OOM_SCAN_CONTINUE;
+ goto next;
/*
* This task already has access to memory reserves and is being killed.
* Don't allow any other task to have access to the reserves unless
- * the task has MMF_OOM_REAPED because chances that it would release
+ * the task has MMF_OOM_SKIP because chances that it would release
* any memory is quite low.
*/
- if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims)) {
- struct task_struct *p = find_lock_task_mm(task);
- enum oom_scan_t ret = OOM_SCAN_ABORT;
-
- if (p) {
- if (test_bit(MMF_OOM_REAPED, &p->mm->flags))
- ret = OOM_SCAN_CONTINUE;
- task_unlock(p);
- }
-
- return ret;
+ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
+ if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
+ goto next;
+ goto abort;
}
/*
* If task is allocating a lot of memory and has been marked to be
* killed first if it triggers an oom, then select it.
*/
- if (oom_task_origin(task))
- return OOM_SCAN_SELECT;
+ if (oom_task_origin(task)) {
+ points = ULONG_MAX;
+ goto select;
+ }
- return OOM_SCAN_OK;
+ points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+ if (!points || points < oc->chosen_points)
+ goto next;
+
+ /* Prefer thread group leaders for display purposes */
+ if (points == oc->chosen_points && thread_group_leader(oc->chosen))
+ goto next;
+select:
+ if (oc->chosen)
+ put_task_struct(oc->chosen);
+ get_task_struct(task);
+ oc->chosen = task;
+ oc->chosen_points = points;
+next:
+ return 0;
+abort:
+ if (oc->chosen)
+ put_task_struct(oc->chosen);
+ oc->chosen = (void *)-1UL;
+ return 1;
}
/*
- * Simple selection loop. We chose the process with the highest
- * number of 'points'. Returns -1 on scan abort.
+ * Simple selection loop. We choose the process with the highest number of
+ * 'points'. If the scan was aborted, oc->chosen is set to -1.
*/
-static struct task_struct *select_bad_process(struct oom_control *oc,
- unsigned int *ppoints, unsigned long totalpages)
+static void select_bad_process(struct oom_control *oc)
{
- struct task_struct *p;
- struct task_struct *chosen = NULL;
- unsigned long chosen_points = 0;
-
- rcu_read_lock();
- for_each_process(p) {
- unsigned int points;
-
- switch (oom_scan_process_thread(oc, p)) {
- case OOM_SCAN_SELECT:
- chosen = p;
- chosen_points = ULONG_MAX;
- /* fall through */
- case OOM_SCAN_CONTINUE:
- continue;
- case OOM_SCAN_ABORT:
- rcu_read_unlock();
- return (struct task_struct *)(-1UL);
- case OOM_SCAN_OK:
- break;
- };
- points = oom_badness(p, NULL, oc->nodemask, totalpages);
- if (!points || points < chosen_points)
- continue;
+ if (is_memcg_oom(oc))
+ mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
+ else {
+ struct task_struct *p;
- chosen = p;
- chosen_points = points;
+ rcu_read_lock();
+ for_each_process(p)
+ if (oom_evaluate_task(p, oc))
+ break;
+ rcu_read_unlock();
}
- if (chosen)
- get_task_struct(chosen);
- rcu_read_unlock();
- *ppoints = chosen_points * 1000 / totalpages;
- return chosen;
+ oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
/**
@@ -399,9 +403,14 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
- pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
- current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
+ nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;
+
+ pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
+ current->comm, oc->gfp_mask, &oc->gfp_mask,
+ nodemask_pr_args(nm), oc->order,
current->signal->oom_score_adj);
+ if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
+ pr_warn("COMPACTION is disabled!!!\n");
cpuset_print_current_mems_allowed();
dump_stack();
@@ -419,7 +428,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
-bool oom_killer_disabled __read_mostly;
+static bool oom_killer_disabled __read_mostly;
#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -452,12 +461,10 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
-static bool __oom_reap_task(struct task_struct *tsk)
+static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
- struct mm_struct *mm = NULL;
- struct task_struct *p;
struct zap_details details = {.check_swap_entries = true,
.ignore_dirty = true};
bool ret = true;
@@ -465,7 +472,7 @@ static bool __oom_reap_task(struct task_struct *tsk)
/*
* We have to make sure to not race with the victim exit path
* and cause premature new oom victim selection:
- * __oom_reap_task exit_mm
+ * __oom_reap_task_mm exit_mm
* mmget_not_zero
* mmput
* atomic_dec_and_test
@@ -478,22 +485,9 @@ static bool __oom_reap_task(struct task_struct *tsk)
*/
mutex_lock(&oom_lock);
- /*
- * Make sure we find the associated mm_struct even when the particular
- * thread has already terminated and cleared its mm.
- * We might have race with exit path so consider our work done if there
- * is no mm.
- */
- p = find_lock_task_mm(tsk);
- if (!p)
- goto unlock_oom;
- mm = p->mm;
- atomic_inc(&mm->mm_count);
- task_unlock(p);
-
if (!down_read_trylock(&mm->mmap_sem)) {
ret = false;
- goto mm_drop;
+ goto unlock_oom;
}
/*
@@ -503,9 +497,17 @@ static bool __oom_reap_task(struct task_struct *tsk)
*/
if (!mmget_not_zero(mm)) {
up_read(&mm->mmap_sem);
- goto mm_drop;
+ goto unlock_oom;
}
+ /*
+ * Tell all users of get_user/copy_from_user etc... that the content
+ * is no longer stable. No barriers are really needed because unmapping
+ * should already imply barriers, and the reader would hit a page fault
+ * if it stumbled over reaped memory.
+ */
+ set_bit(MMF_UNSTABLE, &mm->flags);
+
tlb_gather_mmu(&tlb, mm, 0, -1);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
if (is_vm_hugetlb_page(vma))
@@ -541,18 +543,11 @@ static bool __oom_reap_task(struct task_struct *tsk)
up_read(&mm->mmap_sem);
/*
- * This task can be safely ignored because we cannot do much more
- * to release its memory.
- */
- set_bit(MMF_OOM_REAPED, &mm->flags);
- /*
* Drop our reference but make sure the mmput slow path is called from a
* different context because we shouldn't risk we get stuck there and
* put the oom_reaper out of the way.
*/
mmput_async(mm);
-mm_drop:
- mmdrop(mm);
unlock_oom:
mutex_unlock(&oom_lock);
return ret;
@@ -562,44 +557,28 @@ unlock_oom:
static void oom_reap_task(struct task_struct *tsk)
{
int attempts = 0;
+ struct mm_struct *mm = tsk->signal->oom_mm;
/* Retry the down_read_trylock(mmap_sem) a few times */
- while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
+ while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
schedule_timeout_idle(HZ/10);
- if (attempts > MAX_OOM_REAP_RETRIES) {
- struct task_struct *p;
+ if (attempts <= MAX_OOM_REAP_RETRIES)
+ goto done;
- pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
- task_pid_nr(tsk), tsk->comm);
- /*
- * If we've already tried to reap this task in the past and
- * failed it probably doesn't make much sense to try yet again
- * so hide the mm from the oom killer so that it can move on
- * to another task with a different mm struct.
- */
- p = find_lock_task_mm(tsk);
- if (p) {
- if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &p->mm->flags)) {
- pr_info("oom_reaper: giving up pid:%d (%s)\n",
- task_pid_nr(tsk), tsk->comm);
- set_bit(MMF_OOM_REAPED, &p->mm->flags);
- }
- task_unlock(p);
- }
+ pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+ task_pid_nr(tsk), tsk->comm);
+ debug_show_all_locks();
- debug_show_all_locks();
- }
+done:
+ tsk->oom_reaper_list = NULL;
/*
- * Clear TIF_MEMDIE because the task shouldn't be sitting on a
- * reasonably reclaimable memory anymore or it is not a good candidate
- * for the oom victim right now because it cannot release its memory
- * itself nor by the oom reaper.
+ * Hide this mm from OOM killer because it has been either reaped or
+ * somebody can't call up_write(mmap_sem).
*/
- tsk->oom_reaper_list = NULL;
- exit_oom_victim(tsk);
+ set_bit(MMF_OOM_SKIP, &mm->flags);
/* Drop a reference taken by wake_oom_reaper */
put_task_struct(tsk);
@@ -607,8 +586,6 @@ static void oom_reap_task(struct task_struct *tsk)
static int oom_reaper(void *unused)
{
- set_freezable();
-
while (true) {
struct task_struct *tsk = NULL;
@@ -627,7 +604,7 @@ static int oom_reaper(void *unused)
return 0;
}
-void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct task_struct *tsk)
{
if (!oom_reaper_th)
return;
@@ -656,7 +633,11 @@ static int __init oom_init(void)
return 0;
}
subsys_initcall(oom_init)
-#endif
+#else
+static inline void wake_oom_reaper(struct task_struct *tsk)
+{
+}
+#endif /* CONFIG_MMU */
/**
* mark_oom_victim - mark the given task as OOM victim
@@ -664,14 +645,23 @@ subsys_initcall(oom_init)
*
* Has to be called with oom_lock held and never after
* oom has been disabled already.
+ *
+ * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
+ * (either under task_lock or by operating on current).
*/
-void mark_oom_victim(struct task_struct *tsk)
+static void mark_oom_victim(struct task_struct *tsk)
{
+ struct mm_struct *mm = tsk->mm;
+
WARN_ON(oom_killer_disabled);
/* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
return;
- atomic_inc(&tsk->signal->oom_victims);
+
+ /* oom_mm is bound to the signal struct life time. */
+ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+ atomic_inc(&tsk->signal->oom_mm->mm_count);
+
/*
* Make sure that the task is woken up from uninterruptible sleep
* if it is frozen because OOM killer wouldn't be able to free
@@ -685,21 +675,29 @@ void mark_oom_victim(struct task_struct *tsk)
/**
* exit_oom_victim - note the exit of an OOM victim
*/
-void exit_oom_victim(struct task_struct *tsk)
+void exit_oom_victim(void)
{
- if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
- return;
- atomic_dec(&tsk->signal->oom_victims);
+ clear_thread_flag(TIF_MEMDIE);
if (!atomic_dec_return(&oom_victims))
wake_up_all(&oom_victims_wait);
}
/**
+ * oom_killer_enable - enable OOM killer
+ */
+void oom_killer_enable(void)
+{
+ oom_killer_disabled = false;
+}
+
+/**
* oom_killer_disable - disable OOM killer
+ * @timeout: maximum timeout to wait for oom victims in jiffies
*
* Forces all page allocations to fail rather than trigger OOM killer.
- * Will block and wait until all OOM victims are killed.
+ * Will block and wait until all OOM victims are killed or the given
+ * timeout expires.
*
* The function cannot be called when there are runnable user tasks because
* the userspace would see unexpected allocation failures as a result. Any
@@ -708,8 +706,10 @@ void exit_oom_victim(struct task_struct *tsk)
* Returns true if successful and false if the OOM killer cannot be
* disabled.
*/
-bool oom_killer_disable(void)
+bool oom_killer_disable(signed long timeout)
{
+ signed long ret;
+
/*
* Make sure to not race with an ongoing OOM killer. Check that the
* current is not killed (possibly due to sharing the victim's memory).
@@ -719,19 +719,16 @@ bool oom_killer_disable(void)
oom_killer_disabled = true;
mutex_unlock(&oom_lock);
- wait_event(oom_victims_wait, !atomic_read(&oom_victims));
+ ret = wait_event_interruptible_timeout(oom_victims_wait,
+ !atomic_read(&oom_victims), timeout);
+ if (ret <= 0) {
+ oom_killer_enable();
+ return false;
+ }
return true;
}
-/**
- * oom_killer_enable - enable OOM killer
- */
-void oom_killer_enable(void)
-{
- oom_killer_disabled = false;
-}
-
static inline bool __task_will_free_mem(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
@@ -760,7 +757,7 @@ static inline bool __task_will_free_mem(struct task_struct *task)
* Caller has to make sure that task->mm is stable (hold task_lock or
* it operates on the current).
*/
-bool task_will_free_mem(struct task_struct *task)
+static bool task_will_free_mem(struct task_struct *task)
{
struct mm_struct *mm = task->mm;
struct task_struct *p;
@@ -781,15 +778,16 @@ bool task_will_free_mem(struct task_struct *task)
* This task has already been drained by the oom reaper so there are
* only small chances it will free some more
*/
- if (test_bit(MMF_OOM_REAPED, &mm->flags))
+ if (test_bit(MMF_OOM_SKIP, &mm->flags))
return false;
if (atomic_read(&mm->mm_users) <= 1)
return true;
/*
- * This is really pessimistic but we do not have any reliable way
- * to check that external processes share with our mm
+ * Make sure that all tasks which share the mm with the given task
+ * are dying as well, so that a) nobody pins its mm and
+ * b) the task is also reapable by the oom reaper.
*/
rcu_read_lock();
for_each_process(p) {
@@ -806,14 +804,10 @@ bool task_will_free_mem(struct task_struct *task)
return ret;
}
-/*
- * Must be called while holding a reference to p, which will be released upon
- * returning.
- */
-void oom_kill_process(struct oom_control *oc, struct task_struct *p,
- unsigned int points, unsigned long totalpages,
- const char *message)
+static void oom_kill_process(struct oom_control *oc, const char *message)
{
+ struct task_struct *p = oc->chosen;
+ unsigned int points = oc->chosen_points;
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t;
@@ -860,7 +854,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
* oom_badness() returns 0 if the thread is unkillable
*/
child_points = oom_badness(child,
- oc->memcg, oc->nodemask, totalpages);
+ oc->memcg, oc->nodemask, oc->totalpages);
if (child_points > victim_points) {
put_task_struct(victim);
victim = child;
@@ -913,20 +907,20 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
continue;
if (same_thread_group(p, victim))
continue;
- if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p)) {
- /*
- * We cannot use oom_reaper for the mm shared by this
- * process because it wouldn't get killed and so the
- * memory might be still used. Hide the mm from the oom
- * killer to guarantee OOM forward progress.
- */
+ if (is_global_init(p)) {
can_oom_reap = false;
- set_bit(MMF_OOM_REAPED, &mm->flags);
+ set_bit(MMF_OOM_SKIP, &mm->flags);
pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
task_pid_nr(victim), victim->comm,
task_pid_nr(p), p->comm);
continue;
}
+ /*
+ * No use_mm() user needs to read from userspace, so we are
+ * ok to reap it.
+ */
+ if (unlikely(p->flags & PF_KTHREAD))
+ continue;
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
}
rcu_read_unlock();
@@ -942,7 +936,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
-void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint)
+static void check_panic_on_oom(struct oom_control *oc,
+ enum oom_constraint constraint)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -988,19 +983,18 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
*/
bool out_of_memory(struct oom_control *oc)
{
- struct task_struct *p;
- unsigned long totalpages;
unsigned long freed = 0;
- unsigned int uninitialized_var(points);
enum oom_constraint constraint = CONSTRAINT_NONE;
if (oom_killer_disabled)
return false;
- blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
- if (freed > 0)
- /* Got some memory back in the last second. */
- return true;
+ if (!is_memcg_oom(oc)) {
+ blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
+ if (freed > 0)
+ /* Got some memory back in the last second. */
+ return true;
+ }
/*
* If current has a pending SIGKILL or is exiting, then automatically
@@ -1024,37 +1018,38 @@ bool out_of_memory(struct oom_control *oc)
/*
* Check if there were limitations on the allocation (only relevant for
- * NUMA) that may require different handling.
+ * NUMA and memcg) that may require different handling.
*/
- constraint = constrained_alloc(oc, &totalpages);
+ constraint = constrained_alloc(oc);
if (constraint != CONSTRAINT_MEMORY_POLICY)
oc->nodemask = NULL;
check_panic_on_oom(oc, constraint);
- if (sysctl_oom_kill_allocating_task && current->mm &&
- !oom_unkillable_task(current, NULL, oc->nodemask) &&
+ if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
+ current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
get_task_struct(current);
- oom_kill_process(oc, current, 0, totalpages,
- "Out of memory (oom_kill_allocating_task)");
+ oc->chosen = current;
+ oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
return true;
}
- p = select_bad_process(oc, &points, totalpages);
+ select_bad_process(oc);
/* Found nothing?!?! Either we hang forever, or we panic. */
- if (!p && !is_sysrq_oom(oc)) {
+ if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
dump_header(oc, NULL);
panic("Out of memory and no killable processes...\n");
}
- if (p && p != (void *)-1UL) {
- oom_kill_process(oc, p, points, totalpages, "Out of memory");
+ if (oc->chosen && oc->chosen != (void *)-1UL) {
+ oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
+ "Memory cgroup out of memory");
/*
* Give the killed process a good chance to exit before trying
* to allocate memory again.
*/
schedule_timeout_killable(1);
}
- return true;
+ return !!oc->chosen;
}
/*
@@ -1077,16 +1072,6 @@ void pagefault_out_of_memory(void)
if (!mutex_trylock(&oom_lock))
return;
-
- if (!out_of_memory(&oc)) {
- /*
- * There shouldn't be any user tasks runnable while the
- * OOM killer is disabled, so the current task has to
- * be a racing OOM victim for which oom_killer_disable()
- * is waiting for.
- */
- WARN_ON(test_thread_flag(TIF_MEMDIE));
- }
-
+ out_of_memory(&oc);
mutex_unlock(&oom_lock);
}
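The core of the oom_kill.c rework is that select_bad_process() no longer open-codes the scan: both the global path and the memcg path now push every candidate through a single callback, oom_evaluate_task(), which records the best candidate in the shared oom_control and returns nonzero to abort the walk. The userspace sketch below models that callback-iterator shape; struct candidate, struct control and the function names are invented stand-ins, and the scoring is trivial compared to oom_badness().

/*
 * Callback-driven selection: one evaluate function keeps the best
 * candidate in shared state; a nonzero return aborts the walk.
 */
#include <stdio.h>

struct candidate { const char *name; unsigned long points; int unkillable; };

struct control {
        struct candidate *chosen;
        unsigned long chosen_points;
};

/* a nonzero return would abort the walk, like oom_evaluate_task()'s abort path */
static int evaluate(struct candidate *c, void *arg)
{
        struct control *oc = arg;

        if (c->unkillable)
                return 0;                       /* "goto next" */
        if (c->points <= oc->chosen_points)
                return 0;
        oc->chosen = c;                         /* "goto select" */
        oc->chosen_points = c->points;
        return 0;
}

static void for_each_candidate(struct candidate *v, int n,
                               int (*fn)(struct candidate *, void *), void *arg)
{
        for (int i = 0; i < n; i++)
                if (fn(&v[i], arg))
                        break;                  /* callback asked to abort */
}

int main(void)
{
        struct candidate tasks[] = {
                { "init", 1, 1 }, { "hog", 900, 0 }, { "shell", 10, 0 },
        };
        struct control oc = { 0 };

        for_each_candidate(tasks, 3, evaluate, &oc);
        printf("chosen: %s (%lu points)\n", oc.chosen->name, oc.chosen_points);
        return 0;
}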
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 28d6f36a2d79..439cc63ad903 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1965,36 +1965,6 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
return false;
}
-void throttle_vm_writeout(gfp_t gfp_mask)
-{
- unsigned long background_thresh;
- unsigned long dirty_thresh;
-
- for ( ; ; ) {
- global_dirty_limits(&background_thresh, &dirty_thresh);
- dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
-
- /*
- * Boost the allowable dirty threshold a bit for page
- * allocators so they don't get DoS'ed by heavy writers
- */
- dirty_thresh += dirty_thresh / 10; /* wheeee... */
-
- if (global_node_page_state(NR_UNSTABLE_NFS) +
- global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
- break;
- congestion_wait(BLK_RW_ASYNC, HZ/10);
-
- /*
- * The caller might hold locks which can prevent IO completion
- * or progress in the filesystem. So we cannot just sit here
- * waiting for IO to complete.
- */
- if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
- break;
- }
-}
-
/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
@@ -2746,7 +2716,7 @@ int test_clear_page_writeback(struct page *page)
int ret;
lock_page_memcg(page);
- if (mapping) {
+ if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
unsigned long flags;
@@ -2789,7 +2759,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
int ret;
lock_page_memcg(page);
- if (mapping) {
+ if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
unsigned long flags;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2214c64ed3c..ca423cc20b59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -607,6 +607,9 @@ static bool need_debug_guardpage(void)
if (!debug_pagealloc_enabled())
return false;
+ if (!debug_guardpage_minorder())
+ return false;
+
return true;
}
@@ -615,6 +618,9 @@ static void init_debug_guardpage(void)
if (!debug_pagealloc_enabled())
return;
+ if (!debug_guardpage_minorder())
+ return;
+
_debug_guardpage_enabled = true;
}
@@ -635,19 +641,22 @@ static int __init debug_guardpage_minorder_setup(char *buf)
pr_info("Setting debug_guardpage_minorder to %lu\n", res);
return 0;
}
-__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
-static inline void set_page_guard(struct zone *zone, struct page *page,
+static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype)
{
struct page_ext *page_ext;
if (!debug_guardpage_enabled())
- return;
+ return false;
+
+ if (order >= debug_guardpage_minorder())
+ return false;
page_ext = lookup_page_ext(page);
if (unlikely(!page_ext))
- return;
+ return false;
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
@@ -655,6 +664,8 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
set_page_private(page, order);
/* Guard pages are not available for any usage */
__mod_zone_freepage_state(zone, -(1 << order), migratetype);
+
+ return true;
}
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -676,9 +687,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
-struct page_ext_operations debug_guardpage_ops = { NULL, };
-static inline void set_page_guard(struct zone *zone, struct page *page,
- unsigned int order, int migratetype) {}
+struct page_ext_operations debug_guardpage_ops;
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype) {}
#endif
@@ -1393,15 +1404,18 @@ static void __init deferred_free_range(struct page *page,
return;
/* Free a large naturally-aligned chunk if possible */
- if (nr_pages == MAX_ORDER_NR_PAGES &&
- (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+ if (nr_pages == pageblock_nr_pages &&
+ (pfn & (pageblock_nr_pages - 1)) == 0) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- __free_pages_boot_core(page, MAX_ORDER-1);
+ __free_pages_boot_core(page, pageblock_order);
return;
}
- for (i = 0; i < nr_pages; i++, page++)
+ for (i = 0; i < nr_pages; i++, page++, pfn++) {
+ if ((pfn & (pageblock_nr_pages - 1)) == 0)
+ set_pageblock_migratetype(page, MIGRATE_MOVABLE);
__free_pages_boot_core(page, 0);
+ }
}
/* Completion tracking for deferred_init_memmap() threads */
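deferred_free_range() now works in pageblock units instead of MAX_ORDER chunks: a naturally aligned run of pageblock_nr_pages goes back to the allocator as one high-order free, and anything else is freed page by page while the migratetype is set whenever a pfn crosses a pageblock boundary. The model below only traces which of those two paths a range takes; pageblock_nr_pages is assumed to be 512 here, and the prints stand in for the real free calls.

/*
 * Trace of the two paths in the reworked deferred_free_range():
 * whole aligned pageblock vs. page-by-page with boundary marking.
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL        /* assumed value for the example */

static void deferred_free_range_model(unsigned long pfn, unsigned long nr_pages)
{
        if (nr_pages == PAGEBLOCK_NR_PAGES &&
            (pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0) {
                printf("pfn %lu: free as one pageblock-order block\n", pfn);
                return;
        }
        for (unsigned long i = 0; i < nr_pages; i++, pfn++) {
                if ((pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0)
                        printf("pfn %lu: new pageblock, mark MIGRATE_MOVABLE\n", pfn);
                /* free the order-0 page at pfn */
        }
}

int main(void)
{
        deferred_free_range_model(1024, 512);   /* aligned full pageblock */
        deferred_free_range_model(1500, 40);    /* crosses a boundary at 1536 */
        return 0;
}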
@@ -1469,9 +1483,9 @@ static int __init deferred_init_memmap(void *data)
/*
* Ensure pfn_valid is checked every
- * MAX_ORDER_NR_PAGES for memory holes
+ * pageblock_nr_pages for memory holes
*/
- if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+ if ((pfn & (pageblock_nr_pages - 1)) == 0) {
if (!pfn_valid(pfn)) {
page = NULL;
goto free_range;
@@ -1484,7 +1498,7 @@ static int __init deferred_init_memmap(void *data)
}
/* Minimise pfn page lookups and scheduler checks */
- if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+ if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
page++;
} else {
nr_pages += nr_to_free;
@@ -1520,6 +1534,9 @@ free_range:
free_base_page = NULL;
free_base_pfn = nr_to_free = 0;
}
+ /* Free the last block of pages to allocator */
+ nr_pages += nr_to_free;
+ deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
first_init_pfn = max(end_pfn, first_init_pfn);
}
@@ -1616,18 +1633,15 @@ static inline void expand(struct zone *zone, struct page *page,
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
- if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
- debug_guardpage_enabled() &&
- high < debug_guardpage_minorder()) {
- /*
- * Mark as guard pages (or page), that will allow to
- * merge back to allocator when buddy will be freed.
- * Corresponding page table entries will not be touched,
- * pages will stay not present in virtual address space
- */
- set_page_guard(zone, &page[size], high, migratetype);
+ /*
+ * Mark as guard pages (or page), that will allow to
+ * merge back to allocator when buddy will be freed.
+ * Corresponding page table entries will not be touched,
+ * pages will stay not present in virtual address space
+ */
+ if (set_page_guard(zone, &page[size], high, migratetype))
continue;
- }
+
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
@@ -2489,9 +2503,14 @@ int __isolate_free_page(struct page *page, unsigned int order)
mt = get_pageblock_migratetype(page);
if (!is_migrate_isolate(mt)) {
- /* Obey watermarks as if the page was being allocated */
- watermark = low_wmark_pages(zone) + (1 << order);
- if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+ /*
+ * Obey watermarks as if the page was being allocated. We can
+ * emulate a high-order watermark check with a raised order-0
+ * watermark, because we already know our high-order page
+ * exists.
+ */
+ watermark = min_wmark_pages(zone) + (1UL << order);
+ if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
return 0;
__mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -2960,9 +2979,11 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
-void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
{
unsigned int filter = SHOW_MEM_FILTER_NODES;
+ struct va_format vaf;
+ va_list args;
if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
debug_guardpage_minorder() > 0)
@@ -2980,22 +3001,16 @@ void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
filter &= ~SHOW_MEM_FILTER_NODES;
- if (fmt) {
- struct va_format vaf;
- va_list args;
+ pr_warn("%s: ", current->comm);
- va_start(args, fmt);
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_cont("%pV", &vaf);
+ va_end(args);
- vaf.fmt = fmt;
- vaf.va = &args;
+ pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
- pr_warn("%pV", &vaf);
-
- va_end(args);
- }
-
- pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
- current->comm, order, gfp_mask, &gfp_mask);
dump_stack();
if (!should_suppress_show_mem())
show_mem(filter);
@@ -3137,6 +3152,65 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
+static inline bool
+should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
+ enum compact_result compact_result,
+ enum compact_priority *compact_priority,
+ int *compaction_retries)
+{
+ int max_retries = MAX_COMPACT_RETRIES;
+ int min_priority;
+
+ if (!order)
+ return false;
+
+ if (compaction_made_progress(compact_result))
+ (*compaction_retries)++;
+
+ /*
+ * compaction considers all the zones as desperately out of memory
+ * so it doesn't really make much sense to retry except when the
+ * failure could be caused by insufficient priority
+ */
+ if (compaction_failed(compact_result))
+ goto check_priority;
+
+ /*
+ * make sure compaction was not deferred and did not bail out early
+ * due to lock contention before we declare that we should give up.
+ * But do not retry if the given zonelist is not suitable for
+ * compaction.
+ */
+ if (compaction_withdrawn(compact_result))
+ return compaction_zonelist_suitable(ac, order, alloc_flags);
+
+ /*
+ * !costly requests are much more important than __GFP_REPEAT
+ * costly ones because they are de facto nofail and invoke the OOM
+ * killer to move on, while costly requests can fail and users are
+ * ready to cope with that. 1/4 of the retries is rather arbitrary but we
+ * would need much more detailed feedback from compaction to
+ * make a better decision.
+ */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ max_retries /= 4;
+ if (*compaction_retries <= max_retries)
+ return true;
+
+ /*
+ * Make sure there are attempts at the highest priority if we exhausted
+ * all retries or failed at the lower priorities.
+ */
+check_priority:
+ min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
+ MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
+ if (*compact_priority > min_priority) {
+ (*compact_priority)--;
+ *compaction_retries = 0;
+ return true;
+ }
+ return false;
+}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3147,13 +3221,11 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
-#endif /* CONFIG_COMPACTION */
-
static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
enum compact_result compact_result,
enum compact_priority *compact_priority,
- int compaction_retries)
+ int *compaction_retries)
{
struct zone *zone;
struct zoneref *z;
@@ -3175,6 +3247,7 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
}
return false;
}
+#endif /* CONFIG_COMPACTION */
/* Perform direct synchronous page reclaim */
static int
@@ -3325,16 +3398,26 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
struct alloc_context *ac, int alloc_flags,
- bool did_some_progress, int no_progress_loops)
+ bool did_some_progress, int *no_progress_loops)
{
struct zone *zone;
struct zoneref *z;
/*
+ * Costly allocations might have made a progress but this doesn't mean
+ * their order will become available due to high fragmentation so
+ * always increment the no progress counter for them
+ */
+ if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
+ *no_progress_loops = 0;
+ else
+ (*no_progress_loops)++;
+
+ /*
* Make sure we converge to OOM if we cannot make any progress
* several times in the row.
*/
- if (no_progress_loops > MAX_RECLAIM_RETRIES)
+ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
return false;
/*
@@ -3349,7 +3432,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
unsigned long reclaimable;
available = reclaimable = zone_reclaimable_pages(zone);
- available -= DIV_ROUND_UP(no_progress_loops * available,
+ available -= DIV_ROUND_UP((*no_progress_loops) * available,
MAX_RECLAIM_RETRIES);
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -3410,6 +3493,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
enum compact_result compact_result;
int compaction_retries = 0;
int no_progress_loops = 0;
+ unsigned long alloc_start = jiffies;
+ unsigned int stall_timeout = 10 * HZ;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -3554,9 +3639,6 @@ retry:
if (page)
goto got_pg;
- if (order && compaction_made_progress(compact_result))
- compaction_retries++;
-
/* Do not loop if specifically requested */
if (gfp_mask & __GFP_NORETRY)
goto nopage;
@@ -3568,18 +3650,16 @@ retry:
if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
goto nopage;
- /*
- * Costly allocations might have made a progress but this doesn't mean
- * their order will become available due to high fragmentation so
- * always increment the no progress counter for them
- */
- if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
- no_progress_loops = 0;
- else
- no_progress_loops++;
+ /* Make sure we know about allocations which stall for too long */
+ if (time_after(jiffies, alloc_start + stall_timeout)) {
+ warn_alloc(gfp_mask,
+ "page alloction stalls for %ums, order:%u\n",
+ jiffies_to_msecs(jiffies-alloc_start), order);
+ stall_timeout += 10 * HZ;
+ }
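The warning added above only fires once the allocation has been looping for stall_timeout jiffies, and each time it fires the threshold is pushed out by another 10 seconds, so a long-stalling allocation reports roughly every 10 seconds instead of flooding the log. A minimal userspace model of that escalation (HZ and the jiffies counter are simulated; time_after() is the usual wrap-safe comparison):

    #include <stdio.h>

    #define HZ 100                                      /* assumed tick rate */
    #define time_after(a, b) ((long)((b) - (a)) < 0)    /* wrap-safe compare */

    int main(void)
    {
        unsigned long jiffies, alloc_start = 0;
        unsigned int stall_timeout = 10 * HZ;

        /* pretend the allocation keeps looping for 35 "seconds" */
        for (jiffies = 0; jiffies < 35UL * HZ; jiffies++) {
            if (time_after(jiffies, alloc_start + stall_timeout)) {
                printf("page allocation stalls for %lums\n",
                       (jiffies - alloc_start) * (1000 / HZ));
                stall_timeout += 10 * HZ;               /* next report 10s later */
            }
        }
        return 0;
    }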
if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
- did_some_progress > 0, no_progress_loops))
+ did_some_progress > 0, &no_progress_loops))
goto retry;
/*
@@ -3591,7 +3671,7 @@ retry:
if (did_some_progress > 0 &&
should_compact_retry(ac, order, alloc_flags,
compact_result, &compact_priority,
- compaction_retries))
+ &compaction_retries))
goto retry;
/* Reclaim has failed us, start killing things */
@@ -3606,7 +3686,8 @@ retry:
}
nopage:
- warn_alloc_failed(gfp_mask, order, NULL);
+ warn_alloc(gfp_mask,
+ "page allocation failure: order:%u", order);
got_pg:
return page;
}
@@ -4555,7 +4636,7 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
int j;
struct zonelist *zonelist;
- zonelist = &pgdat->node_zonelists[0];
+ zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
;
j = build_zonelists_node(NODE_DATA(node), zonelist, j);
@@ -4571,7 +4652,7 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
int j;
struct zonelist *zonelist;
- zonelist = &pgdat->node_zonelists[1];
+ zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
j = build_zonelists_node(pgdat, zonelist, 0);
zonelist->_zonerefs[j].zone = NULL;
zonelist->_zonerefs[j].zone_idx = 0;
@@ -4592,7 +4673,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
struct zone *z;
struct zonelist *zonelist;
- zonelist = &pgdat->node_zonelists[0];
+ zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
pos = 0;
for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
for (j = 0; j < nr_nodes; j++) {
@@ -4727,7 +4808,7 @@ static void build_zonelists(pg_data_t *pgdat)
local_node = pgdat->node_id;
- zonelist = &pgdat->node_zonelists[0];
+ zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
j = build_zonelists_node(pgdat, zonelist, 0);
/*
@@ -5000,15 +5081,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
- * from zone_movable_pfn[nid] to end of each node should be
- * ZONE_MOVABLE not ZONE_NORMAL. skip it.
- */
- if (!mirrored_kernelcore && zone_movable_pfn[nid])
- if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
- continue;
-
- /*
* Check given memblock attribute by firmware which can affect
* kernel memory layout. If zone==ZONE_MOVABLE but memory is
* mirrored, it's an overlapped memmap init. skip it.
@@ -5451,6 +5523,12 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid,
*zone_end_pfn = min(node_end_pfn,
arch_zone_highest_possible_pfn[movable_zone]);
+ /* Adjust for ZONE_MOVABLE starting within this range */
+ } else if (!mirrored_kernelcore &&
+ *zone_start_pfn < zone_movable_pfn[nid] &&
+ *zone_end_pfn > zone_movable_pfn[nid]) {
+ *zone_end_pfn = zone_movable_pfn[nid];
+
/* Check if this whole range is within ZONE_MOVABLE */
} else if (*zone_start_pfn >= zone_movable_pfn[nid])
*zone_start_pfn = *zone_end_pfn;
@@ -5554,28 +5632,23 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
* Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
* and vice versa.
*/
- if (zone_movable_pfn[nid]) {
- if (mirrored_kernelcore) {
- unsigned long start_pfn, end_pfn;
- struct memblock_region *r;
-
- for_each_memblock(memory, r) {
- start_pfn = clamp(memblock_region_memory_base_pfn(r),
- zone_start_pfn, zone_end_pfn);
- end_pfn = clamp(memblock_region_memory_end_pfn(r),
- zone_start_pfn, zone_end_pfn);
-
- if (zone_type == ZONE_MOVABLE &&
- memblock_is_mirror(r))
- nr_absent += end_pfn - start_pfn;
-
- if (zone_type == ZONE_NORMAL &&
- !memblock_is_mirror(r))
- nr_absent += end_pfn - start_pfn;
- }
- } else {
- if (zone_type == ZONE_NORMAL)
- nr_absent += node_end_pfn - zone_movable_pfn[nid];
+ if (mirrored_kernelcore && zone_movable_pfn[nid]) {
+ unsigned long start_pfn, end_pfn;
+ struct memblock_region *r;
+
+ for_each_memblock(memory, r) {
+ start_pfn = clamp(memblock_region_memory_base_pfn(r),
+ zone_start_pfn, zone_end_pfn);
+ end_pfn = clamp(memblock_region_memory_end_pfn(r),
+ zone_start_pfn, zone_end_pfn);
+
+ if (zone_type == ZONE_MOVABLE &&
+ memblock_is_mirror(r))
+ nr_absent += end_pfn - start_pfn;
+
+ if (zone_type == ZONE_NORMAL &&
+ !memblock_is_mirror(r))
+ nr_absent += end_pfn - start_pfn;
}
}
@@ -6929,6 +7002,17 @@ static int __init set_hashdist(char *str)
__setup("hashdist=", set_hashdist);
#endif
+#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
+/*
+ * Returns the number of pages that arch has reserved but
+ * is not known to alloc_large_system_hash().
+ */
+static unsigned long __init arch_reserved_kernel_pages(void)
+{
+ return 0;
+}
+#endif
+
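arch_reserved_kernel_pages() above follows the usual weak-default pattern: the generic fallback only exists when the architecture has not defined __HAVE_ARCH_RESERVED_KERNEL_PAGES and supplied its own version, and alloc_large_system_hash() simply subtracts whatever it returns. A stripped-down, runnable illustration of the override pattern (the numbers and the inline "arch" section are hypothetical):

    #include <stdio.h>

    /*
     * "arch" side: an architecture that hides reserved memory defines the
     * guard and provides its own implementation. Remove these two lines to
     * fall back to the generic version below.
     */
    #define __HAVE_ARCH_RESERVED_KERNEL_PAGES
    static unsigned long arch_reserved_kernel_pages(void) { return 4096; }

    /* generic side: compiled only when the arch did not override it */
    #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
    static unsigned long arch_reserved_kernel_pages(void)
    {
        return 0;
    }
    #endif

    int main(void)
    {
        unsigned long nr_kernel_pages = 1 << 20;        /* made-up total */

        printf("pages used for hash sizing: %lu\n",
               nr_kernel_pages - arch_reserved_kernel_pages());
        return 0;
    }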
/*
* allocate a large system hash table from bootmem
* - it is assumed that the hash table must contain an exact power-of-2
@@ -6953,6 +7037,7 @@ void *__init alloc_large_system_hash(const char *tablename,
if (!numentries) {
/* round applicable memory size up to nearest megabyte */
numentries = nr_kernel_pages;
+ numentries -= arch_reserved_kernel_pages();
/* It isn't necessary when PAGE_SIZE >= 1MB */
if (PAGE_SHIFT < 20)
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 44a4c029c8e7..121dcffc4ec1 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -42,6 +42,11 @@
* and page extension core can skip to allocate memory. As result,
* none of memory is wasted.
*
+ * When the need callback returns true, page_ext checks whether the client
+ * requested extra memory through the size field in struct page_ext_operations.
+ * If it is non-zero, that much extra space is allocated for each page_ext
+ * entry and its offset is reported back through the offset field in
+ * struct page_ext_operations.
+ *
* The init callback is used to do proper initialization after page extension
* is completely initialized. In sparse memory system, extra memory is
* allocated some time later than memmap is allocated. In other words, lifetime
@@ -66,18 +71,24 @@ static struct page_ext_operations *page_ext_ops[] = {
};
static unsigned long total_usage;
+static unsigned long extra_mem;
static bool __init invoke_need_callbacks(void)
{
int i;
int entries = ARRAY_SIZE(page_ext_ops);
+ bool need = false;
for (i = 0; i < entries; i++) {
- if (page_ext_ops[i]->need && page_ext_ops[i]->need())
- return true;
+ if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
+ page_ext_ops[i]->offset = sizeof(struct page_ext) +
+ extra_mem;
+ extra_mem += page_ext_ops[i]->size;
+ need = true;
+ }
}
- return false;
+ return need;
}
static void __init invoke_init_callbacks(void)
@@ -91,6 +102,16 @@ static void __init invoke_init_callbacks(void)
}
}
+static unsigned long get_entry_size(void)
+{
+ return sizeof(struct page_ext) + extra_mem;
+}
+
+static inline struct page_ext *get_entry(void *base, unsigned long index)
+{
+ return base + get_entry_size() * index;
+}
+
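These helpers, together with the offset bookkeeping added to invoke_need_callbacks(), define a simple layout: each page_ext entry is the base struct followed by the extra space every enabled client asked for via ->size, and a client finds its private area again by adding its recorded ->offset (exactly what get_page_owner() does further down in this patch). A runnable userspace model of that layout; ext_header, ext_client and the sizes are stand-ins, not kernel types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ext_header { unsigned long flags; };     /* stands in for struct page_ext */

    struct ext_client {
        size_t size;    /* extra bytes requested per entry */
        size_t offset;  /* filled in while summing up the entry size */
    };

    static size_t entry_size;                       /* header + all extras */

    static void register_clients(struct ext_client *clients, int n)
    {
        entry_size = sizeof(struct ext_header);
        for (int i = 0; i < n; i++) {
            clients[i].offset = entry_size;         /* client data starts here */
            entry_size += clients[i].size;
        }
    }

    /*
     * Equivalent of get_entry()/get_page_owner(): base + index * entry_size,
     * then + the client's offset for its private part.
     */
    static void *client_data(void *table, unsigned long index,
                             const struct ext_client *client)
    {
        return (char *)table + entry_size * index + client->offset;
    }

    int main(void)
    {
        struct ext_client clients[2] = { { .size = 24 }, { .size = 8 } };

        register_clients(clients, 2);

        void *table = calloc(4, entry_size);        /* 4 entries */
        memset(client_data(table, 2, &clients[1]), 0xff, clients[1].size);

        printf("entry size %zu, client1 offset %zu, client2 offset %zu\n",
               entry_size, clients[0].offset, clients[1].offset);
        free(table);
        return 0;
    }

On a 64-bit build the entry grows from 8 to 40 bytes and the two clients land at offsets 8 and 32, the same arithmetic that get_entry_size()/get_entry() perform per pfn.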
#if !defined(CONFIG_SPARSEMEM)
@@ -102,7 +123,7 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
struct page_ext *lookup_page_ext(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
- unsigned long offset;
+ unsigned long index;
struct page_ext *base;
base = NODE_DATA(page_to_nid(page))->node_page_ext;
@@ -119,9 +140,9 @@ struct page_ext *lookup_page_ext(struct page *page)
if (unlikely(!base))
return NULL;
#endif
- offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ index = pfn - round_down(node_start_pfn(page_to_nid(page)),
MAX_ORDER_NR_PAGES);
- return base + offset;
+ return get_entry(base, index);
}
static int __init alloc_node_page_ext(int nid)
@@ -143,7 +164,7 @@ static int __init alloc_node_page_ext(int nid)
!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
nr_pages += MAX_ORDER_NR_PAGES;
- table_size = sizeof(struct page_ext) * nr_pages;
+ table_size = get_entry_size() * nr_pages;
base = memblock_virt_alloc_try_nid_nopanic(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -196,7 +217,7 @@ struct page_ext *lookup_page_ext(struct page *page)
if (!section->page_ext)
return NULL;
#endif
- return section->page_ext + pfn;
+ return get_entry(section->page_ext, pfn);
}
static void *__meminit alloc_page_ext(size_t size, int nid)
@@ -229,7 +250,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
if (section->page_ext)
return 0;
- table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+ table_size = get_entry_size() * PAGES_PER_SECTION;
base = alloc_page_ext(table_size, nid);
/*
@@ -249,7 +270,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
* we need to apply a mask.
*/
pfn &= PAGE_SECTION_MASK;
- section->page_ext = base - pfn;
+ section->page_ext = (void *)base - get_entry_size() * pfn;
total_usage += table_size;
return 0;
}
@@ -262,7 +283,7 @@ static void free_page_ext(void *addr)
struct page *page = virt_to_page(addr);
size_t table_size;
- table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+ table_size = get_entry_size() * PAGES_PER_SECTION;
BUG_ON(PageReserved(page));
free_pages_exact(addr, table_size);
@@ -277,7 +298,7 @@ static void __free_page_ext(unsigned long pfn)
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
- base = ms->page_ext + pfn;
+ base = get_entry(ms->page_ext, pfn);
free_page_ext(base);
ms->page_ext = NULL;
}
diff --git a/mm/page_io.c b/mm/page_io.c
index eafe5ddc2b54..a2651f58c86a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -264,7 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
int ret;
struct swap_info_struct *sis = page_swap_info(page);
- BUG_ON(!PageSwapCache(page));
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
if (sis->flags & SWP_FILE) {
struct kiocb kiocb;
struct file *swap_file = sis->swap_file;
@@ -338,7 +338,7 @@ int swap_readpage(struct page *page)
int ret = 0;
struct swap_info_struct *sis = page_swap_info(page);
- BUG_ON(!PageSwapCache(page));
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageUptodate(page), page);
if (frontswap_load(page) == 0) {
@@ -388,7 +388,8 @@ int swap_set_page_dirty(struct page *page)
if (sis->flags & SWP_FILE) {
struct address_space *mapping = sis->swap_file->f_mapping;
- BUG_ON(!PageSwapCache(page));
+
+ VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return mapping->a_ops->set_page_dirty(page);
} else {
return __set_page_dirty_no_writeback(page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 064b7fb6e0b5..a5594bfcc5ed 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -55,7 +55,7 @@ static int set_migratetype_isolate(struct page *page,
ret = 0;
/*
- * immobile means "not-on-lru" paes. If immobile is larger than
+ * immobile means "not-on-lru" pages. If immobile is larger than
* removable-by-driver pages reported by notifier, we'll fail.
*/
diff --git a/mm/page_owner.c b/mm/page_owner.c
index ec6dc1886f71..60634dc53a88 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -8,6 +8,7 @@
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
+#include <linux/seq_file.h>
#include "internal.h"
@@ -17,6 +18,13 @@
*/
#define PAGE_OWNER_STACK_DEPTH (16)
+struct page_owner {
+ unsigned int order;
+ gfp_t gfp_mask;
+ int last_migrate_reason;
+ depot_stack_handle_t handle;
+};
+
static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
@@ -85,10 +93,16 @@ static void init_page_owner(void)
}
struct page_ext_operations page_owner_ops = {
+ .size = sizeof(struct page_owner),
.need = need_page_owner,
.init = init_page_owner,
};
+static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
+{
+ return (void *)page_ext + page_owner_ops.offset;
+}
+
void __reset_page_owner(struct page *page, unsigned int order)
{
int i;
@@ -155,14 +169,16 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_owner *page_owner;
if (unlikely(!page_ext))
return;
- page_ext->handle = save_stack(gfp_mask);
- page_ext->order = order;
- page_ext->gfp_mask = gfp_mask;
- page_ext->last_migrate_reason = -1;
+ page_owner = get_page_owner(page_ext);
+ page_owner->handle = save_stack(gfp_mask);
+ page_owner->order = order;
+ page_owner->gfp_mask = gfp_mask;
+ page_owner->last_migrate_reason = -1;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
@@ -170,21 +186,26 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_owner *page_owner;
+
if (unlikely(!page_ext))
return;
- page_ext->last_migrate_reason = reason;
+ page_owner = get_page_owner(page_ext);
+ page_owner->last_migrate_reason = reason;
}
void __split_page_owner(struct page *page, unsigned int order)
{
int i;
struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_owner *page_owner;
if (unlikely(!page_ext))
return;
- page_ext->order = 0;
+ page_owner = get_page_owner(page_ext);
+ page_owner->order = 0;
for (i = 1; i < (1 << order); i++)
__copy_page_owner(page, page + i);
}
@@ -193,14 +214,18 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
struct page_ext *old_ext = lookup_page_ext(oldpage);
struct page_ext *new_ext = lookup_page_ext(newpage);
+ struct page_owner *old_page_owner, *new_page_owner;
if (unlikely(!old_ext || !new_ext))
return;
- new_ext->order = old_ext->order;
- new_ext->gfp_mask = old_ext->gfp_mask;
- new_ext->last_migrate_reason = old_ext->last_migrate_reason;
- new_ext->handle = old_ext->handle;
+ old_page_owner = get_page_owner(old_ext);
+ new_page_owner = get_page_owner(new_ext);
+ new_page_owner->order = old_page_owner->order;
+ new_page_owner->gfp_mask = old_page_owner->gfp_mask;
+ new_page_owner->last_migrate_reason =
+ old_page_owner->last_migrate_reason;
+ new_page_owner->handle = old_page_owner->handle;
/*
* We don't clear the bit on the oldpage as it's going to be freed
@@ -214,9 +239,88 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
+void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ pg_data_t *pgdat, struct zone *zone)
+{
+ struct page *page;
+ struct page_ext *page_ext;
+ struct page_owner *page_owner;
+ unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
+ unsigned long end_pfn = pfn + zone->spanned_pages;
+ unsigned long count[MIGRATE_TYPES] = { 0, };
+ int pageblock_mt, page_mt;
+ int i;
+
+ /* Scan block by block. First and last block may be incomplete */
+ pfn = zone->zone_start_pfn;
+
+ /*
+ * Walk the zone in pageblock_nr_pages steps. If a page block spans
+ * a zone boundary, it will be double counted between zones. This does
+ * not matter as the mixed block count will still be correct
+ */
+ for (; pfn < end_pfn; ) {
+ if (!pfn_valid(pfn)) {
+ pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ continue;
+ }
+
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
+ page = pfn_to_page(pfn);
+ pageblock_mt = get_pageblock_migratetype(page);
+
+ for (; pfn < block_end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+
+ if (page_zone(page) != zone)
+ continue;
+
+ if (PageBuddy(page)) {
+ pfn += (1UL << page_order(page)) - 1;
+ continue;
+ }
+
+ if (PageReserved(page))
+ continue;
+
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
+
+ page_owner = get_page_owner(page_ext);
+ page_mt = gfpflags_to_migratetype(
+ page_owner->gfp_mask);
+ if (pageblock_mt != page_mt) {
+ if (is_migrate_cma(pageblock_mt))
+ count[MIGRATE_MOVABLE]++;
+ else
+ count[pageblock_mt]++;
+
+ pfn = block_end_pfn;
+ break;
+ }
+ pfn += (1UL << page_owner->order) - 1;
+ }
+ }
+
+ /* Print counts */
+ seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+ for (i = 0; i < MIGRATE_TYPES; i++)
+ seq_printf(m, "%12lu ", count[i]);
+ seq_putc(m, '\n');
+}
+
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
- struct page *page, struct page_ext *page_ext,
+ struct page *page, struct page_owner *page_owner,
depot_stack_handle_t handle)
{
int ret;
@@ -236,15 +340,15 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
ret = snprintf(kbuf, count,
"Page allocated via order %u, mask %#x(%pGg)\n",
- page_ext->order, page_ext->gfp_mask,
- &page_ext->gfp_mask);
+ page_owner->order, page_owner->gfp_mask,
+ &page_owner->gfp_mask);
if (ret >= count)
goto err;
/* Print information relevant to grouping pages by mobility */
pageblock_mt = get_pageblock_migratetype(page);
- page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
+ page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
ret += snprintf(kbuf + ret, count - ret,
"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
pfn,
@@ -261,10 +365,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
if (ret >= count)
goto err;
- if (page_ext->last_migrate_reason != -1) {
+ if (page_owner->last_migrate_reason != -1) {
ret += snprintf(kbuf + ret, count - ret,
"Page has been migrated, last migrate reason: %s\n",
- migrate_reason_names[page_ext->last_migrate_reason]);
+ migrate_reason_names[page_owner->last_migrate_reason]);
if (ret >= count)
goto err;
}
@@ -287,6 +391,7 @@ err:
void __dump_page_owner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_owner *page_owner;
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
@@ -302,7 +407,9 @@ void __dump_page_owner(struct page *page)
pr_alert("There is not page extension available.\n");
return;
}
- gfp_mask = page_ext->gfp_mask;
+
+ page_owner = get_page_owner(page_ext);
+ gfp_mask = page_owner->gfp_mask;
mt = gfpflags_to_migratetype(gfp_mask);
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
@@ -310,7 +417,7 @@ void __dump_page_owner(struct page *page)
return;
}
- handle = READ_ONCE(page_ext->handle);
+ handle = READ_ONCE(page_owner->handle);
if (!handle) {
pr_alert("page_owner info is not active (free page?)\n");
return;
@@ -318,12 +425,12 @@ void __dump_page_owner(struct page *page)
depot_fetch_stack(handle, &trace);
pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
- page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+ page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
print_stack_trace(&trace, 0);
- if (page_ext->last_migrate_reason != -1)
+ if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
- migrate_reason_names[page_ext->last_migrate_reason]);
+ migrate_reason_names[page_owner->last_migrate_reason]);
}
static ssize_t
@@ -332,6 +439,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned long pfn;
struct page *page;
struct page_ext *page_ext;
+ struct page_owner *page_owner;
depot_stack_handle_t handle;
if (!static_branch_unlikely(&page_owner_inited))
@@ -381,11 +489,13 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
+ page_owner = get_page_owner(page_ext);
+
/*
* Access to page_ext->handle isn't synchronous so we should
* be careful to access it.
*/
- handle = READ_ONCE(page_ext->handle);
+ handle = READ_ONCE(page_owner->handle);
if (!handle)
continue;
@@ -393,7 +503,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*ppos = (pfn - min_low_pfn) + 1;
return print_page_owner(buf, count, pfn, page,
- page_ext, handle);
+ page_owner, handle);
}
return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 971fc83e6402..0e9901e69d24 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2311,119 +2311,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return retval ? retval : error;
}
-static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
-{
- struct address_space *mapping = in->f_mapping;
- struct inode *inode = mapping->host;
- unsigned int loff, nr_pages, req_pages;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct page *page;
- pgoff_t index, end_index;
- loff_t isize, left;
- int error, page_nr;
- struct splice_pipe_desc spd = {
- .pages = pages,
- .partial = partial,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .flags = flags,
- .ops = &page_cache_pipe_buf_ops,
- .spd_release = spd_release_page,
- };
-
- isize = i_size_read(inode);
- if (unlikely(*ppos >= isize))
- return 0;
-
- left = isize - *ppos;
- if (unlikely(left < len))
- len = left;
-
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
- index = *ppos >> PAGE_SHIFT;
- loff = *ppos & ~PAGE_MASK;
- req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
- nr_pages = min(req_pages, spd.nr_pages_max);
-
- spd.nr_pages = find_get_pages_contig(mapping, index,
- nr_pages, spd.pages);
- index += spd.nr_pages;
- error = 0;
-
- while (spd.nr_pages < nr_pages) {
- error = shmem_getpage(inode, index, &page, SGP_CACHE);
- if (error)
- break;
- unlock_page(page);
- spd.pages[spd.nr_pages++] = page;
- index++;
- }
-
- index = *ppos >> PAGE_SHIFT;
- nr_pages = spd.nr_pages;
- spd.nr_pages = 0;
-
- for (page_nr = 0; page_nr < nr_pages; page_nr++) {
- unsigned int this_len;
-
- if (!len)
- break;
-
- this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
- page = spd.pages[page_nr];
-
- if (!PageUptodate(page) || page->mapping != mapping) {
- error = shmem_getpage(inode, index, &page, SGP_CACHE);
- if (error)
- break;
- unlock_page(page);
- put_page(spd.pages[page_nr]);
- spd.pages[page_nr] = page;
- }
-
- isize = i_size_read(inode);
- end_index = (isize - 1) >> PAGE_SHIFT;
- if (unlikely(!isize || index > end_index))
- break;
-
- if (end_index == index) {
- unsigned int plen;
-
- plen = ((isize - 1) & ~PAGE_MASK) + 1;
- if (plen <= loff)
- break;
-
- this_len = min(this_len, plen - loff);
- len = this_len;
- }
-
- spd.partial[page_nr].offset = loff;
- spd.partial[page_nr].len = this_len;
- len -= this_len;
- loff = 0;
- spd.nr_pages++;
- index++;
- }
-
- while (page_nr < nr_pages)
- put_page(spd.pages[page_nr++]);
-
- if (spd.nr_pages)
- error = splice_to_pipe(pipe, &spd);
-
- splice_shrink_spd(&spd);
-
- if (error > 0) {
- *ppos += error;
- file_accessed(in);
- }
- return error;
-}
-
/*
* llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
*/
@@ -3786,7 +3673,7 @@ static const struct file_operations shmem_file_operations = {
.read_iter = shmem_file_read_iter,
.write_iter = generic_file_write_iter,
.fsync = noop_fsync,
- .splice_read = shmem_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = shmem_fallocate,
#endif
@@ -4078,7 +3965,7 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static struct dentry_operations anon_ops = {
+static const struct dentry_operations anon_ops = {
.d_dname = simple_dname
};
diff --git a/mm/swap.c b/mm/swap.c
index 75c63bb2a1da..4dcf852e1e6d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -748,10 +748,8 @@ void release_pages(struct page **pages, int nr, bool cold)
locked_pgdat = NULL;
}
- if (is_huge_zero_page(page)) {
- put_huge_zero_page();
+ if (is_huge_zero_page(page))
continue;
- }
page = compound_head(page);
if (!put_page_testzero(page))
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c8310a37be3a..35d7e0ee1c77 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -37,6 +37,8 @@ struct address_space swapper_spaces[MAX_SWAPFILES] = {
.page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
.i_mmap_writable = ATOMIC_INIT(0),
.a_ops = &swap_aops,
+ /* swap cache doesn't use writeback related tags */
+ .flags = 1 << AS_NO_WRITEBACK_TAGS,
}
};
@@ -92,7 +94,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
address_space = swap_address_space(entry);
spin_lock_irq(&address_space->tree_lock);
error = radix_tree_insert(&address_space->page_tree,
- entry.val, page);
+ swp_offset(entry), page);
if (likely(!error)) {
address_space->nrpages++;
__inc_node_page_state(page, NR_FILE_PAGES);
@@ -143,7 +145,7 @@ void __delete_from_swap_cache(struct page *page)
entry.val = page_private(page);
address_space = swap_address_space(entry);
- radix_tree_delete(&address_space->page_tree, page_private(page));
+ radix_tree_delete(&address_space->page_tree, swp_offset(entry));
set_page_private(page, 0);
ClearPageSwapCache(page);
address_space->nrpages--;
@@ -252,9 +254,7 @@ static inline void free_swap_cache(struct page *page)
void free_page_and_swap_cache(struct page *page)
{
free_swap_cache(page);
- if (is_huge_zero_page(page))
- put_huge_zero_page();
- else
+ if (!is_huge_zero_page(page))
put_page(page);
}
@@ -283,7 +283,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
{
struct page *page;
- page = find_get_page(swap_address_space(entry), entry.val);
+ page = find_get_page(swap_address_space(entry), swp_offset(entry));
if (page) {
INC_CACHE_INFO(find_success);
@@ -310,7 +310,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
- found_page = find_get_page(swapper_space, entry.val);
+ found_page = find_get_page(swapper_space, swp_offset(entry));
if (found_page)
break;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2657accc6e2b..2210de290b54 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -105,7 +105,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
struct page *page;
int ret = 0;
- page = find_get_page(swap_address_space(entry), entry.val);
+ page = find_get_page(swap_address_space(entry), swp_offset(entry));
if (!page)
return 0;
/*
@@ -257,6 +257,53 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
info->data = 0;
}
+static inline bool cluster_list_empty(struct swap_cluster_list *list)
+{
+ return cluster_is_null(&list->head);
+}
+
+static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
+{
+ return cluster_next(&list->head);
+}
+
+static void cluster_list_init(struct swap_cluster_list *list)
+{
+ cluster_set_null(&list->head);
+ cluster_set_null(&list->tail);
+}
+
+static void cluster_list_add_tail(struct swap_cluster_list *list,
+ struct swap_cluster_info *ci,
+ unsigned int idx)
+{
+ if (cluster_list_empty(list)) {
+ cluster_set_next_flag(&list->head, idx, 0);
+ cluster_set_next_flag(&list->tail, idx, 0);
+ } else {
+ unsigned int tail = cluster_next(&list->tail);
+
+ cluster_set_next(&ci[tail], idx);
+ cluster_set_next_flag(&list->tail, idx, 0);
+ }
+}
+
+static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
+ struct swap_cluster_info *ci)
+{
+ unsigned int idx;
+
+ idx = cluster_next(&list->head);
+ if (cluster_next(&list->tail) == idx) {
+ cluster_set_null(&list->head);
+ cluster_set_null(&list->tail);
+ } else
+ cluster_set_next_flag(&list->head,
+ cluster_next(&ci[idx]), 0);
+
+ return idx;
+}
+
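The new cluster_list_* helpers above factor out a small intrusive queue: the nodes live in the cluster_info array, each node stores only the index of the next cluster, and the list itself is just a head and a tail index. A runnable userspace model of the same idea, using -1 as the null index instead of the kernel's flag-based encoding:

    #include <stdio.h>

    #define NCLUSTERS 8
    #define NULL_IDX  (-1)

    struct node { int next; };                  /* stands in for swap_cluster_info */
    struct idx_list { int head, tail; };        /* stands in for swap_cluster_list */

    static struct node nodes[NCLUSTERS];

    static void list_init(struct idx_list *l) { l->head = l->tail = NULL_IDX; }
    static int  list_empty(const struct idx_list *l) { return l->head == NULL_IDX; }

    static void list_add_tail(struct idx_list *l, int idx)
    {
        nodes[idx].next = NULL_IDX;
        if (list_empty(l)) {
            l->head = l->tail = idx;
        } else {
            nodes[l->tail].next = idx;
            l->tail = idx;
        }
    }

    static int list_del_first(struct idx_list *l)
    {
        int idx = l->head;

        if (l->tail == idx)                     /* last element: list becomes empty */
            list_init(l);
        else
            l->head = nodes[idx].next;
        return idx;
    }

    int main(void)
    {
        struct idx_list free_clusters;

        list_init(&free_clusters);
        for (int i = 0; i < NCLUSTERS; i++)
            list_add_tail(&free_clusters, i);
        while (!list_empty(&free_clusters))
            printf("allocating cluster %d\n", list_del_first(&free_clusters));
        return 0;
    }

free_clusters and discard_clusters in the rest of this hunk are two instances of exactly this structure.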
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
unsigned int idx)
@@ -270,17 +317,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
- if (cluster_is_null(&si->discard_cluster_head)) {
- cluster_set_next_flag(&si->discard_cluster_head,
- idx, 0);
- cluster_set_next_flag(&si->discard_cluster_tail,
- idx, 0);
- } else {
- unsigned int tail = cluster_next(&si->discard_cluster_tail);
- cluster_set_next(&si->cluster_info[tail], idx);
- cluster_set_next_flag(&si->discard_cluster_tail,
- idx, 0);
- }
+ cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
schedule_work(&si->discard_work);
}
@@ -296,15 +333,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
info = si->cluster_info;
- while (!cluster_is_null(&si->discard_cluster_head)) {
- idx = cluster_next(&si->discard_cluster_head);
-
- cluster_set_next_flag(&si->discard_cluster_head,
- cluster_next(&info[idx]), 0);
- if (cluster_next(&si->discard_cluster_tail) == idx) {
- cluster_set_null(&si->discard_cluster_head);
- cluster_set_null(&si->discard_cluster_tail);
- }
+ while (!cluster_list_empty(&si->discard_clusters)) {
+ idx = cluster_list_del_first(&si->discard_clusters, info);
spin_unlock(&si->lock);
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
@@ -312,19 +342,7 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
spin_lock(&si->lock);
cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
- if (cluster_is_null(&si->free_cluster_head)) {
- cluster_set_next_flag(&si->free_cluster_head,
- idx, 0);
- cluster_set_next_flag(&si->free_cluster_tail,
- idx, 0);
- } else {
- unsigned int tail;
-
- tail = cluster_next(&si->free_cluster_tail);
- cluster_set_next(&info[tail], idx);
- cluster_set_next_flag(&si->free_cluster_tail,
- idx, 0);
- }
+ cluster_list_add_tail(&si->free_clusters, info, idx);
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
}
@@ -353,13 +371,8 @@ static void inc_cluster_info_page(struct swap_info_struct *p,
if (!cluster_info)
return;
if (cluster_is_free(&cluster_info[idx])) {
- VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
- cluster_set_next_flag(&p->free_cluster_head,
- cluster_next(&cluster_info[idx]), 0);
- if (cluster_next(&p->free_cluster_tail) == idx) {
- cluster_set_null(&p->free_cluster_tail);
- cluster_set_null(&p->free_cluster_head);
- }
+ VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
+ cluster_list_del_first(&p->free_clusters, cluster_info);
cluster_set_count_flag(&cluster_info[idx], 0, 0);
}
@@ -398,14 +411,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
}
cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
- if (cluster_is_null(&p->free_cluster_head)) {
- cluster_set_next_flag(&p->free_cluster_head, idx, 0);
- cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
- } else {
- unsigned int tail = cluster_next(&p->free_cluster_tail);
- cluster_set_next(&cluster_info[tail], idx);
- cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
- }
+ cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
}
}
@@ -421,8 +427,8 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
bool conflict;
offset /= SWAPFILE_CLUSTER;
- conflict = !cluster_is_null(&si->free_cluster_head) &&
- offset != cluster_next(&si->free_cluster_head) &&
+ conflict = !cluster_list_empty(&si->free_clusters) &&
+ offset != cluster_list_first(&si->free_clusters) &&
cluster_is_free(&si->cluster_info[offset]);
if (!conflict)
@@ -447,11 +453,11 @@ static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
new_cluster:
cluster = this_cpu_ptr(si->percpu_cluster);
if (cluster_is_null(&cluster->index)) {
- if (!cluster_is_null(&si->free_cluster_head)) {
- cluster->index = si->free_cluster_head;
+ if (!cluster_list_empty(&si->free_clusters)) {
+ cluster->index = si->free_clusters.head;
cluster->next = cluster_next(&cluster->index) *
SWAPFILE_CLUSTER;
- } else if (!cluster_is_null(&si->discard_cluster_head)) {
+ } else if (!cluster_list_empty(&si->discard_clusters)) {
/*
* we don't have free cluster but have some clusters in
* discarding, do discard now and reclaim them
@@ -999,7 +1005,7 @@ int free_swap_and_cache(swp_entry_t entry)
if (p) {
if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(swap_address_space(entry),
- entry.val);
+ swp_offset(entry));
if (page && !trylock_page(page)) {
put_page(page);
page = NULL;
@@ -2292,10 +2298,8 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
nr_good_pages = maxpages - 1; /* omit header page */
- cluster_set_null(&p->free_cluster_head);
- cluster_set_null(&p->free_cluster_tail);
- cluster_set_null(&p->discard_cluster_head);
- cluster_set_null(&p->discard_cluster_tail);
+ cluster_list_init(&p->free_clusters);
+ cluster_list_init(&p->discard_clusters);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
@@ -2341,19 +2345,8 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
for (i = 0; i < nr_clusters; i++) {
if (!cluster_count(&cluster_info[idx])) {
cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
- if (cluster_is_null(&p->free_cluster_head)) {
- cluster_set_next_flag(&p->free_cluster_head,
- idx, 0);
- cluster_set_next_flag(&p->free_cluster_tail,
- idx, 0);
- } else {
- unsigned int tail;
-
- tail = cluster_next(&p->free_cluster_tail);
- cluster_set_next(&cluster_info[tail], idx);
- cluster_set_next_flag(&p->free_cluster_tail,
- idx, 0);
- }
+ cluster_list_add_tail(&p->free_clusters, cluster_info,
+ idx);
}
idx++;
if (idx == nr_clusters)
diff --git a/mm/vmacache.c b/mm/vmacache.c
index fd09dc9c6812..035fdeb35b43 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -87,11 +87,11 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
int i;
+ count_vm_vmacache_event(VMACACHE_FIND_CALLS);
+
if (!vmacache_valid(mm))
return NULL;
- count_vm_vmacache_event(VMACACHE_FIND_CALLS);
-
for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i];
@@ -115,11 +115,11 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
{
int i;
+ count_vm_vmacache_event(VMACACHE_FIND_CALLS);
+
if (!vmacache_valid(mm))
return NULL;
- count_vm_vmacache_event(VMACACHE_FIND_CALLS);
-
for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i];
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 91f44e78c516..f2481cb4e6b2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1359,14 +1359,14 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
- if (flags & VM_IOREMAP)
- align = 1ul << clamp_t(int, fls_long(size),
- PAGE_SHIFT, IOREMAP_MAX_ORDER);
-
size = PAGE_ALIGN(size);
if (unlikely(!size))
return NULL;
+ if (flags & VM_IOREMAP)
+ align = 1ul << clamp_t(int, get_count_order_long(size),
+ PAGE_SHIFT, IOREMAP_MAX_ORDER);
+
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
return NULL;
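Two things change in __get_vm_area_node() above: the VM_IOREMAP alignment is now computed after PAGE_ALIGN(), and it is derived with get_count_order_long() (effectively ceil(log2(size))) rather than fls_long(), so an exact power-of-two size no longer gets twice the alignment it needs. A standalone comparison; the two helpers are re-implemented here only for the demo and the clamping to PAGE_SHIFT/IOREMAP_MAX_ORDER is omitted:

    #include <stdio.h>

    /* simplified userspace stand-ins for the kernel helpers */
    static int fls_long(unsigned long x)        /* index of highest set bit, 1-based */
    {
        int r = 0;

        while (x) { r++; x >>= 1; }
        return r;
    }

    static int get_count_order_long(unsigned long x)    /* ceil(log2(x)) */
    {
        int order = fls_long(x) - 1;

        if (x & (x - 1))
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 8192, 12288, 65536 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned long s = sizes[i];

            printf("size %6lu: old align %7lu, new align %7lu\n", s,
                   1UL << fls_long(s), 1UL << get_count_order_long(s));
        }
        return 0;
    }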
@@ -1601,7 +1601,6 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node)
{
- const int order = 0;
struct page **pages;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1629,9 +1628,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
struct page *page;
if (node == NUMA_NO_NODE)
- page = alloc_pages(alloc_mask, order);
+ page = alloc_page(alloc_mask);
else
- page = alloc_pages_node(node, alloc_mask, order);
+ page = alloc_pages_node(node, alloc_mask, 0);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@ -1648,8 +1647,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
return area->addr;
fail:
- warn_alloc_failed(gfp_mask, order,
- "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
+ warn_alloc(gfp_mask,
+ "vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
vfree(area->addr);
return NULL;
@@ -1710,9 +1709,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return addr;
fail:
- warn_alloc_failed(gfp_mask, 0,
- "vmalloc: allocation failure: %lu bytes\n",
- real_size);
+ warn_alloc(gfp_mask,
+ "vmalloc: allocation failure: %lu bytes", real_size);
return NULL;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fe8b7113868..744f926af442 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2418,8 +2418,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
if (inactive_list_is_low(lruvec, false, sc))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
-
- throttle_vm_writeout(sc->gfp_mask);
}
/* Use reclaim/compaction for costly allocs or under memory pressure */
@@ -2480,7 +2478,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
* If we have not reclaimed enough pages for compaction and the
* inactive lists are large enough, continue reclaiming
*/
- pages_for_compaction = (2UL << sc->order);
+ pages_for_compaction = compact_gap(sc->order);
inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
if (get_nr_swap_pages() > 0)
inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
@@ -2495,7 +2493,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
continue;
switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
- case COMPACT_PARTIAL:
+ case COMPACT_SUCCESS:
case COMPACT_CONTINUE:
return false;
default:
@@ -2598,38 +2596,35 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
}
/*
- * Returns true if compaction should go ahead for a high-order request, or
- * the high-order allocation would succeed without compaction.
+ * Returns true if compaction should go ahead for a costly-order request, or
+ * the allocation would already succeed without compaction. Return false if we
+ * should reclaim first.
*/
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
unsigned long watermark;
- bool watermark_ok;
+ enum compact_result suitable;
- /*
- * Compaction takes time to run and there are potentially other
- * callers using the pages just freed. Continue reclaiming until
- * there is a buffer of free pages available to give compaction
- * a reasonable chance of completing and allocating the page
- */
- watermark = high_wmark_pages(zone) + (2UL << sc->order);
- watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
-
- /*
- * If compaction is deferred, reclaim up to a point where
- * compaction will have a chance of success when re-enabled
- */
- if (compaction_deferred(zone, sc->order))
- return watermark_ok;
+ suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
+ if (suitable == COMPACT_SUCCESS)
+ /* Allocation should succeed already. Don't reclaim. */
+ return true;
+ if (suitable == COMPACT_SKIPPED)
+ /* Compaction cannot yet proceed. Do reclaim. */
+ return false;
/*
- * If compaction is not ready to start and allocation is not likely
- * to succeed without it, then keep reclaiming.
+ * Compaction is already possible, but it takes time to run and there
+ * are potentially other callers using the pages just freed. So proceed
+ * with reclaim to make a buffer of free pages available to give
+ * compaction a reasonable chance of completing and allocating the page.
+ * Note that we won't actually reclaim the whole buffer in one attempt
+ * as the target watermark in should_continue_reclaim() is lower. But if
+ * we are already above the high+gap watermark, don't reclaim at all.
*/
- if (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx) == COMPACT_SKIPPED)
- return false;
+ watermark = high_wmark_pages(zone) + compact_gap(sc->order);
- return watermark_ok;
+ return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
}
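compact_gap() replaces the open-coded 2UL << order in several spots of this series (here and in should_continue_reclaim()/kswapd_shrink_node()), keeping roughly twice the request size worth of free pages as headroom so compaction has migration targets. Assuming the gap keeps that 2UL << order definition and 4 KiB pages, the numbers work out as below:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                    /* assumed for the illustration */

    /* definition implied by the replacements in these hunks */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    int main(void)
    {
        for (unsigned int order = 0; order <= 10; order += 2)
            printf("order %2u: gap = %5lu pages (%6lu KiB)\n", order,
                   compact_gap(order),
                   compact_gap(order) * PAGE_SIZE / 1024);
        return 0;
    }

In the rewritten compaction_ready() this gap sits on top of the high watermark: if the zone already clears high+gap, reclaim is skipped entirely and compaction is left to do its work.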
/*
@@ -3041,7 +3036,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
*/
nid = mem_cgroup_select_victim_node(memcg);
- zonelist = NODE_DATA(nid)->node_zonelists;
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
trace_mm_vmscan_memcg_reclaim_begin(0,
sc.may_writepage,
@@ -3169,7 +3164,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
* excessive reclaim. Assume that a process requested a high-order
* can direct reclaim/compact.
*/
- if (sc->order && sc->nr_reclaimed >= 2UL << sc->order)
+ if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
sc->order = 0;
return sc->nr_scanned >= sc->nr_to_reclaim;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89cec42d19ff..604f26a4f696 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1254,85 +1254,6 @@ static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
return 0;
}
-#ifdef CONFIG_PAGE_OWNER
-static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
- pg_data_t *pgdat,
- struct zone *zone)
-{
- struct page *page;
- struct page_ext *page_ext;
- unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
- unsigned long end_pfn = pfn + zone->spanned_pages;
- unsigned long count[MIGRATE_TYPES] = { 0, };
- int pageblock_mt, page_mt;
- int i;
-
- /* Scan block by block. First and last block may be incomplete */
- pfn = zone->zone_start_pfn;
-
- /*
- * Walk the zone in pageblock_nr_pages steps. If a page block spans
- * a zone boundary, it will be double counted between zones. This does
- * not matter as the mixed block count will still be correct
- */
- for (; pfn < end_pfn; ) {
- if (!pfn_valid(pfn)) {
- pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
- continue;
- }
-
- block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
- block_end_pfn = min(block_end_pfn, end_pfn);
-
- page = pfn_to_page(pfn);
- pageblock_mt = get_pageblock_migratetype(page);
-
- for (; pfn < block_end_pfn; pfn++) {
- if (!pfn_valid_within(pfn))
- continue;
-
- page = pfn_to_page(pfn);
-
- if (page_zone(page) != zone)
- continue;
-
- if (PageBuddy(page)) {
- pfn += (1UL << page_order(page)) - 1;
- continue;
- }
-
- if (PageReserved(page))
- continue;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- continue;
-
- if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
- continue;
-
- page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
- if (pageblock_mt != page_mt) {
- if (is_migrate_cma(pageblock_mt))
- count[MIGRATE_MOVABLE]++;
- else
- count[pageblock_mt]++;
-
- pfn = block_end_pfn;
- break;
- }
- pfn += (1UL << page_ext->order) - 1;
- }
- }
-
- /* Print counts */
- seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
- for (i = 0; i < MIGRATE_TYPES; i++)
- seq_printf(m, "%12lu ", count[i]);
- seq_putc(m, '\n');
-}
-#endif /* CONFIG_PAGE_OWNER */
-
/*
* Print out the number of pageblocks for each migratetype that contain pages
* of other types. This gives an indication of how well fallbacks are being
@@ -1592,7 +1513,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
- seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+
+ seq_puts(m, vmstat_text[off]);
+ seq_put_decimal_ull(m, " ", *l);
+ seq_putc(m, '\n');
return 0;
}
@@ -1794,6 +1718,16 @@ static void __init start_shepherd_timer(void)
round_jiffies_relative(sysctl_stat_interval));
}
+static void __init init_cpu_node_state(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ node_set_state(cpu_to_node(cpu), N_CPU);
+ put_online_cpus();
+}
+
static void vmstat_cpu_dead(int node)
{
int cpu;
@@ -1851,6 +1785,7 @@ static int __init setup_vmstat(void)
#ifdef CONFIG_SMP
cpu_notifier_register_begin();
__register_cpu_notifier(&vmstat_notifier);
+ init_cpu_node_state();
start_shepherd_timer();
cpu_notifier_register_done();
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
index 86450b7e2899..941df2fa4448 100644
--- a/net/6lowpan/ndisc.c
+++ b/net/6lowpan/ndisc.c
@@ -101,8 +101,6 @@ static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
ieee802154_be16_to_le16(&neigh->short_addr, lladdr_short);
if (!lowpan_802154_is_valid_src_short_addr(neigh->short_addr))
neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
- } else {
- neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
}
write_unlock_bh(&n->lock);
}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 1852e383afd6..553ed4ecb6a0 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -680,7 +680,7 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
goto error;
/* Create the Protection Domain */
- rdma->pd = ib_alloc_pd(rdma->cm_id->device);
+ rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
if (IS_ERR(rdma->pd))
goto error;
diff --git a/net/Kconfig b/net/Kconfig
index c2cdbce629bd..7b6cd340b72b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -369,6 +369,7 @@ source "net/irda/Kconfig"
source "net/bluetooth/Kconfig"
source "net/rxrpc/Kconfig"
source "net/kcm/Kconfig"
+source "net/strparser/Kconfig"
config FIB_RULES
bool
diff --git a/net/Makefile b/net/Makefile
index 9bd20bb86cc6..4cafaa2b4667 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_AF_KCM) += kcm/
+obj-$(CONFIG_STREAM_PARSER) += strparser/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index f066781be3c8..10d2bdce686e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1278,7 +1278,7 @@ out:
return err;
}
-#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
+#if IS_ENABLED(CONFIG_IPDDP)
static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
{
return skb->data[12] == 22;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index e574a7e9db6f..5d2693826afb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -31,7 +31,7 @@
#include <linux/atmlec.h>
/* Proxy LEC knows about bridging */
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
#include "../bridge/br_private.h"
static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
@@ -121,7 +121,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* Device structures */
static struct net_device *dev_lec[MAX_LEC_ITF];
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
{
char *buff;
@@ -155,7 +155,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
}
}
-#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+#endif /* IS_ENABLED(CONFIG_BRIDGE) */
/*
* Open/initialize the netdevice. This is called (in the current kernel)
@@ -222,7 +222,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
(long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
(long)skb_end_pointer(skb));
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
lec_handle_bridge(skb, dev);
#endif
@@ -426,7 +426,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
(unsigned short)(0xffff & mesg->content.normal.flag);
break;
case l_should_bridge:
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
{
pr_debug("%s: bridge zeppelin asks about %pM\n",
dev->name, mesg->content.proxy.mac_addr);
@@ -452,7 +452,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
sk->sk_data_ready(sk);
}
}
-#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+#endif /* IS_ENABLED(CONFIG_BRIDGE) */
break;
default:
pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 0e982222d425..3b3b1a292ec8 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1007,7 +1007,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (dev->name == NULL || strncmp(dev->name, "lec", 3))
+ if (strncmp(dev->name, "lec", 3))
return NOTIFY_DONE; /* we are only interested in lec:s */
switch (event) {
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 833bb145ba3c..f20742cbae6d 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -73,10 +73,21 @@ config BATMAN_ADV_MCAST
reduce the air overhead while improving the reliability of
multicast messages.
-config BATMAN_ADV_DEBUG
- bool "B.A.T.M.A.N. debugging"
+config BATMAN_ADV_DEBUGFS
+ bool "batman-adv debugfs entries"
depends on BATMAN_ADV
depends on DEBUG_FS
+ default y
+ help
+ Enable this to export routing related debug tables via debugfs.
+ The information for each soft-interface and used hard-interface can be
+ found under batman_adv/
+
+ If unsure, say Y.
+
+config BATMAN_ADV_DEBUG
+ bool "B.A.T.M.A.N. debugging"
+ depends on BATMAN_ADV_DEBUGFS
help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index a83fc6c58d19..f724d3c98a81 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -24,14 +24,14 @@ batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o
batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_ogm.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
-batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o
+batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
batman-adv-y += hash.o
-batman-adv-y += icmp_socket.o
+batman-adv-$(CONFIG_BATMAN_ADV_DEBUGFS) += icmp_socket.o
batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o
batman-adv-y += main.o
batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 81dbbf569bd4..623d04302aa2 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -20,12 +20,18 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
+#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
+#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
+#include "netlink.h"
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;
@@ -95,6 +101,7 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
return 0;
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
struct batadv_algo_ops *bat_algo_ops;
@@ -107,6 +114,7 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
return 0;
}
+#endif
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
@@ -138,3 +146,65 @@ static struct kparam_string batadv_param_string_ra = {
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
0644);
+
+/**
+ * batadv_algo_dump_entry - fill in information about one supported routing
+ * algorithm
+ * @msg: netlink message to be sent back
+ * @portid: Port to reply to
+ * @seq: Sequence number of message
+ * @bat_algo_ops: Algorithm to be dumped
+ *
+ * Return: Error number, or 0 on success
+ */
+static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_algo_ops *bat_algo_ops)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_ROUTING_ALGOS);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, BATADV_ATTR_ALGO_NAME, bat_algo_ops->name))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_algo_dump - fill in information about supported routing
+ * algorithms
+ * @msg: netlink message to be sent back
+ * @cb: Parameters to the netlink request
+ *
+ * Return: Length of reply message.
+ */
+int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_algo_ops *bat_algo_ops;
+ int skip = cb->args[0];
+ int i = 0;
+
+ hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
+ if (i++ < skip)
+ continue;
+
+ if (batadv_algo_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
+ bat_algo_ops)) {
+ i--;
+ break;
+ }
+ }
+
+ cb->args[0] = i;
+
+ return msg->len;
+}
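batadv_algo_dump() above uses the standard netlink dump idiom: cb->args[0] remembers how many entries earlier passes already emitted, each pass skips that many, and when a message fills up the loop rewinds i and breaks so the next callback resumes exactly where this one stopped. A runnable userspace model of that resumable iteration, with a fixed per-message budget standing in for the skb running out of room:

    #include <stdio.h>

    #define NITEMS      10
    #define PER_MESSAGE 4       /* pretend only 4 entries fit per message */

    /* one dump pass: returns number of entries written, updates *skip */
    static int dump_pass(int *skip)
    {
        int written = 0;
        int i = 0;

        for (int item = 0; item < NITEMS; item++) {
            if (i++ < *skip)
                continue;
            if (written == PER_MESSAGE) {       /* "skb full": stop and rewind */
                i--;
                break;
            }
            printf("  entry %d\n", item);
            written++;
        }
        *skip = i;                              /* resume point for the next pass */
        return written;
    }

    int main(void)
    {
        int skip = 0, pass = 1;

        while (1) {
            printf("dump pass %d (skip=%d):\n", pass++, skip);
            if (dump_pass(&skip) == 0)          /* empty pass terminates the dump */
                break;
        }
        return 0;
    }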
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 860d773dd8fa..3b5b69cdd12b 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -22,7 +22,9 @@
#include <linux/types.h>
+struct netlink_callback;
struct seq_file;
+struct sk_buff;
extern char batadv_routing_algo[];
extern struct list_head batadv_hardif_list;
@@ -31,5 +33,6 @@ void batadv_algo_init(void);
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb);
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 19b0abd6c640..e2d18d0b1f06 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/pkt_sched.h>
#include <linux/printk.h>
#include <linux/random.h>
@@ -48,12 +49,17 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bitarray.h"
+#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
+#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
@@ -318,17 +324,18 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
if (!orig_node->bat_iv.bcast_own_sum)
goto free_orig_node;
+ kref_get(&orig_node->refcount);
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0)
- goto free_orig_node;
+ goto free_orig_node_hash;
return orig_node;
-free_orig_node:
- /* free twice, as batadv_orig_node_new sets refcount to 2 */
+free_orig_node_hash:
batadv_orig_node_put(orig_node);
+free_orig_node:
batadv_orig_node_put(orig_node);
return NULL;
@@ -528,36 +535,25 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
{
struct net_device *soft_iface;
- struct batadv_priv *bat_priv;
- struct batadv_hard_iface *primary_if = NULL;
if (!forw_packet->if_incoming) {
pr_err("Error - can't forward packet: incoming iface not specified\n");
- goto out;
+ return;
}
soft_iface = forw_packet->if_incoming->soft_iface;
- bat_priv = netdev_priv(soft_iface);
if (WARN_ON(!forw_packet->if_outgoing))
- goto out;
+ return;
if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
- goto out;
+ return;
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
- goto out;
-
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
+ return;
/* only for one specific outgoing interface */
batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing);
-
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
}
/**
@@ -685,19 +681,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
struct batadv_forw_packet *forw_packet_aggr;
unsigned char *skb_buff;
unsigned int skb_size;
+ atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
- /* own packet should always be scheduled */
- if (!own_packet) {
- if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
- return;
- }
- }
-
- forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
+ forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
+ queue_left, bat_priv);
if (!forw_packet_aggr)
- goto out_nomem;
+ return;
if (atomic_read(&bat_priv->aggregated_ogms) &&
packet_len < BATADV_MAX_AGGREGATION_BYTES)
@@ -708,8 +697,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
skb_size += ETH_HLEN;
forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
- if (!forw_packet_aggr->skb)
- goto out_free_forw_packet;
+ if (!forw_packet_aggr->skb) {
+ batadv_forw_packet_free(forw_packet_aggr);
+ return;
+ }
+
forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
@@ -717,12 +709,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
forw_packet_aggr->packet_len = packet_len;
memcpy(skb_buff, packet_buff, packet_len);
- kref_get(&if_incoming->refcount);
- kref_get(&if_outgoing->refcount);
forw_packet_aggr->own = own_packet;
- forw_packet_aggr->if_incoming = if_incoming;
- forw_packet_aggr->if_outgoing = if_outgoing;
- forw_packet_aggr->num_packets = 0;
forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
forw_packet_aggr->send_time = send_time;
@@ -741,13 +728,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
queue_delayed_work(batadv_event_workqueue,
&forw_packet_aggr->delayed_work,
send_time - jiffies);
-
- return;
-out_free_forw_packet:
- kfree(forw_packet_aggr);
-out_nomem:
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
}
/* aggregate a new packet into the existing ogm packet */
@@ -1830,10 +1810,6 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
batadv_iv_ogm_schedule(forw_packet->if_incoming);
out:
- /* don't count own packet */
- if (!forw_packet->own)
- atomic_inc(&bat_priv->batman_queue_left);
-
batadv_forw_packet_free(forw_packet);
}
@@ -1879,6 +1855,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
return NET_RX_SUCCESS;
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
* @orig_node: the orig_node for which the neighbors are printed
@@ -1976,8 +1953,239 @@ next:
if (batman_count == 0)
seq_puts(seq, "No batman nodes in range ...\n");
}
+#endif
+
+/**
+ * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * given outgoing interface.
+ * @neigh_node: Neighbour of interest
+ * @if_outgoing: Outgoing interface of interest
+ * @tq_avg: Pointer of where to store the TQ average
+ *
+ * Return: False if no average TQ available, otherwise true.
+ */
+static bool
+batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_outgoing,
+ u8 *tq_avg)
+{
+ struct batadv_neigh_ifinfo *n_ifinfo;
+
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!n_ifinfo)
+ return false;
+
+ *tq_avg = n_ifinfo->bat_iv.tq_avg;
+ batadv_neigh_ifinfo_put(n_ifinfo);
+
+ return true;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @neigh_node: Single hop neighbour
+ * @best: Is the best originator
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ bool best)
+{
+ void *hdr;
+ u8 tq_avg;
+ unsigned int last_seen_msecs;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
+
+ if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg))
+ return 0;
+
+ if (if_outgoing != BATADV_IF_DEFAULT &&
+ if_outgoing != neigh_node->if_incoming)
+ return 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ orig_node->orig) ||
+ nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ neigh_node->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ neigh_node->if_incoming->net_dev->ifindex) ||
+ nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @sub_s: Number of sub entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node, int *sub_s)
+{
+ struct batadv_neigh_node *neigh_node_best;
+ struct batadv_neigh_node *neigh_node;
+ int sub = 0;
+ bool best;
+ u8 tq_avg_best;
+
+ neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
+ if (!neigh_node_best)
+ goto out;
+
+ if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing,
+ &tq_avg_best))
+ goto out;
+
+ if (tq_avg_best == 0)
+ goto out;
+
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (neigh_node == neigh_node_best);
+
+ if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq,
+ bat_priv, if_outgoing,
+ orig_node, neigh_node,
+ best)) {
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ out:
+ if (neigh_node_best)
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = 0;
+ return 0;
+}
/**
+ * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @head: Bucket to be dumped
+ * @idx_s: Number of entries to be skipped
+ * @sub: Number of sub entries to be skipped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_orig_node *orig_node;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node,
+ sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ */
+static void
+batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_iv_ogm_orig_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, if_outgoing, head,
+ &idx, &sub))
+ break;
+
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+/**
* batadv_iv_hardif_neigh_print - print a single hop neighbour node
* @seq: neighbour table seq_file struct
* @hardif_neigh: hardif neighbour information
@@ -2027,37 +2235,43 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
if (batman_count == 0)
seq_puts(seq, "No batman nodes in range ...\n");
}
+#endif
/**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
* @if_outgoing2: outgoing interface for the second neighbor
+ * @diff: pointer to integer receiving the calculated difference
*
- * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
- * lower, the same as or higher than the metric via neigh2
+ * The content of *@diff is only valid when this function returns true.
+ * It is less, equal to or greater than 0 if the metric via neigh1 is lower,
+ * the same as or higher than the metric via neigh2
+ *
+ * Return: true when the difference could be calculated, false otherwise
*/
-static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
- struct batadv_hard_iface *if_outgoing1,
- struct batadv_neigh_node *neigh2,
- struct batadv_hard_iface *if_outgoing2)
+static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2,
+ int *diff)
{
struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
u8 tq1, tq2;
- int diff;
+ bool ret = true;
neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
if (!neigh1_ifinfo || !neigh2_ifinfo) {
- diff = 0;
+ ret = false;
goto out;
}
tq1 = neigh1_ifinfo->bat_iv.tq_avg;
tq2 = neigh2_ifinfo->bat_iv.tq_avg;
- diff = tq1 - tq2;
+ *diff = (int)tq1 - (int)tq2;
out:
if (neigh1_ifinfo)
@@ -2065,6 +2279,162 @@ out:
if (neigh2_ifinfo)
batadv_neigh_ifinfo_put(neigh2_ifinfo);
+ return ret;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @hardif_neigh: Neighbour to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ void *hdr;
+ unsigned int last_seen_msecs;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ hardif_neigh->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ hardif_neigh->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @hard_iface: Hard interface to dump the neighbours for
+ * @idx_s: Number of entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ int *idx_s)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ int idx = 0;
+
+ hlist_for_each_entry_rcu(hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq,
+ hardif_neigh)) {
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @single_hardif: Limit dump to this hard interface
+ */
+static void
+batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *single_hardif)
+{
+ struct batadv_hard_iface *hard_iface;
+ int i_hardif = 0;
+ int i_hardif_s = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ rcu_read_lock();
+ if (single_hardif) {
+ if (i_hardif_s == 0) {
+ if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv,
+ single_hardif,
+ &idx) == 0)
+ i_hardif++;
+ }
+ } else {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list,
+ list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (i_hardif++ < i_hardif_s)
+ continue;
+
+ if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv,
+ hard_iface, &idx)) {
+ i_hardif--;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = i_hardif;
+ cb->args[1] = idx;
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @if_outgoing1: outgoing interface for the first neighbor
+ * @neigh2: the second neighbor object of the comparison
+ * @if_outgoing2: outgoing interface for the second neighbor
+ *
+ * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
+ * lower, the same as or higher than the metric via neigh2
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ bool ret;
+ int diff;
+
+ ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
+ if_outgoing2, &diff);
+ if (!ret)
+ return 0;
+
return diff;
}
@@ -2085,36 +2455,341 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2)
{
- struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
- u8 tq1, tq2;
bool ret;
+ int diff;
- neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
- neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+ ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
+ if_outgoing2, &diff);
+ if (!ret)
+ return false;
- /* we can't say that the metric is better */
- if (!neigh1_ifinfo || !neigh2_ifinfo) {
- ret = false;
+ ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+ return ret;
+}
+
+static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+ /* begin scheduling originator messages on that interface */
+ batadv_iv_ogm_schedule(hard_iface);
+}
+
+static struct batadv_gw_node *
+batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
+{
+ struct batadv_neigh_node *router;
+ struct batadv_neigh_ifinfo *router_ifinfo;
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ u64 max_gw_factor = 0;
+ u64 tmp_gw_factor = 0;
+ u8 max_tq = 0;
+ u8 tq_avg;
+ struct batadv_orig_node *orig_node;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ continue;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router,
+ BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto next;
+
+ if (!kref_get_unless_zero(&gw_node->refcount))
+ goto next;
+
+ tq_avg = router_ifinfo->bat_iv.tq_avg;
+
+ switch (atomic_read(&bat_priv->gw.sel_class)) {
+ case 1: /* fast connection */
+ tmp_gw_factor = tq_avg * tq_avg;
+ tmp_gw_factor *= gw_node->bandwidth_down;
+ tmp_gw_factor *= 100 * 100;
+ tmp_gw_factor >>= 18;
+
+ if ((tmp_gw_factor > max_gw_factor) ||
+ ((tmp_gw_factor == max_gw_factor) &&
+ (tq_avg > max_tq))) {
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ }
+ break;
+
+ default: /* 2: stable connection (use best statistic)
+ * 3: fast-switch (use best statistic but change as
+ * soon as a better gateway appears)
+ * XX: late-switch (use best statistic but change as
+ * soon as a better gateway appears which has
+ * $routing_class more tq points)
+ */
+ if (tq_avg > max_tq) {
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ }
+ break;
+ }
+
+ if (tq_avg > max_tq)
+ max_tq = tq_avg;
+
+ if (tmp_gw_factor > max_gw_factor)
+ max_gw_factor = tmp_gw_factor;
+
+ batadv_gw_node_put(gw_node);
+
+next:
+ batadv_neigh_node_put(router);
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ }
+ rcu_read_unlock();
+
+ return curr_gw;
+}
+
+static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL;
+ struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL;
+ struct batadv_neigh_node *router_gw = NULL;
+ struct batadv_neigh_node *router_orig = NULL;
+ u8 gw_tq_avg, orig_tq_avg;
+ bool ret = false;
+
+ /* dynamic re-election is performed only on fast or late switch */
+ if (atomic_read(&bat_priv->gw.sel_class) <= 2)
+ return false;
+
+ router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
+ if (!router_gw) {
+ ret = true;
goto out;
}
- tq1 = neigh1_ifinfo->bat_iv.tq_avg;
- tq2 = neigh2_ifinfo->bat_iv.tq_avg;
- ret = (tq1 - tq2) > -BATADV_TQ_SIMILARITY_THRESHOLD;
+ router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw,
+ BATADV_IF_DEFAULT);
+ if (!router_gw_ifinfo) {
+ ret = true;
+ goto out;
+ }
+
+ router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router_orig)
+ goto out;
+
+ router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig,
+ BATADV_IF_DEFAULT);
+ if (!router_orig_ifinfo)
+ goto out;
+
+ gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg;
+ orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg;
+
+ /* the TQ value has to be better */
+ if (orig_tq_avg < gw_tq_avg)
+ goto out;
+ /* if the routing class is greater than 3 the value tells us how much
+ * greater the TQ value of the new gateway must be
+ */
+ if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
+ (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
+ goto out;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
+ gw_tq_avg, orig_tq_avg);
+
+ ret = true;
out:
- if (neigh1_ifinfo)
- batadv_neigh_ifinfo_put(neigh1_ifinfo);
- if (neigh2_ifinfo)
- batadv_neigh_ifinfo_put(neigh2_ifinfo);
+ if (router_gw_ifinfo)
+ batadv_neigh_ifinfo_put(router_gw_ifinfo);
+ if (router_orig_ifinfo)
+ batadv_neigh_ifinfo_put(router_orig_ifinfo);
+ if (router_gw)
+ batadv_neigh_node_put(router_gw);
+ if (router_orig)
+ batadv_neigh_node_put(router_orig);
return ret;
}
-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+/* fails if orig_node has no router */
+static int batadv_iv_gw_write_buffer_text(struct batadv_priv *bat_priv,
+ struct seq_file *seq,
+ const struct batadv_gw_node *gw_node)
{
- /* begin scheduling originator messages on that interface */
- batadv_iv_ogm_schedule(hard_iface);
+ struct batadv_gw_node *curr_gw;
+ struct batadv_neigh_node *router;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ int ret = -1;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
+ (curr_gw == gw_node ? "=>" : " "),
+ gw_node->orig_node->orig,
+ router_ifinfo->bat_iv.tq_avg, router->addr,
+ router->if_incoming->net_dev->name,
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10);
+ ret = seq_has_overflowed(seq) ? -1 : 0;
+
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
+ struct seq_file *seq)
+{
+ struct batadv_gw_node *gw_node;
+ int gw_count = 0;
+
+ seq_puts(seq,
+ " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ /* fails if orig_node has no router */
+ if (batadv_iv_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
+ continue;
+
+ gw_count++;
+ }
+ rcu_read_unlock();
+
+ if (gw_count == 0)
+ seq_puts(seq, "No gateways in range ...\n");
+}
+#endif
+
+/**
+ * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @gw_node: Gateway to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_gw_node *gw_node)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+ struct batadv_gw_node *curr_gw;
+ int ret = -EINVAL;
+ void *hdr;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ret = -EMSGSIZE;
+
+ if (curr_gw == gw_node)
+ if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ gw_node->orig_node->orig) ||
+ nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) ||
+ nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN,
+ router->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ router->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
+ gw_node->bandwidth_down) ||
+ nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP,
+ gw_node->bandwidth_up)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+/**
+ * batadv_iv_gw_dump - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ */
+static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_gw_node *gw_node;
+ int idx_skip = cb->args[0];
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ if (idx++ < idx_skip)
+ continue;
+
+ if (batadv_iv_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
+ bat_priv, gw_node)) {
+ idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ idx_skip = idx;
+unlock:
+ rcu_read_unlock();
+
+ cb->args[0] = idx_skip;
}
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
@@ -2129,14 +2804,28 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.neigh = {
.cmp = batadv_iv_ogm_neigh_cmp,
.is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
.print = batadv_iv_neigh_print,
+#endif
+ .dump = batadv_iv_ogm_neigh_dump,
},
.orig = {
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
.print = batadv_iv_ogm_orig_print,
+#endif
+ .dump = batadv_iv_ogm_orig_dump,
.free = batadv_iv_ogm_orig_free,
.add_if = batadv_iv_ogm_orig_add_if,
.del_if = batadv_iv_ogm_orig_del_if,
},
+ .gw = {
+ .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
+ .is_eligible = batadv_iv_gw_is_eligible,
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ .print = batadv_iv_gw_print,
+#endif
+ .dump = batadv_iv_gw_dump,
+ },
};
int __init batadv_iv_init(void)
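
The dump callbacks added above all rely on the same netlink resume idiom: the current walk position (hash bucket, entry index, sub-entry index) is parked in cb->args[] whenever the message runs out of room, so the next invocation of the dump continues exactly where the previous one stopped. Below is a minimal, standalone C sketch of that cursor pattern only; the struct, sizes and names are hypothetical and no kernel or netlink API is used.

#include <stdio.h>

struct cursor {
	int bucket;	/* like cb->args[0] */
	int idx;	/* like cb->args[1] */
};

#define BUCKETS    3
#define PER_BUCKET 4
#define MSG_BUDGET 5	/* entries that fit into one "message" */

/* Emit entries until the budget is used up; the cursor is left pointing at
 * the first entry that did not fit, so a later call resumes there.
 */
static int dump(struct cursor *c)
{
	int written = 0;

	while (c->bucket < BUCKETS) {
		while (c->idx < PER_BUCKET) {
			if (written == MSG_BUDGET)
				return written;	/* message full, resume later */
			printf("entry %d/%d\n", c->bucket, c->idx);
			c->idx++;
			written++;
		}
		c->bucket++;
		c->idx = 0;
	}
	return written;
}

int main(void)
{
	struct cursor c = { 0, 0 };

	/* repeated calls, like the kernel answering successive dump requests */
	while (dump(&c) == MSG_BUDGET)
		;
	return 0;
}

The third cursor slot used above (cb->args[2]) works the same way one level deeper, remembering which neighbour of a partially dumped originator comes next.
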
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0366cbf5e444..e79f6f01182e 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -21,24 +21,38 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cache.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bat_v_elp.h"
#include "bat_v_ogm.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
#include "hard-interface.h"
#include "hash.h"
+#include "log.h"
+#include "netlink.h"
#include "originator.h"
#include "packet.h"
+struct sk_buff;
+
static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -115,6 +129,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
batadv_v_elp_throughput_metric_update);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_v_orig_print_neigh - print neighbors for the originator table
* @orig_node: the orig_node for which the neighbors are printed
@@ -198,8 +213,142 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
if (batman_count == 0)
seq_puts(seq, "No batman nodes in range ...\n");
}
+#endif
+
+/**
+ * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @hardif_neigh: Neighbour to dump
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ void *hdr;
+ unsigned int last_seen_msecs;
+ u32 throughput;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
+ throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
+ throughput = throughput * 100;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_NEIGHBORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ hardif_neigh->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ hardif_neigh->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs) ||
+ nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
/**
+ * batadv_v_neigh_dump_hardif - Dump the neighbours of a hard interface into
+ * a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @hard_iface: The hard interface to be dumped
+ * @idx_s: Entries to be skipped
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ int *idx_s)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ int idx = 0;
+
+ hlist_for_each_entry_rcu(hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_v_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) {
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_neigh_dump - Dump the neighbours into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @single_hardif: Limit dumping to this hard interface
+ */
+static void
+batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *single_hardif)
+{
+ struct batadv_hard_iface *hard_iface;
+ int i_hardif = 0;
+ int i_hardif_s = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ rcu_read_lock();
+ if (single_hardif) {
+ if (i_hardif_s == 0) {
+ if (batadv_v_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, single_hardif,
+ &idx) == 0)
+ i_hardif++;
+ }
+ } else {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (i_hardif++ < i_hardif_s)
+ continue;
+
+ if (batadv_v_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, hard_iface,
+ &idx)) {
+ i_hardif--;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = i_hardif;
+ cb->args[1] = idx;
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+/**
* batadv_v_orig_print - print the originator table
* @bat_priv: the bat priv with all the soft interface information
* @seq: debugfs table seq_file struct
@@ -265,6 +414,205 @@ next:
if (batman_count == 0)
seq_puts(seq, "No batman nodes in range ...\n");
}
+#endif
+
+/**
+ * batadv_v_orig_dump_subentry - Dump an originator subentry into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @neigh_node: Single hop neighbour
+ * @best: Is the best originator
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ bool best)
+{
+ struct batadv_neigh_ifinfo *n_ifinfo;
+ unsigned int last_seen_msecs;
+ u32 throughput;
+ void *hdr;
+
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!n_ifinfo)
+ return 0;
+
+ throughput = n_ifinfo->bat_v.throughput * 100;
+
+ batadv_neigh_ifinfo_put(n_ifinfo);
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
+
+ if (if_outgoing != BATADV_IF_DEFAULT &&
+ if_outgoing != neigh_node->if_incoming)
+ return 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_ORIGINATORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) ||
+ nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ neigh_node->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ neigh_node->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @sub_s: Number of sub entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node, int *sub_s)
+{
+ struct batadv_neigh_node *neigh_node_best;
+ struct batadv_neigh_node *neigh_node;
+ int sub = 0;
+ bool best;
+
+ neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
+ if (!neigh_node_best)
+ goto out;
+
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (neigh_node == neigh_node_best);
+
+ if (batadv_v_orig_dump_subentry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node,
+ neigh_node, best)) {
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ out:
+ if (neigh_node_best)
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_orig_dump_bucket - Dump an originator bucket into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @head: Bucket to be dumped
+ * @idx_s: Number of entries to be skipped
+ * @sub: Number of sub entries to be skipped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_orig_node *orig_node;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_v_orig_dump_entry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node, sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_orig_dump - Dump the originators into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ */
+static void
+batadv_v_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_v_orig_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, if_outgoing, head, &idx,
+ &sub))
+ break;
+
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+}
static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
@@ -320,6 +668,365 @@ err_ifinfo1:
return ret;
}
+static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
+ char *buff, size_t count)
+{
+ u32 old_class, class;
+
+ if (!batadv_parse_throughput(bat_priv->soft_iface, buff,
+ "B.A.T.M.A.N. V GW selection class",
+ &class))
+ return -EINVAL;
+
+ old_class = atomic_read(&bat_priv->gw.sel_class);
+ atomic_set(&bat_priv->gw.sel_class, class);
+
+ if (old_class != class)
+ batadv_gw_reselect(bat_priv);
+
+ return count;
+}
+
+static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
+{
+ u32 class = atomic_read(&bat_priv->gw.sel_class);
+
+ return sprintf(buff, "%u.%u MBit\n", class / 10, class % 10);
+}
+
+/**
+ * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * @gw_node: the GW to retrieve the metric for
+ * @bw: the pointer where the metric will be stored. The metric is computed as
+ * the minimum between the GW advertised throughput and the path throughput to
+ * it in the mesh
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router;
+ int ret = -1;
+
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ /* the GW metric is computed as the minimum between the path throughput
+ * to reach the GW itself and the advertised bandwidth.
+ * This gives us an approximation of the effective throughput that the
+ * client can expect via this particular GW node
+ */
+ *bw = router_ifinfo->bat_v.throughput;
+ *bw = min_t(u32, *bw, gw_node->bandwidth_down);
+
+ ret = 0;
+out:
+ if (router)
+ batadv_neigh_node_put(router);
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+
+ return ret;
+}
+
+/**
+ * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: the GW node having the best GW-metric, NULL if no GW is known
+ */
+static struct batadv_gw_node *
+batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ u32 max_bw = 0, bw;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ if (!kref_get_unless_zero(&gw_node->refcount))
+ continue;
+
+ if (batadv_v_gw_throughput_get(gw_node, &bw) < 0)
+ goto next;
+
+ if (curr_gw && (bw <= max_bw))
+ goto next;
+
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ max_bw = bw;
+
+next:
+ batadv_gw_node_put(gw_node);
+ }
+ rcu_read_unlock();
+
+ return curr_gw;
+}
+
+/**
+ * batadv_v_gw_is_eligible - check if an originator would be selected as GW
+ * @bat_priv: the bat priv with all the soft interface information
+ * @curr_gw_orig: originator representing the currently selected GW
+ * @orig_node: the originator representing the new candidate
+ *
+ * Return: true if orig_node can be selected as current GW, false otherwise
+ */
+static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_gw_node *curr_gw = NULL, *orig_gw = NULL;
+ u32 gw_throughput, orig_throughput, threshold;
+ bool ret = false;
+
+ threshold = atomic_read(&bat_priv->gw.sel_class);
+
+ curr_gw = batadv_gw_node_get(bat_priv, curr_gw_orig);
+ if (!curr_gw) {
+ ret = true;
+ goto out;
+ }
+
+ if (batadv_v_gw_throughput_get(curr_gw, &gw_throughput) < 0) {
+ ret = true;
+ goto out;
+ }
+
+ orig_gw = batadv_gw_node_get(bat_priv, orig_node);
+ if (!orig_gw)
+ goto out;
+
+ if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
+ goto out;
+
+ if (orig_throughput < gw_throughput)
+ goto out;
+
+ if ((orig_throughput - gw_throughput) < threshold)
+ goto out;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (throughput curr: %u, throughput new: %u)\n",
+ gw_throughput, orig_throughput);
+
+ ret = true;
+out:
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+ if (orig_gw)
+ batadv_gw_node_put(orig_gw);
+
+ return ret;
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+/* fails if orig_node has no router */
+static int batadv_v_gw_write_buffer_text(struct batadv_priv *bat_priv,
+ struct seq_file *seq,
+ const struct batadv_gw_node *gw_node)
+{
+ struct batadv_gw_node *curr_gw;
+ struct batadv_neigh_node *router;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ int ret = -1;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ seq_printf(seq, "%s %pM (%9u.%1u) %pM [%10s]: %u.%u/%u.%u MBit\n",
+ (curr_gw == gw_node ? "=>" : " "),
+ gw_node->orig_node->orig,
+ router_ifinfo->bat_v.throughput / 10,
+ router_ifinfo->bat_v.throughput % 10, router->addr,
+ router->if_incoming->net_dev->name,
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10);
+ ret = seq_has_overflowed(seq) ? -1 : 0;
+
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
+out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+/**
+ * batadv_v_gw_print - print the gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: gateway table seq_file struct
+ */
+static void batadv_v_gw_print(struct batadv_priv *bat_priv,
+ struct seq_file *seq)
+{
+ struct batadv_gw_node *gw_node;
+ int gw_count = 0;
+
+ seq_puts(seq,
+ " Gateway ( throughput) Nexthop [outgoingIF]: advertised uplink bandwidth\n");
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ /* fails if orig_node has no router */
+ if (batadv_v_gw_write_buffer_text(bat_priv, seq, gw_node) < 0)
+ continue;
+
+ gw_count++;
+ }
+ rcu_read_unlock();
+
+ if (gw_count == 0)
+ seq_puts(seq, "No gateways in range ...\n");
+}
+#endif
+
+/**
+ * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @gw_node: Gateway to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_gw_node *gw_node)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+ struct batadv_gw_node *curr_gw;
+ int ret = -EINVAL;
+ void *hdr;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ret = -EMSGSIZE;
+
+ if (curr_gw == gw_node) {
+ if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ gw_node->orig_node->orig)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT,
+ router_ifinfo->bat_v.throughput)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ router->if_incoming->net_dev->name)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
+ gw_node->bandwidth_down)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+/**
+ * batadv_v_gw_dump - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ */
+static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_gw_node *gw_node;
+ int idx_skip = cb->args[0];
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
+ if (idx++ < idx_skip)
+ continue;
+
+ if (batadv_v_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
+ bat_priv, gw_node)) {
+ idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ idx_skip = idx;
+unlock:
+ rcu_read_unlock();
+
+ cb->args[0] = idx_skip;
+}
+
static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.name = "BATMAN_V",
.iface = {
@@ -333,10 +1040,26 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.hardif_init = batadv_v_hardif_neigh_init,
.cmp = batadv_v_neigh_cmp,
.is_similar_or_better = batadv_v_neigh_is_sob,
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
.print = batadv_v_neigh_print,
+#endif
+ .dump = batadv_v_neigh_dump,
},
.orig = {
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
.print = batadv_v_orig_print,
+#endif
+ .dump = batadv_v_orig_dump,
+ },
+ .gw = {
+ .store_sel_class = batadv_v_store_sel_class,
+ .show_sel_class = batadv_v_show_sel_class,
+ .get_best_gw_node = batadv_v_gw_get_best_gw_node,
+ .is_eligible = batadv_v_gw_is_eligible,
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ .print = batadv_v_gw_print,
+#endif
+ .dump = batadv_v_gw_dump,
},
};
@@ -363,7 +1086,16 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
*/
int batadv_v_mesh_init(struct batadv_priv *bat_priv)
{
- return batadv_v_ogm_init(bat_priv);
+ int ret = 0;
+
+ ret = batadv_v_ogm_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ /* set default throughput difference threshold to 5 Mbps */
+ atomic_set(&bat_priv->gw.sel_class, 50);
+
+ return 0;
}
/**
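
The gateway code added to bat_v.c above scores each gateway by the minimum of the measured throughput of the mesh path towards it and the bandwidth it advertises, and batadv_v_gw_is_eligible() only triggers a re-election when a candidate beats the current gateway by the configured selection class (50, i.e. 5 Mbps, after batadv_v_mesh_init()). The standalone sketch below shows just the metric and the best-candidate walk with made-up gateway data; all names are hypothetical and this is not the kernel implementation.

#include <stdio.h>

struct gw {
	const char *name;
	unsigned int path_tp;	/* measured throughput of the mesh path */
	unsigned int adv_bw;	/* bandwidth advertised by the gateway  */
};

/* effective metric: the weaker of the two limits */
static unsigned int gw_metric(const struct gw *g)
{
	return g->path_tp < g->adv_bw ? g->path_tp : g->adv_bw;
}

int main(void)
{
	const struct gw gws[] = {
		{ "gw-a",  80, 200 },	/* limited by the mesh path            */
		{ "gw-b", 300, 100 },	/* limited by its advertised bandwidth */
		{ "gw-c", 150, 160 },
	};
	const struct gw *curr = NULL;
	unsigned int best = 0;
	size_t i;

	for (i = 0; i < sizeof(gws) / sizeof(gws[0]); i++) {
		unsigned int cand = gw_metric(&gws[i]);

		/* re-election in the patch additionally requires the winner
		 * to exceed the current metric by the selection threshold
		 */
		if (!curr || cand > best) {
			curr = &gws[i];
			best = cand;
		}
	}
	printf("selected %s (metric %u)\n", curr->name, best);
	return 0;
}
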
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 6fbba4eb0617..1aeeadca620c 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -73,13 +73,12 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
if (!orig_node)
return NULL;
+ kref_get(&orig_node->refcount);
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0) {
- /* orig_node->refcounter is initialised to 2 by
- * batadv_orig_node_new()
- */
+ /* remove refcnt for newly created orig_node and hash entry */
batadv_orig_node_put(orig_node);
batadv_orig_node_put(orig_node);
orig_node = NULL;
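
Both originator-lookup hunks above (bat_iv_ogm.c and bat_v_ogm.c) switch to the same reference scheme: the constructor now returns a single reference, an extra reference is taken explicitly right before the node is published in the originator hash, and a failed insert drops both that reference and the caller's one. The following standalone sketch illustrates the idea with a plain integer counter instead of kref; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int refs;
};

static struct node *node_new(void)	/* the caller gets one reference */
{
	struct node *n = calloc(1, sizeof(*n));

	if (n)
		n->refs = 1;
	return n;
}

static void node_put(struct node *n)	/* release one reference */
{
	if (--n->refs == 0)
		free(n);
}

static int table_insert(struct node *n)	/* may fail, e.g. duplicate key */
{
	(void)n;
	return -1;			/* force the error path for the demo */
}

int main(void)
{
	struct node *n = node_new();

	if (!n)
		return 1;

	n->refs++;			/* reference owned by the table */
	if (table_insert(n) != 0) {
		node_put(n);		/* undo the table's reference */
		node_put(n);		/* drop the caller's reference */
		printf("insert failed, node freed\n");
		return 1;
	}
	/* success: one reference held by the table, one by the caller */
	node_put(n);
	return 0;
}
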
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ad2ffe16d29f..e7f690b571ea 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
@@ -45,12 +46,18 @@
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
+#include "netlink.h"
#include "originator.h"
#include "packet.h"
+#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"
@@ -519,11 +526,9 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
atomic_set(&entry->wait_periods, 0);
ether_addr_copy(entry->orig, orig);
INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
-
- /* one for the hash, one for returning */
kref_init(&entry->refcount);
- kref_get(&entry->refcount);
+ kref_get(&entry->refcount);
hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
batadv_compare_backbone_gw,
batadv_choose_backbone_gw, entry,
@@ -711,12 +716,13 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
claim->lasttime = jiffies;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
-
kref_init(&claim->refcount);
- kref_get(&claim->refcount);
+
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
mac, BATADV_PRINT_VID(vid));
+
+ kref_get(&claim->refcount);
hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
batadv_compare_claim,
batadv_choose_claim, claim,
@@ -1148,7 +1154,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
/* Let the loopdetect frames on the mesh in any case. */
if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
- return 0;
+ return false;
/* check if it is a claim frame. */
ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
@@ -1990,6 +1996,7 @@ out:
return ret;
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
* @seq: seq file to print on
@@ -2050,8 +2057,172 @@ out:
batadv_hardif_put(primary_if);
return 0;
}
+#endif
+
+/**
+ * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @seq: Sequence number of netlink message
+ * @primary_if: primary interface
+ * @claim: entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_bla_claim *claim)
+{
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+ int ret = -EINVAL;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ primary_addr);
+
+ spin_lock_bh(&claim->backbone_gw->crc_lock);
+ backbone_crc = claim->backbone_gw->crc;
+ spin_unlock_bh(&claim->backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+ claim->backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @seq: Sequence number of netlink message
+ * @primary_if: primary interface
+ * @head: bucket to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: always 0.
+ */
+static int
+batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hard_iface *primary_if,
+ struct hlist_head *head, int *idx_skip)
+{
+ struct batadv_bla_claim *claim;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ if (idx++ < *idx_skip)
+ continue;
+ if (batadv_bla_claim_dump_entry(msg, portid, seq,
+ primary_if, claim)) {
+ *idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ *idx_skip = idx;
+unlock:
+ rcu_read_unlock();
+ return 0;
+}
/**
+ * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hashtable *hash;
+ struct batadv_priv *bat_priv;
+ int bucket = cb->args[0];
+ struct hlist_head *head;
+ int idx = cb->args[1];
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+ hash = bat_priv->bla.claim_hash;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_bla_claim_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ primary_if, head, &idx))
+ break;
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ ret = msg->len;
+
+out:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+/**
* batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
* file
* @seq: seq file to print on
@@ -2114,3 +2285,168 @@ out:
batadv_hardif_put(primary_if);
return 0;
}
+#endif
+
+/**
+ * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @seq: Sequence number of netlink message
+ * @primary_if: primary interface
+ * @backbone_gw: entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ u8 *primary_addr = primary_if->net_dev->dev_addr;
+ u16 backbone_crc;
+ bool is_own;
+ int msecs;
+ void *hdr;
+ int ret = -EINVAL;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @seq: Sequence number of netlink message
+ * @primary_if: primary interface
+ * @head: bucket to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: always 0.
+ */
+static int
+batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hard_iface *primary_if,
+ struct hlist_head *head, int *idx_skip)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ if (idx++ < *idx_skip)
+ continue;
+ if (batadv_bla_backbone_dump_entry(msg, portid, seq,
+ primary_if, backbone_gw)) {
+ *idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ *idx_skip = idx;
+unlock:
+ rcu_read_unlock();
+ return 0;
+}
+
+/**
+ * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hashtable *hash;
+ struct batadv_priv *bat_priv;
+ int bucket = cb->args[0];
+ struct hlist_head *head;
+ int idx = cb->args[1];
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+ hash = bat_priv->bla.backbone_hash;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_bla_backbone_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ primary_if, head, &idx))
+ break;
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ ret = msg->len;
+
+out:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ return ret;
+}
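The backbone dump helpers above use the usual resumable netlink dump bookkeeping: cb->args[0] remembers the hash bucket and cb->args[1] the entry index inside the current bucket, so a dump that cannot fit everything into one message can continue where it stopped on the next callback invocation. The following stand-alone user-space toy (not kernel code; names such as toy_dump_bucket and the fixed "budget" are made up for illustration) sketches that resume logic in isolation.

#include <stdio.h>

#define BUCKETS    3
#define PER_BUCKET 4
#define BUDGET     5	/* entries that fit into one "message" */

/* Dump one bucket, skipping *idx_skip entries; return 0 when the bucket is
 * exhausted, 1 when the budget ran out and the caller must stop and resume
 * later.
 */
static int toy_dump_bucket(int bucket, int *idx_skip, int *budget)
{
	int idx = 0;
	int i;

	for (i = 0; i < PER_BUCKET; i++) {
		if (idx++ < *idx_skip)
			continue;
		if (*budget == 0) {
			*idx_skip = idx - 1;	/* resume at this entry */
			return 1;
		}
		printf("entry %d.%d\n", bucket, i);
		(*budget)--;
	}
	*idx_skip = 0;	/* bucket finished, start the next one at 0 */
	return 0;
}

int main(void)
{
	int args[2] = { 0, 0 };	/* mimics cb->args[0] / cb->args[1] */
	int round = 1;

	while (args[0] < BUCKETS) {
		int budget = BUDGET;

		printf("-- dump round %d --\n", round++);
		while (args[0] < BUCKETS) {
			if (toy_dump_bucket(args[0], &args[1], &budget))
				break;	/* message full, resume later */
			args[0]++;
		}
	}
	return 0;
}

Each "round" stands for one invocation of the dump callback; across rounds every entry is emitted exactly once.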
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 0f01daeb359e..1ae93e46fb98 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
struct net_device;
+struct netlink_callback;
struct seq_file;
struct sk_buff;
@@ -35,8 +36,10 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
int hdr_size);
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
void *offset);
+int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb);
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid);
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
@@ -47,7 +50,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
void batadv_bla_status_update(struct net_device *net_dev);
int batadv_bla_init(struct batadv_priv *bat_priv);
void batadv_bla_free(struct batadv_priv *bat_priv);
-
+int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
#define BATADV_BLA_CRC_INIT 0
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
@@ -112,6 +115,18 @@ static inline void batadv_bla_free(struct batadv_priv *bat_priv)
{
}
+static inline int batadv_bla_claim_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int batadv_bla_backbone_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 1d68b6e63b96..b4ffba7dd583 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -31,6 +31,7 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <linux/sysfs.h>
+#include <net/net_namespace.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
@@ -305,12 +306,16 @@ void batadv_debugfs_destroy(void)
*/
int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
{
+ struct net *net = dev_net(hard_iface->net_dev);
struct batadv_debuginfo **bat_debug;
struct dentry *file;
if (!batadv_debugfs)
goto out;
+ if (net != &init_net)
+ return 0;
+
hard_iface->debug_dir = debugfs_create_dir(hard_iface->net_dev->name,
batadv_debugfs);
if (!hard_iface->debug_dir)
@@ -341,6 +346,11 @@ out:
*/
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
{
+ struct net *net = dev_net(hard_iface->net_dev);
+
+ if (net != &init_net)
+ return;
+
if (batadv_debugfs) {
debugfs_remove_recursive(hard_iface->debug_dir);
hard_iface->debug_dir = NULL;
@@ -351,11 +361,15 @@ int batadv_debugfs_add_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_debuginfo **bat_debug;
+ struct net *net = dev_net(dev);
struct dentry *file;
if (!batadv_debugfs)
goto out;
+ if (net != &init_net)
+ return 0;
+
bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
if (!bat_priv->debug_dir)
goto out;
@@ -392,6 +406,10 @@ out:
void batadv_debugfs_del_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+
+ if (net != &init_net)
+ return;
batadv_debug_log_cleanup(bat_priv);
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 1ab4e2e63afc..c68ff3dcb926 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -26,7 +26,7 @@ struct net_device;
#define BATADV_DEBUGFS_SUBDIR "batman_adv"
-#if IS_ENABLED(CONFIG_DEBUG_FS)
+#if IS_ENABLED(CONFIG_BATMAN_ADV_DEBUGFS)
void batadv_debugfs_init(void);
void batadv_debugfs_destroy(void);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b1cc8bfe11ac..e257efdc5d03 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -343,8 +343,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
ether_addr_copy(dat_entry->mac_addr, mac_addr);
dat_entry->last_update = jiffies;
kref_init(&dat_entry->refcount);
- kref_get(&dat_entry->refcount);
+ kref_get(&dat_entry->refcount);
hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
batadv_hash_dat, dat_entry,
&dat_entry->hash_entry);
@@ -795,6 +795,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
batadv_dat_hash_free(bat_priv);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_dat_cache_seq_print_text - print the local DAT hash table
* @seq: seq file to print on
@@ -846,6 +847,7 @@ out:
batadv_hardif_put(primary_if);
return 0;
}
+#endif
/**
* batadv_arp_get_type - parse an ARP packet and gets the type
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 63a805d3f96e..de055d64debe 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
@@ -31,6 +32,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
@@ -39,13 +41,17 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/udp.h>
+#include <net/sock.h>
+#include <uapi/linux/batman_adv.h>
#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
+#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
+#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"
@@ -80,12 +86,12 @@ static void batadv_gw_node_release(struct kref *ref)
* batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
* @gw_node: gateway node to free
*/
-static void batadv_gw_node_put(struct batadv_gw_node *gw_node)
+void batadv_gw_node_put(struct batadv_gw_node *gw_node)
{
kref_put(&gw_node->refcount, batadv_gw_node_release);
}
-static struct batadv_gw_node *
+struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node;
@@ -164,86 +170,6 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
atomic_set(&bat_priv->gw.reselect, 1);
}
-static struct batadv_gw_node *
-batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
-{
- struct batadv_neigh_node *router;
- struct batadv_neigh_ifinfo *router_ifinfo;
- struct batadv_gw_node *gw_node, *curr_gw = NULL;
- u64 max_gw_factor = 0;
- u64 tmp_gw_factor = 0;
- u8 max_tq = 0;
- u8 tq_avg;
- struct batadv_orig_node *orig_node;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
- orig_node = gw_node->orig_node;
- router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
- if (!router)
- continue;
-
- router_ifinfo = batadv_neigh_ifinfo_get(router,
- BATADV_IF_DEFAULT);
- if (!router_ifinfo)
- goto next;
-
- if (!kref_get_unless_zero(&gw_node->refcount))
- goto next;
-
- tq_avg = router_ifinfo->bat_iv.tq_avg;
-
- switch (atomic_read(&bat_priv->gw.sel_class)) {
- case 1: /* fast connection */
- tmp_gw_factor = tq_avg * tq_avg;
- tmp_gw_factor *= gw_node->bandwidth_down;
- tmp_gw_factor *= 100 * 100;
- tmp_gw_factor >>= 18;
-
- if ((tmp_gw_factor > max_gw_factor) ||
- ((tmp_gw_factor == max_gw_factor) &&
- (tq_avg > max_tq))) {
- if (curr_gw)
- batadv_gw_node_put(curr_gw);
- curr_gw = gw_node;
- kref_get(&curr_gw->refcount);
- }
- break;
-
- default: /* 2: stable connection (use best statistic)
- * 3: fast-switch (use best statistic but change as
- * soon as a better gateway appears)
- * XX: late-switch (use best statistic but change as
- * soon as a better gateway appears which has
- * $routing_class more tq points)
- */
- if (tq_avg > max_tq) {
- if (curr_gw)
- batadv_gw_node_put(curr_gw);
- curr_gw = gw_node;
- kref_get(&curr_gw->refcount);
- }
- break;
- }
-
- if (tq_avg > max_tq)
- max_tq = tq_avg;
-
- if (tmp_gw_factor > max_gw_factor)
- max_gw_factor = tmp_gw_factor;
-
- batadv_gw_node_put(gw_node);
-
-next:
- batadv_neigh_node_put(router);
- if (router_ifinfo)
- batadv_neigh_ifinfo_put(router_ifinfo);
- }
- rcu_read_unlock();
-
- return curr_gw;
-}
-
/**
* batadv_gw_check_client_stop - check if client mode has been switched off
* @bat_priv: the bat priv with all the soft interface information
@@ -287,12 +213,19 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
goto out;
+ if (!bat_priv->algo_ops->gw.get_best_gw_node)
+ goto out;
+
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
goto out;
- next_gw = batadv_gw_get_best_gw_node(bat_priv);
+ /* if gw.reselect is set to 1 it means that a previous call to
+ * gw.is_eligible() said that we have a new best GW, therefore it can
+ * now be picked from the list and selected
+ */
+ next_gw = bat_priv->algo_ops->gw.get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
goto out;
@@ -360,70 +293,31 @@ out:
void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
- struct batadv_neigh_ifinfo *router_orig_tq = NULL;
- struct batadv_neigh_ifinfo *router_gw_tq = NULL;
struct batadv_orig_node *curr_gw_orig;
- struct batadv_neigh_node *router_gw = NULL;
- struct batadv_neigh_node *router_orig = NULL;
- u8 gw_tq_avg, orig_tq_avg;
+
+ /* abort immediately if the routing algorithm does not support gateway
+ * election
+ */
+ if (!bat_priv->algo_ops->gw.is_eligible)
+ return;
curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig)
goto reselect;
- router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
- if (!router_gw)
- goto reselect;
-
- router_gw_tq = batadv_neigh_ifinfo_get(router_gw,
- BATADV_IF_DEFAULT);
- if (!router_gw_tq)
- goto reselect;
-
/* this node already is the gateway */
if (curr_gw_orig == orig_node)
goto out;
- router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
- if (!router_orig)
+ if (!bat_priv->algo_ops->gw.is_eligible(bat_priv, curr_gw_orig,
+ orig_node))
goto out;
- router_orig_tq = batadv_neigh_ifinfo_get(router_orig,
- BATADV_IF_DEFAULT);
- if (!router_orig_tq)
- goto out;
-
- gw_tq_avg = router_gw_tq->bat_iv.tq_avg;
- orig_tq_avg = router_orig_tq->bat_iv.tq_avg;
-
- /* the TQ value has to be better */
- if (orig_tq_avg < gw_tq_avg)
- goto out;
-
- /* if the routing class is greater than 3 the value tells us how much
- * greater the TQ value of the new gateway must be
- */
- if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
- (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
- goto out;
-
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
- gw_tq_avg, orig_tq_avg);
-
reselect:
batadv_gw_reselect(bat_priv);
out:
if (curr_gw_orig)
batadv_orig_node_put(curr_gw_orig);
- if (router_gw)
- batadv_neigh_node_put(router_gw);
- if (router_orig)
- batadv_neigh_node_put(router_orig);
- if (router_gw_tq)
- batadv_neigh_ifinfo_put(router_gw_tq);
- if (router_orig_tq)
- batadv_neigh_ifinfo_put(router_orig_tq);
}
/**
@@ -445,14 +339,15 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
if (!gw_node)
return;
- kref_get(&orig_node->refcount);
+ kref_init(&gw_node->refcount);
INIT_HLIST_NODE(&gw_node->list);
+ kref_get(&orig_node->refcount);
gw_node->orig_node = orig_node;
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- kref_init(&gw_node->refcount);
spin_lock_bh(&bat_priv->gw.list_lock);
+ kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
spin_unlock_bh(&bat_priv->gw.list_lock);
@@ -463,6 +358,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
ntohl(gateway->bandwidth_down) % 10,
ntohl(gateway->bandwidth_up) / 10,
ntohl(gateway->bandwidth_up) % 10);
+
+ /* don't return reference to new gw_node */
+ batadv_gw_node_put(gw_node);
}
/**
@@ -472,9 +370,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
*
* Return: gateway node if found or NULL otherwise.
*/
-static struct batadv_gw_node *
-batadv_gw_node_get(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node)
+struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
@@ -585,81 +482,87 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
spin_unlock_bh(&bat_priv->gw.list_lock);
}
-/* fails if orig_node has no router */
-static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
- struct seq_file *seq,
- const struct batadv_gw_node *gw_node)
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
- struct batadv_gw_node *curr_gw;
- struct batadv_neigh_node *router;
- struct batadv_neigh_ifinfo *router_ifinfo = NULL;
- int ret = -1;
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hard_iface *primary_if;
- router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
- if (!router)
- goto out;
+ primary_if = batadv_seq_print_text_primary_if_get(seq);
+ if (!primary_if)
+ return 0;
- router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
- if (!router_ifinfo)
- goto out;
+ seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
+ BATADV_SOURCE_VERSION, primary_if->net_dev->name,
+ primary_if->net_dev->dev_addr, net_dev->name,
+ bat_priv->algo_ops->name);
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ batadv_hardif_put(primary_if);
- seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
- (curr_gw == gw_node ? "=>" : " "),
- gw_node->orig_node->orig,
- router_ifinfo->bat_iv.tq_avg, router->addr,
- router->if_incoming->net_dev->name,
- gw_node->bandwidth_down / 10,
- gw_node->bandwidth_down % 10,
- gw_node->bandwidth_up / 10,
- gw_node->bandwidth_up % 10);
- ret = seq_has_overflowed(seq) ? -1 : 0;
+ if (!bat_priv->algo_ops->gw.print) {
+ seq_puts(seq,
+ "No printing function for this routing protocol\n");
+ return 0;
+ }
- if (curr_gw)
- batadv_gw_node_put(curr_gw);
-out:
- if (router_ifinfo)
- batadv_neigh_ifinfo_put(router_ifinfo);
- if (router)
- batadv_neigh_node_put(router);
- return ret;
+ bat_priv->algo_ops->gw.print(bat_priv, seq);
+
+ return 0;
}
+#endif
-int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
+/**
+ * batadv_gw_dump - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ *
+ * Return: Error code, or length of message
+ */
+int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
- struct batadv_hard_iface *primary_if;
- struct batadv_gw_node *gw_node;
- int gw_count = 0;
-
- primary_if = batadv_seq_print_text_primary_if_get(seq);
- if (!primary_if)
+ struct batadv_hard_iface *primary_if = NULL;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ int ifindex;
+ int ret;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
goto out;
+ }
- seq_printf(seq,
- " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- BATADV_SOURCE_VERSION, primary_if->net_dev->name,
- primary_if->net_dev->dev_addr, net_dev->name);
+ bat_priv = netdev_priv(soft_iface);
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
- /* fails if orig_node has no router */
- if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
- continue;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
- gw_count++;
+ if (!bat_priv->algo_ops->gw.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
}
- rcu_read_unlock();
- if (gw_count == 0)
- seq_puts(seq, "No gateways in range ...\n");
+ bat_priv->algo_ops->gw.dump(msg, cb, bat_priv);
+
+ ret = msg->len;
out:
if (primary_if)
batadv_hardif_put(primary_if);
- return 0;
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ return ret;
}
/**
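The gateway election and dump paths above no longer hard-code B.A.T.M.A.N. IV metrics; they dispatch through the per-algorithm gw hooks (get_best_gw_node, is_eligible, print, dump) and bail out, or return -EOPNOTSUPP, when an algorithm leaves a hook unset. Below is a minimal user-space sketch of that "optional hook" dispatch; struct gw_ops, demo_select and gw_dump are made-up names, not batman-adv symbols.

#include <stdio.h>
#include <errno.h>

/* Optional per-algorithm hooks; any pointer may be NULL. */
struct gw_ops {
	const char *name;
	int (*select)(void);	/* pick the best gateway */
	int (*dump)(void);	/* dump the gateway list */
};

static int demo_select(void)
{
	printf("demo: selecting best gateway\n");
	return 0;
}

/* Check each hook before dispatching, mirroring the
 * "if (!bat_priv->algo_ops->gw.dump) ret = -EOPNOTSUPP;" pattern above.
 */
static int gw_dump(const struct gw_ops *ops)
{
	if (!ops->dump)
		return -EOPNOTSUPP;
	return ops->dump();
}

int main(void)
{
	struct gw_ops demo = { .name = "DEMO", .select = demo_select };

	if (demo.select)
		demo.select();

	if (gw_dump(&demo) == -EOPNOTSUPP)
		printf("%s: no dump hook, request rejected\n", demo.name);
	return 0;
}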
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 582dd8c413c8..859166d03561 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
struct batadv_tvlv_gateway_data;
+struct netlink_callback;
struct seq_file;
struct sk_buff;
@@ -39,10 +40,16 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
void batadv_gw_node_free(struct batadv_priv *bat_priv);
+void batadv_gw_node_put(struct batadv_gw_node *gw_node);
+struct batadv_gw_node *
+batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb);
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
u8 *chaddr);
+struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index d7bc6a87bcc9..21184810d89f 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -241,10 +241,9 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
batadv_gw_node_update(bat_priv, orig, &gateway);
- /* restart gateway selection if fast or late switching was enabled */
+ /* restart gateway selection */
if ((gateway.bandwidth_down != 0) &&
- (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT) &&
- (atomic_read(&bat_priv->gw.sel_class) > 2))
+ (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT))
batadv_gw_check_election(bat_priv, orig);
}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 1f9080840566..08ce36147c4c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -35,7 +35,8 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
@@ -85,25 +86,55 @@ out:
}
/**
+ * batadv_getlink_net - return link net namespace (or use fallback)
+ * @netdev: net_device to check
+ * @fallback_net: netns to return when get_link_net is not available for @netdev
+ *
+ * Return: result of rtnl_link_ops->get_link_net or @fallback_net
+ */
+static const struct net *batadv_getlink_net(const struct net_device *netdev,
+ const struct net *fallback_net)
+{
+ if (!netdev->rtnl_link_ops)
+ return fallback_net;
+
+ if (!netdev->rtnl_link_ops->get_link_net)
+ return fallback_net;
+
+ return netdev->rtnl_link_ops->get_link_net(netdev);
+}
+
+/**
* batadv_mutual_parents - check if two devices are each others parent
- * @dev1: 1st net_device
- * @dev2: 2nd net_device
+ * @dev1: 1st net dev
+ * @net1: 1st device's netns
+ * @dev2: 2nd net dev
+ * @net2: 2nd device's netns
*
* veth devices come in pairs and each is the parent of the other!
*
* Return: true if the devices are each others parent, otherwise false
*/
static bool batadv_mutual_parents(const struct net_device *dev1,
- const struct net_device *dev2)
+ const struct net *net1,
+ const struct net_device *dev2,
+ const struct net *net2)
{
int dev1_parent_iflink = dev_get_iflink(dev1);
int dev2_parent_iflink = dev_get_iflink(dev2);
+ const struct net *dev1_parent_net;
+ const struct net *dev2_parent_net;
+
+ dev1_parent_net = batadv_getlink_net(dev1, net1);
+ dev2_parent_net = batadv_getlink_net(dev2, net2);
if (!dev1_parent_iflink || !dev2_parent_iflink)
return false;
return (dev1_parent_iflink == dev2->ifindex) &&
- (dev2_parent_iflink == dev1->ifindex);
+ (dev2_parent_iflink == dev1->ifindex) &&
+ net_eq(dev1_parent_net, net2) &&
+ net_eq(dev2_parent_net, net1);
}
/**
@@ -121,8 +152,9 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
*/
static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
{
- struct net_device *parent_dev;
struct net *net = dev_net(net_dev);
+ struct net_device *parent_dev;
+ const struct net *parent_net;
bool ret;
/* check if this is a batman-adv mesh interface */
@@ -134,13 +166,16 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
dev_get_iflink(net_dev) == net_dev->ifindex)
return false;
+ parent_net = batadv_getlink_net(net_dev, net);
+
/* recurse over the parent device */
- parent_dev = __dev_get_by_index(net, dev_get_iflink(net_dev));
+ parent_dev = __dev_get_by_index((struct net *)parent_net,
+ dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
if (WARN(!parent_dev, "Cannot find parent device"))
return false;
- if (batadv_mutual_parents(net_dev, parent_dev))
+ if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
return false;
ret = batadv_is_on_batman_iface(parent_dev);
@@ -625,25 +660,6 @@ out:
batadv_hardif_put(primary_if);
}
-/**
- * batadv_hardif_remove_interface_finish - cleans up the remains of a hardif
- * @work: work queue item
- *
- * Free the parts of the hard interface which can not be removed under
- * rtnl lock (to prevent deadlock situations).
- */
-static void batadv_hardif_remove_interface_finish(struct work_struct *work)
-{
- struct batadv_hard_iface *hard_iface;
-
- hard_iface = container_of(work, struct batadv_hard_iface,
- cleanup_work);
-
- batadv_debugfs_del_hardif(hard_iface);
- batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
- batadv_hardif_put(hard_iface);
-}
-
static struct batadv_hard_iface *
batadv_hardif_add_interface(struct net_device *net_dev)
{
@@ -676,10 +692,9 @@ batadv_hardif_add_interface(struct net_device *net_dev)
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
- INIT_WORK(&hard_iface->cleanup_work,
- batadv_hardif_remove_interface_finish);
spin_lock_init(&hard_iface->neigh_list_lock);
+ kref_init(&hard_iface->refcount);
hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT;
if (batadv_is_wifi_netdev(net_dev))
@@ -687,11 +702,8 @@ batadv_hardif_add_interface(struct net_device *net_dev)
batadv_v_hardif_init(hard_iface);
- /* extra reference for return */
- kref_init(&hard_iface->refcount);
- kref_get(&hard_iface->refcount);
-
batadv_check_known_mac_addr(hard_iface->net_dev);
+ kref_get(&hard_iface->refcount);
list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
return hard_iface;
@@ -713,13 +725,15 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
/* first deactivate interface */
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
batadv_hardif_disable_interface(hard_iface,
- BATADV_IF_CLEANUP_AUTO);
+ BATADV_IF_CLEANUP_KEEP);
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
return;
hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
- queue_work(batadv_event_workqueue, &hard_iface->cleanup_work);
+ batadv_debugfs_del_hardif(hard_iface);
+ batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
+ batadv_hardif_put(hard_iface);
}
void batadv_hardif_remove_interfaces(void)
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 618d5de06f20..e44a7da51431 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -26,9 +26,25 @@ struct batadv_icmp_header;
#define BATADV_ICMP_SOCKET "socket"
-void batadv_socket_init(void);
int batadv_socket_setup(struct batadv_priv *bat_priv);
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+void batadv_socket_init(void);
void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
size_t icmp_len);
+#else
+
+static inline void batadv_socket_init(void)
+{
+}
+
+static inline void
+batadv_socket_receive_packet(struct batadv_icmp_header *icmph, size_t icmp_len)
+{
+}
+
+#endif
+
#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index fe4c5e29f96b..2c017ab47557 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -82,6 +82,12 @@ static void batadv_recv_handler_init(void);
static int __init batadv_init(void)
{
+ int ret;
+
+ ret = batadv_tt_cache_init();
+ if (ret < 0)
+ return ret;
+
INIT_LIST_HEAD(&batadv_hardif_list);
batadv_algo_init();
@@ -93,9 +99,8 @@ static int __init batadv_init(void)
batadv_tp_meter_init();
batadv_event_workqueue = create_singlethread_workqueue("bat_events");
-
if (!batadv_event_workqueue)
- return -ENOMEM;
+ goto err_create_wq;
batadv_socket_init();
batadv_debugfs_init();
@@ -108,6 +113,11 @@ static int __init batadv_init(void)
BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
return 0;
+
+err_create_wq:
+ batadv_tt_cache_destroy();
+
+ return -ENOMEM;
}
static void __exit batadv_exit(void)
@@ -123,6 +133,8 @@ static void __exit batadv_exit(void)
batadv_event_workqueue = NULL;
rcu_barrier();
+
+ batadv_tt_cache_destroy();
}
int batadv_mesh_init(struct net_device *soft_iface)
@@ -270,6 +282,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
return is_my_mac;
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_seq_print_text_primary_if_get - called from debugfs table printing
* function that requires the primary interface
@@ -305,6 +318,7 @@ batadv_seq_print_text_primary_if_get(struct seq_file *seq)
out:
return primary_if;
}
+#endif
/**
* batadv_max_header_len - calculate maximum encapsulation overhead for a
@@ -638,3 +652,4 @@ MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);
+MODULE_ALIAS_RTNL_LINK("batadv");
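batadv_init() above gains a second initialisation step (the TT kmem caches) and with it a goto-based unwind path, so a later workqueue allocation failure also tears the caches down again. The same staged setup/teardown shape, reduced to placeholder step names, looks like this in isolation.

#include <stdio.h>

static int step_a_init(void) { printf("step A up\n"); return 0; }
static void step_a_destroy(void) { printf("step A down\n"); }
static int step_b_init(void) { printf("step B failed\n"); return -1; }

/* A failure in a later step unwinds every earlier step in reverse order. */
static int demo_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret < 0)
		return ret;

	ret = step_b_init();
	if (ret < 0)
		goto err_step_b;

	return 0;

err_step_b:
	step_a_destroy();
	return ret;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}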
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 06a860845434..09af21e27639 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.3"
+#define BATADV_SOURCE_VERSION "2016.4"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index cc915073a753..13661f43386f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -528,7 +528,7 @@ update:
}
return !(mcast_data.flags &
- (BATADV_MCAST_WANT_ALL_IPV4 + BATADV_MCAST_WANT_ALL_IPV6));
+ (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
}
/**
@@ -1134,6 +1134,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_mcast_flags_print_header - print own mcast flags to debugfs table
* @bat_priv: the bat priv with all the soft interface information
@@ -1234,6 +1235,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
return 0;
}
+#endif
/**
* batadv_mcast_free - free the multicast optimizations structures
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 231f8eaf075b..64cb6acbe0a6 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -18,6 +18,8 @@
#include "netlink.h"
#include "main.h"
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genetlink.h>
@@ -26,24 +28,33 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <net/genetlink.h>
#include <net/netlink.h>
+#include <net/sock.h>
#include <uapi/linux/batman_adv.h>
+#include "bat_algo.h"
+#include "bridge_loop_avoidance.h"
+#include "gateway_client.h"
#include "hard-interface.h"
+#include "originator.h"
+#include "packet.h"
#include "soft-interface.h"
#include "tp_meter.h"
+#include "translation-table.h"
-struct sk_buff;
-
-static struct genl_family batadv_netlink_family = {
+struct genl_family batadv_netlink_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = BATADV_NL_NAME,
.version = 1,
.maxattr = BATADV_ATTR_MAX,
+ .netnsok = true,
};
/* multicast groups */
@@ -51,11 +62,11 @@ enum batadv_netlink_multicast_groups {
BATADV_NL_MCGRP_TPMETER,
};
-static struct genl_multicast_group batadv_netlink_mcgrps[] = {
+static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
[BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
};
-static struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
+static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
[BATADV_ATTR_VERSION] = { .type = NLA_STRING },
[BATADV_ATTR_ALGO_NAME] = { .type = NLA_STRING },
[BATADV_ATTR_MESH_IFINDEX] = { .type = NLA_U32 },
@@ -69,9 +80,44 @@ static struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
[BATADV_ATTR_TPMETER_TEST_TIME] = { .type = NLA_U32 },
[BATADV_ATTR_TPMETER_BYTES] = { .type = NLA_U64 },
[BATADV_ATTR_TPMETER_COOKIE] = { .type = NLA_U32 },
+ [BATADV_ATTR_ACTIVE] = { .type = NLA_FLAG },
+ [BATADV_ATTR_TT_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_TT_TTVN] = { .type = NLA_U8 },
+ [BATADV_ATTR_TT_LAST_TTVN] = { .type = NLA_U8 },
+ [BATADV_ATTR_TT_CRC32] = { .type = NLA_U32 },
+ [BATADV_ATTR_TT_VID] = { .type = NLA_U16 },
+ [BATADV_ATTR_TT_FLAGS] = { .type = NLA_U32 },
+ [BATADV_ATTR_FLAG_BEST] = { .type = NLA_FLAG },
+ [BATADV_ATTR_LAST_SEEN_MSECS] = { .type = NLA_U32 },
+ [BATADV_ATTR_NEIGH_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_TQ] = { .type = NLA_U8 },
+ [BATADV_ATTR_THROUGHPUT] = { .type = NLA_U32 },
+ [BATADV_ATTR_BANDWIDTH_UP] = { .type = NLA_U32 },
+ [BATADV_ATTR_BANDWIDTH_DOWN] = { .type = NLA_U32 },
+ [BATADV_ATTR_ROUTER] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_OWN] = { .type = NLA_FLAG },
+ [BATADV_ATTR_BLA_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_VID] = { .type = NLA_U16 },
+ [BATADV_ATTR_BLA_BACKBONE] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_CRC] = { .type = NLA_U16 },
};
/**
+ * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * @nlh: Message header
+ * @attrtype: Attribute which holds an interface index
+ *
+ * Return: interface index, or 0.
+ */
+int
+batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
+{
+ struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
+
+ return attr ? nla_get_u32(attr) : 0;
+}
+
+/**
* batadv_netlink_mesh_info_put - fill in generic information about mesh
* interface
* @msg: netlink message to be sent back
@@ -93,8 +139,16 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) ||
nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) ||
nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
- soft_iface->dev_addr))
+ soft_iface->dev_addr) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_TTVN,
+ (u8)atomic_read(&bat_priv->tt.vn)))
+ goto out;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ ntohs(bat_priv->bla.claim_dest.group)))
goto out;
+#endif
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) {
@@ -380,6 +434,106 @@ out:
return ret;
}
+/**
+ * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @hard_iface: Hard interface to dump
+ *
+ * Return: error code, or 0 on success
+ */
+static int
+batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct net_device *net_dev = hard_iface->net_dev;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_HARDIFS);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ net_dev->ifindex) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ net_dev->name) ||
+ nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
+ net_dev->dev_addr))
+ goto nla_put_failure;
+
+ if (hard_iface->if_status == BATADV_IF_ACTIVE) {
+ if (nla_put_flag(msg, BATADV_ATTR_ACTIVE))
+ goto nla_put_failure;
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_dump_hardifs - Dump all hard interfaces into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: error code, or length of reply message on success
+ */
+static int
+batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hard_iface *hard_iface;
+ int ifindex;
+ int portid = NETLINK_CB(cb->skb).portid;
+ int seq = cb->nlh->nlmsg_seq;
+ int skip = cb->args[0];
+ int i = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface)
+ return -ENODEV;
+
+ if (!batadv_softif_is_valid(soft_iface)) {
+ dev_put(soft_iface);
+ return -ENODEV;
+ }
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ if (i++ < skip)
+ continue;
+
+ if (batadv_netlink_dump_hardif_entry(msg, portid, seq,
+ hard_iface)) {
+ i--;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ dev_put(soft_iface);
+
+ cb->args[0] = i;
+
+ return msg->len;
+}
+
static struct genl_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_GET_MESH_INFO,
@@ -399,6 +553,61 @@ static struct genl_ops batadv_netlink_ops[] = {
.policy = batadv_netlink_policy,
.doit = batadv_netlink_tp_meter_cancel,
},
+ {
+ .cmd = BATADV_CMD_GET_ROUTING_ALGOS,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_algo_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_HARDIFS,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_netlink_dump_hardifs,
+ },
+ {
+ .cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_tt_local_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_tt_global_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_ORIGINATORS,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_orig_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_NEIGHBORS,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_hardif_neigh_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_GATEWAYS,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_gw_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_BLA_CLAIM,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_bla_claim_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_BLA_BACKBONE,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .dumpit = batadv_bla_backbone_dump,
+ },
+
};
/**
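With the dumpit handlers registered above, the tables become queryable over generic netlink. The following user-space sketch uses libnl-genl-3 to request BATADV_CMD_GET_HARDIFS for bat0 and print the hard-interface names; the attribute and command constants come from the uapi header added by this series, but treat the program as an untested illustration with most error handling omitted (the dump ops carry GENL_ADMIN_PERM, so it needs CAP_NET_ADMIN).

/* build: cc demo.c $(pkg-config --cflags --libs libnl-genl-3.0); untested sketch */
#include <stdio.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/batman_adv.h>

static int dump_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[BATADV_ATTR_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, BATADV_ATTR_MAX, NULL))
		return NL_SKIP;

	if (attrs[BATADV_ATTR_HARD_IFNAME])
		printf("hardif: %s%s\n",
		       nla_get_string(attrs[BATADV_ATTR_HARD_IFNAME]),
		       attrs[BATADV_ATTR_ACTIVE] ? " (active)" : "");
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, BATADV_NL_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, BATADV_CMD_GET_HARDIFS, 1);
	nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, dump_cb, NULL);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}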
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index 945653ab58c6..52eb16281aba 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -21,12 +21,18 @@
#include "main.h"
#include <linux/types.h>
+#include <net/genetlink.h>
+
+struct nlmsghdr;
void batadv_netlink_register(void);
void batadv_netlink_unregister(void);
+int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype);
int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
u8 result, u32 test_time, u64 total_bytes,
u32 cookie);
+extern struct genl_family batadv_netlink_family;
+
#endif /* _NET_BATMAN_ADV_NETLINK_H_ */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 293ef4ffd4e1..e3baf697a35c 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -856,14 +856,12 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
if (!nc_node)
return NULL;
- kref_get(&orig_neigh_node->refcount);
-
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
+ kref_init(&nc_node->refcount);
ether_addr_copy(nc_node->addr, orig_node->orig);
+ kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
- kref_init(&nc_node->refcount);
- kref_get(&nc_node->refcount);
/* Select ingoing or outgoing coding node */
if (in_coding) {
@@ -879,6 +877,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
/* Add nc_node to orig_node */
spin_lock_bh(lock);
+ kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
spin_unlock_bh(lock);
@@ -979,7 +978,6 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
INIT_LIST_HEAD(&nc_path->packet_list);
spin_lock_init(&nc_path->packet_list_lock);
kref_init(&nc_path->refcount);
- kref_get(&nc_path->refcount);
nc_path->last_valid = jiffies;
ether_addr_copy(nc_path->next_hop, dst);
ether_addr_copy(nc_path->prev_hop, src);
@@ -989,6 +987,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
nc_path->next_hop);
/* Add nc_path to hash table */
+ kref_get(&nc_path->refcount);
hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
batadv_nc_hash_choose, &nc_path_key,
&nc_path->hash_entry);
@@ -1882,6 +1881,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
batadv_hash_destroy(bat_priv->nc.decoding_hash);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_nc_nodes_seq_print_text - print the nc node information
* @seq: seq file to print on
@@ -1981,3 +1981,4 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
out:
return -ENOMEM;
}
+#endif
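Several hunks in this patch set (dat_entry, gw_node, nc_node, nc_path, softif_vlan, orig_ifinfo, neigh_ifinfo, orig_node, hard_iface) move the extra kref_get() away from kref_init() to the point where a second reference is actually handed out, typically just before the object is published on a list or hash under its lock. A small user-space counter sketch of that convention follows; toy_obj and its helpers are illustrative stand-ins, not the kernel kref API.

#include <stdio.h>

struct toy_obj {
	int refcount;
	const char *name;
};

static void toy_init(struct toy_obj *obj, const char *name)
{
	obj->refcount = 1;	/* like kref_init(): the creator's reference */
	obj->name = name;
}

static void toy_get(struct toy_obj *obj)
{
	obj->refcount++;	/* like kref_get(): one per additional holder */
}

static void toy_put(struct toy_obj *obj)
{
	if (--obj->refcount == 0)
		printf("%s released\n", obj->name);
}

int main(void)
{
	struct toy_obj obj;

	toy_init(&obj, "entry");	/* creator holds one reference */
	toy_get(&obj);			/* taken just before publishing to a list */
	printf("%s published, refcount=%d\n", obj.name, obj.refcount);

	toy_put(&obj);			/* creator drops its reference ... */
	toy_put(&obj);			/* ... list removal drops the last one */
	return 0;
}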
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 3940b5d24421..5f3bfc41aeb1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,11 +28,15 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
+#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <net/sock.h>
+#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "distributed-arp-table.h"
@@ -42,8 +46,10 @@
#include "hash.h"
#include "log.h"
#include "multicast.h"
+#include "netlink.h"
#include "network-coding.h"
#include "routing.h"
+#include "soft-interface.h"
#include "translation-table.h"
/* hash class keys */
@@ -127,9 +133,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
goto out;
kref_init(&vlan->refcount);
- kref_get(&vlan->refcount);
vlan->vid = vid;
+ kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
out:
@@ -380,6 +386,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
orig_ifinfo->if_outgoing = if_outgoing;
INIT_HLIST_NODE(&orig_ifinfo->list);
kref_init(&orig_ifinfo->refcount);
+
kref_get(&orig_ifinfo->refcount);
hlist_add_head_rcu(&orig_ifinfo->list,
&orig_node->ifinfo_list);
@@ -453,9 +460,9 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
INIT_HLIST_NODE(&neigh_ifinfo->list);
kref_init(&neigh_ifinfo->refcount);
- kref_get(&neigh_ifinfo->refcount);
neigh_ifinfo->if_outgoing = if_outgoing;
+ kref_get(&neigh_ifinfo->refcount);
hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
out:
@@ -647,8 +654,8 @@ batadv_neigh_node_create(struct batadv_orig_node *orig_node,
/* extra reference for return */
kref_init(&neigh_node->refcount);
- kref_get(&neigh_node->refcount);
+ kref_get(&neigh_node->refcount);
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
@@ -686,6 +693,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
* @seq: neighbour table seq_file struct
@@ -719,6 +727,84 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
bat_priv->algo_ops->neigh.print(bat_priv, seq);
return 0;
}
+#endif
+
+/**
+ * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
+ * outgoing interface
+ * @msg: message to dump into
+ * @cb: parameters for the dump
+ *
+ * Return: 0 or error value
+ */
+int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct net_device *hard_iface = NULL;
+ struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ int ret;
+ int ifindex, hard_ifindex;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_HARD_IFINDEX);
+ if (hard_ifindex) {
+ hard_iface = dev_get_by_index(net, hard_ifindex);
+ if (hard_iface)
+ hardif = batadv_hardif_get_by_netdev(hard_iface);
+
+ if (!hardif) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (hardif->soft_iface != soft_iface) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
+
+ if (!bat_priv->algo_ops->neigh.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);
+
+ ret = msg->len;
+
+ out:
+ if (hardif)
+ batadv_hardif_put(hardif);
+ if (hard_iface)
+ dev_put(hard_iface);
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ return ret;
+}
/**
* batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
@@ -905,7 +991,6 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
/* extra reference for return */
kref_init(&orig_node->refcount);
- kref_get(&orig_node->refcount);
orig_node->bat_priv = bat_priv;
ether_addr_copy(orig_node->orig, addr);
@@ -1256,6 +1341,7 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
_batadv_purge_orig(bat_priv);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1329,6 +1415,84 @@ out:
batadv_hardif_put(hard_iface);
return 0;
}
+#endif
+
+/**
+ * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * outgoing interface
+ * @msg: message to dump into
+ * @cb: parameters for the dump
+ *
+ * Return: 0 or error value
+ */
+int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct net_device *hard_iface = NULL;
+ struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ int ret;
+ int ifindex, hard_ifindex;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_HARD_IFINDEX);
+ if (hard_ifindex) {
+ hard_iface = dev_get_by_index(net, hard_ifindex);
+ if (hard_iface)
+ hardif = batadv_hardif_get_by_netdev(hard_iface);
+
+ if (!hardif) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (hardif->soft_iface != soft_iface) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
+
+ if (!bat_priv->algo_ops->orig.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif);
+
+ ret = msg->len;
+
+ out:
+ if (hardif)
+ batadv_hardif_put(hardif);
+ if (hard_iface)
+ dev_put(hard_iface);
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ return ret;
+}
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
int max_if_num)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 566306bf05dc..ebc56183f358 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -31,7 +31,9 @@
#include "hash.h"
+struct netlink_callback;
struct seq_file;
+struct sk_buff;
bool batadv_compare_orig(const struct hlist_node *node, const void *data2);
int batadv_originator_init(struct batadv_priv *bat_priv);
@@ -61,6 +63,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
struct batadv_hard_iface *if_outgoing);
void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo);
+int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset);
struct batadv_orig_ifinfo *
@@ -72,6 +75,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo);
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
int max_if_num);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 6b011ff64dd8..6afc0b86950e 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -129,42 +129,6 @@ enum batadv_tt_data_flags {
};
/**
- * enum batadv_tt_client_flags - TT client specific flags
- * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
- * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
- * update telling its new real location has not been received/sent yet
- * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
- * This information is used by the "AP Isolation" feature
- * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
- * information is used by the Extended Isolation feature
- * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
- * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
- * not been announced yet
- * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
- * in the table for one more originator interval for consistency purposes
- * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
- * the network but no nnode has already announced it
- *
- * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
- * Bits from 8 to 15 are called _local flags_ because they are used for local
- * computations only.
- *
- * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
- * the other nodes in the network. To achieve this goal these flags are included
- * in the TT CRC computation.
- */
-enum batadv_tt_client_flags {
- BATADV_TT_CLIENT_DEL = BIT(0),
- BATADV_TT_CLIENT_ROAM = BIT(1),
- BATADV_TT_CLIENT_WIFI = BIT(4),
- BATADV_TT_CLIENT_ISOLA = BIT(5),
- BATADV_TT_CLIENT_NOPURGE = BIT(8),
- BATADV_TT_CLIENT_NEW = BIT(9),
- BATADV_TT_CLIENT_PENDING = BIT(10),
- BATADV_TT_CLIENT_TEMP = BIT(11),
-};
-
-/**
* enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
* @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
*/
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 3d199478c405..7e8dc648b95a 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -74,11 +74,23 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
if (!orig_ifinfo)
return;
- rcu_read_lock();
- curr_router = rcu_dereference(orig_ifinfo->router);
- if (curr_router && !kref_get_unless_zero(&curr_router->refcount))
- curr_router = NULL;
- rcu_read_unlock();
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ /* curr_router used earlier may not be the current orig_ifinfo->router
+ * anymore because it was dereferenced outside of the neigh_list_lock
+ * protected region. After the new best neighbor has replaced the current
+ * best neighbor the reference counter needs to decrease. Consequently,
+ * the code needs to ensure the curr_router variable contains a pointer
+ * to the replaced best neighbor.
+ */
+ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
+ /* increase refcount of new best neighbor */
+ if (neigh_node)
+ kref_get(&neigh_node->refcount);
+
+ rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+ batadv_orig_ifinfo_put(orig_ifinfo);
/* route deleted */
if ((curr_router) && (!neigh_node)) {
@@ -100,27 +112,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
curr_router->addr);
}
- if (curr_router)
- batadv_neigh_node_put(curr_router);
-
- spin_lock_bh(&orig_node->neigh_list_lock);
- /* curr_router used earlier may not be the current orig_ifinfo->router
- * anymore because it was dereferenced outside of the neigh_list_lock
- * protected region. After the new best neighbor has replace the current
- * best neighbor the reference counter needs to decrease. Consequently,
- * the code needs to ensure the curr_router variable contains a pointer
- * to the replaced best neighbor.
- */
- curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
-
- /* increase refcount of new best neighbor */
- if (neigh_node)
- kref_get(&neigh_node->refcount);
-
- rcu_assign_pointer(orig_ifinfo->router, neigh_node);
- spin_unlock_bh(&orig_node->neigh_list_lock);
- batadv_orig_ifinfo_put(orig_ifinfo);
-
/* decrease refcount of previous best neighbor */
if (curr_router)
batadv_neigh_node_put(curr_router);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 6191159484df..8d4e1f578574 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -315,8 +315,7 @@ out:
*
* Wrap the given skb into a batman-adv unicast or unicast-4addr header
* depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
- * as packet_type. Then send this frame to the given orig_node and release a
- * reference to this orig_node.
+ * as packet_type. Then send this frame to the given orig_node.
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
@@ -370,8 +369,6 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
ret = NET_XMIT_SUCCESS;
out:
- if (orig_node)
- batadv_orig_node_put(orig_node);
if (ret == NET_XMIT_DROP)
kfree_skb(skb);
return ret;
@@ -403,6 +400,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct batadv_orig_node *orig_node;
u8 *src, *dst;
+ int ret;
src = ethhdr->h_source;
dst = ethhdr->h_dest;
@@ -414,8 +412,13 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
}
orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
- return batadv_send_skb_unicast(bat_priv, skb, packet_type,
- packet_subtype, orig_node, vid);
+ ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
+ packet_subtype, orig_node, vid);
+
+ if (orig_node)
+ batadv_orig_node_put(orig_node);
+
+ return ret;
}
/**
@@ -433,12 +436,25 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
{
struct batadv_orig_node *orig_node;
+ int ret;
orig_node = batadv_gw_get_selected_orig(bat_priv);
- return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
- BATADV_P_DATA, orig_node, vid);
+ ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+ BATADV_P_DATA, orig_node, vid);
+
+ if (orig_node)
+ batadv_orig_node_put(orig_node);
+
+ return ret;
}
+/**
+ * batadv_forw_packet_free - free a forwarding packet
+ * @forw_packet: The packet to free
+ *
+ * This frees a forwarding packet and releases any resources it might
+ * have claimed.
+ */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
kfree_skb(forw_packet->skb);
@@ -446,9 +462,73 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
batadv_hardif_put(forw_packet->if_incoming);
if (forw_packet->if_outgoing)
batadv_hardif_put(forw_packet->if_outgoing);
+ if (forw_packet->queue_left)
+ atomic_inc(forw_packet->queue_left);
kfree(forw_packet);
}
+/**
+ * batadv_forw_packet_alloc - allocate a forwarding packet
+ * @if_incoming: The (optional) if_incoming to be grabbed
+ * @if_outgoing: The (optional) if_outgoing to be grabbed
+ * @queue_left: The (optional) queue counter to decrease
+ * @bat_priv: The bat_priv for the mesh of this forw_packet
+ *
+ * Allocates a forwarding packet and tries to get a reference to the
+ * (optional) if_incoming, if_outgoing and queue_left. If queue_left
+ * is NULL then bat_priv is optional, too.
+ *
+ * Return: An allocated forwarding packet on success, NULL otherwise.
+ */
+struct batadv_forw_packet *
+batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ atomic_t *queue_left,
+ struct batadv_priv *bat_priv)
+{
+ struct batadv_forw_packet *forw_packet;
+ const char *qname;
+
+ if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
+ qname = "unknown";
+
+ if (queue_left == &bat_priv->bcast_queue_left)
+ qname = "bcast";
+
+ if (queue_left == &bat_priv->batman_queue_left)
+ qname = "batman";
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s queue is full\n", qname);
+
+ return NULL;
+ }
+
+ forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+ if (!forw_packet)
+ goto err;
+
+ if (if_incoming)
+ kref_get(&if_incoming->refcount);
+
+ if (if_outgoing)
+ kref_get(&if_outgoing->refcount);
+
+ forw_packet->skb = NULL;
+ forw_packet->queue_left = queue_left;
+ forw_packet->if_incoming = if_incoming;
+ forw_packet->if_outgoing = if_outgoing;
+ forw_packet->num_packets = 0;
+
+ return forw_packet;
+
+err:
+ if (queue_left)
+ atomic_inc(queue_left);
+
+ return NULL;
+}
+
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet,
@@ -487,24 +567,20 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet;
struct sk_buff *newskb;
- if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "bcast packet queue full\n");
- goto out;
- }
-
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
- goto out_and_inc;
-
- forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+ goto err;
+ forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
+ &bat_priv->bcast_queue_left,
+ bat_priv);
+ batadv_hardif_put(primary_if);
if (!forw_packet)
- goto out_and_inc;
+ goto err;
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb)
- goto packet_free;
+ goto err_packet_free;
/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct batadv_bcast_packet *)newskb->data;
@@ -513,11 +589,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
skb_reset_mac_header(newskb);
forw_packet->skb = newskb;
- forw_packet->if_incoming = primary_if;
- forw_packet->if_outgoing = NULL;
-
- /* how often did we send the bcast packet ? */
- forw_packet->num_packets = 0;
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
@@ -525,13 +596,9 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
return NETDEV_TX_OK;
-packet_free:
- kfree(forw_packet);
-out_and_inc:
- atomic_inc(&bat_priv->bcast_queue_left);
-out:
- if (primary_if)
- batadv_hardif_put(primary_if);
+err_packet_free:
+ batadv_forw_packet_free(forw_packet);
+err:
return NETDEV_TX_BUSY;
}
@@ -592,7 +659,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
out:
batadv_forw_packet_free(forw_packet);
- atomic_inc(&bat_priv->bcast_queue_left);
}
void
@@ -633,9 +699,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- if (!forw_packet->own)
- atomic_inc(&bat_priv->bcast_queue_left);
-
batadv_forw_packet_free(forw_packet);
}
}
@@ -663,9 +726,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- if (!forw_packet->own)
- atomic_inc(&bat_priv->batman_queue_left);
-
batadv_forw_packet_free(forw_packet);
}
}
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 7cecb7563b45..999f78683d9e 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -28,6 +28,12 @@
struct sk_buff;
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet);
+struct batadv_forw_packet *
+batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ atomic_t *queue_left,
+ struct batadv_priv *bat_priv);
+
int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 7527c0652dd5..49e16b6e0ba3 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
@@ -46,7 +47,6 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
@@ -57,6 +57,7 @@
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
+#include "originator.h"
#include "packet.h"
#include "send.h"
#include "sysfs.h"
@@ -377,6 +378,8 @@ dropped:
dropped_freed:
batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
+ if (mcast_single_orig)
+ batadv_orig_node_put(mcast_single_orig);
if (primary_if)
batadv_hardif_put(primary_if);
return NETDEV_TX_OK;
@@ -591,6 +594,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
}
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
@@ -601,6 +605,9 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
bat_priv->soft_iface->dev_addr, vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+ /* don't return reference to new softif_vlan */
+ batadv_softif_vlan_put(vlan);
+
return 0;
}
@@ -747,34 +754,6 @@ static void batadv_set_lockdep_class(struct net_device *dev)
}
/**
- * batadv_softif_destroy_finish - cleans up the remains of a softif
- * @work: work queue item
- *
- * Free the parts of the soft interface which can not be removed under
- * rtnl lock (to prevent deadlock situations).
- */
-static void batadv_softif_destroy_finish(struct work_struct *work)
-{
- struct batadv_softif_vlan *vlan;
- struct batadv_priv *bat_priv;
- struct net_device *soft_iface;
-
- bat_priv = container_of(work, struct batadv_priv,
- cleanup_work);
- soft_iface = bat_priv->soft_iface;
-
- /* destroy the "untagged" VLAN */
- vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
- if (vlan) {
- batadv_softif_destroy_vlan(bat_priv, vlan);
- batadv_softif_vlan_put(vlan);
- }
-
- batadv_sysfs_del_meshif(soft_iface);
- unregister_netdev(soft_iface);
-}
-
-/**
* batadv_softif_init_late - late stage initialization of soft interface
* @dev: registered network device to modify
*
@@ -791,7 +770,6 @@ static int batadv_softif_init_late(struct net_device *dev)
bat_priv = netdev_priv(dev);
bat_priv->soft_iface = dev;
- INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
/* batadv_interface_stats() needs to be available as soon as
* register_netdevice() has been called
@@ -1028,8 +1006,19 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_softif_vlan *vlan;
- queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
+ ASSERT_RTNL();
+
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_put(vlan);
+ }
+
+ batadv_sysfs_del_meshif(soft_iface);
+ unregister_netdevice(soft_iface);
}
/**
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index fe9ca94ddee2..02d96f224c60 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -37,6 +37,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/stringify.h>
+#include <linux/workqueue.h>
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
@@ -428,6 +429,13 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
int bytes_written;
+ /* GW mode is not available if the routing algorithm in use does not
+ * implement the GW API
+ */
+ if (!bat_priv->algo_ops->gw.get_best_gw_node ||
+ !bat_priv->algo_ops->gw.is_eligible)
+ return -ENOENT;
+
switch (atomic_read(&bat_priv->gw.mode)) {
case BATADV_GW_MODE_CLIENT:
bytes_written = sprintf(buff, "%s\n",
@@ -455,6 +463,13 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
char *curr_gw_mode_str;
int gw_mode_tmp = -1;
+ /* toggling GW mode is allowed only if the routing algorithm in use
+ * provides the GW API
+ */
+ if (!bat_priv->algo_ops->gw.get_best_gw_node ||
+ !bat_priv->algo_ops->gw.is_eligible)
+ return -EINVAL;
+
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
@@ -514,6 +529,50 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
return count;
}
+static ssize_t batadv_show_gw_sel_class(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+
+ /* GW selection class is not available if the routing algorithm in use
+ * does not implement the GW API
+ */
+ if (!bat_priv->algo_ops->gw.get_best_gw_node ||
+ !bat_priv->algo_ops->gw.is_eligible)
+ return -ENOENT;
+
+ if (bat_priv->algo_ops->gw.show_sel_class)
+ return bat_priv->algo_ops->gw.show_sel_class(bat_priv, buff);
+
+ return sprintf(buff, "%i\n", atomic_read(&bat_priv->gw.sel_class));
+}
+
+static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+
+ /* setting the GW selection class is allowed only if the routing
+ * algorithm in use implements the GW API
+ */
+ if (!bat_priv->algo_ops->gw.get_best_gw_node ||
+ !bat_priv->algo_ops->gw.is_eligible)
+ return -EINVAL;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (bat_priv->algo_ops->gw.store_sel_class)
+ return bat_priv->algo_ops->gw.store_sel_class(bat_priv, buff,
+ count);
+
+ return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
+ batadv_post_gw_reselect, attr,
+ &bat_priv->gw.sel_class,
+ bat_priv->soft_iface);
+}
+
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
struct attribute *attr, char *buff)
{
@@ -625,8 +684,8 @@ BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR,
2 * BATADV_JITTER, INT_MAX, NULL);
BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0,
BATADV_TQ_MAX_VALUE, NULL);
-BATADV_ATTR_SIF_UINT(gw_sel_class, gw.sel_class, S_IRUGO | S_IWUSR, 1,
- BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect);
+static BATADV_ATTR(gw_sel_class, S_IRUGO | S_IWUSR, batadv_show_gw_sel_class,
+ batadv_store_gw_sel_class);
static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
batadv_store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_MCAST
@@ -712,6 +771,8 @@ rem_attr:
for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+ kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
+ kobject_del(bat_priv->mesh_obj);
kobject_put(bat_priv->mesh_obj);
bat_priv->mesh_obj = NULL;
out:
@@ -726,6 +787,8 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+ kobject_uevent(bat_priv->mesh_obj, KOBJ_REMOVE);
+ kobject_del(bat_priv->mesh_obj);
kobject_put(bat_priv->mesh_obj);
bat_priv->mesh_obj = NULL;
}
@@ -781,6 +844,10 @@ rem_attr:
for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+ if (vlan->kobj != bat_priv->mesh_obj) {
+ kobject_uevent(vlan->kobj, KOBJ_REMOVE);
+ kobject_del(vlan->kobj);
+ }
kobject_put(vlan->kobj);
vlan->kobj = NULL;
out:
@@ -800,6 +867,10 @@ void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+ if (vlan->kobj != bat_priv->mesh_obj) {
+ kobject_uevent(vlan->kobj, KOBJ_REMOVE);
+ kobject_del(vlan->kobj);
+ }
kobject_put(vlan->kobj);
vlan->kobj = NULL;
}
@@ -828,31 +899,31 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
return length;
}
-static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
- struct attribute *attr, char *buff,
- size_t count)
+/**
+ * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+ * @ifname: name of soft-interface to modify
+ *
+ * Changes the parts of the hard+soft interface which can not be modified under
+ * sysfs lock (to prevent deadlock situations).
+ *
+ * Return: 0 on success, < 0 on failure
+ */
+static int batadv_store_mesh_iface_finish(struct net_device *net_dev,
+ char ifname[IFNAMSIZ])
{
- struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct net *net = dev_net(net_dev);
struct batadv_hard_iface *hard_iface;
- int status_tmp = -1;
- int ret = count;
+ int status_tmp;
+ int ret = 0;
+
+ ASSERT_RTNL();
hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface)
- return count;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
- buff);
- batadv_hardif_put(hard_iface);
- return -EINVAL;
- }
+ return 0;
- if (strncmp(buff, "none", 4) == 0)
+ if (strncmp(ifname, "none", 4) == 0)
status_tmp = BATADV_IF_NOT_IN_USE;
else
status_tmp = BATADV_IF_I_WANT_YOU;
@@ -861,15 +932,13 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
goto out;
if ((hard_iface->soft_iface) &&
- (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+ (strncmp(hard_iface->soft_iface->name, ifname, IFNAMSIZ) == 0))
goto out;
- rtnl_lock();
-
if (status_tmp == BATADV_IF_NOT_IN_USE) {
batadv_hardif_disable_interface(hard_iface,
BATADV_IF_CLEANUP_AUTO);
- goto unlock;
+ goto out;
}
/* if the interface already is in use */
@@ -877,15 +946,71 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
batadv_hardif_disable_interface(hard_iface,
BATADV_IF_CLEANUP_AUTO);
- ret = batadv_hardif_enable_interface(hard_iface, net, buff);
-
-unlock:
- rtnl_unlock();
+ ret = batadv_hardif_enable_interface(hard_iface, net, ifname);
out:
batadv_hardif_put(hard_iface);
return ret;
}
+/**
+ * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * @work: work queue item
+ *
+ * Changes the parts of the hard+soft interface which can not be modified under
+ * sysfs lock (to prevent deadlock situations).
+ */
+static void batadv_store_mesh_iface_work(struct work_struct *work)
+{
+ struct batadv_store_mesh_work *store_work;
+ int ret;
+
+ store_work = container_of(work, struct batadv_store_mesh_work, work);
+
+ rtnl_lock();
+ ret = batadv_store_mesh_iface_finish(store_work->net_dev,
+ store_work->soft_iface_name);
+ rtnl_unlock();
+
+ if (ret < 0)
+ pr_err("Failed to store new mesh_iface state %s for %s: %d\n",
+ store_work->soft_iface_name, store_work->net_dev->name,
+ ret);
+
+ dev_put(store_work->net_dev);
+ kfree(store_work);
+}
+
+static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_store_mesh_work *store_work;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strlen(buff) >= IFNAMSIZ) {
+ pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
+ buff);
+ return -EINVAL;
+ }
+
+ store_work = kmalloc(sizeof(*store_work), GFP_KERNEL);
+ if (!store_work)
+ return -ENOMEM;
+
+ dev_hold(net_dev);
+ INIT_WORK(&store_work->work, batadv_store_mesh_iface_work);
+ store_work->net_dev = net_dev;
+ strlcpy(store_work->soft_iface_name, buff,
+ sizeof(store_work->soft_iface_name));
+
+ queue_work(batadv_event_workqueue, &store_work->work);
+
+ return count;
+}
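A condensed sketch of the defer-to-workqueue pattern used here, with hypothetical example_* names: the sysfs store only records the request, and the work item acquires rtnl_lock and performs the actual change.

struct example_store_work {
	struct net_device *net_dev;	/* held via dev_hold() by the submitter */
	char ifname[IFNAMSIZ];
	struct work_struct work;
};

static void example_store_work_fn(struct work_struct *work)
{
	struct example_store_work *sw;

	sw = container_of(work, struct example_store_work, work);

	rtnl_lock();
	/* ... apply sw->ifname to sw->net_dev ... */
	rtnl_unlock();

	dev_put(sw->net_dev);	/* balance the dev_hold() */
	kfree(sw);
}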
+
static ssize_t batadv_show_iface_status(struct kobject *kobj,
struct attribute *attr, char *buff)
{
@@ -1048,6 +1173,8 @@ out:
void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
{
+ kobject_uevent(*hardif_obj, KOBJ_REMOVE);
+ kobject_del(*hardif_obj);
kobject_put(*hardif_obj);
*hardif_obj = NULL;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7e6df7a4964a..7f663092f6de 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -22,12 +22,14 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
+#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -35,25 +37,39 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
+#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "multicast.h"
+#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "soft-interface.h"
#include "tvlv.h"
+static struct kmem_cache *batadv_tl_cache __read_mostly;
+static struct kmem_cache *batadv_tg_cache __read_mostly;
+static struct kmem_cache *batadv_tt_orig_cache __read_mostly;
+static struct kmem_cache *batadv_tt_change_cache __read_mostly;
+static struct kmem_cache *batadv_tt_req_cache __read_mostly;
+static struct kmem_cache *batadv_tt_roam_cache __read_mostly;
+
/* hash class keys */
static struct lock_class_key batadv_tt_local_hash_lock_class_key;
static struct lock_class_key batadv_tt_global_hash_lock_class_key;
@@ -205,6 +221,20 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
}
/**
+ * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * @rcu: rcu pointer of the tt_local_entry
+ */
+static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+
+ tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tl_cache, tt_local_entry);
+}
+
+/**
* batadv_tt_local_entry_release - release tt_local_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the tt_local_entry
@@ -218,7 +248,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
batadv_softif_vlan_put(tt_local_entry->vlan);
- kfree_rcu(tt_local_entry, common.rcu);
+ call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
}
/**
@@ -234,6 +264,20 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
}
/**
+ * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * @rcu: rcu pointer of the tt_global_entry
+ */
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tg_cache, tt_global_entry);
+}
+
+/**
* batadv_tt_global_entry_release - release tt_global_entry from lists and queue
* for free after rcu grace period
* @ref: kref pointer of the tt_global_entry
@@ -246,7 +290,8 @@ static void batadv_tt_global_entry_release(struct kref *ref)
common.refcount);
batadv_tt_global_del_orig_list(tt_global_entry);
- kfree_rcu(tt_global_entry, common.rcu);
+
+ call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
}
/**
@@ -384,6 +429,19 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
}
/**
+ * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * @rcu: rcu pointer of the orig_entry
+ */
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+
+ kmem_cache_free(batadv_tt_orig_cache, orig_entry);
+}
+
+/**
* batadv_tt_orig_list_entry_release - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
@@ -396,7 +454,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
refcount);
batadv_orig_node_put(orig_entry->orig_node);
- kfree_rcu(orig_entry, rcu);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
/**
@@ -426,7 +484,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
bool event_removed = false;
bool del_op_requested, del_op_entry;
- tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
+ tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
if (!tt_change_node)
return;
@@ -467,8 +525,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
continue;
del:
list_del(&entry->list);
- kfree(entry);
- kfree(tt_change_node);
+ kmem_cache_free(batadv_tt_change_cache, entry);
+ kmem_cache_free(batadv_tt_change_cache, tt_change_node);
event_removed = true;
goto unlock;
}
@@ -646,7 +704,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
goto out;
}
- tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
+ tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC);
if (!tt_local)
goto out;
@@ -656,7 +714,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
net_ratelimited_function(batadv_info, soft_iface,
"adding TT local entry %pM to non-existent VLAN %d\n",
addr, BATADV_PRINT_VID(vid));
- kfree(tt_local);
+ kmem_cache_free(batadv_tl_cache, tt_local);
tt_local = NULL;
goto out;
}
@@ -676,7 +734,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
if (batadv_is_wifi_netdev(in_dev))
tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
kref_init(&tt_local->common.refcount);
- kref_get(&tt_local->common.refcount);
tt_local->last_seen = jiffies;
tt_local->common.added_at = tt_local->last_seen;
tt_local->vlan = vlan;
@@ -688,6 +745,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
is_multicast_ether_addr(addr))
tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
+ kref_get(&tt_local->common.refcount);
hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
batadv_choose_tt, &tt_local->common,
&tt_local->common.hash_entry);
@@ -959,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_diff_entries_count++;
}
list_del(&entry->list);
- kfree(entry);
+ kmem_cache_free(batadv_tt_change_cache, entry);
}
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
@@ -989,6 +1047,7 @@ container_register:
kfree(tt_data);
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1056,6 +1115,165 @@ out:
batadv_hardif_put(primary_if);
return 0;
}
+#endif
+
+/**
+ * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * @msg :Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @common: tt local & tt global common data
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_tt_common_entry *common)
+{
+ void *hdr;
+ struct batadv_softif_vlan *vlan;
+ struct batadv_tt_local_entry *local;
+ unsigned int last_seen_msecs;
+ u32 crc;
+
+ local = container_of(common, struct batadv_tt_local_entry, common);
+ last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen);
+
+ vlan = batadv_softif_vlan_get(bat_priv, common->vid);
+ if (!vlan)
+ return 0;
+
+ crc = vlan->tt.crc;
+
+ batadv_softif_vlan_put(vlan);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI,
+ BATADV_CMD_GET_TRANSTABLE_LOCAL);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
+ nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+ goto nla_put_failure;
+
+ if (!(common->flags & BATADV_TT_CLIENT_NOPURGE) &&
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @head: Pointer to the list containing the local tt entries
+ * @idx_s: Number of entries to skip
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct hlist_head *head, int *idx_s)
+{
+ struct batadv_tt_common_entry *common;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(common, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_tt_local_dump_entry(msg, portid, seq, bat_priv,
+ common)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_local_dump - Dump TT local entries into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: Error code, or length of message on success
+ */
+int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_hashtable *hash;
+ struct hlist_head *head;
+ int ret;
+ int ifindex;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hash = bat_priv->tt.local_hash;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_tt_local_dump_bucket(msg, portid, cb->nlh->nlmsg_seq,
+ bat_priv, head, &idx))
+ break;
+
+ bucket++;
+ }
+
+ ret = msg->len;
+
+ out:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ return ret;
+}
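The dump helpers above follow the usual resumable netlink pattern: the position lives in cb->args[] so a later callback invocation continues where the previous message filled up. A stripped-down sketch with hypothetical example_* names:

static int example_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	int bucket = cb->args[0];	/* hash bucket to resume from */
	int idx = cb->args[1];		/* entry index inside that bucket */

	while (bucket < example_hash_size) {
		/* returns -EMSGSIZE when msg is full and leaves idx at the
		 * first entry that did not fit
		 */
		if (example_dump_bucket(msg, bucket, &idx))
			break;

		bucket++;
	}

	cb->args[0] = bucket;	/* saved for the next invocation */
	cb->args[1] = idx;

	return msg->len;	/* non-zero length means "call again" */
}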
static void
batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
@@ -1259,7 +1477,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
list_del(&entry->list);
- kfree(entry);
+ kmem_cache_free(batadv_tt_change_cache, entry);
}
atomic_set(&bat_priv->tt.local_changes, 0);
@@ -1341,7 +1559,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
goto out;
}
- orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+ orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
if (!orig_entry)
goto out;
@@ -1351,9 +1569,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
kref_init(&orig_entry->refcount);
- kref_get(&orig_entry->refcount);
spin_lock_bh(&tt_global->list_lock);
+ kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
spin_unlock_bh(&tt_global->list_lock);
@@ -1411,7 +1629,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
goto out;
if (!tt_global_entry) {
- tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
+ tt_global_entry = kmem_cache_zalloc(batadv_tg_cache,
+ GFP_ATOMIC);
if (!tt_global_entry)
goto out;
@@ -1428,13 +1647,13 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
if (flags & BATADV_TT_CLIENT_ROAM)
tt_global_entry->roam_at = jiffies;
kref_init(&common->refcount);
- kref_get(&common->refcount);
common->added_at = jiffies;
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
atomic_set(&tt_global_entry->orig_list_count, 0);
spin_lock_init(&tt_global_entry->list_lock);
+ kref_get(&common->refcount);
hash_added = batadv_hash_add(bat_priv->tt.global_hash,
batadv_compare_tt,
batadv_choose_tt, common,
@@ -1579,6 +1798,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
return best_entry;
}
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
* batadv_tt_global_print_entry - print all orig nodes who announce the address
* for this global entry
@@ -1702,6 +1922,219 @@ out:
batadv_hardif_put(primary_if);
return 0;
}
+#endif
+
+/**
+ * batadv_tt_global_dump_subentry - Dump one TT global sub-entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @common: tt local & tt global common data
+ * @orig: Originator node announcing a non-mesh client
+ * @best: Is the best originator for the TT entry
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_tt_common_entry *common,
+ struct batadv_tt_orig_list_entry *orig,
+ bool best)
+{
+ void *hdr;
+ struct batadv_orig_node_vlan *vlan;
+ u8 last_ttvn;
+ u32 crc;
+
+ vlan = batadv_orig_node_vlan_get(orig->orig_node,
+ common->vid);
+ if (!vlan)
+ return 0;
+
+ crc = vlan->tt.crc;
+
+ batadv_orig_node_vlan_put(vlan);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI,
+ BATADV_CMD_GET_TRANSTABLE_GLOBAL);
+ if (!hdr)
+ return -ENOBUFS;
+
+ last_ttvn = atomic_read(&orig->orig_node->last_ttvn);
+
+ if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
+ nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ orig->orig_node->orig) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_TTVN, orig->ttvn) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
+ nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @common: tt local & tt global common data
+ * @sub_s: Number of entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_tt_common_entry *common, int *sub_s)
+{
+ struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
+ struct batadv_tt_global_entry *global;
+ struct hlist_head *head;
+ int sub = 0;
+ bool best;
+
+ global = container_of(common, struct batadv_tt_global_entry, common);
+ best_entry = batadv_transtable_best_orig(bat_priv, global);
+ head = &global->orig_list;
+
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (orig_entry == best_entry);
+
+ if (batadv_tt_global_dump_subentry(msg, portid, seq, common,
+ orig_entry, best)) {
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *sub_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_global_dump_bucket - Dump one TT global bucket into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @head: Pointer to the list containing the global tt entries
+ * @idx_s: Number of entries to skip
+ * @sub: Number of sub-entries to skip within the current entry
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_tt_common_entry *common;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(common, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_tt_global_dump_entry(msg, portid, seq, bat_priv,
+ common, sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_global_dump - Dump TT global entries into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: Error code, or length of message on success
+ */
+int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_hashtable *hash;
+ struct hlist_head *head;
+ int ret;
+ int ifindex;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hash = bat_priv->tt.global_hash;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_tt_global_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq, bat_priv,
+ head, &idx, &sub))
+ break;
+
+ bucket++;
+ }
+
+ ret = msg->len;
+
+ out:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+ if (soft_iface)
+ dev_put(soft_iface);
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+
+ return ret;
+}
/**
* _batadv_tt_global_del_orig_entry - remove and free an orig_entry
@@ -2280,7 +2713,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
- kfree(tt_req_node);
+ kmem_cache_free(batadv_tt_req_cache, tt_req_node);
}
/**
@@ -2367,7 +2800,7 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
goto unlock;
}
- tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
+ tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC);
if (!tt_req_node)
goto unlock;
@@ -3104,7 +3537,7 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
list_del(&node->list);
- kfree(node);
+ kmem_cache_free(batadv_tt_roam_cache, node);
}
spin_unlock_bh(&bat_priv->tt.roam_list_lock);
@@ -3121,7 +3554,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
continue;
list_del(&node->list);
- kfree(node);
+ kmem_cache_free(batadv_tt_roam_cache, node);
}
spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}
@@ -3162,7 +3595,8 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
}
if (!ret) {
- tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
+ tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache,
+ GFP_ATOMIC);
if (!tt_roam_node)
goto unlock;
@@ -3865,3 +4299,85 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
return ret;
}
+
+/**
+ * batadv_tt_cache_init - Initialize tt memory object cache
+ *
+ * Return: 0 on success or negative error number in case of failure.
+ */
+int __init batadv_tt_cache_init(void)
+{
+ size_t tl_size = sizeof(struct batadv_tt_local_entry);
+ size_t tg_size = sizeof(struct batadv_tt_global_entry);
+ size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry);
+ size_t tt_change_size = sizeof(struct batadv_tt_change_node);
+ size_t tt_req_size = sizeof(struct batadv_tt_req_node);
+ size_t tt_roam_size = sizeof(struct batadv_tt_roam_node);
+
+ batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tl_cache)
+ return -ENOMEM;
+
+ batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tg_cache)
+ goto err_tt_tl_destroy;
+
+ batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache",
+ tt_orig_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_orig_cache)
+ goto err_tt_tg_destroy;
+
+ batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache",
+ tt_change_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_change_cache)
+ goto err_tt_orig_destroy;
+
+ batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache",
+ tt_req_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_req_cache)
+ goto err_tt_change_destroy;
+
+ batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache",
+ tt_roam_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_roam_cache)
+ goto err_tt_req_destroy;
+
+ return 0;
+
+err_tt_req_destroy:
+ kmem_cache_destroy(batadv_tt_req_cache);
+ batadv_tt_req_cache = NULL;
+err_tt_change_destroy:
+ kmem_cache_destroy(batadv_tt_change_cache);
+ batadv_tt_change_cache = NULL;
+err_tt_orig_destroy:
+ kmem_cache_destroy(batadv_tt_orig_cache);
+ batadv_tt_orig_cache = NULL;
+err_tt_tg_destroy:
+ kmem_cache_destroy(batadv_tg_cache);
+ batadv_tg_cache = NULL;
+err_tt_tl_destroy:
+ kmem_cache_destroy(batadv_tl_cache);
+ batadv_tl_cache = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * batadv_tt_cache_destroy - Destroy tt memory object cache
+ */
+void batadv_tt_cache_destroy(void)
+{
+ kmem_cache_destroy(batadv_tl_cache);
+ kmem_cache_destroy(batadv_tg_cache);
+ kmem_cache_destroy(batadv_tt_orig_cache);
+ kmem_cache_destroy(batadv_tt_change_cache);
+ kmem_cache_destroy(batadv_tt_req_cache);
+ kmem_cache_destroy(batadv_tt_roam_cache);
+}
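For reference, a minimal sketch of the kmem_cache pairing the new caches rely on; the example_* names are hypothetical and the entry type is reused only for illustration:

static struct kmem_cache *example_cache __read_mostly;

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache",
					  sizeof(struct batadv_tt_req_node),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void example_cache_use(void)
{
	struct batadv_tt_req_node *node;

	node = kmem_cache_alloc(example_cache, GFP_ATOMIC);
	if (!node)
		return;

	/* ... initialize and use node ... */

	kmem_cache_free(example_cache, node);
}

static void example_cache_exit(void)
{
	kmem_cache_destroy(example_cache);	/* safe even if NULL */
}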
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 7c7e2c006bfe..783fdba84db2 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,8 +22,10 @@
#include <linux/types.h>
+struct netlink_callback;
struct net_device;
struct seq_file;
+struct sk_buff;
int batadv_tt_init(struct batadv_priv *bat_priv);
bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
@@ -33,6 +35,8 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
const char *message, bool roaming);
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
+int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
s32 match_vid, const char *message);
@@ -59,4 +63,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
const u8 *addr, unsigned short vid);
+int batadv_tt_cache_init(void);
+void batadv_tt_cache_destroy(void);
+
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 3d1cf0fb112d..77654f055f24 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -257,8 +257,13 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
batadv_tvlv_container_remove(bat_priv, tvlv_old);
+
+ kref_get(&tvlv_new->refcount);
hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+
+ /* don't return reference to new tvlv_container */
+ batadv_tvlv_container_put(tvlv_new);
}
/**
@@ -542,8 +547,12 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
INIT_HLIST_NODE(&tvlv_handler->list);
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+ kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+
+ /* don't return reference to new tvlv_handler */
+ batadv_tvlv_handler_put(tvlv_handler);
}
/**
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index a64522c3b45d..b3dd1a381aad 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -28,6 +28,7 @@
#include <linux/if_ether.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
+#include <linux/netlink.h>
#include <linux/sched.h> /* for linux/wait.h */
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -132,7 +133,6 @@ struct batadv_hard_iface_bat_v {
* @rcu: struct used for freeing in an RCU-safe manner
* @bat_iv: per hard-interface B.A.T.M.A.N. IV data
* @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @cleanup_work: work queue callback item for hard-interface deinit
* @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
* @neigh_list: list of unique single hop neighbors via this interface
* @neigh_list_lock: lock protecting neigh_list
@@ -152,7 +152,6 @@ struct batadv_hard_iface {
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
struct batadv_hard_iface_bat_v bat_v;
#endif
- struct work_struct cleanup_work;
struct dentry *debug_dir;
struct hlist_head neigh_list;
/* neigh_list_lock protects: neigh_list */
@@ -1015,7 +1014,6 @@ struct batadv_priv_bat_v {
* @forw_bcast_list_lock: lock protecting forw_bcast_list
* @tp_list_lock: spinlock protecting @tp_list
* @orig_work: work queue callback item for orig node purging
- * @cleanup_work: work queue callback item for soft-interface deinit
* @primary_if: one of the hard-interfaces assigned to this mesh interface
* becomes the primary interface
* @algo_ops: routing algorithm used by this mesh interface
@@ -1074,7 +1072,6 @@ struct batadv_priv {
spinlock_t tp_list_lock; /* protects tp_list */
atomic_t tp_num;
struct delayed_work orig_work;
- struct work_struct cleanup_work;
struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
struct batadv_algo_ops *algo_ops;
struct hlist_head softif_vlan_list;
@@ -1379,6 +1376,7 @@ struct batadv_skb_cb {
* locally generated packet
* @if_outgoing: interface where the packet should be sent to, or NULL if
* unspecified
+ * @queue_left: The queue (counter) this packet was applied to
*/
struct batadv_forw_packet {
struct hlist_node list;
@@ -1391,11 +1389,13 @@ struct batadv_forw_packet {
struct delayed_work delayed_work;
struct batadv_hard_iface *if_incoming;
struct batadv_hard_iface *if_outgoing;
+ atomic_t *queue_left;
};
/**
* struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
* @activate: start routing mechanisms when hard-interface is brought up
+ * (optional)
* @enable: init routing info when hard-interface is enabled
* @disable: de-init routing info when hard-interface is disabled
* @update_mac: (re-)init mac addresses of the protocol information
@@ -1413,11 +1413,13 @@ struct batadv_algo_iface_ops {
/**
* struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
* @hardif_init: called on creation of single hop entry
+ * (optional)
* @cmp: compare the metrics of two neighbors for their respective outgoing
* interfaces
* @is_similar_or_better: check if neigh1 is equally similar or better than
* neigh2 for their respective outgoing interface from the metric perspective
* @print: print the single hop neighbor list (optional)
+ * @dump: dump neighbors to a netlink socket (optional)
*/
struct batadv_algo_neigh_ops {
void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
@@ -1429,26 +1431,64 @@ struct batadv_algo_neigh_ops {
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2);
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
void (*print)(struct batadv_priv *priv, struct seq_file *seq);
+#endif
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv,
+ struct batadv_hard_iface *hard_iface);
};
/**
* struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
* @free: free the resources allocated by the routing algorithm for an orig_node
- * object
+ * object (optional)
* @add_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to a new hard-interface being added into the mesh
+ * orig_node due to a new hard-interface being added into the mesh (optional)
* @del_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to an hard-interface being removed from the mesh
+ * orig_node due to a hard-interface being removed from the mesh (optional)
* @print: print the originator table (optional)
+ * @dump: dump originators to a netlink socket (optional)
*/
struct batadv_algo_orig_ops {
void (*free)(struct batadv_orig_node *orig_node);
int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
int del_if_num);
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
void (*print)(struct batadv_priv *priv, struct seq_file *seq,
struct batadv_hard_iface *hard_iface);
+#endif
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv,
+ struct batadv_hard_iface *hard_iface);
+};
+
+/**
+ * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @store_sel_class: parse and stores a new GW selection class (optional)
+ * @show_sel_class: prints the current GW selection class (optional)
+ * @get_best_gw_node: select the best GW from the list of available nodes
+ * (optional)
+ * @is_eligible: check if a newly discovered GW is a potential candidate for
+ * the election as best GW (optional)
+ * @print: print the gateway table (optional)
+ * @dump: dump gateways to a netlink socket (optional)
+ */
+struct batadv_algo_gw_ops {
+ ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
+ size_t count);
+ ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+ struct batadv_gw_node *(*get_best_gw_node)
+ (struct batadv_priv *bat_priv);
+ bool (*is_eligible)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node);
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
+#endif
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv);
};
/**
@@ -1458,6 +1498,7 @@ struct batadv_algo_orig_ops {
* @iface: callbacks related to interface handling
* @neigh: callbacks related to neighbors handling
* @orig: callbacks related to originators handling
+ * @gw: callbacks related to GW mode
*/
struct batadv_algo_ops {
struct hlist_node list;
@@ -1465,6 +1506,7 @@ struct batadv_algo_ops {
struct batadv_algo_iface_ops iface;
struct batadv_algo_neigh_ops neigh;
struct batadv_algo_orig_ops orig;
+ struct batadv_algo_gw_ops gw;
};
/**
@@ -1564,4 +1606,17 @@ enum batadv_tvlv_handler_flags {
BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
};
+/**
+ * struct batadv_store_mesh_work - Work queue item to detach interface
+ * add/del from the sysfs locks
+ * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+ * @soft_iface_name: name of soft-interface to modify
+ * @work: work queue item
+ */
+struct batadv_store_mesh_work {
+ struct net_device *net_dev;
+ char soft_iface_name[IFNAMSIZ];
+ struct work_struct work;
+};
+
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 0b5f729d08d2..1aff2da9bc74 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -26,11 +26,13 @@
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/stringify.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
+#include "leds.h"
#include "selftest.h"
/* Bluetooth sockets */
@@ -712,13 +714,16 @@ static struct net_proto_family bt_sock_family_ops = {
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
+#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
+ __stringify(BT_SUBSYS_REVISION)
+
static int __init bt_init(void)
{
int err;
sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
- BT_INFO("Core ver %s", BT_SUBSYS_VERSION);
+ BT_INFO("Core ver %s", VERSION);
err = bt_selftest();
if (err < 0)
@@ -726,6 +731,8 @@ static int __init bt_init(void)
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+ bt_leds_init();
+
err = bt_sysfs_init();
if (err < 0)
return err;
@@ -785,6 +792,8 @@ static void __exit bt_exit(void)
bt_sysfs_cleanup();
+ bt_leds_cleanup();
+
debugfs_remove_recursive(bt_debugfs);
}
@@ -792,7 +801,7 @@ subsys_initcall(bt_init);
module_exit(bt_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
-MODULE_VERSION(BT_SUBSYS_VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
+MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ddf8432fe8fb..3ac89e9ace71 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1562,6 +1562,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b0e23dfc5c34..c8135680c43e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -971,14 +971,14 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- u8 ad_len = 0;
size_t name_len;
+ int max_len;
+ max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
name_len = strlen(hdev->dev_name);
- if (name_len > 0) {
- size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+ if (name_len > 0 && max_len > 0) {
if (name_len > max_len) {
name_len = max_len;
@@ -997,22 +997,42 @@ static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
return ad_len;
}
+static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+ return append_local_name(hdev, ptr, 0);
+}
+
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
u8 *ptr)
{
struct adv_info *adv_instance;
+ u32 instance_flags;
+ u8 scan_rsp_len = 0;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
- /* TODO: Set the appropriate entries based on advertising instance flags
- * here once flags other than 0 are supported.
- */
+ instance_flags = adv_instance->flags;
+
+ if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
+ ptr[0] = 3;
+ ptr[1] = EIR_APPEARANCE;
+ put_unaligned_le16(hdev->appearance, ptr + 2);
+ scan_rsp_len += 4;
+ ptr += 4;
+ }
+
memcpy(ptr, adv_instance->scan_rsp_data,
adv_instance->scan_rsp_len);
- return adv_instance->scan_rsp_len;
+ scan_rsp_len += adv_instance->scan_rsp_len;
+ ptr += adv_instance->scan_rsp_len;
+
+ if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
+
+ return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
@@ -1194,7 +1214,7 @@ static void adv_timeout_expire(struct work_struct *work)
hci_req_init(&req, hdev);
- hci_req_clear_adv_instance(hdev, &req, instance, false);
+ hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
if (list_empty(&hdev->adv_instances))
__hci_req_disable_advertising(&req);
@@ -1284,8 +1304,9 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
* setting.
* - force == false: Only instances that have a timeout will be removed.
*/
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
- u8 instance, bool force)
+void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
+ struct hci_request *req, u8 instance,
+ bool force)
{
struct adv_info *adv_instance, *n, *next_instance = NULL;
int err;
@@ -1311,7 +1332,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
rem_inst = adv_instance->instance;
err = hci_remove_adv_instance(hdev, rem_inst);
if (!err)
- mgmt_advertising_removed(NULL, hdev, rem_inst);
+ mgmt_advertising_removed(sk, hdev, rem_inst);
}
} else {
adv_instance = hci_find_adv_instance(hdev, instance);
@@ -1325,7 +1346,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
err = hci_remove_adv_instance(hdev, instance);
if (!err)
- mgmt_advertising_removed(NULL, hdev, instance);
+ mgmt_advertising_removed(sk, hdev, instance);
}
}
@@ -1716,7 +1737,7 @@ void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
* function. To be safe hard-code one of the
* values that's suitable for SCO.
*/
- rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
+ rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index b2d044bdc732..ac1e11006f38 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -73,8 +73,9 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force);
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
- u8 instance, bool force);
+void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
+ struct hci_request *req, u8 instance,
+ bool force);
void __hci_req_update_class(struct hci_request *req);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 96f04b7b9556..48f9471e7c85 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -26,6 +26,7 @@
#include <linux/export.h>
#include <linux/utsname.h>
+#include <linux/sched.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -38,6 +39,8 @@
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);
+static DEFINE_IDA(sock_cookie_ida);
+
static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
@@ -52,6 +55,8 @@ struct hci_pinfo {
__u32 cmsg_mask;
unsigned short channel;
unsigned long flags;
+ __u32 cookie;
+ char comm[TASK_COMM_LEN];
};
void hci_sock_set_flag(struct sock *sk, int nr)
@@ -74,6 +79,38 @@ unsigned short hci_sock_get_channel(struct sock *sk)
return hci_pi(sk)->channel;
}
+u32 hci_sock_get_cookie(struct sock *sk)
+{
+ return hci_pi(sk)->cookie;
+}
+
+static bool hci_sock_gen_cookie(struct sock *sk)
+{
+ int id = hci_pi(sk)->cookie;
+
+ if (!id) {
+ id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ id = 0xffffffff;
+
+ hci_pi(sk)->cookie = id;
+ get_task_comm(hci_pi(sk)->comm, current);
+ return true;
+ }
+
+ return false;
+}
+
+static void hci_sock_free_cookie(struct sock *sk)
+{
+ int id = hci_pi(sk)->cookie;
+
+ if (id) {
+ hci_pi(sk)->cookie = 0xffffffff;
+ ida_simple_remove(&sock_cookie_ida, id);
+ }
+}
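A minimal sketch of the IDA pattern used for the socket cookies above, with hypothetical example_* names; ids start at 1 so that 0 can keep meaning "no cookie assigned":

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	return ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
	if (id > 0)
		ida_simple_remove(&example_ida, id);
}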
+
static inline int hci_test_bit(int nr, const void *addr)
{
return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
@@ -305,6 +342,60 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb_copy);
}
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk)
+{
+ struct sock *sk;
+ __le16 index;
+
+ if (hdev)
+ index = cpu_to_le16(hdev->id);
+ else
+ index = cpu_to_le16(MGMT_INDEX_NONE);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ continue;
+
+ /* Ignore socket without the flag set */
+ if (!hci_sock_test_flag(sk, flag))
+ continue;
+
+ /* Skip the original socket */
+ if (sk == skip_sk)
+ continue;
+
+ skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(event, skb_put(skb, 2));
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ skb->tstamp = tstamp;
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
+ hdr->index = index;
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
struct hci_mon_hdr *hdr;
@@ -384,6 +475,129 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return skb;
}
+static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+ u16 format;
+ u8 ver[3];
+ u32 flags;
+
+ /* No message needed when cookie is not present */
+ if (!hci_pi(sk)->cookie)
+ return NULL;
+
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ format = 0x0000;
+ ver[0] = BT_SUBSYS_VERSION;
+ put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
+ break;
+ case HCI_CHANNEL_USER:
+ format = 0x0001;
+ ver[0] = BT_SUBSYS_VERSION;
+ put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
+ break;
+ case HCI_CHANNEL_CONTROL:
+ format = 0x0002;
+ mgmt_fill_version_info(ver);
+ break;
+ default:
+ /* No message for unsupported format */
+ return NULL;
+ }
+
+ skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(format, skb_put(skb, 2));
+ memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
+ put_unaligned_le32(flags, skb_put(skb, 4));
+ *skb_put(skb, 1) = TASK_COMM_LEN;
+ memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
+ if (hci_pi(sk)->hdev)
+ hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
+ else
+ hdr->index = cpu_to_le16(HCI_DEV_NONE);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
+static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ /* No message needed when cookie is not present */
+ if (!hci_pi(sk)->cookie)
+ return NULL;
+
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
+ case HCI_CHANNEL_CONTROL:
+ break;
+ default:
+ /* No message for unsupported format */
+ return NULL;
+ }
+
+ skb = bt_skb_alloc(4, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
+ if (hci_pi(sk)->hdev)
+ hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
+ else
+ hdr->index = cpu_to_le16(HCI_DEV_NONE);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
+static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
+ u16 opcode, u16 len,
+ const void *buf)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(opcode, skb_put(skb, 2));
+
+ if (buf)
+ memcpy(skb_put(skb, len), buf, len);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
@@ -458,6 +672,26 @@ static void send_monitor_replay(struct sock *sk)
read_unlock(&hci_dev_list_lock);
}
+static void send_monitor_control_replay(struct sock *mon_sk)
+{
+ struct sock *sk;
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_ctrl_open(sk);
+ if (!skb)
+ continue;
+
+ if (sock_queue_rcv_skb(mon_sk, skb))
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
@@ -585,6 +819,7 @@ static int hci_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct hci_dev *hdev;
+ struct sk_buff *skb;
BT_DBG("sock %p sk %p", sock, sk);
@@ -593,8 +828,24 @@ static int hci_sock_release(struct socket *sock)
hdev = hci_pi(sk)->hdev;
- if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_MONITOR:
atomic_dec(&monitor_promisc);
+ break;
+ case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
+ case HCI_CHANNEL_CONTROL:
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ hci_sock_free_cookie(sk);
+ break;
+ }
bt_sock_unlink(&hci_sk_list, sk);
@@ -721,6 +972,27 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
goto done;
}
+ /* When an ioctl is called on an unbound raw socket, make sure
+ * that the monitor gets informed. Ensure that the resulting event
+ * is only sent once by checking whether the cookie exists. The
+ * socket cookie will only ever be generated once for the lifetime
+ * of a given socket.
+ */
+ if (hci_sock_gen_cookie(sk)) {
+ struct sk_buff *skb;
+
+ if (capable(CAP_NET_ADMIN))
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
release_sock(sk);
switch (cmd) {
@@ -784,6 +1056,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
struct hci_dev *hdev = NULL;
+ struct sk_buff *skb;
int len, err = 0;
BT_DBG("sock %p sk %p", sock, sk);
@@ -822,7 +1095,35 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
atomic_inc(&hdev->promisc);
}
+ hci_pi(sk)->channel = haddr.hci_channel;
+
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been assigned,
+ * an ioctl has already been issued against an unbound
+ * socket and with that triggered an open notification.
+ * Send a close notification first to allow the state
+ * transition to bound.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ if (capable(CAP_NET_ADMIN))
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
hci_pi(sk)->hdev = hdev;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
break;
case HCI_CHANNEL_USER:
@@ -884,9 +1185,38 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
}
}
- atomic_inc(&hdev->promisc);
+ hci_pi(sk)->channel = haddr.hci_channel;
+
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been assigned,
+ * this socket will transition from a raw socket into
+ * a user channel socket. For a clean transition, send
+ * the close notification first.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ /* The user channel is restricted to CAP_NET_ADMIN
+ * capabilities and with that implicitly trusted.
+ */
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
hci_pi(sk)->hdev = hdev;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ atomic_inc(&hdev->promisc);
break;
case HCI_CHANNEL_MONITOR:
@@ -900,6 +1230,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done;
}
+ hci_pi(sk)->channel = haddr.hci_channel;
+
/* The monitor interface is restricted to CAP_NET_RAW
* capabilities and with that implicitly trusted.
*/
@@ -908,9 +1240,10 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
send_monitor_note(sk, "Linux version %s (%s)",
init_utsname()->release,
init_utsname()->machine);
- send_monitor_note(sk, "Bluetooth subsystem version %s",
- BT_SUBSYS_VERSION);
+ send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
+ BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
send_monitor_replay(sk);
+ send_monitor_control_replay(sk);
atomic_inc(&monitor_promisc);
break;
@@ -925,6 +1258,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
err = -EPERM;
goto done;
}
+
+ hci_pi(sk)->channel = haddr.hci_channel;
break;
default:
@@ -946,6 +1281,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (capable(CAP_NET_ADMIN))
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+ hci_pi(sk)->channel = haddr.hci_channel;
+
/* At the moment the index and unconfigured index events
* are enabled unconditionally. Setting them on each
* socket when binding keeps this functionality. They
@@ -956,16 +1293,40 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
* received by untrusted users. Example for such events
* are changes to settings, class of device, name etc.
*/
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been
+ * assigned, this socket will transition from
+ * a raw socket into a control socket. To
+ * allow for a clean transition, send the
+ * close notification first.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
- hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
}
break;
}
-
- hci_pi(sk)->channel = haddr.hci_channel;
sk->sk_state = BT_BOUND;
done:
@@ -1133,6 +1494,19 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
goto done;
}
+ if (chan->channel == HCI_CHANNEL_CONTROL) {
+ struct sk_buff *skb;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_command(sk, index, opcode, len,
+ buf + sizeof(*hdr));
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
if (opcode >= chan->handler_count ||
chan->handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
@@ -1440,6 +1814,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
BT_DBG("sk %p, opt %d", sk, optname);
+ if (level != SOL_HCI)
+ return -ENOPROTOOPT;
+
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
@@ -1523,6 +1900,9 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
BT_DBG("sk %p, opt %d", sk, optname);
+ if (level != SOL_HCI)
+ return -ENOPROTOOPT;
+
if (get_user(len, optlen))
return -EFAULT;
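
The control open notification added above carries a fixed little-endian layout after the monitor header: a 32-bit socket cookie, a 16-bit format, three version bytes, 32-bit flags, a one-byte comm length, and the task comm itself. The following stand-alone sketch (not part of the patch; the struct and function names are illustrative, and TASK_COMM_LEN is assumed to be 16 as on current kernels) shows how a user-space monitor reader could parse that payload, mirroring what create_monitor_ctrl_open() writes:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct ctrl_open_info {
	uint32_t cookie;
	uint16_t format;
	uint8_t  version[3];
	uint32_t flags;
	char     comm[32];	/* kernel sends TASK_COMM_LEN (16) bytes */
};

static int parse_ctrl_open(const uint8_t *buf, size_t len,
			   struct ctrl_open_info *info)
{
	size_t comm_len;

	if (len < 14)		/* 4 + 2 + 3 + 4 + 1 fixed bytes */
		return -1;

	/* fields in the exact order create_monitor_ctrl_open() emits them */
	info->cookie = buf[0] | buf[1] << 8 | buf[2] << 16 |
		       (uint32_t)buf[3] << 24;
	info->format = buf[4] | buf[5] << 8;
	memcpy(info->version, buf + 6, 3);
	info->flags  = buf[9] | buf[10] << 8 | buf[11] << 16 |
		       (uint32_t)buf[12] << 24;

	comm_len = buf[13];
	if (comm_len > sizeof(info->comm) - 1 || len < 14 + comm_len)
		return -1;

	memcpy(info->comm, buf + 14, comm_len);
	info->comm[comm_len] = '\0';
	return 0;
}
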
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
index 8319c8440c89..cb670b5594eb 100644
--- a/net/bluetooth/leds.c
+++ b/net/bluetooth/leds.c
@@ -11,6 +11,8 @@
#include "leds.h"
+DEFINE_LED_TRIGGER(bt_power_led_trigger);
+
struct hci_basic_led_trigger {
struct led_trigger led_trigger;
struct hci_dev *hdev;
@@ -24,6 +26,21 @@ void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
if (hdev->power_led)
led_trigger_event(hdev->power_led,
enabled ? LED_FULL : LED_OFF);
+
+ if (!enabled) {
+ struct hci_dev *d;
+
+ read_lock(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_bit(HCI_UP, &d->flags))
+ enabled = true;
+ }
+
+ read_unlock(&hci_dev_list_lock);
+ }
+
+ led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF);
}
static void power_activate(struct led_classdev *led_cdev)
@@ -72,3 +89,13 @@ void hci_leds_init(struct hci_dev *hdev)
/* initialize power_led */
hdev->power_led = led_allocate_basic(hdev, power_activate, "power");
}
+
+void bt_leds_init(void)
+{
+ led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger);
+}
+
+void bt_leds_cleanup(void)
+{
+ led_trigger_unregister_simple(bt_power_led_trigger);
+}
diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h
index a9c4d6ea01cf..08725a2fbd9b 100644
--- a/net/bluetooth/leds.h
+++ b/net/bluetooth/leds.h
@@ -7,10 +7,20 @@
*/
#if IS_ENABLED(CONFIG_BT_LEDS)
+
void hci_leds_update_powered(struct hci_dev *hdev, bool enabled);
void hci_leds_init(struct hci_dev *hdev);
+
+void bt_leds_init(void);
+void bt_leds_cleanup(void);
+
#else
+
static inline void hci_leds_update_powered(struct hci_dev *hdev,
bool enabled) {}
static inline void hci_leds_init(struct hci_dev *hdev) {}
+
+static inline void bt_leds_init(void) {}
+static inline void bt_leds_cleanup(void) {}
+
#endif
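
With bt_leds_init() registering a global "bluetooth-power" trigger, any LED class device can bind to it by default and will light up whenever at least one HCI device is powered, as implemented in hci_leds_update_powered() above. A minimal sketch of such a consumer, assuming a driver that already has a valid struct device (the demo_* names and probe hook are illustrative, not from the patch):

#include <linux/leds.h>
#include <linux/device.h>

/* Hypothetical LED that follows the global Bluetooth power state. */
static void demo_bt_led_set(struct led_classdev *cdev,
			    enum led_brightness value)
{
	/* drive the actual GPIO/register here */
}

static struct led_classdev demo_bt_led = {
	.name		 = "demo:blue:bluetooth",
	.brightness_set	 = demo_bt_led_set,
	.default_trigger = "bluetooth-power",	/* trigger from bt_leds_init() */
};

static int demo_probe(struct device *dev)
{
	/* LED turns on whenever at least one HCI device is up */
	return devm_led_classdev_register(dev, &demo_bt_led);
}
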
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7639290b6de3..19b8a5e9420d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
#include "mgmt_util.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 13
+#define MGMT_REVISION 14
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -104,6 +104,8 @@ static const u16 mgmt_commands[] = {
MGMT_OP_REMOVE_ADVERTISING,
MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_OP_START_LIMITED_DISCOVERY,
+ MGMT_OP_READ_EXT_INFO,
+ MGMT_OP_SET_APPEARANCE,
};
static const u16 mgmt_events[] = {
@@ -141,6 +143,7 @@ static const u16 mgmt_events[] = {
MGMT_EV_LOCAL_OOB_DATA_UPDATED,
MGMT_EV_ADVERTISING_ADDED,
MGMT_EV_ADVERTISING_REMOVED,
+ MGMT_EV_EXT_INFO_CHANGED,
};
static const u16 mgmt_untrusted_commands[] = {
@@ -149,6 +152,7 @@ static const u16 mgmt_untrusted_commands[] = {
MGMT_OP_READ_UNCONF_INDEX_LIST,
MGMT_OP_READ_CONFIG_INFO,
MGMT_OP_READ_EXT_INDEX_LIST,
+ MGMT_OP_READ_EXT_INFO,
};
static const u16 mgmt_untrusted_events[] = {
@@ -162,6 +166,7 @@ static const u16 mgmt_untrusted_events[] = {
MGMT_EV_NEW_CONFIG_OPTIONS,
MGMT_EV_EXT_INDEX_ADDED,
MGMT_EV_EXT_INDEX_REMOVED,
+ MGMT_EV_EXT_INFO_CHANGED,
};
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@@ -256,13 +261,6 @@ static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
flag, skip_sk);
}
-static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
- u16 len, struct sock *skip_sk)
-{
- return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
- HCI_MGMT_GENERIC_EVENTS, skip_sk);
-}
-
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
struct sock *skip_sk)
{
@@ -278,6 +276,14 @@ static u8 le_addr_type(u8 mgmt_addr_type)
return ADDR_LE_DEV_RANDOM;
}
+void mgmt_fill_version_info(void *ver)
+{
+ struct mgmt_rp_read_version *rp = ver;
+
+ rp->version = MGMT_VERSION;
+ rp->revision = cpu_to_le16(MGMT_REVISION);
+}
+
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -285,8 +291,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
- rp.version = MGMT_VERSION;
- rp.revision = cpu_to_le16(MGMT_REVISION);
+ mgmt_fill_version_info(&rp);
return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
&rp, sizeof(rp));
@@ -572,8 +577,8 @@ static int new_options(struct hci_dev *hdev, struct sock *skip)
{
__le32 options = get_missing_options(hdev);
- return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
- sizeof(options), skip);
+ return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+ sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
@@ -862,6 +867,107 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
sizeof(rp));
}
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
+}
+
+static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
+{
+ eir[eir_len++] = sizeof(type) + sizeof(data);
+ eir[eir_len++] = type;
+ put_unaligned_le16(data, &eir[eir_len]);
+ eir_len += sizeof(data);
+
+ return eir_len;
+}
+
+static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
+{
+ u16 eir_len = 0;
+ size_t name_len;
+
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
+ hdev->dev_class, 3);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
+ hdev->appearance);
+
+ name_len = strlen(hdev->dev_name);
+ eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, name_len);
+
+ name_len = strlen(hdev->short_name);
+ eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
+ hdev->short_name, name_len);
+
+ return eir_len;
+}
+
+static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ char buf[512];
+ struct mgmt_rp_read_ext_info *rp = (void *)buf;
+ u16 eir_len;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ memset(&buf, 0, sizeof(buf));
+
+ hci_dev_lock(hdev);
+
+ bacpy(&rp->bdaddr, &hdev->bdaddr);
+
+ rp->version = hdev->hci_ver;
+ rp->manufacturer = cpu_to_le16(hdev->manufacturer);
+
+ rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp->current_settings = cpu_to_le32(get_current_settings(hdev));
+
+ eir_len = append_eir_data_to_buf(hdev, rp->eir);
+ rp->eir_len = cpu_to_le16(eir_len);
+
+ hci_dev_unlock(hdev);
+
+ /* If this command is called at least once, then the events
+ * for class of device and local name changes are disabled
+ * and only the new extended controller information event
+ * is used.
+ */
+ hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
+
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
+ sizeof(*rp) + eir_len);
+}
+
+static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
+{
+ char buf[512];
+ struct mgmt_ev_ext_info_changed *ev = (void *)buf;
+ u16 eir_len;
+
+ memset(buf, 0, sizeof(buf));
+
+ eir_len = append_eir_data_to_buf(hdev, ev->eir);
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
+ sizeof(*ev) + eir_len,
+ HCI_MGMT_EXT_INFO_EVENTS, skip);
+}
+
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
__le32 settings = cpu_to_le32(get_current_settings(hdev));
@@ -922,7 +1028,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
- hci_req_clear_adv_instance(hdev, NULL, 0x00, false);
+ hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_disable_advertising(&req);
@@ -1000,8 +1106,8 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
__le32 ev = cpu_to_le32(get_current_settings(hdev));
- return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
- sizeof(ev), skip);
+ return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+ sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
int mgmt_new_settings(struct hci_dev *hdev)
@@ -1690,7 +1796,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
enabled = lmp_host_le_capable(hdev);
if (!val)
- hci_req_clear_adv_instance(hdev, NULL, 0x00, true);
+ hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
if (!hdev_is_powered(hdev) || val == enabled) {
bool changed = false;
@@ -2435,6 +2541,8 @@ static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
if (!cmd)
return -ENOMEM;
+ cmd->cmd_complete = addr_cmd_complete;
+
err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
if (err < 0)
@@ -2513,8 +2621,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("");
if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
- return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
- MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -2932,6 +3040,35 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
+static void adv_expire(struct hci_dev *hdev, u32 flags)
+{
+ struct adv_info *adv_instance;
+ struct hci_request req;
+ int err;
+
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+ if (!adv_instance)
+ return;
+
+ /* stop if current instance doesn't need to be changed */
+ if (!(adv_instance->flags & flags))
+ return;
+
+ cancel_adv_timeout(hdev);
+
+ adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
+ if (!adv_instance)
+ return;
+
+ hci_req_init(&req, hdev);
+ err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
+ true);
+ if (err)
+ return;
+
+ hci_req_run(&req, NULL);
+}
+
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_cp_set_local_name *cp;
@@ -2947,13 +3084,17 @@ static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
cp = cmd->param;
- if (status)
+ if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
mgmt_status(status));
- else
+ } else {
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
cp, sizeof(*cp));
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+ }
+
mgmt_pending_remove(cmd);
unlock:
@@ -2993,8 +3134,9 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
if (err < 0)
goto failed;
- err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
- data, len, sk);
+ err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
+ len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
+ ext_info_changed(hdev, sk);
goto failed;
}
@@ -3017,7 +3159,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
/* The name is stored in the scan response data and so
* no need to update the advertising data here.
*/
- if (lmp_le_capable(hdev))
+ if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
err = hci_req_run(&req, set_name_complete);
@@ -3029,6 +3171,40 @@ failed:
return err;
}
+static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_appearance *cp = data;
+ u16 appearance;
+ int err;
+
+ BT_DBG("");
+
+ if (!lmp_le_capable(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ appearance = le16_to_cpu(cp->appearance);
+
+ hci_dev_lock(hdev);
+
+ if (hdev->appearance != appearance) {
+ hdev->appearance = appearance;
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
+
+ ext_info_changed(hdev, sk);
+ }
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
+ 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -4869,7 +5045,7 @@ static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
int err;
memset(&rp, 0, sizeof(rp));
- memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
+ memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
if (status)
goto complete;
@@ -5501,17 +5677,6 @@ unlock:
return err;
}
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
- u8 data_len)
-{
- eir[eir_len++] = sizeof(type) + data_len;
- eir[eir_len++] = type;
- memcpy(&eir[eir_len], data, data_len);
- eir_len += data_len;
-
- return eir_len;
-}
-
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -5815,6 +5980,8 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
flags |= MGMT_ADV_FLAG_DISCOV;
flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
+ flags |= MGMT_ADV_FLAG_APPEARANCE;
+ flags |= MGMT_ADV_FLAG_LOCAL_NAME;
if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
flags |= MGMT_ADV_FLAG_TX_POWER;
@@ -5871,28 +6038,59 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
- u8 len, bool is_adv_data)
+static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
- int i, cur_len;
- bool flags_managed = false;
- bool tx_power_managed = false;
if (is_adv_data) {
if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
MGMT_ADV_FLAG_LIMITED_DISCOV |
- MGMT_ADV_FLAG_MANAGED_FLAGS)) {
- flags_managed = true;
+ MGMT_ADV_FLAG_MANAGED_FLAGS))
max_len -= 3;
- }
- if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
- tx_power_managed = true;
+ if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
- }
+ } else {
+ /* at least 1 byte of the name should fit in */
+ if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ max_len -= 3;
+
+ if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
+ max_len -= 4;
}
+ return max_len;
+}
+
+static bool flags_managed(u32 adv_flags)
+{
+ return adv_flags & (MGMT_ADV_FLAG_DISCOV |
+ MGMT_ADV_FLAG_LIMITED_DISCOV |
+ MGMT_ADV_FLAG_MANAGED_FLAGS);
+}
+
+static bool tx_power_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_TX_POWER;
+}
+
+static bool name_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
+}
+
+static bool appearance_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
+}
+
+static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+{
+ int i, cur_len;
+ u8 max_len;
+
+ max_len = tlv_data_max_len(adv_flags, is_adv_data);
+
if (len > max_len)
return false;
@@ -5900,10 +6098,21 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
cur_len = data[i];
- if (flags_managed && data[i + 1] == EIR_FLAGS)
+ if (data[i + 1] == EIR_FLAGS &&
+ (!is_adv_data || flags_managed(adv_flags)))
+ return false;
+
+ if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
+ return false;
+
+ if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
return false;
- if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
+ if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
+ return false;
+
+ if (data[i + 1] == EIR_APPEARANCE &&
+ appearance_managed(adv_flags))
return false;
/* If the current field length would exceed the total data
@@ -6027,8 +6236,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6175,7 +6384,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
- hci_req_clear_adv_instance(hdev, &req, cp->instance, true);
+ hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
if (list_empty(&hdev->adv_instances))
__hci_req_disable_advertising(&req);
@@ -6211,23 +6420,6 @@ unlock:
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
-{
- u8 max_len = HCI_MAX_AD_LENGTH;
-
- if (is_adv_data) {
- if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
- MGMT_ADV_FLAG_LIMITED_DISCOV |
- MGMT_ADV_FLAG_MANAGED_FLAGS))
- max_len -= 3;
-
- if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
- max_len -= 3;
- }
-
- return max_len;
-}
-
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
@@ -6356,6 +6548,9 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
+ { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
+ HCI_MGMT_UNTRUSTED },
+ { set_appearance, MGMT_SET_APPEARANCE_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
@@ -6494,9 +6689,12 @@ void __mgmt_power_off(struct hci_dev *hdev)
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
- if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
- mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
- zero_cod, sizeof(zero_cod), NULL);
+ if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
+ mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ zero_cod, sizeof(zero_cod),
+ HCI_MGMT_DEV_CLASS_EVENTS, NULL);
+ ext_info_changed(hdev, NULL);
+ }
new_settings(hdev, match.sk);
@@ -7092,9 +7290,11 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
- if (!status)
- mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
- dev_class, 3, NULL);
+ if (!status) {
+ mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
+ 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
+ ext_info_changed(hdev, NULL);
+ }
if (match.sk)
sock_put(match.sk);
@@ -7123,8 +7323,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
return;
}
- mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
+ ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
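
The eir_append_data()/eir_append_le16() helpers above emit standard EIR/AD structures: a length byte covering the type byte plus the data, then the type, then the data itself. A small stand-alone sketch of the matching decode loop (illustrative only, not part of the patch) makes the layout of the Read Extended Controller Info eir field explicit:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Walk an EIR/AD block as produced by eir_append_data()/eir_append_le16():
 * each entry is [len][type][len - 1 bytes of data]; len == 0 terminates.
 */
static void eir_dump(const uint8_t *eir, size_t eir_len)
{
	size_t off = 0;

	while (off + 1 < eir_len) {
		uint8_t field_len = eir[off];

		if (field_len == 0)
			break;
		if (off + 1 + field_len > eir_len)
			break;			/* truncated entry */

		printf("type 0x%02x, %u data bytes\n",
		       eir[off + 1], field_len - 1);

		off += field_len + 1;		/* length byte + field */
	}
}
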
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index 8c30c7eb8bef..c933bd08c1fe 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -21,12 +21,41 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <asm/unaligned.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>
#include "mgmt_util.h"
+static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
+ u16 opcode, u16 len, void *buf)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(cookie, skb_put(skb, 4));
+ put_unaligned_le16(opcode, skb_put(skb, 2));
+
+ if (buf)
+ memcpy(skb_put(skb, len), buf, len);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
+ hdr->index = index;
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
void *data, u16 data_len, int flag, struct sock *skip_sk)
{
@@ -52,14 +81,18 @@ int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
__net_timestamp(skb);
hci_send_to_channel(channel, skb, flag, skip_sk);
- kfree_skb(skb);
+ if (channel == HCI_CHANNEL_CONTROL)
+ hci_send_monitor_ctrl_event(hdev, event, data, data_len,
+ skb_get_ktime(skb), flag, skip_sk);
+
+ kfree_skb(skb);
return 0;
}
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
- struct sk_buff *skb;
+ struct sk_buff *skb, *mskb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
int err;
@@ -80,17 +113,30 @@ int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev->status = status;
ev->opcode = cpu_to_le16(cmd);
+ mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
+ MGMT_EV_CMD_STATUS, sizeof(*ev), ev);
+ if (mskb)
+ skb->tstamp = mskb->tstamp;
+ else
+ __net_timestamp(skb);
+
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
kfree_skb(skb);
+ if (mskb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(mskb);
+ }
+
return err;
}
int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
void *rp, size_t rp_len)
{
- struct sk_buff *skb;
+ struct sk_buff *skb, *mskb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
int err;
@@ -114,10 +160,24 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
if (rp)
memcpy(ev->data, rp, rp_len);
+ mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
+ MGMT_EV_CMD_COMPLETE,
+ sizeof(*ev) + rp_len, ev);
+ if (mskb)
+ skb->tstamp = mskb->tstamp;
+ else
+ __net_timestamp(skb);
+
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
kfree_skb(skb);
+ if (mskb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(mskb);
+ }
+
return err;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 4c1a16a96ae5..43faf2aea2ab 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3387,7 +3387,10 @@ int smp_register(struct hci_dev *hdev)
if (!lmp_sc_capable(hdev)) {
debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
hdev, &force_bredr_smp_fops);
- return 0;
+
+ /* Flag can already be set here (due to power toggle) */
+ if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
+ return 0;
}
if (WARN_ON(hdev->smp_bredr_data)) {
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index a1cda5d4718d..0aefc011b668 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -20,4 +20,6 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
+bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
+
obj-$(CONFIG_NETFILTER) += netfilter/
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 3addc05b9a16..889e5640455f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -227,9 +227,11 @@ static int __init br_init(void)
br_fdb_test_addr_hook = br_fdb_test_addr;
#endif
- pr_info("bridge: automatic filtering via arp/ip/ip6tables has been "
- "deprecated. Update your scripts to load br_netfilter if you "
+#if IS_MODULE(CONFIG_BRIDGE_NETFILTER)
+ pr_info("bridge: filtering via arp/ip/ip6tables is no longer available "
+ "by default. Update your scripts to load br_netfilter if you "
"need this.\n");
+#endif
return 0;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 09f26940aba5..89a687f3c0a3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -62,10 +62,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
if (is_broadcast_ether_addr(dest)) {
- br_flood(br, skb, false, false, true);
+ br_flood(br, skb, BR_PKT_BROADCAST, false, true);
} else if (is_multicast_ether_addr(dest)) {
if (unlikely(netpoll_tx_running(dev))) {
- br_flood(br, skb, false, false, true);
+ br_flood(br, skb, BR_PKT_MULTICAST, false, true);
goto out;
}
if (br_multicast_rcv(br, NULL, skb, vid)) {
@@ -78,11 +78,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
br_multicast_querier_exists(br, eth_hdr(skb)))
br_multicast_flood(mdst, skb, false, true);
else
- br_flood(br, skb, false, false, true);
+ br_flood(br, skb, BR_PKT_MULTICAST, false, true);
} else if ((dst = __br_fdb_get(br, dest, vid)) != NULL) {
br_forward(dst->dst, skb, false, true);
} else {
- br_flood(br, skb, true, false, true);
+ br_flood(br, skb, BR_PKT_UNICAST, false, true);
}
out:
rcu_read_unlock();
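
br_dev_xmit() and br_handle_frame_finish() now classify frames up front into the new enum br_pkt_type (defined later in br_private.h). The classification only looks at the destination MAC; below is a stand-alone sketch of the same decision, assuming the usual Ethernet address rules (all-ones is broadcast, a set group bit in the first octet marks multicast):

#include <stdint.h>
#include <string.h>

enum br_pkt_type {
	BR_PKT_UNICAST,
	BR_PKT_MULTICAST,
	BR_PKT_BROADCAST,
};

/* Mirror of the dest-MAC checks done via is_broadcast_ether_addr() and
 * is_multicast_ether_addr() in the bridge xmit/input paths.
 */
static enum br_pkt_type classify_dest(const uint8_t dest[6])
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (memcmp(dest, bcast, 6) == 0)
		return BR_PKT_BROADCAST;	/* broadcast is also multicast */
	if (dest[0] & 0x01)
		return BR_PKT_MULTICAST;	/* group bit set */
	return BR_PKT_UNICAST;
}
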
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cd620fab41b0..6b43c8c88f19 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -710,24 +710,27 @@ int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx)
+ int *idx)
{
struct net_bridge *br = netdev_priv(dev);
+ int err = 0;
int i;
if (!(dev->priv_flags & IFF_EBRIDGE))
goto out;
- if (!filter_dev)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+ if (!filter_dev) {
+ err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+ if (err < 0)
+ goto out;
+ }
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
- int err;
- if (idx < cb->args[0])
+ if (*idx < cb->args[2])
goto skip;
if (filter_dev &&
@@ -750,17 +753,15 @@ int br_fdb_dump(struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI);
- if (err < 0) {
- cb->args[1] = err;
- break;
- }
+ if (err < 0)
+ goto out;
skip:
- ++idx;
+ *idx += 1;
}
}
out:
- return idx;
+ return err;
}
/* Update (create or replace) forwarding database entry */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 63a83d8d7da3..7cb41aee4c82 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -29,7 +29,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
- br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
+ br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
+ nbp_switchdev_allowed_egress(p, skb);
}
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -175,7 +176,7 @@ out:
/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
- bool unicast, bool local_rcv, bool local_orig)
+ enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
u8 igmp_type = br_multicast_igmp_type(skb);
struct net_bridge_port *prev = NULL;
@@ -183,7 +184,10 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
list_for_each_entry_rcu(p, &br->port_list, list) {
/* Do not flood unicast traffic to ports that turn it off */
- if (unicast && !(p->flags & BR_FLOOD))
+ if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
+ continue;
+ if (pkt_type == BR_PKT_MULTICAST &&
+ !(p->flags & BR_MCAST_FLOOD))
continue;
/* Do not flood to ports that enable proxy ARP */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f2fede05d32c..ed0dd3340084 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -362,7 +362,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
- p->flags = BR_LEARNING | BR_FLOOD;
+ p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD;
br_init_port(p);
br_set_state(p, BR_STATE_DISABLED);
br_stp_port_timer_init(p);
@@ -545,6 +545,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err5;
+ err = nbp_switchdev_mark_set(p);
+ if (err)
+ goto err6;
+
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
@@ -566,7 +570,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
err = nbp_vlan_init(p);
if (err) {
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
- goto err6;
+ goto err7;
}
spin_lock_bh(&br->lock);
@@ -589,12 +593,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return 0;
-err6:
+err7:
list_del_rcu(&p->list);
br_fdb_delete_by_port(br, p, 0, 1);
nbp_update_port_count(br);
+err6:
netdev_upper_dev_unlink(dev, br->dev);
-
err5:
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index abe11f085479..855b72fbe1da 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -128,11 +128,12 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- bool local_rcv = false, mcast_hit = false, unicast = true;
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
const unsigned char *dest = eth_hdr(skb)->h_dest;
+ enum br_pkt_type pkt_type = BR_PKT_UNICAST;
struct net_bridge_fdb_entry *dst = NULL;
struct net_bridge_mdb_entry *mdst;
+ bool local_rcv, mcast_hit = false;
struct net_bridge *br;
u16 vid = 0;
@@ -142,29 +143,36 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
goto out;
+ nbp_switchdev_frame_mark(p, skb);
+
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
- if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
- br_multicast_rcv(br, p, skb, vid))
- goto drop;
+ local_rcv = !!(br->dev->flags & IFF_PROMISC);
+ if (is_multicast_ether_addr(dest)) {
+ /* by definition the broadcast address is also a multicast address */
+ if (is_broadcast_ether_addr(dest)) {
+ pkt_type = BR_PKT_BROADCAST;
+ local_rcv = true;
+ } else {
+ pkt_type = BR_PKT_MULTICAST;
+ if (br_multicast_rcv(br, p, skb, vid))
+ goto drop;
+ }
+ }
if (p->state == BR_STATE_LEARNING)
goto drop;
BR_INPUT_SKB_CB(skb)->brdev = br->dev;
- local_rcv = !!(br->dev->flags & IFF_PROMISC);
-
if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
br_do_proxy_arp(skb, br, vid, p);
- if (is_broadcast_ether_addr(dest)) {
- local_rcv = true;
- unicast = false;
- } else if (is_multicast_ether_addr(dest)) {
+ switch (pkt_type) {
+ case BR_PKT_MULTICAST:
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br, eth_hdr(skb))) {
@@ -178,18 +186,22 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
local_rcv = true;
br->dev->stats.multicast++;
}
- unicast = false;
- } else if ((dst = __br_fdb_get(br, dest, vid)) && dst->is_local) {
- /* Do not forward the packet since it's local. */
- return br_pass_frame_up(skb);
+ break;
+ case BR_PKT_UNICAST:
+ dst = __br_fdb_get(br, dest, vid);
+ default:
+ break;
}
if (dst) {
+ if (dst->is_local)
+ return br_pass_frame_up(skb);
+
dst->used = jiffies;
br_forward(dst->dst, skb, local_rcv, false);
} else {
if (!mcast_hit)
- br_flood(br, skb, unicast, local_rcv, false);
+ br_flood(br, skb, pkt_type, local_rcv, false);
else
br_multicast_flood(mdst, skb, local_rcv, false);
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 77e7f69bf80d..2fe9345c1407 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -30,6 +30,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
+#include <linux/rculist.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
@@ -395,11 +396,10 @@ bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE,
- NF_BR_PRE_ROUTING,
- net, sk, skb, skb->dev, NULL,
- br_nf_pre_routing_finish_bridge,
- 1);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+ net, sk, skb, skb->dev,
+ NULL,
+ br_nf_pre_routing_finish);
return 0;
}
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
@@ -417,10 +417,8 @@ bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, net, sk, skb,
- skb->dev, NULL,
- br_handle_frame_finish, 1);
-
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
+ br_handle_frame_finish);
return 0;
}
@@ -992,6 +990,43 @@ static struct notifier_block brnf_notifier __read_mostly = {
.notifier_call = brnf_device_event,
};
+/* recursively invokes nf_hook_slow (again), skipping already-called
+ * hooks (< NF_BR_PRI_BRNF).
+ *
+ * Called with rcu read lock held.
+ */
+int br_nf_hook_thresh(unsigned int hook, struct net *net,
+ struct sock *sk, struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *))
+{
+ struct nf_hook_entry *elem;
+ struct nf_hook_state state;
+ int ret;
+
+ elem = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]);
+
+ while (elem && (elem->ops.priority <= NF_BR_PRI_BRNF))
+ elem = rcu_dereference(elem->next);
+
+ if (!elem)
+ return okfn(net, sk, skb);
+
+ /* We may already have this, but read-locks nest anyway */
+ rcu_read_lock();
+ nf_hook_state_init(&state, elem, hook, NF_BR_PRI_BRNF + 1,
+ NFPROTO_BRIDGE, indev, outdev, sk, net, okfn);
+
+ ret = nf_hook_slow(skb, &state);
+ rcu_read_unlock();
+ if (ret == 1)
+ ret = okfn(net, sk, skb);
+
+ return ret;
+}
+
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 5e59a8457e7b..5989661c659f 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -187,10 +187,9 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
- net, sk, skb, skb->dev, NULL,
- br_nf_pre_routing_finish_bridge,
- 1);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+ net, sk, skb, skb->dev, NULL,
+ br_nf_pre_routing_finish_bridge);
return 0;
}
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
@@ -207,9 +206,8 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
- NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, net, sk, skb,
- skb->dev, NULL,
- br_handle_frame_finish, 1);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
+ skb->dev, NULL, br_handle_frame_finish);
return 0;
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f2a29e467e78..e99037c6f7b7 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -169,10 +169,15 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
- nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
- nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+ nla_put_u8(skb, IFLA_BRPORT_PROTECT,
+ !!(p->flags & BR_ROOT_BLOCK)) ||
+ nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
+ !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
- nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
+ nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
+ !!(p->flags & BR_FLOOD)) ||
+ nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
+ !!(p->flags & BR_MCAST_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
!!(p->flags & BR_PROXYARP_WIFI)) ||
@@ -630,6 +635,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
+ br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
@@ -1245,14 +1251,30 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
return 0;
}
-static size_t bridge_get_linkxstats_size(const struct net_device *dev)
+static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
- struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p = NULL;
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
+ struct net_bridge *br;
int numvls = 0;
- vg = br_vlan_group(br);
+ switch (attr) {
+ case IFLA_STATS_LINK_XSTATS:
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ break;
+ case IFLA_STATS_LINK_XSTATS_SLAVE:
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ return 0;
+ br = p->br;
+ vg = nbp_vlan_group(p);
+ break;
+ default:
+ return 0;
+ }
+
if (vg) {
/* we need to count all, even placeholder entries */
list_for_each_entry(v, &vg->vlan_list, vlist)
@@ -1264,45 +1286,42 @@ static size_t bridge_get_linkxstats_size(const struct net_device *dev)
nla_total_size(0);
}
-static size_t brport_get_linkxstats_size(const struct net_device *dev)
-{
- return nla_total_size(sizeof(struct br_mcast_stats)) +
- nla_total_size(0);
-}
-
-static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
+static int br_fill_linkxstats(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx, int attr)
{
- size_t retsize = 0;
+ struct nlattr *nla __maybe_unused;
+ struct net_bridge_port *p = NULL;
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+ struct net_bridge *br;
+ struct nlattr *nest;
+ int vl_idx = 0;
switch (attr) {
case IFLA_STATS_LINK_XSTATS:
- retsize = bridge_get_linkxstats_size(dev);
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
break;
case IFLA_STATS_LINK_XSTATS_SLAVE:
- retsize = brport_get_linkxstats_size(dev);
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ return 0;
+ br = p->br;
+ vg = nbp_vlan_group(p);
break;
+ default:
+ return -EINVAL;
}
- return retsize;
-}
-
-static int bridge_fill_linkxstats(struct sk_buff *skb,
- const struct net_device *dev,
- int *prividx)
-{
- struct net_bridge *br = netdev_priv(dev);
- struct nlattr *nla __maybe_unused;
- struct net_bridge_vlan_group *vg;
- struct net_bridge_vlan *v;
- struct nlattr *nest;
- int vl_idx = 0;
-
nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
if (!nest)
return -EMSGSIZE;
- vg = br_vlan_group(br);
if (vg) {
+ u16 pvid;
+
+ pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
struct bridge_vlan_xstats vxi;
struct br_vlan_stats stats;
@@ -1311,6 +1330,9 @@ static int bridge_fill_linkxstats(struct sk_buff *skb,
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
+ vxi.flags = v->flags;
+ if (v->vid == pvid)
+ vxi.flags |= BRIDGE_VLAN_INFO_PVID;
br_vlan_get_stats(v, &stats);
vxi.rx_bytes = stats.rx_bytes;
vxi.rx_packets = stats.rx_packets;
@@ -1329,7 +1351,7 @@ static int bridge_fill_linkxstats(struct sk_buff *skb,
BRIDGE_XSTATS_PAD);
if (!nla)
goto nla_put_failure;
- br_multicast_get_stats(br, NULL, nla_data(nla));
+ br_multicast_get_stats(br, p, nla_data(nla));
}
#endif
nla_nest_end(skb, nest);
@@ -1344,52 +1366,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int brport_fill_linkxstats(struct sk_buff *skb,
- const struct net_device *dev,
- int *prividx)
-{
- struct net_bridge_port *p = br_port_get_rtnl(dev);
- struct nlattr *nla __maybe_unused;
- struct nlattr *nest;
-
- if (!p)
- return 0;
-
- nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
- if (!nest)
- return -EMSGSIZE;
-#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
- nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
- sizeof(struct br_mcast_stats),
- BRIDGE_XSTATS_PAD);
- if (!nla) {
- nla_nest_end(skb, nest);
- return -EMSGSIZE;
- }
- br_multicast_get_stats(p->br, p, nla_data(nla));
-#endif
- nla_nest_end(skb, nest);
-
- return 0;
-}
-
-static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
- int *prividx, int attr)
-{
- int ret = -EINVAL;
-
- switch (attr) {
- case IFLA_STATS_LINK_XSTATS:
- ret = bridge_fill_linkxstats(skb, dev, prividx);
- break;
- case IFLA_STATS_LINK_XSTATS_SLAVE:
- ret = brport_fill_linkxstats(skb, dev, prividx);
- break;
- }
-
- return ret;
-}
-
static struct rtnl_af_ops br_af_ops __read_mostly = {
.family = AF_BRIDGE,
.get_link_af_size = br_get_link_af_size_filtered,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index aac2a6e6b008..1b63177e0ccd 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -251,6 +251,9 @@ struct net_bridge_port
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
struct net_bridge_vlan_group __rcu *vlgrp;
#endif
+#ifdef CONFIG_NET_SWITCHDEV
+ int offload_fwd_mark;
+#endif
};
#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
@@ -359,6 +362,11 @@ struct net_bridge
struct timer_list gc_timer;
struct kobject *ifobj;
u32 auto_cnt;
+
+#ifdef CONFIG_NET_SWITCHDEV
+ int offload_fwd_mark;
+#endif
+
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
struct net_bridge_vlan_group __rcu *vlgrp;
u8 vlan_enabled;
@@ -381,6 +389,10 @@ struct br_input_skb_cb {
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool vlan_filtered;
#endif
+
+#ifdef CONFIG_NET_SWITCHDEV
+ int offload_fwd_mark;
+#endif
};
#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
@@ -496,7 +508,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev, struct net_device *fdev, int idx);
+ struct net_device *dev, struct net_device *fdev, int *idx);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
@@ -505,12 +517,17 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
/* br_forward.c */
+enum br_pkt_type {
+ BR_PKT_UNICAST,
+ BR_PKT_MULTICAST,
+ BR_PKT_BROADCAST
+};
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb);
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
bool local_rcv, bool local_orig);
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
void br_flood(struct net_bridge *br, struct sk_buff *skb,
- bool unicast, bool local_rcv, bool local_orig);
+ enum br_pkt_type pkt_type, bool local_rcv, bool local_orig);
/* br_if.c */
void br_port_carrier_check(struct net_bridge_port *p);
@@ -1034,4 +1051,29 @@ static inline int br_sysfs_addbr(struct net_device *dev) { return 0; }
static inline void br_sysfs_delbr(struct net_device *dev) { return; }
#endif /* CONFIG_SYSFS */
+/* br_switchdev.c */
+#ifdef CONFIG_NET_SWITCHDEV
+int nbp_switchdev_mark_set(struct net_bridge_port *p);
+void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
+ struct sk_buff *skb);
+bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
+ const struct sk_buff *skb);
+#else
+static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
+{
+ return 0;
+}
+
+static inline void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+}
+
+static inline bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
+ const struct sk_buff *skb)
+{
+ return true;
+}
+#endif /* CONFIG_NET_SWITCHDEV */
+
#endif
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 341caa0ca63a..d8ad73b38de2 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -134,17 +134,36 @@ void br_stp_disable_port(struct net_bridge_port *p)
br_become_root_bridge(br);
}
-static void br_stp_start(struct net_bridge *br)
+static int br_stp_call_user(struct net_bridge *br, char *arg)
{
- int r;
- char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+ char *argv[] = { BR_STP_PROG, br->dev->name, arg, NULL };
char *envp[] = { NULL };
+ int rc;
+
+ /* call userspace STP and report program errors */
+ rc = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+ if (rc > 0) {
+ if (rc & 0xff)
+ br_debug(br, BR_STP_PROG " received signal %d\n",
+ rc & 0x7f);
+ else
+ br_debug(br, BR_STP_PROG " exited with code %d\n",
+ (rc >> 8) & 0xff);
+ }
+
+ return rc;
+}
+
+static void br_stp_start(struct net_bridge *br)
+{
struct net_bridge_port *p;
+ int err = -ENOENT;
if (net_eq(dev_net(br->dev), &init_net))
- r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
- else
- r = -ENOENT;
+ err = br_stp_call_user(br, "start");
+
+ if (err && err != -ENOENT)
+ br_err(br, "failed to start userspace STP (%d)\n", err);
spin_lock_bh(&br->lock);
@@ -153,9 +172,10 @@ static void br_stp_start(struct net_bridge *br)
else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
- if (r == 0) {
+ if (!err) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
+
/* Stop hello and hold timers */
del_timer(&br->hello_timer);
list_for_each_entry(p, &br->port_list, list)
@@ -173,14 +193,13 @@ static void br_stp_start(struct net_bridge *br)
static void br_stp_stop(struct net_bridge *br)
{
- int r;
- char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
- char *envp[] = { NULL };
struct net_bridge_port *p;
+ int err;
if (br->stp_enabled == BR_USER_STP) {
- r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
- br_info(br, "userspace STP stopped, return code %d\n", r);
+ err = br_stp_call_user(br, "stop");
+ if (err)
+ br_err(br, "failed to stop userspace STP (%d)\n", err);
/* To start timers on any ports left in blocking */
mod_timer(&br->hello_timer, jiffies + br->hello_time);
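
The rc handling in br_stp_call_user() above decodes the classic wait(2) status word by hand: the low bits carry the terminating signal, the next byte the exit code. For reference, a user-space sketch doing the equivalent with the standard macros (illustrative only, not part of the patch):

#include <stdio.h>
#include <sys/wait.h>

static void report_status(int status)
{
	if (WIFSIGNALED(status))
		printf("helper received signal %d\n",
		       WTERMSIG(status));		/* status & 0x7f */
	else if (WIFEXITED(status))
		printf("helper exited with code %d\n",
		       WEXITSTATUS(status));		/* (status >> 8) & 0xff */
}
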
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
new file mode 100644
index 000000000000..f4097b900de1
--- /dev/null
+++ b/net/bridge/br_switchdev.c
@@ -0,0 +1,57 @@
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <net/switchdev.h>
+
+#include "br_private.h"
+
+static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev)
+{
+ struct net_bridge_port *p;
+
+ /* dev is yet to be added to the port list. */
+ list_for_each_entry(p, &br->port_list, list) {
+ if (switchdev_port_same_parent_id(dev, p->dev))
+ return p->offload_fwd_mark;
+ }
+
+ return ++br->offload_fwd_mark;
+}
+
+int nbp_switchdev_mark_set(struct net_bridge_port *p)
+{
+ struct switchdev_attr attr = {
+ .orig_dev = p->dev,
+ .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
+ };
+ int err;
+
+ ASSERT_RTNL();
+
+ err = switchdev_port_attr_get(p->dev, &attr);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ p->offload_fwd_mark = br_switchdev_mark_get(p->br, p->dev);
+
+ return 0;
+}
+
+void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+ if (skb->offload_fwd_mark && !WARN_ON_ONCE(!p->offload_fwd_mark))
+ BR_INPUT_SKB_CB(skb)->offload_fwd_mark = p->offload_fwd_mark;
+}
+
+bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
+ const struct sk_buff *skb)
+{
+ return !skb->offload_fwd_mark ||
+ BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
+}
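
The new offload_fwd_mark logic keeps software flooding from duplicating frames the switch ASIC has already forwarded: every bridge port that shares a parent switch ID gets the same mark, and egress is suppressed on ports whose mark matches the ingress mark recorded in the skb. A stand-alone sketch of that decision (the demo_* types and names are illustrative, not kernel structures):

#include <stdbool.h>

struct demo_port {
	int offload_fwd_mark;	/* same value for every port on one ASIC */
};

struct demo_frame {
	bool offloaded;		/* hardware already forwarded this frame */
	int  ingress_mark;	/* mark of the port it came in on */
};

/* Same rule as nbp_switchdev_allowed_egress(): only suppress software
 * forwarding to ports behind the ASIC that already handled the frame
 * in hardware.
 */
static bool allowed_egress(const struct demo_port *p,
			   const struct demo_frame *f)
{
	return !f->offloaded || f->ingress_mark != p->offload_fwd_mark;
}
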
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 1e04d4d44273..e657258e1f2c 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -171,6 +171,7 @@ BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
+BRPORT_ATTR_FLAG(multicast_flood, BR_MCAST_FLOOD);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 152300d164ac..9a11086ba6ff 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -91,7 +91,7 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
if (loginfo->type == NF_LOG_TYPE_LOG)
bitmask = loginfo->u.log.logflags;
else
- bitmask = NF_LOG_MASK;
+ bitmask = NF_LOG_DEFAULT_MASK;
if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IP)) {
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 203964997a51..2e7c4f974340 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -24,7 +24,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
return EBT_DROP;
if (par->hooknum != NF_BR_BROUTING)
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
ether_addr_copy(eth_hdr(skb)->h_dest,
br_port_get_rcu(par->in)->br->dev->dev_addr);
else
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0833c251aef7..f5c11bbe27db 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -146,7 +146,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
return 1;
if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
return 1;
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
if (in && (p = br_port_get_rcu(in)) != NULL &&
NF_INVF(e, EBT_ILOGICALIN,
ebt_dev_check(e->logical_in, p->br->dev)))
diff --git a/net/bridge/netfilter/nf_log_bridge.c b/net/bridge/netfilter/nf_log_bridge.c
index 5d9953a90929..1663df598545 100644
--- a/net/bridge/netfilter/nf_log_bridge.c
+++ b/net/bridge/netfilter/nf_log_bridge.c
@@ -50,8 +50,7 @@ static struct nf_logger nf_bridge_logger __read_mostly = {
static int __net_init nf_log_bridge_net_init(struct net *net)
{
- nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
- return 0;
+ return nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
}
static void __net_exit nf_log_bridge_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index a78c4e2826e5..97afdc0744e6 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -13,79 +13,11 @@
#include <linux/module.h>
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_bridge.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
-int nft_bridge_iphdr_validate(struct sk_buff *skb)
-{
- struct iphdr *iph;
- u32 len;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return 0;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- return 0;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len)
- return 0;
- else if (len < (iph->ihl*4))
- return 0;
-
- if (!pskb_may_pull(skb, iph->ihl*4))
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(nft_bridge_iphdr_validate);
-
-int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
-{
- struct ipv6hdr *hdr;
- u32 pkt_len;
-
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
- return 0;
-
- hdr = ipv6_hdr(skb);
- if (hdr->version != 6)
- return 0;
-
- pkt_len = ntohs(hdr->payload_len);
- if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
-
-static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- if (nft_bridge_iphdr_validate(skb))
- nft_set_pktinfo_ipv4(pkt, skb, state);
- else
- nft_set_pktinfo(pkt, skb, state);
-}
-
-static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
-#if IS_ENABLED(CONFIG_IPV6)
- if (nft_bridge_ip6hdr_validate(skb) &&
- nft_set_pktinfo_ipv6(pkt, skb, state) == 0)
- return;
-#endif
- nft_set_pktinfo(pkt, skb, state);
-}
-
static unsigned int
nft_do_chain_bridge(void *priv,
struct sk_buff *skb,
@@ -95,13 +27,13 @@ nft_do_chain_bridge(void *priv,
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
- nft_bridge_set_pktinfo_ipv4(&pkt, skb, state);
+ nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
break;
case htons(ETH_P_IPV6):
- nft_bridge_set_pktinfo_ipv6(&pkt, skb, state);
+ nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
break;
default:
- nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb, state);
break;
}
@@ -207,12 +139,20 @@ static int __init nf_tables_bridge_init(void)
int ret;
nf_register_afinfo(&nf_br_afinfo);
- nft_register_chain_type(&filter_bridge);
+ ret = nft_register_chain_type(&filter_bridge);
+ if (ret < 0)
+ goto err1;
+
ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
- if (ret < 0) {
- nft_unregister_chain_type(&filter_bridge);
- nf_unregister_afinfo(&nf_br_afinfo);
- }
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ nft_unregister_chain_type(&filter_bridge);
+err1:
+ nf_unregister_afinfo(&nf_br_afinfo);
return ret;
}
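[Editor's note - illustrative sketch, not part of the patch]
The init rework above checks the return value of nft_register_chain_type() and unwinds already-completed registrations in reverse order through the err1/err2 labels. The minimal sketch below shows that pattern in isolation; register_a()/register_b()/register_c() are stubs assumed for the demo, with register_b() failing on purpose so the rollback path runs.

	#include <stdio.h>

	static int register_a(void)    { puts("register A");   return 0; }
	static void unregister_a(void) { puts("unregister A"); }
	static int register_b(void)    { puts("register B");   return -1; /* simulate failure */ }
	static void unregister_b(void) { puts("unregister B"); }
	static int register_c(void)    { puts("register C");   return 0; }

	static int init(void)
	{
		int ret;

		ret = register_a();
		if (ret < 0)
			goto err0;
		ret = register_b();
		if (ret < 0)
			goto err1;
		ret = register_c();
		if (ret < 0)
			goto err2;
		return 0;

	err2:
		unregister_b();		/* undo in reverse registration order */
	err1:
		unregister_a();
	err0:
		return ret;
	}

	int main(void)
	{
		printf("init -> %d\n", init());
		return 0;
	}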
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 0b77ffbc27d6..4b3df6b0e3b9 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -14,7 +14,6 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
-#include <net/netfilter/nf_tables_bridge.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
@@ -37,6 +36,30 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
skb_pull(nskb, ETH_HLEN);
}
+static int nft_bridge_iphdr_validate(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return 0;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len)
+ return 0;
+ else if (len < (iph->ihl*4))
+ return 0;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return 0;
+
+ return 1;
+}
+
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
@@ -143,6 +166,25 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
+static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
+{
+ struct ipv6hdr *hdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+
+ hdr = ipv6_hdr(skb);
+ if (hdr->version != 6)
+ return 0;
+
+ pkt_len = ntohs(hdr->payload_len);
+ if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
+ return 0;
+
+ return 1;
+}
+
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
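[Editor's note - illustrative sketch, not part of the patch]
nft_bridge_iphdr_validate(), now private to nft_reject_bridge.c, checks that the IPv4 header is pullable, carries version 4 with ihl >= 5, and that tot_len neither exceeds the packet nor undercuts the header itself. Below is a userspace rendering of the same checks over a flat buffer, sketched as an assumption for illustration (memcpy stands in for pskb_may_pull() plus ip_hdr()); it is not the kernel helper.

	#include <arpa/inet.h>
	#include <netinet/ip.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool iphdr_valid(const unsigned char *data, size_t len)
	{
		struct iphdr iph;

		if (len < sizeof(iph))
			return false;
		memcpy(&iph, data, sizeof(iph));	/* header is pullable */
		if (iph.ihl < 5 || iph.version != 4)
			return false;
		if (len < ntohs(iph.tot_len))		/* truncated packet */
			return false;
		if (ntohs(iph.tot_len) < iph.ihl * 4)	/* tot_len below header size */
			return false;
		return true;
	}

	int main(void)
	{
		unsigned char pkt[20] = { 0x45, 0, 0, 20 };	/* version 4, ihl 5, tot_len 20 */

		printf("valid: %d\n", iphdr_valid(pkt, sizeof(pkt)));
		return 0;
	}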
diff --git a/net/core/dev.c b/net/core/dev.c
index ea6312057a71..f1fe26f66458 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3355,16 +3355,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
else
skb_dst_force(skb);
-#ifdef CONFIG_NET_SWITCHDEV
- /* Don't forward if offload device already forwarded */
- if (skb->offload_fwd_mark &&
- skb->offload_fwd_mark == dev->offload_fwd_mark) {
- consume_skb(skb);
- rc = NET_XMIT_SUCCESS;
- goto out;
- }
-#endif
-
txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
@@ -3914,8 +3904,7 @@ static void net_tx_action(struct softirq_action *h)
}
}
-#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
- (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
@@ -4066,12 +4055,17 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
{
#ifdef CONFIG_NETFILTER_INGRESS
if (nf_hook_ingress_active(skb)) {
+ int ingress_retval;
+
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
- return nf_hook_ingress(skb);
+ rcu_read_lock();
+ ingress_retval = nf_hook_ingress(skb);
+ rcu_read_unlock();
+ return ingress_retval;
}
#endif /* CONFIG_NETFILTER_INGRESS */
return 0;
@@ -4308,32 +4302,53 @@ int netif_receive_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_receive_skb);
-/* Network device is going away, flush any packets still pending
- * Called with irqs disabled.
- */
-static void flush_backlog(void *arg)
+DEFINE_PER_CPU(struct work_struct, flush_works);
+
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(struct work_struct *work)
{
- struct net_device *dev = arg;
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
struct sk_buff *skb, *tmp;
+ struct softnet_data *sd;
+
+ local_bh_disable();
+ sd = this_cpu_ptr(&softnet_data);
+ local_irq_disable();
rps_lock(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev == dev) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
kfree_skb(skb);
input_queue_head_incr(sd);
}
}
rps_unlock(sd);
+ local_irq_enable();
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev == dev) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
kfree_skb(skb);
input_queue_head_incr(sd);
}
}
+ local_bh_enable();
+}
+
+static void flush_all_backlogs(void)
+{
+ unsigned int cpu;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&flush_works, cpu));
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&flush_works, cpu));
+
+ put_online_cpus();
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -4821,8 +4836,9 @@ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
static int process_backlog(struct napi_struct *napi, int quota)
{
- int work = 0;
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
+ bool again = true;
+ int work = 0;
/* Check if we have pending ipi, its better to send them now,
* not waiting net_rx_action() end.
@@ -4833,23 +4849,20 @@ static int process_backlog(struct napi_struct *napi, int quota)
}
napi->weight = weight_p;
- local_irq_disable();
- while (1) {
+ while (again) {
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sd->process_queue))) {
rcu_read_lock();
- local_irq_enable();
__netif_receive_skb(skb);
rcu_read_unlock();
- local_irq_disable();
input_queue_head_incr(sd);
- if (++work >= quota) {
- local_irq_enable();
+ if (++work >= quota)
return work;
- }
+
}
+ local_irq_disable();
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
@@ -4861,16 +4874,14 @@ static int process_backlog(struct napi_struct *napi, int quota)
* and we dont need an smp_mb() memory barrier.
*/
napi->state = 0;
- rps_unlock(sd);
-
- break;
+ again = false;
+ } else {
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
}
-
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
- &sd->process_queue);
rps_unlock(sd);
+ local_irq_enable();
}
- local_irq_enable();
return work;
}
@@ -5578,6 +5589,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
+ u16 ref_nr,
struct list_head *dev_list,
void *private, bool master)
{
@@ -5587,7 +5599,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj = __netdev_find_adj(adj_dev, dev_list);
if (adj) {
- adj->ref_nr++;
+ adj->ref_nr += ref_nr;
return 0;
}
@@ -5597,7 +5609,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj->dev = adj_dev;
adj->master = master;
- adj->ref_nr = 1;
+ adj->ref_nr = ref_nr;
adj->private = private;
dev_hold(adj_dev);
@@ -5636,6 +5648,7 @@ free_adj:
static void __netdev_adjacent_dev_remove(struct net_device *dev,
struct net_device *adj_dev,
+ u16 ref_nr,
struct list_head *dev_list)
{
struct netdev_adjacent *adj;
@@ -5648,10 +5661,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
BUG();
}
- if (adj->ref_nr > 1) {
- pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
- adj->ref_nr-1);
- adj->ref_nr--;
+ if (adj->ref_nr > ref_nr) {
+ pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
+ ref_nr, adj->ref_nr-ref_nr);
+ adj->ref_nr -= ref_nr;
return;
}
@@ -5670,21 +5683,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
struct net_device *upper_dev,
+ u16 ref_nr,
struct list_head *up_list,
struct list_head *down_list,
void *private, bool master)
{
int ret;
- ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
- master);
+ ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+ private, master);
if (ret)
return ret;
- ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
- false);
+ ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+ private, false);
if (ret) {
- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
return ret;
}
@@ -5692,9 +5706,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
}
static int __netdev_adjacent_dev_link(struct net_device *dev,
- struct net_device *upper_dev)
+ struct net_device *upper_dev,
+ u16 ref_nr)
{
- return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+ return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
&dev->all_adj_list.upper,
&upper_dev->all_adj_list.lower,
NULL, false);
@@ -5702,17 +5717,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
struct net_device *upper_dev,
+ u16 ref_nr,
struct list_head *up_list,
struct list_head *down_list)
{
- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
- __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
+ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+ __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}
static void __netdev_adjacent_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev)
+ struct net_device *upper_dev,
+ u16 ref_nr)
{
- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
&dev->all_adj_list.upper,
&upper_dev->all_adj_list.lower);
}
@@ -5721,17 +5738,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
struct net_device *upper_dev,
void *private, bool master)
{
- int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+ int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
if (ret)
return ret;
- ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+ ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
&dev->adj_list.upper,
&upper_dev->adj_list.lower,
private, master);
if (ret) {
- __netdev_adjacent_dev_unlink(dev, upper_dev);
+ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
return ret;
}
@@ -5741,8 +5758,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
struct net_device *upper_dev)
{
- __netdev_adjacent_dev_unlink(dev, upper_dev);
- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
&dev->adj_list.upper,
&upper_dev->adj_list.lower);
}
@@ -5795,7 +5812,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
pr_debug("Interlinking %s with %s, non-neighbour\n",
i->dev->name, j->dev->name);
- ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+ ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
if (ret)
goto rollback_mesh;
}
@@ -5805,7 +5822,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
pr_debug("linking %s's upper device %s with %s\n",
upper_dev->name, i->dev->name, dev->name);
- ret = __netdev_adjacent_dev_link(dev, i->dev);
+ ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
if (ret)
goto rollback_upper_mesh;
}
@@ -5814,7 +5831,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
list_for_each_entry(i, &dev->all_adj_list.lower, list) {
pr_debug("linking %s's lower device %s with %s\n", dev->name,
i->dev->name, upper_dev->name);
- ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+ ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
if (ret)
goto rollback_lower_mesh;
}
@@ -5832,7 +5849,7 @@ rollback_lower_mesh:
list_for_each_entry(i, &dev->all_adj_list.lower, list) {
if (i == to_i)
break;
- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
}
i = NULL;
@@ -5842,7 +5859,7 @@ rollback_upper_mesh:
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
if (i == to_i)
break;
- __netdev_adjacent_dev_unlink(dev, i->dev);
+ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
}
i = j = NULL;
@@ -5854,7 +5871,7 @@ rollback_mesh:
list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
if (i == to_i && j == to_j)
break;
- __netdev_adjacent_dev_unlink(i->dev, j->dev);
+ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
}
if (i == to_i)
break;
@@ -5934,16 +5951,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
*/
list_for_each_entry(i, &dev->all_adj_list.lower, list)
list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
- __netdev_adjacent_dev_unlink(i->dev, j->dev);
+ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
/* remove also the devices itself from lower/upper device
* list
*/
list_for_each_entry(i, &dev->all_adj_list.lower, list)
- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
- __netdev_adjacent_dev_unlink(dev, i->dev);
+ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
&changeupper_info.info);
@@ -6723,8 +6740,8 @@ static void rollback_registered_many(struct list_head *head)
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
- on_each_cpu(flush_backlog, dev, 1);
}
+ flush_all_backlogs();
synchronize_net();
@@ -7641,6 +7658,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->all_adj_list.lower);
INIT_LIST_HEAD(&dev->ptype_all);
INIT_LIST_HEAD(&dev->ptype_specific);
+#ifdef CONFIG_NET_SCHED
+ hash_init(dev->qdisc_hash);
+#endif
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
@@ -8286,8 +8306,11 @@ static int __init net_dev_init(void)
*/
for_each_possible_cpu(i) {
+ struct work_struct *flush = per_cpu_ptr(&flush_works, i);
struct softnet_data *sd = &per_cpu(softnet_data, i);
+ INIT_WORK(flush, flush_backlog);
+
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
INIT_LIST_HEAD(&sd->poll_list);
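[Editor's note - illustrative sketch, not part of the patch]
The net/core/dev.c changes above replace the per-device on_each_cpu(flush_backlog, ...) IPI with per-CPU work items: rollback_registered_many() now calls flush_all_backlogs(), which queues one work on every online CPU and then waits for each to finish. The rough userspace analogue below uses threads in place of per-CPU works purely to show the queue-then-flush shape; NR_CPUS and the printouts are assumptions for the demo.

	#include <pthread.h>
	#include <stdio.h>

	#define NR_CPUS 4

	static void *flush_backlog(void *arg)
	{
		long cpu = (long)arg;

		/* drop whatever this CPU still has queued for the dying device */
		printf("cpu %ld: backlog flushed\n", cpu);
		return NULL;
	}

	static void flush_all_backlogs(void)
	{
		pthread_t works[NR_CPUS];

		for (long cpu = 0; cpu < NR_CPUS; cpu++)	/* queue_work_on() */
			pthread_create(&works[cpu], NULL, flush_backlog, (void *)cpu);
		for (long cpu = 0; cpu < NR_CPUS; cpu++)	/* flush_work() */
			pthread_join(works[cpu], NULL);
	}

	int main(void)
	{
		flush_all_backlogs();
		puts("all backlogs flushed; safe to continue unregistering");
		return 0;
	}

Build with gcc -pthread; the point is only that unregistration waits for every CPU's flush before proceeding.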
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index d6b3b579560d..72cfb0c61125 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -105,7 +105,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
return skb;
}
-static struct genl_multicast_group dropmon_mcgrps[] = {
+static const struct genl_multicast_group dropmon_mcgrps[] = {
{ .name = "events", },
};
diff --git a/net/core/filter.c b/net/core/filter.c
index cb06aceb512a..00351cdf7d0c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -94,14 +94,13 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
}
EXPORT_SYMBOL(sk_filter_trim_cap);
-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
- return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+ return skb_get_poff(skb);
}
-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -120,9 +119,8 @@ static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return 0;
}
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -145,7 +143,7 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return 0;
}
-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_0(__get_raw_cpu_id)
{
return raw_smp_processor_id();
}
@@ -233,9 +231,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_HATYPE:
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
BPF_REG_TMP, BPF_REG_CTX,
offsetof(struct sk_buff, dev));
/* if (tmp != 0) goto pc + 1 */
@@ -1350,17 +1347,26 @@ struct bpf_scratchpad {
static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ return skb_ensure_writable(skb, write_len);
+}
+
static inline int bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
- int err;
+ int err = __bpf_try_make_writable(skb, write_len);
- err = skb_ensure_writable(skb, write_len);
bpf_compute_data_end(skb);
-
return err;
}
+static int bpf_try_make_head_writable(struct sk_buff *skb)
+{
+ return bpf_try_make_writable(skb, skb_headlen(skb));
+}
+
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
@@ -1373,12 +1379,9 @@ static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
-static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+ const void *, from, u32, len, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- unsigned int offset = (unsigned int) r2;
- void *from = (void *) (long) r3;
- unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
@@ -1413,12 +1416,9 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+ void *, to, u32, len)
{
- const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
- unsigned int offset = (unsigned int) r2;
- void *to = (void *)(unsigned long) r3;
- unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(offset > 0xffff))
@@ -1446,10 +1446,31 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.arg4_type = ARG_CONST_STACK_SIZE,
};
-static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+ /* Idea is the following: should the needed direct read/write
+ * test fail during runtime, we can pull in more data and redo
+ * again, since implicitly, we invalidate previous checks here.
+ *
+ * Or, since we know how much we need to make read/writeable,
+ * this can be done once at the program beginning for direct
+ * access case. By this we overcome limitations of only current
+ * headroom being accessible.
+ */
+ return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto bpf_skb_pull_data_proto = {
+ .func = bpf_skb_pull_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
+ u64, from, u64, to, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
@@ -1491,12 +1512,11 @@ static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
+ u64, from, u64, to, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
- unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
@@ -1544,12 +1564,11 @@ static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
+BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
+ __be32 *, to, u32, to_size, __wsum, seed)
{
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
- u64 diff_size = from_size + to_size;
- __be32 *from = (__be32 *) (long) r1;
- __be32 *to = (__be32 *) (long) r3;
+ u32 diff_size = from_size + to_size;
int i, j = 0;
/* This is quite flexible, some examples:
@@ -1575,6 +1594,7 @@ static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
static const struct bpf_func_proto bpf_csum_diff_proto = {
.func = bpf_csum_diff,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_STACK,
.arg2_type = ARG_CONST_STACK_SIZE_OR_ZERO,
@@ -1583,6 +1603,26 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.arg5_type = ARG_ANYTHING,
};
+BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
+{
+ /* The interface is to be used in combination with bpf_csum_diff()
+ * for direct packet writes. csum rotation for alignment as well
+ * as emulating csum_sub() can be done from the eBPF program.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ return (skb->csum = csum_add(skb->csum, csum));
+
+ return -ENOTSUPP;
+}
+
+static const struct bpf_func_proto bpf_csum_update_proto = {
+ .func = bpf_csum_update,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
+
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
return dev_forward_skb(dev, skb);
@@ -1607,10 +1647,11 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
return ret;
}
-static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
struct net_device *dev;
+ struct sk_buff *clone;
+ int ret;
if (unlikely(flags & ~(BPF_F_INGRESS)))
return -EINVAL;
@@ -1619,14 +1660,25 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
if (unlikely(!dev))
return -EINVAL;
- skb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb))
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!clone))
return -ENOMEM;
- bpf_push_mac_rcsum(skb);
+ /* For direct write, we need to keep the invariant that the skbs
+ * we're dealing with need to be uncloned. Should uncloning fail
+ * here, we need to free the just generated clone to unclone once
+ * again.
+ */
+ ret = bpf_try_make_head_writable(skb);
+ if (unlikely(ret)) {
+ kfree_skb(clone);
+ return -ENOMEM;
+ }
+
+ bpf_push_mac_rcsum(clone);
return flags & BPF_F_INGRESS ?
- __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+ __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1645,7 +1697,7 @@ struct redirect_info {
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
-static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
@@ -1684,9 +1736,9 @@ static const struct bpf_func_proto bpf_redirect_proto = {
.arg2_type = ARG_ANYTHING,
};
-static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
- return task_get_classid((struct sk_buff *) (unsigned long) r1);
+ return task_get_classid(skb);
}
static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
@@ -1696,9 +1748,9 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
- return dst_tclassid((struct sk_buff *) (unsigned long) r1);
+ return dst_tclassid(skb);
}
static const struct bpf_func_proto bpf_get_route_realm_proto = {
@@ -1708,14 +1760,14 @@ static const struct bpf_func_proto bpf_get_route_realm_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
/* If skb_clear_hash() was called due to mangling, we can
* trigger SW recalculation here. Later access to hash
* can then use the inline skb->hash via context directly
* instead of calling this helper again.
*/
- return skb_get_hash((struct sk_buff *) (unsigned long) r1);
+ return skb_get_hash(skb);
}
static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
@@ -1725,10 +1777,25 @@ static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
+{
+ /* After all direct packet write, this can be used once for
+ * triggering a lazy recalc on next skb_get_hash() invocation.
+ */
+ skb_clear_hash(skb);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
+ .func = bpf_set_hash_invalid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
+ u16, vlan_tci)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- __be16 vlan_proto = (__force __be16) r2;
int ret;
if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
@@ -1753,9 +1820,8 @@ const struct bpf_func_proto bpf_skb_vlan_push_proto = {
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
-static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
int ret;
bpf_push_mac_rcsum(skb);
@@ -1930,10 +1996,9 @@ static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
return -ENOTSUPP;
}
-static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
+ u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- __be16 proto = (__force __be16) r2;
int ret;
if (unlikely(flags))
@@ -1970,14 +2035,11 @@ static const struct bpf_func_proto bpf_skb_change_proto_proto = {
.arg3_type = ARG_ANYTHING,
};
-static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u32 pkt_type = r2;
-
/* We only allow a restricted subset to be changed for now. */
- if (unlikely(skb->pkt_type > PACKET_OTHERHOST ||
- pkt_type > PACKET_OTHERHOST))
+ if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
+ !skb_pkt_type_ok(pkt_type)))
return -EINVAL;
skb->pkt_type = pkt_type;
@@ -1992,19 +2054,100 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = {
.arg2_type = ARG_ANYTHING,
};
+static u32 __bpf_skb_min_len(const struct sk_buff *skb)
+{
+ u32 min_len = skb_network_offset(skb);
+
+ if (skb_transport_header_was_set(skb))
+ min_len = skb_transport_offset(skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ min_len = skb_checksum_start_offset(skb) +
+ skb->csum_offset + sizeof(__sum16);
+ return min_len;
+}
+
+static u32 __bpf_skb_max_len(const struct sk_buff *skb)
+{
+ return skb->dev->mtu + skb->dev->hard_header_len;
+}
+
+static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
+{
+ unsigned int old_len = skb->len;
+ int ret;
+
+ ret = __skb_grow_rcsum(skb, new_len);
+ if (!ret)
+ memset(skb->data + old_len, 0, new_len - old_len);
+ return ret;
+}
+
+static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
+{
+ return __skb_trim_rcsum(skb, new_len);
+}
+
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+ u64, flags)
+{
+ u32 max_len = __bpf_skb_max_len(skb);
+ u32 min_len = __bpf_skb_min_len(skb);
+ int ret;
+
+ if (unlikely(flags || new_len > max_len || new_len < min_len))
+ return -EINVAL;
+ if (skb->encapsulation)
+ return -ENOTSUPP;
+
+ /* The basic idea of this helper is that it's performing the
+ * needed work to either grow or trim an skb, and eBPF program
+ * rewrites the rest via helpers like bpf_skb_store_bytes(),
+ * bpf_lX_csum_replace() and others rather than passing a raw
+ * buffer here. This one is a slow path helper and intended
+ * for replies with control messages.
+ *
+ * Like in bpf_skb_change_proto(), we want to keep this rather
+ * minimal and without protocol specifics so that we are able
+ * to separate concerns as in bpf_skb_store_bytes() should only
+ * be the one responsible for writing buffers.
+ *
+ * It's really expected to be a slow path operation here for
+ * control message replies, so we're implicitly linearizing,
+ * uncloning and dropping offloads from the skb by this.
+ */
+ ret = __bpf_try_make_writable(skb, skb->len);
+ if (!ret) {
+ if (new_len > skb->len)
+ ret = bpf_skb_grow_rcsum(skb, new_len);
+ else if (new_len < skb->len)
+ ret = bpf_skb_trim_rcsum(skb, new_len);
+ if (!ret && skb_is_gso(skb))
+ skb_gso_reset(skb);
+ }
+
+ bpf_compute_data_end(skb);
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_skb_change_tail_proto = {
+ .func = bpf_skb_change_tail,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+
bool bpf_helper_changes_skb_data(void *func)
{
- if (func == bpf_skb_vlan_push)
- return true;
- if (func == bpf_skb_vlan_pop)
- return true;
- if (func == bpf_skb_store_bytes)
- return true;
- if (func == bpf_skb_change_proto)
- return true;
- if (func == bpf_l3_csum_replace)
- return true;
- if (func == bpf_l4_csum_replace)
+ if (func == bpf_skb_vlan_push ||
+ func == bpf_skb_vlan_pop ||
+ func == bpf_skb_store_bytes ||
+ func == bpf_skb_change_proto ||
+ func == bpf_skb_change_tail ||
+ func == bpf_skb_pull_data ||
+ func == bpf_l3_csum_replace ||
+ func == bpf_l4_csum_replace)
return true;
return false;
@@ -2023,13 +2166,10 @@ static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
return 0;
}
-static u64 bpf_skb_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
- u64 meta_size)
+BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
+ u64, flags, void *, meta, u64, meta_size)
{
- struct sk_buff *skb = (struct sk_buff *)(long) r1;
- struct bpf_map *map = (struct bpf_map *)(long) r2;
u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
- void *meta = (void *)(long) r4;
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
@@ -2056,10 +2196,9 @@ static unsigned short bpf_tunnel_key_af(u64 flags)
return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}
-static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
+ u32, size, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
u8 compat[sizeof(struct bpf_tunnel_key)];
void *to_orig = to;
@@ -2124,10 +2263,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u8 *to = (u8 *) (long) r2;
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
int err;
@@ -2162,10 +2299,9 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
static struct metadata_dst __percpu *md_dst;
-static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
+ const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
struct metadata_dst *md = this_cpu_ptr(md_dst);
u8 compat[sizeof(struct bpf_tunnel_key)];
struct ip_tunnel_info *info;
@@ -2183,7 +2319,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
*/
memcpy(compat, from, size);
memset(compat + size, 0, sizeof(compat) - size);
- from = (struct bpf_tunnel_key *)compat;
+ from = (const struct bpf_tunnel_key *) compat;
break;
default:
return -EINVAL;
@@ -2233,10 +2369,9 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
+ const u8 *, from, u32, size)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u8 *from = (u8 *) (long) r2;
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct metadata_dst *md = this_cpu_ptr(md_dst);
@@ -2282,28 +2417,24 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
}
}
-#ifdef CONFIG_SOCK_CGROUP_DATA
-static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
+ u32, idx)
{
- struct sk_buff *skb = (struct sk_buff *)(long)r1;
- struct bpf_map *map = (struct bpf_map *)(long)r2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct cgroup *cgrp;
struct sock *sk;
- u32 i = (u32)r3;
- sk = skb->sk;
+ sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk))
return -ENOENT;
-
- if (unlikely(i >= array->map.max_entries))
+ if (unlikely(idx >= array->map.max_entries))
return -E2BIG;
- cgrp = READ_ONCE(array->ptrs[i]);
+ cgrp = READ_ONCE(array->ptrs[idx]);
if (unlikely(!cgrp))
return -EAGAIN;
- return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
+ return sk_under_cgroup_hierarchy(sk, cgrp);
}
static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
@@ -2314,7 +2445,38 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
};
-#endif
+
+static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
+ unsigned long off, unsigned long len)
+{
+ memcpy(dst_buff, src_buff + off, len);
+ return 0;
+}
+
+BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
+ u64, flags, void *, meta, u64, meta_size)
+{
+ u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
+
+ if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
+ return -EINVAL;
+ if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
+ return -EFAULT;
+
+ return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size,
+ bpf_xdp_copy);
+}
+
+static const struct bpf_func_proto bpf_xdp_event_output_proto = {
+ .func = bpf_xdp_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_STACK,
+ .arg5_type = ARG_CONST_STACK_SIZE,
+};
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
@@ -2350,8 +2512,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_skb_store_bytes_proto;
case BPF_FUNC_skb_load_bytes:
return &bpf_skb_load_bytes_proto;
+ case BPF_FUNC_skb_pull_data:
+ return &bpf_skb_pull_data_proto;
case BPF_FUNC_csum_diff:
return &bpf_csum_diff_proto;
+ case BPF_FUNC_csum_update:
+ return &bpf_csum_update_proto;
case BPF_FUNC_l3_csum_replace:
return &bpf_l3_csum_replace_proto;
case BPF_FUNC_l4_csum_replace:
@@ -2368,6 +2534,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_skb_change_proto_proto;
case BPF_FUNC_skb_change_type:
return &bpf_skb_change_type_proto;
+ case BPF_FUNC_skb_change_tail:
+ return &bpf_skb_change_tail_proto;
case BPF_FUNC_skb_get_tunnel_key:
return &bpf_skb_get_tunnel_key_proto;
case BPF_FUNC_skb_set_tunnel_key:
@@ -2382,14 +2550,14 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_get_route_realm_proto;
case BPF_FUNC_get_hash_recalc:
return &bpf_get_hash_recalc_proto;
+ case BPF_FUNC_set_hash_invalid:
+ return &bpf_set_hash_invalid_proto;
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
-#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
-#endif
default:
return sk_filter_func_proto(func_id);
}
@@ -2398,7 +2566,14 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
- return sk_filter_func_proto(func_id);
+ switch (func_id) {
+ case BPF_FUNC_perf_event_output:
+ return &bpf_xdp_event_output_proto;
+ case BPF_FUNC_get_smp_processor_id:
+ return &bpf_get_smp_processor_id_proto;
+ default:
+ return sk_filter_func_proto(func_id);
+ }
}
static bool __is_valid_access(int off, int size, enum bpf_access_type type)
@@ -2438,6 +2613,45 @@ static bool sk_filter_is_valid_access(int off, int size,
return __is_valid_access(off, size, type);
}
+static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (!direct_write)
+ return 0;
+
+ /* if (!skb->cloned)
+ * goto start;
+ *
+ * (Fast-path, otherwise approximation that we might be
+ * a clone, do the rest in helper.)
+ */
+ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
+ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
+
+ /* ret = bpf_skb_pull_data(skb, 0); */
+ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
+ *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_pull_data);
+ /* if (!ret)
+ * goto restore;
+ * return TC_ACT_SHOT;
+ */
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
+ *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
+ *insn++ = BPF_EXIT_INSN();
+
+ /* restore: */
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+ /* start: */
+ *insn++ = prog->insnsi[0];
+
+ return insn - insn_buf;
+}
+
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type,
enum bpf_reg_type *reg_type)
@@ -2475,7 +2689,7 @@ static bool __is_valid_xdp_access(int off, int size,
return false;
if (off % size != 0)
return false;
- if (size != 4)
+ if (size != sizeof(__u32))
return false;
return true;
@@ -2506,10 +2720,10 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn_buf,
- struct bpf_prog *prog)
+static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
@@ -2556,7 +2770,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
case offsetof(struct __sk_buff, ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
dst_reg, src_reg,
offsetof(struct sk_buff, dev));
*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
@@ -2597,7 +2811,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
dst_reg, src_reg, insn);
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
prog->cb_access = 1;
@@ -2621,7 +2835,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
break;
case offsetof(struct __sk_buff, data):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
dst_reg, src_reg,
offsetof(struct sk_buff, data));
break;
@@ -2630,8 +2844,8 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
ctx_off -= offsetof(struct __sk_buff, data_end);
ctx_off += offsetof(struct sk_buff, cb);
ctx_off += offsetof(struct bpf_skb_data_end, data_end);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
- dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
+ ctx_off);
break;
case offsetof(struct __sk_buff, tc_index):
@@ -2657,6 +2871,31 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
return insn - insn_buf;
}
+static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (ctx_off) {
+ case offsetof(struct __sk_buff, ifindex):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
+ dst_reg, src_reg,
+ offsetof(struct sk_buff, dev));
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ offsetof(struct net_device, ifindex));
+ break;
+ default:
+ return sk_filter_convert_ctx_access(type, dst_reg, src_reg,
+ ctx_off, insn_buf, prog);
+ }
+
+ return insn - insn_buf;
+}
+
static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn_buf,
@@ -2666,12 +2905,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
switch (ctx_off) {
case offsetof(struct xdp_md, data):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
dst_reg, src_reg,
offsetof(struct xdp_buff, data));
break;
case offsetof(struct xdp_md, data_end):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data_end)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
dst_reg, src_reg,
offsetof(struct xdp_buff, data_end));
break;
@@ -2683,13 +2922,14 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
static const struct bpf_verifier_ops sk_filter_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = bpf_net_convert_ctx_access,
+ .convert_ctx_access = sk_filter_convert_ctx_access,
};
static const struct bpf_verifier_ops tc_cls_act_ops = {
.get_func_proto = tc_cls_act_func_proto,
.is_valid_access = tc_cls_act_is_valid_access,
- .convert_ctx_access = bpf_net_convert_ctx_access,
+ .convert_ctx_access = tc_cls_act_convert_ctx_access,
+ .gen_prologue = tc_cls_act_prologue,
};
static const struct bpf_verifier_ops xdp_ops = {
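[Editor's note - illustrative sketch, not part of the patch]
The filter.c conversion above moves every helper from the raw five-u64 calling convention to the BPF_CALL_x() macros, so the body is written against typed parameters and the casting boilerplate is generated once. The sketch below defines a deliberately simplified DEMO_CALL_2() macro to show the idea; it is an assumption for the example and much cruder than the in-kernel BPF_CALL_x family.

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;
	typedef uint32_t u32;

	#define DEMO_CALL_2(name, t1, a1, t2, a2)				\
		static u64 name##_impl(t1 a1, t2 a2);				\
		u64 name(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)		\
		{								\
			(void)r3; (void)r4; (void)r5;				\
			return name##_impl((t1)(uintptr_t)r1,			\
					   (t2)(uintptr_t)r2);			\
		}								\
		static u64 name##_impl(t1 a1, t2 a2)

	/* helper body now reads like a normal typed function */
	DEMO_CALL_2(demo_load_byte, const char *, buf, u32, off)
	{
		return (u64)(unsigned char)buf[off];
	}

	int main(void)
	{
		char pkt[] = { 0x11, 0x22, 0x33 };

		/* still callable through the 5 x u64 convention, like a BPF helper */
		printf("0x%llx\n", (unsigned long long)
		       demo_load_byte((u64)(uintptr_t)pkt, 2, 0, 0, 0));
		return 0;
	}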
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 52742a02814f..1a7b80f73376 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -6,6 +6,8 @@
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <net/gre.h>
+#include <net/pptp.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
@@ -116,13 +118,16 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_ports *key_ports;
struct flow_dissector_key_tags *key_tags;
+ struct flow_dissector_key_vlan *key_vlan;
struct flow_dissector_key_keyid *key_keyid;
+ bool skip_vlan = false;
u8 ip_proto = 0;
bool ret = false;
if (!data) {
data = skb->data;
- proto = skb->protocol;
+ proto = skb_vlan_tag_present(skb) ?
+ skb->vlan_proto : skb->protocol;
nhoff = skb_network_offset(skb);
hlen = skb_headlen(skb);
}
@@ -241,23 +246,45 @@ ipv6:
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
- struct vlan_hdr _vlan;
- vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
- if (!vlan)
- goto out_bad;
+ if (skb_vlan_tag_present(skb))
+ proto = skb->protocol;
+
+ if (!skb_vlan_tag_present(skb) ||
+ proto == cpu_to_be16(ETH_P_8021Q) ||
+ proto == cpu_to_be16(ETH_P_8021AD)) {
+ struct vlan_hdr _vlan;
+ vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
+ data, hlen, &_vlan);
+ if (!vlan)
+ goto out_bad;
+ proto = vlan->h_vlan_encapsulated_proto;
+ nhoff += sizeof(*vlan);
+ if (skip_vlan)
+ goto again;
+ }
+
+ skip_vlan = true;
if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_VLANID)) {
- key_tags = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_VLANID,
+ FLOW_DISSECTOR_KEY_VLAN)) {
+ key_vlan = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
target_container);
- key_tags->vlan_id = skb_vlan_tag_get_id(skb);
+ if (skb_vlan_tag_present(skb)) {
+ key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
+ key_vlan->vlan_priority =
+ (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
+ } else {
+ key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
+ VLAN_VID_MASK;
+ key_vlan->vlan_priority =
+ (ntohs(vlan->h_vlan_TCI) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
}
- proto = vlan->h_vlan_encapsulated_proto;
- nhoff += sizeof(*vlan);
goto again;
}
case htons(ETH_P_PPP_SES): {
@@ -338,32 +365,42 @@ mpls:
ip_proto_again:
switch (ip_proto) {
case IPPROTO_GRE: {
- struct gre_hdr {
- __be16 flags;
- __be16 proto;
- } *hdr, _hdr;
+ struct gre_base_hdr *hdr, _hdr;
+ u16 gre_ver;
+ int offset = 0;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
goto out_bad;
- /*
- * Only look inside GRE if version zero and no
- * routing
- */
- if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
+
+ /* Only look inside GRE without routing */
+ if (hdr->flags & GRE_ROUTING)
break;
- proto = hdr->proto;
- nhoff += 4;
+ /* Only look inside GRE for version 0 and 1 */
+ gre_ver = ntohs(hdr->flags & GRE_VERSION);
+ if (gre_ver > 1)
+ break;
+
+ proto = hdr->protocol;
+ if (gre_ver) {
+ /* Version 1 must be PPTP, and check the flags */
+ if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
+ break;
+ }
+
+ offset += sizeof(struct gre_base_hdr);
+
if (hdr->flags & GRE_CSUM)
- nhoff += 4;
+ offset += sizeof(((struct gre_full_hdr *)0)->csum) +
+ sizeof(((struct gre_full_hdr *)0)->reserved1);
+
if (hdr->flags & GRE_KEY) {
const __be32 *keyid;
__be32 _keyid;
- keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
+ keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid),
data, hlen, &_keyid);
-
if (!keyid)
goto out_bad;
@@ -372,32 +409,65 @@ ip_proto_again:
key_keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_GRE_KEYID,
target_container);
- key_keyid->keyid = *keyid;
+ if (gre_ver == 0)
+ key_keyid->keyid = *keyid;
+ else
+ key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
}
- nhoff += 4;
+ offset += sizeof(((struct gre_full_hdr *)0)->key);
}
+
if (hdr->flags & GRE_SEQ)
- nhoff += 4;
- if (proto == htons(ETH_P_TEB)) {
- const struct ethhdr *eth;
- struct ethhdr _eth;
-
- eth = __skb_header_pointer(skb, nhoff,
- sizeof(_eth),
- data, hlen, &_eth);
- if (!eth)
+ offset += sizeof(((struct pptp_gre_header *)0)->seq);
+
+ if (gre_ver == 0) {
+ if (proto == htons(ETH_P_TEB)) {
+ const struct ethhdr *eth;
+ struct ethhdr _eth;
+
+ eth = __skb_header_pointer(skb, nhoff + offset,
+ sizeof(_eth),
+ data, hlen, &_eth);
+ if (!eth)
+ goto out_bad;
+ proto = eth->h_proto;
+ offset += sizeof(*eth);
+
+ /* Cap headers that we access via pointers at the
+ * end of the Ethernet header as our maximum alignment
+ * at that point is only 2 bytes.
+ */
+ if (NET_IP_ALIGN)
+ hlen = (nhoff + offset);
+ }
+ } else { /* version 1, must be PPTP */
+ u8 _ppp_hdr[PPP_HDRLEN];
+ u8 *ppp_hdr;
+
+ if (hdr->flags & GRE_ACK)
+ offset += sizeof(((struct pptp_gre_header *)0)->ack);
+
+ ppp_hdr = skb_header_pointer(skb, nhoff + offset,
+ sizeof(_ppp_hdr), _ppp_hdr);
+ if (!ppp_hdr)
goto out_bad;
- proto = eth->h_proto;
- nhoff += sizeof(*eth);
-
- /* Cap headers that we access via pointers at the
- * end of the Ethernet header as our maximum alignment
- * at that point is only 2 bytes.
- */
- if (NET_IP_ALIGN)
- hlen = nhoff;
+
+ switch (PPP_PROTOCOL(ppp_hdr)) {
+ case PPP_IP:
+ proto = htons(ETH_P_IP);
+ break;
+ case PPP_IPV6:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ /* Could probably catch some more like MPLS */
+ break;
+ }
+
+ offset += PPP_HDRLEN;
}
+ nhoff += offset;
key_control->flags |= FLOW_DIS_ENCAPSULATION;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
goto out_good;
@@ -874,8 +944,8 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
.offset = offsetof(struct flow_keys, ports),
},
{
- .key_id = FLOW_DISSECTOR_KEY_VLANID,
- .offset = offsetof(struct flow_keys, tags),
+ .key_id = FLOW_DISSECTOR_KEY_VLAN,
+ .offset = offsetof(struct flow_keys, vlan),
},
{
.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
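[Editor's note - illustrative sketch, not part of the patch]
The flow dissector rewrite above stops stepping nhoff by hard-coded 4-byte increments and instead accumulates an offset from the GRE flag bits, additionally accepting version 1 (PPTP) with its ACK field. The standalone sketch below computes that offset the same way; the F_* constants are local host-order simplifications, not the kernel's network-order GRE_* masks.

	#include <stdint.h>
	#include <stdio.h>

	#define F_CSUM	0x8000
	#define F_KEY	0x2000
	#define F_SEQ	0x1000
	#define F_ACK	0x0080
	#define F_VER	0x0007

	static unsigned int gre_payload_offset(uint16_t flags)
	{
		unsigned int off = 4;			/* base header: flags + protocol */
		unsigned int ver = flags & F_VER;

		if (flags & F_CSUM)
			off += 4;			/* csum + reserved1 */
		if (flags & F_KEY)
			off += 4;			/* key (PPTP: payload length + call id) */
		if (flags & F_SEQ)
			off += 4;
		if (ver == 1 && (flags & F_ACK))
			off += 4;			/* PPTP acknowledgement number */
		return off;
	}

	int main(void)
	{
		printf("v0, key+csum: %u bytes\n",
		       gre_payload_offset(F_CSUM | F_KEY));
		printf("v1 (PPTP), key+seq+ack: %u bytes\n",
		       gre_payload_offset(1 | F_KEY | F_SEQ | F_ACK));
		return 0;
	}

The two cases print 12 and 16 bytes, which is where the dissector starts parsing the encapsulated protocol.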
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 669ecc9f884e..e5f84c26ba1a 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -251,6 +251,41 @@ drop:
}
EXPORT_SYMBOL(lwtunnel_output);
+int lwtunnel_xmit(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ const struct lwtunnel_encap_ops *ops;
+ struct lwtunnel_state *lwtstate;
+ int ret = -EINVAL;
+
+ if (!dst)
+ goto drop;
+
+ lwtstate = dst->lwtstate;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->xmit))
+ ret = ops->xmit(skb);
+ rcu_read_unlock();
+
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+ return ret;
+
+drop:
+ kfree_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_xmit);
+
int lwtunnel_input(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
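[Editor's note - illustrative sketch, not part of the patch]
lwtunnel_xmit() added above is a thin dispatcher: no encap state means there is nothing to do, the handler is looked up in an RCU-protected ops array indexed by encap type, and -EOPNOTSUPP ends in a drop. The userspace sketch below keeps only that control flow; plain pointers replace RCU, and encap_ops/mpls_xmit are names invented for the illustration.

	#include <errno.h>
	#include <stdio.h>

	struct encap_ops {
		int (*xmit)(const char *pkt);
	};

	static int mpls_xmit(const char *pkt)
	{
		printf("encapsulated and sent: %s\n", pkt);
		return 0;
	}

	enum { ENCAP_NONE, ENCAP_MPLS, ENCAP_MAX };

	static const struct encap_ops mpls_ops = { .xmit = mpls_xmit };
	static const struct encap_ops *encaps[ENCAP_MAX] = {
		[ENCAP_MPLS] = &mpls_ops,
	};

	static int tunnel_xmit(int type, const char *pkt)
	{
		const struct encap_ops *ops;
		int ret = -EOPNOTSUPP;

		if (type == ENCAP_NONE || type >= ENCAP_MAX)
			return 0;			/* no encap configured: nothing to do */

		ops = encaps[type];			/* rcu_dereference() in the kernel */
		if (ops && ops->xmit)
			ret = ops->xmit(pkt);

		if (ret == -EOPNOTSUPP)
			printf("drop: no handler for encap type %d\n", type);
		return ret;
	}

	int main(void)
	{
		tunnel_xmit(ENCAP_MPLS, "payload");
		tunnel_xmit(ENCAP_NONE, "payload");
		return 0;
	}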
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cf26e04c4046..2ae929f9bd06 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
} else
goto out;
} else {
- if (lladdr == neigh->ha && new == NUD_STALE)
+ if (lladdr == neigh->ha && new == NUD_STALE &&
+ !(flags & NEIGH_UPDATE_F_ADMIN))
new = old;
}
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2c2eb1b629b1..989434f36f96 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -37,6 +37,8 @@ struct net init_net = {
};
EXPORT_SYMBOL(init_net);
+static bool init_net_initialized;
+
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
@@ -213,31 +215,29 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
- unsigned long flags;
bool alloc;
int id;
- spin_lock_irqsave(&net->nsid_lock, flags);
+ spin_lock_bh(&net->nsid_lock);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
}
-EXPORT_SYMBOL(peernet2id_alloc);
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
- unsigned long flags;
int id;
- spin_lock_irqsave(&net->nsid_lock, flags);
+ spin_lock_bh(&net->nsid_lock);
id = __peernet2id(net, peer);
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
return id;
}
+EXPORT_SYMBOL(peernet2id);
/* This function returns true is the peer netns has an id assigned into the
* current netns.
@@ -249,18 +249,17 @@ bool peernet_has_id(struct net *net, struct net *peer)
struct net *get_net_ns_by_id(struct net *net, int id)
{
- unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
- spin_lock_irqsave(&net->nsid_lock, flags);
+ spin_lock_bh(&net->nsid_lock);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
rcu_read_unlock();
return peer;
@@ -310,6 +309,16 @@ out_undo:
#ifdef CONFIG_NET_NS
+static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
+{
+ return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
+}
+
+static void dec_net_namespaces(struct ucounts *ucounts)
+{
+ dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
+}
+
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
@@ -351,19 +360,27 @@ void net_drop_ns(void *p)
struct net *copy_net_ns(unsigned long flags,
struct user_namespace *user_ns, struct net *old_net)
{
+ struct ucounts *ucounts;
struct net *net;
int rv;
if (!(flags & CLONE_NEWNET))
return get_net(old_net);
+ ucounts = inc_net_namespaces(user_ns);
+ if (!ucounts)
+ return ERR_PTR(-ENOSPC);
+
net = net_alloc();
- if (!net)
+ if (!net) {
+ dec_net_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
+ }
get_user_ns(user_ns);
mutex_lock(&net_mutex);
+ net->ucounts = ucounts;
rv = setup_net(net, user_ns);
if (rv == 0) {
rtnl_lock();
@@ -372,6 +389,7 @@ struct net *copy_net_ns(unsigned long flags,
}
mutex_unlock(&net_mutex);
if (rv < 0) {
+ dec_net_namespaces(ucounts);
put_user_ns(user_ns);
net_drop_ns(net);
return ERR_PTR(rv);
@@ -404,17 +422,17 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) {
int id;
- spin_lock_irq(&tmp->nsid_lock);
+ spin_lock_bh(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_irq(&tmp->nsid_lock);
+ spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
- spin_lock_irq(&net->nsid_lock);
+ spin_lock_bh(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_irq(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
}
rtnl_unlock();
@@ -444,6 +462,7 @@ static void cleanup_net(struct work_struct *work)
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
+ dec_net_namespaces(net->ucounts);
put_user_ns(net->user_ns);
net_drop_ns(net);
}
@@ -531,7 +550,7 @@ static struct pernet_operations __net_initdata net_ns_ops = {
.exit = net_ns_net_exit,
};
-static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
+static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
[NETNSA_NONE] = { .type = NLA_UNSPEC },
[NETNSA_NSID] = { .type = NLA_S32 },
[NETNSA_PID] = { .type = NLA_U32 },
@@ -542,7 +561,6 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
- unsigned long flags;
struct net *peer;
int nsid, err;
@@ -563,15 +581,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer))
return PTR_ERR(peer);
- spin_lock_irqsave(&net->nsid_lock, flags);
+ spin_lock_bh(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
@@ -693,11 +711,10 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.idx = 0,
.s_idx = cb->args[0],
};
- unsigned long flags;
- spin_lock_irqsave(&net->nsid_lock, flags);
+ spin_lock_bh(&net->nsid_lock);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_irqrestore(&net->nsid_lock, flags);
+ spin_unlock_bh(&net->nsid_lock);
cb->args[0] = net_cb.idx;
return skb->len;
@@ -750,6 +767,8 @@ static int __init net_ns_init(void)
if (setup_net(&init_net, &init_user_ns))
panic("Could not setup the initial network namespace");
+ init_net_initialized = true;
+
rtnl_lock();
list_add_tail_rcu(&init_net.list, &net_namespace_list);
rtnl_unlock();
@@ -811,15 +830,24 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
+ if (!init_net_initialized) {
+ list_add_tail(&ops->list, list);
+ return 0;
+ }
+
return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
- LIST_HEAD(net_exit_list);
- list_add(&init_net.exit_list, &net_exit_list);
- ops_exit_list(ops, &net_exit_list);
- ops_free_list(ops, &net_exit_list);
+ if (!init_net_initialized) {
+ list_del(&ops->list);
+ } else {
+ LIST_HEAD(net_exit_list);
+ list_add(&init_net.exit_list, &net_exit_list);
+ ops_exit_list(ops, &net_exit_list);
+ ops_free_list(ops, &net_exit_list);
+ }
}
#endif /* CONFIG_NET_NS */
@@ -996,11 +1024,17 @@ static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
return 0;
}
+static struct user_namespace *netns_owner(struct ns_common *ns)
+{
+ return to_net_ns(ns)->user_ns;
+}
+
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
+ .owner = netns_owner,
};
#endif
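The inc_net_namespaces()/dec_net_namespaces() accounting ties network namespace creation to the new per-user ucount limits. A small userspace sketch of what that looks like from the other side; the sysctl path named in the comment is an assumption about how this series exposes the limit.

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Once the per-user limit (assumed to surface as
 * /proc/sys/user/max_net_namespaces) is exhausted, unshare() fails
 * with ENOSPC, matching the ERR_PTR(-ENOSPC) above.
 * Requires CAP_SYS_ADMIN or running inside a user namespace.
 */
int main(void)
{
	if (unshare(CLONE_NEWNET) < 0) {
		fprintf(stderr, "unshare(CLONE_NEWNET): %s\n", strerror(errno));
		return 1;
	}
	puts("created a new network namespace");
	return 0;
}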
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index bbd118b19aef..5219a9e2127a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2286,7 +2286,7 @@ out:
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
- pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
+ pkt_dev->pkt_overhead = 0;
pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2777,13 +2777,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
}
static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
- struct pktgen_dev *pkt_dev,
- unsigned int extralen)
+ struct pktgen_dev *pkt_dev)
{
+ unsigned int extralen = LL_RESERVED_SPACE(dev);
struct sk_buff *skb = NULL;
- unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
- pkt_dev->pkt_overhead;
+ unsigned int size;
+ size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
if (pkt_dev->flags & F_NODE) {
int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
@@ -2796,8 +2796,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
}
+ /* the caller pre-fetches from skb->data and reserves for the mac hdr */
if (likely(skb))
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, extralen - 16);
return skb;
}
@@ -2830,16 +2831,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
mod_cur_headers(pkt_dev);
queue_map = pkt_dev->cur_queue_map;
- datalen = (odev->hard_header_len + 16) & ~0xf;
-
- skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
+ skb = pktgen_alloc_skb(odev, pkt_dev);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
}
prefetchw(skb->data);
- skb_reserve(skb, datalen);
+ skb_reserve(skb, 16);
/* Reserve for ethernet and IP header */
eth = (__u8 *) skb_push(skb, 14);
@@ -2959,7 +2958,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
mod_cur_headers(pkt_dev);
queue_map = pkt_dev->cur_queue_map;
- skb = pktgen_alloc_skb(odev, pkt_dev, 16);
+ skb = pktgen_alloc_skb(odev, pkt_dev);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 189cc78c77eb..b06d2f46b83e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -704,6 +704,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
} else if (i == RTAX_FEATURES - 1) {
u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
+ if (!user_features)
+ continue;
BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
if (nla_put_u32(skb, i + 1, user_features))
goto nla_put_failure;
@@ -841,7 +843,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
size += nla_total_size(num_vfs * sizeof(struct nlattr));
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
- nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ nla_total_size(MAX_VLAN_LIST_LEN *
+ sizeof(struct nlattr)) +
+ nla_total_size(MAX_VLAN_LIST_LEN *
+ sizeof(struct ifla_vf_vlan_info)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
@@ -1109,14 +1114,15 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct nlattr *vfinfo)
{
struct ifla_vf_rss_query_en vf_rss_query_en;
+ struct nlattr *vf, *vfstats, *vfvlanlist;
struct ifla_vf_link_state vf_linkstate;
+ struct ifla_vf_vlan_info vf_vlan_info;
struct ifla_vf_spoofchk vf_spoofchk;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_stats vf_stats;
struct ifla_vf_trust vf_trust;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_rate vf_rate;
- struct nlattr *vf, *vfstats;
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
@@ -1133,11 +1139,14 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
ivi.linkstate = 0;
+ /* VLAN Protocol by default is 802.1Q */
+ ivi.vlan_proto = htons(ETH_P_8021Q);
if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
return 0;
vf_mac.vf =
vf_vlan.vf =
+ vf_vlan_info.vf =
vf_rate.vf =
vf_tx_rate.vf =
vf_spoofchk.vf =
@@ -1148,6 +1157,9 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
+ vf_vlan_info.vlan = ivi.vlan;
+ vf_vlan_info.qos = ivi.qos;
+ vf_vlan_info.vlan_proto = ivi.vlan_proto;
vf_tx_rate.rate = ivi.max_tx_rate;
vf_rate.min_tx_rate = ivi.min_tx_rate;
vf_rate.max_tx_rate = ivi.max_tx_rate;
@@ -1156,10 +1168,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
vf_rss_query_en.setting = ivi.rss_query_en;
vf_trust.setting = ivi.trusted;
vf = nla_nest_start(skb, IFLA_VF_INFO);
- if (!vf) {
- nla_nest_cancel(skb, vfinfo);
- return -EMSGSIZE;
- }
+ if (!vf)
+ goto nla_put_vfinfo_failure;
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
@@ -1175,17 +1185,23 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
&vf_rss_query_en) ||
nla_put(skb, IFLA_VF_TRUST,
sizeof(vf_trust), &vf_trust))
- return -EMSGSIZE;
+ goto nla_put_vf_failure;
+ vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
+ if (!vfvlanlist)
+ goto nla_put_vf_failure;
+ if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
+ &vf_vlan_info)) {
+ nla_nest_cancel(skb, vfvlanlist);
+ goto nla_put_vf_failure;
+ }
+ nla_nest_end(skb, vfvlanlist);
memset(&vf_stats, 0, sizeof(vf_stats));
if (dev->netdev_ops->ndo_get_vf_stats)
dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
&vf_stats);
vfstats = nla_nest_start(skb, IFLA_VF_STATS);
- if (!vfstats) {
- nla_nest_cancel(skb, vf);
- nla_nest_cancel(skb, vfinfo);
- return -EMSGSIZE;
- }
+ if (!vfstats)
+ goto nla_put_vf_failure;
if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
@@ -1197,11 +1213,19 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
- vf_stats.multicast, IFLA_VF_STATS_PAD))
- return -EMSGSIZE;
+ vf_stats.multicast, IFLA_VF_STATS_PAD)) {
+ nla_nest_cancel(skb, vfstats);
+ goto nla_put_vf_failure;
+ }
nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf);
return 0;
+
+nla_put_vf_failure:
+ nla_nest_cancel(skb, vf);
+nla_put_vfinfo_failure:
+ nla_nest_cancel(skb, vfinfo);
+ return -EMSGSIZE;
}
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
@@ -1446,6 +1470,7 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
[IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
[IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
[IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
@@ -1702,7 +1727,37 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
- ivv->qos);
+ ivv->qos,
+ htons(ETH_P_8021Q));
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_VLAN_LIST]) {
+ struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
+ struct nlattr *attr;
+ int rem, len = 0;
+
+ err = -EOPNOTSUPP;
+ if (!ops->ndo_set_vf_vlan)
+ return err;
+
+ nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
+ if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
+ nla_len(attr) < NLA_HDRLEN) {
+ return -EINVAL;
+ }
+ if (len >= MAX_VLAN_LIST_LEN)
+ return -EOPNOTSUPP;
+ ivvl[len] = nla_data(attr);
+
+ len++;
+ }
+ if (len == 0)
+ return -EINVAL;
+
+ err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
+ ivvl[0]->qos, ivvl[0]->vlan_proto);
if (err < 0)
return err;
}
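Both branches above now pass a VLAN protocol down to ndo_set_vf_vlan(). A hedged sketch of the driver side for hardware that only supports 802.1Q; foo_* is hypothetical, the signature matches the calls above.

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/errno.h>

static int foo_set_vf_vlan(struct net_device *dev, int vf, u16 vlan,
			   u8 qos, __be16 vlan_proto)
{
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	/* program VLAN/QoS for this VF into the hardware here */
	return 0;
}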
@@ -3066,7 +3121,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
seq = cb->nlh->nlmsg_seq;
list_for_each_entry(ha, &list->list, list) {
- if (*idx < cb->args[0])
+ if (*idx < cb->args[2])
goto skip;
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
@@ -3093,19 +3148,18 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx)
+ int *idx)
{
int err;
netif_addr_lock_bh(dev);
- err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
+ err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
if (err)
goto out;
- nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
+ nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
netif_addr_unlock_bh(dev);
- cb->args[1] = err;
- return idx;
+ return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
@@ -3118,9 +3172,13 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
const struct net_device_ops *cops = NULL;
struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
struct net *net = sock_net(skb->sk);
+ struct hlist_head *head;
int brport_idx = 0;
int br_idx = 0;
- int idx = 0;
+ int h, s_h;
+ int idx = 0, s_idx;
+ int err = 0;
+ int fidx = 0;
if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy) == 0) {
@@ -3138,49 +3196,71 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
ops = br_dev->netdev_ops;
}
- cb->args[1] = 0;
- for_each_netdev(net, dev) {
- if (brport_idx && (dev->ifindex != brport_idx))
- continue;
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
- if (!br_idx) { /* user did not specify a specific bridge */
- if (dev->priv_flags & IFF_BRIDGE_PORT) {
- br_dev = netdev_master_upper_dev_get(dev);
- cops = br_dev->netdev_ops;
- }
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry(dev, head, index_hlist) {
- } else {
- if (dev != br_dev &&
- !(dev->priv_flags & IFF_BRIDGE_PORT))
+ if (brport_idx && (dev->ifindex != brport_idx))
continue;
- if (br_dev != netdev_master_upper_dev_get(dev) &&
- !(dev->priv_flags & IFF_EBRIDGE))
- continue;
+ if (!br_idx) { /* user did not specify a specific bridge */
+ if (dev->priv_flags & IFF_BRIDGE_PORT) {
+ br_dev = netdev_master_upper_dev_get(dev);
+ cops = br_dev->netdev_ops;
+ }
+ } else {
+ if (dev != br_dev &&
+ !(dev->priv_flags & IFF_BRIDGE_PORT))
+ continue;
- cops = ops;
- }
+ if (br_dev != netdev_master_upper_dev_get(dev) &&
+ !(dev->priv_flags & IFF_EBRIDGE))
+ continue;
+ cops = ops;
+ }
- if (dev->priv_flags & IFF_BRIDGE_PORT) {
- if (cops && cops->ndo_fdb_dump)
- idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
- idx);
- }
- if (cb->args[1] == -EMSGSIZE)
- break;
+ if (idx < s_idx)
+ goto cont;
- if (dev->netdev_ops->ndo_fdb_dump)
- idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
- idx);
- else
- idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
- if (cb->args[1] == -EMSGSIZE)
- break;
+ if (dev->priv_flags & IFF_BRIDGE_PORT) {
+ if (cops && cops->ndo_fdb_dump) {
+ err = cops->ndo_fdb_dump(skb, cb,
+ br_dev, dev,
+ &fidx);
+ if (err == -EMSGSIZE)
+ goto out;
+ }
+ }
+
+ if (dev->netdev_ops->ndo_fdb_dump)
+ err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
+ dev, NULL,
+ &fidx);
+ else
+ err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
+ &fidx);
+ if (err == -EMSGSIZE)
+ goto out;
+
+ cops = NULL;
- cops = NULL;
+ /* reset fdb offset to 0 for rest of the interfaces */
+ cb->args[2] = 0;
+ fidx = 0;
+cont:
+ idx++;
+ }
}
- cb->args[0] = idx;
+out:
+ cb->args[0] = h;
+ cb->args[1] = idx;
+ cb->args[2] = fidx;
+
return skb->len;
}
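With the dump now driven by cb->args[0..2], ndo_fdb_dump() implementations take an int *idx cursor and return an error code instead of the index. A minimal stub (foo_* hypothetical) that defers to the default helper exported in this patch:

static int foo_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			struct net_device *dev,
			struct net_device *filter_dev, int *idx)
{
	/* hardware-owned entries would be emitted here first, using
	 * cb->args[2] (via *idx) as the resume cursor
	 */
	return ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
}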
@@ -3550,6 +3630,91 @@ static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
(!idxattr || idxattr == attrid);
}
+#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
+static int rtnl_get_offload_stats_attr_size(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return sizeof(struct rtnl_link_stats64);
+ }
+
+ return 0;
+}
+
+static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
+ int *prividx)
+{
+ struct nlattr *attr = NULL;
+ int attr_id, size;
+ void *attr_data;
+ int err;
+
+ if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
+ dev->netdev_ops->ndo_get_offload_stats))
+ return -ENODATA;
+
+ for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
+ attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
+ if (attr_id < *prividx)
+ continue;
+
+ size = rtnl_get_offload_stats_attr_size(attr_id);
+ if (!size)
+ continue;
+
+ if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ continue;
+
+ attr = nla_reserve_64bit(skb, attr_id, size,
+ IFLA_OFFLOAD_XSTATS_UNSPEC);
+ if (!attr)
+ goto nla_put_failure;
+
+ attr_data = nla_data(attr);
+ memset(attr_data, 0, size);
+ err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
+ attr_data);
+ if (err)
+ goto get_offload_stats_failure;
+ }
+
+ if (!attr)
+ return -ENODATA;
+
+ *prividx = 0;
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+get_offload_stats_failure:
+ *prividx = attr_id;
+ return err;
+}
+
+static int rtnl_get_offload_stats_size(const struct net_device *dev)
+{
+ int nla_size = 0;
+ int attr_id;
+ int size;
+
+ if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
+ dev->netdev_ops->ndo_get_offload_stats))
+ return 0;
+
+ for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
+ attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
+ if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ continue;
+ size = rtnl_get_offload_stats_attr_size(attr_id);
+ nla_size += nla_total_size_64bit(size);
+ }
+
+ if (nla_size != 0)
+ nla_size += nla_total_size(0);
+
+ return nla_size;
+}
+
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, unsigned int filter_mask,
@@ -3559,6 +3724,7 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
struct nlmsghdr *nlh;
struct nlattr *attr;
int s_prividx = *prividx;
+ int err;
ASSERT_RTNL();
@@ -3587,8 +3753,6 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
if (ops && ops->fill_linkxstats) {
- int err;
-
*idxattr = IFLA_STATS_LINK_XSTATS;
attr = nla_nest_start(skb,
IFLA_STATS_LINK_XSTATS);
@@ -3612,8 +3776,6 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (master)
ops = master->rtnl_link_ops;
if (ops && ops->fill_linkxstats) {
- int err;
-
*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
attr = nla_nest_start(skb,
IFLA_STATS_LINK_XSTATS_SLAVE);
@@ -3628,6 +3790,24 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
}
}
+ if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
+ *idxattr)) {
+ *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
+ attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
+ if (!attr)
+ goto nla_put_failure;
+
+ err = rtnl_get_offload_stats(skb, dev, prividx);
+ if (err == -ENODATA)
+ nla_nest_cancel(skb, attr);
+ else
+ nla_nest_end(skb, attr);
+
+ if (err && err != -ENODATA)
+ goto nla_put_failure;
+ *idxattr = 0;
+ }
+
nlmsg_end(skb, nlh);
return 0;
@@ -3642,10 +3822,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = {
- [IFLA_STATS_LINK_64] = { .len = sizeof(struct rtnl_link_stats64) },
-};
-
static size_t if_nlmsg_stats_size(const struct net_device *dev,
u32 filter_mask)
{
@@ -3685,6 +3861,9 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
}
}
+ if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
+ size += rtnl_get_offload_stats_size(dev);
+
return size;
}
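rtnl_get_offload_stats() and rtnl_get_offload_stats_size() only rely on two driver hooks. An illustrative pair of stubs with the signatures used above (foo_* names are hypothetical), reporting just IFLA_OFFLOAD_XSTATS_CPU_HIT:

#include <linux/netdevice.h>
#include <linux/string.h>

static bool foo_has_offload_stats(int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int foo_get_offload_stats(int attr_id, const struct net_device *dev,
				 void *attr_data)
{
	struct rtnl_link_stats64 *stats = attr_data;

	if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return -EOPNOTSUPP;

	/* fill from hardware counters; zeroed for this sketch */
	memset(stats, 0, sizeof(*stats));
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	/* ...the usual ndo hooks... */
	.ndo_has_offload_stats	= foo_has_offload_stats,
	.ndo_get_offload_stats	= foo_get_offload_stats,
};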
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3864b4b68fa1..1e3e0087245b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1962,37 +1962,13 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
return false;
}
-ssize_t skb_socket_splice(struct sock *sk,
- struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
-{
- int ret;
-
- /* Drop the socket lock, otherwise we have reverse
- * locking dependencies between sk_lock and i_mutex
- * here as compared to sendfile(). We enter here
- * with the socket lock held, and splice_to_pipe() will
- * grab the pipe inode lock. For sendfile() emulation,
- * we call into ->sendpage() with the i_mutex lock held
- * and networking will grab the socket lock.
- */
- release_sock(sk);
- ret = splice_to_pipe(pipe, spd);
- lock_sock(sk);
-
- return ret;
-}
-
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list.
*/
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
- unsigned int flags,
- ssize_t (*splice_cb)(struct sock *,
- struct pipe_inode_info *,
- struct splice_pipe_desc *))
+ unsigned int flags)
{
struct partial_page partial[MAX_SKB_FRAGS];
struct page *pages[MAX_SKB_FRAGS];
@@ -2009,7 +1985,7 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
if (spd.nr_pages)
- ret = splice_cb(sk, pipe, &spd);
+ ret = splice_to_pipe(pipe, &spd);
return ret;
}
@@ -2445,6 +2421,25 @@ void skb_queue_purge(struct sk_buff_head *list)
EXPORT_SYMBOL(skb_queue_purge);
/**
+ * skb_rbtree_purge - empty a skb rbtree
+ * @root: root of the rbtree to empty
+ *
+ * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ * the tree and one reference dropped. This function does not take
+ * any lock. Synchronization should be handled by the caller (e.g., TCP
+ * out-of-order queue is protected by the socket lock).
+ */
+void skb_rbtree_purge(struct rb_root *root)
+{
+ struct sk_buff *skb, *next;
+
+ rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+ kfree_skb(skb);
+
+ *root = RB_ROOT;
+}
+
+/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
@@ -3078,11 +3073,31 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
sg = !!(features & NETIF_F_SG);
csum = !!can_checksum_protocol(features, proto);
- /* GSO partial only requires that we trim off any excess that
- * doesn't fit into an MSS sized block, so take care of that
- * now.
- */
- if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
+ if (sg && csum && (mss != GSO_BY_FRAGS)) {
+ if (!(features & NETIF_F_GSO_PARTIAL)) {
+ struct sk_buff *iter;
+
+ if (!list_skb ||
+ !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+ goto normal;
+
+ /* Split the buffer at the frag_list pointer.
+ * This is based on the assumption that all
+ * buffers in the chain excluding the last
+ * contain the same amount of data.
+ */
+ skb_walk_frags(head_skb, iter) {
+ if (skb_headlen(iter))
+ goto normal;
+
+ len -= iter->len;
+ }
+ }
+
+ /* GSO partial only requires that we trim off any excess that
+ * doesn't fit into an MSS sized block, so take care of that
+ * now.
+ */
partial_segs = len / mss;
if (partial_segs > 1)
mss *= partial_segs;
@@ -3090,6 +3105,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
partial_segs = 0;
}
+normal:
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
@@ -3281,21 +3297,29 @@ perform_csum_check:
*/
segs->prev = tail;
- /* Update GSO info on first skb in partial sequence. */
if (partial_segs) {
+ struct sk_buff *iter;
int type = skb_shinfo(head_skb)->gso_type;
+ unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
/* Update type to add partial and then remove dodgy if set */
- type |= SKB_GSO_PARTIAL;
+ type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
type &= ~SKB_GSO_DODGY;
/* Update GSO info and prepare to start updating headers on
* our way back down the stack of protocols.
*/
- skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
- skb_shinfo(segs)->gso_segs = partial_segs;
- skb_shinfo(segs)->gso_type = type;
- SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
+ for (iter = segs; iter; iter = iter->next) {
+ skb_shinfo(iter)->gso_size = gso_size;
+ skb_shinfo(iter)->gso_segs = partial_segs;
+ skb_shinfo(iter)->gso_type = type;
+ SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
+ }
+
+ if (tail->len - doffset <= gso_size)
+ skb_shinfo(tail)->gso_size = 0;
+ else if (tail != segs)
+ skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
}
/* Following permits correct backpressure, for protocols
@@ -4474,17 +4498,24 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len)
}
EXPORT_SYMBOL(skb_ensure_writable);
-/* remove VLAN header from packet and update csum accordingly. */
-static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+/* remove VLAN header from packet and update csum accordingly.
+ * expects an skb that is not skb_vlan_tag_present() and carries a VLAN tag in the payload
+ */
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_hdr *vhdr;
- unsigned int offset = skb->data - skb_mac_header(skb);
+ int offset = skb->data - skb_mac_header(skb);
int err;
- __skb_push(skb, offset);
+ if (WARN_ONCE(offset,
+ "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
+ offset)) {
+ return -EINVAL;
+ }
+
err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
if (unlikely(err))
- goto pull;
+ return err;
skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
@@ -4501,12 +4532,14 @@ static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_len(skb);
-pull:
- __skb_pull(skb, offset);
return err;
}
+EXPORT_SYMBOL(__skb_vlan_pop);
+/* Pop a vlan tag either from hwaccel or from payload.
+ * Expects skb->data at mac header.
+ */
int skb_vlan_pop(struct sk_buff *skb)
{
u16 vlan_tci;
@@ -4516,9 +4549,7 @@ int skb_vlan_pop(struct sk_buff *skb)
if (likely(skb_vlan_tag_present(skb))) {
skb->vlan_tci = 0;
} else {
- if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)) ||
- skb->len < VLAN_ETH_HLEN))
+ if (unlikely(!eth_type_vlan(skb->protocol)))
return 0;
err = __skb_vlan_pop(skb, &vlan_tci);
@@ -4526,9 +4557,7 @@ int skb_vlan_pop(struct sk_buff *skb)
return err;
}
/* move next vlan tag to hw accel tag */
- if (likely((skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)) ||
- skb->len < VLAN_ETH_HLEN))
+ if (likely(!eth_type_vlan(skb->protocol)))
return 0;
vlan_proto = skb->protocol;
@@ -4541,29 +4570,30 @@ int skb_vlan_pop(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_vlan_pop);
+/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
+ * Expects skb->data at mac header.
+ */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
if (skb_vlan_tag_present(skb)) {
- unsigned int offset = skb->data - skb_mac_header(skb);
+ int offset = skb->data - skb_mac_header(skb);
int err;
- /* __vlan_insert_tag expect skb->data pointing to mac header.
- * So change skb->data before calling it and change back to
- * original position later
- */
- __skb_push(skb, offset);
+ if (WARN_ONCE(offset,
+ "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
+ offset)) {
+ return -EINVAL;
+ }
+
err = __vlan_insert_tag(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
- if (err) {
- __skb_pull(skb, offset);
+ if (err)
return err;
- }
skb->protocol = skb->vlan_proto;
skb->mac_len += VLAN_HLEN;
skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
- __skb_pull(skb, offset);
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
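As a usage note for the new helper, a short sketch of the pattern skb_rbtree_purge() is meant for; the foo_* structure is hypothetical, and locking stays with the caller as the kernel-doc comment above states.

#include <linux/rbtree.h>
#include <linux/skbuff.h>

struct foo_sock_state {                 /* hypothetical per-socket state */
	struct rb_root out_of_order_queue;
};

static void foo_destroy_queue(struct foo_sock_state *st)
{
	/* caller holds whatever lock protects the queue, e.g. the
	 * socket lock for a TCP-style out-of-order queue
	 */
	skb_rbtree_purge(&st->out_of_order_queue);
}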
diff --git a/net/core/sock.c b/net/core/sock.c
index fd7b41edf1ce..c73e28fc9c2a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1315,24 +1315,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
#endif
}
-void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
-{
- unsigned long nulls1, nulls2;
-
- nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
- nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
- if (nulls1 > nulls2)
- swap(nulls1, nulls2);
-
- if (nulls1 != 0)
- memset((char *)sk, 0, nulls1);
- memset((char *)sk + nulls1 + sizeof(void *), 0,
- nulls2 - nulls1 - sizeof(void *));
- memset((char *)sk + nulls2 + sizeof(void *), 0,
- size - nulls2 - sizeof(void *));
-}
-EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
-
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
int family)
{
@@ -1344,12 +1326,8 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
if (!sk)
return sk;
- if (priority & __GFP_ZERO) {
- if (prot->clear_sk)
- prot->clear_sk(sk, prot->obj_size);
- else
- sk_prot_clear_nulls(sk, prot->obj_size);
- }
+ if (priority & __GFP_ZERO)
+ sk_prot_clear_nulls(sk, prot->obj_size);
} else
sk = kmalloc(prot->obj_size, priority);
@@ -1385,6 +1363,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
slab = prot->slab;
cgroup_sk_free(&sk->sk_cgrp_data);
+ mem_cgroup_sk_free(sk);
security_sk_free(sk);
if (slab != NULL)
kmem_cache_free(slab, sk);
@@ -1421,6 +1400,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sock_net_set(sk, net);
atomic_set(&sk->sk_wmem_alloc, 1);
+ mem_cgroup_sk_alloc(sk);
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
@@ -1567,6 +1547,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_incoming_cpu = raw_smp_processor_id();
atomic64_set(&newsk->sk_cookie, 0);
+ mem_cgroup_sk_alloc(newsk);
cgroup_sk_alloc(&newsk->sk_cgrp_data);
/*
@@ -1591,9 +1572,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sk_set_socket(newsk, NULL);
newsk->sk_wq = NULL;
- if (mem_cgroup_sockets_enabled && sk->sk_memcg)
- sock_update_memcg(newsk);
-
if (newsk->sk_prot->sockets_allocated)
sk_sockets_allocated_inc(newsk);
diff --git a/net/core/stream.c b/net/core/stream.c
index 159516a11b7e..1086c8b280a8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -43,7 +43,6 @@ void sk_stream_write_space(struct sock *sk)
rcu_read_unlock();
}
}
-EXPORT_SYMBOL(sk_stream_write_space);
/**
* sk_stream_wait_connect - Wait for a socket to get into the connected state
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index ff7736f7ff42..96e47c539bee 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -38,4 +38,7 @@ config NET_DSA_TAG_EDSA
config NET_DSA_TAG_TRAILER
bool
+config NET_DSA_TAG_QCA
+ bool
+
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 8af4ded70f1c..a3380ed0e0be 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -7,3 +7,4 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 7e68bc6bc853..a6902c1e2f28 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -54,6 +54,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
[DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
#endif
+#ifdef CONFIG_NET_DSA_TAG_QCA
+ [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
+#endif
[DSA_TAG_PROTO_NONE] = &none_ops,
};
@@ -61,27 +64,27 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
static DEFINE_MUTEX(dsa_switch_drivers_mutex);
static LIST_HEAD(dsa_switch_drivers);
-void register_switch_driver(struct dsa_switch_driver *drv)
+void register_switch_driver(struct dsa_switch_ops *ops)
{
mutex_lock(&dsa_switch_drivers_mutex);
- list_add_tail(&drv->list, &dsa_switch_drivers);
+ list_add_tail(&ops->list, &dsa_switch_drivers);
mutex_unlock(&dsa_switch_drivers_mutex);
}
EXPORT_SYMBOL_GPL(register_switch_driver);
-void unregister_switch_driver(struct dsa_switch_driver *drv)
+void unregister_switch_driver(struct dsa_switch_ops *ops)
{
mutex_lock(&dsa_switch_drivers_mutex);
- list_del_init(&drv->list);
+ list_del_init(&ops->list);
mutex_unlock(&dsa_switch_drivers_mutex);
}
EXPORT_SYMBOL_GPL(unregister_switch_driver);
-static struct dsa_switch_driver *
+static struct dsa_switch_ops *
dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
const char **_name, void **priv)
{
- struct dsa_switch_driver *ret;
+ struct dsa_switch_ops *ret;
struct list_head *list;
const char *name;
@@ -90,13 +93,13 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
mutex_lock(&dsa_switch_drivers_mutex);
list_for_each(list, &dsa_switch_drivers) {
- struct dsa_switch_driver *drv;
+ struct dsa_switch_ops *ops;
- drv = list_entry(list, struct dsa_switch_driver, list);
+ ops = list_entry(list, struct dsa_switch_ops, list);
- name = drv->probe(parent, host_dev, sw_addr, priv);
+ name = ops->probe(parent, host_dev, sw_addr, priv);
if (name != NULL) {
- ret = drv;
+ ret = ops;
break;
}
}
@@ -117,7 +120,7 @@ static ssize_t temp1_input_show(struct device *dev,
struct dsa_switch *ds = dev_get_drvdata(dev);
int temp, ret;
- ret = ds->drv->get_temp(ds, &temp);
+ ret = ds->ops->get_temp(ds, &temp);
if (ret < 0)
return ret;
@@ -131,7 +134,7 @@ static ssize_t temp1_max_show(struct device *dev,
struct dsa_switch *ds = dev_get_drvdata(dev);
int temp, ret;
- ret = ds->drv->get_temp_limit(ds, &temp);
+ ret = ds->ops->get_temp_limit(ds, &temp);
if (ret < 0)
return ret;
@@ -149,7 +152,7 @@ static ssize_t temp1_max_store(struct device *dev,
if (ret < 0)
return ret;
- ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
+ ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
if (ret < 0)
return ret;
@@ -164,7 +167,7 @@ static ssize_t temp1_max_alarm_show(struct device *dev,
bool alarm;
int ret;
- ret = ds->drv->get_temp_alarm(ds, &alarm);
+ ret = ds->ops->get_temp_alarm(ds, &alarm);
if (ret < 0)
return ret;
@@ -184,15 +187,15 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct dsa_switch *ds = dev_get_drvdata(dev);
- struct dsa_switch_driver *drv = ds->drv;
+ struct dsa_switch_ops *ops = ds->ops;
umode_t mode = attr->mode;
if (index == 1) {
- if (!drv->get_temp_limit)
+ if (!ops->get_temp_limit)
mode = 0;
- else if (!drv->set_temp_limit)
+ else if (!ops->set_temp_limit)
mode &= ~S_IWUSR;
- } else if (index == 2 && !drv->get_temp_alarm) {
+ } else if (index == 2 && !ops->get_temp_alarm) {
mode = 0;
}
return mode;
@@ -228,8 +231,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
genphy_config_init(phydev);
genphy_read_status(phydev);
- if (ds->drv->adjust_link)
- ds->drv->adjust_link(ds, port, phydev);
+ if (ds->ops->adjust_link)
+ ds->ops->adjust_link(ds, port, phydev);
}
return 0;
@@ -303,7 +306,7 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds)
static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
{
- struct dsa_switch_driver *drv = ds->drv;
+ struct dsa_switch_ops *ops = ds->ops;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_chip_data *cd = ds->cd;
bool valid_name_found = false;
@@ -354,7 +357,10 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
* switch.
*/
if (dst->cpu_switch == index) {
- dst->tag_ops = dsa_resolve_tag_protocol(drv->tag_protocol);
+ enum dsa_tag_protocol tag_protocol;
+
+ tag_protocol = ops->get_tag_protocol(ds);
+ dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
if (IS_ERR(dst->tag_ops)) {
ret = PTR_ERR(dst->tag_ops);
goto out;
@@ -368,15 +374,17 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
/*
* Do basic register setup.
*/
- ret = drv->setup(ds);
+ ret = ops->setup(ds);
if (ret < 0)
goto out;
- ret = drv->set_addr(ds, dst->master_netdev->dev_addr);
- if (ret < 0)
- goto out;
+ if (ops->set_addr) {
+ ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+ }
- if (!ds->slave_mii_bus && drv->phy_read) {
+ if (!ds->slave_mii_bus && ops->phy_read) {
ds->slave_mii_bus = devm_mdiobus_alloc(parent);
if (!ds->slave_mii_bus) {
ret = -ENOMEM;
@@ -423,7 +431,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
* register with hardware monitoring subsystem.
* Treat registration error as non-fatal and ignore it.
*/
- if (drv->get_temp) {
+ if (ops->get_temp) {
const char *netname = netdev_name(dst->master_netdev);
char hname[IFNAMSIZ + 1];
int i, j;
@@ -454,7 +462,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
struct device *parent, struct device *host_dev)
{
struct dsa_chip_data *cd = dst->pd->chip + index;
- struct dsa_switch_driver *drv;
+ struct dsa_switch_ops *ops;
struct dsa_switch *ds;
int ret;
const char *name;
@@ -463,8 +471,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
/*
* Probe for switch model.
*/
- drv = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv);
- if (drv == NULL) {
+ ops = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv);
+ if (!ops) {
netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
index);
return ERR_PTR(-EINVAL);
@@ -483,7 +491,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
ds->dst = dst;
ds->index = index;
ds->cd = cd;
- ds->drv = drv;
+ ds->ops = ops;
ds->priv = priv;
ds->dev = parent;
@@ -538,12 +546,12 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
ds->dsa_port_mask |= ~(1 << port);
}
- if (ds->slave_mii_bus && ds->drv->phy_read)
+ if (ds->slave_mii_bus && ds->ops->phy_read)
mdiobus_unregister(ds->slave_mii_bus);
}
#ifdef CONFIG_PM_SLEEP
-static int dsa_switch_suspend(struct dsa_switch *ds)
+int dsa_switch_suspend(struct dsa_switch *ds)
{
int i, ret = 0;
@@ -557,18 +565,19 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
return ret;
}
- if (ds->drv->suspend)
- ret = ds->drv->suspend(ds);
+ if (ds->ops->suspend)
+ ret = ds->ops->suspend(ds);
return ret;
}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-static int dsa_switch_resume(struct dsa_switch *ds)
+int dsa_switch_resume(struct dsa_switch *ds)
{
int i, ret = 0;
- if (ds->drv->resume)
- ret = ds->drv->resume(ds);
+ if (ds->ops->resume)
+ ret = ds->ops->resume(ds);
if (ret)
return ret;
@@ -585,6 +594,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
return 0;
}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif
/* platform driver init and cleanup *****************************************/
@@ -1086,7 +1096,6 @@ static int dsa_resume(struct device *d)
static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
static const struct of_device_id dsa_of_match_table[] = {
- { .compatible = "brcm,bcm7445-switch-v4.0" },
{ .compatible = "marvell,dsa", },
{}
};
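From a switch driver's point of view, the dsa_switch_driver to dsa_switch_ops conversion means the tagging protocol is now reported through a callback and set_addr() is optional. A hedged sketch with hypothetical foo_* names:

#include <linux/module.h>
#include <net/dsa.h>

static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_QCA;
}

static struct dsa_switch_ops foo_switch_ops = {
	.get_tag_protocol = foo_get_tag_protocol,
	/* .probe, .setup, .phy_read, .phy_write, ... unchanged from the
	 * old dsa_switch_driver; .set_addr may now be omitted entirely
	 */
};

static int __init foo_dsa_init(void)
{
	register_switch_driver(&foo_switch_ops);
	return 0;
}
module_init(foo_dsa_init);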
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index f30bad9678f0..f8a7d9aab437 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -294,25 +294,23 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
int err;
/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
- * driver and before drv->setup() has run, since the switch drivers and
+ * driver and before ops->setup() has run, since the switch drivers and
* the slave MDIO bus driver rely on these values for probing PHY
* devices or not
*/
ds->phys_mii_mask = ds->enabled_port_mask;
- err = ds->drv->setup(ds);
+ err = ds->ops->setup(ds);
if (err < 0)
return err;
- err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
- if (err < 0)
- return err;
-
- err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
- if (err < 0)
- return err;
+ if (ds->ops->set_addr) {
+ err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
+ if (err < 0)
+ return err;
+ }
- if (!ds->slave_mii_bus && ds->drv->phy_read) {
+ if (!ds->slave_mii_bus && ds->ops->phy_read) {
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -374,7 +372,7 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
dsa_user_port_unapply(port, index, ds);
}
- if (ds->slave_mii_bus && ds->drv->phy_read)
+ if (ds->slave_mii_bus && ds->ops->phy_read)
mdiobus_unregister(ds->slave_mii_bus);
}
@@ -443,6 +441,7 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
struct dsa_switch_tree *dst,
struct dsa_switch *ds)
{
+ enum dsa_tag_protocol tag_protocol;
struct net_device *ethernet_dev;
struct device_node *ethernet;
@@ -465,7 +464,8 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
dst->cpu_port = index;
}
- dst->tag_ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol);
+ tag_protocol = ds->ops->get_tag_protocol(ds);
+ dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
if (IS_ERR(dst->tag_ops)) {
dev_warn(ds->dev, "No tagger for this switch\n");
return PTR_ERR(dst->tag_ops);
@@ -541,7 +541,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
ds->ports[reg].dn = port;
- /* Initialize enabled_port_mask now for drv->setup()
+ /* Initialize enabled_port_mask now for ops->setup()
* to have access to a correct value, just like what
* net/dsa/dsa.c::dsa_switch_setup_one does.
*/
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 00077a9c97f4..6cfd7388834e 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -81,5 +81,7 @@ extern const struct dsa_device_ops trailer_netdev_ops;
/* tag_brcm.c */
extern const struct dsa_device_ops brcm_netdev_ops;
+/* tag_qca.c */
+extern const struct dsa_device_ops qca_netdev_ops;
#endif
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index fc9196745225..6b1282c006b1 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -28,7 +28,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
struct dsa_switch *ds = bus->priv;
if (ds->phys_mii_mask & (1 << addr))
- return ds->drv->phy_read(ds, addr, reg);
+ return ds->ops->phy_read(ds, addr, reg);
return 0xffff;
}
@@ -38,7 +38,7 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
struct dsa_switch *ds = bus->priv;
if (ds->phys_mii_mask & (1 << addr))
- return ds->drv->phy_write(ds, addr, reg, val);
+ return ds->ops->phy_write(ds, addr, reg, val);
return 0;
}
@@ -69,6 +69,30 @@ static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
return !!p->bridge_dev;
}
+static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *dp = &ds->ports[port];
+
+ if (ds->ops->port_stp_state_set)
+ ds->ops->port_stp_state_set(ds, port, state);
+
+ if (ds->ops->port_fast_age) {
+ /* Fast age FDB entries or flush appropriate forwarding database
+ * for the given port, if we are moving it from the Learning or
+ * Forwarding state to the Disabled, Blocking or Listening state.
+ */
+
+ if ((dp->stp_state == BR_STATE_LEARNING ||
+ dp->stp_state == BR_STATE_FORWARDING) &&
+ (state == BR_STATE_DISABLED ||
+ state == BR_STATE_BLOCKING ||
+ state == BR_STATE_LISTENING))
+ ds->ops->port_fast_age(ds, port);
+ }
+
+ dp->stp_state = state;
+}
+
static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -98,14 +122,13 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_allmulti;
}
- if (ds->drv->port_enable) {
- err = ds->drv->port_enable(ds, p->port, p->phy);
+ if (ds->ops->port_enable) {
+ err = ds->ops->port_enable(ds, p->port, p->phy);
if (err)
goto clear_promisc;
}
- if (ds->drv->port_stp_state_set)
- ds->drv->port_stp_state_set(ds, p->port, stp_state);
+ dsa_port_set_stp_state(ds, p->port, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -144,11 +167,10 @@ static int dsa_slave_close(struct net_device *dev)
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
- if (ds->drv->port_disable)
- ds->drv->port_disable(ds, p->port, p->phy);
+ if (ds->ops->port_disable)
+ ds->ops->port_disable(ds, p->port, p->phy);
- if (ds->drv->port_stp_state_set)
- ds->drv->port_stp_state_set(ds, p->port, BR_STATE_DISABLED);
+ dsa_port_set_stp_state(ds, p->port, BR_STATE_DISABLED);
return 0;
}
@@ -209,13 +231,13 @@ static int dsa_slave_port_vlan_add(struct net_device *dev,
struct dsa_switch *ds = p->parent;
if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->drv->port_vlan_prepare || !ds->drv->port_vlan_add)
+ if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
return -EOPNOTSUPP;
- return ds->drv->port_vlan_prepare(ds, p->port, vlan, trans);
+ return ds->ops->port_vlan_prepare(ds, p->port, vlan, trans);
}
- ds->drv->port_vlan_add(ds, p->port, vlan, trans);
+ ds->ops->port_vlan_add(ds, p->port, vlan, trans);
return 0;
}
@@ -226,10 +248,10 @@ static int dsa_slave_port_vlan_del(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (!ds->drv->port_vlan_del)
+ if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
- return ds->drv->port_vlan_del(ds, p->port, vlan);
+ return ds->ops->port_vlan_del(ds, p->port, vlan);
}
static int dsa_slave_port_vlan_dump(struct net_device *dev,
@@ -239,8 +261,8 @@ static int dsa_slave_port_vlan_dump(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->port_vlan_dump)
- return ds->drv->port_vlan_dump(ds, p->port, vlan, cb);
+ if (ds->ops->port_vlan_dump)
+ return ds->ops->port_vlan_dump(ds, p->port, vlan, cb);
return -EOPNOTSUPP;
}
@@ -253,13 +275,13 @@ static int dsa_slave_port_fdb_add(struct net_device *dev,
struct dsa_switch *ds = p->parent;
if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
+ if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
+ return ds->ops->port_fdb_prepare(ds, p->port, fdb, trans);
}
- ds->drv->port_fdb_add(ds, p->port, fdb, trans);
+ ds->ops->port_fdb_add(ds, p->port, fdb, trans);
return 0;
}
@@ -271,8 +293,8 @@ static int dsa_slave_port_fdb_del(struct net_device *dev,
struct dsa_switch *ds = p->parent;
int ret = -EOPNOTSUPP;
- if (ds->drv->port_fdb_del)
- ret = ds->drv->port_fdb_del(ds, p->port, fdb);
+ if (ds->ops->port_fdb_del)
+ ret = ds->ops->port_fdb_del(ds, p->port, fdb);
return ret;
}
@@ -284,8 +306,52 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->port_fdb_dump)
- return ds->drv->port_fdb_dump(ds, p->port, fdb, cb);
+ if (ds->ops->port_fdb_dump)
+ return ds->ops->port_fdb_dump(ds, p->port, fdb, cb);
+
+ return -EOPNOTSUPP;
+}
+
+static int dsa_slave_port_mdb_add(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_mdb_prepare(ds, p->port, mdb, trans);
+ }
+
+ ds->ops->port_mdb_add(ds, p->port, mdb, trans);
+
+ return 0;
+}
+
+static int dsa_slave_port_mdb_del(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->ops->port_mdb_del)
+ return ds->ops->port_mdb_del(ds, p->port, mdb);
+
+ return -EOPNOTSUPP;
+}
+
+static int dsa_slave_port_mdb_dump(struct net_device *dev,
+ struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->ops->port_mdb_dump)
+ return ds->ops->port_mdb_dump(ds, p->port, mdb, cb);
return -EOPNOTSUPP;
}
@@ -308,9 +374,9 @@ static int dsa_slave_stp_state_set(struct net_device *dev,
struct dsa_switch *ds = p->parent;
if (switchdev_trans_ph_prepare(trans))
- return ds->drv->port_stp_state_set ? 0 : -EOPNOTSUPP;
+ return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
- ds->drv->port_stp_state_set(ds, p->port, attr->u.stp_state);
+ dsa_port_set_stp_state(ds, p->port, attr->u.stp_state);
return 0;
}
@@ -326,8 +392,8 @@ static int dsa_slave_vlan_filtering(struct net_device *dev,
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (ds->drv->port_vlan_filtering)
- return ds->drv->port_vlan_filtering(ds, p->port,
+ if (ds->ops->port_vlan_filtering)
+ return ds->ops->port_vlan_filtering(ds, p->port,
attr->u.vlan_filtering);
return 0;
@@ -365,8 +431,8 @@ static int dsa_slave_ageing_time(struct net_device *dev,
ds->ports[p->port].ageing_time = ageing_time;
ageing_time = dsa_fastest_ageing_time(ds, ageing_time);
- if (ds->drv->set_ageing_time)
- return ds->drv->set_ageing_time(ds, ageing_time);
+ if (ds->ops->set_ageing_time)
+ return ds->ops->set_ageing_time(ds, ageing_time);
return 0;
}
@@ -412,6 +478,10 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_FDB(obj),
trans);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dsa_slave_port_mdb_add(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dsa_slave_port_vlan_add(dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
@@ -435,6 +505,9 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
err = dsa_slave_port_fdb_del(dev,
SWITCHDEV_OBJ_PORT_FDB(obj));
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dsa_slave_port_mdb_del(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dsa_slave_port_vlan_del(dev,
SWITCHDEV_OBJ_PORT_VLAN(obj));
@@ -459,6 +532,10 @@ static int dsa_slave_port_obj_dump(struct net_device *dev,
SWITCHDEV_OBJ_PORT_FDB(obj),
cb);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dsa_slave_port_mdb_dump(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+ cb);
+ break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dsa_slave_port_vlan_dump(dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
@@ -481,8 +558,8 @@ static int dsa_slave_bridge_port_join(struct net_device *dev,
p->bridge_dev = br;
- if (ds->drv->port_bridge_join)
- ret = ds->drv->port_bridge_join(ds, p->port, br);
+ if (ds->ops->port_bridge_join)
+ ret = ds->ops->port_bridge_join(ds, p->port, br);
return ret == -EOPNOTSUPP ? 0 : ret;
}
@@ -493,16 +570,15 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev)
struct dsa_switch *ds = p->parent;
- if (ds->drv->port_bridge_leave)
- ds->drv->port_bridge_leave(ds, p->port);
+ if (ds->ops->port_bridge_leave)
+ ds->ops->port_bridge_leave(ds, p->port);
p->bridge_dev = NULL;
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
*/
- if (ds->drv->port_stp_state_set)
- ds->drv->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING);
+ dsa_port_set_stp_state(ds, p->port, BR_STATE_FORWARDING);
}
static int dsa_slave_port_attr_get(struct net_device *dev,
@@ -605,8 +681,8 @@ static int dsa_slave_get_regs_len(struct net_device *dev)
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->get_regs_len)
- return ds->drv->get_regs_len(ds, p->port);
+ if (ds->ops->get_regs_len)
+ return ds->ops->get_regs_len(ds, p->port);
return -EOPNOTSUPP;
}
@@ -617,8 +693,8 @@ dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->get_regs)
- ds->drv->get_regs(ds, p->port, regs, _p);
+ if (ds->ops->get_regs)
+ ds->ops->get_regs(ds, p->port, regs, _p);
}
static int dsa_slave_nway_reset(struct net_device *dev)
@@ -651,8 +727,8 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
if (ds->cd && ds->cd->eeprom_len)
return ds->cd->eeprom_len;
- if (ds->drv->get_eeprom_len)
- return ds->drv->get_eeprom_len(ds);
+ if (ds->ops->get_eeprom_len)
+ return ds->ops->get_eeprom_len(ds);
return 0;
}
@@ -663,8 +739,8 @@ static int dsa_slave_get_eeprom(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->get_eeprom)
- return ds->drv->get_eeprom(ds, eeprom, data);
+ if (ds->ops->get_eeprom)
+ return ds->ops->get_eeprom(ds, eeprom, data);
return -EOPNOTSUPP;
}
@@ -675,8 +751,8 @@ static int dsa_slave_set_eeprom(struct net_device *dev,
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->set_eeprom)
- return ds->drv->set_eeprom(ds, eeprom, data);
+ if (ds->ops->set_eeprom)
+ return ds->ops->set_eeprom(ds, eeprom, data);
return -EOPNOTSUPP;
}
@@ -694,8 +770,8 @@ static void dsa_slave_get_strings(struct net_device *dev,
strncpy(data + len, "tx_bytes", len);
strncpy(data + 2 * len, "rx_packets", len);
strncpy(data + 3 * len, "rx_bytes", len);
- if (ds->drv->get_strings != NULL)
- ds->drv->get_strings(ds, p->port, data + 4 * len);
+ if (ds->ops->get_strings)
+ ds->ops->get_strings(ds, p->port, data + 4 * len);
}
}
@@ -714,8 +790,8 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data);
}
- if (ds->drv->get_ethtool_stats)
- ds->drv->get_ethtool_stats(ds, cpu_port, data + count);
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, cpu_port, data + count);
}
static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
@@ -727,8 +803,8 @@ static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
if (dst->master_ethtool_ops.get_sset_count)
count += dst->master_ethtool_ops.get_sset_count(dev, sset);
- if (sset == ETH_SS_STATS && ds->drv->get_sset_count)
- count += ds->drv->get_sset_count(ds);
+ if (sset == ETH_SS_STATS && ds->ops->get_sset_count)
+ count += ds->ops->get_sset_count(ds);
return count;
}
@@ -755,14 +831,14 @@ static void dsa_cpu_port_get_strings(struct net_device *dev,
dst->master_ethtool_ops.get_strings(dev, stringset, data);
}
- if (stringset == ETH_SS_STATS && ds->drv->get_strings) {
+ if (stringset == ETH_SS_STATS && ds->ops->get_strings) {
ndata = data + mcount * len;
/* This function copies ETH_GSTRINGS_LEN bytes; we will mangle
* the output afterwards to prepend the CPU port prefix we
* constructed earlier
*/
- ds->drv->get_strings(ds, cpu_port, ndata);
- count = ds->drv->get_sset_count(ds);
+ ds->ops->get_strings(ds, cpu_port, ndata);
+ count = ds->ops->get_sset_count(ds);
for (i = 0; i < count; i++) {
memmove(ndata + (i * len + sizeof(pfx)),
ndata + i * len, len - sizeof(pfx));
@@ -782,8 +858,8 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
data[1] = dev->stats.tx_bytes;
data[2] = dev->stats.rx_packets;
data[3] = dev->stats.rx_bytes;
- if (ds->drv->get_ethtool_stats != NULL)
- ds->drv->get_ethtool_stats(ds, p->port, data + 4);
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, p->port, data + 4);
}
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
@@ -795,8 +871,8 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
int count;
count = 4;
- if (ds->drv->get_sset_count != NULL)
- count += ds->drv->get_sset_count(ds);
+ if (ds->ops->get_sset_count)
+ count += ds->ops->get_sset_count(ds);
return count;
}
@@ -809,8 +885,8 @@ static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->drv->get_wol)
- ds->drv->get_wol(ds, p->port, w);
+ if (ds->ops->get_wol)
+ ds->ops->get_wol(ds, p->port, w);
}
static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
@@ -819,8 +895,8 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
struct dsa_switch *ds = p->parent;
int ret = -EOPNOTSUPP;
- if (ds->drv->set_wol)
- ret = ds->drv->set_wol(ds, p->port, w);
+ if (ds->ops->set_wol)
+ ret = ds->ops->set_wol(ds, p->port, w);
return ret;
}
@@ -831,10 +907,10 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
struct dsa_switch *ds = p->parent;
int ret;
- if (!ds->drv->set_eee)
+ if (!ds->ops->set_eee)
return -EOPNOTSUPP;
- ret = ds->drv->set_eee(ds, p->port, p->phy, e);
+ ret = ds->ops->set_eee(ds, p->port, p->phy, e);
if (ret)
return ret;
@@ -850,10 +926,10 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
struct dsa_switch *ds = p->parent;
int ret;
- if (!ds->drv->get_eee)
+ if (!ds->ops->get_eee)
return -EOPNOTSUPP;
- ret = ds->drv->get_eee(ds, p->port, e);
+ ret = ds->ops->get_eee(ds, p->port, e);
if (ret)
return ret;
@@ -988,8 +1064,8 @@ static void dsa_slave_adjust_link(struct net_device *dev)
p->old_pause = p->phy->pause;
}
- if (ds->drv->adjust_link && status_changed)
- ds->drv->adjust_link(ds, p->port, p->phy);
+ if (ds->ops->adjust_link && status_changed)
+ ds->ops->adjust_link(ds, p->port, p->phy);
if (status_changed)
phy_print_status(p->phy);
@@ -1004,8 +1080,8 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
if (dev) {
p = netdev_priv(dev);
ds = p->parent;
- if (ds->drv->fixed_link_update)
- ds->drv->fixed_link_update(ds, p->port, status);
+ if (ds->ops->fixed_link_update)
+ ds->ops->fixed_link_update(ds, p->port, status);
}
return 0;
@@ -1062,8 +1138,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
phy_dn = port_dn;
}
- if (ds->drv->get_phy_flags)
- phy_flags = ds->drv->get_phy_flags(ds, p->port);
+ if (ds->ops->get_phy_flags)
+ phy_flags = ds->ops->get_phy_flags(ds, p->port);
if (phy_dn) {
int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
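The hunks above only rename the indirection from ds->drv to ds->ops and keep the existing NULL guards; for context, a driver supplies those optional hooks through its dsa_switch_ops. The sketch below shows what the three ethtool statistics callbacks exercised by the slave code might look like on the driver side; the example_* names and the single dummy counter are invented for illustration, and the callback signatures are assumed to match the dsa_switch_ops declarations in net/dsa.h at this point in the series.

#include <linux/string.h>
#include <linux/ethtool.h>
#include <net/dsa.h>

#define EXAMPLE_N_STATS 1

/* Advertise one per-port counter name; the slave code copies its own
 * four software counters first and appends these after them.
 */
static void example_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
        memcpy(data, "example_rx_drops", sizeof("example_rx_drops"));
}

static int example_get_sset_count(struct dsa_switch *ds)
{
        return EXAMPLE_N_STATS;
}

static void example_get_ethtool_stats(struct dsa_switch *ds, int port,
                                      uint64_t *data)
{
        data[0] = 0;    /* a real driver would read a hardware counter for "port" here */
}

static struct dsa_switch_ops example_switch_ops = {
        .get_strings            = example_get_strings,
        .get_sset_count         = example_get_sset_count,
        .get_ethtool_stats      = example_get_ethtool_stats,
};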
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
new file mode 100644
index 000000000000..0c90cacee7aa
--- /dev/null
+++ b/net/dsa/tag_qca.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include "dsa_priv.h"
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_RECV_VERSION_S 14
+#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_RECV_PRIORITY_S 11
+#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6)
+#define QCA_HDR_RECV_TYPE_S 6
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
+
+#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_XMIT_VERSION_S 14
+#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_XMIT_PRIORITY_S 11
+#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8)
+#define QCA_HDR_XMIT_CONTROL_S 8
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0)
+
+static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ u16 *phdr, hdr;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_cow_head(skb, 0) < 0)
+ goto out_free;
+
+ skb_push(skb, QCA_HDR_LEN);
+
+ memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
+ phdr = (u16 *)(skb->data + 2 * ETH_ALEN);
+
+ /* Set the version field, and set destination port information */
+ hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
+ QCA_HDR_XMIT_FROM_CPU |
+ BIT(p->port);
+
+ *phdr = htons(hdr);
+
+ return skb;
+
+out_free:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_switch *ds;
+ u8 ver;
+ int port;
+ __be16 *phdr, hdr;
+
+ if (unlikely(!dst))
+ goto out_drop;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
+ goto out_drop;
+
+ /* The QCA header is added by the switch between the source address
+ * and the Ethertype. At this point, skb->data points to the Ethertype,
+ * so the header sits right before it.
+ */
+ phdr = (__be16 *)(skb->data - 2);
+ hdr = ntohs(*phdr);
+
+ /* Make sure the version is correct */
+ ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
+ if (unlikely(ver != QCA_HDR_VERSION))
+ goto out_drop;
+
+ /* Remove QCA tag and recalculate checksum */
+ skb_pull_rcsum(skb, QCA_HDR_LEN);
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
+ ETH_HLEN - QCA_HDR_LEN);
+
+ /* This protocol doesn't support cascading multiple switches so it's
+ * safe to assume the switch is first in the tree
+ */
+ ds = dst->ds[0];
+ if (!ds)
+ goto out_drop;
+
+ /* Get source port information */
+ port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
+ if (!ds->ports[port].netdev)
+ goto out_drop;
+
+ /* Update skb & forward the frame accordingly */
+ skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = ds->ports[port].netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
+
+ netif_receive_skb(skb);
+
+ return 0;
+
+out_drop:
+ kfree_skb(skb);
+out:
+ return 0;
+}
+
+const struct dsa_device_ops qca_netdev_ops = {
+ .xmit = qca_tag_xmit,
+ .rcv = qca_tag_rcv,
+};
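The 2-byte tag handled above is easiest to see in isolation. The following stand-alone user-space sketch (not part of the patch) packs a transmit header and decodes a receive header using the same bit layout as the defines in tag_qca.c, with the GENMASK() values written out as literals; all values are host order, i.e. before the htons() on transmit and after the ntohs() on receive.

#include <stdio.h>
#include <stdint.h>

#define QCA_HDR_VERSION                 0x2
#define QCA_HDR_XMIT_VERSION_S          14
#define QCA_HDR_XMIT_FROM_CPU           (1u << 7)
#define QCA_HDR_RECV_VERSION_MASK       0xc000  /* GENMASK(15, 14) */
#define QCA_HDR_RECV_VERSION_S          14
#define QCA_HDR_RECV_SOURCE_PORT_MASK   0x0007  /* GENMASK(2, 0) */

/* Build the 16-bit tag the CPU prepends when transmitting to "port". */
static uint16_t qca_pack_xmit(int port)
{
        return (QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S) |
               QCA_HDR_XMIT_FROM_CPU |
               (1u << port);            /* destination port bitmap */
}

/* Decode version and source port from a received tag. */
static void qca_parse_rcv(uint16_t hdr, int *ver, int *port)
{
        *ver  = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
        *port = hdr & QCA_HDR_RECV_SOURCE_PORT_MASK;
}

int main(void)
{
        uint16_t tx = qca_pack_xmit(3);
        int ver, port;

        qca_parse_rcv(0x8004, &ver, &port); /* hypothetical tag from port 4 */
        printf("tx tag 0x%04x, rx version %d, rx source port %d\n",
               tx, ver, port);
        return 0;
}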
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 50d6a9b49f6c..300b06888fdf 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -640,6 +640,21 @@ config TCP_CONG_CDG
D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+config TCP_CONG_BBR
+ tristate "BBR TCP"
+ default n
+ ---help---
+
+ BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
+ maximize network utilization and minimize queues. It builds an explicit
+ model of the bottleneck delivery rate and path round-trip
+ propagation delay. It tolerates packet loss and delay unrelated to
+ congestion. It can operate over LAN, WAN, cellular, wifi, or cable
+ modem links. It can coexist with flows that use loss-based congestion
+ control, and can operate with shallow buffers, deep buffers,
+ bufferbloat, policers, or AQM schemes that do not provide a delay
+ signal. It requires the fq ("Fair Queue") pacing packet scheduler.
+
choice
prompt "Default TCP congestion control"
default DEFAULT_CUBIC
@@ -674,6 +689,9 @@ choice
config DEFAULT_CDG
bool "CDG" if TCP_CONG_CDG=y
+ config DEFAULT_BBR
+ bool "BBR" if TCP_CONG_BBR=y
+
config DEFAULT_RENO
bool "Reno"
endchoice
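For completeness, once TCP_CONG_BBR is built and the fq pacing qdisc is attached, an application can select BBR per socket through the long-standing TCP_CONGESTION socket option. The snippet below is a minimal user-space illustration of that interface, not something introduced by this patch.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        const char name[] = "bbr";
        char cur[16] = "";
        socklen_t len = sizeof(cur);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Request BBR for this socket; setsockopt() fails if the module is
         * not available, in which case the system default remains in use.
         */
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0)
                perror("setsockopt(TCP_CONGESTION)");
        if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cur, &len) == 0)
                printf("congestion control: %s\n", cur);
        close(fd);
        return 0;
}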
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 24629b6f57cc..bc6a6c8b9bcd 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
- tcp_recovery.o \
+ tcp_rate.o tcp_recovery.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
@@ -41,6 +41,7 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
+obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 55513e654d79..1effc986739e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -211,24 +211,19 @@ int inet_listen(struct socket *sock, int backlog)
* we can only allow the backlog to be adjusted.
*/
if (old_state != TCP_LISTEN) {
- /* Check special setups for testing purpose to enable TFO w/o
- * requiring TCP_FASTOPEN sockopt.
+ /* Enable TFO w/o requiring TCP_FASTOPEN socket option.
* Note that only TCP sockets (SOCK_STREAM) will reach here.
- * Also fastopenq may already been allocated because this
- * socket was in TCP_LISTEN state previously but was
- * shutdown() (rather than close()).
+ * Also the fastopen backlog may already have been set via the option
+ * because the socket was in TCP_LISTEN state previously but
+ * was shutdown() rather than close().
*/
- if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+ if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
+ (sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
!inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
- if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
- fastopen_queue_tune(sk, backlog);
- else if ((sysctl_tcp_fastopen &
- TFO_SERVER_WO_SOCKOPT2) != 0)
- fastopen_queue_tune(sk,
- ((uint)sysctl_tcp_fastopen) >> 16);
-
+ fastopen_queue_tune(sk, backlog);
tcp_fastopen_init_key_once(true);
}
+
err = inet_csk_listen_start(sk, backlog);
if (err)
goto out;
@@ -921,6 +916,8 @@ const struct proto_ops inet_stream_ops = {
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
+ .read_sock = tcp_read_sock,
+ .peek_len = tcp_peek_len,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
@@ -1195,7 +1192,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- bool udpfrag = false, fixedid = false, encap;
+ bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
unsigned int offset = 0;
@@ -1248,6 +1245,8 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (IS_ERR_OR_NULL(segs))
goto out;
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
@@ -1262,9 +1261,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;
}
- tot_len = skb_shinfo(skb)->gso_size +
- SKB_GSO_CB(skb)->data_offset +
- skb->head - (unsigned char *)iph;
+
+ if (gso_partial)
+ tot_len = skb_shinfo(skb)->gso_size +
+ SKB_GSO_CB(skb)->data_offset +
+ skb->head - (unsigned char *)iph;
+ else
+ tot_len = skb->len - nhoff;
} else {
if (!fixedid)
iph->id = htons(id++);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1b25daf8c7f1..c3b80478226e 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -93,9 +93,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
return NULL;
switch (id) {
- case RT_TABLE_LOCAL:
- rcu_assign_pointer(net->ipv4.fib_local, tb);
- break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, tb);
break;
@@ -137,9 +134,6 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
{
#ifdef CONFIG_IP_MULTIPLE_TABLES
switch (new->tb_id) {
- case RT_TABLE_LOCAL:
- rcu_assign_pointer(net->ipv4.fib_local, new);
- break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, new);
break;
@@ -188,26 +182,13 @@ static void fib_flush(struct net *net)
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
- flushed += fib_table_flush(tb);
+ flushed += fib_table_flush(net, tb);
}
if (flushed)
rt_cache_flush(net);
}
-void fib_flush_external(struct net *net)
-{
- struct fib_table *tb;
- struct hlist_head *head;
- unsigned int h;
-
- for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
- head = &net->ipv4.fib_table_hash[h];
- hlist_for_each_entry(tb, head, tb_hlist)
- fib_table_flush_external(tb);
- }
-}
-
/*
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
@@ -596,13 +577,13 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (cmd == SIOCDELRT) {
tb = fib_get_table(net, cfg.fc_table);
if (tb)
- err = fib_table_delete(tb, &cfg);
+ err = fib_table_delete(net, tb, &cfg);
else
err = -ESRCH;
} else {
tb = fib_new_table(net, cfg.fc_table);
if (tb)
- err = fib_table_insert(tb, &cfg);
+ err = fib_table_insert(net, tb, &cfg);
else
err = -ENOBUFS;
}
@@ -725,7 +706,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
goto errout;
}
- err = fib_table_delete(tb, &cfg);
+ err = fib_table_delete(net, tb, &cfg);
errout:
return err;
}
@@ -747,7 +728,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
goto errout;
}
- err = fib_table_insert(tb, &cfg);
+ err = fib_table_insert(net, tb, &cfg);
errout:
return err;
}
@@ -834,9 +815,9 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
cfg.fc_scope = RT_SCOPE_HOST;
if (cmd == RTM_NEWROUTE)
- fib_table_insert(tb, &cfg);
+ fib_table_insert(net, tb, &cfg);
else
- fib_table_delete(tb, &cfg);
+ fib_table_delete(net, tb, &cfg);
}
void fib_add_ifaddr(struct in_ifaddr *ifa)
@@ -1250,7 +1231,6 @@ static void ip_fib_net_exit(struct net *net)
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
- RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
@@ -1261,7 +1241,7 @@ static void ip_fib_net_exit(struct net *net)
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
- fib_table_flush(tb);
+ fib_table_flush(net, tb);
fib_free_table(tb);
}
}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6e9ea69e5f75..2e50062f642d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -56,6 +56,9 @@ int __fib_lookup(struct net *net, struct flowi4 *flp,
};
int err;
+ /* update flow if oif or iif points to a device enslaved to an l3mdev */
+ l3mdev_update_flow(net, flowi4_to_flowi(flp));
+
err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
#ifdef CONFIG_IP_ROUTE_CLASSID
if (arg.rule)
@@ -161,6 +164,14 @@ static struct fib_table *fib_empty_table(struct net *net)
return NULL;
}
+static int call_fib_rule_notifiers(struct net *net,
+ enum fib_event_type event_type)
+{
+ struct fib_notifier_info info;
+
+ return call_fib_notifiers(net, event_type, &info);
+}
+
static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
FRA_GENERIC_POLICY,
[FRA_FLOW] = { .type = NLA_U32 },
@@ -217,7 +228,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->tos = frh->tos;
net->ipv4.fib_has_custom_rules = true;
- fib_flush_external(rule->fr_net);
+ call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD);
err = 0;
errout:
@@ -239,7 +250,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
net->ipv4.fib_num_tclassid_users--;
#endif
net->ipv4.fib_has_custom_rules = true;
- fib_flush_external(rule->fr_net);
+ call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL);
errout:
return err;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e9f56225e53f..388d3e21629b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1580,7 +1580,8 @@ static bool fib_good_nh(const struct fib_nh *nh)
rcu_read_lock_bh();
- n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw);
+ n = __ipv4_neigh_lookup_noref(nh->nh_dev,
+ (__force u32)nh->nh_gw);
if (n)
state = n->nud_state;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e2ffc2a5c7db..31cef3602585 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -73,6 +73,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
+#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -80,10 +81,47 @@
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
-#include <net/switchdev.h>
#include <trace/events/fib.h>
#include "fib_lookup.h"
+static BLOCKING_NOTIFIER_HEAD(fib_chain);
+
+int register_fib_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&fib_chain, nb);
+}
+EXPORT_SYMBOL(register_fib_notifier);
+
+int unregister_fib_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&fib_chain, nb);
+}
+EXPORT_SYMBOL(unregister_fib_notifier);
+
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info)
+{
+ info->net = net;
+ return blocking_notifier_call_chain(&fib_chain, event_type, info);
+}
+
+static int call_fib_entry_notifiers(struct net *net,
+ enum fib_event_type event_type, u32 dst,
+ int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id, u32 nlflags)
+{
+ struct fib_entry_notifier_info info = {
+ .dst = dst,
+ .dst_len = dst_len,
+ .fi = fi,
+ .tos = tos,
+ .type = type,
+ .tb_id = tb_id,
+ .nlflags = nlflags,
+ };
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
#define MAX_STAT_DEPTH 32
#define KEYLENGTH (8*sizeof(t_key))
@@ -1076,12 +1114,13 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
}
/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
+int fib_table_insert(struct net *net, struct fib_table *tb,
+ struct fib_config *cfg)
{
struct trie *t = (struct trie *)tb->tb_data;
struct fib_alias *fa, *new_fa;
struct key_vector *l, *tp;
- unsigned int nlflags = 0;
+ u16 nlflags = NLM_F_EXCL;
struct fib_info *fi;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
@@ -1126,6 +1165,8 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
if (cfg->fc_nlflags & NLM_F_EXCL)
goto out;
+ nlflags &= ~NLM_F_EXCL;
+
/* We have 2 goals:
* 1. Find exact match for type, scope, fib_info to avoid
* duplicate routes
@@ -1151,6 +1192,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
struct fib_info *fi_drop;
u8 state;
+ nlflags |= NLM_F_REPLACE;
fa = fa_first;
if (fa_match) {
if (fa == fa_match)
@@ -1172,17 +1214,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
- err = switchdev_fib_ipv4_add(key, plen, fi,
- new_fa->fa_tos,
- cfg->fc_type,
- cfg->fc_nlflags,
- tb->tb_id);
- if (err) {
- switchdev_fib_ipv4_abort(fi);
- kmem_cache_free(fn_alias_kmem, new_fa);
- goto out;
- }
-
hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
alias_free_mem_rcu(fa);
@@ -1190,8 +1221,13 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
fib_release_info(fi_drop);
if (state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+
+ call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
+ key, plen, fi,
+ new_fa->fa_tos, cfg->fc_type,
+ tb->tb_id, cfg->fc_nlflags);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
- tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
+ tb->tb_id, &cfg->fc_nlinfo, nlflags);
goto succeeded;
}
@@ -1203,7 +1239,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
goto out;
if (cfg->fc_nlflags & NLM_F_APPEND)
- nlflags = NLM_F_APPEND;
+ nlflags |= NLM_F_APPEND;
else
fa = fa_first;
}
@@ -1211,6 +1247,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
if (!(cfg->fc_nlflags & NLM_F_CREATE))
goto out;
+ nlflags |= NLM_F_CREATE;
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
@@ -1224,30 +1261,22 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
- /* (Optionally) offload fib entry to switch hardware. */
- err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
- cfg->fc_nlflags, tb->tb_id);
- if (err) {
- switchdev_fib_ipv4_abort(fi);
- goto out_free_new_fa;
- }
-
/* Insert new entry to the list. */
err = fib_insert_alias(t, tp, l, new_fa, fa, key);
if (err)
- goto out_sw_fib_del;
+ goto out_free_new_fa;
if (!plen)
tb->tb_num_default++;
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, key, plen, fi, tos,
+ cfg->fc_type, tb->tb_id, cfg->fc_nlflags);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
return 0;
-out_sw_fib_del:
- switchdev_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
@@ -1486,7 +1515,8 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
}
/* Caller must hold RTNL. */
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
+int fib_table_delete(struct net *net, struct fib_table *tb,
+ struct fib_config *cfg)
{
struct trie *t = (struct trie *) tb->tb_data;
struct fib_alias *fa, *fa_to_delete;
@@ -1539,9 +1569,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
if (!fa_to_delete)
return -ESRCH;
- switchdev_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
- cfg->fc_type, tb->tb_id);
-
+ call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key, plen,
+ fa_to_delete->fa_info, tos, cfg->fc_type,
+ tb->tb_id, 0);
rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
&cfg->fc_nlinfo, 0);
@@ -1730,82 +1760,8 @@ out:
return NULL;
}
-/* Caller must hold RTNL */
-void fib_table_flush_external(struct fib_table *tb)
-{
- struct trie *t = (struct trie *)tb->tb_data;
- struct key_vector *pn = t->kv;
- unsigned long cindex = 1;
- struct hlist_node *tmp;
- struct fib_alias *fa;
-
- /* walk trie in reverse order */
- for (;;) {
- unsigned char slen = 0;
- struct key_vector *n;
-
- if (!(cindex--)) {
- t_key pkey = pn->key;
-
- /* cannot resize the trie vector */
- if (IS_TRIE(pn))
- break;
-
- /* resize completed node */
- pn = resize(t, pn);
- cindex = get_index(pkey, pn);
-
- continue;
- }
-
- /* grab the next available node */
- n = get_child(pn, cindex);
- if (!n)
- continue;
-
- if (IS_TNODE(n)) {
- /* record pn and cindex for leaf walking */
- pn = n;
- cindex = 1ul << n->bits;
-
- continue;
- }
-
- hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- /* if alias was cloned to local then we just
- * need to remove the local copy from main
- */
- if (tb->tb_id != fa->tb_id) {
- hlist_del_rcu(&fa->fa_list);
- alias_free_mem_rcu(fa);
- continue;
- }
-
- /* record local slen */
- slen = fa->fa_slen;
-
- if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
- continue;
-
- switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
- fi, fa->fa_tos, fa->fa_type,
- tb->tb_id);
- }
-
- /* update leaf slen */
- n->slen = slen;
-
- if (hlist_empty(&n->leaf)) {
- put_child_root(pn, n->key, NULL);
- node_free(n);
- }
- }
-}
-
/* Caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
@@ -1854,9 +1810,11 @@ int fib_table_flush(struct fib_table *tb)
continue;
}
- switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
- fi, fa->fa_tos, fa->fa_type,
- tb->tb_id);
+ call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL,
+ n->key,
+ KEYLENGTH - fa->fa_slen,
+ fi, fa->fa_tos, fa->fa_type,
+ tb->tb_id, 0);
hlist_del_rcu(&fa->fa_list);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
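A consumer of the notifier chain introduced above, for instance a switch driver replacing the removed switchdev_fib_ipv4_* calls, would register a notifier block and unpack the per-entry events roughly as sketched below. The example_* names are invented, and the snippet assumes the fib_notifier_info / fib_entry_notifier_info declarations added to ip_fib.h by this series.

#include <linux/module.h>
#include <linux/notifier.h>
#include <net/ip_fib.h>

/* Hypothetical consumer of the FIB notifier chain added above. */
static int example_fib_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
        struct fib_entry_notifier_info *fen_info;

        switch (event) {
        case FIB_EVENT_ENTRY_ADD:
        case FIB_EVENT_ENTRY_DEL:
                fen_info = container_of(ptr, struct fib_entry_notifier_info,
                                        info);
                pr_debug("fib %s %pI4h/%d table %u\n",
                         event == FIB_EVENT_ENTRY_ADD ? "add" : "del",
                         &fen_info->dst, fen_info->dst_len, fen_info->tb_id);
                break;
        case FIB_EVENT_RULE_ADD:
        case FIB_EVENT_RULE_DEL:
                /* Custom rules appeared or vanished; a driver would
                 * typically abort its offload here.
                 */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_fib_nb = {
        .notifier_call = example_fib_event,
};

static int __init example_init(void)
{
        return register_fib_notifier(&example_fib_nb);
}

static void __exit example_exit(void)
{
        unregister_fib_notifier(&example_fib_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");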
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 321d57f825ce..cf50f7e2b012 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -631,7 +631,7 @@ static struct genl_family fou_nl_family = {
.netnsok = true,
};
-static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
+static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
[FOU_ATTR_PORT] = { .type = NLA_U16, },
[FOU_ATTR_AF] = { .type = NLA_U8, },
[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ecd1e09dbbf1..96e0efecefa6 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -24,7 +24,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
__be16 protocol = skb->protocol;
u16 mac_len = skb->mac_len;
int gre_offset, outer_hlen;
- bool need_csum, ufo;
+ bool need_csum, ufo, gso_partial;
if (!skb->encapsulation)
goto out;
@@ -69,6 +69,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
goto out;
}
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
outer_hlen = skb_tnl_header_len(skb);
gre_offset = outer_hlen - tnl_hlen;
skb = segs;
@@ -96,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
greh = (struct gre_base_hdr *)skb_transport_header(skb);
pcsum = (__sum16 *)(greh + 1);
- if (skb_is_gso(skb)) {
+ if (gso_partial) {
unsigned int partial_adj;
/* Adjust checksum to account for the fact that
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9b4ca87f70ba..606cc3e85d2b 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -472,6 +472,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
continue;
}
+ /* Per RFC 3376 section 5.1, do not send source-list change
+ * records when there is a filter-mode change.
+ */
+ if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
+ (!gdeleted && pmc->crcount)) &&
+ (type == IGMPV3_ALLOW_NEW_SOURCES ||
+ type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount)
+ goto decrease_sf_crcount;
+
/* clear marks on query responses */
if (isquery)
psf->sf_gsresp = 0;
@@ -499,6 +508,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
scount++; stotal++;
if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
+decrease_sf_crcount:
psf->sf_crcount--;
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
if (psf_prev)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 38c2c47fe0e8..e4d16fc5bbb3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -45,6 +45,7 @@ struct inet_diag_entry {
u16 family;
u16 userlocks;
u32 ifindex;
+ u32 mark;
};
static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -98,6 +99,7 @@ static size_t inet_sk_attr_size(void)
+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -108,7 +110,8 @@ static size_t inet_sk_attr_size(void)
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns)
+ struct user_namespace *user_ns,
+ bool net_admin)
{
const struct inet_sock *inet = inet_sk(sk);
@@ -135,6 +138,9 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
}
#endif
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
+ goto errout;
+
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
@@ -148,7 +154,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
@@ -174,7 +181,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 0;
r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -273,10 +280,11 @@ static int inet_csk_diag_fill(struct sock *sk,
const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
- return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
- user_ns, portid, seq, nlmsg_flags, unlh);
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
+ portid, seq, nlmsg_flags, unlh, net_admin);
}
static int inet_twsk_diag_fill(struct sock *sk,
@@ -318,8 +326,9 @@ static int inet_twsk_diag_fill(struct sock *sk,
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh, bool net_admin)
{
+ struct request_sock *reqsk = inet_reqsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
@@ -333,7 +342,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
inet_diag_msg_common_fill(r, sk);
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
- r->idiag_retrans = inet_reqsk(sk)->num_retrans;
+ r->idiag_retrans = reqsk->num_retrans;
BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
offsetof(struct sock, sk_cookie));
@@ -345,6 +354,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
r->idiag_uid = 0;
r->idiag_inode = 0;
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+ inet_rsk(reqsk)->ir_mark))
+ return -EMSGSIZE;
+
nlmsg_end(skb, nlh);
return 0;
}
@@ -353,7 +366,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
const struct inet_diag_req_v2 *r,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill(sk, skb, portid, seq,
@@ -361,10 +374,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
if (sk->sk_state == TCP_NEW_SYN_RECV)
return inet_req_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh);
+ nlmsg_flags, unlh, net_admin);
return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
- nlmsg_flags, unlh);
+ nlmsg_flags, unlh, net_admin);
}
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -434,7 +447,8 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
err = sk_diag_fill(sk, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
nlmsg_free(rep);
@@ -580,6 +594,14 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
yes = 0;
break;
}
+ case INET_DIAG_BC_MARK_COND: {
+ struct inet_diag_markcond *cond;
+
+ cond = (struct inet_diag_markcond *)(op + 1);
+ if ((entry->mark & cond->mask) != cond->mark)
+ yes = 0;
+ break;
+ }
}
if (yes) {
@@ -624,6 +646,12 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
entry.dport = ntohs(inet->inet_dport);
entry.ifindex = sk->sk_bound_dev_if;
entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
+ if (sk_fullsock(sk))
+ entry.mark = sk->sk_mark;
+ else if (sk->sk_state == TCP_NEW_SYN_RECV)
+ entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
+ else
+ entry.mark = 0;
return inet_diag_bc_run(bc, &entry);
}
@@ -706,10 +734,25 @@ static bool valid_port_comparison(const struct inet_diag_bc_op *op,
return true;
}
-static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
+static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
+ int *min_len)
+{
+ *min_len += sizeof(struct inet_diag_markcond);
+ return len >= *min_len;
+}
+
+static int inet_diag_bc_audit(const struct nlattr *attr,
+ const struct sk_buff *skb)
{
- const void *bc = bytecode;
- int len = bytecode_len;
+ bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
+ const void *bytecode, *bc;
+ int bytecode_len, len;
+
+ if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
+ return -EINVAL;
+
+ bytecode = bc = nla_data(attr);
+ len = bytecode_len = nla_len(attr);
while (len > 0) {
int min_len = sizeof(struct inet_diag_bc_op);
@@ -732,6 +775,12 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
if (!valid_port_comparison(bc, len, &min_len))
return -EINVAL;
break;
+ case INET_DIAG_BC_MARK_COND:
+ if (!net_admin)
+ return -EPERM;
+ if (!valid_markcond(bc, len, &min_len))
+ return -EINVAL;
+ break;
case INET_DIAG_BC_AUTO:
case INET_DIAG_BC_JMP:
case INET_DIAG_BC_NOP:
@@ -760,7 +809,8 @@ static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
- const struct nlattr *bc)
+ const struct nlattr *bc,
+ bool net_admin)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
@@ -768,7 +818,8 @@ static int inet_csk_diag_dump(struct sock *sk,
return inet_csk_diag_fill(sk, skb, r,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
+ net_admin);
}
static void twsk_build_assert(void)
@@ -804,6 +855,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
int i, num, s_i, s_num;
u32 idiag_states = r->idiag_states;
+ bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
if (idiag_states & TCPF_SYN_RECV)
idiag_states |= TCPF_NEW_SYN_RECV;
@@ -844,7 +896,8 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
cb->args[3] > 0)
goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb, r,
+ bc, net_admin) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -912,7 +965,7 @@ skip_listen_ht:
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh);
+ cb->nlh, net_admin);
if (res < 0) {
spin_unlock_bh(lock);
goto done;
@@ -1020,13 +1073,13 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & NLM_F_DUMP) {
if (nlmsg_attrlen(nlh, hdrlen)) {
struct nlattr *attr;
+ int err;
attr = nlmsg_find_attr(nlh, hdrlen,
INET_DIAG_REQ_BYTECODE);
- if (!attr ||
- nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
- inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
- return -EINVAL;
+ err = inet_diag_bc_audit(attr, skb);
+ if (err)
+ return err;
}
{
struct netlink_dump_control c = {
@@ -1051,13 +1104,13 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
h->nlmsg_flags & NLM_F_DUMP) {
if (nlmsg_attrlen(h, hdrlen)) {
struct nlattr *attr;
+ int err;
attr = nlmsg_find_attr(h, hdrlen,
INET_DIAG_REQ_BYTECODE);
- if (!attr ||
- nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
- inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
- return -EINVAL;
+ err = inet_diag_bc_audit(attr, skb);
+ if (err)
+ return err;
}
{
struct netlink_dump_control c = {
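On the user-space side, the new INET_DIAG_BC_MARK_COND op lets a CAP_NET_ADMIN caller filter dumped sockets by fwmark. A hedged sketch of building that single bytecode op follows; build_mark_cond() is a made-up helper, the yes/no offsets follow the usual accept/reject convention as I read the bytecode audit above, and the netlink request carrying the INET_DIAG_REQ_BYTECODE attribute is not shown.

#include <stdio.h>
#include <string.h>
#include <linux/types.h>
#include <linux/inet_diag.h>

/* Build one bytecode op that accepts sockets whose mark, ANDed with
 * "mask", equals "mark".  The result is meant to be placed in the
 * INET_DIAG_REQ_BYTECODE attribute of an inet_diag request.
 */
static int build_mark_cond(void *buf, size_t len, __u32 mark, __u32 mask)
{
        struct inet_diag_bc_op *op = buf;
        struct inet_diag_markcond *cond = (void *)(op + 1);
        const int total = sizeof(*op) + sizeof(*cond);

        if (len < (size_t)total)
                return -1;
        op->code = INET_DIAG_BC_MARK_COND;
        op->yes = total;        /* on match: end of bytecode, accept */
        op->no = total + 4;     /* on mismatch: reject the socket */
        cond->mark = mark;
        cond->mask = mask;
        return total;
}

int main(void)
{
        char buf[64];
        int n = build_mark_cond(buf, sizeof(buf), 0x1, 0xff);

        printf("bytecode length: %d\n", n);
        return 0;
}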
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 113cc43df789..576f705d8180 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -246,25 +246,6 @@ static void gre_err(struct sk_buff *skb, u32 info)
ipgre_err(skb, info, &tpi);
}
-static __be64 key_to_tunnel_id(__be32 key)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be64)((__force u32)key);
-#else
- return (__force __be64)((__force u64)key << 32);
-#endif
-}
-
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 tunnel_id_to_key(__be64 x)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be32)x;
-#else
- return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
@@ -290,7 +271,7 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
__be64 tun_id;
flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
- tun_id = key_to_tunnel_id(tpi->key);
+ tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
if (!tun_dst)
return PACKET_REJECT;
@@ -446,7 +427,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
gre_build_header(skb, tunnel_hlen, flags, proto,
- tunnel_id_to_key(tun_info->key.tun_id), 0);
+ tunnel_id_to_key32(tun_info->key.tun_id), 0);
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index dde37fb340bf..05d105832bdb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -73,6 +73,7 @@
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
+#include <net/lwtunnel.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
@@ -98,6 +99,14 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
+
+ /* If the egress device is enslaved to an L3 master device, pass the
+ * skb to its handler for processing.
+ */
+ skb = l3mdev_ip_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
@@ -197,6 +206,13 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
skb = skb2;
}
+ if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ int res = lwtunnel_xmit(skb);
+
+ if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ return res;
+ }
+
rcu_read_lock_bh();
nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
@@ -482,7 +498,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
@@ -1566,8 +1582,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
}
oif = arg->bound_dev_if;
- if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
- oif = skb->skb_iif;
+ oif = oif ? : skb->skb_iif;
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark),
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 71a52f4d4cff..af4919792b6a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -284,9 +284,12 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
ipc->ttl = val;
break;
case IP_TOS:
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+ if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
+ val = *(int *)CMSG_DATA(cmsg);
+ else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
+ val = *(u8 *)CMSG_DATA(cmsg);
+ else
return -EINVAL;
- val = *(int *)CMSG_DATA(cmsg);
if (val < 0 || val > 255)
return -EINVAL;
ipc->tos = val;
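With the change above, IP_TOS ancillary data may be supplied either as an int or as a single byte. A minimal sender-side sketch of the one-byte form follows; the loopback destination, discard port and payload are placeholders.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port = htons(9),   /* discard port, placeholder */
        };
        char payload[] = "hello";
        struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(unsigned char))];
        } u;
        struct msghdr msg = {
                .msg_name = &dst,
                .msg_namelen = sizeof(dst),
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        unsigned char tos = 0x10;       /* IPTOS_LOWDELAY */

        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        /* One-byte TOS, accepted by the updated ip_cmsg_send() alongside
         * the traditional int-sized form.
         */
        cmsg->cmsg_level = IPPROTO_IP;
        cmsg->cmsg_type = IP_TOS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
        memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));

        if (sendmsg(fd, &msg, 0) < 0)
                perror("sendmsg");
        close(fd);
        return 0;
}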
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 95649ebd2874..5719d6ba0824 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -55,6 +55,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
+#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -546,6 +547,81 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ u32 headroom = sizeof(struct iphdr);
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+ const struct iphdr *inner_iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ __be16 df = 0;
+ u8 tos, ttl;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET))
+ goto tx_error;
+ key = &tun_info->key;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ tos = key->tos;
+ if (tos == 1) {
+ if (skb->protocol == htons(ETH_P_IP))
+ tos = inner_iph->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+ }
+ init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
+ RT_TOS(tos), tunnel->parms.link);
+ if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
+ goto tx_error;
+ rt = ip_route_output_key(tunnel->net, &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (rt->dst.dev == dev) {
+ ip_rt_put(rt);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
+ tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
+ ttl = key->ttl;
+ if (ttl == 0) {
+ if (skb->protocol == htons(ETH_P_IP))
+ ttl = inner_iph->ttl;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
+ else
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ }
+ if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ else if (skb->protocol == htons(ETH_P_IP))
+ df = inner_iph->frag_off & htons(IP_DF);
+ headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+ if (headroom > dev->needed_headroom)
+ dev->needed_headroom = headroom;
+
+ if (skb_cow_head(skb, dev->needed_headroom)) {
+ ip_rt_put(rt);
+ goto tx_dropped;
+ }
+ iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
+ key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+ return;
+tx_error:
+ dev->stats.tx_errors++;
+ goto kfree;
+tx_dropped:
+ dev->stats.tx_dropped++;
+kfree:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
+
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 0f227db0e9ac..777bc1883870 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -69,7 +69,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_scrub_packet(skb, xnet);
- skb_clear_hash(skb);
+ skb_clear_hash_if_not_l4(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 1d71c40eaaf3..071a785c65eb 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -85,7 +85,6 @@
/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */
#define CONF_SEND_RETRIES 6 /* Send six requests per open */
-#define CONF_INTER_TIMEOUT (HZ) /* Inter-device timeout: 1 second */
#define CONF_BASE_TIMEOUT (HZ*2) /* Initial timeout: 2 seconds */
#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */
#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */
@@ -188,7 +187,7 @@ struct ic_device {
};
static struct ic_device *ic_first_dev __initdata; /* List of open device */
-static struct net_device *ic_dev __initdata; /* Selected device */
+static struct ic_device *ic_dev __initdata; /* Selected device */
static bool __init ic_is_init_dev(struct net_device *dev)
{
@@ -307,7 +306,7 @@ static void __init ic_close_devs(void)
while ((d = next)) {
next = d->next;
dev = d->dev;
- if (dev != ic_dev && !netdev_uses_dsa(dev)) {
+ if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
pr_debug("IP-Config: Downing %s\n", dev->name);
dev_change_flags(dev, d->flags);
}
@@ -372,7 +371,7 @@ static int __init ic_setup_if(void)
int err;
memset(&ir, 0, sizeof(ir));
- strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
+ strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->dev->name);
set_sockaddr(sin, ic_myaddr, 0);
if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
pr_err("IP-Config: Unable to set interface address (%d)\n",
@@ -396,7 +395,7 @@ static int __init ic_setup_if(void)
* out, we'll try to muddle along.
*/
if (ic_dev_mtu != 0) {
- strcpy(ir.ifr_name, ic_dev->name);
+ strcpy(ir.ifr_name, ic_dev->dev->name);
ir.ifr_mtu = ic_dev_mtu;
if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
@@ -568,7 +567,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
goto drop_unlock;
/* We have a winner! */
- ic_dev = dev;
+ ic_dev = d;
if (ic_myaddr == NONE)
ic_myaddr = tip;
ic_servaddr = sip;
@@ -655,8 +654,6 @@ static struct packet_type bootp_packet_type __initdata = {
.func = ic_bootp_recv,
};
-static __be32 ic_dev_xid; /* Device under configuration */
-
/*
* Initialize DHCP/BOOTP extension fields in the request.
*/
@@ -666,14 +663,14 @@ static const u8 ic_bootp_cookie[4] = { 99, 130, 83, 99 };
#ifdef IPCONFIG_DHCP
static void __init
-ic_dhcp_init_options(u8 *options)
+ic_dhcp_init_options(u8 *options, struct ic_device *d)
{
u8 mt = ((ic_servaddr == NONE)
? DHCPDISCOVER : DHCPREQUEST);
u8 *e = options;
int len;
- pr_debug("DHCP: Sending message type %d\n", mt);
+ pr_debug("DHCP: Sending message type %d (%s)\n", mt, d->dev->name);
memcpy(e, ic_bootp_cookie, 4); /* RFC1048 Magic Cookie */
e += 4;
@@ -857,7 +854,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
/* add DHCP options or BOOTP extensions */
#ifdef IPCONFIG_DHCP
if (ic_proto_enabled & IC_USE_DHCP)
- ic_dhcp_init_options(b->exten);
+ ic_dhcp_init_options(b->exten, d);
else
#endif
ic_bootp_init_ext(b->exten);
@@ -1033,14 +1030,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
/* Is it a reply to our BOOTP request? */
if (b->op != BOOTP_REPLY ||
b->xid != d->xid) {
- net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
- b->op, b->xid);
- goto drop_unlock;
- }
-
- /* Is it a reply for the device we are configuring? */
- if (b->xid != ic_dev_xid) {
- net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n");
+ net_err_ratelimited("DHCP/BOOTP: Reply not for us on %s, op[%x] xid[%x]\n",
+ d->dev->name, b->op, b->xid);
goto drop_unlock;
}
@@ -1075,7 +1066,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
}
}
- pr_debug("DHCP: Got message type %d\n", mt);
+ pr_debug("DHCP: Got message type %d (%s)\n", mt, d->dev->name);
switch (mt) {
case DHCPOFFER:
@@ -1130,7 +1121,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
}
/* We have a winner! */
- ic_dev = dev;
+ ic_dev = d;
ic_myaddr = b->your_ip;
ic_servaddr = b->server_ip;
ic_addrservaddr = b->iph.saddr;
@@ -1225,9 +1216,6 @@ static int __init ic_dynamic(void)
timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
for (;;) {
#ifdef IPCONFIG_BOOTP
- /* Track the device we are configuring */
- ic_dev_xid = d->xid;
-
if (do_bootp && (d->able & IC_BOOTP))
ic_bootp_send_if(d, jiffies - start_jiffies);
#endif
@@ -1236,15 +1224,19 @@ static int __init ic_dynamic(void)
ic_rarp_send_if(d);
#endif
- jiff = jiffies + (d->next ? CONF_INTER_TIMEOUT : timeout);
- while (time_before(jiffies, jiff) && !ic_got_reply)
- schedule_timeout_uninterruptible(1);
+ if (!d->next) {
+ jiff = jiffies + timeout;
+ while (time_before(jiffies, jiff) && !ic_got_reply)
+ schedule_timeout_uninterruptible(1);
+ }
#ifdef IPCONFIG_DHCP
/* DHCP isn't done until we get a DHCPACK. */
if ((ic_got_reply & IC_BOOTP) &&
(ic_proto_enabled & IC_USE_DHCP) &&
ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
+ /* continue on device that got the reply */
+ d = ic_dev;
pr_cont(",");
continue;
}
@@ -1487,7 +1479,7 @@ static int __init ip_auto_config(void)
#endif /* IPCONFIG_DYNAMIC */
} else {
/* Device selected manually or only one device -> use it */
- ic_dev = ic_first_dev->dev;
+ ic_dev = ic_first_dev;
}
addr = root_nfs_parse_addr(root_server_path);
@@ -1501,14 +1493,6 @@ static int __init ip_auto_config(void)
return -1;
/*
- * Close all network devices except the device we've
- * autoconfigured and set up routes.
- */
- ic_close_devs();
- if (ic_setup_if() < 0 || ic_setup_routes() < 0)
- return -1;
-
- /*
* Record which protocol was actually used.
*/
#ifdef IPCONFIG_DYNAMIC
@@ -1522,7 +1506,7 @@ static int __init ip_auto_config(void)
pr_info("IP-Config: Complete:\n");
pr_info(" device=%s, hwaddr=%*phC, ipaddr=%pI4, mask=%pI4, gw=%pI4\n",
- ic_dev->name, ic_dev->addr_len, ic_dev->dev_addr,
+ ic_dev->dev->name, ic_dev->dev->addr_len, ic_dev->dev->dev_addr,
&ic_myaddr, &ic_netmask, &ic_gateway);
pr_info(" host=%s, domain=%s, nis-domain=%s\n",
utsname()->nodename, ic_domain, utsname()->domainname);
@@ -1542,7 +1526,18 @@ static int __init ip_auto_config(void)
pr_cont("\n");
#endif /* !SILENT */
- return 0;
+ /*
+ * Close all network devices except the device we've
+ * autoconfigured and set up routes.
+ */
+ if (ic_setup_if() < 0 || ic_setup_routes() < 0)
+ err = -1;
+ else
+ err = 0;
+
+ ic_close_devs();
+
+ return err;
}
late_initcall(ip_auto_config);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 4ae3f8e6c6cc..c9392589c415 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -115,6 +115,7 @@
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/dst_metadata.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
@@ -193,6 +194,7 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+ struct metadata_dst *tun_dst = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@@ -216,7 +218,12 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
tpi = &ipip_tpi;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+ if (tunnel->collect_md) {
+ tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
+ if (!tun_dst)
+ return 0;
+ }
+ return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
}
return -1;
@@ -270,7 +277,10 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, ipproto);
- ip_tunnel_xmit(skb, dev, tiph, ipproto);
+ if (tunnel->collect_md)
+ ip_md_tunnel_xmit(skb, dev, ipproto);
+ else
+ ip_tunnel_xmit(skb, dev, tiph, ipproto);
return NETDEV_TX_OK;
tx_error:
@@ -380,13 +390,14 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
}
static void ipip_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm *parms, bool *collect_md)
{
memset(parms, 0, sizeof(*parms));
parms->iph.version = 4;
parms->iph.protocol = IPPROTO_IPIP;
parms->iph.ihl = 5;
+ *collect_md = false;
if (!data)
return;
@@ -414,6 +425,9 @@ static void ipip_netlink_parms(struct nlattr *data[],
if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
+
+ if (data[IFLA_IPTUN_COLLECT_METADATA])
+ *collect_md = true;
}
/* This function returns true when ENCAP attributes are present in the nl msg */
@@ -453,18 +467,18 @@ static bool ipip_netlink_encap_parms(struct nlattr *data[],
static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
+ struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
if (ipip_netlink_encap_parms(data, &ipencap)) {
- struct ip_tunnel *t = netdev_priv(dev);
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
- ipip_netlink_parms(data, &p);
+ ipip_netlink_parms(data, &p, &t->collect_md);
return ip_tunnel_newlink(dev, tb, &p);
}
@@ -473,6 +487,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ bool collect_md;
if (ipip_netlink_encap_parms(data, &ipencap)) {
struct ip_tunnel *t = netdev_priv(dev);
@@ -482,7 +497,9 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- ipip_netlink_parms(data, &p);
+ ipip_netlink_parms(data, &p, &collect_md);
+ if (collect_md)
+ return -EINVAL;
if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
(!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
@@ -516,6 +533,8 @@ static size_t ipip_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_IPTUN_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -544,6 +563,9 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
tunnel->encap.flags))
goto nla_put_failure;
+ if (tunnel->collect_md)
+ if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -562,6 +584,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ipip_link_ops __read_mostly = {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index c187c60e3e0c..d613309e3e5d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -25,17 +25,6 @@ config NF_CONNTRACK_IPV4
To compile it as a module, choose M here. If unsure, say N.
-config NF_CONNTRACK_PROC_COMPAT
- bool "proc/sysctl compatibility with old connection tracking"
- depends on NF_CONNTRACK_PROCFS && NF_CONNTRACK_IPV4
- default y
- help
- This option enables /proc and sysctl compatibility with the old
- layer 3 dependent connection tracking. This is needed to keep
- old programs that have not been adapted to the new names working.
-
- If unsure, say Y.
-
if NF_TABLES
config NF_TABLES_IPV4
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 87b073da14c9..853328f8fd05 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -4,11 +4,6 @@
# objects for l3 independent conntrack
nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
-ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
-ifeq ($(CONFIG_PROC_FS),y)
-nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
-endif
-endif
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index f993545a3373..7c00ce90adb8 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -156,7 +156,7 @@ static struct nf_loginfo trace_loginfo = {
.u = {
.log = {
.level = 4,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index ae1a71a97132..713c09a74b90 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -110,7 +110,7 @@ static unsigned int ipv4_helper(void *priv,
if (!help)
return NF_ACCEPT;
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
@@ -202,47 +202,6 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
},
};
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
-static int log_invalid_proto_min = 0;
-static int log_invalid_proto_max = 255;
-
-static struct ctl_table ip_ct_sysctl_table[] = {
- {
- .procname = "ip_conntrack_max",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_count",
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_buckets",
- .maxlen = sizeof(unsigned int),
- .mode = 0444,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_checksum",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_log_invalid",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &log_invalid_proto_min,
- .extra2 = &log_invalid_proto_max,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */
-
/* Fast function for those who don't want to parse /proc (and I don't
blame them). */
/* Reversing the socket's dst/src point of view gives us the reply
@@ -350,20 +309,6 @@ static struct nf_sockopt_ops so_getorigdst = {
static int ipv4_init_net(struct net *net)
{
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- struct nf_ip_net *in = &net->ct.nf_ct_proto;
- in->ctl_table = kmemdup(ip_ct_sysctl_table,
- sizeof(ip_ct_sysctl_table),
- GFP_KERNEL);
- if (!in->ctl_table)
- return -ENOMEM;
-
- in->ctl_table[0].data = &nf_conntrack_max;
- in->ctl_table[1].data = &net->ct.count;
- in->ctl_table[2].data = &nf_conntrack_htable_size;
- in->ctl_table[3].data = &net->ct.sysctl_checksum;
- in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
-#endif
return 0;
}
@@ -380,9 +325,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.nlattr_to_tuple = ipv4_nlattr_to_tuple,
.nla_policy = ipv4_nla_policy,
#endif
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- .ctl_table_path = "net/ipv4/netfilter",
-#endif
.init_net = ipv4_init_net,
.me = THIS_MODULE,
};
@@ -492,16 +434,7 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
goto cleanup_icmpv4;
}
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- ret = nf_conntrack_ipv4_compat_init();
- if (ret < 0)
- goto cleanup_proto;
-#endif
return ret;
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- cleanup_proto:
- nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
-#endif
cleanup_icmpv4:
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
cleanup_udp4:
@@ -520,9 +453,6 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
static void __exit nf_conntrack_l3proto_ipv4_fini(void)
{
synchronize_net();
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- nf_conntrack_ipv4_compat_fini();
-#endif
nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_icmp);
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udp4);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
deleted file mode 100644
index 63923710f325..000000000000
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ /dev/null
@@ -1,492 +0,0 @@
-/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
- *
- * (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/percpu.h>
-#include <linux/security.h>
-#include <net/net_namespace.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_conntrack_acct.h>
-#include <linux/rculist_nulls.h>
-#include <linux/export.h>
-
-struct ct_iter_state {
- struct seq_net_private p;
- struct hlist_nulls_head *hash;
- unsigned int htable_size;
- unsigned int bucket;
-};
-
-static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
-{
- struct ct_iter_state *st = seq->private;
- struct hlist_nulls_node *n;
-
- for (st->bucket = 0;
- st->bucket < st->htable_size;
- st->bucket++) {
- n = rcu_dereference(
- hlist_nulls_first_rcu(&st->hash[st->bucket]));
- if (!is_a_nulls(n))
- return n;
- }
- return NULL;
-}
-
-static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
- struct hlist_nulls_node *head)
-{
- struct ct_iter_state *st = seq->private;
-
- head = rcu_dereference(hlist_nulls_next_rcu(head));
- while (is_a_nulls(head)) {
- if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= st->htable_size)
- return NULL;
- }
- head = rcu_dereference(
- hlist_nulls_first_rcu(&st->hash[st->bucket]));
- }
- return head;
-}
-
-static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct hlist_nulls_node *head = ct_get_first(seq);
-
- if (head)
- while (pos && (head = ct_get_next(seq, head)))
- pos--;
- return pos ? NULL : head;
-}
-
-static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- struct ct_iter_state *st = seq->private;
-
- rcu_read_lock();
-
- nf_conntrack_get_ht(&st->hash, &st->htable_size);
- return ct_get_idx(seq, *pos);
-}
-
-static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- (*pos)++;
- return ct_get_next(s, v);
-}
-
-static void ct_seq_stop(struct seq_file *s, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
-static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
-{
- int ret;
- u32 len;
- char *secctx;
-
- ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
- if (ret)
- return;
-
- seq_printf(s, "secctx=%s ", secctx);
-
- security_release_secctx(secctx, len);
-}
-#else
-static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
-{
-}
-#endif
-
-static bool ct_seq_should_skip(const struct nf_conn *ct,
- const struct net *net,
- const struct nf_conntrack_tuple_hash *hash)
-{
- /* we only want to print DIR_ORIGINAL */
- if (NF_CT_DIRECTION(hash))
- return true;
-
- if (nf_ct_l3num(ct) != AF_INET)
- return true;
-
- if (!net_eq(nf_ct_net(ct), net))
- return true;
-
- return false;
-}
-
-static int ct_seq_show(struct seq_file *s, void *v)
-{
- struct nf_conntrack_tuple_hash *hash = v;
- struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
- const struct nf_conntrack_l3proto *l3proto;
- const struct nf_conntrack_l4proto *l4proto;
- int ret = 0;
-
- NF_CT_ASSERT(ct);
- if (ct_seq_should_skip(ct, seq_file_net(s), hash))
- return 0;
-
- if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
- return 0;
-
- /* check if we raced w. object reuse */
- if (!nf_ct_is_confirmed(ct) ||
- ct_seq_should_skip(ct, seq_file_net(s), hash))
- goto release;
-
- l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
- NF_CT_ASSERT(l3proto);
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
- NF_CT_ASSERT(l4proto);
-
- ret = -ENOSPC;
- seq_printf(s, "%-8s %u %ld ",
- l4proto->name, nf_ct_protonum(ct),
- timer_pending(&ct->timeout)
- ? (long)(ct->timeout.expires - jiffies)/HZ : 0);
-
- if (l4proto->print_conntrack)
- l4proto->print_conntrack(s, ct);
-
- if (seq_has_overflowed(s))
- goto release;
-
- print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- l3proto, l4proto);
-
- if (seq_has_overflowed(s))
- goto release;
-
- if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
- goto release;
-
- if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
- seq_printf(s, "[UNREPLIED] ");
-
- print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- l3proto, l4proto);
-
- if (seq_has_overflowed(s))
- goto release;
-
- if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
- goto release;
-
- if (test_bit(IPS_ASSURED_BIT, &ct->status))
- seq_printf(s, "[ASSURED] ");
-
-#ifdef CONFIG_NF_CONNTRACK_MARK
- seq_printf(s, "mark=%u ", ct->mark);
-#endif
-
- ct_show_secctx(s, ct);
-
- seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
-
- if (seq_has_overflowed(s))
- goto release;
-
- ret = 0;
-release:
- nf_ct_put(ct);
- return ret;
-}
-
-static const struct seq_operations ct_seq_ops = {
- .start = ct_seq_start,
- .next = ct_seq_next,
- .stop = ct_seq_stop,
- .show = ct_seq_show
-};
-
-static int ct_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &ct_seq_ops,
- sizeof(struct ct_iter_state));
-}
-
-static const struct file_operations ct_file_ops = {
- .owner = THIS_MODULE,
- .open = ct_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-/* expects */
-struct ct_expect_iter_state {
- struct seq_net_private p;
- unsigned int bucket;
-};
-
-static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
-{
- struct ct_expect_iter_state *st = seq->private;
- struct hlist_node *n;
-
- for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(
- hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
- if (n)
- return n;
- }
- return NULL;
-}
-
-static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
- struct hlist_node *head)
-{
- struct ct_expect_iter_state *st = seq->private;
-
- head = rcu_dereference(hlist_next_rcu(head));
- while (head == NULL) {
- if (++st->bucket >= nf_ct_expect_hsize)
- return NULL;
- head = rcu_dereference(
- hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
- }
- return head;
-}
-
-static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct hlist_node *head = ct_expect_get_first(seq);
-
- if (head)
- while (pos && (head = ct_expect_get_next(seq, head)))
- pos--;
- return pos ? NULL : head;
-}
-
-static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- rcu_read_lock();
- return ct_expect_get_idx(seq, *pos);
-}
-
-static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- (*pos)++;
- return ct_expect_get_next(seq, v);
-}
-
-static void exp_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int exp_seq_show(struct seq_file *s, void *v)
-{
- struct nf_conntrack_expect *exp;
- const struct hlist_node *n = v;
-
- exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
-
- if (!net_eq(nf_ct_net(exp->master), seq_file_net(s)))
- return 0;
-
- if (exp->tuple.src.l3num != AF_INET)
- return 0;
-
- if (exp->timeout.function)
- seq_printf(s, "%ld ", timer_pending(&exp->timeout)
- ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
- else
- seq_printf(s, "- ");
-
- seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);
-
- print_tuple(s, &exp->tuple,
- __nf_ct_l3proto_find(exp->tuple.src.l3num),
- __nf_ct_l4proto_find(exp->tuple.src.l3num,
- exp->tuple.dst.protonum));
- seq_putc(s, '\n');
-
- return 0;
-}
-
-static const struct seq_operations exp_seq_ops = {
- .start = exp_seq_start,
- .next = exp_seq_next,
- .stop = exp_seq_stop,
- .show = exp_seq_show
-};
-
-static int exp_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &exp_seq_ops,
- sizeof(struct ct_expect_iter_state));
-}
-
-static const struct file_operations ip_exp_file_ops = {
- .owner = THIS_MODULE,
- .open = exp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
-{
- struct net *net = seq_file_net(seq);
- int cpu;
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
- if (!cpu_possible(cpu))
- continue;
- *pos = cpu+1;
- return per_cpu_ptr(net->ct.stat, cpu);
- }
-
- return NULL;
-}
-
-static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct net *net = seq_file_net(seq);
- int cpu;
-
- for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
- if (!cpu_possible(cpu))
- continue;
- *pos = cpu+1;
- return per_cpu_ptr(net->ct.stat, cpu);
- }
-
- return NULL;
-}
-
-static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
-{
-}
-
-static int ct_cpu_seq_show(struct seq_file *seq, void *v)
-{
- struct net *net = seq_file_net(seq);
- unsigned int nr_conntracks = atomic_read(&net->ct.count);
- const struct ip_conntrack_stat *st = v;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
- return 0;
- }
-
- seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
- "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- nr_conntracks,
- st->searched,
- st->found,
- st->new,
- st->invalid,
- st->ignore,
- st->delete,
- st->delete_list,
- st->insert,
- st->insert_failed,
- st->drop,
- st->early_drop,
- st->error,
-
- st->expect_new,
- st->expect_create,
- st->expect_delete,
- st->search_restart
- );
- return 0;
-}
-
-static const struct seq_operations ct_cpu_seq_ops = {
- .start = ct_cpu_seq_start,
- .next = ct_cpu_seq_next,
- .stop = ct_cpu_seq_stop,
- .show = ct_cpu_seq_show,
-};
-
-static int ct_cpu_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &ct_cpu_seq_ops,
- sizeof(struct seq_net_private));
-}
-
-static const struct file_operations ct_cpu_seq_fops = {
- .owner = THIS_MODULE,
- .open = ct_cpu_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-static int __net_init ip_conntrack_net_init(struct net *net)
-{
- struct proc_dir_entry *proc, *proc_exp, *proc_stat;
-
- proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
- if (!proc)
- goto err1;
-
- proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
- &ip_exp_file_ops);
- if (!proc_exp)
- goto err2;
-
- proc_stat = proc_create("ip_conntrack", S_IRUGO,
- net->proc_net_stat, &ct_cpu_seq_fops);
- if (!proc_stat)
- goto err3;
- return 0;
-
-err3:
- remove_proc_entry("ip_conntrack_expect", net->proc_net);
-err2:
- remove_proc_entry("ip_conntrack", net->proc_net);
-err1:
- return -ENOMEM;
-}
-
-static void __net_exit ip_conntrack_net_exit(struct net *net)
-{
- remove_proc_entry("ip_conntrack", net->proc_net_stat);
- remove_proc_entry("ip_conntrack_expect", net->proc_net);
- remove_proc_entry("ip_conntrack", net->proc_net);
-}
-
-static struct pernet_operations ip_conntrack_net_ops = {
- .init = ip_conntrack_net_init,
- .exit = ip_conntrack_net_exit,
-};
-
-int __init nf_conntrack_ipv4_compat_init(void)
-{
- return register_pernet_subsys(&ip_conntrack_net_ops);
-}
-
-void __exit nf_conntrack_ipv4_compat_fini(void)
-{
- unregister_pernet_subsys(&ip_conntrack_net_ops);
-}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index c567e1b5d799..d075b3cf2400 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -149,7 +149,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum);
/* Ordinarily, we'd expect the inverted tupleproto, but it's
@@ -327,17 +327,6 @@ static struct ctl_table icmp_sysctl_table[] = {
},
{ }
};
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-static struct ctl_table icmp_compat_sysctl_table[] = {
- {
- .procname = "ip_conntrack_icmp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
@@ -355,40 +344,14 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int icmp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
- struct nf_icmp_net *in)
-{
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- pn->ctl_compat_table = kmemdup(icmp_compat_sysctl_table,
- sizeof(icmp_compat_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_compat_table)
- return -ENOMEM;
-
- pn->ctl_compat_table[0].data = &in->timeout;
-#endif
-#endif
- return 0;
-}
-
static int icmp_init_net(struct net *net, u_int16_t proto)
{
- int ret;
struct nf_icmp_net *in = icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
- ret = icmp_kmemdup_compat_sysctl_table(pn, in);
- if (ret < 0)
- return ret;
-
- ret = icmp_kmemdup_sysctl_table(pn, in);
- if (ret < 0)
- nf_ct_kfree_compat_sysctl_table(pn);
-
- return ret;
+ return icmp_kmemdup_sysctl_table(pn, in);
}
static struct nf_proto_net *icmp_get_net_proto(struct net *net)
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index ceb187308120..cf986e1c7bbd 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -74,21 +74,19 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
nf_conntrack_get(skb->nfct);
#endif
/*
- * If we are in PREROUTING/INPUT, the checksum must be recalculated
- * since the length could have changed as a result of defragmentation.
- *
- * We also decrease the TTL to mitigate potential loops between two
- * hosts.
+ * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
+ * loops between two hosts.
*
* Set %IP_DF so that the original source is notified of a potentially
* decreased MTU on the clone route. IPv6 does this too.
+ *
+ * IP header checksum will be recalculated at ip_local_out.
*/
iph = ip_hdr(skb);
iph->frag_off |= htons(IP_DF);
if (hooknum == NF_INET_PRE_ROUTING ||
hooknum == NF_INET_LOCAL_IN)
--iph->ttl;
- ip_send_check(iph);
if (nf_dup_ipv4_route(net, skb, gw, oif)) {
__this_cpu_write(nf_skb_duplicated, true);
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index e7ad950cf9ef..b24795e2ee6d 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -30,7 +30,7 @@ static struct nf_loginfo default_loginfo = {
.u = {
.log = {
.level = LOGLEVEL_NOTICE,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
@@ -62,7 +62,7 @@ static void dump_arp_packet(struct nf_log_buf *m,
/* If it's for Ethernet and the lengths are OK, then log the ARP
* payload.
*/
- if (ah->ar_hrd != htons(1) ||
+ if (ah->ar_hrd != htons(ARPHRD_ETHER) ||
ah->ar_hln != ETH_ALEN ||
ah->ar_pln != sizeof(__be32))
return;
@@ -111,8 +111,7 @@ static struct nf_logger nf_arp_logger __read_mostly = {
static int __net_init nf_log_arp_net_init(struct net *net)
{
- nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
- return 0;
+ return nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
}
static void __net_exit nf_log_arp_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 076aadda0473..856648966f4c 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -29,7 +29,7 @@ static struct nf_loginfo default_loginfo = {
.u = {
.log = {
.level = LOGLEVEL_NOTICE,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
@@ -46,7 +46,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
- logflags = NF_LOG_MASK;
+ logflags = NF_LOG_DEFAULT_MASK;
ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
if (ih == NULL) {
@@ -76,7 +76,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
if (ntohs(ih->frag_off) & IP_OFFSET)
nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
- if ((logflags & XT_LOG_IPOPT) &&
+ if ((logflags & NF_LOG_IPOPT) &&
ih->ihl * 4 > sizeof(struct iphdr)) {
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
@@ -250,7 +250,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
}
/* Max length: 15 "UID=4294967295 " */
- if ((logflags & XT_LOG_UID) && !iphoff)
+ if ((logflags & NF_LOG_UID) && !iphoff)
nf_log_dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
@@ -282,7 +282,7 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
- if (!(logflags & XT_LOG_MACDECODE))
+ if (!(logflags & NF_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
@@ -347,8 +347,7 @@ static struct nf_logger nf_ip_logger __read_mostly = {
static int __net_init nf_log_ipv4_net_init(struct net *net)
{
- nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
- return 0;
+ return nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
}
static void __net_exit nf_log_ipv4_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index 9414923f1e15..edf05002d674 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -88,8 +88,8 @@ gre_manip_pkt(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
- const struct gre_hdr *greh;
- struct gre_hdr_pptp *pgreh;
+ const struct gre_base_hdr *greh;
+ struct pptp_gre_header *pgreh;
/* pgreh includes two optional 32bit fields which are not required
* to be there. That's where the magic '8' comes from */
@@ -97,18 +97,19 @@ gre_manip_pkt(struct sk_buff *skb,
return false;
greh = (void *)skb->data + hdroff;
- pgreh = (struct gre_hdr_pptp *)greh;
+ pgreh = (struct pptp_gre_header *)greh;
/* we only have destination manip of a packet, since 'source key'
* is not present in the packet itself */
if (maniptype != NF_NAT_MANIP_DST)
return true;
- switch (greh->version) {
- case GRE_VERSION_1701:
+
+ switch (greh->flags & GRE_VERSION) {
+ case GRE_VERSION_0:
/* We do not currently NAT any GREv0 packets.
* Try to behave like "nf_nat_proto_unknown" */
break;
- case GRE_VERSION_PPTP:
+ case GRE_VERSION_1:
pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
pgreh->call_id = tuple->dst.u.gre.key;
break;
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index cd84d4295a20..805c8ddfe860 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -21,7 +21,7 @@ nft_do_chain_arp(void *priv,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb, state);
return nft_do_chain(&pkt, priv);
}
@@ -80,7 +80,10 @@ static int __init nf_tables_arp_init(void)
{
int ret;
- nft_register_chain_type(&filter_arp);
+ ret = nft_register_chain_type(&filter_arp);
+ if (ret < 0)
+ return ret;
+
ret = register_pernet_subsys(&nf_tables_arp_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_arp);
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index e44ba3b12fbb..2840a29b2e04 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -103,7 +103,10 @@ static int __init nf_tables_ipv4_init(void)
{
int ret;
- nft_register_chain_type(&filter_ipv4);
+ ret = nft_register_chain_type(&filter_ipv4);
+ if (ret < 0)
+ return ret;
+
ret = register_pernet_subsys(&nf_tables_ipv4_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_ipv4);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 66ddcb60519a..7cf7d6e380c2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -258,7 +258,7 @@ int ping_init_sock(struct sock *sk)
struct net *net = sock_net(sk);
kgid_t group = current_egid();
struct group_info *group_info;
- int i, j, count;
+ int i;
kgid_t low, high;
int ret = 0;
@@ -270,16 +270,11 @@ int ping_init_sock(struct sock *sk)
return 0;
group_info = get_current_groups();
- count = group_info->ngroups;
- for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
- for (j = 0; j < cp_count; j++) {
- kgid_t gid = group_info->blocks[i][j];
- if (gid_lte(low, gid) && gid_lte(gid, high))
- goto out_release_group;
- }
+ for (i = 0; i < group_info->ngroups; i++) {
+ kgid_t gid = group_info->gid[i];
- count -= cp_count;
+ if (gid_lte(low, gid) && gid_lte(gid, high))
+ goto out_release_group;
}
ret = -EACCES;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9f665b63a927..7143ca1a6af9 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -46,6 +46,8 @@
#include <net/sock.h>
#include <net/raw.h>
+#define TCPUDP_MIB_MAX max_t(u32, UDP_MIB_MAX, TCP_MIB_MAX)
+
/*
* Report socket allocation statistics [mea@utu.fi]
*/
@@ -257,6 +259,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS),
SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND),
SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED),
+ SNMP_MIB_ITEM("TCPMD5Failure", LINUX_MIB_TCPMD5FAILURE),
SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
@@ -355,22 +358,22 @@ static void icmp_put(struct seq_file *seq)
atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
- for (i = 0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " In%s", icmpmibmap[i].name);
seq_puts(seq, " OutMsgs OutErrors");
- for (i = 0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
- for (i = 0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
- for (i = 0; icmpmibmap[i].name != NULL; i++)
+ for (i = 0; icmpmibmap[i].name; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
}
@@ -378,14 +381,16 @@ static void icmp_put(struct seq_file *seq)
/*
* Called from the PROCfs module. This outputs /proc/net/snmp.
*/
-static int snmp_seq_show(struct seq_file *seq, void *v)
+static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
{
- int i;
struct net *net = seq->private;
+ u64 buff64[IPSTATS_MIB_MAX];
+ int i;
- seq_puts(seq, "Ip: Forwarding DefaultTTL");
+ memset(buff64, 0, IPSTATS_MIB_MAX * sizeof(u64));
- for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
+ seq_puts(seq, "Ip: Forwarding DefaultTTL");
+ for (i = 0; snmp4_ipstats_list[i].name; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
@@ -393,57 +398,77 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
net->ipv4.sysctl_ip_default_ttl);
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
- for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
- seq_printf(seq, " %llu",
- snmp_fold_field64(net->mib.ip_statistics,
- snmp4_ipstats_list[i].entry,
- offsetof(struct ipstats_mib, syncp)));
+ snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
+ net->mib.ip_statistics,
+ offsetof(struct ipstats_mib, syncp));
+ for (i = 0; snmp4_ipstats_list[i].name; i++)
+ seq_printf(seq, " %llu", buff64[i]);
- icmp_put(seq); /* RFC 2011 compatibility */
- icmpmsg_put(seq);
+ return 0;
+}
+
+static int snmp_seq_show_tcp_udp(struct seq_file *seq, void *v)
+{
+ unsigned long buff[TCPUDP_MIB_MAX];
+ struct net *net = seq->private;
+ int i;
+
+ memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++)
+ for (i = 0; snmp4_tcp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_tcp_list[i].name);
seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++) {
+ snmp_get_cpu_field_batch(buff, snmp4_tcp_list,
+ net->mib.tcp_statistics);
+ for (i = 0; snmp4_tcp_list[i].name; i++) {
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
- seq_printf(seq, " %ld",
- snmp_fold_field(net->mib.tcp_statistics,
- snmp4_tcp_list[i].entry));
+ seq_printf(seq, " %ld", buff[i]);
else
- seq_printf(seq, " %lu",
- snmp_fold_field(net->mib.tcp_statistics,
- snmp4_tcp_list[i].entry));
+ seq_printf(seq, " %lu", buff[i]);
}
+ memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
+
+ snmp_get_cpu_field_batch(buff, snmp4_udp_list,
+ net->mib.udp_statistics);
seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+ for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
-
seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- snmp_fold_field(net->mib.udp_statistics,
- snmp4_udp_list[i].entry));
+ for (i = 0; snmp4_udp_list[i].name; i++)
+ seq_printf(seq, " %lu", buff[i]);
+
+ memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
/* the UDP and UDP-Lite MIBs are the same */
seq_puts(seq, "\nUdpLite:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+ snmp_get_cpu_field_batch(buff, snmp4_udp_list,
+ net->mib.udplite_statistics);
+ for (i = 0; snmp4_udp_list[i].name; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
-
seq_puts(seq, "\nUdpLite:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- snmp_fold_field(net->mib.udplite_statistics,
- snmp4_udp_list[i].entry));
+ for (i = 0; snmp4_udp_list[i].name; i++)
+ seq_printf(seq, " %lu", buff[i]);
seq_putc(seq, '\n');
return 0;
}
+static int snmp_seq_show(struct seq_file *seq, void *v)
+{
+ snmp_seq_show_ipstats(seq, v);
+
+ icmp_put(seq); /* RFC 2011 compatibility */
+ icmpmsg_put(seq);
+
+ snmp_seq_show_tcp_udp(seq, v);
+
+ return 0;
+}
+
static int snmp_seq_open(struct inode *inode, struct file *file)
{
return single_open_net(inode, file, snmp_seq_show);
@@ -468,21 +493,21 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
struct net *net = seq->private;
seq_puts(seq, "TcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
+ for (i = 0; snmp4_net_list[i].name; i++)
seq_printf(seq, " %s", snmp4_net_list[i].name);
seq_puts(seq, "\nTcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
+ for (i = 0; snmp4_net_list[i].name; i++)
seq_printf(seq, " %lu",
snmp_fold_field(net->mib.net_statistics,
snmp4_net_list[i].entry));
seq_puts(seq, "\nIpExt:");
- for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
+ for (i = 0; snmp4_ipextstats_list[i].name; i++)
seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
seq_puts(seq, "\nIpExt:");
- for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
+ for (i = 0; snmp4_ipextstats_list[i].name; i++)
seq_printf(seq, " %llu",
snmp_fold_field64(net->mib.ip_statistics,
snmp4_ipextstats_list[i].entry,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 438f50c1a676..90a85c955872 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -606,12 +606,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
- if (!saddr && ipc.oif) {
- err = l3mdev_get_saddr(net, ipc.oif, &fl4);
- if (err < 0)
- goto done;
- }
-
if (!inet->hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 62c3ed0b7556..f2be689a6c85 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1252,7 +1252,9 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
- return min_t(unsigned int, mtu, IP_MAX_MTU);
+ mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
@@ -1835,7 +1837,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
* Now we are ready to route packet.
*/
fl4.flowi4_oif = 0;
- fl4.flowi4_iif = l3mdev_fib_oif_rcu(dev);
+ fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
@@ -2022,7 +2024,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
return ERR_PTR(-EINVAL);
if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
- if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl4->saddr) &&
+ !(dev_out->flags & IFF_LOOPBACK) &&
+ !netif_is_l3_master(dev_out))
return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl4->daddr))
@@ -2152,7 +2156,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- int master_idx;
int orig_oif;
int err = -ENETUNREACH;
@@ -2162,9 +2165,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
orig_oif = fl4->flowi4_oif;
- master_idx = l3mdev_master_ifindex_by_index(net, fl4->flowi4_oif);
- if (master_idx)
- fl4->flowi4_oif = master_idx;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
@@ -2248,10 +2248,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_HOST);
}
-
- rth = l3mdev_get_rtable(dev_out, fl4);
- if (rth)
- goto out;
}
if (!fl4->daddr) {
@@ -2269,8 +2265,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
if (err) {
res.fi = NULL;
res.table = NULL;
- if (fl4->flowi4_oif &&
- !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
+ if (fl4->flowi4_oif) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -2306,7 +2301,9 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
else
fl4->saddr = fl4->daddr;
}
- dev_out = net->loopback_dev;
+
+ /* L3 master device is the loopback for that domain */
+ dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
@@ -2582,9 +2579,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
- if (netif_index_is_l3_master(net, fl4.flowi4_oif))
- fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
-
if (iif) {
struct net_device *dev;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ffbb218de520..3251fe71f39f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -380,14 +380,14 @@ void tcp_init_sock(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __skb_queue_head_init(&tp->out_of_order_queue);
+ tp->out_of_order_queue = RB_ROOT;
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- tp->rtt_min[0].rtt = ~0U;
+ minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -396,6 +396,9 @@ void tcp_init_sock(struct sock *sk)
*/
tp->snd_cwnd = TCP_INIT_CWND;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
+
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
@@ -421,8 +424,6 @@ void tcp_init_sock(struct sock *sk)
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
local_bh_disable();
- if (mem_cgroup_sockets_enabled)
- sock_update_memcg(sk);
sk_sockets_allocated_inc(sk);
local_bh_enable();
}
@@ -688,8 +689,7 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
int ret;
ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
- min(rd_desc->count, len), tss->flags,
- skb_socket_splice);
+ min(rd_desc->count, len), tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;
@@ -1014,23 +1014,40 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
flags);
lock_sock(sk);
+
+ tcp_rate_check_app_limited(sk); /* is sending application-limited? */
+
res = do_tcp_sendpages(sk, page, offset, size, flags);
release_sock(sk);
return res;
}
EXPORT_SYMBOL(tcp_sendpage);
-static inline int select_size(const struct sock *sk, bool sg)
+/* Do not bother using a page frag for very small frames.
+ * But use this heuristic only for the first skb in write queue.
+ *
+ * Having no payload in skb->head allows better SACK shifting
+ * in tcp_shift_skb_data(), reducing sack/rack overhead, because
+ * write queue has less skbs.
+ * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
+ * This also speeds up tso_fragment(), since it won't fall back
+ * to tcp_fragment().
+ */
+static int linear_payload_sz(bool first_skb)
+{
+ if (first_skb)
+ return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+ return 0;
+}
+
+static int select_size(const struct sock *sk, bool sg, bool first_skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
if (sk_can_gso(sk)) {
- /* Small frames wont use a full page:
- * Payload will immediately follow tcp header.
- */
- tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+ tmp = linear_payload_sz(first_skb);
} else {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
@@ -1101,6 +1118,8 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ tcp_rate_check_app_limited(sk); /* is sending application-limited? */
+
/* Wait for a connection to finish. One exception is TCP Fast Open
* (passive side) where data is allowed to be sent before a connection
* is fully established.
@@ -1161,6 +1180,8 @@ restart:
}
if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
+ bool first_skb;
+
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
@@ -1172,10 +1193,11 @@ new_segment:
process_backlog = false;
goto restart;
}
+ first_skb = skb_queue_empty(&sk->sk_write_queue);
skb = sk_stream_alloc_skb(sk,
- select_size(sk, sg),
+ select_size(sk, sg, first_skb),
sk->sk_allocation,
- skb_queue_empty(&sk->sk_write_queue));
+ first_skb);
if (!skb)
goto wait_for_memory;
@@ -1570,6 +1592,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
}
EXPORT_SYMBOL(tcp_read_sock);
+int tcp_peek_len(struct socket *sock)
+{
+ return tcp_inq(sock->sk);
+}
+EXPORT_SYMBOL(tcp_peek_len);
+
/*
* This routine copies from a sock struct into the user buffer.
*
@@ -2237,7 +2265,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
@@ -2681,7 +2709,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_time_stamp, intv;
unsigned int start;
int notsent_bytes;
u64 rate64;
@@ -2771,6 +2799,15 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_min_rtt = tcp_min_rtt(tp);
info->tcpi_data_segs_in = tp->data_segs_in;
info->tcpi_data_segs_out = tp->data_segs_out;
+
+ info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
+ rate = READ_ONCE(tp->rate_delivered);
+ intv = READ_ONCE(tp->rate_interval_us);
+ if (rate && intv) {
+ rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
+ do_div(rate64, intv);
+ put_unaligned(rate64, &info->tcpi_delivery_rate);
+ }
}
EXPORT_SYMBOL_GPL(tcp_get_info);
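For scale on the new tcpi_delivery_rate field: with, say, rate_delivered = 100 packets over rate_interval_us = 10000 and a 1448-byte mss_cache (illustrative numbers, not from the patch), the computation above yields (100 * 1448 * 1000000) / 10000 = 14,480,000 bytes/sec, i.e. roughly 116 Mbit/s reported to userspace.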
@@ -3092,23 +3129,6 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
-int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
- const struct tcphdr *th)
-{
- struct scatterlist sg;
- struct tcphdr hdr;
-
- /* We are not allowed to change tcphdr, make a local copy */
- memcpy(&hdr, th, sizeof(hdr));
- hdr.check = 0;
-
- /* options aren't included in the hash */
- sg_init_one(&sg, &hdr, sizeof(hdr));
- ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(hdr));
- return crypto_ahash_update(hp->md5_req);
-}
-EXPORT_SYMBOL(tcp_md5_hash_header);
-
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const struct sk_buff *skb, unsigned int header_len)
{
@@ -3255,11 +3275,12 @@ static void __init tcp_init_mem(void)
void __init tcp_init(void)
{
- unsigned long limit;
int max_rshare, max_wshare, cnt;
+ unsigned long limit;
unsigned int i;
- sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
+ BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
+ FIELD_SIZEOF(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
new file mode 100644
index 000000000000..0ea66c2c9344
--- /dev/null
+++ b/net/ipv4/tcp_bbr.c
@@ -0,0 +1,896 @@
+/* Bottleneck Bandwidth and RTT (BBR) congestion control
+ *
+ * BBR congestion control computes the sending rate based on the delivery
+ * rate (throughput) estimated from ACKs. In a nutshell:
+ *
+ * On each ACK, update our model of the network path:
+ * bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
+ * min_rtt = windowed_min(rtt, 10 seconds)
+ * pacing_rate = pacing_gain * bottleneck_bandwidth
+ * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
+ *
+ * The core algorithm does not react directly to packet losses or delays,
+ * although BBR may adjust the size of next send per ACK when loss is
+ * observed, or adjust the sending rate if it estimates there is a
+ * traffic policer, in order to keep the drop rate reasonable.
+ *
+ * BBR is described in detail in:
+ * "BBR: Congestion-Based Congestion Control",
+ * Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
+ * Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
+ *
+ * There is a public e-mail list for discussing BBR development and testing:
+ * https://groups.google.com/forum/#!forum/bbr-dev
+ *
+ * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
+ * since pacing is integral to the BBR design and implementation.
+ * BBR without pacing would not function properly, and may incur unnecessarily
+ * high packet loss rates.
+ */
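As an editorial illustration of the model above (a sketch, not part of this patch; the 1 Gbit/s bottleneck, 1460-byte MSS and 40 ms min_rtt are made-up numbers), plugging example values into the pacing_rate and cwnd formulas looks like this:

    #include <stdio.h>

    int main(void)
    {
            double mss      = 1460.0;           /* payload bytes per packet (assumed) */
            double min_rtt  = 0.040;            /* windowed-min RTT in seconds (assumed) */
            double btl_bw   = 1e9 / 8 / mss;    /* 1 Gbit/s bottleneck in packets/sec */
            double pacing_gain = 2.885;         /* startup gain, roughly 2/ln(2) */
            double cwnd_gain   = 2.0;           /* steady-state cwnd gain */

            double pacing_rate = pacing_gain * btl_bw;    /* packets/sec */
            double cwnd = cwnd_gain * btl_bw * min_rtt;   /* packets: gain * BDP */

            if (cwnd < 4)                       /* never let cwnd drop below 4 packets */
                    cwnd = 4;

            printf("pacing_rate ~= %.0f pkt/s, cwnd ~= %.0f pkts\n",
                   pacing_rate, cwnd);
            return 0;
    }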
+#include <linux/module.h>
+#include <net/tcp.h>
+#include <linux/inet_diag.h>
+#include <linux/inet.h>
+#include <linux/random.h>
+#include <linux/win_minmax.h>
+
+/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
+ * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
+ * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
+ * Since the minimum window is >=4 packets, the lower bound isn't
+ * an issue. The upper bound isn't an issue with existing technologies.
+ */
+#define BW_SCALE 24
+#define BW_UNIT (1 << BW_SCALE)
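A quick sanity check of the unit quoted in the comment above: 1500 bytes in 1 usec is 1500 * 8 / 1e-6 = 1.2e10 bit/s, and 1.2e10 / 2^24 is approximately 715 bit/s, which is where the "~= 715 bps" figure comes from.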
+
+#define BBR_SCALE 8 /* scaling factor for fractions in BBR (e.g. gains) */
+#define BBR_UNIT (1 << BBR_SCALE)
+
+/* BBR has the following modes for deciding how fast to send: */
+enum bbr_mode {
+ BBR_STARTUP, /* ramp up sending rate rapidly to fill pipe */
+ BBR_DRAIN, /* drain any queue created during startup */
+ BBR_PROBE_BW, /* discover, share bw: pace around estimated bw */
+ BBR_PROBE_RTT, /* cut cwnd to min to probe min_rtt */
+};
+
+/* BBR congestion control block */
+struct bbr {
+ u32 min_rtt_us; /* min RTT in min_rtt_win_sec window */
+ u32 min_rtt_stamp; /* timestamp of min_rtt_us */
+ u32 probe_rtt_done_stamp; /* end time for BBR_PROBE_RTT mode */
+ struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
+ u32 rtt_cnt; /* count of packet-timed rounds elapsed */
+ u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
+ struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */
+ u32 mode:3, /* current bbr_mode in state machine */
+ prev_ca_state:3, /* CA state on previous ACK */
+ packet_conservation:1, /* use packet conservation? */
+ restore_cwnd:1, /* decided to revert cwnd to old value */
+ round_start:1, /* start of packet-timed tx->ack round? */
+ tso_segs_goal:7, /* segments we want in each skb we send */
+ idle_restart:1, /* restarting after idle? */
+ probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
+ unused:5,
+ lt_is_sampling:1, /* taking long-term ("LT") samples now? */
+ lt_rtt_cnt:7, /* round trips in long-term interval */
+ lt_use_bw:1; /* use lt_bw as our bw estimate? */
+ u32 lt_bw; /* LT est delivery rate in pkts/uS << 24 */
+ u32 lt_last_delivered; /* LT intvl start: tp->delivered */
+ u32 lt_last_stamp; /* LT intvl start: tp->delivered_mstamp */
+ u32 lt_last_lost; /* LT intvl start: tp->lost */
+ u32 pacing_gain:10, /* current gain for setting pacing rate */
+ cwnd_gain:10, /* current gain for setting cwnd */
+ full_bw_cnt:3, /* number of rounds without large bw gains */
+ cycle_idx:3, /* current index in pacing_gain cycle array */
+ unused_b:6;
+ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
+ u32 full_bw; /* recent bw, to estimate if pipe is full */
+};
+
+#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
+
+/* Window length of bw filter (in rounds): */
+static const int bbr_bw_rtts = CYCLE_LEN + 2;
+/* Window length of min_rtt filter (in sec): */
+static const u32 bbr_min_rtt_win_sec = 10;
+/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
+static const u32 bbr_probe_rtt_mode_ms = 200;
+/* Skip TSO below the following bandwidth (bits/sec): */
+static const int bbr_min_tso_rate = 1200000;
+
+/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
+ * that will allow a smoothly increasing pacing rate that will double each RTT
+ * and send the same number of packets per RTT that an un-paced, slow-starting
+ * Reno or CUBIC flow would:
+ */
+static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
+/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
+ * the queue created in BBR_STARTUP in a single round:
+ */
+static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
+/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
+static const int bbr_cwnd_gain = BBR_UNIT * 2;
+/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
+static const int bbr_pacing_gain[] = {
+ BBR_UNIT * 5 / 4, /* probe for more available bw */
+ BBR_UNIT * 3 / 4, /* drain queue and/or yield bw to other flows */
+ BBR_UNIT, BBR_UNIT, BBR_UNIT, /* cruise at 1.0*bw to utilize pipe, */
+ BBR_UNIT, BBR_UNIT, BBR_UNIT /* without creating excess queue... */
+};
+/* Randomize the starting gain cycling phase over N phases: */
+static const u32 bbr_cycle_rand = 7;
+
+/* Try to keep at least this many packets in flight, if things go smoothly. For
+ * smooth functioning, a sliding window protocol ACKing every other packet
+ * needs at least 4 packets in flight:
+ */
+static const u32 bbr_cwnd_min_target = 4;
+
+/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
+/* If bw has increased significantly (1.25x), there may be more bw available: */
+static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
+/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
+static const u32 bbr_full_bw_cnt = 3;
+
+/* "long-term" ("LT") bandwidth estimator parameters... */
+/* The minimum number of rounds in an LT bw sampling interval: */
+static const u32 bbr_lt_intvl_min_rtts = 4;
+/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
+static const u32 bbr_lt_loss_thresh = 50;
+/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
+static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
+/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
+static const u32 bbr_lt_bw_diff = 4000 / 8;
+/* If we estimate we're policed, use lt_bw for this many round trips: */
+static const u32 bbr_lt_bw_max_rtts = 48;
+
+/* Do we estimate that STARTUP filled the pipe? */
+static bool bbr_full_bw_reached(const struct sock *sk)
+{
+ const struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+}
+
+/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
+static u32 bbr_max_bw(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return minmax_get(&bbr->bw);
+}
+
+/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
+static u32 bbr_bw(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
+}
+
+/* Return rate in bytes per second, optionally with a gain.
+ * The order here is chosen carefully to avoid overflow of u64. This should
+ * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
+ */
+static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+{
+ rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
+ rate *= gain;
+ rate >>= BBR_SCALE;
+ rate *= USEC_PER_SEC;
+ return rate >> BW_SCALE;
+}
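To see why the ordering above matters, assume a 1500-byte MTU at the quoted 2.9 Tbit/s ceiling: that is roughly 242 pkts/usec, or about 4.1e9 once shifted left by BW_SCALE. Multiplying by the MTU and a 2.89x gain (about 740 in BBR_UNIT terms), shifting out BBR_SCALE, and only then multiplying by USEC_PER_SEC peaks near 1.8e19, just inside u64; multiplying by USEC_PER_SEC before the BBR_SCALE shift would overflow.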
+
+/* Pace using current bw estimate and a gain factor. In order to help drive the
+ * network toward lower queues while maintaining high utilization and low
+ * latency, the average pacing rate aims to be slightly (~1%) lower than the
+ * estimated bandwidth. This is an important aspect of the design. In this
+ * implementation this slightly lower pacing rate is achieved implicitly by not
+ * including link-layer headers in the packet size used for the pacing rate.
+ */
+static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 rate = bw;
+
+ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+ if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+ sk->sk_pacing_rate = rate;
+}
+
+/* Return count of segments we want in the skbs we send, or 0 for default. */
+static u32 bbr_tso_segs_goal(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->tso_segs_goal;
+}
+
+static void bbr_set_tso_segs_goal(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 min_segs;
+
+ min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
+ bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
+ 0x7FU);
+}
+
+/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
+static void bbr_save_cwnd(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
+ bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
+ else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
+ bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
+}
+
+static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (event == CA_EVENT_TX_START && tp->app_limited) {
+ bbr->idle_restart = 1;
+ /* Avoid pointless buffer overflows: pace at est. bw if we don't
+ * need more speed (we're restarting from idle and app-limited).
+ */
+ if (bbr->mode == BBR_PROBE_BW)
+ bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+ }
+}
+
+/* Find target cwnd. Right-size the cwnd based on min RTT and the
+ * estimated bottleneck bandwidth:
+ *
+ * cwnd = bw * min_rtt * gain = BDP * gain
+ *
+ * The key factor, gain, controls the amount of queue. While a small gain
+ * builds a smaller queue, it becomes more vulnerable to noise in RTT
+ * measurements (e.g., delayed ACKs or other ACK compression effects). This
+ * noise may cause BBR to under-estimate the rate.
+ *
+ * To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 cwnd;
+ u64 w;
+
+ /* If we've never had a valid RTT sample, cap cwnd at the initial
+ * default. This should only happen when the connection is not using TCP
+ * timestamps and has retransmitted all of the SYN/SYNACK/data packets
+ * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
+ * case we need to slow-start up toward something safe: TCP_INIT_CWND.
+ */
+ if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */
+ return TCP_INIT_CWND; /* be safe: cap at default initial cwnd*/
+
+ w = (u64)bw * bbr->min_rtt_us;
+
+ /* Apply a gain to the given value, then remove the BW_SCALE shift. */
+ cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ /* Allow enough full-sized skbs in flight to utilize end systems. */
+ cwnd += 3 * bbr->tso_segs_goal;
+
+ /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
+ cwnd = (cwnd + 1) & ~1U;
+
+ return cwnd;
+}
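As an editorial worked example of the integer math above (a sketch, not part of this patch; the 100 Mbit/s bottleneck, 1460-byte MSS, 40 ms min_rtt and tso_segs_goal of 2 are assumed values), the same computation in standalone C:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int bw_scale = 24, bbr_scale = 8;   /* mirror BW_SCALE/BBR_SCALE */
            const unsigned long long bw_unit = 1ULL << bw_scale;

            /* Made-up path: 100 Mbit/s bottleneck, 1460-byte MSS, 40 ms min RTT. */
            double pkts_per_usec = 100e6 / 8 / 1460 / 1e6;
            unsigned long long bw = (unsigned long long)(pkts_per_usec * bw_unit); /* pkts/uS << 24 */
            unsigned long long min_rtt_us = 40 * 1000;
            unsigned long long gain = 2 << bbr_scale;          /* bbr_cwnd_gain = 2 * BBR_UNIT */
            unsigned int tso_segs_goal = 2;                    /* assumed */

            unsigned long long w = bw * min_rtt_us;
            unsigned long long cwnd = (((w * gain) >> bbr_scale) + bw_unit - 1) / bw_unit;

            cwnd += 3 * tso_segs_goal;                         /* room for in-flight skbs */
            cwnd = (cwnd + 1) & ~1ULL;                         /* round up to an even count */

            /* Prints ~692: about twice the ~342-packet BDP plus the skb headroom. */
            printf("target cwnd ~= %llu packets\n", cwnd);
            return 0;
    }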
+
+/* An optimization in BBR to reduce losses: On the first round of recovery, we
+ * follow the packet conservation principle: send P packets per P packets acked.
+ * After that, we slow-start and send at most 2*P packets per P packets acked.
+ * After recovery finishes, or upon undo, we restore the cwnd we had when
+ * recovery started (capped by the target cwnd based on estimated BDP).
+ *
+ * TODO(ycheng/ncardwell): implement a rate-based approach.
+ */
+static bool bbr_set_cwnd_to_recover_or_restore(
+ struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
+ u32 cwnd = tp->snd_cwnd;
+
+ /* An ACK for P pkts should release at most 2*P packets. We do this
+ * in two steps. First, here we deduct the number of lost packets.
+ * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
+ */
+ if (rs->losses > 0)
+ cwnd = max_t(s32, cwnd - rs->losses, 1);
+
+ if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
+ /* Starting 1st round of Recovery, so do packet conservation. */
+ bbr->packet_conservation = 1;
+ bbr->next_rtt_delivered = tp->delivered; /* start round now */
+ /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
+ cwnd = tcp_packets_in_flight(tp) + acked;
+ } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
+ /* Exiting loss recovery; restore cwnd saved before recovery. */
+ bbr->restore_cwnd = 1;
+ bbr->packet_conservation = 0;
+ }
+ bbr->prev_ca_state = state;
+
+ if (bbr->restore_cwnd) {
+ /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
+ cwnd = max(cwnd, bbr->prior_cwnd);
+ bbr->restore_cwnd = 0;
+ }
+
+ if (bbr->packet_conservation) {
+ *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
+ return true; /* yes, using packet conservation */
+ }
+ *new_cwnd = cwnd;
+ return false;
+}
+
+/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
+ * has drawn us down below target), or snap down to target if we're above it.
+ */
+static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
+ u32 acked, u32 bw, int gain)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 cwnd = 0, target_cwnd = 0;
+
+ if (!acked)
+ return;
+
+ if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
+ goto done;
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+ target_cwnd = bbr_target_cwnd(sk, bw, gain);
+ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
+ cwnd = min(cwnd + acked, target_cwnd);
+ else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
+ cwnd = cwnd + acked;
+ cwnd = max(cwnd, bbr_cwnd_min_target);
+
+done:
+ tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); /* apply global cap */
+ if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
+ tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
+}
+
+/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
+static bool bbr_is_next_cycle_phase(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ bool is_full_length =
+ skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+ bbr->min_rtt_us;
+ u32 inflight, bw;
+
+ /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
+ * use the pipe without increasing the queue.
+ */
+ if (bbr->pacing_gain == BBR_UNIT)
+ return is_full_length; /* just use wall clock time */
+
+ inflight = rs->prior_in_flight; /* what was in-flight before ACK? */
+ bw = bbr_max_bw(sk);
+
+ /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
+ * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
+ * small (e.g. on a LAN). We do not persist if packets are lost, since
+ * a path with small buffers may not hold that much.
+ */
+ if (bbr->pacing_gain > BBR_UNIT)
+ return is_full_length &&
+ (rs->losses || /* perhaps pacing_gain*BDP won't fit */
+ inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+
+ /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
+ * probing didn't find more bw. If inflight falls to match BDP then we
+ * estimate queue is drained; persisting would underutilize the pipe.
+ */
+ return is_full_length ||
+ inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+}
+
+static void bbr_advance_cycle_phase(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
+ bbr->cycle_mstamp = tp->delivered_mstamp;
+ bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
+}
+
+/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
+static void bbr_update_cycle_phase(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
+ bbr_is_next_cycle_phase(sk, rs))
+ bbr_advance_cycle_phase(sk);
+}
+
+static void bbr_reset_startup_mode(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->mode = BBR_STARTUP;
+ bbr->pacing_gain = bbr_high_gain;
+ bbr->cwnd_gain = bbr_high_gain;
+}
+
+static void bbr_reset_probe_bw_mode(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->mode = BBR_PROBE_BW;
+ bbr->pacing_gain = BBR_UNIT;
+ bbr->cwnd_gain = bbr_cwnd_gain;
+ bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
+ bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
+}
+
+static void bbr_reset_mode(struct sock *sk)
+{
+ if (!bbr_full_bw_reached(sk))
+ bbr_reset_startup_mode(sk);
+ else
+ bbr_reset_probe_bw_mode(sk);
+}
+
+/* Start a new long-term sampling interval. */
+static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+ bbr->lt_last_delivered = tp->delivered;
+ bbr->lt_last_lost = tp->lost;
+ bbr->lt_rtt_cnt = 0;
+}
+
+/* Completely reset long-term bandwidth sampling. */
+static void bbr_reset_lt_bw_sampling(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->lt_bw = 0;
+ bbr->lt_use_bw = 0;
+ bbr->lt_is_sampling = false;
+ bbr_reset_lt_bw_sampling_interval(sk);
+}
+
+/* Long-term bw sampling interval is done. Estimate whether we're policed. */
+static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 diff;
+
+ if (bbr->lt_bw) { /* do we have bw from a previous interval? */
+ /* Is new bw close to the lt_bw from the previous interval? */
+ diff = abs(bw - bbr->lt_bw);
+ if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
+ (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
+ bbr_lt_bw_diff)) {
+ /* All criteria are met; estimate we're policed. */
+ bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
+ bbr->lt_use_bw = 1;
+ bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
+ bbr->lt_rtt_cnt = 0;
+ return;
+ }
+ }
+ bbr->lt_bw = bw;
+ bbr_reset_lt_bw_sampling_interval(sk);
+}
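/* Rough numeric sketch of the closeness test above, assuming BBR_SCALE is 8
 * (BBR_UNIT == 256) and bbr_lt_bw_ratio is BBR_UNIT / 8 (12.5%), as set up
 * earlier in this file: with lt_bw = 1000 from the previous interval and a
 * new sample bw = 1080, diff = 80 and 80 * 256 = 20480 <= 32 * 1000 = 32000,
 * so the two intervals are judged consistent; lt_bw becomes
 * (1080 + 1000) / 2 = 1040 and the flow switches to the long-term rate.
 */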
+
+/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
+ * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
+ * explicitly models their policed rate, to reduce unnecessary losses. We
+ * estimate that we're policed if we see 2 consecutive sampling intervals with
+ * consistent throughput and high packet loss. If we think we're being policed,
+ * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
+ */
+static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 lost, delivered;
+ u64 bw;
+ s32 t;
+
+ if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
+ if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
+ ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
+ bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */
+ bbr_reset_probe_bw_mode(sk); /* restart gain cycling */
+ }
+ return;
+ }
+
+ /* Wait for the first loss before sampling, to let the policer exhaust
+ * its tokens and estimate the steady-state rate allowed by the policer.
+ * Starting samples earlier includes bursts that over-estimate the bw.
+ */
+ if (!bbr->lt_is_sampling) {
+ if (!rs->losses)
+ return;
+ bbr_reset_lt_bw_sampling_interval(sk);
+ bbr->lt_is_sampling = true;
+ }
+
+ /* To avoid underestimates, reset sampling if we run out of data. */
+ if (rs->is_app_limited) {
+ bbr_reset_lt_bw_sampling(sk);
+ return;
+ }
+
+ if (bbr->round_start)
+ bbr->lt_rtt_cnt++; /* count round trips in this interval */
+ if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
+ return; /* sampling interval needs to be longer */
+ if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
+ bbr_reset_lt_bw_sampling(sk); /* interval is too long */
+ return;
+ }
+
+ /* End sampling interval when a packet is lost, so we estimate the
+ * policer tokens were exhausted. Stopping the sampling before the
+ * tokens are exhausted under-estimates the policed rate.
+ */
+ if (!rs->losses)
+ return;
+
+ /* Calculate packets lost and delivered in sampling interval. */
+ lost = tp->lost - bbr->lt_last_lost;
+ delivered = tp->delivered - bbr->lt_last_delivered;
+ /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
+ if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
+ return;
+
+ /* Find average delivery rate in this sampling interval. */
+ t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
+ if (t < 1)
+ return; /* interval is less than one jiffy, so wait */
+ t = jiffies_to_usecs(t);
+ /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
+ if (t < 1) {
+ bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
+ return;
+ }
+ bw = (u64)delivered * BW_UNIT;
+ do_div(bw, t);
+ bbr_lt_bw_interval_done(sk, bw);
+}
+
+/* Estimate the bandwidth based on how fast packets are delivered */
+static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw;
+
+ bbr->round_start = 0;
+ if (rs->delivered < 0 || rs->interval_us <= 0)
+ return; /* Not a valid observation */
+
+ /* See if we've reached the next RTT */
+ if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
+ bbr->next_rtt_delivered = tp->delivered;
+ bbr->rtt_cnt++;
+ bbr->round_start = 1;
+ bbr->packet_conservation = 0;
+ }
+
+ bbr_lt_bw_sampling(sk, rs);
+
+ /* Divide delivered by the interval to find a (lower bound) bottleneck
+ * bandwidth sample. Delivered is in packets and interval_us is in
+ * microseconds, so the raw ratio would be << 1 for most connections;
+ * delivered is therefore scaled up first.
+ */
+ bw = (u64)rs->delivered * BW_UNIT;
+ do_div(bw, rs->interval_us);
+
+ /* If this sample is application-limited, it is likely to have a very
+ * low delivered count that represents application behavior rather than
+ * the available network rate. Such a sample could drag down estimated
+ * bw, causing needless slow-down. Thus, to continue to send at the
+ * last measured network rate, we filter out app-limited samples unless
+ * they describe the path bw at least as well as our bw model.
+ *
+ * So the goal during an app-limited phase is to keep using the best
+ * recent estimate of the network rate, however long that phase lasts.
+ * We automatically leave this phase when the app writes faster than
+ * the network can deliver :)
+ */
+ if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
+ /* Incorporate new sample into our max bw filter. */
+ minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
+ }
+}
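/* Illustrative sketch of the fixed-point sample computed above, assuming
 * BW_SCALE is 24 (BW_UNIT == 1 << 24) as defined earlier in this file.
 * This is a userspace rendering of the same arithmetic, with a helper name
 * chosen purely for illustration:
 */
#include <stdint.h>

static inline uint64_t bw_sample(uint32_t delivered_pkts, uint32_t interval_us)
{
	/* packets per microsecond, scaled by 2^24 to keep precision */
	return ((uint64_t)delivered_pkts << 24) / interval_us;
}

/* Example: bw_sample(100, 10000) == 167772, i.e. 0.01 pkt/us = 10,000 pkt/s,
 * roughly 120 Mbit/s with 1500-byte packets. The max filter above then keeps
 * the windowed maximum of such samples over bbr_bw_rtts round trips.
 */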
+
+/* Estimate when the pipe is full, using the change in delivery rate: BBR
+ * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
+ * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
+ * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
+ * higher rwin, 3: we get higher delivery rate samples. Or transient
+ * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
+ * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
+ */
+static void bbr_check_full_bw_reached(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 bw_thresh;
+
+ if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
+ return;
+
+ bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
+ if (bbr_max_bw(sk) >= bw_thresh) {
+ bbr->full_bw = bbr_max_bw(sk);
+ bbr->full_bw_cnt = 0;
+ return;
+ }
+ ++bbr->full_bw_cnt;
+}
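/* Worked example of the growth test above, assuming BBR_SCALE is 8 and
 * bbr_full_bw_thresh is BBR_UNIT * 5 / 4 (i.e. 320, a 25% growth bar), as
 * configured earlier in this file: with full_bw = 1000,
 * bw_thresh = 1000 * 320 >> 8 = 1250. If the max bw estimate fails to reach
 * 1250 in three consecutive non-app-limited round starts, full_bw_cnt reaches
 * bbr_full_bw_cnt and STARTUP is considered to have filled the pipe.
 */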
+
+/* If pipe is probably full, drain the queue and then enter steady-state. */
+static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
+ bbr->mode = BBR_DRAIN; /* drain queue we created */
+ bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
+ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
+ } /* fall through to check if in-flight is already small: */
+ if (bbr->mode == BBR_DRAIN &&
+ tcp_packets_in_flight(tcp_sk(sk)) <=
+ bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
+}
+
+/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
+ * periodically drain the bottleneck queue, to converge to measure the true
+ * min_rtt (unloaded propagation delay). This allows the flows to keep queues
+ * small (reducing queuing delay and packet loss) and achieve fairness among
+ * BBR flows.
+ *
+ * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
+ * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
+ * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
+ * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
+ * re-enter the previous mode. BBR uses 200ms to approximately bound the
+ * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
+ *
+ * Note that flows need only pay this ~2% penalty if they have been busy
+ * sending over the entire last 10 seconds. Interactive applications (e.g.,
+ * Web, RPCs, video chunks) often have natural silences or low-rate periods
+ * within 10 seconds where the rate is low enough for long enough to drain
+ * the flow's queue at the bottleneck. We pick up these min RTT measurements
+ * opportunistically with our min_rtt filter. :-)
+ */
+static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ bool filter_expired;
+
+ /* Track min RTT seen in the min_rtt_win_sec filter window: */
+ filter_expired = after(tcp_time_stamp,
+ bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
+ if (rs->rtt_us >= 0 &&
+ (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
+ bbr->min_rtt_us = rs->rtt_us;
+ bbr->min_rtt_stamp = tcp_time_stamp;
+ }
+
+ if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
+ !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
+ bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
+ bbr->pacing_gain = BBR_UNIT;
+ bbr->cwnd_gain = BBR_UNIT;
+ bbr_save_cwnd(sk); /* note cwnd so we can restore it */
+ bbr->probe_rtt_done_stamp = 0;
+ }
+
+ if (bbr->mode == BBR_PROBE_RTT) {
+ /* Ignore low rate samples during this mode. */
+ tp->app_limited =
+ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
+ /* Maintain min packets in flight for max(200 ms, 1 round). */
+ if (!bbr->probe_rtt_done_stamp &&
+ tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
+ bbr->probe_rtt_done_stamp = tcp_time_stamp +
+ msecs_to_jiffies(bbr_probe_rtt_mode_ms);
+ bbr->probe_rtt_round_done = 0;
+ bbr->next_rtt_delivered = tp->delivered;
+ } else if (bbr->probe_rtt_done_stamp) {
+ if (bbr->round_start)
+ bbr->probe_rtt_round_done = 1;
+ if (bbr->probe_rtt_round_done &&
+ after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
+ bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->restore_cwnd = 1; /* snap to prior_cwnd */
+ bbr_reset_mode(sk);
+ }
+ }
+ }
+ bbr->idle_restart = 0;
+}
+
+static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+{
+ bbr_update_bw(sk, rs);
+ bbr_update_cycle_phase(sk, rs);
+ bbr_check_full_bw_reached(sk, rs);
+ bbr_check_drain(sk, rs);
+ bbr_update_min_rtt(sk, rs);
+}
+
+static void bbr_main(struct sock *sk, const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 bw;
+
+ bbr_update_model(sk, rs);
+
+ bw = bbr_bw(sk);
+ bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
+ bbr_set_tso_segs_goal(sk);
+ bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
+}
+
+static void bbr_init(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw;
+
+ bbr->prior_cwnd = 0;
+ bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
+ bbr->rtt_cnt = 0;
+ bbr->next_rtt_delivered = 0;
+ bbr->prev_ca_state = TCP_CA_Open;
+ bbr->packet_conservation = 0;
+
+ bbr->probe_rtt_done_stamp = 0;
+ bbr->probe_rtt_round_done = 0;
+ bbr->min_rtt_us = tcp_min_rtt(tp);
+ bbr->min_rtt_stamp = tcp_time_stamp;
+
+ minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
+
+ /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+ bw = (u64)tp->snd_cwnd * BW_UNIT;
+ do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
+ sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
+ bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+
+ bbr->restore_cwnd = 0;
+ bbr->round_start = 0;
+ bbr->idle_restart = 0;
+ bbr->full_bw = 0;
+ bbr->full_bw_cnt = 0;
+ bbr->cycle_mstamp.v64 = 0;
+ bbr->cycle_idx = 0;
+ bbr_reset_lt_bw_sampling(sk);
+ bbr_reset_startup_mode(sk);
+}
+
+static u32 bbr_sndbuf_expand(struct sock *sk)
+{
+ /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
+ return 3;
+}
+
+/* In theory BBR does not need to undo the cwnd since it does not
+ * always reduce cwnd on losses (see bbr_main()). Keep it for now.
+ */
+static u32 bbr_undo_cwnd(struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
+static u32 bbr_ssthresh(struct sock *sk)
+{
+ bbr_save_cwnd(sk);
+ return TCP_INFINITE_SSTHRESH; /* BBR does not use ssthresh */
+}
+
+static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
+{
+ if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
+ ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw = bbr_bw(sk);
+
+ bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
+ memset(&info->bbr, 0, sizeof(info->bbr));
+ info->bbr.bbr_bw_lo = (u32)bw;
+ info->bbr.bbr_bw_hi = (u32)(bw >> 32);
+ info->bbr.bbr_min_rtt = bbr->min_rtt_us;
+ info->bbr.bbr_pacing_gain = bbr->pacing_gain;
+ info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
+ *attr = INET_DIAG_BBRINFO;
+ return sizeof(info->bbr);
+ }
+ return 0;
+}
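/* Rough units check for the conversion above, assuming BW_SCALE is 24: the
 * internal bw is packets per microsecond scaled by 2^24, so multiplying by
 * mss_cache and USEC_PER_SEC and shifting right by BW_SCALE yields bytes per
 * second. E.g. an internal bw of 167772 (~0.01 pkt/us) with mss_cache = 1448
 * reports roughly 14.48 MB/s, i.e. about 116 Mbit/s.
 */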
+
+static void bbr_set_state(struct sock *sk, u8 new_state)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (new_state == TCP_CA_Loss) {
+ struct rate_sample rs = { .losses = 1 };
+
+ bbr->prev_ca_state = TCP_CA_Loss;
+ bbr->full_bw = 0;
+ bbr->round_start = 1; /* treat RTO like end of a round */
+ bbr_lt_bw_sampling(sk, &rs);
+ }
+}
+
+static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
+ .flags = TCP_CONG_NON_RESTRICTED,
+ .name = "bbr",
+ .owner = THIS_MODULE,
+ .init = bbr_init,
+ .cong_control = bbr_main,
+ .sndbuf_expand = bbr_sndbuf_expand,
+ .undo_cwnd = bbr_undo_cwnd,
+ .cwnd_event = bbr_cwnd_event,
+ .ssthresh = bbr_ssthresh,
+ .tso_segs_goal = bbr_tso_segs_goal,
+ .get_info = bbr_get_info,
+ .set_state = bbr_set_state,
+};
+
+static int __init bbr_register(void)
+{
+ BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
+ return tcp_register_congestion_control(&tcp_bbr_cong_ops);
+}
+
+static void __exit bbr_unregister(void)
+{
+ tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
+}
+
+module_init(bbr_register);
+module_exit(bbr_unregister);
+
+MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
+MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
+MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
+MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
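Once applied, this module registers "bbr" as a selectable congestion control (for example via the net.ipv4.tcp_congestion_control sysctl). Because BBR relies on sk_pacing_rate being honored, pairing it with a pacing-capable qdisc such as fq is the intended deployment at this point in the series.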
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 03725b294286..35b280361cb2 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
module_param(use_tolerance, bool, 0644);
MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
-struct minmax {
+struct cdg_minmax {
union {
struct {
s32 min;
@@ -74,10 +74,10 @@ enum cdg_state {
};
struct cdg {
- struct minmax rtt;
- struct minmax rtt_prev;
- struct minmax *gradients;
- struct minmax gsum;
+ struct cdg_minmax rtt;
+ struct cdg_minmax rtt_prev;
+ struct cdg_minmax *gradients;
+ struct cdg_minmax gsum;
bool gfilled;
u8 tail;
u8 state;
@@ -353,7 +353,7 @@ static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{
struct cdg *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct minmax *gradients;
+ struct cdg_minmax *gradients;
switch (ev) {
case CA_EVENT_CWND_RESTART:
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 882caa4e72bc..1294af4e0127 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -69,7 +69,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
int ret = 0;
/* all algorithms must implement ssthresh and cong_avoid ops */
- if (!ca->ssthresh || !ca->cong_avoid) {
+ if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
pr_err("%s does not implement required ops\n", ca->name);
return -EINVAL;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a756b8749a26..a27b9c0e27c0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -289,6 +289,7 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
static void tcp_sndbuf_expand(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
int sndmem, per_mss;
u32 nr_segs;
@@ -309,7 +310,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
* Cubic needs 1.7 factor, rounded to 2 to include
* extra cushion (application might react slowly to POLLOUT)
*/
- sndmem = 2 * nr_segs * per_mss;
+ sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
+ sndmem *= nr_segs * per_mss;
if (sk->sk_sndbuf < sndmem)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
@@ -899,12 +901,29 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
+/* Sum the number of packets on the wire we have marked as lost.
+ * There are two cases we care about here:
+ * a) Packet hasn't been marked lost (nor retransmitted),
+ * and this is the first loss.
+ * b) Packet has been marked both lost and retransmitted,
+ * and this means we think it was lost again.
+ */
+static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
+{
+ __u8 sacked = TCP_SKB_CB(skb)->sacked;
+
+ if (!(sacked & TCPCB_LOST) ||
+ ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
+ tp->lost += tcp_skb_pcount(skb);
+}
+
static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tcp_verify_retransmit_hint(tp, skb);
tp->lost_out += tcp_skb_pcount(skb);
+ tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
@@ -913,6 +932,7 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
+ tcp_sum_lost(tp, skb);
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -1094,6 +1114,7 @@ struct tcp_sacktag_state {
*/
struct skb_mstamp first_sackt;
struct skb_mstamp last_sackt;
+ struct rate_sample *rate;
int flag;
};
@@ -1261,6 +1282,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
&skb->skb_mstamp);
+ tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1311,6 +1333,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_advance_highest_sack(sk, skb);
tcp_skb_collapse_tstamp(prev, skb);
+ if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
+ TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -1540,6 +1565,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
dup_sack,
tcp_skb_pcount(skb),
&skb->skb_mstamp);
+ tcp_rate_skb_delivered(sk, skb, state->rate);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1622,8 +1648,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
- if (found_dup_sack)
+ if (found_dup_sack) {
state->flag |= FLAG_DSACKING_ACK;
+ tp->delivered++; /* A spurious retransmission is delivered */
+ }
/* Eliminate too old ACKs, but take into
* account more or less fresh ones, they can
@@ -1890,6 +1918,7 @@ void tcp_enter_loss(struct sock *sk)
struct sk_buff *skb;
bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
+ bool mark_lost;
/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1923,8 +1952,12 @@ void tcp_enter_loss(struct sock *sk)
if (skb == tcp_send_head(sk))
break;
+ mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
+ is_reneg);
+ if (mark_lost)
+ tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
- if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) {
+ if (mark_lost) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
@@ -2502,6 +2535,9 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ if (inet_csk(sk)->icsk_ca_ops->cong_control)
+ return;
+
/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
@@ -2878,67 +2914,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
*rexmit = REXMIT_LOST;
}
-/* Kathleen Nichols' algorithm for tracking the minimum value of
- * a data stream over some fixed time interval. (E.g., the minimum
- * RTT over the past five minutes.) It uses constant space and constant
- * time per update yet almost always delivers the same minimum as an
- * implementation that has to keep all the data in the window.
- *
- * The algorithm keeps track of the best, 2nd best & 3rd best min
- * values, maintaining an invariant that the measurement time of the
- * n'th best >= n-1'th best. It also makes sure that the three values
- * are widely separated in the time window since that bounds the worse
- * case error when that data is monotonically increasing over the window.
- *
- * Upon getting a new min, we can forget everything earlier because it
- * has no value - the new min is <= everything else in the window by
- * definition and it's the most recent. So we restart fresh on every new min
- * and overwrites 2nd & 3rd choices. The same property holds for 2nd & 3rd
- * best.
- */
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
- const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
- struct rtt_meas *m = tcp_sk(sk)->rtt_min;
- struct rtt_meas rttm = {
- .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
- .ts = now,
- };
- u32 elapsed;
-
- /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
- if (unlikely(rttm.rtt <= m[0].rtt))
- m[0] = m[1] = m[2] = rttm;
- else if (rttm.rtt <= m[1].rtt)
- m[1] = m[2] = rttm;
- else if (rttm.rtt <= m[2].rtt)
- m[2] = rttm;
-
- elapsed = now - m[0].ts;
- if (unlikely(elapsed > wlen)) {
- /* Passed entire window without a new min so make 2nd choice
- * the new min & 3rd choice the new 2nd. So forth and so on.
- */
- m[0] = m[1];
- m[1] = m[2];
- m[2] = rttm;
- if (now - m[0].ts > wlen) {
- m[0] = m[1];
- m[1] = rttm;
- if (now - m[0].ts > wlen)
- m[0] = rttm;
- }
- } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
- /* Passed a quarter of the window without a new min so
- * take 2nd choice from the 2nd quarter of the window.
- */
- m[2] = m[1] = rttm;
- } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
- /* Passed half the window without a new min so take the 3rd
- * choice from the last half of the window.
- */
- m[2] = rttm;
- }
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 wlen = sysctl_tcp_min_rtt_wlen * HZ;
+
+ minmax_running_min(&tp->rtt_min, wlen, tcp_time_stamp,
+ rtt_us ? : jiffies_to_usecs(1));
}
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -3101,10 +3083,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_snd_una, int *acked,
- struct tcp_sacktag_state *sack)
+ struct tcp_sacktag_state *sack,
+ struct skb_mstamp *now)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct skb_mstamp first_ackt, last_ackt, now;
+ struct skb_mstamp first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
@@ -3136,7 +3119,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
acked_pcount = tcp_tso_acked(sk, skb);
if (!acked_pcount)
break;
-
fully_acked = false;
} else {
/* Speedup tcp_unlink_write_queue() and next loop */
@@ -3172,6 +3154,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->packets_out -= acked_pcount;
pkts_acked += acked_pcount;
+ tcp_rate_skb_delivered(sk, skb, sack->rate);
/* Initial outgoing SYN's get put onto the write_queue
* just like anything else we transmit. It is not
@@ -3204,16 +3187,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- skb_mstamp_get(&now);
if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
- seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
- ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+ seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
+ ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
}
if (sack->first_sackt.v64) {
- sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
- ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
+ sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
+ ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
}
-
+ sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
ca_rtt_us);
@@ -3241,7 +3223,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
+ sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3332,8 +3314,15 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
* information. All transmission or retransmission are delayed afterwards.
*/
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
- int flag)
+ int flag, const struct rate_sample *rs)
{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ca_ops->cong_control) {
+ icsk->icsk_ca_ops->cong_control(sk, rs);
+ return;
+ }
+
if (tcp_in_cwnd_reduction(sk)) {
/* Reduce cwnd if state mandates */
tcp_cwnd_reduction(sk, acked_sacked, flag);
@@ -3578,17 +3567,21 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sacktag_state sack_state;
+ struct rate_sample rs = { .prior_delivered = 0 };
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
u32 prior_fackets;
int prior_packets = tp->packets_out;
- u32 prior_delivered = tp->delivered;
+ u32 delivered = tp->delivered;
+ u32 lost = tp->lost;
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
+ struct skb_mstamp now;
sack_state.first_sackt.v64 = 0;
+ sack_state.rate = &rs;
/* We very likely will need to access write queue head. */
prefetchw(sk->sk_write_queue.next);
@@ -3611,6 +3604,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;
+ skb_mstamp_get(&now);
+
if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
@@ -3621,6 +3616,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
}
prior_fackets = tp->fackets_out;
+ rs.prior_in_flight = tcp_packets_in_flight(tp);
/* ts_recent update must be made after we are sure that the packet
* is in window.
@@ -3676,7 +3672,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
- &sack_state);
+ &sack_state, &now);
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -3693,7 +3689,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
- tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
+ delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
+ lost = tp->lost - lost; /* freshly marked lost */
+ tcp_rate_gen(sk, delivered, lost, &now, &rs);
+ tcp_cong_control(sk, ack, delivered, flag, &rs);
tcp_xmit_recovery(sk, rexmit);
return 1;
@@ -4107,7 +4106,7 @@ void tcp_fin(struct sock *sk)
/* It _is_ possible, that we have something out-of-order _after_ FIN.
* Probably, we should reset in this case. For now drop them.
*/
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
@@ -4267,7 +4266,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
- if (skb_queue_empty(&tp->out_of_order_queue)) {
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
@@ -4343,10 +4342,13 @@ static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
+ bool fin, fragstolen, eaten;
struct sk_buff *skb, *tail;
- bool fragstolen, eaten;
+ struct rb_node *p;
- while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
+ p = rb_first(&tp->out_of_order_queue);
+ while (p) {
+ skb = rb_entry(p, struct sk_buff, rbnode);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
@@ -4356,9 +4358,10 @@ static void tcp_ofo_queue(struct sock *sk)
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &tp->out_of_order_queue);
- __skb_unlink(skb, &tp->out_of_order_queue);
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
tcp_drop(sk, skb);
continue;
@@ -4370,12 +4373,19 @@ static void tcp_ofo_queue(struct sock *sk)
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- tcp_fin(sk);
- if (eaten)
+ else
kfree_skb_partial(skb, fragstolen);
+
+ if (unlikely(fin)) {
+ tcp_fin(sk);
+ /* tcp_fin() purges tp->out_of_order_queue,
+ * so we must end this loop right now.
+ */
+ break;
+ }
}
}
@@ -4391,12 +4401,9 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
if (tcp_prune_queue(sk) < 0)
return -1;
- if (!sk_rmem_schedule(sk, skb, size)) {
+ while (!sk_rmem_schedule(sk, skb, size)) {
if (!tcp_prune_ofo_queue(sk))
return -1;
-
- if (!sk_rmem_schedule(sk, skb, size))
- return -1;
}
}
return 0;
@@ -4405,8 +4412,10 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct rb_node **p, *q, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
+ bool fragstolen;
tcp_ecn_check_ce(tp, skb);
@@ -4421,88 +4430,92 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
inet_csk_schedule_ack(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+ seq = TCP_SKB_CB(skb)->seq;
+ end_seq = TCP_SKB_CB(skb)->end_seq;
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+ tp->rcv_nxt, seq, end_seq);
- skb1 = skb_peek_tail(&tp->out_of_order_queue);
- if (!skb1) {
+ p = &tp->out_of_order_queue.rb_node;
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
- tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
- tp->selective_acks[0].end_seq =
- TCP_SKB_CB(skb)->end_seq;
+ tp->selective_acks[0].start_seq = seq;
+ tp->selective_acks[0].end_seq = end_seq;
}
- __skb_queue_head(&tp->out_of_order_queue, skb);
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+ tp->ooo_last_skb = skb;
goto end;
}
- seq = TCP_SKB_CB(skb)->seq;
- end_seq = TCP_SKB_CB(skb)->end_seq;
-
- if (seq == TCP_SKB_CB(skb1)->end_seq) {
- bool fragstolen;
-
- if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- } else {
- tcp_grow_window(sk, skb);
- kfree_skb_partial(skb, fragstolen);
- skb = NULL;
- }
-
- if (!tp->rx_opt.num_sacks ||
- tp->selective_acks[0].end_seq != seq)
- goto add_sack;
-
- /* Common case: data arrive in order after hole. */
- tp->selective_acks[0].end_seq = end_seq;
- goto end;
- }
-
- /* Find place to insert this segment. */
- while (1) {
- if (!after(TCP_SKB_CB(skb1)->seq, seq))
- break;
- if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
- skb1 = NULL;
- break;
- }
- skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
- }
-
- /* Do skb overlap to previous one? */
- if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
- if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
- /* All the bits are present. Drop. */
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- tcp_drop(sk, skb);
- skb = NULL;
- tcp_dsack_set(sk, seq, end_seq);
- goto add_sack;
+ /* In the typical case, we are adding an skb to the end of the list.
+ * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+ */
+ if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+coalesce_done:
+ tcp_grow_window(sk, skb);
+ kfree_skb_partial(skb, fragstolen);
+ skb = NULL;
+ goto add_sack;
+ }
+ /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
+ if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
+ parent = &tp->ooo_last_skb->rbnode;
+ p = &parent->rb_right;
+ goto insert;
+ }
+
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ p = &parent->rb_left;
+ continue;
}
- if (after(seq, TCP_SKB_CB(skb1)->seq)) {
- /* Partial overlap. */
- tcp_dsack_set(sk, seq,
- TCP_SKB_CB(skb1)->end_seq);
- } else {
- if (skb_queue_is_first(&tp->out_of_order_queue,
- skb1))
- skb1 = NULL;
- else
- skb1 = skb_queue_prev(
- &tp->out_of_order_queue,
- skb1);
+ if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ __kfree_skb(skb);
+ skb = NULL;
+ tcp_dsack_set(sk, seq, end_seq);
+ goto add_sack;
+ }
+ if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+ /* Partial overlap. */
+ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &tp->out_of_order_queue);
+ tcp_dsack_extend(sk,
+ TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ __kfree_skb(skb1);
+ goto merge_right;
+ }
+ } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+ goto coalesce_done;
}
+ p = &parent->rb_right;
}
- if (!skb1)
- __skb_queue_head(&tp->out_of_order_queue, skb);
- else
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+insert:
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
- /* And clean segments covered by new one as whole. */
- while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
- skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((q = rb_next(&skb->rbnode)) != NULL) {
+ skb1 = rb_entry(q, struct sk_buff, rbnode);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
@@ -4511,12 +4524,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq);
break;
}
- __skb_unlink(skb1, &tp->out_of_order_queue);
+ rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
tcp_drop(sk, skb1);
}
+ /* If there is no skb after us, we are the last_skb ! */
+ if (!q)
+ tp->ooo_last_skb = skb;
add_sack:
if (tcp_is_sack(tp))
@@ -4653,13 +4669,13 @@ queue_and_out:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
+ if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
/* RFC2581. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
- if (skb_queue_empty(&tp->out_of_order_queue))
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}
@@ -4713,48 +4729,76 @@ drop:
tcp_data_queue_ofo(sk, skb);
}
+static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ if (list)
+ return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+ return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+}
+
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ struct rb_root *root)
{
- struct sk_buff *next = NULL;
+ struct sk_buff *next = tcp_skb_next(skb, list);
- if (!skb_queue_is_last(list, skb))
- next = skb_queue_next(list, skb);
+ if (list)
+ __skb_unlink(skb, list);
+ else
+ rb_erase(&skb->rbnode, root);
- __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
return next;
}
+/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
+static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sk_buff *skb1;
+
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, root);
+}
+
/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
- * If tail is NULL, this means until the end of the list.
+ * If tail is NULL, this means until the end of the queue.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
- struct sk_buff *head, struct sk_buff *tail,
- u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
+ struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
{
- struct sk_buff *skb, *n;
+ struct sk_buff *skb = head, *n;
+ struct sk_buff_head tmp;
bool end_of_skbs;
/* First, check that queue is collapsible and find
- * the point where collapsing can be useful. */
- skb = head;
+ * the point where collapsing can be useful.
+ */
restart:
- end_of_skbs = true;
- skb_queue_walk_from_safe(list, skb, n) {
- if (skb == tail)
- break;
+ for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
+ n = tcp_skb_next(skb, list);
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb)
break;
goto restart;
@@ -4772,13 +4816,10 @@ restart:
break;
}
- if (!skb_queue_is_last(list, skb)) {
- struct sk_buff *next = skb_queue_next(list, skb);
- if (next != tail &&
- TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
- end_of_skbs = false;
- break;
- }
+ if (n && n != tail &&
+ TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
+ end_of_skbs = false;
+ break;
}
/* Decided to skip this, advance start seq. */
@@ -4788,17 +4829,22 @@ restart:
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
+ __skb_queue_head_init(&tmp);
+
while (before(start, end)) {
int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;
nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
- return;
+ break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_queue_before(list, skb, nskb);
+ if (list)
+ __skb_queue_before(list, skb, nskb);
+ else
+ __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
skb_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
@@ -4816,14 +4862,17 @@ restart:
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
- return;
+ goto end;
}
}
}
+end:
+ skb_queue_walk_safe(&tmp, skb, n)
+ tcp_rbtree_insert(root, skb);
}
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
@@ -4832,70 +4881,86 @@ restart:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
- struct sk_buff *head;
+ struct sk_buff *skb, *head;
+ struct rb_node *p;
u32 start, end;
- if (!skb)
+ p = rb_first(&tp->out_of_order_queue);
+ skb = rb_entry_safe(p, struct sk_buff, rbnode);
+new_range:
+ if (!skb) {
+ p = rb_last(&tp->out_of_order_queue);
+ /* Note: it is possible that p is NULL here. We do not
+ * use rb_entry_safe(), as ooo_last_skb is valid only
+ * if the rbtree is not empty.
+ */
+ tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return;
-
+ }
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
- head = skb;
-
- for (;;) {
- struct sk_buff *next = NULL;
- if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
- next = skb_queue_next(&tp->out_of_order_queue, skb);
- skb = next;
+ for (head = skb;;) {
+ skb = tcp_skb_next(skb, NULL);
- /* Segment is terminated when we see gap or when
- * we are at the end of all the queue. */
+ /* Range is terminated when we see a gap or when
+ * we are at the queue end.
+ */
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, &tp->out_of_order_queue,
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
head, skb, start, end);
- head = skb;
- if (!skb)
- break;
- /* Start new segment */
+ goto new_range;
+ }
+
+ if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
+ if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
- } else {
- if (before(TCP_SKB_CB(skb)->seq, start))
- start = TCP_SKB_CB(skb)->seq;
- if (after(TCP_SKB_CB(skb)->end_seq, end))
- end = TCP_SKB_CB(skb)->end_seq;
- }
}
}
/*
- * Purge the out-of-order queue.
- * Return true if queue was pruned.
+ * Clean the out-of-order queue to make room.
+ * We drop packets with high sequence numbers to:
+ * 1) give holes a chance to be filled.
+ * 2) avoid adding large latencies if thousands of packets sit there.
+ * (But if the application shrinks SO_RCVBUF, we could still end up
+ * freeing the whole queue here.)
+ *
+ * Return true if queue has shrunk.
*/
static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- bool res = false;
+ struct rb_node *node, *prev;
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
- __skb_queue_purge(&tp->out_of_order_queue);
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
+ return false;
- /* Reset SACK state. A conforming SACK implementation will
- * do the same at a timeout based retransmit. When a connection
- * is in a sad state like this, we care only about integrity
- * of the connection not performance.
- */
- if (tp->rx_opt.sack_ok)
- tcp_sack_reset(&tp->rx_opt);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ node = &tp->ooo_last_skb->rbnode;
+ do {
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
+ tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
sk_mem_reclaim(sk);
- res = true;
- }
- return res;
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !tcp_under_memory_pressure(sk))
+ break;
+ node = prev;
+ } while (node);
+ tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+
+ /* Reset SACK state. A conforming SACK implementation will
+ * do the same at a timeout based retransmit. When a connection
+ * is in a sad state like this, we care only about integrity
+ * of the connection not performance.
+ */
+ if (tp->rx_opt.sack_ok)
+ tcp_sack_reset(&tp->rx_opt);
+ return true;
}
/* Reduce allocated memory if we can, trying to get
@@ -4920,7 +4985,7 @@ static int tcp_prune_queue(struct sock *sk)
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
- tcp_collapse(sk, &sk->sk_receive_queue,
+ tcp_collapse(sk, &sk->sk_receive_queue, NULL,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
@@ -5025,7 +5090,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
- (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
+ (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@@ -5926,7 +5991,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
} else
tcp_init_metrics(sk);
- tcp_update_pacing_rate(sk);
+ if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+ tcp_update_pacing_rate(sk);
/* Prevent spurious tcp_cwnd_restart() on first data packet */
tp->lsndtime = tcp_time_stamp;
@@ -6259,6 +6325,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb, sk);
+ inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
/* Note: tcp_v6_init_req() might override ir_iif for link locals */
inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7158d4f8dae4..bd5e8d10893f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1175,6 +1175,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest),
@@ -1195,7 +1196,6 @@ static void tcp_v4_init_req(struct request_sock *req,
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
- ireq->no_srccheck = inet_sk(sk_listener)->transparent;
ireq->opt = tcp_v4_save_options(skb);
}
@@ -1537,6 +1537,34 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_prequeue);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+
+ /* Only the socket owner can try to collapse/prune rx queues
+ * to reduce memory overhead, so add a little headroom here.
+ * Only a few socket backlogs are likely to be non-empty at once.
+ */
+ limit += 64*1024;
+
+ /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+ * we can fix skb->truesize to its real value to avoid future drops.
+ * This is valid because skb is not yet charged to the socket.
+ * It has been noticed that pure SACK packets were sometimes dropped
+ * (when produced by drivers without the copybreak feature).
+ */
+ if (!skb->data_len)
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+ if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ bh_unlock_sock(sk);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(tcp_add_backlog);
+
/*
* From tcp_input.c
*/
@@ -1608,6 +1636,7 @@ process:
sk = req->rsk_listener;
if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+ sk_drops_add(sk, skb);
reqsk_put(req);
goto discard_it;
}
@@ -1666,10 +1695,7 @@ process:
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
- } else if (unlikely(sk_add_backlog(sk, skb,
- sk->sk_rcvbuf + sk->sk_sndbuf))) {
- bh_unlock_sock(sk);
- __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
+ } else if (tcp_add_backlog(sk, skb)) {
goto discard_and_relse;
}
bh_unlock_sock(sk);
@@ -1818,7 +1844,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
@@ -1845,9 +1871,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
local_bh_disable();
sk_sockets_allocated_dec(sk);
local_bh_enable();
-
- if (mem_cgroup_sockets_enabled && sk->sk_memcg)
- sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index b617826e2477..bf1f3b2b29d1 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -751,7 +751,7 @@ static struct genl_family tcp_metrics_nl_family = {
.netnsok = true,
};
-static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
+static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
[TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
[TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
.len = sizeof(struct in6_addr), },
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4b95ec4ed2c8..6234ebaa7db1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -464,7 +464,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- newtp->rtt_min[0].rtt = ~0U;
+ minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
@@ -487,8 +487,10 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ newtp->app_limited = ~0U;
+
tcp_init_xmit_timers(newsk);
- __skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 5c5964962d0c..bc68da38ea86 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -90,12 +90,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
goto out;
}
- /* GSO partial only requires splitting the frame into an MSS
- * multiple and possibly a remainder. So update the mss now.
- */
- if (features & NETIF_F_GSO_PARTIAL)
- mss = skb->len - (skb->len % mss);
-
copy_destructor = gso_skb->destructor == tcp_wfree;
ooo_okay = gso_skb->ooo_okay;
/* All segments but the first should have ooo_okay cleared */
@@ -108,6 +102,13 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
/* Only first segment might have ooo_okay set */
segs->ooo_okay = ooo_okay;
+ /* GSO partial and frag_list segmentation only require splitting
+ * the frame into an MSS multiple and possibly a remainder; both
+ * cases return a GSO skb. So update the mss now.
+ */
+ if (skb_is_gso(segs))
+ mss *= skb_shinfo(segs)->gso_segs;
+
delta = htonl(oldlen + (thlen + mss));
skb = segs;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d48d5571e62a..896e9dfbdb5c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -734,9 +734,16 @@ static void tcp_tsq_handler(struct sock *sk)
{
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
- TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
- tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->lost_out > tp->retrans_out &&
+ tp->snd_cwnd > tcp_packets_in_flight(tp))
+ tcp_xmit_retransmit_queue(sk);
+
+ tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
+ }
}
/*
* One tasklet per cpu tries to send more skbs.
@@ -918,6 +925,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_mstamp_get(&skb->skb_mstamp);
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
+ tcp_rate_skb_sent(sk, skb);
if (unlikely(skb_cloned(skb)))
skb = pskb_copy(skb, gfp_mask);
@@ -1213,6 +1221,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
tcp_set_skb_tso_segs(skb, mss_now);
tcp_set_skb_tso_segs(buff, mss_now);
+ /* Update delivered info for the new segment */
+ TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
+
/* If this packet has been sent out already, we must
* adjust the various packet counters.
*/
@@ -1358,6 +1369,7 @@ int tcp_mss_to_mtu(struct sock *sk, int mss)
}
return mtu;
}
+EXPORT_SYMBOL(tcp_mss_to_mtu);
/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
@@ -1545,7 +1557,8 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
/* Return how many segs we'd like on a TSO packet,
* to send one TSO packet per ms
*/
-static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs)
{
u32 bytes, segs;
@@ -1557,10 +1570,23 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
* This preserves ACK clocking and is consistent
* with tcp_tso_should_defer() heuristic.
*/
- segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);
+ segs = max_t(u32, bytes / mss_now, min_tso_segs);
return min_t(u32, segs, sk->sk_gso_max_segs);
}
+EXPORT_SYMBOL(tcp_tso_autosize);
+
+/* Return the number of segments we want in the skb we are transmitting.
+ * See if congestion control module wants to decide; otherwise, autosize.
+ */
+static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
+{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+ u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
+
+ return tso_segs ? :
+ tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
+}
/* Returns the portion of skb which can be sent right away */
static unsigned int tcp_mss_split_point(const struct sock *sk,
@@ -2022,6 +2048,39 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
+/* TCP Small Queues:
+ * Limit the number of packets in qdisc/devices to two packets or ~1 ms of data.
+ * (These limits are doubled for retransmits)
+ * This allows for :
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
+ * Alas, some drivers / subsystems require a fair amount
+ * of queued bytes to ensure line rate.
+ * One example is wifi aggregation (802.11 AMPDU)
+ */
+static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ unsigned int factor)
+{
+ unsigned int limit;
+
+ limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
+ limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
+ limit <<= factor;
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED, so we must
+ * test again the condition.
+ */
+ smp_mb__after_atomic();
+ if (atomic_read(&sk->sk_wmem_alloc) > limit)
+ return true;
+ }
+ return false;
+}
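/* Rough sizing example for the limit above: sk_pacing_rate >> 10 is about
 * one millisecond's worth of bytes at the current pacing rate (dividing
 * bytes/sec by 1024). At 100 Mbit/s (~12.5 MB/s) that is ~12 KB, i.e. around
 * eight full-sized 1500-byte frames; the max() with 2 * skb->truesize keeps
 * the limit sane for large GSO skbs, and the caller's factor of 1 doubles it
 * for retransmits.
 */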
+
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
@@ -2059,7 +2118,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
}
- max_segs = tcp_tso_autosize(sk, mss_now);
+ max_segs = tcp_tso_segs(sk, mss_now);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -2108,29 +2167,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
- /* TCP Small Queues :
- * Control number of packets in qdisc/devices to two packets / or ~1 ms.
- * This allows for :
- * - better RTT estimation and ACK scheduling
- * - faster recovery
- * - high rates
- * Alas, some drivers / subsystems require a fair amount
- * of queued bytes to ensure line rate.
- * One example is wifi aggregation (802.11 AMPDU)
- */
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
- limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
-
- if (atomic_read(&sk->sk_wmem_alloc) > limit) {
- set_bit(TSQ_THROTTLED, &tp->tsq_flags);
- /* It is possible TX completion already happened
- * before we set TSQ_THROTTLED, so we must
- * test again the condition.
- */
- smp_mb__after_atomic();
- if (atomic_read(&sk->sk_wmem_alloc) > limit)
- break;
- }
+ if (tcp_small_queue_check(sk, skb, 0))
+ break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
@@ -2777,9 +2815,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
last_lost = tp->snd_una;
}
- max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
+ max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
- __u8 sacked = TCP_SKB_CB(skb)->sacked;
+ __u8 sacked;
int segs;
if (skb == tcp_send_head(sk))
@@ -2791,6 +2829,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
return;
+ sacked = TCP_SKB_CB(skb)->sacked;
/* In case tcp_shift_skb_data() has aggregated large skbs,
* we need to make sure we are not sending too big TSO packets
*/
@@ -2830,6 +2869,9 @@ begin_fwd:
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
+ if (tcp_small_queue_check(sk, skb, 1))
+ return;
+
if (tcp_retransmit_skb(sk, skb, segs))
return;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
new file mode 100644
index 000000000000..9be1581a5a08
--- /dev/null
+++ b/net/ipv4/tcp_rate.c
@@ -0,0 +1,186 @@
+#include <net/tcp.h>
+
+/* The bandwidth estimator estimates the rate at which the network
+ * can currently deliver outbound data packets for this flow. At a high
+ * level, it operates by taking a delivery rate sample for each ACK.
+ *
+ * A rate sample records the rate at which the network delivered packets
+ * for this flow, calculated over the time interval between the transmission
+ * of a data packet and the acknowledgment of that packet.
+ *
+ * Specifically, over the interval between each transmit and corresponding ACK,
+ * the estimator generates a delivery rate sample. Typically it uses the rate
+ * at which packets were acknowledged. However, the approach of using only the
+ * acknowledgment rate faces a challenge under the prevalent ACK decimation or
+ * compression: packets can temporarily appear to be delivered much quicker
+ * than the bottleneck rate. Since it is physically impossible to do that in a
+ * sustained fashion, when the estimator notices that the ACK rate is faster
+ * than the transmit rate, it uses the latter:
+ *
+ * send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
+ * ack_rate = #pkts_delivered/(last_ack_time - first_ack_time)
+ * bw = min(send_rate, ack_rate)
+ *
+ * Notice the estimator essentially estimates the goodput, not always the
+ * network bottleneck link rate when the sending or receiving is limited by
+ * other factors like applications or receiver window limits. The estimator
+ * deliberately avoids using the inter-packet spacing approach because that
+ * approach requires a large number of samples and sophisticated filtering.
+ *
+ * TCP flows can often be application-limited in request/response workloads.
+ * The estimator marks a bandwidth sample as application-limited if there
+ * was some moment during the sampled window of packets when there was no data
+ * ready to send in the write queue.
+ */
+
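
A minimal userspace sketch (not kernel code) of a single delivery-rate sample as described above. Taking the longer of the send phase and the ACK phase as the sampling interval is what makes delivered/interval behave like min(send_rate, ack_rate); the timestamps and counts in main() are invented.

/* Sketch only: field and variable names here are illustrative,
 * not the kernel's struct rate_sample.
 */
#include <stdio.h>
#include <stdint.h>

struct sample {
	uint32_t delivered;	/* packets (s)acked over the interval */
	int64_t interval_us;	/* max(send phase, ack phase), -1 if unusable */
};

static struct sample rate_sample(uint64_t first_tx_us, uint64_t last_tx_us,
				 uint64_t first_ack_us, uint64_t last_ack_us,
				 uint32_t delivered)
{
	struct sample rs = { .delivered = delivered };
	uint64_t snd_us = last_tx_us - first_tx_us;	/* send phase */
	uint64_t ack_us = last_ack_us - first_ack_us;	/* ack phase */

	rs.interval_us = (int64_t)(snd_us > ack_us ? snd_us : ack_us);
	if (rs.interval_us <= 0)
		rs.interval_us = -1;			/* no usable timing */
	return rs;
}

int main(void)
{
	/* 10 packets sent over 10 ms but ACKed in a 2 ms burst (ACK compression):
	 * the longer send phase wins, so the sample reflects the send rate
	 * rather than the inflated ACK rate.
	 */
	struct sample rs = rate_sample(0, 10000, 50000, 52000, 10);

	printf("delivered=%u interval_us=%lld\n",
	       rs.delivered, (long long)rs.interval_us);
	return 0;
}
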
+/* Snapshot the current delivery information in the skb, to generate
+ * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
+ */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* In general we need to start delivery rate samples from the
+ * time we received the most recent ACK, to ensure we include
+ * the full time the network needs to deliver all in-flight
+ * packets. If there are no packets in flight yet, then we
+ * know that any ACKs after now indicate that the network was
+ * able to deliver those packets completely in the sampling
+ * interval between now and the next ACK.
+ *
+ * Note that we use packets_out instead of tcp_packets_in_flight(tp)
+ * because the latter is a guess based on RTO and loss-marking
+ * heuristics. We don't want spurious RTOs or loss markings to cause
+ * a spuriously small time interval, causing a spuriously high
+ * bandwidth estimate.
+ */
+ if (!tp->packets_out) {
+ tp->first_tx_mstamp = skb->skb_mstamp;
+ tp->delivered_mstamp = skb->skb_mstamp;
+ }
+
+ TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
+ TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
+ TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
+ TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
+}
+
+/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
+ * delivery information when the skb was last transmitted.
+ *
+ * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
+ * called multiple times. We favor the information from the most recently
+ * sent skb, i.e., the skb with the highest prior_delivered count.
+ */
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+ if (!scb->tx.delivered_mstamp.v64)
+ return;
+
+ if (!rs->prior_delivered ||
+ after(scb->tx.delivered, rs->prior_delivered)) {
+ rs->prior_delivered = scb->tx.delivered;
+ rs->prior_mstamp = scb->tx.delivered_mstamp;
+ rs->is_app_limited = scb->tx.is_app_limited;
+ rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+
+ /* Find the duration of the "send phase" of this window: */
+ rs->interval_us = skb_mstamp_us_delta(
+ &skb->skb_mstamp,
+ &scb->tx.first_tx_mstamp);
+
+ /* Record send time of most recently ACKed packet: */
+ tp->first_tx_mstamp = skb->skb_mstamp;
+ }
+ /* Clear the skb's delivered timestamp once it is SACKed, so it is not
+ * used again when the skb is later cumulatively ACKed. For ACKed packets
+ * we don't need to reset it since the skb will be freed soon.
+ */
+ if (scb->sacked & TCPCB_SACKED_ACKED)
+ scb->tx.delivered_mstamp.v64 = 0;
+}
+
+/* Update the connection delivery information and generate a rate sample. */
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 snd_us, ack_us;
+
+ /* Clear app limited if bubble is acked and gone. */
+ if (tp->app_limited && after(tp->delivered, tp->app_limited))
+ tp->app_limited = 0;
+
+ /* TODO: there are multiple places throughout tcp_ack() to get
+ * current time. Refactor the code using a new "tcp_acktag_state"
+ * to carry current time, flags, stats like "tcp_sacktag_state".
+ */
+ if (delivered)
+ tp->delivered_mstamp = *now;
+
+ rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
+ rs->losses = lost; /* freshly marked lost */
+ /* Return an invalid sample if no timing information is available. */
+ if (!rs->prior_mstamp.v64) {
+ rs->delivered = -1;
+ rs->interval_us = -1;
+ return;
+ }
+ rs->delivered = tp->delivered - rs->prior_delivered;
+
+ /* Model sending data and receiving ACKs as separate pipeline phases
+ * for a window. Usually the ACK phase is longer, but with ACK
+ * compression the send phase can be longer. To be safe we use the
+ * longer phase.
+ */
+ snd_us = rs->interval_us; /* send phase */
+ ack_us = skb_mstamp_us_delta(now, &rs->prior_mstamp); /* ack phase */
+ rs->interval_us = max(snd_us, ack_us);
+
+ /* Normally we expect interval_us >= min-rtt.
+ * Note that rate may still be over-estimated when a spuriously
+ * retransmitted skb was first (s)acked because "interval_us"
+ * is under-estimated (up to an RTT). However, continuously
+ * measuring the delivery rate during loss recovery is crucial
+ * for connections that suffer heavy or prolonged losses.
+ */
+ if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
+ if (!rs->is_retrans)
+ pr_debug("tcp rate: %ld %d %u %u %u\n",
+ rs->interval_us, rs->delivered,
+ inet_csk(sk)->icsk_ca_state,
+ tp->rx_opt.sack_ok, tcp_min_rtt(tp));
+ rs->interval_us = -1;
+ return;
+ }
+
+ /* Record the last non-app-limited or the highest app-limited bw */
+ if (!rs->is_app_limited ||
+ ((u64)rs->delivered * tp->rate_interval_us >=
+ (u64)tp->rate_delivered * rs->interval_us)) {
+ tp->rate_delivered = rs->delivered;
+ tp->rate_interval_us = rs->interval_us;
+ tp->rate_app_limited = rs->is_app_limited;
+ }
+}
+
+/* If a gap is detected between sends, mark the socket application-limited. */
+void tcp_rate_check_app_limited(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (/* We have less than one packet to send. */
+ tp->write_seq - tp->snd_nxt < tp->mss_cache &&
+ /* Nothing in sending host's qdisc queues or NIC tx queue. */
+ sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
+ /* We are not limited by CWND. */
+ tcp_packets_in_flight(tp) < tp->snd_cwnd &&
+ /* All lost packets have been retransmitted. */
+ tp->lost_out <= tp->retrans_out)
+ tp->app_limited =
+ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
+}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f712b411f6ed..3ea1cf804748 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -192,6 +192,8 @@ static int tcp_write_timeout(struct sock *sk)
if (tp->syn_data && icsk->icsk_retransmits == 1)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ } else if (!tp->syn_data && !tp->syn_fastopen) {
+ sk_rethink_txhash(sk);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
syn_set = true;
@@ -213,6 +215,8 @@ static int tcp_write_timeout(struct sock *sk)
tcp_mtu_probing(icsk, sk);
dst_negative_advice(sk);
+ } else {
+ sk_rethink_txhash(sk);
}
retry_until = net->ipv4.sysctl_tcp_retries2;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5fdcb8d108d4..7d96dc2d3d08 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -114,6 +114,7 @@
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
+#include <net/addrconf.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
@@ -1020,12 +1021,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flow_flags,
faddr, saddr, dport, inet->inet_sport);
- if (!saddr && ipc.oif) {
- err = l3mdev_get_saddr(net, ipc.oif, fl4);
- if (err < 0)
- goto out;
- }
-
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
@@ -2192,6 +2187,20 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
}
EXPORT_SYMBOL(udp_poll);
+int udp_abort(struct sock *sk, int err)
+{
+ lock_sock(sk);
+
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ udp_disconnect(sk, 0);
+
+ release_sock(sk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(udp_abort);
+
struct proto udp_prot = {
.name = "UDP",
.owner = THIS_MODULE,
@@ -2221,7 +2230,7 @@ struct proto udp_prot = {
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
+ .diag_destroy = udp_abort,
};
EXPORT_SYMBOL(udp_prot);
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 3d5ccf4b1412..9a89c10a55f0 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -20,7 +20,7 @@
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
- struct nlattr *bc)
+ struct nlattr *bc, bool net_admin)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
@@ -28,7 +28,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
return inet_sk_diag_fill(sk, NULL, skb, req,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
}
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
@@ -76,7 +76,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
err = inet_sk_diag_fill(sk, NULL, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
@@ -97,6 +98,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
+ bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct net *net = sock_net(skb->sk);
int num, s_num, slot, s_slot;
@@ -132,7 +134,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
r->id.idiag_dport)
goto next;
- if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
spin_unlock_bh(&hslot->lock);
goto done;
}
@@ -165,12 +167,88 @@ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
+#ifdef CONFIG_INET_DIAG_DESTROY
+static int __udp_diag_destroy(struct sk_buff *in_skb,
+ const struct inet_diag_req_v2 *req,
+ struct udp_table *tbl)
+{
+ struct net *net = sock_net(in_skb->sk);
+ struct sock *sk;
+ int err;
+
+ rcu_read_lock();
+
+ if (req->sdiag_family == AF_INET)
+ sk = __udp4_lib_lookup(net,
+ req->id.idiag_dst[0], req->id.idiag_dport,
+ req->id.idiag_src[0], req->id.idiag_sport,
+ req->id.idiag_if, tbl, NULL);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (req->sdiag_family == AF_INET6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+ ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+ sk = __udp4_lib_lookup(net,
+ req->id.idiag_dst[3], req->id.idiag_dport,
+ req->id.idiag_src[3], req->id.idiag_sport,
+ req->id.idiag_if, tbl, NULL);
+
+ else
+ sk = __udp6_lib_lookup(net,
+ (struct in6_addr *)req->id.idiag_dst,
+ req->id.idiag_dport,
+ (struct in6_addr *)req->id.idiag_src,
+ req->id.idiag_sport,
+ req->id.idiag_if, tbl, NULL);
+ }
+#endif
+ else {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+
+ rcu_read_unlock();
+
+ if (!sk)
+ return -ENOENT;
+
+ if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
+ sock_put(sk);
+ return -ENOENT;
+ }
+
+ err = sock_diag_destroy(sk, ECONNABORTED);
+
+ sock_put(sk);
+
+ return err;
+}
+
+static int udp_diag_destroy(struct sk_buff *in_skb,
+ const struct inet_diag_req_v2 *req)
+{
+ return __udp_diag_destroy(in_skb, req, &udp_table);
+}
+
+static int udplite_diag_destroy(struct sk_buff *in_skb,
+ const struct inet_diag_req_v2 *req)
+{
+ return __udp_diag_destroy(in_skb, req, &udplite_table);
+}
+
+#endif
+
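
Note on the AF_INET6 branch above: it falls back to the IPv4 lookup when both endpoints are v4-mapped, passing idiag_dst[3]/idiag_src[3] because the IPv4 address occupies the last 32-bit word of a ::ffff:a.b.c.d address. A small userspace illustration (not kernel code) of that layout:

/* Sketch only: shows where the IPv4 address sits in a v4-mapped
 * IPv6 address (words 0-1 zero, word 2 = 0x0000ffff, word 3 = IPv4).
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr a;
	struct in_addr v4;
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	if (IN6_IS_ADDR_V4MAPPED(&a)) {
		memcpy(&v4, &a.s6_addr[12], sizeof(v4));	/* word 3 == last 4 bytes */
		printf("v4-mapped: %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
	}
	return 0;
}
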
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
.idiag_info_size = 0,
+#ifdef CONFIG_INET_DIAG_DESTROY
+ .destroy = udp_diag_destroy,
+#endif
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
@@ -192,6 +270,9 @@ static const struct inet_diag_handler udplite_diag_handler = {
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
.idiag_info_size = 0,
+#ifdef CONFIG_INET_DIAG_DESTROY
+ .destroy = udplite_diag_destroy,
+#endif
};
static int __init udp_diag_init(void)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 81f253b6ff36..f9333c963607 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -21,7 +21,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
__be16 new_protocol, bool is_ipv6)
{
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
- bool remcsum, need_csum, offload_csum, ufo;
+ bool remcsum, need_csum, offload_csum, ufo, gso_partial;
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct udphdr *uh = udp_hdr(skb);
u16 mac_offset = skb->mac_header;
@@ -88,6 +88,8 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
goto out;
}
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
outer_hlen = skb_tnl_header_len(skb);
udp_offset = outer_hlen - tnl_hlen;
skb = segs;
@@ -117,7 +119,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
* will be using a length value equal to only one MSS sized
* segment instead of the entire frame.
*/
- if (skb_is_gso(skb)) {
+ if (gso_partial) {
uh->len = htons(skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)uh);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 2eea073e27ef..af817158d830 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -60,7 +60,6 @@ struct proto udplite_prot = {
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
- .clear_sk = sk_prot_clear_portaddr_nulls,
};
EXPORT_SYMBOL(udplite_prot);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 41f5b504a782..6a7ff6957535 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -112,7 +112,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
int oif = 0;
if (skb_dst(skb))
- oif = l3mdev_fib_oif(skb_dst(skb)->dev);
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2f1f5d439788..cbd9343751a2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -112,6 +112,27 @@ static inline u32 cstamp_delta(unsigned long cstamp)
return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}
+static inline s32 rfc3315_s14_backoff_init(s32 irt)
+{
+ /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
+ u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+ do_div(tmp, 1000000);
+ return (s32)tmp;
+}
+
+static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
+{
+ /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
+ u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+ do_div(tmp, 1000000);
+ if ((s32)tmp > mrt) {
+ /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
+ tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+ do_div(tmp, 1000000);
+ }
+ return (s32)tmp;
+}
+
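
A standalone sketch (not kernel code) of the RFC 3315 section 14 backoff these helpers implement, with rand() standing in for prandom_u32() and times expressed in milliseconds: the first RT is IRT jittered by +/-10%, each subsequent RT is roughly doubled (x1.9 .. 2.1), and once it would exceed MRT it is pinned near MRT.

/* Sketch only, not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int32_t backoff_init(int32_t irt_ms)
{
	/* multiply IRT by a random factor in [0.9, 1.1] */
	uint64_t tmp = (900000ull + (uint32_t)rand() % 200001u) * (uint64_t)irt_ms;

	return (int32_t)(tmp / 1000000);
}

static int32_t backoff_update(int32_t rt_ms, int32_t mrt_ms)
{
	/* multiply the previous RT by [1.9, 2.1]; clamp near MRT once exceeded */
	uint64_t tmp = (1900000ull + (uint32_t)rand() % 200001u) * (uint64_t)rt_ms;

	tmp /= 1000000;
	if ((int32_t)tmp > mrt_ms) {
		tmp = (900000ull + (uint32_t)rand() % 200001u) * (uint64_t)mrt_ms;
		tmp /= 1000000;
	}
	return (int32_t)tmp;
}

int main(void)
{
	int32_t rt = backoff_init(4000);		/* IRT = 4 s */

	for (int i = 0; i < 6; i++) {
		printf("RT=%d ms\n", rt);
		rt = backoff_update(rt, 3600000);	/* MRT = 1 hour */
	}
	return 0;
}
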
#ifdef CONFIG_SYSCTL
static int addrconf_sysctl_register(struct inet6_dev *idev);
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
@@ -187,6 +208,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
+ .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
@@ -232,6 +254,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
+ .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
@@ -3687,7 +3710,7 @@ static void addrconf_rs_timer(unsigned long data)
if (idev->if_flags & IF_RA_RCVD)
goto out;
- if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
+ if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
write_unlock(&idev->lock);
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
ndisc_send_rs(dev, &lladdr,
@@ -3696,11 +3719,13 @@ static void addrconf_rs_timer(unsigned long data)
goto put;
write_lock(&idev->lock);
+ idev->rs_interval = rfc3315_s14_backoff_update(
+ idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
idev->cnf.rtr_solicits) ?
idev->cnf.rtr_solicit_delay :
- idev->cnf.rtr_solicit_interval);
+ idev->rs_interval);
} else {
/*
* Note: we do not support deprecated "all on-link"
@@ -3949,7 +3974,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits > 0 &&
+ ifp->idev->cnf.rtr_solicits != 0 &&
(dev->flags&IFF_LOOPBACK) == 0;
read_unlock_bh(&ifp->idev->lock);
@@ -3971,10 +3996,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
+ ifp->idev->rs_interval = rfc3315_s14_backoff_init(
+ ifp->idev->cnf.rtr_solicit_interval);
ifp->idev->rs_probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
- addrconf_mod_rs_timer(ifp->idev,
- ifp->idev->cnf.rtr_solicit_interval);
+ addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
spin_unlock(&ifp->lock);
write_unlock_bh(&ifp->idev->lock);
}
@@ -4891,6 +4917,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
jiffies_to_msecs(cnf->rtr_solicit_interval);
+ array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
+ jiffies_to_msecs(cnf->rtr_solicit_max_interval);
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
@@ -4961,18 +4989,18 @@ static inline size_t inet6_if_nlmsg_size(void)
}
static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
- int items, int bytes)
+ int bytes)
{
int i;
- int pad = bytes - sizeof(u64) * items;
+ int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
BUG_ON(pad < 0);
/* Use put_unaligned() because stats may not be aligned for u64. */
- put_unaligned(items, &stats[0]);
- for (i = 1; i < items; i++)
+ put_unaligned(ICMP6_MIB_MAX, &stats[0]);
+ for (i = 1; i < ICMP6_MIB_MAX; i++)
put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
- memset(&stats[items], 0, pad);
+ memset(&stats[ICMP6_MIB_MAX], 0, pad);
}
static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
@@ -5005,7 +5033,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
offsetof(struct ipstats_mib, syncp));
break;
case IFLA_INET6_ICMP6STATS:
- __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
+ __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
break;
}
}
@@ -5099,7 +5127,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
return -EINVAL;
if (!ipv6_accept_ra(idev))
return -EINVAL;
- if (idev->cnf.rtr_solicits <= 0)
+ if (idev->cnf.rtr_solicits == 0)
return -EINVAL;
write_lock_bh(&idev->lock);
@@ -5128,8 +5156,10 @@ update_lft:
if (update_rs) {
idev->if_flags |= IF_RS_SENT;
+ idev->rs_interval = rfc3315_s14_backoff_init(
+ idev->cnf.rtr_solicit_interval);
idev->rs_probes = 1;
- addrconf_mod_rs_timer(idev, idev->cnf.rtr_solicit_interval);
+ addrconf_mod_rs_timer(idev, idev->rs_interval);
}
/* Well, that's kinda nasty ... */
@@ -5467,20 +5497,6 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
}
static
-int addrconf_sysctl_hop_limit(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table lctl;
- int min_hl = 1, max_hl = 255;
-
- lctl = *ctl;
- lctl.extra1 = &min_hl;
- lctl.extra2 = &max_hl;
-
- return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
-}
-
-static
int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -5713,6 +5729,9 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
return ret;
}
+static const int one = 1;
+static const int two_five_five = 255;
+
static const struct ctl_table addrconf_sysctl[] = {
{
.procname = "forwarding",
@@ -5726,7 +5745,9 @@ static const struct ctl_table addrconf_sysctl[] = {
.data = &ipv6_devconf.hop_limit,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = addrconf_sysctl_hop_limit,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *)&one,
+ .extra2 = (void *)&two_five_five,
},
{
.procname = "mtu",
@@ -5778,6 +5799,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
+ .procname = "router_solicitation_max_interval",
+ .data = &ipv6_devconf.rtr_solicit_max_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
.procname = "router_solicitation_delay",
.data = &ipv6_devconf.rtr_solicit_delay,
.maxlen = sizeof(int),
@@ -6044,8 +6072,14 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
for (i = 0; table[i].data; i++) {
table[i].data += (char *)p - (char *)&ipv6_devconf;
- table[i].extra1 = idev; /* embedded; no ref */
- table[i].extra2 = net;
+ /* If extra1 or extra2 is already set (e.g. as min/max bounds for
+ * proc_dointvec_minmax), it is not safe to overwrite them with
+ * idev/net, so leave such entries alone.
+ */
+ if (!table[i].extra1 && !table[i].extra2) {
+ table[i].extra1 = idev; /* embedded; no ref */
+ table[i].extra2 = net;
+ }
}
snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b454055ba625..46ad699937fd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -545,6 +545,8 @@ const struct proto_ops inet6_stream_ops = {
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
+ .read_sock = tcp_read_sock,
+ .peek_len = tcp_peek_len,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5857c1fc8b67..eea23b57c6a5 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -38,6 +38,9 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
.flags = FIB_LOOKUP_NOREF,
};
+ /* update flow if oif or iif point to device enslaved to l3mdev */
+ l3mdev_update_flow(net, flowi6_to_flowi(fl6));
+
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
index ec9efbcdad35..aba0998ddbfb 100644
--- a/net/ipv6/ila/ila_common.c
+++ b/net/ipv6/ila/ila_common.c
@@ -172,6 +172,5 @@ static void __exit ila_fini(void)
module_init(ila_init);
module_exit(ila_fini);
-MODULE_ALIAS_RTNL_LWT(ILA);
MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index c8314c6b6154..e50c27a93e17 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -51,7 +51,7 @@ drop:
return -EINVAL;
}
-static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
};
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index e6eca5fdf4c9..e604013dd814 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -128,7 +128,7 @@ static struct genl_family ila_nl_family = {
.parallel_ops = true,
};
-static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
[ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
[ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 771be1fa4176..ef5485204522 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -743,6 +743,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
+ u16 nlflags = NLM_F_EXCL;
int err;
ins = &fn->leaf;
@@ -759,6 +760,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
if (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_EXCL))
return -EEXIST;
+
+ nlflags &= ~NLM_F_EXCL;
if (replace) {
if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
found++;
@@ -856,6 +859,7 @@ next_iter:
pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
+ nlflags |= NLM_F_CREATE;
err = fib6_commit_metrics(&rt->dst, mxc);
if (err)
return err;
@@ -864,7 +868,7 @@ add:
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index edc3daab354e..d7d6d3ae0b3b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -61,12 +61,12 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-#define HASH_SIZE_SHIFT 5
-#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+#define IP6_GRE_HASH_SIZE_SHIFT 5
+#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
static int ip6gre_net_id __read_mostly;
struct ip6gre_net {
- struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
+ struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
struct net_device *fb_tunnel_dev;
};
@@ -96,12 +96,12 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
will match fallback tunnel.
*/
-#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
+#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
u32 hash = ipv6_addr_hash(addr);
- return hash_32(hash, HASH_SIZE_SHIFT);
+ return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}
#define tunnels_r_l tunnels[3]
@@ -1086,7 +1086,7 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
for (prio = 0; prio < 4; prio++) {
int h;
- for (h = 0; h < HASH_SIZE; h++) {
+ for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
struct ip6_tnl *t;
t = rtnl_dereference(ign->tunnels[prio][h]);
@@ -1238,7 +1238,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
if (data[IFLA_GRE_FLOWINFO])
- parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+ parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
if (data[IFLA_GRE_FLAGS])
parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 22e90e56b5a9..e7bfd55899a3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -69,6 +69,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
int offset = 0;
bool encap, udpfrag;
int nhoff;
+ bool gso_partial;
skb_reset_network_header(skb);
nhoff = skb_network_header(skb) - skb_mac_header(skb);
@@ -101,9 +102,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (IS_ERR(segs))
goto out;
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
- if (skb_is_gso(skb))
+ if (gso_partial)
payload_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1dfc402d9ad1..6001e781164e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,6 +56,7 @@
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
+#include <net/lwtunnel.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -104,6 +105,13 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
}
}
+ if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ int res = lwtunnel_xmit(skb);
+
+ if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ return res;
+ }
+
rcu_read_lock_bh();
nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
@@ -228,6 +236,14 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
+
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out((struct sock *)sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
@@ -910,13 +926,6 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
int err;
int flags = 0;
- if (ipv6_addr_any(&fl6->saddr) && fl6->flowi6_oif &&
- (!*dst || !(*dst)->error)) {
- err = l3mdev_get_saddr6(net, sk, fl6);
- if (err)
- goto out_err;
- }
-
/* The correct way to handle this would be to do
* ip6_route_get_saddr, and then ip6_route_output; however,
* the route-specific preferred source forces the
@@ -1008,7 +1017,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
out_err_release:
dst_release(*dst);
*dst = NULL;
-out_err:
+
if (err == -ENETUNREACH)
IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
return err;
@@ -1054,8 +1063,6 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
- if (!fl6->flowi6_oif)
- fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 888543debe4e..6a66adba0c22 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/dst_metadata.h>
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -64,8 +65,8 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
-#define HASH_SIZE_SHIFT 5
-#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+#define IP6_TUNNEL_HASH_SIZE_SHIFT 5
+#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
@@ -75,7 +76,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
- return hash_32(hash, HASH_SIZE_SHIFT);
+ return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}
static int ip6_tnl_dev_init(struct net_device *dev);
@@ -87,9 +88,10 @@ struct ip6_tnl_net {
/* the IPv6 tunnel fallback device */
struct net_device *fb_tnl_dev;
/* lists for storing tunnels in use */
- struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+ struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
struct ip6_tnl __rcu *tnls_wc[1];
struct ip6_tnl __rcu **tnls[2];
+ struct ip6_tnl __rcu *collect_md_tun;
};
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
@@ -166,6 +168,10 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
return t;
}
+ t = rcu_dereference(ip6n->collect_md_tun);
+ if (t)
+ return t;
+
t = rcu_dereference(ip6n->tnls_wc[0]);
if (t && (t->dev->flags & IFF_UP))
return t;
@@ -209,6 +215,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ip6n->collect_md_tun, t);
rcu_assign_pointer(t->next , rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
@@ -224,6 +232,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
struct ip6_tnl __rcu **tp;
struct ip6_tnl *iter;
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ip6n->collect_md_tun, NULL);
+
for (tp = ip6_tnl_bucket(ip6n, &t->parms);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
@@ -829,6 +840,9 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+ if (tun_dst)
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -865,6 +879,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
{
struct ip6_tnl *t;
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct metadata_dst *tun_dst = NULL;
int ret = -1;
rcu_read_lock();
@@ -881,7 +896,12 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
goto drop;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
- ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
+ if (t->parms.collect_md) {
+ tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
+ if (!tun_dst)
+ return 0;
+ }
+ ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_error);
}
@@ -1012,8 +1032,16 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
int mtu;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
+ u8 hop_limit;
int err = -1;
+ if (t->parms.collect_md) {
+ hop_limit = skb_tunnel_info(skb)->key.ttl;
+ goto route_lookup;
+ } else {
+ hop_limit = t->parms.hop_limit;
+ }
+
/* NBMA tunnel */
if (ipv6_addr_any(&t->parms.raddr)) {
struct in6_addr *addr6;
@@ -1043,6 +1071,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
goto tx_err_link_failure;
if (!dst) {
+route_lookup:
dst = ip6_route_output(net, NULL, fl6);
if (dst->error)
@@ -1053,6 +1082,10 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
dst = NULL;
goto tx_err_link_failure;
}
+ if (t->parms.collect_md &&
+ ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
+ &fl6->daddr, 0, &fl6->saddr))
+ goto tx_err_link_failure;
ndst = dst;
}
@@ -1071,7 +1104,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
}
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if (skb_dst(skb))
+ if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
@@ -1111,8 +1144,13 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
skb = new_skb;
}
- if (!fl6->flowi6_mark && ndst)
- dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+ if (t->parms.collect_md) {
+ if (t->encap.type != TUNNEL_ENCAP_NONE)
+ goto tx_err_dst_release;
+ } else {
+ if (!fl6->flowi6_mark && ndst)
+ dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+ }
skb_dst_set(skb, dst);
if (encap_limit >= 0) {
@@ -1137,7 +1175,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
- ipv6h->hop_limit = t->parms.hop_limit;
+ ipv6h->hop_limit = hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
@@ -1170,19 +1208,34 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
- if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
+ dsfield = ipv4_get_dsfield(iph);
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_IPIP;
+ if (t->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
- dsfield = ipv4_get_dsfield(iph);
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+ return -1;
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPIP;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.flowlabel = key->label;
+ } else {
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ encap_limit = t->parms.encap_limit;
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
- & IPV6_TCLASS_MASK;
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPIP;
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+ & IPV6_TCLASS_MASK;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
+ }
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
@@ -1220,29 +1273,47 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
- offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
- if (offset > 0) {
- struct ipv6_tlv_tnl_enc_lim *tel;
- tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
- if (tel->encap_limit == 0) {
- icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2);
+ dsfield = ipv6_get_dsfield(ipv6h);
+
+ if (t->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
return -1;
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPV6;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.flowlabel = key->label;
+ } else {
+ offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ if (offset > 0) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+
+ tel = (void *)&skb_network_header(skb)[offset];
+ if (tel->encap_limit == 0) {
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_HDR_FIELD, offset + 2);
+ return -1;
+ }
+ encap_limit = tel->encap_limit - 1;
+ } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+ encap_limit = t->parms.encap_limit;
}
- encap_limit = tel->encap_limit - 1;
- } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_IPV6;
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPV6;
- dsfield = ipv6_get_dsfield(ipv6h);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- fl6.flowlabel |= ip6_flowlabel(ipv6h);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+ fl6.flowlabel |= ip6_flowlabel(ipv6h);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
+ }
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
@@ -1741,6 +1812,10 @@ static int ip6_tnl_dev_init(struct net_device *dev)
if (err)
return err;
ip6_tnl_link_config(t);
+ if (t->parms.collect_md) {
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
return 0;
}
@@ -1811,6 +1886,9 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
if (data[IFLA_IPTUN_PROTO])
parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+
+ if (data[IFLA_IPTUN_COLLECT_METADATA])
+ parms->collect_md = true;
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
@@ -1850,6 +1928,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net *net = dev_net(dev);
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct ip6_tnl *nt, *t;
struct ip_tunnel_encap ipencap;
@@ -1864,9 +1943,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
ip6_tnl_netlink_parms(data, &nt->parms);
- t = ip6_tnl_locate(net, &nt->parms, 0);
- if (!IS_ERR(t))
- return -EEXIST;
+ if (nt->parms.collect_md) {
+ if (rtnl_dereference(ip6n->collect_md_tun))
+ return -EEXIST;
+ } else {
+ t = ip6_tnl_locate(net, &nt->parms, 0);
+ if (!IS_ERR(t))
+ return -EEXIST;
+ }
return ip6_tnl_create2(dev);
}
@@ -1890,6 +1974,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
ip6_tnl_netlink_parms(data, &p);
+ if (p.collect_md)
+ return -EINVAL;
t = ip6_tnl_locate(net, &p, 0);
if (!IS_ERR(t)) {
@@ -1937,6 +2023,8 @@ static size_t ip6_tnl_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_IPTUN_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -1955,16 +2043,15 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
- tunnel->encap.type) ||
- nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
- tunnel->encap.sport) ||
- nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
- tunnel->encap.dport) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
- tunnel->encap.flags))
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
goto nla_put_failure;
+ if (parm->collect_md)
+ if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -1992,6 +2079,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
@@ -2033,7 +2121,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
if (dev->rtnl_link_ops == &ip6_link_ops)
unregister_netdevice_queue(dev, &list);
- for (h = 0; h < HASH_SIZE; h++) {
+ for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t) {
/* If dev is in the same netns, it has already
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5bd3afdcc771..8a02ca8a11af 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -50,14 +50,14 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#define HASH_SIZE_SHIFT 5
-#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+#define IP6_VTI_HASH_SIZE_SHIFT 5
+#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)
static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
- return hash_32(hash, HASH_SIZE_SHIFT);
+ return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT);
}
static int vti6_dev_init(struct net_device *dev);
@@ -69,7 +69,7 @@ struct vti6_net {
/* the vti6 tunnel fallback device */
struct net_device *fb_tnl_dev;
/* lists for storing tunnels in use */
- struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+ struct ip6_tnl __rcu *tnls_r_l[IP6_VTI_HASH_SIZE];
struct ip6_tnl __rcu *tnls_wc[1];
struct ip6_tnl __rcu **tnls[2];
};
@@ -1051,7 +1051,7 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
struct ip6_tnl *t;
LIST_HEAD(list);
- for (h = 0; h < HASH_SIZE; h++) {
+ for (h = 0; h < IP6_VTI_HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t) {
unregister_netdevice_queue(t->dev, &list);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d64ee7e83664..75c1fc54f188 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1739,6 +1739,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
continue;
}
+ /* Per RFC 3810 section 6.1, source-list change records should not
+ * be sent when there is a filter mode change.
+ */
+ if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
+ (!gdeleted && pmc->mca_crcount)) &&
+ (type == MLD2_ALLOW_NEW_SOURCES ||
+ type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
+ goto decrease_sf_crcount;
+
/* clear marks on query responses */
if (isquery)
psf->sf_gsresp = 0;
@@ -1766,6 +1775,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
scount++; stotal++;
if ((type == MLD2_ALLOW_NEW_SOURCES ||
type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
+decrease_sf_crcount:
psf->sf_crcount--;
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
if (psf_prev)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index fe65cdc28a45..d8e671457d10 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -67,7 +67,6 @@
#include <net/flow.h>
#include <net/ip6_checksum.h>
#include <net/inet_common.h>
-#include <net/l3mdev.h>
#include <linux/proc_fs.h>
#include <linux/netfilter.h>
@@ -457,11 +456,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
if (!dst) {
struct flowi6 fl6;
- int oif = l3mdev_fib_oif(skb->dev);
+ int oif = skb->dev->ifindex;
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
- if (oif != skb->dev->ifindex)
- fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
dst = icmp6_dst_alloc(skb->dev, &fl6);
if (IS_ERR(dst)) {
kfree_skb(skb);
@@ -1538,7 +1535,6 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
int rd_len;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL,
ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
- int oif = l3mdev_fib_oif(dev);
bool ret;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
@@ -1555,10 +1551,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
}
icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
- &saddr_buf, &ipv6_hdr(skb)->saddr, oif);
-
- if (oif != skb->dev->ifindex)
- fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
+ &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 552fac2f390a..55aacea24396 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -190,7 +190,7 @@ static struct nf_loginfo trace_loginfo = {
.u = {
.log = {
.level = LOGLEVEL_WARNING,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 1aa5848764a7..963ee3848675 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -115,7 +115,7 @@ static unsigned int ipv6_helper(void *priv,
help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 660bc10c7a9c..f5a61bc3ec2b 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -165,7 +165,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum);
/* Ordinarily, we'd expect the inverted tupleproto, but it's
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 8dd869642f45..57d86066a13b 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -30,7 +30,7 @@ static struct nf_loginfo default_loginfo = {
.u = {
.log = {
.level = LOGLEVEL_NOTICE,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
@@ -52,7 +52,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
- logflags = NF_LOG_MASK;
+ logflags = NF_LOG_DEFAULT_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (ih == NULL) {
@@ -84,7 +84,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
}
/* Max length: 48 "OPT (...) " */
- if (logflags & XT_LOG_IPOPT)
+ if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, "OPT ( ");
switch (currenthdr) {
@@ -121,7 +121,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
- if (logflags & XT_LOG_IPOPT)
+ if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, ")");
return;
}
@@ -129,7 +129,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
break;
/* Max Length */
case IPPROTO_AH:
- if (logflags & XT_LOG_IPOPT) {
+ if (logflags & NF_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
@@ -161,7 +161,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
hdrlen = (hp->hdrlen+2)<<2;
break;
case IPPROTO_ESP:
- if (logflags & XT_LOG_IPOPT) {
+ if (logflags & NF_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
@@ -194,7 +194,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
- if (logflags & XT_LOG_IPOPT)
+ if (logflags & NF_LOG_IPOPT)
nf_log_buf_add(m, ") ");
currenthdr = hp->nexthdr;
@@ -277,7 +277,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
}
/* Max length: 15 "UID=4294967295 " */
- if ((logflags & XT_LOG_UID) && recurse)
+ if ((logflags & NF_LOG_UID) && recurse)
nf_log_dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
@@ -295,7 +295,7 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
- if (!(logflags & XT_LOG_MACDECODE))
+ if (!(logflags & NF_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
@@ -379,8 +379,7 @@ static struct nf_logger nf_ip6_logger __read_mostly = {
static int __net_init nf_log_ipv6_net_init(struct net *net)
{
- nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
- return 0;
+ return nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
}
static void __net_exit nf_log_ipv6_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index 30b22f4dff55..d6e4ba5de916 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -22,9 +22,7 @@ static unsigned int nft_do_chain_ipv6(void *priv,
{
struct nft_pktinfo pkt;
- /* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
- return NF_DROP;
+ nft_set_pktinfo_ipv6(&pkt, skb, state);
return nft_do_chain(&pkt, priv);
}
@@ -102,7 +100,10 @@ static int __init nf_tables_ipv6_init(void)
{
int ret;
- nft_register_chain_type(&filter_ipv6);
+ ret = nft_register_chain_type(&filter_ipv6);
+ if (ret < 0)
+ return ret;
+
ret = register_pernet_subsys(&nf_tables_ipv6_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_ipv6);
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 2535223ba956..f2727475895e 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -33,9 +33,7 @@ static unsigned int nf_route_table_hook(void *priv,
u32 mark, flowlabel;
int err;
- /* malformed packet, drop it */
- if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
- return NF_DROP;
+ nft_set_pktinfo_ipv6(&pkt, skb, state);
/* save source/dest address, mark, hoplimit, flowlabel, priority */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 462f2a76b5c2..7cca8ac66fe9 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -148,6 +148,13 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
ipv6_hdr(skb)->payload_len = htons(len);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 679253d0af84..cc8e3ae9ca73 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -30,6 +30,11 @@
#include <net/transp_v6.h>
#include <net/ipv6.h>
+#define MAX4(a, b, c, d) \
+ max_t(u32, max_t(u32, a, b), max_t(u32, c, d))
+#define SNMP_MIB_MAX MAX4(UDP_MIB_MAX, TCP_MIB_MAX, \
+ IPSTATS_MIB_MAX, ICMP_MIB_MAX)
+
static int sockstat6_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
@@ -191,25 +196,34 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
atomic_long_t *smib,
const struct snmp_mib *itemlist)
{
+ unsigned long buff[SNMP_MIB_MAX];
int i;
- unsigned long val;
- for (i = 0; itemlist[i].name; i++) {
- val = pcpumib ?
- snmp_fold_field(pcpumib, itemlist[i].entry) :
- atomic_long_read(smib + itemlist[i].entry);
- seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, val);
+ if (pcpumib) {
+ memset(buff, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+
+ snmp_get_cpu_field_batch(buff, itemlist, pcpumib);
+ for (i = 0; itemlist[i].name; i++)
+ seq_printf(seq, "%-32s\t%lu\n",
+ itemlist[i].name, buff[i]);
+ } else {
+ for (i = 0; itemlist[i].name; i++)
+ seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
+ atomic_long_read(smib + itemlist[i].entry));
}
}
static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
const struct snmp_mib *itemlist, size_t syncpoff)
{
+ u64 buff64[SNMP_MIB_MAX];
int i;
+ memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+
+ snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
for (i = 0; itemlist[i].name; i++)
- seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name,
- snmp_fold_field64(mib, itemlist[i].entry, syncpoff));
+ seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name, buff64[i]);
}
static int snmp6_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 590dd1f7746f..54404f08efcc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -653,6 +653,13 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
if (err)
goto error_fault;
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 269218aacbea..bdbc38e8bf29 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1147,15 +1147,16 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}
-static struct dst_entry *ip6_route_input_lookup(struct net *net,
- struct net_device *dev,
- struct flowi6 *fl6, int flags)
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags)
{
if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
+EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
void ip6_route_input(struct sk_buff *skb)
{
@@ -1164,7 +1165,7 @@ void ip6_route_input(struct sk_buff *skb)
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct ip_tunnel_info *tun_info;
struct flowi6 fl6 = {
- .flowi6_iif = l3mdev_fib_oif(skb->dev),
+ .flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
@@ -1188,12 +1189,15 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags)
{
- struct dst_entry *dst;
bool any_src;
- dst = l3mdev_get_rt6_dst(net, fl6);
- if (dst)
- return dst;
+ if (rt6_need_strict(&fl6->daddr)) {
+ struct dst_entry *dst;
+
+ dst = l3mdev_link_scope_lookup(net, fl6);
+ if (dst)
+ return dst;
+ }
fl6->flowi6_iif = LOOPBACK_IFINDEX;
@@ -1604,7 +1608,9 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
rcu_read_unlock();
out:
- return min_t(unsigned int, mtu, IP6_MAX_MTU);
+ mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
+
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static struct dst_entry *icmp6_dst_gc_list;
@@ -2565,8 +2571,16 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
u32 tb_id;
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT);
+ struct net_device *dev = net->loopback_dev;
+ struct rt6_info *rt;
+
+ /* use L3 Master device as loopback for host routes if device
+ * is enslaved and address is not link local or multicast
+ */
+ if (!rt6_need_strict(addr))
+ dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
+
+ rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
@@ -3347,11 +3361,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
} else {
fl6.flowi6_oif = oif;
- if (netif_index_is_l3_master(net, oif)) {
- fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
- FLOWI_FLAG_SKIP_NH_OIF;
- }
-
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
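
ip6_mtu() now clamps to IP6_MAX_MTU first and then subtracts the lightweight-tunnel headroom, so routes with encapsulation report an MTU that leaves room for the encap header. The arithmetic as a standalone sketch, with a hypothetical headroom value:

#include <stdio.h>

#define IP6_MAX_MTU (0xFFFF + 40)   /* 65535 payload + IPv6 header */

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Clamp the path MTU, then reserve room for tunnel encapsulation. */
static unsigned int effective_mtu(unsigned int mtu, unsigned int encap_headroom)
{
    mtu = min_u(mtu, IP6_MAX_MTU);
    return mtu - encap_headroom;
}

int main(void)
{
    printf("%u\n", effective_mtu(1500, 0));  /* plain route: 1500 */
    printf("%u\n", effective_mtu(1500, 8));  /* 8-byte encap: 1492 */
    return 0;
}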
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 182b6a9be29d..b1cdf8009d29 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -62,7 +62,7 @@
For comments look at net/ipv4/ip_gre.c --ANK
*/
-#define HASH_SIZE 16
+#define IP6_SIT_HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
static bool log_ecn_error = true;
@@ -78,9 +78,9 @@ static struct rtnl_link_ops sit_link_ops __read_mostly;
static int sit_net_id __read_mostly;
struct sit_net {
- struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r_l[IP6_SIT_HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[IP6_SIT_HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[IP6_SIT_HASH_SIZE];
struct ip_tunnel __rcu *tunnels_wc[1];
struct ip_tunnel __rcu **tunnels[4];
@@ -1126,7 +1126,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
}
#endif
-bool ipip6_valid_ip_proto(u8 ipproto)
+static bool ipip6_valid_ip_proto(u8 ipproto)
{
return ipproto == IPPROTO_IPV6 ||
ipproto == IPPROTO_IPIP ||
@@ -1783,7 +1783,7 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
for (prio = 1; prio < 4; prio++) {
int h;
- for (h = 0; h < HASH_SIZE; h++) {
+ for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 94f4f89d73e7..54cf7197c7ab 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -671,6 +671,7 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
@@ -817,12 +818,8 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
fl6.flowi6_proto = IPPROTO_TCP;
if (rt6_need_strict(&fl6.daddr) && !oif)
fl6.flowi6_oif = tcp_v6_iif(skb);
- else {
- if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
- oif = skb->skb_iif;
-
- fl6.flowi6_oif = oif;
- }
+ else
+ fl6.flowi6_oif = oif ? : skb->skb_iif;
fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
@@ -1415,6 +1412,7 @@ process:
sk = req->rsk_listener;
tcp_v6_fill_cb(skb, hdr, th);
if (tcp_v6_inbound_md5_hash(sk, skb)) {
+ sk_drops_add(sk, skb);
reqsk_put(req);
goto discard_it;
}
@@ -1471,10 +1469,7 @@ process:
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
- } else if (unlikely(sk_add_backlog(sk, skb,
- sk->sk_rcvbuf + sk->sk_sndbuf))) {
- bh_unlock_sock(sk);
- __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
+ } else if (tcp_add_backlog(sk, skb)) {
goto discard_and_relse;
}
bh_unlock_sock(sk);
@@ -1868,17 +1863,6 @@ void tcp6_proc_exit(struct net *net)
}
#endif
-static void tcp_v6_clear_sk(struct sock *sk, int size)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- /* we do not want to clear pinet6 field, because of RCU lookups */
- sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
-
- size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
- memset(&inet->pinet6 + 1, 0, size);
-}
-
struct proto tcpv6_prot = {
.name = "TCPv6",
.owner = THIS_MODULE,
@@ -1920,7 +1904,6 @@ struct proto tcpv6_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
- .clear_sk = tcp_v6_clear_sk,
.diag_destroy = tcp_abort,
};
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 19ac3a1c308d..9aa7c1c7a9ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1424,17 +1424,6 @@ void udp6_proc_exit(struct net *net)
}
#endif /* CONFIG_PROC_FS */
-void udp_v6_clear_sk(struct sock *sk, int size)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- /* we do not want to clear pinet6 field, because of RCU lookups */
- sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
-
- size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
- memset(&inet->pinet6 + 1, 0, size);
-}
-
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
@@ -1465,7 +1454,7 @@ struct proto udpv6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = udp_v6_clear_sk,
+ .diag_destroy = udp_abort,
};
static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 0682c031ccdc..f6eb1ab34f4b 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -29,8 +29,6 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udpv6_destroy_sock(struct sock *sk);
-void udp_v6_clear_sk(struct sock *sk, int size);
-
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v);
#endif
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index fd6ef414899b..47d0d2b87106 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -55,7 +55,6 @@ struct proto udplitev6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
- .clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 70a86adad875..e0f71c01d728 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -134,7 +134,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
nexthdr = nh[nhoff];
if (skb_dst(skb))
- oif = l3mdev_fib_oif(skb_dst(skb)->dev);
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index ccc244406fb9..391c3cbd2eed 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -845,9 +845,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
if (sock->state != SS_UNCONNECTED)
goto out;
- if ((sk = sock->sk) == NULL)
- goto out;
-
err = -EOPNOTSUPP;
if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
(sk->sk_type != SOCK_DGRAM))
diff --git a/net/kcm/Kconfig b/net/kcm/Kconfig
index 5db94d940ecc..87fca36e6c47 100644
--- a/net/kcm/Kconfig
+++ b/net/kcm/Kconfig
@@ -3,6 +3,7 @@ config AF_KCM
tristate "KCM sockets"
depends on INET
select BPF_SYSCALL
+ select STREAM_PARSER
---help---
KCM (Kernel Connection Multiplexor) sockets provide a method
for multiplexing messages of a message based application
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 16c2e03bd388..bf75c9231cca 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -155,8 +155,8 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
seq_printf(seq,
" psock-%-5u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8d ",
psock->index,
- psock->stats.rx_msgs,
- psock->stats.rx_bytes,
+ psock->strp.stats.rx_msgs,
+ psock->strp.stats.rx_bytes,
psock->stats.tx_msgs,
psock->stats.tx_bytes,
psock->sk->sk_receive_queue.qlen,
@@ -170,14 +170,27 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
if (psock->tx_stopped)
seq_puts(seq, "TxStop ");
- if (psock->rx_stopped)
+ if (psock->strp.rx_stopped)
seq_puts(seq, "RxStop ");
if (psock->tx_kcm)
seq_printf(seq, "Rsvd-%d ", psock->tx_kcm->index);
- if (psock->ready_rx_msg)
- seq_puts(seq, "RdyRx ");
+ if (!psock->strp.rx_paused && !psock->ready_rx_msg) {
+ if (psock->sk->sk_receive_queue.qlen) {
+ if (psock->strp.rx_need_bytes)
+ seq_printf(seq, "RxWait=%u ",
+ psock->strp.rx_need_bytes);
+ else
+ seq_printf(seq, "RxWait ");
+ }
+ } else {
+ if (psock->strp.rx_paused)
+ seq_puts(seq, "RxPause ");
+
+ if (psock->ready_rx_msg)
+ seq_puts(seq, "RdyRx ");
+ }
seq_puts(seq, "\n");
}
@@ -275,6 +288,7 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
{
struct kcm_psock_stats psock_stats;
struct kcm_mux_stats mux_stats;
+ struct strp_aggr_stats strp_stats;
struct kcm_mux *mux;
struct kcm_psock *psock;
struct net *net = seq->private;
@@ -282,20 +296,28 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
memset(&mux_stats, 0, sizeof(mux_stats));
memset(&psock_stats, 0, sizeof(psock_stats));
+ memset(&strp_stats, 0, sizeof(strp_stats));
mutex_lock(&knet->mutex);
aggregate_mux_stats(&knet->aggregate_mux_stats, &mux_stats);
aggregate_psock_stats(&knet->aggregate_psock_stats,
&psock_stats);
+ aggregate_strp_stats(&knet->aggregate_strp_stats,
+ &strp_stats);
list_for_each_entry_rcu(mux, &knet->mux_list, kcm_mux_list) {
spin_lock_bh(&mux->lock);
aggregate_mux_stats(&mux->stats, &mux_stats);
aggregate_psock_stats(&mux->aggregate_psock_stats,
&psock_stats);
- list_for_each_entry(psock, &mux->psocks, psock_list)
+ aggregate_strp_stats(&mux->aggregate_strp_stats,
+ &strp_stats);
+ list_for_each_entry(psock, &mux->psocks, psock_list) {
aggregate_psock_stats(&psock->stats, &psock_stats);
+ save_strp_stats(&psock->strp, &strp_stats);
+ }
+
spin_unlock_bh(&mux->lock);
}
@@ -328,7 +350,7 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
mux_stats.rx_ready_drops);
seq_printf(seq,
- "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n",
+ "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n",
"Psock",
"RX-Msgs",
"RX-Bytes",
@@ -337,6 +359,8 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
"Reserved",
"Unreserved",
"RX-Aborts",
+ "RX-Intr",
+ "RX-Unrecov",
"RX-MemFail",
"RX-NeedMor",
"RX-BadLen",
@@ -345,20 +369,22 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
"TX-Aborts");
seq_printf(seq,
- "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u %-10u %-10u\n",
+ "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u\n",
"",
- psock_stats.rx_msgs,
- psock_stats.rx_bytes,
+ strp_stats.rx_msgs,
+ strp_stats.rx_bytes,
psock_stats.tx_msgs,
psock_stats.tx_bytes,
psock_stats.reserved,
psock_stats.unreserved,
- psock_stats.rx_aborts,
- psock_stats.rx_mem_fail,
- psock_stats.rx_need_more_hdr,
- psock_stats.rx_bad_hdr_len,
- psock_stats.rx_msg_too_big,
- psock_stats.rx_msg_timeouts,
+ strp_stats.rx_aborts,
+ strp_stats.rx_interrupted,
+ strp_stats.rx_unrecov_intr,
+ strp_stats.rx_mem_fail,
+ strp_stats.rx_need_more_hdr,
+ strp_stats.rx_bad_hdr_len,
+ strp_stats.rx_msg_too_big,
+ strp_stats.rx_msg_timeouts,
psock_stats.tx_aborts);
return 0;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 411693288648..7e08a4d3d77d 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1,3 +1,13 @@
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
@@ -17,7 +27,6 @@
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
-#include <net/tcp.h>
#include <uapi/linux/kcm.h>
unsigned int kcm_net_id;
@@ -36,38 +45,12 @@ static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
return (struct kcm_tx_msg *)skb->cb;
}
-static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
-{
- return (struct kcm_rx_msg *)((void *)skb->cb +
- offsetof(struct qdisc_skb_cb, data));
-}
-
static void report_csk_error(struct sock *csk, int err)
{
csk->sk_err = EPIPE;
csk->sk_error_report(csk);
}
-/* Callback lock held */
-static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
- struct sk_buff *skb)
-{
- struct sock *csk = psock->sk;
-
- /* Unrecoverable error in receive */
-
- del_timer(&psock->rx_msg_timer);
-
- if (psock->rx_stopped)
- return;
-
- psock->rx_stopped = 1;
- KCM_STATS_INCR(psock->stats.rx_aborts);
-
- /* Report an error on the lower socket */
- report_csk_error(csk, err);
-}
-
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
bool wakeup_kcm)
{
@@ -110,12 +93,13 @@ static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
struct kcm_psock *psock)
{
- KCM_STATS_ADD(mux->stats.rx_bytes,
- psock->stats.rx_bytes - psock->saved_rx_bytes);
+ STRP_STATS_ADD(mux->stats.rx_bytes,
+ psock->strp.stats.rx_bytes -
+ psock->saved_rx_bytes);
mux->stats.rx_msgs +=
- psock->stats.rx_msgs - psock->saved_rx_msgs;
- psock->saved_rx_msgs = psock->stats.rx_msgs;
- psock->saved_rx_bytes = psock->stats.rx_bytes;
+ psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
+ psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
+ psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
@@ -168,11 +152,11 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
*/
list_del(&psock->psock_ready_list);
psock->ready_rx_msg = NULL;
-
/* Commit clearing of ready_rx_msg for queuing work */
smp_mb();
- queue_work(kcm_wq, &psock->rx_work);
+ strp_unpause(&psock->strp);
+ strp_check_rcv(&psock->strp);
}
/* Buffer limit is okay now, add to ready list */
@@ -286,6 +270,7 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
if (list_empty(&mux->kcm_rx_waiters)) {
psock->ready_rx_msg = head;
+ strp_pause(&psock->strp);
list_add_tail(&psock->psock_ready_list,
&mux->psocks_ready);
spin_unlock_bh(&mux->rx_lock);
@@ -354,346 +339,60 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
spin_unlock_bh(&mux->rx_lock);
}
-static void kcm_start_rx_timer(struct kcm_psock *psock)
-{
- if (psock->sk->sk_rcvtimeo)
- mod_timer(&psock->rx_msg_timer, psock->sk->sk_rcvtimeo);
-}
-
-/* Macro to invoke filter function. */
-#define KCM_RUN_FILTER(prog, ctx) \
- (*prog->bpf_func)(ctx, prog->insnsi)
-
-/* Lower socket lock held */
-static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
- unsigned int orig_offset, size_t orig_len)
-{
- struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
- struct kcm_rx_msg *rxm;
- struct kcm_sock *kcm;
- struct sk_buff *head, *skb;
- size_t eaten = 0, cand_len;
- ssize_t extra;
- int err;
- bool cloned_orig = false;
-
- if (psock->ready_rx_msg)
- return 0;
-
- head = psock->rx_skb_head;
- if (head) {
- /* Message already in progress */
-
- rxm = kcm_rx_msg(head);
- if (unlikely(rxm->early_eaten)) {
- /* Already some number of bytes on the receive sock
- * data saved in rx_skb_head, just indicate they
- * are consumed.
- */
- eaten = orig_len <= rxm->early_eaten ?
- orig_len : rxm->early_eaten;
- rxm->early_eaten -= eaten;
-
- return eaten;
- }
-
- if (unlikely(orig_offset)) {
- /* Getting data with a non-zero offset when a message is
- * in progress is not expected. If it does happen, we
- * need to clone and pull since we can't deal with
- * offsets in the skbs for a message expect in the head.
- */
- orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
- if (!orig_skb) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- desc->error = -ENOMEM;
- return 0;
- }
- if (!pskb_pull(orig_skb, orig_offset)) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- kfree_skb(orig_skb);
- desc->error = -ENOMEM;
- return 0;
- }
- cloned_orig = true;
- orig_offset = 0;
- }
-
- if (!psock->rx_skb_nextp) {
- /* We are going to append to the frags_list of head.
- * Need to unshare the frag_list.
- */
- err = skb_unclone(head, GFP_ATOMIC);
- if (err) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- desc->error = err;
- return 0;
- }
-
- if (unlikely(skb_shinfo(head)->frag_list)) {
- /* We can't append to an sk_buff that already
- * has a frag_list. We create a new head, point
- * the frag_list of that to the old head, and
- * then are able to use the old head->next for
- * appending to the message.
- */
- if (WARN_ON(head->next)) {
- desc->error = -EINVAL;
- return 0;
- }
-
- skb = alloc_skb(0, GFP_ATOMIC);
- if (!skb) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- desc->error = -ENOMEM;
- return 0;
- }
- skb->len = head->len;
- skb->data_len = head->len;
- skb->truesize = head->truesize;
- *kcm_rx_msg(skb) = *kcm_rx_msg(head);
- psock->rx_skb_nextp = &head->next;
- skb_shinfo(skb)->frag_list = head;
- psock->rx_skb_head = skb;
- head = skb;
- } else {
- psock->rx_skb_nextp =
- &skb_shinfo(head)->frag_list;
- }
- }
- }
-
- while (eaten < orig_len) {
- /* Always clone since we will consume something */
- skb = skb_clone(orig_skb, GFP_ATOMIC);
- if (!skb) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- desc->error = -ENOMEM;
- break;
- }
-
- cand_len = orig_len - eaten;
-
- head = psock->rx_skb_head;
- if (!head) {
- head = skb;
- psock->rx_skb_head = head;
- /* Will set rx_skb_nextp on next packet if needed */
- psock->rx_skb_nextp = NULL;
- rxm = kcm_rx_msg(head);
- memset(rxm, 0, sizeof(*rxm));
- rxm->offset = orig_offset + eaten;
- } else {
- /* Unclone since we may be appending to an skb that we
- * already share a frag_list with.
- */
- err = skb_unclone(skb, GFP_ATOMIC);
- if (err) {
- KCM_STATS_INCR(psock->stats.rx_mem_fail);
- desc->error = err;
- break;
- }
-
- rxm = kcm_rx_msg(head);
- *psock->rx_skb_nextp = skb;
- psock->rx_skb_nextp = &skb->next;
- head->data_len += skb->len;
- head->len += skb->len;
- head->truesize += skb->truesize;
- }
-
- if (!rxm->full_len) {
- ssize_t len;
-
- len = KCM_RUN_FILTER(psock->bpf_prog, head);
-
- if (!len) {
- /* Need more header to determine length */
- if (!rxm->accum_len) {
- /* Start RX timer for new message */
- kcm_start_rx_timer(psock);
- }
- rxm->accum_len += cand_len;
- eaten += cand_len;
- KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
- WARN_ON(eaten != orig_len);
- break;
- } else if (len > psock->sk->sk_rcvbuf) {
- /* Message length exceeds maximum allowed */
- KCM_STATS_INCR(psock->stats.rx_msg_too_big);
- desc->error = -EMSGSIZE;
- psock->rx_skb_head = NULL;
- kcm_abort_rx_psock(psock, EMSGSIZE, head);
- break;
- } else if (len <= (ssize_t)head->len -
- skb->len - rxm->offset) {
- /* Length must be into new skb (and also
- * greater than zero)
- */
- KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
- desc->error = -EPROTO;
- psock->rx_skb_head = NULL;
- kcm_abort_rx_psock(psock, EPROTO, head);
- break;
- }
-
- rxm->full_len = len;
- }
-
- extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
-
- if (extra < 0) {
- /* Message not complete yet. */
- if (rxm->full_len - rxm->accum_len >
- tcp_inq(psock->sk)) {
- /* Don't have the whole messages in the socket
- * buffer. Set psock->rx_need_bytes to wait for
- * the rest of the message. Also, set "early
- * eaten" since we've already buffered the skb
- * but don't consume yet per tcp_read_sock.
- */
-
- if (!rxm->accum_len) {
- /* Start RX timer for new message */
- kcm_start_rx_timer(psock);
- }
-
- psock->rx_need_bytes = rxm->full_len -
- rxm->accum_len;
- rxm->accum_len += cand_len;
- rxm->early_eaten = cand_len;
- KCM_STATS_ADD(psock->stats.rx_bytes, cand_len);
- desc->count = 0; /* Stop reading socket */
- break;
- }
- rxm->accum_len += cand_len;
- eaten += cand_len;
- WARN_ON(eaten != orig_len);
- break;
- }
-
- /* Positive extra indicates ore bytes than needed for the
- * message
- */
-
- WARN_ON(extra > cand_len);
-
- eaten += (cand_len - extra);
-
- /* Hurray, we have a new message! */
- del_timer(&psock->rx_msg_timer);
- psock->rx_skb_head = NULL;
- KCM_STATS_INCR(psock->stats.rx_msgs);
-
-try_queue:
- kcm = reserve_rx_kcm(psock, head);
- if (!kcm) {
- /* Unable to reserve a KCM, message is held in psock. */
- break;
- }
-
- if (kcm_queue_rcv_skb(&kcm->sk, head)) {
- /* Should mean socket buffer full */
- unreserve_rx_kcm(psock, false);
- goto try_queue;
- }
- }
-
- if (cloned_orig)
- kfree_skb(orig_skb);
-
- KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
-
- return eaten;
-}
-
-/* Called with lock held on lower socket */
-static int psock_tcp_read_sock(struct kcm_psock *psock)
-{
- read_descriptor_t desc;
-
- desc.arg.data = psock;
- desc.error = 0;
- desc.count = 1; /* give more than one skb per call */
-
- /* sk should be locked here, so okay to do tcp_read_sock */
- tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
-
- unreserve_rx_kcm(psock, true);
-
- return desc.error;
-}
-
/* Lower sock lock held */
-static void psock_tcp_data_ready(struct sock *sk)
+static void psock_data_ready(struct sock *sk)
{
struct kcm_psock *psock;
read_lock_bh(&sk->sk_callback_lock);
psock = (struct kcm_psock *)sk->sk_user_data;
- if (unlikely(!psock || psock->rx_stopped))
- goto out;
-
- if (psock->ready_rx_msg)
- goto out;
+ if (likely(psock))
+ strp_data_ready(&psock->strp);
- if (psock->rx_need_bytes) {
- if (tcp_inq(sk) >= psock->rx_need_bytes)
- psock->rx_need_bytes = 0;
- else
- goto out;
- }
-
- if (psock_tcp_read_sock(psock) == -ENOMEM)
- queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
-
-out:
read_unlock_bh(&sk->sk_callback_lock);
}
-static void do_psock_rx_work(struct kcm_psock *psock)
+/* Called with lower sock held */
+static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
- read_descriptor_t rd_desc;
- struct sock *csk = psock->sk;
-
- /* We need the read lock to synchronize with psock_tcp_data_ready. We
- * need the socket lock for calling tcp_read_sock.
- */
- lock_sock(csk);
- read_lock_bh(&csk->sk_callback_lock);
-
- if (unlikely(csk->sk_user_data != psock))
- goto out;
-
- if (unlikely(psock->rx_stopped))
- goto out;
-
- if (psock->ready_rx_msg)
- goto out;
-
- rd_desc.arg.data = psock;
+ struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
+ struct kcm_sock *kcm;
- if (psock_tcp_read_sock(psock) == -ENOMEM)
- queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
+try_queue:
+ kcm = reserve_rx_kcm(psock, skb);
+ if (!kcm) {
+ /* Unable to reserve a KCM, message is held in psock and strp
+ * is paused.
+ */
+ return;
+ }
-out:
- read_unlock_bh(&csk->sk_callback_lock);
- release_sock(csk);
+ if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
+ /* Should mean socket buffer full */
+ unreserve_rx_kcm(psock, false);
+ goto try_queue;
+ }
}
-static void psock_rx_work(struct work_struct *w)
+static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
- do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
+ struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
+ struct bpf_prog *prog = psock->bpf_prog;
+
+ return (*prog->bpf_func)(skb, prog->insnsi);
}
-static void psock_rx_delayed_work(struct work_struct *w)
+static int kcm_read_sock_done(struct strparser *strp, int err)
{
- do_psock_rx_work(container_of(w, struct kcm_psock,
- rx_delayed_work.work));
+ struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
+
+ unreserve_rx_kcm(psock, true);
+
+ return err;
}
-static void psock_tcp_state_change(struct sock *sk)
+static void psock_state_change(struct sock *sk)
{
/* TCP only does a POLLIN for a half close. Do a POLLHUP here
* since application will normally not poll with POLLIN
@@ -703,7 +402,7 @@ static void psock_tcp_state_change(struct sock *sk)
report_csk_error(sk, EPIPE);
}
-static void psock_tcp_write_space(struct sock *sk)
+static void psock_write_space(struct sock *sk)
{
struct kcm_psock *psock;
struct kcm_mux *mux;
@@ -714,14 +413,13 @@ static void psock_tcp_write_space(struct sock *sk)
psock = (struct kcm_psock *)sk->sk_user_data;
if (unlikely(!psock))
goto out;
-
mux = psock->mux;
spin_lock_bh(&mux->lock);
/* Check if the socket is reserved so someone is waiting for sending. */
kcm = psock->tx_kcm;
- if (kcm)
+ if (kcm && !unlikely(kcm->tx_stopped))
queue_work(kcm_wq, &kcm->tx_work);
spin_unlock_bh(&mux->lock);
@@ -1412,7 +1110,7 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
struct kcm_sock *kcm = kcm_sk(sk);
int err = 0;
long timeo;
- struct kcm_rx_msg *rxm;
+ struct strp_rx_msg *rxm;
int copied = 0;
struct sk_buff *skb;
@@ -1426,7 +1124,7 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
/* Okay, have a message on the receive queue */
- rxm = kcm_rx_msg(skb);
+ rxm = strp_rx_msg(skb);
if (len > rxm->full_len)
len = rxm->full_len;
@@ -1462,19 +1160,6 @@ out:
return copied ? : err;
}
-static ssize_t kcm_sock_splice(struct sock *sk,
- struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
-{
- int ret;
-
- release_sock(sk);
- ret = splice_to_pipe(pipe, spd);
- lock_sock(sk);
-
- return ret;
-}
-
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
@@ -1482,7 +1167,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
long timeo;
- struct kcm_rx_msg *rxm;
+ struct strp_rx_msg *rxm;
int err = 0;
ssize_t copied;
struct sk_buff *skb;
@@ -1499,13 +1184,12 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
/* Okay, have a message on the receive queue */
- rxm = kcm_rx_msg(skb);
+ rxm = strp_rx_msg(skb);
if (len > rxm->full_len)
len = rxm->full_len;
- copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags,
- kcm_sock_splice);
+ copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
if (copied < 0) {
err = copied;
goto err_out;
@@ -1675,15 +1359,6 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
spin_unlock_bh(&mux->rx_lock);
}
-static void kcm_rx_msg_timeout(unsigned long arg)
-{
- struct kcm_psock *psock = (struct kcm_psock *)arg;
-
- /* Message assembly timed out */
- KCM_STATS_INCR(psock->stats.rx_msg_timeouts);
- kcm_abort_rx_psock(psock, ETIMEDOUT, NULL);
-}
-
static int kcm_attach(struct socket *sock, struct socket *csock,
struct bpf_prog *prog)
{
@@ -1693,19 +1368,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
struct kcm_psock *psock = NULL, *tpsock;
struct list_head *head;
int index = 0;
-
- if (csock->ops->family != PF_INET &&
- csock->ops->family != PF_INET6)
- return -EINVAL;
+ struct strp_callbacks cb;
+ int err;
csk = csock->sk;
if (!csk)
return -EINVAL;
- /* Only support TCP for now */
- if (csk->sk_protocol != IPPROTO_TCP)
- return -EINVAL;
-
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
if (!psock)
return -ENOMEM;
@@ -1714,11 +1383,16 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
psock->sk = csk;
psock->bpf_prog = prog;
- setup_timer(&psock->rx_msg_timer, kcm_rx_msg_timeout,
- (unsigned long)psock);
+ cb.rcv_msg = kcm_rcv_strparser;
+ cb.abort_parser = NULL;
+ cb.parse_msg = kcm_parse_func_strparser;
+ cb.read_sock_done = kcm_read_sock_done;
- INIT_WORK(&psock->rx_work, psock_rx_work);
- INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ kmem_cache_free(kcm_psockp, psock);
+ return err;
+ }
sock_hold(csk);
@@ -1727,9 +1401,9 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
csk->sk_user_data = psock;
- csk->sk_data_ready = psock_tcp_data_ready;
- csk->sk_write_space = psock_tcp_write_space;
- csk->sk_state_change = psock_tcp_state_change;
+ csk->sk_data_ready = psock_data_ready;
+ csk->sk_write_space = psock_write_space;
+ csk->sk_state_change = psock_state_change;
write_unlock_bh(&csk->sk_callback_lock);
/* Finished initialization, now add the psock to the MUX. */
@@ -1751,7 +1425,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
spin_unlock_bh(&mux->lock);
/* Schedule RX work in case there are already bytes queued */
- queue_work(kcm_wq, &psock->rx_work);
+ strp_check_rcv(&psock->strp);
return 0;
}
@@ -1791,6 +1465,8 @@ static void kcm_unattach(struct kcm_psock *psock)
struct sock *csk = psock->sk;
struct kcm_mux *mux = psock->mux;
+ lock_sock(csk);
+
/* Stop getting callbacks from TCP socket. After this there should
* be no way to reserve a kcm for this psock.
*/
@@ -1799,7 +1475,7 @@ static void kcm_unattach(struct kcm_psock *psock)
csk->sk_data_ready = psock->save_data_ready;
csk->sk_write_space = psock->save_write_space;
csk->sk_state_change = psock->save_state_change;
- psock->rx_stopped = 1;
+ strp_stop(&psock->strp);
if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
@@ -1822,18 +1498,17 @@ static void kcm_unattach(struct kcm_psock *psock)
write_unlock_bh(&csk->sk_callback_lock);
- del_timer_sync(&psock->rx_msg_timer);
- cancel_work_sync(&psock->rx_work);
- cancel_delayed_work_sync(&psock->rx_delayed_work);
+ /* Call strp_done without sock lock */
+ release_sock(csk);
+ strp_done(&psock->strp);
+ lock_sock(csk);
bpf_prog_put(psock->bpf_prog);
- kfree_skb(psock->rx_skb_head);
- psock->rx_skb_head = NULL;
-
spin_lock_bh(&mux->lock);
aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
+ save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
KCM_STATS_INCR(mux->stats.psock_unattach);
@@ -1876,6 +1551,8 @@ no_reserved:
fput(csk->sk_socket->file);
kmem_cache_free(kcm_psockp, psock);
}
+
+ release_sock(csk);
}
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
@@ -1916,6 +1593,7 @@ static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
spin_unlock_bh(&mux->lock);
+ /* Lower socket lock should already be held */
kcm_unattach(psock);
err = 0;
@@ -2073,6 +1751,8 @@ static void release_mux(struct kcm_mux *mux)
aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
aggregate_psock_stats(&mux->aggregate_psock_stats,
&knet->aggregate_psock_stats);
+ aggregate_strp_stats(&mux->aggregate_strp_stats,
+ &knet->aggregate_strp_stats);
list_del_rcu(&mux->kcm_mux_list);
knet->count--;
mutex_unlock(&knet->mutex);
@@ -2152,6 +1832,13 @@ static int kcm_release(struct socket *sock)
* it will just return.
*/
__skb_queue_purge(&sk->sk_write_queue);
+
+ /* Set tx_stopped. This is checked when psock is bound to a kcm and we
+ * get a writespace callback. This prevents further work being queued
+ * from the callback (unbinding the psock occurs after canceling work).
+ */
+ kcm->tx_stopped = 1;
+
release_sock(sk);
spin_lock_bh(&mux->lock);
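
With the move to the stream parser, framing is reduced to one question the parse callback must answer: how long is the next message, or are more header bytes needed? A standalone sketch of that contract for a hypothetical 2-byte big-endian length prefix (return 0 for "need more", negative for a bad header, otherwise the total frame length):

#include <stdio.h>
#include <stddef.h>

/* Framing contract used by the parser callback: return 0 when more
 * header bytes are needed, a negative value on a malformed header,
 * or the total length of the next message (header included). */
static int parse_len_prefixed(const unsigned char *buf, size_t avail,
                              size_t max_msg)
{
    size_t len;

    if (avail < 2)
        return 0;                    /* need more header */
    len = ((size_t)buf[0] << 8) | buf[1];
    if (len == 0 || 2 + len > max_msg)
        return -1;                   /* bad or oversized frame */
    return (int)(2 + len);           /* full message length */
}

int main(void)
{
    unsigned char stream[] = { 0x00, 0x03, 'a', 'b', 'c' };

    printf("partial header -> %d\n", parse_len_prefixed(stream, 1, 64));
    printf("full frame     -> %d\n", parse_len_prefixed(stream, 5, 64));
    return 0;
}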
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 5871537af387..2599af6378e4 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -139,7 +139,7 @@ struct l2tp_session {
void (*session_close)(struct l2tp_session *session);
void (*ref)(struct l2tp_session *session);
void (*deref)(struct l2tp_session *session);
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
void (*show)(struct seq_file *m, void *priv);
#endif
uint8_t priv[0]; /* private data */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 57fc5a46ce06..965f7e344cef 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -121,7 +121,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
}
-static struct net_device_ops l2tp_eth_netdev_ops = {
+static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
@@ -195,7 +195,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
}
}
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -268,7 +268,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
session->show = l2tp_eth_show;
#endif
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 1d02e8d20e56..bf3117771822 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -867,7 +867,7 @@ out:
return skb->len;
}
-static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
+static const struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
[L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
[L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
[L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 232cb92033e8..41d47bfda15c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -177,7 +177,7 @@ static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
if (!pskb_may_pull(skb, 2))
return 1;
- if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ if ((skb->data[0] == PPP_ALLSTATIONS) && (skb->data[1] == PPP_UI))
skb_pull(skb, 2);
return 0;
@@ -282,7 +282,6 @@ static void pppol2tp_session_sock_put(struct l2tp_session *session)
static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
size_t total_len)
{
- static const unsigned char ppph[2] = { 0xff, 0x03 };
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error;
@@ -312,7 +311,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
error = -ENOMEM;
skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
uhlen + session->hdr_len +
- sizeof(ppph) + total_len,
+ 2 + total_len, /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */
0, GFP_KERNEL);
if (!skb)
goto error_put_sess_tun;
@@ -325,8 +324,8 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
skb_reserve(skb, uhlen);
/* Add PPP header */
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
skb_put(skb, 2);
/* Copy user data into skb */
@@ -369,7 +368,6 @@ error:
*/
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
- static const u8 ppph[2] = { 0xff, 0x03 };
struct sock *sk = (struct sock *) chan->private;
struct sock *sk_tun;
struct l2tp_session *session;
@@ -398,14 +396,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
sizeof(struct iphdr) + /* IP header */
uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
session->hdr_len + /* L2TP header */
- sizeof(ppph); /* PPP header */
+ 2; /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */
if (skb_cow_head(skb, headroom))
goto abort_put_sess_tun;
/* Setup PPP header */
- __skb_push(skb, sizeof(ppph));
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
+ __skb_push(skb, 2);
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
@@ -440,7 +438,7 @@ static void pppol2tp_session_close(struct l2tp_session *session)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
if (sock) {
- inet_shutdown(sock, 2);
+ inet_shutdown(sock, SEND_SHUTDOWN);
/* Don't let the session go away before our socket does */
l2tp_session_inc_refcount(session);
}
@@ -554,7 +552,7 @@ out:
return error;
}
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void pppol2tp_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -725,7 +723,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
session->recv_skb = pppol2tp_recv;
session->session_close = pppol2tp_session_close;
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
session->show = pppol2tp_show;
#endif
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index c4a1c3e84e12..8da86ceca33d 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -100,15 +100,14 @@ u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
/**
- * l3mdev_get_rt6_dst - IPv6 route lookup based on flow. Returns
- * cached route for L3 master device if relevant
- * to flow
+ * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
+ * local and multicast addresses
* @net: network namespace for device index lookup
* @fl6: IPv6 flow struct for lookup
*/
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
- struct flowi6 *fl6)
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
+ struct flowi6 *fl6)
{
struct dst_entry *dst = NULL;
struct net_device *dev;
@@ -121,70 +120,15 @@ struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
dev = netdev_master_upper_dev_get_rcu(dev);
if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_rt6_dst)
- dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+ dev->l3mdev_ops->l3mdev_link_scope_lookup)
+ dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
rcu_read_unlock();
}
return dst;
}
-EXPORT_SYMBOL_GPL(l3mdev_get_rt6_dst);
-
-/**
- * l3mdev_get_saddr - get source address for a flow based on an interface
- * enslaved to an L3 master device
- * @net: network namespace for device index lookup
- * @ifindex: Interface index
- * @fl4: IPv4 flow struct
- */
-
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
-{
- struct net_device *dev;
- int rc = 0;
-
- if (ifindex) {
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev && netif_is_l3_slave(dev))
- dev = netdev_master_upper_dev_get_rcu(dev);
-
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr)
- rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
-
- rcu_read_unlock();
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
-
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
-{
- struct net_device *dev;
- int rc = 0;
-
- if (fl6->flowi6_oif) {
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
- if (dev && netif_is_l3_slave(dev))
- dev = netdev_master_upper_dev_get_rcu(dev);
-
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr6)
- rc = dev->l3mdev_ops->l3mdev_get_saddr6(dev, sk, fl6);
-
- rcu_read_unlock();
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(l3mdev_get_saddr6);
+EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
/**
* l3mdev_fib_rule_match - Determine if flowi references an
@@ -222,3 +166,38 @@ out:
return rc;
}
+
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+ struct net_device *dev;
+ int ifindex;
+
+ rcu_read_lock();
+
+ if (fl->flowi_oif) {
+ dev = dev_get_by_index_rcu(net, fl->flowi_oif);
+ if (dev) {
+ ifindex = l3mdev_master_ifindex_rcu(dev);
+ if (ifindex) {
+ fl->flowi_oif = ifindex;
+ fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+ goto out;
+ }
+ }
+ }
+
+ if (fl->flowi_iif) {
+ dev = dev_get_by_index_rcu(net, fl->flowi_iif);
+ if (dev) {
+ ifindex = l3mdev_master_ifindex_rcu(dev);
+ if (ifindex) {
+ fl->flowi_iif = ifindex;
+ fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+ }
+ }
+ }
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(l3mdev_update_flow);
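
l3mdev_update_flow() rewrites the flow's oif/iif to the L3 master's ifindex and marks the flow to skip the nexthop oif check. A toy standalone version of that steering logic, assuming a small hypothetical slave-to-master table:

#include <stdio.h>

#define FLAG_SKIP_NH_OIF 0x1
#define MAX_IF           8

/* Hypothetical slave -> L3 master mapping (0 means "no master"). */
static const int master_of[MAX_IF] = { 0, 0, 5, 5, 0, 0, 0, 0 };

struct flow {
    int oif;
    int iif;
    unsigned int flags;
};

/* Mirror of the idea above: if either interface is enslaved, steer the
 * lookup at the master device and skip the nexthop oif check. */
static void update_flow(struct flow *fl)
{
    if (fl->oif && master_of[fl->oif]) {
        fl->oif = master_of[fl->oif];
        fl->flags |= FLAG_SKIP_NH_OIF;
        return;
    }
    if (fl->iif && master_of[fl->iif]) {
        fl->iif = master_of[fl->iif];
        fl->flags |= FLAG_SKIP_NH_OIF;
    }
}

int main(void)
{
    struct flow fl = { .oif = 3, .iif = 0, .flags = 0 };

    update_flow(&fl);
    printf("oif=%d flags=%#x\n", fl.oif, fl.flags);
    return 0;
}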
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8ae3ed97d95c..db916cf51ffe 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -38,7 +38,7 @@ static u16 llc_ui_sap_link_no_max[256];
static struct sockaddr_llc llc_ui_addrnull;
static const struct proto_ops llc_ui_ops;
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout);
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout);
static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
@@ -551,7 +551,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
return rc;
}
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout)
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index afa94687d5e1..f6749dced021 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -304,10 +304,13 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
buf_size = IEEE80211_MAX_AMPDU_BUF;
/* make sure the size doesn't exceed the maximum supported by the hw */
- if (buf_size > local->hw.max_rx_aggregation_subframes)
- buf_size = local->hw.max_rx_aggregation_subframes;
+ if (buf_size > sta->sta.max_rx_aggregation_subframes)
+ buf_size = sta->sta.max_rx_aggregation_subframes;
params.buf_size = buf_size;
+ ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
+ buf_size, sta->sta.addr);
+
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -412,8 +415,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
}
end:
- if (status == WLAN_STATUS_SUCCESS)
+ if (status == WLAN_STATUS_SUCCESS) {
__set_bit(tid, sta->ampdu_mlme.agg_session_valid);
+ __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
+ }
mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock:
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 543b1d4fc33d..fd6541f3ade3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3,6 +3,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2016 Intel Deutschland GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -39,7 +40,7 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_MONITOR && flags) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
}
return wdev;
@@ -73,8 +74,29 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
sdata->u.mgd.use_4addr = params->use_4addr;
}
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *monitor_sdata;
+ u32 mu_mntr_cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+ monitor_sdata = rtnl_dereference(local->monitor_sdata);
+ if (monitor_sdata &&
+ wiphy_ext_feature_isset(wiphy, mu_mntr_cap_flag)) {
+ memcpy(monitor_sdata->vif.bss_conf.mu_group.membership,
+ params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
+ memcpy(monitor_sdata->vif.bss_conf.mu_group.position,
+ params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
+ WLAN_USER_POSITION_LEN);
+ monitor_sdata->vif.mu_mimo_owner = true;
+ ieee80211_bss_info_change_notify(monitor_sdata,
+ BSS_CHANGED_MU_GROUPS);
+
+ ether_addr_copy(monitor_sdata->u.mntr.mu_follow_addr,
+ params->macaddr);
+ }
+
+ if (!flags)
+ return 0;
if (ieee80211_sdata_running(sdata)) {
u32 mask = MONITOR_FLAG_COOK_FRAMES |
@@ -89,11 +111,11 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
* cooked_mntrs, monitor and all fif_* counters
* reconfigure hardware
*/
- if ((*flags & mask) != (sdata->u.mntr_flags & mask))
+ if ((*flags & mask) != (sdata->u.mntr.flags & mask))
return -EBUSY;
ieee80211_adjust_monitor_flags(sdata, -1);
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
ieee80211_adjust_monitor_flags(sdata, 1);
ieee80211_configure_filter(local);
@@ -103,7 +125,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
* and ieee80211_do_open take care of "everything"
* mentioned in the comment above.
*/
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
}
}
@@ -131,6 +153,149 @@ static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
}
+static int ieee80211_start_nan(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ int ret;
+
+ mutex_lock(&sdata->local->chanctx_mtx);
+ ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+ mutex_unlock(&sdata->local->chanctx_mtx);
+ if (ret < 0)
+ return ret;
+
+ ret = ieee80211_do_open(wdev, true);
+ if (ret)
+ return ret;
+
+ ret = drv_start_nan(sdata->local, sdata, conf);
+ if (ret)
+ ieee80211_sdata_stop(sdata);
+
+ sdata->u.nan.conf = *conf;
+
+ return ret;
+}
+
+static void ieee80211_stop_nan(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ drv_stop_nan(sdata->local, sdata);
+ ieee80211_sdata_stop(sdata);
+}
+
+static int ieee80211_nan_change_conf(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf,
+ u32 changes)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct cfg80211_nan_conf new_conf;
+ int ret = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (!ieee80211_sdata_running(sdata))
+ return -ENETDOWN;
+
+ new_conf = sdata->u.nan.conf;
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_PREF)
+ new_conf.master_pref = conf->master_pref;
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_DUAL)
+ new_conf.dual = conf->dual;
+
+ ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes);
+ if (!ret)
+ sdata->u.nan.conf = new_conf;
+
+ return ret;
+}
+
+static int ieee80211_add_nan_func(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ int ret;
+
+ if (sdata->vif.type != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (!ieee80211_sdata_running(sdata))
+ return -ENETDOWN;
+
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ ret = idr_alloc(&sdata->u.nan.function_inst_ids,
+ nan_func, 1, sdata->local->hw.max_nan_de_entries + 1,
+ GFP_ATOMIC);
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+
+ if (ret < 0)
+ return ret;
+
+ nan_func->instance_id = ret;
+
+ WARN_ON(nan_func->instance_id == 0);
+
+ ret = drv_add_nan_func(sdata->local, sdata, nan_func);
+ if (ret) {
+ spin_lock_bh(&sdata->u.nan.func_lock);
+ idr_remove(&sdata->u.nan.function_inst_ids,
+ nan_func->instance_id);
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+ }
+
+ return ret;
+}
+
+static struct cfg80211_nan_func *
+ieee80211_find_nan_func_by_cookie(struct ieee80211_sub_if_data *sdata,
+ u64 cookie)
+{
+ struct cfg80211_nan_func *func;
+ int id;
+
+ lockdep_assert_held(&sdata->u.nan.func_lock);
+
+ idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id) {
+ if (func->cookie == cookie)
+ return func;
+ }
+
+ return NULL;
+}
+
+static void ieee80211_del_nan_func(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct cfg80211_nan_func *func;
+ u8 instance_id = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_NAN ||
+ !ieee80211_sdata_running(sdata))
+ return;
+
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ func = ieee80211_find_nan_func_by_cookie(sdata, cookie);
+ if (func)
+ instance_id = func->instance_id;
+
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+
+ if (instance_id)
+ drv_del_nan_func(sdata->local, sdata, instance_id);
+}
+
static int ieee80211_set_noack_map(struct wiphy *wiphy,
struct net_device *dev,
u16 noack_map)
@@ -236,6 +401,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_P2P_CLIENT:
@@ -2015,6 +2181,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
!(req->flags & NL80211_SCAN_FLAG_AP)))
return -EOPNOTSUPP;
break;
+ case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
@@ -2940,10 +3107,6 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
}
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (!chanctx) {
- err = -EBUSY;
- goto out;
- }
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
@@ -3360,6 +3523,63 @@ static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
}
+void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct cfg80211_nan_func *func;
+ u64 cookie;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_NAN))
+ return;
+
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ func = idr_find(&sdata->u.nan.function_inst_ids, inst_id);
+ if (WARN_ON(!func)) {
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+ return;
+ }
+
+ cookie = func->cookie;
+ idr_remove(&sdata->u.nan.function_inst_ids, inst_id);
+
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+
+ cfg80211_free_nan_func(func);
+
+ cfg80211_nan_func_terminated(ieee80211_vif_to_wdev(vif), inst_id,
+ reason, cookie, gfp);
+}
+EXPORT_SYMBOL(ieee80211_nan_func_terminated);
+
+void ieee80211_nan_func_match(struct ieee80211_vif *vif,
+ struct cfg80211_nan_match_params *match,
+ gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct cfg80211_nan_func *func;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_NAN))
+ return;
+
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ func = idr_find(&sdata->u.nan.function_inst_ids, match->inst_id);
+ if (WARN_ON(!func)) {
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+ return;
+ }
+ match->cookie = func->cookie;
+
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+
+ cfg80211_nan_match(ieee80211_vif_to_wdev(vif), match, gfp);
+}
+EXPORT_SYMBOL(ieee80211_nan_func_match);
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3445,4 +3665,9 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_ap_chanwidth = ieee80211_set_ap_chanwidth,
.add_tx_ts = ieee80211_add_tx_ts,
.del_tx_ts = ieee80211_del_tx_ts,
+ .start_nan = ieee80211_start_nan,
+ .stop_nan = ieee80211_stop_nan,
+ .nan_change_conf = ieee80211_nan_change_conf,
+ .add_nan_func = ieee80211_add_nan_func,
+ .del_nan_func = ieee80211_del_nan_func,
};
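
The NAN function bookkeeping above uses an IDR as a small instance-id table: allocate a positive id under the lock, find the entry later by id or cookie, and drop the id when the function terminates. A toy standalone equivalent with a fixed array in place of the kernel IDR:

#include <stdio.h>
#include <stdint.h>

#define MAX_FUNCS 4  /* stands in for a hypothetical max_nan_de_entries */

struct nan_func {
    uint64_t cookie;
};

/* ids 1..MAX_FUNCS; slot 0 stays unused so 0 can mean "no function" */
static struct nan_func *table[MAX_FUNCS + 1];

static int func_add(struct nan_func *f)
{
    int id;

    for (id = 1; id <= MAX_FUNCS; id++) {
        if (!table[id]) {
            table[id] = f;
            return id;   /* becomes the instance id */
        }
    }
    return -1;           /* no free slot */
}

static struct nan_func *func_find_by_cookie(uint64_t cookie)
{
    int id;

    for (id = 1; id <= MAX_FUNCS; id++)
        if (table[id] && table[id]->cookie == cookie)
            return table[id];
    return NULL;
}

static void func_del(int id)
{
    if (id >= 1 && id <= MAX_FUNCS)
        table[id] = NULL;
}

int main(void)
{
    struct nan_func f = { .cookie = 0xabcdULL };
    int id = func_add(&f);

    printf("instance id %d, lookup %s\n", id,
           func_find_by_cookie(0xabcdULL) ? "ok" : "miss");
    func_del(id);
    return 0;
}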
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 74142d07ad31..e75cbf6ecc26 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -274,6 +274,7 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
ieee80211_get_max_required_bw(sdata));
break;
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
continue;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_WDS:
@@ -646,6 +647,9 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
struct ieee80211_chanctx *curr_ctx = NULL;
int ret = 0;
+ if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
+ return -ENOTSUPP;
+
conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
lockdep_is_held(&local->chanctx_mtx));
@@ -718,6 +722,7 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
continue;
case NL80211_IFTYPE_STATION:
if (!sdata->u.mgd.associated)
@@ -980,6 +985,7 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
case NUM_NL80211_IFTYPES:
WARN_ON(1);
break;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 2906c1004e1a..f56e2f487d09 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -71,138 +71,45 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
-struct aqm_info {
- struct ieee80211_local *local;
- size_t size;
- size_t len;
- unsigned char buf[0];
-};
-
-#define AQM_HDR_LEN 200
-#define AQM_HW_ENTRY_LEN 40
-#define AQM_TXQ_ENTRY_LEN 110
-
-static int aqm_open(struct inode *inode, struct file *file)
+static ssize_t aqm_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
{
- struct ieee80211_local *local = inode->i_private;
- struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
- struct txq_info *txqi;
+ struct ieee80211_local *local = file->private_data;
struct fq *fq = &local->fq;
- struct aqm_info *info = NULL;
+ char buf[200];
int len = 0;
- int i;
-
- if (!local->ops->wake_tx_queue)
- return -EOPNOTSUPP;
-
- len += AQM_HDR_LEN;
- len += 6 * AQM_HW_ENTRY_LEN;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- len += AQM_TXQ_ENTRY_LEN;
- list_for_each_entry_rcu(sta, &local->sta_list, list)
- len += AQM_TXQ_ENTRY_LEN * ARRAY_SIZE(sta->sta.txq);
- rcu_read_unlock();
-
- info = vmalloc(len);
- if (!info)
- return -ENOMEM;
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
- file->private_data = info;
- info->local = local;
- info->size = len;
- len = 0;
-
- len += scnprintf(info->buf + len, info->size - len,
- "* hw\n"
- "access name value\n"
- "R fq_flows_cnt %u\n"
- "R fq_backlog %u\n"
- "R fq_overlimit %u\n"
- "R fq_collisions %u\n"
- "RW fq_limit %u\n"
- "RW fq_quantum %u\n",
- fq->flows_cnt,
- fq->backlog,
- fq->overlimit,
- fq->collisions,
- fq->limit,
- fq->quantum);
-
- len += scnprintf(info->buf + len,
- info->size - len,
- "* vif\n"
- "ifname addr ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
-
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- txqi = to_txq_info(sdata->vif.txq);
- len += scnprintf(info->buf + len, info->size - len,
- "%s %pM %u %u %u %u %u %u %u %u\n",
- sdata->name,
- sdata->vif.addr,
- txqi->txq.ac,
- txqi->tin.backlog_bytes,
- txqi->tin.backlog_packets,
- txqi->tin.flows,
- txqi->tin.overlimit,
- txqi->tin.collisions,
- txqi->tin.tx_bytes,
- txqi->tin.tx_packets);
- }
-
- len += scnprintf(info->buf + len,
- info->size - len,
- "* sta\n"
- "ifname addr tid ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
-
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- sdata = sta->sdata;
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- txqi = to_txq_info(sta->sta.txq[i]);
- len += scnprintf(info->buf + len, info->size - len,
- "%s %pM %d %d %u %u %u %u %u %u %u\n",
- sdata->name,
- sta->sta.addr,
- txqi->txq.tid,
- txqi->txq.ac,
- txqi->tin.backlog_bytes,
- txqi->tin.backlog_packets,
- txqi->tin.flows,
- txqi->tin.overlimit,
- txqi->tin.collisions,
- txqi->tin.tx_bytes,
- txqi->tin.tx_packets);
- }
- }
-
- info->len = len;
+ len = scnprintf(buf, sizeof(buf),
+ "access name value\n"
+ "R fq_flows_cnt %u\n"
+ "R fq_backlog %u\n"
+ "R fq_overlimit %u\n"
+ "R fq_overmemory %u\n"
+ "R fq_collisions %u\n"
+ "R fq_memory_usage %u\n"
+ "RW fq_memory_limit %u\n"
+ "RW fq_limit %u\n"
+ "RW fq_quantum %u\n",
+ fq->flows_cnt,
+ fq->backlog,
+ fq->overmemory,
+ fq->overlimit,
+ fq->collisions,
+ fq->memory_usage,
+ fq->memory_limit,
+ fq->limit,
+ fq->quantum);
rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
- return 0;
-}
-
-static int aqm_release(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
- return 0;
-}
-
-static ssize_t aqm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- struct aqm_info *info = file->private_data;
-
return simple_read_from_buffer(user_buf, count, ppos,
- info->buf, info->len);
+ buf, len);
}
static ssize_t aqm_write(struct file *file,
@@ -210,8 +117,7 @@ static ssize_t aqm_write(struct file *file,
size_t count,
loff_t *ppos)
{
- struct aqm_info *info = file->private_data;
- struct ieee80211_local *local = info->local;
+ struct ieee80211_local *local = file->private_data;
char buf[100];
size_t len;
@@ -228,6 +134,8 @@ static ssize_t aqm_write(struct file *file,
if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
return count;
+ else if (sscanf(buf, "fq_memory_limit %u", &local->fq.memory_limit) == 1)
+ return count;
else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1)
return count;
@@ -237,8 +145,7 @@ static ssize_t aqm_write(struct file *file,
static const struct file_operations aqm_ops = {
.write = aqm_write,
.read = aqm_read,
- .open = aqm_open,
- .release = aqm_release,
+ .open = simple_open,
.llseek = default_llseek,
};
@@ -302,6 +209,7 @@ static const char *hw_flag_names[] = {
FLAG(USES_RSS),
FLAG(TX_AMSDU),
FLAG(TX_FRAG_LIST),
+ FLAG(REPORTS_LOW_ACK),
#undef FLAG
};
@@ -428,7 +336,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
- DEBUGFS_ADD_MODE(aqm, 0600);
+
+ if (local->ops->wake_tx_queue)
+ DEBUGFS_ADD_MODE(aqm, 0600);
statsd = debugfs_create_dir("statistics", phyd);
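The aqm_read() rewrite above drops the snapshot taken at open time (the vmalloc'd struct aqm_info with its own open/release handlers) and instead formats a small fixed-size buffer on every read, which is why .open can collapse to simple_open. A minimal sketch of that debugfs idiom, not part of the patch; my_dev, its counters and the file name are illustrative assumptions:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct my_dev {
        u32 tx_ok;
        u32 tx_err;
};

static ssize_t my_stats_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct my_dev *dev = file->private_data; /* filled in by simple_open() */
        char buf[64];
        int len;

        /* Format into a stack buffer; nothing to keep between reads. */
        len = scnprintf(buf, sizeof(buf), "tx_ok %u\ntx_err %u\n",
                        dev->tx_ok, dev->tx_err);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations my_stats_ops = {
        .read   = my_stats_read,
        .open   = simple_open,  /* copies inode->i_private into file->private_data */
        .llseek = default_llseek,
};

The trade-off is that the stack buffer has to hold the whole report, which is why the per-interface and per-station dumps are split out into their own debugfs files in the hunks that follow.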
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index a5ba739cd2a7..bcec1240f41d 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -30,7 +30,7 @@ static ssize_t ieee80211_if_read(
size_t count, loff_t *ppos,
ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int))
{
- char buf[70];
+ char buf[200];
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
@@ -486,6 +486,38 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
}
IEEE80211_IF_FILE_R(num_buffered_multicast);
+static ssize_t ieee80211_if_fmt_aqm(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ int len;
+
+ spin_lock_bh(&local->fq.lock);
+ rcu_read_lock();
+
+ len = scnprintf(buf,
+ buflen,
+ "ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n"
+ "%u %u %u %u %u %u %u %u %u %u\n",
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+ txqi->tin.backlog_packets,
+ txqi->tin.flows,
+ txqi->cstats.drop_count,
+ txqi->cstats.ecn_mark,
+ txqi->tin.overlimit,
+ txqi->tin.collisions,
+ txqi->tin.tx_bytes,
+ txqi->tin.tx_packets);
+
+ rcu_read_unlock();
+ spin_unlock_bh(&local->fq.lock);
+
+ return len;
+}
+IEEE80211_IF_FILE_R(aqm);
+
/* IBSS attributes */
static ssize_t ieee80211_if_fmt_tsf(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -524,9 +556,15 @@ static ssize_t ieee80211_if_parse_tsf(
ret = kstrtoull(buf, 10, &tsf);
if (ret < 0)
return ret;
- if (tsf_is_delta)
- tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
- if (local->ops->set_tsf) {
+ if (tsf_is_delta && local->ops->offset_tsf) {
+ drv_offset_tsf(local, sdata, tsf_is_delta * tsf);
+ wiphy_info(local->hw.wiphy,
+ "debugfs offset TSF by %018lld\n",
+ tsf_is_delta * tsf);
+ } else if (local->ops->set_tsf) {
+ if (tsf_is_delta)
+ tsf = drv_get_tsf(local, sdata) +
+ tsf_is_delta * tsf;
drv_set_tsf(local, sdata, tsf);
wiphy_info(local->hw.wiphy,
"debugfs set TSF to %#018llx\n", tsf);
@@ -618,6 +656,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
+
+ if (sdata->local->ops->wake_tx_queue)
+ DEBUGFS_ADD(aqm);
}
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index fd334133ff45..a2fcdb47a0e6 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -133,6 +133,55 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
}
STA_OPS(last_seq_ctrl);
+#define AQM_TXQ_ENTRY_LEN 130
+
+static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_local *local = sta->local;
+ size_t bufsz = AQM_TXQ_ENTRY_LEN*(IEEE80211_NUM_TIDS+1);
+ char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
+ struct txq_info *txqi;
+ ssize_t rv;
+ int i;
+
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&local->fq.lock);
+ rcu_read_lock();
+
+ p += scnprintf(p,
+ bufsz+buf-p,
+ "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n");
+
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ txqi = to_txq_info(sta->sta.txq[i]);
+ p += scnprintf(p, bufsz+buf-p,
+ "%d %d %u %u %u %u %u %u %u %u %u\n",
+ txqi->txq.tid,
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+ txqi->tin.backlog_packets,
+ txqi->tin.flows,
+ txqi->cstats.drop_count,
+ txqi->cstats.ecn_mark,
+ txqi->tin.overlimit,
+ txqi->tin.collisions,
+ txqi->tin.tx_bytes,
+ txqi->tin.tx_packets);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&local->fq.lock);
+
+ rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ kfree(buf);
+ return rv;
+}
+STA_OPS(aqm);
+
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -478,6 +527,9 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
+ if (local->ops->wake_tx_queue)
+ DEBUGFS_ADD(aqm);
+
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
sta->debugfs_dir,
@@ -492,10 +544,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
{
- struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
-
- drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
debugfs_remove_recursive(sta->debugfs_dir);
sta->debugfs_dir = NULL;
}
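The new sta_aqm_read() above sizes its heap buffer from a worst-case per-row length and tracks the remaining space as "bufsz + buf - p" while appending one line per TID. A self-contained sketch of that accumulate-then-copy pattern; ROW_LEN, the row format and dump_rows() are illustrative assumptions, not part of the patch:

#include <linux/fs.h>
#include <linux/slab.h>

#define ROW_LEN 32      /* assumed worst-case length of one formatted row */

static ssize_t dump_rows(char __user *user_buf, size_t count, loff_t *ppos,
                         const u32 *vals, int n_rows)
{
        size_t bufsz = ROW_LEN * (n_rows + 1); /* rows plus a header line */
        char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
        ssize_t ret;
        int i;

        if (!buf)
                return -ENOMEM;

        p += scnprintf(p, bufsz + buf - p, "idx value\n");
        for (i = 0; i < n_rows; i++)
                p += scnprintf(p, bufsz + buf - p, "%d %u\n", i, vals[i]);

        /* scnprintf() never writes past the given size, so p stays in bounds. */
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, p - buf);
        kfree(buf);
        return ret;
}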
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index c258f1041d33..bb886e7db47f 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -62,7 +62,7 @@ int drv_add_interface(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
- !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))))
return -EINVAL;
trace_drv_add_interface(local, sdata);
@@ -215,6 +215,21 @@ void drv_set_tsf(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+void drv_offset_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ s64 offset)
+{
+ might_sleep();
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_offset_tsf(local, sdata, offset);
+ if (local->ops->offset_tsf)
+ local->ops->offset_tsf(&local->hw, &sdata->vif, offset);
+ trace_drv_return_void(local);
+}
+
void drv_reset_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index ba5fc1f01e53..09f77e4a8a79 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -162,7 +162,9 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
return;
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR))
+ sdata->vif.type == NL80211_IFTYPE_NAN ||
+ (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !sdata->vif.mu_mimo_owner)))
return;
if (!check_sdata_in_driver(sdata))
@@ -498,21 +500,6 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
sta, dir);
}
-
-static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- might_sleep();
-
- sdata = get_bss_sdata(sdata);
- check_sdata_in_driver(sdata);
-
- if (local->ops->sta_remove_debugfs)
- local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
- sta, dir);
-}
#endif
static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
@@ -582,6 +569,9 @@ u64 drv_get_tsf(struct ieee80211_local *local,
void drv_set_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u64 tsf);
+void drv_offset_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ s64 offset);
void drv_reset_tsf(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
@@ -1088,13 +1078,13 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
}
static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
- struct ieee80211_sta *sta)
+ struct sta_info *sta)
{
u32 ret = 0;
- trace_drv_get_expected_throughput(sta);
- if (local->ops->get_expected_throughput)
- ret = local->ops->get_expected_throughput(&local->hw, sta);
+ trace_drv_get_expected_throughput(&sta->sta);
+ if (local->ops->get_expected_throughput && sta->uploaded)
+ ret = local->ops->get_expected_throughput(&local->hw, &sta->sta);
trace_drv_return_u32(local, ret);
return ret;
@@ -1179,4 +1169,83 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
local->ops->wake_tx_queue(&local->hw, &txq->txq);
}
+static inline int drv_start_nan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_nan_conf *conf)
+{
+ int ret;
+
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ trace_drv_start_nan(local, sdata, conf);
+ ret = local->ops->start_nan(&local->hw, &sdata->vif, conf);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
+static inline void drv_stop_nan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ trace_drv_stop_nan(local, sdata);
+ local->ops->stop_nan(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
+
+static inline int drv_nan_change_conf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_nan_conf *conf,
+ u32 changes)
+{
+ int ret;
+
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ if (!local->ops->nan_change_conf)
+ return -EOPNOTSUPP;
+
+ trace_drv_nan_change_conf(local, sdata, conf, changes);
+ ret = local->ops->nan_change_conf(&local->hw, &sdata->vif, conf,
+ changes);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline int drv_add_nan_func(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_nan_func *nan_func)
+{
+ int ret;
+
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ if (!local->ops->add_nan_func)
+ return -EOPNOTSUPP;
+
+ trace_drv_add_nan_func(local, sdata, nan_func);
+ ret = local->ops->add_nan_func(&local->hw, &sdata->vif, nan_func);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline void drv_del_nan_func(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u8 instance_id)
+{
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ trace_drv_del_nan_func(local, sdata, instance_id);
+ if (local->ops->del_nan_func)
+ local->ops->del_nan_func(&local->hw, &sdata->vif, instance_id);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
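The drv_*_nan() helpers added above all follow the wrapper shape used throughout this header: assert the calling context, trace the call, dispatch only if the driver implements the op, and return -EOPNOTSUPP for the optional ones. A generic sketch of that shape with made-up names (my_driver_ops, do_thing); the tracepoints are indicated only as comments:

#include <linux/errno.h>
#include <linux/kernel.h>

struct my_driver_ops {
        int (*do_thing)(void *hw, int arg);     /* optional op */
};

struct my_local {
        void *hw;
        const struct my_driver_ops *ops;
};

static inline int drv_do_thing(struct my_local *local, int arg)
{
        int ret;

        might_sleep();                          /* process context only */

        if (!local->ops->do_thing)
                return -EOPNOTSUPP;             /* optional op not provided */

        /* trace_drv_do_thing(local, arg); */
        ret = local->ops->do_thing(local->hw, arg);
        /* trace_drv_return_int(local, ret); */

        return ret;
}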
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f56d342c31b8..34c2add2c455 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -3,7 +3,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -86,6 +86,8 @@ struct ieee80211_local;
#define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */)
+#define IEEE80211_MAX_NAN_INSTANCE_ID 255
+
struct ieee80211_fragment_entry {
struct sk_buff_head skb_list;
unsigned long first_frag_time;
@@ -813,17 +815,39 @@ enum txq_info_flags {
* @def_flow: used as a fallback flow when a packet destined to @tin hashes to
* a fq_flow which is already owned by a different tin
* @def_cvars: codel vars for @def_flow
+ * @frags: used to keep fragments created after dequeue
*/
struct txq_info {
struct fq_tin tin;
struct fq_flow def_flow;
struct codel_vars def_cvars;
+ struct codel_stats cstats;
+ struct sk_buff_head frags;
unsigned long flags;
/* keep last! */
struct ieee80211_txq txq;
};
+struct ieee80211_if_mntr {
+ u32 flags;
+ u8 mu_follow_addr[ETH_ALEN] __aligned(2);
+};
+
+/**
+ * struct ieee80211_if_nan - NAN state
+ *
+ * @conf: current NAN configuration
+ * @func_lock: protects @function_inst_ids
+ * @function_inst_ids: IDR of the registered NAN functions, keyed by instance id
+ */
+struct ieee80211_if_nan {
+ struct cfg80211_nan_conf conf;
+
+ /* protects function_inst_ids */
+ spinlock_t func_lock;
+ struct idr function_inst_ids;
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -922,7 +946,8 @@ struct ieee80211_sub_if_data {
struct ieee80211_if_ibss ibss;
struct ieee80211_if_mesh mesh;
struct ieee80211_if_ocb ocb;
- u32 mntr_flags;
+ struct ieee80211_if_mntr mntr;
+ struct ieee80211_if_nan nan;
} u;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1112,7 +1137,6 @@ struct ieee80211_local {
struct fq fq;
struct codel_vars *cvars;
struct codel_params cparams;
- struct codel_stats cstats;
const struct ieee80211_ops *ops;
@@ -1208,7 +1232,7 @@ struct ieee80211_local {
spinlock_t tim_lock;
unsigned long num_sta;
struct list_head sta_list;
- struct rhashtable sta_hash;
+ struct rhltable sta_hash;
struct timer_list sta_cleanup;
int sta_generation;
@@ -1476,6 +1500,13 @@ static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
return container_of(txq, struct txq_info, txq);
}
+static inline bool txq_has_queue(struct ieee80211_txq *txq)
+{
+ struct txq_info *txqi = to_txq_info(txq);
+
+ return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
+}
+
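txq_has_queue() exists so callers stop peeking at tin.backlog_packets directly and also notice frames parked on the new per-txq frags list. A schematic caller, mirroring how the power-save paths later in this patch use it; mark_buffered_tids() itself is illustrative and relies on the mac80211-internal struct sta_info from the surrounding files:

static void mark_buffered_tids(struct sta_info *sta, unsigned long *bitmap)
{
        int tid;

        for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
                if (txq_has_queue(sta->sta.txq[tid]))
                        set_bit(tid, bitmap);   /* frags or tin backlog present */
                else
                        clear_bit(tid, bitmap);
        }
}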
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
{
return ether_addr_equal(raddr, addr) ||
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b123a9e325b3..638ec0759078 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -43,6 +43,8 @@
* by either the RTNL, the iflist_mtx or RCU.
*/
+static void ieee80211_iface_work(struct work_struct *work);
+
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -188,7 +190,7 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr,
continue;
if (iter->vif.type == NL80211_IFTYPE_MONITOR &&
- !(iter->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE))
continue;
m = iter->vif.addr;
@@ -217,7 +219,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
return -EBUSY;
if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
check_dup = false;
ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup);
@@ -325,6 +327,9 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
int n_queues = sdata->local->hw.queues;
int i;
+ if (iftype == NL80211_IFTYPE_NAN)
+ return 0;
+
if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
@@ -357,7 +362,7 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset)
{
struct ieee80211_local *local = sdata->local;
- u32 flags = sdata->u.mntr_flags;
+ u32 flags = sdata->u.mntr.flags;
#define ADJUST(_f, _s) do { \
if (flags & MONITOR_FLAG_##_f) \
@@ -448,6 +453,9 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
return 0;
}
@@ -540,6 +548,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_NAN:
/* no special treatment */
break;
case NL80211_IFTYPE_UNSPECIFIED:
@@ -589,12 +598,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
}
break;
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
local->cooked_mntrs++;
break;
}
- if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
@@ -641,7 +650,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
local->fif_probe_req++;
}
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN)
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
@@ -655,6 +665,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
break;
default:
/* not reached */
@@ -787,6 +798,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ps_data *ps;
struct cfg80211_chan_def chandef;
bool cancel_scan;
+ struct cfg80211_nan_func *func;
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -926,7 +938,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/* no need to tell driver */
break;
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
local->cooked_mntrs--;
break;
}
@@ -939,6 +951,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_adjust_monitor_flags(sdata, -1);
break;
+ case NL80211_IFTYPE_NAN:
+ /* clean all the functions */
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) {
+ idr_remove(&sdata->u.nan.function_inst_ids, i);
+ cfg80211_free_nan_func(func);
+ }
+ idr_destroy(&sdata->u.nan.function_inst_ids);
+
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+ break;
case NL80211_IFTYPE_P2P_DEVICE:
/* relies on synchronize_rcu() below */
RCU_INIT_POINTER(local->p2p_sdata, NULL);
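The NL80211_IFTYPE_NAN teardown above drains the instance-id IDR under its spinlock: every registered function is unlinked with idr_remove(), freed, and finally the IDR itself is released with idr_destroy(). A standalone sketch of that allocate/iterate/teardown pattern using the generic IDR API; struct item, the id range and the locking granularity are illustrative assumptions:

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
        int id;
};

static DEFINE_SPINLOCK(item_lock);
static DEFINE_IDR(item_ids);

static int item_add(struct item *it)
{
        int id;

        spin_lock_bh(&item_lock);
        /* allocate an id in [1, 255], an instance-id style range */
        id = idr_alloc(&item_ids, it, 1, 256, GFP_ATOMIC);
        spin_unlock_bh(&item_lock);

        if (id < 0)
                return id;
        it->id = id;
        return 0;
}

static void items_destroy_all(void)
{
        struct item *it;
        int i;

        spin_lock_bh(&item_lock);
        idr_for_each_entry(&item_ids, it, i) {
                idr_remove(&item_ids, i);       /* drop the mapping first */
                kfree(it);                      /* then free the object */
        }
        idr_destroy(&item_ids);                 /* release the IDR's own memory */
        spin_unlock_bh(&item_lock);
}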
@@ -1012,7 +1036,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
- if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
break;
/* fall through */
@@ -1444,12 +1468,17 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_MONITOR:
sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
- sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
+ sdata->u.mntr.flags = MONITOR_FLAG_CONTROL |
MONITOR_FLAG_OTHER_BSS;
break;
case NL80211_IFTYPE_WDS:
sdata->vif.bss_conf.bssid = NULL;
break;
+ case NL80211_IFTYPE_NAN:
+ idr_init(&sdata->u.nan.function_inst_ids);
+ spin_lock_init(&sdata->u.nan.func_lock);
+ sdata->vif.bss_conf.bssid = sdata->vif.addr;
+ break;
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_DEVICE:
sdata->vif.bss_conf.bssid = sdata->vif.addr;
@@ -1717,7 +1746,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ASSERT_RTNL();
- if (type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN) {
struct wireless_dev *wdev;
sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d00ea9b13f49..1075ac24c8c5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -660,6 +660,9 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_roc_setup(local);
+ local->hw.radiotap_timestamp.units_pos = -1;
+ local->hw.radiotap_timestamp.accuracy = -1;
+
return &local->hw;
err_free:
wiphy_free(wiphy);
@@ -818,6 +821,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
!local->ops->tdls_recv_channel_switch))
return -EOPNOTSUPP;
+ if (WARN_ON(local->hw.wiphy->interface_modes &
+ BIT(NL80211_IFTYPE_NAN) &&
+ (!local->ops->start_nan || !local->ops->stop_nan)))
+ return -EINVAL;
+
#ifdef CONFIG_PM
if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
@@ -1055,6 +1063,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->dynamic_ps_forced_timeout = -1;
+ if (!local->hw.max_nan_de_entries)
+ local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID;
+
result = ieee80211_wep_init(local);
if (result < 0)
wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index faccef977670..b747c9645e43 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -326,22 +326,33 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
u32 tx_time, estimated_retx;
u64 result;
- if (sta->mesh->fail_avg >= 100)
- return MAX_METRIC;
+	/* Try to get the rate from the HW/SW RC algorithm.
+	 * The rate is returned in units of Kbps; convert it to
+	 * the units used by the airtime calculation below.
+	 * Round up in case we get a rate < 100 Kbps.
+	 */
+ rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);
- sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
- rate = cfg80211_calculate_bitrate(&rinfo);
- if (WARN_ON(!rate))
- return MAX_METRIC;
+ if (rate) {
+ err = 0;
+ } else {
+ if (sta->mesh->fail_avg >= 100)
+ return MAX_METRIC;
- err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
+ rate = cfg80211_calculate_bitrate(&rinfo);
+ if (WARN_ON(!rate))
+ return MAX_METRIC;
+
+ err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
+ }
/* bitrate is in units of 100 Kbps, while we need rate in units of
* 1Mbps. This will be corrected on tx_time computation.
*/
tx_time = (device_constant + 10 * test_frame_len / rate);
estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
- result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
+ result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
return (u32)result;
}
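The reworked metric above prefers an expected-throughput estimate from the rate-control algorithm, converted from Kbps into the 100 Kbps units the airtime formula uses, and only falls back to the last TX rate plus the fail average when no estimate is available. A tiny illustration of the unit conversion only; the helper and the 65000 Kbps example value are assumptions:

#include <linux/kernel.h>

/*
 * Convert an RC throughput estimate in Kbps into the 100 Kbps units used
 * by the airtime computation, rounding up so a small but non-zero
 * estimate does not collapse to zero (which would mean "no estimate").
 */
static u32 metric_rate_units(u32 expected_kbps)
{
        return DIV_ROUND_UP(expected_kbps, 100);        /* e.g. 65000 -> 650 */
}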
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 64bc22ad9496..faca22cd02b5 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -28,7 +28,7 @@
* could be, for instance, in case a neighbor is restarted and its TSF counter
* reset.
*/
-#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */
+#define TOFFSET_MAXIMUM_ADJUSTMENT 800 /* 0.8 ms */
struct sync_method {
u8 method;
@@ -70,9 +70,13 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
- tsf = drv_get_tsf(local, sdata);
- if (tsf != -1ULL)
- drv_set_tsf(local, sdata, tsf + tsfdelta);
+ if (local->ops->offset_tsf) {
+ drv_offset_tsf(local, sdata, tsfdelta);
+ } else {
+ tsf = drv_get_tsf(local, sdata);
+ if (tsf != -1ULL)
+ drv_set_tsf(local, sdata, tsf + tsfdelta);
+ }
}
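mesh_sync_adjust_tbtt() above (like the debugfs TSF write earlier in this patch) now prefers the new offset_tsf driver op, which applies the delta atomically in the driver, keeping the racier get_tsf()/set_tsf() read-modify-write only as a fallback. The decision distilled into a sketch that reuses the drv_* wrappers from this patch; adjust_tsf() itself is illustrative:

static void adjust_tsf(struct ieee80211_local *local,
                       struct ieee80211_sub_if_data *sdata, s64 delta)
{
        u64 tsf;

        if (local->ops->offset_tsf) {
                /* atomic in the driver: no window between read and write */
                drv_offset_tsf(local, sdata, delta);
                return;
        }

        tsf = drv_get_tsf(local, sdata);
        if (tsf != -1ULL)                       /* -1 means "TSF not available" */
                drv_set_tsf(local, sdata, tsf + delta);
}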
static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 8d426f637f58..7486f2dab4ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1672,11 +1672,15 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
non_acm_ac++)
if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac)))
break;
- /* The loop will result in using BK even if it requires
- * admission control, such configuration makes no sense
- * and we have to transmit somehow - the AC selection
- * does the same thing.
+ /* Usually the loop will result in using BK even if it
+ * requires admission control, but such a configuration
+ * makes no sense and we have to transmit somehow - the
+ * AC selection does the same thing.
+ * If we started out trying to downgrade from BK, then
+ * the extra condition here might be needed.
*/
+ if (non_acm_ac >= IEEE80211_NUM_ACS)
+ non_acm_ac = IEEE80211_AC_BK;
if (drv_conf_tx(local, sdata, ac,
&sdata->tx_conf[non_acm_ac]))
sdata_err(sdata,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 55a9c5b94ce1..c3f610bba3fe 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -128,7 +128,8 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+ if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
+ sdata->vif.type == NL80211_IFTYPE_NAN)
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
@@ -838,6 +839,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
case NL80211_IFTYPE_P2P_DEVICE:
need_offchan = true;
break;
+ case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 00a43a70e1fc..28a3a0957c9e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -178,8 +178,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
- if (local->open_count)
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local);
suspend:
local->suspended = true;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9dce3b157908..6175db385ba7 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -180,6 +180,11 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
len += 12;
}
+ if (local->hw.radiotap_timestamp.units_pos >= 0) {
+ len = ALIGN(len, 8);
+ len += 12;
+ }
+
if (status->chains) {
/* antenna and antenna signal fields */
len += 2 * hweight8(status->chains);
@@ -447,6 +452,31 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
}
+ if (local->hw.radiotap_timestamp.units_pos >= 0) {
+ u16 accuracy = 0;
+ u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
+
+ rthdr->it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
+
+ /* ensure 8 byte alignment */
+ while ((pos - (u8 *)rthdr) & 7)
+ pos++;
+
+ put_unaligned_le64(status->device_timestamp, pos);
+ pos += sizeof(u64);
+
+ if (local->hw.radiotap_timestamp.accuracy >= 0) {
+ accuracy = local->hw.radiotap_timestamp.accuracy;
+ flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
+ }
+ put_unaligned_le16(accuracy, pos);
+ pos += sizeof(u16);
+
+ *pos++ = local->hw.radiotap_timestamp.units_pos;
+ *pos++ = flags;
+ }
+
for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
*pos++ = status->chain_signal[chain];
*pos++ = chain;
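The radiotap timestamp field added above must start on an 8-byte boundary within the radiotap header and is laid out as a 64-bit timestamp, a 16-bit accuracy value, a units/position byte and a flags byte. A standalone sketch of emitting that layout into a pre-zeroed byte buffer; only the field layout mirrors the code above, the helper itself is an assumption:

#include <linux/kernel.h>
#include <asm/unaligned.h>

/*
 * Append a radiotap-style timestamp field at *pos, aligned to 8 bytes
 * relative to the start of the header, and return the new write pointer.
 * The caller is assumed to have reserved enough room (up to 7 bytes of
 * padding plus 12 bytes of field data, as the hdrlen code above does).
 */
static u8 *emit_timestamp(u8 *hdr_start, u8 *pos, u64 ts,
                          u16 accuracy, u8 units_pos, u8 flags)
{
        while ((pos - hdr_start) & 7)
                pos++;                          /* buffer is already zeroed */

        put_unaligned_le64(ts, pos);
        pos += sizeof(u64);
        put_unaligned_le16(accuracy, pos);
        pos += sizeof(u16);
        *pos++ = units_pos;
        *pos++ = flags;

        return pos;
}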
@@ -485,6 +515,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
unsigned int rtap_vendor_space = 0;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_sub_if_data *monitor_sdata =
+ rcu_dereference(local->monitor_sdata);
if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
@@ -567,7 +600,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
continue;
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
continue;
if (!ieee80211_sdata_running(sdata))
@@ -585,6 +618,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
ieee80211_rx_stats(sdata->dev, skb->len);
}
+ mgmt = (void *)skb->data;
+ if (monitor_sdata &&
+ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
+ ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_VHT &&
+ mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
+ is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
+ ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
+ struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
+
+ if (mu_skb) {
+ mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
+ ieee80211_queue_work(&local->hw, &monitor_sdata->work);
+ }
+ }
+
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
@@ -1072,8 +1122,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
- if (!tid_agg_rx)
+ if (!tid_agg_rx) {
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
+ WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
goto dont_reorder;
+ }
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
@@ -1266,9 +1323,7 @@ static void sta_ps_start(struct sta_info *sta)
return;
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
- struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
-
- if (txqi->tin.backlog_packets)
+ if (txq_has_queue(sta->sta.txq[tid]))
set_bit(tid, &sta->txq_buffered_tids);
else
clear_bit(tid, &sta->txq_buffered_tids);
@@ -2535,6 +2590,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
tid = le16_to_cpu(bar_data.control) >> 12;
+ if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
+ WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
+
tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
if (!tid_agg_rx)
return RX_DROP_MONITOR;
@@ -3147,7 +3208,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
- !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
continue;
if (prev_dev) {
@@ -3523,6 +3584,9 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
ieee80211_is_probe_req(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control) ||
ieee80211_is_beacon(hdr->frame_control);
+ case NL80211_IFTYPE_NAN:
+ /* Currently no frames on NAN interface are allowed */
+ return false;
default:
break;
}
@@ -3940,7 +4004,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
__le16 fc;
struct ieee80211_rx_data rx;
struct ieee80211_sub_if_data *prev;
- struct rhash_head *tmp;
+ struct rhlist_head *tmp;
int err = 0;
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
@@ -3983,13 +4047,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
goto out;
} else if (ieee80211_is_data(fc)) {
struct sta_info *sta, *prev_sta;
- const struct bucket_table *tbl;
prev_sta = NULL;
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) {
+ for_each_sta_info(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 070b40f15850..23d8ac829279 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -420,7 +420,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
{
struct ieee80211_local *local = hw_to_local(hw);
- trace_api_scan_completed(local, info);
+ trace_api_scan_completed(local, info->aborted);
set_bit(SCAN_COMPLETED, &local->scanning);
if (info->aborted)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index aa58df80ede0..78e9ecbc96e6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,12 +67,10 @@
static const struct rhashtable_params sta_rht_params = {
.nelem_hint = 3, /* start small */
- .insecure_elasticity = true, /* Disable chain-length checks. */
.automatic_shrinking = true,
.head_offset = offsetof(struct sta_info, hash_node),
.key_offset = offsetof(struct sta_info, addr),
.key_len = ETH_ALEN,
- .hashfn = sta_addr_hash,
.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};
@@ -80,8 +78,8 @@ static const struct rhashtable_params sta_rht_params = {
static int sta_info_hash_del(struct ieee80211_local *local,
struct sta_info *sta)
{
- return rhashtable_remove_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhltable_remove(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void __cleanup_single_sta(struct sta_info *sta)
@@ -157,19 +155,22 @@ static void cleanup_single_sta(struct sta_info *sta)
sta_info_free(local, sta);
}
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr)
+{
+ return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
+}
+
/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct ieee80211_local *local = sdata->local;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
rcu_read_lock();
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (sta->sdata == sdata) {
rcu_read_unlock();
/* this is safe as the caller must already hold
@@ -190,14 +191,11 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct ieee80211_local *local = sdata->local;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
rcu_read_lock();
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
rcu_read_unlock();
@@ -263,8 +261,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
static int sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
- return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhltable_insert(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -340,6 +338,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
+ sta->sta.max_rx_aggregation_subframes =
+ local->hw.max_rx_aggregation_subframes;
+
sta->local = local;
sta->sdata = sdata;
sta->rx_stats.last_rx = jiffies;
@@ -450,9 +451,9 @@ static int sta_info_insert_check(struct sta_info *sta)
is_multicast_ether_addr(sta->sta.addr)))
return -EINVAL;
- /* Strictly speaking this isn't necessary as we hold the mutex, but
- * the rhashtable code can't really deal with that distinction. We
- * do require the mutex for correctness though.
+ /* The RCU read lock is required by rhashtable due to
+ * asynchronous resize/rehash. We also require the mutex
+ * for correctness.
*/
rcu_read_lock();
lockdep_assert_held(&sdata->local->sta_mtx);
@@ -687,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ if (!local->ops->set_tim)
return;
if (sta->dead)
@@ -1040,16 +1041,11 @@ static void sta_info_cleanup(unsigned long data)
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}
-u32 sta_addr_hash(const void *key, u32 length, u32 seed)
-{
- return jhash(key, ETH_ALEN, seed);
-}
-
int sta_info_init(struct ieee80211_local *local)
{
int err;
- err = rhashtable_init(&local->sta_hash, &sta_rht_params);
+ err = rhltable_init(&local->sta_hash, &sta_rht_params);
if (err)
return err;
@@ -1065,7 +1061,7 @@ int sta_info_init(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
del_timer_sync(&local->sta_cleanup);
- rhashtable_destroy(&local->sta_hash);
+ rhltable_destroy(&local->sta_hash);
}
@@ -1135,17 +1131,14 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
const u8 *localaddr)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
-
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
/*
* Just return a random station if localaddr is NULL
* ... first in list.
*/
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (localaddr &&
!ether_addr_equal(sta->sdata->vif.addr, localaddr))
continue;
@@ -1209,12 +1202,10 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
if (sta->sta.txq[0]) {
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
-
- if (!txqi->tin.backlog_packets)
+ if (!txq_has_queue(sta->sta.txq[i]))
continue;
- drv_wake_tx_queue(local, txqi);
+ drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
}
}
@@ -1645,10 +1636,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
return;
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
- struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
-
if (!(driver_release_tids & BIT(tid)) ||
- txqi->tin.backlog_packets)
+ txq_has_queue(sta->sta.txq[tid]))
continue;
sta_info_recalc_tim(sta);
@@ -2279,11 +2268,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
- /* check if the driver has a SW RC implementation */
- if (ref && ref->ops->get_expected_throughput)
- thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
- else
- thr = drv_get_expected_throughput(local, &sta->sta);
+ thr = sta_get_expected_throughput(sta);
if (thr != 0) {
sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
@@ -2291,6 +2276,25 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
}
}
+u32 sta_get_expected_throughput(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct rate_control_ref *ref = NULL;
+ u32 thr = 0;
+
+ if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+ ref = local->rate_ctrl;
+
+ /* check if the driver has a SW RC implementation */
+ if (ref && ref->ops->get_expected_throughput)
+ thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
+ else
+ thr = drv_get_expected_throughput(local, sta);
+
+ return thr;
+}
+
unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 78b0ef32dddd..ed5fcb984a01 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -230,6 +230,8 @@ struct tid_ampdu_rx {
* @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
* driver requested to close until the work for it runs
* @agg_session_valid: bitmap indicating which TID has a rx BA session open on
+ * @unexpected_agg: bitmap indicating which TID already sent a delBA due to
+ * unexpected aggregation related frames outside a session
* @work: work struct for starting/stopping aggregation
* @tid_tx: aggregation info for Tx per TID
* @tid_start_tx: sessions where start was requested
@@ -244,6 +246,7 @@ struct sta_ampdu_mlme {
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+ unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
/* tx */
struct work_struct work;
struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
@@ -452,7 +455,7 @@ struct sta_info {
/* General information, mostly static */
struct list_head list, free_list;
struct rcu_head rcu_head;
- struct rhash_head hash_node;
+ struct rhlist_head hash_node;
u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
@@ -635,6 +638,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
*/
#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr);
+
/*
* Get a STA info, must be under RCU read lock.
*/
@@ -644,17 +650,9 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-u32 sta_addr_hash(const void *key, u32 length, u32 seed);
-
-#define _sta_bucket_idx(_tbl, _a) \
- rht_bucket_index(_tbl, sta_addr_hash(_a, ETH_ALEN, (_tbl)->hash_rnd))
-
-#define for_each_sta_info(local, tbl, _addr, _sta, _tmp) \
- rht_for_each_entry_rcu(_sta, _tmp, tbl, \
- _sta_bucket_idx(tbl, _addr), \
- hash_node) \
- /* compare address and run code only if it matches */ \
- if (ether_addr_equal(_sta->addr, (_addr)))
+#define for_each_sta_info(local, _addr, _sta, _tmp) \
+ rhl_for_each_entry_rcu(_sta, _tmp, \
+ sta_info_hash_lookup(local, _addr), hash_node)
/*
* Get STA info by index, BROKEN!
@@ -712,6 +710,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
struct rate_info *rinfo);
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo);
+u32 sta_get_expected_throughput(struct sta_info *sta);
+
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
unsigned long exp_time);
u8 sta_info_tx_streams(struct sta_info *sta);
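The station hash moves from rhashtable to rhltable so that several sta_info entries may share one MAC address key (the same peer seen on different interfaces); rhltable_lookup() returns a list head and for_each_sta_info() now simply walks it with rhl_for_each_entry_rcu(). A minimal self-contained sketch of that API; struct node and node_params are illustrative, not the mac80211 definitions:

#include <linux/etherdevice.h>
#include <linux/rhashtable.h>

struct node {
        u8 addr[ETH_ALEN];
        struct rhlist_head hash_node;
};

static const struct rhashtable_params node_params = {
        .key_len        = ETH_ALEN,
        .key_offset     = offsetof(struct node, addr),
        .head_offset    = offsetof(struct node, hash_node),
        .automatic_shrinking = true,
};

static struct rhltable nodes;

static int nodes_init(void)
{
        return rhltable_init(&nodes, &node_params);
}

static int node_add(struct node *n)
{
        return rhltable_insert(&nodes, &n->hash_node, node_params);
}

/* Caller must hold rcu_read_lock(); all entries sharing @addr are visited. */
static void node_visit(const u8 *addr, void (*fn)(struct node *))
{
        struct rhlist_head *list, *tmp;
        struct node *n;

        list = rhltable_lookup(&nodes, addr, node_params);
        rhl_for_each_entry_rcu(n, tmp, list, hash_node)
                fn(n);
}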
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a2a68269675d..ddf71c648cab 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -557,6 +557,12 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
+ /* If driver relies on its own algorithm for station kickout, skip
+ * mac80211 packet loss mechanism.
+ */
+ if (ieee80211_hw_check(&sta->local->hw, REPORTS_LOW_ACK))
+ return;
+
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -709,7 +715,7 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
if (!ieee80211_sdata_running(sdata))
continue;
- if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
+ if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
!send_to_cooked)
continue;
@@ -740,8 +746,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
__le16 fc;
struct ieee80211_supported_band *sband;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
int retry_count;
int rates_idx;
bool send_to_cooked;
@@ -749,7 +755,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
- const struct bucket_table *tbl;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
@@ -758,9 +763,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) {
+ for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 77e4c53baefb..92a47afaa989 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -984,6 +984,32 @@ TRACE_EVENT(drv_set_tsf,
)
);
+TRACE_EVENT(drv_offset_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ s64 offset),
+
+ TP_ARGS(local, sdata, offset),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(s64, tsf_offset)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->tsf_offset = offset;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " tsf offset:%lld",
+ LOCAL_PR_ARG, VIF_PR_ARG,
+ (unsigned long long)__entry->tsf_offset
+ )
+);
+
DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata),
@@ -1700,6 +1726,139 @@ TRACE_EVENT(drv_get_expected_throughput,
)
);
+TRACE_EVENT(drv_start_nan,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_nan_conf *conf),
+
+ TP_ARGS(local, sdata, conf),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, master_pref)
+ __field(u8, dual)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->master_pref = conf->master_pref;
+ __entry->dual = conf->dual;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ ", master preference: %u, dual: %d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref,
+ __entry->dual
+ )
+);
+
+TRACE_EVENT(drv_stop_nan,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(local, sdata),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_nan_change_conf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_nan_conf *conf,
+ u32 changes),
+
+ TP_ARGS(local, sdata, conf, changes),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, master_pref)
+ __field(u8, dual)
+ __field(u32, changes)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->master_pref = conf->master_pref;
+ __entry->dual = conf->dual;
+ __entry->changes = changes;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ ", master preference: %u, dual: %d, changes: 0x%x",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref,
+ __entry->dual, __entry->changes
+ )
+);
+
+TRACE_EVENT(drv_add_nan_func,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_nan_func *func),
+
+ TP_ARGS(local, sdata, func),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, type)
+ __field(u8, inst_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->type = func->type;
+ __entry->inst_id = func->instance_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ ", type: %u, inst_id: %u",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->type, __entry->inst_id
+ )
+);
+
+TRACE_EVENT(drv_del_nan_func,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u8 instance_id),
+
+ TP_ARGS(local, sdata, instance_id),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, instance_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->instance_id = instance_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ ", instance_id: %u",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->instance_id
+ )
+);
+
/*
* Tracing for API calls that drivers call.
*/
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 18b285e06bc8..1c56abc49627 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -796,36 +796,6 @@ static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
return ret;
}
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *pubsta,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_txq *txq = NULL;
-
- if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
- (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
- return NULL;
-
- if (!ieee80211_is_data(hdr->frame_control))
- return NULL;
-
- if (pubsta) {
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
- txq = pubsta->txq[tid];
- } else if (vif) {
- txq = vif->txq;
- }
-
- if (!txq)
- return NULL;
-
- return to_txq_info(txq);
-}
-
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
@@ -883,9 +853,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
tx->sta->tx_stats.msdu[tid]++;
- if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
- tx->skb))
- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
+ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
return TX_CONTINUE;
}
@@ -1274,6 +1242,36 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
return TX_CONTINUE;
}
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *pubsta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_txq *txq = NULL;
+
+ if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+ return NULL;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return NULL;
+
+ if (pubsta) {
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ txq = pubsta->txq[tid];
+ } else if (vif) {
+ txq = vif->txq;
+ }
+
+ if (!txq)
+ return NULL;
+
+ return to_txq_info(txq);
+}
+
static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@ -1344,7 +1342,7 @@ static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
local = container_of(fq, struct ieee80211_local, fq);
txqi = container_of(tin, struct txq_info, tin);
cparams = &local->cparams;
- cstats = &local->cstats;
+ cstats = &txqi->cstats;
if (flow == &txqi->def_flow)
cvars = &txqi->def_cvars;
@@ -1404,6 +1402,8 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
fq_tin_init(&txqi->tin);
fq_flow_init(&txqi->def_flow);
codel_vars_init(&txqi->def_cvars);
+ codel_stats_init(&txqi->cstats);
+ __skb_queue_head_init(&txqi->frags);
txqi->txq.vif = &sdata->vif;
@@ -1426,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
struct fq_tin *tin = &txqi->tin;
fq_tin_reset(fq, tin, fq_skb_free_func);
+ ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
}
int ieee80211_txq_setup_flows(struct ieee80211_local *local)
@@ -1433,6 +1434,8 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
struct fq *fq = &local->fq;
int ret;
int i;
+ bool supp_vht = false;
+ enum nl80211_band band;
if (!local->ops->wake_tx_queue)
return 0;
@@ -1441,8 +1444,24 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
if (ret)
return ret;
+ /*
+ * If the hardware doesn't support VHT, it is safe to limit the maximum
+ * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
+ */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+
+ sband = local->hw.wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
+ }
+
+ if (!supp_vht)
+ fq->memory_limit = 4 << 20; /* 4 Mbytes */
+
codel_params_init(&local->cparams);
- codel_stats_init(&local->cstats);
local->cparams.interval = MS2TIME(100);
local->cparams.target = MS2TIME(20);
local->cparams.ecn = true;
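The 4 MB default follows from the comment above: the largest 802.11n A-MPDU is just under 64 KB, so 64 of them fit within the limit, while VHT hardware (whose A-MPDUs can be roughly 1 MB) keeps the unlimited default. A compile-time check of that arithmetic, purely illustrative and not part of the patch:

#include <linux/bug.h>

static inline void check_fq_memory_limit(void)
{
        /* 64 aggregates x 65535 bytes = 4194240 <= 4 << 20 = 4194304 */
        BUILD_BUG_ON(64ULL * 65535 > (4 << 20));
}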
@@ -1477,54 +1496,46 @@ void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
spin_unlock_bh(&fq->lock);
}
-struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
- struct ieee80211_txq *txq)
+static bool ieee80211_queue_skb(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct sk_buff *skb)
{
- struct ieee80211_local *local = hw_to_local(hw);
- struct txq_info *txqi = container_of(txq, struct txq_info, txq);
- struct ieee80211_hdr *hdr;
- struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct fq *fq = &local->fq;
- struct fq_tin *tin = &txqi->tin;
+ struct ieee80211_vif *vif;
+ struct txq_info *txqi;
+ struct ieee80211_sta *pubsta;
- spin_lock_bh(&fq->lock);
+ if (!local->ops->wake_tx_queue ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ return false;
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
- goto out;
+ if (sta && sta->uploaded)
+ pubsta = &sta->sta;
+ else
+ pubsta = NULL;
- skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
- if (!skb)
- goto out;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
- ieee80211_set_skb_vif(skb, txqi);
+ vif = &sdata->vif;
+ txqi = ieee80211_get_txq(local, vif, pubsta, skb);
- hdr = (struct ieee80211_hdr *)skb->data;
- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
- struct sta_info *sta = container_of(txq->sta, struct sta_info,
- sta);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ if (!txqi)
+ return false;
- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
- info->flags |= IEEE80211_TX_CTL_AMPDU;
- else
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- }
+ info->control.vif = vif;
-out:
+ spin_lock_bh(&fq->lock);
+ ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
- if (skb && skb_has_frag_list(skb) &&
- !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
- if (skb_linearize(skb)) {
- ieee80211_free_txskb(&local->hw, skb);
- return NULL;
- }
- }
+ drv_wake_tx_queue(local, txqi);
- return skb;
+ return true;
}
-EXPORT_SYMBOL(ieee80211_tx_dequeue);
static bool ieee80211_tx_frags(struct ieee80211_local *local,
struct ieee80211_vif *vif,
@@ -1533,9 +1544,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
bool txpending)
{
struct ieee80211_tx_control control = {};
- struct fq *fq = &local->fq;
struct sk_buff *skb, *tmp;
- struct txq_info *txqi;
unsigned long flags;
skb_queue_walk_safe(skbs, skb, tmp) {
@@ -1550,21 +1559,6 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
}
#endif
- txqi = ieee80211_get_txq(local, vif, sta, skb);
- if (txqi) {
- info->control.vif = vif;
-
- __skb_unlink(skb, skbs);
-
- spin_lock_bh(&fq->lock);
- ieee80211_txq_enqueue(local, txqi, skb);
- spin_unlock_bh(&fq->lock);
-
- drv_wake_tx_queue(local, txqi);
-
- continue;
- }
-
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->queue_stop_reasons[q] ||
(!txpending && !skb_queue_empty(&local->pending[q]))) {
@@ -1648,7 +1642,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
vif = &sdata->vif;
break;
}
@@ -1685,10 +1679,13 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
/*
* Invoke TX handlers, return 0 on success and non-zero if the
* frame was dropped or queued.
+ *
+ * The handlers are split into an early and late part. The latter is everything
+ * that can be sensitive to reordering, and will be deferred to after packets
+ * are dequeued from the intermediate queues (when they are enabled).
*/
-static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
ieee80211_tx_result res = TX_DROP;
#define CALL_TXH(txh) \
@@ -1706,6 +1703,31 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
+ txh_done:
+ if (unlikely(res == TX_DROP)) {
+ I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ if (tx->skb)
+ ieee80211_free_txskb(&tx->local->hw, tx->skb);
+ else
+ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
+ return -1;
+ } else if (unlikely(res == TX_QUEUED)) {
+ I802_DEBUG_INC(tx->local->tx_handlers_queued);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Late handlers can be called while the sta lock is held. Handlers that can
+ * cause packets to be generated will cause deadlock!
+ */
+static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ ieee80211_tx_result res = TX_CONTINUE;
+
if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
__skb_queue_tail(&tx->skbs, tx->skb);
tx->skb = NULL;
@@ -1738,6 +1760,15 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
return 0;
}
+static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+{
+ int r = invoke_tx_handlers_early(tx);
+
+ if (r)
+ return r;
+ return invoke_tx_handlers_late(tx);
+}
+
bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct sk_buff *skb,
int band, struct ieee80211_sta **sta)
@@ -1812,7 +1843,13 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
info->hw_queue =
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
- if (!invoke_tx_handlers(&tx))
+ if (invoke_tx_handlers_early(&tx))
+ return false;
+
+ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
+ return true;
+
+ if (!invoke_tx_handlers_late(&tx))
result = __ieee80211_tx(local, &tx.skbs, led_len,
tx.sta, txpending);
@@ -2268,15 +2305,9 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_STATION:
if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
sta = sta_info_get(sdata, skb->data);
- if (sta) {
- bool tdls_peer, tdls_auth;
-
- tdls_peer = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER);
- tdls_auth = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER_AUTH);
-
- if (tdls_peer && tdls_auth) {
+ if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ if (test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH)) {
*sta_out = sta;
return 0;
}
@@ -2288,8 +2319,7 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
* after a TDLS sta is removed due to being
* unreachable.
*/
- if (tdls_peer && !tdls_auth &&
- !ieee80211_is_tdls_setup(skb))
+ if (!ieee80211_is_tdls_setup(skb))
return -EINVAL;
}
@@ -2339,7 +2369,6 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
- int nh_pos, h_pos;
bool wme_sta = false, authorized = false;
bool tdls_peer;
bool multicast;
@@ -2645,13 +2674,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
encaps_len = 0;
}
- nh_pos = skb_network_header(skb) - skb->data;
- h_pos = skb_transport_header(skb) - skb->data;
-
skb_pull(skb, skip_header_bytes);
- nh_pos -= skip_header_bytes;
- h_pos -= skip_header_bytes;
-
head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
/*
@@ -2677,18 +2700,12 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
}
}
- if (encaps_data) {
+ if (encaps_data)
memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
- nh_pos += encaps_len;
- h_pos += encaps_len;
- }
#ifdef CONFIG_MAC80211_MESH
- if (meshhdrlen > 0) {
+ if (meshhdrlen > 0)
memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
- nh_pos += meshhdrlen;
- h_pos += meshhdrlen;
- }
#endif
if (ieee80211_is_data_qos(fc)) {
@@ -2704,15 +2721,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
} else
memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
- nh_pos += hdrlen;
- h_pos += hdrlen;
-
- /* Update skb pointers to various headers since this modified frame
- * is going to go through Linux networking code that may potentially
- * need things like pointer to IP header. */
skb_reset_mac_header(skb);
- skb_set_network_header(skb, nh_pos);
- skb_set_transport_header(skb, h_pos);
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
@@ -3184,8 +3193,71 @@ out:
return ret;
}
+/*
+ * Can be called while the sta lock is held. Anything that can cause packets to
+ * be generated will cause deadlock!
+ */
+static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 pn_offs,
+ struct ieee80211_key *key,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 tid = IEEE80211_NUM_TIDS;
+
+ if (key)
+ info->control.hw_key = &key->conf;
+
+ ieee80211_tx_stats(skb->dev, skb->len);
+
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ *ieee80211_get_qos_ctl(hdr) = tid;
+ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+ } else {
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
+ sdata->sequence_number += 0x10;
+ }
+
+ if (skb_shinfo(skb)->gso_size)
+ sta->tx_stats.msdu[tid] +=
+ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+ else
+ sta->tx_stats.msdu[tid]++;
+
+ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
+ /* statistics normally done by ieee80211_tx_h_stats (but that
+ * has to consider fragmentation, so is more complex)
+ */
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
+
+ if (pn_offs) {
+ u64 pn;
+ u8 *crypto_hdr = skb->data + pn_offs;
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn = atomic64_inc_return(&key->conf.tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+ break;
+ }
+ }
+}
+
static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
- struct net_device *dev, struct sta_info *sta,
+ struct sta_info *sta,
struct ieee80211_fast_tx *fast_tx,
struct sk_buff *skb)
{
@@ -3236,8 +3308,6 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
return true;
}
- ieee80211_tx_stats(dev, skb->len + extra_head);
-
if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
return true;
@@ -3266,24 +3336,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
IEEE80211_TX_CTL_DONTFRAG |
(tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
-
- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
- *ieee80211_get_qos_ctl(hdr) = tid;
- if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
- } else {
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
- sdata->sequence_number += 0x10;
- }
-
- if (skb_shinfo(skb)->gso_size)
- sta->tx_stats.msdu[tid] +=
- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
- else
- sta->tx_stats.msdu[tid]++;
-
- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
__skb_queue_head_init(&tx.skbs);
@@ -3293,9 +3346,6 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
tx.sta = sta;
tx.key = fast_tx->key;
- if (fast_tx->key)
- info->control.hw_key = &fast_tx->key->conf;
-
if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
tx.skb = skb;
r = ieee80211_tx_h_rate_ctrl(&tx);
@@ -3309,31 +3359,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
}
}
- /* statistics normally done by ieee80211_tx_h_stats (but that
- * has to consider fragmentation, so is more complex)
- */
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
-
- if (fast_tx->pn_offs) {
- u64 pn;
- u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
+ if (ieee80211_queue_skb(local, sdata, sta, skb))
+ return true;
- switch (fast_tx->key->conf.cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
- crypto_hdr[0] = pn;
- crypto_hdr[1] = pn >> 8;
- crypto_hdr[4] = pn >> 16;
- crypto_hdr[5] = pn >> 24;
- crypto_hdr[6] = pn >> 32;
- crypto_hdr[7] = pn >> 40;
- break;
- }
- }
+ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
+ fast_tx->key, skb);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -3344,6 +3374,94 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
return true;
}
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = container_of(txq, struct txq_info, txq);
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct fq *fq = &local->fq;
+ struct fq_tin *tin = &txqi->tin;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_tx_data tx;
+ ieee80211_tx_result r;
+
+ spin_lock_bh(&fq->lock);
+
+ if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
+ goto out;
+
+ /* Make sure fragments stay together. */
+ skb = __skb_dequeue(&txqi->frags);
+ if (skb)
+ goto out;
+
+begin:
+ skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
+ if (!skb)
+ goto out;
+
+ ieee80211_set_skb_vif(skb, txqi);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ info = IEEE80211_SKB_CB(skb);
+
+ memset(&tx, 0, sizeof(tx));
+ __skb_queue_head_init(&tx.skbs);
+ tx.local = local;
+ tx.skb = skb;
+ tx.sdata = vif_to_sdata(info->control.vif);
+
+ if (txq->sta)
+ tx.sta = container_of(txq->sta, struct sta_info, sta);
+
+ /*
+	 * The key can be removed while the packet was queued, so we need to
+	 * call this here to get the current key.
+ */
+ r = ieee80211_tx_h_select_key(&tx);
+ if (r != TX_CONTINUE) {
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ }
+
+ if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
+ struct sta_info *sta = container_of(txq->sta, struct sta_info,
+ sta);
+ u8 pn_offs = 0;
+
+ if (tx.key &&
+ (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+ pn_offs = ieee80211_hdrlen(hdr->frame_control);
+
+ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
+ tx.key, skb);
+ } else {
+ if (invoke_tx_handlers_late(&tx))
+ goto begin;
+
+ skb = __skb_dequeue(&tx.skbs);
+
+ if (!skb_queue_empty(&tx.skbs))
+ skb_queue_splice_tail(&tx.skbs, &txqi->frags);
+ }
+
+ if (skb && skb_has_frag_list(skb) &&
+ !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+ if (skb_linearize(skb)) {
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ }
+ }
+
+out:
+ spin_unlock_bh(&fq->lock);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_tx_dequeue);
+
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags)
@@ -3368,7 +3486,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
fast_tx = rcu_dereference(sta->fast_tx);
if (fast_tx &&
- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
+ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
goto out;
}
@@ -4395,9 +4513,6 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
int ac = ieee802_1d_to_ac[tid & 7];
skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
skb_set_queue_mapping(skb, ac);
skb->priority = tid;
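
The ieee80211_tx_dequeue() export above is the driver-facing half of the intermediate queues: frames are parked per txq and the reorder-sensitive late TX handlers run only when the driver pulls them back out. A minimal, hypothetical consumer of that API (the mydrv_* name is made up, and the free call only stands in for a real submit path) might look like this:

#include <net/mac80211.h>

/* Hypothetical driver callback: drain the frames mac80211 queued for this
 * txq. A real driver would hand each skb to its DMA/submit path; the
 * ieee80211_free_txskb() call below is only a stand-in for that.
 */
static void mydrv_wake_tx_queue(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
		ieee80211_free_txskb(hw, skb);
}
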
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 42bf0b6685e8..545c79a42a77 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -598,7 +598,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
continue;
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -1209,7 +1209,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
}
if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
+ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN) {
sdata->vif.bss_conf.qos = enable_qos;
if (bss_notify)
ieee80211_bss_info_change_notify(sdata,
@@ -1748,6 +1749,46 @@ static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->sta_mtx);
}
+static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata)
+{
+ struct cfg80211_nan_func *func, **funcs;
+ int res, id, i = 0;
+
+ res = drv_start_nan(sdata->local, sdata,
+ &sdata->u.nan.conf);
+ if (WARN_ON(res))
+ return res;
+
+ funcs = kzalloc((sdata->local->hw.max_nan_de_entries + 1) *
+ sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ /* Add all the functions:
+ * This is a little bit ugly. We need to call a potentially sleeping
+ * callback for each NAN function, so we can't hold the spinlock.
+ */
+ spin_lock_bh(&sdata->u.nan.func_lock);
+
+ idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id)
+ funcs[i++] = func;
+
+ spin_unlock_bh(&sdata->u.nan.func_lock);
+
+ for (i = 0; funcs[i]; i++) {
+ res = drv_add_nan_func(sdata->local, sdata, funcs[i]);
+ if (WARN_ON(res))
+ ieee80211_nan_func_terminated(&sdata->vif,
+ funcs[i]->instance_id,
+ NL80211_NAN_FUNC_TERM_REASON_ERROR,
+ GFP_KERNEL);
+ }
+
+ kfree(funcs);
+
+ return 0;
+}
+
int ieee80211_reconfig(struct ieee80211_local *local)
{
struct ieee80211_hw *hw = &local->hw;
@@ -1971,6 +2012,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
ieee80211_bss_info_change_notify(sdata, changed);
}
break;
+ case NL80211_IFTYPE_NAN:
+ res = ieee80211_reconfig_nan(sdata);
+ if (res < 0) {
+ ieee80211_handle_reconfig_failure(local);
+ return res;
+ }
+ break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
@@ -2555,7 +2603,6 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
- rate = sband->bitrates[i].bitrate;
rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
5 * (1 << shift));
*pos++ = basic | (u8) rate;
@@ -3394,11 +3441,18 @@ void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
unsigned long *byte_cnt)
{
struct txq_info *txqi = to_txq_info(txq);
+ u32 frag_cnt = 0, frag_bytes = 0;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&txqi->frags, skb) {
+ frag_cnt++;
+ frag_bytes += skb->len;
+ }
if (frame_cnt)
- *frame_cnt = txqi->tin.backlog_packets;
+ *frame_cnt = txqi->tin.backlog_packets + frag_cnt;
if (byte_cnt)
- *byte_cnt = txqi->tin.backlog_bytes;
+ *byte_cnt = txqi->tin.backlog_bytes + frag_bytes;
}
EXPORT_SYMBOL(ieee80211_txq_get_depth);
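
With ieee80211_txq_get_depth() now folding in the frames parked on txqi->frags, a driver that polls the depth also sees pending fragments. A small hypothetical helper built on that (mydrv_txq_has_work is not a mac80211 symbol):

#include <net/mac80211.h>

/* Hypothetical helper: true if mac80211 still holds frames, including
 * pending fragments, for this txq.
 */
static bool mydrv_txq_has_work(struct ieee80211_txq *txq)
{
	unsigned long frames = 0, bytes = 0;

	ieee80211_txq_get_depth(txq, &frames, &bytes);
	return frames != 0;
}
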
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 7079cd32a7ad..06019dba4b10 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -663,6 +663,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
/* TODO check this */
SET_NETDEV_DEV(ndev, &local->phy->dev);
+ dev_net_set(ndev, wpan_phy_net(local->hw.phy));
sdata = netdev_priv(ndev);
ndev->ieee802154_ptr = &sdata->wpan_dev;
memcpy(sdata->name, ndev->name, IFNAMSIZ);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 446e1300383e..4dcf6e18563a 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -101,11 +101,16 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
sdata->dev->stats.rx_bytes += skb->len;
switch (mac_cb(skb)->type) {
+ case IEEE802154_FC_TYPE_BEACON:
+ case IEEE802154_FC_TYPE_ACK:
+ case IEEE802154_FC_TYPE_MAC_CMD:
+ goto fail;
+
case IEEE802154_FC_TYPE_DATA:
return ieee802154_deliver_skb(skb);
default:
- pr_warn("ieee802154: bad frame received (type = %d)\n",
- mac_cb(skb)->type);
+ pr_warn_ratelimited("ieee802154: bad frame received "
+ "(type = %d)\n", mac_cb(skb)->type);
goto fail;
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 5c161e7759b5..0e4334cbde17 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -961,9 +961,6 @@ static void mpls_ifdown(struct net_device *dev, int event)
RCU_INIT_POINTER(nh->nh_dev, NULL);
} endfor_nexthops(rt);
}
-
-
- return;
}
static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
@@ -997,8 +994,6 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
ACCESS_ONCE(rt->rt_nhn_alive) = alive;
}
-
- return;
}
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 732a5c17e986..bdfef6c3271a 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -1,9 +1,6 @@
#ifndef MPLS_INTERNAL_H
#define MPLS_INTERNAL_H
-
-struct mpls_shim_hdr {
- __be32 label_stack_entry;
-};
+#include <net/mpls.h>
struct mpls_entry_decoded {
u32 label;
@@ -93,11 +90,6 @@ struct mpls_route { /* next hop label forwarding entry */
#define endfor_nexthops(rt) }
-static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
-{
- return (struct mpls_shim_hdr *)skb_network_header(skb);
-}
-
static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
{
struct mpls_shim_hdr result;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 2055e57ed1c3..b4da6d8e8632 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -23,32 +23,50 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
netdev_features_t mpls_features;
+ u16 mac_len = skb->mac_len;
__be16 mpls_protocol;
+ unsigned int mpls_hlen;
+
+ skb_reset_network_header(skb);
+ mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
+ if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
+ goto out;
/* Setup inner SKB. */
mpls_protocol = skb->protocol;
skb->protocol = skb->inner_protocol;
- /* Push back the mac header that skb_mac_gso_segment() has pulled.
- * It will be re-pulled by the call to skb_mac_gso_segment() below
- */
- __skb_push(skb, skb->mac_len);
+ __skb_pull(skb, mpls_hlen);
+
+ skb->mac_len = 0;
+ skb_reset_mac_header(skb);
/* Segment inner packet. */
mpls_features = skb->dev->mpls_features & features;
segs = skb_mac_gso_segment(skb, mpls_features);
+ if (IS_ERR_OR_NULL(segs)) {
+ skb_gso_error_unwind(skb, mpls_protocol, mpls_hlen, mac_offset,
+ mac_len);
+ goto out;
+ }
+ skb = segs;
+
+ mpls_hlen += mac_len;
+ do {
+ skb->mac_len = mac_len;
+ skb->protocol = mpls_protocol;
+ skb_reset_inner_network_header(skb);
- /* Restore outer protocol. */
- skb->protocol = mpls_protocol;
+ __skb_push(skb, mpls_hlen);
- /* Re-pull the mac header that the call to skb_mac_gso_segment()
- * above pulled. It will be re-pushed after returning
- * skb_mac_gso_segment(), an indirect caller of this function.
- */
- __skb_pull(skb, skb->data - skb_mac_header(skb));
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, mac_len);
+ } while ((skb = skb->next));
+out:
return segs;
}
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 644a8da6d4bd..cf52cf30ac4b 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -37,7 +37,7 @@ static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
return en->labels * sizeof(struct mpls_shim_hdr);
}
-static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int mpls_xmit(struct sk_buff *skb)
{
struct mpls_iptunnel_encap *tun_encap_info;
struct mpls_shim_hdr *hdr;
@@ -90,7 +90,11 @@ static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (skb_cow(skb, hh_len + new_header_size))
goto drop;
+ skb_set_inner_protocol(skb, skb->protocol);
+ skb_reset_inner_network_header(skb);
+
skb_push(skb, new_header_size);
+
skb_reset_network_header(skb);
skb->dev = out_dev;
@@ -115,7 +119,7 @@ static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
- return 0;
+ return LWTUNNEL_XMIT_DONE;
drop:
kfree_skb(skb);
@@ -153,7 +157,8 @@ static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
if (ret)
goto errout;
newts->type = LWTUNNEL_ENCAP_MPLS;
- newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+ newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
+ newts->headroom = mpls_encap_size(tun_encap_info);
*ts = newts;
@@ -209,7 +214,7 @@ static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
static const struct lwtunnel_encap_ops mpls_iptun_ops = {
.build_state = mpls_build_state,
- .output = mpls_output,
+ .xmit = mpls_xmit,
.fill_encap = mpls_fill_encap_info,
.get_encap_size = mpls_encap_nlsize,
.cmp_encap = mpls_encap_cmp,
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 33738c060547..13290a70fa71 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -170,6 +170,7 @@ struct ncsi_package;
#define NCSI_PACKAGE_SHIFT 5
#define NCSI_PACKAGE_INDEX(c) (((c) >> NCSI_PACKAGE_SHIFT) & 0x7)
+#define NCSI_RESERVED_CHANNEL 0x1f
#define NCSI_CHANNEL_INDEX(c) ((c) & ((1 << NCSI_PACKAGE_SHIFT) - 1))
#define NCSI_TO_CHANNEL(p, c) (((p) << NCSI_PACKAGE_SHIFT) | (c))
@@ -186,9 +187,15 @@ struct ncsi_channel {
struct ncsi_channel_mode modes[NCSI_MODE_MAX];
struct ncsi_channel_filter *filters[NCSI_FILTER_MAX];
struct ncsi_channel_stats stats;
- struct timer_list timer; /* Link monitor timer */
- bool enabled; /* Timer is enabled */
- unsigned int timeout; /* Times of timeout */
+ struct {
+ struct timer_list timer;
+ bool enabled;
+ unsigned int state;
+#define NCSI_CHANNEL_MONITOR_START 0
+#define NCSI_CHANNEL_MONITOR_RETRY 1
+#define NCSI_CHANNEL_MONITOR_WAIT 2
+#define NCSI_CHANNEL_MONITOR_WAIT_MAX 5
+ } monitor;
struct list_head node;
struct list_head link;
};
@@ -206,7 +213,8 @@ struct ncsi_package {
struct ncsi_request {
unsigned char id; /* Request ID - 0 to 255 */
bool used; /* Request that has been assigned */
- bool driven; /* Drive state machine */
+ unsigned int flags; /* NCSI request property */
+#define NCSI_REQ_FLAG_EVENT_DRIVEN 1
struct ncsi_dev_priv *ndp; /* Associated NCSI device */
struct sk_buff *cmd; /* Associated NCSI command packet */
struct sk_buff *rsp; /* Associated NCSI response packet */
@@ -258,6 +266,7 @@ struct ncsi_dev_priv {
struct list_head packages; /* List of packages */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
+#define NCSI_REQ_START_IDX 1
unsigned int pending_req_num; /* Number of pending requests */
struct ncsi_package *active_package; /* Currently handled package */
struct ncsi_channel *active_channel; /* Currently handled channel */
@@ -274,7 +283,7 @@ struct ncsi_cmd_arg {
unsigned char package; /* Destination package ID */
unsigned char channel; /* Destination channel ID or 0x1f */
unsigned short payload; /* Command packet payload length */
- bool driven; /* Drive the state machine? */
+ unsigned int req_flags; /* NCSI request properties */
union {
unsigned char bytes[16]; /* Command packet specific data */
unsigned short words[8];
@@ -313,7 +322,8 @@ void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
unsigned char id,
struct ncsi_package **np,
struct ncsi_channel **nc);
-struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven);
+struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
+ unsigned int req_flags);
void ncsi_free_request(struct ncsi_request *nr);
struct ncsi_dev *ncsi_find_dev(struct net_device *dev);
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp);
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index d463468442ae..b41a6617d498 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -53,7 +53,9 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
struct ncsi_aen_lsc_pkt *lsc;
struct ncsi_channel *nc;
struct ncsi_channel_mode *ncm;
- unsigned long old_data;
+ bool chained;
+ int state;
+ unsigned long old_data, data;
unsigned long flags;
/* Find the NCSI channel */
@@ -62,20 +64,27 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
return -ENODEV;
/* Update the link status */
- ncm = &nc->modes[NCSI_MODE_LINK];
lsc = (struct ncsi_aen_lsc_pkt *)h;
+
+ spin_lock_irqsave(&nc->lock, flags);
+ ncm = &nc->modes[NCSI_MODE_LINK];
old_data = ncm->data[2];
- ncm->data[2] = ntohl(lsc->status);
+ data = ntohl(lsc->status);
+ ncm->data[2] = data;
ncm->data[4] = ntohl(lsc->oem_status);
- if (!((old_data ^ ncm->data[2]) & 0x1) ||
- !list_empty(&nc->link))
+
+ chained = !list_empty(&nc->link);
+ state = nc->state;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
+ if (!((old_data ^ data) & 0x1) || chained)
return 0;
- if (!(nc->state == NCSI_CHANNEL_INACTIVE && (ncm->data[2] & 0x1)) &&
- !(nc->state == NCSI_CHANNEL_ACTIVE && !(ncm->data[2] & 0x1)))
+ if (!(state == NCSI_CHANNEL_INACTIVE && (data & 0x1)) &&
+ !(state == NCSI_CHANNEL_ACTIVE && !(data & 0x1)))
return 0;
if (!(ndp->flags & NCSI_DEV_HWA) &&
- nc->state == NCSI_CHANNEL_ACTIVE)
+ state == NCSI_CHANNEL_ACTIVE)
ndp->flags |= NCSI_DEV_RESHUFFLE;
ncsi_stop_channel_monitor(nc);
@@ -97,13 +106,21 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
if (!nc)
return -ENODEV;
+ spin_lock_irqsave(&nc->lock, flags);
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE)
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
return 0;
+ }
+ spin_unlock_irqrestore(&nc->lock, flags);
ncsi_stop_channel_monitor(nc);
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = NCSI_CHANNEL_INVISIBLE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
spin_lock_irqsave(&ndp->lock, flags);
- xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
+ nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 21057a8ceeac..db7083bfd476 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -272,7 +272,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
struct sk_buff *skb;
struct ncsi_request *nr;
- nr = ncsi_alloc_request(ndp, nca->driven);
+ nr = ncsi_alloc_request(ndp, nca->req_flags);
if (!nr)
return NULL;
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index ef017b871857..5e509e547c2d 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -132,6 +132,7 @@ static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
struct ncsi_channel *nc;
+ unsigned long flags;
nd->state = ncsi_dev_state_functional;
if (force_down) {
@@ -142,14 +143,21 @@ static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
nd->link_up = 0;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
+ spin_lock_irqsave(&nc->lock, flags);
+
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE)
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
continue;
+ }
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ spin_unlock_irqrestore(&nc->lock, flags);
nd->link_up = 1;
goto report;
}
+
+ spin_unlock_irqrestore(&nc->lock, flags);
}
}
@@ -163,43 +171,55 @@ static void ncsi_channel_monitor(unsigned long data)
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
struct ncsi_cmd_arg nca;
- bool enabled;
- unsigned int timeout;
+ bool enabled, chained;
+ unsigned int monitor_state;
unsigned long flags;
- int ret;
+ int state, ret;
spin_lock_irqsave(&nc->lock, flags);
- timeout = nc->timeout;
- enabled = nc->enabled;
+ state = nc->state;
+ chained = !list_empty(&nc->link);
+ enabled = nc->monitor.enabled;
+ monitor_state = nc->monitor.state;
spin_unlock_irqrestore(&nc->lock, flags);
- if (!enabled || !list_empty(&nc->link))
+ if (!enabled || chained)
return;
- if (nc->state != NCSI_CHANNEL_INACTIVE &&
- nc->state != NCSI_CHANNEL_ACTIVE)
+ if (state != NCSI_CHANNEL_INACTIVE &&
+ state != NCSI_CHANNEL_ACTIVE)
return;
- if (!(timeout % 2)) {
+ switch (monitor_state) {
+ case NCSI_CHANNEL_MONITOR_START:
+ case NCSI_CHANNEL_MONITOR_RETRY:
nca.ndp = ndp;
nca.package = np->id;
nca.channel = nc->id;
nca.type = NCSI_PKT_CMD_GLS;
- nca.driven = false;
+ nca.req_flags = 0;
ret = ncsi_xmit_cmd(&nca);
if (ret) {
netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
ret);
return;
}
- }
- if (timeout + 1 >= 3) {
+ break;
+ case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
+ break;
+ default:
if (!(ndp->flags & NCSI_DEV_HWA) &&
- nc->state == NCSI_CHANNEL_ACTIVE)
+ state == NCSI_CHANNEL_ACTIVE) {
ncsi_report_link(ndp, true);
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
+ }
+
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = NCSI_CHANNEL_INVISIBLE;
+ spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
- xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
+ nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
ncsi_process_next_channel(ndp);
@@ -207,10 +227,9 @@ static void ncsi_channel_monitor(unsigned long data)
}
spin_lock_irqsave(&nc->lock, flags);
- nc->timeout = timeout + 1;
- nc->enabled = true;
+ nc->monitor.state++;
spin_unlock_irqrestore(&nc->lock, flags);
- mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
+ mod_timer(&nc->monitor.timer, jiffies + HZ);
}
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
@@ -218,12 +237,12 @@ void ncsi_start_channel_monitor(struct ncsi_channel *nc)
unsigned long flags;
spin_lock_irqsave(&nc->lock, flags);
- WARN_ON_ONCE(nc->enabled);
- nc->timeout = 0;
- nc->enabled = true;
+ WARN_ON_ONCE(nc->monitor.enabled);
+ nc->monitor.enabled = true;
+ nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
spin_unlock_irqrestore(&nc->lock, flags);
- mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
+ mod_timer(&nc->monitor.timer, jiffies + HZ);
}
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
@@ -231,14 +250,14 @@ void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
unsigned long flags;
spin_lock_irqsave(&nc->lock, flags);
- if (!nc->enabled) {
+ if (!nc->monitor.enabled) {
spin_unlock_irqrestore(&nc->lock, flags);
return;
}
- nc->enabled = false;
+ nc->monitor.enabled = false;
spin_unlock_irqrestore(&nc->lock, flags);
- del_timer_sync(&nc->timer);
+ del_timer_sync(&nc->monitor.timer);
}
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
@@ -267,8 +286,9 @@ struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
nc->id = id;
nc->package = np;
nc->state = NCSI_CHANNEL_INACTIVE;
- nc->enabled = false;
- setup_timer(&nc->timer, ncsi_channel_monitor, (unsigned long)nc);
+ nc->monitor.enabled = false;
+ setup_timer(&nc->monitor.timer,
+ ncsi_channel_monitor, (unsigned long)nc);
spin_lock_init(&nc->lock);
INIT_LIST_HEAD(&nc->link);
for (index = 0; index < NCSI_CAP_MAX; index++)
@@ -405,7 +425,8 @@ void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
* be the same. Otherwise, a bogus response might be matched. So
* the available IDs are allocated in round-robin fashion.
*/
-struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
+struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
+ unsigned int req_flags)
{
struct ncsi_request *nr = NULL;
int i, limit = ARRAY_SIZE(ndp->requests);
@@ -413,30 +434,31 @@ struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
/* Check if there is one available request until the ceiling */
spin_lock_irqsave(&ndp->lock, flags);
- for (i = ndp->request_id; !nr && i < limit; i++) {
+ for (i = ndp->request_id; i < limit; i++) {
if (ndp->requests[i].used)
continue;
nr = &ndp->requests[i];
nr->used = true;
- nr->driven = driven;
- if (++ndp->request_id >= limit)
- ndp->request_id = 0;
+ nr->flags = req_flags;
+ ndp->request_id = i + 1;
+ goto found;
}
/* Fail back to check from the starting cursor */
- for (i = 0; !nr && i < ndp->request_id; i++) {
+ for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
if (ndp->requests[i].used)
continue;
nr = &ndp->requests[i];
nr->used = true;
- nr->driven = driven;
- if (++ndp->request_id >= limit)
- ndp->request_id = 0;
+ nr->flags = req_flags;
+ ndp->request_id = i + 1;
+ goto found;
}
- spin_unlock_irqrestore(&ndp->lock, flags);
+found:
+ spin_unlock_irqrestore(&ndp->lock, flags);
return nr;
}
@@ -458,7 +480,7 @@ void ncsi_free_request(struct ncsi_request *nr)
nr->cmd = NULL;
nr->rsp = NULL;
nr->used = false;
- driven = nr->driven;
+ driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
spin_unlock_irqrestore(&ndp->lock, flags);
if (driven && cmd && --ndp->pending_req_num == 0)
@@ -508,10 +530,11 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
struct ncsi_cmd_arg nca;
+ unsigned long flags;
int ret;
nca.ndp = ndp;
- nca.driven = true;
+ nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_suspend:
nd->state = ncsi_dev_state_suspend_select;
@@ -527,7 +550,7 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nca.package = np->id;
if (nd->state == ncsi_dev_state_suspend_select) {
nca.type = NCSI_PKT_CMD_SP;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
if (ndp->flags & NCSI_DEV_HWA)
nca.bytes[0] = 0;
else
@@ -544,7 +567,7 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_suspend_deselect;
} else if (nd->state == ncsi_dev_state_suspend_deselect) {
nca.type = NCSI_PKT_CMD_DP;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
nd->state = ncsi_dev_state_suspend_done;
}
@@ -556,7 +579,9 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_suspend_done:
- xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = NCSI_CHANNEL_INACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
ncsi_process_next_channel(ndp);
break;
@@ -574,10 +599,11 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct ncsi_channel *nc = ndp->active_channel;
struct ncsi_cmd_arg nca;
unsigned char index;
+ unsigned long flags;
int ret;
nca.ndp = ndp;
- nca.driven = true;
+ nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_config:
case ncsi_dev_state_config_sp:
@@ -590,7 +616,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
else
nca.bytes[0] = 1;
nca.package = np->id;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
@@ -675,10 +701,12 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
goto error;
break;
case ncsi_dev_state_config_done:
+ spin_lock_irqsave(&nc->lock, flags);
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
- xchg(&nc->state, NCSI_CHANNEL_ACTIVE);
+ nc->state = NCSI_CHANNEL_ACTIVE;
else
- xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
+ nc->state = NCSI_CHANNEL_INACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
@@ -707,18 +735,25 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
found = NULL;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
+ spin_lock_irqsave(&nc->lock, flags);
+
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_INACTIVE)
+ nc->state != NCSI_CHANNEL_INACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
continue;
+ }
if (!found)
found = nc;
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
+ spin_unlock_irqrestore(&nc->lock, flags);
found = nc;
goto out;
}
+
+ spin_unlock_irqrestore(&nc->lock, flags);
}
}
@@ -797,7 +832,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
int ret;
nca.ndp = ndp;
- nca.driven = true;
+ nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
case ncsi_dev_state_probe:
nd->state = ncsi_dev_state_probe_deselect;
@@ -807,7 +842,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
/* Deselect all possible packages */
nca.type = NCSI_PKT_CMD_DP;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
for (index = 0; index < 8; index++) {
nca.package = index;
ret = ncsi_xmit_cmd(&nca);
@@ -823,7 +858,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
/* Select all possible packages */
nca.type = NCSI_PKT_CMD_SP;
nca.bytes[0] = 1;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
for (index = 0; index < 8; index++) {
nca.package = index;
ret = ncsi_xmit_cmd(&nca);
@@ -876,7 +911,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_SP;
nca.bytes[0] = 1;
nca.package = ndp->active_package->id;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
@@ -884,12 +919,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_cis;
break;
case ncsi_dev_state_probe_cis:
- ndp->pending_req_num = 32;
+ ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
/* Clear initial state */
nca.type = NCSI_PKT_CMD_CIS;
nca.package = ndp->active_package->id;
- for (index = 0; index < 0x20; index++) {
+ for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
nca.channel = index;
ret = ncsi_xmit_cmd(&nca);
if (ret)
@@ -933,7 +968,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
/* Deselect the active package */
nca.type = NCSI_PKT_CMD_DP;
nca.package = ndp->active_package->id;
- nca.channel = 0x1f;
+ nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
@@ -987,11 +1022,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
goto out;
}
- old_state = xchg(&nc->state, NCSI_CHANNEL_INVISIBLE);
list_del_init(&nc->link);
-
spin_unlock_irqrestore(&ndp->lock, flags);
+ spin_lock_irqsave(&nc->lock, flags);
+ old_state = nc->state;
+ nc->state = NCSI_CHANNEL_INVISIBLE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
ndp->active_channel = nc;
ndp->active_package = nc->package;
@@ -1006,7 +1044,7 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
break;
default:
netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
- nc->state, nc->package->id, nc->id);
+ old_state, nc->package->id, nc->id);
ncsi_report_link(ndp, false);
return -EINVAL;
}
@@ -1070,7 +1108,7 @@ static int ncsi_inet6addr_event(struct notifier_block *this,
return NOTIFY_OK;
nca.ndp = ndp;
- nca.driven = false;
+ nca.req_flags = 0;
nca.package = np->id;
nca.channel = nc->id;
nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
@@ -1118,7 +1156,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
/* Initialize private NCSI device */
spin_lock_init(&ndp->lock);
INIT_LIST_HEAD(&ndp->packages);
- ndp->request_id = 0;
+ ndp->request_id = NCSI_REQ_START_IDX;
for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
ndp->requests[i].id = i;
ndp->requests[i].ndp = ndp;
@@ -1149,9 +1187,7 @@ EXPORT_SYMBOL_GPL(ncsi_register_dev);
int ncsi_start_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
- struct ncsi_package *np;
- struct ncsi_channel *nc;
- int old_state, ret;
+ int ret;
if (nd->state != ncsi_dev_state_registered &&
nd->state != ncsi_dev_state_functional)
@@ -1163,15 +1199,6 @@ int ncsi_start_dev(struct ncsi_dev *nd)
return 0;
}
- /* Reset channel's state and start over */
- NCSI_FOR_EACH_PACKAGE(ndp, np) {
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- old_state = xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
- WARN_ON_ONCE(!list_empty(&nc->link) ||
- old_state == NCSI_CHANNEL_INVISIBLE);
- }
- }
-
if (ndp->flags & NCSI_DEV_HWA)
ret = ncsi_enable_hwa(ndp);
else
@@ -1181,6 +1208,35 @@ int ncsi_start_dev(struct ncsi_dev *nd)
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);
+void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+ struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
+ bool chained;
+ int old_state;
+ unsigned long flags;
+
+ /* Stop the channel monitor and reset channel's state */
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
+ ncsi_stop_channel_monitor(nc);
+
+ spin_lock_irqsave(&nc->lock, flags);
+ chained = !list_empty(&nc->link);
+ old_state = nc->state;
+ nc->state = NCSI_CHANNEL_INACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
+ WARN_ON_ONCE(chained ||
+ old_state == NCSI_CHANNEL_INVISIBLE);
+ }
+ }
+
+ ncsi_report_link(ndp, true);
+}
+EXPORT_SYMBOL_GPL(ncsi_stop_dev);
+
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
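
The newly exported ncsi_stop_dev() is the teardown counterpart of ncsi_start_dev(): it stops the channel monitors, forces the channels back to inactive and reports the link down. Roughly how a NIC driver might pair the two, assuming the usual ncsi_register_dev() setup (the mydrv_* names and priv layout are illustrative only):

#include <net/ncsi.h>

/* Hypothetical driver state: ndev was obtained earlier from
 * ncsi_register_dev(netdev, handler).
 */
struct mydrv_priv {
	struct ncsi_dev *ndev;
};

static int mydrv_open(struct mydrv_priv *priv)
{
	/* Kick off NCSI package/channel probing and configuration. */
	return ncsi_start_dev(priv->ndev);
}

static void mydrv_stop(struct mydrv_priv *priv)
{
	/* Quiesce NCSI: stop monitors, mark channels inactive, report down. */
	ncsi_stop_dev(priv->ndev);
}
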
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index af84389a6bf1..087db775b3dc 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -317,12 +317,12 @@ static int ncsi_rsp_handler_gls(struct ncsi_request *nr)
ncm->data[3] = ntohl(rsp->other);
ncm->data[4] = ntohl(rsp->oem_status);
- if (nr->driven)
+ if (nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN)
return 0;
/* Reset the channel monitor if it has been enabled */
spin_lock_irqsave(&nc->lock, flags);
- nc->timeout = 0;
+ nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 9266ceebd112..e8d56d9a4df2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -474,6 +474,12 @@ config NFT_META
This option adds the "meta" expression that you can use to match and
to set packet metainformation such as the packet mark.
+config NFT_NUMGEN
+ tristate "Netfilter nf_tables number generator module"
+ help
+	  This option adds the number generator expression, used to perform
+	  incremental counting and to generate random numbers bound to an upper limit.
+
config NFT_CT
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
@@ -481,13 +487,13 @@ config NFT_CT
This option adds the "meta" expression that you can use to match
connection tracking information such as the flow state.
-config NFT_RBTREE
+config NFT_SET_RBTREE
tristate "Netfilter nf_tables rbtree set module"
help
This option adds the "rbtree" set type (Red Black tree) that is used
to build interval-based sets.
-config NFT_HASH
+config NFT_SET_HASH
tristate "Netfilter nf_tables hash set module"
help
This option adds the "hash" set type that is used to build one-way
@@ -542,6 +548,12 @@ config NFT_QUEUE
This is required if you intend to use the userspace queueing
infrastructure (also known as NFQUEUE) from nftables.
+config NFT_QUOTA
+ tristate "Netfilter nf_tables quota module"
+ help
+ This option adds the "quota" expression that you can use to match
+ enforce bytes quotas.
+
config NFT_REJECT
default m if NETFILTER_ADVANCED=n
tristate "Netfilter nf_tables reject support"
@@ -563,6 +575,12 @@ config NFT_COMPAT
x_tables match/target extensions over the nf_tables
framework.
+config NFT_HASH
+ tristate "Netfilter nf_tables hash module"
+ help
+ This option adds the "hash" expression that you can use to perform
+ a hash operation on registers.
+
if NF_TABLES_NETDEV
config NF_DUP_NETDEV
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 69134541d65b..c23c3c84416f 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -71,8 +71,9 @@ obj-$(CONFIG_NF_DUP_NETDEV) += nf_dup_netdev.o
# nf_tables
nf_tables-objs += nf_tables_core.o nf_tables_api.o nf_tables_trace.o
-nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o nft_dynset.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_range.o
nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
+nf_tables-objs += nft_lookup.o nft_dynset.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o
@@ -80,18 +81,21 @@ obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
obj-$(CONFIG_NFT_META) += nft_meta.o
+obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
obj-$(CONFIG_NFT_CT) += nft_ct.o
obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
+obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
-obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
-obj-$(CONFIG_NFT_HASH) += nft_hash.o
+obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o
+obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
obj-$(CONFIG_NFT_REDIR) += nft_redir.o
+obj-$(CONFIG_NFT_HASH) += nft_hash.o
# nf_tables netdev
obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index f39276d1c2d7..c9d90eb64046 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -22,6 +22,7 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -61,33 +62,62 @@ EXPORT_SYMBOL(nf_hooks_needed);
#endif
static DEFINE_MUTEX(nf_hook_mutex);
+#define nf_entry_dereference(e) \
+ rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
-static struct list_head *nf_find_hook_list(struct net *net,
- const struct nf_hook_ops *reg)
+static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
+ const struct nf_hook_ops *reg)
{
- struct list_head *hook_list = NULL;
+ struct nf_hook_entry *hook_head = NULL;
if (reg->pf != NFPROTO_NETDEV)
- hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+ hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
+ [reg->hooknum]);
else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
if (reg->dev && dev_net(reg->dev) == net)
- hook_list = &reg->dev->nf_hooks_ingress;
+ hook_head =
+ nf_entry_dereference(
+ reg->dev->nf_hooks_ingress);
#endif
}
- return hook_list;
+ return hook_head;
}
-struct nf_hook_entry {
- const struct nf_hook_ops *orig_ops;
- struct nf_hook_ops ops;
-};
+/* must hold nf_hook_mutex */
+static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
+ struct nf_hook_entry *entry)
+{
+ switch (reg->pf) {
+ case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+ /* We already checked in nf_register_net_hook() that this is
+ * used from ingress.
+ */
+ rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
+#endif
+ break;
+ default:
+ rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
+ entry);
+ break;
+ }
+}
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
- struct list_head *hook_list;
+ struct nf_hook_entry *hooks_entry;
struct nf_hook_entry *entry;
- struct nf_hook_ops *elem;
+
+ if (reg->pf == NFPROTO_NETDEV) {
+#ifndef CONFIG_NETFILTER_INGRESS
+ if (reg->hooknum == NF_NETDEV_INGRESS)
+ return -EOPNOTSUPP;
+#endif
+ if (reg->hooknum != NF_NETDEV_INGRESS ||
+ !reg->dev || dev_net(reg->dev) != net)
+ return -EINVAL;
+ }
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -95,19 +125,30 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
entry->orig_ops = reg;
entry->ops = *reg;
+ entry->next = NULL;
+
+ mutex_lock(&nf_hook_mutex);
+ hooks_entry = nf_hook_entry_head(net, reg);
- hook_list = nf_find_hook_list(net, reg);
- if (!hook_list) {
- kfree(entry);
- return -ENOENT;
+ if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
+ /* This is the case where we need to insert at the head */
+ entry->next = hooks_entry;
+ hooks_entry = NULL;
}
- mutex_lock(&nf_hook_mutex);
- list_for_each_entry(elem, hook_list, list) {
- if (reg->priority < elem->priority)
- break;
+ while (hooks_entry &&
+ reg->priority >= hooks_entry->orig_ops->priority &&
+ nf_entry_dereference(hooks_entry->next)) {
+ hooks_entry = nf_entry_dereference(hooks_entry->next);
}
- list_add_rcu(&entry->ops.list, elem->list.prev);
+
+ if (hooks_entry) {
+ entry->next = nf_entry_dereference(hooks_entry->next);
+ rcu_assign_pointer(hooks_entry->next, entry);
+ } else {
+ nf_set_hooks_head(net, reg, entry);
+ }
+
mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
@@ -122,24 +163,33 @@ EXPORT_SYMBOL(nf_register_net_hook);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
- struct list_head *hook_list;
- struct nf_hook_entry *entry;
- struct nf_hook_ops *elem;
-
- hook_list = nf_find_hook_list(net, reg);
- if (!hook_list)
- return;
+ struct nf_hook_entry *hooks_entry;
mutex_lock(&nf_hook_mutex);
- list_for_each_entry(elem, hook_list, list) {
- entry = container_of(elem, struct nf_hook_entry, ops);
- if (entry->orig_ops == reg) {
- list_del_rcu(&entry->ops.list);
- break;
+ hooks_entry = nf_hook_entry_head(net, reg);
+ if (hooks_entry && hooks_entry->orig_ops == reg) {
+ nf_set_hooks_head(net, reg,
+ nf_entry_dereference(hooks_entry->next));
+ goto unlock;
+ }
+ while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
+ struct nf_hook_entry *next =
+ nf_entry_dereference(hooks_entry->next);
+ struct nf_hook_entry *nnext;
+
+ if (next->orig_ops != reg) {
+ hooks_entry = next;
+ continue;
}
+ nnext = nf_entry_dereference(next->next);
+ rcu_assign_pointer(hooks_entry->next, nnext);
+ hooks_entry = next;
+ break;
}
+
+unlock:
mutex_unlock(&nf_hook_mutex);
- if (&elem->list == hook_list) {
+ if (!hooks_entry) {
WARN(1, "nf_unregister_net_hook: hook not found!\n");
return;
}
@@ -151,10 +201,10 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
synchronize_net();
- nf_queue_nf_hook_drop(net, &entry->ops);
+ nf_queue_nf_hook_drop(net, hooks_entry);
/* other cpu might still process nfqueue verdict that used reg */
synchronize_net();
- kfree(entry);
+ kfree(hooks_entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);
@@ -188,19 +238,17 @@ EXPORT_SYMBOL(nf_unregister_net_hooks);
static LIST_HEAD(nf_hook_list);
-int nf_register_hook(struct nf_hook_ops *reg)
+static int _nf_register_hook(struct nf_hook_ops *reg)
{
struct net *net, *last;
int ret;
- rtnl_lock();
for_each_net(net) {
ret = nf_register_net_hook(net, reg);
if (ret && ret != -ENOENT)
goto rollback;
}
list_add_tail(&reg->list, &nf_hook_list);
- rtnl_unlock();
return 0;
rollback:
@@ -210,19 +258,34 @@ rollback:
break;
nf_unregister_net_hook(net, reg);
}
+ return ret;
+}
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+ int ret;
+
+ rtnl_lock();
+ ret = _nf_register_hook(reg);
rtnl_unlock();
+
return ret;
}
EXPORT_SYMBOL(nf_register_hook);
-void nf_unregister_hook(struct nf_hook_ops *reg)
+static void _nf_unregister_hook(struct nf_hook_ops *reg)
{
struct net *net;
- rtnl_lock();
list_del(&reg->list);
for_each_net(net)
nf_unregister_net_hook(net, reg);
+}
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+ rtnl_lock();
+ _nf_unregister_hook(reg);
rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
@@ -246,6 +309,26 @@ err:
}
EXPORT_SYMBOL(nf_register_hooks);
+/* Caller MUST take rtnl_lock() */
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < n; i++) {
+ err = _nf_register_hook(&reg[i]);
+ if (err)
+ goto err;
+ }
+ return err;
+
+err:
+ if (i > 0)
+ _nf_unregister_hooks(reg, i);
+ return err;
+}
+EXPORT_SYMBOL(_nf_register_hooks);
+
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
while (n-- > 0)
@@ -253,10 +336,17 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
}
EXPORT_SYMBOL(nf_unregister_hooks);
-unsigned int nf_iterate(struct list_head *head,
- struct sk_buff *skb,
+/* Caller MUST take rtnl_lock */
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
+{
+ while (n-- > 0)
+ _nf_unregister_hook(&reg[n]);
+}
+EXPORT_SYMBOL(_nf_unregister_hooks);
+
+unsigned int nf_iterate(struct sk_buff *skb,
struct nf_hook_state *state,
- struct nf_hook_ops **elemp)
+ struct nf_hook_entry **entryp)
{
unsigned int verdict;
@@ -264,20 +354,23 @@ unsigned int nf_iterate(struct list_head *head,
* The caller must not block between calls to this
* function because of risk of continuing from deleted element.
*/
- list_for_each_entry_continue_rcu((*elemp), head, list) {
- if (state->thresh > (*elemp)->priority)
+ while (*entryp) {
+ if (state->thresh > (*entryp)->ops.priority) {
+ *entryp = rcu_dereference((*entryp)->next);
continue;
+ }
/* Optimization: we don't need to hold module
reference here, since function can't sleep. --RR */
repeat:
- verdict = (*elemp)->hook((*elemp)->priv, skb, state);
+ verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
if (unlikely((verdict & NF_VERDICT_MASK)
> NF_MAX_VERDICT)) {
NFDEBUG("Evil return from %p(%u).\n",
- (*elemp)->hook, state->hook);
+ (*entryp)->ops.hook, state->hook);
+ *entryp = rcu_dereference((*entryp)->next);
continue;
}
#endif
@@ -285,25 +378,23 @@ repeat:
return verdict;
goto repeat;
}
+ *entryp = rcu_dereference((*entryp)->next);
}
return NF_ACCEPT;
}
/* Returns 1 if okfn() needs to be executed by the caller,
- * -EPERM for NF_DROP, 0 otherwise. */
+ * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
- struct nf_hook_ops *elem;
+ struct nf_hook_entry *entry;
unsigned int verdict;
int ret = 0;
- /* We may already have this, but read-locks nest anyway */
- rcu_read_lock();
-
- elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
+ entry = rcu_dereference(state->hook_entries);
next_hook:
- verdict = nf_iterate(state->hook_list, skb, state, &elem);
+ verdict = nf_iterate(skb, state, &entry);
if (verdict == NF_ACCEPT || verdict == NF_STOP) {
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
@@ -312,8 +403,10 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err = nf_queue(skb, elem, state,
- verdict >> NF_VERDICT_QBITS);
+ int err;
+
+ RCU_INIT_POINTER(state->hook_entries, entry);
+ err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
@@ -321,7 +414,6 @@ next_hook:
kfree_skb(skb);
}
}
- rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
@@ -441,7 +533,7 @@ static int __net_init netfilter_net_init(struct net *net)
for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
for (h = 0; h < NF_MAX_HOOKS; h++)
- INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+ RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
}
#ifdef CONFIG_PROC_FS
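
The core.c rework above only changes how registered hooks are stored (an RCU-protected, singly linked nf_hook_entry chain ordered by priority, instead of a list_head); the nf_register_net_hook() API that modules call is unchanged. For reference, a minimal hook of the kind that ends up on that chain, with illustrative names and an arbitrary priority:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

/* Hypothetical hook function: accepts everything, shown only for the shape
 * of the API whose internal storage is reworked above.
 */
static unsigned int mymod_hook(void *priv, struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops mymod_ops = {
	.hook		= mymod_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FILTER,
};

/* Registered/unregistered from module init/exit, e.g.:
 *	nf_register_net_hook(&init_net, &mymod_ops);
 *	nf_unregister_net_hook(&init_net, &mymod_ops);
 */
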
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index f04fd8df210b..fc230d99aa3b 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -281,13 +281,10 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
h = nf_conntrack_find_get(cp->ipvs->net, &nf_ct_zone_dflt, &tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
- /* Show what happens instead of calling nf_ct_kill() */
- if (del_timer(&ct->timeout)) {
- IP_VS_DBG(7, "%s: ct=%p, deleted conntrack timer for tuple="
+ if (nf_ct_kill(ct)) {
+ IP_VS_DBG(7, "%s: ct=%p, deleted conntrack for tuple="
FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&tuple));
- if (ct->timeout.function)
- ct->timeout.function(ct->timeout.data);
} else {
IP_VS_DBG(7, "%s: ct=%p, no conntrack timer for tuple="
FMT_TUPLE "\n",
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9934b0c93c1e..ba6a1d421222 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -72,12 +72,24 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+struct conntrack_gc_work {
+ struct delayed_work dwork;
+ u32 last_bucket;
+ bool exiting;
+};
+
static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
-static __read_mostly seqcount_t nf_conntrack_generation;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
+#define GC_MAX_BUCKETS_DIV 64u
+#define GC_MAX_BUCKETS 8192u
+#define GC_INTERVAL (5 * HZ)
+#define GC_MAX_EVICTS 256u
+
+static struct conntrack_gc_work conntrack_gc_work;
+
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
spin_lock(lock);
@@ -164,7 +176,7 @@ unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_max);
+seqcount_t nf_conntrack_generation __read_mostly;
DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
@@ -367,12 +379,10 @@ static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
- struct net *net = nf_ct_net(ct);
struct nf_conntrack_l4proto *l4proto;
pr_debug("destroy_conntrack(%p)\n", ct);
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
- NF_CT_ASSERT(!timer_pending(&ct->timeout));
if (unlikely(nf_ct_is_template(ct))) {
nf_ct_tmpl_free(ct);
@@ -395,7 +405,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_ct_del_from_dying_or_unconfirmed_list(ct);
- NF_CT_STAT_INC(net, delete);
local_bh_enable();
if (ct->master)
@@ -427,7 +436,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
nf_ct_add_to_dying_list(ct);
- NF_CT_STAT_INC(net, delete_list);
local_bh_enable();
}
@@ -435,35 +443,30 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
struct nf_conn_tstamp *tstamp;
+ if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
+ return false;
+
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_get_real_ns();
- if (nf_ct_is_dying(ct))
- goto delete;
-
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
- /* destroy event was not delivered */
+ /* destroy event was not delivered. nf_ct_put will
+ * be done by event cache worker on redelivery.
+ */
nf_ct_delete_from_lists(ct);
nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
return false;
}
nf_conntrack_ecache_work(nf_ct_net(ct));
- set_bit(IPS_DYING_BIT, &ct->status);
- delete:
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);
-static void death_by_timeout(unsigned long ul_conntrack)
-{
- nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
-}
-
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
const struct nf_conntrack_tuple *tuple,
@@ -481,22 +484,17 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
net_eq(net, nf_ct_net(ct));
}
-/* must be called with rcu read lock held */
-void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+/* caller must hold rcu readlock and none of the nf_conntrack_locks */
+static void nf_ct_gc_expired(struct nf_conn *ct)
{
- struct hlist_nulls_head *hptr;
- unsigned int sequence, hsz;
+ if (!atomic_inc_not_zero(&ct->ct_general.use))
+ return;
- do {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
- hsz = nf_conntrack_htable_size;
- hptr = nf_conntrack_hash;
- } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ if (nf_ct_should_gc(ct))
+ nf_ct_kill(ct);
- *hash = hptr;
- *hsize = hsz;
+ nf_ct_put(ct);
}
-EXPORT_SYMBOL_GPL(nf_conntrack_get_ht);
/*
* Warning :
@@ -510,21 +508,26 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
- unsigned int bucket, sequence;
+ unsigned int bucket, hsize;
begin:
- do {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
- bucket = scale_hash(hash);
- ct_hash = nf_conntrack_hash;
- } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ nf_conntrack_get_ht(&ct_hash, &hsize);
+ bucket = reciprocal_scale(hash, hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
- if (nf_ct_key_equal(h, tuple, zone, net)) {
- NF_CT_STAT_INC_ATOMIC(net, found);
- return h;
+ struct nf_conn *ct;
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (nf_ct_is_expired(ct)) {
+ nf_ct_gc_expired(ct);
+ continue;
}
- NF_CT_STAT_INC_ATOMIC(net, searched);
+
+ if (nf_ct_is_dying(ct))
+ continue;
+
+ if (nf_ct_key_equal(h, tuple, zone, net))
+ return h;
}
/*
* if the nulls value we got at the end of this lookup is
@@ -618,7 +621,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
zone, net))
goto out;
- add_timer(&ct->timeout);
smp_wmb();
/* The caller holds a reference to this object */
atomic_set(&ct->ct_general.use, 2);
@@ -771,8 +773,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
- ct->timeout.expires += jiffies;
- add_timer(&ct->timeout);
+ ct->timeout += nfct_time_stamp;
atomic_inc(&ct->ct_general.use);
ct->status |= IPS_CONFIRMED;
@@ -791,7 +792,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/
__nf_conntrack_hash_insert(ct, hash, reply_hash);
nf_conntrack_double_unlock(hash, reply_hash);
- NF_CT_STAT_INC(net, insert);
local_bh_enable();
help = nfct_help(ct);
@@ -823,29 +823,40 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
- unsigned int hash, sequence;
+ unsigned int hash, hsize;
struct hlist_nulls_node *n;
struct nf_conn *ct;
zone = nf_ct_zone(ignored_conntrack);
rcu_read_lock();
- do {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
- hash = hash_conntrack(net, tuple);
- ct_hash = nf_conntrack_hash;
- } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ begin:
+ nf_conntrack_get_ht(&ct_hash, &hsize);
+ hash = __hash_conntrack(net, tuple, hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
- if (ct != ignored_conntrack &&
- nf_ct_key_equal(h, tuple, zone, net)) {
+
+ if (ct == ignored_conntrack)
+ continue;
+
+ if (nf_ct_is_expired(ct)) {
+ nf_ct_gc_expired(ct);
+ continue;
+ }
+
+ if (nf_ct_key_equal(h, tuple, zone, net)) {
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
}
- NF_CT_STAT_INC_ATOMIC(net, searched);
}
+
+ if (get_nulls_value(n) != hash) {
+ NF_CT_STAT_INC_ATOMIC(net, search_restart);
+ goto begin;
+ }
+
rcu_read_unlock();
return 0;
@@ -867,6 +878,11 @@ static unsigned int early_drop_list(struct net *net,
hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
+ if (nf_ct_is_expired(tmp)) {
+ nf_ct_gc_expired(tmp);
+ continue;
+ }
+
if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
!net_eq(nf_ct_net(tmp), net) ||
nf_ct_is_dying(tmp))
@@ -884,7 +900,6 @@ static unsigned int early_drop_list(struct net *net,
*/
if (net_eq(nf_ct_net(tmp), net) &&
nf_ct_is_confirmed(tmp) &&
- del_timer(&tmp->timeout) &&
nf_ct_delete(tmp, 0, 0))
drops++;
@@ -900,14 +915,11 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
- unsigned hash, sequence, drops;
+ unsigned int hash, hsize, drops;
rcu_read_lock();
- do {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
- hash = scale_hash(_hash++);
- ct_hash = nf_conntrack_hash;
- } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ nf_conntrack_get_ht(&ct_hash, &hsize);
+ hash = reciprocal_scale(_hash++, hsize);
drops = early_drop_list(net, &ct_hash[hash]);
rcu_read_unlock();
@@ -921,6 +933,69 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
return false;
}
+static void gc_worker(struct work_struct *work)
+{
+ unsigned int i, goal, buckets = 0, expired_count = 0;
+ unsigned long next_run = GC_INTERVAL;
+ unsigned int ratio, scanned = 0;
+ struct conntrack_gc_work *gc_work;
+
+ gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+
+ goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS);
+ i = gc_work->last_bucket;
+
+ do {
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_head *ct_hash;
+ struct hlist_nulls_node *n;
+ unsigned int hashsz;
+ struct nf_conn *tmp;
+
+ i++;
+ rcu_read_lock();
+
+ nf_conntrack_get_ht(&ct_hash, &hashsz);
+ if (i >= hashsz)
+ i = 0;
+
+ hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+ tmp = nf_ct_tuplehash_to_ctrack(h);
+
+ scanned++;
+ if (nf_ct_is_expired(tmp)) {
+ nf_ct_gc_expired(tmp);
+ expired_count++;
+ continue;
+ }
+ }
+
+ /* could check get_nulls_value() here and restart if ct
+ * was moved to another chain. But given gc is best-effort
+ * we will just continue with next hash slot.
+ */
+ rcu_read_unlock();
+ cond_resched_rcu_qs();
+ } while (++buckets < goal &&
+ expired_count < GC_MAX_EVICTS);
+
+ if (gc_work->exiting)
+ return;
+
+ ratio = scanned ? expired_count * 100 / scanned : 0;
+ if (ratio >= 90)
+ next_run = 0;
+
+ gc_work->last_bucket = i;
+ schedule_delayed_work(&gc_work->dwork, next_run);
+}
+
+static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+{
+ INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ gc_work->exiting = false;
+}
+
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
@@ -957,8 +1032,6 @@ __nf_conntrack_alloc(struct net *net,
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
- /* Don't set timer yet: wait for confirmation */
- setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset[0], 0,
offsetof(struct nf_conn, proto) -
@@ -1096,10 +1169,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
spin_unlock(&nf_conntrack_expect_lock);
}
- if (!exp) {
+ if (!exp)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
- NF_CT_STAT_INC(net, new);
- }
/* Now it is inserted into the unconfirmed list, bump refcount */
nf_conntrack_get(&ct->ct_general);
@@ -1204,7 +1275,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
skb->nfct = NULL;
}
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
l3proto = __nf_ct_l3proto_find(pf);
ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
&dataoff, &protonum);
@@ -1332,7 +1403,6 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
unsigned long extra_jiffies,
int do_acct)
{
- NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
NF_CT_ASSERT(skb);
/* Only update if this is not a fixed timeout */
@@ -1340,39 +1410,25 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
goto acct;
/* If not in hash table, timer will not be active yet */
- if (!nf_ct_is_confirmed(ct)) {
- ct->timeout.expires = extra_jiffies;
- } else {
- unsigned long newtime = jiffies + extra_jiffies;
-
- /* Only update the timeout if the new timeout is at least
- HZ jiffies from the old timeout. Need del_timer for race
- avoidance (may already be dying). */
- if (newtime - ct->timeout.expires >= HZ)
- mod_timer_pending(&ct->timeout, newtime);
- }
+ if (nf_ct_is_confirmed(ct))
+ extra_jiffies += nfct_time_stamp;
+ ct->timeout = extra_jiffies;
acct:
if (do_acct)
nf_ct_acct_update(ct, ctinfo, skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
-bool __nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- int do_acct)
+bool nf_ct_kill_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb)
{
- if (do_acct)
- nf_ct_acct_update(ct, ctinfo, skb->len);
+ nf_ct_acct_update(ct, ctinfo, skb->len);
- if (del_timer(&ct->timeout)) {
- ct->timeout.function((unsigned long)ct);
- return true;
- }
- return false;
+ return nf_ct_delete(ct, 0, 0);
}
-EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
+EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1505,11 +1561,8 @@ void nf_ct_iterate_cleanup(struct net *net,
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daisies... */
- if (del_timer(&ct->timeout))
- nf_ct_delete(ct, portid, report);
-
- /* ... else the timer will get him soon. */
+ nf_ct_delete(ct, portid, report);
nf_ct_put(ct);
cond_resched();
}
@@ -1545,6 +1598,7 @@ static int untrack_refs(void)
void nf_conntrack_cleanup_start(void)
{
+ conntrack_gc_work.exiting = true;
RCU_INIT_POINTER(ip_ct_attach, NULL);
}
@@ -1554,6 +1608,7 @@ void nf_conntrack_cleanup_end(void)
while (untrack_refs() > 0)
schedule();
+ cancel_delayed_work_sync(&conntrack_gc_work.dwork);
nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
nf_conntrack_proto_fini();
@@ -1828,6 +1883,10 @@ int nf_conntrack_init_start(void)
}
/* - and look it like as a confirmed connection */
nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
+
+ conntrack_gc_work_init(&conntrack_gc_work);
+ schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL);
+
return 0;
err_proto:
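
For orientation, the conntrack hunks above replace the per-entry struct timer_list with a u32 jiffies timestamp kept in ct->timeout, reaped lazily on lookup and by the new gc_worker(). A minimal sketch of the expiry helpers this conversion relies on (they are introduced elsewhere in the series; the bodies below are assumed, not quoted from the header):

/* Minimal sketch, not part of the patch: assumed shape of the timestamp
 * helpers used by the hunks above.  nfct_time_stamp is taken to be the
 * low 32 bits of jiffies.
 */
static inline s32 nf_ct_expires(const struct nf_conn *ct)
{
	return (s32)(ct->timeout - nfct_time_stamp);	/* remaining lifetime, may go negative */
}

static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
	return nf_ct_expires(ct) <= 0;
}

static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
	/* only reap entries that are expired, confirmed and not already dying */
	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
	       !nf_ct_is_dying(ct);
}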
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index d28011b42845..da9df2d56e66 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -49,8 +49,13 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+ struct nf_conntrack_ecache *e;
- if (nf_ct_is_dying(ct))
+ if (!nf_ct_is_confirmed(ct))
+ continue;
+
+ e = nf_ct_ecache_find(ct);
+ if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
continue;
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
@@ -58,8 +63,7 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
break;
}
- /* we've got the event delivered, now it's dying */
- set_bit(IPS_DYING_BIT, &ct->status);
+ e->state = NFCT_ECACHE_DESTROY_SENT;
refs[evicted] = ct;
if (++evicted >= ARRAY_SIZE(refs)) {
@@ -130,7 +134,7 @@ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
if (!e)
goto out_unlock;
- if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
+ if (nf_ct_is_confirmed(ct)) {
struct nf_ct_event item = {
.ct = ct,
.portid = e->portid ? e->portid : portid,
@@ -150,11 +154,13 @@ int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
* triggered by a process, we store the PORTID
* to include it in the retransmission.
*/
- if (eventmask & (1 << IPCT_DESTROY) &&
- e->portid == 0 && portid != 0)
- e->portid = portid;
- else
+ if (eventmask & (1 << IPCT_DESTROY)) {
+ if (e->portid == 0 && portid != 0)
+ e->portid = portid;
+ e->state = NFCT_ECACHE_DESTROY_FAIL;
+ } else {
e->missed |= eventmask;
+ }
} else {
e->missed &= ~missed;
}
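
The ecache hunks above stop using the IPS_DYING bit to gate destroy-event redelivery and instead track delivery state per entry. The state values referenced (NFCT_ECACHE_DESTROY_FAIL, NFCT_ECACHE_DESTROY_SENT) are assumed to be defined roughly as follows; this is a sketch, the authoritative enum lives in nf_conntrack_ecache.h:

enum nf_ct_ecache_state {
	NFCT_ECACHE_UNKNOWN,		/* no failed destroy event pending */
	NFCT_ECACHE_DESTROY_FAIL,	/* destroy event could not be delivered */
	NFCT_ECACHE_DESTROY_SENT,	/* destroy event delivered on retry */
};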
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 43147005bea3..e3ed20060878 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -237,7 +237,7 @@ static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
}
delim = data[0];
if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
- pr_debug("try_eprt: invalid delimitter.\n");
+ pr_debug("try_eprt: invalid delimiter.\n");
return 0;
}
@@ -301,8 +301,6 @@ static int find_pattern(const char *data, size_t dlen,
size_t i = plen;
pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
- if (dlen == 0)
- return 0;
if (dlen <= plen) {
/* Short packet: try for partial? */
@@ -311,19 +309,8 @@ static int find_pattern(const char *data, size_t dlen,
else return 0;
}
- if (strncasecmp(data, pattern, plen) != 0) {
-#if 0
- size_t i;
-
- pr_debug("ftp: string mismatch\n");
- for (i = 0; i < plen; i++) {
- pr_debug("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
- i, data[i], data[i],
- pattern[i], pattern[i]);
- }
-#endif
+ if (strncasecmp(data, pattern, plen) != 0)
return 0;
- }
pr_debug("Pattern matches!\n");
/* Now we've found the constant string, try to skip
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 5c0db5c64734..f65d93639d12 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -736,7 +736,7 @@ static int callforward_do_filter(struct net *net,
const struct nf_afinfo *afinfo;
int ret = 0;
- /* rcu_read_lock()ed by nf_hook_slow() */
+ /* rcu_read_lock()ed by nf_hook_thresh */
afinfo = nf_get_afinfo(family);
if (!afinfo)
return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index b989b81ac156..336e21559e01 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -189,7 +189,6 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
struct nf_conntrack_helper *helper = NULL;
struct nf_conn_help *help;
struct net *net = nf_ct_net(ct);
- int ret = 0;
/* We already got a helper explicitly attached. The function
* nf_conntrack_alter_reply - in case NAT is in use - asks for looking
@@ -223,15 +222,13 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
if (helper == NULL) {
if (help)
RCU_INIT_POINTER(help->helper, NULL);
- goto out;
+ return 0;
}
if (help == NULL) {
help = nf_ct_helper_ext_add(ct, helper, flags);
- if (help == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ if (help == NULL)
+ return -ENOMEM;
} else {
/* We only allow helper re-assignment of the same sort since
* we cannot reallocate the helper extension area.
@@ -240,13 +237,13 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
if (tmp && tmp->help != helper->help) {
RCU_INIT_POINTER(help->helper, NULL);
- goto out;
+ return 0;
}
}
rcu_assign_pointer(help->helper, helper);
-out:
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
@@ -349,7 +346,7 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
/* Called from the helper function, this call never fails */
help = nfct_help(ct);
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index fdfc71f416b7..27540455dc62 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -149,10 +149,7 @@ nla_put_failure:
static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
- long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
-
- if (timeout < 0)
- timeout = 0;
+ long timeout = nf_ct_expires(ct) / HZ;
if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
goto nla_put_failure;
@@ -818,14 +815,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
- int res;
+ struct nf_conn *nf_ct_evict[8];
+ int res, i;
spinlock_t *lockp;
last = (struct nf_conn *)cb->args[1];
+ i = 0;
local_bh_disable();
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
+ while (i) {
+ i--;
+ if (nf_ct_should_gc(nf_ct_evict[i]))
+ nf_ct_kill(nf_ct_evict[i]);
+ nf_ct_put(nf_ct_evict[i]);
+ }
+
lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
nf_conntrack_lock(lockp);
if (cb->args[0] >= nf_conntrack_htable_size) {
@@ -837,6 +843,13 @@ restart:
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
+ if (nf_ct_is_expired(ct)) {
+ if (i < ARRAY_SIZE(nf_ct_evict) &&
+ atomic_inc_not_zero(&ct->ct_general.use))
+ nf_ct_evict[i++] = ct;
+ continue;
+ }
+
if (!net_eq(net, nf_ct_net(ct)))
continue;
@@ -878,6 +891,13 @@ out:
if (last)
nf_ct_put(last);
+ while (i) {
+ i--;
+ if (nf_ct_should_gc(nf_ct_evict[i]))
+ nf_ct_kill(nf_ct_evict[i]);
+ nf_ct_put(nf_ct_evict[i]);
+ }
+
return skb->len;
}
@@ -1147,9 +1167,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
}
}
- if (del_timer(&ct->timeout))
- nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
-
+ nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
nf_ct_put(ct);
return 0;
@@ -1517,11 +1535,10 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
{
u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
- if (!del_timer(&ct->timeout))
- return -ETIME;
+ ct->timeout = nfct_time_stamp + timeout * HZ;
- ct->timeout.expires = jiffies + timeout * HZ;
- add_timer(&ct->timeout);
+ if (test_bit(IPS_DYING_BIT, &ct->status))
+ return -ETIME;
return 0;
}
@@ -1719,9 +1736,8 @@ ctnetlink_create_conntrack(struct net *net,
if (!cda[CTA_TIMEOUT])
goto err1;
- ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
- ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
+ ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
rcu_read_lock();
if (cda[CTA_HELP]) {
@@ -1968,13 +1984,9 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(cpu);
- if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
- nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
- nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
+ if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
- nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
- nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
htonl(st->insert_failed)) ||
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 5588c7ae1ac2..f60a4755d71e 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -157,8 +157,7 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
pr_debug("setting timeout of conntrack %p to 0\n", sibling);
sibling->proto.gre.timeout = 0;
sibling->proto.gre.stream_timeout = 0;
- if (del_timer(&sibling->timeout))
- sibling->timeout.function((unsigned long)sibling);
+ nf_ct_kill(sibling);
nf_ct_put(sibling);
return 1;
} else {
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index b65d5864b6d9..8d2c7d8c666a 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -159,54 +159,6 @@ static int kill_l4proto(struct nf_conn *i, void *data)
nf_ct_l3num(i) == l4proto->l3proto;
}
-static struct nf_ip_net *nf_ct_l3proto_net(struct net *net,
- struct nf_conntrack_l3proto *l3proto)
-{
- if (l3proto->l3proto == PF_INET)
- return &net->ct.nf_ct_proto;
- else
- return NULL;
-}
-
-static int nf_ct_l3proto_register_sysctl(struct net *net,
- struct nf_conntrack_l3proto *l3proto)
-{
- int err = 0;
- struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
- /* nf_conntrack_l3proto_ipv6 doesn't support sysctl */
- if (in == NULL)
- return 0;
-
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- if (in->ctl_table != NULL) {
- err = nf_ct_register_sysctl(net,
- &in->ctl_table_header,
- l3proto->ctl_table_path,
- in->ctl_table);
- if (err < 0) {
- kfree(in->ctl_table);
- in->ctl_table = NULL;
- }
- }
-#endif
- return err;
-}
-
-static void nf_ct_l3proto_unregister_sysctl(struct net *net,
- struct nf_conntrack_l3proto *l3proto)
-{
- struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
-
- if (in == NULL)
- return;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- if (in->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(&in->ctl_table_header,
- &in->ctl_table,
- 0);
-#endif
-}
-
int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
@@ -241,7 +193,7 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_register);
int nf_ct_l3proto_pernet_register(struct net *net,
struct nf_conntrack_l3proto *proto)
{
- int ret = 0;
+ int ret;
if (proto->init_net) {
ret = proto->init_net(net);
@@ -249,7 +201,7 @@ int nf_ct_l3proto_pernet_register(struct net *net,
return ret;
}
- return nf_ct_l3proto_register_sysctl(net, proto);
+ return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_register);
@@ -272,8 +224,6 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister);
void nf_ct_l3proto_pernet_unregister(struct net *net,
struct nf_conntrack_l3proto *proto)
{
- nf_ct_l3proto_unregister_sysctl(net, proto);
-
/* Remove all conntrack entries for this protocol */
nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
@@ -312,26 +262,6 @@ int nf_ct_l4proto_register_sysctl(struct net *net,
}
}
}
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_table != NULL) {
- if (err < 0) {
- nf_ct_kfree_compat_sysctl_table(pn);
- goto out;
- }
- err = nf_ct_register_sysctl(net,
- &pn->ctl_compat_header,
- "net/ipv4/netfilter",
- pn->ctl_compat_table);
- if (err == 0)
- goto out;
-
- nf_ct_kfree_compat_sysctl_table(pn);
- nf_ct_unregister_sysctl(&pn->ctl_table_header,
- &pn->ctl_table,
- pn->users);
- }
-out:
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
return err;
}
@@ -346,13 +276,6 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net,
nf_ct_unregister_sysctl(&pn->ctl_table_header,
&pn->ctl_table,
pn->users);
-
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_header != NULL)
- nf_ct_unregister_sysctl(&pn->ctl_compat_header,
- &pn->ctl_compat_table,
- 0);
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 399a38fd685a..a45bee52dccc 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -402,7 +402,8 @@ static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
{
struct dccp_hdr _hdr, *dh;
- dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ /* Actually only need first 4 bytes to get ports. */
+ dh = skb_header_pointer(skb, dataoff, 4, &_hdr);
if (dh == NULL)
return false;
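
The DCCP hunk above (and the matching SCTP, TCP, UDP and UDPlite hunks below) linearize only the first 4 bytes of the transport header, since just the port pair is needed to build the tuple. A standalone sketch of that pattern follows; the two-port struct and function name are illustrative assumptions, not taken from the patch:

/* Illustrative sketch of the "ports only" header access used in these hunks. */
struct minimal_ports {
	__be16 src;
	__be16 dst;
};

static bool example_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
				 struct nf_conntrack_tuple *tuple)
{
	const struct minimal_ports *p;
	struct minimal_ports _p;

	/* copies the 4 bytes into _p only if they are not linear in the skb */
	p = skb_header_pointer(skb, dataoff, sizeof(_p), &_p);
	if (p == NULL)
		return false;

	tuple->src.u.all = p->src;
	tuple->dst.u.all = p->dst;
	return true;
}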
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 86dc752e5349..d5868bad33a7 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -151,17 +151,6 @@ static struct ctl_table generic_sysctl_table[] = {
},
{ }
};
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-static struct ctl_table generic_compat_sysctl_table[] = {
- {
- .procname = "ip_conntrack_generic_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
@@ -179,40 +168,14 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int generic_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
- struct nf_generic_net *gn)
-{
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table,
- sizeof(generic_compat_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_compat_table)
- return -ENOMEM;
-
- pn->ctl_compat_table[0].data = &gn->timeout;
-#endif
-#endif
- return 0;
-}
-
static int generic_init_net(struct net *net, u_int16_t proto)
{
- int ret;
struct nf_generic_net *gn = generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
- ret = generic_kmemdup_compat_sysctl_table(pn, gn);
- if (ret < 0)
- return ret;
-
- ret = generic_kmemdup_sysctl_table(pn, gn);
- if (ret < 0)
- nf_ct_kfree_compat_sysctl_table(pn);
-
- return ret;
+ return generic_kmemdup_sysctl_table(pn, gn);
}
static struct nf_proto_net *generic_get_net_proto(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a96451a7af20..9a715f88b2f1 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -192,15 +192,15 @@ static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
- const struct gre_hdr_pptp *pgrehdr;
- struct gre_hdr_pptp _pgrehdr;
+ const struct pptp_gre_header *pgrehdr;
+ struct pptp_gre_header _pgrehdr;
__be16 srckey;
- const struct gre_hdr *grehdr;
- struct gre_hdr _grehdr;
+ const struct gre_base_hdr *grehdr;
+ struct gre_base_hdr _grehdr;
/* first only delinearize old RFC1701 GRE header */
grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
- if (!grehdr || grehdr->version != GRE_VERSION_PPTP) {
+ if (!grehdr || (grehdr->flags & GRE_VERSION) != GRE_VERSION_1) {
/* try to behave like "nf_conntrack_proto_generic" */
tuple->src.u.all = 0;
tuple->dst.u.all = 0;
@@ -212,8 +212,8 @@ static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
if (!pgrehdr)
return true;
- if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
- pr_debug("GRE_VERSION_PPTP but unknown proto\n");
+ if (grehdr->protocol != GRE_PROTO_PPP) {
+ pr_debug("Unsupported GRE proto(0x%x)\n", ntohs(grehdr->protocol));
return false;
}
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 1d7ab960a9e6..982ea62606c7 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -161,8 +161,8 @@ static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
const struct sctphdr *hp;
struct sctphdr _hdr;
- /* Actually only need first 8 bytes. */
- hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+ /* Actually only need first 4 bytes to get ports. */
+ hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
if (hp == NULL)
return false;
@@ -705,54 +705,6 @@ static struct ctl_table sctp_sysctl_table[] = {
},
{ }
};
-
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-static struct ctl_table sctp_compat_sysctl_table[] = {
- {
- .procname = "ip_conntrack_sctp_timeout_closed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_cookie_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_cookie_echoed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_shutdown_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_shutdown_recd",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif
static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
@@ -781,32 +733,8 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int sctp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
- struct sctp_net *sn)
-{
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- pn->ctl_compat_table = kmemdup(sctp_compat_sysctl_table,
- sizeof(sctp_compat_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_compat_table)
- return -ENOMEM;
-
- pn->ctl_compat_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
- pn->ctl_compat_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
- pn->ctl_compat_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
- pn->ctl_compat_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
- pn->ctl_compat_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
- pn->ctl_compat_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
- pn->ctl_compat_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
-#endif
-#endif
- return 0;
-}
-
static int sctp_init_net(struct net *net, u_int16_t proto)
{
- int ret;
struct sctp_net *sn = sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
@@ -817,18 +745,7 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
sn->timeouts[i] = sctp_timeouts[i];
}
- if (proto == AF_INET) {
- ret = sctp_kmemdup_compat_sysctl_table(pn, sn);
- if (ret < 0)
- return ret;
-
- ret = sctp_kmemdup_sysctl_table(pn, sn);
- if (ret < 0)
- nf_ct_kfree_compat_sysctl_table(pn);
- } else
- ret = sctp_kmemdup_sysctl_table(pn, sn);
-
- return ret;
+ return sctp_kmemdup_sysctl_table(pn, sn);
}
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 70c8381641a7..69f687740c76 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -282,8 +282,8 @@ static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
const struct tcphdr *hp;
struct tcphdr _hdr;
- /* Actually only need first 8 bytes. */
- hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+ /* Actually only need first 4 bytes to get ports. */
+ hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
if (hp == NULL)
return false;
@@ -1481,90 +1481,6 @@ static struct ctl_table tcp_sysctl_table[] = {
},
{ }
};
-
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-static struct ctl_table tcp_compat_sysctl_table[] = {
- {
- .procname = "ip_conntrack_tcp_timeout_syn_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_syn_sent2",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_syn_recv",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_fin_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_close_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_last_ack",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_time_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_close",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_timeout_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_tcp_loose",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_tcp_be_liberal",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "ip_conntrack_tcp_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
@@ -1597,38 +1513,8 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
- struct nf_tcp_net *tn)
-{
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
- sizeof(tcp_compat_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_compat_table)
- return -ENOMEM;
-
- pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
- pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
- pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
- pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
- pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
- pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
- pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
- pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
- pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
- pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
- pn->ctl_compat_table[10].data = &tn->tcp_loose;
- pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
- pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
-#endif
-#endif
- return 0;
-}
-
static int tcp_init_net(struct net *net, u_int16_t proto)
{
- int ret;
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
@@ -1643,18 +1529,7 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
}
- if (proto == AF_INET) {
- ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
- if (ret < 0)
- return ret;
-
- ret = tcp_kmemdup_sysctl_table(pn, tn);
- if (ret < 0)
- nf_ct_kfree_compat_sysctl_table(pn);
- } else
- ret = tcp_kmemdup_sysctl_table(pn, tn);
-
- return ret;
+ return tcp_kmemdup_sysctl_table(pn, tn);
}
static struct nf_proto_net *tcp_get_net_proto(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 4fd040575ffe..20f35ed68030 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -44,8 +44,8 @@ static bool udp_pkt_to_tuple(const struct sk_buff *skb,
const struct udphdr *hp;
struct udphdr _hdr;
- /* Actually only need first 8 bytes. */
- hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ /* Actually only need first 4 bytes to get ports. */
+ hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
if (hp == NULL)
return false;
@@ -218,23 +218,6 @@ static struct ctl_table udp_sysctl_table[] = {
},
{ }
};
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
-static struct ctl_table udp_compat_sysctl_table[] = {
- {
- .procname = "ip_conntrack_udp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "ip_conntrack_udp_timeout_stream",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
@@ -254,27 +237,8 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
- struct nf_udp_net *un)
-{
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table,
- sizeof(udp_compat_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_compat_table)
- return -ENOMEM;
-
- pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
- pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED];
-#endif
-#endif
- return 0;
-}
-
static int udp_init_net(struct net *net, u_int16_t proto)
{
- int ret;
struct nf_udp_net *un = udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
@@ -285,18 +249,7 @@ static int udp_init_net(struct net *net, u_int16_t proto)
un->timeouts[i] = udp_timeouts[i];
}
- if (proto == AF_INET) {
- ret = udp_kmemdup_compat_sysctl_table(pn, un);
- if (ret < 0)
- return ret;
-
- ret = udp_kmemdup_sysctl_table(pn, un);
- if (ret < 0)
- nf_ct_kfree_compat_sysctl_table(pn);
- } else
- ret = udp_kmemdup_sysctl_table(pn, un);
-
- return ret;
+ return udp_kmemdup_sysctl_table(pn, un);
}
static struct nf_proto_net *udp_get_net_proto(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 9d692f5adb94..029206e8dec4 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -54,7 +54,8 @@ static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
const struct udphdr *hp;
struct udphdr _hdr;
- hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ /* Actually only need first 4 bytes to get ports. */
+ hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
if (hp == NULL)
return false;
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index dff0f0cc59e4..ef7063eced7c 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -169,7 +169,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
s32 seqoff, ackoff;
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
struct nf_ct_seqadj *this_way, *other_way;
- int res;
+ int res = 1;
this_way = &seqadj->seq[dir];
other_way = &seqadj->seq[!dir];
@@ -184,27 +184,31 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
else
seqoff = this_way->offset_before;
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+ pr_debug("Adjusting sequence number from %u->%u\n",
+ ntohl(tcph->seq), ntohl(newseq));
+ tcph->seq = newseq;
+
+ if (!tcph->ack)
+ goto out;
+
if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
other_way->correction_pos))
ackoff = other_way->offset_after;
else
ackoff = other_way->offset_before;
- newseq = htonl(ntohl(tcph->seq) + seqoff);
newack = htonl(ntohl(tcph->ack_seq) - ackoff);
-
- inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
false);
-
- pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+ pr_debug("Adjusting ack number from %u->%u, ack from %u->%u\n",
ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
ntohl(newack));
-
- tcph->seq = newseq;
tcph->ack_seq = newack;
res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+out:
spin_unlock_bh(&ct->lock);
return res;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 7d77217de6a3..621b81c7bddc 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -83,9 +83,10 @@ static int digits_len(const struct nf_conn *ct, const char *dptr,
static int iswordc(const char c)
{
if (isalnum(c) || c == '!' || c == '"' || c == '%' ||
- (c >= '(' && c <= '/') || c == ':' || c == '<' || c == '>' ||
+ (c >= '(' && c <= '+') || c == ':' || c == '<' || c == '>' ||
c == '?' || (c >= '[' && c <= ']') || c == '_' || c == '`' ||
- c == '{' || c == '}' || c == '~')
+ c == '{' || c == '}' || c == '~' || (c >= '-' && c <= '/') ||
+ c == '\'')
return 1;
return 0;
}
@@ -329,13 +330,12 @@ static const char *sip_follow_continuation(const char *dptr, const char *limit)
static const char *sip_skip_whitespace(const char *dptr, const char *limit)
{
for (; dptr < limit; dptr++) {
- if (*dptr == ' ')
+ if (*dptr == ' ' || *dptr == '\t')
continue;
if (*dptr != '\r' && *dptr != '\n')
break;
dptr = sip_follow_continuation(dptr, limit);
- if (dptr == NULL)
- return NULL;
+ break;
}
return dptr;
}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 9f267c3ffb39..5f446cd9f3fd 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -212,6 +212,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
return 0;
+ if (nf_ct_should_gc(ct)) {
+ nf_ct_kill(ct);
+ goto release;
+ }
+
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
goto release;
@@ -228,8 +233,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%-8s %u %-8s %u %ld ",
l3proto->name, nf_ct_l3num(ct),
l4proto->name, nf_ct_protonum(ct),
- timer_pending(&ct->timeout)
- ? (long)(ct->timeout.expires - jiffies)/HZ : 0);
+ nf_ct_expires(ct) / HZ);
if (l4proto->print_conntrack)
l4proto->print_conntrack(s, ct);
@@ -353,13 +357,13 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
- st->searched,
+ 0,
st->found,
- st->new,
+ 0,
st->invalid,
st->ignore,
- st->delete,
- st->delete_list,
+ 0,
+ 0,
st->insert,
st->insert_failed,
st->drop,
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 065522564ac6..e0adb5959342 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -13,13 +13,13 @@
/* core.c */
-unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
- struct nf_hook_state *state, struct nf_hook_ops **elemp);
+unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry **entryp);
/* nf_queue.c */
-int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
- struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ unsigned int queuenum);
+void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
/* nf_log.c */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index aa5847a16713..3dca90dc24ad 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -39,12 +39,12 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
return NULL;
}
-void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
+int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
{
const struct nf_logger *log;
- if (pf == NFPROTO_UNSPEC)
- return;
+ if (pf == NFPROTO_UNSPEC || pf >= ARRAY_SIZE(net->nf.nf_loggers))
+ return -EOPNOTSUPP;
mutex_lock(&nf_log_mutex);
log = nft_log_dereference(net->nf.nf_loggers[pf]);
@@ -52,6 +52,8 @@ void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
+
+ return 0;
}
EXPORT_SYMBOL(nf_log_set);
@@ -420,7 +422,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
char buf[NFLOGGER_NAME_LEN];
int r = 0;
int tindex = (unsigned long)table->extra1;
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = table->extra2;
if (write) {
struct ctl_table tmp = *table;
@@ -474,7 +476,6 @@ static int netfilter_log_sysctl_init(struct net *net)
3, "%d", i);
nf_log_sysctl_table[i].procname =
nf_log_sysctl_fnames[i];
- nf_log_sysctl_table[i].data = NULL;
nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
nf_log_sysctl_table[i].mode = 0644;
nf_log_sysctl_table[i].proc_handler =
@@ -484,6 +485,9 @@ static int netfilter_log_sysctl_init(struct net *net)
}
}
+ for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
+ table[i].extra2 = net;
+
net->nf.nf_log_dir_header = register_net_sysctl(net,
"net/netfilter/nf_log",
table);
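
With the change above, nf_log_set() reports failure (for NFPROTO_UNSPEC or an out-of-range family) instead of returning silently, so callers are expected to propagate the error. A hypothetical caller sketch; the logger instance and the init/exit functions are illustrative, not part of the patch:

/* Hypothetical pernet init that forwards nf_log_set() errors.
 * example_ipv4_logger is an assumed struct nf_logger instance.
 */
static int __net_init example_log_net_init(struct net *net)
{
	return nf_log_set(net, NFPROTO_IPV4, &example_ipv4_logger);
}

static void __net_exit example_log_net_exit(struct net *net)
{
	nf_log_unset(net, &example_ipv4_logger);	/* assumed counterpart */
}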
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index a5aa5967b8e1..119fe1cb1ea9 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -77,7 +77,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
nf_log_buf_add(m, "SPT=%u DPT=%u ",
ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
- if (logflags & XT_LOG_TCPSEQ) {
+ if (logflags & NF_LOG_TCPSEQ) {
nf_log_buf_add(m, "SEQ=%u ACK=%u ",
ntohl(th->seq), ntohl(th->ack_seq));
}
@@ -107,7 +107,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
/* Max length: 11 "URGP=65535 " */
nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr));
- if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
+ if ((logflags & NF_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
u_int8_t _opt[60 - sizeof(struct tcphdr)];
const u_int8_t *op;
unsigned int i;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ecee105bbada..bbb8f3df79f7 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -566,16 +566,10 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
* Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
* will delete entry from already-freed table.
*/
- if (!del_timer(&ct->timeout))
- return 1;
-
ct->status &= ~IPS_NAT_DONE_MASK;
-
rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
nf_nat_bysource_params);
- add_timer(&ct->timeout);
-
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
*/
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b19ad20a705c..96964a0070e1 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -96,14 +96,14 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
-void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{
const struct nf_queue_handler *qh;
rcu_read_lock();
qh = rcu_dereference(net->nf.queue_handler);
if (qh)
- qh->nf_hook_drop(net, ops);
+ qh->nf_hook_drop(net, entry);
rcu_read_unlock();
}
@@ -112,7 +112,6 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
* through nf_reinject().
*/
int nf_queue(struct sk_buff *skb,
- struct nf_hook_ops *elem,
struct nf_hook_state *state,
unsigned int queuenum)
{
@@ -141,7 +140,6 @@ int nf_queue(struct sk_buff *skb,
*entry = (struct nf_queue_entry) {
.skb = skb,
- .elem = elem,
.state = *state,
.size = sizeof(*entry) + afinfo->route_key_size,
};
@@ -165,11 +163,15 @@ err:
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
+ struct nf_hook_entry *hook_entry;
struct sk_buff *skb = entry->skb;
- struct nf_hook_ops *elem = entry->elem;
const struct nf_afinfo *afinfo;
+ struct nf_hook_ops *elem;
int err;
+ hook_entry = rcu_dereference(entry->state.hook_entries);
+ elem = &hook_entry->ops;
+
nf_queue_entry_release_refs(entry);
/* Continue traversal iff userspace said ok... */
@@ -186,8 +188,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
if (verdict == NF_ACCEPT) {
next_hook:
- verdict = nf_iterate(entry->state.hook_list,
- skb, &entry->state, &elem);
+ verdict = nf_iterate(skb, &entry->state, &hook_entry);
}
switch (verdict & NF_VERDICT_MASK) {
@@ -198,7 +199,8 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable();
break;
case NF_QUEUE:
- err = nf_queue(skb, elem, &entry->state,
+ RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
+ err = nf_queue(skb, &entry->state,
verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ESRCH &&
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 7e1c876c7608..b70d3ea1430e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1196,6 +1196,83 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
}
}
+struct nft_chain_hook {
+ u32 num;
+ u32 priority;
+ const struct nf_chain_type *type;
+ struct net_device *dev;
+};
+
+static int nft_chain_parse_hook(struct net *net,
+ const struct nlattr * const nla[],
+ struct nft_af_info *afi,
+ struct nft_chain_hook *hook, bool create)
+{
+ struct nlattr *ha[NFTA_HOOK_MAX + 1];
+ const struct nf_chain_type *type;
+ struct net_device *dev;
+ int err;
+
+ err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
+ nft_hook_policy);
+ if (err < 0)
+ return err;
+
+ if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+ ha[NFTA_HOOK_PRIORITY] == NULL)
+ return -EINVAL;
+
+ hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+ if (hook->num >= afi->nhooks)
+ return -EINVAL;
+
+ hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+
+ type = chain_type[afi->family][NFT_CHAIN_T_DEFAULT];
+ if (nla[NFTA_CHAIN_TYPE]) {
+ type = nf_tables_chain_type_lookup(afi, nla[NFTA_CHAIN_TYPE],
+ create);
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+ }
+ if (!(type->hook_mask & (1 << hook->num)))
+ return -EOPNOTSUPP;
+ if (!try_module_get(type->owner))
+ return -ENOENT;
+
+ hook->type = type;
+
+ hook->dev = NULL;
+ if (afi->flags & NFT_AF_NEEDS_DEV) {
+ char ifname[IFNAMSIZ];
+
+ if (!ha[NFTA_HOOK_DEV]) {
+ module_put(type->owner);
+ return -EOPNOTSUPP;
+ }
+
+ nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
+ dev = dev_get_by_name(net, ifname);
+ if (!dev) {
+ module_put(type->owner);
+ return -ENOENT;
+ }
+ hook->dev = dev;
+ } else if (ha[NFTA_HOOK_DEV]) {
+ module_put(type->owner);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void nft_chain_release_hook(struct nft_chain_hook *hook)
+{
+ module_put(hook->type->owner);
+ if (hook->dev != NULL)
+ dev_put(hook->dev);
+}
+
static int nf_tables_newchain(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -1206,10 +1283,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
struct nft_table *table;
struct nft_chain *chain;
struct nft_base_chain *basechain = NULL;
- struct nlattr *ha[NFTA_HOOK_MAX + 1];
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
- struct net_device *dev = NULL;
u8 policy = NF_ACCEPT;
u64 handle = 0;
unsigned int i;
@@ -1273,6 +1348,37 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
+ if (nla[NFTA_CHAIN_HOOK]) {
+ struct nft_base_chain *basechain;
+ struct nft_chain_hook hook;
+ struct nf_hook_ops *ops;
+
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ return -EBUSY;
+
+ err = nft_chain_parse_hook(net, nla, afi, &hook,
+ create);
+ if (err < 0)
+ return err;
+
+ basechain = nft_base_chain(chain);
+ if (basechain->type != hook.type) {
+ nft_chain_release_hook(&hook);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < afi->nops; i++) {
+ ops = &basechain->ops[i];
+ if (ops->hooknum != hook.num ||
+ ops->priority != hook.priority ||
+ ops->dev != hook.dev) {
+ nft_chain_release_hook(&hook);
+ return -EBUSY;
+ }
+ }
+ nft_chain_release_hook(&hook);
+ }
+
if (nla[NFTA_CHAIN_HANDLE] && name) {
struct nft_chain *chain2;
@@ -1320,102 +1426,53 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
return -EOVERFLOW;
if (nla[NFTA_CHAIN_HOOK]) {
- const struct nf_chain_type *type;
+ struct nft_chain_hook hook;
struct nf_hook_ops *ops;
nf_hookfn *hookfn;
- u32 hooknum, priority;
-
- type = chain_type[family][NFT_CHAIN_T_DEFAULT];
- if (nla[NFTA_CHAIN_TYPE]) {
- type = nf_tables_chain_type_lookup(afi,
- nla[NFTA_CHAIN_TYPE],
- create);
- if (IS_ERR(type))
- return PTR_ERR(type);
- }
- err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
- nft_hook_policy);
+ err = nft_chain_parse_hook(net, nla, afi, &hook, create);
if (err < 0)
return err;
- if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
- ha[NFTA_HOOK_PRIORITY] == NULL)
- return -EINVAL;
-
- hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
- if (hooknum >= afi->nhooks)
- return -EINVAL;
- priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
-
- if (!(type->hook_mask & (1 << hooknum)))
- return -EOPNOTSUPP;
- if (!try_module_get(type->owner))
- return -ENOENT;
- hookfn = type->hooks[hooknum];
-
- if (afi->flags & NFT_AF_NEEDS_DEV) {
- char ifname[IFNAMSIZ];
-
- if (!ha[NFTA_HOOK_DEV]) {
- module_put(type->owner);
- return -EOPNOTSUPP;
- }
-
- nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ);
- dev = dev_get_by_name(net, ifname);
- if (!dev) {
- module_put(type->owner);
- return -ENOENT;
- }
- } else if (ha[NFTA_HOOK_DEV]) {
- module_put(type->owner);
- return -EOPNOTSUPP;
- }
basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
if (basechain == NULL) {
- module_put(type->owner);
- if (dev != NULL)
- dev_put(dev);
+ nft_chain_release_hook(&hook);
return -ENOMEM;
}
- if (dev != NULL)
- strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
+ if (hook.dev != NULL)
+ strncpy(basechain->dev_name, hook.dev->name, IFNAMSIZ);
if (nla[NFTA_CHAIN_COUNTERS]) {
stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
if (IS_ERR(stats)) {
- module_put(type->owner);
+ nft_chain_release_hook(&hook);
kfree(basechain);
- if (dev != NULL)
- dev_put(dev);
return PTR_ERR(stats);
}
basechain->stats = stats;
} else {
stats = netdev_alloc_pcpu_stats(struct nft_stats);
if (stats == NULL) {
- module_put(type->owner);
+ nft_chain_release_hook(&hook);
kfree(basechain);
- if (dev != NULL)
- dev_put(dev);
return -ENOMEM;
}
rcu_assign_pointer(basechain->stats, stats);
}
- basechain->type = type;
+ hookfn = hook.type->hooks[hook.num];
+ basechain->type = hook.type;
chain = &basechain->chain;
for (i = 0; i < afi->nops; i++) {
ops = &basechain->ops[i];
ops->pf = family;
- ops->hooknum = hooknum;
- ops->priority = priority;
+ ops->hooknum = hook.num;
+ ops->priority = hook.priority;
ops->priv = chain;
ops->hook = afi->hooks[ops->hooknum];
- ops->dev = dev;
+ ops->dev = hook.dev;
if (hookfn)
ops->hook = hookfn;
if (afi->hook_ops_init)
@@ -3426,12 +3483,12 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
}
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
- const struct nlattr *attr)
+ const struct nlattr *attr, u32 nlmsg_flags)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_data_desc d1, d2;
struct nft_set_ext_tmpl tmpl;
- struct nft_set_ext *ext;
+ struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_userdata *udata;
@@ -3558,9 +3615,19 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err4;
ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
- err = set->ops->insert(ctx->net, set, &elem);
- if (err < 0)
+ err = set->ops->insert(ctx->net, set, &elem, &ext2);
+ if (err) {
+ if (err == -EEXIST) {
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
+ memcmp(nft_set_ext_data(ext),
+ nft_set_ext_data(ext2), set->dlen) != 0)
+ err = -EBUSY;
+ else if (!(nlmsg_flags & NLM_F_EXCL))
+ err = 0;
+ }
goto err5;
+ }
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
@@ -3616,7 +3683,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
!atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
return -ENFILE;
- err = nft_add_set_elem(&ctx, set, attr);
+ err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
if (err < 0) {
atomic_dec(&set->nelems);
break;
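
The hunk above makes set element insertion honour NLM_F_EXCL: a duplicate key only fails when the caller asked for exclusive insertion, or when the existing element maps to different data (-EBUSY). A minimal userspace sketch of that decision follows; resolve_duplicate() is a hypothetical stand-in for the kernel's nft_set_ext comparison.

#include <errno.h>
#include <string.h>

#define NLM_F_EXCL 0x200	/* matches include/uapi/linux/netlink.h */

/* Hypothetical stand-in for the data comparison done on -EEXIST above. */
int resolve_duplicate(const void *old_data, const void *new_data,
		      size_t dlen, unsigned int nlmsg_flags)
{
	if (old_data && new_data && memcmp(old_data, new_data, dlen) != 0)
		return -EBUSY;		/* same key, different data */
	if (nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;		/* caller demanded exclusivity */
	return 0;			/* silently accept the duplicate */
}
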
@@ -4343,6 +4410,31 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
}
/**
+ * nft_parse_u32_check - fetch u32 attribute and check for maximum value
+ *
+ * @attr: netlink attribute to fetch value from
+ * @max: maximum value allowed to be stored in dest
+ * @dest: pointer to the destination variable
+ *
+ * Parse, check and store a given u32 netlink attribute into the
+ * destination variable. This function returns -ERANGE if the value
+ * exceeds the maximum value. Otherwise 0 is returned and the attribute
+ * value is stored in the destination variable.
+ */
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
+{
+ u32 val;
+
+ val = ntohl(nla_get_be32(attr));
+ if (val > max)
+ return -ERANGE;
+
+ *dest = val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_parse_u32_check);
+
+/**
* nft_parse_register - parse a register value from a netlink attribute
*
* @attr: netlink attribute
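
nft_parse_u32_check(), added above, centralises the pattern that the nft_bitwise, nft_byteorder and nft_exthdr hunks later in this diff switch to: fetch a big-endian u32 attribute, range-check it, and only then store it. A self-contained userspace approximation of the same check; parse_u32_check() here takes the raw big-endian word that nla_get_be32() would return in the kernel.

#include <errno.h>
#include <arpa/inet.h>	/* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>

/* Userspace approximation of the helper above. */
static int parse_u32_check(uint32_t be32_attr, int max, uint32_t *dest)
{
	uint32_t val = ntohl(be32_attr);

	if (val > (uint32_t)max)
		return -ERANGE;

	*dest = val;
	return 0;
}

int main(void)
{
	uint32_t len;

	/* U8_MAX (255) is the limit the expression init paths use. */
	if (parse_u32_check(htonl(300), 255, &len) == -ERANGE)
		printf("300 rejected, as expected\n");
	return 0;
}
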
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index fb8b5892b5ff..0dd5c695482f 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -34,7 +34,7 @@ static struct nf_loginfo trace_loginfo = {
.u = {
.log = {
.level = LOGLEVEL_WARNING,
- .logflags = NF_LOG_MASK,
+ .logflags = NF_LOG_DEFAULT_MASK,
},
},
};
@@ -93,12 +93,15 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
ptr = skb_network_header(skb);
- else
+ else {
+ if (!pkt->tprot_set)
+ return false;
ptr = skb_network_header(skb) + pkt->xt.thoff;
+ }
ptr += priv->offset;
- if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
+ if (unlikely(ptr + priv->len > skb_tail_pointer(skb)))
return false;
*dest = 0;
@@ -260,8 +263,13 @@ int __init nf_tables_core_module_init(void)
if (err < 0)
goto err7;
- return 0;
+ err = nft_range_module_init();
+ if (err < 0)
+ goto err8;
+ return 0;
+err8:
+ nft_dynset_module_exit();
err7:
nft_payload_module_exit();
err6:
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
index 6b5f76295d3d..f713cc205669 100644
--- a/net/netfilter/nf_tables_inet.c
+++ b/net/netfilter/nf_tables_inet.c
@@ -82,7 +82,10 @@ static int __init nf_tables_inet_init(void)
{
int ret;
- nft_register_chain_type(&filter_inet);
+ ret = nft_register_chain_type(&filter_inet);
+ if (ret < 0)
+ return ret;
+
ret = register_pernet_subsys(&nf_tables_inet_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_inet);
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 75d696f11045..9e2ae424b640 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -15,78 +15,6 @@
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
-static inline void
-nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct iphdr *iph, _iph;
- u32 len, thoff;
-
- nft_set_pktinfo(pkt, skb, state);
-
- iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
- &_iph);
- if (!iph)
- return;
-
- if (iph->ihl < 5 || iph->version != 4)
- return;
-
- len = ntohs(iph->tot_len);
- thoff = iph->ihl * 4;
- if (skb->len < len)
- return;
- else if (len < thoff)
- return;
-
- pkt->tprot = iph->protocol;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
-}
-
-static inline void
-__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
-#if IS_ENABLED(CONFIG_IPV6)
- struct ipv6hdr *ip6h, _ip6h;
- unsigned int thoff = 0;
- unsigned short frag_off;
- int protohdr;
- u32 pkt_len;
-
- ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
- &_ip6h);
- if (!ip6h)
- return;
-
- if (ip6h->version != 6)
- return;
-
- pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > skb->len)
- return;
-
- protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
- if (protohdr < 0)
- return;
-
- pkt->tprot = protohdr;
- pkt->xt.thoff = thoff;
- pkt->xt.fragoff = frag_off;
-#endif
-}
-
-static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- nft_set_pktinfo(pkt, skb, state);
- __nft_netdev_set_pktinfo_ipv6(pkt, skb, state);
-}
-
static unsigned int
nft_do_chain_netdev(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -95,13 +23,13 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb,
switch (skb->protocol) {
case htons(ETH_P_IP):
- nft_netdev_set_pktinfo_ipv4(&pkt, skb, state);
+ nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
break;
case htons(ETH_P_IPV6):
- nft_netdev_set_pktinfo_ipv6(&pkt, skb, state);
+ nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
break;
default:
- nft_set_pktinfo(&pkt, skb, state);
+ nft_set_pktinfo_unspec(&pkt, skb, state);
break;
}
@@ -221,14 +149,25 @@ static int __init nf_tables_netdev_init(void)
{
int ret;
- nft_register_chain_type(&nft_filter_chain_netdev);
- ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
- if (ret < 0) {
- nft_unregister_chain_type(&nft_filter_chain_netdev);
+ ret = nft_register_chain_type(&nft_filter_chain_netdev);
+ if (ret)
return ret;
- }
- register_netdevice_notifier(&nf_tables_netdev_notifier);
+
+ ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
+ if (ret)
+ goto err1;
+
+ ret = register_netdevice_notifier(&nf_tables_netdev_notifier);
+ if (ret)
+ goto err2;
+
return 0;
+
+err2:
+ unregister_pernet_subsys(&nf_tables_netdev_net_ops);
+err1:
+ nft_unregister_chain_type(&nft_filter_chain_netdev);
+ return ret;
}
static void __exit nf_tables_netdev_exit(void)
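
The netdev family init above is reworked into the usual goto unwind ladder: every registration step that succeeded is undone, in reverse order, when a later step fails. A generic sketch of that shape; register_a()/register_b()/register_c() are hypothetical stand-ins for the chain type, pernet ops and netdevice notifier registrations.

#include <errno.h>
#include <stdio.h>

static int register_a(void)    { return 0; }
static void unregister_a(void) { puts("a undone"); }
static int register_b(void)    { return 0; }
static void unregister_b(void) { puts("b undone"); }
static int register_c(void)    { return -ENOMEM; }	/* simulate failure */

static int module_init_sketch(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err1;

	ret = register_c();
	if (ret)
		goto err2;

	return 0;

err2:
	unregister_b();		/* undo step 2 */
err1:
	unregister_a();		/* undo step 1 */
	return ret;
}

int main(void)
{
	printf("init: %d\n", module_init_sketch());
	return 0;
}
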
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index fa24a5b398b1..ab695f8e2d29 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -113,20 +113,22 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
const struct nft_pktinfo *pkt)
{
const struct sk_buff *skb = pkt->skb;
- unsigned int len = min_t(unsigned int,
- pkt->xt.thoff - skb_network_offset(skb),
- NFT_TRACETYPE_NETWORK_HSIZE);
int off = skb_network_offset(skb);
+ unsigned int len, nh_end;
+ nh_end = pkt->tprot_set ? pkt->xt.thoff : skb->len;
+ len = min_t(unsigned int, nh_end - skb_network_offset(skb),
+ NFT_TRACETYPE_NETWORK_HSIZE);
if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
return -1;
- len = min_t(unsigned int, skb->len - pkt->xt.thoff,
- NFT_TRACETYPE_TRANSPORT_HSIZE);
-
- if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
- pkt->xt.thoff, len))
- return -1;
+ if (pkt->tprot_set) {
+ len = min_t(unsigned int, skb->len - pkt->xt.thoff,
+ NFT_TRACETYPE_TRANSPORT_HSIZE);
+ if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
+ pkt->xt.thoff, len))
+ return -1;
+ }
if (!skb_mac_header_was_set(skb))
return 0;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index e924e95fcc7f..3b79f34b5095 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -43,7 +43,7 @@ nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
if (help == NULL)
return NF_DROP;
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
if (helper == NULL)
return NF_DROP;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 6577db524ef6..eb086a192c5a 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -442,7 +442,9 @@ __build_packet_message(struct nfnl_log_net *log,
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
- /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
+ /* rcu_read_lock()ed by nf_hook_thresh or
+ * nf_log_packet.
+ */
nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
@@ -477,7 +479,9 @@ __build_packet_message(struct nfnl_log_net *log,
if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
- /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
+ /* rcu_read_lock()ed by nf_hook_thresh or
+ * nf_log_packet.
+ */
nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f49f45081acb..af832c526048 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -740,7 +740,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
struct net *net = entry->state.net;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
- /* rcu_read_lock()ed by nf_hook_slow() */
+ /* rcu_read_lock()ed by nf_hook_thresh */
queue = instance_lookup(q, queuenum);
if (!queue)
return -ESRCH;
@@ -917,12 +917,14 @@ static struct notifier_block nfqnl_dev_notifier = {
.notifier_call = nfqnl_rcv_dev_event,
};
-static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr)
+static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long entry_ptr)
{
- return entry->elem == (struct nf_hook_ops *)ops_ptr;
+ return rcu_access_pointer(entry->state.hook_entries) ==
+ (struct nf_hook_entry *)entry_ptr;
}
-static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook)
+static void nfqnl_nf_hook_drop(struct net *net,
+ const struct nf_hook_entry *hook)
{
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
int i;
@@ -1522,9 +1524,16 @@ static int __init nfnetlink_queue_init(void)
goto cleanup_netlink_notifier;
}
- register_netdevice_notifier(&nfqnl_dev_notifier);
+ status = register_netdevice_notifier(&nfqnl_dev_notifier);
+ if (status < 0) {
+ pr_err("nf_queue: failed to register netdevice notifier\n");
+ goto cleanup_netlink_subsys;
+ }
+
return status;
+cleanup_netlink_subsys:
+ nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
unregister_pernet_subsys(&nfnl_queue_net_ops);
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index d71cc18fa35d..31c15ed2e5fc 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -52,6 +52,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
{
struct nft_bitwise *priv = nft_expr_priv(expr);
struct nft_data_desc d1, d2;
+ u32 len;
int err;
if (tb[NFTA_BITWISE_SREG] == NULL ||
@@ -61,7 +62,12 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
tb[NFTA_BITWISE_XOR] == NULL)
return -EINVAL;
- priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+
+ priv->len = len;
+
priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
err = nft_validate_register_load(priv->sreg, priv->len);
if (err < 0)
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index b78c28ba465f..ee63d981268d 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -99,6 +99,7 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_byteorder *priv = nft_expr_priv(expr);
+ u32 size, len;
int err;
if (tb[NFTA_BYTEORDER_SREG] == NULL ||
@@ -117,7 +118,12 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
return -EINVAL;
}
- priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
+ err = nft_parse_u32_check(tb[NFTA_BYTEORDER_SIZE], U8_MAX, &size);
+ if (err < 0)
+ return err;
+
+ priv->size = size;
+
switch (priv->size) {
case 2:
case 4:
@@ -128,7 +134,12 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
}
priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
- priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+
+ priv->len = len;
+
err = nft_validate_register_load(priv->sreg, priv->len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index e25b35d70e4d..2e53739812b1 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -84,6 +84,9 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (err < 0)
return err;
+ if (desc.len > U8_MAX)
+ return -ERANGE;
+
priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
priv->len = desc.len;
return 0;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 51e180f2a003..d7b0d171172a 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -128,15 +128,18 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
memcpy(dest, &count, sizeof(count));
return;
}
+ case NFT_CT_L3PROTOCOL:
+ *dest = nf_ct_l3num(ct);
+ return;
+ case NFT_CT_PROTOCOL:
+ *dest = nf_ct_protonum(ct);
+ return;
default:
break;
}
tuple = &ct->tuplehash[priv->dir].tuple;
switch (priv->key) {
- case NFT_CT_L3PROTOCOL:
- *dest = nf_ct_l3num(ct);
- return;
case NFT_CT_SRC:
memcpy(dest, tuple->src.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
@@ -145,9 +148,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
memcpy(dest, tuple->dst.u3.all,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
- case NFT_CT_PROTOCOL:
- *dest = nf_ct_protonum(ct);
- return;
case NFT_CT_PROTO_SRC:
*dest = (__force __u16)tuple->src.u.all;
return;
@@ -283,8 +283,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
- if (tb[NFTA_CT_DIRECTION] == NULL)
- return -EINVAL;
+ /* For compatibility, do not report an error if the NFTA_CT_DIRECTION
+ * attribute is specified.
+ */
len = sizeof(u8);
break;
case NFT_CT_SRC:
@@ -363,6 +364,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
+ if (tb[NFTA_CT_DIRECTION])
+ return -EINVAL;
len = FIELD_SIZEOF(struct nf_conn, mark);
break;
#endif
@@ -432,8 +435,6 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
switch (priv->key) {
- case NFT_CT_L3PROTOCOL:
- case NFT_CT_PROTOCOL:
case NFT_CT_SRC:
case NFT_CT_DST:
case NFT_CT_PROTO_SRC:
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 0af26699bf04..e3b83c31da2e 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -22,6 +22,7 @@ struct nft_dynset {
enum nft_dynset_ops op:8;
enum nft_registers sreg_key:8;
enum nft_registers sreg_data:8;
+ bool invert;
u64 timeout;
struct nft_expr *expr;
struct nft_set_binding binding;
@@ -82,10 +83,14 @@ static void nft_dynset_eval(const struct nft_expr *expr,
if (sexpr != NULL)
sexpr->ops->eval(sexpr, regs, pkt);
+
+ if (priv->invert)
+ regs->verdict.code = NFT_BREAK;
return;
}
out:
- regs->verdict.code = NFT_BREAK;
+ if (!priv->invert)
+ regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
@@ -96,6 +101,7 @@ static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
[NFTA_DYNSET_SREG_DATA] = { .type = NLA_U32 },
[NFTA_DYNSET_TIMEOUT] = { .type = NLA_U64 },
[NFTA_DYNSET_EXPR] = { .type = NLA_NESTED },
+ [NFTA_DYNSET_FLAGS] = { .type = NLA_U32 },
};
static int nft_dynset_init(const struct nft_ctx *ctx,
@@ -113,6 +119,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
tb[NFTA_DYNSET_SREG_KEY] == NULL)
return -EINVAL;
+ if (tb[NFTA_DYNSET_FLAGS]) {
+ u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
+
+ if (flags & ~NFT_DYNSET_F_INV)
+ return -EINVAL;
+ if (flags & NFT_DYNSET_F_INV)
+ priv->invert = true;
+ }
+
set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME],
genmask);
if (IS_ERR(set)) {
@@ -220,6 +235,7 @@ static void nft_dynset_destroy(const struct nft_ctx *ctx,
static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
+ u32 flags = priv->invert ? NFT_DYNSET_F_INV : 0;
if (nft_dump_register(skb, NFTA_DYNSET_SREG_KEY, priv->sreg_key))
goto nla_put_failure;
@@ -235,6 +251,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
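
NFT_DYNSET_F_INV only flips when nft_dynset_eval() issues NFT_BREAK: a successful add/update breaks the rule if the flag is set, a failed one breaks it if the flag is clear, so the rule keeps matching exactly when the outcome XORed with the flag is true. A small sketch of that truth table:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the eval logic above: the rule continues only when the set
 * operation outcome differs from the invert flag. */
static bool rule_continues(bool op_succeeded, bool invert)
{
	return op_succeeded ^ invert;
}

int main(void)
{
	printf("add ok,   no invert -> %d\n", rule_continues(true,  false)); /* 1 */
	printf("add ok,   invert    -> %d\n", rule_continues(true,  true));  /* 0 */
	printf("add fail, invert    -> %d\n", rule_continues(false, true));  /* 1 */
	return 0;
}
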
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 82c264e40278..a84cf3d66056 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 offset, len;
+ u32 offset, len;
+ int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -67,11 +67,13 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
tb[NFTA_EXTHDR_LEN] == NULL)
return -EINVAL;
- offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
- len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
+ if (err < 0)
+ return err;
- if (offset > U8_MAX || len > U8_MAX)
- return -ERANGE;
+ err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = offset;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 564fa7929ed5..09473b415b95 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -1,395 +1,145 @@
/*
- * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2016 Laura Garcia <nevola@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/log2.h>
-#include <linux/jhash.h>
#include <linux/netlink.h>
-#include <linux/workqueue.h>
-#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
-
-/* We target a hash table size of 4, element hint is 75% of final size */
-#define NFT_HASH_ELEMENT_HINT 3
+#include <net/netfilter/nf_tables_core.h>
+#include <linux/jhash.h>
struct nft_hash {
- struct rhashtable ht;
- struct delayed_work gc_work;
-};
-
-struct nft_hash_elem {
- struct rhash_head node;
- struct nft_set_ext ext;
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+ u8 len;
+ u32 modulus;
+ u32 seed;
+ u32 offset;
};
-struct nft_hash_cmp_arg {
- const struct nft_set *set;
- const u32 *key;
- u8 genmask;
-};
-
-static const struct rhashtable_params nft_hash_params;
-
-static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
-{
- const struct nft_hash_cmp_arg *arg = data;
-
- return jhash(arg->key, len, seed);
-}
-
-static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
-{
- const struct nft_hash_elem *he = data;
-
- return jhash(nft_set_ext_key(&he->ext), len, seed);
-}
-
-static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
- const void *ptr)
-{
- const struct nft_hash_cmp_arg *x = arg->key;
- const struct nft_hash_elem *he = ptr;
-
- if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
- return 1;
- if (nft_set_elem_expired(&he->ext))
- return 1;
- if (!nft_set_elem_active(&he->ext, x->genmask))
- return 1;
- return 0;
-}
-
-static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
-{
- struct nft_hash *priv = nft_set_priv(set);
- const struct nft_hash_elem *he;
- struct nft_hash_cmp_arg arg = {
- .genmask = nft_genmask_cur(net),
- .set = set,
- .key = key,
- };
-
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
- if (he != NULL)
- *ext = &he->ext;
-
- return !!he;
-}
-
-static bool nft_hash_update(struct nft_set *set, const u32 *key,
- void *(*new)(struct nft_set *,
- const struct nft_expr *,
- struct nft_regs *regs),
- const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_set_ext **ext)
-{
- struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he;
- struct nft_hash_cmp_arg arg = {
- .genmask = NFT_GENMASK_ANY,
- .set = set,
- .key = key,
- };
-
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
- if (he != NULL)
- goto out;
-
- he = new(set, expr, regs);
- if (he == NULL)
- goto err1;
- if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
- nft_hash_params))
- goto err2;
-out:
- *ext = &he->ext;
- return true;
-
-err2:
- nft_set_elem_destroy(set, he);
-err1:
- return false;
-}
-
-static int nft_hash_insert(const struct net *net, const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he = elem->priv;
- struct nft_hash_cmp_arg arg = {
- .genmask = nft_genmask_next(net),
- .set = set,
- .key = elem->key.val.data,
- };
-
- return rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
- nft_hash_params);
-}
-
-static void nft_hash_activate(const struct net *net, const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- struct nft_hash_elem *he = elem->priv;
-
- nft_set_elem_change_active(net, set, &he->ext);
- nft_set_elem_clear_busy(&he->ext);
-}
-
-static void *nft_hash_deactivate(const struct net *net,
- const struct nft_set *set,
- const struct nft_set_elem *elem)
+static void nft_hash_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
- struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he;
- struct nft_hash_cmp_arg arg = {
- .genmask = nft_genmask_next(net),
- .set = set,
- .key = elem->key.val.data,
- };
-
- rcu_read_lock();
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
- if (he != NULL) {
- if (!nft_set_elem_mark_busy(&he->ext) ||
- !nft_is_active(net, &he->ext))
- nft_set_elem_change_active(net, set, &he->ext);
- else
- he = NULL;
- }
- rcu_read_unlock();
+ struct nft_hash *priv = nft_expr_priv(expr);
+ const void *data = &regs->data[priv->sreg];
+ u32 h;
- return he;
+ h = reciprocal_scale(jhash(data, priv->len, priv->seed), priv->modulus);
+ regs->data[priv->dreg] = h + priv->offset;
}
-static void nft_hash_remove(const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he = elem->priv;
-
- rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
-}
+static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
+ [NFTA_HASH_SREG] = { .type = NLA_U32 },
+ [NFTA_HASH_DREG] = { .type = NLA_U32 },
+ [NFTA_HASH_LEN] = { .type = NLA_U32 },
+ [NFTA_HASH_MODULUS] = { .type = NLA_U32 },
+ [NFTA_HASH_SEED] = { .type = NLA_U32 },
+ [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
+};
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
- struct nft_set_iter *iter)
+static int nft_hash_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
- struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he;
- struct rhashtable_iter hti;
- struct nft_set_elem elem;
- int err;
-
- err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
- iter->err = err;
- if (err)
- return;
-
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN) {
- iter->err = err;
- goto out;
- }
-
- while ((he = rhashtable_walk_next(&hti))) {
- if (IS_ERR(he)) {
- err = PTR_ERR(he);
- if (err != -EAGAIN) {
- iter->err = err;
- goto out;
- }
+ struct nft_hash *priv = nft_expr_priv(expr);
+ u32 len;
+ int err;
- continue;
- }
+ if (!tb[NFTA_HASH_SREG] ||
+ !tb[NFTA_HASH_DREG] ||
+ !tb[NFTA_HASH_LEN] ||
+ !tb[NFTA_HASH_SEED] ||
+ !tb[NFTA_HASH_MODULUS])
+ return -EINVAL;
- if (iter->count < iter->skip)
- goto cont;
- if (nft_set_elem_expired(&he->ext))
- goto cont;
- if (!nft_set_elem_active(&he->ext, iter->genmask))
- goto cont;
+ if (tb[NFTA_HASH_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
- elem.priv = he;
+ priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
+ priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
- iter->err = iter->fn(ctx, set, iter, &elem);
- if (iter->err < 0)
- goto out;
+ len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN]));
+ if (len == 0 || len > U8_MAX)
+ return -ERANGE;
-cont:
- iter->count++;
- }
+ priv->len = len;
-out:
- rhashtable_walk_stop(&hti);
- rhashtable_walk_exit(&hti);
-}
-
-static void nft_hash_gc(struct work_struct *work)
-{
- struct nft_set *set;
- struct nft_hash_elem *he;
- struct nft_hash *priv;
- struct nft_set_gc_batch *gcb = NULL;
- struct rhashtable_iter hti;
- int err;
-
- priv = container_of(work, struct nft_hash, gc_work.work);
- set = nft_set_container_of(priv);
-
- err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
- if (err)
- goto schedule;
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+ if (priv->modulus <= 1)
+ return -ERANGE;
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN)
- goto out;
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
- while ((he = rhashtable_walk_next(&hti))) {
- if (IS_ERR(he)) {
- if (PTR_ERR(he) != -EAGAIN)
- goto out;
- continue;
- }
+ priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
- if (!nft_set_elem_expired(&he->ext))
- continue;
- if (nft_set_elem_mark_busy(&he->ext))
- continue;
-
- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
- if (gcb == NULL)
- goto out;
- rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
- atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, he);
- }
-out:
- rhashtable_walk_stop(&hti);
- rhashtable_walk_exit(&hti);
-
- nft_set_gc_batch_complete(gcb);
-schedule:
- queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
- nft_set_gc_interval(set));
+ err = nft_validate_register_load(priv->sreg, len);
+ if (err < 0)
+ return err;
+
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, sizeof(u32));
}
-static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+static int nft_hash_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
{
- return sizeof(struct nft_hash);
-}
-
-static const struct rhashtable_params nft_hash_params = {
- .head_offset = offsetof(struct nft_hash_elem, node),
- .hashfn = nft_hash_key,
- .obj_hashfn = nft_hash_obj,
- .obj_cmpfn = nft_hash_cmp,
- .automatic_shrinking = true,
-};
+ const struct nft_hash *priv = nft_expr_priv(expr);
-static int nft_hash_init(const struct nft_set *set,
- const struct nft_set_desc *desc,
- const struct nlattr * const tb[])
-{
- struct nft_hash *priv = nft_set_priv(set);
- struct rhashtable_params params = nft_hash_params;
- int err;
-
- params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
- params.key_len = set->klen;
-
- err = rhashtable_init(&priv->ht, &params);
- if (err < 0)
- return err;
-
- INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
- if (set->flags & NFT_SET_TIMEOUT)
- queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
- nft_set_gc_interval(set));
+ if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg))
+ goto nla_put_failure;
+ if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HASH_LEN, htonl(priv->len)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+ goto nla_put_failure;
+ if (priv->offset != 0)
+ if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
+ goto nla_put_failure;
return 0;
-}
-static void nft_hash_elem_destroy(void *ptr, void *arg)
-{
- nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+nla_put_failure:
+ return -1;
}
-static void nft_hash_destroy(const struct nft_set *set)
-{
- struct nft_hash *priv = nft_set_priv(set);
-
- cancel_delayed_work_sync(&priv->gc_work);
- rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
- (void *)set);
-}
-
-static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
- struct nft_set_estimate *est)
-{
- unsigned int esize;
-
- esize = sizeof(struct nft_hash_elem);
- if (desc->size) {
- est->size = sizeof(struct nft_hash) +
- roundup_pow_of_two(desc->size * 4 / 3) *
- sizeof(struct nft_hash_elem *) +
- desc->size * esize;
- } else {
- /* Resizing happens when the load drops below 30% or goes
- * above 75%. The average of 52.5% load (approximated by 50%)
- * is used for the size estimation of the hash buckets,
- * meaning we calculate two buckets per element.
- */
- est->size = esize + 2 * sizeof(struct nft_hash_elem *);
- }
-
- est->class = NFT_SET_CLASS_O_1;
-
- return true;
-}
-
-static struct nft_set_ops nft_hash_ops __read_mostly = {
- .privsize = nft_hash_privsize,
- .elemsize = offsetof(struct nft_hash_elem, ext),
- .estimate = nft_hash_estimate,
+static struct nft_expr_type nft_hash_type;
+static const struct nft_expr_ops nft_hash_ops = {
+ .type = &nft_hash_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_hash)),
+ .eval = nft_hash_eval,
.init = nft_hash_init,
- .destroy = nft_hash_destroy,
- .insert = nft_hash_insert,
- .activate = nft_hash_activate,
- .deactivate = nft_hash_deactivate,
- .remove = nft_hash_remove,
- .lookup = nft_hash_lookup,
- .update = nft_hash_update,
- .walk = nft_hash_walk,
- .features = NFT_SET_MAP | NFT_SET_TIMEOUT,
+ .dump = nft_hash_dump,
+};
+
+static struct nft_expr_type nft_hash_type __read_mostly = {
+ .name = "hash",
+ .ops = &nft_hash_ops,
+ .policy = nft_hash_policy,
+ .maxattr = NFTA_HASH_MAX,
.owner = THIS_MODULE,
};
static int __init nft_hash_module_init(void)
{
- return nft_register_set(&nft_hash_ops);
+ return nft_register_expr(&nft_hash_type);
}
static void __exit nft_hash_module_exit(void)
{
- nft_unregister_set(&nft_hash_ops);
+ nft_unregister_expr(&nft_hash_type);
}
module_init(nft_hash_module_init);
module_exit(nft_hash_module_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
+MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
+MODULE_ALIAS_NFT_EXPR("hash");
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index db3b746858e3..d17018ff54e6 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -53,6 +53,10 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
tb[NFTA_IMMEDIATE_DATA]);
if (err < 0)
return err;
+
+ if (desc.len > U8_MAX)
+ return -ERANGE;
+
priv->dlen = desc.len;
priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 070b98938e02..c6baf412236d 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -145,7 +145,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+ priv->cost = div64_u64(priv->limit.nsecs, priv->limit.rate);
return 0;
}
@@ -170,7 +170,7 @@ static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
struct nft_limit *priv = nft_expr_priv(expr);
- u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+ u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
if (nft_limit_eval(priv, cost))
regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 24a73bb26e94..1b01404bb33f 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -58,8 +58,11 @@ static int nft_log_init(const struct nft_ctx *ctx,
if (tb[NFTA_LOG_LEVEL] != NULL &&
tb[NFTA_LOG_GROUP] != NULL)
return -EINVAL;
- if (tb[NFTA_LOG_GROUP] != NULL)
+ if (tb[NFTA_LOG_GROUP] != NULL) {
li->type = NF_LOG_TYPE_ULOG;
+ if (tb[NFTA_LOG_FLAGS] != NULL)
+ return -EINVAL;
+ }
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
@@ -87,6 +90,10 @@ static int nft_log_init(const struct nft_ctx *ctx,
if (tb[NFTA_LOG_FLAGS] != NULL) {
li->u.log.logflags =
ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
+ if (li->u.log.logflags & ~NF_LOG_MASK) {
+ err = -EINVAL;
+ goto err1;
+ }
}
break;
case NF_LOG_TYPE_ULOG:
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index e164325d1bc0..8166b6994cc7 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -43,7 +43,7 @@ static void nft_lookup_eval(const struct nft_expr *expr,
return;
}
- if (found && set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP)
nft_data_copy(&regs->data[priv->dreg],
nft_set_ext_data(ext), set->dlen);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8a6bc7630912..6c1e0246706e 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -52,6 +52,8 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = pkt->pf;
break;
case NFT_META_L4PROTO:
+ if (!pkt->tprot_set)
+ goto err;
*dest = pkt->tprot;
break;
case NFT_META_PRIORITY:
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
new file mode 100644
index 000000000000..55bc5ab78d4a
--- /dev/null
+++ b/net/netfilter/nft_numgen.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2016 Laura Garcia <nevola@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/static_key.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+
+static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
+
+struct nft_ng_inc {
+ enum nft_registers dreg:8;
+ u32 modulus;
+ atomic_t counter;
+ u32 offset;
+};
+
+static void nft_ng_inc_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_ng_inc *priv = nft_expr_priv(expr);
+ u32 nval, oval;
+
+ do {
+ oval = atomic_read(&priv->counter);
+ nval = (oval + 1 < priv->modulus) ? oval + 1 : 0;
+ } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);
+
+ regs->data[priv->dreg] = nval + priv->offset;
+}
+
+static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
+ [NFTA_NG_DREG] = { .type = NLA_U32 },
+ [NFTA_NG_MODULUS] = { .type = NLA_U32 },
+ [NFTA_NG_TYPE] = { .type = NLA_U32 },
+ [NFTA_NG_OFFSET] = { .type = NLA_U32 },
+};
+
+static int nft_ng_inc_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ng_inc *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_NG_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
+
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
+ if (priv->modulus == 0)
+ return -ERANGE;
+
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+ priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+ atomic_set(&priv->counter, 0);
+
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, sizeof(u32));
+}
+
+static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
+ u32 modulus, enum nft_ng_types type, u32 offset)
+{
+ if (nft_dump_register(skb, NFTA_NG_DREG, dreg))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_NG_MODULUS, htonl(modulus)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_NG_TYPE, htonl(type)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_NG_OFFSET, htonl(offset)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_ng_inc *priv = nft_expr_priv(expr);
+
+ return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_INCREMENTAL,
+ priv->offset);
+}
+
+struct nft_ng_random {
+ enum nft_registers dreg:8;
+ u32 modulus;
+ u32 offset;
+};
+
+static void nft_ng_random_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_ng_random *priv = nft_expr_priv(expr);
+ struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
+ u32 val;
+
+ val = reciprocal_scale(prandom_u32_state(state), priv->modulus);
+ regs->data[priv->dreg] = val + priv->offset;
+}
+
+static int nft_ng_random_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_ng_random *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_NG_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
+
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_NG_MODULUS]));
+ if (priv->modulus == 0)
+ return -ERANGE;
+
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+ prandom_init_once(&nft_numgen_prandom_state);
+
+ priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, sizeof(u32));
+}
+
+static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_ng_random *priv = nft_expr_priv(expr);
+
+ return nft_ng_dump(skb, priv->dreg, priv->modulus, NFT_NG_RANDOM,
+ priv->offset);
+}
+
+static struct nft_expr_type nft_ng_type;
+static const struct nft_expr_ops nft_ng_inc_ops = {
+ .type = &nft_ng_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
+ .eval = nft_ng_inc_eval,
+ .init = nft_ng_inc_init,
+ .dump = nft_ng_inc_dump,
+};
+
+static const struct nft_expr_ops nft_ng_random_ops = {
+ .type = &nft_ng_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
+ .eval = nft_ng_random_eval,
+ .init = nft_ng_random_init,
+ .dump = nft_ng_random_dump,
+};
+
+static const struct nft_expr_ops *
+nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
+{
+ u32 type;
+
+ if (!tb[NFTA_NG_DREG] ||
+ !tb[NFTA_NG_MODULUS] ||
+ !tb[NFTA_NG_TYPE])
+ return ERR_PTR(-EINVAL);
+
+ type = ntohl(nla_get_be32(tb[NFTA_NG_TYPE]));
+
+ switch (type) {
+ case NFT_NG_INCREMENTAL:
+ return &nft_ng_inc_ops;
+ case NFT_NG_RANDOM:
+ return &nft_ng_random_ops;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct nft_expr_type nft_ng_type __read_mostly = {
+ .name = "numgen",
+ .select_ops = &nft_ng_select_ops,
+ .policy = nft_ng_policy,
+ .maxattr = NFTA_NG_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_ng_module_init(void)
+{
+ return nft_register_expr(&nft_ng_type);
+}
+
+static void __exit nft_ng_module_exit(void)
+{
+ nft_unregister_expr(&nft_ng_type);
+}
+
+module_init(nft_ng_module_init);
+module_exit(nft_ng_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
+MODULE_ALIAS_NFT_EXPR("numgen");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 12cd4bf16d17..b2f88617611a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -92,6 +92,8 @@ static void nft_payload_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
+ if (!pkt->tprot_set)
+ goto err;
offset = pkt->xt.thoff;
break;
default:
@@ -184,6 +186,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
+ if (!pkt->tprot_set)
+ goto err;
offset = pkt->xt.thoff;
break;
default:
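
Several hunks in this series (nft_payload, nft_meta, the fast-path eval in nf_tables_core.c and the trace code) now test pkt->tprot_set before using pkt->xt.thoff, since packets on the netdev family may never have had their transport header resolved. A stripped-down sketch of the guarded access, with struct pktinfo standing in for the relevant nft_pktinfo fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the nft_pktinfo fields used by the hunks above. */
struct pktinfo {
	bool	 tprot_set;	/* transport protocol/offset are valid */
	uint8_t	 tprot;		/* L4 protocol number */
	uint16_t thoff;		/* transport header offset */
};

/* Returns false (the kernel would issue NFT_BREAK) when the transport
 * header was never resolved for this packet. */
static bool load_l4_offset(const struct pktinfo *pkt, uint16_t *offset)
{
	if (!pkt->tprot_set)
		return false;
	*offset = pkt->thoff;
	return true;
}

int main(void)
{
	struct pktinfo unresolved = { .tprot_set = false };
	uint16_t off;

	printf("usable: %d\n", load_l4_offset(&unresolved, &off));	/* 0 */
	return 0;
}
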
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 61d216eb7917..393d359a1889 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -22,9 +22,10 @@
static u32 jhash_initval __read_mostly;
struct nft_queue {
- u16 queuenum;
- u16 queues_total;
- u16 flags;
+ enum nft_registers sreg_qnum:8;
+ u16 queuenum;
+ u16 queues_total;
+ u16 flags;
};
static void nft_queue_eval(const struct nft_expr *expr,
@@ -54,31 +55,78 @@ static void nft_queue_eval(const struct nft_expr *expr,
regs->verdict.code = ret;
}
+static void nft_queue_sreg_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_queue *priv = nft_expr_priv(expr);
+ u32 queue, ret;
+
+ queue = regs->data[priv->sreg_qnum];
+
+ ret = NF_QUEUE_NR(queue);
+ if (priv->flags & NFT_QUEUE_FLAG_BYPASS)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+
+ regs->verdict.code = ret;
+}
+
static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
[NFTA_QUEUE_NUM] = { .type = NLA_U16 },
[NFTA_QUEUE_TOTAL] = { .type = NLA_U16 },
[NFTA_QUEUE_FLAGS] = { .type = NLA_U16 },
+ [NFTA_QUEUE_SREG_QNUM] = { .type = NLA_U32 },
};
static int nft_queue_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
struct nft_queue *priv = nft_expr_priv(expr);
+ u32 maxid;
- if (tb[NFTA_QUEUE_NUM] == NULL)
- return -EINVAL;
-
- init_hashrandom(&jhash_initval);
priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
- if (tb[NFTA_QUEUE_TOTAL] != NULL)
+ if (tb[NFTA_QUEUE_TOTAL])
priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
- if (tb[NFTA_QUEUE_FLAGS] != NULL) {
+ else
+ priv->queues_total = 1;
+
+ if (priv->queues_total == 0)
+ return -EINVAL;
+
+ maxid = priv->queues_total - 1 + priv->queuenum;
+ if (maxid > U16_MAX)
+ return -ERANGE;
+
+ if (tb[NFTA_QUEUE_FLAGS]) {
+ priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
+ if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int nft_queue_sreg_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_queue *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
+ err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_QUEUE_FLAGS]) {
priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
return -EINVAL;
+ if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT)
+ return -EOPNOTSUPP;
}
+
return 0;
}
@@ -97,6 +145,21 @@ nla_put_failure:
return -1;
}
+static int
+nft_queue_sreg_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_queue *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_QUEUE_SREG_QNUM, priv->sreg_qnum) ||
+ nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
static struct nft_expr_type nft_queue_type;
static const struct nft_expr_ops nft_queue_ops = {
.type = &nft_queue_type,
@@ -106,9 +169,35 @@ static const struct nft_expr_ops nft_queue_ops = {
.dump = nft_queue_dump,
};
+static const struct nft_expr_ops nft_queue_sreg_ops = {
+ .type = &nft_queue_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_queue)),
+ .eval = nft_queue_sreg_eval,
+ .init = nft_queue_sreg_init,
+ .dump = nft_queue_sreg_dump,
+};
+
+static const struct nft_expr_ops *
+nft_queue_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_QUEUE_NUM] && tb[NFTA_QUEUE_SREG_QNUM])
+ return ERR_PTR(-EINVAL);
+
+ init_hashrandom(&jhash_initval);
+
+ if (tb[NFTA_QUEUE_NUM])
+ return &nft_queue_ops;
+
+ if (tb[NFTA_QUEUE_SREG_QNUM])
+ return &nft_queue_sreg_ops;
+
+ return ERR_PTR(-EINVAL);
+}
+
static struct nft_expr_type nft_queue_type __read_mostly = {
.name = "queue",
- .ops = &nft_queue_ops,
+ .select_ops = &nft_queue_select_ops,
.policy = nft_queue_policy,
.maxattr = NFTA_QUEUE_MAX,
.owner = THIS_MODULE,
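
nft_queue now registers two ops structures and lets ->select_ops() pick one depending on which attribute userspace supplied: a literal queue number (NFTA_QUEUE_NUM) or a source register (NFTA_QUEUE_SREG_QNUM), but never both. A compact sketch of that dispatch with trivial stand-in types:

#include <stdio.h>

struct expr_ops { const char *name; };

static const struct expr_ops queue_ops      = { "immediate queue number" };
static const struct expr_ops queue_sreg_ops = { "queue number from register" };

/* Mirrors nft_queue_select_ops(): exactly one of the two attributes may
 * be present; NULL stands for "attribute not supplied". */
static const struct expr_ops *select_ops(const void *attr_num,
					 const void *attr_sreg_qnum)
{
	if (attr_num && attr_sreg_qnum)
		return NULL;			/* kernel: ERR_PTR(-EINVAL) */
	if (attr_num)
		return &queue_ops;
	if (attr_sreg_qnum)
		return &queue_sreg_ops;
	return NULL;				/* kernel: ERR_PTR(-EINVAL) */
}

int main(void)
{
	int dummy = 1;
	const struct expr_ops *ops = select_ops(&dummy, NULL);

	printf("%s\n", ops ? ops->name : "invalid combination");
	return 0;
}
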
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
new file mode 100644
index 000000000000..c00104c07095
--- /dev/null
+++ b/net/netfilter/nft_quota.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_quota {
+ u64 quota;
+ bool invert;
+ atomic64_t remain;
+};
+
+static inline bool nft_overquota(struct nft_quota *priv,
+ const struct nft_pktinfo *pkt)
+{
+ return atomic64_sub_return(pkt->skb->len, &priv->remain) < 0;
+}
+
+static void nft_quota_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_quota *priv = nft_expr_priv(expr);
+
+ if (nft_overquota(priv, pkt) ^ priv->invert)
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_quota_policy[NFTA_QUOTA_MAX + 1] = {
+ [NFTA_QUOTA_BYTES] = { .type = NLA_U64 },
+ [NFTA_QUOTA_FLAGS] = { .type = NLA_U32 },
+};
+
+static int nft_quota_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_quota *priv = nft_expr_priv(expr);
+ u32 flags = 0;
+ u64 quota;
+
+ if (!tb[NFTA_QUOTA_BYTES])
+ return -EINVAL;
+
+ quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES]));
+ if (quota > S64_MAX)
+ return -EOVERFLOW;
+
+ if (tb[NFTA_QUOTA_FLAGS]) {
+ flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS]));
+ if (flags & ~NFT_QUOTA_F_INV)
+ return -EINVAL;
+ }
+
+ priv->quota = quota;
+ priv->invert = (flags & NFT_QUOTA_F_INV) ? true : false;
+ atomic64_set(&priv->remain, quota);
+
+ return 0;
+}
+
+static int nft_quota_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_quota *priv = nft_expr_priv(expr);
+ u32 flags = priv->invert ? NFT_QUOTA_F_INV : 0;
+
+ if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
+ NFTA_QUOTA_PAD) ||
+ nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_quota_type;
+static const struct nft_expr_ops nft_quota_ops = {
+ .type = &nft_quota_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_quota)),
+ .eval = nft_quota_eval,
+ .init = nft_quota_init,
+ .dump = nft_quota_dump,
+};
+
+static struct nft_expr_type nft_quota_type __read_mostly = {
+ .name = "quota",
+ .ops = &nft_quota_ops,
+ .policy = nft_quota_policy,
+ .maxattr = NFTA_QUOTA_MAX,
+ .flags = NFT_EXPR_STATEFUL,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_quota_module_init(void)
+{
+ return nft_register_expr(&nft_quota_type);
+}
+
+static void __exit nft_quota_module_exit(void)
+{
+ nft_unregister_expr(&nft_quota_type);
+}
+
+module_init(nft_quota_module_init);
+module_exit(nft_quota_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("quota");
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
new file mode 100644
index 000000000000..c6d5358482d1
--- /dev/null
+++ b/net/netfilter/nft_range.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_range_expr {
+ struct nft_data data_from;
+ struct nft_data data_to;
+ enum nft_registers sreg:8;
+ u8 len;
+ enum nft_range_ops op:8;
+};
+
+static void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_range_expr *priv = nft_expr_priv(expr);
+ bool mismatch;
+ int d1, d2;
+
+ d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
+ d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
+ switch (priv->op) {
+ case NFT_RANGE_EQ:
+ mismatch = (d1 < 0 || d2 > 0);
+ break;
+ case NFT_RANGE_NEQ:
+ mismatch = (d1 >= 0 && d2 <= 0);
+ break;
+ }
+
+ if (mismatch)
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
+ [NFTA_RANGE_SREG] = { .type = NLA_U32 },
+ [NFTA_RANGE_OP] = { .type = NLA_U32 },
+ [NFTA_RANGE_FROM_DATA] = { .type = NLA_NESTED },
+ [NFTA_RANGE_TO_DATA] = { .type = NLA_NESTED },
+};
+
+static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_range_expr *priv = nft_expr_priv(expr);
+ struct nft_data_desc desc_from, desc_to;
+ int err;
+
+ err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
+ &desc_from, tb[NFTA_RANGE_FROM_DATA]);
+ if (err < 0)
+ return err;
+
+ err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
+ &desc_to, tb[NFTA_RANGE_TO_DATA]);
+ if (err < 0)
+ goto err1;
+
+ if (desc_from.len != desc_to.len) {
+ err = -EINVAL;
+ goto err2;
+ }
+
+ priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]);
+ err = nft_validate_register_load(priv->sreg, desc_from.len);
+ if (err < 0)
+ goto err2;
+
+ priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+ priv->len = desc_from.len;
+ return 0;
+err2:
+ nft_data_uninit(&priv->data_to, desc_to.type);
+err1:
+ nft_data_uninit(&priv->data_from, desc_from.type);
+ return err;
+}
+
+static int nft_range_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_range_expr *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_RANGE_SREG, priv->sreg))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_RANGE_OP, htonl(priv->op)))
+ goto nla_put_failure;
+
+ if (nft_data_dump(skb, NFTA_RANGE_FROM_DATA, &priv->data_from,
+ NFT_DATA_VALUE, priv->len) < 0 ||
+ nft_data_dump(skb, NFTA_RANGE_TO_DATA, &priv->data_to,
+ NFT_DATA_VALUE, priv->len) < 0)
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_range_type;
+static const struct nft_expr_ops nft_range_ops = {
+ .type = &nft_range_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_range_expr)),
+ .eval = nft_range_eval,
+ .init = nft_range_init,
+ .dump = nft_range_dump,
+};
+
+static struct nft_expr_type nft_range_type __read_mostly = {
+ .name = "range",
+ .ops = &nft_range_ops,
+ .policy = nft_range_policy,
+ .maxattr = NFTA_RANGE_MAX,
+ .owner = THIS_MODULE,
+};
+
+int __init nft_range_module_init(void)
+{
+ return nft_register_expr(&nft_range_type);
+}
+
+void nft_range_module_exit(void)
+{
+ nft_unregister_expr(&nft_range_type);
+}
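
nft_range compares the selected register against both bounds with memcmp(), which orders the big-endian data loaded by nft_data_init() like unsigned integers; NFT_RANGE_EQ matches inside [from, to] and NFT_RANGE_NEQ outside. A sketch of the same comparison on raw big-endian bytes:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Mirrors nft_range_eval() for NFT_RANGE_EQ: big-endian byte strings
 * compare lexicographically, so memcmp() orders them like integers. */
static bool in_range(const uint8_t *val, const uint8_t *from,
		     const uint8_t *to, size_t len)
{
	return memcmp(val, from, len) >= 0 && memcmp(val, to, len) <= 0;
}

int main(void)
{
	/* TCP ports 1000-2000 and a candidate port 1500, big endian. */
	uint8_t from[2] = { 0x03, 0xe8 };	/* 1000 */
	uint8_t to[2]   = { 0x07, 0xd0 };	/* 2000 */
	uint8_t val[2]  = { 0x05, 0xdc };	/* 1500 */

	printf("1500 in [1000, 2000]: %d\n",
	       in_range(val, from, to, sizeof(val)));
	return 0;
}
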
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
new file mode 100644
index 000000000000..3794cb2fc788
--- /dev/null
+++ b/net/netfilter/nft_set_hash.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/jhash.h>
+#include <linux/netlink.h>
+#include <linux/workqueue.h>
+#include <linux/rhashtable.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+/* We target a hash table size of 4, element hint is 75% of final size */
+#define NFT_HASH_ELEMENT_HINT 3
+
+struct nft_hash {
+ struct rhashtable ht;
+ struct delayed_work gc_work;
+};
+
+struct nft_hash_elem {
+ struct rhash_head node;
+ struct nft_set_ext ext;
+};
+
+struct nft_hash_cmp_arg {
+ const struct nft_set *set;
+ const u32 *key;
+ u8 genmask;
+};
+
+static const struct rhashtable_params nft_hash_params;
+
+static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
+{
+ const struct nft_hash_cmp_arg *arg = data;
+
+ return jhash(arg->key, len, seed);
+}
+
+static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct nft_hash_elem *he = data;
+
+ return jhash(nft_set_ext_key(&he->ext), len, seed);
+}
+
+static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct nft_hash_cmp_arg *x = arg->key;
+ const struct nft_hash_elem *he = ptr;
+
+ if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
+ return 1;
+ if (nft_set_elem_expired(&he->ext))
+ return 1;
+ if (!nft_set_elem_active(&he->ext, x->genmask))
+ return 1;
+ return 0;
+}
+
+static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ const struct nft_hash_elem *he;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_cur(net),
+ .set = set,
+ .key = key,
+ };
+
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL)
+ *ext = &he->ext;
+
+ return !!he;
+}
+
+static bool nft_hash_update(struct nft_set *set, const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *regs),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = NFT_GENMASK_ANY,
+ .set = set,
+ .key = key,
+ };
+
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL)
+ goto out;
+
+ he = new(set, expr, regs);
+ if (he == NULL)
+ goto err1;
+ if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params))
+ goto err2;
+out:
+ *ext = &he->ext;
+ return true;
+
+err2:
+ nft_set_elem_destroy(set, he);
+err1:
+ return false;
+}
+
+static int nft_hash_insert(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he = elem->priv;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_next(net),
+ .set = set,
+ .key = elem->key.val.data,
+ };
+ struct nft_hash_elem *prev;
+
+ prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params);
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+ if (prev) {
+ *ext = &prev->ext;
+ return -EEXIST;
+ }
+ return 0;
+}
+
+static void nft_hash_activate(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_hash_elem *he = elem->priv;
+
+ nft_set_elem_change_active(net, set, &he->ext);
+ nft_set_elem_clear_busy(&he->ext);
+}
+
+static void *nft_hash_deactivate(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ struct nft_hash_cmp_arg arg = {
+ .genmask = nft_genmask_next(net),
+ .set = set,
+ .key = elem->key.val.data,
+ };
+
+ rcu_read_lock();
+ he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
+ if (he != NULL) {
+ if (!nft_set_elem_mark_busy(&he->ext) ||
+ !nft_is_active(net, &he->ext))
+ nft_set_elem_change_active(net, set, &he->ext);
+ else
+ he = NULL;
+ }
+ rcu_read_unlock();
+
+ return he;
+}
+
+static void nft_hash_remove(const struct nft_set *set,
+ const struct nft_set_elem *elem)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he = elem->priv;
+
+ rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
+}
+
+static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct nft_hash_elem *he;
+ struct rhashtable_iter hti;
+ struct nft_set_elem elem;
+ int err;
+
+ err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
+ iter->err = err;
+ if (err)
+ return;
+
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN) {
+ iter->err = err;
+ goto out;
+ }
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+ err = PTR_ERR(he);
+ if (err != -EAGAIN) {
+ iter->err = err;
+ goto out;
+ }
+
+ continue;
+ }
+
+ if (iter->count < iter->skip)
+ goto cont;
+ if (nft_set_elem_expired(&he->ext))
+ goto cont;
+ if (!nft_set_elem_active(&he->ext, iter->genmask))
+ goto cont;
+
+ elem.priv = he;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+ if (iter->err < 0)
+ goto out;
+
+cont:
+ iter->count++;
+ }
+
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+}
+
+static void nft_hash_gc(struct work_struct *work)
+{
+ struct nft_set *set;
+ struct nft_hash_elem *he;
+ struct nft_hash *priv;
+ struct nft_set_gc_batch *gcb = NULL;
+ struct rhashtable_iter hti;
+ int err;
+
+ priv = container_of(work, struct nft_hash, gc_work.work);
+ set = nft_set_container_of(priv);
+
+ err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
+ if (err)
+ goto schedule;
+
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN)
+ goto out;
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+ if (PTR_ERR(he) != -EAGAIN)
+ goto out;
+ continue;
+ }
+
+ if (!nft_set_elem_expired(&he->ext))
+ continue;
+ if (nft_set_elem_mark_busy(&he->ext))
+ continue;
+
+ gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+ if (gcb == NULL)
+ goto out;
+ rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
+ atomic_dec(&set->nelems);
+ nft_set_gc_batch_add(gcb, he);
+ }
+out:
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ nft_set_gc_batch_complete(gcb);
+schedule:
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+}
+
+static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+{
+ return sizeof(struct nft_hash);
+}
+
+static const struct rhashtable_params nft_hash_params = {
+ .head_offset = offsetof(struct nft_hash_elem, node),
+ .hashfn = nft_hash_key,
+ .obj_hashfn = nft_hash_obj,
+ .obj_cmpfn = nft_hash_cmp,
+ .automatic_shrinking = true,
+};
+
+static int nft_hash_init(const struct nft_set *set,
+ const struct nft_set_desc *desc,
+ const struct nlattr * const tb[])
+{
+ struct nft_hash *priv = nft_set_priv(set);
+ struct rhashtable_params params = nft_hash_params;
+ int err;
+
+ params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
+ params.key_len = set->klen;
+
+ err = rhashtable_init(&priv->ht, &params);
+ if (err < 0)
+ return err;
+
+ INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
+ if (set->flags & NFT_SET_TIMEOUT)
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+ return 0;
+}
+
+static void nft_hash_elem_destroy(void *ptr, void *arg)
+{
+ nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+}
+
+static void nft_hash_destroy(const struct nft_set *set)
+{
+ struct nft_hash *priv = nft_set_priv(set);
+
+ cancel_delayed_work_sync(&priv->gc_work);
+ rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
+ (void *)set);
+}
+
+static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
+ struct nft_set_estimate *est)
+{
+ unsigned int esize;
+
+ esize = sizeof(struct nft_hash_elem);
+ if (desc->size) {
+ est->size = sizeof(struct nft_hash) +
+ roundup_pow_of_two(desc->size * 4 / 3) *
+ sizeof(struct nft_hash_elem *) +
+ desc->size * esize;
+ } else {
+ /* Resizing happens when the load drops below 30% or goes
+ * above 75%. The average of 52.5% load (approximated by 50%)
+ * is used for the size estimation of the hash buckets,
+ * meaning we calculate two buckets per element.
+ */
+ est->size = esize + 2 * sizeof(struct nft_hash_elem *);
+ }
+
+ est->class = NFT_SET_CLASS_O_1;
+
+ return true;
+}
+
+static struct nft_set_ops nft_hash_ops __read_mostly = {
+ .privsize = nft_hash_privsize,
+ .elemsize = offsetof(struct nft_hash_elem, ext),
+ .estimate = nft_hash_estimate,
+ .init = nft_hash_init,
+ .destroy = nft_hash_destroy,
+ .insert = nft_hash_insert,
+ .activate = nft_hash_activate,
+ .deactivate = nft_hash_deactivate,
+ .remove = nft_hash_remove,
+ .lookup = nft_hash_lookup,
+ .update = nft_hash_update,
+ .walk = nft_hash_walk,
+ .features = NFT_SET_MAP | NFT_SET_TIMEOUT,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_hash_module_init(void)
+{
+ return nft_register_set(&nft_hash_ops);
+}
+
+static void __exit nft_hash_module_exit(void)
+{
+ nft_unregister_set(&nft_hash_ops);
+}
+
+module_init(nft_hash_module_init);
+module_exit(nft_hash_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
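
The size estimate in nft_hash_estimate() above budgets roundup_pow_of_two(size * 4 / 3) bucket pointers for a fixed-size set (so the table stays below ~75% load) and, when no size hint is given, assumes roughly 50% average load, i.e. two bucket pointers per element. A stand-alone sketch of that arithmetic, assuming 8-byte pointers and a placeholder element size; the helper names below are illustrative, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* User-space stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t next_pow2(uint64_t x)
{
    uint64_t p = 1;

    while (p < x)
        p <<= 1;
    return p;
}

int main(void)
{
    const uint64_t elem_size = 64;   /* placeholder for sizeof(struct nft_hash_elem) */
    const uint64_t ptr_size  = 8;    /* sizeof(struct nft_hash_elem *) on 64-bit */
    uint64_t desc_size = 1000;       /* size hint supplied by userspace */

    /* Fixed-size set: bucket array sized for at most ~75% load
     * (size * 4 / 3, rounded up to a power of two), plus the elements. */
    uint64_t fixed = next_pow2(desc_size * 4 / 3) * ptr_size
                     + desc_size * elem_size;

    /* No size hint: assume ~50% average load, i.e. two buckets per element. */
    uint64_t per_elem = elem_size + 2 * ptr_size;

    printf("fixed-size estimate for %llu elements: %llu bytes\n",
           (unsigned long long)desc_size, (unsigned long long)fixed);
    printf("per-element estimate: %llu bytes\n",
           (unsigned long long)per_elem);
    return 0;
}
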
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_set_rbtree.c
index ffe9ae062d23..38b5bda242f8 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -96,7 +96,8 @@ out:
}
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
- struct nft_rbtree_elem *new)
+ struct nft_rbtree_elem *new,
+ struct nft_set_ext **ext)
{
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
@@ -124,8 +125,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
else if (!nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_end(new))
p = &parent->rb_right;
- else
+ else {
+ *ext = &rbe->ext;
return -EEXIST;
+ }
}
}
}
@@ -135,13 +138,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
}
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
- const struct nft_set_elem *elem)
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext)
{
struct nft_rbtree_elem *rbe = elem->priv;
int err;
spin_lock_bh(&nft_rbtree_lock);
- err = __nft_rbtree_insert(net, set, rbe);
+ err = __nft_rbtree_insert(net, set, rbe, ext);
spin_unlock_bh(&nft_rbtree_lock);
return err;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 515131f9e021..dbd6c4a12b97 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -24,7 +24,6 @@ static DEFINE_MUTEX(xt_rateest_mutex);
#define RATEEST_HSIZE 16
static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly;
static unsigned int jhash_rnd __read_mostly;
-static bool rnd_inited __read_mostly;
static unsigned int xt_rateest_hash(const char *name)
{
@@ -99,10 +98,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
} cfg;
int ret;
- if (unlikely(!rnd_inited)) {
- get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
- rnd_inited = true;
- }
+ net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
est = xt_rateest_lookup(info->name);
if (est) {
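
net_get_random_once() replaces the open-coded "seed the hash key on first use" pattern with a helper that runs the seeding at most once and is safe against concurrent callers. A rough user-space analogue of that idea, with pthread_once() and rand() standing in for the kernel primitives (build with -lpthread); this is a sketch of the pattern, not the kernel implementation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* The seed is written exactly once, no matter how many callers race here;
 * rand()/time() are placeholder entropy, not what the kernel uses. */
static unsigned int jhash_rnd;
static pthread_once_t jhash_rnd_once = PTHREAD_ONCE_INIT;

static void seed_jhash_rnd(void)
{
    srand((unsigned int)time(NULL));
    jhash_rnd = (unsigned int)rand();
}

static unsigned int get_jhash_rnd(void)
{
    pthread_once(&jhash_rnd_once, seed_jhash_rnd);
    return jhash_rnd;
}

int main(void)
{
    printf("seed: %u\n", get_jhash_rnd());
    printf("seed again (unchanged): %u\n", get_jhash_rnd());
    return 0;
}
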
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index e118397254af..872db2d0e2a9 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -110,18 +110,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
struct net *net = par->net;
unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+ unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);
- if (dst_mtu(skb_dst(skb)) <= minlen) {
+ if (min_mtu <= minlen) {
net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
- dst_mtu(skb_dst(skb)));
+ min_mtu);
return -1;
}
- if (in_mtu <= minlen) {
- net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
- in_mtu);
- return -1;
- }
- newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
+ newmss = min_mtu - minlen;
} else
newmss = info->mss;
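
The xt_TCPMSS change above folds the two MTU checks into one: take the smaller of the forward and reverse path MTU, reject it if it cannot even cover the headers (minlen), and otherwise derive the clamped MSS from it. A minimal sketch of that arithmetic, with made-up MTU values and the IPv4 header budget of 40 bytes assumed for minlen:

#include <stdio.h>

/* minlen is the assumed IPv4 budget here (20-byte IP + 20-byte TCP);
 * the MTU values in main() are made up for illustration. */
static int clamp_mss(unsigned int dst_mtu, unsigned int in_mtu,
                     unsigned int minlen, unsigned int *newmss)
{
    unsigned int min_mtu = dst_mtu < in_mtu ? dst_mtu : in_mtu;

    if (min_mtu <= minlen) {
        fprintf(stderr, "unknown or invalid path-MTU (%u)\n", min_mtu);
        return -1;
    }
    *newmss = min_mtu - minlen;
    return 0;
}

int main(void)
{
    unsigned int mss;

    if (clamp_mss(1500, 1492, 40, &mss) == 0)
        printf("clamped MSS: %u\n", mss);   /* min(1500, 1492) - 40 = 1452 */

    if (clamp_mss(0, 1500, 40, &mss) != 0)
        printf("rejected: path MTU unknown\n");
    return 0;
}
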
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 6e57a3966dc5..0471db4032c5 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -89,6 +89,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
if (info->oif[0]) {
+ int ret;
+
if (info->oif[sizeof(info->oif)-1] != '\0')
return -EINVAL;
@@ -101,7 +103,11 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
priv->notifier.notifier_call = tee_netdev_event;
info->priv = priv;
- register_netdevice_notifier(&priv->notifier);
+ ret = register_netdevice_notifier(&priv->notifier);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
} else
info->priv = NULL;
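
The xt_TEE fix above makes the check entry honour the return value of register_netdevice_notifier(): on failure the freshly allocated priv must be freed and the error propagated, rather than leaving a target bound to a notifier that was never registered. A user-space sketch of that unwind pattern, with register_cb() standing in for the notifier registration and a simulated failure:

#include <stdio.h>
#include <stdlib.h>

struct tee_priv {
    int oif;
};

/* Stand-in for register_netdevice_notifier(); fails on request. */
static int register_cb(void (*cb)(int), int simulate_failure)
{
    (void)cb;
    return simulate_failure ? -12 /* -ENOMEM */ : 0;
}

static void dummy_event(int ev) { (void)ev; }

static int tee_check(int simulate_failure, struct tee_priv **out)
{
    struct tee_priv *priv = calloc(1, sizeof(*priv));
    int ret;

    if (!priv)
        return -12;

    ret = register_cb(dummy_event, simulate_failure);
    if (ret) {
        free(priv);     /* undo the allocation on failure */
        return ret;
    }
    *out = priv;
    return 0;
}

int main(void)
{
    struct tee_priv *priv = NULL;

    printf("ok path:   %d\n", tee_check(0, &priv));
    free(priv);
    printf("fail path: %d (priv freed internally)\n", tee_check(1, &priv));
    return 0;
}
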
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 99bbc829868d..b6dc322593a3 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -366,14 +366,8 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
unsigned int i;
int ret;
- if (unlikely(!connlimit_rnd)) {
- u_int32_t rand;
+ net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));
- do {
- get_random_bytes(&rand, sizeof(rand));
- } while (!rand);
- cmpxchg(&connlimit_rnd, 0, rand);
- }
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0) {
pr_info("cannot load conntrack support for "
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 188404b9b002..a3b8f697cfc5 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -233,10 +233,8 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
return false;
if (info->match_flags & XT_CONNTRACK_EXPIRES) {
- unsigned long expires = 0;
+ unsigned long expires = nf_ct_expires(ct) / HZ;
- if (timer_pending(&ct->timeout))
- expires = (ct->timeout.expires - jiffies) / HZ;
if ((expires >= info->expires_min &&
expires <= info->expires_max) ^
!(info->invert_flags & XT_CONNTRACK_EXPIRES))
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 178696852bde..2fab0c65aa94 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -56,6 +56,7 @@ static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
}
/* need to declare this at the top */
+static const struct file_operations dl_file_ops_v1;
static const struct file_operations dl_file_ops;
/* hash table crap */
@@ -86,8 +87,8 @@ struct dsthash_ent {
unsigned long expires; /* precalculated expiry time */
struct {
unsigned long prev; /* last modification */
- u_int32_t credit;
- u_int32_t credit_cap, cost;
+ u_int64_t credit;
+ u_int64_t credit_cap, cost;
} rateinfo;
struct rcu_head rcu;
};
@@ -98,7 +99,7 @@ struct xt_hashlimit_htable {
u_int8_t family;
bool rnd_initialized;
- struct hashlimit_cfg1 cfg; /* config */
+ struct hashlimit_cfg2 cfg; /* config */
/* used internally */
spinlock_t lock; /* lock for list_head */
@@ -114,6 +115,30 @@ struct xt_hashlimit_htable {
struct hlist_head hash[0]; /* hashtable itself */
};
+static int
+cfg_copy(struct hashlimit_cfg2 *to, void *from, int revision)
+{
+ if (revision == 1) {
+ struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from;
+
+ to->mode = cfg->mode;
+ to->avg = cfg->avg;
+ to->burst = cfg->burst;
+ to->size = cfg->size;
+ to->max = cfg->max;
+ to->gc_interval = cfg->gc_interval;
+ to->expire = cfg->expire;
+ to->srcmask = cfg->srcmask;
+ to->dstmask = cfg->dstmask;
+ } else if (revision == 2) {
+ memcpy(to, from, sizeof(struct hashlimit_cfg2));
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;
@@ -215,16 +240,18 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
}
static void htable_gc(struct work_struct *work);
-static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
- u_int8_t family)
+static int htable_create(struct net *net, struct hashlimit_cfg2 *cfg,
+ const char *name, u_int8_t family,
+ struct xt_hashlimit_htable **out_hinfo,
+ int revision)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
- unsigned int size;
- unsigned int i;
+ unsigned int size, i;
+ int ret;
- if (minfo->cfg.size) {
- size = minfo->cfg.size;
+ if (cfg->size) {
+ size = cfg->size;
} else {
size = (totalram_pages << PAGE_SHIFT) / 16384 /
sizeof(struct list_head);
@@ -238,10 +265,14 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
sizeof(struct list_head) * size);
if (hinfo == NULL)
return -ENOMEM;
- minfo->hinfo = hinfo;
+ *out_hinfo = hinfo;
/* copy match config into hashtable config */
- memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
+ ret = cfg_copy(&hinfo->cfg, (void *)cfg, 2);
+
+ if (ret)
+ return ret;
+
hinfo->cfg.size = size;
if (hinfo->cfg.max == 0)
hinfo->cfg.max = 8 * hinfo->cfg.size;
@@ -255,17 +286,18 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
hinfo->count = 0;
hinfo->family = family;
hinfo->rnd_initialized = false;
- hinfo->name = kstrdup(minfo->name, GFP_KERNEL);
+ hinfo->name = kstrdup(name, GFP_KERNEL);
if (!hinfo->name) {
vfree(hinfo);
return -ENOMEM;
}
spin_lock_init(&hinfo->lock);
- hinfo->pde = proc_create_data(minfo->name, 0,
+ hinfo->pde = proc_create_data(name, 0,
(family == NFPROTO_IPV4) ?
hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
- &dl_file_ops, hinfo);
+ (revision == 1) ? &dl_file_ops_v1 : &dl_file_ops,
+ hinfo);
if (hinfo->pde == NULL) {
kfree(hinfo->name);
vfree(hinfo);
@@ -398,7 +430,8 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
(slowest userspace tool allows), which means
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
-#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
@@ -408,9 +441,12 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
+#define _POW2_BELOW64(x) (_POW2_BELOW32(x)|_POW2_BELOW32((x)>>32))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
+#define POW2_BELOW64(x) ((_POW2_BELOW64(x)>>1) + 1)
-#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+#define CREDITS_PER_JIFFY POW2_BELOW64(MAX_CPJ)
+#define CREDITS_PER_JIFFY_v1 POW2_BELOW32(MAX_CPJ_v1)
/* in byte mode, the lowest possible rate is one packet/second.
* credit_cap is used as a counter that tells us how many times we can
@@ -425,14 +461,25 @@ static u32 xt_hashlimit_len_to_chunks(u32 len)
}
/* Precision saver. */
-static u32 user2credits(u32 user)
+static u64 user2credits(u64 user, int revision)
{
- /* If multiplying would overflow... */
- if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
- /* Divide first. */
- return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
+ if (revision == 1) {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY_v1))
+ /* Divide first. */
+ return div64_u64(user, XT_HASHLIMIT_SCALE)
+ * HZ * CREDITS_PER_JIFFY_v1;
+
+ return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
+ XT_HASHLIMIT_SCALE);
+ } else {
+ if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+ return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
+ * HZ * CREDITS_PER_JIFFY;
- return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
+ return div64_u64(user * HZ * CREDITS_PER_JIFFY,
+ XT_HASHLIMIT_SCALE_v2);
+ }
}
static u32 user2credits_byte(u32 user)
@@ -442,10 +489,11 @@ static u32 user2credits_byte(u32 user)
return (u32) (us >> 32);
}
-static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
+static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now,
+ u32 mode, int revision)
{
unsigned long delta = now - dh->rateinfo.prev;
- u32 cap;
+ u64 cap, cpj;
if (delta == 0)
return;
@@ -453,7 +501,7 @@ static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
dh->rateinfo.prev = now;
if (mode & XT_HASHLIMIT_BYTES) {
- u32 tmp = dh->rateinfo.credit;
+ u64 tmp = dh->rateinfo.credit;
dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
cap = CREDITS_PER_JIFFY_BYTES * HZ;
if (tmp >= dh->rateinfo.credit) {/* overflow */
@@ -461,7 +509,9 @@ static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
return;
}
} else {
- dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
+ cpj = (revision == 1) ?
+ CREDITS_PER_JIFFY_v1 : CREDITS_PER_JIFFY;
+ dh->rateinfo.credit += delta * cpj;
cap = dh->rateinfo.credit_cap;
}
if (dh->rateinfo.credit > cap)
@@ -469,7 +519,7 @@ static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
}
static void rateinfo_init(struct dsthash_ent *dh,
- struct xt_hashlimit_htable *hinfo)
+ struct xt_hashlimit_htable *hinfo, int revision)
{
dh->rateinfo.prev = jiffies;
if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
@@ -478,8 +528,8 @@ static void rateinfo_init(struct dsthash_ent *dh,
dh->rateinfo.credit_cap = hinfo->cfg.burst;
} else {
dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
- hinfo->cfg.burst);
- dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ hinfo->cfg.burst, revision);
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg, revision);
dh->rateinfo.credit_cap = dh->rateinfo.credit;
}
}
@@ -603,15 +653,15 @@ static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
}
static bool
-hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
+ struct xt_hashlimit_htable *hinfo,
+ const struct hashlimit_cfg2 *cfg, int revision)
{
- const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
- struct xt_hashlimit_htable *hinfo = info->hinfo;
unsigned long now = jiffies;
struct dsthash_ent *dh;
struct dsthash_dst dst;
bool race = false;
- u32 cost;
+ u64 cost;
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
goto hotdrop;
@@ -626,18 +676,18 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
} else if (race) {
/* Already got an entry, update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now, hinfo->cfg.mode);
+ rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
} else {
dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_init(dh, hinfo);
+ rateinfo_init(dh, hinfo, revision);
}
} else {
/* update expiration timeout */
dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
- rateinfo_recalc(dh, now, hinfo->cfg.mode);
+ rateinfo_recalc(dh, now, hinfo->cfg.mode, revision);
}
- if (info->cfg.mode & XT_HASHLIMIT_BYTES)
+ if (cfg->mode & XT_HASHLIMIT_BYTES)
cost = hashlimit_byte_cost(skb->len, dh);
else
cost = dh->rateinfo.cost;
@@ -647,84 +697,157 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
dh->rateinfo.credit -= cost;
spin_unlock(&dh->lock);
rcu_read_unlock_bh();
- return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
+ return !(cfg->mode & XT_HASHLIMIT_INVERT);
}
spin_unlock(&dh->lock);
rcu_read_unlock_bh();
/* default match is underlimit - so over the limit, we need to invert */
- return info->cfg.mode & XT_HASHLIMIT_INVERT;
+ return cfg->mode & XT_HASHLIMIT_INVERT;
hotdrop:
par->hotdrop = true;
return false;
}
-static int hashlimit_mt_check(const struct xt_mtchk_param *par)
+static bool
+hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
+ struct xt_hashlimit_htable *hinfo = info->hinfo;
+ struct hashlimit_cfg2 cfg = {};
+ int ret;
+
+ ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
+
+ if (ret)
+ return ret;
+
+ return hashlimit_mt_common(skb, par, hinfo, &cfg, 1);
+}
+
+static bool
+hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
+ struct xt_hashlimit_htable *hinfo = info->hinfo;
+
+ return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 2);
+}
+
+static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ struct xt_hashlimit_htable **hinfo,
+ struct hashlimit_cfg2 *cfg,
+ const char *name, int revision)
{
struct net *net = par->net;
- struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
int ret;
- if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
- return -EINVAL;
- if (info->name[sizeof(info->name)-1] != '\0')
+ if (cfg->gc_interval == 0 || cfg->expire == 0)
return -EINVAL;
if (par->family == NFPROTO_IPV4) {
- if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
+ if (cfg->srcmask > 32 || cfg->dstmask > 32)
return -EINVAL;
} else {
- if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
+ if (cfg->srcmask > 128 || cfg->dstmask > 128)
return -EINVAL;
}
- if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
+ if (cfg->mode & ~XT_HASHLIMIT_ALL) {
pr_info("Unknown mode mask %X, kernel too old?\n",
- info->cfg.mode);
+ cfg->mode);
return -EINVAL;
}
/* Check for overflow. */
- if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
- if (user2credits_byte(info->cfg.avg) == 0) {
- pr_info("overflow, rate too high: %u\n", info->cfg.avg);
+ if (cfg->mode & XT_HASHLIMIT_BYTES) {
+ if (user2credits_byte(cfg->avg) == 0) {
+ pr_info("overflow, rate too high: %llu\n", cfg->avg);
return -EINVAL;
}
- } else if (info->cfg.burst == 0 ||
- user2credits(info->cfg.avg * info->cfg.burst) <
- user2credits(info->cfg.avg)) {
- pr_info("overflow, try lower: %u/%u\n",
- info->cfg.avg, info->cfg.burst);
+ } else if (cfg->burst == 0 ||
+ user2credits(cfg->avg * cfg->burst, revision) <
+ user2credits(cfg->avg, revision)) {
+ pr_info("overflow, try lower: %llu/%llu\n",
+ cfg->avg, cfg->burst);
return -ERANGE;
}
mutex_lock(&hashlimit_mutex);
- info->hinfo = htable_find_get(net, info->name, par->family);
- if (info->hinfo == NULL) {
- ret = htable_create(net, info, par->family);
+ *hinfo = htable_find_get(net, name, par->family);
+ if (*hinfo == NULL) {
+ ret = htable_create(net, cfg, name, par->family,
+ hinfo, revision);
if (ret < 0) {
mutex_unlock(&hashlimit_mutex);
return ret;
}
}
mutex_unlock(&hashlimit_mutex);
+
return 0;
}
-static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
+static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
+ struct hashlimit_cfg2 cfg = {};
+ int ret;
+
+ if (info->name[sizeof(info->name) - 1] != '\0')
+ return -EINVAL;
+
+ ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
+
+ if (ret)
+ return ret;
+
+ return hashlimit_mt_check_common(par, &info->hinfo,
+ &cfg, info->name, 1);
+}
+
+static int hashlimit_mt_check(const struct xt_mtchk_param *par)
+{
+ struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
+
+ if (info->name[sizeof(info->name) - 1] != '\0')
+ return -EINVAL;
+
+ return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
+ info->name, 2);
+}
+
+static void hashlimit_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
htable_put(info->hinfo);
}
+static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
+{
+ const struct xt_hashlimit_mtinfo2 *info = par->matchinfo;
+
+ htable_put(info->hinfo);
+}
+
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
{
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV4,
- .match = hashlimit_mt,
+ .match = hashlimit_mt_v1,
.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
+ .checkentry = hashlimit_mt_check_v1,
+ .destroy = hashlimit_mt_destroy_v1,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "hashlimit",
+ .revision = 2,
+ .family = NFPROTO_IPV4,
+ .match = hashlimit_mt,
+ .matchsize = sizeof(struct xt_hashlimit_mtinfo2),
.checkentry = hashlimit_mt_check,
.destroy = hashlimit_mt_destroy,
.me = THIS_MODULE,
@@ -734,8 +857,18 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
.name = "hashlimit",
.revision = 1,
.family = NFPROTO_IPV6,
- .match = hashlimit_mt,
+ .match = hashlimit_mt_v1,
.matchsize = sizeof(struct xt_hashlimit_mtinfo1),
+ .checkentry = hashlimit_mt_check_v1,
+ .destroy = hashlimit_mt_destroy_v1,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "hashlimit",
+ .revision = 2,
+ .family = NFPROTO_IPV6,
+ .match = hashlimit_mt,
+ .matchsize = sizeof(struct xt_hashlimit_mtinfo2),
.checkentry = hashlimit_mt_check,
.destroy = hashlimit_mt_destroy,
.me = THIS_MODULE,
@@ -786,18 +919,12 @@ static void dl_seq_stop(struct seq_file *s, void *v)
spin_unlock_bh(&htable->lock);
}
-static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
- struct seq_file *s)
+static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
+ struct seq_file *s)
{
- const struct xt_hashlimit_htable *ht = s->private;
-
- spin_lock(&ent->lock);
- /* recalculate to show accurate numbers */
- rateinfo_recalc(ent, jiffies, ht->cfg.mode);
-
switch (family) {
case NFPROTO_IPV4:
- seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
+ seq_printf(s, "%ld %pI4:%u->%pI4:%u %llu %llu %llu\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip.src,
ntohs(ent->dst.src_port),
@@ -808,7 +935,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
break;
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
- seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
+ seq_printf(s, "%ld %pI6:%u->%pI6:%u %llu %llu %llu\n",
(long)(ent->expires - jiffies)/HZ,
&ent->dst.ip6.src,
ntohs(ent->dst.src_port),
@@ -821,10 +948,52 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
default:
BUG();
}
+}
+
+static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
+ struct seq_file *s)
+{
+ const struct xt_hashlimit_htable *ht = s->private;
+
+ spin_lock(&ent->lock);
+ /* recalculate to show accurate numbers */
+ rateinfo_recalc(ent, jiffies, ht->cfg.mode, 1);
+
+ dl_seq_print(ent, family, s);
+
+ spin_unlock(&ent->lock);
+ return seq_has_overflowed(s);
+}
+
+static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
+ struct seq_file *s)
+{
+ const struct xt_hashlimit_htable *ht = s->private;
+
+ spin_lock(&ent->lock);
+ /* recalculate to show accurate numbers */
+ rateinfo_recalc(ent, jiffies, ht->cfg.mode, 2);
+
+ dl_seq_print(ent, family, s);
+
spin_unlock(&ent->lock);
return seq_has_overflowed(s);
}
+static int dl_seq_show_v1(struct seq_file *s, void *v)
+{
+ struct xt_hashlimit_htable *htable = s->private;
+ unsigned int *bucket = (unsigned int *)v;
+ struct dsthash_ent *ent;
+
+ if (!hlist_empty(&htable->hash[*bucket])) {
+ hlist_for_each_entry(ent, &htable->hash[*bucket], node)
+ if (dl_seq_real_show_v1(ent, htable->family, s))
+ return -1;
+ }
+ return 0;
+}
+
static int dl_seq_show(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = s->private;
@@ -839,6 +1008,13 @@ static int dl_seq_show(struct seq_file *s, void *v)
return 0;
}
+static const struct seq_operations dl_seq_ops_v1 = {
+ .start = dl_seq_start,
+ .next = dl_seq_next,
+ .stop = dl_seq_stop,
+ .show = dl_seq_show_v1
+};
+
static const struct seq_operations dl_seq_ops = {
.start = dl_seq_start,
.next = dl_seq_next,
@@ -846,17 +1022,37 @@ static const struct seq_operations dl_seq_ops = {
.show = dl_seq_show
};
+static int dl_proc_open_v1(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &dl_seq_ops_v1);
+
+ if (!ret) {
+ struct seq_file *sf = file->private_data;
+ sf->private = PDE_DATA(inode);
+ }
+ return ret;
+}
+
static int dl_proc_open(struct inode *inode, struct file *file)
{
int ret = seq_open(file, &dl_seq_ops);
if (!ret) {
struct seq_file *sf = file->private_data;
+
sf->private = PDE_DATA(inode);
}
return ret;
}
+static const struct file_operations dl_file_ops_v1 = {
+ .owner = THIS_MODULE,
+ .open = dl_proc_open_v1,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
static const struct file_operations dl_file_ops = {
.owner = THIS_MODULE,
.open = dl_proc_open,
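
Revision 2 of xt_hashlimit widens credit, credit_cap and cost to 64 bits and recomputes CREDITS_PER_JIFFY from a 64-bit ceiling, so much higher rates fit without wrapping; user2credits() keeps the divide-first trick for the cases where the multiplication could still overflow. A stand-alone sketch of that arithmetic — HZ and the two scale constants are assumptions here, the real values come from the kernel configuration and the xt_hashlimit uapi header:

#include <stdio.h>
#include <stdint.h>

#define HZ        1000ULL
#define SCALE_V1  10000ULL     /* assumed XT_HASHLIMIT_SCALE */
#define SCALE_V2  1000000ULL   /* assumed XT_HASHLIMIT_SCALE_v2 */

/* Largest power of two not above x, mirroring the POW2_BELOW macros
 * used to derive CREDITS_PER_JIFFY from MAX_CPJ. */
static uint64_t pow2_below(uint64_t x)
{
    uint64_t p = 1;

    while ((p << 1) && (p << 1) <= x)
        p <<= 1;
    return p;
}

int main(void)
{
    uint64_t cpj_v1 = pow2_below(0xFFFFFFFFULL / (HZ * 60 * 60 * 24));
    uint64_t cpj_v2 = pow2_below(0xFFFFFFFFFFFFFFFFULL / (HZ * 60 * 60 * 24));
    uint64_t avg = 2;   /* cfg->avg from the match, in scaled units */

    /* Divide first when the product would overflow, as user2credits() does. */
    uint64_t credit_v1 = avg > 0xFFFFFFFFULL / (HZ * cpj_v1)
                         ? (avg / SCALE_V1) * HZ * cpj_v1
                         : (avg * HZ * cpj_v1) / SCALE_V1;
    uint64_t credit_v2 = (avg * HZ * cpj_v2) / SCALE_V2;

    printf("credits/jiffy: v1=%llu v2=%llu\n",
           (unsigned long long)cpj_v1, (unsigned long long)cpj_v2);
    printf("cost for avg=%llu: v1=%llu v2=%llu\n",
           (unsigned long long)avg,
           (unsigned long long)credit_v1, (unsigned long long)credit_v2);
    return 0;
}
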
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index 9f4ab00c8050..f679dd4c272a 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -41,7 +41,7 @@ helper_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (!master_help)
return ret;
- /* rcu_read_lock()ed by nf_hook_slow */
+ /* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(master_help->helper);
if (!helper)
return ret;
@@ -65,7 +65,7 @@ static int helper_mt_check(const struct xt_mtchk_param *par)
par->family);
return ret;
}
- info->name[29] = '\0';
+ info->name[sizeof(info->name) - 1] = '\0';
return 0;
}
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index e5f18988aee0..bb33598e4530 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -107,8 +107,8 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
- pr_info("using --physdev-out and --physdev-is-out are only"
- "supported in the FORWARD and POSTROUTING chains with"
+ pr_info("using --physdev-out and --physdev-is-out are only "
+ "supported in the FORWARD and POSTROUTING chains with "
"bridged traffic.\n");
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
return -EINVAL;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d725a27743a1..e3b7a09b103e 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -110,7 +110,6 @@ static const struct file_operations recent_old_fops, recent_mt_fops;
#endif
static u_int32_t hash_rnd __read_mostly;
-static bool hash_rnd_inited __read_mostly;
static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
{
@@ -340,10 +339,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
int ret = -EINVAL;
size_t sz;
- if (unlikely(!hash_rnd_inited)) {
- get_random_bytes(&hash_rnd, sizeof(hash_rnd));
- hash_rnd_inited = true;
- }
+ net_get_random_once(&hash_rnd, sizeof(hash_rnd));
+
if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
pr_info("Unsupported user space flags (%08x)\n",
info->check_set);
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index ef36a56a02c6..4dedb96d1a06 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -68,7 +68,7 @@ match_packet(const struct sk_buff *skb,
++i, offset, sch->type, htons(sch->length),
sch->flags);
#endif
- offset += WORD_ROUND(ntohs(sch->length));
+ offset += SCTP_PAD4(ntohs(sch->length));
pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
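
SCTP_PAD4() is the renamed replacement for WORD_ROUND(): chunk lengths on the wire are rounded up to a 4-byte boundary before the parser advances to the next chunk. The rounding itself is the usual (len + 3) & ~3, shown stand-alone below:

#include <stdio.h>

static unsigned int sctp_pad4(unsigned int len)
{
    return (len + 3) & ~3U;   /* round up to the next multiple of 4 */
}

int main(void)
{
    unsigned int lengths[] = { 4, 5, 7, 8, 17 };

    for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
        printf("chunk length %u -> offset advances by %u\n",
               lengths[i], sctp_pad4(lengths[i]));
    return 0;
}
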
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 8dd836a8dd60..b2f0e986a6f4 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -63,43 +63,74 @@ out_nlmsg_trim:
static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
int protocol, int s_num)
{
+ struct rhashtable_iter *hti = (void *)cb->args[2];
struct netlink_table *tbl = &nl_table[protocol];
- struct rhashtable *ht = &tbl->hash;
- const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
struct net *net = sock_net(skb->sk);
struct netlink_diag_req *req;
struct netlink_sock *nlsk;
struct sock *sk;
- int ret = 0, num = 0, i;
+ int num = 2;
+ int ret = 0;
req = nlmsg_data(cb->nlh);
- for (i = 0; i < htbl->size; i++) {
- struct rhash_head *pos;
+ if (s_num > 1)
+ goto mc_list;
- rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
- sk = (struct sock *)nlsk;
+ num--;
- if (!net_eq(sock_net(sk), net))
- continue;
- if (num < s_num) {
- num++;
+ if (!hti) {
+ hti = kmalloc(sizeof(*hti), GFP_KERNEL);
+ if (!hti)
+ return -ENOMEM;
+
+ cb->args[2] = (long)hti;
+ }
+
+ if (!s_num)
+ rhashtable_walk_enter(&tbl->hash, hti);
+
+ ret = rhashtable_walk_start(hti);
+ if (ret == -EAGAIN)
+ ret = 0;
+ if (ret)
+ goto stop;
+
+ while ((nlsk = rhashtable_walk_next(hti))) {
+ if (IS_ERR(nlsk)) {
+ ret = PTR_ERR(nlsk);
+ if (ret == -EAGAIN) {
+ ret = 0;
continue;
}
+ break;
+ }
- if (sk_diag_fill(sk, skb, req,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI,
- sock_i_ino(sk)) < 0) {
- ret = 1;
- goto done;
- }
+ sk = (struct sock *)nlsk;
- num++;
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
+ if (sk_diag_fill(sk, skb, req,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ sock_i_ino(sk)) < 0) {
+ ret = 1;
+ break;
}
}
+stop:
+ rhashtable_walk_stop(hti);
+ if (ret)
+ goto done;
+
+ rhashtable_walk_exit(hti);
+ num++;
+
+mc_list:
+ read_lock(&nl_table_lock);
sk_for_each_bound(sk, &tbl->mc_list) {
if (sk_hashed(sk))
continue;
@@ -116,13 +147,14 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
NLM_F_MULTI,
sock_i_ino(sk)) < 0) {
ret = 1;
- goto done;
+ break;
}
num++;
}
+ read_unlock(&nl_table_lock);
+
done:
cb->args[0] = num;
- cb->args[1] = protocol;
return ret;
}
@@ -131,20 +163,20 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct netlink_diag_req *req;
int s_num = cb->args[0];
+ int err = 0;
req = nlmsg_data(cb->nlh);
- rcu_read_lock();
- read_lock(&nl_table_lock);
-
if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
int i;
for (i = cb->args[1]; i < MAX_LINKS; i++) {
- if (__netlink_diag_dump(skb, cb, i, s_num))
+ err = __netlink_diag_dump(skb, cb, i, s_num);
+ if (err)
break;
s_num = 0;
}
+ cb->args[1] = i;
} else {
if (req->sdiag_protocol >= MAX_LINKS) {
read_unlock(&nl_table_lock);
@@ -152,13 +184,22 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
return -ENOENT;
}
- __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
+ err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
- read_unlock(&nl_table_lock);
- rcu_read_unlock();
+ return err < 0 ? err : skb->len;
+}
+
+static int netlink_diag_dump_done(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *hti = (void *)cb->args[2];
+
+ if (cb->args[0] == 1)
+ rhashtable_walk_exit(hti);
- return skb->len;
+ kfree(hti);
+
+ return 0;
}
static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
@@ -172,6 +213,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = netlink_diag_dump,
+ .done = netlink_diag_dump_done,
};
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
} else
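
The netlink diag dump now keeps a heap-allocated rhashtable_iter in cb->args[2], so a dump that overflows one skb can stop the walk and resume from the same position on the next callback instead of rescanning buckets under RCU. A loose user-space analogue of that resumable-cursor pattern — a plain array stands in for the hash table, a small per-call budget for the skb:

#include <stdio.h>
#include <stddef.h>

struct dump_cb {
    size_t pos;   /* stands in for the state kept in cb->args[] */
};

static int fill_one(int value, int *budget)
{
    if (*budget == 0)
        return -1;    /* "skb" full: stop and resume later */
    printf("  dumped %d\n", value);
    (*budget)--;
    return 0;
}

static int dump(struct dump_cb *cb, const int *table, size_t n, int budget)
{
    while (cb->pos < n) {
        if (fill_one(table[cb->pos], &budget) < 0)
            return 1;   /* more to do, come back */
        cb->pos++;
    }
    return 0;           /* done */
}

int main(void)
{
    const int table[] = { 10, 11, 12, 13, 14 };
    struct dump_cb cb = { 0 };
    int round = 0;

    while (dump(&cb, table, 5, 2))   /* only 2 entries fit per call */
        printf("round %d: output full, resuming\n", ++round);
    printf("dump complete after %d resumes\n", round);
    return 0;
}
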
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a09132a69869..23cc12639ba7 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -977,7 +977,7 @@ static int genl_ctrl_event(int event, struct genl_family *family,
return 0;
}
-static struct genl_ops genl_ctrl_ops[] = {
+static const struct genl_ops genl_ctrl_ops[] = {
{
.cmd = CTRL_CMD_GETFAMILY,
.doit = ctrl_getfamily,
@@ -986,7 +986,7 @@ static struct genl_ops genl_ctrl_ops[] = {
},
};
-static struct genl_multicast_group genl_ctrl_groups[] = {
+static const struct genl_multicast_group genl_ctrl_groups[] = {
{ .name = "notify", },
};
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 1ecbd7715f6d..4e03f64709bc 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -71,6 +71,8 @@ struct ovs_frag_data {
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
+#define OVS_RECURSION_LIMIT 5
+#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
int head;
int tail;
@@ -78,7 +80,12 @@ struct action_fifo {
struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
+struct recirc_keys {
+ struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
+};
+
static struct action_fifo __percpu *action_fifos;
+static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
@@ -153,7 +160,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_mpls *mpls)
{
- __be32 *new_mpls_lse;
+ struct mpls_shim_hdr *new_mpls_lse;
/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
if (skb->encapsulation)
@@ -162,19 +169,23 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
if (skb_cow_head(skb, MPLS_HLEN) < 0)
return -ENOMEM;
+ if (!skb->inner_protocol) {
+ skb_set_inner_network_header(skb, skb->mac_len);
+ skb_set_inner_protocol(skb, skb->protocol);
+ }
+
skb_push(skb, MPLS_HLEN);
memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
skb->mac_len);
skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->mac_len);
- new_mpls_lse = (__be32 *)skb_mpls_header(skb);
- *new_mpls_lse = mpls->mpls_lse;
+ new_mpls_lse = mpls_hdr(skb);
+ new_mpls_lse->label_stack_entry = mpls->mpls_lse;
skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
- if (!skb->inner_protocol)
- skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
invalidate_flow_key(key);
@@ -191,18 +202,19 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
if (unlikely(err))
return err;
- skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
+ skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
skb->mac_len);
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->mac_len);
- /* skb_mpls_header() is used to locate the ethertype
- * field correctly in the presence of VLAN tags.
+ /* mpls_hdr() is used to locate the ethertype field correctly in the
+ * presence of VLAN tags.
*/
- hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
+ hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
update_ethertype(skb, hdr, ethertype);
if (eth_p_mpls(skb->protocol))
skb->protocol = ethertype;
@@ -214,7 +226,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
const __be32 *mpls_lse, const __be32 *mask)
{
- __be32 *stack;
+ struct mpls_shim_hdr *stack;
__be32 lse;
int err;
@@ -222,16 +234,16 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
if (unlikely(err))
return err;
- stack = (__be32 *)skb_mpls_header(skb);
- lse = OVS_MASKED(*stack, *mpls_lse, *mask);
+ stack = mpls_hdr(skb);
+ lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
- __be32 diff[] = { ~(*stack), lse };
+ __be32 diff[] = { ~(stack->label_stack_entry), lse };
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
~skb->csum);
}
- *stack = lse;
+ stack->label_stack_entry = lse;
flow_key->mpls.top_lse = lse;
return 0;
}
@@ -241,20 +253,24 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
int err;
err = skb_vlan_pop(skb);
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
- else
- key->eth.tci = 0;
+ } else {
+ key->eth.vlan.tci = 0;
+ key->eth.vlan.tpid = 0;
+ }
return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
- else
- key->eth.tci = vlan->vlan_tci;
+ } else {
+ key->eth.vlan.tci = vlan->vlan_tci;
+ key->eth.vlan.tpid = vlan->vlan_tpid;
+ }
return skb_vlan_push(skb, vlan->vlan_tpid,
ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
@@ -1011,6 +1027,7 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
const struct nlattr *a, int rem)
{
struct deferred_action *da;
+ int level;
if (!is_flow_key_valid(key)) {
int err;
@@ -1034,6 +1051,18 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
return 0;
}
+ level = this_cpu_read(exec_actions_level);
+ if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
+ struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
+ struct sw_flow_key *recirc_key = &rks->key[level - 1];
+
+ *recirc_key = *key;
+ recirc_key->recirc_id = nla_get_u32(a);
+ ovs_dp_process_packet(skb, recirc_key);
+
+ return 0;
+ }
+
da = add_deferred_actions(skb, key, NULL);
if (da) {
da->pkt_key.recirc_id = nla_get_u32(a);
@@ -1200,11 +1229,10 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
- static const int ovs_recursion_limit = 5;
int err, level;
level = __this_cpu_inc_return(exec_actions_level);
- if (unlikely(level > ovs_recursion_limit)) {
+ if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
kfree_skb(skb);
@@ -1229,10 +1257,17 @@ int action_fifos_init(void)
if (!action_fifos)
return -ENOMEM;
+ recirc_keys = alloc_percpu(struct recirc_keys);
+ if (!recirc_keys) {
+ free_percpu(action_fifos);
+ return -ENOMEM;
+ }
+
return 0;
}
void action_fifos_exit(void)
{
free_percpu(action_fifos);
+ free_percpu(recirc_keys);
}
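
The recirculation change above lets shallow recirculations run immediately on a per-CPU array of pre-allocated flow keys, and only falls back to the deferred-action FIFO once the nesting reaches OVS_DEFERRED_ACTION_THRESHOLD, with OVS_RECURSION_LIMIT still bounding the total depth. A compact model of that control flow, with the actual packet processing stubbed out:

#include <stdio.h>

#define OVS_RECURSION_LIMIT             5
#define OVS_DEFERRED_ACTION_THRESHOLD   (OVS_RECURSION_LIMIT - 2)

static int level;
static int deferred;

static void process_packet(int recirc_id);

static void execute_recirc(int recirc_id)
{
    if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
        /* Direct recursion: one pre-allocated key slot per level. */
        printf("level %d: recirc %d handled inline\n", level, recirc_id);
        process_packet(recirc_id + 1);
    } else {
        /* Too deep: queue for the deferred-action FIFO instead. */
        printf("level %d: recirc %d deferred\n", level, recirc_id);
        deferred++;
    }
}

static void process_packet(int recirc_id)
{
    level++;
    if (level > OVS_RECURSION_LIMIT)
        printf("recursion limit reached, dropping\n");
    else if (recirc_id < 6)
        execute_recirc(recirc_id);
    level--;
}

int main(void)
{
    process_packet(0);
    printf("%d action(s) left for deferred processing\n", deferred);
    return 0;
}
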
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e054a748ff25..31045ef44a82 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1367,7 +1367,7 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
if (ct_info->helper)
module_put(ct_info->helper->me);
if (ct_info->ct)
- nf_ct_put(ct_info->ct);
+ nf_ct_tmpl_free(ct_info->ct);
}
void ovs_ct_init(struct net *net)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 524c0fd3078e..4d67ea856067 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -928,7 +928,6 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
- struct sw_flow_key key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
@@ -956,20 +955,24 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &key, &mask);
+ ovs_match_init(&match, &new_flow->key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
- ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
-
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
- &key, log);
+ &new_flow->key, log);
if (error)
goto err_kfree_flow;
+ /* unmasked key is needed to match when ufid is not used. */
+ if (ovs_identifier_is_key(&new_flow->id))
+ match.key = new_flow->id.unmasked_key;
+
+ ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
@@ -996,7 +999,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -1121,7 +1124,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, &mask);
+ ovs_match_init(&match, &key, true, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
} else if (!ufid_present) {
@@ -1238,7 +1241,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
log);
} else if (!ufid_present) {
@@ -1297,7 +1300,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
NULL, log);
if (unlikely(err))
@@ -2437,3 +2440,7 @@ module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 0ea128eeeab2..c8c82e109c68 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
+#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -72,32 +73,33 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
+ int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
- stats = rcu_dereference(flow->stats[node]);
+ stats = rcu_dereference(flow->stats[cpu]);
- /* Check if already have node-specific stats. */
+ /* Check if already have CPU-specific stats. */
if (likely(stats)) {
spin_lock(&stats->lock);
/* Mark if we write on the pre-allocated stats. */
- if (node == 0 && unlikely(flow->stats_last_writer != node))
- flow->stats_last_writer = node;
+ if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
+ flow->stats_last_writer = cpu;
} else {
stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
spin_lock(&stats->lock);
- /* If the current NUMA-node is the only writer on the
+ /* If the current CPU is the only writer on the
* pre-allocated stats keep using them.
*/
- if (unlikely(flow->stats_last_writer != node)) {
+ if (unlikely(flow->stats_last_writer != cpu)) {
/* A previous locker may have already allocated the
- * stats, so we need to check again. If node-specific
+ * stats, so we need to check again. If CPU-specific
* stats were already allocated, we update the pre-
* allocated stats as we have already locked them.
*/
- if (likely(flow->stats_last_writer != NUMA_NO_NODE)
- && likely(!rcu_access_pointer(flow->stats[node]))) {
- /* Try to allocate node-specific stats. */
+ if (likely(flow->stats_last_writer != -1) &&
+ likely(!rcu_access_pointer(flow->stats[cpu]))) {
+ /* Try to allocate CPU-specific stats. */
struct flow_stats *new_stats;
new_stats =
@@ -114,12 +116,12 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
- rcu_assign_pointer(flow->stats[node],
+ rcu_assign_pointer(flow->stats[cpu],
new_stats);
goto unlock;
}
}
- flow->stats_last_writer = node;
+ flow->stats_last_writer = cpu;
}
}
@@ -136,14 +138,15 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
- int node;
+ int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
- for_each_node(node) {
- struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+ struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
@@ -163,10 +166,11 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
- int node;
+ int cpu;
- for_each_node(node) {
- struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+ struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);
@@ -302,24 +306,57 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
sizeof(struct icmp6hdr));
}
-static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+/**
+ * Parse vlan tag from vlan header.
+ * Returns ERROR on memory error.
+ * Returns 0 if it encounters a non-vlan or incomplete packet.
+ * Returns 1 after successfully parsing vlan tag.
+ */
+static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
{
- struct qtag_prefix {
- __be16 eth_type; /* ETH_P_8021Q */
- __be16 tci;
- };
- struct qtag_prefix *qp;
+ struct vlan_head *vh = (struct vlan_head *)skb->data;
+
+ if (likely(!eth_type_vlan(vh->tpid)))
+ return 0;
- if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+ if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
return 0;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
- sizeof(__be16))))
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
+ sizeof(__be16))))
return -ENOMEM;
- qp = (struct qtag_prefix *) skb->data;
- key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
- __skb_pull(skb, sizeof(struct qtag_prefix));
+ vh = (struct vlan_head *)skb->data;
+ key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
+ key_vh->tpid = vh->tpid;
+
+ __skb_pull(skb, sizeof(struct vlan_head));
+ return 1;
+}
+
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ int res;
+
+ key->eth.vlan.tci = 0;
+ key->eth.vlan.tpid = 0;
+ key->eth.cvlan.tci = 0;
+ key->eth.cvlan.tpid = 0;
+
+ if (likely(skb_vlan_tag_present(skb))) {
+ key->eth.vlan.tci = htons(skb->vlan_tci);
+ key->eth.vlan.tpid = skb->vlan_proto;
+ } else {
+ /* Parse outer vlan tag in the non-accelerated case. */
+ res = parse_vlan_tag(skb, &key->eth.vlan);
+ if (res <= 0)
+ return res;
+ }
+
+ /* Parse inner vlan tag. */
+ res = parse_vlan_tag(skb, &key->eth.cvlan);
+ if (res <= 0)
+ return res;
return 0;
}
@@ -480,12 +517,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
* update skb->csum here.
*/
- key->eth.tci = 0;
- if (skb_vlan_tag_present(skb))
- key->eth.tci = htons(skb->vlan_tci);
- else if (eth->h_proto == htons(ETH_P_8021Q))
- if (unlikely(parse_vlan(skb, key)))
- return -ENOMEM;
+ if (unlikely(parse_vlan(skb, key)))
+ return -ENOMEM;
key->eth.type = parse_ethertype(skb);
if (unlikely(key->eth.type == htons(0)))
@@ -600,12 +633,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
} else if (eth_p_mpls(key->eth.type)) {
size_t stack_len = MPLS_HLEN;
- /* In the presence of an MPLS label stack the end of the L2
- * header and the beginning of the L3 header differ.
- *
- * Advance network_header to the beginning of the L3
- * header. mac_len corresponds to the end of the L2 header.
- */
+ skb_set_inner_network_header(skb, skb->mac_len);
while (1) {
__be32 lse;
@@ -613,12 +641,12 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
if (unlikely(error))
return 0;
- memcpy(&lse, skb_network_header(skb), MPLS_HLEN);
+ memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
if (stack_len == MPLS_HLEN)
memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
- skb_set_network_header(skb, skb->mac_len + stack_len);
+ skb_set_inner_network_header(skb, skb->mac_len + stack_len);
if (lse & htonl(MPLS_LS_S_MASK))
break;
@@ -734,8 +762,6 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
{
int err;
- memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
-
/* Extract metadata from netlink attributes. */
err = ovs_nla_get_flow_metadata(net, attr, key, log);
if (err)
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 03378e75a67c..ae783f5c6695 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -50,6 +50,11 @@ struct ovs_tunnel_info {
struct metadata_dst *tun_dst;
};
+struct vlan_head {
+ __be16 tpid; /* Vlan type. Generally 802.1q or 802.1ad.*/
+ __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+};
+
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
(offsetof(struct sw_flow_key, recirc_id) + \
FIELD_SIZEOF(struct sw_flow_key, recirc_id))
@@ -69,7 +74,8 @@ struct sw_flow_key {
struct {
u8 src[ETH_ALEN]; /* Ethernet source address. */
u8 dst[ETH_ALEN]; /* Ethernet destination address. */
- __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+ struct vlan_head vlan;
+ struct vlan_head cvlan;
__be16 type; /* Ethernet frame type. */
} eth;
union {
@@ -172,14 +178,14 @@ struct sw_flow {
struct hlist_node node[2];
u32 hash;
} flow_table, ufid_table;
- int stats_last_writer; /* NUMA-node id of the last writer on
+ int stats_last_writer; /* CPU id of the last writer on
* 'stats[0]'.
*/
struct sw_flow_key key;
struct sw_flow_id id;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
- struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one
+ struct flow_stats __rcu *stats[]; /* One for each CPU. First one
* is allocated at flow creation time,
* the rest are allocated on demand
* while holding the 'stats[0].lock'.
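
With the new vlan_head pair in the flow key (outer "vlan" plus inner "cvlan"), flow extraction can record a QinQ frame's service and customer tags separately. A user-space sketch of that double-tag parse over a hand-made frame; eth_type_vlan() below is a local helper recognising the 802.1Q/802.1ad TPIDs, mirroring the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct vlan_head {
    uint16_t tpid;
    uint16_t tci;
};

static int eth_type_vlan(uint16_t tpid)
{
    return tpid == 0x8100 /* 802.1Q */ || tpid == 0x88a8 /* 802.1ad */;
}

/* Returns 1 and advances *off past the tag if one was present, else 0. */
static int parse_vlan_tag(const uint8_t *frame, size_t len, size_t *off,
                          struct vlan_head *out)
{
    uint16_t tpid, tci;

    if (len < *off + 4)
        return 0;
    tpid = (uint16_t)(frame[*off] << 8 | frame[*off + 1]);
    if (!eth_type_vlan(tpid))
        return 0;
    tci = (uint16_t)(frame[*off + 2] << 8 | frame[*off + 3]);
    out->tpid = tpid;
    out->tci = tci;
    *off += 4;
    return 1;
}

int main(void)
{
    /* dst MAC, src MAC, then S-tag 0x88a8/VID 100 and C-tag 0x8100/VID 42 */
    const uint8_t frame[] = {
        0x02, 0, 0, 0, 0, 1,  0x02, 0, 0, 0, 0, 2,
        0x88, 0xa8, 0x00, 0x64,
        0x81, 0x00, 0x00, 0x2a,
        0x08, 0x00,                 /* inner ethertype: IPv4 */
    };
    struct vlan_head vlan = { 0, 0 }, cvlan = { 0, 0 };
    size_t off = 12;                /* skip the two MAC addresses */

    if (parse_vlan_tag(frame, sizeof(frame), &off, &vlan))
        printf("outer: tpid=0x%04x vid=%u\n", vlan.tpid, vlan.tci & 0xfff);
    if (parse_vlan_tag(frame, sizeof(frame), &off, &cvlan))
        printf("inner: tpid=0x%04x vid=%u\n", cvlan.tpid, cvlan.tci & 0xfff);
    return 0;
}
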
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c78a6a1476fb..ae25ded82b3b 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -808,6 +808,167 @@ int ovs_nla_put_tunnel_info(struct sk_buff *skb,
ip_tunnel_info_af(tun_info));
}
+static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *a[],
+ bool is_mask, bool inner)
+{
+ __be16 tci = 0;
+ __be16 tpid = 0;
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (likely(!inner)) {
+ SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
+ SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
+ }
+ return 0;
+}
+
+static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
+ u64 key_attrs, bool inner,
+ const struct nlattr **a, bool log)
+{
+ __be16 tci = 0;
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+ eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
+ /* Not a VLAN. */
+ return 0;
+ }
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+ OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (tci) {
+ OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ } else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
+ /* Corner case for truncated VLAN header. */
+ OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+ }
+
+ return 1;
+}
+
+static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
+ u64 key_attrs, bool inner,
+ const struct nlattr **a, bool log)
+{
+ __be16 tci = 0;
+ __be16 tpid = 0;
+ bool encap_valid = !!(match->key->eth.vlan.tci &
+ htons(VLAN_TAG_PRESENT));
+ bool i_encap_valid = !!(match->key->eth.cvlan.tci &
+ htons(VLAN_TAG_PRESENT));
+
+ if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
+ /* Not a VLAN. */
+ return 0;
+ }
+
+ if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
+ OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (tpid != htons(0xffff)) {
+ OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
+ (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
+ return -EINVAL;
+ }
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ return 1;
+}
+
+static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
+ u64 *key_attrs, bool inner,
+ const struct nlattr **a, bool is_mask,
+ bool log)
+{
+ int err;
+ const struct nlattr *encap;
+
+ if (!is_mask)
+ err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
+ a, log);
+ else
+ err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
+ a, log);
+ if (err <= 0)
+ return err;
+
+ err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
+ if (err)
+ return err;
+
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+
+ encap = a[OVS_KEY_ATTR_ENCAP];
+
+ if (!is_mask)
+ err = parse_flow_nlattrs(encap, a, key_attrs, log);
+ else
+ err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
+
+ return err;
+}
+
+static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
+ u64 *key_attrs, const struct nlattr **a,
+ bool is_mask, bool log)
+{
+ int err;
+ bool encap_valid = false;
+
+ err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
+ is_mask, log);
+ if (err)
+ return err;
+
+ encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
+ if (encap_valid) {
+ err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
+ is_mask, log);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
u64 *attrs, const struct nlattr **a,
bool is_mask, bool log)
@@ -923,20 +1084,11 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
}
if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
- __be16 tci;
-
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- if (is_mask)
- OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
- else
- OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");
-
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ /* VLAN attribute is always parsed before getting here since it
+ * may occur multiple times.
+ */
+ OVS_NLERR(log, "VLAN attribute unexpected.");
+ return -EINVAL;
}
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
@@ -1182,49 +1334,18 @@ int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- const struct nlattr *encap;
struct nlattr *newmask = NULL;
u64 key_attrs = 0;
u64 mask_attrs = 0;
- bool encap_valid = false;
int err;
err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
if (err)
return err;
- if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
- (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
- __be16 tci;
-
- if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
- OVS_NLERR(log, "Invalid Vlan frame.");
- return -EINVAL;
- }
-
- key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- encap = a[OVS_KEY_ATTR_ENCAP];
- key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- encap_valid = true;
-
- if (tci & htons(VLAN_TAG_PRESENT)) {
- err = parse_flow_nlattrs(encap, a, &key_attrs, log);
- if (err)
- return err;
- } else if (!tci) {
- /* Corner case for truncated 802.1Q header. */
- if (nla_len(encap)) {
- OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
- return -EINVAL;
- }
- } else {
- OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
- return -EINVAL;
- }
- }
+ err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
+ if (err)
+ return err;
err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
if (err)
@@ -1265,46 +1386,12 @@ int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
goto free_newmask;
/* Always match on tci. */
- SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
-
- if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
- __be16 eth_type = 0;
- __be16 tci = 0;
-
- if (!encap_valid) {
- OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
- err = -EINVAL;
- goto free_newmask;
- }
-
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- if (a[OVS_KEY_ATTR_ETHERTYPE])
- eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-
- if (eth_type == htons(0xffff)) {
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- encap = a[OVS_KEY_ATTR_ENCAP];
- err = parse_flow_mask_nlattrs(encap, a,
- &mask_attrs, log);
- if (err)
- goto free_newmask;
- } else {
- OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
- ntohs(eth_type));
- err = -EINVAL;
- goto free_newmask;
- }
-
- if (a[OVS_KEY_ATTR_VLAN])
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
- ntohs(tci));
- err = -EINVAL;
- goto free_newmask;
- }
- }
+ err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
+ if (err)
+ goto free_newmask;
err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
log);
@@ -1410,12 +1497,25 @@ int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
+static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
+ bool is_mask)
+{
+ __be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
+
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
+ return -EMSGSIZE;
+ return 0;
+}
+
static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, bool is_mask,
struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
- struct nlattr *nla, *encap;
+ struct nlattr *nla;
+ struct nlattr *encap = NULL;
+ struct nlattr *in_encap = NULL;
if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
goto nla_put_failure;
@@ -1464,17 +1564,21 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
ether_addr_copy(eth_key->eth_src, output->eth.src);
ether_addr_copy(eth_key->eth_dst, output->eth.dst);
- if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- __be16 eth_type;
- eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
- nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
+ if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
+ if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
- if (!swkey->eth.tci)
+ if (!swkey->eth.vlan.tci)
goto unencap;
- } else
- encap = NULL;
+
+ if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
+ if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
+ goto nla_put_failure;
+ in_encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+ if (!swkey->eth.cvlan.tci)
+ goto unencap;
+ }
+ }
if (swkey->eth.type == htons(ETH_P_802_2)) {
/*
@@ -1493,6 +1597,14 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;
+ if (eth_type_vlan(swkey->eth.type)) {
+ /* There are 3 VLAN tags, we don't know anything about the rest
+ * of the packet, so truncate here.
+ */
+ WARN_ON_ONCE(!(encap && in_encap));
+ goto unencap;
+ }
+
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;
@@ -1619,6 +1731,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
}
unencap:
+ if (in_encap)
+ nla_nest_end(skb, in_encap);
if (encap)
nla_nest_end(skb, encap);
@@ -1882,13 +1996,15 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
+ bool reset_key,
struct sw_flow_mask *mask)
{
memset(match, 0, sizeof(*match));
match->key = key;
match->mask = mask;
- memset(key, 0, sizeof(*key));
+ if (reset_key)
+ memset(key, 0, sizeof(*key));
if (mask) {
memset(&mask->key, 0, sizeof(mask->key));
@@ -1935,7 +2051,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct nlattr *a;
int err = 0, start, opts_type;
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
return opts_type;
@@ -2283,7 +2399,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
case OVS_ACTION_ATTR_PUSH_VLAN:
vlan = nla_data(a);
- if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan->vlan_tpid))
return -EINVAL;
if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
return -EINVAL;
@@ -2388,7 +2504,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
- key->eth.tci, log);
+ key->eth.vlan.tci, log);
if (err)
ovs_nla_free_flow_actions(*sfa);
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 47dd142eca1c..45f9769e5aac 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -41,7 +41,8 @@ size_t ovs_tun_key_attr_size(void);
size_t ovs_key_attr_size(void);
void ovs_match_init(struct sw_flow_match *match,
- struct sw_flow_key *key, struct sw_flow_mask *mask);
+ struct sw_flow_key *key, bool reset_key,
+ struct sw_flow_mask *mask);
int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
int attr, bool is_mask, struct sk_buff *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d073fff82fdb..ea7a8073fa02 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
+#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -79,17 +80,12 @@ struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
struct flow_stats *stats;
- int node;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+ flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
- flow->sf_acts = NULL;
- flow->mask = NULL;
- flow->id.unmasked_key = NULL;
- flow->id.ufid_len = 0;
- flow->stats_last_writer = NUMA_NO_NODE;
+ flow->stats_last_writer = -1;
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -102,10 +98,6 @@ struct sw_flow *ovs_flow_alloc(void)
RCU_INIT_POINTER(flow->stats[0], stats);
- for_each_node(node)
- if (node != 0)
- RCU_INIT_POINTER(flow->stats[node], NULL);
-
return flow;
err:
kmem_cache_free(flow_cache, flow);
@@ -142,16 +134,17 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
static void flow_free(struct sw_flow *flow)
{
- int node;
+ int cpu;
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
- for_each_node(node)
- if (flow->stats[node])
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
+ if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
- (struct flow_stats __force *)flow->stats[node]);
+ (struct flow_stats __force *)flow->stats[cpu]);
kmem_cache_free(flow_cache, flow);
}
@@ -756,7 +749,7 @@ int ovs_flow_init(void)
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
- + (nr_node_ids
+ + (nr_cpu_ids
* sizeof(struct flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6b21fd068d87..8f198437c724 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -485,9 +485,14 @@ static unsigned int packet_length(const struct sk_buff *skb)
{
unsigned int length = skb->len - ETH_HLEN;
- if (skb->protocol == htons(ETH_P_8021Q))
+ if (skb_vlan_tagged(skb))
length -= VLAN_HLEN;
+ /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
+	 * (ETH_HLEN + VLAN_HLEN) in addition to the mtu value, but almost none
+ * account for 802.1ad. e.g. is_skb_forwardable().
+ */
+
return length;
}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 7eaf887e46f8..5680d90b0b77 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -160,7 +160,7 @@ static void rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
rds_ibdev->dev = device;
- rds_ibdev->pd = ib_alloc_pd(device);
+ rds_ibdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(rds_ibdev->pd)) {
rds_ibdev->pd = NULL;
goto put_dev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 046f7508c06b..45ac8e8e58f4 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -333,6 +333,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
+__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index b2d17f0fafa8..fd0bccb2f9f9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -688,6 +688,7 @@ void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
+__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 784c53163b7b..86f8853a038c 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -19,6 +19,20 @@ config AF_RXRPC
See Documentation/networking/rxrpc.txt.
+config AF_RXRPC_IPV6
+ bool "IPv6 support for RxRPC"
+ depends on (IPV6 = m && AF_RXRPC = m) || (IPV6 = y && AF_RXRPC)
+ help
+ Say Y here to allow AF_RXRPC to use IPV6 UDP as well as IPV4 UDP as
+ its network transport.
+
+config AF_RXRPC_INJECT_LOSS
+ bool "Inject packet loss into RxRPC packet stream"
+ depends on AF_RXRPC
+ help
+ Say Y here to inject packet loss by discarding some received and some
+ transmitted packets.
+
config AF_RXRPC_DEBUG
bool "RxRPC dynamic debugging"
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index 10f3f48a16a8..8fc6ea347182 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -22,6 +22,7 @@ af-rxrpc-y := \
peer_object.o \
recvmsg.o \
security.o \
+ sendmsg.o \
skbuff.o \
utils.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 88effadd4b16..44c9c2b0b190 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -16,12 +16,14 @@
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#define CREATE_TRACE_POINTS
#include "ar-internal.h"
MODULE_DESCRIPTION("RxRPC network protocol");
@@ -43,7 +45,7 @@ u32 rxrpc_epoch;
atomic_t rxrpc_debug_id;
/* count of skbs currently in use */
-atomic_t rxrpc_n_skbs;
+atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
struct workqueue_struct *rxrpc_workqueue;
@@ -104,19 +106,25 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
case AF_INET:
if (srx->transport_len < sizeof(struct sockaddr_in))
return -EINVAL;
- _debug("INET: %x @ %pI4",
- ntohs(srx->transport.sin.sin_port),
- &srx->transport.sin.sin_addr);
tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
+ if (srx->transport_len < sizeof(struct sockaddr_in6))
+ return -EINVAL;
+ tail = offsetof(struct sockaddr_rxrpc, transport) +
+ sizeof(struct sockaddr_in6);
+ break;
+#endif
+
default:
return -EAFNOSUPPORT;
}
if (tail < len)
memset((void *)srx + tail, 0, len - tail);
+ _debug("INET: %pISp", &srx->transport);
return 0;
}
@@ -128,7 +136,8 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
struct sock *sk = sock->sk;
struct rxrpc_local *local;
- struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+ u16 service_id = srx->srx_service;
int ret;
_enter("%p,%p,%d", rx, saddr, len);
@@ -152,16 +161,13 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
goto error_unlock;
}
- if (rx->srx.srx_service) {
- write_lock_bh(&local->services_lock);
- list_for_each_entry(prx, &local->services, listen_link) {
- if (prx->srx.srx_service == rx->srx.srx_service)
- goto service_in_use;
- }
-
+ if (service_id) {
+ write_lock(&local->services_lock);
+ if (rcu_access_pointer(local->service))
+ goto service_in_use;
rx->local = local;
- list_add_tail(&rx->listen_link, &local->services);
- write_unlock_bh(&local->services_lock);
+ rcu_assign_pointer(local->service, rx);
+ write_unlock(&local->services_lock);
rx->sk.sk_state = RXRPC_SERVER_BOUND;
} else {
@@ -174,7 +180,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
return 0;
service_in_use:
- write_unlock_bh(&local->services_lock);
+ write_unlock(&local->services_lock);
rxrpc_put_local(local);
ret = -EADDRINUSE;
error_unlock:
@@ -191,7 +197,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct rxrpc_sock *rx = rxrpc_sk(sk);
- unsigned int max;
+ unsigned int max, old;
int ret;
_enter("%p,%d", rx, backlog);
@@ -210,9 +216,13 @@ static int rxrpc_listen(struct socket *sock, int backlog)
backlog = max;
else if (backlog < 0 || backlog > max)
break;
+ old = sk->sk_max_ack_backlog;
sk->sk_max_ack_backlog = backlog;
- rx->sk.sk_state = RXRPC_SERVER_LISTENING;
- ret = 0;
+ ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
+ if (ret == 0)
+ rx->sk.sk_state = RXRPC_SERVER_LISTENING;
+ else
+ sk->sk_max_ack_backlog = old;
break;
default:
ret = -EBUSY;
@@ -230,6 +240,8 @@ static int rxrpc_listen(struct socket *sock, int backlog)
* @srx: The address of the peer to contact
* @key: The security context to use (defaults to socket setting)
* @user_call_ID: The ID to use
+ * @gfp: The allocation constraints
+ * @notify_rx: Where to send notifications instead of socket queue
*
* Allow a kernel service to begin a call on the nominated socket. This just
* sets up all the internal tracking structures and allocates connection and
@@ -242,7 +254,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
- gfp_t gfp)
+ gfp_t gfp,
+ rxrpc_notify_rx_t notify_rx)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call *call;
@@ -269,6 +282,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.service_id = srx->srx_service;
call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
+ if (!IS_ERR(call))
+ call->notify_rx = notify_rx;
release_sock(&rx->sk);
_leave(" = %p", call);
@@ -278,40 +293,39 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call);
/**
* rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
+ * @sock: The socket the call is on
* @call: The call to end
*
* Allow a kernel service to end a call it was using. The call must be
* complete before this is called (the call should be aborted if necessary).
*/
-void rxrpc_kernel_end_call(struct rxrpc_call *call)
+void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
- rxrpc_remove_user_ID(call->socket, call);
- rxrpc_put_call(call);
+ rxrpc_release_call(rxrpc_sk(sock->sk), call);
+ rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);
/**
- * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
+ * rxrpc_kernel_new_call_notification - Get notifications of new calls
* @sock: The socket to intercept received messages on
- * @interceptor: The function to pass the messages to
+ * @notify_new_call: Function to be called when new calls appear
+ * @discard_new_call: Function to discard preallocated calls
*
- * Allow a kernel service to intercept messages heading for the Rx queue on an
- * RxRPC socket. They get passed to the specified function instead.
- * @interceptor should free the socket buffers it is given. @interceptor is
- * called with the socket receive queue spinlock held and softirqs disabled -
- * this ensures that the messages will be delivered in the right order.
+ * Allow a kernel service to be given notifications about new calls.
*/
-void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
- rxrpc_interceptor_t interceptor)
+void rxrpc_kernel_new_call_notification(
+ struct socket *sock,
+ rxrpc_notify_new_call_t notify_new_call,
+ rxrpc_discard_new_call_t discard_new_call)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- _enter("");
- rx->interceptor = interceptor;
+ rx->notify_new_call = notify_new_call;
+ rx->discard_new_call = discard_new_call;
}
-
-EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
+EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
/*
* connect an RxRPC socket
@@ -391,6 +405,23 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
switch (rx->sk.sk_state) {
case RXRPC_UNBOUND:
+ rx->srx.srx_family = AF_RXRPC;
+ rx->srx.srx_service = 0;
+ rx->srx.transport_type = SOCK_DGRAM;
+ rx->srx.transport.family = rx->family;
+ switch (rx->family) {
+ case AF_INET:
+ rx->srx.transport_len = sizeof(struct sockaddr_in);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ rx->srx.transport_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ goto error_unlock;
+ }
local = rxrpc_lookup_local(&rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
@@ -505,15 +536,16 @@ error:
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- unsigned int mask;
struct sock *sk = sock->sk;
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+ unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
* queue */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!list_empty(&rx->recvmsg_q))
mask |= POLLIN | POLLRDNORM;
/* the socket is writable if there is space to add new data to the
@@ -540,7 +572,8 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -EAFNOSUPPORT;
/* we support transport protocol UDP/UDP6 only */
- if (protocol != PF_INET)
+ if (protocol != PF_INET &&
+ IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
return -EPROTONOSUPPORT;
if (sock->type != SOCK_DGRAM)
@@ -554,6 +587,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -ENOMEM;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_state = RXRPC_UNBOUND;
sk->sk_write_space = rxrpc_write_space;
sk->sk_max_ack_backlog = 0;
@@ -563,9 +597,11 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
rx->family = protocol;
rx->calls = RB_ROOT;
- INIT_LIST_HEAD(&rx->listen_link);
- INIT_LIST_HEAD(&rx->secureq);
- INIT_LIST_HEAD(&rx->acceptq);
+ spin_lock_init(&rx->incoming_lock);
+ INIT_LIST_HEAD(&rx->sock_calls);
+ INIT_LIST_HEAD(&rx->to_be_accepted);
+ INIT_LIST_HEAD(&rx->recvmsg_q);
+ rwlock_init(&rx->recvmsg_lock);
rwlock_init(&rx->call_lock);
memset(&rx->srx, 0, sizeof(rx->srx));
@@ -574,6 +610,39 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
}
/*
+ * Kill all the calls on a socket and shut it down.
+ */
+static int rxrpc_shutdown(struct socket *sock, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+ int ret = 0;
+
+ _enter("%p,%d", sk, flags);
+
+ if (flags != SHUT_RDWR)
+ return -EOPNOTSUPP;
+ if (sk->sk_state == RXRPC_CLOSE)
+ return -ESHUTDOWN;
+
+ lock_sock(sk);
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (sk->sk_state < RXRPC_CLOSE) {
+ sk->sk_state = RXRPC_CLOSE;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ } else {
+ ret = -ESHUTDOWN;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+ rxrpc_discard_prealloc(rx);
+
+ release_sock(sk);
+ return ret;
+}
+
+/*
* RxRPC socket destructor
*/
static void rxrpc_sock_destructor(struct sock *sk)
@@ -609,15 +678,14 @@ static int rxrpc_release_sock(struct sock *sk)
sk->sk_state = RXRPC_CLOSE;
spin_unlock_bh(&sk->sk_receive_queue.lock);
- ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
-
- if (!list_empty(&rx->listen_link)) {
- write_lock_bh(&rx->local->services_lock);
- list_del(&rx->listen_link);
- write_unlock_bh(&rx->local->services_lock);
+ if (rx->local && rx->local->service == rx) {
+ write_lock(&rx->local->services_lock);
+ rx->local->service = NULL;
+ write_unlock(&rx->local->services_lock);
}
/* try to flush out this socket */
+ rxrpc_discard_prealloc(rx);
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
@@ -666,7 +734,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
.poll = rxrpc_poll,
.ioctl = sock_no_ioctl,
.listen = rxrpc_listen,
- .shutdown = sock_no_shutdown,
+ .shutdown = rxrpc_shutdown,
.setsockopt = rxrpc_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = rxrpc_sendmsg,
@@ -697,7 +765,13 @@ static int __init af_rxrpc_init(void)
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
- rxrpc_epoch = get_seconds();
+ get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
+ rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
+ get_random_bytes(&rxrpc_client_conn_ids.cur,
+ sizeof(rxrpc_client_conn_ids.cur));
+ rxrpc_client_conn_ids.cur &= 0x3fffffff;
+ if (rxrpc_client_conn_ids.cur == 0)
+ rxrpc_client_conn_ids.cur = 1;
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
@@ -788,7 +862,8 @@ static void __exit af_rxrpc_exit(void)
proto_unregister(&rxrpc_proto);
rxrpc_destroy_all_calls();
rxrpc_destroy_all_connections();
- ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
+ ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
+ ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
rxrpc_destroy_all_locals();
remove_proc_entry("rxrpc_conns", init_net.proc_net);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ff83fb1ddd47..d38dffd78085 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -35,11 +35,23 @@ struct rxrpc_crypt {
#define rxrpc_queue_delayed_work(WS,D) \
queue_delayed_work(rxrpc_workqueue, (WS), (D))
-#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor)
-
struct rxrpc_connection;
/*
+ * Mark applied to socket buffers.
+ */
+enum rxrpc_skb_mark {
+ RXRPC_SKB_MARK_DATA, /* data message */
+ RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
+ RXRPC_SKB_MARK_BUSY, /* server busy message */
+ RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
+ RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
+ RXRPC_SKB_MARK_NET_ERROR, /* network error message */
+ RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
+	RXRPC_SKB_MARK_NEW_CALL,	/* new call notification message */
+};
+
+/*
* sk_state for RxRPC sockets
*/
enum {
@@ -52,19 +64,44 @@ enum {
};
/*
+ * Service backlog preallocation.
+ *
+ * This contains circular buffers of preallocated peers, connections and calls
+ * for incoming service calls and their head and tail pointers. This allows
+ * calls to be set up in the data_ready handler, thereby avoiding the need to
+ * shuffle packets around so much.
+ */
+struct rxrpc_backlog {
+ unsigned short peer_backlog_head;
+ unsigned short peer_backlog_tail;
+ unsigned short conn_backlog_head;
+ unsigned short conn_backlog_tail;
+ unsigned short call_backlog_head;
+ unsigned short call_backlog_tail;
+#define RXRPC_BACKLOG_MAX 32
+ struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
+ struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
+ struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
+};
+
+/*
* RxRPC socket definition
*/
struct rxrpc_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
- rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */
+ rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
+ rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
struct rxrpc_local *local; /* local endpoint */
- struct list_head listen_link; /* link in the local endpoint's listen list */
- struct list_head secureq; /* calls awaiting connection security clearance */
- struct list_head acceptq; /* calls awaiting acceptance */
+ struct rxrpc_backlog *backlog; /* Preallocation for services */
+ spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */
+ struct list_head sock_calls; /* List of calls owned by this socket */
+ struct list_head to_be_accepted; /* calls awaiting acceptance */
+ struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
+ rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
struct key *key; /* security for this socket */
struct key *securities; /* list of server security descriptors */
- struct rb_root calls; /* outstanding calls on this socket */
+ struct rb_root calls; /* User ID -> call mapping */
unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
rwlock_t call_lock; /* lock for calls */
@@ -103,13 +140,11 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_call *call; /* call with which associated */
- unsigned long resend_at; /* time in jiffies at which to resend */
union {
- unsigned int offset; /* offset into buffer of next read */
+ u8 nr_jumbo; /* Number of jumbo subpackets */
+ };
+ union {
int remain; /* amount of space remaining for next write */
- u32 error; /* network error code */
- bool need_resend; /* T if needs resending */
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
@@ -117,13 +152,6 @@ struct rxrpc_skb_priv {
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
-enum rxrpc_command {
- RXRPC_CMD_SEND_DATA, /* send data message */
- RXRPC_CMD_SEND_ABORT, /* request abort generation */
- RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
- RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
-};
-
/*
* RxRPC security module interface
*/
@@ -150,7 +178,12 @@ struct rxrpc_security {
void *);
/* verify the security on a received packet */
- int (*verify_packet)(struct rxrpc_call *, struct sk_buff *, u32 *);
+ int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
+ unsigned int, unsigned int, rxrpc_seq_t, u16);
+
+ /* Locate the data in a received packet that has been verified. */
+ void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
+ unsigned int *, unsigned int *);
/* issue a challenge */
int (*issue_challenge)(struct rxrpc_connection *);
@@ -180,9 +213,8 @@ struct rxrpc_local {
struct list_head link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
- struct list_head services; /* services listening on this endpoint */
+ struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
- struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
struct rb_root client_conns; /* Client connections by socket params */
@@ -220,10 +252,12 @@ struct rxrpc_peer {
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
- suseconds_t rtt; /* current RTT estimate (in uS) */
- unsigned int rtt_point; /* next entry at which to insert */
- unsigned int rtt_usage; /* amount of cache actually used */
- suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
+ ktime_t rtt_last_req; /* Time of last RTT request */
+ u64 rtt; /* Current RTT estimate (in nS) */
+ u64 rtt_sum; /* Sum of cache contents */
+ u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
+ u8 rtt_cursor; /* next entry at which to insert */
+ u8 rtt_usage; /* amount of cache actually used */
};
/*
@@ -255,6 +289,9 @@ enum rxrpc_conn_flag {
RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
+ RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
+ RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
+ RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
};
/*
@@ -265,17 +302,29 @@ enum rxrpc_conn_event {
};
/*
+ * The connection cache state.
+ */
+enum rxrpc_conn_cache_state {
+ RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
+ RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
+ RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
+ RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
+ RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
+ RXRPC_CONN__NR_CACHE_STATES
+};
+
+/*
* The connection protocol state.
*/
enum rxrpc_conn_proto_state {
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
RXRPC_CONN_CLIENT, /* Client connection */
+ RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
RXRPC_CONN_SERVICE, /* Service secured connection */
RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
- RXRPC_CONN_NETWORK_ERROR, /* Conn terminated by network error */
RXRPC_CONN__NR_STATES
};
@@ -288,23 +337,33 @@ struct rxrpc_connection {
struct rxrpc_conn_proto proto;
struct rxrpc_conn_parameters params;
- spinlock_t channel_lock;
+ atomic_t usage;
+ struct rcu_head rcu;
+ struct list_head cache_link;
+ spinlock_t channel_lock;
+ unsigned char active_chans; /* Mask of active channels */
+#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
+ struct list_head waiting_calls; /* Calls waiting for channels */
struct rxrpc_channel {
struct rxrpc_call __rcu *call; /* Active call */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
u32 last_call; /* ID of last call */
- u32 last_result; /* Result of last call (0/abort) */
+ u8 last_type; /* Type of last packet */
+ u16 last_service_id;
+ union {
+ u32 last_seq;
+ u32 last_abort;
+ };
} channels[RXRPC_MAXCALLS];
- wait_queue_head_t channel_wq; /* queue to wait for channel to become available */
- struct rcu_head rcu;
struct work_struct processor; /* connection event processor */
union {
struct rb_node client_node; /* Node in local->client_conns */
struct rb_node service_node; /* Node in peer->service_conns */
};
+ struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
const struct rxrpc_security *security; /* applied security module */
@@ -313,21 +372,18 @@ struct rxrpc_connection {
struct rxrpc_crypt csum_iv; /* packet checksum base */
unsigned long flags;
unsigned long events;
- unsigned long put_time; /* Time at which last put */
+ unsigned long idle_timestamp; /* Time at which last became idle */
spinlock_t state_lock; /* state-change lock */
- atomic_t usage;
- enum rxrpc_conn_proto_state state : 8; /* current state of connection */
+ enum rxrpc_conn_cache_state cache_state;
+ enum rxrpc_conn_proto_state state; /* current state of connection */
u32 local_abort; /* local abort code */
u32 remote_abort; /* remote abort code */
- int error; /* local error incurred */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
- atomic_t hi_serial; /* highest serial number received */
- atomic_t avail_chans; /* number of channels available */
+ unsigned int hi_serial; /* highest serial number received */
+ u32 security_nonce; /* response re-use preventer */
u8 size_align; /* data size alignment (for security) */
- u8 header_size; /* rxrpc + security header size */
u8 security_size; /* security header size */
- u32 security_nonce; /* response re-use preventer */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};
@@ -337,37 +393,23 @@ struct rxrpc_connection {
*/
enum rxrpc_call_flag {
RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
- RXRPC_CALL_TERMINAL_MSG, /* call has given the socket its final message */
- RXRPC_CALL_RCVD_LAST, /* all packets received */
- RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */
- RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */
- RXRPC_CALL_PROC_BUSY, /* the processor is busy */
- RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
- RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */
+ RXRPC_CALL_IS_SERVICE, /* Call is service call */
+ RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
+ RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
+ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
+ RXRPC_CALL_PINGING, /* Ping in process */
+ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
};
/*
* Events that can be raised on a call.
*/
enum rxrpc_call_event {
- RXRPC_CALL_EV_RCVD_ACKALL, /* ACKALL or reply received */
- RXRPC_CALL_EV_RCVD_BUSY, /* busy packet received */
- RXRPC_CALL_EV_RCVD_ABORT, /* abort packet received */
- RXRPC_CALL_EV_RCVD_ERROR, /* network error received */
- RXRPC_CALL_EV_ACK_FINAL, /* need to generate final ACK (and release call) */
RXRPC_CALL_EV_ACK, /* need to generate ACK */
- RXRPC_CALL_EV_REJECT_BUSY, /* need to generate busy message */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
- RXRPC_CALL_EV_CONN_ABORT, /* local connection abort generated */
- RXRPC_CALL_EV_RESEND_TIMER, /* Tx resend timer expired */
+ RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
- RXRPC_CALL_EV_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
- RXRPC_CALL_EV_LIFE_TIMER, /* call's lifetimer ran out */
- RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
- RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
- RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
- RXRPC_CALL_EV_RELEASE, /* need to release the call's resources */
};
/*
@@ -379,20 +421,38 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
- RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
+ RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
- RXRPC_CALL_COMPLETE, /* - call completed */
- RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
+ RXRPC_CALL_COMPLETE, /* - call complete */
+ NR__RXRPC_CALL_STATES
+};
+
+/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- RXRPC_CALL_DEAD, /* - call is dead */
- NR__RXRPC_CALL_STATES
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
+ * Call Tx congestion management modes.
+ */
+enum rxrpc_congest_mode {
+ RXRPC_CALL_SLOW_START,
+ RXRPC_CALL_CONGEST_AVOIDANCE,
+ RXRPC_CALL_PACKET_LOSS,
+ RXRPC_CALL_FAST_RETRANSMIT,
+ NR__RXRPC_CONGEST_MODES
};
/*
@@ -402,92 +462,329 @@ enum rxrpc_call_state {
struct rxrpc_call {
struct rcu_head rcu;
struct rxrpc_connection *conn; /* connection carrying call */
- struct rxrpc_sock *socket; /* socket responsible */
- struct timer_list lifetimer; /* lifetime remaining on call */
- struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */
- struct timer_list ack_timer; /* ACK generation timer */
- struct timer_list resend_timer; /* Tx resend timer */
- struct work_struct destroyer; /* call destroyer */
- struct work_struct processor; /* packet processor and ACK generator */
+ struct rxrpc_peer *peer; /* Peer record for remote address */
+ struct rxrpc_sock __rcu *socket; /* socket responsible */
+ ktime_t ack_at; /* When deferred ACK needs to happen */
+ ktime_t resend_at; /* When next resend needs to happen */
+ ktime_t expire_at; /* When the call times out */
+ struct timer_list timer; /* Combined event timer */
+ struct work_struct processor; /* Event processor */
+ rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
struct list_head link; /* link in master call list */
+ struct list_head chan_wait_link; /* Link in conn->waiting_calls */
struct hlist_node error_link; /* link in error distribution list */
- struct list_head accept_link; /* calls awaiting acceptance */
- struct rb_node sock_node; /* node in socket call tree */
- struct sk_buff_head rx_queue; /* received packets */
- struct sk_buff_head rx_oos_queue; /* packets received out of sequence */
+ struct list_head accept_link; /* Link in rx->acceptq */
+ struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
+ struct list_head sock_link; /* Link in rx->sock_calls */
+ struct rb_node sock_node; /* Node in rx->calls */
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
- wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */
+ wait_queue_head_t waitq; /* Wait queue for channel or Tx */
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
unsigned long user_call_ID; /* user-defined call ID */
- unsigned long creation_jif; /* time of call creation */
unsigned long flags;
unsigned long events;
spinlock_t lock;
rwlock_t state_lock; /* lock for state transition */
- atomic_t usage;
- atomic_t skb_count; /* Outstanding packets on this call */
- atomic_t sequence; /* Tx data packet sequence counter */
- u32 local_abort; /* local abort code */
- u32 remote_abort; /* remote abort code */
- int error_report; /* Network error (ICMP/local transport) */
+ u32 abort_code; /* Local/remote abort code */
int error; /* Local error incurred */
- enum rxrpc_call_state state : 8; /* current state of call */
+ enum rxrpc_call_state state; /* current state of call */
+ enum rxrpc_call_completion completion; /* Call completion condition */
+ atomic_t usage;
+ u16 service_id; /* service ID */
+ u8 security_ix; /* Security type */
+ u32 call_id; /* call ID on connection */
+ u32 cid; /* connection ID plus channel index */
int debug_id; /* debug ID for printks */
- u8 channel; /* connection channel occupied by this call */
-
- /* transmission-phase ACK management */
- u8 acks_head; /* offset into window of first entry */
- u8 acks_tail; /* offset into window of last entry */
- u8 acks_winsz; /* size of un-ACK'd window */
- u8 acks_unacked; /* lowest unacked packet in last ACK received */
- int acks_latest; /* serial number of latest ACK received */
- rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */
- unsigned long *acks_window; /* sent packet window
- * - elements are pointers with LSB set if ACK'd
+ unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
+ unsigned short rx_pkt_len; /* Current recvmsg packet len */
+
+ /* Rx/Tx circular buffer, depending on phase.
+ *
+ * In the Rx phase, packets are annotated with 0 or the number of the
+ * segment of a jumbo packet each buffer refers to. There can be up to
+ * 47 segments in a maximum-size UDP packet.
+ *
+ * In the Tx phase, packets are annotated with which buffers have been
+ * acked.
+ */
+#define RXRPC_RXTX_BUFF_SIZE 64
+#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
+#define RXRPC_INIT_RX_WINDOW_SIZE 32
+ struct sk_buff **rxtx_buffer;
+ u8 *rxtx_annotations;
+#define RXRPC_TX_ANNO_ACK 0
+#define RXRPC_TX_ANNO_UNACK 1
+#define RXRPC_TX_ANNO_NAK 2
+#define RXRPC_TX_ANNO_RETRANS 3
+#define RXRPC_TX_ANNO_MASK 0x03
+#define RXRPC_TX_ANNO_LAST 0x04
+#define RXRPC_TX_ANNO_RESENT 0x08
+
+#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
+#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
+ rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
+ * not hard-ACK'd packet follows this.
+ */
+ rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+
+ /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
+ * is fixed, we keep these numbers in terms of segments (ie. DATA
+ * packets) rather than bytes.
+ */
+#define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
+ u8 cong_cwnd; /* Congestion window size */
+ u8 cong_extra; /* Extra to send for congestion management */
+ u8 cong_ssthresh; /* Slow-start threshold */
+ enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
+ u8 cong_dup_acks; /* Count of ACKs showing missing packets */
+ u8 cong_cumul_acks; /* Cumulative ACK count */
+ ktime_t cong_tstamp; /* Last time cwnd was changed */
+
+ rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
+ * consumed packet follows this.
*/
+ rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
+ rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
+ u8 rx_winsize; /* Size of Rx window */
+ u8 tx_winsize; /* Maximum size of Tx window */
+ bool tx_phase; /* T if transmission phase, F if receive phase */
+ u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
/* receive-phase ACK management */
- rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */
- rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */
- rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */
- rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */
- rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
- rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
- rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
u8 ackr_reason; /* reason to ACK */
+ u16 ackr_skew; /* skew on packet being ACK'd */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
- atomic_t ackr_not_idle; /* number of packets in Rx queue */
-
- /* received packet records, 1 bit per record */
-#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
- unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
+ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
+ rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
+ rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
+ rxrpc_serial_t ackr_ping; /* Last ping sent */
+ ktime_t ackr_ping_time; /* Time last ping sent */
- u8 in_clientflag; /* Copy of conn->in_clientflag */
- struct rxrpc_local *local; /* Local endpoint. */
- u32 call_id; /* call ID on connection */
- u32 cid; /* connection ID plus channel index */
- u32 epoch; /* epoch of this connection */
- u16 service_id; /* service ID */
+ /* transmission-phase ACK management */
+ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
+ rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
+ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};
/*
- * locally abort an RxRPC call
+ * Summary of a new ACK and the changes it made to the Tx buffer packet states.
*/
-static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
-{
- write_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE) {
- call->local_abort = abort_code;
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
- }
- write_unlock_bh(&call->state_lock);
-}
+struct rxrpc_ack_summary {
+ u8 ack_reason;
+ u8 nr_acks; /* Number of ACKs in packet */
+ u8 nr_nacks; /* Number of NACKs in packet */
+ u8 nr_new_acks; /* Number of new ACKs in packet */
+ u8 nr_new_nacks; /* Number of new NACKs in packet */
+ u8 nr_rot_new_acks; /* Number of rotated new ACKs */
+ bool new_low_nack; /* T if new low NACK found */
+ bool retrans_timeo; /* T if reTx due to timeout happened */
+ u8 flight_size; /* Number of unreceived transmissions */
+ /* Place to stash values for tracing */
+ enum rxrpc_congest_mode mode:8;
+ u8 cwnd;
+ u8 ssthresh;
+ u8 dup_acks;
+ u8 cumulative_acks;
+};
+
+enum rxrpc_skb_trace {
+ rxrpc_skb_rx_cleaned,
+ rxrpc_skb_rx_freed,
+ rxrpc_skb_rx_got,
+ rxrpc_skb_rx_lost,
+ rxrpc_skb_rx_received,
+ rxrpc_skb_rx_rotated,
+ rxrpc_skb_rx_purged,
+ rxrpc_skb_rx_seen,
+ rxrpc_skb_tx_cleaned,
+ rxrpc_skb_tx_freed,
+ rxrpc_skb_tx_got,
+ rxrpc_skb_tx_new,
+ rxrpc_skb_tx_rotated,
+ rxrpc_skb_tx_seen,
+ rxrpc_skb__nr_trace
+};
+
+extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];
+
+enum rxrpc_conn_trace {
+ rxrpc_conn_new_client,
+ rxrpc_conn_new_service,
+ rxrpc_conn_queued,
+ rxrpc_conn_seen,
+ rxrpc_conn_got,
+ rxrpc_conn_put_client,
+ rxrpc_conn_put_service,
+ rxrpc_conn__nr_trace
+};
+
+extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];
+
+enum rxrpc_client_trace {
+ rxrpc_client_activate_chans,
+ rxrpc_client_alloc,
+ rxrpc_client_chan_activate,
+ rxrpc_client_chan_disconnect,
+ rxrpc_client_chan_pass,
+ rxrpc_client_chan_unstarted,
+ rxrpc_client_cleanup,
+ rxrpc_client_count,
+ rxrpc_client_discard,
+ rxrpc_client_duplicate,
+ rxrpc_client_exposed,
+ rxrpc_client_replace,
+ rxrpc_client_to_active,
+ rxrpc_client_to_culled,
+ rxrpc_client_to_idle,
+ rxrpc_client_to_inactive,
+ rxrpc_client_to_waiting,
+ rxrpc_client_uncount,
+ rxrpc_client__nr_trace
+};
+
+extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
+extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];
+
+enum rxrpc_call_trace {
+ rxrpc_call_new_client,
+ rxrpc_call_new_service,
+ rxrpc_call_queued,
+ rxrpc_call_queued_ref,
+ rxrpc_call_seen,
+ rxrpc_call_connected,
+ rxrpc_call_release,
+ rxrpc_call_got,
+ rxrpc_call_got_userid,
+ rxrpc_call_got_kernel,
+ rxrpc_call_put,
+ rxrpc_call_put_userid,
+ rxrpc_call_put_kernel,
+ rxrpc_call_put_noqueue,
+ rxrpc_call_error,
+ rxrpc_call__nr_trace
+};
+
+extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
+
+enum rxrpc_transmit_trace {
+ rxrpc_transmit_wait,
+ rxrpc_transmit_queue,
+ rxrpc_transmit_queue_last,
+ rxrpc_transmit_rotate,
+ rxrpc_transmit_rotate_last,
+ rxrpc_transmit_await_reply,
+ rxrpc_transmit_end,
+ rxrpc_transmit__nr_trace
+};
+
+extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];
+
+enum rxrpc_receive_trace {
+ rxrpc_receive_incoming,
+ rxrpc_receive_queue,
+ rxrpc_receive_queue_last,
+ rxrpc_receive_front,
+ rxrpc_receive_rotate,
+ rxrpc_receive_end,
+ rxrpc_receive__nr_trace
+};
+
+extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];
+
+enum rxrpc_recvmsg_trace {
+ rxrpc_recvmsg_enter,
+ rxrpc_recvmsg_wait,
+ rxrpc_recvmsg_dequeue,
+ rxrpc_recvmsg_hole,
+ rxrpc_recvmsg_next,
+ rxrpc_recvmsg_cont,
+ rxrpc_recvmsg_full,
+ rxrpc_recvmsg_data_return,
+ rxrpc_recvmsg_terminal,
+ rxrpc_recvmsg_to_be_accepted,
+ rxrpc_recvmsg_return,
+ rxrpc_recvmsg__nr_trace
+};
+
+extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];
+
+enum rxrpc_rtt_tx_trace {
+ rxrpc_rtt_tx_ping,
+ rxrpc_rtt_tx_data,
+ rxrpc_rtt_tx__nr_trace
+};
+
+extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];
+
+enum rxrpc_rtt_rx_trace {
+ rxrpc_rtt_rx_ping_response,
+ rxrpc_rtt_rx_requested_ack,
+ rxrpc_rtt_rx__nr_trace
+};
+
+extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
+
+enum rxrpc_timer_trace {
+ rxrpc_timer_begin,
+ rxrpc_timer_init_for_reply,
+ rxrpc_timer_expired,
+ rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_resend,
+ rxrpc_timer_set_for_send,
+ rxrpc_timer__nr_trace
+};
+
+extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8];
+
+enum rxrpc_propose_ack_trace {
+ rxrpc_propose_ack_client_tx_end,
+ rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_lost_ack,
+ rxrpc_propose_ack_ping_for_lost_reply,
+ rxrpc_propose_ack_ping_for_params,
+ rxrpc_propose_ack_respond_to_ack,
+ rxrpc_propose_ack_respond_to_ping,
+ rxrpc_propose_ack_retry_tx,
+ rxrpc_propose_ack_rotate_rx,
+ rxrpc_propose_ack_terminal_ack,
+ rxrpc_propose_ack__nr_trace
+};
+
+enum rxrpc_propose_ack_outcome {
+ rxrpc_propose_ack_use,
+ rxrpc_propose_ack_update,
+ rxrpc_propose_ack_subsume,
+ rxrpc_propose_ack__nr_outcomes
+};
+
+extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8];
+extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes];
+
+enum rxrpc_congest_change {
+ rxrpc_cong_begin_retransmission,
+ rxrpc_cong_cleared_nacks,
+ rxrpc_cong_new_low_nack,
+ rxrpc_cong_no_change,
+ rxrpc_cong_progress,
+ rxrpc_cong_retransmit_again,
+ rxrpc_cong_rtt_window_end,
+ rxrpc_cong_saw_nack,
+ rxrpc_congest__nr_change
+};
+
+extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10];
+extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];
+
+extern const char *const rxrpc_pkts[];
+extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
+
+#include <trace/events/rxrpc.h>
/*
* af_rxrpc.c
*/
-extern atomic_t rxrpc_n_skbs;
+extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
@@ -495,70 +792,178 @@ extern struct workqueue_struct *rxrpc_workqueue;
/*
* call_accept.c
*/
+int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
+void rxrpc_discard_prealloc(struct rxrpc_sock *);
+struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+ struct rxrpc_connection *,
+ struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
+ rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
-void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
+void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+ enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
/*
* call_object.c
*/
+extern const char *const rxrpc_call_states[];
+extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
-extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
+struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
unsigned long, gfp_t);
-struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
- struct rxrpc_connection *,
- struct sk_buff *);
-void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
+ struct sk_buff *);
+void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
-void __rxrpc_put_call(struct rxrpc_call *);
+bool __rxrpc_queue_call(struct rxrpc_call *);
+bool rxrpc_queue_call(struct rxrpc_call *);
+void rxrpc_see_call(struct rxrpc_call *);
+void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
+void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
+void rxrpc_cleanup_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);
+static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
+{
+ return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
+}
+
+static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
+{
+ return !rxrpc_is_service_call(call);
+}
+
+/*
+ * Transition a call to the complete state.
+ */
+static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
+ enum rxrpc_call_completion compl,
+ u32 abort_code,
+ int error)
+{
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ call->abort_code = abort_code;
+ call->error = error;
+		call->completion = compl;
+ call->state = RXRPC_CALL_COMPLETE;
+ wake_up(&call->waitq);
+ return true;
+ }
+ return false;
+}
+
+static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
+ enum rxrpc_call_completion compl,
+ u32 abort_code,
+ int error)
+{
+ bool ret;
+
+ write_lock_bh(&call->state_lock);
+ ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
+ write_unlock_bh(&call->state_lock);
+ return ret;
+}
+
+/*
+ * Record that a call successfully completed.
+ */
+static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
+{
+ return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
+}
+
+static inline bool rxrpc_call_completed(struct rxrpc_call *call)
+{
+ bool ret;
+
+ write_lock_bh(&call->state_lock);
+ ret = __rxrpc_call_completed(call);
+ write_unlock_bh(&call->state_lock);
+ return ret;
+}
+
+/*
+ * Record that a call is locally aborted.
+ */
+static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
+ rxrpc_seq_t seq,
+ u32 abort_code, int error)
+{
+ trace_rxrpc_abort(why, call->cid, call->call_id, seq,
+ abort_code, error);
+ return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
+ abort_code, error);
+}
+
+static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
+ rxrpc_seq_t seq, u32 abort_code, int error)
+{
+ bool ret;
+
+ write_lock_bh(&call->state_lock);
+ ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
+ write_unlock_bh(&call->state_lock);
+ return ret;
+}
+
/*
* conn_client.c
*/
+extern unsigned int rxrpc_max_client_connections;
+extern unsigned int rxrpc_reap_client_connections;
+extern unsigned int rxrpc_conn_idle_client_expiry;
+extern unsigned int rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *, gfp_t);
-void rxrpc_unpublish_client_conn(struct rxrpc_connection *);
+void rxrpc_expose_client_call(struct rxrpc_call *);
+void rxrpc_disconnect_client_call(struct rxrpc_call *);
+void rxrpc_put_client_conn(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_client_connections(void);
/*
* conn_event.c
*/
void rxrpc_process_connection(struct work_struct *);
-void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-void rxrpc_reject_packets(struct rxrpc_local *);
/*
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
+extern struct list_head rxrpc_connection_proc_list;
extern rwlock_t rxrpc_connection_lock;
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
struct sk_buff *);
-void __rxrpc_disconnect_call(struct rxrpc_call *);
+void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
-void rxrpc_put_connection(struct rxrpc_connection *);
+void rxrpc_kill_connection(struct rxrpc_connection *);
+bool rxrpc_queue_conn(struct rxrpc_connection *);
+void rxrpc_see_connection(struct rxrpc_connection *);
+void rxrpc_get_connection(struct rxrpc_connection *);
+struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
+void rxrpc_put_service_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
@@ -571,24 +976,15 @@ static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
return !rxrpc_conn_is_client(conn);
}
-static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
-{
- atomic_inc(&conn->usage);
-}
-
-static inline
-struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
+static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
- return atomic_inc_not_zero(&conn->usage) ? conn : NULL;
-}
+ if (!conn)
+ return;
-static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn)
-{
- if (!rxrpc_get_connection_maybe(conn))
- return false;
- if (!rxrpc_queue_work(&conn->processor))
- rxrpc_put_connection(conn);
- return true;
+ if (rxrpc_conn_is_client(conn))
+ rxrpc_put_client_conn(conn);
+ else
+ rxrpc_put_service_conn(conn);
}
/*
@@ -596,17 +992,14 @@ static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn)
*/
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
-struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *,
- struct sockaddr_rxrpc *,
- struct sk_buff *);
+struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
+void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
* input.c
*/
void rxrpc_data_ready(struct sock *);
-int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
-void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
/*
* insecure.c
@@ -668,25 +1061,24 @@ extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
+extern unsigned int rxrpc_resend_timeout;
-extern const char *const rxrpc_pkts[];
extern const s8 rxrpc_ack_priority[];
-extern const char *rxrpc_acks(u8 reason);
-
/*
* output.c
*/
-extern unsigned int rxrpc_resend_timeout;
-
-int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *);
-int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
+int rxrpc_send_call_packet(struct rxrpc_call *, u8);
+int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
+void rxrpc_reject_packets(struct rxrpc_local *);
/*
* peer_event.c
*/
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+ rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
/*
* peer_object.c
@@ -696,10 +1088,13 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
+struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
+ struct rxrpc_peer *);
-static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
+static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
atomic_inc(&peer->usage);
+ return peer;
}
static inline
@@ -718,14 +1113,13 @@ static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
/*
* proc.c
*/
-extern const char *const rxrpc_call_states[];
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;
/*
* recvmsg.c
*/
-void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
/*
@@ -744,9 +1138,21 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
/*
+ * sendmsg.c
+ */
+int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
+
+/*
* skbuff.c
*/
+void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_purge_queue(struct sk_buff_head *);
/*
* sysctl.c
@@ -764,6 +1170,23 @@ static inline void rxrpc_sysctl_exit(void) {}
*/
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
+static inline bool before(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) < 0;
+}
+static inline bool before_eq(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) <= 0;
+}
+static inline bool after(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) > 0;
+}
+static inline bool after_eq(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) >= 0;
+}
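These helpers order 32-bit sequence numbers modulo 2^32 by letting the subtraction wrap and testing the sign, so ordering survives counter wrap-around. A small standalone illustration of the same trick (userspace C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	static int seq_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;	/* same trick as before() above */
	}

	int main(void)
	{
		/* 0xfffffffe still precedes 1 once the counter has wrapped. */
		printf("%d %d\n",
		       seq_before(0xfffffffeU, 1),
		       seq_before(1, 0xfffffffeU));	/* prints "1 0" */
		return 0;
	}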
+
/*
* debug tracing
*/
@@ -846,11 +1269,12 @@ do { \
#define ASSERTCMP(X, OP, Y) \
do { \
- unsigned long _x = (unsigned long)(X); \
- unsigned long _y = (unsigned long)(Y); \
+ __typeof__(X) _x = (X); \
+ __typeof__(Y) _y = (__typeof__(X))(Y); \
if (unlikely(!(_x OP _y))) { \
- pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
- _x, _x, #OP, _y, _y); \
+ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+ (unsigned long)_x, (unsigned long)_x, #OP, \
+ (unsigned long)_y, (unsigned long)_y); \
BUG(); \
} \
} while (0)
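The rewritten macro compares in __typeof__(X) and only casts to unsigned long for the printout; the old form compared everything as unsigned long, which inverts comparisons involving negative values. A standalone illustration (userspace C, GNU __typeof__ assumed; not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		int x = -1, y = 1;

		/* Old style: both sides forced to unsigned long first. */
		printf("%d\n", (unsigned long)x < (unsigned long)y);	/* prints 0 */

		/* New style: compare in the type of X, cast only to print. */
		{
			__typeof__(x) _x = x;
			__typeof__(x) _y = (__typeof__(x))y;
			printf("%d\n", _x < _y);			/* prints 1 */
		}
		return 0;
	}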
@@ -865,11 +1289,12 @@ do { \
#define ASSERTIFCMP(C, X, OP, Y) \
do { \
- unsigned long _x = (unsigned long)(X); \
- unsigned long _y = (unsigned long)(Y); \
+ __typeof__(X) _x = (X); \
+ __typeof__(Y) _y = (__typeof__(X))(Y); \
if (unlikely((C) && !(_x OP _y))) { \
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
- _x, _x, #OP, _y, _y); \
+ (unsigned long)_x, (unsigned long)_x, #OP, \
+ (unsigned long)_y, (unsigned long)_y); \
BUG(); \
} \
} while (0)
@@ -893,54 +1318,3 @@ do { \
} while (0)
#endif /* __KDEBUGALL */
-
-/*
- * socket buffer accounting / leak finding
- */
-static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
-{
- //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
- //atomic_inc(&rxrpc_n_skbs);
-}
-
-#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)
-
-static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
-{
- //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
- //atomic_dec(&rxrpc_n_skbs);
-}
-
-#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)
-
-static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
-{
- if (skb) {
- CHECK_SLAB_OKAY(&skb->users);
- //_net("free skb %p %s [%d]",
- // skb, fn, atomic_read(&rxrpc_n_skbs));
- //atomic_dec(&rxrpc_n_skbs);
- kfree_skb(skb);
- }
-}
-
-#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)
-
-static inline void rxrpc_purge_queue(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
- while ((skb = skb_dequeue((list))) != NULL)
- rxrpc_free_skb(skb);
-}
-
-#define rxrpc_get_call(CALL) \
-do { \
- CHECK_SLAB_OKAY(&(CALL)->usage); \
- if (atomic_inc_return(&(CALL)->usage) == 1) \
- BUG(); \
-} while (0)
-
-#define rxrpc_put_call(CALL) \
-do { \
- __rxrpc_put_call(CALL); \
-} while (0)
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9bae21e66d65..3cac231d8405 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -20,265 +20,409 @@
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
+#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
/*
- * generate a connection-level abort
+ * Preallocate a single service call, connection and peer and, if possible,
+ * give them a user ID and attach the user's side of the ID to them.
*/
-static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
- struct rxrpc_wire_header *whdr)
+static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
+ struct rxrpc_backlog *b,
+ rxrpc_notify_rx_t notify_rx,
+ rxrpc_user_attach_call_t user_attach_call,
+ unsigned long user_call_ID, gfp_t gfp)
{
- struct msghdr msg;
- struct kvec iov[1];
- size_t len;
- int ret;
+ const void *here = __builtin_return_address(0);
+ struct rxrpc_call *call;
+ int max, tmp;
+ unsigned int size = RXRPC_BACKLOG_MAX;
+ unsigned int head, tail, call_head, call_tail;
+
+ max = rx->sk.sk_max_ack_backlog;
+ tmp = rx->sk.sk_ack_backlog;
+ if (tmp >= max) {
+ _leave(" = -ENOBUFS [full %u]", max);
+ return -ENOBUFS;
+ }
+ max -= tmp;
+
+ /* We don't need more conns and peers than we have calls, but on the
+ * other hand, we shouldn't ever use more peers than conns or conns
+ * than calls.
+ */
+ call_head = b->call_backlog_head;
+ call_tail = READ_ONCE(b->call_backlog_tail);
+ tmp = CIRC_CNT(call_head, call_tail, size);
+ if (tmp >= max) {
+ _leave(" = -ENOBUFS [enough %u]", tmp);
+ return -ENOBUFS;
+ }
+ max = tmp + 1;
+
+ head = b->peer_backlog_head;
+ tail = READ_ONCE(b->peer_backlog_tail);
+ if (CIRC_CNT(head, tail, size) < max) {
+ struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
+ if (!peer)
+ return -ENOMEM;
+ b->peer_backlog[head] = peer;
+ smp_store_release(&b->peer_backlog_head,
+ (head + 1) & (size - 1));
+ }
- _enter("%d,,", local->debug_id);
+ head = b->conn_backlog_head;
+ tail = READ_ONCE(b->conn_backlog_tail);
+ if (CIRC_CNT(head, tail, size) < max) {
+ struct rxrpc_connection *conn;
- whdr->type = RXRPC_PACKET_TYPE_BUSY;
- whdr->serial = htonl(1);
+ conn = rxrpc_prealloc_service_connection(gfp);
+ if (!conn)
+ return -ENOMEM;
+ b->conn_backlog[head] = conn;
+ smp_store_release(&b->conn_backlog_head,
+ (head + 1) & (size - 1));
- msg.msg_name = &srx->transport.sin;
- msg.msg_namelen = sizeof(srx->transport.sin);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ atomic_read(&conn->usage), here);
+ }
- iov[0].iov_base = whdr;
- iov[0].iov_len = sizeof(*whdr);
+ /* Now it gets complicated, because calls get registered with the
+ * socket here, particularly if a user ID is preassigned by the user.
+ */
+ call = rxrpc_alloc_call(gfp);
+ if (!call)
+ return -ENOMEM;
+ call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
+ call->state = RXRPC_CALL_SERVER_PREALLOC;
- len = iov[0].iov_len;
+ trace_rxrpc_call(call, rxrpc_call_new_service,
+ atomic_read(&call->usage),
+ here, (const void *)user_call_ID);
- _proto("Tx BUSY %%1");
+ write_lock(&rx->call_lock);
+ if (user_attach_call) {
+ struct rxrpc_call *xcall;
+ struct rb_node *parent, **pp;
+
+ /* Check the user ID isn't already in use */
+ pp = &rx->calls.rb_node;
+ parent = NULL;
+ while (*pp) {
+ parent = *pp;
+ xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+ if (user_call_ID < xcall->user_call_ID)
+ pp = &(*pp)->rb_left;
+ else if (user_call_ID > xcall->user_call_ID)
+ pp = &(*pp)->rb_right;
+ else
+ goto id_in_use;
+ }
- ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
- if (ret < 0) {
- _leave(" = -EAGAIN [sendmsg failed: %d]", ret);
- return -EAGAIN;
+ call->user_call_ID = user_call_ID;
+ call->notify_rx = notify_rx;
+ rxrpc_get_call(call, rxrpc_call_got_kernel);
+ user_attach_call(call, user_call_ID);
+ rxrpc_get_call(call, rxrpc_call_got_userid);
+ rb_link_node(&call->sock_node, parent, pp);
+ rb_insert_color(&call->sock_node, &rx->calls);
+ set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
}
- _leave(" = 0");
+ list_add(&call->sock_link, &rx->sock_calls);
+
+ write_unlock(&rx->call_lock);
+
+ write_lock(&rxrpc_call_lock);
+ list_add_tail(&call->link, &rxrpc_calls);
+ write_unlock(&rxrpc_call_lock);
+
+ b->call_backlog[call_head] = call;
+ smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
+ _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
return 0;
+
+id_in_use:
+ write_unlock(&rx->call_lock);
+ rxrpc_cleanup_call(call);
+ _leave(" = -EBADSLT");
+ return -EBADSLT;
}
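The backlog buffers are power-of-two rings: occupancy is measured with CIRC_CNT() and indices advance with (idx + 1) & (size - 1); the producer publishes the head with smp_store_release() and the consumer in rxrpc_alloc_incoming_call() below reads it with smp_load_acquire() before advancing the tail. A stripped-down, single-threaded sketch of just the index arithmetic (illustrative only; the size is assumed, and one slot is conventionally kept free so head == tail means empty):

	#include <stdio.h>

	#define BACKLOG_MAX	32	/* assumed; must be a power of two */
	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

	int main(void)
	{
		unsigned int head = 0, tail = 0;

		/* Producer: fill while there is room, keeping one slot free. */
		while (CIRC_CNT(head, tail, BACKLOG_MAX) < BACKLOG_MAX - 1)
			head = (head + 1) & (BACKLOG_MAX - 1);

		/* Consumer: take one entry if any are available. */
		if (CIRC_CNT(head, tail, BACKLOG_MAX) > 0)
			tail = (tail + 1) & (BACKLOG_MAX - 1);

		printf("spare entries: %u\n", CIRC_CNT(head, tail, BACKLOG_MAX));
		return 0;
	}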
/*
- * accept an incoming call that needs peer, transport and/or connection setting
- * up
+ * Preallocate sufficient service connections, calls and peers to cover the
+ * entire backlog of a socket. When a new call comes in, if we don't have
+ * sufficient of each available, the call gets rejected as busy or ignored.
+ *
+ * The backlog is replenished when a connection is accepted or rejected.
*/
-static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
- struct rxrpc_sock *rx,
- struct sk_buff *skb,
- struct sockaddr_rxrpc *srx)
+int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
- struct rxrpc_connection *conn;
- struct rxrpc_skb_priv *sp, *nsp;
- struct rxrpc_call *call;
- struct sk_buff *notification;
- int ret;
+ struct rxrpc_backlog *b = rx->backlog;
- _enter("");
+ if (!b) {
+ b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
+ if (!b)
+ return -ENOMEM;
+ rx->backlog = b;
+ }
+
+ if (rx->discard_new_call)
+ return 0;
+
+ while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
+ ;
- sp = rxrpc_skb(skb);
+ return 0;
+}
- /* get a notification message to send to the server app */
- notification = alloc_skb(0, GFP_NOFS);
- if (!notification) {
- _debug("no memory");
- ret = -ENOMEM;
- goto error_nofree;
+/*
+ * Discard the preallocation on a service.
+ */
+void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+{
+ struct rxrpc_backlog *b = rx->backlog;
+ unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
+
+ if (!b)
+ return;
+ rx->backlog = NULL;
+
+ /* Make sure that there aren't any incoming calls in progress before we
+ * clear the preallocation buffers.
+ */
+ spin_lock_bh(&rx->incoming_lock);
+ spin_unlock_bh(&rx->incoming_lock);
+
+ head = b->peer_backlog_head;
+ tail = b->peer_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_peer *peer = b->peer_backlog[tail];
+ kfree(peer);
+ tail = (tail + 1) & (size - 1);
}
- rxrpc_new_skb(notification);
- notification->mark = RXRPC_SKB_MARK_NEW_CALL;
-
- conn = rxrpc_incoming_connection(local, srx, skb);
- if (IS_ERR(conn)) {
- _debug("no conn");
- ret = PTR_ERR(conn);
- goto error;
+
+ head = b->conn_backlog_head;
+ tail = b->conn_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_connection *conn = b->conn_backlog[tail];
+ write_lock(&rxrpc_connection_lock);
+ list_del(&conn->link);
+ list_del(&conn->proc_link);
+ write_unlock(&rxrpc_connection_lock);
+ kfree(conn);
+ tail = (tail + 1) & (size - 1);
}
- call = rxrpc_incoming_call(rx, conn, skb);
- rxrpc_put_connection(conn);
- if (IS_ERR(call)) {
- _debug("no call");
- ret = PTR_ERR(call);
- goto error;
+ head = b->call_backlog_head;
+ tail = b->call_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_call *call = b->call_backlog[tail];
+ if (rx->discard_new_call) {
+ _debug("discard %lx", call->user_call_ID);
+ rx->discard_new_call(call, call->user_call_ID);
+ rxrpc_put_call(call, rxrpc_call_put_kernel);
+ }
+ rxrpc_call_completed(call);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ tail = (tail + 1) & (size - 1);
}
- /* attach the call to the socket */
- read_lock_bh(&local->services_lock);
- if (rx->sk.sk_state == RXRPC_CLOSE)
- goto invalid_service;
+ kfree(b);
+}
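The empty lock/unlock pair on rx->incoming_lock above is not a no-op: once the lock has been taken and released, any softirq that was inside rxrpc_new_incoming_call() when the backlog pointer was cleared must have left its critical section, so the buffers can be torn down safely. The same drain idiom in standalone form (pthreads, illustrative only, not part of the patch):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Taking and immediately dropping the lock guarantees that any critical
	 * section that began before this call has finished when it returns.
	 */
	static void drain_critical_sections(void)
	{
		pthread_mutex_lock(&lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		drain_critical_sections();
		printf("no earlier critical section can still be running\n");
		return 0;
	}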
- write_lock(&rx->call_lock);
- if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
- rxrpc_get_call(call);
-
- spin_lock(&call->conn->state_lock);
- if (sp->hdr.securityIndex > 0 &&
- call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
- _debug("await conn sec");
- list_add_tail(&call->accept_link, &rx->secureq);
- call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
- set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
- rxrpc_queue_conn(call->conn);
- } else {
- _debug("conn ready");
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- list_add_tail(&call->accept_link, &rx->acceptq);
- rxrpc_get_call(call);
- atomic_inc(&call->skb_count);
- nsp = rxrpc_skb(notification);
- nsp->call = call;
-
- ASSERTCMP(atomic_read(&call->usage), >=, 3);
-
- _debug("notify");
- spin_lock(&call->lock);
- ret = rxrpc_queue_rcv_skb(call, notification, true,
- false);
- spin_unlock(&call->lock);
- notification = NULL;
- BUG_ON(ret < 0);
+/*
+ * Allocate a new incoming call from the prealloc pool, along with a connection
+ * and a peer as necessary.
+ */
+static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
+ struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ struct rxrpc_backlog *b = rx->backlog;
+ struct rxrpc_peer *peer, *xpeer;
+ struct rxrpc_call *call;
+ unsigned short call_head, conn_head, peer_head;
+ unsigned short call_tail, conn_tail, peer_tail;
+ unsigned short call_count, conn_count;
+
+ /* #calls >= #conns >= #peers must hold true. */
+ call_head = smp_load_acquire(&b->call_backlog_head);
+ call_tail = b->call_backlog_tail;
+ call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
+ conn_head = smp_load_acquire(&b->conn_backlog_head);
+ conn_tail = b->conn_backlog_tail;
+ conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
+ ASSERTCMP(conn_count, >=, call_count);
+ peer_head = smp_load_acquire(&b->peer_backlog_head);
+ peer_tail = b->peer_backlog_tail;
+ ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
+ conn_count);
+
+ if (call_count == 0)
+ return NULL;
+
+ if (!conn) {
+ /* No connection. We're going to need a peer to start off
+ * with. If one doesn't yet exist, use a spare from the
+ * preallocation set. We dump the address into the spare in
+ * anticipation - and to save on stack space.
+ */
+ xpeer = b->peer_backlog[peer_tail];
+ if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
+ return NULL;
+
+ peer = rxrpc_lookup_incoming_peer(local, xpeer);
+ if (peer == xpeer) {
+ b->peer_backlog[peer_tail] = NULL;
+ smp_store_release(&b->peer_backlog_tail,
+ (peer_tail + 1) &
+ (RXRPC_BACKLOG_MAX - 1));
}
- spin_unlock(&call->conn->state_lock);
- _debug("queued");
+ /* Now allocate and set up the connection */
+ conn = b->conn_backlog[conn_tail];
+ b->conn_backlog[conn_tail] = NULL;
+ smp_store_release(&b->conn_backlog_tail,
+ (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
+ rxrpc_get_local(local);
+ conn->params.local = local;
+ conn->params.peer = peer;
+ rxrpc_see_connection(conn);
+ rxrpc_new_incoming_connection(conn, skb);
+ } else {
+ rxrpc_get_connection(conn);
}
- write_unlock(&rx->call_lock);
- _debug("process");
- rxrpc_fast_process_packet(call, skb);
+ /* And now we can allocate and set up a new call */
+ call = b->call_backlog[call_tail];
+ b->call_backlog[call_tail] = NULL;
+ smp_store_release(&b->call_backlog_tail,
+ (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
- _debug("done");
- read_unlock_bh(&local->services_lock);
- rxrpc_free_skb(notification);
- rxrpc_put_call(call);
- _leave(" = 0");
- return 0;
-
-invalid_service:
- _debug("invalid");
- read_unlock_bh(&local->services_lock);
-
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- rxrpc_get_call(call);
- rxrpc_queue_call(call);
- }
- read_unlock_bh(&call->state_lock);
- rxrpc_put_call(call);
- ret = -ECONNREFUSED;
-error:
- rxrpc_free_skb(notification);
-error_nofree:
- _leave(" = %d", ret);
- return ret;
+ rxrpc_see_call(call);
+ call->conn = conn;
+ call->peer = rxrpc_get_peer(conn->params.peer);
+ return call;
}
/*
- * accept incoming calls that need peer, transport and/or connection setting up
- * - the packets we get are all incoming client DATA packets that have seq == 1
+ * Set up a new incoming call. Called in BH context with the RCU read lock
+ * held.
+ *
+ * If this is for a kernel service, when we allocate the call, it will have
+ * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
+ * retainer ref obtained from the backlog buffer. Prealloc calls for userspace
+ * services only have the ref from the backlog buffer. We want to pass this
+ * ref to non-BH context to dispose of.
+ *
+ * If we want to report an error, we mark the skb with the packet type and
+ * abort code and return NULL.
*/
-void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
+struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct rxrpc_connection *conn,
+ struct sk_buff *skb)
{
- struct rxrpc_skb_priv *sp;
- struct sockaddr_rxrpc srx;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_sock *rx;
- struct rxrpc_wire_header whdr;
- struct sk_buff *skb;
- int ret;
+ struct rxrpc_call *call;
+ u16 service_id = sp->hdr.serviceId;
- _enter("%d", local->debug_id);
+ _enter("");
- skb = skb_dequeue(&local->accept_queue);
- if (!skb) {
- _leave("\n");
- return;
+ /* Get the socket providing the service */
+ rx = rcu_dereference(local->service);
+ if (service_id == rx->srx.srx_service)
+ goto found_service;
+
+ trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EOPNOTSUPP);
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ _leave(" = NULL [service]");
+ return NULL;
+
+found_service:
+ spin_lock(&rx->incoming_lock);
+ if (rx->sk.sk_state == RXRPC_CLOSE) {
+ trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
+ sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ _leave(" = NULL [close]");
+ call = NULL;
+ goto out;
}
- _net("incoming call skb %p", skb);
-
- sp = rxrpc_skb(skb);
-
- /* Set up a response packet header in case we need it */
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.seq = htonl(sp->hdr.seq);
- whdr.serial = 0;
- whdr.flags = 0;
- whdr.type = 0;
- whdr.userStatus = 0;
- whdr.securityIndex = sp->hdr.securityIndex;
- whdr._rsvd = 0;
- whdr.serviceId = htons(sp->hdr.serviceId);
-
- if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
- goto drop;
-
- /* get the socket providing the service */
- read_lock_bh(&local->services_lock);
- list_for_each_entry(rx, &local->services, listen_link) {
- if (rx->srx.srx_service == sp->hdr.serviceId &&
- rx->sk.sk_state != RXRPC_CLOSE)
- goto found_service;
+ call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+ if (!call) {
+ skb->mark = RXRPC_SKB_MARK_BUSY;
+ _leave(" = NULL [busy]");
+ call = NULL;
+ goto out;
}
- read_unlock_bh(&local->services_lock);
- goto invalid_service;
-found_service:
- _debug("found service %hd", rx->srx.srx_service);
- if (sk_acceptq_is_full(&rx->sk))
- goto backlog_full;
- sk_acceptq_added(&rx->sk);
- sock_hold(&rx->sk);
- read_unlock_bh(&local->services_lock);
-
- ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
- if (ret < 0)
- sk_acceptq_removed(&rx->sk);
- sock_put(&rx->sk);
- switch (ret) {
- case -ECONNRESET: /* old calls are ignored */
- case -ECONNABORTED: /* aborted calls are reaborted or ignored */
- case 0:
- return;
- case -ECONNREFUSED:
- goto invalid_service;
- case -EBUSY:
- goto busy;
- case -EKEYREJECTED:
- goto security_mismatch;
+ trace_rxrpc_receive(call, rxrpc_receive_incoming,
+ sp->hdr.serial, sp->hdr.seq);
+
+ /* Make the call live. */
+ rxrpc_incoming_call(rx, call, skb);
+ conn = call->conn;
+
+ if (rx->notify_new_call)
+ rx->notify_new_call(&rx->sk, call, call->user_call_ID);
+ else
+ sk_acceptq_added(&rx->sk);
+
+ spin_lock(&conn->state_lock);
+ switch (conn->state) {
+ case RXRPC_CONN_SERVICE_UNSECURED:
+ conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
+ set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
+ rxrpc_queue_conn(call->conn);
+ break;
+
+ case RXRPC_CONN_SERVICE:
+ write_lock(&call->state_lock);
+ if (rx->discard_new_call)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ else
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ write_unlock(&call->state_lock);
+ break;
+
+ case RXRPC_CONN_REMOTELY_ABORTED:
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ conn->remote_abort, ECONNABORTED);
+ break;
+ case RXRPC_CONN_LOCALLY_ABORTED:
+ rxrpc_abort_call("CON", call, sp->hdr.seq,
+ conn->local_abort, ECONNABORTED);
+ break;
default:
BUG();
}
+ spin_unlock(&conn->state_lock);
-backlog_full:
- read_unlock_bh(&local->services_lock);
-busy:
- rxrpc_busy(local, &srx, &whdr);
- rxrpc_free_skb(skb);
- return;
+ if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ rxrpc_notify_socket(call);
-drop:
- rxrpc_free_skb(skb);
- return;
+ /* We have to discard the prealloc queue's ref here and rely on a
+ * combination of the RCU read lock and refs held either by the socket
+ * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
+ * service to prevent the call from being deallocated too early.
+ */
+ rxrpc_put_call(call, rxrpc_call_put);
-invalid_service:
- skb->priority = RX_INVALID_OPERATION;
- rxrpc_reject_packet(local, skb);
- return;
-
- /* can't change connection security type mid-flow */
-security_mismatch:
- skb->priority = RX_PROTOCOL_ERROR;
- rxrpc_reject_packet(local, skb);
- return;
+ _leave(" = %p{%d}", call, call->debug_id);
+out:
+ spin_unlock(&rx->incoming_lock);
+ return call;
}
/*
@@ -286,7 +430,8 @@ security_mismatch:
* - assign the user call ID to the call at the front of the queue
*/
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
- unsigned long user_call_ID)
+ unsigned long user_call_ID,
+ rxrpc_notify_rx_t notify_rx)
{
struct rxrpc_call *call;
struct rb_node *parent, **pp;
@@ -298,12 +443,13 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
write_lock(&rx->call_lock);
- ret = -ENODATA;
- if (list_empty(&rx->acceptq))
- goto out;
+ if (list_empty(&rx->to_be_accepted)) {
+ write_unlock(&rx->call_lock);
+ kleave(" = -ENODATA [empty]");
+ return ERR_PTR(-ENODATA);
+ }
/* check the user ID isn't already in use */
- ret = -EBADSLT;
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
@@ -315,62 +461,59 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
else if (user_call_ID > call->user_call_ID)
pp = &(*pp)->rb_right;
else
- goto out;
+ goto id_in_use;
}
- /* dequeue the first call and check it's still valid */
- call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+ /* Dequeue the first call and check it's still valid. We gain
+ * responsibility for the queue's reference.
+ */
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
+ rxrpc_see_call(call);
write_lock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
break;
- case RXRPC_CALL_REMOTELY_ABORTED:
- case RXRPC_CALL_LOCALLY_ABORTED:
- ret = -ECONNABORTED;
- goto out_release;
- case RXRPC_CALL_NETWORK_ERROR:
- ret = call->conn->error;
+ case RXRPC_CALL_COMPLETE:
+ ret = call->error;
goto out_release;
- case RXRPC_CALL_DEAD:
- ret = -ETIME;
- goto out_discard;
default:
BUG();
}
/* formalise the acceptance */
+ call->notify_rx = notify_rx;
call->user_call_ID = user_call_ID;
+ rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
BUG();
- if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
- BUG();
- rxrpc_queue_call(call);
- rxrpc_get_call(call);
write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
+ rxrpc_notify_socket(call);
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %p{%d}", call, call->debug_id);
return call;
- /* if the call is already dying or dead, then we leave the socket's ref
- * on it to be released by rxrpc_dead_call_expired() as induced by
- * rxrpc_release_call() */
out_release:
_debug("release %p", call);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
-out_discard:
write_unlock_bh(&call->state_lock);
- _debug("discard %p", call);
-out:
write_unlock(&rx->call_lock);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ goto out;
+
+id_in_use:
+ ret = -EBADSLT;
+ write_unlock(&rx->call_lock);
+out:
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
@@ -382,6 +525,7 @@ out:
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
struct rxrpc_call *call;
+ bool abort = false;
int ret;
_enter("");
@@ -390,88 +534,73 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
write_lock(&rx->call_lock);
- ret = -ENODATA;
- if (list_empty(&rx->acceptq))
- goto out;
+ if (list_empty(&rx->to_be_accepted)) {
+ write_unlock(&rx->call_lock);
+ return -ENODATA;
+ }
- /* dequeue the first call and check it's still valid */
- call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+ /* Dequeue the first call and check it's still valid. We gain
+ * responsibility for the queue's reference.
+ */
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
+ rxrpc_see_call(call);
write_lock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
- call->state = RXRPC_CALL_SERVER_BUSY;
- if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
- rxrpc_queue_call(call);
- ret = 0;
- goto out_release;
- case RXRPC_CALL_REMOTELY_ABORTED:
- case RXRPC_CALL_LOCALLY_ABORTED:
- ret = -ECONNABORTED;
- goto out_release;
- case RXRPC_CALL_NETWORK_ERROR:
- ret = call->conn->error;
- goto out_release;
- case RXRPC_CALL_DEAD:
- ret = -ETIME;
+ __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
+ abort = true;
+ /* fall through */
+ case RXRPC_CALL_COMPLETE:
+ ret = call->error;
goto out_discard;
default:
BUG();
}
- /* if the call is already dying or dead, then we leave the socket's ref
- * on it to be released by rxrpc_dead_call_expired() as induced by
- * rxrpc_release_call() */
-out_release:
- _debug("release %p", call);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
- _debug("discard %p", call);
-out:
write_unlock(&rx->call_lock);
+ if (abort) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ }
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %d", ret);
return ret;
}
-/**
- * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
- * @sock: The socket on which the impending call is waiting
- * @user_call_ID: The tag to attach to the call
+/**
+ * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
+ * @sock: The socket on which to preallocate
+ * @notify_rx: Event notification function for the call
+ * @user_attach_call: Func to attach call to user_call_ID
+ * @user_call_ID: The tag to attach to the preallocated call
+ * @gfp: The allocation conditions.
*
- * Allow a kernel service to accept an incoming call, assuming the incoming
- * call is still valid.
- */
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
- unsigned long user_call_ID)
-{
- struct rxrpc_call *call;
-
- _enter(",%lx", user_call_ID);
- call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
- _leave(" = %p", call);
- return call;
-}
-EXPORT_SYMBOL(rxrpc_kernel_accept_call);
-
-/**
- * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
- * @sock: The socket on which the impending call is waiting
+ * Charge up the socket with preallocated calls, each with a user ID. A
+ * function should be provided to effect the attachment from the user's side.
+ * The user is given a ref to hold on the call.
*
- * Allow a kernel service to reject an incoming call with a BUSY message,
- * assuming the incoming call is still valid.
+ * Note that the call may become connected before this function returns.
*/
-int rxrpc_kernel_reject_call(struct socket *sock)
+int rxrpc_kernel_charge_accept(struct socket *sock,
+ rxrpc_notify_rx_t notify_rx,
+ rxrpc_user_attach_call_t user_attach_call,
+ unsigned long user_call_ID, gfp_t gfp)
{
- int ret;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+ struct rxrpc_backlog *b = rx->backlog;
- _enter("");
- ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
- _leave(" = %d", ret);
- return ret;
+ if (sock->sk->sk_state == RXRPC_CLOSE)
+ return -ESHUTDOWN;
+
+ return rxrpc_service_prealloc_one(rx, b, notify_rx,
+ user_attach_call, user_call_ID,
+ gfp);
}
-EXPORT_SYMBOL(rxrpc_kernel_reject_call);
+EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
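A kernel service is expected to keep the backlog topped up by calling this exported function in a loop until it reports that no more room is available. The sketch below is hypothetical (struct my_call, my_notify_rx and my_attach_call are placeholders, not part of this patch):

	/* Hypothetical kernel-service helper, for illustration only. */
	static void my_charge_preallocation(struct socket *rxrpc_socket)
	{
		struct my_call *c;

		for (;;) {
			c = kzalloc(sizeof(*c), GFP_KERNEL);
			if (!c)
				break;
			if (rxrpc_kernel_charge_accept(rxrpc_socket,
						       my_notify_rx,
						       my_attach_call,
						       (unsigned long)c,
						       GFP_KERNEL) < 0) {
				kfree(c);
				break;
			}
		}
	}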
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index e60cf65c2232..4f00476630b9 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -22,1281 +22,351 @@
#include "ar-internal.h"
/*
- * propose an ACK be sent
+ * Set the timer
*/
-void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u32 serial, bool immediate)
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ ktime_t now)
{
- unsigned long expiry;
- s8 prior = rxrpc_ack_priority[ack_reason];
-
- ASSERTCMP(prior, >, 0);
-
- _enter("{%d},%s,%%%x,%u",
- call->debug_id, rxrpc_acks(ack_reason), serial, immediate);
-
- if (prior < rxrpc_ack_priority[call->ackr_reason]) {
- if (immediate)
- goto cancel_timer;
- return;
- }
+ unsigned long t_j, now_j = jiffies;
+ ktime_t t;
+ bool queue = false;
- /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
- * numbers */
- if (prior == rxrpc_ack_priority[call->ackr_reason]) {
- if (prior <= 4)
- call->ackr_serial = serial;
- if (immediate)
- goto cancel_timer;
- return;
- }
-
- call->ackr_reason = ack_reason;
- call->ackr_serial = serial;
-
- switch (ack_reason) {
- case RXRPC_ACK_DELAY:
- _debug("run delay timer");
- expiry = rxrpc_soft_ack_delay;
- goto run_timer;
-
- case RXRPC_ACK_IDLE:
- if (!immediate) {
- _debug("run defer timer");
- expiry = rxrpc_idle_ack_delay;
- goto run_timer;
- }
- goto cancel_timer;
-
- case RXRPC_ACK_REQUESTED:
- expiry = rxrpc_requested_ack_delay;
- if (!expiry)
- goto cancel_timer;
- if (!immediate || serial == 1) {
- _debug("run defer timer");
- goto run_timer;
- }
-
- default:
- _debug("immediate ACK");
- goto cancel_timer;
- }
-
-run_timer:
- expiry += jiffies;
- if (!timer_pending(&call->ack_timer) ||
- time_after(call->ack_timer.expires, expiry))
- mod_timer(&call->ack_timer, expiry);
- return;
-
-cancel_timer:
- _debug("cancel timer %%%u", serial);
- try_to_del_timer_sync(&call->ack_timer);
read_lock_bh(&call->state_lock);
- if (call->state <= RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
-}
-
-/*
- * propose an ACK be sent, locking the call structure
- */
-void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u32 serial, bool immediate)
-{
- s8 prior = rxrpc_ack_priority[ack_reason];
-
- if (prior > rxrpc_ack_priority[call->ackr_reason]) {
- spin_lock_bh(&call->lock);
- __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
- spin_unlock_bh(&call->lock);
- }
-}
-/*
- * set the resend timer
- */
-static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
- unsigned long resend_at)
-{
- read_lock_bh(&call->state_lock);
- if (call->state >= RXRPC_CALL_COMPLETE)
- resend = 0;
-
- if (resend & 1) {
- _debug("SET RESEND");
- set_bit(RXRPC_CALL_EV_RESEND, &call->events);
- }
-
- if (resend & 2) {
- _debug("MODIFY RESEND TIMER");
- set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- mod_timer(&call->resend_timer, resend_at);
- } else {
- _debug("KILL RESEND TIMER");
- del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
- read_unlock_bh(&call->state_lock);
-}
-
-/*
- * resend packets
- */
-static void rxrpc_resend(struct rxrpc_call *call)
-{
- struct rxrpc_wire_header *whdr;
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- bool stop;
- int loop;
- u8 resend;
-
- _enter("{%d,%d,%d,%d},",
- call->acks_hard, call->acks_unacked,
- atomic_read(&call->sequence),
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
-
- stop = false;
- resend = 0;
- resend_at = 0;
-
- for (loop = call->acks_tail;
- loop != call->acks_head || stop;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- if (*p_txb & 1)
- continue;
-
- txb = (struct sk_buff *) *p_txb;
- sp = rxrpc_skb(txb);
-
- if (sp->need_resend) {
- sp->need_resend = false;
-
- /* each Tx packet has a new serial number */
- sp->hdr.serial = atomic_inc_return(&call->conn->serial);
-
- whdr = (struct rxrpc_wire_header *)txb->head;
- whdr->serial = htonl(sp->hdr.serial);
-
- _proto("Tx DATA %%%u { #%d }",
- sp->hdr.serial, sp->hdr.seq);
- if (rxrpc_send_data_packet(call->conn, txb) < 0) {
- stop = true;
- sp->resend_at = jiffies + 3;
- } else {
- sp->resend_at =
- jiffies + rxrpc_resend_timeout;
- }
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ t = call->expire_at;
+ if (!ktime_after(t, now))
+ goto out;
+
+ if (!ktime_after(call->resend_at, now)) {
+ call->resend_at = call->expire_at;
+ if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ queue = true;
+ } else if (ktime_before(call->resend_at, t)) {
+ t = call->resend_at;
}
- if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
+ if (!ktime_after(call->ack_at, now)) {
+ call->ack_at = call->expire_at;
+ if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
+ queue = true;
+ } else if (ktime_before(call->ack_at, t)) {
+ t = call->ack_at;
}
- }
-
- rxrpc_set_resend(call, resend, resend_at);
- _leave("");
-}
-
-/*
- * handle resend timer expiry
- */
-static void rxrpc_resend_timer(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- int loop;
- u8 resend;
-
- _enter("%d,%d,%d",
- call->acks_tail, call->acks_unacked, call->acks_head);
-
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- resend = 0;
- resend_at = 0;
- for (loop = call->acks_unacked;
- loop != call->acks_head;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
+ t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
+ t_j += jiffies;
- ASSERT(!(*p_txb & 1));
+ /* We have to make sure that the calculated jiffies value falls
+ * at or after the nsec value, or we may loop ceaselessly
+ * because the timer times out, but we haven't reached the nsec
+ * timeout yet.
+ */
+ t_j++;
- if (sp->need_resend) {
- ;
- } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
+ if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
+ mod_timer(&call->timer, t_j);
+ trace_rxrpc_timer(call, why, now, now_j);
}
+
+ if (queue)
+ rxrpc_queue_call(call);
}
- rxrpc_set_resend(call, resend, resend_at);
- _leave("");
+out:
+ read_unlock_bh(&call->state_lock);
}
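nsecs_to_jiffies() truncates, so without the extra increment the timer could expire one tick before the ktime deadline and the worker would keep re-arming it; rounding up by one jiffy guarantees the deadline has passed when the timer fires. A toy illustration of the rounding (the HZ value is assumed purely for the example, not taken from the patch):

	#include <stdio.h>

	#define HZ 100					/* assumed tick rate */
	#define NSEC_PER_JIFFY (1000000000UL / HZ)

	int main(void)
	{
		unsigned long delta_ns = 25000000;	/* 25ms is 2.5 jiffies */
		unsigned long t_j = delta_ns / NSEC_PER_JIFFY;	/* truncates to 2 */

		t_j++;					/* round up: 3 jiffies >= 25ms */
		printf("%lu\n", t_j);
		return 0;
	}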
/*
- * process soft ACKs of our transmitted packets
- * - these indicate packets the peer has or has not received, but hasn't yet
- * given to the consumer, and so can still be discarded and re-requested
+ * propose an ACK be sent
*/
-static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
- struct rxrpc_ackpacket *ack,
- struct sk_buff *skb)
+static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
+ u16 skew, u32 serial, bool immediate,
+ bool background,
+ enum rxrpc_propose_ack_trace why)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- int loop;
- u8 sacks[RXRPC_MAXACKS], resend;
-
- _enter("{%d,%d},{%d},",
- call->acks_hard,
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
- ack->nAcks);
-
- if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
- goto protocol_error;
-
- resend = 0;
- resend_at = 0;
- for (loop = 0; loop < ack->nAcks; loop++) {
- p_txb = call->acks_window;
- p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
-
- switch (sacks[loop]) {
- case RXRPC_ACK_TYPE_ACK:
- sp->need_resend = false;
- *p_txb |= 1;
- break;
- case RXRPC_ACK_TYPE_NACK:
- sp->need_resend = true;
- *p_txb &= ~1;
- resend = 1;
- break;
- default:
- _debug("Unsupported ACK type %d", sacks[loop]);
- goto protocol_error;
- }
- }
-
- smp_mb();
- call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
-
- /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
- * have been received or processed yet by the far end */
- for (loop = call->acks_unacked;
- loop != call->acks_head;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
+ enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
+ unsigned int expiry = rxrpc_soft_ack_delay;
+ ktime_t now, ack_at;
+ s8 prior = rxrpc_ack_priority[ack_reason];
- if (*p_txb & 1) {
- /* packet must have been discarded */
- sp->need_resend = true;
- *p_txb &= ~1;
- resend |= 1;
- } else if (sp->need_resend) {
- ;
- } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
+ /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
+ * numbers, but we don't alter the timeout.
+ */
+ _debug("prior %u %u vs %u %u",
+ ack_reason, prior,
+ call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
+ if (ack_reason == call->ackr_reason) {
+ if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
+ outcome = rxrpc_propose_ack_update;
+ call->ackr_serial = serial;
+ call->ackr_skew = skew;
}
+ if (!immediate)
+ goto trace;
+ } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
+ call->ackr_reason = ack_reason;
+ call->ackr_serial = serial;
+ call->ackr_skew = skew;
+ } else {
+ outcome = rxrpc_propose_ack_subsume;
}
- rxrpc_set_resend(call, resend, resend_at);
- _leave(" = 0");
- return 0;
-
-protocol_error:
- _leave(" = -EPROTO");
- return -EPROTO;
-}
-
-/*
- * discard hard-ACK'd packets from the Tx window
- */
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
-{
- unsigned long _skb;
- int tail = call->acks_tail, old_tail;
- int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
+ switch (ack_reason) {
+ case RXRPC_ACK_REQUESTED:
+ if (rxrpc_requested_ack_delay < expiry)
+ expiry = rxrpc_requested_ack_delay;
+ if (serial == 1)
+ immediate = false;
+ break;
- _enter("{%u,%u},%u", call->acks_hard, win, hard);
+ case RXRPC_ACK_DELAY:
+ if (rxrpc_soft_ack_delay < expiry)
+ expiry = rxrpc_soft_ack_delay;
+ break;
- ASSERTCMP(hard - call->acks_hard, <=, win);
+ case RXRPC_ACK_PING:
+ case RXRPC_ACK_IDLE:
+ if (rxrpc_idle_ack_delay < expiry)
+ expiry = rxrpc_idle_ack_delay;
+ break;
- while (call->acks_hard < hard) {
- smp_read_barrier_depends();
- _skb = call->acks_window[tail] & ~1;
- rxrpc_free_skb((struct sk_buff *) _skb);
- old_tail = tail;
- tail = (tail + 1) & (call->acks_winsz - 1);
- call->acks_tail = tail;
- if (call->acks_unacked == old_tail)
- call->acks_unacked = tail;
- call->acks_hard++;
+ default:
+ immediate = true;
+ break;
}
- wake_up(&call->tx_waitq);
-}
-
-/*
- * clear the Tx window in the event of a failure
- */
-static void rxrpc_clear_tx_window(struct rxrpc_call *call)
-{
- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
-}
-
-/*
- * drain the out of sequence received packet queue into the packet Rx queue
- */
-static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- bool terminal;
- int ret;
-
- _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
-
- spin_lock_bh(&call->lock);
-
- ret = -ECONNRESET;
- if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
- goto socket_unavailable;
-
- skb = skb_dequeue(&call->rx_oos_queue);
- if (skb) {
- sp = rxrpc_skb(skb);
-
- _debug("drain OOS packet %d [%d]",
- sp->hdr.seq, call->rx_first_oos);
-
- if (sp->hdr.seq != call->rx_first_oos) {
- skb_queue_head(&call->rx_oos_queue, skb);
- call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
- _debug("requeue %p {%u}", skb, call->rx_first_oos);
- } else {
- skb->mark = RXRPC_SKB_MARK_DATA;
- terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
- !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
- ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
- BUG_ON(ret < 0);
- _debug("drain #%u", call->rx_data_post);
- call->rx_data_post++;
-
- /* find out what the next packet is */
- skb = skb_peek(&call->rx_oos_queue);
- if (skb)
- call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
- else
- call->rx_first_oos = 0;
- _debug("peek %p {%u}", skb, call->rx_first_oos);
+ if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+ _debug("already scheduled");
+ } else if (immediate || expiry == 0) {
+ _debug("immediate ACK %lx", call->events);
+ if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
+ background)
+ rxrpc_queue_call(call);
+ } else {
+ now = ktime_get_real();
+ ack_at = ktime_add_ms(now, expiry);
+ if (ktime_before(ack_at, call->ack_at)) {
+ call->ack_at = ack_at;
+ rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
}
}
- ret = 0;
-socket_unavailable:
- spin_unlock_bh(&call->lock);
- _leave(" = %d", ret);
- return ret;
+trace:
+ trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
+ background, outcome);
}
/*
- * insert an out of sequence packet into the buffer
+ * propose an ACK be sent, locking the call structure
*/
-static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
- struct sk_buff *skb)
+void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
+ u16 skew, u32 serial, bool immediate, bool background,
+ enum rxrpc_propose_ack_trace why)
{
- struct rxrpc_skb_priv *sp, *psp;
- struct sk_buff *p;
- u32 seq;
-
- sp = rxrpc_skb(skb);
- seq = sp->hdr.seq;
- _enter(",,{%u}", seq);
-
- skb->destructor = rxrpc_packet_destructor;
- ASSERTCMP(sp->call, ==, NULL);
- sp->call = call;
- rxrpc_get_call(call);
- atomic_inc(&call->skb_count);
-
- /* insert into the buffer in sequence order */
spin_lock_bh(&call->lock);
-
- skb_queue_walk(&call->rx_oos_queue, p) {
- psp = rxrpc_skb(p);
- if (psp->hdr.seq > seq) {
- _debug("insert oos #%u before #%u", seq, psp->hdr.seq);
- skb_insert(p, skb, &call->rx_oos_queue);
- goto inserted;
- }
- }
-
- _debug("append oos #%u", seq);
- skb_queue_tail(&call->rx_oos_queue, skb);
-inserted:
-
- /* we might now have a new front to the queue */
- if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
- call->rx_first_oos = seq;
-
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- call->rx_data_post == call->rx_first_oos) {
- _debug("drain rx oos now");
- set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
- }
- read_unlock(&call->state_lock);
-
+ __rxrpc_propose_ACK(call, ack_reason, skew, serial,
+ immediate, background, why);
spin_unlock_bh(&call->lock);
- _leave(" [stored #%u]", call->rx_first_oos);
-}
-
-/*
- * clear the Tx window on final ACK reception
- */
-static void rxrpc_zap_tx_window(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- unsigned long _skb, *acks_window;
- u8 winsz = call->acks_winsz;
- int tail;
-
- acks_window = call->acks_window;
- call->acks_window = NULL;
-
- while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
- tail = call->acks_tail;
- smp_read_barrier_depends();
- _skb = acks_window[tail] & ~1;
- smp_mb();
- call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
-
- skb = (struct sk_buff *) _skb;
- sp = rxrpc_skb(skb);
- _debug("+++ clear Tx %u", sp->hdr.seq);
- rxrpc_free_skb(skb);
- }
-
- kfree(acks_window);
}
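RXRPC_ACK_UPDATEABLE is used above as a bitmask with one bit per ACK reason whose recorded serial and skew may be refreshed in place, so membership is a single AND with (1 << ack_reason). A generic sketch of that pattern (the reason values and mask below are invented for illustration and do not reflect the real definitions):

	#include <stdio.h>

	enum { ACK_REQUESTED = 1, ACK_DELAY = 4, ACK_IDLE = 5 };	/* illustrative values */
	#define ACK_UPDATEABLE \
		((1 << ACK_REQUESTED) | (1 << ACK_DELAY) | (1 << ACK_IDLE))

	int main(void)
	{
		int reason = ACK_DELAY;

		if (ACK_UPDATEABLE & (1 << reason))
			printf("reason %d may update serial/skew in place\n", reason);
		return 0;
	}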
/*
- * process the extra information that may be appended to an ACK packet
+ * Handle congestion being detected by the retransmit timeout.
*/
-static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int latest, int nAcks)
+static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
- struct rxrpc_ackinfo ackinfo;
- struct rxrpc_peer *peer;
- unsigned int mtu;
-
- if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
- _leave(" [no ackinfo]");
- return;
- }
-
- _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
- latest,
- ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
- ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));
-
- mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));
-
- peer = call->conn->params.peer;
- if (mtu < peer->maxdata) {
- spin_lock_bh(&peer->lock);
- peer->maxdata = mtu;
- peer->mtu = mtu + peer->hdrsize;
- spin_unlock_bh(&peer->lock);
- _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
- }
+ set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
/*
- * process packets in the reception queue
+ * Perform retransmission of NAK'd and unack'd packets.
*/
-static int rxrpc_process_rx_queue(struct rxrpc_call *call,
- u32 *_abort_code)
+static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
- bool post_ACK;
- int latest;
- u32 hard, tx;
-
- _enter("");
-
-process_further:
- skb = skb_dequeue(&call->rx_queue);
- if (!skb)
- return -EAGAIN;
-
- _net("deferred skb %p", skb);
-
- sp = rxrpc_skb(skb);
-
- _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
-
- post_ACK = false;
-
- switch (sp->hdr.type) {
- /* data packets that wind up here have been received out of
- * order, need security processing or are jumbo packets */
- case RXRPC_PACKET_TYPE_DATA:
- _proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
+ rxrpc_seq_t cursor, seq, top;
+ ktime_t max_age, oldest, ack_ts;
+ int ix;
+ u8 annotation, anno_type, retrans = 0, unacked = 0;
- /* secured packets must be verified and possibly decrypted */
- if (call->conn->security->verify_packet(call, skb,
- _abort_code) < 0)
- goto protocol_error;
-
- rxrpc_insert_oos_packet(call, skb);
- goto process_further;
-
- /* partial ACK to process */
- case RXRPC_PACKET_TYPE_ACK:
- if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
- _debug("extraction failure");
- goto protocol_error;
- }
- if (!skb_pull(skb, sizeof(ack)))
- BUG();
-
- latest = sp->hdr.serial;
- hard = ntohl(ack.firstPacket);
- tx = atomic_read(&call->sequence);
-
- _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- latest,
- ntohs(ack.maxSkew),
- hard,
- ntohl(ack.previousPacket),
- ntohl(ack.serial),
- rxrpc_acks(ack.reason),
- ack.nAcks);
-
- rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);
-
- if (ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", latest);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- sp->hdr.serial, true);
- }
-
- /* discard any out-of-order or duplicate ACKs */
- if (latest - call->acks_latest <= 0) {
- _debug("discard ACK %d <= %d",
- latest, call->acks_latest);
- goto discard;
- }
- call->acks_latest = latest;
+ _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
- if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
- call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
- goto discard;
+ max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
- _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
-
- if (hard > 0) {
- if (hard - 1 > tx) {
- _debug("hard-ACK'd packet %d not transmitted"
- " (%d top)",
- hard - 1, tx);
- goto protocol_error;
- }
+ spin_lock_bh(&call->lock);
- if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
- call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
- hard > tx) {
- call->acks_hard = tx;
- goto all_acked;
- }
+ cursor = call->tx_hard_ack;
+ top = call->tx_top;
+ ASSERT(before_eq(cursor, top));
+ if (cursor == top)
+ goto out_unlock;
+
+ /* Scan the packet list without dropping the lock and decide which of
+ * the packets in the Tx buffer we're going to resend and what the new
+ * resend timeout will be.
+ */
+ oldest = now;
+ for (seq = cursor + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ if (anno_type == RXRPC_TX_ANNO_ACK)
+ continue;
- smp_rmb();
- rxrpc_rotate_tx_window(call, hard - 1);
- }
+ skb = call->rxtx_buffer[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+ sp = rxrpc_skb(skb);
- if (ack.nAcks > 0) {
- if (hard - 1 + ack.nAcks > tx) {
- _debug("soft-ACK'd packet %d+%d not"
- " transmitted (%d top)",
- hard - 1, ack.nAcks, tx);
- goto protocol_error;
+ if (anno_type == RXRPC_TX_ANNO_UNACK) {
+ if (ktime_after(skb->tstamp, max_age)) {
+ if (ktime_before(skb->tstamp, oldest))
+ oldest = skb->tstamp;
+ continue;
}
-
- if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
- goto protocol_error;
+ if (!(annotation & RXRPC_TX_ANNO_RESENT))
+ unacked++;
}
- goto discard;
-
- /* complete ACK to process */
- case RXRPC_PACKET_TYPE_ACKALL:
- goto all_acked;
-
- /* abort and busy are handled elsewhere */
- case RXRPC_PACKET_TYPE_BUSY:
- case RXRPC_PACKET_TYPE_ABORT:
- BUG();
- /* connection level events - also handled elsewhere */
- case RXRPC_PACKET_TYPE_CHALLENGE:
- case RXRPC_PACKET_TYPE_RESPONSE:
- case RXRPC_PACKET_TYPE_DEBUG:
- BUG();
+ /* Okay, we need to retransmit a packet. */
+ call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
+ retrans++;
+ trace_rxrpc_retransmit(call, seq, annotation | anno_type,
+ ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
- /* if we've had a hard ACK that covers all the packets we've sent, then
- * that ends that phase of the operation */
-all_acked:
- write_lock_bh(&call->state_lock);
- _debug("ack all %d", call->state);
+ call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
- break;
- case RXRPC_CALL_SERVER_AWAIT_ACK:
- _debug("srv complete");
- call->state = RXRPC_CALL_COMPLETE;
- post_ACK = true;
- break;
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- goto protocol_error_unlock; /* can't occur yet */
- default:
- write_unlock_bh(&call->state_lock);
- goto discard; /* assume packet left over from earlier phase */
- }
-
- write_unlock_bh(&call->state_lock);
+ if (unacked)
+ rxrpc_congestion_timeout(call);
- /* if all the packets we sent are hard-ACK'd, then we can discard
- * whatever we've got left */
- _debug("clear Tx %d",
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
-
- del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
-
- if (call->acks_window)
- rxrpc_zap_tx_window(call);
-
- if (post_ACK) {
- /* post the final ACK message for userspace to pick up */
- _debug("post ACK");
- skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
- sp->call = call;
- rxrpc_get_call(call);
- atomic_inc(&call->skb_count);
- spin_lock_bh(&call->lock);
- if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
- BUG();
+ /* If there was nothing that needed retransmission then it's likely
+ * that an ACK got lost somewhere. Send a ping to find out instead of
+ * retransmitting data.
+ */
+ if (!retrans) {
+ rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
spin_unlock_bh(&call->lock);
- goto process_further;
- }
-
-discard:
- rxrpc_free_skb(skb);
- goto process_further;
-
-protocol_error_unlock:
- write_unlock_bh(&call->state_lock);
-protocol_error:
- rxrpc_free_skb(skb);
- _leave(" = -EPROTO");
- return -EPROTO;
-}
-
-/*
- * post a message to the socket Rx queue for recvmsg() to pick up
- */
-static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
- bool fatal)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- int ret;
-
- _enter("{%d,%lx},%u,%u,%d",
- call->debug_id, call->flags, mark, error, fatal);
-
- /* remove timers and things for fatal messages */
- if (fatal) {
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
+ ack_ts = ktime_sub(now, call->acks_latest_ts);
+ if (ktime_to_ns(ack_ts) < call->peer->rtt)
+ goto out;
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ack_ping_for_lost_ack);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ goto out;
+ }
+
+ /* Now go through the Tx window and perform the retransmissions. We
+ * have to drop the lock for each send. If an ACK comes in whilst the
+ * lock is dropped, it may clear some of the retransmission markers for
+ * packets that it soft-ACKs.
+ */
+ for (seq = cursor + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ if (anno_type != RXRPC_TX_ANNO_RETRANS)
+ continue;
- if (mark != RXRPC_SKB_MARK_NEW_CALL &&
- !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- _leave("[no userid]");
- return 0;
- }
+ skb = call->rxtx_buffer[ix];
+ rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
- skb = alloc_skb(0, GFP_NOFS);
- if (!skb)
- return -ENOMEM;
+ if (rxrpc_send_data_packet(call, skb, true) < 0) {
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ return;
+ }
- rxrpc_new_skb(skb);
+ if (rxrpc_is_client_call(call))
+ rxrpc_expose_client_call(call);
- skb->mark = mark;
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ spin_lock_bh(&call->lock);
- sp = rxrpc_skb(skb);
- memset(sp, 0, sizeof(*sp));
- sp->error = error;
- sp->call = call;
- rxrpc_get_call(call);
- atomic_inc(&call->skb_count);
+ /* We need to clear the retransmit state, but there are two
+ * things we need to be aware of: A new ACK/NAK might have been
+ * received and the packet might have been hard-ACK'd (in which
+ * case it will no longer be in the buffer).
+ */
+ if (after(seq, call->tx_hard_ack)) {
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ if (anno_type == RXRPC_TX_ANNO_RETRANS ||
+ anno_type == RXRPC_TX_ANNO_NAK) {
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ annotation |= RXRPC_TX_ANNO_UNACK;
+ }
+ annotation |= RXRPC_TX_ANNO_RESENT;
+ call->rxtx_annotations[ix] = annotation;
+ }
- spin_lock_bh(&call->lock);
- ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
- spin_unlock_bh(&call->lock);
- BUG_ON(ret < 0);
+ if (after(call->tx_hard_ack, seq))
+ seq = call->tx_hard_ack;
}
- return 0;
+out_unlock:
+ spin_unlock_bh(&call->lock);
+out:
+ _leave("");
}
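
The resend scan above keys off a per-slot annotation byte in the Tx ring: the low bits carry the ACK state (unacked, soft-ACKed, NAKed or retransmit-pending) and a separate bit records that the packet has already been resent once. The standalone sketch below uses hypothetical constant values, not the kernel's definitions, and omits the packet-age check applied to unacked packets; it only illustrates the mask-and-test pattern that picks out packets to retransmit.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical annotation layout mirroring the pattern used above. */
    #define ANNO_MASK    0x03          /* low bits: ACK state */
    #define ANNO_UNACK   0x00
    #define ANNO_ACK     0x01
    #define ANNO_NAK     0x02
    #define ANNO_RETRANS 0x03
    #define ANNO_RESENT  0x04          /* flag: already resent at least once */

    #define BUFF_SIZE 8
    #define BUFF_MASK (BUFF_SIZE - 1)

    int main(void)
    {
        uint8_t anno[BUFF_SIZE] = { 0 };
        unsigned int hard_ack = 2, top = 6, seq;

        anno[3 & BUFF_MASK] = ANNO_ACK;                 /* soft-ACKed: skip */
        anno[4 & BUFF_MASK] = ANNO_NAK;                 /* NAKed: resend */
        anno[5 & BUFF_MASK] = ANNO_UNACK | ANNO_RESENT; /* resent before */

        for (seq = hard_ack + 1; seq <= top; seq++) {
            unsigned int ix = seq & BUFF_MASK;
            uint8_t state = anno[ix] & ANNO_MASK;

            if (state == ANNO_ACK)
                continue;   /* the peer already has this one */

            /* Mark for retransmission, preserving the RESENT flag. */
            anno[ix] = ANNO_RETRANS | (anno[ix] & ~ANNO_MASK);
            printf("resend seq %u (slot %u)\n", seq, ix);
        }
        return 0;
    }
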
/*
- * handle background processing of incoming call packets and ACK / abort
- * generation
+ * Handle retransmission and deferred ACK/abort generation.
*/
void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- struct rxrpc_wire_header whdr;
- struct rxrpc_ackpacket ack;
- struct rxrpc_ackinfo ackinfo;
- struct msghdr msg;
- struct kvec iov[5];
- enum rxrpc_call_event genbit;
- unsigned long bits;
- __be32 data, pad;
- size_t len;
- int loop, nbit, ioc, ret, mtu;
- u32 serial, abort_code = RX_PROTOCOL_ERROR;
- u8 *acks = NULL;
-
- //printk("\n--------------------\n");
- _enter("{%d,%s,%lx} [%lu]",
- call->debug_id, rxrpc_call_states[call->state], call->events,
- (jiffies - call->creation_jif) / (HZ / 10));
-
- if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
- _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
- return;
- }
-
- if (!call->conn)
- goto skip_msg_init;
-
- /* there's a good chance we're going to have to send a message, so set
- * one up in advance */
- msg.msg_name = &call->conn->params.peer->srx.transport;
- msg.msg_namelen = call->conn->params.peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- whdr.epoch = htonl(call->conn->proto.epoch);
- whdr.cid = htonl(call->cid);
- whdr.callNumber = htonl(call->call_id);
- whdr.seq = 0;
- whdr.type = RXRPC_PACKET_TYPE_ACK;
- whdr.flags = call->conn->out_clientflag;
- whdr.userStatus = 0;
- whdr.securityIndex = call->conn->security_ix;
- whdr._rsvd = 0;
- whdr.serviceId = htons(call->service_id);
-
- memset(iov, 0, sizeof(iov));
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
-skip_msg_init:
-
- /* deal with events of a final nature */
- if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
- enum rxrpc_skb_mark mark;
- int error;
-
- clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
-
- error = call->error_report;
- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
- mark = RXRPC_SKB_MARK_NET_ERROR;
- _debug("post net error %d", error);
- } else {
- mark = RXRPC_SKB_MARK_LOCAL_ERROR;
- error -= RXRPC_LOCAL_ERROR_OFFSET;
- _debug("post net local error %d", error);
- }
-
- if (rxrpc_post_message(call, mark, error, true) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
- goto kill_ACKs;
- }
-
- if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
- ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
-
- clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ ktime_t now;
- _debug("post conn abort");
+ rxrpc_see_call(call);
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- call->conn->error, true) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- goto kill_ACKs;
- }
-
- if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
- whdr.type = RXRPC_PACKET_TYPE_BUSY;
- genbit = RXRPC_CALL_EV_REJECT_BUSY;
- goto send_message;
- }
-
- if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
- ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
-
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- ECONNABORTED, true) < 0)
- goto no_mem;
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
- data = htonl(call->local_abort);
- iov[1].iov_base = &data;
- iov[1].iov_len = sizeof(data);
- genbit = RXRPC_CALL_EV_ABORT;
- goto send_message;
- }
-
- if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
- genbit = RXRPC_CALL_EV_ACK_FINAL;
-
- ack.bufferSpace = htons(8);
- ack.maxSkew = 0;
- ack.serial = 0;
- ack.reason = RXRPC_ACK_IDLE;
- ack.nAcks = 0;
- call->ackr_reason = 0;
-
- spin_lock_bh(&call->lock);
- ack.serial = htonl(call->ackr_serial);
- ack.previousPacket = htonl(call->ackr_prev_seq);
- ack.firstPacket = htonl(call->rx_data_eaten + 1);
- spin_unlock_bh(&call->lock);
-
- pad = 0;
+ //printk("\n--------------------\n");
+ _enter("{%d,%s,%lx}",
+ call->debug_id, rxrpc_call_states[call->state], call->events);
- iov[1].iov_base = &ack;
- iov[1].iov_len = sizeof(ack);
- iov[2].iov_base = &pad;
- iov[2].iov_len = 3;
- iov[3].iov_base = &ackinfo;
- iov[3].iov_len = sizeof(ackinfo);
- goto send_ACK;
+recheck_state:
+ if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ goto recheck_state;
}
- if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
- (1 << RXRPC_CALL_EV_RCVD_ABORT))
- ) {
- u32 mark;
-
- if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
- mark = RXRPC_SKB_MARK_REMOTE_ABORT;
- else
- mark = RXRPC_SKB_MARK_BUSY;
-
- _debug("post abort/busy");
- rxrpc_clear_tx_window(call);
- if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
- goto no_mem;
-
- clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- goto kill_ACKs;
+ if (call->state == RXRPC_CALL_COMPLETE) {
+ del_timer_sync(&call->timer);
+ goto out_put;
}
- if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
- _debug("do implicit ackall");
- rxrpc_clear_tx_window(call);
+ now = ktime_get_real();
+ if (ktime_before(call->expire_at, now)) {
+ rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ goto recheck_state;
}
- if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
- write_lock_bh(&call->state_lock);
- if (call->state <= RXRPC_CALL_COMPLETE) {
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = RX_CALL_TIMEOUT;
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+ call->ack_at = call->expire_at;
+ if (call->ackr_reason) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ goto recheck_state;
}
- write_unlock_bh(&call->state_lock);
-
- _debug("post timeout");
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- ETIME, true) < 0)
- goto no_mem;
-
- clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
- goto kill_ACKs;
}
- /* deal with assorted inbound messages */
- if (!skb_queue_empty(&call->rx_queue)) {
- switch (rxrpc_process_rx_queue(call, &abort_code)) {
- case 0:
- case -EAGAIN:
- break;
- case -ENOMEM:
- goto no_mem;
- case -EKEYEXPIRED:
- case -EKEYREJECTED:
- case -EPROTO:
- rxrpc_abort_call(call, abort_code);
- goto kill_ACKs;
- }
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
+ rxrpc_resend(call, now);
+ goto recheck_state;
}
- /* handle resending */
- if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
- rxrpc_resend_timer(call);
- if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
- rxrpc_resend(call);
-
- /* consider sending an ordinary ACK */
- if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
- _debug("send ACK: window: %d - %d { %lx }",
- call->rx_data_eaten, call->ackr_win_top,
- call->ackr_window[0]);
-
- if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
- /* ACK by sending reply DATA packet in this state */
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
- goto maybe_reschedule;
- }
-
- genbit = RXRPC_CALL_EV_ACK;
-
- acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
- GFP_NOFS);
- if (!acks)
- goto no_mem;
-
- //hdr.flags = RXRPC_SLOW_START_OK;
- ack.bufferSpace = htons(8);
- ack.maxSkew = 0;
-
- spin_lock_bh(&call->lock);
- ack.reason = call->ackr_reason;
- ack.serial = htonl(call->ackr_serial);
- ack.previousPacket = htonl(call->ackr_prev_seq);
- ack.firstPacket = htonl(call->rx_data_eaten + 1);
-
- ack.nAcks = 0;
- for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
- nbit = loop * BITS_PER_LONG;
- for (bits = call->ackr_window[loop]; bits; bits >>= 1
- ) {
- _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
- if (bits & 1) {
- acks[nbit] = RXRPC_ACK_TYPE_ACK;
- ack.nAcks = nbit + 1;
- }
- nbit++;
- }
- }
- call->ackr_reason = 0;
- spin_unlock_bh(&call->lock);
-
- pad = 0;
-
- iov[1].iov_base = &ack;
- iov[1].iov_len = sizeof(ack);
- iov[2].iov_base = acks;
- iov[2].iov_len = ack.nAcks;
- iov[3].iov_base = &pad;
- iov[3].iov_len = 3;
- iov[4].iov_base = &ackinfo;
- iov[4].iov_len = sizeof(ackinfo);
-
- switch (ack.reason) {
- case RXRPC_ACK_REQUESTED:
- case RXRPC_ACK_DUPLICATE:
- case RXRPC_ACK_OUT_OF_SEQUENCE:
- case RXRPC_ACK_EXCEEDS_WINDOW:
- case RXRPC_ACK_NOSPACE:
- case RXRPC_ACK_PING:
- case RXRPC_ACK_PING_RESPONSE:
- goto send_ACK_with_skew;
- case RXRPC_ACK_DELAY:
- case RXRPC_ACK_IDLE:
- goto send_ACK;
- }
- }
-
- /* handle completion of security negotiations on an incoming
- * connection */
- if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
- _debug("secured");
- spin_lock_bh(&call->lock);
-
- if (call->state == RXRPC_CALL_SERVER_SECURING) {
- _debug("securing");
- write_lock(&call->socket->call_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- _debug("not released");
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- list_move_tail(&call->accept_link,
- &call->socket->acceptq);
- }
- write_unlock(&call->socket->call_lock);
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
- read_unlock(&call->state_lock);
- }
-
- spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
- goto maybe_reschedule;
- }
-
- /* post a notification of an acceptable connection to the app */
- if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
- _debug("post accept");
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
- 0, false) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
- goto maybe_reschedule;
- }
-
- /* handle incoming call acceptance */
- if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
- _debug("accepted");
- ASSERTCMP(call->rx_data_post, ==, 0);
- call->rx_data_post = 1;
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
- read_unlock_bh(&call->state_lock);
- }
-
- /* drain the out of sequence received packet queue into the packet Rx
- * queue */
- if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
- while (call->rx_data_post == call->rx_first_oos)
- if (rxrpc_drain_rx_oos_queue(call) < 0)
- break;
- goto maybe_reschedule;
- }
-
- if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- rxrpc_release_call(call);
- clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
- }
+ rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
/* other events may have been raised since we started checking */
- goto maybe_reschedule;
-
-send_ACK_with_skew:
- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
- ntohl(ack.serial));
-send_ACK:
- mtu = call->conn->params.peer->if_mtu;
- mtu -= call->conn->params.peer->hdrsize;
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rxrpc_rx_window_size);
-
- /* permit the peer to send us jumbo packets if it wants to */
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
-
- serial = atomic_inc_return(&call->conn->serial);
- whdr.serial = htonl(serial);
- _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- serial,
- ntohs(ack.maxSkew),
- ntohl(ack.firstPacket),
- ntohl(ack.previousPacket),
- ntohl(ack.serial),
- rxrpc_acks(ack.reason),
- ack.nAcks);
-
- del_timer_sync(&call->ack_timer);
- if (ack.nAcks > 0)
- set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
- goto send_message_2;
-
-send_message:
- _debug("send message");
-
- serial = atomic_inc_return(&call->conn->serial);
- whdr.serial = htonl(serial);
- _proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
-send_message_2:
-
- len = iov[0].iov_len;
- ioc = 1;
- if (iov[4].iov_len) {
- ioc = 5;
- len += iov[4].iov_len;
- len += iov[3].iov_len;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[3].iov_len) {
- ioc = 4;
- len += iov[3].iov_len;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[2].iov_len) {
- ioc = 3;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[1].iov_len) {
- ioc = 2;
- len += iov[1].iov_len;
- }
-
- ret = kernel_sendmsg(call->conn->params.local->socket,
- &msg, iov, ioc, len);
- if (ret < 0) {
- _debug("sendmsg failed: %d", ret);
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- goto error;
- }
-
- switch (genbit) {
- case RXRPC_CALL_EV_ABORT:
- clear_bit(genbit, &call->events);
- clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- goto kill_ACKs;
-
- case RXRPC_CALL_EV_ACK_FINAL:
- write_lock_bh(&call->state_lock);
- if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
- call->state = RXRPC_CALL_COMPLETE;
- write_unlock_bh(&call->state_lock);
- goto kill_ACKs;
-
- default:
- clear_bit(genbit, &call->events);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- _debug("start ACK timer");
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
- call->ackr_serial, false);
- default:
- break;
- }
- goto maybe_reschedule;
- }
-
-kill_ACKs:
- del_timer_sync(&call->ack_timer);
- if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
- rxrpc_put_call(call);
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
-
-maybe_reschedule:
- if (call->events || !skb_queue_empty(&call->rx_queue)) {
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- }
-
- /* don't leave aborted connections on the accept queue */
- if (call->state >= RXRPC_CALL_COMPLETE &&
- !list_empty(&call->accept_link)) {
- _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
- call, call->events, call->flags, call->conn->proto.cid);
-
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- }
-
-error:
- clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
- kfree(acks);
-
- /* because we don't want two CPUs both processing the work item for one
- * call at the same time, we use a flag to note when it's busy; however
- * this means there's a race between clearing the flag and setting the
- * work pending bit and the work item being processed again */
- if (call->events && !work_pending(&call->processor)) {
- _debug("jumpstart %x", call->conn->proto.cid);
- rxrpc_queue_call(call);
+ if (call->events && call->state < RXRPC_CALL_COMPLETE) {
+ __rxrpc_queue_call(call);
+ goto out;
}
+out_put:
+ rxrpc_put_call(call, rxrpc_call_put);
+out:
_leave("");
- return;
-
-no_mem:
- _debug("out of memory");
- goto maybe_reschedule;
}
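
With the rewrite, rxrpc_process_call() is a flat loop: test-and-clear one event bit, act on it, then jump back and recheck from the top until no work is left and the timer can be rearmed. A minimal userspace sketch of that shape, assuming nothing about the kernel's bit helpers beyond atomic test-and-clear semantics:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { EV_ABORT, EV_ACK, EV_RESEND };

    static _Atomic unsigned long events;

    /* Clear a bit atomically and report whether it was set
     * (cf. test_and_clear_bit()).
     */
    static int test_and_clear_event(int bit)
    {
        unsigned long mask = 1UL << bit;

        return (atomic_fetch_and(&events, ~mask) & mask) != 0;
    }

    static void process_call(void)
    {
    recheck_state:
        if (test_and_clear_event(EV_ABORT)) {
            printf("send ABORT\n");
            goto recheck_state;   /* acting on one event may raise others */
        }
        if (test_and_clear_event(EV_ACK)) {
            printf("send ACK\n");
            goto recheck_state;
        }
        if (test_and_clear_event(EV_RESEND)) {
            printf("resend packets\n");
            goto recheck_state;
        }
        /* Nothing left to do: rearm the timer and return (elided). */
    }

    int main(void)
    {
        atomic_fetch_or(&events, (1UL << EV_ACK) | (1UL << EV_RESEND));
        process_call();
        return 0;
    }
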
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index ae057e0740f3..364b42dc3dce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -19,23 +19,13 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
- * Maximum lifetime of a call (in jiffies).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * HZ;
-
-/*
- * Time till dead call expires after last use (in jiffies).
- */
-unsigned int rxrpc_dead_call_expiry = 2 * HZ;
-
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
- [RXRPC_CALL_UNINITIALISED] = "Uninit",
+ [RXRPC_CALL_UNINITIALISED] = "Uninit ",
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
- [RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK",
+ [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
[RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
@@ -43,22 +33,47 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
[RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
[RXRPC_CALL_COMPLETE] = "Complete",
- [RXRPC_CALL_SERVER_BUSY] = "SvBusy ",
+};
+
+const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
+ [RXRPC_CALL_SUCCEEDED] = "Complete",
[RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
[RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
+ [RXRPC_CALL_LOCAL_ERROR] = "LocError",
[RXRPC_CALL_NETWORK_ERROR] = "NetError",
- [RXRPC_CALL_DEAD] = "Dead ",
+};
+
+const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
+ [rxrpc_call_new_client] = "NWc",
+ [rxrpc_call_new_service] = "NWs",
+ [rxrpc_call_queued] = "QUE",
+ [rxrpc_call_queued_ref] = "QUR",
+ [rxrpc_call_connected] = "CON",
+ [rxrpc_call_release] = "RLS",
+ [rxrpc_call_seen] = "SEE",
+ [rxrpc_call_got] = "GOT",
+ [rxrpc_call_got_userid] = "Gus",
+ [rxrpc_call_got_kernel] = "Gke",
+ [rxrpc_call_put] = "PUT",
+ [rxrpc_call_put_userid] = "Pus",
+ [rxrpc_call_put_kernel] = "Pke",
+ [rxrpc_call_put_noqueue] = "PNQ",
+ [rxrpc_call_error] = "*E*",
};
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);
-static void rxrpc_destroy_call(struct work_struct *work);
-static void rxrpc_call_life_expired(unsigned long _call);
-static void rxrpc_dead_call_expired(unsigned long _call);
-static void rxrpc_ack_time_expired(unsigned long _call);
-static void rxrpc_resend_time_expired(unsigned long _call);
+static void rxrpc_call_timer_expired(unsigned long _call)
+{
+ struct rxrpc_call *call = (struct rxrpc_call *)_call;
+
+ _enter("%d", call->debug_id);
+
+ if (call->state < RXRPC_CALL_COMPLETE)
+ rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+}
/*
* find an extant server call
@@ -91,7 +106,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
return NULL;
found_extant_call:
- rxrpc_get_call(call);
+ rxrpc_get_call(call, rxrpc_call_got);
read_unlock(&rx->call_lock);
_leave(" = %p [%d]", call, atomic_read(&call->usage));
return call;
@@ -100,7 +115,7 @@ found_extant_call:
/*
* allocate a new call
*/
-static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
struct rxrpc_call *call;
@@ -108,29 +123,25 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
if (!call)
return NULL;
- call->acks_winsz = 16;
- call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
+ call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
+ sizeof(struct sk_buff *),
gfp);
- if (!call->acks_window) {
- kmem_cache_free(rxrpc_call_jar, call);
- return NULL;
- }
+ if (!call->rxtx_buffer)
+ goto nomem;
+
+ call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
+ if (!call->rxtx_annotations)
+ goto nomem_2;
- setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
- (unsigned long) call);
- setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
- (unsigned long) call);
- setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
- (unsigned long) call);
- setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
- (unsigned long) call);
- INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
+ setup_timer(&call->timer, rxrpc_call_timer_expired,
+ (unsigned long)call);
INIT_WORK(&call->processor, &rxrpc_process_call);
INIT_LIST_HEAD(&call->link);
+ INIT_LIST_HEAD(&call->chan_wait_link);
INIT_LIST_HEAD(&call->accept_link);
- skb_queue_head_init(&call->rx_queue);
- skb_queue_head_init(&call->rx_oos_queue);
- init_waitqueue_head(&call->tx_waitq);
+ INIT_LIST_HEAD(&call->recvmsg_link);
+ INIT_LIST_HEAD(&call->sock_link);
+ init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
@@ -138,70 +149,65 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
- call->rx_data_expect = 1;
- call->rx_data_eaten = 0;
- call->rx_first_oos = 0;
- call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
- call->creation_jif = jiffies;
+ /* Leave space in the ring to handle a maxed-out jumbo packet */
+ call->rx_winsize = rxrpc_rx_window_size;
+ call->tx_winsize = 16;
+ call->rx_expect_next = 1;
+
+ if (RXRPC_TX_SMSS > 2190)
+ call->cong_cwnd = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ call->cong_cwnd = 3;
+ else
+ call->cong_cwnd = 4;
+ call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
return call;
+
+nomem_2:
+ kfree(call->rxtx_buffer);
+nomem:
+ kmem_cache_free(rxrpc_call_jar, call);
+ return NULL;
}
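
rxrpc_alloc_call() now follows the usual kernel goto-unwind idiom: each allocation that fails jumps to a label that frees only what was set up before it, in reverse order. A generic illustration of that shape in plain C (the names here are invented for the example):

    #include <stdlib.h>

    struct call {
        void **rxtx_buffer;
        unsigned char *rxtx_annotations;
    };

    /* Allocate a call and its two side arrays; unwind in reverse on failure. */
    static struct call *alloc_call(size_t slots)
    {
        struct call *call = calloc(1, sizeof(*call));

        if (!call)
            return NULL;

        call->rxtx_buffer = calloc(slots, sizeof(void *));
        if (!call->rxtx_buffer)
            goto nomem;

        call->rxtx_annotations = calloc(slots, sizeof(unsigned char));
        if (!call->rxtx_annotations)
            goto nomem_2;

        return call;

    nomem_2:
        free(call->rxtx_buffer);
    nomem:
        free(call);
        return NULL;
    }

    int main(void)
    {
        struct call *c = alloc_call(64);

        if (c) {
            free(c->rxtx_annotations);
            free(c->rxtx_buffer);
            free(c);
        }
        return 0;
    }
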
/*
* Allocate a new client call.
*/
-static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
- struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
+ ktime_t now;
_enter("");
- ASSERT(rx->local != NULL);
-
call = rxrpc_alloc_call(gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
-
- sock_hold(&rx->sk);
- call->socket = rx;
- call->rx_data_post = 1;
-
- call->local = rx->local;
call->service_id = srx->srx_service;
- call->in_clientflag = 0;
+ call->tx_phase = true;
+ now = ktime_get_real();
+ call->acks_latest_ts = now;
+ call->cong_tstamp = now;
_leave(" = %p", call);
return call;
}
/*
- * Begin client call.
+ * Initiate the call ack/resend/expiry timer.
*/
-static int rxrpc_begin_client_call(struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- int ret;
-
- /* Set up or get a connection record and set the protocol parameters,
- * including channel number and call ID.
- */
- ret = rxrpc_connect_call(call, cp, srx, gfp);
- if (ret < 0)
- return ret;
-
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-
- spin_lock(&call->conn->params.peer->lock);
- hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
- spin_unlock(&call->conn->params.peer->lock);
-
- call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
- add_timer(&call->lifetimer);
- return 0;
+ ktime_t now = ktime_get_real(), expire_at;
+
+ expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
+ call->expire_at = expire_at;
+ call->ack_at = expire_at;
+ call->resend_at = expire_at;
+ call->timer.expires = jiffies + LONG_MAX / 2;
+ rxrpc_set_timer(call, rxrpc_timer_begin, now);
}
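
The four per-call timers have been collapsed into one: the call keeps three deadlines (expire_at, ack_at and resend_at), and rxrpc_set_timer(), which isn't shown in this hunk, can reasonably be read as arming the single timer for whichever deadline falls soonest. A small sketch of that "earliest deadline wins" computation, using plain integers as a stand-in for ktime_t:

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the earliest of the three pending deadlines; this is what a
     * single consolidated call timer would be armed to. Timestamps are
     * arbitrary nanosecond counts standing in for ktime_t.
     */
    static uint64_t next_expiry(uint64_t expire_at, uint64_t ack_at,
                                uint64_t resend_at)
    {
        uint64_t t = expire_at;

        if (ack_at < t)
            t = ack_at;
        if (resend_at < t)
            t = resend_at;
        return t;
    }

    int main(void)
    {
        uint64_t now = 1000;

        /* Freshly started call: everything defaults to the hard expiry. */
        printf("%llu\n", (unsigned long long)
               next_expiry(now + 60000, now + 60000, now + 60000));

        /* An ACK has been proposed sooner than the resend timeout. */
        printf("%llu\n", (unsigned long long)
               next_expiry(now + 60000, now + 20, now + 4000));
        return 0;
    }
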
/*
@@ -216,20 +222,21 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
{
struct rxrpc_call *call, *xcall;
struct rb_node *parent, **pp;
+ const void *here = __builtin_return_address(0);
int ret;
_enter("%p,%lx", rx, user_call_ID);
- call = rxrpc_alloc_client_call(rx, srx, gfp);
+ call = rxrpc_alloc_client_call(srx, gfp);
if (IS_ERR(call)) {
_leave(" = %ld", PTR_ERR(call));
return call;
}
- /* Publish the call, even though it is incompletely set up as yet */
- call->user_call_ID = user_call_ID;
- __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+ trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+ here, (const void *)user_call_ID);
+ /* Publish the call, even though it is incompletely set up as yet */
write_lock(&rx->call_lock);
pp = &rx->calls.rb_node;
@@ -243,369 +250,285 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
else if (user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
- goto found_user_ID_now_present;
+ goto error_dup_user_ID;
}
- rxrpc_get_call(call);
-
+ rcu_assign_pointer(call->socket, rx);
+ call->user_call_ID = user_call_ID;
+ __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+ rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
+ list_add(&call->sock_link, &rx->sock_calls);
+
write_unlock(&rx->call_lock);
- write_lock_bh(&rxrpc_call_lock);
+ write_lock(&rxrpc_call_lock);
list_add_tail(&call->link, &rxrpc_calls);
- write_unlock_bh(&rxrpc_call_lock);
+ write_unlock(&rxrpc_call_lock);
- ret = rxrpc_begin_client_call(call, cp, srx, gfp);
+ /* Set up or get a connection record and set the protocol parameters,
+ * including channel number and call ID.
+ */
+ ret = rxrpc_connect_call(call, cp, srx, gfp);
if (ret < 0)
goto error;
- _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
+ trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
+ here, ERR_PTR(ret));
- _leave(" = %p [new]", call);
- return call;
+ spin_lock_bh(&call->conn->params.peer->lock);
+ hlist_add_head(&call->error_link,
+ &call->conn->params.peer->error_targets);
+ spin_unlock_bh(&call->conn->params.peer->lock);
-error:
- write_lock(&rx->call_lock);
- rb_erase(&call->sock_node, &rx->calls);
- write_unlock(&rx->call_lock);
- rxrpc_put_call(call);
+ rxrpc_start_call_timer(call);
- write_lock_bh(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock_bh(&rxrpc_call_lock);
+ _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
- set_bit(RXRPC_CALL_RELEASED, &call->flags);
- call->state = RXRPC_CALL_DEAD;
- rxrpc_put_call(call);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = %p [new]", call);
+ return call;
/* We unexpectedly found the user ID in the list after taking
* the call_lock. This shouldn't happen unless the user races
* with itself and tries to add the same user ID twice at the
* same time in different threads.
*/
-found_user_ID_now_present:
+error_dup_user_ID:
write_unlock(&rx->call_lock);
- set_bit(RXRPC_CALL_RELEASED, &call->flags);
- call->state = RXRPC_CALL_DEAD;
- rxrpc_put_call(call);
- _leave(" = -EEXIST [%p]", call);
- return ERR_PTR(-EEXIST);
+ ret = -EEXIST;
+
+error:
+ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_CALL_DEAD, ret);
+ trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
+ here, ERR_PTR(ret));
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
}
/*
- * set up an incoming call
- * - called in process context with IRQs enabled
+ * Set up an incoming call. call->conn points to the connection.
+ * This is called in BH context and isn't allowed to fail.
*/
-struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
- struct rxrpc_connection *conn,
- struct sk_buff *skb)
+void rxrpc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct sk_buff *skb)
{
+ struct rxrpc_connection *conn = call->conn;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_call *call, *candidate;
- u32 call_id, chan;
-
- _enter(",%d", conn->debug_id);
-
- ASSERT(rx != NULL);
-
- candidate = rxrpc_alloc_call(GFP_NOIO);
- if (!candidate)
- return ERR_PTR(-EBUSY);
-
- chan = sp->hdr.cid & RXRPC_CHANNELMASK;
- candidate->socket = rx;
- candidate->conn = conn;
- candidate->cid = sp->hdr.cid;
- candidate->call_id = sp->hdr.callNumber;
- candidate->channel = chan;
- candidate->rx_data_post = 0;
- candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
- if (conn->security_ix > 0)
- candidate->state = RXRPC_CALL_SERVER_SECURING;
-
- spin_lock(&conn->channel_lock);
-
- /* set the channel for this call */
- call = rcu_dereference_protected(conn->channels[chan].call,
- lockdep_is_held(&conn->channel_lock));
-
- _debug("channel[%u] is %p", candidate->channel, call);
- if (call && call->call_id == sp->hdr.callNumber) {
- /* already set; must've been a duplicate packet */
- _debug("extant call [%d]", call->state);
- ASSERTCMP(call->conn, ==, conn);
-
- read_lock(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
- rxrpc_queue_call(call);
- case RXRPC_CALL_REMOTELY_ABORTED:
- read_unlock(&call->state_lock);
- goto aborted_call;
- default:
- rxrpc_get_call(call);
- read_unlock(&call->state_lock);
- goto extant_call;
- }
- }
-
- if (call) {
- /* it seems the channel is still in use from the previous call
- * - ditch the old binding if its call is now complete */
- _debug("CALL: %u { %s }",
- call->debug_id, rxrpc_call_states[call->state]);
-
- if (call->state >= RXRPC_CALL_COMPLETE) {
- __rxrpc_disconnect_call(call);
- } else {
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -EBUSY");
- return ERR_PTR(-EBUSY);
- }
- }
-
- /* check the call number isn't duplicate */
- _debug("check dup");
- call_id = sp->hdr.callNumber;
-
- /* We just ignore calls prior to the current call ID. Terminated calls
- * are handled via the connection.
+ u32 chan;
+
+ _enter(",%d", call->conn->debug_id);
+
+ rcu_assign_pointer(call->socket, rx);
+ call->call_id = sp->hdr.callNumber;
+ call->service_id = sp->hdr.serviceId;
+ call->cid = sp->hdr.cid;
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ if (sp->hdr.securityIndex > 0)
+ call->state = RXRPC_CALL_SERVER_SECURING;
+ call->cong_tstamp = skb->tstamp;
+
+ /* Set the channel for this call. We don't get channel_lock as we're
+ * only defending against the data_ready handler (which we're called
+ * from) and the RESPONSE packet parser (which is only really
+ * interested in call_counter and can cope with a disagreement with the
+ * call pointer).
*/
- if (call_id <= conn->channels[chan].call_counter)
- goto old_call; /* TODO: Just drop packet */
-
- /* make the call available */
- _debug("new call");
- call = candidate;
- candidate = NULL;
- conn->channels[chan].call_counter = call_id;
+ chan = sp->hdr.cid & RXRPC_CHANNELMASK;
+ conn->channels[chan].call_counter = call->call_id;
+ conn->channels[chan].call_id = call->call_id;
rcu_assign_pointer(conn->channels[chan].call, call);
- sock_hold(&rx->sk);
- rxrpc_get_connection(conn);
- spin_unlock(&conn->channel_lock);
spin_lock(&conn->params.peer->lock);
hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
spin_unlock(&conn->params.peer->lock);
- write_lock_bh(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock_bh(&rxrpc_call_lock);
+ _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
- call->local = conn->params.local;
- call->epoch = conn->proto.epoch;
- call->service_id = conn->params.service_id;
- call->in_clientflag = RXRPC_CLIENT_INITIATED;
+ rxrpc_start_call_timer(call);
+ _leave("");
+}
- _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
+/*
+ * Queue a call's work processor, getting a ref to pass to the work queue.
+ */
+bool rxrpc_queue_call(struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
+ int n = __atomic_add_unless(&call->usage, 1, 0);
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&call->processor))
+ trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
+}
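
rxrpc_queue_call() only takes a reference when the usage count is still non-zero (__atomic_add_unless(&call->usage, 1, 0)), so a call that has already dropped its last ref can't be revived by the work queue. A hedged userspace equivalent of that "get unless zero" pattern using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Increment *usage unless it is already zero; return the old value.
     * Equivalent in spirit to the kernel's __atomic_add_unless(v, 1, 0).
     */
    static int add_unless_zero(_Atomic int *usage)
    {
        int old = atomic_load(usage);

        while (old != 0 &&
               !atomic_compare_exchange_weak(usage, &old, old + 1))
            ;   /* the failed CAS reloads 'old'; retry */
        return old;
    }

    int main(void)
    {
        _Atomic int usage = 2;

        if (add_unless_zero(&usage) == 0)
            printf("call already dead, not queued\n");
        else
            printf("ref taken, usage now %d\n", atomic_load(&usage));

        atomic_store(&usage, 0);
        if (add_unless_zero(&usage) == 0)
            printf("call already dead, not queued\n");
        return 0;
    }
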
- call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
- add_timer(&call->lifetimer);
- _leave(" = %p {%d} [new]", call, call->debug_id);
- return call;
+/*
+ * Queue a call's work processor, passing the caller's ref to the work queue.
+ */
+bool __rxrpc_queue_call(struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_read(&call->usage);
+ ASSERTCMP(n, >=, 1);
+ if (rxrpc_queue_work(&call->processor))
+ trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
+}
-extant_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
- return call;
+/*
+ * Note the re-emergence of a call.
+ */
+void rxrpc_see_call(struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
+ if (call) {
+ int n = atomic_read(&call->usage);
-aborted_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -ECONNABORTED");
- return ERR_PTR(-ECONNABORTED);
-
-old_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -ECONNRESET [old]");
- return ERR_PTR(-ECONNRESET);
+ trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+ }
}
/*
- * detach a call from a socket and set up for release
+ * Note the addition of a ref on a call.
*/
-void rxrpc_release_call(struct rxrpc_call *call)
+void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&call->usage);
+
+ trace_rxrpc_call(call, op, n, here, NULL);
+}
+
+/*
+ * Detach a call from its owning socket.
+ */
+void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_sock *rx = call->socket;
+ bool put = false;
+ int i;
- _enter("{%d,%d,%d,%d}",
- call->debug_id, atomic_read(&call->usage),
- atomic_read(&call->ackr_not_idle),
- call->rx_first_oos);
+ _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
+
+ trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+ here, (const void *)call->flags);
+
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
spin_lock_bh(&call->lock);
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
spin_unlock_bh(&call->lock);
- /* dissociate from the socket
- * - the socket's ref on the call is passed to the death timer
- */
- _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
+ del_timer_sync(&call->timer);
- spin_lock(&conn->params.peer->lock);
- hlist_del_init(&call->error_link);
- spin_unlock(&conn->params.peer->lock);
+ /* Make sure we don't get any more notifications */
+ write_lock_bh(&rx->recvmsg_lock);
- write_lock_bh(&rx->call_lock);
- if (!list_empty(&call->accept_link)) {
+ if (!list_empty(&call->recvmsg_link)) {
_debug("unlinking once-pending call %p { e=%lx f=%lx }",
call, call->events, call->flags);
- ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
- list_del_init(&call->accept_link);
- sk_acceptq_removed(&rx->sk);
- } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- rb_erase(&call->sock_node, &rx->calls);
- memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
- clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+ list_del(&call->recvmsg_link);
+ put = true;
}
- write_unlock_bh(&rx->call_lock);
- /* free up the channel for reuse */
- write_lock_bh(&call->state_lock);
+ /* list_empty() must return false in rxrpc_notify_socket() */
+ call->recvmsg_link.next = NULL;
+ call->recvmsg_link.prev = NULL;
- if (call->state < RXRPC_CALL_COMPLETE &&
- call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
- _debug("+++ ABORTING STATE %d +++\n", call->state);
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = RX_CALL_DEAD;
- }
- write_unlock_bh(&call->state_lock);
+ write_unlock_bh(&rx->recvmsg_lock);
+ if (put)
+ rxrpc_put_call(call, rxrpc_call_put);
- rxrpc_disconnect_call(call);
+ write_lock(&rx->call_lock);
- /* clean up the Rx queue */
- if (!skb_queue_empty(&call->rx_queue) ||
- !skb_queue_empty(&call->rx_oos_queue)) {
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ rb_erase(&call->sock_node, &rx->calls);
+ memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
+ rxrpc_put_call(call, rxrpc_call_put_userid);
+ }
- _debug("purge Rx queues");
+ list_del(&call->sock_link);
+ write_unlock(&rx->call_lock);
- spin_lock_bh(&call->lock);
- while ((skb = skb_dequeue(&call->rx_queue)) ||
- (skb = skb_dequeue(&call->rx_oos_queue))) {
- spin_unlock_bh(&call->lock);
+ _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- sp = rxrpc_skb(skb);
- _debug("- zap %s %%%u #%u",
- rxrpc_pkts[sp->hdr.type],
- sp->hdr.serial, sp->hdr.seq);
- rxrpc_free_skb(skb);
- spin_lock_bh(&call->lock);
- }
- spin_unlock_bh(&call->lock);
+ if (conn)
+ rxrpc_disconnect_call(call);
- ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
+ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+ rxrpc_free_skb(call->rxtx_buffer[i],
+ (call->tx_phase ? rxrpc_skb_tx_cleaned :
+ rxrpc_skb_rx_cleaned));
+ call->rxtx_buffer[i] = NULL;
}
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- del_timer_sync(&call->lifetimer);
- call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
- add_timer(&call->deadspan);
-
_leave("");
}
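
Rather than reinitialising recvmsg_link, the release path above deliberately zeroes its next and prev pointers. With the usual circular doubly-linked list, list_empty() on a node tests node->next == node, so a NULL next makes the node look permanently linked and, per the comment, rxrpc_notify_socket() will never queue the call for recvmsg again. A tiny sketch of why that works, using a minimal list_head lookalike:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    /* An unlinked node points at itself, as with the kernel's lists. */
    static void init_node(struct list_head *n)
    {
        n->next = n;
        n->prev = n;
    }

    static bool node_unlinked(const struct list_head *n)
    {
        return n->next == n;   /* cf. list_empty() on the node itself */
    }

    int main(void)
    {
        struct list_head link;

        init_node(&link);
        printf("after init:   unlinked=%d\n", node_unlinked(&link)); /* 1 */

        /* Poison it the way rxrpc_release_call() does. */
        link.next = NULL;
        link.prev = NULL;
        printf("after poison: unlinked=%d\n", node_unlinked(&link)); /* 0 */
        return 0;
    }
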
/*
- * handle a dead call being ready for reaping
- */
-static void rxrpc_dead_call_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- write_lock_bh(&call->state_lock);
- call->state = RXRPC_CALL_DEAD;
- write_unlock_bh(&call->state_lock);
- rxrpc_put_call(call);
-}
-
-/*
- * mark a call as to be released, aborting it if it's still in progress
- * - called with softirqs disabled
- */
-static void rxrpc_mark_call_released(struct rxrpc_call *call)
-{
- bool sched;
-
- write_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD) {
- sched = false;
- if (call->state < RXRPC_CALL_COMPLETE) {
- _debug("abort call %p", call);
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = RX_CALL_DEAD;
- if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
- sched = true;
- }
- if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- sched = true;
- if (sched)
- rxrpc_queue_call(call);
- }
- write_unlock(&call->state_lock);
-}
-
-/*
* release all the calls associated with a socket
*/
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
struct rxrpc_call *call;
- struct rb_node *p;
_enter("%p", rx);
- read_lock_bh(&rx->call_lock);
-
- /* mark all the calls as no longer wanting incoming packets */
- for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
- call = rb_entry(p, struct rxrpc_call, sock_node);
- rxrpc_mark_call_released(call);
- }
-
- /* kill the not-yet-accepted incoming calls */
- list_for_each_entry(call, &rx->secureq, accept_link) {
- rxrpc_mark_call_released(call);
+ while (!list_empty(&rx->to_be_accepted)) {
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
+ list_del(&call->accept_link);
+ rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET);
+ rxrpc_put_call(call, rxrpc_call_put);
}
- list_for_each_entry(call, &rx->acceptq, accept_link) {
- rxrpc_mark_call_released(call);
+ while (!list_empty(&rx->sock_calls)) {
+ call = list_entry(rx->sock_calls.next,
+ struct rxrpc_call, sock_link);
+ rxrpc_get_call(call, rxrpc_call_got);
+ rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
}
- read_unlock_bh(&rx->call_lock);
_leave("");
}
/*
* release a call
*/
-void __rxrpc_put_call(struct rxrpc_call *call)
+void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
+ const void *here = __builtin_return_address(0);
+ int n;
+
ASSERT(call != NULL);
- _enter("%p{u=%d}", call, atomic_read(&call->usage));
+ n = atomic_dec_return(&call->usage);
+ trace_rxrpc_call(call, op, n, here, NULL);
+ ASSERTCMP(n, >=, 0);
+ if (n == 0) {
+ _debug("call %d dead", call->debug_id);
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- ASSERTCMP(atomic_read(&call->usage), >, 0);
+ write_lock(&rxrpc_call_lock);
+ list_del_init(&call->link);
+ write_unlock(&rxrpc_call_lock);
- if (atomic_dec_and_test(&call->usage)) {
- _debug("call %d dead", call->debug_id);
- WARN_ON(atomic_read(&call->skb_count) != 0);
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
- rxrpc_queue_work(&call->destroyer);
+ rxrpc_cleanup_call(call);
}
- _leave("");
}
/*
@@ -615,187 +538,70 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
- rxrpc_purge_queue(&call->rx_queue);
+ rxrpc_put_peer(call->peer);
+ kfree(call->rxtx_buffer);
+ kfree(call->rxtx_annotations);
kmem_cache_free(rxrpc_call_jar, call);
}
/*
* clean up a call
*/
-static void rxrpc_cleanup_call(struct rxrpc_call *call)
+void rxrpc_cleanup_call(struct rxrpc_call *call)
{
- _net("DESTROY CALL %d", call->debug_id);
+ int i;
- ASSERT(call->socket);
+ _net("DESTROY CALL %d", call->debug_id);
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
- del_timer_sync(&call->lifetimer);
- del_timer_sync(&call->deadspan);
- del_timer_sync(&call->ack_timer);
- del_timer_sync(&call->resend_timer);
+ del_timer_sync(&call->timer);
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
- ASSERTCMP(call->events, ==, 0);
- if (work_pending(&call->processor)) {
- _debug("defer destroy");
- rxrpc_queue_work(&call->destroyer);
- return;
- }
-
ASSERTCMP(call->conn, ==, NULL);
- if (call->acks_window) {
- _debug("kill Tx window %d",
- CIRC_CNT(call->acks_head, call->acks_tail,
- call->acks_winsz));
- smp_mb();
- while (CIRC_CNT(call->acks_head, call->acks_tail,
- call->acks_winsz) > 0) {
- struct rxrpc_skb_priv *sp;
- unsigned long _skb;
-
- _skb = call->acks_window[call->acks_tail] & ~1;
- sp = rxrpc_skb((struct sk_buff *)_skb);
- _debug("+++ clear Tx %u", sp->hdr.seq);
- rxrpc_free_skb((struct sk_buff *)_skb);
- call->acks_tail =
- (call->acks_tail + 1) & (call->acks_winsz - 1);
- }
-
- kfree(call->acks_window);
- }
+ /* Clean up the Rx/Tx buffer */
+ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
+ rxrpc_free_skb(call->rxtx_buffer[i],
+ (call->tx_phase ? rxrpc_skb_tx_cleaned :
+ rxrpc_skb_rx_cleaned));
- rxrpc_free_skb(call->tx_pending);
+ rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
- rxrpc_purge_queue(&call->rx_queue);
- ASSERT(skb_queue_empty(&call->rx_oos_queue));
- sock_put(&call->socket->sk);
call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
/*
- * destroy a call
- */
-static void rxrpc_destroy_call(struct work_struct *work)
-{
- struct rxrpc_call *call =
- container_of(work, struct rxrpc_call, destroyer);
-
- _enter("%p{%d,%d,%p}",
- call, atomic_read(&call->usage), call->channel, call->conn);
-
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
-
- write_lock_bh(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock_bh(&rxrpc_call_lock);
-
- rxrpc_cleanup_call(call);
- _leave("");
-}
-
-/*
- * preemptively destroy all the call records from a transport endpoint rather
- * than waiting for them to time out
+ * Make sure that all calls are gone.
*/
void __exit rxrpc_destroy_all_calls(void)
{
struct rxrpc_call *call;
_enter("");
- write_lock_bh(&rxrpc_call_lock);
+
+ if (list_empty(&rxrpc_calls))
+ return;
+
+ write_lock(&rxrpc_call_lock);
while (!list_empty(&rxrpc_calls)) {
call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
_debug("Zapping call %p", call);
+ rxrpc_see_call(call);
list_del_init(&call->link);
- switch (atomic_read(&call->usage)) {
- case 0:
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
- break;
- case 1:
- if (del_timer_sync(&call->deadspan) != 0 &&
- call->state != RXRPC_CALL_DEAD)
- rxrpc_dead_call_expired((unsigned long) call);
- if (call->state != RXRPC_CALL_DEAD)
- break;
- default:
- pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
- call, atomic_read(&call->usage),
- atomic_read(&call->ackr_not_idle),
- rxrpc_call_states[call->state],
- call->flags, call->events);
- if (!skb_queue_empty(&call->rx_queue))
- pr_err("Rx queue occupied\n");
- if (!skb_queue_empty(&call->rx_oos_queue))
- pr_err("OOS queue occupied\n");
- break;
- }
-
- write_unlock_bh(&rxrpc_call_lock);
- cond_resched();
- write_lock_bh(&rxrpc_call_lock);
- }
-
- write_unlock_bh(&rxrpc_call_lock);
- _leave("");
-}
-
-/*
- * handle call lifetime being exceeded
- */
-static void rxrpc_call_life_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
+ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+ call, atomic_read(&call->usage),
+ rxrpc_call_states[call->state],
+ call->flags, call->events);
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- _enter("{%d}", call->debug_id);
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE) {
- set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
- rxrpc_queue_call(call);
+ write_unlock(&rxrpc_call_lock);
+ cond_resched();
+ write_lock(&rxrpc_call_lock);
}
- read_unlock_bh(&call->state_lock);
-}
-
-/*
- * handle resend timer expiry
- * - may not take call->state_lock as this can deadlock against del_timer_sync()
- */
-static void rxrpc_resend_time_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
- rxrpc_queue_call(call);
-}
-
-/*
- * handle ACK timer expiry
- */
-static void rxrpc_ack_time_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
+ write_unlock(&rxrpc_call_lock);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 9e91f27b0d0f..60ef9605167e 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -7,6 +7,68 @@
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
+ *
+ *
+ * Client connections need to be cached for a little while after they've made a
+ * call so as to handle retransmitted DATA packets in case the server didn't
+ * receive the final ACK or terminating ABORT we sent it.
+ *
+ * Client connections can be in one of a number of cache states:
+ *
+ * (1) INACTIVE - The connection is not held in any list and may not have been
+ * exposed to the world. If it has been previously exposed, it was
+ * discarded from the idle list after expiring.
+ *
+ * (2) WAITING - The connection is waiting for the number of client conns to
+ * drop below the maximum capacity. Calls may be in progress upon it from
+ * when it was active and got culled.
+ *
+ * The connection is on the rxrpc_waiting_client_conns list which is kept
+ * in to-be-granted order. Culled conns with waiters go to the back of
+ * the queue just like new conns.
+ *
+ * (3) ACTIVE - The connection has at least one call in progress upon it, it
+ * may freely grant available channels to new calls and calls may be
+ * waiting on it for channels to become available.
+ *
+ * The connection is on the rxrpc_active_client_conns list which is kept
+ * in activation order for culling purposes.
+ *
+ * rxrpc_nr_active_client_conns is also incremented while in this state.
+ *
+ * (4) CULLED - The connection got summarily culled to try and free up
+ * capacity. Calls currently in progress on the connection are allowed to
+ * continue, but new calls will have to wait. There can be no waiters in
+ * this state - the conn would have to go to the WAITING state instead.
+ *
+ * (5) IDLE - The connection has no calls in progress upon it and must have
+ * been exposed to the world (i.e. the EXPOSED flag must be set). When it
+ * expires, the EXPOSED flag is cleared and the connection transitions to
+ * the INACTIVE state.
+ *
+ * The connection is on the rxrpc_idle_client_conns list which is kept in
+ * order of how soon they'll expire.
+ *
+ * There are flags of relevance to the cache:
+ *
+ * (1) EXPOSED - The connection ID got exposed to the world. If this flag is
+ * set, an extra ref is added to the connection preventing it from being
+ * reaped when it has no calls outstanding. This flag is cleared and the
+ * ref dropped when a conn is discarded from the idle list.
+ *
+ * This allows us to move terminal call state retransmission to the
+ * connection and to discard the call as soon as we think it is done
+ * with. It also gives us a chance to reuse the connection.
+ *
+ * (2) DONT_REUSE - The connection should be discarded as soon as possible and
+ * should not be reused. This is set when an exclusive connection is used
+ * or a call ID counter overflows.
+ *
+ * The caching state may only be changed if the cache lock is held.
+ *
+ * There are two idle client connection expiry durations. If the total number
+ * of connections is below the reap threshold, we use the normal duration; if
+ * it's above, we use the fast duration.
*/
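
The cache life cycle described above comes down to a small state machine over INACTIVE, WAITING, ACTIVE, CULLED and IDLE, modulated by the EXPOSED and DONT_REUSE flags. The sketch below is only an illustration of one plausible transition drawn from that description (what happens when the last call on a connection finishes); it is not the kernel's actual transition code, and the short display names merely echo the table added further down in this patch.

    #include <stdio.h>

    enum conn_cache_state {
        CONN_INACTIVE,  /* not on any list, possibly never exposed */
        CONN_WAITING,   /* queued until the active count drops below the cap */
        CONN_ACTIVE,    /* has at least one call; may grant channels */
        CONN_CULLED,    /* summarily dropped from the active list */
        CONN_IDLE,      /* exposed, no calls; waiting to expire to INACTIVE */
    };

    static const char *const state_names[] = {
        [CONN_INACTIVE] = "Inac",
        [CONN_WAITING]  = "Wait",
        [CONN_ACTIVE]   = "Actv",
        [CONN_CULLED]   = "Cull",
        [CONN_IDLE]     = "Idle",
    };

    /* A plausible reading of the description: when the last call finishes,
     * an exposed connection goes idle, an unexposed one goes inactive.
     */
    static enum conn_cache_state on_last_call_done(enum conn_cache_state s,
                                                   int exposed)
    {
        if (s == CONN_ACTIVE || s == CONN_CULLED)
            return exposed ? CONN_IDLE : CONN_INACTIVE;
        return s;
    }

    int main(void)
    {
        enum conn_cache_state s = on_last_call_done(CONN_ACTIVE, 1);

        printf("active conn, last call done, exposed -> %s\n",
               state_names[s]);
        return 0;
    }
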
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -16,27 +78,50 @@
#include <linux/timer.h>
#include "ar-internal.h"
+__read_mostly unsigned int rxrpc_max_client_connections = 1000;
+__read_mostly unsigned int rxrpc_reap_client_connections = 900;
+__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+
+static unsigned int rxrpc_nr_client_conns;
+static unsigned int rxrpc_nr_active_client_conns;
+static __read_mostly bool rxrpc_kill_all_client_conns;
+
+static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
+static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex);
+static LIST_HEAD(rxrpc_waiting_client_conns);
+static LIST_HEAD(rxrpc_active_client_conns);
+static LIST_HEAD(rxrpc_idle_client_conns);
+
/*
* We use machine-unique IDs for our client connections.
*/
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
+static void rxrpc_cull_active_client_conns(void);
+static void rxrpc_discard_expired_client_conns(struct work_struct *);
+
+static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
+ rxrpc_discard_expired_client_conns);
+
+const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5] = {
+ [RXRPC_CONN_CLIENT_INACTIVE] = "Inac",
+ [RXRPC_CONN_CLIENT_WAITING] = "Wait",
+ [RXRPC_CONN_CLIENT_ACTIVE] = "Actv",
+ [RXRPC_CONN_CLIENT_CULLED] = "Cull",
+ [RXRPC_CONN_CLIENT_IDLE] = "Idle",
+};
+
/*
* Get a connection ID and epoch for a client connection from the global pool.
* The connection struct pointer is then recorded in the idr radix tree. The
- * epoch is changed if this wraps.
- *
- * TODO: The IDR tree gets very expensive on memory if the connection IDs are
- * widely scattered throughout the number space, so we shall need to retire
- * connections that have, say, an ID more than four times the maximum number of
- * client conns away from the current allocation point to try and keep the IDs
- * concentrated. We will also need to retire connections from an old epoch.
+ * epoch doesn't change until the client is rebooted (or, at least, until the
+ * module is unloaded).
*/
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
gfp_t gfp)
{
- u32 epoch;
int id;
_enter("");
@@ -44,34 +129,18 @@ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
idr_preload(gfp);
spin_lock(&rxrpc_conn_id_lock);
- epoch = rxrpc_epoch;
-
- /* We could use idr_alloc_cyclic() here, but we really need to know
- * when the thing wraps so that we can advance the epoch.
- */
- if (rxrpc_client_conn_ids.cur == 0)
- rxrpc_client_conn_ids.cur = 1;
- id = idr_alloc(&rxrpc_client_conn_ids, conn,
- rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
- if (id < 0) {
- if (id != -ENOSPC)
- goto error;
- id = idr_alloc(&rxrpc_client_conn_ids, conn,
- 1, 0x40000000, GFP_NOWAIT);
- if (id < 0)
- goto error;
- epoch++;
- rxrpc_epoch = epoch;
- }
- rxrpc_client_conn_ids.cur = id + 1;
+ id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
+ 1, 0x40000000, GFP_NOWAIT);
+ if (id < 0)
+ goto error;
spin_unlock(&rxrpc_conn_id_lock);
idr_preload_end();
- conn->proto.epoch = epoch;
+ conn->proto.epoch = rxrpc_epoch;
conn->proto.cid = id << RXRPC_CIDSHIFT;
set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
- _leave(" [CID %x:%x]", epoch, conn->proto.cid);
+ _leave(" [CID %x]", conn->proto.cid);
return 0;
error:
@@ -114,8 +183,7 @@ void rxrpc_destroy_client_conn_ids(void)
}
/*
- * Allocate a client connection. The caller must take care to clear any
- * padding bytes in *cp.
+ * Allocate a client connection.
*/
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
@@ -131,6 +199,10 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
return ERR_PTR(-ENOMEM);
}
+ atomic_set(&conn->usage, 1);
+ if (cp->exclusive)
+ __set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+
conn->params = *cp;
conn->out_clientflag = RXRPC_CLIENT_INITIATED;
conn->state = RXRPC_CONN_CLIENT;
@@ -148,7 +220,7 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
goto error_2;
write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->link, &rxrpc_connections);
+ list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
write_unlock(&rxrpc_connection_lock);
/* We steal the caller's peer ref. */
@@ -156,6 +228,9 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
rxrpc_get_local(conn->params.local);
key_get(conn->params.key);
+ trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
_leave(" = %p", conn);
return conn;
@@ -170,32 +245,68 @@ error_0:
}
/*
- * find a connection for a call
- * - called in process context with IRQs enabled
+ * Determine if a connection may be reused.
*/
-int rxrpc_connect_call(struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
+{
+ int id_cursor, id, distance, limit;
+
+ if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
+ goto dont_reuse;
+
+ if (conn->proto.epoch != rxrpc_epoch)
+ goto mark_dont_reuse;
+
+ /* The IDR tree gets very expensive on memory if the connection IDs are
+ * widely scattered throughout the number space, so we shall want to
+ * kill off connections that, say, have an ID more than about four
+ * times the maximum number of client conns away from the current
+ * allocation point to try and keep the IDs concentrated.
+ */
+ id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+ id = conn->proto.cid >> RXRPC_CIDSHIFT;
+ distance = id - id_cursor;
+ if (distance < 0)
+ distance = -distance;
+ limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+ if (distance > limit)
+ goto mark_dont_reuse;
+
+ return true;
+
+mark_dont_reuse:
+ set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+dont_reuse:
+ return false;
+}
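To make the distance test above concrete, here is the same arithmetic as a standalone sketch; IDR_LEAF stands in for the kernel's IDR_SIZE, and the value 256 is an assumption used only for illustration:

#define IDR_LEAF 256	/* assumed stand-in for IDR_SIZE */

static unsigned int round_up_to(unsigned int x, unsigned int step)
{
	return (x + step - 1) / step * step;
}

/* Mirror of the reuse check: refuse a connection whose ID has drifted too far
 * from the allocation cursor. */
static int id_may_be_reused(int id, int id_cursor, unsigned int max_client_conns)
{
	int distance = id - id_cursor;
	unsigned int limit = round_up_to(max_client_conns, IDR_LEAF) * 4;

	if (distance < 0)
		distance = -distance;
	return (unsigned int)distance <= limit;
}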
+
+/*
+ * Create or find a client connection to use for a call.
+ *
+ * If we return with a connection, the call will be on its waiting list. It's
+ * left to the caller to assign a channel and wake up the call.
+ */
+static int rxrpc_get_client_conn(struct rxrpc_call *call,
+ struct rxrpc_conn_parameters *cp,
+ struct sockaddr_rxrpc *srx,
+ gfp_t gfp)
{
struct rxrpc_connection *conn, *candidate = NULL;
struct rxrpc_local *local = cp->local;
struct rb_node *p, **pp, *parent;
long diff;
- int chan;
-
- DECLARE_WAITQUEUE(myself, current);
+ int ret = -ENOMEM;
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
if (!cp->peer)
- return -ENOMEM;
+ goto error;
+ /* If the connection is not meant to be exclusive, search the available
+ * connections to see if the connection we want to use already exists.
+ */
if (!cp->exclusive) {
- /* Search for a existing client connection unless this is going
- * to be a connection that's used exclusively for a single call.
- */
_debug("search 1");
spin_lock(&local->client_conns_lock);
p = local->client_conns.rb_node;
@@ -206,39 +317,56 @@ int rxrpc_connect_call(struct rxrpc_call *call,
diff = (cmp(peer) ?:
cmp(key) ?:
cmp(security_level));
- if (diff < 0)
+#undef cmp
+ if (diff < 0) {
p = p->rb_left;
- else if (diff > 0)
+ } else if (diff > 0) {
p = p->rb_right;
- else
- goto found_extant_conn;
+ } else {
+ if (rxrpc_may_reuse_conn(conn) &&
+ rxrpc_get_connection_maybe(conn))
+ goto found_extant_conn;
+ /* The connection needs replacing. It's better
+ * to effect that when we have something to
+ * replace it with so that we don't have to
+ * rebalance the tree twice.
+ */
+ break;
+ }
}
spin_unlock(&local->client_conns_lock);
}
- /* We didn't find a connection or we want an exclusive one. */
- _debug("get new conn");
+ /* There wasn't a connection yet or we need an exclusive connection.
+ * We need to create a candidate and then potentially redo the search
+ * in case we're racing with another thread also trying to connect on a
+ * shareable connection.
+ */
+ _debug("new conn");
candidate = rxrpc_alloc_client_connection(cp, gfp);
- if (!candidate) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
+ if (IS_ERR(candidate)) {
+ ret = PTR_ERR(candidate);
+ goto error_peer;
}
+ /* Add the call to the new connection's waiting list in case we're
+ * going to have to wait for the connection to come live. It's our
+ * connection, so we want first dibs on the channel slots. We would
+ * normally have to take channel_lock but we do this before anyone else
+ * can see the connection.
+ */
+ list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+
if (cp->exclusive) {
- /* Assign the call on an exclusive connection to channel 0 and
- * don't add the connection to the endpoint's shareable conn
- * lookup tree.
- */
- _debug("exclusive chan 0");
- conn = candidate;
- atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
- spin_lock(&conn->channel_lock);
- chan = 0;
- goto found_channel;
+ call->conn = candidate;
+ call->security_ix = candidate->security_ix;
+ _leave(" = 0 [exclusive %d]", candidate->debug_id);
+ return 0;
}
- /* We need to redo the search before attempting to add a new connection
- * lest we race with someone else adding a conflicting instance.
+ /* Publish the new connection for userspace to find. We need to redo
+ * the search before doing this lest we race with someone else adding a
+ * conflicting instance.
*/
_debug("search 2");
spin_lock(&local->client_conns_lock);
@@ -249,124 +377,711 @@ int rxrpc_connect_call(struct rxrpc_call *call,
parent = *pp;
conn = rb_entry(parent, struct rxrpc_connection, client_node);
+#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
diff = (cmp(peer) ?:
cmp(key) ?:
cmp(security_level));
- if (diff < 0)
+#undef cmp
+ if (diff < 0) {
pp = &(*pp)->rb_left;
- else if (diff > 0)
+ } else if (diff > 0) {
pp = &(*pp)->rb_right;
- else
- goto found_extant_conn;
+ } else {
+ if (rxrpc_may_reuse_conn(conn) &&
+ rxrpc_get_connection_maybe(conn))
+ goto found_extant_conn;
+ /* The old connection is from an outdated epoch. */
+ _debug("replace conn");
+ clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
+ rb_replace_node(&conn->client_node,
+ &candidate->client_node,
+ &local->client_conns);
+ trace_rxrpc_client(conn, -1, rxrpc_client_replace);
+ goto candidate_published;
+ }
}
- /* The second search also failed; simply add the new connection with
- * the new call in channel 0. Note that we need to take the channel
- * lock before dropping the client conn lock.
- */
_debug("new conn");
- set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
rb_link_node(&candidate->client_node, parent, pp);
rb_insert_color(&candidate->client_node, &local->client_conns);
-attached:
- conn = candidate;
- candidate = NULL;
- atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
- spin_lock(&conn->channel_lock);
+candidate_published:
+ set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
+ call->conn = candidate;
+ call->security_ix = candidate->security_ix;
spin_unlock(&local->client_conns_lock);
- chan = 0;
+ _leave(" = 0 [new %d]", candidate->debug_id);
+ return 0;
-found_channel:
- _debug("found chan");
- call->conn = conn;
- call->channel = chan;
- call->epoch = conn->proto.epoch;
- call->cid = conn->proto.cid | chan;
- call->call_id = ++conn->channels[chan].call_counter;
- conn->channels[chan].call_id = call->call_id;
- rcu_assign_pointer(conn->channels[chan].call, call);
+ /* We come here if we found a suitable connection already in existence.
+ * Discard any candidate we may have allocated, and try to get a
+ * channel on this one.
+ */
+found_extant_conn:
+ _debug("found conn");
+ spin_unlock(&local->client_conns_lock);
- _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
+ if (candidate) {
+ trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
+ rxrpc_put_connection(candidate);
+ candidate = NULL;
+ }
+ spin_lock(&conn->channel_lock);
+ call->conn = conn;
+ call->security_ix = conn->security_ix;
+ list_add(&call->chan_wait_link, &conn->waiting_calls);
spin_unlock(&conn->channel_lock);
+ _leave(" = 0 [extant %d]", conn->debug_id);
+ return 0;
+
+error_peer:
rxrpc_put_peer(cp->peer);
cp->peer = NULL;
- _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
- return 0;
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Activate a connection.
+ */
+static void rxrpc_activate_conn(struct rxrpc_connection *conn)
+{
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
+ conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
+ rxrpc_nr_active_client_conns++;
+ list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
+}
+
+/*
+ * Attempt to animate a connection for a new call.
+ *
+ * If it's not exclusive, the connection is in the endpoint tree, and we're in
+ * the conn's list of those waiting to grab a channel. There is, however, a
+ * limit on the number of live connections allowed at any one time, so we may
+ * have to wait for capacity to become available.
+ *
+ * Note that a connection on the waiting queue might *also* have active
+ * channels if it has been culled to make space and then re-requested by a new
+ * call.
+ */
+static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
+{
+ unsigned int nr_conns;
+
+ _enter("%d,%d", conn->debug_id, conn->cache_state);
+
+ if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
+ goto out;
+
+ spin_lock(&rxrpc_client_conn_cache_lock);
+
+ nr_conns = rxrpc_nr_client_conns;
+ if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_count);
+ rxrpc_nr_client_conns = nr_conns + 1;
+ }
+
+ switch (conn->cache_state) {
+ case RXRPC_CONN_CLIENT_ACTIVE:
+ case RXRPC_CONN_CLIENT_WAITING:
+ break;
+
+ case RXRPC_CONN_CLIENT_INACTIVE:
+ case RXRPC_CONN_CLIENT_CULLED:
+ case RXRPC_CONN_CLIENT_IDLE:
+ if (nr_conns >= rxrpc_max_client_connections)
+ goto wait_for_capacity;
+ goto activate_conn;
+
+ default:
+ BUG();
+ }
+
+out_unlock:
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+out:
+ _leave(" [%d]", conn->cache_state);
+ return;
+
+activate_conn:
+ _debug("activate");
+ rxrpc_activate_conn(conn);
+ goto out_unlock;
+
+wait_for_capacity:
+ _debug("wait");
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
+ conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
+ list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
+ goto out_unlock;
+}
+
+/*
+ * Deactivate a channel.
+ */
+static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
+ unsigned int channel)
+{
+ struct rxrpc_channel *chan = &conn->channels[channel];
+
+ rcu_assign_pointer(chan->call, NULL);
+ conn->active_chans &= ~(1 << channel);
+}
+
+/*
+ * Assign a channel to the call at the front of the queue and wake the call up.
+ * We don't increment the callNumber counter until this number has been exposed
+ * to the world.
+ */
+static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
+ unsigned int channel)
+{
+ struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
+ struct rxrpc_call, chan_wait_link);
+ u32 call_id = chan->call_counter + 1;
+
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
+
+ write_lock_bh(&call->state_lock);
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+ write_unlock_bh(&call->state_lock);
+
+ rxrpc_see_call(call);
+ list_del_init(&call->chan_wait_link);
+ conn->active_chans |= 1 << channel;
+ call->peer = rxrpc_get_peer(conn->params.peer);
+ call->cid = conn->proto.cid | channel;
+ call->call_id = call_id;
+
+ _net("CONNECT call %08x:%08x as call %d on conn %d",
+ call->cid, call->call_id, call->debug_id, conn->debug_id);
- /* We found a potentially suitable connection already in existence. If
- * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
- * reaper), discard any candidate we may have allocated, and try to get
- * a channel on this one, otherwise we have to replace it.
+ /* Paired with the read barrier in rxrpc_wait_for_channel(). This
+ * orders cid and epoch in the connection with respect to call_id without the
+ * need to take the channel_lock.
+ *
+ * We provisionally assign a callNumber at this point, but we don't
+ * confirm it until the call is about to be exposed.
+ *
+ * TODO: Pair with a barrier in the data_ready handler when that looks
+ * at the call ID through a connection channel.
*/
-found_extant_conn:
- _debug("found conn");
- if (!rxrpc_get_connection_maybe(conn)) {
- set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
- rb_replace_node(&conn->client_node,
- &candidate->client_node,
- &local->client_conns);
- clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
- goto attached;
+ smp_wmb();
+ chan->call_id = call_id;
+ rcu_assign_pointer(chan->call, call);
+ wake_up(&call->waitq);
+}
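The smp_wmb() above pairs with the read barrier in rxrpc_wait_for_channel(): everything stored before call_id is published must be visible to the waiter that observes the non-zero call_id. A rough userspace analogue using C11 release/acquire in place of the explicit barriers (the struct and names are illustrative only):

#include <stdatomic.h>

struct chan_slot {
	unsigned int		cid;		/* written before publication */
	_Atomic unsigned int	call_id;	/* 0 means "not yet granted" */
};

static void grant(struct chan_slot *slot, unsigned int cid, unsigned int call_id)
{
	slot->cid = cid;
	/* Release ordering: cid is visible before call_id becomes non-zero. */
	atomic_store_explicit(&slot->call_id, call_id, memory_order_release);
}

static unsigned int wait_for_grant(struct chan_slot *slot)
{
	unsigned int id;

	/* The real code sleeps on a waitqueue; a spin keeps the sketch short. */
	while (!(id = atomic_load_explicit(&slot->call_id, memory_order_acquire)))
		;
	return slot->cid;	/* safe to read once call_id has been seen */
}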
+
+/*
+ * Assign channels and callNumbers to waiting calls with channel_lock
+ * held by caller.
+ */
+static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
+{
+ u8 avail, mask;
+
+ switch (conn->cache_state) {
+ case RXRPC_CONN_CLIENT_ACTIVE:
+ mask = RXRPC_ACTIVE_CHANS_MASK;
+ break;
+ default:
+ return;
}
- spin_unlock(&local->client_conns_lock);
+ while (!list_empty(&conn->waiting_calls) &&
+ (avail = ~conn->active_chans,
+ avail &= mask,
+ avail != 0))
+ rxrpc_activate_one_channel(conn, __ffs(avail));
+}
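The active_chans field used above is a small occupancy bitmask, one bit per channel, and __ffs() picks the lowest free one. A minimal standalone equivalent; the four-channel mask value is assumed here for illustration:

#include <stdio.h>

#define ACTIVE_CHANS_MASK 0x0f	/* assumed: four channels, bits 0-3 */

static int pick_free_channel(unsigned int active)
{
	unsigned int avail = ~active & ACTIVE_CHANS_MASK;

	if (!avail)
		return -1;		/* all channels busy */
	return __builtin_ctz(avail);	/* lowest set bit, like __ffs() */
}

int main(void)
{
	printf("%d\n", pick_free_channel(0x0b));	/* 1011b busy -> prints 2 */
	return 0;
}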
+
+/*
+ * Assign channels and callNumbers to waiting calls.
+ */
+static void rxrpc_activate_channels(struct rxrpc_connection *conn)
+{
+ _enter("%d", conn->debug_id);
- rxrpc_put_connection(candidate);
+ trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);
+
+ if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
+ return;
+
+ spin_lock(&conn->channel_lock);
+ rxrpc_activate_channels_locked(conn);
+ spin_unlock(&conn->channel_lock);
+ _leave("");
+}
+
+/*
+ * Wait for a callNumber and a channel to be granted to a call.
+ */
+static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
+{
+ int ret = 0;
+
+ _enter("%d", call->debug_id);
+
+ if (!call->call_id) {
+ DECLARE_WAITQUEUE(myself, current);
- if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
if (!gfpflags_allow_blocking(gfp)) {
- rxrpc_put_connection(conn);
- _leave(" = -EAGAIN");
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
- add_wait_queue(&conn->channel_wq, &myself);
+ add_wait_queue_exclusive(&call->waitq, &myself);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_add_unless(&conn->avail_chans, -1, 0))
+ if (call->call_id)
+ break;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
break;
- if (signal_pending(current))
- goto interrupted;
+ }
schedule();
}
- remove_wait_queue(&conn->channel_wq, &myself);
+ remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
}
- /* The connection allegedly now has a free channel and we can now
- * attach the call to it.
- */
+ /* Paired with the write barrier in rxrpc_activate_one_channel(). */
+ smp_rmb();
+
+out:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * find a connection for a call
+ * - called in process context with IRQs enabled
+ */
+int rxrpc_connect_call(struct rxrpc_call *call,
+ struct rxrpc_conn_parameters *cp,
+ struct sockaddr_rxrpc *srx,
+ gfp_t gfp)
+{
+ int ret;
+
+ _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+
+ rxrpc_discard_expired_client_conns(NULL);
+ rxrpc_cull_active_client_conns();
+
+ ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+ if (ret < 0)
+ return ret;
+
+ rxrpc_animate_client_conn(call->conn);
+ rxrpc_activate_channels(call->conn);
+
+ ret = rxrpc_wait_for_channel(call, gfp);
+ if (ret < 0)
+ rxrpc_disconnect_client_call(call);
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Note that a connection is about to be exposed to the world. Once it is
+ * exposed, we maintain an extra ref on it that stops it from being summarily
+ * discarded before it's (a) had a chance to deal with retransmission and (b)
+ * had a chance at re-use (the per-connection security negotiation is
+ * expensive).
+ */
+static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
+ unsigned int channel)
+{
+ if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
+ rxrpc_get_connection(conn);
+ }
+}
+
+/*
+ * Note that a call, and thus a connection, is about to be exposed to the
+ * world.
+ */
+void rxrpc_expose_client_call(struct rxrpc_call *call)
+{
+ unsigned int channel = call->cid & RXRPC_CHANNELMASK;
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_channel *chan = &conn->channels[channel];
+
+ if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ /* Mark the call ID as being used. If the callNumber counter
+ * exceeds ~2 billion, we kill the connection after its
+ * outstanding calls have finished so that the counter doesn't
+ * wrap.
+ */
+ chan->call_counter++;
+ if (chan->call_counter >= INT_MAX)
+ set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+ rxrpc_expose_client_conn(conn, channel);
+ }
+}
+
+/*
+ * Disconnect a client call.
+ */
+void rxrpc_disconnect_client_call(struct rxrpc_call *call)
+{
+ unsigned int channel = call->cid & RXRPC_CHANNELMASK;
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_channel *chan = &conn->channels[channel];
+
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
+ call->conn = NULL;
+
spin_lock(&conn->channel_lock);
- for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
- if (!conn->channels[chan].call)
- goto found_channel;
- BUG();
+ /* Calls that have never actually been assigned a channel can simply be
+ * discarded. If the conn didn't get used either, it will follow
+ * immediately unless someone else grabs it in the meantime.
+ */
+ if (!list_empty(&call->chan_wait_link)) {
+ _debug("call is waiting");
+ ASSERTCMP(call->call_id, ==, 0);
+ ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
+ list_del_init(&call->chan_wait_link);
+
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);
+
+ /* We must deactivate or idle the connection if it's now
+ * waiting for nothing.
+ */
+ spin_lock(&rxrpc_client_conn_cache_lock);
+ if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
+ list_empty(&conn->waiting_calls) &&
+ !conn->active_chans)
+ goto idle_connection;
+ goto out;
+ }
+
+ ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
+
+ /* If a client call was exposed to the world, we save the result for
+ * retransmission.
+ *
+ * We use a barrier here so that the call number and abort code can be
+ * read without needing to take a lock.
+ *
+ * TODO: Make the incoming packet handler check this and handle
+ * terminal retransmission without requiring access to the call.
+ */
+ if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ _debug("exposed %u,%u", call->call_id, call->abort_code);
+ __rxrpc_disconnect_call(conn, call);
+ }
+
+ /* See if we can pass the channel directly to another call. */
+ if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
+ !list_empty(&conn->waiting_calls)) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
+ rxrpc_activate_one_channel(conn, channel);
+ goto out_2;
+ }
+
+ /* Things are more complex and we need the cache lock. We might be
+ * able to simply idle the conn or it might now be lurking on the wait
+ * list. It might even get moved back to the active list whilst we're
+ * waiting for the lock.
+ */
+ spin_lock(&rxrpc_client_conn_cache_lock);
+
+ switch (conn->cache_state) {
+ case RXRPC_CONN_CLIENT_ACTIVE:
+ if (list_empty(&conn->waiting_calls)) {
+ rxrpc_deactivate_one_channel(conn, channel);
+ if (!conn->active_chans) {
+ rxrpc_nr_active_client_conns--;
+ goto idle_connection;
+ }
+ goto out;
+ }
+
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
+ rxrpc_activate_one_channel(conn, channel);
+ goto out;
-interrupted:
- remove_wait_queue(&conn->channel_wq, &myself);
- __set_current_state(TASK_RUNNING);
+ case RXRPC_CONN_CLIENT_CULLED:
+ rxrpc_deactivate_one_channel(conn, channel);
+ ASSERT(list_empty(&conn->waiting_calls));
+ if (!conn->active_chans)
+ goto idle_connection;
+ goto out;
+
+ case RXRPC_CONN_CLIENT_WAITING:
+ rxrpc_deactivate_one_channel(conn, channel);
+ goto out;
+
+ default:
+ BUG();
+ }
+
+out:
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+out_2:
+ spin_unlock(&conn->channel_lock);
rxrpc_put_connection(conn);
- rxrpc_put_peer(cp->peer);
- cp->peer = NULL;
- _leave(" = -ERESTARTSYS");
- return -ERESTARTSYS;
+ _leave("");
+ return;
+
+idle_connection:
+ /* As no channels remain active, the connection gets deactivated
+ * immediately or moved to the idle list for a short while.
+ */
+ if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
+ conn->idle_timestamp = jiffies;
+ conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
+ list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
+ if (rxrpc_idle_client_conns.next == &conn->cache_link &&
+ !rxrpc_kill_all_client_conns)
+ queue_delayed_work(rxrpc_workqueue,
+ &rxrpc_client_conn_reap,
+ rxrpc_conn_idle_client_expiry);
+ } else {
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
+ conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
+ list_del_init(&conn->cache_link);
+ }
+ goto out;
}
/*
- * Remove a client connection from the local endpoint's tree, thereby removing
- * it as a target for reuse for new client calls.
+ * Clean up a dead client connection.
*/
-void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
+static struct rxrpc_connection *
+rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
+ struct rxrpc_connection *next = NULL;
struct rxrpc_local *local = conn->params.local;
+ unsigned int nr_conns;
- spin_lock(&local->client_conns_lock);
- if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
- rb_erase(&conn->client_node, &local->client_conns);
- spin_unlock(&local->client_conns_lock);
+ trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
+
+ if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
+ spin_lock(&local->client_conns_lock);
+ if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
+ &conn->flags))
+ rb_erase(&conn->client_node, &local->client_conns);
+ spin_unlock(&local->client_conns_lock);
+ }
rxrpc_put_client_connection_id(conn);
+
+ ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);
+
+ if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
+ spin_lock(&rxrpc_client_conn_cache_lock);
+ nr_conns = --rxrpc_nr_client_conns;
+
+ if (nr_conns < rxrpc_max_client_connections &&
+ !list_empty(&rxrpc_waiting_client_conns)) {
+ next = list_entry(rxrpc_waiting_client_conns.next,
+ struct rxrpc_connection, cache_link);
+ rxrpc_get_connection(next);
+ rxrpc_activate_conn(next);
+ }
+
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+ }
+
+ rxrpc_kill_connection(conn);
+ if (next)
+ rxrpc_activate_channels(next);
+
+ /* We need to get rid of the temporary ref we took upon next, but we
+ * can't call rxrpc_put_connection() recursively.
+ */
+ return next;
+}
+
+/*
+ * Clean up a dead client connection.
+ */
+void rxrpc_put_client_conn(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ do {
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+ if (n > 0)
+ return;
+ ASSERTCMP(n, >=, 0);
+
+ conn = rxrpc_put_one_client_conn(conn);
+ } while (conn);
+}
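The loop above is deliberate: dropping the last reference on one connection may activate a waiting connection and leave a temporary reference that must also be dropped, so the final put iterates rather than recursing. The same shape in a generic, hypothetical form (struct obj and its cleanup() hook are illustrative, not rxrpc types):

#include <stdatomic.h>

struct obj {
	_Atomic int	usage;
	struct obj	*(*cleanup)(struct obj *obj);	/* returns follow-on obj or NULL */
};

static void obj_put(struct obj *obj)
{
	while (obj) {
		if (atomic_fetch_sub(&obj->usage, 1) != 1)
			return;			/* other references remain */
		obj = obj->cleanup(obj);	/* may hand back another obj to put */
	}
}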
+
+/*
+ * Kill the longest-active client connections to make room for new ones.
+ */
+static void rxrpc_cull_active_client_conns(void)
+{
+ struct rxrpc_connection *conn;
+ unsigned int nr_conns = rxrpc_nr_client_conns;
+ unsigned int nr_active, limit;
+
+ _enter("");
+
+ ASSERTCMP(nr_conns, >=, 0);
+ if (nr_conns < rxrpc_max_client_connections) {
+ _leave(" [ok]");
+ return;
+ }
+ limit = rxrpc_reap_client_connections;
+
+ spin_lock(&rxrpc_client_conn_cache_lock);
+ nr_active = rxrpc_nr_active_client_conns;
+
+ while (nr_active > limit) {
+ ASSERT(!list_empty(&rxrpc_active_client_conns));
+ conn = list_entry(rxrpc_active_client_conns.next,
+ struct rxrpc_connection, cache_link);
+ ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);
+
+ if (list_empty(&conn->waiting_calls)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
+ conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
+ list_del_init(&conn->cache_link);
+ } else {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
+ conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
+ list_move_tail(&conn->cache_link,
+ &rxrpc_waiting_client_conns);
+ }
+
+ nr_active--;
+ }
+
+ rxrpc_nr_active_client_conns = nr_active;
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+ ASSERTCMP(nr_active, >=, 0);
+ _leave(" [culled]");
+}
+
+/*
+ * Discard expired client connections from the idle list. Each conn in the
+ * idle list has been exposed and holds an extra ref because of that.
+ *
+ * This may be called from conn setup or from a work item, so it may run
+ * concurrently with itself; the discard lock below prevents overlapping runs.
+ */
+static void rxrpc_discard_expired_client_conns(struct work_struct *work)
+{
+ struct rxrpc_connection *conn;
+ unsigned long expiry, conn_expires_at, now;
+ unsigned int nr_conns;
+ bool did_discard = false;
+
+ _enter("%c", work ? 'w' : 'n');
+
+ if (list_empty(&rxrpc_idle_client_conns)) {
+ _leave(" [empty]");
+ return;
+ }
+
+ /* Don't double up on the discarding */
+ if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) {
+ _leave(" [already]");
+ return;
+ }
+
+ /* We keep an estimate of what the number of conns ought to be after
+ * we've discarded some so that we don't overdo the discarding.
+ */
+ nr_conns = rxrpc_nr_client_conns;
+
+next:
+ spin_lock(&rxrpc_client_conn_cache_lock);
+
+ if (list_empty(&rxrpc_idle_client_conns))
+ goto out;
+
+ conn = list_entry(rxrpc_idle_client_conns.next,
+ struct rxrpc_connection, cache_link);
+ ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
+
+ if (!rxrpc_kill_all_client_conns) {
+ /* If the number of connections is over the reap limit, we
+ * expedite discard by reducing the expiry timeout. We must,
+ * however, have at least a short grace period to be able to do
+ * final-ACK or ABORT retransmission.
+ */
+ expiry = rxrpc_conn_idle_client_expiry;
+ if (nr_conns > rxrpc_reap_client_connections)
+ expiry = rxrpc_conn_idle_client_fast_expiry;
+
+ conn_expires_at = conn->idle_timestamp + expiry;
+
+ now = READ_ONCE(jiffies);
+ if (time_after(conn_expires_at, now))
+ goto not_yet_expired;
+ }
+
+ trace_rxrpc_client(conn, -1, rxrpc_client_discard);
+ if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
+ BUG();
+ conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
+ list_del_init(&conn->cache_link);
+
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+
+ /* When we cleared the EXPOSED flag, we took on responsibility for the
+ * reference that the flag held on the usage count. We deal with that here.
+ * If someone re-sets the flag and re-gets the ref, that's fine.
+ */
+ rxrpc_put_connection(conn);
+ did_discard = true;
+ nr_conns--;
+ goto next;
+
+not_yet_expired:
+ /* The connection at the front of the queue hasn't yet expired, so
+ * schedule the work item for that point if we discarded something.
+ *
+ * We don't worry if the work item is already scheduled - it can look
+ * after rescheduling itself at a later time. We could cancel it, but
+ * then things get messier.
+ */
+ _debug("not yet");
+ if (!rxrpc_kill_all_client_conns)
+ queue_delayed_work(rxrpc_workqueue,
+ &rxrpc_client_conn_reap,
+ conn_expires_at - now);
+
+out:
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxrpc_client_conn_discard_mutex);
+ _leave("");
+}
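The expiry decision above relies on the kernel's wrap-safe jiffies comparison. A self-contained model of the normal-versus-fast expiry choice, using placeholder periods rather than the real rxrpc sysctl values:

#include <stdbool.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)	/* wrap-safe, as in the kernel */

#define IDLE_EXPIRY		(2 * 60 * 100UL)	/* placeholder: 2 min at HZ=100 */
#define IDLE_FAST_EXPIRY	(2 * 100UL)		/* placeholder: 2 s at HZ=100 */

static bool conn_not_yet_expired(unsigned long idle_timestamp, unsigned long now,
				 unsigned int nr_conns, unsigned int reap_limit)
{
	unsigned long expiry = nr_conns > reap_limit ? IDLE_FAST_EXPIRY : IDLE_EXPIRY;

	return time_after(idle_timestamp + expiry, now);
}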
+
+/*
+ * Preemptively destroy all the client connection records rather than waiting
+ * for them to time out
+ */
+void __exit rxrpc_destroy_all_client_connections(void)
+{
+ _enter("");
+
+ spin_lock(&rxrpc_client_conn_cache_lock);
+ rxrpc_kill_all_client_conns = true;
+ spin_unlock(&rxrpc_client_conn_cache_lock);
+
+ cancel_delayed_work(&rxrpc_client_conn_reap);
+
+ if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
+ _debug("destroy: queue failed");
+
+ _leave("");
}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index cee0f35bc1cf..3f9d8d7ec632 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -15,20 +15,128 @@
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
/*
+ * Retransmit terminal ACK or ABORT of the previous call.
+ */
+static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_channel *chan;
+ struct msghdr msg;
+ struct kvec iov;
+ struct {
+ struct rxrpc_wire_header whdr;
+ union {
+ struct {
+ __be32 code;
+ } abort;
+ struct {
+ struct rxrpc_ackpacket ack;
+ u8 padding[3];
+ struct rxrpc_ackinfo info;
+ };
+ };
+ } __attribute__((packed)) pkt;
+ size_t len;
+ u32 serial, mtu, call_id;
+
+ _enter("%d", conn->debug_id);
+
+ chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+
+ /* If the last call got moved on whilst we were waiting to run, just
+ * ignore this packet.
+ */
+ call_id = READ_ONCE(chan->last_call);
+ /* Sync with __rxrpc_disconnect_call() */
+ smp_rmb();
+ if (call_id != sp->hdr.callNumber)
+ return;
+
+ msg.msg_name = &conn->params.peer->srx.transport;
+ msg.msg_namelen = conn->params.peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ pkt.whdr.epoch = htonl(sp->hdr.epoch);
+ pkt.whdr.cid = htonl(sp->hdr.cid);
+ pkt.whdr.callNumber = htonl(sp->hdr.callNumber);
+ pkt.whdr.seq = 0;
+ pkt.whdr.type = chan->last_type;
+ pkt.whdr.flags = conn->out_clientflag;
+ pkt.whdr.userStatus = 0;
+ pkt.whdr.securityIndex = conn->security_ix;
+ pkt.whdr._rsvd = 0;
+ pkt.whdr.serviceId = htons(chan->last_service_id);
+
+ len = sizeof(pkt.whdr);
+ switch (chan->last_type) {
+ case RXRPC_PACKET_TYPE_ABORT:
+ pkt.abort.code = htonl(chan->last_abort);
+ len += sizeof(pkt.abort);
+ break;
+
+ case RXRPC_PACKET_TYPE_ACK:
+ mtu = conn->params.peer->if_mtu;
+ mtu -= conn->params.peer->hdrsize;
+ pkt.ack.bufferSpace = 0;
+ pkt.ack.maxSkew = htons(skb->priority);
+ pkt.ack.firstPacket = htonl(chan->last_seq);
+ pkt.ack.previousPacket = htonl(chan->last_seq - 1);
+ pkt.ack.serial = htonl(sp->hdr.serial);
+ pkt.ack.reason = RXRPC_ACK_DUPLICATE;
+ pkt.ack.nAcks = 0;
+ pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
+ pkt.info.maxMTU = htonl(mtu);
+ pkt.info.rwind = htonl(rxrpc_rx_window_size);
+ pkt.info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ pkt.whdr.flags |= RXRPC_SLOW_START_OK;
+ len += sizeof(pkt.ack) + sizeof(pkt.info);
+ break;
+ }
+
+ /* Resync with __rxrpc_disconnect_call() and check that the last call
+ * didn't get advanced whilst we were filling out the packets.
+ */
+ smp_rmb();
+ if (READ_ONCE(chan->last_call) != call_id)
+ return;
+
+ iov.iov_base = &pkt;
+ iov.iov_len = len;
+
+ serial = atomic_inc_return(&conn->serial);
+ pkt.whdr.serial = htonl(serial);
+
+ switch (chan->last_type) {
+ case RXRPC_PACKET_TYPE_ABORT:
+ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+ break;
+ case RXRPC_PACKET_TYPE_ACK:
+ trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
+ RXRPC_ACK_DUPLICATE, 0);
+ _proto("Tx ACK %%%u [re]", serial);
+ break;
+ }
+
+ kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len);
+ _leave("");
+ return;
+}
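The retransmission above builds a single buffer holding a wire header plus either an abort code or an ACK body, then trims the send length to whichever tail was filled in. A simplified stand-in for that pattern; the struct layout and type codes below are illustrative and are not the real rx wire format:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct mini_pkt {
	struct {
		uint32_t cid;
		uint32_t call_number;
		uint8_t  type;			/* 1 = abort, 2 = ack (made-up codes) */
	} __attribute__((packed)) hdr;
	union {
		uint32_t abort_code;
		struct {
			uint8_t reason;
			uint8_t n_acks;
		} __attribute__((packed)) ack;
	};
} __attribute__((packed));

static size_t build_terminal_pkt(struct mini_pkt *pkt, int is_abort,
				 uint32_t cid, uint32_t call, uint32_t code)
{
	size_t len = sizeof(pkt->hdr);

	memset(pkt, 0, sizeof(*pkt));
	pkt->hdr.cid = htonl(cid);
	pkt->hdr.call_number = htonl(call);
	pkt->hdr.type = is_abort ? 1 : 2;
	if (is_abort) {
		pkt->abort_code = htonl(code);
		len += sizeof(pkt->abort_code);
	} else {
		pkt->ack.reason = 1;		/* e.g. "duplicate" */
		len += sizeof(pkt->ack);
	}
	return len;	/* used as the kvec/sendmsg length */
}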
+
+/*
* pass a connection-level abort onto all calls on that connection
*/
-static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
- u32 abort_code)
+static void rxrpc_abort_calls(struct rxrpc_connection *conn,
+ enum rxrpc_call_completion compl,
+ u32 abort_code, int error)
{
struct rxrpc_call *call;
int i;
@@ -41,19 +149,15 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
call = rcu_dereference_protected(
conn->channels[i].call,
lockdep_is_held(&conn->channel_lock));
- write_lock_bh(&call->state_lock);
- if (call->state <= RXRPC_CALL_COMPLETE) {
- call->state = state;
- if (state == RXRPC_CALL_LOCALLY_ABORTED) {
- call->local_abort = conn->local_abort;
- set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- } else {
- call->remote_abort = conn->remote_abort;
- set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- }
- rxrpc_queue_call(call);
+ if (call) {
+ if (compl == RXRPC_CALL_LOCALLY_ABORTED)
+ trace_rxrpc_abort("CON", call->cid,
+ call->call_id, 0,
+ abort_code, error);
+ if (rxrpc_set_call_completion(call, compl,
+ abort_code, error))
+ rxrpc_notify_socket(call);
}
- write_unlock_bh(&call->state_lock);
}
spin_unlock(&conn->channel_lock);
@@ -78,17 +182,16 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
/* generate a connection-level abort */
spin_lock_bh(&conn->state_lock);
- if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) {
- conn->state = RXRPC_CONN_LOCALLY_ABORTED;
- conn->error = error;
- spin_unlock_bh(&conn->state_lock);
- } else {
+ if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
spin_unlock_bh(&conn->state_lock);
_leave(" = 0 [already dead]");
return 0;
}
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
+ conn->state = RXRPC_CONN_LOCALLY_ABORTED;
+ spin_unlock_bh(&conn->state_lock);
+
+ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len;
@@ -132,17 +235,18 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
/*
* mark a call as being on a now-secured channel
- * - must be called with softirqs disabled
+ * - must be called with BHs disabled.
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
_enter("%p", call);
if (call) {
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_SECURED, &call->events))
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
+ write_lock_bh(&call->state_lock);
+ if (call->state == RXRPC_CALL_SERVER_SECURING) {
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ rxrpc_notify_socket(call);
+ }
+ write_unlock_bh(&call->state_lock);
}
}
@@ -159,22 +263,28 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
int loop, ret;
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
- kleave(" = -ECONNABORTED [%u]", conn->state);
+ _leave(" = -ECONNABORTED [%u]", conn->state);
return -ECONNABORTED;
}
_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_DATA:
+ case RXRPC_PACKET_TYPE_ACK:
+ rxrpc_conn_retransmit_call(conn, skb);
+ return 0;
+
case RXRPC_PACKET_TYPE_ABORT:
- if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &wtmp, sizeof(wtmp)) < 0)
return -EPROTO;
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
- abort_code);
+ abort_code, ECONNABORTED);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -199,14 +309,16 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE;
+ spin_unlock(&conn->state_lock);
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure(
rcu_dereference_protected(
conn->channels[loop].call,
lockdep_is_held(&conn->channel_lock)));
+ } else {
+ spin_unlock(&conn->state_lock);
}
- spin_unlock(&conn->state_lock);
spin_unlock(&conn->channel_lock);
return 0;
@@ -269,7 +381,7 @@ void rxrpc_process_connection(struct work_struct *work)
u32 abort_code = RX_PROTOCOL_ERROR;
int ret;
- _enter("{%d}", conn->debug_id);
+ rxrpc_see_connection(conn);
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
@@ -277,6 +389,7 @@ void rxrpc_process_connection(struct work_struct *work)
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
ret = rxrpc_process_event(conn, skb, &abort_code);
switch (ret) {
case -EPROTO:
@@ -287,7 +400,7 @@ void rxrpc_process_connection(struct work_struct *work)
goto requeue_and_leave;
case -ECONNABORTED:
default:
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
break;
}
}
@@ -304,91 +417,7 @@ requeue_and_leave:
protocol_error:
if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
goto requeue_and_leave;
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
_leave(" [EPROTO]");
goto out;
}
-
-/*
- * put a packet up for transport-level abort
- */
-void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
-{
- CHECK_SLAB_OKAY(&local->usage);
-
- skb_queue_tail(&local->reject_queue, skb);
- rxrpc_queue_local(local);
-}
-
-/*
- * reject packets through the local endpoint
- */
-void rxrpc_reject_packets(struct rxrpc_local *local)
-{
- union {
- struct sockaddr sa;
- struct sockaddr_in sin;
- } sa;
- struct rxrpc_skb_priv *sp;
- struct rxrpc_wire_header whdr;
- struct sk_buff *skb;
- struct msghdr msg;
- struct kvec iov[2];
- size_t size;
- __be32 code;
-
- _enter("%d", local->debug_id);
-
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
- iov[1].iov_base = &code;
- iov[1].iov_len = sizeof(code);
- size = sizeof(whdr) + sizeof(code);
-
- msg.msg_name = &sa;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa.sa_family = local->srx.transport.family;
- switch (sa.sa.sa_family) {
- case AF_INET:
- msg.msg_namelen = sizeof(sa.sin);
- break;
- default:
- msg.msg_namelen = 0;
- break;
- }
-
- memset(&whdr, 0, sizeof(whdr));
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
-
- while ((skb = skb_dequeue(&local->reject_queue))) {
- sp = rxrpc_skb(skb);
- switch (sa.sa.sa_family) {
- case AF_INET:
- sa.sin.sin_port = udp_hdr(skb)->source;
- sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
- code = htonl(skb->priority);
-
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.serviceId = htons(sp->hdr.serviceId);
- whdr.flags = sp->hdr.flags;
- whdr.flags ^= RXRPC_CLIENT_INITIATED;
- whdr.flags &= RXRPC_CLIENT_INITIATED;
-
- kernel_sendmsg(local->socket, &msg, iov, 2, size);
- break;
-
- default:
- break;
- }
-
- rxrpc_free_skb(skb);
- }
-
- _leave("");
-}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 896d84493a05..e1e83af47866 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -1,6 +1,6 @@
-/* RxRPC virtual connection handler
+/* RxRPC virtual connection handler, common bits.
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -15,8 +15,6 @@
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
@@ -27,9 +25,12 @@ unsigned int rxrpc_connection_expiry = 10 * 60;
static void rxrpc_connection_reaper(struct work_struct *work);
LIST_HEAD(rxrpc_connections);
+LIST_HEAD(rxrpc_connection_proc_list);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
+static void rxrpc_destroy_connection(struct rcu_head *);
+
/*
* allocate a new connection
*/
@@ -41,21 +42,18 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
if (conn) {
+ INIT_LIST_HEAD(&conn->cache_link);
spin_lock_init(&conn->channel_lock);
- init_waitqueue_head(&conn->channel_wq);
+ INIT_LIST_HEAD(&conn->waiting_calls);
INIT_WORK(&conn->processor, &rxrpc_process_connection);
+ INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
skb_queue_head_init(&conn->rx_queue);
conn->security = &rxrpc_no_security;
spin_lock_init(&conn->state_lock);
- /* We maintain an extra ref on the connection whilst it is
- * on the rxrpc_connections list.
- */
- atomic_set(&conn->usage, 2);
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
- atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
conn->size_align = 4;
- conn->header_size = sizeof(struct rxrpc_wire_header);
+ conn->idle_timestamp = jiffies;
}
_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
@@ -135,6 +133,16 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
srx.transport.sin.sin_addr.s_addr)
goto not_found;
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ if (peer->srx.transport.sin6.sin6_port !=
+ srx.transport.sin6.sin6_port ||
+ memcmp(&peer->srx.transport.sin6.sin6_addr,
+ &srx.transport.sin6.sin6_addr,
+ sizeof(struct in6_addr)) != 0)
+ goto not_found;
+ break;
+#endif
default:
BUG();
}
@@ -153,25 +161,32 @@ not_found:
* terminates. The caller must hold the channel_lock and must release the
* call's ref on the connection.
*/
-void __rxrpc_disconnect_call(struct rxrpc_call *call)
+void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
+ struct rxrpc_call *call)
{
- struct rxrpc_connection *conn = call->conn;
- struct rxrpc_channel *chan = &conn->channels[call->channel];
+ struct rxrpc_channel *chan =
+ &conn->channels[call->cid & RXRPC_CHANNELMASK];
- _enter("%d,%d", conn->debug_id, call->channel);
+ _enter("%d,%x", conn->debug_id, call->cid);
if (rcu_access_pointer(chan->call) == call) {
/* Save the result of the call so that we can repeat it if necessary
* through the channel, whilst disposing of the actual call record.
*/
- chan->last_result = call->local_abort;
+ chan->last_service_id = call->service_id;
+ if (call->abort_code) {
+ chan->last_abort = call->abort_code;
+ chan->last_type = RXRPC_PACKET_TYPE_ABORT;
+ } else {
+ chan->last_seq = call->rx_hard_ack;
+ chan->last_type = RXRPC_PACKET_TYPE_ACK;
+ }
+ /* Sync with rxrpc_conn_retransmit(). */
smp_wmb();
chan->last_call = chan->call_id;
chan->call_id = chan->call_counter;
rcu_assign_pointer(chan->call, NULL);
- atomic_inc(&conn->avail_chans);
- wake_up(&conn->channel_wq);
}
_leave("");
@@ -185,34 +200,122 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
+ spin_lock_bh(&conn->params.peer->lock);
+ hlist_del_init(&call->error_link);
+ spin_unlock_bh(&conn->params.peer->lock);
+
+ if (rxrpc_is_client_call(call))
+ return rxrpc_disconnect_client_call(call);
+
spin_lock(&conn->channel_lock);
- __rxrpc_disconnect_call(call);
+ __rxrpc_disconnect_call(conn, call);
spin_unlock(&conn->channel_lock);
call->conn = NULL;
+ conn->idle_timestamp = jiffies;
rxrpc_put_connection(conn);
}
/*
- * release a virtual connection
+ * Kill off a connection.
*/
-void rxrpc_put_connection(struct rxrpc_connection *conn)
+void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
- if (!conn)
- return;
+ ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
+ !rcu_access_pointer(conn->channels[1].call) &&
+ !rcu_access_pointer(conn->channels[2].call) &&
+ !rcu_access_pointer(conn->channels[3].call));
+ ASSERT(list_empty(&conn->cache_link));
- _enter("%p{u=%d,d=%d}",
- conn, atomic_read(&conn->usage), conn->debug_id);
+ write_lock(&rxrpc_connection_lock);
+ list_del_init(&conn->proc_link);
+ write_unlock(&rxrpc_connection_lock);
- ASSERTCMP(atomic_read(&conn->usage), >, 1);
+ /* Drain the Rx queue. Note that even though we've unpublished, an
+ * incoming packet could still be being added to our Rx queue, so we
+ * will need to drain it again in the RCU cleanup handler.
+ */
+ rxrpc_purge_queue(&conn->rx_queue);
- conn->put_time = ktime_get_seconds();
- if (atomic_dec_return(&conn->usage) == 1) {
- _debug("zombie");
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ /* Leave final destruction to RCU. The connection processor work item
+ * must carry a ref on the connection to prevent us getting here whilst
+ * it is queued or running.
+ */
+ call_rcu(&conn->rcu, rxrpc_destroy_connection);
+}
+
+/*
+ * Queue a connection's work processor, getting a ref to pass to the work
+ * queue.
+ */
+bool rxrpc_queue_conn(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n = __atomic_add_unless(&conn->usage, 1, 0);
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&conn->processor))
+ trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+ else
+ rxrpc_put_connection(conn);
+ return true;
+}
+
+/*
+ * Note the re-emergence of a connection.
+ */
+void rxrpc_see_connection(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ if (conn) {
+ int n = atomic_read(&conn->usage);
+
+ trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+ }
+}
+
+/*
+ * Get a ref on a connection.
+ */
+void rxrpc_get_connection(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&conn->usage);
+
+ trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+}
+
+/*
+ * Try to get a ref on a connection.
+ */
+struct rxrpc_connection *
+rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+
+ if (conn) {
+ int n = __atomic_add_unless(&conn->usage, 1, 0);
+ if (n > 0)
+ trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+ else
+ conn = NULL;
}
+ return conn;
+}
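__atomic_add_unless(&conn->usage, 1, 0) takes a reference only if the count has not already reached zero. For reference, the usual portable rendering of that primitive with C11 atomics (names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static bool get_ref_maybe(_Atomic int *usage)
{
	int old = atomic_load(usage);

	do {
		if (old == 0)
			return false;	/* too late: leave it to the destructor */
	} while (!atomic_compare_exchange_weak(usage, &old, old + 1));

	return true;
}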
- _leave("");
+/*
+ * Release a service connection
+ */
+void rxrpc_put_service_conn(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+ ASSERTCMP(n, >=, 0);
+ if (n == 0)
+ rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}
/*
@@ -242,19 +345,19 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
}
/*
- * reap dead connections
+ * reap dead service connections
*/
static void rxrpc_connection_reaper(struct work_struct *work)
{
struct rxrpc_connection *conn, *_p;
- unsigned long reap_older_than, earliest, put_time, now;
+ unsigned long reap_older_than, earliest, idle_timestamp, now;
LIST_HEAD(graveyard);
_enter("");
- now = ktime_get_seconds();
- reap_older_than = now - rxrpc_connection_expiry;
+ now = jiffies;
+ reap_older_than = now - rxrpc_connection_expiry * HZ;
earliest = ULONG_MAX;
write_lock(&rxrpc_connection_lock);
@@ -262,11 +365,17 @@ static void rxrpc_connection_reaper(struct work_struct *work)
ASSERTCMP(atomic_read(&conn->usage), >, 0);
if (likely(atomic_read(&conn->usage) > 1))
continue;
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
+ continue;
+
+ idle_timestamp = READ_ONCE(conn->idle_timestamp);
+ _debug("reap CONN %d { u=%d,t=%ld }",
+ conn->debug_id, atomic_read(&conn->usage),
+ (long)reap_older_than - (long)idle_timestamp);
- put_time = READ_ONCE(conn->put_time);
- if (time_after(put_time, reap_older_than)) {
- if (time_before(put_time, earliest))
- earliest = put_time;
+ if (time_after(idle_timestamp, reap_older_than)) {
+ if (time_before(idle_timestamp, earliest))
+ earliest = idle_timestamp;
continue;
}
@@ -277,7 +386,7 @@ static void rxrpc_connection_reaper(struct work_struct *work)
continue;
if (rxrpc_conn_is_client(conn))
- rxrpc_unpublish_client_conn(conn);
+ BUG();
else
rxrpc_unpublish_service_conn(conn);
@@ -287,9 +396,9 @@ static void rxrpc_connection_reaper(struct work_struct *work)
if (earliest != ULONG_MAX) {
_debug("reschedule reaper %ld", (long) earliest - now);
- ASSERTCMP(earliest, >, now);
+ ASSERT(time_after(earliest, now));
rxrpc_queue_delayed_work(&rxrpc_connection_reap,
- (earliest - now) * HZ);
+ earliest - now);
}
while (!list_empty(&graveyard)) {
@@ -298,16 +407,15 @@ static void rxrpc_connection_reaper(struct work_struct *work)
list_del_init(&conn->link);
ASSERTCMP(atomic_read(&conn->usage), ==, 0);
- skb_queue_purge(&conn->rx_queue);
- call_rcu(&conn->rcu, rxrpc_destroy_connection);
+ rxrpc_kill_connection(conn);
}
_leave("");
}
/*
- * preemptively destroy all the connection records rather than waiting for them
- * to time out
+ * preemptively destroy all the service connection records rather than
+ * waiting for them to time out
*/
void __exit rxrpc_destroy_all_connections(void)
{
@@ -316,6 +424,8 @@ void __exit rxrpc_destroy_all_connections(void)
_enter("");
+ rxrpc_destroy_all_client_connections();
+
rxrpc_connection_expiry = 0;
cancel_delayed_work(&rxrpc_connection_reap);
rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
@@ -330,6 +440,8 @@ void __exit rxrpc_destroy_all_connections(void)
write_unlock(&rxrpc_connection_lock);
BUG_ON(leak);
+ ASSERT(list_empty(&rxrpc_connection_proc_list));
+
/* Make sure the local and peer records pinned by any dying connections
* are released.
*/
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index fd9027ccba8f..eef551f40dc2 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -65,9 +65,8 @@ done:
* Insert a service connection into a peer's tree, thereby making it a target
* for incoming packets.
*/
-static struct rxrpc_connection *
-rxrpc_publish_service_conn(struct rxrpc_peer *peer,
- struct rxrpc_connection *conn)
+static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn)
{
struct rxrpc_connection *cursor = NULL;
struct rxrpc_conn_proto k = conn->proto;
@@ -96,7 +95,7 @@ conn_published:
set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
write_sequnlock_bh(&peer->service_conn_lock);
_leave(" = %d [new]", conn->debug_id);
- return conn;
+ return;
found_extant_conn:
if (atomic_read(&cursor->usage) == 0)
@@ -119,100 +118,58 @@ replace_old_connection:
}
/*
- * get a record of an incoming connection
+ * Preallocate a service connection. The connection is placed on the proc and
+ * reap lists so that we don't have to get the lock from BH context.
*/
-struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
- struct sockaddr_rxrpc *srx,
- struct sk_buff *skb)
+struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
{
- struct rxrpc_connection *conn;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_peer *peer;
- const char *new = "old";
-
- _enter("");
+ struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
- peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
- if (!peer) {
- _debug("no peer");
- return ERR_PTR(-EBUSY);
- }
+ if (conn) {
+ /* We maintain an extra ref on the connection whilst it is on
+ * the rxrpc_connections list.
+ */
+ conn->state = RXRPC_CONN_SERVICE_PREALLOC;
+ atomic_set(&conn->usage, 2);
- ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);
-
- rcu_read_lock();
- peer = rxrpc_lookup_peer_rcu(local, srx);
- if (peer) {
- conn = rxrpc_find_service_conn_rcu(peer, skb);
- if (conn) {
- if (sp->hdr.securityIndex != conn->security_ix)
- goto security_mismatch_rcu;
- if (rxrpc_get_connection_maybe(conn))
- goto found_extant_connection_rcu;
-
- /* The conn has expired but we can't remove it without
- * the appropriate lock, so we attempt to replace it
- * when we have a new candidate.
- */
- }
+ write_lock(&rxrpc_connection_lock);
+ list_add_tail(&conn->link, &rxrpc_connections);
+ list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
+ write_unlock(&rxrpc_connection_lock);
- if (!rxrpc_get_peer_maybe(peer))
- peer = NULL;
+ trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ atomic_read(&conn->usage),
+ __builtin_return_address(0));
}
- rcu_read_unlock();
- if (!peer) {
- peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
- if (!peer)
- goto enomem;
- }
+ return conn;
+}
- /* We don't have a matching record yet. */
- conn = rxrpc_alloc_connection(GFP_NOIO);
- if (!conn)
- goto enomem_peer;
+/*
+ * Set up an incoming connection. This is called in BH context with the RCU
+ * read lock held.
+ */
+void rxrpc_new_incoming_connection(struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ _enter("");
conn->proto.epoch = sp->hdr.epoch;
conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
- conn->params.local = local;
- conn->params.peer = peer;
conn->params.service_id = sp->hdr.serviceId;
conn->security_ix = sp->hdr.securityIndex;
conn->out_clientflag = 0;
- conn->state = RXRPC_CONN_SERVICE;
- if (conn->params.service_id)
+ if (conn->security_ix)
conn->state = RXRPC_CONN_SERVICE_UNSECURED;
-
- rxrpc_get_local(local);
-
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->link, &rxrpc_connections);
- write_unlock(&rxrpc_connection_lock);
+ else
+ conn->state = RXRPC_CONN_SERVICE;
/* Make the connection a target for incoming packets. */
- rxrpc_publish_service_conn(peer, conn);
-
- new = "new";
-
-success:
- _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);
- _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
- return conn;
-
-found_extant_connection_rcu:
- rcu_read_unlock();
- goto success;
-
-security_mismatch_rcu:
- rcu_read_unlock();
- _leave(" = -EKEYREJECTED");
- return ERR_PTR(-EKEYREJECTED);
+ rxrpc_publish_service_conn(conn->params.peer, conn);
-enomem_peer:
- rxrpc_put_peer(peer);
-enomem:
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}
/*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 70bb77818dea..3ad9f75031e3 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1,6 +1,6 @@
/* RxRPC packet reception
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -27,550 +27,920 @@
#include <net/net_namespace.h>
#include "ar-internal.h"
+static void rxrpc_proto_abort(const char *why,
+ struct rxrpc_call *call, rxrpc_seq_t seq)
+{
+ if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ rxrpc_queue_call(call);
+ }
+}
+
/*
- * queue a packet for recvmsg to pass to userspace
- * - the caller must hold a lock on call->lock
- * - must not be called with interrupts disabled (sk_filter() disables BH's)
- * - eats the packet whether successful or not
- * - there must be just one reference to the packet, which the caller passes to
- * this function
+ * Do TCP-style congestion management [RFC 5681].
*/
-int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
- bool force, bool terminal)
+static void rxrpc_congestion_management(struct rxrpc_call *call,
+ struct sk_buff *skb,
+ struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t acked_serial)
{
- struct rxrpc_skb_priv *sp;
- struct rxrpc_sock *rx = call->socket;
- struct sock *sk;
- int ret;
+ enum rxrpc_congest_change change = rxrpc_cong_no_change;
+ unsigned int cumulative_acks = call->cong_cumul_acks;
+ unsigned int cwnd = call->cong_cwnd;
+ bool resend = false;
+
+ summary->flight_size =
+ (call->tx_top - call->tx_hard_ack) - summary->nr_acks;
+
+ if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
+ summary->retrans_timeo = true;
+ call->cong_ssthresh = max_t(unsigned int,
+ summary->flight_size / 2, 2);
+ cwnd = 1;
+ if (cwnd >= call->cong_ssthresh &&
+ call->cong_mode == RXRPC_CALL_SLOW_START) {
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_tstamp = skb->tstamp;
+ cumulative_acks = 0;
+ }
+ }
- _enter(",,%d,%d", force, terminal);
+ cumulative_acks += summary->nr_new_acks;
+ cumulative_acks += summary->nr_rot_new_acks;
+ if (cumulative_acks > 255)
+ cumulative_acks = 255;
+
+ summary->mode = call->cong_mode;
+ summary->cwnd = call->cong_cwnd;
+ summary->ssthresh = call->cong_ssthresh;
+ summary->cumulative_acks = cumulative_acks;
+ summary->dup_acks = call->cong_dup_acks;
+
+ switch (call->cong_mode) {
+ case RXRPC_CALL_SLOW_START:
+ if (summary->nr_nacks > 0)
+ goto packet_loss_detected;
+ if (summary->cumulative_acks > 0)
+ cwnd += 1;
+ if (cwnd >= call->cong_ssthresh) {
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_tstamp = skb->tstamp;
+ }
+ goto out;
- ASSERT(!irqs_disabled());
+ case RXRPC_CALL_CONGEST_AVOIDANCE:
+ if (summary->nr_nacks > 0)
+ goto packet_loss_detected;
- sp = rxrpc_skb(skb);
- ASSERTCMP(sp->call, ==, call);
-
- /* if we've already posted the terminal message for a call, then we
- * don't post any more */
- if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
- _debug("already terminated");
- ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
- rxrpc_free_skb(skb);
- return 0;
- }
-
- sk = &rx->sk;
-
- if (!force) {
- /* cast skb->rcvbuf to unsigned... It's pointless, but
- * reduces number of warnings when compiling with -W
- * --ANK */
-// ret = -ENOBUFS;
-// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-// (unsigned int) sk->sk_rcvbuf)
-// goto out;
-
- ret = sk_filter(sk, skb);
- if (ret < 0)
+ /* We analyse the number of packets that get ACK'd per RTT
+ * period and increase the window if we managed to fill it.
+ */
+ if (call->peer->rtt_usage == 0)
goto out;
- }
+ if (ktime_before(skb->tstamp,
+ ktime_add_ns(call->cong_tstamp,
+ call->peer->rtt)))
+ goto out_no_clear_ca;
+ change = rxrpc_cong_rtt_window_end;
+ call->cong_tstamp = skb->tstamp;
+ if (cumulative_acks >= cwnd)
+ cwnd++;
+ goto out;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
- !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- call->socket->sk.sk_state != RXRPC_CLOSE) {
- skb->destructor = rxrpc_packet_destructor;
- skb->dev = NULL;
- skb->sk = sk;
- atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ case RXRPC_CALL_PACKET_LOSS:
+ if (summary->nr_nacks == 0)
+ goto resume_normality;
- if (terminal) {
- _debug("<<<< TERMINAL MESSAGE >>>>");
- set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
+ if (summary->new_low_nack) {
+ change = rxrpc_cong_new_low_nack;
+ call->cong_dup_acks = 1;
+ if (call->cong_extra > 1)
+ call->cong_extra = 1;
+ goto send_extra_data;
}
- /* allow interception by a kernel service */
- if (rx->interceptor) {
- rx->interceptor(sk, call->user_call_ID, skb);
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- } else {
- _net("post skb %p", skb);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ call->cong_dup_acks++;
+ if (call->cong_dup_acks < 3)
+ goto send_extra_data;
+
+ change = rxrpc_cong_begin_retransmission;
+ call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
+ call->cong_ssthresh = max_t(unsigned int,
+ summary->flight_size / 2, 2);
+ cwnd = call->cong_ssthresh + 3;
+ call->cong_extra = 0;
+ call->cong_dup_acks = 0;
+ resend = true;
+ goto out;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ case RXRPC_CALL_FAST_RETRANSMIT:
+ if (!summary->new_low_nack) {
+ if (summary->nr_new_acks == 0)
+ cwnd += 1;
+ call->cong_dup_acks++;
+ if (call->cong_dup_acks == 2) {
+ change = rxrpc_cong_retransmit_again;
+ call->cong_dup_acks = 0;
+ resend = true;
+ }
+ } else {
+ change = rxrpc_cong_progress;
+ cwnd = call->cong_ssthresh;
+ if (summary->nr_nacks == 0)
+ goto resume_normality;
}
- skb = NULL;
- } else {
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ goto out;
+
+ default:
+ BUG();
+ goto out;
}
- ret = 0;
+resume_normality:
+ change = rxrpc_cong_cleared_nacks;
+ call->cong_dup_acks = 0;
+ call->cong_extra = 0;
+ call->cong_tstamp = skb->tstamp;
+ if (cwnd < call->cong_ssthresh)
+ call->cong_mode = RXRPC_CALL_SLOW_START;
+ else
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
- rxrpc_free_skb(skb);
+ cumulative_acks = 0;
+out_no_clear_ca:
+ if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
+ cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
+ call->cong_cwnd = cwnd;
+ call->cong_cumul_acks = cumulative_acks;
+ trace_rxrpc_congest(call, summary, acked_serial, change);
+ if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ rxrpc_queue_call(call);
+ return;
- _leave(" = %d", ret);
- return ret;
+packet_loss_detected:
+ change = rxrpc_cong_saw_nack;
+ call->cong_mode = RXRPC_CALL_PACKET_LOSS;
+ call->cong_dup_acks = 0;
+ goto send_extra_data;
+
+send_extra_data:
+ /* Send some previously unsent DATA if we have some to advance the ACK
+ * state.
+ */
+ if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
+ RXRPC_TX_ANNO_LAST ||
+ summary->nr_acks != call->tx_top - call->tx_hard_ack) {
+ call->cong_extra++;
+ wake_up(&call->waitq);
+ }
+ goto out_no_clear_ca;
}
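
For illustration, the window update that rxrpc_congestion_management() applies can be sketched in plain userspace C. The sketch below only covers slow start, congestion avoidance and the retransmission-timeout reset; it omits the packet-loss and fast-retransmit states handled above, and every name in it is local to the example rather than a kernel definition.

/*
 * Illustrative userspace sketch (not kernel code): slow start grows the
 * window by one per ACK batch until ssthresh, congestion avoidance grows it
 * by one per RTT once the window was filled, and a retransmission timeout
 * halves ssthresh and collapses the window to 1 (RFC 5681 s3.1).
 */
#include <stdbool.h>
#include <stdio.h>

struct cong_state {
	unsigned int cwnd;		/* congestion window, in packets */
	unsigned int ssthresh;		/* slow-start threshold */
	bool avoidance;			/* false = slow start */
};

static void on_ack_batch(struct cong_state *c, unsigned int flight_size,
			 unsigned int cumulative_acks, bool rtt_window_ended,
			 bool retrans_timeout)
{
	if (retrans_timeout) {
		/* ssthresh = max(FlightSize / 2, 2), cwnd back to 1 */
		c->ssthresh = flight_size / 2 > 2 ? flight_size / 2 : 2;
		c->cwnd = 1;
		c->avoidance = false;
		return;
	}

	if (!c->avoidance) {
		c->cwnd++;			/* slow start: grow per batch */
		if (c->cwnd >= c->ssthresh)
			c->avoidance = true;	/* switch to congestion avoidance */
	} else if (rtt_window_ended && cumulative_acks >= c->cwnd) {
		c->cwnd++;			/* avoidance: grow once per RTT */
	}
}

int main(void)
{
	struct cong_state c = { .cwnd = 1, .ssthresh = 8, .avoidance = false };
	unsigned int i;

	for (i = 0; i < 12; i++) {
		on_ack_batch(&c, c.cwnd, c.cwnd, true, false);
		printf("batch %2u: cwnd=%u mode=%s\n", i, c.cwnd,
		       c.avoidance ? "CongAvoid" : "SlowStart");
	}
	return 0;
}
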
/*
- * process a DATA packet, posting the packet to the appropriate queue
- * - eats the packet if successful
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
*/
-static int rxrpc_fast_process_data(struct rxrpc_call *call,
- struct sk_buff *skb, u32 seq)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
+ int skew)
{
- struct rxrpc_skb_priv *sp;
- bool terminal;
- int ret, ackbit, ack;
- u32 serial;
- u8 flags;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ ktime_t now = skb->tstamp;
- _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
+ if (call->peer->rtt_usage < 3 ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ true, true,
+ rxrpc_propose_ack_ping_for_params);
+}
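
A minimal sketch of the rule this helper applies, assuming a plain nanosecond timestamp: probe the peer if fewer than three RTT samples exist or the last probe is more than a second old. The constant and function names below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PING_REPEAT_NS 1000000000ULL	/* 1 second, matching the code above */

static bool params_ping_due(unsigned int rtt_samples,
			    uint64_t last_ping_ns, uint64_t now_ns)
{
	return rtt_samples < 3 || last_ping_ns + PING_REPEAT_NS < now_ns;
}

int main(void)
{
	/* Two RTT samples so far: probe regardless of elapsed time. */
	printf("%d\n", params_ping_due(2, 0, 1));
	/* Enough samples, but the last probe was 2s ago: probe again. */
	printf("%d\n", params_ping_due(3, 0, 2000000000ULL));
	return 0;
}
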
- sp = rxrpc_skb(skb);
- ASSERTCMP(sp->call, ==, NULL);
- flags = sp->hdr.flags;
- serial = sp->hdr.serial;
+/*
+ * Apply a hard ACK by advancing the Tx window.
+ */
+static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ struct rxrpc_ack_summary *summary)
+{
+ struct sk_buff *skb, *list = NULL;
+ int ix;
+ u8 annotation;
+
+ if (call->acks_lowest_nak == call->tx_hard_ack) {
+ call->acks_lowest_nak = to;
+ } else if (before_eq(call->acks_lowest_nak, to)) {
+ summary->new_low_nack = true;
+ call->acks_lowest_nak = to;
+ }
spin_lock(&call->lock);
- if (call->state > RXRPC_CALL_COMPLETE)
- goto discard;
+ while (before(call->tx_hard_ack, to)) {
+ call->tx_hard_ack++;
+ ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ annotation = call->rxtx_annotations[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+ call->rxtx_buffer[ix] = NULL;
+ call->rxtx_annotations[ix] = 0;
+ skb->next = list;
+ list = skb;
+
+ if (annotation & RXRPC_TX_ANNO_LAST)
+ set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
+ summary->nr_rot_new_acks++;
+ }
- ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
- ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
- ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
+ spin_unlock(&call->lock);
- if (seq < call->rx_data_post) {
- _debug("dup #%u [-%u]", seq, call->rx_data_post);
- ack = RXRPC_ACK_DUPLICATE;
- ret = -ENOBUFS;
- goto discard_and_ack;
- }
+ trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ rxrpc_transmit_rotate_last :
+ rxrpc_transmit_rotate));
+ wake_up(&call->waitq);
- /* we may already have the packet in the out of sequence queue */
- ackbit = seq - (call->rx_data_eaten + 1);
- ASSERTCMP(ackbit, >=, 0);
- if (__test_and_set_bit(ackbit, call->ackr_window)) {
- _debug("dup oos #%u [%u,%u]",
- seq, call->rx_data_eaten, call->rx_data_post);
- ack = RXRPC_ACK_DUPLICATE;
- goto discard_and_ack;
+ while (list) {
+ skb = list;
+ list = skb->next;
+ skb->next = NULL;
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+}
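
The rotation above is a walk over a power-of-two ring indexed by (sequence & mask). A minimal userspace sketch follows, with an illustrative ring size and plain free() standing in for the kernel's skb handling.

#include <stdio.h>
#include <stdlib.h>

#define BUFF_SIZE 64			/* must be a power of two */
#define BUFF_MASK (BUFF_SIZE - 1)

struct tx_ring {
	void *slot[BUFF_SIZE];
	unsigned int hard_ack;		/* last packet hard-ACK'd */
	unsigned int top;		/* last packet queued */
};

/* Advance the hard-ACK point to 'to', releasing every slot it passes. */
static void rotate_tx_window(struct tx_ring *r, unsigned int to)
{
	while (r->hard_ack != to) {
		unsigned int ix = ++r->hard_ack & BUFF_MASK;

		free(r->slot[ix]);
		r->slot[ix] = NULL;
	}
}

int main(void)
{
	struct tx_ring r = { .hard_ack = 0, .top = 5 };
	unsigned int seq;

	for (seq = 1; seq <= r.top; seq++)
		r.slot[seq & BUFF_MASK] = malloc(16);

	rotate_tx_window(&r, 3);	/* packets 1-3 are now hard-ACK'd */
	printf("hard_ack=%u top=%u\n", r.hard_ack, r.top);

	rotate_tx_window(&r, r.top);	/* the rest are ACK'd too */
	return 0;
}
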
- if (seq >= call->ackr_win_top) {
- _debug("exceed #%u [%u]", seq, call->ackr_win_top);
- __clear_bit(ackbit, call->ackr_window);
- ack = RXRPC_ACK_EXCEEDS_WINDOW;
- goto discard_and_ack;
- }
+/*
+ * End the transmission phase of a call.
+ *
+ * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
+ * or a final ACK packet.
+ */
+static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+ const char *abort_why)
+{
- if (seq == call->rx_data_expect) {
- clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
- call->rx_data_expect++;
- } else if (seq > call->rx_data_expect) {
- _debug("oos #%u [%u]", seq, call->rx_data_expect);
- call->rx_data_expect = seq + 1;
- if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
- ack = RXRPC_ACK_OUT_OF_SEQUENCE;
- goto enqueue_and_ack;
- }
- goto enqueue_packet;
- }
+ ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
- if (seq != call->rx_data_post) {
- _debug("ahead #%u [%u]", seq, call->rx_data_post);
- goto enqueue_packet;
- }
+ write_lock(&call->state_lock);
- if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
- goto protocol_error;
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+ if (reply_begun)
+ call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+ else
+ call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ break;
- /* if the packet need security things doing to it, then it goes down
- * the slow path */
- if (call->conn->security_ix)
- goto enqueue_packet;
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ __rxrpc_call_completed(call);
+ rxrpc_notify_socket(call);
+ break;
- sp->call = call;
- rxrpc_get_call(call);
- atomic_inc(&call->skb_count);
- terminal = ((flags & RXRPC_LAST_PACKET) &&
- !(flags & RXRPC_CLIENT_INITIATED));
- ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
- if (ret < 0) {
- if (ret == -ENOMEM || ret == -ENOBUFS) {
- __clear_bit(ackbit, call->ackr_window);
- ack = RXRPC_ACK_NOSPACE;
- goto discard_and_ack;
- }
- goto out;
+ default:
+ goto bad_state;
}
- skb = NULL;
- sp = NULL;
-
- _debug("post #%u", seq);
- ASSERTCMP(call->rx_data_post, ==, seq);
- call->rx_data_post++;
+ write_unlock(&call->state_lock);
+ if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
+ rxrpc_propose_ack_client_tx_end);
+ trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
+ } else {
+ trace_rxrpc_transmit(call, rxrpc_transmit_end);
+ }
+ _leave(" = ok");
+ return true;
+
+bad_state:
+ write_unlock(&call->state_lock);
+ kdebug("end_tx %s", rxrpc_call_states[call->state]);
+ rxrpc_proto_abort(abort_why, call, call->tx_top);
+ return false;
+}
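
The switch above amounts to a small state-transition function. A sketch with local enum constants (covering only the transitions shown above; anything else is treated as an abort):

#include <stdbool.h>

enum call_state {
	CLIENT_SEND_REQUEST,
	CLIENT_AWAIT_REPLY,
	CLIENT_RECV_REPLY,
	SERVER_AWAIT_ACK,
	CALL_COMPLETE,
	CALL_ABORTED,
};

static enum call_state end_tx_phase(enum call_state state, bool reply_begun)
{
	switch (state) {
	case CLIENT_SEND_REQUEST:
	case CLIENT_AWAIT_REPLY:
		/* A client that has sent its last request packet either waits
		 * for the reply or, if data already arrived, starts receiving.
		 */
		return reply_begun ? CLIENT_RECV_REPLY : CLIENT_AWAIT_REPLY;
	case SERVER_AWAIT_ACK:
		/* The server's reply has been hard-ACK'd: the call is done. */
		return CALL_COMPLETE;
	default:
		return CALL_ABORTED;	/* bad state: abort the call */
	}
}

int main(void)
{
	/* A client whose reply began arriving before its last request packet
	 * was ACK'd goes straight to receiving the reply. */
	return end_tx_phase(CLIENT_AWAIT_REPLY, true) == CLIENT_RECV_REPLY ? 0 : 1;
}
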
- if (flags & RXRPC_LAST_PACKET)
- set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
+/*
+ * Begin the reply reception phase of a call.
+ */
+static bool rxrpc_receiving_reply(struct rxrpc_call *call)
+{
+ struct rxrpc_ack_summary summary = { 0 };
+ rxrpc_seq_t top = READ_ONCE(call->tx_top);
+
+ if (call->ackr_reason) {
+ spin_lock_bh(&call->lock);
+ call->ackr_reason = 0;
+ call->resend_at = call->expire_at;
+ call->ack_at = call->expire_at;
+ spin_unlock_bh(&call->lock);
+ rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
+ ktime_get_real());
+ }
- /* if we've reached an out of sequence packet then we need to drain
- * that queue into the socket Rx queue now */
- if (call->rx_data_post == call->rx_first_oos) {
- _debug("drain rx oos now");
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
+ if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ rxrpc_rotate_tx_window(call, top, &summary);
+ if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
}
+ if (!rxrpc_end_tx_phase(call, true, "ETD"))
+ return false;
+ call->tx_phase = false;
+ return true;
+}
- spin_unlock(&call->lock);
- atomic_inc(&call->ackr_not_idle);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
- _leave(" = 0 [posted]");
- return 0;
+/*
+ * Scan a jumbo packet to validate its structure and to work out how many
+ * subpackets it contains.
+ *
+ * A jumbo packet is a collection of consecutive packets glued together with
+ * little headers between that indicate how to change the initial header for
+ * each subpacket.
+ *
+ * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
+ * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
+ * size.
+ */
+static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int offset = sizeof(struct rxrpc_wire_header);
+ unsigned int len = skb->len;
+ int nr_jumbo = 1;
+ u8 flags = sp->hdr.flags;
-protocol_error:
- ret = -EBADMSG;
-out:
- spin_unlock(&call->lock);
- _leave(" = %d", ret);
- return ret;
+ do {
+ nr_jumbo++;
+ if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
+ goto protocol_error;
+ if (flags & RXRPC_LAST_PACKET)
+ goto protocol_error;
+ offset += RXRPC_JUMBO_DATALEN;
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0)
+ goto protocol_error;
+ offset += sizeof(struct rxrpc_jumbo_header);
+ } while (flags & RXRPC_JUMBO_PACKET);
-discard_and_ack:
- _debug("discard and ACK packet %p", skb);
- __rxrpc_propose_ACK(call, ack, serial, true);
-discard:
- spin_unlock(&call->lock);
- rxrpc_free_skb(skb);
- _leave(" = 0 [discarded]");
- return 0;
+ sp->nr_jumbo = nr_jumbo;
+ return true;
-enqueue_and_ack:
- __rxrpc_propose_ACK(call, ack, serial, true);
-enqueue_packet:
- _net("defer skb %p", skb);
- spin_unlock(&call->lock);
- skb_queue_tail(&call->rx_queue, skb);
- atomic_inc(&call->ackr_not_idle);
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
- _leave(" = 0 [queued]");
- return 0;
+protocol_error:
+ return false;
}
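
A userspace sketch of the same walk, using illustrative sizes and flag values rather than the protocol definitions: every non-final subpacket must supply a full payload plus a jumbo header, the LAST flag may only appear on the final subpacket, and the flags for the next subpacket are read from the little header in between.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define JUMBO_DATALEN	1412		/* payload per non-final subpacket */
#define JUMBO_HDRLEN	4		/* per-subpacket secondary header */
#define FLAG_JUMBO	0x20		/* another subpacket follows */
#define FLAG_LAST	0x40		/* this is the call's last packet */

static bool validate_jumbo(const uint8_t *pkt, size_t len, uint8_t flags,
			   int *nr_subpackets)
{
	size_t offset = 0;
	int n = 1;

	while (flags & FLAG_JUMBO) {
		n++;
		/* Every non-final subpacket needs a full payload + header. */
		if (len - offset < JUMBO_DATALEN + JUMBO_HDRLEN)
			return false;
		/* The LAST flag may only appear on the final subpacket. */
		if (flags & FLAG_LAST)
			return false;
		offset += JUMBO_DATALEN;
		flags = pkt[offset];	/* flags for the next subpacket */
		offset += JUMBO_HDRLEN;
	}
	*nr_subpackets = n;
	return true;
}

int main(void)
{
	uint8_t pkt[2 * (JUMBO_DATALEN + JUMBO_HDRLEN)] = { 0 };
	int n = 0;

	/* One continuation header whose flags clear FLAG_JUMBO: 2 subpackets. */
	if (validate_jumbo(pkt, sizeof(pkt), FLAG_JUMBO, &n))
		printf("subpackets: %d\n", n);
	return 0;
}
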
/*
- * assume an implicit ACKALL of the transmission phase of a client socket upon
- * reception of the first reply packet
+ * Handle reception of a duplicate packet.
+ *
+ * We have to take care to avoid an attack here whereby we're given a series of
+ * jumbograms, each with a sequence number one before the preceding one and
+ * filled up to maximum UDP size. If they never send us the first packet in
+ * the sequence, they can cause us to have to hold on to around 2MiB of kernel
+ * space until the call times out.
+ *
+ * We limit the space usage by only accepting three duplicate jumbo packets per
+ * call. After that, we tell the other side we're no longer accepting jumbos
+ * (that information is encoded in the ACK packet).
*/
-static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
+static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
+ u8 annotation, bool *_jumbo_bad)
{
- write_lock_bh(&call->state_lock);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
- call->acks_latest = serial;
-
- _debug("implicit ACKALL %%%u", call->acks_latest);
- set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
- write_unlock_bh(&call->state_lock);
-
- if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
- break;
+ /* Discard normal packets that are duplicates. */
+ if (annotation == 0)
+ return;
- default:
- write_unlock_bh(&call->state_lock);
- break;
+ /* Skip jumbo subpackets that are duplicates. When we've had three or
+ * more partially duplicate jumbo packets, we refuse to take any more
+ * jumbos for this call.
+ */
+ if (!*_jumbo_bad) {
+ call->nr_jumbo_bad++;
+ *_jumbo_bad = true;
}
}
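
A sketch of the limiting scheme the comment describes, with names local to the example: at most one strike is charged per offending packet, and once the per-call strike count passes three the advertised jumbo_max drops to 1 (mirroring the nr_jumbo_bad > 3 test used when filling out ACKs later in this patch).

#include <stdbool.h>
#include <stdio.h>

struct call_jumbo_state {
	unsigned int nr_jumbo_bad;	/* packets with duplicate jumbo data */
};

/* Called per offending subpacket; charges at most one strike per packet. */
static void note_dup_jumbo(struct call_jumbo_state *call, bool *packet_charged)
{
	if (!*packet_charged) {
		call->nr_jumbo_bad++;
		*packet_charged = true;
	}
}

/* What we advertise in the ACK trailer: stop offering jumbos after 3 strikes. */
static unsigned int advertised_jumbo_max(const struct call_jumbo_state *call,
					 unsigned int normal_jumbo_max)
{
	return call->nr_jumbo_bad > 3 ? 1 : normal_jumbo_max;
}

int main(void)
{
	struct call_jumbo_state call = { 0 };
	int pkt;

	for (pkt = 0; pkt < 5; pkt++) {
		bool charged = false;

		note_dup_jumbo(&call, &charged);	/* first dup subpacket */
		note_dup_jumbo(&call, &charged);	/* second dup, no extra strike */
		printf("after packet %d: strikes=%u jumbo_max=%u\n",
		       pkt, call.nr_jumbo_bad, advertised_jumbo_max(&call, 4));
	}
	return 0;
}
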
/*
- * post an incoming packet to the nominated call to deal with
- * - must get rid of the sk_buff, either by freeing it or by queuing it
+ * Process a DATA packet, adding the packet to the Rx ring.
*/
-void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
+ u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- __be32 wtmp;
- u32 hi_serial, abort_code;
+ unsigned int offset = sizeof(struct rxrpc_wire_header);
+ unsigned int ix;
+ rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
+ rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
+ bool immediate_ack = false, jumbo_bad = false, queued;
+ u16 len;
+ u8 ack = 0, flags, annotation = 0;
- _enter("%p,%p", call, skb);
+ _enter("{%u,%u},{%u,%u}",
+ call->rx_hard_ack, call->rx_top, skb->len, seq);
- ASSERT(!irqs_disabled());
+ _proto("Rx DATA %%%u { #%u f=%02x }",
+ sp->hdr.serial, seq, sp->hdr.flags);
-#if 0 // INJECT RX ERROR
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
- static int skip = 0;
- if (++skip == 3) {
- printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
- skip = 0;
- goto free_packet;
- }
+ if (call->state >= RXRPC_CALL_COMPLETE)
+ return;
+
+ /* Received data implicitly ACKs all of the request packets we sent
+ * when we're acting as a client.
+ */
+ if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+ call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+ !rxrpc_receiving_reply(call))
+ return;
+
+ call->ackr_prev_seq = seq;
+
+ hard_ack = READ_ONCE(call->rx_hard_ack);
+ if (after(seq, hard_ack + call->rx_winsize)) {
+ ack = RXRPC_ACK_EXCEEDS_WINDOW;
+ ack_serial = serial;
+ goto ack;
}
-#endif
- /* track the latest serial number on this connection for ACK packet
- * information */
- hi_serial = atomic_read(&call->conn->hi_serial);
- while (sp->hdr.serial > hi_serial)
- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
- sp->hdr.serial);
+ flags = sp->hdr.flags;
+ if (flags & RXRPC_JUMBO_PACKET) {
+ if (call->nr_jumbo_bad > 3) {
+ ack = RXRPC_ACK_NOSPACE;
+ ack_serial = serial;
+ goto ack;
+ }
+ annotation = 1;
+ }
- /* request ACK generation for any ACK or DATA packet that requests
- * it */
- if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
- _proto("ACK Requested on %%%u", sp->hdr.serial);
- rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
+next_subpacket:
+ queued = false;
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ len = skb->len;
+ if (flags & RXRPC_JUMBO_PACKET)
+ len = RXRPC_JUMBO_DATALEN;
+
+ if (flags & RXRPC_LAST_PACKET) {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ seq != call->rx_top)
+ return rxrpc_proto_abort("LSN", call, seq);
+ } else {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ after_eq(seq, call->rx_top))
+ return rxrpc_proto_abort("LSA", call, seq);
}
- switch (sp->hdr.type) {
- case RXRPC_PACKET_TYPE_ABORT:
- _debug("abort");
+ if (before_eq(seq, hard_ack)) {
+ ack = RXRPC_ACK_DUPLICATE;
+ ack_serial = serial;
+ goto skip;
+ }
- if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
- goto protocol_error;
+ if (flags & RXRPC_REQUEST_ACK && !ack) {
+ ack = RXRPC_ACK_REQUESTED;
+ ack_serial = serial;
+ }
- abort_code = ntohl(wtmp);
- _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
-
- write_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE) {
- call->state = RXRPC_CALL_REMOTELY_ABORTED;
- call->remote_abort = abort_code;
- set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- rxrpc_queue_call(call);
+ if (call->rxtx_buffer[ix]) {
+ rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
+ if (ack != RXRPC_ACK_DUPLICATE) {
+ ack = RXRPC_ACK_DUPLICATE;
+ ack_serial = serial;
}
- goto free_packet_unlock;
+ immediate_ack = true;
+ goto skip;
+ }
- case RXRPC_PACKET_TYPE_BUSY:
- _proto("Rx BUSY %%%u", sp->hdr.serial);
+ /* Queue the packet. We use a couple of memory barriers here as we need
+ * to make sure that rx_top is perceived to be set after the buffer
+ * pointer and that the buffer pointer is set after the annotation and
+ * the skb data.
+ *
+ * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+ * and also rxrpc_fill_out_ack().
+ */
+ rxrpc_get_skb(skb, rxrpc_skb_rx_got);
+ call->rxtx_annotations[ix] = annotation;
+ smp_wmb();
+ call->rxtx_buffer[ix] = skb;
+ if (after(seq, call->rx_top)) {
+ smp_store_release(&call->rx_top, seq);
+ } else if (before(seq, call->rx_top)) {
+ /* Send an immediate ACK if we fill in a hole */
+ if (!ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
+ immediate_ack = true;
+ }
+ if (flags & RXRPC_LAST_PACKET) {
+ set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+ trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+ } else {
+ trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+ }
+ queued = true;
- if (rxrpc_conn_is_service(call->conn))
- goto protocol_error;
+ if (after_eq(seq, call->rx_expect_next)) {
+ if (after(seq, call->rx_expect_next)) {
+ _net("OOS %u > %u", seq, call->rx_expect_next);
+ ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+ ack_serial = serial;
+ }
+ call->rx_expect_next = seq + 1;
+ }
- write_lock_bh(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- call->state = RXRPC_CALL_SERVER_BUSY;
- set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
- rxrpc_queue_call(call);
- case RXRPC_CALL_SERVER_BUSY:
- goto free_packet_unlock;
- default:
- goto protocol_error_locked;
+skip:
+ offset += len;
+ if (flags & RXRPC_JUMBO_PACKET) {
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0)
+ return rxrpc_proto_abort("XJF", call, seq);
+ offset += sizeof(struct rxrpc_jumbo_header);
+ seq++;
+ serial++;
+ annotation++;
+ if (flags & RXRPC_JUMBO_PACKET)
+ annotation |= RXRPC_RX_ANNO_JLAST;
+ if (after(seq, hard_ack + call->rx_winsize)) {
+ ack = RXRPC_ACK_EXCEEDS_WINDOW;
+ ack_serial = serial;
+ if (!jumbo_bad) {
+ call->nr_jumbo_bad++;
+ jumbo_bad = true;
+ }
+ goto ack;
}
- default:
- _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
- goto protocol_error;
+ _proto("Rx DATA Jumbo %%%u", serial);
+ goto next_subpacket;
+ }
- case RXRPC_PACKET_TYPE_DATA:
- _proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
+ if (queued && flags & RXRPC_LAST_PACKET && !ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
- if (sp->hdr.seq == 0)
- goto protocol_error;
+ack:
+ if (ack)
+ rxrpc_propose_ACK(call, ack, skew, ack_serial,
+ immediate_ack, true,
+ rxrpc_propose_ack_input_data);
- call->ackr_prev_seq = sp->hdr.seq;
+ if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
+ rxrpc_notify_socket(call);
+ _leave(" [queued]");
+}
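
The ordering described in the "Queue the packet" comment above can be sketched in userspace with C11 atomics standing in for smp_wmb()/smp_store_release(): the producer fills the slot before publishing the new top with a release store, and a consumer that loads top with acquire is then guaranteed to see the slot contents. Everything below is illustrative, including the ring size and the lack of wrap-safe sequence comparisons.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 64
#define RING_MASK (RING_SIZE - 1)

struct rx_ring {
	unsigned char annotation[RING_SIZE];
	void *buffer[RING_SIZE];
	_Atomic unsigned int top;	/* highest sequence published */
};

static void producer_queue(struct rx_ring *r, unsigned int seq, void *skb,
			   unsigned char anno)
{
	unsigned int ix = seq & RING_MASK;

	r->annotation[ix] = anno;
	r->buffer[ix] = skb;		/* written before top is published */
	atomic_store_explicit(&r->top, seq, memory_order_release);
}

static void *consumer_peek(struct rx_ring *r, unsigned int seq)
{
	unsigned int top = atomic_load_explicit(&r->top, memory_order_acquire);

	if (seq > top)
		return NULL;		/* not published yet */
	return r->buffer[seq & RING_MASK];	/* safe: ordered after 'top' */
}

int main(void)
{
	static struct rx_ring ring;
	int data = 42;

	producer_queue(&ring, 1, &data, 1);
	printf("seq 1 -> %p\n", consumer_peek(&ring, 1));
	return 0;
}
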
- /* received data implicitly ACKs all of the request packets we
- * sent when we're acting as a client */
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
- rxrpc_assume_implicit_ackall(call, sp->hdr.serial);
+/*
+ * Process a requested ACK.
+ */
+static void rxrpc_input_requested_ack(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t orig_serial,
+ rxrpc_serial_t ack_serial)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ ktime_t sent_at;
+ int ix;
+
+ for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
+ skb = call->rxtx_buffer[ix];
+ if (!skb)
+ continue;
+
+ sp = rxrpc_skb(skb);
+ if (sp->hdr.serial != orig_serial)
+ continue;
+ smp_rmb();
+ sent_at = skb->tstamp;
+ goto found;
+ }
+ return;
- switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
- case 0:
- skb = NULL;
- goto done;
+found:
+ rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
+ orig_serial, ack_serial, sent_at, resp_time);
+}
- default:
- BUG();
+/*
+ * Process a ping response.
+ */
+static void rxrpc_input_ping_response(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t orig_serial,
+ rxrpc_serial_t ack_serial)
+{
+ rxrpc_serial_t ping_serial;
+ ktime_t ping_time;
- /* data packet received beyond the last packet */
- case -EBADMSG:
- goto protocol_error;
- }
+ ping_time = call->ackr_ping_time;
+ smp_rmb();
+ ping_serial = call->ackr_ping;
- case RXRPC_PACKET_TYPE_ACKALL:
- case RXRPC_PACKET_TYPE_ACK:
- /* ACK processing is done in process context */
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD) {
- skb_queue_tail(&call->rx_queue, skb);
- rxrpc_queue_call(call);
- skb = NULL;
- }
- read_unlock_bh(&call->state_lock);
- goto free_packet;
+ if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
+ before(orig_serial, ping_serial))
+ return;
+ clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ if (after(orig_serial, ping_serial))
+ return;
+
+ rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
+ orig_serial, ack_serial, ping_time, resp_time);
+}
+
+/*
+ * Process the extra information that may be appended to an ACK packet
+ */
+static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_ackinfo *ackinfo)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_peer *peer;
+ unsigned int mtu;
+ u32 rwind = ntohl(ackinfo->rwind);
+
+ _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
+ sp->hdr.serial,
+ ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
+ rwind, ntohl(ackinfo->jumbo_max));
+
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ call->tx_winsize = rwind;
+ if (call->cong_ssthresh > rwind)
+ call->cong_ssthresh = rwind;
+
+ mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+
+ peer = call->peer;
+ if (mtu < peer->maxdata) {
+ spin_lock_bh(&peer->lock);
+ peer->maxdata = mtu;
+ peer->mtu = mtu + peer->hdrsize;
+ spin_unlock_bh(&peer->lock);
+ _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
}
+}
-protocol_error:
- _debug("protocol error");
- write_lock_bh(&call->state_lock);
-protocol_error_locked:
- if (call->state <= RXRPC_CALL_COMPLETE) {
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
- rxrpc_queue_call(call);
+/*
+ * Process individual soft ACKs.
+ *
+ * Each ACK in the array corresponds to one packet and can be either an ACK or
+ * a NAK. If we find an explicitly NAK'd packet we resend immediately;
+ * packets that lie beyond the end of the ACK list are scheduled for resend by
+ * the timer on the basis that the peer might just not have processed them at
+ * the time the ACK was sent.
+ */
+static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
+ rxrpc_seq_t seq, int nr_acks,
+ struct rxrpc_ack_summary *summary)
+{
+ int ix;
+ u8 annotation, anno_type;
+
+ for (; nr_acks > 0; nr_acks--, seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ switch (*acks++) {
+ case RXRPC_ACK_TYPE_ACK:
+ summary->nr_acks++;
+ if (anno_type == RXRPC_TX_ANNO_ACK)
+ continue;
+ summary->nr_new_acks++;
+ call->rxtx_annotations[ix] =
+ RXRPC_TX_ANNO_ACK | annotation;
+ break;
+ case RXRPC_ACK_TYPE_NACK:
+ if (!summary->nr_nacks &&
+ call->acks_lowest_nak != seq) {
+ call->acks_lowest_nak = seq;
+ summary->new_low_nack = true;
+ }
+ summary->nr_nacks++;
+ if (anno_type == RXRPC_TX_ANNO_NAK)
+ continue;
+ summary->nr_new_nacks++;
+ if (anno_type == RXRPC_TX_ANNO_RETRANS)
+ continue;
+ call->rxtx_annotations[ix] =
+ RXRPC_TX_ANNO_NAK | annotation;
+ break;
+ default:
+ return rxrpc_proto_abort("SFT", call, 0);
+ }
}
-free_packet_unlock:
- write_unlock_bh(&call->state_lock);
-free_packet:
- rxrpc_free_skb(skb);
-done:
- _leave("");
}
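
A compact sketch of how such a soft-ACK array is interpreted: byte i refers to packet firstPacket + i and carries either an ACK or a NACK marker. The marker values and summary fields below are stand-ins for the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define ACK_TYPE_ACK	1
#define ACK_TYPE_NACK	0

struct ack_summary {
	int nr_acks;
	int nr_nacks;
	bool new_low_nack;
};

static void scan_soft_acks(const unsigned char *acks, unsigned int first_seq,
			   int nr_acks, unsigned int *lowest_nak,
			   struct ack_summary *summary)
{
	unsigned int seq = first_seq;
	int i;

	for (i = 0; i < nr_acks; i++, seq++) {
		if (acks[i] == ACK_TYPE_ACK) {
			summary->nr_acks++;
		} else {
			/* The first NACK seen at a new lowest sequence is
			 * what triggers loss handling in the caller. */
			if (!summary->nr_nacks && *lowest_nak != seq) {
				*lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
		}
	}
}

int main(void)
{
	const unsigned char acks[] = { 1, 1, 0, 1 };	/* ACK ACK NACK ACK */
	struct ack_summary summary = { 0 };
	unsigned int lowest_nak = 0;

	scan_soft_acks(acks, 5, 4, &lowest_nak, &summary);
	printf("acks=%d nacks=%d low_nak=%u new_low=%d\n",
	       summary.nr_acks, summary.nr_nacks, lowest_nak,
	       summary.new_low_nack);
	return 0;
}
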
/*
- * split up a jumbo data packet
+ * Process an ACK packet.
+ *
+ * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
+ * in the ACK array. Anything before that is hard-ACK'd and may be discarded.
+ *
+ * A hard-ACK means that a packet has been processed and may be discarded; a
+ * soft-ACK means that the packet has been received but may yet be discarded
+ * by the peer and its retransmission requested, so the sender must retain
+ * it. A phase is complete when all packets are hard-ACK'd.
*/
-static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
- struct sk_buff *jumbo)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ u16 skew)
{
- struct rxrpc_jumbo_header jhdr;
- struct rxrpc_skb_priv *sp;
- struct sk_buff *part;
+ struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ union {
+ struct rxrpc_ackpacket ack;
+ struct rxrpc_ackinfo info;
+ u8 acks[RXRPC_MAXACKS];
+ } buf;
+ rxrpc_serial_t acked_serial;
+ rxrpc_seq_t first_soft_ack, hard_ack;
+ int nr_acks, offset, ioffset;
+
+ _enter("");
+
+ offset = sizeof(struct rxrpc_wire_header);
+ if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
+ _debug("extraction failure");
+ return rxrpc_proto_abort("XAK", call, 0);
+ }
+ offset += sizeof(buf.ack);
+
+ acked_serial = ntohl(buf.ack.serial);
+ first_soft_ack = ntohl(buf.ack.firstPacket);
+ hard_ack = first_soft_ack - 1;
+ nr_acks = buf.ack.nAcks;
+ summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
+ buf.ack.reason : RXRPC_ACK__INVALID);
+
+ trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);
+
+ _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ sp->hdr.serial,
+ ntohs(buf.ack.maxSkew),
+ first_soft_ack,
+ ntohl(buf.ack.previousPacket),
+ acked_serial,
+ rxrpc_ack_names[summary.ack_reason],
+ buf.ack.nAcks);
+
+ if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
+ sp->hdr.serial);
+ if (buf.ack.reason == RXRPC_ACK_REQUESTED)
+ rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
+ sp->hdr.serial);
+
+ if (buf.ack.reason == RXRPC_ACK_PING) {
+ _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
+ skew, sp->hdr.serial, true, true,
+ rxrpc_propose_ack_respond_to_ping);
+ } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
+ skew, sp->hdr.serial, true, true,
+ rxrpc_propose_ack_respond_to_ack);
+ }
- _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
+ ioffset = offset + nr_acks + 3;
+ if (skb->len >= ioffset + sizeof(buf.info)) {
+ if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+ return rxrpc_proto_abort("XAI", call, 0);
+ rxrpc_input_ackinfo(call, skb, &buf.info);
+ }
- sp = rxrpc_skb(jumbo);
+ if (first_soft_ack == 0)
+ return rxrpc_proto_abort("AK0", call, 0);
- do {
- sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
-
- /* make a clone to represent the first subpacket in what's left
- * of the jumbo packet */
- part = skb_clone(jumbo, GFP_ATOMIC);
- if (!part) {
- /* simply ditch the tail in the event of ENOMEM */
- pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
- break;
- }
- rxrpc_new_skb(part);
+ /* Ignore ACKs unless we are or have just been transmitting. */
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+ case RXRPC_CALL_SERVER_SEND_REPLY:
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ break;
+ default:
+ return;
+ }
- pskb_trim(part, RXRPC_JUMBO_DATALEN);
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest)) {
+ _debug("discard ACK %d <= %d",
+ sp->hdr.serial, call->acks_latest);
+ return;
+ }
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
+ if (before(hard_ack, call->tx_hard_ack) ||
+ after(hard_ack, call->tx_top))
+ return rxrpc_proto_abort("AKW", call, 0);
+ if (nr_acks > call->tx_top - hard_ack)
+ return rxrpc_proto_abort("AKN", call, 0);
+
+ if (after(hard_ack, call->tx_hard_ack))
+ rxrpc_rotate_tx_window(call, hard_ack, &summary);
+
+ if (nr_acks > 0) {
+ if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
+ return rxrpc_proto_abort("XSA", call, 0);
+ rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
+ &summary);
+ }
- if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
- goto protocol_error;
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ return;
+ }
- if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
- goto protocol_error;
- if (!pskb_pull(jumbo, sizeof(jhdr)))
- BUG();
+ if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
+ RXRPC_TX_ANNO_LAST &&
+ summary.nr_acks == call->tx_top - hard_ack)
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ false, true,
+ rxrpc_propose_ack_ping_for_lost_reply);
- sp->hdr.seq += 1;
- sp->hdr.serial += 1;
- sp->hdr.flags = jhdr.flags;
- sp->hdr._rsvd = ntohs(jhdr._rsvd);
+ return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+}
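
The sanity checks at the top of rxrpc_input_ack() boil down to a little arithmetic on the wire fields. A sketch with an illustrative struct layout and plain (non wrap-safe) comparisons:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wire_ackpacket {
	uint32_t firstPacket;	/* big-endian on the wire */
	uint8_t nAcks;
};

static bool parse_ack(const struct wire_ackpacket *wire,
		      uint32_t tx_hard_ack, uint32_t tx_top,
		      uint32_t *hard_ack, uint8_t *nr_acks)
{
	uint32_t first_soft_ack = ntohl(wire->firstPacket);

	if (first_soft_ack == 0)
		return false;			/* protocol error */
	*hard_ack = first_soft_ack - 1;
	*nr_acks = wire->nAcks;

	/* The hard-ACK point may not move backwards or past what we sent,
	 * and the soft-ACK list may not describe more than is in flight. */
	if (*hard_ack < tx_hard_ack || *hard_ack > tx_top)
		return false;
	if (*nr_acks > tx_top - *hard_ack)
		return false;
	return true;
}

int main(void)
{
	struct wire_ackpacket wire = { .firstPacket = htonl(4), .nAcks = 2 };
	uint32_t hard_ack;
	uint8_t nr_acks;

	if (parse_ack(&wire, 2, 6, &hard_ack, &nr_acks))
		printf("hard_ack=%u nr_acks=%u\n",
		       (unsigned int)hard_ack, (unsigned int)nr_acks);
	return 0;
}
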
- _proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);
+/*
+ * Process an ACKALL packet.
+ */
+static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_fast_process_packet(call, part);
- part = NULL;
+ _proto("Rx ACKALL %%%u", sp->hdr.serial);
- } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
+ rxrpc_rotate_tx_window(call, call->tx_top, &summary);
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ rxrpc_end_tx_phase(call, false, "ETL");
+}
- rxrpc_fast_process_packet(call, jumbo);
- _leave("");
- return;
+/*
+ * Process an ABORT packet.
+ */
+static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ __be32 wtmp;
+ u32 abort_code = RX_CALL_DEAD;
-protocol_error:
- _debug("protocol error");
- rxrpc_free_skb(part);
- rxrpc_free_skb(jumbo);
- write_lock_bh(&call->state_lock);
- if (call->state <= RXRPC_CALL_COMPLETE) {
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
- rxrpc_queue_call(call);
- }
- write_unlock_bh(&call->state_lock);
- _leave("");
+ _enter("");
+
+ if (skb->len >= 4 &&
+ skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &wtmp, sizeof(wtmp)) >= 0)
+ abort_code = ntohl(wtmp);
+
+ _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
+
+ if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ abort_code, ECONNABORTED))
+ rxrpc_notify_socket(call);
}
/*
- * post an incoming packet to the appropriate call/socket to deal with
- * - must get rid of the sk_buff, either by freeing it or by queuing it
+ * Process an incoming call packet.
*/
-static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
- struct sk_buff *skb)
+static void rxrpc_input_call_packet(struct rxrpc_call *call,
+ struct sk_buff *skb, u16 skew)
{
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
_enter("%p,%p", call, skb);
- sp = rxrpc_skb(skb);
+ switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_DATA:
+ rxrpc_input_data(call, skb, skew);
+ break;
- _debug("extant call [%d]", call->state);
+ case RXRPC_PACKET_TYPE_ACK:
+ rxrpc_input_ack(call, skb, skew);
+ break;
+
+ case RXRPC_PACKET_TYPE_BUSY:
+ _proto("Rx BUSY %%%u", sp->hdr.serial);
+
+ /* Just ignore BUSY packets from the server; the retry and
+ * lifespan timers will take care of business. BUSY packets
+ * from the client don't make sense.
+ */
+ break;
+
+ case RXRPC_PACKET_TYPE_ABORT:
+ rxrpc_input_abort(call, skb);
+ break;
+
+ case RXRPC_PACKET_TYPE_ACKALL:
+ rxrpc_input_ackall(call, skb);
+ break;
- read_lock(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
- rxrpc_queue_call(call);
- goto free_unlock;
- }
- case RXRPC_CALL_REMOTELY_ABORTED:
- case RXRPC_CALL_NETWORK_ERROR:
- case RXRPC_CALL_DEAD:
- goto dead_call;
- case RXRPC_CALL_COMPLETE:
- case RXRPC_CALL_CLIENT_FINAL_ACK:
- /* complete server call */
- if (rxrpc_conn_is_service(call->conn))
- goto dead_call;
- /* resend last packet of a completed call */
- _debug("final ack again");
- rxrpc_get_call(call);
- set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
- rxrpc_queue_call(call);
- goto free_unlock;
default:
+ _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
break;
}
- read_unlock(&call->state_lock);
- rxrpc_get_call(call);
-
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
- sp->hdr.flags & RXRPC_JUMBO_PACKET)
- rxrpc_process_jumbo_packet(call, skb);
- else
- rxrpc_fast_process_packet(call, skb);
-
- rxrpc_put_call(call);
- goto done;
-
-dead_call:
- if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
- skb->priority = RX_CALL_DEAD;
- rxrpc_reject_packet(call->conn->params.local, skb);
- goto unlock;
- }
-free_unlock:
- rxrpc_free_skb(skb);
-unlock:
- read_unlock(&call->state_lock);
-done:
_leave("");
}
/*
* post connection-level events to the connection
- * - this includes challenges, responses and some aborts
+ * - this includes challenges, responses, some aborts and call terminal packet
+ * retransmission.
*/
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
struct sk_buff *skb)
@@ -595,6 +965,17 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
}
/*
+ * put a packet up for transport-level abort
+ */
+static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+{
+ CHECK_SLAB_OKAY(&local->usage);
+
+ skb_queue_tail(&local->reject_queue, skb);
+ rxrpc_queue_local(local);
+}
+
+/*
* Extract the wire header from a packet and translate the byte order.
*/
static noinline
@@ -605,8 +986,6 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
return -EBADMSG;
- if (!pskb_pull(skb, sizeof(whdr)))
- BUG();
memset(sp, 0, sizeof(*sp));
sp->hdr.epoch = ntohl(whdr.epoch);
@@ -631,19 +1010,22 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns.
*/
-void rxrpc_data_ready(struct sock *sk)
+void rxrpc_data_ready(struct sock *udp_sk)
{
struct rxrpc_connection *conn;
+ struct rxrpc_channel *chan;
+ struct rxrpc_call *call;
struct rxrpc_skb_priv *sp;
- struct rxrpc_local *local = sk->sk_user_data;
+ struct rxrpc_local *local = udp_sk->sk_user_data;
struct sk_buff *skb;
- int ret;
+ unsigned int channel;
+ int ret, skew;
- _enter("%p", sk);
+ _enter("%p", udp_sk);
ASSERT(!irqs_disabled());
- skb = skb_recv_datagram(sk, 0, 1, &ret);
+ skb = skb_recv_datagram(udp_sk, 0, 1, &ret);
if (!skb) {
if (ret == -EAGAIN)
return;
@@ -651,13 +1033,13 @@ void rxrpc_data_ready(struct sock *sk)
return;
}
- rxrpc_new_skb(skb);
+ rxrpc_new_skb(skb, rxrpc_skb_rx_received);
_net("recv skb %p", skb);
/* we'll probably need to checksum it (didn't call sock_recvmsg) */
if (skb_checksum_complete(skb)) {
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
_leave(" [CSUM failed]");
return;
@@ -671,13 +1053,21 @@ void rxrpc_data_ready(struct sock *sk)
skb_orphan(skb);
sp = rxrpc_skb(skb);
- _net("Rx UDP packet from %08x:%04hu",
- ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
-
/* dig out the RxRPC connection details */
if (rxrpc_extract_header(sp, skb) < 0)
goto bad_message;
+ if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
+ static int lose;
+ if ((lose++ & 7) == 7) {
+ trace_rxrpc_rx_lose(sp);
+ rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
+ return;
+ }
+ }
+
+ trace_rxrpc_rx_packet(sp);
+
_net("Rx RxRPC %s ep=%x call=%x:%x",
sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
@@ -688,70 +1078,125 @@ void rxrpc_data_ready(struct sock *sk)
goto bad_message;
}
- if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
+ switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_VERSION:
rxrpc_post_packet_to_local(local, skb);
goto out;
- }
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
- (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
- goto bad_message;
+ case RXRPC_PACKET_TYPE_BUSY:
+ if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+ goto discard;
+
+ case RXRPC_PACKET_TYPE_DATA:
+ if (sp->hdr.callNumber == 0)
+ goto bad_message;
+ if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
+ !rxrpc_validate_jumbo(skb))
+ goto bad_message;
+ break;
+ }
rcu_read_lock();
conn = rxrpc_find_connection_rcu(local, skb);
- if (!conn)
- goto cant_route_call;
+ if (conn) {
+ if (sp->hdr.securityIndex != conn->security_ix)
+ goto wrong_security;
+
+ if (sp->hdr.callNumber == 0) {
+ /* Connection-level packet */
+ _debug("CONN %p {%d}", conn, conn->debug_id);
+ rxrpc_post_packet_to_conn(conn, skb);
+ goto out_unlock;
+ }
+
+ /* Note the serial number skew here */
+ skew = (int)sp->hdr.serial - (int)conn->hi_serial;
+ if (skew >= 0) {
+ if (skew > 0)
+ conn->hi_serial = sp->hdr.serial;
+ } else {
+ skew = -skew;
+ skew = min(skew, 65535);
+ }
- if (sp->hdr.callNumber == 0) {
- /* Connection-level packet */
- _debug("CONN %p {%d}", conn, conn->debug_id);
- rxrpc_post_packet_to_conn(conn, skb);
- } else {
/* Call-bound packets are routed by connection channel. */
- unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
- struct rxrpc_channel *chan = &conn->channels[channel];
- struct rxrpc_call *call = rcu_dereference(chan->call);
+ channel = sp->hdr.cid & RXRPC_CHANNELMASK;
+ chan = &conn->channels[channel];
+
+ /* Ignore really old calls */
+ if (sp->hdr.callNumber < chan->last_call)
+ goto discard_unlock;
+
+ if (sp->hdr.callNumber == chan->last_call) {
+ /* For the previous service call, if completed successfully, we
+ * discard all further packets.
+ */
+ if (rxrpc_conn_is_service(conn) &&
+ (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
+ sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
+ goto discard_unlock;
+
+ /* But otherwise we need to retransmit the final packet from
+ * data cached in the connection record.
+ */
+ rxrpc_post_packet_to_conn(conn, skb);
+ goto out_unlock;
+ }
- if (!call || atomic_read(&call->usage) == 0)
- goto cant_route_call;
+ call = rcu_dereference(chan->call);
+ } else {
+ skew = 0;
+ call = NULL;
+ }
- rxrpc_post_packet_to_call(call, skb);
+ if (!call || atomic_read(&call->usage) == 0) {
+ if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
+ sp->hdr.callNumber == 0 ||
+ sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
+ goto bad_message_unlock;
+ if (sp->hdr.seq != 1)
+ goto discard_unlock;
+ call = rxrpc_new_incoming_call(local, conn, skb);
+ if (!call) {
+ rcu_read_unlock();
+ goto reject_packet;
+ }
+ rxrpc_send_ping(call, skb, skew);
}
+ rxrpc_input_call_packet(call, skb, skew);
+ goto discard_unlock;
+
+discard_unlock:
rcu_read_unlock();
+discard:
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
+ trace_rxrpc_rx_done(0, 0);
return;
-cant_route_call:
+out_unlock:
rcu_read_unlock();
+ goto out;
- _debug("can't route call");
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
- sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
- if (sp->hdr.seq == 1) {
- _debug("first packet");
- skb_queue_tail(&local->accept_queue, skb);
- rxrpc_queue_work(&local->processor);
- _leave(" [incoming]");
- return;
- }
- skb->priority = RX_INVALID_OPERATION;
- } else {
- skb->priority = RX_CALL_DEAD;
- }
-
- if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
- _debug("reject type %d",sp->hdr.type);
- rxrpc_reject_packet(local, skb);
- } else {
- rxrpc_free_skb(skb);
- }
- _leave(" [no call]");
- return;
+wrong_security:
+ rcu_read_unlock();
+ trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RXKADINCONSISTENCY, EBADMSG);
+ skb->priority = RXKADINCONSISTENCY;
+ goto post_abort;
+bad_message_unlock:
+ rcu_read_unlock();
bad_message:
+ trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_PROTOCOL_ERROR, EBADMSG);
skb->priority = RX_PROTOCOL_ERROR;
+post_abort:
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+reject_packet:
+ trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
_leave(" [badmsg]");
}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index c21ad213b337..7d4375e557e6 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -23,31 +23,36 @@ static int none_prime_packet_security(struct rxrpc_connection *conn)
}
static int none_secure_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
+ struct sk_buff *skb,
+ size_t data_size,
+ void *sechdr)
{
return 0;
}
-static int none_verify_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq, u16 expected_cksum)
{
return 0;
}
+static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+}
+
static int none_respond_to_challenge(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb,
+ u32 *_abort_code)
{
*_abort_code = RX_PROTOCOL_ERROR;
return -EPROTO;
}
static int none_verify_response(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb,
+ u32 *_abort_code)
{
*_abort_code = RX_PROTOCOL_ERROR;
return -EPROTO;
@@ -78,6 +83,7 @@ const struct rxrpc_security rxrpc_no_security = {
.prime_packet_security = none_prime_packet_security,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
+ .locate_data = none_locate_data,
.respond_to_challenge = none_respond_to_challenge,
.verify_response = none_verify_response,
.clear = none_clear,
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index 31a3f86ef2f6..540d3955c1bc 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -15,8 +15,6 @@
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <generated/utsrelease.h>
@@ -33,7 +31,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
{
struct rxrpc_wire_header whdr;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct sockaddr_in sin;
+ struct sockaddr_rxrpc srx;
struct msghdr msg;
struct kvec iov[2];
size_t len;
@@ -41,12 +39,11 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
_enter("");
- sin.sin_family = AF_INET;
- sin.sin_port = udp_hdr(skb)->source;
- sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
+ return;
- msg.msg_name = &sin;
- msg.msg_namelen = sizeof(sin);
+ msg.msg_name = &srx.transport;
+ msg.msg_namelen = srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -93,11 +90,13 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
- if (skb_copy_bits(skb, 0, &v, 1) < 0)
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &v, 1) < 0)
return;
_proto("Rx VERSION { %02x }", v);
if (v == 0)
@@ -109,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
break;
}
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
}
_leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index a753796fbe8f..ff4864d550b8 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -58,6 +58,17 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
memcmp(&local->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ /* If the choice of UDP6 port is left up to the transport, then
+ * the endpoint record doesn't match.
+ */
+ return ((u16 __force)local->srx.transport.sin6.sin6_port -
+ (u16 __force)srx->transport.sin6.sin6_port) ?:
+ memcmp(&local->srx.transport.sin6.sin6_addr,
+ &srx->transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+#endif
default:
BUG();
}
@@ -75,9 +86,7 @@ static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
atomic_set(&local->usage, 1);
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
- INIT_LIST_HEAD(&local->services);
init_rwsem(&local->defrag_sem);
- skb_queue_head_init(&local->accept_queue);
skb_queue_head_init(&local->reject_queue);
skb_queue_head_init(&local->event_queue);
local->client_conns = RB_ROOT;
@@ -101,11 +110,12 @@ static int rxrpc_open_socket(struct rxrpc_local *local)
struct sock *sock;
int ret, opt;
- _enter("%p{%d}", local, local->srx.transport_type);
+ _enter("%p{%d,%d}",
+ local, local->srx.transport_type, local->srx.transport.family);
/* create a socket to represent the local endpoint */
- ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
- IPPROTO_UDP, &local->socket);
+ ret = sock_create_kern(&init_net, local->srx.transport.family,
+ local->srx.transport_type, 0, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
return ret;
@@ -170,18 +180,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
long diff;
int ret;
- if (srx->transport.family == AF_INET) {
- _enter("{%d,%u,%pI4+%hu}",
- srx->transport_type,
- srx->transport.family,
- &srx->transport.sin.sin_addr,
- ntohs(srx->transport.sin.sin_port));
- } else {
- _enter("{%d,%u}",
- srx->transport_type,
- srx->transport.family);
- return ERR_PTR(-EAFNOSUPPORT);
- }
+ _enter("{%d,%d,%pISp}",
+ srx->transport_type, srx->transport.family, &srx->transport);
mutex_lock(&rxrpc_local_mutex);
@@ -234,13 +234,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
found:
mutex_unlock(&rxrpc_local_mutex);
- _net("LOCAL %s %d {%d,%u,%pI4+%hu}",
- age,
- local->debug_id,
- local->srx.transport_type,
- local->srx.transport.family,
- &local->srx.transport.sin.sin_addr,
- ntohs(local->srx.transport.sin.sin_port));
+ _net("LOCAL %s %d {%pISp}",
+ age, local->debug_id, &local->srx.transport);
_leave(" = %p", local);
return local;
@@ -296,7 +291,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
mutex_unlock(&rxrpc_local_mutex);
ASSERT(RB_EMPTY_ROOT(&local->client_conns));
- ASSERT(list_empty(&local->services));
+ ASSERT(!local->service);
if (socket) {
local->socket = NULL;
@@ -308,7 +303,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
/* At this point, there should be no more packets coming in to the
* local endpoint.
*/
- rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue);
rxrpc_purge_queue(&local->event_queue);
@@ -332,11 +326,6 @@ static void rxrpc_local_processor(struct work_struct *work)
if (atomic_read(&local->usage) == 0)
return rxrpc_local_destroyer(local);
- if (!skb_queue_empty(&local->accept_queue)) {
- rxrpc_accept_incoming_calls(local);
- again = true;
- }
-
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
again = true;
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index bdc5e42fe600..9d1c721bc4e8 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -21,28 +21,33 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
+ * Maximum lifetime of a call (in ms).
+ */
+unsigned int rxrpc_max_call_lifetime = 60 * 1000;
+
+/*
* How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ * packet with RXRPC_REQUEST_ACK set (in ms).
*/
unsigned int rxrpc_requested_ack_delay = 1;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+unsigned int rxrpc_soft_ack_delay = 1 * 1000;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
* ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
/*
* Receive window size in packets. This indicates the maximum number of
@@ -50,7 +55,10 @@ unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
* limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
* packets.
*/
-unsigned int rxrpc_rx_window_size = 32;
+unsigned int rxrpc_rx_window_size = RXRPC_INIT_RX_WINDOW_SIZE;
+#if (RXRPC_RXTX_BUFF_SIZE - 1) < RXRPC_INIT_RX_WINDOW_SIZE
+#error Need to reduce RXRPC_INIT_RX_WINDOW_SIZE
+#endif
/*
* Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
@@ -64,6 +72,11 @@ unsigned int rxrpc_rx_mtu = 5692;
*/
unsigned int rxrpc_rx_jumbo_max = 4;
+/*
+ * Time till packet resend (in milliseconds).
+ */
+unsigned int rxrpc_resend_timeout = 4 * 1000;
+
const char *const rxrpc_pkts[] = {
"?00",
"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
@@ -75,21 +88,152 @@ const s8 rxrpc_ack_priority[] = {
[RXRPC_ACK_DELAY] = 1,
[RXRPC_ACK_REQUESTED] = 2,
[RXRPC_ACK_IDLE] = 3,
- [RXRPC_ACK_PING_RESPONSE] = 4,
- [RXRPC_ACK_DUPLICATE] = 5,
- [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
- [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
- [RXRPC_ACK_NOSPACE] = 8,
-};
-
-const char *rxrpc_acks(u8 reason)
-{
- static const char *const str[] = {
- "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
- "IDL", "-?-"
- };
-
- if (reason >= ARRAY_SIZE(str))
- reason = ARRAY_SIZE(str) - 1;
- return str[reason];
-}
+ [RXRPC_ACK_DUPLICATE] = 4,
+ [RXRPC_ACK_OUT_OF_SEQUENCE] = 5,
+ [RXRPC_ACK_EXCEEDS_WINDOW] = 6,
+ [RXRPC_ACK_NOSPACE] = 7,
+ [RXRPC_ACK_PING_RESPONSE] = 8,
+ [RXRPC_ACK_PING] = 9,
+};
+
+const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
+ "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
+ "IDL", "-?-"
+};
+
+const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7] = {
+ [rxrpc_skb_rx_cleaned] = "Rx CLN",
+ [rxrpc_skb_rx_freed] = "Rx FRE",
+ [rxrpc_skb_rx_got] = "Rx GOT",
+ [rxrpc_skb_rx_lost] = "Rx *L*",
+ [rxrpc_skb_rx_received] = "Rx RCV",
+ [rxrpc_skb_rx_purged] = "Rx PUR",
+ [rxrpc_skb_rx_rotated] = "Rx ROT",
+ [rxrpc_skb_rx_seen] = "Rx SEE",
+ [rxrpc_skb_tx_cleaned] = "Tx CLN",
+ [rxrpc_skb_tx_freed] = "Tx FRE",
+ [rxrpc_skb_tx_got] = "Tx GOT",
+ [rxrpc_skb_tx_new] = "Tx NEW",
+ [rxrpc_skb_tx_rotated] = "Tx ROT",
+ [rxrpc_skb_tx_seen] = "Tx SEE",
+};
+
+const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4] = {
+ [rxrpc_conn_new_client] = "NWc",
+ [rxrpc_conn_new_service] = "NWs",
+ [rxrpc_conn_queued] = "QUE",
+ [rxrpc_conn_seen] = "SEE",
+ [rxrpc_conn_got] = "GOT",
+ [rxrpc_conn_put_client] = "PTc",
+ [rxrpc_conn_put_service] = "PTs",
+};
+
+const char rxrpc_client_traces[rxrpc_client__nr_trace][7] = {
+ [rxrpc_client_activate_chans] = "Activa",
+ [rxrpc_client_alloc] = "Alloc ",
+ [rxrpc_client_chan_activate] = "ChActv",
+ [rxrpc_client_chan_disconnect] = "ChDisc",
+ [rxrpc_client_chan_pass] = "ChPass",
+ [rxrpc_client_chan_unstarted] = "ChUnst",
+ [rxrpc_client_cleanup] = "Clean ",
+ [rxrpc_client_count] = "Count ",
+ [rxrpc_client_discard] = "Discar",
+ [rxrpc_client_duplicate] = "Duplic",
+ [rxrpc_client_exposed] = "Expose",
+ [rxrpc_client_replace] = "Replac",
+ [rxrpc_client_to_active] = "->Actv",
+ [rxrpc_client_to_culled] = "->Cull",
+ [rxrpc_client_to_idle] = "->Idle",
+ [rxrpc_client_to_inactive] = "->Inac",
+ [rxrpc_client_to_waiting] = "->Wait",
+ [rxrpc_client_uncount] = "Uncoun",
+};
+
+const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4] = {
+ [rxrpc_transmit_wait] = "WAI",
+ [rxrpc_transmit_queue] = "QUE",
+ [rxrpc_transmit_queue_last] = "QLS",
+ [rxrpc_transmit_rotate] = "ROT",
+ [rxrpc_transmit_rotate_last] = "RLS",
+ [rxrpc_transmit_await_reply] = "AWR",
+ [rxrpc_transmit_end] = "END",
+};
+
+const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4] = {
+ [rxrpc_receive_incoming] = "INC",
+ [rxrpc_receive_queue] = "QUE",
+ [rxrpc_receive_queue_last] = "QLS",
+ [rxrpc_receive_front] = "FRN",
+ [rxrpc_receive_rotate] = "ROT",
+ [rxrpc_receive_end] = "END",
+};
+
+const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5] = {
+ [rxrpc_recvmsg_enter] = "ENTR",
+ [rxrpc_recvmsg_wait] = "WAIT",
+ [rxrpc_recvmsg_dequeue] = "DEQU",
+ [rxrpc_recvmsg_hole] = "HOLE",
+ [rxrpc_recvmsg_next] = "NEXT",
+ [rxrpc_recvmsg_cont] = "CONT",
+ [rxrpc_recvmsg_full] = "FULL",
+ [rxrpc_recvmsg_data_return] = "DATA",
+ [rxrpc_recvmsg_terminal] = "TERM",
+ [rxrpc_recvmsg_to_be_accepted] = "TBAC",
+ [rxrpc_recvmsg_return] = "RETN",
+};
+
+const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5] = {
+ [rxrpc_rtt_tx_ping] = "PING",
+ [rxrpc_rtt_tx_data] = "DATA",
+};
+
+const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5] = {
+ [rxrpc_rtt_rx_ping_response] = "PONG",
+ [rxrpc_rtt_rx_requested_ack] = "RACK",
+};
+
+const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = {
+ [rxrpc_timer_begin] = "Begin ",
+ [rxrpc_timer_expired] = "*EXPR*",
+ [rxrpc_timer_init_for_reply] = "IniRpl",
+ [rxrpc_timer_set_for_ack] = "SetAck",
+ [rxrpc_timer_set_for_send] = "SetTx ",
+ [rxrpc_timer_set_for_resend] = "SetRTx",
+};
+
+const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = {
+ [rxrpc_propose_ack_client_tx_end] = "ClTxEnd",
+ [rxrpc_propose_ack_input_data] = "DataIn ",
+ [rxrpc_propose_ack_ping_for_lost_ack] = "LostAck",
+ [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl",
+ [rxrpc_propose_ack_ping_for_params] = "Params ",
+ [rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack",
+ [rxrpc_propose_ack_respond_to_ping] = "Rsp2Png",
+ [rxrpc_propose_ack_retry_tx] = "RetryTx",
+ [rxrpc_propose_ack_rotate_rx] = "RxAck ",
+ [rxrpc_propose_ack_terminal_ack] = "ClTerm ",
+};
+
+const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes] = {
+ [rxrpc_propose_ack_use] = "",
+ [rxrpc_propose_ack_update] = " Update",
+ [rxrpc_propose_ack_subsume] = " Subsume",
+};
+
+const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10] = {
+ [RXRPC_CALL_SLOW_START] = "SlowStart",
+ [RXRPC_CALL_CONGEST_AVOIDANCE] = "CongAvoid",
+ [RXRPC_CALL_PACKET_LOSS] = "PktLoss ",
+ [RXRPC_CALL_FAST_RETRANSMIT] = "FastReTx ",
+};
+
+const char rxrpc_congest_changes[rxrpc_congest__nr_change][9] = {
+ [rxrpc_cong_begin_retransmission] = " Retrans",
+ [rxrpc_cong_cleared_nacks] = " Cleared",
+ [rxrpc_cong_new_low_nack] = " NewLowN",
+ [rxrpc_cong_no_change] = "",
+ [rxrpc_cong_progress] = " Progres",
+ [rxrpc_cong_retransmit_again] = " ReTxAgn",
+ [rxrpc_cong_rtt_window_end] = " RttWinE",
+ [rxrpc_cong_saw_nack] = " SawNack",
+};
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f4bda06b7d2d..0d47db886f6e 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -14,336 +14,326 @@
#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
-#include <linux/circ_buf.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
- * Time till packet resend (in jiffies).
- */
-unsigned int rxrpc_resend_timeout = 4 * HZ;
-
-static int rxrpc_send_data(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct msghdr *msg, size_t len);
+struct rxrpc_pkt_buffer {
+ struct rxrpc_wire_header whdr;
+ union {
+ struct {
+ struct rxrpc_ackpacket ack;
+ u8 acks[255];
+ u8 pad[3];
+ };
+ __be32 abort_code;
+ };
+ struct rxrpc_ackinfo ackinfo;
+};
/*
- * extract control messages from the sendmsg() control buffer
+ * Fill out an ACK packet.
*/
-static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
- unsigned long *user_call_ID,
- enum rxrpc_command *command,
- u32 *abort_code,
- bool *_exclusive)
+static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
+ struct rxrpc_pkt_buffer *pkt,
+ rxrpc_seq_t *_hard_ack,
+ rxrpc_seq_t *_top)
{
- struct cmsghdr *cmsg;
- bool got_user_ID = false;
- int len;
-
- *command = RXRPC_CMD_SEND_DATA;
-
- if (msg->msg_controllen == 0)
- return -EINVAL;
-
- for_each_cmsghdr(cmsg, msg) {
- if (!CMSG_OK(msg, cmsg))
- return -EINVAL;
-
- len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
- _debug("CMSG %d, %d, %d",
- cmsg->cmsg_level, cmsg->cmsg_type, len);
-
- if (cmsg->cmsg_level != SOL_RXRPC)
- continue;
-
- switch (cmsg->cmsg_type) {
- case RXRPC_USER_CALL_ID:
- if (msg->msg_flags & MSG_CMSG_COMPAT) {
- if (len != sizeof(u32))
- return -EINVAL;
- *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
- } else {
- if (len != sizeof(unsigned long))
- return -EINVAL;
- *user_call_ID = *(unsigned long *)
- CMSG_DATA(cmsg);
- }
- _debug("User Call ID %lx", *user_call_ID);
- got_user_ID = true;
- break;
-
- case RXRPC_ABORT:
- if (*command != RXRPC_CMD_SEND_DATA)
- return -EINVAL;
- *command = RXRPC_CMD_SEND_ABORT;
- if (len != sizeof(*abort_code))
- return -EINVAL;
- *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
- _debug("Abort %x", *abort_code);
- if (*abort_code == 0)
- return -EINVAL;
- break;
-
- case RXRPC_ACCEPT:
- if (*command != RXRPC_CMD_SEND_DATA)
- return -EINVAL;
- *command = RXRPC_CMD_ACCEPT;
- if (len != 0)
- return -EINVAL;
- break;
-
- case RXRPC_EXCLUSIVE_CALL:
- *_exclusive = true;
- if (len != 0)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top, seq;
+ int ix;
+ u32 mtu, jmax;
+ u8 *ackp = pkt->acks;
+
+ /* Barrier against rxrpc_input_data(). */
+ serial = call->ackr_serial;
+ hard_ack = READ_ONCE(call->rx_hard_ack);
+ top = smp_load_acquire(&call->rx_top);
+ *_hard_ack = hard_ack;
+ *_top = top;
+
+ pkt->ack.bufferSpace = htons(8);
+ pkt->ack.maxSkew = htons(call->ackr_skew);
+ pkt->ack.firstPacket = htonl(hard_ack + 1);
+ pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
+ pkt->ack.serial = htonl(serial);
+ pkt->ack.reason = call->ackr_reason;
+ pkt->ack.nAcks = top - hard_ack;
+
+ if (pkt->ack.reason == RXRPC_ACK_PING)
+ pkt->whdr.flags |= RXRPC_REQUEST_ACK;
+
+ if (after(top, hard_ack)) {
+ seq = hard_ack + 1;
+ do {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ if (call->rxtx_buffer[ix])
+ *ackp++ = RXRPC_ACK_TYPE_ACK;
+ else
+ *ackp++ = RXRPC_ACK_TYPE_NACK;
+ seq++;
+ } while (before_eq(seq, top));
}
- if (!got_user_ID)
- return -EINVAL;
- _leave(" = 0");
- return 0;
+ mtu = call->conn->params.peer->if_mtu;
+ mtu -= call->conn->params.peer->hdrsize;
+ jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
+ pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
+ pkt->ackinfo.maxMTU = htonl(mtu);
+ pkt->ackinfo.rwind = htonl(call->rx_winsize);
+ pkt->ackinfo.jumbo_max = htonl(jmax);
+
+ *ackp++ = 0;
+ *ackp++ = 0;
+ *ackp++ = 0;
+ return top - hard_ack + 3;
}
/*
- * abort a call, sending an ABORT packet to the peer
+ * Send an ACK or ABORT call packet.
*/
-static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
+int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
{
- write_lock_bh(&call->state_lock);
-
- if (call->state <= RXRPC_CALL_COMPLETE) {
- call->state = RXRPC_CALL_LOCALLY_ABORTED;
- call->local_abort = abort_code;
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- rxrpc_queue_call(call);
+ struct rxrpc_connection *conn = NULL;
+ struct rxrpc_pkt_buffer *pkt;
+ struct msghdr msg;
+ struct kvec iov[2];
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top;
+ size_t len, n;
+ bool ping = false;
+ int ioc, ret;
+ u32 abort_code;
+
+ _enter("%u,%s", call->debug_id, rxrpc_pkts[type]);
+
+ spin_lock_bh(&call->lock);
+ if (call->conn)
+ conn = rxrpc_get_connection_maybe(call->conn);
+ spin_unlock_bh(&call->lock);
+ if (!conn)
+ return -ECONNRESET;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt) {
+ rxrpc_put_connection(conn);
+ return -ENOMEM;
}
- write_unlock_bh(&call->state_lock);
-}
-
-/*
- * Create a new client call for sendmsg().
- */
-static struct rxrpc_call *
-rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
- unsigned long user_call_ID, bool exclusive)
-{
- struct rxrpc_conn_parameters cp;
- struct rxrpc_call *call;
- struct key *key;
-
- DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ pkt->whdr.epoch = htonl(conn->proto.epoch);
+ pkt->whdr.cid = htonl(call->cid);
+ pkt->whdr.callNumber = htonl(call->call_id);
+ pkt->whdr.seq = 0;
+ pkt->whdr.type = type;
+ pkt->whdr.flags = conn->out_clientflag;
+ pkt->whdr.userStatus = 0;
+ pkt->whdr.securityIndex = call->security_ix;
+ pkt->whdr._rsvd = 0;
+ pkt->whdr.serviceId = htons(call->service_id);
+
+ iov[0].iov_base = pkt;
+ iov[0].iov_len = sizeof(pkt->whdr);
+ len = sizeof(pkt->whdr);
+
+ switch (type) {
+ case RXRPC_PACKET_TYPE_ACK:
+ spin_lock_bh(&call->lock);
+ if (!call->ackr_reason) {
+ spin_unlock_bh(&call->lock);
+ ret = 0;
+ goto out;
+ }
+ ping = (call->ackr_reason == RXRPC_ACK_PING);
+ n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top);
+ call->ackr_reason = 0;
- _enter("");
+ spin_unlock_bh(&call->lock);
- if (!msg->msg_name)
- return ERR_PTR(-EDESTADDRREQ);
- key = rx->key;
- if (key && !rx->key->payload.data[0])
- key = NULL;
+ pkt->whdr.flags |= RXRPC_SLOW_START_OK;
- memset(&cp, 0, sizeof(cp));
- cp.local = rx->local;
- cp.key = rx->key;
- cp.security_level = rx->min_sec_level;
- cp.exclusive = rx->exclusive | exclusive;
- cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+ iov[0].iov_len += sizeof(pkt->ack) + n;
+ iov[1].iov_base = &pkt->ackinfo;
+ iov[1].iov_len = sizeof(pkt->ackinfo);
+ len += sizeof(pkt->ack) + n + sizeof(pkt->ackinfo);
+ ioc = 2;
+ break;
- _leave(" = %p\n", call);
- return call;
-}
+ case RXRPC_PACKET_TYPE_ABORT:
+ abort_code = call->abort_code;
+ pkt->abort_code = htonl(abort_code);
+ iov[0].iov_len += sizeof(pkt->abort_code);
+ len += sizeof(pkt->abort_code);
+ ioc = 1;
+ break;
-/*
- * send a message forming part of a client call through an RxRPC socket
- * - caller holds the socket locked
- * - the socket may be either a client socket or a server socket
- */
-int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
-{
- enum rxrpc_command cmd;
- struct rxrpc_call *call;
- unsigned long user_call_ID = 0;
- bool exclusive = false;
- u32 abort_code = 0;
- int ret;
-
- _enter("");
-
- ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
- &exclusive);
- if (ret < 0)
- return ret;
-
- if (cmd == RXRPC_CMD_ACCEPT) {
- if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
- return -EINVAL;
- call = rxrpc_accept_call(rx, user_call_ID);
- if (IS_ERR(call))
- return PTR_ERR(call);
- rxrpc_put_call(call);
- return 0;
+ default:
+ BUG();
+ ret = -ENOANO;
+ goto out;
}
- call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
- if (!call) {
- if (cmd != RXRPC_CMD_SEND_DATA)
- return -EBADSLT;
- call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
- exclusive);
- if (IS_ERR(call))
- return PTR_ERR(call);
+ serial = atomic_inc_return(&conn->serial);
+ pkt->whdr.serial = htonl(serial);
+ switch (type) {
+ case RXRPC_PACKET_TYPE_ACK:
+ trace_rxrpc_tx_ack(call, serial,
+ ntohl(pkt->ack.firstPacket),
+ ntohl(pkt->ack.serial),
+ pkt->ack.reason, pkt->ack.nAcks);
+ break;
}
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
-
- if (call->state >= RXRPC_CALL_COMPLETE) {
- /* it's too late for this call */
- ret = -ECONNRESET;
- } else if (cmd == RXRPC_CMD_SEND_ABORT) {
- rxrpc_send_abort(call, abort_code);
- ret = 0;
- } else if (cmd != RXRPC_CMD_SEND_DATA) {
- ret = -EINVAL;
- } else if (!call->in_clientflag &&
- call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
- /* request phase complete for this client call */
- ret = -EPROTO;
- } else if (call->in_clientflag &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
- /* Reply phase not begun or not complete for service call. */
- ret = -EPROTO;
- } else {
- ret = rxrpc_send_data(rx, call, msg, len);
+ if (ping) {
+ call->ackr_ping = serial;
+ smp_wmb();
+ /* We need to stick a time in before we send the packet in case
+ * the reply gets back before kernel_sendmsg() completes - but
+ * asking UDP to send the packet can take a relatively long
+ * time, so we update the time after, on the assumption that
+ * the packet transmission is more likely to happen towards the
+ * end of the kernel_sendmsg() call.
+ */
+ call->ackr_ping_time = ktime_get_real();
+ set_bit(RXRPC_CALL_PINGING, &call->flags);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
}
-
- rxrpc_put_call(call);
- _leave(" = %d", ret);
- return ret;
-}
-
-/**
- * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
- * @call: The call to send data through
- * @msg: The data to send
- * @len: The amount of data to send
- *
- * Allow a kernel service to send data on a call. The call must be in an state
- * appropriate to sending data. No control data should be supplied in @msg,
- * nor should an address be supplied. MSG_MORE should be flagged if there's
- * more data to come, otherwise this data will end the transmission phase.
- */
-int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
- size_t len)
-{
- int ret;
-
- _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
-
- ASSERTCMP(msg->msg_name, ==, NULL);
- ASSERTCMP(msg->msg_control, ==, NULL);
-
- lock_sock(&call->socket->sk);
-
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
-
- if (call->state >= RXRPC_CALL_COMPLETE) {
- ret = -ESHUTDOWN; /* it's too late for this call */
- } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
- ret = -EPROTO; /* request phase complete for this client call */
- } else {
- ret = rxrpc_send_data(call->socket, call, msg, len);
+ ret = kernel_sendmsg(conn->params.local->socket,
+ &msg, iov, ioc, len);
+ if (ping)
+ call->ackr_ping_time = ktime_get_real();
+
+ if (type == RXRPC_PACKET_TYPE_ACK &&
+ call->state < RXRPC_CALL_COMPLETE) {
+ if (ret < 0) {
+ clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_propose_ACK(call, pkt->ack.reason,
+ ntohs(pkt->ack.maxSkew),
+ ntohl(pkt->ack.serial),
+ true, true,
+ rxrpc_propose_ack_retry_tx);
+ } else {
+ spin_lock_bh(&call->lock);
+ if (after(hard_ack, call->ackr_consumed))
+ call->ackr_consumed = hard_ack;
+ if (after(top, call->ackr_seen))
+ call->ackr_seen = top;
+ spin_unlock_bh(&call->lock);
+ }
}
- release_sock(&call->socket->sk);
- _leave(" = %d", ret);
+out:
+ rxrpc_put_connection(conn);
+ kfree(pkt);
return ret;
}
-EXPORT_SYMBOL(rxrpc_kernel_send_data);
-
-/**
- * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
- * @call: The call to be aborted
- * @abort_code: The abort code to stick into the ABORT packet
- *
- * Allow a kernel service to abort a call, if it's still in an abortable state.
- */
-void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
-{
- _enter("{%d},%d", call->debug_id, abort_code);
-
- lock_sock(&call->socket->sk);
-
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
-
- if (call->state < RXRPC_CALL_COMPLETE)
- rxrpc_send_abort(call, abort_code);
-
- release_sock(&call->socket->sk);
- _leave("");
-}
-
-EXPORT_SYMBOL(rxrpc_kernel_abort_call);
-
/*
* send a packet through the transport endpoint
*/
-int rxrpc_send_data_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
+int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ bool retrans)
{
- struct kvec iov[1];
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_wire_header whdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct msghdr msg;
+ struct kvec iov[2];
+ rxrpc_serial_t serial;
+ size_t len;
+ bool lost = false;
int ret, opt;
_enter(",{%d}", skb->len);
- iov[0].iov_base = skb->head;
- iov[0].iov_len = skb->len;
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = atomic_inc_return(&conn->serial);
+
+ whdr.epoch = htonl(conn->proto.epoch);
+ whdr.cid = htonl(call->cid);
+ whdr.callNumber = htonl(call->call_id);
+ whdr.seq = htonl(sp->hdr.seq);
+ whdr.serial = htonl(serial);
+ whdr.type = RXRPC_PACKET_TYPE_DATA;
+ whdr.flags = sp->hdr.flags;
+ whdr.userStatus = 0;
+ whdr.securityIndex = call->security_ix;
+ whdr._rsvd = htons(sp->hdr._rsvd);
+ whdr.serviceId = htons(call->service_id);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = skb->head;
+ iov[1].iov_len = skb->len;
+ len = iov[0].iov_len + iov[1].iov_len;
- msg.msg_name = &conn->params.peer->srx.transport;
- msg.msg_namelen = conn->params.peer->srx.transport_len;
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
- /* send the packet with the don't fragment bit set if we currently
- * think it's small enough */
- if (skb->len - sizeof(struct rxrpc_wire_header) < conn->params.peer->maxdata) {
- down_read(&conn->params.local->defrag_sem);
- /* send the packet by UDP
- * - returns -EMSGSIZE if UDP would have to fragment the packet
- * to go out of the interface
- * - in which case, we'll have processed the ICMP error
- * message and update the peer record
- */
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
- iov[0].iov_len);
+ /* If our RTT cache needs working on, request an ACK. Also request
+ * ACKs if a DATA packet appears to have been lost.
+ */
+ if (retrans ||
+ call->cong_mode == RXRPC_CALL_SLOW_START ||
+ (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+ ktime_get_real()))
+ whdr.flags |= RXRPC_REQUEST_ACK;
+
+ if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
+ static int lose;
+ if ((lose++ & 7) == 7) {
+ ret = 0;
+ lost = true;
+ goto done;
+ }
+ }
- up_read(&conn->params.local->defrag_sem);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
+ _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);
- _leave(" = %d [%u]", ret, conn->params.peer->maxdata);
- return ret;
+ /* send the packet with the don't fragment bit set if we currently
+ * think it's small enough */
+ if (iov[1].iov_len >= call->peer->maxdata)
+ goto send_fragmentable;
+
+ down_read(&conn->params.local->defrag_sem);
+ /* send the packet by UDP
+ * - returns -EMSGSIZE if UDP would have to fragment the packet
+ * to go out of the interface
+ * - in which case, we'll have processed the ICMP error
+ * message and updated the peer record
+ */
+ ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+
+ up_read(&conn->params.local->defrag_sem);
+ if (ret == -EMSGSIZE)
+ goto send_fragmentable;
+
+done:
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
+ retrans, lost);
+ if (ret >= 0) {
+ ktime_t now = ktime_get_real();
+ skb->tstamp = now;
+ smp_wmb();
+ sp->hdr.serial = serial;
+ if (whdr.flags & RXRPC_REQUEST_ACK) {
+ call->peer->rtt_last_req = now;
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+ }
}
+ _leave(" = %d [%u]", ret, call->peer->maxdata);
+ return ret;
send_fragmentable:
/* attempt to send this message with fragmentation enabled */
@@ -358,8 +348,8 @@ send_fragmentable:
SOL_IP, IP_MTU_DISCOVER,
(char *)&opt, sizeof(opt));
if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
- iov[0].iov_len);
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 2, len);
opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -367,355 +357,82 @@ send_fragmentable:
(char *)&opt, sizeof(opt));
}
break;
- }
-
- up_write(&conn->params.local->defrag_sem);
- _leave(" = %d [frag %u]", ret, conn->params.peer->maxdata);
- return ret;
-}
-/*
- * wait for space to appear in the transmit/ACK window
- * - caller holds the socket locked
- */
-static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- long *timeo)
-{
- DECLARE_WAITQUEUE(myself, current);
- int ret;
-
- _enter(",{%d},%ld",
- CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
- call->acks_winsz),
- *timeo);
-
- add_wait_queue(&call->tx_waitq, &myself);
-
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- ret = 0;
- if (CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) > 0)
- break;
- if (signal_pending(current)) {
- ret = sock_intr_errno(*timeo);
- break;
- }
-
- release_sock(&rx->sk);
- *timeo = schedule_timeout(*timeo);
- lock_sock(&rx->sk);
- }
-
- remove_wait_queue(&call->tx_waitq, &myself);
- set_current_state(TASK_RUNNING);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * attempt to schedule an instant Tx resend
- */
-static inline void rxrpc_instant_resend(struct rxrpc_call *call)
-{
- read_lock_bh(&call->state_lock);
- if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
- rxrpc_queue_call(call);
- }
- read_unlock_bh(&call->state_lock);
-}
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ opt = IPV6_PMTUDISC_DONT;
+ ret = kernel_setsockopt(conn->params.local->socket,
+ SOL_IPV6, IPV6_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
+ if (ret == 0) {
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 1, iov[0].iov_len);
-/*
- * queue a packet for transmission, set the resend timer and attempt
- * to send the packet immediately
- */
-static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
- bool last)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- int ret;
-
- _net("queue skb %p [%d]", skb, call->acks_head);
-
- ASSERT(call->acks_window != NULL);
- call->acks_window[call->acks_head] = (unsigned long) skb;
- smp_wmb();
- call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);
-
- if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
- _debug("________awaiting reply/ACK__________");
- write_lock_bh(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
- break;
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- call->state = RXRPC_CALL_SERVER_SEND_REPLY;
- if (!last)
- break;
- case RXRPC_CALL_SERVER_SEND_REPLY:
- call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
- break;
- default:
- break;
+ opt = IPV6_PMTUDISC_DO;
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IPV6, IPV6_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
}
- write_unlock_bh(&call->state_lock);
- }
-
- _proto("Tx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
-
- sp->need_resend = false;
- sp->resend_at = jiffies + rxrpc_resend_timeout;
- if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
- _debug("run timer");
- call->resend_timer.expires = sp->resend_at;
- add_timer(&call->resend_timer);
- }
-
- /* attempt to cancel the rx-ACK timer, deferring reply transmission if
- * we're ACK'ing the request phase of an incoming call */
- ret = -EAGAIN;
- if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
- /* the packet may be freed by rxrpc_process_call() before this
- * returns */
- ret = rxrpc_send_data_packet(call->conn, skb);
- _net("sent skb %p", skb);
- } else {
- _debug("failed to delete ACK timer");
- }
-
- if (ret < 0) {
- _debug("need instant resend %d", ret);
- sp->need_resend = true;
- rxrpc_instant_resend(call);
+ break;
+#endif
}
- _leave("");
-}
-
-/*
- * Convert a host-endian header into a network-endian header.
- */
-static void rxrpc_insert_header(struct sk_buff *skb)
-{
- struct rxrpc_wire_header whdr;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.seq = htonl(sp->hdr.seq);
- whdr.serial = htonl(sp->hdr.serial);
- whdr.type = sp->hdr.type;
- whdr.flags = sp->hdr.flags;
- whdr.userStatus = sp->hdr.userStatus;
- whdr.securityIndex = sp->hdr.securityIndex;
- whdr._rsvd = htons(sp->hdr._rsvd);
- whdr.serviceId = htons(sp->hdr.serviceId);
-
- memcpy(skb->head, &whdr, sizeof(whdr));
+ up_write(&conn->params.local->defrag_sem);
+ goto done;
}
/*
- * send data through a socket
- * - must be called in process context
- * - caller holds the socket locked
+ * reject packets through the local endpoint
*/
-static int rxrpc_send_data(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct msghdr *msg, size_t len)
+void rxrpc_reject_packets(struct rxrpc_local *local)
{
+ struct sockaddr_rxrpc srx;
struct rxrpc_skb_priv *sp;
+ struct rxrpc_wire_header whdr;
struct sk_buff *skb;
- struct sock *sk = &rx->sk;
- long timeo;
- bool more;
- int ret, copied;
-
- timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
-
- /* this should be in poll */
- sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- return -EPIPE;
-
- more = msg->msg_flags & MSG_MORE;
-
- skb = call->tx_pending;
- call->tx_pending = NULL;
-
- copied = 0;
- do {
- if (!skb) {
- size_t size, chunk, max, space;
-
- _debug("alloc");
-
- if (CIRC_SPACE(call->acks_head,
- ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) <= 0) {
- ret = -EAGAIN;
- if (msg->msg_flags & MSG_DONTWAIT)
- goto maybe_error;
- ret = rxrpc_wait_for_tx_window(rx, call,
- &timeo);
- if (ret < 0)
- goto maybe_error;
- }
-
- max = call->conn->params.peer->maxdata;
- max -= call->conn->security_size;
- max &= ~(call->conn->size_align - 1UL);
-
- chunk = max;
- if (chunk > msg_data_left(msg) && !more)
- chunk = msg_data_left(msg);
-
- space = chunk + call->conn->size_align;
- space &= ~(call->conn->size_align - 1UL);
-
- size = space + call->conn->header_size;
-
- _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
-
- /* create a buffer that we can retain until it's ACK'd */
- skb = sock_alloc_send_skb(
- sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
- if (!skb)
- goto maybe_error;
-
- rxrpc_new_skb(skb);
-
- _debug("ALLOC SEND %p", skb);
-
- ASSERTCMP(skb->mark, ==, 0);
+ struct msghdr msg;
+ struct kvec iov[2];
+ size_t size;
+ __be32 code;
- _debug("HS: %u", call->conn->header_size);
- skb_reserve(skb, call->conn->header_size);
- skb->len += call->conn->header_size;
+ _enter("%d", local->debug_id);
- sp = rxrpc_skb(skb);
- sp->remain = chunk;
- if (sp->remain > skb_tailroom(skb))
- sp->remain = skb_tailroom(skb);
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = &code;
+ iov[1].iov_len = sizeof(code);
+ size = sizeof(whdr) + sizeof(code);
- _net("skb: hr %d, tr %d, hl %d, rm %d",
- skb_headroom(skb),
- skb_tailroom(skb),
- skb_headlen(skb),
- sp->remain);
+ msg.msg_name = &srx.transport;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ memset(&whdr, 0, sizeof(whdr));
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
- _debug("append");
+ while ((skb = skb_dequeue(&local->reject_queue))) {
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
sp = rxrpc_skb(skb);
- /* append next segment of data to the current buffer */
- if (msg_data_left(msg) > 0) {
- int copy = skb_tailroom(skb);
- ASSERTCMP(copy, >, 0);
- if (copy > msg_data_left(msg))
- copy = msg_data_left(msg);
- if (copy > sp->remain)
- copy = sp->remain;
-
- _debug("add");
- ret = skb_add_data(skb, &msg->msg_iter, copy);
- _debug("added");
- if (ret < 0)
- goto efault;
- sp->remain -= copy;
- skb->mark += copy;
- copied += copy;
- }
+ if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
+ msg.msg_namelen = srx.transport_len;
- /* check for the far side aborting the call or a network error
- * occurring */
- if (call->state > RXRPC_CALL_COMPLETE)
- goto call_aborted;
-
- /* add the packet to the send queue if it's now full */
- if (sp->remain <= 0 ||
- (msg_data_left(msg) == 0 && !more)) {
- struct rxrpc_connection *conn = call->conn;
- uint32_t seq;
- size_t pad;
-
- /* pad out if we're using security */
- if (conn->security_ix) {
- pad = conn->security_size + skb->mark;
- pad = conn->size_align - pad;
- pad &= conn->size_align - 1;
- _debug("pad %zu", pad);
- if (pad)
- memset(skb_put(skb, pad), 0, pad);
- }
-
- seq = atomic_inc_return(&call->sequence);
-
- sp->hdr.epoch = conn->proto.epoch;
- sp->hdr.cid = call->cid;
- sp->hdr.callNumber = call->call_id;
- sp->hdr.seq = seq;
- sp->hdr.serial = atomic_inc_return(&conn->serial);
- sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
- sp->hdr.userStatus = 0;
- sp->hdr.securityIndex = conn->security_ix;
- sp->hdr._rsvd = 0;
- sp->hdr.serviceId = call->service_id;
-
- sp->hdr.flags = conn->out_clientflag;
- if (msg_data_left(msg) == 0 && !more)
- sp->hdr.flags |= RXRPC_LAST_PACKET;
- else if (CIRC_SPACE(call->acks_head,
- ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) > 1)
- sp->hdr.flags |= RXRPC_MORE_PACKETS;
- if (more && seq & 1)
- sp->hdr.flags |= RXRPC_REQUEST_ACK;
-
- ret = conn->security->secure_packet(
- call, skb, skb->mark,
- skb->head + sizeof(struct rxrpc_wire_header));
- if (ret < 0)
- goto out;
-
- rxrpc_insert_header(skb);
- rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
- skb = NULL;
- }
- } while (msg_data_left(msg) > 0);
+ code = htonl(skb->priority);
-success:
- ret = copied;
-out:
- call->tx_pending = skb;
- _leave(" = %d", ret);
- return ret;
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.serviceId = htons(sp->hdr.serviceId);
+ whdr.flags = sp->hdr.flags;
+ whdr.flags ^= RXRPC_CLIENT_INITIATED;
+ whdr.flags &= RXRPC_CLIENT_INITIATED;
-call_aborted:
- rxrpc_free_skb(skb);
- if (call->state == RXRPC_CALL_NETWORK_ERROR)
- ret = call->error_report < RXRPC_LOCAL_ERROR_OFFSET ?
- call->error_report :
- call->error_report - RXRPC_LOCAL_ERROR_OFFSET;
- else
- ret = -ECONNABORTED;
- _leave(" = %d", ret);
- return ret;
+ kernel_sendmsg(local->socket, &msg, iov, 2, size);
+ }
-maybe_error:
- if (copied)
- goto success;
- goto out;
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
-efault:
- ret = -EFAULT;
- goto out;
+ _leave("");
}
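rxrpc_fill_out_ack() above walks the receive ring between hard_ack+1 and top, emitting one ACK or NACK byte per slot depending on whether the buffer entry is present, then pads the soft-ACK list with three zero bytes, which is why it returns top - hard_ack + 3. The following is a small userspace sketch of that walk under the same assumptions (a power-of-two ring, wrapping sequence-number comparison); the names and the 'A'/'N' markers are illustrative, not the on-wire ACK type values.

/* Userspace sketch of the soft-ACK fill loop: one byte per outstanding
 * slot, plus three bytes of padding, as in rxrpc_fill_out_ack() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE	64		/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Wrapping sequence-number comparison, like the kernel's before_eq(). */
static int seq_before_eq(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

/* Emit one byte per slot between hard_ack+1 and top, then 3 pad bytes. */
static size_t fill_acks(void **ring, uint32_t hard_ack, uint32_t top,
			uint8_t *ackp)
{
	uint32_t seq;
	size_t n = 0;

	for (seq = hard_ack + 1; seq_before_eq(seq, top); seq++)
		ackp[n++] = ring[seq & RING_MASK] ? 'A' : 'N';

	memset(ackp + n, 0, 3);		/* trailing padding, as in "+ 3" */
	return n + 3;
}

int main(void)
{
	void *ring[RING_SIZE] = { NULL };
	uint8_t acks[RING_SIZE + 3];
	int dummy = 1;
	size_t n;

	ring[3 & RING_MASK] = &dummy;	/* seq 3 was received, seq 4 is missing */
	n = fill_acks(ring, 2, 4, acks);
	printf("%zu soft-ack bytes: %c%c\n", n - 3, acks[0], acks[1]);
	return 0;
}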
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 8940674b5e08..bf13b8470c9a 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -66,6 +66,32 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
}
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ srx.transport.sin6.sin6_port = serr->port;
+ srx.transport_len = sizeof(struct sockaddr_in6);
+ switch (serr->ee.ee_origin) {
+ case SO_EE_ORIGIN_ICMP6:
+ _net("Rx ICMP6");
+ memcpy(&srx.transport.sin6.sin6_addr,
+ skb_network_header(skb) + serr->addr_offset,
+ sizeof(struct in6_addr));
+ break;
+ case SO_EE_ORIGIN_ICMP:
+ _net("Rx ICMP on v6 sock");
+ memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12,
+ skb_network_header(skb) + serr->addr_offset,
+ sizeof(struct in_addr));
+ break;
+ default:
+ memcpy(&srx.transport.sin6.sin6_addr,
+ &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ break;
+ }
+ break;
+#endif
+
default:
BUG();
}
@@ -129,22 +155,21 @@ void rxrpc_error_report(struct sock *sk)
_leave("UDP socket errqueue empty");
return;
}
+ rxrpc_new_skb(skb, rxrpc_skb_rx_received);
serr = SKB_EXT_ERR(skb);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
- kfree_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
return;
}
- rxrpc_new_skb(skb);
-
rcu_read_lock();
peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
if (!peer) {
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
_leave(" [no peer]");
return;
}
@@ -154,7 +179,7 @@ void rxrpc_error_report(struct sock *sk)
serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
rxrpc_adjust_mtu(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
rxrpc_put_peer(peer);
_leave(" [MTU update]");
return;
@@ -162,7 +187,7 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_store_error(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
/* The ref we obtained is passed off to the work item */
rxrpc_queue_work(&peer->error_distributor);
@@ -248,13 +273,20 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
struct rxrpc_peer *peer =
container_of(work, struct rxrpc_peer, error_distributor);
struct rxrpc_call *call;
- int error_report;
+ enum rxrpc_call_completion compl;
+ int error;
_enter("");
- error_report = READ_ONCE(peer->error_report);
+ error = READ_ONCE(peer->error_report);
+ if (error < RXRPC_LOCAL_ERROR_OFFSET) {
+ compl = RXRPC_CALL_NETWORK_ERROR;
+ } else {
+ compl = RXRPC_CALL_LOCAL_ERROR;
+ error -= RXRPC_LOCAL_ERROR_OFFSET;
+ }
- _debug("ISSUE ERROR %d", error_report);
+ _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
spin_lock_bh(&peer->lock);
@@ -262,16 +294,10 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
call = hlist_entry(peer->error_targets.first,
struct rxrpc_call, error_link);
hlist_del_init(&call->error_link);
+ rxrpc_see_call(call);
- write_lock(&call->state_lock);
- if (call->state != RXRPC_CALL_COMPLETE &&
- call->state < RXRPC_CALL_NETWORK_ERROR) {
- call->error_report = error_report;
- call->state = RXRPC_CALL_NETWORK_ERROR;
- set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
- rxrpc_queue_call(call);
- }
- write_unlock(&call->state_lock);
+ if (rxrpc_set_call_completion(call, compl, 0, error))
+ rxrpc_notify_socket(call);
}
spin_unlock_bh(&peer->lock);
@@ -279,3 +305,44 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
rxrpc_put_peer(peer);
_leave("");
}
+
+/*
+ * Add RTT information to cache. This is called in softirq mode and has
+ * exclusive access to the peer RTT data.
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ ktime_t send_time, ktime_t resp_time)
+{
+ struct rxrpc_peer *peer = call->peer;
+ s64 rtt;
+ u64 sum = peer->rtt_sum, avg;
+ u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;
+
+ rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
+ if (rtt < 0)
+ return;
+
+ /* Replace the oldest datum in the RTT buffer */
+ sum -= peer->rtt_cache[cursor];
+ sum += rtt;
+ peer->rtt_cache[cursor] = rtt;
+ peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
+ peer->rtt_sum = sum;
+ if (usage < RXRPC_RTT_CACHE_SIZE) {
+ usage++;
+ peer->rtt_usage = usage;
+ }
+
+ /* Now recalculate the average */
+ if (usage == RXRPC_RTT_CACHE_SIZE) {
+ avg = sum / RXRPC_RTT_CACHE_SIZE;
+ } else {
+ avg = sum;
+ do_div(avg, usage);
+ }
+
+ peer->rtt = avg;
+ trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
+ usage, avg);
+}
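rxrpc_peer_add_rtt() above keeps a small power-of-two ring of RTT samples plus a running sum, so adding a sample costs one subtraction and one addition, and the average is taken over however many slots have been filled so far. Here is a minimal userspace sketch of that bookkeeping, with invented struct and constant names. The kernel uses do_div() for the partially-filled case because it must run on 32-bit targets; plain 64-bit division is fine in a sketch.

/* Userspace sketch of the rolling RTT average kept by rxrpc_peer_add_rtt()
 * above.  Names and the cache size are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define RTT_CACHE_SIZE	8	/* must be a power of two */

struct rtt_state {
	uint64_t cache[RTT_CACHE_SIZE];	/* samples in nanoseconds */
	uint64_t sum;
	unsigned int cursor;
	unsigned int usage;
	uint64_t avg;
};

static void rtt_add_sample(struct rtt_state *r, int64_t rtt_ns)
{
	if (rtt_ns < 0)
		return;		/* clock went backwards; discard */

	/* Replace the oldest sample and maintain the sum incrementally. */
	r->sum -= r->cache[r->cursor];
	r->sum += rtt_ns;
	r->cache[r->cursor] = rtt_ns;
	r->cursor = (r->cursor + 1) & (RTT_CACHE_SIZE - 1);
	if (r->usage < RTT_CACHE_SIZE)
		r->usage++;

	/* Average over the slots actually filled so far. */
	r->avg = r->sum / r->usage;
}

int main(void)
{
	struct rtt_state r = { { 0 } };
	int64_t samples[] = { 1000000, 3000000, 2000000 };	/* 1ms, 3ms, 2ms */

	for (unsigned int i = 0; i < 3; i++)
		rtt_add_sample(&r, samples[i]);

	printf("avg rtt = %llu ns\n", (unsigned long long)r.avg);
	return 0;
}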
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 538e9831c699..941b724d523b 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -16,12 +16,14 @@
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/ip6_route.h>
#include "ar-internal.h"
static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
@@ -50,6 +52,13 @@ static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
size = sizeof(srx->transport.sin.sin_addr);
p = (u16 *)&srx->transport.sin.sin_addr;
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ hash_key += (u16 __force)srx->transport.sin.sin_port;
+ size = sizeof(srx->transport.sin6.sin6_addr);
+ p = (u16 *)&srx->transport.sin6.sin6_addr;
+ break;
+#endif
default:
WARN(1, "AF_RXRPC: Unsupported transport address family\n");
return 0;
@@ -93,6 +102,14 @@ static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
memcmp(&peer->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ return ((u16 __force)peer->srx.transport.sin6.sin6_port -
+ (u16 __force)srx->transport.sin6.sin6_port) ?:
+ memcmp(&peer->srx.transport.sin6.sin6_addr,
+ &srx->transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+#endif
default:
BUG();
}
@@ -130,17 +147,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer) {
- switch (srx->transport.family) {
- case AF_INET:
- _net("PEER %d {%d,%u,%pI4+%hu}",
- peer->debug_id,
- peer->srx.transport_type,
- peer->srx.transport.family,
- &peer->srx.transport.sin.sin_addr,
- ntohs(peer->srx.transport.sin.sin_port));
- break;
- }
-
+ _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
}
return peer;
@@ -152,22 +159,53 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
*/
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
+ struct dst_entry *dst;
struct rtable *rt;
- struct flowi4 fl4;
+ struct flowi fl;
+ struct flowi4 *fl4 = &fl.u.ip4;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ struct flowi6 *fl6 = &fl.u.ip6;
+#endif
peer->if_mtu = 1500;
- rt = ip_route_output_ports(&init_net, &fl4, NULL,
- peer->srx.transport.sin.sin_addr.s_addr, 0,
- htons(7000), htons(7001),
- IPPROTO_UDP, 0, 0);
- if (IS_ERR(rt)) {
- _leave(" [route err %ld]", PTR_ERR(rt));
- return;
+ memset(&fl, 0, sizeof(fl));
+ switch (peer->srx.transport.family) {
+ case AF_INET:
+ rt = ip_route_output_ports(
+ &init_net, fl4, NULL,
+ peer->srx.transport.sin.sin_addr.s_addr, 0,
+ htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
+ if (IS_ERR(rt)) {
+ _leave(" [route err %ld]", PTR_ERR(rt));
+ return;
+ }
+ dst = &rt->dst;
+ break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ fl6->flowi6_iif = LOOPBACK_IFINDEX;
+ fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
+ fl6->flowi6_proto = IPPROTO_UDP;
+ memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ fl6->fl6_dport = htons(7001);
+ fl6->fl6_sport = htons(7000);
+ dst = ip6_route_output(&init_net, NULL, fl6);
+ if (IS_ERR(dst)) {
+ _leave(" [route err %ld]", PTR_ERR(dst));
+ return;
+ }
+ break;
+#endif
+
+ default:
+ BUG();
}
- peer->if_mtu = dst_mtu(&rt->dst);
- dst_release(&rt->dst);
+ peer->if_mtu = dst_mtu(dst);
+ dst_release(dst);
_leave(" [if_mtu %u]", peer->if_mtu);
}
@@ -199,6 +237,41 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
}
/*
+ * Initialise peer record.
+ */
+static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+{
+ peer->hash_key = hash_key;
+ rxrpc_assess_MTU_size(peer);
+ peer->mtu = peer->if_mtu;
+ peer->rtt_last_req = ktime_get_real();
+
+ switch (peer->srx.transport.family) {
+ case AF_INET:
+ peer->hdrsize = sizeof(struct iphdr);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ peer->hdrsize = sizeof(struct ipv6hdr);
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ switch (peer->srx.transport_type) {
+ case SOCK_DGRAM:
+ peer->hdrsize += sizeof(struct udphdr);
+ break;
+ default:
+ BUG();
+ }
+
+ peer->hdrsize += sizeof(struct rxrpc_wire_header);
+ peer->maxdata = peer->mtu - peer->hdrsize;
+}
+
+/*
* Set up a new peer.
*/
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
@@ -212,31 +285,40 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp);
if (peer) {
- peer->hash_key = hash_key;
memcpy(&peer->srx, srx, sizeof(*srx));
+ rxrpc_init_peer(peer, hash_key);
+ }
- rxrpc_assess_MTU_size(peer);
- peer->mtu = peer->if_mtu;
-
- if (srx->transport.family == AF_INET) {
- peer->hdrsize = sizeof(struct iphdr);
- switch (srx->transport_type) {
- case SOCK_DGRAM:
- peer->hdrsize += sizeof(struct udphdr);
- break;
- default:
- BUG();
- break;
- }
- } else {
- BUG();
- }
+ _leave(" = %p", peer);
+ return peer;
+}
+
+/*
+ * Set up a new incoming peer. The address is prestored in the preallocated
+ * peer.
+ */
+struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
+ struct rxrpc_peer *prealloc)
+{
+ struct rxrpc_peer *peer;
+ unsigned long hash_key;
+
+ hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
+ prealloc->local = local;
+ rxrpc_init_peer(prealloc, hash_key);
- peer->hdrsize += sizeof(struct rxrpc_wire_header);
- peer->maxdata = peer->mtu - peer->hdrsize;
+ spin_lock(&rxrpc_peer_hash_lock);
+
+ /* Need to check that we aren't racing with someone else */
+ peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ peer = prealloc;
+ hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key);
}
- _leave(" = %p", peer);
+ spin_unlock(&rxrpc_peer_hash_lock);
return peer;
}
@@ -249,11 +331,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct rxrpc_peer *peer, *candidate;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
- _enter("{%d,%d,%pI4+%hu}",
- srx->transport_type,
- srx->transport_len,
- &srx->transport.sin.sin_addr,
- ntohs(srx->transport.sin.sin_port));
+ _enter("{%pISp}", &srx->transport);
/* search the peer list first */
rcu_read_lock();
@@ -272,7 +350,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
return NULL;
}
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxrpc_peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
@@ -282,7 +360,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
hash_add_rcu(rxrpc_peer_hash,
&candidate->hash_link, hash_key);
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxrpc_peer_hash_lock);
if (peer)
kfree(candidate);
@@ -290,11 +368,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
peer = candidate;
}
- _net("PEER %d {%d,%pI4+%hu}",
- peer->debug_id,
- peer->srx.transport_type,
- &peer->srx.transport.sin.sin_addr,
- ntohs(peer->srx.transport.sin.sin_port));
+ _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
return peer;
@@ -307,9 +381,24 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxrpc_peer_hash_lock);
hash_del_rcu(&peer->hash_link);
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxrpc_peer_hash_lock);
kfree_rcu(peer, rcu);
}
+
+/**
+ * rxrpc_kernel_get_peer - Get the peer address of a call
+ * @sock: The socket on which the call is in progress.
+ * @call: The call to query
+ * @_srx: Where to place the result
+ *
+ * Get the address of the remote peer in a call.
+ */
+void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
+ struct sockaddr_rxrpc *_srx)
+{
+ *_srx = call->peer->srx;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_peer);
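rxrpc_init_peer() above derives peer->maxdata by subtracting the IP header, the UDP header and the rxrpc wire header from the interface MTU. A small userspace sketch of that arithmetic follows, with the conventional minimum header sizes hard-coded for illustration; the kernel takes them from sizeof() on the real structures.

/* Userspace sketch of the maxdata calculation in rxrpc_init_peer() above.
 * Header sizes are hard-coded illustrative values.
 */
#include <stdio.h>
#include <stdbool.h>

#define IPV4_HDR_SIZE		20
#define IPV6_HDR_SIZE		40
#define UDP_HDR_SIZE		8
#define RXRPC_WIRE_HDR_SIZE	28	/* the Rx wire header */

static unsigned int rxrpc_maxdata(unsigned int if_mtu, bool ipv6)
{
	unsigned int hdrsize = ipv6 ? IPV6_HDR_SIZE : IPV4_HDR_SIZE;

	hdrsize += UDP_HDR_SIZE + RXRPC_WIRE_HDR_SIZE;
	return if_mtu - hdrsize;
}

int main(void)
{
	/* 1500-byte Ethernet MTU: 1444 bytes of payload over IPv4,
	 * 1424 bytes over IPv6.
	 */
	printf("v4 maxdata = %u\n", rxrpc_maxdata(1500, false));
	printf("v6 maxdata = %u\n", rxrpc_maxdata(1500, true));
	return 0;
}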
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index ced5f07444e5..65cd980767fa 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -17,12 +17,12 @@
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
[RXRPC_CONN_UNUSED] = "Unused ",
[RXRPC_CONN_CLIENT] = "Client ",
+ [RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc",
[RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ",
[RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ",
[RXRPC_CONN_SERVICE] = "SvSecure",
[RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort",
[RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort",
- [RXRPC_CONN_NETWORK_ERROR] = "NetError",
};
/*
@@ -30,6 +30,7 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
{
+ rcu_read_lock();
read_lock(&rxrpc_call_lock);
return seq_list_start_head(&rxrpc_calls, *_pos);
}
@@ -42,17 +43,21 @@ static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
{
read_unlock(&rxrpc_call_lock);
+ rcu_read_unlock();
}
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
- struct rxrpc_connection *conn;
+ struct rxrpc_local *local;
+ struct rxrpc_sock *rx;
+ struct rxrpc_peer *peer;
struct rxrpc_call *call;
- char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+ char lbuff[50], rbuff[50];
if (v == &rxrpc_calls) {
seq_puts(seq,
- "Proto Local Remote "
+ "Proto Local "
+ " Remote "
" SvID ConnID CallID End Use State Abort "
" UserID\n");
return 0;
@@ -60,30 +65,35 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call = list_entry(v, struct rxrpc_call, link);
- sprintf(lbuff, "%pI4:%u",
- &call->local->srx.transport.sin.sin_addr,
- ntohs(call->local->srx.transport.sin.sin_port));
+ rx = rcu_dereference(call->socket);
+ if (rx) {
+ local = READ_ONCE(rx->local);
+ if (local)
+ sprintf(lbuff, "%pISpc", &local->srx.transport);
+ else
+ strcpy(lbuff, "no_local");
+ } else {
+ strcpy(lbuff, "no_socket");
+ }
- conn = call->conn;
- if (conn)
- sprintf(rbuff, "%pI4:%u",
- &conn->params.peer->srx.transport.sin.sin_addr,
- ntohs(conn->params.peer->srx.transport.sin.sin_port));
+ peer = call->peer;
+ if (peer)
+ sprintf(rbuff, "%pISpc", &peer->srx.transport);
else
strcpy(rbuff, "no_connection");
seq_printf(seq,
- "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
+ "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %lx\n",
lbuff,
rbuff,
call->service_id,
call->cid,
call->call_id,
- call->in_clientflag ? "Svc" : "Clt",
+ rxrpc_is_service_call(call) ? "Svc" : "Clt",
atomic_read(&call->usage),
rxrpc_call_states[call->state],
- call->remote_abort ?: call->local_abort,
+ call->abort_code,
call->user_call_ID);
return 0;
@@ -115,13 +125,13 @@ const struct file_operations rxrpc_call_seq_fops = {
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
{
read_lock(&rxrpc_connection_lock);
- return seq_list_start_head(&rxrpc_connections, *_pos);
+ return seq_list_start_head(&rxrpc_connection_proc_list, *_pos);
}
static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
- return seq_list_next(v, &rxrpc_connections, pos);
+ return seq_list_next(v, &rxrpc_connection_proc_list, pos);
}
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
@@ -132,29 +142,31 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
- char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+ char lbuff[50], rbuff[50];
- if (v == &rxrpc_connections) {
+ if (v == &rxrpc_connection_proc_list) {
seq_puts(seq,
- "Proto Local Remote "
+ "Proto Local "
+ " Remote "
" SvID ConnID End Use State Key "
" Serial ISerial\n"
);
return 0;
}
- conn = list_entry(v, struct rxrpc_connection, link);
-
- sprintf(lbuff, "%pI4:%u",
- &conn->params.local->srx.transport.sin.sin_addr,
- ntohs(conn->params.local->srx.transport.sin.sin_port));
+ conn = list_entry(v, struct rxrpc_connection, proc_link);
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
+ strcpy(lbuff, "no_local");
+ strcpy(rbuff, "no_connection");
+ goto print;
+ }
- sprintf(rbuff, "%pI4:%u",
- &conn->params.peer->srx.transport.sin.sin_addr,
- ntohs(conn->params.peer->srx.transport.sin.sin_port));
+ sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);
+ sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
+print:
seq_printf(seq,
- "UDP %-22.22s %-22.22s %4x %08x %s %3u"
+ "UDP %-47.47s %-47.47s %4x %08x %s %3u"
" %s %08x %08x %08x\n",
lbuff,
rbuff,
@@ -165,7 +177,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
rxrpc_conn_states[conn->state],
key_serial(conn->params.key),
atomic_read(&conn->serial),
- atomic_read(&conn->hi_serial));
+ conn->hi_serial);
return 0;
}
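The /proc formatting changes above widen the local and remote address columns from IPv4-sized buffers to 50-byte buffers printed with %-47.47s, so a textual IPv6 address plus port still fits and the columns stay aligned. Below is a minimal userspace sketch of the same fixed-width formatting, using example address strings rather than real peers.

/* Userspace sketch of the widened column formatting used by the rxrpc
 * /proc files above.  Address strings are examples only.
 */
#include <stdio.h>

int main(void)
{
	char lbuff[50], rbuff[50];

	snprintf(lbuff, sizeof(lbuff), "%s", "192.0.2.1:7000");
	snprintf(rbuff, sizeof(rbuff), "%s", "[2001:db8::1]:7001");

	/* Pad or truncate both columns to 47 characters, as the seq_printf()
	 * format in rxrpc_call_seq_show() now does.
	 */
	printf("UDP   %-47.47s %-47.47s %4x\n", lbuff, rbuff, 0x1234);
	return 0;
}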
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 9ed66d533002..f05ea0a88076 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -19,399 +19,645 @@
#include "ar-internal.h"
/*
- * removal a call's user ID from the socket tree to make the user ID available
- * again and so that it won't be seen again in association with that call
+ * Post a call for attention by the socket or kernel service. Further
+ * notifications are suppressed by putting recvmsg_link on a dummy queue.
*/
-void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
+void rxrpc_notify_socket(struct rxrpc_call *call)
{
- _debug("RELEASE CALL %d", call->debug_id);
+ struct rxrpc_sock *rx;
+ struct sock *sk;
+
+ _enter("%d", call->debug_id);
+
+ if (!list_empty(&call->recvmsg_link))
+ return;
+
+ rcu_read_lock();
+
+ rx = rcu_dereference(call->socket);
+ sk = &rx->sk;
+ if (rx && sk->sk_state < RXRPC_CLOSE) {
+ if (call->notify_rx) {
+ call->notify_rx(sk, call, call->user_call_ID);
+ } else {
+ write_lock_bh(&rx->recvmsg_lock);
+ if (list_empty(&call->recvmsg_link)) {
+ rxrpc_get_call(call, rxrpc_call_got);
+ list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
+ }
+ write_unlock_bh(&rx->recvmsg_lock);
- if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- write_lock_bh(&rx->call_lock);
- rb_erase(&call->sock_node, &call->socket->calls);
- clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
- write_unlock_bh(&rx->call_lock);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ _debug("call %ps", sk->sk_data_ready);
+ sk->sk_data_ready(sk);
+ }
+ }
}
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
+ rcu_read_unlock();
+ _leave("");
}
/*
- * receive a message from an RxRPC socket
- * - we need to be careful about two or more threads calling recvmsg
- * simultaneously
+ * Pass a call terminating message to userspace.
*/
-int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
- struct rxrpc_skb_priv *sp;
- struct rxrpc_call *call = NULL, *continue_call = NULL;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- struct sk_buff *skb;
- long timeo;
- int copy, ret, ullen, offset, copied = 0;
- u32 abort_code;
-
- DEFINE_WAIT(wait);
-
- _enter(",,,%zu,%d", len, flags);
-
- if (flags & (MSG_OOB | MSG_TRUNC))
- return -EOPNOTSUPP;
-
- ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
+ u32 tmp = 0;
+ int ret;
+
+ switch (call->completion) {
+ case RXRPC_CALL_SUCCEEDED:
+ ret = 0;
+ if (rxrpc_is_service_call(call))
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
+ break;
+ case RXRPC_CALL_REMOTELY_ABORTED:
+ tmp = call->abort_code;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
+ break;
+ case RXRPC_CALL_LOCALLY_ABORTED:
+ tmp = call->abort_code;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
+ break;
+ case RXRPC_CALL_NETWORK_ERROR:
+ tmp = call->error;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
+ break;
+ case RXRPC_CALL_LOCAL_ERROR:
+ tmp = call->error;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
+ break;
+ default:
+ pr_err("Invalid terminal call state %u\n", call->state);
+ BUG();
+ break;
+ }
- timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
- msg->msg_flags |= MSG_MORE;
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
+ call->rx_pkt_offset, call->rx_pkt_len, ret);
+ return ret;
+}
- lock_sock(&rx->sk);
+/*
+ * Pass back notification of a new call. The call is added to the
+ * to-be-accepted list. This means that the next call to be accepted might not
+ * be the last call seen awaiting acceptance, but unless we leave this on the
+ * front of the queue and block all other messages until someone gives us a
+ * user_ID for it, there's not a lot we can do.
+ */
+static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct msghdr *msg, int flags)
+{
+ int tmp = 0, ret;
- for (;;) {
- /* return immediately if a client socket has no outstanding
- * calls */
- if (RB_EMPTY_ROOT(&rx->calls)) {
- if (copied)
- goto out;
- if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
- release_sock(&rx->sk);
- if (continue_call)
- rxrpc_put_call(continue_call);
- return -ENODATA;
- }
- }
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
- /* get the next message on the Rx queue */
- skb = skb_peek(&rx->sk.sk_receive_queue);
- if (!skb) {
- /* nothing remains on the queue */
- if (copied &&
- (flags & MSG_PEEK || timeo == 0))
- goto out;
+ if (ret == 0 && !(flags & MSG_PEEK)) {
+ _debug("to be accepted");
+ write_lock_bh(&rx->recvmsg_lock);
+ list_del_init(&call->recvmsg_link);
+ write_unlock_bh(&rx->recvmsg_lock);
- /* wait for a message to turn up */
- release_sock(&rx->sk);
- prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
- TASK_INTERRUPTIBLE);
- ret = sock_error(&rx->sk);
- if (ret)
- goto wait_error;
-
- if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
- if (signal_pending(current))
- goto wait_interrupted;
- timeo = schedule_timeout(timeo);
- }
- finish_wait(sk_sleep(&rx->sk), &wait);
- lock_sock(&rx->sk);
- continue;
- }
+ rxrpc_get_call(call, rxrpc_call_got);
+ write_lock(&rx->call_lock);
+ list_add_tail(&call->accept_link, &rx->to_be_accepted);
+ write_unlock(&rx->call_lock);
+ }
- peek_next_packet:
- sp = rxrpc_skb(skb);
- call = sp->call;
- ASSERT(call != NULL);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
+ return ret;
+}
- _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
+/*
+ * End the packet reception phase.
+ */
+static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
+{
+ _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
- /* make sure we wait for the state to be updated in this call */
- spin_lock_bh(&call->lock);
- spin_unlock_bh(&call->lock);
+ trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
+ ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
- if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
- _debug("packet from released call");
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- continue;
- }
+ if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
+ rxrpc_propose_ack_terminal_ack);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ }
- /* determine whether to continue last data receive */
- if (continue_call) {
- _debug("maybe cont");
- if (call != continue_call ||
- skb->mark != RXRPC_SKB_MARK_DATA) {
- release_sock(&rx->sk);
- rxrpc_put_call(continue_call);
- _leave(" = %d [noncont]", copied);
- return copied;
- }
- }
+ write_lock_bh(&call->state_lock);
- rxrpc_get_call(call);
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ __rxrpc_call_completed(call);
+ break;
- /* copy the peer address and timestamp */
- if (!continue_call) {
- if (msg->msg_name) {
- size_t len =
- sizeof(call->conn->params.peer->srx);
- memcpy(msg->msg_name,
- &call->conn->params.peer->srx, len);
- msg->msg_namelen = len;
- }
- sock_recv_timestamp(msg, &rx->sk, skb);
- }
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ call->tx_phase = true;
+ call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
+ break;
+ default:
+ break;
+ }
- /* receive the message */
- if (skb->mark != RXRPC_SKB_MARK_DATA)
- goto receive_non_data_message;
+ write_unlock_bh(&call->state_lock);
+}
- _debug("recvmsg DATA #%u { %d, %d }",
- sp->hdr.seq, skb->len, sp->offset);
+/*
+ * Discard a packet we've used up and advance the Rx window by one.
+ */
+static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top;
+ u8 flags;
+ int ix;
+
+ _enter("%d", call->debug_id);
+
+ hard_ack = call->rx_hard_ack;
+ top = smp_load_acquire(&call->rx_top);
+ ASSERT(before(hard_ack, top));
+
+ hard_ack++;
+ ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
+ sp = rxrpc_skb(skb);
+ flags = sp->hdr.flags;
+ serial = sp->hdr.serial;
+ if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
+ serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;
+
+ call->rxtx_buffer[ix] = NULL;
+ call->rxtx_annotations[ix] = 0;
+ /* Barrier against rxrpc_input_data(). */
+ smp_store_release(&call->rx_hard_ack, hard_ack);
+
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+
+ _debug("%u,%u,%02x", hard_ack, top, flags);
+ trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
+ if (flags & RXRPC_LAST_PACKET) {
+ rxrpc_end_rx_phase(call, serial);
+ } else {
+ /* Check to see if there's an ACK that needs sending. */
+ if (after_eq(hard_ack, call->ackr_consumed + 2) ||
+ after_eq(top, call->ackr_seen + 2) ||
+ (hard_ack == top && after(hard_ack, call->ackr_consumed)))
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
+ true, false,
+ rxrpc_propose_ack_rotate_rx);
+ if (call->ackr_reason)
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ }
+}
- if (!continue_call) {
- /* only set the control data once per recvmsg() */
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
- ullen, &call->user_call_ID);
- if (ret < 0)
- goto copy_error;
- ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
- }
+/*
+ * Decrypt and verify a (sub)packet. The packet's length may be changed due to
+ * padding, but if this is the case, the packet length will be resident in the
+ * socket buffer. Note that we can't modify the master skb info as the skb may
+ * be the home to multiple subpackets.
+ */
+static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ u8 annotation,
+ unsigned int offset, unsigned int len)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ rxrpc_seq_t seq = sp->hdr.seq;
+ u16 cksum = sp->hdr.cksum;
- ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
- ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
- call->rx_data_recv = sp->hdr.seq;
+ _enter("");
- ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
+ /* For all but the head jumbo subpacket, the security checksum is in a
+ * jumbo header immediately prior to the data.
+ */
+ if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
+ __be16 tmp;
+ if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
+ BUG();
+ cksum = ntohs(tmp);
+ seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
+ }
- offset = sp->offset;
- copy = skb->len - offset;
- if (copy > len - copied)
- copy = len - copied;
+ return call->conn->security->verify_packet(call, skb, offset, len,
+ seq, cksum);
+}
- ret = skb_copy_datagram_msg(skb, offset, msg, copy);
+/*
+ * Locate the data within a packet. This is complicated by:
+ *
+ * (1) An skb may contain a jumbo packet - so we have to find the appropriate
+ * subpacket.
+ *
+ * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
+ * contains an extra header which includes the true length of the data,
+ * excluding any encrypted padding.
+ */
+static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ u8 *_annotation,
+ unsigned int *_offset, unsigned int *_len)
+{
+ unsigned int offset = sizeof(struct rxrpc_wire_header);
+ unsigned int len = *_len;
+ int ret;
+ u8 annotation = *_annotation;
+
+ /* Locate the subpacket */
+ len = skb->len - offset;
+ if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
+ offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
+ RXRPC_JUMBO_SUBPKTLEN);
+ len = (annotation & RXRPC_RX_ANNO_JLAST) ?
+ skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
+ }
+ if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
+ ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
if (ret < 0)
- goto copy_error;
-
- /* handle piecemeal consumption of data packets */
- _debug("copied %d+%d", copy, copied);
+ return ret;
+ *_annotation |= RXRPC_RX_ANNO_VERIFIED;
+ }
- offset += copy;
- copied += copy;
+ *_offset = offset;
+ *_len = len;
+ call->conn->security->locate_data(call, skb, _offset, _len);
+ return 0;
+}
- if (!(flags & MSG_PEEK))
- sp->offset = offset;
+/*
+ * Deliver messages to a call. This keeps processing packets until the buffer
+ * is filled and we find either more DATA (returns 0) or the end of the DATA
+ * (returns 1). If more packets are required, it returns -EAGAIN.
+ */
+static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
+ struct msghdr *msg, struct iov_iter *iter,
+ size_t len, int flags, size_t *_offset)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ rxrpc_seq_t hard_ack, top, seq;
+ size_t remain;
+ bool last;
+ unsigned int rx_pkt_offset, rx_pkt_len;
+ int ix, copy, ret = -EAGAIN, ret2;
+
+ rx_pkt_offset = call->rx_pkt_offset;
+ rx_pkt_len = call->rx_pkt_len;
+
+ if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
+ seq = call->rx_hard_ack;
+ ret = 1;
+ goto done;
+ }
- if (sp->offset < skb->len) {
- _debug("buffer full");
- ASSERTCMP(copied, ==, len);
+ /* Barriers against rxrpc_input_data(). */
+ hard_ack = call->rx_hard_ack;
+ top = smp_load_acquire(&call->rx_top);
+ for (seq = hard_ack + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ if (!skb) {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
break;
}
+ smp_rmb();
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ sp = rxrpc_skb(skb);
- /* we transferred the whole data packet */
if (!(flags & MSG_PEEK))
- rxrpc_kernel_data_consumed(call, skb);
-
- if (sp->hdr.flags & RXRPC_LAST_PACKET) {
- _debug("last");
- if (rxrpc_conn_is_client(call->conn)) {
- /* last byte of reply received */
- ret = copied;
- goto terminal_message;
+ trace_rxrpc_receive(call, rxrpc_receive_front,
+ sp->hdr.serial, seq);
+
+ if (msg)
+ sock_recv_timestamp(msg, sock->sk, skb);
+
+ if (rx_pkt_offset == 0) {
+ ret2 = rxrpc_locate_data(call, skb,
+ &call->rxtx_annotations[ix],
+ &rx_pkt_offset, &rx_pkt_len);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
+ rx_pkt_offset, rx_pkt_len, ret2);
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out;
}
+ } else {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
+ }
- /* last bit of request received */
- if (!(flags & MSG_PEEK)) {
- _debug("eat packet");
- if (skb_dequeue(&rx->sk.sk_receive_queue) !=
- skb)
- BUG();
- rxrpc_free_skb(skb);
+ /* We have to handle short, empty and used-up DATA packets. */
+ remain = len - *_offset;
+ copy = rx_pkt_len;
+ if (copy > remain)
+ copy = remain;
+ if (copy > 0) {
+ ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
+ copy);
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out;
}
- msg->msg_flags &= ~MSG_MORE;
- break;
+
+ /* handle piecemeal consumption of data packets */
+ rx_pkt_offset += copy;
+ rx_pkt_len -= copy;
+ *_offset += copy;
}
- /* move on to the next data message */
- _debug("next");
- if (!continue_call)
- continue_call = sp->call;
- else
- rxrpc_put_call(call);
- call = NULL;
-
- if (flags & MSG_PEEK) {
- _debug("peek next");
- skb = skb->next;
- if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
- break;
- goto peek_next_packet;
+ if (rx_pkt_len > 0) {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
+ ASSERTCMP(*_offset, ==, len);
+ ret = 0;
+ break;
}
- _debug("eat packet");
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
+ /* The whole packet has been transferred. */
+ last = sp->hdr.flags & RXRPC_LAST_PACKET;
+ if (!(flags & MSG_PEEK))
+ rxrpc_rotate_rx_window(call);
+ rx_pkt_offset = 0;
+ rx_pkt_len = 0;
+
+ if (last) {
+ ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
+ ret = 1;
+ goto out;
+ }
}
- /* end of non-terminal data packet reception for the moment */
- _debug("end rcv data");
out:
- release_sock(&rx->sk);
- if (call)
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d [data]", copied);
- return copied;
-
- /* handle non-DATA messages such as aborts, incoming connections and
- * final ACKs */
-receive_non_data_message:
- _debug("non-data");
-
- if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
- _debug("RECV NEW CALL");
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
- if (ret < 0)
- goto copy_error;
- if (!(flags & MSG_PEEK)) {
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- }
- goto out;
+ if (!(flags & MSG_PEEK)) {
+ call->rx_pkt_offset = rx_pkt_offset;
+ call->rx_pkt_len = rx_pkt_len;
}
+done:
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
+ rx_pkt_offset, rx_pkt_len, ret);
+ return ret;
+}
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
- ullen, &call->user_call_ID);
- if (ret < 0)
- goto copy_error;
- ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
+/*
+ * Receive a message from an RxRPC socket
+ * - we need to be careful about two or more threads calling recvmsg
+ * simultaneously
+ */
+int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct rxrpc_call *call;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+ struct list_head *l;
+ size_t copied = 0;
+ long timeo;
+ int ret;
+
+ DEFINE_WAIT(wait);
+
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);
+
+ if (flags & (MSG_OOB | MSG_TRUNC))
+ return -EOPNOTSUPP;
+
+ timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
+
+try_again:
+ lock_sock(&rx->sk);
+
+ /* Return immediately if a client socket has no outstanding calls */
+ if (RB_EMPTY_ROOT(&rx->calls) &&
+ list_empty(&rx->recvmsg_q) &&
+ rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+ release_sock(&rx->sk);
+ return -ENODATA;
+ }
+
+ if (list_empty(&rx->recvmsg_q)) {
+ ret = -EWOULDBLOCK;
+ if (timeo == 0) {
+ call = NULL;
+ goto error_no_call;
+ }
+
+ release_sock(&rx->sk);
+
+ /* Wait for something to happen */
+ prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
+ TASK_INTERRUPTIBLE);
+ ret = sock_error(&rx->sk);
+ if (ret)
+ goto wait_error;
+
+ if (list_empty(&rx->recvmsg_q)) {
+ if (signal_pending(current))
+ goto wait_interrupted;
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
+ 0, 0, 0, 0);
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk_sleep(&rx->sk), &wait);
+ goto try_again;
+ }
- switch (skb->mark) {
- case RXRPC_SKB_MARK_DATA:
+ /* Find the next call and dequeue it if we're not just peeking. If we
+ * do dequeue it, that comes with a ref that we will need to release.
+ */
+ write_lock_bh(&rx->recvmsg_lock);
+ l = rx->recvmsg_q.next;
+ call = list_entry(l, struct rxrpc_call, recvmsg_link);
+ if (!(flags & MSG_PEEK))
+ list_del_init(&call->recvmsg_link);
+ else
+ rxrpc_get_call(call, rxrpc_call_got);
+ write_unlock_bh(&rx->recvmsg_lock);
+
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
+
+ if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
- case RXRPC_SKB_MARK_FINAL_ACK:
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
- break;
- case RXRPC_SKB_MARK_BUSY:
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
- break;
- case RXRPC_SKB_MARK_REMOTE_ABORT:
- abort_code = call->remote_abort;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
- break;
- case RXRPC_SKB_MARK_LOCAL_ABORT:
- abort_code = call->local_abort;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
- break;
- case RXRPC_SKB_MARK_NET_ERROR:
- _debug("RECV NET ERROR %d", sp->error);
- abort_code = sp->error;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
+
+ if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ if (flags & MSG_CMSG_COMPAT) {
+ unsigned int id32 = call->user_call_ID;
+
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+ sizeof(unsigned int), &id32);
+ } else {
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+ sizeof(unsigned long),
+ &call->user_call_ID);
+ }
+ if (ret < 0)
+ goto error;
+ }
+
+ if (msg->msg_name) {
+ size_t len = sizeof(call->conn->params.peer->srx);
+ memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
+ msg->msg_namelen = len;
+ }
+
+ switch (call->state) {
+ case RXRPC_CALL_SERVER_ACCEPTING:
+ ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
break;
- case RXRPC_SKB_MARK_LOCAL_ERROR:
- _debug("RECV LOCAL ERROR %d", sp->error);
- abort_code = sp->error;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
- &abort_code);
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
+ flags, &copied);
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ if (after(call->rx_top, call->rx_hard_ack) &&
+ call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
+ rxrpc_notify_socket(call);
break;
default:
- pr_err("Unknown packet mark %u\n", skb->mark);
- BUG();
+ ret = 0;
break;
}
if (ret < 0)
- goto copy_error;
-
-terminal_message:
- _debug("terminal");
- msg->msg_flags &= ~MSG_MORE;
- msg->msg_flags |= MSG_EOR;
+ goto error;
- if (!(flags & MSG_PEEK)) {
- _net("free terminal skb %p", skb);
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- rxrpc_remove_user_ID(rx, call);
+ if (call->state == RXRPC_CALL_COMPLETE) {
+ ret = rxrpc_recvmsg_term(call, msg);
+ if (ret < 0)
+ goto error;
+ if (!(flags & MSG_PEEK))
+ rxrpc_release_call(rx, call);
+ msg->msg_flags |= MSG_EOR;
+ ret = 1;
}
- release_sock(&rx->sk);
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d", ret);
- return ret;
+ if (ret == 0)
+ msg->msg_flags |= MSG_MORE;
+ else
+ msg->msg_flags &= ~MSG_MORE;
+ ret = copied;
-copy_error:
- _debug("copy error");
+error:
+ rxrpc_put_call(call, rxrpc_call_put);
+error_no_call:
release_sock(&rx->sk);
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d", ret);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
- if (continue_call)
- rxrpc_put_call(continue_call);
- if (copied)
- copied = ret;
- _leave(" = %d [waitfail %d]", copied, ret);
- return copied;
-
+ call = NULL;
+ goto error_no_call;
}
/**
- * rxrpc_kernel_is_data_last - Determine if data message is last one
- * @skb: Message holding data
+ * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
+ * @sock: The socket that the call exists on
+ * @call: The call to receive data from
+ * @buf: The buffer to receive into
+ * @size: The size of the buffer, including data already read
+ * @_offset: The running offset into the buffer.
+ * @want_more: True if more data is expected to be read
+ * @_abort: Where the abort code is stored if -ECONNABORTED is returned
*
- * Determine if data message is last one for the parent call.
+ * Allow a kernel service to receive data and pick up information about the
+ * state of a call. Returns 0 if we got what was asked for and there's more
+ * available, 1 if we got what was asked for and we're at the end of the data,
+ * and -EAGAIN if we need more data.
+ *
+ * Note that we may return -EAGAIN to drain empty packets at the end of the
+ * data, even if we've already copied over the requested data.
+ *
+ * This function adds the amount it transfers to *_offset, so this should be
+ * precleared as appropriate. Note that the amount remaining in the buffer is
+ * taken to be size - *_offset.
+ *
+ * *_abort should also be initialised to 0.
*/
-bool rxrpc_kernel_is_data_last(struct sk_buff *skb)
+int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
+ void *buf, size_t size, size_t *_offset,
+ bool want_more, u32 *_abort)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct iov_iter iter;
+ struct kvec iov;
+ int ret;
- ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA);
+ _enter("{%d,%s},%zu/%zu,%d",
+ call->debug_id, rxrpc_call_states[call->state],
+ *_offset, size, want_more);
- return sp->hdr.flags & RXRPC_LAST_PACKET;
-}
+ ASSERTCMP(*_offset, <=, size);
+ ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
-EXPORT_SYMBOL(rxrpc_kernel_is_data_last);
+ iov.iov_base = buf + *_offset;
+ iov.iov_len = size - *_offset;
+ iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);
-/**
- * rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message
- * @skb: Message indicating an abort
- *
- * Get the abort code from an RxRPC abort message.
- */
-u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ lock_sock(sock->sk);
+
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
+ _offset);
+ if (ret < 0)
+ goto out;
+
+ /* We can only reach here with a partially full buffer if we
+ * have reached the end of the data. We must otherwise have a
+ * full buffer or have been given -EAGAIN.
+ */
+ if (ret == 1) {
+ if (*_offset < size)
+ goto short_data;
+ if (!want_more)
+ goto read_phase_complete;
+ ret = 0;
+ goto out;
+ }
+
+ if (!want_more)
+ goto excess_data;
+ goto out;
+
+ case RXRPC_CALL_COMPLETE:
+ goto call_complete;
- switch (skb->mark) {
- case RXRPC_SKB_MARK_REMOTE_ABORT:
- return sp->call->remote_abort;
- case RXRPC_SKB_MARK_LOCAL_ABORT:
- return sp->call->local_abort;
default:
- BUG();
+ ret = -EINPROGRESS;
+ goto out;
}
-}
-
-EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
-/**
- * rxrpc_kernel_get_error - Get the error number from an RxRPC error message
- * @skb: Message indicating an error
- *
- * Get the error number from an RxRPC error message.
- */
-int rxrpc_kernel_get_error_number(struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+read_phase_complete:
+ ret = 1;
+out:
+ release_sock(sock->sk);
+ _leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
+ return ret;
- return sp->error;
+short_data:
+ ret = -EBADMSG;
+ goto out;
+excess_data:
+ ret = -EMSGSIZE;
+ goto out;
+call_complete:
+ *_abort = call->abort_code;
+ ret = call->error;
+ if (call->completion == RXRPC_CALL_SUCCEEDED) {
+ ret = 1;
+ if (size > 0)
+ ret = -ECONNRESET;
+ }
+ goto out;
}
-
-EXPORT_SYMBOL(rxrpc_kernel_get_error_number);
+EXPORT_SYMBOL(rxrpc_kernel_recv_data);
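As a rough illustration of how a kernel service might consume this API (a sketch only: my_read_reply and the retry loop are illustrative and not part of this patch; a real caller sleeps until rxrpc notifies the socket instead of spinning on -EAGAIN):

#include <linux/kernel.h>
#include <linux/net.h>
#include <net/af_rxrpc.h>

/* Read exactly @size bytes of reply data from @call into @buf. */
static int my_read_reply(struct socket *sock, struct rxrpc_call *call,
			 void *buf, size_t size)
{
	size_t offset = 0;	/* running offset; must start precleared */
	u32 abort_code = 0;	/* likewise initialised to 0 */
	int ret;

	do {
		/* want_more = false: after @size bytes we expect end-of-data,
		 * so a short or overlong reply comes back as an error.
		 */
		ret = rxrpc_kernel_recv_data(sock, call, buf, size, &offset,
					     false, &abort_code);
	} while (ret == -EAGAIN);	/* more packets needed; wait, don't spin */

	if (ret < 0) {
		if (abort_code)
			pr_warn("call aborted, abort code %u\n", abort_code);
		return ret;
	}
	return 0;	/* ret == 1: buffer filled and the data has ended */
}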
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 63afa9e9cc08..627abed5f999 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -80,12 +80,10 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
case RXRPC_SECURITY_AUTH:
conn->size_align = 8;
conn->security_size = sizeof(struct rxkad_level1_hdr);
- conn->header_size += sizeof(struct rxkad_level1_hdr);
break;
case RXRPC_SECURITY_ENCRYPT:
conn->size_align = 8;
conn->security_size = sizeof(struct rxkad_level2_hdr);
- conn->header_size += sizeof(struct rxkad_level2_hdr);
break;
default:
ret = -EKEYREJECTED;
@@ -161,7 +159,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = sp->hdr.seq ^ sp->hdr.callNumber;
+ check = sp->hdr.seq ^ call->call_id;
data_size |= (u32)check << 16;
hdr.data_size = htonl(data_size);
@@ -205,7 +203,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = sp->hdr.seq ^ sp->hdr.callNumber;
+ check = sp->hdr.seq ^ call->call_id;
rxkhdr.data_size = htonl(data_size | (u32)check << 16);
rxkhdr.checksum = 0;
@@ -275,9 +273,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = call->channel << (32 - RXRPC_CIDSHIFT);
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= sp->hdr.seq & 0x3fffffff;
- call->crypto_buf[0] = htonl(sp->hdr.callNumber);
+ call->crypto_buf[0] = htonl(call->call_id);
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
@@ -316,12 +314,11 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
/*
* decrypt partial encryption on a packet (level 1 security)
*/
-static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq)
{
struct rxkad_level1_hdr sechdr;
- struct rxrpc_skb_priv *sp;
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
@@ -332,15 +329,20 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
_enter("");
- sp = rxrpc_skb(skb);
+ if (len < 8) {
+ rxrpc_abort_call("V1H", call, seq, RXKADSEALEDINCON, EPROTO);
+ goto protocol_error;
+ }
- /* we want to decrypt the skbuff in-place */
+ /* Decrypt the skbuff in-place. TODO: We really want to decrypt
+ * directly into the target buffer.
+ */
nsg = skb_cow_data(skb, 0, &trailer);
if (nsg < 0 || nsg > 16)
goto nomem;
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, 8);
+ skb_to_sgvec(skb, sg, offset, 8);
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -351,35 +353,35 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
- /* remove the decrypted packet length */
- if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
- goto datalen_error;
- if (!skb_pull(skb, sizeof(sechdr)))
- BUG();
+ /* Extract the decrypted packet length */
+ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ rxrpc_abort_call("XV1", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
+ offset += sizeof(sechdr);
+ len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= sp->hdr.seq ^ sp->hdr.callNumber;
+ check ^= seq ^ call->call_id;
check &= 0xffff;
if (check != 0) {
- *_abort_code = RXKADSEALEDINCON;
+ rxrpc_abort_call("V1C", call, seq, RXKADSEALEDINCON, EPROTO);
goto protocol_error;
}
- /* shorten the packet to remove the padding */
- if (data_size > skb->len)
- goto datalen_error;
- else if (data_size < skb->len)
- skb->len = data_size;
+ if (data_size > len) {
+ rxrpc_abort_call("V1L", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-datalen_error:
- *_abort_code = RXKADDATALEN;
protocol_error:
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO");
return -EPROTO;
@@ -391,13 +393,12 @@ nomem:
/*
* wholly decrypt a packet (level 2 security)
*/
-static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr sechdr;
- struct rxrpc_skb_priv *sp;
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
@@ -408,9 +409,14 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
_enter(",{%d}", skb->len);
- sp = rxrpc_skb(skb);
+ if (len < 8) {
+ rxrpc_abort_call("V2H", call, seq, RXKADSEALEDINCON, EPROTO);
+ goto protocol_error;
+ }
- /* we want to decrypt the skbuff in-place */
+ /* Decrypt the skbuff in-place. TODO: We really want to decrypt
+ * directly into the target buffer.
+ */
nsg = skb_cow_data(skb, 0, &trailer);
if (nsg < 0)
goto nomem;
@@ -423,7 +429,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
}
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ skb_to_sgvec(skb, sg, offset, len);
/* decrypt from the session key */
token = call->conn->params.key->payload.data[0];
@@ -431,41 +437,41 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
skcipher_request_set_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, skb->len, iv.x);
+ skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (sg != _sg)
kfree(sg);
- /* remove the decrypted packet length */
- if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
- goto datalen_error;
- if (!skb_pull(skb, sizeof(sechdr)))
- BUG();
+ /* Extract the decrypted packet length */
+ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ rxrpc_abort_call("XV2", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
+ offset += sizeof(sechdr);
+ len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= sp->hdr.seq ^ sp->hdr.callNumber;
+ check ^= seq ^ call->call_id;
check &= 0xffff;
if (check != 0) {
- *_abort_code = RXKADSEALEDINCON;
+ rxrpc_abort_call("V2C", call, seq, RXKADSEALEDINCON, EPROTO);
goto protocol_error;
}
- /* shorten the packet to remove the padding */
- if (data_size > skb->len)
- goto datalen_error;
- else if (data_size < skb->len)
- skb->len = data_size;
+ if (data_size > len) {
+ rxrpc_abort_call("V2L", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-datalen_error:
- *_abort_code = RXKADDATALEN;
protocol_error:
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO");
return -EPROTO;
@@ -475,40 +481,31 @@ nomem:
}
/*
- * verify the security on a received packet
+ * Verify the security on a received packet or subpacket (if part of a
+ * jumbo packet).
*/
-static int rxkad_verify_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq, u16 expected_cksum)
{
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
- struct rxrpc_skb_priv *sp;
struct rxrpc_crypt iv;
struct scatterlist sg;
u16 cksum;
u32 x, y;
- int ret;
-
- sp = rxrpc_skb(skb);
_enter("{%d{%x}},{#%u}",
- call->debug_id, key_serial(call->conn->params.key), sp->hdr.seq);
+ call->debug_id, key_serial(call->conn->params.key), seq);
if (!call->conn->cipher)
return 0;
- if (sp->hdr.securityIndex != RXRPC_SECURITY_RXKAD) {
- *_abort_code = RXKADINCONSISTENCY;
- _leave(" = -EPROTO [not rxkad]");
- return -EPROTO;
- }
-
/* continue encrypting from where we left off */
memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
/* validate the security checksum */
- x = call->channel << (32 - RXRPC_CIDSHIFT);
- x |= sp->hdr.seq & 0x3fffffff;
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x |= seq & 0x3fffffff;
call->crypto_buf[0] = htonl(call->call_id);
call->crypto_buf[1] = htonl(x);
@@ -524,29 +521,69 @@ static int rxkad_verify_packet(struct rxrpc_call *call,
if (cksum == 0)
cksum = 1; /* zero checksums are not permitted */
- if (sp->hdr.cksum != cksum) {
- *_abort_code = RXKADSEALEDINCON;
+ if (cksum != expected_cksum) {
+ rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO [csum failed]");
return -EPROTO;
}
switch (call->conn->params.security_level) {
case RXRPC_SECURITY_PLAIN:
- ret = 0;
- break;
+ return 0;
case RXRPC_SECURITY_AUTH:
- ret = rxkad_verify_packet_auth(call, skb, _abort_code);
- break;
+ return rxkad_verify_packet_1(call, skb, offset, len, seq);
case RXRPC_SECURITY_ENCRYPT:
- ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
- break;
+ return rxkad_verify_packet_2(call, skb, offset, len, seq);
default:
- ret = -ENOANO;
- break;
+ return -ENOANO;
}
+}
- _leave(" = %d", ret);
- return ret;
+/*
+ * Locate the data contained in a packet that was partially encrypted.
+ */
+static void rxkad_locate_data_1(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ struct rxkad_level1_hdr sechdr;
+
+ if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
+ BUG();
+ *_offset += sizeof(sechdr);
+ *_len = ntohl(sechdr.data_size) & 0xffff;
+}
+
+/*
+ * Locate the data contained in a packet that was completely encrypted.
+ */
+static void rxkad_locate_data_2(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ struct rxkad_level2_hdr sechdr;
+
+ if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
+ BUG();
+ *_offset += sizeof(sechdr);
+ *_len = ntohl(sechdr.data_size) & 0xffff;
+}
+
+/*
+ * Locate the data contained in an already decrypted packet.
+ */
+static void rxkad_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ switch (call->conn->params.security_level) {
+ case RXRPC_SECURITY_AUTH:
+ rxkad_locate_data_1(call, skb, _offset, _len);
+ return;
+ case RXRPC_SECURITY_ENCRYPT:
+ rxkad_locate_data_2(call, skb, _offset, _len);
+ return;
+ default:
+ return;
+ }
}
/*
@@ -716,7 +753,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
struct rxkad_challenge challenge;
struct rxkad_response resp
__attribute__((aligned(8))); /* must be aligned for crypto */
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
u32 version, nonce, min_level, abort_code;
int ret;
@@ -734,8 +771,8 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
}
abort_code = RXKADPACKETSHORT;
- sp = rxrpc_skb(skb);
- if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &challenge, sizeof(challenge)) < 0)
goto protocol_error;
version = ntohl(challenge.version);
@@ -981,7 +1018,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
{
struct rxkad_response response
__attribute__((aligned(8))); /* must be aligned for crypto */
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt session_key;
time_t expiry;
void *ticket;
@@ -992,7 +1029,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
abort_code = RXKADPACKETSHORT;
- if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &response, sizeof(response)) < 0)
goto protocol_error;
if (!pskb_pull(skb, sizeof(response)))
BUG();
@@ -1000,7 +1038,6 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
version = ntohl(response.version);
ticket_len = ntohl(response.ticket_len);
kvno = ntohl(response.kvno);
- sp = rxrpc_skb(skb);
_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
sp->hdr.serial, version, kvno, ticket_len);
@@ -1022,7 +1059,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
return -ENOMEM;
abort_code = RXKADPACKETSHORT;
- if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ ticket, ticket_len) < 0)
goto protocol_error_free;
ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
@@ -1147,6 +1185,7 @@ const struct rxrpc_security rxkad = {
.prime_packet_security = rxkad_prime_packet_security,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
+ .locate_data = rxkad_locate_data,
.issue_challenge = rxkad_issue_challenge,
.respond_to_challenge = rxkad_respond_to_challenge,
.verify_response = rxkad_verify_response,
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 814d285ff802..7d921e56e715 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -130,20 +130,20 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
}
/* find the service */
- read_lock_bh(&local->services_lock);
- list_for_each_entry(rx, &local->services, listen_link) {
- if (rx->srx.srx_service == conn->params.service_id)
- goto found_service;
- }
+ read_lock(&local->services_lock);
+ rx = rcu_dereference_protected(local->service,
+ lockdep_is_held(&local->services_lock));
+ if (rx && rx->srx.srx_service == conn->params.service_id)
+ goto found_service;
/* the service appears to have died */
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = -ENOENT");
return -ENOENT;
found_service:
if (!rx->securities) {
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = -ENOKEY");
return -ENOKEY;
}
@@ -152,13 +152,13 @@ found_service:
kref = keyring_search(make_key_ref(rx->securities, 1UL),
&key_type_rxrpc_s, kdesc);
if (IS_ERR(kref)) {
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = %ld [search]", PTR_ERR(kref));
return PTR_ERR(kref);
}
key = key_ref_to_ptr(kref);
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
conn->server_key = key;
conn->security = sec;
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
new file mode 100644
index 000000000000..3322543d460a
--- /dev/null
+++ b/net/rxrpc/sendmsg.c
@@ -0,0 +1,606 @@
+/* AF_RXRPC sendmsg() implementation.
+ *
+ * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/net.h>
+#include <linux/gfp.h>
+#include <linux/skbuff.h>
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+enum rxrpc_command {
+ RXRPC_CMD_SEND_DATA, /* send data message */
+ RXRPC_CMD_SEND_ABORT, /* request abort generation */
+ RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
+ RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
+};
+
+/*
+ * wait for space to appear in the transmit/ACK window
+ * - caller holds the socket locked
+ */
+static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ long *timeo)
+{
+ DECLARE_WAITQUEUE(myself, current);
+ int ret;
+
+ _enter(",{%u,%u,%u}",
+ call->tx_hard_ack, call->tx_top, call->tx_winsize);
+
+ add_wait_queue(&call->waitq, &myself);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ ret = 0;
+ if (call->tx_top - call->tx_hard_ack <
+ min_t(unsigned int, call->tx_winsize,
+ call->cong_cwnd + call->cong_extra))
+ break;
+ if (call->state >= RXRPC_CALL_COMPLETE) {
+ ret = -call->error;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(*timeo);
+ break;
+ }
+
+ trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+ release_sock(&rx->sk);
+ *timeo = schedule_timeout(*timeo);
+ lock_sock(&rx->sk);
+ }
+
+ remove_wait_queue(&call->waitq, &myself);
+ set_current_state(TASK_RUNNING);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Schedule an instant Tx resend.
+ */
+static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
+{
+ spin_lock_bh(&call->lock);
+
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+ if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ rxrpc_queue_call(call);
+ }
+
+ spin_unlock_bh(&call->lock);
+}
+
+/*
+ * Queue a DATA packet for transmission, set the resend timeout and send the
+ * packet immediately
+ */
+static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ bool last)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ rxrpc_seq_t seq = sp->hdr.seq;
+ int ret, ix;
+ u8 annotation = RXRPC_TX_ANNO_UNACK;
+
+ _net("queue skb %p [%d]", skb, seq);
+
+ ASSERTCMP(seq, ==, call->tx_top + 1);
+
+ if (last)
+ annotation |= RXRPC_TX_ANNO_LAST;
+
+ /* We have to set the timestamp before queueing as the retransmit
+ * algorithm can see the packet as soon as we queue it.
+ */
+ skb->tstamp = ktime_get_real();
+
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ call->rxtx_annotations[ix] = annotation;
+ smp_wmb();
+ call->rxtx_buffer[ix] = skb;
+ call->tx_top = seq;
+ if (last)
+ trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
+ else
+ trace_rxrpc_transmit(call, rxrpc_transmit_queue);
+
+ if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
+ _debug("________awaiting reply/ACK__________");
+ write_lock_bh(&call->state_lock);
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ break;
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ call->state = RXRPC_CALL_SERVER_SEND_REPLY;
+ if (!last)
+ break;
+ case RXRPC_CALL_SERVER_SEND_REPLY:
+ call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
+ break;
+ default:
+ break;
+ }
+ write_unlock_bh(&call->state_lock);
+ }
+
+ if (seq == 1 && rxrpc_is_client_call(call))
+ rxrpc_expose_client_call(call);
+
+ ret = rxrpc_send_data_packet(call, skb, false);
+ if (ret < 0) {
+ _debug("need instant resend %d", ret);
+ rxrpc_instant_resend(call, ix);
+ } else {
+ ktime_t now = ktime_get_real(), resend_at;
+
+ resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
+
+ if (ktime_before(resend_at, call->resend_at)) {
+ call->resend_at = resend_at;
+ rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
+ }
+ }
+
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave("");
+}
+
+/*
+ * send data through a socket
+ * - must be called in process context
+ * - caller holds the socket locked
+ */
+static int rxrpc_send_data(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct msghdr *msg, size_t len)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ struct sock *sk = &rx->sk;
+ long timeo;
+ bool more;
+ int ret, copied;
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ /* this should be in poll */
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ return -EPIPE;
+
+ more = msg->msg_flags & MSG_MORE;
+
+ skb = call->tx_pending;
+ call->tx_pending = NULL;
+ rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+
+ copied = 0;
+ do {
+ /* Check to see if there's a ping ACK to reply to. */
+ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+
+ if (!skb) {
+ size_t size, chunk, max, space;
+
+ _debug("alloc");
+
+ if (call->tx_top - call->tx_hard_ack >=
+ min_t(unsigned int, call->tx_winsize,
+ call->cong_cwnd + call->cong_extra)) {
+ ret = -EAGAIN;
+ if (msg->msg_flags & MSG_DONTWAIT)
+ goto maybe_error;
+ ret = rxrpc_wait_for_tx_window(rx, call,
+ &timeo);
+ if (ret < 0)
+ goto maybe_error;
+ }
+
+ max = RXRPC_JUMBO_DATALEN;
+ max -= call->conn->security_size;
+ max &= ~(call->conn->size_align - 1UL);
+
+ chunk = max;
+ if (chunk > msg_data_left(msg) && !more)
+ chunk = msg_data_left(msg);
+
+ space = chunk + call->conn->size_align;
+ space &= ~(call->conn->size_align - 1UL);
+
+ size = space + call->conn->security_size;
+
+ _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
+
+ /* create a buffer that we can retain until it's ACK'd */
+ skb = sock_alloc_send_skb(
+ sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
+ if (!skb)
+ goto maybe_error;
+
+ rxrpc_new_skb(skb, rxrpc_skb_tx_new);
+
+ _debug("ALLOC SEND %p", skb);
+
+ ASSERTCMP(skb->mark, ==, 0);
+
+ _debug("HS: %u", call->conn->security_size);
+ skb_reserve(skb, call->conn->security_size);
+ skb->len += call->conn->security_size;
+
+ sp = rxrpc_skb(skb);
+ sp->remain = chunk;
+ if (sp->remain > skb_tailroom(skb))
+ sp->remain = skb_tailroom(skb);
+
+ _net("skb: hr %d, tr %d, hl %d, rm %d",
+ skb_headroom(skb),
+ skb_tailroom(skb),
+ skb_headlen(skb),
+ sp->remain);
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ _debug("append");
+ sp = rxrpc_skb(skb);
+
+ /* append next segment of data to the current buffer */
+ if (msg_data_left(msg) > 0) {
+ int copy = skb_tailroom(skb);
+ ASSERTCMP(copy, >, 0);
+ if (copy > msg_data_left(msg))
+ copy = msg_data_left(msg);
+ if (copy > sp->remain)
+ copy = sp->remain;
+
+ _debug("add");
+ ret = skb_add_data(skb, &msg->msg_iter, copy);
+ _debug("added");
+ if (ret < 0)
+ goto efault;
+ sp->remain -= copy;
+ skb->mark += copy;
+ copied += copy;
+ }
+
+ /* check for the far side aborting the call or a network error
+ * occurring */
+ if (call->state == RXRPC_CALL_COMPLETE)
+ goto call_terminated;
+
+ /* add the packet to the send queue if it's now full */
+ if (sp->remain <= 0 ||
+ (msg_data_left(msg) == 0 && !more)) {
+ struct rxrpc_connection *conn = call->conn;
+ uint32_t seq;
+ size_t pad;
+
+ /* pad out if we're using security */
+ if (conn->security_ix) {
+ pad = conn->security_size + skb->mark;
+ pad = conn->size_align - pad;
+ pad &= conn->size_align - 1;
+ _debug("pad %zu", pad);
+ if (pad)
+ memset(skb_put(skb, pad), 0, pad);
+ }
+
+ seq = call->tx_top + 1;
+
+ sp->hdr.seq = seq;
+ sp->hdr._rsvd = 0;
+ sp->hdr.flags = conn->out_clientflag;
+
+ if (msg_data_left(msg) == 0 && !more)
+ sp->hdr.flags |= RXRPC_LAST_PACKET;
+ else if (call->tx_top - call->tx_hard_ack <
+ call->tx_winsize)
+ sp->hdr.flags |= RXRPC_MORE_PACKETS;
+
+ ret = conn->security->secure_packet(
+ call, skb, skb->mark, skb->head);
+ if (ret < 0)
+ goto out;
+
+ rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
+ skb = NULL;
+ }
+ } while (msg_data_left(msg) > 0);
+
+success:
+ ret = copied;
+out:
+ call->tx_pending = skb;
+ _leave(" = %d", ret);
+ return ret;
+
+call_terminated:
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave(" = %d", -call->error);
+ return -call->error;
+
+maybe_error:
+ if (copied)
+ goto success;
+ goto out;
+
+efault:
+ ret = -EFAULT;
+ goto out;
+}
+
+/*
+ * extract control messages from the sendmsg() control buffer
+ */
+static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
+ unsigned long *user_call_ID,
+ enum rxrpc_command *command,
+ u32 *abort_code,
+ bool *_exclusive)
+{
+ struct cmsghdr *cmsg;
+ bool got_user_ID = false;
+ int len;
+
+ *command = RXRPC_CMD_SEND_DATA;
+
+ if (msg->msg_controllen == 0)
+ return -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+
+ len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+ _debug("CMSG %d, %d, %d",
+ cmsg->cmsg_level, cmsg->cmsg_type, len);
+
+ if (cmsg->cmsg_level != SOL_RXRPC)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case RXRPC_USER_CALL_ID:
+ if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ if (len != sizeof(u32))
+ return -EINVAL;
+ *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
+ } else {
+ if (len != sizeof(unsigned long))
+ return -EINVAL;
+ *user_call_ID = *(unsigned long *)
+ CMSG_DATA(cmsg);
+ }
+ _debug("User Call ID %lx", *user_call_ID);
+ got_user_ID = true;
+ break;
+
+ case RXRPC_ABORT:
+ if (*command != RXRPC_CMD_SEND_DATA)
+ return -EINVAL;
+ *command = RXRPC_CMD_SEND_ABORT;
+ if (len != sizeof(*abort_code))
+ return -EINVAL;
+ *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
+ _debug("Abort %x", *abort_code);
+ if (*abort_code == 0)
+ return -EINVAL;
+ break;
+
+ case RXRPC_ACCEPT:
+ if (*command != RXRPC_CMD_SEND_DATA)
+ return -EINVAL;
+ *command = RXRPC_CMD_ACCEPT;
+ if (len != 0)
+ return -EINVAL;
+ break;
+
+ case RXRPC_EXCLUSIVE_CALL:
+ *_exclusive = true;
+ if (len != 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!got_user_ID)
+ return -EINVAL;
+ _leave(" = 0");
+ return 0;
+}
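For comparison, the userspace side of the control buffer parsed above could be assembled roughly as follows (a sketch only: the header name, send_request() and the call ID value are assumptions; only RXRPC_USER_CALL_ID is attached, so the parser above selects the default RXRPC_CMD_SEND_DATA path):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>	/* assumed location of SOL_RXRPC and cmsg types */

static ssize_t send_request(int fd, const void *req, size_t len,
			    unsigned long user_call_ID)
{
	struct iovec iov = { .iov_base = (void *)req, .iov_len = len };
	union {			/* force cmsghdr alignment of the buffer */
		char buf[CMSG_SPACE(sizeof(unsigned long))];
		struct cmsghdr align;
	} control;
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control.buf,
		.msg_controllen	= sizeof(control.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* Every sendmsg() must carry the user call ID that tags the call. */
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(user_call_ID));
	memcpy(CMSG_DATA(cmsg), &user_call_ID, sizeof(user_call_ID));

	/* No MSG_MORE: this buffer completes the request phase. */
	return sendmsg(fd, &msg, 0);
}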
+
+/*
+ * Create a new client call for sendmsg().
+ */
+static struct rxrpc_call *
+rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+ unsigned long user_call_ID, bool exclusive)
+{
+ struct rxrpc_conn_parameters cp;
+ struct rxrpc_call *call;
+ struct key *key;
+
+ DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
+
+ _enter("");
+
+ if (!msg->msg_name)
+ return ERR_PTR(-EDESTADDRREQ);
+
+ key = rx->key;
+ if (key && !rx->key->payload.data[0])
+ key = NULL;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.local = rx->local;
+ cp.key = rx->key;
+ cp.security_level = rx->min_sec_level;
+ cp.exclusive = rx->exclusive | exclusive;
+ cp.service_id = srx->srx_service;
+ call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+
+ _leave(" = %p\n", call);
+ return call;
+}
+
+/*
+ * send a message forming part of a client call through an RxRPC socket
+ * - caller holds the socket locked
+ * - the socket may be either a client socket or a server socket
+ */
+int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+{
+ enum rxrpc_command cmd;
+ struct rxrpc_call *call;
+ unsigned long user_call_ID = 0;
+ bool exclusive = false;
+ u32 abort_code = 0;
+ int ret;
+
+ _enter("");
+
+ ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
+ &exclusive);
+ if (ret < 0)
+ return ret;
+
+ if (cmd == RXRPC_CMD_ACCEPT) {
+ if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
+ return -EINVAL;
+ call = rxrpc_accept_call(rx, user_call_ID, NULL);
+ if (IS_ERR(call))
+ return PTR_ERR(call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ return 0;
+ }
+
+ call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
+ if (!call) {
+ if (cmd != RXRPC_CMD_SEND_DATA)
+ return -EBADSLT;
+ call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
+ exclusive);
+ if (IS_ERR(call))
+ return PTR_ERR(call);
+ }
+
+ _debug("CALL %d USR %lx ST %d on CONN %p",
+ call->debug_id, call->user_call_ID, call->state, call->conn);
+
+ if (call->state >= RXRPC_CALL_COMPLETE) {
+ /* it's too late for this call */
+ ret = -ESHUTDOWN;
+ } else if (cmd == RXRPC_CMD_SEND_ABORT) {
+ ret = 0;
+ if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED))
+ ret = rxrpc_send_call_packet(call,
+ RXRPC_PACKET_TYPE_ABORT);
+ } else if (cmd != RXRPC_CMD_SEND_DATA) {
+ ret = -EINVAL;
+ } else if (rxrpc_is_client_call(call) &&
+ call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+ /* request phase complete for this client call */
+ ret = -EPROTO;
+ } else if (rxrpc_is_service_call(call) &&
+ call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ /* Reply phase not begun or not complete for service call. */
+ ret = -EPROTO;
+ } else {
+ ret = rxrpc_send_data(rx, call, msg, len);
+ }
+
+ rxrpc_put_call(call, rxrpc_call_put);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/**
+ * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
+ * @sock: The socket the call is on
+ * @call: The call to send data through
+ * @msg: The data to send
+ * @len: The amount of data to send
+ *
+ * Allow a kernel service to send data on a call. The call must be in a state
+ * appropriate to sending data. No control data should be supplied in @msg,
+ * nor should an address be supplied. MSG_MORE should be flagged if there's
+ * more data to come, otherwise this data will end the transmission phase.
+ */
+int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
+ struct msghdr *msg, size_t len)
+{
+ int ret;
+
+ _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+
+ ASSERTCMP(msg->msg_name, ==, NULL);
+ ASSERTCMP(msg->msg_control, ==, NULL);
+
+ lock_sock(sock->sk);
+
+ _debug("CALL %d USR %lx ST %d on CONN %p",
+ call->debug_id, call->user_call_ID, call->state, call->conn);
+
+ if (call->state >= RXRPC_CALL_COMPLETE) {
+ ret = -ESHUTDOWN; /* it's too late for this call */
+ } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+ call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ ret = -EPROTO; /* request phase complete for this client call */
+ } else {
+ ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+ }
+
+ release_sock(sock->sk);
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(rxrpc_kernel_send_data);
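A kernel service might wrap the send side in the same spirit as the receive sketch earlier (illustrative only; my_send_request is not a kernel API, and on success rxrpc_kernel_send_data() returns the number of bytes queued):

#include <linux/uio.h>
#include <linux/net.h>
#include <net/af_rxrpc.h>

/* Queue one complete request buffer on @call; leaving MSG_MORE clear marks
 * this as the final DATA packet of the request phase.
 */
static int my_send_request(struct socket *sock, struct rxrpc_call *call,
			   void *req, size_t len)
{
	struct msghdr msg = {};	/* no name, no control data */
	struct kvec iov = { .iov_base = req, .iov_len = len };

	iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, &iov, 1, len);

	return rxrpc_kernel_send_data(sock, call, &msg, len);
}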
+
+/**
+ * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
+ * @sock: The socket the call is on
+ * @call: The call to be aborted
+ * @abort_code: The abort code to stick into the ABORT packet
+ * @error: Local error value
+ * @why: 3-char string indicating why.
+ *
+ * Allow a kernel service to abort a call, if it's still in an abortable state.
+ */
+void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
+ u32 abort_code, int error, const char *why)
+{
+ _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
+
+ lock_sock(sock->sk);
+
+ if (rxrpc_abort_call(why, call, 0, abort_code, error))
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+
+ release_sock(sock->sk);
+ _leave("");
+}
+
+EXPORT_SYMBOL(rxrpc_kernel_abort_call);
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 06c51d4b622d..67b02c45271b 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -18,148 +18,82 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
+#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
+
/*
- * set up for the ACK at the end of the receive phase when we discard the final
- * receive phase data packet
- * - called with softirqs disabled
+ * Note the allocation or reception of a socket buffer.
*/
-static void rxrpc_request_final_ACK(struct rxrpc_call *call)
+void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
- /* the call may be aborted before we have a chance to ACK it */
- write_lock(&call->state_lock);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
- _debug("request final ACK");
-
- /* get an extra ref on the call for the final-ACK generator to
- * release */
- rxrpc_get_call(call);
- set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
- if (try_to_del_timer_sync(&call->ack_timer) >= 0)
- rxrpc_queue_call(call);
- break;
-
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- default:
- break;
- }
-
- write_unlock(&call->state_lock);
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}
/*
- * drop the bottom ACK off of the call ACK window and advance the window
+ * Note the re-emergence of a socket buffer from a queue or buffer.
*/
-static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
- struct rxrpc_skb_priv *sp)
+void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
- int loop;
- u32 seq;
-
- spin_lock_bh(&call->lock);
-
- _debug("hard ACK #%u", sp->hdr.seq);
-
- for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
- call->ackr_window[loop] >>= 1;
- call->ackr_window[loop] |=
- call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
- }
-
- seq = sp->hdr.seq;
- ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
- call->rx_data_eaten = seq;
-
- if (call->ackr_win_top < UINT_MAX)
- call->ackr_win_top++;
-
- ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
- call->rx_data_post, >=, call->rx_data_recv);
- ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
- call->rx_data_recv, >=, call->rx_data_eaten);
-
- if (sp->hdr.flags & RXRPC_LAST_PACKET) {
- rxrpc_request_final_ACK(call);
- } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
- test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
- /* We previously soft-ACK'd some received packets that have now
- * been consumed, so send a hard-ACK if no more packets are
- * immediately forthcoming to allow the transmitter to free up
- * its Tx bufferage.
- */
- _debug("send Rx idle ACK");
- __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
- false);
+ const void *here = __builtin_return_address(0);
+ if (skb) {
+ int n = atomic_read(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}
-
- spin_unlock_bh(&call->lock);
}
-/**
- * rxrpc_kernel_data_consumed - Record consumption of data message
- * @call: The call to which the message pertains.
- * @skb: Message holding data
- *
- * Record the consumption of a data message and generate an ACK if appropriate.
- * The call state is shifted if this was the final packet. The caller must be
- * in process context with no spinlocks held.
- *
- * TODO: Actually generate the ACK here rather than punting this to the
- * workqueue.
+/*
+ * Note the addition of a ref on a socket buffer.
*/
-void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
+void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
- _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
-
- ASSERTCMP(sp->call, ==, call);
- ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
-
- /* TODO: Fix the sequence number tracking */
- ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
- ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
- ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
-
- call->rx_data_recv = sp->hdr.seq;
- rxrpc_hard_ACK_data(call, sp);
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ skb_get(skb);
}
-EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
/*
- * Destroy a packet that has an RxRPC control buffer
+ * Note the destruction of a socket buffer.
*/
-void rxrpc_packet_destructor(struct sk_buff *skb)
+void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_call *call = sp->call;
-
- _enter("%p{%p}", skb, call);
-
- if (call) {
- if (atomic_dec_return(&call->skb_count) < 0)
- BUG();
- rxrpc_put_call(call);
- sp->call = NULL;
+ const void *here = __builtin_return_address(0);
+ if (skb) {
+ int n;
+ CHECK_SLAB_OKAY(&skb->users);
+ n = atomic_dec_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ kfree_skb(skb);
}
+}
- if (skb->sk)
- sock_rfree(skb);
- _leave("");
+/*
+ * Note the injected loss of a socket buffer.
+ */
+void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+{
+ const void *here = __builtin_return_address(0);
+ if (skb) {
+ int n;
+ CHECK_SLAB_OKAY(&skb->users);
+ n = atomic_dec_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ kfree_skb(skb);
+ }
}
-/**
- * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
- * @skb: The socket buffer to be freed
- *
- * Let RxRPC free its own socket buffer, permitting it to maintain debug
- * accounting.
+/*
+ * Clear a queue of socket buffers.
*/
-void rxrpc_kernel_free_skb(struct sk_buff *skb)
+void rxrpc_purge_queue(struct sk_buff_head *list)
{
- rxrpc_free_skb(skb);
+ const void *here = __builtin_return_address(0);
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue((list))) != NULL) {
+ int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
+ trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
+ atomic_read(&skb->users), n, here);
+ kfree_skb(skb);
+ }
}
-EXPORT_SYMBOL(rxrpc_kernel_free_skb);
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 03ad08774d4e..34c706d2f79c 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -20,7 +20,7 @@ static const unsigned int one = 1;
static const unsigned int four = 4;
static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535;
-static const unsigned int n_max_acks = RXRPC_MAXACKS;
+static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
/*
* RxRPC operating parameters.
@@ -35,7 +35,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_requested_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
+ .proc_handler = proc_dointvec,
.extra1 = (void *)&zero,
},
{
@@ -43,7 +43,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
+ .proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
{
@@ -51,7 +51,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
+ .proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
{
@@ -59,6 +59,22 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_resend_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = (void *)&one,
+ },
+ {
+ .procname = "idle_conn_expiry",
+ .data = &rxrpc_conn_idle_client_expiry,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .extra1 = (void *)&one,
+ },
+ {
+ .procname = "idle_conn_fast_expiry",
+ .data = &rxrpc_conn_idle_client_fast_expiry,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
.extra1 = (void *)&one,
},
@@ -69,29 +85,28 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_max_call_lifetime,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
+
+ /* Non-time values */
{
- .procname = "dead_call_expiry",
- .data = &rxrpc_dead_call_expiry,
+ .procname = "max_client_conns",
+ .data = &rxrpc_max_client_connections,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *)&rxrpc_reap_client_connections,
},
-
- /* Values measured in seconds */
{
- .procname = "connection_expiry",
- .data = &rxrpc_connection_expiry,
+ .procname = "reap_client_conns",
+ .data = &rxrpc_reap_client_connections,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&one,
+ .extra2 = (void *)&rxrpc_max_client_connections,
},
-
- /* Non-time values */
{
.procname = "max_backlog",
.data = &rxrpc_max_backlog,
diff --git a/net/rxrpc/utils.c b/net/rxrpc/utils.c
index b88914d53ca5..ff7af71c4b49 100644
--- a/net/rxrpc/utils.c
+++ b/net/rxrpc/utils.c
@@ -30,6 +30,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
return 0;
+#ifdef CONFIG_AF_RXRPC_IPV6
case ETH_P_IPV6:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin6);
@@ -37,6 +38,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
return 0;
+#endif
default:
pr_warn_ratelimited("AF_RXRPC: Unknown eth protocol %u\n",
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index ccf931b3b94c..87956a768d1b 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -749,6 +749,17 @@ config NET_ACT_CONNMARK
To compile this code as a module, choose M here: the
module will be called act_connmark.
+config NET_ACT_SKBMOD
+ tristate "skb data modification action"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to allow modification of skb data
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_skbmod.
+
config NET_ACT_IFE
tristate "Inter-FE action based on IETF ForCES InterFE LFB"
depends on NET_CLS_ACT
@@ -761,6 +772,17 @@ config NET_ACT_IFE
To compile this code as a module, choose M here: the
module will be called act_ife.
+config NET_ACT_TUNNEL_KEY
+ tristate "IP tunnel metadata manipulation"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to set/release ip tunnel metadata.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_tunnel_key.
+
config NET_IFE_SKBMARK
tristate "Support to encoding decoding skb mark on IFE action"
depends on NET_ACT_IFE
@@ -771,6 +793,11 @@ config NET_IFE_SKBPRIO
depends on NET_ACT_IFE
---help---
+config NET_IFE_SKBTCINDEX
+ tristate "Support to encoding decoding skb tcindex on IFE action"
+ depends on NET_ACT_IFE
+ ---help---
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index ae088a5a9d95..4bdda3634e0b 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -19,9 +19,12 @@ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
obj-$(CONFIG_NET_ACT_BPF) += act_bpf.o
obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+obj-$(CONFIG_NET_ACT_SKBMOD) += act_skbmod.o
obj-$(CONFIG_NET_ACT_IFE) += act_ife.o
obj-$(CONFIG_NET_IFE_SKBMARK) += act_meta_mark.o
obj-$(CONFIG_NET_IFE_SKBPRIO) += act_meta_skbprio.o
+obj-$(CONFIG_NET_IFE_SKBTCINDEX) += act_meta_skbtcindex.o
+obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d09d0687594b..c9102172ce3b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -592,9 +592,19 @@ err_out:
return ERR_PTR(err);
}
-int tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr,
- int bind, struct list_head *actions)
+static void cleanup_a(struct list_head *actions, int ovr)
+{
+ struct tc_action *a;
+
+ if (!ovr)
+ return;
+
+ list_for_each_entry(a, actions, list)
+ a->tcfa_refcnt--;
+}
+
+int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind, struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
@@ -612,8 +622,15 @@ int tcf_action_init(struct net *net, struct nlattr *nla,
goto err;
}
act->order = i;
+ if (ovr)
+ act->tcfa_refcnt++;
list_add_tail(&act->list, actions);
}
+
+ /* Remove the temp refcnt which was necessary to protect against
+ * destroying an existing action which was being replaced
+ */
+ cleanup_a(actions, ovr);
return 0;
err:
@@ -883,6 +900,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
+ if (event == RTM_GETACTION)
+ act->tcfa_refcnt++;
list_add_tail(&act->list, &actions);
}
@@ -923,9 +942,8 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
return err;
}
-static int
-tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
- u32 portid, int ovr)
+static int tcf_action_add(struct net *net, struct nlattr *nla,
+ struct nlmsghdr *n, u32 portid, int ovr)
{
int ret = 0;
LIST_HEAD(actions);
@@ -988,8 +1006,7 @@ replay:
return ret;
}
-static struct nlattr *
-find_dump_kind(const struct nlmsghdr *n)
+static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
{
struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -1016,8 +1033,7 @@ find_dump_kind(const struct nlmsghdr *n)
return kind;
}
-static int
-tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
+static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index bfa870731e74..1d3960033f61 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -39,13 +39,10 @@ static struct tc_action_ops act_bpf_ops;
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_result *res)
{
+ bool at_ingress = skb_at_tc_ingress(skb);
struct tcf_bpf *prog = to_bpf(act);
struct bpf_prog *filter;
int action, filter_res;
- bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
-
- if (unlikely(!skb_mac_header_was_set(skb)))
- return TC_ACT_UNSPEC;
tcf_lastuse_update(&prog->tcf_tm);
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b5dbf633a863..e0defcef376d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -116,8 +116,8 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
return (void *)(skb_network_header(skb) + ihl);
}
-static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct icmphdr *icmph;
@@ -152,8 +152,8 @@ static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct icmp6hdr *icmp6h;
const struct ipv6hdr *ip6h;
@@ -174,8 +174,8 @@ static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct tcphdr *tcph;
const struct iphdr *iph;
@@ -195,8 +195,8 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct tcphdr *tcph;
const struct ipv6hdr *ip6h;
@@ -217,8 +217,8 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv4_udp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl, int udplite)
{
struct udphdr *udph;
const struct iphdr *iph;
@@ -270,8 +270,8 @@ ignore_obscure_skb:
return 1;
}
-static int tcf_csum_ipv6_udp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl, int udplite)
{
struct udphdr *udph;
const struct ipv6hdr *ip6h;
@@ -380,8 +380,8 @@ fail:
return 0;
}
-static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
- unsigned int ixhl, unsigned int *pl)
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
+ unsigned int *pl)
{
int off, len, optlen;
unsigned char *xh = (void *)ip6xh;
@@ -494,8 +494,8 @@ fail:
return 0;
}
-static int tcf_csum(struct sk_buff *skb,
- const struct tc_action *a, struct tcf_result *res)
+static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_csum *p = to_tcf_csum(a);
int action;
@@ -531,8 +531,8 @@ drop:
return TC_ACT_SHOT;
}
-static int tcf_csum_dump(struct sk_buff *skb,
- struct tc_action *a, int bind, int ref)
+static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_csum *p = to_tcf_csum(a);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e24a4093d6f6..e0aa30f83c6c 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -156,7 +156,8 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
int action = READ_ONCE(gact->tcf_action);
struct tcf_t *tm = &gact->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets);
+ _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
+ packets);
if (action == TC_ACT_SHOT)
this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 4a60cd5e1875..95c463cbb9a6 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -63,6 +63,23 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
+{
+ u16 edata = 0;
+
+ if (mi->metaval)
+ edata = *(u16 *)mi->metaval;
+ else if (metaval)
+ edata = metaval;
+
+ if (!edata) /* will not encode */
+ return 0;
+
+ edata = htons(edata);
+ return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
+}
+EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
+
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
if (mi->metaval)
@@ -81,6 +98,15 @@ int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
+{
+ if (metaval || mi->metaval)
+ return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ife_check_meta_u16);
+
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
u32 edata = metaval;
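As an aside, the 8-byte figure returned by ife_check_meta_u16() above follows from the IFE TLV layout noted in its comment: 2 bytes of type, 2 bytes of length and a 2-byte value padded out to a 4-byte boundary. A minimal standalone C sketch of that arithmetic (illustration only; ife_u16_tlv_size() is a hypothetical helper, not part of the patch):

#include <stdio.h>

/* Illustrative only: mirrors the size math behind ife_check_meta_u16(),
 * where a u16 value is carried as Type(2) + Length(2) + Value(2) padded
 * out to a 4-byte boundary, i.e. 2 + 2 + 4 = 8 bytes on the wire.
 */
static unsigned int ife_u16_tlv_size(void)
{
	unsigned int hdr = 2 + 2;			/* T + L */
	unsigned int val = 2;				/* u16 payload */
	unsigned int pad = (4 - (val % 4)) % 4;		/* pad value to 4 bytes */

	return hdr + val + pad;				/* == 8 */
}

int main(void)
{
	printf("u16 IFE metadatum occupies %u bytes\n", ife_u16_tlv_size());
	return 0;
}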
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c
new file mode 100644
index 000000000000..3b35774ce890
--- /dev/null
+++ b/net/sched/act_meta_skbtcindex.c
@@ -0,0 +1,78 @@
+/*
+ * net/sched/act_meta_skbtcindex.c IFE skb->tc_index metadata module
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * copyright Jamal Hadi Salim (2016)
+ *
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+
+static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
+ struct tcf_meta_info *e)
+{
+ u32 ifetc_index = skb->tc_index;
+
+ return ife_encode_meta_u16(ifetc_index, skbdata, e);
+}
+
+static int skbtcindex_decode(struct sk_buff *skb, void *data, u16 len)
+{
+ u16 ifetc_index = *(u16 *)data;
+
+ skb->tc_index = ntohs(ifetc_index);
+ return 0;
+}
+
+static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
+{
+ return ife_check_meta_u16(skb->tc_index, e);
+}
+
+static struct tcf_meta_ops ife_skbtcindex_ops = {
+ .metaid = IFE_META_TCINDEX,
+ .metatype = NLA_U16,
+ .name = "tc_index",
+ .synopsis = "skb tc_index 16 bit metadata",
+ .check_presence = skbtcindex_check,
+ .encode = skbtcindex_encode,
+ .decode = skbtcindex_decode,
+ .get = ife_get_meta_u16,
+ .alloc = ife_alloc_meta_u16,
+ .release = ife_release_meta_gen,
+ .validate = ife_validate_meta_u16,
+ .owner = THIS_MODULE,
+};
+
+static int __init ifetc_index_init_module(void)
+{
+ return register_ife_op(&ife_skbtcindex_ops);
+}
+
+static void __exit ifetc_index_cleanup_module(void)
+{
+ unregister_ife_op(&ife_skbtcindex_ops);
+}
+
+module_init(ifetc_index_init_module);
+module_exit(ifetc_index_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2016)");
+MODULE_DESCRIPTION("Inter-FE skb tc_index metadata module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IFE_META(IFE_META_SKBTCINDEX);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6038c85d92f5..667dc382df82 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -204,7 +204,15 @@ out:
return retval;
}
-static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
+ u64 lastuse)
+{
+ tcf_lastuse_update(&a->tcfa_tm);
+ _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+}
+
+static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = to_mirred(a);
@@ -280,6 +288,7 @@ static struct tc_action_ops act_mirred_ops = {
.type = TCA_ACT_MIRRED,
.owner = THIS_MODULE,
.act = tcf_mirred,
+ .stats_update = tcf_stats_update,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_release,
.init = tcf_mirred_init,
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8a3be1d99775..d1bd248fe146 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -249,6 +249,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
police->tcfp_t_c = now;
police->tcfp_toks = toks;
police->tcfp_ptoks = ptoks;
+ if (police->tcfp_result == TC_ACT_SHOT)
+ police->tcf_qstats.drops++;
spin_unlock(&police->tcf_lock);
return police->tcfp_result;
}
@@ -261,8 +263,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
return police->tcf_action;
}
-static int
-tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = to_police(a);
@@ -347,14 +349,12 @@ static struct pernet_operations police_net_ops = {
.size = sizeof(struct tc_action_net),
};
-static int __init
-police_init_module(void)
+static int __init police_init_module(void)
{
return tcf_register_action(&act_police_ops, &police_net_ops);
}
-static void __exit
-police_cleanup_module(void)
+static void __exit police_cleanup_module(void)
{
tcf_unregister_action(&act_police_ops, &police_net_ops);
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
new file mode 100644
index 000000000000..e7d96381c908
--- /dev/null
+++ b/net/sched/act_skbmod.c
@@ -0,0 +1,300 @@
+/*
+ * net/sched/act_skbmod.c skb data modifier
+ *
+ * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_skbmod.h>
+#include <net/tc_act/tc_skbmod.h>
+
+#define SKBMOD_TAB_MASK 15
+
+static int skbmod_net_id;
+static struct tc_action_ops act_skbmod_ops;
+
+#define MAX_EDIT_LEN ETH_HLEN
+static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ int action;
+ struct tcf_skbmod_params *p;
+ u64 flags;
+ int err;
+
+ tcf_lastuse_update(&d->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+
+ /* XXX: if you are going to edit more fields beyond ethernet header
+ * (example when you add IP header replacement or vlan swap)
+ * then MAX_EDIT_LEN needs to change appropriately
+ */
+ err = skb_ensure_writable(skb, MAX_EDIT_LEN);
+ if (unlikely(err)) { /* best policy is to drop on the floor */
+ qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+ return TC_ACT_SHOT;
+ }
+
+ rcu_read_lock();
+ action = READ_ONCE(d->tcf_action);
+ if (unlikely(action == TC_ACT_SHOT)) {
+ qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+ rcu_read_unlock();
+ return action;
+ }
+
+ p = rcu_dereference(d->skbmod_p);
+ flags = p->flags;
+ if (flags & SKBMOD_F_DMAC)
+ ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
+ if (flags & SKBMOD_F_SMAC)
+ ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
+ if (flags & SKBMOD_F_ETYPE)
+ eth_hdr(skb)->h_proto = p->eth_type;
+ rcu_read_unlock();
+
+ if (flags & SKBMOD_F_SWAPMAC) {
+ u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
+ /*XXX: I am sure we can come up with more efficient swapping*/
+ ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
+ ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
+ ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
+ }
+
+ return action;
+}
+
+static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
+ [TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) },
+ [TCA_SKBMOD_DMAC] = { .len = ETH_ALEN },
+ [TCA_SKBMOD_SMAC] = { .len = ETH_ALEN },
+ [TCA_SKBMOD_ETYPE] = { .type = NLA_U16 },
+};
+
+static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a,
+ int ovr, int bind)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+ struct nlattr *tb[TCA_SKBMOD_MAX + 1];
+ struct tcf_skbmod_params *p, *p_old;
+ struct tc_skbmod *parm;
+ struct tcf_skbmod *d;
+ bool exists = false;
+ u8 *daddr = NULL;
+ u8 *saddr = NULL;
+ u16 eth_type = 0;
+ u32 lflags = 0;
+ int ret = 0, err;
+
+ if (!nla)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_SKBMOD_PARMS])
+ return -EINVAL;
+
+ if (tb[TCA_SKBMOD_DMAC]) {
+ daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
+ lflags |= SKBMOD_F_DMAC;
+ }
+
+ if (tb[TCA_SKBMOD_SMAC]) {
+ saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
+ lflags |= SKBMOD_F_SMAC;
+ }
+
+ if (tb[TCA_SKBMOD_ETYPE]) {
+ eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
+ lflags |= SKBMOD_F_ETYPE;
+ }
+
+ parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+ if (parm->flags & SKBMOD_F_SWAPMAC)
+ lflags = SKBMOD_F_SWAPMAC;
+
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
+
+ if (!lflags)
+ return -EINVAL;
+
+ if (!exists) {
+ ret = tcf_hash_create(tn, parm->index, est, a,
+ &act_skbmod_ops, bind, true);
+ if (ret)
+ return ret;
+
+ ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ d = to_skbmod(*a);
+
+ ASSERT_RTNL();
+ p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
+ if (unlikely(!p)) {
+ if (ovr)
+ tcf_hash_release(*a, bind);
+ return -ENOMEM;
+ }
+
+ p->flags = lflags;
+ d->tcf_action = parm->action;
+
+ p_old = rtnl_dereference(d->skbmod_p);
+
+ if (ovr)
+ spin_lock_bh(&d->tcf_lock);
+
+ if (lflags & SKBMOD_F_DMAC)
+ ether_addr_copy(p->eth_dst, daddr);
+ if (lflags & SKBMOD_F_SMAC)
+ ether_addr_copy(p->eth_src, saddr);
+ if (lflags & SKBMOD_F_ETYPE)
+ p->eth_type = htons(eth_type);
+
+ rcu_assign_pointer(d->skbmod_p, p);
+ if (ovr)
+ spin_unlock_bh(&d->tcf_lock);
+
+ if (p_old)
+ kfree_rcu(p_old, rcu);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, *a);
+ return ret;
+}
+
+static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ struct tcf_skbmod_params *p;
+
+ p = rcu_dereference_protected(d->skbmod_p, 1);
+ kfree_rcu(p, rcu);
+}
+
+static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
+ struct tc_skbmod opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
+ struct tcf_t t;
+
+ opt.flags = p->flags;
+ if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_DMAC) &&
+ nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_SMAC) &&
+ nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_ETYPE) &&
+ nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
+ goto nla_put_failure;
+
+ tcf_tm_dump(&t, &d->tcf_tm);
+ if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
+ goto nla_put_failure;
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_skbmod_ops = {
+ .kind = "skbmod",
+ .type = TCA_ACT_SKBMOD,
+ .owner = THIS_MODULE,
+ .act = tcf_skbmod_run,
+ .dump = tcf_skbmod_dump,
+ .init = tcf_skbmod_init,
+ .cleanup = tcf_skbmod_cleanup,
+ .walk = tcf_skbmod_walker,
+ .lookup = tcf_skbmod_search,
+ .size = sizeof(struct tcf_skbmod),
+};
+
+static __net_init int skbmod_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tc_action_net_init(tn, &act_skbmod_ops, SKBMOD_TAB_MASK);
+}
+
+static void __net_exit skbmod_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations skbmod_net_ops = {
+ .init = skbmod_init_net,
+ .exit = skbmod_exit_net,
+ .id = &skbmod_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
+MODULE_DESCRIPTION("SKB data mod-ing");
+MODULE_LICENSE("GPL");
+
+static int __init skbmod_init_module(void)
+{
+ return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+static void __exit skbmod_cleanup_module(void)
+{
+ tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+module_init(skbmod_init_module);
+module_exit(skbmod_cleanup_module);
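The SKBMOD_F_SWAPMAC branch of tcf_skbmod_run() above swaps the source and destination MAC addresses through a stack buffer. A self-contained userspace sketch of the same move (illustration only, using plain memcpy() on a hypothetical eth_hdr_demo struct rather than the kernel's ethhdr/ether_addr_copy()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Hypothetical stand-in for struct ethhdr, for illustration. */
struct eth_hdr_demo {
	uint8_t h_dest[ETH_ALEN];
	uint8_t h_source[ETH_ALEN];
	uint16_t h_proto;
};

/* Swap the two addresses via a temporary, as the SWAPMAC case does. */
static void swap_mac(struct eth_hdr_demo *eth)
{
	uint8_t tmp[ETH_ALEN];

	memcpy(tmp, eth->h_dest, ETH_ALEN);
	memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
	memcpy(eth->h_source, tmp, ETH_ALEN);
}

int main(void)
{
	struct eth_hdr_demo eth = {
		.h_dest   = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.h_source = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
	};

	swap_mac(&eth);
	printf("dest now starts %02x:%02x, source now starts %02x:%02x\n",
	       eth.h_dest[0], eth.h_dest[1], eth.h_source[0], eth.h_source[1]);
	return 0;
}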
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
new file mode 100644
index 000000000000..af47bdf2f483
--- /dev/null
+++ b/net/sched/act_tunnel_key.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/dst.h>
+#include <net/dst_metadata.h>
+
+#include <linux/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_tunnel_key.h>
+
+#define TUNNEL_KEY_TAB_MASK 15
+
+static int tunnel_key_net_id;
+static struct tc_action_ops act_tunnel_key_ops;
+
+static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+ int action;
+
+ rcu_read_lock();
+
+ params = rcu_dereference(t->params);
+
+ tcf_lastuse_update(&t->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
+ action = params->action;
+
+ switch (params->tcft_action) {
+ case TCA_TUNNEL_KEY_ACT_RELEASE:
+ skb_dst_drop(skb);
+ break;
+ case TCA_TUNNEL_KEY_ACT_SET:
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
+ break;
+ default:
+ WARN_ONCE(1, "Bad tunnel_key action %d.\n",
+ params->tcft_action);
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return action;
+}
+
+static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
+ [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
+ [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
+ [TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
+ [TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
+ [TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
+ [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
+};
+
+static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a,
+ int ovr, int bind)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+ struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
+ struct tcf_tunnel_key_params *params_old;
+ struct tcf_tunnel_key_params *params_new;
+ struct metadata_dst *metadata = NULL;
+ struct tc_tunnel_key *parm;
+ struct tcf_tunnel_key *t;
+ bool exists = false;
+ __be64 key_id;
+ int ret = 0;
+ int err;
+
+ if (!nla)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TUNNEL_KEY_PARMS])
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
+
+ switch (parm->t_action) {
+ case TCA_TUNNEL_KEY_ACT_RELEASE:
+ break;
+ case TCA_TUNNEL_KEY_ACT_SET:
+ if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));
+
+ if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
+ tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
+ __be32 saddr;
+ __be32 daddr;
+
+ saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
+ daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
+
+ metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
+ TUNNEL_KEY, key_id, 0);
+ } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
+ tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+
+ saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
+ daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
+
+ metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, 0,
+ TUNNEL_KEY, key_id, 0);
+ }
+
+ if (!metadata) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
+ break;
+ default:
+ goto err_out;
+ }
+
+ if (!exists) {
+ ret = tcf_hash_create(tn, parm->index, est, a,
+ &act_tunnel_key_ops, bind, true);
+ if (ret)
+ return ret;
+
+ ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ t = to_tunnel_key(*a);
+
+ ASSERT_RTNL();
+ params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ if (unlikely(!params_new)) {
+ if (ret == ACT_P_CREATED)
+ tcf_hash_release(*a, bind);
+ return -ENOMEM;
+ }
+
+ params_old = rtnl_dereference(t->params);
+
+ params_new->action = parm->action;
+ params_new->tcft_action = parm->t_action;
+ params_new->tcft_enc_metadata = metadata;
+
+ rcu_assign_pointer(t->params, params_new);
+
+ if (params_old)
+ kfree_rcu(params_old, rcu);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, *a);
+
+ return ret;
+
+err_out:
+ if (exists)
+ tcf_hash_release(*a, bind);
+ return ret;
+}
+
+static void tunnel_key_release(struct tc_action *a, int bind)
+{
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params, 1);
+
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&params->tcft_enc_metadata->dst);
+
+ kfree_rcu(params, rcu);
+}
+
+static int tunnel_key_dump_addresses(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ unsigned short family = ip_tunnel_info_af(info);
+
+ if (family == AF_INET) {
+ __be32 saddr = info->key.u.ipv4.src;
+ __be32 daddr = info->key.u.ipv4.dst;
+
+ if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
+ !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
+ return 0;
+ }
+
+ if (family == AF_INET6) {
+ const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
+ const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
+
+ if (!nla_put_in6_addr(skb,
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
+ !nla_put_in6_addr(skb,
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+ struct tc_tunnel_key opt = {
+ .index = t->tcf_index,
+ .refcnt = t->tcf_refcnt - ref,
+ .bindcnt = t->tcf_bindcnt - bind,
+ };
+ struct tcf_t tm;
+
+ params = rtnl_dereference(t->params);
+
+ opt.t_action = params->tcft_action;
+ opt.action = params->action;
+
+ if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
+ struct ip_tunnel_key *key =
+ &params->tcft_enc_metadata->u.tun_info.key;
+ __be32 key_id = tunnel_id_to_key32(key->tun_id);
+
+ if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
+ tunnel_key_dump_addresses(skb,
+ &params->tcft_enc_metadata->u.tun_info))
+ goto nla_put_failure;
+ }
+
+ tcf_tm_dump(&tm, &t->tcf_tm);
+ if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
+ &tm, TCA_TUNNEL_KEY_PAD))
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_tunnel_key_ops = {
+ .kind = "tunnel_key",
+ .type = TCA_ACT_TUNNEL_KEY,
+ .owner = THIS_MODULE,
+ .act = tunnel_key_act,
+ .dump = tunnel_key_dump,
+ .init = tunnel_key_init,
+ .cleanup = tunnel_key_release,
+ .walk = tunnel_key_walker,
+ .lookup = tunnel_key_search,
+ .size = sizeof(struct tcf_tunnel_key),
+};
+
+static __net_init int tunnel_key_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tc_action_net_init(tn, &act_tunnel_key_ops, TUNNEL_KEY_TAB_MASK);
+}
+
+static void __net_exit tunnel_key_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations tunnel_key_net_ops = {
+ .init = tunnel_key_init_net,
+ .exit = tunnel_key_exit_net,
+ .id = &tunnel_key_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+static int __init tunnel_key_init_module(void)
+{
+ return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
+}
+
+static void __exit tunnel_key_cleanup_module(void)
+{
+ tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
+}
+
+module_init(tunnel_key_init_module);
+module_exit(tunnel_key_cleanup_module);
+
+MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
+MODULE_DESCRIPTION("ip tunnel manipulation actions");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 691409de3e1a..b57fcbcefea1 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -30,12 +30,19 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
struct tcf_vlan *v = to_vlan(a);
int action;
int err;
+ u16 tci;
spin_lock(&v->tcf_lock);
tcf_lastuse_update(&v->tcf_tm);
bstats_update(&v->tcf_bstats, skb);
action = v->tcf_action;
+ /* Ensure 'data' points at mac_header prior calling vlan manipulating
+ * functions.
+ */
+ if (skb_at_tc_ingress(skb))
+ skb_push_rcsum(skb, skb->mac_len);
+
switch (v->tcfv_action) {
case TCA_VLAN_ACT_POP:
err = skb_vlan_pop(skb);
@@ -43,10 +50,35 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
goto drop;
break;
case TCA_VLAN_ACT_PUSH:
- err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid);
+ err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid |
+ (v->tcfv_push_prio << VLAN_PRIO_SHIFT));
if (err)
goto drop;
break;
+ case TCA_VLAN_ACT_MODIFY:
+ /* No-op if no vlan tag (either hw-accel or in-payload) */
+ if (!skb_vlan_tagged(skb))
+ goto unlock;
+ /* extract existing tag (and guarantee no hw-accel tag) */
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ skb->vlan_tci = 0;
+ } else {
+ /* in-payload vlan tag, pop it */
+ err = __skb_vlan_pop(skb, &tci);
+ if (err)
+ goto drop;
+ }
+ /* replace the vid */
+ tci = (tci & ~VLAN_VID_MASK) | v->tcfv_push_vid;
+ /* replace prio bits, if tcfv_push_prio specified */
+ if (v->tcfv_push_prio) {
+ tci &= ~VLAN_PRIO_MASK;
+ tci |= v->tcfv_push_prio << VLAN_PRIO_SHIFT;
+ }
+ /* put updated tci as hwaccel tag */
+ __vlan_hwaccel_put_tag(skb, v->tcfv_push_proto, tci);
+ break;
default:
BUG();
}
@@ -57,6 +89,9 @@ drop:
action = TC_ACT_SHOT;
v->tcf_qstats.drops++;
unlock:
+ if (skb_at_tc_ingress(skb))
+ skb_pull_rcsum(skb, skb->mac_len);
+
spin_unlock(&v->tcf_lock);
return action;
}
@@ -65,6 +100,7 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
+ [TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
};
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
@@ -78,6 +114,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
int action;
__be16 push_vid = 0;
__be16 push_proto = 0;
+ u8 push_prio = 0;
bool exists = false;
int ret = 0, err;
@@ -99,6 +136,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
case TCA_VLAN_ACT_POP:
break;
case TCA_VLAN_ACT_PUSH:
+ case TCA_VLAN_ACT_MODIFY:
if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
if (exists)
tcf_hash_release(*a, bind);
@@ -123,6 +161,9 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
} else {
push_proto = htons(ETH_P_8021Q);
}
+
+ if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
+ push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
break;
default:
if (exists)
@@ -150,6 +191,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
v->tcfv_action = action;
v->tcfv_push_vid = push_vid;
+ v->tcfv_push_prio = push_prio;
v->tcfv_push_proto = push_proto;
v->tcf_action = parm->action;
@@ -178,10 +220,13 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
+ if ((v->tcfv_action == TCA_VLAN_ACT_PUSH ||
+ v->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
- v->tcfv_push_proto)))
+ v->tcfv_push_proto) ||
+ (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
+ v->tcfv_push_prio))))
goto nla_put_failure;
tcf_tm_dump(&t, &v->tcf_tm);
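The TCA_VLAN_ACT_MODIFY branch added above rebuilds the TCI by always replacing the VID and replacing the priority bits only when a new priority was supplied. A standalone C sketch of that bit manipulation (illustration only; modify_tci() is a hypothetical helper, while the mask and shift values follow the kernel's <linux/if_vlan.h> definitions):

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

/* Rebuild a VLAN TCI the way the MODIFY case does: replace the VID,
 * and replace the PCP bits only if a non-zero priority was requested.
 */
static uint16_t modify_tci(uint16_t old_tci, uint16_t new_vid, uint8_t new_prio)
{
	uint16_t tci = (old_tci & ~VLAN_VID_MASK) | (new_vid & VLAN_VID_MASK);

	if (new_prio) {
		tci &= ~VLAN_PRIO_MASK;
		tci |= (uint16_t)new_prio << VLAN_PRIO_SHIFT;
	}
	return tci;
}

int main(void)
{
	/* old tag: prio 5, vid 100; retag to vid 200, prio 3 */
	uint16_t old_tci = (5 << VLAN_PRIO_SHIFT) | 100;

	printf("new tci = 0x%04x\n", modify_tci(old_tci, 200, 3));
	return 0;
}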
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a7c5645373af..11da7da0b7c4 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -344,13 +344,15 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh,
+ RTM_DELTFILTER);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
}
goto errout;
case RTM_GETTFILTER:
- err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
+ err = tfilter_notify(net, skb, n, tp, fh,
+ RTM_NEWTFILTER);
goto errout;
default:
err = -EINVAL;
@@ -448,7 +450,8 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
struct net *net = sock_net(a->skb->sk);
return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
- a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
+ a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTFILTER);
}
/* called with RTNL */
@@ -552,7 +555,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
+ struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
{
@@ -560,8 +563,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
if (exts->police && tb[exts->police]) {
act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
- "police", ovr,
- TCA_ACT_BIND);
+ "police", ovr, TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -573,8 +575,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
int err, i = 0;
err = tcf_action_init(net, tb[exts->action], rate_tlv,
- NULL, ovr,
- TCA_ACT_BIND, &actions);
+ NULL, ovr, TCA_ACT_BIND,
+ &actions);
if (err)
return err;
list_for_each_entry(act, &actions, list)
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 0b8c3ace671f..eb219b78cd49 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -138,10 +138,12 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
struct tcf_exts e;
struct tcf_ematch_tree t;
- tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ goto errout;
err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t);
if (err < 0)
@@ -189,7 +191,10 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (!fnew)
return -ENOBUFS;
- tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+ err = tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+ if (err < 0)
+ goto errout;
+
err = -EINVAL;
if (handle) {
fnew->handle = handle;
@@ -226,6 +231,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ tcf_exts_destroy(&fnew->exts);
kfree(fnew);
return err;
}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c3002c2c68bb..bb1d5a487081 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -27,6 +27,8 @@ MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
#define CLS_BPF_NAME_LEN 256
+#define CLS_BPF_SUPPORTED_GEN_FLAGS \
+ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
struct cls_bpf_head {
struct list_head plist;
@@ -39,6 +41,8 @@ struct cls_bpf_prog {
struct list_head link;
struct tcf_result res;
bool exts_integrated;
+ bool offloaded;
+ u32 gen_flags;
struct tcf_exts exts;
u32 handle;
union {
@@ -54,8 +58,10 @@ struct cls_bpf_prog {
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
[TCA_BPF_CLASSID] = { .type = NLA_U32 },
[TCA_BPF_FLAGS] = { .type = NLA_U32 },
+ [TCA_BPF_FLAGS_GEN] = { .type = NLA_U32 },
[TCA_BPF_FD] = { .type = NLA_U32 },
- [TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
+ [TCA_BPF_NAME] = { .type = NLA_NUL_STRING,
+ .len = CLS_BPF_NAME_LEN },
[TCA_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
@@ -83,9 +89,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog;
int ret = -1;
- if (unlikely(!skb_mac_header_was_set(skb)))
- return -1;
-
/* Needed here for accessing maps. */
rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
@@ -93,7 +96,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
- if (at_ingress) {
+ if (tc_skip_sw(prog->gen_flags)) {
+ filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
+ } else if (at_ingress) {
/* It is safe to push/pull even if skb_shared() */
__skb_push(skb, skb->mac_len);
bpf_compute_data_end(skb);
@@ -140,6 +145,91 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
return !prog->bpf_ops;
}
+static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+ enum tc_clsbpf_command cmd)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct tc_cls_bpf_offload bpf_offload = {};
+ struct tc_to_netdev offload;
+
+ offload.type = TC_SETUP_CLSBPF;
+ offload.cls_bpf = &bpf_offload;
+
+ bpf_offload.command = cmd;
+ bpf_offload.exts = &prog->exts;
+ bpf_offload.prog = prog->filter;
+ bpf_offload.name = prog->bpf_name;
+ bpf_offload.exts_integrated = prog->exts_integrated;
+ bpf_offload.gen_flags = prog->gen_flags;
+
+ return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+}
+
+static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+ struct cls_bpf_prog *oldprog)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct cls_bpf_prog *obj = prog;
+ enum tc_clsbpf_command cmd;
+ bool skip_sw;
+ int ret;
+
+ skip_sw = tc_skip_sw(prog->gen_flags) ||
+ (oldprog && tc_skip_sw(oldprog->gen_flags));
+
+ if (oldprog && oldprog->offloaded) {
+ if (tc_should_offload(dev, tp, prog->gen_flags)) {
+ cmd = TC_CLSBPF_REPLACE;
+ } else if (!tc_skip_sw(prog->gen_flags)) {
+ obj = oldprog;
+ cmd = TC_CLSBPF_DESTROY;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ if (!tc_should_offload(dev, tp, prog->gen_flags))
+ return skip_sw ? -EINVAL : 0;
+ cmd = TC_CLSBPF_ADD;
+ }
+
+ ret = cls_bpf_offload_cmd(tp, obj, cmd);
+ if (ret)
+ return skip_sw ? ret : 0;
+
+ obj->offloaded = true;
+ if (oldprog)
+ oldprog->offloaded = false;
+
+ return 0;
+}
+
+static void cls_bpf_stop_offload(struct tcf_proto *tp,
+ struct cls_bpf_prog *prog)
+{
+ int err;
+
+ if (!prog->offloaded)
+ return;
+
+ err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+ if (err) {
+ pr_err("Stopping hardware offload failed: %d\n", err);
+ return;
+ }
+
+ prog->offloaded = false;
+}
+
+static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
+ struct cls_bpf_prog *prog)
+{
+ if (!prog->offloaded)
+ return;
+
+ cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+}
+
static int cls_bpf_init(struct tcf_proto *tp)
{
struct cls_bpf_head *head;
@@ -179,6 +269,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;
+ cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -195,6 +286,7 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
return false;
list_for_each_entry_safe(prog, tmp, &head->plist, link) {
+ cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -304,6 +396,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
{
bool is_bpf, is_ebpf, have_exts = false;
struct tcf_exts exts;
+ u32 gen_flags = 0;
int ret;
is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
@@ -311,30 +404,39 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
return -EINVAL;
- tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
- ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
+ ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
if (ret < 0)
return ret;
+ ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
+ if (ret < 0)
+ goto errout;
if (tb[TCA_BPF_FLAGS]) {
u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
- tcf_exts_destroy(&exts);
- return -EINVAL;
+ ret = -EINVAL;
+ goto errout;
}
have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
}
+ if (tb[TCA_BPF_FLAGS_GEN]) {
+ gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
+ if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
+ !tc_flags_valid(gen_flags)) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ }
prog->exts_integrated = have_exts;
+ prog->gen_flags = gen_flags;
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
cls_bpf_prog_from_efd(tb, prog, tp);
- if (ret < 0) {
- tcf_exts_destroy(&exts);
- return ret;
- }
+ if (ret < 0)
+ goto errout;
if (tb[TCA_BPF_CLASSID]) {
prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
@@ -343,6 +445,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
tcf_exts_change(tp, &prog->exts, &exts);
return 0;
+
+errout:
+ tcf_exts_destroy(&exts);
+ return ret;
}
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
@@ -388,7 +494,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!prog)
return -ENOBUFS;
- tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ if (ret < 0)
+ goto errout;
if (oldprog) {
if (handle && oldprog->handle != handle) {
@@ -406,10 +514,17 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
goto errout;
}
- ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
+ ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
+ ovr);
if (ret < 0)
goto errout;
+ ret = cls_bpf_offload(tp, prog, oldprog);
+ if (ret) {
+ cls_bpf_delete_prog(tp, prog);
+ return ret;
+ }
+
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
@@ -420,9 +535,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
*arg = (unsigned long) prog;
return 0;
+
errout:
+ tcf_exts_destroy(&prog->exts);
kfree(prog);
-
return ret;
}
@@ -470,6 +586,8 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
tm->tcm_handle = prog->handle;
+ cls_bpf_offload_update_stats(tp, prog);
+
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@@ -492,6 +610,9 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
goto nla_put_failure;
+ if (prog->gen_flags &&
+ nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 4c85bd3a750c..85233c470035 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -93,7 +93,9 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (!new)
return -ENOBUFS;
- tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+ err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+ if (err < 0)
+ goto errout;
new->handle = handle;
new->tp = tp;
err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
@@ -101,10 +103,14 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto errout;
- tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ err = tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
if (err < 0)
goto errout;
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ if (err < 0) {
+ tcf_exts_destroy(&e);
+ goto errout;
+ }
err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
if (err < 0) {
@@ -120,6 +126,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
return 0;
errout:
+ tcf_exts_destroy(&new->exts);
kfree(new);
return err;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index fbfec6a18839..e39672394c7b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -29,7 +29,7 @@
#include <net/route.h>
#include <net/flow_dissector.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -87,12 +87,14 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
-static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return flow->basic.ip_proto;
}
-static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
if (flow->ports.ports)
return ntohs(flow->ports.src);
@@ -100,7 +102,8 @@ static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys
return addr_fold(skb->sk);
}
-static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
if (flow->ports.ports)
return ntohs(flow->ports.dst);
@@ -125,14 +128,14 @@ static u32 flow_get_mark(const struct sk_buff *skb)
static u32 flow_get_nfct(const struct sk_buff *skb)
{
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
return addr_fold(skb->nfct);
#else
return 0;
#endif
}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member) \
({ \
enum ip_conntrack_info ctinfo; \
@@ -149,7 +152,8 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
})
#endif
-static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
@@ -161,7 +165,8 @@ fallback:
return flow_get_src(skb, flow);
}
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
@@ -173,14 +178,16 @@ fallback:
return flow_get_dst(skb, flow);
}
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, src.u.all));
fallback:
return flow_get_proto_src(skb, flow);
}
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
@@ -418,10 +425,12 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
return -EOPNOTSUPP;
}
- tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ if (err < 0)
+ goto err1;
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
if (err < 0)
- return err;
+ goto err1;
err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
if (err < 0)
@@ -432,13 +441,15 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (!fnew)
goto err2;
- tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ if (err < 0)
+ goto err3;
fold = (struct flow_filter *)*arg;
if (fold) {
err = -EINVAL;
if (fold->handle != handle && handle)
- goto err2;
+ goto err3;
/* Copy fold into fnew */
fnew->tp = fold->tp;
@@ -458,31 +469,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (tb[TCA_FLOW_MODE])
mode = nla_get_u32(tb[TCA_FLOW_MODE]);
if (mode != FLOW_MODE_HASH && nkeys > 1)
- goto err2;
+ goto err3;
if (mode == FLOW_MODE_HASH)
perturb_period = fold->perturb_period;
if (tb[TCA_FLOW_PERTURB]) {
if (mode != FLOW_MODE_HASH)
- goto err2;
+ goto err3;
perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
}
} else {
err = -EINVAL;
if (!handle)
- goto err2;
+ goto err3;
if (!tb[TCA_FLOW_KEYS])
- goto err2;
+ goto err3;
mode = FLOW_MODE_MAP;
if (tb[TCA_FLOW_MODE])
mode = nla_get_u32(tb[TCA_FLOW_MODE]);
if (mode != FLOW_MODE_HASH && nkeys > 1)
- goto err2;
+ goto err3;
if (tb[TCA_FLOW_PERTURB]) {
if (mode != FLOW_MODE_HASH)
- goto err2;
+ goto err3;
perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
}
@@ -542,6 +553,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
call_rcu(&fold->rcu, flow_destroy_filter);
return 0;
+err3:
+ tcf_exts_destroy(&fnew->exts);
err2:
tcf_em_tree_destroy(&t);
kfree(fnew);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 5060801a2f6d..f6f40fba599b 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -23,17 +23,26 @@
#include <net/ip.h>
#include <net/flow_dissector.h>
+#include <net/dst.h>
+#include <net/dst_metadata.h>
+
struct fl_flow_key {
int indev_ifindex;
struct flow_dissector_key_control control;
+ struct flow_dissector_key_control enc_control;
struct flow_dissector_key_basic basic;
struct flow_dissector_key_eth_addrs eth;
- struct flow_dissector_key_addrs ipaddrs;
+ struct flow_dissector_key_vlan vlan;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
};
struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_keyid enc_key_id;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -123,11 +132,31 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_fl_filter *f;
struct fl_flow_key skb_key;
struct fl_flow_key skb_mkey;
+ struct ip_tunnel_info *info;
if (!atomic_read(&head->ht.nelems))
return -1;
fl_clear_masked_range(&skb_key, &head->mask);
+
+ info = skb_tunnel_info(skb);
+ if (info) {
+ struct ip_tunnel_key *key = &info->key;
+
+ switch (ip_tunnel_info_af(info)) {
+ case AF_INET:
+ skb_key.enc_ipv4.src = key->u.ipv4.src;
+ skb_key.enc_ipv4.dst = key->u.ipv4.dst;
+ break;
+ case AF_INET6:
+ skb_key.enc_ipv6.src = key->u.ipv6.src;
+ skb_key.enc_ipv6.dst = key->u.ipv6.dst;
+ break;
+ }
+
+ skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
+ }
+
skb_key.indev_ifindex = skb->skb_iif;
/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
* so do it rather here.
@@ -212,7 +241,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
tc.type = TC_SETUP_CLSFLOWER;
tc.cls_flower = &offload;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+ &tc);
if (tc_skip_sw(flags))
return err;
@@ -293,6 +323,22 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -308,9 +354,29 @@ static void fl_set_key_val(struct nlattr **tb,
memcpy(mask, nla_data(tb[mask_type]), len);
}
+static void fl_set_key_vlan(struct nlattr **tb,
+ struct flow_dissector_key_vlan *key_val,
+ struct flow_dissector_key_vlan *key_mask)
+{
+#define VLAN_PRIORITY_MASK 0x7
+
+ if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
+ key_val->vlan_id =
+ nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
+ key_mask->vlan_id = VLAN_VID_MASK;
+ }
+ if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
+ key_val->vlan_priority =
+ nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
+ VLAN_PRIORITY_MASK;
+ key_mask->vlan_priority = VLAN_PRIORITY_MASK;
+ }
+}
+
static int fl_set_key(struct net *net, struct nlattr **tb,
struct fl_flow_key *key, struct fl_flow_key *mask)
{
+ __be16 ethertype;
#ifdef CONFIG_NET_CLS_IND
if (tb[TCA_FLOWER_INDEV]) {
int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
@@ -328,9 +394,20 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
sizeof(key->eth.src));
- fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
- &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
- sizeof(key->basic.n_proto));
+ if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
+ ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
+
+ if (ethertype == htons(ETH_P_8021Q)) {
+ fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
+ fl_set_key_val(tb, &key->basic.n_proto,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.n_proto));
+ } else {
+ key->basic.n_proto = ethertype;
+ mask->basic.n_proto = cpu_to_be16(~0);
+ }
+ }
if (key->basic.n_proto == htons(ETH_P_IP) ||
key->basic.n_proto == htons(ETH_P_IPV6)) {
@@ -359,20 +436,54 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (key->basic.ip_proto == IPPROTO_TCP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
sizeof(key->tp.src));
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst));
} else if (key->basic.ip_proto == IPPROTO_UDP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
sizeof(key->tp.src));
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
sizeof(key->tp.dst));
}
+ if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
+ tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ fl_set_key_val(tb, &key->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC,
+ &mask->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
+ sizeof(key->enc_ipv4.src));
+ fl_set_key_val(tb, &key->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST,
+ &mask->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
+ sizeof(key->enc_ipv4.dst));
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
+ tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ fl_set_key_val(tb, &key->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC,
+ &mask->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
+ sizeof(key->enc_ipv6.src));
+ fl_set_key_val(tb, &key->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST,
+ &mask->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
+ sizeof(key->enc_ipv6.dst));
+ }
+
+ fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
+ &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
+ sizeof(key->enc_key_id.keyid));
+
return 0;
}
@@ -404,12 +515,10 @@ static int fl_init_hashtable(struct cls_fl_head *head,
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
-#define FL_KEY_MEMBER_END_OFFSET(member) \
- (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
-#define FL_KEY_IN_RANGE(mask, member) \
- (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \
- FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
+#define FL_KEY_IS_MASKED(mask, member) \
+ memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
+ 0, FL_KEY_MEMBER_SIZE(member)) \
#define FL_KEY_SET(keys, cnt, id, member) \
do { \
@@ -418,9 +527,9 @@ static int fl_init_hashtable(struct cls_fl_head *head,
cnt++; \
} while(0);
-#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \
+#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
do { \
- if (FL_KEY_IN_RANGE(mask, member)) \
+ if (FL_KEY_IS_MASKED(mask, member)) \
FL_KEY_SET(keys, cnt, id, member); \
} while(0);
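
FL_KEY_IS_MASKED() replaces the old offset-range test with a per-member byte scan: memchr_inv() returns NULL when every byte of the member's mask equals the given byte (0 here) and otherwise points at the first differing byte, so a non-NULL result means the user actually masked on that key and the matching dissector key gets registered. A userspace analog of the check, assuming only that documented memchr_inv() behaviour:

#include <stdbool.h>
#include <stddef.h>

/* is_masked() is a hypothetical userspace stand-in for
 * memchr_inv(mask_member, 0, len) != NULL.
 */
static bool is_masked(const void *mask_member, size_t len)
{
	const unsigned char *p = mask_member;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i])
			return true;	/* at least one mask bit is set */
	return false;			/* all-zero mask: key unused */
}

Compared with the removed FL_KEY_IN_RANGE(), which only asked whether the member overlapped the used byte range, this skips members whose own mask bytes are all zero.
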
@@ -432,14 +541,16 @@ static void fl_init_dissector(struct cls_fl_head *head,
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
- FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
- FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
- FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
- FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
- FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
- FLOW_DISSECTOR_KEY_PORTS, tp);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_PORTS, tp);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_VLAN, vlan);
skb_flow_dissector_init(&head->dissector, keys, cnt);
}
@@ -478,10 +589,12 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
struct tcf_exts e;
int err;
- tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ goto errout;
if (tb[TCA_FLOWER_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
@@ -550,7 +663,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!fnew)
return -ENOBUFS;
- tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+ err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+ if (err < 0)
+ goto errout;
if (!handle) {
handle = fl_grab_new_handle(tp, head);
@@ -614,6 +729,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ tcf_exts_destroy(&fnew->exts);
kfree(fnew);
return err;
}
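
tcf_exts_init() now returns an error code (elsewhere in this series it gained an allocation), so every classifier converts to the same shape: init, then validate, and on any later failure destroy the exts before freeing the filter, as the new errout label above does. A condensed sketch of that pattern; example_filter and the TCA_EXAMPLE_* ids are placeholders for the real per-classifier ones (TCA_FLOWER_ACT, TCA_FW_ACT, and so on):

static int example_set_parms(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, struct nlattr **tb,
			     struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
	if (err < 0)
		return err;		/* nothing to clean up yet */

	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	/* ... parse the remaining attributes into f ... */

	tcf_exts_change(tp, &f->exts, &e);
	return 0;

errout:
	tcf_exts_destroy(&e);		/* release what init/validate built */
	return err;
}
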
@@ -668,6 +784,29 @@ static int fl_dump_key_val(struct sk_buff *skb,
return 0;
}
+static int fl_dump_key_vlan(struct sk_buff *skb,
+ struct flow_dissector_key_vlan *vlan_key,
+ struct flow_dissector_key_vlan *vlan_mask)
+{
+ int err;
+
+ if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
+ return 0;
+ if (vlan_mask->vlan_id) {
+ err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
+ vlan_key->vlan_id);
+ if (err)
+ return err;
+ }
+ if (vlan_mask->vlan_priority) {
+ err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
+ vlan_key->vlan_priority);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
@@ -712,6 +851,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
&mask->basic.n_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.n_proto)))
goto nla_put_failure;
+
+ if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
+ goto nla_put_failure;
+
if ((key->basic.n_proto == htons(ETH_P_IP) ||
key->basic.n_proto == htons(ETH_P_IPV6)) &&
fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
@@ -738,21 +881,48 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (key->basic.ip_proto == IPPROTO_TCP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst))))
goto nla_put_failure;
else if (key->basic.ip_proto == IPPROTO_UDP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
sizeof(key->tp.dst))))
goto nla_put_failure;
+ if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+ (fl_dump_key_val(skb, &key->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
+ sizeof(key->enc_ipv4.src)) ||
+ fl_dump_key_val(skb, &key->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
+ sizeof(key->enc_ipv4.dst))))
+ goto nla_put_failure;
+ else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+ (fl_dump_key_val(skb, &key->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
+ sizeof(key->enc_ipv6.src)) ||
+ fl_dump_key_val(skb, &key->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST,
+ &mask->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
+ sizeof(key->enc_ipv6.dst))))
+ goto nla_put_failure;
+
+ if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
+ &mask->enc_key_id, TCA_FLOWER_UNSPEC,
+ sizeof(key->enc_key_id)))
+ goto nla_put_failure;
+
nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
if (tcf_exts_dump(skb, &f->exts))
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index f23a3b68bba6..9dc63d54e167 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -57,7 +57,7 @@ static u32 fw_hash(u32 handle)
}
static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
+ struct tcf_result *res)
{
struct fw_head *head = rcu_dereference_bh(tp->root);
struct fw_filter *f;
@@ -188,17 +188,20 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
static int
fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
- struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
+ struct nlattr **tb, struct nlattr **tca, unsigned long base,
+ bool ovr)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct tcf_exts e;
u32 mask;
int err;
- tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ err = tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ if (err < 0)
+ goto errout;
if (tb[TCA_FW_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
@@ -235,9 +238,8 @@ errout:
static int fw_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ u32 handle, struct nlattr **tca, unsigned long *arg,
+ bool ovr)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *) *arg;
@@ -270,10 +272,15 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
#endif /* CONFIG_NET_CLS_IND */
fnew->tp = f->tp;
- tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ err = tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ if (err < 0) {
+ kfree(fnew);
+ return err;
+ }
err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr);
if (err < 0) {
+ tcf_exts_destroy(&fnew->exts);
kfree(fnew);
return err;
}
@@ -313,7 +320,9 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
return -ENOBUFS;
- tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ err = tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ if (err < 0)
+ goto errout;
f->id = handle;
f->tp = tp;
@@ -328,6 +337,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ tcf_exts_destroy(&f->exts);
kfree(f);
return err;
}
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 08a3b0a6f5ab..455fc8f83d0a 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -268,8 +268,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static void
-route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter(struct rcu_head *head)
{
struct route4_filter *f = container_of(head, struct route4_filter, rcu);
@@ -383,17 +382,19 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *est, int new,
bool ovr)
{
- int err;
u32 id = 0, to = 0, nhandle = 0x8000;
struct route4_filter *fp;
unsigned int h1;
struct route4_bucket *b;
struct tcf_exts e;
+ int err;
- tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ goto errout;
err = -EINVAL;
if (tb[TCA_ROUTE4_TO]) {
@@ -472,10 +473,8 @@ errout:
}
static int route4_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ struct tcf_proto *tp, unsigned long base, u32 handle,
+ struct nlattr **tca, unsigned long *arg, bool ovr)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
@@ -503,7 +502,10 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (!f)
goto errout;
- tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+ err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+ if (err < 0)
+ goto errout;
+
if (fold) {
f->id = fold->id;
f->iif = fold->iif;
@@ -557,6 +559,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ if (f)
+ tcf_exts_destroy(&f->exts);
kfree(f);
return err;
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index f9c9fc075fe6..4f05a19fb073 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -487,10 +487,12 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
+ if (err < 0)
+ goto errout2;
f = (struct rsvp_filter *)*arg;
if (f) {
@@ -506,7 +508,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
goto errout2;
}
- tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ if (err < 0) {
+ kfree(n);
+ goto errout2;
+ }
if (tb[TCA_RSVP_CLASSID]) {
n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
@@ -530,7 +536,9 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
goto errout2;
- tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ if (err < 0)
+ goto errout;
h2 = 16;
if (tb[TCA_RSVP_SRC]) {
memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
@@ -627,6 +635,7 @@ insert:
goto insert;
errout:
+ tcf_exts_destroy(&f->exts);
kfree(f);
errout2:
tcf_exts_destroy(&e);
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 944c8ff45055..96144bdf30db 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -50,14 +50,13 @@ struct tcindex_data {
struct rcu_head rcu;
};
-static inline int
-tcindex_filter_is_set(struct tcindex_filter_result *r)
+static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}
-static struct tcindex_filter_result *
-tcindex_lookup(struct tcindex_data *p, u16 key)
+static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+ u16 key)
{
if (p->perfect) {
struct tcindex_filter_result *f = p->perfect + key;
@@ -144,7 +143,8 @@ static void tcindex_destroy_rexts(struct rcu_head *head)
static void tcindex_destroy_fexts(struct rcu_head *head)
{
- struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+ struct tcindex_filter *f = container_of(head, struct tcindex_filter,
+ rcu);
tcf_exts_destroy(&f->result.exts);
kfree(f);
@@ -219,10 +219,10 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
[TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
};
-static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
memset(r, 0, sizeof(*r));
- tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}
static void __tcindex_partial_destroy(struct rcu_head *head)
@@ -233,23 +233,57 @@ static void __tcindex_partial_destroy(struct rcu_head *head)
kfree(p);
}
+static void tcindex_free_perfect_hash(struct tcindex_data *cp)
+{
+ int i;
+
+ for (i = 0; i < cp->hash; i++)
+ tcf_exts_destroy(&cp->perfect[i].exts);
+ kfree(cp->perfect);
+}
+
+static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+{
+ int i, err = 0;
+
+ cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
+ GFP_KERNEL);
+ if (!cp->perfect)
+ return -ENOMEM;
+
+ for (i = 0; i < cp->hash; i++) {
+ err = tcf_exts_init(&cp->perfect[i].exts,
+ TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ if (err < 0)
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ tcindex_free_perfect_hash(cp);
+ return err;
+}
+
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
u32 handle, struct tcindex_data *p,
struct tcindex_filter_result *r, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
- int err, balloc = 0;
struct tcindex_filter_result new_filter_result, *old_r = r;
struct tcindex_filter_result cr;
- struct tcindex_data *cp, *oldp;
+ struct tcindex_data *cp = NULL, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
+ int err, balloc = 0;
struct tcf_exts e;
- tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ goto errout;
err = -ENOMEM;
/* tcindex_data attributes must look atomic to classifier/lookup so
@@ -270,19 +304,20 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (p->perfect) {
int i;
- cp->perfect = kmemdup(p->perfect,
- sizeof(*r) * cp->hash, GFP_KERNEL);
- if (!cp->perfect)
+ if (tcindex_alloc_perfect_hash(cp) < 0)
goto errout;
for (i = 0; i < cp->hash; i++)
- tcf_exts_init(&cp->perfect[i].exts,
- TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ cp->perfect[i].res = p->perfect[i].res;
balloc = 1;
}
cp->h = p->h;
- tcindex_filter_result_init(&new_filter_result);
- tcindex_filter_result_init(&cr);
+ err = tcindex_filter_result_init(&new_filter_result);
+ if (err < 0)
+ goto errout1;
+ err = tcindex_filter_result_init(&cr);
+ if (err < 0)
+ goto errout1;
if (old_r)
cr.res = r->res;
@@ -338,15 +373,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = -ENOMEM;
if (!cp->perfect && !cp->h) {
if (valid_perfect_hash(cp)) {
- int i;
-
- cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL);
- if (!cp->perfect)
+ if (tcindex_alloc_perfect_hash(cp) < 0)
goto errout_alloc;
- for (i = 0; i < cp->hash; i++)
- tcf_exts_init(&cp->perfect[i].exts,
- TCA_TCINDEX_ACT,
- TCA_TCINDEX_POLICE);
balloc = 1;
} else {
struct tcindex_filter __rcu **hash;
@@ -373,8 +401,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (!f)
goto errout_alloc;
f->key = handle;
- tcindex_filter_result_init(&f->result);
f->next = NULL;
+ err = tcindex_filter_result_init(&f->result);
+ if (err < 0) {
+ kfree(f);
+ goto errout_alloc;
+ }
}
if (tb[TCA_TCINDEX_CLASSID]) {
@@ -387,8 +419,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
else
tcf_exts_change(tp, &cr.exts, &e);
- if (old_r && old_r != r)
- tcindex_filter_result_init(old_r);
+ if (old_r && old_r != r) {
+ err = tcindex_filter_result_init(old_r);
+ if (err < 0) {
+ kfree(f);
+ goto errout_alloc;
+ }
+ }
oldp = p;
r->res = cr.res;
@@ -415,9 +452,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
errout_alloc:
if (balloc == 1)
- kfree(cp->perfect);
+ tcindex_free_perfect_hash(cp);
else if (balloc == 2)
kfree(cp->h);
+errout1:
+ tcf_exts_destroy(&cr.exts);
+ tcf_exts_destroy(&new_filter_result.exts);
errout:
kfree(cp);
tcf_exts_destroy(&e);
@@ -510,7 +550,7 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index ffe593efe930..ae83c3aec308 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -104,7 +104,8 @@ static inline unsigned int u32_hash_fold(__be32 key,
return h;
}
-static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
+static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
{
struct {
struct tc_u_knode *knode;
@@ -256,8 +257,7 @@ deadloop:
return -1;
}
-static struct tc_u_hnode *
-u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
+static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
@@ -270,8 +270,7 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
-static struct tc_u_knode *
-u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
+static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
unsigned int sel;
struct tc_u_knode *n = NULL;
@@ -360,8 +359,7 @@ static int u32_init(struct tcf_proto *tp)
return 0;
}
-static int u32_destroy_key(struct tcf_proto *tp,
- struct tc_u_knode *n,
+static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
bool free_pf)
{
tcf_exts_destroy(&n->exts);
@@ -448,9 +446,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
}
}
-static int u32_replace_hw_hnode(struct tcf_proto *tp,
- struct tc_u_hnode *h,
- u32 flags)
+static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -496,9 +493,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
}
}
-static int u32_replace_hw_knode(struct tcf_proto *tp,
- struct tc_u_knode *n,
- u32 flags)
+static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -709,13 +705,15 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
struct tc_u_knode *n, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
- int err;
struct tcf_exts e;
+ int err;
- tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
if (err < 0)
return err;
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ goto errout;
err = -EINVAL;
if (tb[TCA_U32_LINK]) {
@@ -761,8 +759,7 @@ errout:
return err;
}
-static void u32_replace_knode(struct tcf_proto *tp,
- struct tc_u_common *tp_c,
+static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
struct tc_u_knode *n)
{
struct tc_u_knode __rcu **ins;
@@ -833,15 +830,17 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
new->tp = tp;
memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
- tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE);
+ if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
+ kfree(new);
+ return NULL;
+ }
return new;
}
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ struct nlattr **tca, unsigned long *arg, bool ovr)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
@@ -985,9 +984,12 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
n->flags = flags;
- tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
n->tp = tp;
+ err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
+ if (err < 0)
+ goto errout;
+
#ifdef CONFIG_CLS_U32_MARK
n->pcpu_success = alloc_percpu(u32);
if (!n->pcpu_success) {
@@ -1028,9 +1030,10 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
errhw:
#ifdef CONFIG_CLS_U32_MARK
free_percpu(n->pcpu_success);
-errout:
#endif
+errout:
+ tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
free_percpu(n->pf);
#endif
@@ -1079,7 +1082,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t)
{
struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct tc_u_hnode *ht_up, *ht_down;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 12ebde845523..206dc24add3a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -29,6 +29,7 @@
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
+#include <linux/hashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -259,37 +260,40 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
struct Qdisc *q;
+ if (!qdisc_dev(root))
+ return (root->handle == handle ? root : NULL);
+
if (!(root->flags & TCQ_F_BUILTIN) &&
root->handle == handle)
return root;
- list_for_each_entry_rcu(q, &root->list, list) {
+ hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
if (q->handle == handle)
return q;
}
return NULL;
}
-void qdisc_list_add(struct Qdisc *q)
+void qdisc_hash_add(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL();
- list_add_tail_rcu(&q->list, &root->list);
+ hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
}
}
-EXPORT_SYMBOL(qdisc_list_add);
+EXPORT_SYMBOL(qdisc_hash_add);
-void qdisc_list_del(struct Qdisc *q)
+void qdisc_hash_del(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
ASSERT_RTNL();
- list_del_rcu(&q->list);
+ hash_del_rcu(&q->hash);
}
}
-EXPORT_SYMBOL(qdisc_list_del);
+EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
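
The per-root linked list of qdiscs is replaced by a hashtable on the device (the dev->qdisc_hash field used above, presumably declared with DECLARE_HASHTABLE() elsewhere in the series), keyed by the 32-bit qdisc handle, so qdisc_lookup() becomes a bucket walk instead of a full-list scan. A minimal sketch of the <linux/hashtable.h> idiom in play; my_qdisc, my_table and the helpers are hypothetical, only hash_add_rcu(), hash_for_each_possible_rcu() and hash_del_rcu() are the real API:

#include <linux/hashtable.h>

struct my_qdisc {
	u32 handle;
	struct hlist_node hash;			/* mirrors Qdisc::hash */
};

static DEFINE_HASHTABLE(my_table, 4);		/* 2^4 buckets for the sketch */

static void my_add(struct my_qdisc *q)
{
	hash_add_rcu(my_table, &q->hash, q->handle);
}

static struct my_qdisc *my_lookup(u32 handle)
{
	struct my_qdisc *q;

	/* Walks only the bucket the handle hashes to; different handles
	 * may still collide, hence the explicit compare.
	 */
	hash_for_each_possible_rcu(my_table, q, hash, handle)
		if (q->handle == handle)
			return q;
	return NULL;
}

static void my_del(struct my_qdisc *q)
{
	hash_del_rcu(&q->hash);
}
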
@@ -385,7 +389,8 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
static struct qdisc_rate_table *qdisc_rtab_list;
-struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab)
{
struct qdisc_rate_table *rtab;
@@ -537,7 +542,8 @@ nla_put_failure:
return -1;
}
-void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab)
{
int pkt_len, slot;
@@ -884,10 +890,10 @@ static struct lock_class_key qdisc_rx_lock;
Parameters are passed via opt.
*/
-static struct Qdisc *
-qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
- struct Qdisc *p, u32 parent, u32 handle,
- struct nlattr **tca, int *errp)
+static struct Qdisc *qdisc_create(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *p, u32 parent, u32 handle,
+ struct nlattr **tca, int *errp)
{
int err;
struct nlattr *kind = tca[TCA_KIND];
@@ -998,7 +1004,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
goto err_out4;
}
- qdisc_list_add(sch);
+ qdisc_hash_add(sch);
return sch;
}
@@ -1069,7 +1075,8 @@ struct check_loop_arg {
int depth;
};
-static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
+static int check_loop_fn(struct Qdisc *q, unsigned long cl,
+ struct qdisc_walker *w);
static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
@@ -1431,10 +1438,11 @@ err_out:
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
struct netlink_callback *cb,
- int *q_idx_p, int s_q_idx)
+ int *q_idx_p, int s_q_idx, bool recur)
{
int ret = 0, q_idx = *q_idx_p;
struct Qdisc *q;
+ int b;
if (!root)
return 0;
@@ -1445,18 +1453,30 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
} else {
if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWQDISC) <= 0)
goto done;
q_idx++;
}
- list_for_each_entry(q, &root->list, list) {
+
+ /* If dumping singletons, there is no qdisc_dev(root) and the singleton
+ * itself has already been dumped.
+ *
+ * If the top-level qdisc and the per-device qdisc hashtable were already
+ * dumped above, don't walk the hashtable a second time.
+ */
+ if (!qdisc_dev(root) || !recur)
+ goto out;
+
+ hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
if (q_idx < s_q_idx) {
q_idx++;
continue;
}
if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWQDISC) <= 0)
goto done;
q_idx++;
}
@@ -1490,13 +1510,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+ if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
- &q_idx, s_q_idx) < 0)
+ &q_idx, s_q_idx, false) < 0)
goto done;
cont:
@@ -1625,7 +1646,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
if (cops->delete)
err = cops->delete(q, cl);
if (err == 0)
- tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
+ tclass_notify(net, skb, n, q, cl,
+ RTM_DELTCLASS);
goto out;
case RTM_GETTCLASS:
err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
@@ -1723,12 +1745,14 @@ struct qdisc_dump_args {
struct netlink_callback *cb;
};
-static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
+static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
+ struct qdisc_walker *arg)
{
struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
- a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
+ a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
@@ -1765,6 +1789,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
int *t_p, int s_t)
{
struct Qdisc *q;
+ int b;
if (!root)
return 0;
@@ -1772,7 +1797,10 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
return -1;
- list_for_each_entry(q, &root->list, list) {
+ if (!qdisc_dev(root))
+ return 0;
+
+ hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
}
@@ -1957,10 +1985,12 @@ static int __init pktsched_init(void)
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
- rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
+ NULL);
rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
- rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
+ NULL);
return 0;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 4002df3c7d9f..5bfa79ee657c 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -69,7 +69,7 @@ struct codel_sched_data {
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
struct Qdisc *sch = ctx;
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
if (skb)
sch->qstats.backlog -= qdisc_pkt_len(skb);
@@ -172,7 +172,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index baeed6a78d28..1e37247656f8 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -31,7 +31,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- if (likely(skb_queue_len(&sch->q) < sch->limit))
+ if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
- if (likely(skb_queue_len(&sch->q) < sch->limit))
+ if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);
prev_backlog = sch->qstats.backlog;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index e5458b99e09c..18e752439f6f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -86,6 +86,7 @@ struct fq_sched_data {
struct rb_root delayed; /* for rate limited flows */
u64 time_next_delayed_flow;
+ unsigned long unthrottle_latency_ns;
struct fq_flow internal; /* for non classified or high prio packets */
u32 quantum;
@@ -94,6 +95,7 @@ struct fq_sched_data {
u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
u32 orphan_mask; /* mask for orphaned skb */
+ u32 low_rate_threshold;
struct rb_root *fq_root;
u8 rate_enable;
u8 fq_trees_log;
@@ -407,11 +409,19 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
+ unsigned long sample;
struct rb_node *p;
if (q->time_next_delayed_flow > now)
return;
+ /* Update unthrottle latency EWMA.
+ * This is cheap and can help diagnose timer/latency problems.
+ */
+ sample = (unsigned long)(now - q->time_next_delayed_flow);
+ q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
+ q->unthrottle_latency_ns += sample >> 3;
+
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
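
The two shift operations above form an exponentially weighted moving average with weight 1/8: each update keeps roughly 7/8 of the previous estimate and adds 1/8 of the new sample, where the sample is how far past time_next_delayed_flow the watchdog actually fired. A stand-alone numeric check of that identity (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* new = old - old/8 + sample/8, i.e. about 7/8*old + 1/8*sample
	 * with integer shifts.
	 */
	unsigned long ewma = 0;
	unsigned long samples[] = { 8000, 8000, 8000, 64000 };	/* ns */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		ewma -= ewma >> 3;
		ewma += samples[i] >> 3;
		printf("sample=%lu ns -> ewma=%lu ns\n", samples[i], ewma);
	}
	return 0;	/* moves about 1/8 of the way toward each new sample */
}
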
@@ -433,7 +443,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
- u32 rate;
+ u32 rate, plen;
skb = fq_dequeue_head(sch, &q->internal);
if (skb)
@@ -482,7 +492,7 @@ begin:
prefetch(&skb->end);
f->credit -= qdisc_pkt_len(skb);
- if (f->credit > 0 || !q->rate_enable)
+ if (!q->rate_enable)
goto out;
/* Do not pace locally generated ack packets */
@@ -493,8 +503,15 @@ begin:
if (skb->sk)
rate = min(skb->sk->sk_pacing_rate, rate);
+ if (rate <= q->low_rate_threshold) {
+ f->credit = 0;
+ plen = qdisc_pkt_len(skb);
+ } else {
+ plen = max(qdisc_pkt_len(skb), q->quantum);
+ if (f->credit > 0)
+ goto out;
+ }
if (rate != ~0U) {
- u32 plen = max(qdisc_pkt_len(skb), q->quantum);
u64 len = (u64)plen * NSEC_PER_SEC;
if (likely(rate))
@@ -507,7 +524,12 @@ begin:
len = NSEC_PER_SEC;
q->stat_pkts_too_long++;
}
-
+ /* Account for scheduler/timer drift.
+ * f->time_next_packet was set when prior packet was sent,
+ * and current time (@now) can be too late by tens of us.
+ */
+ if (f->time_next_packet)
+ len -= min(len/2, now - f->time_next_packet);
f->time_next_packet = now + len;
}
out:
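
With the new low_rate_threshold (initialised to 550000 / 8 further down, i.e. 550 kbit/s given that sk_pacing_rate is in bytes per second), flows paced below the threshold give up the quantum credit: credit is zeroed and plen is the real packet length, so every dequeue spaces packets by plen * NSEC_PER_SEC / rate, minus up to half of that to absorb timer drift. A rough worked example of the gap at exactly the threshold rate (userspace arithmetic only):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long rate = 550000 / 8;	/* ~68750 bytes/s */
	unsigned long long plen = 1500;		/* one full-size packet */
	unsigned long long len = plen * NSEC_PER_SEC / rate;

	/* roughly 21.8 ms between 1500-byte packets at 550 kbit/s */
	printf("pacing gap: %llu ns (%.1f ms)\n", len, len / 1e6);
	return 0;
}

Above the threshold the previous behaviour is kept: plen = max(pkt_len, quantum), and a still-positive credit skips pacing for that packet.
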
@@ -662,6 +684,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
+ [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -716,6 +739,10 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_FQ_FLOW_MAX_RATE])
q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
+ q->low_rate_threshold =
+ nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
+
if (tb[TCA_FQ_RATE_ENABLE]) {
u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
@@ -774,6 +801,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
q->flow_refill_delay = msecs_to_jiffies(40);
q->flow_max_rate = ~0U;
+ q->time_next_delayed_flow = ~0ULL;
q->rate_enable = 1;
q->new_flows.first = NULL;
q->old_flows.first = NULL;
@@ -781,6 +809,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->fq_root = NULL;
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
+ q->low_rate_threshold = 550000 / 8;
qdisc_watchdog_init(&q->watchdog, sch);
if (opt)
@@ -811,6 +840,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
jiffies_to_usecs(q->flow_refill_delay)) ||
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
+ nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
+ q->low_rate_threshold) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
@@ -823,20 +854,24 @@ nla_put_failure:
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_get_ns();
- struct tc_fq_qd_stats st = {
- .gc_flows = q->stat_gc_flows,
- .highprio_packets = q->stat_internal_packets,
- .tcp_retrans = q->stat_tcp_retrans,
- .throttled = q->stat_throttled,
- .flows_plimit = q->stat_flows_plimit,
- .pkts_too_long = q->stat_pkts_too_long,
- .allocation_errors = q->stat_allocation_errors,
- .flows = q->flows,
- .inactive_flows = q->inactive_flows,
- .throttled_flows = q->throttled_flows,
- .time_next_delayed_flow = q->time_next_delayed_flow - now,
- };
+ struct tc_fq_qd_stats st;
+
+ sch_tree_lock(sch);
+
+ st.gc_flows = q->stat_gc_flows;
+ st.highprio_packets = q->stat_internal_packets;
+ st.tcp_retrans = q->stat_tcp_retrans;
+ st.throttled = q->stat_throttled;
+ st.flows_plimit = q->stat_flows_plimit;
+ st.pkts_too_long = q->stat_pkts_too_long;
+ st.allocation_errors = q->stat_allocation_errors;
+ st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+ st.flows = q->flows;
+ st.inactive_flows = q->inactive_flows;
+ st.throttled_flows = q->throttled_flows;
+ st.unthrottle_latency_ns = min_t(unsigned long,
+ q->unthrottle_latency_ns, ~0U);
+ sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 657c13362b19..6cfb6e9038c2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -423,7 +423,6 @@ struct Qdisc noop_qdisc = {
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
- .list = LIST_HEAD_INIT(noop_qdisc.list),
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
.running = SEQCNT_ZERO(noop_qdisc.running),
@@ -467,7 +466,7 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
*/
struct pfifo_fast_priv {
u32 bitmap;
- struct sk_buff_head q[PFIFO_FAST_BANDS];
+ struct qdisc_skb_head q[PFIFO_FAST_BANDS];
};
/*
@@ -478,7 +477,7 @@ struct pfifo_fast_priv {
*/
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
int band)
{
return priv->q + band;
@@ -487,10 +486,10 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
- if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+ if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff_head *list = band2list(priv, band);
+ struct qdisc_skb_head *list = band2list(priv, band);
priv->bitmap |= (1 << band);
qdisc->q.qlen++;
@@ -506,11 +505,16 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
int band = bitmap2band[priv->bitmap];
if (likely(band >= 0)) {
- struct sk_buff_head *list = band2list(priv, band);
- struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+ struct qdisc_skb_head *qh = band2list(priv, band);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(qdisc, skb);
+ qdisc_bstats_update(qdisc, skb);
+ }
qdisc->q.qlen--;
- if (skb_queue_empty(list))
+ if (qh->qlen == 0)
priv->bitmap &= ~(1 << band);
return skb;
@@ -525,9 +529,9 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
int band = bitmap2band[priv->bitmap];
if (band >= 0) {
- struct sk_buff_head *list = band2list(priv, band);
+ struct qdisc_skb_head *qh = band2list(priv, band);
- return skb_peek(list);
+ return qh->head;
}
return NULL;
@@ -565,7 +569,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- __skb_queue_head_init(band2list(priv, prio));
+ qdisc_skb_head_init(band2list(priv, prio));
/* Can by-pass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
@@ -613,8 +617,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
}
- INIT_LIST_HEAD(&sch->list);
- skb_queue_head_init(&sch->q);
+ qdisc_skb_head_init(&sch->q);
+ spin_lock_init(&sch->q.lock);
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
@@ -701,7 +705,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
return;
#ifdef CONFIG_NET_SCHED
- qdisc_list_del(qdisc);
+ qdisc_hash_del(qdisc);
qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
@@ -789,6 +793,10 @@ static void attach_default_qdiscs(struct net_device *dev)
qdisc->ops->attach(qdisc);
}
}
+#ifdef CONFIG_NET_SCHED
+ if (dev->qdisc)
+ qdisc_hash_add(dev->qdisc);
+#endif
}
static void transition_one_qdisc(struct net_device *dev,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3ddc7bd74ecb..000f1d36128e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -142,8 +142,6 @@ struct hfsc_class {
link-sharing, max(myf, cfmin) */
u64 cl_myf; /* my fit-time (calculated from this
class's own upperlimit curve) */
- u64 cl_myfadj; /* my fit-time adjustment (to cancel
- history dependence) */
u64 cl_cfmin; /* earliest children's fit-time (used
with cl_myf to obtain cl_f) */
u64 cl_cvtmin; /* minimal virtual time among the
@@ -151,11 +149,8 @@ struct hfsc_class {
(monotonic within a period) */
u64 cl_vtadj; /* intra-period cumulative vt
adjustment */
- u64 cl_vtoff; /* inter-period cumulative vt offset */
- u64 cl_cvtmax; /* max child's vt in the last period */
- u64 cl_cvtoff; /* cumulative cvtmax of all periods */
- u64 cl_pcvtoff; /* parent's cvtoff at initialization
- time */
+ u64 cl_cvtoff; /* largest virtual time seen among
+ the children */
struct internal_sc cl_rsc; /* internal real-time service curve */
struct internal_sc cl_fsc; /* internal fair service curve */
@@ -701,28 +696,16 @@ init_vf(struct hfsc_class *cl, unsigned int len)
} else {
/*
* first child for a new parent backlog period.
- * add parent's cvtmax to cvtoff to make a new
- * vt (vtoff + vt) larger than the vt in the
- * last period for all children.
+ * initialize cl_vt to the highest value seen
+ * among the siblings. this is analogous to
+ * what cur_time would provide in realtime case.
*/
- vt = cl->cl_parent->cl_cvtmax;
- cl->cl_parent->cl_cvtoff += vt;
- cl->cl_parent->cl_cvtmax = 0;
+ cl->cl_vt = cl->cl_parent->cl_cvtoff;
cl->cl_parent->cl_cvtmin = 0;
- cl->cl_vt = 0;
}
- cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
- cl->cl_pcvtoff;
-
/* update the virtual curve */
- vt = cl->cl_vt + cl->cl_vtoff;
- rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
- cl->cl_total);
- if (cl->cl_virtual.x == vt) {
- cl->cl_virtual.x -= cl->cl_vtoff;
- cl->cl_vtoff = 0;
- }
+ rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
cl->cl_vtadj = 0;
cl->cl_vtperiod++; /* increment vt period */
@@ -745,7 +728,6 @@ init_vf(struct hfsc_class *cl, unsigned int len)
/* compute myf */
cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
cl->cl_total);
- cl->cl_myfadj = 0;
}
}
@@ -779,8 +761,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
go_passive = 0;
/* update vt */
- cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
- - cl->cl_vtoff + cl->cl_vtadj;
+ cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;
/*
* if vt of the class is smaller than cvtmin,
@@ -795,9 +776,9 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
if (go_passive) {
/* no more active child, going passive */
- /* update cvtmax of the parent class */
- if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
- cl->cl_parent->cl_cvtmax = cl->cl_vt;
+ /* update cvtoff of the parent class */
+ if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
+ cl->cl_parent->cl_cvtoff = cl->cl_vt;
/* remove this class from the vt tree */
vttree_remove(cl);
@@ -813,9 +794,10 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
/* update f */
if (cl->cl_flags & HFSC_USC) {
+ cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
+#if 0
cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
cl->cl_total);
-#if 0
/*
* This code causes classes to stay way under their
* limit when multiple classes are used at gigabit
@@ -940,7 +922,7 @@ static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
sc2isc(fsc, &cl->cl_fsc);
- rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vtoff + cl->cl_vt, cl->cl_total);
+ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
cl->cl_flags |= HFSC_FSC;
}
@@ -1094,7 +1076,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (parent->level == 0)
hfsc_purge_queue(sch, parent);
hfsc_adjust_levels(parent);
- cl->cl_pcvtoff = parent->cl_cvtoff;
sch_tree_unlock(sch);
qdisc_class_hash_grow(sch, &q->clhash);
@@ -1482,16 +1463,12 @@ hfsc_reset_class(struct hfsc_class *cl)
cl->cl_e = 0;
cl->cl_vt = 0;
cl->cl_vtadj = 0;
- cl->cl_vtoff = 0;
cl->cl_cvtmin = 0;
- cl->cl_cvtmax = 0;
cl->cl_cvtoff = 0;
- cl->cl_pcvtoff = 0;
cl->cl_vtperiod = 0;
cl->cl_parentperiod = 0;
cl->cl_f = 0;
cl->cl_myf = 0;
- cl->cl_myfadj = 0;
cl->cl_cfmin = 0;
cl->cl_nactive = 0;
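
The removed fields (cl_vtoff, cl_cvtmax, cl_pcvtoff, cl_myfadj) implemented per-backlog-period offsets on the virtual time; the simplified scheme keeps one cl_cvtoff per parent, the largest virtual time any of its children has reached, and starts the first newly backlogged child there. A compact, hedged restatement of the new bookkeeping in init_vf()/update_vf() above, with F^{-1} standing for rtsc_y2x() on the class's virtual (fair service) curve:

% summary of the simplified HFSC virtual-time handling
\begin{aligned}
\text{first active child of a parent:}\quad & vt_{cl} \leftarrow \mathrm{cvtoff}_{parent} \\
\text{while backlogged:}\quad & vt_{cl} = F_{cl}^{-1}(\mathrm{cl\_total}) + \mathrm{vtadj}_{cl} \\
\text{child goes passive:}\quad & \mathrm{cvtoff}_{parent} \leftarrow \max(\mathrm{cvtoff}_{parent},\, vt_{cl})
\end{aligned}
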
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 53dbfa187870..c798d0de8a9d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -162,7 +162,7 @@ struct htb_sched {
struct work_struct work;
/* non shaped skbs; let them go directly thru */
- struct sk_buff_head direct_queue;
+ struct qdisc_skb_head direct_queue;
long direct_pkts;
struct qdisc_watchdog watchdog;
@@ -570,6 +570,22 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
list_del_init(&cl->un.leaf.drop_list);
}
+static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+ struct qdisc_skb_head *qh)
+{
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
+}
+
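
Several qdiscs in this series move from struct sk_buff_head (doubly linked, carrying its own lock and skb->prev maintenance) to the smaller struct qdisc_skb_head, which keeps just head, tail and qlen and chains packets through skb->next; htb_enqueue_tail() above and netem_enqueue_skb_head() further down are the two enqueue directions. A userspace-style miniature of the structure and its head dequeue, assumed to behave like __qdisc_dequeue_head():

#include <stddef.h>

/* Hypothetical miniature: node stands in for sk_buff, and only the
 * single skb->next-style link is modelled.
 */
struct node { struct node *next; };

struct miniq {
	struct node *head;
	struct node *tail;
	unsigned int qlen;
};

static void miniq_enqueue_tail(struct miniq *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;		/* queue was empty */
	q->tail = n;
	q->qlen++;
}

static struct node *miniq_dequeue_head(struct miniq *q)
{
	struct node *n = q->head;

	if (!n)
		return NULL;
	q->head = n->next;
	if (!q->head)
		q->tail = NULL;		/* queue became empty */
	n->next = NULL;
	q->qlen--;
	return n;
}
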
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -580,7 +596,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
- __skb_queue_tail(&q->direct_queue, skb);
+ htb_enqueue_tail(skb, sch, &q->direct_queue);
q->direct_pkts++;
} else {
return qdisc_drop(skb, sch, to_free);
@@ -888,7 +904,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
- skb = __skb_dequeue(&q->direct_queue);
+ skb = __qdisc_dequeue_head(&q->direct_queue);
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
@@ -1019,7 +1035,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
- __skb_queue_head_init(&q->direct_queue);
+ qdisc_skb_head_init(&q->direct_queue);
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index b9439827c172..2bc8d7f8df16 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -88,7 +88,7 @@ static void mq_attach(struct Qdisc *sch)
qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
if (ntx < dev->real_num_tx_queues)
- qdisc_list_add(qdisc);
+ qdisc_hash_add(qdisc);
#endif
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 549c66359924..b5c502c78143 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -182,7 +182,7 @@ static void mqprio_attach(struct Qdisc *sch)
if (old)
qdisc_destroy(old);
if (ntx < dev->real_num_tx_queues)
- qdisc_list_add(qdisc);
+ qdisc_hash_add(qdisc);
}
kfree(priv->qdiscs);
priv->qdiscs = NULL;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aaaf02175338..9f7b380cf0a3 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -413,6 +413,16 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
return segs;
}
+static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
+{
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
+}
+
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -502,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<(prandom_u32() % 8);
}
- if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ if (unlikely(sch->q.qlen >= sch->limit))
return qdisc_drop(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
@@ -522,8 +532,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->rate) {
struct sk_buff *last;
- if (!skb_queue_empty(&sch->q))
- last = skb_peek_tail(&sch->q);
+ if (sch->q.qlen)
+ last = sch->q.tail;
else
last = netem_rb_to_skb(rb_last(&q->t_root));
if (last) {
@@ -552,7 +562,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cb->time_to_send = psched_get_time();
q->counter = 0;
- __skb_queue_head(&sch->q, skb);
+ netem_enqueue_skb_head(&sch->q, skb);
sch->qstats.requeues++;
}
@@ -587,7 +597,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct rb_node *p;
tfifo_dequeue:
- skb = __skb_dequeue(&sch->q);
+ skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
qdisc_qstats_backlog_dec(sch, skb);
deliver:
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a570b0bb254c..5c3a99d6aa82 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -231,7 +231,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
@@ -511,7 +511,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- skb = __qdisc_dequeue_head(sch, &sch->q);
+ skb = qdisc_dequeue_head(sch);
if (!skb)
return NULL;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1c23060c41a6..f10d3397f917 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
transports) {
if (t->pmtu_pending && t->dst) {
sctp_transport_update_pmtu(sk, t,
- WORD_TRUNC(dst_mtu(t->dst)));
+ SCTP_TRUNC4(dst_mtu(t->dst)));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 912eb1685a5d..f99d4855d3de 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -48,7 +48,7 @@ static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
/* id 2 is reserved as well */
.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
},
-#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
+#if IS_ENABLED(CONFIG_CRYPTO_SHA256)
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
.hmac_name = "hmac(sha256)",
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 0a3dbec0a8fb..7a1cdf43e49d 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -70,6 +70,19 @@ static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
return msg;
}
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+
+ /* This doesn't have to be a _safe variant because
+ * sctp_chunk_free() only drops the refs.
+ */
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_chunk_free(chunk);
+
+ sctp_datamsg_put(msg);
+}
+
/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
@@ -187,9 +200,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
/* This is the biggest possible DATA chunk that can fit into
* the packet
*/
- max_data = (asoc->pathmtu -
- sctp_sk(asoc->base.sk)->pf->af->net_header_len -
- sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3;
+ max_data = asoc->pathmtu -
+ sctp_sk(asoc->base.sk)->pf->af->net_header_len -
+ sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ max_data = SCTP_TRUNC4(max_data);
max = asoc->frag_point;
/* If the the peer requested that we authenticate DATA chunks
@@ -200,8 +214,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
if (hmac_desc)
- max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
- hmac_desc->hmac_len);
+ max_data -= SCTP_PAD4(sizeof(sctp_auth_chunk_t) +
+ hmac_desc->hmac_len);
}
/* Now, check if we need to reduce our max */
@@ -221,7 +235,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
asoc->outqueue.out_qlen == 0 &&
list_empty(&asoc->outqueue.retransmit) &&
msg_len > max)
- max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));
+ max_data -= SCTP_PAD4(sizeof(sctp_sack_chunk_t));
/* Encourage Cookie-ECHO bundling. */
if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
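
WORD_ROUND()/WORD_TRUNC() become SCTP_PAD4()/SCTP_TRUNC4() throughout the SCTP code: pad a length up to the next multiple of four versus truncate it down to one, matching the rule that chunks are 4-byte aligned on the wire. Assuming the usual ((x) + 3) & ~3 and (x) & ~3 definitions (the rename itself should not change them), a quick stand-alone sanity check:

#include <assert.h>

/* Local redefinitions for a standalone build, assumed to match the
 * renamed SCTP helpers.
 */
#define PAD4(x)   (((x) + 3U) & ~3U)
#define TRUNC4(x) ((x) & ~3U)

int main(void)
{
	assert(PAD4(5) == 8);		/* a 5-byte chunk occupies 8 bytes */
	assert(PAD4(8) == 8);		/* already aligned: unchanged */
	assert(TRUNC4(7) == 4);		/* an odd length rounds down */
	assert(TRUNC4(1500) == 1500);
	return 0;
}
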
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 1555fb8c68e0..a2ea1d1cc06a 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -605,7 +605,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
/* PMTU discovery (RFC1191) */
if (ICMP_FRAG_NEEDED == code) {
sctp_icmp_frag_needed(sk, asoc, transport,
- WORD_TRUNC(info));
+ SCTP_TRUNC4(info));
goto out_unlock;
} else {
if (ICMP_PROT_UNREACH == code) {
@@ -673,7 +673,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
- ch_end = offset + WORD_ROUND(ntohs(ch->length));
+ ch_end = offset + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb->len)
break;
@@ -1128,7 +1128,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
break;
@@ -1197,7 +1197,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
* that the chunk length doesn't cause overflow. Otherwise, we'll
* walk off the end.
*/
- if (WORD_ROUND(ntohs(ch->length)) > skb->len)
+ if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
return NULL;
/* If this is INIT/INIT-ACK look inside the chunk too. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 6437aa97cfd7..f731de3e8428 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -213,7 +213,7 @@ new_skb:
}
chunk->chunk_hdr = ch;
- chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 31b7bc35895d..2a5c1896d18f 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -180,7 +180,6 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
int one_packet, gfp_t gfp)
{
sctp_xmit_t retval;
- int error = 0;
pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
@@ -188,6 +187,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
case SCTP_XMIT_PMTU_FULL:
if (!packet->has_cookie_echo) {
+ int error = 0;
+
error = sctp_packet_transmit(packet, gfp);
if (error < 0)
chunk->skb->sk->sk_err = -error;
@@ -296,7 +297,7 @@ static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
struct sctp_chunk *chunk)
{
sctp_xmit_t retval = SCTP_XMIT_OK;
- __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
+ __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
/* Check to see if this chunk will fit into the packet */
retval = sctp_packet_will_fit(packet, chunk, chunk_len);
@@ -441,14 +442,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* time. Application may notice this error.
*/
pr_err_once("Trying to GSO but underlying device doesn't support it.");
- goto nomem;
+ goto err;
}
} else {
pkt_size = packet->size;
}
head = alloc_skb(pkt_size + MAX_HEADER, gfp);
if (!head)
- goto nomem;
+ goto err;
if (gso) {
NAPI_GRO_CB(head)->last = head;
skb_shinfo(head)->gso_type = sk->sk_gso_type;
@@ -469,8 +470,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
}
dst = dst_clone(tp->dst);
- if (!dst)
- goto no_route;
+ if (!dst) {
+ if (asoc)
+ IP_INC_STATS(sock_net(asoc->base.sk),
+ IPSTATS_MIB_OUTNOROUTES);
+ goto nodst;
+ }
skb_dst_set(head, dst);
/* Build the SCTP header. */
@@ -503,7 +508,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (gso) {
pkt_size = packet->overhead;
list_for_each_entry(chunk, &packet->chunk_list, list) {
- int padded = WORD_ROUND(chunk->skb->len);
+ int padded = SCTP_PAD4(chunk->skb->len);
if (pkt_size + padded > tp->pathmtu)
break;
@@ -533,7 +538,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* included in the chunk length field. The sender should
* never pad with more than 3 bytes.
*
- * [This whole comment explains WORD_ROUND() below.]
+ * [This whole comment explains SCTP_PAD4() below.]
*/
pkt_size -= packet->overhead;
@@ -555,7 +560,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
has_data = 1;
}
- padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
+ padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
if (padding)
memset(skb_put(chunk->skb, padding), 0, padding);
@@ -582,7 +587,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* acknowledged or have failed.
* Re-queue auth chunks if needed.
*/
- pkt_size -= WORD_ROUND(chunk->skb->len);
+ pkt_size -= SCTP_PAD4(chunk->skb->len);
if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
sctp_chunk_free(chunk);
@@ -621,8 +626,10 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (!gso)
break;
- if (skb_gro_receive(&head, nskb))
+ if (skb_gro_receive(&head, nskb)) {
+ kfree_skb(nskb);
goto nomem;
+ }
nskb = NULL;
if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
sk->sk_gso_max_segs))
@@ -716,18 +723,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
head->ignore_df = packet->ipfragok;
tp->af_specific->sctp_xmit(head, tp);
+ goto out;
-out:
- sctp_packet_reset(packet);
- return err;
-no_route:
- kfree_skb(head);
- if (nskb != head)
- kfree_skb(nskb);
-
- if (asoc)
- IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+nomem:
+ if (packet->auth && list_empty(&packet->auth->list))
+ sctp_chunk_free(packet->auth);
+nodst:
/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
* association is unreachable.
@@ -736,22 +738,18 @@ no_route:
* required.
*/
/* err = -EHOSTUNREACH; */
-err:
- /* Control chunks are unreliable so just drop them. DATA chunks
- * will get resent or dropped later.
- */
+ kfree_skb(head);
+err:
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
- goto out;
-nomem:
- if (packet->auth && list_empty(&packet->auth->list))
- sctp_chunk_free(packet->auth);
- err = -ENOMEM;
- goto err;
+
+out:
+ sctp_packet_reset(packet);
+ return err;
}
/********************************************************************
@@ -913,7 +911,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
*/
maxsize = pmtu - packet->overhead;
if (packet->auth)
- maxsize -= WORD_ROUND(packet->auth->skb->len);
+ maxsize -= SCTP_PAD4(packet->auth->skb->len);
if (chunk_len > maxsize)
retval = SCTP_XMIT_PMTU_FULL;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 107233da5cc9..582585393d35 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,10 +285,9 @@ void sctp_outq_free(struct sctp_outq *q)
}
/* Put a new chunk in an sctp_outq. */
-int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
+void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
struct net *net = sock_net(q->asoc->base.sk);
- int error = 0;
pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
chunk && chunk->chunk_hdr ?
@@ -299,54 +298,26 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
* immediately.
*/
if (sctp_chunk_is_data(chunk)) {
- /* Is it OK to queue data chunks? */
- /* From 9. Termination of Association
- *
- * When either endpoint performs a shutdown, the
- * association on each peer will stop accepting new
- * data from its user and only deliver data in queue
- * at the time of sending or receiving the SHUTDOWN
- * chunk.
- */
- switch (q->asoc->state) {
- case SCTP_STATE_CLOSED:
- case SCTP_STATE_SHUTDOWN_PENDING:
- case SCTP_STATE_SHUTDOWN_SENT:
- case SCTP_STATE_SHUTDOWN_RECEIVED:
- case SCTP_STATE_SHUTDOWN_ACK_SENT:
- /* Cannot send after transport endpoint shutdown */
- error = -ESHUTDOWN;
- break;
-
- default:
- pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
- __func__, q, chunk, chunk && chunk->chunk_hdr ?
- sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
- "illegal chunk");
-
- sctp_chunk_hold(chunk);
- sctp_outq_tail_data(q, chunk);
- if (chunk->asoc->peer.prsctp_capable &&
- SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
- chunk->asoc->sent_cnt_removable++;
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
- else
- SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- break;
- }
+ pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
+ __func__, q, chunk, chunk && chunk->chunk_hdr ?
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+ "illegal chunk");
+
+ sctp_outq_tail_data(q, chunk);
+ if (chunk->asoc->peer.prsctp_capable &&
+ SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
+ chunk->asoc->sent_cnt_removable++;
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
+ else
+ SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
} else {
list_add_tail(&chunk->list, &q->control_chunk_list);
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
}
- if (error < 0)
- return error;
-
if (!q->cork)
- error = sctp_outq_flush(q, 0, gfp);
-
- return error;
+ sctp_outq_flush(q, 0, gfp);
}
/* Insert a chunk into the sorted list based on the TSNs. The retransmit list
@@ -559,7 +530,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason)
{
struct net *net = sock_net(q->asoc->base.sk);
- int error = 0;
switch (reason) {
case SCTP_RTXR_T3_RTX:
@@ -603,10 +573,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* will be flushed at the end.
*/
if (reason != SCTP_RTXR_FAST_RTX)
- error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
-
- if (error)
- q->asoc->base.sk->sk_err = -error;
+ sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
/*
@@ -778,12 +745,12 @@ redo:
}
/* Cork the outqueue so queued chunks are really queued. */
-int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
+void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
if (q->cork)
q->cork = 0;
- return sctp_outq_flush(q, 0, gfp);
+ sctp_outq_flush(q, 0, gfp);
}
@@ -796,7 +763,7 @@ int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
struct sctp_packet *packet;
struct sctp_packet singleton;
@@ -919,8 +886,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
sctp_packet_config(&singleton, vtag, 0);
sctp_packet_append_chunk(&singleton, chunk);
error = sctp_packet_transmit(&singleton, gfp);
- if (error < 0)
- return error;
+ if (error < 0) {
+ asoc->base.sk->sk_err = -error;
+ return;
+ }
break;
case SCTP_CID_ABORT:
@@ -1018,6 +987,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
retran:
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
+ if (error < 0)
+ asoc->base.sk->sk_err = -error;
if (start_timer) {
sctp_transport_reset_t3_rtx(transport);
@@ -1192,14 +1163,15 @@ sctp_flush_out:
struct sctp_transport,
send_ready);
packet = &t->packet;
- if (!sctp_packet_empty(packet))
+ if (!sctp_packet_empty(packet)) {
error = sctp_packet_transmit(packet, gfp);
+ if (error < 0)
+ asoc->base.sk->sk_err = -error;
+ }
/* Clear the burst limited state, if any */
sctp_transport_burst_reset(t);
}
-
- return error;
}
/* Update unack_data based on the incoming SACK chunk */
@@ -1747,7 +1719,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
int i;
sctp_sack_variable_t *frags;
- __u16 gap;
+ __u16 tsn_offset, blocks;
__u32 ctsn = ntohl(sack->cum_tsn_ack);
if (TSN_lte(tsn, ctsn))
@@ -1766,10 +1738,11 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
*/
frags = sack->variable;
- gap = tsn - ctsn;
- for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
- if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
- TSN_lte(gap, ntohs(frags[i].gab.end)))
+ blocks = ntohs(sack->num_gap_ack_blocks);
+ tsn_offset = tsn - ctsn;
+ for (i = 0; i < blocks; ++i) {
+ if (tsn_offset >= ntohs(frags[i].gab.start) &&
+ tsn_offset <= ntohs(frags[i].gab.end))
goto pass;
}
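
A standalone sketch of the rewritten check (illustrative helper, not part of the patch): gap ack block boundaries are 16-bit offsets relative to the cumulative TSN, so plain unsigned comparisons suffice once the TSN is converted to an offset; the circular TSN_lte() helper is only needed when comparing raw 32-bit TSNs.

static int tsn_in_gap_block(__u32 tsn, __u32 ctsn, __u16 start, __u16 end)
{
	__u16 tsn_offset = tsn - ctsn;	/* distance above the cumulative TSN */

	return tsn_offset >= start && tsn_offset <= end;
}
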
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ef8ba77a5bea..206377fe91ec 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -73,13 +73,17 @@ static const struct snmp_mib sctp_snmp_list[] = {
/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
{
+ unsigned long buff[SCTP_MIB_MAX];
struct net *net = seq->private;
int i;
- for (i = 0; sctp_snmp_list[i].name != NULL; i++)
+ memset(buff, 0, sizeof(unsigned long) * SCTP_MIB_MAX);
+
+ snmp_get_cpu_field_batch(buff, sctp_snmp_list,
+ net->sctp.sctp_statistics);
+ for (i = 0; sctp_snmp_list[i].name; i++)
seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
- snmp_fold_field(net->sctp.sctp_statistics,
- sctp_snmp_list[i].entry));
+ buff[i]);
return 0;
}
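
Conceptually, the batching walks the per-CPU blocks once and accumulates every counter, instead of walking all CPUs once per counter as snmp_fold_field() does. A rough sketch, with an illustrative flat field layout and helper name that are not the kernel's:

static void my_fold_all_fields(unsigned long *buff, int nfields,
			       unsigned long __percpu *mib)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		unsigned long *src = per_cpu_ptr(mib, cpu);

		for (i = 0; i < nfields; i++)
			buff[i] += src[i];	/* one pass per CPU, all fields */
	}
}
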
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index cef0cee182d4..048954eee984 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -106,7 +106,8 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
int portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct list_head *addr_list;
@@ -133,7 +134,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
r->idiag_retrans = 0;
}
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
@@ -203,6 +204,7 @@ struct sctp_comm_param {
struct netlink_callback *cb;
const struct inet_diag_req_v2 *r;
const struct nlmsghdr *nlh;
+ bool net_admin;
};
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
@@ -219,6 +221,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
+ nla_total_size(sizeof(struct inet_diag_meminfo))
@@ -256,7 +259,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
err = inet_sctp_diag_fill(sk, assoc, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ commp->net_admin);
release_sock(sk);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
@@ -299,7 +303,8 @@ static int sctp_sock_dump(struct sock *sk, void *p)
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI, cb->nlh) < 0) {
+ NLM_F_MULTI, cb->nlh,
+ commp->net_admin) < 0) {
cb->args[3] = 1;
err = 1;
goto release;
@@ -309,7 +314,8 @@ static int sctp_sock_dump(struct sock *sk, void *p)
if (inet_sctp_diag_fill(sk, assoc, skb, r,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
+ cb->nlh->nlmsg_seq, 0, cb->nlh,
+ commp->net_admin) < 0) {
err = 1;
goto release;
}
@@ -389,7 +395,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh) < 0) {
+ cb->nlh, commp->net_admin) < 0) {
err = 2;
goto out;
}
@@ -426,6 +432,7 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
.skb = in_skb,
.r = req,
.nlh = nlh,
+ .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
};
if (req->sdiag_family == AF_INET) {
@@ -461,6 +468,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
.skb = skb,
.cb = cb,
.r = r,
+ .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
};
/* eps hashtable dumps
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 46ffecc57214..9e9690b7afe1 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -253,7 +253,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
num_types = sp->pf->supported_addrs(sp, types);
chunksize = sizeof(init) + addrs_len;
- chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
+ chunksize += SCTP_PAD4(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (asoc->prsctp_enable)
@@ -283,14 +283,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -300,8 +300,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
@@ -443,13 +443,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -458,8 +458,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
@@ -1375,7 +1375,7 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
struct sock *sk;
/* No need to allocate LL here, as this is only a chunk. */
- skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), gfp);
+ skb = alloc_skb(SCTP_PAD4(sizeof(sctp_chunkhdr_t) + paylen), gfp);
if (!skb)
goto nodata;
@@ -1467,7 +1467,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
void *target;
void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
- int padlen = WORD_ROUND(chunklen) - chunklen;
+ int padlen = SCTP_PAD4(chunklen) - chunklen;
padding = skb_put(chunk->skb, padlen);
target = skb_put(chunk->skb, len);
@@ -1885,7 +1885,7 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
struct __sctp_missing report;
__u16 len;
- len = WORD_ROUND(sizeof(report));
+ len = SCTP_PAD4(sizeof(report));
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
@@ -2083,9 +2083,9 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
if (*errp) {
if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- WORD_ROUND(ntohs(param.p->length))))
+ SCTP_PAD4(ntohs(param.p->length))))
sctp_addto_chunk_fixed(*errp,
- WORD_ROUND(ntohs(param.p->length)),
+ SCTP_PAD4(ntohs(param.p->length)),
param.v);
} else {
/* If there is no memory for generating the ERROR
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 12d45193357c..c345bf153bed 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1020,19 +1020,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
* This way the whole message is queued up and bundling if
* encouraged for small fragments.
*/
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
- struct sctp_datamsg *msg, gfp_t gfp)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg, gfp_t gfp)
{
struct sctp_chunk *chunk;
- int error = 0;
-
- list_for_each_entry(chunk, &msg->chunks, frag_list) {
- error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
- if (error)
- break;
- }
- return error;
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk, gfp);
}
@@ -1427,8 +1421,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
local_cork = 1;
}
/* Send a chunk to our peer. */
- error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
- gfp);
+ sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
break;
case SCTP_CMD_SEND_PKT:
@@ -1682,7 +1675,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_FORCE_PRIM_RETRAN:
t = asoc->peer.retran_path;
asoc->peer.retran_path = asoc->peer.primary_path;
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
local_cork = 0;
asoc->peer.retran_path = t;
break;
@@ -1709,7 +1702,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
- error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
+ sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
@@ -1739,9 +1732,9 @@ out:
*/
if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
if (chunk->end_of_packet || chunk->singleton)
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
} else if (local_cork)
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
if (sp->data_ready_signalled)
sp->data_ready_signalled = 0;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d88bb2b0b699..026e3bca4a94 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3454,7 +3454,7 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
/* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -4185,7 +4185,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)),
+ SCTP_PAD4(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)),
+ SCTP_PAD4(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8ed2d99bde6d..fb02c7033307 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1958,6 +1958,8 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
+ sctp_chunk_hold(chunk);
+
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
@@ -1970,13 +1972,15 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
- sctp_datamsg_put(datamsg);
/* Did the lower layer accept the chunk? */
- if (err)
+ if (err) {
+ sctp_datamsg_free(datamsg);
goto out_free;
+ }
pr_debug("%s: we sent primitively\n", __func__);
+ sctp_datamsg_put(datamsg);
err = msg_len;
if (unlikely(wait_connect)) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 81b86678be4d..ce54dce13ddb 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -233,7 +233,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
}
if (transport->dst) {
- transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
+ transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
} else
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
@@ -287,7 +287,7 @@ void sctp_transport_route(struct sctp_transport *transport,
return;
}
if (transport->dst) {
- transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
+ transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
/* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index d85b803da11d..bea00058ce35 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -383,7 +383,7 @@ sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
ch = (sctp_errhdr_t *)(chunk->skb->data);
cause = ch->cause;
- elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t);
+ elen = SCTP_PAD4(ntohs(ch->length)) - sizeof(sctp_errhdr_t);
/* Pull off the ERROR header. */
skb_pull(chunk->skb, sizeof(sctp_errhdr_t));
@@ -688,7 +688,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* MUST ignore the padding bytes.
*/
len = ntohs(chunk->chunk_hdr->length);
- padding = WORD_ROUND(len) - len;
+ padding = SCTP_PAD4(len) - len;
/* Fixup cloned skb with just this chunks data. */
skb_trim(skb, chunk->chunk_end - padding - skb->data);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 877e55066f89..84d0fdaf7de9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -140,11 +140,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
* we can go ahead and clear out the lobby in one shot
*/
if (!skb_queue_empty(&sp->pd_lobby)) {
- struct list_head *list;
skb_queue_splice_tail_init(&sp->pd_lobby,
&sk->sk_receive_queue);
- list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
- INIT_LIST_HEAD(list);
return 1;
}
} else {
diff --git a/net/strparser/Kconfig b/net/strparser/Kconfig
new file mode 100644
index 000000000000..6cff3f6d0c3a
--- /dev/null
+++ b/net/strparser/Kconfig
@@ -0,0 +1,4 @@
+
+config STREAM_PARSER
+ tristate
+ default n
diff --git a/net/strparser/Makefile b/net/strparser/Makefile
new file mode 100644
index 000000000000..858a126ebaa0
--- /dev/null
+++ b/net/strparser/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_STREAM_PARSER) += strparser.o
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
new file mode 100644
index 000000000000..5c7549b5b92c
--- /dev/null
+++ b/net/strparser/strparser.c
@@ -0,0 +1,510 @@
+/*
+ * Stream Parser
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h>
+#include <linux/errno.h>
+#include <linux/errqueue.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <net/strparser.h>
+#include <net/netns/generic.h>
+#include <net/sock.h>
+
+static struct workqueue_struct *strp_wq;
+
+struct _strp_rx_msg {
+ /* Internal cb structure. struct strp_rx_msg must be first for passing
+ * to upper layer.
+ */
+ struct strp_rx_msg strp;
+ int accum_len;
+ int early_eaten;
+};
+
+static inline struct _strp_rx_msg *_strp_rx_msg(struct sk_buff *skb)
+{
+ return (struct _strp_rx_msg *)((void *)skb->cb +
+ offsetof(struct qdisc_skb_cb, data));
+}
+
+/* Lower lock held */
+static void strp_abort_rx_strp(struct strparser *strp, int err)
+{
+ struct sock *csk = strp->sk;
+
+ /* Unrecoverable error in receive */
+
+ del_timer(&strp->rx_msg_timer);
+
+ if (strp->rx_stopped)
+ return;
+
+ strp->rx_stopped = 1;
+
+ /* Report an error on the lower socket */
+ csk->sk_err = err;
+ csk->sk_error_report(csk);
+}
+
+static void strp_start_rx_timer(struct strparser *strp)
+{
+ if (strp->sk->sk_rcvtimeo)
+ mod_timer(&strp->rx_msg_timer, strp->sk->sk_rcvtimeo);
+}
+
+/* Lower lock held */
+static void strp_parser_err(struct strparser *strp, int err,
+ read_descriptor_t *desc)
+{
+ desc->error = err;
+ kfree_skb(strp->rx_skb_head);
+ strp->rx_skb_head = NULL;
+ strp->cb.abort_parser(strp, err);
+}
+
+static inline int strp_peek_len(struct strparser *strp)
+{
+ struct socket *sock = strp->sk->sk_socket;
+
+ return sock->ops->peek_len(sock);
+}
+
+/* Lower socket lock held */
+static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+ unsigned int orig_offset, size_t orig_len)
+{
+ struct strparser *strp = (struct strparser *)desc->arg.data;
+ struct _strp_rx_msg *rxm;
+ struct sk_buff *head, *skb;
+ size_t eaten = 0, cand_len;
+ ssize_t extra;
+ int err;
+ bool cloned_orig = false;
+
+ if (strp->rx_paused)
+ return 0;
+
+ head = strp->rx_skb_head;
+ if (head) {
+ /* Message already in progress */
+
+ rxm = _strp_rx_msg(head);
+ if (unlikely(rxm->early_eaten)) {
+ /* Already some number of bytes on the receive sock
+ * data saved in rx_skb_head, just indicate they
+ * are consumed.
+ */
+ eaten = orig_len <= rxm->early_eaten ?
+ orig_len : rxm->early_eaten;
+ rxm->early_eaten -= eaten;
+
+ return eaten;
+ }
+
+ if (unlikely(orig_offset)) {
+ /* Getting data with a non-zero offset when a message is
+ * in progress is not expected. If it does happen, we
+ * need to clone and pull since we can't deal with
+ * offsets in the skbs for a message except in the head.
+ */
+ orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
+ if (!orig_skb) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ if (!pskb_pull(orig_skb, orig_offset)) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ kfree_skb(orig_skb);
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ cloned_orig = true;
+ orig_offset = 0;
+ }
+
+ if (!strp->rx_skb_nextp) {
+ /* We are going to append to the frags_list of head.
+ * Need to unshare the frag_list.
+ */
+ err = skb_unclone(head, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ desc->error = err;
+ return 0;
+ }
+
+ if (unlikely(skb_shinfo(head)->frag_list)) {
+ /* We can't append to an sk_buff that already
+ * has a frag_list. We create a new head, point
+ * the frag_list of that to the old head, and
+ * then are able to use the old head->next for
+ * appending to the message.
+ */
+ if (WARN_ON(head->next)) {
+ desc->error = -EINVAL;
+ return 0;
+ }
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ skb->len = head->len;
+ skb->data_len = head->len;
+ skb->truesize = head->truesize;
+ *_strp_rx_msg(skb) = *_strp_rx_msg(head);
+ strp->rx_skb_nextp = &head->next;
+ skb_shinfo(skb)->frag_list = head;
+ strp->rx_skb_head = skb;
+ head = skb;
+ } else {
+ strp->rx_skb_nextp =
+ &skb_shinfo(head)->frag_list;
+ }
+ }
+ }
+
+ while (eaten < orig_len) {
+ /* Always clone since we will consume something */
+ skb = skb_clone(orig_skb, GFP_ATOMIC);
+ if (!skb) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ desc->error = -ENOMEM;
+ break;
+ }
+
+ cand_len = orig_len - eaten;
+
+ head = strp->rx_skb_head;
+ if (!head) {
+ head = skb;
+ strp->rx_skb_head = head;
+ /* Will set rx_skb_nextp on next packet if needed */
+ strp->rx_skb_nextp = NULL;
+ rxm = _strp_rx_msg(head);
+ memset(rxm, 0, sizeof(*rxm));
+ rxm->strp.offset = orig_offset + eaten;
+ } else {
+ /* Unclone since we may be appending to an skb that we
+ * already share a frag_list with.
+ */
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.rx_mem_fail);
+ desc->error = err;
+ break;
+ }
+
+ rxm = _strp_rx_msg(head);
+ *strp->rx_skb_nextp = skb;
+ strp->rx_skb_nextp = &skb->next;
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+
+ if (!rxm->strp.full_len) {
+ ssize_t len;
+
+ len = (*strp->cb.parse_msg)(strp, head);
+
+ if (!len) {
+ /* Need more header to determine length */
+ if (!rxm->accum_len) {
+ /* Start RX timer for new message */
+ strp_start_rx_timer(strp);
+ }
+ rxm->accum_len += cand_len;
+ eaten += cand_len;
+ STRP_STATS_INCR(strp->stats.rx_need_more_hdr);
+ WARN_ON(eaten != orig_len);
+ break;
+ } else if (len < 0) {
+ if (len == -ESTRPIPE && rxm->accum_len) {
+ len = -ENODATA;
+ strp->rx_unrecov_intr = 1;
+ } else {
+ strp->rx_interrupted = 1;
+ }
+ strp_parser_err(strp, err, desc);
+ break;
+ } else if (len > strp->sk->sk_rcvbuf) {
+ /* Message length exceeds maximum allowed */
+ STRP_STATS_INCR(strp->stats.rx_msg_too_big);
+ strp_parser_err(strp, -EMSGSIZE, desc);
+ break;
+ } else if (len <= (ssize_t)head->len -
+ skb->len - rxm->strp.offset) {
+ /* Length must be into new skb (and also
+ * greater than zero)
+ */
+ STRP_STATS_INCR(strp->stats.rx_bad_hdr_len);
+ strp_parser_err(strp, -EPROTO, desc);
+ break;
+ }
+
+ rxm->strp.full_len = len;
+ }
+
+ extra = (ssize_t)(rxm->accum_len + cand_len) -
+ rxm->strp.full_len;
+
+ if (extra < 0) {
+ /* Message not complete yet. */
+ if (rxm->strp.full_len - rxm->accum_len >
+ strp_peek_len(strp)) {
+ /* Don't have the whole message in the socket
+ * buffer. Set strp->rx_need_bytes to wait for
+ * the rest of the message. Also, set "early
+ * eaten" since we've already buffered the skb
+ * but don't consume yet per strp_read_sock.
+ */
+
+ if (!rxm->accum_len) {
+ /* Start RX timer for new message */
+ strp_start_rx_timer(strp);
+ }
+
+ strp->rx_need_bytes = rxm->strp.full_len -
+ rxm->accum_len;
+ rxm->accum_len += cand_len;
+ rxm->early_eaten = cand_len;
+ STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
+ desc->count = 0; /* Stop reading socket */
+ break;
+ }
+ rxm->accum_len += cand_len;
+ eaten += cand_len;
+ WARN_ON(eaten != orig_len);
+ break;
+ }
+
+ /* Positive extra indicates more bytes than needed for the
+ * message
+ */
+
+ WARN_ON(extra > cand_len);
+
+ eaten += (cand_len - extra);
+
+ /* Hurray, we have a new message! */
+ del_timer(&strp->rx_msg_timer);
+ strp->rx_skb_head = NULL;
+ STRP_STATS_INCR(strp->stats.rx_msgs);
+
+ /* Give skb to upper layer */
+ strp->cb.rcv_msg(strp, head);
+
+ if (unlikely(strp->rx_paused)) {
+ /* Upper layer paused strp */
+ break;
+ }
+ }
+
+ if (cloned_orig)
+ kfree_skb(orig_skb);
+
+ STRP_STATS_ADD(strp->stats.rx_bytes, eaten);
+
+ return eaten;
+}
+
+static int default_read_sock_done(struct strparser *strp, int err)
+{
+ return err;
+}
+
+/* Called with lock held on lower socket */
+static int strp_read_sock(struct strparser *strp)
+{
+ struct socket *sock = strp->sk->sk_socket;
+ read_descriptor_t desc;
+
+ desc.arg.data = strp;
+ desc.error = 0;
+ desc.count = 1; /* give more than one skb per call */
+
+ /* sk should be locked here, so okay to do read_sock */
+ sock->ops->read_sock(strp->sk, &desc, strp_recv);
+
+ desc.error = strp->cb.read_sock_done(strp, desc.error);
+
+ return desc.error;
+}
+
+/* Lower sock lock held */
+void strp_data_ready(struct strparser *strp)
+{
+ if (unlikely(strp->rx_stopped))
+ return;
+
+ /* This check is needed to synchronize with do_strp_rx_work.
+ * do_strp_rx_work acquires a process lock (lock_sock) whereas
+ * the lock held here is bh_lock_sock. The two locks can be
+ * held by different threads at the same time, but bh_lock_sock
+ * allows a thread in BH context to safely check if the process
+ * lock is held. In this case, if the lock is held, queue work.
+ */
+ if (sock_owned_by_user(strp->sk)) {
+ queue_work(strp_wq, &strp->rx_work);
+ return;
+ }
+
+ if (strp->rx_paused)
+ return;
+
+ if (strp->rx_need_bytes) {
+ if (strp_peek_len(strp) >= strp->rx_need_bytes)
+ strp->rx_need_bytes = 0;
+ else
+ return;
+ }
+
+ if (strp_read_sock(strp) == -ENOMEM)
+ queue_work(strp_wq, &strp->rx_work);
+}
+EXPORT_SYMBOL_GPL(strp_data_ready);
+
+static void do_strp_rx_work(struct strparser *strp)
+{
+ read_descriptor_t rd_desc;
+ struct sock *csk = strp->sk;
+
+ /* We need the read lock to synchronize with strp_data_ready. We
+ * need the socket lock for calling strp_read_sock.
+ */
+ lock_sock(csk);
+
+ if (unlikely(strp->rx_stopped))
+ goto out;
+
+ if (strp->rx_paused)
+ goto out;
+
+ rd_desc.arg.data = strp;
+
+ if (strp_read_sock(strp) == -ENOMEM)
+ queue_work(strp_wq, &strp->rx_work);
+
+out:
+ release_sock(csk);
+}
+
+static void strp_rx_work(struct work_struct *w)
+{
+ do_strp_rx_work(container_of(w, struct strparser, rx_work));
+}
+
+static void strp_rx_msg_timeout(unsigned long arg)
+{
+ struct strparser *strp = (struct strparser *)arg;
+
+ /* Message assembly timed out */
+ STRP_STATS_INCR(strp->stats.rx_msg_timeouts);
+ lock_sock(strp->sk);
+ strp->cb.abort_parser(strp, ETIMEDOUT);
+ release_sock(strp->sk);
+}
+
+int strp_init(struct strparser *strp, struct sock *csk,
+ struct strp_callbacks *cb)
+{
+ struct socket *sock = csk->sk_socket;
+
+ if (!cb || !cb->rcv_msg || !cb->parse_msg)
+ return -EINVAL;
+
+ if (!sock->ops->read_sock || !sock->ops->peek_len)
+ return -EAFNOSUPPORT;
+
+ memset(strp, 0, sizeof(*strp));
+
+ strp->sk = csk;
+
+ setup_timer(&strp->rx_msg_timer, strp_rx_msg_timeout,
+ (unsigned long)strp);
+
+ INIT_WORK(&strp->rx_work, strp_rx_work);
+
+ strp->cb.rcv_msg = cb->rcv_msg;
+ strp->cb.parse_msg = cb->parse_msg;
+ strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
+ strp->cb.abort_parser = cb->abort_parser ? : strp_abort_rx_strp;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(strp_init);
+
+void strp_unpause(struct strparser *strp)
+{
+ strp->rx_paused = 0;
+
+ /* Sync setting rx_paused with RX work */
+ smp_mb();
+
+ queue_work(strp_wq, &strp->rx_work);
+}
+EXPORT_SYMBOL_GPL(strp_unpause);
+
+/* strp must already be stopped so that strp_recv will no longer be called.
+ * Note that strp_done is not called with the lower socket held.
+ */
+void strp_done(struct strparser *strp)
+{
+ WARN_ON(!strp->rx_stopped);
+
+ del_timer_sync(&strp->rx_msg_timer);
+ cancel_work_sync(&strp->rx_work);
+
+ if (strp->rx_skb_head) {
+ kfree_skb(strp->rx_skb_head);
+ strp->rx_skb_head = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(strp_done);
+
+void strp_stop(struct strparser *strp)
+{
+ strp->rx_stopped = 1;
+}
+EXPORT_SYMBOL_GPL(strp_stop);
+
+void strp_check_rcv(struct strparser *strp)
+{
+ queue_work(strp_wq, &strp->rx_work);
+}
+EXPORT_SYMBOL_GPL(strp_check_rcv);
+
+static int __init strp_mod_init(void)
+{
+ strp_wq = create_singlethread_workqueue("kstrp");
+
+ return 0;
+}
+
+static void __exit strp_mod_exit(void)
+{
+}
+module_init(strp_mod_init);
+module_exit(strp_mod_exit);
+MODULE_LICENSE("GPL");
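
To show how a consumer would wire up this API, here is a hypothetical sketch for a 4-byte big-endian length-prefixed framing; every my_* name is invented, and the strp_rx_msg() accessor is assumed from the companion net/strparser.h header rather than from this file.

/* Return 0 when more data is needed, the total message length once the
 * header is readable, or a negative error to abort the parser.
 */
static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = strp_rx_msg(skb);
	__be32 hdr;

	if (skb_copy_bits(skb, rxm->offset, &hdr, sizeof(hdr)) < 0)
		return 0;	/* length header not fully buffered yet */

	return sizeof(hdr) + ntohl(hdr);	/* header plus payload */
}

static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	/* One complete message per call (possibly carried in a frag_list);
	 * the consumer owns the skb from here on -- this sketch just drops it.
	 */
	kfree_skb(skb);
}

static int my_attach(struct strparser *strp, struct sock *sk)
{
	struct strp_callbacks cb = {
		.rcv_msg   = my_rcv_msg,
		.parse_msg = my_parse_msg,
		/* read_sock_done/abort_parser fall back to the defaults
		 * installed by strp_init().
		 */
	};

	return strp_init(strp, sk, &cb);
}
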
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 168219535a34..83dffeadf20a 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -176,8 +176,8 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
if (gcred->acred.group_info->ngroups != acred->group_info->ngroups)
goto out_nomatch;
for (i = 0; i < gcred->acred.group_info->ngroups; i++) {
- if (!gid_eq(GROUP_AT(gcred->acred.group_info, i),
- GROUP_AT(acred->group_info, i)))
+ if (!gid_eq(gcred->acred.group_info->gid[i],
+ acred->group_info->gid[i]))
goto out_nomatch;
}
out_match:
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index eeeba5adee6d..dc6fb79a361f 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -229,7 +229,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
kgid = make_kgid(&init_user_ns, tmp);
if (!gid_valid(kgid))
goto out_free_groups;
- GROUP_AT(creds->cr_group_info, i) = kgid;
+ creds->cr_group_info->gid[i] = kgid;
}
return 0;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d8582028b346..d67f7e1bc82d 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -479,7 +479,7 @@ static int rsc_parse(struct cache_detail *cd,
kgid = make_kgid(&init_user_ns, id);
if (!gid_valid(kgid))
goto out;
- GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
+ rsci.cred.cr_group_info->gid[i] = kgid;
}
/* mech name */
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index a99278c984e8..a1d768a973f5 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -79,7 +79,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
cred->uc_gid = acred->gid;
for (i = 0; i < groups; i++)
- cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
+ cred->uc_gids[i] = acred->group_info->gid[i];
if (i < NFS_NGROUPS)
cred->uc_gids[i] = INVALID_GID;
@@ -127,7 +127,7 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
if (groups > NFS_NGROUPS)
groups = NFS_NGROUPS;
for (i = 0; i < groups ; i++)
- if (!gid_eq(cred->uc_gids[i], GROUP_AT(acred->group_info, i)))
+ if (!gid_eq(cred->uc_gids[i], acred->group_info->gid[i]))
return 0;
if (groups < NFS_NGROUPS && gid_valid(cred->uc_gids[groups]))
return 0;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index dfacdc95b3f5..64af4f034de6 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -517,7 +517,7 @@ static int unix_gid_parse(struct cache_detail *cd,
kgid = make_kgid(&init_user_ns, gid);
if (!gid_valid(kgid))
goto out;
- GROUP_AT(ug.gi, i) = kgid;
+ ug.gi->gid[i] = kgid;
}
ugp = unix_gid_lookup(cd, uid);
@@ -564,7 +564,7 @@ static int unix_gid_show(struct seq_file *m,
seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
for (i = 0; i < glen; i++)
- seq_printf(m, " %d", from_kgid_munged(user_ns, GROUP_AT(ug->gi, i)));
+ seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
seq_printf(m, "\n");
return 0;
}
@@ -817,7 +817,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
return SVC_CLOSE;
for (i = 0; i < slen; i++) {
kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
- GROUP_AT(cred->cr_group_info, i) = kgid;
+ cred->cr_group_info->gid[i] = kgid;
}
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
*authp = rpc_autherr_badverf;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index dd9440137834..eb2857f52b05 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -993,7 +993,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
- newxprt->sc_pd = ib_alloc_pd(dev);
+ newxprt->sc_pd = ib_alloc_pd(dev, 0);
if (IS_ERR(newxprt->sc_pd)) {
dprintk("svcrdma: error creating PD for connect request\n");
goto errout;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 799cce6cbe45..be3178e5e2d2 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -387,7 +387,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
}
ia->ri_device = ia->ri_id->device;
- ia->ri_pd = ib_alloc_pd(ia->ri_device);
+ ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
if (IS_ERR(ia->ri_pd)) {
rc = PTR_ERR(ia->ri_pd);
pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index a5fc9dd24aa9..02beb35f577f 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -21,7 +21,6 @@
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
-#include <net/ip_fib.h>
#include <net/switchdev.h>
/**
@@ -344,8 +343,6 @@ static size_t switchdev_obj_size(const struct switchdev_obj *obj)
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
return sizeof(struct switchdev_obj_port_vlan);
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- return sizeof(struct switchdev_obj_ipv4_fib);
case SWITCHDEV_OBJ_ID_PORT_FDB:
return sizeof(struct switchdev_obj_port_fdb);
case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1042,7 +1039,7 @@ static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[0])
+ if (dump->idx < dump->cb->args[2])
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -1089,7 +1086,7 @@ nla_put_failure:
*/
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
- struct net_device *filter_dev, int idx)
+ struct net_device *filter_dev, int *idx)
{
struct switchdev_fdb_dump dump = {
.fdb.obj.orig_dev = dev,
@@ -1097,207 +1094,27 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
.dev = dev,
.skb = skb,
.cb = cb,
- .idx = idx,
+ .idx = *idx,
};
int err;
err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
switchdev_port_fdb_dump_cb);
- cb->args[1] = err;
- return dump.idx;
+ *idx = dump.idx;
+ return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
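
A hypothetical driver-side caller for the new convention, for illustration only: the helper now reports an error code and advances the caller's index through *idx instead of returning the index.

static int my_ndo_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   struct net_device *dev,
			   struct net_device *filter_dev, int *idx)
{
	/* *idx is updated in place; error handling is left to the caller. */
	return switchdev_port_fdb_dump(skb, cb, dev, filter_dev, idx);
}
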
-static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
-{
- const struct switchdev_ops *ops = dev->switchdev_ops;
- struct net_device *lower_dev;
- struct net_device *port_dev;
- struct list_head *iter;
-
- /* Recusively search down until we find a sw port dev.
- * (A sw port dev supports switchdev_port_attr_get).
- */
-
- if (ops && ops->switchdev_port_attr_get)
- return dev;
-
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- port_dev = switchdev_get_lowest_dev(lower_dev);
- if (port_dev)
- return port_dev;
- }
-
- return NULL;
-}
-
-static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
-{
- struct switchdev_attr attr = {
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
- struct switchdev_attr prev_attr;
- struct net_device *dev = NULL;
- int nhsel;
-
- ASSERT_RTNL();
-
- /* For this route, all nexthop devs must be on the same switch. */
-
- for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
- const struct fib_nh *nh = &fi->fib_nh[nhsel];
-
- if (!nh->nh_dev)
- return NULL;
-
- dev = switchdev_get_lowest_dev(nh->nh_dev);
- if (!dev)
- return NULL;
-
- attr.orig_dev = dev;
- if (switchdev_port_attr_get(dev, &attr))
- return NULL;
-
- if (nhsel > 0 &&
- !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
- return NULL;
-
- prev_attr = attr;
- }
-
- return dev;
-}
-
-/**
- * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
- *
- * @dst: route's IPv4 destination address
- * @dst_len: destination address length (prefix length)
- * @fi: route FIB info structure
- * @tos: route TOS
- * @type: route type
- * @nlflags: netlink flags passed in (NLM_F_*)
- * @tb_id: route table ID
- *
- * Add/modify switch IPv4 route entry.
- */
-int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id)
-{
- struct switchdev_obj_ipv4_fib ipv4_fib = {
- .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
- .dst = dst,
- .dst_len = dst_len,
- .fi = fi,
- .tos = tos,
- .type = type,
- .nlflags = nlflags,
- .tb_id = tb_id,
- };
- struct net_device *dev;
- int err = 0;
-
- /* Don't offload route if using custom ip rules or if
- * IPv4 FIB offloading has been disabled completely.
- */
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- if (fi->fib_net->ipv4.fib_has_custom_rules)
- return 0;
-#endif
-
- if (fi->fib_net->ipv4.fib_offload_disabled)
- return 0;
-
- dev = switchdev_get_dev_by_nhs(fi);
- if (!dev)
- return 0;
-
- ipv4_fib.obj.orig_dev = dev;
- err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
- if (!err)
- fi->fib_flags |= RTNH_F_OFFLOAD;
-
- return err == -EOPNOTSUPP ? 0 : err;
-}
-EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
-
-/**
- * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
- *
- * @dst: route's IPv4 destination address
- * @dst_len: destination address length (prefix length)
- * @fi: route FIB info structure
- * @tos: route TOS
- * @type: route type
- * @tb_id: route table ID
- *
- * Delete IPv4 route entry from switch device.
- */
-int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
-{
- struct switchdev_obj_ipv4_fib ipv4_fib = {
- .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
- .dst = dst,
- .dst_len = dst_len,
- .fi = fi,
- .tos = tos,
- .type = type,
- .nlflags = 0,
- .tb_id = tb_id,
- };
- struct net_device *dev;
- int err = 0;
-
- if (!(fi->fib_flags & RTNH_F_OFFLOAD))
- return 0;
-
- dev = switchdev_get_dev_by_nhs(fi);
- if (!dev)
- return 0;
-
- ipv4_fib.obj.orig_dev = dev;
- err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
- if (!err)
- fi->fib_flags &= ~RTNH_F_OFFLOAD;
-
- return err == -EOPNOTSUPP ? 0 : err;
-}
-EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
-
-/**
- * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
- *
- * @fi: route FIB info structure
- */
-void switchdev_fib_ipv4_abort(struct fib_info *fi)
-{
- /* There was a problem installing this route to the offload
- * device. For now, until we come up with more refined
- * policy handling, abruptly end IPv4 fib offloading for
- * for entire net by flushing offload device(s) of all
- * IPv4 routes, and mark IPv4 fib offloading broken from
- * this point forward.
- */
-
- fib_flush_external(fi->fib_net);
- fi->fib_net->ipv4.fib_offload_disabled = true;
-}
-EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
-
bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
struct switchdev_attr a_attr = {
.orig_dev = a,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
};
struct switchdev_attr b_attr = {
.orig_dev = b,
.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
};
if (switchdev_port_attr_get(a, &a_attr) ||
@@ -1306,89 +1123,4 @@ bool switchdev_port_same_parent_id(struct net_device *a,
return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}
-
-static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
- struct net_device *group_dev)
-{
- struct net_device *lower_dev;
- struct list_head *iter;
-
- netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
- if (lower_dev == dev)
- continue;
- if (switchdev_port_same_parent_id(dev, lower_dev))
- return lower_dev->offload_fwd_mark;
- return switchdev_port_fwd_mark_get(dev, lower_dev);
- }
-
- return dev->ifindex;
-}
EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
-
-static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
- u32 old_mark, u32 *reset_mark)
-{
- struct net_device *lower_dev;
- struct list_head *iter;
-
- netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
- if (lower_dev->offload_fwd_mark == old_mark) {
- if (!*reset_mark)
- *reset_mark = lower_dev->ifindex;
- lower_dev->offload_fwd_mark = *reset_mark;
- }
- switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
- }
-}
-
-/**
- * switchdev_port_fwd_mark_set - Set port offload forwarding mark
- *
- * @dev: port device
- * @group_dev: containing device
- * @joining: true if dev is joining group; false if leaving group
- *
- * An ungrouped port's offload mark is just its ifindex. A grouped
- * port's (member of a bridge, for example) offload mark is the ifindex
- * of one of the ports in the group with the same parent (switch) ID.
- * Ports on the same device in the same group will have the same mark.
- *
- * Example:
- *
- * br0 ifindex=9
- * sw1p1 ifindex=2 mark=2
- * sw1p2 ifindex=3 mark=2
- * sw2p1 ifindex=4 mark=5
- * sw2p2 ifindex=5 mark=5
- *
- * If sw2p2 leaves the bridge, we'll have:
- *
- * br0 ifindex=9
- * sw1p1 ifindex=2 mark=2
- * sw1p2 ifindex=3 mark=2
- * sw2p1 ifindex=4 mark=4
- * sw2p2 ifindex=5 mark=5
- */
-void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining)
-{
- u32 mark = dev->ifindex;
- u32 reset_mark = 0;
-
- if (group_dev) {
- ASSERT_RTNL();
- if (joining)
- mark = switchdev_port_fwd_mark_get(dev, group_dev);
- else if (dev->offload_fwd_mark == mark)
- /* Ohoh, this port was the mark reference port,
- * but it's leaving the group, so reset the
- * mark for the remaining ports in the group.
- */
- switchdev_port_fwd_mark_reset(group_dev, mark,
- &reset_mark);
- }
-
- dev->offload_fwd_mark = mark;
-}
-EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 46a71c701e7c..919981324171 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -27,9 +27,9 @@
#endif
static struct ctl_table_set *
-net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
+net_ctl_header_lookup(struct ctl_table_root *root)
{
- return &namespaces->net_ns->sysctls;
+ return &current->nsproxy->net_ns->sysctls;
}
static int is_seen(struct ctl_table_set *set)
@@ -42,26 +42,37 @@ static int net_ctl_permissions(struct ctl_table_header *head,
struct ctl_table *table)
{
struct net *net = container_of(head->set, struct net, sysctls);
- kuid_t root_uid = make_kuid(net->user_ns, 0);
- kgid_t root_gid = make_kgid(net->user_ns, 0);
/* Allow network administrator to have same access as root. */
- if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) ||
- uid_eq(root_uid, current_euid())) {
+ if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN)) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
- /* Allow netns root group to have the same access as the root group */
- if (in_egroup_p(root_gid)) {
- int mode = (table->mode >> 3) & 7;
- return (mode << 3) | mode;
- }
+
return table->mode;
}
+static void net_ctl_set_ownership(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct net *net = container_of(head->set, struct net, sysctls);
+ kuid_t ns_root_uid;
+ kgid_t ns_root_gid;
+
+ ns_root_uid = make_kuid(net->user_ns, 0);
+ if (uid_valid(ns_root_uid))
+ *uid = ns_root_uid;
+
+ ns_root_gid = make_kgid(net->user_ns, 0);
+ if (gid_valid(ns_root_gid))
+ *gid = ns_root_gid;
+}
+
static struct ctl_table_root net_sysctl_root = {
.lookup = net_ctl_header_lookup,
.permissions = net_ctl_permissions,
+ .set_ownership = net_ctl_set_ownership,
};
static int __net_init sysctl_net_init(struct net *net)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index ae469b37d852..753f774cb46f 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -269,18 +269,19 @@ void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
*
* RCU is locked, no other locks set
*/
-void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
- struct tipc_msg *hdr)
+int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
struct sk_buff_head xmitq;
+ int rc = 0;
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
if (msg_type(hdr) == STATE_MSG) {
tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
- tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+ rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
} else {
tipc_link_bc_init_rcv(l, hdr);
}
@@ -291,6 +292,7 @@ void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
/* Any socket wakeup messages ? */
if (!skb_queue_empty(inputq))
tipc_sk_rcv(net, inputq);
+ return rc;
}
/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index d5e79b3767fd..5ffe34472ccd 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -56,8 +56,8 @@ int tipc_bcast_get_mtu(struct net *net);
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
-void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
- struct tipc_msg *hdr);
+int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
int tipc_bclink_reset_stats(struct net *net);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 65b1bbf133bd..975dbeb60ab0 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -42,6 +42,7 @@
#include "monitor.h"
#include "bcast.h"
#include "netlink.h"
+#include "udp_media.h"
#define MAX_ADDR_STR 60
@@ -56,6 +57,13 @@ static struct tipc_media * const media_info_array[] = {
NULL
};
+static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ return rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+}
+
static void bearer_disable(struct net *net, struct tipc_bearer *b);
/**
@@ -323,6 +331,7 @@ restart:
b->domain = disc_domain;
b->net_plane = bearer_id + 'A';
b->priority = priority;
+ test_and_set_bit_lock(0, &b->up);
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
@@ -360,15 +369,24 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
*/
void tipc_bearer_reset_all(struct net *net)
{
- struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
int i;
for (i = 0; i < MAX_BEARERS; i++) {
- b = rcu_dereference_rtnl(tn->bearer_list[i]);
+ b = bearer_get(net, i);
+ if (b)
+ clear_bit_unlock(0, &b->up);
+ }
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = bearer_get(net, i);
if (b)
tipc_reset_bearer(net, b);
}
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = bearer_get(net, i);
+ if (b)
+ test_and_set_bit_lock(0, &b->up);
+ }
}
/**
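
The up/down handling above boils down to treating bit 0 of b->up as a publish flag with acquire/release ordering, so the data path can test bearer state without holding RTNL. A sketch with invented helper names:

static void my_bearer_publish(struct tipc_bearer *b)
{
	test_and_set_bit_lock(0, &b->up);	/* mark bearer usable */
}

static void my_bearer_unpublish(struct tipc_bearer *b)
{
	clear_bit_unlock(0, &b->up);		/* block data path before teardown */
}

static bool my_bearer_usable(struct tipc_bearer *b)
{
	return test_bit(0, &b->up);		/* checked on every xmit/rcv */
}
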
@@ -382,8 +400,9 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
int bearer_id = b->identity;
pr_info("Disabling bearer <%s>\n", b->name);
- b->media->disable_media(b);
+ clear_bit_unlock(0, &b->up);
tipc_node_delete_links(net, bearer_id);
+ b->media->disable_media(b);
RCU_INIT_POINTER(b->media_ptr, NULL);
if (b->link_req)
tipc_disc_delete(b->link_req);
@@ -440,22 +459,16 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
{
struct net_device *dev;
int delta;
- void *tipc_ptr;
dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
if (!dev)
return 0;
- /* Send RESET message even if bearer is detached from device */
- tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
- if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
- goto drop;
-
- delta = dev->hard_header_len - skb_headroom(skb);
- if ((delta > 0) &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
- goto drop;
-
+ delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
+ if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ return 0;
+ }
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_TIPC);
@@ -463,9 +476,6 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
dev->dev_addr, skb->len);
dev_queue_xmit(skb);
return 0;
-drop:
- kfree_skb(skb);
- return 0;
}
int tipc_bearer_mtu(struct net *net, u32 bearer_id)
@@ -487,12 +497,12 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,
struct tipc_media_addr *dest)
{
- struct tipc_net *tn = tipc_net(net);
+ struct tipc_msg *hdr = buf_msg(skb);
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (likely(b))
+ b = bearer_get(net, bearer_id);
+ if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
b->media->send_msg(net, skb, b, dest);
else
kfree_skb(skb);
@@ -505,7 +515,6 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
struct tipc_media_addr *dst)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -513,12 +522,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
return;
rcu_read_lock();
- b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ b = bearer_get(net, bearer_id);
if (unlikely(!b))
__skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, dst);
+ if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
+ b->media->send_msg(net, skb, b, dst);
+ else
+ kfree_skb(skb);
}
rcu_read_unlock();
}
@@ -535,8 +547,8 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct tipc_msg *hdr;
rcu_read_lock();
- b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
- if (unlikely(!b))
+ b = bearer_get(net, bearer_id);
+ if (unlikely(!b || !test_bit(0, &b->up)))
__skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) {
hdr = buf_msg(skb);
@@ -566,7 +578,8 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
b = rcu_dereference_rtnl(dev->tipc_ptr);
- if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) {
+ if (likely(b && test_bit(0, &b->up) &&
+ (skb->pkt_type <= PACKET_BROADCAST))) {
skb->next = NULL;
tipc_rcv(dev_net(dev), skb, b);
rcu_read_unlock();
@@ -591,18 +604,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
- struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
- int i;
b = rtnl_dereference(dev->tipc_ptr);
- if (!b) {
- for (i = 0; i < MAX_BEARERS; b = NULL, i++) {
- b = rtnl_dereference(tn->bearer_list[i]);
- if (b && (b->media_ptr == dev))
- break;
- }
- }
if (!b)
return NOTIFY_DONE;
@@ -613,11 +617,10 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (netif_carrier_ok(dev))
break;
case NETDEV_UP:
- rcu_assign_pointer(dev->tipc_ptr, b);
+ test_and_set_bit_lock(0, &b->up);
break;
case NETDEV_GOING_DOWN:
- RCU_INIT_POINTER(dev->tipc_ptr, NULL);
- synchronize_net();
+ clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
break;
case NETDEV_CHANGEMTU:
@@ -709,6 +712,14 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (bearer->media->type_id == TIPC_MEDIA_TYPE_UDP) {
+ if (tipc_udp_nl_add_bearer_data(msg, bearer))
+ goto attr_msg_full;
+ }
+#endif
+
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
@@ -895,6 +906,49 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_bearer *b;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ b = tipc_bearer_find(net, name);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+ err = tipc_udp_nl_bearer_add(b,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS]);
+ if (err) {
+ rtnl_unlock();
+ return err;
+ }
+ }
+#endif
+ rtnl_unlock();
+
+ return 0;
+}
+
int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
{
int err;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 43757f1f9cb3..78892e2f53e3 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -150,6 +150,7 @@ struct tipc_bearer {
u32 identity;
struct tipc_link_req *link_req;
char net_plane;
+ unsigned long up;
};
struct tipc_bearer_names {
@@ -180,6 +181,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 877d94f34814..b36e16cdc945 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -181,7 +181,10 @@ struct tipc_link {
u16 acked;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
- int nack_state;
+ unsigned long prev_retr;
+ u16 prev_from;
+ u16 prev_to;
+ u8 nack_state;
bool bc_peer_is_up;
/* Statistics */
@@ -202,6 +205,8 @@ enum {
BC_NACK_SND_SUPPRESS,
};
+#define TIPC_BC_RETR_LIMIT 10 /* [ms] */
+
/*
* Interval between NACKs when packets arrive out of order
*/
@@ -237,8 +242,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
u16 rcvgap, int tolerance, int priority,
struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
-static void tipc_link_build_nack_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq);
+static int tipc_link_build_nack_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
@@ -367,6 +372,18 @@ int tipc_link_bc_peers(struct tipc_link *l)
return l->ackers;
}
+u16 link_bc_rcv_gap(struct tipc_link *l)
+{
+ struct sk_buff *skb = skb_peek(&l->deferdq);
+ u16 gap = 0;
+
+ if (more(l->snd_nxt, l->rcv_nxt))
+ gap = l->snd_nxt - l->rcv_nxt;
+ if (skb)
+ gap = buf_seqno(skb) - l->rcv_nxt;
+ return gap;
+}
+
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
l->mtu = mtu;
@@ -807,7 +824,7 @@ void link_prepare_wakeup(struct tipc_link *l)
skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
imp = TIPC_SKB_CB(skb)->chain_imp;
- lim = l->window + l->backlog[imp].limit;
+ lim = l->backlog[imp].limit;
pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
if ((pnd[imp] + l->backlog[imp].len) >= lim)
break;
@@ -873,9 +890,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff *skb, *_skb, *bskb;
/* Match msg importance against this and all higher backlog limits: */
- for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
- if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
- return link_schedule_user(l, list);
+ if (!skb_queue_empty(backlogq)) {
+ for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+ if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+ return link_schedule_user(l, list);
+ }
}
if (unlikely(msg_size(hdr) > mtu)) {
skb_queue_purge(list);
@@ -1133,7 +1152,10 @@ int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
return 0;
l->rcv_unacked = 0;
- return TIPC_LINK_SND_BC_ACK;
+
+ /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
+ l->snd_nxt = l->rcv_nxt;
+ return TIPC_LINK_SND_STATE;
}
/* Unicast ACK */
@@ -1162,17 +1184,26 @@ void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
+ * Note that sending of broadcast NACK is coordinated among nodes, to
+ * reduce the risk of NACK storms towards the sender
*/
-static void tipc_link_build_nack_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq)
+static int tipc_link_build_nack_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
{
u32 def_cnt = ++l->stats.deferred_recv;
+ int match1, match2;
- if (link_is_bc_rcvlink(l))
- return;
+ if (link_is_bc_rcvlink(l)) {
+ match1 = def_cnt & 0xf;
+ match2 = tipc_own_addr(l->net) & 0xf;
+ if (match1 == match2)
+ return TIPC_LINK_SND_STATE;
+ return 0;
+ }
if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+ return 0;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
@@ -1223,7 +1254,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Defer delivery if sequence gap */
if (unlikely(seqno != rcv_nxt)) {
__tipc_skb_queue_sorted(defq, seqno, skb);
- tipc_link_build_nack_msg(l, xmitq);
+ rc |= tipc_link_build_nack_msg(l, xmitq);
break;
}
@@ -1234,7 +1265,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
rc |= tipc_link_input(l, skb, l->inputq);
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
rc |= tipc_link_build_state_msg(l, xmitq);
- if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
+ if (unlikely(rc & ~TIPC_LINK_SND_STATE))
break;
} while ((skb = __skb_dequeue(defq)));
@@ -1248,10 +1279,11 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
u16 rcvgap, int tolerance, int priority,
struct sk_buff_head *xmitq)
{
+ struct tipc_link *bcl = l->bc_rcvlink;
struct sk_buff *skb;
struct tipc_msg *hdr;
struct sk_buff_head *dfq = &l->deferdq;
- bool node_up = link_is_up(l->bc_rcvlink);
+ bool node_up = link_is_up(bcl);
struct tipc_mon_state *mstate = &l->mon_state;
int dlen = 0;
void *data;
@@ -1279,7 +1311,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_net_plane(hdr, l->net_plane);
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
- msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
+ msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
@@ -1289,6 +1321,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
if (mtyp == STATE_MSG) {
msg_set_seq_gap(hdr, rcvgap);
+ msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
msg_set_probe(hdr, probe);
tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
msg_set_size(hdr, INT_H_SIZE + dlen);
@@ -1571,51 +1604,107 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
l->rcv_nxt = peers_snd_nxt;
}
+/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
+ * - Adjust permitted range if there is overlap with previous retransmission
+ */
+static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
+{
+ unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
+
+ if (less(*to, *from))
+ return false;
+
+ /* New retransmission request */
+ if ((elapsed > TIPC_BC_RETR_LIMIT) ||
+ less(*to, l->prev_from) || more(*from, l->prev_to)) {
+ l->prev_from = *from;
+ l->prev_to = *to;
+ l->prev_retr = jiffies;
+ return true;
+ }
+
+ /* Inside range of previous retransmit */
+ if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
+ return false;
+
+ /* Fully or partially outside previous range => exclude overlap */
+ if (less(*from, l->prev_from)) {
+ *to = l->prev_from - 1;
+ l->prev_from = *from;
+ }
+ if (more(*to, l->prev_to)) {
+ *from = l->prev_to + 1;
+ l->prev_to = *to;
+ }
+ l->prev_retr = jiffies;
+ return true;
+}
+
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
*/
-void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
- struct sk_buff_head *xmitq)
+int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq)
{
+ struct tipc_link *snd_l = l->bc_sndlink;
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+ u16 from = msg_bcast_ack(hdr) + 1;
+ u16 to = from + msg_bc_gap(hdr) - 1;
+ int rc = 0;
if (!link_is_up(l))
- return;
+ return rc;
if (!msg_peer_node_is_up(hdr))
- return;
+ return rc;
/* Open when peer acknowledges our bcast init msg (pkt #1) */
if (msg_ack(hdr))
l->bc_peer_is_up = true;
if (!l->bc_peer_is_up)
- return;
+ return rc;
+
+ l->stats.recv_nacks++;
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
- return;
+ return rc;
+
+ if (link_bc_retr_eval(snd_l, &from, &to))
+ rc = tipc_link_retrans(snd_l, from, to, xmitq);
+
+ l->snd_nxt = peers_snd_nxt;
+ if (link_bc_rcv_gap(l))
+ rc |= TIPC_LINK_SND_STATE;
+
+ /* Return now if sender supports nack via STATE messages */
+ if (l->peer_caps & TIPC_BCAST_STATE_NACK)
+ return rc;
+
+ /* Otherwise, be backwards compatible */
if (!more(peers_snd_nxt, l->rcv_nxt)) {
l->nack_state = BC_NACK_SND_CONDITIONAL;
- return;
+ return 0;
}
/* Don't NACK if one was recently sent or peeked */
if (l->nack_state == BC_NACK_SND_SUPPRESS) {
l->nack_state = BC_NACK_SND_UNCONDITIONAL;
- return;
+ return 0;
}
/* Conditionally delay NACK sending until next synch rcv */
if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
l->nack_state = BC_NACK_SND_UNCONDITIONAL;
if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
- return;
+ return 0;
}
/* Send NACK now but suppress next one */
tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
l->nack_state = BC_NACK_SND_SUPPRESS;
+ return 0;
}
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
@@ -1652,6 +1741,8 @@ void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
+ * This function is here for backwards compatibility, since
+ * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
*/
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
@@ -1692,10 +1783,10 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
l->window = win;
- l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
- l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
- l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
- l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+ l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
+ l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d7e9d42fcb2d..d1bd1787a768 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -63,7 +63,7 @@ enum {
enum {
TIPC_LINK_UP_EVT = 1,
TIPC_LINK_DOWN_EVT = (1 << 1),
- TIPC_LINK_SND_BC_ACK = (1 << 2)
+ TIPC_LINK_SND_STATE = (1 << 2)
};
/* Starting value for maximum packet size negotiation on unicast links
@@ -138,8 +138,8 @@ void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
void tipc_link_build_bc_sync_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
-void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
- struct sk_buff_head *xmitq);
+int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq);
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 7cf52fb39bee..c3832cdf2278 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -719,6 +719,16 @@ static inline char *msg_media_addr(struct tipc_msg *m)
return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
}
+static inline u32 msg_bc_gap(struct tipc_msg *m)
+{
+ return msg_bits(m, 8, 0, 0x3ff);
+}
+
+static inline void msg_set_bc_gap(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 8, 0, 0x3ff, n);
+}
+
/*
* Word 9
*/
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 77a7a118911d..c7c254902873 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -39,6 +39,8 @@
#include <net/genetlink.h>
+extern const struct nla_policy tipc_nl_net_policy[];
+
int tipc_net_start(struct net *net, u32 addr);
void tipc_net_stop(struct net *net);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index a84daec0afe9..3200059d14b2 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -41,6 +41,7 @@
#include "link.h"
#include "node.h"
#include "net.h"
+#include "udp_media.h"
#include <net/genetlink.h>
static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
@@ -161,6 +162,11 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
.policy = tipc_nl_policy,
},
{
+ .cmd = TIPC_NL_BEARER_ADD,
+ .doit = tipc_nl_bearer_add,
+ .policy = tipc_nl_policy,
+ },
+ {
.cmd = TIPC_NL_BEARER_SET,
.doit = tipc_nl_bearer_set,
.policy = tipc_nl_policy,
@@ -238,6 +244,18 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
.dumpit = tipc_nl_node_dump_monitor_peer,
.policy = tipc_nl_policy,
},
+ {
+ .cmd = TIPC_NL_PEER_REMOVE,
+ .doit = tipc_nl_peer_rm,
+ .policy = tipc_nl_policy,
+ },
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ {
+ .cmd = TIPC_NL_UDP_GET_REMOTEIP,
+ .dumpit = tipc_udp_nl_dump_remoteip,
+ .policy = tipc_nl_policy,
+ },
+#endif
};
int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 21974191e425..7ef14e2d2356 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1262,6 +1262,34 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
kfree_skb(skb);
}
+static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
+ int bearer_id, struct sk_buff_head *xmitq)
+{
+ struct tipc_link *ucl;
+ int rc;
+
+ rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
+
+ if (rc & TIPC_LINK_DOWN_EVT) {
+ tipc_bearer_reset_all(n->net);
+ return;
+ }
+
+ if (!(rc & TIPC_LINK_SND_STATE))
+ return;
+
+ /* If probe message, a STATE response will be sent anyway */
+ if (msg_probe(hdr))
+ return;
+
+ /* Produce a STATE message carrying broadcast NACK */
+ tipc_node_read_lock(n);
+ ucl = n->links[bearer_id].link;
+ if (ucl)
+ tipc_link_build_state_msg(ucl, xmitq);
+ tipc_node_read_unlock(n);
+}
+
/**
* tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
* @net: the applicable net namespace
@@ -1298,7 +1326,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
rc = tipc_bcast_rcv(net, be->link, skb);
/* Broadcast ACKs are sent on a unicast link */
- if (rc & TIPC_LINK_SND_BC_ACK) {
+ if (rc & TIPC_LINK_SND_STATE) {
tipc_node_read_lock(n);
tipc_link_build_state_msg(le->link, &xmitq);
tipc_node_read_unlock(n);
@@ -1505,7 +1533,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Ensure broadcast reception is in synch with peer's send state */
if (unlikely(usr == LINK_PROTOCOL))
- tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
+ tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
@@ -1553,6 +1581,69 @@ discard:
kfree_skb(skb);
}
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+ struct tipc_node *peer;
+ u32 addr;
+ int err;
+ int i;
+
+ /* We identify the peer by its net */
+ if (!info->attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_NET_ADDR])
+ return -EINVAL;
+
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+
+ if (in_own_node(net, addr))
+ return -ENOTSUPP;
+
+ spin_lock_bh(&tn->node_list_lock);
+ peer = tipc_node_find(net, addr);
+ if (!peer) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return -ENXIO;
+ }
+
+ tipc_node_write_lock(peer);
+ if (peer->state != SELF_DOWN_PEER_DOWN &&
+ peer->state != SELF_DOWN_PEER_LEAVING) {
+ tipc_node_write_unlock(peer);
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct tipc_link_entry *le = &peer->links[i];
+
+ if (le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ peer->link_cnt--;
+ }
+ }
+ tipc_node_write_unlock(peer);
+ tipc_node_delete(peer);
+
+ err = 0;
+err_out:
+ tipc_node_put(peer);
+ spin_unlock_bh(&tn->node_list_lock);
+
+ return err;
+}
+
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index d69fdfcc0ec9..39ef54c1f2ad 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,7 +1,7 @@
/*
* net/tipc/node.h: Include file for TIPC node management routines
*
- * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2016, Ericsson AB
* Copyright (c) 2005, 2010-2014, Wind River Systems
* All rights reserved.
*
@@ -45,11 +45,14 @@
/* Optional capabilities supported by this code version
*/
enum {
- TIPC_BCAST_SYNCH = (1 << 1),
- TIPC_BLOCK_FLOWCTL = (2 << 1)
+ TIPC_BCAST_SYNCH = (1 << 1),
+ TIPC_BCAST_STATE_NACK = (1 << 2),
+ TIPC_BLOCK_FLOWCTL = (1 << 3)
};
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | TIPC_BLOCK_FLOWCTL)
+#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BLOCK_FLOWCTL)
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
@@ -77,6 +80,7 @@ int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ae7e14cae085..d80cd3f7503f 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -49,6 +49,7 @@
#include "core.h"
#include "bearer.h"
#include "netlink.h"
+#include "msg.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
@@ -70,6 +71,13 @@ struct udp_media_addr {
};
};
+/* struct udp_replicast - container for UDP remote addresses */
+struct udp_replicast {
+ struct udp_media_addr addr;
+ struct rcu_head rcu;
+ struct list_head list;
+};
+
/**
* struct udp_bearer - ip/udp bearer data structure
* @bearer: associated generic tipc bearer
@@ -82,8 +90,20 @@ struct udp_bearer {
struct socket *ubsock;
u32 ifindex;
struct work_struct work;
+ struct udp_replicast rcast;
};
+static int tipc_udp_is_mcast_addr(struct udp_media_addr *addr)
+{
+ if (ntohs(addr->proto) == ETH_P_IP)
+ return ipv4_is_multicast(addr->ipv4.s_addr);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ return ipv6_addr_is_multicast(&addr->ipv6);
+#endif
+ return 0;
+}
+
/* udp_media_addr_set - convert an ip/udp address to a TIPC media address */
static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
struct udp_media_addr *ua)
@@ -91,15 +111,9 @@ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
memset(addr, 0, sizeof(struct tipc_media_addr));
addr->media_id = TIPC_MEDIA_TYPE_UDP;
memcpy(addr->value, ua, sizeof(struct udp_media_addr));
- if (ntohs(ua->proto) == ETH_P_IP) {
- if (ipv4_is_multicast(ua->ipv4.s_addr))
- addr->broadcast = 1;
- } else if (ntohs(ua->proto) == ETH_P_IPV6) {
- if (ipv6_addr_type(&ua->ipv6) & IPV6_ADDR_MULTICAST)
- addr->broadcast = 1;
- } else {
- pr_err("Invalid UDP media address\n");
- }
+
+ if (tipc_udp_is_mcast_addr(ua))
+ addr->broadcast = 1;
}
/* tipc_udp_addr2str - convert ip/udp address to string */
@@ -140,28 +154,13 @@ static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
}
/* tipc_send_msg - enqueue a send request */
-static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
- struct tipc_bearer *b,
- struct tipc_media_addr *dest)
+static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+ struct udp_bearer *ub, struct udp_media_addr *src,
+ struct udp_media_addr *dst)
{
int ttl, err = 0;
- struct udp_bearer *ub;
- struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
- struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
struct rtable *rt;
- if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
- err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
- if (err)
- goto tx_error;
- }
-
- skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
- ub = rcu_dereference_rtnl(b->media_ptr);
- if (!ub) {
- err = -ENODEV;
- goto tx_error;
- }
if (dst->proto == htons(ETH_P_IP)) {
struct flowi4 fl = {
.daddr = dst->ipv4.s_addr,
@@ -207,29 +206,178 @@ tx_error:
return err;
}
+static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *addr)
+{
+ struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+ struct udp_media_addr *dst = (struct udp_media_addr *)&addr->value;
+ struct udp_replicast *rcast;
+ struct udp_bearer *ub;
+ int err = 0;
+
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (err)
+ goto out;
+ }
+
+ skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!addr->broadcast || list_empty(&ub->rcast.list))
+ return tipc_udp_xmit(net, skb, ub, src, dst);
+
+ /* Replicast, send an skb to each configured IP address */
+ list_for_each_entry_rcu(rcast, &ub->rcast.list, list) {
+ struct sk_buff *_skb;
+
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
+ if (err) {
+ kfree_skb(_skb);
+ goto out;
+ }
+ }
+ err = 0;
+out:
+ kfree_skb(skb);
+ return err;
+}
+
+static bool tipc_udp_is_known_peer(struct tipc_bearer *b,
+ struct udp_media_addr *addr)
+{
+ struct udp_replicast *rcast, *tmp;
+ struct udp_bearer *ub;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ pr_err_ratelimited("UDP bearer instance not found\n");
+ return false;
+ }
+
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ if (!memcmp(&rcast->addr, addr, sizeof(struct udp_media_addr)))
+ return true;
+ }
+
+ return false;
+}
+
+static int tipc_udp_rcast_add(struct tipc_bearer *b,
+ struct udp_media_addr *addr)
+{
+ struct udp_replicast *rcast;
+ struct udp_bearer *ub;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub)
+ return -ENODEV;
+
+ rcast = kmalloc(sizeof(*rcast), GFP_ATOMIC);
+ if (!rcast)
+ return -ENOMEM;
+
+ memcpy(&rcast->addr, addr, sizeof(struct udp_media_addr));
+
+ if (ntohs(addr->proto) == ETH_P_IP)
+ pr_info("New replicast peer: %pI4\n", &rcast->addr.ipv4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(addr->proto) == ETH_P_IPV6)
+ pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6);
+#endif
+
+ list_add_rcu(&rcast->list, &ub->rcast.list);
+ return 0;
+}
+
+static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb)
+{
+ struct udp_media_addr src = {0};
+ struct udp_media_addr *dst;
+
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (tipc_udp_is_mcast_addr(dst))
+ return 0;
+
+ src.port = udp_hdr(skb)->source;
+
+ if (ip_hdr(skb)->version == 4) {
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ src.proto = htons(ETH_P_IP);
+ src.ipv4.s_addr = iphdr->saddr;
+ if (ipv4_is_multicast(iphdr->daddr))
+ return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ip_hdr(skb)->version == 6) {
+ struct ipv6hdr *iphdr = ipv6_hdr(skb);
+
+ src.proto = htons(ETH_P_IPV6);
+ src.ipv6 = iphdr->saddr;
+ if (ipv6_addr_is_multicast(&iphdr->daddr))
+ return 0;
+#endif
+ } else {
+ return 0;
+ }
+
+ if (likely(tipc_udp_is_known_peer(b, &src)))
+ return 0;
+
+ return tipc_udp_rcast_add(b, &src);
+}
+
/* tipc_udp_recv - read data from bearer socket */
static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
{
struct udp_bearer *ub;
struct tipc_bearer *b;
+ struct tipc_msg *hdr;
+ int err;
ub = rcu_dereference_sk_user_data(sk);
if (!ub) {
pr_err_ratelimited("Failed to get UDP bearer reference");
- kfree_skb(skb);
- return 0;
+ goto out;
}
-
skb_pull(skb, sizeof(struct udphdr));
+ hdr = buf_msg(skb);
+
rcu_read_lock();
b = rcu_dereference_rtnl(ub->bearer);
+ if (!b)
+ goto rcu_out;
- if (b) {
+ if (b && test_bit(0, &b->up)) {
tipc_rcv(sock_net(sk), skb, b);
rcu_read_unlock();
return 0;
}
+
+ if (unlikely(msg_user(hdr) == LINK_CONFIG)) {
+ err = tipc_udp_rcast_disc(b, skb);
+ if (err)
+ goto rcu_out;
+ }
+
+ tipc_rcv(sock_net(sk), skb, b);
rcu_read_unlock();
+ return 0;
+
+rcu_out:
+ rcu_read_unlock();
+out:
kfree_skb(skb);
return 0;
}
@@ -241,15 +389,11 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
struct sock *sk = ub->ubsock->sk;
if (ntohs(remote->proto) == ETH_P_IP) {
- if (!ipv4_is_multicast(remote->ipv4.s_addr))
- return 0;
mreqn.imr_multiaddr = remote->ipv4;
mreqn.imr_ifindex = ub->ifindex;
err = ip_mc_join_group(sk, &mreqn);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!ipv6_addr_is_multicast(&remote->ipv6))
- return 0;
err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
&remote->ipv6);
#endif
@@ -257,75 +401,234 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
return err;
}
-/**
- * parse_options - build local/remote addresses from configuration
- * @attrs: netlink config data
- * @ub: UDP bearer instance
- * @local: local bearer IP address/port
- * @remote: peer or multicast IP/port
- */
-static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub,
- struct udp_media_addr *local,
- struct udp_media_addr *remote)
+static int __tipc_nl_add_udp_addr(struct sk_buff *skb,
+ struct udp_media_addr *addr, int nla_t)
{
- struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
- struct sockaddr_storage sa_local, sa_remote;
+ if (ntohs(addr->proto) == ETH_P_IP) {
+ struct sockaddr_in ip4;
- if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
- goto err;
- if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
- attrs[TIPC_NLA_BEARER_UDP_OPTS],
- tipc_nl_udp_policy))
- goto err;
- if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) {
- nla_memcpy(&sa_local, opts[TIPC_NLA_UDP_LOCAL],
- sizeof(sa_local));
- nla_memcpy(&sa_remote, opts[TIPC_NLA_UDP_REMOTE],
- sizeof(sa_remote));
+ ip4.sin_family = AF_INET;
+ ip4.sin_port = addr->port;
+ ip4.sin_addr.s_addr = addr->ipv4.s_addr;
+ if (nla_put(skb, nla_t, sizeof(ip4), &ip4))
+ return -EMSGSIZE;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ntohs(addr->proto) == ETH_P_IPV6) {
+ struct sockaddr_in6 ip6;
+
+ ip6.sin6_family = AF_INET6;
+ ip6.sin6_port = addr->port;
+ memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr));
+ if (nla_put(skb, nla_t, sizeof(ip6), &ip6))
+ return -EMSGSIZE;
+#endif
+ }
+
+ return 0;
+}
+
+int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ u32 bid = cb->args[0];
+ u32 skip_cnt = cb->args[1];
+ u32 portid = NETLINK_CB(cb->skb).portid;
+ struct udp_replicast *rcast, *tmp;
+ struct tipc_bearer *b;
+ struct udp_bearer *ub;
+ void *hdr;
+ int err;
+ int i;
+
+ if (!bid && !skip_cnt) {
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
+ struct nlattr **attrs;
+ char *bname;
+
+ err = tipc_nlmsg_parse(cb->nlh, &attrs);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(battrs, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!battrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ bname = nla_data(battrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ b = tipc_bearer_find(net, bname);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ bid = b->identity;
} else {
-err:
- pr_err("Invalid UDP bearer configuration");
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ rtnl_lock();
+ b = rtnl_dereference(tn->bearer_list[bid]);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ rtnl_unlock();
return -EINVAL;
}
- if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET) {
- struct sockaddr_in *ip4;
-
- ip4 = (struct sockaddr_in *)&sa_local;
- local->proto = htons(ETH_P_IP);
- local->port = ip4->sin_port;
- local->ipv4.s_addr = ip4->sin_addr.s_addr;
-
- ip4 = (struct sockaddr_in *)&sa_remote;
- remote->proto = htons(ETH_P_IP);
- remote->port = ip4->sin_port;
- remote->ipv4.s_addr = ip4->sin_addr.s_addr;
+
+ i = 0;
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ if (i < skip_cnt)
+ goto count;
+
+ hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_family, NLM_F_MULTI,
+ TIPC_NL_BEARER_GET);
+ if (!hdr)
+ goto done;
+
+ err = __tipc_nl_add_udp_addr(skb, &rcast->addr,
+ TIPC_NLA_UDP_REMOTE);
+ if (err) {
+ genlmsg_cancel(skb, hdr);
+ goto done;
+ }
+ genlmsg_end(skb, hdr);
+count:
+ i++;
+ }
+done:
+ rtnl_unlock();
+ cb->args[0] = bid;
+ cb->args[1] = i;
+
+ return skb->len;
+}
+
+int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b)
+{
+ struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+ struct udp_media_addr *dst;
+ struct udp_bearer *ub;
+ struct nlattr *nest;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub)
+ return -ENODEV;
+
+ nest = nla_nest_start(msg->skb, TIPC_NLA_BEARER_UDP_OPTS);
+ if (!nest)
+ goto msg_full;
+
+ if (__tipc_nl_add_udp_addr(msg->skb, src, TIPC_NLA_UDP_LOCAL))
+ goto msg_full;
+
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (__tipc_nl_add_udp_addr(msg->skb, dst, TIPC_NLA_UDP_REMOTE))
+ goto msg_full;
+
+ if (!list_empty(&ub->rcast.list)) {
+ if (nla_put_flag(msg->skb, TIPC_NLA_UDP_MULTI_REMOTEIP))
+ goto msg_full;
+ }
+
+ nla_nest_end(msg->skb, nest);
+ return 0;
+msg_full:
+ nla_nest_cancel(msg->skb, nest);
+ return -EMSGSIZE;
+}
+
+/**
+ * tipc_parse_udp_addr - build udp media address from netlink data
+ * @nla: netlink attribute containing sockaddr storage aligned address
+ * @addr: tipc media address to fill with address, port and protocol type
+ * @scope_id: IPv6 scope id pointer, not NULL indicates it's required
+ */
+
+static int tipc_parse_udp_addr(struct nlattr *nla, struct udp_media_addr *addr,
+ u32 *scope_id)
+{
+ struct sockaddr_storage sa;
+
+ nla_memcpy(&sa, nla, sizeof(sa));
+ if (sa.ss_family == AF_INET) {
+ struct sockaddr_in *ip4 = (struct sockaddr_in *)&sa;
+
+ addr->proto = htons(ETH_P_IP);
+ addr->port = ip4->sin_port;
+ addr->ipv4.s_addr = ip4->sin_addr.s_addr;
return 0;
#if IS_ENABLED(CONFIG_IPV6)
- } else if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET6) {
- int atype;
- struct sockaddr_in6 *ip6;
+ } else if (sa.ss_family == AF_INET6) {
+ struct sockaddr_in6 *ip6 = (struct sockaddr_in6 *)&sa;
- ip6 = (struct sockaddr_in6 *)&sa_local;
- atype = ipv6_addr_type(&ip6->sin6_addr);
- if (__ipv6_addr_needs_scope_id(atype) && !ip6->sin6_scope_id)
- return -EINVAL;
+ addr->proto = htons(ETH_P_IPV6);
+ addr->port = ip6->sin6_port;
+ memcpy(&addr->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
+
+ /* Scope ID is only interesting for local addresses */
+ if (scope_id) {
+ int atype;
- local->proto = htons(ETH_P_IPV6);
- local->port = ip6->sin6_port;
- memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
- ub->ifindex = ip6->sin6_scope_id;
+ atype = ipv6_addr_type(&ip6->sin6_addr);
+ if (__ipv6_addr_needs_scope_id(atype) &&
+ !ip6->sin6_scope_id) {
+ return -EINVAL;
+ }
+
+ *scope_id = ip6->sin6_scope_id ? : 0;
+ }
- ip6 = (struct sockaddr_in6 *)&sa_remote;
- remote->proto = htons(ETH_P_IPV6);
- remote->port = ip6->sin6_port;
- memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
return 0;
#endif
}
return -EADDRNOTAVAIL;
}
+int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr)
+{
+ int err;
+ struct udp_media_addr addr = {0};
+ struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+ struct udp_media_addr *dst;
+
+ if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy))
+ return -EINVAL;
+
+ if (!opts[TIPC_NLA_UDP_REMOTE])
+ return -EINVAL;
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &addr, NULL);
+ if (err)
+ return err;
+
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (tipc_udp_is_mcast_addr(dst)) {
+ pr_err("Can't add remote ip to TIPC UDP multicast bearer\n");
+ return -EINVAL;
+ }
+
+ if (tipc_udp_is_known_peer(b, &addr))
+ return 0;
+
+ return tipc_udp_rcast_add(b, &addr);
+}
+
/**
* tipc_udp_enable - callback to create a new udp bearer instance
* @net: network namespace
@@ -340,18 +643,38 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
{
int err = -EINVAL;
struct udp_bearer *ub;
- struct udp_media_addr *remote;
+ struct udp_media_addr remote = {0};
struct udp_media_addr local = {0};
struct udp_port_cfg udp_conf = {0};
struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
if (!ub)
return -ENOMEM;
- remote = (struct udp_media_addr *)&b->bcast_addr.value;
- memset(remote, 0, sizeof(struct udp_media_addr));
- err = parse_options(attrs, ub, &local, remote);
+ INIT_LIST_HEAD(&ub->rcast.list);
+
+ if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
+ goto err;
+
+ if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS],
+ tipc_nl_udp_policy))
+ goto err;
+
+ if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) {
+ pr_err("Invalid UDP bearer configuration");
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local,
+ &ub->ifindex);
+ if (err)
+ goto err;
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &remote, NULL);
if (err)
goto err;
@@ -396,9 +719,18 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
tuncfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
- err = enable_mcast(ub, remote);
+ /**
+ * The bcast media address port is used for all peers and the ip
+ * is used if it's a multicast address.
+ */
+ memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
+ if (tipc_udp_is_mcast_addr(&remote))
+ err = enable_mcast(ub, &remote);
+ else
+ err = tipc_udp_rcast_add(b, &remote);
if (err)
goto err;
+
return 0;
err:
if (ub->ubsock)
@@ -411,6 +743,12 @@ err:
static void cleanup_bearer(struct work_struct *work)
{
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+ struct udp_replicast *rcast, *tmp;
+
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ list_del_rcu(&rcast->list);
+ kfree_rcu(rcast, rcu);
+ }
if (ub->ubsock)
udp_tunnel_sock_release(ub->ubsock);
diff --git a/net/tipc/udp_media.h b/net/tipc/udp_media.h
new file mode 100644
index 000000000000..281bbae87726
--- /dev/null
+++ b/net/tipc/udp_media.h
@@ -0,0 +1,46 @@
+/*
+ * net/tipc/udp_media.h: Include file for UDP bearer media
+ *
+ * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+#ifndef _TIPC_UDP_MEDIA_H
+#define _TIPC_UDP_MEDIA_H
+
+int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr);
+int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b);
+int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb);
+
+#endif
+#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8309687a56b0..145082e2ba36 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2475,28 +2475,13 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
return unix_stream_read_generic(&state);
}
-static ssize_t skb_unix_socket_splice(struct sock *sk,
- struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
-{
- int ret;
- struct unix_sock *u = unix_sk(sk);
-
- mutex_unlock(&u->iolock);
- ret = splice_to_pipe(pipe, spd);
- mutex_lock(&u->iolock);
-
- return ret;
-}
-
static int unix_stream_splice_actor(struct sk_buff *skb,
int skip, int chunk,
struct unix_stream_read_state *state)
{
return skb_splice_bits(skb, state->socket->sk,
UNIXCB(skb).consumed + skip,
- state->pipe, chunk, state->splice_flags,
- skb_unix_socket_splice);
+ state->pipe, chunk, state->splice_flags);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 0f506220a3bd..5497d022fada 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -372,6 +372,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NUM_NL80211_IFTYPES:
@@ -946,6 +947,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
/* these interface types don't really have a channel */
return;
case NL80211_IFTYPE_UNSPECIFIED:
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7645e97362c0..8201e6d7449e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -225,6 +225,23 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
}
}
+void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ ASSERT_RTNL();
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_NAN))
+ return;
+
+ if (!wdev->nan_started)
+ return;
+
+ rdev_stop_nan(rdev, wdev);
+ wdev->nan_started = false;
+
+ rdev->opencount--;
+}
+
void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -242,6 +259,9 @@ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
case NL80211_IFTYPE_P2P_DEVICE:
cfg80211_stop_p2p_device(rdev, wdev);
break;
+ case NL80211_IFTYPE_NAN:
+ cfg80211_stop_nan(rdev, wdev);
+ break;
default:
break;
}
@@ -537,6 +557,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
c->limits[j].max > 1))
return -EINVAL;
+ /* Only a single NAN can be allowed */
+ if (WARN_ON(types & BIT(NL80211_IFTYPE_NAN) &&
+ c->limits[j].max > 1))
+ return -EINVAL;
+
cnt += c->limits[j].max;
/*
* Don't advertise an unsupported type
@@ -579,6 +604,11 @@ int wiphy_register(struct wiphy *wiphy)
!rdev->ops->tdls_cancel_channel_switch)))
return -EINVAL;
+ if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN)) &&
+ (!rdev->ops->start_nan || !rdev->ops->stop_nan ||
+ !rdev->ops->add_nan_func || !rdev->ops->del_nan_func)))
+ return -EINVAL;
+
/*
* if a wiphy has unsupported modes for regulatory channel enforcement,
* opt-out of enforcement checking
@@ -589,6 +619,7 @@ int wiphy_register(struct wiphy *wiphy)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_NAN) |
BIT(NL80211_IFTYPE_AP_VLAN) |
BIT(NL80211_IFTYPE_MONITOR)))
wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
@@ -906,6 +937,8 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
if (WARN_ON(wdev->netdev))
return;
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
+
list_del_rcu(&wdev->list);
rdev->devlist_generation++;
@@ -914,6 +947,9 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
cfg80211_mlme_purge_registrations(wdev);
cfg80211_stop_p2p_device(rdev, wdev);
break;
+ case NL80211_IFTYPE_NAN:
+ cfg80211_stop_nan(rdev, wdev);
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -977,6 +1013,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
/* must be handled by mac80211/driver, has no APIs */
break;
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
/* cannot happen, has no netdev */
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -1079,6 +1116,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
dev->priv_flags |= IFF_DONT_BRIDGE;
+
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
break;
case NETDEV_GOING_DOWN:
cfg80211_leave(rdev, wdev);
@@ -1157,6 +1196,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* remove and clean it up.
*/
if (!list_empty(&wdev->list)) {
+ nl80211_notify_iface(rdev, wdev,
+ NL80211_CMD_DEL_INTERFACE);
sysfs_remove_link(&dev->dev.kobj, "phy80211");
list_del_rcu(&wdev->list);
rdev->devlist_generation++;
@@ -1246,7 +1287,7 @@ static int __init cfg80211_init(void)
if (err)
goto out_fail_reg;
- cfg80211_wq = create_singlethread_workqueue("cfg80211");
+ cfg80211_wq = alloc_ordered_workqueue("cfg80211", WQ_MEM_RECLAIM);
if (!cfg80211_wq) {
err = -ENOMEM;
goto out_fail_wq;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eee91443924d..08d2e948c9ad 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -249,9 +249,9 @@ struct cfg80211_event {
};
struct cfg80211_cached_keys {
- struct key_params params[6];
- u8 data[6][WLAN_MAX_KEY_LEN];
- int def, defmgmt;
+ struct key_params params[CFG80211_MAX_WEP_KEYS];
+ u8 data[CFG80211_MAX_WEP_KEYS][WLAN_KEY_LEN_WEP104];
+ int def;
};
enum cfg80211_chan_mode {
@@ -488,6 +488,9 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
+void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 4a4dda53bdf1..364f900a3dc4 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -43,7 +43,8 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
cfg80211_hold_bss(bss_from_pub(bss));
wdev->current_bss = bss_from_pub(bss);
- cfg80211_upload_connect_keys(wdev);
+ if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
+ cfg80211_upload_connect_keys(wdev);
nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
GFP_KERNEL);
@@ -114,6 +115,9 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
}
}
+ if (WARN_ON(connkeys && connkeys->def < 0))
+ return -EINVAL;
+
if (WARN_ON(wdev->connect_keys))
kzfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
@@ -284,18 +288,16 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
if (!netif_running(wdev->netdev))
return 0;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys)
wdev->wext.keys->def = wdev->wext.default_key;
- wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
- }
wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
ck->params[i].key = ck->data[i];
}
err = __cfg80211_join_ibss(rdev, wdev->netdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c284d883c349..cbb48e26a871 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -222,7 +222,7 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- if (!key || !key_len || key_idx < 0 || key_idx > 4)
+ if (!key || !key_len || key_idx < 0 || key_idx > 3)
return -EINVAL;
if (wdev->current_bss &&
@@ -634,6 +634,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
* fall through, P2P device only supports
* public action frames
*/
+ case NL80211_IFTYPE_NAN:
default:
err = -EOPNOTSUPP;
break;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4809f4d2cdcc..c510810f0b7c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -56,6 +56,7 @@ enum nl80211_multicast_groups {
NL80211_MCGRP_REGULATORY,
NL80211_MCGRP_MLME,
NL80211_MCGRP_VENDOR,
+ NL80211_MCGRP_NAN,
NL80211_MCGRP_TESTMODE /* keep last - ifdef! */
};
@@ -65,6 +66,7 @@ static const struct genl_multicast_group nl80211_mcgrps[] = {
[NL80211_MCGRP_REGULATORY] = { .name = NL80211_MULTICAST_GROUP_REG },
[NL80211_MCGRP_MLME] = { .name = NL80211_MULTICAST_GROUP_MLME },
[NL80211_MCGRP_VENDOR] = { .name = NL80211_MULTICAST_GROUP_VENDOR },
+ [NL80211_MCGRP_NAN] = { .name = NL80211_MULTICAST_GROUP_NAN },
#ifdef CONFIG_NL80211_TESTMODE
[NL80211_MCGRP_TESTMODE] = { .name = NL80211_MULTICAST_GROUP_TESTMODE }
#endif
@@ -409,6 +411,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.len = VHT_MUMIMO_GROUPS_DATA_LEN
},
[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
+ [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
+ [NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 },
+ [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
};
/* policy for the key attributes */
@@ -502,6 +507,39 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
},
};
+/* policy for NAN function attributes */
+static const struct nla_policy
+nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
+ [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+ .len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
+ [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
+ [NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG },
+ [NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { .len = ETH_ALEN },
+ [NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG },
+ [NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 },
+ [NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY,
+ .len = NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN },
+ [NL80211_NAN_FUNC_SRF] = { .type = NLA_NESTED },
+ [NL80211_NAN_FUNC_RX_MATCH_FILTER] = { .type = NLA_NESTED },
+ [NL80211_NAN_FUNC_TX_MATCH_FILTER] = { .type = NLA_NESTED },
+ [NL80211_NAN_FUNC_INSTANCE_ID] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_TERM_REASON] = { .type = NLA_U8 },
+};
+
+/* policy for Service Response Filter attributes */
+static const struct nla_policy
+nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
+ [NL80211_NAN_SRF_INCLUDE] = { .type = NLA_FLAG },
+ [NL80211_NAN_SRF_BF] = { .type = NLA_BINARY,
+ .len = NL80211_NAN_FUNC_SRF_MAX_LEN },
+ [NL80211_NAN_SRF_BF_IDX] = { .type = NLA_U8 },
+ [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -848,13 +886,21 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
struct nlattr *key;
struct cfg80211_cached_keys *result;
int rem, err, def = 0;
+ bool have_key = false;
+
+ nla_for_each_nested(key, keys, rem) {
+ have_key = true;
+ break;
+ }
+
+ if (!have_key)
+ return NULL;
result = kzalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return ERR_PTR(-ENOMEM);
result->def = -1;
- result->defmgmt = -1;
nla_for_each_nested(key, keys, rem) {
memset(&parse, 0, sizeof(parse));
@@ -866,7 +912,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
err = -EINVAL;
if (!parse.p.key)
goto error;
- if (parse.idx < 0 || parse.idx > 4)
+ if (parse.idx < 0 || parse.idx > 3)
goto error;
if (parse.def) {
if (def)
@@ -881,16 +927,24 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
parse.idx, false, NULL);
if (err)
goto error;
+ if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+ err = -EINVAL;
+ goto error;
+ }
result->params[parse.idx].cipher = parse.p.cipher;
result->params[parse.idx].key_len = parse.p.key_len;
result->params[parse.idx].key = result->data[parse.idx];
memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
- if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 ||
- parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) {
- if (no_ht)
- *no_ht = true;
- }
+ /* must be WEP key if we got here */
+ if (no_ht)
+ *no_ht = true;
+ }
+
+ if (result->def < 0) {
+ err = -EINVAL;
+ goto error;
}
return result;
@@ -918,6 +972,7 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_NAN:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
@@ -2525,10 +2580,35 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int if_idx = 0;
int wp_start = cb->args[0];
int if_start = cb->args[1];
+ int filter_wiphy = -1;
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
rtnl_lock();
+ if (!cb->args[2]) {
+ struct nl80211_dump_wiphy_state state = {
+ .filter_wiphy = -1,
+ };
+ int ret;
+
+ ret = nl80211_dump_wiphy_parse(skb, cb, &state);
+ if (ret)
+ return ret;
+
+ filter_wiphy = state.filter_wiphy;
+
+ /*
+ * if filtering, set cb->args[2] to +1 since 0 is the default
+ * value needed to determine that parsing is necessary.
+ */
+ if (filter_wiphy >= 0)
+ cb->args[2] = filter_wiphy + 1;
+ else
+ cb->args[2] = -1;
+ } else if (cb->args[2] > 0) {
+ filter_wiphy = cb->args[2] - 1;
+ }
+
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
continue;
@@ -2536,6 +2616,10 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
wp_idx++;
continue;
}
+
+ if (filter_wiphy >= 0 && filter_wiphy != rdev->wiphy_idx)
+ continue;
+
if_idx = 0;
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
@@ -2751,7 +2835,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct vif_params params;
struct wireless_dev *wdev;
- struct sk_buff *msg, *event;
+ struct sk_buff *msg;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
u32 flags;
@@ -2774,7 +2858,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
!(rdev->wiphy.interface_modes & (1 << type)))
return -EOPNOTSUPP;
- if ((type == NL80211_IFTYPE_P2P_DEVICE ||
+ if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
rdev->wiphy.features & NL80211_FEATURE_MAC_ON_CREATE) &&
info->attrs[NL80211_ATTR_MAC]) {
nla_memcpy(params.macaddr, info->attrs[NL80211_ATTR_MAC],
@@ -2830,9 +2914,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev->mesh_id_up_len);
wdev_unlock(wdev);
break;
+ case NL80211_IFTYPE_NAN:
case NL80211_IFTYPE_P2P_DEVICE:
/*
- * P2P Device doesn't have a netdev, so doesn't go
+ * P2P Device and NAN do not have a netdev, so don't go
* through the netdev notifier and must be added here
*/
mutex_init(&wdev->mtx);
@@ -2855,20 +2940,15 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return -ENOBUFS;
}
- event = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (event) {
- if (nl80211_send_iface(event, 0, 0, 0,
- rdev, wdev, false) < 0) {
- nlmsg_free(event);
- goto out;
- }
-
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
- event, 0, NL80211_MCGRP_CONFIG,
- GFP_KERNEL);
- }
+ /*
+ * For wdevs which have no associated netdev object (e.g. of type
+ * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here.
+ * For all other types, the event will be generated from the
+ * netdev notifier
+ */
+ if (!wdev->netdev)
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
-out:
return genlmsg_reply(msg, info);
}
@@ -2876,18 +2956,10 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
- struct sk_buff *msg;
- int status;
if (!rdev->ops->del_virtual_intf)
return -EOPNOTSUPP;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) {
- nlmsg_free(msg);
- msg = NULL;
- }
-
/*
* If we remove a wireless device without a netdev then clear
* user_ptr[1] so that nl80211_post_doit won't dereference it
@@ -2898,15 +2970,7 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
if (!wdev->netdev)
info->user_ptr[1] = NULL;
- status = rdev_del_virtual_intf(rdev, wdev);
- if (status >= 0 && msg)
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
- msg, 0, NL80211_MCGRP_CONFIG,
- GFP_KERNEL);
- else
- nlmsg_free(msg);
-
- return status;
+ return rdev_del_virtual_intf(rdev, wdev);
}
static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -3316,6 +3380,291 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len)
+{
+ u8 i;
+ u32 mask = 0;
+
+ for (i = 0; i < rates_len; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ int ridx;
+
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate =
+ &sband->bitrates[ridx];
+ if (rate == srate->bitrate) {
+ mask |= 1 << ridx;
+ break;
+ }
+ }
+ if (ridx == sband->n_bitrates)
+ return 0; /* rate not found */
+ }
+
+ return mask;
+}
+
+static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len,
+ u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
+{
+ u8 i;
+
+ memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
+
+ for (i = 0; i < rates_len; i++) {
+ int ridx, rbit;
+
+ ridx = rates[i] / 8;
+ rbit = BIT(rates[i] % 8);
+
+ /* check validity */
+ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
+ return false;
+
+ /* check availability */
+ if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+ mcs[ridx] |= rbit;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
+{
+ u16 mcs_mask = 0;
+
+ switch (vht_mcs_map) {
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs_mask = 0x00FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs_mask = 0x01FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs_mask = 0x03FF;
+ break;
+ default:
+ break;
+ }
+
+ return mcs_mask;
+}
+
+static void vht_build_mcs_mask(u16 vht_mcs_map,
+ u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ u8 nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
+ vht_mcs_map >>= 2;
+ }
+}
+
+static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_vht *txrate,
+ u16 mcs[NL80211_VHT_NSS_MAX])
+{
+ u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
+ u8 i;
+
+ if (!sband->vht_cap.vht_supported)
+ return false;
+
+ memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+ /* Build vht_mcs_mask from VHT capabilities */
+ vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+ [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_RATES },
+ [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_HT_RATES },
+ [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_GI] = { .type = NLA_U8 },
+};
+
+static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
+ struct cfg80211_bitrate_mask *mask)
+{
+ struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int rem, i;
+ struct nlattr *tx_rates;
+ struct ieee80211_supported_band *sband;
+ u16 vht_tx_mcs_map;
+
+ memset(mask, 0, sizeof(*mask));
+ /* Default to all rates enabled */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ sband = rdev->wiphy.bands[i];
+
+ if (!sband)
+ continue;
+
+ mask->control[i].legacy = (1 << sband->n_bitrates) - 1;
+ memcpy(mask->control[i].ht_mcs,
+ sband->ht_cap.mcs.rx_mask,
+ sizeof(mask->control[i].ht_mcs));
+
+ if (!sband->vht_cap.vht_supported)
+ continue;
+
+ vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ }
+
+ /* if no rates are given set it back to the defaults */
+ if (!info->attrs[NL80211_ATTR_TX_RATES])
+ goto out;
+
+ /* The nested attribute uses enum nl80211_band as the index. This maps
+ * directly to the enum nl80211_band values used in cfg80211.
+ */
+ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
+ enum nl80211_band band = nla_type(tx_rates);
+ int err;
+
+ if (band < 0 || band >= NUM_NL80211_BANDS)
+ return -EINVAL;
+ sband = rdev->wiphy.bands[band];
+ if (sband == NULL)
+ return -EINVAL;
+ err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (err)
+ return err;
+ if (tb[NL80211_TXRATE_LEGACY]) {
+ mask->control[band].legacy = rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_LEGACY]),
+ nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if ((mask->control[band].legacy == 0) &&
+ nla_len(tb[NL80211_TXRATE_LEGACY]))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_HT]) {
+ if (!ht_rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_HT]),
+ nla_len(tb[NL80211_TXRATE_HT]),
+ mask->control[band].ht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_VHT]) {
+ if (!vht_set_mcs_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_VHT]),
+ mask->control[band].vht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_GI]) {
+ mask->control[band].gi =
+ nla_get_u8(tb[NL80211_TXRATE_GI]);
+ if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+ }
+
+ if (mask->control[band].legacy == 0) {
+ /* don't allow empty legacy rates if HT or VHT
+ * are not even supported.
+ */
+ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
+ rdev->wiphy.bands[band]->vht_cap.vht_supported))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ goto out;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ goto out;
+
+ /* legacy and mcs rates may not be both empty */
+ return -EINVAL;
+ }
+ }
+
+out:
+ return 0;
+}
+
+static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
+ enum nl80211_band band,
+ struct cfg80211_bitrate_mask *beacon_rate)
+{
+ u32 count_ht, count_vht, i;
+ u32 rate = beacon_rate->control[band].legacy;
+
+ /* Allow only one rate */
+ if (hweight32(rate) > 1)
+ return -EINVAL;
+
+ count_ht = 0;
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].ht_mcs[i]) {
+ count_ht++;
+ if (count_ht > 1)
+ return -EINVAL;
+ }
+ if (count_ht && rate)
+ return -EINVAL;
+ }
+
+ count_vht = 0;
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].vht_mcs[i]) {
+ count_vht++;
+ if (count_vht > 1)
+ return -EINVAL;
+ }
+ if (count_vht && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
+ return -EINVAL;
+
+ if (rate &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY))
+ return -EINVAL;
+ if (count_ht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT))
+ return -EINVAL;
+ if (count_vht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT))
+ return -EINVAL;
+
+ return 0;
+}
+
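Taken together, these checks allow the fixed beacon rate to contain exactly
one bit, set in exactly one of the legacy, HT or VHT sets, and only when the
driver advertises the matching NL80211_EXT_FEATURE_BEACON_RATE_* flag. A
simplified standalone model of the counting part, with the feature-flag and
band-capability checks left out (sketch only, using the GCC/Clang popcount
builtin):

    #include <stdbool.h>
    #include <stdint.h>

    static bool one_beacon_rate(uint32_t legacy, const uint8_t *ht, int ht_len,
                                const uint16_t *vht, int vht_len)
    {
            int total = __builtin_popcount(legacy);

            for (int i = 0; i < ht_len; i++)
                    total += __builtin_popcount(ht[i]);
            for (int i = 0; i < vht_len; i++)
                    total += __builtin_popcount(vht[i]);

            /* exactly one rate selected across all three sets */
            return total == 1;
    }
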
static int nl80211_parse_beacon(struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
{
@@ -3545,6 +3894,17 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
wdev->iftype))
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_TX_RATES]) {
+ err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
+ &params.beacon_rate);
+ if (err)
+ return err;
+ }
+
if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
params.smps_mode =
nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
@@ -5374,6 +5734,18 @@ static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *ou
return 0;
}
+static int nl80211_check_power_mode(const struct nlattr *nla,
+ enum nl80211_mesh_power_mode min,
+ enum nl80211_mesh_power_mode max,
+ enum nl80211_mesh_power_mode *out)
+{
+ u32 val = nla_get_u32(nla);
+ if (val < min || val > max)
+ return -EINVAL;
+ *out = val;
+ return 0;
+}
+
static int nl80211_parse_mesh_config(struct genl_info *info,
struct mesh_config *cfg,
u32 *mask_out)
@@ -5518,7 +5890,7 @@ do { \
NL80211_MESH_POWER_ACTIVE,
NL80211_MESH_POWER_MAX,
mask, NL80211_MESHCONF_POWER_MODE,
- nl80211_check_u32);
+ nl80211_check_power_mode);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
0, 65535, mask,
NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16);
@@ -6102,6 +6474,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
wiphy = &rdev->wiphy;
+ if (wdev->iftype == NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
if (!rdev->ops->scan)
return -EOPNOTSUPP;
@@ -7368,7 +7743,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
(key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
key.p.key_len != WLAN_KEY_LEN_WEP104))
return -EINVAL;
- if (key.idx > 4)
+ if (key.idx > 3)
return -EINVAL;
} else {
key.p.key_len = 0;
@@ -7773,12 +8148,13 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
ibss.beacon_interval = 100;
- if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
+ if (info->attrs[NL80211_ATTR_BEACON_INTERVAL])
ibss.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
- return -EINVAL;
- }
+
+ err = cfg80211_validate_beacon_int(rdev, ibss.beacon_interval);
+ if (err)
+ return err;
if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
@@ -7985,6 +8361,8 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
}
data = nla_nest_start(skb, attr);
+ if (!data)
+ goto nla_put_failure;
((void **)skb->cb)[0] = rdev;
((void **)skb->cb)[1] = hdr;
@@ -8602,238 +8980,21 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
}
-static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len)
-{
- u8 i;
- u32 mask = 0;
-
- for (i = 0; i < rates_len; i++) {
- int rate = (rates[i] & 0x7f) * 5;
- int ridx;
-
- for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
- struct ieee80211_rate *srate =
- &sband->bitrates[ridx];
- if (rate == srate->bitrate) {
- mask |= 1 << ridx;
- break;
- }
- }
- if (ridx == sband->n_bitrates)
- return 0; /* rate not found */
- }
-
- return mask;
-}
-
-static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len,
- u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
-{
- u8 i;
-
- memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
-
- for (i = 0; i < rates_len; i++) {
- int ridx, rbit;
-
- ridx = rates[i] / 8;
- rbit = BIT(rates[i] % 8);
-
- /* check validity */
- if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
- return false;
-
- /* check availability */
- if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
- mcs[ridx] |= rbit;
- else
- return false;
- }
-
- return true;
-}
-
-static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
-{
- u16 mcs_mask = 0;
-
- switch (vht_mcs_map) {
- case IEEE80211_VHT_MCS_NOT_SUPPORTED:
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_7:
- mcs_mask = 0x00FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_8:
- mcs_mask = 0x01FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_9:
- mcs_mask = 0x03FF;
- break;
- default:
- break;
- }
-
- return mcs_mask;
-}
-
-static void vht_build_mcs_mask(u16 vht_mcs_map,
- u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
-{
- u8 nss;
-
- for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
- vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
- vht_mcs_map >>= 2;
- }
-}
-
-static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
- struct nl80211_txrate_vht *txrate,
- u16 mcs[NL80211_VHT_NSS_MAX])
-{
- u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
- u8 i;
-
- if (!sband->vht_cap.vht_supported)
- return false;
-
- memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
-
- /* Build vht_mcs_mask from VHT capabilities */
- vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
- mcs[i] = txrate->mcs[i];
- else
- return false;
- }
-
- return true;
-}
-
-static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
- [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_RATES },
- [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_HT_RATES },
- [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
- [NL80211_TXRATE_GI] = { .type = NLA_U8 },
-};
-
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
struct genl_info *info)
{
- struct nlattr *tb[NL80211_TXRATE_MAX + 1];
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct cfg80211_bitrate_mask mask;
- int rem, i;
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct nlattr *tx_rates;
- struct ieee80211_supported_band *sband;
- u16 vht_tx_mcs_map;
+ int err;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
- memset(&mask, 0, sizeof(mask));
- /* Default to all rates enabled */
- for (i = 0; i < NUM_NL80211_BANDS; i++) {
- sband = rdev->wiphy.bands[i];
-
- if (!sband)
- continue;
-
- mask.control[i].legacy = (1 << sband->n_bitrates) - 1;
- memcpy(mask.control[i].ht_mcs,
- sband->ht_cap.mcs.rx_mask,
- sizeof(mask.control[i].ht_mcs));
-
- if (!sband->vht_cap.vht_supported)
- continue;
-
- vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- vht_build_mcs_mask(vht_tx_mcs_map, mask.control[i].vht_mcs);
- }
-
- /* if no rates are given set it back to the defaults */
- if (!info->attrs[NL80211_ATTR_TX_RATES])
- goto out;
-
- /*
- * The nested attribute uses enum nl80211_band as the index. This maps
- * directly to the enum nl80211_band values used in cfg80211.
- */
- BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
- nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
- enum nl80211_band band = nla_type(tx_rates);
- int err;
-
- if (band < 0 || band >= NUM_NL80211_BANDS)
- return -EINVAL;
- sband = rdev->wiphy.bands[band];
- if (sband == NULL)
- return -EINVAL;
- err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
- nla_len(tx_rates), nl80211_txattr_policy);
- if (err)
- return err;
- if (tb[NL80211_TXRATE_LEGACY]) {
- mask.control[band].legacy = rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_LEGACY]),
- nla_len(tb[NL80211_TXRATE_LEGACY]));
- if ((mask.control[band].legacy == 0) &&
- nla_len(tb[NL80211_TXRATE_LEGACY]))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_HT]) {
- if (!ht_rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_HT]),
- nla_len(tb[NL80211_TXRATE_HT]),
- mask.control[band].ht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_VHT]) {
- if (!vht_set_mcs_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_VHT]),
- mask.control[band].vht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_GI]) {
- mask.control[band].gi =
- nla_get_u8(tb[NL80211_TXRATE_GI]);
- if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
- }
-
- if (mask.control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT or VHT
- * are not even supported.
- */
- if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
- rdev->wiphy.bands[band]->vht_cap.vht_supported))
- return -EINVAL;
-
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask.control[band].ht_mcs[i])
- goto out;
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask.control[band].vht_mcs[i])
- goto out;
-
- /* legacy and mcs rates may not be both empty */
- return -EINVAL;
- }
- }
+ err = nl80211_parse_tx_bitrate_mask(info, &mask);
+ if (err)
+ return err;
-out:
return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
}
@@ -8859,6 +9020,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
break;
+ case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
@@ -8904,6 +9066,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_P2P_GO:
break;
+ case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
@@ -9020,6 +9183,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
break;
+ case NL80211_IFTYPE_NAN:
default:
return -EOPNOTSUPP;
}
@@ -9252,9 +9416,10 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
setup.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- if (setup.beacon_interval < 10 ||
- setup.beacon_interval > 10000)
- return -EINVAL;
+
+ err = cfg80211_validate_beacon_int(rdev, setup.beacon_interval);
+ if (err)
+ return err;
}
if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
@@ -9300,6 +9465,17 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_TX_RATES]) {
+ err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
+ &setup.beacon_rate);
+ if (err)
+ return err;
+ }
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
@@ -9413,18 +9589,27 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (!freqs)
return -ENOBUFS;
- for (i = 0; i < req->n_channels; i++)
- nla_put_u32(msg, i, req->channels[i]->center_freq);
+ for (i = 0; i < req->n_channels; i++) {
+ if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+ return -ENOBUFS;
+ }
nla_nest_end(msg, freqs);
if (req->n_match_sets) {
matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH);
+ if (!matches)
+ return -ENOBUFS;
+
for (i = 0; i < req->n_match_sets; i++) {
match = nla_nest_start(msg, i);
- nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
- req->match_sets[i].ssid.ssid_len,
- req->match_sets[i].ssid.ssid);
+ if (!match)
+ return -ENOBUFS;
+
+ if (nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
+ req->match_sets[i].ssid.ssid_len,
+ req->match_sets[i].ssid.ssid))
+ return -ENOBUFS;
nla_nest_end(msg, match);
}
nla_nest_end(msg, matches);
@@ -9436,6 +9621,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
for (i = 0; i < req->n_scan_plans; i++) {
scan_plan = nla_nest_start(msg, i + 1);
+ if (!scan_plan)
+ return -ENOBUFS;
+
if (!scan_plan ||
nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
req->scan_plans[i].interval) ||
@@ -10362,6 +10550,549 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ struct cfg80211_nan_conf conf = {};
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (wdev->nan_started)
+ return -EEXIST;
+
+ if (rfkill_blocked(rdev->rfkill))
+ return -ERFKILL;
+
+ if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF])
+ return -EINVAL;
+
+ if (!info->attrs[NL80211_ATTR_NAN_DUAL])
+ return -EINVAL;
+
+ conf.master_pref =
+ nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
+ if (!conf.master_pref)
+ return -EINVAL;
+
+ conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]);
+
+ err = rdev_start_nan(rdev, wdev, &conf);
+ if (err)
+ return err;
+
+ wdev->nan_started = true;
+ rdev->opencount++;
+
+ return 0;
+}
+
+static int nl80211_stop_nan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (wdev->iftype != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ cfg80211_stop_nan(rdev, wdev);
+
+ return 0;
+}
+
+static int validate_nan_filter(struct nlattr *filter_attr)
+{
+ struct nlattr *attr;
+ int len = 0, n_entries = 0, rem;
+
+ nla_for_each_nested(attr, filter_attr, rem) {
+ len += nla_len(attr);
+ n_entries++;
+ }
+
+ if (len >= U8_MAX)
+ return -EINVAL;
+
+ return n_entries;
+}
+
+static int handle_nan_filter(struct nlattr *attr_filter,
+ struct cfg80211_nan_func *func,
+ bool tx)
+{
+ struct nlattr *attr;
+ int n_entries, rem, i;
+ struct cfg80211_nan_func_filter *filter;
+
+ n_entries = validate_nan_filter(attr_filter);
+ if (n_entries < 0)
+ return n_entries;
+
+ BUILD_BUG_ON(sizeof(*func->rx_filters) != sizeof(*func->tx_filters));
+
+ filter = kcalloc(n_entries, sizeof(*func->rx_filters), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ i = 0;
+ nla_for_each_nested(attr, attr_filter, rem) {
+ filter[i].filter = kmemdup(nla_data(attr), nla_len(attr),
+ GFP_KERNEL);
+ filter[i].len = nla_len(attr);
+ i++;
+ }
+ if (tx) {
+ func->num_tx_filters = n_entries;
+ func->tx_filters = filter;
+ } else {
+ func->num_rx_filters = n_entries;
+ func->rx_filters = filter;
+ }
+
+ return 0;
+}
+
+static int nl80211_nan_add_func(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ struct nlattr *tb[NUM_NL80211_NAN_FUNC_ATTR], *func_attr;
+ struct cfg80211_nan_func *func;
+ struct sk_buff *msg = NULL;
+ void *hdr = NULL;
+ int err = 0;
+
+ if (wdev->iftype != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (!wdev->nan_started)
+ return -ENOTCONN;
+
+ if (!info->attrs[NL80211_ATTR_NAN_FUNC])
+ return -EINVAL;
+
+ if (wdev->owner_nlportid &&
+ wdev->owner_nlportid != info->snd_portid)
+ return -ENOTCONN;
+
+ err = nla_parse(tb, NL80211_NAN_FUNC_ATTR_MAX,
+ nla_data(info->attrs[NL80211_ATTR_NAN_FUNC]),
+ nla_len(info->attrs[NL80211_ATTR_NAN_FUNC]),
+ nl80211_nan_func_policy);
+ if (err)
+ return err;
+
+ func = kzalloc(sizeof(*func), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->cookie = wdev->wiphy->cookie_counter++;
+
+ if (!tb[NL80211_NAN_FUNC_TYPE] ||
+ nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ func->type = nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]);
+
+ if (!tb[NL80211_NAN_FUNC_SERVICE_ID]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(func->service_id, nla_data(tb[NL80211_NAN_FUNC_SERVICE_ID]),
+ sizeof(func->service_id));
+
+ func->close_range =
+ nla_get_flag(tb[NL80211_NAN_FUNC_CLOSE_RANGE]);
+
+ if (tb[NL80211_NAN_FUNC_SERVICE_INFO]) {
+ func->serv_spec_info_len =
+ nla_len(tb[NL80211_NAN_FUNC_SERVICE_INFO]);
+ func->serv_spec_info =
+ kmemdup(nla_data(tb[NL80211_NAN_FUNC_SERVICE_INFO]),
+ func->serv_spec_info_len,
+ GFP_KERNEL);
+ if (!func->serv_spec_info) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (tb[NL80211_NAN_FUNC_TTL])
+ func->ttl = nla_get_u32(tb[NL80211_NAN_FUNC_TTL]);
+
+ switch (func->type) {
+ case NL80211_NAN_FUNC_PUBLISH:
+ if (!tb[NL80211_NAN_FUNC_PUBLISH_TYPE]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ func->publish_type =
+ nla_get_u8(tb[NL80211_NAN_FUNC_PUBLISH_TYPE]);
+ func->publish_bcast =
+ nla_get_flag(tb[NL80211_NAN_FUNC_PUBLISH_BCAST]);
+
+ if ((!(func->publish_type & NL80211_NAN_SOLICITED_PUBLISH)) &&
+ func->publish_bcast) {
+ err = -EINVAL;
+ goto out;
+ }
+ break;
+ case NL80211_NAN_FUNC_SUBSCRIBE:
+ func->subscribe_active =
+ nla_get_flag(tb[NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE]);
+ break;
+ case NL80211_NAN_FUNC_FOLLOW_UP:
+ if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
+ !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ func->followup_id =
+ nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_ID]);
+ func->followup_reqid =
+ nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]);
+ memcpy(func->followup_dest.addr,
+ nla_data(tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]),
+ sizeof(func->followup_dest.addr));
+ if (func->ttl) {
+ err = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (tb[NL80211_NAN_FUNC_SRF]) {
+ struct nlattr *srf_tb[NUM_NL80211_NAN_SRF_ATTR];
+
+ err = nla_parse(srf_tb, NL80211_NAN_SRF_ATTR_MAX,
+ nla_data(tb[NL80211_NAN_FUNC_SRF]),
+ nla_len(tb[NL80211_NAN_FUNC_SRF]), NULL);
+ if (err)
+ goto out;
+
+ func->srf_include =
+ nla_get_flag(srf_tb[NL80211_NAN_SRF_INCLUDE]);
+
+ if (srf_tb[NL80211_NAN_SRF_BF]) {
+ if (srf_tb[NL80211_NAN_SRF_MAC_ADDRS] ||
+ !srf_tb[NL80211_NAN_SRF_BF_IDX]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ func->srf_bf_len =
+ nla_len(srf_tb[NL80211_NAN_SRF_BF]);
+ func->srf_bf =
+ kmemdup(nla_data(srf_tb[NL80211_NAN_SRF_BF]),
+ func->srf_bf_len, GFP_KERNEL);
+ if (!func->srf_bf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ func->srf_bf_idx =
+ nla_get_u8(srf_tb[NL80211_NAN_SRF_BF_IDX]);
+ } else {
+ struct nlattr *attr, *mac_attr =
+ srf_tb[NL80211_NAN_SRF_MAC_ADDRS];
+ int n_entries, rem, i = 0;
+
+ if (!mac_attr) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ n_entries = validate_acl_mac_addrs(mac_attr);
+ if (n_entries <= 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ func->srf_num_macs = n_entries;
+ func->srf_macs =
+ kzalloc(sizeof(*func->srf_macs) * n_entries,
+ GFP_KERNEL);
+ if (!func->srf_macs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ nla_for_each_nested(attr, mac_attr, rem)
+ memcpy(func->srf_macs[i++].addr, nla_data(attr),
+ sizeof(*func->srf_macs));
+ }
+ }
+
+ if (tb[NL80211_NAN_FUNC_TX_MATCH_FILTER]) {
+ err = handle_nan_filter(tb[NL80211_NAN_FUNC_TX_MATCH_FILTER],
+ func, true);
+ if (err)
+ goto out;
+ }
+
+ if (tb[NL80211_NAN_FUNC_RX_MATCH_FILTER]) {
+ err = handle_nan_filter(tb[NL80211_NAN_FUNC_RX_MATCH_FILTER],
+ func, false);
+ if (err)
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_ADD_NAN_FUNCTION);
+ /* This can't really happen - we just allocated 4KB */
+ if (WARN_ON(!hdr)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = rdev_add_nan_func(rdev, wdev, func);
+out:
+ if (err < 0) {
+ cfg80211_free_nan_func(func);
+ nlmsg_free(msg);
+ return err;
+ }
+
+ /* propagate the instance id and cookie to userspace */
+ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, func->cookie,
+ NL80211_ATTR_PAD))
+ goto nla_put_failure;
+
+ func_attr = nla_nest_start(msg, NL80211_ATTR_NAN_FUNC);
+ if (!func_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID,
+ func->instance_id))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, func_attr);
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+static int nl80211_nan_del_func(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ u64 cookie;
+
+ if (wdev->iftype != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (!wdev->nan_started)
+ return -ENOTCONN;
+
+ if (!info->attrs[NL80211_ATTR_COOKIE])
+ return -EINVAL;
+
+ if (wdev->owner_nlportid &&
+ wdev->owner_nlportid != info->snd_portid)
+ return -ENOTCONN;
+
+ cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
+
+ rdev_del_nan_func(rdev, wdev, cookie);
+
+ return 0;
+}
+
+static int nl80211_nan_change_config(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ struct cfg80211_nan_conf conf = {};
+ u32 changed = 0;
+
+ if (wdev->iftype != NL80211_IFTYPE_NAN)
+ return -EOPNOTSUPP;
+
+ if (!wdev->nan_started)
+ return -ENOTCONN;
+
+ if (info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) {
+ conf.master_pref =
+ nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
+ if (conf.master_pref <= 1 || conf.master_pref == 255)
+ return -EINVAL;
+
+ changed |= CFG80211_NAN_CONF_CHANGED_PREF;
+ }
+
+ if (info->attrs[NL80211_ATTR_NAN_DUAL]) {
+ conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]);
+ changed |= CFG80211_NAN_CONF_CHANGED_DUAL;
+ }
+
+ if (!changed)
+ return -EINVAL;
+
+ return rdev_nan_change_conf(rdev, wdev, &conf, changed);
+}
+
+void cfg80211_nan_match(struct wireless_dev *wdev,
+ struct cfg80211_nan_match_params *match, gfp_t gfp)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct nlattr *match_attr, *local_func_attr, *peer_func_attr;
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (WARN_ON(!match->inst_id || !match->peer_inst_id || !match->addr))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_MATCH);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, match->cookie,
+ NL80211_ATTR_PAD) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, match->addr))
+ goto nla_put_failure;
+
+ match_attr = nla_nest_start(msg, NL80211_ATTR_NAN_MATCH);
+ if (!match_attr)
+ goto nla_put_failure;
+
+ local_func_attr = nla_nest_start(msg, NL80211_NAN_MATCH_FUNC_LOCAL);
+ if (!local_func_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->inst_id))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, local_func_attr);
+
+ peer_func_attr = nla_nest_start(msg, NL80211_NAN_MATCH_FUNC_PEER);
+ if (!peer_func_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_NAN_FUNC_TYPE, match->type) ||
+ nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->peer_inst_id))
+ goto nla_put_failure;
+
+ if (match->info && match->info_len &&
+ nla_put(msg, NL80211_NAN_FUNC_SERVICE_INFO, match->info_len,
+ match->info))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, peer_func_attr);
+ nla_nest_end(msg, match_attr);
+ genlmsg_end(msg, hdr);
+
+ if (!wdev->owner_nlportid)
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
+ msg, 0, NL80211_MCGRP_NAN, gfp);
+ else
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg,
+ wdev->owner_nlportid);
+
+ return;
+
+nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_nan_match);
+
+void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ u64 cookie, gfp_t gfp)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ struct nlattr *func_attr;
+ void *hdr;
+
+ if (WARN_ON(!inst_id))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_NAN_FUNCTION);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+ NL80211_ATTR_PAD))
+ goto nla_put_failure;
+
+ func_attr = nla_nest_start(msg, NL80211_ATTR_NAN_FUNC);
+ if (!func_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, inst_id) ||
+ nla_put_u8(msg, NL80211_NAN_FUNC_TERM_REASON, reason))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, func_attr);
+ genlmsg_end(msg, hdr);
+
+ if (!wdev->owner_nlportid)
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
+ msg, 0, NL80211_MCGRP_NAN, gfp);
+ else
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg,
+ wdev->owner_nlportid);
+
+ return;
+
+nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_nan_func_terminated);
+
static int nl80211_get_protocol_features(struct sk_buff *skb,
struct genl_info *info)
{
@@ -11063,7 +11794,14 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
dev_hold(dev);
} else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
- if (!wdev->p2p_started) {
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
+ !wdev->p2p_started) {
+ if (rtnl)
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+ if (wdev->iftype == NL80211_IFTYPE_NAN &&
+ !wdev->nan_started) {
if (rtnl)
rtnl_unlock();
return -ENETDOWN;
@@ -11697,6 +12435,46 @@ static const struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_START_NAN,
+ .doit = nl80211_start_nan,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_STOP_NAN,
+ .doit = nl80211_stop_nan,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_ADD_NAN_FUNCTION,
+ .doit = nl80211_nan_add_func,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_DEL_NAN_FUNCTION,
+ .doit = nl80211_nan_del_func,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_CHANGE_NAN_CONFIG,
+ .doit = nl80211_nan_change_config,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_SET_MCAST_RATE,
.doit = nl80211_set_mcast_rate,
.policy = nl80211_policy,
@@ -11847,6 +12625,29 @@ void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
NL80211_MCGRP_CONFIG, GFP_KERNEL);
}
+void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_commands cmd)
+{
+ struct sk_buff *msg;
+
+ WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE &&
+ cmd != NL80211_CMD_DEL_INTERFACE);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev,
+ cmd == NL80211_CMD_DEL_INTERFACE) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ NL80211_MCGRP_CONFIG, GFP_KERNEL);
+}
+
static int nl80211_add_scan_req(struct sk_buff *msg,
struct cfg80211_registered_device *rdev)
{
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index a63f402b10b7..7e3821d7fcc5 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -7,6 +7,9 @@ int nl80211_init(void);
void nl80211_exit(void);
void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
enum nl80211_commands cmd);
+void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_commands cmd);
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 85ff30bee2b9..11cf83c8ad4f 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -887,6 +887,64 @@ static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
trace_rdev_return_void(&rdev->wiphy);
}
+static inline int rdev_start_nan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf)
+{
+ int ret;
+
+ trace_rdev_start_nan(&rdev->wiphy, wdev, conf);
+ ret = rdev->ops->start_nan(&rdev->wiphy, wdev, conf);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline void rdev_stop_nan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ trace_rdev_stop_nan(&rdev->wiphy, wdev);
+ rdev->ops->stop_nan(&rdev->wiphy, wdev);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline int
+rdev_add_nan_func(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func)
+{
+ int ret;
+
+ trace_rdev_add_nan_func(&rdev->wiphy, wdev, nan_func);
+ ret = rdev->ops->add_nan_func(&rdev->wiphy, wdev, nan_func);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline void rdev_del_nan_func(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, u64 cookie)
+{
+ trace_rdev_del_nan_func(&rdev->wiphy, wdev, cookie);
+ rdev->ops->del_nan_func(&rdev->wiphy, wdev, cookie);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
+static inline int
+rdev_nan_change_conf(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf, u32 changes)
+{
+ int ret;
+
+ trace_rdev_nan_change_conf(&rdev->wiphy, wdev, conf, changes);
+ if (rdev->ops->nan_change_conf)
+ ret = rdev->ops->nan_change_conf(&rdev->wiphy, wdev, conf,
+ changes);
+ else
+ ret = -ENOTSUPP;
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_acl_data *params)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0358e12be54b..b5bd58d0f731 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -352,52 +352,48 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
__cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
}
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset)
{
- while (len > 2 && ies[0] != eid) {
+ /* match_offset can't be smaller than 2, unless match_len is
+ * zero, in which case match_offset must be zero as well.
+ */
+ if (WARN_ON((match_len && match_offset < 2) ||
+ (!match_len && match_offset)))
+ return NULL;
+
+ while (len >= 2 && len >= ies[1] + 2) {
+ if ((ies[0] == eid) &&
+ (ies[1] + 2 >= match_offset + match_len) &&
+ !memcmp(ies + match_offset, match, match_len))
+ return ies;
+
len -= ies[1] + 2;
ies += ies[1] + 2;
}
- if (len < 2)
- return NULL;
- if (len < 2 + ies[1])
- return NULL;
- return ies;
+
+ return NULL;
}
-EXPORT_SYMBOL(cfg80211_find_ie);
+EXPORT_SYMBOL(cfg80211_find_ie_match);
const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len)
{
- struct ieee80211_vendor_ie *ie;
- const u8 *pos = ies, *end = ies + len;
- int ie_oui;
+ const u8 *ie;
+ u8 match[] = { oui >> 16, oui >> 8, oui, oui_type };
+ int match_len = (oui_type < 0) ? 3 : sizeof(match);
if (WARN_ON(oui_type > 0xff))
return NULL;
- while (pos < end) {
- pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
- end - pos);
- if (!pos)
- return NULL;
-
- ie = (struct ieee80211_vendor_ie *)pos;
-
- /* make sure we can access ie->len */
- BUILD_BUG_ON(offsetof(struct ieee80211_vendor_ie, len) != 1);
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
+ match, match_len, 2);
- if (ie->len < sizeof(*ie))
- goto cont;
+ if (ie && (ie[1] < 4))
+ return NULL;
- ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
- if (ie_oui == oui &&
- (oui_type < 0 || ie->oui_type == oui_type))
- return pos;
-cont:
- pos += 2 + ie->len;
- }
- return NULL;
+ return ie;
}
EXPORT_SYMBOL(cfg80211_find_vendor_ie);
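
With cfg80211_find_ie_match() doing the generic walk, the vendor-IE lookup
reduces to matching the three OUI octets (plus the type byte when oui_type is
non-negative) at offset 2 of each element. A standalone userspace sketch of
the same TLV walk, simplified to a fixed match offset of 2 and shown for
illustration only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const uint8_t *find_ie_match(uint8_t eid, const uint8_t *ies, int len,
                                        const uint8_t *match, int match_len)
    {
            /* elements are (id, len, data...); match starts after the header */
            while (len >= 2 && len >= ies[1] + 2) {
                    if (ies[0] == eid && ies[1] >= match_len &&
                        !memcmp(ies + 2, match, match_len))
                            return ies;
                    len -= ies[1] + 2;
                    ies += ies[1] + 2;
            }
            return NULL;
    }

    int main(void)
    {
            /* SSID "x", then a vendor element (id 221) with OUI 00:50:f2, type 2 */
            const uint8_t ies[] = { 0, 1, 'x',
                                    221, 7, 0x00, 0x50, 0xf2, 0x02, 0x01, 0x01, 0x00 };
            const uint8_t oui_type[] = { 0x00, 0x50, 0xf2, 0x02 };
            const uint8_t *ie = find_ie_match(221, ies, sizeof(ies),
                                              oui_type, sizeof(oui_type));

            printf("%s\n", ie ? "found vendor IE" : "not found");
            return 0;
    }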
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index add6824c44fd..a77db333927e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -726,7 +726,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
wdev->current_bss = bss_from_pub(bss);
- cfg80211_upload_connect_keys(wdev);
+ if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
+ cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
@@ -1043,6 +1044,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
connect->crypto.ciphers_pairwise[0] = cipher;
}
}
+
+ connect->crypto.wep_keys = connkeys->params;
+ connect->crypto.wep_tx_key = connkeys->def;
+ } else {
+ if (WARN_ON(connkeys))
+ return -EINVAL;
}
wdev->connect_keys = connkeys;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index e46469bc130f..0082f4b01795 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -57,7 +57,7 @@ static ssize_t addresses_show(struct device *dev,
return sprintf(buf, "%pM\n", wiphy->perm_addr);
for (i = 0; i < wiphy->n_addresses; i++)
- buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
+ buf += sprintf(buf, "%pM\n", wiphy->addresses[i].addr);
return buf - start;
}
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 72b5255cefe2..a3d0a91b1e09 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1889,6 +1889,96 @@ DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
TP_ARGS(wiphy, wdev)
);
+TRACE_EVENT(rdev_start_nan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf),
+ TP_ARGS(wiphy, wdev, conf),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u8, master_pref)
+ __field(u8, dual);
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->master_pref = conf->master_pref;
+ __entry->dual = conf->dual;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", master preference: %u, dual: %d",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref,
+ __entry->dual)
+);
+
+TRACE_EVENT(rdev_nan_change_conf,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf, u32 changes),
+ TP_ARGS(wiphy, wdev, conf, changes),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u8, master_pref)
+ __field(u8, dual);
+ __field(u32, changes);
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->master_pref = conf->master_pref;
+ __entry->dual = conf->dual;
+ __entry->changes = changes;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", master preference: %u, dual: %d, changes: %x",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref,
+ __entry->dual, __entry->changes)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_nan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_add_nan_func,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const struct cfg80211_nan_func *func),
+ TP_ARGS(wiphy, wdev, func),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u8, func_type)
+ __field(u64, cookie)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->func_type = func->type;
+ __entry->cookie = func->cookie;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type=%u, cookie=%llu",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->func_type,
+ __entry->cookie)
+);
+
+TRACE_EVENT(rdev_del_nan_func,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie),
+ TP_ARGS(wiphy, wdev, cookie),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u64, cookie)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->cookie = cookie;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie=%llu",
+ WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie)
+);
+
TRACE_EVENT(rdev_set_mac_acl,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_acl_data *params),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index b7d1592bd5b8..8edce22d1b93 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -218,7 +218,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr)
{
- if (key_idx > 5)
+ if (key_idx < 0 || key_idx > 5)
return -EINVAL;
if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -249,7 +249,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
/* Disallow BIP (group-only) cipher as pairwise cipher */
if (pairwise)
return -EINVAL;
+ if (key_idx < 4)
+ return -EINVAL;
break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (key_idx > 3)
+ return -EINVAL;
default:
break;
}
@@ -906,7 +912,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
if (!wdev->connect_keys)
return;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) {
if (!wdev->connect_keys->params[i].cipher)
continue;
if (rdev_add_key(rdev, dev, i, false, NULL,
@@ -919,9 +925,6 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
netdev_err(dev, "failed to set defkey %d\n", i);
continue;
}
- if (wdev->connect_keys->defmgmt == i)
- if (rdev_set_default_mgmt_key(rdev, dev, i))
- netdev_err(dev, "failed to set mgtdef %d\n", i);
}
kzfree(wdev->connect_keys);
@@ -1005,8 +1008,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
if (otype == NL80211_IFTYPE_AP_VLAN)
return -EOPNOTSUPP;
- /* cannot change into P2P device type */
- if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+ /* cannot change into P2P device or NAN */
+ if (ntype == NL80211_IFTYPE_P2P_DEVICE ||
+ ntype == NL80211_IFTYPE_NAN)
return -EOPNOTSUPP;
if (!rdev->ops->change_virtual_intf ||
@@ -1085,6 +1089,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
/* not happening */
break;
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
WARN_ON(1);
break;
}
@@ -1559,7 +1564,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev;
int res = 0;
- if (!beacon_int)
+ if (beacon_int < 10 || beacon_int > 10000)
return -EINVAL;
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
@@ -1757,6 +1762,28 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
}
EXPORT_SYMBOL(cfg80211_get_station);
+void cfg80211_free_nan_func(struct cfg80211_nan_func *f)
+{
+ int i;
+
+ if (!f)
+ return;
+
+ kfree(f->serv_spec_info);
+ kfree(f->srf_bf);
+ kfree(f->srf_macs);
+ for (i = 0; i < f->num_rx_filters; i++)
+ kfree(f->rx_filters[i].filter);
+
+ for (i = 0; i < f->num_tx_filters; i++)
+ kfree(f->tx_filters[i].filter);
+
+ kfree(f->rx_filters);
+ kfree(f->tx_filters);
+ kfree(f);
+}
+EXPORT_SYMBOL(cfg80211_free_nan_func);
+
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
const unsigned char rfc1042_header[] __aligned(2) =
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 9f27221c8913..a220156cf217 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -406,12 +406,16 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (pairwise && !addr)
return -EINVAL;
+ /*
+ * In many cases we won't actually need this, but it's better
+ * to do it first in case the allocation fails. Don't use wext.
+ */
if (!wdev->wext.keys) {
wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!wdev->wext.keys)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
wdev->wext.keys->params[i].key =
wdev->wext.keys->data[i];
}
@@ -460,7 +464,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (err == -ENOENT)
err = 0;
if (!err) {
- if (!addr) {
+ if (!addr && idx < 4) {
memset(wdev->wext.keys->data[idx], 0,
sizeof(wdev->wext.keys->data[idx]));
wdev->wext.keys->params[idx].key_len = 0;
@@ -487,10 +491,19 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
err = 0;
if (wdev->current_bss)
err = rdev_add_key(rdev, dev, idx, pairwise, addr, params);
+ else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ params->cipher != WLAN_CIPHER_SUITE_WEP104)
+ return -EINVAL;
if (err)
return err;
- if (!addr) {
+ /*
+ * We only need to store WEP keys, since they're the only keys that
+ * can be set before a connection is established and persist after
+ * disconnecting.
+ */
+ if (!addr && (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
wdev->wext.keys->params[idx] = *params;
memcpy(wdev->wext.keys->data[idx],
params->key, params->key_len);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index a4e8af3321d2..995163830a61 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -35,7 +35,6 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
if (wdev->wext.keys) {
wdev->wext.keys->def = wdev->wext.default_key;
- wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
if (wdev->wext.default_key != -1)
wdev->wext.connect.privacy = true;
}
@@ -43,11 +42,11 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
if (!wdev->wext.connect.ssid_len)
return 0;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++)
ck->params[i].key = ck->data[i];
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a750f330b8dd..f83b74d3e2ac 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1500,12 +1500,8 @@ out_fac_release:
goto out_dtefac_release;
if (dtefacs.calling_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
- if (dtefacs.calling_ae == NULL)
- goto out_dtefac_release;
if (dtefacs.called_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
- if (dtefacs.called_ae == NULL)
- goto out_dtefac_release;
x25->dte_facilities = dtefacs;
rc = 0;
out_dtefac_release:
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 250e567ba3d6..44ac85fe2bc9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -17,7 +17,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
-#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
+#if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP)
#include <net/esp.h>
#endif
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 45f9cf97ea25..fd6986634e6f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -49,6 +49,7 @@ static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
__read_mostly;
static struct kmem_cache *xfrm_dst_cache __read_mostly;
+static __read_mostly seqcount_t xfrm_policy_hash_generation;
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
@@ -59,6 +60,11 @@ static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
+static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
+{
+ return atomic_inc_not_zero(&policy->refcnt);
+}
+
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
@@ -385,9 +391,11 @@ static struct hlist_head *policy_hash_bysel(struct net *net,
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __sel_hash(sel, family, hmask, dbits, sbits);
- return (hash == hmask + 1 ?
- &net->xfrm.policy_inexact[dir] :
- net->xfrm.policy_bydst[dir].table + hash);
+ if (hash == hmask + 1)
+ return &net->xfrm.policy_inexact[dir];
+
+ return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static struct hlist_head *policy_hash_direct(struct net *net,
@@ -403,7 +411,8 @@ static struct hlist_head *policy_hash_direct(struct net *net,
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
- return net->xfrm.policy_bydst[dir].table + hash;
+ return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static void xfrm_dst_hash_transfer(struct net *net,
@@ -426,14 +435,14 @@ redo:
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask, dbits, sbits);
if (!entry0) {
- hlist_del(&pol->bydst);
- hlist_add_head(&pol->bydst, ndsttable+h);
+ hlist_del_rcu(&pol->bydst);
+ hlist_add_head_rcu(&pol->bydst, ndsttable + h);
h0 = h;
} else {
if (h != h0)
continue;
- hlist_del(&pol->bydst);
- hlist_add_behind(&pol->bydst, entry0);
+ hlist_del_rcu(&pol->bydst);
+ hlist_add_behind_rcu(&pol->bydst, entry0);
}
entry0 = &pol->bydst;
}
@@ -468,22 +477,32 @@ static void xfrm_bydst_resize(struct net *net, int dir)
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
- struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
struct hlist_head *ndst = xfrm_hash_alloc(nsize);
+ struct hlist_head *odst;
int i;
if (!ndst)
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ write_seqcount_begin(&xfrm_policy_hash_generation);
+
+ odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock));
+
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
- net->xfrm.policy_bydst[dir].table = ndst;
+ rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
net->xfrm.policy_bydst[dir].hmask = nhashmask;
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ write_seqcount_end(&xfrm_policy_hash_generation);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+ synchronize_rcu();
xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
@@ -500,7 +519,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
if (!nidx)
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
@@ -508,7 +527,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
net->xfrm.policy_byidx = nidx;
net->xfrm.policy_idx_hmask = nhashmask;
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
@@ -541,7 +560,6 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
@@ -550,7 +568,6 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
@@ -600,7 +617,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
rbits6 = net->xfrm.policy_hthresh.rbits6;
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
/* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
@@ -646,7 +663,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
hlist_add_head(&policy->bydst, chain);
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
}
@@ -757,7 +774,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct hlist_head *chain;
struct hlist_node *newpos;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
@@ -768,7 +785,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return -EEXIST;
}
delpol = pol;
@@ -804,7 +821,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
@@ -824,7 +841,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
struct hlist_head *chain;
*err = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
ret = NULL;
hlist_for_each_entry(pol, chain, bydst) {
@@ -837,7 +854,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -846,7 +863,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -865,7 +882,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
return NULL;
*err = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
@@ -876,7 +893,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -885,7 +902,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -943,7 +960,7 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
int dir, err = 0, cnt = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_policy_flush_secctx_check(net, type, task_valid);
if (err)
@@ -959,14 +976,14 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again1;
}
@@ -978,13 +995,13 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again2;
}
}
@@ -993,7 +1010,7 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (!cnt)
err = -ESRCH;
out:
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
@@ -1013,7 +1030,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
if (list_empty(&walk->walk.all) && walk->seq != 0)
return 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
if (list_empty(&walk->walk.all))
x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
else
@@ -1041,7 +1058,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
}
list_del_init(&walk->walk.all);
out:
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
@@ -1060,9 +1077,9 @@ void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
if (list_empty(&walk->walk.all))
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
list_del(&walk->walk.all);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
@@ -1100,17 +1117,24 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
struct xfrm_policy *pol, *ret;
const xfrm_address_t *daddr, *saddr;
struct hlist_head *chain;
- u32 priority = ~0U;
+ unsigned int sequence;
+ u32 priority;
daddr = xfrm_flowi_daddr(fl, family);
saddr = xfrm_flowi_saddr(fl, family);
if (unlikely(!daddr || !saddr))
return NULL;
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ rcu_read_lock();
+ retry:
+ do {
+ sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
+
+ priority = ~0U;
ret = NULL;
- hlist_for_each_entry(pol, chain, bydst) {
+ hlist_for_each_entry_rcu(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -1126,7 +1150,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
}
chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst) {
+ hlist_for_each_entry_rcu(pol, chain, bydst) {
if ((pol->priority >= priority) && ret)
break;
@@ -1144,9 +1168,13 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
}
- xfrm_pol_hold(ret);
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
+ goto retry;
+
+ if (ret && !xfrm_pol_hold_rcu(ret))
+ goto retry;
fail:
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1223,10 +1251,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
const struct flowi *fl)
{
struct xfrm_policy *pol;
- struct net *net = sock_net(sk);
rcu_read_lock();
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
+ again:
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
bool match = xfrm_selector_match(&pol->selector, fl,
@@ -1241,8 +1268,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
err = security_xfrm_policy_lookup(pol->security,
fl->flowi_secid,
policy_to_flow_dir(dir));
- if (!err)
- xfrm_pol_hold(pol);
+ if (!err && !xfrm_pol_hold_rcu(pol))
+ goto again;
else if (err == -ESRCH)
pol = NULL;
else
@@ -1251,7 +1278,6 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
pol = NULL;
}
out:
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
rcu_read_unlock();
return pol;
}
@@ -1275,7 +1301,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
- hlist_del(&pol->bydst);
+ hlist_del_rcu(&pol->bydst);
hlist_del(&pol->byidx);
}
@@ -1299,9 +1325,9 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
xfrm_policy_kill(pol);
return 0;
@@ -1320,7 +1346,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return -EINVAL;
#endif
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
old_pol = rcu_dereference_protected(sk->sk_policy[dir],
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
if (pol) {
@@ -1338,7 +1364,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
*/
xfrm_sk_policy_unlink(old_pol, dir);
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (old_pol) {
xfrm_policy_kill(old_pol);
@@ -1368,9 +1394,9 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->type = old->type;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_sk_policy_link(newp, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
return newp;
@@ -3052,7 +3078,7 @@ static int __net_init xfrm_net_init(struct net *net)
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
- rwlock_init(&net->xfrm.xfrm_policy_lock);
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
return 0;
@@ -3086,6 +3112,7 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
void __init xfrm_init(void)
{
register_pernet_subsys(&xfrm_net_ops);
+ seqcount_init(&xfrm_policy_hash_generation);
xfrm_input_init();
}
@@ -3183,7 +3210,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
struct hlist_head *chain;
u32 priority = ~0U;
- read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
@@ -3207,7 +3234,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
xfrm_pol_hold(ret);
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 9c4fbd8935f4..ba2b539879bc 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -50,12 +50,18 @@ static const struct snmp_mib xfrm_mib_list[] = {
static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
{
+ unsigned long buff[LINUX_MIB_XFRMMAX];
struct net *net = seq->private;
int i;
+
+ memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+
+ snmp_get_cpu_field_batch(buff, xfrm_mib_list,
+ net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
- snmp_fold_field(net->mib.xfrm_statistics,
- xfrm_mib_list[i].entry));
+ buff[i]);
+
return 0;
}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 4fd725a0c500..cdc2e2e71bff 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -558,7 +558,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
-static struct xfrm_replay xfrm_replay_legacy = {
+static const struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
.recheck = xfrm_replay_check,
@@ -566,7 +566,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
.overflow = xfrm_replay_overflow,
};
-static struct xfrm_replay xfrm_replay_bmp = {
+static const struct xfrm_replay xfrm_replay_bmp = {
.advance = xfrm_replay_advance_bmp,
.check = xfrm_replay_check_bmp,
.recheck = xfrm_replay_check_bmp,
@@ -574,7 +574,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
.overflow = xfrm_replay_overflow_bmp,
};
-static struct xfrm_replay xfrm_replay_esn = {
+static const struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
.recheck = xfrm_replay_recheck_esn,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a30f898dc1c5..419bf5d463bd 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -28,6 +28,11 @@
#include "xfrm_hash.h"
+#define xfrm_state_deref_prot(table, net) \
+ rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+
+static void xfrm_state_gc_task(struct work_struct *work);
+
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
@@ -36,6 +41,15 @@
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
+static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
+
+static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+static HLIST_HEAD(xfrm_state_gc_list);
+
+static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
+{
+ return atomic_inc_not_zero(&x->refcnt);
+}
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
@@ -76,18 +90,18 @@ static void xfrm_hash_transfer(struct hlist_head *list,
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
- hlist_add_head(&x->bydst, ndsttable+h);
+ hlist_add_head_rcu(&x->bydst, ndsttable + h);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
- hlist_add_head(&x->bysrc, nsrctable+h);
+ hlist_add_head_rcu(&x->bysrc, nsrctable + h);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
- hlist_add_head(&x->byspi, nspitable+h);
+ hlist_add_head_rcu(&x->byspi, nspitable + h);
}
}
}
@@ -122,25 +136,29 @@ static void xfrm_hash_resize(struct work_struct *work)
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ write_seqcount_begin(&xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
+ odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
for (i = net->xfrm.state_hmask; i >= 0; i--)
- xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
- nhashmask);
+ xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
- odst = net->xfrm.state_bydst;
- osrc = net->xfrm.state_bysrc;
- ospi = net->xfrm.state_byspi;
+ osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
+ ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
ohashmask = net->xfrm.state_hmask;
- net->xfrm.state_bydst = ndst;
- net->xfrm.state_bysrc = nsrc;
- net->xfrm.state_byspi = nspi;
+ rcu_assign_pointer(net->xfrm.state_bydst, ndst);
+ rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
+ rcu_assign_pointer(net->xfrm.state_byspi, nspi);
net->xfrm.state_hmask = nhashmask;
+ write_seqcount_end(&xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
+
+ synchronize_rcu();
+
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
@@ -356,15 +374,16 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
static void xfrm_state_gc_task(struct work_struct *work)
{
- struct net *net = container_of(work, struct net, xfrm.state_gc_work);
struct xfrm_state *x;
struct hlist_node *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
- hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
+ hlist_move_list(&xfrm_state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
+ synchronize_rcu();
+
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
}
@@ -501,14 +520,12 @@ EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
- struct net *net = xs_net(x);
-
WARN_ON(x->km.state != XFRM_STATE_DEAD);
spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&net->xfrm.state_gc_work);
+ schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
@@ -521,10 +538,10 @@ int __xfrm_state_delete(struct xfrm_state *x)
x->km.state = XFRM_STATE_DEAD;
spin_lock(&net->xfrm.xfrm_state_lock);
list_del(&x->km.all);
- hlist_del(&x->bydst);
- hlist_del(&x->bysrc);
+ hlist_del_rcu(&x->bydst);
+ hlist_del_rcu(&x->bysrc);
if (x->id.spi)
- hlist_del(&x->byspi);
+ hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
spin_unlock(&net->xfrm.xfrm_state_lock);
@@ -660,7 +677,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
- hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
@@ -669,7 +686,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
if ((mark & x->mark.m) != x->mark.v)
continue;
- xfrm_state_hold(x);
+ if (!xfrm_state_hold_rcu(x))
+ continue;
return x;
}
@@ -684,7 +702,7 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
- hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -693,7 +711,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
if ((mark & x->mark.m) != x->mark.v)
continue;
- xfrm_state_hold(x);
+ if (!xfrm_state_hold_rcu(x))
+ continue;
return x;
}
@@ -776,13 +795,16 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
+ unsigned int sequence;
struct km_event c;
to_put = NULL;
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+
+ rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -798,7 +820,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -851,19 +873,21 @@ found:
}
if (km_query(x, tmpl, pol) == 0) {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, encap_family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
} else {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
@@ -872,13 +896,26 @@ found:
}
}
out:
- if (x)
- xfrm_state_hold(x);
- else
+ if (x) {
+ if (!xfrm_state_hold_rcu(x)) {
+ *err = -EAGAIN;
+ x = NULL;
+ }
+ } else {
*err = acquire_in_progress ? -EAGAIN : error;
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+ rcu_read_unlock();
if (to_put)
xfrm_state_put(to_put);
+
+ if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+ *err = -EAGAIN;
+ if (x) {
+ xfrm_state_put(x);
+ x = NULL;
+ }
+ }
+
return x;
}
@@ -946,16 +983,16 @@ static void __xfrm_state_insert(struct xfrm_state *x)
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
x->props.family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
@@ -1064,9 +1101,9 @@ static struct xfrm_state *__find_acq_core(struct net *net,
xfrm_state_hold(x);
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
list_add(&x->km.all, &net->xfrm.state_all);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
net->xfrm.state_num++;
@@ -1395,9 +1432,9 @@ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32
{
struct xfrm_state *x;
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ rcu_read_lock();
x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ rcu_read_unlock();
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
@@ -1582,7 +1619,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
if (x->id.spi) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = 0;
@@ -2100,8 +2137,6 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
- INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
- INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
spin_lock_init(&net->xfrm.xfrm_state_lock);
return 0;
@@ -2119,7 +2154,7 @@ void xfrm_state_fini(struct net *net)
flush_work(&net->xfrm.state_hash_work);
xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
- flush_work(&net->xfrm.state_gc_work);
+ flush_work(&xfrm_state_gc_work);
WARN_ON(!list_empty(&net->xfrm.state_all));
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05a6e3d9c258..35a7e794ad04 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -17,13 +17,13 @@ static struct ctl_table xfrm_table[] = {
.procname = "xfrm_aevent_etime",
.maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_douintvec
},
{
.procname = "xfrm_aevent_rseqth",
.maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_douintvec
},
{
.procname = "xfrm_larval_drop",
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 90ebf7d35c07..12b7304d55dc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -24,6 +24,9 @@ hostprogs-y += test_overhead
hostprogs-y += test_cgrp2_array_pin
hostprogs-y += xdp1
hostprogs-y += xdp2
+hostprogs-y += test_current_task_under_cgroup
+hostprogs-y += trace_event
+hostprogs-y += sampleip
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
@@ -49,6 +52,10 @@ test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
+test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
+ test_current_task_under_cgroup_user.o
+trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
+sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -64,6 +71,7 @@ always += tracex6_kern.o
always += test_probe_write_user_kern.o
always += trace_output_kern.o
always += tcbpf1_kern.o
+always += tcbpf2_kern.o
always += lathist_kern.o
always += offwaketime_kern.o
always += spintest_kern.o
@@ -74,6 +82,9 @@ always += parse_varlen.o parse_simple.o parse_ldabs.o
always += test_cgrp2_tc_kern.o
always += xdp1_kern.o
always += xdp2_kern.o
+always += test_current_task_under_cgroup_kern.o
+always += trace_event_kern.o
+always += sampleip_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -97,6 +108,9 @@ HOSTLOADLIBES_map_perf_test += -lelf -lrt
HOSTLOADLIBES_test_overhead += -lelf -lrt
HOSTLOADLIBES_xdp1 += -lelf
HOSTLOADLIBES_xdp2 += -lelf
+HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
+HOSTLOADLIBES_trace_event += -lelf
+HOSTLOADLIBES_sampleip += -lelf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 7927a090fa0d..90f44bd2045e 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,12 +37,26 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
-static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+static int (*bpf_perf_event_output)(void *ctx, void *map,
+ unsigned long long flags, void *data,
+ int size) =
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
(void *) BPF_FUNC_get_stackid;
static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
(void *) BPF_FUNC_probe_write_user;
+static int (*bpf_current_task_under_cgroup)(void *map, int index) =
+ (void *) BPF_FUNC_current_task_under_cgroup;
+static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
+ (void *) BPF_FUNC_skb_get_tunnel_key;
+static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
+ (void *) BPF_FUNC_skb_set_tunnel_key;
+static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
+ (void *) BPF_FUNC_skb_get_tunnel_opt;
+static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
+ (void *) BPF_FUNC_skb_set_tunnel_opt;
+static unsigned long long (*bpf_get_prandom_u32)(void) =
+ (void *) BPF_FUNC_get_prandom_u32;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 0cfda2320320..97913e109b14 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -51,6 +51,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
bool is_xdp = strncmp(event, "xdp", 3) == 0;
+ bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
enum bpf_prog_type prog_type;
char buf[256];
int fd, efd, err, id;
@@ -69,6 +70,8 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_type = BPF_PROG_TYPE_TRACEPOINT;
} else if (is_xdp) {
prog_type = BPF_PROG_TYPE_XDP;
+ } else if (is_perf_event) {
+ prog_type = BPF_PROG_TYPE_PERF_EVENT;
} else {
printf("Unknown event '%s'\n", event);
return -1;
@@ -82,7 +85,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_fd[prog_cnt++] = fd;
- if (is_xdp)
+ if (is_xdp || is_perf_event)
return 0;
if (is_socket) {
@@ -326,6 +329,7 @@ int load_bpf_file(char *path)
memcmp(shname_prog, "kretprobe/", 10) == 0 ||
memcmp(shname_prog, "tracepoint/", 11) == 0 ||
memcmp(shname_prog, "xdp", 3) == 0 ||
+ memcmp(shname_prog, "perf_event", 10) == 0 ||
memcmp(shname_prog, "socket", 6) == 0)
load_and_attach(shname_prog, insns, data_prog->d_size);
}
@@ -344,6 +348,7 @@ int load_bpf_file(char *path)
memcmp(shname, "kretprobe/", 10) == 0 ||
memcmp(shname, "tracepoint/", 11) == 0 ||
memcmp(shname, "xdp", 3) == 0 ||
+ memcmp(shname, "perf_event", 10) == 0 ||
memcmp(shname, "socket", 6) == 0)
load_and_attach(shname, data->d_buf, data->d_size);
}
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 364582b77888..ac6edb61b64a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -85,6 +85,14 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = IMM })
+#define BPF_MOV32_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
new file mode 100644
index 000000000000..774a681f374a
--- /dev/null
+++ b/samples/bpf/sampleip_kern.c
@@ -0,0 +1,38 @@
+/* Copyright 2016 Netflix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include "bpf_helpers.h"
+
+#define MAX_IPS 8192
+
+struct bpf_map_def SEC("maps") ip_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(u32),
+ .max_entries = MAX_IPS,
+};
+
+SEC("perf_event")
+int do_sample(struct bpf_perf_event_data *ctx)
+{
+ u64 ip;
+ u32 *value, init_val = 1;
+
+ ip = ctx->regs.ip;
+ value = bpf_map_lookup_elem(&ip_map, &ip);
+ if (value)
+ *value += 1;
+ else
+ /* E2BIG from bpf_map_update_elem() is not handled in this example */
+ bpf_map_update_elem(&ip_map, &ip, &init_val, BPF_NOEXIST);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
new file mode 100644
index 000000000000..260a6bdd6413
--- /dev/null
+++ b/samples/bpf/sampleip_user.c
@@ -0,0 +1,196 @@
+/*
+ * sampleip: sample instruction pointer and frequency count in a BPF map.
+ *
+ * Copyright 2016 Netflix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define DEFAULT_FREQ 99
+#define DEFAULT_SECS 5
+#define MAX_IPS 8192
+#define PAGE_OFFSET 0xffff880000000000
+
+static int nr_cpus;
+
+static void usage(void)
+{
+ printf("USAGE: sampleip [-F freq] [duration]\n");
+ printf(" -F freq # sample frequency (Hertz), default 99\n");
+ printf(" duration # sampling duration (seconds), default 5\n");
+}
+
+static int sampling_start(int *pmu_fd, int freq)
+{
+ int i;
+
+ struct perf_event_attr pe_sample_attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .freq = 1,
+ .sample_period = freq,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .inherit = 1,
+ };
+
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd[i] = perf_event_open(&pe_sample_attr, -1 /* pid */, i,
+ -1 /* group_fd */, 0 /* flags */);
+ if (pmu_fd[i] < 0) {
+ fprintf(stderr, "ERROR: Initializing perf sampling\n");
+ return 1;
+ }
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF,
+ prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ }
+
+ return 0;
+}
+
+static void sampling_end(int *pmu_fd)
+{
+ int i;
+
+ for (i = 0; i < nr_cpus; i++)
+ close(pmu_fd[i]);
+}
+
+struct ipcount {
+ __u64 ip;
+ __u32 count;
+};
+
+/* used for sorting */
+struct ipcount counts[MAX_IPS];
+
+static int count_cmp(const void *p1, const void *p2)
+{
+ return ((struct ipcount *)p1)->count - ((struct ipcount *)p2)->count;
+}
+
+static void print_ip_map(int fd)
+{
+ struct ksym *sym;
+ __u64 key, next_key;
+ __u32 value;
+ int i, max;
+
+ printf("%-19s %-32s %s\n", "ADDR", "KSYM", "COUNT");
+
+ /* fetch IPs and counts */
+ key = 0, i = 0;
+ while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_lookup_elem(fd, &next_key, &value);
+ counts[i].ip = next_key;
+ counts[i++].count = value;
+ key = next_key;
+ }
+ max = i;
+
+ /* sort and print */
+ qsort(counts, max, sizeof(struct ipcount), count_cmp);
+ for (i = 0; i < max; i++) {
+ if (counts[i].ip > PAGE_OFFSET) {
+ sym = ksym_search(counts[i].ip);
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
+ counts[i].count);
+ } else {
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",
+ counts[i].count);
+ }
+ }
+
+ if (max == MAX_IPS) {
+ printf("WARNING: IP hash was full (max %d entries); ", max);
+ printf("may have dropped samples\n");
+ }
+}
+
+static void int_exit(int sig)
+{
+ printf("\n");
+ print_ip_map(map_fd[0]);
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ int *pmu_fd, opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS;
+
+ /* process arguments */
+ while ((opt = getopt(argc, argv, "F:h")) != -1) {
+ switch (opt) {
+ case 'F':
+ freq = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage();
+ return 0;
+ }
+ }
+ if (argc - optind == 1)
+ secs = atoi(argv[optind]);
+ if (freq == 0 || secs == 0) {
+ usage();
+ return 1;
+ }
+
+ /* initialize kernel symbol translation */
+ if (load_kallsyms()) {
+ fprintf(stderr, "ERROR: loading /proc/kallsyms\n");
+ return 2;
+ }
+
+ /* create perf FDs for each CPU */
+ nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ pmu_fd = malloc(nr_cpus * sizeof(int));
+ if (pmu_fd == NULL) {
+ fprintf(stderr, "ERROR: malloc of pmu_fd\n");
+ return 1;
+ }
+
+ /* load BPF program */
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (load_bpf_file(filename)) {
+ fprintf(stderr, "ERROR: loading BPF program (errno %d):\n",
+ errno);
+ if (strcmp(bpf_log_buf, "") == 0)
+ fprintf(stderr, "Try: ulimit -l unlimited\n");
+ else
+ fprintf(stderr, "%s", bpf_log_buf);
+ return 1;
+ }
+ signal(SIGINT, int_exit);
+
+ /* do sampling */
+ printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
+ freq, secs);
+ if (sampling_start(pmu_fd, freq) != 0)
+ return 1;
+ sleep(secs);
+ sampling_end(pmu_fd);
+ free(pmu_fd);
+
+ /* output sample counts */
+ print_ip_map(map_fd[0]);
+
+ return 0;
+}
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
index ba0e177ff561..44e5846c988f 100644
--- a/samples/bpf/sockex2_kern.c
+++ b/samples/bpf/sockex2_kern.c
@@ -14,7 +14,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -59,7 +59,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct flow_keys *flow)
+ struct bpf_flow_keys *flow)
{
__u64 verlen;
@@ -83,7 +83,7 @@ static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto
}
static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct flow_keys *flow)
+ struct bpf_flow_keys *flow)
{
*ip_proto = load_byte(skb,
nhoff + offsetof(struct ipv6hdr, nexthdr));
@@ -96,7 +96,7 @@ static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_pro
return nhoff;
}
-static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow)
+static inline bool flow_dissector(struct __sk_buff *skb, struct bpf_flow_keys *flow)
{
__u64 nhoff = ETH_HLEN;
__u64 ip_proto;
@@ -198,7 +198,7 @@ struct bpf_map_def SEC("maps") hash_map = {
SEC("socket2")
int bpf_prog2(struct __sk_buff *skb)
{
- struct flow_keys flow;
+ struct bpf_flow_keys flow;
struct pair *value;
u32 key;
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 41ae2fd21b13..95907f8d2b17 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -61,7 +61,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -88,7 +88,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
struct globals {
- struct flow_keys flow;
+ struct bpf_flow_keys flow;
};
struct bpf_map_def SEC("maps") percpu_map = {
@@ -114,14 +114,14 @@ struct pair {
struct bpf_map_def SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct flow_keys),
+ .key_size = sizeof(struct bpf_flow_keys),
.value_size = sizeof(struct pair),
.max_entries = 1024,
};
static void update_stats(struct __sk_buff *skb, struct globals *g)
{
- struct flow_keys key = g->flow;
+ struct bpf_flow_keys key = g->flow;
struct pair *value;
value = bpf_map_lookup_elem(&hash_map, &key);
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index d4184ab5f3ac..3fcfd8c4b2a3 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -7,7 +7,7 @@
#include <arpa/inet.h>
#include <sys/resource.h>
-struct flow_keys {
+struct bpf_flow_keys {
__be32 src;
__be32 dst;
union {
@@ -49,7 +49,7 @@ int main(int argc, char **argv)
(void) f;
for (i = 0; i < 5; i++) {
- struct flow_keys key = {}, next_key;
+ struct bpf_flow_keys key = {}, next_key;
struct pair value;
sleep(1);
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
new file mode 100644
index 000000000000..3303bb85593b
--- /dev/null
+++ b/samples/bpf/tcbpf2_kern.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2016 VMware
+ * Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include "bpf_helpers.h"
+
+#define _htonl __builtin_bswap32
+#define ERROR(ret) do {\
+ char fmt[] = "ERROR line:%d ret:%d\n";\
+ bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
+ } while(0)
+
+struct geneve_opt {
+ __be16 opt_class;
+ u8 type;
+ u8 length:5;
+ u8 r3:1;
+ u8 r2:1;
+ u8 r1:1;
+ u8 opt_data[8]; /* hard-coded to 8 bytes */
+};
+
+struct vxlan_metadata {
+ u32 gbp;
+};
+
+SEC("gre_set_tunnel")
+int _gre_set_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("gre_get_tunnel")
+int _gre_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "key %d remote ip 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
+ return TC_ACT_OK;
+}
+
+SEC("vxlan_set_tunnel")
+int _vxlan_set_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
+ ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("vxlan_get_tunnel")
+int _vxlan_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+ char fmt[] = "key %d remote ip 0x%x vxlan gbp 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv4, md.gbp);
+
+ return TC_ACT_OK;
+}
+
+SEC("geneve_set_tunnel")
+int _geneve_set_tunnel(struct __sk_buff *skb)
+{
+ int ret, ret2;
+ struct bpf_tunnel_key key;
+ struct geneve_opt gopt;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ __builtin_memset(&gopt, 0x0, sizeof(gopt));
+ gopt.opt_class = 0x102; /* Open Virtual Networking (OVN) */
+ gopt.type = 0x08;
+ gopt.r1 = 1;
+ gopt.r2 = 0;
+ gopt.r3 = 1;
+ gopt.length = 2; /* 4-byte multiple */
+ *(int *) &gopt.opt_data = 0xdeadbeef;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("geneve_get_tunnel")
+int _geneve_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct geneve_opt gopt;
+ char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt),
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ return TC_ACT_OK;
+}
+
+SEC("ipip_set_tunnel")
+int _ipip_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ if (tcp->dest == htons(5200))
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ else if (tcp->dest == htons(5201))
+ key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
+ else
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip_get_tunnel")
+int _ipip_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_set_tunnel")
+int _ipip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) {
+ ERROR(iph->protocol);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_get_tunnel")
+int _ipip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_set_tunnel")
+int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct ipv6hdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->nexthdr == NEXTHDR_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->nexthdr != NEXTHDR_TCP) {
+ ERROR(iph->nexthdr);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_get_tunnel")
+int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c
new file mode 100644
index 000000000000..86b28d7d6c99
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_kern.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/utsname.h>
+
+struct bpf_map_def SEC("maps") cgroup_map = {
+ .type = BPF_MAP_TYPE_CGROUP_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") perf_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+
+/* Writes the last PID that called sync to a map at index 0 */
+SEC("kprobe/sys_sync")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ u64 pid = bpf_get_current_pid_tgid();
+ int idx = 0;
+
+ if (!bpf_current_task_under_cgroup(&cgroup_map, 0))
+ return 0;
+
+ bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
new file mode 100644
index 000000000000..30b0bce884f9
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -0,0 +1,145 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/bpf.h>
+#include <sched.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <linux/limits.h>
+
+#define CGROUP_MOUNT_PATH "/mnt"
+#define CGROUP_PATH "/mnt/my-cgroup"
+
+#define clean_errno() (errno == 0 ? "None" : strerror(errno))
+#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+
+static int join_cgroup(char *path)
+{
+ int fd, rc = 0;
+ pid_t pid = getpid();
+ char cgroup_path[PATH_MAX + 1];
+
+ snprintf(cgroup_path, sizeof(cgroup_path), "%s/cgroup.procs", path);
+
+ fd = open(cgroup_path, O_WRONLY);
+ if (fd < 0) {
+ log_err("Opening Cgroup");
+ return 1;
+ }
+
+ if (dprintf(fd, "%d\n", pid) < 0) {
+ log_err("Joining Cgroup");
+ rc = 1;
+ }
+ close(fd);
+ return rc;
+}
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ int cg2, idx = 0;
+ pid_t remote_pid, local_pid = getpid();
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ /*
+ * This is to avoid interfering with existing cgroups. Unfortunately,
+ * most people don't have cgroupv2 enabled at this point in time.
+ * It's easier to create our own mount namespace and manage it
+ * ourselves.
+ */
+ if (unshare(CLONE_NEWNS)) {
+ log_err("unshare");
+ return 1;
+ }
+
+ if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
+ log_err("mount fakeroot");
+ return 1;
+ }
+
+ if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL)) {
+ log_err("mount cgroup2");
+ return 1;
+ }
+
+ if (mkdir(CGROUP_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup");
+ return 1;
+ }
+
+ cg2 = open(CGROUP_PATH, O_RDONLY);
+ if (cg2 < 0) {
+ log_err("opening target cgroup");
+ goto cleanup_cgroup_err;
+ }
+
+ if (bpf_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+ log_err("Adding target cgroup to map");
+ goto cleanup_cgroup_err;
+ }
+ if (join_cgroup("/mnt/my-cgroup")) {
+ log_err("Leaving target cgroup");
+ goto cleanup_cgroup_err;
+ }
+
+ /*
+ * The installed helper program catches the sync call, and should
+ * write it to the map.
+ */
+
+ sync();
+ bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid != remote_pid) {
+ fprintf(stderr,
+ "BPF Helper didn't write correct PID to map, but: %d\n",
+ remote_pid);
+ goto leave_cgroup_err;
+ }
+
+ /* Verify the negative scenario; leave the cgroup */
+ if (join_cgroup(CGROUP_MOUNT_PATH))
+ goto leave_cgroup_err;
+
+ remote_pid = 0;
+ bpf_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
+
+ sync();
+ bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid == remote_pid) {
+ fprintf(stderr, "BPF cgroup negative test did not work\n");
+ goto cleanup_cgroup_err;
+ }
+
+ rmdir(CGROUP_PATH);
+ return 0;
+
+ /* Error condition, cleanup */
+leave_cgroup_err:
+ join_cgroup(CGROUP_MOUNT_PATH);
+cleanup_cgroup_err:
+ rmdir(CGROUP_PATH);
+ return 1;
+}
diff --git a/samples/bpf/test_ipip.sh b/samples/bpf/test_ipip.sh
new file mode 100755
index 000000000000..196925403ab4
--- /dev/null
+++ b/samples/bpf/test_ipip.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+
+function config_device {
+ ip netns add at_ns0
+ ip netns add at_ns1
+ ip netns add at_ns2
+ ip link add veth0 type veth peer name veth0b
+ ip link add veth1 type veth peer name veth1b
+ ip link add veth2 type veth peer name veth2b
+ ip link set veth0b up
+ ip link set veth1b up
+ ip link set veth2b up
+ ip link set dev veth0b mtu 1500
+ ip link set dev veth1b mtu 1500
+ ip link set dev veth2b mtu 1500
+ ip link set veth0 netns at_ns0
+ ip link set veth1 netns at_ns1
+ ip link set veth2 netns at_ns2
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip netns exec at_ns1 ip addr add 172.16.1.101/24 dev veth1
+ ip netns exec at_ns1 ip addr add 2401:db00::2/64 dev veth1 nodad
+ ip netns exec at_ns1 ip link set dev veth1 up
+ ip netns exec at_ns2 ip addr add 172.16.1.200/24 dev veth2
+ ip netns exec at_ns2 ip addr add 2401:db00::3/64 dev veth2 nodad
+ ip netns exec at_ns2 ip link set dev veth2 up
+ ip link add br0 type bridge
+ ip link set br0 up
+ ip link set dev br0 mtu 1500
+ ip link set veth0b master br0
+ ip link set veth1b master br0
+ ip link set veth2b master br0
+}
+
+function add_ipip_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.101 remote 172.16.1.200
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ipip external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ipip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ip6ip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 2601:646::1/64
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 2601:646::1/64
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ip6ip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 2601:646::2/64
+}
+
+function attach_bpf {
+ DEV=$1
+ SET_TUNNEL=$2
+ GET_TUNNEL=$3
+ ip netns exec at_ns2 tc qdisc add dev $DEV clsact
+ ip netns exec at_ns2 tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+ ip netns exec at_ns2 tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_ipip {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip_tunnel
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv4 over IPv6 tunnel
+function test_ipip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip6_tunnel
+ attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv6 over IPv6 tunnel
+function test_ip6ip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ip6ip6_tunnel
+ attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
+
+ ip netns exec at_ns0 ping -6 -c 1 2601:646::2
+ ip netns exec at_ns2 ping -6 -c 1 2601:646::1
+ ip netns exec at_ns0 iperf -6sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -6sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5201
+ cleanup
+}
+
+function cleanup {
+ set +ex
+ pkill iperf
+ ip netns delete at_ns0
+ ip netns delete at_ns1
+ ip netns delete at_ns2
+ ip link del veth0
+ ip link del veth1
+ ip link del veth2
+ ip link del br0
+ pkill tcpdump
+ pkill cat
+ set -ex
+}
+
+cleanup
+echo "Testing IP tunnels..."
+test_ipip
+test_ipip6
+test_ip6ip6
+echo "*** PASS ***"
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
new file mode 100755
index 000000000000..1ff634f187b7
--- /dev/null
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# In Namespace 0 (at_ns0) using native tunnel
+# Overlay IP: 10.1.1.100
+# local 172.16.1.100 remote 172.16.1.200
+# veth0 IP: 172.16.1.100, tunnel dev <type>00
+
+# Out of Namespace using BPF set/get on lwtunnel
+# Overlay IP: 10.1.1.200
+# local 172.16.1.200 remote 172.16.1.100
+# veth1 IP: 172.16.1.200, tunnel dev <type>11
+
+function config_device {
+ ip netns add at_ns0
+ ip link add veth0 type veth peer name veth1
+ ip link set veth0 netns at_ns0
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip link set dev veth1 up mtu 1500
+ ip addr add dev veth1 172.16.1.200/24
+}
+
+function add_gre_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE key 2 local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE key 2 external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_vxlan_tunnel {
+ # Set a static ARP entry here because iptables set-mark works
+ # on L3 packets only; it does not apply to ARP packets,
+ # which would cause errors at get_tunnel_{key/opt}.
+
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE id 2 dstport 4789 gbp remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00
+ ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external gbp dstport 4789
+ ip link set dev $DEV address 52:54:00:d9:02:00 up
+ ip addr add dev $DEV 10.1.1.200/24
+ arp -s 10.1.1.100 52:54:00:d9:01:00
+}
+
+function add_geneve_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE id 2 dstport 6081 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE dstport 6081 external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
+function attach_bpf {
+ DEV=$1
+ SET_TUNNEL=$2
+ GET_TUNNEL=$3
+ tc qdisc add dev $DEV clsact
+ tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+ tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_gre {
+ TYPE=gretap
+ DEV_NS=gretap00
+ DEV=gretap11
+ config_device
+ add_gre_tunnel
+ attach_bpf $DEV gre_set_tunnel gre_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_vxlan {
+ TYPE=vxlan
+ DEV_NS=vxlan00
+ DEV=vxlan11
+ config_device
+ add_vxlan_tunnel
+ attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_geneve {
+ TYPE=geneve
+ DEV_NS=geneve00
+ DEV=geneve11
+ config_device
+ add_geneve_tunnel
+ attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_ipip {
+ TYPE=ipip
+ DEV_NS=ipip00
+ DEV=ipip11
+ config_device
+ tcpdump -nei veth1 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+ add_ipip_tunnel
+ ethtool -K veth1 gso off gro off rx off tx off
+ ip link set dev veth1 mtu 1500
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ sleep 0.2
+ iperf -c 10.1.1.100 -n 5k -p 5200
+ cleanup
+}
+
+function cleanup {
+ set +ex
+ pkill iperf
+ ip netns delete at_ns0
+ ip link del veth1
+ ip link del ipip11
+ ip link del gretap11
+ ip link del geneve11
+ pkill tcpdump
+ pkill cat
+ set -ex
+}
+
+cleanup
+echo "Testing GRE tunnel..."
+test_gre
+echo "Testing VXLAN tunnel..."
+test_vxlan
+echo "Testing GENEVE tunnel..."
+test_geneve
+echo "Testing IPIP tunnel..."
+test_ipip
+echo "*** PASS ***"
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index fe2fcec98c1f..369ffaad3799 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -29,6 +29,7 @@ struct bpf_test {
struct bpf_insn insns[MAX_INSNS];
int fixup[MAX_FIXUPS];
int prog_array_fixup[MAX_FIXUPS];
+ int test_val_map_fixup[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
enum {
@@ -39,6 +40,19 @@ struct bpf_test {
enum bpf_prog_type prog_type;
};
+/* Note we want this to be 64 bit aligned so that the end of our array is
+ * actually the end of the structure.
+ */
+#define MAX_ENTRIES 11
+struct test_val {
+ unsigned index;
+ int foo[MAX_ENTRIES];
+};
+
+struct other_val {
+ unsigned int action[32];
+};
+
static struct bpf_test tests[] = {
{
"add+sub+mul",
@@ -291,6 +305,29 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
@@ -1210,6 +1247,54 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"raw_stack: skb_load_bytes, no init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
@@ -1449,7 +1534,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test1",
+ "direct packet access: test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1466,7 +1551,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test2",
+ "direct packet access: test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
@@ -1499,7 +1584,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "pkt: test3",
+ "direct packet access: test3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1511,7 +1596,105 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
- "pkt: test4",
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1519,15 +1702,687 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "cannot write",
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {5},
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {1},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {11},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {7},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {6},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {5},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {1},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {11},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {7},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {6},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test14, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=inv expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test15, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test16, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test17, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test18, cls helper fail range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test19, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test20, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
.result = REJECT,
+ .errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+ {
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3},
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result = REJECT,
+ },
+ {
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .test_val_map_fixup = {3, 11},
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
@@ -1541,12 +2396,12 @@ static int probe_filter_length(struct bpf_insn *fp)
return len + 1;
}
-static int create_map(void)
+static int create_map(size_t val_size, int num)
{
int map_fd;
map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
- sizeof(long long), sizeof(long long), 1024, 0);
+ sizeof(long long), val_size, num, 0);
if (map_fd < 0)
printf("failed to create map '%s'\n", strerror(errno));
@@ -1576,12 +2431,13 @@ static int test(void)
int prog_len = probe_filter_length(prog);
int *fixup = tests[i].fixup;
int *prog_array_fixup = tests[i].prog_array_fixup;
+ int *test_val_map_fixup = tests[i].test_val_map_fixup;
int expected_result;
const char *expected_errstr;
- int map_fd = -1, prog_array_fd = -1;
+ int map_fd = -1, prog_array_fd = -1, test_val_map_fd = -1;
if (*fixup) {
- map_fd = create_map();
+ map_fd = create_map(sizeof(long long), 1024);
do {
prog[*fixup].imm = map_fd;
@@ -1596,6 +2452,18 @@ static int test(void)
prog_array_fixup++;
} while (*prog_array_fixup);
}
+ if (*test_val_map_fixup) {
+ /* Unprivileged can't create a hash map. */
+ if (unpriv)
+ continue;
+ test_val_map_fd = create_map(sizeof(struct test_val),
+ 256);
+ do {
+ prog[*test_val_map_fixup].imm = test_val_map_fd;
+ test_val_map_fixup++;
+ } while (*test_val_map_fixup);
+ }
+
printf("#%d %s ", i, tests[i].descr);
prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
@@ -1642,6 +2510,8 @@ fail:
close(map_fd);
if (prog_array_fd >= 0)
close(prog_array_fd);
+ if (test_val_map_fd >= 0)
+ close(test_val_map_fd);
close(prog_fd);
}
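The new "direct packet access" and "helper access to packet" entries encode, as raw eBPF instructions, the bounds-check discipline the verifier enforces on skb/xdp data pointers: load data and data_end from the context, prove the access range with a comparison, and only then read or write. In restricted C the accepted shape is roughly the following sketch (illustrative program, not part of this patch):

/* Restricted-C counterpart of the accepted packet-write tests above.
 * The comparison against data_end is what makes the store provably
 * in-bounds for the verifier.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("pkt_write_example")
int pkt_write(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	__u8 *p = data;

	if (data + 8 > data_end)	/* mirrors the JGT/JGE checks in the tests */
		return TC_ACT_OK;

	p[0] = 0;			/* now a verified, in-bounds write */

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

The rejected variants above are exactly the cases where this proof is missing, sits on the wrong branch, or covers a shorter range than the access or helper call actually needs.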
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
new file mode 100644
index 000000000000..71a8ed32823e
--- /dev/null
+++ b/samples/bpf/trace_event_kern.c
@@ -0,0 +1,65 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include <uapi/linux/perf_event.h>
+#include "bpf_helpers.h"
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ u32 kernstack;
+ u32 userstack;
+};
+
+struct bpf_map_def SEC("maps") counts = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct key_t),
+ .value_size = sizeof(u64),
+ .max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(u32),
+ .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
+ .max_entries = 10000,
+};
+
+#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
+#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
+
+SEC("perf_event")
+int bpf_prog1(struct bpf_perf_event_data *ctx)
+{
+ char fmt[] = "CPU-%d period %lld ip %llx";
+ u32 cpu = bpf_get_smp_processor_id();
+ struct key_t key;
+ u64 *val, one = 1;
+
+ if (ctx->sample_period < 10000)
+ /* ignore warmup */
+ return 0;
+ bpf_get_current_comm(&key.comm, sizeof(key.comm));
+ key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
+ key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
+ if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
+ bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
+ ctx->regs.ip);
+ return 0;
+ }
+
+ val = bpf_map_lookup_elem(&counts, &key);
+ if (val)
+ (*val)++;
+ else
+ bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
new file mode 100644
index 000000000000..9a130d31ecf2
--- /dev/null
+++ b/samples/bpf/trace_event_user.c
@@ -0,0 +1,213 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <signal.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_FREQ 50
+
+static bool sys_read_seen, sys_write_seen;
+
+static void print_ksym(__u64 addr)
+{
+ struct ksym *sym;
+
+ if (!addr)
+ return;
+ sym = ksym_search(addr);
+ printf("%s;", sym->name);
+ if (!strcmp(sym->name, "sys_read"))
+ sys_read_seen = true;
+ else if (!strcmp(sym->name, "sys_write"))
+ sys_write_seen = true;
+}
+
+static void print_addr(__u64 addr)
+{
+ if (!addr)
+ return;
+ printf("%llx;", addr);
+}
+
+#define TASK_COMM_LEN 16
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ __u32 kernstack;
+ __u32 userstack;
+};
+
+static void print_stack(struct key_t *key, __u64 count)
+{
+ __u64 ip[PERF_MAX_STACK_DEPTH] = {};
+ static bool warned;
+ int i;
+
+ printf("%3lld %s;", count, key->comm);
+ if (bpf_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_ksym(ip[i]);
+ }
+ printf("-;");
+ if (bpf_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_addr(ip[i]);
+ }
+ printf("\n");
+
+ if (key->kernstack == -EEXIST && !warned) {
+ printf("stackmap collisions seen. Consider increasing size\n");
+ warned = true;
+ } else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
+ printf("err stackid %d %d\n", key->kernstack, key->userstack);
+ }
+}
+
+static void int_exit(int sig)
+{
+ kill(0, SIGKILL);
+ exit(0);
+}
+
+static void print_stacks(void)
+{
+ struct key_t key = {}, next_key;
+ __u64 value;
+ __u32 stackid = 0, next_id;
+ int fd = map_fd[0], stack_map = map_fd[1];
+
+ sys_read_seen = sys_write_seen = false;
+ while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_lookup_elem(fd, &next_key, &value);
+ print_stack(&next_key, value);
+ bpf_delete_elem(fd, &next_key);
+ key = next_key;
+ }
+
+ if (!sys_read_seen || !sys_write_seen) {
+ printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
+ int_exit(0);
+ }
+
+ /* clear stack map */
+ while (bpf_get_next_key(stack_map, &stackid, &next_id) == 0) {
+ bpf_delete_elem(stack_map, &next_id);
+ stackid = next_id;
+ }
+}
+
+static void test_perf_event_all_cpu(struct perf_event_attr *attr)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ int *pmu_fd = malloc(nr_cpus * sizeof(int));
+ int i;
+
+ /* open perf_event on all cpus */
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd[i] = perf_event_open(attr, -1, i, -1, 0);
+ if (pmu_fd[i] < 0) {
+ printf("perf_event_open failed\n");
+ goto all_cpu_err;
+ }
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ }
+ system("dd if=/dev/zero of=/dev/null count=5000k");
+ print_stacks();
+all_cpu_err:
+ for (i--; i >= 0; i--)
+ close(pmu_fd[i]);
+ free(pmu_fd);
+}
+
+static void test_perf_event_task(struct perf_event_attr *attr)
+{
+ int pmu_fd;
+
+ /* open task bound event */
+ pmu_fd = perf_event_open(attr, 0, -1, -1, 0);
+ if (pmu_fd < 0) {
+ printf("perf_event_open failed\n");
+ return;
+ }
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
+ system("dd if=/dev/zero of=/dev/null count=5000k");
+ print_stacks();
+ close(pmu_fd);
+}
+
+static void test_bpf_perf_event(void)
+{
+ struct perf_event_attr attr_type_hw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .inherit = 1,
+ };
+ struct perf_event_attr attr_type_sw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .inherit = 1,
+ };
+
+ test_perf_event_all_cpu(&attr_type_hw);
+ test_perf_event_task(&attr_type_hw);
+ test_perf_event_all_cpu(&attr_type_sw);
+ test_perf_event_task(&attr_type_sw);
+}
+
+
+int main(int argc, char **argv)
+{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ setrlimit(RLIMIT_MEMLOCK, &r);
+
+ signal(SIGINT, int_exit);
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ return 1;
+ }
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 2;
+ }
+
+ if (fork() == 0) {
+ read_trace_pipe();
+ return 0;
+ }
+ test_bpf_perf_event();
+
+ int_exit(0);
+ return 0;
+}
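trace_event_user.c opens its events with perf_event_open(), for which glibc provides no wrapper; whichever helper the sample picks it up from, it reduces to the raw syscall, roughly as sketched here (names are illustrative):

/* Minimal perf_event_open() wrapper sketch; not code from this patch. */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr,
				pid_t pid, int cpu, int group_fd,
				unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

With that signature, pid = -1 plus a concrete cpu yields the per-CPU events opened in test_perf_event_all_cpu(), while pid = 0 with cpu = -1 yields the task-bound event in test_perf_event_task().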
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index f95f232cbab9..fd12d7154d42 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -19,20 +19,18 @@ struct bpf_map_def SEC("maps") progs = {
.max_entries = 1024,
};
-SEC("kprobe/seccomp_phase1")
+SEC("kprobe/__seccomp_filter")
int bpf_prog1(struct pt_regs *ctx)
{
- struct seccomp_data sd;
-
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ int sc_nr = (int)PT_REGS_PARM1(ctx);
/* dispatch into next BPF program depending on syscall number */
- bpf_tail_call(ctx, &progs, sd.nr);
+ bpf_tail_call(ctx, &progs, sc_nr);
/* fall through -> unknown syscall */
- if (sd.nr >= __NR_getuid && sd.nr <= __NR_getsid) {
+ if (sc_nr >= __NR_getuid && sc_nr <= __NR_getsid) {
char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
- bpf_trace_printk(fmt, sizeof(fmt), sd.nr);
+ bpf_trace_printk(fmt, sizeof(fmt), sc_nr);
}
return 0;
}
@@ -42,7 +40,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] == 512) {
char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +53,7 @@ PROG(__NR_read)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
+ bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] > 128 && sd.args[2] <= 1024) {
char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index a04dd3cd4358..36b5925bb137 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -6,6 +6,7 @@
#include <sys/prctl.h>
#include "libbpf.h"
#include "bpf_load.h"
+#include <sys/resource.h>
/* install fake seccomp program to enable seccomp code path inside the kernel,
* so that our kprobe attached to seccomp_phase1() can be triggered
@@ -27,8 +28,10 @@ int main(int ac, char **argv)
{
FILE *f;
char filename[256];
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ setrlimit(RLIMIT_MEMLOCK, &r);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
diff --git a/samples/rpmsg/rpmsg_client_sample.c b/samples/rpmsg/rpmsg_client_sample.c
index d0e249c90668..f161dfd3e70a 100644
--- a/samples/rpmsg/rpmsg_client_sample.c
+++ b/samples/rpmsg/rpmsg_client_sample.c
@@ -24,38 +24,52 @@
#define MSG "hello world!"
#define MSG_LIMIT 100
-static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len,
+struct instance_data {
+ int rx_count;
+};
+
+static int rpmsg_sample_cb(struct rpmsg_device *rpdev, void *data, int len,
void *priv, u32 src)
{
int ret;
- static int rx_count;
+ struct instance_data *idata = dev_get_drvdata(&rpdev->dev);
- dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+ dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n",
+ ++idata->rx_count, src);
print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
data, len, true);
/* samples should not live forever */
- if (rx_count >= MSG_LIMIT) {
+ if (idata->rx_count >= MSG_LIMIT) {
dev_info(&rpdev->dev, "goodbye!\n");
- return;
+ return 0;
}
/* send a new message now */
- ret = rpmsg_send(rpdev, MSG, strlen(MSG));
+ ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
if (ret)
dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+
+ return 0;
}
-static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
+static int rpmsg_sample_probe(struct rpmsg_device *rpdev)
{
int ret;
+ struct instance_data *idata;
dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
rpdev->src, rpdev->dst);
+ idata = devm_kzalloc(&rpdev->dev, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&rpdev->dev, idata);
+
/* send a message to our remote processor */
- ret = rpmsg_send(rpdev, MSG, strlen(MSG));
+ ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
if (ret) {
dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
return ret;
@@ -64,7 +78,7 @@ static int rpmsg_sample_probe(struct rpmsg_channel *rpdev)
return 0;
}
-static void rpmsg_sample_remove(struct rpmsg_channel *rpdev)
+static void rpmsg_sample_remove(struct rpmsg_device *rpdev)
{
dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
}
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index 8ab68679cfb5..dd779c40c8e6 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN
CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
- CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
@@ -14,4 +13,8 @@ ifdef CONFIG_UBSAN
ifdef CONFIG_UBSAN_ALIGNMENT
CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
endif
+
+ifdef CONFIG_UBSAN_NULL
+ CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
+endif
endif
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index df643f60bb41..a32e4da4c117 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -1,98 +1,99 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
"""Find Kconfig symbols that are referenced but not defined."""
-# (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com>
+# (c) 2014-2016 Valentin Rothberg <valentinrothberg@gmail.com>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
+import argparse
import difflib
import os
import re
import signal
+import subprocess
import sys
from multiprocessing import Pool, cpu_count
-from optparse import OptionParser
-from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
-FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
-DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
-EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
+SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"
+DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"
+EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
-SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
+SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
-REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)')
-REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
+REGEX_SYMBOL = re.compile(r'(?!\B)' + SYMBOL + r'(?!\B)')
+REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
-REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
+REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
def parse_options():
"""The user interface of this module."""
- usage = "%prog [options]\n\n" \
- "Run this tool to detect Kconfig symbols that are referenced but " \
- "not defined in\nKconfig. The output of this tool has the " \
- "format \'Undefined symbol\\tFile list\'\n\n" \
- "If no option is specified, %prog will default to check your\n" \
- "current tree. Please note that specifying commits will " \
- "\'git reset --hard\'\nyour current tree! You may save " \
- "uncommitted changes to avoid losing data."
-
- parser = OptionParser(usage=usage)
-
- parser.add_option('-c', '--commit', dest='commit', action='store',
- default="",
- help="Check if the specified commit (hash) introduces "
- "undefined Kconfig symbols.")
-
- parser.add_option('-d', '--diff', dest='diff', action='store',
- default="",
- help="Diff undefined symbols between two commits. The "
- "input format bases on Git log's "
- "\'commmit1..commit2\'.")
-
- parser.add_option('-f', '--find', dest='find', action='store_true',
- default=False,
- help="Find and show commits that may cause symbols to be "
- "missing. Required to run with --diff.")
-
- parser.add_option('-i', '--ignore', dest='ignore', action='store',
- default="",
- help="Ignore files matching this pattern. Note that "
- "the pattern needs to be a Python regex. To "
- "ignore defconfigs, specify -i '.*defconfig'.")
-
- parser.add_option('-s', '--sim', dest='sim', action='store', default="",
- help="Print a list of maximum 10 string-similar symbols.")
-
- parser.add_option('', '--force', dest='force', action='store_true',
- default=False,
- help="Reset current Git tree even when it's dirty.")
-
- (opts, _) = parser.parse_args()
-
- if opts.commit and opts.diff:
+ usage = "Run this tool to detect Kconfig symbols that are referenced but " \
+ "not defined in Kconfig. If no option is specified, " \
+ "checkkconfigsymbols defaults to check your current tree. " \
+ "Please note that specifying commits will 'git reset --hard\' " \
+ "your current tree! You may save uncommitted changes to avoid " \
+ "losing data."
+
+ parser = argparse.ArgumentParser(description=usage)
+
+ parser.add_argument('-c', '--commit', dest='commit', action='store',
+ default="",
+ help="check if the specified commit (hash) introduces "
+ "undefined Kconfig symbols")
+
+ parser.add_argument('-d', '--diff', dest='diff', action='store',
+ default="",
+ help="diff undefined symbols between two commits "
+ "(e.g., -d commmit1..commit2)")
+
+ parser.add_argument('-f', '--find', dest='find', action='store_true',
+ default=False,
+ help="find and show commits that may cause symbols to be "
+ "missing (required to run with --diff)")
+
+ parser.add_argument('-i', '--ignore', dest='ignore', action='store',
+ default="",
+ help="ignore files matching this Python regex "
+ "(e.g., -i '.*defconfig')")
+
+ parser.add_argument('-s', '--sim', dest='sim', action='store', default="",
+ help="print a list of max. 10 string-similar symbols")
+
+ parser.add_argument('--force', dest='force', action='store_true',
+ default=False,
+ help="reset current Git tree even when it's dirty")
+
+ parser.add_argument('--no-color', dest='color', action='store_false',
+ default=True,
+ help="don't print colored output (default when not "
+ "outputting to a terminal)")
+
+ args = parser.parse_args()
+
+ if args.commit and args.diff:
sys.exit("Please specify only one option at once.")
- if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
+ if args.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", args.diff):
sys.exit("Please specify valid input in the following format: "
"\'commit1..commit2\'")
- if opts.commit or opts.diff:
- if not opts.force and tree_is_dirty():
+ if args.commit or args.diff:
+ if not args.force and tree_is_dirty():
sys.exit("The current Git tree is dirty (see 'git status'). "
"Running this script may\ndelete important data since it "
"calls 'git reset --hard' for some performance\nreasons. "
@@ -100,138 +101,148 @@ def parse_options():
"'--force' if you\nwant to ignore this warning and "
"continue.")
- if opts.commit:
- opts.find = False
+ if args.commit:
+ args.find = False
- if opts.ignore:
+ if args.ignore:
try:
- re.match(opts.ignore, "this/is/just/a/test.c")
+ re.match(args.ignore, "this/is/just/a/test.c")
except:
sys.exit("Please specify a valid Python regex.")
- return opts
+ return args
def main():
"""Main function of this module."""
- opts = parse_options()
+ args = parse_options()
- if opts.sim and not opts.commit and not opts.diff:
- sims = find_sims(opts.sim, opts.ignore)
+ global COLOR
+ COLOR = args.color and sys.stdout.isatty()
+
+ if args.sim and not args.commit and not args.diff:
+ sims = find_sims(args.sim, args.ignore)
if sims:
- print "%s: %s" % (yel("Similar symbols"), ', '.join(sims))
+ print("%s: %s" % (yel("Similar symbols"), ', '.join(sims)))
else:
- print "%s: no similar symbols found" % yel("Similar symbols")
+ print("%s: no similar symbols found" % yel("Similar symbols"))
sys.exit(0)
# dictionary of (un)defined symbols
defined = {}
undefined = {}
- if opts.commit or opts.diff:
+ if args.commit or args.diff:
head = get_head()
# get commit range
commit_a = None
commit_b = None
- if opts.commit:
- commit_a = opts.commit + "~"
- commit_b = opts.commit
- elif opts.diff:
- split = opts.diff.split("..")
+ if args.commit:
+ commit_a = args.commit + "~"
+ commit_b = args.commit
+ elif args.diff:
+ split = args.diff.split("..")
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
# get undefined items before the commit
- execute("git reset --hard %s" % commit_a)
- undefined_a, _ = check_symbols(opts.ignore)
+ reset(commit_a)
+ undefined_a, _ = check_symbols(args.ignore)
# get undefined items for the commit
- execute("git reset --hard %s" % commit_b)
- undefined_b, defined = check_symbols(opts.ignore)
+ reset(commit_b)
+ undefined_b, defined = check_symbols(args.ignore)
# report cases that are present for the commit but not before
- for feature in sorted(undefined_b):
- # feature has not been undefined before
- if not feature in undefined_a:
- files = sorted(undefined_b.get(feature))
- undefined[feature] = files
- # check if there are new files that reference the undefined feature
+ for symbol in sorted(undefined_b):
+ # symbol has not been undefined before
+ if symbol not in undefined_a:
+ files = sorted(undefined_b.get(symbol))
+ undefined[symbol] = files
+ # check if there are new files that reference the undefined symbol
else:
- files = sorted(undefined_b.get(feature) -
- undefined_a.get(feature))
+ files = sorted(undefined_b.get(symbol) -
+ undefined_a.get(symbol))
if files:
- undefined[feature] = files
+ undefined[symbol] = files
# reset to head
- execute("git reset --hard %s" % head)
+ reset(head)
# default to check the entire tree
else:
- undefined, defined = check_symbols(opts.ignore)
+ undefined, defined = check_symbols(args.ignore)
# now print the output
- for feature in sorted(undefined):
- print red(feature)
+ for symbol in sorted(undefined):
+ print(red(symbol))
- files = sorted(undefined.get(feature))
- print "%s: %s" % (yel("Referencing files"), ", ".join(files))
+ files = sorted(undefined.get(symbol))
+ print("%s: %s" % (yel("Referencing files"), ", ".join(files)))
- sims = find_sims(feature, opts.ignore, defined)
+ sims = find_sims(symbol, args.ignore, defined)
sims_out = yel("Similar symbols")
if sims:
- print "%s: %s" % (sims_out, ', '.join(sims))
+ print("%s: %s" % (sims_out, ', '.join(sims)))
else:
- print "%s: %s" % (sims_out, "no similar symbols found")
+ print("%s: %s" % (sims_out, "no similar symbols found"))
- if opts.find:
- print "%s:" % yel("Commits changing symbol")
- commits = find_commits(feature, opts.diff)
+ if args.find:
+ print("%s:" % yel("Commits changing symbol"))
+ commits = find_commits(symbol, args.diff)
if commits:
for commit in commits:
commit = commit.split(" ", 1)
- print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1])
+ print("\t- %s (\"%s\")" % (yel(commit[0]), commit[1]))
else:
- print "\t- no commit found"
- print # new line
+ print("\t- no commit found")
+ print() # new line
+
+
+def reset(commit):
+ """Reset current git tree to %commit."""
+ execute(["git", "reset", "--hard", commit])
def yel(string):
"""
Color %string yellow.
"""
- return "\033[33m%s\033[0m" % string
+ return "\033[33m%s\033[0m" % string if COLOR else string
def red(string):
"""
Color %string red.
"""
- return "\033[31m%s\033[0m" % string
+ return "\033[31m%s\033[0m" % string if COLOR else string
def execute(cmd):
"""Execute %cmd and return stdout. Exit in case of error."""
- pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
- (stdout, _) = pop.communicate() # wait until finished
- if pop.returncode != 0:
- sys.exit(stdout)
+ try:
+ stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False)
+ stdout = stdout.decode(errors='replace')
+ except subprocess.CalledProcessError as fail:
+ exit(fail)
return stdout
def find_commits(symbol, diff):
"""Find commits changing %symbol in the given range of %diff."""
- commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
- % (symbol, diff))
+ commits = execute(["git", "log", "--pretty=oneline",
+ "--abbrev-commit", "-G",
+ symbol, diff])
return [x for x in commits.split("\n") if x]
def tree_is_dirty():
"""Return true if the current working tree is dirty (i.e., if any file has
been added, deleted, modified, renamed or copied but not committed)."""
- stdout = execute("git status --porcelain")
+ stdout = execute(["git", "status", "--porcelain"])
for line in stdout:
if re.findall(r"[URMADC]{1}", line[:2]):
return True
@@ -240,13 +251,13 @@ def tree_is_dirty():
def get_head():
"""Return commit hash of current HEAD."""
- stdout = execute("git rev-parse HEAD")
+ stdout = execute(["git", "rev-parse", "HEAD"])
return stdout.strip('\n')
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
- return [lst[i::size] for i in xrange(size)]
+ return [lst[i::size] for i in range(size)]
def init_worker():
@@ -254,7 +265,7 @@ def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
-def find_sims(symbol, ignore, defined = []):
+def find_sims(symbol, ignore, defined=[]):
"""Return a list of max. ten Kconfig symbols that are string-similar to
@symbol."""
if defined:
@@ -279,7 +290,7 @@ def find_sims(symbol, ignore, defined = []):
def get_files():
"""Return a list of all files in the current git directory."""
# use 'git ls-files' to get the worklist
- stdout = execute("git ls-files")
+ stdout = execute(["git", "ls-files"])
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
@@ -311,8 +322,8 @@ def check_symbols_helper(pool, ignore):
check_symbols() in order to properly terminate running worker processes."""
source_files = []
kconfig_files = []
- defined_features = []
- referenced_features = dict() # {file: [features]}
+ defined_symbols = []
+ referenced_symbols = dict() # {file: [symbols]}
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
@@ -326,76 +337,75 @@ def check_symbols_helper(pool, ignore):
# parse source files
arglist = partition(source_files, cpu_count())
for res in pool.map(parse_source_files, arglist):
- referenced_features.update(res)
-
+ referenced_symbols.update(res)
# parse kconfig files
arglist = []
for part in partition(kconfig_files, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
- defined_features.extend(res[0])
- referenced_features.update(res[1])
- defined_features = set(defined_features)
+ defined_symbols.extend(res[0])
+ referenced_symbols.update(res[1])
+ defined_symbols = set(defined_symbols)
- # inverse mapping of referenced_features to dict(feature: [files])
+ # inverse mapping of referenced_symbols to dict(symbol: [files])
inv_map = dict()
- for _file, features in referenced_features.iteritems():
- for feature in features:
- inv_map[feature] = inv_map.get(feature, set())
- inv_map[feature].add(_file)
- referenced_features = inv_map
-
- undefined = {} # {feature: [files]}
- for feature in sorted(referenced_features):
+ for _file, symbols in referenced_symbols.items():
+ for symbol in symbols:
+ inv_map[symbol] = inv_map.get(symbol, set())
+ inv_map[symbol].add(_file)
+ referenced_symbols = inv_map
+
+ undefined = {} # {symbol: [files]}
+ for symbol in sorted(referenced_symbols):
# filter some false positives
- if feature == "FOO" or feature == "BAR" or \
- feature == "FOO_BAR" or feature == "XXX":
+ if symbol == "FOO" or symbol == "BAR" or \
+ symbol == "FOO_BAR" or symbol == "XXX":
continue
- if feature not in defined_features:
- if feature.endswith("_MODULE"):
+ if symbol not in defined_symbols:
+ if symbol.endswith("_MODULE"):
# avoid false positives for kernel modules
- if feature[:-len("_MODULE")] in defined_features:
+ if symbol[:-len("_MODULE")] in defined_symbols:
continue
- undefined[feature] = referenced_features.get(feature)
- return undefined, defined_features
+ undefined[symbol] = referenced_symbols.get(symbol)
+ return undefined, defined_symbols
def parse_source_files(source_files):
"""Parse each source file in @source_files and return dictionary with source
files as keys and lists of references Kconfig symbols as values."""
- referenced_features = dict()
+ referenced_symbols = dict()
for sfile in source_files:
- referenced_features[sfile] = parse_source_file(sfile)
- return referenced_features
+ referenced_symbols[sfile] = parse_source_file(sfile)
+ return referenced_symbols
def parse_source_file(sfile):
- """Parse @sfile and return a list of referenced Kconfig features."""
+ """Parse @sfile and return a list of referenced Kconfig symbols."""
lines = []
references = []
if not os.path.exists(sfile):
return references
- with open(sfile, "r") as stream:
+ with open(sfile, "r", encoding='utf-8', errors='replace') as stream:
lines = stream.readlines()
for line in lines:
- if not "CONFIG_" in line:
+ if "CONFIG_" not in line:
continue
- features = REGEX_SOURCE_FEATURE.findall(line)
- for feature in features:
- if not REGEX_FILTER_FEATURES.search(feature):
+ symbols = REGEX_SOURCE_SYMBOL.findall(line)
+ for symbol in symbols:
+ if not REGEX_FILTER_SYMBOLS.search(symbol):
continue
- references.append(feature)
+ references.append(symbol)
return references
-def get_features_in_line(line):
- """Return mentioned Kconfig features in @line."""
- return REGEX_FEATURE.findall(line)
+def get_symbols_in_line(line):
+ """Return mentioned Kconfig symbols in @line."""
+ return REGEX_SYMBOL.findall(line)
def parse_kconfig_files(args):
@@ -404,21 +414,21 @@ def parse_kconfig_files(args):
pattern."""
kconfig_files = args[0]
ignore = args[1]
- defined_features = []
- referenced_features = dict()
+ defined_symbols = []
+ referenced_symbols = dict()
for kfile in kconfig_files:
defined, references = parse_kconfig_file(kfile)
- defined_features.extend(defined)
+ defined_symbols.extend(defined)
if ignore and re.match(ignore, kfile):
# do not collect references for files that match the ignore pattern
continue
- referenced_features[kfile] = references
- return (defined_features, referenced_features)
+ referenced_symbols[kfile] = references
+ return (defined_symbols, referenced_symbols)
def parse_kconfig_file(kfile):
- """Parse @kfile and update feature definitions and references."""
+ """Parse @kfile and update symbol definitions and references."""
lines = []
defined = []
references = []
@@ -427,7 +437,7 @@ def parse_kconfig_file(kfile):
if not os.path.exists(kfile):
return defined, references
- with open(kfile, "r") as stream:
+ with open(kfile, "r", encoding='utf-8', errors='replace') as stream:
lines = stream.readlines()
for i in range(len(lines)):
@@ -436,8 +446,8 @@ def parse_kconfig_file(kfile):
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
- feature_def = REGEX_KCONFIG_DEF.findall(line)
- defined.append(feature_def[0])
+ symbol_def = REGEX_KCONFIG_DEF.findall(line)
+ defined.append(symbol_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
@@ -446,18 +456,18 @@ def parse_kconfig_file(kfile):
pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
- features = get_features_in_line(line)
+ symbols = get_symbols_in_line(line)
# multi-line statements
while line.endswith("\\"):
i += 1
line = lines[i]
line = line.strip('\n')
- features.extend(get_features_in_line(line))
- for feature in set(features):
- if REGEX_NUMERIC.match(feature):
+ symbols.extend(get_symbols_in_line(line))
+ for symbol in set(symbols):
+ if REGEX_NUMERIC.match(symbol):
# ignore numeric values
continue
- references.append(feature)
+ references.append(symbol)
return defined, references
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 4f2e9049e8fa..93721f3c91bf 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -212,6 +212,7 @@ my $anon_struct_union = 0;
my $type_constant = '\%([-_\w]+)';
my $type_func = '(\w+)\(\)';
my $type_param = '\@(\w+)';
+my $type_fp_param = '\@(\w+)\(\)'; # Special RST handling for func ptr params
my $type_struct = '\&((struct\s*)*[_\w]+)';
my $type_struct_xml = '\\&amp;((struct\s*)*[_\w]+)';
my $type_env = '(\$\w+)';
@@ -292,6 +293,7 @@ my @highlights_rst = (
# Note: need to escape () to avoid func matching later
[$type_member_func, "\\:c\\:type\\:`\$1\$2\\\\(\\\\) <\$1>`"],
[$type_member, "\\:c\\:type\\:`\$1\$2 <\$1>`"],
+ [$type_fp_param, "**\$1\\\\(\\\\)**"],
[$type_func, "\\:c\\:func\\:`\$1()`"],
[$type_struct_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
[$type_enum_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
@@ -412,7 +414,7 @@ my $doc_com_body = '\s*\* ?';
my $doc_decl = $doc_com . '(\w+)';
# @params and a strictly limited set of supported section names
my $doc_sect = $doc_com .
- '\s*(\@\w+|description|context|returns?|notes?|examples?)\s*:(.*)';
+ '\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:(.*)';
my $doc_content = $doc_com_body . '(.*)';
my $doc_block = $doc_com . 'DOC:\s*(.*)?';
my $doc_inline_start = '^\s*/\*\*\s*$';
@@ -1831,13 +1833,22 @@ sub output_function_rst(%) {
my %args = %{$_[0]};
my ($parameter, $section);
my $oldprefix = $lineprefix;
- my $start;
-
- print ".. c:function:: ";
+ my $start = "";
+
+ if ($args{'typedef'}) {
+ print ".. c:type:: ". $args{'function'} . "\n\n";
+ print_lineno($declaration_start_line);
+ print " **Typedef**: ";
+ $lineprefix = "";
+ output_highlight_rst($args{'purpose'});
+ $start = "\n\n**Syntax**\n\n ``";
+ } else {
+ print ".. c:function:: ";
+ }
if ($args{'functiontype'} ne "") {
- $start = $args{'functiontype'} . " " . $args{'function'} . " (";
+ $start .= $args{'functiontype'} . " " . $args{'function'} . " (";
} else {
- $start = $args{'function'} . " (";
+ $start .= $args{'function'} . " (";
}
print $start;
@@ -1849,9 +1860,6 @@ sub output_function_rst(%) {
$count++;
$type = $args{'parametertypes'}{$parameter};
- # RST doesn't like address_space tags at function prototypes
- $type =~ s/__(user|kernel|iomem|percpu|pmem|rcu)\s*//;
-
if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
# pointer-to-function
print $1 . $parameter . ") (" . $2;
@@ -1859,11 +1867,15 @@ sub output_function_rst(%) {
print $type . " " . $parameter;
}
}
- print ")\n\n";
- print_lineno($declaration_start_line);
- $lineprefix = " ";
- output_highlight_rst($args{'purpose'});
- print "\n";
+ if ($args{'typedef'}) {
+ print ");``\n\n";
+ } else {
+ print ")\n\n";
+ print_lineno($declaration_start_line);
+ $lineprefix = " ";
+ output_highlight_rst($args{'purpose'});
+ print "\n";
+ }
print "**Parameters**\n\n";
$lineprefix = " ";
@@ -2003,7 +2015,7 @@ sub output_struct_rst(%) {
($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
$type = $args{'parametertypes'}{$parameter};
print_lineno($parameterdesc_start_lines{$parameter_name});
- print "``$type $parameter``\n";
+ print "``" . $parameter . "``\n";
output_highlight_rst($args{'parameterdescs'}{$parameter_name});
print "\n";
}
@@ -2193,7 +2205,9 @@ sub dump_typedef($$) {
$x =~ s@/\*.*?\*/@@gos; # strip comments.
# Parse function prototypes
- if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/) {
+ if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/ ||
+ $x =~ /typedef\s+(\w+)\s*(\w\S+)\s*\s*\((.*)\);/) {
+
# Function typedefs
$return_type = $1;
$declaration_name = $2;
@@ -2204,6 +2218,7 @@ sub dump_typedef($$) {
output_declaration($declaration_name,
'function',
{'function' => $declaration_name,
+ 'typedef' => 1,
'module' => $modulename,
'functiontype' => $return_type,
'parameterlist' => \@parameterlist,
@@ -2338,6 +2353,7 @@ sub push_parameter($$$) {
if ($type eq "" && $param =~ /\.\.\.$/)
{
+ $param = "...";
if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") {
$parameterdescs{$param} = "variable arguments";
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 48958d3cec9e..bd8349759095 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -888,7 +888,7 @@ static void check_section(const char *modname, struct elf_info *elf,
#define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
- ".kprobes.text"
+ ".kprobes.text", ".cpuidle.text"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
".fixup", ".entry.text", ".exception.text", ".text.*", \
".coldtext"
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index a68f03133df9..5423a58d1b06 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -365,6 +365,7 @@ is_mcounted_section_name(char const *const txtname)
strcmp(".irqentry.text", txtname) == 0 ||
strcmp(".softirqentry.text", txtname) == 0 ||
strcmp(".kprobes.text", txtname) == 0 ||
+ strcmp(".cpuidle.text", txtname) == 0 ||
strcmp(".text.unlikely", txtname) == 0;
}
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 2d48011bc362..faac4b10d8ea 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -136,6 +136,7 @@ my %text_sections = (
".irqentry.text" => 1,
".softirqentry.text" => 1,
".kprobes.text" => 1,
+ ".cpuidle.text" => 1,
".text.unlikely" => 1,
);
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index fa79c6d2a5b8..163c720d3f2b 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -629,7 +629,6 @@ mispelt||misspelt
miximum||maximum
mmnemonic||mnemonic
mnay||many
-modeled||modelled
modulues||modules
monochorome||monochrome
monochromo||monochrome
diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
new file mode 100755
index 000000000000..9ff8ac5fc53c
--- /dev/null
+++ b/scripts/tracing/ftrace-bisect.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+#
+# Here's how to use this:
+#
+# This script is used to help find functions that are being traced by the
+# function tracer or function graph tracer and that cause the machine to
+# reboot, hang, or crash. Here are the steps to take.
+#
+# First, determine if function tracing is working with a single function:
+#
+# (note, if this is a problem with function_graph tracing, then simply
+# replace "function" with "function_graph" in the following steps).
+#
+# # cd /sys/kernel/debug/tracing
+# # echo schedule > set_ftrace_filter
+# # echo function > current_tracer
+#
+# If this works, then we know that something is being traced that shouldn't be.
+#
+# # echo nop > current_tracer
+#
+# # cat available_filter_functions > ~/full-file
+# # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
+# # cat ~/test-file > set_ftrace_filter
+#
+# *** Note *** this will take several minutes. Setting multiple functions is
+# an O(n^2) operation, and we are dealing with thousands of functions. So go
+# have coffee, talk with your coworkers, read facebook. And eventually, this
+# operation will end.
+#
+# # echo function > current_tracer
+#
+# If it crashes, we know that ~/test-file has a bad function.
+#
+# Reboot back to test kernel.
+#
+# # cd /sys/kernel/debug/tracing
+# # mv ~/test-file ~/full-file
+#
+# If it didn't crash.
+#
+# # echo nop > current_tracer
+# # mv ~/non-test-file ~/full-file
+#
+# Get rid of the other test files from the previous run (or save them off somewhere).
+# # rm -f ~/test-file ~/non-test-file
+#
+# And start again:
+#
+# # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
+#
+# The good thing is, because this cuts the number of functions in ~/test-file
+# by half, the cat of it into set_ftrace_filter takes half as long each
+# iteration, so don't talk so much at the water cooler the second time.
+#
+# Eventually, if you did this correctly, you will get down to the problem
+# function, and all you need to do is to notrace it.
+#
+# To figure out if the problem function really is the bad one, just do:
+#
+# # echo <problem-function> > set_ftrace_notrace
+# # echo > set_ftrace_filter
+# # echo function > current_tracer
+#
+# And if it doesn't crash, we are done.
+#
+# If it does crash, do this again (there's more than one problem function)
+# but you need to echo the problem function(s) into set_ftrace_notrace before
+# enabling function tracing in the above steps. Or if you can compile the
+# kernel, annotate the problem functions with "notrace" and start again.
+#
+
+
+if [ $# -ne 3 ]; then
+ echo 'usage: ftrace-bisect full-file test-file non-test-file'
+ exit
+fi
+
+full=$1
+test=$2
+nontest=$3
+
+x=`cat $full | wc -l`
+if [ $x -eq 1 ]; then
+ echo "There's only one function left, must be the bad one"
+ cat $full
+ exit 0
+fi
+
+let x=$x/2
+let y=$x+1
+
+if [ ! -f $full ]; then
+ echo "$full does not exist"
+ exit 1
+fi
+
+if [ -f $test ]; then
+ echo -n "$test exists, delete it? [y/N]"
+ read a
+ if [ "$a" != "y" -a "$a" != "Y" ]; then
+ exit 1
+ fi
+fi
+
+if [ -f $nontest ]; then
+ echo -n "$nontest exists, delete it? [y/N]"
+ read a
+ if [ "$a" != "y" -a "$a" != "Y" ]; then
+ exit 1
+ fi
+fi
+
+sed -ne "1,${x}p" $full > $test
+sed -ne "$y,\$p" $full > $nontest
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 0d8bd29b1bd6..430b201f3e25 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -1,193 +1,89 @@
-#!/bin/sh
+#!/bin/awk -f
# Before running this script please ensure that your PATH is
# typical as you use for compilation/installation. I use
# /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may
# differ on your system.
-#
-echo 'If some fields are empty or look unusual you may have an old version.'
-echo 'Compare to the current minimal requirements in Documentation/Changes.'
-echo ' '
-uname -a
-echo ' '
-
-gcc -dumpversion 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("GNU C\t\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-make --version 2>&1 |
-awk '/GNU Make/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("GNU Make\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-ld -v 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Binutils\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-mount --version 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- $0 = substr($0,RSTART,RLENGTH)
- printf("Util-linux\t\t%s\nMount\t\t\t%s\n",$0,$0)
-}'
-
-depmod -V 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Module-init-tools\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-tune2fs 2>&1 |
-awk '/^tune2fs/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("E2fsprogs\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-fsck.jfs -V 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Jfsutils\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-reiserfsck -V 2>&1 |
-awk '/^reiserfsck/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Reiserfsprogs\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-fsck.reiser4 -V 2>&1 | grep ^fsck.reiser4 | awk \
-'NR==1{print "reiser4progs ", $2}'
-
-xfs_db -V 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Xfsprogs\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-pccardctl -V 2>&1 |
-awk '/pcmciautils/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Pcmciautils\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-cardmgr -V 2>&1| grep version | awk \
-'NR==1{print "pcmcia-cs ", $3}'
-
-quota -V 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Quota-tools\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-pppd --version 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("PPP\t\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-isdnctrl 2>&1 | grep version | awk \
-'NR==1{print "isdn4k-utils ", $NF}'
-
-showmount --version 2>&1 | grep nfs-utils | awk \
-'NR==1{print "nfs-utils ", $NF}'
-
-test -r /proc/self/maps &&
-sed '
- /.*libc-\(.*\)\.so$/!d
- s//Linux C Library\t\t\1/
- q
-' /proc/self/maps
-
-ldd --version 2>&1 |
-awk '/^ldd/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Dynamic linker (ldd)\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-libcpp=`ldconfig -p 2>/dev/null |
- awk '/(libg|stdc)[+]+\.so/ {
- print $NF
- exit
+BEGIN {
+ usage = "If some fields are empty or look unusual you may have an old version.\n"
+ usage = usage "Compare to the current minimal requirements in Documentation/Changes.\n"
+ print usage
+
+ system("uname -a")
+ printf("\n")
+
+ printversion("GNU C", version("gcc -dumpversion 2>&1"))
+ printversion("GNU Make", version("make --version 2>&1"))
+ printversion("Binutils", version("ld -v 2>&1"))
+ printversion("Util-linux", version("mount --version 2>&1"))
+ printversion("Mount", version("mount --version 2>&1"))
+ printversion("Module-init-tools", version("depmod -V 2>&1"))
+ printversion("E2fsprogs", version("tune2fs 2>&1"))
+ printversion("Jfsutils", version("fsck.jfs -V 2>&1"))
+ printversion("Reiserfsprogs", version("reiserfsck -V 2>&1"))
+ printversion("Reiser4fsprogs", version("fsck.reiser4 -V 2>&1"))
+ printversion("Xfsprogs", version("xfs_db -V 2>&1"))
+ printversion("Pcmciautils", version("pccardctl -V 2>&1"))
+ printversion("Pcmcia-cs", version("cardmgr -V 2>&1"))
+ printversion("Quota-tools", version("quota -V 2>&1"))
+ printversion("PPP", version("pppd --version 2>&1"))
+ printversion("Isdn4k-utils", version("isdnctrl 2>&1"))
+ printversion("Nfs-utils", version("showmount --version 2>&1"))
+
+ if (system("test -r /proc/self/maps") == 0) {
+ while (getline <"/proc/self/maps" > 0) {
+ n = split($0, procmaps, "/")
+ if (/libc.*so$/ && match(procmaps[n], /[0-9]+([.]?[0-9]+)+/)) {
+ ver = substr(procmaps[n], RSTART, RLENGTH)
+ printversion("Linux C Library", ver)
+ break
+ }
+ }
}
-'`
-test -r "$libcpp" &&
-ls -l $libcpp |
-sed '
- s!.*so\.!!
- s!^!Linux C++ Library\t!
-'
-ps --version 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Procps\t\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-ifconfig --version 2>&1 |
-awk '/tools/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Net-tools\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-loadkeys -V 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- $0 = substr($0,RSTART,RLENGTH)
- printf("Kbd\t\t\t%s\nConsole-tools\t\t%s\n",$0,$0)
-}'
-oprofiled --version 2>&1 | awk \
-'(NR==1 && ($2 == "oprofile")) {print "oprofile ", $3}'
+ printversion("Dynamic linker (ldd)", version("ldd --version 2>&1"))
-expr --v 2>&1 |
-awk '/^expr/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Sh-utils\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
-
-udevadm --version 2>&1 |
-awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Udev\t\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
+ while ("ldconfig -p 2>/dev/null" | getline > 0) {
+ if (/(libg|stdc)[+]+\.so/) {
+ libcpp = $NF
+ break
+ }
+ }
+ if (system("test -r " libcpp) == 0)
+ printversion("Linux C++ Library", version("readlink " libcpp))
+
+ printversion("Procps", version("ps --version 2>&1"))
+ printversion("Net-tools", version("ifconfig --version 2>&1"))
+ printversion("Kbd", version("loadkeys -V 2>&1"))
+ printversion("Console-tools", version("loadkeys -V 2>&1"))
+ printversion("Oprofile", version("oprofiled --version 2>&1"))
+ printversion("Sh-utils", version("expr --v 2>&1"))
+ printversion("Udev", version("udevadm --version 2>&1"))
+ printversion("Wireless-tools", version("iwconfig --version 2>&1"))
+
+ if (system("test -r /proc/modules") == 0) {
+ while ("sort /proc/modules" | getline > 0) {
+ mods = mods sep $1
+ sep = " "
+ }
+ printversion("Modules Loaded", mods)
+ }
+}
-iwconfig --version 2>&1 |
-awk '/version/{
- match($0, /[0-9]+([.]?[0-9]+)+/)
- printf("Wireless-tools\t\t%s\n",
- substr($0,RSTART,RLENGTH))
-}'
+function version(cmd, ver) {
+ while (cmd | getline > 0) {
+ if (!/ver_linux/ && match($0, /[0-9]+([.]?[0-9]+)+/)) {
+ ver = substr($0, RSTART, RLENGTH)
+ break
+ }
+ }
+ close(cmd)
+ return ver
+}
-test -e /proc/modules &&
-sort /proc/modules |
-sed '
- s/ .*//
- H
-${
- g
- s/^\n/Modules Loaded\t\t/
- y/\n/ /
- q
+function printversion(name, value, ofmt) {
+ if (value != "") {
+ ofmt = "%-20s\t%s\n"
+ printf(ofmt, name, value)
+ }
}
- d
-'
diff --git a/security/inode.c b/security/inode.c
index e3df905ab5b1..acc3e9c8d5a7 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -156,12 +156,11 @@ EXPORT_SYMBOL_GPL(securityfs_create_file);
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here). If an error occurs, %NULL will be returned.
+ * you are responsible here). If an error occurs, the function will return
+ * the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
- * returned. It is not wise to check for this value, but rather, check for
- * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
- * code.
+ * returned.
*/
struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
{
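
A minimal caller sketch under the documented behaviour above (the "example" names are hypothetical): with the ERR_PTR() convention, callers now test the result with IS_ERR() rather than comparing against NULL, and -ENODEV simply means securityfs is not enabled.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/security.h>

static struct dentry *example_dir;	/* hypothetical module state */

static int __init example_init(void)
{
	example_dir = securityfs_create_dir("example", NULL);
	if (IS_ERR(example_dir))
		return PTR_ERR(example_dir);	/* includes -ENODEV */
	return 0;
}
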
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index cccbf3068cdc..37f04dadc8d6 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -99,7 +99,7 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
}
return ret;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* ipv6_skb_to_auditdata : fill auditdata from skb
* @skb : the skb
@@ -220,7 +220,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
*/
BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
- audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+ audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
switch (a->type) {
@@ -245,6 +245,19 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
break;
}
+ case LSM_AUDIT_DATA_FILE: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.file->f_path);
+
+ inode = file_inode(a->u.file);
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ break;
+ }
case LSM_AUDIT_DATA_IOCTL_OP: {
struct inode *inode;
@@ -257,7 +270,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
- audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
+ audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
break;
}
case LSM_AUDIT_DATA_DENTRY: {
@@ -294,7 +307,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
case LSM_AUDIT_DATA_TASK: {
struct task_struct *tsk = a->u.tsk;
if (tsk) {
- pid_t pid = task_pid_nr(tsk);
+ pid_t pid = task_tgid_nr(tsk);
if (pid) {
char comm[sizeof(tsk->comm)];
audit_log_format(ab, " opid=%d ocomm=", pid);
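
The defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) conversion above recurs throughout this series; a small sketch of the equivalence, using a made-up CONFIG_EXAMPLE symbol:

#include <linux/kconfig.h>

/* old form: true for built-in (=y) or modular (=m) configurations */
#if defined(CONFIG_EXAMPLE) || defined(CONFIG_EXAMPLE_MODULE)
#endif

/* new form: IS_ENABLED() covers both cases with a single test */
#if IS_ENABLED(CONFIG_EXAMPLE)
#endif
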
diff --git a/security/security.c b/security/security.c
index 4838e7fefa1f..f825304f04a7 100644
--- a/security/security.c
+++ b/security/security.c
@@ -364,6 +364,15 @@ int security_dentry_init_security(struct dentry *dentry, int mode,
}
EXPORT_SYMBOL(security_dentry_init_security);
+int security_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old, struct cred *new)
+{
+ return call_int_hook(dentry_create_files_as, 0, dentry, mode,
+ name, old, new);
+}
+EXPORT_SYMBOL(security_dentry_create_files_as);
+
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const initxattrs initxattrs, void *fs_data)
@@ -748,6 +757,18 @@ void security_inode_getsecid(struct inode *inode, u32 *secid)
call_void_hook(inode_getsecid, inode, secid);
}
+int security_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ return call_int_hook(inode_copy_up, 0, src, new);
+}
+EXPORT_SYMBOL(security_inode_copy_up);
+
+int security_inode_copy_up_xattr(const char *name)
+{
+ return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
+}
+EXPORT_SYMBOL(security_inode_copy_up_xattr);
+
int security_file_permission(struct file *file, int mask)
{
int ret;
@@ -1623,6 +1644,8 @@ struct security_hook_heads security_hook_heads = {
LIST_HEAD_INIT(security_hook_heads.sb_parse_opts_str),
.dentry_init_security =
LIST_HEAD_INIT(security_hook_heads.dentry_init_security),
+ .dentry_create_files_as =
+ LIST_HEAD_INIT(security_hook_heads.dentry_create_files_as),
#ifdef CONFIG_SECURITY_PATH
.path_unlink = LIST_HEAD_INIT(security_hook_heads.path_unlink),
.path_mkdir = LIST_HEAD_INIT(security_hook_heads.path_mkdir),
@@ -1684,6 +1707,10 @@ struct security_hook_heads security_hook_heads = {
LIST_HEAD_INIT(security_hook_heads.inode_listsecurity),
.inode_getsecid =
LIST_HEAD_INIT(security_hook_heads.inode_getsecid),
+ .inode_copy_up =
+ LIST_HEAD_INIT(security_hook_heads.inode_copy_up),
+ .inode_copy_up_xattr =
+ LIST_HEAD_INIT(security_hook_heads.inode_copy_up_xattr),
.file_permission =
LIST_HEAD_INIT(security_hook_heads.file_permission),
.file_alloc_security =
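
A hedged sketch of how a copy-up path might consume the new hook (the function and variable names here are illustrative, not the actual overlayfs call sites): security_inode_copy_up() lets the LSM adjust the credentials used to create the upper copy, and security_inode_copy_up_xattr() later decides per-xattr whether a lower value is carried over.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/security.h>

static int example_copy_up(struct dentry *lower_dentry)
{
	const struct cred *old_cred;
	struct cred *copy_cred;
	int err;

	copy_cred = prepare_creds();
	if (!copy_cred)
		return -ENOMEM;

	/* give the LSM a chance to set e.g. the create SID for the copy */
	err = security_inode_copy_up(lower_dentry, &copy_cred);
	if (err) {
		put_cred(copy_cred);
		return err;
	}

	old_cred = override_creds(copy_cred);
	/* ... create the upper object and copy data/xattrs here ... */
	revert_creds(old_cred);
	put_cred(copy_cred);
	return 0;
}
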
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8691e92f27e5..ea7e3efbe0f7 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -93,41 +93,3 @@ config SECURITY_SELINUX_CHECKREQPROT_VALUE
via /selinux/checkreqprot if authorized by policy.
If you are unsure how to answer this question, answer 0.
-
-config SECURITY_SELINUX_POLICYDB_VERSION_MAX
- bool "NSA SELinux maximum supported policy format version"
- depends on SECURITY_SELINUX
- default n
- help
- This option enables the maximum policy format version supported
- by SELinux to be set to a particular value. This value is reported
- to userspace via /selinux/policyvers and used at policy load time.
- It can be adjusted downward to support legacy userland (init) that
- does not correctly handle kernels that support newer policy versions.
-
- Examples:
- For the Fedora Core 3 or 4 Linux distributions, enable this option
- and set the value via the next option. For Fedora Core 5 and later,
- do not enable this option.
-
- If you are unsure how to answer this question, answer N.
-
-config SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
- int "NSA SELinux maximum supported policy format version value"
- depends on SECURITY_SELINUX_POLICYDB_VERSION_MAX
- range 15 23
- default 19
- help
- This option sets the value for the maximum policy format version
- supported by SELinux.
-
- Examples:
- For Fedora Core 3, use 18.
- For Fedora Core 4, use 19.
-
- If you are unsure how to answer this question, look for the
- policy format version supported by your policy toolchain, by
- running 'checkpolicy -V'. Or look at what policy you have
- installed under /etc/selinux/$SELINUXTYPE/policy, where
- SELINUXTYPE is defined in your /etc/selinux/config.
-
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 13185a6c266a..2205ea27aa0a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1761,8 +1761,8 @@ static inline int file_path_has_perm(const struct cred *cred,
{
struct common_audit_data ad;
- ad.type = LSM_AUDIT_DATA_PATH;
- ad.u.path = file->f_path;
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
return inode_has_perm(cred, file_inode(file), av, &ad);
}
@@ -1784,8 +1784,8 @@ static int file_has_perm(const struct cred *cred,
u32 sid = cred_sid(cred);
int rc;
- ad.type = LSM_AUDIT_DATA_PATH;
- ad.u.path = file->f_path;
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
if (sid != fsec->sid) {
rc = avc_has_perm(sid, fsec->sid,
@@ -1808,13 +1808,13 @@ out:
/*
* Determine the label for an inode that might be unioned.
*/
-static int selinux_determine_inode_label(struct inode *dir,
- const struct qstr *name,
- u16 tclass,
- u32 *_new_isid)
+static int
+selinux_determine_inode_label(const struct task_security_struct *tsec,
+ struct inode *dir,
+ const struct qstr *name, u16 tclass,
+ u32 *_new_isid)
{
const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
- const struct task_security_struct *tsec = current_security();
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) {
@@ -1857,8 +1857,8 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- rc = selinux_determine_inode_label(dir, &dentry->d_name, tclass,
- &newsid);
+ rc = selinux_determine_inode_label(current_security(), dir,
+ &dentry->d_name, tclass, &newsid);
if (rc)
return rc;
@@ -2365,8 +2365,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
new_tsec->sid = old_tsec->sid;
}
- ad.type = LSM_AUDIT_DATA_PATH;
- ad.u.path = bprm->file->f_path;
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = bprm->file;
if (new_tsec->sid == old_tsec->sid) {
rc = avc_has_perm(old_tsec->sid, isec->sid,
@@ -2838,7 +2838,8 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
u32 newsid;
int rc;
- rc = selinux_determine_inode_label(d_inode(dentry->d_parent), name,
+ rc = selinux_determine_inode_label(current_security(),
+ d_inode(dentry->d_parent), name,
inode_mode_to_security_class(mode),
&newsid);
if (rc)
@@ -2847,6 +2848,27 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
return security_sid_to_context(newsid, (char **)ctx, ctxlen);
}
+static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ u32 newsid;
+ int rc;
+ struct task_security_struct *tsec;
+
+ rc = selinux_determine_inode_label(old->security,
+ d_inode(dentry->d_parent), name,
+ inode_mode_to_security_class(mode),
+ &newsid);
+ if (rc)
+ return rc;
+
+ tsec = new->security;
+ tsec->create_sid = newsid;
+ return 0;
+}
+
static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const char **name,
@@ -2863,7 +2885,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
- rc = selinux_determine_inode_label(
+ rc = selinux_determine_inode_label(current_security(),
dir, qstr,
inode_mode_to_security_class(inode->i_mode),
&newsid);
@@ -3293,6 +3315,41 @@ static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
*secid = isec->sid;
}
+static int selinux_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ u32 sid;
+ struct task_security_struct *tsec;
+ struct cred *new_creds = *new;
+
+ if (new_creds == NULL) {
+ new_creds = prepare_creds();
+ if (!new_creds)
+ return -ENOMEM;
+ }
+
+ tsec = new_creds->security;
+ /* Get label from overlay inode and set it in create_sid */
+ selinux_inode_getsecid(d_inode(src), &sid);
+ tsec->create_sid = sid;
+ *new = new_creds;
+ return 0;
+}
+
+static int selinux_inode_copy_up_xattr(const char *name)
+{
+ /* The copy_up hook above sets the initial context on an inode, but we
+ * don't then want to overwrite it by blindly copying all the lower
+ * xattrs up. Instead, we have to filter out SELinux-related xattrs.
+ */
+ if (strcmp(name, XATTR_NAME_SELINUX) == 0)
+ return 1; /* Discard */
+ /*
+ * Any other attribute apart from SELINUX is not claimed, supported
+	 * Any attribute other than SELINUX is neither claimed nor supported
+	 * by SELinux.
+ return -EOPNOTSUPP;
+}
+
/* file security operations */
static int selinux_revalidate_file_permission(struct file *file, int mask)
@@ -3776,8 +3833,8 @@ static int selinux_kernel_module_from_file(struct file *file)
/* finit_module */
- ad.type = LSM_AUDIT_DATA_PATH;
- ad.u.path = file->f_path;
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
fsec = file->f_security;
if (sid != fsec->sid) {
@@ -3984,7 +4041,7 @@ out:
return ret;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv6(struct sk_buff *skb,
@@ -4075,7 +4132,7 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
&ad->u.net->v4info.daddr);
goto okay;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
ret = selinux_parse_skb_ipv6(skb, ad, proto);
if (ret)
@@ -5029,7 +5086,7 @@ static unsigned int selinux_ipv4_forward(void *priv,
return selinux_ip_forward(skb, state->in, PF_INET);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int selinux_ipv6_forward(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -5087,7 +5144,7 @@ static unsigned int selinux_ipv4_output(void *priv,
return selinux_ip_output(skb, PF_INET);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int selinux_ipv6_output(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -5273,7 +5330,7 @@ static unsigned int selinux_ipv4_postroute(void *priv,
return selinux_ip_postroute(skb, state->out, PF_INET);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int selinux_ipv6_postroute(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -6062,6 +6119,7 @@ static struct security_hook_list selinux_hooks[] = {
LSM_HOOK_INIT(sb_parse_opts_str, selinux_parse_opts_str),
LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
+ LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
LSM_HOOK_INIT(inode_free_security, selinux_inode_free_security),
@@ -6088,6 +6146,8 @@ static struct security_hook_list selinux_hooks[] = {
LSM_HOOK_INIT(inode_setsecurity, selinux_inode_setsecurity),
LSM_HOOK_INIT(inode_listsecurity, selinux_inode_listsecurity),
LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
+ LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up),
+ LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr),
LSM_HOOK_INIT(file_permission, selinux_file_permission),
LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
@@ -6317,7 +6377,7 @@ static struct nf_hook_ops selinux_nf_ops[] = {
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
{
.hook = selinux_ipv6_postroute,
.pf = NFPROTO_IPV6,
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 38feb55d531a..308a286c6cbe 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -39,11 +39,7 @@
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
-#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
-#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
-#else
#define POLICYDB_VERSION_MAX POLICYDB_VERSION_XPERMS_IOCTL
-#endif
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 456e1a9bcfde..34afeadd9e73 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -242,6 +242,8 @@ int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
goto err;
len = le32_to_cpu(buf[2]);
+ if (((len == 0) || (len == (u32)-1)))
+ goto err;
rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 894b6cdc11c5..7d10e5d418bb 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -374,6 +374,9 @@ int ebitmap_read(struct ebitmap *e, void *fp)
goto ok;
}
+ if (e->highbit && !count)
+ goto bad;
+
for (i = 0; i < count; i++) {
rc = next_entry(&startbit, fp, sizeof(u32));
if (rc < 0) {
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 992a31530825..d719db4219cd 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -527,9 +527,9 @@ static int policydb_index(struct policydb *p)
printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
if (p->mls_enabled)
- printk(", %d sens, %d cats", p->p_levels.nprim,
+ printk(KERN_CONT ", %d sens, %d cats", p->p_levels.nprim,
p->p_cats.nprim);
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_DEBUG "SELinux: %d classes, %d rules\n",
p->p_classes.nprim, p->te_avtab.nel);
@@ -541,21 +541,21 @@ static int policydb_index(struct policydb *p)
rc = -ENOMEM;
p->class_val_to_struct =
- kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)),
+ kzalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)),
GFP_KERNEL);
if (!p->class_val_to_struct)
goto out;
rc = -ENOMEM;
p->role_val_to_struct =
- kmalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)),
+ kzalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)),
GFP_KERNEL);
if (!p->role_val_to_struct)
goto out;
rc = -ENOMEM;
p->user_val_to_struct =
- kmalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)),
+ kzalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)),
GFP_KERNEL);
if (!p->user_val_to_struct)
goto out;
@@ -964,7 +964,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c)
* Role must be authorized for the type.
*/
role = p->role_val_to_struct[c->role - 1];
- if (!ebitmap_get_bit(&role->types, c->type - 1))
+ if (!role || !ebitmap_get_bit(&role->types, c->type - 1))
/* role may not be associated with type */
return 0;
@@ -1094,6 +1094,9 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
int rc;
char *str;
+ if ((len == 0) || (len == (u32)-1))
+ return -EINVAL;
+
str = kmalloc(len + 1, flags);
if (!str)
return -ENOMEM;
@@ -2414,6 +2417,7 @@ int policydb_read(struct policydb *p, void *fp)
} else
tr->tclass = p->process_class;
+ rc = -EINVAL;
if (!policydb_role_isvalid(p, tr->role) ||
!policydb_type_isvalid(p, tr->type) ||
!policydb_class_isvalid(p, tr->tclass) ||
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index 271adae81796..923b120e0fa5 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -40,3 +40,15 @@ config SECURITY_SMACK_NETFILTER
This enables security marking of network packets using
Smack labels.
If you are unsure how to answer this question, answer N.
+
+config SECURITY_SMACK_APPEND_SIGNALS
+ bool "Treat delivering signals as an append operation"
+ depends on SECURITY_SMACK
+ default n
+ help
+ Sending a signal has been treated as a write operation to the
+ receiving process. If this option is selected, the delivery
+ will be an append operation instead. This makes it possible
+ to differentiate between delivering a network packet and
+ delivering a signal in the Smack rules.
+ If you are unsure how to answer this question, answer N.
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 26e58f1804b1..51fd30192c08 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -256,6 +256,16 @@ enum {
#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
#define MAY_BRINGUP 0x00004000 /* Report use of this rule */
+/*
+ * The policy for delivering signals is configurable.
+ * It is usually "write", but can be "append".
+ */
+#ifdef CONFIG_SECURITY_SMACK_APPEND_SIGNALS
+#define MAY_DELIVER MAY_APPEND /* Signal delivery requires append */
+#else
+#define MAY_DELIVER MAY_WRITE /* Signal delivery requires write */
+#endif
+
#define SMACK_BRINGUP_ALLOW 1 /* Allow bringup mode */
#define SMACK_UNCONFINED_SUBJECT 2 /* Allow unconfined label */
#define SMACK_UNCONFINED_OBJECT 3 /* Allow unconfined label */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 87a9741b0d02..caec2256ab22 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1857,14 +1857,14 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
	/* we don't log here as rc can be overridden */
skp = file->f_security;
- rc = smk_access(skp, tkp, MAY_WRITE, NULL);
- rc = smk_bu_note("sigiotask", skp, tkp, MAY_WRITE, rc);
+ rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
+ rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
rc = 0;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
- smack_log(skp->smk_known, tkp->smk_known, MAY_WRITE, rc, &ad);
+ smack_log(skp->smk_known, tkp->smk_known, MAY_DELIVER, rc, &ad);
return rc;
}
@@ -2265,8 +2265,8 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
* can write the receiver.
*/
if (secid == 0) {
- rc = smk_curacc(tkp, MAY_WRITE, &ad);
- rc = smk_bu_task(p, MAY_WRITE, rc);
+ rc = smk_curacc(tkp, MAY_DELIVER, &ad);
+ rc = smk_bu_task(p, MAY_DELIVER, rc);
return rc;
}
/*
@@ -2275,8 +2275,8 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
* we can't take privilege into account.
*/
skp = smack_from_secid(secid);
- rc = smk_access(skp, tkp, MAY_WRITE, &ad);
- rc = smk_bu_note("USB signal", skp, tkp, MAY_WRITE, rc);
+ rc = smk_access(skp, tkp, MAY_DELIVER, &ad);
+ rc = smk_bu_note("USB signal", skp, tkp, MAY_DELIVER, rc);
return rc;
}
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index aa6bf1b22ec5..205b785fb400 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -20,7 +20,7 @@
#include <net/inet_sock.h>
#include "smack.h"
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int smack_ipv6_output(void *priv,
struct sk_buff *skb,
@@ -64,7 +64,7 @@ static struct nf_hook_ops smack_nf_ops[] = {
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
{
.hook = smack_ipv6_output,
.pf = NFPROTO_IPV6,
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index e249a66db533..6492fe96cae4 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2523,14 +2523,9 @@ static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
if (count == 0 || count > SMK_LONGLABEL)
return -EINVAL;
- data = kzalloc(count, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- if (copy_from_user(data, buf, count) != 0) {
- rc = -EFAULT;
- goto out_data;
- }
+ data = memdup_user(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
cp = smk_parse_smack(data, count);
if (IS_ERR(cp)) {
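
The smk_write_revoke_subj() conversion above (and the snd_compr_set_params() one further down) both switch to memdup_user(); a generic sketch of the complete pattern, including the kfree() that the visible hunk stops short of (the write handler itself is hypothetical):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char *data;

	data = memdup_user(buf, count);	/* allocate and copy in one step */
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... parse @data here ... */

	kfree(data);
	return count;
}
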
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index f34153962d07..71960089e207 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -118,7 +118,7 @@ static void get_irq(struct device_node * np, int *irqptr)
if (np)
*irqptr = irq_of_parse_and_map(np, 0);
else
- *irqptr = NO_IRQ;
+ *irqptr = 0;
}
/* 0x4 is outenable, 0x1 is out, thus 4 or 5 */
@@ -336,7 +336,7 @@ static int ftr_set_notify(struct gpio_runtime *rt,
return -EINVAL;
}
- if (irq == NO_IRQ)
+ if (!irq)
return -ENODEV;
mutex_lock(&notif->mutex);
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 8f71f7e4d966..a0c4a5de809c 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -112,6 +112,7 @@ MODULE_ALIAS("sound-layout-100");
MODULE_ALIAS("aoa-device-id-14");
MODULE_ALIAS("aoa-device-id-22");
+MODULE_ALIAS("aoa-device-id-31");
MODULE_ALIAS("aoa-device-id-35");
MODULE_ALIAS("aoa-device-id-44");
@@ -362,6 +363,13 @@ static struct layout layouts[] = {
.connections = tas_connections_nolineout,
},
},
+ /* PowerBook6,1 */
+ { .device_id = 31,
+ .codecs[0] = {
+ .name = "tas",
+ .connections = tas_connections_nolineout,
+ },
+ },
/* PowerBook6,5 */
{ .device_id = 44,
.codecs[0] = {
@@ -1161,12 +1169,7 @@ static struct soundbus_driver aoa_soundbus_driver = {
static int __init aoa_fabric_layout_init(void)
{
- int err;
-
- err = soundbus_register_driver(&aoa_soundbus_driver);
- if (err)
- return err;
- return 0;
+ return soundbus_register_driver(&aoa_soundbus_driver);
}
static void __exit aoa_fabric_layout_exit(void)
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 1cbf210080a1..000b58522106 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -197,7 +197,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
* so restrict to those we do handle for now.
*/
if (id && (*id == 22 || *id == 14 || *id == 35 ||
- *id == 44)) {
+ *id == 31 || *id == 44)) {
snprintf(dev->sound.modalias, 32,
"aoa-device-id-%d", *id);
ok = 1;
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 2c498488af6c..fec1dfdb14ad 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -553,13 +553,9 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
* we should allow parameter change only when stream has been
* opened not in other cases
*/
- params = kmalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
- if (copy_from_user(params, (void __user *)arg, sizeof(*params))) {
- retval = -EFAULT;
- goto out;
- }
+ params = memdup_user((void __user *)arg, sizeof(*params));
+ if (IS_ERR(params))
+ return PTR_ERR(params);
retval = snd_compress_check_input(params);
if (retval)
@@ -784,7 +780,7 @@ static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
ret = wait_event_interruptible(stream->runtime->sleep,
(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
if (ret == -ERESTARTSYS)
- pr_debug("wait aborted by a signal");
+ pr_debug("wait aborted by a signal\n");
else if (ret)
pr_debug("wait for drain failed with %d\n", ret);
@@ -966,7 +962,7 @@ static int snd_compress_dev_register(struct snd_device *device)
compr->card, compr->device,
&snd_compr_file_ops, compr, &compr->dev);
if (ret < 0) {
- pr_err("snd_register_device failed\n %d", ret);
+ pr_err("snd_register_device failed %d\n", ret);
return ret;
}
return ret;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index c61fd50f771f..9d33c1e85c79 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2637,9 +2637,11 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
break;
/* Fall through */
case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_SUSPENDED:
err = 0;
break;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ err = -ESTRPIPE;
+ break;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
break;
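
A hedged user-space sketch (alsa-lib) of what the new -ESTRPIPE return enables; the recovery loop below is the conventional suspend handling, and example_sync() is a made-up wrapper, not part of any API:

#include <alsa/asoundlib.h>
#include <errno.h>
#include <unistd.h>

static int example_sync(snd_pcm_t *pcm)
{
	int err = snd_pcm_hwsync(pcm);

	if (err == -ESTRPIPE) {
		/* stream is suspended: wait for resume, else re-prepare */
		while ((err = snd_pcm_resume(pcm)) == -EAGAIN)
			sleep(1);
		if (err < 0)
			err = snd_pcm_prepare(pcm);
	}
	return err;
}
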
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index d6d9419d8bac..4c935202ce23 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -87,21 +87,6 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client,
/*
*/
-
-static inline mm_segment_t snd_enter_user(void)
-{
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- return fs;
-}
-
-static inline void snd_leave_user(mm_segment_t fs)
-{
- set_fs(fs);
-}
-
-/*
- */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
@@ -1128,59 +1113,69 @@ static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
/*-----------------------------------------------------*/
+static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
+{
+ int *pversion = arg;
+
+ *pversion = SNDRV_SEQ_VERSION;
+ return 0;
+}
+
+static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
+{
+ int *client_id = arg;
+
+ *client_id = client->number;
+ return 0;
+}
/* SYSTEM_INFO ioctl() */
-static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user *arg)
+static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
{
- struct snd_seq_system_info info;
+ struct snd_seq_system_info *info = arg;
- memset(&info, 0, sizeof(info));
+ memset(info, 0, sizeof(*info));
/* fill the info fields */
- info.queues = SNDRV_SEQ_MAX_QUEUES;
- info.clients = SNDRV_SEQ_MAX_CLIENTS;
- info.ports = SNDRV_SEQ_MAX_PORTS;
- info.channels = 256; /* fixed limit */
- info.cur_clients = client_usage.cur;
- info.cur_queues = snd_seq_queue_get_cur_queues();
-
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
+ info->queues = SNDRV_SEQ_MAX_QUEUES;
+ info->clients = SNDRV_SEQ_MAX_CLIENTS;
+ info->ports = SNDRV_SEQ_MAX_PORTS;
+ info->channels = 256; /* fixed limit */
+ info->cur_clients = client_usage.cur;
+ info->cur_queues = snd_seq_queue_get_cur_queues();
+
return 0;
}
/* RUNNING_MODE ioctl() */
-static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void __user *arg)
+static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg)
{
- struct snd_seq_running_info info;
+ struct snd_seq_running_info *info = arg;
struct snd_seq_client *cptr;
int err = 0;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
/* requested client number */
- cptr = snd_seq_client_use_ptr(info.client);
+ cptr = snd_seq_client_use_ptr(info->client);
if (cptr == NULL)
return -ENOENT; /* don't change !!! */
#ifdef SNDRV_BIG_ENDIAN
- if (! info.big_endian) {
+ if (!info->big_endian) {
err = -EINVAL;
goto __err;
}
#else
- if (info.big_endian) {
+ if (info->big_endian) {
err = -EINVAL;
goto __err;
}
#endif
- if (info.cpu_mode > sizeof(long)) {
+ if (info->cpu_mode > sizeof(long)) {
err = -EINVAL;
goto __err;
}
- cptr->convert32 = (info.cpu_mode < sizeof(long));
+ cptr->convert32 = (info->cpu_mode < sizeof(long));
__err:
snd_seq_client_unlock(cptr);
return err;
@@ -1214,51 +1209,43 @@ static void get_client_info(struct snd_seq_client *cptr,
}
static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_client_info *client_info = arg;
struct snd_seq_client *cptr;
- struct snd_seq_client_info client_info;
-
- if (copy_from_user(&client_info, arg, sizeof(client_info)))
- return -EFAULT;
/* requested client number */
- cptr = snd_seq_client_use_ptr(client_info.client);
+ cptr = snd_seq_client_use_ptr(client_info->client);
if (cptr == NULL)
return -ENOENT; /* don't change !!! */
- get_client_info(cptr, &client_info);
+ get_client_info(cptr, client_info);
snd_seq_client_unlock(cptr);
- if (copy_to_user(arg, &client_info, sizeof(client_info)))
- return -EFAULT;
return 0;
}
/* CLIENT_INFO ioctl() */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_client_info client_info;
-
- if (copy_from_user(&client_info, arg, sizeof(client_info)))
- return -EFAULT;
+ struct snd_seq_client_info *client_info = arg;
	/* it is not allowed to set the info fields for another client */
- if (client->number != client_info.client)
+ if (client->number != client_info->client)
return -EPERM;
/* also client type must be set now */
- if (client->type != client_info.type)
+ if (client->type != client_info->type)
return -EINVAL;
/* fill the info fields */
- if (client_info.name[0])
- strlcpy(client->name, client_info.name, sizeof(client->name));
+ if (client_info->name[0])
+ strlcpy(client->name, client_info->name, sizeof(client->name));
- client->filter = client_info.filter;
- client->event_lost = client_info.event_lost;
- memcpy(client->event_filter, client_info.event_filter, 32);
+ client->filter = client_info->filter;
+ client->event_lost = client_info->event_lost;
+ memcpy(client->event_filter, client_info->event_filter, 32);
return 0;
}
@@ -1267,30 +1254,26 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
/*
* CREATE PORT ioctl()
*/
-static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
{
+ struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
- struct snd_seq_port_info info;
struct snd_seq_port_callback *callback;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
	/* it is not allowed to create the port for another client */
- if (info.addr.client != client->number)
+ if (info->addr.client != client->number)
return -EPERM;
- port = snd_seq_create_port(client, (info.flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info.addr.port : -1);
+ port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info->addr.port : -1);
if (port == NULL)
return -ENOMEM;
- if (client->type == USER_CLIENT && info.kernel) {
+ if (client->type == USER_CLIENT && info->kernel) {
snd_seq_delete_port(client, port->addr.port);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
- if ((callback = info.kernel) != NULL) {
+ if ((callback = info->kernel) != NULL) {
if (callback->owner)
port->owner = callback->owner;
port->private_data = callback->private_data;
@@ -1303,37 +1286,29 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
}
}
- info.addr = port->addr;
+ info->addr = port->addr;
- snd_seq_set_port_info(port, &info);
+ snd_seq_set_port_info(port, info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
-
return 0;
}
/*
* DELETE PORT ioctl()
*/
-static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
{
- struct snd_seq_port_info info;
+ struct snd_seq_port_info *info = arg;
int err;
- /* set passed parameters */
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
	/* it is not allowed to remove the port for another client */
- if (info.addr.client != client->number)
+ if (info->addr.client != client->number)
return -EPERM;
- err = snd_seq_delete_port(client, info.addr.port);
+ err = snd_seq_delete_port(client, info->addr.port);
if (err >= 0)
- snd_seq_system_client_ev_port_exit(client->number, info.addr.port);
+ snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
return err;
}
@@ -1341,32 +1316,27 @@ static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
/*
* GET_PORT_INFO ioctl() (on any client)
*/
-static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
{
+ struct snd_seq_port_info *info = arg;
struct snd_seq_client *cptr;
struct snd_seq_client_port *port;
- struct snd_seq_port_info info;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
- cptr = snd_seq_client_use_ptr(info.addr.client);
+ cptr = snd_seq_client_use_ptr(info->addr.client);
if (cptr == NULL)
return -ENXIO;
- port = snd_seq_port_use_ptr(cptr, info.addr.port);
+ port = snd_seq_port_use_ptr(cptr, info->addr.port);
if (port == NULL) {
snd_seq_client_unlock(cptr);
return -ENOENT; /* don't change */
}
/* get port info */
- snd_seq_get_port_info(port, &info);
+ snd_seq_get_port_info(port, info);
snd_seq_port_unlock(port);
snd_seq_client_unlock(cptr);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
return 0;
}
@@ -1374,20 +1344,16 @@ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
/*
* SET_PORT_INFO ioctl() (only ports on this/own client)
*/
-static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
{
+ struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
- struct snd_seq_port_info info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
- if (info.addr.client != client->number) /* only set our own ports ! */
+ if (info->addr.client != client->number) /* only set our own ports ! */
return -EPERM;
- port = snd_seq_port_use_ptr(client, info.addr.port);
+ port = snd_seq_port_use_ptr(client, info->addr.port);
if (port) {
- snd_seq_set_port_info(port, &info);
+ snd_seq_set_port_info(port, info);
snd_seq_port_unlock(port);
}
return 0;
@@ -1453,34 +1419,31 @@ int snd_seq_client_notify_subscription(int client, int port,
* add to port's subscription list IOCTL interface
*/
static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_port_subscribe *subs = arg;
int result = -EINVAL;
struct snd_seq_client *receiver = NULL, *sender = NULL;
struct snd_seq_client_port *sport = NULL, *dport = NULL;
- struct snd_seq_port_subscribe subs;
-
- if (copy_from_user(&subs, arg, sizeof(subs)))
- return -EFAULT;
- if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
+ if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
goto __end;
- if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
+ if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
+ if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
goto __end;
- if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
+ if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
goto __end;
- result = check_subscription_permission(client, sport, dport, &subs);
+ result = check_subscription_permission(client, sport, dport, subs);
if (result < 0)
goto __end;
/* connect them */
- result = snd_seq_port_connect(client, sender, sport, receiver, dport, &subs);
+ result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
if (! result) /* broadcast announce */
snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
- &subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
+ subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
__end:
if (sport)
snd_seq_port_unlock(sport);
@@ -1498,33 +1461,30 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
* remove from port's subscription list
*/
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_port_subscribe *subs = arg;
int result = -ENXIO;
struct snd_seq_client *receiver = NULL, *sender = NULL;
struct snd_seq_client_port *sport = NULL, *dport = NULL;
- struct snd_seq_port_subscribe subs;
-
- if (copy_from_user(&subs, arg, sizeof(subs)))
- return -EFAULT;
- if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
+ if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
goto __end;
- if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
+ if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
+ if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
goto __end;
- if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
+ if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
goto __end;
- result = check_subscription_permission(client, sport, dport, &subs);
+ result = check_subscription_permission(client, sport, dport, subs);
if (result < 0)
goto __end;
- result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, &subs);
+ result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
if (! result) /* broadcast announce */
snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
- &subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
+ subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
__end:
if (sport)
snd_seq_port_unlock(sport);
@@ -1539,17 +1499,13 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
/* CREATE_QUEUE ioctl() */
-static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
{
- struct snd_seq_queue_info info;
+ struct snd_seq_queue_info *info = arg;
int result;
struct snd_seq_queue *q;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
+ result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
if (result < 0)
return result;
@@ -1557,181 +1513,150 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
if (q == NULL)
return -EINVAL;
- info.queue = q->queue;
- info.locked = q->locked;
- info.owner = q->owner;
+ info->queue = q->queue;
+ info->locked = q->locked;
+ info->owner = q->owner;
/* set queue name */
- if (! info.name[0])
- snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
- strlcpy(q->name, info.name, sizeof(q->name));
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+ strlcpy(q->name, info->name, sizeof(q->name));
queuefree(q);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
-
return 0;
}
/* DELETE_QUEUE ioctl() */
-static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
{
- struct snd_seq_queue_info info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
+ struct snd_seq_queue_info *info = arg;
- return snd_seq_queue_delete(client->number, info.queue);
+ return snd_seq_queue_delete(client->number, info->queue);
}
/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_info info;
+ struct snd_seq_queue_info *info = arg;
struct snd_seq_queue *q;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- q = queueptr(info.queue);
+ q = queueptr(info->queue);
if (q == NULL)
return -EINVAL;
- memset(&info, 0, sizeof(info));
- info.queue = q->queue;
- info.owner = q->owner;
- info.locked = q->locked;
- strlcpy(info.name, q->name, sizeof(info.name));
+ memset(info, 0, sizeof(*info));
+ info->queue = q->queue;
+ info->owner = q->owner;
+ info->locked = q->locked;
+ strlcpy(info->name, q->name, sizeof(info->name));
queuefree(q);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
-
return 0;
}
/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_info info;
+ struct snd_seq_queue_info *info = arg;
struct snd_seq_queue *q;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- if (info.owner != client->number)
+ if (info->owner != client->number)
return -EINVAL;
/* change owner/locked permission */
- if (snd_seq_queue_check_access(info.queue, client->number)) {
- if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0)
+ if (snd_seq_queue_check_access(info->queue, client->number)) {
+ if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
return -EPERM;
- if (info.locked)
- snd_seq_queue_use(info.queue, client->number, 1);
+ if (info->locked)
+ snd_seq_queue_use(info->queue, client->number, 1);
} else {
return -EPERM;
}
- q = queueptr(info.queue);
+ q = queueptr(info->queue);
if (! q)
return -EINVAL;
if (q->owner != client->number) {
queuefree(q);
return -EPERM;
}
- strlcpy(q->name, info.name, sizeof(q->name));
+ strlcpy(q->name, info->name, sizeof(q->name));
queuefree(q);
return 0;
}
/* GET_NAMED_QUEUE ioctl() */
-static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void __user *arg)
+static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
+ void *arg)
{
- struct snd_seq_queue_info info;
+ struct snd_seq_queue_info *info = arg;
struct snd_seq_queue *q;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- q = snd_seq_queue_find_name(info.name);
+ q = snd_seq_queue_find_name(info->name);
if (q == NULL)
return -EINVAL;
- info.queue = q->queue;
- info.owner = q->owner;
- info.locked = q->locked;
+ info->queue = q->queue;
+ info->owner = q->owner;
+ info->locked = q->locked;
queuefree(q);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
-
return 0;
}
/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_status status;
+ struct snd_seq_queue_status *status = arg;
struct snd_seq_queue *queue;
struct snd_seq_timer *tmr;
- if (copy_from_user(&status, arg, sizeof(status)))
- return -EFAULT;
-
- queue = queueptr(status.queue);
+ queue = queueptr(status->queue);
if (queue == NULL)
return -EINVAL;
- memset(&status, 0, sizeof(status));
- status.queue = queue->queue;
+ memset(status, 0, sizeof(*status));
+ status->queue = queue->queue;
tmr = queue->timer;
- status.events = queue->tickq->cells + queue->timeq->cells;
+ status->events = queue->tickq->cells + queue->timeq->cells;
- status.time = snd_seq_timer_get_cur_time(tmr);
- status.tick = snd_seq_timer_get_cur_tick(tmr);
+ status->time = snd_seq_timer_get_cur_time(tmr);
+ status->tick = snd_seq_timer_get_cur_tick(tmr);
- status.running = tmr->running;
+ status->running = tmr->running;
- status.flags = queue->flags;
+ status->flags = queue->flags;
queuefree(queue);
- if (copy_to_user(arg, &status, sizeof(status)))
- return -EFAULT;
return 0;
}
/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_tempo tempo;
+ struct snd_seq_queue_tempo *tempo = arg;
struct snd_seq_queue *queue;
struct snd_seq_timer *tmr;
- if (copy_from_user(&tempo, arg, sizeof(tempo)))
- return -EFAULT;
-
- queue = queueptr(tempo.queue);
+ queue = queueptr(tempo->queue);
if (queue == NULL)
return -EINVAL;
- memset(&tempo, 0, sizeof(tempo));
- tempo.queue = queue->queue;
+ memset(tempo, 0, sizeof(*tempo));
+ tempo->queue = queue->queue;
tmr = queue->timer;
- tempo.tempo = tmr->tempo;
- tempo.ppq = tmr->ppq;
- tempo.skew_value = tmr->skew;
- tempo.skew_base = tmr->skew_base;
+ tempo->tempo = tmr->tempo;
+ tempo->ppq = tmr->ppq;
+ tempo->skew_value = tmr->skew;
+ tempo->skew_base = tmr->skew_base;
queuefree(queue);
- if (copy_to_user(arg, &tempo, sizeof(tempo)))
- return -EFAULT;
return 0;
}
@@ -1747,31 +1672,25 @@ int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
EXPORT_SYMBOL(snd_seq_set_queue_tempo);
static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_queue_tempo *tempo = arg;
int result;
- struct snd_seq_queue_tempo tempo;
-
- if (copy_from_user(&tempo, arg, sizeof(tempo)))
- return -EFAULT;
- result = snd_seq_set_queue_tempo(client->number, &tempo);
+ result = snd_seq_set_queue_tempo(client->number, tempo);
return result < 0 ? result : 0;
}
/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_timer timer;
+ struct snd_seq_queue_timer *timer = arg;
struct snd_seq_queue *queue;
struct snd_seq_timer *tmr;
- if (copy_from_user(&timer, arg, sizeof(timer)))
- return -EFAULT;
-
- queue = queueptr(timer.queue);
+ queue = queueptr(timer->queue);
if (queue == NULL)
return -EINVAL;
@@ -1780,41 +1699,36 @@ static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
return -ERESTARTSYS;
}
tmr = queue->timer;
- memset(&timer, 0, sizeof(timer));
- timer.queue = queue->queue;
+ memset(timer, 0, sizeof(*timer));
+ timer->queue = queue->queue;
- timer.type = tmr->type;
+ timer->type = tmr->type;
if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
- timer.u.alsa.id = tmr->alsa_id;
- timer.u.alsa.resolution = tmr->preferred_resolution;
+ timer->u.alsa.id = tmr->alsa_id;
+ timer->u.alsa.resolution = tmr->preferred_resolution;
}
mutex_unlock(&queue->timer_mutex);
queuefree(queue);
- if (copy_to_user(arg, &timer, sizeof(timer)))
- return -EFAULT;
return 0;
}
/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_queue_timer *timer = arg;
int result = 0;
- struct snd_seq_queue_timer timer;
-
- if (copy_from_user(&timer, arg, sizeof(timer)))
- return -EFAULT;
- if (timer.type != SNDRV_SEQ_TIMER_ALSA)
+ if (timer->type != SNDRV_SEQ_TIMER_ALSA)
return -EINVAL;
- if (snd_seq_queue_check_access(timer.queue, client->number)) {
+ if (snd_seq_queue_check_access(timer->queue, client->number)) {
struct snd_seq_queue *q;
struct snd_seq_timer *tmr;
- q = queueptr(timer.queue);
+ q = queueptr(timer->queue);
if (q == NULL)
return -ENXIO;
if (mutex_lock_interruptible(&q->timer_mutex)) {
@@ -1822,13 +1736,13 @@ static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
return -ERESTARTSYS;
}
tmr = q->timer;
- snd_seq_queue_timer_close(timer.queue);
- tmr->type = timer.type;
+ snd_seq_queue_timer_close(timer->queue);
+ tmr->type = timer->type;
if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
- tmr->alsa_id = timer.u.alsa.id;
- tmr->preferred_resolution = timer.u.alsa.resolution;
+ tmr->alsa_id = timer->u.alsa.id;
+ tmr->preferred_resolution = timer->u.alsa.resolution;
}
- result = snd_seq_queue_timer_open(timer.queue);
+ result = snd_seq_queue_timer_open(timer->queue);
mutex_unlock(&q->timer_mutex);
queuefree(q);
} else {
@@ -1841,38 +1755,30 @@ static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_queue_client info;
+ struct snd_seq_queue_client *info = arg;
int used;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- used = snd_seq_queue_is_used(info.queue, client->number);
+ used = snd_seq_queue_is_used(info->queue, client->number);
if (used < 0)
return -EINVAL;
- info.used = used;
- info.client = client->number;
+ info->used = used;
+ info->client = client->number;
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
return 0;
}
/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_queue_client *info = arg;
int err;
- struct snd_seq_queue_client info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
- if (info.used >= 0) {
- err = snd_seq_queue_use(info.queue, client->number, info.used);
+ if (info->used >= 0) {
+ err = snd_seq_queue_use(info->queue, client->number, info->used);
if (err < 0)
return err;
}
@@ -1883,78 +1789,70 @@ static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_client_pool info;
+ struct snd_seq_client_pool *info = arg;
struct snd_seq_client *cptr;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- cptr = snd_seq_client_use_ptr(info.client);
+ cptr = snd_seq_client_use_ptr(info->client);
if (cptr == NULL)
return -ENOENT;
- memset(&info, 0, sizeof(info));
- info.client = cptr->number;
- info.output_pool = cptr->pool->size;
- info.output_room = cptr->pool->room;
- info.output_free = info.output_pool;
- info.output_free = snd_seq_unused_cells(cptr->pool);
+ memset(info, 0, sizeof(*info));
+ info->client = cptr->number;
+ info->output_pool = cptr->pool->size;
+ info->output_room = cptr->pool->room;
+ info->output_free = info->output_pool;
+ info->output_free = snd_seq_unused_cells(cptr->pool);
if (cptr->type == USER_CLIENT) {
- info.input_pool = cptr->data.user.fifo_pool_size;
- info.input_free = info.input_pool;
+ info->input_pool = cptr->data.user.fifo_pool_size;
+ info->input_free = info->input_pool;
if (cptr->data.user.fifo)
- info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+ info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
} else {
- info.input_pool = 0;
- info.input_free = 0;
+ info->input_pool = 0;
+ info->input_free = 0;
}
snd_seq_client_unlock(cptr);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
return 0;
}
/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_client_pool info;
+ struct snd_seq_client_pool *info = arg;
int rc;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
-
- if (client->number != info.client)
+ if (client->number != info->client)
return -EINVAL; /* can't change other clients */
- if (info.output_pool >= 1 && info.output_pool <= SNDRV_SEQ_MAX_EVENTS &&
+ if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS &&
(! snd_seq_write_pool_allocated(client) ||
- info.output_pool != client->pool->size)) {
+ info->output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
- client->pool->size = info.output_pool;
+ client->pool->size = info->output_pool;
rc = snd_seq_pool_init(client->pool);
if (rc < 0)
return rc;
}
if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
- info.input_pool >= 1 &&
- info.input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
- info.input_pool != client->data.user.fifo_pool_size) {
+ info->input_pool >= 1 &&
+ info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
+ info->input_pool != client->data.user.fifo_pool_size) {
/* change pool size */
- rc = snd_seq_fifo_resize(client->data.user.fifo, info.input_pool);
+ rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
if (rc < 0)
return rc;
- client->data.user.fifo_pool_size = info.input_pool;
+ client->data.user.fifo_pool_size = info->input_pool;
}
- if (info.output_room >= 1 &&
- info.output_room <= client->pool->size) {
- client->pool->room = info.output_room;
+ if (info->output_room >= 1 &&
+ info->output_room <= client->pool->size) {
+ client->pool->room = info->output_room;
}
return snd_seq_ioctl_get_client_pool(client, arg);
@@ -1963,17 +1861,14 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
- struct snd_seq_remove_events info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
+ struct snd_seq_remove_events *info = arg;
/*
* Input mostly not implemented XXX.
*/
- if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
+ if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
/*
* No restrictions so for a user client we can clear
* the whole fifo
@@ -1982,8 +1877,8 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
snd_seq_fifo_clear(client->data.user.fifo);
}
- if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
- snd_seq_queue_remove_cells(client->number, &info);
+ if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
+ snd_seq_queue_remove_cells(client->number, info);
return 0;
}
@@ -1993,26 +1888,23 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
* get subscription info
*/
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_port_subscribe *subs = arg;
int result;
struct snd_seq_client *sender = NULL;
struct snd_seq_client_port *sport = NULL;
- struct snd_seq_port_subscribe subs;
struct snd_seq_subscribers *p;
- if (copy_from_user(&subs, arg, sizeof(subs)))
- return -EFAULT;
-
result = -EINVAL;
- if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
+ if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
goto __end;
- if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
+ if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
goto __end;
- p = snd_seq_port_get_subscription(&sport->c_src, &subs.dest);
+ p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
if (p) {
result = 0;
- subs = p->info;
+ *subs = p->info;
} else
result = -ENOENT;
@@ -2021,10 +1913,7 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
snd_seq_port_unlock(sport);
if (sender)
snd_seq_client_unlock(sender);
- if (result >= 0) {
- if (copy_to_user(arg, &subs, sizeof(subs)))
- return -EFAULT;
- }
+
return result;
}
@@ -2032,26 +1921,22 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
/*
* get subscription info - check only its presence
*/
-static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
- void __user *arg)
+static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
+ struct snd_seq_query_subs *subs = arg;
int result = -ENXIO;
struct snd_seq_client *cptr = NULL;
struct snd_seq_client_port *port = NULL;
- struct snd_seq_query_subs subs;
struct snd_seq_port_subs_info *group;
struct list_head *p;
int i;
- if (copy_from_user(&subs, arg, sizeof(subs)))
- return -EFAULT;
-
- if ((cptr = snd_seq_client_use_ptr(subs.root.client)) == NULL)
+ if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
goto __end;
- if ((port = snd_seq_port_use_ptr(cptr, subs.root.port)) == NULL)
+ if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
goto __end;
- switch (subs.type) {
+ switch (subs->type) {
case SNDRV_SEQ_QUERY_SUBS_READ:
group = &port->c_src;
break;
@@ -2064,22 +1949,22 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
down_read(&group->list_mutex);
/* search for the subscriber */
- subs.num_subs = group->count;
+ subs->num_subs = group->count;
i = 0;
result = -ENOENT;
list_for_each(p, &group->list_head) {
- if (i++ == subs.index) {
+ if (i++ == subs->index) {
/* found! */
struct snd_seq_subscribers *s;
- if (subs.type == SNDRV_SEQ_QUERY_SUBS_READ) {
+ if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
s = list_entry(p, struct snd_seq_subscribers, src_list);
- subs.addr = s->info.dest;
+ subs->addr = s->info.dest;
} else {
s = list_entry(p, struct snd_seq_subscribers, dest_list);
- subs.addr = s->info.sender;
+ subs->addr = s->info.sender;
}
- subs.flags = s->info.flags;
- subs.queue = s->info.queue;
+ subs->flags = s->info.flags;
+ subs->queue = s->info.queue;
result = 0;
break;
}
@@ -2091,10 +1976,7 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
snd_seq_port_unlock(port);
if (cptr)
snd_seq_client_unlock(cptr);
- if (result >= 0) {
- if (copy_to_user(arg, &subs, sizeof(subs)))
- return -EFAULT;
- }
+
return result;
}
@@ -2103,31 +1985,26 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
* query next client
*/
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_client_info *info = arg;
struct snd_seq_client *cptr = NULL;
- struct snd_seq_client_info info;
-
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
/* search for next client */
- info.client++;
- if (info.client < 0)
- info.client = 0;
- for (; info.client < SNDRV_SEQ_MAX_CLIENTS; info.client++) {
- cptr = snd_seq_client_use_ptr(info.client);
+ info->client++;
+ if (info->client < 0)
+ info->client = 0;
+ for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
+ cptr = snd_seq_client_use_ptr(info->client);
if (cptr)
break; /* found */
}
if (cptr == NULL)
return -ENOENT;
- get_client_info(cptr, &info);
+ get_client_info(cptr, info);
snd_seq_client_unlock(cptr);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
return 0;
}
@@ -2135,43 +2012,41 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
* query next port
*/
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
- void __user *arg)
+ void *arg)
{
+ struct snd_seq_port_info *info = arg;
struct snd_seq_client *cptr;
struct snd_seq_client_port *port = NULL;
- struct snd_seq_port_info info;
- if (copy_from_user(&info, arg, sizeof(info)))
- return -EFAULT;
- cptr = snd_seq_client_use_ptr(info.addr.client);
+ cptr = snd_seq_client_use_ptr(info->addr.client);
if (cptr == NULL)
return -ENXIO;
/* search for next port */
- info.addr.port++;
- port = snd_seq_port_query_nearest(cptr, &info);
+ info->addr.port++;
+ port = snd_seq_port_query_nearest(cptr, info);
if (port == NULL) {
snd_seq_client_unlock(cptr);
return -ENOENT;
}
/* get port info */
- info.addr = port->addr;
- snd_seq_get_port_info(port, &info);
+ info->addr = port->addr;
+ snd_seq_get_port_info(port, info);
snd_seq_port_unlock(port);
snd_seq_client_unlock(cptr);
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
return 0;
}
/* -------------------------------------------------------- */
-static struct seq_ioctl_table {
+static const struct ioctl_handler {
unsigned int cmd;
- int (*func)(struct snd_seq_client *client, void __user * arg);
-} ioctl_tables[] = {
+ int (*func)(struct snd_seq_client *client, void *arg);
+} ioctl_handlers[] = {
+ { SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
+ { SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
@@ -2204,40 +2079,65 @@ static struct seq_ioctl_table {
{ 0, NULL },
};
-static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
- void __user *arg)
+static long snd_seq_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- struct seq_ioctl_table *p;
+ struct snd_seq_client *client = file->private_data;
+ /* Use the kernel stack for the ioctl data. */
+ union {
+ int pversion;
+ int client_id;
+ struct snd_seq_system_info system_info;
+ struct snd_seq_running_info running_info;
+ struct snd_seq_client_info client_info;
+ struct snd_seq_port_info port_info;
+ struct snd_seq_port_subscribe port_subscribe;
+ struct snd_seq_queue_info queue_info;
+ struct snd_seq_queue_status queue_status;
+ struct snd_seq_queue_tempo tempo;
+ struct snd_seq_queue_timer queue_timer;
+ struct snd_seq_queue_client queue_client;
+ struct snd_seq_client_pool client_pool;
+ struct snd_seq_remove_events remove_events;
+ struct snd_seq_query_subs query_subs;
+ } buf;
+ const struct ioctl_handler *handler;
+ unsigned long size;
+ int err;
- switch (cmd) {
- case SNDRV_SEQ_IOCTL_PVERSION:
- /* return sequencer version number */
- return put_user(SNDRV_SEQ_VERSION, (int __user *)arg) ? -EFAULT : 0;
- case SNDRV_SEQ_IOCTL_CLIENT_ID:
- /* return the id of this client */
- return put_user(client->number, (int __user *)arg) ? -EFAULT : 0;
- }
+ if (snd_BUG_ON(!client))
+ return -ENXIO;
- if (! arg)
- return -EFAULT;
- for (p = ioctl_tables; p->cmd; p++) {
- if (p->cmd == cmd)
- return p->func(client, arg);
+ for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
+ if (handler->cmd == cmd)
+ break;
}
- pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
- cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
- return -ENOTTY;
-}
+ if (handler->cmd == 0)
+ return -ENOTTY;
+ memset(&buf, 0, sizeof(buf));
-static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct snd_seq_client *client = file->private_data;
+ /*
+ * All of the ioctl commands for the ALSA sequencer take an argument whose
+ * size fits within 13 bits, so it can safely be read from the command word.
+ */
+ size = _IOC_SIZE(handler->cmd);
+ if (handler->cmd & IOC_IN) {
+ if (copy_from_user(&buf, (const void __user *)arg, size))
+ return -EFAULT;
+ }
- if (snd_BUG_ON(!client))
- return -ENXIO;
-
- return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
+ err = handler->func(client, &buf);
+ if (err >= 0) {
+ /* Some commands were defined with a wrong 'dir' field; copy back for them, too. */
+ if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
+ handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
+ (handler->cmd & IOC_OUT))
+ if (copy_to_user((void __user *)arg, &buf, size))
+ return -EFAULT;
+ }
+
+ return err;
}
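/*
 * Illustrative sketch, not part of the patch above: how the new dispatcher
 * derives the copy size and direction from the ioctl command word alone.
 * The helper names below are hypothetical; _IOC_SIZE(), IOC_IN and IOC_OUT
 * come from <linux/ioctl.h>.
 */
#include <linux/ioctl.h>

static inline unsigned int seq_ioctl_arg_size(unsigned int cmd)
{
	/* The size field of the command (13-14 bits depending on the
	 * architecture) encodes sizeof() of the argument structure; every
	 * SNDRV_SEQ_IOCTL_* argument fits in this field. */
	return _IOC_SIZE(cmd);
}

static inline int seq_ioctl_copies_in(unsigned int cmd)
{
	/* Set for _IOW()/_IOWR() commands: copy_from_user() before the handler. */
	return (cmd & IOC_IN) != 0;
}

static inline int seq_ioctl_copies_out(unsigned int cmd)
{
	/* Set for _IOR()/_IOWR() commands: copy_to_user() after the handler. */
	return (cmd & IOC_OUT) != 0;
}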
#ifdef CONFIG_COMPAT
@@ -2423,23 +2323,35 @@ int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
-/*
- * exported, called by kernel clients to perform same functions as with
- * userland ioctl()
+/**
+ * snd_seq_kernel_client_ctl - run a sequencer ioctl command for a client
+ * with data in kernel space.
+ * @clientid: A numerical ID for a client.
+ * @cmd: An ioctl(2) command for ALSA sequencer operation.
+ * @arg: A pointer to data in kernel space.
+ *
+ * Despite its name, this kernel API handles both kernel and application
+ * clients. The 'arg' argument must point to data in kernel space.
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
+ const struct ioctl_handler *handler;
struct snd_seq_client *client;
- mm_segment_t fs;
- int result;
client = clientptr(clientid);
if (client == NULL)
return -ENXIO;
- fs = snd_enter_user();
- result = snd_seq_do_ioctl(client, cmd, (void __force __user *)arg);
- snd_leave_user(fs);
- return result;
+
+ for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
+ if (handler->cmd == cmd)
+ return handler->func(client, arg);
+ }
+
+ pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
+ cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
+ return -ENOTTY;
}
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
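/*
 * Illustrative sketch, not part of the patch above: with the handlers now
 * taking kernel-space pointers, a kernel-side client can call
 * snd_seq_kernel_client_ctl() directly, without the old set_fs() dance.
 * The function name, client id and port attributes are made up for the
 * example.
 */
#include <linux/string.h>
#include <sound/seq_kernel.h>

static int example_create_kernel_port(int my_client)
{
	struct snd_seq_port_info pinfo;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.addr.client = my_client;
	strlcpy(pinfo.name, "example port", sizeof(pinfo.name));
	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC;

	/* pinfo lives on the kernel stack; no copy_{from,to}_user() involved. */
	return snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT,
					 &pinfo);
}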
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index 65175902a68a..fce5697e4261 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -47,7 +47,6 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
{
int err = -EFAULT;
struct snd_seq_port_info *data;
- mm_segment_t fs;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -59,9 +58,7 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
goto error;
data->kernel = NULL;
- fs = snd_enter_user();
- err = snd_seq_do_ioctl(client, cmd, data);
- snd_leave_user(fs);
+ err = snd_seq_kernel_client_ctl(client->number, cmd, data);
if (err < 0)
goto error;
@@ -123,7 +120,7 @@ static long snd_seq_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
case SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION:
case SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT:
case SNDRV_SEQ_IOCTL_RUNNING_MODE:
- return snd_seq_do_ioctl(client, cmd, argp);
+ return snd_seq_ioctl(file, cmd, arg);
case SNDRV_SEQ_IOCTL_CREATE_PORT32:
return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, argp);
case SNDRV_SEQ_IOCTL_DELETE_PORT32:
diff --git a/sound/firewire/bebob/Makefile b/sound/firewire/bebob/Makefile
index af7ed6643266..dd454867240d 100644
--- a/sound/firewire/bebob/Makefile
+++ b/sound/firewire/bebob/Makefile
@@ -1,4 +1,5 @@
snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
- bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
- bebob_focusrite.o bebob_maudio.o bebob.o
+ bebob_pcm.o bebob_hwdep.o bebob_terratec.o \
+ bebob_yamaha_terratec.o bebob_focusrite.o bebob_maudio.o \
+ bebob.o
obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index f7e2cbd2a313..3469ac14c89c 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -458,17 +458,17 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* TerraTec Electronic GmbH, PHASE 88 Rack FW */
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000003, &phase88_rack_spec),
/* TerraTec Electronic GmbH, PHASE 24 FW */
- SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000004, &phase24_series_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000004, &yamaha_terratec_spec),
/* TerraTec Electronic GmbH, Phase X24 FW */
- SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000007, &phase24_series_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000007, &yamaha_terratec_spec),
/* TerraTec Electronic GmbH, EWS MIC2/MIC8 */
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000005, &spec_normal),
/* Terratec Electronic GmbH, Aureon 7.1 Firewire */
SND_BEBOB_DEV_ENTRY(VEN_TERRATEC, 0x00000002, &spec_normal),
/* Yamaha, GO44 */
- SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000b, &yamaha_go_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000b, &yamaha_terratec_spec),
/* YAMAHA, GO46 */
- SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000c, &yamaha_go_spec),
+ SND_BEBOB_DEV_ENTRY(VEN_YAMAHA, 0x0010000c, &yamaha_terratec_spec),
/* Focusrite, SaffirePro 26 I/O */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
/* Focusrite, SaffirePro 10 I/O */
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index e7f1bb925b12..175da875162d 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -235,8 +235,7 @@ int snd_bebob_create_hwdep_device(struct snd_bebob *bebob);
/* model specific operations */
extern const struct snd_bebob_spec phase88_rack_spec;
-extern const struct snd_bebob_spec phase24_series_spec;
-extern const struct snd_bebob_spec yamaha_go_spec;
+extern const struct snd_bebob_spec yamaha_terratec_spec;
extern const struct snd_bebob_spec saffirepro_26_spec;
extern const struct snd_bebob_spec saffirepro_10_spec;
extern const struct snd_bebob_spec saffire_le_spec;
diff --git a/sound/firewire/bebob/bebob_terratec.c b/sound/firewire/bebob/bebob_terratec.c
index c38358b82ada..2fdaf93e7a8d 100644
--- a/sound/firewire/bebob/bebob_terratec.c
+++ b/sound/firewire/bebob/bebob_terratec.c
@@ -36,25 +36,6 @@ end:
return err;
}
-static enum snd_bebob_clock_type phase24_series_clk_src_types[] = {
- SND_BEBOB_CLOCK_TYPE_INTERNAL,
- SND_BEBOB_CLOCK_TYPE_EXTERNAL, /* S/PDIF */
-};
-static int
-phase24_series_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
-{
- int err;
-
- err = avc_audio_get_selector(bebob->unit, 0, 4, id);
- if (err < 0)
- return err;
-
- if (*id >= ARRAY_SIZE(phase24_series_clk_src_types))
- return -EIO;
-
- return 0;
-}
-
static const struct snd_bebob_rate_spec phase_series_rate_spec = {
.get = &snd_bebob_stream_get_rate,
.set = &snd_bebob_stream_set_rate,
@@ -71,15 +52,3 @@ const struct snd_bebob_spec phase88_rack_spec = {
.rate = &phase_series_rate_spec,
.meter = NULL
};
-
-/* 'PHASE 24 FW' and 'PHASE X24 FW' */
-static const struct snd_bebob_clock_spec phase24_series_clk = {
- .num = ARRAY_SIZE(phase24_series_clk_src_types),
- .types = phase24_series_clk_src_types,
- .get = &phase24_series_clk_src_get,
-};
-const struct snd_bebob_spec phase24_series_spec = {
- .clock = &phase24_series_clk,
- .rate = &phase_series_rate_spec,
- .meter = NULL
-};
diff --git a/sound/firewire/bebob/bebob_yamaha.c b/sound/firewire/bebob/bebob_yamaha_terratec.c
index 90d4404f77ce..a6be3e7138e0 100644
--- a/sound/firewire/bebob/bebob_yamaha.c
+++ b/sound/firewire/bebob/bebob_yamaha_terratec.c
@@ -14,7 +14,7 @@
* must be accompanied. If changing the state, a LED on the device starts to
* blink and its sync status is false. In this state, the device sounds nothing
* even if streaming. To start streaming at the current sampling rate is only
- * way to revocer this state. GO46 is better for stand-alone mixer.
+ * way to recover this state. GO46 is better for stand-alone mixer.
*
* Both of them have a capability to change its sampling rate up to 192.0kHz.
* At 192.0kHz, the device reports 4 PCM-in, 1 MIDI-in, 6 PCM-out, 1 MIDI-out.
@@ -25,7 +25,10 @@
* streaming with many asynchronous transactions brings sounds with noises.
* Unfortunately current 'ffado-mixer' generated many asynchronous transaction
* to observe device's state, mainly check cmp connection and signal format. I
- * reccomend users to close ffado-mixer at 192.0kHz if mixer is needless.
+ * recommend users to close ffado-mixer at 192.0kHz if mixer is needless.
+ *
+ * Terratec PHASE 24 FW and PHASE X24 FW are internally the same as
+ * Yamaha GO 44 and GO 46. Yamaha and Terratec cooperated on these models.
*/
static enum snd_bebob_clock_type clk_src_types[] = {
@@ -55,7 +58,7 @@ static const struct snd_bebob_rate_spec rate_spec = {
.get = &snd_bebob_stream_get_rate,
.set = &snd_bebob_stream_set_rate,
};
-const struct snd_bebob_spec yamaha_go_spec = {
+const struct snd_bebob_spec yamaha_terratec_spec = {
.clock = &clock_spec,
.rate = &rate_spec,
.meter = NULL
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index 4aa0249826fd..6074fe1f00f7 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -302,7 +302,7 @@ static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
int snd_dice_create_pcm(struct snd_dice *dice)
{
- static struct snd_pcm_ops capture_ops = {
+ static const struct snd_pcm_ops capture_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -314,7 +314,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
- static struct snd_pcm_ops playback_ops = {
+ static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index cac28f70aef7..613f05872770 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -329,7 +329,7 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&dg00x->rx_stream);
}
-static struct snd_pcm_ops pcm_capture_ops = {
+static const struct snd_pcm_ops pcm_capture_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -341,7 +341,7 @@ static struct snd_pcm_ops pcm_capture_ops = {
.page = snd_pcm_lib_get_vmalloc_page,
};
-static struct snd_pcm_ops pcm_playback_ops = {
+static const struct snd_pcm_ops pcm_playback_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 8d233417695d..f3530f89a025 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -388,7 +388,7 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstm)
int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
{
- static struct snd_pcm_ops capture_ops = {
+ static const struct snd_pcm_ops capture_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -400,7 +400,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
- static struct snd_pcm_ops playback_ops = {
+ static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index 380d3db969a5..79db1b651f5c 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -268,7 +268,7 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&tscm->rx_stream);
}
-static struct snd_pcm_ops pcm_capture_ops = {
+static const struct snd_pcm_ops pcm_capture_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -280,7 +280,7 @@ static struct snd_pcm_ops pcm_capture_ops = {
.page = snd_pcm_lib_get_vmalloc_page,
};
-static struct snd_pcm_ops pcm_playback_ops = {
+static const struct snd_pcm_ops pcm_playback_ops = {
.open = pcm_open,
.close = pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 860f8cad6602..261469188566 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -29,81 +29,6 @@
*/
#define HDAC_MAX_CAPS 10
-/**
- * snd_hdac_ext_bus_parse_capabilities - parse capablity structure
- * @ebus: the pointer to extended bus object
- *
- * Returns 0 if successful, or a negative error code.
- */
-int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
-{
- unsigned int cur_cap;
- unsigned int offset;
- struct hdac_bus *bus = &ebus->bus;
- unsigned int counter = 0;
-
- offset = snd_hdac_chip_readl(bus, LLCH);
-
- /* Lets walk the linked capabilities list */
- do {
- cur_cap = _snd_hdac_chip_read(l, bus, offset);
-
- dev_dbg(bus->dev, "Capability version: 0x%x\n",
- ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
-
- dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
- (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
-
- switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
- case AZX_ML_CAP_ID:
- dev_dbg(bus->dev, "Found ML capability\n");
- ebus->mlcap = bus->remap_addr + offset;
- break;
-
- case AZX_GTS_CAP_ID:
- dev_dbg(bus->dev, "Found GTS capability offset=%x\n", offset);
- ebus->gtscap = bus->remap_addr + offset;
- break;
-
- case AZX_PP_CAP_ID:
- /* PP capability found, the Audio DSP is present */
- dev_dbg(bus->dev, "Found PP capability offset=%x\n", offset);
- ebus->ppcap = bus->remap_addr + offset;
- break;
-
- case AZX_SPB_CAP_ID:
- /* SPIB capability found, handler function */
- dev_dbg(bus->dev, "Found SPB capability\n");
- ebus->spbcap = bus->remap_addr + offset;
- break;
-
- case AZX_DRSM_CAP_ID:
- /* DMA resume capability found, handler function */
- dev_dbg(bus->dev, "Found DRSM capability\n");
- ebus->drsmcap = bus->remap_addr + offset;
- break;
-
- default:
- dev_dbg(bus->dev, "Unknown capability %d\n", cur_cap);
- break;
- }
-
- counter++;
-
- if (counter > HDAC_MAX_CAPS) {
- dev_err(bus->dev, "We exceeded HDAC Ext capablities!!!\n");
- break;
- }
-
- /* read the offset of next capabiity */
- offset = cur_cap & AZX_CAP_HDR_NXT_PTR_MASK;
-
- } while (offset);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_parse_capabilities);
-
/*
* processing pipe helpers - these helpers are useful for dealing with HDA
* new capability of processing pipelines
@@ -118,15 +43,15 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *ebus, bool enable)
{
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->ppcap) {
+ if (!bus->ppcap) {
dev_err(bus->dev, "Address of PP capability is NULL");
return;
}
if (enable)
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_GPROCEN);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_GPROCEN);
else
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable);
@@ -139,15 +64,15 @@ void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *ebus, bool enable)
{
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->ppcap) {
+ if (!bus->ppcap) {
dev_err(bus->dev, "Address of PP capability is NULL\n");
return;
}
if (enable)
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_PIE);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_PIE);
else
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable);
@@ -171,7 +96,7 @@ int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *ebus)
struct hdac_ext_link *hlink;
struct hdac_bus *bus = &ebus->bus;
- link_count = readl(ebus->mlcap + AZX_REG_ML_MLCD) + 1;
+ link_count = readl(bus->mlcap + AZX_REG_ML_MLCD) + 1;
dev_dbg(bus->dev, "In %s Link count: %d\n", __func__, link_count);
@@ -181,7 +106,7 @@ int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *ebus)
return -ENOMEM;
hlink->index = idx;
hlink->bus = bus;
- hlink->ml_addr = ebus->mlcap + AZX_ML_BASE +
+ hlink->ml_addr = bus->mlcap + AZX_ML_BASE +
(AZX_ML_INTERVAL * idx);
hlink->lcaps = readl(hlink->ml_addr + AZX_REG_ML_LCAP);
hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID);
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 626f3bb24c55..3be051ab5533 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -40,27 +40,27 @@ void snd_hdac_ext_stream_init(struct hdac_ext_bus *ebus,
{
struct hdac_bus *bus = &ebus->bus;
- if (ebus->ppcap) {
- stream->pphc_addr = ebus->ppcap + AZX_PPHC_BASE +
+ if (bus->ppcap) {
+ stream->pphc_addr = bus->ppcap + AZX_PPHC_BASE +
AZX_PPHC_INTERVAL * idx;
- stream->pplc_addr = ebus->ppcap + AZX_PPLC_BASE +
+ stream->pplc_addr = bus->ppcap + AZX_PPLC_BASE +
AZX_PPLC_MULTI * ebus->num_streams +
AZX_PPLC_INTERVAL * idx;
}
- if (ebus->spbcap) {
- stream->spib_addr = ebus->spbcap + AZX_SPB_BASE +
+ if (bus->spbcap) {
+ stream->spib_addr = bus->spbcap + AZX_SPB_BASE +
AZX_SPB_INTERVAL * idx +
AZX_SPB_SPIB;
- stream->fifo_addr = ebus->spbcap + AZX_SPB_BASE +
+ stream->fifo_addr = bus->spbcap + AZX_SPB_BASE +
AZX_SPB_INTERVAL * idx +
AZX_SPB_MAXFIFO;
}
- if (ebus->drsmcap)
- stream->dpibr_addr = ebus->drsmcap + AZX_DRSM_BASE +
+ if (bus->drsmcap)
+ stream->dpibr_addr = bus->drsmcap + AZX_DRSM_BASE +
AZX_DRSM_INTERVAL * idx;
stream->decoupled = false;
@@ -131,10 +131,10 @@ void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *ebus,
spin_lock_irq(&bus->reg_lock);
if (decouple)
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL, 0,
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0,
AZX_PPCTL_PROCEN(hstream->index));
else
- snd_hdac_updatel(ebus->ppcap, AZX_REG_PP_PPCTL,
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
AZX_PPCTL_PROCEN(hstream->index), 0);
stream->decoupled = decouple;
spin_unlock_irq(&bus->reg_lock);
@@ -255,7 +255,7 @@ hdac_ext_link_stream_assign(struct hdac_ext_bus *ebus,
struct hdac_stream *stream = NULL;
struct hdac_bus *hbus = &ebus->bus;
- if (!ebus->ppcap) {
+ if (!hbus->ppcap) {
dev_err(hbus->dev, "stream type not supported\n");
return NULL;
}
@@ -296,7 +296,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
struct hdac_stream *stream = NULL;
struct hdac_bus *hbus = &ebus->bus;
- if (!ebus->ppcap) {
+ if (!hbus->ppcap) {
dev_err(hbus->dev, "stream type not supported\n");
return NULL;
}
@@ -423,21 +423,21 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *ebus,
u32 register_mask = 0;
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->spbcap) {
- dev_err(bus->dev, "Address of SPB capability is NULL");
+ if (!bus->spbcap) {
+ dev_err(bus->dev, "Address of SPB capability is NULL\n");
return;
}
mask |= (1 << index);
- register_mask = readl(ebus->spbcap + AZX_REG_SPB_SPBFCCTL);
+ register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
mask |= register_mask;
if (enable)
- snd_hdac_updatel(ebus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+ snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
else
- snd_hdac_updatel(ebus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
+ snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_spbcap_enable);
@@ -452,8 +452,8 @@ int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
{
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->spbcap) {
- dev_err(bus->dev, "Address of SPB capability is NULL");
+ if (!bus->spbcap) {
+ dev_err(bus->dev, "Address of SPB capability is NULL\n");
return -EINVAL;
}
@@ -475,8 +475,8 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
{
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->spbcap) {
- dev_err(bus->dev, "Address of SPB capability is NULL");
+ if (!bus->spbcap) {
+ dev_err(bus->dev, "Address of SPB capability is NULL\n");
return -EINVAL;
}
@@ -515,21 +515,21 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus,
u32 register_mask = 0;
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->drsmcap) {
- dev_err(bus->dev, "Address of DRSM capability is NULL");
+ if (!bus->drsmcap) {
+ dev_err(bus->dev, "Address of DRSM capability is NULL\n");
return;
}
mask |= (1 << index);
- register_mask = readl(ebus->drsmcap + AZX_REG_SPB_SPBFCCTL);
+ register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
mask |= register_mask;
if (enable)
- snd_hdac_updatel(ebus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+ snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
else
- snd_hdac_updatel(ebus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
+ snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_drsm_enable);
@@ -544,8 +544,8 @@ int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus,
{
struct hdac_bus *bus = &ebus->bus;
- if (!ebus->drsmcap) {
- dev_err(bus->dev, "Address of DRSM capability is NULL");
+ if (!bus->drsmcap) {
+ dev_err(bus->dev, "Address of DRSM capability is NULL\n");
return -EINVAL;
}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 9fee464e5d49..043065867656 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -255,6 +255,81 @@ int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_get_response);
+#define HDAC_MAX_CAPS 10
+/**
+ * snd_hdac_bus_parse_capabilities - parse capability structure
+ * @bus: the pointer to bus object
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
+{
+ unsigned int cur_cap;
+ unsigned int offset;
+ unsigned int counter = 0;
+
+ offset = snd_hdac_chip_readl(bus, LLCH);
+
+ /* Let's walk the linked capabilities list */
+ do {
+ cur_cap = _snd_hdac_chip_read(l, bus, offset);
+
+ dev_dbg(bus->dev, "Capability version: 0x%x\n",
+ (cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF);
+
+ dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
+ (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
+
+ switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
+ case AZX_ML_CAP_ID:
+ dev_dbg(bus->dev, "Found ML capability\n");
+ bus->mlcap = bus->remap_addr + offset;
+ break;
+
+ case AZX_GTS_CAP_ID:
+ dev_dbg(bus->dev, "Found GTS capability offset=%x\n", offset);
+ bus->gtscap = bus->remap_addr + offset;
+ break;
+
+ case AZX_PP_CAP_ID:
+ /* PP capability found, the Audio DSP is present */
+ dev_dbg(bus->dev, "Found PP capability offset=%x\n", offset);
+ bus->ppcap = bus->remap_addr + offset;
+ break;
+
+ case AZX_SPB_CAP_ID:
+ /* SPIB capability found, handler function */
+ dev_dbg(bus->dev, "Found SPB capability\n");
+ bus->spbcap = bus->remap_addr + offset;
+ break;
+
+ case AZX_DRSM_CAP_ID:
+ /* DMA resume capability found, handler function */
+ dev_dbg(bus->dev, "Found DRSM capability\n");
+ bus->drsmcap = bus->remap_addr + offset;
+ break;
+
+ default:
+ dev_dbg(bus->dev, "Unknown capability %d\n", cur_cap);
+ break;
+ }
+
+ counter++;
+
+ if (counter > HDAC_MAX_CAPS) {
+ dev_err(bus->dev, "We exceeded HDAC capabilities!!!\n");
+ break;
+ }
+
+ /* read the offset of next capability */
+ offset = cur_cap & AZX_CAP_HDR_NXT_PTR_MASK;
+
+ } while (offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_parse_capabilities);
+
/*
* Lowlevel interface
*/
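/*
 * Illustrative sketch, not part of the patch above: a controller driver
 * would typically run the snd_hdac_bus_parse_capabilities() helper added
 * above once after mapping the registers and then test the pointers now
 * cached on struct hdac_bus. The function name below is hypothetical.
 */
#include <sound/hdaudio.h>

static int example_check_caps(struct hdac_bus *bus)
{
	int err;

	err = snd_hdac_bus_parse_capabilities(bus);
	if (err < 0)
		return err;

	if (bus->ppcap)
		dev_dbg(bus->dev, "audio DSP (PP) capability present\n");
	if (bus->mlcap)
		dev_dbg(bus->dev, "multi-link capability present\n");
	if (bus->spbcap)
		dev_dbg(bus->dev, "SPIB capability present\n");

	return 0;
}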
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 1fc6d8bc09e5..8c36990e26f6 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -571,7 +571,7 @@ snd_ad1889_capture_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, ptr);
}
-static struct snd_pcm_ops snd_ad1889_playback_ops = {
+static const struct snd_pcm_ops snd_ad1889_playback_ops = {
.open = snd_ad1889_playback_open,
.close = snd_ad1889_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -582,7 +582,7 @@ static struct snd_pcm_ops snd_ad1889_playback_ops = {
.pointer = snd_ad1889_playback_pointer,
};
-static struct snd_pcm_ops snd_ad1889_capture_ops = {
+static const struct snd_pcm_ops snd_ad1889_capture_ops = {
.open = snd_ad1889_capture_open,
.close = snd_ad1889_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 36470af7eda7..92b819e4f729 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1408,6 +1408,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream)
spin_unlock(&codec->reg_lock);
dev_dbg(codec->card->dev, "playback pointer returned cso=%xh.\n", cso);
+ cso %= runtime->buffer_size;
return cso;
}
@@ -1428,6 +1429,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2));
spin_unlock(&codec->reg_lock);
+ cso %= runtime->buffer_size;
return cso;
}
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index add3176398d3..ab75601d7c2c 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -563,7 +563,7 @@ static snd_pcm_uframes_t snd_als300_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, current_ptr);
}
-static struct snd_pcm_ops snd_als300_playback_ops = {
+static const struct snd_pcm_ops snd_als300_playback_ops = {
.open = snd_als300_playback_open,
.close = snd_als300_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -574,7 +574,7 @@ static struct snd_pcm_ops snd_als300_playback_ops = {
.pointer = snd_als300_pointer,
};
-static struct snd_pcm_ops snd_als300_capture_ops = {
+static const struct snd_pcm_ops snd_als300_capture_ops = {
.open = snd_als300_capture_open,
.close = snd_als300_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index ff39a0c7277b..edabe1371660 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -672,7 +672,7 @@ static int snd_als4000_capture_close(struct snd_pcm_substream *substream)
/******************************************************************/
-static struct snd_pcm_ops snd_als4000_playback_ops = {
+static const struct snd_pcm_ops snd_als4000_playback_ops = {
.open = snd_als4000_playback_open,
.close = snd_als4000_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -683,7 +683,7 @@ static struct snd_pcm_ops snd_als4000_playback_ops = {
.pointer = snd_als4000_playback_pointer
};
-static struct snd_pcm_ops snd_als4000_capture_ops = {
+static const struct snd_pcm_ops snd_als4000_capture_ops = {
.open = snd_als4000_capture_open,
.close = snd_als4000_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 1039eccbb895..976a3d23557e 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1138,7 +1138,7 @@ static int snd_card_asihpi_playback_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_card_asihpi_playback_mmap_ops = {
+static const struct snd_pcm_ops snd_card_asihpi_playback_mmap_ops = {
.open = snd_card_asihpi_playback_open,
.close = snd_card_asihpi_playback_close,
.ioctl = snd_card_asihpi_playback_ioctl,
@@ -1305,7 +1305,7 @@ static int snd_card_asihpi_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
+static const struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
.open = snd_card_asihpi_capture_open,
.close = snd_card_asihpi_capture_close,
.ioctl = snd_card_asihpi_capture_ioctl,
diff --git a/sound/pci/asihpi/hpifunc.c b/sound/pci/asihpi/hpifunc.c
index 510e56cffd31..f9b57647b319 100644
--- a/sound/pci/asihpi/hpifunc.c
+++ b/sound/pci/asihpi/hpifunc.c
@@ -2323,11 +2323,8 @@ u16 hpi_sample_clock_get_source_index(u32 h_control, u16 *pw_source_index)
u16 hpi_sample_clock_query_local_rate(const u32 h_clock, const u32 index,
u32 *prate)
{
- u16 err;
- err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE,
- index, 0, prate);
-
- return err;
+ return hpi_control_query(h_clock, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE,
+ index, 0, prate);
}
u16 hpi_sample_clock_set_local_rate(u32 h_control, u32 sample_rate)
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 2ce0022dbc46..a40c918c8dff 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1148,7 +1148,7 @@ static int snd_atiixp_spdif_close(struct snd_pcm_substream *substream)
}
/* AC97 playback */
-static struct snd_pcm_ops snd_atiixp_playback_ops = {
+static const struct snd_pcm_ops snd_atiixp_playback_ops = {
.open = snd_atiixp_playback_open,
.close = snd_atiixp_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1160,7 +1160,7 @@ static struct snd_pcm_ops snd_atiixp_playback_ops = {
};
/* AC97 capture */
-static struct snd_pcm_ops snd_atiixp_capture_ops = {
+static const struct snd_pcm_ops snd_atiixp_capture_ops = {
.open = snd_atiixp_capture_open,
.close = snd_atiixp_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1172,7 +1172,7 @@ static struct snd_pcm_ops snd_atiixp_capture_ops = {
};
/* SPDIF playback */
-static struct snd_pcm_ops snd_atiixp_spdif_ops = {
+static const struct snd_pcm_ops snd_atiixp_spdif_ops = {
.open = snd_atiixp_spdif_open,
.close = snd_atiixp_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index c534552963e7..40152feef1e7 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -947,7 +947,7 @@ static int snd_atiixp_capture_close(struct snd_pcm_substream *substream)
/* AC97 playback */
-static struct snd_pcm_ops snd_atiixp_playback_ops = {
+static const struct snd_pcm_ops snd_atiixp_playback_ops = {
.open = snd_atiixp_playback_open,
.close = snd_atiixp_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -959,7 +959,7 @@ static struct snd_pcm_ops snd_atiixp_playback_ops = {
};
/* AC97 capture */
-static struct snd_pcm_ops snd_atiixp_capture_ops = {
+static const struct snd_pcm_ops snd_atiixp_capture_ops = {
.open = snd_atiixp_capture_open,
.close = snd_atiixp_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index d3125c169684..e1af24f87566 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1043,7 +1043,7 @@ static void vortex_fifo_init(vortex_t * vortex)
for (x = NR_ADB - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1));
if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1))
- dev_err(vortex->card->dev, "bad adb fifo reset!");
+ dev_err(vortex->card->dev, "bad adb fifo reset!\n");
vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 167714303070..57bbb87d0c62 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -179,7 +179,7 @@ static struct pci_driver aw2_driver = {
module_pci_driver(aw2_driver);
/* operators for playback PCM alsa interface */
-static struct snd_pcm_ops snd_aw2_playback_ops = {
+static const struct snd_pcm_ops snd_aw2_playback_ops = {
.open = snd_aw2_pcm_playback_open,
.close = snd_aw2_pcm_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -191,7 +191,7 @@ static struct snd_pcm_ops snd_aw2_playback_ops = {
};
/* operators for capture PCM alsa interface */
-static struct snd_pcm_ops snd_aw2_capture_ops = {
+static const struct snd_pcm_ops snd_aw2_capture_ops = {
.open = snd_aw2_pcm_capture_open,
.close = snd_aw2_pcm_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 5e2ef0bb7057..80c4a4456197 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2090,7 +2090,7 @@ snd_azf3328_pcm_close(struct snd_pcm_substream *substream
/******************************************************************/
-static struct snd_pcm_ops snd_azf3328_playback_ops = {
+static const struct snd_pcm_ops snd_azf3328_playback_ops = {
.open = snd_azf3328_pcm_playback_open,
.close = snd_azf3328_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -2101,7 +2101,7 @@ static struct snd_pcm_ops snd_azf3328_playback_ops = {
.pointer = snd_azf3328_pcm_pointer
};
-static struct snd_pcm_ops snd_azf3328_capture_ops = {
+static const struct snd_pcm_ops snd_azf3328_capture_ops = {
.open = snd_azf3328_pcm_capture_open,
.close = snd_azf3328_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -2112,7 +2112,7 @@ static struct snd_pcm_ops snd_azf3328_capture_ops = {
.pointer = snd_azf3328_pcm_pointer
};
-static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
+static const struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
.open = snd_azf3328_pcm_i2s_out_open,
.close = snd_azf3328_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 5925b7170e25..f2c0709d7441 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -550,7 +550,7 @@ static snd_pcm_uframes_t snd_bt87x_pointer(struct snd_pcm_substream *substream)
return (snd_pcm_uframes_t)bytes_to_frames(runtime, chip->current_line * chip->line_bytes);
}
-static struct snd_pcm_ops snd_bt87x_pcm_ops = {
+static const struct snd_pcm_ops snd_bt87x_pcm_ops = {
.open = snd_bt87x_pcm_open,
.close = snd_bt87x_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index d3cd95633ee2..6165a57a94ae 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1109,7 +1109,7 @@ snd_ca0106_pcm_pointer_capture(struct snd_pcm_substream *substream)
}
/* operators */
-static struct snd_pcm_ops snd_ca0106_playback_front_ops = {
+static const struct snd_pcm_ops snd_ca0106_playback_front_ops = {
.open = snd_ca0106_pcm_open_playback_front,
.close = snd_ca0106_pcm_close_playback,
.ioctl = snd_pcm_lib_ioctl,
@@ -1120,7 +1120,7 @@ static struct snd_pcm_ops snd_ca0106_playback_front_ops = {
.pointer = snd_ca0106_pcm_pointer_playback,
};
-static struct snd_pcm_ops snd_ca0106_capture_0_ops = {
+static const struct snd_pcm_ops snd_ca0106_capture_0_ops = {
.open = snd_ca0106_pcm_open_0_capture,
.close = snd_ca0106_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
@@ -1131,7 +1131,7 @@ static struct snd_pcm_ops snd_ca0106_capture_0_ops = {
.pointer = snd_ca0106_pcm_pointer_capture,
};
-static struct snd_pcm_ops snd_ca0106_capture_1_ops = {
+static const struct snd_pcm_ops snd_ca0106_capture_1_ops = {
.open = snd_ca0106_pcm_open_1_capture,
.close = snd_ca0106_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
@@ -1142,7 +1142,7 @@ static struct snd_pcm_ops snd_ca0106_capture_1_ops = {
.pointer = snd_ca0106_pcm_pointer_capture,
};
-static struct snd_pcm_ops snd_ca0106_capture_2_ops = {
+static const struct snd_pcm_ops snd_ca0106_capture_2_ops = {
.open = snd_ca0106_pcm_open_2_capture,
.close = snd_ca0106_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
@@ -1153,7 +1153,7 @@ static struct snd_pcm_ops snd_ca0106_capture_2_ops = {
.pointer = snd_ca0106_pcm_pointer_capture,
};
-static struct snd_pcm_ops snd_ca0106_capture_3_ops = {
+static const struct snd_pcm_ops snd_ca0106_capture_3_ops = {
.open = snd_ca0106_pcm_open_3_capture,
.close = snd_ca0106_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
@@ -1164,7 +1164,7 @@ static struct snd_pcm_ops snd_ca0106_capture_3_ops = {
.pointer = snd_ca0106_pcm_pointer_capture,
};
-static struct snd_pcm_ops snd_ca0106_playback_center_lfe_ops = {
+static const struct snd_pcm_ops snd_ca0106_playback_center_lfe_ops = {
.open = snd_ca0106_pcm_open_playback_center_lfe,
.close = snd_ca0106_pcm_close_playback,
.ioctl = snd_pcm_lib_ioctl,
@@ -1175,7 +1175,7 @@ static struct snd_pcm_ops snd_ca0106_playback_center_lfe_ops = {
.pointer = snd_ca0106_pcm_pointer_playback,
};
-static struct snd_pcm_ops snd_ca0106_playback_unknown_ops = {
+static const struct snd_pcm_ops snd_ca0106_playback_unknown_ops = {
.open = snd_ca0106_pcm_open_playback_unknown,
.close = snd_ca0106_pcm_close_playback,
.ioctl = snd_pcm_lib_ioctl,
@@ -1186,7 +1186,7 @@ static struct snd_pcm_ops snd_ca0106_playback_unknown_ops = {
.pointer = snd_ca0106_pcm_pointer_playback,
};
-static struct snd_pcm_ops snd_ca0106_playback_rear_ops = {
+static const struct snd_pcm_ops snd_ca0106_playback_rear_ops = {
.open = snd_ca0106_pcm_open_playback_rear,
.close = snd_ca0106_pcm_close_playback,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 24cdcba06d27..73f593526b2d 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1838,7 +1838,7 @@ static int snd_cmipci_capture_spdif_close(struct snd_pcm_substream *substream)
/*
*/
-static struct snd_pcm_ops snd_cmipci_playback_ops = {
+static const struct snd_pcm_ops snd_cmipci_playback_ops = {
.open = snd_cmipci_playback_open,
.close = snd_cmipci_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1849,7 +1849,7 @@ static struct snd_pcm_ops snd_cmipci_playback_ops = {
.pointer = snd_cmipci_playback_pointer,
};
-static struct snd_pcm_ops snd_cmipci_capture_ops = {
+static const struct snd_pcm_ops snd_cmipci_capture_ops = {
.open = snd_cmipci_capture_open,
.close = snd_cmipci_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1860,7 +1860,7 @@ static struct snd_pcm_ops snd_cmipci_capture_ops = {
.pointer = snd_cmipci_capture_pointer,
};
-static struct snd_pcm_ops snd_cmipci_playback2_ops = {
+static const struct snd_pcm_ops snd_cmipci_playback2_ops = {
.open = snd_cmipci_playback2_open,
.close = snd_cmipci_playback2_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1871,7 +1871,7 @@ static struct snd_pcm_ops snd_cmipci_playback2_ops = {
.pointer = snd_cmipci_capture_pointer, /* channel B */
};
-static struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
+static const struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
.open = snd_cmipci_playback_spdif_open,
.close = snd_cmipci_playback_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1882,7 +1882,7 @@ static struct snd_pcm_ops snd_cmipci_playback_spdif_ops = {
.pointer = snd_cmipci_playback_pointer,
};
-static struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
+static const struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
.open = snd_cmipci_capture_spdif_open,
.close = snd_cmipci_capture_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index c296fd0dbc9c..615d8a99d8c8 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -951,7 +951,7 @@ static int snd_cs4281_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_cs4281_playback_ops = {
+static const struct snd_pcm_ops snd_cs4281_playback_ops = {
.open = snd_cs4281_playback_open,
.close = snd_cs4281_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -962,7 +962,7 @@ static struct snd_pcm_ops snd_cs4281_playback_ops = {
.pointer = snd_cs4281_pointer,
};
-static struct snd_pcm_ops snd_cs4281_capture_ops = {
+static const struct snd_pcm_ops snd_cs4281_capture_ops = {
.open = snd_cs4281_capture_open,
.close = snd_cs4281_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 2706f271a83b..528102cc2d5d 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -2460,7 +2460,7 @@ static int cs46xx_detect_codec(struct snd_cs46xx *chip, int codec)
udelay(10);
if (snd_cs46xx_codec_read(chip, AC97_RESET, codec) & 0x8000) {
dev_dbg(chip->card->dev,
- "seconadry codec not present\n");
+ "secondary codec not present\n");
return -ENXIO;
}
}
@@ -2503,7 +2503,7 @@ int snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
chip->nr_ac97_codecs = 1;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
- dev_dbg(chip->card->dev, "detecting seconadry codec\n");
+ dev_dbg(chip->card->dev, "detecting secondary codec\n");
/* try detect a secondary codec */
if (! cs46xx_detect_codec(chip, CS46XX_SECONDARY_CODEC_INDEX))
chip->nr_ac97_codecs = 2;
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index d2951ed4bf71..4a0cbd2241d8 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -1441,7 +1441,7 @@ int cs46xx_dsp_scb_and_task_init (struct snd_cs46xx *chip)
if (chip->nr_ac97_codecs == 2) {
/* create CODEC tasklet for rear Center/LFE output
- slot 6 and 9 on seconadry CODEC */
+ slot 6 and 9 on secondary CODEC */
clfe_codec_out_scb = cs46xx_dsp_create_codec_out_scb(chip,"CodecOutSCB_CLFE",0x0030,0x0030,
CLFE_MIXER_SCB_ADDR,
CLFE_CODEC_SCB_ADDR,
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index 27fa57da8dc4..c208c1d8dbb2 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -380,7 +380,7 @@ static int snd_cs5535audio_capture_prepare(struct snd_pcm_substream *substream)
substream->runtime->rate);
}
-static struct snd_pcm_ops snd_cs5535audio_playback_ops = {
+static const struct snd_pcm_ops snd_cs5535audio_playback_ops = {
.open = snd_cs5535audio_playback_open,
.close = snd_cs5535audio_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -391,7 +391,7 @@ static struct snd_pcm_ops snd_cs5535audio_playback_ops = {
.pointer = snd_cs5535audio_pcm_pointer,
};
-static struct snd_pcm_ops snd_cs5535audio_capture_ops = {
+static const struct snd_pcm_ops snd_cs5535audio_capture_ops = {
.open = snd_cs5535audio_capture_open,
.close = snd_cs5535audio_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 977a59855fa6..908658a00377 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1623,7 +1623,7 @@ static int atc_resume(struct ct_atc *atc)
}
#endif
-static struct ct_atc atc_preset = {
+static const struct ct_atc atc_preset = {
.map_audio_buffer = ct_map_audio_buffer,
.unmap_audio_buffer = ct_unmap_audio_buffer,
.pcm_playback_prepare = atc_pcm_playback_prepare,
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index d86c474ca5b6..974978041558 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -372,7 +372,7 @@ ct_pcm_capture_pointer(struct snd_pcm_substream *substream)
}
/* PCM operators for playback */
-static struct snd_pcm_ops ct_pcm_playback_ops = {
+static const struct snd_pcm_ops ct_pcm_playback_ops = {
.open = ct_pcm_playback_open,
.close = ct_pcm_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -385,7 +385,7 @@ static struct snd_pcm_ops ct_pcm_playback_ops = {
};
/* PCM operators for capture */
-static struct snd_pcm_ops ct_pcm_capture_ops = {
+static const struct snd_pcm_ops ct_pcm_capture_ops = {
.open = ct_pcm_capture_open,
.close = ct_pcm_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 419306ef825f..520e19bc649f 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -166,11 +166,7 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
- dma_addr_t addr;
-
- addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
-
- return addr;
+ return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 6d1b98d14327..921037ed8468 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -548,7 +548,7 @@ snd_emu10k1x_pcm_pointer(struct snd_pcm_substream *substream)
}
/* operators */
-static struct snd_pcm_ops snd_emu10k1x_playback_ops = {
+static const struct snd_pcm_ops snd_emu10k1x_playback_ops = {
.open = snd_emu10k1x_playback_open,
.close = snd_emu10k1x_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -694,7 +694,7 @@ snd_emu10k1x_pcm_pointer_capture(struct snd_pcm_substream *substream)
return ptr;
}
-static struct snd_pcm_ops snd_emu10k1x_capture_ops = {
+static const struct snd_pcm_ops snd_emu10k1x_capture_ops = {
.open = snd_emu10k1x_pcm_open_capture,
.close = snd_emu10k1x_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 14a305bd8a98..37be1e14d756 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1364,7 +1364,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_emu10k1_playback_ops = {
+static const struct snd_pcm_ops snd_emu10k1_playback_ops = {
.open = snd_emu10k1_playback_open,
.close = snd_emu10k1_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1376,7 +1376,7 @@ static struct snd_pcm_ops snd_emu10k1_playback_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-static struct snd_pcm_ops snd_emu10k1_capture_ops = {
+static const struct snd_pcm_ops snd_emu10k1_capture_ops = {
.open = snd_emu10k1_capture_open,
.close = snd_emu10k1_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1388,7 +1388,7 @@ static struct snd_pcm_ops snd_emu10k1_capture_ops = {
};
/* EFX playback */
-static struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
+static const struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
.open = snd_emu10k1_efx_playback_open,
.close = snd_emu10k1_efx_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1455,7 +1455,7 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
}
-static struct snd_pcm_ops snd_emu10k1_capture_mic_ops = {
+static const struct snd_pcm_ops snd_emu10k1_capture_mic_ops = {
.open = snd_emu10k1_capture_mic_open,
.close = snd_emu10k1_capture_mic_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1550,7 +1550,7 @@ static struct snd_kcontrol_new snd_emu10k1_pcm_efx_voices_mask = {
.put = snd_emu10k1_pcm_efx_voices_mask_put
};
-static struct snd_pcm_ops snd_emu10k1_capture_efx_ops = {
+static const struct snd_pcm_ops snd_emu10k1_capture_efx_ops = {
.open = snd_emu10k1_capture_efx_open,
.close = snd_emu10k1_capture_efx_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1791,7 +1791,7 @@ static int snd_emu10k1_fx8010_playback_close(struct snd_pcm_substream *substream
return 0;
}
-static struct snd_pcm_ops snd_emu10k1_fx8010_playback_ops = {
+static const struct snd_pcm_ops snd_emu10k1_fx8010_playback_ops = {
.open = snd_emu10k1_fx8010_playback_open,
.close = snd_emu10k1_fx8010_playback_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index 3c60b433de9f..c11f1a29f35d 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -300,37 +300,29 @@ static int snd_p16v_pcm_open_capture(struct snd_pcm_substream *substream)
static int snd_p16v_pcm_hw_params_playback(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- int result;
- result = snd_pcm_lib_malloc_pages(substream,
+ return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- return result;
}
/* hw_params callback */
static int snd_p16v_pcm_hw_params_capture(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- int result;
- result = snd_pcm_lib_malloc_pages(substream,
+ return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- return result;
}
/* hw_free callback */
static int snd_p16v_pcm_hw_free_playback(struct snd_pcm_substream *substream)
{
- int result;
- result = snd_pcm_lib_free_pages(substream);
- return result;
+ return snd_pcm_lib_free_pages(substream);
}
/* hw_free callback */
static int snd_p16v_pcm_hw_free_capture(struct snd_pcm_substream *substream)
{
- int result;
- result = snd_pcm_lib_free_pages(substream);
- return result;
+ return snd_pcm_lib_free_pages(substream);
}
@@ -601,7 +593,7 @@ snd_p16v_pcm_pointer_capture(struct snd_pcm_substream *substream)
}
/* operators */
-static struct snd_pcm_ops snd_p16v_playback_front_ops = {
+static const struct snd_pcm_ops snd_p16v_playback_front_ops = {
.open = snd_p16v_pcm_open_playback_front,
.close = snd_p16v_pcm_close_playback,
.ioctl = snd_pcm_lib_ioctl,
@@ -612,7 +604,7 @@ static struct snd_pcm_ops snd_p16v_playback_front_ops = {
.pointer = snd_p16v_pcm_pointer_playback,
};
-static struct snd_pcm_ops snd_p16v_capture_ops = {
+static const struct snd_pcm_ops snd_p16v_capture_ops = {
.open = snd_p16v_pcm_open_capture,
.close = snd_p16v_pcm_close_capture,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 626cd2167d29..7e760fed0728 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1227,7 +1227,7 @@ static int snd_ensoniq_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_ensoniq_playback1_ops = {
+static const struct snd_pcm_ops snd_ensoniq_playback1_ops = {
.open = snd_ensoniq_playback1_open,
.close = snd_ensoniq_playback1_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1238,7 +1238,7 @@ static struct snd_pcm_ops snd_ensoniq_playback1_ops = {
.pointer = snd_ensoniq_playback1_pointer,
};
-static struct snd_pcm_ops snd_ensoniq_playback2_ops = {
+static const struct snd_pcm_ops snd_ensoniq_playback2_ops = {
.open = snd_ensoniq_playback2_open,
.close = snd_ensoniq_playback2_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1249,7 +1249,7 @@ static struct snd_pcm_ops snd_ensoniq_playback2_ops = {
.pointer = snd_ensoniq_playback2_pointer,
};
-static struct snd_pcm_ops snd_ensoniq_capture_ops = {
+static const struct snd_pcm_ops snd_ensoniq_capture_ops = {
.open = snd_ensoniq_capture_open,
.close = snd_ensoniq_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 8963d7688fb0..681355829484 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -992,7 +992,7 @@ static int snd_es1938_playback_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_es1938_playback_ops = {
+static const struct snd_pcm_ops snd_es1938_playback_ops = {
.open = snd_es1938_playback_open,
.close = snd_es1938_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1003,7 +1003,7 @@ static struct snd_pcm_ops snd_es1938_playback_ops = {
.pointer = snd_es1938_playback_pointer,
};
-static struct snd_pcm_ops snd_es1938_capture_ops = {
+static const struct snd_pcm_ops snd_es1938_capture_ops = {
.open = snd_es1938_capture_open,
.close = snd_es1938_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 514f2604086e..8146fb76a4ad 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1677,7 +1677,7 @@ static int snd_es1968_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_es1968_playback_ops = {
+static const struct snd_pcm_ops snd_es1968_playback_ops = {
.open = snd_es1968_playback_open,
.close = snd_es1968_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1688,7 +1688,7 @@ static struct snd_pcm_ops snd_es1968_playback_ops = {
.pointer = snd_es1968_pcm_pointer,
};
-static struct snd_pcm_ops snd_es1968_capture_ops = {
+static const struct snd_pcm_ops snd_es1968_capture_ops = {
.open = snd_es1968_capture_open,
.close = snd_es1968_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index bade9b907b92..c47287d79306 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -691,7 +691,7 @@ static int snd_fm801_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_fm801_playback_ops = {
+static const struct snd_pcm_ops snd_fm801_playback_ops = {
.open = snd_fm801_playback_open,
.close = snd_fm801_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -702,7 +702,7 @@ static struct snd_pcm_ops snd_fm801_playback_ops = {
.pointer = snd_fm801_playback_pointer,
};
-static struct snd_pcm_ops snd_fm801_capture_ops = {
+static const struct snd_pcm_ops snd_fm801_capture_ops = {
.open = snd_fm801_capture_open,
.close = snd_fm801_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 27de8015717d..500878556578 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -27,6 +27,12 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+
+#ifdef CONFIG_X86
+/* for art-tsc conversion */
+#include <asm/tsc.h>
+#endif
+
#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"
@@ -337,12 +343,173 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
azx_get_position(chip, azx_dev));
}
+/*
+ * azx_scale64: Scale base by num/den while avoiding overflow
+ *
+ * Derived from scale64_check_overflow in kernel/time/timekeeping.c
+ *
+ * The timestamps for a 48 kHz stream can overflow after (2^64/10^9)/48K
+ * seconds, which is about 384307, i.e. ~4.5 days.
+ *
+ * This scales the calculation so that overflow only happens after
+ * 2^64 / 48000 seconds, which is pretty large!
+ *
+ * In the calculation below:
+ * base may overflow, but since there isn't any additional division
+ * performed on base it's OK
+ * rem can't overflow because both are 32-bit values
+ */
+
+#ifdef CONFIG_X86
+static u64 azx_scale64(u64 base, u32 num, u32 den)
+{
+ u64 rem;
+
+ rem = do_div(base, den);
+
+ base *= num;
+ rem *= num;
+
+ do_div(rem, den);
+
+ return base + rem;
+}
+
+static int azx_get_sync_time(ktime_t *device,
+ struct system_counterval_t *system, void *ctx)
+{
+ struct snd_pcm_substream *substream = ctx;
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx *chip = apcm->chip;
+ struct snd_pcm_runtime *runtime;
+ u64 ll_counter, ll_counter_l, ll_counter_h;
+ u64 tsc_counter, tsc_counter_l, tsc_counter_h;
+ u32 wallclk_ctr, wallclk_cycles;
+ bool direction;
+ u32 dma_select;
+ u32 timeout = 200;
+ u32 retry_count = 0;
+
+ runtime = substream->runtime;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ direction = 1;
+ else
+ direction = 0;
+
+ /* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
+ do {
+ timeout = 100;
+ dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
+ (azx_dev->core.stream_tag - 1);
+ snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);
+
+ /* Enable the capture */
+ snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);
+
+ while (timeout) {
+ if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
+ GTSCC_TSCCD_MASK)
+ break;
+
+ timeout--;
+ }
+
+ if (!timeout) {
+ dev_err(chip->card->dev, "GTSCC capture timed out!\n");
+ return -EIO;
+ }
+
+ /* Read wall clock counter */
+ wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);
+
+ /* Read TSC counter */
+ tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
+ tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);
+
+ /* Read Link counter */
+ ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
+ ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);
+
+ /* Ack: registers read done */
+ snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);
+
+ tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
+ tsc_counter_l;
+
+ ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
+ wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
+
+ /*
+ * An error occurs near frame "rollover". The clocks-in-frame
+ * value indicates whether this error may have occurred; here we
+ * use a threshold of 10, i.e. HDA_MAX_CYCLE_OFFSET.
+ */
+ if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
+ && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
+ break;
+
+ /*
+ * Sleep before we read again, else we may again get a
+ * value near MAX_CYCLE. Sleep for a different amount of
+ * time on each retry so we don't hit the same number again
+ */
+ udelay(retry_count++);
+
+ } while (retry_count != HDA_MAX_CYCLE_READ_RETRY);
+
+ if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
+ dev_err_ratelimited(chip->card->dev,
+ "Error in WALFCC cycle count\n");
+ return -EIO;
+ }
+
+ *device = ns_to_ktime(azx_scale64(ll_counter,
+ NSEC_PER_SEC, runtime->rate));
+ *device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
+ ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));
+
+ *system = convert_art_to_tsc(tsc_counter);
+
+ return 0;
+}
+
+#else
+static int azx_get_sync_time(ktime_t *device,
+ struct system_counterval_t *system, void *ctx)
+{
+ return -ENXIO;
+}
+#endif
+
+static int azx_get_crosststamp(struct snd_pcm_substream *substream,
+ struct system_device_crosststamp *xtstamp)
+{
+ return get_device_system_crosststamp(azx_get_sync_time,
+ substream, NULL, xtstamp);
+}
+
+static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
+ struct snd_pcm_audio_tstamp_config *ts)
+{
+ if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
+ if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
+ return true;
+
+ return false;
+}
+
static int azx_get_time_info(struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
struct azx_dev *azx_dev = get_azx_dev(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct system_device_crosststamp xtstamp;
+ int ret;
u64 nsec;
if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
@@ -361,8 +528,37 @@ static int azx_get_time_info(struct snd_pcm_substream *substream,
audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */
- } else
+ } else if (is_link_time_supported(runtime, audio_tstamp_config)) {
+
+ ret = azx_get_crosststamp(substream, &xtstamp);
+ if (ret)
+ return ret;
+
+ switch (runtime->tstamp_type) {
+ case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
+ return -EINVAL;
+
+ case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
+ *system_ts = ktime_to_timespec(xtstamp.sys_monoraw);
+ break;
+
+ default:
+ *system_ts = ktime_to_timespec(xtstamp.sys_realtime);
+ break;
+
+ }
+
+ *audio_ts = ktime_to_timespec(xtstamp.device);
+
+ audio_tstamp_report->actual_type =
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
+ audio_tstamp_report->accuracy_report = 1;
+ /* 24 MHz WallClock == 42ns resolution */
+ audio_tstamp_report->accuracy = 42;
+
+ } else {
audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
+ }
return 0;
}
@@ -412,6 +608,11 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
goto unlock;
}
runtime->private_data = azx_dev;
+
+ if (chip->gts_present)
+ azx_pcm_hw.info = azx_pcm_hw.info |
+ SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+
runtime->hw = azx_pcm_hw;
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
@@ -495,7 +696,7 @@ static int azx_pcm_mmap(struct snd_pcm_substream *substream,
return snd_pcm_lib_default_mmap(substream, area);
}
-static struct snd_pcm_ops azx_pcm_ops = {
+static const struct snd_pcm_ops azx_pcm_ops = {
.open = azx_pcm_open,
.close = azx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
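[Editor's note] A userspace re-statement of the overflow-avoidance arithmetic in azx_scale64() above may help: the base count is split into a quotient and a remainder with respect to den before being scaled by num, so the only products formed are quotient*num and remainder*num, both of which fit comfortably in 64 bits for audio rates (num = NSEC_PER_SEC, den = the sample rate). The sketch below is illustrative only; the scale64() helper and the main() driver are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* split-then-scale, mirroring azx_scale64(): (base/den)*num + (base%den)*num/den */
static uint64_t scale64(uint64_t base, uint32_t num, uint32_t den)
{
	uint64_t rem = base % den;	/* remainder < den, so rem*num cannot overflow 64 bits */

	base /= den;			/* quotient part, scaled exactly below */
	return base * num + (rem * num) / den;
}

int main(void)
{
	/* 480000 link clocks at 48 kHz should come out as 10 seconds in ns */
	printf("%llu ns\n",
	       (unsigned long long)scale64(480000, 1000000000u, 48000u));
	return 0;
}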
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index ec63bbf1ec6d..a50e0532622a 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -159,6 +159,9 @@ struct azx {
unsigned int region_requested:1;
unsigned int disabled:1; /* disabled by vga_switcheroo */
+ /* GTS present */
+ unsigned int gts_present:1;
+
#ifdef CONFIG_SND_HDA_DSP_LOADER
struct azx_dev saved_azx_dev;
#endif
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 160c7f713722..c3469f756ec2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -54,6 +54,7 @@
/* for snoop control */
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#endif
#include <sound/core.h>
#include <sound/initval.h>
@@ -1663,6 +1664,22 @@ static int azx_first_init(struct azx *chip)
return -ENXIO;
}
+ if (IS_SKL_PLUS(pci))
+ snd_hdac_bus_parse_capabilities(bus);
+
+ /*
+ * Some Intel CPUs have an always-running timer (ART) and the
+ * controller may have global time sync (GTS) reporting capability,
+ * so check both of these before declaring the synchronized time
+ * reporting capability SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME.
+ */
+ chip->gts_present = false;
+
+#ifdef CONFIG_X86
+ if (bus->ppcap && boot_cpu_has(X86_FEATURE_ART))
+ chip->gts_present = true;
+#endif
+
if (chip->msi) {
if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
dev_dbg(card->dev, "Disabling 64bit MSI\n");
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 9ceb2bc36e68..ad06866d7c69 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4018,7 +4018,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
/*
* PCM
*/
-static struct hda_pcm_stream ca0132_pcm_analog_playback = {
+static const struct hda_pcm_stream ca0132_pcm_analog_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 6,
@@ -4029,7 +4029,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = {
},
};
-static struct hda_pcm_stream ca0132_pcm_analog_capture = {
+static const struct hda_pcm_stream ca0132_pcm_analog_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
@@ -4040,7 +4040,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = {
},
};
-static struct hda_pcm_stream ca0132_pcm_digital_playback = {
+static const struct hda_pcm_stream ca0132_pcm_digital_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
@@ -4052,7 +4052,7 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = {
},
};
-static struct hda_pcm_stream ca0132_pcm_digital_capture = {
+static const struct hda_pcm_stream ca0132_pcm_digital_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
@@ -4614,7 +4614,7 @@ static void ca0132_free(struct hda_codec *codec)
kfree(codec->spec);
}
-static struct hda_codec_ops ca0132_patch_ops = {
+static const struct hda_codec_ops ca0132_patch_ops = {
.build_controls = ca0132_build_controls,
.build_pcms = ca0132_build_pcms,
.init = ca0132_init,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 56fefbd85782..ed62748a6d55 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -261,6 +261,7 @@ enum {
CXT_FIXUP_HP_530,
CXT_FIXUP_CAP_MIX_AMP_5047,
CXT_FIXUP_MUTE_LED_EAPD,
+ CXT_FIXUP_HP_SPECTRE,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -765,6 +766,14 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_mute_led_eapd,
},
+ [CXT_FIXUP_HP_SPECTRE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* enable NID 0x1d for the speaker on top */
+ { 0x1d, 0x91170111 },
+ { }
+ }
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -814,6 +823,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 575cefd8cc4a..b58e8c76346a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5264,6 +5264,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
.v.func = hda_fixup_thinkpad_acpi,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_SKU_IGNORE,
},
[ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
@@ -5806,6 +5808,13 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x14, 0x90170110}, \
{0x15, 0x0221401f}
+#define ALC295_STANDARD_PINS \
+ {0x12, 0xb7a60130}, \
+ {0x14, 0x90170110}, \
+ {0x17, 0x21014020}, \
+ {0x18, 0x21a19030}, \
+ {0x21, 0x04211020}
+
#define ALC298_STANDARD_PINS \
{0x12, 0x90a60130}, \
{0x21, 0x03211020}
@@ -5846,6 +5855,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170120},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x1b, 0x02011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
@@ -5911,6 +5924,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170120},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
@@ -6021,6 +6038,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 8ae3bb7975d1..b4aa4c1370a8 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -847,7 +847,7 @@ static int snd_ice1712_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_ice1712_playback_ops = {
+static const struct snd_pcm_ops snd_ice1712_playback_ops = {
.open = snd_ice1712_playback_open,
.close = snd_ice1712_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -858,7 +858,7 @@ static struct snd_pcm_ops snd_ice1712_playback_ops = {
.pointer = snd_ice1712_playback_pointer,
};
-static struct snd_pcm_ops snd_ice1712_playback_ds_ops = {
+static const struct snd_pcm_ops snd_ice1712_playback_ds_ops = {
.open = snd_ice1712_playback_ds_open,
.close = snd_ice1712_playback_ds_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -869,7 +869,7 @@ static struct snd_pcm_ops snd_ice1712_playback_ds_ops = {
.pointer = snd_ice1712_playback_ds_pointer,
};
-static struct snd_pcm_ops snd_ice1712_capture_ops = {
+static const struct snd_pcm_ops snd_ice1712_capture_ops = {
.open = snd_ice1712_capture_open,
.close = snd_ice1712_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1228,7 +1228,7 @@ static int snd_ice1712_capture_pro_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_ice1712_playback_pro_ops = {
+static const struct snd_pcm_ops snd_ice1712_playback_pro_ops = {
.open = snd_ice1712_playback_pro_open,
.close = snd_ice1712_playback_pro_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1239,7 +1239,7 @@ static struct snd_pcm_ops snd_ice1712_playback_pro_ops = {
.pointer = snd_ice1712_playback_pro_pointer,
};
-static struct snd_pcm_ops snd_ice1712_capture_pro_ops = {
+static const struct snd_pcm_ops snd_ice1712_capture_pro_ops = {
.open = snd_ice1712_capture_pro_open,
.close = snd_ice1712_capture_pro_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 0b22c00642bb..e5c52ed9b674 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -620,9 +620,7 @@ static const unsigned int stdclock_rate_list[16] = {
static unsigned int stdclock_get_rate(struct snd_ice1712 *ice)
{
- unsigned int rate;
- rate = stdclock_rate_list[inb(ICEMT1724(ice, RATE)) & 15];
- return rate;
+ return stdclock_rate_list[inb(ICEMT1724(ice, RATE)) & 15];
}
static void stdclock_set_rate(struct snd_ice1712 *ice, unsigned int rate)
@@ -1113,7 +1111,7 @@ static int snd_vt1724_capture_pro_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_vt1724_playback_pro_ops = {
+static const struct snd_pcm_ops snd_vt1724_playback_pro_ops = {
.open = snd_vt1724_playback_pro_open,
.close = snd_vt1724_playback_pro_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1124,7 +1122,7 @@ static struct snd_pcm_ops snd_vt1724_playback_pro_ops = {
.pointer = snd_vt1724_playback_pro_pointer,
};
-static struct snd_pcm_ops snd_vt1724_capture_pro_ops = {
+static const struct snd_pcm_ops snd_vt1724_capture_pro_ops = {
.open = snd_vt1724_capture_pro_open,
.close = snd_vt1724_capture_pro_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1292,7 +1290,7 @@ static int snd_vt1724_capture_spdif_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_vt1724_playback_spdif_ops = {
+static const struct snd_pcm_ops snd_vt1724_playback_spdif_ops = {
.open = snd_vt1724_playback_spdif_open,
.close = snd_vt1724_playback_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1303,7 +1301,7 @@ static struct snd_pcm_ops snd_vt1724_playback_spdif_ops = {
.pointer = snd_vt1724_pcm_pointer,
};
-static struct snd_pcm_ops snd_vt1724_capture_spdif_ops = {
+static const struct snd_pcm_ops snd_vt1724_capture_spdif_ops = {
.open = snd_vt1724_capture_spdif_open,
.close = snd_vt1724_capture_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1437,7 +1435,7 @@ static int snd_vt1724_playback_indep_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_vt1724_playback_indep_ops = {
+static const struct snd_pcm_ops snd_vt1724_playback_indep_ops = {
.open = snd_vt1724_playback_indep_open,
.close = snd_vt1724_playback_indep_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 9e1ad119a3ce..565f7f55c3ca 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -1681,7 +1681,7 @@ static int snd_korg1212_capture_copy(struct snd_pcm_substream *substream,
return snd_korg1212_copy_to(korg1212, dst, pos, count, 0, korg1212->channels * 2);
}
-static struct snd_pcm_ops snd_korg1212_playback_ops = {
+static const struct snd_pcm_ops snd_korg1212_playback_ops = {
.open = snd_korg1212_playback_open,
.close = snd_korg1212_playback_close,
.ioctl = snd_korg1212_ioctl,
@@ -1693,7 +1693,7 @@ static struct snd_pcm_ops snd_korg1212_playback_ops = {
.silence = snd_korg1212_playback_silence,
};
-static struct snd_pcm_ops snd_korg1212_capture_ops = {
+static const struct snd_pcm_ops snd_korg1212_capture_ops = {
.open = snd_korg1212_capture_open,
.close = snd_korg1212_capture_close,
.ioctl = snd_korg1212_ioctl,
diff --git a/sound/pci/lola/lola_pcm.c b/sound/pci/lola/lola_pcm.c
index 3bd6985430e8..1268ba329016 100644
--- a/sound/pci/lola/lola_pcm.c
+++ b/sound/pci/lola/lola_pcm.c
@@ -586,7 +586,7 @@ void lola_pcm_update(struct lola *chip, struct lola_pcm *pcm, unsigned int bits)
}
}
-static struct snd_pcm_ops lola_pcm_ops = {
+static const struct snd_pcm_ops lola_pcm_ops = {
.open = lola_pcm_open,
.close = lola_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 8b8e2e54fba3..c0f0c349c3ec 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -804,7 +804,7 @@ mac_ready:
return err;
}
-static struct snd_pcm_ops lx_ops_playback = {
+static const struct snd_pcm_ops lx_ops_playback = {
.open = lx_pcm_open,
.close = lx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -815,7 +815,7 @@ static struct snd_pcm_ops lx_ops_playback = {
.pointer = lx_pcm_stream_pointer,
};
-static struct snd_pcm_ops lx_ops_capture = {
+static const struct snd_pcm_ops lx_ops_capture = {
.open = lx_pcm_open,
.close = lx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 17ae92613de4..cafea6dc5c01 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -1834,7 +1834,7 @@ snd_m3_capture_close(struct snd_pcm_substream *subs)
* create pcm instance
*/
-static struct snd_pcm_ops snd_m3_playback_ops = {
+static const struct snd_pcm_ops snd_m3_playback_ops = {
.open = snd_m3_playback_open,
.close = snd_m3_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1845,7 +1845,7 @@ static struct snd_pcm_ops snd_m3_playback_ops = {
.pointer = snd_m3_pcm_pointer,
};
-static struct snd_pcm_ops snd_m3_capture_ops = {
+static const struct snd_pcm_ops snd_m3_capture_ops = {
.open = snd_m3_capture_open,
.close = snd_m3_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 25c0ddd3a53b..80d439944cb5 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -922,7 +922,7 @@ static snd_pcm_uframes_t snd_mixart_stream_pointer(struct snd_pcm_substream *sub
-static struct snd_pcm_ops snd_mixart_playback_ops = {
+static const struct snd_pcm_ops snd_mixart_playback_ops = {
.open = snd_mixart_playback_open,
.close = snd_mixart_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -933,7 +933,7 @@ static struct snd_pcm_ops snd_mixart_playback_ops = {
.pointer = snd_mixart_stream_pointer,
};
-static struct snd_pcm_ops snd_mixart_capture_ops = {
+static const struct snd_pcm_ops snd_mixart_capture_ops = {
.open = snd_mixart_capture_open,
.close = snd_mixart_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 4735e27cc773..103fe311e5a9 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -902,7 +902,7 @@ snd_nm256_capture_close(struct snd_pcm_substream *substream)
/*
* create a pcm instance
*/
-static struct snd_pcm_ops snd_nm256_playback_ops = {
+static const struct snd_pcm_ops snd_nm256_playback_ops = {
.open = snd_nm256_playback_open,
.close = snd_nm256_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -917,7 +917,7 @@ static struct snd_pcm_ops snd_nm256_playback_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_nm256_capture_ops = {
+static const struct snd_pcm_ops snd_nm256_capture_ops = {
.open = snd_nm256_capture_open,
.close = snd_nm256_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index aa2ebd1d6d12..042a2439fea5 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -631,7 +631,7 @@ static snd_pcm_uframes_t oxygen_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, curr_addr - (u32)runtime->dma_addr);
}
-static struct snd_pcm_ops oxygen_rec_a_ops = {
+static const struct snd_pcm_ops oxygen_rec_a_ops = {
.open = oxygen_rec_a_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -642,7 +642,7 @@ static struct snd_pcm_ops oxygen_rec_a_ops = {
.pointer = oxygen_pointer,
};
-static struct snd_pcm_ops oxygen_rec_b_ops = {
+static const struct snd_pcm_ops oxygen_rec_b_ops = {
.open = oxygen_rec_b_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -653,7 +653,7 @@ static struct snd_pcm_ops oxygen_rec_b_ops = {
.pointer = oxygen_pointer,
};
-static struct snd_pcm_ops oxygen_rec_c_ops = {
+static const struct snd_pcm_ops oxygen_rec_c_ops = {
.open = oxygen_rec_c_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -664,7 +664,7 @@ static struct snd_pcm_ops oxygen_rec_c_ops = {
.pointer = oxygen_pointer,
};
-static struct snd_pcm_ops oxygen_spdif_ops = {
+static const struct snd_pcm_ops oxygen_spdif_ops = {
.open = oxygen_spdif_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -675,7 +675,7 @@ static struct snd_pcm_ops oxygen_spdif_ops = {
.pointer = oxygen_pointer,
};
-static struct snd_pcm_ops oxygen_multich_ops = {
+static const struct snd_pcm_ops oxygen_multich_ops = {
.open = oxygen_multich_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -686,7 +686,7 @@ static struct snd_pcm_ops oxygen_multich_ops = {
.pointer = oxygen_pointer,
};
-static struct snd_pcm_ops oxygen_ac97_ops = {
+static const struct snd_pcm_ops oxygen_ac97_ops = {
.open = oxygen_ac97_open,
.close = oxygen_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index 9293235281dc..bb7eee9d0c2b 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1146,7 +1146,7 @@ static snd_pcm_uframes_t pcxhr_stream_pointer(struct snd_pcm_substream *subs)
}
-static struct snd_pcm_ops pcxhr_ops = {
+static const struct snd_pcm_ops pcxhr_ops = {
.open = pcxhr_open,
.close = pcxhr_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 067a91207d8e..ada5f01d479c 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -644,7 +644,7 @@ static struct lbuspath lbus_play_paths[] = {
.mono = lbus_play_mono3,
},
};
-static struct lbuspath lbus_rec_path = {
+static const struct lbuspath lbus_rec_path = {
.noconv = lbus_rec_noconv1,
.stereo = lbus_rec_stereo1,
.mono = lbus_rec_mono1,
@@ -1669,7 +1669,7 @@ static int snd_riptide_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_riptide_playback_ops = {
+static const struct snd_pcm_ops snd_riptide_playback_ops = {
.open = snd_riptide_playback_open,
.close = snd_riptide_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1680,7 +1680,7 @@ static struct snd_pcm_ops snd_riptide_playback_ops = {
.trigger = snd_riptide_trigger,
.pointer = snd_riptide_pointer,
};
-static struct snd_pcm_ops snd_riptide_capture_ops = {
+static const struct snd_pcm_ops snd_riptide_capture_ops = {
.open = snd_riptide_capture_open,
.close = snd_riptide_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index cd94ac548ba3..96d15db65dfd 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1196,7 +1196,7 @@ snd_rme32_capture_fd_pointer(struct snd_pcm_substream *substream)
}
/* for halfduplex mode */
-static struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
+static const struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
.open = snd_rme32_playback_spdif_open,
.close = snd_rme32_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1210,7 +1210,7 @@ static struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
+static const struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
.open = snd_rme32_capture_spdif_open,
.close = snd_rme32_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1223,7 +1223,7 @@ static struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme32_playback_adat_ops = {
+static const struct snd_pcm_ops snd_rme32_playback_adat_ops = {
.open = snd_rme32_playback_adat_open,
.close = snd_rme32_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1236,7 +1236,7 @@ static struct snd_pcm_ops snd_rme32_playback_adat_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme32_capture_adat_ops = {
+static const struct snd_pcm_ops snd_rme32_capture_adat_ops = {
.open = snd_rme32_capture_adat_open,
.close = snd_rme32_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1249,7 +1249,7 @@ static struct snd_pcm_ops snd_rme32_capture_adat_ops = {
};
/* for fullduplex mode */
-static struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = {
+static const struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = {
.open = snd_rme32_playback_spdif_open,
.close = snd_rme32_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1261,7 +1261,7 @@ static struct snd_pcm_ops snd_rme32_playback_spdif_fd_ops = {
.ack = snd_rme32_playback_fd_ack,
};
-static struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = {
+static const struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = {
.open = snd_rme32_capture_spdif_open,
.close = snd_rme32_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1273,7 +1273,7 @@ static struct snd_pcm_ops snd_rme32_capture_spdif_fd_ops = {
.ack = snd_rme32_capture_fd_ack,
};
-static struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = {
+static const struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = {
.open = snd_rme32_playback_adat_open,
.close = snd_rme32_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1284,7 +1284,7 @@ static struct snd_pcm_ops snd_rme32_playback_adat_fd_ops = {
.ack = snd_rme32_playback_fd_ack,
};
-static struct snd_pcm_ops snd_rme32_capture_adat_fd_ops = {
+static const struct snd_pcm_ops snd_rme32_capture_adat_fd_ops = {
.open = snd_rme32_capture_adat_open,
.close = snd_rme32_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 41c31db65039..05b9da30990d 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -1505,7 +1505,7 @@ snd_rme96_capture_pointer(struct snd_pcm_substream *substream)
return snd_rme96_capture_ptr(rme96);
}
-static struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
+static const struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
.open = snd_rme96_playback_spdif_open,
.close = snd_rme96_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1518,7 +1518,7 @@ static struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
+static const struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
.open = snd_rme96_capture_spdif_open,
.close = snd_rme96_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1530,7 +1530,7 @@ static struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme96_playback_adat_ops = {
+static const struct snd_pcm_ops snd_rme96_playback_adat_ops = {
.open = snd_rme96_playback_adat_open,
.close = snd_rme96_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1543,7 +1543,7 @@ static struct snd_pcm_ops snd_rme96_playback_adat_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static struct snd_pcm_ops snd_rme96_capture_adat_ops = {
+static const struct snd_pcm_ops snd_rme96_capture_adat_ops = {
.open = snd_rme96_capture_adat_open,
.close = snd_rme96_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 7c8941b8b2de..b94fc6357139 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4861,7 +4861,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
return 0;
}
-static struct snd_pcm_ops snd_hdsp_playback_ops = {
+static const struct snd_pcm_ops snd_hdsp_playback_ops = {
.open = snd_hdsp_playback_open,
.close = snd_hdsp_playback_release,
.ioctl = snd_hdsp_ioctl,
@@ -4873,7 +4873,7 @@ static struct snd_pcm_ops snd_hdsp_playback_ops = {
.silence = snd_hdsp_hw_silence,
};
-static struct snd_pcm_ops snd_hdsp_capture_ops = {
+static const struct snd_pcm_ops snd_hdsp_capture_ops = {
.open = snd_hdsp_capture_open,
.close = snd_hdsp_capture_release,
.ioctl = snd_hdsp_ioctl,
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index a4a999a0317e..14bbf55c1ef9 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1666,7 +1666,7 @@ static int hdspm_set_rate(struct hdspm * hdspm, int rate, int called_internally)
HDSPM_AUTOSYNC_FROM_NONE) {
dev_warn(hdspm->card->dev,
- "Detected no Externel Sync\n");
+ "Detected no External Sync\n");
not_set = 1;
} else if (rate != external_freq) {
@@ -6361,7 +6361,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
return 0;
}
-static struct snd_pcm_ops snd_hdspm_ops = {
+static const struct snd_pcm_ops snd_hdspm_ops = {
.open = snd_hdspm_open,
.close = snd_hdspm_release,
.ioctl = snd_hdspm_ioctl,
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index fdbc0aa2776a..55172c689991 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -2368,7 +2368,7 @@ static int snd_rme9652_capture_release(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_rme9652_playback_ops = {
+static const struct snd_pcm_ops snd_rme9652_playback_ops = {
.open = snd_rme9652_playback_open,
.close = snd_rme9652_playback_release,
.ioctl = snd_rme9652_ioctl,
@@ -2380,7 +2380,7 @@ static struct snd_pcm_ops snd_rme9652_playback_ops = {
.silence = snd_rme9652_hw_silence,
};
-static struct snd_pcm_ops snd_rme9652_capture_ops = {
+static const struct snd_pcm_ops snd_rme9652_capture_ops = {
.open = snd_rme9652_capture_open,
.close = snd_rme9652_capture_release,
.ioctl = snd_rme9652_ioctl,
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 1b6fad7d4d56..e1a13870bb80 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -857,7 +857,7 @@ static int snd_sonicvibes_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_sonicvibes_playback_ops = {
+static const struct snd_pcm_ops snd_sonicvibes_playback_ops = {
.open = snd_sonicvibes_playback_open,
.close = snd_sonicvibes_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -868,7 +868,7 @@ static struct snd_pcm_ops snd_sonicvibes_playback_ops = {
.pointer = snd_sonicvibes_playback_pointer,
};
-static struct snd_pcm_ops snd_sonicvibes_capture_ops = {
+static const struct snd_pcm_ops snd_sonicvibes_capture_ops = {
.open = snd_sonicvibes_capture_open,
.close = snd_sonicvibes_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 599d2b7eb5b8..27f0ed840979 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -2070,7 +2070,7 @@ static int snd_trident_foldback_close(struct snd_pcm_substream *substream)
PCM operations
---------------------------------------------------------------------------*/
-static struct snd_pcm_ops snd_trident_playback_ops = {
+static const struct snd_pcm_ops snd_trident_playback_ops = {
.open = snd_trident_playback_open,
.close = snd_trident_playback_close,
.ioctl = snd_trident_ioctl,
@@ -2081,7 +2081,7 @@ static struct snd_pcm_ops snd_trident_playback_ops = {
.pointer = snd_trident_playback_pointer,
};
-static struct snd_pcm_ops snd_trident_nx_playback_ops = {
+static const struct snd_pcm_ops snd_trident_nx_playback_ops = {
.open = snd_trident_playback_open,
.close = snd_trident_playback_close,
.ioctl = snd_trident_ioctl,
@@ -2115,7 +2115,7 @@ static struct snd_pcm_ops snd_trident_si7018_capture_ops = {
.pointer = snd_trident_playback_pointer,
};
-static struct snd_pcm_ops snd_trident_foldback_ops = {
+static const struct snd_pcm_ops snd_trident_foldback_ops = {
.open = snd_trident_foldback_open,
.close = snd_trident_foldback_close,
.ioctl = snd_trident_ioctl,
@@ -2126,7 +2126,7 @@ static struct snd_pcm_ops snd_trident_foldback_ops = {
.pointer = snd_trident_playback_pointer,
};
-static struct snd_pcm_ops snd_trident_nx_foldback_ops = {
+static const struct snd_pcm_ops snd_trident_nx_foldback_ops = {
.open = snd_trident_foldback_open,
.close = snd_trident_foldback_close,
.ioctl = snd_trident_ioctl,
@@ -2138,7 +2138,7 @@ static struct snd_pcm_ops snd_trident_nx_foldback_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-static struct snd_pcm_ops snd_trident_spdif_ops = {
+static const struct snd_pcm_ops snd_trident_spdif_ops = {
.open = snd_trident_spdif_open,
.close = snd_trident_spdif_close,
.ioctl = snd_trident_ioctl,
@@ -2149,7 +2149,7 @@ static struct snd_pcm_ops snd_trident_spdif_ops = {
.pointer = snd_trident_spdif_pointer,
};
-static struct snd_pcm_ops snd_trident_spdif_7018_ops = {
+static const struct snd_pcm_ops snd_trident_spdif_7018_ops = {
.open = snd_trident_spdif_open,
.close = snd_trident_spdif_close,
.ioctl = snd_trident_ioctl,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 3dd038bdb204..38a17b4342a6 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1366,7 +1366,7 @@ static int snd_via8233_playback_close(struct snd_pcm_substream *substream)
/* via686 playback callbacks */
-static struct snd_pcm_ops snd_via686_playback_ops = {
+static const struct snd_pcm_ops snd_via686_playback_ops = {
.open = snd_via686_playback_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1379,7 +1379,7 @@ static struct snd_pcm_ops snd_via686_playback_ops = {
};
/* via686 capture callbacks */
-static struct snd_pcm_ops snd_via686_capture_ops = {
+static const struct snd_pcm_ops snd_via686_capture_ops = {
.open = snd_via82xx_capture_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1392,7 +1392,7 @@ static struct snd_pcm_ops snd_via686_capture_ops = {
};
/* via823x DSX playback callbacks */
-static struct snd_pcm_ops snd_via8233_playback_ops = {
+static const struct snd_pcm_ops snd_via8233_playback_ops = {
.open = snd_via8233_playback_open,
.close = snd_via8233_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1405,7 +1405,7 @@ static struct snd_pcm_ops snd_via8233_playback_ops = {
};
/* via823x multi-channel playback callbacks */
-static struct snd_pcm_ops snd_via8233_multi_ops = {
+static const struct snd_pcm_ops snd_via8233_multi_ops = {
.open = snd_via8233_multi_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1418,7 +1418,7 @@ static struct snd_pcm_ops snd_via8233_multi_ops = {
};
/* via823x capture callbacks */
-static struct snd_pcm_ops snd_via8233_capture_ops = {
+static const struct snd_pcm_ops snd_via8233_capture_ops = {
.open = snd_via82xx_capture_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 99b9137bc677..2f6d40f10618 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -804,7 +804,7 @@ static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream)
/* via686 playback callbacks */
-static struct snd_pcm_ops snd_via686_playback_ops = {
+static const struct snd_pcm_ops snd_via686_playback_ops = {
.open = snd_via82xx_playback_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -817,7 +817,7 @@ static struct snd_pcm_ops snd_via686_playback_ops = {
};
/* via686 capture callbacks */
-static struct snd_pcm_ops snd_via686_capture_ops = {
+static const struct snd_pcm_ops snd_via686_capture_ops = {
.open = snd_via82xx_capture_open,
.close = snd_via82xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 4c26076dbf78..ffee284898b3 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1123,7 +1123,7 @@ static int snd_ymfpci_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_ymfpci_playback_ops = {
+static const struct snd_pcm_ops snd_ymfpci_playback_ops = {
.open = snd_ymfpci_playback_open,
.close = snd_ymfpci_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1134,7 +1134,7 @@ static struct snd_pcm_ops snd_ymfpci_playback_ops = {
.pointer = snd_ymfpci_playback_pointer,
};
-static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
+static const struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
.open = snd_ymfpci_capture_rec_open,
.close = snd_ymfpci_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1169,7 +1169,7 @@ int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
snd_pcm_std_chmaps, 2, 0, NULL);
}
-static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
+static const struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
.open = snd_ymfpci_capture_ac97_open,
.close = snd_ymfpci_capture_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1203,7 +1203,7 @@ int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
return 0;
}
-static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
+static const struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
.open = snd_ymfpci_playback_spdif_open,
.close = snd_ymfpci_playback_spdif_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1236,7 +1236,7 @@ int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
return 0;
}
-static struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = {
+static const struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = {
.open = snd_ymfpci_playback_4ch_open,
.close = snd_ymfpci_playback_4ch_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 368242519279..b84d7d34f188 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -564,9 +564,7 @@ static int snd_ps3_pcm_hw_params(struct snd_pcm_substream *substream,
static int snd_ps3_pcm_hw_free(struct snd_pcm_substream *substream)
{
- int ret;
- ret = snd_pcm_lib_free_pages(substream);
- return ret;
+ return snd_pcm_lib_free_pages(substream);
};
static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index c8fafba218a5..58ee8089bbf9 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1303,19 +1303,19 @@ static int tumbler_init(struct snd_pmac *chip)
&mix->line_mute, 1);
irq = tumbler_find_device("headphone-detect",
NULL, &mix->hp_detect, 0);
- if (irq <= NO_IRQ)
+ if (irq <= 0)
irq = tumbler_find_device("headphone-detect",
NULL, &mix->hp_detect, 1);
- if (irq <= NO_IRQ)
+ if (irq <= 0)
irq = tumbler_find_device("keywest-gpio15",
NULL, &mix->hp_detect, 1);
mix->headphone_irq = irq;
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 0);
- if (irq <= NO_IRQ)
+ if (irq <= 0)
irq = tumbler_find_device("line-output-detect",
NULL, &mix->line_detect, 1);
- if (IS_G4DA && irq <= NO_IRQ)
+ if (IS_G4DA && irq <= 0)
irq = tumbler_find_device("keywest-gpio16",
NULL, &mix->line_detect, 1);
mix->lineout_irq = irq;
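The tumbler.c change drops the arch-specific NO_IRQ macro in favour of comparing against 0: a usable Linux IRQ number is always positive, so "nothing found" is simply irq <= 0. A small sketch of the same check applied to platform_get_irq(), with a hypothetical device and handler:

/* Sketch only: treat any non-positive return as "no usable interrupt".
 * platform_get_irq() returns a negative errno on failure, so irq <= 0
 * covers both the error case and a (theoretical) zero return.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int foo_request_detect_irq(struct platform_device *pdev,
				  irq_handler_t handler)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)
		return irq ? irq : -ENXIO;

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				"foo-detect", pdev);
}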
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index d1fb035f44db..504c7cd7f58a 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -897,7 +897,7 @@ static int acp_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops acp_dma_ops = {
+static const struct snd_pcm_ops acp_dma_ops = {
.open = acp_dma_open,
.close = acp_dma_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 6d9b8b44e2da..89ac5f5a93eb 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -308,9 +308,11 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_classd = {
.probe = atmel_classd_codec_probe,
- .controls = atmel_classd_snd_controls,
- .num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
.get_regmap = atmel_classd_codec_get_remap,
+ .component_driver = {
+ .controls = atmel_classd_snd_controls,
+ .num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
+ },
};
/* codec dai component */
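The atmel-classd hunk above is the first of a long run of identical conversions in this series: the controls, DAPM widgets and DAPM routes that used to sit directly in struct snd_soc_codec_driver now live in its embedded component_driver (a struct snd_soc_component_driver). A sketch of the pattern with made-up "foo" tables, showing that only the location of the fields changes:

/* Sketch only: the foo tables stand in for a real codec's controls,
 * widgets and routes.  The tables themselves are unchanged by the
 * conversion; only where they hang off the driver struct moves.
 */
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static const struct snd_kcontrol_new foo_controls[] = {
	SOC_SINGLE("Playback Switch", 0x00, 0, 1, 0),
};

static const struct snd_soc_dapm_widget foo_widgets[] = {
	SND_SOC_DAPM_DAC("DAC", "Playback", 0x01, 0, 0),
	SND_SOC_DAPM_OUTPUT("OUT"),
};

static const struct snd_soc_dapm_route foo_routes[] = {
	{ "OUT", NULL, "DAC" },
};

static struct snd_soc_codec_driver soc_codec_dev_foo = {
	.component_driver = {
		.controls		= foo_controls,
		.num_controls		= ARRAY_SIZE(foo_controls),
		.dapm_widgets		= foo_widgets,
		.num_dapm_widgets	= ARRAY_SIZE(foo_widgets),
		.dapm_routes		= foo_routes,
		.num_dapm_routes	= ARRAY_SIZE(foo_routes),
	},
};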
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
index da861b44413f..91b7069c3499 100644
--- a/sound/soc/atmel/atmel-pcm-pdc.c
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -381,7 +381,7 @@ static int atmel_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops atmel_pcm_ops = {
+static const struct snd_pcm_ops atmel_pcm_ops = {
.open = atmel_pcm_open,
.close = atmel_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c
index 5f56da60c92f..c917df7715d1 100644
--- a/sound/soc/atmel/atmel-pdmic.c
+++ b/sound/soc/atmel/atmel-pdmic.c
@@ -80,7 +80,7 @@ static struct atmel_pdmic_pdata *atmel_pdmic_dt_init(struct device *dev)
if (pdata->mic_max_freq < pdata->mic_min_freq) {
dev_err(dev,
- "mic-max-freq should not less than mic-min-freq\n");
+ "mic-max-freq should not be less than mic-min-freq\n");
return ERR_PTR(-EINVAL);
}
@@ -115,8 +115,10 @@ static int atmel_pdmic_cpu_dai_startup(struct snd_pcm_substream *substream,
return ret;
ret = clk_prepare_enable(dd->pclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(dd->gclk);
return ret;
+ }
/* Clear all bits in the Control Register(PDMIC_CR) */
regmap_write(dd->regmap, PDMIC_CR, 0);
@@ -283,7 +285,7 @@ static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
8, ARRAY_SIZE(mic_gain_table)-1, TLV_DB_SCALE_ITEM(-6500, 100, 0),
);
-int pdmic_get_mic_volsw(struct snd_kcontrol *kcontrol,
+static int pdmic_get_mic_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
@@ -357,8 +359,10 @@ static int atmel_pdmic_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_pdmic = {
.probe = atmel_pdmic_codec_probe,
- .controls = atmel_pdmic_snd_controls,
- .num_controls = ARRAY_SIZE(atmel_pdmic_snd_controls),
+ .component_driver = {
+ .controls = atmel_pdmic_snd_controls,
+ .num_controls = ARRAY_SIZE(atmel_pdmic_snd_controls),
+ },
};
/* codec dai component */
@@ -596,7 +600,7 @@ static int atmel_pdmic_probe(struct platform_device *pdev)
dd->irq = platform_get_irq(pdev, 0);
if (dd->irq < 0) {
ret = dd->irq;
- dev_err(dev, "failed to could not get irq: %d\n", ret);
+ dev_err(dev, "failed to get irq: %d\n", ret);
return ret;
}
@@ -614,7 +618,7 @@ static int atmel_pdmic_probe(struct platform_device *pdev)
return ret;
}
- /* The gclk clock frequency must always be tree times
+ /* The gclk clock frequency must always be three times
* lower than the pclk clock frequency
*/
ret = clk_set_rate(dd->gclk, clk_get_rate(dd->pclk)/3);
@@ -649,7 +653,7 @@ static int atmel_pdmic_probe(struct platform_device *pdev)
return ret;
}
- /* Get the minimal and maximal sample rate that micphone supports */
+ /* Get the minimal and maximal sample rate that the microphone supports */
atmel_pdmic_get_sample_rate(dd, &rate_min, &rate_max);
/* register cpu dai */
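Besides the string and comment fixes, the atmel-pdmic hunks close a small leak in the startup path: if enabling pclk fails after gclk has already been prepared and enabled, gclk must be unwound before returning. The general shape of that two-clock bring-up, simplified from the driver and using an illustrative foo_priv:

/* Sketch only: the fields mirror the driver's gclk/pclk pair, the rest
 * is illustrative.  Every clock enabled before a failure is disabled
 * again on the error path.
 */
#include <linux/clk.h>

struct foo_priv {
	struct clk *gclk;
	struct clk *pclk;
};

static int foo_clocks_enable(struct foo_priv *dd)
{
	int ret;

	ret = clk_prepare_enable(dd->gclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dd->pclk);
	if (ret) {
		clk_disable_unprepare(dd->gclk);	/* undo the first enable */
		return ret;
	}

	return 0;
}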
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index e8bed6b0c9db..b013a4c62b63 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1361,12 +1361,14 @@ static struct snd_soc_codec_driver soc_codec_dev_pm860x = {
.set_bias_level = pm860x_set_bias_level,
.get_regmap = pm860x_get_regmap,
- .controls = pm860x_snd_controls,
- .num_controls = ARRAY_SIZE(pm860x_snd_controls),
- .dapm_widgets = pm860x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pm860x_dapm_widgets),
- .dapm_routes = pm860x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pm860x_dapm_routes),
+ .component_driver = {
+ .controls = pm860x_snd_controls,
+ .num_controls = ARRAY_SIZE(pm860x_snd_controls),
+ .dapm_widgets = pm860x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pm860x_dapm_widgets),
+ .dapm_routes = pm860x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pm860x_dapm_routes),
+ },
};
static int pm860x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 1cd6ab344d67..c67667bb970f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -92,6 +92,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX9877 if I2C
select SND_SOC_MC13783 if MFD_MC13XXX
select SND_SOC_ML26124 if I2C
+ select SND_SOC_NAU8810 if I2C
select SND_SOC_NAU8825 if I2C
select SND_SOC_HDMI_CODEC
select SND_SOC_PCM1681 if I2C
@@ -112,6 +113,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_RT5645 if I2C
select SND_SOC_RT5651 if I2C
select SND_SOC_RT5659 if I2C
+ select SND_SOC_RT5660 if I2C
+ select SND_SOC_RT5663 if I2C
select SND_SOC_RT5670 if I2C
select SND_SOC_RT5677 if I2C && SPI_MASTER
select SND_SOC_SGTL5000 if I2C
@@ -645,6 +648,8 @@ config SND_SOC_RL6231
default y if SND_SOC_RT5645=y
default y if SND_SOC_RT5651=y
default y if SND_SOC_RT5659=y
+ default y if SND_SOC_RT5660=y
+ default y if SND_SOC_RT5663=y
default y if SND_SOC_RT5670=y
default y if SND_SOC_RT5677=y
default m if SND_SOC_RT5514=m
@@ -653,6 +658,8 @@ config SND_SOC_RL6231
default m if SND_SOC_RT5645=m
default m if SND_SOC_RT5651=m
default m if SND_SOC_RT5659=m
+ default m if SND_SOC_RT5660=m
+ default m if SND_SOC_RT5663=m
default m if SND_SOC_RT5670=m
default m if SND_SOC_RT5677=m
@@ -665,6 +672,7 @@ config SND_SOC_RL6347A
config SND_SOC_RT286
tristate
+ select SND_SOC_RT5663
depends on I2C
config SND_SOC_RT298
@@ -697,6 +705,12 @@ config SND_SOC_RT5651
config SND_SOC_RT5659
tristate
+config SND_SOC_RT5660
+ tristate
+
+config SND_SOC_RT5663
+ tristate
+
config SND_SOC_RT5670
tristate
@@ -1064,6 +1078,10 @@ config SND_SOC_MC13783
config SND_SOC_ML26124
tristate
+config SND_SOC_NAU8810
+ tristate "Nuvoton Technology Corporation NAU88C10 CODEC"
+ depends on I2C
+
config SND_SOC_NAU8825
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 58036af2c7d9..958cd4912fbc 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -86,6 +86,7 @@ snd-soc-max9850-objs := max9850.o
snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
+snd-soc-nau8810-objs := nau8810.o
snd-soc-nau8825-objs := nau8825.o
snd-soc-hdmi-codec-objs := hdmi-codec.o
snd-soc-pcm1681-objs := pcm1681.o
@@ -112,6 +113,8 @@ snd-soc-rt5640-objs := rt5640.o
snd-soc-rt5645-objs := rt5645.o
snd-soc-rt5651-objs := rt5651.o
snd-soc-rt5659-objs := rt5659.o
+snd-soc-rt5660-objs := rt5660.o
+snd-soc-rt5663-objs := rt5663.o
snd-soc-rt5670-objs := rt5670.o
snd-soc-rt5677-objs := rt5677.o
snd-soc-rt5677-spi-objs := rt5677-spi.o
@@ -307,6 +310,7 @@ obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
+obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
@@ -333,6 +337,8 @@ obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
obj-$(CONFIG_SND_SOC_RT5645) += snd-soc-rt5645.o
obj-$(CONFIG_SND_SOC_RT5651) += snd-soc-rt5651.o
obj-$(CONFIG_SND_SOC_RT5659) += snd-soc-rt5659.o
+obj-$(CONFIG_SND_SOC_RT5660) += snd-soc-rt5660.o
+obj-$(CONFIG_SND_SOC_RT5663) += snd-soc-rt5663.o
obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
obj-$(CONFIG_SND_SOC_RT5677_SPI) += snd-soc-rt5677-spi.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 2fc89155f14a..935ff7cb71c5 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2408,28 +2408,28 @@ static void ab8500_codec_of_probe(struct device *dev, struct device_node *np,
{
u32 value;
- if (of_get_property(np, "stericsson,amic1-type-single-ended", NULL))
+ if (of_property_read_bool(np, "stericsson,amic1-type-single-ended"))
codec->amics.mic1_type = AMIC_TYPE_SINGLE_ENDED;
else
codec->amics.mic1_type = AMIC_TYPE_DIFFERENTIAL;
- if (of_get_property(np, "stericsson,amic2-type-single-ended", NULL))
+ if (of_property_read_bool(np, "stericsson,amic2-type-single-ended"))
codec->amics.mic2_type = AMIC_TYPE_SINGLE_ENDED;
else
codec->amics.mic2_type = AMIC_TYPE_DIFFERENTIAL;
/* Has a non-standard Vamic been requested? */
- if (of_get_property(np, "stericsson,amic1a-bias-vamic2", NULL))
+ if (of_property_read_bool(np, "stericsson,amic1a-bias-vamic2"))
codec->amics.mic1a_micbias = AMIC_MICBIAS_VAMIC2;
else
codec->amics.mic1a_micbias = AMIC_MICBIAS_VAMIC1;
- if (of_get_property(np, "stericsson,amic1b-bias-vamic2", NULL))
+ if (of_property_read_bool(np, "stericsson,amic1b-bias-vamic2"))
codec->amics.mic1b_micbias = AMIC_MICBIAS_VAMIC2;
else
codec->amics.mic1b_micbias = AMIC_MICBIAS_VAMIC1;
- if (of_get_property(np, "stericsson,amic2-bias-vamic1", NULL))
+ if (of_property_read_bool(np, "stericsson,amic2-bias-vamic1"))
codec->amics.mic2_micbias = AMIC_MICBIAS_VAMIC1;
else
codec->amics.mic2_micbias = AMIC_MICBIAS_VAMIC2;
@@ -2525,12 +2525,14 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver ab8500_codec_driver = {
.probe = ab8500_codec_probe,
- .controls = ab8500_ctrls,
- .num_controls = ARRAY_SIZE(ab8500_ctrls),
- .dapm_widgets = ab8500_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ab8500_dapm_widgets),
- .dapm_routes = ab8500_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ab8500_dapm_routes),
+ .component_driver = {
+ .controls = ab8500_ctrls,
+ .num_controls = ARRAY_SIZE(ab8500_ctrls),
+ .dapm_widgets = ab8500_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ab8500_dapm_widgets),
+ .dapm_routes = ab8500_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ab8500_dapm_routes),
+ },
};
static int ab8500_codec_driver_probe(struct platform_device *pdev)
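The ab8500-codec hunk replaces of_get_property(np, name, NULL) checks with of_property_read_bool(), the idiomatic test for a present-or-absent devicetree property. A minimal sketch with an invented property name:

/* Sketch only: the property name is invented.  A boolean DT property is
 * present or absent, so of_property_read_bool() says exactly that.
 */
#include <linux/of.h>

static bool foo_mic_is_single_ended(struct device_node *np)
{
	/* equivalent to: of_get_property(np, "...", NULL) != NULL */
	return of_property_read_bool(np, "vendor,mic-single-ended");
}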
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index 5b3224c63943..f7f04c6be01e 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -117,10 +117,12 @@ static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
.suspend = ac97_soc_suspend,
.resume = ac97_soc_resume,
- .dapm_widgets = ac97_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ac97_widgets),
- .dapm_routes = ac97_routes,
- .num_dapm_routes = ARRAY_SIZE(ac97_routes),
+ .component_driver = {
+ .dapm_widgets = ac97_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ac97_widgets),
+ .dapm_routes = ac97_routes,
+ .num_dapm_routes = ARRAY_SIZE(ac97_routes),
+ },
};
static int ac97_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index e2ce6c4d7ece..a478239aadac 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -327,12 +327,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ad1836 = {
.suspend = ad1836_suspend,
.resume = ad1836_resume,
- .controls = ad183x_controls,
- .num_controls = ARRAY_SIZE(ad183x_controls),
- .dapm_widgets = ad183x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ad183x_dapm_widgets),
- .dapm_routes = ad183x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ad183x_dapm_routes),
+ .component_driver = {
+ .controls = ad183x_controls,
+ .num_controls = ARRAY_SIZE(ad183x_controls),
+ .dapm_widgets = ad183x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad183x_dapm_widgets),
+ .dapm_routes = ad183x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ad183x_dapm_routes),
+ },
};
static const struct reg_default ad1836_reg_defaults[] = {
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 3a3f3f2343d7..d643557d89a7 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -410,12 +410,14 @@ static int ad193x_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_ad193x = {
.probe = ad193x_codec_probe,
- .controls = ad193x_snd_controls,
- .num_controls = ARRAY_SIZE(ad193x_snd_controls),
- .dapm_widgets = ad193x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ad193x_dapm_widgets),
- .dapm_routes = audio_paths,
- .num_dapm_routes = ARRAY_SIZE(audio_paths),
+ .component_driver = {
+ .controls = ad193x_snd_controls,
+ .num_controls = ARRAY_SIZE(ad193x_snd_controls),
+ .dapm_widgets = ad193x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad193x_dapm_widgets),
+ .dapm_routes = audio_paths,
+ .num_dapm_routes = ARRAY_SIZE(audio_paths),
+ },
};
const struct regmap_config ad193x_regmap_config = {
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 9ef20dbccbe3..b7c1b9f4bf5f 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -299,12 +299,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
.probe = ad1980_soc_probe,
.remove = ad1980_soc_remove,
- .controls = ad1980_snd_ac97_controls,
- .num_controls = ARRAY_SIZE(ad1980_snd_ac97_controls),
- .dapm_widgets = ad1980_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
- .dapm_routes = ad1980_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ad1980_dapm_routes),
+ .component_driver = {
+ .controls = ad1980_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(ad1980_snd_ac97_controls),
+ .dapm_widgets = ad1980_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
+ .dapm_routes = ad1980_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ad1980_dapm_routes),
+ },
};
static int ad1980_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index a9400aef60b5..3e358a49442d 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -55,10 +55,12 @@ static struct snd_soc_dai_driver ad73311_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_ad73311 = {
- .dapm_widgets = ad73311_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ad73311_dapm_widgets),
- .dapm_routes = ad73311_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ad73311_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = ad73311_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad73311_dapm_widgets),
+ .dapm_routes = ad73311_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ad73311_dapm_routes),
+ },
};
static int ad73311_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1556b360fa15..8fa9045571ff 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -1466,12 +1466,14 @@ static struct snd_soc_codec_driver adau1373_codec_driver = {
.set_pll = adau1373_set_pll,
- .controls = adau1373_controls,
- .num_controls = ARRAY_SIZE(adau1373_controls),
- .dapm_widgets = adau1373_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau1373_dapm_widgets),
- .dapm_routes = adau1373_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes),
+ .component_driver = {
+ .controls = adau1373_controls,
+ .num_controls = ARRAY_SIZE(adau1373_controls),
+ .dapm_widgets = adau1373_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau1373_dapm_widgets),
+ .dapm_routes = adau1373_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes),
+ },
};
static int adau1373_i2c_probe(struct i2c_client *client,
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index de53c0d7bf10..3bad1bc8c00a 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -765,13 +765,14 @@ static struct snd_soc_codec_driver adau1701_codec_drv = {
.set_bias_level = adau1701_set_bias_level,
.idle_bias_off = true,
- .controls = adau1701_controls,
- .num_controls = ARRAY_SIZE(adau1701_controls),
- .dapm_widgets = adau1701_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau1701_dapm_widgets),
- .dapm_routes = adau1701_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adau1701_dapm_routes),
-
+ .component_driver = {
+ .controls = adau1701_controls,
+ .num_controls = ARRAY_SIZE(adau1701_controls),
+ .dapm_widgets = adau1701_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau1701_dapm_widgets),
+ .dapm_routes = adau1701_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau1701_dapm_routes),
+ },
.set_sysclk = adau1701_set_sysclk,
};
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index b95d29dbd13d..3bc3cc559dde 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -719,12 +719,14 @@ static const struct snd_soc_codec_driver adau1761_codec_driver = {
.set_bias_level = adau1761_set_bias_level,
.suspend_bias_off = true,
- .controls = adau1761_controls,
- .num_controls = ARRAY_SIZE(adau1761_controls),
- .dapm_widgets = adau1x61_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau1x61_dapm_widgets),
- .dapm_routes = adau1x61_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adau1x61_dapm_routes),
+ .component_driver = {
+ .controls = adau1761_controls,
+ .num_controls = ARRAY_SIZE(adau1761_controls),
+ .dapm_widgets = adau1x61_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau1x61_dapm_widgets),
+ .dapm_routes = adau1x61_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau1x61_dapm_routes),
+ },
};
#define ADAU1761_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c
index bc1bb56dae63..546071c6c0d0 100644
--- a/sound/soc/codecs/adau1781.c
+++ b/sound/soc/codecs/adau1781.c
@@ -432,12 +432,14 @@ static const struct snd_soc_codec_driver adau1781_codec_driver = {
.set_bias_level = adau1781_set_bias_level,
.suspend_bias_off = true,
- .controls = adau1781_controls,
- .num_controls = ARRAY_SIZE(adau1781_controls),
- .dapm_widgets = adau1781_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau1781_dapm_widgets),
- .dapm_routes = adau1781_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adau1781_dapm_routes),
+ .component_driver = {
+ .controls = adau1781_controls,
+ .num_controls = ARRAY_SIZE(adau1781_controls),
+ .dapm_widgets = adau1781_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau1781_dapm_widgets),
+ .dapm_routes = adau1781_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau1781_dapm_routes),
+ },
};
#define ADAU1781_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c
index 9bdd15f408c1..b319db6a69f8 100644
--- a/sound/soc/codecs/adau1977.c
+++ b/sound/soc/codecs/adau1977.c
@@ -873,12 +873,14 @@ static struct snd_soc_codec_driver adau1977_codec_driver = {
.set_sysclk = adau1977_set_sysclk,
.idle_bias_off = true,
- .controls = adau1977_snd_controls,
- .num_controls = ARRAY_SIZE(adau1977_snd_controls),
- .dapm_widgets = adau1977_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau1977_dapm_widgets),
- .dapm_routes = adau1977_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adau1977_dapm_routes),
+ .component_driver = {
+ .controls = adau1977_snd_controls,
+ .num_controls = ARRAY_SIZE(adau1977_snd_controls),
+ .dapm_widgets = adau1977_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau1977_dapm_widgets),
+ .dapm_routes = adau1977_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau1977_dapm_routes),
+ },
};
static int adau1977_setup_micbias(struct adau1977 *adau1977)
diff --git a/sound/soc/codecs/adau7002.c b/sound/soc/codecs/adau7002.c
index 9df72c6adcca..6384c5491de8 100644
--- a/sound/soc/codecs/adau7002.c
+++ b/sound/soc/codecs/adau7002.c
@@ -39,10 +39,12 @@ static struct snd_soc_dai_driver adau7002_dai = {
};
static const struct snd_soc_codec_driver adau7002_codec_driver = {
- .dapm_widgets = adau7002_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adau7002_widgets),
- .dapm_routes = adau7002_routes,
- .num_dapm_routes = ARRAY_SIZE(adau7002_routes),
+ .component_driver = {
+ .dapm_widgets = adau7002_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau7002_widgets),
+ .dapm_routes = adau7002_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau7002_routes),
+ },
};
static int adau7002_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index acff8d62059c..6e793ebb5883 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -834,12 +834,14 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
.set_pll = adav80x_set_pll,
.set_sysclk = adav80x_set_sysclk,
- .controls = adav80x_controls,
- .num_controls = ARRAY_SIZE(adav80x_controls),
- .dapm_widgets = adav80x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(adav80x_dapm_widgets),
- .dapm_routes = adav80x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(adav80x_dapm_routes),
+ .component_driver = {
+ .controls = adav80x_controls,
+ .num_controls = ARRAY_SIZE(adav80x_controls),
+ .dapm_widgets = adav80x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adav80x_dapm_widgets),
+ .dapm_routes = adav80x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(adav80x_dapm_routes),
+ },
};
int adav80x_bus_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/ads117x.c b/sound/soc/codecs/ads117x.c
index c5be1bdc2c9a..90c756d183b4 100644
--- a/sound/soc/codecs/ads117x.c
+++ b/sound/soc/codecs/ads117x.c
@@ -59,10 +59,12 @@ static struct snd_soc_dai_driver ads117x_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_ads117x = {
- .dapm_widgets = ads117x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ads117x_dapm_widgets),
- .dapm_routes = ads117x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ads117x_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = ads117x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ads117x_dapm_widgets),
+ .dapm_routes = ads117x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ads117x_dapm_routes),
+ },
};
static int ads117x_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index 595d02d7602c..1a9d233c94d0 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -163,7 +163,10 @@ static struct snd_soc_dai_driver ak4104_dai = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_3LE |
SNDRV_PCM_FMTBIT_S24_LE
@@ -245,10 +248,12 @@ static struct snd_soc_codec_driver soc_codec_device_ak4104 = {
.suspend = ak4104_soc_suspend,
.resume = ak4104_soc_resume,
- .dapm_widgets = ak4104_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4104_dapm_widgets),
- .dapm_routes = ak4104_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ak4104_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = ak4104_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4104_dapm_widgets),
+ .dapm_routes = ak4104_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak4104_dapm_routes),
+ }
};
static const struct regmap_config ak4104_regmap = {
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 54428c64387b..66cfffde9a12 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -395,12 +395,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4535 = {
.set_bias_level = ak4535_set_bias_level,
.suspend_bias_off = true,
- .controls = ak4535_snd_controls,
- .num_controls = ARRAY_SIZE(ak4535_snd_controls),
- .dapm_widgets = ak4535_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4535_dapm_widgets),
- .dapm_routes = ak4535_audio_map,
- .num_dapm_routes = ARRAY_SIZE(ak4535_audio_map),
+ .component_driver = {
+ .controls = ak4535_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4535_snd_controls),
+ .dapm_widgets = ak4535_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4535_dapm_widgets),
+ .dapm_routes = ak4535_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(ak4535_audio_map),
+ },
};
static int ak4535_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
index 298dedc05140..b92c548b9d29 100644
--- a/sound/soc/codecs/ak4554.c
+++ b/sound/soc/codecs/ak4554.c
@@ -65,10 +65,12 @@ static struct snd_soc_dai_driver ak4554_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_ak4554 = {
- .dapm_widgets = ak4554_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4554_dapm_widgets),
- .dapm_routes = ak4554_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ak4554_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = ak4554_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4554_dapm_widgets),
+ .dapm_routes = ak4554_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak4554_dapm_routes),
+ },
};
static int ak4554_soc_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 97798d250f08..e819dd8c82fd 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -458,12 +458,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4613 = {
.suspend = ak4613_suspend,
.resume = ak4613_resume,
.set_bias_level = ak4613_set_bias_level,
- .controls = ak4613_snd_controls,
- .num_controls = ARRAY_SIZE(ak4613_snd_controls),
- .dapm_widgets = ak4613_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4613_dapm_widgets),
- .dapm_routes = ak4613_intercon,
- .num_dapm_routes = ARRAY_SIZE(ak4613_intercon),
+ .component_driver = {
+ .controls = ak4613_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4613_snd_controls),
+ .dapm_widgets = ak4613_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4613_dapm_widgets),
+ .dapm_routes = ak4613_intercon,
+ .num_dapm_routes = ARRAY_SIZE(ak4613_intercon),
+ },
};
static void ak4613_parse_of(struct ak4613_priv *priv,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index b14176f8d884..c91717d08513 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -505,12 +505,14 @@ static struct snd_soc_dai_driver ak4641_dai[] = {
};
static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
- .controls = ak4641_snd_controls,
- .num_controls = ARRAY_SIZE(ak4641_snd_controls),
- .dapm_widgets = ak4641_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4641_dapm_widgets),
- .dapm_routes = ak4641_audio_map,
- .num_dapm_routes = ARRAY_SIZE(ak4641_audio_map),
+ .component_driver = {
+ .controls = ak4641_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4641_snd_controls),
+ .dapm_widgets = ak4641_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4641_dapm_widgets),
+ .dapm_routes = ak4641_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(ak4641_audio_map),
+ },
.set_bias_level = ak4641_set_bias_level,
.suspend_bias_off = true,
};
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index cc941d66ec3d..2609f95b7d19 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -555,12 +555,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
.suspend = ak4642_suspend,
.resume = ak4642_resume,
.set_bias_level = ak4642_set_bias_level,
- .controls = ak4642_snd_controls,
- .num_controls = ARRAY_SIZE(ak4642_snd_controls),
- .dapm_widgets = ak4642_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4642_dapm_widgets),
- .dapm_routes = ak4642_intercon,
- .num_dapm_routes = ARRAY_SIZE(ak4642_intercon),
+ .component_driver = {
+ .controls = ak4642_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4642_snd_controls),
+ .dapm_widgets = ak4642_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4642_dapm_widgets),
+ .dapm_routes = ak4642_intercon,
+ .num_dapm_routes = ARRAY_SIZE(ak4642_intercon),
+ },
};
static const struct regmap_config ak4642_regmap = {
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index c73a9f6914b6..6088afaabf62 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -612,12 +612,14 @@ static struct snd_soc_dai_driver ak4671_dai = {
static struct snd_soc_codec_driver soc_codec_dev_ak4671 = {
.set_bias_level = ak4671_set_bias_level,
- .controls = ak4671_snd_controls,
- .num_controls = ARRAY_SIZE(ak4671_snd_controls),
- .dapm_widgets = ak4671_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak4671_dapm_widgets),
- .dapm_routes = ak4671_intercon,
- .num_dapm_routes = ARRAY_SIZE(ak4671_intercon),
+ .component_driver = {
+ .controls = ak4671_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4671_snd_controls),
+ .dapm_widgets = ak4671_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4671_dapm_widgets),
+ .dapm_routes = ak4671_intercon,
+ .num_dapm_routes = ARRAY_SIZE(ak4671_intercon),
+ },
};
static const struct regmap_config ak4671_regmap = {
diff --git a/sound/soc/codecs/ak5386.c b/sound/soc/codecs/ak5386.c
index afa95360826d..0ef2df223336 100644
--- a/sound/soc/codecs/ak5386.c
+++ b/sound/soc/codecs/ak5386.c
@@ -74,10 +74,12 @@ static struct snd_soc_codec_driver soc_codec_ak5386 = {
.remove = ak5386_soc_remove,
.suspend = ak5386_soc_suspend,
.resume = ak5386_soc_resume,
- .dapm_widgets = ak5386_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ak5386_dapm_widgets),
- .dapm_routes = ak5386_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(ak5386_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = ak5386_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak5386_dapm_widgets),
+ .dapm_routes = ak5386_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak5386_dapm_routes),
+ },
};
static int ak5386_set_dai_fmt(struct snd_soc_dai *codec_dai,
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 4d3ba33eb6f9..adb80d8719bd 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1072,12 +1072,14 @@ static const struct snd_soc_codec_driver soc_codec_device_alc5632 = {
.set_bias_level = alc5632_set_bias_level,
.suspend_bias_off = true,
- .controls = alc5632_snd_controls,
- .num_controls = ARRAY_SIZE(alc5632_snd_controls),
- .dapm_widgets = alc5632_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(alc5632_dapm_widgets),
- .dapm_routes = alc5632_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(alc5632_dapm_routes),
+ .component_driver = {
+ .controls = alc5632_snd_controls,
+ .num_controls = ARRAY_SIZE(alc5632_snd_controls),
+ .dapm_widgets = alc5632_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(alc5632_dapm_widgets),
+ .dapm_routes = alc5632_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(alc5632_dapm_routes),
+ },
};
static const struct regmap_config alc5632_regmap = {
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index ecfdbfcae366..846ca079845f 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -109,7 +109,7 @@ static int arizona_spk_ev(struct snd_soc_dapm_widget *w,
break;
}
- return 0;
+ return arizona_out_ev(w, kcontrol, event);
}
static irqreturn_t arizona_thermal_warn(int irq, void *data)
@@ -159,12 +159,14 @@ static irqreturn_t arizona_thermal_shutdown(int irq, void *data)
static const struct snd_soc_dapm_widget arizona_spkl =
SND_SOC_DAPM_PGA_E("OUT4L", SND_SOC_NOPM,
ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_spk_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU);
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD);
static const struct snd_soc_dapm_widget arizona_spkr =
SND_SOC_DAPM_PGA_E("OUT4R", SND_SOC_NOPM,
ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_spk_ev,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU);
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD);
int arizona_init_spk(struct snd_soc_codec *codec)
{
@@ -864,6 +866,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -877,6 +880,18 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
priv->out_up_pending++;
priv->out_up_delay += 17;
break;
+ case ARIZONA_OUT4L_ENA_SHIFT:
+ case ARIZONA_OUT4R_ENA_SHIFT:
+ priv->out_up_pending++;
+ switch (arizona->type) {
+ case WM5102:
+ case WM8997:
+ break;
+ default:
+ priv->out_up_delay += 10;
+ break;
+ }
+ break;
default:
break;
}
@@ -889,8 +904,12 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
case ARIZONA_OUT2R_ENA_SHIFT:
case ARIZONA_OUT3L_ENA_SHIFT:
case ARIZONA_OUT3R_ENA_SHIFT:
+ case ARIZONA_OUT4L_ENA_SHIFT:
+ case ARIZONA_OUT4R_ENA_SHIFT:
priv->out_up_pending--;
- if (!priv->out_up_pending) {
+ if (!priv->out_up_pending && priv->out_up_delay) {
+ dev_dbg(codec->dev, "Power up delay: %d\n",
+ priv->out_up_delay);
msleep(priv->out_up_delay);
priv->out_up_delay = 0;
}
@@ -911,6 +930,21 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
priv->out_down_pending++;
priv->out_down_delay++;
break;
+ case ARIZONA_OUT4L_ENA_SHIFT:
+ case ARIZONA_OUT4R_ENA_SHIFT:
+ priv->out_down_pending++;
+ switch (arizona->type) {
+ case WM5102:
+ case WM8997:
+ break;
+ case WM8998:
+ case WM1814:
+ priv->out_down_delay += 5;
+ break;
+ default:
+ priv->out_down_delay++;
+ break;
+ }
default:
break;
}
@@ -923,8 +957,12 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w,
case ARIZONA_OUT2R_ENA_SHIFT:
case ARIZONA_OUT3L_ENA_SHIFT:
case ARIZONA_OUT3R_ENA_SHIFT:
+ case ARIZONA_OUT4L_ENA_SHIFT:
+ case ARIZONA_OUT4R_ENA_SHIFT:
priv->out_down_pending--;
- if (!priv->out_down_pending) {
+ if (!priv->out_down_pending && priv->out_down_delay) {
+ dev_dbg(codec->dev, "Power down delay: %d\n",
+ priv->out_down_delay);
msleep(priv->out_down_delay);
priv->out_down_delay = 0;
}
@@ -1920,8 +1958,8 @@ static struct {
struct arizona_fll_cfg {
int n;
- int theta;
- int lambda;
+ unsigned int theta;
+ unsigned int lambda;
int refdiv;
int outdiv;
int fratio;
@@ -2188,13 +2226,13 @@ static void arizona_apply_fll(struct arizona *arizona, unsigned int base,
ARIZONA_FLL1_CTRL_UPD | cfg->n);
}
-static int arizona_is_enabled_fll(struct arizona_fll *fll)
+static int arizona_is_enabled_fll(struct arizona_fll *fll, int base)
{
struct arizona *arizona = fll->arizona;
unsigned int reg;
int ret;
- ret = regmap_read(arizona->regmap, fll->base + 1, &reg);
+ ret = regmap_read(arizona->regmap, base + 1, &reg);
if (ret != 0) {
arizona_fll_err(fll, "Failed to read current state: %d\n",
ret);
@@ -2208,21 +2246,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
{
struct arizona *arizona = fll->arizona;
bool use_sync = false;
- int already_enabled = arizona_is_enabled_fll(fll);
+ int already_enabled = arizona_is_enabled_fll(fll, fll->base);
+ int sync_enabled = arizona_is_enabled_fll(fll, fll->base + 0x10);
struct arizona_fll_cfg cfg;
int i;
unsigned int val;
if (already_enabled < 0)
return already_enabled;
+ if (sync_enabled < 0)
+ return sync_enabled;
if (already_enabled) {
/* Facilitate smooth refclk across the transition */
- regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
- ARIZONA_FLL1_GAIN_MASK, 0);
regmap_update_bits(fll->arizona->regmap, fll->base + 1,
ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
udelay(32);
+ regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
+ ARIZONA_FLL1_GAIN_MASK, 0);
}
/*
@@ -2233,6 +2274,10 @@ static int arizona_enable_fll(struct arizona_fll *fll)
fll->ref_src != fll->sync_src) {
arizona_calc_fll(fll, &cfg, fll->ref_freq, false);
+ /* Ref path hardcodes lambda to 65536 when sync is on */
+ if (fll->sync_src >= 0 && cfg.lambda)
+ cfg.theta = (cfg.theta * (1 << 16)) / cfg.lambda;
+
arizona_apply_fll(arizona, fll->base, &cfg, fll->ref_src,
false);
if (fll->sync_src >= 0) {
@@ -2255,6 +2300,9 @@ static int arizona_enable_fll(struct arizona_fll *fll)
return -EINVAL;
}
+ if (already_enabled && !!sync_enabled != use_sync)
+ arizona_fll_warn(fll, "Synchroniser changed on active FLL\n");
+
/*
* Increase the bandwidth if we're not using a low frequency
* sync source.
@@ -2268,14 +2316,14 @@ static int arizona_enable_fll(struct arizona_fll *fll)
ARIZONA_FLL1_SYNC_BW);
if (!already_enabled)
- pm_runtime_get(arizona->dev);
+ pm_runtime_get_sync(arizona->dev);
- regmap_update_bits_async(arizona->regmap, fll->base + 1,
- ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
if (use_sync)
regmap_update_bits_async(arizona->regmap, fll->base + 0x11,
ARIZONA_FLL1_SYNC_ENA,
ARIZONA_FLL1_SYNC_ENA);
+ regmap_update_bits_async(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
if (already_enabled)
regmap_update_bits_async(arizona->regmap, fll->base + 1,
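Of the arizona.c FLL changes above, the ref-path rescaling is the least obvious: when the synchroniser is active the hardware fixes lambda at 65536 (2^16), so the computed theta has to be rescaled to keep the fraction theta/lambda intact. A small sketch of the arithmetic with made-up values:

/* Sketch only, with made-up numbers: rescaling theta so that the ratio
 * theta/lambda survives the hardware forcing lambda to 2^16 when the
 * sync path is in use.
 */
static unsigned int foo_rescale_theta(unsigned int theta, unsigned int lambda)
{
	/* e.g. theta = 16, lambda = 125:
	 *   16 * 65536 / 125 = 8388, and 8388 / 65536 ~= 0.12799 ~= 16 / 125,
	 * so the fractional part of the FLL ratio is preserved to within
	 * integer truncation.
	 */
	return (theta * (1 << 16)) / lambda;
}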
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 69da1ef3a17c..850aa338ba29 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -190,20 +190,21 @@ extern unsigned int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
#define ARIZONA_DSP_ROUTES(name) \
{ name, NULL, name " Preloader"}, \
- { name " Preloader", NULL, name " Aux 1" }, \
- { name " Preloader", NULL, name " Aux 2" }, \
- { name " Preloader", NULL, name " Aux 3" }, \
- { name " Preloader", NULL, name " Aux 4" }, \
- { name " Preloader", NULL, name " Aux 5" }, \
- { name " Preloader", NULL, name " Aux 6" }, \
+ { name " Preloader", NULL, "SYSCLK" }, \
+ { name, NULL, name " Aux 1" }, \
+ { name, NULL, name " Aux 2" }, \
+ { name, NULL, name " Aux 3" }, \
+ { name, NULL, name " Aux 4" }, \
+ { name, NULL, name " Aux 5" }, \
+ { name, NULL, name " Aux 6" }, \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 1"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 2"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 3"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 4"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 5"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Aux 6"), \
- ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \
- ARIZONA_MIXER_ROUTES(name " Preloader", name "R")
+ ARIZONA_MIXER_ROUTES(name, name "L"), \
+ ARIZONA_MIXER_ROUTES(name, name "R")
#define ARIZONA_EQ_CONTROL(xname, xbase) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index 2a8d0ee141d4..8014e697d540 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -63,10 +63,12 @@ static struct snd_soc_dai_driver bt_sco_dai[] = {
};
static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
- .dapm_widgets = bt_sco_widgets,
- .num_dapm_widgets = ARRAY_SIZE(bt_sco_widgets),
- .dapm_routes = bt_sco_routes,
- .num_dapm_routes = ARRAY_SIZE(bt_sco_routes),
+ .component_driver = {
+ .dapm_widgets = bt_sco_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(bt_sco_widgets),
+ .dapm_routes = bt_sco_routes,
+ .num_dapm_routes = ARRAY_SIZE(bt_sco_routes),
+ },
};
static int bt_sco_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 1c895a53001d..7a2d9a2ee427 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -131,8 +131,10 @@ static struct regmap *cq93vc_get_regmap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
.set_bias_level = cq93vc_set_bias_level,
.get_regmap = cq93vc_get_regmap,
- .controls = cq93vc_snd_controls,
- .num_controls = ARRAY_SIZE(cq93vc_snd_controls),
+ .component_driver = {
+ .controls = cq93vc_snd_controls,
+ .num_controls = ARRAY_SIZE(cq93vc_snd_controls),
+ },
};
static int cq93vc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 287d13740be4..7e9806206648 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -231,13 +231,14 @@ static int cs35l32_codec_set_sysclk(struct snd_soc_codec *codec,
static const struct snd_soc_codec_driver soc_codec_dev_cs35l32 = {
.set_sysclk = cs35l32_codec_set_sysclk,
- .dapm_widgets = cs35l32_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs35l32_dapm_widgets),
- .dapm_routes = cs35l32_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs35l32_audio_map),
-
- .controls = cs35l32_snd_controls,
- .num_controls = ARRAY_SIZE(cs35l32_snd_controls),
+ .component_driver = {
+ .controls = cs35l32_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l32_snd_controls),
+ .dapm_widgets = cs35l32_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l32_dapm_widgets),
+ .dapm_routes = cs35l32_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l32_audio_map),
+ },
};
/* Current and threshold powerup sequence Pg37 in datasheet */
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 6f9c1addcd7f..6df29fa30fb9 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -837,13 +837,14 @@ static struct snd_soc_codec_driver soc_codec_dev_cs35l33 = {
.set_bias_level = cs35l33_set_bias_level,
.set_sysclk = cs35l33_codec_set_sysclk,
- .dapm_widgets = cs35l33_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs35l33_dapm_widgets),
- .dapm_routes = cs35l33_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs35l33_audio_map),
- .controls = cs35l33_snd_controls,
- .num_controls = ARRAY_SIZE(cs35l33_snd_controls),
-
+ .component_driver = {
+ .controls = cs35l33_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l33_snd_controls),
+ .dapm_widgets = cs35l33_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l33_dapm_widgets),
+ .dapm_routes = cs35l33_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l33_audio_map),
+ },
.idle_bias_off = true,
};
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 55db19ddc5ff..fd966bb851cb 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -547,13 +547,14 @@ static struct snd_soc_dai_driver cs4265_dai[] = {
static const struct snd_soc_codec_driver soc_codec_cs4265 = {
.set_bias_level = cs4265_set_bias_level,
- .dapm_widgets = cs4265_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs4265_dapm_widgets),
- .dapm_routes = cs4265_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs4265_audio_map),
-
- .controls = cs4265_snd_controls,
- .num_controls = ARRAY_SIZE(cs4265_snd_controls),
+ .component_driver = {
+ .controls = cs4265_snd_controls,
+ .num_controls = ARRAY_SIZE(cs4265_snd_controls),
+ .dapm_widgets = cs4265_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4265_dapm_widgets),
+ .dapm_routes = cs4265_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs4265_audio_map),
+ },
};
static const struct regmap_config cs4265_regmap = {
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index e07807d96b68..18baea2f7d65 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -617,12 +617,14 @@ static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
.suspend = cs4270_soc_suspend,
.resume = cs4270_soc_resume,
- .controls = cs4270_snd_controls,
- .num_controls = ARRAY_SIZE(cs4270_snd_controls),
- .dapm_widgets = cs4270_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs4270_dapm_widgets),
- .dapm_routes = cs4270_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cs4270_dapm_routes),
+ .component_driver = {
+ .controls = cs4270_snd_controls,
+ .num_controls = ARRAY_SIZE(cs4270_snd_controls),
+ .dapm_widgets = cs4270_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4270_dapm_widgets),
+ .dapm_routes = cs4270_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4270_dapm_routes),
+ },
};
/*
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 0c0010b25421..8c0f3b89b5bc 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -645,12 +645,14 @@ static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
.suspend = cs4271_soc_suspend,
.resume = cs4271_soc_resume,
- .controls = cs4271_snd_controls,
- .num_controls = ARRAY_SIZE(cs4271_snd_controls),
- .dapm_widgets = cs4271_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs4271_dapm_widgets),
- .dapm_routes = cs4271_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cs4271_dapm_routes),
+ .component_driver = {
+ .controls = cs4271_snd_controls,
+ .num_controls = ARRAY_SIZE(cs4271_snd_controls),
+ .dapm_widgets = cs4271_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4271_dapm_widgets),
+ .dapm_routes = cs4271_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4271_dapm_routes),
+ },
};
static int cs4271_common_probe(struct device *dev,
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 35488f14e237..96cfe38943cd 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -507,12 +507,14 @@ static int cs42l51_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
.probe = cs42l51_codec_probe,
- .controls = cs42l51_snd_controls,
- .num_controls = ARRAY_SIZE(cs42l51_snd_controls),
- .dapm_widgets = cs42l51_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs42l51_dapm_widgets),
- .dapm_routes = cs42l51_routes,
- .num_dapm_routes = ARRAY_SIZE(cs42l51_routes),
+ .component_driver = {
+ .controls = cs42l51_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42l51_snd_controls),
+ .dapm_widgets = cs42l51_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42l51_dapm_widgets),
+ .dapm_routes = cs42l51_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs42l51_routes),
+ },
};
const struct regmap_config cs42l51_regmap = {
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 47b97fcefb0b..0d9c4a57301b 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1056,13 +1056,14 @@ static const struct snd_soc_codec_driver soc_codec_dev_cs42l52 = {
.set_bias_level = cs42l52_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = cs42l52_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs42l52_dapm_widgets),
- .dapm_routes = cs42l52_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs42l52_audio_map),
-
- .controls = cs42l52_snd_controls,
- .num_controls = ARRAY_SIZE(cs42l52_snd_controls),
+ .component_driver = {
+ .controls = cs42l52_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42l52_snd_controls),
+ .dapm_widgets = cs42l52_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42l52_dapm_widgets),
+ .dapm_routes = cs42l52_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs42l52_audio_map),
+ },
};
/* Current and threshold powerup sequence Pg37 */
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index eec1ff853b98..54c1768bc818 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1121,13 +1121,14 @@ static const struct snd_soc_codec_driver soc_codec_dev_cs42l56 = {
.set_bias_level = cs42l56_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = cs42l56_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs42l56_dapm_widgets),
- .dapm_routes = cs42l56_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs42l56_audio_map),
-
- .controls = cs42l56_snd_controls,
- .num_controls = ARRAY_SIZE(cs42l56_snd_controls),
+ .component_driver = {
+ .controls = cs42l56_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42l56_snd_controls),
+ .dapm_widgets = cs42l56_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42l56_dapm_widgets),
+ .dapm_routes = cs42l56_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs42l56_audio_map),
+ },
};
static const struct regmap_config cs42l56_regmap = {
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 42a8fd4e1f9b..71ba5605495f 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -794,7 +794,7 @@ struct cs42l73_mclk_div {
u8 mmcc;
};
-static struct cs42l73_mclk_div cs42l73_mclk_coeffs[] = {
+static const struct cs42l73_mclk_div cs42l73_mclk_coeffs[] = {
/* MCLK, Sample Rate, xMMCC[5:0] */
{5644800, 11025, 0x30},
{5644800, 22050, 0x20},
@@ -844,7 +844,7 @@ struct cs42l73_mclkx_div {
u8 mclkdiv;
};
-static struct cs42l73_mclkx_div cs42l73_mclkx_coeffs[] = {
+static const struct cs42l73_mclkx_div cs42l73_mclkx_coeffs[] = {
{5644800, 1, 0}, /* 5644800 */
{6000000, 1, 0}, /* 6000000 */
{6144000, 1, 0}, /* 6144000 */
@@ -1257,13 +1257,14 @@ static const struct snd_soc_codec_driver soc_codec_dev_cs42l73 = {
.set_bias_level = cs42l73_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = cs42l73_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs42l73_dapm_widgets),
- .dapm_routes = cs42l73_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cs42l73_audio_map),
-
- .controls = cs42l73_snd_controls,
- .num_controls = ARRAY_SIZE(cs42l73_snd_controls),
+ .component_driver = {
+ .controls = cs42l73_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42l73_snd_controls),
+ .dapm_widgets = cs42l73_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42l73_dapm_widgets),
+ .dapm_routes = cs42l73_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs42l73_audio_map),
+ },
};
static const struct regmap_config cs42l73_regmap = {
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 1179101b2b05..b4d87379d2bc 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -411,12 +411,14 @@ static const struct snd_soc_codec_driver cs42xx8_driver = {
.probe = cs42xx8_codec_probe,
.idle_bias_off = true,
- .controls = cs42xx8_snd_controls,
- .num_controls = ARRAY_SIZE(cs42xx8_snd_controls),
- .dapm_widgets = cs42xx8_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs42xx8_dapm_widgets),
- .dapm_routes = cs42xx8_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cs42xx8_dapm_routes),
+ .component_driver = {
+ .controls = cs42xx8_snd_controls,
+ .num_controls = ARRAY_SIZE(cs42xx8_snd_controls),
+ .dapm_widgets = cs42xx8_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs42xx8_dapm_widgets),
+ .dapm_routes = cs42xx8_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs42xx8_dapm_routes),
+ },
};
const struct cs42xx8_driver_data cs42448_data = {
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
index 0ac8fc5ed4ae..231ca935cdf3 100644
--- a/sound/soc/codecs/cs4349.c
+++ b/sound/soc/codecs/cs4349.c
@@ -256,13 +256,14 @@ static struct snd_soc_dai_driver cs4349_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_cs4349 = {
- .controls = cs4349_snd_controls,
- .num_controls = ARRAY_SIZE(cs4349_snd_controls),
-
- .dapm_widgets = cs4349_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs4349_dapm_widgets),
- .dapm_routes = cs4349_routes,
- .num_dapm_routes = ARRAY_SIZE(cs4349_routes),
+ .component_driver = {
+ .controls = cs4349_snd_controls,
+ .num_controls = ARRAY_SIZE(cs4349_snd_controls),
+ .dapm_widgets = cs4349_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4349_dapm_widgets),
+ .dapm_routes = cs4349_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4349_routes),
+ },
};
static const struct regmap_config cs4349_regmap = {
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 954a4f5d3338..5b22564f037c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -746,6 +746,16 @@ static const struct snd_soc_dapm_route cs47l24_dapm_routes[] = {
{ "IN2L", NULL, "SYSCLK" },
{ "IN2R", NULL, "SYSCLK" },
+ { "ASRC1L", NULL, "SYSCLK" },
+ { "ASRC1R", NULL, "SYSCLK" },
+ { "ASRC2L", NULL, "SYSCLK" },
+ { "ASRC2R", NULL, "SYSCLK" },
+
+ { "ASRC1L", NULL, "ASYNCCLK" },
+ { "ASRC1R", NULL, "ASYNCCLK" },
+ { "ASRC2L", NULL, "ASYNCCLK" },
+ { "ASRC2R", NULL, "ASYNCCLK" },
+
{ "MICBIAS1", NULL, "MICVDD" },
{ "MICBIAS2", NULL, "MICVDD" },
@@ -804,7 +814,6 @@ static const struct snd_soc_dapm_route cs47l24_dapm_routes[] = {
{ "AIF3 Capture", NULL, "SYSCLK" },
{ "Voice Control DSP", NULL, "DSP3" },
- { "Voice Control DSP", NULL, "SYSCLK" },
{ "IN1L PGA", NULL, "IN1L" },
{ "IN1R PGA", NULL, "IN1R" },
@@ -813,7 +822,6 @@ static const struct snd_soc_dapm_route cs47l24_dapm_routes[] = {
{ "IN2R PGA", NULL, "IN2R" },
{ "Audio Trace DSP", NULL, "DSP2" },
- { "Audio Trace DSP", NULL, "SYSCLK" },
ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
@@ -1190,12 +1198,14 @@ static struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = cs47l24_set_fll,
- .controls = cs47l24_snd_controls,
- .num_controls = ARRAY_SIZE(cs47l24_snd_controls),
- .dapm_widgets = cs47l24_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs47l24_dapm_widgets),
- .dapm_routes = cs47l24_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cs47l24_dapm_routes),
+ .component_driver = {
+ .controls = cs47l24_snd_controls,
+ .num_controls = ARRAY_SIZE(cs47l24_snd_controls),
+ .dapm_widgets = cs47l24_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs47l24_dapm_widgets),
+ .dapm_routes = cs47l24_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs47l24_dapm_routes),
+ },
};
static struct snd_compr_ops cs47l24_compr_ops = {
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 2c0d9c430a8c..cb47fb595ff4 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -466,7 +466,7 @@ struct cs53l30_mclk_div {
u8 mclk_int_scale;
};
-static struct cs53l30_mclk_div cs53l30_mclk_coeffs[] = {
+static const struct cs53l30_mclk_div cs53l30_mclk_coeffs[] = {
/* NOTE: Enable MCLK_INT_SCALE to save power. */
/* MCLK, Sample Rate, asp_rate, internal_fs_ratio, mclk_int_scale */
@@ -511,7 +511,7 @@ struct cs53l30_mclkx_div {
u8 mclkdiv;
};
-static struct cs53l30_mclkx_div cs53l30_mclkx_coeffs[] = {
+static const struct cs53l30_mclkx_div cs53l30_mclkx_coeffs[] = {
{5644800, 1, CS53L30_MCLK_DIV_BY_1},
{6000000, 1, CS53L30_MCLK_DIV_BY_1},
{6144000, 1, CS53L30_MCLK_DIV_BY_1},
@@ -897,13 +897,14 @@ static struct snd_soc_codec_driver cs53l30_driver = {
.set_bias_level = cs53l30_set_bias_level,
.idle_bias_off = true,
- .dapm_widgets = cs53l30_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cs53l30_dapm_widgets),
- .dapm_routes = cs53l30_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
-
- .controls = cs53l30_snd_controls,
- .num_controls = ARRAY_SIZE(cs53l30_snd_controls),
+ .component_driver = {
+ .controls = cs53l30_snd_controls,
+ .num_controls = ARRAY_SIZE(cs53l30_snd_controls),
+ .dapm_widgets = cs53l30_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs53l30_dapm_widgets),
+ .dapm_routes = cs53l30_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
+ },
};
static struct regmap_config cs53l30_regmap = {
@@ -999,8 +1000,8 @@ static int cs53l30_i2c_probe(struct i2c_client *client,
/* Check if MCLK provided */
cs53l30->mclk = devm_clk_get(dev, "mclk");
if (IS_ERR(cs53l30->mclk)) {
- if (PTR_ERR(cs53l30->mclk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ if (PTR_ERR(cs53l30->mclk) != -ENOENT) {
+ ret = PTR_ERR(cs53l30->mclk);
goto error;
}
/* Otherwise mark the mclk pointer to NULL */
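The cs53l30 probe hunk above also tightens the optional-MCLK handling: instead of special-casing only -EPROBE_DEFER, every devm_clk_get() error other than -ENOENT is now propagated (deferral included), and only a genuinely absent clock falls back to running without MCLK. A hedged sketch of that pattern, using a hypothetical helper name:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/errno.h>

	/* Treat "no clock specified" (-ENOENT) as optional; return every other
	 * error, including -EPROBE_DEFER, to the driver core. */
	static int foo_get_optional_mclk(struct device *dev, struct clk **mclk)
	{
		*mclk = devm_clk_get(dev, "mclk");
		if (IS_ERR(*mclk)) {
			if (PTR_ERR(*mclk) != -ENOENT)
				return PTR_ERR(*mclk);
			/* MCLK not wired up: carry on without it */
			*mclk = NULL;
		}

		return 0;
	}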
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index fb3885fe0afb..2c12471e42a6 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -407,10 +407,12 @@ static struct snd_soc_codec_driver cx20442_codec_dev = {
.reg_word_size = sizeof(u8),
.read = cx20442_read_reg_cache,
.write = cx20442_write,
- .dapm_widgets = cx20442_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets),
- .dapm_routes = cx20442_audio_map,
- .num_dapm_routes = ARRAY_SIZE(cx20442_audio_map),
+ .component_driver = {
+ .dapm_widgets = cx20442_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets),
+ .dapm_routes = cx20442_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cx20442_audio_map),
+ },
};
static int cx20442_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index af23a61b7b28..17053dfc94cf 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1167,13 +1167,14 @@ static int da7210_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_da7210 = {
.probe = da7210_probe,
- .controls = da7210_snd_controls,
- .num_controls = ARRAY_SIZE(da7210_snd_controls),
-
- .dapm_widgets = da7210_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da7210_dapm_widgets),
- .dapm_routes = da7210_audio_map,
- .num_dapm_routes = ARRAY_SIZE(da7210_audio_map),
+ .component_driver = {
+ .controls = da7210_snd_controls,
+ .num_controls = ARRAY_SIZE(da7210_snd_controls),
+ .dapm_widgets = da7210_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da7210_dapm_widgets),
+ .dapm_routes = da7210_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(da7210_audio_map),
+ },
};
#if IS_ENABLED(CONFIG_I2C)
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index bcf1834c5648..12da55882c06 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -750,11 +750,18 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, DA7213_PC_COUNT,
DA7213_PC_FREERUN_MASK, 0);
- /* Slave mode, if SRM not enabled no need for status checks */
+ /* If SRM not enabled then nothing more to do */
pll_ctrl = snd_soc_read(codec, DA7213_PLL_CTRL);
if (!(pll_ctrl & DA7213_PLL_SRM_EN))
return 0;
+ /* Assist 32KHz mode PLL lock */
+ if (pll_ctrl & DA7213_PLL_32K_MODE) {
+ snd_soc_write(codec, 0xF0, 0x8B);
+ snd_soc_write(codec, 0xF2, 0x03);
+ snd_soc_write(codec, 0xF0, 0x00);
+ }
+
/* Check SRM has locked */
do {
pll_status = snd_soc_read(codec, DA7213_PLL_STATUS);
@@ -771,6 +778,14 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
return 0;
case SND_SOC_DAPM_POST_PMD:
+ /* Revert 32KHz PLL lock updates if applied previously */
+ pll_ctrl = snd_soc_read(codec, DA7213_PLL_CTRL);
+ if (pll_ctrl & DA7213_PLL_32K_MODE) {
+ snd_soc_write(codec, 0xF0, 0x8B);
+ snd_soc_write(codec, 0xF2, 0x01);
+ snd_soc_write(codec, 0xF0, 0x00);
+ }
+
/* PC free-running */
snd_soc_update_bits(codec, DA7213_PC_COUNT,
DA7213_PC_FREERUN_MASK,
@@ -1297,10 +1312,13 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
switch (clk_id) {
case DA7213_CLKSRC_MCLK:
- da7213->mclk_squarer_en = false;
+ snd_soc_update_bits(codec, DA7213_PLL_CTRL,
+ DA7213_PLL_MCLK_SQR_EN, 0);
break;
case DA7213_CLKSRC_MCLK_SQR:
- da7213->mclk_squarer_en = true;
+ snd_soc_update_bits(codec, DA7213_PLL_CTRL,
+ DA7213_PLL_MCLK_SQR_EN,
+ DA7213_PLL_MCLK_SQR_EN);
break;
default:
dev_err(codec_dai->dev, "Unknown clock source %d\n", clk_id);
@@ -1324,7 +1342,7 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
return 0;
}
-/* Supported PLL input frequencies are 5MHz - 54MHz. */
+/* Supported PLL input frequencies are 32KHz, 5MHz - 54MHz. */
static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int fref, unsigned int fout)
{
@@ -1336,22 +1354,26 @@ static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
u32 freq_ref;
u64 frac_div;
- /* Reset PLL configuration */
- snd_soc_write(codec, DA7213_PLL_CTRL, 0);
-
- pll_ctrl = 0;
-
/* Workout input divider based on MCLK rate */
if (da7213->mclk_rate == 32768) {
+ if (!da7213->master) {
+ dev_err(codec->dev,
+ "32KHz only valid if codec is clock master\n");
+ return -EINVAL;
+ }
+
/* 32KHz PLL Mode */
indiv_bits = DA7213_PLL_INDIV_9_TO_18_MHZ;
indiv = DA7213_PLL_INDIV_9_TO_18_MHZ_VAL;
+ source = DA7213_SYSCLK_PLL_32KHZ;
freq_ref = 3750000;
- pll_ctrl |= DA7213_PLL_32K_MODE;
+
} else {
- /* 5 - 54MHz MCLK */
if (da7213->mclk_rate < 5000000) {
- goto pll_err;
+ dev_err(codec->dev,
+ "PLL input clock %d below valid range\n",
+ da7213->mclk_rate);
+ return -EINVAL;
} else if (da7213->mclk_rate <= 9000000) {
indiv_bits = DA7213_PLL_INDIV_5_TO_9_MHZ;
indiv = DA7213_PLL_INDIV_5_TO_9_MHZ_VAL;
@@ -1365,32 +1387,44 @@ static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
indiv_bits = DA7213_PLL_INDIV_36_TO_54_MHZ;
indiv = DA7213_PLL_INDIV_36_TO_54_MHZ_VAL;
} else {
- goto pll_err;
+ dev_err(codec->dev,
+ "PLL input clock %d above valid range\n",
+ da7213->mclk_rate);
+ return -EINVAL;
}
freq_ref = (da7213->mclk_rate / indiv);
}
- pll_ctrl |= indiv_bits;
+ pll_ctrl = indiv_bits;
- /* PLL Bypass mode */
- if (source == DA7213_SYSCLK_MCLK) {
- snd_soc_write(codec, DA7213_PLL_CTRL, pll_ctrl);
+ /* Configure PLL */
+ switch (source) {
+ case DA7213_SYSCLK_MCLK:
+ snd_soc_update_bits(codec, DA7213_PLL_CTRL,
+ DA7213_PLL_INDIV_MASK |
+ DA7213_PLL_MODE_MASK, pll_ctrl);
return 0;
- }
+ case DA7213_SYSCLK_PLL:
+ break;
+ case DA7213_SYSCLK_PLL_SRM:
+ pll_ctrl |= DA7213_PLL_SRM_EN;
+ fout = DA7213_PLL_FREQ_OUT_94310400;
+ break;
+ case DA7213_SYSCLK_PLL_32KHZ:
+ if (da7213->mclk_rate != 32768) {
+ dev_err(codec->dev,
+ "32KHz mode only valid with 32KHz MCLK\n");
+ return -EINVAL;
+ }
- /*
- * If Codec is slave and SRM enabled,
- * freq_out is (98304000 + 90316800)/2 = 94310400
- */
- if (!da7213->master && da7213->srm_en) {
+ pll_ctrl |= DA7213_PLL_32K_MODE | DA7213_PLL_SRM_EN;
fout = DA7213_PLL_FREQ_OUT_94310400;
- pll_ctrl |= DA7213_PLL_SRM_EN;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid PLL config\n");
+ return -EINVAL;
}
- /* Enable MCLK squarer if required */
- if (da7213->mclk_squarer_en)
- pll_ctrl |= DA7213_PLL_MCLK_SQR_EN;
-
/* Calculate dividers for PLL */
pll_integer = fout / freq_ref;
frac_div = (u64)(fout % freq_ref) * 8192ULL;
@@ -1405,14 +1439,19 @@ static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
/* Enable PLL */
pll_ctrl |= DA7213_PLL_EN;
- snd_soc_write(codec, DA7213_PLL_CTRL, pll_ctrl);
+ snd_soc_update_bits(codec, DA7213_PLL_CTRL,
+ DA7213_PLL_INDIV_MASK | DA7213_PLL_MODE_MASK,
+ pll_ctrl);
+
+ /* Assist 32KHz mode PLL lock */
+ if (source == DA7213_SYSCLK_PLL_32KHZ) {
+ snd_soc_write(codec, 0xF0, 0x8B);
+ snd_soc_write(codec, 0xF1, 0x03);
+ snd_soc_write(codec, 0xF1, 0x01);
+ snd_soc_write(codec, 0xF0, 0x00);
+ }
return 0;
-
-pll_err:
- dev_err(codec_dai->dev, "Unsupported PLL input frequency %d\n",
- da7213->mclk_rate);
- return -EINVAL;
}
/* DAI operations */
@@ -1454,11 +1493,10 @@ static int da7213_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- case SND_SOC_BIAS_PREPARE:
break;
- case SND_SOC_BIAS_STANDBY:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
- /* MCLK */
+ case SND_SOC_BIAS_PREPARE:
+ /* Enable MCLK for transition to ON state */
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
if (da7213->mclk) {
ret = clk_prepare_enable(da7213->mclk);
if (ret) {
@@ -1467,21 +1505,24 @@ static int da7213_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
-
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
/* Enable VMID reference & master bias */
snd_soc_update_bits(codec, DA7213_REFERENCES,
DA7213_VMID_EN | DA7213_BIAS_EN,
DA7213_VMID_EN | DA7213_BIAS_EN);
+ } else {
+ /* Remove MCLK */
+ if (da7213->mclk)
+ clk_disable_unprepare(da7213->mclk);
}
break;
case SND_SOC_BIAS_OFF:
/* Disable VMID reference & master bias */
snd_soc_update_bits(codec, DA7213_REFERENCES,
DA7213_VMID_EN | DA7213_BIAS_EN, 0);
-
- /* MCLK */
- if (da7213->mclk)
- clk_disable_unprepare(da7213->mclk);
break;
}
return 0;
@@ -1605,9 +1646,6 @@ static int da7213_probe(struct snd_soc_codec *codec)
DA7213_ALC_CALIB_MODE_MAN, 0);
da7213->alc_calib_auto = true;
- /* Default to using SRM for slave mode */
- da7213->srm_en = true;
-
/* Default PC counter to free-running */
snd_soc_update_bits(codec, DA7213_PC_COUNT, DA7213_PC_FREERUN_MASK,
DA7213_PC_FREERUN_MASK);
@@ -1740,13 +1778,14 @@ static struct snd_soc_codec_driver soc_codec_dev_da7213 = {
.probe = da7213_probe,
.set_bias_level = da7213_set_bias_level,
- .controls = da7213_snd_controls,
- .num_controls = ARRAY_SIZE(da7213_snd_controls),
-
- .dapm_widgets = da7213_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da7213_dapm_widgets),
- .dapm_routes = da7213_audio_map,
- .num_dapm_routes = ARRAY_SIZE(da7213_audio_map),
+ .component_driver = {
+ .controls = da7213_snd_controls,
+ .num_controls = ARRAY_SIZE(da7213_snd_controls),
+ .dapm_widgets = da7213_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da7213_dapm_widgets),
+ .dapm_routes = da7213_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(da7213_audio_map),
+ },
};
static const struct regmap_config da7213_regmap_config = {
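Across the da7213 hunks above (and the matching da7218/da7219 changes below), MCLK management moves out of the OFF<->STANDBY transitions: the clock is now prepared and enabled when bias rises from STANDBY to PREPARE and released again when it drops back to STANDBY, so MCLK only runs while a stream is active, while the VMID/master-bias references stay on the OFF<->STANDBY edge. A condensed sketch of that shape, with a hypothetical foo_priv:

	#include <linux/clk.h>
	#include <sound/soc.h>

	struct foo_priv {
		struct clk *mclk;	/* optional, may be NULL */
	};

	static int foo_set_bias_level(struct snd_soc_codec *codec,
				      enum snd_soc_bias_level level)
	{
		struct foo_priv *foo = snd_soc_codec_get_drvdata(codec);
		int ret;

		switch (level) {
		case SND_SOC_BIAS_ON:
			break;
		case SND_SOC_BIAS_PREPARE:
			/* Rising from STANDBY: start MCLK for the active state */
			if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY &&
			    foo->mclk) {
				ret = clk_prepare_enable(foo->mclk);
				if (ret)
					return ret;
			}
			break;
		case SND_SOC_BIAS_STANDBY:
			if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
				/* coming up from OFF: enable references/bias here */
			} else if (foo->mclk) {
				/* dropping back from PREPARE: MCLK no longer needed */
				clk_disable_unprepare(foo->mclk);
			}
			break;
		case SND_SOC_BIAS_OFF:
			/* disable references/bias here */
			break;
		}

		return 0;
	}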
diff --git a/sound/soc/codecs/da7213.h b/sound/soc/codecs/da7213.h
index fbb7a356a501..16ef56f77cd4 100644
--- a/sound/soc/codecs/da7213.h
+++ b/sound/soc/codecs/da7213.h
@@ -172,6 +172,7 @@
#define DA7213_PLL_32K_MODE (0x1 << 5)
#define DA7213_PLL_SRM_EN (0x1 << 6)
#define DA7213_PLL_EN (0x1 << 7)
+#define DA7213_PLL_MODE_MASK (0x7 << 5)
/* DA7213_DAI_CLK_MODE = 0x28 */
#define DA7213_DAI_BCLKS_PER_WCLK_32 (0x0 << 0)
@@ -499,8 +500,6 @@
#define DA7213_ALC_AVG_ITERATIONS 5
/* PLL related */
-#define DA7213_SYSCLK_MCLK 0
-#define DA7213_SYSCLK_PLL 1
#define DA7213_PLL_FREQ_OUT_90316800 90316800
#define DA7213_PLL_FREQ_OUT_98304000 98304000
#define DA7213_PLL_FREQ_OUT_94310400 94310400
@@ -515,6 +514,13 @@ enum da7213_clk_src {
DA7213_CLKSRC_MCLK_SQR,
};
+enum da7213_sys_clk {
+ DA7213_SYSCLK_MCLK = 0,
+ DA7213_SYSCLK_PLL,
+ DA7213_SYSCLK_PLL_SRM,
+ DA7213_SYSCLK_PLL_32KHZ
+};
+
/* Codec private data */
struct da7213_priv {
struct regmap *regmap;
@@ -522,8 +528,6 @@ struct da7213_priv {
unsigned int mclk_rate;
int clk_src;
bool master;
- bool mclk_squarer_en;
- bool srm_en;
bool alc_calib_auto;
bool alc_en;
struct da7213_platform_data *pdata;
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index 99ce23e113bf..c69e97654fc6 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -1819,7 +1819,7 @@ static int da7218_set_dai_sysclk(struct snd_soc_dai *codec_dai,
if (da7218->mclk_rate == freq)
return 0;
- if (((freq < 2000000) && (freq != 32768)) || (freq > 54000000)) {
+ if ((freq < 2000000) || (freq > 54000000)) {
dev_err(codec_dai->dev, "Unsupported MCLK value %d\n",
freq);
return -EINVAL;
@@ -1866,11 +1866,8 @@ static int da7218_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
u32 freq_ref;
u64 frac_div;
- /* Verify 32KHz, 2MHz - 54MHz MCLK provided, and set input divider */
- if (da7218->mclk_rate == 32768) {
- indiv_bits = DA7218_PLL_INDIV_9_TO_18_MHZ;
- indiv = DA7218_PLL_INDIV_9_TO_18_MHZ_VAL;
- } else if (da7218->mclk_rate < 2000000) {
+ /* Verify 2MHz - 54MHz MCLK provided, and set input divider */
+ if (da7218->mclk_rate < 2000000) {
dev_err(codec->dev, "PLL input clock %d below valid range\n",
da7218->mclk_rate);
return -EINVAL;
@@ -1911,9 +1908,6 @@ static int da7218_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
case DA7218_SYSCLK_PLL_SRM:
pll_ctrl |= DA7218_PLL_MODE_SRM;
break;
- case DA7218_SYSCLK_PLL_32KHZ:
- pll_ctrl |= DA7218_PLL_MODE_32KHZ;
- break;
default:
dev_err(codec->dev, "Invalid PLL config\n");
return -EINVAL;
@@ -2589,20 +2583,22 @@ static int da7218_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- case SND_SOC_BIAS_PREPARE:
break;
- case SND_SOC_BIAS_STANDBY:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
- /* MCLK */
+ case SND_SOC_BIAS_PREPARE:
+ /* Enable MCLK for transition to ON state */
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
if (da7218->mclk) {
ret = clk_prepare_enable(da7218->mclk);
if (ret) {
- dev_err(codec->dev,
- "Failed to enable mclk\n");
+ dev_err(codec->dev, "Failed to enable mclk\n");
return ret;
}
}
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
/* Master bias */
snd_soc_update_bits(codec, DA7218_REFERENCES,
DA7218_BIAS_EN_MASK,
@@ -2612,6 +2608,10 @@ static int da7218_set_bias_level(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, DA7218_LDO_CTRL,
DA7218_LDO_EN_MASK,
DA7218_LDO_EN_MASK);
+ } else {
+ /* Remove MCLK */
+ if (da7218->mclk)
+ clk_disable_unprepare(da7218->mclk);
}
break;
case SND_SOC_BIAS_OFF:
@@ -2625,10 +2625,6 @@ static int da7218_set_bias_level(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, DA7218_REFERENCES,
DA7218_BIAS_EN_MASK, 0);
}
-
- /* MCLK */
- if (da7218->mclk)
- clk_disable_unprepare(da7218->mclk);
break;
}
@@ -3045,13 +3041,14 @@ static struct snd_soc_codec_driver soc_codec_dev_da7218 = {
.resume = da7218_resume,
.set_bias_level = da7218_set_bias_level,
- .controls = da7218_snd_controls,
- .num_controls = ARRAY_SIZE(da7218_snd_controls),
-
- .dapm_widgets = da7218_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da7218_dapm_widgets),
- .dapm_routes = da7218_audio_map,
- .num_dapm_routes = ARRAY_SIZE(da7218_audio_map),
+ .component_driver = {
+ .controls = da7218_snd_controls,
+ .num_controls = ARRAY_SIZE(da7218_snd_controls),
+ .dapm_widgets = da7218_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da7218_dapm_widgets),
+ .dapm_routes = da7218_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(da7218_audio_map),
+ },
};
diff --git a/sound/soc/codecs/da7218.h b/sound/soc/codecs/da7218.h
index 477cd37723cf..4f7ec21069a4 100644
--- a/sound/soc/codecs/da7218.h
+++ b/sound/soc/codecs/da7218.h
@@ -888,7 +888,6 @@
#define DA7218_PLL_MODE_BYPASS (0x0 << 6)
#define DA7218_PLL_MODE_NORMAL (0x1 << 6)
#define DA7218_PLL_MODE_SRM (0x2 << 6)
-#define DA7218_PLL_MODE_32KHZ (0x3 << 6)
/* DA7218_PLL_FRAC_TOP = 0x92 */
#define DA7218_PLL_FBDIV_FRAC_TOP_SHIFT 0
@@ -1371,7 +1370,6 @@ enum da7218_sys_clk {
DA7218_SYSCLK_MCLK = 0,
DA7218_SYSCLK_PLL,
DA7218_SYSCLK_PLL_SRM,
- DA7218_SYSCLK_PLL_32KHZ
};
enum da7218_dev_id {
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index f0057cd223a4..2b8914dd5990 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/property.h>
#include <linux/pm_wakeirq.h>
@@ -114,13 +115,38 @@ static void da7219_aad_hptest_work(struct work_struct *work)
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u16 tonegen_freq_hptest;
- u8 accdet_cfg8;
- int report = 0;
+ u8 pll_srm_sts, gain_ramp_ctrl, accdet_cfg8;
+ int report = 0, ret = 0;
/* Lock DAPM and any Kcontrols that are affected by this test */
snd_soc_dapm_mutex_lock(dapm);
mutex_lock(&da7219->lock);
+ /* Ensure MCLK is available for HP test procedure */
+ if (da7219->mclk) {
+ ret = clk_prepare_enable(da7219->mclk);
+ if (ret) {
+ dev_err(codec->dev, "Failed to enable mclk - %d\n", ret);
+ mutex_unlock(&da7219->lock);
+ snd_soc_dapm_mutex_unlock(dapm);
+ return;
+ }
+ }
+
+ /*
+ * If MCLK not present, then we're using the internal oscillator and
+ * require different frequency settings to achieve the same result.
+ */
+ pll_srm_sts = snd_soc_read(codec, DA7219_PLL_SRM_STS);
+ if (pll_srm_sts & DA7219_PLL_SRM_STS_MCLK)
+ tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ);
+ else
+ tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
+
+ /* Ensure gain ramping at fastest rate */
+ gain_ramp_ctrl = snd_soc_read(codec, DA7219_GAIN_RAMP_CTRL);
+ snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
+
/* Bypass cache so it saves current settings */
regcache_cache_bypass(da7219->regmap, true);
@@ -183,9 +209,15 @@ static void da7219_aad_hptest_work(struct work_struct *work)
snd_soc_write(codec, DA7219_HP_R_CTRL,
DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
+ /*
+ * If we're running from the internal oscillator then give audio paths
+ * time to settle before running the test.
+ */
+ if (!(pll_srm_sts & DA7219_PLL_SRM_STS_MCLK))
+ msleep(DA7219_AAD_HPTEST_INT_OSC_PATH_DELAY);
+
/* Configure & start Tone Generator */
snd_soc_write(codec, DA7219_TONE_GEN_ON_PER, DA7219_BEEP_ON_PER_MASK);
- tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ);
regmap_raw_write(da7219->regmap, DA7219_TONE_GEN_FREQ1_L,
&tonegen_freq_hptest, sizeof(tonegen_freq_hptest));
snd_soc_update_bits(codec, DA7219_TONE_GEN_CFG2,
@@ -244,12 +276,26 @@ static void da7219_aad_hptest_work(struct work_struct *work)
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_8,
DA7219_HPTEST_EN_MASK, 0);
+ /*
+ * If we're running from the internal oscillator then give audio paths
+ * time to settle before allowing headphones to be driven as required.
+ */
+ if (!(pll_srm_sts & DA7219_PLL_SRM_STS_MCLK))
+ msleep(DA7219_AAD_HPTEST_INT_OSC_PATH_DELAY);
+
+ /* Restore gain ramping rate */
+ snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL, gain_ramp_ctrl);
+
/* Drive Headphones/lineout */
snd_soc_update_bits(codec, DA7219_HP_L_CTRL, DA7219_HP_L_AMP_OE_MASK,
DA7219_HP_L_AMP_OE_MASK);
snd_soc_update_bits(codec, DA7219_HP_R_CTRL, DA7219_HP_R_AMP_OE_MASK,
DA7219_HP_R_AMP_OE_MASK);
+ /* Remove MCLK, if previously enabled */
+ if (da7219->mclk)
+ clk_disable_unprepare(da7219->mclk);
+
mutex_unlock(&da7219->lock);
snd_soc_dapm_mutex_unlock(dapm);
@@ -751,6 +797,62 @@ static void da7219_aad_handle_pdata(struct snd_soc_codec *codec)
/*
+ * Suspend/Resume
+ */
+
+void da7219_aad_suspend(struct snd_soc_codec *codec)
+{
+ struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
+ struct da7219_aad_priv *da7219_aad = da7219->aad;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ u8 micbias_ctrl;
+
+ if (da7219_aad->jack) {
+ /* Disable jack detection during suspend */
+ snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
+ DA7219_ACCDET_EN_MASK, 0);
+
+ /*
+ * If we have a 4-pole jack inserted, then micbias will be
+ * enabled. We can disable micbias here, and keep a note to
+ * re-enable it on resume. If jack removal occurred during
+ * suspend then this will be dealt with through the IRQ handler.
+ */
+ if (da7219_aad->jack_inserted) {
+ micbias_ctrl = snd_soc_read(codec, DA7219_MICBIAS_CTRL);
+ if (micbias_ctrl & DA7219_MICBIAS1_EN_MASK) {
+ snd_soc_dapm_disable_pin(dapm, "Mic Bias");
+ snd_soc_dapm_sync(dapm);
+ da7219_aad->micbias_resume_enable = true;
+ }
+ }
+ }
+}
+
+void da7219_aad_resume(struct snd_soc_codec *codec)
+{
+ struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
+ struct da7219_aad_priv *da7219_aad = da7219->aad;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ if (da7219_aad->jack) {
+ /* Re-enable micbias if previously enabled for 4-pole jack */
+ if (da7219_aad->jack_inserted &&
+ da7219_aad->micbias_resume_enable) {
+ snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
+ snd_soc_dapm_sync(dapm);
+ da7219_aad->micbias_resume_enable = false;
+ }
+
+ /* Re-enable jack detection */
+ snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
+ DA7219_ACCDET_EN_MASK,
+ DA7219_ACCDET_EN_MASK);
+ }
+}
+
+
+/*
* Init/Exit
*/
diff --git a/sound/soc/codecs/da7219-aad.h b/sound/soc/codecs/da7219-aad.h
index 4fccf677cd06..117a3d7ccd31 100644
--- a/sound/soc/codecs/da7219-aad.h
+++ b/sound/soc/codecs/da7219-aad.h
@@ -176,8 +176,10 @@
#define DA7219_AAD_MICBIAS_CHK_DELAY 10
#define DA7219_AAD_MICBIAS_CHK_RETRIES 5
-#define DA7219_AAD_HPTEST_RAMP_FREQ 0x28
-#define DA7219_AAD_HPTEST_PERIOD 65
+#define DA7219_AAD_HPTEST_RAMP_FREQ 0x28
+#define DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC 0x4D
+#define DA7219_AAD_HPTEST_PERIOD 65
+#define DA7219_AAD_HPTEST_INT_OSC_PATH_DELAY 20
enum da7219_aad_event_regs {
DA7219_AAD_IRQ_REG_A = 0,
@@ -199,12 +201,17 @@ struct da7219_aad_priv {
struct work_struct hptest_work;
struct snd_soc_jack *jack;
+ bool micbias_resume_enable;
bool jack_inserted;
};
/* AAD control */
void da7219_aad_jack_det(struct snd_soc_codec *codec, struct snd_soc_jack *jack);
+/* Suspend/Resume */
+void da7219_aad_suspend(struct snd_soc_codec *codec);
+void da7219_aad_resume(struct snd_soc_codec *codec);
+
/* Init/Exit */
int da7219_aad_init(struct snd_soc_codec *codec);
void da7219_aad_exit(struct snd_soc_codec *codec);
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 50ea94317cb3..1152aa5e7c39 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -801,7 +801,7 @@ static int da7219_dai_event(struct snd_soc_dapm_widget *w,
++i;
msleep(50);
}
- } while ((i < DA7219_SRM_CHECK_RETRIES) & (!srm_lock));
+ } while ((i < DA7219_SRM_CHECK_RETRIES) && (!srm_lock));
if (!srm_lock)
dev_warn(codec->dev, "SRM failed to lock\n");
@@ -1482,6 +1482,8 @@ static struct da7219_pdata *da7219_fw_to_pdata(struct snd_soc_codec *codec)
if (!pdata)
return NULL;
+ pdata->wakeup_source = device_property_read_bool(dev, "wakeup-source");
+
if (device_property_read_u32(dev, "dlg,micbias-lvl", &of_val32) >= 0)
pdata->micbias_lvl = da7219_fw_micbias_lvl(dev, of_val32);
else
@@ -1508,11 +1510,10 @@ static int da7219_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- case SND_SOC_BIAS_PREPARE:
break;
- case SND_SOC_BIAS_STANDBY:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
- /* MCLK */
+ case SND_SOC_BIAS_PREPARE:
+ /* Enable MCLK for transition to ON state */
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
if (da7219->mclk) {
ret = clk_prepare_enable(da7219->mclk);
if (ret) {
@@ -1521,22 +1522,28 @@ static int da7219_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
/* Master bias */
snd_soc_update_bits(codec, DA7219_REFERENCES,
DA7219_BIAS_EN_MASK,
DA7219_BIAS_EN_MASK);
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_PREPARE) {
+ /* Remove MCLK */
+ if (da7219->mclk)
+ clk_disable_unprepare(da7219->mclk);
}
break;
case SND_SOC_BIAS_OFF:
- /* Only disable master bias if jack detection not active */
- if (!da7219->aad->jack)
+ /* Only disable master bias if we're not a wake-up source */
+ if (!da7219->wakeup_source)
snd_soc_update_bits(codec, DA7219_REFERENCES,
DA7219_BIAS_EN_MASK, 0);
- /* MCLK */
- if (da7219->mclk)
- clk_disable_unprepare(da7219->mclk);
break;
}
@@ -1599,6 +1606,8 @@ static void da7219_handle_pdata(struct snd_soc_codec *codec)
if (pdata) {
u8 micbias_lvl = 0;
+ da7219->wakeup_source = pdata->wakeup_source;
+
/* Mic Bias voltages */
switch (pdata->micbias_lvl) {
case DA7219_MICBIAS_1_6V:
@@ -1733,11 +1742,11 @@ static int da7219_suspend(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
- snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
+ /* Suspend AAD if we're not a wake-up source */
+ if (!da7219->wakeup_source)
+ da7219_aad_suspend(codec);
- /* Put device into standby mode if jack detection disabled */
- if (!da7219->aad->jack)
- snd_soc_write(codec, DA7219_SYSTEM_ACTIVE, 0);
+ snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1746,13 +1755,12 @@ static int da7219_resume(struct snd_soc_codec *codec)
{
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
- /* Put device into active mode if previously pushed to standby */
- if (!da7219->aad->jack)
- snd_soc_write(codec, DA7219_SYSTEM_ACTIVE,
- DA7219_SYSTEM_ACTIVE_MASK);
-
snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ /* Resume AAD if previously suspended */
+ if (!da7219->wakeup_source)
+ da7219_aad_resume(codec);
+
return 0;
}
#else
@@ -1767,13 +1775,14 @@ static struct snd_soc_codec_driver soc_codec_dev_da7219 = {
.resume = da7219_resume,
.set_bias_level = da7219_set_bias_level,
- .controls = da7219_snd_controls,
- .num_controls = ARRAY_SIZE(da7219_snd_controls),
-
- .dapm_widgets = da7219_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da7219_dapm_widgets),
- .dapm_routes = da7219_audio_map,
- .num_dapm_routes = ARRAY_SIZE(da7219_audio_map),
+ .component_driver = {
+ .controls = da7219_snd_controls,
+ .num_controls = ARRAY_SIZE(da7219_snd_controls),
+ .dapm_widgets = da7219_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da7219_dapm_widgets),
+ .dapm_routes = da7219_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(da7219_audio_map),
+ },
};
@@ -1921,7 +1930,8 @@ static int da7219_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da7219_priv *da7219;
- int ret;
+ unsigned int system_active, system_status;
+ int i, ret;
da7219 = devm_kzalloc(&i2c->dev, sizeof(struct da7219_priv),
GFP_KERNEL);
@@ -1937,6 +1947,37 @@ static int da7219_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ regcache_cache_bypass(da7219->regmap, true);
+
+ /* Disable audio paths if still active from previous start */
+ regmap_read(da7219->regmap, DA7219_SYSTEM_ACTIVE, &system_active);
+ if (system_active) {
+ regmap_write(da7219->regmap, DA7219_GAIN_RAMP_CTRL,
+ DA7219_GAIN_RAMP_RATE_NOMINAL);
+ regmap_write(da7219->regmap, DA7219_SYSTEM_MODES_INPUT, 0x00);
+ regmap_write(da7219->regmap, DA7219_SYSTEM_MODES_OUTPUT, 0x01);
+
+ for (i = 0; i < DA7219_SYS_STAT_CHECK_RETRIES; ++i) {
+ regmap_read(da7219->regmap, DA7219_SYSTEM_STATUS,
+ &system_status);
+ if (!system_status)
+ break;
+
+ msleep(DA7219_SYS_STAT_CHECK_DELAY);
+ }
+ }
+
+ /* Soft reset codec */
+ regmap_write_bits(da7219->regmap, DA7219_ACCDET_CONFIG_1,
+ DA7219_ACCDET_EN_MASK, 0);
+ regmap_write_bits(da7219->regmap, DA7219_CIF_CTRL,
+ DA7219_CIF_REG_SOFT_RESET_MASK,
+ DA7219_CIF_REG_SOFT_RESET_MASK);
+ regmap_write_bits(da7219->regmap, DA7219_SYSTEM_ACTIVE,
+ DA7219_SYSTEM_ACTIVE_MASK, 0);
+
+ regcache_cache_bypass(da7219->regmap, false);
+
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da7219,
&da7219_dai, 1);
if (ret < 0) {
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index ff2a2f02ce40..66d3bad86739 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -224,6 +224,7 @@
#define DA7219_PLL_SRM_STATE_MASK (0xF << 0)
#define DA7219_PLL_SRM_STATUS_SHIFT 4
#define DA7219_PLL_SRM_STATUS_MASK (0xF << 4)
+#define DA7219_PLL_SRM_STS_MCLK (0x1 << 4)
#define DA7219_PLL_SRM_STS_SRM_LOCK (0x1 << 7)
/* DA7219_DIG_ROUTING_DAI = 0x2A */
@@ -576,6 +577,8 @@
/* DA7219_GAIN_RAMP_CTRL = 0x92 */
#define DA7219_GAIN_RAMP_RATE_SHIFT 0
#define DA7219_GAIN_RAMP_RATE_MASK (0x3 << 0)
+#define DA7219_GAIN_RAMP_RATE_X8 (0x0 << 0)
+#define DA7219_GAIN_RAMP_RATE_NOMINAL (0x1 << 0)
#define DA7219_GAIN_RAMP_RATE_MAX 4
/* DA7219_PC_COUNT = 0x94 */
@@ -770,6 +773,10 @@
/* SRM */
#define DA7219_SRM_CHECK_RETRIES 8
+/* System Controller */
+#define DA7219_SYS_STAT_CHECK_RETRIES 6
+#define DA7219_SYS_STAT_CHECK_DELAY 50
+
enum da7219_clk_src {
DA7219_CLKSRC_MCLK = 0,
DA7219_CLKSRC_MCLK_SQR,
@@ -796,6 +803,7 @@ struct da7219_priv {
struct da7219_aad_priv *aad;
struct da7219_pdata *pdata;
+ bool wakeup_source;
struct regulator_bulk_data supplies[DA7219_NUM_SUPPLIES];
struct regmap *regmap;
struct mutex lock;
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 461506a4ca6a..c1cc1c1c28f2 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1501,12 +1501,14 @@ static int da732x_set_bias_level(struct snd_soc_codec *codec,
static struct snd_soc_codec_driver soc_codec_dev_da732x = {
.set_bias_level = da732x_set_bias_level,
- .controls = da732x_snd_controls,
- .num_controls = ARRAY_SIZE(da732x_snd_controls),
- .dapm_widgets = da732x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da732x_dapm_widgets),
- .dapm_routes = da732x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(da732x_dapm_routes),
+ .component_driver = {
+ .controls = da732x_snd_controls,
+ .num_controls = ARRAY_SIZE(da732x_snd_controls),
+ .dapm_widgets = da732x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da732x_dapm_widgets),
+ .dapm_routes = da732x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(da732x_dapm_routes),
+ },
.set_pll = da732x_set_dai_pll,
};
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 0b2ede8db978..4efb5f897a0c 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1455,13 +1455,14 @@ static struct snd_soc_codec_driver soc_codec_dev_da9055 = {
.probe = da9055_probe,
.set_bias_level = da9055_set_bias_level,
- .controls = da9055_snd_controls,
- .num_controls = ARRAY_SIZE(da9055_snd_controls),
-
- .dapm_widgets = da9055_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(da9055_dapm_widgets),
- .dapm_routes = da9055_audio_map,
- .num_dapm_routes = ARRAY_SIZE(da9055_audio_map),
+ .component_driver = {
+ .controls = da9055_snd_controls,
+ .num_controls = ARRAY_SIZE(da9055_snd_controls),
+ .dapm_widgets = da9055_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da9055_dapm_widgets),
+ .dapm_routes = da9055_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(da9055_audio_map),
+ },
};
static const struct regmap_config da9055_regmap_config = {
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index fde53251c047..c82b9dc41e9a 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -51,10 +51,12 @@ static const struct snd_soc_dapm_route intercon[] = {
};
static struct snd_soc_codec_driver soc_dmic = {
- .dapm_widgets = dmic_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(dmic_dapm_widgets),
- .dapm_routes = intercon,
- .num_dapm_routes = ARRAY_SIZE(intercon),
+ .component_driver = {
+ .dapm_widgets = dmic_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dmic_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
};
static int dmic_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 2086d7107622..37722194b107 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -823,12 +823,14 @@ static struct snd_soc_codec_driver es8328_codec_driver = {
.set_bias_level = es8328_set_bias_level,
.suspend_bias_off = true,
- .controls = es8328_snd_controls,
- .num_controls = ARRAY_SIZE(es8328_snd_controls),
- .dapm_widgets = es8328_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(es8328_dapm_widgets),
- .dapm_routes = es8328_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(es8328_dapm_routes),
+ .component_driver = {
+ .controls = es8328_snd_controls,
+ .num_controls = ARRAY_SIZE(es8328_snd_controls),
+ .dapm_widgets = es8328_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(es8328_dapm_widgets),
+ .dapm_routes = es8328_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(es8328_dapm_routes),
+ },
};
int es8328_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
index 0b80052996d3..926b1a4e37d4 100644
--- a/sound/soc/codecs/gtm601.c
+++ b/sound/soc/codecs/gtm601.c
@@ -52,10 +52,12 @@ static struct snd_soc_dai_driver gtm601_dai = {
};
static const struct snd_soc_codec_driver soc_codec_dev_gtm601 = {
- .dapm_widgets = gtm601_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
- .dapm_routes = gtm601_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = gtm601_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
+ .dapm_routes = gtm601_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
+ },
};
static int gtm601_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 4e181b270d95..c602c4960924 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -614,7 +614,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
(!pin->eld.eld_valid)) {
dev_warn(&hdac->hdac.dev,
- "Failed: montior present? %d ELD valid?: %d for pin: %d\n",
+ "Failed: monitor present? %d ELD valid?: %d for pin: %d\n",
pin->eld.monitor_present, pin->eld.eld_valid, pin->nid);
return 0;
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index f27d115626db..b904492d7744 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -24,6 +24,15 @@
#include <drm/drm_crtc.h> /* This is only to get MAX_ELD_BYTES */
+struct hdmi_device {
+ struct device *dev;
+ struct list_head list;
+ int cnt;
+};
+#define pos_to_hdmi_device(pos) container_of((pos), struct hdmi_device, list)
+LIST_HEAD(hdmi_device_list);
+
+#define DAI_NAME_SIZE 16
struct hdmi_codec_priv {
struct hdmi_codec_pdata hcd;
struct snd_soc_dai_driver *daidrv;
@@ -320,7 +329,6 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE)
static struct snd_soc_dai_driver hdmi_i2s_dai = {
- .name = "i2s-hifi",
.id = DAI_ID_I2S,
.playback = {
.stream_name = "Playback",
@@ -334,7 +342,6 @@ static struct snd_soc_dai_driver hdmi_i2s_dai = {
};
static const struct snd_soc_dai_driver hdmi_spdif_dai = {
- .name = "spdif-hifi",
.id = DAI_ID_SPDIF,
.playback = {
.stream_name = "Playback",
@@ -346,13 +353,37 @@ static const struct snd_soc_dai_driver hdmi_spdif_dai = {
.ops = &hdmi_dai_ops,
};
+static char hdmi_dai_name[][DAI_NAME_SIZE] = {
+ "hdmi-hifi.0",
+ "hdmi-hifi.1",
+ "hdmi-hifi.2",
+ "hdmi-hifi.3",
+};
+
+static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ int id = args->args[0];
+
+ if (id < ARRAY_SIZE(hdmi_dai_name)) {
+ *dai_name = hdmi_dai_name[id];
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
static struct snd_soc_codec_driver hdmi_codec = {
- .controls = hdmi_controls,
- .num_controls = ARRAY_SIZE(hdmi_controls),
- .dapm_widgets = hdmi_widgets,
- .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
- .dapm_routes = hdmi_routes,
- .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
+ .component_driver = {
+ .controls = hdmi_controls,
+ .num_controls = ARRAY_SIZE(hdmi_controls),
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+ .dapm_routes = hdmi_routes,
+ .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
+ .of_xlate_dai_name = hdmi_of_xlate_dai_name,
+ },
};
static int hdmi_codec_probe(struct platform_device *pdev)
@@ -360,6 +391,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
struct hdmi_codec_pdata *hcd = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct hdmi_codec_priv *hcp;
+ struct hdmi_device *hd;
+ struct list_head *pos;
int dai_count, i = 0;
int ret;
@@ -381,6 +414,31 @@ static int hdmi_codec_probe(struct platform_device *pdev)
if (!hcp)
return -ENOMEM;
+ hd = NULL;
+ list_for_each(pos, &hdmi_device_list) {
+ struct hdmi_device *tmp = pos_to_hdmi_device(pos);
+
+ if (tmp->dev == dev->parent) {
+ hd = tmp;
+ break;
+ }
+ }
+
+ if (!hd) {
+ hd = devm_kzalloc(dev, sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ hd->dev = dev->parent;
+
+ list_add_tail(&hd->list, &hdmi_device_list);
+ }
+
+ if (hd->cnt >= ARRAY_SIZE(hdmi_dai_name)) {
+ dev_err(dev, "too many hdmi codecs are detected\n");
+ return -EINVAL;
+ }
+
hcp->hcd = *hcd;
mutex_init(&hcp->current_stream_lock);
@@ -393,11 +451,14 @@ static int hdmi_codec_probe(struct platform_device *pdev)
hcp->daidrv[i] = hdmi_i2s_dai;
hcp->daidrv[i].playback.channels_max =
hcd->max_i2s_channels;
+ hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
i++;
}
- if (hcd->spdif)
+ if (hcd->spdif) {
hcp->daidrv[i] = hdmi_spdif_dai;
+ hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
+ }
ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
dai_count);
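The hdmi-codec changes above replace the fixed "i2s-hifi"/"spdif-hifi" DAI names with per-instance "hdmi-hifi.N" names, tracked per parent HDMI device through a small global list, and add an of_xlate_dai_name() callback so device-tree users can address a DAI by index even when one controller registers several codec instances. For a machine driver the visible effect is only the codec DAI name it binds to; an illustrative link, with placeholder device names:

	#include <sound/soc.h>

	/* Illustrative only: codec_name, cpu_dai_name and platform_name are
	 * hypothetical; the point is the instance-numbered codec DAI name. */
	static struct snd_soc_dai_link foo_hdmi_dai_link = {
		.name		= "HDMI",
		.stream_name	= "HDMI PCM",
		.codec_dai_name	= "hdmi-hifi.0",	/* was "i2s-hifi" */
		.codec_name	= "hdmi-audio-codec.0.auto",
		.cpu_dai_name	= "foo-i2s",
		.platform_name	= "foo-pcm",
	};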
diff --git a/sound/soc/codecs/inno_rk3036.c b/sound/soc/codecs/inno_rk3036.c
index 9b6e8840a1b5..b918ba5c8ce5 100644
--- a/sound/soc/codecs/inno_rk3036.c
+++ b/sound/soc/codecs/inno_rk3036.c
@@ -380,12 +380,14 @@ static struct snd_soc_codec_driver rk3036_codec_driver = {
.probe = rk3036_codec_probe,
.remove = rk3036_codec_remove,
.set_bias_level = rk3036_codec_set_bias_level,
- .controls = rk3036_codec_dapm_controls,
- .num_controls = ARRAY_SIZE(rk3036_codec_dapm_controls),
- .dapm_routes = rk3036_codec_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rk3036_codec_dapm_routes),
- .dapm_widgets = rk3036_codec_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rk3036_codec_dapm_widgets),
+ .component_driver = {
+ .controls = rk3036_codec_dapm_controls,
+ .num_controls = ARRAY_SIZE(rk3036_codec_dapm_controls),
+ .dapm_routes = rk3036_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rk3036_codec_dapm_routes),
+ .dapm_widgets = rk3036_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk3036_codec_dapm_widgets),
+ },
};
static const struct regmap_config rk3036_codec_regmap_config = {
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index be448373d39a..a4b0eded984a 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -1089,12 +1089,14 @@ static struct snd_soc_dai_driver isabelle_dai[] = {
static struct snd_soc_codec_driver soc_codec_dev_isabelle = {
.set_bias_level = isabelle_set_bias_level,
- .controls = isabelle_snd_controls,
- .num_controls = ARRAY_SIZE(isabelle_snd_controls),
- .dapm_widgets = isabelle_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(isabelle_dapm_widgets),
- .dapm_routes = isabelle_intercon,
- .num_dapm_routes = ARRAY_SIZE(isabelle_intercon),
+ .component_driver = {
+ .controls = isabelle_snd_controls,
+ .num_controls = ARRAY_SIZE(isabelle_snd_controls),
+ .dapm_widgets = isabelle_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(isabelle_dapm_widgets),
+ .dapm_routes = isabelle_intercon,
+ .num_dapm_routes = ARRAY_SIZE(isabelle_intercon),
+ },
.idle_bias_off = true,
};
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 1f5ab99956ed..0290fab383da 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -298,12 +298,14 @@ static struct snd_soc_codec_driver soc_codec_dev_jz4740_codec = {
.set_bias_level = jz4740_codec_set_bias_level,
.suspend_bias_off = true,
- .controls = jz4740_codec_controls,
- .num_controls = ARRAY_SIZE(jz4740_codec_controls),
- .dapm_widgets = jz4740_codec_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(jz4740_codec_dapm_widgets),
- .dapm_routes = jz4740_codec_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(jz4740_codec_dapm_routes),
+ .component_driver = {
+ .controls = jz4740_codec_controls,
+ .num_controls = ARRAY_SIZE(jz4740_codec_controls),
+ .dapm_widgets = jz4740_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(jz4740_codec_dapm_widgets),
+ .dapm_routes = jz4740_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(jz4740_codec_dapm_routes),
+ },
};
static const struct regmap_config jz4740_codec_regmap_config = {
diff --git a/sound/soc/codecs/l3.c b/sound/soc/codecs/l3.c
index 5353af58862c..a10ea3c716c6 100644
--- a/sound/soc/codecs/l3.c
+++ b/sound/soc/codecs/l3.c
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
#include <sound/l3.h>
@@ -32,11 +34,11 @@ static void sendbyte(struct l3_pins *adap, unsigned int byte)
int i;
for (i = 0; i < 8; i++) {
- adap->setclk(0);
+ adap->setclk(adap, 0);
udelay(adap->data_hold);
- adap->setdat(byte & 1);
+ adap->setdat(adap, byte & 1);
udelay(adap->data_setup);
- adap->setclk(1);
+ adap->setclk(adap, 1);
udelay(adap->clock_high);
byte >>= 1;
}
@@ -55,10 +57,10 @@ static void sendbytes(struct l3_pins *adap, const u8 *buf,
for (i = 0; i < len; i++) {
if (i) {
udelay(adap->mode_hold);
- adap->setmode(0);
+ adap->setmode(adap, 0);
udelay(adap->mode);
}
- adap->setmode(1);
+ adap->setmode(adap, 1);
udelay(adap->mode_setup);
sendbyte(adap, buf[i]);
}
@@ -66,26 +68,71 @@ static void sendbytes(struct l3_pins *adap, const u8 *buf,
int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len)
{
- adap->setclk(1);
- adap->setdat(1);
- adap->setmode(1);
+ adap->setclk(adap, 1);
+ adap->setdat(adap, 1);
+ adap->setmode(adap, 1);
udelay(adap->mode);
- adap->setmode(0);
+ adap->setmode(adap, 0);
udelay(adap->mode_setup);
sendbyte(adap, addr);
udelay(adap->mode_hold);
sendbytes(adap, data, len);
- adap->setclk(1);
- adap->setdat(1);
- adap->setmode(0);
+ adap->setclk(adap, 1);
+ adap->setdat(adap, 1);
+ adap->setmode(adap, 0);
return len;
}
EXPORT_SYMBOL_GPL(l3_write);
+
+static void l3_set_clk(struct l3_pins *adap, int val)
+{
+ gpio_set_value(adap->gpio_clk, val);
+}
+
+static void l3_set_data(struct l3_pins *adap, int val)
+{
+ gpio_set_value(adap->gpio_data, val);
+}
+
+static void l3_set_mode(struct l3_pins *adap, int val)
+{
+ gpio_set_value(adap->gpio_mode, val);
+}
+
+int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap)
+{
+ int ret;
+
+ if (!adap->use_gpios)
+ return -EINVAL;
+
+ ret = devm_gpio_request_one(dev, adap->gpio_data,
+ GPIOF_OUT_INIT_LOW, "l3_data");
+ if (ret < 0)
+ return ret;
+ adap->setdat = l3_set_data;
+
+ ret = devm_gpio_request_one(dev, adap->gpio_clk,
+ GPIOF_OUT_INIT_LOW, "l3_clk");
+ if (ret < 0)
+ return ret;
+ adap->setclk = l3_set_clk;
+
+ ret = devm_gpio_request_one(dev, adap->gpio_mode,
+ GPIOF_OUT_INIT_LOW, "l3_mode");
+ if (ret < 0)
+ return ret;
+ adap->setmode = l3_set_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l3_set_gpio_ops);
+
MODULE_DESCRIPTION("L3 bit-banging driver");
MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>");
MODULE_LICENSE("GPL");
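The l3.c changes above pass the adapter to each callback and add a GPIO-backed default implementation, so a board driver no longer has to hand-roll setclk/setdat/setmode: it fills in the GPIO numbers, sets use_gpios, and calls l3_set_gpio_ops() to have the bit-banging helpers installed. A hedged usage sketch (function name and GPIO numbers are placeholders):

	#include <linux/device.h>
	#include <sound/l3.h>

	static int foo_setup_l3(struct device *dev, struct l3_pins *adap)
	{
		adap->gpio_data = 10;	/* placeholder GPIO numbers */
		adap->gpio_clk  = 11;
		adap->gpio_mode = 12;
		adap->use_gpios = true;

		/* timing fields (data_hold, data_setup, clock_high, mode_hold,
		 * mode, mode_setup) would still be filled in as before */

		return l3_set_gpio_ops(dev, adap);	/* installs setclk/setdat/setmode */
	}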
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 9af5640e3446..8d413c2677cc 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -1391,12 +1391,14 @@ static struct snd_soc_dai_driver lm49453_dai[] = {
static struct snd_soc_codec_driver soc_codec_dev_lm49453 = {
.set_bias_level = lm49453_set_bias_level,
- .controls = lm49453_snd_controls,
- .num_controls = ARRAY_SIZE(lm49453_snd_controls),
- .dapm_widgets = lm49453_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(lm49453_dapm_widgets),
- .dapm_routes = lm49453_audio_map,
- .num_dapm_routes = ARRAY_SIZE(lm49453_audio_map),
+ .component_driver = {
+ .controls = lm49453_snd_controls,
+ .num_controls = ARRAY_SIZE(lm49453_snd_controls),
+ .dapm_widgets = lm49453_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(lm49453_dapm_widgets),
+ .dapm_routes = lm49453_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(lm49453_audio_map),
+ },
.idle_bias_off = true,
};
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index fc22804cabc5..72f77455582e 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1704,12 +1704,14 @@ static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
.set_bias_level = max98088_set_bias_level,
.suspend_bias_off = true,
- .controls = max98088_snd_controls,
- .num_controls = ARRAY_SIZE(max98088_snd_controls),
- .dapm_widgets = max98088_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
- .dapm_routes = max98088_audio_map,
- .num_dapm_routes = ARRAY_SIZE(max98088_audio_map),
+ .component_driver = {
+ .controls = max98088_snd_controls,
+ .num_controls = ARRAY_SIZE(max98088_snd_controls),
+ .dapm_widgets = max98088_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
+ .dapm_routes = max98088_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98088_audio_map),
+ },
};
static int max98088_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 3577003f39cf..6f8a757876ed 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2108,12 +2108,14 @@ static struct snd_soc_codec_driver soc_codec_dev_max98095 = {
.suspend = max98095_suspend,
.resume = max98095_resume,
.set_bias_level = max98095_set_bias_level,
- .controls = max98095_snd_controls,
- .num_controls = ARRAY_SIZE(max98095_snd_controls),
- .dapm_widgets = max98095_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max98095_dapm_widgets),
- .dapm_routes = max98095_audio_map,
- .num_dapm_routes = ARRAY_SIZE(max98095_audio_map),
+ .component_driver = {
+ .controls = max98095_snd_controls,
+ .num_controls = ARRAY_SIZE(max98095_snd_controls),
+ .dapm_widgets = max98095_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98095_dapm_widgets),
+ .dapm_routes = max98095_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98095_audio_map),
+ },
};
static int max98095_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 5b1dfb1518fb..6a6b68a4cb52 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -74,10 +74,12 @@ static int max98357a_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver max98357a_codec_driver = {
.probe = max98357a_codec_probe,
- .dapm_widgets = max98357a_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max98357a_dapm_widgets),
- .dapm_routes = max98357a_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(max98357a_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = max98357a_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98357a_dapm_widgets),
+ .dapm_routes = max98357a_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max98357a_dapm_routes),
+ },
};
static const struct snd_soc_dai_ops max98357a_dai_ops = {
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
index 02352ed8961c..781be9ba8dba 100644
--- a/sound/soc/codecs/max98371.c
+++ b/sound/soc/codecs/max98371.c
@@ -426,7 +426,6 @@ MODULE_DEVICE_TABLE(of, max98371_of_match);
static struct i2c_driver max98371_i2c_driver = {
.driver = {
.name = "max98371",
- .owner = THIS_MODULE,
.pm = NULL,
.of_match_table = of_match_ptr(max98371_of_match),
},
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index c14a79d026a1..0610840733d1 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -306,12 +306,14 @@ static struct snd_soc_codec_driver soc_codec_dev_max9850 = {
.set_bias_level = max9850_set_bias_level,
.suspend_bias_off = true,
- .controls = max9850_controls,
- .num_controls = ARRAY_SIZE(max9850_controls),
- .dapm_widgets = max9850_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max9850_dapm_widgets),
- .dapm_routes = max9850_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(max9850_dapm_routes),
+ .component_driver = {
+ .controls = max9850_controls,
+ .num_controls = ARRAY_SIZE(max9850_controls),
+ .dapm_widgets = max9850_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9850_dapm_widgets),
+ .dapm_routes = max9850_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9850_dapm_routes),
+ },
};
static int max9850_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/max9860.c b/sound/soc/codecs/max9860.c
index 68074c92a7c0..499bdbfd0a2d 100644
--- a/sound/soc/codecs/max9860.c
+++ b/sound/soc/codecs/max9860.c
@@ -538,12 +538,14 @@ static struct snd_soc_codec_driver max9860_codec_driver = {
.set_bias_level = max9860_set_bias_level,
.idle_bias_off = true,
- .controls = max9860_controls,
- .num_controls = ARRAY_SIZE(max9860_controls),
- .dapm_widgets = max9860_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max9860_dapm_widgets),
- .dapm_routes = max9860_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(max9860_dapm_routes),
+ .component_driver = {
+ .controls = max9860_controls,
+ .num_controls = ARRAY_SIZE(max9860_controls),
+ .dapm_widgets = max9860_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9860_dapm_widgets),
+ .dapm_routes = max9860_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9860_dapm_routes),
+ },
};
#ifdef CONFIG_PM
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 2a22fddeb6af..42e2e407e287 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -38,11 +38,10 @@ static DECLARE_TLV_DB_SCALE(max9860_capture_tlv, -600, 200, 0);
static DECLARE_TLV_DB_SCALE(max9860_mic_tlv, 2000, 100, 1);
static DECLARE_TLV_DB_SCALE(max9860_adc_left_tlv, -1200, 100, 1);
static DECLARE_TLV_DB_SCALE(max9860_adc_right_tlv, -1200, 100, 1);
-static const unsigned int max98088_micboost_tlv[] = {
- TLV_DB_RANGE_HEAD(2),
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max98088_micboost_tlv,
0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+);
static const struct snd_kcontrol_new max9867_snd_controls[] = {
SOC_DOUBLE_R("Master Playback Volume", MAX9867_LEFTVOL,
@@ -417,12 +416,14 @@ static int max9867_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver max9867_codec = {
.probe = max9867_probe,
- .controls = max9867_snd_controls,
- .num_controls = ARRAY_SIZE(max9867_snd_controls),
- .dapm_routes = max9867_audio_map,
- .num_dapm_routes = ARRAY_SIZE(max9867_audio_map),
- .dapm_widgets = max9867_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max9867_dapm_widgets),
+ .component_driver = {
+ .controls = max9867_snd_controls,
+ .num_controls = ARRAY_SIZE(max9867_snd_controls),
+ .dapm_routes = max9867_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max9867_audio_map),
+ .dapm_widgets = max9867_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9867_dapm_widgets),
+ },
};
static bool max9867_volatile_register(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index 5990de317999..327eaa25c9bd 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -540,12 +540,14 @@ static int max98925_probe(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver soc_codec_dev_max98925 = {
.probe = max98925_probe,
- .controls = max98925_snd_controls,
- .num_controls = ARRAY_SIZE(max98925_snd_controls),
- .dapm_routes = max98925_audio_map,
- .num_dapm_routes = ARRAY_SIZE(max98925_audio_map),
- .dapm_widgets = max98925_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max98925_dapm_widgets),
+ .component_driver = {
+ .controls = max98925_snd_controls,
+ .num_controls = ARRAY_SIZE(max98925_snd_controls),
+ .dapm_routes = max98925_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98925_audio_map),
+ .dapm_widgets = max98925_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98925_dapm_widgets),
+ },
};
static const struct regmap_config max98925_regmap = {
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
index 8d14adae5cc5..1eff7e0b092e 100644
--- a/sound/soc/codecs/max98926.c
+++ b/sound/soc/codecs/max98926.c
@@ -347,7 +347,7 @@ static int max98926_dai_set_fmt(struct snd_soc_dai *codec_dai,
max98926_set_sense_data(max98926);
break;
default:
- dev_err(codec->dev, "DAI clock mode unsupported");
+ dev_err(codec->dev, "DAI clock mode unsupported\n");
return -EINVAL;
}
@@ -364,7 +364,7 @@ static int max98926_dai_set_fmt(struct snd_soc_dai *codec_dai,
invert = MAX98926_DAI_BCI_MASK | MAX98926_DAI_WCI_MASK;
break;
default:
- dev_err(codec->dev, "DAI invert mode unsupported");
+ dev_err(codec->dev, "DAI invert mode unsupported\n");
return -EINVAL;
}
@@ -408,7 +408,7 @@ static int max98926_dai_hw_params(struct snd_pcm_substream *substream,
max98926->ch_size = 32;
break;
default:
- dev_dbg(codec->dev, "format unsupported %d",
+ dev_dbg(codec->dev, "format unsupported %d\n",
params_format(params));
return -EINVAL;
}
@@ -498,12 +498,14 @@ static int max98926_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_max98926 = {
.probe = max98926_probe,
- .controls = max98926_snd_controls,
- .num_controls = ARRAY_SIZE(max98926_snd_controls),
- .dapm_routes = max98926_audio_map,
- .num_dapm_routes = ARRAY_SIZE(max98926_audio_map),
- .dapm_widgets = max98926_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(max98926_dapm_widgets),
+ .component_driver = {
+ .controls = max98926_snd_controls,
+ .num_controls = ARRAY_SIZE(max98926_snd_controls),
+ .dapm_routes = max98926_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98926_audio_map),
+ .dapm_widgets = max98926_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98926_dapm_widgets),
+ },
};
static const struct regmap_config max98926_regmap = {
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 3e770cbe7f0f..90562703dcfd 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -737,12 +737,14 @@ static struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
.probe = mc13783_probe,
.remove = mc13783_remove,
.get_regmap = mc13783_get_regmap,
- .controls = mc13783_control_list,
- .num_controls = ARRAY_SIZE(mc13783_control_list),
- .dapm_widgets = mc13783_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mc13783_dapm_widgets),
- .dapm_routes = mc13783_routes,
- .num_dapm_routes = ARRAY_SIZE(mc13783_routes),
+ .component_driver = {
+ .controls = mc13783_control_list,
+ .num_controls = ARRAY_SIZE(mc13783_control_list),
+ .dapm_widgets = mc13783_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mc13783_dapm_widgets),
+ .dapm_routes = mc13783_routes,
+ .num_dapm_routes = ARRAY_SIZE(mc13783_routes),
+ },
};
static int __init mc13783_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index f561c78b9e0e..69e5e18880c5 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -541,12 +541,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ml26124 = {
.probe = ml26124_probe,
.set_bias_level = ml26124_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = ml26124_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ml26124_dapm_widgets),
- .dapm_routes = ml26124_intercon,
- .num_dapm_routes = ARRAY_SIZE(ml26124_intercon),
- .controls = ml26124_snd_controls,
- .num_controls = ARRAY_SIZE(ml26124_snd_controls),
+ .component_driver = {
+ .controls = ml26124_snd_controls,
+ .num_controls = ARRAY_SIZE(ml26124_snd_controls),
+ .dapm_widgets = ml26124_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ml26124_dapm_widgets),
+ .dapm_routes = ml26124_intercon,
+ .num_dapm_routes = ARRAY_SIZE(ml26124_intercon),
+ },
};
static const struct regmap_config ml26124_i2c_regmap = {
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
new file mode 100644
index 000000000000..e45518629968
--- /dev/null
+++ b/sound/soc/codecs/nau8810.c
@@ -0,0 +1,884 @@
+/*
+ * nau8810.c -- NAU8810 ALSA SoC Audio driver
+ *
+ * Copyright 2016 Nuvoton Technology Corp.
+ *
+ * Author: David Lin <ctlin0@nuvoton.com>
+ *
+ * Based on WM8974.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "nau8810.h"
+
+#define NAU_PLL_FREQ_MAX 100000000
+#define NAU_PLL_FREQ_MIN 90000000
+#define NAU_PLL_REF_MAX 33000000
+#define NAU_PLL_REF_MIN 8000000
+#define NAU_PLL_OPTOP_MIN 6
+
+
+static const int nau8810_mclk_scaler[] = { 10, 15, 20, 30, 40, 60, 80, 120 };
+
+static const struct reg_default nau8810_reg_defaults[] = {
+ { NAU8810_REG_POWER1, 0x0000 },
+ { NAU8810_REG_POWER2, 0x0000 },
+ { NAU8810_REG_POWER3, 0x0000 },
+ { NAU8810_REG_IFACE, 0x0050 },
+ { NAU8810_REG_COMP, 0x0000 },
+ { NAU8810_REG_CLOCK, 0x0140 },
+ { NAU8810_REG_SMPLR, 0x0000 },
+ { NAU8810_REG_DAC, 0x0000 },
+ { NAU8810_REG_DACGAIN, 0x00FF },
+ { NAU8810_REG_ADC, 0x0100 },
+ { NAU8810_REG_ADCGAIN, 0x00FF },
+ { NAU8810_REG_EQ1, 0x012C },
+ { NAU8810_REG_EQ2, 0x002C },
+ { NAU8810_REG_EQ3, 0x002C },
+ { NAU8810_REG_EQ4, 0x002C },
+ { NAU8810_REG_EQ5, 0x002C },
+ { NAU8810_REG_DACLIM1, 0x0032 },
+ { NAU8810_REG_DACLIM2, 0x0000 },
+ { NAU8810_REG_NOTCH1, 0x0000 },
+ { NAU8810_REG_NOTCH2, 0x0000 },
+ { NAU8810_REG_NOTCH3, 0x0000 },
+ { NAU8810_REG_NOTCH4, 0x0000 },
+ { NAU8810_REG_ALC1, 0x0038 },
+ { NAU8810_REG_ALC2, 0x000B },
+ { NAU8810_REG_ALC3, 0x0032 },
+ { NAU8810_REG_NOISEGATE, 0x0000 },
+ { NAU8810_REG_PLLN, 0x0008 },
+ { NAU8810_REG_PLLK1, 0x000C },
+ { NAU8810_REG_PLLK2, 0x0093 },
+ { NAU8810_REG_PLLK3, 0x00E9 },
+ { NAU8810_REG_ATTEN, 0x0000 },
+ { NAU8810_REG_INPUT_SIGNAL, 0x0003 },
+ { NAU8810_REG_PGAGAIN, 0x0010 },
+ { NAU8810_REG_ADCBOOST, 0x0100 },
+ { NAU8810_REG_OUTPUT, 0x0002 },
+ { NAU8810_REG_SPKMIX, 0x0001 },
+ { NAU8810_REG_SPKGAIN, 0x0039 },
+ { NAU8810_REG_MONOMIX, 0x0001 },
+ { NAU8810_REG_POWER4, 0x0000 },
+ { NAU8810_REG_TSLOTCTL1, 0x0000 },
+ { NAU8810_REG_TSLOTCTL2, 0x0020 },
+ { NAU8810_REG_DEVICE_REVID, 0x0000 },
+ { NAU8810_REG_I2C_DEVICEID, 0x001A },
+ { NAU8810_REG_ADDITIONID, 0x00CA },
+ { NAU8810_REG_RESERVE, 0x0124 },
+ { NAU8810_REG_OUTCTL, 0x0001 },
+ { NAU8810_REG_ALC1ENHAN1, 0x0010 },
+ { NAU8810_REG_ALC1ENHAN2, 0x0000 },
+ { NAU8810_REG_MISCCTL, 0x0000 },
+ { NAU8810_REG_OUTTIEOFF, 0x0000 },
+ { NAU8810_REG_AGCP2POUT, 0x0000 },
+ { NAU8810_REG_AGCPOUT, 0x0000 },
+ { NAU8810_REG_AMTCTL, 0x0000 },
+ { NAU8810_REG_OUTTIEOFFMAN, 0x0000 },
+};
+
+static bool nau8810_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8810_REG_RESET ... NAU8810_REG_SMPLR:
+ case NAU8810_REG_DAC ... NAU8810_REG_DACGAIN:
+ case NAU8810_REG_ADC ... NAU8810_REG_ADCGAIN:
+ case NAU8810_REG_EQ1 ... NAU8810_REG_EQ5:
+ case NAU8810_REG_DACLIM1 ... NAU8810_REG_DACLIM2:
+ case NAU8810_REG_NOTCH1 ... NAU8810_REG_NOTCH4:
+ case NAU8810_REG_ALC1 ... NAU8810_REG_ATTEN:
+ case NAU8810_REG_INPUT_SIGNAL ... NAU8810_REG_PGAGAIN:
+ case NAU8810_REG_ADCBOOST:
+ case NAU8810_REG_OUTPUT ... NAU8810_REG_SPKMIX:
+ case NAU8810_REG_SPKGAIN:
+ case NAU8810_REG_MONOMIX:
+ case NAU8810_REG_POWER4 ... NAU8810_REG_TSLOTCTL2:
+ case NAU8810_REG_DEVICE_REVID ... NAU8810_REG_RESERVE:
+ case NAU8810_REG_OUTCTL ... NAU8810_REG_ALC1ENHAN2:
+ case NAU8810_REG_MISCCTL:
+ case NAU8810_REG_OUTTIEOFF ... NAU8810_REG_OUTTIEOFFMAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8810_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8810_REG_RESET ... NAU8810_REG_SMPLR:
+ case NAU8810_REG_DAC ... NAU8810_REG_DACGAIN:
+ case NAU8810_REG_ADC ... NAU8810_REG_ADCGAIN:
+ case NAU8810_REG_EQ1 ... NAU8810_REG_EQ5:
+ case NAU8810_REG_DACLIM1 ... NAU8810_REG_DACLIM2:
+ case NAU8810_REG_NOTCH1 ... NAU8810_REG_NOTCH4:
+ case NAU8810_REG_ALC1 ... NAU8810_REG_ATTEN:
+ case NAU8810_REG_INPUT_SIGNAL ... NAU8810_REG_PGAGAIN:
+ case NAU8810_REG_ADCBOOST:
+ case NAU8810_REG_OUTPUT ... NAU8810_REG_SPKMIX:
+ case NAU8810_REG_SPKGAIN:
+ case NAU8810_REG_MONOMIX:
+ case NAU8810_REG_POWER4 ... NAU8810_REG_TSLOTCTL2:
+ case NAU8810_REG_OUTCTL ... NAU8810_REG_ALC1ENHAN2:
+ case NAU8810_REG_MISCCTL:
+ case NAU8810_REG_OUTTIEOFF ... NAU8810_REG_OUTTIEOFFMAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8810_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8810_REG_RESET:
+ case NAU8810_REG_DEVICE_REVID ... NAU8810_REG_RESERVE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* The EQ parameters get function reads back the 5-band equalizer settings.
+ * A regmap raw read can't be used here because regmap doesn't provide a
+ * value format for a 9-bit value width. Therefore, the driver reads the
+ * data from the cache and formats the values according to the endianness
+ * of the bytes type control element.
+ */
+static int nau8810_eq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ int i, reg, reg_val;
+ u16 *val;
+
+ val = (u16 *)ucontrol->value.bytes.data;
+ reg = NAU8810_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ regmap_read(nau8810->regmap, reg + i, &reg_val);
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+ reg_val = cpu_to_be16(reg_val);
+ memcpy(val + i, &reg_val, sizeof(reg_val));
+ }
+
+ return 0;
+}
+
+/* The EQ parameters put function configures the 5-band equalizer control.
+ * The configuration includes the central frequency, equalizer gain,
+ * cut-off frequency, bandwidth control, and equalizer path.
+ * A regmap raw write can't be used here because regmap doesn't provide a
+ * register and value format for a register with a 7-bit address and a
+ * 9-bit value. Therefore, the driver formats the values according to the
+ * endianness of the bytes type control element and writes the data to
+ * the codec.
+ */
+static int nau8810_eq_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ void *data;
+ u16 *val, value;
+ int i, reg, ret;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ val = (u16 *)data;
+ reg = NAU8810_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+ value = be16_to_cpu(*(val + i));
+ ret = regmap_write(nau8810->regmap, reg + i, value);
+ if (ret) {
+ dev_err(codec->dev, "EQ configuration fail, register: %x ret: %d\n",
+ reg + i, ret);
+ kfree(data);
+ return ret;
+ }
+ }
+ kfree(data);
+
+ return 0;
+}
+
+static const char * const nau8810_companding[] = {
+ "Off", "NC", "u-law", "A-law" };
+
+static const struct soc_enum nau8810_companding_adc_enum =
+ SOC_ENUM_SINGLE(NAU8810_REG_COMP, NAU8810_ADCCM_SFT,
+ ARRAY_SIZE(nau8810_companding), nau8810_companding);
+
+static const struct soc_enum nau8810_companding_dac_enum =
+ SOC_ENUM_SINGLE(NAU8810_REG_COMP, NAU8810_DACCM_SFT,
+ ARRAY_SIZE(nau8810_companding), nau8810_companding);
+
+static const char * const nau8810_deemp[] = {
+ "None", "32kHz", "44.1kHz", "48kHz" };
+
+static const struct soc_enum nau8810_deemp_enum =
+ SOC_ENUM_SINGLE(NAU8810_REG_DAC, NAU8810_DEEMP_SFT,
+ ARRAY_SIZE(nau8810_deemp), nau8810_deemp);
+
+static const char * const nau8810_eqmode[] = {"Capture", "Playback" };
+
+static const struct soc_enum nau8810_eqmode_enum =
+ SOC_ENUM_SINGLE(NAU8810_REG_EQ1, NAU8810_EQM_SFT,
+ ARRAY_SIZE(nau8810_eqmode), nau8810_eqmode);
+
+static const char * const nau8810_alc[] = {"Normal", "Limiter" };
+
+static const struct soc_enum nau8810_alc_enum =
+ SOC_ENUM_SINGLE(NAU8810_REG_ALC3, NAU8810_ALCM_SFT,
+ ARRAY_SIZE(nau8810_alc), nau8810_alc);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
+static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
+
+static const struct snd_kcontrol_new nau8810_snd_controls[] = {
+ SOC_ENUM("ADC Companding", nau8810_companding_adc_enum),
+ SOC_ENUM("DAC Companding", nau8810_companding_dac_enum),
+ SOC_ENUM("DAC De-emphasis", nau8810_deemp_enum),
+
+ SOC_ENUM("EQ Function", nau8810_eqmode_enum),
+ SND_SOC_BYTES_EXT("EQ Parameters", 10,
+ nau8810_eq_get, nau8810_eq_put),
+
+ SOC_SINGLE("DAC Inversion Switch", NAU8810_REG_DAC,
+ NAU8810_DACPL_SFT, 1, 0),
+ SOC_SINGLE_TLV("Playback Volume", NAU8810_REG_DACGAIN,
+ NAU8810_DACGAIN_SFT, 0xff, 0, digital_tlv),
+
+ SOC_SINGLE("High Pass Filter Switch", NAU8810_REG_ADC,
+ NAU8810_HPFEN_SFT, 1, 0),
+ SOC_SINGLE("High Pass Cut Off", NAU8810_REG_ADC,
+ NAU8810_HPF_SFT, 0x7, 0),
+
+ SOC_SINGLE("ADC Inversion Switch", NAU8810_REG_ADC,
+ NAU8810_ADCPL_SFT, 1, 0),
+ SOC_SINGLE_TLV("Capture Volume", NAU8810_REG_ADCGAIN,
+ NAU8810_ADCGAIN_SFT, 0xff, 0, digital_tlv),
+
+ SOC_SINGLE_TLV("EQ1 Volume", NAU8810_REG_EQ1,
+ NAU8810_EQ1GC_SFT, 0x18, 1, eq_tlv),
+ SOC_SINGLE_TLV("EQ2 Volume", NAU8810_REG_EQ2,
+ NAU8810_EQ2GC_SFT, 0x18, 1, eq_tlv),
+ SOC_SINGLE_TLV("EQ3 Volume", NAU8810_REG_EQ3,
+ NAU8810_EQ3GC_SFT, 0x18, 1, eq_tlv),
+ SOC_SINGLE_TLV("EQ4 Volume", NAU8810_REG_EQ4,
+ NAU8810_EQ4GC_SFT, 0x18, 1, eq_tlv),
+ SOC_SINGLE_TLV("EQ5 Volume", NAU8810_REG_EQ5,
+ NAU8810_EQ5GC_SFT, 0x18, 1, eq_tlv),
+
+ SOC_SINGLE("DAC Limiter Switch", NAU8810_REG_DACLIM1,
+ NAU8810_DACLIMEN_SFT, 1, 0),
+ SOC_SINGLE("DAC Limiter Decay", NAU8810_REG_DACLIM1,
+ NAU8810_DACLIMDCY_SFT, 0xf, 0),
+ SOC_SINGLE("DAC Limiter Attack", NAU8810_REG_DACLIM1,
+ NAU8810_DACLIMATK_SFT, 0xf, 0),
+ SOC_SINGLE("DAC Limiter Threshold", NAU8810_REG_DACLIM2,
+ NAU8810_DACLIMTHL_SFT, 0x7, 0),
+ SOC_SINGLE("DAC Limiter Boost", NAU8810_REG_DACLIM2,
+ NAU8810_DACLIMBST_SFT, 0xf, 0),
+
+ SOC_ENUM("ALC Mode", nau8810_alc_enum),
+ SOC_SINGLE("ALC Enable Switch", NAU8810_REG_ALC1,
+ NAU8810_ALCEN_SFT, 1, 0),
+ SOC_SINGLE("ALC Max Volume", NAU8810_REG_ALC1,
+ NAU8810_ALCMXGAIN_SFT, 0x7, 0),
+ SOC_SINGLE("ALC Min Volume", NAU8810_REG_ALC1,
+ NAU8810_ALCMINGAIN_SFT, 0x7, 0),
+ SOC_SINGLE("ALC ZC Switch", NAU8810_REG_ALC2,
+ NAU8810_ALCZC_SFT, 1, 0),
+ SOC_SINGLE("ALC Hold", NAU8810_REG_ALC2,
+ NAU8810_ALCHT_SFT, 0xf, 0),
+ SOC_SINGLE("ALC Target", NAU8810_REG_ALC2,
+ NAU8810_ALCSL_SFT, 0xf, 0),
+ SOC_SINGLE("ALC Decay", NAU8810_REG_ALC3,
+ NAU8810_ALCDCY_SFT, 0xf, 0),
+ SOC_SINGLE("ALC Attack", NAU8810_REG_ALC3,
+ NAU8810_ALCATK_SFT, 0xf, 0),
+ SOC_SINGLE("ALC Noise Gate Switch", NAU8810_REG_NOISEGATE,
+ NAU8810_ALCNEN_SFT, 1, 0),
+ SOC_SINGLE("ALC Noise Gate Threshold", NAU8810_REG_NOISEGATE,
+ NAU8810_ALCNTH_SFT, 0x7, 0),
+
+ SOC_SINGLE("PGA ZC Switch", NAU8810_REG_PGAGAIN,
+ NAU8810_PGAZC_SFT, 1, 0),
+ SOC_SINGLE_TLV("PGA Volume", NAU8810_REG_PGAGAIN,
+ NAU8810_PGAGAIN_SFT, 0x3f, 0, inpga_tlv),
+
+ SOC_SINGLE("Speaker ZC Switch", NAU8810_REG_SPKGAIN,
+ NAU8810_SPKZC_SFT, 1, 0),
+ SOC_SINGLE("Speaker Mute Switch", NAU8810_REG_SPKGAIN,
+ NAU8810_SPKMT_SFT, 1, 0),
+ SOC_SINGLE_TLV("Speaker Volume", NAU8810_REG_SPKGAIN,
+ NAU8810_SPKGAIN_SFT, 0x3f, 0, spk_tlv),
+
+ SOC_SINGLE("Capture Boost(+20dB)", NAU8810_REG_ADCBOOST,
+ NAU8810_PGABST_SFT, 1, 0),
+ SOC_SINGLE("Mono Mute Switch", NAU8810_REG_MONOMIX,
+ NAU8810_MOUTMXMT_SFT, 1, 0),
+
+ SOC_SINGLE("DAC Oversampling Rate(128x) Switch", NAU8810_REG_DAC,
+ NAU8810_DACOS_SFT, 1, 0),
+ SOC_SINGLE("ADC Oversampling Rate(128x) Switch", NAU8810_REG_ADC,
+ NAU8810_ADCOS_SFT, 1, 0),
+};
+
+/* Speaker Output Mixer */
+static const struct snd_kcontrol_new nau8810_speaker_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Line Bypass Switch", NAU8810_REG_SPKMIX,
+ NAU8810_BYPSPK_SFT, 1, 0),
+ SOC_DAPM_SINGLE("PCM Playback Switch", NAU8810_REG_SPKMIX,
+ NAU8810_DACSPK_SFT, 1, 0),
+};
+
+/* Mono Output Mixer */
+static const struct snd_kcontrol_new nau8810_mono_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Line Bypass Switch", NAU8810_REG_MONOMIX,
+ NAU8810_BYPMOUT_SFT, 1, 0),
+ SOC_DAPM_SINGLE("PCM Playback Switch", NAU8810_REG_MONOMIX,
+ NAU8810_DACMOUT_SFT, 1, 0),
+};
+
+/* PGA Mute */
+static const struct snd_kcontrol_new nau8810_inpga_mute[] = {
+ SOC_DAPM_SINGLE("PGA Mute Switch", NAU8810_REG_PGAGAIN,
+ NAU8810_PGAMT_SFT, 1, 0),
+};
+
+/* Input PGA */
+static const struct snd_kcontrol_new nau8810_inpga[] = {
+ SOC_DAPM_SINGLE("MicN Switch", NAU8810_REG_INPUT_SIGNAL,
+ NAU8810_NMICPGA_SFT, 1, 0),
+ SOC_DAPM_SINGLE("MicP Switch", NAU8810_REG_INPUT_SIGNAL,
+ NAU8810_PMICPGA_SFT, 1, 0),
+};
+
+/* Mic Input boost vol */
+static const struct snd_kcontrol_new nau8810_mic_boost_controls =
+ SOC_DAPM_SINGLE("Mic Volume", NAU8810_REG_ADCBOOST,
+ NAU8810_PMICBSTGAIN_SFT, 0x7, 0);
+
+/* Loopback Switch */
+static const struct snd_kcontrol_new nau8810_loopback =
+ SOC_DAPM_SINGLE("Switch", NAU8810_REG_COMP,
+ NAU8810_ADDAP_SFT, 1, 0);
+
+static int check_mclk_select_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+
+ regmap_read(nau8810->regmap, NAU8810_REG_CLOCK, &value);
+ return (value & NAU8810_CLKM_MASK);
+}
+
+static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
+ SND_SOC_DAPM_MIXER("Speaker Mixer", NAU8810_REG_POWER3,
+ NAU8810_SPKMX_EN_SFT, 0, &nau8810_speaker_mixer_controls[0],
+ ARRAY_SIZE(nau8810_speaker_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
+ NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
+ ARRAY_SIZE(nau8810_mono_mixer_controls)),
+ SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+ NAU8810_DAC_EN_SFT, 0),
+ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+ NAU8810_ADC_EN_SFT, 0),
+ SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
+ NAU8810_NSPK_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SpkP Out", NAU8810_REG_POWER3,
+ NAU8810_PSPK_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Mono Out", NAU8810_REG_POWER3,
+ NAU8810_MOUT_EN_SFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("Input PGA", NAU8810_REG_POWER2,
+ NAU8810_PGA_EN_SFT, 0, nau8810_inpga,
+ ARRAY_SIZE(nau8810_inpga)),
+ SND_SOC_DAPM_MIXER("Input Boost Stage", NAU8810_REG_POWER2,
+ NAU8810_BST_EN_SFT, 0, nau8810_inpga_mute,
+ ARRAY_SIZE(nau8810_inpga_mute)),
+
+ SND_SOC_DAPM_SUPPLY("Mic Bias", NAU8810_REG_POWER1,
+ NAU8810_MICBIAS_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL", NAU8810_REG_POWER1,
+ NAU8810_PLL_EN_SFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH("Digital Loopback", SND_SOC_NOPM, 0, 0,
+ &nau8810_loopback),
+
+ SND_SOC_DAPM_INPUT("MICN"),
+ SND_SOC_DAPM_INPUT("MICP"),
+ SND_SOC_DAPM_OUTPUT("MONOOUT"),
+ SND_SOC_DAPM_OUTPUT("SPKOUTP"),
+ SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+};
+
+static const struct snd_soc_dapm_route nau8810_dapm_routes[] = {
+ {"DAC", NULL, "PLL", check_mclk_select_pll},
+
+ /* Mono output mixer */
+ {"Mono Mixer", "PCM Playback Switch", "DAC"},
+ {"Mono Mixer", "Line Bypass Switch", "Input Boost Stage"},
+
+ /* Speaker output mixer */
+ {"Speaker Mixer", "PCM Playback Switch", "DAC"},
+ {"Speaker Mixer", "Line Bypass Switch", "Input Boost Stage"},
+
+ /* Outputs */
+ {"Mono Out", NULL, "Mono Mixer"},
+ {"MONOOUT", NULL, "Mono Out"},
+ {"SpkN Out", NULL, "Speaker Mixer"},
+ {"SpkP Out", NULL, "Speaker Mixer"},
+ {"SPKOUTN", NULL, "SpkN Out"},
+ {"SPKOUTP", NULL, "SpkP Out"},
+
+ /* Input Boost Stage */
+ {"ADC", NULL, "Input Boost Stage"},
+ {"ADC", NULL, "PLL", check_mclk_select_pll},
+ {"Input Boost Stage", NULL, "Input PGA"},
+ {"Input Boost Stage", NULL, "MICP"},
+
+ /* Input PGA */
+ {"Input PGA", NULL, "Mic Bias"},
+ {"Input PGA", "MicN Switch", "MICN"},
+ {"Input PGA", "MicP Switch", "MICP"},
+
+ /* Digital Loopback */
+ {"Digital Loopback", "Switch", "ADC"},
+ {"DAC", NULL, "Digital Loopback"},
+};
+
+static int nau8810_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+
+ nau8810->clk_id = clk_id;
+ nau8810->sysclk = freq;
+ dev_dbg(nau8810->dev, "master sysclk %dHz, source %s\n",
+ freq, clk_id == NAU8810_SCLK_PLL ? "PLL" : "MCLK");
+
+ return 0;
+}
+
+static int nau88l0_calc_pll(unsigned int pll_in,
+ unsigned int fs, struct nau8810_pll *pll_param)
+{
+ u64 f2, f2_max, pll_ratio;
+ int i, scal_sel;
+
+ if (pll_in > NAU_PLL_REF_MAX || pll_in < NAU_PLL_REF_MIN)
+ return -EINVAL;
+
+ f2_max = 0;
+ scal_sel = ARRAY_SIZE(nau8810_mclk_scaler);
+ for (i = 0; i < ARRAY_SIZE(nau8810_mclk_scaler); i++) {
+ f2 = 256 * fs * 4 * nau8810_mclk_scaler[i] / 10;
+ if (f2 > NAU_PLL_FREQ_MIN && f2 < NAU_PLL_FREQ_MAX &&
+ f2_max < f2) {
+ f2_max = f2;
+ scal_sel = i;
+ }
+ }
+ if (ARRAY_SIZE(nau8810_mclk_scaler) == scal_sel)
+ return -EINVAL;
+ pll_param->mclk_scaler = scal_sel;
+ f2 = f2_max;
+
+ /* Calculate the PLL 4-bit integer input and the PLL 24-bit fractional
+ * input; round up to the 24+4 bit value.
+ */
+ pll_ratio = div_u64(f2 << 28, pll_in);
+ pll_param->pre_factor = 0;
+ if (((pll_ratio >> 28) & 0xF) < NAU_PLL_OPTOP_MIN) {
+ pll_ratio <<= 1;
+ pll_param->pre_factor = 1;
+ }
+ pll_param->pll_int = (pll_ratio >> 28) & 0xF;
+ pll_param->pll_frac = ((pll_ratio & 0xFFFFFFF) >> 4);
+
+ return 0;
+}
+
+static int nau8810_set_pll(struct snd_soc_dai *codec_dai, int pll_id,
+ int source, unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ struct regmap *map = nau8810->regmap;
+ struct nau8810_pll *pll_param = &nau8810->pll;
+ int ret, fs;
+
+ fs = freq_out / 256;
+ ret = nau88l0_calc_pll(freq_in, fs, pll_param);
+ if (ret < 0) {
+ dev_err(nau8810->dev, "Unsupported input clock %d\n", freq_in);
+ return ret;
+ }
+ dev_info(nau8810->dev, "pll_int=%x pll_frac=%x mclk_scaler=%x pre_factor=%x\n",
+ pll_param->pll_int, pll_param->pll_frac, pll_param->mclk_scaler,
+ pll_param->pre_factor);
+
+ regmap_update_bits(map, NAU8810_REG_PLLN,
+ NAU8810_PLLMCLK_DIV2 | NAU8810_PLLN_MASK,
+ (pll_param->pre_factor ? NAU8810_PLLMCLK_DIV2 : 0) |
+ pll_param->pll_int);
+ regmap_write(map, NAU8810_REG_PLLK1,
+ (pll_param->pll_frac >> NAU8810_PLLK1_SFT) &
+ NAU8810_PLLK1_MASK);
+ regmap_write(map, NAU8810_REG_PLLK2,
+ (pll_param->pll_frac >> NAU8810_PLLK2_SFT) &
+ NAU8810_PLLK2_MASK);
+ regmap_write(map, NAU8810_REG_PLLK3,
+ pll_param->pll_frac & NAU8810_PLLK3_MASK);
+ regmap_update_bits(map, NAU8810_REG_CLOCK, NAU8810_MCLKSEL_MASK,
+ pll_param->mclk_scaler << NAU8810_MCLKSEL_SFT);
+ regmap_update_bits(map, NAU8810_REG_CLOCK,
+ NAU8810_CLKM_MASK, NAU8810_CLKM_PLL);
+
+ return 0;
+}
+
+static int nau8810_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ u16 ctrl1_val = 0, ctrl2_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ctrl2_val |= NAU8810_CLKIO_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ctrl1_val |= NAU8810_AIFMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ctrl1_val |= NAU8810_AIFMT_LEFT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ ctrl1_val |= NAU8810_AIFMT_PCM_A;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ ctrl1_val |= NAU8810_BCLKP_IB | NAU8810_FSP_IF;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ ctrl1_val |= NAU8810_BCLKP_IB;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ ctrl1_val |= NAU8810_FSP_IF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_IFACE,
+ NAU8810_AIFMT_MASK | NAU8810_FSP_IF |
+ NAU8810_BCLKP_IB, ctrl1_val);
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_CLOCK,
+ NAU8810_CLKIO_MASK, ctrl2_val);
+
+ return 0;
+}
+
+static int nau8810_mclk_clkdiv(struct nau8810 *nau8810, int rate)
+{
+ int i, sclk, imclk = rate * 256, div = 0;
+
+ if (!nau8810->sysclk) {
+ dev_err(nau8810->dev, "Make mclk div configuration fail because of invalid system clock\n");
+ return -EINVAL;
+ }
+
+ /* Configure the master clock prescaler divider so that the system
+ * clock approximates the internal master clock (IMCLK) while staying
+ * larger than or equal to IMCLK.
+ */
+ for (i = 1; i < ARRAY_SIZE(nau8810_mclk_scaler); i++) {
+ sclk = (nau8810->sysclk * 10) /
+ nau8810_mclk_scaler[i];
+ if (sclk < imclk)
+ break;
+ div = i;
+ }
+ dev_dbg(nau8810->dev,
+ "master clock prescaler %x for fs %d\n", div, rate);
+
+ /* master clock from MCLK and disable PLL */
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_CLOCK,
+ NAU8810_MCLKSEL_MASK, (div << NAU8810_MCLKSEL_SFT));
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_CLOCK,
+ NAU8810_CLKM_MASK, NAU8810_CLKM_MCLK);
+
+ return 0;
+}
+
+static int nau8810_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ int val_len = 0, val_rate = 0, ret = 0;
+
+ switch (params_width(params)) {
+ case 16:
+ break;
+ case 20:
+ val_len |= NAU8810_WLEN_20;
+ break;
+ case 24:
+ val_len |= NAU8810_WLEN_24;
+ break;
+ case 32:
+ val_len |= NAU8810_WLEN_32;
+ break;
+ }
+
+ switch (params_rate(params)) {
+ case 8000:
+ val_rate |= NAU8810_SMPLR_8K;
+ break;
+ case 11025:
+ val_rate |= NAU8810_SMPLR_12K;
+ break;
+ case 16000:
+ val_rate |= NAU8810_SMPLR_16K;
+ break;
+ case 22050:
+ val_rate |= NAU8810_SMPLR_24K;
+ break;
+ case 32000:
+ val_rate |= NAU8810_SMPLR_32K;
+ break;
+ case 44100:
+ case 48000:
+ break;
+ }
+
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_IFACE,
+ NAU8810_WLEN_MASK, val_len);
+ regmap_update_bits(nau8810->regmap, NAU8810_REG_SMPLR,
+ NAU8810_SMPLR_MASK, val_rate);
+
+ /* If the master clock comes from MCLK, provide the runtime FS so the
+ * driver can derive the master clock prescaler configuration.
+ */
+ if (nau8810->clk_id == NAU8810_SCLK_MCLK) {
+ ret = nau8810_mclk_clkdiv(nau8810, params_rate(params));
+ if (ret < 0)
+ dev_err(nau8810->dev, "MCLK div configuration fail\n");
+ }
+
+ return ret;
+}
+
+static int nau8810_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct nau8810 *nau8810 = snd_soc_codec_get_drvdata(codec);
+ struct regmap *map = nau8810->regmap;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ regmap_update_bits(map, NAU8810_REG_POWER1,
+ NAU8810_REFIMP_MASK, NAU8810_REFIMP_80K);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(map, NAU8810_REG_POWER1,
+ NAU8810_IOBUF_EN | NAU8810_ABIAS_EN,
+ NAU8810_IOBUF_EN | NAU8810_ABIAS_EN);
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+ regcache_sync(map);
+ regmap_update_bits(map, NAU8810_REG_POWER1,
+ NAU8810_REFIMP_MASK, NAU8810_REFIMP_3K);
+ mdelay(100);
+ }
+ regmap_update_bits(map, NAU8810_REG_POWER1,
+ NAU8810_REFIMP_MASK, NAU8810_REFIMP_300K);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ regmap_write(map, NAU8810_REG_POWER1, 0);
+ regmap_write(map, NAU8810_REG_POWER2, 0);
+ regmap_write(map, NAU8810_REG_POWER3, 0);
+ break;
+ }
+
+ return 0;
+}
+
+
+#define NAU8810_RATES (SNDRV_PCM_RATE_8000_48000)
+
+#define NAU8810_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops nau8810_ops = {
+ .hw_params = nau8810_pcm_hw_params,
+ .set_fmt = nau8810_set_dai_fmt,
+ .set_sysclk = nau8810_set_sysclk,
+ .set_pll = nau8810_set_pll,
+};
+
+static struct snd_soc_dai_driver nau8810_dai = {
+ .name = "nau8810-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2, /* Only 1 channel of data */
+ .rates = NAU8810_RATES,
+ .formats = NAU8810_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2, /* Only 1 channel of data */
+ .rates = NAU8810_RATES,
+ .formats = NAU8810_FORMATS,
+ },
+ .ops = &nau8810_ops,
+ .symmetric_rates = 1,
+};
+
+static const struct regmap_config nau8810_regmap_config = {
+ .reg_bits = 7,
+ .val_bits = 9,
+
+ .max_register = NAU8810_REG_MAX,
+ .readable_reg = nau8810_readable_reg,
+ .writeable_reg = nau8810_writeable_reg,
+ .volatile_reg = nau8810_volatile_reg,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = nau8810_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(nau8810_reg_defaults),
+};
+
+static struct snd_soc_codec_driver nau8810_codec_driver = {
+ .set_bias_level = nau8810_set_bias_level,
+ .suspend_bias_off = true,
+
+ .component_driver = {
+ .controls = nau8810_snd_controls,
+ .num_controls = ARRAY_SIZE(nau8810_snd_controls),
+ .dapm_widgets = nau8810_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(nau8810_dapm_widgets),
+ .dapm_routes = nau8810_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(nau8810_dapm_routes),
+ },
+};
+
+static int nau8810_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct nau8810 *nau8810 = dev_get_platdata(dev);
+
+ if (!nau8810) {
+ nau8810 = devm_kzalloc(dev, sizeof(*nau8810), GFP_KERNEL);
+ if (!nau8810)
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(i2c, nau8810);
+
+ nau8810->regmap = devm_regmap_init_i2c(i2c, &nau8810_regmap_config);
+ if (IS_ERR(nau8810->regmap))
+ return PTR_ERR(nau8810->regmap);
+ nau8810->dev = dev;
+
+ regmap_write(nau8810->regmap, NAU8810_REG_RESET, 0x00);
+
+ return snd_soc_register_codec(dev,
+ &nau8810_codec_driver, &nau8810_dai, 1);
+}
+
+static int nau8810_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id nau8810_i2c_id[] = {
+ { "nau8810", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nau8810_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id nau8810_of_match[] = {
+ { .compatible = "nuvoton,nau8810", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nau8810_of_match);
+#endif
+
+static struct i2c_driver nau8810_i2c_driver = {
+ .driver = {
+ .name = "nau8810",
+ .of_match_table = of_match_ptr(nau8810_of_match),
+ },
+ .probe = nau8810_i2c_probe,
+ .remove = nau8810_i2c_remove,
+ .id_table = nau8810_i2c_id,
+};
+
+module_i2c_driver(nau8810_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC NAU8810 driver");
+MODULE_AUTHOR("David Lin <ctlin0@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8810.h b/sound/soc/codecs/nau8810.h
new file mode 100644
index 000000000000..df882658ca91
--- /dev/null
+++ b/sound/soc/codecs/nau8810.h
@@ -0,0 +1,281 @@
+/*
+ * NAU8810 ALSA SoC audio driver
+ *
+ * Copyright 2016 Nuvoton Technology Corp.
+ * Author: David Lin <ctlin0@nuvoton.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __NAU8810_H__
+#define __NAU8810_H__
+
+#define NAU8810_REG_RESET 0x00
+#define NAU8810_REG_POWER1 0x01
+#define NAU8810_REG_POWER2 0x02
+#define NAU8810_REG_POWER3 0x03
+#define NAU8810_REG_IFACE 0x04
+#define NAU8810_REG_COMP 0x05
+#define NAU8810_REG_CLOCK 0x06
+#define NAU8810_REG_SMPLR 0x07
+#define NAU8810_REG_DAC 0x0A
+#define NAU8810_REG_DACGAIN 0x0B
+#define NAU8810_REG_ADC 0x0E
+#define NAU8810_REG_ADCGAIN 0x0F
+#define NAU8810_REG_EQ1 0x12
+#define NAU8810_REG_EQ2 0x13
+#define NAU8810_REG_EQ3 0x14
+#define NAU8810_REG_EQ4 0x15
+#define NAU8810_REG_EQ5 0x16
+#define NAU8810_REG_DACLIM1 0x18
+#define NAU8810_REG_DACLIM2 0x19
+#define NAU8810_REG_NOTCH1 0x1B
+#define NAU8810_REG_NOTCH2 0x1C
+#define NAU8810_REG_NOTCH3 0x1D
+#define NAU8810_REG_NOTCH4 0x1E
+#define NAU8810_REG_ALC1 0x20
+#define NAU8810_REG_ALC2 0x21
+#define NAU8810_REG_ALC3 0x22
+#define NAU8810_REG_NOISEGATE 0x23
+#define NAU8810_REG_PLLN 0x24
+#define NAU8810_REG_PLLK1 0x25
+#define NAU8810_REG_PLLK2 0x26
+#define NAU8810_REG_PLLK3 0x27
+#define NAU8810_REG_ATTEN 0x28
+#define NAU8810_REG_INPUT_SIGNAL 0x2C
+#define NAU8810_REG_PGAGAIN 0x2D
+#define NAU8810_REG_ADCBOOST 0x2F
+#define NAU8810_REG_OUTPUT 0x31
+#define NAU8810_REG_SPKMIX 0x32
+#define NAU8810_REG_SPKGAIN 0x36
+#define NAU8810_REG_MONOMIX 0x38
+#define NAU8810_REG_POWER4 0x3A
+#define NAU8810_REG_TSLOTCTL1 0x3B
+#define NAU8810_REG_TSLOTCTL2 0x3C
+#define NAU8810_REG_DEVICE_REVID 0x3E
+#define NAU8810_REG_I2C_DEVICEID 0x3F
+#define NAU8810_REG_ADDITIONID 0x40
+#define NAU8810_REG_RESERVE 0x41
+#define NAU8810_REG_OUTCTL 0x45
+#define NAU8810_REG_ALC1ENHAN1 0x46
+#define NAU8810_REG_ALC1ENHAN2 0x47
+#define NAU8810_REG_MISCCTL 0x49
+#define NAU8810_REG_OUTTIEOFF 0x4B
+#define NAU8810_REG_AGCP2POUT 0x4C
+#define NAU8810_REG_AGCPOUT 0x4D
+#define NAU8810_REG_AMTCTL 0x4E
+#define NAU8810_REG_OUTTIEOFFMAN 0x4F
+#define NAU8810_REG_MAX NAU8810_REG_OUTTIEOFFMAN
+
+
+/* NAU8810_REG_POWER1 (0x1) */
+#define NAU8810_DCBUF_EN (0x1 << 8)
+#define NAU8810_PLL_EN_SFT 5
+#define NAU8810_MICBIAS_EN_SFT 4
+#define NAU8810_ABIAS_EN (0x1 << 3)
+#define NAU8810_IOBUF_EN (0x1 << 2)
+#define NAU8810_REFIMP_MASK 0x3
+#define NAU8810_REFIMP_DIS 0x0
+#define NAU8810_REFIMP_80K 0x1
+#define NAU8810_REFIMP_300K 0x2
+#define NAU8810_REFIMP_3K 0x3
+
+/* NAU8810_REG_POWER2 (0x2) */
+#define NAU8810_BST_EN_SFT 4
+#define NAU8810_PGA_EN_SFT 2
+#define NAU8810_ADC_EN_SFT 0
+
+/* NAU8810_REG_POWER3 (0x3) */
+#define NAU8810_DAC_EN_SFT 0
+#define NAU8810_SPKMX_EN_SFT 2
+#define NAU8810_MOUTMX_EN_SFT 3
+#define NAU8810_PSPK_EN_SFT 5
+#define NAU8810_NSPK_EN_SFT 6
+#define NAU8810_MOUT_EN_SFT 7
+
+/* NAU8810_REG_IFACE (0x4) */
+#define NAU8810_AIFMT_SFT 3
+#define NAU8810_AIFMT_MASK (0x3 << NAU8810_AIFMT_SFT)
+#define NAU8810_AIFMT_RIGHT (0x0 << NAU8810_AIFMT_SFT)
+#define NAU8810_AIFMT_LEFT (0x1 << NAU8810_AIFMT_SFT)
+#define NAU8810_AIFMT_I2S (0x2 << NAU8810_AIFMT_SFT)
+#define NAU8810_AIFMT_PCM_A (0x3 << NAU8810_AIFMT_SFT)
+#define NAU8810_WLEN_SFT 5
+#define NAU8810_WLEN_MASK (0x3 << NAU8810_WLEN_SFT)
+#define NAU8810_WLEN_16 (0x0 << NAU8810_WLEN_SFT)
+#define NAU8810_WLEN_20 (0x1 << NAU8810_WLEN_SFT)
+#define NAU8810_WLEN_24 (0x2 << NAU8810_WLEN_SFT)
+#define NAU8810_WLEN_32 (0x3 << NAU8810_WLEN_SFT)
+#define NAU8810_FSP_IF (0x1 << 7)
+#define NAU8810_BCLKP_IB (0x1 << 8)
+
+/* NAU8810_REG_COMP (0x5) */
+#define NAU8810_ADDAP_SFT 0
+#define NAU8810_ADCCM_SFT 1
+#define NAU8810_DACCM_SFT 3
+
+/* NAU8810_REG_CLOCK (0x6) */
+#define NAU8810_CLKIO_MASK 0x1
+#define NAU8810_CLKIO_SLAVE 0x0
+#define NAU8810_CLKIO_MASTER 0x1
+#define NAU8810_BCLKSEL_SFT 2
+#define NAU8810_BCLKSEL_MASK (0x7 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_1 (0x0 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_2 (0x1 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_4 (0x2 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_8 (0x3 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_16 (0x4 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_BCLKDIV_32 (0x5 << NAU8810_BCLKSEL_SFT)
+#define NAU8810_MCLKSEL_SFT 5
+#define NAU8810_MCLKSEL_MASK (0x7 << NAU8810_MCLKSEL_SFT)
+#define NAU8810_CLKM_SFT 8
+#define NAU8810_CLKM_MASK (0x1 << NAU8810_CLKM_SFT)
+#define NAU8810_CLKM_MCLK (0x0 << NAU8810_CLKM_SFT)
+#define NAU8810_CLKM_PLL (0x1 << NAU8810_CLKM_SFT)
+
+/* NAU8810_REG_SMPLR (0x7) */
+#define NAU8810_SMPLR_SFT 1
+#define NAU8810_SMPLR_MASK (0x7 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_48K (0x0 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_32K (0x1 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_24K (0x2 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_16K (0x3 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_12K (0x4 << NAU8810_SMPLR_SFT)
+#define NAU8810_SMPLR_8K (0x5 << NAU8810_SMPLR_SFT)
+
+/* NAU8810_REG_DAC (0xA) */
+#define NAU8810_DACPL_SFT 0
+#define NAU8810_DACOS_SFT 3
+#define NAU8810_DEEMP_SFT 4
+
+/* NAU8810_REG_DACGAIN (0xB) */
+#define NAU8810_DACGAIN_SFT 0
+
+/* NAU8810_REG_ADC (0xE) */
+#define NAU8810_ADCPL_SFT 0
+#define NAU8810_ADCOS_SFT 3
+#define NAU8810_HPF_SFT 4
+#define NAU8810_HPFEN_SFT 8
+
+/* NAU8810_REG_ADCGAIN (0xF) */
+#define NAU8810_ADCGAIN_SFT 0
+
+/* NAU8810_REG_EQ1 (0x12) */
+#define NAU8810_EQ1GC_SFT 0
+#define NAU8810_EQ1CF_SFT 5
+#define NAU8810_EQM_SFT 8
+
+/* NAU8810_REG_EQ2 (0x13) */
+#define NAU8810_EQ2GC_SFT 0
+#define NAU8810_EQ2CF_SFT 5
+#define NAU8810_EQ2BW_SFT 8
+
+/* NAU8810_REG_EQ3 (0x14) */
+#define NAU8810_EQ3GC_SFT 0
+#define NAU8810_EQ3CF_SFT 5
+#define NAU8810_EQ3BW_SFT 8
+
+/* NAU8810_REG_EQ4 (0x15) */
+#define NAU8810_EQ4GC_SFT 0
+#define NAU8810_EQ4CF_SFT 5
+#define NAU8810_EQ4BW_SFT 8
+
+/* NAU8810_REG_EQ5 (0x16) */
+#define NAU8810_EQ5GC_SFT 0
+#define NAU8810_EQ5CF_SFT 5
+
+/* NAU8810_REG_DACLIM1 (0x18) */
+#define NAU8810_DACLIMATK_SFT 0
+#define NAU8810_DACLIMDCY_SFT 4
+#define NAU8810_DACLIMEN_SFT 8
+
+/* NAU8810_REG_DACLIM2 (0x19) */
+#define NAU8810_DACLIMBST_SFT 0
+#define NAU8810_DACLIMTHL_SFT 4
+
+/* NAU8810_REG_ALC1 (0x20) */
+#define NAU8810_ALCMINGAIN_SFT 0
+#define NAU8810_ALCMXGAIN_SFT 3
+#define NAU8810_ALCEN_SFT 8
+
+/* NAU8810_REG_ALC2 (0x21) */
+#define NAU8810_ALCSL_SFT 0
+#define NAU8810_ALCHT_SFT 4
+#define NAU8810_ALCZC_SFT 8
+
+/* NAU8810_REG_ALC3 (0x22) */
+#define NAU8810_ALCATK_SFT 0
+#define NAU8810_ALCDCY_SFT 4
+#define NAU8810_ALCM_SFT 8
+
+/* NAU8810_REG_NOISEGATE (0x23) */
+#define NAU8810_ALCNTH_SFT 0
+#define NAU8810_ALCNEN_SFT 3
+
+/* NAU8810_REG_PLLN (0x24) */
+#define NAU8810_PLLN_MASK 0xF
+#define NAU8810_PLLMCLK_DIV2 (0x1 << 4)
+
+/* NAU8810_REG_PLLK1 (0x25) */
+#define NAU8810_PLLK1_SFT 18
+#define NAU8810_PLLK1_MASK 0x3F
+
+/* NAU8810_REG_PLLK2 (0x26) */
+#define NAU8810_PLLK2_SFT 9
+#define NAU8810_PLLK2_MASK 0x1FF
+
+/* NAU8810_REG_PLLK3 (0x27) */
+#define NAU8810_PLLK3_MASK 0x1FF
+
+/* NAU8810_REG_INPUT_SIGNAL (0x2C) */
+#define NAU8810_PMICPGA_SFT 0
+#define NAU8810_NMICPGA_SFT 1
+
+/* NAU8810_REG_PGAGAIN (0x2D) */
+#define NAU8810_PGAGAIN_SFT 0
+#define NAU8810_PGAMT_SFT 6
+#define NAU8810_PGAZC_SFT 7
+
+/* NAU8810_REG_ADCBOOST (0x2F) */
+#define NAU8810_PMICBSTGAIN_SFT 4
+#define NAU8810_PGABST_SFT 8
+
+/* NAU8810_REG_SPKMIX (0x32) */
+#define NAU8810_DACSPK_SFT 0
+#define NAU8810_BYPSPK_SFT 1
+
+/* NAU8810_REG_SPKGAIN (0x36) */
+#define NAU8810_SPKGAIN_SFT 0
+#define NAU8810_SPKMT_SFT 6
+#define NAU8810_SPKZC_SFT 7
+
+/* NAU8810_REG_MONOMIX (0x38) */
+#define NAU8810_DACMOUT_SFT 0
+#define NAU8810_BYPMOUT_SFT 1
+#define NAU8810_MOUTMXMT_SFT 6
+
+
+/* System Clock Source */
+enum {
+ NAU8810_SCLK_MCLK,
+ NAU8810_SCLK_PLL,
+};
+
+struct nau8810_pll {
+ int pre_factor;
+ int mclk_scaler;
+ int pll_frac;
+ int pll_int;
+};
+
+struct nau8810 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct nau8810_pll pll;
+ int sysclk;
+ int clk_id;
+};
+
+#endif
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 2e59a85e360b..e643be91d762 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1907,7 +1907,7 @@ static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
/* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
* input based on FDCO, FREF and FLL ratio.
*/
- fvco = div_u64(fvco << 16, fref * fll_param->ratio);
+ fvco = div_u64(fvco_max << 16, fref * fll_param->ratio);
fll_param->fll_int = (fvco >> 16) & 0x3FF;
fll_param->fll_frac = fvco & 0xFFFF;
return 0;
@@ -2256,12 +2256,14 @@ static struct snd_soc_codec_driver nau8825_codec_driver = {
.suspend = nau8825_suspend,
.resume = nau8825_resume,
- .controls = nau8825_controls,
- .num_controls = ARRAY_SIZE(nau8825_controls),
- .dapm_widgets = nau8825_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(nau8825_dapm_widgets),
- .dapm_routes = nau8825_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(nau8825_dapm_routes),
+ .component_driver = {
+ .controls = nau8825_controls,
+ .num_controls = ARRAY_SIZE(nau8825_controls),
+ .dapm_widgets = nau8825_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(nau8825_dapm_widgets),
+ .dapm_routes = nau8825_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(nau8825_dapm_routes),
+ },
};
static void nau8825_reset_chip(struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 33e1fc2d1598..0b14efab6280 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -289,12 +289,14 @@ static const struct regmap_config pcm1681_regmap = {
};
static struct snd_soc_codec_driver soc_codec_dev_pcm1681 = {
- .controls = pcm1681_controls,
- .num_controls = ARRAY_SIZE(pcm1681_controls),
- .dapm_widgets = pcm1681_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pcm1681_dapm_widgets),
- .dapm_routes = pcm1681_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pcm1681_dapm_routes),
+ .component_driver = {
+ .controls = pcm1681_controls,
+ .num_controls = ARRAY_SIZE(pcm1681_controls),
+ .dapm_widgets = pcm1681_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1681_dapm_widgets),
+ .dapm_routes = pcm1681_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1681_dapm_routes),
+ },
};
static const struct i2c_device_id pcm1681_i2c_id[] = {
diff --git a/sound/soc/codecs/pcm179x.c b/sound/soc/codecs/pcm179x.c
index 88fbdd184aa0..b813a154ddd9 100644
--- a/sound/soc/codecs/pcm179x.c
+++ b/sound/soc/codecs/pcm179x.c
@@ -206,12 +206,14 @@ const struct regmap_config pcm179x_regmap_config = {
EXPORT_SYMBOL_GPL(pcm179x_regmap_config);
static struct snd_soc_codec_driver soc_codec_dev_pcm179x = {
- .controls = pcm179x_controls,
- .num_controls = ARRAY_SIZE(pcm179x_controls),
- .dapm_widgets = pcm179x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pcm179x_dapm_widgets),
- .dapm_routes = pcm179x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pcm179x_dapm_routes),
+ .component_driver = {
+ .controls = pcm179x_controls,
+ .num_controls = ARRAY_SIZE(pcm179x_controls),
+ .dapm_widgets = pcm179x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm179x_dapm_widgets),
+ .dapm_routes = pcm179x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm179x_dapm_routes),
+ },
};
int pcm179x_common_init(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index 8fb445f33f6f..708af05486f6 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -99,10 +99,12 @@ static struct snd_soc_dai_driver pcm3008_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
- .dapm_widgets = pcm3008_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pcm3008_dapm_widgets),
- .dapm_routes = pcm3008_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pcm3008_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = pcm3008_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm3008_dapm_widgets),
+ .dapm_routes = pcm3008_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm3008_dapm_routes),
+ },
};
static int pcm3008_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 992a77edcd5d..39bc02d5bc5d 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -599,12 +599,14 @@ EXPORT_SYMBOL_GPL(pcm3168a_regmap);
static const struct snd_soc_codec_driver pcm3168a_driver = {
.idle_bias_off = true,
- .controls = pcm3168a_snd_controls,
- .num_controls = ARRAY_SIZE(pcm3168a_snd_controls),
- .dapm_widgets = pcm3168a_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pcm3168a_dapm_widgets),
- .dapm_routes = pcm3168a_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pcm3168a_dapm_routes)
+ .component_driver = {
+ .controls = pcm3168a_snd_controls,
+ .num_controls = ARRAY_SIZE(pcm3168a_snd_controls),
+ .dapm_widgets = pcm3168a_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm3168a_dapm_widgets),
+ .dapm_routes = pcm3168a_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm3168a_dapm_routes)
+ },
};
int pcm3168a_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 047c48953a20..72b19e62f626 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1348,12 +1348,14 @@ static struct snd_soc_codec_driver pcm512x_codec_driver = {
.set_bias_level = pcm512x_set_bias_level,
.idle_bias_off = true,
- .controls = pcm512x_controls,
- .num_controls = ARRAY_SIZE(pcm512x_controls),
- .dapm_widgets = pcm512x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pcm512x_dapm_widgets),
- .dapm_routes = pcm512x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(pcm512x_dapm_routes),
+ .component_driver = {
+ .controls = pcm512x_controls,
+ .num_controls = ARRAY_SIZE(pcm512x_controls),
+ .dapm_widgets = pcm512x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm512x_dapm_widgets),
+ .dapm_routes = pcm512x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm512x_dapm_routes),
+ },
};
static const struct regmap_range_cfg pcm512x_range = {
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 74c0e4eb3788..9c365a7f758d 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1053,12 +1053,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt286 = {
.resume = rt286_resume,
.set_bias_level = rt286_set_bias_level,
.idle_bias_off = true,
- .controls = rt286_snd_controls,
- .num_controls = ARRAY_SIZE(rt286_snd_controls),
- .dapm_widgets = rt286_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt286_dapm_widgets),
- .dapm_routes = rt286_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt286_dapm_routes),
+ .component_driver = {
+ .controls = rt286_snd_controls,
+ .num_controls = ARRAY_SIZE(rt286_snd_controls),
+ .dapm_widgets = rt286_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt286_dapm_widgets),
+ .dapm_routes = rt286_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt286_dapm_routes),
+ },
};
static const struct regmap_config rt286_regmap = {
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index f80cfe4d2ef2..55558643166f 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -1095,12 +1095,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt298 = {
.resume = rt298_resume,
.set_bias_level = rt298_set_bias_level,
.idle_bias_off = true,
- .controls = rt298_snd_controls,
- .num_controls = ARRAY_SIZE(rt298_snd_controls),
- .dapm_widgets = rt298_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt298_dapm_widgets),
- .dapm_routes = rt298_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
+ .component_driver = {
+ .controls = rt298_snd_controls,
+ .num_controls = ARRAY_SIZE(rt298_snd_controls),
+ .dapm_widgets = rt298_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt298_dapm_widgets),
+ .dapm_routes = rt298_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
+ },
};
static const struct regmap_config rt298_regmap = {
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 77ff8ebe6dfb..09103aab0cb2 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -236,7 +236,7 @@ static snd_pcm_uframes_t rt5514_spi_pcm_pointer(
return bytes_to_frames(runtime, rt5514_dsp->dma_offset);
}
-static struct snd_pcm_ops rt5514_spi_pcm_ops = {
+static const struct snd_pcm_ops rt5514_spi_pcm_ops = {
.open = rt5514_spi_pcm_open,
.hw_params = rt5514_spi_hw_params,
.hw_free = rt5514_spi_hw_free,
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 7162f05101d9..f24b7cfd3a89 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -278,7 +278,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
8, 8, TLV_DB_SCALE_ITEM(1700, 0, 0)
);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -352,10 +352,10 @@ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
SOC_DOUBLE_R_TLV("ADC1 Capture Volume", RT5514_DOWNFILTER0_CTRL1,
- RT5514_DOWNFILTER0_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
+ RT5514_DOWNFILTER0_CTRL2, RT5514_AD_GAIN_SFT, 63, 0,
adc_vol_tlv),
SOC_DOUBLE_R_TLV("ADC2 Capture Volume", RT5514_DOWNFILTER1_CTRL1,
- RT5514_DOWNFILTER1_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
+ RT5514_DOWNFILTER1_CTRL2, RT5514_AD_GAIN_SFT, 63, 0,
adc_vol_tlv),
SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0,
rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put),
@@ -1023,12 +1023,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5514 = {
.probe = rt5514_probe,
.idle_bias_off = true,
.set_bias_level = rt5514_set_bias_level,
- .controls = rt5514_snd_controls,
- .num_controls = ARRAY_SIZE(rt5514_snd_controls),
- .dapm_widgets = rt5514_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5514_dapm_widgets),
- .dapm_routes = rt5514_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5514_dapm_routes),
+ .component_driver = {
+ .controls = rt5514_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5514_snd_controls),
+ .dapm_widgets = rt5514_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5514_dapm_widgets),
+ .dapm_routes = rt5514_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5514_dapm_routes),
+ },
};
static const struct regmap_config rt5514_i2c_regmap = {
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 68883c68e999..229de0e2c88c 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -196,8 +196,8 @@
#define RT5514_AD_AD_MIX_BIT 10
#define RT5514_AD_AD_MUTE (0x1 << 7)
#define RT5514_AD_AD_MUTE_BIT 7
-#define RT5514_AD_GAIN_MASK (0x7f << 0)
-#define RT5514_AD_GAIN_SFT 0
+#define RT5514_AD_GAIN_MASK (0x3f << 1)
+#define RT5514_AD_GAIN_SFT 1
/* RT5514_ANA_CTRL_MICBST (0x2220) */
#define RT5514_SEL_BSTL_MASK (0xf << 4)
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index f527b5b2817b..d1f273b24991 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -294,8 +294,7 @@ static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(bst_tlv,
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
@@ -303,7 +302,7 @@ static unsigned int bst_tlv[] = {
6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+);
static const struct snd_kcontrol_new rt5616_snd_controls[] = {
/* Headphone Output Volume */
@@ -1267,14 +1266,14 @@ static int rt5616_resume(struct snd_soc_codec *codec)
#define RT5616_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
-struct snd_soc_dai_ops rt5616_aif_dai_ops = {
+static struct snd_soc_dai_ops rt5616_aif_dai_ops = {
.hw_params = rt5616_hw_params,
.set_fmt = rt5616_set_dai_fmt,
.set_sysclk = rt5616_set_dai_sysclk,
.set_pll = rt5616_set_dai_pll,
};
-struct snd_soc_dai_driver rt5616_dai[] = {
+static struct snd_soc_dai_driver rt5616_dai[] = {
{
.name = "rt5616-aif1",
.id = RT5616_AIF1,
@@ -1302,12 +1301,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5616 = {
.resume = rt5616_resume,
.set_bias_level = rt5616_set_bias_level,
.idle_bias_off = true,
- .controls = rt5616_snd_controls,
- .num_controls = ARRAY_SIZE(rt5616_snd_controls),
- .dapm_widgets = rt5616_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5616_dapm_widgets),
- .dapm_routes = rt5616_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5616_dapm_routes),
+ .component_driver = {
+ .controls = rt5616_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5616_snd_controls),
+ .dapm_widgets = rt5616_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5616_dapm_widgets),
+ .dapm_routes = rt5616_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5616_dapm_routes),
+ },
};
static const struct regmap_config rt5616_regmap = {
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 1be2bab7dee3..0e418089c053 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1657,12 +1657,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5631 = {
.probe = rt5631_probe,
.set_bias_level = rt5631_set_bias_level,
.suspend_bias_off = true,
- .controls = rt5631_snd_controls,
- .num_controls = ARRAY_SIZE(rt5631_snd_controls),
- .dapm_widgets = rt5631_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5631_dapm_widgets),
- .dapm_routes = rt5631_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5631_dapm_routes),
+ .component_driver = {
+ .controls = rt5631_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5631_snd_controls),
+ .dapm_widgets = rt5631_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5631_dapm_widgets),
+ .dapm_routes = rt5631_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5631_dapm_routes),
+ },
};
static const struct i2c_device_id rt5631_i2c_id[] = {
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 09e8988bbb2d..3cc1135fc2cd 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -1870,6 +1870,9 @@ static int rt5640_set_dai_sysclk(struct snd_soc_dai *dai,
case RT5640_SCLK_S_PLL1:
reg_val |= RT5640_SCLK_SRC_PLL1;
break;
+ case RT5640_SCLK_S_RCCLK:
+ reg_val |= RT5640_SCLK_SRC_RCCLK;
+ break;
default:
dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
return -EINVAL;
@@ -2261,12 +2264,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
.resume = rt5640_resume,
.set_bias_level = rt5640_set_bias_level,
.idle_bias_off = true,
- .controls = rt5640_snd_controls,
- .num_controls = ARRAY_SIZE(rt5640_snd_controls),
- .dapm_widgets = rt5640_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5640_dapm_widgets),
- .dapm_routes = rt5640_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5640_dapm_routes),
+ .component_driver = {
+ .controls = rt5640_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5640_snd_controls),
+ .dapm_widgets = rt5640_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5640_dapm_widgets),
+ .dapm_routes = rt5640_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5640_dapm_routes),
+ },
};
static const struct regmap_config rt5640_regmap = {
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 58b664b06c16..90c88711c72a 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -984,6 +984,7 @@
#define RT5640_SCLK_SRC_SFT 14
#define RT5640_SCLK_SRC_MCLK (0x0 << 14)
#define RT5640_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5640_SCLK_SRC_RCCLK (0x2 << 14)
#define RT5640_PLL1_SRC_MASK (0x3 << 12)
#define RT5640_PLL1_SRC_SFT 12
#define RT5640_PLL1_SRC_MCLK (0x0 << 12)
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 490bfe661346..10c2a564a715 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3484,12 +3484,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5645 = {
.resume = rt5645_resume,
.set_bias_level = rt5645_set_bias_level,
.idle_bias_off = true,
- .controls = rt5645_snd_controls,
- .num_controls = ARRAY_SIZE(rt5645_snd_controls),
- .dapm_widgets = rt5645_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5645_dapm_widgets),
- .dapm_routes = rt5645_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5645_dapm_routes),
+ .component_driver = {
+ .controls = rt5645_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5645_snd_controls),
+ .dapm_widgets = rt5645_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5645_dapm_widgets),
+ .dapm_routes = rt5645_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5645_dapm_routes),
+ },
};
static const struct regmap_config rt5645_regmap = {
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 7a6197042423..f5d34153e21f 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -1712,12 +1712,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5651 = {
.resume = rt5651_resume,
.set_bias_level = rt5651_set_bias_level,
.idle_bias_off = true,
- .controls = rt5651_snd_controls,
- .num_controls = ARRAY_SIZE(rt5651_snd_controls),
- .dapm_widgets = rt5651_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5651_dapm_widgets),
- .dapm_routes = rt5651_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5651_dapm_routes),
+ .component_driver = {
+ .controls = rt5651_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5651_snd_controls),
+ .dapm_widgets = rt5651_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5651_dapm_widgets),
+ .dapm_routes = rt5651_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5651_dapm_routes),
+ },
};
static const struct regmap_config rt5651_regmap = {
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 1b30914c2d91..db54550aed60 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -3565,7 +3566,9 @@ static int rt5659_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
static int rt5659_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct rt5659_priv *rt5659 = snd_soc_codec_get_drvdata(codec);
+ int ret;
switch (level) {
case SND_SOC_BIAS_PREPARE:
@@ -3582,6 +3585,17 @@ static int rt5659_set_bias_level(struct snd_soc_codec *codec,
RT5659_PWR_FV1 | RT5659_PWR_FV2);
break;
+ case SND_SOC_BIAS_STANDBY:
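+ /* When coming back up from BIAS_OFF, re-enable the optional MCLK */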
+ if (dapm->bias_level == SND_SOC_BIAS_OFF) {
+ ret = clk_prepare_enable(rt5659->mclk);
+ if (ret) {
+ dev_err(codec->dev,
+ "failed to enable MCLK: %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
case SND_SOC_BIAS_OFF:
regmap_update_bits(rt5659->regmap, RT5659_PWR_DIG_1,
RT5659_PWR_LDO, 0);
@@ -3591,6 +3605,7 @@ static int rt5659_set_bias_level(struct snd_soc_codec *codec,
RT5659_PWR_MB | RT5659_PWR_VREF2);
regmap_update_bits(rt5659->regmap, RT5659_DIG_MISC,
RT5659_DIG_GATE_CTRL, 0);
+ clk_disable_unprepare(rt5659->mclk);
break;
default:
@@ -3722,12 +3737,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5659 = {
.resume = rt5659_resume,
.set_bias_level = rt5659_set_bias_level,
.idle_bias_off = true,
- .controls = rt5659_snd_controls,
- .num_controls = ARRAY_SIZE(rt5659_snd_controls),
- .dapm_widgets = rt5659_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5659_dapm_widgets),
- .dapm_routes = rt5659_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5659_dapm_routes),
+ .component_driver = {
+ .controls = rt5659_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5659_snd_controls),
+ .dapm_widgets = rt5659_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5659_dapm_widgets),
+ .dapm_routes = rt5659_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5659_dapm_routes),
+ },
};
@@ -4020,6 +4037,15 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
regmap_write(rt5659->regmap, RT5659_RESET, 0);
+ /* Check if MCLK provided */
+ rt5659->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (IS_ERR(rt5659->mclk)) {
+ if (PTR_ERR(rt5659->mclk) != -ENOENT)
+ return PTR_ERR(rt5659->mclk);
+ /* Otherwise set the mclk pointer to NULL; MCLK is optional */
+ rt5659->mclk = NULL;
+ }
+
rt5659_calibrate(rt5659);
/* line in diff mode*/
@@ -4163,6 +4189,9 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
if (ret)
dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+ /* Enable IRQ output for the GPIO1 pin anyway */
+ regmap_update_bits(rt5659->regmap, RT5659_GPIO_CTRL_1,
+ RT5659_GP1_PIN_MASK, RT5659_GP1_PIN_IRQ);
}
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
diff --git a/sound/soc/codecs/rt5659.h b/sound/soc/codecs/rt5659.h
index d31c9e5bcec8..8f1aeef08489 100644
--- a/sound/soc/codecs/rt5659.h
+++ b/sound/soc/codecs/rt5659.h
@@ -180,9 +180,9 @@
#define RT5659_IRQ_CTRL_1 0x00b6
#define RT5659_IRQ_CTRL_2 0x00b7
#define RT5659_IRQ_CTRL_3 0x00b8
-#define RT5659_IRQ_CTRL_4 0x00b9
-#define RT5659_IRQ_CTRL_5 0x00ba
-#define RT5659_IRQ_CTRL_6 0x00bb
+#define RT5659_IRQ_CTRL_4 0x00ba
+#define RT5659_IRQ_CTRL_5 0x00bb
+#define RT5659_IRQ_CTRL_6 0x00bc
#define RT5659_INT_ST_1 0x00be
#define RT5659_INT_ST_2 0x00bf
#define RT5659_GPIO_CTRL_1 0x00c0
@@ -1796,6 +1796,7 @@ struct rt5659_priv {
struct gpio_desc *gpiod_reset;
struct snd_soc_jack *hs_jack;
struct delayed_work jack_detect_work;
+ struct clk *mclk;
int sysclk;
int sysclk_src;
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
new file mode 100644
index 000000000000..9f0933ced804
--- /dev/null
+++ b/sound/soc/codecs/rt5660.c
@@ -0,0 +1,1353 @@
+/*
+ * rt5660.c -- RT5660 ALSA SoC audio codec driver
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rl6231.h"
+#include "rt5660.h"
+
+#define RT5660_DEVICE_ID 0x6338
+
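+/*
+ * Private (indexed) registers are mapped into a virtual range above the last
+ * real register; accesses go through RT5660_PRIV_INDEX/RT5660_PRIV_DATA via
+ * the rt5660_ranges window below.
+ */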
+#define RT5660_PR_RANGE_BASE (0xff + 1)
+#define RT5660_PR_SPACING 0x100
+
+#define RT5660_PR_BASE (RT5660_PR_RANGE_BASE + (0 * RT5660_PR_SPACING))
+
+static const struct regmap_range_cfg rt5660_ranges[] = {
+ { .name = "PR", .range_min = RT5660_PR_BASE,
+ .range_max = RT5660_PR_BASE + 0xf3,
+ .selector_reg = RT5660_PRIV_INDEX,
+ .selector_mask = 0xff,
+ .selector_shift = 0x0,
+ .window_start = RT5660_PRIV_DATA,
+ .window_len = 0x1, },
+};
+
+static const struct reg_sequence rt5660_patch[] = {
+ { RT5660_ALC_PGA_CTRL2, 0x44c3 },
+ { RT5660_PR_BASE + 0x3d, 0x2600 },
+};
+
+static const struct reg_default rt5660_reg[] = {
+ { 0x00, 0x0000 },
+ { 0x01, 0xc800 },
+ { 0x02, 0xc8c8 },
+ { 0x0d, 0x1010 },
+ { 0x0e, 0x1010 },
+ { 0x19, 0xafaf },
+ { 0x1c, 0x2f2f },
+ { 0x1e, 0x0000 },
+ { 0x27, 0x6060 },
+ { 0x29, 0x8080 },
+ { 0x2a, 0x4242 },
+ { 0x2f, 0x0000 },
+ { 0x3b, 0x0000 },
+ { 0x3c, 0x007f },
+ { 0x3d, 0x0000 },
+ { 0x3e, 0x007f },
+ { 0x45, 0xe000 },
+ { 0x46, 0x003e },
+ { 0x48, 0xf800 },
+ { 0x4a, 0x0004 },
+ { 0x4d, 0x0000 },
+ { 0x4e, 0x0000 },
+ { 0x4f, 0x01ff },
+ { 0x50, 0x0000 },
+ { 0x51, 0x0000 },
+ { 0x52, 0x01ff },
+ { 0x61, 0x0000 },
+ { 0x62, 0x0000 },
+ { 0x63, 0x00c0 },
+ { 0x64, 0x0000 },
+ { 0x65, 0x0000 },
+ { 0x66, 0x0000 },
+ { 0x70, 0x8000 },
+ { 0x73, 0x7000 },
+ { 0x74, 0x3c00 },
+ { 0x75, 0x2800 },
+ { 0x80, 0x0000 },
+ { 0x81, 0x0000 },
+ { 0x82, 0x0000 },
+ { 0x8c, 0x0228 },
+ { 0x8d, 0xa000 },
+ { 0x8e, 0x0000 },
+ { 0x92, 0x0000 },
+ { 0x93, 0x3000 },
+ { 0xa1, 0x0059 },
+ { 0xa2, 0x0001 },
+ { 0xa3, 0x5c80 },
+ { 0xa4, 0x0146 },
+ { 0xa5, 0x1f1f },
+ { 0xa6, 0x78c6 },
+ { 0xa7, 0xe5ec },
+ { 0xa8, 0xba61 },
+ { 0xa9, 0x3c78 },
+ { 0xaa, 0x8ae2 },
+ { 0xab, 0xe5ec },
+ { 0xac, 0xc600 },
+ { 0xad, 0xba61 },
+ { 0xae, 0x17ed },
+ { 0xb0, 0x2080 },
+ { 0xb1, 0x0000 },
+ { 0xb3, 0x001f },
+ { 0xb4, 0x020c },
+ { 0xb5, 0x1f00 },
+ { 0xb6, 0x0000 },
+ { 0xb7, 0x4000 },
+ { 0xbb, 0x0000 },
+ { 0xbd, 0x0000 },
+ { 0xbe, 0x0000 },
+ { 0xbf, 0x0100 },
+ { 0xc0, 0x0000 },
+ { 0xc2, 0x0000 },
+ { 0xd3, 0xa220 },
+ { 0xd9, 0x0809 },
+ { 0xda, 0x0000 },
+ { 0xe0, 0x8000 },
+ { 0xe1, 0x0200 },
+ { 0xe2, 0x8000 },
+ { 0xe3, 0x0200 },
+ { 0xe4, 0x0f20 },
+ { 0xe5, 0x001f },
+ { 0xe6, 0x020c },
+ { 0xe7, 0x1f00 },
+ { 0xe8, 0x0000 },
+ { 0xe9, 0x4000 },
+ { 0xea, 0x00a6 },
+ { 0xeb, 0x04c3 },
+ { 0xec, 0x27c8 },
+ { 0xed, 0x7418 },
+ { 0xee, 0xbf50 },
+ { 0xef, 0x0045 },
+ { 0xf0, 0x0007 },
+ { 0xfa, 0x0000 },
+ { 0xfd, 0x0000 },
+ { 0xfe, 0x10ec },
+ { 0xff, 0x6338 },
+};
+
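+/*
+ * Volatile registers (reset, status, vendor ID and the private-register
+ * window) must always be read from the hardware rather than the regmap cache.
+ */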
+static bool rt5660_volatile_register(struct device *dev, unsigned int reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rt5660_ranges); i++)
+ if ((reg >= rt5660_ranges[i].window_start &&
+ reg <= rt5660_ranges[i].window_start +
+ rt5660_ranges[i].window_len) ||
+ (reg >= rt5660_ranges[i].range_min &&
+ reg <= rt5660_ranges[i].range_max))
+ return true;
+
+ switch (reg) {
+ case RT5660_RESET:
+ case RT5660_PRIV_DATA:
+ case RT5660_EQ_CTRL1:
+ case RT5660_IRQ_CTRL2:
+ case RT5660_INT_IRQ_ST:
+ case RT5660_VENDOR_ID:
+ case RT5660_VENDOR_ID1:
+ case RT5660_VENDOR_ID2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5660_readable_register(struct device *dev, unsigned int reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rt5660_ranges); i++)
+ if ((reg >= rt5660_ranges[i].window_start &&
+ reg <= rt5660_ranges[i].window_start +
+ rt5660_ranges[i].window_len) ||
+ (reg >= rt5660_ranges[i].range_min &&
+ reg <= rt5660_ranges[i].range_max))
+ return true;
+
+ switch (reg) {
+ case RT5660_RESET:
+ case RT5660_SPK_VOL:
+ case RT5660_LOUT_VOL:
+ case RT5660_IN1_IN2:
+ case RT5660_IN3_IN4:
+ case RT5660_DAC1_DIG_VOL:
+ case RT5660_STO1_ADC_DIG_VOL:
+ case RT5660_ADC_BST_VOL1:
+ case RT5660_STO1_ADC_MIXER:
+ case RT5660_AD_DA_MIXER:
+ case RT5660_STO_DAC_MIXER:
+ case RT5660_DIG_INF1_DATA:
+ case RT5660_REC_L1_MIXER:
+ case RT5660_REC_L2_MIXER:
+ case RT5660_REC_R1_MIXER:
+ case RT5660_REC_R2_MIXER:
+ case RT5660_LOUT_MIXER:
+ case RT5660_SPK_MIXER:
+ case RT5660_SPO_MIXER:
+ case RT5660_SPO_CLSD_RATIO:
+ case RT5660_OUT_L_GAIN1:
+ case RT5660_OUT_L_GAIN2:
+ case RT5660_OUT_L1_MIXER:
+ case RT5660_OUT_R_GAIN1:
+ case RT5660_OUT_R_GAIN2:
+ case RT5660_OUT_R1_MIXER:
+ case RT5660_PWR_DIG1:
+ case RT5660_PWR_DIG2:
+ case RT5660_PWR_ANLG1:
+ case RT5660_PWR_ANLG2:
+ case RT5660_PWR_MIXER:
+ case RT5660_PWR_VOL:
+ case RT5660_PRIV_INDEX:
+ case RT5660_PRIV_DATA:
+ case RT5660_I2S1_SDP:
+ case RT5660_ADDA_CLK1:
+ case RT5660_ADDA_CLK2:
+ case RT5660_DMIC_CTRL1:
+ case RT5660_GLB_CLK:
+ case RT5660_PLL_CTRL1:
+ case RT5660_PLL_CTRL2:
+ case RT5660_CLSD_AMP_OC_CTRL:
+ case RT5660_CLSD_AMP_CTRL:
+ case RT5660_LOUT_AMP_CTRL:
+ case RT5660_SPK_AMP_SPKVDD:
+ case RT5660_MICBIAS:
+ case RT5660_CLSD_OUT_CTRL1:
+ case RT5660_CLSD_OUT_CTRL2:
+ case RT5660_DIPOLE_MIC_CTRL1:
+ case RT5660_DIPOLE_MIC_CTRL2:
+ case RT5660_DIPOLE_MIC_CTRL3:
+ case RT5660_DIPOLE_MIC_CTRL4:
+ case RT5660_DIPOLE_MIC_CTRL5:
+ case RT5660_DIPOLE_MIC_CTRL6:
+ case RT5660_DIPOLE_MIC_CTRL7:
+ case RT5660_DIPOLE_MIC_CTRL8:
+ case RT5660_DIPOLE_MIC_CTRL9:
+ case RT5660_DIPOLE_MIC_CTRL10:
+ case RT5660_DIPOLE_MIC_CTRL11:
+ case RT5660_DIPOLE_MIC_CTRL12:
+ case RT5660_EQ_CTRL1:
+ case RT5660_EQ_CTRL2:
+ case RT5660_DRC_AGC_CTRL1:
+ case RT5660_DRC_AGC_CTRL2:
+ case RT5660_DRC_AGC_CTRL3:
+ case RT5660_DRC_AGC_CTRL4:
+ case RT5660_DRC_AGC_CTRL5:
+ case RT5660_JD_CTRL:
+ case RT5660_IRQ_CTRL1:
+ case RT5660_IRQ_CTRL2:
+ case RT5660_INT_IRQ_ST:
+ case RT5660_GPIO_CTRL1:
+ case RT5660_GPIO_CTRL2:
+ case RT5660_WIND_FILTER_CTRL1:
+ case RT5660_SV_ZCD1:
+ case RT5660_SV_ZCD2:
+ case RT5660_DRC1_LM_CTRL1:
+ case RT5660_DRC1_LM_CTRL2:
+ case RT5660_DRC2_LM_CTRL1:
+ case RT5660_DRC2_LM_CTRL2:
+ case RT5660_MULTI_DRC_CTRL:
+ case RT5660_DRC2_CTRL1:
+ case RT5660_DRC2_CTRL2:
+ case RT5660_DRC2_CTRL3:
+ case RT5660_DRC2_CTRL4:
+ case RT5660_DRC2_CTRL5:
+ case RT5660_ALC_PGA_CTRL1:
+ case RT5660_ALC_PGA_CTRL2:
+ case RT5660_ALC_PGA_CTRL3:
+ case RT5660_ALC_PGA_CTRL4:
+ case RT5660_ALC_PGA_CTRL5:
+ case RT5660_ALC_PGA_CTRL6:
+ case RT5660_ALC_PGA_CTRL7:
+ case RT5660_GEN_CTRL1:
+ case RT5660_GEN_CTRL2:
+ case RT5660_GEN_CTRL3:
+ case RT5660_VENDOR_ID:
+ case RT5660_VENDOR_ID1:
+ case RT5660_VENDOR_ID2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(rt5660_out_vol_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(rt5660_dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(rt5660_adc_vol_tlv, -1725, 75, 0);
+static const DECLARE_TLV_DB_SCALE(rt5660_adc_bst_tlv, 0, 1200, 0);
+static const DECLARE_TLV_DB_SCALE(rt5660_bst_tlv, -1200, 75, 0);
+
+static const struct snd_kcontrol_new rt5660_snd_controls[] = {
+ /* Speaker Output Volume */
+ SOC_SINGLE("Speaker Playback Switch", RT5660_SPK_VOL, RT5660_L_MUTE_SFT,
+ 1, 1),
+ SOC_SINGLE_TLV("Speaker Playback Volume", RT5660_SPK_VOL,
+ RT5660_L_VOL_SFT, 39, 1, rt5660_out_vol_tlv),
+
+ /* OUTPUT Control */
+ SOC_DOUBLE("OUT Playback Switch", RT5660_LOUT_VOL, RT5660_L_MUTE_SFT,
+ RT5660_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("OUT Playback Volume", RT5660_LOUT_VOL, RT5660_L_VOL_SFT,
+ RT5660_R_VOL_SFT, 39, 1, rt5660_out_vol_tlv),
+
+ /* DAC Digital Volume */
+ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5660_DAC1_DIG_VOL,
+ RT5660_DAC_L1_VOL_SFT, RT5660_DAC_R1_VOL_SFT, 87, 0,
+ rt5660_dac_vol_tlv),
+
+ /* IN1/IN2/IN3 Control */
+ SOC_SINGLE_TLV("IN1 Boost Volume", RT5660_IN1_IN2, RT5660_BST_SFT1, 69,
+ 0, rt5660_bst_tlv),
+ SOC_SINGLE_TLV("IN2 Boost Volume", RT5660_IN1_IN2, RT5660_BST_SFT2, 69,
+ 0, rt5660_bst_tlv),
+ SOC_SINGLE_TLV("IN3 Boost Volume", RT5660_IN3_IN4, RT5660_BST_SFT3, 69,
+ 0, rt5660_bst_tlv),
+
+ /* ADC Digital Volume Control */
+ SOC_DOUBLE("ADC Capture Switch", RT5660_STO1_ADC_DIG_VOL,
+ RT5660_L_MUTE_SFT, RT5660_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("ADC Capture Volume", RT5660_STO1_ADC_DIG_VOL,
+ RT5660_ADC_L_VOL_SFT, RT5660_ADC_R_VOL_SFT, 63, 0,
+ rt5660_adc_vol_tlv),
+
+ /* ADC Boost Volume Control */
+ SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5660_ADC_BST_VOL1,
+ RT5660_STO1_ADC_L_BST_SFT, RT5660_STO1_ADC_R_BST_SFT, 3, 0,
+ rt5660_adc_bst_tlv),
+};
+
+/**
+ * rt5660_set_dmic_clk - Set the DMIC clock divider.
+ *
+ * @w: DAPM widget.
+ * @kcontrol: The kcontrol of this widget.
+ * @event: Event id.
+ */
+static int rt5660_set_dmic_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ int idx, rate;
+
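+ /* Derive the DMIC clock from sysclk and the current I2S pre-divider */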
+ rate = rt5660->sysclk / rl6231_get_pre_div(rt5660->regmap,
+ RT5660_ADDA_CLK1, RT5660_I2S_PD1_SFT);
+ idx = rl6231_calc_dmic_clk(rate);
+ if (idx < 0)
+ dev_err(codec->dev, "Failed to set DMIC clock\n");
+ else
+ snd_soc_update_bits(codec, RT5660_DMIC_CTRL1,
+ RT5660_DMIC_CLK_MASK, idx << RT5660_DMIC_CLK_SFT);
+
+ return idx;
+}
+
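+/* DAPM route check: PLL1 is only needed when sysclk is sourced from PLL1 */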
+static int rt5660_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ unsigned int val;
+
+ val = snd_soc_read(codec, RT5660_GLB_CLK);
+ val &= RT5660_SCLK_SRC_MASK;
+ if (val == RT5660_SCLK_SRC_PLL1)
+ return 1;
+ else
+ return 0;
+}
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5660_sto1_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5660_STO1_ADC_MIXER,
+ RT5660_M_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5660_STO1_ADC_MIXER,
+ RT5660_M_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_sto1_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5660_STO1_ADC_MIXER,
+ RT5660_M_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5660_STO1_ADC_MIXER,
+ RT5660_M_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5660_AD_DA_MIXER,
+ RT5660_M_ADCMIX_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5660_AD_DA_MIXER,
+ RT5660_M_DAC1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5660_AD_DA_MIXER,
+ RT5660_M_ADCMIX_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5660_AD_DA_MIXER,
+ RT5660_M_DAC1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_sto_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5660_STO_DAC_MIXER,
+ RT5660_M_DAC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5660_STO_DAC_MIXER,
+ RT5660_M_DAC_R1_STO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_sto_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5660_STO_DAC_MIXER,
+ RT5660_M_DAC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5660_STO_DAC_MIXER,
+ RT5660_M_DAC_L1_STO_R_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt5660_rec_l_mix[] = {
+ SOC_DAPM_SINGLE("BST3 Switch", RT5660_REC_L2_MIXER,
+ RT5660_M_BST3_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5660_REC_L2_MIXER,
+ RT5660_M_BST2_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_REC_L2_MIXER,
+ RT5660_M_BST1_RM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXL Switch", RT5660_REC_L2_MIXER,
+ RT5660_M_OM_L_RM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_rec_r_mix[] = {
+ SOC_DAPM_SINGLE("BST3 Switch", RT5660_REC_R2_MIXER,
+ RT5660_M_BST3_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5660_REC_R2_MIXER,
+ RT5660_M_BST2_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_REC_R2_MIXER,
+ RT5660_M_BST1_RM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUT MIXR Switch", RT5660_REC_R2_MIXER,
+ RT5660_M_OM_R_RM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_spk_mix[] = {
+ SOC_DAPM_SINGLE("BST3 Switch", RT5660_SPK_MIXER,
+ RT5660_M_BST3_SM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_SPK_MIXER,
+ RT5660_M_BST1_SM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACL Switch", RT5660_SPK_MIXER,
+ RT5660_M_DACL_SM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACR Switch", RT5660_SPK_MIXER,
+ RT5660_M_DACR_SM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTMIXL Switch", RT5660_SPK_MIXER,
+ RT5660_M_OM_L_SM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_out_l_mix[] = {
+ SOC_DAPM_SINGLE("BST3 Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_BST3_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST2 Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_BST2_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_BST1_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("RECMIXL Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_RM_L_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACR Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_DAC_R_OM_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACL Switch", RT5660_OUT_L1_MIXER,
+ RT5660_M_DAC_L_OM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_out_r_mix[] = {
+ SOC_DAPM_SINGLE("BST2 Switch", RT5660_OUT_R1_MIXER,
+ RT5660_M_BST2_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_OUT_R1_MIXER,
+ RT5660_M_BST1_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("RECMIXR Switch", RT5660_OUT_R1_MIXER,
+ RT5660_M_RM_R_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACR Switch", RT5660_OUT_R1_MIXER,
+ RT5660_M_DAC_R_OM_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACL Switch", RT5660_OUT_R1_MIXER,
+ RT5660_M_DAC_L_OM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_spo_mix[] = {
+ SOC_DAPM_SINGLE("DACR Switch", RT5660_SPO_MIXER,
+ RT5660_M_DAC_R_SPM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DACL Switch", RT5660_SPO_MIXER,
+ RT5660_M_DAC_L_SPM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("SPKVOL Switch", RT5660_SPO_MIXER,
+ RT5660_M_SV_SPM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 Switch", RT5660_SPO_MIXER,
+ RT5660_M_BST1_SPM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5660_lout_mix[] = {
+ SOC_DAPM_SINGLE("DAC Switch", RT5660_LOUT_MIXER,
+ RT5660_M_DAC1_LM_SFT, 1, 1),
+ SOC_DAPM_SINGLE("OUTMIX Switch", RT5660_LOUT_MIXER,
+ RT5660_M_LOVOL_LM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new spk_vol_control =
+ SOC_DAPM_SINGLE("Switch", RT5660_SPK_VOL,
+ RT5660_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new lout_l_vol_control =
+ SOC_DAPM_SINGLE("Switch", RT5660_LOUT_VOL,
+ RT5660_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new lout_r_vol_control =
+ SOC_DAPM_SINGLE("Switch", RT5660_LOUT_VOL,
+ RT5660_VOL_R_SFT, 1, 1);
+
+/* Interface data select */
+static const char * const rt5660_data_select[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5660_if1_dac_enum,
+ RT5660_DIG_INF1_DATA, RT5660_IF1_DAC_IN_SFT, rt5660_data_select);
+
+static SOC_ENUM_SINGLE_DECL(rt5660_if1_adc_enum,
+ RT5660_DIG_INF1_DATA, RT5660_IF1_ADC_IN_SFT, rt5660_data_select);
+
+static const struct snd_kcontrol_new rt5660_if1_dac_swap_mux =
+ SOC_DAPM_ENUM("IF1 DAC Swap Source", rt5660_if1_dac_enum);
+
+static const struct snd_kcontrol_new rt5660_if1_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC Swap Source", rt5660_if1_adc_enum);
+
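+/* Assert the LOUT amp control bits after power up and clear them again
+ * just before power down.
+ */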
+static int rt5660_lout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, RT5660_LOUT_AMP_CTRL,
+ RT5660_LOUT_CO_MASK | RT5660_LOUT_CB_MASK,
+ RT5660_LOUT_CO_EN | RT5660_LOUT_CB_PU);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, RT5660_LOUT_AMP_CTRL,
+ RT5660_LOUT_CO_MASK | RT5660_LOUT_CB_MASK,
+ RT5660_LOUT_CO_DIS | RT5660_LOUT_CB_PD);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget rt5660_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("LDO2", RT5660_PWR_ANLG1,
+ RT5660_PWR_LDO2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL1", RT5660_PWR_ANLG2,
+ RT5660_PWR_PLL_BIT, 0, NULL, 0),
+
+ /* MICBIAS */
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5660_PWR_ANLG2,
+ RT5660_PWR_MB1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5660_PWR_ANLG2,
+ RT5660_PWR_MB2_BIT, 0, NULL, 0),
+
+ /* Input Side */
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("DMIC L1"),
+ SND_SOC_DAPM_INPUT("DMIC R1"),
+
+ SND_SOC_DAPM_INPUT("IN1P"),
+ SND_SOC_DAPM_INPUT("IN1N"),
+ SND_SOC_DAPM_INPUT("IN2P"),
+ SND_SOC_DAPM_INPUT("IN3P"),
+ SND_SOC_DAPM_INPUT("IN3N"),
+
+ SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+ rt5660_set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY("DMIC Power", RT5660_DMIC_CTRL1,
+ RT5660_DMIC_1_EN_SFT, 0, NULL, 0),
+
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1", RT5660_PWR_ANLG2, RT5660_PWR_BST1_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("BST2", RT5660_PWR_ANLG2, RT5660_PWR_BST2_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("BST3", RT5660_PWR_ANLG2, RT5660_PWR_BST3_BIT, 0,
+ NULL, 0),
+
+ /* REC Mixer */
+ SND_SOC_DAPM_MIXER("RECMIXL", RT5660_PWR_MIXER, RT5660_PWR_RM_L_BIT,
+ 0, rt5660_rec_l_mix, ARRAY_SIZE(rt5660_rec_l_mix)),
+ SND_SOC_DAPM_MIXER("RECMIXR", RT5660_PWR_MIXER, RT5660_PWR_RM_R_BIT,
+ 0, rt5660_rec_r_mix, ARRAY_SIZE(rt5660_rec_r_mix)),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("ADC L power", RT5660_PWR_DIG1,
+ RT5660_PWR_ADC_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC R power", RT5660_PWR_DIG1,
+ RT5660_PWR_ADC_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC clock", RT5660_PR_BASE + RT5660_CHOP_DAC_ADC,
+ 12, 0, NULL, 0),
+
+ /* ADC Mixer */
+ SND_SOC_DAPM_SUPPLY("adc stereo1 filter", RT5660_PWR_DIG2,
+ RT5660_PWR_ADC_S1F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5660_sto1_adc_l_mix, ARRAY_SIZE(rt5660_sto1_adc_l_mix)),
+ SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5660_sto1_adc_r_mix, ARRAY_SIZE(rt5660_sto1_adc_r_mix)),
+
+ /* ADC */
+ SND_SOC_DAPM_ADC("Stereo1 ADC MIXL", NULL, RT5660_STO1_ADC_DIG_VOL,
+ RT5660_L_MUTE_SFT, 1),
+ SND_SOC_DAPM_ADC("Stereo1 ADC MIXR", NULL, RT5660_STO1_ADC_DIG_VOL,
+ RT5660_R_MUTE_SFT, 1),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_SUPPLY("I2S1", RT5660_PWR_DIG1, RT5660_PWR_I2S1_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("IF1 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5660_if1_dac_swap_mux),
+ SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("IF1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5660_if1_adc_swap_mux),
+
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ /* Output Side */
+ /* DAC mixer before sound effect */
+ SND_SOC_DAPM_MIXER("DAC1 MIXL", SND_SOC_NOPM, 0, 0, rt5660_dac_l_mix,
+ ARRAY_SIZE(rt5660_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0, rt5660_dac_r_mix,
+ ARRAY_SIZE(rt5660_dac_r_mix)),
+
+ /* DAC Mixer */
+ SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5660_PWR_DIG2,
+ RT5660_PWR_DAC_S1F_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5660_sto_dac_l_mix, ARRAY_SIZE(rt5660_sto_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5660_sto_dac_r_mix, ARRAY_SIZE(rt5660_sto_dac_r_mix)),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC L1", NULL, RT5660_PWR_DIG1,
+ RT5660_PWR_DAC_L1_BIT, 0),
+ SND_SOC_DAPM_DAC("DAC R1", NULL, RT5660_PWR_DIG1,
+ RT5660_PWR_DAC_R1_BIT, 0),
+
+ /* OUT Mixer */
+ SND_SOC_DAPM_MIXER("SPK MIX", RT5660_PWR_MIXER, RT5660_PWR_SM_BIT,
+ 0, rt5660_spk_mix, ARRAY_SIZE(rt5660_spk_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXL", RT5660_PWR_MIXER, RT5660_PWR_OM_L_BIT,
+ 0, rt5660_out_l_mix, ARRAY_SIZE(rt5660_out_l_mix)),
+ SND_SOC_DAPM_MIXER("OUT MIXR", RT5660_PWR_MIXER, RT5660_PWR_OM_R_BIT,
+ 0, rt5660_out_r_mix, ARRAY_SIZE(rt5660_out_r_mix)),
+
+ /* Output Volume */
+ SND_SOC_DAPM_SWITCH("SPKVOL", RT5660_PWR_VOL,
+ RT5660_PWR_SV_BIT, 0, &spk_vol_control),
+ SND_SOC_DAPM_PGA("DAC 1", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("LOUTVOL", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_SWITCH("LOUTVOL L", SND_SOC_NOPM,
+ RT5660_PWR_LV_L_BIT, 0, &lout_l_vol_control),
+ SND_SOC_DAPM_SWITCH("LOUTVOL R", SND_SOC_NOPM,
+ RT5660_PWR_LV_R_BIT, 0, &lout_r_vol_control),
+
+ /* HPO/LOUT/Mono Mixer */
+ SND_SOC_DAPM_MIXER("SPO MIX", SND_SOC_NOPM, 0,
+ 0, rt5660_spo_mix, ARRAY_SIZE(rt5660_spo_mix)),
+ SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
+ rt5660_lout_mix, ARRAY_SIZE(rt5660_lout_mix)),
+ SND_SOC_DAPM_SUPPLY("VREF HP", RT5660_GEN_CTRL1,
+ RT5660_PWR_VREF_HP_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA_S("LOUT amp", 1, RT5660_PWR_ANLG1,
+ RT5660_PWR_HA_BIT, 0, rt5660_lout_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_S("SPK amp", 1, RT5660_PWR_DIG1,
+ RT5660_PWR_CLS_D_BIT, 0, NULL, 0),
+
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("LOUTL"),
+ SND_SOC_DAPM_OUTPUT("LOUTR"),
+ SND_SOC_DAPM_OUTPUT("SPO"),
+};
+
+static const struct snd_soc_dapm_route rt5660_dapm_routes[] = {
+ { "MICBIAS1", NULL, "LDO2" },
+ { "MICBIAS2", NULL, "LDO2" },
+
+ { "BST1", NULL, "IN1P" },
+ { "BST1", NULL, "IN1N" },
+ { "BST2", NULL, "IN2P" },
+ { "BST3", NULL, "IN3P" },
+ { "BST3", NULL, "IN3N" },
+
+ { "RECMIXL", "BST3 Switch", "BST3" },
+ { "RECMIXL", "BST2 Switch", "BST2" },
+ { "RECMIXL", "BST1 Switch", "BST1" },
+ { "RECMIXL", "OUT MIXL Switch", "OUT MIXL" },
+
+ { "RECMIXR", "BST3 Switch", "BST3" },
+ { "RECMIXR", "BST2 Switch", "BST2" },
+ { "RECMIXR", "BST1 Switch", "BST1" },
+ { "RECMIXR", "OUT MIXR Switch", "OUT MIXR" },
+
+ { "ADC L", NULL, "RECMIXL" },
+ { "ADC L", NULL, "ADC L power" },
+ { "ADC L", NULL, "ADC clock" },
+ { "ADC R", NULL, "RECMIXR" },
+ { "ADC R", NULL, "ADC R power" },
+ { "ADC R", NULL, "ADC clock" },
+
+ {"DMIC L1", NULL, "DMIC CLK"},
+ {"DMIC L1", NULL, "DMIC Power"},
+ {"DMIC R1", NULL, "DMIC CLK"},
+ {"DMIC R1", NULL, "DMIC Power"},
+
+ { "Sto1 ADC MIXL", "ADC1 Switch", "ADC L" },
+ { "Sto1 ADC MIXL", "ADC2 Switch", "DMIC L1" },
+ { "Sto1 ADC MIXR", "ADC1 Switch", "ADC R" },
+ { "Sto1 ADC MIXR", "ADC2 Switch", "DMIC R1" },
+
+ { "Stereo1 ADC MIXL", NULL, "Sto1 ADC MIXL" },
+ { "Stereo1 ADC MIXL", NULL, "adc stereo1 filter" },
+ { "adc stereo1 filter", NULL, "PLL1", rt5660_is_sys_clk_from_pll },
+
+ { "Stereo1 ADC MIXR", NULL, "Sto1 ADC MIXR" },
+ { "Stereo1 ADC MIXR", NULL, "adc stereo1 filter" },
+ { "adc stereo1 filter", NULL, "PLL1", rt5660_is_sys_clk_from_pll },
+
+ { "IF1 ADC", NULL, "Stereo1 ADC MIXL" },
+ { "IF1 ADC", NULL, "Stereo1 ADC MIXR" },
+ { "IF1 ADC", NULL, "I2S1" },
+
+ { "IF1 ADC Swap Mux", "L/R", "IF1 ADC" },
+ { "IF1 ADC Swap Mux", "R/L", "IF1 ADC" },
+ { "IF1 ADC Swap Mux", "L/L", "IF1 ADC" },
+ { "IF1 ADC Swap Mux", "R/R", "IF1 ADC" },
+ { "AIF1TX", NULL, "IF1 ADC Swap Mux" },
+
+ { "IF1 DAC", NULL, "AIF1RX" },
+ { "IF1 DAC", NULL, "I2S1" },
+
+ { "IF1 DAC Swap Mux", "L/R", "IF1 DAC" },
+ { "IF1 DAC Swap Mux", "R/L", "IF1 DAC" },
+ { "IF1 DAC Swap Mux", "L/L", "IF1 DAC" },
+ { "IF1 DAC Swap Mux", "R/R", "IF1 DAC" },
+
+ { "IF1 DAC L", NULL, "IF1 DAC Swap Mux" },
+ { "IF1 DAC R", NULL, "IF1 DAC Swap Mux" },
+
+ { "DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL" },
+ { "DAC1 MIXL", "DAC1 Switch", "IF1 DAC L" },
+ { "DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR" },
+ { "DAC1 MIXR", "DAC1 Switch", "IF1 DAC R" },
+
+ { "Stereo DAC MIXL", "DAC L1 Switch", "DAC1 MIXL" },
+ { "Stereo DAC MIXL", "DAC R1 Switch", "DAC1 MIXR" },
+ { "Stereo DAC MIXL", NULL, "dac stereo1 filter" },
+ { "dac stereo1 filter", NULL, "PLL1", rt5660_is_sys_clk_from_pll },
+ { "Stereo DAC MIXR", "DAC R1 Switch", "DAC1 MIXR" },
+ { "Stereo DAC MIXR", "DAC L1 Switch", "DAC1 MIXL" },
+ { "Stereo DAC MIXR", NULL, "dac stereo1 filter" },
+ { "dac stereo1 filter", NULL, "PLL1", rt5660_is_sys_clk_from_pll },
+
+ { "DAC L1", NULL, "Stereo DAC MIXL" },
+ { "DAC R1", NULL, "Stereo DAC MIXR" },
+
+ { "SPK MIX", "BST3 Switch", "BST3" },
+ { "SPK MIX", "BST1 Switch", "BST1" },
+ { "SPK MIX", "DACL Switch", "DAC L1" },
+ { "SPK MIX", "DACR Switch", "DAC R1" },
+ { "SPK MIX", "OUTMIXL Switch", "OUT MIXL" },
+
+ { "OUT MIXL", "BST3 Switch", "BST3" },
+ { "OUT MIXL", "BST2 Switch", "BST2" },
+ { "OUT MIXL", "BST1 Switch", "BST1" },
+ { "OUT MIXL", "RECMIXL Switch", "RECMIXL" },
+ { "OUT MIXL", "DACR Switch", "DAC R1" },
+ { "OUT MIXL", "DACL Switch", "DAC L1" },
+
+ { "OUT MIXR", "BST2 Switch", "BST2" },
+ { "OUT MIXR", "BST1 Switch", "BST1" },
+ { "OUT MIXR", "RECMIXR Switch", "RECMIXR" },
+ { "OUT MIXR", "DACR Switch", "DAC R1" },
+ { "OUT MIXR", "DACL Switch", "DAC L1" },
+
+ { "SPO MIX", "DACR Switch", "DAC R1" },
+ { "SPO MIX", "DACL Switch", "DAC L1" },
+ { "SPO MIX", "SPKVOL Switch", "SPKVOL" },
+ { "SPO MIX", "BST1 Switch", "BST1" },
+
+ { "SPKVOL", "Switch", "SPK MIX" },
+ { "LOUTVOL L", "Switch", "OUT MIXL" },
+ { "LOUTVOL R", "Switch", "OUT MIXR" },
+
+ { "LOUTVOL", NULL, "LOUTVOL L" },
+ { "LOUTVOL", NULL, "LOUTVOL R" },
+
+ { "DAC 1", NULL, "DAC L1" },
+ { "DAC 1", NULL, "DAC R1" },
+
+ { "LOUT MIX", "DAC Switch", "DAC 1" },
+ { "LOUT MIX", "OUTMIX Switch", "LOUTVOL" },
+
+ { "LOUT amp", NULL, "LOUT MIX" },
+ { "LOUT amp", NULL, "VREF HP" },
+ { "LOUTL", NULL, "LOUT amp" },
+ { "LOUTR", NULL, "LOUT amp" },
+
+ { "SPK amp", NULL, "SPO MIX" },
+ { "SPO", NULL, "SPK amp" },
+};
+
+static int rt5660_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val_len = 0, val_clk, mask_clk;
+ int pre_div, bclk_ms, frame_size;
+
+ rt5660->lrck[dai->id] = params_rate(params);
+ pre_div = rl6231_get_clk_info(rt5660->sysclk, rt5660->lrck[dai->id]);
+ if (pre_div < 0) {
+ dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+ rt5660->lrck[dai->id], dai->id);
+ return -EINVAL;
+ }
+
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0) {
+ dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
+ return frame_size;
+ }
+
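+ /* Frame sizes above 32 bits need a 64 x LRCK bit clock */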
+ if (frame_size > 32)
+ bclk_ms = 1;
+ else
+ bclk_ms = 0;
+
+ rt5660->bclk[dai->id] = rt5660->lrck[dai->id] * (32 << bclk_ms);
+
+ dev_dbg(dai->dev, "bclk is %dHz and lrck is %dHz\n",
+ rt5660->bclk[dai->id], rt5660->lrck[dai->id]);
+ dev_dbg(dai->dev, "bclk_ms is %d and pre_div is %d for iis %d\n",
+ bclk_ms, pre_div, dai->id);
+
+ switch (params_width(params)) {
+ case 16:
+ break;
+ case 20:
+ val_len |= RT5660_I2S_DL_20;
+ break;
+ case 24:
+ val_len |= RT5660_I2S_DL_24;
+ break;
+ case 8:
+ val_len |= RT5660_I2S_DL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5660_AIF1:
+ mask_clk = RT5660_I2S_BCLK_MS1_MASK | RT5660_I2S_PD1_MASK;
+ val_clk = bclk_ms << RT5660_I2S_BCLK_MS1_SFT |
+ pre_div << RT5660_I2S_PD1_SFT;
+ snd_soc_update_bits(codec, RT5660_I2S1_SDP, RT5660_I2S_DL_MASK,
+ val_len);
+ snd_soc_update_bits(codec, RT5660_ADDA_CLK1, mask_clk, val_clk);
+ break;
+
+ default:
+ dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5660_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ rt5660->master[dai->id] = 1;
+ break;
+
+ case SND_SOC_DAIFMT_CBS_CFS:
+ reg_val |= RT5660_I2S_MS_S;
+ rt5660->master[dai->id] = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT5660_I2S_BP_INV;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT5660_I2S_DF_LEFT;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT5660_I2S_DF_PCM_A;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT5660_I2S_DF_PCM_B;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5660_AIF1:
+ snd_soc_update_bits(codec, RT5660_I2S1_SDP,
+ RT5660_I2S_MS_MASK | RT5660_I2S_BP_MASK |
+ RT5660_I2S_DF_MASK, reg_val);
+ break;
+
+ default:
+ dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5660_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ if (freq == rt5660->sysclk && clk_id == rt5660->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5660_SCLK_S_MCLK:
+ reg_val |= RT5660_SCLK_SRC_MCLK;
+ break;
+
+ case RT5660_SCLK_S_PLL1:
+ reg_val |= RT5660_SCLK_SRC_PLL1;
+ break;
+
+ case RT5660_SCLK_S_RCCLK:
+ reg_val |= RT5660_SCLK_SRC_RCCLK;
+ break;
+
+ default:
+ dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, RT5660_GLB_CLK, RT5660_SCLK_SRC_MASK,
+ reg_val);
+
+ rt5660->sysclk = freq;
+ rt5660->sysclk_src = clk_id;
+
+ dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+
+ return 0;
+}
+
+static int rt5660_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ struct rl6231_pll_code pll_code;
+ int ret;
+
+ if (source == rt5660->pll_src && freq_in == rt5660->pll_in &&
+ freq_out == rt5660->pll_out)
+ return 0;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(codec->dev, "PLL disabled\n");
+
+ rt5660->pll_in = 0;
+ rt5660->pll_out = 0;
+ snd_soc_update_bits(codec, RT5660_GLB_CLK,
+ RT5660_SCLK_SRC_MASK, RT5660_SCLK_SRC_MCLK);
+ return 0;
+ }
+
+ switch (source) {
+ case RT5660_PLL1_S_MCLK:
+ snd_soc_update_bits(codec, RT5660_GLB_CLK,
+ RT5660_PLL1_SRC_MASK, RT5660_PLL1_SRC_MCLK);
+ break;
+
+ case RT5660_PLL1_S_BCLK:
+ snd_soc_update_bits(codec, RT5660_GLB_CLK,
+ RT5660_PLL1_SRC_MASK, RT5660_PLL1_SRC_BCLK1);
+ break;
+
+ default:
+ dev_err(codec->dev, "Unknown PLL source %d\n", source);
+ return -EINVAL;
+ }
+
+ ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+ dev_err(codec->dev, "Unsupported input clock %d\n", freq_in);
+ return ret;
+ }
+
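+ /* When the PLL M divider is bypassed (m_bp), an M code of 0 is written */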
+ dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=%d\n",
+ pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+ pll_code.n_code, pll_code.k_code);
+
+ snd_soc_write(codec, RT5660_PLL_CTRL1,
+ pll_code.n_code << RT5660_PLL_N_SFT | pll_code.k_code);
+ snd_soc_write(codec, RT5660_PLL_CTRL2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5660_PLL_M_SFT |
+ pll_code.m_bp << RT5660_PLL_M_BP_SFT);
+
+ rt5660->pll_in = freq_in;
+ rt5660->pll_out = freq_out;
+ rt5660->pll_src = source;
+
+ return 0;
+}
+
+static int rt5660_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_update_bits(codec, RT5660_GEN_CTRL1,
+ RT5660_DIG_GATE_CTRL, RT5660_DIG_GATE_CTRL);
+
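+ /* MCLK is optional: enable it when ramping up to PREPARE and drop it
+ * again when coming back down from BIAS_ON.
+ */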
+ if (IS_ERR(rt5660->mclk))
+ break;
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
+ clk_disable_unprepare(rt5660->mclk);
+ } else {
+ ret = clk_prepare_enable(rt5660->mclk);
+ if (ret)
+ return ret;
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+ snd_soc_update_bits(codec, RT5660_PWR_ANLG1,
+ RT5660_PWR_VREF1 | RT5660_PWR_MB |
+ RT5660_PWR_BG | RT5660_PWR_VREF2,
+ RT5660_PWR_VREF1 | RT5660_PWR_MB |
+ RT5660_PWR_BG | RT5660_PWR_VREF2);
+ usleep_range(10000, 15000);
+ snd_soc_update_bits(codec, RT5660_PWR_ANLG1,
+ RT5660_PWR_FV1 | RT5660_PWR_FV2,
+ RT5660_PWR_FV1 | RT5660_PWR_FV2);
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, RT5660_GEN_CTRL1,
+ RT5660_DIG_GATE_CTRL, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5660_probe(struct snd_soc_codec *codec)
+{
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+
+ rt5660->codec = codec;
+
+ return 0;
+}
+
+static int rt5660_remove(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, RT5660_RESET, 0);
+}
+
+#ifdef CONFIG_PM
+static int rt5660_suspend(struct snd_soc_codec *codec)
+{
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(rt5660->regmap, true);
+ regcache_mark_dirty(rt5660->regmap);
+
+ return 0;
+}
+
+static int rt5660_resume(struct snd_soc_codec *codec)
+{
+ struct rt5660_priv *rt5660 = snd_soc_codec_get_drvdata(codec);
+
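+ /* If the codec was powered off in suspend, give it time to come back up
+ * before restoring the register cache.
+ */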
+ if (rt5660->pdata.poweroff_codec_in_suspend)
+ usleep_range(350000, 400000);
+
+ regcache_cache_only(rt5660->regmap, false);
+ regcache_sync(rt5660->regmap);
+
+ return 0;
+}
+#else
+#define rt5660_suspend NULL
+#define rt5660_resume NULL
+#endif
+
+#define RT5660_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5660_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt5660_aif_dai_ops = {
+ .hw_params = rt5660_hw_params,
+ .set_fmt = rt5660_set_dai_fmt,
+ .set_sysclk = rt5660_set_dai_sysclk,
+ .set_pll = rt5660_set_dai_pll,
+};
+
+static struct snd_soc_dai_driver rt5660_dai[] = {
+ {
+ .name = "rt5660-aif1",
+ .id = RT5660_AIF1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5660_STEREO_RATES,
+ .formats = RT5660_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5660_STEREO_RATES,
+ .formats = RT5660_FORMATS,
+ },
+ .ops = &rt5660_aif_dai_ops,
+ },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5660 = {
+ .probe = rt5660_probe,
+ .remove = rt5660_remove,
+ .suspend = rt5660_suspend,
+ .resume = rt5660_resume,
+ .set_bias_level = rt5660_set_bias_level,
+ .idle_bias_off = true,
+ .component_driver = {
+ .controls = rt5660_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5660_snd_controls),
+ .dapm_widgets = rt5660_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5660_dapm_widgets),
+ .dapm_routes = rt5660_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5660_dapm_routes),
+ },
+};
+
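+/* The register map covers the real registers plus the virtual private-register
+ * ranges appended above RT5660_PR_RANGE_BASE.
+ */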
+static const struct regmap_config rt5660_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .use_single_rw = true,
+
+ .max_register = RT5660_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5660_ranges) *
+ RT5660_PR_SPACING),
+ .volatile_reg = rt5660_volatile_register,
+ .readable_reg = rt5660_readable_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5660_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5660_reg),
+ .ranges = rt5660_ranges,
+ .num_ranges = ARRAY_SIZE(rt5660_ranges),
+};
+
+static const struct i2c_device_id rt5660_i2c_id[] = {
+ { "rt5660", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rt5660_i2c_id);
+
+static const struct of_device_id rt5660_of_match[] = {
+ { .compatible = "realtek,rt5660", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5660_of_match);
+
+static const struct acpi_device_id rt5660_acpi_match[] = {
+ { "10EC5660", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, rt5660_acpi_match);
+
+static int rt5660_parse_dt(struct rt5660_priv *rt5660, struct device *dev)
+{
+ rt5660->pdata.in1_diff = device_property_read_bool(dev,
+ "realtek,in1-differential");
+ rt5660->pdata.in3_diff = device_property_read_bool(dev,
+ "realtek,in3-differential");
+ rt5660->pdata.poweroff_codec_in_suspend = device_property_read_bool(dev,
+ "realtek,poweroff-in-suspend");
+ device_property_read_u32(dev, "realtek,dmic1-data-pin",
+ &rt5660->pdata.dmic1_data_pin);
+
+ return 0;
+}
+
+static int rt5660_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5660_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ struct rt5660_priv *rt5660;
+ int ret;
+ unsigned int val;
+
+ rt5660 = devm_kzalloc(&i2c->dev, sizeof(struct rt5660_priv),
+ GFP_KERNEL);
+
+ if (rt5660 == NULL)
+ return -ENOMEM;
+
+ /* Check if MCLK provided */
+ rt5660->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (PTR_ERR(rt5660->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ i2c_set_clientdata(i2c, rt5660);
+
+ if (pdata)
+ rt5660->pdata = *pdata;
+ else if (i2c->dev.of_node)
+ rt5660_parse_dt(rt5660, &i2c->dev);
+
+ rt5660->regmap = devm_regmap_init_i2c(i2c, &rt5660_regmap);
+ if (IS_ERR(rt5660->regmap)) {
+ ret = PTR_ERR(rt5660->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ regmap_read(rt5660->regmap, RT5660_VENDOR_ID2, &val);
+ if (val != RT5660_DEVICE_ID) {
+ dev_err(&i2c->dev,
+ "Device with ID register %#x is not rt5660\n", val);
+ return -ENODEV;
+ }
+
+ regmap_write(rt5660->regmap, RT5660_RESET, 0);
+
+ ret = regmap_register_patch(rt5660->regmap, rt5660_patch,
+ ARRAY_SIZE(rt5660_patch));
+ if (ret != 0)
+ dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+
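+ /* Route GPIO1 to the DMIC clock and select the configured DMIC data pin */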
+ if (rt5660->pdata.dmic1_data_pin) {
+ regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
+ RT5660_GP1_PIN_MASK, RT5660_GP1_PIN_DMIC1_SCL);
+
+ if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_GPIO2)
+ regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
+ RT5660_SEL_DMIC_DATA_MASK,
+ RT5660_SEL_DMIC_DATA_GPIO2);
+ else if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_IN1P)
+ regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
+ RT5660_SEL_DMIC_DATA_MASK,
+ RT5660_SEL_DMIC_DATA_IN1P);
+ }
+
+ return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5660,
+ rt5660_dai, ARRAY_SIZE(rt5660_dai));
+}
+
+static int rt5660_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+
+ return 0;
+}
+
+static struct i2c_driver rt5660_i2c_driver = {
+ .driver = {
+ .name = "rt5660",
+ .acpi_match_table = ACPI_PTR(rt5660_acpi_match),
+ .of_match_table = of_match_ptr(rt5660_of_match),
+ },
+ .probe = rt5660_i2c_probe,
+ .remove = rt5660_i2c_remove,
+ .id_table = rt5660_i2c_id,
+};
+module_i2c_driver(rt5660_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5660 driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5660.h b/sound/soc/codecs/rt5660.h
new file mode 100644
index 000000000000..6cdb9269ec9e
--- /dev/null
+++ b/sound/soc/codecs/rt5660.h
@@ -0,0 +1,847 @@
+/*
+ * rt5660.h -- RT5660 ALSA SoC audio driver
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RT5660_H
+#define _RT5660_H
+
+#include <linux/clk.h>
+#include <sound/rt5660.h>
+
+/* Info */
+#define RT5660_RESET 0x00
+#define RT5660_VENDOR_ID 0xfd
+#define RT5660_VENDOR_ID1 0xfe
+#define RT5660_VENDOR_ID2 0xff
+/* I/O - Output */
+#define RT5660_SPK_VOL 0x01
+#define RT5660_LOUT_VOL 0x02
+/* I/O - Input */
+#define RT5660_IN1_IN2 0x0d
+#define RT5660_IN3_IN4 0x0e
+/* I/O - ADC/DAC/DMIC */
+#define RT5660_DAC1_DIG_VOL 0x19
+#define RT5660_STO1_ADC_DIG_VOL 0x1c
+#define RT5660_ADC_BST_VOL1 0x1e
+/* Mixer - D-D */
+#define RT5660_STO1_ADC_MIXER 0x27
+#define RT5660_AD_DA_MIXER 0x29
+#define RT5660_STO_DAC_MIXER 0x2a
+#define RT5660_DIG_INF1_DATA 0x2f
+/* Mixer - ADC */
+#define RT5660_REC_L1_MIXER 0x3b
+#define RT5660_REC_L2_MIXER 0x3c
+#define RT5660_REC_R1_MIXER 0x3d
+#define RT5660_REC_R2_MIXER 0x3e
+/* Mixer - DAC */
+#define RT5660_LOUT_MIXER 0x45
+#define RT5660_SPK_MIXER 0x46
+#define RT5660_SPO_MIXER 0x48
+#define RT5660_SPO_CLSD_RATIO 0x4a
+#define RT5660_OUT_L_GAIN1 0x4d
+#define RT5660_OUT_L_GAIN2 0x4e
+#define RT5660_OUT_L1_MIXER 0x4f
+#define RT5660_OUT_R_GAIN1 0x50
+#define RT5660_OUT_R_GAIN2 0x51
+#define RT5660_OUT_R1_MIXER 0x52
+/* Power */
+#define RT5660_PWR_DIG1 0x61
+#define RT5660_PWR_DIG2 0x62
+#define RT5660_PWR_ANLG1 0x63
+#define RT5660_PWR_ANLG2 0x64
+#define RT5660_PWR_MIXER 0x65
+#define RT5660_PWR_VOL 0x66
+/* Private Register Control */
+#define RT5660_PRIV_INDEX 0x6a
+#define RT5660_PRIV_DATA 0x6c
+/* Format - ADC/DAC */
+#define RT5660_I2S1_SDP 0x70
+#define RT5660_ADDA_CLK1 0x73
+#define RT5660_ADDA_CLK2 0x74
+#define RT5660_DMIC_CTRL1 0x75
+/* Function - Analog */
+#define RT5660_GLB_CLK 0x80
+#define RT5660_PLL_CTRL1 0x81
+#define RT5660_PLL_CTRL2 0x82
+#define RT5660_CLSD_AMP_OC_CTRL 0x8c
+#define RT5660_CLSD_AMP_CTRL 0x8d
+#define RT5660_LOUT_AMP_CTRL 0x8e
+#define RT5660_SPK_AMP_SPKVDD 0x92
+#define RT5660_MICBIAS 0x93
+#define RT5660_CLSD_OUT_CTRL1 0xa1
+#define RT5660_CLSD_OUT_CTRL2 0xa2
+#define RT5660_DIPOLE_MIC_CTRL1 0xa3
+#define RT5660_DIPOLE_MIC_CTRL2 0xa4
+#define RT5660_DIPOLE_MIC_CTRL3 0xa5
+#define RT5660_DIPOLE_MIC_CTRL4 0xa6
+#define RT5660_DIPOLE_MIC_CTRL5 0xa7
+#define RT5660_DIPOLE_MIC_CTRL6 0xa8
+#define RT5660_DIPOLE_MIC_CTRL7 0xa9
+#define RT5660_DIPOLE_MIC_CTRL8 0xaa
+#define RT5660_DIPOLE_MIC_CTRL9 0xab
+#define RT5660_DIPOLE_MIC_CTRL10 0xac
+#define RT5660_DIPOLE_MIC_CTRL11 0xad
+#define RT5660_DIPOLE_MIC_CTRL12 0xae
+/* Function - Digital */
+#define RT5660_EQ_CTRL1 0xb0
+#define RT5660_EQ_CTRL2 0xb1
+#define RT5660_DRC_AGC_CTRL1 0xb3
+#define RT5660_DRC_AGC_CTRL2 0xb4
+#define RT5660_DRC_AGC_CTRL3 0xb5
+#define RT5660_DRC_AGC_CTRL4 0xb6
+#define RT5660_DRC_AGC_CTRL5 0xb7
+#define RT5660_JD_CTRL 0xbb
+#define RT5660_IRQ_CTRL1 0xbd
+#define RT5660_IRQ_CTRL2 0xbe
+#define RT5660_INT_IRQ_ST 0xbf
+#define RT5660_GPIO_CTRL1 0xc0
+#define RT5660_GPIO_CTRL2 0xc2
+#define RT5660_WIND_FILTER_CTRL1 0xd3
+#define RT5660_SV_ZCD1 0xd9
+#define RT5660_SV_ZCD2 0xda
+#define RT5660_DRC1_LM_CTRL1 0xe0
+#define RT5660_DRC1_LM_CTRL2 0xe1
+#define RT5660_DRC2_LM_CTRL1 0xe2
+#define RT5660_DRC2_LM_CTRL2 0xe3
+#define RT5660_MULTI_DRC_CTRL 0xe4
+#define RT5660_DRC2_CTRL1 0xe5
+#define RT5660_DRC2_CTRL2 0xe6
+#define RT5660_DRC2_CTRL3 0xe7
+#define RT5660_DRC2_CTRL4 0xe8
+#define RT5660_DRC2_CTRL5 0xe9
+#define RT5660_ALC_PGA_CTRL1 0xea
+#define RT5660_ALC_PGA_CTRL2 0xeb
+#define RT5660_ALC_PGA_CTRL3 0xec
+#define RT5660_ALC_PGA_CTRL4 0xed
+#define RT5660_ALC_PGA_CTRL5 0xee
+#define RT5660_ALC_PGA_CTRL6 0xef
+#define RT5660_ALC_PGA_CTRL7 0xf0
+
+/* General Control */
+#define RT5660_GEN_CTRL1 0xfa
+#define RT5660_GEN_CTRL2 0xfb
+#define RT5660_GEN_CTRL3 0xfc
+
+/* Index of Codec Private Register definition */
+#define RT5660_CHOP_DAC_ADC 0x3d
+
+/* Global Definition */
+#define RT5660_L_MUTE (0x1 << 15)
+#define RT5660_L_MUTE_SFT 15
+#define RT5660_VOL_L_MUTE (0x1 << 14)
+#define RT5660_VOL_L_SFT 14
+#define RT5660_R_MUTE (0x1 << 7)
+#define RT5660_R_MUTE_SFT 7
+#define RT5660_VOL_R_MUTE (0x1 << 6)
+#define RT5660_VOL_R_SFT 6
+#define RT5660_L_VOL_MASK (0x3f << 8)
+#define RT5660_L_VOL_SFT 8
+#define RT5660_R_VOL_MASK (0x3f)
+#define RT5660_R_VOL_SFT 0
+
+/* IN1 and IN2 Control (0x0d) */
+#define RT5660_IN_DF1 (0x1 << 15)
+#define RT5660_IN_SFT1 15
+#define RT5660_BST_MASK1 (0x7f << 8)
+#define RT5660_BST_SFT1 8
+#define RT5660_IN_DF2 (0x1 << 7)
+#define RT5660_IN_SFT2 7
+#define RT5660_BST_MASK2 (0x7f << 0)
+#define RT5660_BST_SFT2 0
+
+/* IN3 and IN4 Control (0x0e) */
+#define RT5660_IN_DF3 (0x1 << 15)
+#define RT5660_IN_SFT3 15
+#define RT5660_BST_MASK3 (0x7f << 8)
+#define RT5660_BST_SFT3 8
+#define RT5660_IN_DF4 (0x1 << 7)
+#define RT5660_IN_SFT4 7
+#define RT5660_BST_MASK4 (0x7f << 0)
+#define RT5660_BST_SFT4 0
+
+/* DAC1 Digital Volume (0x19) */
+#define RT5660_DAC_L1_VOL_MASK (0x7f << 9)
+#define RT5660_DAC_L1_VOL_SFT 9
+#define RT5660_DAC_R1_VOL_MASK (0x7f << 1)
+#define RT5660_DAC_R1_VOL_SFT 1
+
+/* ADC Digital Volume Control (0x1c) */
+#define RT5660_ADC_L_VOL_MASK (0x3f << 9)
+#define RT5660_ADC_L_VOL_SFT 9
+#define RT5660_ADC_R_VOL_MASK (0x3f << 1)
+#define RT5660_ADC_R_VOL_SFT 1
+
+/* ADC Boost Volume Control (0x1e) */
+#define RT5660_STO1_ADC_L_BST_MASK (0x3 << 14)
+#define RT5660_STO1_ADC_L_BST_SFT 14
+#define RT5660_STO1_ADC_R_BST_MASK (0x3 << 12)
+#define RT5660_STO1_ADC_R_BST_SFT 12
+
+/* Stereo ADC Mixer Control (0x27) */
+#define RT5660_M_ADC_L1 (0x1 << 14)
+#define RT5660_M_ADC_L1_SFT 14
+#define RT5660_M_ADC_L2 (0x1 << 13)
+#define RT5660_M_ADC_L2_SFT 13
+#define RT5660_M_ADC_R1 (0x1 << 6)
+#define RT5660_M_ADC_R1_SFT 6
+#define RT5660_M_ADC_R2 (0x1 << 5)
+#define RT5660_M_ADC_R2_SFT 5
+
+/* ADC Mixer to DAC Mixer Control (0x29) */
+#define RT5660_M_ADCMIX_L (0x1 << 15)
+#define RT5660_M_ADCMIX_L_SFT 15
+#define RT5660_M_DAC1_L (0x1 << 14)
+#define RT5660_M_DAC1_L_SFT 14
+#define RT5660_M_ADCMIX_R (0x1 << 7)
+#define RT5660_M_ADCMIX_R_SFT 7
+#define RT5660_M_DAC1_R (0x1 << 6)
+#define RT5660_M_DAC1_R_SFT 6
+
+/* Stereo DAC Mixer Control (0x2a) */
+#define RT5660_M_DAC_L1 (0x1 << 14)
+#define RT5660_M_DAC_L1_SFT 14
+#define RT5660_DAC_L1_STO_L_VOL_MASK (0x1 << 13)
+#define RT5660_DAC_L1_STO_L_VOL_SFT 13
+#define RT5660_M_DAC_R1_STO_L (0x1 << 9)
+#define RT5660_M_DAC_R1_STO_L_SFT 9
+#define RT5660_DAC_R1_STO_L_VOL_MASK (0x1 << 8)
+#define RT5660_DAC_R1_STO_L_VOL_SFT 8
+#define RT5660_M_DAC_R1 (0x1 << 6)
+#define RT5660_M_DAC_R1_SFT 6
+#define RT5660_DAC_R1_STO_R_VOL_MASK (0x1 << 5)
+#define RT5660_DAC_R1_STO_R_VOL_SFT 5
+#define RT5660_M_DAC_L1_STO_R (0x1 << 1)
+#define RT5660_M_DAC_L1_STO_R_SFT 1
+#define RT5660_DAC_L1_STO_R_VOL_MASK (0x1)
+#define RT5660_DAC_L1_STO_R_VOL_SFT 0
+
+/* Digital Interface Data Control (0x2f) */
+#define RT5660_IF1_DAC_IN_SEL (0x3 << 14)
+#define RT5660_IF1_DAC_IN_SFT 14
+#define RT5660_IF1_ADC_IN_SEL (0x3 << 12)
+#define RT5660_IF1_ADC_IN_SFT 12
+
+/* REC Left Mixer Control 1 (0x3b) */
+#define RT5660_G_BST3_RM_L_MASK (0x7 << 4)
+#define RT5660_G_BST3_RM_L_SFT 4
+#define RT5660_G_BST2_RM_L_MASK (0x7 << 1)
+#define RT5660_G_BST2_RM_L_SFT 1
+
+/* REC Left Mixer Control 2 (0x3c) */
+#define RT5660_G_BST1_RM_L_MASK (0x7 << 13)
+#define RT5660_G_BST1_RM_L_SFT 13
+#define RT5660_G_OM_L_RM_L_MASK (0x7 << 10)
+#define RT5660_G_OM_L_RM_L_SFT 10
+#define RT5660_M_BST3_RM_L (0x1 << 3)
+#define RT5660_M_BST3_RM_L_SFT 3
+#define RT5660_M_BST2_RM_L (0x1 << 2)
+#define RT5660_M_BST2_RM_L_SFT 2
+#define RT5660_M_BST1_RM_L (0x1 << 1)
+#define RT5660_M_BST1_RM_L_SFT 1
+#define RT5660_M_OM_L_RM_L (0x1)
+#define RT5660_M_OM_L_RM_L_SFT 0
+
+/* REC Right Mixer Control 1 (0x3d) */
+#define RT5660_G_BST3_RM_R_MASK (0x7 << 4)
+#define RT5660_G_BST3_RM_R_SFT 4
+#define RT5660_G_BST2_RM_R_MASK (0x7 << 1)
+#define RT5660_G_BST2_RM_R_SFT 1
+
+/* REC Right Mixer Control 2 (0x3e) */
+#define RT5660_G_BST1_RM_R_MASK (0x7 << 13)
+#define RT5660_G_BST1_RM_R_SFT 13
+#define RT5660_G_OM_R_RM_R_MASK (0x7 << 10)
+#define RT5660_G_OM_R_RM_R_SFT 10
+#define RT5660_M_BST3_RM_R (0x1 << 3)
+#define RT5660_M_BST3_RM_R_SFT 3
+#define RT5660_M_BST2_RM_R (0x1 << 2)
+#define RT5660_M_BST2_RM_R_SFT 2
+#define RT5660_M_BST1_RM_R (0x1 << 1)
+#define RT5660_M_BST1_RM_R_SFT 1
+#define RT5660_M_OM_R_RM_R (0x1)
+#define RT5660_M_OM_R_RM_R_SFT 0
+
+/* LOUTMIX Control (0x45) */
+#define RT5660_M_DAC1_LM (0x1 << 14)
+#define RT5660_M_DAC1_LM_SFT 14
+#define RT5660_M_LOVOL_M (0x1 << 13)
+#define RT5660_M_LOVOL_LM_SFT 13
+
+/* SPK Mixer Control (0x46) */
+#define RT5660_G_BST3_SM_MASK (0x3 << 14)
+#define RT5660_G_BST3_SM_SFT 14
+#define RT5660_G_BST1_SM_MASK (0x3 << 12)
+#define RT5660_G_BST1_SM_SFT 12
+#define RT5660_G_DACl_SM_MASK (0x3 << 10)
+#define RT5660_G_DACl_SM_SFT 10
+#define RT5660_G_DACR_SM_MASK (0x3 << 8)
+#define RT5660_G_DACR_SM_SFT 8
+#define RT5660_G_OM_L_SM_MASK (0x3 << 6)
+#define RT5660_G_OM_L_SM_SFT 6
+#define RT5660_M_DACR_SM (0x1 << 5)
+#define RT5660_M_DACR_SM_SFT 5
+#define RT5660_M_BST1_SM (0x1 << 4)
+#define RT5660_M_BST1_SM_SFT 4
+#define RT5660_M_BST3_SM (0x1 << 3)
+#define RT5660_M_BST3_SM_SFT 3
+#define RT5660_M_DACL_SM (0x1 << 2)
+#define RT5660_M_DACL_SM_SFT 2
+#define RT5660_M_OM_L_SM (0x1 << 1)
+#define RT5660_M_OM_L_SM_SFT 1
+
+/* SPOMIX Control (0x48) */
+#define RT5660_M_DAC_R_SPM (0x1 << 14)
+#define RT5660_M_DAC_R_SPM_SFT 14
+#define RT5660_M_DAC_L_SPM (0x1 << 13)
+#define RT5660_M_DAC_L_SPM_SFT 13
+#define RT5660_M_SV_SPM (0x1 << 12)
+#define RT5660_M_SV_SPM_SFT 12
+#define RT5660_M_BST1_SPM (0x1 << 11)
+#define RT5660_M_BST1_SPM_SFT 11
+
+/* Output Left Mixer Control 1 (0x4d) */
+#define RT5660_G_BST3_OM_L_MASK (0x7 << 13)
+#define RT5660_G_BST3_OM_L_SFT 13
+#define RT5660_G_BST2_OM_L_MASK (0x7 << 10)
+#define RT5660_G_BST2_OM_L_SFT 10
+#define RT5660_G_BST1_OM_L_MASK (0x7 << 7)
+#define RT5660_G_BST1_OM_L_SFT 7
+#define RT5660_G_RM_L_OM_L_MASK (0x7 << 1)
+#define RT5660_G_RM_L_OM_L_SFT 1
+
+/* Output Left Mixer Control 2 (0x4e) */
+#define RT5660_G_DAC_R1_OM_L_MASK (0x7 << 10)
+#define RT5660_G_DAC_R1_OM_L_SFT 10
+#define RT5660_G_DAC_L1_OM_L_MASK (0x7 << 7)
+#define RT5660_G_DAC_L1_OM_L_SFT 7
+
+/* Output Left Mixer Control 3 (0x4f) */
+#define RT5660_M_BST3_OM_L (0x1 << 5)
+#define RT5660_M_BST3_OM_L_SFT 5
+#define RT5660_M_BST2_OM_L (0x1 << 4)
+#define RT5660_M_BST2_OM_L_SFT 4
+#define RT5660_M_BST1_OM_L (0x1 << 3)
+#define RT5660_M_BST1_OM_L_SFT 3
+#define RT5660_M_RM_L_OM_L (0x1 << 2)
+#define RT5660_M_RM_L_OM_L_SFT 2
+#define RT5660_M_DAC_R_OM_L (0x1 << 1)
+#define RT5660_M_DAC_R_OM_L_SFT 1
+#define RT5660_M_DAC_L_OM_L (0x1)
+#define RT5660_M_DAC_L_OM_L_SFT 0
+
+/* Output Right Mixer Control 1 (0x50) */
+#define RT5660_G_BST2_OM_R_MASK (0x7 << 10)
+#define RT5660_G_BST2_OM_R_SFT 10
+#define RT5660_G_BST1_OM_R_MASK (0x7 << 7)
+#define RT5660_G_BST1_OM_R_SFT 7
+#define RT5660_G_RM_R_OM_R_MASK (0x7 << 1)
+#define RT5660_G_RM_R_OM_R_SFT 1
+
+/* Output Right Mixer Control 2 (0x51) */
+#define RT5660_G_DAC_L_OM_R_MASK (0x7 << 10)
+#define RT5660_G_DAC_L_OM_R_SFT 10
+#define RT5660_G_DAC_R_OM_R_MASK (0x7 << 7)
+#define RT5660_G_DAC_R_OM_R_SFT 7
+
+/* Output Right Mixer Control 3 (0x52) */
+#define RT5660_M_BST2_OM_R (0x1 << 4)
+#define RT5660_M_BST2_OM_R_SFT 4
+#define RT5660_M_BST1_OM_R (0x1 << 3)
+#define RT5660_M_BST1_OM_R_SFT 3
+#define RT5660_M_RM_R_OM_R (0x1 << 2)
+#define RT5660_M_RM_R_OM_R_SFT 2
+#define RT5660_M_DAC_L_OM_R (0x1 << 1)
+#define RT5660_M_DAC_L_OM_R_SFT 1
+#define RT5660_M_DAC_R_OM_R (0x1)
+#define RT5660_M_DAC_R_OM_R_SFT 0
+
+/* Power Management for Digital 1 (0x61) */
+#define RT5660_PWR_I2S1 (0x1 << 15)
+#define RT5660_PWR_I2S1_BIT 15
+#define RT5660_PWR_DAC_L1 (0x1 << 12)
+#define RT5660_PWR_DAC_L1_BIT 12
+#define RT5660_PWR_DAC_R1 (0x1 << 11)
+#define RT5660_PWR_DAC_R1_BIT 11
+#define RT5660_PWR_ADC_L (0x1 << 2)
+#define RT5660_PWR_ADC_L_BIT 2
+#define RT5660_PWR_ADC_R (0x1 << 1)
+#define RT5660_PWR_ADC_R_BIT 1
+#define RT5660_PWR_CLS_D (0x1)
+#define RT5660_PWR_CLS_D_BIT 0
+
+/* Power Management for Digital 2 (0x62) */
+#define RT5660_PWR_ADC_S1F (0x1 << 15)
+#define RT5660_PWR_ADC_S1F_BIT 15
+#define RT5660_PWR_DAC_S1F (0x1 << 11)
+#define RT5660_PWR_DAC_S1F_BIT 11
+
+/* Power Management for Analog 1 (0x63) */
+#define RT5660_PWR_VREF1 (0x1 << 15)
+#define RT5660_PWR_VREF1_BIT 15
+#define RT5660_PWR_FV1 (0x1 << 14)
+#define RT5660_PWR_FV1_BIT 14
+#define RT5660_PWR_MB (0x1 << 13)
+#define RT5660_PWR_MB_BIT 13
+#define RT5660_PWR_BG (0x1 << 11)
+#define RT5660_PWR_BG_BIT 11
+#define RT5660_PWR_HP_L (0x1 << 7)
+#define RT5660_PWR_HP_L_BIT 7
+#define RT5660_PWR_HP_R (0x1 << 6)
+#define RT5660_PWR_HP_R_BIT 6
+#define RT5660_PWR_HA (0x1 << 5)
+#define RT5660_PWR_HA_BIT 5
+#define RT5660_PWR_VREF2 (0x1 << 4)
+#define RT5660_PWR_VREF2_BIT 4
+#define RT5660_PWR_FV2 (0x1 << 3)
+#define RT5660_PWR_FV2_BIT 3
+#define RT5660_PWR_LDO2 (0x1 << 2)
+#define RT5660_PWR_LDO2_BIT 2
+
+/* Power Management for Analog 2 (0x64) */
+#define RT5660_PWR_BST1 (0x1 << 15)
+#define RT5660_PWR_BST1_BIT 15
+#define RT5660_PWR_BST2 (0x1 << 14)
+#define RT5660_PWR_BST2_BIT 14
+#define RT5660_PWR_BST3 (0x1 << 13)
+#define RT5660_PWR_BST3_BIT 13
+#define RT5660_PWR_MB1 (0x1 << 11)
+#define RT5660_PWR_MB1_BIT 11
+#define RT5660_PWR_MB2 (0x1 << 10)
+#define RT5660_PWR_MB2_BIT 10
+#define RT5660_PWR_PLL (0x1 << 9)
+#define RT5660_PWR_PLL_BIT 9
+
+/* Power Management for Mixer (0x65) */
+#define RT5660_PWR_OM_L (0x1 << 15)
+#define RT5660_PWR_OM_L_BIT 15
+#define RT5660_PWR_OM_R (0x1 << 14)
+#define RT5660_PWR_OM_R_BIT 14
+#define RT5660_PWR_SM (0x1 << 13)
+#define RT5660_PWR_SM_BIT 13
+#define RT5660_PWR_RM_L (0x1 << 11)
+#define RT5660_PWR_RM_L_BIT 11
+#define RT5660_PWR_RM_R (0x1 << 10)
+#define RT5660_PWR_RM_R_BIT 10
+
+/* Power Management for Volume (0x66) */
+#define RT5660_PWR_SV (0x1 << 15)
+#define RT5660_PWR_SV_BIT 15
+#define RT5660_PWR_LV_L (0x1 << 11)
+#define RT5660_PWR_LV_L_BIT 11
+#define RT5660_PWR_LV_R (0x1 << 10)
+#define RT5660_PWR_LV_R_BIT 10
+
+/* I2S1 Audio Serial Data Port Control (0x70) */
+#define RT5660_I2S_MS_MASK (0x1 << 15)
+#define RT5660_I2S_MS_SFT 15
+#define RT5660_I2S_MS_M (0x0 << 15)
+#define RT5660_I2S_MS_S (0x1 << 15)
+#define RT5660_I2S_O_CP_MASK (0x3 << 10)
+#define RT5660_I2S_O_CP_SFT 10
+#define RT5660_I2S_O_CP_OFF (0x0 << 10)
+#define RT5660_I2S_O_CP_U_LAW (0x1 << 10)
+#define RT5660_I2S_O_CP_A_LAW (0x2 << 10)
+#define RT5660_I2S_I_CP_MASK (0x3 << 8)
+#define RT5660_I2S_I_CP_SFT 8
+#define RT5660_I2S_I_CP_OFF (0x0 << 8)
+#define RT5660_I2S_I_CP_U_LAW (0x1 << 8)
+#define RT5660_I2S_I_CP_A_LAW (0x2 << 8)
+#define RT5660_I2S_BP_MASK (0x1 << 7)
+#define RT5660_I2S_BP_SFT 7
+#define RT5660_I2S_BP_NOR (0x0 << 7)
+#define RT5660_I2S_BP_INV (0x1 << 7)
+#define RT5660_I2S_DL_MASK (0x3 << 2)
+#define RT5660_I2S_DL_SFT 2
+#define RT5660_I2S_DL_16 (0x0 << 2)
+#define RT5660_I2S_DL_20 (0x1 << 2)
+#define RT5660_I2S_DL_24 (0x2 << 2)
+#define RT5660_I2S_DL_8 (0x3 << 2)
+#define RT5660_I2S_DF_MASK (0x3)
+#define RT5660_I2S_DF_SFT 0
+#define RT5660_I2S_DF_I2S (0x0)
+#define RT5660_I2S_DF_LEFT (0x1)
+#define RT5660_I2S_DF_PCM_A (0x2)
+#define RT5660_I2S_DF_PCM_B (0x3)
+
+/* ADC/DAC Clock Control 1 (0x73) */
+#define RT5660_I2S_BCLK_MS1_MASK (0x1 << 15)
+#define RT5660_I2S_BCLK_MS1_SFT 15
+#define RT5660_I2S_BCLK_MS1_32 (0x0 << 15)
+#define RT5660_I2S_BCLK_MS1_64 (0x1 << 15)
+#define RT5660_I2S_PD1_MASK (0x7 << 12)
+#define RT5660_I2S_PD1_SFT 12
+#define RT5660_I2S_PD1_1 (0x0 << 12)
+#define RT5660_I2S_PD1_2 (0x1 << 12)
+#define RT5660_I2S_PD1_3 (0x2 << 12)
+#define RT5660_I2S_PD1_4 (0x3 << 12)
+#define RT5660_I2S_PD1_6 (0x4 << 12)
+#define RT5660_I2S_PD1_8 (0x5 << 12)
+#define RT5660_I2S_PD1_12 (0x6 << 12)
+#define RT5660_I2S_PD1_16 (0x7 << 12)
+#define RT5660_DAC_OSR_MASK (0x3 << 2)
+#define RT5660_DAC_OSR_SFT 2
+#define RT5660_DAC_OSR_128 (0x0 << 2)
+#define RT5660_DAC_OSR_64 (0x1 << 2)
+#define RT5660_DAC_OSR_32 (0x2 << 2)
+#define RT5660_DAC_OSR_16 (0x3 << 2)
+#define RT5660_ADC_OSR_MASK (0x3)
+#define RT5660_ADC_OSR_SFT 0
+#define RT5660_ADC_OSR_128 (0x0)
+#define RT5660_ADC_OSR_64 (0x1)
+#define RT5660_ADC_OSR_32 (0x2)
+#define RT5660_ADC_OSR_16 (0x3)
+
+/* ADC/DAC Clock Control 2 (0x74) */
+#define RT5660_RESET_ADF (0x1 << 13)
+#define RT5660_RESET_ADF_SFT 13
+#define RT5660_RESET_DAF (0x1 << 12)
+#define RT5660_RESET_DAF_SFT 12
+#define RT5660_DAHPF_EN (0x1 << 11)
+#define RT5660_DAHPF_EN_SFT 11
+#define RT5660_ADHPF_EN (0x1 << 10)
+#define RT5660_ADHPF_EN_SFT 10
+
+/* Digital Microphone Control (0x75) */
+#define RT5660_DMIC_1_EN_MASK (0x1 << 15)
+#define RT5660_DMIC_1_EN_SFT 15
+#define RT5660_DMIC_1_DIS (0x0 << 15)
+#define RT5660_DMIC_1_EN (0x1 << 15)
+#define RT5660_DMIC_1L_LH_MASK (0x1 << 13)
+#define RT5660_DMIC_1L_LH_SFT 13
+#define RT5660_DMIC_1L_LH_RISING (0x0 << 13)
+#define RT5660_DMIC_1L_LH_FALLING (0x1 << 13)
+#define RT5660_DMIC_1R_LH_MASK (0x1 << 12)
+#define RT5660_DMIC_1R_LH_SFT 12
+#define RT5660_DMIC_1R_LH_RISING (0x0 << 12)
+#define RT5660_DMIC_1R_LH_FALLING (0x1 << 12)
+#define RT5660_SEL_DMIC_DATA_MASK (0x1 << 11)
+#define RT5660_SEL_DMIC_DATA_SFT 11
+#define RT5660_SEL_DMIC_DATA_GPIO2 (0x0 << 11)
+#define RT5660_SEL_DMIC_DATA_IN1P (0x1 << 11)
+#define RT5660_DMIC_CLK_MASK (0x7 << 5)
+#define RT5660_DMIC_CLK_SFT 5
+
+/* Global Clock Control (0x80) */
+#define RT5660_SCLK_SRC_MASK (0x3 << 14)
+#define RT5660_SCLK_SRC_SFT 14
+#define RT5660_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5660_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5660_SCLK_SRC_RCCLK (0x2 << 14)
+#define RT5660_PLL1_SRC_MASK (0x3 << 12)
+#define RT5660_PLL1_SRC_SFT 12
+#define RT5660_PLL1_SRC_MCLK (0x0 << 12)
+#define RT5660_PLL1_SRC_BCLK1 (0x1 << 12)
+#define RT5660_PLL1_SRC_RCCLK (0x2 << 12)
+#define RT5660_PLL1_PD_MASK (0x1 << 3)
+#define RT5660_PLL1_PD_SFT 3
+#define RT5660_PLL1_PD_1 (0x0 << 3)
+#define RT5660_PLL1_PD_2 (0x1 << 3)
+
+#define RT5660_PLL_INP_MAX 40000000
+#define RT5660_PLL_INP_MIN 256000
+/* PLL M/N/K Code Control 1 (0x81) */
+#define RT5660_PLL_N_MAX 0x1ff
+#define RT5660_PLL_N_MASK (RT5660_PLL_N_MAX << 7)
+#define RT5660_PLL_N_SFT 7
+#define RT5660_PLL_K_MAX 0x1f
+#define RT5660_PLL_K_MASK (RT5660_PLL_K_MAX)
+#define RT5660_PLL_K_SFT 0
+
+/* PLL M/N/K Code Control 2 (0x82) */
+#define RT5660_PLL_M_MAX 0xf
+#define RT5660_PLL_M_MASK (RT5660_PLL_M_MAX << 12)
+#define RT5660_PLL_M_SFT 12
+#define RT5660_PLL_M_BP (0x1 << 11)
+#define RT5660_PLL_M_BP_SFT 11
+
+/* Class D Over Current Control (0x8c) */
+#define RT5660_CLSD_OC_MASK (0x1 << 9)
+#define RT5660_CLSD_OC_SFT 9
+#define RT5660_CLSD_OC_PU (0x0 << 9)
+#define RT5660_CLSD_OC_PD (0x1 << 9)
+#define RT5660_AUTO_PD_MASK (0x1 << 8)
+#define RT5660_AUTO_PD_SFT 8
+#define RT5660_AUTO_PD_DIS (0x0 << 8)
+#define RT5660_AUTO_PD_EN (0x1 << 8)
+#define RT5660_CLSD_OC_TH_MASK (0x3f)
+#define RT5660_CLSD_OC_TH_SFT 0
+
+/* Class D Output Control (0x8d) */
+#define RT5660_CLSD_RATIO_MASK (0xf << 12)
+#define RT5660_CLSD_RATIO_SFT 12
+
+/* Lout Amp Control 1 (0x8e) */
+#define RT5660_LOUT_CO_MASK (0x1 << 4)
+#define RT5660_LOUT_CO_SFT 4
+#define RT5660_LOUT_CO_DIS (0x0 << 4)
+#define RT5660_LOUT_CO_EN (0x1 << 4)
+#define RT5660_LOUT_CB_MASK (0x1)
+#define RT5660_LOUT_CB_SFT 0
+#define RT5660_LOUT_CB_PD (0x0)
+#define RT5660_LOUT_CB_PU (0x1)
+
+/* SPKVDD detection control (0x92) */
+#define RT5660_SPKVDD_DET_MASK (0x1 << 15)
+#define RT5660_SPKVDD_DET_SFT 15
+#define RT5660_SPKVDD_DET_DIS (0x0 << 15)
+#define RT5660_SPKVDD_DET_EN (0x1 << 15)
+#define RT5660_SPK_AG_MASK (0x1 << 14)
+#define RT5660_SPK_AG_SFT 14
+#define RT5660_SPK_AG_DIS (0x0 << 14)
+#define RT5660_SPK_AG_EN (0x1 << 14)
+
+/* Micbias Control (0x93) */
+#define RT5660_MIC1_BS_MASK (0x1 << 15)
+#define RT5660_MIC1_BS_SFT 15
+#define RT5660_MIC1_BS_9AV (0x0 << 15)
+#define RT5660_MIC1_BS_75AV (0x1 << 15)
+#define RT5660_MIC2_BS_MASK (0x1 << 14)
+#define RT5660_MIC2_BS_SFT 14
+#define RT5660_MIC2_BS_9AV (0x0 << 14)
+#define RT5660_MIC2_BS_75AV (0x1 << 14)
+#define RT5660_MIC1_OVCD_MASK (0x1 << 11)
+#define RT5660_MIC1_OVCD_SFT 11
+#define RT5660_MIC1_OVCD_DIS (0x0 << 11)
+#define RT5660_MIC1_OVCD_EN (0x1 << 11)
+#define RT5660_MIC1_OVTH_MASK (0x3 << 9)
+#define RT5660_MIC1_OVTH_SFT 9
+#define RT5660_MIC1_OVTH_600UA (0x0 << 9)
+#define RT5660_MIC1_OVTH_1500UA (0x1 << 9)
+#define RT5660_MIC1_OVTH_2000UA (0x2 << 9)
+#define RT5660_MIC2_OVCD_MASK (0x1 << 8)
+#define RT5660_MIC2_OVCD_SFT 8
+#define RT5660_MIC2_OVCD_DIS (0x0 << 8)
+#define RT5660_MIC2_OVCD_EN (0x1 << 8)
+#define RT5660_MIC2_OVTH_MASK (0x3 << 6)
+#define RT5660_MIC2_OVTH_SFT 6
+#define RT5660_MIC2_OVTH_600UA (0x0 << 6)
+#define RT5660_MIC2_OVTH_1500UA (0x1 << 6)
+#define RT5660_MIC2_OVTH_2000UA (0x2 << 6)
+#define RT5660_PWR_CLK25M_MASK (0x1 << 4)
+#define RT5660_PWR_CLK25M_SFT 4
+#define RT5660_PWR_CLK25M_PD (0x0 << 4)
+#define RT5660_PWR_CLK25M_PU (0x1 << 4)
+
+/* EQ Control 1 (0xb0) */
+#define RT5660_EQ_SRC_MASK (0x1 << 15)
+#define RT5660_EQ_SRC_SFT 15
+#define RT5660_EQ_SRC_DAC (0x0 << 15)
+#define RT5660_EQ_SRC_ADC (0x1 << 15)
+#define RT5660_EQ_UPD (0x1 << 14)
+#define RT5660_EQ_UPD_BIT 14
+
+/* Jack Detect Control (0xbb) */
+#define RT5660_JD_MASK (0x3 << 14)
+#define RT5660_JD_SFT 14
+#define RT5660_JD_DIS (0x0 << 14)
+#define RT5660_JD_GPIO1 (0x1 << 14)
+#define RT5660_JD_GPIO2 (0x2 << 14)
+#define RT5660_JD_LOUT_MASK (0x1 << 11)
+#define RT5660_JD_LOUT_SFT 11
+#define RT5660_JD_LOUT_DIS (0x0 << 11)
+#define RT5660_JD_LOUT_EN (0x1 << 11)
+#define RT5660_JD_LOUT_TRG_MASK (0x1 << 10)
+#define RT5660_JD_LOUT_TRG_SFT 10
+#define RT5660_JD_LOUT_TRG_LO (0x0 << 10)
+#define RT5660_JD_LOUT_TRG_HI (0x1 << 10)
+#define RT5660_JD_SPO_MASK (0x1 << 9)
+#define RT5660_JD_SPO_SFT 9
+#define RT5660_JD_SPO_DIS (0x0 << 9)
+#define RT5660_JD_SPO_EN (0x1 << 9)
+#define RT5660_JD_SPO_TRG_MASK (0x1 << 8)
+#define RT5660_JD_SPO_TRG_SFT 8
+#define RT5660_JD_SPO_TRG_LO (0x0 << 8)
+#define RT5660_JD_SPO_TRG_HI (0x1 << 8)
+
+/* IRQ Control 1 (0xbd) */
+#define RT5660_IRQ_JD_MASK (0x1 << 15)
+#define RT5660_IRQ_JD_SFT 15
+#define RT5660_IRQ_JD_BP (0x0 << 15)
+#define RT5660_IRQ_JD_NOR (0x1 << 15)
+#define RT5660_IRQ_OT_MASK (0x1 << 14)
+#define RT5660_IRQ_OT_SFT 14
+#define RT5660_IRQ_OT_BP (0x0 << 14)
+#define RT5660_IRQ_OT_NOR (0x1 << 14)
+#define RT5660_JD_STKY_MASK (0x1 << 13)
+#define RT5660_JD_STKY_SFT 13
+#define RT5660_JD_STKY_DIS (0x0 << 13)
+#define RT5660_JD_STKY_EN (0x1 << 13)
+#define RT5660_OT_STKY_MASK (0x1 << 12)
+#define RT5660_OT_STKY_SFT 12
+#define RT5660_OT_STKY_DIS (0x0 << 12)
+#define RT5660_OT_STKY_EN (0x1 << 12)
+#define RT5660_JD_P_MASK (0x1 << 11)
+#define RT5660_JD_P_SFT 11
+#define RT5660_JD_P_NOR (0x0 << 11)
+#define RT5660_JD_P_INV (0x1 << 11)
+#define RT5660_OT_P_MASK (0x1 << 10)
+#define RT5660_OT_P_SFT 10
+#define RT5660_OT_P_NOR (0x0 << 10)
+#define RT5660_OT_P_INV (0x1 << 10)
+
+/* IRQ Control 2 (0xbe) */
+#define RT5660_IRQ_MB1_OC_MASK (0x1 << 15)
+#define RT5660_IRQ_MB1_OC_SFT 15
+#define RT5660_IRQ_MB1_OC_BP (0x0 << 15)
+#define RT5660_IRQ_MB1_OC_NOR (0x1 << 15)
+#define RT5660_IRQ_MB2_OC_MASK (0x1 << 14)
+#define RT5660_IRQ_MB2_OC_SFT 14
+#define RT5660_IRQ_MB2_OC_BP (0x0 << 14)
+#define RT5660_IRQ_MB2_OC_NOR (0x1 << 14)
+#define RT5660_MB1_OC_STKY_MASK (0x1 << 11)
+#define RT5660_MB1_OC_STKY_SFT 11
+#define RT5660_MB1_OC_STKY_DIS (0x0 << 11)
+#define RT5660_MB1_OC_STKY_EN (0x1 << 11)
+#define RT5660_MB2_OC_STKY_MASK (0x1 << 10)
+#define RT5660_MB2_OC_STKY_SFT 10
+#define RT5660_MB2_OC_STKY_DIS (0x0 << 10)
+#define RT5660_MB2_OC_STKY_EN (0x1 << 10)
+#define RT5660_MB1_OC_P_MASK (0x1 << 7)
+#define RT5660_MB1_OC_P_SFT 7
+#define RT5660_MB1_OC_P_NOR (0x0 << 7)
+#define RT5660_MB1_OC_P_INV (0x1 << 7)
+#define RT5660_MB2_OC_P_MASK (0x1 << 6)
+#define RT5660_MB2_OC_P_SFT 6
+#define RT5660_MB2_OC_P_NOR (0x0 << 6)
+#define RT5660_MB2_OC_P_INV (0x1 << 6)
+#define RT5660_MB1_OC_CLR (0x1 << 3)
+#define RT5660_MB1_OC_CLR_SFT 3
+#define RT5660_MB2_OC_CLR (0x1 << 2)
+#define RT5660_MB2_OC_CLR_SFT 2
+
+/* GPIO Control 1 (0xc0) */
+#define RT5660_GP2_PIN_MASK (0x1 << 14)
+#define RT5660_GP2_PIN_SFT 14
+#define RT5660_GP2_PIN_GPIO2 (0x0 << 14)
+#define RT5660_GP2_PIN_DMIC1_SDA (0x1 << 14)
+#define RT5660_GP1_PIN_MASK (0x3 << 12)
+#define RT5660_GP1_PIN_SFT 12
+#define RT5660_GP1_PIN_GPIO1 (0x0 << 12)
+#define RT5660_GP1_PIN_DMIC1_SCL (0x1 << 12)
+#define RT5660_GP1_PIN_IRQ (0x2 << 12)
+#define RT5660_GPIO_M_MASK (0x1 << 9)
+#define RT5660_GPIO_M_SFT 9
+#define RT5660_GPIO_M_FLT (0x0 << 9)
+#define RT5660_GPIO_M_PH (0x1 << 9)
+
+/* GPIO Control 3 (0xc2) */
+#define RT5660_GP2_PF_MASK (0x1 << 5)
+#define RT5660_GP2_PF_SFT 5
+#define RT5660_GP2_PF_IN (0x0 << 5)
+#define RT5660_GP2_PF_OUT (0x1 << 5)
+#define RT5660_GP2_OUT_MASK (0x1 << 4)
+#define RT5660_GP2_OUT_SFT 4
+#define RT5660_GP2_OUT_LO (0x0 << 4)
+#define RT5660_GP2_OUT_HI (0x1 << 4)
+#define RT5660_GP2_P_MASK (0x1 << 3)
+#define RT5660_GP2_P_SFT 3
+#define RT5660_GP2_P_NOR (0x0 << 3)
+#define RT5660_GP2_P_INV (0x1 << 3)
+#define RT5660_GP1_PF_MASK (0x1 << 2)
+#define RT5660_GP1_PF_SFT 2
+#define RT5660_GP1_PF_IN (0x0 << 2)
+#define RT5660_GP1_PF_OUT (0x1 << 2)
+#define RT5660_GP1_OUT_MASK (0x1 << 1)
+#define RT5660_GP1_OUT_SFT 1
+#define RT5660_GP1_OUT_LO (0x0 << 1)
+#define RT5660_GP1_OUT_HI (0x1 << 1)
+#define RT5660_GP1_P_MASK (0x1)
+#define RT5660_GP1_P_SFT 0
+#define RT5660_GP1_P_NOR (0x0)
+#define RT5660_GP1_P_INV (0x1)
+
+/* Soft volume and zero cross control 1 (0xd9) */
+#define RT5660_SV_MASK (0x1 << 15)
+#define RT5660_SV_SFT 15
+#define RT5660_SV_DIS (0x0 << 15)
+#define RT5660_SV_EN (0x1 << 15)
+#define RT5660_SPO_SV_MASK (0x1 << 14)
+#define RT5660_SPO_SV_SFT 14
+#define RT5660_SPO_SV_DIS (0x0 << 14)
+#define RT5660_SPO_SV_EN (0x1 << 14)
+#define RT5660_OUT_SV_MASK (0x1 << 12)
+#define RT5660_OUT_SV_SFT 12
+#define RT5660_OUT_SV_DIS (0x0 << 12)
+#define RT5660_OUT_SV_EN (0x1 << 12)
+#define RT5660_ZCD_DIG_MASK (0x1 << 11)
+#define RT5660_ZCD_DIG_SFT 11
+#define RT5660_ZCD_DIG_DIS (0x0 << 11)
+#define RT5660_ZCD_DIG_EN (0x1 << 11)
+#define RT5660_ZCD_MASK (0x1 << 10)
+#define RT5660_ZCD_SFT 10
+#define RT5660_ZCD_PD (0x0 << 10)
+#define RT5660_ZCD_PU (0x1 << 10)
+#define RT5660_SV_DLY_MASK (0xf)
+#define RT5660_SV_DLY_SFT 0
+
+/* Soft volume and zero cross control 2 (0xda) */
+#define RT5660_ZCD_SPO_MASK (0x1 << 15)
+#define RT5660_ZCD_SPO_SFT 15
+#define RT5660_ZCD_SPO_DIS (0x0 << 15)
+#define RT5660_ZCD_SPO_EN (0x1 << 15)
+#define RT5660_ZCD_OMR_MASK (0x1 << 8)
+#define RT5660_ZCD_OMR_SFT 8
+#define RT5660_ZCD_OMR_DIS (0x0 << 8)
+#define RT5660_ZCD_OMR_EN (0x1 << 8)
+#define RT5660_ZCD_OML_MASK (0x1 << 7)
+#define RT5660_ZCD_OML_SFT 7
+#define RT5660_ZCD_OML_DIS (0x0 << 7)
+#define RT5660_ZCD_OML_EN (0x1 << 7)
+#define RT5660_ZCD_SPM_MASK (0x1 << 6)
+#define RT5660_ZCD_SPM_SFT 6
+#define RT5660_ZCD_SPM_DIS (0x0 << 6)
+#define RT5660_ZCD_SPM_EN (0x1 << 6)
+#define RT5660_ZCD_RMR_MASK (0x1 << 5)
+#define RT5660_ZCD_RMR_SFT 5
+#define RT5660_ZCD_RMR_DIS (0x0 << 5)
+#define RT5660_ZCD_RMR_EN (0x1 << 5)
+#define RT5660_ZCD_RML_MASK (0x1 << 4)
+#define RT5660_ZCD_RML_SFT 4
+#define RT5660_ZCD_RML_DIS (0x0 << 4)
+#define RT5660_ZCD_RML_EN (0x1 << 4)
+
+/* General Control 1 (0xfa) */
+#define RT5660_PWR_VREF_HP (0x1 << 11)
+#define RT5660_PWR_VREF_HP_SFT 11
+#define RT5660_DIG_GATE_CTRL (0x1)
+#define RT5660_DIG_GATE_CTRL_SFT 0
+
+/* System Clock Source */
+#define RT5660_SCLK_S_MCLK 0
+#define RT5660_SCLK_S_PLL1 1
+#define RT5660_SCLK_S_RCCLK 2
+
+/* PLL1 Source */
+#define RT5660_PLL1_S_MCLK 0
+#define RT5660_PLL1_S_BCLK 1
+
+enum {
+ RT5660_AIF1,
+ RT5660_AIFS,
+};
+
+struct rt5660_priv {
+ struct snd_soc_codec *codec;
+ struct rt5660_platform_data pdata;
+ struct regmap *regmap;
+ struct clk *mclk;
+
+ int sysclk;
+ int sysclk_src;
+ int lrck[RT5660_AIFS];
+ int bclk[RT5660_AIFS];
+ int master[RT5660_AIFS];
+
+ int pll_src;
+ int pll_in;
+ int pll_out;
+};
+
+#endif
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
new file mode 100644
index 000000000000..01a18d88f1eb
--- /dev/null
+++ b/sound/soc/codecs/rt5663.c
@@ -0,0 +1,3218 @@
+/*
+ * rt5663.c -- RT5668/RT5663 ALSA SoC audio codec driver
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Jack Yu <jack.yu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rt5663.h"
+#include "rl6231.h"
+
+#define RT5668_DEVICE_ID 0x6451
+#define RT5663_DEVICE_ID 0x6406
+
+enum {
+ CODEC_TYPE_RT5668,
+ CODEC_TYPE_RT5663,
+};
+
+struct rt5663_priv {
+ struct snd_soc_codec *codec;
+ struct regmap *regmap;
+ struct delayed_work jack_detect_work;
+ struct snd_soc_jack *hs_jack;
+ struct timer_list btn_check_timer;
+
+ int codec_type;
+ int sysclk;
+ int sysclk_src;
+ int lrck;
+
+ int pll_src;
+ int pll_in;
+ int pll_out;
+
+ int jack_type;
+};
+
+static const struct reg_default rt5668_reg[] = {
+ { 0x0000, 0x0000 },
+ { 0x0001, 0xc8c8 },
+ { 0x0002, 0x8080 },
+ { 0x0003, 0x8000 },
+ { 0x0004, 0xc80a },
+ { 0x0005, 0x0000 },
+ { 0x0006, 0x0000 },
+ { 0x0007, 0x0000 },
+ { 0x000a, 0x0000 },
+ { 0x000b, 0x0000 },
+ { 0x000c, 0x0000 },
+ { 0x000d, 0x0000 },
+ { 0x000f, 0x0808 },
+ { 0x0010, 0x4000 },
+ { 0x0011, 0x0000 },
+ { 0x0012, 0x1404 },
+ { 0x0013, 0x1000 },
+ { 0x0014, 0xa00a },
+ { 0x0015, 0x0404 },
+ { 0x0016, 0x0404 },
+ { 0x0017, 0x0011 },
+ { 0x0018, 0xafaf },
+ { 0x0019, 0xafaf },
+ { 0x001a, 0xafaf },
+ { 0x001b, 0x0011 },
+ { 0x001c, 0x2f2f },
+ { 0x001d, 0x2f2f },
+ { 0x001e, 0x2f2f },
+ { 0x001f, 0x0000 },
+ { 0x0020, 0x0000 },
+ { 0x0021, 0x0000 },
+ { 0x0022, 0x5757 },
+ { 0x0023, 0x0039 },
+ { 0x0024, 0x000b },
+ { 0x0026, 0xc0c0 },
+ { 0x0027, 0xc0c0 },
+ { 0x0028, 0xc0c0 },
+ { 0x0029, 0x8080 },
+ { 0x002a, 0xaaaa },
+ { 0x002b, 0xaaaa },
+ { 0x002c, 0xaba8 },
+ { 0x002d, 0x0000 },
+ { 0x002e, 0x0000 },
+ { 0x002f, 0x0000 },
+ { 0x0030, 0x0000 },
+ { 0x0031, 0x5000 },
+ { 0x0032, 0x0000 },
+ { 0x0033, 0x0000 },
+ { 0x0034, 0x0000 },
+ { 0x0035, 0x0000 },
+ { 0x003a, 0x0000 },
+ { 0x003b, 0x0000 },
+ { 0x003c, 0x00ff },
+ { 0x003d, 0x0000 },
+ { 0x003e, 0x00ff },
+ { 0x003f, 0x0000 },
+ { 0x0040, 0x0000 },
+ { 0x0041, 0x00ff },
+ { 0x0042, 0x0000 },
+ { 0x0043, 0x00ff },
+ { 0x0044, 0x0c0c },
+ { 0x0049, 0xc00b },
+ { 0x004a, 0x0000 },
+ { 0x004b, 0x031f },
+ { 0x004d, 0x0000 },
+ { 0x004e, 0x001f },
+ { 0x004f, 0x0000 },
+ { 0x0050, 0x001f },
+ { 0x0052, 0xf000 },
+ { 0x0061, 0x0000 },
+ { 0x0062, 0x0000 },
+ { 0x0063, 0x003e },
+ { 0x0064, 0x0000 },
+ { 0x0065, 0x0000 },
+ { 0x0066, 0x003f },
+ { 0x0067, 0x0000 },
+ { 0x006b, 0x0000 },
+ { 0x006d, 0xff00 },
+ { 0x006e, 0x2808 },
+ { 0x006f, 0x000a },
+ { 0x0070, 0x8000 },
+ { 0x0071, 0x8000 },
+ { 0x0072, 0x8000 },
+ { 0x0073, 0x7000 },
+ { 0x0074, 0x7770 },
+ { 0x0075, 0x0002 },
+ { 0x0076, 0x0001 },
+ { 0x0078, 0x00f0 },
+ { 0x0079, 0x0000 },
+ { 0x007a, 0x0000 },
+ { 0x007b, 0x0000 },
+ { 0x007c, 0x0000 },
+ { 0x007d, 0x0123 },
+ { 0x007e, 0x4500 },
+ { 0x007f, 0x8003 },
+ { 0x0080, 0x0000 },
+ { 0x0081, 0x0000 },
+ { 0x0082, 0x0000 },
+ { 0x0083, 0x0000 },
+ { 0x0084, 0x0000 },
+ { 0x0085, 0x0000 },
+ { 0x0086, 0x0008 },
+ { 0x0087, 0x0000 },
+ { 0x0088, 0x0000 },
+ { 0x0089, 0x0000 },
+ { 0x008a, 0x0000 },
+ { 0x008b, 0x0000 },
+ { 0x008c, 0x0003 },
+ { 0x008e, 0x0060 },
+ { 0x008f, 0x1000 },
+ { 0x0091, 0x0c26 },
+ { 0x0092, 0x0073 },
+ { 0x0093, 0x0000 },
+ { 0x0094, 0x0080 },
+ { 0x0098, 0x0000 },
+ { 0x0099, 0x0000 },
+ { 0x009a, 0x0007 },
+ { 0x009f, 0x0000 },
+ { 0x00a0, 0x0000 },
+ { 0x00a1, 0x0002 },
+ { 0x00a2, 0x0001 },
+ { 0x00a3, 0x0002 },
+ { 0x00a4, 0x0001 },
+ { 0x00ae, 0x2040 },
+ { 0x00af, 0x0000 },
+ { 0x00b6, 0x0000 },
+ { 0x00b7, 0x0000 },
+ { 0x00b8, 0x0000 },
+ { 0x00b9, 0x0000 },
+ { 0x00ba, 0x0002 },
+ { 0x00bb, 0x0000 },
+ { 0x00be, 0x0000 },
+ { 0x00c0, 0x0000 },
+ { 0x00c1, 0x0aaa },
+ { 0x00c2, 0xaa80 },
+ { 0x00c3, 0x0003 },
+ { 0x00c4, 0x0000 },
+ { 0x00d0, 0x0000 },
+ { 0x00d1, 0x2244 },
+ { 0x00d2, 0x0000 },
+ { 0x00d3, 0x3300 },
+ { 0x00d4, 0x2200 },
+ { 0x00d9, 0x0809 },
+ { 0x00da, 0x0000 },
+ { 0x00db, 0x0008 },
+ { 0x00dc, 0x00c0 },
+ { 0x00dd, 0x6724 },
+ { 0x00de, 0x3131 },
+ { 0x00df, 0x0008 },
+ { 0x00e0, 0x4000 },
+ { 0x00e1, 0x3131 },
+ { 0x00e2, 0x600c },
+ { 0x00ea, 0xb320 },
+ { 0x00eb, 0x0000 },
+ { 0x00ec, 0xb300 },
+ { 0x00ed, 0x0000 },
+ { 0x00ee, 0xb320 },
+ { 0x00ef, 0x0000 },
+ { 0x00f0, 0x0201 },
+ { 0x00f1, 0x0ddd },
+ { 0x00f2, 0x0ddd },
+ { 0x00f6, 0x0000 },
+ { 0x00f7, 0x0000 },
+ { 0x00f8, 0x0000 },
+ { 0x00fa, 0x0000 },
+ { 0x00fb, 0x0000 },
+ { 0x00fc, 0x0000 },
+ { 0x00fd, 0x0000 },
+ { 0x00fe, 0x10ec },
+ { 0x00ff, 0x6451 },
+ { 0x0100, 0xaaaa },
+ { 0x0101, 0x000a },
+ { 0x010a, 0xaaaa },
+ { 0x010b, 0xa0a0 },
+ { 0x010c, 0xaeae },
+ { 0x010d, 0xaaaa },
+ { 0x010e, 0xaaaa },
+ { 0x010f, 0xaaaa },
+ { 0x0110, 0xe002 },
+ { 0x0111, 0xa602 },
+ { 0x0112, 0xaaaa },
+ { 0x0113, 0x2000 },
+ { 0x0117, 0x0f00 },
+ { 0x0125, 0x0420 },
+ { 0x0132, 0x0000 },
+ { 0x0133, 0x0000 },
+ { 0x0136, 0x5555 },
+ { 0x0137, 0x5540 },
+ { 0x0138, 0x3700 },
+ { 0x0139, 0x79a1 },
+ { 0x013a, 0x2020 },
+ { 0x013b, 0x2020 },
+ { 0x013c, 0x2005 },
+ { 0x013f, 0x0000 },
+ { 0x0145, 0x0002 },
+ { 0x0146, 0x0000 },
+ { 0x0147, 0x0000 },
+ { 0x0148, 0x0000 },
+ { 0x0160, 0x4ec0 },
+ { 0x0161, 0x0080 },
+ { 0x0162, 0x0200 },
+ { 0x0163, 0x0800 },
+ { 0x0164, 0x0000 },
+ { 0x0165, 0x0000 },
+ { 0x0166, 0x0000 },
+ { 0x0167, 0x000f },
+ { 0x0168, 0x000f },
+ { 0x0170, 0x4e80 },
+ { 0x0171, 0x0080 },
+ { 0x0172, 0x0200 },
+ { 0x0173, 0x0800 },
+ { 0x0174, 0x00ff },
+ { 0x0175, 0x0000 },
+ { 0x0190, 0x4131 },
+ { 0x0191, 0x4131 },
+ { 0x0192, 0x4131 },
+ { 0x0193, 0x4131 },
+ { 0x0194, 0x0000 },
+ { 0x0195, 0x0000 },
+ { 0x0196, 0x0000 },
+ { 0x0197, 0x0000 },
+ { 0x0198, 0x0000 },
+ { 0x0199, 0x0000 },
+ { 0x01a0, 0x1e64 },
+ { 0x01a1, 0x06a3 },
+ { 0x01a2, 0x0000 },
+ { 0x01a3, 0x0000 },
+ { 0x01a4, 0x0000 },
+ { 0x01a5, 0x0000 },
+ { 0x01a6, 0x0000 },
+ { 0x01a7, 0x0000 },
+ { 0x01a8, 0x0000 },
+ { 0x01a9, 0x0000 },
+ { 0x01aa, 0x0000 },
+ { 0x01ab, 0x0000 },
+ { 0x01b5, 0x0000 },
+ { 0x01b6, 0x01c3 },
+ { 0x01b7, 0x02a0 },
+ { 0x01b8, 0x03e9 },
+ { 0x01b9, 0x1389 },
+ { 0x01ba, 0xc351 },
+ { 0x01bb, 0x0009 },
+ { 0x01bc, 0x0018 },
+ { 0x01bd, 0x002a },
+ { 0x01be, 0x004c },
+ { 0x01bf, 0x0097 },
+ { 0x01c0, 0x433d },
+ { 0x01c1, 0x0000 },
+ { 0x01c2, 0x0000 },
+ { 0x01c3, 0x0000 },
+ { 0x01c4, 0x0000 },
+ { 0x01c5, 0x0000 },
+ { 0x01c6, 0x0000 },
+ { 0x01c7, 0x0000 },
+ { 0x01c8, 0x40af },
+ { 0x01c9, 0x0702 },
+ { 0x01ca, 0x0000 },
+ { 0x01cb, 0x0000 },
+ { 0x01cc, 0x5757 },
+ { 0x01cd, 0x5757 },
+ { 0x01ce, 0x5757 },
+ { 0x01cf, 0x5757 },
+ { 0x01d0, 0x5757 },
+ { 0x01d1, 0x5757 },
+ { 0x01d2, 0x5757 },
+ { 0x01d3, 0x5757 },
+ { 0x01d4, 0x5757 },
+ { 0x01d5, 0x5757 },
+ { 0x01d6, 0x003c },
+ { 0x01da, 0x0000 },
+ { 0x01db, 0x0000 },
+ { 0x01dc, 0x0000 },
+ { 0x01de, 0x7c00 },
+ { 0x01df, 0x0320 },
+ { 0x01e0, 0x06a1 },
+ { 0x01e1, 0x0000 },
+ { 0x01e2, 0x0000 },
+ { 0x01e3, 0x0000 },
+ { 0x01e4, 0x0000 },
+ { 0x01e5, 0x0000 },
+ { 0x01e6, 0x0001 },
+ { 0x01e7, 0x0000 },
+ { 0x01e8, 0x0000 },
+ { 0x01ea, 0x0000 },
+ { 0x01eb, 0x0000 },
+ { 0x01ec, 0x0000 },
+ { 0x01ed, 0x0000 },
+ { 0x01ee, 0x0000 },
+ { 0x01ef, 0x0000 },
+ { 0x01f0, 0x0000 },
+ { 0x01f1, 0x0000 },
+ { 0x01f2, 0x0000 },
+ { 0x01f3, 0x0000 },
+ { 0x01f4, 0x0000 },
+ { 0x0200, 0x0000 },
+ { 0x0201, 0x0000 },
+ { 0x0202, 0x0000 },
+ { 0x0203, 0x0000 },
+ { 0x0204, 0x0000 },
+ { 0x0205, 0x0000 },
+ { 0x0206, 0x0000 },
+ { 0x0207, 0x0000 },
+ { 0x0208, 0x0000 },
+ { 0x0210, 0x60b1 },
+ { 0x0211, 0xa000 },
+ { 0x0212, 0x024c },
+ { 0x0213, 0xf7ff },
+ { 0x0214, 0x024c },
+ { 0x0215, 0x0102 },
+ { 0x0216, 0x00a3 },
+ { 0x0217, 0x0048 },
+ { 0x0218, 0x92c0 },
+ { 0x0219, 0x0000 },
+ { 0x021a, 0x00c8 },
+ { 0x021b, 0x0020 },
+ { 0x02fa, 0x0000 },
+ { 0x02fb, 0x0000 },
+ { 0x02fc, 0x0000 },
+ { 0x02ff, 0x0110 },
+ { 0x0300, 0x001f },
+ { 0x0301, 0x032c },
+ { 0x0302, 0x5f21 },
+ { 0x0303, 0x4000 },
+ { 0x0304, 0x4000 },
+ { 0x0305, 0x06d5 },
+ { 0x0306, 0x8000 },
+ { 0x0307, 0x0700 },
+ { 0x0310, 0x4560 },
+ { 0x0311, 0xa4a8 },
+ { 0x0312, 0x7418 },
+ { 0x0313, 0x0000 },
+ { 0x0314, 0x0006 },
+ { 0x0315, 0xffff },
+ { 0x0316, 0xc400 },
+ { 0x0317, 0x0000 },
+ { 0x0330, 0x00a6 },
+ { 0x0331, 0x04c3 },
+ { 0x0332, 0x27c8 },
+ { 0x0333, 0xbf50 },
+ { 0x0334, 0x0045 },
+ { 0x0335, 0x0007 },
+ { 0x0336, 0x7418 },
+ { 0x0337, 0x0501 },
+ { 0x0338, 0x0000 },
+ { 0x0339, 0x0010 },
+ { 0x033a, 0x1010 },
+ { 0x03c0, 0x7e00 },
+ { 0x03c1, 0x8000 },
+ { 0x03c2, 0x8000 },
+ { 0x03c3, 0x8000 },
+ { 0x03c4, 0x8000 },
+ { 0x03c5, 0x8000 },
+ { 0x03c6, 0x8000 },
+ { 0x03c7, 0x8000 },
+ { 0x03c8, 0x8000 },
+ { 0x03c9, 0x8000 },
+ { 0x03ca, 0x8000 },
+ { 0x03cb, 0x8000 },
+ { 0x03cc, 0x8000 },
+ { 0x03d0, 0x0000 },
+ { 0x03d1, 0x0000 },
+ { 0x03d2, 0x0000 },
+ { 0x03d3, 0x0000 },
+ { 0x03d4, 0x2000 },
+ { 0x03d5, 0x2000 },
+ { 0x03d6, 0x0000 },
+ { 0x03d7, 0x0000 },
+ { 0x03d8, 0x2000 },
+ { 0x03d9, 0x2000 },
+ { 0x03da, 0x2000 },
+ { 0x03db, 0x2000 },
+ { 0x03dc, 0x0000 },
+ { 0x03dd, 0x0000 },
+ { 0x03de, 0x0000 },
+ { 0x03df, 0x2000 },
+ { 0x03e0, 0x0000 },
+ { 0x03e1, 0x0000 },
+ { 0x03e2, 0x0000 },
+ { 0x03e3, 0x0000 },
+ { 0x03e4, 0x0000 },
+ { 0x03e5, 0x0000 },
+ { 0x03e6, 0x0000 },
+ { 0x03e7, 0x0000 },
+ { 0x03e8, 0x0000 },
+ { 0x03e9, 0x0000 },
+ { 0x03ea, 0x0000 },
+ { 0x03eb, 0x0000 },
+ { 0x03ec, 0x0000 },
+ { 0x03ed, 0x0000 },
+ { 0x03ee, 0x0000 },
+ { 0x03ef, 0x0000 },
+ { 0x03f0, 0x0800 },
+ { 0x03f1, 0x0800 },
+ { 0x03f2, 0x0800 },
+ { 0x03f3, 0x0800 },
+ { 0x03fe, 0x0000 },
+ { 0x03ff, 0x0000 },
+ { 0x07f0, 0x0000 },
+ { 0x07fa, 0x0000 },
+};
+
+static const struct reg_default rt5663_reg[] = {
+ { 0x0000, 0x0000 },
+ { 0x0002, 0x0008 },
+ { 0x0005, 0x1000 },
+ { 0x0006, 0x1000 },
+ { 0x000a, 0x0000 },
+ { 0x0010, 0x000f },
+ { 0x0015, 0x42c1 },
+ { 0x0016, 0x0000 },
+ { 0x0018, 0x000b },
+ { 0x0019, 0xafaf },
+ { 0x001c, 0x2f2f },
+ { 0x001f, 0x0000 },
+ { 0x0022, 0x5757 },
+ { 0x0023, 0x0039 },
+ { 0x0026, 0xc0c0 },
+ { 0x0029, 0x8080 },
+ { 0x002a, 0xa0a0 },
+ { 0x002c, 0x000c },
+ { 0x002d, 0x0000 },
+ { 0x0040, 0x0808 },
+ { 0x0061, 0x0000 },
+ { 0x0062, 0x0000 },
+ { 0x0063, 0x003e },
+ { 0x0064, 0x0000 },
+ { 0x0065, 0x0000 },
+ { 0x0066, 0x0000 },
+ { 0x006b, 0x0000 },
+ { 0x006e, 0x0000 },
+ { 0x006f, 0x0000 },
+ { 0x0070, 0x8020 },
+ { 0x0073, 0x1000 },
+ { 0x0074, 0xe400 },
+ { 0x0075, 0x0002 },
+ { 0x0076, 0x0001 },
+ { 0x0077, 0x00f0 },
+ { 0x0078, 0x0000 },
+ { 0x0079, 0x0000 },
+ { 0x007a, 0x0123 },
+ { 0x007b, 0x8003 },
+ { 0x0080, 0x0000 },
+ { 0x0081, 0x0000 },
+ { 0x0082, 0x0000 },
+ { 0x0083, 0x0000 },
+ { 0x0084, 0x0000 },
+ { 0x0086, 0x0008 },
+ { 0x0087, 0x0000 },
+ { 0x008a, 0x0000 },
+ { 0x008b, 0x0000 },
+ { 0x008c, 0x0003 },
+ { 0x008e, 0x0004 },
+ { 0x008f, 0x1000 },
+ { 0x0090, 0x0646 },
+ { 0x0091, 0x0e3e },
+ { 0x0092, 0x1071 },
+ { 0x0093, 0x0000 },
+ { 0x0094, 0x0080 },
+ { 0x0097, 0x0000 },
+ { 0x0098, 0x0000 },
+ { 0x009a, 0x0000 },
+ { 0x009f, 0x0000 },
+ { 0x00ae, 0x2000 },
+ { 0x00af, 0x0000 },
+ { 0x00b6, 0x0000 },
+ { 0x00b7, 0x0000 },
+ { 0x00b8, 0x0000 },
+ { 0x00ba, 0x0000 },
+ { 0x00bb, 0x0000 },
+ { 0x00be, 0x0000 },
+ { 0x00bf, 0x0000 },
+ { 0x00c0, 0x0000 },
+ { 0x00c1, 0x0000 },
+ { 0x00c5, 0x0000 },
+ { 0x00cb, 0xa02f },
+ { 0x00cc, 0x0000 },
+ { 0x00cd, 0x0e02 },
+ { 0x00d9, 0x08f9 },
+ { 0x00db, 0x0008 },
+ { 0x00dc, 0x00c0 },
+ { 0x00dd, 0x6724 },
+ { 0x00de, 0x3131 },
+ { 0x00df, 0x0008 },
+ { 0x00e0, 0x4000 },
+ { 0x00e1, 0x3131 },
+ { 0x00e2, 0x0043 },
+ { 0x00e4, 0x400b },
+ { 0x00e5, 0x8031 },
+ { 0x00e6, 0x3080 },
+ { 0x00e7, 0x4100 },
+ { 0x00e8, 0x1400 },
+ { 0x00e9, 0xe00a },
+ { 0x00ea, 0x0404 },
+ { 0x00eb, 0x0404 },
+ { 0x00ec, 0xb320 },
+ { 0x00ed, 0x0000 },
+ { 0x00f4, 0x0000 },
+ { 0x00f6, 0x0000 },
+ { 0x00f8, 0x0000 },
+ { 0x00fa, 0x8000 },
+ { 0x00fd, 0x0001 },
+ { 0x00fe, 0x10ec },
+ { 0x00ff, 0x6406 },
+ { 0x0100, 0xa0a0 },
+ { 0x0108, 0x4444 },
+ { 0x0109, 0x4444 },
+ { 0x010a, 0xaaaa },
+ { 0x010b, 0x00a0 },
+ { 0x010c, 0x8aaa },
+ { 0x010d, 0xaaaa },
+ { 0x010e, 0x2aaa },
+ { 0x010f, 0x002a },
+ { 0x0110, 0xa0a4 },
+ { 0x0111, 0x4602 },
+ { 0x0112, 0x0101 },
+ { 0x0113, 0x2000 },
+ { 0x0114, 0x0000 },
+ { 0x0116, 0x0000 },
+ { 0x0117, 0x0f00 },
+ { 0x0118, 0x0006 },
+ { 0x0125, 0x2224 },
+ { 0x0126, 0x5550 },
+ { 0x0127, 0x0400 },
+ { 0x0128, 0x7711 },
+ { 0x0132, 0x0004 },
+ { 0x0137, 0x5441 },
+ { 0x0139, 0x79a1 },
+ { 0x013a, 0x30c0 },
+ { 0x013b, 0x2000 },
+ { 0x013c, 0x2005 },
+ { 0x013d, 0x30c0 },
+ { 0x013e, 0x0000 },
+ { 0x0140, 0x3700 },
+ { 0x0141, 0x1f00 },
+ { 0x0144, 0x0000 },
+ { 0x0145, 0x0002 },
+ { 0x0146, 0x0000 },
+ { 0x0160, 0x0e80 },
+ { 0x0161, 0x0020 },
+ { 0x0162, 0x0080 },
+ { 0x0163, 0x0800 },
+ { 0x0164, 0x0000 },
+ { 0x0165, 0x0000 },
+ { 0x0166, 0x0000 },
+ { 0x0167, 0x1417 },
+ { 0x0168, 0x0017 },
+ { 0x0169, 0x0017 },
+ { 0x0180, 0x2000 },
+ { 0x0181, 0x0000 },
+ { 0x0182, 0x0000 },
+ { 0x0183, 0x2000 },
+ { 0x0184, 0x0000 },
+ { 0x0185, 0x0000 },
+ { 0x01b0, 0x4b30 },
+ { 0x01b1, 0x0000 },
+ { 0x01b2, 0xd870 },
+ { 0x01b3, 0x0000 },
+ { 0x01b4, 0x0030 },
+ { 0x01b5, 0x5757 },
+ { 0x01b6, 0x5757 },
+ { 0x01b7, 0x5757 },
+ { 0x01b8, 0x5757 },
+ { 0x01c0, 0x433d },
+ { 0x01c1, 0x0540 },
+ { 0x01c2, 0x0000 },
+ { 0x01c3, 0x0000 },
+ { 0x01c4, 0x0000 },
+ { 0x01c5, 0x0009 },
+ { 0x01c6, 0x0018 },
+ { 0x01c7, 0x002a },
+ { 0x01c8, 0x004c },
+ { 0x01c9, 0x0097 },
+ { 0x01ca, 0x01c3 },
+ { 0x01cb, 0x03e9 },
+ { 0x01cc, 0x1389 },
+ { 0x01cd, 0xc351 },
+ { 0x01ce, 0x0000 },
+ { 0x01cf, 0x0000 },
+ { 0x01d0, 0x0000 },
+ { 0x01d1, 0x0000 },
+ { 0x01d2, 0x0000 },
+ { 0x01d3, 0x003c },
+ { 0x01d4, 0x5757 },
+ { 0x01d5, 0x5757 },
+ { 0x01d6, 0x5757 },
+ { 0x01d7, 0x5757 },
+ { 0x01d8, 0x5757 },
+ { 0x01d9, 0x5757 },
+ { 0x01da, 0x0000 },
+ { 0x01db, 0x0000 },
+ { 0x01dd, 0x0009 },
+ { 0x01de, 0x7f00 },
+ { 0x01df, 0x00c8 },
+ { 0x01e0, 0x0691 },
+ { 0x01e1, 0x0000 },
+ { 0x01e2, 0x0000 },
+ { 0x01e3, 0x0000 },
+ { 0x01e4, 0x0000 },
+ { 0x01e5, 0x0040 },
+ { 0x01e6, 0x0000 },
+ { 0x01e7, 0x0000 },
+ { 0x01e8, 0x0000 },
+ { 0x01ea, 0x0000 },
+ { 0x01eb, 0x0000 },
+ { 0x01ec, 0x0000 },
+ { 0x01ed, 0x0000 },
+ { 0x01ee, 0x0000 },
+ { 0x01ef, 0x0000 },
+ { 0x01f0, 0x0000 },
+ { 0x01f1, 0x0000 },
+ { 0x01f2, 0x0000 },
+ { 0x0200, 0x0000 },
+ { 0x0201, 0x2244 },
+ { 0x0202, 0xaaaa },
+ { 0x0250, 0x8010 },
+ { 0x0251, 0x0000 },
+ { 0x0252, 0x028a },
+ { 0x02fa, 0x0000 },
+ { 0x02fb, 0x0000 },
+ { 0x02fc, 0x0000 },
+ { 0x0300, 0x0000 },
+ { 0x03d0, 0x0000 },
+ { 0x03d1, 0x0000 },
+ { 0x03d2, 0x0000 },
+ { 0x03d3, 0x0000 },
+ { 0x03d4, 0x2000 },
+ { 0x03d5, 0x2000 },
+ { 0x03d6, 0x0000 },
+ { 0x03d7, 0x0000 },
+ { 0x03d8, 0x2000 },
+ { 0x03d9, 0x2000 },
+ { 0x03da, 0x2000 },
+ { 0x03db, 0x2000 },
+ { 0x03dc, 0x0000 },
+ { 0x03dd, 0x0000 },
+ { 0x03de, 0x0000 },
+ { 0x03df, 0x2000 },
+ { 0x03e0, 0x0000 },
+ { 0x03e1, 0x0000 },
+ { 0x03e2, 0x0000 },
+ { 0x03e3, 0x0000 },
+ { 0x03e4, 0x0000 },
+ { 0x03e5, 0x0000 },
+ { 0x03e6, 0x0000 },
+ { 0x03e7, 0x0000 },
+ { 0x03e8, 0x0000 },
+ { 0x03e9, 0x0000 },
+ { 0x03ea, 0x0000 },
+ { 0x03eb, 0x0000 },
+ { 0x03ec, 0x0000 },
+ { 0x03ed, 0x0000 },
+ { 0x03ee, 0x0000 },
+ { 0x03ef, 0x0000 },
+ { 0x03f0, 0x0800 },
+ { 0x03f1, 0x0800 },
+ { 0x03f2, 0x0800 },
+ { 0x03f3, 0x0800 },
+};
+
+static bool rt5663_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5663_RESET:
+ case RT5663_SIL_DET_CTL:
+ case RT5663_HP_IMP_GAIN_2:
+ case RT5663_AD_DA_MIXER:
+ case RT5663_FRAC_DIV_2:
+ case RT5663_MICBIAS_1:
+ case RT5663_ASRC_11_2:
+ case RT5663_ADC_EQ_1:
+ case RT5663_INT_ST_1:
+ case RT5663_INT_ST_2:
+ case RT5663_GPIO_STA:
+ case RT5663_SIN_GEN_1:
+ case RT5663_IL_CMD_1:
+ case RT5663_IL_CMD_5:
+ case RT5663_IL_CMD_PWRSAV1:
+ case RT5663_EM_JACK_TYPE_1:
+ case RT5663_EM_JACK_TYPE_2:
+ case RT5663_EM_JACK_TYPE_3:
+ case RT5663_JD_CTRL2:
+ case RT5663_VENDOR_ID:
+ case RT5663_VENDOR_ID_1:
+ case RT5663_VENDOR_ID_2:
+ case RT5663_PLL_INT_REG:
+ case RT5663_SOFT_RAMP:
+ case RT5663_STO_DRE_1:
+ case RT5663_STO_DRE_5:
+ case RT5663_STO_DRE_6:
+ case RT5663_STO_DRE_7:
+ case RT5663_MIC_DECRO_1:
+ case RT5663_MIC_DECRO_4:
+ case RT5663_HP_IMP_SEN_1:
+ case RT5663_HP_IMP_SEN_3:
+ case RT5663_HP_IMP_SEN_4:
+ case RT5663_HP_IMP_SEN_5:
+ case RT5663_HP_CALIB_1_1:
+ case RT5663_HP_CALIB_9:
+ case RT5663_HP_CALIB_ST1:
+ case RT5663_HP_CALIB_ST2:
+ case RT5663_HP_CALIB_ST3:
+ case RT5663_HP_CALIB_ST4:
+ case RT5663_HP_CALIB_ST5:
+ case RT5663_HP_CALIB_ST6:
+ case RT5663_HP_CALIB_ST7:
+ case RT5663_HP_CALIB_ST8:
+ case RT5663_HP_CALIB_ST9:
+ case RT5663_ANA_JD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5663_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5663_RESET:
+ case RT5663_HP_OUT_EN:
+ case RT5663_HP_LCH_DRE:
+ case RT5663_HP_RCH_DRE:
+ case RT5663_CALIB_BST:
+ case RT5663_RECMIX:
+ case RT5663_SIL_DET_CTL:
+ case RT5663_PWR_SAV_SILDET:
+ case RT5663_SIDETONE_CTL:
+ case RT5663_STO1_DAC_DIG_VOL:
+ case RT5663_STO1_ADC_DIG_VOL:
+ case RT5663_STO1_BOOST:
+ case RT5663_HP_IMP_GAIN_1:
+ case RT5663_HP_IMP_GAIN_2:
+ case RT5663_STO1_ADC_MIXER:
+ case RT5663_AD_DA_MIXER:
+ case RT5663_STO_DAC_MIXER:
+ case RT5663_DIG_SIDE_MIXER:
+ case RT5663_BYPASS_STO_DAC:
+ case RT5663_CALIB_REC_MIX:
+ case RT5663_PWR_DIG_1:
+ case RT5663_PWR_DIG_2:
+ case RT5663_PWR_ANLG_1:
+ case RT5663_PWR_ANLG_2:
+ case RT5663_PWR_ANLG_3:
+ case RT5663_PWR_MIXER:
+ case RT5663_SIG_CLK_DET:
+ case RT5663_PRE_DIV_GATING_1:
+ case RT5663_PRE_DIV_GATING_2:
+ case RT5663_I2S1_SDP:
+ case RT5663_ADDA_CLK_1:
+ case RT5663_ADDA_RST:
+ case RT5663_FRAC_DIV_1:
+ case RT5663_FRAC_DIV_2:
+ case RT5663_TDM_1:
+ case RT5663_TDM_2:
+ case RT5663_TDM_3:
+ case RT5663_TDM_4:
+ case RT5663_TDM_5:
+ case RT5663_GLB_CLK:
+ case RT5663_PLL_1:
+ case RT5663_PLL_2:
+ case RT5663_ASRC_1:
+ case RT5663_ASRC_2:
+ case RT5663_ASRC_4:
+ case RT5663_DUMMY_REG:
+ case RT5663_ASRC_8:
+ case RT5663_ASRC_9:
+ case RT5663_ASRC_11:
+ case RT5663_DEPOP_1:
+ case RT5663_DEPOP_2:
+ case RT5663_DEPOP_3:
+ case RT5663_HP_CHARGE_PUMP_1:
+ case RT5663_HP_CHARGE_PUMP_2:
+ case RT5663_MICBIAS_1:
+ case RT5663_RC_CLK:
+ case RT5663_ASRC_11_2:
+ case RT5663_DUMMY_REG_2:
+ case RT5663_REC_PATH_GAIN:
+ case RT5663_AUTO_1MRC_CLK:
+ case RT5663_ADC_EQ_1:
+ case RT5663_ADC_EQ_2:
+ case RT5663_IRQ_1:
+ case RT5663_IRQ_2:
+ case RT5663_IRQ_3:
+ case RT5663_IRQ_4:
+ case RT5663_IRQ_5:
+ case RT5663_INT_ST_1:
+ case RT5663_INT_ST_2:
+ case RT5663_GPIO_1:
+ case RT5663_GPIO_2:
+ case RT5663_GPIO_STA:
+ case RT5663_SIN_GEN_1:
+ case RT5663_SIN_GEN_2:
+ case RT5663_SIN_GEN_3:
+ case RT5663_SOF_VOL_ZC1:
+ case RT5663_IL_CMD_1:
+ case RT5663_IL_CMD_2:
+ case RT5663_IL_CMD_3:
+ case RT5663_IL_CMD_4:
+ case RT5663_IL_CMD_5:
+ case RT5663_IL_CMD_6:
+ case RT5663_IL_CMD_7:
+ case RT5663_IL_CMD_8:
+ case RT5663_IL_CMD_PWRSAV1:
+ case RT5663_IL_CMD_PWRSAV2:
+ case RT5663_EM_JACK_TYPE_1:
+ case RT5663_EM_JACK_TYPE_2:
+ case RT5663_EM_JACK_TYPE_3:
+ case RT5663_EM_JACK_TYPE_4:
+ case RT5663_EM_JACK_TYPE_5:
+ case RT5663_EM_JACK_TYPE_6:
+ case RT5663_STO1_HPF_ADJ1:
+ case RT5663_STO1_HPF_ADJ2:
+ case RT5663_FAST_OFF_MICBIAS:
+ case RT5663_JD_CTRL1:
+ case RT5663_JD_CTRL2:
+ case RT5663_DIG_MISC:
+ case RT5663_VENDOR_ID:
+ case RT5663_VENDOR_ID_1:
+ case RT5663_VENDOR_ID_2:
+ case RT5663_DIG_VOL_ZCD:
+ case RT5663_ANA_BIAS_CUR_1:
+ case RT5663_ANA_BIAS_CUR_2:
+ case RT5663_ANA_BIAS_CUR_3:
+ case RT5663_ANA_BIAS_CUR_4:
+ case RT5663_ANA_BIAS_CUR_5:
+ case RT5663_ANA_BIAS_CUR_6:
+ case RT5663_BIAS_CUR_5:
+ case RT5663_BIAS_CUR_6:
+ case RT5663_BIAS_CUR_7:
+ case RT5663_BIAS_CUR_8:
+ case RT5663_DACREF_LDO:
+ case RT5663_DUMMY_REG_3:
+ case RT5663_BIAS_CUR_9:
+ case RT5663_DUMMY_REG_4:
+ case RT5663_VREFADJ_OP:
+ case RT5663_VREF_RECMIX:
+ case RT5663_CHARGE_PUMP_1:
+ case RT5663_CHARGE_PUMP_1_2:
+ case RT5663_CHARGE_PUMP_1_3:
+ case RT5663_CHARGE_PUMP_2:
+ case RT5663_DIG_IN_PIN1:
+ case RT5663_PAD_DRV_CTL:
+ case RT5663_PLL_INT_REG:
+ case RT5663_CHOP_DAC_L:
+ case RT5663_CHOP_ADC:
+ case RT5663_CALIB_ADC:
+ case RT5663_CHOP_DAC_R:
+ case RT5663_DUMMY_CTL_DACLR:
+ case RT5663_DUMMY_REG_5:
+ case RT5663_SOFT_RAMP:
+ case RT5663_TEST_MODE_1:
+ case RT5663_TEST_MODE_2:
+ case RT5663_TEST_MODE_3:
+ case RT5663_STO_DRE_1:
+ case RT5663_STO_DRE_2:
+ case RT5663_STO_DRE_3:
+ case RT5663_STO_DRE_4:
+ case RT5663_STO_DRE_5:
+ case RT5663_STO_DRE_6:
+ case RT5663_STO_DRE_7:
+ case RT5663_STO_DRE_8:
+ case RT5663_STO_DRE_9:
+ case RT5663_STO_DRE_10:
+ case RT5663_MIC_DECRO_1:
+ case RT5663_MIC_DECRO_2:
+ case RT5663_MIC_DECRO_3:
+ case RT5663_MIC_DECRO_4:
+ case RT5663_MIC_DECRO_5:
+ case RT5663_MIC_DECRO_6:
+ case RT5663_HP_DECRO_1:
+ case RT5663_HP_DECRO_2:
+ case RT5663_HP_DECRO_3:
+ case RT5663_HP_DECRO_4:
+ case RT5663_HP_DECOUP:
+ case RT5663_HP_IMP_SEN_MAP8:
+ case RT5663_HP_IMP_SEN_MAP9:
+ case RT5663_HP_IMP_SEN_MAP10:
+ case RT5663_HP_IMP_SEN_MAP11:
+ case RT5663_HP_IMP_SEN_1:
+ case RT5663_HP_IMP_SEN_2:
+ case RT5663_HP_IMP_SEN_3:
+ case RT5663_HP_IMP_SEN_4:
+ case RT5663_HP_IMP_SEN_5:
+ case RT5663_HP_IMP_SEN_6:
+ case RT5663_HP_IMP_SEN_7:
+ case RT5663_HP_IMP_SEN_8:
+ case RT5663_HP_IMP_SEN_9:
+ case RT5663_HP_IMP_SEN_10:
+ case RT5663_HP_IMP_SEN_11:
+ case RT5663_HP_IMP_SEN_12:
+ case RT5663_HP_IMP_SEN_13:
+ case RT5663_HP_IMP_SEN_14:
+ case RT5663_HP_IMP_SEN_15:
+ case RT5663_HP_IMP_SEN_16:
+ case RT5663_HP_IMP_SEN_17:
+ case RT5663_HP_IMP_SEN_18:
+ case RT5663_HP_IMP_SEN_19:
+ case RT5663_HP_IMPSEN_DIG5:
+ case RT5663_HP_IMPSEN_MAP1:
+ case RT5663_HP_IMPSEN_MAP2:
+ case RT5663_HP_IMPSEN_MAP3:
+ case RT5663_HP_IMPSEN_MAP4:
+ case RT5663_HP_IMPSEN_MAP5:
+ case RT5663_HP_IMPSEN_MAP7:
+ case RT5663_HP_LOGIC_1:
+ case RT5663_HP_LOGIC_2:
+ case RT5663_HP_CALIB_1:
+ case RT5663_HP_CALIB_1_1:
+ case RT5663_HP_CALIB_2:
+ case RT5663_HP_CALIB_3:
+ case RT5663_HP_CALIB_4:
+ case RT5663_HP_CALIB_5:
+ case RT5663_HP_CALIB_5_1:
+ case RT5663_HP_CALIB_6:
+ case RT5663_HP_CALIB_7:
+ case RT5663_HP_CALIB_9:
+ case RT5663_HP_CALIB_10:
+ case RT5663_HP_CALIB_11:
+ case RT5663_HP_CALIB_ST1:
+ case RT5663_HP_CALIB_ST2:
+ case RT5663_HP_CALIB_ST3:
+ case RT5663_HP_CALIB_ST4:
+ case RT5663_HP_CALIB_ST5:
+ case RT5663_HP_CALIB_ST6:
+ case RT5663_HP_CALIB_ST7:
+ case RT5663_HP_CALIB_ST8:
+ case RT5663_HP_CALIB_ST9:
+ case RT5663_HP_AMP_DET:
+ case RT5663_DUMMY_REG_6:
+ case RT5663_HP_BIAS:
+ case RT5663_CBJ_1:
+ case RT5663_CBJ_2:
+ case RT5663_CBJ_3:
+ case RT5663_DUMMY_1:
+ case RT5663_DUMMY_2:
+ case RT5663_DUMMY_3:
+ case RT5663_ANA_JD:
+ case RT5663_ADC_LCH_LPF1_A1:
+ case RT5663_ADC_RCH_LPF1_A1:
+ case RT5663_ADC_LCH_LPF1_H0:
+ case RT5663_ADC_RCH_LPF1_H0:
+ case RT5663_ADC_LCH_BPF1_A1:
+ case RT5663_ADC_RCH_BPF1_A1:
+ case RT5663_ADC_LCH_BPF1_A2:
+ case RT5663_ADC_RCH_BPF1_A2:
+ case RT5663_ADC_LCH_BPF1_H0:
+ case RT5663_ADC_RCH_BPF1_H0:
+ case RT5663_ADC_LCH_BPF2_A1:
+ case RT5663_ADC_RCH_BPF2_A1:
+ case RT5663_ADC_LCH_BPF2_A2:
+ case RT5663_ADC_RCH_BPF2_A2:
+ case RT5663_ADC_LCH_BPF2_H0:
+ case RT5663_ADC_RCH_BPF2_H0:
+ case RT5663_ADC_LCH_BPF3_A1:
+ case RT5663_ADC_RCH_BPF3_A1:
+ case RT5663_ADC_LCH_BPF3_A2:
+ case RT5663_ADC_RCH_BPF3_A2:
+ case RT5663_ADC_LCH_BPF3_H0:
+ case RT5663_ADC_RCH_BPF3_H0:
+ case RT5663_ADC_LCH_BPF4_A1:
+ case RT5663_ADC_RCH_BPF4_A1:
+ case RT5663_ADC_LCH_BPF4_A2:
+ case RT5663_ADC_RCH_BPF4_A2:
+ case RT5663_ADC_LCH_BPF4_H0:
+ case RT5663_ADC_RCH_BPF4_H0:
+ case RT5663_ADC_LCH_HPF1_A1:
+ case RT5663_ADC_RCH_HPF1_A1:
+ case RT5663_ADC_LCH_HPF1_H0:
+ case RT5663_ADC_RCH_HPF1_H0:
+ case RT5663_ADC_EQ_PRE_VOL_L:
+ case RT5663_ADC_EQ_PRE_VOL_R:
+ case RT5663_ADC_EQ_POST_VOL_L:
+ case RT5663_ADC_EQ_POST_VOL_R:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5663_RESET:
+ case RT5668_CBJ_TYPE_2:
+ case RT5668_PDM_OUT_CTL:
+ case RT5668_PDM_I2C_DATA_CTL1:
+ case RT5668_PDM_I2C_DATA_CTL4:
+ case RT5668_ALC_BK_GAIN:
+ case RT5663_PLL_2:
+ case RT5663_MICBIAS_1:
+ case RT5663_ADC_EQ_1:
+ case RT5663_INT_ST_1:
+ case RT5668_GPIO_STA:
+ case RT5663_IL_CMD_1:
+ case RT5663_IL_CMD_5:
+ case RT5668_A_JD_CTRL:
+ case RT5663_JD_CTRL2:
+ case RT5663_VENDOR_ID:
+ case RT5663_VENDOR_ID_1:
+ case RT5663_VENDOR_ID_2:
+ case RT5663_STO_DRE_1:
+ case RT5663_STO_DRE_5:
+ case RT5663_STO_DRE_6:
+ case RT5663_STO_DRE_7:
+ case RT5668_MONO_DYNA_6:
+ case RT5668_STO1_SIL_DET:
+ case RT5668_MONOL_SIL_DET:
+ case RT5668_MONOR_SIL_DET:
+ case RT5668_STO2_DAC_SIL:
+ case RT5668_MONO_AMP_CAL_ST1:
+ case RT5668_MONO_AMP_CAL_ST2:
+ case RT5668_MONO_AMP_CAL_ST3:
+ case RT5668_MONO_AMP_CAL_ST4:
+ case RT5663_HP_IMP_SEN_2:
+ case RT5663_HP_IMP_SEN_3:
+ case RT5663_HP_IMP_SEN_4:
+ case RT5663_HP_IMP_SEN_10:
+ case RT5663_HP_CALIB_1:
+ case RT5663_HP_CALIB_10:
+ case RT5663_HP_CALIB_ST1:
+ case RT5663_HP_CALIB_ST4:
+ case RT5663_HP_CALIB_ST5:
+ case RT5663_HP_CALIB_ST6:
+ case RT5663_HP_CALIB_ST7:
+ case RT5663_HP_CALIB_ST8:
+ case RT5663_HP_CALIB_ST9:
+ case RT5668_HP_CALIB_ST10:
+ case RT5668_HP_CALIB_ST11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5668_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5668_LOUT_CTRL:
+ case RT5668_HP_AMP_2:
+ case RT5668_MONO_OUT:
+ case RT5668_MONO_GAIN:
+ case RT5668_AEC_BST:
+ case RT5668_IN1_IN2:
+ case RT5668_IN3_IN4:
+ case RT5668_INL1_INR1:
+ case RT5668_CBJ_TYPE_2:
+ case RT5668_CBJ_TYPE_3:
+ case RT5668_CBJ_TYPE_4:
+ case RT5668_CBJ_TYPE_5:
+ case RT5668_CBJ_TYPE_8:
+ case RT5668_DAC3_DIG_VOL:
+ case RT5668_DAC3_CTRL:
+ case RT5668_MONO_ADC_DIG_VOL:
+ case RT5668_STO2_ADC_DIG_VOL:
+ case RT5668_MONO_ADC_BST_GAIN:
+ case RT5668_STO2_ADC_BST_GAIN:
+ case RT5668_SIDETONE_CTRL:
+ case RT5668_MONO1_ADC_MIXER:
+ case RT5668_STO2_ADC_MIXER:
+ case RT5668_MONO_DAC_MIXER:
+ case RT5668_DAC2_SRC_CTRL:
+ case RT5668_IF_3_4_DATA_CTL:
+ case RT5668_IF_5_DATA_CTL:
+ case RT5668_PDM_OUT_CTL:
+ case RT5668_PDM_I2C_DATA_CTL1:
+ case RT5668_PDM_I2C_DATA_CTL2:
+ case RT5668_PDM_I2C_DATA_CTL3:
+ case RT5668_PDM_I2C_DATA_CTL4:
+ case RT5668_RECMIX1_NEW:
+ case RT5668_RECMIX1L_0:
+ case RT5668_RECMIX1L:
+ case RT5668_RECMIX1R_0:
+ case RT5668_RECMIX1R:
+ case RT5668_RECMIX2_NEW:
+ case RT5668_RECMIX2_L_2:
+ case RT5668_RECMIX2_R:
+ case RT5668_RECMIX2_R_2:
+ case RT5668_CALIB_REC_LR:
+ case RT5668_ALC_BK_GAIN:
+ case RT5668_MONOMIX_GAIN:
+ case RT5668_MONOMIX_IN_GAIN:
+ case RT5668_OUT_MIXL_GAIN:
+ case RT5668_OUT_LMIX_IN_GAIN:
+ case RT5668_OUT_RMIX_IN_GAIN:
+ case RT5668_OUT_RMIX_IN_GAIN1:
+ case RT5668_LOUT_MIXER_CTRL:
+ case RT5668_PWR_VOL:
+ case RT5668_ADCDAC_RST:
+ case RT5668_I2S34_SDP:
+ case RT5668_I2S5_SDP:
+ case RT5668_TDM_5:
+ case RT5668_TDM_6:
+ case RT5668_TDM_7:
+ case RT5668_TDM_8:
+ case RT5668_ASRC_3:
+ case RT5668_ASRC_6:
+ case RT5668_ASRC_7:
+ case RT5668_PLL_TRK_13:
+ case RT5668_I2S_M_CLK_CTL:
+ case RT5668_FDIV_I2S34_M_CLK:
+ case RT5668_FDIV_I2S34_M_CLK2:
+ case RT5668_FDIV_I2S5_M_CLK:
+ case RT5668_FDIV_I2S5_M_CLK2:
+ case RT5668_IRQ_4:
+ case RT5668_GPIO_3:
+ case RT5668_GPIO_4:
+ case RT5668_GPIO_STA:
+ case RT5668_HP_AMP_DET1:
+ case RT5668_HP_AMP_DET2:
+ case RT5668_HP_AMP_DET3:
+ case RT5668_MID_BD_HP_AMP:
+ case RT5668_LOW_BD_HP_AMP:
+ case RT5668_SOF_VOL_ZC2:
+ case RT5668_ADC_STO2_ADJ1:
+ case RT5668_ADC_STO2_ADJ2:
+ case RT5668_A_JD_CTRL:
+ case RT5668_JD1_TRES_CTRL:
+ case RT5668_JD2_TRES_CTRL:
+ case RT5668_JD_CTRL2:
+ case RT5668_DUM_REG_2:
+ case RT5668_DUM_REG_3:
+ case RT5663_VENDOR_ID:
+ case RT5663_VENDOR_ID_1:
+ case RT5663_VENDOR_ID_2:
+ case RT5668_DACADC_DIG_VOL2:
+ case RT5668_DIG_IN_PIN2:
+ case RT5668_PAD_DRV_CTL1:
+ case RT5668_SOF_RAM_DEPOP:
+ case RT5668_VOL_TEST:
+ case RT5668_TEST_MODE_3:
+ case RT5668_TEST_MODE_4:
+ case RT5663_STO_DRE_9:
+ case RT5668_MONO_DYNA_1:
+ case RT5668_MONO_DYNA_2:
+ case RT5668_MONO_DYNA_3:
+ case RT5668_MONO_DYNA_4:
+ case RT5668_MONO_DYNA_5:
+ case RT5668_MONO_DYNA_6:
+ case RT5668_STO1_SIL_DET:
+ case RT5668_MONOL_SIL_DET:
+ case RT5668_MONOR_SIL_DET:
+ case RT5668_STO2_DAC_SIL:
+ case RT5668_PWR_SAV_CTL1:
+ case RT5668_PWR_SAV_CTL2:
+ case RT5668_PWR_SAV_CTL3:
+ case RT5668_PWR_SAV_CTL4:
+ case RT5668_PWR_SAV_CTL5:
+ case RT5668_PWR_SAV_CTL6:
+ case RT5668_MONO_AMP_CAL1:
+ case RT5668_MONO_AMP_CAL2:
+ case RT5668_MONO_AMP_CAL3:
+ case RT5668_MONO_AMP_CAL4:
+ case RT5668_MONO_AMP_CAL5:
+ case RT5668_MONO_AMP_CAL6:
+ case RT5668_MONO_AMP_CAL7:
+ case RT5668_MONO_AMP_CAL_ST1:
+ case RT5668_MONO_AMP_CAL_ST2:
+ case RT5668_MONO_AMP_CAL_ST3:
+ case RT5668_MONO_AMP_CAL_ST4:
+ case RT5668_MONO_AMP_CAL_ST5:
+ case RT5668_HP_IMP_SEN_13:
+ case RT5668_HP_IMP_SEN_14:
+ case RT5668_HP_IMP_SEN_6:
+ case RT5668_HP_IMP_SEN_7:
+ case RT5668_HP_IMP_SEN_8:
+ case RT5668_HP_IMP_SEN_9:
+ case RT5668_HP_IMP_SEN_10:
+ case RT5668_HP_LOGIC_3:
+ case RT5668_HP_CALIB_ST10:
+ case RT5668_HP_CALIB_ST11:
+ case RT5668_PRO_REG_TBL_4:
+ case RT5668_PRO_REG_TBL_5:
+ case RT5668_PRO_REG_TBL_6:
+ case RT5668_PRO_REG_TBL_7:
+ case RT5668_PRO_REG_TBL_8:
+ case RT5668_PRO_REG_TBL_9:
+ case RT5668_SAR_ADC_INL_1:
+ case RT5668_SAR_ADC_INL_2:
+ case RT5668_SAR_ADC_INL_3:
+ case RT5668_SAR_ADC_INL_4:
+ case RT5668_SAR_ADC_INL_5:
+ case RT5668_SAR_ADC_INL_6:
+ case RT5668_SAR_ADC_INL_7:
+ case RT5668_SAR_ADC_INL_8:
+ case RT5668_SAR_ADC_INL_9:
+ case RT5668_SAR_ADC_INL_10:
+ case RT5668_SAR_ADC_INL_11:
+ case RT5668_SAR_ADC_INL_12:
+ case RT5668_DRC_CTRL_1:
+ case RT5668_DRC1_CTRL_2:
+ case RT5668_DRC1_CTRL_3:
+ case RT5668_DRC1_CTRL_4:
+ case RT5668_DRC1_CTRL_5:
+ case RT5668_DRC1_CTRL_6:
+ case RT5668_DRC1_HD_CTRL_1:
+ case RT5668_DRC1_HD_CTRL_2:
+ case RT5668_DRC1_PRI_REG_1:
+ case RT5668_DRC1_PRI_REG_2:
+ case RT5668_DRC1_PRI_REG_3:
+ case RT5668_DRC1_PRI_REG_4:
+ case RT5668_DRC1_PRI_REG_5:
+ case RT5668_DRC1_PRI_REG_6:
+ case RT5668_DRC1_PRI_REG_7:
+ case RT5668_DRC1_PRI_REG_8:
+ case RT5668_ALC_PGA_CTL_1:
+ case RT5668_ALC_PGA_CTL_2:
+ case RT5668_ALC_PGA_CTL_3:
+ case RT5668_ALC_PGA_CTL_4:
+ case RT5668_ALC_PGA_CTL_5:
+ case RT5668_ALC_PGA_CTL_6:
+ case RT5668_ALC_PGA_CTL_7:
+ case RT5668_ALC_PGA_CTL_8:
+ case RT5668_ALC_PGA_REG_1:
+ case RT5668_ALC_PGA_REG_2:
+ case RT5668_ALC_PGA_REG_3:
+ case RT5668_ADC_EQ_RECOV_1:
+ case RT5668_ADC_EQ_RECOV_2:
+ case RT5668_ADC_EQ_RECOV_3:
+ case RT5668_ADC_EQ_RECOV_4:
+ case RT5668_ADC_EQ_RECOV_5:
+ case RT5668_ADC_EQ_RECOV_6:
+ case RT5668_ADC_EQ_RECOV_7:
+ case RT5668_ADC_EQ_RECOV_8:
+ case RT5668_ADC_EQ_RECOV_9:
+ case RT5668_ADC_EQ_RECOV_10:
+ case RT5668_ADC_EQ_RECOV_11:
+ case RT5668_ADC_EQ_RECOV_12:
+ case RT5668_ADC_EQ_RECOV_13:
+ case RT5668_VID_HIDDEN:
+ case RT5668_VID_CUSTOMER:
+ case RT5668_SCAN_MODE:
+ case RT5668_I2C_BYPA:
+ return true;
+ case RT5663_TDM_1:
+ case RT5663_DEPOP_3:
+ case RT5663_ASRC_11_2:
+ case RT5663_INT_ST_2:
+ case RT5663_GPIO_STA:
+ case RT5663_SIN_GEN_1:
+ case RT5663_SIN_GEN_2:
+ case RT5663_SIN_GEN_3:
+ case RT5663_IL_CMD_PWRSAV1:
+ case RT5663_IL_CMD_PWRSAV2:
+ case RT5663_EM_JACK_TYPE_1:
+ case RT5663_EM_JACK_TYPE_2:
+ case RT5663_EM_JACK_TYPE_3:
+ case RT5663_EM_JACK_TYPE_4:
+ case RT5663_FAST_OFF_MICBIAS:
+ case RT5663_ANA_BIAS_CUR_1:
+ case RT5663_ANA_BIAS_CUR_2:
+ case RT5663_BIAS_CUR_9:
+ case RT5663_DUMMY_REG_4:
+ case RT5663_VREF_RECMIX:
+ case RT5663_CHARGE_PUMP_1_2:
+ case RT5663_CHARGE_PUMP_1_3:
+ case RT5663_CHARGE_PUMP_2:
+ case RT5663_CHOP_DAC_R:
+ case RT5663_DUMMY_CTL_DACLR:
+ case RT5663_DUMMY_REG_5:
+ case RT5663_SOFT_RAMP:
+ case RT5663_TEST_MODE_1:
+ case RT5663_STO_DRE_10:
+ case RT5663_MIC_DECRO_1:
+ case RT5663_MIC_DECRO_2:
+ case RT5663_MIC_DECRO_3:
+ case RT5663_MIC_DECRO_4:
+ case RT5663_MIC_DECRO_5:
+ case RT5663_MIC_DECRO_6:
+ case RT5663_HP_DECRO_1:
+ case RT5663_HP_DECRO_2:
+ case RT5663_HP_DECRO_3:
+ case RT5663_HP_DECRO_4:
+ case RT5663_HP_DECOUP:
+ case RT5663_HP_IMPSEN_MAP4:
+ case RT5663_HP_IMPSEN_MAP5:
+ case RT5663_HP_IMPSEN_MAP7:
+ case RT5663_HP_CALIB_1:
+ case RT5663_CBJ_1:
+ case RT5663_CBJ_2:
+ case RT5663_CBJ_3:
+ return false;
+ default:
+ return rt5663_readable_register(dev, reg);
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(rt5663_hp_vol_tlv, -2400, 150, 0);
+static const DECLARE_TLV_DB_SCALE(rt5668_hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
+
+/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+static const DECLARE_TLV_DB_RANGE(in_bst_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
+ 6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
+ 8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
+
+/* Interface data select */
+static const char * const rt5663_if1_adc_data_select[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5663_if1_adc_enum, RT5663_TDM_2,
+ RT5663_DATA_SWAP_ADCDAT1_SHIFT, rt5663_if1_adc_data_select);
+
+static void rt5663_enable_push_button_irq(struct snd_soc_codec *codec,
+ bool enable)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ if (enable) {
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_EN);
+ /* reset in-line command */
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_RESET_4BTN_INL_MASK,
+ RT5668_RESET_4BTN_INL_RESET);
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_RESET_4BTN_INL_MASK,
+ RT5668_RESET_4BTN_INL_NOR);
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ snd_soc_update_bits(codec, RT5663_IRQ_3,
+ RT5668_EN_IRQ_INLINE_MASK,
+ RT5668_EN_IRQ_INLINE_NOR);
+ break;
+ case CODEC_TYPE_RT5663:
+ snd_soc_update_bits(codec, RT5663_IRQ_2,
+ RT5663_EN_IRQ_INLINE_MASK,
+ RT5663_EN_IRQ_INLINE_NOR);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+ } else {
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ snd_soc_update_bits(codec, RT5663_IRQ_3,
+ RT5668_EN_IRQ_INLINE_MASK,
+ RT5668_EN_IRQ_INLINE_BYP);
+ break;
+ case CODEC_TYPE_RT5663:
+ snd_soc_update_bits(codec, RT5663_IRQ_2,
+ RT5663_EN_IRQ_INLINE_MASK,
+ RT5663_EN_IRQ_INLINE_BYP);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_DIS);
+ /* reset in-line command */
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_RESET_4BTN_INL_MASK,
+ RT5668_RESET_4BTN_INL_RESET);
+ snd_soc_update_bits(codec, RT5663_IL_CMD_6,
+ RT5668_RESET_4BTN_INL_MASK,
+ RT5668_RESET_4BTN_INL_NOR);
+ }
+}
+
+/**
+ * rt5668_jack_detect - Detect headset.
+ * @codec: SoC audio codec device.
+ * @jack_insert: Jack insert or not.
+ *
+ * Detect whether a headset is present when the jack is inserted.
+ *
+ * Returns detect status.
+ */
+static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+ int val, i = 0, sleep_time[5] = {300, 150, 100, 50, 30};
+
+ dev_dbg(codec->dev, "%s jack_insert:%d\n", __func__, jack_insert);
+ if (jack_insert) {
+ snd_soc_write(codec, RT5668_CBJ_TYPE_2, 0x8040);
+ snd_soc_write(codec, RT5668_CBJ_TYPE_3, 0x1484);
+
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS2");
+ snd_soc_dapm_force_enable_pin(dapm, "Mic Det Power");
+ snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
+ snd_soc_dapm_sync(dapm);
+ snd_soc_update_bits(codec, RT5663_RC_CLK,
+ RT5668_DIG_1M_CLK_MASK, RT5668_DIG_1M_CLK_EN);
+ snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x8);
+
+ while (i < 5) {
+ msleep(sleep_time[i]);
+ val = snd_soc_read(codec, RT5668_CBJ_TYPE_2) & 0x0003;
+ if (val == 0x1 || val == 0x2 || val == 0x3)
+ break;
+ dev_dbg(codec->dev, "%s: MX-0011 val=%x sleep %d\n",
+ __func__, val, sleep_time[i]);
+ i++;
+ }
+ dev_dbg(codec->dev, "%s val = %d\n", __func__, val);
+ switch (val) {
+ case 1:
+ case 2:
+ rt5668->jack_type = SND_JACK_HEADSET;
+ rt5663_enable_push_button_irq(codec, true);
+ break;
+ default:
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS2");
+ snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ snd_soc_dapm_disable_pin(dapm, "CBJ Power");
+ snd_soc_dapm_sync(dapm);
+ rt5668->jack_type = SND_JACK_HEADPHONE;
+ break;
+ }
+ } else {
+ snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x0);
+
+ if (rt5668->jack_type == SND_JACK_HEADSET) {
+ rt5663_enable_push_button_irq(codec, false);
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS2");
+ snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ snd_soc_dapm_disable_pin(dapm, "CBJ Power");
+ snd_soc_dapm_sync(dapm);
+ }
+ rt5668->jack_type = 0;
+ }
+
+ dev_dbg(codec->dev, "jack_type = %d\n", rt5668->jack_type);
+ return rt5668->jack_type;
+}
+
+/**
+ * rt5663_jack_detect - Detect headset.
+ * @codec: SoC audio codec device.
+ * @jack_insert: Jack insert or not.
+ *
+ * Detect whether a headset is present when the jack is inserted.
+ *
+ * Returns detect status.
+ */
+static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ int val, i = 0, sleep_time[5] = {300, 150, 100, 50, 30};
+
+ dev_dbg(codec->dev, "%s jack_insert:%d\n", __func__, jack_insert);
+
+ if (jack_insert) {
+ snd_soc_update_bits(codec, RT5663_DIG_MISC,
+ RT5668_DIG_GATE_CTRL_MASK, RT5668_DIG_GATE_CTRL_EN);
+ snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
+ RT5663_SI_HP_MASK | RT5668_OSW_HP_L_MASK |
+ RT5668_OSW_HP_R_MASK, RT5663_SI_HP_EN |
+ RT5668_OSW_HP_L_DIS | RT5668_OSW_HP_R_DIS);
+ snd_soc_update_bits(codec, RT5663_DUMMY_1,
+ RT5663_EMB_CLK_MASK | RT5663_HPA_CPL_BIAS_MASK |
+ RT5663_HPA_CPR_BIAS_MASK, RT5663_EMB_CLK_EN |
+ RT5663_HPA_CPL_BIAS_1 | RT5663_HPA_CPR_BIAS_1);
+ snd_soc_update_bits(codec, RT5663_CBJ_1,
+ RT5663_INBUF_CBJ_BST1_MASK | RT5663_CBJ_SENSE_BST1_MASK,
+ RT5663_INBUF_CBJ_BST1_ON | RT5663_CBJ_SENSE_BST1_L);
+ snd_soc_update_bits(codec, RT5663_IL_CMD_2,
+ RT5663_PWR_MIC_DET_MASK, RT5663_PWR_MIC_DET_ON);
+ /* BST1 power on for JD */
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
+ RT5668_PWR_BST1_MASK, RT5668_PWR_BST1_ON);
+ snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
+ RT5663_CBJ_DET_MASK | RT5663_EXT_JD_MASK |
+ RT5663_POL_EXT_JD_MASK, RT5663_CBJ_DET_EN |
+ RT5663_EXT_JD_EN | RT5663_POL_EXT_JD_EN);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5668_PWR_MB_MASK | RT5668_LDO1_DVO_MASK |
+ RT5668_AMP_HP_MASK, RT5668_PWR_MB |
+ RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+ snd_soc_update_bits(codec, RT5663_AUTO_1MRC_CLK,
+ RT5668_IRQ_POW_SAV_MASK, RT5668_IRQ_POW_SAV_EN);
+ snd_soc_update_bits(codec, RT5663_IRQ_1,
+ RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
+ while (i < 5) {
+ msleep(sleep_time[i]);
+ val = snd_soc_read(codec, RT5663_EM_JACK_TYPE_2) &
+ 0x0003;
+ if (val == 0x1 || val == 0x2 || val == 0x3)
+ break;
+ dev_dbg(codec->dev, "%s: MX-00e7 val=%x sleep %d\n",
+ __func__, val, sleep_time[i]);
+ i++;
+ }
+ dev_dbg(codec->dev, "%s val = %d\n", __func__, val);
+ switch (val) {
+ case 1:
+ case 2:
+ rt5663->jack_type = SND_JACK_HEADSET;
+ rt5663_enable_push_button_irq(codec, true);
+ break;
+ default:
+ rt5663->jack_type = SND_JACK_HEADPHONE;
+ break;
+ }
+ } else {
+ if (rt5663->jack_type == SND_JACK_HEADSET)
+ rt5663_enable_push_button_irq(codec, false);
+ rt5663->jack_type = 0;
+ }
+
+ dev_dbg(codec->dev, "jack_type = %d\n", rt5663->jack_type);
+ return rt5663->jack_type;
+}
+
+static int rt5663_button_detect(struct snd_soc_codec *codec)
+{
+ int btn_type, val;
+
+ val = snd_soc_read(codec, RT5663_IL_CMD_5);
+ dev_dbg(codec->dev, "%s: val=0x%x\n", __func__, val);
+ btn_type = val & 0xfff0;
+ snd_soc_write(codec, RT5663_IL_CMD_5, val);
+
+ return btn_type;
+}
+
+static irqreturn_t rt5663_irq(int irq, void *data)
+{
+ struct rt5663_priv *rt5663 = data;
+
+ dev_dbg(rt5663->codec->dev, "%s IRQ queue work\n", __func__);
+
+ queue_delayed_work(system_wq, &rt5663->jack_detect_work,
+ msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+int rt5663_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hs_jack)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ rt5663->hs_jack = hs_jack;
+
+ rt5663_irq(0, rt5663);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5663_set_jack_detect);
+
+static bool rt5663_check_jd_status(struct snd_soc_codec *codec)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ int val = snd_soc_read(codec, RT5663_INT_ST_1);
+
+ dev_dbg(codec->dev, "%s val=%x\n", __func__, val);
+
+ /* JD1 */
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ return !(val & 0x2000);
+ case CODEC_TYPE_RT5663:
+ return !(val & 0x1000);
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+
+ return false;
+}
+
+static void rt5663_jack_detect_work(struct work_struct *work)
+{
+ struct rt5663_priv *rt5663 =
+ container_of(work, struct rt5663_priv, jack_detect_work.work);
+ struct snd_soc_codec *codec = rt5663->codec;
+ int btn_type, report = 0;
+
+ if (!codec)
+ return;
+
+ if (rt5663_check_jd_status(codec)) {
+ /* jack in */
+ if (rt5663->jack_type == 0) {
+ /* jack was out, report jack type */
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ report = rt5668_jack_detect(rt5663->codec, 1);
+ break;
+ case CODEC_TYPE_RT5663:
+ report = rt5663_jack_detect(rt5663->codec, 1);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+ } else {
+ /* jack is already in, report button event */
+ report = SND_JACK_HEADSET;
+ btn_type = rt5663_button_detect(rt5663->codec);
+ /*
+ * The rt5663 can report three kinds of button behavior:
+ * single click, double click and hold. However, we
+ * currently only report button pressed/released
+ * events, so all three button behaviors are treated
+ * as button pressed.
+ */
+ switch (btn_type) {
+ case 0x8000:
+ case 0x4000:
+ case 0x2000:
+ report |= SND_JACK_BTN_0;
+ break;
+ case 0x1000:
+ case 0x0800:
+ case 0x0400:
+ report |= SND_JACK_BTN_1;
+ break;
+ case 0x0200:
+ case 0x0100:
+ case 0x0080:
+ report |= SND_JACK_BTN_2;
+ break;
+ case 0x0040:
+ case 0x0020:
+ case 0x0010:
+ report |= SND_JACK_BTN_3;
+ break;
+ case 0x0000: /* unpressed */
+ break;
+ default:
+ btn_type = 0;
+ dev_err(rt5663->codec->dev,
+ "Unexpected button code 0x%04x\n",
+ btn_type);
+ break;
+ }
+ /* Button release or spurious interrupt */
+ if (btn_type == 0)
+ report = rt5663->jack_type;
+ }
+ } else {
+ /* jack out */
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ report = rt5668_jack_detect(rt5663->codec, 0);
+ break;
+ case CODEC_TYPE_RT5663:
+ report = rt5663_jack_detect(rt5663->codec, 0);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+ }
+ dev_dbg(codec->dev, "%s jack report: 0x%04x\n", __func__, report);
+ snd_soc_jack_report(rt5663->hs_jack, report, SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+}
+
+static const struct snd_kcontrol_new rt5663_snd_controls[] = {
+ /* DAC Digital Volume */
+ SOC_DOUBLE_TLV("DAC Playback Volume", RT5663_STO1_DAC_DIG_VOL,
+ RT5668_DAC_L1_VOL_SHIFT + 1, RT5668_DAC_R1_VOL_SHIFT + 1,
+ 87, 0, dac_vol_tlv),
+ /* ADC Digital Volume Control */
+ SOC_DOUBLE("ADC Capture Switch", RT5663_STO1_ADC_DIG_VOL,
+ RT5668_ADC_L_MUTE_SHIFT, RT5668_ADC_R_MUTE_SHIFT, 1, 1),
+ SOC_DOUBLE_TLV("ADC Capture Volume", RT5663_STO1_ADC_DIG_VOL,
+ RT5668_ADC_L_VOL_SHIFT + 1, RT5668_ADC_R_VOL_SHIFT + 1,
+ 63, 0, adc_vol_tlv),
+};
+
+static const struct snd_kcontrol_new rt5668_specific_controls[] = {
+ /* Headphone Output Volume */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_HP_LCH_DRE,
+ RT5663_HP_RCH_DRE, RT5668_GAIN_HP_SHIFT, 15, 1,
+ rt5668_hp_vol_tlv),
+ /* Mic Boost Volume */
+ SOC_SINGLE_TLV("IN1 Capture Volume", RT5668_AEC_BST,
+ RT5668_GAIN_CBJ_SHIFT, 8, 0, in_bst_tlv),
+};
+
+static const struct snd_kcontrol_new rt5663_specific_controls[] = {
+ /* Headphone Output Volume */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_STO_DRE_9,
+ RT5663_STO_DRE_10, RT5663_DRE_GAIN_HP_SHIFT, 23, 1,
+ rt5663_hp_vol_tlv),
+ /* Mic Boost Volume */
+ SOC_SINGLE_TLV("IN1 Capture Volume", RT5663_CBJ_2,
+ RT5663_GAIN_BST1_SHIFT, 8, 0, in_bst_tlv),
+ /* Data Swap for Slot0/1 in ADCDAT1 */
+ SOC_ENUM("IF1 ADC Data Swap", rt5663_if1_adc_enum),
+};
+
+static int rt5663_is_sys_clk_from_pll(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int val;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ val = snd_soc_read(codec, RT5663_GLB_CLK);
+ val &= RT5663_SCLK_SRC_MASK;
+ if (val == RT5663_SCLK_SRC_PLL1)
+ return 1;
+ else
+ return 0;
+}
+
+static int rt5663_is_using_asrc(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int reg, shift, val;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ switch (w->shift) {
+ case RT5668_ADC_STO1_ASRC_SHIFT:
+ reg = RT5668_ASRC_3;
+ shift = RT5668_AD_STO1_TRACK_SHIFT;
+ break;
+ case RT5668_DAC_STO1_ASRC_SHIFT:
+ reg = RT5663_ASRC_2;
+ shift = RT5668_DA_STO1_TRACK_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (w->shift) {
+ case RT5663_ADC_STO1_ASRC_SHIFT:
+ reg = RT5663_ASRC_2;
+ shift = RT5663_AD_STO1_TRACK_SHIFT;
+ break;
+ case RT5663_DAC_STO1_ASRC_SHIFT:
+ reg = RT5663_ASRC_2;
+ shift = RT5663_DA_STO1_TRACK_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ val = (snd_soc_read(codec, reg) >> shift) & 0x7;
+
+ if (val)
+ return 1;
+
+ return 0;
+}
+
+static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ int da_asrc_en, ad_asrc_en;
+
+ da_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
+ RT5663_DA_STO1_TRACK_MASK) ? 1 : 0;
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ ad_asrc_en = (snd_soc_read(codec, RT5668_ASRC_3) &
+ RT5668_AD_STO1_TRACK_MASK) ? 1 : 0;
+ break;
+ case CODEC_TYPE_RT5663:
+ ad_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
+ RT5663_AD_STO1_TRACK_MASK) ? 1 : 0;
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ return 1;
+ }
+
+ if (da_asrc_en || ad_asrc_en) {
+ if (rt5663->sysclk > rt5663->lrck * 384)
+ return 1;
+
+ dev_err(codec->dev,
+ "sysclk < 384 x fs, disable i2s asrc\n");
+ }
+
+ return 0;
+}
+
+/**
+ * rt5663_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function handles asynchronous MCLK and LRCK. In addition, since the
+ * RT5668 only supports the standard 32fs or 64fs I2S formats, ASRC should be
+ * enabled to support special I2S clock formats such as Intel's 100fs
+ * (100 * sampling rate). The ASRC function tracks the I2S clock and generates
+ * a corresponding system clock for the codec. This function provides an API to
+ * select the clock source for the set of filters specified by the mask; the
+ * codec driver will turn on ASRC for those filters if ASRC is selected as
+ * their clock source.
+ */
+int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+ unsigned int asrc2_mask = 0;
+ unsigned int asrc2_value = 0;
+ unsigned int asrc3_mask = 0;
+ unsigned int asrc3_value = 0;
+
+ switch (clk_src) {
+ case RT5663_CLK_SEL_SYS:
+ case RT5663_CLK_SEL_I2S1_ASRC:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (filter_mask & RT5663_DA_STEREO_FILTER) {
+ asrc2_mask |= RT5668_DA_STO1_TRACK_MASK;
+ asrc2_value |= clk_src << RT5668_DA_STO1_TRACK_SHIFT;
+ }
+
+ if (filter_mask & RT5663_AD_STEREO_FILTER) {
+ switch (rt5668->codec_type) {
+ case CODEC_TYPE_RT5668:
+ asrc3_mask |= RT5668_AD_STO1_TRACK_MASK;
+ asrc3_value |= clk_src << RT5668_AD_STO1_TRACK_SHIFT;
+ break;
+ case CODEC_TYPE_RT5663:
+ asrc2_mask |= RT5663_AD_STO1_TRACK_MASK;
+ asrc2_value |= clk_src << RT5663_AD_STO1_TRACK_SHIFT;
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ }
+ }
+
+ if (asrc2_mask)
+ snd_soc_update_bits(codec, RT5663_ASRC_2, asrc2_mask,
+ asrc2_value);
+
+ if (asrc3_mask)
+ snd_soc_update_bits(codec, RT5668_ASRC_3, asrc3_mask,
+ asrc3_value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5663_sel_asrc_clk_src);
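+
+/*
+ * Illustrative machine-driver usage of rt5663_sel_asrc_clk_src() (not part of
+ * this driver; "codec" stands for whatever snd_soc_codec handle the machine
+ * driver holds): enable ASRC tracking of I2S1 for both stereo filters.
+ *
+ * rt5663_sel_asrc_clk_src(codec,
+ * RT5663_DA_STEREO_FILTER | RT5663_AD_STEREO_FILTER,
+ * RT5663_CLK_SEL_I2S1_ASRC);
+ */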
+
+/* Analog Mixer */
+static const struct snd_kcontrol_new rt5668_recmix1l[] = {
+ SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1L,
+ RT5668_RECMIX1L_BST2_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("BST1 CBJ Switch", RT5668_RECMIX1L,
+ RT5668_RECMIX1L_BST1_CBJ_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5668_recmix1r[] = {
+ SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1R,
+ RT5668_RECMIX1R_BST2_SHIFT, 1, 1),
+};
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5663_sto1_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
+ RT5668_M_STO1_ADC_L1_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
+ RT5668_M_STO1_ADC_L2_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5668_sto1_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
+ RT5668_M_STO1_ADC_R1_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
+ RT5668_M_STO1_ADC_R2_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5663_adda_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC L Switch", RT5663_AD_DA_MIXER,
+ RT5668_M_ADCMIX_L_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC L Switch", RT5663_AD_DA_MIXER,
+ RT5668_M_DAC1_L_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5663_adda_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC R Switch", RT5663_AD_DA_MIXER,
+ RT5668_M_ADCMIX_R_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R Switch", RT5663_AD_DA_MIXER,
+ RT5668_M_DAC1_R_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5663_sto1_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
+ RT5668_M_DAC_L1_STO_L_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
+ RT5668_M_DAC_R1_STO_L_SHIFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5663_sto1_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
+ RT5668_M_DAC_L1_STO_R_SHIFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
+ RT5668_M_DAC_R1_STO_R_SHIFT, 1, 1),
+};
+
+/* Out Switch */
+static const struct snd_kcontrol_new rt5668_hpo_switch =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5668_HP_AMP_2,
+ RT5668_EN_DAC_HPO_SHIFT, 1, 0);
+
+/* Stereo ADC source */
+static const char * const rt5668_sto1_adc_src[] = {
+ "ADC L", "ADC R"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcl_enum, RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_L_SRC_SHIFT, rt5668_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5668_sto1_adcl_mux =
+ SOC_DAPM_ENUM("STO1 ADC L Mux", rt5668_sto1_adcl_enum);
+
+static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcr_enum, RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_R_SRC_SHIFT, rt5668_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5668_sto1_adcr_mux =
+ SOC_DAPM_ENUM("STO1 ADC R Mux", rt5668_sto1_adcr_enum);
+
+/* RT5663: Analog DACL1 input source */
+static const char * const rt5663_alg_dacl_src[] = {
+ "DAC L", "STO DAC MIXL"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5663_alg_dacl_enum, RT5663_BYPASS_STO_DAC,
+ RT5663_DACL1_SRC_SHIFT, rt5663_alg_dacl_src);
+
+static const struct snd_kcontrol_new rt5663_alg_dacl_mux =
+ SOC_DAPM_ENUM("DAC L Mux", rt5663_alg_dacl_enum);
+
+/* RT5663: Analog DACR1 input source */
+static const char * const rt5663_alg_dacr_src[] = {
+ "DAC R", "STO DAC MIXR"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5663_alg_dacr_enum, RT5663_BYPASS_STO_DAC,
+ RT5663_DACR1_SRC_SHIFT, rt5663_alg_dacr_src);
+
+static const struct snd_kcontrol_new rt5663_alg_dacr_mux =
+ SOC_DAPM_ENUM("DAC R Mux", rt5663_alg_dacr_enum);
+
+static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
+ RT5668_SEL_PM_HP_SHIFT, RT5668_SEL_PM_HP_HIGH);
+ snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
+ RT5668_HP_SIG_SRC1_MASK,
+ RT5668_HP_SIG_SRC1_SILENCE);
+ } else {
+ snd_soc_write(codec, RT5663_DEPOP_2, 0x3003);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
+ 0x000b);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030,
+ 0x0030);
+ snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
+ RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_DIS);
+ snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_2, 0x1371);
+ snd_soc_write(codec, RT5663_HP_BIAS, 0xabba);
+ snd_soc_write(codec, RT5663_CHARGE_PUMP_1, 0x2224);
+ snd_soc_write(codec, RT5663_ANA_BIAS_CUR_1, 0x7766);
+ snd_soc_write(codec, RT5663_HP_BIAS, 0xafaa);
+ snd_soc_write(codec, RT5663_CHARGE_PUMP_2, 0x7777);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000,
+ 0x3000);
+ }
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
+ RT5668_HP_SIG_SRC1_MASK,
+ RT5668_HP_SIG_SRC1_REG);
+ } else {
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0x0);
+ snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
+ RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_EN);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030, 0x0);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
+ 0x000b);
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5668_bst2_power(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
+ RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK,
+ RT5668_PWR_BST2 | RT5668_PWR_BST2_OP);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
+ RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK, 0);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5663_pre_div_power(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_1, 0xff00);
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_2, 0xfffc);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_1, 0x0000);
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_2, 0x0000);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PLL", RT5663_PWR_ANLG_3, RT5668_PWR_PLL_SHIFT, 0,
+ NULL, 0),
+
+ /* micbias */
+ SND_SOC_DAPM_MICBIAS("MICBIAS1", RT5663_PWR_ANLG_2,
+ RT5668_PWR_MB1_SHIFT, 0),
+ SND_SOC_DAPM_MICBIAS("MICBIAS2", RT5663_PWR_ANLG_2,
+ RT5668_PWR_MB2_SHIFT, 0),
+
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("IN1P"),
+ SND_SOC_DAPM_INPUT("IN1N"),
+
+ /* REC Mixer Power */
+ SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5663_PWR_ANLG_2,
+ RT5668_PWR_RECMIX1_SHIFT, 0, NULL, 0),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("ADC L Power", RT5663_PWR_DIG_1,
+ RT5668_PWR_ADC_L1_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC Clock", RT5663_CHOP_ADC,
+ RT5668_CKGEN_ADCC_SHIFT, 0, NULL, 0),
+
+ /* ADC Mixer */
+ SND_SOC_DAPM_MIXER("STO1 ADC MIXL", SND_SOC_NOPM,
+ 0, 0, rt5663_sto1_adc_l_mix,
+ ARRAY_SIZE(rt5663_sto1_adc_l_mix)),
+
+ /* ADC Filter Power */
+ SND_SOC_DAPM_SUPPLY("STO1 ADC Filter", RT5663_PWR_DIG_2,
+ RT5668_PWR_ADC_S1F_SHIFT, 0, NULL, 0),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_SUPPLY("I2S", RT5663_PWR_DIG_1, RT5668_PWR_I2S1_SHIFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("IF DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 ADC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIFRX", "AIF Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIFTX", "AIF Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ /* DAC mixer before sound effect */
+ SND_SOC_DAPM_MIXER("ADDA MIXL", SND_SOC_NOPM, 0, 0, rt5663_adda_l_mix,
+ ARRAY_SIZE(rt5663_adda_l_mix)),
+ SND_SOC_DAPM_MIXER("ADDA MIXR", SND_SOC_NOPM, 0, 0, rt5663_adda_r_mix,
+ ARRAY_SIZE(rt5663_adda_r_mix)),
+ SND_SOC_DAPM_PGA("DAC L1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DAC R1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* DAC Mixer */
+ SND_SOC_DAPM_SUPPLY("STO1 DAC Filter", RT5663_PWR_DIG_2,
+ RT5668_PWR_DAC_S1F_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("STO1 DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5663_sto1_dac_l_mix, ARRAY_SIZE(rt5663_sto1_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("STO1 DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5663_sto1_dac_r_mix, ARRAY_SIZE(rt5663_sto1_dac_r_mix)),
+
+ /* DACs */
+ SND_SOC_DAPM_SUPPLY("STO1 DAC L Power", RT5663_PWR_DIG_1,
+ RT5668_PWR_DAC_L1_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("STO1 DAC R Power", RT5663_PWR_DIG_1,
+ RT5668_PWR_DAC_R1_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_DAC("DAC L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC R", NULL, SND_SOC_NOPM, 0, 0),
+
+ /* Headphone */
+ SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5663_hp_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+};
+
+static const struct snd_soc_dapm_widget rt5668_specific_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("LDO2", RT5663_PWR_ANLG_3,
+ RT5668_PWR_LDO2_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5668_PWR_VOL,
+ RT5668_PWR_MIC_DET_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("LDO DAC", RT5663_PWR_DIG_1,
+ RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
+ RT5668_I2S1_ASRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC ASRC", RT5663_ASRC_1,
+ RT5668_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC ASRC", RT5663_ASRC_1,
+ RT5668_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
+
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("IN2P"),
+ SND_SOC_DAPM_INPUT("IN2N"),
+
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CBJ Power", RT5663_PWR_ANLG_3,
+ RT5668_PWR_CBJ_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("BST2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BST2 Power", SND_SOC_NOPM, 0, 0,
+ rt5668_bst2_power, SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU),
+
+ /* REC Mixer */
+ SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5668_recmix1l,
+ ARRAY_SIZE(rt5668_recmix1l)),
+ SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5668_recmix1r,
+ ARRAY_SIZE(rt5668_recmix1r)),
+ SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5663_PWR_ANLG_2,
+ RT5668_PWR_RECMIX2_SHIFT, 0, NULL, 0),
+
+ /* ADC */
+ SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("ADC R Power", RT5663_PWR_DIG_1,
+ RT5668_PWR_ADC_R1_SHIFT, 0, NULL, 0),
+
+ /* ADC Mux */
+ SND_SOC_DAPM_PGA("STO1 ADC L1", RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_L1_SRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("STO1 ADC R1", RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_R1_SRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("STO1 ADC L2", RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_L2_SRC_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("STO1 ADC R2", RT5663_STO1_ADC_MIXER,
+ RT5668_STO1_ADC_R2_SRC_SHIFT, 1, NULL, 0),
+
+ SND_SOC_DAPM_MUX("STO1 ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5668_sto1_adcl_mux),
+ SND_SOC_DAPM_MUX("STO1 ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5668_sto1_adcr_mux),
+
+ /* ADC Mix */
+ SND_SOC_DAPM_MIXER("STO1 ADC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5668_sto1_adc_r_mix, ARRAY_SIZE(rt5668_sto1_adc_r_mix)),
+
+ /* Analog DAC Clock */
+ SND_SOC_DAPM_SUPPLY("DAC Clock", RT5663_CHOP_DAC_L,
+ RT5668_CKGEN_DAC1_SHIFT, 0, NULL, 0),
+
+ /* Headphone out */
+ SND_SOC_DAPM_SWITCH("HPO Playback", SND_SOC_NOPM, 0, 0,
+ &rt5668_hpo_switch),
+};
+
+static const struct snd_soc_dapm_widget rt5663_specific_dapm_widgets[] = {
+ /* System Clock Pre Divider Gating */
+ SND_SOC_DAPM_SUPPLY("Pre Div Power", SND_SOC_NOPM, 0, 0,
+ rt5663_pre_div_power, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ /* LDO */
+ SND_SOC_DAPM_SUPPLY("LDO ADC", RT5663_PWR_DIG_1,
+ RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
+ RT5663_I2S1_ASRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC ASRC", RT5663_ASRC_1,
+ RT5663_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC ASRC", RT5663_ASRC_1,
+ RT5663_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
+
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* STO ADC */
+ SND_SOC_DAPM_PGA("STO1 ADC L1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("STO1 ADC L2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Analog DAC source */
+ SND_SOC_DAPM_MUX("DAC L Mux", SND_SOC_NOPM, 0, 0, &rt5663_alg_dacl_mux),
+ SND_SOC_DAPM_MUX("DAC R Mux", SND_SOC_NOPM, 0, 0, &rt5663_alg_dacr_mux),
+};
+
+static const struct snd_soc_dapm_route rt5663_dapm_routes[] = {
+ /* PLL */
+ { "I2S", NULL, "PLL", rt5663_is_sys_clk_from_pll },
+
+ /* ASRC */
+ { "STO1 ADC Filter", NULL, "ADC ASRC", rt5663_is_using_asrc },
+ { "STO1 DAC Filter", NULL, "DAC ASRC", rt5663_is_using_asrc },
+ { "I2S", NULL, "I2S ASRC", rt5663_i2s_use_asrc },
+
+ { "ADC L", NULL, "ADC L Power" },
+ { "ADC L", NULL, "ADC Clock" },
+
+ { "STO1 ADC L2", NULL, "STO1 DAC MIXL" },
+
+ { "STO1 ADC MIXL", "ADC1 Switch", "STO1 ADC L1" },
+ { "STO1 ADC MIXL", "ADC2 Switch", "STO1 ADC L2" },
+ { "STO1 ADC MIXL", NULL, "STO1 ADC Filter" },
+
+ { "IF1 ADC1", NULL, "STO1 ADC MIXL" },
+ { "IF ADC", NULL, "IF1 ADC1" },
+ { "AIFTX", NULL, "IF ADC" },
+ { "AIFTX", NULL, "I2S" },
+
+ { "AIFRX", NULL, "I2S" },
+ { "IF DAC", NULL, "AIFRX" },
+ { "IF1 DAC1 L", NULL, "IF DAC" },
+ { "IF1 DAC1 R", NULL, "IF DAC" },
+
+ { "ADDA MIXL", "ADC L Switch", "STO1 ADC MIXL" },
+ { "ADDA MIXL", "DAC L Switch", "IF1 DAC1 L" },
+ { "ADDA MIXL", NULL, "STO1 DAC Filter" },
+ { "ADDA MIXL", NULL, "STO1 DAC L Power" },
+ { "ADDA MIXR", "DAC R Switch", "IF1 DAC1 R" },
+ { "ADDA MIXR", NULL, "STO1 DAC Filter" },
+ { "ADDA MIXR", NULL, "STO1 DAC R Power" },
+
+ { "DAC L1", NULL, "ADDA MIXL" },
+ { "DAC R1", NULL, "ADDA MIXR" },
+
+ { "STO1 DAC MIXL", "DAC L Switch", "DAC L1" },
+ { "STO1 DAC MIXL", "DAC R Switch", "DAC R1" },
+ { "STO1 DAC MIXL", NULL, "STO1 DAC L Power" },
+ { "STO1 DAC MIXL", NULL, "STO1 DAC Filter" },
+ { "STO1 DAC MIXR", "DAC R Switch", "DAC R1" },
+ { "STO1 DAC MIXR", "DAC L Switch", "DAC L1" },
+ { "STO1 DAC MIXR", NULL, "STO1 DAC R Power" },
+ { "STO1 DAC MIXR", NULL, "STO1 DAC Filter" },
+
+ { "HP Amp", NULL, "DAC L" },
+ { "HP Amp", NULL, "DAC R" },
+};
+
+static const struct snd_soc_dapm_route rt5668_specific_dapm_routes[] = {
+ { "MICBIAS1", NULL, "LDO2" },
+ { "MICBIAS2", NULL, "LDO2" },
+
+ { "BST1 CBJ", NULL, "IN1P" },
+ { "BST1 CBJ", NULL, "IN1N" },
+ { "BST1 CBJ", NULL, "CBJ Power" },
+
+ { "BST2", NULL, "IN2P" },
+ { "BST2", NULL, "IN2N" },
+ { "BST2", NULL, "BST2 Power" },
+
+ { "RECMIX1L", "BST2 Switch", "BST2" },
+ { "RECMIX1L", "BST1 CBJ Switch", "BST1 CBJ" },
+ { "RECMIX1L", NULL, "RECMIX1L Power" },
+ { "RECMIX1R", "BST2 Switch", "BST2" },
+ { "RECMIX1R", NULL, "RECMIX1R Power" },
+
+ { "ADC L", NULL, "RECMIX1L" },
+ { "ADC R", NULL, "RECMIX1R" },
+ { "ADC R", NULL, "ADC R Power" },
+ { "ADC R", NULL, "ADC Clock" },
+
+ { "STO1 ADC L Mux", "ADC L", "ADC L" },
+ { "STO1 ADC L Mux", "ADC R", "ADC R" },
+ { "STO1 ADC L1", NULL, "STO1 ADC L Mux" },
+
+ { "STO1 ADC R Mux", "ADC L", "ADC L" },
+ { "STO1 ADC R Mux", "ADC R", "ADC R" },
+ { "STO1 ADC R1", NULL, "STO1 ADC R Mux" },
+ { "STO1 ADC R2", NULL, "STO1 DAC MIXR" },
+
+ { "STO1 ADC MIXR", "ADC1 Switch", "STO1 ADC R1" },
+ { "STO1 ADC MIXR", "ADC2 Switch", "STO1 ADC R2" },
+ { "STO1 ADC MIXR", NULL, "STO1 ADC Filter" },
+
+ { "IF1 ADC1", NULL, "STO1 ADC MIXR" },
+
+ { "ADDA MIXR", "ADC R Switch", "STO1 ADC MIXR" },
+
+ { "DAC L", NULL, "STO1 DAC MIXL" },
+ { "DAC L", NULL, "LDO DAC" },
+ { "DAC L", NULL, "DAC Clock" },
+ { "DAC R", NULL, "STO1 DAC MIXR" },
+ { "DAC R", NULL, "LDO DAC" },
+ { "DAC R", NULL, "DAC Clock" },
+
+ { "HPO Playback", "Switch", "HP Amp" },
+ { "HPOL", NULL, "HPO Playback" },
+ { "HPOR", NULL, "HPO Playback" },
+};
+
+static const struct snd_soc_dapm_route rt5663_specific_dapm_routes[] = {
+ { "I2S", NULL, "Pre Div Power" },
+
+ { "BST1", NULL, "IN1P" },
+ { "BST1", NULL, "IN1N" },
+ { "BST1", NULL, "RECMIX1L Power" },
+
+ { "ADC L", NULL, "BST1" },
+
+ { "STO1 ADC L1", NULL, "ADC L" },
+
+ { "DAC L Mux", "DAC L", "DAC L1" },
+ { "DAC L Mux", "STO DAC MIXL", "STO1 DAC MIXL" },
+ { "DAC R Mux", "DAC R", "DAC R1"},
+ { "DAC R Mux", "STO DAC MIXR", "STO1 DAC MIXR" },
+
+ { "DAC L", NULL, "DAC L Mux" },
+ { "DAC R", NULL, "DAC R Mux" },
+
+ { "HPOL", NULL, "HP Amp" },
+ { "HPOR", NULL, "HP Amp" },
+};
+
+static int rt5663_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val_len = 0;
+ int pre_div;
+
+ rt5663->lrck = params_rate(params);
+
+ dev_dbg(dai->dev, "lrck is %dHz and sysclk is %dHz\n",
+ rt5663->lrck, rt5663->sysclk);
+
+ pre_div = rl6231_get_clk_info(rt5663->sysclk, rt5663->lrck);
+ if (pre_div < 0) {
+ dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+ rt5663->lrck, dai->id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "pre_div is %d for iis %d\n", pre_div, dai->id);
+
+ switch (params_width(params)) {
+ case 8:
+ val_len = RT5668_I2S_DL_8;
+ break;
+ case 16:
+ val_len = RT5668_I2S_DL_16;
+ break;
+ case 20:
+ val_len = RT5668_I2S_DL_20;
+ break;
+ case 24:
+ val_len = RT5668_I2S_DL_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, RT5663_I2S1_SDP,
+ RT5668_I2S_DL_MASK, val_len);
+
+ snd_soc_update_bits(codec, RT5663_ADDA_CLK_1,
+ RT5668_I2S_PD1_MASK, pre_div << RT5668_I2S_PD1_SHIFT);
+
+ return 0;
+}
+
+static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int reg_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ reg_val |= RT5668_I2S_MS_S;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT5668_I2S_BP_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT5668_I2S_DF_LEFT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT5668_I2S_DF_PCM_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT5668_I2S_DF_PCM_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, RT5663_I2S1_SDP, RT5668_I2S_MS_MASK |
+ RT5668_I2S_BP_MASK | RT5668_I2S_DF_MASK, reg_val);
+
+ return 0;
+}
+
+static int rt5663_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg_val = 0;
+
+ if (freq == rt5663->sysclk && clk_id == rt5663->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5663_SCLK_S_MCLK:
+ reg_val |= RT5663_SCLK_SRC_MCLK;
+ break;
+ case RT5663_SCLK_S_PLL1:
+ reg_val |= RT5663_SCLK_SRC_PLL1;
+ break;
+ case RT5663_SCLK_S_RCCLK:
+ reg_val |= RT5663_SCLK_SRC_RCCLK;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5668_SCLK_SRC_MASK,
+ reg_val);
+ rt5663->sysclk = freq;
+ rt5663->sysclk_src = clk_id;
+
+ dev_dbg(codec->dev, "Sysclk is %dHz and clock id is %d\n",
+ freq, clk_id);
+
+ return 0;
+}
+
+static int rt5663_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ struct rl6231_pll_code pll_code;
+ int ret;
+ int mask, shift, val;
+
+ if (source == rt5663->pll_src && freq_in == rt5663->pll_in &&
+ freq_out == rt5663->pll_out)
+ return 0;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(codec->dev, "PLL disabled\n");
+
+ rt5663->pll_in = 0;
+ rt5663->pll_out = 0;
+ snd_soc_update_bits(codec, RT5663_GLB_CLK,
+ RT5663_SCLK_SRC_MASK, RT5663_SCLK_SRC_MCLK);
+ return 0;
+ }
+
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ mask = RT5668_PLL1_SRC_MASK;
+ shift = RT5668_PLL1_SRC_SHIFT;
+ break;
+ case CODEC_TYPE_RT5663:
+ mask = RT5663_PLL1_SRC_MASK;
+ shift = RT5663_PLL1_SRC_SHIFT;
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ return -EINVAL;
+ }
+
+ switch (source) {
+ case RT5663_PLL1_S_MCLK:
+ val = 0x0;
+ break;
+ case RT5663_PLL1_S_BCLK1:
+ val = 0x1;
+ break;
+ default:
+ dev_err(codec->dev, "Unknown PLL source %d\n", source);
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, RT5663_GLB_CLK, mask, (val << shift));
+
+ ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+ dev_err(codec->dev, "Unsupported input clock %d\n", freq_in);
+ return ret;
+ }
+
+ dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=%d\n", pll_code.m_bp,
+ (pll_code.m_bp ? 0 : pll_code.m_code), pll_code.n_code,
+ pll_code.k_code);
+
+ snd_soc_write(codec, RT5663_PLL_1,
+ pll_code.n_code << RT5668_PLL_N_SHIFT | pll_code.k_code);
+ snd_soc_write(codec, RT5663_PLL_2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5668_PLL_M_SHIFT |
+ pll_code.m_bp << RT5668_PLL_M_BP_SHIFT);
+
+ rt5663->pll_in = freq_in;
+ rt5663->pll_out = freq_out;
+ rt5663->pll_src = source;
+
+ return 0;
+}
+
+static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = 0, reg;
+
+ if (rx_mask || tx_mask)
+ val |= RT5668_TDM_MODE_TDM;
+
+ switch (slots) {
+ case 4:
+ val |= RT5668_TDM_IN_CH_4;
+ val |= RT5668_TDM_OUT_CH_4;
+ break;
+ case 6:
+ val |= RT5668_TDM_IN_CH_6;
+ val |= RT5668_TDM_OUT_CH_6;
+ break;
+ case 8:
+ val |= RT5668_TDM_IN_CH_8;
+ val |= RT5668_TDM_OUT_CH_8;
+ break;
+ case 2:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (slot_width) {
+ case 20:
+ val |= RT5668_TDM_IN_LEN_20;
+ val |= RT5668_TDM_OUT_LEN_20;
+ break;
+ case 24:
+ val |= RT5668_TDM_IN_LEN_24;
+ val |= RT5668_TDM_OUT_LEN_24;
+ break;
+ case 32:
+ val |= RT5668_TDM_IN_LEN_32;
+ val |= RT5668_TDM_OUT_LEN_32;
+ break;
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ reg = RT5663_TDM_2;
+ break;
+ case CODEC_TYPE_RT5663:
+ reg = RT5663_TDM_1;
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, reg, RT5668_TDM_MODE_MASK |
+ RT5668_TDM_IN_CH_MASK | RT5668_TDM_OUT_CH_MASK |
+ RT5668_TDM_IN_LEN_MASK | RT5668_TDM_OUT_LEN_MASK, val);
+
+ return 0;
+}
+
+static int rt5663_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg;
+
+ dev_dbg(codec->dev, "%s ratio = %d\n", __func__, ratio);
+
+ if (rt5663->codec_type == CODEC_TYPE_RT5668)
+ reg = RT5668_TDM_8;
+ else
+ reg = RT5663_TDM_5;
+
+ switch (ratio) {
+ case 32:
+ snd_soc_update_bits(codec, reg,
+ RT5663_TDM_LENGTN_MASK,
+ RT5663_TDM_LENGTN_16);
+ break;
+ case 40:
+ snd_soc_update_bits(codec, reg,
+ RT5663_TDM_LENGTN_MASK,
+ RT5663_TDM_LENGTN_20);
+ break;
+ case 48:
+ snd_soc_update_bits(codec, reg,
+ RT5663_TDM_LENGTN_MASK,
+ RT5663_TDM_LENGTN_24);
+ break;
+ case 64:
+ snd_soc_update_bits(codec, reg,
+ RT5663_TDM_LENGTN_MASK,
+ RT5663_TDM_LENGTN_32);
+ break;
+ default:
+ dev_err(codec->dev, "Invalid ratio!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5663_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK,
+ RT5668_PWR_FV1 | RT5668_PWR_FV2);
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ snd_soc_update_bits(codec, RT5663_DIG_MISC,
+ RT5668_DIG_GATE_CTRL_MASK,
+ RT5668_DIG_GATE_CTRL_EN);
+ snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
+ RT5668_EN_ANA_CLK_DET_MASK |
+ RT5668_PWR_CLK_DET_MASK,
+ RT5668_EN_ANA_CLK_DET_AUTO |
+ RT5668_PWR_CLK_DET_EN);
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (rt5663->codec_type == CODEC_TYPE_RT5668)
+ snd_soc_update_bits(codec, RT5663_DIG_MISC,
+ RT5668_DIG_GATE_CTRL_MASK,
+ RT5668_DIG_GATE_CTRL_DIS);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
+ RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK |
+ RT5668_PWR_MB_MASK, RT5668_PWR_VREF1 |
+ RT5668_PWR_VREF2 | RT5668_PWR_MB);
+ usleep_range(10000, 10005);
+ if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+ snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
+ RT5668_EN_ANA_CLK_DET_MASK |
+ RT5668_PWR_CLK_DET_MASK,
+ RT5668_EN_ANA_CLK_DET_DIS |
+ RT5668_PWR_CLK_DET_DIS);
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
+ RT5668_PWR_FV1 | RT5668_PWR_FV2, 0x0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5663_probe(struct snd_soc_codec *codec)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ rt5663->codec = codec;
+
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ snd_soc_dapm_new_controls(dapm,
+ rt5668_specific_dapm_widgets,
+ ARRAY_SIZE(rt5668_specific_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm,
+ rt5668_specific_dapm_routes,
+ ARRAY_SIZE(rt5668_specific_dapm_routes));
+ snd_soc_add_codec_controls(codec, rt5668_specific_controls,
+ ARRAY_SIZE(rt5668_specific_controls));
+ break;
+ case CODEC_TYPE_RT5663:
+ snd_soc_dapm_new_controls(dapm,
+ rt5663_specific_dapm_widgets,
+ ARRAY_SIZE(rt5663_specific_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm,
+ rt5663_specific_dapm_routes,
+ ARRAY_SIZE(rt5663_specific_dapm_routes));
+ snd_soc_add_codec_controls(codec, rt5663_specific_controls,
+ ARRAY_SIZE(rt5663_specific_controls));
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5663_remove(struct snd_soc_codec *codec)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ regmap_write(rt5663->regmap, RT5663_RESET, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt5663_suspend(struct snd_soc_codec *codec)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(rt5663->regmap, true);
+ regcache_mark_dirty(rt5663->regmap);
+
+ return 0;
+}
+
+static int rt5663_resume(struct snd_soc_codec *codec)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+
+ regcache_cache_only(rt5663->regmap, false);
+ regcache_sync(rt5663->regmap);
+
+ return 0;
+}
+#else
+#define rt5663_suspend NULL
+#define rt5663_resume NULL
+#endif
+
+#define RT5663_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5663_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static struct snd_soc_dai_ops rt5663_aif_dai_ops = {
+ .hw_params = rt5663_hw_params,
+ .set_fmt = rt5663_set_dai_fmt,
+ .set_sysclk = rt5663_set_dai_sysclk,
+ .set_pll = rt5663_set_dai_pll,
+ .set_tdm_slot = rt5663_set_tdm_slot,
+ .set_bclk_ratio = rt5663_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt5663_dai[] = {
+ {
+ .name = "rt5663-aif",
+ .id = RT5663_AIF,
+ .playback = {
+ .stream_name = "AIF Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5663_STEREO_RATES,
+ .formats = RT5663_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5663_STEREO_RATES,
+ .formats = RT5663_FORMATS,
+ },
+ .ops = &rt5663_aif_dai_ops,
+ },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5663 = {
+ .probe = rt5663_probe,
+ .remove = rt5663_remove,
+ .suspend = rt5663_suspend,
+ .resume = rt5663_resume,
+ .set_bias_level = rt5663_set_bias_level,
+ .idle_bias_off = true,
+ .component_driver = {
+ .controls = rt5663_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5663_snd_controls),
+ .dapm_widgets = rt5663_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5663_dapm_widgets),
+ .dapm_routes = rt5663_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5663_dapm_routes),
+ }
+};
+
+static const struct regmap_config rt5668_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .use_single_rw = true,
+ .max_register = 0x07fa,
+ .volatile_reg = rt5668_volatile_register,
+ .readable_reg = rt5668_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5668_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5668_reg),
+};
+
+static const struct regmap_config rt5663_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .use_single_rw = true,
+ .max_register = 0x03f3,
+ .volatile_reg = rt5663_volatile_register,
+ .readable_reg = rt5663_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5663_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5663_reg),
+};
+
+static const struct regmap_config temp_regmap = {
+ .name = "nocache",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .use_single_rw = true,
+ .max_register = 0x03f3,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct i2c_device_id rt5663_i2c_id[] = {
+ { "rt5668", 0 },
+ { "rt5663", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rt5663_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id rt5663_of_match[] = {
+ { .compatible = "realtek,rt5668", },
+ { .compatible = "realtek,rt5663", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5663_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5663_acpi_match[] = {
+ { "10EC5668", 0},
+ { "10EC5663", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5663_acpi_match);
+#endif
+
+static void rt5668_calibrate(struct rt5663_priv *rt5668)
+{
+ regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0xa402);
+ regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x0100);
+ regmap_write(rt5668->regmap, RT5663_RECMIX, 0x4040);
+ regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x0001);
+ regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0380);
+ regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
+ regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1000);
+ regmap_write(rt5668->regmap, RT5663_CHOP_DAC_L, 0x3030);
+ regmap_write(rt5668->regmap, RT5663_CALIB_ADC, 0x3c05);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa23e);
+ msleep(40);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf23e);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x0321);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0xfc00);
+ msleep(500);
+}
+
+static void rt5663_calibrate(struct rt5663_priv *rt5668)
+{
+ int value, count;
+
+ regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0280);
+ regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
+ regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x8001);
+ regmap_write(rt5668->regmap, RT5663_VREF_RECMIX, 0x0032);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa2be);
+ msleep(20);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf2be);
+ regmap_write(rt5668->regmap, RT5663_PWR_DIG_2, 0x8400);
+ regmap_write(rt5668->regmap, RT5663_CHOP_ADC, 0x3000);
+ regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
+ regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x8df8);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_2, 0x0003);
+ regmap_write(rt5668->regmap, RT5663_PWR_ANLG_3, 0x018c);
+ regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1111);
+ regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_1, 0xffff);
+ regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_2, 0xffff);
+ regmap_write(rt5668->regmap, RT5663_DEPOP_2, 0x3003);
+ regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
+ regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_1, 0x1e32);
+ regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_2, 0x1371);
+ regmap_write(rt5668->regmap, RT5663_DACREF_LDO, 0x3b0b);
+ regmap_write(rt5668->regmap, RT5663_STO_DAC_MIXER, 0x2080);
+ regmap_write(rt5668->regmap, RT5663_BYPASS_STO_DAC, 0x000c);
+ regmap_write(rt5668->regmap, RT5663_HP_BIAS, 0xabba);
+ regmap_write(rt5668->regmap, RT5663_CHARGE_PUMP_1, 0x2224);
+ regmap_write(rt5668->regmap, RT5663_HP_OUT_EN, 0x8088);
+ regmap_write(rt5668->regmap, RT5663_STO_DRE_9, 0x0017);
+ regmap_write(rt5668->regmap, RT5663_STO_DRE_10, 0x0017);
+ regmap_write(rt5668->regmap, RT5663_STO1_ADC_MIXER, 0x4040);
+ regmap_write(rt5668->regmap, RT5663_RECMIX, 0x0005);
+ regmap_write(rt5668->regmap, RT5663_ADDA_RST, 0xc000);
+ regmap_write(rt5668->regmap, RT5663_STO1_HPF_ADJ1, 0x3320);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x00c9);
+ regmap_write(rt5668->regmap, RT5663_DUMMY_1, 0x004c);
+ regmap_write(rt5668->regmap, RT5663_ANA_BIAS_CUR_1, 0x7766);
+ regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0x4702);
+ msleep(200);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0x0069);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_3, 0x06c2);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0x7b00);
+ regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0xfb00);
+ count = 0;
+ while (true) {
+ regmap_read(rt5668->regmap, RT5663_HP_CALIB_1_1, &value);
+ if (value & 0x8000)
+ usleep_range(10000, 10005);
+ else
+ break;
+
+ if (count > 200)
+ return;
+ count++;
+ }
+}
+
+static int rt5663_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5663_priv *rt5663;
+ int ret;
+ unsigned int val;
+ struct regmap *regmap;
+
+ rt5663 = devm_kzalloc(&i2c->dev, sizeof(struct rt5663_priv),
+ GFP_KERNEL);
+
+ if (rt5663 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt5663);
+
+ regmap = devm_regmap_init_i2c(i2c, &temp_regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "Failed to allocate temp register map: %d\n",
+ ret);
+ return ret;
+ }
+ regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
+ switch (val) {
+ case RT5668_DEVICE_ID:
+ rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5668_regmap);
+ rt5663->codec_type = CODEC_TYPE_RT5668;
+ break;
+ case RT5663_DEVICE_ID:
+ rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_regmap);
+ rt5663->codec_type = CODEC_TYPE_RT5663;
+ break;
+ default:
+ dev_err(&i2c->dev,
+ "Device with ID register %#x is not rt5663 or rt5668\n",
+ val);
+ return -ENODEV;
+ }
+
+ if (IS_ERR(rt5663->regmap)) {
+ ret = PTR_ERR(rt5663->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* reset and calibrate */
+ regmap_write(rt5663->regmap, RT5663_RESET, 0);
+ regcache_cache_bypass(rt5663->regmap, true);
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ rt5668_calibrate(rt5663);
+ break;
+ case CODEC_TYPE_RT5663:
+ rt5663_calibrate(rt5663);
+ break;
+ default:
+ dev_err(&i2c->dev, "%s: Unknown codec type\n", __func__);
+ }
+ regcache_cache_bypass(rt5663->regmap, false);
+ regmap_write(rt5663->regmap, RT5663_RESET, 0);
+ dev_dbg(&i2c->dev, "calibrate done\n");
+
+ /* GPIO1 as IRQ */
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_1, RT5668_GP1_PIN_MASK,
+ RT5668_GP1_PIN_IRQ);
+ /* 4btn inline command debounce */
+ regmap_update_bits(rt5663->regmap, RT5663_IL_CMD_5,
+ RT5668_4BTN_CLK_DEB_MASK, RT5668_4BTN_CLK_DEB_65MS);
+
+ switch (rt5663->codec_type) {
+ case CODEC_TYPE_RT5668:
+ regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0xa402);
+ /* JD1 */
+ regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
+ RT5668_IRQ_POW_SAV_MASK | RT5668_IRQ_POW_SAV_JD1_MASK,
+ RT5668_IRQ_POW_SAV_EN | RT5668_IRQ_POW_SAV_JD1_EN);
+ regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_2,
+ RT5668_PWR_JD1_MASK, RT5668_PWR_JD1);
+ regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
+ RT5668_EN_CB_JD_MASK, RT5668_EN_CB_JD_EN);
+
+ regmap_update_bits(rt5663->regmap, RT5663_HP_LOGIC_2,
+ RT5668_HP_SIG_SRC1_MASK, RT5668_HP_SIG_SRC1_REG);
+ regmap_update_bits(rt5663->regmap, RT5663_RECMIX,
+ RT5668_VREF_BIAS_MASK | RT5668_CBJ_DET_MASK |
+ RT5668_DET_TYPE_MASK, RT5668_VREF_BIAS_REG |
+ RT5668_CBJ_DET_EN | RT5668_DET_TYPE_QFN);
+ /* Set GPIO4 and GPIO8 as input for combo jack */
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
+ RT5668_GP4_PIN_CONF_MASK, RT5668_GP4_PIN_CONF_INPUT);
+ regmap_update_bits(rt5663->regmap, RT5668_GPIO_3,
+ RT5668_GP8_PIN_CONF_MASK, RT5668_GP8_PIN_CONF_INPUT);
+ regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_1,
+ RT5668_LDO1_DVO_MASK | RT5668_AMP_HP_MASK,
+ RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+ break;
+ case CODEC_TYPE_RT5663:
+ regmap_write(rt5663->regmap, RT5663_VREF_RECMIX, 0x0032);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa2be);
+ msleep(20);
+ regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf2be);
+ regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
+ RT5663_GP1_PIN_CONF_MASK, RT5663_GP1_PIN_CONF_OUTPUT);
+ /* DACREF LDO control */
+ regmap_update_bits(rt5663->regmap, RT5663_DACREF_LDO, 0x3e0e,
+ 0x3a0a);
+ regmap_update_bits(rt5663->regmap, RT5663_RECMIX,
+ RT5663_RECMIX1_BST1_MASK, RT5663_RECMIX1_BST1_ON);
+ regmap_update_bits(rt5663->regmap, RT5663_TDM_2,
+ RT5663_DATA_SWAP_ADCDAT1_MASK,
+ RT5663_DATA_SWAP_ADCDAT1_LL);
+ break;
+ default:
+ dev_err(&i2c->dev, "%s: Unknown codec type\n", __func__);
+ }
+
+ INIT_DELAYED_WORK(&rt5663->jack_detect_work, rt5663_jack_detect_work);
+
+ if (i2c->irq) {
+ ret = request_irq(i2c->irq, rt5663_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5663", rt5663);
+ if (ret)
+ dev_err(&i2c->dev, "%s: Failed to request IRQ: %d\n",
+ __func__, ret);
+ }
+
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5663,
+ rt5663_dai, ARRAY_SIZE(rt5663_dai));
+
+ if (ret) {
+ if (i2c->irq)
+ free_irq(i2c->irq, rt5663);
+ }
+
+ return ret;
+}
+
+static int rt5663_i2c_remove(struct i2c_client *i2c)
+{
+ struct rt5663_priv *rt5663 = i2c_get_clientdata(i2c);
+
+ if (i2c->irq)
+ free_irq(i2c->irq, rt5663);
+
+ snd_soc_unregister_codec(&i2c->dev);
+
+ return 0;
+}
+
+static void rt5663_i2c_shutdown(struct i2c_client *client)
+{
+ struct rt5663_priv *rt5663 = i2c_get_clientdata(client);
+
+ regmap_write(rt5663->regmap, RT5663_RESET, 0);
+}
+
+static struct i2c_driver rt5663_i2c_driver = {
+ .driver = {
+ .name = "rt5663",
+ .acpi_match_table = ACPI_PTR(rt5663_acpi_match),
+ .of_match_table = of_match_ptr(rt5663_of_match),
+ },
+ .probe = rt5663_i2c_probe,
+ .remove = rt5663_i2c_remove,
+ .shutdown = rt5663_i2c_shutdown,
+ .id_table = rt5663_i2c_id,
+};
+module_i2c_driver(rt5663_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5663 driver");
+MODULE_AUTHOR("Jack Yu <jack.yu@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h
new file mode 100644
index 000000000000..2cc8f28080f6
--- /dev/null
+++ b/sound/soc/codecs/rt5663.h
@@ -0,0 +1,1121 @@
+/*
+ * rt5663.h -- RT5663 ALSA SoC audio driver
+ *
+ * Copyright 2016 Realtek Microelectronics
+ * Author: Jack Yu <jack.yu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5663_H__
+#define __RT5663_H__
+
+/* Info */
+#define RT5663_RESET 0x0000
+#define RT5663_VENDOR_ID 0x00fd
+#define RT5663_VENDOR_ID_1 0x00fe
+#define RT5663_VENDOR_ID_2 0x00ff
+
+#define RT5668_LOUT_CTRL 0x0001
+#define RT5668_HP_AMP_2 0x0003
+#define RT5668_MONO_OUT 0x0004
+#define RT5668_MONO_GAIN 0x0007
+
+#define RT5668_AEC_BST 0x000b
+#define RT5668_IN1_IN2 0x000c
+#define RT5668_IN3_IN4 0x000d
+#define RT5668_INL1_INR1 0x000f
+#define RT5668_CBJ_TYPE_2 0x0011
+#define RT5668_CBJ_TYPE_3 0x0012
+#define RT5668_CBJ_TYPE_4 0x0013
+#define RT5668_CBJ_TYPE_5 0x0014
+#define RT5668_CBJ_TYPE_8 0x0017
+
+/* I/O - ADC/DAC/DMIC */
+#define RT5668_DAC3_DIG_VOL 0x001a
+#define RT5668_DAC3_CTRL 0x001b
+#define RT5668_MONO_ADC_DIG_VOL 0x001d
+#define RT5668_STO2_ADC_DIG_VOL 0x001e
+#define RT5668_MONO_ADC_BST_GAIN 0x0020
+#define RT5668_STO2_ADC_BST_GAIN 0x0021
+#define RT5668_SIDETONE_CTRL 0x0024
+/* Mixer - D-D */
+#define RT5668_MONO1_ADC_MIXER 0x0027
+#define RT5668_STO2_ADC_MIXER 0x0028
+#define RT5668_MONO_DAC_MIXER 0x002b
+#define RT5668_DAC2_SRC_CTRL 0x002e
+#define RT5668_IF_3_4_DATA_CTL 0x002f
+#define RT5668_IF_5_DATA_CTL 0x0030
+#define RT5668_PDM_OUT_CTL 0x0031
+#define RT5668_PDM_I2C_DATA_CTL1 0x0032
+#define RT5668_PDM_I2C_DATA_CTL2 0x0033
+#define RT5668_PDM_I2C_DATA_CTL3 0x0034
+#define RT5668_PDM_I2C_DATA_CTL4 0x0035
+
+/* Mixer - Analog */
+#define RT5668_RECMIX1_NEW 0x003a
+#define RT5668_RECMIX1L_0 0x003b
+#define RT5668_RECMIX1L 0x003c
+#define RT5668_RECMIX1R_0 0x003d
+#define RT5668_RECMIX1R 0x003e
+#define RT5668_RECMIX2_NEW 0x003f
+#define RT5668_RECMIX2_L_2 0x0041
+#define RT5668_RECMIX2_R 0x0042
+#define RT5668_RECMIX2_R_2 0x0043
+#define RT5668_CALIB_REC_LR 0x0044
+#define RT5668_ALC_BK_GAIN 0x0049
+#define RT5668_MONOMIX_GAIN 0x004a
+#define RT5668_MONOMIX_IN_GAIN 0x004b
+#define RT5668_OUT_MIXL_GAIN 0x004d
+#define RT5668_OUT_LMIX_IN_GAIN 0x004e
+#define RT5668_OUT_RMIX_IN_GAIN 0x004f
+#define RT5668_OUT_RMIX_IN_GAIN1 0x0050
+#define RT5668_LOUT_MIXER_CTRL 0x0052
+/* Power */
+#define RT5668_PWR_VOL 0x0067
+
+#define RT5668_ADCDAC_RST 0x006d
+/* Format - ADC/DAC */
+#define RT5668_I2S34_SDP 0x0071
+#define RT5668_I2S5_SDP 0x0072
+/* Format - TDM Control */
+#define RT5668_TDM_5 0x007c
+#define RT5668_TDM_6 0x007d
+#define RT5668_TDM_7 0x007e
+#define RT5668_TDM_8 0x007f
+
+/* Function - Analog */
+#define RT5668_ASRC_3 0x0085
+#define RT5668_ASRC_6 0x0088
+#define RT5668_ASRC_7 0x0089
+#define RT5668_PLL_TRK_13 0x0099
+#define RT5668_I2S_M_CLK_CTL 0x00a0
+#define RT5668_FDIV_I2S34_M_CLK 0x00a1
+#define RT5668_FDIV_I2S34_M_CLK2 0x00a2
+#define RT5668_FDIV_I2S5_M_CLK 0x00a3
+#define RT5668_FDIV_I2S5_M_CLK2 0x00a4
+
+/* Function - Digital */
+#define RT5668_IRQ_4 0x00b9
+#define RT5668_GPIO_3 0x00c2
+#define RT5668_GPIO_4 0x00c3
+#define RT5668_GPIO_STA 0x00c4
+#define RT5668_HP_AMP_DET1 0x00d0
+#define RT5668_HP_AMP_DET2 0x00d1
+#define RT5668_HP_AMP_DET3 0x00d2
+#define RT5668_MID_BD_HP_AMP 0x00d3
+#define RT5668_LOW_BD_HP_AMP 0x00d4
+#define RT5668_SOF_VOL_ZC2 0x00da
+#define RT5668_ADC_STO2_ADJ1 0x00ee
+#define RT5668_ADC_STO2_ADJ2 0x00ef
+/* General Control */
+#define RT5668_A_JD_CTRL 0x00f0
+#define RT5668_JD1_TRES_CTRL 0x00f1
+#define RT5668_JD2_TRES_CTRL 0x00f2
+#define RT5668_JD_CTRL2 0x00f7
+#define RT5668_DUM_REG_2 0x00fb
+#define RT5668_DUM_REG_3 0x00fc
+
+
+#define RT5668_DACADC_DIG_VOL2 0x0101
+#define RT5668_DIG_IN_PIN2 0x0133
+#define RT5668_PAD_DRV_CTL1 0x0136
+#define RT5668_SOF_RAM_DEPOP 0x0138
+#define RT5668_VOL_TEST 0x013f
+#define RT5668_TEST_MODE_3 0x0147
+#define RT5668_TEST_MODE_4 0x0148
+#define RT5668_MONO_DYNA_1 0x0170
+#define RT5668_MONO_DYNA_2 0x0171
+#define RT5668_MONO_DYNA_3 0x0172
+#define RT5668_MONO_DYNA_4 0x0173
+#define RT5668_MONO_DYNA_5 0x0174
+#define RT5668_MONO_DYNA_6 0x0175
+#define RT5668_STO1_SIL_DET 0x0190
+#define RT5668_MONOL_SIL_DET 0x0191
+#define RT5668_MONOR_SIL_DET 0x0192
+#define RT5668_STO2_DAC_SIL 0x0193
+#define RT5668_PWR_SAV_CTL1 0x0194
+#define RT5668_PWR_SAV_CTL2 0x0195
+#define RT5668_PWR_SAV_CTL3 0x0196
+#define RT5668_PWR_SAV_CTL4 0x0197
+#define RT5668_PWR_SAV_CTL5 0x0198
+#define RT5668_PWR_SAV_CTL6 0x0199
+#define RT5668_MONO_AMP_CAL1 0x01a0
+#define RT5668_MONO_AMP_CAL2 0x01a1
+#define RT5668_MONO_AMP_CAL3 0x01a2
+#define RT5668_MONO_AMP_CAL4 0x01a3
+#define RT5668_MONO_AMP_CAL5 0x01a4
+#define RT5668_MONO_AMP_CAL6 0x01a5
+#define RT5668_MONO_AMP_CAL7 0x01a6
+#define RT5668_MONO_AMP_CAL_ST1 0x01a7
+#define RT5668_MONO_AMP_CAL_ST2 0x01a8
+#define RT5668_MONO_AMP_CAL_ST3 0x01a9
+#define RT5668_MONO_AMP_CAL_ST4 0x01aa
+#define RT5668_MONO_AMP_CAL_ST5 0x01ab
+#define RT5668_HP_IMP_SEN_13 0x01b9
+#define RT5668_HP_IMP_SEN_14 0x01ba
+#define RT5668_HP_IMP_SEN_6 0x01bb
+#define RT5668_HP_IMP_SEN_7 0x01bc
+#define RT5668_HP_IMP_SEN_8 0x01bd
+#define RT5668_HP_IMP_SEN_9 0x01be
+#define RT5668_HP_IMP_SEN_10 0x01bf
+#define RT5668_HP_LOGIC_3 0x01dc
+#define RT5668_HP_CALIB_ST10 0x01f3
+#define RT5668_HP_CALIB_ST11 0x01f4
+#define RT5668_PRO_REG_TBL_4 0x0203
+#define RT5668_PRO_REG_TBL_5 0x0204
+#define RT5668_PRO_REG_TBL_6 0x0205
+#define RT5668_PRO_REG_TBL_7 0x0206
+#define RT5668_PRO_REG_TBL_8 0x0207
+#define RT5668_PRO_REG_TBL_9 0x0208
+#define RT5668_SAR_ADC_INL_1 0x0210
+#define RT5668_SAR_ADC_INL_2 0x0211
+#define RT5668_SAR_ADC_INL_3 0x0212
+#define RT5668_SAR_ADC_INL_4 0x0213
+#define RT5668_SAR_ADC_INL_5 0x0214
+#define RT5668_SAR_ADC_INL_6 0x0215
+#define RT5668_SAR_ADC_INL_7 0x0216
+#define RT5668_SAR_ADC_INL_8 0x0217
+#define RT5668_SAR_ADC_INL_9 0x0218
+#define RT5668_SAR_ADC_INL_10 0x0219
+#define RT5668_SAR_ADC_INL_11 0x021a
+#define RT5668_SAR_ADC_INL_12 0x021b
+#define RT5668_DRC_CTRL_1 0x02ff
+#define RT5668_DRC1_CTRL_2 0x0301
+#define RT5668_DRC1_CTRL_3 0x0302
+#define RT5668_DRC1_CTRL_4 0x0303
+#define RT5668_DRC1_CTRL_5 0x0304
+#define RT5668_DRC1_CTRL_6 0x0305
+#define RT5668_DRC1_HD_CTRL_1 0x0306
+#define RT5668_DRC1_HD_CTRL_2 0x0307
+#define RT5668_DRC1_PRI_REG_1 0x0310
+#define RT5668_DRC1_PRI_REG_2 0x0311
+#define RT5668_DRC1_PRI_REG_3 0x0312
+#define RT5668_DRC1_PRI_REG_4 0x0313
+#define RT5668_DRC1_PRI_REG_5 0x0314
+#define RT5668_DRC1_PRI_REG_6 0x0315
+#define RT5668_DRC1_PRI_REG_7 0x0316
+#define RT5668_DRC1_PRI_REG_8 0x0317
+#define RT5668_ALC_PGA_CTL_1 0x0330
+#define RT5668_ALC_PGA_CTL_2 0x0331
+#define RT5668_ALC_PGA_CTL_3 0x0332
+#define RT5668_ALC_PGA_CTL_4 0x0333
+#define RT5668_ALC_PGA_CTL_5 0x0334
+#define RT5668_ALC_PGA_CTL_6 0x0335
+#define RT5668_ALC_PGA_CTL_7 0x0336
+#define RT5668_ALC_PGA_CTL_8 0x0337
+#define RT5668_ALC_PGA_REG_1 0x0338
+#define RT5668_ALC_PGA_REG_2 0x0339
+#define RT5668_ALC_PGA_REG_3 0x033a
+#define RT5668_ADC_EQ_RECOV_1 0x03c0
+#define RT5668_ADC_EQ_RECOV_2 0x03c1
+#define RT5668_ADC_EQ_RECOV_3 0x03c2
+#define RT5668_ADC_EQ_RECOV_4 0x03c3
+#define RT5668_ADC_EQ_RECOV_5 0x03c4
+#define RT5668_ADC_EQ_RECOV_6 0x03c5
+#define RT5668_ADC_EQ_RECOV_7 0x03c6
+#define RT5668_ADC_EQ_RECOV_8 0x03c7
+#define RT5668_ADC_EQ_RECOV_9 0x03c8
+#define RT5668_ADC_EQ_RECOV_10 0x03c9
+#define RT5668_ADC_EQ_RECOV_11 0x03ca
+#define RT5668_ADC_EQ_RECOV_12 0x03cb
+#define RT5668_ADC_EQ_RECOV_13 0x03cc
+#define RT5668_VID_HIDDEN 0x03fe
+#define RT5668_VID_CUSTOMER 0x03ff
+#define RT5668_SCAN_MODE 0x07f0
+#define RT5668_I2C_BYPA 0x07fa
+
+/* Headphone Amp Control 2 (0x0003) */
+#define RT5668_EN_DAC_HPO_MASK (0x1 << 14)
+#define RT5668_EN_DAC_HPO_SHIFT 14
+#define RT5668_EN_DAC_HPO_DIS (0x0 << 14)
+#define RT5668_EN_DAC_HPO_EN (0x1 << 14)
+
+/* Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005 0x0006) */
+#define RT5668_GAIN_HP (0x1f << 8)
+#define RT5668_GAIN_HP_SHIFT 8
+
+/* AEC BST Control (0x000b) */
+#define RT5668_GAIN_CBJ_MASK (0xf << 8)
+#define RT5668_GAIN_CBJ_SHIFT 8
+
+/* IN1 Control / MIC GND REF (0x000c) */
+#define RT5668_IN1_DF_MASK (0x1 << 15)
+#define RT5668_IN1_DF_SHIFT 15
+
+/* Combo Jack and Type Detection Control 1 (0x0010) */
+#define RT5668_CBJ_DET_MASK (0x1 << 15)
+#define RT5668_CBJ_DET_SHIFT 15
+#define RT5668_CBJ_DET_DIS (0x0 << 15)
+#define RT5668_CBJ_DET_EN (0x1 << 15)
+#define RT5668_DET_TYPE_MASK (0x1 << 12)
+#define RT5668_DET_TYPE_SHIFT 12
+#define RT5668_DET_TYPE_WLCSP (0x0 << 12)
+#define RT5668_DET_TYPE_QFN (0x1 << 12)
+#define RT5668_VREF_BIAS_MASK (0x1 << 6)
+#define RT5668_VREF_BIAS_SHIFT 6
+#define RT5668_VREF_BIAS_FSM (0x0 << 6)
+#define RT5668_VREF_BIAS_REG (0x1 << 6)
+
+/* REC Left Mixer Control 2 (0x003c) */
+#define RT5668_RECMIX1L_BST1_CBJ (0x1 << 7)
+#define RT5668_RECMIX1L_BST1_CBJ_SHIFT 7
+#define RT5668_RECMIX1L_BST2 (0x1 << 4)
+#define RT5668_RECMIX1L_BST2_SHIFT 4
+
+/* REC Right Mixer Control 2 (0x003e) */
+#define RT5668_RECMIX1R_BST2 (0x1 << 4)
+#define RT5668_RECMIX1R_BST2_SHIFT 4
+
+/* DAC1 Digital Volume (0x0019) */
+#define RT5668_DAC_L1_VOL_MASK (0xff << 8)
+#define RT5668_DAC_L1_VOL_SHIFT 8
+#define RT5668_DAC_R1_VOL_MASK (0xff)
+#define RT5668_DAC_R1_VOL_SHIFT 0
+
+/* ADC Digital Volume Control (0x001c) */
+#define RT5668_ADC_L_MUTE_MASK (0x1 << 15)
+#define RT5668_ADC_L_MUTE_SHIFT 15
+#define RT5668_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5668_ADC_L_VOL_SHIFT 8
+#define RT5668_ADC_R_MUTE_MASK (0x1 << 7)
+#define RT5668_ADC_R_MUTE_SHIFT 7
+#define RT5668_ADC_R_VOL_MASK (0x7f)
+#define RT5668_ADC_R_VOL_SHIFT 0
+
+/* Stereo ADC Mixer Control (0x0026) */
+#define RT5668_M_STO1_ADC_L1 (0x1 << 15)
+#define RT5668_M_STO1_ADC_L1_SHIFT 15
+#define RT5668_M_STO1_ADC_L2 (0x1 << 14)
+#define RT5668_M_STO1_ADC_L2_SHIFT 14
+#define RT5668_STO1_ADC_L1_SRC (0x1 << 13)
+#define RT5668_STO1_ADC_L1_SRC_SHIFT 13
+#define RT5668_STO1_ADC_L2_SRC (0x1 << 12)
+#define RT5668_STO1_ADC_L2_SRC_SHIFT 12
+#define RT5668_STO1_ADC_L_SRC (0x3 << 10)
+#define RT5668_STO1_ADC_L_SRC_SHIFT 10
+#define RT5668_M_STO1_ADC_R1 (0x1 << 7)
+#define RT5668_M_STO1_ADC_R1_SHIFT 7
+#define RT5668_M_STO1_ADC_R2 (0x1 << 6)
+#define RT5668_M_STO1_ADC_R2_SHIFT 6
+#define RT5668_STO1_ADC_R1_SRC (0x1 << 5)
+#define RT5668_STO1_ADC_R1_SRC_SHIFT 5
+#define RT5668_STO1_ADC_R2_SRC (0x1 << 4)
+#define RT5668_STO1_ADC_R2_SRC_SHIFT 4
+#define RT5668_STO1_ADC_R_SRC (0x3 << 2)
+#define RT5668_STO1_ADC_R_SRC_SHIFT 2
+
+/* ADC Mixer to DAC Mixer Control (0x0029) */
+#define RT5668_M_ADCMIX_L (0x1 << 15)
+#define RT5668_M_ADCMIX_L_SHIFT 15
+#define RT5668_M_DAC1_L (0x1 << 14)
+#define RT5668_M_DAC1_L_SHIFT 14
+#define RT5668_M_ADCMIX_R (0x1 << 7)
+#define RT5668_M_ADCMIX_R_SHIFT 7
+#define RT5668_M_DAC1_R (0x1 << 6)
+#define RT5668_M_DAC1_R_SHIFT 6
+
+/* Stereo DAC Mixer Control (0x002a) */
+#define RT5668_M_DAC_L1_STO_L (0x1 << 15)
+#define RT5668_M_DAC_L1_STO_L_SHIFT 15
+#define RT5668_M_DAC_R1_STO_L (0x1 << 13)
+#define RT5668_M_DAC_R1_STO_L_SHIFT 13
+#define RT5668_M_DAC_L1_STO_R (0x1 << 7)
+#define RT5668_M_DAC_L1_STO_R_SHIFT 7
+#define RT5668_M_DAC_R1_STO_R (0x1 << 5)
+#define RT5668_M_DAC_R1_STO_R_SHIFT 5
+
+/* Power Management for Digital 1 (0x0061) */
+#define RT5668_PWR_I2S1 (0x1 << 15)
+#define RT5668_PWR_I2S1_SHIFT 15
+#define RT5668_PWR_DAC_L1 (0x1 << 11)
+#define RT5668_PWR_DAC_L1_SHIFT 11
+#define RT5668_PWR_DAC_R1 (0x1 << 10)
+#define RT5668_PWR_DAC_R1_SHIFT 10
+#define RT5668_PWR_LDO_DACREF_MASK (0x1 << 8)
+#define RT5668_PWR_LDO_DACREF_SHIFT 8
+#define RT5668_PWR_LDO_DACREF_ON (0x1 << 8)
+#define RT5668_PWR_LDO_DACREF_DOWN (0x0 << 8)
+#define RT5668_PWR_LDO_SHIFT 8
+#define RT5668_PWR_ADC_L1 (0x1 << 4)
+#define RT5668_PWR_ADC_L1_SHIFT 4
+#define RT5668_PWR_ADC_R1 (0x1 << 3)
+#define RT5668_PWR_ADC_R1_SHIFT 3
+
+/* Power Management for Digital 2 (0x0062) */
+#define RT5668_PWR_ADC_S1F (0x1 << 15)
+#define RT5668_PWR_ADC_S1F_SHIFT 15
+#define RT5668_PWR_DAC_S1F (0x1 << 10)
+#define RT5668_PWR_DAC_S1F_SHIFT 10
+
+/* Power Management for Analog 1 (0x0063) */
+#define RT5668_PWR_VREF1 (0x1 << 15)
+#define RT5668_PWR_VREF1_MASK (0x1 << 15)
+#define RT5668_PWR_VREF1_SHIFT 15
+#define RT5668_PWR_FV1 (0x1 << 14)
+#define RT5668_PWR_FV1_MASK (0x1 << 14)
+#define RT5668_PWR_FV1_SHIFT 14
+#define RT5668_PWR_VREF2 (0x1 << 13)
+#define RT5668_PWR_VREF2_MASK (0x1 << 13)
+#define RT5668_PWR_VREF2_SHIFT 13
+#define RT5668_PWR_FV2 (0x1 << 12)
+#define RT5668_PWR_FV2_MASK (0x1 << 12)
+#define RT5668_PWR_FV2_SHIFT 12
+#define RT5668_PWR_MB (0x1 << 9)
+#define RT5668_PWR_MB_MASK (0x1 << 9)
+#define RT5668_PWR_MB_SHIFT 9
+#define RT5668_AMP_HP_MASK (0x3 << 2)
+#define RT5668_AMP_HP_SHIFT 2
+#define RT5668_AMP_HP_1X (0x0 << 2)
+#define RT5668_AMP_HP_3X (0x1 << 2)
+#define RT5668_AMP_HP_5X (0x3 << 2)
+#define RT5668_LDO1_DVO_MASK (0x3)
+#define RT5668_LDO1_DVO_SHIFT 0
+#define RT5668_LDO1_DVO_0_9V (0x0)
+#define RT5668_LDO1_DVO_1_0V (0x1)
+#define RT5668_LDO1_DVO_1_2V (0x2)
+#define RT5668_LDO1_DVO_1_4V (0x3)
+
+/* Power Management for Analog 2 (0x0064) */
+#define RT5668_PWR_BST1 (0x1 << 15)
+#define RT5668_PWR_BST1_MASK (0x1 << 15)
+#define RT5668_PWR_BST1_SHIFT 15
+#define RT5668_PWR_BST1_OFF (0x0 << 15)
+#define RT5668_PWR_BST1_ON (0x1 << 15)
+#define RT5668_PWR_BST2 (0x1 << 14)
+#define RT5668_PWR_BST2_MASK (0x1 << 14)
+#define RT5668_PWR_BST2_SHIFT 14
+#define RT5668_PWR_MB1 (0x1 << 11)
+#define RT5668_PWR_MB1_SHIFT 11
+#define RT5668_PWR_MB2 (0x1 << 10)
+#define RT5668_PWR_MB2_SHIFT 10
+#define RT5668_PWR_BST2_OP (0x1 << 6)
+#define RT5668_PWR_BST2_OP_MASK (0x1 << 6)
+#define RT5668_PWR_BST2_OP_SHIFT 6
+#define RT5668_PWR_JD1 (0x1 << 3)
+#define RT5668_PWR_JD1_MASK (0x1 << 3)
+#define RT5668_PWR_JD1_SHIFT 3
+#define RT5668_PWR_JD2 (0x1 << 2)
+#define RT5668_PWR_JD2_MASK (0x1 << 2)
+#define RT5668_PWR_JD2_SHIFT 2
+#define RT5668_PWR_RECMIX1 (0x1 << 1)
+#define RT5668_PWR_RECMIX1_SHIFT 1
+#define RT5668_PWR_RECMIX2 (0x1)
+#define RT5668_PWR_RECMIX2_SHIFT 0
+
+/* Power Management for Analog 3 (0x0065) */
+#define RT5668_PWR_CBJ_MASK (0x1 << 9)
+#define RT5668_PWR_CBJ_SHIFT 9
+#define RT5668_PWR_CBJ_OFF (0x0 << 9)
+#define RT5668_PWR_CBJ_ON (0x1 << 9)
+#define RT5668_PWR_PLL (0x1 << 6)
+#define RT5668_PWR_PLL_SHIFT 6
+#define RT5668_PWR_LDO2 (0x1 << 2)
+#define RT5668_PWR_LDO2_SHIFT 2
+
+/* Power Management for Volume (0x0067) */
+#define RT5668_PWR_MIC_DET (0x1 << 5)
+#define RT5668_PWR_MIC_DET_SHIFT 5
+
+/* MCLK and System Clock Detection Control (0x006b) */
+#define RT5668_EN_ANA_CLK_DET_MASK (0x1 << 15)
+#define RT5668_EN_ANA_CLK_DET_SHIFT 15
+#define RT5668_EN_ANA_CLK_DET_DIS (0x0 << 15)
+#define RT5668_EN_ANA_CLK_DET_AUTO (0x1 << 15)
+#define RT5668_PWR_CLK_DET_MASK (0x1)
+#define RT5668_PWR_CLK_DET_SHIFT 0
+#define RT5668_PWR_CLK_DET_DIS (0x0)
+#define RT5668_PWR_CLK_DET_EN (0x1)
+
+/* I2S1 Audio Serial Data Port Control (0x0070) */
+#define RT5668_I2S_MS_MASK (0x1 << 15)
+#define RT5668_I2S_MS_SHIFT 15
+#define RT5668_I2S_MS_M (0x0 << 15)
+#define RT5668_I2S_MS_S (0x1 << 15)
+#define RT5668_I2S_BP_MASK (0x1 << 8)
+#define RT5668_I2S_BP_SHIFT 8
+#define RT5668_I2S_BP_NOR (0x0 << 8)
+#define RT5668_I2S_BP_INV (0x1 << 8)
+#define RT5668_I2S_DL_MASK (0x3 << 4)
+#define RT5668_I2S_DL_SHIFT 4
+#define RT5668_I2S_DL_16 (0x0 << 4)
+#define RT5668_I2S_DL_20 (0x1 << 4)
+#define RT5668_I2S_DL_24 (0x2 << 4)
+#define RT5668_I2S_DL_8 (0x3 << 4)
+#define RT5668_I2S_DF_MASK (0x7)
+#define RT5668_I2S_DF_SHIFT 0
+#define RT5668_I2S_DF_I2S (0x0)
+#define RT5668_I2S_DF_LEFT (0x1)
+#define RT5668_I2S_DF_PCM_A (0x2)
+#define RT5668_I2S_DF_PCM_B (0x3)
+#define RT5668_I2S_DF_PCM_A_N (0x6)
+#define RT5668_I2S_DF_PCM_B_N (0x7)
+
+/* ADC/DAC Clock Control 1 (0x0073) */
+#define RT5668_I2S_PD1_MASK (0x7 << 12)
+#define RT5668_I2S_PD1_SHIFT 12
+#define RT5668_M_I2S_DIV_MASK (0x7 << 8)
+#define RT5668_M_I2S_DIV_SHIFT 8
+#define RT5668_CLK_SRC_MASK (0x3 << 4)
+#define RT5668_CLK_SRC_MCLK (0x0 << 4)
+#define RT5668_CLK_SRC_PLL_OUT (0x1 << 4)
+#define RT5668_CLK_SRC_DIV (0x2 << 4)
+#define RT5668_CLK_SRC_RC (0x3 << 4)
+#define RT5668_DAC_OSR_MASK (0x3 << 2)
+#define RT5668_DAC_OSR_SHIFT 2
+#define RT5668_DAC_OSR_128 (0x0 << 2)
+#define RT5668_DAC_OSR_64 (0x1 << 2)
+#define RT5668_DAC_OSR_32 (0x2 << 2)
+#define RT5668_ADC_OSR_MASK (0x3)
+#define RT5668_ADC_OSR_SHIFT 0
+#define RT5668_ADC_OSR_128 (0x0)
+#define RT5668_ADC_OSR_64 (0x1)
+#define RT5668_ADC_OSR_32 (0x2)
+
+/* TDM1 control 1 (0x0078) */
+#define RT5668_TDM_MODE_MASK (0x1 << 15)
+#define RT5668_TDM_MODE_SHIFT 15
+#define RT5668_TDM_MODE_I2S (0x0 << 15)
+#define RT5668_TDM_MODE_TDM (0x1 << 15)
+#define RT5668_TDM_IN_CH_MASK (0x3 << 10)
+#define RT5668_TDM_IN_CH_SHIFT 10
+#define RT5668_TDM_IN_CH_2 (0x0 << 10)
+#define RT5668_TDM_IN_CH_4 (0x1 << 10)
+#define RT5668_TDM_IN_CH_6 (0x2 << 10)
+#define RT5668_TDM_IN_CH_8 (0x3 << 10)
+#define RT5668_TDM_OUT_CH_MASK (0x3 << 8)
+#define RT5668_TDM_OUT_CH_SHIFT 8
+#define RT5668_TDM_OUT_CH_2 (0x0 << 8)
+#define RT5668_TDM_OUT_CH_4 (0x1 << 8)
+#define RT5668_TDM_OUT_CH_6 (0x2 << 8)
+#define RT5668_TDM_OUT_CH_8 (0x3 << 8)
+#define RT5668_TDM_IN_LEN_MASK (0x3 << 6)
+#define RT5668_TDM_IN_LEN_SHIFT 6
+#define RT5668_TDM_IN_LEN_16 (0x0 << 6)
+#define RT5668_TDM_IN_LEN_20 (0x1 << 6)
+#define RT5668_TDM_IN_LEN_24 (0x2 << 6)
+#define RT5668_TDM_IN_LEN_32 (0x3 << 6)
+#define RT5668_TDM_OUT_LEN_MASK (0x3 << 4)
+#define RT5668_TDM_OUT_LEN_SHIFT 4
+#define RT5668_TDM_OUT_LEN_16 (0x0 << 4)
+#define RT5668_TDM_OUT_LEN_20 (0x1 << 4)
+#define RT5668_TDM_OUT_LEN_24 (0x2 << 4)
+#define RT5668_TDM_OUT_LEN_32 (0x3 << 4)
+
+/* Global Clock Control (0x0080) */
+#define RT5668_SCLK_SRC_MASK (0x3 << 14)
+#define RT5668_SCLK_SRC_SHIFT 14
+#define RT5668_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5668_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5668_SCLK_SRC_RCCLK (0x2 << 14)
+#define RT5668_PLL1_SRC_MASK (0x7 << 8)
+#define RT5668_PLL1_SRC_SHIFT 8
+#define RT5668_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5668_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5668_PLL1_PD_MASK (0x1 << 4)
+#define RT5668_PLL1_PD_SHIFT 4
+
+#define RT5668_PLL_INP_MAX 40000000
+#define RT5668_PLL_INP_MIN 256000
+/* PLL M/N/K Code Control 1 (0x0081) */
+#define RT5668_PLL_N_MAX 0x001ff
+#define RT5668_PLL_N_MASK (RT5668_PLL_N_MAX << 7)
+#define RT5668_PLL_N_SHIFT 7
+#define RT5668_PLL_K_MAX 0x001f
+#define RT5668_PLL_K_MASK (RT5668_PLL_K_MAX)
+#define RT5668_PLL_K_SHIFT 0
+
+/* PLL M/N/K Code Control 2 (0x0082) */
+#define RT5668_PLL_M_MAX 0x00f
+#define RT5668_PLL_M_MASK (RT5668_PLL_M_MAX << 12)
+#define RT5668_PLL_M_SHIFT 12
+#define RT5668_PLL_M_BP (0x1 << 11)
+#define RT5668_PLL_M_BP_SHIFT 11
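
The PLL limit and field macros above are enough on their own to validate and pack a PLL code. The helper below is an illustrative sketch only (its name and calling convention are not part of this header); the range check and the field packing follow directly from the definitions above.

/* Illustrative only: validate the PLL reference clock against the
 * documented input range and pack the N/K codes into the PLL M/N/K
 * Code Control 1 register layout defined above.
 */
static int rt5668_pack_pll_ctrl1(unsigned int freq_in, unsigned int n_code,
				 unsigned int k_code, unsigned int *val)
{
	if (freq_in < RT5668_PLL_INP_MIN || freq_in > RT5668_PLL_INP_MAX)
		return -EINVAL;
	if (n_code > RT5668_PLL_N_MAX || k_code > RT5668_PLL_K_MAX)
		return -EINVAL;

	*val = (n_code << RT5668_PLL_N_SHIFT) | (k_code << RT5668_PLL_K_SHIFT);
	return 0;
}
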
+
+/* PLL tracking mode 1 (0x0083) */
+#define RT5668_I2S1_ASRC_MASK (0x1 << 13)
+#define RT5668_I2S1_ASRC_SHIFT 13
+#define RT5668_DAC_STO1_ASRC_MASK (0x1 << 12)
+#define RT5668_DAC_STO1_ASRC_SHIFT 12
+#define RT5668_ADC_STO1_ASRC_MASK (0x1 << 4)
+#define RT5668_ADC_STO1_ASRC_SHIFT 4
+
+/* PLL tracking mode 2 (0x0084)*/
+#define RT5668_DA_STO1_TRACK_MASK (0x7 << 12)
+#define RT5668_DA_STO1_TRACK_SHIFT 12
+#define RT5668_DA_STO1_TRACK_SYSCLK (0x0 << 12)
+#define RT5668_DA_STO1_TRACK_I2S1 (0x1 << 12)
+
+/* PLL tracking mode 3 (0x0085)*/
+#define RT5668_AD_STO1_TRACK_MASK (0x7 << 12)
+#define RT5668_AD_STO1_TRACK_SHIFT 12
+#define RT5668_AD_STO1_TRACK_SYSCLK (0x0 << 12)
+#define RT5668_AD_STO1_TRACK_I2S1 (0x1 << 12)
+
+/* HPOUT Charge pump control 1 (0x0091) */
+#define RT5668_OSW_HP_L_MASK (0x1 << 11)
+#define RT5668_OSW_HP_L_SHIFT 11
+#define RT5668_OSW_HP_L_EN (0x1 << 11)
+#define RT5668_OSW_HP_L_DIS (0x0 << 11)
+#define RT5668_OSW_HP_R_MASK (0x1 << 10)
+#define RT5668_OSW_HP_R_SHIFT 10
+#define RT5668_OSW_HP_R_EN (0x1 << 10)
+#define RT5668_OSW_HP_R_DIS (0x0 << 10)
+#define RT5668_SEL_PM_HP_MASK (0x3 << 8)
+#define RT5668_SEL_PM_HP_SHIFT 8
+#define RT5668_SEL_PM_HP_0_6 (0x0 << 8)
+#define RT5668_SEL_PM_HP_0_9 (0x1 << 8)
+#define RT5668_SEL_PM_HP_1_8 (0x2 << 8)
+#define RT5668_SEL_PM_HP_HIGH (0x3 << 8)
+#define RT5668_OVCD_HP_MASK (0x1 << 2)
+#define RT5668_OVCD_HP_SHIFT 2
+#define RT5668_OVCD_HP_EN (0x1 << 2)
+#define RT5668_OVCD_HP_DIS (0x0 << 2)
+
+/* RC Clock Control (0x0094) */
+#define RT5668_DIG_25M_CLK_MASK (0x1 << 9)
+#define RT5668_DIG_25M_CLK_SHIFT 9
+#define RT5668_DIG_25M_CLK_DIS (0x0 << 9)
+#define RT5668_DIG_25M_CLK_EN (0x1 << 9)
+#define RT5668_DIG_1M_CLK_MASK (0x1 << 8)
+#define RT5668_DIG_1M_CLK_SHIFT 8
+#define RT5668_DIG_1M_CLK_DIS (0x0 << 8)
+#define RT5668_DIG_1M_CLK_EN (0x1 << 8)
+
+/* Auto Turn On 1M RC CLK (0x009f) */
+#define RT5668_IRQ_POW_SAV_MASK (0x1 << 15)
+#define RT5668_IRQ_POW_SAV_SHIFT 15
+#define RT5668_IRQ_POW_SAV_DIS (0x0 << 15)
+#define RT5668_IRQ_POW_SAV_EN (0x1 << 15)
+#define RT5668_IRQ_POW_SAV_JD1_MASK (0x1 << 14)
+#define RT5668_IRQ_POW_SAV_JD1_SHIFT 14
+#define RT5668_IRQ_POW_SAV_JD1_DIS (0x0 << 14)
+#define RT5668_IRQ_POW_SAV_JD1_EN (0x1 << 14)
+
+/* IRQ Control 1 (0x00b6) */
+#define RT5668_EN_CB_JD_MASK (0x1 << 3)
+#define RT5668_EN_CB_JD_SHIFT 3
+#define RT5668_EN_CB_JD_EN (0x1 << 3)
+#define RT5668_EN_CB_JD_DIS (0x0 << 3)
+
+/* IRQ Control 3 (0x00b8) */
+#define RT5668_EN_IRQ_INLINE_MASK (0x1 << 6)
+#define RT5668_EN_IRQ_INLINE_SHIFT 6
+#define RT5668_EN_IRQ_INLINE_BYP (0x0 << 6)
+#define RT5668_EN_IRQ_INLINE_NOR (0x1 << 6)
+
+/* GPIO Control 1 (0x00c0) */
+#define RT5668_GP1_PIN_MASK (0x1 << 15)
+#define RT5668_GP1_PIN_SHIFT 15
+#define RT5668_GP1_PIN_GPIO1 (0x0 << 15)
+#define RT5668_GP1_PIN_IRQ (0x1 << 15)
+
+/* GPIO Control 2 (0x00c1) */
+#define RT5668_GP4_PIN_CONF_MASK (0x1 << 5)
+#define RT5668_GP4_PIN_CONF_SHIFT 5
+#define RT5668_GP4_PIN_CONF_INPUT (0x0 << 5)
+#define RT5668_GP4_PIN_CONF_OUTPUT (0x1 << 5)
+
+/* GPIO Control 2 (0x00c2) */
+#define RT5668_GP8_PIN_CONF_MASK (0x1 << 13)
+#define RT5668_GP8_PIN_CONF_SHIFT 13
+#define RT5668_GP8_PIN_CONF_INPUT (0x0 << 13)
+#define RT5668_GP8_PIN_CONF_OUTPUT (0x1 << 13)
+
+/* 4 Buttons Inline Command Function 1 (0x00df) */
+#define RT5668_4BTN_CLK_DEB_MASK (0x3 << 2)
+#define RT5668_4BTN_CLK_DEB_SHIFT 2
+#define RT5668_4BTN_CLK_DEB_8MS (0x0 << 2)
+#define RT5668_4BTN_CLK_DEB_16MS (0x1 << 2)
+#define RT5668_4BTN_CLK_DEB_32MS (0x2 << 2)
+#define RT5668_4BTN_CLK_DEB_65MS (0x3 << 2)
+
+/* Inline Command Function 6 (0x00e0) */
+#define RT5668_EN_4BTN_INL_MASK (0x1 << 15)
+#define RT5668_EN_4BTN_INL_SHIFT 15
+#define RT5668_EN_4BTN_INL_DIS (0x0 << 15)
+#define RT5668_EN_4BTN_INL_EN (0x1 << 15)
+#define RT5668_RESET_4BTN_INL_MASK (0x1 << 14)
+#define RT5668_RESET_4BTN_INL_SHIFT 14
+#define RT5668_RESET_4BTN_INL_RESET (0x0 << 14)
+#define RT5668_RESET_4BTN_INL_NOR (0x1 << 14)
+
+/* Digital Misc Control (0x00fa) */
+#define RT5668_DIG_GATE_CTRL_MASK 0x1
+#define RT5668_DIG_GATE_CTRL_SHIFT (0)
+#define RT5668_DIG_GATE_CTRL_DIS 0x0
+#define RT5668_DIG_GATE_CTRL_EN 0x1
+
+/* Chopper and Clock control for DAC L (0x013a)*/
+#define RT5668_CKXEN_DAC1_MASK (0x1 << 13)
+#define RT5668_CKXEN_DAC1_SHIFT 13
+#define RT5668_CKGEN_DAC1_MASK (0x1 << 12)
+#define RT5668_CKGEN_DAC1_SHIFT 12
+
+/* Chopper and Clock control for ADC (0x013b)*/
+#define RT5668_CKXEN_ADCC_MASK (0x1 << 13)
+#define RT5668_CKXEN_ADCC_SHIFT 13
+#define RT5668_CKGEN_ADCC_MASK (0x1 << 12)
+#define RT5668_CKGEN_ADCC_SHIFT 12
+
+/* HP Behavior Logic Control 2 (0x01db) */
+#define RT5668_HP_SIG_SRC1_MASK (0x3)
+#define RT5668_HP_SIG_SRC1_SHIFT 0
+#define RT5668_HP_SIG_SRC1_HP_DC (0x0)
+#define RT5668_HP_SIG_SRC1_HP_CALIB (0x1)
+#define RT5668_HP_SIG_SRC1_REG (0x2)
+#define RT5668_HP_SIG_SRC1_SILENCE (0x3)
+
+/* RT5663 specific registers */
+#define RT5663_HP_OUT_EN 0x0002
+#define RT5663_HP_LCH_DRE 0x0005
+#define RT5663_HP_RCH_DRE 0x0006
+#define RT5663_CALIB_BST 0x000a
+#define RT5663_RECMIX 0x0010
+#define RT5663_SIL_DET_CTL 0x0015
+#define RT5663_PWR_SAV_SILDET 0x0016
+#define RT5663_SIDETONE_CTL 0x0018
+#define RT5663_STO1_DAC_DIG_VOL 0x0019
+#define RT5663_STO1_ADC_DIG_VOL 0x001c
+#define RT5663_STO1_BOOST 0x001f
+#define RT5663_HP_IMP_GAIN_1 0x0022
+#define RT5663_HP_IMP_GAIN_2 0x0023
+#define RT5663_STO1_ADC_MIXER 0x0026
+#define RT5663_AD_DA_MIXER 0x0029
+#define RT5663_STO_DAC_MIXER 0x002a
+#define RT5663_DIG_SIDE_MIXER 0x002c
+#define RT5663_BYPASS_STO_DAC 0x002d
+#define RT5663_CALIB_REC_MIX 0x0040
+#define RT5663_PWR_DIG_1 0x0061
+#define RT5663_PWR_DIG_2 0x0062
+#define RT5663_PWR_ANLG_1 0x0063
+#define RT5663_PWR_ANLG_2 0x0064
+#define RT5663_PWR_ANLG_3 0x0065
+#define RT5663_PWR_MIXER 0x0066
+#define RT5663_SIG_CLK_DET 0x006b
+#define RT5663_PRE_DIV_GATING_1 0x006e
+#define RT5663_PRE_DIV_GATING_2 0x006f
+#define RT5663_I2S1_SDP 0x0070
+#define RT5663_ADDA_CLK_1 0x0073
+#define RT5663_ADDA_RST 0x0074
+#define RT5663_FRAC_DIV_1 0x0075
+#define RT5663_FRAC_DIV_2 0x0076
+#define RT5663_TDM_1 0x0077
+#define RT5663_TDM_2 0x0078
+#define RT5663_TDM_3 0x0079
+#define RT5663_TDM_4 0x007a
+#define RT5663_TDM_5 0x007b
+#define RT5663_GLB_CLK 0x0080
+#define RT5663_PLL_1 0x0081
+#define RT5663_PLL_2 0x0082
+#define RT5663_ASRC_1 0x0083
+#define RT5663_ASRC_2 0x0084
+#define RT5663_ASRC_4 0x0086
+#define RT5663_DUMMY_REG 0x0087
+#define RT5663_ASRC_8 0x008a
+#define RT5663_ASRC_9 0x008b
+#define RT5663_ASRC_11 0x008c
+#define RT5663_DEPOP_1 0x008e
+#define RT5663_DEPOP_2 0x008f
+#define RT5663_DEPOP_3 0x0090
+#define RT5663_HP_CHARGE_PUMP_1 0x0091
+#define RT5663_HP_CHARGE_PUMP_2 0x0092
+#define RT5663_MICBIAS_1 0x0093
+#define RT5663_RC_CLK 0x0094
+#define RT5663_ASRC_11_2 0x0097
+#define RT5663_DUMMY_REG_2 0x0098
+#define RT5663_REC_PATH_GAIN 0x009a
+#define RT5663_AUTO_1MRC_CLK 0x009f
+#define RT5663_ADC_EQ_1 0x00ae
+#define RT5663_ADC_EQ_2 0x00af
+#define RT5663_IRQ_1 0x00b6
+#define RT5663_IRQ_2 0x00b7
+#define RT5663_IRQ_3 0x00b8
+#define RT5663_IRQ_4 0x00ba
+#define RT5663_IRQ_5 0x00bb
+#define RT5663_INT_ST_1 0x00be
+#define RT5663_INT_ST_2 0x00bf
+#define RT5663_GPIO_1 0x00c0
+#define RT5663_GPIO_2 0x00c1
+#define RT5663_GPIO_STA 0x00c5
+#define RT5663_SIN_GEN_1 0x00cb
+#define RT5663_SIN_GEN_2 0x00cc
+#define RT5663_SIN_GEN_3 0x00cd
+#define RT5663_SOF_VOL_ZC1 0x00d9
+#define RT5663_IL_CMD_1 0x00db
+#define RT5663_IL_CMD_2 0x00dc
+#define RT5663_IL_CMD_3 0x00dd
+#define RT5663_IL_CMD_4 0x00de
+#define RT5663_IL_CMD_5 0x00df
+#define RT5663_IL_CMD_6 0x00e0
+#define RT5663_IL_CMD_7 0x00e1
+#define RT5663_IL_CMD_8 0x00e2
+#define RT5663_IL_CMD_PWRSAV1 0x00e4
+#define RT5663_IL_CMD_PWRSAV2 0x00e5
+#define RT5663_EM_JACK_TYPE_1 0x00e6
+#define RT5663_EM_JACK_TYPE_2 0x00e7
+#define RT5663_EM_JACK_TYPE_3 0x00e8
+#define RT5663_EM_JACK_TYPE_4 0x00e9
+#define RT5663_EM_JACK_TYPE_5 0x00ea
+#define RT5663_EM_JACK_TYPE_6 0x00eb
+#define RT5663_STO1_HPF_ADJ1 0x00ec
+#define RT5663_STO1_HPF_ADJ2 0x00ed
+#define RT5663_FAST_OFF_MICBIAS 0x00f4
+#define RT5663_JD_CTRL1 0x00f6
+#define RT5663_JD_CTRL2 0x00f8
+#define RT5663_DIG_MISC 0x00fa
+#define RT5663_DIG_VOL_ZCD 0x0100
+#define RT5663_ANA_BIAS_CUR_1 0x0108
+#define RT5663_ANA_BIAS_CUR_2 0x0109
+#define RT5663_ANA_BIAS_CUR_3 0x010a
+#define RT5663_ANA_BIAS_CUR_4 0x010b
+#define RT5663_ANA_BIAS_CUR_5 0x010c
+#define RT5663_ANA_BIAS_CUR_6 0x010d
+#define RT5663_BIAS_CUR_5 0x010e
+#define RT5663_BIAS_CUR_6 0x010f
+#define RT5663_BIAS_CUR_7 0x0110
+#define RT5663_BIAS_CUR_8 0x0111
+#define RT5663_DACREF_LDO 0x0112
+#define RT5663_DUMMY_REG_3 0x0113
+#define RT5663_BIAS_CUR_9 0x0114
+#define RT5663_DUMMY_REG_4 0x0116
+#define RT5663_VREFADJ_OP 0x0117
+#define RT5663_VREF_RECMIX 0x0118
+#define RT5663_CHARGE_PUMP_1 0x0125
+#define RT5663_CHARGE_PUMP_1_2 0x0126
+#define RT5663_CHARGE_PUMP_1_3 0x0127
+#define RT5663_CHARGE_PUMP_2 0x0128
+#define RT5663_DIG_IN_PIN1 0x0132
+#define RT5663_PAD_DRV_CTL 0x0137
+#define RT5663_PLL_INT_REG 0x0139
+#define RT5663_CHOP_DAC_L 0x013a
+#define RT5663_CHOP_ADC 0x013b
+#define RT5663_CALIB_ADC 0x013c
+#define RT5663_CHOP_DAC_R 0x013d
+#define RT5663_DUMMY_CTL_DACLR 0x013e
+#define RT5663_DUMMY_REG_5 0x0140
+#define RT5663_SOFT_RAMP 0x0141
+#define RT5663_TEST_MODE_1 0x0144
+#define RT5663_TEST_MODE_2 0x0145
+#define RT5663_TEST_MODE_3 0x0146
+#define RT5663_STO_DRE_1 0x0160
+#define RT5663_STO_DRE_2 0x0161
+#define RT5663_STO_DRE_3 0x0162
+#define RT5663_STO_DRE_4 0x0163
+#define RT5663_STO_DRE_5 0x0164
+#define RT5663_STO_DRE_6 0x0165
+#define RT5663_STO_DRE_7 0x0166
+#define RT5663_STO_DRE_8 0x0167
+#define RT5663_STO_DRE_9 0x0168
+#define RT5663_STO_DRE_10 0x0169
+#define RT5663_MIC_DECRO_1 0x0180
+#define RT5663_MIC_DECRO_2 0x0181
+#define RT5663_MIC_DECRO_3 0x0182
+#define RT5663_MIC_DECRO_4 0x0183
+#define RT5663_MIC_DECRO_5 0x0184
+#define RT5663_MIC_DECRO_6 0x0185
+#define RT5663_HP_DECRO_1 0x01b0
+#define RT5663_HP_DECRO_2 0x01b1
+#define RT5663_HP_DECRO_3 0x01b2
+#define RT5663_HP_DECRO_4 0x01b3
+#define RT5663_HP_DECOUP 0x01b4
+#define RT5663_HP_IMP_SEN_MAP8 0x01b5
+#define RT5663_HP_IMP_SEN_MAP9 0x01b6
+#define RT5663_HP_IMP_SEN_MAP10 0x01b7
+#define RT5663_HP_IMP_SEN_MAP11 0x01b8
+#define RT5663_HP_IMP_SEN_1 0x01c0
+#define RT5663_HP_IMP_SEN_2 0x01c1
+#define RT5663_HP_IMP_SEN_3 0x01c2
+#define RT5663_HP_IMP_SEN_4 0x01c3
+#define RT5663_HP_IMP_SEN_5 0x01c4
+#define RT5663_HP_IMP_SEN_6 0x01c5
+#define RT5663_HP_IMP_SEN_7 0x01c6
+#define RT5663_HP_IMP_SEN_8 0x01c7
+#define RT5663_HP_IMP_SEN_9 0x01c8
+#define RT5663_HP_IMP_SEN_10 0x01c9
+#define RT5663_HP_IMP_SEN_11 0x01ca
+#define RT5663_HP_IMP_SEN_12 0x01cb
+#define RT5663_HP_IMP_SEN_13 0x01cc
+#define RT5663_HP_IMP_SEN_14 0x01cd
+#define RT5663_HP_IMP_SEN_15 0x01ce
+#define RT5663_HP_IMP_SEN_16 0x01cf
+#define RT5663_HP_IMP_SEN_17 0x01d0
+#define RT5663_HP_IMP_SEN_18 0x01d1
+#define RT5663_HP_IMP_SEN_19 0x01d2
+#define RT5663_HP_IMPSEN_DIG5 0x01d3
+#define RT5663_HP_IMPSEN_MAP1 0x01d4
+#define RT5663_HP_IMPSEN_MAP2 0x01d5
+#define RT5663_HP_IMPSEN_MAP3 0x01d6
+#define RT5663_HP_IMPSEN_MAP4 0x01d7
+#define RT5663_HP_IMPSEN_MAP5 0x01d8
+#define RT5663_HP_IMPSEN_MAP7 0x01d9
+#define RT5663_HP_LOGIC_1 0x01da
+#define RT5663_HP_LOGIC_2 0x01db
+#define RT5663_HP_CALIB_1 0x01dd
+#define RT5663_HP_CALIB_1_1 0x01de
+#define RT5663_HP_CALIB_2 0x01df
+#define RT5663_HP_CALIB_3 0x01e0
+#define RT5663_HP_CALIB_4 0x01e1
+#define RT5663_HP_CALIB_5 0x01e2
+#define RT5663_HP_CALIB_5_1 0x01e3
+#define RT5663_HP_CALIB_6 0x01e4
+#define RT5663_HP_CALIB_7 0x01e5
+#define RT5663_HP_CALIB_9 0x01e6
+#define RT5663_HP_CALIB_10 0x01e7
+#define RT5663_HP_CALIB_11 0x01e8
+#define RT5663_HP_CALIB_ST1 0x01ea
+#define RT5663_HP_CALIB_ST2 0x01eb
+#define RT5663_HP_CALIB_ST3 0x01ec
+#define RT5663_HP_CALIB_ST4 0x01ed
+#define RT5663_HP_CALIB_ST5 0x01ee
+#define RT5663_HP_CALIB_ST6 0x01ef
+#define RT5663_HP_CALIB_ST7 0x01f0
+#define RT5663_HP_CALIB_ST8 0x01f1
+#define RT5663_HP_CALIB_ST9 0x01f2
+#define RT5663_HP_AMP_DET 0x0200
+#define RT5663_DUMMY_REG_6 0x0201
+#define RT5663_HP_BIAS 0x0202
+#define RT5663_CBJ_1 0x0250
+#define RT5663_CBJ_2 0x0251
+#define RT5663_CBJ_3 0x0252
+#define RT5663_DUMMY_1 0x02fa
+#define RT5663_DUMMY_2 0x02fb
+#define RT5663_DUMMY_3 0x02fc
+#define RT5663_ANA_JD 0x0300
+#define RT5663_ADC_LCH_LPF1_A1 0x03d0
+#define RT5663_ADC_RCH_LPF1_A1 0x03d1
+#define RT5663_ADC_LCH_LPF1_H0 0x03d2
+#define RT5663_ADC_RCH_LPF1_H0 0x03d3
+#define RT5663_ADC_LCH_BPF1_A1 0x03d4
+#define RT5663_ADC_RCH_BPF1_A1 0x03d5
+#define RT5663_ADC_LCH_BPF1_A2 0x03d6
+#define RT5663_ADC_RCH_BPF1_A2 0x03d7
+#define RT5663_ADC_LCH_BPF1_H0 0x03d8
+#define RT5663_ADC_RCH_BPF1_H0 0x03d9
+#define RT5663_ADC_LCH_BPF2_A1 0x03da
+#define RT5663_ADC_RCH_BPF2_A1 0x03db
+#define RT5663_ADC_LCH_BPF2_A2 0x03dc
+#define RT5663_ADC_RCH_BPF2_A2 0x03dd
+#define RT5663_ADC_LCH_BPF2_H0 0x03de
+#define RT5663_ADC_RCH_BPF2_H0 0x03df
+#define RT5663_ADC_LCH_BPF3_A1 0x03e0
+#define RT5663_ADC_RCH_BPF3_A1 0x03e1
+#define RT5663_ADC_LCH_BPF3_A2 0x03e2
+#define RT5663_ADC_RCH_BPF3_A2 0x03e3
+#define RT5663_ADC_LCH_BPF3_H0 0x03e4
+#define RT5663_ADC_RCH_BPF3_H0 0x03e5
+#define RT5663_ADC_LCH_BPF4_A1 0x03e6
+#define RT5663_ADC_RCH_BPF4_A1 0x03e7
+#define RT5663_ADC_LCH_BPF4_A2 0x03e8
+#define RT5663_ADC_RCH_BPF4_A2 0x03e9
+#define RT5663_ADC_LCH_BPF4_H0 0x03ea
+#define RT5663_ADC_RCH_BPF4_H0 0x03eb
+#define RT5663_ADC_LCH_HPF1_A1 0x03ec
+#define RT5663_ADC_RCH_HPF1_A1 0x03ed
+#define RT5663_ADC_LCH_HPF1_H0 0x03ee
+#define RT5663_ADC_RCH_HPF1_H0 0x03ef
+#define RT5663_ADC_EQ_PRE_VOL_L 0x03f0
+#define RT5663_ADC_EQ_PRE_VOL_R 0x03f1
+#define RT5663_ADC_EQ_POST_VOL_L 0x03f2
+#define RT5663_ADC_EQ_POST_VOL_R 0x03f3
+
+/* RT5663: RECMIX Control (0x0010) */
+#define RT5663_RECMIX1_BST1_MASK (0x1)
+#define RT5663_RECMIX1_BST1_SHIFT 0
+#define RT5663_RECMIX1_BST1_ON (0x0)
+#define RT5663_RECMIX1_BST1_OFF (0x1)
+
+/* RT5663: Bypass Stereo1 DAC Mixer Control (0x002d) */
+#define RT5663_DACL1_SRC_MASK (0x1 << 3)
+#define RT5663_DACL1_SRC_SHIFT 3
+#define RT5663_DACR1_SRC_MASK (0x1 << 2)
+#define RT5663_DACR1_SRC_SHIFT 2
+
+/* RT5663: TDM control 2 (0x0078) */
+#define RT5663_DATA_SWAP_ADCDAT1_MASK (0x3 << 14)
+#define RT5663_DATA_SWAP_ADCDAT1_SHIFT 14
+#define RT5663_DATA_SWAP_ADCDAT1_LR (0x0 << 14)
+#define RT5663_DATA_SWAP_ADCDAT1_RL (0x1 << 14)
+#define RT5663_DATA_SWAP_ADCDAT1_LL (0x2 << 14)
+#define RT5663_DATA_SWAP_ADCDAT1_RR (0x3 << 14)
+
+/* RT5663: TDM control 5 (0x007b) */
+#define RT5663_TDM_LENGTN_MASK (0x3)
+#define RT5663_TDM_LENGTN_SHIFT 0
+#define RT5663_TDM_LENGTN_16 (0x0)
+#define RT5663_TDM_LENGTN_20 (0x1)
+#define RT5663_TDM_LENGTN_24 (0x2)
+#define RT5663_TDM_LENGTN_32 (0x3)
+
+/* RT5663: Global Clock Control (0x0080) */
+#define RT5663_SCLK_SRC_MASK (0x3 << 14)
+#define RT5663_SCLK_SRC_SHIFT 14
+#define RT5663_SCLK_SRC_MCLK (0x0 << 14)
+#define RT5663_SCLK_SRC_PLL1 (0x1 << 14)
+#define RT5663_SCLK_SRC_RCCLK (0x2 << 14)
+#define RT5663_PLL1_SRC_MASK (0x7 << 11)
+#define RT5663_PLL1_SRC_SHIFT 11
+#define RT5663_PLL1_SRC_MCLK (0x0 << 11)
+#define RT5663_PLL1_SRC_BCLK1 (0x1 << 11)
+
+/* PLL tracking mode 1 (0x0083) */
+#define RT5663_I2S1_ASRC_MASK (0x1 << 11)
+#define RT5663_I2S1_ASRC_SHIFT 11
+#define RT5663_DAC_STO1_ASRC_MASK (0x1 << 10)
+#define RT5663_DAC_STO1_ASRC_SHIFT 10
+#define RT5663_ADC_STO1_ASRC_MASK (0x1 << 3)
+#define RT5663_ADC_STO1_ASRC_SHIFT 3
+
+/* PLL tracking mode 2 (0x0084)*/
+#define RT5663_DA_STO1_TRACK_MASK (0x7 << 12)
+#define RT5663_DA_STO1_TRACK_SHIFT 12
+#define RT5663_DA_STO1_TRACK_SYSCLK (0x0 << 12)
+#define RT5663_DA_STO1_TRACK_I2S1 (0x1 << 12)
+#define RT5663_AD_STO1_TRACK_MASK (0x7)
+#define RT5663_AD_STO1_TRACK_SHIFT 0
+#define RT5663_AD_STO1_TRACK_SYSCLK (0x0)
+#define RT5663_AD_STO1_TRACK_I2S1 (0x1)
+
+/* RT5663: HPOUT Charge pump control 1 (0x0091) */
+#define RT5663_SI_HP_MASK (0x1 << 12)
+#define RT5663_SI_HP_SHIFT 12
+#define RT5663_SI_HP_EN (0x1 << 12)
+#define RT5663_SI_HP_DIS (0x0 << 12)
+
+/* RT5663: GPIO Control 2 (0x00b6) */
+#define RT5663_GP1_PIN_CONF_MASK (0x1 << 2)
+#define RT5663_GP1_PIN_CONF_SHIFT 2
+#define RT5663_GP1_PIN_CONF_OUTPUT (0x1 << 2)
+#define RT5663_GP1_PIN_CONF_INPUT (0x0 << 2)
+
+/* RT5663: GPIO Control 2 (0x00b7) */
+#define RT5663_EN_IRQ_INLINE_MASK (0x1 << 3)
+#define RT5663_EN_IRQ_INLINE_SHIFT 3
+#define RT5663_EN_IRQ_INLINE_NOR (0x1 << 3)
+#define RT5663_EN_IRQ_INLINE_BYP (0x0 << 3)
+
+/* RT5663: IRQ Control 1 (0x00c1) */
+#define RT5663_EN_IRQ_JD1_MASK (0x1 << 6)
+#define RT5663_EN_IRQ_JD1_SHIFT 6
+#define RT5663_EN_IRQ_JD1_EN (0x1 << 6)
+#define RT5663_EN_IRQ_JD1_DIS (0x0 << 6)
+
+/* RT5663: Inline Command Function 2 (0x00dc) */
+#define RT5663_PWR_MIC_DET_MASK (0x1)
+#define RT5663_PWR_MIC_DET_SHIFT 0
+#define RT5663_PWR_MIC_DET_ON (0x1)
+#define RT5663_PWR_MIC_DET_OFF (0x0)
+
+/* RT5663: Embedded Jack and Type Detection Control 1 (0x00e6) */
+#define RT5663_CBJ_DET_MASK (0x1 << 15)
+#define RT5663_CBJ_DET_SHIFT 15
+#define RT5663_CBJ_DET_DIS (0x0 << 15)
+#define RT5663_CBJ_DET_EN (0x1 << 15)
+#define RT5663_EXT_JD_MASK (0x1 << 11)
+#define RT5663_EXT_JD_SHIFT 11
+#define RT5663_EXT_JD_EN (0x1 << 11)
+#define RT5663_EXT_JD_DIS (0x0 << 11)
+#define RT5663_POL_EXT_JD_MASK (0x1 << 10)
+#define RT5663_POL_EXT_JD_SHIFT 10
+#define RT5663_POL_EXT_JD_EN (0x1 << 10)
+#define RT5663_POL_EXT_JD_DIS (0x0 << 10)
+
+/* RT5663: DACREF LDO Control (0x0112)*/
+#define RT5663_PWR_LDO_DACREFL_MASK (0x1 << 9)
+#define RT5663_PWR_LDO_DACREFL_SHIFT 9
+#define RT5663_PWR_LDO_DACREFR_MASK (0x1 << 1)
+#define RT5663_PWR_LDO_DACREFR_SHIFT 1
+
+/* RT5663: Stereo Dynamic Range Enhancement Control 9 (0x0168, 0x0169)*/
+#define RT5663_DRE_GAIN_HP_MASK (0x1f)
+#define RT5663_DRE_GAIN_HP_SHIFT 0
+
+/* RT5663: Combo Jack Control (0x0250) */
+#define RT5663_INBUF_CBJ_BST1_MASK (0x1 << 11)
+#define RT5663_INBUF_CBJ_BST1_SHIFT 11
+#define RT5663_INBUF_CBJ_BST1_ON (0x1 << 11)
+#define RT5663_INBUF_CBJ_BST1_OFF (0x0 << 11)
+#define RT5663_CBJ_SENSE_BST1_MASK (0x1 << 10)
+#define RT5663_CBJ_SENSE_BST1_SHIFT 10
+#define RT5663_CBJ_SENSE_BST1_L (0x1 << 10)
+#define RT5663_CBJ_SENSE_BST1_R (0x0 << 10)
+
+/* RT5663: Combo Jack Control (0x0251) */
+#define RT5663_GAIN_BST1_MASK (0xf)
+#define RT5663_GAIN_BST1_SHIFT 0
+
+/* RT5663: Dummy register 1 (0x02fa) */
+#define RT5663_EMB_CLK_MASK (0x1 << 9)
+#define RT5663_EMB_CLK_SHIFT 9
+#define RT5663_EMB_CLK_EN (0x1 << 9)
+#define RT5663_EMB_CLK_DIS (0x0 << 9)
+#define RT5663_HPA_CPL_BIAS_MASK (0x7 << 6)
+#define RT5663_HPA_CPL_BIAS_SHIFT 6
+#define RT5663_HPA_CPL_BIAS_0_5 (0x0 << 6)
+#define RT5663_HPA_CPL_BIAS_1 (0x1 << 6)
+#define RT5663_HPA_CPL_BIAS_2 (0x2 << 6)
+#define RT5663_HPA_CPL_BIAS_3 (0x3 << 6)
+#define RT5663_HPA_CPL_BIAS_4_1 (0x4 << 6)
+#define RT5663_HPA_CPL_BIAS_4_2 (0x5 << 6)
+#define RT5663_HPA_CPL_BIAS_6 (0x6 << 6)
+#define RT5663_HPA_CPL_BIAS_8 (0x7 << 6)
+#define RT5663_HPA_CPR_BIAS_MASK (0x7 << 3)
+#define RT5663_HPA_CPR_BIAS_SHIFT 3
+#define RT5663_HPA_CPR_BIAS_0_5 (0x0 << 3)
+#define RT5663_HPA_CPR_BIAS_1 (0x1 << 3)
+#define RT5663_HPA_CPR_BIAS_2 (0x2 << 3)
+#define RT5663_HPA_CPR_BIAS_3 (0x3 << 3)
+#define RT5663_HPA_CPR_BIAS_4_1 (0x4 << 3)
+#define RT5663_HPA_CPR_BIAS_4_2 (0x5 << 3)
+#define RT5663_HPA_CPR_BIAS_6 (0x6 << 3)
+#define RT5663_HPA_CPR_BIAS_8 (0x7 << 3)
+#define RT5663_DUMMY_BIAS_MASK (0x7)
+#define RT5663_DUMMY_BIAS_SHIFT 0
+#define RT5663_DUMMY_BIAS_0_5 (0x0)
+#define RT5663_DUMMY_BIAS_1 (0x1)
+#define RT5663_DUMMY_BIAS_2 (0x2)
+#define RT5663_DUMMY_BIAS_3 (0x3)
+#define RT5663_DUMMY_BIAS_4_1 (0x4)
+#define RT5663_DUMMY_BIAS_4_2 (0x5)
+#define RT5663_DUMMY_BIAS_6 (0x6)
+#define RT5663_DUMMY_BIAS_8 (0x7)
+
+/* System Clock Source */
+enum {
+ RT5663_SCLK_S_MCLK,
+ RT5663_SCLK_S_PLL1,
+ RT5663_SCLK_S_RCCLK,
+};
+
+/* PLL1 Source */
+enum {
+ RT5663_PLL1_S_MCLK,
+ RT5663_PLL1_S_BCLK1,
+};
+
+enum {
+ RT5663_AIF,
+ RT5663_AIFS,
+};
+
+/* asrc clock source */
+enum {
+ RT5663_CLK_SEL_SYS = 0x0,
+ RT5663_CLK_SEL_I2S1_ASRC = 0x1,
+};
+
+/* filter mask */
+enum {
+ RT5663_DA_STEREO_FILTER = 0x1,
+ RT5663_AD_STEREO_FILTER = 0x2,
+};
+
+int rt5663_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hs_jack);
+int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
+ unsigned int filter_mask, unsigned int clk_src);
+
+#endif /* __RT5663_H__ */
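
The two functions exported at the end of this header are the hooks a machine driver is expected to use. A minimal sketch of that call site follows; the jack object and the init-callback wiring are assumptions for illustration and are not part of this patch.

/* Sketch of a machine-driver init callback using the exported helpers.
 * example_headset_jack is assumed to have been created elsewhere with
 * snd_soc_card_jack_new(); error handling is trimmed for brevity.
 */
static int example_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec_dai->codec;

	/* Report headset insertion and button events to the jack */
	rt5663_set_jack_detect(codec, &example_headset_jack);

	/* Have both stereo filters track I2S1 through the ASRC */
	rt5663_sel_asrc_clk_src(codec,
				RT5663_DA_STEREO_FILTER |
				RT5663_AD_STEREO_FILTER,
				RT5663_CLK_SEL_I2S1_ASRC);

	return 0;
}
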
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 8ef467f64f03..49caf1393aeb 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2777,12 +2777,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5670 = {
.resume = rt5670_resume,
.set_bias_level = rt5670_set_bias_level,
.idle_bias_off = true,
- .controls = rt5670_snd_controls,
- .num_controls = ARRAY_SIZE(rt5670_snd_controls),
- .dapm_widgets = rt5670_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5670_dapm_widgets),
- .dapm_routes = rt5670_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5670_dapm_routes),
+ .component_driver = {
+ .controls = rt5670_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5670_snd_controls),
+ .dapm_widgets = rt5670_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5670_dapm_widgets),
+ .dapm_routes = rt5670_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5670_dapm_routes),
+ },
};
static const struct regmap_config rt5670_regmap = {
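
The hunk above is the first of many identical conversions in this series: the control and DAPM tables move from the top level of struct snd_soc_codec_driver into its embedded component_driver. The sketch below shows the resulting shape with placeholder table names; only the nesting is the point.

/* Resulting shape of the conversion (placeholder names, values elided):
 * the tables now live in the embedded struct snd_soc_component_driver.
 */
static struct snd_soc_codec_driver example_codec_driver = {
	.set_bias_level	= example_set_bias_level,
	.idle_bias_off	= true,
	.component_driver = {
		.controls		= example_controls,
		.num_controls		= ARRAY_SIZE(example_controls),
		.dapm_widgets		= example_dapm_widgets,
		.num_dapm_widgets	= ARRAY_SIZE(example_dapm_widgets),
		.dapm_routes		= example_dapm_routes,
		.num_dapm_routes	= ARRAY_SIZE(example_dapm_routes),
	},
};
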
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index da9483c1c6fb..abc802a5a479 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -40,6 +41,15 @@
#define RT5677_PR_BASE (RT5677_PR_RANGE_BASE + (0 * RT5677_PR_SPACING))
+/* GPIO indexes defined by ACPI */
+enum {
+ RT5677_GPIO_PLUG_DET = 0,
+ RT5677_GPIO_MIC_PRESENT_L = 1,
+ RT5677_GPIO_HOTWORD_DET_L = 2,
+ RT5677_GPIO_DSP_INT = 3,
+ RT5677_GPIO_HP_AMP_SHDN_L = 4,
+};
+
static const struct regmap_range_cfg rt5677_ranges[] = {
{
.name = "PR",
@@ -4657,7 +4667,7 @@ static int rt5677_to_irq(struct gpio_chip *chip, unsigned offset)
return regmap_irq_get_virq(data, irq);
}
-static struct gpio_chip rt5677_template_chip = {
+static const struct gpio_chip rt5677_template_chip = {
.label = "rt5677",
.owner = THIS_MODULE,
.direction_output = rt5677_gpio_direction_out,
@@ -4974,12 +4984,14 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5677 = {
.resume = rt5677_resume,
.set_bias_level = rt5677_set_bias_level,
.idle_bias_off = true,
- .controls = rt5677_snd_controls,
- .num_controls = ARRAY_SIZE(rt5677_snd_controls),
- .dapm_widgets = rt5677_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rt5677_dapm_widgets),
- .dapm_routes = rt5677_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rt5677_dapm_routes),
+ .component_driver = {
+ .controls = rt5677_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5677_snd_controls),
+ .dapm_widgets = rt5677_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5677_dapm_widgets),
+ .dapm_routes = rt5677_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5677_dapm_routes),
+ },
};
static const struct regmap_config rt5677_regmap_physical = {
@@ -5018,10 +5030,47 @@ static const struct regmap_config rt5677_regmap = {
static const struct i2c_device_id rt5677_i2c_id[] = {
{ "rt5677", RT5677 },
{ "rt5676", RT5676 },
+ { "RT5677CE:00", RT5677 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
+static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false };
+static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false };
+static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false };
+
+static const struct acpi_gpio_mapping bdw_rt5677_gpios[] = {
+ { "plug-det-gpios", &plug_det_gpio, 1 },
+ { "mic-present-gpios", &mic_present_gpio, 1 },
+ { "headphone-enable-gpios", &headphone_enable_gpio, 1 },
+ { NULL },
+};
+
+static void rt5677_read_acpi_properties(struct rt5677_priv *rt5677,
+ struct device *dev)
+{
+ int ret;
+ u32 val;
+
+ ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+ bdw_rt5677_gpios);
+ if (ret)
+ dev_warn(dev, "Failed to add driver gpios\n");
+
+ if (!device_property_read_u32(dev, "DCLK", &val))
+ rt5677->pdata.dmic2_clk_pin = val;
+
+ rt5677->pdata.in1_diff = device_property_read_bool(dev, "IN1");
+ rt5677->pdata.in2_diff = device_property_read_bool(dev, "IN2");
+ rt5677->pdata.lout1_diff = device_property_read_bool(dev, "OUT1");
+ rt5677->pdata.lout2_diff = device_property_read_bool(dev, "OUT2");
+ rt5677->pdata.lout3_diff = device_property_read_bool(dev, "OUT3");
+
+ device_property_read_u32(dev, "JD1", &rt5677->pdata.jd1_gpio);
+ device_property_read_u32(dev, "JD2", &rt5677->pdata.jd2_gpio);
+ device_property_read_u32(dev, "JD3", &rt5677->pdata.jd3_gpio);
+}
+
static void rt5677_read_device_properties(struct rt5677_priv *rt5677,
struct device *dev)
{
@@ -5127,8 +5176,12 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt5677->pdata = *pdata;
- else
+ else if (i2c->dev.of_node)
rt5677_read_device_properties(rt5677, &i2c->dev);
+ else if (ACPI_HANDLE(&i2c->dev))
+ rt5677_read_acpi_properties(rt5677, &i2c->dev);
+ else
+ return -EINVAL;
/* pow-ldo2 and reset are optional. The codec pins may be statically
* connected on the board without gpios. If the gpio device property
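
Once acpi_dev_add_driver_gpios() has registered bdw_rt5677_gpios, the named entries can be requested through the ordinary gpiod consumer API; the GPIO core appends "-gpios" to the connection ID when matching. The consumer below is a hypothetical sketch, not code from this patch.

/* Hypothetical consumer of the ACPI GPIO mapping registered above:
 * "plug-det" resolves via the "plug-det-gpios" entry in bdw_rt5677_gpios.
 * Assumes <linux/gpio/consumer.h> is included.
 */
static int example_read_plug_det(struct i2c_client *i2c)
{
	struct gpio_desc *plug_det;

	plug_det = devm_gpiod_get(&i2c->dev, "plug-det", GPIOD_IN);
	if (IS_ERR(plug_det))
		return PTR_ERR(plug_det);

	return gpiod_get_value_cansleep(plug_det);
}
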
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 527b759c1562..1589325855bc 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -411,6 +411,8 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
0, 8,
0x7f, 1,
headphone_volume),
+ SOC_SINGLE("Headphone Playback Switch", SGTL5000_CHIP_ANA_CTRL,
+ 4, 1, 1),
SOC_SINGLE("Headphone Playback ZC Switch", SGTL5000_CHIP_ANA_CTRL,
5, 1, 0),
@@ -423,6 +425,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
SGTL5000_LINE_OUT_VOL_RIGHT_SHIFT,
0x1f, 1,
lineout_volume),
+ SOC_SINGLE("Lineout Playback Switch", SGTL5000_CHIP_ANA_CTRL, 8, 1, 1),
};
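
For reference, SOC_SINGLE() takes (name, register, shift, max, invert), so the two switches added above each expose a single bit of SGTL5000_CHIP_ANA_CTRL, with invert set because the hardware bits are mutes. A hypothetical entry with the same layout, for illustration only:

/* Hypothetical control illustrating the SOC_SINGLE() arguments used
 * above: name, register, bit position, maximum value, invert flag.
 * With invert = 1, the ALSA "on" state clears the underlying mute bit.
 */
static const struct snd_kcontrol_new example_playback_switch =
	SOC_SINGLE("Example Playback Switch", SGTL5000_CHIP_ANA_CTRL, 4, 1, 1);
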
/* mute the codec used by alsa core */
@@ -1151,12 +1154,14 @@ static struct snd_soc_codec_driver sgtl5000_driver = {
.remove = sgtl5000_remove,
.set_bias_level = sgtl5000_set_bias_level,
.suspend_bias_off = true,
- .controls = sgtl5000_snd_controls,
- .num_controls = ARRAY_SIZE(sgtl5000_snd_controls),
- .dapm_widgets = sgtl5000_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sgtl5000_dapm_widgets),
- .dapm_routes = sgtl5000_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(sgtl5000_dapm_routes),
+ .component_driver = {
+ .controls = sgtl5000_snd_controls,
+ .num_controls = ARRAY_SIZE(sgtl5000_snd_controls),
+ .dapm_widgets = sgtl5000_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sgtl5000_dapm_widgets),
+ .dapm_routes = sgtl5000_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sgtl5000_dapm_routes),
+ },
};
static const struct regmap_config sgtl5000_regmap = {
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index a8402d0af0ea..5344f4aa8fde 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -238,10 +238,12 @@ static struct regmap *si476x_get_regmap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_si476x = {
.get_regmap = si476x_get_regmap,
- .dapm_widgets = si476x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
- .dapm_routes = si476x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(si476x_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = si476x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
+ .dapm_routes = si476x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(si476x_dapm_routes),
+ },
};
static int si476x_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 3a7de0159f24..eae54c37cff9 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -888,12 +888,14 @@ static struct snd_soc_codec_driver sn95031_codec = {
.set_bias_level = sn95031_set_vaud_bias,
.idle_bias_off = true,
- .controls = sn95031_snd_controls,
- .num_controls = ARRAY_SIZE(sn95031_snd_controls),
- .dapm_widgets = sn95031_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sn95031_dapm_widgets),
- .dapm_routes = sn95031_audio_map,
- .num_dapm_routes = ARRAY_SIZE(sn95031_audio_map),
+ .component_driver = {
+ .controls = sn95031_snd_controls,
+ .num_controls = ARRAY_SIZE(sn95031_snd_controls),
+ .dapm_widgets = sn95031_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sn95031_dapm_widgets),
+ .dapm_routes = sn95031_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(sn95031_audio_map),
+ },
};
static int sn95031_device_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index 3ec41ccbf4e2..234f87b54838 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -38,10 +38,12 @@ static const struct snd_soc_dapm_route dir_routes[] = {
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
static struct snd_soc_codec_driver soc_codec_spdif_dir = {
- .dapm_widgets = dir_widgets,
- .num_dapm_widgets = ARRAY_SIZE(dir_widgets),
- .dapm_routes = dir_routes,
- .num_dapm_routes = ARRAY_SIZE(dir_routes),
+ .component_driver = {
+ .dapm_widgets = dir_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dir_widgets),
+ .dapm_routes = dir_routes,
+ .num_dapm_routes = ARRAY_SIZE(dir_routes),
+ },
};
static struct snd_soc_dai_driver dir_stub_dai = {
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index ef634a9ad673..ee367536a498 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -38,10 +38,12 @@ static const struct snd_soc_dapm_route dit_routes[] = {
};
static struct snd_soc_codec_driver soc_codec_spdif_dit = {
- .dapm_widgets = dit_widgets,
- .num_dapm_widgets = ARRAY_SIZE(dit_widgets),
- .dapm_routes = dit_routes,
- .num_dapm_routes = ARRAY_SIZE(dit_routes),
+ .component_driver = {
+ .dapm_widgets = dit_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dit_widgets),
+ .dapm_routes = dit_routes,
+ .num_dapm_routes = ARRAY_SIZE(dit_routes),
+ },
};
static struct snd_soc_dai_driver dit_stub_dai = {
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index e2e0bfa7ec20..38a85f3adc80 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -715,12 +715,14 @@ static struct snd_soc_codec_driver ssm2518_codec_driver = {
.set_sysclk = ssm2518_set_sysclk,
.idle_bias_off = true,
- .controls = ssm2518_snd_controls,
- .num_controls = ARRAY_SIZE(ssm2518_snd_controls),
- .dapm_widgets = ssm2518_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ssm2518_dapm_widgets),
- .dapm_routes = ssm2518_routes,
- .num_dapm_routes = ARRAY_SIZE(ssm2518_routes),
+ .component_driver = {
+ .controls = ssm2518_snd_controls,
+ .num_controls = ARRAY_SIZE(ssm2518_snd_controls),
+ .dapm_widgets = ssm2518_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ssm2518_dapm_widgets),
+ .dapm_routes = ssm2518_routes,
+ .num_dapm_routes = ARRAY_SIZE(ssm2518_routes),
+ },
};
static const struct regmap_config ssm2518_regmap_config = {
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 4452fea0b118..993bde29ca1b 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -597,12 +597,14 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
.set_bias_level = ssm2602_set_bias_level,
.suspend_bias_off = true,
- .controls = ssm260x_snd_controls,
- .num_controls = ARRAY_SIZE(ssm260x_snd_controls),
- .dapm_widgets = ssm260x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ssm260x_dapm_widgets),
- .dapm_routes = ssm260x_routes,
- .num_dapm_routes = ARRAY_SIZE(ssm260x_routes),
+ .component_driver = {
+ .controls = ssm260x_snd_controls,
+ .num_controls = ARRAY_SIZE(ssm260x_snd_controls),
+ .dapm_widgets = ssm260x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ssm260x_dapm_widgets),
+ .dapm_routes = ssm260x_routes,
+ .num_dapm_routes = ARRAY_SIZE(ssm260x_routes),
+ },
};
static bool ssm2602_register_volatile(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 080c78e88e10..2bb5a11c9ba1 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -421,12 +421,14 @@ static struct snd_soc_codec_driver ssm4567_codec_driver = {
.set_bias_level = ssm4567_set_bias_level,
.idle_bias_off = true,
- .controls = ssm4567_snd_controls,
- .num_controls = ARRAY_SIZE(ssm4567_snd_controls),
- .dapm_widgets = ssm4567_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(ssm4567_dapm_widgets),
- .dapm_routes = ssm4567_routes,
- .num_dapm_routes = ARRAY_SIZE(ssm4567_routes),
+ .component_driver = {
+ .controls = ssm4567_snd_controls,
+ .num_controls = ARRAY_SIZE(ssm4567_snd_controls),
+ .dapm_widgets = ssm4567_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ssm4567_dapm_widgets),
+ .dapm_routes = ssm4567_routes,
+ .num_dapm_routes = ARRAY_SIZE(ssm4567_routes),
+ },
};
static const struct regmap_config ssm4567_regmap_config = {
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index a9844b2ac829..0790ae8530d9 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -991,12 +991,14 @@ static const struct snd_soc_codec_driver sta32x_codec = {
.remove = sta32x_remove,
.set_bias_level = sta32x_set_bias_level,
.suspend_bias_off = true,
- .controls = sta32x_snd_controls,
- .num_controls = ARRAY_SIZE(sta32x_snd_controls),
- .dapm_widgets = sta32x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sta32x_dapm_widgets),
- .dapm_routes = sta32x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(sta32x_dapm_routes),
+ .component_driver = {
+ .controls = sta32x_snd_controls,
+ .num_controls = ARRAY_SIZE(sta32x_snd_controls),
+ .dapm_widgets = sta32x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sta32x_dapm_widgets),
+ .dapm_routes = sta32x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sta32x_dapm_routes),
+ },
};
static const struct regmap_config sta32x_regmap = {
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index 33a4612f0a07..9644c20f44e3 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -1057,12 +1057,14 @@ static const struct snd_soc_codec_driver sta350_codec = {
.remove = sta350_remove,
.set_bias_level = sta350_set_bias_level,
.suspend_bias_off = true,
- .controls = sta350_snd_controls,
- .num_controls = ARRAY_SIZE(sta350_snd_controls),
- .dapm_widgets = sta350_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sta350_dapm_widgets),
- .dapm_routes = sta350_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(sta350_dapm_routes),
+ .component_driver = {
+ .controls = sta350_snd_controls,
+ .num_controls = ARRAY_SIZE(sta350_snd_controls),
+ .dapm_widgets = sta350_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sta350_dapm_widgets),
+ .dapm_routes = sta350_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sta350_dapm_routes),
+ },
};
static const struct regmap_config sta350_regmap = {
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 2cdaca943a8c..d4b384e4b266 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -317,8 +317,10 @@ static const struct snd_soc_codec_driver sta529_codec_driver = {
.set_bias_level = sta529_set_bias_level,
.suspend_bias_off = true,
- .controls = sta529_snd_controls,
- .num_controls = ARRAY_SIZE(sta529_snd_controls),
+ .component_driver = {
+ .controls = sta529_snd_controls,
+ .num_controls = ARRAY_SIZE(sta529_snd_controls),
+ },
};
static const struct regmap_config sta529_regmap = {
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 0945c51df003..27f30d352867 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -85,10 +85,10 @@ static SOC_ENUM_SINGLE_DECL(stac9766_boost2_enum,
static SOC_ENUM_SINGLE_DECL(stac9766_stereo_mic_enum,
AC97_STAC_STEREO_MIC, 2, stac9766_stereo_mic);
-static const DECLARE_TLV_DB_LINEAR(master_tlv, -4600, 0);
-static const DECLARE_TLV_DB_LINEAR(record_tlv, 0, 2250);
-static const DECLARE_TLV_DB_LINEAR(beep_tlv, -4500, 0);
-static const DECLARE_TLV_DB_LINEAR(mix_tlv, -3450, 1200);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(master_tlv, -4650, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(record_tlv, 0, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(beep_tlv, -4500, 300, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(mix_tlv, -3450, 150, 0);
static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
SOC_DOUBLE_TLV("Speaker Volume", AC97_MASTER, 8, 0, 31, 1, master_tlv),
@@ -320,8 +320,10 @@ static int stac9766_codec_remove(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
- .controls = stac9766_snd_ac97_controls,
- .num_controls = ARRAY_SIZE(stac9766_snd_ac97_controls),
+ .component_driver = {
+ .controls = stac9766_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(stac9766_snd_ac97_controls),
+ },
.write = stac9766_ac97_write,
.read = stac9766_ac97_read,
.set_bias_level = stac9766_set_bias_level,
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index 160d61a66204..7b31ee9b82bc 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -591,11 +591,11 @@ static int sti_sas_driver_probe(struct platform_device *pdev)
sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
/* Set dapms*/
- sti_sas_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
- sti_sas_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
+ sti_sas_driver.component_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
+ sti_sas_driver.component_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
- sti_sas_driver.dapm_routes = drvdata->dev_data->dapm_routes;
- sti_sas_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
+ sti_sas_driver.component_driver.dapm_routes = drvdata->dev_data->dapm_routes;
+ sti_sas_driver.component_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
/* Store context */
dev_set_drvdata(&pdev->dev, drvdata);
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index cc1d3981fa4b..baf455e8c2f7 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -667,12 +667,14 @@ static struct snd_soc_codec_driver soc_codec_dev_tas2552 = {
.resume = tas2552_resume,
.ignore_pmdown_time = true,
- .controls = tas2552_snd_controls,
- .num_controls = ARRAY_SIZE(tas2552_snd_controls),
- .dapm_widgets = tas2552_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tas2552_dapm_widgets),
- .dapm_routes = tas2552_audio_map,
- .num_dapm_routes = ARRAY_SIZE(tas2552_audio_map),
+ .component_driver = {
+ .controls = tas2552_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2552_snd_controls),
+ .dapm_widgets = tas2552_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2552_dapm_widgets),
+ .dapm_routes = tas2552_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2552_audio_map),
+ },
};
static const struct regmap_config tas2552_regmap_config = {
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index d49d25d51957..b7de857abb16 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -387,7 +387,7 @@ static int tas5086_hw_params(struct snd_pcm_substream *substream,
val = index_in_array(tas5086_ratios, ARRAY_SIZE(tas5086_ratios),
priv->mclk / priv->rate);
if (val < 0) {
- dev_err(codec->dev, "Inavlid MCLK / Fs ratio\n");
+ dev_err(codec->dev, "Invalid MCLK / Fs ratio\n");
return -EINVAL;
}
@@ -890,12 +890,14 @@ static struct snd_soc_codec_driver soc_codec_dev_tas5086 = {
.remove = tas5086_remove,
.suspend = tas5086_soc_suspend,
.resume = tas5086_soc_resume,
- .controls = tas5086_controls,
- .num_controls = ARRAY_SIZE(tas5086_controls),
- .dapm_widgets = tas5086_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tas5086_dapm_widgets),
- .dapm_routes = tas5086_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(tas5086_dapm_routes),
+ .component_driver = {
+ .controls = tas5086_controls,
+ .num_controls = ARRAY_SIZE(tas5086_controls),
+ .dapm_widgets = tas5086_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas5086_dapm_widgets),
+ .dapm_routes = tas5086_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tas5086_dapm_routes),
+ },
};
static const struct i2c_device_id tas5086_i2c_id[] = {
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index d8baca3f8413..df5e5cb33baa 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -658,10 +658,12 @@ static const struct snd_soc_codec_driver tas571x_codec = {
.set_bias_level = tas571x_set_bias_level,
.idle_bias_off = true,
- .dapm_widgets = tas571x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tas571x_dapm_widgets),
- .dapm_routes = tas571x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(tas571x_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = tas571x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas571x_dapm_widgets),
+ .dapm_routes = tas571x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tas571x_dapm_routes),
+ },
};
static struct snd_soc_dai_driver tas571x_dai = {
@@ -754,8 +756,8 @@ static int tas571x_i2c_probe(struct i2c_client *client,
memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
- priv->codec_driver.controls = priv->chip->controls;
- priv->codec_driver.num_controls = priv->chip->num_controls;
+ priv->codec_driver.component_driver.controls = priv->chip->controls;
+ priv->codec_driver.component_driver.num_controls = priv->chip->num_controls;
if (priv->chip->vol_reg_size == 2) {
/*
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index f54fb46b77c2..c65b917598d2 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -489,12 +489,14 @@ static struct snd_soc_codec_driver soc_codec_dev_tas5720 = {
.suspend = tas5720_suspend,
.resume = tas5720_resume,
- .controls = tas5720_snd_controls,
- .num_controls = ARRAY_SIZE(tas5720_snd_controls),
- .dapm_widgets = tas5720_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
- .dapm_routes = tas5720_audio_map,
- .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+ .component_driver = {
+ .controls = tas5720_snd_controls,
+ .num_controls = ARRAY_SIZE(tas5720_snd_controls),
+ .dapm_widgets = tas5720_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
+ .dapm_routes = tas5720_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+ },
};
/* PCM rates supported by the TAS5720 driver */
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index cb5310d89c0f..95e0a7abeb7a 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -231,13 +231,14 @@ static const struct snd_soc_dapm_route tfa9879_dapm_routes[] = {
};
static const struct snd_soc_codec_driver tfa9879_codec = {
- .controls = tfa9879_controls,
- .num_controls = ARRAY_SIZE(tfa9879_controls),
-
- .dapm_widgets = tfa9879_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tfa9879_dapm_widgets),
- .dapm_routes = tfa9879_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(tfa9879_dapm_routes),
+ .component_driver = {
+ .controls = tfa9879_controls,
+ .num_controls = ARRAY_SIZE(tfa9879_controls),
+ .dapm_widgets = tfa9879_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tfa9879_dapm_widgets),
+ .dapm_routes = tfa9879_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tfa9879_dapm_routes),
+ },
};
static const struct regmap_config tfa9879_regmap = {
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index cd8c02b6e4de..410cae0f2060 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -583,12 +583,14 @@ static struct snd_soc_codec_driver soc_codec_dev_tlv320aic23 = {
.set_bias_level = tlv320aic23_set_bias_level,
.suspend_bias_off = true,
- .controls = tlv320aic23_snd_controls,
- .num_controls = ARRAY_SIZE(tlv320aic23_snd_controls),
- .dapm_widgets = tlv320aic23_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
- .dapm_routes = tlv320aic23_intercon,
- .num_dapm_routes = ARRAY_SIZE(tlv320aic23_intercon),
+ .component_driver = {
+ .controls = tlv320aic23_snd_controls,
+ .num_controls = ARRAY_SIZE(tlv320aic23_snd_controls),
+ .dapm_widgets = tlv320aic23_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
+ .dapm_routes = tlv320aic23_intercon,
+ .num_dapm_routes = ARRAY_SIZE(tlv320aic23_intercon),
+ },
};
int tlv320aic23_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 2c904d7150ad..14aa96d41719 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -321,12 +321,14 @@ static int aic26_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver aic26_soc_codec_dev = {
.probe = aic26_probe,
- .controls = aic26_snd_controls,
- .num_controls = ARRAY_SIZE(aic26_snd_controls),
- .dapm_widgets = tlv320aic26_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
- .dapm_routes = tlv320aic26_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
+ .component_driver = {
+ .controls = aic26_snd_controls,
+ .num_controls = ARRAY_SIZE(aic26_snd_controls),
+ .dapm_widgets = tlv320aic26_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
+ .dapm_routes = tlv320aic26_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
+ },
};
static const struct regmap_config aic26_regmap = {
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 3c5e1df01c19..be1a64bfd320 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -273,10 +273,20 @@ static const DECLARE_TLV_DB_SCALE(sp_vol_tlv, -6350, 50, 0);
/*
* controls to be exported to the user space
*/
-static const struct snd_kcontrol_new aic31xx_snd_controls[] = {
+static const struct snd_kcontrol_new common31xx_snd_controls[] = {
SOC_DOUBLE_R_S_TLV("DAC Playback Volume", AIC31XX_LDACVOL,
AIC31XX_RDACVOL, 0, -127, 48, 7, 0, dac_vol_tlv),
+ SOC_DOUBLE_R("HP Driver Playback Switch", AIC31XX_HPLGAIN,
+ AIC31XX_HPRGAIN, 2, 1, 0),
+ SOC_DOUBLE_R_TLV("HP Driver Playback Volume", AIC31XX_HPLGAIN,
+ AIC31XX_HPRGAIN, 3, 0x09, 0, hp_drv_tlv),
+
+ SOC_DOUBLE_R_TLV("HP Analog Playback Volume", AIC31XX_LANALOGHPL,
+ AIC31XX_RANALOGHPR, 0, 0x7F, 1, hp_vol_tlv),
+};
+
+static const struct snd_kcontrol_new aic31xx_snd_controls[] = {
SOC_SINGLE_TLV("ADC Fine Capture Volume", AIC31XX_ADCFGA, 4, 4, 1,
adc_fgain_tlv),
@@ -286,14 +296,6 @@ static const struct snd_kcontrol_new aic31xx_snd_controls[] = {
SOC_SINGLE_TLV("Mic PGA Capture Volume", AIC31XX_MICPGA, 0,
119, 0, mic_pga_tlv),
-
- SOC_DOUBLE_R("HP Driver Playback Switch", AIC31XX_HPLGAIN,
- AIC31XX_HPRGAIN, 2, 1, 0),
- SOC_DOUBLE_R_TLV("HP Driver Playback Volume", AIC31XX_HPLGAIN,
- AIC31XX_HPRGAIN, 3, 0x09, 0, hp_drv_tlv),
-
- SOC_DOUBLE_R_TLV("HP Analog Playback Volume", AIC31XX_LANALOGHPL,
- AIC31XX_RANALOGHPR, 0, 0x7F, 1, hp_vol_tlv),
};
static const struct snd_kcontrol_new aic311x_snd_controls[] = {
@@ -397,17 +399,28 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_kcontrol_new left_output_switches[] = {
+static const struct snd_kcontrol_new aic31xx_left_output_switches[] = {
SOC_DAPM_SINGLE("From Left DAC", AIC31XX_DACMIXERROUTE, 6, 1, 0),
SOC_DAPM_SINGLE("From MIC1LP", AIC31XX_DACMIXERROUTE, 5, 1, 0),
SOC_DAPM_SINGLE("From MIC1RP", AIC31XX_DACMIXERROUTE, 4, 1, 0),
};
-static const struct snd_kcontrol_new right_output_switches[] = {
+static const struct snd_kcontrol_new aic31xx_right_output_switches[] = {
SOC_DAPM_SINGLE("From Right DAC", AIC31XX_DACMIXERROUTE, 2, 1, 0),
SOC_DAPM_SINGLE("From MIC1RP", AIC31XX_DACMIXERROUTE, 1, 1, 0),
};
+static const struct snd_kcontrol_new dac31xx_left_output_switches[] = {
+ SOC_DAPM_SINGLE("From Left DAC", AIC31XX_DACMIXERROUTE, 6, 1, 0),
+ SOC_DAPM_SINGLE("From AIN1", AIC31XX_DACMIXERROUTE, 5, 1, 0),
+ SOC_DAPM_SINGLE("From AIN2", AIC31XX_DACMIXERROUTE, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac31xx_right_output_switches[] = {
+ SOC_DAPM_SINGLE("From Right DAC", AIC31XX_DACMIXERROUTE, 2, 1, 0),
+ SOC_DAPM_SINGLE("From AIN2", AIC31XX_DACMIXERROUTE, 1, 1, 0),
+};
+
static const struct snd_kcontrol_new p_term_mic1lp =
SOC_DAPM_ENUM("MIC1LP P-Terminal", mic1lp_p_enum);
@@ -457,7 +470,7 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget aic31xx_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget common31xx_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MUX("DAC Left Input",
@@ -473,14 +486,7 @@ static const struct snd_soc_dapm_widget aic31xx_dapm_widgets[] = {
AIC31XX_DACSETUP, 6, 0, aic31xx_dapm_power_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- /* Output Mixers */
- SND_SOC_DAPM_MIXER("Output Left", SND_SOC_NOPM, 0, 0,
- left_output_switches,
- ARRAY_SIZE(left_output_switches)),
- SND_SOC_DAPM_MIXER("Output Right", SND_SOC_NOPM, 0, 0,
- right_output_switches,
- ARRAY_SIZE(right_output_switches)),
-
+ /* HP */
SND_SOC_DAPM_SWITCH("HP Left", SND_SOC_NOPM, 0, 0,
&aic31xx_dapm_hpl_switch),
SND_SOC_DAPM_SWITCH("HP Right", SND_SOC_NOPM, 0, 0,
@@ -494,10 +500,34 @@ static const struct snd_soc_dapm_widget aic31xx_dapm_widgets[] = {
NULL, 0, aic31xx_dapm_power_event,
SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
- /* ADC */
- SND_SOC_DAPM_ADC_E("ADC", "Capture", AIC31XX_ADCSETUP, 7, 0,
- aic31xx_dapm_power_event, SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
+ /* Mic Bias */
+ SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, mic_bias_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ /* Outputs */
+ SND_SOC_DAPM_OUTPUT("HPL"),
+ SND_SOC_DAPM_OUTPUT("HPR"),
+};
+
+static const struct snd_soc_dapm_widget dac31xx_dapm_widgets[] = {
+ /* Inputs */
+ SND_SOC_DAPM_INPUT("AIN1"),
+ SND_SOC_DAPM_INPUT("AIN2"),
+
+ /* Output Mixers */
+ SND_SOC_DAPM_MIXER("Output Left", SND_SOC_NOPM, 0, 0,
+ dac31xx_left_output_switches,
+ ARRAY_SIZE(dac31xx_left_output_switches)),
+ SND_SOC_DAPM_MIXER("Output Right", SND_SOC_NOPM, 0, 0,
+ dac31xx_right_output_switches,
+ ARRAY_SIZE(dac31xx_right_output_switches)),
+};
+
+static const struct snd_soc_dapm_widget aic31xx_dapm_widgets[] = {
+ /* Inputs */
+ SND_SOC_DAPM_INPUT("MIC1LP"),
+ SND_SOC_DAPM_INPUT("MIC1RP"),
+ SND_SOC_DAPM_INPUT("MIC1LM"),
/* Input Selection to MIC_PGA */
SND_SOC_DAPM_MUX("MIC1LP P-Terminal", SND_SOC_NOPM, 0, 0,
@@ -507,24 +537,25 @@ static const struct snd_soc_dapm_widget aic31xx_dapm_widgets[] = {
SND_SOC_DAPM_MUX("MIC1LM P-Terminal", SND_SOC_NOPM, 0, 0,
&p_term_mic1lm),
+ /* ADC */
+ SND_SOC_DAPM_ADC_E("ADC", "Capture", AIC31XX_ADCSETUP, 7, 0,
+ aic31xx_dapm_power_event, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MUX("MIC1LM M-Terminal", SND_SOC_NOPM, 0, 0,
&m_term_mic1lm),
+
/* Enabling & Disabling MIC Gain Ctl */
SND_SOC_DAPM_PGA("MIC_GAIN_CTL", AIC31XX_MICPGA,
7, 1, NULL, 0),
- /* Mic Bias */
- SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, mic_bias_event,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-
- /* Outputs */
- SND_SOC_DAPM_OUTPUT("HPL"),
- SND_SOC_DAPM_OUTPUT("HPR"),
-
- /* Inputs */
- SND_SOC_DAPM_INPUT("MIC1LP"),
- SND_SOC_DAPM_INPUT("MIC1RP"),
- SND_SOC_DAPM_INPUT("MIC1LM"),
+ /* Output Mixers */
+ SND_SOC_DAPM_MIXER("Output Left", SND_SOC_NOPM, 0, 0,
+ aic31xx_left_output_switches,
+ ARRAY_SIZE(aic31xx_left_output_switches)),
+ SND_SOC_DAPM_MIXER("Output Right", SND_SOC_NOPM, 0, 0,
+ aic31xx_right_output_switches,
+ ARRAY_SIZE(aic31xx_right_output_switches)),
};
static const struct snd_soc_dapm_widget aic311x_dapm_widgets[] = {
@@ -554,7 +585,7 @@ static const struct snd_soc_dapm_widget aic310x_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route
-aic31xx_audio_map[] = {
+common31xx_audio_map[] = {
/* DAC Input Routing */
{"DAC Left Input", "Left Data", "DAC IN"},
{"DAC Left Input", "Right Data", "DAC IN"},
@@ -565,6 +596,31 @@ aic31xx_audio_map[] = {
{"DAC Left", NULL, "DAC Left Input"},
{"DAC Right", NULL, "DAC Right Input"},
+ /* HPL path */
+ {"HP Left", "Switch", "Output Left"},
+ {"HPL Driver", NULL, "HP Left"},
+ {"HPL", NULL, "HPL Driver"},
+
+ /* HPR path */
+ {"HP Right", "Switch", "Output Right"},
+ {"HPR Driver", NULL, "HP Right"},
+ {"HPR", NULL, "HPR Driver"},
+};
+
+static const struct snd_soc_dapm_route
+dac31xx_audio_map[] = {
+ /* Left Output */
+ {"Output Left", "From Left DAC", "DAC Left"},
+ {"Output Left", "From AIN1", "AIN1"},
+ {"Output Left", "From AIN2", "AIN2"},
+
+ /* Right Output */
+ {"Output Right", "From Right DAC", "DAC Right"},
+ {"Output Right", "From AIN2", "AIN2"},
+};
+
+static const struct snd_soc_dapm_route
+aic31xx_audio_map[] = {
/* Mic input */
{"MIC1LP P-Terminal", "FFR 10 Ohm", "MIC1LP"},
{"MIC1LP P-Terminal", "FFR 20 Ohm", "MIC1LP"},
@@ -595,16 +651,6 @@ aic31xx_audio_map[] = {
/* Right Output */
{"Output Right", "From Right DAC", "DAC Right"},
{"Output Right", "From MIC1RP", "MIC1RP"},
-
- /* HPL path */
- {"HP Left", "Switch", "Output Left"},
- {"HPL Driver", NULL, "HP Left"},
- {"HPL", NULL, "HPL Driver"},
-
- /* HPR path */
- {"HP Right", "Switch", "Output Right"},
- {"HPR Driver", NULL, "HP Right"},
- {"HPR", NULL, "HPR Driver"},
};
static const struct snd_soc_dapm_route
@@ -633,6 +679,13 @@ static int aic31xx_add_controls(struct snd_soc_codec *codec)
int ret = 0;
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
+ if (!(aic31xx->pdata.codec_type & DAC31XX_BIT))
+ ret = snd_soc_add_codec_controls(
+ codec, aic31xx_snd_controls,
+ ARRAY_SIZE(aic31xx_snd_controls));
+ if (ret)
+ return ret;
+
if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT)
ret = snd_soc_add_codec_controls(
codec, aic311x_snd_controls,
@@ -651,6 +704,30 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int ret = 0;
+ if (aic31xx->pdata.codec_type & DAC31XX_BIT) {
+ ret = snd_soc_dapm_new_controls(
+ dapm, dac31xx_dapm_widgets,
+ ARRAY_SIZE(dac31xx_dapm_widgets));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, dac31xx_audio_map,
+ ARRAY_SIZE(dac31xx_audio_map));
+ if (ret)
+ return ret;
+ } else {
+ ret = snd_soc_dapm_new_controls(
+ dapm, aic31xx_dapm_widgets,
+ ARRAY_SIZE(aic31xx_dapm_widgets));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, aic31xx_audio_map,
+ ARRAY_SIZE(aic31xx_audio_map));
+ if (ret)
+ return ret;
+ }
+
if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
ret = snd_soc_dapm_new_controls(
dapm, aic311x_dapm_widgets,
@@ -1114,12 +1191,14 @@ static struct snd_soc_codec_driver soc_codec_driver_aic31xx = {
.set_bias_level = aic31xx_set_bias_level,
.suspend_bias_off = true,
- .controls = aic31xx_snd_controls,
- .num_controls = ARRAY_SIZE(aic31xx_snd_controls),
- .dapm_widgets = aic31xx_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(aic31xx_dapm_widgets),
- .dapm_routes = aic31xx_audio_map,
- .num_dapm_routes = ARRAY_SIZE(aic31xx_audio_map),
+ .component_driver = {
+ .controls = common31xx_snd_controls,
+ .num_controls = ARRAY_SIZE(common31xx_snd_controls),
+ .dapm_widgets = common31xx_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(common31xx_dapm_widgets),
+ .dapm_routes = common31xx_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(common31xx_audio_map),
+ },
};
static const struct snd_soc_dai_ops aic31xx_dai_ops = {
@@ -1129,19 +1208,34 @@ static const struct snd_soc_dai_ops aic31xx_dai_ops = {
.digital_mute = aic31xx_dac_mute,
};
+static struct snd_soc_dai_driver dac31xx_dai_driver[] = {
+ {
+ .name = "tlv32dac31xx-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AIC31XX_RATES,
+ .formats = AIC31XX_FORMATS,
+ },
+ .ops = &aic31xx_dai_ops,
+ .symmetric_rates = 1,
+ }
+};
+
static struct snd_soc_dai_driver aic31xx_dai_driver[] = {
{
.name = "tlv320aic31xx-hifi",
.playback = {
.stream_name = "Playback",
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = AIC31XX_RATES,
.formats = AIC31XX_FORMATS,
},
.capture = {
.stream_name = "Capture",
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = AIC31XX_RATES,
.formats = AIC31XX_FORMATS,
@@ -1259,9 +1353,16 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
if (ret)
return ret;
- return snd_soc_register_codec(&i2c->dev, &soc_codec_driver_aic31xx,
- aic31xx_dai_driver,
- ARRAY_SIZE(aic31xx_dai_driver));
+ if (aic31xx->pdata.codec_type & DAC31XX_BIT)
+ return snd_soc_register_codec(&i2c->dev,
+ &soc_codec_driver_aic31xx,
+ dac31xx_dai_driver,
+ ARRAY_SIZE(dac31xx_dai_driver));
+ else
+ return snd_soc_register_codec(&i2c->dev,
+ &soc_codec_driver_aic31xx,
+ aic31xx_dai_driver,
+ ARRAY_SIZE(aic31xx_dai_driver));
}
static int aic31xx_i2c_remove(struct i2c_client *i2c)
@@ -1277,6 +1378,7 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
{ "tlv320aic3110", AIC3110 },
{ "tlv320aic3120", AIC3120 },
{ "tlv320aic3111", AIC3111 },
+ { "tlv320dac3100", DAC3100 },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index ac9b146526eb..5acd5b69fb83 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -24,12 +24,14 @@
#define AIC31XX_STEREO_CLASS_D_BIT 0x1
#define AIC31XX_MINIDSP_BIT 0x2
+#define DAC31XX_BIT 0x4
enum aic31xx_type {
AIC3100 = 0,
AIC3110 = AIC31XX_STEREO_CLASS_D_BIT,
AIC3120 = AIC31XX_MINIDSP_BIT,
AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
+ DAC3100 = DAC31XX_BIT,
};
struct aic31xx_pdata {
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 85d4978d0384..28fdfc5ec544 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -797,12 +797,14 @@ static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
.set_bias_level = aic32x4_set_bias_level,
.suspend_bias_off = true,
- .controls = aic32x4_snd_controls,
- .num_controls = ARRAY_SIZE(aic32x4_snd_controls),
- .dapm_widgets = aic32x4_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(aic32x4_dapm_widgets),
- .dapm_routes = aic32x4_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(aic32x4_dapm_routes),
+ .component_driver = {
+ .controls = aic32x4_snd_controls,
+ .num_controls = ARRAY_SIZE(aic32x4_snd_controls),
+ .dapm_widgets = aic32x4_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aic32x4_dapm_widgets),
+ .dapm_routes = aic32x4_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(aic32x4_dapm_routes),
+ },
};
static int aic32x4_parse_dt(struct aic32x4_priv *aic32x4,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a564759845f9..5a8d96ec058c 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1670,12 +1670,14 @@ static struct snd_soc_codec_driver soc_codec_dev_aic3x = {
.idle_bias_off = true,
.probe = aic3x_probe,
.remove = aic3x_remove,
- .controls = aic3x_snd_controls,
- .num_controls = ARRAY_SIZE(aic3x_snd_controls),
- .dapm_widgets = aic3x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
- .dapm_routes = intercon,
- .num_dapm_routes = ARRAY_SIZE(intercon),
+ .component_driver = {
+ .controls = aic3x_snd_controls,
+ .num_controls = ARRAY_SIZE(aic3x_snd_controls),
+ .dapm_widgets = aic3x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
};
/*
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index f7a6ce7e5fb1..7bcf01efdf9a 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -90,7 +90,6 @@ static const char *dac33_supply_names[DAC33_NUM_SUPPLIES] = {
struct tlv320dac33_priv {
struct mutex mutex;
- struct workqueue_struct *dac33_wq;
struct work_struct work;
struct snd_soc_codec *codec;
struct regulator_bulk_data supplies[DAC33_NUM_SUPPLIES];
@@ -771,7 +770,7 @@ static irqreturn_t dac33_interrupt_handler(int irq, void *dev)
/* Do not schedule the workqueue in Mode7 */
if (dac33->fifo_mode != DAC33_FIFO_MODE7)
- queue_work(dac33->dac33_wq, &dac33->work);
+ schedule_work(&dac33->work);
return IRQ_HANDLED;
}
@@ -1127,7 +1126,7 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (dac33->fifo_mode) {
dac33->state = DAC33_PREFILL;
- queue_work(dac33->dac33_wq, &dac33->work);
+ schedule_work(&dac33->work);
}
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -1135,7 +1134,7 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (dac33->fifo_mode) {
dac33->state = DAC33_FLUSH;
- queue_work(dac33->dac33_wq, &dac33->work);
+ schedule_work(&dac33->work);
}
break;
default:
@@ -1410,14 +1409,6 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
dac33->irq = -1;
}
if (dac33->irq != -1) {
- /* Setup work queue */
- dac33->dac33_wq =
- create_singlethread_workqueue("tlv320dac33");
- if (dac33->dac33_wq == NULL) {
- free_irq(dac33->irq, codec);
- return -ENOMEM;
- }
-
INIT_WORK(&dac33->work, dac33_work);
}
}
@@ -1437,7 +1428,7 @@ static int dac33_soc_remove(struct snd_soc_codec *codec)
if (dac33->irq >= 0) {
free_irq(dac33->irq, dac33->codec);
- destroy_workqueue(dac33->dac33_wq);
+ flush_work(&dac33->work);
}
return 0;
}
@@ -1453,12 +1444,14 @@ static struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = {
.probe = dac33_soc_probe,
.remove = dac33_soc_remove,
- .controls = dac33_snd_controls,
- .num_controls = ARRAY_SIZE(dac33_snd_controls),
- .dapm_widgets = dac33_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(dac33_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
+ .component_driver = {
+ .controls = dac33_snd_controls,
+ .num_controls = ARRAY_SIZE(dac33_snd_controls),
+ .dapm_widgets = dac33_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dac33_dapm_widgets),
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+ },
};
#define DAC33_RATES (SNDRV_PCM_RATE_44100 | \
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index f1ea052a822e..2e014c80d113 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -52,7 +52,7 @@ struct tpa6130a2_data {
static int tpa6130a2_power(struct tpa6130a2_data *data, bool enable)
{
- int ret;
+ int ret = 0, ret2;
if (enable) {
ret = regulator_enable(data->supply);
@@ -64,7 +64,30 @@ static int tpa6130a2_power(struct tpa6130a2_data *data, bool enable)
/* Power on */
if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 1);
+
+ /* Sync registers */
+ regcache_cache_only(data->regmap, false);
+ ret = regcache_sync(data->regmap);
+ if (ret != 0) {
+ dev_err(data->dev,
+ "Failed to sync registers: %d\n", ret);
+ regcache_cache_only(data->regmap, true);
+ if (data->power_gpio >= 0)
+ gpio_set_value(data->power_gpio, 0);
+ ret2 = regulator_disable(data->supply);
+ if (ret2 != 0)
+ dev_err(data->dev,
+ "Failed to disable supply: %d\n", ret2);
+ return ret;
+ }
} else {
+	/* A powered-off device does not retain its registers. While the
+	 * device is off, any register updates (e.g. volume changes) should
+	 * happen in cache only.
+	 */
+ regcache_mark_dirty(data->regmap);
+ regcache_cache_only(data->regmap, true);
+
/* Power off */
if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 0);
@@ -75,9 +98,6 @@ static int tpa6130a2_power(struct tpa6130a2_data *data, bool enable)
"Failed to disable supply: %d\n", ret);
return ret;
}
-
- /* device regs does not match the cache state anymore */
- regcache_mark_dirty(data->regmap);
}
return ret;
@@ -88,25 +108,14 @@ static int tpa6130a2_power_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
struct tpa6130a2_data *data = snd_soc_component_get_drvdata(c);
- int ret;
- /* before widget power up */
if (SND_SOC_DAPM_EVENT_ON(event)) {
- /* Turn on the chip */
- tpa6130a2_power(data, true);
- /* Sync the registers */
- ret = regcache_sync(data->regmap);
- if (ret < 0) {
- dev_err(c->dev, "Failed to initialize chip\n");
- tpa6130a2_power(data, false);
- return ret;
- }
- /* after widget power down */
+ /* Before widget power up: turn chip on, sync registers */
+ return tpa6130a2_power(data, true);
} else {
- tpa6130a2_power(data, false);
+ /* After widget power down: turn chip off */
+ return tpa6130a2_power(data, false);
}
-
- return 0;
}
/*
@@ -190,7 +199,7 @@ static const struct snd_soc_dapm_route tpa6130a2_dapm_routes[] = {
{ "Right PGA", NULL, "Power" },
};
-struct snd_soc_component_driver tpa6130a2_component_driver = {
+static const struct snd_soc_component_driver tpa6130a2_component_driver = {
.name = "tpa6130a2",
.probe = tpa6130a2_component_probe,
.dapm_widgets = tpa6130a2_dapm_widgets,
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index a5a4e9f75c57..a2104d68169d 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -2199,12 +2199,14 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
.set_bias_level = twl4030_set_bias_level,
.idle_bias_off = true,
- .controls = twl4030_snd_controls,
- .num_controls = ARRAY_SIZE(twl4030_snd_controls),
- .dapm_widgets = twl4030_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(twl4030_dapm_widgets),
- .dapm_routes = intercon,
- .num_dapm_routes = ARRAY_SIZE(intercon),
+ .component_driver = {
+ .controls = twl4030_snd_controls,
+ .num_controls = ARRAY_SIZE(twl4030_snd_controls),
+ .dapm_widgets = twl4030_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(twl4030_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
};
static int twl4030_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 1f7081043566..748036e851ea 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -1156,12 +1156,14 @@ static struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
.suspend_bias_off = true,
.ignore_pmdown_time = true,
- .controls = twl6040_snd_controls,
- .num_controls = ARRAY_SIZE(twl6040_snd_controls),
- .dapm_widgets = twl6040_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
- .dapm_routes = intercon,
- .num_dapm_routes = ARRAY_SIZE(intercon),
+ .component_driver = {
+ .controls = twl6040_snd_controls,
+ .num_controls = ARRAY_SIZE(twl6040_snd_controls),
+ .dapm_widgets = twl6040_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
};
static int twl6040_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index e4c694c758b8..5fdee874406d 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -523,10 +523,12 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
.set_bias_level = uda134x_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = uda134x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(uda134x_dapm_widgets),
- .dapm_routes = uda134x_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = uda134x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(uda134x_dapm_widgets),
+ .dapm_routes = uda134x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
+ },
};
static const struct regmap_config uda134x_regmap_config = {
@@ -544,6 +546,7 @@ static int uda134x_codec_probe(struct platform_device *pdev)
{
struct uda134x_platform_data *pd = pdev->dev.platform_data;
struct uda134x_priv *uda134x;
+ int ret;
if (!pd) {
dev_err(&pdev->dev, "Missing L3 bitbang function\n");
@@ -557,6 +560,12 @@ static int uda134x_codec_probe(struct platform_device *pdev)
uda134x->pd = pd;
platform_set_drvdata(pdev, uda134x);
+ if (pd->l3.use_gpios) {
+ ret = l3_set_gpio_ops(&pdev->dev, &uda134x->pd->l3);
+ if (ret < 0)
+ return ret;
+ }
+
uda134x->regmap = devm_regmap_init(&pdev->dev, NULL, pd,
&uda134x_regmap_config);
if (IS_ERR(uda134x->regmap))
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 35f0469ebb16..533e3bb444e4 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -765,12 +765,14 @@ static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
.reg_cache_default = uda1380_reg,
.reg_cache_step = 1,
- .controls = uda1380_snd_controls,
- .num_controls = ARRAY_SIZE(uda1380_snd_controls),
- .dapm_widgets = uda1380_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
- .dapm_routes = uda1380_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(uda1380_dapm_routes),
+ .component_driver = {
+ .controls = uda1380_snd_controls,
+ .num_controls = ARRAY_SIZE(uda1380_snd_controls),
+ .dapm_widgets = uda1380_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
+ .dapm_routes = uda1380_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(uda1380_dapm_routes),
+ },
};
#if IS_ENABLED(CONFIG_I2C)
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 1b79778098d2..fcffb6e707d9 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -484,12 +484,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wl1273 = {
.probe = wl1273_probe,
.remove = wl1273_remove,
- .controls = wl1273_controls,
- .num_controls = ARRAY_SIZE(wl1273_controls),
- .dapm_widgets = wl1273_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wl1273_dapm_widgets),
- .dapm_routes = wl1273_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wl1273_dapm_routes),
+ .component_driver = {
+ .controls = wl1273_controls,
+ .num_controls = ARRAY_SIZE(wl1273_controls),
+ .dapm_widgets = wl1273_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wl1273_dapm_widgets),
+ .dapm_routes = wl1273_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wl1273_dapm_routes),
+ },
};
static int wl1273_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index e3c34bdc2772..0eb5dcf4c29d 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -789,16 +789,18 @@ static int wm0010_set_sysclk(struct snd_soc_codec *codec, int source,
static int wm0010_probe(struct snd_soc_codec *codec);
-static struct snd_soc_codec_driver soc_codec_dev_wm0010 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm0010 = {
.probe = wm0010_probe,
.set_bias_level = wm0010_set_bias_level,
.set_sysclk = wm0010_set_sysclk,
.idle_bias_off = true,
- .dapm_widgets = wm0010_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm0010_dapm_widgets),
- .dapm_routes = wm0010_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm0010_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = wm0010_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm0010_dapm_widgets),
+ .dapm_routes = wm0010_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm0010_dapm_routes),
+ },
};
#define WM0010_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index ec45c5b220b1..cf5f0580df6a 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -141,12 +141,13 @@ static struct snd_soc_dai_driver wm1250_ev1_dai = {
.ops = &wm1250_ev1_ops,
};
-static struct snd_soc_codec_driver soc_codec_dev_wm1250_ev1 = {
- .dapm_widgets = wm1250_ev1_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm1250_ev1_dapm_widgets),
- .dapm_routes = wm1250_ev1_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm1250_ev1_dapm_routes),
-
+static const struct snd_soc_codec_driver soc_codec_dev_wm1250_ev1 = {
+ .component_driver = {
+ .dapm_widgets = wm1250_ev1_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm1250_ev1_dapm_widgets),
+ .dapm_routes = wm1250_ev1_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm1250_ev1_dapm_routes),
+ },
.set_bias_level = wm1250_ev1_set_bias_level,
.idle_bias_off = true,
};
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index f2664396be6f..23cde3a0dc11 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -799,18 +799,20 @@ static int wm2000_remove(struct snd_soc_codec *codec)
return wm2000_anc_transition(wm2000, ANC_OFF);
}
-static struct snd_soc_codec_driver soc_codec_dev_wm2000 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm2000 = {
.probe = wm2000_probe,
.remove = wm2000_remove,
.suspend = wm2000_suspend,
.resume = wm2000_resume,
- .dapm_widgets = wm2000_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm2000_dapm_widgets),
- .dapm_routes = wm2000_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wm2000_audio_map),
- .controls = wm2000_controls,
- .num_controls = ARRAY_SIZE(wm2000_controls),
+ .component_driver = {
+ .controls = wm2000_controls,
+ .num_controls = ARRAY_SIZE(wm2000_controls),
+ .dapm_widgets = wm2000_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm2000_dapm_widgets),
+ .dapm_routes = wm2000_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm2000_audio_map),
+ },
};
static int wm2000_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index fd1439ecb50a..606bf88abfc4 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -2103,7 +2103,7 @@ static struct snd_soc_dai_driver wm2200_dai = {
.ops = &wm2200_dai_ops,
};
-static struct snd_soc_codec_driver soc_codec_wm2200 = {
+static const struct snd_soc_codec_driver soc_codec_wm2200 = {
.probe = wm2200_probe,
.idle_bias_off = true,
@@ -2111,12 +2111,14 @@ static struct snd_soc_codec_driver soc_codec_wm2200 = {
.set_sysclk = wm2200_set_sysclk,
.set_pll = wm2200_set_fll,
- .controls = wm2200_snd_controls,
- .num_controls = ARRAY_SIZE(wm2200_snd_controls),
- .dapm_widgets = wm2200_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm2200_dapm_widgets),
- .dapm_routes = wm2200_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm2200_dapm_routes),
+ .component_driver = {
+ .controls = wm2200_snd_controls,
+ .num_controls = ARRAY_SIZE(wm2200_snd_controls),
+ .dapm_widgets = wm2200_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm2200_dapm_widgets),
+ .dapm_routes = wm2200_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm2200_dapm_routes),
+ },
};
static irqreturn_t wm2200_irq(int irq, void *data)
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 512a9d25fe6f..560575000cc5 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2285,7 +2285,7 @@ static int wm5100_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
(1 << WM5100_GP1_DIR_SHIFT));
}
-static struct gpio_chip wm5100_template_chip = {
+static const struct gpio_chip wm5100_template_chip = {
.label = "wm5100",
.owner = THIS_MODULE,
.direction_output = wm5100_gpio_direction_out,
@@ -2381,7 +2381,7 @@ static int wm5100_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm5100 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm5100 = {
.probe = wm5100_probe,
.remove = wm5100_remove,
@@ -2390,12 +2390,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5100 = {
.idle_bias_off = 1,
.seq_notifier = wm5100_seq_notifier,
- .controls = wm5100_snd_controls,
- .num_controls = ARRAY_SIZE(wm5100_snd_controls),
- .dapm_widgets = wm5100_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm5100_dapm_widgets),
- .dapm_routes = wm5100_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm5100_dapm_routes),
+ .component_driver = {
+ .controls = wm5100_snd_controls,
+ .num_controls = ARRAY_SIZE(wm5100_snd_controls),
+ .dapm_widgets = wm5100_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm5100_dapm_widgets),
+ .dapm_routes = wm5100_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm5100_dapm_routes),
+ },
};
static const struct regmap_config wm5100_regmap = {
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 846deed6af41..93876c6d48ee 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1521,6 +1521,16 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "IN3L", NULL, "SYSCLK" },
{ "IN3R", NULL, "SYSCLK" },
+ { "ASRC1L", NULL, "SYSCLK" },
+ { "ASRC1R", NULL, "SYSCLK" },
+ { "ASRC2L", NULL, "SYSCLK" },
+ { "ASRC2R", NULL, "SYSCLK" },
+
+ { "ASRC1L", NULL, "ASYNCCLK" },
+ { "ASRC1R", NULL, "ASYNCCLK" },
+ { "ASRC2L", NULL, "ASYNCCLK" },
+ { "ASRC2R", NULL, "ASYNCCLK" },
+
{ "MICBIAS1", NULL, "MICVDD" },
{ "MICBIAS2", NULL, "MICVDD" },
{ "MICBIAS3", NULL, "MICVDD" },
@@ -1600,7 +1610,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "Slim3 Capture", NULL, "SYSCLK" },
{ "Audio Trace DSP", NULL, "DSP1" },
- { "Audio Trace DSP", NULL, "SYSCLK" },
{ "IN1L PGA", NULL, "IN1L" },
{ "IN1R PGA", NULL, "IN1R" },
@@ -1990,7 +1999,7 @@ static struct regmap *wm5102_get_regmap(struct device *dev)
return priv->core.arizona->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.probe = wm5102_codec_probe,
.remove = wm5102_codec_remove,
.get_regmap = wm5102_get_regmap,
@@ -2000,12 +2009,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm5102_set_fll,
- .controls = wm5102_snd_controls,
- .num_controls = ARRAY_SIZE(wm5102_snd_controls),
- .dapm_widgets = wm5102_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm5102_dapm_widgets),
- .dapm_routes = wm5102_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
+ .component_driver = {
+ .controls = wm5102_snd_controls,
+ .num_controls = ARRAY_SIZE(wm5102_snd_controls),
+ .dapm_widgets = wm5102_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm5102_dapm_widgets),
+ .dapm_routes = wm5102_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
+ },
};
static struct snd_compr_ops wm5102_compr_ops = {
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 156547026a40..06bae3b23fce 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1745,6 +1745,16 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "IN4L", NULL, "SYSCLK" },
{ "IN4R", NULL, "SYSCLK" },
+ { "ASRC1L", NULL, "SYSCLK" },
+ { "ASRC1R", NULL, "SYSCLK" },
+ { "ASRC2L", NULL, "SYSCLK" },
+ { "ASRC2R", NULL, "SYSCLK" },
+
+ { "ASRC1L", NULL, "ASYNCCLK" },
+ { "ASRC1R", NULL, "ASYNCCLK" },
+ { "ASRC2L", NULL, "ASYNCCLK" },
+ { "ASRC2R", NULL, "ASYNCCLK" },
+
{ "MICBIAS1", NULL, "MICVDD" },
{ "MICBIAS2", NULL, "MICVDD" },
{ "MICBIAS3", NULL, "MICVDD" },
@@ -1832,10 +1842,8 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "Slim3 Capture", NULL, "SYSCLK" },
{ "Voice Control DSP", NULL, "DSP3" },
- { "Voice Control DSP", NULL, "SYSCLK" },
{ "Audio Trace DSP", NULL, "DSP1" },
- { "Audio Trace DSP", NULL, "SYSCLK" },
{ "IN1L PGA", NULL, "IN1L" },
{ "IN1R PGA", NULL, "IN1R" },
@@ -2347,7 +2355,7 @@ static struct regmap *wm5110_get_regmap(struct device *dev)
return priv->core.arizona->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.probe = wm5110_codec_probe,
.remove = wm5110_codec_remove,
.get_regmap = wm5110_get_regmap,
@@ -2357,12 +2365,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm5110_set_fll,
- .controls = wm5110_snd_controls,
- .num_controls = ARRAY_SIZE(wm5110_snd_controls),
- .dapm_widgets = wm5110_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm5110_dapm_widgets),
- .dapm_routes = wm5110_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes),
+ .component_driver = {
+ .controls = wm5110_snd_controls,
+ .num_controls = ARRAY_SIZE(wm5110_snd_controls),
+ .dapm_widgets = wm5110_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm5110_dapm_widgets),
+ .dapm_routes = wm5110_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes),
+ },
};
static struct snd_compr_ops wm5110_compr_ops = {
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index ffbf3df8ae97..2efc5b41ad0f 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1587,19 +1587,21 @@ static struct regmap *wm8350_get_regmap(struct device *dev)
return wm8350->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.probe = wm8350_codec_probe,
.remove = wm8350_codec_remove,
.get_regmap = wm8350_get_regmap,
.set_bias_level = wm8350_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8350_snd_controls,
- .num_controls = ARRAY_SIZE(wm8350_snd_controls),
- .dapm_widgets = wm8350_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8350_dapm_widgets),
- .dapm_routes = wm8350_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8350_dapm_routes),
+ .component_driver = {
+ .controls = wm8350_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8350_snd_controls),
+ .dapm_widgets = wm8350_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8350_dapm_widgets),
+ .dapm_routes = wm8350_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8350_dapm_routes),
+ },
};
static int wm8350_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index b1d346aa4696..6c59fb933bd6 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1332,19 +1332,21 @@ static struct regmap *wm8400_get_regmap(struct device *dev)
return wm8400->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.probe = wm8400_codec_probe,
.remove = wm8400_codec_remove,
.get_regmap = wm8400_get_regmap,
.set_bias_level = wm8400_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8400_snd_controls,
- .num_controls = ARRAY_SIZE(wm8400_snd_controls),
- .dapm_widgets = wm8400_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8400_dapm_widgets),
- .dapm_routes = wm8400_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8400_dapm_routes),
+ .component_driver = {
+ .controls = wm8400_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8400_snd_controls),
+ .dapm_widgets = wm8400_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8400_dapm_widgets),
+ .dapm_routes = wm8400_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8400_dapm_routes),
+ },
};
static int wm8400_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 99e40e629cca..119ceac684ae 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -581,17 +581,19 @@ static int wm8510_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8510 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8510 = {
.probe = wm8510_probe,
.set_bias_level = wm8510_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8510_snd_controls,
- .num_controls = ARRAY_SIZE(wm8510_snd_controls),
- .dapm_widgets = wm8510_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8510_dapm_widgets),
- .dapm_routes = wm8510_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8510_dapm_routes),
+ .component_driver = {
+ .controls = wm8510_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8510_snd_controls),
+ .dapm_widgets = wm8510_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8510_dapm_widgets),
+ .dapm_routes = wm8510_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8510_dapm_routes),
+ },
};
static const struct of_device_id wm8510_of_match[] = {
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index aa287a3965e7..deb2e075428e 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -413,17 +413,19 @@ static int wm8523_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8523 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8523 = {
.probe = wm8523_probe,
.set_bias_level = wm8523_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8523_controls,
- .num_controls = ARRAY_SIZE(wm8523_controls),
- .dapm_widgets = wm8523_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8523_dapm_widgets),
- .dapm_routes = wm8523_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8523_dapm_routes),
+ .component_driver = {
+ .controls = wm8523_controls,
+ .num_controls = ARRAY_SIZE(wm8523_controls),
+ .dapm_widgets = wm8523_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8523_dapm_widgets),
+ .dapm_routes = wm8523_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8523_dapm_routes),
+ },
};
static const struct of_device_id wm8523_of_match[] = {
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 66602bf02f6e..faa7287a5253 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -899,17 +899,19 @@ static int wm8580_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
.probe = wm8580_probe,
.remove = wm8580_remove,
.set_bias_level = wm8580_set_bias_level,
- .controls = wm8580_snd_controls,
- .num_controls = ARRAY_SIZE(wm8580_snd_controls),
- .dapm_widgets = wm8580_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8580_dapm_widgets),
- .dapm_routes = wm8580_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8580_dapm_routes),
+ .component_driver = {
+ .controls = wm8580_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8580_snd_controls),
+ .dapm_widgets = wm8580_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8580_dapm_widgets),
+ .dapm_routes = wm8580_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8580_dapm_routes),
+ },
};
static const struct of_device_id wm8580_of_match[] = {
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index c759ec068e97..2b376c9c99af 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -367,17 +367,19 @@ static int wm8711_probe(struct snd_soc_codec *codec)
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8711 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8711 = {
.probe = wm8711_probe,
.set_bias_level = wm8711_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8711_snd_controls,
- .num_controls = ARRAY_SIZE(wm8711_snd_controls),
- .dapm_widgets = wm8711_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8711_dapm_widgets),
- .dapm_routes = wm8711_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8711_intercon),
+ .component_driver = {
+ .controls = wm8711_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8711_snd_controls),
+ .dapm_widgets = wm8711_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8711_dapm_widgets),
+ .dapm_routes = wm8711_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8711_intercon),
+ },
};
static const struct of_device_id wm8711_of_match[] = {
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index bb25a75f92a2..7fde077a014b 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -53,11 +53,13 @@ static struct snd_soc_dai_driver wm8727_dai = {
},
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8727 = {
- .dapm_widgets = wm8727_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8727_dapm_widgets),
- .dapm_routes = wm8727_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8727_dapm_routes),
+static const struct snd_soc_codec_driver soc_codec_dev_wm8727 = {
+ .component_driver = {
+ .dapm_widgets = wm8727_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8727_dapm_widgets),
+ .dapm_routes = wm8727_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8727_dapm_routes),
+ },
};
static int wm8727_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 1564e6926527..797cc6e7c70f 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -211,16 +211,18 @@ static struct snd_soc_dai_driver wm8728_dai = {
.ops = &wm8728_dai_ops,
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8728 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8728 = {
.set_bias_level = wm8728_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8728_snd_controls,
- .num_controls = ARRAY_SIZE(wm8728_snd_controls),
- .dapm_widgets = wm8728_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8728_dapm_widgets),
- .dapm_routes = wm8728_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8728_intercon),
+ .component_driver = {
+ .controls = wm8728_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8728_snd_controls),
+ .dapm_widgets = wm8728_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8728_dapm_widgets),
+ .dapm_routes = wm8728_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8728_intercon),
+ },
};
static const struct of_device_id wm8728_of_match[] = {
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index d18261a44256..4f9a1eb28120 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -628,16 +628,18 @@ err_regulator_enable:
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
.set_bias_level = wm8731_set_bias_level,
.suspend_bias_off = true,
- .dapm_widgets = wm8731_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
- .dapm_routes = wm8731_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8731_intercon),
- .controls = wm8731_snd_controls,
- .num_controls = ARRAY_SIZE(wm8731_snd_controls),
+ .component_driver = {
+ .controls = wm8731_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8731_snd_controls),
+ .dapm_widgets = wm8731_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
+ .dapm_routes = wm8731_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8731_intercon),
+ },
};
static const struct of_device_id wm8731_of_match[] = {
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index e7807601e675..f0cb1c4afe3c 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -573,17 +573,19 @@ err_get:
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
.probe = wm8737_probe,
.set_bias_level = wm8737_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8737_snd_controls,
- .num_controls = ARRAY_SIZE(wm8737_snd_controls),
- .dapm_widgets = wm8737_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8737_dapm_widgets),
- .dapm_routes = intercon,
- .num_dapm_routes = ARRAY_SIZE(intercon),
+ .component_driver = {
+ .controls = wm8737_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8737_snd_controls),
+ .dapm_widgets = wm8737_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8737_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
+ },
};
static const struct of_device_id wm8737_of_match[] = {
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 36ef91fe0511..565d477cd790 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -497,15 +497,17 @@ static int wm8741_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8741 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8741 = {
.probe = wm8741_probe,
.remove = wm8741_remove,
.resume = wm8741_resume,
- .dapm_widgets = wm8741_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8741_dapm_widgets),
- .dapm_routes = wm8741_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8741_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = wm8741_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8741_dapm_widgets),
+ .dapm_routes = wm8741_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8741_dapm_routes),
+ },
};
static const struct of_device_id wm8741_of_match[] = {
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index bd9dcd2161bc..0da2bbaf06d1 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -708,17 +708,19 @@ static int wm8750_probe(struct snd_soc_codec *codec)
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8750 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8750 = {
.probe = wm8750_probe,
.set_bias_level = wm8750_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8750_snd_controls,
- .num_controls = ARRAY_SIZE(wm8750_snd_controls),
- .dapm_widgets = wm8750_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
- .dapm_routes = wm8750_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8750_dapm_routes),
+ .component_driver = {
+ .controls = wm8750_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8750_snd_controls),
+ .dapm_widgets = wm8750_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
+ .dapm_routes = wm8750_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8750_dapm_routes),
+ },
};
static const struct of_device_id wm8750_of_match[] = {
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index cdcc91282e8a..9bdf5447f6f6 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1478,18 +1478,20 @@ static int wm8753_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8753 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8753 = {
.probe = wm8753_probe,
.resume = wm8753_resume,
.set_bias_level = wm8753_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8753_snd_controls,
- .num_controls = ARRAY_SIZE(wm8753_snd_controls),
- .dapm_widgets = wm8753_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8753_dapm_widgets),
- .dapm_routes = wm8753_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8753_dapm_routes),
+ .component_driver = {
+ .controls = wm8753_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8753_snd_controls),
+ .dapm_widgets = wm8753_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8753_dapm_widgets),
+ .dapm_routes = wm8753_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8753_dapm_routes),
+ },
};
static const struct of_device_id wm8753_of_match[] = {
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index df6178464b00..d6edcbbdec12 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -608,17 +608,19 @@ err_reg_enable:
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
.probe = wm8770_probe,
.set_bias_level = wm8770_set_bias_level,
.idle_bias_off = true,
- .controls = wm8770_snd_controls,
- .num_controls = ARRAY_SIZE(wm8770_snd_controls),
- .dapm_widgets = wm8770_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8770_dapm_widgets),
- .dapm_routes = wm8770_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8770_intercon),
+ .component_driver = {
+ .controls = wm8770_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8770_snd_controls),
+ .dapm_widgets = wm8770_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8770_dapm_widgets),
+ .dapm_routes = wm8770_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8770_intercon),
+ },
};
static const struct of_device_id wm8770_of_match[] = {
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 5af44f9a8cf2..ae30480b3976 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -425,17 +425,19 @@ static int wm8776_probe(struct snd_soc_codec *codec)
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8776 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8776 = {
.probe = wm8776_probe,
.set_bias_level = wm8776_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8776_snd_controls,
- .num_controls = ARRAY_SIZE(wm8776_snd_controls),
- .dapm_widgets = wm8776_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8776_dapm_widgets),
- .dapm_routes = routes,
- .num_dapm_routes = ARRAY_SIZE(routes),
+ .component_driver = {
+ .controls = wm8776_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8776_snd_controls),
+ .dapm_widgets = wm8776_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8776_dapm_widgets),
+ .dapm_routes = routes,
+ .num_dapm_routes = ARRAY_SIZE(routes),
+ },
};
static const struct of_device_id wm8776_of_match[] = {
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index fb55fd845d27..bcda21018505 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -50,11 +50,13 @@ static struct snd_soc_dai_driver wm8782_dai = {
},
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8782 = {
- .dapm_widgets = wm8782_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
- .dapm_routes = wm8782_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8782_dapm_routes),
+static const struct snd_soc_codec_driver soc_codec_dev_wm8782 = {
+ .component_driver = {
+ .dapm_widgets = wm8782_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
+ .dapm_routes = wm8782_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8782_dapm_routes),
+ },
};
static int wm8782_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 8d914702cae4..af95d648265b 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -545,10 +545,12 @@ static struct snd_soc_dai_driver wm8804_dai = {
static const struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
.idle_bias_off = true,
- .dapm_widgets = wm8804_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8804_dapm_widgets),
- .dapm_routes = wm8804_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8804_dapm_routes),
+ .component_driver = {
+ .dapm_widgets = wm8804_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8804_dapm_widgets),
+ .dapm_routes = wm8804_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8804_dapm_routes),
+ },
};
const struct regmap_config wm8804_regmap_config = {
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 5d8dca88d612..c77b49a29311 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1208,18 +1208,20 @@ static int wm8900_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8900 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8900 = {
.probe = wm8900_probe,
.suspend = wm8900_suspend,
.resume = wm8900_resume,
.set_bias_level = wm8900_set_bias_level,
- .controls = wm8900_snd_controls,
- .num_controls = ARRAY_SIZE(wm8900_snd_controls),
- .dapm_widgets = wm8900_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8900_dapm_widgets),
- .dapm_routes = wm8900_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8900_dapm_routes),
+ .component_driver = {
+ .controls = wm8900_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8900_snd_controls),
+ .dapm_widgets = wm8900_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8900_dapm_widgets),
+ .dapm_routes = wm8900_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8900_dapm_routes),
+ },
};
static const struct regmap_config wm8900_regmap = {
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index a26ca490cf31..6e887c2c42b1 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1830,7 +1830,7 @@ static void wm8903_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
!!value << WM8903_GP1_LVL_SHIFT);
}
-static struct gpio_chip wm8903_template_chip = {
+static const struct gpio_chip wm8903_template_chip = {
.label = "wm8903",
.owner = THIS_MODULE,
.request = wm8903_gpio_request,
@@ -1874,18 +1874,20 @@ static void wm8903_free_gpio(struct wm8903_priv *wm8903)
}
#endif
-static struct snd_soc_codec_driver soc_codec_dev_wm8903 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8903 = {
.resume = wm8903_resume,
.set_bias_level = wm8903_set_bias_level,
.seq_notifier = wm8903_seq_notifier,
.suspend_bias_off = true,
- .controls = wm8903_snd_controls,
- .num_controls = ARRAY_SIZE(wm8903_snd_controls),
- .dapm_widgets = wm8903_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8903_dapm_widgets),
- .dapm_routes = wm8903_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8903_intercon),
+ .component_driver = {
+ .controls = wm8903_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8903_snd_controls),
+ .dapm_widgets = wm8903_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8903_dapm_widgets),
+ .dapm_routes = wm8903_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8903_intercon),
+ },
};
static const struct regmap_config wm8903_regmap = {
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index edd7a7709194..4fd350e8420d 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2086,7 +2086,7 @@ static int wm8904_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8904 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8904 = {
.probe = wm8904_probe,
.remove = wm8904_remove,
.set_bias_level = wm8904_set_bias_level,
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 1c600819f768..b5935625feeb 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -723,17 +723,19 @@ static int wm8940_probe(struct snd_soc_codec *codec)
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8940 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8940 = {
.probe = wm8940_probe,
.set_bias_level = wm8940_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8940_snd_controls,
- .num_controls = ARRAY_SIZE(wm8940_snd_controls),
- .dapm_widgets = wm8940_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8940_dapm_widgets),
- .dapm_routes = wm8940_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8940_dapm_routes),
+ .component_driver = {
+ .controls = wm8940_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8940_snd_controls),
+ .dapm_widgets = wm8940_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8940_dapm_widgets),
+ .dapm_routes = wm8940_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8940_dapm_routes),
+ },
};
static const struct regmap_config wm8940_regmap = {
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 9db00d53abe7..1edc7b1df31d 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -940,17 +940,19 @@ err_enable:
return ret;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8955 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8955 = {
.probe = wm8955_probe,
.set_bias_level = wm8955_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8955_snd_controls,
- .num_controls = ARRAY_SIZE(wm8955_snd_controls),
- .dapm_widgets = wm8955_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8955_dapm_widgets),
- .dapm_routes = wm8955_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8955_dapm_routes),
+ .component_driver = {
+ .controls = wm8955_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8955_snd_controls),
+ .dapm_widgets = wm8955_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8955_dapm_widgets),
+ .dapm_routes = wm8955_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8955_dapm_routes),
+ },
};
static const struct regmap_config wm8955_regmap = {
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index d7f444f87460..3bf081a7e450 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -226,11 +226,10 @@ static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1);
-static const unsigned int micboost_tlv[] = {
- TLV_DB_RANGE_HEAD(2),
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(micboost_tlv,
0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0),
2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0),
-};
+);
static const struct snd_kcontrol_new wm8960_snd_controls[] = {
SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
@@ -1264,7 +1263,7 @@ static int wm8960_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8960 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8960 = {
.probe = wm8960_probe,
.set_bias_level = wm8960_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index e30446a04740..e23ceac76015 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -882,18 +882,20 @@ static int wm8961_resume(struct snd_soc_codec *codec)
#define wm8961_resume NULL
#endif
-static struct snd_soc_codec_driver soc_codec_dev_wm8961 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8961 = {
.probe = wm8961_probe,
.resume = wm8961_resume,
.set_bias_level = wm8961_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8961_snd_controls,
- .num_controls = ARRAY_SIZE(wm8961_snd_controls),
- .dapm_widgets = wm8961_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8961_dapm_widgets),
- .dapm_routes = audio_paths,
- .num_dapm_routes = ARRAY_SIZE(audio_paths),
+ .component_driver = {
+ .controls = wm8961_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8961_snd_controls),
+ .dapm_widgets = wm8961_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8961_dapm_widgets),
+ .dapm_routes = audio_paths,
+ .num_dapm_routes = ARRAY_SIZE(audio_paths),
+ },
};
static const struct regmap_config wm8961_regmap = {
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index f3109da24769..fd2731d171dd 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3357,7 +3357,7 @@ static int wm8962_gpio_direction_out(struct gpio_chip *chip,
return 0;
}
-static struct gpio_chip wm8962_template_chip = {
+static const struct gpio_chip wm8962_template_chip = {
.label = "wm8962",
.owner = THIS_MODULE,
.request = wm8962_gpio_request,
@@ -3479,7 +3479,7 @@ static int wm8962_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
.probe = wm8962_probe,
.remove = wm8962_remove,
.set_bias_level = wm8962_set_bias_level,
@@ -3713,7 +3713,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
ARRAY_SIZE(wm8962_dc_measure));
if (ret != 0)
dev_err(&i2c->dev,
- "Failed to configure for DC mesurement: %d\n",
+ "Failed to configure for DC measurement: %d\n",
ret);
}
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 2cdde32c43c6..887d31cf3945 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -649,17 +649,19 @@ static int wm8971_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8971 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8971 = {
.probe = wm8971_probe,
.set_bias_level = wm8971_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8971_snd_controls,
- .num_controls = ARRAY_SIZE(wm8971_snd_controls),
- .dapm_widgets = wm8971_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8971_dapm_widgets),
- .dapm_routes = wm8971_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8971_dapm_routes),
+ .component_driver = {
+ .controls = wm8971_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8971_snd_controls),
+ .dapm_widgets = wm8971_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8971_dapm_widgets),
+ .dapm_routes = wm8971_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8971_dapm_routes),
+ },
};
static const struct regmap_config wm8971_regmap = {
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index dc8c3b1ebb6f..d414ddd6e197 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -676,17 +676,19 @@ static int wm8974_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
.probe = wm8974_probe,
.set_bias_level = wm8974_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8974_snd_controls,
- .num_controls = ARRAY_SIZE(wm8974_snd_controls),
- .dapm_widgets = wm8974_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8974_dapm_widgets),
- .dapm_routes = wm8974_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
+ .component_driver = {
+ .controls = wm8974_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8974_snd_controls),
+ .dapm_widgets = wm8974_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8974_dapm_widgets),
+ .dapm_routes = wm8974_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
+ },
};
static int wm8974_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index d36d6001fbb7..90b2d418ef60 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -993,18 +993,20 @@ static int wm8978_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8978 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8978 = {
.probe = wm8978_probe,
.suspend = wm8978_suspend,
.resume = wm8978_resume,
.set_bias_level = wm8978_set_bias_level,
- .controls = wm8978_snd_controls,
- .num_controls = ARRAY_SIZE(wm8978_snd_controls),
- .dapm_widgets = wm8978_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8978_dapm_widgets),
- .dapm_routes = wm8978_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8978_dapm_routes),
+ .component_driver = {
+ .controls = wm8978_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8978_snd_controls),
+ .dapm_widgets = wm8978_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8978_dapm_widgets),
+ .dapm_routes = wm8978_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8978_dapm_routes),
+ },
};
static const struct regmap_config wm8978_regmap_config = {
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index 0c002a5712cb..bfdbe72ee687 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -976,16 +976,18 @@ static struct snd_soc_dai_driver wm8983_dai = {
.symmetric_rates = 1
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8983 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8983 = {
.probe = wm8983_probe,
.set_bias_level = wm8983_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8983_snd_controls,
- .num_controls = ARRAY_SIZE(wm8983_snd_controls),
- .dapm_widgets = wm8983_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8983_dapm_widgets),
- .dapm_routes = wm8983_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wm8983_audio_map),
+ .component_driver = {
+ .controls = wm8983_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8983_snd_controls),
+ .dapm_widgets = wm8983_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8983_dapm_widgets),
+ .dapm_routes = wm8983_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm8983_audio_map),
+ },
};
static const struct regmap_config wm8983_regmap = {
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 7347abff4b2c..05344f974ff3 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1105,17 +1105,19 @@ static struct snd_soc_dai_driver wm8985_dai = {
.symmetric_rates = 1
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8985 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8985 = {
.probe = wm8985_probe,
.set_bias_level = wm8985_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8985_common_snd_controls,
- .num_controls = ARRAY_SIZE(wm8985_common_snd_controls),
- .dapm_widgets = wm8985_common_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8985_common_dapm_widgets),
- .dapm_routes = wm8985_common_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8985_common_dapm_routes),
+ .component_driver = {
+ .controls = wm8985_common_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8985_common_snd_controls),
+ .dapm_widgets = wm8985_common_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8985_common_dapm_widgets),
+ .dapm_routes = wm8985_common_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8985_common_dapm_routes),
+ },
};
static const struct regmap_config wm8985_regmap = {
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 895721a256f0..b0d0219532f2 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -817,12 +817,14 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
.set_bias_level = wm8988_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8988_snd_controls,
- .num_controls = ARRAY_SIZE(wm8988_snd_controls),
- .dapm_widgets = wm8988_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets),
- .dapm_routes = wm8988_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes),
+ .component_driver = {
+ .controls = wm8988_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8988_snd_controls),
+ .dapm_widgets = wm8988_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets),
+ .dapm_routes = wm8988_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes),
+ },
};
static const struct regmap_config wm8988_regmap = {
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 23ecd30d8bca..a8945001e696 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1294,17 +1294,19 @@ static int wm8990_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8990 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8990 = {
.probe = wm8990_probe,
.set_bias_level = wm8990_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8990_snd_controls,
- .num_controls = ARRAY_SIZE(wm8990_snd_controls),
- .dapm_widgets = wm8990_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8990_dapm_widgets),
- .dapm_routes = wm8990_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8990_dapm_routes),
+ .component_driver = {
+ .controls = wm8990_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8990_snd_controls),
+ .dapm_widgets = wm8990_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8990_dapm_widgets),
+ .dapm_routes = wm8990_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8990_dapm_routes),
+ },
};
static const struct regmap_config wm8990_regmap = {
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index c9ee0ac6a654..802c0694e5c3 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -111,14 +111,24 @@ static bool wm8991_volatile(struct device *dev, unsigned int reg)
}
}
-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(in_pga_tlv, -1650, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(out_mix_tlv, -2100, 300, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(out_pga_tlv,
+ 0x00, 0x2f, SNDRV_CTL_TLVD_DB_SCALE_ITEM(SNDRV_CTL_TLVD_DB_GAIN_MUTE, 0, 1),
+ 0x30, 0x7f, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-7300, 100, 0),
+);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(out_dac_tlv,
+ 0x00, 0xbf, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-71625, 375, 1),
+ 0xc0, 0xff, SNDRV_CTL_TLVD_DB_SCALE_ITEM(0, 0, 0),
+);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(in_adc_tlv,
+ 0x00, 0xef, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-71625, 375, 1),
+ 0xf0, 0xff, SNDRV_CTL_TLVD_DB_SCALE_ITEM(17625, 0, 0),
+);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(out_sidetone_tlv,
+ 0x00, 0x0c, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-3600, 300, 0),
+ 0x0d, 0x0f, SNDRV_CTL_TLVD_DB_SCALE_ITEM(0, 0, 0),
+);
static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -398,7 +408,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
/* INMIX dB values */
-static const DECLARE_TLV_DB_LINEAR(in_mix_tlv, -1200, 600);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(in_mix_tlv, -1200, 300, 1);
/* Left In PGA Connections */
static const struct snd_kcontrol_new wm8991_dapm_lin12_pga_controls[] = {
@@ -1232,16 +1242,18 @@ static struct snd_soc_dai_driver wm8991_dai = {
.ops = &wm8991_ops
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8991 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8991 = {
.set_bias_level = wm8991_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8991_snd_controls,
- .num_controls = ARRAY_SIZE(wm8991_snd_controls),
- .dapm_widgets = wm8991_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8991_dapm_widgets),
- .dapm_routes = wm8991_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8991_dapm_routes),
+ .component_driver = {
+ .controls = wm8991_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8991_snd_controls),
+ .dapm_widgets = wm8991_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8991_dapm_widgets),
+ .dapm_routes = wm8991_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8991_dapm_routes),
+ },
};
static const struct regmap_config wm8991_regmap = {
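
In the wm8991 hunk the DECLARE_TLV_DB_LINEAR() metadata, which tells user space to treat the raw control range as a linear amplitude ramp between two dB endpoints, is replaced by dB-scale and dB-range declarations that describe the register's fixed dB steps directly. As a rough illustration of what a DB_SCALE entry means to a TLV consumer (a standalone sketch, not kernel or alsa-lib code; the helper name is made up):

#include <limits.h>
#include <stdbool.h>

/* Map a raw register value to centi-dB for a TLVD_DB_SCALE entry with the
 * given minimum, step and mute-at-zero flag, as passed to the macro.
 */
static int db_scale_to_centidb(unsigned int val, int min_cdb, int step_cdb,
			       bool mute_at_zero)
{
	if (mute_at_zero && val == 0)
		return INT_MIN;		/* muted */
	return min_cdb + (int)val * step_cdb;
}

With in_pga_tlv above (minimum -16.50 dB, 1.50 dB steps), a raw value of 11 reads back as 0 dB; the old linear declaration could not express that the hardware moves in constant-dB increments.
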
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 8668c4c391b0..195f7bf6eb22 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1613,7 +1613,7 @@ static const struct regmap_config wm8993_regmap = {
.num_reg_defaults = ARRAY_SIZE(wm8993_reg_defaults),
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8993 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8993 = {
.probe = wm8993_probe,
.suspend = wm8993_suspend,
.resume = wm8993_resume,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index a18aecb49935..3896523b71e9 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4439,7 +4439,7 @@ static struct regmap *wm8994_get_regmap(struct device *dev)
return control->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
.probe = wm8994_codec_probe,
.remove = wm8994_codec_remove,
.suspend = wm8994_codec_suspend,
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 24500bafb0a8..19b08a5cae62 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2192,12 +2192,14 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
.set_bias_level = wm8995_set_bias_level,
.idle_bias_off = true,
- .controls = wm8995_snd_controls,
- .num_controls = ARRAY_SIZE(wm8995_snd_controls),
- .dapm_widgets = wm8995_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8995_dapm_widgets),
- .dapm_routes = wm8995_intercon,
- .num_dapm_routes = ARRAY_SIZE(wm8995_intercon),
+ .component_driver = {
+ .controls = wm8995_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8995_snd_controls),
+ .dapm_widgets = wm8995_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8995_dapm_widgets),
+ .dapm_routes = wm8995_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8995_intercon),
+ },
};
static const struct regmap_config wm8995_regmap = {
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index a73044251218..8affa4969120 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2184,7 +2184,7 @@ static int wm8996_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
(1 << WM8996_GP1_DIR_SHIFT));
}
-static struct gpio_chip wm8996_template_chip = {
+static const struct gpio_chip wm8996_template_chip = {
.label = "wm8996",
.owner = THIS_MODULE,
.direction_output = wm8996_gpio_direction_out,
@@ -2684,18 +2684,20 @@ static int wm8996_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8996 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8996 = {
.probe = wm8996_probe,
.remove = wm8996_remove,
.set_bias_level = wm8996_set_bias_level,
.idle_bias_off = true,
.seq_notifier = wm8996_seq_notifier,
- .controls = wm8996_snd_controls,
- .num_controls = ARRAY_SIZE(wm8996_snd_controls),
- .dapm_widgets = wm8996_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8996_dapm_widgets),
- .dapm_routes = wm8996_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8996_dapm_routes),
+ .component_driver = {
+ .controls = wm8996_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8996_snd_controls),
+ .dapm_widgets = wm8996_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8996_dapm_widgets),
+ .dapm_routes = wm8996_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8996_dapm_routes),
+ },
.set_pll = wm8996_set_fll,
};
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 6b0785b5a5c5..2f2821b3382f 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1095,7 +1095,7 @@ static struct regmap *wm8997_get_regmap(struct device *dev)
return priv->core.arizona->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
.probe = wm8997_codec_probe,
.remove = wm8997_codec_remove,
.get_regmap = wm8997_get_regmap,
@@ -1105,12 +1105,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm8997_set_fll,
- .controls = wm8997_snd_controls,
- .num_controls = ARRAY_SIZE(wm8997_snd_controls),
- .dapm_widgets = wm8997_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8997_dapm_widgets),
- .dapm_routes = wm8997_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8997_dapm_routes),
+ .component_driver = {
+ .controls = wm8997_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8997_snd_controls),
+ .dapm_widgets = wm8997_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8997_dapm_widgets),
+ .dapm_routes = wm8997_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8997_dapm_routes),
+ },
};
static int wm8997_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 3a5c896a2d13..bcc2e1060a6c 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -966,6 +966,16 @@ static const struct snd_soc_dapm_route wm8998_dapm_routes[] = {
{ "IN2A", NULL, "SYSCLK" },
{ "IN2B", NULL, "SYSCLK" },
+ { "ASRC1L", NULL, "SYSCLK" },
+ { "ASRC1R", NULL, "SYSCLK" },
+ { "ASRC2L", NULL, "SYSCLK" },
+ { "ASRC2R", NULL, "SYSCLK" },
+
+ { "ASRC1L", NULL, "ASYNCCLK" },
+ { "ASRC1R", NULL, "ASYNCCLK" },
+ { "ASRC2L", NULL, "ASYNCCLK" },
+ { "ASRC2R", NULL, "ASYNCCLK" },
+
{ "SPD1", NULL, "SYSCLK" },
{ "SPD1", NULL, "SPD1TX1" },
{ "SPD1", NULL, "SPD1TX2" },
@@ -1351,7 +1361,7 @@ static struct regmap *wm8998_get_regmap(struct device *dev)
return priv->core.arizona->regmap;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
.probe = wm8998_codec_probe,
.remove = wm8998_codec_remove,
.get_regmap = wm8998_get_regmap,
@@ -1361,12 +1371,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm8998_set_fll,
- .controls = wm8998_snd_controls,
- .num_controls = ARRAY_SIZE(wm8998_snd_controls),
- .dapm_widgets = wm8998_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8998_dapm_widgets),
- .dapm_routes = wm8998_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8998_dapm_routes),
+ .component_driver = {
+ .controls = wm8998_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8998_snd_controls),
+ .dapm_widgets = wm8998_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8998_dapm_widgets),
+ .dapm_routes = wm8998_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8998_dapm_routes),
+ },
};
static int wm8998_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 363b3b667616..856867ec2813 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1274,7 +1274,7 @@ static int wm9081_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm9081 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm9081 = {
.probe = wm9081_probe,
.set_sysclk = wm9081_set_sysclk,
@@ -1282,12 +1282,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9081 = {
.idle_bias_off = true,
- .controls = wm9081_snd_controls,
- .num_controls = ARRAY_SIZE(wm9081_snd_controls),
- .dapm_widgets = wm9081_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm9081_dapm_widgets),
- .dapm_routes = wm9081_audio_paths,
- .num_dapm_routes = ARRAY_SIZE(wm9081_audio_paths),
+ .component_driver = {
+ .controls = wm9081_snd_controls,
+ .num_controls = ARRAY_SIZE(wm9081_snd_controls),
+ .dapm_widgets = wm9081_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm9081_dapm_widgets),
+ .dapm_routes = wm9081_audio_paths,
+ .num_dapm_routes = ARRAY_SIZE(wm9081_audio_paths),
+ },
};
static const struct regmap_config wm9081_regmap = {
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 5d737290f547..5a131385cb2f 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -550,7 +550,7 @@ static int wm9090_probe(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm9090 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm9090 = {
.probe = wm9090_probe,
.set_bias_level = wm9090_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 744842c76a60..dcdd055db57b 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -352,7 +352,7 @@ static int wm9705_soc_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
.probe = wm9705_soc_probe,
.remove = wm9705_soc_remove,
.suspend = wm9705_soc_suspend,
@@ -364,12 +364,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
.reg_cache_step = 2,
.reg_cache_default = wm9705_reg,
- .controls = wm9705_snd_ac97_controls,
- .num_controls = ARRAY_SIZE(wm9705_snd_ac97_controls),
- .dapm_widgets = wm9705_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets),
- .dapm_routes = wm9705_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wm9705_audio_map),
+ .component_driver = {
+ .controls = wm9705_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9705_snd_ac97_controls),
+ .dapm_widgets = wm9705_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets),
+ .dapm_routes = wm9705_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm9705_audio_map),
+ },
};
static int wm9705_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 488a92224249..557709eac698 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -669,7 +669,7 @@ static int wm9712_soc_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
.probe = wm9712_soc_probe,
.remove = wm9712_soc_remove,
.resume = wm9712_soc_resume,
@@ -682,12 +682,14 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
.reg_cache_step = 2,
.reg_cache_default = wm9712_reg,
- .controls = wm9712_snd_ac97_controls,
- .num_controls = ARRAY_SIZE(wm9712_snd_ac97_controls),
- .dapm_widgets = wm9712_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets),
- .dapm_routes = wm9712_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wm9712_audio_map),
+ .component_driver = {
+ .controls = wm9712_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9712_snd_ac97_controls),
+ .dapm_widgets = wm9712_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets),
+ .dapm_routes = wm9712_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm9712_audio_map),
+ },
};
static int wm9712_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 9849643ef809..e4301ddb1b84 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1235,19 +1235,21 @@ static int wm9713_soc_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
+static const struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
.probe = wm9713_soc_probe,
.remove = wm9713_soc_remove,
.suspend = wm9713_soc_suspend,
.resume = wm9713_soc_resume,
.set_bias_level = wm9713_set_bias_level,
- .controls = wm9713_snd_ac97_controls,
- .num_controls = ARRAY_SIZE(wm9713_snd_ac97_controls),
- .dapm_widgets = wm9713_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets),
- .dapm_routes = wm9713_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wm9713_audio_map),
+ .component_driver = {
+ .controls = wm9713_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9713_snd_ac97_controls),
+ .dapm_widgets = wm9713_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets),
+ .dapm_routes = wm9713_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wm9713_audio_map),
+ },
};
static int wm9713_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 21fbe7d07063..b943dde8dbe5 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -480,7 +480,7 @@ static ssize_t wm_adsp_debugfs_wmfw_read(struct file *file,
mutex_lock(&dsp->pwr_lock);
- if (!dsp->wmfw_file_name || !dsp->running)
+ if (!dsp->wmfw_file_name || !dsp->booted)
ret = 0;
else
ret = simple_read_from_buffer(user_buf, count, ppos,
@@ -500,7 +500,7 @@ static ssize_t wm_adsp_debugfs_bin_read(struct file *file,
mutex_lock(&dsp->pwr_lock);
- if (!dsp->bin_file_name || !dsp->running)
+ if (!dsp->bin_file_name || !dsp->booted)
ret = 0;
else
ret = simple_read_from_buffer(user_buf, count, ppos,
@@ -554,6 +554,9 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
if (!root)
goto err;
+ if (!debugfs_create_bool("booted", S_IRUGO, root, &dsp->booted))
+ goto err;
+
if (!debugfs_create_bool("running", S_IRUGO, root, &dsp->running))
goto err;
@@ -637,7 +640,7 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
mutex_lock(&dsp[e->shift_l].pwr_lock);
- if (dsp[e->shift_l].running || dsp[e->shift_l].compr)
+ if (dsp[e->shift_l].booted || dsp[e->shift_l].compr)
ret = -EBUSY;
else
dsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
@@ -789,7 +792,7 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
memcpy(ctl->cache, p, ctl->len);
ctl->set = 1;
- if (ctl->enabled)
+ if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_write_control(ctl, p, ctl->len);
mutex_unlock(&ctl->dsp->pwr_lock);
@@ -811,7 +814,7 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
ret = -EFAULT;
} else {
ctl->set = 1;
- if (ctl->enabled)
+ if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_write_control(ctl, ctl->cache, size);
}
@@ -871,12 +874,12 @@ static int wm_coeff_get(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
- if (ctl->enabled)
+ if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_read_control(ctl, p, ctl->len);
else
ret = -EPERM;
} else {
- if (!ctl->flags && ctl->enabled)
+ if (!ctl->flags && ctl->enabled && ctl->dsp->running)
ret = wm_coeff_read_control(ctl, ctl->cache, ctl->len);
memcpy(p, ctl->cache, ctl->len);
@@ -898,12 +901,12 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
- if (ctl->enabled)
+ if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_read_control(ctl, ctl->cache, size);
else
ret = -EPERM;
} else {
- if (!ctl->flags && ctl->enabled)
+ if (!ctl->flags && ctl->enabled && ctl->dsp->running)
ret = wm_coeff_read_control(ctl, ctl->cache, size);
}
@@ -2166,13 +2169,20 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
if (ret != 0)
goto err_ena;
+ dsp->booted = true;
+
/* Start the core running */
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_CORE_ENA | ADSP1_START,
ADSP1_CORE_ENA | ADSP1_START);
+
+ dsp->running = true;
break;
case SND_SOC_DAPM_PRE_PMD:
+ dsp->running = false;
+ dsp->booted = false;
+
/* Halt the core */
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_CORE_ENA | ADSP1_START, 0);
@@ -2227,7 +2237,7 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
if (val & ADSP2_RAM_RDY)
break;
- msleep(1);
+ usleep_range(250, 500);
}
if (!(val & ADSP2_RAM_RDY)) {
@@ -2249,6 +2259,11 @@ static void wm_adsp2_boot_work(struct work_struct *work)
mutex_lock(&dsp->pwr_lock);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, ADSP2_MEM_ENA);
+ if (ret != 0)
+ goto err_mutex;
+
ret = wm_adsp2_ena(dsp);
if (ret != 0)
goto err_mutex;
@@ -2270,13 +2285,14 @@ static void wm_adsp2_boot_work(struct work_struct *work)
if (ret != 0)
goto err_ena;
- /* Sync set controls */
- ret = wm_coeff_sync_controls(dsp);
+ dsp->booted = true;
+
+ /* Turn DSP back off until we are ready to run */
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
if (ret != 0)
goto err_ena;
- dsp->running = true;
-
mutex_unlock(&dsp->pwr_lock);
return;
@@ -2307,6 +2323,7 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
+ struct wm_coeff_ctl *ctl;
dsp->card = codec->component.card;
@@ -2315,6 +2332,24 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
wm_adsp2_set_dspclk(dsp, freq);
queue_work(system_unbound_wq, &dsp->boot_work);
break;
+ case SND_SOC_DAPM_PRE_PMD:
+ wm_adsp_debugfs_clear(dsp);
+
+ dsp->fw_id = 0;
+ dsp->fw_id_version = 0;
+
+ dsp->booted = false;
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ wm_adsp_free_alg_regions(dsp);
+
+ adsp_dbg(dsp, "Shutdown complete\n");
+ break;
default:
break;
}
@@ -2329,16 +2364,24 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
struct wm_adsp *dsp = &dsps[w->shift];
- struct wm_coeff_ctl *ctl;
int ret;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
flush_work(&dsp->boot_work);
- if (!dsp->running)
+ if (!dsp->booted)
return -EIO;
+ ret = wm_adsp2_ena(dsp);
+ if (ret != 0)
+ goto err;
+
+ /* Sync set controls */
+ ret = wm_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err;
+
ret = regmap_update_bits(dsp->regmap,
dsp->base + ADSP2_CONTROL,
ADSP2_CORE_ENA | ADSP2_START,
@@ -2346,6 +2389,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
if (ret != 0)
goto err;
+ dsp->running = true;
+
mutex_lock(&dsp->pwr_lock);
if (wm_adsp_fw[dsp->fw].num_caps != 0)
@@ -2361,10 +2406,6 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
mutex_lock(&dsp->pwr_lock);
- wm_adsp_debugfs_clear(dsp);
-
- dsp->fw_id = 0;
- dsp->fw_id_version = 0;
dsp->running = false;
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
@@ -2378,17 +2419,12 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_SYS_ENA, 0);
- list_for_each_entry(ctl, &dsp->ctl_list, list)
- ctl->enabled = 0;
-
- wm_adsp_free_alg_regions(dsp);
-
if (wm_adsp_fw[dsp->fw].num_caps != 0)
wm_adsp_buffer_free(dsp);
mutex_unlock(&dsp->pwr_lock);
- adsp_dbg(dsp, "Shutdown complete\n");
+ adsp_dbg(dsp, "Execution stopped\n");
break;
default:
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index be3b5bcb7f17..362dd7ce60d8 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -61,6 +61,8 @@ struct wm_adsp {
int fw;
int fw_ver;
+
+ bool booted;
bool running;
struct list_head ctl_list;
@@ -85,9 +87,10 @@ struct wm_adsp {
wm_adsp1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
#define WM_ADSP2(wname, num, event_fn) \
-{ .id = snd_soc_dapm_dai_link, .name = wname " Preloader", \
+{ .id = snd_soc_dapm_supply, .name = wname " Preloader", \
.reg = SND_SOC_NOPM, .shift = num, .event = event_fn, \
- .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }, \
+ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD, \
+ .subseq = 100, /* Ensure we run after SYSCLK supply widget */ }, \
{ .id = snd_soc_dapm_out_drv, .name = wname, \
.reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_event, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD }
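
The wm_adsp changes above split the DSP power state in two: "booted" means the memory is powered and the firmware and coefficient files have been downloaded (driven from the " Preloader" widget, now a DAPM supply sequenced after the SYSCLK supply), while "running" means the core has actually been started from the main widget's POST_PMU event. Coefficient control I/O is only attempted once the core is running; until then values stay in the control cache and are pushed by wm_coeff_sync_controls() at start. A small sketch of the resulting guard, reusing the real pwr_lock/running fields from wm_adsp.h but with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/mutex.h>
#include "wm_adsp.h"

/* Return 0 when a direct write to DSP memory is permissible, -EBUSY when
 * the caller should fall back to caching the value for a later sync.
 */
static int example_coeff_writable(struct wm_adsp *dsp)
{
	int ret;

	mutex_lock(&dsp->pwr_lock);
	ret = dsp->running ? 0 : -EBUSY;
	mutex_unlock(&dsp->pwr_lock);

	return ret;
}
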
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 05c2d33aa74d..3c5a9804d3f5 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1218,7 +1218,7 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
snd_mask_none(&nfmt);
- for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
if (snd_mask_test(fmt, i)) {
uint sbits = snd_pcm_format_width(i);
int ppm;
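
The davinci-mcasp fix is a classic inclusive-bound bug: SNDRV_PCM_FORMAT_LAST is the index of the last valid PCM format, not a count, so a loop using "<" silently skips the final format when walking the format mask. A self-contained sketch of the corrected iteration (the helper itself is hypothetical):

#include <sound/pcm.h>
#include <sound/pcm_params.h>

/* Count how many formats are set in a hw_params format mask. */
static int example_count_formats(const struct snd_mask *fmt)
{
	int i, n = 0;

	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++)
		if (snd_mask_test(fmt, i))
			n++;

	return n;
}
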
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index dc97f4349e66..2998954a1c74 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -577,7 +577,6 @@ static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
dev->capability |= DWC_I2S_PLAY;
dev->play_dma_data.dt.addr = res->start + I2S_TXDMA;
dev->play_dma_data.dt.addr_width = bus_widths[idx];
- dev->play_dma_data.dt.chan_name = "TX";
dev->play_dma_data.dt.fifo_size = fifo_depth *
(fifo_width[idx2]) >> 8;
dev->play_dma_data.dt.maxburst = 16;
@@ -588,7 +587,6 @@ static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
dev->capability |= DWC_I2S_RECORD;
dev->capture_dma_data.dt.addr = res->start + I2S_RXDMA;
dev->capture_dma_data.dt.addr_width = bus_widths[idx];
- dev->capture_dma_data.dt.chan_name = "RX";
dev->capture_dma_data.dt.fifo_size = fifo_depth *
(fifo_width[idx2] >> 8);
dev->capture_dma_data.dt.maxburst = 16;
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index c1a0e01cb8e7..1d82f68305c3 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -726,7 +726,7 @@ static const struct regmap_config fsl_asrc_regmap_config = {
.readable_reg = fsl_asrc_readable_reg,
.volatile_reg = fsl_asrc_volatile_reg,
.writeable_reg = fsl_asrc_writeable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
/**
@@ -879,7 +879,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
}
}
- if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx35-asrc")) {
+ if (of_device_is_compatible(np, "fsl,imx35-asrc")) {
asrc_priv->channel_bits = 3;
clk_map[IN] = input_clk_map_imx35;
clk_map[OUT] = output_clk_map_imx35;
@@ -892,7 +892,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
ret = fsl_asrc_init(asrc_priv);
if (ret) {
dev_err(&pdev->dev, "failed to init asrc %d\n", ret);
- return -EINVAL;
+ return ret;
}
asrc_priv->channel_avail = 10;
@@ -901,14 +901,14 @@ static int fsl_asrc_probe(struct platform_device *pdev)
&asrc_priv->asrc_rate);
if (ret) {
dev_err(&pdev->dev, "failed to get output rate\n");
- return -EINVAL;
+ return ret;
}
ret = of_property_read_u32(np, "fsl,asrc-width",
&asrc_priv->asrc_width);
if (ret) {
dev_err(&pdev->dev, "failed to get output width\n");
- return -EINVAL;
+ return ret;
}
if (asrc_priv->asrc_width != 16 && asrc_priv->asrc_width != 24) {
@@ -933,8 +933,6 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return ret;
}
- dev_info(&pdev->dev, "driver registered\n");
-
return 0;
}
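
fsl_asrc above, and the ESAI, S/PDIF and SSI drivers below, switch their regmap cache from REGCACHE_RBTREE to REGCACHE_FLAT: these register maps are small and densely packed, so a flat array indexed by register number is simpler and faster than an rbtree meant for sparse maps. A minimal sketch of such a configuration with a hypothetical device (cache_type is the only point of interest):

#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0xac,		/* small, contiguous register block */
	.cache_type	= REGCACHE_FLAT,
};

The same fsl_asrc probe hunks also stop collapsing failures into -EINVAL; returning the original ret preserves the real error code for the caller.
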
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index ffc000bc1f15..dc30d780f874 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -322,7 +322,7 @@ static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *subs
return bytes_to_frames(substream->runtime, pair->pos);
}
-static struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
+static const struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = fsl_asrc_dma_hw_params,
.hw_free = fsl_asrc_dma_hw_free,
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 26a90e12ede4..38bfd46f4ad8 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -77,19 +77,19 @@ static irqreturn_t esai_isr(int irq, void *devid)
regmap_read(esai_priv->regmap, REG_ESAI_ESR, &esr);
if (esr & ESAI_ESR_TINIT_MASK)
- dev_dbg(&pdev->dev, "isr: Transmition Initialized\n");
+ dev_dbg(&pdev->dev, "isr: Transmission Initialized\n");
if (esr & ESAI_ESR_RFF_MASK)
dev_warn(&pdev->dev, "isr: Receiving overrun\n");
if (esr & ESAI_ESR_TFE_MASK)
- dev_warn(&pdev->dev, "isr: Transmition underrun\n");
+ dev_warn(&pdev->dev, "isr: Transmission underrun\n");
if (esr & ESAI_ESR_TLS_MASK)
dev_dbg(&pdev->dev, "isr: Just transmitted the last slot\n");
if (esr & ESAI_ESR_TDE_MASK)
- dev_dbg(&pdev->dev, "isr: Transmition data exception\n");
+ dev_dbg(&pdev->dev, "isr: Transmission data exception\n");
if (esr & ESAI_ESR_TED_MASK)
dev_dbg(&pdev->dev, "isr: Transmitting even slots\n");
@@ -781,7 +781,7 @@ static const struct regmap_config fsl_esai_regmap_config = {
.readable_reg = fsl_esai_readable_reg,
.volatile_reg = fsl_esai_volatile_reg,
.writeable_reg = fsl_esai_writeable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static int fsl_esai_probe(struct platform_device *pdev)
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 2147994ab46f..9fadf7e31c5f 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -801,8 +801,8 @@ static int fsl_sai_probe(struct platform_device *pdev)
sai->pdev = pdev;
- if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx6sx-sai") ||
- of_device_is_compatible(pdev->dev.of_node, "fsl,imx6ul-sai"))
+ if (of_device_is_compatible(np, "fsl,imx6sx-sai") ||
+ of_device_is_compatible(np, "fsl,imx6ul-sai"))
sai->sai_on_imx = true;
sai->is_lsb_first = of_property_read_bool(np, "lsb-first");
@@ -883,7 +883,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
}
if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
- of_device_is_compatible(pdev->dev.of_node, "fsl,imx6ul-sai")) {
+ of_device_is_compatible(np, "fsl,imx6ul-sai")) {
gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
if (IS_ERR(gpr)) {
dev_err(&pdev->dev, "cannot find iomuxc registers\n");
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index beec7934a265..1ff467c9598a 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1103,7 +1103,7 @@ static const struct regmap_config fsl_spdif_regmap_config = {
.readable_reg = fsl_spdif_readable_reg,
.volatile_reg = fsl_spdif_volatile_reg,
.writeable_reg = fsl_spdif_writeable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index bedec4a32581..50349437d961 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -182,7 +182,7 @@ static const struct regmap_config fsl_ssi_regconfig = {
.volatile_reg = fsl_ssi_volatile_reg,
.precious_reg = fsl_ssi_precious_reg,
.writeable_reg = fsl_ssi_writeable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
struct fsl_ssi_soc_data {
diff --git a/sound/soc/generic/Kconfig b/sound/soc/generic/Kconfig
index c01c5dd68601..d023959b8cd6 100644
--- a/sound/soc/generic/Kconfig
+++ b/sound/soc/generic/Kconfig
@@ -6,3 +6,11 @@ config SND_SIMPLE_CARD
select SND_SIMPLE_CARD_UTILS
help
This option enables generic simple sound card support
+
+config SND_SIMPLE_SCU_CARD
+ tristate "ASoC Simple SCU sound card support"
+ depends on OF
+ select SND_SIMPLE_CARD_UTILS
+ help
+ This option enables generic simple SCU sound card support.
+ It supports DPCM of multi CPU single Codec system.
diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile
index 2d53c8d70705..ee750f3023ba 100644
--- a/sound/soc/generic/Makefile
+++ b/sound/soc/generic/Makefile
@@ -1,5 +1,7 @@
snd-soc-simple-card-utils-objs := simple-card-utils.o
snd-soc-simple-card-objs := simple-card.o
+snd-soc-simple-scu-card-objs := simple-scu-card.o
obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o
obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
+obj-$(CONFIG_SND_SIMPLE_SCU_CARD) += snd-soc-simple-scu-card.o
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 9599de69a880..1cb39309f5d5 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <sound/simple_card_utils.h>
@@ -97,6 +98,146 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
+int asoc_simple_card_parse_clk(struct device_node *node,
+ struct device_node *dai_of_node,
+ struct asoc_simple_dai *simple_dai)
+{
+ struct clk *clk;
+ u32 val;
+
+ /*
+ * Parse dai->sysclk come from "clocks = <&xxx>"
+ * (if system has common clock)
+ * or "system-clock-frequency = <xxx>"
+ * or device's module clock.
+ */
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ simple_dai->sysclk = clk_get_rate(clk);
+ simple_dai->clk = clk;
+ } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
+ simple_dai->sysclk = val;
+ } else {
+ clk = of_clk_get(dai_of_node, 0);
+ if (!IS_ERR(clk))
+ simple_dai->sysclk = clk_get_rate(clk);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_clk);
+
+int asoc_simple_card_parse_dai(struct device_node *node,
+ struct device_node **dai_of_node,
+ const char **dai_name,
+ const char *list_name,
+ const char *cells_name,
+ int *is_single_link)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ if (!node)
+ return 0;
+
+ /*
+ * Get node via "sound-dai = <&phandle port>"
+ * it will be used as xxx_of_node on soc_bind_dai_link()
+ */
+ ret = of_parse_phandle_with_args(node, list_name, cells_name, 0, &args);
+ if (ret)
+ return ret;
+
+ /* Get dai->name */
+ if (dai_name) {
+ ret = snd_soc_of_get_dai_name(node, dai_name);
+ if (ret < 0)
+ return ret;
+ }
+
+ *dai_of_node = args.np;
+
+ if (is_single_link)
+ *is_single_link = !args.args_count;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_dai);
+
+int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
+ struct asoc_simple_dai *simple_dai)
+{
+ int ret;
+
+ if (simple_dai->sysclk) {
+ ret = snd_soc_dai_set_sysclk(dai, 0, simple_dai->sysclk, 0);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "simple-card: set_sysclk error\n");
+ return ret;
+ }
+ }
+
+ if (simple_dai->slots) {
+ ret = snd_soc_dai_set_tdm_slot(dai,
+ simple_dai->tx_slot_mask,
+ simple_dai->rx_slot_mask,
+ simple_dai->slots,
+ simple_dai->slot_width);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "simple-card: set_tdm_slot error\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
+
+int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
+{
+ if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name)
+ return -EINVAL;
+
+ /* Assumes platform == cpu */
+ if (!dai_link->platform_of_node)
+ dai_link->platform_of_node = dai_link->cpu_of_node;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_dailink);
+
+void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
+ int is_single_links)
+{
+ /*
+ * In soc_bind_dai_link() will check cpu name after
+ * of_node matching if dai_link has cpu_dai_name.
+ * but, it will never match if name was created by
+ * fmt_single_name() remove cpu_dai_name if cpu_args
+ * was 0. See:
+ * fmt_single_name()
+ * fmt_multiple_name()
+ */
+ if (is_single_links)
+ dai_link->cpu_dai_name = NULL;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_cpu);
+
+int asoc_simple_card_clean_reference(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int num_links;
+
+ for (num_links = 0, dai_link = card->dai_link;
+ num_links < card->num_links;
+ num_links++, dai_link++) {
+ of_node_put(dai_link->cpu_of_node);
+ of_node_put(dai_link->codec_of_node);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_clean_reference);
+
/* Module information */
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 43295f024982..f608f8d23f3d 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -37,13 +37,15 @@ struct simple_card_data {
unsigned int mclk_fs;
struct asoc_simple_jack hp_jack;
struct asoc_simple_jack mic_jack;
- struct snd_soc_dai_link dai_link[]; /* dynamically allocated */
+ struct snd_soc_dai_link *dai_link;
};
#define simple_priv_to_dev(priv) ((priv)->snd_card.dev)
-#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
-#define simple_priv_to_props(priv, i) ((priv)->dai_props + i)
+#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + (i))
+#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
+#define DAI "sound-dai"
+#define CELL "#sound-dai-cells"
#define PREFIX "simple-audio-card,"
#define asoc_simple_card_init_hp(card, sjack, prefix)\
@@ -112,13 +114,13 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
- &priv->dai_props[rtd->num];
+ simple_priv_to_props(priv, rtd->num);
int ret;
ret = clk_prepare_enable(dai_props->cpu_dai.clk);
if (ret)
return ret;
-
+
ret = clk_prepare_enable(dai_props->codec_dai.clk);
if (ret)
clk_disable_unprepare(dai_props->cpu_dai.clk);
@@ -131,7 +133,7 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
- &priv->dai_props[rtd->num];
+ simple_priv_to_props(priv, rtd->num);
clk_disable_unprepare(dai_props->cpu_dai.clk);
@@ -145,7 +147,8 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props = &priv->dai_props[rtd->num];
+ struct simple_dai_props *dai_props =
+ simple_priv_to_props(priv, rtd->num);
unsigned int mclk, mclk_fs = 0;
int ret = 0;
@@ -177,51 +180,20 @@ static struct snd_soc_ops asoc_simple_card_ops = {
.hw_params = asoc_simple_card_hw_params,
};
-static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
- struct asoc_simple_dai *set)
-{
- int ret;
-
- if (set->sysclk) {
- ret = snd_soc_dai_set_sysclk(dai, 0, set->sysclk, 0);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dai->dev, "simple-card: set_sysclk error\n");
- goto err;
- }
- }
-
- if (set->slots) {
- ret = snd_soc_dai_set_tdm_slot(dai,
- set->tx_slot_mask,
- set->rx_slot_mask,
- set->slots,
- set->slot_width);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dai->dev, "simple-card: set_tdm_slot error\n");
- goto err;
- }
- }
-
- ret = 0;
-
-err:
- return ret;
-}
-
static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *codec = rtd->codec_dai;
struct snd_soc_dai *cpu = rtd->cpu_dai;
- struct simple_dai_props *dai_props;
+ struct simple_dai_props *dai_props =
+ simple_priv_to_props(priv, rtd->num);
int ret;
- dai_props = &priv->dai_props[rtd->num];
- ret = __asoc_simple_card_dai_init(codec, &dai_props->codec_dai);
+ ret = asoc_simple_card_init_dai(codec, &dai_props->codec_dai);
if (ret < 0)
return ret;
- ret = __asoc_simple_card_dai_init(cpu, &dai_props->cpu_dai);
+ ret = asoc_simple_card_init_dai(cpu, &dai_props->cpu_dai);
if (ret < 0)
return ret;
@@ -236,78 +208,6 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int
-asoc_simple_card_sub_parse_of(struct device_node *np,
- struct asoc_simple_dai *dai,
- struct device_node **p_node,
- const char **name,
- int *args_count)
-{
- struct of_phandle_args args;
- struct clk *clk;
- u32 val;
- int ret;
-
- if (!np)
- return 0;
-
- /*
- * Get node via "sound-dai = <&phandle port>"
- * it will be used as xxx_of_node on soc_bind_dai_link()
- */
- ret = of_parse_phandle_with_args(np, "sound-dai",
- "#sound-dai-cells", 0, &args);
- if (ret)
- return ret;
-
- *p_node = args.np;
-
- if (args_count)
- *args_count = args.args_count;
-
- /* Get dai->name */
- if (name) {
- ret = snd_soc_of_get_dai_name(np, name);
- if (ret < 0)
- return ret;
- }
-
- if (!dai)
- return 0;
-
- /* Parse TDM slot */
- ret = snd_soc_of_parse_tdm_slot(np, &dai->tx_slot_mask,
- &dai->rx_slot_mask,
- &dai->slots, &dai->slot_width);
- if (ret)
- return ret;
-
- /*
- * Parse dai->sysclk come from "clocks = <&xxx>"
- * (if system has common clock)
- * or "system-clock-frequency = <xxx>"
- * or device's module clock.
- */
- if (of_property_read_bool(np, "clocks")) {
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- return ret;
- }
-
- dai->sysclk = clk_get_rate(clk);
- dai->clk = clk;
- } else if (!of_property_read_u32(np, "system-clock-frequency", &val)) {
- dai->sysclk = val;
- } else {
- clk = of_clk_get(args.np, 0);
- if (!IS_ERR(clk))
- dai->sysclk = clk_get_rate(clk);
- }
-
- return 0;
-}
-
static int asoc_simple_card_dai_link_of(struct device_node *node,
struct simple_card_data *priv,
int idx,
@@ -316,13 +216,14 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
+ struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
+ struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
struct device_node *cpu = NULL;
struct device_node *plat = NULL;
struct device_node *codec = NULL;
char prop[128];
char *prefix = "";
- int ret, cpu_args;
- u32 val;
+ int ret, single_cpu;
/* For single DAI link & old style of DT node */
if (is_top_level_node)
@@ -348,36 +249,46 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
if (ret < 0)
goto dai_link_of_err;
- if (!of_property_read_u32(node, "mclk-fs", &val))
- dai_props->mclk_fs = val;
+ of_property_read_u32(node, "mclk-fs", &dai_props->mclk_fs);
- ret = asoc_simple_card_sub_parse_of(cpu, &dai_props->cpu_dai,
- &dai_link->cpu_of_node,
- &dai_link->cpu_dai_name,
- &cpu_args);
+ ret = asoc_simple_card_parse_cpu(cpu, dai_link,
+ DAI, CELL, &single_cpu);
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_card_sub_parse_of(codec, &dai_props->codec_dai,
- &dai_link->codec_of_node,
- &dai_link->codec_dai_name, NULL);
+ ret = asoc_simple_card_parse_codec(codec, dai_link, DAI, CELL);
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_card_sub_parse_of(plat, NULL,
- &dai_link->platform_of_node,
- NULL, NULL);
+ ret = asoc_simple_card_parse_platform(plat, dai_link, DAI, CELL);
if (ret < 0)
goto dai_link_of_err;
- if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name) {
- ret = -EINVAL;
+ ret = snd_soc_of_parse_tdm_slot(cpu, &cpu_dai->tx_slot_mask,
+ &cpu_dai->rx_slot_mask,
+ &cpu_dai->slots,
+ &cpu_dai->slot_width);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = snd_soc_of_parse_tdm_slot(codec, &codec_dai->tx_slot_mask,
+ &codec_dai->rx_slot_mask,
+ &codec_dai->slots,
+ &codec_dai->slot_width);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_parse_clk_cpu(cpu, dai_link, cpu_dai);
+ if (ret < 0)
goto dai_link_of_err;
- }
- /* Assumes platform == cpu */
- if (!dai_link->platform_of_node)
- dai_link->platform_of_node = dai_link->cpu_of_node;
+ ret = asoc_simple_card_parse_clk_codec(codec, dai_link, codec_dai);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ goto dai_link_of_err;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
@@ -398,17 +309,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
dai_link->codec_dai_name,
dai_props->codec_dai.sysclk);
- /*
- * In soc_bind_dai_link() will check cpu name after
- * of_node matching if dai_link has cpu_dai_name.
- * but, it will never match if name was created by
- * fmt_single_name() remove cpu_dai_name if cpu_args
- * was 0. See:
- * fmt_single_name()
- * fmt_multiple_name()
- */
- if (!cpu_args)
- dai_link->cpu_dai_name = NULL;
+ asoc_simple_card_canonicalize_cpu(dai_link, single_cpu);
dai_link_of_err:
of_node_put(cpu);
@@ -417,22 +318,54 @@ dai_link_of_err:
return ret;
}
+static int asoc_simple_card_parse_aux_devs(struct device_node *node,
+ struct simple_card_data *priv)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *aux_node;
+ int i, n, len;
+
+ if (!of_find_property(node, PREFIX "aux-devs", &len))
+ return 0; /* Ok to have no aux-devs */
+
+ n = len / sizeof(__be32);
+ if (n <= 0)
+ return -EINVAL;
+
+ priv->snd_card.aux_dev = devm_kzalloc(dev,
+ n * sizeof(*priv->snd_card.aux_dev), GFP_KERNEL);
+ if (!priv->snd_card.aux_dev)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ aux_node = of_parse_phandle(node, PREFIX "aux-devs", i);
+ if (!aux_node)
+ return -EINVAL;
+ priv->snd_card.aux_dev[i].codec_of_node = aux_node;
+ }
+
+ priv->snd_card.num_aux_devs = n;
+ return 0;
+}
+
static int asoc_simple_card_parse_of(struct device_node *node,
struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
- u32 val;
+ struct device_node *dai_link;
int ret;
if (!node)
return -EINVAL;
+ dai_link = of_get_child_by_name(node, PREFIX "dai-link");
+
/* The off-codec widgets */
if (of_property_read_bool(node, PREFIX "widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(&priv->snd_card,
PREFIX "widgets");
if (ret)
- return ret;
+ goto card_parse_end;
}
/* DAPM routes */
@@ -440,16 +373,14 @@ static int asoc_simple_card_parse_of(struct device_node *node,
ret = snd_soc_of_parse_audio_routing(&priv->snd_card,
PREFIX "routing");
if (ret)
- return ret;
+ goto card_parse_end;
}
/* Factor to mclk, used in hw_params() */
- ret = of_property_read_u32(node, PREFIX "mclk-fs", &val);
- if (ret == 0)
- priv->mclk_fs = val;
+ of_property_read_u32(node, PREFIX "mclk-fs", &priv->mclk_fs);
/* Single/Muti DAI link(s) & New style of DT node */
- if (of_get_child_by_name(node, PREFIX "dai-link")) {
+ if (dai_link) {
struct device_node *np = NULL;
int i = 0;
@@ -459,7 +390,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
i, false);
if (ret < 0) {
of_node_put(np);
- return ret;
+ goto card_parse_end;
}
i++;
}
@@ -467,66 +398,55 @@ static int asoc_simple_card_parse_of(struct device_node *node,
/* For single DAI link & old style of DT node */
ret = asoc_simple_card_dai_link_of(node, priv, 0, true);
if (ret < 0)
- return ret;
+ goto card_parse_end;
}
ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
- if (ret)
- return ret;
+ if (ret < 0)
+ goto card_parse_end;
- return 0;
-}
+ ret = asoc_simple_card_parse_aux_devs(node, priv);
-/* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *dai_link;
- int num_links;
+card_parse_end:
+ of_node_put(dai_link);
- for (num_links = 0, dai_link = card->dai_link;
- num_links < card->num_links;
- num_links++, dai_link++) {
- of_node_put(dai_link->cpu_of_node);
- of_node_put(dai_link->codec_of_node);
- }
- return 0;
+ return ret;
}
static int asoc_simple_card_probe(struct platform_device *pdev)
{
struct simple_card_data *priv;
struct snd_soc_dai_link *dai_link;
+ struct simple_dai_props *dai_props;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- int num_links, ret;
+ int num, ret;
/* Get the number of DAI links */
if (np && of_get_child_by_name(np, PREFIX "dai-link"))
- num_links = of_get_child_count(np);
+ num = of_get_child_count(np);
else
- num_links = 1;
+ num = 1;
/* Allocate the private data and the DAI link array */
- priv = devm_kzalloc(dev,
- sizeof(*priv) + sizeof(*dai_link) * num_links,
- GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /* Init snd_soc_card */
- priv->snd_card.owner = THIS_MODULE;
- priv->snd_card.dev = dev;
- dai_link = priv->dai_link;
- priv->snd_card.dai_link = dai_link;
- priv->snd_card.num_links = num_links;
-
- /* Get room for the other properties */
- priv->dai_props = devm_kzalloc(dev,
- sizeof(*priv->dai_props) * num_links,
- GFP_KERNEL);
- if (!priv->dai_props)
+ dai_props = devm_kzalloc(dev, sizeof(*dai_props) * num, GFP_KERNEL);
+ dai_link = devm_kzalloc(dev, sizeof(*dai_link) * num, GFP_KERNEL);
+ if (!dai_props || !dai_link)
return -ENOMEM;
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+
+ /* Init snd_soc_card */
+ priv->snd_card.owner = THIS_MODULE;
+ priv->snd_card.dev = dev;
+ priv->snd_card.dai_link = priv->dai_link;
+ priv->snd_card.num_links = num;
+
if (np && of_device_is_available(np)) {
ret = asoc_simple_card_parse_of(np, priv);
@@ -567,7 +487,6 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
sizeof(priv->dai_props->cpu_dai));
memcpy(&priv->dai_props->codec_dai, &cinfo->codec_dai,
sizeof(priv->dai_props->codec_dai));
-
}
snd_soc_card_set_drvdata(&priv->snd_card, priv);
@@ -575,9 +494,9 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
if (ret >= 0)
return ret;
-
err:
- asoc_simple_card_unref(&priv->snd_card);
+ asoc_simple_card_clean_reference(&priv->snd_card);
+
return ret;
}
@@ -589,7 +508,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
asoc_simple_card_remove_jack(&priv->hp_jack);
asoc_simple_card_remove_jack(&priv->mic_jack);
- return asoc_simple_card_unref(card);
+ return asoc_simple_card_clean_reference(card);
}
static const struct of_device_id asoc_simple_of_match[] = {
@@ -611,6 +530,6 @@ static struct platform_driver asoc_simple_card = {
module_platform_driver(asoc_simple_card);
MODULE_ALIAS("platform:asoc-simple-card");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ASoC Simple Sound Card");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
new file mode 100644
index 000000000000..b9973a56bcb0
--- /dev/null
+++ b/sound/soc/generic/simple-scu-card.c
@@ -0,0 +1,345 @@
+/*
+ * ASoC simple SCU sound card support
+ *
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on ${LINUX}/sound/soc/generic/simple-card.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/simple_card_utils.h>
+
+struct asoc_simple_card_priv {
+ struct snd_soc_card snd_card;
+ struct snd_soc_codec_conf codec_conf;
+ struct asoc_simple_dai *dai_props;
+ struct snd_soc_dai_link *dai_link;
+ u32 convert_rate;
+ u32 convert_channels;
+};
+
+#define simple_priv_to_dev(priv) ((priv)->snd_card.dev)
+#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + (i))
+#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
+
+#define DAI "sound-dai"
+#define CELL "#sound-dai-cells"
+#define PREFIX "simple-audio-card,"
+
+static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct asoc_simple_dai *dai_props =
+ simple_priv_to_props(priv, rtd->num);
+
+ return clk_prepare_enable(dai_props->clk);
+}
+
+static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct asoc_simple_dai *dai_props =
+ simple_priv_to_props(priv, rtd->num);
+
+ clk_disable_unprepare(dai_props->clk);
+}
+
+static struct snd_soc_ops asoc_simple_card_ops = {
+ .startup = asoc_simple_card_startup,
+ .shutdown = asoc_simple_card_shutdown,
+};
+
+static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dai_props;
+ int num = rtd->num;
+
+ dai_link = simple_priv_to_link(priv, num);
+ dai_props = simple_priv_to_props(priv, num);
+ dai = dai_link->dynamic ?
+ rtd->cpu_dai :
+ rtd->codec_dai;
+
+ return asoc_simple_card_init_dai(dai, dai_props);
+}
+
+static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ if (priv->convert_rate)
+ rate->min =
+ rate->max = priv->convert_rate;
+
+ if (priv->convert_channels)
+ channels->min =
+ channels->max = priv->convert_channels;
+
+ return 0;
+}
+
+static int asoc_simple_card_parse_links(struct device_node *np,
+ struct asoc_simple_card_priv *priv,
+ unsigned int daifmt,
+ int idx, bool is_fe)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
+ struct asoc_simple_dai *dai_props = simple_priv_to_props(priv, idx);
+ int ret;
+
+ if (is_fe) {
+ int is_single_links = 0;
+
+ /* BE is dummy */
+ dai_link->codec_of_node = NULL;
+ dai_link->codec_dai_name = "snd-soc-dummy-dai";
+ dai_link->codec_name = "snd-soc-dummy";
+
+ /* FE settings */
+ dai_link->dynamic = 1;
+ dai_link->dpcm_merged_format = 1;
+
+ ret = asoc_simple_card_parse_cpu(np, dai_link, DAI, CELL,
+ &is_single_links);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_cpu(np, dai_link, dai_props);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
+
+ asoc_simple_card_canonicalize_cpu(dai_link, is_single_links);
+ } else {
+ /* FE is dummy */
+ dai_link->cpu_of_node = NULL;
+ dai_link->cpu_dai_name = "snd-soc-dummy-dai";
+ dai_link->cpu_name = "snd-soc-dummy";
+
+ /* BE settings */
+ dai_link->no_pcm = 1;
+ dai_link->be_hw_params_fixup = asoc_simple_card_be_hw_params_fixup;
+
+ ret = asoc_simple_card_parse_codec(np, dai_link, DAI, CELL);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_codec(np, dai_link, dai_props);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ dai_link->codec_dai_name);
+ if (ret < 0)
+ return ret;
+
+ snd_soc_of_parse_audio_prefix(&priv->snd_card,
+ &priv->codec_conf,
+ dai_link->codec_of_node,
+ PREFIX "prefix");
+ }
+
+ ret = snd_soc_of_parse_tdm_slot(np,
+ &dai_props->tx_slot_mask,
+ &dai_props->rx_slot_mask,
+ &dai_props->slots,
+ &dai_props->slot_width);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ return ret;
+
+ dai_link->dai_fmt = daifmt;
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &asoc_simple_card_ops;
+ dai_link->init = asoc_simple_card_dai_init;
+
+ dev_dbg(dev, "\t%s / %04x / %d\n",
+ dai_link->name,
+ dai_link->dai_fmt,
+ dai_props->sysclk);
+
+ return 0;
+}
+
+static int asoc_simple_card_dai_link_of(struct device_node *node,
+ struct asoc_simple_card_priv *priv)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *np;
+ unsigned int daifmt = 0;
+ int ret, i;
+ bool is_fe;
+
+ /* find 1st codec */
+ np = of_get_child_by_name(node, PREFIX "codec");
+ if (!np)
+ return -ENODEV;
+
+ ret = asoc_simple_card_parse_daifmt(dev, node, np,
+ PREFIX, &daifmt);
+ if (ret < 0)
+ return ret;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ is_fe = false;
+ if (strcmp(np->name, PREFIX "cpu") == 0)
+ is_fe = true;
+
+ ret = asoc_simple_card_parse_links(np, priv, daifmt, i, is_fe);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ return 0;
+}
+
+static int asoc_simple_card_parse_of(struct device_node *node,
+ struct asoc_simple_card_priv *priv,
+ struct device *dev)
+{
+ struct asoc_simple_dai *props;
+ struct snd_soc_dai_link *links;
+ int ret;
+ int num;
+
+ if (!node)
+ return -EINVAL;
+
+ num = of_get_child_count(node);
+ props = devm_kzalloc(dev, sizeof(*props) * num, GFP_KERNEL);
+ links = devm_kzalloc(dev, sizeof(*links) * num, GFP_KERNEL);
+ if (!props || !links)
+ return -ENOMEM;
+
+ priv->dai_props = props;
+ priv->dai_link = links;
+
+ /* Init snd_soc_card */
+ priv->snd_card.owner = THIS_MODULE;
+ priv->snd_card.dev = dev;
+ priv->snd_card.dai_link = priv->dai_link;
+ priv->snd_card.num_links = num;
+ priv->snd_card.codec_conf = &priv->codec_conf;
+ priv->snd_card.num_configs = 1;
+
+ ret = snd_soc_of_parse_audio_routing(&priv->snd_card, PREFIX "routing");
+ if (ret < 0)
+ return ret;
+
+ /* sampling rate convert */
+ of_property_read_u32(node, PREFIX "convert-rate", &priv->convert_rate);
+
+ /* channels transfer */
+ of_property_read_u32(node, PREFIX "convert-channels", &priv->convert_channels);
+
+ ret = asoc_simple_card_dai_link_of(node, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "New card: %s\n",
+ priv->snd_card.name ? priv->snd_card.name : "");
+ dev_dbg(dev, "convert_rate %d\n", priv->convert_rate);
+ dev_dbg(dev, "convert_channels %d\n", priv->convert_channels);
+
+ return 0;
+}
+
+static int asoc_simple_card_probe(struct platform_device *pdev)
+{
+ struct asoc_simple_card_priv *priv;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* Allocate the private data */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = asoc_simple_card_parse_of(np, priv, dev);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "parse error %d\n", ret);
+ goto err;
+ }
+
+ snd_soc_card_set_drvdata(&priv->snd_card, priv);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
+ if (ret >= 0)
+ return ret;
+err:
+ asoc_simple_card_clean_reference(&priv->snd_card);
+
+ return ret;
+}
+
+static int asoc_simple_card_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ return asoc_simple_card_clean_reference(card);
+}
+
+static const struct of_device_id asoc_simple_of_match[] = {
+ { .compatible = "renesas,rsrc-card", },
+ { .compatible = "simple-scu-audio-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
+
+static struct platform_driver asoc_simple_card = {
+ .driver = {
+ .name = "simple-scu-audio-card",
+ .of_match_table = asoc_simple_of_match,
+ },
+ .probe = asoc_simple_card_probe,
+ .remove = asoc_simple_card_remove,
+};
+
+module_platform_driver(asoc_simple_card);
+
+MODULE_ALIAS("platform:asoc-simple-scu-card");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC Simple SCU Sound Card");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/img/pistachio-internal-dac.c b/sound/soc/img/pistachio-internal-dac.c
index 162a0fd68c7b..53e11c6d4e22 100644
--- a/sound/soc/img/pistachio-internal-dac.c
+++ b/sound/soc/img/pistachio-internal-dac.c
@@ -134,12 +134,14 @@ static int pistachio_internal_dac_codec_probe(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver pistachio_internal_dac_driver = {
.probe = pistachio_internal_dac_codec_probe,
.idle_bias_off = true,
- .controls = pistachio_internal_dac_snd_controls,
- .num_controls = ARRAY_SIZE(pistachio_internal_dac_snd_controls),
- .dapm_widgets = pistachio_internal_dac_widgets,
- .num_dapm_widgets = ARRAY_SIZE(pistachio_internal_dac_widgets),
- .dapm_routes = pistachio_internal_dac_routes,
- .num_dapm_routes = ARRAY_SIZE(pistachio_internal_dac_routes),
+ .component_driver = {
+ .controls = pistachio_internal_dac_snd_controls,
+ .num_controls = ARRAY_SIZE(pistachio_internal_dac_snd_controls),
+ .dapm_widgets = pistachio_internal_dac_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pistachio_internal_dac_widgets),
+ .dapm_routes = pistachio_internal_dac_routes,
+ .num_dapm_routes = ARRAY_SIZE(pistachio_internal_dac_routes),
+ },
};
static int pistachio_internal_dac_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index a20c3dfbcb5d..26eb5a0a5575 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -25,6 +25,7 @@ config SND_SST_IPC_ACPI
tristate
select SND_SST_IPC
select SND_SOC_INTEL_SST
+ select IOSF_MBI
config SND_SOC_INTEL_SST
tristate
@@ -120,6 +121,17 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
This adds audio driver for Intel Baytrail platform based boards
with the MAX98090 audio codec.
+config SND_SOC_INTEL_BDW_RT5677_MACH
+ tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
+ depends on X86_INTEL_LPSS && GPIOLIB && I2C && DW_DMAC
+ depends on DW_DMAC_CORE=y
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT5677
+ help
+ This adds support for Intel Broadwell platform based boards with
+ the RT5677 audio codec.
+
config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 98720a93de8a..0838478c4c3f 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -1,4 +1,4 @@
-/*
+ /*
* sst-atom-controls.c - Intel MID Platform driver DPCM ALSA controls for Mrfld
*
* Copyright (C) 2013-14 Intel Corp
@@ -534,6 +534,7 @@ static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10,
/* Look up table to convert MIXER SW bit regs to SWM inputs */
static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = {
+ [SST_IP_MODEM] = SST_SWM_IN_MODEM,
[SST_IP_CODEC0] = SST_SWM_IN_CODEC0,
[SST_IP_CODEC1] = SST_SWM_IN_CODEC1,
[SST_IP_LOOP0] = SST_SWM_IN_SPROT_LOOP,
@@ -674,6 +675,7 @@ static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
/* SBA mixers - 16 inputs */
#define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name) \
static const struct snd_kcontrol_new kctl_name[] = { \
+ SOC_DAPM_SINGLE("modem_in Switch", SND_SOC_NOPM, SST_IP_MODEM, 1, 0), \
SOC_DAPM_SINGLE("codec_in0 Switch", SND_SOC_NOPM, SST_IP_CODEC0, 1, 0), \
SOC_DAPM_SINGLE("codec_in1 Switch", SND_SOC_NOPM, SST_IP_CODEC1, 1, 0), \
SOC_DAPM_SINGLE("sprot_loop_in Switch", SND_SOC_NOPM, SST_IP_LOOP0, 1, 0), \
@@ -684,6 +686,7 @@ static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
}
#define SST_SBA_MIXER_GRAPH_MAP(mix_name) \
+ { mix_name, "modem_in Switch", "modem_in" }, \
{ mix_name, "codec_in0 Switch", "codec_in0" }, \
{ mix_name, "codec_in1 Switch", "codec_in1" }, \
{ mix_name, "sprot_loop_in Switch", "sprot_loop_in" }, \
@@ -713,6 +716,7 @@ SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls);
SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_voip_controls);
SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls);
SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_modem_controls);
/*
* sst_handle_vb_timer - Start/Stop the DSP scheduler
@@ -931,17 +935,26 @@ void sst_fill_ssp_defaults(struct snd_soc_dai *dai)
int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable)
{
struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
- const struct sst_ssp_config *config;
+ int ssp_id;
dev_info(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
+ if (strcmp(id, "ssp0-port") == 0)
+ ssp_id = SSP_MODEM;
+ else if (strcmp(id, "ssp2-port") == 0)
+ ssp_id = SSP_CODEC;
+ else {
+ dev_dbg(dai->dev, "port %s is not supported\n", id);
+ return -1;
+ }
+
SST_FILL_DEFAULT_DESTINATION(drv->ssp_cmd.header.dst);
drv->ssp_cmd.header.command_id = SBA_HW_SET_SSP;
drv->ssp_cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp)
- sizeof(struct sst_dsp_header);
- config = &sst_ssp_configs;
- dev_dbg(dai->dev, "ssp_id: %u\n", config->ssp_id);
+ drv->ssp_cmd.selection = ssp_id;
+ dev_dbg(dai->dev, "ssp_id: %u\n", ssp_id);
if (enable)
drv->ssp_cmd.switch_state = SST_SWITCH_ON;
@@ -1047,8 +1060,10 @@ static int sst_set_media_loop(struct snd_soc_dapm_widget *w,
}
static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+ SST_AIF_IN("modem_in", sst_set_be_modules),
SST_AIF_IN("codec_in0", sst_set_be_modules),
SST_AIF_IN("codec_in1", sst_set_be_modules),
+ SST_AIF_OUT("modem_out", sst_set_be_modules),
SST_AIF_OUT("codec_out0", sst_set_be_modules),
SST_AIF_OUT("codec_out1", sst_set_be_modules),
@@ -1103,6 +1118,9 @@ static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
sst_mix_codec0_controls, sst_swm_mixer_event),
SST_SWM_MIXER("codec_out1 mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_CODEC1,
sst_mix_codec1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("modem_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MODEM,
+ sst_mix_modem_controls, sst_swm_mixer_event),
+
};
static const struct snd_soc_dapm_route intercon[] = {
@@ -1148,6 +1166,9 @@ static const struct snd_soc_dapm_route intercon[] = {
SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"),
{"codec_out1", NULL, "codec_out1 mix 0"},
SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"),
+ {"modem_out", NULL, "modem_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("modem_out mix 0"),
+
};
static const char * const slot_names[] = {
@@ -1217,6 +1238,9 @@ static const struct snd_kcontrol_new sst_gain_controls[] = {
SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[13]),
SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[14]),
SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[15]),
+ SST_GAIN("modem_in", SST_PATH_INDEX_MODEM_IN, SST_TASK_SBA, 0, &sst_gains[16]),
+ SST_GAIN("modem_out", SST_PATH_INDEX_MODEM_OUT, SST_TASK_SBA, 0, &sst_gains[17]),
+
};
#define SST_GAIN_NUM_CONTROLS 3
diff --git a/sound/soc/intel/atom/sst-atom-controls.h b/sound/soc/intel/atom/sst-atom-controls.h
index e0113112f668..351d81469685 100644
--- a/sound/soc/intel/atom/sst-atom-controls.h
+++ b/sound/soc/intel/atom/sst-atom-controls.h
@@ -35,6 +35,8 @@ enum {
/* define a bit for each mixer input */
#define SST_MIX_IP(x) (x)
+#define SST_IP_MODEM SST_MIX_IP(0)
+#define SST_IP_BT SST_MIX_IP(1)
#define SST_IP_CODEC0 SST_MIX_IP(2)
#define SST_IP_CODEC1 SST_MIX_IP(3)
#define SST_IP_LOOP0 SST_MIX_IP(4)
@@ -63,6 +65,7 @@ enum {
* Audio DSP Path Ids. Specified by the audio DSP FW
*/
enum sst_path_index {
+ SST_PATH_INDEX_MODEM_OUT = (0x00 << SST_PATH_ID_SHIFT),
SST_PATH_INDEX_CODEC_OUT0 = (0x02 << SST_PATH_ID_SHIFT),
SST_PATH_INDEX_CODEC_OUT1 = (0x03 << SST_PATH_ID_SHIFT),
@@ -80,6 +83,7 @@ enum sst_path_index {
/* Start of input paths */
+ SST_PATH_INDEX_MODEM_IN = (0x80 << SST_PATH_ID_SHIFT),
SST_PATH_INDEX_CODEC_IN0 = (0x82 << SST_PATH_ID_SHIFT),
SST_PATH_INDEX_CODEC_IN1 = (0x83 << SST_PATH_ID_SHIFT),
@@ -105,6 +109,7 @@ enum sst_path_index {
* path IDs
*/
enum sst_swm_inputs {
+ SST_SWM_IN_MODEM = (SST_PATH_INDEX_MODEM_IN | SST_DEFAULT_CELL_NBR),
SST_SWM_IN_CODEC0 = (SST_PATH_INDEX_CODEC_IN0 | SST_DEFAULT_CELL_NBR),
SST_SWM_IN_CODEC1 = (SST_PATH_INDEX_CODEC_IN1 | SST_DEFAULT_CELL_NBR),
SST_SWM_IN_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_IN | SST_DEFAULT_CELL_NBR),
@@ -124,6 +129,7 @@ enum sst_swm_inputs {
* path IDs
*/
enum sst_swm_outputs {
+ SST_SWM_OUT_MODEM = (SST_PATH_INDEX_MODEM_OUT | SST_DEFAULT_CELL_NBR),
SST_SWM_OUT_CODEC0 = (SST_PATH_INDEX_CODEC_OUT0 | SST_DEFAULT_CELL_NBR),
SST_SWM_OUT_CODEC1 = (SST_PATH_INDEX_CODEC_OUT1 | SST_DEFAULT_CELL_NBR),
SST_SWM_OUT_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_OUT | SST_DEFAULT_CELL_NBR),
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 52ed434cbca6..25c6d87c818e 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -670,7 +670,7 @@ static snd_pcm_uframes_t sst_platform_pcm_pointer
return str_info->buffer_ptr;
}
-static struct snd_pcm_ops sst_platform_ops = {
+static const struct snd_pcm_ops sst_platform_ops = {
.open = sst_platform_open,
.ioctl = snd_pcm_lib_ioctl,
.trigger = sst_platform_pcm_trigger,
diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index a4b458e77089..9b6e27385dc9 100644
--- a/sound/soc/intel/atom/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -190,7 +190,8 @@ int sst_driver_ops(struct intel_sst_drv *sst)
default:
dev_err(sst->dev,
- "SST Driver capablities missing for dev_id: %x", sst->dev_id);
+ "SST Driver capabilities missing for dev_id: %x",
+ sst->dev_id);
return -EINVAL;
};
}
@@ -441,7 +442,7 @@ static int intel_sst_suspend(struct device *dev)
struct stream_info *stream = &ctx->streams[i];
if (stream->status == STREAM_RUNNING) {
- dev_err(dev, "stream %d is running, cant susupend, abort\n", i);
+ dev_err(dev, "stream %d is running, can't suspend, abort\n", i);
return -EBUSY;
}
}
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 4d3184971227..ba5c0d71720a 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -39,6 +39,8 @@
#include <acpi/platform/aclinux.h>
#include <acpi/actypes.h>
#include <acpi/acpi_bus.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
#include "../sst-mfld-platform.h"
#include "../../common/sst-dsp.h"
#include "../../common/sst-acpi.h"
@@ -113,6 +115,28 @@ static const struct sst_res_info byt_rvp_res_info = {
.acpi_ipc_irq_index = 5,
};
+/* BYTCR has different BIOS from BYT */
+static const struct sst_res_info bytcr_res_info = {
+ .shim_offset = 0x140000,
+ .shim_size = 0x000100,
+ .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR,
+ .ssp0_offset = 0xa0000,
+ .ssp0_size = 0x1000,
+ .dma0_offset = 0x98000,
+ .dma0_size = 0x4000,
+ .dma1_offset = 0x9c000,
+ .dma1_size = 0x4000,
+ .iram_offset = 0x0c0000,
+ .iram_size = 0x14000,
+ .dram_offset = 0x100000,
+ .dram_size = 0x28000,
+ .mbox_offset = 0x144000,
+ .mbox_size = 0x1000,
+ .acpi_lpe_res_index = 0,
+ .acpi_ddr_index = 2,
+ .acpi_ipc_irq_index = 0
+};
+
static struct sst_platform_info byt_rvp_platform_data = {
.probe_data = &byt_fwparse_info,
.ipc_info = &byt_ipc_info,
@@ -142,7 +166,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
ctx->pdata->res_info->acpi_lpe_res_index);
if (!rsrc) {
- dev_err(ctx->dev, "Invalid SHIM base from IFWI");
+ dev_err(ctx->dev, "Invalid SHIM base from IFWI\n");
return -EIO;
}
dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start,
@@ -154,7 +178,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
ctx->pdata->res_info->iram_size);
if (!ctx->iram) {
- dev_err(ctx->dev, "unable to map IRAM");
+ dev_err(ctx->dev, "unable to map IRAM\n");
return -EIO;
}
@@ -164,7 +188,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
ctx->pdata->res_info->dram_size);
if (!ctx->dram) {
- dev_err(ctx->dev, "unable to map DRAM");
+ dev_err(ctx->dev, "unable to map DRAM\n");
return -EIO;
}
@@ -173,7 +197,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
ctx->pdata->res_info->shim_size);
if (!ctx->shim) {
- dev_err(ctx->dev, "unable to map SHIM");
+ dev_err(ctx->dev, "unable to map SHIM\n");
return -EIO;
}
@@ -186,7 +210,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
ctx->pdata->res_info->mbox_size);
if (!ctx->mailbox) {
- dev_err(ctx->dev, "unable to map mailbox");
+ dev_err(ctx->dev, "unable to map mailbox\n");
return -EIO;
}
@@ -196,7 +220,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
ctx->pdata->res_info->acpi_ddr_index);
if (!rsrc) {
- dev_err(ctx->dev, "Invalid DDR base from IFWI");
+ dev_err(ctx->dev, "Invalid DDR base from IFWI\n");
return -EIO;
}
ctx->ddr_base = rsrc->start;
@@ -205,7 +229,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
resource_size(rsrc));
if (!ctx->ddr) {
- dev_err(ctx->dev, "unable to map DDR");
+ dev_err(ctx->dev, "unable to map DDR\n");
return -EIO;
}
@@ -215,6 +239,46 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
return 0;
}
+
+static int is_byt_cr(struct device *dev, bool *bytcr)
+{
+ int status = 0;
+
+ if (IS_ENABLED(CONFIG_IOSF_MBI)) {
+ static const struct x86_cpu_id cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
+ {}
+ };
+ u32 bios_status;
+
+ if (!x86_match_cpu(cpu_ids) || !iosf_mbi_available()) {
+ /* bail silently */
+ return status;
+ }
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, /* 0x04 PUNIT */
+ MBI_REG_READ, /* 0x10 */
+ 0x006, /* BIOS_CONFIG */
+ &bios_status);
+
+ if (status) {
+ dev_err(dev, "could not read PUNIT BIOS_CONFIG\n");
+ } else {
+ /* bits 26:27 mirror PMIC options */
+ bios_status = (bios_status >> 26) & 3;
+
+ if ((bios_status == 1) || (bios_status == 3))
+ *bytcr = true;
+ else
+ dev_info(dev, "BYT-CR not detected\n");
+ }
+ } else {
+ dev_info(dev, "IOSF_MBI not enabled, no BYT-CR detection\n");
+ }
+ return status;
+}
+
+
static int sst_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -226,11 +290,12 @@ static int sst_acpi_probe(struct platform_device *pdev)
struct platform_device *plat_dev;
struct sst_platform_info *pdata;
unsigned int dev_id;
+ bool bytcr = false;
id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!id)
return -ENODEV;
- dev_dbg(dev, "for %s", id->id);
+ dev_dbg(dev, "for %s\n", id->id);
mach = (struct sst_acpi_mach *)id->driver_data;
mach = sst_acpi_find_machine(mach);
@@ -251,6 +316,18 @@ static int sst_acpi_probe(struct platform_device *pdev)
dev_dbg(dev, "ACPI device id: %x\n", dev_id);
+ ret = sst_alloc_drv_context(&ctx, dev, dev_id);
+ if (ret < 0)
+ return ret;
+
+ ret = is_byt_cr(dev, &bytcr);
+ if (!((ret < 0) || (bytcr == false))) {
+ dev_info(dev, "Detected Baytrail-CR platform\n");
+
+ /* override resource info */
+ byt_rvp_platform_data.res_info = &bytcr_res_info;
+ }
+
plat_dev = platform_device_register_data(dev, pdata->platform, -1,
NULL, 0);
if (IS_ERR(plat_dev)) {
@@ -271,10 +348,6 @@ static int sst_acpi_probe(struct platform_device *pdev)
return PTR_ERR(mdev);
}
- ret = sst_alloc_drv_context(&ctx, dev, dev_id);
- if (ret < 0)
- return ret;
-
/* Fill sst platform data */
ctx->pdata = pdata;
strcpy(ctx->firmware_name, mach->fw_filename);
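[Editor's note: is_byt_cr() above gates the IOSF sideband read on a CPU model match. A stripped-down sketch of that gate, assuming the same Bay Trail family/model pair; only x86_match_cpu(), iosf_mbi_available() and iosf_mbi_read() are real kernel APIs, the wrapper itself is illustrative.]

#include <linux/types.h>
#include <asm/cpu_device_id.h>
#include <asm/iosf_mbi.h>

static bool example_is_baytrail_cr(void)
{
	static const struct x86_cpu_id byt_ids[] = {
		{ X86_VENDOR_INTEL, 6, 55 },	/* Valleyview / Bay Trail */
		{}
	};
	u32 bios_status;

	if (!x86_match_cpu(byt_ids) || !iosf_mbi_available())
		return false;

	/* PUNIT BIOS_CONFIG register; bits 26:27 mirror the PMIC options */
	if (iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, 0x006, &bios_status))
		return false;

	bios_status = (bios_status >> 26) & 3;
	return bios_status == 1 || bios_status == 3;
}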
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 8afa6fe7b0b0..bfc889950bb2 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -267,6 +267,9 @@ static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
"Period elapsed rcvd for pipe id 0x%x\n",
pipe_id);
stream = &sst_drv_ctx->streams[str_id];
+ /* If stream is dropped, skip processing this message*/
+ if (stream->status == STREAM_INIT)
+ break;
if (stream->period_elapsed)
stream->period_elapsed(stream->pcm_substream);
if (stream->compr_cb)
diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
index adb32fefd693..b1e6b8f34a6a 100644
--- a/sound/soc/intel/atom/sst/sst_pvt.c
+++ b/sound/soc/intel/atom/sst/sst_pvt.c
@@ -279,17 +279,15 @@ int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
if (response) {
ret = sst_wait_timeout(sst, block);
- if (ret < 0) {
+ if (ret < 0)
goto out;
- } else if(block->data) {
- if (!data)
- goto out;
- *data = kzalloc(block->size, GFP_KERNEL);
- if (!(*data)) {
+
+ if (data && block->data) {
+ *data = kmemdup(block->data, block->size, GFP_KERNEL);
+ if (!*data) {
ret = -ENOMEM;
goto out;
- } else
- memcpy(data, (void *) block->data, block->size);
+ }
}
}
out:
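[Editor's note: the sst_prepare_and_post_msg() change above replaces an open-coded kzalloc()+memcpy() pair (which also copied to the wrong pointer) with kmemdup(). A one-line illustration of the idiom, with a made-up wrapper name.]

#include <linux/slab.h>
#include <linux/string.h>

/* duplicate a DSP reply buffer; returns NULL on allocation failure */
static void *example_copy_reply(const void *reply, size_t size)
{
	return kmemdup(reply, size, GFP_KERNEL);
}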
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 5bbaa667bec1..c8455b47388b 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -184,15 +184,9 @@ struct sst_byt {
static inline u64 sst_byt_header(int msg_id, int data, bool large, int str_id)
{
- u64 header;
-
- header = IPC_HEADER_MSG_ID(msg_id) |
- IPC_HEADER_STR_ID(str_id) |
- IPC_HEADER_LARGE(large) |
- IPC_HEADER_DATA(data) |
- SST_BYT_IPCX_BUSY;
-
- return header;
+ return IPC_HEADER_MSG_ID(msg_id) | IPC_HEADER_STR_ID(str_id) |
+ IPC_HEADER_LARGE(large) | IPC_HEADER_DATA(data) |
+ SST_BYT_IPCX_BUSY;
}
static inline u16 sst_byt_header_msg_id(u64 header)
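[Editor's note: sst_byt_header() above simply ORs the field macros together; the real field layout lives elsewhere in sst-baytrail-ipc.c and is not shown in this hunk. A generic sketch of this style of u64 header packing, with made-up field positions that are NOT the Baytrail IPC layout.]

#include <linux/types.h>

#define EX_MSG_ID(x)	((u64)(x) & 0xff)		/* bits 7:0, example only */
#define EX_STR_ID(x)	(((u64)(x) & 0x1f) << 8)	/* bits 12:8 */
#define EX_LARGE(x)	(((u64)(x) & 0x1) << 13)	/* bit 13 */
#define EX_DATA(x)	(((u64)(x) & 0x3fff) << 16)	/* bits 29:16 */
#define EX_BUSY		((u64)1 << 63)			/* busy/handshake bit */

static inline u64 example_ipc_header(int msg_id, int data, bool large, int str_id)
{
	return EX_MSG_ID(msg_id) | EX_STR_ID(str_id) |
	       EX_LARGE(large) | EX_DATA(data) | EX_BUSY;
}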
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index dac03a06bfd8..5639f10774e6 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -1,6 +1,7 @@
snd-soc-sst-haswell-objs := haswell.o
snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
+snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
+obj-$(CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH) += snd-soc-sst-bdw-rt5677-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH) += snd-soc-sst-bytcr-rt5651.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
new file mode 100644
index 000000000000..547e6705bf6d
--- /dev/null
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -0,0 +1,347 @@
+/*
+ * ASoC machine driver for Intel Broadwell platforms with RT5677 codec
+ *
+ * Copyright (c) 2014, The Chromium OS Authors. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+
+#include "../common/sst-dsp.h"
+#include "../haswell/sst-haswell-ipc.h"
+
+#include "../../codecs/rt5677.h"
+
+struct bdw_rt5677_priv {
+ struct gpio_desc *gpio_hp_en;
+ struct snd_soc_codec *codec;
+};
+
+static int bdw_rt5677_event_hp(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct bdw_rt5677_priv *bdw_rt5677 = snd_soc_card_get_drvdata(card);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ msleep(70);
+
+ gpiod_set_value_cansleep(bdw_rt5677->gpio_hp_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget bdw_rt5677_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", bdw_rt5677_event_hp),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Local DMICs", NULL),
+ SND_SOC_DAPM_MIC("Remote DMICs", NULL),
+};
+
+static const struct snd_soc_dapm_route bdw_rt5677_map[] = {
+ /* Speakers */
+ {"Speaker", NULL, "PDM1L"},
+ {"Speaker", NULL, "PDM1R"},
+
+ /* Headset jack connectors */
+ {"Headphone", NULL, "LOUT1"},
+ {"Headphone", NULL, "LOUT2"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+
+ /* Digital MICs
+ * Local DMICs: the two DMICs on the mainboard
+ * Remote DMICs: the two DMICs on the camera module
+ */
+ {"DMIC L1", NULL, "Remote DMICs"},
+ {"DMIC R1", NULL, "Remote DMICs"},
+ {"DMIC L2", NULL, "Local DMICs"},
+ {"DMIC R2", NULL, "Local DMICs"},
+
+ /* CODEC BE connections */
+ {"SSP0 CODEC IN", NULL, "AIF1 Capture"},
+ {"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
+};
+
+static const struct snd_kcontrol_new bdw_rt5677_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Local DMICs"),
+ SOC_DAPM_PIN_SWITCH("Remote DMICs"),
+};
+
+
+static struct snd_soc_jack headphone_jack;
+static struct snd_soc_jack mic_jack;
+
+static struct snd_soc_jack_pin headphone_jack_pin = {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+};
+
+static struct snd_soc_jack_pin mic_jack_pin = {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+};
+
+static struct snd_soc_jack_gpio headphone_jack_gpio = {
+ .name = "plug-det",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 200,
+};
+
+static struct snd_soc_jack_gpio mic_jack_gpio = {
+ .name = "mic-present",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 200,
+ .invert = 1,
+};
+
+static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 16 bit */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S16_LE);
+ return 0;
+}
+
+static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_ops bdw_rt5677_ops = {
+ .hw_params = bdw_rt5677_hw_params,
+};
+
+static int bdw_rt5677_rtd_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct sst_pdata *pdata = dev_get_platdata(rtd->platform->dev);
+ struct sst_hsw *broadwell = pdata->dsp;
+ int ret;
+
+ /* Set ADSP SSP port settings */
+ ret = sst_hsw_device_set_config(broadwell, SST_HSW_DEVICE_SSP_0,
+ SST_HSW_DEVICE_MCLK_FREQ_24_MHZ,
+ SST_HSW_DEVICE_CLOCK_MASTER, 9);
+ if (ret < 0) {
+ dev_err(rtd->dev, "error: failed to set device config\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct bdw_rt5677_priv *bdw_rt5677 =
+ snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ /* Enable codec ASRC function for Stereo DAC/Stereo1 ADC/DMIC/I2S1.
+ * The ASRC clock source is clk_i2s1_asrc.
+ */
+ rt5677_sel_asrc_clk_src(codec, RT5677_DA_STEREO_FILTER |
+ RT5677_AD_STEREO1_FILTER | RT5677_I2S1_SOURCE,
+ RT5677_CLK_SEL_I2S1_ASRC);
+
+ /* Request rt5677 GPIO for headphone amp control */
+ bdw_rt5677->gpio_hp_en = devm_gpiod_get_index(codec->dev,
+ "headphone-enable", 0, 0);
+ if (IS_ERR(bdw_rt5677->gpio_hp_en)) {
+ dev_err(codec->dev, "Can't find HP_AMP_SHDN_L gpio\n");
+ return PTR_ERR(bdw_rt5677->gpio_hp_en);
+ }
+ gpiod_direction_output(bdw_rt5677->gpio_hp_en, 0);
+
+ /* Create and initialize headphone jack */
+ if (!snd_soc_card_jack_new(rtd->card, "Headphone Jack",
+ SND_JACK_HEADPHONE, &headphone_jack,
+ &headphone_jack_pin, 1)) {
+ headphone_jack_gpio.gpiod_dev = codec->dev;
+ if (snd_soc_jack_add_gpios(&headphone_jack, 1,
+ &headphone_jack_gpio))
+ dev_err(codec->dev, "Can't add headphone jack gpio\n");
+ } else {
+ dev_err(codec->dev, "Can't create headphone jack\n");
+ }
+
+ /* Create and initialize mic jack */
+ if (!snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE, &mic_jack,
+ &mic_jack_pin, 1)) {
+ mic_jack_gpio.gpiod_dev = codec->dev;
+ if (snd_soc_jack_add_gpios(&mic_jack, 1, &mic_jack_gpio))
+ dev_err(codec->dev, "Can't add mic jack gpio\n");
+ } else {
+ dev_err(codec->dev, "Can't create mic jack\n");
+ }
+ bdw_rt5677->codec = codec;
+
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ return 0;
+}
+
+/* broadwell digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link bdw_rt5677_dais[] = {
+ /* Front End DAI links */
+ {
+ .name = "System PCM",
+ .stream_name = "System Playback/Capture",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "haswell-pcm-audio",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .init = bdw_rt5677_rtd_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST
+ },
+ .dpcm_capture = 1,
+ .dpcm_playback = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "Codec",
+ .id = 0,
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .no_pcm = 1,
+ .codec_name = "i2c-RT5677CE:00",
+ .codec_dai_name = "rt5677-aif1",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = broadwell_ssp0_fixup,
+ .ops = &bdw_rt5677_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = bdw_rt5677_init,
+ },
+};
+
+static int bdw_rt5677_suspend_pre(struct snd_soc_card *card)
+{
+ struct bdw_rt5677_priv *bdw_rt5677 = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dapm_context *dapm;
+
+ if (bdw_rt5677->codec) {
+ dapm = snd_soc_codec_get_dapm(bdw_rt5677->codec);
+ snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+ }
+ return 0;
+}
+
+static int bdw_rt5677_resume_post(struct snd_soc_card *card)
+{
+ struct bdw_rt5677_priv *bdw_rt5677 = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dapm_context *dapm;
+
+ if (bdw_rt5677->codec) {
+ dapm = snd_soc_codec_get_dapm(bdw_rt5677->codec);
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ }
+ return 0;
+}
+
+/* ASoC machine driver for Broadwell DSP + RT5677 */
+static struct snd_soc_card bdw_rt5677_card = {
+ .name = "bdw-rt5677",
+ .owner = THIS_MODULE,
+ .dai_link = bdw_rt5677_dais,
+ .num_links = ARRAY_SIZE(bdw_rt5677_dais),
+ .dapm_widgets = bdw_rt5677_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(bdw_rt5677_widgets),
+ .dapm_routes = bdw_rt5677_map,
+ .num_dapm_routes = ARRAY_SIZE(bdw_rt5677_map),
+ .controls = bdw_rt5677_controls,
+ .num_controls = ARRAY_SIZE(bdw_rt5677_controls),
+ .fully_routed = true,
+ .suspend_pre = bdw_rt5677_suspend_pre,
+ .resume_post = bdw_rt5677_resume_post,
+};
+
+static int bdw_rt5677_probe(struct platform_device *pdev)
+{
+ struct bdw_rt5677_priv *bdw_rt5677;
+
+ bdw_rt5677_card.dev = &pdev->dev;
+
+ /* Allocate driver private struct */
+ bdw_rt5677 = devm_kzalloc(&pdev->dev, sizeof(struct bdw_rt5677_priv),
+ GFP_KERNEL);
+ if (!bdw_rt5677) {
+ dev_err(&pdev->dev, "Can't allocate bdw_rt5677\n");
+ return -ENOMEM;
+ }
+
+ snd_soc_card_set_drvdata(&bdw_rt5677_card, bdw_rt5677);
+
+ return devm_snd_soc_register_card(&pdev->dev, &bdw_rt5677_card);
+}
+
+static struct platform_driver bdw_rt5677_audio = {
+ .probe = bdw_rt5677_probe,
+ .driver = {
+ .name = "bdw-rt5677",
+ },
+};
+
+module_platform_driver(bdw_rt5677_audio)
+
+/* Module information */
+MODULE_AUTHOR("Ben Zhang");
+MODULE_DESCRIPTION("Intel Broadwell RT5677 machine driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bdw-rt5677");
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 3774b117d365..6532b8f0ab2f 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -37,6 +37,7 @@ enum {
BXT_DPCM_AUDIO_PB = 0,
BXT_DPCM_AUDIO_CP,
BXT_DPCM_AUDIO_REF_CP,
+ BXT_DPCM_AUDIO_DMIC_CP,
BXT_DPCM_AUDIO_HDMI1_PB,
BXT_DPCM_AUDIO_HDMI2_PB,
BXT_DPCM_AUDIO_HDMI3_PB,
@@ -252,10 +253,56 @@ static struct snd_soc_ops broxton_da7219_ops = {
.hw_free = broxton_da7219_hw_free,
};
+static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ return 0;
+}
+
+static int broxton_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static const struct snd_soc_ops broxton_dmic_ops = {
+ .startup = broxton_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static int broxton_refcap_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+};
+
+static struct snd_soc_ops broxton_refcap_ops = {
+ .startup = broxton_refcap_startup,
+};
+
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_dais[] = {
/* Front End DAI links */
- [BXT_DPCM_AUDIO_PB]
+ [BXT_DPCM_AUDIO_PB] =
{
.name = "Bxt Audio Port",
.stream_name = "Audio",
@@ -271,7 +318,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dpcm_playback = 1,
.ops = &broxton_da7219_fe_ops,
},
- [BXT_DPCM_AUDIO_CP]
+ [BXT_DPCM_AUDIO_CP] =
{
.name = "Bxt Audio Capture Port",
.stream_name = "Audio Record",
@@ -286,7 +333,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dpcm_capture = 1,
.ops = &broxton_da7219_fe_ops,
},
- [BXT_DPCM_AUDIO_REF_CP]
+ [BXT_DPCM_AUDIO_REF_CP] =
{
.name = "Bxt Audio Reference cap",
.stream_name = "Refcap",
@@ -299,8 +346,23 @@ static struct snd_soc_dai_link broxton_dais[] = {
.ignore_suspend = 1,
.nonatomic = 1,
.dynamic = 1,
+ .ops = &broxton_refcap_ops,
+ },
+ [BXT_DPCM_AUDIO_DMIC_CP]
+ {
+ .name = "Bxt Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &broxton_dmic_ops,
},
- [BXT_DPCM_AUDIO_HDMI1_PB]
+ [BXT_DPCM_AUDIO_HDMI1_PB] =
{
.name = "Bxt HDMI Port1",
.stream_name = "Hdmi1",
@@ -313,7 +375,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI2_PB]
+ [BXT_DPCM_AUDIO_HDMI2_PB] =
{
.name = "Bxt HDMI Port2",
.stream_name = "Hdmi2",
@@ -326,7 +388,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI3_PB]
+ [BXT_DPCM_AUDIO_HDMI3_PB] =
{
.name = "Bxt HDMI Port3",
.stream_name = "Hdmi3",
@@ -382,6 +444,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.codec_dai_name = "dmic-hifi",
.platform_name = "0000:00:0e.0",
.ignore_suspend = 1,
+ .be_hw_params_fixup = broxton_dmic_fixup,
.dpcm_capture = 1,
.no_pcm = 1,
},
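[Editor's note: broxton_refcap_startup() above pins the reference-capture stream to 16 kHz through a constraint list. The same idiom in isolation, with example-local names.]

#include <linux/kernel.h>
#include <sound/pcm.h>

static const unsigned int example_rates_16k[] = { 16000 };

static const struct snd_pcm_hw_constraint_list example_constraints_16k = {
	.count = ARRAY_SIZE(example_rates_16k),
	.list = example_rates_16k,
};

/* .startup handler: refuse any rate other than 16 kHz */
static int example_startup(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &example_constraints_16k);
}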
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 253d7bfbf511..d610bdca1608 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -271,7 +271,7 @@ static const struct snd_soc_ops broxton_rt286_fe_ops = {
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_rt298_dais[] = {
/* Front End DAI links */
- [BXT_DPCM_AUDIO_PB]
+ [BXT_DPCM_AUDIO_PB] =
{
.name = "Bxt Audio Port",
.stream_name = "Audio",
@@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dpcm_playback = 1,
.ops = &broxton_rt286_fe_ops,
},
- [BXT_DPCM_AUDIO_CP]
+ [BXT_DPCM_AUDIO_CP] =
{
.name = "Bxt Audio Capture Port",
.stream_name = "Audio Record",
@@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dpcm_capture = 1,
.ops = &broxton_rt286_fe_ops,
},
- [BXT_DPCM_AUDIO_REF_CP]
+ [BXT_DPCM_AUDIO_REF_CP] =
{
.name = "Bxt Audio Reference cap",
.stream_name = "refcap",
@@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_DMIC_CP]
+ [BXT_DPCM_AUDIO_DMIC_CP] =
{
.name = "Bxt Audio DMIC cap",
.stream_name = "dmiccap",
@@ -327,7 +327,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dynamic = 1,
.ops = &broxton_dmic_ops,
},
- [BXT_DPCM_AUDIO_HDMI1_PB]
+ [BXT_DPCM_AUDIO_HDMI1_PB] =
{
.name = "Bxt HDMI Port1",
.stream_name = "Hdmi1",
@@ -340,7 +340,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI2_PB]
+ [BXT_DPCM_AUDIO_HDMI2_PB] =
{
.name = "Bxt HDMI Port2",
.stream_name = "Hdmi2",
@@ -353,7 +353,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI3_PB]
+ [BXT_DPCM_AUDIO_HDMI3_PB] =
{
.name = "Bxt HDMI Port3",
.stream_name = "Hdmi3",
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 88efb62439ba..bff77a1f27fc 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -24,6 +24,9 @@
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <asm/platform_sst_audio.h>
+#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -31,42 +34,153 @@
#include "../../codecs/rt5640.h"
#include "../atom/sst-atom-controls.h"
#include "../common/sst-acpi.h"
+#include "../common/sst-dsp.h"
enum {
BYT_RT5640_DMIC1_MAP,
BYT_RT5640_DMIC2_MAP,
BYT_RT5640_IN1_MAP,
+ BYT_RT5640_IN3_MAP,
};
#define BYT_RT5640_MAP(quirk) ((quirk) & 0xff)
#define BYT_RT5640_DMIC_EN BIT(16)
+#define BYT_RT5640_MONO_SPEAKER BIT(17)
+#define BYT_RT5640_DIFF_MIC BIT(18) /* default is single-ended */
+#define BYT_RT5640_SSP2_AIF2 BIT(19) /* default is using AIF1 */
+#define BYT_RT5640_SSP0_AIF1 BIT(20)
+#define BYT_RT5640_SSP0_AIF2 BIT(21)
+#define BYT_RT5640_MCLK_EN BIT(22)
+#define BYT_RT5640_MCLK_25MHZ BIT(23)
+
+struct byt_rt5640_private {
+ struct clk *mclk;
+};
static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
- BYT_RT5640_DMIC_EN;
+ BYT_RT5640_DMIC_EN |
+ BYT_RT5640_MCLK_EN;
+
+static void log_quirks(struct device *dev)
+{
+ if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_DMIC1_MAP)
+ dev_info(dev, "quirk DMIC1_MAP enabled");
+ if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_DMIC2_MAP)
+ dev_info(dev, "quirk DMIC2_MAP enabled");
+ if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_IN1_MAP)
+ dev_info(dev, "quirk IN1_MAP enabled");
+ if (BYT_RT5640_MAP(byt_rt5640_quirk) == BYT_RT5640_IN3_MAP)
+ dev_info(dev, "quirk IN3_MAP enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_DMIC_EN)
+ dev_info(dev, "quirk DMIC enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER)
+ dev_info(dev, "quirk MONO_SPEAKER enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_DIFF_MIC)
+ dev_info(dev, "quirk DIFF_MIC enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2)
+ dev_info(dev, "quirk SSP2_AIF2 enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1)
+ dev_info(dev, "quirk SSP0_AIF1 enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2)
+ dev_info(dev, "quirk SSP0_AIF2 enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN)
+ dev_info(dev, "quirk MCLK_EN enabled");
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_25MHZ)
+ dev_info(dev, "quirk MCLK_25MHZ enabled");
+}
+
+
+#define BYT_CODEC_DAI1 "rt5640-aif1"
+#define BYT_CODEC_DAI2 "rt5640-aif2"
+
+static inline struct snd_soc_dai *byt_get_codec_dai(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!strncmp(rtd->codec_dai->name, BYT_CODEC_DAI1,
+ strlen(BYT_CODEC_DAI1)))
+ return rtd->codec_dai;
+ if (!strncmp(rtd->codec_dai->name, BYT_CODEC_DAI2,
+ strlen(BYT_CODEC_DAI2)))
+ return rtd->codec_dai;
+
+ }
+ return NULL;
+}
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ codec_dai = byt_get_codec_dai(card);
+ if (!codec_dai) {
+ dev_err(card->dev,
+ "Codec dai not found; Unable to set platform clock\n");
+ return -EIO;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk) {
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
+ }
+ }
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_PLL1,
+ 48000 * 512,
+ SND_SOC_CLOCK_IN);
+ } else {
+ /*
+ * Set codec clock source to internal clock before
+ * turning off the platform clock. Codec needs clock
+ * for Jack detection and button press
+ */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
+ 0,
+ SND_SOC_CLOCK_IN);
+ if (!ret) {
+ if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
+ clk_disable_unprepare(priv->mclk);
+ }
+ }
+
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Internal Mic", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
};
static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
- {"AIF1 Playback", NULL, "ssp2 Tx"},
- {"ssp2 Tx", NULL, "codec_out0"},
- {"ssp2 Tx", NULL, "codec_out1"},
- {"codec_in0", NULL, "ssp2 Rx"},
- {"codec_in1", NULL, "ssp2 Rx"},
- {"ssp2 Rx", NULL, "AIF1 Capture"},
+ {"Headphone", NULL, "Platform Clock"},
+ {"Headset Mic", NULL, "Platform Clock"},
+ {"Internal Mic", NULL, "Platform Clock"},
+ {"Speaker", NULL, "Platform Clock"},
{"Headset Mic", NULL, "MICBIAS1"},
{"IN2P", NULL, "Headset Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
- {"Speaker", NULL, "SPOLP"},
- {"Speaker", NULL, "SPOLN"},
- {"Speaker", NULL, "SPORP"},
- {"Speaker", NULL, "SPORN"},
};
static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
@@ -82,6 +196,59 @@ static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
{"IN1P", NULL, "Internal Mic"},
};
+static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
+ {"Internal Mic", NULL, "MICBIAS1"},
+ {"IN3P", NULL, "Internal Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_ssp2_aif1_map[] = {
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx"},
+ {"codec_in1", NULL, "ssp2 Rx"},
+
+ {"AIF1 Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Rx", NULL, "AIF1 Capture"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_ssp2_aif2_map[] = {
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx"},
+ {"codec_in1", NULL, "ssp2 Rx"},
+
+ {"AIF2 Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Rx", NULL, "AIF2 Capture"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif1_map[] = {
+ {"ssp0 Tx", NULL, "modem_out"},
+ {"modem_in", NULL, "ssp0 Rx"},
+
+ {"AIF1 Playback", NULL, "ssp0 Tx"},
+ {"ssp0 Rx", NULL, "AIF1 Capture"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
+ {"ssp0 Tx", NULL, "modem_out"},
+ {"modem_in", NULL, "ssp0 Rx"},
+
+ {"AIF2 Playback", NULL, "ssp0 Tx"},
+ {"ssp0 Rx", NULL, "AIF2 Capture"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
+ {"Speaker", NULL, "SPOLP"},
+ {"Speaker", NULL, "SPOLN"},
+ {"Speaker", NULL, "SPORP"},
+ {"Speaker", NULL, "SPORN"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
+ {"Speaker", NULL, "SPOLP"},
+ {"Speaker", NULL, "SPOLN"},
+};
+
static const struct snd_kcontrol_new byt_rt5640_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -96,19 +263,46 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
- snd_soc_dai_set_bclk_ratio(codec_dai, 50);
-
ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_PLL1,
params_rate(params) * 512,
SND_SOC_CLOCK_IN);
+
if (ret < 0) {
dev_err(rtd->dev, "can't set codec clock %d\n", ret);
return ret;
}
- ret = snd_soc_dai_set_pll(codec_dai, 0, RT5640_PLL1_S_BCLK1,
- params_rate(params) * 50,
- params_rate(params) * 512);
+ if (!(byt_rt5640_quirk & BYT_RT5640_MCLK_EN)) {
+ /* use bitclock as PLL input */
+ if ((byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) ||
+ (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2)) {
+
+ /* 2x16 bit slots on SSP0 */
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5640_PLL1_S_BCLK1,
+ params_rate(params) * 32,
+ params_rate(params) * 512);
+ } else {
+ /* 2x15 bit slots on SSP2 */
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5640_PLL1_S_BCLK1,
+ params_rate(params) * 50,
+ params_rate(params) * 512);
+ }
+ } else {
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_25MHZ) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5640_PLL1_S_MCLK,
+ 25000000,
+ params_rate(params) * 512);
+ } else {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5640_PLL1_S_MCLK,
+ 19200000,
+ params_rate(params) * 512);
+ }
+ }
+
if (ret < 0) {
dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
return ret;
@@ -127,27 +321,73 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
{
.callback = byt_rt5640_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
},
- .driver_data = (unsigned long *)BYT_RT5640_IN1_MAP,
+ .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN
+ ),
},
{
.callback = byt_rt5640_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DellInc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Venue 8 Pro 5830"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "DellInc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Venue 8 Pro 5830"),
},
.driver_data = (unsigned long *)(BYT_RT5640_DMIC2_MAP |
+ BYT_RT5640_DMIC_EN |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP ElitePad 1000 G2"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_DMIC1_MAP |
BYT_RT5640_DMIC_EN),
},
{
.callback = byt_rt5640_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ElitePad 1000 G2"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
},
- .driver_data = (unsigned long *)BYT_RT5640_IN1_MAP,
+ .driver_data = (unsigned long *)(BYT_RT5640_IN3_MAP |
+ BYT_RT5640_MCLK_EN |
+ BYT_RT5640_SSP0_AIF1),
+ },
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MCLK_EN |
+ BYT_RT5640_SSP0_AIF1),
+
},
{}
};
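[Editor's note: the quirk table above is keyed off DMI strings; dmi_check_system(), called from the machine driver's probe outside this hunk, walks the table and runs the matching entry's callback, which latches .driver_data into the quirk mask. A minimal sketch of that pattern with example identifiers; only the DMI helpers themselves are real APIs.]

#include <linux/dmi.h>

static unsigned long example_quirk;

static int example_quirk_cb(const struct dmi_system_id *id)
{
	example_quirk = (unsigned long)id->driver_data;
	return 1;
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ExampleTablet"),
		},
		.driver_data = (void *)0x1UL,	/* illustrative quirk bits */
	},
	{}
};

/* typically invoked once at probe time: dmi_check_system(example_quirk_table); */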
@@ -158,13 +398,18 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
struct snd_soc_codec *codec = runtime->codec;
struct snd_soc_card *card = runtime->card;
const struct snd_soc_dapm_route *custom_map;
+ struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
int num_routes;
card->dapm.idle_bias_off = true;
rt5640_sel_asrc_clk_src(codec,
RT5640_DA_STEREO_FILTER |
- RT5640_AD_STEREO_FILTER,
+ RT5640_DA_MONO_L_FILTER |
+ RT5640_DA_MONO_R_FILTER |
+ RT5640_AD_STEREO_FILTER |
+ RT5640_AD_MONO_L_FILTER |
+ RT5640_AD_MONO_R_FILTER,
RT5640_CLK_SEL_ASRC);
ret = snd_soc_add_card_controls(card, byt_rt5640_controls,
@@ -179,6 +424,10 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
custom_map = byt_rt5640_intmic_in1_map;
num_routes = ARRAY_SIZE(byt_rt5640_intmic_in1_map);
break;
+ case BYT_RT5640_IN3_MAP:
+ custom_map = byt_rt5640_intmic_in3_map;
+ num_routes = ARRAY_SIZE(byt_rt5640_intmic_in3_map);
+ break;
case BYT_RT5640_DMIC2_MAP:
custom_map = byt_rt5640_intmic_dmic2_map;
num_routes = ARRAY_SIZE(byt_rt5640_intmic_dmic2_map);
@@ -192,6 +441,43 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
if (ret)
return ret;
+ if (byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_ssp2_aif2_map,
+ ARRAY_SIZE(byt_rt5640_ssp2_aif2_map));
+ } else if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_ssp0_aif1_map,
+ ARRAY_SIZE(byt_rt5640_ssp0_aif1_map));
+ } else if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2) {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_ssp0_aif2_map,
+ ARRAY_SIZE(byt_rt5640_ssp0_aif2_map));
+ } else {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_ssp2_aif1_map,
+ ARRAY_SIZE(byt_rt5640_ssp2_aif1_map));
+ }
+ if (ret)
+ return ret;
+
+ if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_mono_spk_map,
+ ARRAY_SIZE(byt_rt5640_mono_spk_map));
+ } else {
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ byt_rt5640_stereo_spk_map,
+ ARRAY_SIZE(byt_rt5640_stereo_spk_map));
+ }
+ if (ret)
+ return ret;
+
+ if (byt_rt5640_quirk & BYT_RT5640_DIFF_MIC) {
+ snd_soc_update_bits(codec, RT5640_IN1_IN2, RT5640_IN_DF1,
+ RT5640_IN_DF1);
+ }
+
if (byt_rt5640_quirk & BYT_RT5640_DMIC_EN) {
ret = rt5640_dmic_enable(codec, 0, 0);
if (ret)
@@ -201,6 +487,30 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+ if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk) {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * to disable a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(priv->mclk);
+ if (!ret)
+ clk_disable_unprepare(priv->mclk);
+
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_25MHZ)
+ ret = clk_set_rate(priv->mclk, 25000000);
+ else
+ ret = clk_set_rate(priv->mclk, 19200000);
+
+ if (ret)
+ dev_err(card->dev, "unable to set MCLK rate\n");
+ }
+
return ret;
}
@@ -221,34 +531,63 @@ static int byt_rt5640_codec_fixup(struct snd_soc_pcm_runtime *rtd,
SNDRV_PCM_HW_PARAM_CHANNELS);
int ret;
- /* The DSP will covert the FE rate to 48k, stereo, 24bits */
+ /* The DSP will convert the FE rate to 48k, stereo */
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
- /* set SSP2 to 24-bit */
- params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+ if ((byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) ||
+ (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2)) {
+
+ /* set SSP0 to 16-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
+
+ /*
+ * Default mode for SSP configuration is TDM 4 slot, override config
+ * with explicit setting to I2S 2ch 16-bit. The word length is set with
+ * dai_set_tdm_slot() since there is no other API exposed
+ */
+ ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_CBS_CFS
+ );
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
+ return ret;
+ }
- /*
- * Default mode for SSP configuration is TDM 4 slot, override config
- * with explicit setting to I2S 2ch 24-bit. The word length is set with
- * dai_set_tdm_slot() since there is no other API exposed
- */
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_IF |
- SND_SOC_DAIFMT_CBS_CFS
- );
- if (ret < 0) {
- dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
- return ret;
- }
+ ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 16);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
+ return ret;
+ }
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
- if (ret < 0) {
- dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
- return ret;
- }
+ } else {
+
+ /* set SSP2 to 24-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+
+ /*
+ * Default mode for SSP configuration is TDM 4 slot, override config
+ * with explicit setting to I2S 2ch 24-bit. The word length is set with
+ * dai_set_tdm_slot() since there is no other API exposed
+ */
+ ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_CBS_CFS
+ );
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
+ return ret;
+ }
+ ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -305,10 +644,10 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
{
.name = "SSP2-Codec",
.id = 1,
- .cpu_dai_name = "ssp2-port",
+ .cpu_dai_name = "ssp2-port", /* overwritten for ssp0 routing */
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
- .codec_dai_name = "rt5640-aif1",
+ .codec_dai_name = "rt5640-aif1", /* changed w/ quirk */
.codec_name = "i2c-10EC5640:00", /* overwritten with HID */
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
@@ -335,6 +674,21 @@ static struct snd_soc_card byt_rt5640_card = {
};
static char byt_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5640_codec_aif_name[12]; /* = "rt5640-aif[1|2]" */
+static char byt_rt5640_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
+
+static bool is_valleyview(void)
+{
+ static const struct x86_cpu_id cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
+ {}
+ };
+
+ if (!x86_match_cpu(cpu_ids))
+ return false;
+ return true;
+}
+
static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
{
@@ -343,10 +697,16 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
const char *i2c_name = NULL;
int i;
int dai_index;
+ struct byt_rt5640_private *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
/* register the soc card */
byt_rt5640_card.dev = &pdev->dev;
mach = byt_rt5640_card.dev->platform_data;
+ snd_soc_card_set_drvdata(&byt_rt5640_card, priv);
/* fix index of codec dai */
dai_index = MERR_DPCM_COMPR + 1;
@@ -366,8 +726,57 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
byt_rt5640_dais[dai_index].codec_name = byt_rt5640_codec_name;
}
+ /*
+ * swap SSP0 if bytcr is detected
+ * (will be overridden if DMI quirk is detected)
+ */
+ if (is_valleyview()) {
+ struct sst_platform_info *p_info = mach->pdata;
+ const struct sst_res_info *res_info = p_info->res_info;
+
+ /* TODO: use CHAN package info from BIOS to detect AIF1/AIF2 */
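+ /* an ACPI IPC IRQ index of 0 is treated here as the Baytrail-CR indicator */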
+ if (res_info->acpi_ipc_irq_index == 0) {
+ byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
+ }
+ }
+
/* check quirks before creating card */
dmi_check_system(byt_rt5640_quirk_table);
+ log_quirks(&pdev->dev);
+
+ if ((byt_rt5640_quirk & BYT_RT5640_SSP2_AIF2) ||
+ (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2)) {
+
+ /* fixup codec aif name */
+ snprintf(byt_rt5640_codec_aif_name,
+ sizeof(byt_rt5640_codec_aif_name),
+ "%s", "rt5640-aif2");
+
+ byt_rt5640_dais[dai_index].codec_dai_name =
+ byt_rt5640_codec_aif_name;
+ }
+
+ if ((byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) ||
+ (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF2)) {
+
+ /* fixup cpu dai name */
+ snprintf(byt_rt5640_cpu_dai_name,
+ sizeof(byt_rt5640_cpu_dai_name),
+ "%s", "ssp0-port");
+
+ byt_rt5640_dais[dai_index].cpu_dai_name =
+ byt_rt5640_cpu_dai_name;
+ }
+
+ if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
+ priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(priv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(priv->mclk));
+ return PTR_ERR(priv->mclk);
+ }
+ }
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5640_card);
diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index 2c5eda14d510..1285cc597b6b 100644
--- a/sound/soc/intel/common/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -199,6 +199,7 @@ static struct sst_acpi_desc sst_acpi_haswell_desc = {
static struct sst_acpi_mach broadwell_machines[] = {
{ "INT343A", "broadwell-audio", "intel/IntcSST2.bin", NULL, NULL, NULL },
+ { "RT5677CE", "bdw-rt5677", "intel/IntcSST2.bin", NULL, NULL, NULL },
{}
};
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 3154525c2b83..9e4094e2c6e3 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -871,7 +871,7 @@ out:
return ret;
}
-static struct snd_pcm_ops hsw_pcm_ops = {
+static const struct snd_pcm_ops hsw_pcm_ops = {
.open = hsw_pcm_open,
.close = hsw_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 2663781278aa..1d251d59bcb9 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -23,6 +23,7 @@
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-sst-ipc.h"
+#include "skl-tplg-interface.h"
#define BXT_BASEFW_TIMEOUT 3000
#define BXT_INIT_TIMEOUT 500
@@ -40,11 +41,73 @@
#define BXT_INSTANCE_ID 0
#define BXT_BASE_FW_MODULE_ID 0
+#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
+
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
}
+static int
+bxt_load_library(struct sst_dsp *ctx, struct skl_dfw_manifest *minfo)
+{
+ struct snd_dma_buffer dmab;
+ struct skl_sst *skl = ctx->thread_context;
+ const struct firmware *fw = NULL;
+ struct firmware stripped_fw;
+ int ret = 0, i, dma_id, stream_tag;
+
+ /* library indices start from 1 to N. 0 represents base FW */
+ for (i = 1; i < minfo->lib_count; i++) {
+ ret = request_firmware(&fw, minfo->lib[i].name, ctx->dev);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Request lib %s failed:%d\n",
+ minfo->lib[i].name, ret);
+ return ret;
+ }
+
+ if (skl->is_first_boot) {
+ ret = snd_skl_parse_uuids(ctx, fw,
+ BXT_ADSP_FW_BIN_HDR_OFFSET, i);
+ if (ret < 0)
+ goto load_library_failed;
+ }
+
+ stripped_fw.data = fw->data;
+ stripped_fw.size = fw->size;
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
+ stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40,
+ stripped_fw.size, &dmab);
+ if (stream_tag <= 0) {
+ dev_err(ctx->dev, "Lib prepare DMA err: %x\n",
+ stream_tag);
+ ret = stream_tag;
+ goto load_library_failed;
+ }
+
+ dma_id = stream_tag - 1;
+ memcpy(dmab.area, stripped_fw.data, stripped_fw.size);
+
+ ctx->dsp_ops.trigger(ctx->dev, true, stream_tag);
+ ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i);
+ if (ret < 0)
+ dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n",
+ minfo->lib[i].name, ret);
+
+ ctx->dsp_ops.trigger(ctx->dev, false, stream_tag);
+ ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag);
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ return ret;
+
+load_library_failed:
+ release_firmware(fw);
+ return ret;
+}
+
/*
* First boot sequence has some extra steps. Core 0 waits for power
* status on core 1, so power up core 1 also momentarily, keep it in
@@ -157,8 +220,6 @@ static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
return ret;
}
-#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
-
static int bxt_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
@@ -175,9 +236,12 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
if (ctx->fw == NULL)
goto sst_load_base_firmware_failed;
- ret = snd_skl_parse_uuids(ctx, BXT_ADSP_FW_BIN_HDR_OFFSET);
- if (ret < 0)
- goto sst_load_base_firmware_failed;
+ /* parse uuids on first boot */
+ if (skl->is_first_boot) {
+ ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0);
+ if (ret < 0)
+ goto sst_load_base_firmware_failed;
+ }
stripped_fw.data = ctx->fw->data;
stripped_fw.size = ctx->fw->size;
@@ -230,12 +294,23 @@ static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
int ret;
struct skl_ipc_dxstate_info dx;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
+ struct skl_dfw_manifest *minfo = &skl->manifest;
if (skl->fw_loaded == false) {
skl->boot_complete = false;
ret = bxt_load_base_firmware(ctx);
- if (ret < 0)
+ if (ret < 0) {
dev_err(ctx->dev, "reload fw failed: %d\n", ret);
+ return ret;
+ }
+
+ if (minfo->lib_count > 1) {
+ ret = bxt_load_library(ctx, minfo);
+ if (ret < 0) {
+ dev_err(ctx->dev, "reload libs failed: %d\n", ret);
+ return ret;
+ }
+ }
return ret;
}
@@ -329,7 +404,7 @@ static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
ret = skl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "Failed to disable core %d", ret);
+ dev_err(ctx->dev, "Failed to disable core %d\n", ret);
return ret;
}
skl->cores.state[core_id] = SKL_DSP_RESET;
@@ -341,6 +416,7 @@ static struct skl_dsp_fw_ops bxt_fw_ops = {
.set_state_D3 = bxt_set_dsp_D3,
.load_fw = bxt_load_base_firmware,
.get_fw_errcode = bxt_get_errorcode,
+ .load_library = bxt_load_library,
};
static struct sst_ops skl_ops = {
@@ -397,22 +473,40 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
skl->cores.count = 2;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
+ skl->is_first_boot = true;
+
+ if (dsp)
+ *dsp = skl;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
+
+int bxt_sst_init_fw(struct device *dev, struct skl_sst *ctx)
+{
+ int ret;
+ struct sst_dsp *sst = ctx->dsp;
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
- dev_err(dev, "Load base fw failed: %x", ret);
+ dev_err(dev, "Load base fw failed: %x\n", ret);
return ret;
}
skl_dsp_init_core_state(sst);
- if (dsp)
- *dsp = skl;
+ if (ctx->manifest.lib_count > 1) {
+ ret = sst->fw_ops.load_library(sst, &ctx->manifest);
+ if (ret < 0) {
+ dev_err(dev, "Load Library failed : %x\n", ret);
+ return ret;
+ }
+ }
+ ctx->is_first_boot = false;
return 0;
}
-EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
-
+EXPORT_SYMBOL_GPL(bxt_sst_init_fw);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 44ab595ce21a..805b7f2173f3 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -203,32 +203,35 @@ static const struct skl_dsp_ops dsp_ops[] = {
.id = 0x9d70,
.loader_ops = skl_get_loader_ops,
.init = skl_sst_dsp_init,
+ .init_fw = skl_sst_init_fw,
.cleanup = skl_sst_dsp_cleanup
},
{
.id = 0x9d71,
.loader_ops = skl_get_loader_ops,
.init = skl_sst_dsp_init,
+ .init_fw = skl_sst_init_fw,
.cleanup = skl_sst_dsp_cleanup
},
{
.id = 0x5a98,
.loader_ops = bxt_get_loader_ops,
.init = bxt_sst_dsp_init,
+ .init_fw = bxt_sst_init_fw,
.cleanup = bxt_sst_dsp_cleanup
},
};
-static int skl_get_dsp_ops(int pci_id)
+const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
if (dsp_ops[i].id == pci_id)
- return i;
+ return &dsp_ops[i];
}
- return -EINVAL;
+ return NULL;
}
int skl_init_dsp(struct skl *skl)
@@ -238,7 +241,8 @@ int skl_init_dsp(struct skl *skl)
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl_dsp_loader_ops loader_ops;
int irq = bus->irq;
- int ret, index;
+ const struct skl_dsp_ops *ops;
+ int ret;
/* enable ppcap interrupt */
snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
@@ -251,18 +255,18 @@ int skl_init_dsp(struct skl *skl)
return -ENXIO;
}
- index = skl_get_dsp_ops(skl->pci->device);
- if (index < 0)
- return -EINVAL;
+ ops = skl_get_dsp_ops(skl->pci->device);
+ if (!ops)
+ return -EIO;
- loader_ops = dsp_ops[index].loader_ops();
- ret = dsp_ops[index].init(bus->dev, mmio_base, irq,
- skl->fw_name, loader_ops, &skl->skl_sst);
+ loader_ops = ops->loader_ops();
+ ret = ops->init(bus->dev, mmio_base, irq,
+ skl->fw_name, loader_ops,
+ &skl->skl_sst);
if (ret < 0)
return ret;
- skl_dsp_enable_notification(skl->skl_sst, false);
dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
return ret;
@@ -273,16 +277,16 @@ int skl_free_dsp(struct skl *skl)
struct hdac_ext_bus *ebus = &skl->ebus;
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl_sst *ctx = skl->skl_sst;
- int index;
+ const struct skl_dsp_ops *ops;
/* disable ppcap interrupt */
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
- index = skl_get_dsp_ops(skl->pci->device);
- if (index < 0)
+ ops = skl_get_dsp_ops(skl->pci->device);
+ if (!ops)
return -EIO;
- dsp_ops[index].cleanup(bus->dev, ctx);
+ ops->cleanup(bus->dev, ctx);
if (ctx->dsp->addr.lpe)
iounmap(ctx->dsp->addr.lpe);
@@ -296,7 +300,7 @@ int skl_suspend_dsp(struct skl *skl)
int ret;
/* if ppcap is not supported return 0 */
- if (!skl->ebus.ppcap)
+ if (!skl->ebus.bus.ppcap)
return 0;
ret = skl_dsp_sleep(ctx->dsp);
@@ -316,13 +320,17 @@ int skl_resume_dsp(struct skl *skl)
int ret;
/* if ppcap is not supported return 0 */
- if (!skl->ebus.ppcap)
+ if (!skl->ebus.bus.ppcap)
return 0;
/* enable ppcap interrupt */
snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+ /* check if DSP 1st boot is done */
+ if (skl->skl_sst->is_first_boot == true)
+ return 0;
+
ret = skl_dsp_wake(ctx->dsp);
if (ret < 0)
return ret;
@@ -672,6 +680,7 @@ static u16 skl_get_module_param_size(struct skl_sst *ctx,
return param_size;
case SKL_MODULE_TYPE_BASE_OUTFMT:
+ case SKL_MODULE_TYPE_KPB:
return sizeof(struct skl_base_outfmt_cfg);
default:
@@ -725,6 +734,7 @@ static int skl_set_module_format(struct skl_sst *ctx,
break;
case SKL_MODULE_TYPE_BASE_OUTFMT:
+ case SKL_MODULE_TYPE_KPB:
skl_set_base_outfmt_format(ctx, module_config, *param_data);
break;
@@ -779,6 +789,7 @@ static int skl_alloc_queue(struct skl_module_pin *mpin,
mpin[i].in_use = true;
mpin[i].id.module_id = id.module_id;
mpin[i].id.instance_id = id.instance_id;
+ mpin[i].id.pvt_id = id.pvt_id;
mpin[i].tgt_mcfg = tgt_cfg;
return i;
}
@@ -802,6 +813,7 @@ static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
mpin[q_index].in_use = false;
mpin[q_index].id.module_id = 0;
mpin[q_index].id.instance_id = 0;
+ mpin[q_index].id.pvt_id = 0;
}
mpin[q_index].pin_state = SKL_PIN_UNBIND;
mpin[q_index].tgt_mcfg = NULL;
@@ -842,7 +854,7 @@ int skl_init_module(struct skl_sst *ctx,
struct skl_ipc_init_instance_msg msg;
dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
- mconfig->id.module_id, mconfig->id.instance_id);
+ mconfig->id.module_id, mconfig->id.pvt_id);
if (mconfig->pipe->state != SKL_PIPE_CREATED) {
dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
@@ -858,10 +870,11 @@ int skl_init_module(struct skl_sst *ctx,
}
msg.module_id = mconfig->id.module_id;
- msg.instance_id = mconfig->id.instance_id;
+ msg.instance_id = mconfig->id.pvt_id;
msg.ppl_instance_id = mconfig->pipe->ppl_id;
msg.param_data_size = module_config_size;
msg.core_id = mconfig->core_id;
+ msg.domain = mconfig->domain;
ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
if (ret < 0) {
@@ -878,9 +891,9 @@ static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
*src_module, struct skl_module_cfg *dst_module)
{
dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
- __func__, src_module->id.module_id, src_module->id.instance_id);
+ __func__, src_module->id.module_id, src_module->id.pvt_id);
dev_dbg(ctx->dev, "%s: dst_module=%d dst_instacne=%d\n", __func__,
- dst_module->id.module_id, dst_module->id.instance_id);
+ dst_module->id.module_id, dst_module->id.pvt_id);
dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
src_module->m_state, dst_module->m_state);
@@ -927,9 +940,9 @@ int skl_unbind_modules(struct skl_sst *ctx,
return 0;
msg.module_id = src_mcfg->id.module_id;
- msg.instance_id = src_mcfg->id.instance_id;
+ msg.instance_id = src_mcfg->id.pvt_id;
msg.dst_module_id = dst_mcfg->id.module_id;
- msg.dst_instance_id = dst_mcfg->id.instance_id;
+ msg.dst_instance_id = dst_mcfg->id.pvt_id;
msg.bind = false;
ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
@@ -988,9 +1001,9 @@ int skl_bind_modules(struct skl_sst *ctx,
msg.src_queue, msg.dst_queue);
msg.module_id = src_mcfg->id.module_id;
- msg.instance_id = src_mcfg->id.instance_id;
+ msg.instance_id = src_mcfg->id.pvt_id;
msg.dst_module_id = dst_mcfg->id.module_id;
- msg.dst_instance_id = dst_mcfg->id.instance_id;
+ msg.dst_instance_id = dst_mcfg->id.pvt_id;
msg.bind = true;
ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
@@ -1168,7 +1181,7 @@ int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
struct skl_ipc_large_config_msg msg;
msg.module_id = mcfg->id.module_id;
- msg.instance_id = mcfg->id.instance_id;
+ msg.instance_id = mcfg->id.pvt_id;
msg.param_data_size = size;
msg.large_param_id = param_id;
@@ -1181,7 +1194,7 @@ int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
struct skl_ipc_large_config_msg msg;
msg.module_id = mcfg->id.module_id;
- msg.instance_id = mcfg->id.instance_id;
+ msg.instance_id = mcfg->id.pvt_id;
msg.param_data_size = size;
msg.large_param_id = param_id;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 6e05bf8622f7..58c728662600 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -106,7 +106,7 @@ static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
{
- if (ebus->ppcap)
+ if ((ebus_to_hbus(ebus))->ppcap)
return HDAC_EXT_STREAM_TYPE_HOST;
else
return HDAC_EXT_STREAM_TYPE_COUPLED;
@@ -188,7 +188,7 @@ static int skl_get_format(struct snd_pcm_substream *substream,
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
int format_val = 0;
- if (ebus->ppcap) {
+ if ((ebus_to_hbus(ebus))->ppcap) {
struct snd_pcm_runtime *runtime = substream->runtime;
format_val = snd_hdac_calc_stream_format(runtime->rate,
@@ -648,7 +648,8 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
.channels_min = HDA_MONO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
.stream_name = "System Capture",
@@ -1020,7 +1021,7 @@ static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
{
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
- if (!ebus->ppcap)
+ if (!(ebus_to_hbus(ebus))->ppcap)
return skl_coupled_trigger(substream, cmd);
return 0;
@@ -1093,7 +1094,7 @@ static int skl_get_time_info(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops skl_platform_ops = {
+static const struct snd_pcm_ops skl_platform_ops = {
.open = skl_platform_open,
.ioctl = snd_pcm_lib_ioctl,
.trigger = skl_platform_pcm_trigger,
@@ -1138,20 +1139,67 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
return retval;
}
+static int skl_populate_modules(struct skl *skl)
+{
+ struct skl_pipeline *p;
+ struct skl_pipe_module *m;
+ struct snd_soc_dapm_widget *w;
+ struct skl_module_cfg *mconfig;
+ int ret;
+
+ list_for_each_entry(p, &skl->ppl_list, node) {
+ list_for_each_entry(m, &p->pipe->w_list, node) {
+
+ w = m->w;
+ mconfig = w->priv;
+
+ ret = snd_skl_get_module_info(skl->skl_sst, mconfig);
+ if (ret < 0) {
+ dev_err(skl->skl_sst->dev,
+ "query module info failed:%d\n", ret);
+ goto err;
+ }
+ }
+ }
+err:
+ return ret;
+}
+
static int skl_platform_soc_probe(struct snd_soc_platform *platform)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(platform->dev);
struct skl *skl = ebus_to_skl(ebus);
+ const struct skl_dsp_ops *ops;
int ret;
- if (ebus->ppcap) {
+ pm_runtime_get_sync(platform->dev);
+ if ((ebus_to_hbus(ebus))->ppcap) {
ret = skl_tplg_init(platform, ebus);
if (ret < 0) {
dev_err(platform->dev, "Failed to init topology!\n");
return ret;
}
skl->platform = platform;
+
+ /* load the firmwares, since all is set */
+ ops = skl_get_dsp_ops(skl->pci->device);
+ if (!ops)
+ return -EIO;
+
+ if (skl->skl_sst->is_first_boot == false) {
+ dev_err(platform->dev, "DSP reports first boot done!!!\n");
+ return -EIO;
+ }
+
+ ret = ops->init_fw(platform->dev, skl->skl_sst);
+ if (ret < 0) {
+ dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
+ return ret;
+ }
+ skl_populate_modules(skl);
}
+ pm_runtime_mark_last_busy(platform->dev);
+ pm_runtime_put_autosuspend(platform->dev);
return 0;
}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index da2329d17f4d..efa2532114ba 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -341,14 +341,14 @@ int skl_cldma_prepare(struct sst_dsp *ctx)
ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
if (ret < 0) {
- dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
+ dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
return ret;
}
/* Setup Code loader BDL */
ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
if (ret < 0) {
- dev_err(ctx->dev, "Alloc buffer for blde failed: %x", ret);
+ dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret);
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
return ret;
}
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index 0f8629ef79ac..b9e71d051fb1 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -20,6 +20,7 @@
#include <sound/memalloc.h>
#include "skl-sst-cldma.h"
#include "skl-tplg-interface.h"
+#include "skl-topology.h"
struct sst_dsp;
struct skl_sst;
@@ -133,6 +134,8 @@ enum skl_dsp_states {
struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
+ int (*load_library)(struct sst_dsp *ctx,
+ struct skl_dfw_manifest *minfo);
int (*parse_fw)(struct sst_dsp *ctx);
int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
@@ -203,12 +206,21 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_sst **dsp);
+int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx);
+int bxt_sst_init_fw(struct device *dev, struct skl_sst *ctx);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
-int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
- struct skl_dfw_module *dfw_config);
-int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset);
+int snd_skl_get_module_info(struct skl_sst *ctx,
+ struct skl_module_cfg *mconfig);
+int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ unsigned int offset, int index);
+int skl_get_pvt_id(struct skl_sst *ctx,
+ struct skl_module_cfg *mconfig);
+int skl_put_pvt_id(struct skl_sst *ctx,
+ struct skl_module_cfg *mconfig);
+int skl_get_pvt_instance_id_map(struct skl_sst *ctx,
+ int module_id, int instance_id);
void skl_freeup_uuid_list(struct skl_sst *ctx);
int skl_dsp_strip_extended_manifest(struct firmware *fw);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 96f2f6889b18..0bd01e62622c 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -114,6 +114,11 @@
#define IPC_CORE_ID(x) (((x) & IPC_CORE_ID_MASK) \
<< IPC_CORE_ID_SHIFT)
+#define IPC_DOMAIN_SHIFT 28
+#define IPC_DOMAIN_MASK 0x1
+#define IPC_DOMAIN(x) (((x) & IPC_DOMAIN_MASK) \
+ << IPC_DOMAIN_SHIFT)
+
/* Bind/Unbind message extension register */
#define IPC_DST_MOD_ID_SHIFT 0
#define IPC_DST_MOD_ID(x) (((x) & IPC_MOD_ID_MASK) \
@@ -190,6 +195,7 @@ enum skl_ipc_glb_type {
IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
IPC_GLB_SAVE_PPL = 22,
IPC_GLB_RESTORE_PPL = 23,
+ IPC_GLB_LOAD_LIBRARY = 24,
IPC_GLB_NOTIFY = 26,
IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
};
@@ -338,7 +344,7 @@ static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
break;
default:
- dev_err(ipc->dev, "ipc: Unhandled error msg=%x",
+ dev_err(ipc->dev, "ipc: Unhandled error msg=%x\n",
header.primary);
break;
}
@@ -379,13 +385,13 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
break;
default:
- dev_err(ipc->dev, "Unknown ipc reply: 0x%x", reply);
+ dev_err(ipc->dev, "Unknown ipc reply: 0x%x\n", reply);
msg->errno = -EINVAL;
break;
}
if (reply != IPC_GLB_REPLY_SUCCESS) {
- dev_err(ipc->dev, "ipc FW reply: reply=%d", reply);
+ dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply);
dev_err(ipc->dev, "FW Error Code: %u\n",
ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
}
@@ -434,9 +440,9 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
header.primary = hipct;
header.extension = hipcte;
- dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
+ dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x\n",
header.primary);
- dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
+ dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x\n",
header.extension);
if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
@@ -704,6 +710,7 @@ int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
header.extension = IPC_CORE_ID(msg->core_id);
header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
+ header.extension |= IPC_DOMAIN(msg->domain);
dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
header.primary, header.extension);
@@ -742,7 +749,7 @@ int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
header.extension);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
- dev_err(ipc->dev, "ipc: bind/unbind faileden");
+ dev_err(ipc->dev, "ipc: bind/unbind failed\n");
return ret;
}
@@ -902,3 +909,25 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_get_large_config);
+
+int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
+ u8 dma_id, u8 table_id)
+{
+ struct skl_ipc_header header = {0};
+ u64 *ipc_header = (u64 *)(&header);
+ int ret = 0;
+
+ header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+ header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+ header.primary |= IPC_GLB_TYPE(IPC_GLB_LOAD_LIBRARY);
+ header.primary |= IPC_MOD_INSTANCE_ID(table_id);
+ header.primary |= IPC_MOD_ID(dma_id);
+
+ ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+
+ if (ret < 0)
+ dev_err(ipc->dev, "ipc: load lib failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_sst_ipc_load_library);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
index 2e3d4e80ef97..0334ed4af031 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.h
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -66,7 +66,7 @@ struct skl_sst {
/* callback for miscbdge */
void (*enable_miscbdcge)(struct device *dev, bool enable);
- /*Is CGCTL.MISCBDCGE disabled*/
+ /* Is CGCTL.MISCBDCGE disabled */
bool miscbdcg_disabled;
/* Populate module information */
@@ -75,8 +75,14 @@ struct skl_sst {
/* Is firmware loaded */
bool fw_loaded;
+ /* first boot ? */
+ bool is_first_boot;
+
/* multi-core */
struct skl_dsp_cores cores;
+
+ /* tplg manifest */
+ struct skl_dfw_manifest manifest;
};
struct skl_ipc_init_instance_msg {
@@ -85,6 +91,7 @@ struct skl_ipc_init_instance_msg {
u16 param_data_size;
u8 ppl_instance_id;
u8 core_id;
+ u8 domain;
};
struct skl_ipc_bind_unbind_msg {
@@ -145,6 +152,9 @@ int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg, u32 *param);
+int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
+ u8 dma_id, u8 table_id);
+
void skl_ipc_int_enable(struct sst_dsp *dsp);
void skl_ipc_op_int_enable(struct sst_dsp *ctx);
void skl_ipc_op_int_disable(struct sst_dsp *ctx);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index ddcb52a51854..8dc03039b311 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -28,11 +28,6 @@
/* FW Extended Manifest Header id = $AE1 */
#define SKL_EXT_MANIFEST_HEADER_MAGIC 0x31454124
-struct skl_dfw_module_mod {
- char name[100];
- struct skl_dfw_module skl_dfw_mod;
-};
-
struct UUID {
u8 id[16];
};
@@ -99,10 +94,15 @@ struct adsp_fw_hdr {
u32 load_offset;
} __packed;
+#define MAX_INSTANCE_BUFF 2
+
struct uuid_module {
uuid_le uuid;
int id;
int is_loadable;
+ int max_instance;
+ u64 pvt_id[MAX_INSTANCE_BUFF];
+ int *instance_id;
struct list_head list;
};
@@ -115,13 +115,13 @@ struct skl_ext_manifest_hdr {
u32 entries;
};
-int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
- struct skl_dfw_module *dfw_config)
+int snd_skl_get_module_info(struct skl_sst *ctx,
+ struct skl_module_cfg *mconfig)
{
struct uuid_module *module;
uuid_le *uuid_mod;
- uuid_mod = (uuid_le *)uuid;
+ uuid_mod = (uuid_le *)mconfig->guid;
if (list_empty(&ctx->uuid_list)) {
dev_err(ctx->dev, "Module list is empty\n");
@@ -130,8 +130,8 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
list_for_each_entry(module, &ctx->uuid_list, list) {
if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
- dfw_config->module_id = module->id;
- dfw_config->is_loadable = module->is_loadable;
+ mconfig->id.module_id = module->id;
+ mconfig->is_loadable = module->is_loadable;
return 0;
}
@@ -141,15 +141,154 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
}
EXPORT_SYMBOL_GPL(snd_skl_get_module_info);
+static int skl_get_pvtid_map(struct uuid_module *module, int instance_id)
+{
+ int pvt_id;
+
+ for (pvt_id = 0; pvt_id < module->max_instance; pvt_id++) {
+ if (module->instance_id[pvt_id] == instance_id)
+ return pvt_id;
+ }
+ return -EINVAL;
+}
+
+int skl_get_pvt_instance_id_map(struct skl_sst *ctx,
+ int module_id, int instance_id)
+{
+ struct uuid_module *module;
+
+ list_for_each_entry(module, &ctx->uuid_list, list) {
+ if (module->id == module_id)
+ return skl_get_pvtid_map(module, instance_id);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(skl_get_pvt_instance_id_map);
+
+static inline int skl_getid_32(struct uuid_module *module, u64 *val,
+ int word1_mask, int word2_mask)
+{
+ int index, max_inst, pvt_id;
+ u32 mask_val;
+
+ max_inst = module->max_instance;
+ mask_val = (u32)(*val >> word1_mask);
+
+ if (mask_val != 0xffffffff) {
+ index = ffz(mask_val);
+ pvt_id = index + word1_mask + word2_mask;
+ if (pvt_id <= (max_inst - 1)) {
+ *val |= 1 << (index + word1_mask);
+ return pvt_id;
+ }
+ }
+
+ return -EINVAL;
+}
+
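+/*
+ * skl_pvtid_128() treats pvt_id[0..1] as a 128-bit allocation bitmap:
+ * pvt_id[0] hands out ids 0..63 (scanned as two 32-bit halves by
+ * skl_getid_32()) and pvt_id[1] covers ids 64..127, both capped by
+ * the module's max_instance count.
+ */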
+static inline int skl_pvtid_128(struct uuid_module *module)
+{
+ int j, i, word1_mask, word2_mask = 0, pvt_id;
+
+ for (j = 0; j < MAX_INSTANCE_BUFF; j++) {
+ word1_mask = 0;
+
+ for (i = 0; i < 2; i++) {
+ pvt_id = skl_getid_32(module, &module->pvt_id[j],
+ word1_mask, word2_mask);
+ if (pvt_id >= 0)
+ return pvt_id;
+
+ word1_mask += 32;
+ if ((word1_mask + word2_mask) >= module->max_instance)
+ return -EINVAL;
+ }
+
+ word2_mask += 64;
+ if (word2_mask >= module->max_instance)
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * skl_get_pvt_id: generate a private id for use as module id
+ *
+ * @ctx: driver context
+ * @mconfig: module configuration data
+ *
+ * This generates a 128-bit private unique id for a module TYPE so that
+ * each module instance is unique.
+ */
+int skl_get_pvt_id(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
+{
+ struct uuid_module *module;
+ uuid_le *uuid_mod;
+ int pvt_id;
+
+ uuid_mod = (uuid_le *)mconfig->guid;
+
+ list_for_each_entry(module, &ctx->uuid_list, list) {
+ if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
+
+ pvt_id = skl_pvtid_128(module);
+ if (pvt_id >= 0) {
+ module->instance_id[pvt_id] =
+ mconfig->id.instance_id;
+ return pvt_id;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(skl_get_pvt_id);
+
+/**
+ * skl_put_pvt_id: free up the private id allocated
+ *
+ * @ctx: driver context
+ * @mconfig: module configuration data
+ *
+ * This frees a 128-bit private unique id previously generated
+ */
+int skl_put_pvt_id(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
+{
+ int i;
+ uuid_le *uuid_mod;
+ struct uuid_module *module;
+
+ uuid_mod = (uuid_le *)mconfig->guid;
+ list_for_each_entry(module, &ctx->uuid_list, list) {
+ if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
+
+ if (mconfig->id.pvt_id != 0)
+ i = (mconfig->id.pvt_id) / 64;
+ else
+ i = 0;
+
+ module->pvt_id[i] &= ~(1 << (mconfig->id.pvt_id));
+ mconfig->id.pvt_id = -1;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(skl_put_pvt_id);
+
/*
* Parse the firmware binary to get the UUID, module id
* and loadable flags
*/
-int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset)
+int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ unsigned int offset, int index)
{
struct adsp_fw_hdr *adsp_hdr;
struct adsp_module_entry *mod_entry;
- int i, num_entry;
+ int i, num_entry, size;
uuid_le *uuid_bin;
const char *buf;
struct skl_sst *skl = ctx->thread_context;
@@ -158,8 +297,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset)
unsigned int safe_file;
/* Get the FW pointer to derive ADSP header */
- stripped_fw.data = ctx->fw->data;
- stripped_fw.size = ctx->fw->size;
+ stripped_fw.data = fw->data;
+ stripped_fw.size = fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
@@ -210,8 +349,15 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset)
uuid_bin = (uuid_le *)mod_entry->uuid.id;
memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
- module->id = i;
+ module->id = (i | (index << 12));
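+ /* fold the library index into bits 12+ so module ids stay unique across libraries */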
module->is_loadable = mod_entry->type.load_type;
+ module->max_instance = mod_entry->instance_max_count;
+ size = sizeof(int) * mod_entry->instance_max_count;
+ module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (!module->instance_id) {
+ kfree(module);
+ return -ENOMEM;
+ }
list_add_tail(&module->list, &skl->uuid_list);
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 588f899ceb65..8fc3178bc79c 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -88,13 +88,15 @@ static int skl_load_base_firmware(struct sst_dsp *ctx)
}
}
- ret = snd_skl_parse_uuids(ctx, SKL_ADSP_FW_BIN_HDR_OFFSET);
- if (ret < 0) {
- dev_err(ctx->dev,
- "UUID parsing err: %d\n", ret);
- release_firmware(ctx->fw);
- skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
- return ret;
+ /* parse uuids on first boot */
+ if (skl->is_first_boot) {
+ ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
+ if (ret < 0) {
+ dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
+ release_firmware(ctx->fw);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
+ return ret;
+ }
}
/* check for extended manifest */
@@ -105,13 +107,13 @@ static int skl_load_base_firmware(struct sst_dsp *ctx)
ret = skl_dsp_boot(ctx);
if (ret < 0) {
- dev_err(ctx->dev, "Boot dsp core failed ret: %d", ret);
+ dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
goto skl_load_base_firmware_failed;
}
ret = skl_cldma_prepare(ctx);
if (ret < 0) {
- dev_err(ctx->dev, "CL dma prepare failed : %d", ret);
+ dev_err(ctx->dev, "CL dma prepare failed : %d\n", ret);
goto skl_load_base_firmware_failed;
}
@@ -484,25 +486,32 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
return ret;
skl->cores.count = 2;
+ skl->is_first_boot = true;
+
+ if (dsp)
+ *dsp = skl;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
+
+int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
+{
+ int ret;
+ struct sst_dsp *sst = ctx->dsp;
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
- dev_err(dev, "Load base fw failed : %d", ret);
- goto cleanup;
+ dev_err(dev, "Load base fw failed : %d\n", ret);
+ return ret;
}
skl_dsp_init_core_state(sst);
+ ctx->is_first_boot = false;
- if (dsp)
- *dsp = skl;
-
- return ret;
-
-cleanup:
- skl_sst_dsp_cleanup(dev, skl);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
+EXPORT_SYMBOL_GPL(skl_sst_init_fw);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index cc0150fc2601..b5b1934d8550 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -21,6 +21,7 @@
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
+#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
@@ -32,6 +33,8 @@
#define SKL_CH_FIXUP_MASK (1 << 0)
#define SKL_RATE_FIXUP_MASK (1 << 1)
#define SKL_FMT_FIXUP_MASK (1 << 2)
+#define SKL_IN_DIR_BIT_MASK BIT(0)
+#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
/*
* SKL DSP driver modelling uses only few DAPM widgets so for rest we will
@@ -473,6 +476,14 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
w = w_module->w;
mconfig = w->priv;
+ /* check if module ids are populated */
+ if (mconfig->id.module_id < 0) {
+ dev_err(skl->skl_sst->dev,
+ "module %pUL id not populated\n",
+ (uuid_le *)mconfig->guid);
+ return -EIO;
+ }
+
/* check resource available */
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
@@ -494,12 +505,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
* FE/BE params
*/
skl_tplg_update_module_params(w, ctx);
-
+ mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
+ if (mconfig->id.pvt_id < 0)
+ return ret;
skl_tplg_set_module_init_data(w);
ret = skl_init_module(ctx, mconfig);
- if (ret < 0)
+ if (ret < 0) {
+ skl_put_pvt_id(ctx, mconfig);
return ret;
-
+ }
skl_tplg_alloc_pipe_mcps(skl, mconfig);
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
@@ -512,6 +526,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
struct skl_pipe *pipe)
{
+ int ret;
struct skl_pipe_module *w_module = NULL;
struct skl_module_cfg *mconfig = NULL;
@@ -519,9 +534,13 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
mconfig = w_module->w->priv;
if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
- mconfig->m_state > SKL_MODULE_UNINIT)
- return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
+ mconfig->m_state > SKL_MODULE_UNINIT) {
+ ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
mconfig->id.module_id);
+ if (ret < 0)
+ return -EIO;
+ }
+ skl_put_pvt_id(ctx, mconfig);
}
/* no modules to unload in this path, so return */
@@ -588,6 +607,26 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int skl_fill_sink_instance_id(struct skl_sst *ctx,
+ struct skl_algo_data *alg_data)
+{
+ struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
+ struct skl_mod_inst_map *inst;
+ int i, pvt_id;
+
+ inst = params->map;
+
+ for (i = 0; i < params->num_modules; i++) {
+ pvt_id = skl_get_pvt_instance_id_map(ctx,
+ inst->mod_id, inst->inst_id);
+ if (pvt_id < 0)
+ return -EINVAL;
+ inst->inst_id = pvt_id;
+ inst++;
+ }
+ return 0;
+}
+
/*
* Some modules require params to be set after the module is bound to
* all pins connected.
@@ -636,6 +675,8 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
bc = (struct skl_algo_data *)sb->dobj.private;
if (bc->set_params == SKL_PARAM_BIND) {
+ if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
+ skl_fill_sink_instance_id(ctx, bc);
ret = skl_set_module_params(ctx,
(u32 *)bc->params, bc->max,
bc->param_id, mconfig);
@@ -1460,85 +1501,570 @@ static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
skl_tplg_tlv_control_set},
};
-/*
- * The topology binary passes the pin info for a module so initialize the pin
- * info passed into module instance
- */
-static void skl_fill_module_pin_info(struct skl_dfw_module_pin *dfw_pin,
- struct skl_module_pin *m_pin,
- bool is_dynamic, int max_pin)
+static int skl_tplg_fill_pipe_tkn(struct device *dev,
+ struct skl_pipe *pipe, u32 tkn,
+ u32 tkn_val)
{
- int i;
- for (i = 0; i < max_pin; i++) {
- m_pin[i].id.module_id = dfw_pin[i].module_id;
- m_pin[i].id.instance_id = dfw_pin[i].instance_id;
- m_pin[i].in_use = false;
- m_pin[i].is_dynamic = is_dynamic;
- m_pin[i].pin_state = SKL_PIN_UNBIND;
+ switch (tkn) {
+ case SKL_TKN_U32_PIPE_CONN_TYPE:
+ pipe->conn_type = tkn_val;
+ break;
+
+ case SKL_TKN_U32_PIPE_PRIORITY:
+ pipe->pipe_priority = tkn_val;
+ break;
+
+ case SKL_TKN_U32_PIPE_MEM_PGS:
+ pipe->memory_pages = tkn_val;
+ break;
+
+ default:
+ dev_err(dev, "Token not handled %d\n", tkn);
+ return -EINVAL;
}
+
+ return 0;
}
/*
- * Add pipeline from topology binary into driver pipeline list
- *
- * If already added we return that instance
- * Otherwise we create a new instance and add into driver list
+ * Add pipeline by parsing the relevant tokens
+ * Return an existing pipe if the pipe already exists.
*/
-static struct skl_pipe *skl_tplg_add_pipe(struct device *dev,
- struct skl *skl, struct skl_dfw_pipe *dfw_pipe)
+static int skl_tplg_add_pipe(struct device *dev,
+ struct skl_module_cfg *mconfig, struct skl *skl,
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
struct skl_pipeline *ppl;
struct skl_pipe *pipe;
struct skl_pipe_params *params;
list_for_each_entry(ppl, &skl->ppl_list, node) {
- if (ppl->pipe->ppl_id == dfw_pipe->pipe_id)
- return ppl->pipe;
+ if (ppl->pipe->ppl_id == tkn_elem->value) {
+ mconfig->pipe = ppl->pipe;
+ return EEXIST;
+ }
}
ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
if (!ppl)
- return NULL;
+ return -ENOMEM;
pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
if (!pipe)
- return NULL;
+ return -ENOMEM;
params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
if (!params)
- return NULL;
+ return -ENOMEM;
- pipe->ppl_id = dfw_pipe->pipe_id;
- pipe->memory_pages = dfw_pipe->memory_pages;
- pipe->pipe_priority = dfw_pipe->pipe_priority;
- pipe->conn_type = dfw_pipe->conn_type;
- pipe->state = SKL_PIPE_INVALID;
pipe->p_params = params;
+ pipe->ppl_id = tkn_elem->value;
INIT_LIST_HEAD(&pipe->w_list);
ppl->pipe = pipe;
list_add(&ppl->node, &skl->ppl_list);
- return ppl->pipe;
+ mconfig->pipe = pipe;
+ mconfig->pipe->state = SKL_PIPE_INVALID;
+
+ return 0;
+}
+
+static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
+ struct skl_module_pin *m_pin,
+ int pin_index, u32 value)
+{
+ switch (tkn) {
+ case SKL_TKN_U32_PIN_MOD_ID:
+ m_pin[pin_index].id.module_id = value;
+ break;
+
+ case SKL_TKN_U32_PIN_INST_ID:
+ m_pin[pin_index].id.instance_id = value;
+ break;
+
+ default:
+ dev_err(dev, "%d Not a pin token\n", value);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse for pin config specific tokens to fill up the
+ * module private data
+ */
+static int skl_tplg_fill_pins_info(struct device *dev,
+ struct skl_module_cfg *mconfig,
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem,
+ int dir, int pin_count)
+{
+ int ret;
+ struct skl_module_pin *m_pin;
+
+ switch (dir) {
+ case SKL_DIR_IN:
+ m_pin = mconfig->m_in_pin;
+ break;
+
+ case SKL_DIR_OUT:
+ m_pin = mconfig->m_out_pin;
+ break;
+
+ default:
+ dev_err(dev, "Invalid direction value\n");
+ return -EINVAL;
+ }
+
+ ret = skl_tplg_fill_pin(dev, tkn_elem->token,
+ m_pin, pin_count, tkn_elem->value);
+
+ if (ret < 0)
+ return ret;
+
+ m_pin[pin_count].in_use = false;
+ m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
+
+ return 0;
+}
+
+/*
+ * Fill up input/output module config format based
+ * on the direction
+ */
+static int skl_tplg_fill_fmt(struct device *dev,
+ struct skl_module_cfg *mconfig, u32 tkn,
+ u32 value, u32 dir, u32 pin_count)
+{
+ struct skl_module_fmt *dst_fmt;
+
+ switch (dir) {
+ case SKL_DIR_IN:
+ dst_fmt = mconfig->in_fmt;
+ dst_fmt += pin_count;
+ break;
+
+ case SKL_DIR_OUT:
+ dst_fmt = mconfig->out_fmt;
+ dst_fmt += pin_count;
+ break;
+
+ default:
+ dev_err(dev, "Invalid direction value\n");
+ return -EINVAL;
+ }
+
+ switch (tkn) {
+ case SKL_TKN_U32_FMT_CH:
+ dst_fmt->channels = value;
+ break;
+
+ case SKL_TKN_U32_FMT_FREQ:
+ dst_fmt->s_freq = value;
+ break;
+
+ case SKL_TKN_U32_FMT_BIT_DEPTH:
+ dst_fmt->bit_depth = value;
+ break;
+
+ case SKL_TKN_U32_FMT_SAMPLE_SIZE:
+ dst_fmt->valid_bit_depth = value;
+ break;
+
+ case SKL_TKN_U32_FMT_CH_CONFIG:
+ dst_fmt->ch_cfg = value;
+ break;
+
+ case SKL_TKN_U32_FMT_INTERLEAVE:
+ dst_fmt->interleaving_style = value;
+ break;
+
+ case SKL_TKN_U32_FMT_SAMPLE_TYPE:
+ dst_fmt->sample_type = value;
+ break;
+
+ case SKL_TKN_U32_FMT_CH_MAP:
+ dst_fmt->ch_map = value;
+ break;
+
+ default:
+ dev_err(dev, "Invalid token %d\n", tkn);
+ return -EINVAL;
+ }
+
+ return 0;
}
-static void skl_tplg_fill_fmt(struct skl_module_fmt *dst_fmt,
- struct skl_dfw_module_fmt *src_fmt,
- int pins)
+static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
+ struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
+{
+ if (uuid_tkn->token == SKL_TKN_UUID)
+ memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
+ else {
+ dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void skl_tplg_fill_pin_dynamic_val(
+ struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
int i;
- for (i = 0; i < pins; i++) {
- dst_fmt[i].channels = src_fmt[i].channels;
- dst_fmt[i].s_freq = src_fmt[i].freq;
- dst_fmt[i].bit_depth = src_fmt[i].bit_depth;
- dst_fmt[i].valid_bit_depth = src_fmt[i].valid_bit_depth;
- dst_fmt[i].ch_cfg = src_fmt[i].ch_cfg;
- dst_fmt[i].ch_map = src_fmt[i].ch_map;
- dst_fmt[i].interleaving_style = src_fmt[i].interleaving_style;
- dst_fmt[i].sample_type = src_fmt[i].sample_type;
+ for (i = 0; i < pin_count; i++)
+ mpin[i].is_dynamic = value;
+}
+
+/*
+ * Parse tokens to fill up the module private data
+ */
+static int skl_tplg_get_token(struct device *dev,
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem,
+ struct skl *skl, struct skl_module_cfg *mconfig)
+{
+ int tkn_count = 0;
+ int ret;
+ static int is_pipe_exists;
+ static int pin_index, dir;
+
+ if (tkn_elem->token > SKL_TKN_MAX)
+ return -EINVAL;
+
+ switch (tkn_elem->token) {
+ case SKL_TKN_U8_IN_QUEUE_COUNT:
+ mconfig->max_in_queue = tkn_elem->value;
+ mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
+ sizeof(*mconfig->m_in_pin),
+ GFP_KERNEL);
+ if (!mconfig->m_in_pin)
+ return -ENOMEM;
+
+ break;
+
+ case SKL_TKN_U8_OUT_QUEUE_COUNT:
+ mconfig->max_out_queue = tkn_elem->value;
+ mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
+ sizeof(*mconfig->m_out_pin),
+ GFP_KERNEL);
+
+ if (!mconfig->m_out_pin)
+ return -ENOMEM;
+
+ break;
+
+ case SKL_TKN_U8_DYN_IN_PIN:
+ if (!mconfig->m_in_pin)
+ return -ENOMEM;
+
+ skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
+ mconfig->max_in_queue, tkn_elem->value);
+
+ break;
+
+ case SKL_TKN_U8_DYN_OUT_PIN:
+ if (!mconfig->m_out_pin)
+ return -ENOMEM;
+
+ skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
+ mconfig->max_out_queue, tkn_elem->value);
+
+ break;
+
+ case SKL_TKN_U8_TIME_SLOT:
+ mconfig->time_slot = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U8_CORE_ID:
+ mconfig->core_id = tkn_elem->value;
+
+ case SKL_TKN_U8_MOD_TYPE:
+ mconfig->m_type = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U8_DEV_TYPE:
+ mconfig->dev_type = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U8_HW_CONN_TYPE:
+ mconfig->hw_conn_type = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U16_MOD_INST_ID:
+ mconfig->id.instance_id =
+ tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_MEM_PAGES:
+ mconfig->mem_pages = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_MAX_MCPS:
+ mconfig->mcps = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_OBS:
+ mconfig->obs = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_IBS:
+ mconfig->ibs = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_VBUS_ID:
+ mconfig->vbus_id = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_PARAMS_FIXUP:
+ mconfig->params_fixup = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_CONVERTER:
+ mconfig->converter = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_PIPE_ID:
+ ret = skl_tplg_add_pipe(dev,
+ mconfig, skl, tkn_elem);
+
+ if (ret < 0)
+ return is_pipe_exists;
+
+ if (ret == EEXIST)
+ is_pipe_exists = 1;
+
+ break;
+
+ case SKL_TKN_U32_PIPE_CONN_TYPE:
+ case SKL_TKN_U32_PIPE_PRIORITY:
+ case SKL_TKN_U32_PIPE_MEM_PGS:
+ if (is_pipe_exists) {
+ ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
+ tkn_elem->token, tkn_elem->value);
+ if (ret < 0)
+ return ret;
+ }
+
+ break;
+
+ /*
+ * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
+ * direction and the pin count. The first four bits represent
+ * direction and next four the pin count.
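+ * For example, a value of 0x21 decodes to direction 1 and a pin
+ * index of 2.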
+ */
+ case SKL_TKN_U32_DIR_PIN_COUNT:
+ dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
+ pin_index = (tkn_elem->value &
+ SKL_PIN_COUNT_MASK) >> 4;
+
+ break;
+
+ case SKL_TKN_U32_FMT_CH:
+ case SKL_TKN_U32_FMT_FREQ:
+ case SKL_TKN_U32_FMT_BIT_DEPTH:
+ case SKL_TKN_U32_FMT_SAMPLE_SIZE:
+ case SKL_TKN_U32_FMT_CH_CONFIG:
+ case SKL_TKN_U32_FMT_INTERLEAVE:
+ case SKL_TKN_U32_FMT_SAMPLE_TYPE:
+ case SKL_TKN_U32_FMT_CH_MAP:
+ ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
+ tkn_elem->value, dir, pin_index);
+
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case SKL_TKN_U32_PIN_MOD_ID:
+ case SKL_TKN_U32_PIN_INST_ID:
+ ret = skl_tplg_fill_pins_info(dev,
+ mconfig, tkn_elem, dir,
+ pin_index);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case SKL_TKN_U32_CAPS_SIZE:
+ mconfig->formats_config.caps_size =
+ tkn_elem->value;
+
+ break;
+
+ case SKL_TKN_U32_PROC_DOMAIN:
+ mconfig->domain =
+ tkn_elem->value;
+
+ break;
+
+ case SKL_TKN_U8_IN_PIN_TYPE:
+ case SKL_TKN_U8_OUT_PIN_TYPE:
+ case SKL_TKN_U8_CONN_TYPE:
+ break;
+
+ default:
+ dev_err(dev, "Token %d not handled\n",
+ tkn_elem->token);
+ return -EINVAL;
+ }
+
+ tkn_count++;
+
+ return tkn_count;
+}
+
+/*
+ * Parse the vendor array for specific tokens to construct
+ * module private data
+ */
+static int skl_tplg_get_tokens(struct device *dev,
+ char *pvt_data, struct skl *skl,
+ struct skl_module_cfg *mconfig, int block_size)
+{
+ struct snd_soc_tplg_vendor_array *array;
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem;
+ int tkn_count = 0, ret;
+ int off = 0, tuple_size = 0;
+
+ if (block_size <= 0)
+ return -EINVAL;
+
+ while (tuple_size < block_size) {
+ array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
+
+ off += array->size;
+
+ switch (array->type) {
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+ dev_warn(dev, "no string tokens expected for skl tplg\n");
+ continue;
+
+ case SND_SOC_TPLG_TUPLE_TYPE_UUID:
+ ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
+ if (ret < 0)
+ return ret;
+
+ tuple_size += sizeof(*array->uuid);
+
+ continue;
+
+ default:
+ tkn_elem = array->value;
+ tkn_count = 0;
+ break;
+ }
+
+ while (tkn_count <= (array->num_elems - 1)) {
+ ret = skl_tplg_get_token(dev, tkn_elem,
+ skl, mconfig);
+
+ if (ret < 0)
+ return ret;
+
+ tkn_count = tkn_count + ret;
+ tkn_elem++;
+ }
+
+ tuple_size += tkn_count * sizeof(*tkn_elem);
+ }
+
+ return 0;
+}
+
+/*
+ * Every data block is preceded by a descriptor to read the number
+ * of data blocks, the type of the block and its size
+ */
+static int skl_tplg_get_desc_blocks(struct device *dev,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem;
+
+ tkn_elem = array->value;
+
+ switch (tkn_elem->token) {
+ case SKL_TKN_U8_NUM_BLOCKS:
+ case SKL_TKN_U8_BLOCK_TYPE:
+ case SKL_TKN_U16_BLOCK_SIZE:
+ return tkn_elem->value;
+
+ default:
+ dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Parse the private data for the token and corresponding value.
+ * The private data can have multiple data blocks. So, a data block
+ * is preceded by a descriptor for number of blocks and a descriptor
+ * for the type and size of the succeeding data block.
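+ * Layout: <num blocks> <block type> <block size> <data>
+ * <block type> <block size> <data> ...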
+ */
+static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
+ struct skl *skl, struct device *dev,
+ struct skl_module_cfg *mconfig)
+{
+ struct snd_soc_tplg_vendor_array *array;
+ int num_blocks, block_size = 0, block_type, off = 0;
+ char *data;
+ int ret;
+
+ /* Read the NUM_DATA_BLOCKS descriptor */
+ array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
+ ret = skl_tplg_get_desc_blocks(dev, array);
+ if (ret < 0)
+ return ret;
+ num_blocks = ret;
+
+ off += array->size;
+ array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
+
+ /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
+ while (num_blocks > 0) {
+ ret = skl_tplg_get_desc_blocks(dev, array);
+
+ if (ret < 0)
+ return ret;
+ block_type = ret;
+ off += array->size;
+
+ array = (struct snd_soc_tplg_vendor_array *)
+ (tplg_w->priv.data + off);
+
+ ret = skl_tplg_get_desc_blocks(dev, array);
+
+ if (ret < 0)
+ return ret;
+ block_size = ret;
+ off += array->size;
+
+ array = (struct snd_soc_tplg_vendor_array *)
+ (tplg_w->priv.data + off);
+
+ data = (tplg_w->priv.data + off);
+
+ if (block_type == SKL_TYPE_TUPLE) {
+ ret = skl_tplg_get_tokens(dev, data,
+ skl, mconfig, block_size);
+
+ if (ret < 0)
+ return ret;
+
+ --num_blocks;
+ } else {
+ if (mconfig->formats_config.caps_size > 0)
+ memcpy(mconfig->formats_config.caps, data,
+ mconfig->formats_config.caps_size);
+ --num_blocks;
+ }
}
+
+ return 0;
}
static void skl_clear_pin_config(struct snd_soc_platform *platform,
@@ -1606,9 +2132,6 @@ static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl_module_cfg *mconfig;
- struct skl_pipe *pipe;
- struct skl_dfw_module *dfw_config =
- (struct skl_dfw_module *)tplg_w->priv.data;
if (!tplg_w->priv.size)
goto bind_event;
@@ -1619,76 +2142,17 @@ static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
return -ENOMEM;
w->priv = mconfig;
- memcpy(&mconfig->guid, &dfw_config->uuid, 16);
- ret = snd_skl_get_module_info(skl->skl_sst, mconfig->guid, dfw_config);
+ /*
+ * module binary can be loaded later, so set it to query when
+ * the module is loaded for a use case
+ */
+ mconfig->id.module_id = -1;
+
+ /* Parse private data for tuples */
+ ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
if (ret < 0)
return ret;
-
- mconfig->id.module_id = dfw_config->module_id;
- mconfig->id.instance_id = dfw_config->instance_id;
- mconfig->mcps = dfw_config->max_mcps;
- mconfig->ibs = dfw_config->ibs;
- mconfig->obs = dfw_config->obs;
- mconfig->core_id = dfw_config->core_id;
- mconfig->max_in_queue = dfw_config->max_in_queue;
- mconfig->max_out_queue = dfw_config->max_out_queue;
- mconfig->is_loadable = dfw_config->is_loadable;
- skl_tplg_fill_fmt(mconfig->in_fmt, dfw_config->in_fmt,
- MODULE_MAX_IN_PINS);
- skl_tplg_fill_fmt(mconfig->out_fmt, dfw_config->out_fmt,
- MODULE_MAX_OUT_PINS);
-
- mconfig->params_fixup = dfw_config->params_fixup;
- mconfig->converter = dfw_config->converter;
- mconfig->m_type = dfw_config->module_type;
- mconfig->vbus_id = dfw_config->vbus_id;
- mconfig->mem_pages = dfw_config->mem_pages;
-
- pipe = skl_tplg_add_pipe(bus->dev, skl, &dfw_config->pipe);
- if (pipe)
- mconfig->pipe = pipe;
-
- mconfig->dev_type = dfw_config->dev_type;
- mconfig->hw_conn_type = dfw_config->hw_conn_type;
- mconfig->time_slot = dfw_config->time_slot;
- mconfig->formats_config.caps_size = dfw_config->caps.caps_size;
-
- mconfig->m_in_pin = devm_kzalloc(bus->dev, (mconfig->max_in_queue) *
- sizeof(*mconfig->m_in_pin),
- GFP_KERNEL);
- if (!mconfig->m_in_pin)
- return -ENOMEM;
-
- mconfig->m_out_pin = devm_kzalloc(bus->dev, (mconfig->max_out_queue) *
- sizeof(*mconfig->m_out_pin),
- GFP_KERNEL);
- if (!mconfig->m_out_pin)
- return -ENOMEM;
-
- skl_fill_module_pin_info(dfw_config->in_pin, mconfig->m_in_pin,
- dfw_config->is_dynamic_in_pin,
- mconfig->max_in_queue);
-
- skl_fill_module_pin_info(dfw_config->out_pin, mconfig->m_out_pin,
- dfw_config->is_dynamic_out_pin,
- mconfig->max_out_queue);
-
-
- if (mconfig->formats_config.caps_size == 0)
- goto bind_event;
-
- mconfig->formats_config.caps = (u32 *)devm_kzalloc(bus->dev,
- mconfig->formats_config.caps_size, GFP_KERNEL);
-
- if (mconfig->formats_config.caps == NULL)
- return -ENOMEM;
-
- memcpy(mconfig->formats_config.caps, dfw_config->caps.caps,
- dfw_config->caps.caps_size);
- mconfig->formats_config.param_id = dfw_config->caps.param_id;
- mconfig->formats_config.set_params = dfw_config->caps.set_params;
-
bind_event:
if (tplg_w->event_type == 0) {
dev_dbg(bus->dev, "ASoC: No event handler required\n");
@@ -1767,11 +2231,229 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
return 0;
}
+static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
+ struct snd_soc_tplg_vendor_string_elem *str_elem,
+ struct skl_dfw_manifest *minfo)
+{
+ int tkn_count = 0;
+ static int ref_count;
+
+ switch (str_elem->token) {
+ case SKL_TKN_STR_LIB_NAME:
+ if (ref_count > minfo->lib_count - 1) {
+ ref_count = 0;
+ return -EINVAL;
+ }
+
+ strncpy(minfo->lib[ref_count].name, str_elem->string,
+ ARRAY_SIZE(minfo->lib[ref_count].name));
+ ref_count++;
+ tkn_count++;
+ break;
+
+ default:
+ dev_err(dev, "Not a string token %d\n", str_elem->token);
+ break;
+ }
+
+ return tkn_count;
+}
+
+static int skl_tplg_get_str_tkn(struct device *dev,
+ struct snd_soc_tplg_vendor_array *array,
+ struct skl_dfw_manifest *minfo)
+{
+ int tkn_count = 0, ret;
+ struct snd_soc_tplg_vendor_string_elem *str_elem;
+
+ str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
+ while (tkn_count < array->num_elems) {
+ ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
+ str_elem++;
+
+ if (ret < 0)
+ return ret;
+
+ tkn_count = tkn_count + ret;
+ }
+
+ return tkn_count;
+}
+
+static int skl_tplg_get_int_tkn(struct device *dev,
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem,
+ struct skl_dfw_manifest *minfo)
+{
+ int tkn_count = 0;
+
+ switch (tkn_elem->token) {
+ case SKL_TKN_U32_LIB_COUNT:
+ minfo->lib_count = tkn_elem->value;
+ tkn_count++;
+ break;
+
+ default:
+ dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
+ return -EINVAL;
+ }
+
+ return tkn_count;
+}
+
+/*
+ * Fill the manifest structure by parsing the tokens based on the
+ * type.
+ */
+static int skl_tplg_get_manifest_tkn(struct device *dev,
+ char *pvt_data, struct skl_dfw_manifest *minfo,
+ int block_size)
+{
+ int tkn_count = 0, ret;
+ int off = 0, tuple_size = 0;
+ struct snd_soc_tplg_vendor_array *array;
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem;
+
+ if (block_size <= 0)
+ return -EINVAL;
+
+ while (tuple_size < block_size) {
+ array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
+ off += array->size;
+ switch (array->type) {
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+ ret = skl_tplg_get_str_tkn(dev, array, minfo);
+
+ if (ret < 0)
+ return ret;
+ tkn_count += ret;
+
+ tuple_size += tkn_count *
+ sizeof(struct snd_soc_tplg_vendor_string_elem);
+ continue;
+
+ case SND_SOC_TPLG_TUPLE_TYPE_UUID:
+ dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
+ continue;
+
+ default:
+ tkn_elem = array->value;
+ tkn_count = 0;
+ break;
+ }
+
+ while (tkn_count <= array->num_elems - 1) {
+ ret = skl_tplg_get_int_tkn(dev,
+ tkn_elem, minfo);
+ if (ret < 0)
+ return ret;
+
+ tkn_count = tkn_count + ret;
+ tkn_elem++;
+ tuple_size += tkn_count *
+ sizeof(struct snd_soc_tplg_vendor_value_elem);
+ break;
+ }
+ tkn_count = 0;
+ }
+
+ return 0;
+}
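+
+/*
+ * Illustrative sketch of a single vendor tuple array as walked above
+ * (field names as used in this file, assuming the ASoC topology uapi
+ * definitions):
+ *
+ *   size | type | num_elems | elem[0] | elem[1] | ...
+ *
+ * String elems carry { token, string }, value elems carry
+ * { token, value }.
+ */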
+
+/*
+ * Parse the manifest private data for tokens. Each private data block
+ * is preceded by descriptors for its type and size.
+ */
+static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
+ struct device *dev, struct skl_dfw_manifest *minfo)
+{
+ struct snd_soc_tplg_vendor_array *array;
+ int num_blocks, block_size = 0, block_type, off = 0;
+ char *data;
+ int ret;
+
+ /* Read the NUM_DATA_BLOCKS descriptor */
+ array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
+ ret = skl_tplg_get_desc_blocks(dev, array);
+ if (ret < 0)
+ return ret;
+ num_blocks = ret;
+
+ off += array->size;
+ array = (struct snd_soc_tplg_vendor_array *)
+ (manifest->priv.data + off);
+
+ /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
+ while (num_blocks > 0) {
+ ret = skl_tplg_get_desc_blocks(dev, array);
+
+ if (ret < 0)
+ return ret;
+ block_type = ret;
+ off += array->size;
+
+ array = (struct snd_soc_tplg_vendor_array *)
+ (manifest->priv.data + off);
+
+ ret = skl_tplg_get_desc_blocks(dev, array);
+
+ if (ret < 0)
+ return ret;
+ block_size = ret;
+ off += array->size;
+
+ array = (struct snd_soc_tplg_vendor_array *)
+ (manifest->priv.data + off);
+
+ data = (manifest->priv.data + off);
+
+ if (block_type == SKL_TYPE_TUPLE) {
+ ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
+ block_size);
+
+ if (ret < 0)
+ return ret;
+
+ --num_blocks;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int skl_manifest_load(struct snd_soc_component *cmpnt,
+ struct snd_soc_tplg_manifest *manifest)
+{
+ struct skl_dfw_manifest *minfo;
+ struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
+ struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = ebus_to_skl(ebus);
+ int ret = 0;
+
+ /* proceed only if we have private data defined */
+ if (manifest->priv.size == 0)
+ return 0;
+
+ minfo = &skl->skl_sst->manifest;
+
+ skl_tplg_get_manifest_data(manifest, bus->dev, minfo);
+
+ if (minfo->lib_count > HDA_MAX_LIB) {
+ dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
+ minfo->lib_count);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct snd_soc_tplg_ops skl_tplg_ops = {
.widget_load = skl_tplg_widget_load,
.control_load = skl_tplg_control_load,
.bytes_ext_ops = skl_tlv_ops,
.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
+ .manifest = skl_manifest_load,
};
/*
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index 22d3ef83817d..a519360f42a6 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -133,7 +133,7 @@ struct skl_i2s_config_blob {
struct skl_dma_control {
u32 node_id;
u32 config_length;
- u32 config_data[1];
+ u32 config_data[0];
} __packed;
struct skl_cpr_cfg {
@@ -215,9 +215,20 @@ struct skl_module_fmt {
struct skl_module_cfg;
+struct skl_mod_inst_map {
+ u16 mod_id;
+ u16 inst_id;
+};
+
+struct skl_kpb_params {
+ u32 num_modules;
+ struct skl_mod_inst_map map[0];
+};
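+
+/*
+ * Illustrative layout only: a skl_kpb_params buffer presumably carries
+ * num_modules skl_mod_inst_map entries in map[], i.e.
+ *   [num_modules][{mod_id, inst_id} x num_modules]
+ */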
+
struct skl_module_inst_id {
- u32 module_id;
+ int module_id;
u32 instance_id;
+ int pvt_id;
};
enum skl_module_pin_state {
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
index a32e5e9cc530..2f6281e056d6 100644
--- a/sound/soc/intel/skylake/skl-tplg-interface.h
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -80,7 +80,8 @@ enum skl_module_type {
SKL_MODULE_TYPE_UPDWMIX,
SKL_MODULE_TYPE_SRCINT,
SKL_MODULE_TYPE_ALGO,
- SKL_MODULE_TYPE_BASE_OUTFMT
+ SKL_MODULE_TYPE_BASE_OUTFMT,
+ SKL_MODULE_TYPE_KPB,
};
enum skl_core_affinity {
@@ -148,84 +149,34 @@ enum skl_module_param_type {
SKL_PARAM_BIND
};
-struct skl_dfw_module_pin {
- u16 module_id;
- u16 instance_id;
-} __packed;
-
-struct skl_dfw_module_fmt {
- u32 channels;
- u32 freq;
- u32 bit_depth;
- u32 valid_bit_depth;
- u32 ch_cfg;
- u32 interleaving_style;
- u32 sample_type;
- u32 ch_map;
-} __packed;
-
-struct skl_dfw_module_caps {
+struct skl_dfw_algo_data {
u32 set_params:2;
u32 rsvd:30;
u32 param_id;
- u32 caps_size;
- u32 caps[HDA_SST_CFG_MAX];
-};
-
-struct skl_dfw_pipe {
- u8 pipe_id;
- u8 pipe_priority;
- u16 conn_type:4;
- u16 rsvd:4;
- u16 memory_pages:8;
+ u32 max;
+ char params[0];
} __packed;
-struct skl_dfw_module {
- u8 uuid[16];
-
- u16 module_id;
- u16 instance_id;
- u32 max_mcps;
- u32 mem_pages;
- u32 obs;
- u32 ibs;
- u32 vbus_id;
-
- u32 max_in_queue:8;
- u32 max_out_queue:8;
- u32 time_slot:8;
- u32 core_id:4;
- u32 rsvd1:4;
-
- u32 module_type:8;
- u32 conn_type:4;
- u32 dev_type:4;
- u32 hw_conn_type:4;
- u32 rsvd2:12;
-
- u32 params_fixup:8;
- u32 converter:8;
- u32 input_pin_type:1;
- u32 output_pin_type:1;
- u32 is_dynamic_in_pin:1;
- u32 is_dynamic_out_pin:1;
- u32 is_loadable:1;
- u32 rsvd3:11;
-
- struct skl_dfw_pipe pipe;
- struct skl_dfw_module_fmt in_fmt[MAX_IN_QUEUE];
- struct skl_dfw_module_fmt out_fmt[MAX_OUT_QUEUE];
- struct skl_dfw_module_pin in_pin[MAX_IN_QUEUE];
- struct skl_dfw_module_pin out_pin[MAX_OUT_QUEUE];
- struct skl_dfw_module_caps caps;
+#define LIB_NAME_LENGTH 128
+#define HDA_MAX_LIB 16
+
+struct lib_info {
+ char name[LIB_NAME_LENGTH];
} __packed;
-struct skl_dfw_algo_data {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 max;
- char params[0];
+struct skl_dfw_manifest {
+ u32 lib_count;
+ struct lib_info lib[HDA_MAX_LIB];
} __packed;
+enum skl_tkn_dir {
+ SKL_DIR_IN,
+ SKL_DIR_OUT
+};
+
+enum skl_tuple_type {
+ SKL_TYPE_TUPLE,
+ SKL_TYPE_DATA
+};
+
#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index e3e764167765..2989c164dafe 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -587,7 +587,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
return -ENXIO;
}
- snd_hdac_ext_bus_parse_capabilities(ebus);
+ snd_hdac_bus_parse_capabilities(bus);
if (skl_acquire_irq(ebus, 0) < 0)
return -EBUSY;
@@ -684,7 +684,7 @@ static int skl_probe(struct pci_dev *pci,
skl_dmic_data.dmic_num = skl_get_dmic_geo(skl);
/* check if dsp is there */
- if (ebus->ppcap) {
+ if (bus->ppcap) {
err = skl_machine_device_register(skl,
(void *)pci_id->driver_data);
if (err < 0)
@@ -698,7 +698,7 @@ static int skl_probe(struct pci_dev *pci,
skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
}
- if (ebus->mlcap)
+ if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(ebus);
/* create device for soc dmic */
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 9064e5b0d676..5d4fbb094c48 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -105,6 +105,7 @@ struct skl_dsp_ops {
int irq, const char *fw_name,
struct skl_dsp_loader_ops loader_ops,
struct skl_sst **skl_sst);
+ int (*init_fw)(struct device *dev, struct skl_sst *ctx);
void (*cleanup)(struct device *dev, struct skl_sst *ctx);
};
@@ -123,4 +124,5 @@ int skl_free_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
void skl_cleanup_resources(struct skl *skl);
+const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index dbfdfe99c69d..dafd22e874e9 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -242,7 +242,7 @@ static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
return count;
}
-static struct snd_pcm_ops kirkwood_dma_ops = {
+static const struct snd_pcm_ops kirkwood_dma_ops = {
.open = kirkwood_dma_open,
.close = kirkwood_dma_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
index b788791b0a35..ac231d33d8fe 100644
--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -23,7 +23,8 @@
#define AFE_BASE_END_OFFSET 8
-int mtk_regmap_update_bits(struct regmap *map, int reg, unsigned int mask,
+static int mtk_regmap_update_bits(struct regmap *map, int reg,
+ unsigned int mask,
unsigned int val)
{
if (reg < 0)
@@ -31,7 +32,7 @@ int mtk_regmap_update_bits(struct regmap *map, int reg, unsigned int mask,
return regmap_update_bits(map, reg, mask, val);
}
-int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
+static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
if (reg < 0)
return 0;
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 76ce33199bf9..06fec5699cc8 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -221,7 +221,8 @@ void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
/* Enable TX/RX sync error interrupts by default */
if (mcbsp->irq)
- MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
+ MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN |
+ RUNDFLEN | ROVFLEN | XUNDFLEN | XOVFLEN);
}
/**
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index f61b3b58083b..89fe95e877db 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -305,23 +305,14 @@ static int omap_abe_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, priv);
- ret = snd_soc_register_card(card);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+ dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
-static int omap_abe_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- return 0;
-}
-
static const struct of_device_id omap_abe_of_match[] = {
{.compatible = "ti,abe-twl6040", },
{ },
@@ -335,7 +326,6 @@ static struct platform_driver omap_abe_driver = {
.of_match_table = omap_abe_of_match,
},
.probe = omap_abe_probe,
- .remove = omap_abe_remove,
};
static int __init omap_abe_init(void)
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a84f677234f0..94e9ff791f3a 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -58,7 +58,7 @@ static void omap_pcm_limit_supported_formats(void)
{
int i;
- for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
switch (snd_pcm_format_physical_width(i)) {
case 8:
case 16:
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 1289543c8fb2..07f91e918b23 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -85,6 +85,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
return ERR_PTR(ret);
}
+ /* DAPM routes */
+ if (of_property_read_bool(node, "qcom,audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
/* Populate links */
num_links = of_get_child_count(node);
@@ -147,6 +156,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
return data;
}
+static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = {
+
+ SND_SOC_DAPM_MIC("Handset Mic", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Secondary Mic", NULL),
+ SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+ SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+};
+
static int apq8016_sbc_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -159,6 +177,8 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = dev;
+ card->dapm_widgets = apq8016_sbc_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
data = apq8016_sbc_parse_of(card);
if (IS_ERR(data)) {
dev_err(&pdev->dev, "Error resolving dai links: %ld\n",
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index db000c6987a1..e2ff538a8aa5 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -84,9 +84,9 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct lpass_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(soc_runtime);
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_pcm_data *pcm_data = drvdata->private_data;
struct lpass_variant *v = drvdata->variant;
snd_pcm_format_t format = params_format(params);
unsigned int channels = params_channels(params);
@@ -177,9 +177,9 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct lpass_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(soc_runtime);
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_pcm_data *pcm_data = drvdata->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int reg;
int ret;
@@ -201,9 +201,9 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct lpass_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(soc_runtime);
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_pcm_data *pcm_data = drvdata->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -255,9 +255,9 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct lpass_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(soc_runtime);
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_pcm_data *pcm_data = drvdata->private_data;
struct lpass_variant *v = drvdata->variant;
int ret, ch, dir = substream->stream;
@@ -331,9 +331,9 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct lpass_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(soc_runtime);
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
+ struct lpass_pcm_data *pcm_data = drvdata->private_data;
struct lpass_variant *v = drvdata->variant;
unsigned int base_addr, curr_addr;
int ret, ch, dir = substream->stream;
@@ -372,7 +372,7 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
runtime->dma_bytes);
}
-static struct snd_pcm_ops lpass_platform_pcm_ops = {
+static const struct snd_pcm_ops lpass_platform_pcm_ops = {
.open = lpass_platform_pcmops_open,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = lpass_platform_pcmops_hw_params,
@@ -483,7 +483,7 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
return -ENOMEM;
data->i2s_port = cpu_dai->driver->id;
- snd_soc_pcm_set_drvdata(soc_runtime, data);
+ drvdata->private_data = data;
psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (psubstream) {
@@ -570,8 +570,8 @@ static void lpass_platform_pcm_free(struct snd_pcm *pcm)
substream = pcm->streams[i].substream;
if (substream) {
rt = substream->private_data;
- data = snd_soc_pcm_get_drvdata(rt);
drvdata = snd_soc_platform_get_drvdata(rt->platform);
+ data = drvdata->private_data;
ch = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
? data->rdma_ch
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 30714ad1e138..35b3cea8207d 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -58,6 +58,8 @@ struct lpass_data {
/* 8016 specific */
struct clk *pcnoc_mport_clk;
struct clk *pcnoc_sway_clk;
+
+ void *private_data;
};
 /* Variant data for each SoC */
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index f1e0c703e0d2..c783f9a22595 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -41,3 +41,15 @@ config SND_SOC_ROCKCHIP_RT5645
help
Say Y or M here if you want to add support for SoC audio on Rockchip
boards using the RT5645/RT5650 codec, such as Veyron.
+
+config SND_SOC_RK3399_GRU_SOUND
+ tristate "ASoC support for multiple codecs on Rockchip RK3399 GRU boards"
+ depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP && SPI
+ select SND_SOC_ROCKCHIP_I2S
+ select SND_SOC_MAX98357A
+ select SND_SOC_RT5514
+ select SND_SOC_DA7219
+ select SND_SOC_RT5514_SPI
+ help
+ Say Y or M here if you want to add support for multiple codecs for
+ SoC audio on Rockchip RK3399 GRU boards.
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index c0bf560125f3..84e5c7c700e7 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_SND_SOC_ROCKCHIP_SPDIF) += snd-soc-rockchip-spdif.o
snd-soc-rockchip-max98090-objs := rockchip_max98090.o
snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
+snd-soc-rk3399-gru-sound-objs := rk3399_gru_sound.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
+obj-$(CONFIG_SND_SOC_RK3399_GRU_SOUND) += snd-soc-rk3399-gru-sound.o
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
new file mode 100644
index 000000000000..9ed735a6cf49
--- /dev/null
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -0,0 +1,397 @@
+/*
+ * Rockchip machine ASoC driver for boards using MAX98357A/RT5514/DA7219
+ *
+ * Copyright (c) 2016, ROCKCHIP CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/input.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "rockchip_i2s.h"
+#include "../codecs/da7219.h"
+#include "../codecs/da7219-aad.h"
+#include "../codecs/rt5514.h"
+
+#define DRV_NAME "rk3399-gru-sound"
+
+#define SOUND_FS 256
+
+unsigned int rt5514_dmic_delay;
+
+static struct snd_soc_jack rockchip_sound_jack;
+
+static const struct snd_soc_dapm_widget rockchip_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rockchip_dapm_routes[] = {
+ /* Input Lines */
+ {"MIC", NULL, "Headset Mic"},
+ {"DMIC1L", NULL, "Int Mic"},
+ {"DMIC1R", NULL, "Int Mic"},
+
+ /* Output Lines */
+ {"Headphones", NULL, "HPL"},
+ {"Headphones", NULL, "HPR"},
+ {"Speakers", NULL, "Speaker"},
+};
+
+static const struct snd_kcontrol_new rockchip_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static int rockchip_sound_max98357a_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ unsigned int mclk;
+ int ret;
+
+ /* max98357a supports these sample rates */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 48000:
+ case 96000:
+ mclk = params_rate(params) * SOUND_FS;
+ break;
+ default:
+ dev_err(rtd->card->dev, "%s() doesn't support this sample rate: %d\n",
+ __func__, params_rate(params));
+ return -EINVAL;
+ }
+
+ ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, mclk, 0);
+ if (ret) {
+ dev_err(rtd->card->dev, "%s() error setting sysclk to %u: %d\n",
+ __func__, mclk, ret);
+ return ret;
+ }
+
+ return 0;
+}
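+
+/*
+ * MCLK sketch: with SOUND_FS = 256 the requested sysclk is 256 * fs,
+ * e.g. 48000 Hz * 256 = 12.288 MHz and 96000 Hz * 256 = 24.576 MHz.
+ */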
+
+static int rockchip_sound_rt5514_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ unsigned int mclk;
+ int ret;
+
+ mclk = params_rate(params) * SOUND_FS;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+ SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Can't set cpu clock out %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5514_SCLK_S_MCLK,
+ mclk, SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(rtd->card->dev, "%s() error setting sysclk to %u: %d\n",
+ __func__, mclk, ret);
+ return ret;
+ }
+
+ /* Wait for DMIC stable */
+ msleep(rt5514_dmic_delay);
+
+ return 0;
+}
+
+static int rockchip_sound_da7219_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int mclk, ret;
+
+ /* in bypass mode, the mclk has to be one of the frequencies below */
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+ SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "Can't set cpu clock out %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "Can't set codec clock in %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "Can't set pll sysclk mclk %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
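+
+/*
+ * MCLK sketch: each bypass clock is an integer multiple of every rate in
+ * its family, e.g. 12288000 = 256 * 48000 = 128 * 96000 and
+ * 11289600 = 256 * 44100 = 1024 * 11025.
+ */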
+
+static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec_dais[0]->codec;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ /* We need default MCLK and PLL settings for the accessory detection */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 12288000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "Init can't set codec clock in %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "Init can't set pll sysclk mclk %d\n", ret);
+ return ret;
+ }
+
+ /* Enable Headset and 4 Buttons Jack detection */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_LINEOUT |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &rockchip_sound_jack, NULL, 0);
+
+ if (ret) {
+ dev_err(rtd->card->dev, "New Headset Jack failed! (%d)\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(
+ rockchip_sound_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(
+ rockchip_sound_jack.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(
+ rockchip_sound_jack.jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ da7219_aad_jack_det(codec, &rockchip_sound_jack);
+
+ return 0;
+}
+
+static struct snd_soc_ops rockchip_sound_max98357a_ops = {
+ .hw_params = rockchip_sound_max98357a_hw_params,
+};
+
+static struct snd_soc_ops rockchip_sound_rt5514_ops = {
+ .hw_params = rockchip_sound_rt5514_hw_params,
+};
+
+static struct snd_soc_ops rockchip_sound_da7219_ops = {
+ .hw_params = rockchip_sound_da7219_hw_params,
+};
+
+enum {
+ DAILINK_MAX98357A,
+ DAILINK_RT5514,
+ DAILINK_DA7219,
+ DAILINK_RT5514_DSP,
+};
+
+#define DAILINK_ENTITIES (DAILINK_DA7219 + 1)
+
+static struct snd_soc_dai_link rockchip_dailinks[] = {
+ [DAILINK_MAX98357A] = {
+ .name = "MAX98357A",
+ .stream_name = "MAX98357A PCM",
+ .codec_dai_name = "HiFi",
+ .ops = &rockchip_sound_max98357a_ops,
+ /* set max98357a as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ },
+ [DAILINK_RT5514] = {
+ .name = "RT5514",
+ .stream_name = "RT5514 PCM",
+ .codec_dai_name = "rt5514-aif1",
+ .ops = &rockchip_sound_rt5514_ops,
+ /* set rt5514 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ },
+ [DAILINK_DA7219] = {
+ .name = "DA7219",
+ .stream_name = "DA7219 PCM",
+ .codec_dai_name = "da7219-hifi",
+ .init = rockchip_sound_da7219_init,
+ .ops = &rockchip_sound_da7219_ops,
+ /* set da7219 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ },
+ /* RT5514 DSP for voice wakeup via spi bus */
+ [DAILINK_RT5514_DSP] = {
+ .name = "RT5514 DSP",
+ .stream_name = "Wake on Voice",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ },
+};
+
+static struct snd_soc_card rockchip_sound_card = {
+ .name = "rk3399-gru-sound",
+ .owner = THIS_MODULE,
+ .dai_link = rockchip_dailinks,
+ .num_links = ARRAY_SIZE(rockchip_dailinks),
+ .dapm_widgets = rockchip_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rockchip_dapm_widgets),
+ .dapm_routes = rockchip_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rockchip_dapm_routes),
+ .controls = rockchip_controls,
+ .num_controls = ARRAY_SIZE(rockchip_controls),
+};
+
+static int rockchip_sound_match_stub(struct device *dev, void *data)
+{
+ return 1;
+}
+
+static int rockchip_sound_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &rockchip_sound_card;
+ struct device_node *cpu_node;
+ struct device *dev;
+ struct device_driver *drv;
+ int i, ret;
+
+ cpu_node = of_parse_phandle(pdev->dev.of_node, "rockchip,cpu", 0);
+ if (!cpu_node) {
+ dev_err(&pdev->dev, "Property 'rockchip,cpu' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < DAILINK_ENTITIES; i++) {
+ rockchip_dailinks[i].platform_of_node = cpu_node;
+ rockchip_dailinks[i].cpu_of_node = cpu_node;
+
+ rockchip_dailinks[i].codec_of_node =
+ of_parse_phandle(pdev->dev.of_node, "rockchip,codec", i);
+ if (!rockchip_dailinks[i].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property[%d] 'rockchip,codec' missing or invalid\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Acquire the rt5514 SPI driver and use its device name for the
+ * DSP dai-link names, so soc_bind_dai_link() can bind them.
+ */
+ drv = driver_find("rt5514", &spi_bus_type);
+ if (!drv) {
+ dev_err(&pdev->dev, "Cannot find the rt5514 driver on the SPI bus\n");
+ return -EINVAL;
+ }
+
+ dev = driver_find_device(drv, NULL, NULL, rockchip_sound_match_stub);
+ if (!dev) {
+ dev_err(&pdev->dev, "Cannot find the rt5514 device\n");
+ return -ENODEV;
+ }
+
+ /* Set DMIC delay */
+ ret = device_property_read_u32(&pdev->dev, "dmic-delay",
+ &rt5514_dmic_delay);
+ if (ret) {
+ rt5514_dmic_delay = 0;
+ dev_dbg(&pdev->dev,
+ "no optional property 'dmic-delay' found, default: no delay\n");
+ }
+
+ rockchip_dailinks[DAILINK_RT5514_DSP].cpu_name = kstrdup_const(dev_name(dev), GFP_KERNEL);
+ rockchip_dailinks[DAILINK_RT5514_DSP].cpu_dai_name = kstrdup_const(dev_name(dev), GFP_KERNEL);
+ rockchip_dailinks[DAILINK_RT5514_DSP].platform_name = kstrdup_const(dev_name(dev), GFP_KERNEL);
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card failed: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_sound_of_match[] = {
+ { .compatible = "rockchip,rk3399-gru-sound", },
+ {},
+};
+
+static struct platform_driver rockchip_sound_driver = {
+ .probe = rockchip_sound_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = rockchip_sound_of_match,
+#ifdef CONFIG_PM
+ .pm = &snd_soc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(rockchip_sound_driver);
+
+MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, rockchip_sound_of_match);
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 652e8c5ea166..974915cb4c4f 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -57,6 +57,7 @@ static int i2s_runtime_suspend(struct device *dev)
{
struct rk_i2s_dev *i2s = dev_get_drvdata(dev);
+ regcache_cache_only(i2s->regmap, true);
clk_disable_unprepare(i2s->mclk);
return 0;
@@ -73,7 +74,14 @@ static int i2s_runtime_resume(struct device *dev)
return ret;
}
- return 0;
+ regcache_cache_only(i2s->regmap, false);
+ regcache_mark_dirty(i2s->regmap);
+
+ ret = regcache_sync(i2s->regmap);
+ if (ret)
+ clk_disable_unprepare(i2s->mclk);
+
+ return ret;
}
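+
+/*
+ * Runtime-PM sketch: suspend marks the regmap cache-only before gating
+ * mclk; resume re-enables mclk, drops cache-only, marks the cache dirty
+ * and syncs it back, undoing the clock enable if the sync fails.
+ */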
static inline struct rk_i2s_dev *to_info(struct snd_soc_dai *dai)
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index 4ca265737eda..fa8101d1e16f 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -65,6 +65,7 @@ static int __maybe_unused rk_spdif_runtime_suspend(struct device *dev)
{
struct rk_spdif_dev *spdif = dev_get_drvdata(dev);
+ regcache_cache_only(spdif->regmap, true);
clk_disable_unprepare(spdif->mclk);
clk_disable_unprepare(spdif->hclk);
@@ -88,7 +89,16 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
return ret;
}
- return 0;
+ regcache_cache_only(spdif->regmap, false);
+ regcache_mark_dirty(spdif->regmap);
+
+ ret = regcache_sync(spdif->regmap);
+ if (ret) {
+ clk_disable_unprepare(spdif->mclk);
+ clk_disable_unprepare(spdif->hclk);
+ }
+
+ return ret;
}
static int rk_spdif_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 7b722b0094d9..f6023b46c107 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,12 +1,14 @@
-config SND_SOC_SAMSUNG
+menuconfig SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
select SND_SOC_GENERIC_DMAENGINE_PCM
- help
+ ---help---
Say Y or M if you want to add support for codecs attached to
the Samsung SoCs' Audio interfaces. You will also need to
select the audio interfaces to support below.
+if SND_SOC_SAMSUNG
+
config SND_S3C24XX_I2S
tristate
@@ -18,22 +20,22 @@ config SND_S3C2412_SOC_I2S
select SND_S3C_I2SV2_SOC
config SND_SAMSUNG_PCM
- tristate
+ tristate "Samsung PCM interface support"
config SND_SAMSUNG_AC97
tristate
select SND_SOC_AC97_BUS
config SND_SAMSUNG_SPDIF
- tristate
+ tristate "Samsung SPDIF transmitter support"
select SND_SOC_SPDIF
config SND_SAMSUNG_I2S
- tristate
+ tristate "Samsung I2S interface support"
config SND_SOC_SAMSUNG_NEO1973_WM8753
tristate "Audio support for Openmoko Neo1973 Smartphones (GTA02)"
- depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA02
+ depends on MACH_NEO1973_GTA02
select SND_S3C24XX_I2S
select SND_SOC_WM8753
select SND_SOC_BT_SCO
@@ -43,7 +45,7 @@ config SND_SOC_SAMSUNG_NEO1973_WM8753
config SND_SOC_SAMSUNG_JIVE_WM8750
tristate "SoC I2S Audio support for Jive"
- depends on SND_SOC_SAMSUNG && MACH_JIVE && I2C
+ depends on MACH_JIVE && I2C
select SND_SOC_WM8750
select SND_S3C2412_SOC_I2S
help
@@ -51,7 +53,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
+ depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
depends on I2C
select SND_SOC_WM8580
select SND_SAMSUNG_I2S
@@ -60,7 +62,6 @@ config SND_SOC_SAMSUNG_SMDK_WM8580
config SND_SOC_SAMSUNG_SMDK_WM8994
tristate "SoC I2S Audio support for WM8994 on SMDK"
- depends on SND_SOC_SAMSUNG
depends on I2C=y
select MFD_WM8994
select SND_SOC_WM8994
@@ -70,7 +71,7 @@ config SND_SOC_SAMSUNG_SMDK_WM8994
config SND_SOC_SAMSUNG_SMDK2443_WM9710
tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
- depends on SND_SOC_SAMSUNG && MACH_SMDK2443
+ depends on MACH_SMDK2443
select AC97_BUS
select SND_SOC_AC97_CODEC
select SND_SAMSUNG_AC97
@@ -80,7 +81,7 @@ config SND_SOC_SAMSUNG_SMDK2443_WM9710
config SND_SOC_SAMSUNG_LN2440SBC_ALC650
tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
- depends on SND_SOC_SAMSUNG && ARCH_S3C24XX
+ depends on ARCH_S3C24XX
select AC97_BUS
select SND_SOC_AC97_CODEC
select SND_SAMSUNG_AC97
@@ -90,7 +91,7 @@ config SND_SOC_SAMSUNG_LN2440SBC_ALC650
config SND_SOC_SAMSUNG_S3C24XX_UDA134X
tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
- depends on SND_SOC_SAMSUNG && ARCH_S3C24XX
+ depends on ARCH_S3C24XX
select SND_S3C24XX_I2S
select SND_SOC_L3
select SND_SOC_UDA134X
@@ -102,21 +103,21 @@ config SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23
tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
- depends on SND_SOC_SAMSUNG && ARCH_S3C24XX && I2C
+ depends on ARCH_S3C24XX && I2C
select SND_S3C24XX_I2S
select SND_SOC_TLV320AIC23_I2C
select SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_SIMTEC_HERMES
tristate "SoC I2S Audio support for Simtec Hermes board"
- depends on SND_SOC_SAMSUNG && ARCH_S3C24XX && I2C
+ depends on ARCH_S3C24XX && I2C
select SND_S3C24XX_I2S
select SND_SOC_TLV320AIC3X
select SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_H1940_UDA1380
tristate "Audio support for the HP iPAQ H1940"
- depends on SND_SOC_SAMSUNG && ARCH_H1940 && I2C
+ depends on ARCH_H1940 && I2C
select SND_S3C24XX_I2S
select SND_SOC_UDA1380
help
@@ -124,7 +125,7 @@ config SND_SOC_SAMSUNG_H1940_UDA1380
config SND_SOC_SAMSUNG_RX1950_UDA1380
tristate "Audio support for the HP iPAQ RX1950"
- depends on SND_SOC_SAMSUNG && MACH_RX1950 && I2C
+ depends on MACH_RX1950 && I2C
select SND_S3C24XX_I2S
select SND_SOC_UDA1380
help
@@ -132,7 +133,7 @@ config SND_SOC_SAMSUNG_RX1950_UDA1380
config SND_SOC_SAMSUNG_SMDK_WM9713
tristate "SoC AC97 Audio support for SMDK with WM9713"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
+ depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
select SND_SOC_WM9713
select SND_SAMSUNG_AC97
help
@@ -140,20 +141,19 @@ config SND_SOC_SAMSUNG_SMDK_WM9713
config SND_SOC_SMARTQ
tristate "SoC I2S Audio support for SmartQ board"
- depends on SND_SOC_SAMSUNG && MACH_SMARTQ && I2C
+ depends on MACH_SMARTQ && I2C
select SND_SAMSUNG_I2S
select SND_SOC_WM8750
config SND_SOC_SAMSUNG_SMDK_SPDIF
tristate "SoC S/PDIF Audio support for SMDK"
- depends on SND_SOC_SAMSUNG
select SND_SAMSUNG_SPDIF
help
Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
config SND_SOC_SMDK_WM8580_PCM
tristate "SoC PCM Audio support for WM8580 on SMDK"
- depends on SND_SOC_SAMSUNG && (MACH_SMDKV210 || MACH_SMDKC110)
+ depends on MACH_SMDKV210 || MACH_SMDKC110
depends on I2C
select SND_SOC_WM8580
select SND_SAMSUNG_PCM
@@ -162,7 +162,6 @@ config SND_SOC_SMDK_WM8580_PCM
config SND_SOC_SMDK_WM8994_PCM
tristate "SoC PCM Audio support for WM8994 on SMDK"
- depends on SND_SOC_SAMSUNG
depends on I2C=y
select MFD_WM8994
select SND_SOC_WM8994
@@ -172,7 +171,7 @@ config SND_SOC_SMDK_WM8994_PCM
config SND_SOC_SPEYSIDE
tristate "Audio support for Wolfson Speyside"
- depends on SND_SOC_SAMSUNG && I2C && SPI_MASTER
+ depends on I2C && SPI_MASTER
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select SND_SOC_WM8996
@@ -182,14 +181,14 @@ config SND_SOC_SPEYSIDE
config SND_SOC_TOBERMORY
tristate "Audio support for Wolfson Tobermory"
- depends on SND_SOC_SAMSUNG && INPUT && I2C
+ depends on INPUT && I2C
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select SND_SOC_WM8962
config SND_SOC_BELLS
tristate "Audio support for Wolfson Bells"
- depends on SND_SOC_SAMSUNG && MFD_ARIZONA && I2C && SPI_MASTER
+ depends on MFD_ARIZONA && I2C && SPI_MASTER
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select SND_SOC_WM5102
@@ -200,7 +199,7 @@ config SND_SOC_BELLS
config SND_SOC_LOWLAND
tristate "Audio support for Wolfson Lowland"
- depends on SND_SOC_SAMSUNG && I2C
+ depends on I2C
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select SND_SOC_WM5100
@@ -208,7 +207,7 @@ config SND_SOC_LOWLAND
config SND_SOC_LITTLEMILL
tristate "Audio support for Wolfson Littlemill"
- depends on SND_SOC_SAMSUNG && I2C
+ depends on I2C
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select MFD_WM8994
@@ -216,7 +215,7 @@ config SND_SOC_LITTLEMILL
config SND_SOC_SNOW
tristate "Audio support for Google Snow boards"
- depends on SND_SOC_SAMSUNG && I2C
+ depends on I2C
select SND_SOC_MAX98090
select SND_SOC_MAX98095
select SND_SAMSUNG_I2S
@@ -226,6 +225,8 @@ config SND_SOC_SNOW
config SND_SOC_ARNDALE_RT5631_ALC5631
tristate "Audio support for RT5631(ALC5631) on Arndale Board"
- depends on SND_SOC_SAMSUNG && I2C
+ depends on I2C
select SND_SAMSUNG_I2S
select SND_SOC_RT5631
+
+endif #SND_SOC_SAMSUNG
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 547d31032088..97d6700b1009 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -38,16 +38,16 @@ struct s3c_ac97_info {
};
static struct s3c_ac97_info s3c_ac97;
-static struct s3c_dma_params s3c_ac97_pcm_out = {
- .dma_size = 4,
+static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_out = {
+ .addr_width = 4,
};
-static struct s3c_dma_params s3c_ac97_pcm_in = {
- .dma_size = 4,
+static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_in = {
+ .addr_width = 4,
};
-static struct s3c_dma_params s3c_ac97_mic_in = {
- .dma_size = 4,
+static struct snd_dmaengine_dai_dma_data s3c_ac97_mic_in = {
+ .addr_width = 4,
};
static void s3c_ac97_activate(struct snd_ac97 *ac97)
@@ -74,7 +74,7 @@ static void s3c_ac97_activate(struct snd_ac97 *ac97)
writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to activate!");
+ pr_err("AC97: Unable to activate!\n");
}
static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
@@ -100,7 +100,7 @@ static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to read!");
+ pr_err("AC97: Unable to read!\n");
stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
addr = (stat >> 16) & 0x7f;
@@ -137,7 +137,7 @@ static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
- pr_err("AC97: Unable to write!");
+ pr_err("AC97: Unable to write!\n");
ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
@@ -273,14 +273,14 @@ static const struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
static int s3c_ac97_dai_probe(struct snd_soc_dai *dai)
{
- samsung_asoc_init_dma_data(dai, &s3c_ac97_pcm_out, &s3c_ac97_pcm_in);
+ snd_soc_dai_init_dma_data(dai, &s3c_ac97_pcm_out, &s3c_ac97_pcm_in);
return 0;
}
static int s3c_ac97_mic_dai_probe(struct snd_soc_dai *dai)
{
- samsung_asoc_init_dma_data(dai, NULL, &s3c_ac97_mic_in);
+ snd_soc_dai_init_dma_data(dai, NULL, &s3c_ac97_mic_in);
return 0;
}
@@ -346,12 +346,12 @@ static int s3c_ac97_probe(struct platform_device *pdev)
if (IS_ERR(s3c_ac97.regs))
return PTR_ERR(s3c_ac97.regs);
- s3c_ac97_pcm_out.slave = ac97_pdata->dma_playback;
- s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_pcm_in.slave = ac97_pdata->dma_capture;
- s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_mic_in.slave = ac97_pdata->dma_capture_mic;
- s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
+ s3c_ac97_pcm_out.filter_data = ac97_pdata->dma_playback;
+ s3c_ac97_pcm_out.addr = mem_res->start + S3C_AC97_PCM_DATA;
+ s3c_ac97_pcm_in.filter_data = ac97_pdata->dma_capture;
+ s3c_ac97_pcm_in.addr = mem_res->start + S3C_AC97_PCM_DATA;
+ s3c_ac97_mic_in.filter_data = ac97_pdata->dma_capture_mic;
+ s3c_ac97_mic_in.addr = mem_res->start + S3C_AC97_MIC_DATA;
init_completion(&s3c_ac97.done);
mutex_init(&s3c_ac97.lock);
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 3830f297e0b6..7ae580d677c8 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -1,6 +1,4 @@
/*
- * dma.h --
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -9,27 +7,15 @@
* ALSA PCM interface for the Samsung SoC
*/
-#ifndef _S3C_AUDIO_H
-#define _S3C_AUDIO_H
+#ifndef _SAMSUNG_DMA_H
+#define _SAMSUNG_DMA_H
#include <sound/dmaengine_pcm.h>
-#include <linux/dmaengine.h>
-
-struct s3c_dma_params {
- void *slave; /* Channel ID */
- dma_addr_t dma_addr;
- int dma_size; /* Size of the DMA transfer */
- char *ch_name;
- struct snd_dmaengine_dai_dma_data dma_data;
-};
-void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
- struct s3c_dma_params *playback,
- struct s3c_dma_params *capture);
/*
* @tx, @rx arguments can be NULL if the DMA channel names are "tx", "rx",
* otherwise actual DMA channel names must be passed to this function.
*/
int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
const char *tx, const char *rx);
-#endif
+#endif /* _SAMSUNG_DMA_H */
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 2c87f380bfc4..9104c98deeb7 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -16,49 +16,18 @@
*/
#include <linux/module.h>
-#include <linux/amba/pl08x.h>
-#include <linux/platform_data/dma-s3c24xx.h>
-
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>
-#include <sound/soc-dai.h>
#include "dma.h"
-void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
- struct s3c_dma_params *playback,
- struct s3c_dma_params *capture)
-{
- struct snd_dmaengine_dai_dma_data *playback_data = NULL;
- struct snd_dmaengine_dai_dma_data *capture_data = NULL;
-
- if (playback) {
- playback_data = &playback->dma_data;
- playback_data->filter_data = playback->slave;
- playback_data->chan_name = playback->ch_name;
- playback_data->addr = playback->dma_addr;
- playback_data->addr_width = playback->dma_size;
- }
- if (capture) {
- capture_data = &capture->dma_data;
- capture_data->filter_data = capture->slave;
- capture_data->chan_name = capture->ch_name;
- capture_data->addr = capture->dma_addr;
- capture_data->addr_width = capture->dma_size;
- }
-
- snd_soc_dai_init_dma_data(dai, playback_data, capture_data);
-}
-EXPORT_SYMBOL_GPL(samsung_asoc_init_dma_data);
-
int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
const char *tx, const char *rx)
{
unsigned int flags = SND_DMAENGINE_PCM_FLAG_COMPAT;
-
struct snd_dmaengine_pcm_config *pcm_conf;
pcm_conf = devm_kzalloc(dev, sizeof(*pcm_conf), GFP_KERNEL);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 50635ee8ff20..7e32cf4581f8 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -87,9 +87,9 @@ struct i2s_dai {
/* Driver for this DAI */
struct snd_soc_dai_driver i2s_dai_drv;
/* DMA parameters */
- struct s3c_dma_params dma_playback;
- struct s3c_dma_params dma_capture;
- struct s3c_dma_params idma_playback;
+ struct snd_dmaengine_dai_dma_data dma_playback;
+ struct snd_dmaengine_dai_dma_data dma_capture;
+ struct snd_dmaengine_dai_dma_data idma_playback;
dma_filter_fn filter;
u32 quirks;
u32 suspend_i2smod;
@@ -692,15 +692,15 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
break;
case 2:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- i2s->dma_playback.dma_size = 4;
+ i2s->dma_playback.addr_width = 4;
else
- i2s->dma_capture.dma_size = 4;
+ i2s->dma_capture.addr_width = 4;
break;
case 1:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- i2s->dma_playback.dma_size = 2;
+ i2s->dma_playback.addr_width = 2;
else
- i2s->dma_capture.dma_size = 2;
+ i2s->dma_capture.addr_width = 2;
break;
default:
@@ -754,7 +754,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
writel(mod, i2s->addr + I2SMOD);
spin_unlock_irqrestore(i2s->lock, flags);
- samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
+ snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
i2s->frmclk = params_rate(params);
@@ -991,10 +991,10 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
unsigned long flags;
if (is_secondary(i2s)) { /* If this is probe on the secondary DAI */
- samsung_asoc_init_dma_data(dai, &other->sec_dai->dma_playback,
+ snd_soc_dai_init_dma_data(dai, &other->sec_dai->dma_playback,
NULL);
} else {
- samsung_asoc_init_dma_data(dai, &i2s->dma_playback,
+ snd_soc_dai_init_dma_data(dai, &i2s->dma_playback,
&i2s->dma_capture);
if (i2s->quirks & QUIRK_NEED_RSTCLR)
@@ -1002,7 +1002,7 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
if (i2s->quirks & QUIRK_SUPPORTS_IDMA)
idma_reg_addr_init(i2s->addr,
- i2s->sec_dai->idma_playback.dma_addr);
+ i2s->sec_dai->idma_playback.addr);
}
/* Reset any constraint on RFS and BFS */
@@ -1262,8 +1262,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
return -EINVAL;
}
- pri_dai->dma_playback.slave = i2s_pdata->dma_playback;
- pri_dai->dma_capture.slave = i2s_pdata->dma_capture;
+ pri_dai->dma_playback.filter_data = i2s_pdata->dma_playback;
+ pri_dai->dma_capture.filter_data = i2s_pdata->dma_capture;
pri_dai->filter = i2s_pdata->dma_filter;
if (&i2s_pdata->type)
@@ -1302,12 +1302,12 @@ static int samsung_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
return ret;
}
- pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
- pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
- pri_dai->dma_playback.ch_name = "tx";
- pri_dai->dma_capture.ch_name = "rx";
- pri_dai->dma_playback.dma_size = 4;
- pri_dai->dma_capture.dma_size = 4;
+ pri_dai->dma_playback.addr = regs_base + I2STXD;
+ pri_dai->dma_capture.addr = regs_base + I2SRXD;
+ pri_dai->dma_playback.chan_name = "tx";
+ pri_dai->dma_capture.chan_name = "rx";
+ pri_dai->dma_playback.addr_width = 4;
+ pri_dai->dma_capture.addr_width = 4;
pri_dai->quirks = quirks;
pri_dai->variant_regs = i2s_dai_data->i2s_variant_regs;
@@ -1318,31 +1318,33 @@ static int samsung_i2s_probe(struct platform_device *pdev)
sec_dai = i2s_alloc_dai(pdev, true);
if (!sec_dai) {
dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_disable_clk;
}
sec_dai->lock = &pri_dai->spinlock;
sec_dai->variant_regs = pri_dai->variant_regs;
- sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
- sec_dai->dma_playback.ch_name = "tx-sec";
+ sec_dai->dma_playback.addr = regs_base + I2STXDS;
+ sec_dai->dma_playback.chan_name = "tx-sec";
if (!np) {
- sec_dai->dma_playback.slave = i2s_pdata->dma_play_sec;
+ sec_dai->dma_playback.filter_data = i2s_pdata->dma_play_sec;
sec_dai->filter = i2s_pdata->dma_filter;
}
- sec_dai->dma_playback.dma_size = 4;
+ sec_dai->dma_playback.addr_width = 4;
sec_dai->addr = pri_dai->addr;
sec_dai->clk = pri_dai->clk;
sec_dai->quirks = quirks;
- sec_dai->idma_playback.dma_addr = idma_addr;
+ sec_dai->idma_playback.addr = idma_addr;
sec_dai->pri_dai = pri_dai;
pri_dai->sec_dai = sec_dai;
}
if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to configure gpio\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_disable_clk;
}
ret = devm_snd_soc_register_component(&pri_dai->pdev->dev,
@@ -1366,6 +1368,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
err_free_dai:
if (sec_dai)
i2s_free_sec_dai(sec_dai);
+err_disable_clk:
+ clk_disable_unprepare(pri_dai->clk);
return ret;
}
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index 4ed29ffc1c54..3e408158625d 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -22,7 +22,6 @@
#include "i2s.h"
#include "idma.h"
-#include "dma.h"
#include "i2s-regs.h"
#define ST_RUNNING (1<<0)
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 490c1a87fd66..43e367a9acc3 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -127,25 +127,25 @@ struct s3c_pcm_info {
struct clk *pclk;
struct clk *cclk;
- struct s3c_dma_params *dma_playback;
- struct s3c_dma_params *dma_capture;
+ struct snd_dmaengine_dai_dma_data *dma_playback;
+ struct snd_dmaengine_dai_dma_data *dma_capture;
};
-static struct s3c_dma_params s3c_pcm_stereo_out[] = {
+static struct snd_dmaengine_dai_dma_data s3c_pcm_stereo_out[] = {
[0] = {
- .dma_size = 4,
+ .addr_width = 4,
},
[1] = {
- .dma_size = 4,
+ .addr_width = 4,
},
};
-static struct s3c_dma_params s3c_pcm_stereo_in[] = {
+static struct snd_dmaengine_dai_dma_data s3c_pcm_stereo_in[] = {
[0] = {
- .dma_size = 4,
+ .addr_width = 4,
},
[1] = {
- .dma_size = 4,
+ .addr_width = 4,
},
};
@@ -552,15 +552,13 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
}
clk_prepare_enable(pcm->pclk);
- s3c_pcm_stereo_in[pdev->id].dma_addr = mem_res->start
- + S3C_PCM_RXFIFO;
- s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
- + S3C_PCM_TXFIFO;
+ s3c_pcm_stereo_in[pdev->id].addr = mem_res->start + S3C_PCM_RXFIFO;
+ s3c_pcm_stereo_out[pdev->id].addr = mem_res->start + S3C_PCM_TXFIFO;
filter = NULL;
if (pcm_pdata) {
- s3c_pcm_stereo_in[pdev->id].slave = pcm_pdata->dma_capture;
- s3c_pcm_stereo_out[pdev->id].slave = pcm_pdata->dma_playback;
+ s3c_pcm_stereo_in[pdev->id].filter_data = pcm_pdata->dma_capture;
+ s3c_pcm_stereo_out[pdev->id].filter_data = pcm_pdata->dma_playback;
filter = pcm_pdata->dma_filter;
}
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index bf8ae79b0fd2..644f186fd35c 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -24,7 +24,6 @@
#include "regs-i2s-v2.h"
#include "s3c-i2s-v2.h"
-#include "dma.h"
#undef S3C_IIS_V2_SUPPORTED
@@ -302,7 +301,7 @@ static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct s3c_i2sv2_info *i2s = to_info(dai);
- struct s3c_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
u32 iismod;
pr_debug("Entered %s\n", __func__);
diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
index d0684145ed1f..182d80564e37 100644
--- a/sound/soc/samsung/s3c-i2s-v2.h
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -60,8 +60,8 @@ struct s3c_i2sv2_info {
unsigned char master;
- struct s3c_dma_params *dma_playback;
- struct s3c_dma_params *dma_capture;
+ struct snd_dmaengine_dai_dma_data *dma_playback;
+ struct snd_dmaengine_dai_dma_data *dma_capture;
u32 suspend_iismod;
u32 suspend_iiscon;
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index d45dffb297d8..3e89fbc0c51d 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -34,14 +34,14 @@
#include <linux/platform_data/asoc-s3c.h>
-static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
- .ch_name = "tx",
- .dma_size = 4,
+static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_out = {
+ .chan_name = "tx",
+ .addr_width = 4,
};
-static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
- .ch_name = "rx",
- .dma_size = 4,
+static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_in = {
+ .chan_name = "rx",
+ .addr_width = 4,
};
static struct s3c_i2sv2_info s3c2412_i2s;
@@ -52,8 +52,8 @@ static int s3c2412_i2s_probe(struct snd_soc_dai *dai)
pr_debug("Entered %s\n", __func__);
- samsung_asoc_init_dma_data(dai, &s3c2412_i2s_pcm_stereo_out,
- &s3c2412_i2s_pcm_stereo_in);
+ snd_soc_dai_init_dma_data(dai, &s3c2412_i2s_pcm_stereo_out,
+ &s3c2412_i2s_pcm_stereo_in);
ret = s3c_i2sv2_probe(dai, &s3c2412_i2s, S3C2410_PA_IIS);
if (ret)
@@ -163,10 +163,10 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
if (IS_ERR(s3c2412_i2s.regs))
return PTR_ERR(s3c2412_i2s.regs);
- s3c2412_i2s_pcm_stereo_out.dma_addr = res->start + S3C2412_IISTXD;
- s3c2412_i2s_pcm_stereo_out.slave = pdata->dma_playback;
- s3c2412_i2s_pcm_stereo_in.dma_addr = res->start + S3C2412_IISRXD;
- s3c2412_i2s_pcm_stereo_in.slave = pdata->dma_capture;
+ s3c2412_i2s_pcm_stereo_out.addr = res->start + S3C2412_IISTXD;
+ s3c2412_i2s_pcm_stereo_out.filter_data = pdata->dma_playback;
+ s3c2412_i2s_pcm_stereo_in.addr = res->start + S3C2412_IISRXD;
+ s3c2412_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
ret = s3c_i2sv2_register_component(&pdev->dev, -1,
&s3c2412_i2s_component,
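
Alongside the structure change, these drivers switch from the Samsung helper samsung_asoc_init_dma_data() to the core snd_soc_dai_init_dma_data(), which simply stores the playback and capture descriptors on the DAI for the generic dmaengine PCM layer to pick up. A hedged sketch of a DAI probe doing this (names are illustrative):

#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

static struct snd_dmaengine_dai_dma_data example_tx, example_rx;

static int example_dai_probe(struct snd_soc_dai *dai)
{
	/* attach playback (tx) and capture (rx) DMA descriptors to the DAI */
	snd_soc_dai_init_dma_data(dai, &example_tx, &example_rx);
	return 0;
}
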
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 3e76f2a75a24..c78a936a3099 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -32,14 +32,14 @@
#include <linux/platform_data/asoc-s3c.h>
-static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
- .ch_name = "tx",
- .dma_size = 2,
+static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_out = {
+ .chan_name = "tx",
+ .addr_width = 2,
};
-static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
- .ch_name = "rx",
- .dma_size = 2,
+static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_in = {
+ .chan_name = "rx",
+ .addr_width = 2,
};
struct s3c24xx_i2s_info {
@@ -360,8 +360,8 @@ static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
{
pr_debug("Entered %s\n", __func__);
- samsung_asoc_init_dma_data(dai, &s3c24xx_i2s_pcm_stereo_out,
- &s3c24xx_i2s_pcm_stereo_in);
+ snd_soc_dai_init_dma_data(dai, &s3c24xx_i2s_pcm_stereo_out,
+ &s3c24xx_i2s_pcm_stereo_in);
s3c24xx_i2s.iis_clk = devm_clk_get(dai->dev, "iis");
if (IS_ERR(s3c24xx_i2s.iis_clk)) {
@@ -469,10 +469,10 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
if (IS_ERR(s3c24xx_i2s.regs))
return PTR_ERR(s3c24xx_i2s.regs);
- s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
- s3c24xx_i2s_pcm_stereo_out.slave = pdata->dma_playback;
- s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
- s3c24xx_i2s_pcm_stereo_in.slave = pdata->dma_capture;
+ s3c24xx_i2s_pcm_stereo_out.addr = res->start + S3C2410_IISFIFO;
+ s3c24xx_i2s_pcm_stereo_out.filter_data = pdata->dma_playback;
+ s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
+ s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
ret = devm_snd_soc_register_component(&pdev->dev,
&s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 92e88bca386e..7853fbe6ccc9 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -54,8 +54,6 @@ static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
};
#endif
-static struct platform_device *s3c24xx_uda134x_snd_device;
-
static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -66,17 +64,17 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
int ret = 0;
mutex_lock(&clk_lock);
- pr_debug("%s %d\n", __func__, clk_users);
+
if (clk_users == 0) {
- xtal = clk_get(&s3c24xx_uda134x_snd_device->dev, "xtal");
+ xtal = clk_get(rtd->dev, "xtal");
if (IS_ERR(xtal)) {
- printk(KERN_ERR "%s cannot get xtal\n", __func__);
+ dev_err(rtd->dev, "%s cannot get xtal\n", __func__);
ret = PTR_ERR(xtal);
} else {
pclk = clk_get(cpu_dai->dev, "iis");
if (IS_ERR(pclk)) {
- printk(KERN_ERR "%s cannot get pclk\n",
- __func__);
+ dev_err(rtd->dev, "%s cannot get pclk\n",
+ __func__);
clk_put(xtal);
ret = PTR_ERR(pclk);
}
@@ -102,8 +100,8 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (ret < 0)
- printk(KERN_ERR "%s cannot set constraints\n",
- __func__);
+ dev_err(rtd->dev, "%s cannot set constraints\n",
+ __func__);
#endif
}
return ret;
@@ -112,7 +110,6 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
{
mutex_lock(&clk_lock);
- pr_debug("%s %d\n", __func__, clk_users);
clk_users -= 1;
if (clk_users == 0) {
clk_put(xtal);
@@ -159,18 +156,19 @@ static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
clk_source = S3C24XX_CLKSRC_PCLK;
div = bi % 33;
}
- pr_debug("%s desired rate %lu, %d\n", __func__, rate, bi);
+
+ dev_dbg(rtd->dev, "%s desired rate %lu, %d\n", __func__, rate, bi);
clk = (fs_mode == S3C2410_IISMOD_384FS ? 384 : 256) * rate;
- pr_debug("%s will use: %s %s %d sysclk %d err %ld\n", __func__,
- fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
- clk_source == S3C24XX_CLKSRC_MPLL ? "MPLLin" : "PCLK",
- div, clk, err);
+
+ dev_dbg(rtd->dev, "%s will use: %s %s %d sysclk %d err %ld\n", __func__,
+ fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
+ clk_source == S3C24XX_CLKSRC_MPLL ? "MPLLin" : "PCLK",
+ div, clk, err);
if ((err * 100 / rate) > 5) {
- printk(KERN_ERR "S3C24XX_UDA134X: effective frequency "
- "too different from desired (%ld%%)\n",
- err * 100 / rate);
+ dev_err(rtd->dev, "effective frequency too different "
+ "from desired (%ld%%)\n", err * 100 / rate);
return -EINVAL;
}
@@ -227,115 +225,27 @@ static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
.num_links = 1,
};
-static struct s3c24xx_uda134x_platform_data *s3c24xx_uda134x_l3_pins;
-
-static void setdat(int v)
-{
- gpio_set_value(s3c24xx_uda134x_l3_pins->l3_data, v > 0);
-}
-
-static void setclk(int v)
-{
- gpio_set_value(s3c24xx_uda134x_l3_pins->l3_clk, v > 0);
-}
-
-static void setmode(int v)
-{
- gpio_set_value(s3c24xx_uda134x_l3_pins->l3_mode, v > 0);
-}
-
-/* FIXME - This must be codec platform data but in which board file ?? */
-static struct uda134x_platform_data s3c24xx_uda134x = {
- .l3 = {
- .setdat = setdat,
- .setclk = setclk,
- .setmode = setmode,
- .data_hold = 1,
- .data_setup = 1,
- .clock_high = 1,
- .mode_hold = 1,
- .mode = 1,
- .mode_setup = 1,
- },
-};
-
-static int s3c24xx_uda134x_setup_pin(int pin, char *fun)
-{
- if (gpio_request(pin, "s3c24xx_uda134x") < 0) {
- printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
- "l3 %s pin already in use", fun);
- return -EBUSY;
- }
- gpio_direction_output(pin, 0);
- return 0;
-}
-
static int s3c24xx_uda134x_probe(struct platform_device *pdev)
{
+ struct snd_soc_card *card = &snd_soc_s3c24xx_uda134x;
int ret;
- printk(KERN_INFO "S3C24XX_UDA134X SoC Audio driver\n");
+ platform_set_drvdata(pdev, card);
+ card->dev = &pdev->dev;
- s3c24xx_uda134x_l3_pins = pdev->dev.platform_data;
- if (s3c24xx_uda134x_l3_pins == NULL) {
- printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
- "unable to find platform data\n");
- return -ENODEV;
- }
- s3c24xx_uda134x.power = s3c24xx_uda134x_l3_pins->power;
- s3c24xx_uda134x.model = s3c24xx_uda134x_l3_pins->model;
-
- if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_data,
- "data") < 0)
- return -EBUSY;
- if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_clk,
- "clk") < 0) {
- gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
- return -EBUSY;
- }
- if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_mode,
- "mode") < 0) {
- gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
- gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
- return -EBUSY;
- }
-
- s3c24xx_uda134x_snd_device = platform_device_alloc("soc-audio", -1);
- if (!s3c24xx_uda134x_snd_device) {
- printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
- "Unable to register\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(s3c24xx_uda134x_snd_device,
- &snd_soc_s3c24xx_uda134x);
- platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));
- ret = platform_device_add(s3c24xx_uda134x_snd_device);
- if (ret) {
- printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
- platform_device_put(s3c24xx_uda134x_snd_device);
- }
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register card: %d\n", ret);
return ret;
}
-static int s3c24xx_uda134x_remove(struct platform_device *pdev)
-{
- platform_device_unregister(s3c24xx_uda134x_snd_device);
- gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
- gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
- gpio_free(s3c24xx_uda134x_l3_pins->l3_mode);
- return 0;
-}
-
static struct platform_driver s3c24xx_uda134x_driver = {
.probe = s3c24xx_uda134x_probe,
- .remove = s3c24xx_uda134x_remove,
.driver = {
.name = "s3c24xx_uda134x",
},
};
-
module_platform_driver(s3c24xx_uda134x_driver);
MODULE_AUTHOR("Zoltan Devai, Christian Pellegrin <chripell@evolware.org>");
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
index 6deec5234c92..a6d223310c67 100644
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ b/sound/soc/samsung/smdk_wm8580pcm.c
@@ -16,7 +16,6 @@
#include <asm/mach-types.h>
#include "../codecs/wm8580.h"
-#include "dma.h"
#include "pcm.h"
/*
diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c
index b1c89ec2d999..2e621496be8b 100644
--- a/sound/soc/samsung/smdk_wm8994pcm.c
+++ b/sound/soc/samsung/smdk_wm8994pcm.c
@@ -15,7 +15,6 @@
#include <sound/pcm_params.h>
#include "../codecs/wm8994.h"
-#include "dma.h"
#include "pcm.h"
/*
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 0cb9c8567546..26c1fbed4d35 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -90,10 +90,10 @@ struct samsung_spdif_info {
u32 saved_clkcon;
u32 saved_con;
u32 saved_cstas;
- struct s3c_dma_params *dma_playback;
+ struct snd_dmaengine_dai_dma_data *dma_playback;
};
-static struct s3c_dma_params spdif_stereo_out;
+static struct snd_dmaengine_dai_dma_data spdif_stereo_out;
static struct samsung_spdif_info spdif_info;
static inline struct samsung_spdif_info *to_info(struct snd_soc_dai *cpu_dai)
@@ -179,7 +179,7 @@ static int spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
void __iomem *regs = spdif->regs;
- struct s3c_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
u32 con, clkcon, cstas;
unsigned long flags;
int i, ratio;
@@ -425,11 +425,11 @@ static int spdif_probe(struct platform_device *pdev)
goto err4;
}
- spdif_stereo_out.dma_size = 2;
- spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
+ spdif_stereo_out.addr_width = 2;
+ spdif_stereo_out.addr = mem_res->start + DATA_OUTBUF;
filter = NULL;
if (spdif_pdata) {
- spdif_stereo_out.slave = spdif_pdata->dma_playback;
+ spdif_stereo_out.filter_data = spdif_pdata->dma_playback;
filter = spdif_pdata->dma_filter;
}
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 9311f119feb5..6db6405d952f 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -42,12 +42,6 @@ config SND_SOC_RCAR
help
This option enables R-Car SRU/SCU/SSIU/SSI sound support
-config SND_SOC_RSRC_CARD
- tristate "Renesas Sampling Rate Convert Sound Card"
- select SND_SIMPLE_CARD_UTILS
- help
- This option enables simple sound if you need sampling rate convert
-
##
## Boards
##
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index a89ddf758695..9c3d5aed99d1 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,5 +1,2 @@
snd-soc-rcar-objs := core.o gen.o dma.o adg.o ssi.o ssiu.o src.o ctu.o mix.o dvc.o cmd.o
obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
-
-snd-soc-rsrc-card-objs := rsrc-card.o
-obj-$(CONFIG_SND_SOC_RSRC_CARD) += snd-soc-rsrc-card.o
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 3351a701c60e..f18141098b50 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -110,6 +110,7 @@ MODULE_DEVICE_TABLE(of, rsnd_of_match);
/*
* rsnd_mod functions
*/
+#ifdef DEBUG
void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
{
if (mod->type != type) {
@@ -120,6 +121,7 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
rsnd_mod_name(mod), rsnd_mod_id(mod));
}
}
+#endif
char *rsnd_mod_name(struct rsnd_mod *mod)
{
@@ -574,6 +576,7 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
rsnd_dai_stream_init(io, substream);
ret = rsnd_dai_call(init, io, priv);
@@ -590,6 +593,7 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
break;
case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
ret = rsnd_dai_call(irq, io, priv, 0);
ret |= rsnd_dai_call(stop, io, priv);
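
The rcar trigger hook now treats RESUME like START and SUSPEND like STOP, so system suspend and resume reuse the normal stream start/stop paths. A generic sketch of that switch shape in a DAI .trigger callback (illustrative only; the start/stop helpers are stand-ins, not rcar functions):

#include <sound/pcm.h>
#include <sound/soc.h>

static int example_stream_start(struct snd_pcm_substream *substream) { return 0; }
static int example_stream_stop(struct snd_pcm_substream *substream) { return 0; }

static int example_dai_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:		/* resume reuses the start path */
		return example_stream_start(substream);
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:		/* suspend reuses the stop path */
		return example_stream_stop(substream);
	default:
		return -EINVAL;
	}
}
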
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
deleted file mode 100644
index fa37f842b62f..000000000000
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Renesas Sampling Rate Convert Sound Card for DPCM
- *
- * Copyright (C) 2015 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on ${LINUX}/sound/soc/generic/simple-card.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <sound/jack.h>
-#include <sound/soc.h>
-#include <sound/soc-dai.h>
-#include <sound/simple_card_utils.h>
-
-struct rsrc_card_of_data {
- const char *prefix;
- const struct snd_soc_dapm_route *routes;
- int num_routes;
-};
-
-static const struct snd_soc_dapm_route routes_ssi0_ak4642[] = {
- {"ak4642 Playback", NULL, "DAI0 Playback"},
- {"DAI0 Capture", NULL, "ak4642 Capture"},
-};
-
-static const struct rsrc_card_of_data routes_of_ssi0_ak4642 = {
- .prefix = "ak4642",
- .routes = routes_ssi0_ak4642,
- .num_routes = ARRAY_SIZE(routes_ssi0_ak4642),
-};
-
-static const struct of_device_id rsrc_card_of_match[] = {
- { .compatible = "renesas,rsrc-card,lager", .data = &routes_of_ssi0_ak4642 },
- { .compatible = "renesas,rsrc-card,koelsch", .data = &routes_of_ssi0_ak4642 },
- { .compatible = "renesas,rsrc-card", },
- {},
-};
-MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
-
-#define IDX_CPU 0
-#define IDX_CODEC 1
-struct rsrc_card_priv {
- struct snd_soc_card snd_card;
- struct snd_soc_codec_conf codec_conf;
- struct asoc_simple_dai *dai_props;
- struct snd_soc_dai_link *dai_link;
- u32 convert_rate;
- u32 convert_channels;
-};
-
-#define rsrc_priv_to_dev(priv) ((priv)->snd_card.dev)
-#define rsrc_priv_to_link(priv, i) ((priv)->snd_card.dai_link + (i))
-#define rsrc_priv_to_props(priv, i) ((priv)->dai_props + (i))
-
-static int rsrc_card_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
- rsrc_priv_to_props(priv, rtd->num);
-
- return clk_prepare_enable(dai_props->clk);
-}
-
-static void rsrc_card_shutdown(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
- rsrc_priv_to_props(priv, rtd->num);
-
- clk_disable_unprepare(dai_props->clk);
-}
-
-static struct snd_soc_ops rsrc_card_ops = {
- .startup = rsrc_card_startup,
- .shutdown = rsrc_card_shutdown,
-};
-
-static int rsrc_card_dai_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai;
- struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
- int num = rtd->num;
- int ret;
-
- dai_link = rsrc_priv_to_link(priv, num);
- dai_props = rsrc_priv_to_props(priv, num);
- dai = dai_link->dynamic ?
- rtd->cpu_dai :
- rtd->codec_dai;
-
- if (dai_props->sysclk) {
- ret = snd_soc_dai_set_sysclk(dai, 0, dai_props->sysclk, 0);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dai->dev, "set_sysclk error\n");
- goto err;
- }
- }
-
- if (dai_props->slots) {
- ret = snd_soc_dai_set_tdm_slot(dai,
- dai_props->tx_slot_mask,
- dai_props->rx_slot_mask,
- dai_props->slots,
- dai_props->slot_width);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dai->dev, "set_tdm_slot error\n");
- goto err;
- }
- }
-
- ret = 0;
-
-err:
- return ret;
-}
-
-static int rsrc_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
-{
- struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
-
- if (priv->convert_rate)
- rate->min =
- rate->max = priv->convert_rate;
-
- if (priv->convert_channels)
- channels->min =
- channels->max = priv->convert_channels;
-
- return 0;
-}
-
-static int rsrc_card_parse_links(struct device_node *np,
- struct rsrc_card_priv *priv,
- int idx, bool is_fe)
-{
- struct device *dev = rsrc_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
- struct of_phandle_args args;
- int ret;
-
- /*
- * Get node via "sound-dai = <&phandle port>"
- * it will be used as xxx_of_node on soc_bind_dai_link()
- */
- ret = of_parse_phandle_with_args(np, "sound-dai",
- "#sound-dai-cells", 0, &args);
- if (ret)
- return ret;
-
- /* Parse TDM slot */
- ret = snd_soc_of_parse_tdm_slot(np,
- &dai_props->tx_slot_mask,
- &dai_props->rx_slot_mask,
- &dai_props->slots,
- &dai_props->slot_width);
- if (ret)
- return ret;
-
- if (is_fe) {
- /* BE is dummy */
- dai_link->codec_of_node = NULL;
- dai_link->codec_dai_name = "snd-soc-dummy-dai";
- dai_link->codec_name = "snd-soc-dummy";
-
- /* FE settings */
- dai_link->dynamic = 1;
- dai_link->dpcm_merged_format = 1;
- dai_link->cpu_of_node = args.np;
- ret = snd_soc_of_get_dai_name(np, &dai_link->cpu_dai_name);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "fe.%s",
- dai_link->cpu_dai_name);
- if (ret < 0)
- return ret;
-
- /*
- * In soc_bind_dai_link() will check cpu name after
- * of_node matching if dai_link has cpu_dai_name.
- * but, it will never match if name was created by
- * fmt_single_name() remove cpu_dai_name if cpu_args
- * was 0. See:
- * fmt_single_name()
- * fmt_multiple_name()
- */
- if (!args.args_count)
- dai_link->cpu_dai_name = NULL;
- } else {
- const struct rsrc_card_of_data *of_data;
-
- of_data = of_device_get_match_data(dev);
-
- /* FE is dummy */
- dai_link->cpu_of_node = NULL;
- dai_link->cpu_dai_name = "snd-soc-dummy-dai";
- dai_link->cpu_name = "snd-soc-dummy";
-
- /* BE settings */
- dai_link->no_pcm = 1;
- dai_link->be_hw_params_fixup = rsrc_card_be_hw_params_fixup;
- dai_link->codec_of_node = args.np;
- ret = snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "be.%s",
- dai_link->codec_dai_name);
- if (ret < 0)
- return ret;
-
- /* additional name prefix */
- if (of_data) {
- priv->codec_conf.of_node = dai_link->codec_of_node;
- priv->codec_conf.name_prefix = of_data->prefix;
- } else {
- snd_soc_of_parse_audio_prefix(&priv->snd_card,
- &priv->codec_conf,
- dai_link->codec_of_node,
- "audio-prefix");
- }
- }
-
- /* Simple Card assumes platform == cpu */
- dai_link->platform_of_node = dai_link->cpu_of_node;
- dai_link->dpcm_playback = 1;
- dai_link->dpcm_capture = 1;
- dai_link->ops = &rsrc_card_ops;
- dai_link->init = rsrc_card_dai_init;
-
- return 0;
-}
-
-static int rsrc_card_parse_clk(struct device_node *np,
- struct rsrc_card_priv *priv,
- int idx, bool is_fe)
-{
- struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
- struct clk *clk;
- struct device_node *of_np = is_fe ? dai_link->cpu_of_node :
- dai_link->codec_of_node;
- u32 val;
-
- /*
- * Parse dai->sysclk come from "clocks = <&xxx>"
- * (if system has common clock)
- * or "system-clock-frequency = <xxx>"
- * or device's module clock.
- */
- if (of_property_read_bool(np, "clocks")) {
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- dai_props->sysclk = clk_get_rate(clk);
- dai_props->clk = clk;
- } else if (!of_property_read_u32(np, "system-clock-frequency", &val)) {
- dai_props->sysclk = val;
- } else {
- clk = of_clk_get(of_np, 0);
- if (!IS_ERR(clk))
- dai_props->sysclk = clk_get_rate(clk);
- }
-
- return 0;
-}
-
-static int rsrc_card_dai_sub_link_of(struct device_node *node,
- struct device_node *np,
- struct rsrc_card_priv *priv,
- int idx, bool is_fe)
-{
- struct device *dev = rsrc_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
- int ret;
-
- ret = rsrc_card_parse_links(np, priv, idx, is_fe);
- if (ret < 0)
- return ret;
-
- ret = rsrc_card_parse_clk(np, priv, idx, is_fe);
- if (ret < 0)
- return ret;
-
- dev_dbg(dev, "\t%s / %04x / %d\n",
- dai_link->name,
- dai_link->dai_fmt,
- dai_props->sysclk);
-
- return ret;
-}
-
-static int rsrc_card_dai_link_of(struct device_node *node,
- struct rsrc_card_priv *priv)
-{
- struct device *dev = rsrc_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link;
- struct device_node *np;
- unsigned int daifmt = 0;
- int ret, i;
- bool is_fe;
-
- /* find 1st codec */
- i = 0;
- for_each_child_of_node(node, np) {
- dai_link = rsrc_priv_to_link(priv, i);
-
- if (strcmp(np->name, "codec") == 0) {
- ret = asoc_simple_card_parse_daifmt(dev, node, np,
- NULL, &daifmt);
- if (ret < 0)
- return ret;
- break;
- }
- i++;
- }
-
- i = 0;
- for_each_child_of_node(node, np) {
- dai_link = rsrc_priv_to_link(priv, i);
- dai_link->dai_fmt = daifmt;
-
- is_fe = false;
- if (strcmp(np->name, "cpu") == 0)
- is_fe = true;
-
- ret = rsrc_card_dai_sub_link_of(node, np, priv, i, is_fe);
- if (ret < 0)
- return ret;
- i++;
- }
-
- return 0;
-}
-
-static int rsrc_card_parse_of(struct device_node *node,
- struct rsrc_card_priv *priv,
- struct device *dev)
-{
- const struct rsrc_card_of_data *of_data = of_device_get_match_data(dev);
- struct asoc_simple_dai *props;
- struct snd_soc_dai_link *links;
- int ret;
- int num;
-
- if (!node)
- return -EINVAL;
-
- num = of_get_child_count(node);
- props = devm_kzalloc(dev, sizeof(*props) * num, GFP_KERNEL);
- links = devm_kzalloc(dev, sizeof(*links) * num, GFP_KERNEL);
- if (!props || !links)
- return -ENOMEM;
-
- priv->dai_props = props;
- priv->dai_link = links;
-
- /* Init snd_soc_card */
- priv->snd_card.owner = THIS_MODULE;
- priv->snd_card.dev = dev;
- priv->snd_card.dai_link = priv->dai_link;
- priv->snd_card.num_links = num;
- priv->snd_card.codec_conf = &priv->codec_conf;
- priv->snd_card.num_configs = 1;
-
- if (of_data) {
- priv->snd_card.of_dapm_routes = of_data->routes;
- priv->snd_card.num_of_dapm_routes = of_data->num_routes;
- } else {
- snd_soc_of_parse_audio_routing(&priv->snd_card,
- "audio-routing");
- }
-
- /* sampling rate convert */
- of_property_read_u32(node, "convert-rate", &priv->convert_rate);
-
- /* channels transfer */
- of_property_read_u32(node, "convert-channels", &priv->convert_channels);
-
- dev_dbg(dev, "New rsrc-audio-card: %s\n",
- priv->snd_card.name ? priv->snd_card.name : "");
- dev_dbg(dev, "SRC : convert_rate %d\n", priv->convert_rate);
- dev_dbg(dev, "CTU : convert_channels %d\n", priv->convert_channels);
-
- ret = rsrc_card_dai_link_of(node, priv);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_parse_card_name(&priv->snd_card, "card-");
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* Decrease the reference count of the device nodes */
-static int rsrc_card_unref(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *dai_link;
- int num_links;
-
- for (num_links = 0, dai_link = card->dai_link;
- num_links < card->num_links;
- num_links++, dai_link++) {
- of_node_put(dai_link->cpu_of_node);
- of_node_put(dai_link->codec_of_node);
- }
- return 0;
-}
-
-static int rsrc_card_probe(struct platform_device *pdev)
-{
- struct rsrc_card_priv *priv;
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- int ret;
-
- /* Allocate the private data */
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- ret = rsrc_card_parse_of(np, priv, dev);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "parse error %d\n", ret);
- goto err;
- }
-
- snd_soc_card_set_drvdata(&priv->snd_card, priv);
-
- ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
- if (ret >= 0)
- return ret;
-err:
- rsrc_card_unref(&priv->snd_card);
-
- return ret;
-}
-
-static int rsrc_card_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- return rsrc_card_unref(card);
-}
-
-static struct platform_driver rsrc_card = {
- .driver = {
- .name = "renesas-src-audio-card",
- .of_match_table = rsrc_card_of_match,
- },
- .probe = rsrc_card_probe,
- .remove = rsrc_card_remove,
-};
-
-module_platform_driver(rsrc_card);
-
-MODULE_ALIAS("platform:renesas-src-audio-card");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Renesas Sampling Rate Convert Sound Card");
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 5f848f054745..6cb6db005fc4 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -928,7 +928,7 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
}
ops = &rsnd_ssi_non_ops;
- if (of_get_property(np, "pio-transfer", NULL))
+ if (of_property_read_bool(np, "pio-transfer"))
ops = &rsnd_ssi_pio_ops;
else
ops = &rsnd_ssi_dma_ops;
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index bc4a55bb3fd9..6c8b0b0c56ec 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -116,7 +116,7 @@ static int snd_soc_ac97_gpio_direction_out(struct gpio_chip *chip,
return snd_soc_update_bits(codec, AC97_GPIO_CFG, 1 << offset, 0);
}
-static struct gpio_chip snd_soc_ac97_gpio_chip = {
+static const struct gpio_chip snd_soc_ac97_gpio_chip = {
.label = "snd_soc_ac97",
.owner = THIS_MODULE,
.request = snd_soc_ac97_gpio_request,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4afa8dba5e98..c0bbcd903261 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3332,19 +3332,6 @@ int snd_soc_register_codec(struct device *dev,
if (ret)
goto err_free;
- if (codec_drv->controls) {
- codec->component.controls = codec_drv->controls;
- codec->component.num_controls = codec_drv->num_controls;
- }
- if (codec_drv->dapm_widgets) {
- codec->component.dapm_widgets = codec_drv->dapm_widgets;
- codec->component.num_dapm_widgets = codec_drv->num_dapm_widgets;
- }
- if (codec_drv->dapm_routes) {
- codec->component.dapm_routes = codec_drv->dapm_routes;
- codec->component.num_dapm_routes = codec_drv->num_dapm_routes;
- }
-
if (codec_drv->probe)
codec->component.probe = snd_soc_codec_drv_probe;
if (codec_drv->remove)
@@ -3732,7 +3719,7 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
* SND_SOC_DAIFMT_CLOCK_MASK area
*/
snprintf(prop, sizeof(prop), "%scontinuous-clock", prefix);
- if (of_get_property(np, prop, NULL))
+ if (of_property_read_bool(np, prop))
format |= SND_SOC_DAIFMT_CONT;
else
format |= SND_SOC_DAIFMT_GATED;
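
Both the rcar ssi.c hunk and the soc-core.c hunk above replace of_get_property(np, prop, NULL) with of_property_read_bool(), the idiomatic way to test a boolean (empty) device-tree property. A small sketch, using the property name from the ssi.c hunk:

#include <linux/of.h>

static bool example_uses_pio(struct device_node *np)
{
	/* true if the node carries an empty "pio-transfer" property */
	return of_property_read_bool(np, "pio-transfer");
}
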
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d908ff8f9755..3bbe32ee4630 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -823,6 +823,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_switch:
case snd_soc_dapm_mixer:
case snd_soc_dapm_pga:
+ case snd_soc_dapm_out_drv:
wname_in_long_name = true;
kcname_in_long_name = true;
break;
@@ -1169,7 +1170,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
* @custom_stop_condition: (optional) a function meant to stop the widget graph
* walk based on custom logic.
*
- * Queries DAPM graph as to whether an valid audio stream path exists for
+ * Queries DAPM graph as to whether a valid audio stream path exists for
* the initial stream specified by name. This takes into account
* current mixer and mux kcontrol settings. Creates list of valid widgets.
*
@@ -1294,8 +1295,7 @@ static int dapm_widget_power_check(struct snd_soc_dapm_widget *w)
return w->new_power;
}
-/* Generic check to see if a widget should be powered.
- */
+/* Generic check to see if a widget should be powered. */
static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
{
int in, out;
@@ -1646,7 +1646,7 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
struct snd_soc_dapm_context *d = data;
int ret;
- /* If we're off and we're not supposed to be go into STANDBY */
+ /* If we're off and we're not supposed to go into STANDBY */
if (d->bias_level == SND_SOC_BIAS_OFF &&
d->target_bias_level != SND_SOC_BIAS_OFF) {
if (d->dev)
@@ -1798,7 +1798,7 @@ static bool dapm_idle_bias_off(struct snd_soc_dapm_context *dapm)
* A complete path is a route that has valid endpoints i.e.:-
*
* o DAC to output pin.
- * o Input Pin to ADC.
+ * o Input pin to ADC.
* o Input pin to Output pin (bypass, sidetone)
* o DAC to ADC (loopback).
*/
@@ -2114,7 +2114,7 @@ static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
* soc_dapm_connect_path() - Connects or disconnects a path
* @path: The path to update
* @connect: The new connect state of the path. True if the path is connected,
- * false if it is disconneted.
+ * false if it is disconnected.
* @reason: The reason why the path changed (for debugging only)
*/
static void soc_dapm_connect_path(struct snd_soc_dapm_path *path,
@@ -2233,7 +2233,7 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
if (w->dapm != dapm)
continue;
- /* only display widgets that burnm power */
+ /* only display widgets that burn power */
switch (w->id) {
case snd_soc_dapm_hp:
case snd_soc_dapm_mic:
@@ -2461,7 +2461,7 @@ static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
switch (w->id) {
case snd_soc_dapm_input:
- /* On a fully routed card a input is never a source */
+ /* On a fully routed card an input is never a source */
if (w->dapm->card->fully_routed)
return;
ep = SND_SOC_DAPM_EP_SOURCE;
@@ -3049,6 +3049,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
}
mutex_unlock(&card->dapm_mutex);
+ if (ret)
+ return ret;
+
if (invert)
ucontrol->value.integer.value[0] = max - val;
else
@@ -3200,7 +3203,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
if (e->shift_l != e->shift_r) {
if (item[1] > e->items)
return -EINVAL;
- val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l;
+ val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
mask |= e->mask << e->shift_r;
}
@@ -3445,7 +3448,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->endpoints[dir] = -1;
}
- /* machine layer set ups unconnected pins and insertions */
+ /* machine layer sets up unconnected pins and insertions */
w->connected = 1;
return w;
}
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index a513a34a51d2..9fc1a7bb8b95 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -77,7 +77,7 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
item = snd_soc_enum_val_to_item(e, val);
ucontrol->value.enumerated.item[0] = item;
if (e->shift_l != e->shift_r) {
- val = (reg_val >> e->shift_l) & e->mask;
+ val = (reg_val >> e->shift_r) & e->mask;
item = snd_soc_enum_val_to_item(e, val);
ucontrol->value.enumerated.item[1] = item;
}
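
The soc-dapm.c put and soc-ops.c get hunks fix the second channel of a double-shift enum control to use shift_r instead of shift_l when packing and unpacking the register value. A sketch of the intended packing, assuming the usual soc_enum layout where channel 0 sits at shift_l and channel 1 at shift_r (illustrative, not the kernel function):

#include <sound/soc.h>

/* pack two enum items into one register value */
static unsigned int example_pack_double_enum(struct soc_enum *e,
					     unsigned int item0,
					     unsigned int item1)
{
	unsigned int val;

	val = snd_soc_enum_item_to_val(e, item0) << e->shift_l;
	if (e->shift_l != e->shift_r)
		val |= snd_soc_enum_item_to_val(e, item1) << e->shift_r;
	return val;
}
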
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 60d702f8b9f0..d56a16a0f6fa 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1694,6 +1694,9 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
int i;
+ if (rtd->dai_link->be_hw_params_fixup)
+ continue;
+
if (soc_pcm_has_symmetry(be_substream))
be_substream->runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
@@ -1790,7 +1793,7 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
continue;
dev_dbg(be->dev, "ASoC: close BE %s\n",
- dpcm->fe->dai_link->name);
+ be->dai_link->name);
soc_pcm_close(be_substream);
be_substream->runtime = NULL;
@@ -1856,7 +1859,7 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
continue;
dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
- dpcm->fe->dai_link->name);
+ be->dai_link->name);
soc_pcm_hw_free(be_substream);
@@ -1934,7 +1937,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
continue;
dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
- dpcm->fe->dai_link->name);
+ be->dai_link->name);
ret = soc_pcm_hw_params(be_substream, &dpcm->hw_params);
if (ret < 0) {
@@ -2014,7 +2017,7 @@ static int dpcm_do_trigger(struct snd_soc_dpcm *dpcm,
int ret;
dev_dbg(dpcm->be->dev, "ASoC: trigger BE %s cmd %d\n",
- dpcm->fe->dai_link->name, cmd);
+ dpcm->be->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
if (ret < 0)
@@ -2229,7 +2232,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
continue;
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
- dpcm->fe->dai_link->name);
+ be->dai_link->name);
ret = soc_pcm_prepare(be_substream);
if (ret < 0) {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index ee7f15aa46fc..6b05047a4134 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -48,9 +48,10 @@
#define SOC_TPLG_PASS_PCM_DAI 4
#define SOC_TPLG_PASS_GRAPH 5
#define SOC_TPLG_PASS_PINS 6
+#define SOC_TPLG_PASS_BE_DAI 7
#define SOC_TPLG_PASS_START SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END SOC_TPLG_PASS_PINS
+#define SOC_TPLG_PASS_END SOC_TPLG_PASS_BE_DAI
struct soc_tplg {
const struct firmware *fw;
@@ -1475,6 +1476,7 @@ widget:
if (widget == NULL) {
dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
w->name);
+ ret = -ENOMEM;
goto hdr_err;
}
@@ -1554,6 +1556,25 @@ static void set_stream_info(struct snd_soc_pcm_stream *stream,
stream->rate_min = caps->rate_min;
stream->rate_max = caps->rate_max;
stream->formats = caps->formats;
+ stream->sig_bits = caps->sig_bits;
+}
+
+static void set_dai_flags(struct snd_soc_dai_driver *dai_drv,
+ unsigned int flag_mask, unsigned int flags)
+{
+ if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES)
+ dai_drv->symmetric_rates =
+ flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES ? 1 : 0;
+
+ if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS)
+ dai_drv->symmetric_channels =
+ flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS ?
+ 1 : 0;
+
+ if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS)
+ dai_drv->symmetric_samplebits =
+ flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS ?
+ 1 : 0;
}
static int soc_tplg_dai_create(struct soc_tplg *tplg,
@@ -1690,8 +1711,96 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
return 0;
}
+/* *
+ * soc_tplg_be_dai_config - Find and configure an existing BE DAI.
+ * @tplg: topology context
+ * @be: topology BE DAI configs.
+ *
+ * The BE dai should already be registered by the platform driver. The
+ * platform driver should specify the BE DAI name and ID for matching.
+ */
+static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
+ struct snd_soc_tplg_be_dai *be)
+{
+ struct snd_soc_dai_link_component dai_component = {0};
+ struct snd_soc_dai *dai;
+ struct snd_soc_dai_driver *dai_drv;
+ struct snd_soc_pcm_stream *stream;
+ struct snd_soc_tplg_stream_caps *caps;
+ int ret;
+
+ dai_component.dai_name = be->dai_name;
+ dai = snd_soc_find_dai(&dai_component);
+ if (!dai) {
+ dev_err(tplg->dev, "ASoC: BE DAI %s not registered\n",
+ be->dai_name);
+ return -EINVAL;
+ }
+
+ if (be->dai_id != dai->id) {
+ dev_err(tplg->dev, "ASoC: BE DAI %s id mismatch\n",
+ be->dai_name);
+ return -EINVAL;
+ }
+
+ dai_drv = dai->driver;
+ if (!dai_drv)
+ return -EINVAL;
+
+ if (be->playback) {
+ stream = &dai_drv->playback;
+ caps = &be->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
+ set_stream_info(stream, caps);
+ }
+
+ if (be->capture) {
+ stream = &dai_drv->capture;
+ caps = &be->caps[SND_SOC_TPLG_STREAM_CAPTURE];
+ set_stream_info(stream, caps);
+ }
+
+ if (be->flag_mask)
+ set_dai_flags(dai_drv, be->flag_mask, be->flags);
+
+ /* pass control to component driver for optional further init */
+ ret = soc_tplg_dai_load(tplg, dai_drv);
+ if (ret < 0) {
+ dev_err(tplg->comp->dev, "ASoC: DAI loading failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int soc_tplg_be_dai_elems_load(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr)
+{
+ struct snd_soc_tplg_be_dai *be;
+ int count = hdr->count;
+ int i;
+
+ if (tplg->pass != SOC_TPLG_PASS_BE_DAI)
+ return 0;
+
+ /* config the existing BE DAIs */
+ for (i = 0; i < count; i++) {
+ be = (struct snd_soc_tplg_be_dai *)tplg->pos;
+ if (be->size != sizeof(*be)) {
+ dev_err(tplg->dev, "ASoC: invalid BE DAI size\n");
+ return -EINVAL;
+ }
+
+ soc_tplg_be_dai_config(tplg, be);
+ tplg->pos += (sizeof(*be) + be->priv.size);
+ }
+
+ dev_dbg(tplg->dev, "ASoC: Configure %d BE DAIs\n", count);
+ return 0;
+}
+
+
static int soc_tplg_manifest_load(struct soc_tplg *tplg,
- struct snd_soc_tplg_hdr *hdr)
+ struct snd_soc_tplg_hdr *hdr)
{
struct snd_soc_tplg_manifest *manifest;
@@ -1793,6 +1902,8 @@ static int soc_tplg_load_header(struct soc_tplg *tplg,
return soc_tplg_dapm_widget_elems_load(tplg, hdr);
case SND_SOC_TPLG_TYPE_PCM:
return soc_tplg_pcm_elems_load(tplg, hdr);
+ case SND_SOC_TPLG_TYPE_BE_DAI:
+ return soc_tplg_be_dai_elems_load(tplg, hdr);
case SND_SOC_TPLG_TYPE_MANIFEST:
return soc_tplg_manifest_load(tplg, hdr);
default:
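
The new SOC_TPLG_PASS_BE_DAI pass lets the loader configure BE DAI elements only after the earlier passes have run; each element is matched against an already-registered DAI by name and id before its stream capabilities and symmetry flags are copied onto the DAI driver. A minimal sketch of the name lookup, assuming snd_soc_find_dai() is visible to the caller the same way it is to the topology core:

#include <sound/soc.h>

static struct snd_soc_dai *example_lookup_dai(const char *name)
{
	struct snd_soc_dai_link_component comp = {
		.dai_name = name,	/* match by DAI name only */
	};

	return snd_soc_find_dai(&comp);	/* NULL if no such DAI is registered */
}
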
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 53dd085d3ee2..393e8f0fe2cc 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -80,7 +80,7 @@ static int dummy_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops dummy_dma_ops = {
+static const struct snd_pcm_ops dummy_dma_ops = {
.open = dummy_dma_open,
.ioctl = snd_pcm_lib_ioctl,
};
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 488ef4ed8fba..549fac349fa0 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -19,6 +19,84 @@
#define UNIPERIF_MAX_FRAME_SZ 0x20
#define UNIPERIF_ALLOWED_FRAME_SZ (0x08 | 0x10 | 0x18 | UNIPERIF_MAX_FRAME_SZ)
+struct sti_uniperiph_dev_data {
+ unsigned int id; /* Nb available player instances */
+ unsigned int version; /* player IP version */
+ unsigned int stream;
+ const char *dai_names;
+ enum uniperif_type type;
+};
+
+static const struct sti_uniperiph_dev_data sti_uniplayer_hdmi = {
+ .id = 0,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
+ .stream = SNDRV_PCM_STREAM_PLAYBACK,
+ .dai_names = "Uni Player #0 (HDMI)",
+ .type = SND_ST_UNIPERIF_TYPE_HDMI
+};
+
+static const struct sti_uniperiph_dev_data sti_uniplayer_pcm_out = {
+ .id = 1,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
+ .stream = SNDRV_PCM_STREAM_PLAYBACK,
+ .dai_names = "Uni Player #1 (PCM OUT)",
+ .type = SND_ST_UNIPERIF_TYPE_PCM | SND_ST_UNIPERIF_TYPE_TDM,
+};
+
+static const struct sti_uniperiph_dev_data sti_uniplayer_dac = {
+ .id = 2,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
+ .stream = SNDRV_PCM_STREAM_PLAYBACK,
+ .dai_names = "Uni Player #2 (DAC)",
+ .type = SND_ST_UNIPERIF_TYPE_PCM,
+};
+
+static const struct sti_uniperiph_dev_data sti_uniplayer_spdif = {
+ .id = 3,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
+ .stream = SNDRV_PCM_STREAM_PLAYBACK,
+ .dai_names = "Uni Player #3 (SPDIF)",
+ .type = SND_ST_UNIPERIF_TYPE_SPDIF
+};
+
+static const struct sti_uniperiph_dev_data sti_unireader_pcm_in = {
+ .id = 0,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
+ .stream = SNDRV_PCM_STREAM_CAPTURE,
+ .dai_names = "Uni Reader #0 (PCM IN)",
+ .type = SND_ST_UNIPERIF_TYPE_PCM | SND_ST_UNIPERIF_TYPE_TDM,
+};
+
+static const struct sti_uniperiph_dev_data sti_unireader_hdmi_in = {
+ .id = 1,
+ .version = SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
+ .stream = SNDRV_PCM_STREAM_CAPTURE,
+ .dai_names = "Uni Reader #1 (HDMI IN)",
+ .type = SND_ST_UNIPERIF_TYPE_PCM,
+};
+
+static const struct of_device_id snd_soc_sti_match[] = {
+ { .compatible = "st,stih407-uni-player-hdmi",
+ .data = &sti_uniplayer_hdmi
+ },
+ { .compatible = "st,stih407-uni-player-pcm-out",
+ .data = &sti_uniplayer_pcm_out
+ },
+ { .compatible = "st,stih407-uni-player-dac",
+ .data = &sti_uniplayer_dac
+ },
+ { .compatible = "st,stih407-uni-player-spdif",
+ .data = &sti_uniplayer_spdif
+ },
+ { .compatible = "st,stih407-uni-reader-pcm_in",
+ .data = &sti_unireader_pcm_in
+ },
+ { .compatible = "st,stih407-uni-reader-hdmi",
+ .data = &sti_unireader_hdmi_in
+ },
+ {},
+};
+
int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots,
int slot_width)
@@ -167,8 +245,8 @@ static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
* Uniperipheral instance ID
*/
ctrl = &uni->snd_ctrls[i];
- ctrl->index = uni->info->id;
- ctrl->device = uni->info->id;
+ ctrl->index = uni->id;
+ ctrl->device = uni->id;
}
return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
@@ -186,7 +264,7 @@ int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_dmaengine_dai_dma_data *dma_data;
int transfer_size;
- if (uni->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+ if (uni->type == SND_ST_UNIPERIF_TYPE_TDM)
/* transfer size = user frame size (in 32-bits FIFO cell) */
transfer_size = snd_soc_params_to_frame_size(params) / 32;
else
@@ -235,7 +313,7 @@ static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
struct uniperif *uni = priv->dai_data.uni;
int ret;
- if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player")) {
+ if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = uni_player_resume(uni);
if (ret)
return ret;
@@ -256,7 +334,7 @@ static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
struct sti_uniperiph_dai *dai_data = &priv->dai_data;
/* DMA settings*/
- if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player"))
+ if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
else
snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
@@ -280,25 +358,32 @@ static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
static int sti_uniperiph_cpu_dai_of(struct device_node *node,
struct sti_uniperiph_data *priv)
{
- const char *str;
- int ret;
struct device *dev = &priv->pdev->dev;
struct sti_uniperiph_dai *dai_data = &priv->dai_data;
struct snd_soc_dai_driver *dai = priv->dai;
struct snd_soc_pcm_stream *stream;
struct uniperif *uni;
+ const struct of_device_id *of_id;
+ const struct sti_uniperiph_dev_data *dev_data;
+ const char *mode;
+
+ /* Populate data structure depending on compatibility */
+ of_id = of_match_node(snd_soc_sti_match, node);
+ if (!of_id->data) {
+ dev_err(dev, "data associated with device is missing");
+ return -EINVAL;
+ }
+ dev_data = (struct sti_uniperiph_dev_data *)of_id->data;
uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
if (!uni)
return -ENOMEM;
+ uni->id = dev_data->id;
+ uni->ver = dev_data->version;
+
*dai = sti_uniperiph_dai_template;
- ret = of_property_read_string(node, "dai-name", &str);
- if (ret < 0) {
- dev_err(dev, "%s: dai name missing.\n", __func__);
- return -EINVAL;
- }
- dai->name = str;
+ dai->name = dev_data->dai_names;
/* Get resources */
uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
@@ -322,9 +407,20 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
return -ENXIO;
}
+ uni->type = dev_data->type;
+
+ /* check if player should be configured for tdm */
+ if (dev_data->type & SND_ST_UNIPERIF_TYPE_TDM) {
+ if (!of_property_read_string(node, "st,tdm-mode", &mode))
+ uni->type = SND_ST_UNIPERIF_TYPE_TDM;
+ else
+ uni->type = SND_ST_UNIPERIF_TYPE_PCM;
+ }
+
dai_data->uni = uni;
+ dai_data->stream = dev_data->stream;
- if (of_device_is_compatible(node, "st,sti-uni-player")) {
+ if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
uni_player_init(priv->pdev, uni);
stream = &dai->playback;
} else {
@@ -376,12 +472,6 @@ static int sti_uniperiph_probe(struct platform_device *pdev)
&dmaengine_pcm_config, 0);
}
-static const struct of_device_id snd_soc_sti_match[] = {
- { .compatible = "st,sti-uni-player", },
- { .compatible = "st,sti-uni-reader", },
- {},
-};
-
static struct platform_driver sti_uniperiph_driver = {
.driver = {
.name = "sti-uniperiph-dai",
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index eb9933c62ad6..1993c655fb79 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -1220,16 +1220,16 @@
#define UNIPERIF_FIFO_FRAMES 4 /* FDMA trigger limit in frames */
#define UNIPERIF_TYPE_IS_HDMI(p) \
- ((p)->info->type == SND_ST_UNIPERIF_TYPE_HDMI)
+ ((p)->type == SND_ST_UNIPERIF_TYPE_HDMI)
#define UNIPERIF_TYPE_IS_PCM(p) \
- ((p)->info->type == SND_ST_UNIPERIF_TYPE_PCM)
+ ((p)->type == SND_ST_UNIPERIF_TYPE_PCM)
#define UNIPERIF_TYPE_IS_SPDIF(p) \
- ((p)->info->type == SND_ST_UNIPERIF_TYPE_SPDIF)
+ ((p)->type == SND_ST_UNIPERIF_TYPE_SPDIF)
#define UNIPERIF_TYPE_IS_IEC958(p) \
(UNIPERIF_TYPE_IS_HDMI(p) || \
UNIPERIF_TYPE_IS_SPDIF(p))
#define UNIPERIF_TYPE_IS_TDM(p) \
- ((p)->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+ ((p)->type == SND_ST_UNIPERIF_TYPE_TDM)
/*
* Uniperipheral IP revisions
@@ -1249,11 +1249,11 @@ enum uniperif_version {
};
enum uniperif_type {
- SND_ST_UNIPERIF_TYPE_NONE,
- SND_ST_UNIPERIF_TYPE_HDMI,
- SND_ST_UNIPERIF_TYPE_PCM,
- SND_ST_UNIPERIF_TYPE_SPDIF,
- SND_ST_UNIPERIF_TYPE_TDM
+ SND_ST_UNIPERIF_TYPE_NONE = 0x00,
+ SND_ST_UNIPERIF_TYPE_HDMI = 0x01,
+ SND_ST_UNIPERIF_TYPE_PCM = 0x02,
+ SND_ST_UNIPERIF_TYPE_SPDIF = 0x04,
+ SND_ST_UNIPERIF_TYPE_TDM = 0x08
};
enum uniperif_state {
@@ -1278,12 +1278,6 @@ enum uniperif_word_pos {
WORD_MAX
};
-struct uniperif_info {
- int id; /* instance value of the uniperipheral IP */
- enum uniperif_type type;
- int underflow_enabled; /* Underflow recovery mode */
-};
-
struct uniperif_iec958_settings {
enum uniperif_iec958_encoding_mode encoding_mode;
struct snd_aes_iec958 iec958;
@@ -1298,8 +1292,10 @@ struct dai_tdm_slot {
struct uniperif {
/* System information */
- struct uniperif_info *info;
+ enum uniperif_type type;
+ int underflow_enabled; /* Underflow recovery mode */
struct device *dev;
+ int id; /* instance value of the uniperipheral IP */
int ver; /* IP version, used by register access macros */
struct regmap_field *clk_sel;
struct regmap_field *valid_sel;
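
enum uniperif_type now uses power-of-two values, so a dev_data entry can advertise several capabilities at once (e.g. PCM | TDM in the player and reader tables earlier in this series) while the resolved uni->type still holds a single mode. A short sketch of the mask-then-resolve idea (illustrative values only):

#include <linux/types.h>

enum example_type {
	EXAMPLE_TYPE_NONE = 0x00,
	EXAMPLE_TYPE_PCM  = 0x02,
	EXAMPLE_TYPE_TDM  = 0x08,
};

static enum example_type example_resolve(unsigned int capabilities, bool want_tdm)
{
	if ((capabilities & EXAMPLE_TYPE_TDM) && want_tdm)
		return EXAMPLE_TYPE_TDM;	/* TDM requested and supported */
	if (capabilities & EXAMPLE_TYPE_PCM)
		return EXAMPLE_TYPE_PCM;
	return EXAMPLE_TYPE_NONE;
}
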
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 1ac2db205a0d..1bc8ebc2528e 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -100,7 +100,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
dev_err(player->dev, "FIFO underflow error detected");
/* Interrupt is just for information when underflow recovery */
- if (player->info->underflow_enabled) {
+ if (player->underflow_enabled) {
/* Update state to underflow */
player->state = UNIPERIF_STATE_UNDERFLOW;
@@ -134,7 +134,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
/* Check for underflow recovery done */
if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
- if (!player->info->underflow_enabled) {
+ if (!player->underflow_enabled) {
dev_err(player->dev, "unexpected Underflow recovering");
return -EPERM;
}
@@ -764,7 +764,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
}
/* Calculate transfer size (in fifo cells and bytes) for frame count */
- if (player->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+ if (player->type == SND_ST_UNIPERIF_TYPE_TDM) {
/* transfer size = user frame size (in 32 bits FIFO cell) */
transfer_size =
sti_uniperiph_get_user_frame_size(runtime) / 4;
@@ -794,7 +794,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
/* Uniperipheral setup depends on player type */
- switch (player->info->type) {
+ switch (player->type) {
case SND_ST_UNIPERIF_TYPE_HDMI:
ret = uni_player_prepare_iec958(player, runtime);
break;
@@ -884,7 +884,7 @@ static int uni_player_start(struct uniperif *player)
SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
/* Enable underflow recovery interrupts */
- if (player->info->underflow_enabled) {
+ if (player->underflow_enabled) {
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
}
@@ -893,8 +893,10 @@ static int uni_player_start(struct uniperif *player)
SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
ret = reset_player(player);
- if (ret < 0)
+ if (ret < 0) {
+ clk_disable_unprepare(player->clk);
return ret;
+ }
/*
* Does not use IEC61937 features of the uniperipheral hardware.
@@ -1021,8 +1023,8 @@ static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
struct reg_field regfield[2] = {
/* PCM_CLK_SEL */
REG_FIELD(SYS_CFG_AUDIO_GLUE,
- 8 + player->info->id,
- 8 + player->info->id),
+ 8 + player->id,
+ 8 + player->id),
/* PCMP_VALID_SEL */
REG_FIELD(SYS_CFG_AUDIO_GLUE, 0, 1)
};
@@ -1040,60 +1042,6 @@ static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
return 0;
}
-static int uni_player_parse_dt(struct platform_device *pdev,
- struct uniperif *player)
-{
- struct uniperif_info *info;
- struct device *dev = &pdev->dev;
- struct device_node *pnode = pdev->dev.of_node;
- const char *mode;
-
- /* Allocate memory for the info structure */
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- if (of_property_read_u32(pnode, "st,version", &player->ver) ||
- player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
- dev_err(dev, "Unknown uniperipheral version ");
- return -EINVAL;
- }
- /* Underflow recovery is only supported on later ip revisions */
- if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
- info->underflow_enabled = 1;
-
- if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) {
- dev_err(dev, "uniperipheral id not defined");
- return -EINVAL;
- }
-
- /* Read the device mode property */
- if (of_property_read_string(pnode, "st,mode", &mode)) {
- dev_err(dev, "uniperipheral mode not defined");
- return -EINVAL;
- }
-
- if (strcasecmp(mode, "hdmi") == 0)
- info->type = SND_ST_UNIPERIF_TYPE_HDMI;
- else if (strcasecmp(mode, "pcm") == 0)
- info->type = SND_ST_UNIPERIF_TYPE_PCM;
- else if (strcasecmp(mode, "spdif") == 0)
- info->type = SND_ST_UNIPERIF_TYPE_SPDIF;
- else if (strcasecmp(mode, "tdm") == 0)
- info->type = SND_ST_UNIPERIF_TYPE_TDM;
- else
- info->type = SND_ST_UNIPERIF_TYPE_NONE;
-
- /* Save the info structure */
- player->info = info;
-
- /* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
- if (uni_player_parse_dt_audio_glue(pdev, player))
- return -EINVAL;
-
- return 0;
-}
-
static const struct snd_soc_dai_ops uni_player_dai_ops = {
.startup = uni_player_startup,
.shutdown = uni_player_shutdown,
@@ -1114,13 +1062,18 @@ int uni_player_init(struct platform_device *pdev,
player->state = UNIPERIF_STATE_STOPPED;
player->dai_ops = &uni_player_dai_ops;
- ret = uni_player_parse_dt(pdev, player);
+ /* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
+ ret = uni_player_parse_dt_audio_glue(pdev, player);
if (ret < 0) {
dev_err(player->dev, "Failed to parse DeviceTree");
return ret;
}
+ /* Underflow recovery is only supported on later ip revisions */
+ if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+ player->underflow_enabled = 1;
+
if (UNIPERIF_TYPE_IS_TDM(player))
player->hw = &uni_tdm_hw;
else
@@ -1144,8 +1097,8 @@ int uni_player_init(struct platform_device *pdev,
/* connect to I2S/TDM TX bus */
if (player->valid_sel &&
- (player->info->id == UNIPERIF_PLAYER_I2S_OUT)) {
- ret = regmap_field_write(player->valid_sel, player->info->id);
+ (player->id == UNIPERIF_PLAYER_I2S_OUT)) {
+ ret = regmap_field_write(player->valid_sel, player->id);
if (ret) {
dev_err(player->dev,
"%s: unable to connect to tdm bus", __func__);
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index eb74a328c928..0e1c3ee56675 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -13,6 +13,7 @@
#include "uniperif.h"
+#define UNIPERIF_READER_I2S_IN 0 /* reader id connected to I2S/TDM TX bus */
/*
* Note: snd_pcm_hardware is linked to DMA controller but is declared here to
* integrate unireader capability in term of rate and supported channels
@@ -195,7 +196,7 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
}
/* Calculate transfer size (in fifo cells and bytes) for frame count */
- if (reader->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+ if (reader->type == SND_ST_UNIPERIF_TYPE_TDM) {
/* transfer size = unip frame size (in 32 bits FIFO cell) */
transfer_size =
sti_uniperiph_get_user_frame_size(runtime) / 4;
@@ -280,7 +281,7 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
/* Enable underflow recovery interrupts */
- if (reader->info->underflow_enabled) {
+ if (reader->underflow_enabled) {
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
}
@@ -394,41 +395,6 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
}
}
-static int uni_reader_parse_dt(struct platform_device *pdev,
- struct uniperif *reader)
-{
- struct uniperif_info *info;
- struct device_node *node = pdev->dev.of_node;
- const char *mode;
-
- /* Allocate memory for the info structure */
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- if (of_property_read_u32(node, "st,version", &reader->ver) ||
- reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
- dev_err(&pdev->dev, "Unknown uniperipheral version ");
- return -EINVAL;
- }
-
- /* Read the device mode property */
- if (of_property_read_string(node, "st,mode", &mode)) {
- dev_err(&pdev->dev, "uniperipheral mode not defined");
- return -EINVAL;
- }
-
- if (strcasecmp(mode, "tdm") == 0)
- info->type = SND_ST_UNIPERIF_TYPE_TDM;
- else
- info->type = SND_ST_UNIPERIF_TYPE_PCM;
-
- /* Save the info structure */
- reader->info = info;
-
- return 0;
-}
-
static const struct snd_soc_dai_ops uni_reader_dai_ops = {
.startup = uni_reader_startup,
.shutdown = uni_reader_shutdown,
@@ -448,12 +414,6 @@ int uni_reader_init(struct platform_device *pdev,
reader->state = UNIPERIF_STATE_STOPPED;
reader->dai_ops = &uni_reader_dai_ops;
- ret = uni_reader_parse_dt(pdev, reader);
- if (ret < 0) {
- dev_err(reader->dev, "Failed to parse DeviceTree");
- return ret;
- }
-
if (UNIPERIF_TYPE_IS_TDM(reader))
reader->hw = &uni_tdm_hw;
else
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 2a954bd01fd8..dd2368297fd3 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -1,4 +1,5 @@
menu "Allwinner SoC Audio support"
+ depends on ARCH_SUNXI || COMPILE_TEST
config SND_SUN4I_CODEC
tristate "Allwinner A10 Codec Support"
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 44f170c73b06..e047ec06d538 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -96,8 +96,8 @@
/* Other various ADC registers */
#define SUN4I_CODEC_DAC_TXCNT (0x30)
#define SUN4I_CODEC_ADC_RXCNT (0x34)
-#define SUN4I_CODEC_AC_SYS_VERI (0x38)
-#define SUN4I_CODEC_AC_MIC_PHONE_CAL (0x3c)
+#define SUN7I_CODEC_AC_DAC_CAL (0x38)
+#define SUN7I_CODEC_AC_MIC_PHONE_CAL (0x3c)
struct sun4i_codec {
struct device *dev;
@@ -509,7 +509,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
-static const struct snd_kcontrol_new sun4i_codec_widgets[] = {
+static const struct snd_kcontrol_new sun4i_codec_controls[] = {
SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
sun4i_codec_pa_volume_scale),
@@ -628,12 +628,14 @@ static const struct snd_soc_dapm_route sun4i_codec_codec_dapm_routes[] = {
};
static struct snd_soc_codec_driver sun4i_codec_codec = {
- .controls = sun4i_codec_widgets,
- .num_controls = ARRAY_SIZE(sun4i_codec_widgets),
- .dapm_widgets = sun4i_codec_codec_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sun4i_codec_codec_dapm_widgets),
- .dapm_routes = sun4i_codec_codec_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(sun4i_codec_codec_dapm_routes),
+ .component_driver = {
+ .controls = sun4i_codec_controls,
+ .num_controls = ARRAY_SIZE(sun4i_codec_controls),
+ .dapm_widgets = sun4i_codec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun4i_codec_codec_dapm_widgets),
+ .dapm_routes = sun4i_codec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun4i_codec_codec_dapm_routes),
+ },
};
static const struct snd_soc_component_driver sun4i_codec_component = {
@@ -680,12 +682,37 @@ static const struct regmap_config sun4i_codec_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = SUN4I_CODEC_AC_MIC_PHONE_CAL,
+ .max_register = SUN4I_CODEC_ADC_RXCNT,
+};
+
+static const struct regmap_config sun7i_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN7I_CODEC_AC_MIC_PHONE_CAL,
+};
+
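+/* The sun7i variant exposes two extra calibration registers (0x38/0x3c), so
+ * it needs a regmap config covering a wider register range; the quirks entry
+ * selects the right config for each compatible.
+ */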
+struct sun4i_codec_quirks {
+ const struct regmap_config *regmap_config;
+};
+
+static const struct sun4i_codec_quirks sun4i_codec_quirks = {
+ .regmap_config = &sun4i_codec_regmap_config,
+};
+
+static const struct sun4i_codec_quirks sun7i_codec_quirks = {
+ .regmap_config = &sun7i_codec_regmap_config,
};
static const struct of_device_id sun4i_codec_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-codec" },
- { .compatible = "allwinner,sun7i-a20-codec" },
+ {
+ .compatible = "allwinner,sun4i-a10-codec",
+ .data = &sun4i_codec_quirks,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-codec",
+ .data = &sun7i_codec_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
@@ -758,6 +785,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
{
struct snd_soc_card *card;
struct sun4i_codec *scodec;
+ const struct sun4i_codec_quirks *quirks;
struct resource *res;
void __iomem *base;
int ret;
@@ -775,8 +803,14 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (quirks == NULL) {
+ dev_err(&pdev->dev, "Failed to determine the quirks to use\n");
+ return -ENODEV;
+ }
+
scodec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &sun4i_codec_regmap_config);
+ quirks->regmap_config);
if (IS_ERR(scodec->regmap)) {
dev_err(&pdev->dev, "Failed to create our regmap\n");
return PTR_ERR(scodec->regmap);
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 0b04fb02125c..88fbb3a1e660 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -162,6 +163,7 @@ struct sun4i_spdif_dev {
struct platform_device *pdev;
struct clk *spdif_clk;
struct clk *apb_clk;
+ struct reset_control *rst;
struct snd_soc_dai_driver cpu_dai_drv;
struct regmap *regmap;
struct snd_dmaengine_dai_dma_data dma_params_tx;
@@ -411,6 +413,7 @@ static const struct snd_soc_dapm_route dit_routes[] = {
static const struct of_device_id sun4i_spdif_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-spdif", },
+ { .compatible = "allwinner,sun6i-a31-spdif", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
@@ -482,11 +485,23 @@ static int sun4i_spdif_probe(struct platform_device *pdev)
}
host->dma_params_tx.addr = res->start + SUN4I_SPDIF_TXFIFO;
- host->dma_params_tx.maxburst = 4;
+ host->dma_params_tx.maxburst = 8;
host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
platform_set_drvdata(pdev, host);
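+	/* The sun6i (A31) variant has a reset line that must be de-asserted
+	 * before the SPDIF block is usable.
+	 */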
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun6i-a31-spdif")) {
+ host->rst = devm_reset_control_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(host->rst) && PTR_ERR(host->rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Failed to get reset: %d\n", ret);
+ goto err_disable_apb_clk;
+ }
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev,
&sun4i_spdif_component, &sun4i_spdif_dai, 1);
if (ret)
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index a6768f832c6f..efbe8d4c019e 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -138,3 +138,14 @@ config SND_SOC_TEGRA_RT5677
help
Say Y or M here if you want to add support for SoC audio on Tegra
boards using the RT5677 codec, such as Ryu.
+
+config SND_SOC_TEGRA_SGTL5000
+	tristate "SoC Audio support for Tegra boards using an SGTL5000 codec"
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
+ select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
+ select SND_SOC_SGTL5000
+ help
+ Say Y or M here if you want to add support for SoC audio on Tegra
+ boards using the SGTL5000 codec, such as Apalis T30, Apalis TK1 or
+ Colibri T30.
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index 9171655ad843..f214a3fd0024 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -26,6 +26,7 @@ snd-soc-tegra-wm9712-objs := tegra_wm9712.o
snd-soc-tegra-trimslice-objs := trimslice.o
snd-soc-tegra-alc5632-objs := tegra_alc5632.o
snd-soc-tegra-max98090-objs := tegra_max98090.o
+snd-soc-tegra-sgtl5000-objs := tegra_sgtl5000.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5640) += snd-soc-tegra-rt5640.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5677) += snd-soc-tegra-rt5677.o
@@ -35,3 +36,4 @@ obj-$(CONFIG_SND_SOC_TEGRA_WM9712) += snd-soc-tegra-wm9712.o
obj-$(CONFIG_SND_SOC_TEGRA_TRIMSLICE) += snd-soc-tegra-trimslice.o
obj-$(CONFIG_SND_SOC_TEGRA_ALC5632) += snd-soc-tegra-alc5632.o
obj-$(CONFIG_SND_SOC_TEGRA_MAX98090) += snd-soc-tegra-max98090.o
+obj-$(CONFIG_SND_SOC_TEGRA_SGTL5000) += snd-soc-tegra-sgtl5000.o
\ No newline at end of file
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index 773daecaa5e8..e5ef4e9c4ac5 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -1,5 +1,5 @@
/*
-* tegra_rt5640.c - Tegra machine ASoC driver for boards using WM8903 codec.
+* tegra_rt5640.c - Tegra machine ASoC driver for boards using RT5640 codec.
*
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
new file mode 100644
index 000000000000..1e76869dd488
--- /dev/null
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -0,0 +1,212 @@
+/*
+ * tegra_sgtl5000.c - Tegra machine ASoC driver for boards using SGTL5000 codec
+ *
+ * Author: Marcel Ziswiler <marcel@ziswiler.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on code copyright/by:
+ *
+ * Copyright (C) 2010-2012 - NVIDIA, Inc.
+ * (c) 2009, 2010 Nvidia Graphics Pvt. Ltd.
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/sgtl5000.h"
+
+#include "tegra_asoc_utils.h"
+
+#define DRV_NAME "tegra-snd-sgtl5000"
+
+struct tegra_sgtl5000 {
+ struct tegra_asoc_utils_data util_data;
+};
+
+static int tegra_sgtl5000_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_card *card = rtd->card;
+ struct tegra_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk;
+ int err;
+
+ srate = params_rate(params);
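+	/* Pick an MCLK from the 11.2896 MHz family for 44.1 kHz-derived rates
+	 * and 12.288 MHz otherwise; the codec derives its clocks from this.
+	 */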
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ default:
+ mclk = 12288000;
+ break;
+ }
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops tegra_sgtl5000_ops = {
+ .hw_params = tegra_sgtl5000_hw_params,
+};
+
+static const struct snd_soc_dapm_widget tegra_sgtl5000_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static struct snd_soc_dai_link tegra_sgtl5000_dai = {
+ .name = "sgtl5000",
+ .stream_name = "HiFi",
+ .codec_dai_name = "sgtl5000",
+ .ops = &tegra_sgtl5000_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_tegra_sgtl5000 = {
+ .name = "tegra-sgtl5000",
+ .owner = THIS_MODULE,
+ .dai_link = &tegra_sgtl5000_dai,
+ .num_links = 1,
+ .dapm_widgets = tegra_sgtl5000_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tegra_sgtl5000_dapm_widgets),
+ .fully_routed = true,
+};
+
+static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card = &snd_soc_tegra_sgtl5000;
+ struct tegra_sgtl5000 *machine;
+ int ret;
+
+ machine = devm_kzalloc(&pdev->dev, sizeof(struct tegra_sgtl5000),
+ GFP_KERNEL);
+ if (!machine) {
+ dev_err(&pdev->dev, "Can't allocate tegra_sgtl5000 struct\n");
+ return -ENOMEM;
+ }
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+ ret = snd_soc_of_parse_card_name(card, "nvidia,model");
+ if (ret)
+ goto err;
+
+ ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
+ if (ret)
+ goto err;
+
+ tegra_sgtl5000_dai.codec_of_node = of_parse_phandle(np,
+ "nvidia,audio-codec", 0);
+ if (!tegra_sgtl5000_dai.codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'nvidia,audio-codec' missing or invalid\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tegra_sgtl5000_dai.cpu_of_node = of_parse_phandle(np,
+ "nvidia,i2s-controller", 0);
+ if (!tegra_sgtl5000_dai.cpu_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'nvidia,i2s-controller' missing/invalid\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
+ if (ret)
+ goto err;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
+ goto err_fini_utils;
+ }
+
+ return 0;
+
+err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+err:
+ return ret;
+}
+
+static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct tegra_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = snd_soc_unregister_card(card);
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ return ret;
+}
+
+static const struct of_device_id tegra_sgtl5000_of_match[] = {
+ { .compatible = "nvidia,tegra-audio-sgtl5000", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver tegra_sgtl5000_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = tegra_sgtl5000_of_match,
+ },
+ .probe = tegra_sgtl5000_driver_probe,
+ .remove = tegra_sgtl5000_driver_remove,
+};
+module_platform_driver(tegra_sgtl5000_driver);
+
+MODULE_AUTHOR("Marcel Ziswiler <marcel@ziswiler.com>");
+MODULE_DESCRIPTION("Tegra SGTL5000 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_sgtl5000_of_match);
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 6d5698b25bd4..b343efd9be5b 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -187,7 +187,7 @@ static int setup_clocking(struct snd_soc_dai *dai,
default:
dev_err(dai->dev,
- "%s: Error: Unsopported inversion (fmt = 0x%x)!\n",
+ "%s: Error: Unsupported inversion (fmt = 0x%x)!\n",
__func__, fmt);
return -EINVAL;
@@ -218,7 +218,7 @@ static int setup_clocking(struct snd_soc_dai *dai,
break;
default:
- dev_err(dai->dev, "%s: Error: Unsopported master (fmt = 0x%x)!\n",
+ dev_err(dai->dev, "%s: Error: Unsupported master (fmt = 0x%x)!\n",
__func__, fmt);
return -EINVAL;
@@ -374,7 +374,7 @@ static int setup_msp_config(struct snd_pcm_substream *substream,
break;
default:
- dev_err(dai->dev, "%s: Error: Unsopported format (%d)!\n",
+ dev_err(dai->dev, "%s: Error: Unsupported format (%d)!\n",
__func__, fmt);
return -EINVAL;
}
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 327f8642ca80..8f66ba730d69 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -739,7 +739,6 @@ static struct urb **alloc_urbs(struct snd_usb_caiaqdev *cdev, int dir, int *ret)
for (i = 0; i < N_URBS; i++) {
urbs[i] = usb_alloc_urb(FRAMES_PER_URB, GFP_KERNEL);
if (!urbs[i]) {
- dev_err(dev, "unable to usb_alloc_urb(), OOM!?\n");
*ret = -ENOMEM;
return urbs;
}
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 71778ca4b26a..111b0f009afa 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -92,7 +92,7 @@ struct snd_usb_endpoint {
unsigned int curframesize; /* current packet size in frames (for capture) */
unsigned int syncmaxsize; /* sync endpoint packet size */
unsigned int fill_max:1; /* fill max packet size always */
- unsigned int udh01_fb_quirk:1; /* corrupted feedback data */
+ unsigned int tenor_fb_quirk:1; /* corrupted feedback data */
unsigned int datainterval; /* log_2 of data packet interval */
unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
unsigned char silence_value;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c07a7eda42a2..c470251cea4b 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -502,10 +502,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
ep->syncinterval = 3;
ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
-
- if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
- ep->syncmaxsize == 4)
- ep->udh01_fb_quirk = 1;
}
list_add_tail(&ep->list, &chip->ep_list);
@@ -1171,15 +1167,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
if (f == 0)
return;
- if (unlikely(sender->udh01_fb_quirk)) {
+ if (unlikely(sender->tenor_fb_quirk)) {
/*
- * The TEAC UD-H01 firmware sometimes changes the feedback value
+ * Devices based on Tenor 8802 chipsets (TEAC UD-H01
+ * and others) sometimes change the feedback value
* by +/- 0x1.0000.
*/
if (f < ep->freqn - 0x8000)
- f += 0x10000;
+ f += 0xf000;
else if (f > ep->freqn + 0x8000)
- f -= 0x10000;
+ f -= 0xf000;
} else if (unlikely(ep->freqshift == INT_MIN)) {
/*
* The first time we see a feedback value, determine its format
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 789d19ec035d..2c44386e5569 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -176,10 +176,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
int r, idx;
fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
- if (fp->rate_table == NULL) {
- usb_audio_err(chip, "cannot malloc\n");
+ if (fp->rate_table == NULL)
return -ENOMEM;
- }
fp->nr_rates = 0;
fp->rate_min = fp->rate_max = 0;
diff --git a/sound/usb/line6/Kconfig b/sound/usb/line6/Kconfig
index f4585d378ef3..39b400392d71 100644
--- a/sound/usb/line6/Kconfig
+++ b/sound/usb/line6/Kconfig
@@ -2,6 +2,7 @@ config SND_USB_LINE6
tristate
select SND_RAWMIDI
select SND_PCM
+ select SND_HWDEP
config SND_USB_POD
tristate "Line 6 POD USB support"
@@ -21,10 +22,10 @@ config SND_USB_POD
re-amping)
config SND_USB_PODHD
- tristate "Line 6 POD HD300/400/500 USB support"
+ tristate "Line 6 POD X3/HD300/400/500 USB support"
select SND_USB_LINE6
help
- This is a driver for POD HD300, 400 and 500 devices.
+ This is a driver for POD X3, HD300, 400 and 500 devices.
config SND_USB_TONEPORT
tristate "TonePort GX, UX1 and UX2 USB support"
diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c
index f518fbbe88de..7c812565f90d 100644
--- a/sound/usb/line6/capture.c
+++ b/sound/usb/line6/capture.c
@@ -29,10 +29,10 @@ static int submit_audio_in_urb(struct snd_line6_pcm *line6pcm)
int ret;
struct urb *urb_in;
- index =
- find_first_zero_bit(&line6pcm->in.active_urbs, LINE6_ISO_BUFFERS);
+ index = find_first_zero_bit(&line6pcm->in.active_urbs,
+ line6pcm->line6->iso_buffers);
- if (index < 0 || index >= LINE6_ISO_BUFFERS) {
+ if (index < 0 || index >= line6pcm->line6->iso_buffers) {
dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
return -EINVAL;
}
@@ -44,13 +44,13 @@ static int submit_audio_in_urb(struct snd_line6_pcm *line6pcm)
struct usb_iso_packet_descriptor *fin =
&urb_in->iso_frame_desc[i];
fin->offset = urb_size;
- fin->length = line6pcm->max_packet_size;
- urb_size += line6pcm->max_packet_size;
+ fin->length = line6pcm->max_packet_size_in;
+ urb_size += line6pcm->max_packet_size_in;
}
urb_in->transfer_buffer =
line6pcm->in.buffer +
- index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size_in;
urb_in->transfer_buffer_length = urb_size;
urb_in->context = line6pcm;
@@ -73,7 +73,7 @@ int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm)
{
int ret = 0, i;
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ for (i = 0; i < line6pcm->line6->iso_buffers; ++i) {
ret = submit_audio_in_urb(line6pcm);
if (ret < 0)
break;
@@ -90,7 +90,9 @@ void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
struct snd_pcm_substream *substream =
get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
struct snd_pcm_runtime *runtime = substream->runtime;
- const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
+ const int bytes_per_frame =
+ line6pcm->properties->bytes_per_channel *
+ line6pcm->properties->capture_hw.channels_max;
int frames = fsize / bytes_per_frame;
if (runtime == NULL)
@@ -154,7 +156,7 @@ static void audio_in_callback(struct urb *urb)
line6pcm->in.last_frame = urb->start_frame;
/* find index of URB */
- for (index = 0; index < LINE6_ISO_BUFFERS; ++index)
+ for (index = 0; index < line6pcm->line6->iso_buffers; ++index)
if (urb == line6pcm->in.urbs[index])
break;
@@ -173,17 +175,27 @@ static void audio_in_callback(struct urb *urb)
fbuf = urb->transfer_buffer + fin->offset;
fsize = fin->actual_length;
- if (fsize > line6pcm->max_packet_size) {
+ if (fsize > line6pcm->max_packet_size_in) {
dev_err(line6pcm->line6->ifcdev,
"driver and/or device bug: packet too large (%d > %d)\n",
- fsize, line6pcm->max_packet_size);
+ fsize, line6pcm->max_packet_size_in);
}
length += fsize;
- /* the following assumes LINE6_ISO_PACKETS == 1: */
+ BUILD_BUG_ON_MSG(LINE6_ISO_PACKETS != 1,
+ "The following code assumes LINE6_ISO_PACKETS == 1");
+	/* TODO:
+	 * If iso_buffers != 2, the previous frame is almost random on the
+	 * playback side.
+	 * This needs to be redesigned. It should be "stable", but we may
+	 * experience sync problems on such high-speed configs.
+	 */
+
line6pcm->prev_fbuf = fbuf;
- line6pcm->prev_fsize = fsize;
+ line6pcm->prev_fsize = fsize /
+ (line6pcm->properties->bytes_per_channel *
+ line6pcm->properties->capture_hw.channels_max);
if (!test_bit(LINE6_STREAM_IMPULSE, &line6pcm->in.running) &&
test_bit(LINE6_STREAM_PCM, &line6pcm->in.running) &&
@@ -220,6 +232,8 @@ static int snd_line6_capture_open(struct snd_pcm_substream *substream)
if (err < 0)
return err;
+ line6_pcm_acquire(line6pcm, LINE6_STREAM_CAPTURE_HELPER, false);
+
runtime->hw = line6pcm->properties->capture_hw;
return 0;
}
@@ -227,6 +241,9 @@ static int snd_line6_capture_open(struct snd_pcm_substream *substream)
/* close capture callback */
static int snd_line6_capture_close(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ line6_pcm_release(line6pcm, LINE6_STREAM_CAPTURE_HELPER);
return 0;
}
@@ -247,8 +264,13 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
struct usb_line6 *line6 = line6pcm->line6;
int i;
+ line6pcm->in.urbs = kzalloc(
+ sizeof(struct urb *) * line6->iso_buffers, GFP_KERNEL);
+ if (line6pcm->in.urbs == NULL)
+ return -ENOMEM;
+
/* create audio URBs and fill in constant values: */
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ for (i = 0; i < line6->iso_buffers; ++i) {
struct urb *urb;
/* URB for audio in: */
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 81b7da8e56d3..14e587e70655 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -17,6 +17,7 @@
#include <sound/core.h>
#include <sound/initval.h>
+#include <sound/hwdep.h>
#include "capture.h"
#include "driver.h"
@@ -29,7 +30,7 @@
/*
This is Line 6's MIDI manufacturer ID.
*/
-const unsigned char line6_midi_id[] = {
+const unsigned char line6_midi_id[3] = {
0x00, 0x01, 0x0c
};
EXPORT_SYMBOL_GPL(line6_midi_id);
@@ -66,10 +67,17 @@ static int line6_start_listen(struct usb_line6 *line6)
{
int err;
- usb_fill_int_urb(line6->urb_listen, line6->usbdev,
- usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r),
- line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
- line6_data_received, line6, line6->interval);
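+	/* MIDI-capable (PODxt-era) devices use an interrupt endpoint for
+	 * control data; newer devices such as the POD X3 use a bulk endpoint.
+	 */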
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ usb_fill_int_urb(line6->urb_listen, line6->usbdev,
+ usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r),
+ line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
+ line6_data_received, line6, line6->interval);
+ } else {
+ usb_fill_bulk_urb(line6->urb_listen, line6->usbdev,
+ usb_rcvbulkpipe(line6->usbdev, line6->properties->ep_ctrl_r),
+ line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
+ line6_data_received, line6);
+ }
line6->urb_listen->actual_length = 0;
err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
return err;
@@ -90,6 +98,7 @@ static int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
int size)
{
int i, done = 0;
+ const struct line6_properties *properties = line6->properties;
for (i = 0; i < size; i += line6->max_packet_size) {
int partial;
@@ -97,15 +106,21 @@ static int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
int frag_size = min(line6->max_packet_size, size - i);
int retval;
- retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->properties->ep_ctrl_w),
- (char *)frag_buf, frag_size,
- &partial, LINE6_TIMEOUT * HZ);
+ if (properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ retval = usb_interrupt_msg(line6->usbdev,
+ usb_sndintpipe(line6->usbdev, properties->ep_ctrl_w),
+ (char *)frag_buf, frag_size,
+ &partial, LINE6_TIMEOUT * HZ);
+ } else {
+ retval = usb_bulk_msg(line6->usbdev,
+ usb_sndbulkpipe(line6->usbdev, properties->ep_ctrl_w),
+ (char *)frag_buf, frag_size,
+ &partial, LINE6_TIMEOUT * HZ);
+ }
if (retval) {
dev_err(line6->ifcdev,
- "usb_interrupt_msg failed (%d)\n", retval);
+ "usb_bulk_msg failed (%d)\n", retval);
break;
}
@@ -140,10 +155,17 @@ static int line6_send_raw_message_async_part(struct message *msg,
int done = msg->done;
int bytes = min(msg->size - done, line6->max_packet_size);
- usb_fill_int_urb(urb, line6->usbdev,
- usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w),
- (char *)msg->buffer + done, bytes,
- line6_async_request_sent, msg, line6->interval);
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ usb_fill_int_urb(urb, line6->usbdev,
+ usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w),
+ (char *)msg->buffer + done, bytes,
+ line6_async_request_sent, msg, line6->interval);
+ } else {
+ usb_fill_bulk_urb(urb, line6->usbdev,
+ usb_sndbulkpipe(line6->usbdev, line6->properties->ep_ctrl_w),
+ (char *)msg->buffer + done, bytes,
+ line6_async_request_sent, msg);
+ }
msg->done += bytes;
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -269,28 +291,36 @@ static void line6_data_received(struct urb *urb)
if (urb->status == -ESHUTDOWN)
return;
- done =
- line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ done =
+ line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
- if (done < urb->actual_length) {
- line6_midibuf_ignore(mb, done);
- dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
- done, urb->actual_length);
- }
+ if (done < urb->actual_length) {
+ line6_midibuf_ignore(mb, done);
+ dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
+ done, urb->actual_length);
+ }
- for (;;) {
- done =
- line6_midibuf_read(mb, line6->buffer_message,
- LINE6_MESSAGE_MAXLEN);
+ for (;;) {
+ done =
+ line6_midibuf_read(mb, line6->buffer_message,
+ LINE6_MIDI_MESSAGE_MAXLEN);
- if (done == 0)
- break;
+ if (done == 0)
+ break;
- line6->message_length = done;
- line6_midi_receive(line6, line6->buffer_message, done);
+ line6->message_length = done;
+ line6_midi_receive(line6, line6->buffer_message, done);
+ if (line6->process_message)
+ line6->process_message(line6);
+ }
+ } else {
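+		/* Raw (non-MIDI) messages: expose the URB buffer directly to
+		 * the registered message handler for the duration of the
+		 * callback, then drop the reference.
+		 */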
+ line6->buffer_message = urb->transfer_buffer;
+ line6->message_length = urb->actual_length;
if (line6->process_message)
line6->process_message(line6);
+ line6->buffer_message = NULL;
}
line6_start_listen(line6);
@@ -447,12 +477,16 @@ static void line6_destruct(struct snd_card *card)
struct usb_line6 *line6 = card->private_data;
struct usb_device *usbdev = line6->usbdev;
- /* free buffer memory first: */
+	/* Free buffer memory first. We cannot depend on the existence of
+	 * private data from the (podhd) module; it may already be gone
+	 * during this call.
+	 */
kfree(line6->buffer_message);
+
kfree(line6->buffer_listen);
/* then free URBs: */
usb_free_urb(line6->urb_listen);
+ line6->urb_listen = NULL;
/* decrement reference counters: */
usb_put_dev(usbdev);
@@ -462,13 +496,29 @@ static void line6_destruct(struct snd_card *card)
static void line6_get_interval(struct usb_line6 *line6)
{
struct usb_device *usbdev = line6->usbdev;
+ const struct line6_properties *properties = line6->properties;
+ int pipe;
struct usb_host_endpoint *ep;
- unsigned pipe = usb_rcvintpipe(usbdev, line6->properties->ep_ctrl_r);
- unsigned epnum = usb_pipeendpoint(pipe);
- ep = usbdev->ep_in[epnum];
+ if (properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ pipe =
+ usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r);
+ } else {
+ pipe =
+ usb_rcvbulkpipe(line6->usbdev, line6->properties->ep_ctrl_r);
+ }
+ ep = usbdev->ep_in[usb_pipeendpoint(pipe)];
+
if (ep) {
line6->interval = ep->desc.bInterval;
+ if (usbdev->speed == USB_SPEED_LOW) {
+ line6->intervals_per_second = USB_LOW_INTERVALS_PER_SECOND;
+ line6->iso_buffers = USB_LOW_ISO_BUFFERS;
+ } else {
+ line6->intervals_per_second = USB_HIGH_INTERVALS_PER_SECOND;
+ line6->iso_buffers = USB_HIGH_ISO_BUFFERS;
+ }
+
line6->max_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
} else {
dev_err(line6->ifcdev,
@@ -478,6 +528,138 @@ static void line6_get_interval(struct usb_line6 *line6)
}
}
+
+/* Enable buffering of incoming messages, flush the buffer */
+static int line6_hwdep_open(struct snd_hwdep *hw, struct file *file)
+{
+ struct usb_line6 *line6 = hw->private_data;
+
+ /* NOTE: hwdep layer provides atomicity here */
+
+ line6->messages.active = 1;
+
+ return 0;
+}
+
+/* Stop buffering */
+static int line6_hwdep_release(struct snd_hwdep *hw, struct file *file)
+{
+ struct usb_line6 *line6 = hw->private_data;
+
+ line6->messages.active = 0;
+
+ return 0;
+}
+
+/* Read from circular buffer, return to user */
+static long
+line6_hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ loff_t *offset)
+{
+ struct usb_line6 *line6 = hwdep->private_data;
+ long rv = 0;
+ unsigned int out_count;
+
+ if (mutex_lock_interruptible(&line6->messages.read_lock))
+ return -ERESTARTSYS;
+
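+	/* Sleep until the completion handler has queued at least one message;
+	 * the lock is dropped while waiting and re-taken before reading.
+	 */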
+ while (kfifo_len(&line6->messages.fifo) == 0) {
+ mutex_unlock(&line6->messages.read_lock);
+
+ rv = wait_event_interruptible(
+ line6->messages.wait_queue,
+ kfifo_len(&line6->messages.fifo) != 0);
+ if (rv < 0)
+ return rv;
+
+ if (mutex_lock_interruptible(&line6->messages.read_lock))
+ return -ERESTARTSYS;
+ }
+
+ if (kfifo_peek_len(&line6->messages.fifo) > count) {
+ /* Buffer too small; allow re-read of the current item... */
+ rv = -EINVAL;
+ } else {
+ rv = kfifo_to_user(&line6->messages.fifo, buf, count, &out_count);
+ if (rv == 0)
+ rv = out_count;
+ }
+
+ mutex_unlock(&line6->messages.read_lock);
+ return rv;
+}
+
+/* Write directly (no buffering) to the device on behalf of the user */
+static long
+line6_hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
+ loff_t *offset)
+{
+ struct usb_line6 *line6 = hwdep->private_data;
+ int rv;
+ char *data_copy;
+
+ if (count > line6->max_packet_size * LINE6_RAW_MESSAGES_MAXCOUNT) {
+ /* This is an arbitrary limit - still better than nothing... */
+ return -EINVAL;
+ }
+
+	data_copy = memdup_user(data, count);
+	if (IS_ERR(data_copy))
+		return PTR_ERR(data_copy);
+
+ rv = line6_send_raw_message(line6, data_copy, count);
+
+ kfree(data_copy);
+ return rv;
+}
+
+static const struct snd_hwdep_ops hwdep_ops = {
+ .open = line6_hwdep_open,
+ .release = line6_hwdep_release,
+ .read = line6_hwdep_read,
+ .write = line6_hwdep_write,
+};
+
+/* Insert into circular buffer */
+static void line6_hwdep_push_message(struct usb_line6 *line6)
+{
+ if (!line6->messages.active)
+ return;
+
+ if (kfifo_avail(&line6->messages.fifo) >= line6->message_length) {
+ /* No race condition here, there's only one writer */
+ kfifo_in(&line6->messages.fifo,
+ line6->buffer_message, line6->message_length);
+ } /* else TODO: signal overflow */
+
+ wake_up_interruptible(&line6->messages.wait_queue);
+}
+
+static int line6_hwdep_init(struct usb_line6 *line6)
+{
+ int err;
+ struct snd_hwdep *hwdep;
+
+ /* TODO: usb_driver_claim_interface(); */
+ line6->process_message = line6_hwdep_push_message;
+ line6->messages.active = 0;
+ init_waitqueue_head(&line6->messages.wait_queue);
+ mutex_init(&line6->messages.read_lock);
+ INIT_KFIFO(line6->messages.fifo);
+
+ err = snd_hwdep_new(line6->card, "config", 0, &hwdep);
+ if (err < 0)
+ goto end;
+ strcpy(hwdep->name, "config");
+ hwdep->iface = SNDRV_HWDEP_IFACE_LINE6;
+ hwdep->ops = hwdep_ops;
+ hwdep->private_data = line6;
+ hwdep->exclusive = true;
+
+end:
+ return err;
+}
+
static int line6_init_cap_control(struct usb_line6 *line6)
{
int ret;
@@ -487,14 +669,20 @@ static int line6_init_cap_control(struct usb_line6 *line6)
if (!line6->buffer_listen)
return -ENOMEM;
- line6->buffer_message = kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
- if (!line6->buffer_message)
- return -ENOMEM;
-
line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL);
if (!line6->urb_listen)
return -ENOMEM;
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+ line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
+ if (!line6->buffer_message)
+ return -ENOMEM;
+ } else {
+ ret = line6_hwdep_init(line6);
+ if (ret < 0)
+ return ret;
+ }
+
ret = line6_start_listen(line6);
if (ret < 0) {
dev_err(line6->ifcdev, "cannot start listening: %d\n", ret);
@@ -558,6 +746,7 @@ int line6_probe(struct usb_interface *interface,
/* query interface number */
interface_number = interface->cur_altsetting->desc.bInterfaceNumber;
+	/* TODO: this reserves bus bandwidth even when no transfer is active */
ret = usb_set_interface(usbdev, interface_number,
properties->altsetting);
if (ret < 0) {
@@ -565,9 +754,8 @@ int line6_probe(struct usb_interface *interface,
goto error;
}
- line6_get_interval(line6);
-
if (properties->capabilities & LINE6_CAP_CONTROL) {
+ line6_get_interval(line6);
ret = line6_init_cap_control(line6);
if (ret < 0)
goto error;
@@ -670,3 +858,4 @@ EXPORT_SYMBOL_GPL(line6_resume);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index 7da643e79e3b..7e3a3aada222 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -12,21 +12,37 @@
#ifndef DRIVER_H
#define DRIVER_H
-#include <linux/spinlock.h>
#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
#include <sound/core.h>
#include "midi.h"
-#define USB_INTERVALS_PER_SECOND 1000
+/* USB 1.1 speed configuration */
+#define USB_LOW_INTERVALS_PER_SECOND 1000
+#define USB_LOW_ISO_BUFFERS 2
+
+/* USB 2.0+ speed configuration */
+#define USB_HIGH_INTERVALS_PER_SECOND 8000
+#define USB_HIGH_ISO_BUFFERS 16
/* Fallback USB interval and max packet size values */
#define LINE6_FALLBACK_INTERVAL 10
#define LINE6_FALLBACK_MAXPACKETSIZE 16
#define LINE6_TIMEOUT 1
-#define LINE6_BUFSIZE_LISTEN 32
-#define LINE6_MESSAGE_MAXLEN 256
+#define LINE6_BUFSIZE_LISTEN 64
+#define LINE6_MIDI_MESSAGE_MAXLEN 256
+
+#define LINE6_RAW_MESSAGES_MAXCOUNT_ORDER 7
+/* 4k packets are common, BUFSIZE * MAXCOUNT should be bigger... */
+#define LINE6_RAW_MESSAGES_MAXCOUNT (1 << LINE6_RAW_MESSAGES_MAXCOUNT_ORDER)
+
+
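+/* The record fifo below stores each message length in two bytes
+ * (STRUCT_KFIFO_REC_2), so a single listen buffer must fit into 64k.
+ */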
+#if LINE6_BUFSIZE_LISTEN > 65535
+#error "Use dynamic fifo instead"
+#endif
/*
Line 6 MIDI control commands
@@ -94,8 +110,12 @@ enum {
LINE6_CAP_CONTROL = 1 << 0,
/* device supports PCM input/output via USB */
LINE6_CAP_PCM = 1 << 1,
- /* device support hardware monitoring */
+ /* device supports hardware monitoring */
LINE6_CAP_HWMON = 1 << 2,
+ /* device requires output data when input is read */
+ LINE6_CAP_IN_NEEDS_OUT = 1 << 3,
+ /* device uses raw MIDI via USB (data endpoints) */
+ LINE6_CAP_CONTROL_MIDI = 1 << 4,
};
/*
@@ -109,10 +129,15 @@ struct usb_line6 {
/* Properties */
const struct line6_properties *properties;
- /* Interval (ms) */
+ /* Interval for data USB packets */
int interval;
+	/* ...for isochronous transfer framing */
+ int intervals_per_second;
+
+ /* Number of isochronous URBs used for frame transfers */
+ int iso_buffers;
- /* Maximum size of USB packet */
+ /* Maximum size of data USB packet */
int max_packet_size;
/* Device representing the USB interface */
@@ -129,18 +154,30 @@ struct usb_line6 {
/* Line 6 MIDI device data structure */
struct snd_line6_midi *line6midi;
- /* URB for listening to PODxt Pro control endpoint */
+ /* URB for listening to POD data endpoint */
struct urb *urb_listen;
- /* Buffer for listening to PODxt Pro control endpoint */
+ /* Buffer for incoming data from POD data endpoint */
unsigned char *buffer_listen;
- /* Buffer for message to be processed */
+ /* Buffer for message to be processed, generated from MIDI layer */
unsigned char *buffer_message;
- /* Length of message to be processed */
+ /* Length of message to be processed, generated from MIDI layer */
int message_length;
+ /* Circular buffer for non-MIDI control messages */
+ struct {
+ struct mutex read_lock;
+ wait_queue_head_t wait_queue;
+ unsigned int active:1;
+ STRUCT_KFIFO_REC_2(LINE6_BUFSIZE_LISTEN * LINE6_RAW_MESSAGES_MAXCOUNT)
+ fifo;
+ } messages;
+
+	/* If MIDI is supported, buffer_message contains the pre-processed data;
+	 * otherwise the data is only in urb_listen (buffer_listen).
+	 */
void (*process_message)(struct usb_line6 *);
void (*disconnect)(struct usb_line6 *line6);
};
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index cebea9b7f769..d0fb2f205bd9 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -258,7 +258,7 @@ int line6_init_midi(struct usb_line6 *line6)
struct snd_rawmidi *rmidi;
struct snd_line6_midi *line6midi;
- if (!(line6->properties->capabilities & LINE6_CAP_CONTROL)) {
+ if (!(line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI)) {
/* skip MIDI initialization and report success */
return 0;
}
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 41aa3355e920..fab53f58d447 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -52,7 +52,7 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
line6pcm->impulse_volume = value;
if (value > 0) {
- err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
+ err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE, true);
if (err < 0) {
line6pcm->impulse_volume = 0;
return err;
@@ -104,7 +104,7 @@ static void line6_unlink_audio_urbs(struct snd_line6_pcm *line6pcm,
{
int i;
- for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ for (i = 0; i < line6pcm->line6->iso_buffers; i++) {
if (test_bit(i, &pcms->active_urbs)) {
if (!test_and_set_bit(i, &pcms->unlink_urbs))
usb_unlink_urb(pcms->urbs[i]);
@@ -124,7 +124,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
do {
alive = 0;
- for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ for (i = 0; i < line6pcm->line6->iso_buffers; i++) {
if (test_bit(i, &pcms->active_urbs))
alive++;
}
@@ -146,15 +146,20 @@ get_stream(struct snd_line6_pcm *line6pcm, int direction)
}
/* allocate a buffer if not opened yet;
- * call this in line6pcm.state_change mutex
+	 * call this with line6pcm.state_mutex held
*/
static int line6_buffer_acquire(struct snd_line6_pcm *line6pcm,
- struct line6_pcm_stream *pstr, int type)
+ struct line6_pcm_stream *pstr, int direction, int type)
{
+ const int pkt_size =
+ (direction == SNDRV_PCM_STREAM_PLAYBACK) ?
+ line6pcm->max_packet_size_out :
+ line6pcm->max_packet_size_in;
+
/* Invoked multiple times in a row so allocate once only */
if (!test_and_set_bit(type, &pstr->opened) && !pstr->buffer) {
- pstr->buffer = kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ pstr->buffer = kmalloc(line6pcm->line6->iso_buffers *
+ LINE6_ISO_PACKETS * pkt_size, GFP_KERNEL);
if (!pstr->buffer)
return -ENOMEM;
}
@@ -162,12 +167,11 @@ static int line6_buffer_acquire(struct snd_line6_pcm *line6pcm,
}
/* free a buffer if all streams are closed;
- * call this in line6pcm.state_change mutex
+	 * call this with line6pcm.state_mutex held
*/
static void line6_buffer_release(struct snd_line6_pcm *line6pcm,
struct line6_pcm_stream *pstr, int type)
{
-
clear_bit(type, &pstr->opened);
if (!pstr->opened) {
line6_wait_clear_audio_urbs(line6pcm, pstr);
@@ -194,6 +198,7 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
else
ret = line6_submit_audio_in_all_urbs(line6pcm);
}
+
if (ret < 0)
clear_bit(type, &pstr->running);
spin_unlock_irqrestore(&pstr->lock, flags);
@@ -237,6 +242,14 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
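+		/* Devices flagged LINE6_CAP_IN_NEEDS_OUT only deliver capture
+		 * data while output is being sent, so start a helper playback
+		 * stream alongside the capture stream.
+		 */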
+ if (s->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ (line6pcm->line6->properties->capabilities &
+ LINE6_CAP_IN_NEEDS_OUT)) {
+ err = line6_stream_start(line6pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ LINE6_STREAM_CAPTURE_HELPER);
+ if (err < 0)
+ return err;
+ }
err = line6_stream_start(line6pcm, s->stream,
LINE6_STREAM_PCM);
if (err < 0)
@@ -245,6 +258,12 @@ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (s->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ (line6pcm->line6->properties->capabilities &
+ LINE6_CAP_IN_NEEDS_OUT)) {
+ line6_stream_stop(line6pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ LINE6_STREAM_CAPTURE_HELPER);
+ }
line6_stream_stop(line6pcm, s->stream,
LINE6_STREAM_PCM);
break;
@@ -278,27 +297,30 @@ snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream)
return pstr->pos_done;
}
-/* Acquire and start duplex streams:
+/* Acquire and optionally start duplex streams:
* type is either LINE6_STREAM_IMPULSE or LINE6_STREAM_MONITOR
*/
-int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type)
+int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type, bool start)
{
struct line6_pcm_stream *pstr;
int ret = 0, dir;
+ /* TODO: We should assert SNDRV_PCM_STREAM_PLAYBACK/CAPTURE == 0/1 */
mutex_lock(&line6pcm->state_mutex);
for (dir = 0; dir < 2; dir++) {
pstr = get_stream(line6pcm, dir);
- ret = line6_buffer_acquire(line6pcm, pstr, type);
+ ret = line6_buffer_acquire(line6pcm, pstr, dir, type);
if (ret < 0)
goto error;
if (!pstr->running)
line6_wait_clear_audio_urbs(line6pcm, pstr);
}
- for (dir = 0; dir < 2; dir++) {
- ret = line6_stream_start(line6pcm, dir, type);
- if (ret < 0)
- goto error;
+ if (start) {
+ for (dir = 0; dir < 2; dir++) {
+ ret = line6_stream_start(line6pcm, dir, type);
+ if (ret < 0)
+ goto error;
+ }
}
error:
mutex_unlock(&line6pcm->state_mutex);
@@ -334,7 +356,8 @@ int snd_line6_hw_params(struct snd_pcm_substream *substream,
struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream);
mutex_lock(&line6pcm->state_mutex);
- ret = line6_buffer_acquire(line6pcm, pstr, LINE6_STREAM_PCM);
+ ret = line6_buffer_acquire(line6pcm, pstr, substream->stream,
+ LINE6_STREAM_PCM);
if (ret < 0)
goto error;
@@ -434,24 +457,30 @@ static struct snd_kcontrol_new line6_controls[] = {
/*
Cleanup the PCM device.
*/
-static void cleanup_urbs(struct line6_pcm_stream *pcms)
+static void cleanup_urbs(struct line6_pcm_stream *pcms, int iso_buffers)
{
int i;
- for (i = 0; i < LINE6_ISO_BUFFERS; i++) {
+ /* Most likely impossible in current code... */
+ if (pcms->urbs == NULL)
+ return;
+
+ for (i = 0; i < iso_buffers; i++) {
if (pcms->urbs[i]) {
usb_kill_urb(pcms->urbs[i]);
usb_free_urb(pcms->urbs[i]);
}
}
+ kfree(pcms->urbs);
+ pcms->urbs = NULL;
}
static void line6_cleanup_pcm(struct snd_pcm *pcm)
{
struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm);
- cleanup_urbs(&line6pcm->out);
- cleanup_urbs(&line6pcm->in);
+ cleanup_urbs(&line6pcm->out, line6pcm->line6->iso_buffers);
+ cleanup_urbs(&line6pcm->in, line6pcm->line6->iso_buffers);
kfree(line6pcm);
}
@@ -523,12 +552,12 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->volume_monitor = 255;
line6pcm->line6 = line6;
- /* Read and write buffers are sized identically, so choose minimum */
- line6pcm->max_packet_size = min(
- usb_maxpacket(line6->usbdev,
- usb_rcvisocpipe(line6->usbdev, ep_read), 0),
- usb_maxpacket(line6->usbdev,
- usb_sndisocpipe(line6->usbdev, ep_write), 1));
+ line6pcm->max_packet_size_in =
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0);
+ line6pcm->max_packet_size_out =
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1);
spin_lock_init(&line6pcm->out.lock);
spin_lock_init(&line6pcm->in.lock);
diff --git a/sound/usb/line6/pcm.h b/sound/usb/line6/pcm.h
index 508410adbd51..bb0c9cbf2a78 100644
--- a/sound/usb/line6/pcm.h
+++ b/sound/usb/line6/pcm.h
@@ -20,9 +20,6 @@
#include "driver.h"
-/* number of URBs */
-#define LINE6_ISO_BUFFERS 2
-
/*
number of USB frames per URB
The Line 6 Windows driver always transmits two frames per packet, but
@@ -31,7 +28,9 @@
*/
#define LINE6_ISO_PACKETS 1
-/* in a "full speed" device (such as the PODxt Pro) this means 1ms */
+/* in a "full speed" device (such as the PODxt Pro) this means 1ms,
+ * for "high speed" it's 1/8ms
+ */
#define LINE6_ISO_INTERVAL 1
#define LINE6_IMPULSE_DEFAULT_PERIOD 100
@@ -74,6 +73,7 @@ enum {
LINE6_STREAM_PCM,
LINE6_STREAM_MONITOR,
LINE6_STREAM_IMPULSE,
+ LINE6_STREAM_CAPTURE_HELPER,
};
/* misc bit flags for PCM operation */
@@ -85,12 +85,12 @@ enum {
struct line6_pcm_properties {
struct snd_pcm_hardware playback_hw, capture_hw;
struct snd_pcm_hw_constraint_ratdens rates;
- int bytes_per_frame;
+ int bytes_per_channel;
};
struct line6_pcm_stream {
/* allocated URBs */
- struct urb *urbs[LINE6_ISO_BUFFERS];
+ struct urb **urbs;
/* Temporary buffer;
* Since the packet size is not known in advance, this buffer is
@@ -157,11 +157,12 @@ struct snd_line6_pcm {
/* Previously captured frame (for software monitoring) */
unsigned char *prev_fbuf;
- /* Size of previously captured frame (for software monitoring) */
+ /* Size of previously captured frame (for software monitoring/sync) */
int prev_fsize;
/* Maximum size of USB packet */
- int max_packet_size;
+ int max_packet_size_in;
+ int max_packet_size_out;
/* PCM playback volume (left and right) */
int volume_playback[2];
@@ -191,7 +192,8 @@ extern int snd_line6_hw_params(struct snd_pcm_substream *substream,
extern int snd_line6_hw_free(struct snd_pcm_substream *substream);
extern snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream);
extern void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm);
-extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type);
+extern int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type,
+ bool start);
extern void line6_pcm_release(struct snd_line6_pcm *line6pcm, int type);
#endif
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c
index 97ed593f6010..812d18191e01 100644
--- a/sound/usb/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -146,18 +146,20 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
int index;
int i, urb_size, urb_frames;
int ret;
- const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
+ const int bytes_per_frame =
+ line6pcm->properties->bytes_per_channel *
+ line6pcm->properties->playback_hw.channels_max;
const int frame_increment =
line6pcm->properties->rates.rats[0].num_min;
const int frame_factor =
line6pcm->properties->rates.rats[0].den *
- (USB_INTERVALS_PER_SECOND / LINE6_ISO_INTERVAL);
+ (line6pcm->line6->intervals_per_second / LINE6_ISO_INTERVAL);
struct urb *urb_out;
- index =
- find_first_zero_bit(&line6pcm->out.active_urbs, LINE6_ISO_BUFFERS);
+ index = find_first_zero_bit(&line6pcm->out.active_urbs,
+ line6pcm->line6->iso_buffers);
- if (index < 0 || index >= LINE6_ISO_BUFFERS) {
+ if (index < 0 || index >= line6pcm->line6->iso_buffers) {
dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
return -EINVAL;
}
@@ -165,6 +167,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_out = line6pcm->out.urbs[index];
urb_size = 0;
+ /* TODO: this may not work for LINE6_ISO_PACKETS != 1 */
for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
/* compute frame size for given sampling rate */
int fsize = 0;
@@ -178,9 +181,11 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
line6pcm->out.count += frame_increment;
n = line6pcm->out.count / frame_factor;
line6pcm->out.count -= n * frame_factor;
- fsize = n * bytes_per_frame;
+ fsize = n;
}
+ fsize *= bytes_per_frame;
+
fout->offset = urb_size;
fout->length = fsize;
urb_size += fsize;
@@ -195,7 +200,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_frames = urb_size / bytes_per_frame;
urb_out->transfer_buffer =
line6pcm->out.buffer +
- index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size_out;
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
@@ -286,7 +291,7 @@ int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm)
{
int ret = 0, i;
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ for (i = 0; i < line6pcm->line6->iso_buffers; ++i) {
ret = submit_audio_out_urb(line6pcm);
if (ret < 0)
break;
@@ -305,6 +310,9 @@ static void audio_out_callback(struct urb *urb)
struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context;
struct snd_pcm_substream *substream =
get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK);
+ const int bytes_per_frame =
+ line6pcm->properties->bytes_per_channel *
+ line6pcm->properties->playback_hw.channels_max;
#if USE_CLEAR_BUFFER_WORKAROUND
memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
@@ -313,11 +321,11 @@ static void audio_out_callback(struct urb *urb)
line6pcm->out.last_frame = urb->start_frame;
/* find index of URB */
- for (index = 0; index < LINE6_ISO_BUFFERS; index++)
+ for (index = 0; index < line6pcm->line6->iso_buffers; index++)
if (urb == line6pcm->out.urbs[index])
break;
- if (index >= LINE6_ISO_BUFFERS)
+ if (index >= line6pcm->line6->iso_buffers)
return; /* URB has been unlinked asynchronously */
for (i = 0; i < LINE6_ISO_PACKETS; i++)
@@ -329,7 +337,7 @@ static void audio_out_callback(struct urb *urb)
struct snd_pcm_runtime *runtime = substream->runtime;
line6pcm->out.pos_done +=
- length / line6pcm->properties->bytes_per_frame;
+ length / bytes_per_frame;
if (line6pcm->out.pos_done >= runtime->buffer_size)
line6pcm->out.pos_done -= runtime->buffer_size;
@@ -401,8 +409,13 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm)
struct usb_line6 *line6 = line6pcm->line6;
int i;
+ line6pcm->out.urbs = kzalloc(
+ sizeof(struct urb *) * line6->iso_buffers, GFP_KERNEL);
+ if (line6pcm->out.urbs == NULL)
+ return -ENOMEM;
+
/* create audio URBs and fill in constant values: */
- for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
+ for (i = 0; i < line6->iso_buffers; ++i) {
struct urb *urb;
/* URB for audio out: */
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index 45dd34874f43..17aa616e61f5 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -83,7 +83,6 @@ struct usb_line6_pod {
};
#define POD_SYSEX_CODE 3
-#define POD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
/* *INDENT-OFF* */
@@ -167,7 +166,7 @@ static struct line6_pcm_properties pod_pcm_properties = {
.rates = {
.nrats = 1,
.rats = &pod_ratden},
- .bytes_per_frame = POD_BYTES_PER_FRAME
+ .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
};
static const char pod_version_header[] = {
@@ -476,6 +475,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "BassPODxt",
.name = "BassPODxt",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
@@ -488,6 +488,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "BassPODxtLive",
.name = "BassPODxt Live",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 1,
@@ -500,6 +501,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "BassPODxtPro",
.name = "BassPODxt Pro",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
@@ -511,7 +513,8 @@ static const struct line6_properties pod_properties_table[] = {
[LINE6_POCKETPOD] = {
.id = "PocketPOD",
.name = "Pocket POD",
- .capabilities = LINE6_CAP_CONTROL,
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI,
.altsetting = 0,
.ep_ctrl_r = 0x82,
.ep_ctrl_w = 0x02,
@@ -521,6 +524,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "PODxt",
.name = "PODxt",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
@@ -533,6 +537,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "PODxtLive",
.name = "PODxt Live",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 1,
@@ -545,6 +550,7 @@ static const struct line6_properties pod_properties_table[] = {
.id = "PODxtPro",
.name = "PODxt Pro",
.capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI
| LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 63dcaef41ac3..9352a44ae6e4 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -2,6 +2,7 @@
* Line 6 Pod HD
*
* Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ * Copyright (C) 2015 Andrej Krutak <dev@andree.sk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -18,14 +19,46 @@
#include "driver.h"
#include "pcm.h"
+#define PODHD_STARTUP_DELAY 500
+
+/*
+ * Stages of POD startup procedure
+ */
+enum {
+ PODHD_STARTUP_INIT = 1,
+ PODHD_STARTUP_SCHEDULE_WORKQUEUE,
+ PODHD_STARTUP_SETUP,
+ PODHD_STARTUP_LAST = PODHD_STARTUP_SETUP - 1
+};
+
enum {
LINE6_PODHD300,
LINE6_PODHD400,
LINE6_PODHD500_0,
LINE6_PODHD500_1,
+ LINE6_PODX3,
+ LINE6_PODX3LIVE
};
-#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
+struct usb_line6_podhd {
+ /* Generic Line 6 USB data */
+ struct usb_line6 line6;
+
+ /* Timer for device initialization */
+ struct timer_list startup_timer;
+
+ /* Work handler for device initialization */
+ struct work_struct startup_work;
+
+ /* Current progress in startup procedure */
+ int startup_progress;
+
+ /* Serial number of device */
+ u32 serial_number;
+
+ /* Firmware version */
+ int firmware_version;
+};
static struct snd_ratden podhd_ratden = {
.num_min = 48000,
@@ -73,29 +106,233 @@ static struct line6_pcm_properties podhd_pcm_properties = {
.rates = {
.nrats = 1,
.rats = &podhd_ratden},
- .bytes_per_frame = PODHD_BYTES_PER_FRAME
+ .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
+};
+
+static struct line6_pcm_properties podx3_pcm_properties = {
+ .playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ /* 1+2: Main signal (out), 3+4: Tone 1,
+ * 5+6: Tone 2, 7+8: raw
+ */
+ .channels_min = 8,
+ .channels_max = 8,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .rates = {
+ .nrats = 1,
+ .rats = &podhd_ratden},
+ .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
+};
+
+static void podhd_startup_start_workqueue(unsigned long data);
+static void podhd_startup_workqueue(struct work_struct *work);
+static int podhd_startup_finalize(struct usb_line6_podhd *pod);
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_podhd *pod = card->private_data;
+
+ return sprintf(buf, "%u\n", pod->serial_number);
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_podhd *pod = card->private_data;
+
+ return sprintf(buf, "%06x\n", pod->firmware_version);
+}
+
+static DEVICE_ATTR_RO(firmware_version);
+static DEVICE_ATTR_RO(serial_number);
+
+static struct attribute *podhd_dev_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
+
+static const struct attribute_group podhd_dev_attr_group = {
+ .name = "podhd",
+ .attrs = podhd_dev_attrs,
};
/*
+ * POD X3 startup procedure.
+ *
+ * May be compatible with other POD HDs, since it's also similar to the
+ * previous POD setup. In any case, it doesn't seem to be required for the
+ * audio or bulk interfaces to work.
+ */
+
+static void podhd_startup(struct usb_line6_podhd *pod)
+{
+ CHECK_STARTUP_PROGRESS(pod->startup_progress, PODHD_STARTUP_INIT);
+
+ /* delay startup procedure: */
+ line6_start_timer(&pod->startup_timer, PODHD_STARTUP_DELAY,
+ podhd_startup_start_workqueue, (unsigned long)pod);
+}
+
+static void podhd_startup_start_workqueue(unsigned long data)
+{
+ struct usb_line6_podhd *pod = (struct usb_line6_podhd *)data;
+
+ CHECK_STARTUP_PROGRESS(pod->startup_progress,
+ PODHD_STARTUP_SCHEDULE_WORKQUEUE);
+
+ /* schedule work for global work queue: */
+ schedule_work(&pod->startup_work);
+}
+
+static int podhd_dev_start(struct usb_line6_podhd *pod)
+{
+ int ret;
+ u8 init_bytes[8];
+ int i;
+ struct usb_device *usbdev = pod->line6.usbdev;
+
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+ 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x11, 0,
+ NULL, 0, LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
+ return ret;
+ }
+
+ /* NOTE: looks like some kind of ping message */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x11, 0x0,
+ &init_bytes, 3, LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(pod->line6.ifcdev,
+ "receive length failed (error %d)\n", ret);
+ return ret;
+ }
+
+ pod->firmware_version =
+ (init_bytes[0] << 16) | (init_bytes[1] << 8) | (init_bytes[2] << 0);
+
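+	/* Read back 17 eight-byte blocks from the 0xf000 register area; the
+	 * data is discarded and the reads appear to be part of the startup
+	 * handshake only.
+	 */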
+ for (i = 0; i <= 16; i++) {
+ ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+ USB_REQ_SET_FEATURE,
+ USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 1, 0,
+ NULL, 0, LINE6_TIMEOUT * HZ);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void podhd_startup_workqueue(struct work_struct *work)
+{
+ struct usb_line6_podhd *pod =
+ container_of(work, struct usb_line6_podhd, startup_work);
+
+ CHECK_STARTUP_PROGRESS(pod->startup_progress, PODHD_STARTUP_SETUP);
+
+ podhd_dev_start(pod);
+ line6_read_serial_number(&pod->line6, &pod->serial_number);
+
+ podhd_startup_finalize(pod);
+}
+
+static int podhd_startup_finalize(struct usb_line6_podhd *pod)
+{
+ struct usb_line6 *line6 = &pod->line6;
+
+ /* ALSA audio interface: */
+ return snd_card_register(line6->card);
+}
+
+static void podhd_disconnect(struct usb_line6 *line6)
+{
+ struct usb_line6_podhd *pod = (struct usb_line6_podhd *)line6;
+
+ if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ del_timer_sync(&pod->startup_timer);
+ cancel_work_sync(&pod->startup_work);
+ }
+}
+
+/*
Try to init POD HD device.
*/
static int podhd_init(struct usb_line6 *line6,
const struct usb_device_id *id)
{
int err;
+ struct usb_line6_podhd *pod = (struct usb_line6_podhd *) line6;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(line6);
- if (err < 0)
- return err;
+ line6->disconnect = podhd_disconnect;
- /* initialize PCM subsystem: */
- err = line6_init_pcm(line6, &podhd_pcm_properties);
- if (err < 0)
- return err;
+ if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ /* create sysfs entries: */
+ err = snd_card_add_dev_attr(line6->card, &podhd_dev_attr_group);
+ if (err < 0)
+ return err;
+ }
- /* register USB audio system: */
- return snd_card_register(line6->card);
+ if (pod->line6.properties->capabilities & LINE6_CAP_PCM) {
+ /* initialize PCM subsystem: */
+ err = line6_init_pcm(line6,
+ (id->driver_info == LINE6_PODX3) ? &podx3_pcm_properties :
+ &podhd_pcm_properties);
+ if (err < 0)
+ return err;
+ }
+
+ if (!(pod->line6.properties->capabilities & LINE6_CAP_CONTROL)) {
+ /* register USB audio system directly */
+ return podhd_startup_finalize(pod);
+ }
+
+ /* init device and delay registering */
+ init_timer(&pod->startup_timer);
+ INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+ podhd_startup(pod);
+ return 0;
}
#define LINE6_DEVICE(prod) USB_DEVICE(0x0e41, prod)
@@ -103,10 +340,13 @@ static int podhd_init(struct usb_line6 *line6,
/* table of devices that work with this driver */
static const struct usb_device_id podhd_id_table[] = {
+ /* TODO: no need to alloc data interfaces when only audio is used */
{ LINE6_DEVICE(0x5057), .driver_info = LINE6_PODHD300 },
{ LINE6_DEVICE(0x5058), .driver_info = LINE6_PODHD400 },
{ LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 },
{ LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
+ { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
+ { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
{}
};
@@ -116,8 +356,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODHD300] = {
.id = "PODHD300",
.name = "POD HD300",
- .capabilities = LINE6_CAP_CONTROL
- | LINE6_CAP_PCM
+ .capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
.ep_ctrl_r = 0x84,
@@ -128,8 +367,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODHD400] = {
.id = "PODHD400",
.name = "POD HD400",
- .capabilities = LINE6_CAP_CONTROL
- | LINE6_CAP_PCM
+ .capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 5,
.ep_ctrl_r = 0x84,
@@ -140,8 +378,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODHD500_0] = {
.id = "PODHD500",
.name = "POD HD500",
- .capabilities = LINE6_CAP_CONTROL
- | LINE6_CAP_PCM
+ .capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 1,
.ep_ctrl_r = 0x81,
@@ -152,8 +389,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODHD500_1] = {
.id = "PODHD500",
.name = "POD HD500",
- .capabilities = LINE6_CAP_CONTROL
- | LINE6_CAP_PCM
+ .capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
.altsetting = 1,
.ep_ctrl_r = 0x81,
@@ -161,6 +397,28 @@ static const struct line6_properties podhd_properties_table[] = {
.ep_audio_r = 0x86,
.ep_audio_w = 0x02,
},
+ [LINE6_PODX3] = {
+ .id = "PODX3",
+ .name = "POD X3",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM | LINE6_CAP_HWMON | LINE6_CAP_IN_NEEDS_OUT,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
+ [LINE6_PODX3LIVE] = {
+ .id = "PODX3LIVE",
+ .name = "POD X3 LIVE",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM | LINE6_CAP_HWMON | LINE6_CAP_IN_NEEDS_OUT,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
};
/*
@@ -171,7 +429,7 @@ static int podhd_probe(struct usb_interface *interface,
{
return line6_probe(interface, id, "Line6-PODHD",
&podhd_properties_table[id->driver_info],
- podhd_init, sizeof(struct usb_line6));
+ podhd_init, sizeof(struct usb_line6_podhd));
}
static struct usb_driver podhd_driver = {
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index 6d4c50c9b17d..8e22f430d700 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -114,7 +114,7 @@ static struct line6_pcm_properties toneport_pcm_properties = {
.rates = {
.nrats = 1,
.rats = &toneport_ratden},
- .bytes_per_frame = 4
+ .bytes_per_channel = 2
};
static const struct {
@@ -177,7 +177,7 @@ static int snd_toneport_monitor_put(struct snd_kcontrol *kcontrol,
line6pcm->volume_monitor = ucontrol->value.integer.value[0];
if (line6pcm->volume_monitor > 0) {
- err = line6_pcm_acquire(line6pcm, LINE6_STREAM_MONITOR);
+ err = line6_pcm_acquire(line6pcm, LINE6_STREAM_MONITOR, true);
if (err < 0) {
line6pcm->volume_monitor = 0;
line6_pcm_release(line6pcm, LINE6_STREAM_MONITOR);
@@ -246,7 +246,7 @@ static void toneport_start_pcm(unsigned long arg)
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
struct usb_line6 *line6 = &toneport->line6;
- line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR);
+ line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
}
/* control definition */
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index ddc23ddf0750..0c4512d0382e 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -259,7 +259,8 @@ static const struct line6_properties variax_properties_table[] = {
[LINE6_PODXTLIVE_VARIAX] = {
.id = "PODxtLive",
.name = "PODxt Live",
- .capabilities = LINE6_CAP_CONTROL,
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI,
.altsetting = 1,
.ep_ctrl_r = 0x86,
.ep_ctrl_w = 0x05,
@@ -269,7 +270,8 @@ static const struct line6_properties variax_properties_table[] = {
[LINE6_VARIAX] = {
.id = "Variax",
.name = "Variax Workbench",
- .capabilities = LINE6_CAP_CONTROL,
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_CONTROL_MIDI,
.altsetting = 1,
.ep_ctrl_r = 0x82,
.ep_ctrl_w = 0x01,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index f6c3bf79af9a..04991b009132 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1831,6 +1831,7 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
}
static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+ struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
/* Approximation using 10 ranges based on output measurement on hw v1.2.
@@ -1848,10 +1849,19 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
);
- usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
- kctl->tlv.p = scale;
- kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
- kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ if (cval->min == 0 && cval->max == 50) {
+ usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk (0-50 variant)\n");
+ kctl->tlv.p = scale;
+ kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+
+ } else if (cval->min == 0 && cval->max <= 1000) {
+ /* Some other clearly broken DragonFly variant.
+ * At least a 0..53 variant (hw v1.0) exists.
+ */
+ usb_audio_info(mixer->chip, "ignoring too narrow dB range on a DragonFly device");
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ }
}
void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
@@ -1860,8 +1870,8 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
{
switch (mixer->chip->usb_id) {
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
- if (unitid == 7 && cval->min == 0 && cval->max == 50)
- snd_dragonfly_quirk_db_scale(mixer, kctl);
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
break;
}
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 152292e5ee2b..2782155ae3ce 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1217,6 +1217,12 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
ep->chip->usb_id == USB_ID(0x0763, 0x2031)) &&
ep->type == SND_USB_ENDPOINT_TYPE_DATA)
ep->skip_packets = 16;
+
+ /* Work around devices that report unreasonable feedback data */
+ if ((ep->chip->usb_id == USB_ID(0x0644, 0x8038) || /* TEAC UD-H01 */
+ ep->chip->usb_id == USB_ID(0x1852, 0x5034)) && /* T+A Dac8 */
+ ep->syncmaxsize == 4)
+ ep->tenor_fb_quirk = 1;
}
void snd_usb_set_interface_quirk(struct usb_device *dev)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 0d9f48ec42bb..bc7adb84e679 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1433,7 +1433,7 @@ int main(int argc, char *argv[])
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
- kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
+ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
if (kvp_fd < 0) {
syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 5d51d6ff08e6..e0829809c897 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -250,6 +250,9 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "/etc/fstab and /proc/mounts");
}
break;
+ case VSS_OP_HOT_BACKUP:
+ syslog(LOG_INFO, "VSS: op=CHECK HOT BACKUP\n");
+ break;
default:
syslog(LOG_ERR, "Illegal op:%d\n", op);
}
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 5eb6793f3972..7a6d61c6c012 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -121,10 +121,6 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
ret = -ENOENT;
while (ent = readdir(dp), ent)
- /*
- * Do we allow devices to override a generic name with
- * a specific one?
- */
if ((strcmp(builtname, ent->d_name) == 0) ||
(strcmp(builtname_generic, ent->d_name) == 0)) {
ret = asprintf(&filename,
@@ -178,6 +174,13 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
sysfsfp = 0;
free(filename);
filename = 0;
+
+ /*
+ * Avoid having a more generic entry overwriting
+ * the settings.
+ */
+ if (strcmp(builtname, ent->d_name) == 0)
+ break;
}
error_close_sysfsfp:
diff --git a/tools/iio/lsiio.c b/tools/iio/lsiio.c
index 3d650e668252..ab0f5cf16025 100644
--- a/tools/iio/lsiio.c
+++ b/tools/iio/lsiio.c
@@ -51,7 +51,8 @@ static int dump_channels(const char *dev_dir_name)
while (ent = readdir(dp), ent)
if (check_prefix(ent->d_name, "in_") &&
- check_postfix(ent->d_name, "_raw"))
+ (check_postfix(ent->d_name, "_raw") ||
+ check_postfix(ent->d_name, "_input")))
printf(" %-10s\n", ent->d_name);
return (closedir(dp) == -1) ? -errno : 0;
diff --git a/tools/spi/Makefile b/tools/spi/Makefile
index cd0db62e4d9d..3815b18ba070 100644
--- a/tools/spi/Makefile
+++ b/tools/spi/Makefile
@@ -1,3 +1,5 @@
+CC = $(CROSS_COMPILE)gcc
+
all: spidev_test spidev_fdx
clean:
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index 8a73d8185316..f046b77cfefe 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -19,6 +19,7 @@
#include <getopt.h>
#include <fcntl.h>
#include <sys/ioctl.h>
+#include <linux/ioctl.h>
#include <sys/stat.h>
#include <linux/types.h>
#include <linux/spi/spidev.h>
@@ -284,7 +285,7 @@ static void parse_opts(int argc, char *argv[])
static void transfer_escaped_string(int fd, char *str)
{
- size_t size = strlen(str + 1);
+ size_t size = strlen(str);
uint8_t *tx;
uint8_t *rx;
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index ff9e5f20a5a7..f770dba2a6f6 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -15,6 +15,7 @@ TARGETS += memory-hotplug
TARGETS += mount
TARGETS += mqueue
TARGETS += net
+TARGETS += nsfs
TARGETS += powerpc
TARGETS += pstore
TARGETS += ptrace
diff --git a/tools/testing/selftests/nsfs/Makefile b/tools/testing/selftests/nsfs/Makefile
new file mode 100644
index 000000000000..2306054a901a
--- /dev/null
+++ b/tools/testing/selftests/nsfs/Makefile
@@ -0,0 +1,12 @@
+TEST_PROGS := owner pidns
+
+CFLAGS := -Wall -Werror
+
+all: owner pidns
+owner: owner.c
+pidns: pidns.c
+
+clean:
+ $(RM) owner pidns
+
+include ../lib.mk
diff --git a/tools/testing/selftests/nsfs/owner.c b/tools/testing/selftests/nsfs/owner.c
new file mode 100644
index 000000000000..437205f8b714
--- /dev/null
+++ b/tools/testing/selftests/nsfs/owner.c
@@ -0,0 +1,91 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+
+#define NSIO 0xb7
+#define NS_GET_USERNS _IO(NSIO, 0x1)
+
+#define pr_err(fmt, ...) \
+ ({ \
+ fprintf(stderr, "%s:%d:" fmt ": %m\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ 1; \
+ })
+
+int main(int argc, char *argv[])
+{
+ int pfd[2], ns, uns, init_uns;
+ struct stat st1, st2;
+ char path[128];
+ pid_t pid;
+ char c;
+
+ if (pipe(pfd))
+ return 1;
+
+ pid = fork();
+ if (pid < 0)
+ return pr_err("fork");
+ if (pid == 0) {
+ prctl(PR_SET_PDEATHSIG, SIGKILL);
+ if (unshare(CLONE_NEWUTS | CLONE_NEWUSER))
+ return pr_err("unshare");
+ close(pfd[0]);
+ close(pfd[1]);
+ while (1)
+ sleep(1);
+ return 0;
+ }
+ close(pfd[1]);
+ if (read(pfd[0], &c, 1) != 0)
+ return pr_err("Unable to read from pipe");
+ close(pfd[0]);
+
+ snprintf(path, sizeof(path), "/proc/%d/ns/uts", pid);
+ ns = open(path, O_RDONLY);
+ if (ns < 0)
+ return pr_err("Unable to open %s", path);
+
+ uns = ioctl(ns, NS_GET_USERNS);
+ if (uns < 0)
+ return pr_err("Unable to get an owning user namespace");
+
+ if (fstat(uns, &st1))
+ return pr_err("fstat");
+
+ snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
+ if (stat(path, &st2))
+ return pr_err("stat");
+
+ if (st1.st_ino != st2.st_ino)
+ return pr_err("NS_GET_USERNS returned a wrong namespace");
+
+ init_uns = ioctl(uns, NS_GET_USERNS);
+ if (init_uns < 0)
+ return pr_err("Unable to get an owning user namespace");
+
+ if (ioctl(init_uns, NS_GET_USERNS) >= 0 || errno != EPERM)
+ return pr_err("Don't get EPERM");
+
+ if (unshare(CLONE_NEWUSER))
+ return pr_err("unshare");
+
+ if (ioctl(ns, NS_GET_USERNS) >= 0 || errno != EPERM)
+ return pr_err("Don't get EPERM");
+ if (ioctl(init_uns, NS_GET_USERNS) >= 0 || errno != EPERM)
+ return pr_err("Don't get EPERM");
+
+ kill(pid, SIGKILL);
+ wait(NULL);
+ return 0;
+}
diff --git a/tools/testing/selftests/nsfs/pidns.c b/tools/testing/selftests/nsfs/pidns.c
new file mode 100644
index 000000000000..ae3a0d68e966
--- /dev/null
+++ b/tools/testing/selftests/nsfs/pidns.c
@@ -0,0 +1,78 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+
+#define pr_err(fmt, ...) \
+ ({ \
+ fprintf(stderr, "%s:%d:" fmt ": %m\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ 1; \
+ })
+
+#define NSIO 0xb7
+#define NS_GET_USERNS _IO(NSIO, 0x1)
+#define NS_GET_PARENT _IO(NSIO, 0x2)
+
+#define __stack_aligned__ __attribute__((aligned(16)))
+struct cr_clone_arg {
+ char stack[128] __stack_aligned__;
+ char stack_ptr[0];
+};
+
+static int child(void *args)
+{
+ prctl(PR_SET_PDEATHSIG, SIGKILL);
+ while (1)
+ sleep(1);
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ char *ns_strs[] = {"pid", "user"};
+ char path[] = "/proc/0123456789/ns/pid";
+ struct cr_clone_arg ca;
+ struct stat st1, st2;
+ int ns, pns, i;
+ pid_t pid;
+
+ pid = clone(child, ca.stack_ptr, CLONE_NEWUSER | CLONE_NEWPID | SIGCHLD, NULL);
+ if (pid < 0)
+ return pr_err("clone");
+
+ for (i = 0; i < 2; i++) {
+ snprintf(path, sizeof(path), "/proc/%d/ns/%s", pid, ns_strs[i]);
+ ns = open(path, O_RDONLY);
+ if (ns < 0)
+ return pr_err("Unable to open %s", path);
+
+ pns = ioctl(ns, NS_GET_PARENT);
+ if (pns < 0)
+ return pr_err("Unable to get a parent pidns");
+
+ snprintf(path, sizeof(path), "/proc/self/ns/%s", ns_strs[i]);
+ if (stat(path, &st2))
+ return pr_err("Unable to stat %s", path);
+ if (fstat(pns, &st1))
+ return pr_err("Unable to stat the parent pidns");
+ if (st1.st_ino != st2.st_ino)
+ return pr_err("NS_GET_PARENT returned a wrong namespace");
+
+ if (ioctl(pns, NS_GET_PARENT) >= 0 || errno != EPERM)
+ return pr_err("Don't get EPERM");;
+ }
+
+ kill(pid, SIGKILL);
+ wait(NULL);
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1cc6d64c39b7..db54a33f850f 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -19,6 +19,7 @@ SUB_DIRS = alignment \
dscr \
mm \
pmu \
+ signal \
primitives \
stringloops \
switch_endian \
diff --git a/tools/testing/selftests/powerpc/fpu_asm.h b/tools/testing/selftests/powerpc/fpu_asm.h
new file mode 100644
index 000000000000..6a387d255e27
--- /dev/null
+++ b/tools/testing/selftests/powerpc/fpu_asm.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _SELFTESTS_POWERPC_FPU_ASM_H
+#define _SELFTESTS_POWERPC_FPU_ASM_H
+#include "basic_asm.h"
+
+#define PUSH_FPU(stack_size) \
+ stfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \
+ stfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \
+ stfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \
+ stfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \
+ stfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \
+ stfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \
+ stfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \
+ stfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \
+ stfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \
+ stfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \
+ stfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \
+ stfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \
+ stfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \
+ stfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \
+ stfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \
+ stfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \
+ stfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \
+ stfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1);
+
+#define POP_FPU(stack_size) \
+ lfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \
+ lfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \
+ lfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \
+ lfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \
+ lfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \
+ lfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \
+ lfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \
+ lfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \
+ lfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \
+ lfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \
+ lfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \
+ lfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \
+ lfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \
+ lfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \
+ lfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \
+ lfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \
+ lfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \
+ lfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1);
+
+/*
+ * Careful calling this, it will 'clobber' fpu (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_fpu)
+ lfd f14,0(r3)
+ lfd f15,8(r3)
+ lfd f16,16(r3)
+ lfd f17,24(r3)
+ lfd f18,32(r3)
+ lfd f19,40(r3)
+ lfd f20,48(r3)
+ lfd f21,56(r3)
+ lfd f22,64(r3)
+ lfd f23,72(r3)
+ lfd f24,80(r3)
+ lfd f25,88(r3)
+ lfd f26,96(r3)
+ lfd f27,104(r3)
+ lfd f28,112(r3)
+ lfd f29,120(r3)
+ lfd f30,128(r3)
+ lfd f31,136(r3)
+ blr
+FUNC_END(load_fpu)
+
+#endif /* _SELFTESTS_POWERPC_FPU_ASM_H */
diff --git a/tools/testing/selftests/powerpc/gpr_asm.h b/tools/testing/selftests/powerpc/gpr_asm.h
new file mode 100644
index 000000000000..f6f38852d3a0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/gpr_asm.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _SELFTESTS_POWERPC_GPR_ASM_H
+#define _SELFTESTS_POWERPC_GPR_ASM_H
+
+#include "basic_asm.h"
+
+#define __PUSH_NVREGS(top_pos); \
+ std r31,(top_pos)(%r1); \
+ std r30,(top_pos - 8)(%r1); \
+ std r29,(top_pos - 16)(%r1); \
+ std r28,(top_pos - 24)(%r1); \
+ std r27,(top_pos - 32)(%r1); \
+ std r26,(top_pos - 40)(%r1); \
+ std r25,(top_pos - 48)(%r1); \
+ std r24,(top_pos - 56)(%r1); \
+ std r23,(top_pos - 64)(%r1); \
+ std r22,(top_pos - 72)(%r1); \
+ std r21,(top_pos - 80)(%r1); \
+ std r20,(top_pos - 88)(%r1); \
+ std r19,(top_pos - 96)(%r1); \
+ std r18,(top_pos - 104)(%r1); \
+ std r17,(top_pos - 112)(%r1); \
+ std r16,(top_pos - 120)(%r1); \
+ std r15,(top_pos - 128)(%r1); \
+ std r14,(top_pos - 136)(%r1)
+
+#define __POP_NVREGS(top_pos); \
+ ld r31,(top_pos)(%r1); \
+ ld r30,(top_pos - 8)(%r1); \
+ ld r29,(top_pos - 16)(%r1); \
+ ld r28,(top_pos - 24)(%r1); \
+ ld r27,(top_pos - 32)(%r1); \
+ ld r26,(top_pos - 40)(%r1); \
+ ld r25,(top_pos - 48)(%r1); \
+ ld r24,(top_pos - 56)(%r1); \
+ ld r23,(top_pos - 64)(%r1); \
+ ld r22,(top_pos - 72)(%r1); \
+ ld r21,(top_pos - 80)(%r1); \
+ ld r20,(top_pos - 88)(%r1); \
+ ld r19,(top_pos - 96)(%r1); \
+ ld r18,(top_pos - 104)(%r1); \
+ ld r17,(top_pos - 112)(%r1); \
+ ld r16,(top_pos - 120)(%r1); \
+ ld r15,(top_pos - 128)(%r1); \
+ ld r14,(top_pos - 136)(%r1)
+
+#define PUSH_NVREGS(stack_size) \
+ __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE)
+
+/* 18 NV FPU REGS */
+#define PUSH_NVREGS_BELOW_FPU(stack_size) \
+ __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8))
+
+#define POP_NVREGS(stack_size) \
+ __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE)
+
+/* 18 NV FPU REGS */
+#define POP_NVREGS_BELOW_FPU(stack_size) \
+ __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8))
+
+/*
+ * Careful calling this, it will 'clobber' NVGPRs (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_gpr)
+ ld r14,0(r3)
+ ld r15,8(r3)
+ ld r16,16(r3)
+ ld r17,24(r3)
+ ld r18,32(r3)
+ ld r19,40(r3)
+ ld r20,48(r3)
+ ld r21,56(r3)
+ ld r22,64(r3)
+ ld r23,72(r3)
+ ld r24,80(r3)
+ ld r25,88(r3)
+ ld r26,96(r3)
+ ld r27,104(r3)
+ ld r28,112(r3)
+ ld r29,120(r3)
+ ld r30,128(r3)
+ ld r31,136(r3)
+ blr
+FUNC_END(load_gpr)
+
+
+#endif /* _SELFTESTS_POWERPC_GPR_ASM_H */
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 52f9be7f61f0..248a820048df 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -19,9 +19,9 @@
#include "subunit.h"
#include "utils.h"
-#define TIMEOUT 120
#define KILL_TIMEOUT 5
+static uint64_t timeout = 120;
int run_test(int (test_function)(void), char *name)
{
@@ -44,7 +44,7 @@ int run_test(int (test_function)(void), char *name)
setpgid(pid, pid);
/* Wake us up in timeout seconds */
- alarm(TIMEOUT);
+ alarm(timeout);
terminated = false;
wait:
@@ -94,6 +94,11 @@ static struct sigaction alarm_action = {
.sa_handler = alarm_handler,
};
+void test_harness_set_timeout(uint64_t time)
+{
+ timeout = time;
+}
+
int test_harness(int (test_function)(void), char *name)
{
int rc;
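With this change, the fixed 120-second TIMEOUT define becomes a default that individual tests can override through test_harness_set_timeout() before handing control to the harness; the new powerpc signal test later in this patch does exactly that with a 300-second limit. A minimal sketch of the pattern (the test body is a placeholder, and it assumes the prototype is exported through the selftest utils.h touched at the end of this diff):

#include "utils.h"	/* test_harness(), test_harness_set_timeout() */

static int my_slow_test(void)
{
	/* ... long-running test body elided ... */
	return 0;
}

int main(void)
{
	/* Allow five minutes instead of the default 120 seconds. */
	test_harness_set_timeout(300);
	return test_harness(my_slow_test, "my_slow_test");
}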
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 5b88875d5955..a505b66d408a 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal
+TEST_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
all: $(TEST_PROGS)
@@ -13,6 +13,9 @@ vmx_syscall: vmx_asm.S
vmx_preempt: vmx_asm.S
vmx_signal: vmx_asm.S
+vsx_preempt: CFLAGS += -mvsx
+vsx_preempt: vsx_asm.S
+
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S
index f3711d80e709..241f067a510f 100644
--- a/tools/testing/selftests/powerpc/math/fpu_asm.S
+++ b/tools/testing/selftests/powerpc/math/fpu_asm.S
@@ -8,70 +8,7 @@
*/
#include "../basic_asm.h"
-
-#define PUSH_FPU(pos) \
- stfd f14,pos(sp); \
- stfd f15,pos+8(sp); \
- stfd f16,pos+16(sp); \
- stfd f17,pos+24(sp); \
- stfd f18,pos+32(sp); \
- stfd f19,pos+40(sp); \
- stfd f20,pos+48(sp); \
- stfd f21,pos+56(sp); \
- stfd f22,pos+64(sp); \
- stfd f23,pos+72(sp); \
- stfd f24,pos+80(sp); \
- stfd f25,pos+88(sp); \
- stfd f26,pos+96(sp); \
- stfd f27,pos+104(sp); \
- stfd f28,pos+112(sp); \
- stfd f29,pos+120(sp); \
- stfd f30,pos+128(sp); \
- stfd f31,pos+136(sp);
-
-#define POP_FPU(pos) \
- lfd f14,pos(sp); \
- lfd f15,pos+8(sp); \
- lfd f16,pos+16(sp); \
- lfd f17,pos+24(sp); \
- lfd f18,pos+32(sp); \
- lfd f19,pos+40(sp); \
- lfd f20,pos+48(sp); \
- lfd f21,pos+56(sp); \
- lfd f22,pos+64(sp); \
- lfd f23,pos+72(sp); \
- lfd f24,pos+80(sp); \
- lfd f25,pos+88(sp); \
- lfd f26,pos+96(sp); \
- lfd f27,pos+104(sp); \
- lfd f28,pos+112(sp); \
- lfd f29,pos+120(sp); \
- lfd f30,pos+128(sp); \
- lfd f31,pos+136(sp);
-
-# Careful calling this, it will 'clobber' fpu (by design)
-# Don't call this from C
-FUNC_START(load_fpu)
- lfd f14,0(r3)
- lfd f15,8(r3)
- lfd f16,16(r3)
- lfd f17,24(r3)
- lfd f18,32(r3)
- lfd f19,40(r3)
- lfd f20,48(r3)
- lfd f21,56(r3)
- lfd f22,64(r3)
- lfd f23,72(r3)
- lfd f24,80(r3)
- lfd f25,88(r3)
- lfd f26,96(r3)
- lfd f27,104(r3)
- lfd f28,112(r3)
- lfd f29,120(r3)
- lfd f30,128(r3)
- lfd f31,136(r3)
- blr
-FUNC_END(load_fpu)
+#include "../fpu_asm.h"
FUNC_START(check_fpu)
mr r4,r3
@@ -138,9 +75,9 @@ FUNC_START(test_fpu)
# r4 holds pointer to the pid
# f14-f31 are non volatiles
PUSH_BASIC_STACK(256)
+ PUSH_FPU(256)
std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray
std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid
- PUSH_FPU(STACK_FRAME_LOCAL(2,0))
bl load_fpu
nop
@@ -155,7 +92,7 @@ FUNC_START(test_fpu)
bl check_fpu
nop
- POP_FPU(STACK_FRAME_LOCAL(2,0))
+ POP_FPU(256)
POP_BASIC_STACK(256)
blr
FUNC_END(test_fpu)
@@ -166,10 +103,10 @@ FUNC_END(test_fpu)
# registers while running is not zero.
FUNC_START(preempt_fpu)
PUSH_BASIC_STACK(256)
+ PUSH_FPU(256)
std r3,STACK_FRAME_PARAM(0)(sp) # double *darray
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
- PUSH_FPU(STACK_FRAME_LOCAL(3,0))
bl load_fpu
nop
@@ -192,7 +129,7 @@ FUNC_START(preempt_fpu)
cmpwi r5,0
bne 2b
-3: POP_FPU(STACK_FRAME_LOCAL(3,0))
+3: POP_FPU(256)
POP_BASIC_STACK(256)
blr
FUNC_END(preempt_fpu)
diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S
index 1b8c248b3ac1..fd74da488625 100644
--- a/tools/testing/selftests/powerpc/math/vmx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vmx_asm.S
@@ -8,90 +8,7 @@
*/
#include "../basic_asm.h"
-
-# POS MUST BE 16 ALIGNED!
-#define PUSH_VMX(pos,reg) \
- li reg,pos; \
- stvx v20,reg,sp; \
- addi reg,reg,16; \
- stvx v21,reg,sp; \
- addi reg,reg,16; \
- stvx v22,reg,sp; \
- addi reg,reg,16; \
- stvx v23,reg,sp; \
- addi reg,reg,16; \
- stvx v24,reg,sp; \
- addi reg,reg,16; \
- stvx v25,reg,sp; \
- addi reg,reg,16; \
- stvx v26,reg,sp; \
- addi reg,reg,16; \
- stvx v27,reg,sp; \
- addi reg,reg,16; \
- stvx v28,reg,sp; \
- addi reg,reg,16; \
- stvx v29,reg,sp; \
- addi reg,reg,16; \
- stvx v30,reg,sp; \
- addi reg,reg,16; \
- stvx v31,reg,sp;
-
-# POS MUST BE 16 ALIGNED!
-#define POP_VMX(pos,reg) \
- li reg,pos; \
- lvx v20,reg,sp; \
- addi reg,reg,16; \
- lvx v21,reg,sp; \
- addi reg,reg,16; \
- lvx v22,reg,sp; \
- addi reg,reg,16; \
- lvx v23,reg,sp; \
- addi reg,reg,16; \
- lvx v24,reg,sp; \
- addi reg,reg,16; \
- lvx v25,reg,sp; \
- addi reg,reg,16; \
- lvx v26,reg,sp; \
- addi reg,reg,16; \
- lvx v27,reg,sp; \
- addi reg,reg,16; \
- lvx v28,reg,sp; \
- addi reg,reg,16; \
- lvx v29,reg,sp; \
- addi reg,reg,16; \
- lvx v30,reg,sp; \
- addi reg,reg,16; \
- lvx v31,reg,sp;
-
-# Carefull this will 'clobber' vmx (by design)
-# Don't call this from C
-FUNC_START(load_vmx)
- li r5,0
- lvx v20,r5,r3
- addi r5,r5,16
- lvx v21,r5,r3
- addi r5,r5,16
- lvx v22,r5,r3
- addi r5,r5,16
- lvx v23,r5,r3
- addi r5,r5,16
- lvx v24,r5,r3
- addi r5,r5,16
- lvx v25,r5,r3
- addi r5,r5,16
- lvx v26,r5,r3
- addi r5,r5,16
- lvx v27,r5,r3
- addi r5,r5,16
- lvx v28,r5,r3
- addi r5,r5,16
- lvx v29,r5,r3
- addi r5,r5,16
- lvx v30,r5,r3
- addi r5,r5,16
- lvx v31,r5,r3
- blr
-FUNC_END(load_vmx)
+#include "../vmx_asm.h"
# Should be safe from C, only touches r4, r5 and v0,v1,v2
FUNC_START(check_vmx)
diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S
new file mode 100644
index 000000000000..a110dd882d5e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/vsx_asm.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "../basic_asm.h"
+#include "../vsx_asm.h"
+
+# long check_vsx(vector int *r3);
+# This function wraps storing the VSX regs to the end of an array and a
+# call to a comparison function in C which boils down to a memcmp().
+FUNC_START(check_vsx)
+ PUSH_BASIC_STACK(32)
+ std r3,STACK_FRAME_PARAM(0)(sp)
+ addi r3, r3, 16 * 12 #Second half of array
+ bl store_vsx
+ ld r3,STACK_FRAME_PARAM(0)(sp)
+ bl vsx_memcmp
+ POP_BASIC_STACK(32)
+ blr
+FUNC_END(check_vsx)
+
+# int preempt_vsx(vector int *varray, int *threads_starting,
+# int *running);
+# On starting it will (atomically) decrement threads_starting as a signal
+# that the VSX registers have been loaded with varray. It then checks the
+# validity of the VSX registers while running is not zero.
+FUNC_START(preempt_vsx)
+ PUSH_BASIC_STACK(512)
+ std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
+ std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
+ std r5,STACK_FRAME_PARAM(2)(sp) # int *running
+
+ bl load_vsx
+ nop
+
+ sync
+ # Atomic DEC
+ ld r3,STACK_FRAME_PARAM(1)(sp)
+1: lwarx r4,0,r3
+ addi r4,r4,-1
+ stwcx. r4,0,r3
+ bne- 1b
+
+2: ld r3,STACK_FRAME_PARAM(0)(sp)
+ bl check_vsx
+ nop
+ cmpdi r3,0
+ bne 3f
+ ld r4,STACK_FRAME_PARAM(2)(sp)
+ ld r5,0(r4)
+ cmpwi r5,0
+ bne 2b
+
+3: POP_BASIC_STACK(512)
+ blr
+FUNC_END(preempt_vsx)
diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c
new file mode 100644
index 000000000000..6387f03a0a6a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This test attempts to see if the VSX registers change across preemption.
+ * There is no way to be sure preemption happened so this test just
+ * uses many threads and a long wait. As such, a successful test
+ * doesn't mean much but a failure is bad.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+#include "utils.h"
+
+/* Time to wait for workers to get preempted (seconds) */
+#define PREEMPT_TIME 20
+/*
+ * Factor by which to multiply number of online CPUs for total number of
+ * worker threads
+ */
+#define THREAD_FACTOR 8
+
+/*
+ * Ensure there are twice the number of non-volatile VSX regs!
+ * check_vsx() is going to use the other half as space to put the live
+ * registers before calling vsx_memcmp().
+ */
+__thread vector int varray[24] = {
+ {1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10,11,12},
+ {13,14,15,16}, {17,18,19,20}, {21,22,23,24},
+ {25,26,27,28}, {29,30,31,32}, {33,34,35,36},
+ {37,38,39,40}, {41,42,43,44}, {45,46,47,48}
+};
+
+int threads_starting;
+int running;
+
+extern long preempt_vsx(vector int *varray, int *threads_starting, int *running);
+
+long vsx_memcmp(vector int *a) {
+ vector int zero = {0, 0, 0, 0};
+ int i;
+
+ FAIL_IF(a != varray);
+
+ for(i = 0; i < 12; i++) {
+ if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) {
+ fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12);
+ return 2;
+ }
+ }
+
+ if (memcmp(a, &a[12], 12 * sizeof(vector int))) {
+ long *p = (long *)a;
+ fprintf(stderr, "VSX mismatch\n");
+ for (i = 0; i < 24; i=i+2)
+ fprintf(stderr, "%d: 0x%08lx%08lx | 0x%08lx%08lx\n",
+ i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]);
+ return 1;
+ }
+ return 0;
+}
+
+void *preempt_vsx_c(void *p)
+{
+ int i, j;
+ long rc;
+ srand(pthread_self());
+ for (i = 0; i < 12; i++)
+ for (j = 0; j < 4; j++) {
+ varray[i][j] = rand();
+ /* Don't want zero because it hides kernel problems */
+ if (varray[i][j] == 0)
+ j--;
+ }
+ rc = preempt_vsx(varray, &threads_starting, &running);
+ if (rc == 2)
+ fprintf(stderr, "Caught zeros in VSX compares\n");
+ return (void *)rc;
+}
+
+int test_preempt_vsx(void)
+{
+ int i, rc, threads;
+ pthread_t *tids;
+
+ threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
+ tids = malloc(threads * sizeof(pthread_t));
+ FAIL_IF(!tids);
+
+ running = true;
+ threads_starting = threads;
+ for (i = 0; i < threads; i++) {
+ rc = pthread_create(&tids[i], NULL, preempt_vsx_c, NULL);
+ FAIL_IF(rc);
+ }
+
+ setbuf(stdout, NULL);
+ /* Not really necessary but nice to wait for every thread to start */
+ printf("\tWaiting for %d workers to start...", threads_starting);
+ while(threads_starting)
+ asm volatile("": : :"memory");
+ printf("done\n");
+
+ printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
+ sleep(PREEMPT_TIME);
+ printf("done\n");
+
+ printf("\tStopping workers...");
+ /*
+ * Workers check this value on every loop iteration: in preempt_vsx,
+ * 'cmpwi r5,0; bne 2b', where r5 holds the loaded value of running.
+ */
+ running = 0;
+ for (i = 0; i < threads; i++) {
+ void *rc_p;
+ pthread_join(tids[i], &rc_p);
+
+ /*
+ * The harness will report the failure here; look at why preempt_vsx
+ * returned a non-zero value.
+ */
+ if ((long) rc_p)
+ printf("oops\n");
+ FAIL_IF((long) rc_p);
+ }
+ printf("done\n");
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_preempt_vsx, "vsx_preempt");
+}
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
new file mode 100644
index 000000000000..f0eef27458e2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -0,0 +1,13 @@
+TEST_PROGS := signal signal_tm
+
+all: $(TEST_PROGS)
+
+$(TEST_PROGS): ../harness.c ../utils.c signal.S
+
+CFLAGS += -maltivec
+signal_tm: CFLAGS += -mhtm
+
+include ../../lib.mk
+
+clean:
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/signal/signal.S b/tools/testing/selftests/powerpc/signal/signal.S
new file mode 100644
index 000000000000..7043d521df0a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/signal.S
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "../basic_asm.h"
+
+/* long signal_self(pid_t pid, int sig); */
+FUNC_START(signal_self)
+ li r0,37 /* sys_kill */
+ /* r3 already has our pid in it */
+ /* r4 already has signal type in it */
+ sc
+ bc 4,3,1f
+ subfze r3,r3
+1: blr
+FUNC_END(signal_self)
+
+/* long tm_signal_self(pid_t pid, int sig, int *ret); */
+FUNC_START(tm_signal_self)
+ PUSH_BASIC_STACK(8)
+ std r5,STACK_FRAME_PARAM(0)(sp) /* ret */
+ tbegin.
+ beq 1f
+ tsuspend.
+ li r0,37 /* sys_kill */
+ /* r3 already has our pid in it */
+ /* r4 already has signal type in it */
+ sc
+ ld r5,STACK_FRAME_PARAM(0)(sp) /* ret */
+ bc 4,3,2f
+ subfze r3,r3
+2: std r3,0(r5)
+ tabort. 0
+ tresume. /* Resume so the hardware can clean up; jumps back to tbegin, then to 1: */
+ /*
+ * Transaction should be proper doomed and we should never get
+ * here
+ */
+ li r3,1
+ POP_BASIC_STACK(8)
+ blr
+1: li r3,0
+ POP_BASIC_STACK(8)
+ blr
+FUNC_END(tm_signal_self)
diff --git a/tools/testing/selftests/powerpc/signal/signal.c b/tools/testing/selftests/powerpc/signal/signal.c
new file mode 100644
index 000000000000..e7dedd28b3c2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/signal.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Sending one self a signal should always get delivered.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+
+#define MAX_ATTEMPT 500000
+#define TIMEOUT 5
+
+extern long signal_self(pid_t pid, int sig);
+
+static sig_atomic_t signaled;
+static sig_atomic_t fail;
+
+static void signal_handler(int sig)
+{
+ if (sig == SIGUSR1)
+ signaled = 1;
+ else
+ fail = 1;
+}
+
+static int test_signal()
+{
+ int i;
+ struct sigaction act;
+ pid_t ppid = getpid();
+ pid_t pid;
+
+ act.sa_handler = signal_handler;
+ act.sa_flags = 0;
+ sigemptyset(&act.sa_mask);
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction SIGUSR1");
+ exit(1);
+ }
+ if (sigaction(SIGALRM, &act, NULL) < 0) {
+ perror("sigaction SIGALRM");
+ exit(1);
+ }
+
+ /* Don't do this MAX_ATTEMPT times; it's simply too long */
+ for(i = 0; i < 1000; i++) {
+ pid = fork();
+ if (pid == -1) {
+ perror("fork");
+ exit(1);
+ }
+ if (pid == 0) {
+ signal_self(ppid, SIGUSR1);
+ exit(1);
+ } else {
+ alarm(0); /* Disable any pending */
+ alarm(2);
+ while (!signaled && !fail)
+ asm volatile("": : :"memory");
+ if (!signaled) {
+ fprintf(stderr, "Didn't get signal from child\n");
+ FAIL_IF(1); /* For the line number */
+ }
+ /* Otherwise we'll loop too fast and fork() will eventually fail */
+ waitpid(pid, NULL, 0);
+ }
+ }
+
+ for (i = 0; i < MAX_ATTEMPT; i++) {
+ long rc;
+
+ alarm(0); /* Disable any pending */
+ signaled = 0;
+ alarm(TIMEOUT);
+ rc = signal_self(ppid, SIGUSR1);
+ if (rc) {
+ fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx",
+ i, fail, rc);
+ FAIL_IF(1); /* For the line number */
+ }
+ while (!signaled && !fail)
+ asm volatile("": : :"memory");
+ if (!signaled) {
+ fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx",
+ i, fail, rc);
+ FAIL_IF(1); /* For the line number */
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ test_harness_set_timeout(300);
+ return test_harness(test_signal, "signal");
+}
diff --git a/tools/testing/selftests/powerpc/signal/signal_tm.c b/tools/testing/selftests/powerpc/signal/signal_tm.c
new file mode 100644
index 000000000000..2e7451a37cc6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/signal_tm.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Sending one self a signal should always get delivered.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+#include "../tm/tm.h"
+
+#define MAX_ATTEMPT 500000
+#define TIMEOUT 10
+
+extern long tm_signal_self(pid_t pid, int sig, long *ret);
+
+static sig_atomic_t signaled;
+static sig_atomic_t fail;
+
+static void signal_handler(int sig)
+{
+ if (tcheck_active()) {
+ fail = 2;
+ return;
+ }
+
+ if (sig == SIGUSR1)
+ signaled = 1;
+ else
+ fail = 1;
+}
+
+static int test_signal_tm()
+{
+ int i;
+ struct sigaction act;
+
+ act.sa_handler = signal_handler;
+ act.sa_flags = 0;
+ sigemptyset(&act.sa_mask);
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction SIGUSR1");
+ exit(1);
+ }
+ if (sigaction(SIGALRM, &act, NULL) < 0) {
+ perror("sigaction SIGALRM");
+ exit(1);
+ }
+
+ SKIP_IF(!have_htm());
+
+ for (i = 0; i < MAX_ATTEMPT; i++) {
+ /*
+ * If anything goes wrong in the assembly and we fail to set ret
+ * (because of TM aborting at an awkward point), this sentinel causes a failure.
+ */
+ long ret = 0xdead;
+ long rc = 0xbeef;
+
+ alarm(0); /* Disable any pending */
+ signaled = 0;
+ alarm(TIMEOUT);
+ FAIL_IF(tcheck_transactional());
+ rc = tm_signal_self(getpid(), SIGUSR1, &ret);
+ if (ret == 0xdead)
+ /*
+ * This means the transaction aborted before we even got to
+ * the suspend; unlikely, but it happens.
+ * It also means we might never make forward progress; the
+ * alarm() will trip eventually.
+ */
+ continue;
+
+ if (rc || ret) {
+ /* Ret is actually an errno */
+ printf("TEXASR 0x%016lx, TFIAR 0x%016lx\n",
+ __builtin_get_texasr(), __builtin_get_tfiar());
+ fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx ret=0x%lx\n",
+ i, fail, rc, ret);
+ FAIL_IF(ret);
+ }
+ while(!signaled && !fail)
+ asm volatile("": : :"memory");
+ if (!signaled) {
+ fprintf(stderr, "(%d) Fail reason: %d rc=0x%lx ret=0x%lx\n",
+ i, fail, rc, ret);
+ FAIL_IF(fail); /* For the line number */
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_signal_tm, "signal_tm");
+}
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 9d301d785d9e..c6c53c82fdd6 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,5 +1,8 @@
+SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu \
+ tm-signal-context-chk-vmx tm-signal-context-chk-vsx
+
TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
- tm-vmxcopy tm-fork tm-tar tm-tmspr tm-exec tm-execed
+ tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS)
all: $(TEST_PROGS)
@@ -11,6 +14,9 @@ tm-syscall: tm-syscall-asm.S
tm-syscall: CFLAGS += -I../../../../../usr/include
tm-tmspr: CFLAGS += -pthread
+$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
+$(SIGNAL_CONTEXT_CHK_TESTS): CFLAGS += -mhtm -m64 -mvsx
+
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
new file mode 100644
index 000000000000..c760debbd5ad
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Test the kernel's signal frame code.
+ *
+ * The kernel sets up two sets of ucontexts if the signal was to be
+ * delivered while the thread was in a transaction.
+ * Expected behaviour is that the checkpointed state is in the user
+ * context passed to the signal handler. The speculated state can be
+ * accessed with the uc_link pointer.
+ *
+ * The rationale for this is that if TM unaware code (which linked
+ * against TM libs) installs a signal handler it will not know of the
+ * speculative nature of the 'live' registers and may infer the wrong
+ * thing.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+#include "tm.h"
+
+#define MAX_ATTEMPT 500000
+
+#define NV_FPU_REGS 18
+
+long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
+
+/* Be sure there are 2x as many as there are NV FPU regs (2x18) */
+static double fps[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18
+};
+
+static sig_atomic_t fail;
+
+static void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+ int i;
+ ucontext_t *ucp = uc;
+ ucontext_t *tm_ucp = ucp->uc_link;
+
+ for (i = 0; i < NV_FPU_REGS && !fail; i++) {
+ fail = (ucp->uc_mcontext.fp_regs[i + 14] != fps[i]);
+ fail |= (tm_ucp->uc_mcontext.fp_regs[i + 14] != fps[i + NV_FPU_REGS]);
+ if (fail)
+ printf("Failed on %d FP %g or %g\n", i, ucp->uc_mcontext.fp_regs[i + 14], tm_ucp->uc_mcontext.fp_regs[i + 14]);
+ }
+}
+
+static int tm_signal_context_chk_fpu()
+{
+ struct sigaction act;
+ int i;
+ long rc;
+ pid_t pid = getpid();
+
+ SKIP_IF(!have_htm());
+
+ act.sa_sigaction = signal_usr1;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction sigusr1");
+ exit(1);
+ }
+
+ i = 0;
+ while (i < MAX_ATTEMPT && !fail) {
+ rc = tm_signal_self_context_load(pid, NULL, fps, NULL, NULL);
+ FAIL_IF(rc != pid);
+ i++;
+ }
+
+ return fail;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_context_chk_fpu, "tm_signal_context_chk_fpu");
+}
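All four tm-signal-context-chk tests follow the pattern described in the header comment above: the handler receives the checkpointed register state in the ucontext it is passed and the speculative (transactional) state through uc_link. A stripped-down sketch of just that access pattern, for one non-volatile GPR (powerpc64-only; the register index and the standalone setup are illustrative, not part of this patch):

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

/* Compare one checkpointed vs speculative GPR (r14) when a signal
 * lands inside a transaction. gp_regs indexing matches the tests above.
 */
static void handler(int sig, siginfo_t *info, void *uc)
{
	ucontext_t *ucp = uc;			/* checkpointed state */
	ucontext_t *tm_ucp = ucp->uc_link;	/* speculative state */

	if (!tm_ucp)
		return;	/* signal arrived outside a transaction */

	printf("checkpointed r14=%lu speculative r14=%lu\n",
	       (unsigned long)ucp->uc_mcontext.gp_regs[14],
	       (unsigned long)tm_ucp->uc_mcontext.gp_regs[14]);
}

int main(void)
{
	struct sigaction act = { .sa_sigaction = handler,
				 .sa_flags = SA_SIGINFO };

	sigemptyset(&act.sa_mask);
	return sigaction(SIGUSR1, &act, NULL);
}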
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
new file mode 100644
index 000000000000..df91330a08ef
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Test the kernel's signal frame code.
+ *
+ * The kernel sets up two sets of ucontexts if the signal was to be
+ * delivered while the thread was in a transaction.
+ * Expected behaviour is that the checkpointed state is in the user
+ * context passed to the signal handler. The speculated state can be
+ * accessed with the uc_link pointer.
+ *
+ * The rationale for this is that if TM unaware code (which linked
+ * against TM libs) installs a signal handler it will not know of the
+ * speculative nature of the 'live' registers and may infer the wrong
+ * thing.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+#include "tm.h"
+
+#define MAX_ATTEMPT 500000
+
+#define NV_GPR_REGS 18
+
+long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
+
+static sig_atomic_t fail;
+
+static long gps[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18};
+
+static void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+ int i;
+ ucontext_t *ucp = uc;
+ ucontext_t *tm_ucp = ucp->uc_link;
+
+ for (i = 0; i < NV_GPR_REGS && !fail; i++) {
+ fail = (ucp->uc_mcontext.gp_regs[i + 14] != gps[i]);
+ fail |= (tm_ucp->uc_mcontext.gp_regs[i + 14] != gps[i + NV_GPR_REGS]);
+ if (fail)
+ printf("Failed on %d GPR %lu or %lu\n", i,
+ ucp->uc_mcontext.gp_regs[i + 14], tm_ucp->uc_mcontext.gp_regs[i + 14]);
+ }
+}
+
+static int tm_signal_context_chk_gpr()
+{
+ struct sigaction act;
+ int i;
+ long rc;
+ pid_t pid = getpid();
+
+ SKIP_IF(!have_htm());
+
+ act.sa_sigaction = signal_usr1;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction sigusr1");
+ exit(1);
+ }
+
+ i = 0;
+ while (i < MAX_ATTEMPT && !fail) {
+ rc = tm_signal_self_context_load(pid, gps, NULL, NULL, NULL);
+ FAIL_IF(rc != pid);
+ i++;
+ }
+
+ return fail;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_context_chk_gpr, "tm_signal_context_chk_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
new file mode 100644
index 000000000000..f0ee55fd5185
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Test the kernel's signal frame code.
+ *
+ * The kernel sets up two sets of ucontexts if the signal was to be
+ * delivered while the thread was in a transaction.
+ * Expected behaviour is that the checkpointed state is in the user
+ * context passed to the signal handler. The speculated state can be
+ * accessed with the uc_link pointer.
+ *
+ * The rationale for this is that if TM unaware code (which linked
+ * against TM libs) installs a signal handler it will not know of the
+ * speculative nature of the 'live' registers and may infer the wrong
+ * thing.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+#include "tm.h"
+
+#define MAX_ATTEMPT 500000
+
+#define NV_VMX_REGS 12
+
+long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
+
+static sig_atomic_t fail;
+
+vector int vms[] = {
+ {1, 2, 3, 4 },{5, 6, 7, 8 },{9, 10,11,12},
+ {13,14,15,16},{17,18,19,20},{21,22,23,24},
+ {25,26,27,28},{29,30,31,32},{33,34,35,36},
+ {37,38,39,40},{41,42,43,44},{45,46,47,48},
+ {-1, -2, -3, -4}, {-5, -6, -7, -8}, {-9, -10,-11,-12},
+ {-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24},
+ {-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36},
+ {-37,-38,-39,-40},{-41,-42,-43,-44},{-45,-46,-47,-48}
+};
+
+static void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+ int i;
+ ucontext_t *ucp = uc;
+ ucontext_t *tm_ucp = ucp->uc_link;
+
+ for (i = 0; i < NV_VMX_REGS && !fail; i++) {
+ fail = memcmp(ucp->uc_mcontext.v_regs->vrregs[i + 20],
+ &vms[i], sizeof(vector int));
+ fail |= memcmp(tm_ucp->uc_mcontext.v_regs->vrregs[i + 20],
+ &vms[i + NV_VMX_REGS], sizeof (vector int));
+
+ if (fail) {
+ int j;
+
+ fprintf(stderr, "Failed on %d vmx 0x", i);
+ for (j = 0; j < 4; j++)
+ fprintf(stderr, "%04x", ucp->uc_mcontext.v_regs->vrregs[i + 20][j]);
+ fprintf(stderr, " vs 0x");
+ for (j = 0 ; j < 4; j++)
+ fprintf(stderr, "%04x", tm_ucp->uc_mcontext.v_regs->vrregs[i + 20][j]);
+ fprintf(stderr, "\n");
+ }
+ }
+}
+
+static int tm_signal_context_chk()
+{
+ struct sigaction act;
+ int i;
+ long rc;
+ pid_t pid = getpid();
+
+ SKIP_IF(!have_htm());
+
+ act.sa_sigaction = signal_usr1;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction sigusr1");
+ exit(1);
+ }
+
+ i = 0;
+ while (i < MAX_ATTEMPT && !fail) {
+ rc = tm_signal_self_context_load(pid, NULL, NULL, vms, NULL);
+ FAIL_IF(rc != pid);
+ i++;
+ }
+
+ return fail;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_context_chk, "tm_signal_context_chk_vmx");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
new file mode 100644
index 000000000000..b99c3d835957
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Test the kernel's signal frame code.
+ *
+ * The kernel sets up two sets of ucontexts if the signal was to be
+ * delivered while the thread was in a transaction.
+ * Expected behaviour is that the checkpointed state is in the user
+ * context passed to the signal handler. The speculated state can be
+ * accessed with the uc_link pointer.
+ *
+ * The rationale for this is that if TM unaware code (which linked
+ * against TM libs) installs a signal handler it will not know of the
+ * speculative nature of the 'live' registers and may infer the wrong
+ * thing.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <altivec.h>
+
+#include "utils.h"
+#include "tm.h"
+
+#define MAX_ATTEMPT 500000
+
+#define NV_VSX_REGS 12
+
+long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss);
+
+static sig_atomic_t fail;
+
+vector int vss[] = {
+ {1, 2, 3, 4 },{5, 6, 7, 8 },{9, 10,11,12},
+ {13,14,15,16},{17,18,19,20},{21,22,23,24},
+ {25,26,27,28},{29,30,31,32},{33,34,35,36},
+ {37,38,39,40},{41,42,43,44},{45,46,47,48},
+ {-1, -2, -3, -4 },{-5, -6, -7, -8 },{-9, -10,-11,-12},
+ {-13,-14,-15,-16},{-17,-18,-19,-20},{-21,-22,-23,-24},
+ {-25,-26,-27,-28},{-29,-30,-31,-32},{-33,-34,-35,-36},
+ {-37,-38,-39,-40},{-41,-42,-43,-44},{-45,-46,-47,-48}
+};
+
+static void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+ int i;
+ uint8_t vsc[sizeof(vector int)];
+ uint8_t vst[sizeof(vector int)];
+ ucontext_t *ucp = uc;
+ ucontext_t *tm_ucp = ucp->uc_link;
+
+ /*
+ * The other half of the VSX regs will be after v_regs.
+ *
+ * In short, the vmx_reserve array holds everything. v_regs is a
+ * 16-byte-aligned pointer into vmx_reserve (which itself may or may
+ * not be 16-byte aligned) marking where the v_regs structure lives.
+ * Half of the VSX registers sit directly after v_regs, so that is
+ * the easiest place to find them, as done below.
+ */
+ long *vsx_ptr = (long *)(ucp->uc_mcontext.v_regs + 1);
+ long *tm_vsx_ptr = (long *)(tm_ucp->uc_mcontext.v_regs + 1);
+ for (i = 0; i < NV_VSX_REGS && !fail; i++) {
+ memcpy(vsc, &ucp->uc_mcontext.fp_regs[i + 20], 8);
+ memcpy(vsc + 8, &vsx_ptr[20 + i], 8);
+ fail = memcmp(vsc, &vss[i], sizeof(vector int));
+ memcpy(vst, &tm_ucp->uc_mcontext.fp_regs[i + 20], 8);
+ memcpy(vst + 8, &tm_vsx_ptr[20 + i], 8);
+ fail |= memcmp(vst, &vss[i + NV_VSX_REGS], sizeof(vector int));
+
+ if (fail) {
+ int j;
+
+ fprintf(stderr, "Failed on %d vsx 0x", i);
+ for (j = 0; j < 16; j++)
+ fprintf(stderr, "%02x", vsc[j]);
+ fprintf(stderr, " vs 0x");
+ for (j = 0; j < 16; j++)
+ fprintf(stderr, "%02x", vst[j]);
+ fprintf(stderr, "\n");
+ }
+ }
+}
+
+static int tm_signal_context_chk()
+{
+ struct sigaction act;
+ int i;
+ long rc;
+ pid_t pid = getpid();
+
+ SKIP_IF(!have_htm());
+
+ act.sa_sigaction = signal_usr1;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &act, NULL) < 0) {
+ perror("sigaction sigusr1");
+ exit(1);
+ }
+
+ i = 0;
+ while (i < MAX_ATTEMPT && !fail) {
+ rc = tm_signal_self_context_load(pid, NULL, NULL, NULL, vss);
+ FAIL_IF(rc != pid);
+ i++;
+ }
+
+ return fail;
+}
+
+int main(void)
+{
+ return test_harness(tm_signal_context_chk, "tm_signal_context_chk_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal.S b/tools/testing/selftests/powerpc/tm/tm-signal.S
new file mode 100644
index 000000000000..4e13e8b3a96f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "../basic_asm.h"
+#include "../gpr_asm.h"
+#include "../fpu_asm.h"
+#include "../vmx_asm.h"
+#include "../vsx_asm.h"
+
+/*
+ * Large caveat here being that the caller cannot expect the
+ * signal to always be sent! The hardware can (AND WILL!) abort
+ * the transaction between the tbegin and the tsuspend (however
+ * unlikely it seems or infrequently it actually happens).
+ * You have been warned.
+ */
+/* long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps, vector int *vms, vector int *vss); */
+FUNC_START(tm_signal_self_context_load)
+ PUSH_BASIC_STACK(512)
+ /*
+ * We don't strictly need to save and restore these (it depends on
+ * whether we end up using them), but doing so keeps the logic simple.
+ */
+ PUSH_VMX(STACK_FRAME_LOCAL(5,0),r8)
+ PUSH_FPU(512)
+ PUSH_NVREGS_BELOW_FPU(512)
+ std r3, STACK_FRAME_PARAM(0)(sp) /* pid */
+ std r4, STACK_FRAME_PARAM(1)(sp) /* gprs */
+ std r5, STACK_FRAME_PARAM(2)(sp) /* fps */
+ std r6, STACK_FRAME_PARAM(3)(sp) /* vms */
+ std r7, STACK_FRAME_PARAM(4)(sp) /* vss */
+
+ ld r3, STACK_FRAME_PARAM(1)(sp)
+ cmpdi r3, 0
+ beq skip_gpr_lc
+ bl load_gpr
+skip_gpr_lc:
+ ld r3, STACK_FRAME_PARAM(2)(sp)
+ cmpdi r3, 0
+ beq skip_fpu_lc
+ bl load_fpu
+skip_fpu_lc:
+ ld r3, STACK_FRAME_PARAM(3)(sp)
+ cmpdi r3, 0
+ beq skip_vmx_lc
+ bl load_vmx
+skip_vmx_lc:
+ ld r3, STACK_FRAME_PARAM(4)(sp)
+ cmpdi r3, 0
+ beq skip_vsx_lc
+ bl load_vsx
+skip_vsx_lc:
+ /*
+ * Set r3 (the return value) before tbegin. Use the pid as a known
+ * 'all good' return value; zero indicates a transaction that was
+ * never doomed.
+ */
+ ld r3, STACK_FRAME_PARAM(0)(sp)
+ tbegin.
+ beq 1f
+ tsuspend. /* Can't enter a syscall transactionally */
+ ld r3, STACK_FRAME_PARAM(1)(sp)
+ cmpdi r3, 0
+ beq skip_gpr_lt
+ /* Get the second half of the array */
+ addi r3, r3, 8 * 18
+ bl load_gpr
+skip_gpr_lt:
+ ld r3, STACK_FRAME_PARAM(2)(sp)
+ cmpdi r3, 0
+ beq skip_fpu_lt
+ /* Get the second half of the array */
+ addi r3, r3, 8 * 18
+ bl load_fpu
+skip_fpu_lt:
+ ld r3, STACK_FRAME_PARAM(3)(sp)
+ cmpdi r3, 0
+ beq skip_vmx_lt
+ /* Get the second half of the array */
+ addi r3, r3, 16 * 12
+ bl load_vmx
+skip_vmx_lt:
+ ld r3, STACK_FRAME_PARAM(4)(sp)
+ cmpdi r3, 0
+ beq skip_vsx_lt
+ /* Get the second half of the array */
+ addi r3, r3, 16 * 12
+ bl load_vsx
+skip_vsx_lt:
+ li r0, 37 /* sys_kill */
+ ld r3, STACK_FRAME_PARAM(0)(sp) /* pid */
+ li r4, 10 /* SIGUSR1 */
+ sc /* Taking the signal will doom the transaction */
+ tabort. 0
+ tresume. /* Be super sure we abort */
+ /*
+ * Resuming the doomed transaction makes the hardware clean up and
+ * roll back, so we end up at 1: below. Nothing between tresume.
+ * and 1: should ever run.
+ */
+ li r3, 0
+ 1:
+ POP_VMX(STACK_FRAME_LOCAL(5,0),r4)
+ POP_FPU(512)
+ POP_NVREGS_BELOW_FPU(512)
+ POP_BASIC_STACK(512)
+ blr
+FUNC_END(tm_signal_self_context_load)
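As the caveat above notes, the hardware may abort the transaction before the tsuspend., so a single call can return the pid without the signal ever having been delivered; the harnesses in this series therefore loop. A minimal sketch of that retry pattern (the vector pointer parameters are simplified to void * here, and stress_signal_context() is illustrative, not part of the patch):

    #include <signal.h>
    #include <unistd.h>

    #define MAX_ATTEMPT 500000

    /* Implemented by tm-signal.S above; pointer types simplified for the sketch. */
    extern long tm_signal_self_context_load(pid_t pid, long *gprs, double *fps,
                                            void *vms, void *vss);

    static int stress_signal_context(volatile sig_atomic_t *fail, void *vss)
    {
        pid_t pid = getpid();
        int i;

        for (i = 0; i < MAX_ATTEMPT && !*fail; i++) {
            /* pid back means the transaction ran and was rolled back. */
            if (tm_signal_self_context_load(pid, NULL, NULL, NULL, vss) != pid)
                return 1;
        }
        return *fail;
    }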
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 60318bad7d7a..2c8da74304e7 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -52,4 +52,31 @@ static inline bool failure_is_nesting(void)
return (__builtin_get_texasru() & 0x400000);
}
+static inline int tcheck(void)
+{
+ long cr;
+ asm volatile ("tcheck 0" : "=r"(cr) : : "cr0");
+ return (cr >> 28) & 4;
+}
+
+static inline bool tcheck_doomed(void)
+{
+ return tcheck() & 8;
+}
+
+static inline bool tcheck_active(void)
+{
+ return tcheck() & 4;
+}
+
+static inline bool tcheck_suspended(void)
+{
+ return tcheck() & 2;
+}
+
+static inline bool tcheck_transactional(void)
+{
+ return tcheck() & 6;
+}
+
#endif /* _SELFTESTS_POWERPC_TM_TM_H */
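A hedged sketch of how a test might use the tcheck_*() helpers added above; report_tm_state() is illustrative and assumes it runs on hardware with HTM support, whether inside or outside a transaction.

    #include <stdio.h>

    #include "tm.h"   /* the tcheck_*() helpers added above */

    /* Illustrative only: report the state the tcheck helpers decode. */
    static void report_tm_state(void)
    {
        if (!tcheck_transactional()) {
            printf("not transactional\n");
            return;
        }
        if (tcheck_doomed())
            printf("transaction is doomed\n");
        else if (tcheck_suspended())
            printf("transaction is suspended\n");
        else if (tcheck_active())
            printf("transaction is active\n");
    }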
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h
index fbd33e52ef8f..53405e8a52ab 100644
--- a/tools/testing/selftests/powerpc/utils.h
+++ b/tools/testing/selftests/powerpc/utils.h
@@ -22,7 +22,7 @@ typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
-
+void test_harness_set_timeout(uint64_t time);
int test_harness(int (test_function)(void), char *name);
extern void *get_auxv_entry(int type);
int pick_online_cpu(void);
@@ -32,10 +32,17 @@ static inline bool have_hwcap(unsigned long ftr)
return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr;
}
+#ifdef AT_HWCAP2
static inline bool have_hwcap2(unsigned long ftr2)
{
return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
}
+#else
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+ return false;
+}
+#endif
/* Yes, this is evil */
#define FAIL_IF(x) \
diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/vmx_asm.h
new file mode 100644
index 000000000000..2eaaeca9cf1d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vmx_asm.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "basic_asm.h"
+
+/* POS MUST BE 16 ALIGNED! */
+#define PUSH_VMX(pos,reg) \
+ li reg,pos; \
+ stvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v31,reg,%r1;
+
+/* POS MUST BE 16 ALIGNED! */
+#define POP_VMX(pos,reg) \
+ li reg,pos; \
+ lvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v31,reg,%r1;
+
+/*
+ * Careful: this will 'clobber' VMX registers v20-v31 (by design).
+ * Don't call this from C.
+ */
+FUNC_START(load_vmx)
+ li r5,0
+ lvx v20,r5,r3
+ addi r5,r5,16
+ lvx v21,r5,r3
+ addi r5,r5,16
+ lvx v22,r5,r3
+ addi r5,r5,16
+ lvx v23,r5,r3
+ addi r5,r5,16
+ lvx v24,r5,r3
+ addi r5,r5,16
+ lvx v25,r5,r3
+ addi r5,r5,16
+ lvx v26,r5,r3
+ addi r5,r5,16
+ lvx v27,r5,r3
+ addi r5,r5,16
+ lvx v28,r5,r3
+ addi r5,r5,16
+ lvx v29,r5,r3
+ addi r5,r5,16
+ lvx v30,r5,r3
+ addi r5,r5,16
+ lvx v31,r5,r3
+ blr
+FUNC_END(load_vmx)
diff --git a/tools/testing/selftests/powerpc/vsx_asm.h b/tools/testing/selftests/powerpc/vsx_asm.h
new file mode 100644
index 000000000000..d828bfb6ef2d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vsx_asm.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "basic_asm.h"
+
+/*
+ * Careful: this will 'clobber' VSX registers (by design). The VSX
+ * registers are volatile anyway, so unlike VMX this isn't so much of
+ * an issue, but it should still not be called from C.
+ */
+FUNC_START(load_vsx)
+ li r5,0
+ lxvx vs20,r5,r3
+ addi r5,r5,16
+ lxvx vs21,r5,r3
+ addi r5,r5,16
+ lxvx vs22,r5,r3
+ addi r5,r5,16
+ lxvx vs23,r5,r3
+ addi r5,r5,16
+ lxvx vs24,r5,r3
+ addi r5,r5,16
+ lxvx vs25,r5,r3
+ addi r5,r5,16
+ lxvx vs26,r5,r3
+ addi r5,r5,16
+ lxvx vs27,r5,r3
+ addi r5,r5,16
+ lxvx vs28,r5,r3
+ addi r5,r5,16
+ lxvx vs29,r5,r3
+ addi r5,r5,16
+ lxvx vs30,r5,r3
+ addi r5,r5,16
+ lxvx vs31,r5,r3
+ blr
+FUNC_END(load_vsx)
+
+FUNC_START(store_vsx)
+ li r5,0
+ stxvx vs20,r5,r3
+ addi r5,r5,16
+ stxvx vs21,r5,r3
+ addi r5,r5,16
+ stxvx vs22,r5,r3
+ addi r5,r5,16
+ stxvx vs23,r5,r3
+ addi r5,r5,16
+ stxvx vs24,r5,r3
+ addi r5,r5,16
+ stxvx vs25,r5,r3
+ addi r5,r5,16
+ stxvx vs26,r5,r3
+ addi r5,r5,16
+ stxvx vs27,r5,r3
+ addi r5,r5,16
+ stxvx vs28,r5,r3
+ addi r5,r5,16
+ stxvx vs29,r5,r3
+ addi r5,r5,16
+ stxvx vs30,r5,r3
+ addi r5,r5,16
+ stxvx vs31,r5,r3
+ blr
+FUNC_END(store_vsx)
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index a937a9d26b60..142c565bb351 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -7,3 +7,4 @@ mlock2-tests
on-fault-limit
transhuge-stress
userfaultfd
+mlock-random-test
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e4bb1de1d526..bbab7f4664ac 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -10,6 +10,7 @@ BINARIES += on-fault-limit
BINARIES += thuge-gen
BINARIES += transhuge-stress
BINARIES += userfaultfd
+BINARIES += mlock-random-test
all: $(BINARIES)
%: %.c
@@ -17,6 +18,9 @@ all: $(BINARIES)
userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h
$(CC) $(CFLAGS) -O2 -o $@ $< -lpthread
+mlock-random-test: mlock-random-test.c
+ $(CC) $(CFLAGS) -o $@ $< -lcap
+
../../../../usr/include/linux/kernel.h:
make -C ../../../.. headers_install
diff --git a/tools/testing/selftests/vm/mlock-random-test.c b/tools/testing/selftests/vm/mlock-random-test.c
new file mode 100644
index 000000000000..83de4f58d262
--- /dev/null
+++ b/tools/testing/selftests/vm/mlock-random-test.c
@@ -0,0 +1,293 @@
+/*
+ * Test mlock()/mlock2() when they are invoked on randomly
+ * chosen memory regions.
+ */
+#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/capability.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <time.h>
+#include "mlock2.h"
+
+#define CHUNK_UNIT (128 * 1024)
+#define MLOCK_RLIMIT_SIZE (CHUNK_UNIT * 2)
+#define MLOCK_WITHIN_LIMIT_SIZE CHUNK_UNIT
+#define MLOCK_OUTOF_LIMIT_SIZE (CHUNK_UNIT * 3)
+
+#define TEST_LOOP 100
+#define PAGE_ALIGN(size, ps) (((size) + ((ps) - 1)) & ~((ps) - 1))
+
+int set_cap_limits(rlim_t max)
+{
+ struct rlimit new;
+ cap_t cap = cap_init();
+
+ new.rlim_cur = max;
+ new.rlim_max = max;
+ if (setrlimit(RLIMIT_MEMLOCK, &new)) {
+ perror("setrlimit() returns error\n");
+ return -1;
+ }
+
+ /* drop capabilities including CAP_IPC_LOCK */
+ if (cap_set_proc(cap)) {
+ perror("cap_set_proc() returns error\n");
+ return -2;
+ }
+
+ return 0;
+}
+
+int get_proc_locked_vm_size(void)
+{
+ FILE *f;
+ int ret = -1;
+ char line[1024] = {0};
+ unsigned long lock_size = 0;
+
+ f = fopen("/proc/self/status", "r");
+ if (!f) {
+ perror("fopen");
+ return -1;
+ }
+
+ while (fgets(line, 1024, f)) {
+ if (strstr(line, "VmLck")) {
+ ret = sscanf(line, "VmLck:\t%8lu kB", &lock_size);
+ if (ret <= 0) {
+ printf("sscanf() on VmLck error: %s: %d\n",
+ line, ret);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return (int)(lock_size << 10);
+ }
+ }
+
+ perror("cann't parse VmLck in /proc/self/status\n");
+ fclose(f);
+ return -1;
+}
+
+/*
+ * Get the MMUPageSize of the memory region containing the given
+ * address, as reported by /proc/self/smaps.
+ *
+ * return value: 0 on error,
+ * otherwise the page size in bytes.
+ */
+int get_proc_page_size(unsigned long addr)
+{
+ FILE *smaps;
+ char *line = NULL;
+ unsigned long mmupage_size = 0;
+ size_t size = 0;
+
+ smaps = seek_to_smaps_entry(addr);
+ if (!smaps) {
+ printf("Unable to parse /proc/self/smaps\n");
+ return 0;
+ }
+
+ while (getline(&line, &size, smaps) > 0) {
+ if (!strstr(line, "MMUPageSize")) {
+ free(line);
+ line = NULL;
+ size = 0;
+ continue;
+ }
+
+ /* found the MMUPageSize of this section */
+ if (sscanf(line, "MMUPageSize: %8lu kB",
+ &mmupage_size) < 1) {
+ printf("Unable to parse smaps entry for Size:%s\n",
+ line);
+ break;
+ }
+
+ }
+ free(line);
+ if (smaps)
+ fclose(smaps);
+ return mmupage_size << 10;
+}
+
+/*
+ * Test mlock/mlock2() on the provided memory chunk.
+ * Every mlock/mlock2() call is expected to succeed (within rlimit).
+ *
+ * Given the allocated memory chunk [p, p + alloc_size), this
+ * test chooses start/len randomly and performs mlock/mlock2() on
+ * the [start, start + len) range, which always lies inside the
+ * allocated chunk.
+ *
+ * The chunk size alloc_size is within the rlimit, so every
+ * mlock/mlock2() call is expected to succeed.
+ *
+ * VmLck is assumed to be 0 before this test.
+ *
+ * return value: 0 - success
+ * else: failure
+ */
+int test_mlock_within_limit(char *p, int alloc_size)
+{
+ int i;
+ int ret = 0;
+ int locked_vm_size = 0;
+ struct rlimit cur;
+ int page_size = 0;
+
+ getrlimit(RLIMIT_MEMLOCK, &cur);
+ if (cur.rlim_cur < alloc_size) {
+ printf("alloc_size[%d] < %u rlimit,lead to mlock failure\n",
+ alloc_size, (unsigned int)cur.rlim_cur);
+ return -1;
+ }
+
+ srand(time(NULL));
+ for (i = 0; i < TEST_LOOP; i++) {
+ /*
+ * - choose mlock/mlock2 randomly
+ * - choose lock_size randomly but lock_size < alloc_size
+ * - choose start_offset randomly but p+start_offset+lock_size
+ * < p+alloc_size
+ */
+ int is_mlock = !!(rand() % 2);
+ int lock_size = rand() % alloc_size;
+ int start_offset = rand() % (alloc_size - lock_size);
+
+ if (is_mlock)
+ ret = mlock(p + start_offset, lock_size);
+ else
+ ret = mlock2_(p + start_offset, lock_size,
+ MLOCK_ONFAULT);
+
+ if (ret) {
+ printf("%s() failure at |%p(%d)| mlock:|%p(%d)|\n",
+ is_mlock ? "mlock" : "mlock2",
+ p, alloc_size,
+ p + start_offset, lock_size);
+ return ret;
+ }
+ }
+
+ /*
+ * Check VmLck left by the tests.
+ */
+ locked_vm_size = get_proc_locked_vm_size();
+ page_size = get_proc_page_size((unsigned long)p);
+ if (page_size == 0) {
+ printf("cannot get proc MMUPageSize\n");
+ return -1;
+ }
+
+ if (locked_vm_size > PAGE_ALIGN(alloc_size, page_size) + page_size) {
+ printf("test_mlock_within_limit() left VmLck:%d on %d chunk\n",
+ locked_vm_size, alloc_size);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Every mlock/mlock2() call here is expected to fail (over the rlimit).
+ *
+ * Given the allocated memory chunk [p, p + alloc_size), this
+ * test chooses start/len randomly and performs mlock/mlock2() on
+ * the [start, start + len) range.
+ *
+ * The chunk size alloc_size is above the rlimit, and the length to
+ * be locked is always at least the rlimit, so every mlock/mlock2()
+ * call is expected to fail. The locked page count must not
+ * increase as a side effect.
+ *
+ * return value: 0 - success
+ * else: failure
+ */
+int test_mlock_outof_limit(char *p, int alloc_size)
+{
+ int i;
+ int ret = 0;
+ int locked_vm_size = 0, old_locked_vm_size = 0;
+ struct rlimit cur;
+
+ getrlimit(RLIMIT_MEMLOCK, &cur);
+ if (cur.rlim_cur >= alloc_size) {
+ printf("alloc_size[%d] >%u rlimit, violates test condition\n",
+ alloc_size, (unsigned int)cur.rlim_cur);
+ return -1;
+ }
+
+ old_locked_vm_size = get_proc_locked_vm_size();
+ srand(time(NULL));
+ for (i = 0; i < TEST_LOOP; i++) {
+ int is_mlock = !!(rand() % 2);
+ int lock_size = (rand() % (alloc_size - cur.rlim_cur))
+ + cur.rlim_cur;
+ int start_offset = rand() % (alloc_size - lock_size);
+
+ if (is_mlock)
+ ret = mlock(p + start_offset, lock_size);
+ else
+ ret = mlock2_(p + start_offset, lock_size,
+ MLOCK_ONFAULT);
+ if (ret == 0) {
+ printf("%s() succeeds? on %p(%d) mlock%p(%d)\n",
+ is_mlock ? "mlock" : "mlock2",
+ p, alloc_size,
+ p + start_offset, lock_size);
+ return -1;
+ }
+ }
+
+ locked_vm_size = get_proc_locked_vm_size();
+ if (locked_vm_size != old_locked_vm_size) {
+ printf("tests leads to new mlocked page: old[%d], new[%d]\n",
+ old_locked_vm_size,
+ locked_vm_size);
+ return -1;
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ char *p = NULL;
+ int ret = 0;
+
+ if (set_cap_limits(MLOCK_RLIMIT_SIZE))
+ return -1;
+
+ p = malloc(MLOCK_WITHIN_LIMIT_SIZE);
+ if (p == NULL) {
+ perror("malloc() failure\n");
+ return -1;
+ }
+ ret = test_mlock_within_limit(p, MLOCK_WITHIN_LIMIT_SIZE);
+ if (ret)
+ return ret;
+ munlock(p, MLOCK_WITHIN_LIMIT_SIZE);
+ free(p);
+
+
+ p = malloc(MLOCK_OUTOF_LIMIT_SIZE);
+ if (p == NULL) {
+ perror("malloc() failure\n");
+ return -1;
+ }
+ ret = test_mlock_outof_limit(p, MLOCK_OUTOF_LIMIT_SIZE);
+ if (ret)
+ return ret;
+ munlock(p, MLOCK_OUTOF_LIMIT_SIZE);
+ free(p);
+
+ return 0;
+}
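The VmLck check in test_mlock_within_limit() above allows PAGE_ALIGN(alloc_size) plus one extra page because the malloc()ed chunk need not start on a page boundary, so its bytes can straddle one more page than its size suggests. A small worked example of that bound, assuming 4 KiB pages (the test itself reads MMUPageSize from /proc/self/smaps):

    #include <stdio.h>

    #define PAGE_ALIGN(size, ps)  (((size) + ((ps) - 1)) & ~((ps) - 1))

    int main(void)
    {
        unsigned long alloc_size = 128 * 1024;  /* CHUNK_UNIT */
        unsigned long page_size = 4096;         /* assumed here */

        /* An unaligned chunk covering N pages' worth of bytes can straddle N + 1 pages. */
        unsigned long bound = PAGE_ALIGN(alloc_size, page_size) + page_size;

        printf("VmLck may not exceed %lu bytes for a %lu byte chunk\n",
               bound, alloc_size);
        return 0;
    }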
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index 02ca5e0177c5..ff0cda2b19c9 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -1,33 +1,12 @@
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include <syscall.h>
-#include <errno.h>
#include <stdbool.h>
-
-#ifndef MLOCK_ONFAULT
-#define MLOCK_ONFAULT 1
-#endif
-
-#ifndef MCL_ONFAULT
-#define MCL_ONFAULT (MCL_FUTURE << 1)
-#endif
-
-static int mlock2_(void *start, size_t len, int flags)
-{
-#ifdef __NR_mlock2
- return syscall(__NR_mlock2, start, len, flags);
-#else
- errno = ENOSYS;
- return -1;
-#endif
-}
+#include "mlock2.h"
struct vm_boundaries {
unsigned long start;
@@ -138,46 +117,6 @@ static uint64_t get_kpageflags(unsigned long pfn)
return flags;
}
-static FILE *seek_to_smaps_entry(unsigned long addr)
-{
- FILE *file;
- char *line = NULL;
- size_t size = 0;
- unsigned long start, end;
- char perms[5];
- unsigned long offset;
- char dev[32];
- unsigned long inode;
- char path[BUFSIZ];
-
- file = fopen("/proc/self/smaps", "r");
- if (!file) {
- perror("fopen smaps");
- _exit(1);
- }
-
- while (getline(&line, &size, file) > 0) {
- if (sscanf(line, "%lx-%lx %s %lx %s %lu %s\n",
- &start, &end, perms, &offset, dev, &inode, path) < 6)
- goto next;
-
- if (start <= addr && addr < end)
- goto out;
-
-next:
- free(line);
- line = NULL;
- size = 0;
- }
-
- fclose(file);
- file = NULL;
-
-out:
- free(line);
- return file;
-}
-
#define VMFLAGS "VmFlags:"
static bool is_vmflag_set(unsigned long addr, const char *vmflag)
diff --git a/tools/testing/selftests/vm/mlock2.h b/tools/testing/selftests/vm/mlock2.h
new file mode 100644
index 000000000000..7ee062929d3e
--- /dev/null
+++ b/tools/testing/selftests/vm/mlock2.h
@@ -0,0 +1,62 @@
+#include <syscall.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifndef MLOCK_ONFAULT
+#define MLOCK_ONFAULT 1
+#endif
+
+#ifndef MCL_ONFAULT
+#define MCL_ONFAULT (MCL_FUTURE << 1)
+#endif
+
+static int mlock2_(void *start, size_t len, int flags)
+{
+#ifdef __NR_mlock2
+ return syscall(__NR_mlock2, start, len, flags);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+static FILE *seek_to_smaps_entry(unsigned long addr)
+{
+ FILE *file;
+ char *line = NULL;
+ size_t size = 0;
+ unsigned long start, end;
+ char perms[5];
+ unsigned long offset;
+ char dev[32];
+ unsigned long inode;
+ char path[BUFSIZ];
+
+ file = fopen("/proc/self/smaps", "r");
+ if (!file) {
+ perror("fopen smaps");
+ _exit(1);
+ }
+
+ while (getline(&line, &size, file) > 0) {
+ if (sscanf(line, "%lx-%lx %s %lx %s %lu %s\n",
+ &start, &end, perms, &offset, dev, &inode, path) < 6)
+ goto next;
+
+ if (start <= addr && addr < end)
+ goto out;
+
+next:
+ free(line);
+ line = NULL;
+ size = 0;
+ }
+
+ fclose(file);
+ file = NULL;
+
+out:
+ free(line);
+ return file;
+}
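mlock2_() above returns -1 with errno set to ENOSYS when __NR_mlock2 is not available at build time, so callers can tell "unsupported" apart from a genuine failure. A hedged usage sketch; lock_on_fault() is illustrative and not part of the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #include "mlock2.h"   /* mlock2_() and MLOCK_ONFAULT from above */

    /* Illustrative: prefer on-fault locking, fall back to plain mlock(). */
    static int lock_on_fault(void *buf, size_t len)
    {
        if (mlock2_(buf, len, MLOCK_ONFAULT) == 0)
            return 0;

        if (errno == ENOSYS)   /* no mlock2() in the build headers or kernel */
            return mlock(buf, len);

        perror("mlock2");
        return -1;
    }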
diff --git a/arch/arm64/kvm/emulate.c b/virt/kvm/arm/aarch32.c
index f87d8fbaa48d..528af4b2d09e 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/virt/kvm/arm/aarch32.c
@@ -22,8 +22,13 @@
*/
#include <linux/kvm_host.h>
-#include <asm/esr.h>
#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+
+#ifndef CONFIG_ARM64
+#define COMPAT_PSR_T_BIT PSR_T_BIT
+#define COMPAT_PSR_IT_MASK PSR_IT_MASK
+#endif
/*
* stolen from arch/arm/kernel/opcodes.c
@@ -52,16 +57,6 @@ static const unsigned short cc_map[16] = {
0 /* NV */
};
-static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
- u32 esr = kvm_vcpu_get_hsr(vcpu);
-
- if (esr & ESR_ELx_CV)
- return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
-
- return -1;
-}
-
/*
* Check if a trapped instruction should have been executed or not.
*/
@@ -114,15 +109,13 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
- BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
-
- if (!(cpsr & COMPAT_PSR_IT_MASK))
+ if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
@@ -146,7 +139,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
* kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 77e6ccf14901..27a1f6341d41 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -31,7 +31,6 @@
#include "trace.h"
static struct timecounter *timecounter;
-static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;
@@ -141,7 +140,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_RESTART;
}
- queue_work(wqueue, &timer->expired);
+ schedule_work(&timer->expired);
return HRTIMER_NORESTART;
}
@@ -446,13 +445,7 @@ int kvm_timer_hyp_init(void)
if (err) {
kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
host_vtimer_irq, err);
- goto out;
- }
-
- wqueue = create_singlethread_workqueue("kvm_arch_timer");
- if (!wqueue) {
- err = -ENOMEM;
- goto out_free;
+ return err;
}
kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -460,10 +453,6 @@ int kvm_timer_hyp_init(void)
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
- goto out;
-out_free:
- free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
-out:
return err;
}
@@ -518,7 +507,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
* VCPUs have the enabled variable set, before entering the guest, if
* the arch timers are enabled.
*/
- if (timecounter && wqueue)
+ if (timecounter)
timer->enabled = 1;
return 0;
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 7cffd9338c49..c8aeb7b91ec8 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -19,6 +19,7 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
@@ -167,3 +168,59 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}
+
+#ifdef CONFIG_ARM64
+/*
+ * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
+ * guest.
+ *
+ * @vcpu: the offending vcpu
+ *
+ * Returns:
+ * 1: GICV access successfully performed
+ * 0: Not a GICV access
+ * -1: Illegal GICV access
+ */
+int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ phys_addr_t fault_ipa;
+ void __iomem *addr;
+ int rd;
+
+ /* Build the full address */
+ fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+ /* If not for GICV, move on */
+ if (fault_ipa < vgic->vgic_cpu_base ||
+ fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
+ return 0;
+
+ /* Reject anything but a 32bit access */
+ if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
+ return -1;
+
+ /* Not aligned? Don't bother */
+ if (fault_ipa & 3)
+ return -1;
+
+ rd = kvm_vcpu_dabt_get_rd(vcpu);
+ addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
+ addr += fault_ipa - vgic->vgic_cpu_base;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu)) {
+ u32 data = vcpu_data_guest_to_host(vcpu,
+ vcpu_get_reg(vcpu, rd),
+ sizeof(u32));
+ writel_relaxed(data, addr);
+ } else {
+ u32 data = readl_relaxed(addr);
+ vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
+ sizeof(u32)));
+ }
+
+ return 1;
+}
+#endif
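The kernel-doc comment above pins down the contract: 1 means the GICV access was emulated and the guest can be resumed past the trapped instruction, 0 means the fault was not a GICV access at all, and -1 means the access was malformed. A sketch of a caller acting on that contract; handle_gicv_fault() is illustrative, and the real call site also determines the trapped instruction width rather than hardcoding it.

    /* Sketch only: act on __vgic_v2_perform_cpuif_access()'s return code. */
    static int handle_gicv_fault(struct kvm_vcpu *vcpu)
    {
        int ret = __vgic_v2_perform_cpuif_access(vcpu);

        if (ret == 1) {
            /*
             * Emulated: skip the trapped access and re-enter the guest.
             * The instruction width is hardcoded here for brevity.
             */
            kvm_skip_instr32(vcpu, true);
            return 1;
        }

        if (ret == 0)
            return 0;   /* not a GICV access: normal MMIO path handles it */

        return -1;      /* illegal GICV access: treat as an error */
    }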
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 5f8f80b4a224..3947095cc0a1 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -24,19 +24,6 @@
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
-#define read_gicreg(r) \
- ({ \
- u64 reg; \
- asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
- reg; \
- })
-
-#define write_gicreg(v,r) \
- do { \
- u64 __val = (v); \
- asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
- } while (0)
-
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
switch (lr & 0xf) {
@@ -335,9 +322,7 @@ void __hyp_text __vgic_v3_init_lrs(void)
__gic_v3_set_lr(0, i);
}
-static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
+u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
return read_gicreg(ICH_VTR_EL2);
}
-
-__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index a027569facfa..6e9c40eea208 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -423,6 +423,14 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
if (!kvm_arm_support_pmu_v3())
return -ENODEV;
+ /*
+ * We currently require an in-kernel VGIC to use the PMU emulation,
+ * because we do not support forwarding PMU overflow interrupts to
+ * userspace yet.
+ */
+ if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))
+ return -ENODEV;
+
if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
!kvm_arm_pmu_irq_initialized(vcpu))
return -ENXIO;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 83777c1cbae0..8cebfbc19e90 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -405,6 +405,10 @@ int kvm_vgic_hyp_init(void)
break;
case GIC_V3:
ret = vgic_v3_probe(gic_kvm_info);
+ if (!ret) {
+ static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
+ kvm_info("GIC system register CPU interface enabled\n");
+ }
break;
default:
ret = -ENODEV;
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b31a51a14efb..d918dcf26a5a 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -46,15 +46,9 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
* @ue: user api routing entry handle
* return 0 on success, -EINVAL on errors.
*/
-#ifdef KVM_CAP_X2APIC_API
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
-#else
-/* Remove this version and the ifdefery once merged into 4.8 */
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
- const struct kvm_irq_routing_entry *ue)
-#endif
{
int r = -EINVAL;
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93b5cde..ce1f4ed9daf4 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -71,7 +71,6 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
addr_ptr = &vgic->vgic_cpu_base;
alignment = SZ_4K;
break;
-#ifdef CONFIG_KVM_ARM_VGIC_V3
case KVM_VGIC_V3_ADDR_TYPE_DIST:
type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
addr_ptr = &vgic->vgic_dist_base;
@@ -82,7 +81,6 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
addr_ptr = &vgic->vgic_redist_base;
alignment = SZ_64K;
break;
-#endif
default:
r = -ENODEV;
goto out;
@@ -219,52 +217,65 @@ int kvm_register_vgic_device(unsigned long type)
ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
KVM_DEV_TYPE_ARM_VGIC_V2);
break;
-#ifdef CONFIG_KVM_ARM_VGIC_V3
case KVM_DEV_TYPE_ARM_VGIC_V3:
ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
KVM_DEV_TYPE_ARM_VGIC_V3);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
if (ret)
break;
ret = kvm_vgic_register_its_device();
- break;
#endif
+ break;
}
return ret;
}
-/** vgic_attr_regs_access: allows user space to read/write VGIC registers
- *
- * @dev: kvm device handle
- * @attr: kvm device attribute
- * @reg: address the value is read or written
- * @is_write: write flag
- *
- */
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
+struct vgic_reg_attr {
+ struct kvm_vcpu *vcpu;
gpa_t addr;
- int cpuid, ret, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- int vcpu_lock_idx = -1;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ struct vgic_reg_attr *reg_attr)
+{
+ int cpuid;
cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
KVM_DEV_ARM_VGIC_CPUID_SHIFT;
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- mutex_lock(&dev->kvm->lock);
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+ return -EINVAL;
- ret = vgic_init(dev->kvm);
- if (ret)
- goto out;
+ reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
- goto out;
+ return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+ struct kvm_vcpu *tmp_vcpu;
+
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+ mutex_unlock(&tmp_vcpu->mutex);
}
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+ unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *tmp_vcpu;
+ int c;
/*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
@@ -272,11 +283,49 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
* that no other VCPUs are run and fiddle with the vgic state while we
* access it.
*/
- ret = -EBUSY;
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (!mutex_trylock(&tmp_vcpu->mutex))
- goto out;
- vcpu_lock_idx = c;
+ kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+ if (!mutex_trylock(&tmp_vcpu->mutex)) {
+ unlock_vcpus(kvm, c - 1);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * vgic_attr_regs_access_v2 - allows user space to access VGIC v2 state
+ *
+ * @dev: kvm device handle
+ * @attr: kvm device attribute
+ * @reg: address the value is read or written
+ * @is_write: true if userspace is writing a register
+ */
+static int vgic_attr_regs_access_v2(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ struct vgic_reg_attr reg_attr;
+ gpa_t addr;
+ struct kvm_vcpu *vcpu;
+ int ret;
+
+ ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
+ if (ret)
+ return ret;
+
+ vcpu = reg_attr.vcpu;
+ addr = reg_attr.addr;
+
+ mutex_lock(&dev->kvm->lock);
+
+ ret = vgic_init(dev->kvm);
+ if (ret)
+ goto out;
+
+ if (!lock_all_vcpus(dev->kvm)) {
+ ret = -EBUSY;
+ goto out;
}
switch (attr->group) {
@@ -291,18 +340,12 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
break;
}
+ unlock_all_vcpus(dev->kvm);
out:
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
- mutex_unlock(&tmp_vcpu->mutex);
- }
-
mutex_unlock(&dev->kvm->lock);
return ret;
}
-/* V2 ops */
-
static int vgic_v2_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
@@ -321,7 +364,7 @@ static int vgic_v2_set_attr(struct kvm_device *dev,
if (get_user(reg, uaddr))
return -EFAULT;
- return vgic_attr_regs_access(dev, attr, &reg, true);
+ return vgic_attr_regs_access_v2(dev, attr, &reg, true);
}
}
@@ -343,7 +386,7 @@ static int vgic_v2_get_attr(struct kvm_device *dev,
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg = 0;
- ret = vgic_attr_regs_access(dev, attr, &reg, false);
+ ret = vgic_attr_regs_access_v2(dev, attr, &reg, false);
if (ret)
return ret;
return put_user(reg, uaddr);
@@ -387,10 +430,6 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
.has_attr = vgic_v2_has_attr,
};
-/* V3 ops */
-
-#ifdef CONFIG_KVM_ARM_VGIC_V3
-
static int vgic_v3_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
@@ -433,5 +472,3 @@ struct kvm_device_ops kvm_arm_vgic_v3_ops = {
.get_attr = vgic_v3_get_attr,
.has_attr = vgic_v3_has_attr,
};
-
-#endif /* CONFIG_KVM_ARM_VGIC_V3 */
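The refactoring above separates out a reusable pattern: take kvm->lock, trylock every vCPU mutex (bailing out with -EBUSY if any vCPU is running and therefore holds its own mutex), access the VGIC state, then release everything in reverse order. A condensed sketch using only the helpers defined in this hunk; with_all_vcpus_locked() itself is illustrative:

    /* Sketch: run fn() with the whole VM quiesced, as the code above does. */
    static int with_all_vcpus_locked(struct kvm_device *dev,
                                     int (*fn)(struct kvm_device *dev))
    {
        int ret;

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
            /* Some vCPU mutex is held (a vCPU is running): back off. */
            ret = -EBUSY;
            goto out;
        }

        ret = fn(dev);          /* no vCPU can enter the guest here */

        unlock_all_vcpus(dev->kvm);
    out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
    }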
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 90d81811fdda..0d3c76a4208b 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -23,7 +23,7 @@
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
-unsigned long extract_bytes(unsigned long data, unsigned int offset,
+unsigned long extract_bytes(u64 data, unsigned int offset,
unsigned int num)
{
return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
@@ -42,6 +42,7 @@ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
return reg | ((u64)val << lower);
}
+#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
bool vgic_has_its(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -51,6 +52,7 @@ bool vgic_has_its(struct kvm *kvm)
return dist->has_its;
}
+#endif
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
@@ -179,7 +181,7 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
int target_vcpu_id = vcpu->vcpu_id;
u64 value;
- value = (mpidr & GENMASK(23, 0)) << 32;
+ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
value |= ((target_vcpu_id & 0xffff) << 8);
if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
value |= GICR_TYPER_LAST;
@@ -609,7 +611,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
bool broadcast;
sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
- broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+ broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
mpidr = SGI_AFFINITY_LEVEL(reg, 3);
mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3bad3c5ed431..e18b30ddcdce 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -550,11 +550,9 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
case VGIC_V2:
len = vgic_v2_init_dist_iodev(io_device);
break;
-#ifdef CONFIG_KVM_ARM_VGIC_V3
case VGIC_V3:
len = vgic_v3_init_dist_iodev(io_device);
break;
-#endif
default:
BUG_ON(1);
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 0b3ecf9d100e..4c34d39d44a0 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -96,7 +96,7 @@ unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
unsigned long data);
-unsigned long extract_bytes(unsigned long data, unsigned int offset,
+unsigned long extract_bytes(u64 data, unsigned int offset,
unsigned int num);
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
@@ -162,12 +162,10 @@ unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
-#ifdef CONFIG_KVM_ARM_VGIC_V3
u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
u64 (*sanitise_fn)(u64));
-#endif
#endif
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 0bf6709d1006..0a063af40565 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -278,12 +278,14 @@ int vgic_v2_map_resources(struct kvm *kvm)
goto out;
}
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- kvm_vgic_global_state.vcpu_base,
- KVM_VGIC_V2_CPU_SIZE, true);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ kvm_vgic_global_state.vcpu_base,
+ KVM_VGIC_V2_CPU_SIZE, true);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ goto out;
+ }
}
dist->ready = true;
@@ -294,6 +296,8 @@ out:
return ret;
}
+DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
+
/**
* vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -310,45 +314,51 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
return -ENXIO;
}
- if (!PAGE_ALIGNED(info->vcpu.start)) {
- kvm_err("GICV physical address 0x%llx not page aligned\n",
- (unsigned long long)info->vcpu.start);
- return -ENXIO;
- }
+ if (!PAGE_ALIGNED(info->vcpu.start) ||
+ !PAGE_ALIGNED(resource_size(&info->vcpu))) {
+ kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
+ kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start,
+ resource_size(&info->vcpu));
+ if (!kvm_vgic_global_state.vcpu_base_va) {
+ kvm_err("Cannot ioremap GICV\n");
+ return -ENOMEM;
+ }
- if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
- kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(&info->vcpu),
- PAGE_SIZE);
- return -ENXIO;
+ ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va,
+ kvm_vgic_global_state.vcpu_base_va + resource_size(&info->vcpu),
+ info->vcpu.start);
+ if (ret) {
+ kvm_err("Cannot map GICV into hyp\n");
+ goto out;
+ }
+
+ static_branch_enable(&vgic_v2_cpuif_trap);
}
kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
resource_size(&info->vctrl));
if (!kvm_vgic_global_state.vctrl_base) {
kvm_err("Cannot ioremap GICH\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
- ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
- if (ret) {
- kvm_err("Cannot register GICv2 KVM device\n");
- iounmap(kvm_vgic_global_state.vctrl_base);
- return ret;
- }
-
ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
kvm_vgic_global_state.vctrl_base +
resource_size(&info->vctrl),
info->vctrl.start);
if (ret) {
kvm_err("Cannot map VCTRL into hyp\n");
- kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
- iounmap(kvm_vgic_global_state.vctrl_base);
- return ret;
+ goto out;
+ }
+
+ ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ if (ret) {
+ kvm_err("Cannot register GICv2 KVM device\n");
+ goto out;
}
kvm_vgic_global_state.can_emulate_gicv2 = true;
@@ -359,4 +369,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
kvm_info("vgic-v2@%llx\n", info->vctrl.start);
return 0;
+out:
+ if (kvm_vgic_global_state.vctrl_base)
+ iounmap(kvm_vgic_global_state.vctrl_base);
+ if (kvm_vgic_global_state.vcpu_base_va)
+ iounmap(kvm_vgic_global_state.vcpu_base_va);
+
+ return ret;
}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e83b7fe4baae..2893d5ba523a 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -29,7 +29,7 @@
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
-struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
+struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,};
/*
* Locking order is always:
@@ -645,6 +645,9 @@ next:
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
+ if (unlikely(!vgic_initialized(vcpu->kvm)))
+ return;
+
vgic_process_maintenance_interrupt(vcpu);
vgic_fold_lr_state(vcpu);
vgic_prune_ap_list(vcpu);
@@ -653,6 +656,9 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
+ if (unlikely(!vgic_initialized(vcpu->kvm)))
+ return;
+
spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
vgic_flush_lr_state(vcpu);
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 6c4625c46368..9d9e014765a2 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -72,7 +72,6 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
kref_get(&irq->refcount);
}
-#ifdef CONFIG_KVM_ARM_VGIC_V3
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
@@ -84,63 +83,14 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
int vgic_register_its_iodevs(struct kvm *kvm);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
#else
-static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void vgic_v3_populate_lr(struct kvm_vcpu *vcpu,
- struct vgic_irq *irq, int lr)
-{
-}
-
-static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
-{
-}
-
-static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline
-void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
-{
-}
-
-static inline
-void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
-{
-}
-
-static inline void vgic_v3_enable(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int vgic_v3_probe(const struct gic_kvm_info *info)
-{
- return -ENODEV;
-}
-
-static inline int vgic_v3_map_resources(struct kvm *kvm)
-{
- return -ENODEV;
-}
-
-static inline int vgic_register_redist_iodevs(struct kvm *kvm,
- gpa_t dist_base_address)
-{
- return -ENODEV;
-}
-
static inline int vgic_register_its_iodevs(struct kvm *kvm)
{
return -ENODEV;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index e469b6012471..f397e9b20370 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,7 +42,6 @@
#ifdef CONFIG_HAVE_KVM_IRQFD
-static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
@@ -168,7 +167,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
list_del_init(&irqfd->list);
- queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+ schedule_work(&irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -555,7 +554,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
- flush_workqueue(irqfd_cleanup_wq);
+ flush_work(&irqfd->shutdown);
return 0;
}
@@ -592,7 +591,7 @@ kvm_irqfd_release(struct kvm *kvm)
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
- flush_workqueue(irqfd_cleanup_wq);
+ flush_work(&irqfd->shutdown);
}
@@ -622,23 +621,8 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_unlock_irq(&kvm->irqfds.lock);
}
-/*
- * create a host-wide workqueue for issuing deferred shutdown requests
- * aggregated from all vm* instances. We need our own isolated single-thread
- * queue to prevent deadlock against flushing the normal work-queue.
- */
-int kvm_irqfd_init(void)
-{
- irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
- if (!irqfd_cleanup_wq)
- return -ENOMEM;
-
- return 0;
-}
-
void kvm_irqfd_exit(void)
{
- destroy_workqueue(irqfd_cleanup_wq);
}
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 195078225aa5..81dfc73d3df3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -559,9 +559,11 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
debugfs_remove_recursive(kvm->debugfs_dentry);
- for (i = 0; i < kvm_debugfs_num_entries; i++)
- kfree(kvm->debugfs_stat_data[i]);
- kfree(kvm->debugfs_stat_data);
+ if (kvm->debugfs_stat_data) {
+ for (i = 0; i < kvm_debugfs_num_entries; i++)
+ kfree(kvm->debugfs_stat_data[i]);
+ kfree(kvm->debugfs_stat_data);
+ }
}
static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
@@ -2369,6 +2371,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
+ debugfs_remove_recursive(vcpu->debugfs_dentry);
kvm_put_kvm(vcpu->kvm);
return 0;
}
@@ -2391,6 +2394,32 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
+static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ char dir_name[ITOA_MAX_LEN * 2];
+ int ret;
+
+ if (!kvm_arch_has_vcpu_debugfs())
+ return 0;
+
+ if (!debugfs_initialized())
+ return 0;
+
+ snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
+ vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
+ vcpu->kvm->debugfs_dentry);
+ if (!vcpu->debugfs_dentry)
+ return -ENOMEM;
+
+ ret = kvm_arch_create_vcpu_debugfs(vcpu);
+ if (ret < 0) {
+ debugfs_remove_recursive(vcpu->debugfs_dentry);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
@@ -2423,6 +2452,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r)
goto vcpu_destroy;
+ r = kvm_create_vcpu_debugfs(vcpu);
+ if (r)
+ goto vcpu_destroy;
+
mutex_lock(&kvm->lock);
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
@@ -2454,6 +2487,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
+ debugfs_remove_recursive(vcpu->debugfs_dentry);
vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
vcpu_decrement:
@@ -3619,7 +3653,7 @@ static int vm_stat_get_per_vm(void *data, u64 *val)
{
struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
- *val = *(u32 *)((void *)stat_data->kvm + stat_data->offset);
+ *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
return 0;
}
@@ -3649,7 +3683,7 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
*val = 0;
kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
- *val += *(u32 *)((void *)vcpu + stat_data->offset);
+ *val += *(u64 *)((void *)vcpu + stat_data->offset);
return 0;
}
@@ -3807,12 +3841,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
* kvm_arch_init makes sure there's at most one caller
* for architectures that support multiple implementations,
* like intel and amd on x86.
- * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
- * conflicts in case kvm is already setup for another implementation.
*/
- r = kvm_irqfd_init();
- if (r)
- goto out_irqfd;
if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM;
@@ -3894,7 +3923,6 @@ out_free_0a:
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
kvm_irqfd_exit();
-out_irqfd:
kvm_arch_exit();
out_fail:
return r;